diff --git a/.clang-tidy.in b/.clang-tidy.in index 31cd6eb31372c..838c09d8873ad 100644 --- a/.clang-tidy.in +++ b/.clang-tidy.in @@ -26,6 +26,7 @@ Checks: '-*, bugprone-undelegated-constructor, hicpp-static-assert, hicpp-undelegated-constructor, + misc-static-assert, misc-uniqueptr-reset-release, modernize-avoid-bind, modernize-deprecated-headers, @@ -33,18 +34,28 @@ Checks: '-*, modernize-replace-random-shuffle, modernize-shrink-to-fit, modernize-unary-static-assert, + mongo-assert-check, mongo-cctype-check, + mongo-config-header-check, + mongo-collection-sharding-runtime-check, + mongo-cxx20-banned-includes-check, + mongo-cxx20-std-chrono-check, mongo-header-bracket-check, - mongo-std-atomic-check, + mongo-macro-definition-leaks-check, mongo-mutex-check, - mongo-assert-check, + mongo-polyfill-check, + mongo-rand-check, + mongo-std-atomic-check, mongo-std-optional-check, + mongo-trace-check, mongo-uninterruptible-lock-guard-check, + mongo-unstructured-log-check, mongo-volatile-check, - mongo-trace-check, + mongo-fcv-constant-check, performance-faster-string-find, performance-implicit-conversion-in-loop, performance-inefficient-algorithm, + performance-no-automatic-move, bugprone-signed-char-misuse, bugprone-suspicious-string-compare, performance-for-range-copy, @@ -94,7 +105,6 @@ Checks: '-*, -misc-misplaced-const, -misc-non-copyable-objects, -misc-redundant-expression, - -misc-static-assert, -misc-throw-by-value-catch-by-reference, -misc-unconventional-assign-operator, -misc-unused-alias-decls, @@ -126,6 +136,8 @@ CheckOptions: value: assert - key: mongo-header-bracket-check.mongoSourceDirs value: 'src/mongo;@MONGO_BUILD_DIR@' + - key: mongo-collection-sharding-runtime-check.exceptionDirs + value: 'src/mongo/db/s' - key: bugprone-assert-side-effect.CheckFunctionCalls value: '0' - key: bugprone-dangling-handle.HandleClasses diff --git a/.gdbinit b/.gdbinit index 6a43ac3fe00d5..2259ac16e39d4 100644 --- a/.gdbinit +++ b/.gdbinit @@ -13,3 +13,6 @@ source buildscripts/gdb/mongo_lock.py # Load methods for printing in-memory contents of WT tables. 
source buildscripts/gdb/wt_dump_table.py + +# Load third-party pretty printers +source src/third_party/immer/dist/tools/gdb_pretty_printers/autoload.py diff --git a/.gitignore b/.gitignore index ce851fdf7b29b..efe5d4811e7a7 100644 --- a/.gitignore +++ b/.gitignore @@ -13,6 +13,8 @@ venv *~ +*.test_iwyu.h +*.test_iwyu.cpp *.swp *.o *.os @@ -44,6 +46,7 @@ venv *.eslintcache *# .#* +iwyu.dat /src/mongo/*/*Debug*/ /src/mongo/*/*/*Debug*/ @@ -60,6 +63,7 @@ venv /src/third_party/*/*.lastbuildstate /buildscripts/libdeps/graph_visualizer_web_stack/build /buildscripts/libdeps/graph_visualizer_web_stack/node_modules +buildscripts/iwyu/test/*/test_run libdeps.graphml build-metrics.json config.log @@ -82,20 +86,26 @@ scratch # binaries /docgen* /loadgen* +/mongoed* +/mongogrid* +/mongoperf* +/mongoshim* +/mongosniff* +/mongotrafficreader* + +# binaries from db-contrib-tool +/ksdecode* /mongo* +/mongoauditdecrypt* /mongobridge* /mongocryptd* /mongod* -/mongoed* -/mongogrid* +/mongodecrypt* /mongokerberos* /mongoldap* -/mongoperf* +/mongoqd* /mongos* -/mongoshim* -/mongosniff* /mongotmock* -/mongotrafficreader* /mqlrun* /wt* @@ -258,3 +268,6 @@ dist-test/ # node extra stuff (for someone installing eslint) node_modules/ package-lock.json + +# jstestfuzz generated test directory +jstestfuzz/ diff --git a/CreativeCommons.txt b/CreativeCommons.txt index 60efd96a51d45..1b3c7c92156ad 100644 --- a/CreativeCommons.txt +++ b/CreativeCommons.txt @@ -189,4 +189,4 @@ ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual - written agreement of the Licensor and You. \ No newline at end of file + written agreement of the Licensor and You. diff --git a/README.md b/README.md index 491f729ce246e..b441e5a6116e8 100644 --- a/README.md +++ b/README.md @@ -30,12 +30,12 @@ Welcome to MongoDB! To run a single server database: ```bash - $ sudo mkdir -p /data/db - $ ./mongod - $ - $ # The mongo javascript shell connects to localhost and test database by default: - $ ./mongo - > help + $ sudo mkdir -p /data/db + $ ./mongod + $ + $ # The mongo javascript shell connects to localhost and test database by default: + $ ./mongo + > help ``` ## Installing Compass @@ -43,7 +43,7 @@ Welcome to MongoDB! You can install compass using the `install_compass` script packaged with MongoDB: ```bash - $ ./install_compass + $ ./install_compass ``` This will download the appropriate MongoDB Compass package for your platform @@ -66,9 +66,9 @@ Welcome to MongoDB! 
## Learn MongoDB - Documentation - https://docs.mongodb.com/manual/ - Developer Center - https://www.mongodb.com/developer/ - MongoDB University - https://learn.mongodb.com + - Documentation - https://docs.mongodb.com/manual/ + - Developer Center - https://www.mongodb.com/developer/ + - MongoDB University - https://learn.mongodb.com ## Cloud Hosted MongoDB diff --git a/README.third_party.md b/README.third_party.md index 8454a55dad89d..f267ef8b6c637 100644 --- a/README.third_party.md +++ b/README.third_party.md @@ -31,6 +31,7 @@ a notice will be included in | [GPerfTools] | BSD-3-Clause | 2.9.1 | | ✗ | | [gRPC] | Apache-2.0 | 1.46.6 | | ✗ | | [ICU4] | ICU | 57.1 | ✗ | ✗ | +| [immer] | BSL-1.0 | d98a68c | | ✗ | | [Intel Decimal FP Library] | BSD-3-Clause | 2.0 Update 1 | | ✗ | | [JSON-Schema-Test-Suite] | MIT | 728066f9c5 | | | | [libstemmer] | BSD-3-Clause | Unknown | ✗ | ✗ | @@ -53,11 +54,10 @@ a notice will be included in | [Unicode] | Unicode-DFS-2015 | 8.0.0 | ✗ | ✗ | | [libunwind] | MIT | 1.6.2 + changes | | ✗ | | [Valgrind] | BSD-3-Clause\[1] | 3.17.0 | | ✗ | -| [variant] | BSL-1.0 | 1.4.0 | | ✗ | | [wiredtiger] | | \[2] | ✗ | ✗ | | [yaml-cpp] | MIT | 0.6.2 | | ✗ | | [Zlib] | Zlib | 1.2.13 | ✗ | ✗ | -| [Zstandard] | BSD-3-Clause | 1.5.2 | ✗ | ✗ | +| [Zstandard] | BSD-3-Clause | 1.5.5 | ✗ | ✗ | [abseil-cpp]: https://github.com/abseil/abseil-cpp [ASIO]: https://github.com/chriskohlhoff/asio @@ -66,6 +66,7 @@ a notice will be included in [fmt]: http://fmtlib.net/ [GPerfTools]: https://github.com/gperftools/gperftools [ICU4]: http://site.icu-project.org/download/ +[immer]: https://github.com/arximboldi/immer [Intel Decimal FP Library]: https://software.intel.com/en-us/articles/intel-decimal-floating-point-math-library [JSON-Schema-Test-Suite]: https://github.com/json-schema-org/JSON-Schema-Test-Suite [libstemmer]: https://github.com/snowballstem/snowball @@ -86,7 +87,6 @@ a notice will be included in [Unicode]: http://www.unicode.org/versions/enumeratedversions.html [libunwind]: http://www.nongnu.org/libunwind/ [Valgrind]: http://valgrind.org/downloads/current.html -[variant]: https://github.com/mpark/variant [wiredtiger]: https://github.com/wiredtiger/wiredtiger [yaml-cpp]: https://github.com/jbeder/yaml-cpp/releases [Zlib]: https://zlib.net/ diff --git a/SConstruct b/SConstruct index d1ef83dbb4467..e692f889c92eb 100644 --- a/SConstruct +++ b/SConstruct @@ -23,8 +23,6 @@ from pkg_resources import parse_version import SCons import SCons.Script -from mongo_tooling_metrics.client import get_mongo_metrics_client -from mongo_tooling_metrics.errors import ExternalHostException from mongo_tooling_metrics.lib.top_level_metrics import SConsToolingMetrics from site_scons.mongo import build_profiles @@ -422,6 +420,12 @@ add_option( nargs=0, ) +add_option( + 'wait-for-debugger', + help='Wait for debugger attach on process startup', + nargs=0, +) + add_option( 'gcov', help='compile with flags for gcov', @@ -593,8 +597,8 @@ add_option( add_option( "cxx-std", - choices=["17", "20"], - default="17", + choices=["20"], + default="20", help="Select the C++ language standard to build with", ) @@ -616,6 +620,13 @@ add_option( help="Specify variables files to load.", ) +add_option( + 'streams-release-build', + default=False, + action='store_true', + help='If set, will include the enterprise streams module in a release build.', +) + link_model_choices = ['auto', 'object', 'static', 'dynamic', 'dynamic-strict', 'dynamic-sdk'] add_option( 'link-model', @@ -910,6 +921,7 @@ def variable_tools_converter(val): 
"mongo_integrationtest", "mongo_unittest", "mongo_libfuzzer", + "mongo_pretty_printer_tests", "textfile", ] @@ -1634,6 +1646,8 @@ envDict = dict( # TODO: Move unittests.txt to $BUILD_DIR, but that requires # changes to MCI. UNITTEST_LIST='$BUILD_ROOT/unittests.txt', + PRETTY_PRINTER_TEST_ALIAS='install-pretty-printer-tests', + PRETTY_PRINTER_TEST_LIST='$BUILD_ROOT/pretty_printer_tests.txt', LIBFUZZER_TEST_ALIAS='install-fuzzertests', LIBFUZZER_TEST_LIST='$BUILD_ROOT/libfuzzer_tests.txt', INTEGRATION_TEST_ALIAS='install-integration-tests', @@ -1659,22 +1673,13 @@ env.AddMethod(lambda env, name, **kwargs: add_option(name, **kwargs), 'AddOption # The placement of this is intentional. Here we setup an atexit method to store tooling metrics. # We should only register this function after env, env_vars and the parser have been properly initialized. -try: - metrics_client = get_mongo_metrics_client() - metrics_client.register_metrics( - SConsToolingMetrics, - utc_starttime=datetime.utcnow(), - artifact_dir=env.Dir('$BUILD_DIR').get_abspath(), - env_vars=env_vars, - env=env, - parser=_parser, - ) -except ExternalHostException as _: - pass -except Exception as _: - print( - "This MongoDB Virtual Workstation could not connect to the internal cluster\nThis is a non-issue, but if this message persists feel free to reach out in #server-dev-platform" - ) +SConsToolingMetrics.register_metrics( + utc_starttime=datetime.utcnow(), + artifact_dir=env.Dir('$BUILD_DIR').get_abspath(), + env_vars=env_vars, + env=env, + parser=_parser, +) if get_option('build-metrics'): env['BUILD_METRICS_ARTIFACTS_DIR'] = '$BUILD_ROOT/$VARIANT_DIR' @@ -2021,12 +2026,16 @@ if env.get('ENABLE_OOM_RETRY'): ': out of memory', 'virtual memory exhausted: Cannot allocate memory', ': fatal error: Killed signal terminated program cc1', + # TODO: SERVER-77322 remove this non memory related ICE. + r'during IPA pass: cp.+g\+\+: internal compiler error', + 'ld terminated with signal 9', ] elif env.ToolchainIs('msvc'): env['OOM_RETRY_MESSAGES'] = [ 'LNK1102: out of memory', 'C1060: compiler is out of heap space', - 'LNK1171: unable to load mspdbcore.dll', + 'c1xx : fatal error C1063: INTERNAL COMPILER ERROR', + r'LNK1171: unable to load mspdbcore\.dll', "LNK1201: error writing to program database ''", ] env['OOM_RETRY_RETURNCODES'] = [1102] @@ -3250,12 +3259,6 @@ if not env.TargetOSIs('windows', 'macOS') and (env.ToolchainIs('GCC', 'clang')): for flag_value in env[search_variable]): env.Append(CCFLAGS=[f'{targeting_flag}{targeting_flag_value}']) -# Needed for auth tests since key files are stored in git with mode 644. -if not env.TargetOSIs('windows'): - for keysuffix in ["1", "2", "ForRollover"]: - keyfile = "jstests/libs/key%s" % keysuffix - os.chmod(keyfile, stat.S_IWUSR | stat.S_IRUSR) - # boostSuffixList is used when using system boost to select a search sequence # for boost libraries. boostSuffixList = ["-mt", ""] @@ -3666,6 +3669,9 @@ def doConfigure(myenv): # Don't issue warnings about potentially evaluated expressions myenv.AddToCCFLAGSIfSupported("-Wno-potentially-evaluated-expression") + # SERVER-76472 we don't try to maintain ABI so disable warnings about possible ABI issues. + myenv.AddToCCFLAGSIfSupported("-Wno-psabi") + # Warn about moves of prvalues, which can inhibit copy elision. myenv.AddToCXXFLAGSIfSupported("-Wpessimizing-move") @@ -3705,15 +3711,6 @@ def doConfigure(myenv): # only) flag that turns it on. 
myenv.AddToCXXFLAGSIfSupported("-Wunused-exception-parameter") - # TODO(SERVER-60151): Avoid the dilemma identified in - # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=100493. Unfortunately, - # we don't have a more targeted warning suppression we can use - # other than disabling all deprecation warnings. We will - # revisit this once we are fully on C++20 and can commit the - # C++20 style code. - if get_option('cxx-std') == "20": - myenv.AddToCXXFLAGSIfSupported('-Wno-deprecated') - # TODO SERVER-58675 - Remove this suppression after abseil is upgraded myenv.AddToCXXFLAGSIfSupported("-Wno-deprecated-builtins") @@ -3827,6 +3824,9 @@ def doConfigure(myenv): usingLibStdCxx = False if has_option('libc++'): + # TODO SERVER-54659 - ASIO depends on std::result_of which was removed in C++ 20 + myenv.Append(CPPDEFINES=["ASIO_HAS_STD_INVOKE_RESULT"]) + if not myenv.ToolchainIs('clang'): myenv.FatalError('libc++ is currently only supported for clang') if myenv.AddToCXXFLAGSIfSupported('-stdlib=libc++'): @@ -3859,37 +3859,18 @@ def doConfigure(myenv): conf.Finish() if myenv.ToolchainIs('msvc'): - if get_option('cxx-std') == "17": - myenv.AppendUnique(CCFLAGS=['/std:c++17', - '/Zc:lambda']) # /Zc:lambda is implied by /std:c++20 - elif get_option('cxx-std') == "20": + if get_option('cxx-std') == "20": myenv.AppendUnique(CCFLAGS=['/std:c++20']) else: - if get_option('cxx-std') == "17": - if not myenv.AddToCXXFLAGSIfSupported('-std=c++17'): - myenv.ConfError('Compiler does not honor -std=c++17') - elif get_option('cxx-std') == "20": + if get_option('cxx-std') == "20": if not myenv.AddToCXXFLAGSIfSupported('-std=c++20'): myenv.ConfError('Compiler does not honor -std=c++20') if not myenv.AddToCFLAGSIfSupported('-std=c11'): - myenv.ConfError("C++17 mode selected for C++ files, but can't enable C11 for C files") + myenv.ConfError("C++20 mode selected for C++ files, but can't enable C11 for C files") if using_system_version_of_cxx_libraries(): - print('WARNING: System versions of C++ libraries must be compiled with C++17 support') - - def CheckCxx17(context): - test_body = """ - #if __cplusplus < 201703L - #error - #endif - namespace NestedNamespaceDecls::AreACXX17Feature {}; - """ - - context.Message('Checking for C++17... ') - ret = context.TryCompile(textwrap.dedent(test_body), ".cpp") - context.Result(ret) - return ret + print('WARNING: System versions of C++ libraries must be compiled with C++20 support') def CheckCxx20(context): test_body = """ @@ -3909,15 +3890,12 @@ def doConfigure(myenv): myenv, help=False, custom_tests={ - 'CheckCxx17': CheckCxx17, 'CheckCxx20': CheckCxx20, }, ) - if get_option('cxx-std') == "17" and not conf.CheckCxx17(): - myenv.ConfError('C++17 support is required to build MongoDB') - elif get_option('cxx-std') == "20" and not conf.CheckCxx20(): - myenv.ConfError('C++20 support was not detected') + if get_option('cxx-std') == "20" and not conf.CheckCxx20(): + myenv.ConfError('C++20 support is required to build MongoDB') conf.Finish() @@ -4390,6 +4368,11 @@ def doConfigure(myenv): "Cannot use libunwind with TSAN, please add --use-libunwind=off to your compile flags" ) + # We add supressions based on the library file in etc/tsan.suppressions + # so the link-model needs to be dynamic. 
+ if not link_model.startswith('dynamic'): + env.FatalError("TSAN is only supported with dynamic link models") + # If anything is changed, added, or removed in # tsan_options, be sure to make the corresponding changes # to the appropriate build variants in etc/evergreen.yml @@ -6038,9 +6021,37 @@ env.AddPackageNameAlias( name="mh-debugsymbols", ) +env.AutoInstall( + target='$PREFIX', + source='$PRETTY_PRINTER_TEST_LIST', + AIB_ROLE='runtime', + AIB_COMPONENT='pretty-printer-tests', + AIB_COMPONENTS_EXTRA=['dist-test'], +) + env['RPATH_ESCAPED_DOLLAR_ORIGIN'] = '\\$$$$ORIGIN' +def isSupportedStreamsPlatform(thisEnv): + # TODO https://jira.mongodb.org/browse/SERVER-74961: Support other platforms. + # linux x86 and ARM64 are supported. + return thisEnv.TargetOSIs('linux') and \ + thisEnv['TARGET_ARCH'] in ('x86_64', 'aarch64') \ + and ssl_provider == 'openssl' + + +def shouldBuildStreams(thisEnv): + if releaseBuild: + # The streaming enterprise module and dependencies are only included in release builds. + # when streams-release-build is set. + return get_option('streams-release-build') and isSupportedStreamsPlatform(thisEnv) + else: + return isSupportedStreamsPlatform(thisEnv) + + +env.AddMethod(shouldBuildStreams, 'ShouldBuildStreams') + + def prefix_libdir_rpath_generator(env, source, target, for_signature): # If the PREFIX_LIBDIR has an absolute path, we will use that directly as # RPATH because that indicates the final install destination of the libraries. @@ -6194,7 +6205,7 @@ sconslinters = env.Command( lint_py = env.Command( target="#lint-lint.py", - source=["buildscripts/quickcpplint.py"], + source=["buildscripts/quickmongolint.py"], action="$PYTHON ${SOURCES[0]} lint", ) @@ -6364,7 +6375,7 @@ if get_option('ninja') == 'disabled': compileCommands = env.CompilationDatabase('compile_commands.json') # Initialize generated-sources Alias as a placeholder so that it can be used as a # dependency for compileCommands. This Alias will be properly updated in other SConscripts. - env.Requires(compileCommands, env.Alias("generated-sources")) + env.Depends(compileCommands, env.Alias("generated-sources")) compileDb = env.Alias("compiledb", compileCommands) msvc_version = "" @@ -6426,6 +6437,41 @@ if env.get('UNITTESTS_COMPILE_CONCURRENCY'): source_file_regex=r"^.*_test\.cpp$", ) +first_half_flag = False + + +def half_source_emitter(target, source, env): + global first_half_flag + if first_half_flag: + first_half_flag = False + if not 'conftest' in str(target[0]) and not str(source[0]).endswith('_test.cpp'): + env.Alias('compile_first_half_non_test_source', target) + else: + first_half_flag = True + return target, source + + +# Cribbed from Tool/cc.py and Tool/c++.py. It would be better if +# we could obtain this from SCons. +_CSuffixes = [".c"] +if not SCons.Util.case_sensitive_suffixes(".c", ".C"): + _CSuffixes.append(".C") + +_CXXSuffixes = [".cpp", ".cc", ".cxx", ".c++", ".C++"] +if SCons.Util.case_sensitive_suffixes(".c", ".C"): + _CXXSuffixes.append(".C") + +for object_builder in SCons.Tool.createObjBuilders(env): + emitterdict = object_builder.builder.emitter + for suffix in emitterdict.keys(): + if not suffix in _CSuffixes + _CXXSuffixes: + continue + base = emitterdict[suffix] + emitterdict[suffix] = SCons.Builder.ListEmitter([ + base, + half_source_emitter, + ]) + # Keep this late in the game so that we can investigate attributes set by all the tools that have run. 
if has_option("cache"): if get_option("cache") == "nolinked": diff --git a/buildscripts/antithesis/base_images/workload/Dockerfile b/buildscripts/antithesis/base_images/workload/Dockerfile index b8d46508252f4..d47e99d07c6ae 100644 --- a/buildscripts/antithesis/base_images/workload/Dockerfile +++ b/buildscripts/antithesis/base_images/workload/Dockerfile @@ -14,12 +14,11 @@ RUN debconf-set-selections /tmp/preseed.txt RUN rm /tmp/preseed.txt RUN apt-get update +RUN DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install -qy git-all wget build-essential checkinstall libreadline-gplv2-dev libncursesw5-dev libssl-dev libsqlite3-dev tk-dev libgdbm-dev libc6-dev libbz2-dev libffi-dev zlib1g-dev RUN apt-get install -qy libcurl4 libgssapi-krb5-2 libldap-2.4-2 libwrap0 libsasl2-2 libsasl2-modules libsasl2-modules-gssapi-mit openssl liblzma5 libssl-dev build-essential software-properties-common -RUN add-apt-repository ppa:deadsnakes/ppa -RUN apt-get update # installs that need to be forced to be non-interactive: python 3.9 and git -RUN DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true apt-get install -qy python3.9 python3.9-dev python3.9-venv git-all +RUN bash -c "cd /opt && wget https://www.python.org/ftp/python/3.9.16/Python-3.9.16.tgz && tar xzf Python-3.9.16.tgz && cd Python-3.9.16 && ./configure --enable-optimizations && make altinstall && rm -f /opt/Python-3.9.6.tgz" # ------------------- # Everything above this line should be common image setup diff --git a/buildscripts/antithesis_suite.py b/buildscripts/antithesis_suite.py index bdf3a346070af..9c555cb0d4f76 100755 --- a/buildscripts/antithesis_suite.py +++ b/buildscripts/antithesis_suite.py @@ -15,6 +15,9 @@ "CleanEveryN", "ContinuousStepdown", "CheckOrphansDeleted", + # TODO SERVER-70396 re-enable hook once the checkMetadata feature flag is removed + # To check the feature flag we need to contact directly the config server that is not exposed in the ExternalFixture + "CheckMetadataConsistencyInBackground", ] _SUITES_PATH = os.path.join("buildscripts", "resmokeconfig", "suites") diff --git a/buildscripts/apply_clang_tidy_fixes.py b/buildscripts/apply_clang_tidy_fixes.py index 9735a662097ef..bb0eab5b0ff82 100755 --- a/buildscripts/apply_clang_tidy_fixes.py +++ b/buildscripts/apply_clang_tidy_fixes.py @@ -53,10 +53,15 @@ def main(): # perform the swap replacement of the binary data file_bytes = bytearray(file_bytes) + adjustments = 0 for replacement in fixes[recorded_md5]['replacements']: - file_bytes[replacement['Offset']:replacement['Offset'] + + + file_bytes[replacement['Offset'] + adjustments:replacement['Offset'] + adjustments + replacement['Length']] = replacement['ReplacementText'].encode() + if replacement['Length'] != len(replacement['ReplacementText']): + adjustments += len(replacement['ReplacementText']) - replacement['Length'] + with open(fixes[recorded_md5]['filepath'], 'wb') as fout: fout.write(bytes(file_bytes)) diff --git a/buildscripts/backports_required_for_multiversion_tests_deduplicator.py b/buildscripts/backports_required_for_multiversion_tests_deduplicator.py index ede797d4a5a8f..4ff42e9069d02 100644 --- a/buildscripts/backports_required_for_multiversion_tests_deduplicator.py +++ b/buildscripts/backports_required_for_multiversion_tests_deduplicator.py @@ -12,19 +12,20 @@ # # Usage: # Add the server ticket number and the path to the test file for the test you intend to denylist -# under the appropriate suite. 
Any test in a (ticket, test_file) pair that appears in this file but +# under the appropriate multiversion branch. Any test in a (ticket, test_file) pair that appears in this file but # not in the last-lts or last-continuous branch version of this file indicates that a commit has # not yet been backported to the last-lts or last-continuous branch and will be excluded from the # multiversion suite corresponding to the root level suite key. # -# Example: To prevent 'my_test_file.js' from running in the 'replica_sets_multiversion' suite with the last-continuous binary -# replica_sets_multiversion: -# - ticket: SERVER-1000 -# test_file: jstests/core/my_test_file.js +# Example: To prevent 'my_test_file.js' from running with the last-continuous binary +# last-continuous: +# all: +# - test_file: jstests/core/my_test_file.js +# ticket: SERVER-1000 # # The above example will denylist jstests/core/my_test_file.js from the -# 'replica_sets_multiversion_gen' task until this file has been updated with the same -# (ticket, test_file) pair on the last-lts branch. +# last-continuous branch until this file has been updated with the same +# (ticket, test_file) pair on the last-continuous branch. # """ diff --git a/buildscripts/blackduck_hub.py b/buildscripts/blackduck_hub.py index 025bbb1cbf0bb..16b6458e6fde0 100644 --- a/buildscripts/blackduck_hub.py +++ b/buildscripts/blackduck_hub.py @@ -1133,9 +1133,8 @@ def _verify_components_in_yaml(self): for mcomp in self.third_party_components: # These components are known to be missing from Black Duck # Aladdin MD5 is a pair of C files for MD5 computation - # timelib is simply missing # Unicode is not code - if mcomp.name in ["Aladdin MD5", "timelib", "unicode"]: + if mcomp.name in ["Aladdin MD5", "unicode"]: continue if mcomp.name not in comp_names: diff --git a/buildscripts/burn_in_tests.py b/buildscripts/burn_in_tests.py index f2ff469137fd7..231747e35a6a8 100755 --- a/buildscripts/burn_in_tests.py +++ b/buildscripts/burn_in_tests.py @@ -1,6 +1,8 @@ #!/usr/bin/env python3 """Command line utility for determining what jstests have been added or modified.""" +import collections import copy +import json import logging import os.path import shlex @@ -48,6 +50,8 @@ SELECTOR_FILE = "etc/burn_in_tests.yml" SUITE_FILES = ["with_server"] +BURN_IN_TEST_MEMBERSHIP_FILE = "burn_in_test_membership_map_file_for_ci.json" + SUPPORTED_TEST_KINDS = ("fsm_workload_test", "js_test", "json_schema_test", "multi_stmt_txn_passthrough", "parallel_fsm_workload_test", "all_versions_js_test") @@ -182,7 +186,13 @@ def create_executor_list(suites, exclude_suites): parameter. Returns a dict keyed by suite name / executor, value is tests to run under that executor. 
""" - test_membership = create_test_membership_map(test_kind=SUPPORTED_TEST_KINDS) + try: + with open(BURN_IN_TEST_MEMBERSHIP_FILE) as file: + test_membership = collections.defaultdict(list, json.load(file)) + LOGGER.info(f"Using cached test membership file {BURN_IN_TEST_MEMBERSHIP_FILE}.") + except FileNotFoundError: + LOGGER.info("Getting test membership data.") + test_membership = create_test_membership_map(test_kind=SUPPORTED_TEST_KINDS) memberships = defaultdict(list) for suite in suites: @@ -611,7 +621,12 @@ def burn_in(self, repos: List[Repo], build_variant: str) -> None: self.burn_in_executor.execute(tests_by_task) -@click.command(context_settings=dict(ignore_unknown_options=True)) +@click.group() +def cli(): + pass + + +@cli.command(context_settings=dict(ignore_unknown_options=True)) @click.option("--no-exec", "no_exec", default=False, is_flag=True, help="Do not execute the found tests.") @click.option("--build-variant", "build_variant", default=DEFAULT_VARIANT, metavar='BUILD_VARIANT', @@ -635,11 +650,11 @@ def burn_in(self, repos: List[Repo], build_variant: str) -> None: @click.option("--evg-project-file", "evg_project_file", default=DEFAULT_EVG_PROJECT_FILE, help="Evergreen project config file") @click.argument("resmoke_args", nargs=-1, type=click.UNPROCESSED) -def main(build_variant: str, no_exec: bool, repeat_tests_num: Optional[int], - repeat_tests_min: Optional[int], repeat_tests_max: Optional[int], - repeat_tests_secs: Optional[int], resmoke_args: str, verbose: bool, - origin_rev: Optional[str], install_dir: Optional[str], use_yaml: bool, - evg_project_file: Optional[str]) -> None: +def run(build_variant: str, no_exec: bool, repeat_tests_num: Optional[int], + repeat_tests_min: Optional[int], repeat_tests_max: Optional[int], + repeat_tests_secs: Optional[int], resmoke_args: str, verbose: bool, + origin_rev: Optional[str], install_dir: Optional[str], use_yaml: bool, + evg_project_file: Optional[str]) -> None: """ Run new or changed tests in repeated mode to validate their stability. @@ -695,5 +710,27 @@ def main(build_variant: str, no_exec: bool, repeat_tests_num: Optional[int], burn_in_orchestrator.burn_in(repos, build_variant) +@cli.command() +def generate_test_membership_map_file_for_ci(): + """ + Generate a file to cache test membership data for CI. + + This command should only be used in CI. The task generator runs many iterations of this script + for many build variants. The bottleneck is that creating the test membership file takes a long time. + Instead, we can cache this data & reuse it in CI for a significant speedup. + + Run this command in CI before running the burn in task generator. 
+ """ + _configure_logging(False) + buildscripts.resmokelib.parser.set_run_options() + + LOGGER.info("Generating burn_in test membership mapping file.") + test_membership = create_test_membership_map(test_kind=SUPPORTED_TEST_KINDS) + with open(BURN_IN_TEST_MEMBERSHIP_FILE, "w") as file: + json.dump(test_membership, file) + LOGGER.info( + f"Finished writing burn_in test membership mapping to {BURN_IN_TEST_MEMBERSHIP_FILE}") + + if __name__ == "__main__": - main() # pylint: disable=no-value-for-parameter + cli() diff --git a/buildscripts/ciconfig/evergreen.py b/buildscripts/ciconfig/evergreen.py index 6c52a5f55d4a6..a3a028caabf18 100644 --- a/buildscripts/ciconfig/evergreen.py +++ b/buildscripts/ciconfig/evergreen.py @@ -7,6 +7,10 @@ import datetime import distutils.spawn +import os +import subprocess +import sys +import time from typing import Set, List, Optional import yaml @@ -21,17 +25,22 @@ def parse_evergreen_file(path, evergreen_binary="evergreen"): """Read an Evergreen file and return EvergreenProjectConfig instance.""" if evergreen_binary: if not distutils.spawn.find_executable(evergreen_binary): - raise EnvironmentError( - "Executable '{}' does not exist or is not in the PATH.".format(evergreen_binary)) + default_evergreen_location = os.path.expanduser(os.path.join("~", "evergreen")) + if os.path.exists(default_evergreen_location): + evergreen_binary = default_evergreen_location + elif os.path.exists(f"{default_evergreen_location}.exe"): + evergreen_binary = f"{default_evergreen_location}.exe" + else: + raise EnvironmentError( + "Executable '{}' does not exist or is not in the PATH.".format( + evergreen_binary)) # Call 'evergreen evaluate path' to pre-process the project configuration file. - cmd = runcommand.RunCommand(evergreen_binary) - cmd.add("evaluate") - cmd.add_file(path) - error_code, output = cmd.execute() - if error_code: - raise RuntimeError("Unable to evaluate {}: {}".format(path, output)) - config = yaml.safe_load(output) + cmd = [evergreen_binary, "evaluate", path] + result = subprocess.run(cmd, capture_output=True, text=True) + if result.returncode: + raise RuntimeError("Unable to evaluate {}: {}".format(path, result.stdout)) + config = yaml.safe_load(result.stdout) else: with open(path, "r") as fstream: config = yaml.safe_load(fstream) @@ -59,6 +68,7 @@ def __init__(self, conf): self.distro_names = set() for variant in self.variants: self.distro_names.update(variant.distro_names) + self.functions = self._conf["functions"] @property def task_names(self) -> List[str]: @@ -320,7 +330,7 @@ def task_names(self): def is_required_variant(self) -> bool: """Return True if the variant is a required variant.""" - return self.display_name.startswith("! ") + return self.display_name.startswith("!") def get_task(self, task_name): """Return the task with the given name as an instance of VariantTask. 
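Editor's note: the `buildscripts/burn_in_tests.py` hunk above introduces a cached test-membership file so CI does not recompute membership for every generated variant. A minimal sketch of that flow, assuming made-up helper names (`compute_membership`, `generate_membership_file`, `load_membership` are illustrative stand-ins, not the real buildscripts API; the real script exposes this through its click commands and `create_test_membership_map`):

```
# Illustrative sketch of the burn_in test-membership caching pattern described above.
# compute_membership() is a placeholder for the slow create_test_membership_map() call.
import collections
import json

MEMBERSHIP_FILE = "burn_in_test_membership_map_file_for_ci.json"


def compute_membership() -> dict:
    """Placeholder for the expensive membership computation."""
    return {"jstests/core/example.js": ["core", "core_txns"]}


def generate_membership_file() -> None:
    """Run once in CI, before the task generator, to cache the expensive lookup."""
    with open(MEMBERSHIP_FILE, "w") as fh:
        json.dump(compute_membership(), fh)


def load_membership() -> collections.defaultdict:
    """Prefer the cached file; fall back to recomputing when it is absent."""
    try:
        with open(MEMBERSHIP_FILE) as fh:
            return collections.defaultdict(list, json.load(fh))
    except FileNotFoundError:
        return collections.defaultdict(list, compute_membership())


if __name__ == "__main__":
    generate_membership_file()
    print(load_membership()["jstests/core/example.js"])
```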
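Editor's note: the `buildscripts/apply_clang_tidy_fixes.py` hunk earlier in this diff changes how recorded replacements are spliced into a file. The recorded offsets refer to the original bytes, so once a replacement with a different length has been applied, every later offset must be shifted by the accumulated size delta. A small self-contained illustration of that bookkeeping, with made-up sample data and assuming the replacement list is ordered by offset:

```
# Toy illustration of applying length-changing replacements whose offsets refer
# to the original buffer, mirroring the 'adjustments' accumulator added above.
original = bytearray(b"foo(bar); foo(baz);")
replacements = [
    {"Offset": 0, "Length": 3, "ReplacementText": "frobnicate"},  # longer than "foo"
    {"Offset": 10, "Length": 3, "ReplacementText": "fn"},         # shorter than "foo"
]

buf = bytearray(original)
adjustments = 0
for rep in replacements:
    # Shift the recorded (original-file) offset by the net growth so far.
    start = rep["Offset"] + adjustments
    end = start + rep["Length"]
    buf[start:end] = rep["ReplacementText"].encode()
    if rep["Length"] != len(rep["ReplacementText"]):
        adjustments += len(rep["ReplacementText"]) - rep["Length"]

print(buf.decode())  # frobnicate(bar); fn(baz);
```

Without the accumulated shift, the second replacement would land seven bytes too early once the first one has grown the buffer.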
diff --git a/buildscripts/clang_format.py b/buildscripts/clang_format.py index ac768880b9f62..974bb3fc4e22d 100755 --- a/buildscripts/clang_format.py +++ b/buildscripts/clang_format.py @@ -264,7 +264,8 @@ def is_interesting_file(file_name): """Return true if this file should be checked.""" return (file_name.startswith("jstests") or file_name.startswith("src") and not file_name.startswith("src/third_party/") - and not file_name.startswith("src/mongo/gotools/")) and FILES_RE.search(file_name) + and not file_name.startswith("src/mongo/gotools/") + and not file_name.startswith("src/streams/third_party")) and FILES_RE.search(file_name) def get_list_from_lines(lines): diff --git a/buildscripts/clang_tidy.py b/buildscripts/clang_tidy.py index 4b7c021f8b237..417885785ddfd 100755 --- a/buildscripts/clang_tidy.py +++ b/buildscripts/clang_tidy.py @@ -186,6 +186,11 @@ def main(): # A few special cases of files to ignore if not file_doc["file"].startswith("src/mongo/"): continue + + # Don't run clang_tidy on the streams/third_party code. + if file_doc["file"].startswith("src/mongo/db/modules/enterprise/src/streams/third_party"): + continue + # TODO SERVER-49884 Remove this when we no longer check in generated Bison. if file_doc["file"].endswith("/parser_gen.cpp"): continue diff --git a/buildscripts/config_diff.py b/buildscripts/config_diff.py index f82522cd21a2b..2f4ad8eef0498 100755 --- a/buildscripts/config_diff.py +++ b/buildscripts/config_diff.py @@ -3,6 +3,8 @@ The comparison is computed by scanning though `base_version_dirs` and `incremented_version_dirs` looking for all configs and setParameters in each tree. It then compares these looking for additions, removals, and deltas. Finally it outputs a summary to the console. + +This comparison does not currently support nested properties is as it does only simple string comparison on key:property pairs - see build_diff_fn as a means of extending the comparison capability in the future. 
""" import argparse @@ -95,7 +97,7 @@ def _compare_and_partition(self, yaml_props: dict, yaml_file_name: str) -> None: # present in the base version properties, but not in the incremented version properties, # which means they were removed in the incremented version in_both_prop = self.properties_diff.removed.pop(compare_key) - changed_properties = self.calc_diff(yaml_val, in_both_prop) + changed_properties = self.calc_diff(in_both_prop, yaml_val) if len(changed_properties) > 0: self.properties_diff.modified[compare_key] = changed_properties @@ -248,7 +250,7 @@ def test_yaml_obj_filters_comparison_types_correctly(self): short_name: networkMessageCompressors default: 'snappy,zstd,zlib' """ - yaml_obj = yaml.load(document) + yaml_obj = yaml.load(document, Loader=yaml.FullLoader) fixture = BuildBasePropertiesForComparisonHandler(ComparisonType.SERVER_PARAMETERS) fixture.handle(yaml_obj, filename) @@ -271,7 +273,7 @@ def test_empty_yaml_obj_does_nothing(self): cpp_namespace: "mongo" """ - yaml_obj = yaml.load(document) + yaml_obj = yaml.load(document, Loader=yaml.FullLoader) fixture = BuildBasePropertiesForComparisonHandler(ComparisonType.SERVER_PARAMETERS) fixture.handle(yaml_obj, filename) @@ -321,7 +323,7 @@ def test_yaml_obj_filtered_correctly(self): default: 'zlib' """ - inc_yaml_obj = yaml.load(document) + inc_yaml_obj = yaml.load(document, Loader=yaml.FullLoader) inc_fixture = ComputeDiffsFromIncrementedVersionHandler(ComparisonType.CONFIGS, {}, self.config_diff_function) @@ -367,7 +369,7 @@ def test_added_works_correctly(self): default: 'snappy,zstd,zlib' """ - inc_yaml_obj = yaml.load(document) + inc_yaml_obj = yaml.load(document, Loader=yaml.FullLoader) inc_fixture = ComputeDiffsFromIncrementedVersionHandler(ComparisonType.CONFIGS, {}, self.config_diff_function) @@ -400,7 +402,7 @@ def test_removed_works_correctly(self): def get_base_data(): return {("ok", "test.yaml"): {"yes": "no"}, ("also_ok", "blah.yaml"): {"no": "yes"}} - inc_yaml_obj = yaml.load(document) + inc_yaml_obj = yaml.load(document, Loader=yaml.FullLoader) inc_fixture = ComputeDiffsFromIncrementedVersionHandler(ComparisonType.CONFIGS, get_base_data(), @@ -430,7 +432,7 @@ def get_base_data(): self.assertTrue(len(properties_diffs.added) == 0) self.assertTrue(len(properties_diffs.modified) == 0) - def test_modified_works_correctly(self): + def test_empty_modified_works_correctly(self): filename = "test.yaml" document = """ server_parameters: @@ -465,8 +467,7 @@ def test_modified_works_correctly(self): short_name: networkMessageCompressors default: 'snappy,zstd,zlib' """ - - inc_yaml_obj = yaml.load(document) + inc_yaml_obj = yaml.load(document, Loader=yaml.FullLoader) inc_fixture = ComputeDiffsFromIncrementedVersionHandler(ComparisonType.CONFIGS, {}, build_diff_fn(['default'])) @@ -488,6 +489,184 @@ def test_modified_works_correctly(self): self.assertTrue(len(properties_diffs.removed) == 0) self.assertTrue(len(properties_diffs.modified) == 0) + def test_not_modified_between_yamls_reports_correctly(self): + filename = "test.yaml" + document = """ + server_parameters: + testOptions: + description: "Cluster server parameter for change stream options" + set_at: cluster + cpp_class: + name: ChangeStreamOptionsParameter + override_set: true + override_validate: true + + testParameter: + description: "Some parameter" + set_at: cluster + cpp_class: + name: ChangeStreamOptionsParameter + override_set: true + override_validate: true + + configs: + "asdf": + description: 'Comma-separated list of compressors to use for network 
messages' + source: [ cli, ini, yaml ] + arg_vartype: String + short_name: networkMessageCompressors + default: 'snappy,zstd,zlib' + + "zxcv": + description: 'Comma-separated list of compressors to use for network messages' + source: [ cli, ini, yaml ] + arg_vartype: String + short_name: networkMessageCompressors + default: 'snappy,zstd,zlib' + """ + + document_inc = document + + document_yaml = yaml.load(document, Loader=yaml.FullLoader) + document_inc_yaml = yaml.load(document_inc, Loader=yaml.FullLoader) + + diff_fn = build_diff_fn(_COMPARE_FIELDS_CONFIGS) + + config_base_properties_handler = BuildBasePropertiesForComparisonHandler( + ComparisonType.CONFIGS) + config_base_properties_handler.handle(document_yaml, filename) + + config_inc_properties_handler = ComputeDiffsFromIncrementedVersionHandler( + ComparisonType.CONFIGS, config_base_properties_handler.properties, diff_fn) + config_inc_properties_handler.handle(document_inc_yaml, filename) + + property_diff = config_inc_properties_handler.properties_diff + + self.assertEqual(0, len(property_diff.modified)) + + diff_fn = build_diff_fn(_COMPARE_FIELDS_SERVER_PARAMETERS) + + sp_base_properties_handler = BuildBasePropertiesForComparisonHandler( + ComparisonType.SERVER_PARAMETERS) + sp_base_properties_handler.handle(document_yaml, filename) + + sp_inc_properties_handler = ComputeDiffsFromIncrementedVersionHandler( + ComparisonType.SERVER_PARAMETERS, sp_base_properties_handler.properties, diff_fn) + sp_inc_properties_handler.handle(document_inc_yaml, filename) + + property_diff = sp_inc_properties_handler.properties_diff + + self.assertEqual(0, len(property_diff.modified)) + + def test_modified_between_yamls_reports_correctly(self): + filename = "test.yaml" + document = """ + server_parameters: + testOptions: + description: "Cluster server parameter for change stream options" + set_at: cluster + cpp_class: + name: ChangeStreamOptionsParameter + override_set: true + override_validate: true + + testParameter: + description: "Some parameter" + set_at: cluster + cpp_class: + name: ChangeStreamOptionsParameter + override_set: true + override_validate: true + + configs: + "asdf": + description: 'Comma-separated list of compressors to use for network messages' + source: [ cli, ini, yaml ] + arg_vartype: String + short_name: networkMessageCompressors + default: 'snappy,zstd,zlib' + + "zxcv": + description: 'Comma-separated list of compressors to use for network messages' + source: [ cli, ini, yaml ] + arg_vartype: String + short_name: networkMessageCompressors + default: 'snappy,zstd,zlib' + """ + + document_inc = """ + server_parameters: + testOptions: + description: "Cluster server parameter for change stream options" + set_at: runtime + cpp_class: + name: ChangeStreamOptionsParameter + override_set: true + override_validate: true + + testParameter: + description: "Some parameter" + set_at: cluster + cpp_class: + name: ChangeStreamOptionsParameter + override_set: true + override_validate: true + + configs: + "asdf": + description: 'Comma-separated list of compressors to use for network messages' + source: [ cli, ini, yaml ] + arg_vartype: int + short_name: networkMessageCompressors + default: 'snappy,zstd,zlib' + + "zxcv": + description: 'Comma-separated list of compressors to use for network messages' + source: [ cli, ini, yaml ] + arg_vartype: String + short_name: networkMessageCompressors + default: 'snappy,zstd,zlib' + """ + + document_yaml = yaml.load(document, Loader=yaml.FullLoader) + document_inc_yaml = yaml.load(document_inc, 
Loader=yaml.FullLoader) + + diff_fn = build_diff_fn(_COMPARE_FIELDS_CONFIGS) + + config_base_properties_handler = BuildBasePropertiesForComparisonHandler( + ComparisonType.CONFIGS) + config_base_properties_handler.handle(document_yaml, filename) + + config_inc_properties_handler = ComputeDiffsFromIncrementedVersionHandler( + ComparisonType.CONFIGS, config_base_properties_handler.properties, diff_fn) + config_inc_properties_handler.handle(document_inc_yaml, filename) + + property_diff = config_inc_properties_handler.properties_diff + + self.assertEqual( + property_diff.modified.get(("asdf", filename)).get("arg_vartype").base, "String") + self.assertEqual( + property_diff.modified.get(("asdf", filename)).get("arg_vartype").inc, "int") + self.assertIsNone(property_diff.modified.get(("zxcv", filename))) + + diff_fn = build_diff_fn(_COMPARE_FIELDS_SERVER_PARAMETERS) + + sp_base_properties_handler = BuildBasePropertiesForComparisonHandler( + ComparisonType.SERVER_PARAMETERS) + sp_base_properties_handler.handle(document_yaml, filename) + + sp_inc_properties_handler = ComputeDiffsFromIncrementedVersionHandler( + ComparisonType.SERVER_PARAMETERS, sp_base_properties_handler.properties, diff_fn) + sp_inc_properties_handler.handle(document_inc_yaml, filename) + + property_diff = sp_inc_properties_handler.properties_diff + + self.assertEqual( + property_diff.modified.get(("testOptions", filename)).get("set_at").base, "cluster") + self.assertEqual( + property_diff.modified.get(("testOptions", filename)).get("set_at").inc, "runtime") + self.assertIsNone(property_diff.modified.get(("testParameter", filename))) + class TestPropertiesDiffFunction(unittest.TestCase): def test_empty_returns_empty(self): diff --git a/buildscripts/cost_model/mongod-inmemory.yaml b/buildscripts/cost_model/mongod-inmemory.yaml index cb0789fb7e7cc..14e2dfdd0cbd3 100644 --- a/buildscripts/cost_model/mongod-inmemory.yaml +++ b/buildscripts/cost_model/mongod-inmemory.yaml @@ -7,6 +7,6 @@ systemLog: logAppend: false setParameter: featureFlagCommonQueryFramework: true - internalQueryFrameworkControl: "tryBonsai" + internalQueryFrameworkControl: "tryBonsaiExperimental" internalMeasureQueryExecutionTimeInNanoseconds: true enableTestCommands: 1 diff --git a/buildscripts/cost_model/mongod.yaml b/buildscripts/cost_model/mongod.yaml index c60835883fda8..bc21abb30be8b 100644 --- a/buildscripts/cost_model/mongod.yaml +++ b/buildscripts/cost_model/mongod.yaml @@ -6,6 +6,6 @@ systemLog: logAppend: false setParameter: featureFlagCommonQueryFramework: true - internalQueryFrameworkControl: "tryBonsai" + internalQueryFrameworkControl: "tryBonsaiExperimental" internalMeasureQueryExecutionTimeInNanoseconds: true enableTestCommands: 1 diff --git a/buildscripts/debugsymb_mapper.py b/buildscripts/debugsymb_mapper.py index d85ad64d9c9e0..f09418cac5f16 100644 --- a/buildscripts/debugsymb_mapper.py +++ b/buildscripts/debugsymb_mapper.py @@ -157,19 +157,20 @@ class Mapper: default_web_service_base_url: str = "https://symbolizer-service.server-tig.prod.corp.mongodb.com" default_cache_dir = os.path.join(os.getcwd(), 'build', 'symbols_cache') - selected_binaries = ('mongos.debug', 'mongod.debug', 'mongo.debug') + selected_binaries = ('mongos', 'mongod', 'mongo') default_client_credentials_scope = "servertig-symbolizer-fullaccess" default_client_credentials_user_name = "client-user" default_creds_file_path = os.path.join(os.getcwd(), '.symbolizer_credentials.json') - def __init__(self, evg_version: str, evg_variant: str, client_id: str, client_secret: str, - 
cache_dir: str = None, web_service_base_url: str = None, + def __init__(self, evg_version: str, evg_variant: str, is_san_variant: bool, client_id: str, + client_secret: str, cache_dir: str = None, web_service_base_url: str = None, logger: logging.Logger = None): """ Initialize instance. :param evg_version: Evergreen version ID. :param evg_variant: Evergreen build variant name. + :param is_san_variant: Whether build variant is sanitizer build. :param client_id: Client id for Okta Oauth. :param client_secret: Secret key for Okta Oauth. :param cache_dir: Full path to cache directory as a string. @@ -178,6 +179,7 @@ def __init__(self, evg_version: str, evg_variant: str, client_id: str, client_se """ self.evg_version = evg_version self.evg_variant = evg_variant + self.is_san_variant = is_san_variant self.cache_dir = cache_dir or self.default_cache_dir self.web_service_base_url = web_service_base_url or self.default_web_service_base_url @@ -263,11 +265,13 @@ def setup_urls(self): urlinfo = self.multiversion_setup.get_urls(self.evg_version, self.evg_variant) - download_symbols_url = urlinfo.urls.get("mongo-debugsymbols.tgz", None) binaries_url = urlinfo.urls.get("Binaries", "") - - if not download_symbols_url: - download_symbols_url = urlinfo.urls.get("mongo-debugsymbols.zip", None) + if self.is_san_variant: + # Sanitizer builds are not stripped and contain debug symbols + download_symbols_url = binaries_url + else: + download_symbols_url = urlinfo.urls.get("mongo-debugsymbols.tgz") or urlinfo.urls.get( + "mongo-debugsymbols.zip") if not download_symbols_url: self.logger.error("Couldn't find URL for debug symbols. Version: %s, URLs dict: %s", @@ -319,23 +323,17 @@ def generate_build_id_mapping(self) -> Generator[Dict[str, str], None, None]: extractor = CmdOutputExtractor() - debug_symbols_path = self.download(self.debug_symbols_url) - debug_symbols_unpacked_path = self.unpack(debug_symbols_path) - binaries_path = self.download(self.url) binaries_unpacked_path = self.unpack(binaries_path) - # we need to analyze two directories: main binary folder inside debug-symbols and + # we need to analyze two directories: main binary folder and # shared libraries folder inside binaries. # main binary folder holds main binaries, like mongos, mongod, mongo ... # shared libraries folder holds shared libraries, tons of them. # some build variants do not contain shared libraries. - debug_symbols_unpacked_path = os.path.join(debug_symbols_unpacked_path, 'dist-test') binaries_unpacked_path = os.path.join(binaries_unpacked_path, 'dist-test') - self.logger.info("INSIDE unpacked debug-symbols/dist-test: %s", - os.listdir(debug_symbols_unpacked_path)) self.logger.info("INSIDE unpacked binaries/dist-test: %s", os.listdir(binaries_unpacked_path)) @@ -352,19 +350,19 @@ def generate_build_id_mapping(self) -> Generator[Dict[str, str], None, None]: # start with main binary folder for binary in self.selected_binaries: - full_bin_path = os.path.join(debug_symbols_unpacked_path, + full_bin_path = os.path.join(binaries_unpacked_path, self.path_options.main_binary_folder_name, binary) if not os.path.exists(full_bin_path): self.logger.error("Could not find binary at %s", full_bin_path) - return + continue build_id_output = extractor.get_build_id(full_bin_path) if not build_id_output.build_id: self.logger.error("Build ID couldn't be extracted. 
\nReadELF output %s", build_id_output.cmd_output) - return + continue else: self.logger.info("Extracted build ID: %s", build_id_output.build_id) @@ -397,14 +395,14 @@ def generate_build_id_mapping(self) -> Generator[Dict[str, str], None, None]: if not os.path.exists(sofile_path): self.logger.error("Could not find binary at %s", sofile_path) - return + continue build_id_output = extractor.get_build_id(sofile_path) if not build_id_output.build_id: self.logger.error("Build ID couldn't be extracted. \nReadELF out %s", build_id_output.cmd_output) - return + continue else: self.logger.info("Extracted build ID: %s", build_id_output.build_id) @@ -426,6 +424,7 @@ def run(self): # mappings is a generator, we iterate over to generate mappings on the go for mapping in mappings: + self.logger.info("Creating mapping %s", mapping) response = self.http_client.post('/'.join((self.web_service_base_url, 'add')), json=mapping) if response.status_code != 200: @@ -440,11 +439,12 @@ def make_argument_parser(parser=None, **kwargs): if parser is None: parser = argparse.ArgumentParser(**kwargs) - parser.add_argument('--version') - parser.add_argument('--client-id') - parser.add_argument('--client-secret') - parser.add_argument('--variant') - parser.add_argument('--web-service-base-url', default="") + parser.add_argument("--version") + parser.add_argument("--client-id") + parser.add_argument("--client-secret") + parser.add_argument("--variant") + parser.add_argument("--is-san-variant", action="store_true") + parser.add_argument("--web-service-base-url", default="") return parser @@ -452,7 +452,8 @@ def main(options): """Execute mapper here. Main entry point.""" mapper = Mapper(evg_version=options.version, evg_variant=options.variant, - client_id=options.client_id, client_secret=options.client_secret, + is_san_variant=options.is_san_variant, client_id=options.client_id, + client_secret=options.client_secret, web_service_base_url=options.web_service_base_url) # when used as a context manager, mapper instance automatically cleans files/folders after finishing its job. diff --git a/buildscripts/eslint.py b/buildscripts/eslint.py index 58019284d9c80..6e74eac8e1b63 100755 --- a/buildscripts/eslint.py +++ b/buildscripts/eslint.py @@ -19,6 +19,7 @@ import tarfile import tempfile import threading +import platform from typing import Optional import urllib.error import urllib.parse @@ -50,14 +51,17 @@ # Name of ESLint as a binary. ESLINT_PROGNAME = "eslint" +# Arch of running system +ARCH = platform.machine() if platform.machine() != "aarch64" else "arm64" + # URL location of our provided ESLint binaries. ESLINT_HTTP_LINUX_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/eslint-" + \ - ESLINT_VERSION + "-linux.tar.gz" + ESLINT_VERSION + "-linux-" + ARCH + ".tar.gz" ESLINT_HTTP_DARWIN_CACHE = "https://s3.amazonaws.com/boxes.10gen.com/build/eslint-" + \ ESLINT_VERSION + "-darwin.tar.gz" # Path in the tarball to the ESLint binary. 
-ESLINT_SOURCE_TAR_BASE = string.Template(ESLINT_PROGNAME + "-$platform-$arch") +ESLINT_SOURCE_TAR_BASE = string.Template(ESLINT_PROGNAME + "-$operating_system-$arch") LOGGER = structlog.get_logger(__name__) @@ -76,15 +80,15 @@ def extract_eslint(tar_path, target_file): tarfp.close() -def get_eslint_from_cache(dest_file, platform, arch): +def get_eslint_from_cache(dest_file, operating_system, arch): """Get ESLint binary from mongodb's cache.""" # Get URL - if platform == "Linux": + if operating_system == "Linux": url = ESLINT_HTTP_LINUX_CACHE - elif platform == "Darwin": + elif operating_system == "Darwin": url = ESLINT_HTTP_DARWIN_CACHE else: - raise ValueError('ESLint is not available as a binary for ' + platform) + raise ValueError('ESLint is not available as a binary for ' + operating_system) dest_dir = tempfile.gettempdir() temp_tar_file = os.path.join(dest_dir, "temp.tar.gz") @@ -94,7 +98,8 @@ def get_eslint_from_cache(dest_file, platform, arch): urllib.request.urlretrieve(url, temp_tar_file) print("Extracting ESLint %s to %s" % (ESLINT_VERSION, dest_file)) - eslint_distfile = ESLINT_SOURCE_TAR_BASE.substitute(platform=platform, arch=arch) + eslint_distfile = ESLINT_SOURCE_TAR_BASE.substitute(operating_system=operating_system, + arch=arch) extract_eslint(temp_tar_file, eslint_distfile) shutil.move(eslint_distfile, dest_file) @@ -108,7 +113,7 @@ def __init__(self, path, cache_dir): # Initialize ESLint configuration information if sys.platform.startswith("linux"): - self.arch = "x86_64" + self.arch = ARCH self.tar_path = None elif sys.platform == "darwin": self.arch = "x86_64" diff --git a/buildscripts/eslint/README.md b/buildscripts/eslint/README.md index ea0d7fc3ae7f3..bba2f46862d9b 100644 --- a/buildscripts/eslint/README.md +++ b/buildscripts/eslint/README.md @@ -21,10 +21,12 @@ "pkg": { "scripts": [ "conf/**/*", "lib/**/*", "messages/**/*" ], "targets": [ "linux-x64", "macos-x64" ] + # "targets": [ "linux-arm" ] }, ``` 6. Run pkg command to make ESLint executables. ``` + npm install pkg . ``` 7. Check that executables are working. @@ -38,6 +40,10 @@ ``` eslint-macos --help ``` + or (if you are on arm) + ``` + eslint --help + ``` (*) If executable fails to find some .js files there are [extra steps](#extra-steps) required to be done before step 6. @@ -48,19 +54,25 @@ Rename produced files. ``` mv eslint-linux eslint-Linux-x86_64 mv eslint-macos eslint-Darwin-x86_64 +# arm +# mv eslint eslint-Linux-arm64 ``` -Archive files. +Archive files. (No leading v in version e.g. 8.28.0 NOT v8.28.0) ``` -tar -czvf eslint-${version}-linux.tar.gz eslint-Linux-x86_64 +tar -czvf eslint-${version}-linux-x86_64.tar.gz eslint-Linux-x86_64 tar -czvf eslint-${version}-darwin.tar.gz eslint-Darwin-x86_64 +# arm +# tar -czvf eslint-${version}-linux-arm64.tar.gz eslint-Linux-arm64 ``` ### Upload archives to `boxes.10gen.com` Archives should be available by the following links: ``` -https://s3.amazonaws.com/boxes.10gen.com/build/eslint-${version}-linux.tar.gz +https://s3.amazonaws.com/boxes.10gen.com/build/eslint-${version}-linux-x86_64.tar.gz https://s3.amazonaws.com/boxes.10gen.com/build/eslint-${version}-darwin.tar.gz +# arm +# https://s3.amazonaws.com/boxes.10gen.com/build/eslint-${version}-linux-arm64.tar.gz ``` Build team has an access to do that. 
You can create a build ticket in Jira for them to do it diff --git a/buildscripts/evergreen_activate_gen_tasks.py b/buildscripts/evergreen_activate_gen_tasks.py index 5c20b285e6b1d..b757a146d8571 100755 --- a/buildscripts/evergreen_activate_gen_tasks.py +++ b/buildscripts/evergreen_activate_gen_tasks.py @@ -26,6 +26,7 @@ EVG_CONFIG_FILE = "./.evergreen.yml" BURN_IN_TAGS = "burn_in_tags" BURN_IN_TESTS = "burn_in_tests" +BURN_IN_VARIANT_SUFFIX = "generated-by-burn-in-tags" class EvgExpansions(BaseModel): @@ -35,13 +36,11 @@ class EvgExpansions(BaseModel): build_id: ID of build being run. version_id: ID of version being run. task_name: Name of task creating the generated configuration. - burn_in_tag_buildvariants: Buildvariants to run burn_in_tags on. """ build_id: str version_id: str task_name: str - burn_in_tag_buildvariants: Optional[str] = None @classmethod def from_yaml_file(cls, path: str) -> "EvgExpansions": @@ -53,13 +52,6 @@ def task(self) -> str: """Get the task being generated.""" return remove_gen_suffix(self.task_name) - @property - def burn_in_tag_buildvariants_list(self) -> List[str]: - """Get the list of burn_in_tags buildvariants.""" - if self.burn_in_tag_buildvariants is None: - return [] - return self.burn_in_tag_buildvariants.split() - def activate_task(expansions: EvgExpansions, evg_api: EvergreenApi) -> None: """ @@ -70,16 +62,12 @@ def activate_task(expansions: EvgExpansions, evg_api: EvergreenApi) -> None: """ if expansions.task == BURN_IN_TAGS: version = evg_api.version_by_id(expansions.version_id) - for base_build_variant in expansions.burn_in_tag_buildvariants_list: - build_variant = f"{base_build_variant}-required" - try: - build_id = version.build_variants_map[build_variant] - except KeyError: - LOGGER.warning( - "It is likely nothing to burn_in, so burn_in_tags build variant" - " was not generated. 
Skipping...", build_variant=build_variant) - continue - + burn_in_build_variants = [ + variant for variant in version.build_variants_map.keys() + if variant.endswith(BURN_IN_VARIANT_SUFFIX) + ] + for build_variant in burn_in_build_variants: + build_id = version.build_variants_map[build_variant] task_list = evg_api.tasks_by_build(build_id) for task in task_list: diff --git a/buildscripts/evergreen_gen_build_metrics_tasks.py b/buildscripts/evergreen_gen_build_metrics_tasks.py index 5cba9c832ac1c..0358b64703046 100755 --- a/buildscripts/evergreen_gen_build_metrics_tasks.py +++ b/buildscripts/evergreen_gen_build_metrics_tasks.py @@ -134,7 +134,6 @@ def create_task_group(target_platform, tasks): FunctionCall("cleanup environment"), FunctionCall("set up venv"), FunctionCall("upload pip requirements"), - FunctionCall("get all modified patch files"), FunctionCall("f_expansions_write"), FunctionCall("configure evergreen api credentials"), FunctionCall("get buildnumber"), @@ -197,7 +196,7 @@ def create_task_group(target_platform, tasks): for link_model, tasks in tasks['linux_arm64_tasks'].items(): variant.add_task_group( create_task_group(f'linux_arm64_{link_model}', tasks), - ['amazon2022-arm64-large']) + ['amazon2023-arm64-large']) project = ShrubProject({variant}) with open('build_metrics_task_gen.json', 'w') as fout: diff --git a/buildscripts/evergreen_task_timeout.py b/buildscripts/evergreen_task_timeout.py index 6c51cf79b7b1b..60ce3f8f7fc05 100755 --- a/buildscripts/evergreen_task_timeout.py +++ b/buildscripts/evergreen_task_timeout.py @@ -144,16 +144,6 @@ def lookup_idle_override(self, build_variant: str, task_name: str) -> Optional[t return None -def _is_required_build_variant(build_variant: str) -> bool: - """ - Determine if the given build variants is a required build variant. - - :param build_variant: Name of build variant to check. - :return: True if the given build variant is required. - """ - return build_variant.endswith("-required") - - def output_timeout(exec_timeout: timedelta, idle_timeout: Optional[timedelta], output_file: Optional[str]) -> None: """ @@ -163,9 +153,6 @@ def output_timeout(exec_timeout: timedelta, idle_timeout: Optional[timedelta], :param idle_timeout: Idle timeout to output. :param output_file: Location of output file to write. """ - # the math library is triggering this error in this function for some - # reason - # pylint: disable=c-extension-no-member output = { "exec_timeout_secs": math.ceil(exec_timeout.total_seconds()), } @@ -226,7 +213,7 @@ def determine_exec_timeout(self, task_name: str, variant: str, LOGGER.info("Overriding configured timeout", exec_timeout_secs=override.total_seconds()) determined_timeout = override - elif _is_required_build_variant( + elif self._is_required_build_variant( variant) and determined_timeout > DEFAULT_REQUIRED_BUILD_TIMEOUT: LOGGER.info("Overriding required-builder timeout", exec_timeout_secs=DEFAULT_REQUIRED_BUILD_TIMEOUT.total_seconds()) @@ -314,6 +301,17 @@ def is_build_variant_asan(self, build_variant: str) -> bool: bv = self.evg_project_config.get_variant(build_variant) return bv.is_asan_build() + def _is_required_build_variant(self, build_variant: str) -> bool: + """ + Determine if the given build variants is a required build variant. + + :param build_variant: Name of build variant to check. + :param evergreen_project_config: Evergreen config to query the variant name. + :return: True if the given build variant is required. + """ + bv = self.evg_project_config.get_variant(build_variant) + return "!" 
in bv.display_name + def determine_timeouts(self, cli_idle_timeout: Optional[timedelta], cli_exec_timeout: Optional[timedelta], outfile: Optional[str], project: str, task: str, variant: str, evg_alias: str, suite_name: str, @@ -357,6 +355,8 @@ def main(): help="Evergreen project task is being executed on.") parser.add_argument("--evg-alias", dest="evg_alias", required=True, help="Evergreen alias used to trigger build.") + parser.add_argument("--test-flags", dest="test_flags", + help="Test flags that are used for `resmoke.py run` command call.") parser.add_argument("--timeout", dest="timeout", type=int, help="Timeout to use (in sec).") parser.add_argument("--exec-timeout", dest="exec_timeout", type=int, help="Exec timeout to use (in sec).") @@ -393,7 +393,9 @@ def dependencies(binder: inject.Binder) -> None: parse_evergreen_file(os.path.expanduser(options.evg_project_config))) binder.bind( ResmokeProxyService, - ResmokeProxyService(run_options=f"--installDir={shlex.quote(options.install_dir)}")) + ResmokeProxyService( + run_options=f"--installDir={shlex.quote(options.install_dir)} {options.test_flags}") + ) inject.configure(dependencies) diff --git a/buildscripts/gdb/mongo.py b/buildscripts/gdb/mongo.py index 317c52acf5cac..fe1a5d443d06a 100644 --- a/buildscripts/gdb/mongo.py +++ b/buildscripts/gdb/mongo.py @@ -14,7 +14,7 @@ if not gdb: sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent)) - from buildscripts.gdb.mongo_printers import absl_get_nodes, get_unique_ptr + from buildscripts.gdb.mongo_printers import absl_get_nodes, get_unique_ptr, get_unique_ptr_bytes def detect_toolchain(progspace): @@ -159,6 +159,36 @@ def get_thread_id(): raise ValueError("Failed to find thread id in {}".format(thread_info)) +MAIN_GLOBAL_BLOCK = None + + +def lookup_type(gdb_type_str: str) -> gdb.Type: + """ + Try to find the type object from string. + + GDB says it searches the global blocks, however this appear not to be the + case or at least it doesn't search all global blocks, sometimes it required + to get the global block based off the current frame. + """ + global MAIN_GLOBAL_BLOCK # pylint: disable=global-statement + + exceptions = [] + try: + return gdb.lookup_type(gdb_type_str) + except Exception as exc: + exceptions.append(exc) + + if MAIN_GLOBAL_BLOCK is None: + MAIN_GLOBAL_BLOCK = gdb.lookup_symbol("main")[0].symtab.global_block() + + try: + return gdb.lookup_type(gdb_type_str, MAIN_GLOBAL_BLOCK) + except Exception as exc: + exceptions.append(exc) + + raise gdb.error("Failed to get type, tried:\n%s" % '\n'.join([str(exc) for exc in exceptions])) + + def get_current_thread_name(): """Return the name of the current GDB thread.""" fallback_name = '"%s"' % (gdb.selected_thread().name or '') @@ -217,7 +247,7 @@ def get_wt_session(recovery_unit, recovery_unit_impl_type): if not wt_session_handle.dereference().address: return None wt_session = wt_session_handle.dereference().cast( - gdb.lookup_type("mongo::WiredTigerSession"))["_session"] + lookup_type("mongo::WiredTigerSession"))["_session"] return wt_session @@ -230,13 +260,13 @@ def get_decorations(obj): TODO: De-duplicate the logic between here and DecorablePrinter. This code was copied from there. 
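The `lookup_type()` helper added to `buildscripts/gdb/mongo.py` above works around `gdb.lookup_type()` sometimes failing to search every global block by retrying against a lazily cached global block taken from `main`. Below is a minimal, standalone sketch of that same "primary lookup, then memoized fallback scope" shape in plain Python; the `PRIMARY`/`FALLBACK` dictionaries and `resolve()` are illustrative stand-ins, not part of the gdb API.

```python
# Standalone sketch of the lookup-with-memoized-fallback pattern used by lookup_type().
_FALLBACK_SCOPE = None  # computed at most once, like MAIN_GLOBAL_BLOCK

PRIMARY = {"mongo::WiredTigerSession": "<type WiredTigerSession>"}
FALLBACK = {"mongo::LockerImpl": "<type LockerImpl>"}


def resolve(name):
    """Try the default scope first, then a lazily cached fallback scope."""
    global _FALLBACK_SCOPE
    errors = []
    try:
        return PRIMARY[name]
    except KeyError as exc:
        errors.append(exc)

    if _FALLBACK_SCOPE is None:
        _FALLBACK_SCOPE = FALLBACK  # the real helper caches main()'s global block here

    try:
        return _FALLBACK_SCOPE[name]
    except KeyError as exc:
        errors.append(exc)

    raise LookupError("Failed to resolve, tried:\n%s" % "\n".join(map(repr, errors)))


print(resolve("mongo::LockerImpl"))  # resolved via the fallback scope
```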
""" type_name = str(obj.type).replace("class", "").replace(" ", "") - decorable = obj.cast(gdb.lookup_type("mongo::Decorable<{}>".format(type_name))) + decorable = obj.cast(lookup_type("mongo::Decorable<{}>".format(type_name))) decl_vector = decorable["_decorations"]["_registry"]["_decorationInfo"] start = decl_vector["_M_impl"]["_M_start"] finish = decl_vector["_M_impl"]["_M_finish"] decorable_t = decorable.type.template_argument(0) - decinfo_t = gdb.lookup_type('mongo::DecorationRegistry<{}>::DecorationInfo'.format( + decinfo_t = lookup_type('mongo::DecorationRegistry<{}>::DecorationInfo'.format( str(decorable_t).replace("class", "").strip())) count = int((int(finish) - int(start)) / decinfo_t.sizeof) @@ -249,13 +279,13 @@ def get_decorations(obj): type_name = type_name[0:type_name.rindex(">")] type_name = type_name[type_name.index("constructAt<"):].replace("constructAt<", "") # get_unique_ptr should be loaded from 'mongo_printers.py'. - decoration_data = get_unique_ptr(decorable["_decorations"]["_decorationData"]) + decoration_data = get_unique_ptr_bytes(decorable["_decorations"]["_decorationData"]) if type_name.endswith('*'): type_name = type_name[0:len(type_name) - 1] type_name = type_name.rstrip() try: - type_t = gdb.lookup_type(type_name) + type_t = lookup_type(type_name) obj = decoration_data[dindex].cast(type_t) yield (type_name, obj) except Exception as err: @@ -341,14 +371,14 @@ class GetMongoDecoration(gdb.Command): """ Search for a decoration on an object by typename and print it e.g. - (gdb) mongo-decoration opCtx ReadConcernArgs + (gdb) mongodb-decoration opCtx ReadConcernArgs would print out a decoration on opCtx whose type name contains the string "ReadConcernArgs". """ def __init__(self): """Initialize GetMongoDecoration.""" - RegisterMongoCommand.register(self, "mongo-decoration", gdb.COMMAND_DATA) + RegisterMongoCommand.register(self, "mongodb-decoration", gdb.COMMAND_DATA) def invoke(self, args, _from_tty): """Invoke GetMongoDecoration.""" @@ -501,7 +531,7 @@ def dump_session(session): val = get_boost_optional(txn_part_observable_state['txnResourceStash']) if val: locker_addr = get_unique_ptr(val["_locker"]) - locker_obj = locker_addr.dereference().cast(gdb.lookup_type("mongo::LockerImpl")) + locker_obj = locker_addr.dereference().cast(lookup_type("mongo::LockerImpl")) print('txnResourceStash._locker', "@", locker_addr) print("txnResourceStash._locker._id", "=", locker_obj["_id"]) else: @@ -584,8 +614,6 @@ def dump_mongod_locks(): try: # Call into mongod, and dump the state of lock manager # Note that output will go to mongod's standard output, not the debugger output window - # Do not call mongo::getGlobalLockManager() due to the compiler optimizing this function in a very weird way - # See SERVER-72816 for more context gdb.execute( "call mongo::LockManager::get((mongo::ServiceContext*) mongo::getGlobalServiceContext())->dump()", from_tty=False, to_string=False) @@ -647,7 +675,7 @@ def dump_recovery_units(recovery_unit_impl_type): recovery_unit_handle = get_unique_ptr(operation_context["_recoveryUnit"]) # By default, cast the recovery unit as "mongo::WiredTigerRecoveryUnit" recovery_unit = recovery_unit_handle.dereference().cast( - gdb.lookup_type(recovery_unit_impl_type)) + lookup_type(recovery_unit_impl_type)) output_doc["recoveryUnit"] = hex(recovery_unit_handle) if recovery_unit else "0x0" wt_session = get_wt_session(recovery_unit, recovery_unit_impl_type) @@ -692,7 +720,7 @@ def dump_session(session, recovery_unit_impl_type): recovery_unit_handle = 
get_unique_ptr(txn_resource_stash["_recoveryUnit"]) # By default, cast the recovery unit as "mongo::WiredTigerRecoveryUnit" recovery_unit = recovery_unit_handle.dereference().cast( - gdb.lookup_type(recovery_unit_impl_type)) + lookup_type(recovery_unit_impl_type)) output_doc["recoveryUnit"] = hex(recovery_unit_handle) if recovery_unit else "0x0" wt_session = get_wt_session(recovery_unit, recovery_unit_impl_type) diff --git a/buildscripts/gdb/mongo_lock.py b/buildscripts/gdb/mongo_lock.py index d05acaf67796f..38dcaa8b0f936 100644 --- a/buildscripts/gdb/mongo_lock.py +++ b/buildscripts/gdb/mongo_lock.py @@ -9,7 +9,7 @@ if not gdb: sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent)) - from buildscripts.gdb.mongo import get_current_thread_name, get_thread_id, RegisterMongoCommand + from buildscripts.gdb.mongo import get_current_thread_name, get_thread_id, lookup_type, RegisterMongoCommand if sys.version_info[0] < 3: raise gdb.GdbError( @@ -323,10 +323,8 @@ def find_lock_manager_holders(graph, thread_dict, show): (_, lock_waiter_lwpid, _) = gdb.selected_thread().ptid lock_waiter = thread_dict[lock_waiter_lwpid] - locker_ptr_type = gdb.lookup_type("mongo::LockerImpl").pointer() + locker_ptr_type = lookup_type("mongo::LockerImpl").pointer() - # Do not call mongo::getGlobalLockManager() due to the compiler optimizing this function in a very weird way - # See SERVER-72816 for more context lock_head = gdb.parse_and_eval( "mongo::LockManager::get((mongo::ServiceContext*) mongo::getGlobalServiceContext())->_getBucket(resId)->findOrInsert(resId)" ) diff --git a/buildscripts/gdb/mongo_printers.py b/buildscripts/gdb/mongo_printers.py index a38c677d9e0cd..0fdb262b15b3a 100644 --- a/buildscripts/gdb/mongo_printers.py +++ b/buildscripts/gdb/mongo_printers.py @@ -9,9 +9,13 @@ import gdb import gdb.printing +ROOT_PATH = str(Path(os.path.abspath(__file__)).parent.parent.parent) +if ROOT_PATH not in sys.path: + sys.path.insert(0, ROOT_PATH) +from src.third_party.immer.dist.tools.gdb_pretty_printers.printers import ListIter as ImmerListIter # pylint: disable=wrong-import-position + if not gdb: - sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent)) - from buildscripts.gdb.mongo import get_boost_optional + from buildscripts.gdb.mongo import get_boost_optional, lookup_type from buildscripts.gdb.optimizer_printers import register_abt_printers try: @@ -29,9 +33,20 @@ "MongoDB gdb extensions only support Python 3. Your GDB was compiled against Python 2") +def get_unique_ptr_bytes(obj): + """Read the value of a libstdc++ std::unique_ptr. + + Returns a gdb.Value where its type resolves to `unsigned char*`. The caller must take care to + cast the returned value themselves. This function is particularly useful in the context of + mongo::Decorable<> types which store the decorations as a slab of memory with + std::unique_ptr. In all other cases get_unique_ptr() can be preferred. 
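`get_unique_ptr_bytes()` deliberately hands back the decoration storage as a raw `unsigned char*` value and leaves the cast to the caller, because `mongo::Decorable<>` packs many differently typed decorations into one allocation at known offsets. A standalone toy model of that idea, using one byte buffer plus an offset/format registry (the offsets and formats here are entirely made up for illustration):

```python
# Toy model of a decoration "slab": each decoration is recovered by indexing into the
# raw bytes at its offset and interpreting it with the right type (here: struct.unpack).
import struct

slab = bytearray(16)
registry = {"counter": (0, "<q"), "flag": (8, "<?")}  # decoration name -> (offset, format)

struct.pack_into("<q", slab, 0, 42)    # write the "counter" decoration into the slab
struct.pack_into("<?", slab, 8, True)  # write the "flag" decoration into the slab


def get_decoration(name):
    offset, fmt = registry[name]
    # Analogous to get_unique_ptr_bytes(_decorationData)[offset].cast(type_t) in the printer.
    return struct.unpack_from(fmt, slab, offset)[0]


print(get_decoration("counter"), get_decoration("flag"))  # 42 True
```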
+ """ + return obj.cast(gdb.lookup_type('std::_Head_base<0, unsigned char*, false>'))['_M_head_impl'] + + def get_unique_ptr(obj): """Read the value of a libstdc++ std::unique_ptr.""" - return obj.cast(gdb.lookup_type('std::_Head_base<0, unsigned char*, false>'))['_M_head_impl'] + return get_unique_ptr_bytes(obj).cast(obj.type.template_argument(0).pointer()) ################################################################################################### @@ -127,7 +142,7 @@ class BSONObjPrinter(object): def __init__(self, val): """Initialize BSONObjPrinter.""" self.val = val - self.ptr = self.val['_objdata'].cast(gdb.lookup_type('void').pointer()) + self.ptr = self.val['_objdata'].cast(lookup_type('void').pointer()) self.is_valid = False # Handle the endianness of the BSON object size, which is represented as a 32-bit integer @@ -200,11 +215,6 @@ def __init__(self, val): """Initialize OplogEntryPrinter.""" self.val = val - @staticmethod - def display_hint(): - """Display hint.""" - return 'string' - def to_string(self): """Return OplogEntry for printing.""" optime = self.val['_entry']['_opTimeBase'] @@ -212,7 +222,7 @@ def to_string(self): return "OplogEntry(%s, %s, %s, %s)" % ( str(self.val['_entry']['_durableReplOperation']['_opType']).split('::')[-1], str(self.val['_entry']['_commandType']).split('::')[-1], - self.val['_entry']['_durableReplOperation']['_nss']['_ns'], optime_str) + self.val['_entry']['_durableReplOperation']['_nss'], optime_str) class UUIDPrinter(object): @@ -248,7 +258,7 @@ def display_hint(): def to_string(self): """Return OID for printing.""" - raw_bytes = [int(self.val['_data'][i]) for i in range(12)] + raw_bytes = [int(self.val['_data'][i]) for i in range(OBJECT_ID_WIDTH)] oid_hex_bytes = [hex(b & 0xFF)[2:].zfill(2) for b in raw_bytes] return "ObjectID('%s')" % "".join(oid_hex_bytes) @@ -286,7 +296,7 @@ def to_string(self): holder = holder_ptr.dereference() str_len = int(holder["_capacity"]) # Start of data is immediately after pointer for holder - start_ptr = (holder_ptr + 1).dereference().cast(gdb.lookup_type("char")).address + start_ptr = (holder_ptr + 1).dereference().cast(lookup_type("char")).address raw_bytes = [int(start_ptr[i]) for i in range(0, str_len)] hex_bytes = [hex(b & 0xFF)[2:].zfill(2) for b in raw_bytes] return "RecordId big string %d hex bytes @ %s: %s" % (str_len, holder_ptr + 1, @@ -295,6 +305,55 @@ def to_string(self): return "unknown RecordId format: %d" % rid_format +TENANT_ID_MASK = 0x80 +OBJECT_ID_WIDTH = 12 + + +def extract_tenant_id(data): + raw_bytes = [int(data[i]) for i in range(1, OBJECT_ID_WIDTH + 1)] + return "".join([hex(b & 0xFF)[2:].zfill(2) for b in raw_bytes]) + + +class DatabaseNamePrinter(object): + """Pretty-printer for mongo::DatabaseName.""" + + def __init__(self, val): + """Initialize DatabaseNamePrinter.""" + self.val = val + + @staticmethod + def display_hint(): + """Display hint.""" + return 'string' + + def to_string(self): + """Return string representation of DatabaseName.""" + data = self.val['_data']['_M_dataplus']['_M_p'] + if data[0] & TENANT_ID_MASK: + return f"{extract_tenant_id(data)}_{(data + OBJECT_ID_WIDTH + 1).string()}" + return (data + 1).string() + + +class NamespaceStringPrinter(object): + """Pretty-printer for mongo::NamespaceString.""" + + def __init__(self, val): + """Initialize NamespaceStringPrinter.""" + self.val = val + + @staticmethod + def display_hint(): + """Display hint.""" + return 'string' + + def to_string(self): + """Return string representation of NamespaceString.""" + data = 
self.val['_data']['_M_dataplus']['_M_p'] + if data[0] & TENANT_ID_MASK: + return f"{extract_tenant_id(data)}_{(data + OBJECT_ID_WIDTH + 1).string()}" + return (data + 1).string() + + class DecorablePrinter(object): """Pretty-printer for mongo::Decorable<>.""" @@ -307,7 +366,7 @@ def __init__(self, val): self.start = decl_vector["_M_impl"]["_M_start"] finish = decl_vector["_M_impl"]["_M_finish"] decorable_t = val.type.template_argument(0) - decinfo_t = gdb.lookup_type('mongo::DecorationRegistry<{}>::DecorationInfo'.format( + decinfo_t = lookup_type('mongo::DecorationRegistry<{}>::DecorationInfo'.format( str(decorable_t).replace("class", "").strip())) self.count = int((int(finish) - int(self.start)) / decinfo_t.sizeof) @@ -322,7 +381,7 @@ def to_string(self): def children(self): """Children.""" - decoration_data = get_unique_ptr(self.val["_decorations"]["_decorationData"]) + decoration_data = get_unique_ptr_bytes(self.val["_decorations"]["_decorationData"]) for index in range(self.count): descriptor = self.start[index] @@ -342,7 +401,7 @@ def children(self): type_name = type_name.rstrip() # Cast the raw char[] into the actual object that is stored there. - type_t = gdb.lookup_type(type_name) + type_t = lookup_type(type_name) obj = decoration_data[dindex].cast(type_t) yield ('key', "%d:%s:%s" % (index, obj.address, type_name)) @@ -589,6 +648,61 @@ def children(self): yield ('value', kvp['value']) +class ImmutableMapIter(ImmerListIter): + def __init__(self, val): + super().__init__(val) + self.max = (1 << 64) - 1 + self.pair = None + self.curr = (None, self.max, self.max) + + def __next__(self): + if self.pair: + result = ('value', self.pair['second']) + self.pair = None + self.i += 1 + return result + if self.i == self.size: + raise StopIteration + if self.i < self.curr[1] or self.i >= self.curr[2]: + self.curr = self.region() + self.pair = self.curr[0][self.i - self.curr[1]].cast( + gdb.lookup_type(self.v.type.template_argument(0).name)) + result = ('key', self.pair['first']) + return result + + +class ImmutableMapPrinter: + """Pretty-printer for mongo::immutable::map<>.""" + + def __init__(self, val): + self.val = val + + def to_string(self): + return '%s of size %d' % (self.val.type, int(self.val['_storage']['impl_']['size'])) + + def children(self): + return ImmutableMapIter(self.val['_storage']) + + def display_hint(self): + return 'map' + + +class ImmutableSetPrinter: + """Pretty-printer for mongo::immutable::set<>.""" + + def __init__(self, val): + self.val = val + + def to_string(self): + return '%s of size %d' % (self.val.type, int(self.val['_storage']['impl_']['size'])) + + def children(self): + return ImmerListIter(self.val['_storage']) + + def display_hint(self): + return 'array' + + def find_match_brackets(search, opening='<', closing='>'): """Return the index of the closing bracket that matches the first opening bracket. @@ -721,7 +835,7 @@ def make_inverse_enum_dict(enum_type_name): For example, if the enum type is 'mongo::sbe::vm::Builtin' with an element 'regexMatch', the dictionary will contain 'regexMatch' value and not 'mongo::sbe::vm::Builtin::regexMatch'. """ - enum_dict = gdb.types.make_enum_dict(gdb.lookup_type(enum_type_name)) + enum_dict = gdb.types.make_enum_dict(lookup_type(enum_type_name)) enum_inverse_dic = dict() for key, value in enum_dict.items(): enum_inverse_dic[int(value)] = key.split('::')[-1] # take last element @@ -768,11 +882,11 @@ def __init__(self, val): # either use an inline buffer or an allocated one. 
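The `DatabaseNamePrinter` and `NamespaceStringPrinter` added above both decode the same layout: when the first byte has the `TENANT_ID_MASK` (0x80) bit set, the next `OBJECT_ID_WIDTH` (12) bytes are the tenant `ObjectId`, printed as hex and joined to the namespace string with an underscore; otherwise the string simply starts at byte 1. A standalone sketch of that decoding over an ordinary `bytes` buffer (the sample buffers below are fabricated, not server output):

```python
# Decode the discriminator byte + optional 12-byte tenant id layout used by the printers.
TENANT_ID_MASK = 0x80
OBJECT_ID_WIDTH = 12


def render(data: bytes) -> str:
    if data[0] & TENANT_ID_MASK:
        tenant = data[1:1 + OBJECT_ID_WIDTH].hex()
        return f"{tenant}_{data[1 + OBJECT_ID_WIDTH:].decode()}"
    return data[1:].decode()


plain = bytes([0x00]) + b"test.coll"
with_tenant = bytes([0x80]) + bytes(range(1, 13)) + b"test.coll"

print(render(plain))        # test.coll
print(render(with_tenant))  # 0102030405060708090a0b0c_test.coll
```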
The choice of storage is decoded in the # last bit of the 'metadata_' field. storage = self.val['_instrs']['storage_'] - meta = storage['metadata_'].cast(gdb.lookup_type('size_t')) + meta = storage['metadata_'].cast(lookup_type('size_t')) self.is_inlined = (meta % 2 == 0) self.size = (meta >> 1) self.pdata = \ - storage['data_']['inlined']['inlined_data'].cast(gdb.lookup_type('uint8_t').pointer()) \ + storage['data_']['inlined']['inlined_data'].cast(lookup_type('uint8_t').pointer()) \ if self.is_inlined \ else storage['data_']['allocated']['allocated_data'] @@ -796,17 +910,17 @@ def children(self): yield 'instrs total size', self.size # Sizes for types we'll use when parsing the insructions stream. - int_size = gdb.lookup_type('int').sizeof - ptr_size = gdb.lookup_type('void').pointer().sizeof - tag_size = gdb.lookup_type('mongo::sbe::value::TypeTags').sizeof - value_size = gdb.lookup_type('mongo::sbe::value::Value').sizeof - uint8_size = gdb.lookup_type('uint8_t').sizeof - uint32_size = gdb.lookup_type('uint32_t').sizeof - uint64_size = gdb.lookup_type('uint64_t').sizeof - builtin_size = gdb.lookup_type('mongo::sbe::vm::Builtin').sizeof - time_unit_size = gdb.lookup_type('mongo::TimeUnit').sizeof - timezone_size = gdb.lookup_type('mongo::TimeZone').sizeof - day_of_week_size = gdb.lookup_type('mongo::DayOfWeek').sizeof + int_size = lookup_type('int').sizeof + ptr_size = lookup_type('void').pointer().sizeof + tag_size = lookup_type('mongo::sbe::value::TypeTags').sizeof + value_size = lookup_type('mongo::sbe::value::Value').sizeof + uint8_size = lookup_type('uint8_t').sizeof + uint32_size = lookup_type('uint32_t').sizeof + uint64_size = lookup_type('uint64_t').sizeof + builtin_size = lookup_type('mongo::sbe::vm::Builtin').sizeof + time_unit_size = lookup_type('mongo::TimeUnit').sizeof + timezone_size = lookup_type('mongo::TimeZone').sizeof + day_of_week_size = lookup_type('mongo::DayOfWeek').sizeof cur_op = self.pdata end_op = self.pdata + self.size @@ -851,9 +965,9 @@ def children(self): cur_op += uint32_size elif op_name in ['function', 'functionSmall']: arity_size = \ - gdb.lookup_type('mongo::sbe::vm::ArityType').sizeof \ + lookup_type('mongo::sbe::vm::ArityType').sizeof \ if op_name == 'function' \ - else gdb.lookup_type('mongo::sbe::vm::SmallArityType').sizeof + else lookup_type('mongo::sbe::vm::SmallArityType').sizeof builtin_id = read_as_integer(cur_op, builtin_size) args = 'builtin: ' + self.builtins_lookup.get(builtin_id, "unknown") args += ' arity: ' + str(read_as_integer(cur_op + builtin_size, arity_size)) @@ -896,6 +1010,8 @@ def build_pretty_printer(): """Build a pretty printer.""" pp = MongoPrettyPrinterCollection() pp.add('BSONObj', 'mongo::BSONObj', False, BSONObjPrinter) + pp.add('DatabaseName', 'mongo::DatabaseName', False, DatabaseNamePrinter) + pp.add('NamespaceString', 'mongo::NamespaceString', False, NamespaceStringPrinter) pp.add('Decorable', 'mongo::Decorable', True, DecorablePrinter) pp.add('Status', 'mongo::Status', False, StatusPrinter) pp.add('StatusWith', 'mongo::StatusWith', True, StatusWithPrinter) @@ -914,6 +1030,8 @@ def build_pretty_printer(): pp.add('__wt_update', '__wt_update', False, WtUpdateToBsonPrinter) pp.add('CodeFragment', 'mongo::sbe::vm::CodeFragment', False, SbeCodeFragmentPrinter) pp.add('boost::optional', 'boost::optional', True, BoostOptionalPrinter) + pp.add('immutable::map', 'mongo::immutable::map', True, ImmutableMapPrinter) + pp.add('immutable::set', 'mongo::immutable::set', True, ImmutableSetPrinter) # Optimizer/ABT related pretty 
printers that can be used only with a running process. register_abt_printers(pp) diff --git a/buildscripts/gdb/optimizer_printers.py b/buildscripts/gdb/optimizer_printers.py index 8c6c399e6945b..926e190df1b61 100644 --- a/buildscripts/gdb/optimizer_printers.py +++ b/buildscripts/gdb/optimizer_printers.py @@ -8,7 +8,7 @@ if not gdb: sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent)) - from buildscripts.gdb.mongo import get_boost_optional + from buildscripts.gdb.mongo import get_boost_optional, lookup_type def eval_print_fn(val, print_fn): @@ -51,6 +51,14 @@ def __init__(self, val): super().__init__(val, "ExplainGenerator::explainInterval") +class CandidateIndexEntryPrinter(OptimizerTypePrinter): + """Pretty-printer for mongo::optimizer::CandidateIndexEntry.""" + + def __init__(self, val): + """Initialize CandidateIndexEntryPrinter.""" + super().__init__(val, "ExplainGenerator::explainCandidateIndex") + + class IntervalExprPrinter(OptimizerTypePrinter): """Pretty-printer for mongo::optimizer::IntervalRequirement::Node.""" @@ -99,6 +107,7 @@ def __init__(self, val, arity, name): self.val = val self.arity = arity self.name = name + self.custom_children = [] @staticmethod def display_hint(): @@ -109,7 +118,16 @@ def children(self): """children.""" prior_indent = ABTPrinter.indent_level - current_indent = ABTPrinter.indent_level + self.arity - 1 + current_indent = ABTPrinter.indent_level + self.arity + len(self.custom_children) - 1 + for child in self.custom_children: + lhs = "\n" + for _ in range(current_indent): + lhs += "| " + + ABTPrinter.indent_level = current_indent + yield lhs, child + current_indent -= 1 + for i in range(self.arity): lhs = "\n" for _ in range(current_indent): @@ -121,6 +139,10 @@ def children(self): current_indent -= 1 ABTPrinter.indent_level = prior_indent + # Adds a custom child node which is not directly contained in the "_nodes" member variable. + def add_child(self, child): + self.custom_children.append(child) + def to_string(self): # Default for nodes which just print their type. return self.name @@ -262,9 +284,18 @@ def display_hint(): """Display hint.""" return None + @staticmethod + def print_sbe_value(tag, value): + value_print_fn = "sbe::value::print" + (print_fn_symbol, _) = gdb.lookup_symbol(value_print_fn) + if print_fn_symbol is None: + raise gdb.GdbError("Could not find pretty print function: " + value_print_fn) + print_fn = print_fn_symbol.value() + return print_fn(tag, value) + def to_string(self): - return "Constant[tag={},val={}]".format( - str(self.val["_tag"]).split("::")[-1], self.val["_val"]) + return "Constant[{}]".format( + ConstantPrinter.print_sbe_value(self.val["_tag"], self.val["_val"])) class VariablePrinter(object): @@ -550,9 +581,9 @@ def to_string(self): if get_boost_optional(root_proj) is not None: res += ": " + str(root_proj) + ", " # Rely on default printer for std::set, but remove the extra metadata at the start. - # TODO SERVER-75541 pretty print field projections map. 
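For the `immutable::map` support added to `mongo_printers.py` earlier in this diff, `ImmutableMapIter` satisfies gdb's `'map'` display hint by emitting each stored pair as a `('key', k)` entry followed by a `('value', v)` entry. A toy generator showing just that alternation (the data is illustrative only):

```python
# Minimal stand-in for the alternating key/value iteration performed by ImmutableMapIter.
def map_children(pairs):
    for key, value in pairs:
        yield ("key", key)
        yield ("value", value)


for label, item in map_children([("a", 1), ("b", 2)]):
    print(label, item)
# key a / value 1 / key b / value 2
```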
- # field_projections = self.val["_fieldProjections"] - # res += str(field_projections).split("elems =")[-1] + field_projections = self.val["_fieldProjections"] + res += "" if field_projections["size_"] == 0 else str(field_projections).split( + "elems =")[-1] res += "}" return res @@ -599,7 +630,7 @@ def __init__(self, val): def to_string(self): return "IndexScan[{{{}}}, scanDef={}, indexDef={}, interval={}]".format( self.val["_fieldProjectionMap"], self.val["_scanDefName"], self.val["_indexDefName"], - self.val["_indexInterval"]) + self.val["_indexInterval"]).replace("\n", "") class SeekNodePrinter(FixedArityNodePrinter): @@ -610,7 +641,7 @@ def __init__(self, val): super().__init__(val, 2, "Seek") def to_string(self): - return "Seek[rid_projection: {}, {}, scanDef: {}]".format(self.val["_rid_projectionName"], + return "Seek[rid_projection: {}, {}, scanDef: {}]".format(self.val["_ridProjectionName"], self.val["_fieldProjectionMap"], self.val["_scanDefName"]) @@ -638,12 +669,70 @@ def to_string(self): self.val["_nodeId"]["_index"]) +class ResidualRequirementPrinter(object): + """Pretty-printer for ResidualRequirement.""" + + def __init__(self, val): + """Initialize ResidualRequirementPrinter.""" + self.val = val + + def to_string(self): + key = self.val["_key"] + req = self.val["_req"] + res = "<" + if get_boost_optional(key["_projectionName"]) is not None: + res += "refProj: " + str(get_boost_optional(key["_projectionName"])) + ", " + + res += "path: '" + str(key["_path"]).replace("| ", "").replace("\n", " -> ") + "'" + + if get_boost_optional(req["_boundProjectionName"]) is not None: + res += "boundProj: " + str(get_boost_optional(req["_boundProjectionName"])) + ", " + + res += ">" + return res + + class SargableNodePrinter(FixedArityNodePrinter): """Pretty-printer for SargableNode.""" def __init__(self, val): """Initialize SargableNodePrinter.""" - super().__init__(val, 3, "Sargable") + # Although Sargable technically has 3 children, avoid printing the refs (child1) and bind block (child2). + super().__init__(val, 1, "Sargable") + + # Add children for requirements, candidateIndex, and scan_params. 
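`add_child()` on `FixedArityNodePrinter` lets a node printer register synthetic children that are not held in its `_nodes`; `SargableNodePrinter` uses it above to surface the requirements map, candidate indexes, and optional scan parameters ahead of the single real child it still prints. A much-simplified stand-in for that mechanism (class names and node contents are invented for the example):

```python
# Toy printer: synthetic children added via add_child() are yielded before arity children.
class ToyNodePrinter:
    def __init__(self, name, arity_children):
        self.name = name
        self.arity_children = list(arity_children)
        self.custom_children = []

    def add_child(self, child):
        self.custom_children.append(child)

    def children(self):
        for child in self.custom_children + self.arity_children:
            yield ("child", child)


p = ToyNodePrinter("Sargable", ["<bound ABT child>"])
p.add_child("requirements: {...}")
p.add_child("candidateIndexes: [...]")
print([child for _, child in p.children()])
```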
+ self.add_child(str(self.val["_reqMap"]).replace("\n", "")) + self.add_child(self.print_candidate_indexes()) + + self.scan_params = get_boost_optional(self.val["_scanParams"]) + if self.scan_params is not None: + self.add_child(self.print_scan_params()) + + def print_scan_params(self): + res = "scan_params: (proj: " + str(self.scan_params["_fieldProjectionMap"]) + ", " + residual_reqs = get_boost_optional(self.scan_params["_residualRequirements"]) + if residual_reqs is not None: + res += "residual: " + str(residual_reqs) + res += ")" + return res + + def print_candidate_indexes(self): + res = "candidateIndexes: [" + indexes = Vector(self.val["_candidateIndexes"]) + for i in range(indexes.count()): + if i > 0: + res += ", " + res += "" + res += "]" + return res + + @staticmethod + def index_req_to_string(index_req): + req_map = ["Index", "Seek", "Complete"] + return req_map[index_req] + + def to_string(self): + return "Sargable [" + self.index_req_to_string(self.val["_target"]) + "]" class RIDIntersectNodePrinter(FixedArityNodePrinter): @@ -813,7 +902,7 @@ def get_dynamic_type(self): def to_string(self): dynamic_type = self.get_dynamic_type() try: - dynamic_type = gdb.lookup_type(dynamic_type).strip_typedefs() + dynamic_type = lookup_type(dynamic_type).strip_typedefs() except gdb.error: return "Unknown PolyValue tag: {}, did you add a new one?".format(self.tag) # GDB automatically formats types with children, remove the extra characters to get the @@ -899,7 +988,7 @@ class ABTPrinter(PolyValuePrinter): def get_bound_projections(node): # Casts the input node to an ExpressionBinder and returns the set of bound projection names. pp = PolyValuePrinter(ABTPrinter.abt_type_set, ABTPrinter.abt_namespace, node) - dynamic_type = gdb.lookup_type(pp.get_dynamic_type()).strip_typedefs() + dynamic_type = lookup_type(pp.get_dynamic_type()).strip_typedefs() binder = pp.cast_control_block(dynamic_type) return Vector(binder["_names"]) @@ -908,6 +997,69 @@ def __init__(self, val): super().__init__(ABTPrinter.abt_type_set, ABTPrinter.abt_namespace, val) +class AtomPrinter(object): + """Pretty-printer for Atom.""" + + def __init__(self, val): + """Initialize AtomPrinter.""" + self.val = val + + def to_string(self): + return self.val["_expr"] + + +class ConjunctionPrinter(object): + """Pretty-printer for Conjunction.""" + + def __init__(self, val, separator=" ^ "): + """Initialize ConjunctionPrinter.""" + self.val = val + self.dynamic_nodes = Vector(self.val["_dyNodes"]) + self.dynamic_count = self.dynamic_nodes.count() + self.separator = separator + + def to_string(self): + if self.dynamic_count == 0: + return "" + + res = "" + first = True + for child in self.dynamic_nodes: + if first: + first = False + else: + res += self.separator + + res += str(child) + return res + + +class DisjunctionPrinter(ConjunctionPrinter): + """Pretty-printer for Disjunction.""" + + def __init__(self, val): + super().__init__(val, " U ") + + +class BoolExprPrinter(PolyValuePrinter): + """Pretty-printer for BoolExpr.""" + + type_set = ["Atom", "Conjunction", "Disjunction"] + + def __init__(self, val, template_type): + """Initialize BoolExprPrinter.""" + namespace = "mongo::optimizer::BoolExpr<" + template_type + ">::" + super().__init__(BoolExprPrinter.type_set, namespace, val) + + +class ResidualReqExprPrinter(BoolExprPrinter): + """Pretty-printer for BoolExpr.""" + + def __init__(self, val): + """Initialize ResidualReqExprPrinter.""" + super().__init__(val, "mongo::optimizer::ResidualRequirement") + + def 
register_abt_printers(pp): """Registers a number of pretty printers related to the CQF optimizer.""" @@ -934,6 +1086,28 @@ def register_abt_printers(pp): pp.add("PartialSchemaRequirements", "mongo::optimizer::PartialSchemaRequirements", False, PartialSchemaReqMapPrinter) + # ResidualRequirement printer. + pp.add("ResidualRequirement", "mongo::optimizer::ResidualRequirement", False, + ResidualRequirementPrinter) + + # CandidateIndexEntry printer. + pp.add("CandidateIndexEntry", "mongo::optimizer::CandidateIndexEntry", False, + CandidateIndexEntryPrinter) + + pp.add( + "ResidualRequirementExpr", + ("mongo::optimizer::algebra::PolyValue<" + + "mongo::optimizer::BoolExpr::Atom, " + + "mongo::optimizer::BoolExpr::Conjunction, " + + "mongo::optimizer::BoolExpr::Disjunction>"), + False, + ResidualReqExprPrinter, + ) + for bool_type in BoolExprPrinter.type_set: + pp.add(bool_type, + "mongo::optimizer::BoolExpr::" + bool_type, + False, getattr(sys.modules[__name__], bool_type + "Printer")) + # Utility types within the optimizer. pp.add("StrongStringAlias", "mongo::optimizer::StrongStringAlias", True, StrongStringAliasPrinter) @@ -946,7 +1120,7 @@ def register_abt_printers(pp): # stale. try: # ABT printer. - abt_type = gdb.lookup_type("mongo::optimizer::ABT").strip_typedefs() + abt_type = lookup_type("mongo::optimizer::ABT").strip_typedefs() pp.add('ABT', abt_type.name, False, ABTPrinter) abt_ref_type = abt_type.name + "::Reference" diff --git a/buildscripts/gdb/wt_dump_table.py b/buildscripts/gdb/wt_dump_table.py index 699cb3381e4d2..1add011e3e968 100644 --- a/buildscripts/gdb/wt_dump_table.py +++ b/buildscripts/gdb/wt_dump_table.py @@ -1,6 +1,13 @@ import gdb import bson +import sys +import os from pprint import pprint +from pathlib import Path + +if not gdb: + sys.path.insert(0, str(Path(os.path.abspath(__file__)).parent.parent.parent)) + from buildscripts.gdb.mongo import lookup_type DEBUGGING = False ''' @@ -21,7 +28,7 @@ def dump_pages_for_table(ident): - conn_impl_type = gdb.lookup_type("WT_CONNECTION_IMPL") + conn_impl_type = lookup_type("WT_CONNECTION_IMPL") if not conn_impl_type: print('WT_CONNECTION_IMPL type not found. 
Try invoking this function from a different \ thread and frame.') @@ -104,7 +111,7 @@ def get_data_handle(conn, handle_name): def get_btree_handle(dhandle): - btree = gdb.lookup_type('WT_BTREE').pointer() + btree = lookup_type('WT_BTREE').pointer() return dhandle['handle'].reinterpret_cast(btree).dereference() diff --git a/buildscripts/idl/gen_all_feature_flag_list.py b/buildscripts/idl/gen_all_feature_flag_list.py index b76920bde07ce..1dcc46d11bc3b 100644 --- a/buildscripts/idl/gen_all_feature_flag_list.py +++ b/buildscripts/idl/gen_all_feature_flag_list.py @@ -56,16 +56,22 @@ def is_third_party_idl(idl_path: str) -> bool: return False -def gen_all_feature_flags(idl_dir: str = os.getcwd()): +def gen_all_feature_flags(idl_dirs: List[str] = None): """Generate a list of all feature flags.""" + default_idl_dirs = ["src", "buildscripts"] + + if not idl_dirs: + idl_dirs = default_idl_dirs + all_flags = [] - for idl_path in sorted(lib.list_idls(idl_dir)): - if is_third_party_idl(idl_path): - continue - doc = parser.parse_file(open(idl_path), idl_path) - for feature_flag in doc.spec.feature_flags: - if feature_flag.default.literal != "true": - all_flags.append(feature_flag.name) + for idl_dir in idl_dirs: + for idl_path in sorted(lib.list_idls(idl_dir)): + if is_third_party_idl(idl_path): + continue + doc = parser.parse_file(open(idl_path), idl_path) + for feature_flag in doc.spec.feature_flags: + if feature_flag.default.literal != "true": + all_flags.append(feature_flag.name) force_disabled_flags = yaml.safe_load( open("buildscripts/resmokeconfig/fully_disabled_feature_flags.yml")) diff --git a/buildscripts/idl/idl/ast.py b/buildscripts/idl/idl/ast.py index 6a27bfad81803..2c338c45d87c2 100644 --- a/buildscripts/idl/idl/ast.py +++ b/buildscripts/idl/idl/ast.py @@ -109,6 +109,9 @@ def __init__(self, file_name, line, column): self.first_element_field_name = None # type: str self.deserialize_with_tenant = False # type: bool self.internal_only = False # type: bool + # Marks whether this type is a query shape component. + # Can only be true if is_struct is true. + self.is_query_shape_component = False # type: bool super(Type, self).__init__(file_name, line, column) @@ -140,6 +143,11 @@ def __init__(self, file_name, line, column): self.cpp_validator_func = None # type: str self.is_command_reply = False # type: bool self.generic_list_type = None # type: Optional[GenericListType] + # Determines whether or not this IDL struct can be a component of a query shape. See WRITING-13831. + self.query_shape_component = False # type: bool + # pylint: disable=invalid-name + self.unsafe_dangerous_disable_extra_field_duplicate_checks = None # type: bool + super(Struct, self).__init__(file_name, line, column) @@ -202,6 +210,31 @@ def __init__(self, file_name, line, column): super(Validator, self).__init__(file_name, line, column) +@enum.unique +class QueryShapeFieldType(enum.Enum): + # Abstract literal from shape. + LITERAL = enum.auto() + # Leave value as-is in shape. + PARAMETER = enum.auto() + # Anonymize string value. + ANONYMIZE = enum.auto() + # IDL type uses custom serializer -- defer to that serializer. 
+ CUSTOM = enum.auto() + + @classmethod + def bind(cls, string_value): + # type: (Optional[str]) -> Optional[QueryShapeFieldType] + if string_value is None: + return None + bindings = { + "literal": cls.LITERAL, + "parameter": cls.PARAMETER, + "anonymize": cls.ANONYMIZE, + "custom": cls.CUSTOM, + } + return bindings.get(string_value, None) + + class Field(common.SourceLocation): """ An instance of a field in a struct. @@ -226,9 +259,6 @@ def __init__(self, file_name, line, column): self.type = None # type: Type self.always_serialize = False # type: bool - # Set if this field must be populated before entering the BSON iteration loop - self.preparse = False # type: bool - # Properties specific to fields which are arrays. self.supports_doc_sequence = False # type: bool @@ -245,8 +275,24 @@ def __init__(self, file_name, line, column): # Extra info for generic fields. self.generic_field_info = None # type: Optional[GenericFieldInfo] + # Determines whether or not this field represents a literal value that should be abstracted when serializing a query shape. + # See WRITING-13831 for details on query shape. + self.query_shape = None # type: Optional[QueryShapeFieldType] + super(Field, self).__init__(file_name, line, column) + @property + def should_serialize_with_options(self): + # type: () -> bool + """Returns true if the IDL compiler should add a call to serialization options for this field.""" + return self.query_shape is not None and self.query_shape in [ + QueryShapeFieldType.LITERAL, QueryShapeFieldType.ANONYMIZE + ] + + @property + def should_shapify(self): + return self.query_shape is not None and self.query_shape != QueryShapeFieldType.PARAMETER + class Privilege(common.SourceLocation): """IDL privilege information.""" diff --git a/buildscripts/idl/idl/binder.py b/buildscripts/idl/idl/binder.py index b6f8446b41de8..0d75ee91a47e4 100644 --- a/buildscripts/idl/idl/binder.py +++ b/buildscripts/idl/idl/binder.py @@ -272,6 +272,13 @@ def _bind_struct_common(ctxt, parsed_spec, struct, ast_struct): ast_struct.allow_global_collection_name = struct.allow_global_collection_name ast_struct.non_const_getter = struct.non_const_getter ast_struct.is_command_reply = struct.is_command_reply + ast_struct.query_shape_component = struct.query_shape_component + ast_struct.unsafe_dangerous_disable_extra_field_duplicate_checks = struct.unsafe_dangerous_disable_extra_field_duplicate_checks + + # Check that unsafe_dangerous_disable_extra_field_duplicate_checks is used correctly + if ast_struct.unsafe_dangerous_disable_extra_field_duplicate_checks and ast_struct.strict is True: + ctxt.add_strict_and_disable_check_not_allowed(ast_struct) + if struct.is_generic_cmd_list: if struct.is_generic_cmd_list == "arg": ast_struct.generic_list_type = ast.GenericListType.ARG @@ -324,6 +331,20 @@ def _bind_struct_common(ctxt, parsed_spec, struct, ast_struct): if not _is_duplicate_field(ctxt, ast_struct.name, ast_struct.fields, ast_field): ast_struct.fields.append(ast_field) + # Verify that each field on the struct defines a query shape type on the field if and only if + # query_shape_component is defined on the struct. 
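Condensing the query-shape plumbing added to `ast.py` above: `QueryShapeFieldType.bind()` maps the YAML string to an enum member and returns `None` for unknown values (which the binder later reports as an invalid `query_shape` value), while the two `Field` properties decide whether the generated serializer should consult serialization options and whether the field is shapified at all. A self-contained restatement, with `Field` reduced to just those properties:

```python
# Standalone restatement of QueryShapeFieldType.bind() and the Field shape properties.
import enum
from typing import Optional


class QueryShapeFieldType(enum.Enum):
    LITERAL = enum.auto()
    PARAMETER = enum.auto()
    ANONYMIZE = enum.auto()
    CUSTOM = enum.auto()

    @classmethod
    def bind(cls, string_value: Optional[str]) -> Optional["QueryShapeFieldType"]:
        if string_value is None:
            return None
        return {
            "literal": cls.LITERAL,
            "parameter": cls.PARAMETER,
            "anonymize": cls.ANONYMIZE,
            "custom": cls.CUSTOM,
        }.get(string_value)


class Field:
    def __init__(self, query_shape: Optional[QueryShapeFieldType]):
        self.query_shape = query_shape

    @property
    def should_serialize_with_options(self) -> bool:
        return self.query_shape in (QueryShapeFieldType.LITERAL, QueryShapeFieldType.ANONYMIZE)

    @property
    def should_shapify(self) -> bool:
        return self.query_shape is not None and self.query_shape != QueryShapeFieldType.PARAMETER


field = Field(QueryShapeFieldType.bind("anonymize"))
print(field.should_shapify, field.should_serialize_with_options)  # True True
print(QueryShapeFieldType.bind("bogus"))                          # None -> binder reports an error
```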
+ if not field.hidden and struct.query_shape_component and ast_field.query_shape is None: + ctxt.add_must_declare_shape_type(ast_field, ast_struct.name, ast_field.name) + + if not struct.query_shape_component and ast_field.query_shape is not None: + ctxt.add_must_be_query_shape_component(ast_field, ast_struct.name, ast_field.name) + + if ast_field.query_shape == ast.QueryShapeFieldType.ANONYMIZE and not ( + ast_field.type.cpp_type in ["std::string", "std::vector"] + or 'string' in ast_field.type.bson_serialization_type): + ctxt.add_query_shape_anonymize_must_be_string(ast_field, ast_field.name, + ast_field.type.cpp_type) + # Fill out the field comparison_order property as needed if ast_struct.generate_comparison_operators and ast_struct.fields: # If the user did not specify an ordering of fields, then number all fields in @@ -363,6 +384,7 @@ def _inject_hidden_fields(struct): serialization_context_field.cpp_name = "serializationContext" serialization_context_field.optional = False serialization_context_field.default = "SerializationContext()" + serialization_context_field.hidden = True struct.fields.append(serialization_context_field) @@ -422,8 +444,6 @@ def _inject_hidden_command_fields(command): expect_prefix_field.type.type_name = "bool" expect_prefix_field.cpp_name = "expectPrefix" expect_prefix_field.optional = True - # we must extract expectPrefix before any other fields that may consume it - expect_prefix_field.preparse = True command.fields.append(expect_prefix_field) @@ -438,6 +458,7 @@ def _bind_struct_type(struct): ast_type.cpp_type = _get_struct_qualified_cpp_name(struct) ast_type.bson_serialization_type = ["object"] ast_type.first_element_field_name = struct.fields[0].name if struct.fields else None + ast_type.is_query_shape_component = struct.query_shape_component return ast_type @@ -453,6 +474,10 @@ def _bind_struct_field(ctxt, ast_field, idl_type): assert isinstance(array.element_type, syntax.Struct) struct = cast(syntax.Struct, array.element_type) + # Check that unsafe_dangerous_disable_extra_field_duplicate_checks is used correctly + if struct.unsafe_dangerous_disable_extra_field_duplicate_checks: + ctxt.add_inheritance_and_disable_check_not_allowed(ast_field) + ast_field.type = _bind_struct_type(struct) ast_field.type.is_array = isinstance(idl_type, syntax.ArrayType) @@ -1002,6 +1027,7 @@ def _bind_type(idltype): ast_type.deserializer = _normalize_method_name(idltype.cpp_type, idltype.deserializer) ast_type.deserialize_with_tenant = idltype.deserialize_with_tenant ast_type.internal_only = idltype.internal_only + ast_type.is_query_shape_component = True return ast_type @@ -1026,7 +1052,11 @@ def _bind_field(ctxt, parsed_spec, field): # to provide compatibility support. 
ast_field.stability = field.stability ast_field.always_serialize = field.always_serialize - ast_field.preparse = field.preparse + + if field.query_shape is not None: + ast_field.query_shape = ast.QueryShapeFieldType.bind(field.query_shape) + if ast_field.query_shape is None: + ctxt.add_invalid_query_shape_value(ast_field, field.query_shape) ast_field.cpp_name = field.name if field.cpp_name: @@ -1108,6 +1138,8 @@ def _bind_field(ctxt, parsed_spec, field): if ast_field.validator is None: return None + if ast_field.should_shapify and not ast_field.type.is_query_shape_component: + ctxt.add_must_be_query_shape_component(ast_field, ast_field.type.name, ast_field.name) return ast_field @@ -1195,8 +1227,8 @@ def _bind_chained_struct(ctxt, parsed_spec, ast_struct, chained_struct): ast_struct.fields.append(ast_field) -def _bind_globals(parsed_spec): - # type: (syntax.IDLSpec) -> ast.Global +def _bind_globals(ctxt, parsed_spec): + # type: (errors.ParserContext, syntax.IDLSpec) -> ast.Global """Bind the globals object from the idl.syntax tree into the idl.ast tree by doing a deep copy.""" if parsed_spec.globals: ast_global = ast.Global(parsed_spec.globals.file_name, parsed_spec.globals.line, @@ -1204,6 +1236,9 @@ def _bind_globals(parsed_spec): ast_global.cpp_namespace = parsed_spec.globals.cpp_namespace ast_global.cpp_includes = parsed_spec.globals.cpp_includes + if not ast_global.cpp_namespace.startswith("mongo"): + ctxt.add_bad_cpp_namespace(ast_global, ast_global.cpp_namespace) + configs = parsed_spec.globals.configs if configs: ast_global.configs = ast.ConfigGlobal(configs.file_name, configs.line, configs.column) @@ -1242,15 +1277,6 @@ def _validate_enum_int(ctxt, idl_enum): str(value_error)) return - # Check the values are continuous so they can be static_cast. 
- min_value = min(int_values_set) - max_value = max(int_values_set) - - valid_int = set(range(min_value, max_value + 1)) - - if valid_int != int_values_set: - ctxt.add_enum_non_continuous_range_error(idl_enum, idl_enum.name) - def _bind_enum(ctxt, idl_enum): # type: (errors.ParserContext, syntax.Enum) -> ast.Enum @@ -1436,13 +1462,20 @@ def _bind_feature_flags(ctxt, param): ctxt.add_feature_flag_default_false_has_version(param) return None - # Feature flags that default to true are required to have a version - if param.default.literal == "true" and not param.version: + # Feature flags that default to true and should be FCV gated are required to have a version + if param.default.literal == "true" and param.shouldBeFCVGated.literal == "true" and not param.version: ctxt.add_feature_flag_default_true_missing_version(param) return None + # Feature flags that should not be FCV gated must not have a version + if param.shouldBeFCVGated.literal == "false" and param.version: + ctxt.add_feature_flag_fcv_gated_false_has_version(param) + return None + expr = syntax.Expression(param.default.file_name, param.default.line, param.default.column) - expr.expr = '%s, "%s"_sd' % (param.default.literal, param.version if param.version else '') + expr.expr = '%s, "%s"_sd, %s' % (param.default.literal, param.version if + (param.shouldBeFCVGated.literal == "true" + and param.version) else '', param.shouldBeFCVGated.literal) ast_param.default = _bind_expression(expr) ast_param.default.export = False @@ -1597,7 +1630,7 @@ def bind(parsed_spec): bound_spec = ast.IDLAST() - bound_spec.globals = _bind_globals(parsed_spec) + bound_spec.globals = _bind_globals(ctxt, parsed_spec) _validate_types(ctxt, parsed_spec) diff --git a/buildscripts/idl/idl/cpp_types.py b/buildscripts/idl/idl/cpp_types.py index a2631bb4f72fb..42696d0714e27 100644 --- a/buildscripts/idl/idl/cpp_types.py +++ b/buildscripts/idl/idl/cpp_types.py @@ -511,14 +511,14 @@ def has_serializer(self): pass @abstractmethod - def gen_serializer_expression(self, indented_writer, expression): - # type: (writer.IndentedTextWriter, str) -> str + def gen_serializer_expression(self, indented_writer, expression, should_shapify=False): + # type: (writer.IndentedTextWriter, str, bool) -> str """Generate code with the text writer and return an expression to serialize the type.""" pass -def _call_method_or_global_function(expression, ast_type): - # type: (str, ast.Type) -> str +def _call_method_or_global_function(expression, ast_type, should_shapify=False): + # type: (str, ast.Type, bool) -> str """ Given a fully-qualified method name, call it correctly. 
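The `_bind_feature_flags` changes above fold `shouldBeFCVGated` into both the validation and the generated C++ initializer: a flag that defaults to true and is FCV gated must carry a `version`, a flag that is not FCV gated must not, and the emitted expression now has three arguments. A condensed, standalone restatement (with a hypothetical `Flag` dataclass standing in for the parsed IDL server parameter):

```python
# Sketch of the feature-flag validation and the three-argument initializer expression.
from dataclasses import dataclass
from typing import Optional


@dataclass
class Flag:
    default: str              # "true" or "false"
    version: Optional[str]
    should_be_fcv_gated: str  # "true" or "false"


def bind_flag(flag: Flag) -> str:
    if flag.default == "true" and flag.should_be_fcv_gated == "true" and not flag.version:
        raise ValueError("'version' is required for a flag that defaults to true and is FCV gated")
    if flag.should_be_fcv_gated == "false" and flag.version:
        raise ValueError("'version' is not allowed for a flag that is not FCV gated")
    version = flag.version if (flag.should_be_fcv_gated == "true" and flag.version) else ""
    return '%s, "%s"_sd, %s' % (flag.default, version, flag.should_be_fcv_gated)


print(bind_flag(Flag("true", "7.0", "true")))   # true, "7.0"_sd, true
print(bind_flag(Flag("false", None, "false")))  # false, ""_sd, false
```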
@@ -528,18 +528,32 @@ def _call_method_or_global_function(expression, ast_type): """ method_name = ast_type.serializer serialization_context = 'getSerializationContext()' if ast_type.deserialize_with_tenant else '' + shape_options = '' + if should_shapify: + shape_options = 'options' short_method_name = writer.get_method_name(method_name) if writer.is_function(method_name): if ast_type.deserialize_with_tenant: serialization_context = ', ' + serialization_context - return common.template_args('${method_name}(${expression}${serialization_context})', - expression=expression, method_name=method_name, - serialization_context=serialization_context) + if should_shapify: + shape_options = ', ' + shape_options + + return common.template_args( + '${method_name}(${expression}${shape_options}${serialization_context})', + expression=expression, + method_name=method_name, + shape_options=shape_options, + serialization_context=serialization_context, + ) - return common.template_args('${expression}.${method_name}(${serialization_context})', - expression=expression, method_name=short_method_name, - serialization_context=serialization_context) + return common.template_args( + '${expression}.${method_name}(${shape_options}${serialization_context})', + expression=expression, + method_name=short_method_name, + shape_options=shape_options, + serialization_context=serialization_context, + ) class _CommonBsonCppTypeBase(BsonCppTypeBase): @@ -560,9 +574,9 @@ def has_serializer(self): # type: () -> bool return self._ast_type.serializer is not None - def gen_serializer_expression(self, indented_writer, expression): - # type: (writer.IndentedTextWriter, str) -> str - return _call_method_or_global_function(expression, self._ast_type) + def gen_serializer_expression(self, indented_writer, expression, should_shapify=False): + # type: (writer.IndentedTextWriter, str, bool) -> str + return _call_method_or_global_function(expression, self._ast_type, should_shapify) class _ObjectBsonCppTypeBase(BsonCppTypeBase): @@ -584,8 +598,8 @@ def has_serializer(self): # type: () -> bool return self._ast_type.serializer is not None - def gen_serializer_expression(self, indented_writer, expression): - # type: (writer.IndentedTextWriter, str) -> str + def gen_serializer_expression(self, indented_writer, expression, should_shapify=False): + # type: (writer.IndentedTextWriter, str, bool) -> str method_name = writer.get_method_name(self._ast_type.serializer) if self._ast_type.deserialize_with_tenant: # SerializationContext is tied to tenant deserialization indented_writer.write_line( @@ -618,8 +632,8 @@ def has_serializer(self): # type: () -> bool return self._ast_type.serializer is not None - def gen_serializer_expression(self, indented_writer, expression): - # type: (writer.IndentedTextWriter, str) -> str + def gen_serializer_expression(self, indented_writer, expression, should_shapify=False): + # type: (writer.IndentedTextWriter, str, bool) -> str method_name = writer.get_method_name(self._ast_type.serializer) indented_writer.write_line( common.template_args('BSONArray localArray(${expression}.${method_name}());', @@ -642,8 +656,8 @@ def has_serializer(self): # type: () -> bool return True - def gen_serializer_expression(self, indented_writer, expression): - # type: (writer.IndentedTextWriter, str) -> str + def gen_serializer_expression(self, indented_writer, expression, should_shapify=False): + # type: (writer.IndentedTextWriter, str, bool) -> str if self._ast_type.serializer: method_name = 
writer.get_method_name(self._ast_type.serializer) indented_writer.write_line( diff --git a/buildscripts/idl/idl/errors.py b/buildscripts/idl/idl/errors.py index 3098fdee09b0e..2cda901647f42 100644 --- a/buildscripts/idl/idl/errors.py +++ b/buildscripts/idl/idl/errors.py @@ -81,7 +81,6 @@ ERROR_ID_ENUM_BAD_TYPE = "ID0036" ERROR_ID_ENUM_BAD_INT_VAUE = "ID0037" ERROR_ID_ENUM_NON_UNIQUE_VALUES = "ID0038" -ERROR_ID_ENUM_NON_CONTINUOUS_RANGE = "ID0039" ERROR_ID_BAD_COMMAND_NAMESPACE = "ID0041" ERROR_ID_FIELD_NO_COMMAND = "ID0042" ERROR_ID_NO_ARRAY_OF_CHAIN = "ID0043" @@ -130,6 +129,16 @@ ERROR_ID_STABILITY_UNKNOWN_VALUE = "ID0091" ERROR_ID_DUPLICATE_UNSTABLE_STABILITY = "ID0092" ERROR_ID_INVALID_ARRAY_VARIANT = "ID0093" +ERROR_ID_FIELD_MUST_DECLARE_SHAPE_LITERAL = "ID0094" +ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL = "ID0095" +ERROR_ID_INVALID_TYPE_FOR_SHAPIFY = "ID0096" +ERROR_ID_QUERY_SHAPE_PROPERTIES_MUTUALLY_EXCLUSIVE = "ID0097" +ERROR_ID_QUERY_SHAPE_PROPERTY_CANNOT_BE_FALSE = "ID0098" +ERROR_ID_STRICT_AND_DISABLE_CHECK_NOT_ALLOWED = "ID0099" +ERROR_ID_INHERITANCE_AND_DISABLE_CHECK_NOT_ALLOWED = "ID0100" +ERROR_ID_FEATURE_FLAG_SHOULD_BE_FCV_GATED_FALSE_HAS_VERSION = "ID0101" +ERROR_ID_QUERY_SHAPE_INVALID_VALUE = "ID0102" +ERROR_ID_BAD_CPP_NAMESPACE = "ID0103" class IDLError(Exception): @@ -366,6 +375,14 @@ def get_bool(self, node): return True return False + def get_required_bool(self, node): + # type: (Union[yaml.nodes.MappingNode, yaml.nodes.ScalarNode, yaml.nodes.SequenceNode]) -> bool + boolean_value = yaml.safe_load(node.value) + if not isinstance(boolean_value, bool): + self._add_node_error(node, ERROR_ID_IS_NODE_VALID_BOOL, + "Illegal bool value, expected either 'true' or 'false'.") + return boolean_value + def get_list(self, node): # type: (Union[yaml.nodes.MappingNode, yaml.nodes.ScalarNode, yaml.nodes.SequenceNode]) -> List[str] """Get a YAML scalar or sequence node as a list of strings.""" @@ -595,13 +612,6 @@ def add_enum_value_not_unique_error(self, location, enum_name): self._add_error(location, ERROR_ID_ENUM_NON_UNIQUE_VALUES, "Enum '%s' has duplicate values, all values must be unique" % (enum_name)) - def add_enum_non_continuous_range_error(self, location, enum_name): - # type: (common.SourceLocation, str) -> None - """Add an error for an enum having duplicate values.""" - self._add_error(location, ERROR_ID_ENUM_NON_CONTINUOUS_RANGE, - ("Enum '%s' has non-continuous integer variables, enums must have a " + - "continuous range of integer variables.") % (enum_name)) - def add_bad_command_namespace_error(self, location, command_name, command_namespace, valid_commands): # type: (common.SourceLocation, str, str, List[str]) -> None @@ -828,9 +838,10 @@ def add_missing_short_name_with_single_name(self, location, name): def add_feature_flag_default_true_missing_version(self, location): # type: (common.SourceLocation) -> None - """Add an error about a default flag with a default value of true but no version.""" - self._add_error(location, ERROR_ID_FEATURE_FLAG_DEFAULT_TRUE_MISSING_VERSION, - ("Missing 'version' required for feature flag that defaults to true")) + """Add an error about a default flag with a default value of true and should be FCV gated but no version.""" + self._add_error(location, ERROR_ID_FEATURE_FLAG_DEFAULT_TRUE_MISSING_VERSION, ( + "Missing 'version' required for feature flag that defaults to true and should be FCV gated" + )) def add_feature_flag_default_false_has_version(self, location): # type: (common.SourceLocation) -> None @@ -839,6 +850,13 @@ def 
add_feature_flag_default_false_has_version(self, location): location, ERROR_ID_FEATURE_FLAG_DEFAULT_FALSE_HAS_VERSION, ("The 'version' attribute is not allowed for feature flag that defaults to false")) + def add_feature_flag_fcv_gated_false_has_version(self, location): + # type: (common.SourceLocation) -> None + """Add an error about a feature flag that should not be FCV gated but has a version.""" + self._add_error( + location, ERROR_ID_FEATURE_FLAG_SHOULD_BE_FCV_GATED_FALSE_HAS_VERSION, + ("The 'version' attribute is not allowed for feature flag that should be FCV gated")) + def add_reply_type_invalid_type(self, location, command_name, reply_type_name): # type: (common.SourceLocation, str, str) -> None """Add an error about a command whose reply_type refers to an unknown type.""" @@ -935,6 +953,50 @@ def add_duplicate_unstable_stability(self, location): "Field specifies both 'unstable' and 'stability' options, should use 'stability: [stable|unstable|internal]' instead and remove the deprecated 'unstable' option." )) + def add_must_declare_shape_type(self, location, struct_name, field_name): + # type: (common.SourceLocation, str, str) -> None + """Add an error about a field not specifying either query_shape_literal or query_shape_anonymize if the struct is query_shape_component.""" + self._add_error( + location, ERROR_ID_FIELD_MUST_DECLARE_SHAPE_LITERAL, + f"Field '{field_name}' must specify either 'query_shape_literal' or 'query_shape_anonymize' since struct '{struct_name}' is a query shape component." + ) + + def add_must_be_query_shape_component(self, location, struct_name, field_name): + # type: (common.SourceLocation, str, str) -> None + self._add_error( + location, ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL, + f"Field '{field_name}' cannot specify 'query_shape_literal' property since struct '{struct_name}' is not a query shape component." + ) + + def add_query_shape_anonymize_must_be_string(self, location, field_name, field_type): + self._add_error( + location, ERROR_ID_INVALID_TYPE_FOR_SHAPIFY, + f"In order for {field_name} to be marked as a query shape fieldpath, it must have a string type, not {field_type}." + ) + + def add_invalid_query_shape_value(self, location, query_shape_value): + self._add_error(location, ERROR_ID_QUERY_SHAPE_INVALID_VALUE, + f"'{query_shape_value}' is not a valid value for 'query_shape'.") + + def add_strict_and_disable_check_not_allowed(self, location): + self._add_error( + location, ERROR_ID_STRICT_AND_DISABLE_CHECK_NOT_ALLOWED, + "Cannot set strict = true and unsafe_dangerous_disable_extra_field_duplicate_checks = true on a struct. unsafe_dangerous_disable_extra_field_duplicate_checks is only permitted on strict = false" + ) + + def add_inheritance_and_disable_check_not_allowed(self, location): + self._add_error( + location, ERROR_ID_INHERITANCE_AND_DISABLE_CHECK_NOT_ALLOWED, + "Fields cannot have unsafe_dangerous_disable_extra_field_duplicate_checks = true. 
unsafe_dangerous_disable_extra_field_duplicate_checks on non field structs" + ) + + def add_bad_cpp_namespace(self, location, namespace): + # type: (common.SourceLocation, str) -> None + self._add_error( + location, ERROR_ID_BAD_CPP_NAMESPACE, + "cpp_namespace must start with 'mongo::' or be just 'mongo', namespace '%s' is not supported" + % (namespace)) + def _assert_unique_error_messages(): # type: () -> None diff --git a/buildscripts/idl/idl/generator.py b/buildscripts/idl/idl/generator.py index 8f16cbf46c973..5afe093c8a2e4 100644 --- a/buildscripts/idl/idl/generator.py +++ b/buildscripts/idl/idl/generator.py @@ -36,7 +36,7 @@ import textwrap from abc import ABCMeta, abstractmethod from enum import Enum -from typing import Dict, List, Mapping, Tuple, Union, cast +from typing import Callable, Dict, List, Mapping, Optional, Tuple, Union, cast from . import (ast, bson, common, cpp_types, enum_types, generic_field_list_types, struct_types, writer) @@ -175,6 +175,12 @@ def _gen_field_usage_constant(field): return "k%sBit" % (common.title_case(field.cpp_name)) +def _gen_field_element_name(field): + # type: (ast.Field) -> str + """Get the name for a BSONElement pointer in field iteration.""" + return "BSONElement_%s" % (common.title_case(field.cpp_name)) + + def _get_constant(name): # type: (str) -> str """Transform an arbitrary label to a constant name.""" @@ -273,7 +279,7 @@ def _get_field_usage_checker(indented_writer, struct): # Only use the fast field usage checker if we never expect extra fields that we need to ignore # but still wish to do duplicate detection on. - if struct.strict: + if struct.strict or struct.unsafe_dangerous_disable_extra_field_duplicate_checks: return _FastFieldUsageChecker(indented_writer, struct.fields) return _SlowFieldUsageChecker(indented_writer, struct.fields) @@ -767,14 +773,14 @@ def gen_op_msg_request_member(self, command): self._writer.write_empty_line() - def gen_field_list_entries_declaration_struct(self, struct): # type: (ast.Struct) -> None + def gen_field_list_entries_declaration_struct(self, struct): + # type: (ast.Struct) -> None """Generate the field list entries map for a generic argument or reply field list.""" field_list_info = generic_field_list_types.get_field_list_info(struct) self._writer.write_line( common.template_args('// Map: fieldName -> ${should_forward_name}', should_forward_name=field_list_info.get_should_forward_name())) - self._writer.write_line( - "static const stdx::unordered_map _genericFields;") + self._writer.write_line("static const StaticImmortal> _genericFields;") self.write_empty_line() def gen_known_fields_declaration(self): @@ -1079,6 +1085,10 @@ def generate(self, spec): if any(command.api_version for command in spec.commands): header_list.append('mongo/db/commands.h') + # Include serialization options only if there is a struct which is part of a query shape. 
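Tying together the new `unsafe_dangerous_disable_extra_field_duplicate_checks` option: the binder (earlier in this diff) rejects combining it with `strict: true`, and `_get_field_usage_checker` above now picks the fast checker whenever either flag is set. A small sketch of that selection logic, with placeholder classes standing in for `_FastFieldUsageChecker`/`_SlowFieldUsageChecker`:

```python
# Sketch of how the field-usage checker is chosen once the struct options are validated.
class FastChecker:  # stands in for _FastFieldUsageChecker
    pass


class SlowChecker:  # stands in for _SlowFieldUsageChecker
    pass


def pick_checker(strict: bool, disable_extra_field_duplicate_checks: bool):
    # The binder rejects this combination up front (add_strict_and_disable_check_not_allowed).
    if strict and disable_extra_field_duplicate_checks:
        raise ValueError("the unsafe opt-out is only permitted on strict: false structs")
    if strict or disable_extra_field_duplicate_checks:
        return FastChecker()
    return SlowChecker()


print(type(pick_checker(strict=True, disable_extra_field_duplicate_checks=False)).__name__)   # FastChecker
print(type(pick_checker(strict=False, disable_extra_field_duplicate_checks=False)).__name__)  # SlowChecker
```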
+ if any(struct.query_shape_component for struct in spec.structs): + header_list.append('mongo/db/query/serialization_options.h') + header_list.sort() for include in header_list: @@ -1490,8 +1500,9 @@ def _gen_usage_check(self, field, bson_element, field_usage_check): self._writer.write_line('%s = true;' % (_get_has_field_member_name(field))) def gen_field_deserializer(self, field, field_type, bson_object, bson_element, - field_usage_check, tenant, is_command_field=False, check_type=True): - # type: (ast.Field, ast.Type, str, str, _FieldUsageCheckerBase, str, bool, bool) -> None + field_usage_check, tenant, is_command_field=False, check_type=True, + deserialize_fn=None): + # type: (ast.Field, ast.Type, str, str, _FieldUsageCheckerBase, str, bool, bool, Optional[Callable[[], None]]) -> None """Generate the C++ deserializer piece for a field. If field_type is scalar and check_type is True (the default), generate type-checking code. @@ -1506,12 +1517,18 @@ def gen_field_deserializer(self, field, field_type, bson_object, bson_element, predicate = "MONGO_likely(ctxt.checkAndAssertType(%s, Array))" % (bson_element) with self._predicate(predicate): self._gen_usage_check(field, bson_element, field_usage_check) - self._gen_array_deserializer(field, bson_element, field_type, tenant) + if deserialize_fn: + deserialize_fn() + else: + self._gen_array_deserializer(field, bson_element, field_type, tenant) return elif field_type.is_variant: self._gen_usage_check(field, bson_element, field_usage_check) - self._gen_variant_deserializer(field, bson_element, tenant) + if deserialize_fn: + deserialize_fn() + else: + self._gen_variant_deserializer(field, bson_element, tenant) return def validate_and_assign_or_uassert(field, expression): @@ -1528,6 +1545,7 @@ def validate_and_assign_or_uassert(field, expression): self._writer.write_line('%s = std::move(value);' % (field_name)) if field.chained: + assert not deserialize_fn # Do not generate a predicate check since we always call these deserializers. if field_type.is_struct: @@ -1552,6 +1570,9 @@ def validate_and_assign_or_uassert(field, expression): with self._predicate(predicate): self._gen_usage_check(field, bson_element, field_usage_check) + if deserialize_fn: + deserialize_fn() + return object_value = self._gen_field_deserializer_expression( bson_element, field, field_type, tenant) @@ -1575,6 +1596,10 @@ def validate_and_assign_or_uassert(field, expression): else: validate_and_assign_or_uassert(field, object_value) + # if we explicitly set _dollarTenant, we know we have a non-prefixed tenantId + if field.name == '$tenant': + self._writer.write_line('_serializationContext.setTenantIdSource(true);') + if is_command_field and predicate: with self._block('else {', '}'): self._writer.write_line( @@ -1672,7 +1697,7 @@ def _gen_initializer_vars(self, constructor, is_command): # initialized first; don't move in the event a boost::none is supplied initializer_vars.insert(0, '_%s(%s)' % (arg.name, initializer_var)) elif arg.name in ["nss", "nssOrUUID"]: - # TODO(SERVER-75669): Remove this denylist, prevent use-after-move by defining fields in the correct order. + # TODO (SERVER-74238): Remove this denylist, prevent use-after-move by defining fields in the correct order. 
initializer_vars.append('_%s(%s)' % (arg.name, arg.name)) else: initializer_vars.append('_%s(std::move(%s))' % (arg.name, arg.name)) @@ -1713,9 +1738,7 @@ def _gen_constructor(self, struct, constructor, default_init): initializes_db_name = True elif [arg for arg in constructor.args if arg.name == 'nssOrUUID']: if [field for field in struct.fields if field.serialize_op_msg_request_only]: - initializers.append( - '_dbName(nssOrUUID.uuid() ? nssOrUUID.dbName().value() : nssOrUUID.nss()->dbName())' - ) + initializers.append('_dbName(nssOrUUID.dbName())') initializes_db_name = True # Serialize has fields third @@ -1749,7 +1772,7 @@ def gen_constructors(self, struct): required_constructor = struct_type_info.get_required_constructor_method() if len(required_constructor.args) != len(constructor.args): - #print(struct.name + ": "+ str(required_constructor.args)) + # print(struct.name + ": "+ str(required_constructor.args)) self._gen_constructor(struct, required_constructor, False) def gen_field_list_entry_lookup_methods_struct(self, struct): @@ -1759,14 +1782,14 @@ def gen_field_list_entry_lookup_methods_struct(self, struct): defn = field_list_info.get_has_field_method().get_definition() with self._block('%s {' % (defn, ), '}'): self._writer.write_line( - 'return _genericFields.find(fieldName.toString()) != _genericFields.end();') + 'return _genericFields->find(fieldName) != _genericFields->end();') self._writer.write_empty_line() defn = field_list_info.get_should_forward_method().get_definition() with self._block('%s {' % (defn, ), '}'): - self._writer.write_line('auto it = _genericFields.find(fieldName.toString());') - self._writer.write_line('return (it == _genericFields.end() || it->second);') + self._writer.write_line('auto it = _genericFields->find(fieldName);') + self._writer.write_line('return (it == _genericFields->end() || it->second);') self._writer.write_empty_line() @@ -1789,25 +1812,32 @@ def _gen_fields_deserializer_common(self, struct, bson_object, tenant): # type: (ast.Struct, str, str) -> _FieldUsageCheckerBase """Generate the C++ code to deserialize list of fields.""" - struct_fields = struct.fields.copy() - preparse_fields = [] # type: List[ast.Field] - field_usage_check = _get_field_usage_checker(self._writer, struct) if isinstance(struct, ast.Command): self._writer.write_line('BSONElement commandElement;') self._writer.write_line('bool firstFieldFound = false;') self._writer.write_empty_line() - # inject a context into the IDLParserContext that tags the class as a command request - self._writer.write_line( - 'setSerializationContext(SerializationContext::stateCommandRequest());') - - # some fields are consumed in the BSON iteration loop and need to be parsed before - # entering the main loop - for field in struct.fields: # iterate over the original list - if field.preparse: - struct_fields.remove(field) - preparse_fields.append(field) + # Update the serialization context whether or not we received a tenantId object + if tenant == 'request.getValidatedTenantId()': + # inject a context into the IDLParserContext that tags the class as a command request + self._writer.write_line( + 'setSerializationContext(SerializationContext::stateCommandRequest());') + self._writer.write_line( + '_serializationContext.setTenantIdSource(request.getValidatedTenantId() != boost::none);' + ) + else: + # if a non-default serialization context was passed in via the IDLParserContext, + # use that to set the local serialization context, otherwise set it to a command + # request + with self._block( + 
'if (ctxt.getSerializationContext() != SerializationContext::stateDefault()) {', + '}'): + self._writer.write_line( + 'setSerializationContext(ctxt.getSerializationContext());') + with self._block('else {', '}'): + self._writer.write_line( + 'setSerializationContext(SerializationContext::stateCommandRequest());') else: # set the local serializer flags according to the constexpr set by is_command_reply @@ -1818,86 +1848,103 @@ def _gen_fields_deserializer_common(self, struct, bson_object, tenant): self._writer.write_empty_line() - # we need to build two loops: one for the preparsed fields, and one for fields that don't - # depend on other fields - fields = [] # type: List[List[ast.Field]] - if preparse_fields: - fields.append(preparse_fields) - fields.append(struct_fields) - for_blocks = len(fields) - last_block = for_blocks - 1 - - for block_num in range(for_blocks): - with self._block('for (const auto& element :%s) {' % (bson_object), '}'): - - self._writer.write_line('const auto fieldName = element.fieldNameStringData();') + deferred_fields = [] # type: List[ast.Field] + deferred_field_names = [] # type: List[str] + if 'expectPrefix' in [field.name for field in struct.fields]: + # Deserialization of 'expectPrefix' modifies the deserializationContext and how + # certain other fields are then deserialized. + # Such dependent fields include those which "deserialize_with_tenant" and + # any complex struct type. + # In practice, this typically only occurs on Command structs. + deferred_fields = [ + field for field in struct.fields + if field.type and (field.type.is_struct or field.type.deserialize_with_tenant) + ] + deferred_field_names = [field.name for field in deferred_fields] + if deferred_fields: + self._writer.write_line( + '// Anchors for values of fields which may depend on others.') + for field in deferred_fields: + self._writer.write_line('BSONElement %s;' % (_gen_field_element_name(field))) self._writer.write_empty_line() - if isinstance(struct, ast.Command) and block_num == last_block: - with self._predicate("firstFieldFound == false"): - # Get the Command element if we need it for later in the deserializer to get the - # namespace - if struct.namespace != common.COMMAND_NAMESPACE_IGNORED: - self._writer.write_line('commandElement = element;') + with self._block('for (const auto& element :%s) {' % (bson_object), '}'): - self._writer.write_line('firstFieldFound = true;') - self._writer.write_line('continue;') + self._writer.write_line('const auto fieldName = element.fieldNameStringData();') + self._writer.write_empty_line() - self._writer.write_empty_line() + if isinstance(struct, ast.Command): + with self._predicate("firstFieldFound == false"): + # Get the Command element if we need it for later in the deserializer to get the + # namespace + if struct.namespace != common.COMMAND_NAMESPACE_IGNORED: + self._writer.write_line('commandElement = element;') - first_field = True - for field in fields[block_num]: - # Do not parse chained fields as fields since they are actually chained types. 
- if field.chained and not field.chained_struct_field: - continue - # Internal only fields are not parsed from BSON objects - if field.type and field.type.internal_only: - continue + self._writer.write_line('firstFieldFound = true;') + self._writer.write_line('continue;') - field_predicate = 'fieldName == %s' % (_get_field_constant_name(field)) + self._writer.write_empty_line() - with self._predicate(field_predicate, not first_field): + first_field = True + for field in struct.fields: + # Do not parse chained fields as fields since they are actually chained types. + if field.chained and not field.chained_struct_field: + continue + # Internal only fields are not parsed from BSON objects + if field.type and field.type.internal_only: + continue - if field.ignore: - field_usage_check.add(field, "element") + field_predicate = 'fieldName == %s' % (_get_field_constant_name(field)) - self._writer.write_line('// ignore field') - else: - self.gen_field_deserializer(field, field.type, bson_object, "element", - field_usage_check, tenant) + with self._predicate(field_predicate, not first_field): - if first_field: - first_field = False + def defer_field(): + # type: () -> None + """Field depends on other field(s), store its location and defer processing till later.""" + assert field.name in deferred_field_names + self._writer.write_line('%s = element;' % (_gen_field_element_name(field))) - # only check for extraneous fields in the final block - if block_num == last_block: - # End of for fields - # Generate strict check for extranous fields - if struct.strict: - # For commands, check if this is a well known command field that the IDL parser - # should ignore regardless of strict mode. - command_predicate = None - if isinstance(struct, ast.Command): - command_predicate = "!mongo::isGenericArgument(fieldName)" - - # Ditto for command replies - if struct.is_command_reply: - command_predicate = "!mongo::isGenericReply(fieldName)" - - with self._block('else {', '}'): - with self._predicate(command_predicate): - self._writer.write_line('ctxt.throwUnknownField(fieldName);') + if field.ignore: + field_usage_check.add(field, "element") + self._writer.write_line('// ignore field') else: - with self._else(not first_field): - self._writer.write_line( - 'auto push_result = usedFieldSet.insert(fieldName);') - with writer.IndentedScopedBlock( - self._writer, - 'if (MONGO_unlikely(push_result.second == false)) {', '}'): - self._writer.write_line('ctxt.throwDuplicateField(fieldName);') + fn = defer_field if field.name in deferred_field_names else None + self.gen_field_deserializer(field, field.type, bson_object, "element", + field_usage_check, tenant, deserialize_fn=fn) + + if first_field: + first_field = False + + # End of for fields + # Generate strict check for extranous fields + if struct.strict: + # For commands, check if this is a well known command field that the IDL parser + # should ignore regardless of strict mode. 
+ command_predicate = None + if isinstance(struct, ast.Command): + command_predicate = "!mongo::isGenericArgument(fieldName)" + + # Ditto for command replies + if struct.is_command_reply: + command_predicate = "!mongo::isGenericReply(fieldName)" - if block_num < last_block: - self._writer.write_empty_line() + with self._block('else {', '}'): + with self._predicate(command_predicate): + self._writer.write_line('ctxt.throwUnknownField(fieldName);') + elif not struct.unsafe_dangerous_disable_extra_field_duplicate_checks: + with self._else(not first_field): + self._writer.write_line('auto push_result = usedFieldSet.insert(fieldName);') + with writer.IndentedScopedBlock( + self._writer, 'if (MONGO_unlikely(push_result.second == false)) {', + '}'): + self._writer.write_line('ctxt.throwDuplicateField(fieldName);') + + # Handle the deferred fields after their possible dependencies have been processed. + for field in deferred_fields: + element_name = _gen_field_element_name(field) + with self._predicate(element_name): + self.gen_field_deserializer(field, field.type, bson_object, element_name, None, + tenant) # Parse chained structs if not inlined # Parse chained types always here @@ -2148,14 +2195,29 @@ def _gen_serializer_method_custom(self, field): self._writer.write_template( 'BSONArrayBuilder arrayBuilder(builder->subarrayStart(${field_name}));') with self._block('for (const auto& item : ${access_member}) {', '}'): - expression = bson_cpp_type.gen_serializer_expression(self._writer, 'item') + expression = bson_cpp_type.gen_serializer_expression( + self._writer, 'item', + field.query_shape == ast.QueryShapeFieldType.CUSTOM) template_params['expression'] = expression self._writer.write_template('arrayBuilder.append(${expression});') else: expression = bson_cpp_type.gen_serializer_expression( - self._writer, _access_member(field)) + self._writer, _access_member(field), + field.query_shape == ast.QueryShapeFieldType.CUSTOM) template_params['expression'] = expression - self._writer.write_template('builder->append(${field_name}, ${expression});') + if not field.should_serialize_with_options: + self._writer.write_template( + 'builder->append(${field_name}, ${expression});') + elif field.query_shape == ast.QueryShapeFieldType.LITERAL: + self._writer.write_template( + 'options.serializeLiteral(${expression}).serializeForIDL(${field_name}, builder);' + ) + elif field.query_shape == ast.QueryShapeFieldType.ANONYMIZE: + self._writer.write_template( + 'builder->append(${field_name}, options.serializeFieldPathFromString(${expression}));' + ) + else: + assert False elif field.type.bson_serialization_type[0] == 'any': # Any types are special @@ -2227,18 +2289,29 @@ def _gen_serializer_method_struct(self, field): if field.chained: # Just directly call the serializer for chained structs without opening up a nested # document. 
- self._writer.write_template('${access_member}.serialize(builder);') + if not field.should_serialize_with_options: + self._writer.write_template('${access_member}.serialize(builder);') + else: + self._writer.write_template('${access_member}.serialize(builder, options);') + elif field.type.is_array: self._writer.write_template( 'BSONArrayBuilder arrayBuilder(builder->subarrayStart(${field_name}));') with self._block('for (const auto& item : ${access_member}) {', '}'): self._writer.write_line( 'BSONObjBuilder subObjBuilder(arrayBuilder.subobjStart());') - self._writer.write_line('item.serialize(&subObjBuilder);') + if not field.should_serialize_with_options: + self._writer.write_line('item.serialize(&subObjBuilder);') + else: + self._writer.write_line('item.serialize(&subObjBuilder, options);') else: self._writer.write_template( 'BSONObjBuilder subObjBuilder(builder->subobjStart(${field_name}));') - self._writer.write_template('${access_member}.serialize(&subObjBuilder);') + if not field.should_serialize_with_options: + self._writer.write_template('${access_member}.serialize(&subObjBuilder);') + else: + self._writer.write_template( + '${access_member}.serialize(&subObjBuilder, options);') def _gen_serializer_method_array_variant(self, field): template_params = { @@ -2276,19 +2349,46 @@ def _gen_serializer_method_variant_helper(self, field, template_params, builder= template_params[ 'cpp_type'] = 'std::vector<' + variant_type.cpp_type + '>' if variant_type.is_array else variant_type.cpp_type - with self._block('[%s](const ${cpp_type}& value) {' % builder, '},'): + template_params['param_opt'] = "" + if field.should_serialize_with_options: + template_params['param_opt'] = ', options' + with self._block('[%s${param_opt}](const ${cpp_type}& value) {' % builder, '},'): bson_cpp_type = cpp_types.get_bson_cpp_type(variant_type) if field.type.is_variant and field.type.is_array: self._writer.write_template('value.serialize(%s);' % builder) elif bson_cpp_type and bson_cpp_type.has_serializer(): assert not field.type.is_array - expression = bson_cpp_type.gen_serializer_expression(self._writer, 'value') + expression = bson_cpp_type.gen_serializer_expression( + self._writer, 'value', + field.query_shape == ast.QueryShapeFieldType.CUSTOM) template_params['expression'] = expression - self._writer.write_template( - 'builder->append(${field_name}, ${expression});') + if not field.should_serialize_with_options: + self._writer.write_template( + 'builder->append(${field_name}, ${expression});') + elif field.query_shape == ast.QueryShapeFieldType.LITERAL: + self._writer.write_template( + 'options.serializeLiteral(${expression}).serializeForIDL(${field_name}, builder);' + ) + elif field.query_shape == ast.QueryShapeFieldType.ANONYMIZE: + self._writer.write_template( + 'builder->append(${field_name}, options.serializeFieldPathFromString(${expression}));' + ) + else: + assert False else: - self._writer.write_template( - 'idl::idlSerialize(builder, ${field_name}, value);') + if not field.should_serialize_with_options: + self._writer.write_template( + 'idl::idlSerialize(builder, ${field_name}, value);') + elif field.query_shape == ast.QueryShapeFieldType.LITERAL: + self._writer.write_template( + 'options.serializeLiteral(value).serializeForIDL(${field_name}, builder);' + ) + elif field.query_shape == ast.QueryShapeFieldType.ANONYMIZE: + self._writer.write_template( + 'idl::idlSerialize(builder, ${field_name}, options.serializeFieldPathFromString(value));' + ) + else: + assert False def 
_gen_serializer_method_common(self, field): # type: (ast.Field) -> None @@ -2320,11 +2420,27 @@ def _gen_serializer_method_common(self, field): else: self._gen_serializer_method_variant(field) else: - # Generate default serialization using BSONObjBuilder::append - # Note: BSONObjBuilder::append has overrides for std::vector also - self._writer.write_line( - 'builder->append(%s, %s);' % (_get_field_constant_name(field), - _access_member(field))) + # Generate default serialization + # Note: BSONObjBuilder::append, which all three branches use, has overrides for std::vector also + if not field.should_serialize_with_options: + self._writer.write_line( + 'builder->append(%s, %s);' % (_get_field_constant_name(field), + _access_member(field))) + elif field.query_shape == ast.QueryShapeFieldType.LITERAL: + # serializeLiteral expects an ImplicitValue, which can't be constructed with an int64_t + expression_cast = "" + if field.type.cpp_type == "std::int64_t": + expression_cast = "(long long)" + self._writer.write_line( + 'options.serializeLiteral(%s%s).serializeForIDL(%s, builder);' + % (expression_cast, _access_member(field), + _get_field_constant_name(field))) + elif field.query_shape == ast.QueryShapeFieldType.ANONYMIZE: + self._writer.write_line( + 'builder->append(%s, options.serializeFieldPathFromString(%s));' % + (_get_field_constant_name(field), _access_member(field))) + else: + assert False else: self._gen_serializer_method_struct(field) @@ -2603,8 +2719,8 @@ def gen_field_list_entries_declaration_struct(self, struct): common.template_args('// Map: fieldName -> ${should_forward_name}', should_forward_name=field_list_info.get_should_forward_name())) block_name = common.template_args( - 'const stdx::unordered_map ${klass}::_genericFields {', klass=klass) - with self._block(block_name, "};"): + 'const StaticImmortal> ${klass}::_genericFields {{', klass=klass) + with self._block(block_name, "}};"): sorted_entries = sorted(struct.fields, key=lambda f: f.name) for entry in sorted_entries: self._writer.write_line( @@ -2724,7 +2840,8 @@ def gen_server_parameters(self, params, header_file_name): self._writer.write_line( '%s %s%s;' % (param.cpp_vartype, param.cpp_varname, init)) - blockname = 'idl_' + hashlib.sha1(header_file_name.encode()).hexdigest() + blockname = 'idl_' + \ + hashlib.sha1(header_file_name.encode()).hexdigest() with self._block('MONGO_SERVER_PARAMETER_REGISTER(%s)(InitializerContext*) {' % (blockname), '}'): # ServerParameter instances. 
@@ -2964,11 +3081,16 @@ def generate(self, spec, header_file_name): # Generate mongo includes third header_list = [ - 'mongo/bson/bsonobjbuilder.h', 'mongo/db/auth/authorization_contract.h', - 'mongo/db/commands.h', 'mongo/idl/command_generic_argument.h', - 'mongo/util/overloaded_visitor.h' + 'mongo/util/overloaded_visitor.h', + 'mongo/util/string_map.h', ] + if spec.commands: + header_list.append('mongo/db/auth/authorization_contract.h') + header_list.append('mongo/idl/command_generic_argument.h') + elif len([s for s in spec.structs if s.is_command_reply]) > 0: + header_list.append('mongo/idl/command_generic_argument.h') + if spec.server_parameters: header_list.append('mongo/db/server_parameter.h') header_list.append('mongo/db/server_parameter_with_storage.h') diff --git a/buildscripts/idl/idl/parser.py b/buildscripts/idl/idl/parser.py index a5f74e3b8c64c..41b73966d267f 100644 --- a/buildscripts/idl/idl/parser.py +++ b/buildscripts/idl/idl/parser.py @@ -132,6 +132,8 @@ def _generic_parser( if ctxt.is_mapping_node(second_node, first_name): syntax_node.__dict__[first_name] = rule_desc.mapping_parser_func( ctxt, second_node) + elif rule_desc.node_type == "required_bool_scalar": + syntax_node.__dict__[first_name] = ctxt.get_required_bool(second_node) else: raise errors.IDLError( "Unknown node_type '%s' for parser rule" % (rule_desc.node_type)) @@ -147,7 +149,7 @@ def _generic_parser( # A bool is never "None" like other types, it simply defaults to "false". # It means "if bool is None" will always return false and there is no support for required - # 'bool' at this time. + # 'bool' at this time. Use the node type 'required_bool_scalar' if this behavior is not desired. if not rule_desc.node_type == 'bool_scalar': if syntax_node.__dict__[name] is None: ctxt.add_missing_required_field_error(node, syntax_node_name, name) @@ -404,6 +406,8 @@ def _parse_field(ctxt, name, node): _RuleDesc('bool_scalar'), "forward_from_shards": _RuleDesc('bool_scalar'), + "query_shape": + _RuleDesc('scalar'), }) return field @@ -575,6 +579,8 @@ def _parse_struct(ctxt, spec, name, node): "cpp_validator_func": _RuleDesc('scalar'), "is_command_reply": _RuleDesc('bool_scalar'), "is_generic_cmd_list": _RuleDesc('scalar'), + "query_shape_component": _RuleDesc('bool_scalar'), + "unsafe_dangerous_disable_extra_field_duplicate_checks": _RuleDesc("bool_scalar"), }) # PyLint has difficulty with some iterables: https://github.com/PyCQA/pylint/issues/3105 @@ -978,6 +984,9 @@ def _parse_feature_flag(ctxt, spec, name, node): mapping_parser_func=_parse_expression), "version": _RuleDesc('scalar'), + "shouldBeFCVGated": + _RuleDesc('scalar_or_mapping', _RuleDesc.REQUIRED, + mapping_parser_func=_parse_expression), }) spec.feature_flags.append(param) diff --git a/buildscripts/idl/idl/struct_types.py b/buildscripts/idl/idl/struct_types.py index ab784420374f9..f2a4b69a8b6f7 100644 --- a/buildscripts/idl/idl/struct_types.py +++ b/buildscripts/idl/idl/struct_types.py @@ -327,14 +327,19 @@ def get_deserializer_method(self): def get_serializer_method(self): # type: () -> MethodInfo + args = ['BSONObjBuilder* builder'] + if self._struct.query_shape_component: + args.append("SerializationOptions options = {}") return MethodInfo( - common.title_case(self._struct.cpp_name), 'serialize', ['BSONObjBuilder* builder'], - 'void', const=True) + common.title_case(self._struct.cpp_name), 'serialize', args, 'void', const=True) def get_to_bson_method(self): # type: () -> MethodInfo + args = [] + if self._struct.query_shape_component: + 
args.append("SerializationOptions options = {}") return MethodInfo( - common.title_case(self._struct.cpp_name), 'toBSON', [], 'BSONObj', const=True) + common.title_case(self._struct.cpp_name), 'toBSON', args, 'BSONObj', const=True) def get_op_msg_request_serializer_method(self): # type: () -> Optional[MethodInfo] @@ -580,7 +585,14 @@ def gen_namespace_check(self, indented_writer, db_name, element): indented_writer.write_line('invariant(_nss.isEmpty());') allow_global = 'true' if self._struct.allow_global_collection_name else 'false' indented_writer.write_line( - '_nss = ctxt.parseNSCollectionRequired(%s, %s, %s);' % (db_name, element, allow_global)) + 'auto collectionName = ctxt.checkAndAssertCollectionName(%s, %s);' % (element, + allow_global)) + indented_writer.write_line( + '_nss = NamespaceStringUtil::parseNamespaceFromRequest(%s, collectionName);' % + (db_name)) + indented_writer.write_line( + 'uassert(ErrorCodes::InvalidNamespace, str::stream() << "Invalid namespace specified: "' + ' << _nss.toStringForErrorMsg(), _nss.isValid());') class _CommandWithUUIDNamespaceTypeInfo(_CommandBaseTypeInfo): @@ -652,8 +664,15 @@ def gen_serializer(self, indented_writer): def gen_namespace_check(self, indented_writer, db_name, element): # type: (writer.IndentedTextWriter, str, str) -> None - indented_writer.write_line('invariant(_nssOrUUID.nss() || _nssOrUUID.uuid());') - indented_writer.write_line('_nssOrUUID = ctxt.parseNsOrUUID(%s, %s);' % (db_name, element)) + indented_writer.write_line( + 'auto collOrUUID = ctxt.checkAndAssertCollectionNameOrUUID(%s);' % (element)) + indented_writer.write_line( + '_nssOrUUID = stdx::holds_alternative(collOrUUID) ? NamespaceStringUtil::parseNamespaceFromRequest(%s, stdx::get(collOrUUID)) : NamespaceStringOrUUID(%s, stdx::get(collOrUUID));' + % (db_name, db_name)) + indented_writer.write_line( + 'uassert(ErrorCodes::InvalidNamespace, str::stream() << "Invalid namespace specified: "' + ' << _nssOrUUID.toStringForErrorMsg()' + ', !_nssOrUUID.isNamespaceString() || _nssOrUUID.nss().isValid());') def get_struct_info(struct): diff --git a/buildscripts/idl/idl/syntax.py b/buildscripts/idl/idl/syntax.py index e774a07cdad4d..c8e4697525b03 100644 --- a/buildscripts/idl/idl/syntax.py +++ b/buildscripts/idl/idl/syntax.py @@ -493,6 +493,10 @@ def __init__(self, file_name, line, column): self.serialize_op_msg_request_only = False # type: bool self.constructed = False # type: bool + self.query_shape = None # type: Optional[str] + + self.hidden = False # type: bool + super(Field, self).__init__(file_name, line, column) @@ -552,6 +556,8 @@ def __init__(self, file_name, line, column): self.cpp_validator_func = None # type: str self.is_command_reply = False # type: bool self.is_generic_cmd_list = None # type: Optional[str] + # pylint: disable=invalid-name + self.unsafe_dangerous_disable_extra_field_duplicate_checks = None # type: bool # Command only property self.cpp_name = None # type: str @@ -563,6 +569,8 @@ def __init__(self, file_name, line, column): # Internal property: cpp_namespace from globals section self.cpp_namespace = None # type: str + self.query_shape_component = False # type: bool + super(Struct, self).__init__(file_name, line, column) @@ -877,6 +885,8 @@ def __init__(self, file_name, line, column): self.cpp_varname = None # type: str self.default = None # type: Expression self.version = None # type: str + # pylint: disable=C0103 + self.shouldBeFCVGated = None # type: Expression super(FeatureFlag, self).__init__(file_name, line, column) diff --git 
a/buildscripts/idl/idl_check_compatibility.py b/buildscripts/idl/idl_check_compatibility.py index 2b40867519f9c..8d21fb3a31dc6 100644 --- a/buildscripts/idl/idl_check_compatibility.py +++ b/buildscripts/idl/idl_check_compatibility.py @@ -226,7 +226,6 @@ 'aggregate-param-needsMerge', 'aggregate-param-fromMongos', # Bulk fixes for fields that are strictly internal all along and should thus be marked unstable. - 'aggregate-param-$_generateV2ResumeTokens', 'endSessions-param-txnNumber', 'endSessions-param-txnUUID', 'findAndModify-param-stmtId', @@ -320,7 +319,6 @@ 'update-param-isTimeseriesNamespace', 'delete-param-isTimeseriesNamespace', 'findAndModify-param-stmtId', - 'aggregate-param-$_generateV2ResumeTokens', 'hello-param-loadBalanced', 'hello-reply-serviceId', 'hello-reply-isImplicitDefaultMajorityWC', diff --git a/buildscripts/idl/tests/compatibility_test_fail/new_generic_argument/generic_argument.idl b/buildscripts/idl/tests/compatibility_test_fail/new_generic_argument/generic_argument.idl index 157ba6cb002cf..2fcf36675157d 100644 --- a/buildscripts/idl/tests/compatibility_test_fail/new_generic_argument/generic_argument.idl +++ b/buildscripts/idl/tests/compatibility_test_fail/new_generic_argument/generic_argument.idl @@ -52,4 +52,4 @@ generic_reply_field_lists: description: "IDL checker provides no guarantees about unstable generic reply fields" fields: unstableGenericReplyField: - forward_from_shards: false \ No newline at end of file + forward_from_shards: false diff --git a/buildscripts/idl/tests/compatibility_test_fail/old_generic_argument/generic_argument.idl b/buildscripts/idl/tests/compatibility_test_fail/old_generic_argument/generic_argument.idl index 8220162ccfadc..4f21a7423c646 100644 --- a/buildscripts/idl/tests/compatibility_test_fail/old_generic_argument/generic_argument.idl +++ b/buildscripts/idl/tests/compatibility_test_fail/old_generic_argument/generic_argument.idl @@ -56,4 +56,4 @@ generic_reply_field_lists: description: "IDL checker provides no guarantees about unstable generic reply fields" fields: unstableGenericReplyField: - forward_from_shards: false \ No newline at end of file + forward_from_shards: false diff --git a/buildscripts/idl/tests/compatibility_test_pass/new_generic_argument/generic_argument.idl b/buildscripts/idl/tests/compatibility_test_pass/new_generic_argument/generic_argument.idl index 8aceb00800434..150257b05c917 100644 --- a/buildscripts/idl/tests/compatibility_test_pass/new_generic_argument/generic_argument.idl +++ b/buildscripts/idl/tests/compatibility_test_pass/new_generic_argument/generic_argument.idl @@ -58,4 +58,4 @@ generic_reply_field_lists: Removing an unstable reply field should still pass" fields: unstableGenericReplyField: - forward_from_shards: false \ No newline at end of file + forward_from_shards: false diff --git a/buildscripts/idl/tests/compatibility_test_pass/old_generic_argument/generic_argument.idl b/buildscripts/idl/tests/compatibility_test_pass/old_generic_argument/generic_argument.idl index 6f4b37df1685e..bd3d5a670316a 100644 --- a/buildscripts/idl/tests/compatibility_test_pass/old_generic_argument/generic_argument.idl +++ b/buildscripts/idl/tests/compatibility_test_pass/old_generic_argument/generic_argument.idl @@ -59,4 +59,4 @@ generic_reply_field_lists: unstableGenericReplyField: forward_from_shards: false removedUnstableGenericReplyField: - forward_from_shards: false \ No newline at end of file + forward_from_shards: false diff --git a/buildscripts/idl/tests/test_binder.py b/buildscripts/idl/tests/test_binder.py index 
9de1957515925..e52c922795013 100644 --- a/buildscripts/idl/tests/test_binder.py +++ b/buildscripts/idl/tests/test_binder.py @@ -131,13 +131,29 @@ def test_global_positive(self): spec = self.assert_bind( textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' cpp_includes: - 'bar' - 'foo'""")) - self.assertEqual(spec.globals.cpp_namespace, "something") + self.assertEqual(spec.globals.cpp_namespace, "mongo") self.assertListEqual(spec.globals.cpp_includes, ['bar', 'foo']) + spec = self.assert_bind( + textwrap.dedent(""" + global: + cpp_namespace: 'mongo::nested' + """)) + self.assertEqual(spec.globals.cpp_namespace, "mongo::nested") + + def test_global_negatives(self): + # type: () -> None + """Postive global tests.""" + self.assert_bind_fail( + textwrap.dedent(""" + global: + cpp_namespace: 'something' + """), idl.errors.ERROR_ID_BAD_CPP_NAMESPACE) + def test_type_positive(self): # type: () -> None """Positive type tests.""" @@ -1587,6 +1603,18 @@ def test_enum_positive(self): v3: 2 """)) + # Test int - non continuous + self.assert_bind( + textwrap.dedent(""" + enums: + foo: + description: foo + type: int + values: + v1: 0 + v3: 2 + """)) + # Test string self.assert_bind( textwrap.dedent(""" @@ -1615,18 +1643,6 @@ def test_enum_negative(self): v1: 0 """), idl.errors.ERROR_ID_ENUM_BAD_TYPE) - # Test int - non continuous - self.assert_bind_fail( - textwrap.dedent(""" - enums: - foo: - description: foo - type: int - values: - v1: 0 - v3: 2 - """), idl.errors.ERROR_ID_ENUM_NON_CONTINUOUS_RANGE) - # Test int - dups self.assert_bind_fail( textwrap.dedent(""" @@ -2335,7 +2351,7 @@ def test_feature_flag(self): # type: () -> None """Test feature flag checks around version.""" - # feature flag can default to false without a version + # feature flag can default to false without a version (shouldBeFCVGated can be true or false) self.assert_bind( textwrap.dedent(""" feature_flags: @@ -2343,9 +2359,20 @@ def test_feature_flag(self): description: "Make toast" cpp_varname: gToaster default: false + shouldBeFCVGated: false """)) - # feature flag can default to true with a version + self.assert_bind( + textwrap.dedent(""" + feature_flags: + featureFlagToaster: + description: "Make toast" + cpp_varname: gToaster + default: false + shouldBeFCVGated: true + """)) + + # if shouldBeFCVGated: true, feature flag can default to true with a version self.assert_bind( textwrap.dedent(""" feature_flags: @@ -2354,9 +2381,21 @@ def test_feature_flag(self): cpp_varname: gToaster default: true version: 123 + shouldBeFCVGated: true + """)) + + # if shouldBeFCVGated: false, we do not need a version + self.assert_bind( + textwrap.dedent(""" + feature_flags: + featureFlagToaster: + description: "Make toast" + cpp_varname: gToaster + default: true + shouldBeFCVGated: false """)) - # true is only allowed with a version + # if shouldBeFCVGated: true and default: true, a version is required self.assert_bind_fail( textwrap.dedent(""" feature_flags: @@ -2364,9 +2403,22 @@ def test_feature_flag(self): description: "Make toast" cpp_varname: gToaster default: true + shouldBeFCVGated: true """), idl.errors.ERROR_ID_FEATURE_FLAG_DEFAULT_TRUE_MISSING_VERSION) - # false is not allowed with a version + # false is not allowed with a version and shouldBeFCVGated: true + self.assert_bind_fail( + textwrap.dedent(""" + feature_flags: + featureFlagToaster: + description: "Make toast" + cpp_varname: gToaster + default: false + version: 123 + shouldBeFCVGated: true + """), 
idl.errors.ERROR_ID_FEATURE_FLAG_DEFAULT_FALSE_HAS_VERSION) + + # false is not allowed with a version and shouldBeFCVGated: false self.assert_bind_fail( textwrap.dedent(""" feature_flags: @@ -2375,8 +2427,21 @@ def test_feature_flag(self): cpp_varname: gToaster default: false version: 123 + shouldBeFCVGated: false """), idl.errors.ERROR_ID_FEATURE_FLAG_DEFAULT_FALSE_HAS_VERSION) + # if shouldBeFCVGated is false, a version is not allowed + self.assert_bind_fail( + textwrap.dedent(""" + feature_flags: + featureFlagToaster: + description: "Make toast" + cpp_varname: gToaster + default: true + version: 123 + shouldBeFCVGated: false + """), idl.errors.ERROR_ID_FEATURE_FLAG_SHOULD_BE_FCV_GATED_FALSE_HAS_VERSION) + def test_access_check(self): # type: () -> None """Test access check.""" @@ -2659,6 +2724,234 @@ def test_access_check_negative(self): reply_type: reply """), idl.errors.ERROR_ID_MISSING_ACCESS_CHECK) + def test_query_shape_component_validation(self): + self.assert_bind(self.common_types + textwrap.dedent(""" + structs: + struct1: + query_shape_component: true + strict: true + description: "" + fields: + field1: + query_shape: literal + type: string + field2: + type: bool + query_shape: parameter + """)) + + self.assert_bind_fail( + self.common_types + textwrap.dedent(""" + structs: + struct1: + query_shape_component: true + strict: true + description: "" + fields: + field1: + type: string + field2: + type: bool + query_shape: parameter + """), idl.errors.ERROR_ID_FIELD_MUST_DECLARE_SHAPE_LITERAL) + + self.assert_bind_fail( + self.common_types + textwrap.dedent(""" + structs: + struct1: + strict: true + description: "" + fields: + field1: + type: string + field2: + type: bool + query_shape: parameter + """), idl.errors.ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL) + + # Validating query_shape_anonymize relies on std::string + basic_types = textwrap.dedent(""" + types: + string: + bson_serialization_type: string + description: "A BSON UTF-8 string" + cpp_type: "std::string" + deserializer: "mongo::BSONElement::str" + bool: + bson_serialization_type: bool + description: "A BSON bool" + cpp_type: "bool" + deserializer: "mongo::BSONElement::boolean" + serialization_context: + bson_serialization_type: any + description: foo + cpp_type: foo + internal_only: true + """) + self.assert_bind(basic_types + textwrap.dedent(""" + structs: + struct1: + query_shape_component: true + strict: true + description: "" + fields: + field1: + query_shape: anonymize + type: string + field2: + query_shape: parameter + type: bool + """)) + + self.assert_bind(basic_types + textwrap.dedent(""" + structs: + struct1: + query_shape_component: true + strict: true + description: "" + fields: + field1: + query_shape: anonymize + type: array + field2: + query_shape: parameter + type: bool + """)) + + self.assert_bind_fail( + basic_types + textwrap.dedent(""" + structs: + struct1: + strict: true + description: "" + fields: + field1: + query_shape: blah + type: string + """), idl.errors.ERROR_ID_QUERY_SHAPE_INVALID_VALUE) + + self.assert_bind_fail( + basic_types + textwrap.dedent(""" + structs: + struct1: + query_shape_component: true + strict: true + description: "" + fields: + field1: + query_shape: anonymize + type: bool + field2: + query_shape: parameter + type: bool + """), idl.errors.ERROR_ID_INVALID_TYPE_FOR_SHAPIFY) + + self.assert_bind_fail( + basic_types + textwrap.dedent(""" + structs: + struct1: + query_shape_component: true + strict: true + description: "" + fields: + field1: + query_shape: anonymize + type: array + 
field2: + query_shape: parameter + type: bool + """), idl.errors.ERROR_ID_INVALID_TYPE_FOR_SHAPIFY) + + self.assert_bind_fail( + basic_types + textwrap.dedent(""" + structs: + StructZero: + strict: true + description: "" + fields: + field1: + query_shape: literal + type: string + """), idl.errors.ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL) + + self.assert_bind_fail( + basic_types + textwrap.dedent(""" + structs: + StructZero: + strict: true + description: "" + fields: + field1: + type: string + struct1: + query_shape_component: true + strict: true + description: "" + fields: + field2: + type: StructZero + description: "" + query_shape: literal + """), idl.errors.ERROR_ID_CANNOT_DECLARE_SHAPE_LITERAL) + + # pylint: disable=invalid-name + def test_struct_unsafe_dangerous_disable_extra_field_duplicate_checks_negative(self): + # type: () -> None + """Negative struct tests for unsafe_dangerous_disable_extra_field_duplicate_checks.""" + + # Setup some common types + test_preamble = self.common_types + \ + textwrap.dedent(""" + structs: + danger: + description: foo + strict: false + unsafe_dangerous_disable_extra_field_duplicate_checks: true + fields: + foo: string + """) + + # Test strict and unsafe_dangerous_disable_extra_field_duplicate_checks are not allowed + self.assert_bind_fail( + test_preamble + indent_text( + 1, + textwrap.dedent(""" + danger1: + description: foo + strict: true + unsafe_dangerous_disable_extra_field_duplicate_checks: true + fields: + foo: string + """)), idl.errors.ERROR_ID_STRICT_AND_DISABLE_CHECK_NOT_ALLOWED) + + # Test inheritance is prohibited through structs + self.assert_bind_fail( + test_preamble + indent_text( + 1, + textwrap.dedent(""" + danger2: + description: foo + strict: true + fields: + foo: string + d1: danger + """)), idl.errors.ERROR_ID_INHERITANCE_AND_DISABLE_CHECK_NOT_ALLOWED) + + # Test inheritance is prohibited through commands + self.assert_bind_fail( + test_preamble + textwrap.dedent(""" + commands: + dangerc: + description: foo + namespace: ignored + command_name: dangerc + strict: false + api_version: "" + fields: + foo: string + d1: danger + """), idl.errors.ERROR_ID_INHERITANCE_AND_DISABLE_CHECK_NOT_ALLOWED) + if __name__ == '__main__': diff --git a/buildscripts/idl/tests/test_compatibility.py b/buildscripts/idl/tests/test_compatibility.py index 97f08f5075df3..1805355a76e37 100644 --- a/buildscripts/idl/tests/test_compatibility.py +++ b/buildscripts/idl/tests/test_compatibility.py @@ -32,6 +32,7 @@ import unittest import sys from os import path + sys.path.append(path.dirname(path.dirname(path.abspath(__file__)))) import idl_check_compatibility # noqa: E402 pylint: disable=wrong-import-position diff --git a/buildscripts/idl/tests/test_import.py b/buildscripts/idl/tests/test_import.py index 46f7df2eb28ae..0bd3e97007cd5 100644 --- a/buildscripts/idl/tests/test_import.py +++ b/buildscripts/idl/tests/test_import.py @@ -110,7 +110,7 @@ def test_import_positive(self): "basetypes.idl": textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' types: string: @@ -174,7 +174,7 @@ def test_import_positive(self): "cycle1a.idl": textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' imports: - "cycle1b.idl" @@ -204,7 +204,7 @@ def test_import_positive(self): "cycle1b.idl": textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' imports: - "cycle1a.idl" @@ -227,7 +227,7 @@ def test_import_positive(self): "cycle2.idl": textwrap.dedent(""" global: - cpp_namespace: 'something' + 
cpp_namespace: 'mongo' imports: - "cycle2.idl" @@ -254,7 +254,7 @@ def test_import_positive(self): self.assert_bind( textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' imports: - "basetypes.idl" @@ -271,7 +271,7 @@ def test_import_positive(self): self.assert_bind( textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' imports: - "recurse2.idl" @@ -290,7 +290,7 @@ def test_import_positive(self): self.assert_bind( textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' imports: - "recurse2.idl" @@ -311,7 +311,7 @@ def test_import_positive(self): self.assert_bind( textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' imports: - "cycle1a.idl" @@ -329,7 +329,7 @@ def test_import_positive(self): self.assert_bind( textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' imports: - "cycle2.idl" @@ -350,7 +350,7 @@ def test_import_negative(self): "basetypes.idl": textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' types: string: @@ -380,7 +380,7 @@ def test_import_negative(self): "bug.idl": textwrap.dedent(""" global: - cpp_namespace: 'something' + cpp_namespace: 'mongo' types: bool: diff --git a/buildscripts/idl/tests/test_parser.py b/buildscripts/idl/tests/test_parser.py index 7caca54b07f56..91aa7d4ce2a81 100644 --- a/buildscripts/idl/tests/test_parser.py +++ b/buildscripts/idl/tests/test_parser.py @@ -1591,6 +1591,17 @@ def test_feature_flag(self): featureFlagToaster: description: "Make toast" cpp_varname: gToaster + shouldBeFCVGated: true + """), idl.errors.ERROR_ID_MISSING_REQUIRED_FIELD) + + # Missing shouldBeFCVGated + self.assert_parse_fail( + textwrap.dedent(""" + feature_flags: + featureFlagToaster: + description: "Make toast" + cpp_varname: gToaster + default: false """), idl.errors.ERROR_ID_MISSING_REQUIRED_FIELD) def _test_field_list(self, field_list_name, should_forward_name): @@ -1982,6 +1993,24 @@ def test_access_checks_negative(self): reply_type: foo_reply_struct """), idl.errors.ERROR_ID_EMPTY_ACCESS_CHECK) + # pylint: disable=invalid-name + def test_struct_unsafe_dangerous_disable_extra_field_duplicate_checks_negative(self): + + # Test commands and unsafe_dangerous_disable_extra_field_duplicate_checks are disallowed + self.assert_parse_fail( + textwrap.dedent(""" + commands: + dangerc: + description: foo + namespace: ignored + command_name: dangerc + api_version: "" + strict: false + unsafe_dangerous_disable_extra_field_duplicate_checks: true + fields: + foo: string + """), idl.errors.ERROR_ID_UNKNOWN_NODE) + if __name__ == '__main__': diff --git a/buildscripts/iwyu/README.md b/buildscripts/iwyu/README.md new file mode 100644 index 0000000000000..2e925d7500a27 --- /dev/null +++ b/buildscripts/iwyu/README.md @@ -0,0 +1,64 @@ +# IWYU Analysis tool + +This tool will run +[include-what-you-use](https://github.com/include-what-you-use/include-what-you-use) +(IWYU) analysis across the codebase via `compile_commands.json`. + +The `iwyu_config.yml` file consists of the current options and automatic +pragma marking. You can exclude files from the analysis here. + +The tool has two main modes of operation, `fix` and `check` modes. `fix` +mode will attempt to make changes to the source files based off IWYU's +suggestions. The check mode will simply check if there are any suggestion +at all. 
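For example, based on the `--check` flag that `run_iwyu_analysis.py` defines later in this patch, check mode can be invoked from the repository root roughly as follows (illustrative; it assumes `compile_commands.json` has already been generated):

```
python3 buildscripts/iwyu/run_iwyu_analysis.py --check
```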
+ +`fix` mode will take a long time to run, as the tool needs to rerun any +source in which an underlying header was changed to ensure things are not +broken, and therefore ends up recompiling the codebase several times over. + +For more information please refer to the script's `--help` option. + +# Example usage: + +First you must generate the `compile_commands.json` file via this command: + +``` +python3 buildscripts/scons.py --build-profile=compiledb compiledb +``` + +Next you can run the analysis: + +``` +python3 buildscripts/iwyu/run_iwyu_analysis.py +``` +The default mode is fix mode, and it will start making changes to the code +if any changes are found. + +# Debugging failures + +Occasionally the IWYU tool will run into problems where it is unable to suggest +valid changes and the changes will cause things to break (not compile). When +it hits a failure it will copy the source and all the headers that were used +at the time of the compilation into a directory where the same command can be +run to reproduce the error. + +You can examine the suggested changes in the source and headers and compare +them to the working source tree. Then you can make corrective changes to allow +IWYU to get past the failure. + +IWYU is not perfect and it makes several mistakes that a human can understand +and fix appropriately. + +# Running the tests + +This tool includes its own end-to-end testing. The test directory includes +subdirectories which contain source and IWYU configs to run the tool against. +The tests will then compare the results to built-in expected results and fail +if the tests are not producing the expected results. + +To run the tests use the command: + +``` +cd buildscripts/iwyu/test +python3 run_tests.py +``` diff --git a/buildscripts/iwyu/iwyu_config.yml b/buildscripts/iwyu/iwyu_config.yml new file mode 100644 index 0000000000000..56a997d626fa5 --- /dev/null +++ b/buildscripts/iwyu/iwyu_config.yml @@ -0,0 +1,83 @@ +# options passed to IWYU +iwyu_options: + - '--mapping_file=etc/iwyu_mapping.imp' + - '--no_fwd_decls' + - '--prefix_header_includes=add' + - '--transitive_includes_only' + +# options passed to the fix script +fix_options: + - '--blank_lines' + - '--nocomments' + - '--noreorder' + - '--separate_project_includes=mongo' + - '--safe_headers' + - '--only_re=^src/mongo\/.*' + # TODO SERVER-77051 we will eventually turn this on when our codebase is cleaned up.
+ # - '--nosafe_headers' + +# filename regex to swap no_include in place +# quotes and brackets not included in this config +# since this is targeting IWYU added headers +no_includes: + # avoid boost crazyness + - 'boost/.+/detail/.+' + - 'asio/impl/.+' + - 'boost/.+\.ipp' + # avoid stdlib detail headers + - 'ext/alloc_traits\.h' + - 'ext/type_traits\.h' + - 'cxxabi\.h' # https://github.com/include-what-you-use/include-what-you-use/issues/909 + - 'bits/.+' + - 'syscall\.h' + # arch specific + - 'boost/predef/hardware/simd/x86.+' + - 'emmintrin\.h' + # we use a third party format which confuses IWYU + - 'format\.h' + # this is a link time symbol overloading thing not meant to be included + - 'libunwind-x86_64\.h' + # abuse of preprocessor + - 'mongo/db/namespace_string_reserved\.def\.h' + +# path prefixes (non regex) to skip +skip_files: + - 'src/third_party' + - 'build/' + - 'src/mongo/tools/mongo_tidy_checks' + - 'src/mongo/util/net' # causes linkage issues + - 'src/mongo/util/text.cpp' + # IWYU confused on forward declares + - 'src/mongo/db/exec/near.cpp' + - 'src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp' + # Asio is going to need some special treatment, the headers are very finicky + - 'src/mongo/transport/asio' + # causes IWYU to crash: + - 'src/mongo/db/update/update_internal_node.cpp' + - 'src/mongo/db/update/update_array_node.cpp' + - 'src/mongo/db/update/update_object_node.cpp' + - 'src/mongo/db/update/update_array_node_test.cpp' + - 'src/mongo/db/update/update_object_node_test.cpp' + - 'src/mongo/util/options_parser/environment.cpp' + - 'src/mongo/util/options_parser/option_section.cpp' + +# regex file paths to add keep pragma +# include quotes are angle brackets +keep_includes: + - '".*\.cstruct"' # these are not true includes, but used for very large initializers + - '' + - '' + - '' + - '' + - '' + - '' + - '' + - '' # IWYU messes up template instantiation + - '"mongo/rpc/object_check\.h"' + - '"mongo/base/init\.h"' + - '"mongo/scripting/mozjs/wrapconstrainedmethod\.h"' + - '"mongo/dbtests/dbtests\.h"' # this is due to using statements in the header + - '"mongo/config\.h"' + - '"mongo/util/overloaded_visitor\.h"' + - '"mongo/db/query/optimizer/node\.h"' + - '"mongo/util/text\.h"' # includes platform specific functions diff --git a/buildscripts/iwyu/run_iwyu_analysis.py b/buildscripts/iwyu/run_iwyu_analysis.py new file mode 100644 index 0000000000000..7d83144e33124 --- /dev/null +++ b/buildscripts/iwyu/run_iwyu_analysis.py @@ -0,0 +1,1004 @@ +#!/usr/bin/env python3 +""" +TOOL FUNCTIONAL DESCRIPTION. + +Currently the tool works by running IWYU on a subset of compile_commands.json +(the ones we care about like checked in mongo source) and testing each change +in a copy of the original source/header tree so that other compiles are not +affected until it passes a normal compile itself. Due to header dependencies +we must recompile the source files to catch issue IWYU may have introduced +with some dependent header change. Header dependencies do not form a DAG so +we can not process sources in a deterministic fashion. The tool will loop +through all the compilations until all dependents in a compilation are +determined unchanged from the last time the compilation was performed. + +The general workflow used here is to run the tool till there no changes +(several hours on rhel-xxlarge) and fix the errors either in the tool config +or as a manual human change in the code. 
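A minimal sketch of the change-detection idea described above, in which the tool records MD5 hashes of a compilation's header deps and skips re-running IWYU when none have changed. The function names and the shape of the recorded data here are illustrative assumptions, not the tool's actual API:

```python
import hashlib
import os
from typing import Dict


def md5_of(path: str) -> str:
    # Hash file contents so we can tell whether a header dep changed since the last run.
    with open(path, 'rb') as fh:
        return hashlib.md5(fh.read()).hexdigest()


def deps_unchanged(recorded_hashes: Dict[str, str]) -> bool:
    # recorded_hashes maps a dep path to the MD5 seen on the last successful run;
    # a missing or modified dep means the compile command must go through IWYU again.
    for dep, old_hash in recorded_hashes.items():
        if not os.path.exists(dep) or md5_of(dep) != old_hash:
            return False
    return True
```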
+ +TOOL TECHNICAL DESCRIPTION: + +Regarding the code layout, the main function setups a thread pool executor +and processes each source from the compile_commands. From there it runs a +thread function and within that 5 parts (each there own function) for +each source file: + +1. Skip if deps are unchanged +2. Get the headers deps via -MMD +3. Run IWYU +4. Apply Fixes +5. test compile, record new header deps if passed + +The tool uses mtime and MD5 hashing to know if any header dep has changed. + +""" + +import argparse +import json +import subprocess +import tempfile +import shlex +import os +import re +import concurrent.futures +import hashlib +import atexit +import traceback +import threading +import shutil +import signal +import sys +import yaml +import enum +from dataclasses import dataclass, asdict +from typing import Dict, List, Any, Optional, Callable, Union, Tuple + +from tqdm import tqdm +from colorama import init as colorama_init +from colorama import Fore + +colorama_init() + +parser = argparse.ArgumentParser(description='Run include what you use and test output') + +parser.add_argument('--compile-commands', metavar='FILE', type=str, default='compile_commands.json', + help='Path to the compile commands file to use.') +parser.add_argument( + '--check', action='store_true', help= + 'Enables check mode, which does not apply fixes and only runs to see if any files produce IWYU changes. Exit 0 if no new changes detected.' +) +parser.add_argument( + '--config-file', metavar='FILE', type=str, default="", help= + 'Enables check mode, which does not apply fixes and only runs to see if any files produce IWYU changes. Exit 0 if no new changes detected.' +) +parser.add_argument( + '--iwyu-data', metavar='FILE', type=str, default='iwyu.dat', + help='Location of data used by IWYU, contains hash and status info about all files.') +parser.add_argument( + '--keep-going', action='store_true', help= + 'Do not stop on errors, instead resubmit the job to try again later (after things may have been fixed elsewhere)' +) +parser.add_argument( + '--cycle-debugging', action='store_true', help= + 'Once a cycle has been detected, each directory tree for each step in the cycle will be saved to a .cycle directory.' 
+) +parser.add_argument('--verbose', action='store_true', + help='Prints more info about what is taking place.') +parser.add_argument('--mongo-toolchain-bin-dir', type=str, + help='Which toolchain bin directory to use for this analysis.', + default='/opt/mongodbtoolchain/v4/bin') +parser.add_argument( + '--start-ratio', type=float, help= + 'decimal value between 0 and 1 which indicates what starting ratio index of the total compile commands to run over, can not be greater than the --end-ratio.', + default=0.0) +parser.add_argument( + '--end-ratio', type=float, help= + 'decimal value between 0 and 1 which indicates what ending ratio index of the total compile commands to run over, can not be less than the --start-ratio.', + default=1.0) +command_line_args = parser.parse_args() + +# the current state of all files, contain the cmd_entry, hashes, successes +IWYU_ANALYSIS_STATE: Dict[str, Any] = {} + +# the current state cycles being tracked +IWYU_CYCLE_STATE: Dict[str, Any] = {} + +hash_lookup_locks: Dict[str, threading.Lock] = {} +mtime_hash_lookup: Dict[str, Dict[str, Any]] = {} + +if command_line_args.config_file: + config_file = command_line_args.config_file +else: + config_file = os.path.join(os.path.dirname(__file__), "iwyu_config.yml") + +with open(config_file, "r") as stream: + config = yaml.safe_load(stream) + for key, value in config.items(): + if value is None: + config[key] = [] + +IWYU_OPTIONS = config.get('iwyu_options', []) +IWYU_FIX_OPTIONS = config.get('fix_options', []) +NO_INCLUDES = config.get('no_includes', []) +KEEP_INCLUDES = config.get('keep_includes', []) +SKIP_FILES = tuple(config.get('skip_files', [])) +CYCLE_FILES: List[str] = [] + + +@dataclass +class CompileCommand: + """An entry from compile_commands.json.""" + + file: str + command: str + directory: str + output: str + + +class ResultType(enum.Enum): + """ + Descriptions of enums. + + ERROR: unexpected or unrecognized error cases + FAILED: the IWYU task for a given compile command entry failed + NO_CHANGE: the input header tree and source file have not changed since last time + NOT_RUNNING: sources which we intentionally skip running IWYU all together + RESUBMIT: the IWYU task failed, but it may work later after other header changes + SUCCESS: the IWYU task for a source file has succeeded + """ + + ERROR = enum.auto() + FAILED = enum.auto() + NO_CHANGE = enum.auto() + NOT_RUNNING = enum.auto() + RESUBMIT = enum.auto() + SUCCESS = enum.auto() + + +TOOLCHAIN_DIR = command_line_args.mongo_toolchain_bin_dir +SHUTDOWN_FLAG = False +CLANG_INCLUDES = None +IWYU_OPTIONS = [val for pair in zip(['-Xiwyu'] * len(IWYU_OPTIONS), IWYU_OPTIONS) for val in pair] +if NO_INCLUDES: + NO_INCLUDE_REGEX = re.compile(r'^\s*#include\s+[\",<](' + '|'.join(NO_INCLUDES) + ')[\",>]') +if KEEP_INCLUDES: + KEEP_INCLUDE_REGEX = re.compile(r'^\s*#include\s+(' + '|'.join(KEEP_INCLUDES) + ')') +CHANGED_FILES_REGEX = re.compile(r"^The\sfull\sinclude-list\sfor\s(.+):$", re.MULTILINE) + + +def printer(message: str) -> None: + """ + Prints output as appropriate. + + We don't print output if we are shutting down because the logs will + explode and original error will be hard to locate. 
+ """ + + if not SHUTDOWN_FLAG or command_line_args.verbose: + tqdm.write(str(message)) + + +def debug_printer(message: str) -> None: + """Print each step in the processing of IWYU.""" + + if command_line_args.verbose: + tqdm.write(str(message)) + + +def failed_return() -> ResultType: + """A common method to allow the processing to continue even after some file fails.""" + + if command_line_args.keep_going: + return ResultType.RESUBMIT + else: + return ResultType.FAILED + + +def in_project_root(file: str) -> bool: + """ + Return true if the file is in the project root. + + This is assuming the project root is the same location + as the compile_commands.json file (the format of compile_commands.json + expects this as well). + """ + + return os.path.abspath(file).startswith( + os.path.abspath(os.path.dirname(command_line_args.compile_commands))) + + +def copy_error_state(cmd_entry: CompileCommand, test_dir: str, + dir_ext: str = '.iwyu_test_dir') -> Optional[str]: + """ + When we fail, we want to copy the current state of the temp dir. + + This is so that the command that was used can be replicated and rerun, + primarily for debugging purposes. + """ + + # we never use a test_dir in check mode, since no files are copied in that mode. + if command_line_args.check: + return None + + # make a directory in the output location that we can store the state of the the + # header dep and source file the compile command was run with, delete old results + base, _ = os.path.splitext(cmd_entry.output) + if os.path.exists(base + dir_ext): + shutil.rmtree(base + dir_ext) + os.makedirs(base + dir_ext, exist_ok=True) + basedir = os.path.basename(test_dir) + error_state_dir = os.path.join(base + dir_ext, basedir) + shutil.copytree(test_dir, error_state_dir) + return error_state_dir + + +def calc_hash_of_file(file: str) -> Optional[str]: + """ + Calculate the hash of a file. Use mtime as well. + + If the mtime is unchanged, don't do IO, just look up the last hash. + """ + + # we need to lock on specific file io because GIL does not cover system io, so two threads + # could be doing io on the same file at the same time. + if file not in hash_lookup_locks: + hash_lookup_locks[file] = threading.Lock() + with hash_lookup_locks[file]: + if file in mtime_hash_lookup and os.path.getmtime(file) == mtime_hash_lookup[file]['mtime']: + return mtime_hash_lookup[file]['hash'] + else: + try: + hash_val = hashlib.md5(open(file, 'rb').read()).hexdigest() + except FileNotFoundError: + return None + + mtime_hash_lookup[file] = {'mtime': os.path.getmtime(file), 'hash': hash_val} + return hash_val + + +def find_no_include(line: str, lines: List[str], output_lines: List[str]) -> bool: + """ + We need to regex the line to see if it includes an include that matches our NO_INCLUDE_REGEX. + + If so then we do not include that line + when we rewrite the file, and instead we add a IWYU no_include pragma inplace + """ + + no_include_header_found = False + if "// IWYU pragma: keep" in line: + return no_include_header_found + no_include_header = re.findall(NO_INCLUDE_REGEX, line) + + if no_include_header: + no_include_header_found = True + no_include_line = f'// IWYU pragma: no_include "{no_include_header[0]}"\n' + if no_include_line not in lines: + output_lines.append(no_include_line) + return no_include_header_found + + +def add_pragmas(source_files: List[str]): + """ + We automate some of the pragmas so there is not so much manual work. + + There are general cases for some of the pragmas. 
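+
+    For illustration (the header names here are only hypothetical examples of what
+    the configured regexes might match), an include line such as
+
+        #include "a.h"
+
+    that matches KEEP_INCLUDE_REGEX is rewritten to
+
+        #include "a.h" // IWYU pragma: keep
+
+    while an include matching NO_INCLUDE_REGEX is dropped and replaced with a
+    '// IWYU pragma: no_include "b.h"' comment so IWYU will not re-add it.
+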
To do this we open the target
+    source/header, search via regexes for specific includes we care about, then add
+    the pragma comments as necessary.
+    """
+
+    for source_file in source_files:
+
+        # before we run IWYU, we take a guess at the likely header by swapping .cpp for .h,
+        # so it may not be a real header. After IWYU runs we know exactly where to add the pragmas
+        # in case we got it wrong the first time around
+        if not os.path.exists(source_file):
+            continue
+
+        # we load in the file content, operate on it, and then write it back out
+        output_lines: List[str] = []
+        with open(source_file, 'r') as fin:
+            file_lines = fin.readlines()
+            for line in file_lines:
+
+                if NO_INCLUDES and find_no_include(line, file_lines, output_lines):
+                    continue
+
+                if KEEP_INCLUDES and re.search(KEEP_INCLUDE_REGEX,
+                                               line) and '// IWYU pragma: keep' not in line:
+
+                    output_lines.append(line.strip() + " // IWYU pragma: keep\n")
+                    continue
+
+                output_lines.append(line)
+
+        with open(source_file, 'w') as fout:
+            for line in output_lines:
+                fout.write(line)
+
+
+def recalc_hashes(deps: List[str], change_dir: Optional[str] = None) -> Dict[str, Any]:
+    """
+    We calculate the hashes from the header dep list generated by the compiler.
+
+    We also create a cumulative hash for convenience.
+
+    In some cases we are operating in a test directory, but deps are referenced as if they are
+    in the project root. The change_dir option here allows us to calc the hashes from
+    the test directory we may be working in, but still record the dep files in a fashion
+    compatible with other processes that work out of the project root, e.g. testing if there was a
+    change from last time.
+    """
+
+    hashes: Dict[str, Any] = {'deps': {}}
+    full_hash = hashlib.new('md5')
+    for dep in sorted(list(deps)):
+        if not in_project_root(dep):
+            continue
+        if change_dir:
+            orig_dep = dep
+            dep = os.path.join(change_dir, dep)
+        dep_hash = calc_hash_of_file(dep)
+        if dep_hash is None:
+            continue
+        if change_dir:
+            dep = orig_dep
+        full_hash.update(dep_hash.encode('utf-8'))
+        hashes['deps'][dep] = dep_hash
+    hashes['full_hash'] = full_hash.hexdigest()
+    return hashes
+
+
+def setup_test_dir(cmd_entry: CompileCommand, test_dir: str) -> List[str]:
+    """
+    Here we are copying the source and required header tree from the main source tree.
+
+    Returns the associated source and header that were copied into the test dir.
+
+    We want an isolated location to perform analysis and apply changes so everything is not
+    clashing. At this point we don't know for sure what header IWYU is going to associate with the source,
+    but for the mongo codebase, 99.9% of the time it's just swapping the .cpp for .h. We need this to apply
+    some pragmas to keep IWYU from removing headers it doesn't understand (cross platform or
+    third party like boost or asio). The pragmas are harmless in and of themselves, so adding them
+    mistakenly in the 0.1% of cases is negligible.
+    """
+
+    original_sources = [
+        orig_source for orig_source in [cmd_entry.file,
+                                        os.path.splitext(cmd_entry.file)[0] + '.h']
+        if os.path.exists(orig_source)
+    ]
+    test_source_files = [os.path.join(test_dir, source_file) for source_file in original_sources]
+    dep_headers = [dep for dep in IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'].keys()]
+
+    # copy each required header from our source tree into our test dir
+    # this does cost some time, but the alternative (everything operating in the real source tree)
+    # was much longer due to constant failures.
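+    # e.g. (hypothetical path) a dep like src/mongo/db/foo.h is copied to
+    # <test_dir>/src/mongo/db/foo.h, so the compile command can later run unmodified
+    # with the test dir as its working directory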
+
+    for source_file in dep_headers + ['etc/iwyu_mapping.imp']:
+        if in_project_root(source_file):
+            os.makedirs(os.path.join(test_dir, os.path.dirname(source_file)), exist_ok=True)
+            shutil.copyfile(source_file, os.path.join(test_dir, source_file))
+
+    # need to create dirs for outputs
+    for output in shlex.split(cmd_entry.output):
+        os.makedirs(os.path.join(test_dir, os.path.dirname(output)), exist_ok=True)
+
+    return test_source_files
+
+
+def get_clang_includes() -> List[str]:
+    """
+    IWYU needs some extra help to know what default includes clang is going to bring in when it normally compiles.
+
+    The query reliably gets the include dirs that would be used in normal compiles. We cache and reuse the result
+    so the subprocess only runs once.
+    """
+    global CLANG_INCLUDES  # pylint: disable=global-statement
+    if CLANG_INCLUDES is None:
+        clang_includes = subprocess.getoutput(
+            f"{TOOLCHAIN_DIR}/clang++ -Wp,-v -x c++ - -fsyntax-only < /dev/null 2>&1 | sed -e '/^#include <...>/,/^End of search/{{ //!b }};d'"
+        ).split('\n')
+        clang_includes = ['-I' + include.strip() for include in clang_includes]
+        CLANG_INCLUDES = clang_includes
+    return CLANG_INCLUDES
+
+
+def write_cycle_diff(source_file: str, cycle_dir: str, latest_hashes: Dict[str, Any]) -> None:
+    """
+    Write out the diffs between the last iteration and the latest iteration.
+
+    The file contains the hash before and after for each file involved in the compilation.
+    """
+
+    with open(os.path.join(cycle_dir, 'hashes_diff.txt'), 'w') as out:
+        dep_list = set(
+            list(IWYU_ANALYSIS_STATE[source_file]['hashes']['deps'].keys()) +
+            list(latest_hashes['deps'].keys()))
+        not_found_str = "not found" + (" " * 23)
+        for dep in sorted(dep_list):
+            out.write(
+                f"Original: {IWYU_ANALYSIS_STATE[source_file]['hashes']['deps'].get(dep, not_found_str)}, Latest: {latest_hashes['deps'].get(dep, not_found_str)} - {dep}\n"
+            )
+
+
+def check_for_cycles(cmd_entry: CompileCommand, latest_hashes: Dict[str, Any],
+                     test_dir: str) -> Optional[ResultType]:
+    """
+    IWYU can induce cycles, so we should check our previous results to see if a cycle has occurred.
+
+    These cycles can happen if a header change induces some other header change which then in turn induces
+    the original header change. These cycles are generally harmless and are easily broken with a keep
+    pragma, but finding which files induce the cycle is the challenge.
+
+    With cycle debug mode enabled, the entire header tree is saved for each iteration in the cycle so
+    all files can be fully examined.
+    """
+
+    if cmd_entry.file not in IWYU_CYCLE_STATE:
+        IWYU_CYCLE_STATE[cmd_entry.file] = {
+            'cycles': [],
+        }
+
+    if latest_hashes['full_hash'] in IWYU_CYCLE_STATE[cmd_entry.file]['cycles']:
+        if command_line_args.cycle_debugging:
+            if 'debug_cycles' not in IWYU_CYCLE_STATE[cmd_entry.file]:
+                IWYU_CYCLE_STATE[cmd_entry.file]['debug_cycles'] = {}
+
+            IWYU_CYCLE_STATE[cmd_entry.file]['debug_cycles'][
+                latest_hashes['full_hash']] = latest_hashes
+
+            cycle_dir = copy_error_state(
+                cmd_entry, test_dir, dir_ext=
+                f".{latest_hashes['full_hash']}.cycle{len(IWYU_CYCLE_STATE[cmd_entry.file]['debug_cycles'])}"
+            )
+            write_cycle_diff(cmd_entry.file, cycle_dir, latest_hashes)
+            if latest_hashes['full_hash'] not in IWYU_CYCLE_STATE[cmd_entry.file]['debug_cycles']:
+                printer(f"{Fore.YELLOW}[5] - Cycle Found!: {cmd_entry.file}{Fore.RESET}")
+            else:
+                printer(f"{Fore.RED}[5] - Cycle Done! : {cmd_entry.file}{Fore.RESET}")
+            return failed_return()
+        else:
+            printer(f"{Fore.RED}[5] - Cycle Found!: {cmd_entry.file}{Fore.RESET}")
+            CYCLE_FILES.append(cmd_entry.file)
+            return ResultType.SUCCESS
+    else:
+        IWYU_CYCLE_STATE[cmd_entry.file]['cycles'].append(latest_hashes['full_hash'])
+
+    return None
+
+
+def write_iwyu_data() -> None:
+    """Store the data we have acquired during this run so we can resume at the same spot on subsequent runs."""
+
+    # There might be faster ways to store this, like serialization or
+    # whatnot, but having human readable json is good for debugging.
+    # on a full build this takes around 10 seconds to write out.
+    if IWYU_ANALYSIS_STATE:
+        try:
+            # atomic move operation prevents ctrl+c mashing from
+            # destroying everything, at least we can keep the original
+            # data safe from emotional outbursts.
+            with tempfile.NamedTemporaryFile() as temp:
+                with open(temp.name, 'w') as iwyu_data_file:
+                    json.dump(IWYU_ANALYSIS_STATE, iwyu_data_file, sort_keys=True, indent=4)
+                shutil.move(temp.name, command_line_args.iwyu_data)
+        except FileNotFoundError as exc:
+            if temp.name in str(exc):
+                pass
+
+
+def need_to_process(cmd_entry: CompileCommand,
+                    custom_printer: Callable[[str], None] = printer) -> Optional[ResultType]:
+    """
+    The first step in processing a given source file.
+
+    We have a list of skip prefixes, for example build or third_party, but others can be added.
+
+    If it is a file we are not skipping, then we check if we have already done the work by calculating the
+    hashes and seeing if what we recorded last time has changed.
+    """
+
+    if cmd_entry.file.startswith(
+            SKIP_FILES) or cmd_entry.file in CYCLE_FILES or '/conftest_' in cmd_entry.file:
+        custom_printer(f"{Fore.YELLOW}[5] - Not running!: {cmd_entry.file}{Fore.RESET}")
+        return ResultType.NOT_RUNNING
+
+    if IWYU_ANALYSIS_STATE.get(cmd_entry.file):
+        hashes = recalc_hashes(IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'].keys())
+
+        # we only skip if the matching mode was successful last time, otherwise we assume we need to rerun
+        mode_success = 'CHECK' if command_line_args.check else 'FIX'
+        if command_line_args.verbose:
+            diff_files = list(
+                set(hashes['deps'].keys()).symmetric_difference(
+                    set(IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'].keys())))
+            if diff_files:
+                msg = f"[1] Need to process {cmd_entry.file} because different files:\n"
+                for file in diff_files:
+                    msg += f'{file}\n'
+                debug_printer(msg)
+            for file in IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'].keys():
+                if file in hashes['deps'] and hashes['deps'][file] != IWYU_ANALYSIS_STATE[
+                        cmd_entry.file]['hashes']['deps'][file]:
+                    debug_printer(
+                        f"[1] Need to process {cmd_entry.file} because hash changed:\n{file}: {hashes['deps'][file]}\n{file}: {IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'][file]}"
+                    )
+
+        if hashes['full_hash'] == IWYU_ANALYSIS_STATE[
+                cmd_entry.file]['hashes']['full_hash'] and mode_success in IWYU_ANALYSIS_STATE[
+                    cmd_entry.file].get('success', []):
+            custom_printer(f"{Fore.YELLOW}[5] - No Change! : {cmd_entry.file}{Fore.RESET}")
+            return ResultType.NO_CHANGE
+
+    return None
+
+
+def calc_dep_headers(cmd_entry: CompileCommand) -> Optional[ResultType]:
+    """
+    The second step in the IWYU process.
+
+    We need to get a list of headers which are dependencies so we can copy them to an isolated
+    working directory (so parallel IWYU changes don't break us). We will switch on the preprocessor
+    for faster generation of the dep file.
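+
+    As a rough sketch (the file names are placeholders, the flags mirror the code below),
+    a compile command such as
+
+        clang++ <flags> -o foo.o -c foo.cpp
+
+    is rewritten to
+
+        clang++ <flags> -o foo.o -E foo.cpp -MD -MF <tempfile>
+
+    so only the preprocessor runs while still emitting a dependency file.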
+
+    Once we have the deps list, we parse it and calc the hashes of the deps.
+    """
+
+    try:
+        with tempfile.NamedTemporaryFile() as depfile:
+
+            # the first time we could be executing a real command, so we make sure the dir exists
+            # so the compiler is not mad
+            outputs = shlex.split(cmd_entry.output)
+            for output in outputs:
+                out_dir = os.path.dirname(output)
+                if out_dir:
+                    os.makedirs(out_dir, exist_ok=True)
+
+            # set up the command for fast depfile generation
+            cmd = cmd_entry.command
+            cmd += f' -MD -MF {depfile.name}'
+            cmd = cmd.replace(' -c ', ' -E ')
+            debug_printer(f"[1] - Getting Deps: {cmd_entry.file}")
+
+            try:
+                deps_proc = subprocess.run(cmd, shell=True, capture_output=True, text=True,
+                                           timeout=300)
+            except subprocess.TimeoutExpired:
+                deps_proc = None
+
+            # if successful, record the latest deps with their hashes, otherwise try again later
+            if deps_proc is None or deps_proc.returncode != 0:
+                printer(f"{Fore.RED}[5] - Deps Failed!: {cmd_entry.file}{Fore.RESET}")
+                printer(deps_proc.stderr if deps_proc else "dependency generation timed out")
+                return ResultType.RESUBMIT
+            else:
+                with open(depfile.name) as deps:
+                    deps_str = deps.read()
+                    deps_str = deps_str.replace('\\\n', '').strip()
+
+                    hashes = recalc_hashes(shlex.split(deps_str)[1:])
+                    if not IWYU_ANALYSIS_STATE.get(cmd_entry.file):
+                        IWYU_ANALYSIS_STATE[cmd_entry.file] = asdict(cmd_entry)
+                    IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes'] = hashes
+                    IWYU_ANALYSIS_STATE[cmd_entry.file]['success'] = []
+
+    # if the dep command failed, the context manager will throw an exception; we will ignore just
+    # that case
+    except FileNotFoundError as exc:
+        traceback.print_exc()
+        if depfile.name in str(exc):
+            pass
+
+    return None
+
+
+def execute_iwyu(cmd_entry: CompileCommand, test_dir: str) -> Union[ResultType, bytes]:
+    """
+    The third step of IWYU analysis. Check mode will stop here.
+
+    Here we want to execute IWYU on our source. Note at this point in fix mode
+    we will be working out of an isolated test directory which has the
+    required header tree copied over. Check mode will just pass in the original
+    project root as the test_dir (the real source tree).
+    """
+
+    # assert we are working with a pure clang++ build
+    if not cmd_entry.command.startswith(f'{TOOLCHAIN_DIR}/clang++'):
+        printer("unexpected compiler:")
+        printer(cmd_entry.command)
+        return ResultType.FAILED
+
+    # swap out for our tool and add in extra options for IWYU
+    cmd = f'{TOOLCHAIN_DIR}/include-what-you-use' + cmd_entry.command[len(f'{TOOLCHAIN_DIR}/clang++'
+                                                                          ):]
+    cmd += ' ' + ' '.join(get_clang_includes())
+    cmd += ' ' + ' '.join(IWYU_OPTIONS)
+
+    # mimic the PATH we normally use in our build
+    env = os.environ.copy()
+    env['PATH'] += f':{TOOLCHAIN_DIR}'
+
+    debug_printer(f'[2] - Running IWYU: {cmd_entry.file}')
+    proc = subprocess.run(cmd, shell=True, env=env, capture_output=True, cwd=test_dir)
+
+    # IWYU has some bugs about forward declares, I am assuming, because in some cases even though
+    # we have passed --no_fwd_decls it still sometimes recommends forward declares, and sometimes
+    # they are wrong and cause compilation errors.
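+    # As an example (the type name is made up), a suggested forward declaration such as
+    # 'class SomeType;' is dropped by the filter below, while '#include' lines, removal
+    # lines starting with '-', and the section headers ending in ':' are passed through.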
+ remove_fwd_declares = [] + for line in proc.stderr.decode('utf-8').split('\n'): + line = line.strip() + if not line.endswith(':') and not line.startswith( + ('#include ', '-')) and ('class ' in line or 'struct ' in line): + continue + remove_fwd_declares.append(line) + iwyu_output = '\n'.join(remove_fwd_declares) + + # IWYU has weird exit codes, where a >=2 is considered success: + # https://github.com/include-what-you-use/include-what-you-use/blob/clang_12/iwyu_globals.h#L27-L34 + if command_line_args.check and proc.returncode != 2: + printer(f"{Fore.RED}[2] - IWYU Failed: {cmd_entry.file}{Fore.RESET}") + if proc.returncode < 2: + printer(f"exited with error: {proc.returncode}") + else: + printer(f"changes required: {proc.returncode - 2}") + printer(iwyu_output) + return failed_return() + elif proc.returncode < 2: + printer(f'{Fore.RED}[2] - IWYU Failed : {cmd_entry.file}{Fore.RESET}') + printer(cmd) + printer(str(proc.returncode)) + printer(proc.stderr.decode('utf-8')) + copy_error_state(cmd_entry, test_dir) + return failed_return() + + # save the output for debug or inspection later + with open(os.path.splitext(cmd_entry.output)[0] + '.iwyu', 'w') as iwyu_out: + iwyu_out.write(iwyu_output) + + return iwyu_output.encode('utf-8') + + +def apply_fixes(cmd_entry: CompileCommand, iwyu_output: bytes, + test_dir: str) -> Optional[ResultType]: + """ + Step 4 in the IWYU process. + + We need to run the fix_includes script to apply the output from the IWYU binary. + """ + cmd = [f'{sys.executable}', f'{TOOLCHAIN_DIR}/fix_includes.py'] + IWYU_FIX_OPTIONS + + debug_printer(f'[3] - Apply fixes : {cmd_entry.file}') + try: + subprocess.run(cmd, capture_output=True, input=iwyu_output, timeout=180, cwd=test_dir) + except subprocess.TimeoutExpired: + printer(f"{Fore.RED}[5] - Apply failed: {cmd_entry.file}{Fore.RESET}") + return ResultType.RESUBMIT + + return None + + +def test_compile(cmd_entry: CompileCommand, test_dir: str) -> Optional[ResultType]: + """ + Step 5 in the IWYU analysis and the last step for fix mode. + + We run the normal compile command in a test directory and make sure it is successful before + it will be copied back into the real source tree for inclusion into other jobs. + """ + + try: + with tempfile.NamedTemporaryFile() as depfile: + debug_printer(f"[4] - Test compile: {cmd_entry.file}") + + # we want to capture the header deps again because IWYU may have changed them + cmd = cmd_entry.command + cmd += f' -MMD -MF {depfile.name}' + try: + p3 = subprocess.run(cmd, shell=True, capture_output=True, text=True, timeout=300, + cwd=test_dir) + except (subprocess.TimeoutExpired, MemoryError): + p3 = None + pass + + # our test compile has failed so we need to report and setup for debug + if p3 is not None and p3.returncode != 0: + printer(f"{Fore.RED}[5] - IWYU Failed!: {cmd_entry.file}{Fore.RESET}") + printer(f"{cmd}") + printer(f"{p3.stderr}") + copy_error_state(cmd_entry, test_dir) + return failed_return() + + else: + with open(depfile.name) as deps: + # calculate the hashes of the deps used to create + # this successful compile. 
+                    deps_str = deps.read()
+                    deps_str = deps_str.replace('\\\n', '').strip()
+                    hashes = recalc_hashes(shlex.split(deps_str)[1:], change_dir=test_dir)
+
+                    if result := check_for_cycles(cmd_entry, hashes, test_dir):
+                        return result
+
+                    IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes'] = hashes
+                    if 'FIX' not in IWYU_ANALYSIS_STATE[cmd_entry.file]['success']:
+                        IWYU_ANALYSIS_STATE[cmd_entry.file]['success'].append('FIX')
+                    printer(f"{Fore.GREEN}[5] - IWYU Success: {cmd_entry.file}{Fore.RESET}")
+                    return ResultType.SUCCESS
+
+    # if we failed, the depfile may not have been generated, so check for that case and
+    # ignore it
+    except FileNotFoundError as exc:
+        if depfile.name in str(exc):
+            pass
+
+    return None
+
+
+def intialize_deps(cmd_entry: CompileCommand) -> Tuple[ResultType, CompileCommand]:
+    """
+    When running in fix mode, we take some time to initialize the header deps.
+
+    This is mainly used to improve the overall time to complete the full analysis. We want to process
+    the source files in order from those with the least dependencies to those with the most. The
+    rationale is that if a file has a lot of dependencies we should do it last, so any changes in those
+    dependencies are automatically accounted for and the chance of needing to do rework is lessened.
+    Also the progress bar can be more accurate and not count skipped files.
+    """
+
+    # step 1
+    if result := need_to_process(cmd_entry, custom_printer=debug_printer):
+        return result, cmd_entry
+
+    # if we have deps from a previous run, that should be a good enough indicator
+    # of how dependency heavy it is, and it's worth just taking that over
+    # needing to invoke the compiler.
+    try:
+        if len(IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps']):
+            return ResultType.SUCCESS, cmd_entry
+
+    except KeyError:
+        pass
+
+    if result := calc_dep_headers(cmd_entry):
+        return result, cmd_entry
+
+    return ResultType.SUCCESS, cmd_entry
+
+
+def check_iwyu(cmd_entry: CompileCommand) -> ResultType:
+    """
+    One of the two thread functions the main thread pool executor will call.
+
+    Here we execute up to step 3 (see the steps in the top comment) and report success
+    if IWYU reports no required changes.
+    """
+
+    # step 1
+    if result := need_to_process(cmd_entry):
+        return result
+
+    # step 2
+    if result := calc_dep_headers(cmd_entry):
+        return result
+
+    # step 3
+    iwyu_out = execute_iwyu(cmd_entry, '.')
+    if isinstance(iwyu_out, ResultType):
+        return iwyu_out
+
+    # success!
+    printer(f"{Fore.GREEN}[2] - IWYU Success: {cmd_entry.file}{Fore.RESET}")
+    if "CHECK" not in IWYU_ANALYSIS_STATE[cmd_entry.file]['success']:
+        IWYU_ANALYSIS_STATE[cmd_entry.file]['success'].append('CHECK')
+    return ResultType.SUCCESS
+
+
+def fix_iwyu(cmd_entry: CompileCommand) -> ResultType:
+    """
+    One of the two thread functions the main thread pool executor will call.
+
+    Here we execute up to step 5 (see the steps in the top comment) and report success
+    if we are able to successfully compile the original command after IWYU
+    has made its changes.
+    """
+
+    # step 1
+    if result := need_to_process(cmd_entry):
+        return result
+
+    # step 2
+    if result := calc_dep_headers(cmd_entry):
+        return result
+
+    with tempfile.TemporaryDirectory() as test_dir:
+
+        # the changes will be done in an isolated test dir so as not to conflict with
+        # other concurrent processes.
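+        # rough flow (mirroring the numbered steps below): copy the header tree into
+        # test_dir, run IWYU and apply its fixes there, and only after a successful test
+        # compile move the touched files back into the real source tree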
+        test_source_files = setup_test_dir(cmd_entry, test_dir)
+
+        # a first round of pragmas to make sure IWYU doesn't fail or remove things we don't want
+        add_pragmas(test_source_files)
+
+        # step 3
+        iwyu_out = execute_iwyu(cmd_entry, test_dir)
+        if isinstance(iwyu_out, ResultType):
+            return iwyu_out
+
+        # now we can extract exactly what files IWYU operated on and copy only those back
+        changed_files = [
+            os.path.join(test_dir, file)
+            for file in re.findall(CHANGED_FILES_REGEX, iwyu_out.decode('utf-8'))
+            if in_project_root(file)
+        ]
+        test_source_files += [file for file in changed_files if file not in test_source_files]
+
+        # step 4
+        if result := apply_fixes(cmd_entry, iwyu_out, test_dir):
+            return result
+
+        # a final round of pragmas for the next time this is run through IWYU
+        add_pragmas(test_source_files)
+
+        # step 5
+        result = test_compile(cmd_entry, test_dir)
+        if result == ResultType.SUCCESS:
+            for file in test_source_files:
+                if os.path.exists(file):
+                    shutil.move(file, file[len(test_dir) + 1:])
+
+        return result
+
+
+def run_iwyu(cmd_entry: CompileCommand) -> Tuple[ResultType, CompileCommand]:
+    """Intermediate function which delegates to the underlying mode to run."""
+
+    if command_line_args.check:
+        return check_iwyu(cmd_entry), cmd_entry
+    else:
+        return fix_iwyu(cmd_entry), cmd_entry
+
+
+def main() -> None:
+    """Main function."""
+    global IWYU_ANALYSIS_STATE, SHUTDOWN_FLAG  # pylint: disable=global-statement
+    atexit.register(write_iwyu_data)
+
+    with concurrent.futures.ThreadPoolExecutor(
+            max_workers=len(os.sched_getaffinity(0)) + 4) as executor:
+
+        # on ctrl+c, try to shut down as fast as possible.
+        def sigint_handler(the_signal, frame):
+            executor.shutdown(wait=False, cancel_futures=True)
+            sys.exit(1)
+
+        signal.signal(signal.SIGINT, sigint_handler)
+
+        # load in any data from prior runs
+        if os.path.exists(command_line_args.iwyu_data):
+            with open(command_line_args.iwyu_data) as iwyu_data_file:
+                IWYU_ANALYSIS_STATE = json.load(iwyu_data_file)
+
+        # load in the compile commands
+        with open(command_line_args.compile_commands) as compdb_file:
+            compiledb = [CompileCommand(**json_data) for json_data in json.load(compdb_file)]
+
+        # assert the generated source code has been generated
+        for cmd_entry in compiledb:
+            if cmd_entry.file.endswith('_gen.cpp') and not os.path.exists(cmd_entry.file):
+                printer(f"{Fore.RED}[5] - Missing Gen!: {cmd_entry.file}{Fore.RESET}")
+                printer(
+                    f"Error: missing generated file {cmd_entry.file}, make sure generated-sources are generated."
+                )
+                sys.exit(1)
+
+        total_cmds = len(compiledb)
+        start_index = int(total_cmds * command_line_args.start_ratio)
+        if start_index < 0:
+            start_index = 0
+        if start_index > total_cmds:
+            start_index = total_cmds
+
+        end_index = int(total_cmds * command_line_args.end_ratio)
+        if end_index < 0:
+            end_index = 0
+        if end_index > total_cmds:
+            end_index = total_cmds
+
+        if start_index == end_index:
+            print(f"Error: start_index and end_index are the same: {start_index}")
+            sys.exit(1)
+        if start_index > end_index:
+            print(
+                f"Error: start_index {start_index} can not be greater than end_index {end_index}"
+            )
+            sys.exit(1)
+
+        print(f"Analyzing compile commands from {start_index} to {end_index}.")
+        compiledb = compiledb[start_index:end_index]
+        if not command_line_args.check:
+            # We can optimize the order we process things by processing source files
+            # with the least number of dependencies first. This is a cost up front
+            # but greatly reduces the amount of re-processing that has to be done later.
+            printer("Getting Initial Header Dependencies...")
+            cmd_entry_list = []
+            try:
+                with tqdm(total=len(compiledb), disable=None) as pbar:
+
+                    # create and run the dependency check jobs
+                    future_cmd = {
+                        executor.submit(intialize_deps, cmd_entry): cmd_entry
+                        for cmd_entry in compiledb
+                    }
+                    for future in concurrent.futures.as_completed(future_cmd):
+                        result, cmd_entry = future.result()
+                        if result != ResultType.NOT_RUNNING:
+                            cmd_entry_list.append(cmd_entry)
+                        pbar.update(1)
+            except Exception:
+                SHUTDOWN_FLAG = True
+                traceback.print_exc()
+                executor.shutdown(wait=True, cancel_futures=True)
+                sys.exit(1)
+        else:
+            cmd_entry_list = compiledb
+
+        try:
+
+            # this loop will keep looping until a full run produces no new changes.
+            changes_left = True
+            while changes_left:
+                changes_left = False
+
+                with tqdm(total=len(cmd_entry_list), disable=None) as pbar:
+
+                    # create and run the IWYU jobs
+                    def dep_sorted(cmd_entry):
+                        try:
+                            return len(IWYU_ANALYSIS_STATE[cmd_entry.file]['hashes']['deps'])
+                        except KeyError:
+                            return 0
+
+                    future_cmd = {
+                        executor.submit(run_iwyu, cmd_entry): cmd_entry
+                        for cmd_entry in sorted(cmd_entry_list, key=dep_sorted)
+                    }
+
+                    # process the results
+                    for future in concurrent.futures.as_completed(future_cmd):
+                        result, cmd_entry = future.result()
+
+                        # any result which implies there could still be changes required triggers
+                        # the next loop
+                        if result not in (ResultType.NO_CHANGE, ResultType.NOT_RUNNING):
+                            changes_left = True
+
+                        # if a file is considered done for this loop, update the status bar
+                        if result in [
+                                ResultType.SUCCESS, ResultType.NO_CHANGE, ResultType.NOT_RUNNING
+                        ]:
+                            pbar.update(1)
+                        # resubmit jobs which may have a better chance to run later
+                        elif result == ResultType.RESUBMIT:
+                            executor.submit(run_iwyu, cmd_entry)
+                        # handle a failure case, the exception quickly drops us out of this loop.
+                        else:
+                            SHUTDOWN_FLAG = True
+                            tqdm.write(
+                                f"{result.name}: Shutting down other threads, please be patient."
+ ) + raise Exception( + f'Shutdown due to {result.name} {cmd_entry["file"]}') + + except Exception: + SHUTDOWN_FLAG = True + traceback.print_exc() + executor.shutdown(wait=True, cancel_futures=True) + sys.exit(1) + finally: + if CYCLE_FILES: + printer(f"{Fore.YELLOW} Cycles detected:") + for file in CYCLE_FILES: + printer(f' {file}') + + +main() diff --git a/buildscripts/iwyu/test/basic/a.h b/buildscripts/iwyu/test/basic/a.h new file mode 100644 index 0000000000000..ad792ace34b47 --- /dev/null +++ b/buildscripts/iwyu/test/basic/a.h @@ -0,0 +1 @@ +#include "b.h" diff --git a/buildscripts/iwyu/test/basic/b.cpp b/buildscripts/iwyu/test/basic/b.cpp new file mode 100644 index 0000000000000..dcbc86277644a --- /dev/null +++ b/buildscripts/iwyu/test/basic/b.cpp @@ -0,0 +1,5 @@ +#include "a.h" + +type_b return_b_function() { + return type_b(); +} diff --git a/buildscripts/iwyu/test/basic/b.h b/buildscripts/iwyu/test/basic/b.h new file mode 100644 index 0000000000000..422d7626e9077 --- /dev/null +++ b/buildscripts/iwyu/test/basic/b.h @@ -0,0 +1 @@ +class type_b {}; diff --git a/buildscripts/iwyu/test/basic/expected_results.py b/buildscripts/iwyu/test/basic/expected_results.py new file mode 100644 index 0000000000000..98ed60ea4fb80 --- /dev/null +++ b/buildscripts/iwyu/test/basic/expected_results.py @@ -0,0 +1,17 @@ +import os +import sys + +EXPECTED_B_CPP = """ +#include "b.h" + +type_b return_b_function() { + return type_b(); +} +""" + +with open('b.cpp') as f: + content = f.read() + if content != EXPECTED_B_CPP: + print(f'Actual:\n"""{content}"""') + print(f'Expected:\n"""{EXPECTED_B_CPP}"""') + sys.exit(1) diff --git a/buildscripts/iwyu/test/basic/test_config.yml b/buildscripts/iwyu/test/basic/test_config.yml new file mode 100644 index 0000000000000..a5b906f5558b9 --- /dev/null +++ b/buildscripts/iwyu/test/basic/test_config.yml @@ -0,0 +1,25 @@ +# options passed to IWYU +iwyu_options: + - '--max_line_length=100' + - '--no_fwd_decls' + - '--prefix_header_includes=add' + - '--transitive_includes_only' + +# options passed to the fix script +fix_options: + - '--blank_lines' + - '--nocomments' + - '--noreorder' + - '--safe_headers' + +# filename regex to swap no_include in place +# quotes and brackets not included quotes are always assumed +# since this is targeting IWYU added headers +no_includes: + +# prefixes (non regex) to skip +skip_files: + +# regex file paths to add keep pragma +# include quotes are angle brackets +keep_includes: diff --git a/buildscripts/iwyu/test/no_include/a.h b/buildscripts/iwyu/test/no_include/a.h new file mode 100644 index 0000000000000..ad792ace34b47 --- /dev/null +++ b/buildscripts/iwyu/test/no_include/a.h @@ -0,0 +1 @@ +#include "b.h" diff --git a/buildscripts/iwyu/test/no_include/b.cpp b/buildscripts/iwyu/test/no_include/b.cpp new file mode 100644 index 0000000000000..dcbc86277644a --- /dev/null +++ b/buildscripts/iwyu/test/no_include/b.cpp @@ -0,0 +1,5 @@ +#include "a.h" + +type_b return_b_function() { + return type_b(); +} diff --git a/buildscripts/iwyu/test/no_include/b.h b/buildscripts/iwyu/test/no_include/b.h new file mode 100644 index 0000000000000..422d7626e9077 --- /dev/null +++ b/buildscripts/iwyu/test/no_include/b.h @@ -0,0 +1 @@ +class type_b {}; diff --git a/buildscripts/iwyu/test/no_include/expected_results.py b/buildscripts/iwyu/test/no_include/expected_results.py new file mode 100644 index 0000000000000..90bda7e15a48e --- /dev/null +++ b/buildscripts/iwyu/test/no_include/expected_results.py @@ -0,0 +1,18 @@ +import os +import sys + +EXPECTED_B_CPP = 
"""// IWYU pragma: no_include "b.h" + +#include "a.h" // IWYU pragma: keep + +type_b return_b_function() { + return type_b(); +} +""" + +with open('b.cpp') as f: + content = f.read() + if content != EXPECTED_B_CPP: + print(f'Actual:\n"""{content}"""') + print(f'Expected:\n"""{EXPECTED_B_CPP}"""') + sys.exit(1) diff --git a/buildscripts/iwyu/test/no_include/test_config.yml b/buildscripts/iwyu/test/no_include/test_config.yml new file mode 100644 index 0000000000000..e441f5bac352b --- /dev/null +++ b/buildscripts/iwyu/test/no_include/test_config.yml @@ -0,0 +1,27 @@ +# options passed to IWYU +iwyu_options: + - '--max_line_length=100' + - '--no_fwd_decls' + - '--prefix_header_includes=add' + - '--transitive_includes_only' + +# options passed to the fix script +fix_options: + - '--blank_lines' + - '--nocomments' + - '--noreorder' + - '--safe_headers' + +# filename regex to swap no_include in place +# quotes and brackets not included quotes are always assumed +# since this is targeting IWYU added headers +no_includes: + - 'b.h' + +# prefixes (non regex) to skip +skip_files: + +# regex file paths to add keep pragma +# include quotes are angle brackets +keep_includes: +- '"a.h"' diff --git a/buildscripts/iwyu/test/run_tests.py b/buildscripts/iwyu/test/run_tests.py new file mode 100644 index 0000000000000..d0e32f00a8dce --- /dev/null +++ b/buildscripts/iwyu/test/run_tests.py @@ -0,0 +1,97 @@ +import pathlib +import yaml +import json +import shutil +import os +import glob +import subprocess +import sys +import argparse +import concurrent.futures + +parser = argparse.ArgumentParser(description='Run tests for the IWYU analysis script.') + +parser.add_argument('--mongo-toolchain-bin-dir', type=str, + help='Which toolchain bin directory to use for this analysis.', + default='/opt/mongodbtoolchain/v4/bin') + +args = parser.parse_args() + +if os.getcwd() != pathlib.Path(__file__).parent: + print( + f"iwyu test script must run in the tests directory, changing dirs to {pathlib.Path(__file__).parent.resolve()}" + ) + os.chdir(pathlib.Path(__file__).parent.resolve()) + +analysis_script = pathlib.Path(__file__).parent.parent / 'run_iwyu_analysis.py' + + +def run_test(entry): + print(f"Running test {pathlib.Path(entry)}...") + test_dir = pathlib.Path(entry) / 'test_run' + if os.path.exists(test_dir): + shutil.rmtree(test_dir) + + shutil.copytree(pathlib.Path(entry), test_dir) + + source_files = glob.glob('**/*.cpp', root_dir=test_dir, recursive=True) + compile_commands = [] + + for source_file in source_files: + output = os.path.splitext(source_file)[0] + '.o' + compile_commands.append({ + 'file': source_file, + 'command': f"{args.mongo_toolchain_bin_dir}/clang++ -o {output} -c {source_file}", + "directory": os.path.abspath(test_dir), + "output": output, + }) + + with open(test_dir / 'compile_commands.json', 'w') as compdb: + json.dump(compile_commands, compdb) + + os.makedirs(test_dir / 'etc', exist_ok=True) + with open(test_dir / 'etc' / 'iwyu_mapping.imp', 'w') as mapping: + mapping.write( + '[{include: ["\\"placeholder.h\\"", "private", "\\"placeholder2.h\\"", "public"]}]') + + iwyu_run = subprocess.run( + [sys.executable, analysis_script, '--verbose', '--config-file=test_config.yml'], text=True, + stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=test_dir) + + results_run = subprocess.run( + [sys.executable, pathlib.Path(entry) / 'expected_results.py'], stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, text=True, cwd=test_dir) + + msg = '\n'.join([iwyu_run.stdout, results_run.stdout, f"FAILED!: 
{pathlib.Path(entry)}"]) + msg = '\n'.join([f"[{pathlib.Path(entry).name}] {line}" for line in msg.split('\n')]) + + if results_run.returncode != 0: + return results_run.returncode, msg, pathlib.Path(entry).name + else: + return results_run.returncode, f"[{pathlib.Path(entry).name}] PASSED!: {pathlib.Path(entry)}", pathlib.Path( + entry).name + + +failed_tests = [] +with concurrent.futures.ThreadPoolExecutor( + max_workers=len(os.sched_getaffinity(0)) + 4) as executor: + + # create and run the IWYU jobs + future_cmd = { + executor.submit(run_test, entry): entry + for entry in pathlib.Path(__file__).parent.glob('*') if os.path.isdir(entry) + } + + # process the results + for future in concurrent.futures.as_completed(future_cmd): + result, message, test_name = future.result() + if result != 0: + failed_tests += [test_name] + print(message) + +print("\n***Tests complete.***") +if failed_tests: + print("The following tests failed:") + for test in failed_tests: + print(' - ' + test) + print("Please review the logs above for more information.") diff --git a/buildscripts/large_file_check.py b/buildscripts/large_file_check.py new file mode 100755 index 0000000000000..7c5388f1f34f8 --- /dev/null +++ b/buildscripts/large_file_check.py @@ -0,0 +1,146 @@ +#!/usr/bin/env python3 +"""Check files in git diff to ensure they are within a given size limit.""" + +# pylint: disable=wrong-import-position + +import argparse +import fnmatch +import logging +import os +import pathlib +import sys +import textwrap + +from typing import Any, Callable, Dict, List, Optional, Tuple + +import structlog + +from git import Repo + +mongo_dir = os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__)))) +# Get relative imports to work when the package is not installed on the PYTHONPATH. +if __name__ == "__main__" and __package__ is None: + sys.path.append(mongo_dir) + +from buildscripts.linter import git +from buildscripts.patch_builds.change_data import (RevisionMap, find_changed_files_in_repos, + generate_revision_map) + + +# Console renderer for structured logging +def renderer(_logger: logging.Logger, _name: str, eventdict: Dict[Any, Any]) -> str: + if 'files' in eventdict: + return "{event}: {files}".format(**eventdict) + if 'repo' in eventdict: + return "{event}: {repo}".format(**eventdict) + if 'file' in eventdict: + if 'bytes' in eventdict: + return "{event}: {file} {bytes} bytes".format(**eventdict) + return "{event}: {file}".format(**eventdict) + return "{event}".format(**eventdict) + + +# Configure the logger so it doesn't spam output on huge diffs +structlog.configure( + logger_factory=structlog.stdlib.LoggerFactory(), + wrapper_class=structlog.stdlib.BoundLogger, + cache_logger_on_first_use=True, + processors=[ + structlog.stdlib.filter_by_level, + renderer, + ], +) + +LOGGER = structlog.get_logger(__name__) +MONGO_REVISION_ENV_VAR = "REVISION" +ENTERPRISE_REVISION_ENV_VAR = "ENTERPRISE_REV" + + +def _get_repos_and_revisions() -> Tuple[List[Repo], RevisionMap]: + """Get the repo object and a map of revisions to compare against.""" + modules = git.get_module_paths() + repos = [Repo(path) for path in modules] + revision_map = generate_revision_map( + repos, { + "mongo": os.environ.get(MONGO_REVISION_ENV_VAR), + "enterprise": os.environ.get(ENTERPRISE_REVISION_ENV_VAR) + }) + return repos, revision_map + + +def git_changed_files(excludes: List[pathlib.Path]) -> List[pathlib.Path]: + """ + Get the files that have changes since the last git commit. 
+ + :param excludes: A list of files which should be excluded from changed file checks. + :return: List of changed files. + """ + repos, revision_map = _get_repos_and_revisions() + LOGGER.debug("revisions", revision=revision_map) + + def _filter_fn(file_path: pathlib.Path) -> bool: + if not file_path.exists(): + return False + for exclude in excludes: + if fnmatch.fnmatch(file_path, exclude): + return False + return True + + files = [ + filename + for filename in list(map(pathlib.Path, find_changed_files_in_repos(repos, revision_map))) + if _filter_fn(filename) + ] + + LOGGER.debug("Found files to check", files=list(map(str, files))) + return files + + +def diff_file_sizes(size_limit: int, excludes: Optional[List[str]] = None) -> List[pathlib.Path]: + if excludes is None: + excludes = [] + + large_files: list[pathlib.Path] = [] + + for file_path in git_changed_files(excludes): + LOGGER.debug("Checking file size", file=str(file_path)) + file_size = file_path.stat().st_size + if file_size > size_limit: + LOGGER.error("File too large", file=str(file_path), bytes=file_size) + large_files.append(file_path) + + return large_files + + +def main(*args: str) -> int: + """Execute Main entry point.""" + + parser = argparse.ArgumentParser( + description='Git commit large file checker.', epilog=textwrap.dedent('''\ + NOTE: The --exclude argument is an exact match but can accept glob patterns. If * is used, + it matches *all* characters, including path separators. + ''')) + parser.add_argument("--verbose", action="store_true", help="Enable verbose logging") + parser.add_argument("--exclude", help="Paths to exclude from check", nargs="+", + type=pathlib.Path, required=False) + parser.add_argument("--size-mb", help="File size limit (MiB)", type=int, default="10") + parsed_args = parser.parse_args(args[1:]) + + if parsed_args.verbose: + logging.basicConfig(level=logging.DEBUG) + structlog.stdlib.filter_by_level(LOGGER, 'debug', {}) + else: + logging.basicConfig(level=logging.INFO) + structlog.stdlib.filter_by_level(LOGGER, 'info', {}) + + large_files = diff_file_sizes(parsed_args.size_mb * 1024 * 1024, parsed_args.exclude) + if len(large_files) == 0: + LOGGER.info("All files passed size check") + return 0 + + LOGGER.error("Some files failed size check", files=list(map(str, large_files))) + return 1 + + +if __name__ == '__main__': + sys.exit(main(*sys.argv)) diff --git a/buildscripts/libdeps/SCHEMA_CHANGE_LOG.txt b/buildscripts/libdeps/SCHEMA_CHANGE_LOG.txt index 4e170dfd6e1d4..5e61a0b26fafb 100644 --- a/buildscripts/libdeps/SCHEMA_CHANGE_LOG.txt +++ b/buildscripts/libdeps/SCHEMA_CHANGE_LOG.txt @@ -1,3 +1,3 @@ 3 removed shim node property 2 flipped edge direction in graph file data -1 initial schema \ No newline at end of file +1 initial schema diff --git a/buildscripts/libdeps/find_symbols.c b/buildscripts/libdeps/find_symbols.c index e6eabea1598aa..f28d165a1d8ab 100644 --- a/buildscripts/libdeps/find_symbols.c +++ b/buildscripts/libdeps/find_symbols.c @@ -366,4 +366,4 @@ int main(int argc, char** argv) { clean_up(); exit(0); -} \ No newline at end of file +} diff --git a/buildscripts/libdeps/graph_visualizer.py b/buildscripts/libdeps/graph_visualizer.py index 357e42c9d2048..afd4cbd575adc 100644 --- a/buildscripts/libdeps/graph_visualizer.py +++ b/buildscripts/libdeps/graph_visualizer.py @@ -43,6 +43,8 @@ import textwrap import flask +from werkzeug.serving import is_running_from_reloader + from graph_visualizer_web_stack.flask.flask_backend import BackendServer @@ -98,10 +100,10 @@ def 
check_node(node_check, cwd): """Check node version and install npm packages.""" status, output = subprocess.getstatusoutput(node_check) - if status != 0 or not output.split('\n')[-1].startswith('v12'): + if status != 0 or not output.split('\n')[-1].startswith('v14'): print( textwrap.dedent(f"""\ - Failed to get node version 12 from 'node -v': + Failed to get node version 14 from 'node -v': output: '{output}' Perhaps run 'source {cwd}/setup_node_env.sh install'""")) exit(1) @@ -179,24 +181,26 @@ def main(): npm_start = ['npm', 'start'] npm_build = ['npm', 'run', 'build'] - check_node(node_check, cwd) + if not is_running_from_reloader(): + check_node(node_check, cwd) - frontend_thread = None - if args.launch in ['frontend', 'both']: - if args.debug: - npm_command = npm_start - else: - npm_command = npm_build + frontend_thread = None + if args.launch in ['frontend', 'both']: + if args.debug: + npm_command = npm_start + else: + npm_command = npm_build - frontend_thread = threading.Thread(target=start_frontend_thread, - args=(web_service_info, npm_command, args.debug)) - frontend_thread.start() + frontend_thread = threading.Thread(target=start_frontend_thread, + args=(web_service_info, npm_command, args.debug)) + frontend_thread.start() if args.launch in ['backend', 'both']: start_backend(web_service_info, args.debug) - if frontend_thread: - frontend_thread.join() + if not is_running_from_reloader(): + if frontend_thread: + frontend_thread.join() if __name__ == "__main__": diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/package.json b/buildscripts/libdeps/graph_visualizer_web_stack/package.json index ace34ed2d8d22..c375f0f2ceeff 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/package.json +++ b/buildscripts/libdeps/graph_visualizer_web_stack/package.json @@ -3,7 +3,7 @@ "version": "4.0.0", "private": true, "engines": { - "node": ">=12.0.0" + "node": ">=14.0.0" }, "engineStrict": true, "scripts": { @@ -13,37 +13,36 @@ "test": "react-scripts test", "eject": "react-scripts eject" }, - "//": "TODO: adding bezier and force-graph and locking versions until https://github.com/vasturiano/force-graph/issues/182 is resolved", "dependencies": { - "@emotion/react": "^11.1.4", - "@emotion/styled": "^11.0.0", - "@material-ui/core": "5.0.0-alpha.22", - "@material-ui/icons": "5.0.0-alpha.22", - "@material-ui/lab": "5.0.0-alpha.22", - "bezier-js": "4.0.3", - "canvas": "^2.5.0", - "date-fns": "^2.16.1", - "dayjs": "^1.9.7", - "force-graph": "1.40.0", + "@emotion/react": "^11.11.0", + "@emotion/styled": "^11.11.0", + "@material-ui/core": "^5.0.0-alpha.22", + "@material-ui/icons": "^5.0.0-alpha.22", + "@material-ui/lab": "^5.0.0-alpha.22", + "bezier-js": "6.1.3", + "canvas": "^2.11.2", + "date-fns": "^2.30.0", + "dayjs": "^1.11.7", + "force-graph": "^1.43.1", "http-proxy-middleware": "^2.0.6", - "http-server": "^0.12.3", - "luxon": "^1.25.0", - "moment": "^2.29.1", - "p-limit": "^3.0.2", - "react": "^16.8", - "react-dom": "^16.0.0", - "react-force-graph-2d": "1.18.1", - "react-force-graph-3d": "1.18.8", - "react-indiana-drag-scroll": "^1.8.0", - "react-redux": "^7.2.2", - "react-resize-aware": "^3.1.0", - "react-resize-detector": "^6.6.5", - "react-scripts": "^4.0.3", + "http-server": "^14.1.1", + "luxon": "^3.3.0", + "moment": "^2.29.4", + "p-limit": "^4.0.0", + "react": "^18.2", + "react-dom": "^18.2.0", + "react-force-graph-2d": "1.25.0", + "react-force-graph-3d": "1.23.0", + "react-indiana-drag-scroll": "^2.2.0", + "react-redux": "^8.0.5", + "react-resize-aware": "3.1.1", + 
"react-resize-detector": "^9.1.0", + "react-scripts": "^5.0.1", "react-split-pane": "^0.1.92", - "react-virtualized": "^9.22.2", - "react-window": "^1.8.6", - "redux": "^4.0.5", - "typescript": "^3.9.7" + "react-virtualized": "^9.22.5", + "react-window": "^1.8.9", + "redux": "^4.2.1", + "typescript": "^5.0.4" }, "browserslist": { "production": [ diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/setup_node_env.sh b/buildscripts/libdeps/graph_visualizer_web_stack/setup_node_env.sh index 22c2d2295cfba..e680c85499b65 100755 --- a/buildscripts/libdeps/graph_visualizer_web_stack/setup_node_env.sh +++ b/buildscripts/libdeps/graph_visualizer_web_stack/setup_node_env.sh @@ -19,7 +19,7 @@ else \. "$NVM_DIR/nvm.sh" fi -nvm install 12 +nvm install 14 if [ "$1" = "install" ] then @@ -46,4 +46,4 @@ then . "$NVM_DIR/nvm.sh" fi -popd > /dev/null \ No newline at end of file +popd > /dev/null diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/DataGrid.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/DataGrid.js index 00220d4078a00..2fbc51e720d0b 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/DataGrid.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/DataGrid.js @@ -15,6 +15,8 @@ import { setNodeInfos } from "./redux/nodeInfo"; import { setLinks } from "./redux/links"; import { setLinksTrans } from "./redux/linksTrans"; +const {REACT_APP_API_URL} = process.env; + function componentToHex(c) { var hex = c.toString(16); return hex.length == 1 ? "0" + hex : hex; @@ -113,7 +115,7 @@ const DataGrid = ({ "selected_nodes": nodes.filter(node => node.selected == true).map(node => node.node), "transitive_edges": showTransitive }; - fetch('/api/graphs/' + gitHash + '/d3', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', { method: 'POST', headers: { 'Content-Type': 'application/json' @@ -126,7 +128,7 @@ const DataGrid = ({ setLinks(data.graphData.links); setLinksTrans(data.graphData.links_trans); }); - fetch('/api/graphs/' + gitHash + '/nodes/details', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes/details', { method: 'POST', headers: { 'Content-Type': 'application/json' diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/DrawGraph.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/DrawGraph.js index b0724498b71cd..2e374807997e6 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/DrawGraph.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/DrawGraph.js @@ -20,6 +20,8 @@ import { setLinksTrans } from "./redux/linksTrans"; import { setShowTransitive } from "./redux/showTransitive"; import LoadingBar from "./LoadingBar"; +const {REACT_APP_API_URL} = process.env; + const handleFindNode = (node_value, graphData, activeComponent, forceRef) => { var targetNode = null; if (graphData) { @@ -131,7 +133,7 @@ const DrawGraph = ({ "selected_nodes": nodes.filter(node => node.selected == true).map(node => node.node), "transitive_edges": showTransitive }; - fetch('/api/graphs/' + gitHash + '/d3', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', { method: 'POST', headers: { 'Content-Type': 'application/json' @@ -144,7 +146,7 @@ const DrawGraph = ({ setLinks(data.graphData.links); setLinksTrans(data.graphData.links_trans); }); - fetch('/api/graphs/' + gitHash + '/nodes/details', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes/details', { method: 'POST', headers: { 'Content-Type': 'application/json' diff --git 
a/buildscripts/libdeps/graph_visualizer_web_stack/src/EdgeList.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/EdgeList.js index 28226bc6206c0..69830ce83b178 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/EdgeList.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/EdgeList.js @@ -258,4 +258,4 @@ const EdgeList = ({ selectedGraph, links, setLinks, linksTrans, loading, setFind ); }; -export default connect(getEdges, { setGraphData, setFindNode, setLinks, setSelectedPath })(withStyles(styles)(EdgeList)); \ No newline at end of file +export default connect(getEdges, { setGraphData, setFindNode, setLinks, setSelectedPath })(withStyles(styles)(EdgeList)); diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphInfo.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphInfo.js index 5863d2bea9df3..3552d26cd2c76 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphInfo.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphInfo.js @@ -11,6 +11,8 @@ import { connect } from "react-redux"; import { getCounts } from "./redux/store"; import { setCounts } from "./redux/counts"; +const {REACT_APP_API_URL} = process.env; + const columns = [ { id: "ID", field: "type", headerName: "Count Type", width: 50 }, { field: "value", headerName: "Value", width: 50 }, @@ -26,7 +28,7 @@ const GraphInfo = ({ selectedGraph, counts, datawidth, setCounts }) => { React.useEffect(() => { let gitHash = selectedGraph; if (gitHash) { - fetch('/api/graphs/' + gitHash + '/analysis') + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/analysis') .then(response => response.json()) .then(data => { setCounts(data.results); diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphPaths.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphPaths.js index ead5fd4245d2d..ed738f62f169f 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphPaths.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/GraphPaths.js @@ -22,6 +22,8 @@ import { setLinksTrans } from "./redux/linksTrans"; import OverflowTooltip from "./OverflowTooltip"; +const {REACT_APP_API_URL} = process.env; + const rowHeight = 25; const Accordion = withStyles({ @@ -131,7 +133,7 @@ const GraphPaths = ({ "fromNode": fromNode, "toNode": toNode }; - fetch('/api/graphs/' + gitHash + '/paths', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/paths', { method: 'POST', headers: { 'Content-Type': 'application/json' @@ -146,7 +148,7 @@ const GraphPaths = ({ "extra_nodes": data.extraNodes, "transitive_edges": showTransitive }; - fetch('/api/graphs/' + gitHash + '/d3', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', { method: 'POST', headers: { 'Content-Type': 'application/json' diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/NodeList.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/NodeList.js index bdcc5755f43c6..17c73a3cc28f4 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/NodeList.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/NodeList.js @@ -17,6 +17,8 @@ import { setLoading } from "./redux/loading"; import { setListSearchTerm } from "./redux/listSearchTerm"; import { Button, Autocomplete, Grid } from "@material-ui/core"; +const {REACT_APP_API_URL} = process.env; + const columns = [ { dataKey: "check", label: "Selected", width: 70 }, { dataKey: "name", label: "Name", width: 200 }, @@ -29,7 +31,7 @@ const NodeList = ({ selectedGraph, nodes, searchedNodes, loading, 
setFindNode, s React.useEffect(() => { let gitHash = selectedGraph; if (gitHash) { - fetch('/api/graphs/' + gitHash + '/nodes') + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes') .then(response => response.json()) .then(data => { setNodes(data.nodes.map((node, index) => { @@ -55,7 +57,7 @@ const NodeList = ({ selectedGraph, nodes, searchedNodes, loading, setFindNode, s "selected_nodes": nodes.filter(node => node.selected == true).map(node => node.node), "transitive_edges": showTransitive }; - fetch('/api/graphs/' + gitHash + '/d3', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', { method: 'POST', headers: { 'Content-Type': 'application/json' @@ -68,7 +70,7 @@ const NodeList = ({ selectedGraph, nodes, searchedNodes, loading, setFindNode, s setLinks(data.graphData.links); setLinksTrans(data.graphData.links_trans); }); - fetch('/api/graphs/' + gitHash + '/nodes/details', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes/details', { method: 'POST', headers: { 'Content-Type': 'application/json' @@ -174,4 +176,4 @@ const NodeList = ({ selectedGraph, nodes, searchedNodes, loading, setFindNode, s ); }; -export default connect(getNodes, { setFindNode, setNodes, setNodeInfos, setLinks, setLinksTrans, setLoading, setListSearchTerm, updateCheckbox, updateSelected, setGraphData })(NodeList); \ No newline at end of file +export default connect(getNodes, { setFindNode, setNodes, setNodeInfos, setLinks, setLinksTrans, setLoading, setListSearchTerm, updateCheckbox, updateSelected, setGraphData })(NodeList); diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/OverflowTooltip.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/OverflowTooltip.js index c09ec44f319ac..cb76ba4073934 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/OverflowTooltip.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/OverflowTooltip.js @@ -14,6 +14,8 @@ import { getGraphData } from "./redux/store"; import { setLinks } from "./redux/links"; import { setLinksTrans } from "./redux/linksTrans"; +const {REACT_APP_API_URL} = process.env; + const OverflowTip = (props) => { const textElementRef = useRef(null); const [hoverStatus, setHover] = useState(false); @@ -33,7 +35,7 @@ const OverflowTip = (props) => { "selected_nodes": props.nodes.filter(node => node.selected == true).map(node => node.node), "transitive_edges": props.showTransitive }; - fetch('/api/graphs/' + gitHash + '/d3', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/d3', { method: 'POST', headers: { 'Content-Type': 'application/json' @@ -46,7 +48,7 @@ const OverflowTip = (props) => { props.setLinks(data.graphData.links); props.setLinksTrans(data.graphData.links_trans); }); - fetch('/api/graphs/' + gitHash + '/nodes/details', { + fetch(REACT_APP_API_URL + '/api/graphs/' + gitHash + '/nodes/details', { method: 'POST', headers: { 'Content-Type': 'application/json' diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/index.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/index.js index 2cf4e2644c246..42533ed320204 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/index.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/index.js @@ -1,5 +1,5 @@ import React from "react"; -import ReactDOM from "react-dom"; +import ReactDOM from "react-dom/client"; import { Provider } from "react-redux"; import CssBaseline from "@material-ui/core/CssBaseline"; import { ThemeProvider } from "@material-ui/core/styles"; @@ -8,14 +8,12 @@ import theme from 
"./theme"; import store from "./redux/store"; import App from "./App"; - -ReactDOM.render( +const root = ReactDOM.createRoot(document.getElementById("root")); +root.render( - , - - document.querySelector("#root") + ); diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/redux/listSearchTerm.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/redux/listSearchTerm.js index df288f4af472c..2d9594511254c 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/redux/listSearchTerm.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/redux/listSearchTerm.js @@ -13,4 +13,4 @@ export const listSearchTerm = (state = initialState, action) => { export const setListSearchTerm = (listSearchTerm) => ({ type: "setListSearchTerm", payload: listSearchTerm, -}); \ No newline at end of file +}); diff --git a/buildscripts/libdeps/graph_visualizer_web_stack/src/setupProxy.js b/buildscripts/libdeps/graph_visualizer_web_stack/src/setupProxy.js index f9c1a14588ff7..31f23ca4341f7 100644 --- a/buildscripts/libdeps/graph_visualizer_web_stack/src/setupProxy.js +++ b/buildscripts/libdeps/graph_visualizer_web_stack/src/setupProxy.js @@ -13,4 +13,4 @@ module.exports = function(app) { secure: false, }) ); -}; \ No newline at end of file +}; diff --git a/buildscripts/linter/filediff.py b/buildscripts/linter/filediff.py index 7dbad6d9d5863..23d4a988b2308 100644 --- a/buildscripts/linter/filediff.py +++ b/buildscripts/linter/filediff.py @@ -34,7 +34,7 @@ def _get_repos_and_revisions() -> Tuple[List[Repo], RevisionMap]: return repos, revision_map -def _filter_file(filename: str, is_interesting_file: Callable) -> bool: +def _filter_file(filename: str, is_interesting_file: Callable[[str], bool]) -> bool: """ Determine if file should be included based on existence and passed in method. @@ -45,7 +45,7 @@ def _filter_file(filename: str, is_interesting_file: Callable) -> bool: return os.path.exists(filename) and is_interesting_file(filename) -def gather_changed_files_for_lint(is_interesting_file: Callable) -> List[str]: +def gather_changed_files_for_lint(is_interesting_file: Callable[[str], bool]) -> List[str]: """ Get the files that have changes since the last git commit. diff --git a/buildscripts/linter/mongolint.py b/buildscripts/linter/mongolint.py new file mode 100644 index 0000000000000..c2edc606eaecd --- /dev/null +++ b/buildscripts/linter/mongolint.py @@ -0,0 +1,278 @@ +#!/usr/bin/env python3 +"""Simple C++ Linter.""" + +import argparse +import bisect +import io +import logging +import re +import sys + +_RE_LINT = re.compile("//.*NOLINT") +_RE_COMMENT_STRIP = re.compile("//.*") +_RE_GENERIC_FCV_COMMENT = re.compile(r'\(Generic FCV reference\):') +GENERIC_FCV = [ + r'::kLatest', + r'::kLastContinuous', + r'::kLastLTS', + r'::kUpgradingFromLastLTSToLatest', + r'::kUpgradingFromLastContinuousToLatest', + r'::kDowngradingFromLatestToLastLTS', + r'::kDowngradingFromLatestToLastContinuous', + r'\.isUpgradingOrDowngrading', + r'::kDowngradingFromLatestToLastContinuous', + r'::kUpgradingFromLastLTSToLastContinuous', +] +_RE_GENERIC_FCV_REF = re.compile(r'(' + '|'.join(GENERIC_FCV) + r')\b') +_RE_FEATURE_FLAG_IGNORE_FCV_CHECK_REF = re.compile(r'isEnabledAndIgnoreFCVUnsafe\(\)') +_RE_FEATURE_FLAG_IGNORE_FCV_CHECK_COMMENT = re.compile(r'\(Ignore FCV check\)') +_RE_HEADER = re.compile(r'\.(h|hpp)$') + + +class Linter: + """Simple C++ Linter.""" + + _license_header = '''\ +/** + * Copyright (C) {year}-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */'''.splitlines() + + def __init__(self, file_name, raw_lines): + """Create new linter.""" + self.file_name = file_name + self.raw_lines = raw_lines + self.clean_lines = [] + self.nolint_suppression = [] + self.generic_fcv_comments = [] + self.feature_flag_ignore_fcv_check_comments = [] + self._error_count = 0 + + def lint(self): + """Run linter, returning error count.""" + # steps: + # - Check for header + # - Check for NOLINT and Strip multi line comments + # - Run file-level checks + # - Run per-line checks + + start_line = self._check_for_server_side_public_license() + + self._check_newlines() + self._check_and_strip_comments() + + # Line-level checks + for linenum in range(start_line, len(self.clean_lines)): + if not self.clean_lines[linenum]: + continue + + # Relax the rule of commenting generic FCV references for files directly related to FCV + # implementations. + if not "feature_compatibility_version" in self.file_name: + self._check_for_generic_fcv(linenum) + + # Don't check feature_flag.h/cpp where the function is defined and test files. 
+ if not "feature_flag" in self.file_name and not "test" in self.file_name: + self._check_for_feature_flag_ignore_fcv(linenum) + + return self._error_count + + def _check_newlines(self): + """Check that each source file ends with a newline character.""" + if self.raw_lines and self.raw_lines[-1][-1:] != '\n': + self._error( + len(self.raw_lines), 'mongo/final_newline', + 'Files must end with a newline character.') + + def _check_and_strip_comments(self): + in_multi_line_comment = False + + for linenum in range(len(self.raw_lines)): + clean_line = self.raw_lines[linenum] + + # Users can write NOLINT different ways + # // NOLINT + # // Some explanation NOLINT + # so we need a regular expression + if _RE_LINT.search(clean_line): + self.nolint_suppression.append(linenum) + + if _RE_GENERIC_FCV_COMMENT.search(clean_line): + self.generic_fcv_comments.append(linenum) + + if _RE_FEATURE_FLAG_IGNORE_FCV_CHECK_COMMENT.search(clean_line): + self.feature_flag_ignore_fcv_check_comments.append(linenum) + + if not in_multi_line_comment: + if "/*" in clean_line and not "*/" in clean_line: + in_multi_line_comment = True + clean_line = "" + + # Trim comments - approximately + # Note, this does not understand if // is in a string + # i.e. it will think URLs are also comments but this should be good enough to find + # violators of the coding convention + if "//" in clean_line: + clean_line = _RE_COMMENT_STRIP.sub("", clean_line) + else: + if "*/" in clean_line: + in_multi_line_comment = False + + clean_line = "" + + self.clean_lines.append(clean_line) + + def _license_error(self, linenum, msg, category='legal/license'): + style_url = 'https://github.com/mongodb/mongo/wiki/Server-Code-Style' + self._error(linenum, category, '{} See {}'.format(msg, style_url)) + return (False, linenum) + + def _check_for_server_side_public_license(self): + """Return the number of the line at which the check ended.""" + src_iter = (x.rstrip() for x in self.raw_lines) + linenum = 0 + for linenum, lic_line in enumerate(self._license_header): + src_line = next(src_iter, None) + if src_line is None: + self._license_error(linenum, 'Missing or incomplete license header.') + return linenum + lic_re = re.escape(lic_line).replace(r'\{year\}', r'\d{4}') + if not re.fullmatch(lic_re, src_line): + self._license_error( + linenum, 'Incorrect license header.\n' + ' Expected: "{}"\n' + ' Received: "{}"\n'.format(lic_line, src_line)) + return linenum + + # Warn if SSPL appears in Enterprise code, which has a different license. + expect_sspl_license = "enterprise" not in self.file_name + if not expect_sspl_license: + self._license_error(linenum, + 'Incorrect license header found. Expected Enterprise license.', + category='legal/enterprise_license') + return linenum + return linenum + + def _check_for_generic_fcv(self, linenum): + line = self.clean_lines[linenum] + if _RE_GENERIC_FCV_REF.search(line): + # Find the first generic FCV comment preceding the current line. + i = bisect.bisect_right(self.generic_fcv_comments, linenum) + if not i or self.generic_fcv_comments[i - 1] < (linenum - 10): + self._error( + linenum, 'mongodb/fcv', + 'Please add a comment containing "(Generic FCV reference):" within 10 lines ' + + 'before the generic FCV reference.') + + def _check_for_feature_flag_ignore_fcv(self, linenum): + line = self.clean_lines[linenum] + if _RE_FEATURE_FLAG_IGNORE_FCV_CHECK_REF.search(line): + # Find the first ignore FCV check comment preceding the current line. 
+ i = bisect.bisect_right(self.feature_flag_ignore_fcv_check_comments, linenum) + if not i or self.feature_flag_ignore_fcv_check_comments[i - 1] < (linenum - 10): + self._error( + linenum, 'mongodb/fcv', + 'Please add a comment containing "(Ignore FCV check)":" within 10 lines ' + + 'before the isEnabledAndIgnoreFCVUnsafe() function call explaining why ' + + 'the FCV check is ignored.') + + def _error(self, linenum, category, message): + if linenum in self.nolint_suppression: + return + + norm_file_name = self.file_name.replace('\\', '/') + + # Custom clang-tidy check tests purposefully produce errors for + # tests to find. They should be ignored. + if "mongo_tidy_checks/tests/" in norm_file_name: + return + + if category == "legal/license": + # Enterprise module does not have the SSPL license + if "enterprise" in self.file_name: + return + + # The following files are in the src/mongo/ directory but technically belong + # in src/third_party/ because their copyright does not belong to MongoDB. + files_to_ignore = set([ + 'src/mongo/scripting/mozjs/PosixNSPR.cpp', + 'src/mongo/shell/linenoise.cpp', + 'src/mongo/shell/linenoise.h', + 'src/mongo/shell/mk_wcwidth.cpp', + 'src/mongo/shell/mk_wcwidth.h', + 'src/mongo/util/md5.cpp', + 'src/mongo/util/md5.h', + 'src/mongo/util/md5main.cpp', + 'src/mongo/util/net/ssl_stream.cpp', + 'src/mongo/util/scopeguard.h', + ]) + + for file_to_ignore in files_to_ignore: + if file_to_ignore in norm_file_name: + return + + # We count internally from 0 but users count from 1 for line numbers + print("Error: %s:%d - %s - %s" % (self.file_name, linenum + 1, category, message)) + self._error_count += 1 + + +def lint_file(file_name): + """Lint file and print errors to console.""" + with io.open(file_name, encoding='utf-8') as file_stream: + raw_lines = file_stream.readlines() + + linter = Linter(file_name, raw_lines) + return linter.lint() + + +def main(): + # type: () -> int + """Execute Main Entry point.""" + parser = argparse.ArgumentParser(description='MongoDB Simple C++ Linter.') + + parser.add_argument('file', type=str, help="C++ input file") + + parser.add_argument('-v', '--verbose', action='count', help="Enable verbose tracing") + + args = parser.parse_args() + + if args.verbose: + logging.basicConfig(level=logging.DEBUG) + + try: + error_count = lint_file(args.file) + if error_count != 0: + print('File "{}" failed with {} errors.'.format(args.file, error_count)) + return 1 + return 0 + except Exception as ex: # pylint: disable=broad-except + print('Exception while checking file "{}": {}'.format(args.file, ex)) + return 2 + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/buildscripts/linter/mypy.py b/buildscripts/linter/mypy.py index b2ec8f6022eb2..eff8bbc50d191 100644 --- a/buildscripts/linter/mypy.py +++ b/buildscripts/linter/mypy.py @@ -16,7 +16,7 @@ def __init__(self): """Create a mypy linter.""" # User can override the location of mypy from an environment variable. 
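The new mongolint.py proximity checks lean on `bisect.bisect_right` over the already-sorted lists of comment line numbers collected in `_check_and_strip_comments`: a flagged reference is acceptable only if an explanatory comment appears within the previous 10 lines. A minimal standalone sketch of that rule, using a hypothetical helper name and made-up line numbers, purely for illustration:

```python
# Sketch of the proximity rule used by _check_for_generic_fcv and
# _check_for_feature_flag_ignore_fcv: a reference on line `linenum` passes
# only if the nearest preceding comment is at most `window` lines above it.
# has_nearby_comment is a hypothetical helper, not part of mongolint.py.
import bisect

def has_nearby_comment(comment_lines, linenum, window=10):
    """comment_lines must be sorted ascending, as the linter collects them."""
    i = bisect.bisect_right(comment_lines, linenum)
    return bool(i) and comment_lines[i - 1] >= linenum - window

assert has_nearby_comment([5, 40], 12) is True    # comment on line 5, reference on line 12
assert has_nearby_comment([5, 40], 30) is False   # nearest comment is 25 lines away
assert has_nearby_comment([], 7) is False         # no comment at all
```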
- super(MypyLinter, self).__init__("mypy", "0.580", os.getenv("MYPY")) + super(MypyLinter, self).__init__("mypy", "1.3.0", os.getenv("MYPY")) def get_lint_version_cmd_args(self): # type: () -> List[str] diff --git a/buildscripts/linter/simplecpplint.py b/buildscripts/linter/simplecpplint.py deleted file mode 100644 index 2f1ca2a6f9a67..0000000000000 --- a/buildscripts/linter/simplecpplint.py +++ /dev/null @@ -1,421 +0,0 @@ -#!/usr/bin/env python3 -"""Simple C++ Linter.""" - -import argparse -import bisect -import io -import logging -import re -import sys - - -def _make_polyfill_regex(): - polyfill_required_names = [ - '_', - 'adopt_lock', - 'async', - 'chrono', - 'condition_variable', - 'condition_variable_any', - 'cv_status', - 'defer_lock', - 'future', - 'future_status', - 'get_terminate', - 'launch', - 'lock_guard', - 'mutex', - 'notify_all_at_thread_exit', - 'packaged_task', - 'promise', - 'recursive_mutex', - 'set_terminate', - 'shared_lock', - 'shared_mutex', - 'shared_timed_mutex', - 'this_thread(?!::at_thread_exit)', - 'thread', - 'timed_mutex', - 'try_to_lock', - 'unique_lock', - 'unordered_map', - 'unordered_multimap', - 'unordered_multiset', - 'unordered_set', - ] - - qualified_names = ['boost::' + name + "\\b" for name in polyfill_required_names] - qualified_names.extend('std::' + name + "\\b" for name in polyfill_required_names) - qualified_names_regex = '|'.join(qualified_names) - return re.compile('(' + qualified_names_regex + ')') - - -_RE_LINT = re.compile("//.*NOLINT") -_RE_COMMENT_STRIP = re.compile("//.*") - -_RE_PATTERN_MONGO_POLYFILL = _make_polyfill_regex() -_RE_UNSTRUCTURED_LOG = re.compile(r'\blogd\s*\(') -_RE_COLLECTION_SHARDING_RUNTIME = re.compile(r'\bCollectionShardingRuntime\b') -_RE_RAND = re.compile(r'\b(srand\(|rand\(\))') - -_RE_GENERIC_FCV_COMMENT = re.compile(r'\(Generic FCV reference\):') -GENERIC_FCV = [ - r'::kLatest', - r'::kLastContinuous', - r'::kLastLTS', - r'::kUpgradingFromLastLTSToLatest', - r'::kUpgradingFromLastContinuousToLatest', - r'::kDowngradingFromLatestToLastLTS', - r'::kDowngradingFromLatestToLastContinuous', - r'\.isUpgradingOrDowngrading', - r'::kDowngradingFromLatestToLastContinuous', - r'::kUpgradingFromLastLTSToLastContinuous', -] -_RE_GENERIC_FCV_REF = re.compile(r'(' + '|'.join(GENERIC_FCV) + r')\b') -_RE_FEATURE_FLAG_IGNORE_FCV_CHECK_REF = re.compile(r'isEnabledAndIgnoreFCVUnsafe\(\)') -_RE_FEATURE_FLAG_IGNORE_FCV_CHECK_COMMENT = re.compile(r'\(Ignore FCV check\)') -_RE_HEADER = re.compile(r'\.(h|hpp)$') - -_CXX_COMPAT_HEADERS = [ - "assert", "ctype", "errno", "fenv", "float", "inttypes", "limits", "locale", "math", "setjmp", - "signal", "stdarg", "stddef", "stdint", "stdio", "stdlib", "string", "time", "uchar", "wchar", - "wctype" -] - -# Successful matches `m` have a `m["base"]`, the basename of the file that was included. -_RE_CXX_COMPAT_HEADERS = re.compile( - rf'# *include *((<)|("))(?P{"|".join(_CXX_COMPAT_HEADERS)})\.h(?(2)>|")') - - -class Linter: - """Simple C++ Linter.""" - - _license_header = '''\ -/** - * Copyright (C) {year}-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. 
- * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */'''.splitlines() - - def __init__(self, file_name, raw_lines): - """Create new linter.""" - self.file_name = file_name - self.raw_lines = raw_lines - self.clean_lines = [] - self.nolint_suppression = [] - self.generic_fcv_comments = [] - self.feature_flag_ignore_fcv_check_comments = [] - self._error_count = 0 - - self.found_config_header = False - - def lint(self): - """Run linter, returning error count.""" - # steps: - # - Check for header - # - Check for NOLINT and Strip multi line comments - # - Run file-level checks - # - Run per-line checks - - start_line = self._check_for_server_side_public_license() - - self._check_newlines() - self._check_and_strip_comments() - - # File-level checks - self._check_macro_definition_leaks() - - # Line-level checks - for linenum in range(start_line, len(self.clean_lines)): - if not self.clean_lines[linenum]: - continue - - self._check_for_mongo_polyfill(linenum) - self._check_for_mongo_unstructured_log(linenum) - self._check_for_mongo_config_header(linenum) - self._check_for_collection_sharding_runtime(linenum) - self._check_for_rand(linenum) - self._check_for_c_stdlib_headers(linenum) - - # Relax the rule of commenting generic FCV references for files directly related to FCV - # implementations. - if not "feature_compatibility_version" in self.file_name: - self._check_for_generic_fcv(linenum) - - # Don't check feature_flag.h/cpp where the function is defined and test files. 
- if not "feature_flag" in self.file_name and not "test" in self.file_name: - self._check_for_feature_flag_ignore_fcv(linenum) - - return self._error_count - - def _check_newlines(self): - """Check that each source file ends with a newline character.""" - if self.raw_lines and self.raw_lines[-1][-1:] != '\n': - self._error( - len(self.raw_lines), 'mongo/final_newline', - 'Files must end with a newline character.') - - def _check_and_strip_comments(self): - in_multi_line_comment = False - - for linenum in range(len(self.raw_lines)): - clean_line = self.raw_lines[linenum] - - # Users can write NOLINT different ways - # // NOLINT - # // Some explanation NOLINT - # so we need a regular expression - if _RE_LINT.search(clean_line): - self.nolint_suppression.append(linenum) - - if _RE_GENERIC_FCV_COMMENT.search(clean_line): - self.generic_fcv_comments.append(linenum) - - if _RE_FEATURE_FLAG_IGNORE_FCV_CHECK_COMMENT.search(clean_line): - self.feature_flag_ignore_fcv_check_comments.append(linenum) - - if not in_multi_line_comment: - if "/*" in clean_line and not "*/" in clean_line: - in_multi_line_comment = True - clean_line = "" - - # Trim comments - approximately - # Note, this does not understand if // is in a string - # i.e. it will think URLs are also comments but this should be good enough to find - # violators of the coding convention - if "//" in clean_line: - clean_line = _RE_COMMENT_STRIP.sub("", clean_line) - else: - if "*/" in clean_line: - in_multi_line_comment = False - - clean_line = "" - - self.clean_lines.append(clean_line) - - def _check_macro_definition_leaks(self): - """Some header macros should appear in define/undef pairs.""" - if not _RE_HEADER.search(self.file_name): - return - # Naive check: doesn't consider `#if` scoping. - # Assumes an #undef matches the nearest #define. 
- for macro in ['MONGO_LOGV2_DEFAULT_COMPONENT']: - re_define = re.compile(fr"^\s*#\s*define\s+{macro}\b") - re_undef = re.compile(fr"^\s*#\s*undef\s+{macro}\b") - def_line = None - for idx, line in enumerate(self.clean_lines): - if def_line is None: - if re_define.match(line): - def_line = idx - else: - if re_undef.match(line): - def_line = None - if def_line is not None: - self._error(def_line, 'mongodb/undefmacro', f'Missing "#undef {macro}"') - - def _check_for_mongo_polyfill(self, linenum): - line = self.clean_lines[linenum] - match = _RE_PATTERN_MONGO_POLYFILL.search(line) - if match: - self._error( - linenum, 'mongodb/polyfill', - 'Illegal use of banned name from std::/boost:: for "%s", use mongo::stdx:: variant instead' - % (match.group(0))) - - def _check_for_mongo_unstructured_log(self, linenum): - line = self.clean_lines[linenum] - if _RE_UNSTRUCTURED_LOG.search(line) or 'doUnstructuredLogImpl' in line: - self._error( - linenum, 'mongodb/unstructuredlog', 'Illegal use of unstructured logging, ' - 'this is only for local development use and should not be committed.') - - def _check_for_collection_sharding_runtime(self, linenum): - line = self.clean_lines[linenum] - if _RE_COLLECTION_SHARDING_RUNTIME.search( - line - ) and "/src/mongo/db/s/" not in self.file_name and "_test.cpp" not in self.file_name: - self._error( - linenum, 'mongodb/collection_sharding_runtime', 'Illegal use of ' - 'CollectionShardingRuntime outside of mongo/db/s/; use CollectionShardingState ' - 'instead; see src/mongo/db/s/collection_sharding_state.h for details.') - - def _check_for_rand(self, linenum): - line = self.clean_lines[linenum] - if _RE_RAND.search(line): - self._error(linenum, 'mongodb/rand', - 'Use of rand or srand, use or PseudoRandom instead.') - - def _license_error(self, linenum, msg, category='legal/license'): - style_url = 'https://github.com/mongodb/mongo/wiki/Server-Code-Style' - self._error(linenum, category, '{} See {}'.format(msg, style_url)) - return (False, linenum) - - def _check_for_server_side_public_license(self): - """Return the number of the line at which the check ended.""" - src_iter = (x.rstrip() for x in self.raw_lines) - linenum = 0 - for linenum, lic_line in enumerate(self._license_header): - src_line = next(src_iter, None) - if src_line is None: - self._license_error(linenum, 'Missing or incomplete license header.') - return linenum - lic_re = re.escape(lic_line).replace(r'\{year\}', r'\d{4}') - if not re.fullmatch(lic_re, src_line): - self._license_error( - linenum, 'Incorrect license header.\n' - ' Expected: "{}"\n' - ' Received: "{}"\n'.format(lic_line, src_line)) - return linenum - - # Warn if SSPL appears in Enterprise code, which has a different license. - expect_sspl_license = "enterprise" not in self.file_name - if not expect_sspl_license: - self._license_error(linenum, - 'Incorrect license header found. 
Expected Enterprise license.', - category='legal/enterprise_license') - return linenum - return linenum - - def _check_for_mongo_config_header(self, linenum): - """Check for a config file.""" - if self.found_config_header: - return - - line = self.clean_lines[linenum] - self.found_config_header = line.startswith('#include "mongo/config.h"') - - if not self.found_config_header and "MONGO_CONFIG_" in line: - self._error(linenum, 'build/config_h_include', - 'MONGO_CONFIG define used without prior inclusion of config.h.') - - def _check_for_generic_fcv(self, linenum): - line = self.clean_lines[linenum] - if _RE_GENERIC_FCV_REF.search(line): - # Find the first generic FCV comment preceding the current line. - i = bisect.bisect_right(self.generic_fcv_comments, linenum) - if not i or self.generic_fcv_comments[i - 1] < (linenum - 10): - self._error( - linenum, 'mongodb/fcv', - 'Please add a comment containing "(Generic FCV reference):" within 10 lines ' + - 'before the generic FCV reference.') - - def _check_for_c_stdlib_headers(self, linenum): - line = self.clean_lines[linenum] - - if match := _RE_CXX_COMPAT_HEADERS.match(line): - self._error( - linenum, 'mongodb/headers', - f"Prohibited include of C header '<{match['base']}.h>'. " \ - f"Include C++ header '' instead.") - - def _check_for_feature_flag_ignore_fcv(self, linenum): - line = self.clean_lines[linenum] - if _RE_FEATURE_FLAG_IGNORE_FCV_CHECK_REF.search(line): - # Find the first ignore FCV check comment preceding the current line. - i = bisect.bisect_right(self.feature_flag_ignore_fcv_check_comments, linenum) - if not i or self.feature_flag_ignore_fcv_check_comments[i - 1] < (linenum - 10): - self._error( - linenum, 'mongodb/fcv', - 'Please add a comment containing "(Ignore FCV check)":" within 10 lines ' + - 'before the isEnabledAndIgnoreFCVUnsafe() function call explaining why ' + - 'the FCV check is ignored.') - - def _error(self, linenum, category, message): - if linenum in self.nolint_suppression: - return - - norm_file_name = self.file_name.replace('\\', '/') - - # Custom clang-tidy check tests purposefully produce errors for - # tests to find. They should be ignored. - if "mongo_tidy_checks/tests/" in norm_file_name: - return - - if category == "legal/license": - # Enterprise module does not have the SSPL license - if "enterprise" in self.file_name: - return - - # The following files are in the src/mongo/ directory but technically belong - # in src/third_party/ because their copyright does not belong to MongoDB. 
- files_to_ignore = set([ - 'src/mongo/scripting/mozjs/PosixNSPR.cpp', - 'src/mongo/shell/linenoise.cpp', - 'src/mongo/shell/linenoise.h', - 'src/mongo/shell/mk_wcwidth.cpp', - 'src/mongo/shell/mk_wcwidth.h', - 'src/mongo/util/md5.cpp', - 'src/mongo/util/md5.h', - 'src/mongo/util/md5main.cpp', - 'src/mongo/util/net/ssl_stream.cpp', - 'src/mongo/util/scopeguard.h', - ]) - - for file_to_ignore in files_to_ignore: - if file_to_ignore in norm_file_name: - return - - # We count internally from 0 but users count from 1 for line numbers - print("Error: %s:%d - %s - %s" % (self.file_name, linenum + 1, category, message)) - self._error_count += 1 - - -def lint_file(file_name): - """Lint file and print errors to console.""" - with io.open(file_name, encoding='utf-8') as file_stream: - raw_lines = file_stream.readlines() - - linter = Linter(file_name, raw_lines) - return linter.lint() - - -def main(): - # type: () -> int - """Execute Main Entry point.""" - parser = argparse.ArgumentParser(description='MongoDB Simple C++ Linter.') - - parser.add_argument('file', type=str, help="C++ input file") - - parser.add_argument('-v', '--verbose', action='count', help="Enable verbose tracing") - - args = parser.parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - - try: - error_count = lint_file(args.file) - if error_count != 0: - print('File "{}" failed with {} errors.'.format(args.file, error_count)) - return 1 - return 0 - except Exception as ex: # pylint: disable=broad-except - print('Exception while checking file "{}": {}'.format(args.file, ex)) - return 2 - - -if __name__ == '__main__': - sys.exit(main()) diff --git a/buildscripts/lldb/lldb_printers.py b/buildscripts/lldb/lldb_printers.py index 0c7bfb2650f7d..0bdf74304b2e4 100644 --- a/buildscripts/lldb/lldb_printers.py +++ b/buildscripts/lldb/lldb_printers.py @@ -112,7 +112,7 @@ def StringDataPrinter(valobj, *_args): # pylint: disable=invalid-name return 'nullptr' size1 = valobj.GetChildMemberWithName("_size").GetValueAsUnsigned(0) - return '"{}"'.format(valobj.GetProcess().ReadMemory(ptr, size1, lldb.SBError()).encode("utf-8")) + return '"{}"'.format(valobj.GetProcess().ReadMemory(ptr, size1, lldb.SBError()).decode("utf-8")) def read_memory_as_hex(process, address, size): diff --git a/buildscripts/package_test.py b/buildscripts/package_test.py index 94a385519feba..b01d2280479ed 100644 --- a/buildscripts/package_test.py +++ b/buildscripts/package_test.py @@ -56,7 +56,7 @@ 'amazon2': ('amazonlinux:2', "yum", frozenset(["python", "python3", "wget", "pkgconfig", "systemd", "procps", "file"]), "python3"), - 'amazon2022': ('amazonlinux:2022', "yum", + 'amazon2023': ('amazonlinux:2023', "yum", frozenset( ["python", "python3", "wget", "pkgconfig", "systemd", "procps", "file"]), "python3"), @@ -160,6 +160,7 @@ class Test: python_command: str = dataclasses.field(default="", repr=False) packages_urls: List[str] = dataclasses.field(default_factory=list) packages_paths: List[Path] = dataclasses.field(default_factory=list) + attempts: int = dataclasses.field(default=0) def __post_init__(self) -> None: assert OS_DOCKER_LOOKUP[self.os_name] is not None @@ -206,6 +207,20 @@ def join_commands(commands: List[str], sep: str = ' && ') -> str: return sep.join(commands) +def run_test_with_timeout(test: Test, client: DockerClient, timeout: int) -> Result: + start_time = time.time() + with futures.ThreadPoolExecutor(max_workers=1) as executor: + future = executor.submit(run_test, test, client) + try: + result = future.result(timeout=timeout) + except 
futures.TimeoutError: + end_time = time.time() + logging.debug("Test %s timed out", test) + result = Result(status="fail", test_file=test.name(), start=start_time, + log_raw="test timed out", end=end_time, exit_code=1) + return result + + def run_test(test: Test, client: DockerClient) -> Result: result = Result(status="pass", test_file=test.name(), start=time.time(), log_raw="") @@ -215,7 +230,6 @@ def run_test(test: Test, client: DockerClient) -> Result: test_external_root = Path(__file__).parent.resolve() logging.debug(test_external_root) log_external_path = Path.joinpath(test_external_root, log_name) - commands: List[str] = ["export PYTHONIOENCODING=UTF-8"] if test.os_name.startswith('rhel'): @@ -307,10 +321,6 @@ def iterate_over_downloads() -> Generator[Dict[str, Any], None, None]: def get_tools_package(arch_name: str, os_name: str) -> Optional[str]: - # TODO: MONGOSH-1308 - we need to sub the arch alias until package - # architectures are named consistently with the server packages - if arch_name == "aarch64" and os_name != "amazon2": - arch_name = "arm64" for download in current_tools_releases["versions"][0]["downloads"]: if download["name"] == os_name and download["arch"] == arch_name: return download["package"]["url"] @@ -318,6 +328,8 @@ def get_tools_package(arch_name: str, os_name: str) -> Optional[str]: def get_mongosh_package(arch_name: str, os_name: str) -> Optional[str]: + # TODO: MONGOSH-1308 - we need to sub the arch alias until package + # architectures are named consistently with the server packages if arch_name == "aarch64": arch_name = "arm64" if arch_name in ("x86_64", "amd64"): @@ -361,6 +373,8 @@ def get_edition_alias(edition_name: str) -> str: 'Test packages on various hosts. This will spin up docker containers and test the installs.') parser.add_argument("--arch", type=str, help="Arch of packages to test", choices=["auto"] + list(arches), default="auto") +parser.add_argument("-r", "--retries", type=int, help="Number of times to retry failed tests", + default=3) subparsers = parser.add_subparsers(dest="command") release_test_parser = subparsers.add_parser("release") release_test_parser.add_argument( @@ -530,17 +544,42 @@ def get_edition_alias(edition_name: str) -> str: logging.warning("Skipping docker login") report = Report(results=[], failures=0) -with futures.ThreadPoolExecutor() as tpe: - test_futures = [tpe.submit(run_test, test, docker_client) for test in tests] - completed_tests = 0 # pylint: disable=invalid-name - for f in futures.as_completed(test_futures): - completed_tests += 1 - test_result = f.result() - if test_result["exit_code"] != 0: - report["failures"] += 1 - - report["results"].append(test_result) - logging.info("Completed %s/%s tests", completed_tests, len(test_futures)) +with futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as tpe: + # Set a timeout of 10mins timeout for a single test + SINGLE_TEST_TIMEOUT = 10 * 60 + test_futures = { + tpe.submit(run_test_with_timeout, test, docker_client, SINGLE_TEST_TIMEOUT): test + for test in tests + } + completed_tests: int = 0 + retried_tests: int = 0 + total_tests: int = len(tests) + while len(test_futures.keys()) > 0: + finished_futures, active_futures = futures.wait(test_futures.keys(), timeout=None, + return_when="FIRST_COMPLETED") + for f in finished_futures: + completed_test = test_futures.pop(f) + test_result = f.result() + if test_result["exit_code"] != 0: + if completed_test.attempts < args.retries: + retried_tests += 1 + completed_test.attempts += 1 + test_futures[tpe.submit(run_test, 
completed_test, + docker_client)] = completed_test + continue + report["failures"] += 1 + + completed_tests += 1 + report["results"].append(test_result) + + logging.info( + "Completed %s tests, retried %s tests, total %s tests, %s tests are in progress.", + completed_tests, retried_tests, total_tests, len(test_futures)) + + # We are printing here to help diagnose hangs + # This adds a bit of logging so we are only going to log running tests after a test completes + for active_test in test_futures.values(): + logging.info("Test in progress: %s", active_test) with open("report.json", "w") as fh: json.dump(report, fh) diff --git a/buildscripts/package_test_internal.py b/buildscripts/package_test_internal.py index b923a0e6f7714..43d7af8b4296a 100644 --- a/buildscripts/package_test_internal.py +++ b/buildscripts/package_test_internal.py @@ -259,23 +259,6 @@ def get_package_name(package_file: str) -> str: def setup(test_args: TestArgs): logging.info("Setting up test environment.") - # TODO SERVER-70425: We can remove these once we have figured out why - # packager.py sometimes uses distro files from older revisions. - # Remove the PIDFile, PermissionsStartOnly, and Type configurations from - # the systemd service file because they are not needed for simple-type - # (non-forking) services and confuse the systemd emulator script. - run_and_log("sed -Ei '/^PIDFile=|PermissionsStartOnly=|Type=/d' {}/mongod.service".format( - test_args["systemd_units_dir"])) - # Ensure RuntimeDirectory has been added to the systemd unit file. - run_and_log("sed -Ei '/^ExecStart=.*/a RuntimeDirectory=mongodb' {}/mongod.service".format( - test_args["systemd_units_dir"])) - # Remove the journal: line (and the next) from mongod.conf, which is a - # removed configuration. The Debian version of the config never got updated. - run_and_log("sed -i '/journal:/,+1d' /etc/mongod.conf") - # Remove fork: and pidFilePath: from mongod.conf because we want mongod to be - # a non-forking service under systemd. - run_and_log("sed -Ei '/fork:|pidFilePath:/d' /etc/mongod.conf") - # Ensure systemd doesn't try to start anything automatically so we can do # it in our tests run_and_log("mkdir -p /run/systemd/system") @@ -332,6 +315,7 @@ def test_install_is_complete(test_args: TestArgs): required_dirs = [ pathlib.Path('/run/mongodb'), + pathlib.Path('/var/run/mongodb'), pathlib.Path(test_args['mongo_work_dir']), ] # type: List[pathlib.Path] diff --git a/buildscripts/packager.py b/buildscripts/packager.py index 98ab952d619e4..3c08420cd229d 100755 --- a/buildscripts/packager.py +++ b/buildscripts/packager.py @@ -31,6 +31,7 @@ import argparse import errno +import git from glob import glob import os import re @@ -44,7 +45,7 @@ ARCH_CHOICES = ["x86_64", "arm64", "aarch64", "s390x"] # Made up names for the flavors of distribution we package for. 
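The `run_test_with_timeout` helper added to package_test.py above wraps the blocking `run_test` call in a single-worker executor so one hung docker test cannot stall the whole run. A small self-contained sketch of the same pattern, with made-up names (`slow_task`, `TIMEOUT_SECS`) standing in for `run_test` and the 10-minute `SINGLE_TEST_TIMEOUT`:

```python
# Run a blocking callable on a single worker thread and stop waiting for it
# after TIMEOUT_SECS seconds; the names here are illustrative only.
import time
from concurrent import futures

TIMEOUT_SECS = 2

def slow_task() -> str:
    time.sleep(5)  # stands in for a long-running docker-based package test
    return "pass"

with futures.ThreadPoolExecutor(max_workers=1) as executor:
    future = executor.submit(slow_task)
    try:
        outcome = future.result(timeout=TIMEOUT_SECS)
    except futures.TimeoutError:
        # The real helper builds a Result(status="fail", ...) here. Note that
        # the worker thread keeps running; only the wait is abandoned.
        outcome = "fail: timed out"

print(outcome)
```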
-DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon", "amazon2", "amazon2022"] +DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon", "amazon2", "amazon2023"] class Spec(object): @@ -289,8 +290,8 @@ def repo_os_version(self, build_os): return "2013.03" elif self.dname == 'amazon2': return "2017.12" - elif self.dname == 'amazon2022': - return "2022.0" + elif self.dname == 'amazon2023': + return "2023.0" elif self.dname == 'ubuntu': if build_os == 'ubuntu1204': return "precise" @@ -353,7 +354,7 @@ def build_os(self, arch): "rhel55", "rhel67", ] - elif self.dname in ['amazon', 'amazon2', 'amazon2022']: + elif self.dname in ['amazon', 'amazon2', 'amazon2023']: return [self.dname] elif self.dname == 'ubuntu': return [ @@ -381,8 +382,8 @@ def release_dist(self, build_os): return 'amzn1' elif self.dname == 'amazon2': return 'amzn2' - elif self.dname == 'amazon2022': - return 'amzn2022' + elif self.dname == 'amazon2023': + return 'amzn2023' return re.sub(r'^rh(el\d).*$', r'\1', build_os) @@ -750,10 +751,31 @@ def write_debian_changelog(path, spec, srcdir): os.chdir(srcdir) preamble = "" try: + + git_repo = git.Repo(srcdir) + # get the original HEAD position of repo + head_commit_sha = git_repo.head.object.hexsha + + # add and commit the uncommited changes + print("Commiting uncommited changes") + git_repo.git.add(all=True) + # only commit changes if there are any + if len(git_repo.index.diff("HEAD")) != 0: + with git_repo.git.custom_environment(GIT_COMMITTER_NAME="Evergreen", + GIT_COMMITTER_EMAIL="evergreen@mongodb.com"): + git_repo.git.commit("--author='Evergreen <>'", "-m", "temp commit") + + # original command to preserve functionality + # FIXME: make consistent with the rest of the code when we have more packaging testing + print("Getting changelog for specified gitspec:", spec.metadata_gitspec()) sb = preamble + backtick([ "sh", "-c", "git archive %s debian/changelog | tar xOf -" % spec.metadata_gitspec() ]).decode('utf-8') + + # reset branch to original state + print("Resetting branch to original state") + git_repo.git.reset("--mixed", head_commit_sha) finally: os.chdir(oldcwd) lines = sb.split("\n") diff --git a/buildscripts/packager_enterprise.py b/buildscripts/packager_enterprise.py index ea89dc94633e8..c4f1c2417fa74 100755 --- a/buildscripts/packager_enterprise.py +++ b/buildscripts/packager_enterprise.py @@ -28,6 +28,7 @@ # echo "Now put the dist gnupg signing keys in ~root/.gnupg" import errno +import git from glob import glob import os import re @@ -44,7 +45,7 @@ ARCH_CHOICES = ["x86_64", "ppc64le", "s390x", "arm64", "aarch64"] # Made up names for the flavors of distribution we package for. -DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon", "amazon2", "amazon2022"] +DISTROS = ["suse", "debian", "redhat", "ubuntu", "amazon", "amazon2", "amazon2023"] class EnterpriseSpec(packager.Spec): @@ -144,8 +145,8 @@ def build_os(self, arch): return ["rhel82", "rhel90"] if self.dname == 'amazon2': return ["amazon2"] - if self.dname == 'amazon2022': - return ["amazon2022"] + if self.dname == 'amazon2023': + return ["amazon2023"] return [] if re.search("(redhat|fedora|centos)", self.dname): @@ -251,12 +252,33 @@ def make_package(distro, build_os, arch, spec, srcdir): # innocuous in the debianoids' sdirs). 
for pkgdir in ["debian", "rpm"]: print("Copying packaging files from %s to %s" % ("%s/%s" % (srcdir, pkgdir), sdir)) + git_repo = git.Repo(srcdir) + # get the original HEAD position of repo + head_commit_sha = git_repo.head.object.hexsha + + # add and commit the uncommited changes + print("Commiting uncommited changes") + git_repo.git.add(all=True) + # only commit changes if there are any + if len(git_repo.index.diff("HEAD")) != 0: + with git_repo.git.custom_environment(GIT_COMMITTER_NAME="Evergreen", + GIT_COMMITTER_EMAIL="evergreen@mongodb.com"): + git_repo.git.commit("--author='Evergreen <>'", "-m", "temp commit") + + # original command to preserve functionality + # FIXME: make consistent with the rest of the code when we have more packaging testing # FIXME: sh-dash-cee is bad. See if tarfile can do this. + print("Copying packaging files from specified gitspec:", spec.metadata_gitspec()) packager.sysassert([ "sh", "-c", "(cd \"%s\" && git archive %s %s/ ) | (cd \"%s\" && tar xvf -)" % (srcdir, spec.metadata_gitspec(), pkgdir, sdir) ]) + + # reset branch to original state + print("Resetting branch to original state") + git_repo.git.reset("--mixed", head_commit_sha) + # Splat the binaries under sdir. The "build" stages of the # packaging infrastructure will move the files to wherever they # need to go. diff --git a/buildscripts/patch_builds/change_data.py b/buildscripts/patch_builds/change_data.py index 44af0fca70b21..0d7fee884968b 100644 --- a/buildscripts/patch_builds/change_data.py +++ b/buildscripts/patch_builds/change_data.py @@ -87,7 +87,7 @@ def find_changed_files(repo: Repo, revision_map: Optional[RevisionMap] = None) - work_tree_files = _modified_files_for_diff(diff, LOGGER.bind(diff="working tree diff")) commit = repo.index - diff = commit.diff(revision_map.get(repo.git_dir, repo.head.commit)) + diff = commit.diff(revision_map.get(repo.git_dir, repo.head.commit), R=True) index_files = _modified_files_for_diff(diff, LOGGER.bind(diff="index diff")) untracked_files = set(repo.untracked_files) diff --git a/buildscripts/quickcpplint.py b/buildscripts/quickcpplint.py deleted file mode 100755 index 8d78a9c9d5c66..0000000000000 --- a/buildscripts/quickcpplint.py +++ /dev/null @@ -1,108 +0,0 @@ -#!/usr/bin/env python3 -"""Extensible script to run one or more simple C++ Linters across a subset of files in parallel.""" - -import argparse -import logging -import os -import re -import sys -import threading -from typing import List - -# Get relative imports to work when the package is not installed on the PYTHONPATH. -if __name__ == "__main__" and __package__ is None: - sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__))))) - -from buildscripts.linter import git # pylint: disable=wrong-import-position -from buildscripts.linter import parallel # pylint: disable=wrong-import-position -from buildscripts.linter import simplecpplint # pylint: disable=wrong-import-position - -FILES_RE = re.compile('\\.(h|cpp)$') - - -def is_interesting_file(file_name: str) -> bool: - """Return true if this file should be checked.""" - return (file_name.startswith("jstests") - or file_name.startswith("src") and not file_name.startswith("src/third_party/") - and not file_name.startswith("src/mongo/gotools/") - # TODO SERVER-49805: These files should be generated at compile time. 
- and not file_name == "src/mongo/db/cst/parser_gen.cpp") and FILES_RE.search(file_name) - - -def _lint_files(file_names: List[str]) -> None: - """Lint a list of files with clang-format.""" - run_lint1 = lambda param1: simplecpplint.lint_file(param1) == 0 - if not parallel.parallel_process([os.path.abspath(f) for f in file_names], run_lint1): - print("ERROR: Code Style does not match coding style") - sys.exit(1) - - -def lint_patch(file_name: str) -> None: - """Lint patch command entry point.""" - file_names = git.get_files_to_check_from_patch(file_name, is_interesting_file) - - # Patch may have files that we do not want to check which is fine - if file_names: - _lint_files(file_names) - - -def lint(file_names: List[str]) -> None: - # type: (str, Dict[str, str], List[str]) -> None - """Lint files command entry point.""" - all_file_names = git.get_files_to_check(file_names, is_interesting_file) - - _lint_files(all_file_names) - - -def lint_all(file_names: List[str]) -> None: - # pylint: disable=unused-argument - """Lint files command entry point based on working tree.""" - all_file_names = git.get_files_to_check_working_tree(is_interesting_file) - - _lint_files(all_file_names) - - -def lint_my(origin_branch: List[str]) -> None: - """Lint files command based on local changes.""" - files = git.get_my_files_to_check(is_interesting_file, origin_branch) - files = [f for f in files if os.path.exists(f)] - - _lint_files(files) - - -def main() -> None: - """Execute Main entry point.""" - - parser = argparse.ArgumentParser(description='Quick C++ Lint frontend.') - - parser.add_argument('-v', "--verbose", action='store_true', help="Enable verbose logging") - - sub = parser.add_subparsers(title="Linter subcommands", help="sub-command help") - - parser_lint = sub.add_parser('lint', help='Lint only Git files') - parser_lint.add_argument("file_names", nargs="*", help="Globs of files to check") - parser_lint.set_defaults(func=lint) - - parser_lint_all = sub.add_parser('lint-all', help='Lint All files') - parser_lint_all.add_argument("file_names", nargs="*", help="Globs of files to check") - parser_lint_all.set_defaults(func=lint_all) - - parser_lint_patch = sub.add_parser('lint-patch', help='Lint the files in a patch') - parser_lint_patch.add_argument("file_names", nargs="*", help="Globs of files to check") - parser_lint_patch.set_defaults(func=lint_patch) - - parser_lint_my = sub.add_parser('lint-my', help='Lint my files') - parser_lint_my.add_argument("--branch", dest="file_names", default="origin/master", - help="Branch to compare against") - parser_lint_my.set_defaults(func=lint_my) - - args = parser.parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - - args.func(args.file_names) - - -if __name__ == "__main__": - main() diff --git a/buildscripts/quickmongolint.py b/buildscripts/quickmongolint.py new file mode 100755 index 0000000000000..53d3461df6ee9 --- /dev/null +++ b/buildscripts/quickmongolint.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +"""Extensible script to run one or more simple C++ Linters across a subset of files in parallel.""" + +import argparse +import logging +import os +import re +import sys +import threading +from typing import List + +# Get relative imports to work when the package is not installed on the PYTHONPATH. 
+if __name__ == "__main__" and __package__ is None: + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__))))) + +from buildscripts.linter import git # pylint: disable=wrong-import-position +from buildscripts.linter import parallel # pylint: disable=wrong-import-position +from buildscripts.linter import mongolint # pylint: disable=wrong-import-position + +FILES_RE = re.compile('\\.(h|cpp)$') + + +def is_interesting_file(file_name: str) -> bool: + """Return true if this file should be checked.""" + return (file_name.startswith("jstests") + or file_name.startswith("src") and not file_name.startswith("src/third_party/") + and not file_name.startswith("src/mongo/gotools/") + and not file_name.startswith("src/streams/third_party") + # TODO SERVER-49805: These files should be generated at compile time. + and not file_name == "src/mongo/db/cst/parser_gen.cpp") and FILES_RE.search(file_name) + + +def _lint_files(file_names: List[str]) -> None: + """Lint a list of files with clang-format.""" + run_lint1 = lambda param1: mongolint.lint_file(param1) == 0 + if not parallel.parallel_process([os.path.abspath(f) for f in file_names], run_lint1): + print("ERROR: Code Style does not match coding style") + sys.exit(1) + + +def lint_patch(file_name: str) -> None: + """Lint patch command entry point.""" + file_names = git.get_files_to_check_from_patch(file_name, is_interesting_file) + + # Patch may have files that we do not want to check which is fine + if file_names: + _lint_files(file_names) + + +def lint(file_names: List[str]) -> None: + # type: (str, Dict[str, str], List[str]) -> None + """Lint files command entry point.""" + all_file_names = git.get_files_to_check(file_names, is_interesting_file) + + _lint_files(all_file_names) + + +def lint_all(file_names: List[str]) -> None: + # pylint: disable=unused-argument + """Lint files command entry point based on working tree.""" + all_file_names = git.get_files_to_check_working_tree(is_interesting_file) + + _lint_files(all_file_names) + + +def lint_my(origin_branch: List[str]) -> None: + """Lint files command based on local changes.""" + files = git.get_my_files_to_check(is_interesting_file, origin_branch) + files = [f for f in files if os.path.exists(f)] + + _lint_files(files) + + +def main() -> None: + """Execute Main entry point.""" + + parser = argparse.ArgumentParser(description='Quick C++ Lint frontend.') + + parser.add_argument('-v', "--verbose", action='store_true', help="Enable verbose logging") + + sub = parser.add_subparsers(title="Linter subcommands", help="sub-command help") + + parser_lint = sub.add_parser('lint', help='Lint only Git files') + parser_lint.add_argument("file_names", nargs="*", help="Globs of files to check") + parser_lint.set_defaults(func=lint) + + parser_lint_all = sub.add_parser('lint-all', help='Lint All files') + parser_lint_all.add_argument("file_names", nargs="*", help="Globs of files to check") + parser_lint_all.set_defaults(func=lint_all) + + parser_lint_patch = sub.add_parser('lint-patch', help='Lint the files in a patch') + parser_lint_patch.add_argument("file_names", nargs="*", help="Globs of files to check") + parser_lint_patch.set_defaults(func=lint_patch) + + parser_lint_my = sub.add_parser('lint-my', help='Lint my files') + parser_lint_my.add_argument("--branch", dest="file_names", default="origin/master", + help="Branch to compare against") + parser_lint_my.set_defaults(func=lint_my) + + args = parser.parse_args() + + if args.verbose: + logging.basicConfig(level=logging.DEBUG) 
+ + args.func(args.file_names) + + +if __name__ == "__main__": + main() diff --git a/buildscripts/resmoke_proxy/resmoke_proxy.py b/buildscripts/resmoke_proxy/resmoke_proxy.py index 42ceeb6c7575d..0074a47c23494 100644 --- a/buildscripts/resmoke_proxy/resmoke_proxy.py +++ b/buildscripts/resmoke_proxy/resmoke_proxy.py @@ -41,4 +41,4 @@ def read_suite_config(self, suite_name: str) -> Dict[str, Any]: :param suite_name: Name of suite to read. :return: Configuration of specified suite. """ - return self._suite_config.SuiteFinder.get_config_obj(suite_name) + return self._suite_config.SuiteFinder.get_config_obj_no_verify(suite_name) diff --git a/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml b/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml index 31387cd70548a..05248400df4c6 100644 --- a/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml +++ b/buildscripts/resmokeconfig/evg_task_doc/evg_task_doc.yml @@ -40,10 +40,3 @@ cqf_parallel: |- parallel. The optimizer will use a default degree of parallelism of 5. Tests in this suite are _forced_ to use the new query optimizer by using the server parameter 'internalQueryFrameworkControl': "forceBonsai". - -cqf_passthrough: |- - A passthrough suite of the tests in the core suite, but attempting usage of - the new optimizer and the CQF framework using the server parameter - internalQueryFrameworkControl: "tryBonsai". Queries that we believe should be - able to correctly use the new optimizer will be routed via that path and - should return the same results. diff --git a/buildscripts/resmokeconfig/feature_flag_test.idl b/buildscripts/resmokeconfig/feature_flag_test.idl new file mode 100644 index 0000000000000..763b0f4c8bcb6 --- /dev/null +++ b/buildscripts/resmokeconfig/feature_flag_test.idl @@ -0,0 +1,8 @@ +# This file is meant to add any feature flags needed for testing to +# all_feature_flags.txt +feature_flags: + featureFlagToaster: + description: "Create a feature flag" + cpp_varname: gFeatureFlagToaster + default: false + shouldBeFCVGated: true diff --git a/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml b/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml index ab21c338c8143..3a89f933deb2c 100644 --- a/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml +++ b/buildscripts/resmokeconfig/fully_disabled_feature_flags.yml @@ -1,10 +1,12 @@ # Feature flags here are turned off even on the "all feature flags" build variants. +# Feature flags here disable jstests that are tagged with these feature flags on all variants. # # These flags can be enabled on a per-task or per-build-variant basis # by modifying their respective definitions in evergreen.yml. - featureFlagFryer - featureFlagCommonQueryFramework +- featureFlagSearchInSbe # This flag exists to help users in managed environments that upgraded to 6.0 before 6.0.0-rc8 was # released create the transactions collection index and is only meant to be enabled adhoc, so only # its targeted tests should enable it. 
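fully_disabled_feature_flags.yml is a plain YAML list, and the updated comment clarifies that jstests tagged with these flags are skipped on every build variant, including the all-feature-flags ones. A purely illustrative sketch (not resmoke's actual mechanism) of reading that list and filtering tagged tests:

```python
# Illustrative only: load the fully-disabled flag list and drop tests whose
# tags reference one of those flags. The tag data below is made up.
import yaml

with open("buildscripts/resmokeconfig/fully_disabled_feature_flags.yml") as fh:
    disabled_flags = set(yaml.safe_load(fh))

tagged_tests = {
    "jstests/core/toaster.js": ["featureFlagToaster"],
    "jstests/core/search_sbe.js": ["featureFlagSearchInSbe"],
    "jstests/core/find.js": [],
}

runnable = [name for name, tags in tagged_tests.items()
            if not disabled_flags.intersection(tags)]
print(runnable)  # search_sbe.js is dropped because its flag is disabled everywhere
```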
diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_continuous.yml new file mode 100644 index 0000000000000..dafecc971b156 --- /dev/null +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_continuous.yml @@ -0,0 +1,26 @@ +########################################################## +# THIS IS A GENERATED FILE -- DO NOT MODIFY. +# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE +# AND REGENERATE THE MATRIX SUITES. +# +# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_continuous.yml +# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites +########################################################## +executor: + archive: + tests: true + config: + shell_options: + global_vars: + TestData: + clusterType: standalone + internalQueryAppendIdToSetWindowFieldsSort: true + internalQueryMaxAllowedDensifyDocs: 1000 + traceExceptions: false + useRandomBinVersionsWithinReplicaSet: last-continuous + nodb: '' +matrix_suite: true +selector: + roots: + - jstestfuzz/out/*.js +test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_lts.yml new file mode 100644 index 0000000000000..a56d13ced99e4 --- /dev/null +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_expression_multiversion_fuzzer_last_lts.yml @@ -0,0 +1,26 @@ +########################################################## +# THIS IS A GENERATED FILE -- DO NOT MODIFY. +# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE +# AND REGENERATE THE MATRIX SUITES. +# +# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_lts.yml +# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites +########################################################## +executor: + archive: + tests: true + config: + shell_options: + global_vars: + TestData: + clusterType: standalone + internalQueryAppendIdToSetWindowFieldsSort: true + internalQueryMaxAllowedDensifyDocs: 1000 + traceExceptions: false + useRandomBinVersionsWithinReplicaSet: last-lts + nodb: '' +matrix_suite: true +selector: + roots: + - jstestfuzz/out/*.js +test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_continuous.yml new file mode 100644 index 0000000000000..76da7c376d82f --- /dev/null +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_continuous.yml @@ -0,0 +1,26 @@ +########################################################## +# THIS IS A GENERATED FILE -- DO NOT MODIFY. +# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE +# AND REGENERATE THE MATRIX SUITES. 
+# +# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_continuous.yml +# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites +########################################################## +executor: + archive: + tests: true + config: + shell_options: + global_vars: + TestData: + clusterType: standalone + internalQueryAppendIdToSetWindowFieldsSort: true + internalQueryMaxAllowedDensifyDocs: 1000 + traceExceptions: false + useRandomBinVersionsWithinReplicaSet: last-continuous + nodb: '' +matrix_suite: true +selector: + roots: + - jstestfuzz/out/*.js +test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_lts.yml new file mode 100644 index 0000000000000..d96066d3f3c89 --- /dev/null +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/aggregation_multiversion_fuzzer_last_lts.yml @@ -0,0 +1,26 @@ +########################################################## +# THIS IS A GENERATED FILE -- DO NOT MODIFY. +# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE +# AND REGENERATE THE MATRIX SUITES. +# +# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_lts.yml +# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites +########################################################## +executor: + archive: + tests: true + config: + shell_options: + global_vars: + TestData: + clusterType: standalone + internalQueryAppendIdToSetWindowFieldsSort: true + internalQueryMaxAllowedDensifyDocs: 1000 + traceExceptions: false + useRandomBinVersionsWithinReplicaSet: last-lts + nodb: '' +matrix_suite: true +selector: + roots: + - jstestfuzz/out/*.js +test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_continuous_new_old_old_new.yml index 7c47c5e43b735..727c4fe27fb6d 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_continuous_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_continuous_new_old_old_new.yml @@ -14,8 +14,8 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); global_vars: @@ -67,6 +67,7 @@ executor: global_vars: TestData: checkCollectionCounts: true + - class: CheckOrphansDeleted matrix_suite: true selector: exclude_files: @@ -264,7 +265,6 @@ selector: - requires_fastcount - requires_dbstats - requires_collstats - - requires_datasize - uses_parallel_shell - 
requires_profiling - requires_capped diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_lts_new_old_old_new.yml index 8d61f758dc6ef..33dce07ddfd08 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_lts_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_downgrade_last_lts_new_old_old_new.yml @@ -14,8 +14,8 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); global_vars: @@ -67,6 +67,7 @@ executor: global_vars: TestData: checkCollectionCounts: true + - class: CheckOrphansDeleted matrix_suite: true selector: exclude_files: @@ -264,7 +265,6 @@ selector: - requires_fastcount - requires_dbstats - requires_collstats - - requires_datasize - uses_parallel_shell - requires_profiling - requires_capped diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_new_old.yml index 656a7bf7ce6ea..60137d4a6b5b4 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_new_old.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_new_old.yml @@ -14,7 +14,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_old_new.yml index 041668ff85c98..73dd55ef97d21 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_new_old_new.yml @@ -14,7 +14,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: TestData: diff --git 
a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_old_new_new.yml index 33c5cce1bdc36..05564943ca2e9 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_old_new_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_continuous_old_new_new.yml @@ -14,7 +14,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_new_old.yml index ecc72c60c50b2..08fe65930071a 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_new_old.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_new_old.yml @@ -14,7 +14,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_old_new.yml index 676fd1aa10449..fbf521649bc65 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_new_old_new.yml @@ -14,7 +14,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_old_new_new.yml index 55b87cc488f53..fc6931e5cc5c5 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_old_new_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_last_lts_old_new_new.yml @@ -14,7 +14,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: TestData: diff --git 
a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_mongos_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_mongos_passthrough.yml index ab7e555698e38..108709aeddd25 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_mongos_passthrough.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_mongos_passthrough.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads.yml index 036057bca395f..877576cec07f1 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/set_read_preference_secondary.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads_sharded_collections.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads_sharded_collections.yml index 08f6e22b50c16..0d1225031554a 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads_sharded_collections.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_secondary_reads_sharded_collections.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough.yml index c7332c0994dd1..121a2c86c0bfd 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; 
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new.yml index 8f3c7d4832f88..a51fe721cb6aa 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_lts_new_old_old_new.yml index 4eb5c298b3c3c..82e43b5019eff 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_lts_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_sharded_collections_passthrough_last_lts_new_old_old_new.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_mongos_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_mongos_passthrough.yml index 03918906052fd..15d266e0e9e41 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_mongos_passthrough.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_mongos_passthrough.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js');; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_passthrough.yml 
b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_passthrough.yml index 5e807bebd0609..05d7c1b13f3f6 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_passthrough.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_passthrough.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_sharded_collections.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_sharded_collections.yml index ddf91d4b209e7..cf00d0918ee1e 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_sharded_collections.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_secondary_reads_sharded_collections.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');; load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js'); global_vars: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_sharded_collections_passthrough.yml index 246163ed62dfb..8c0562accab8b 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_sharded_collections_passthrough.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_cluster_sharded_collections_passthrough.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');; load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js');; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_mongos_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_mongos_passthrough.yml index de8dbcb712499..86e726388497d 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_mongos_passthrough.yml +++ 
b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_mongos_passthrough.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/implicit_whole_db_changestreams.js');; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_passthrough.yml index 8606ce9dd7df4..ecd9d919bac99 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_passthrough.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_passthrough.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicit_whole_db_changestreams.js'); global_vars: TestData: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_sharded_collections.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_sharded_collections.yml index 7e623dcce0894..cac632e5fa6c0 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_sharded_collections.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_secondary_reads_sharded_collections.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; load('jstests/libs/override_methods/set_read_preference_secondary.js');; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');; load('jstests/libs/override_methods/implicit_whole_db_changestreams.js'); global_vars: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_sharded_collections_passthrough.yml index ecde23d670d97..40bb6ecebe8a9 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_sharded_collections_passthrough.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/change_streams_whole_db_sharded_collections_passthrough.yml @@ -13,7 +13,7 @@ executor: - ValidateCollections config: shell_options: - eval: var testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js');; 
load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js');; load('jstests/libs/override_methods/implicit_whole_db_changestreams.js');; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_new_old.yml index 4837221913ccf..e11d04b36d3bb 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_new_old.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_new_old.yml @@ -23,6 +23,8 @@ executor: oplogSize: 1024 set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + queryAnalysisWriterIntervalSecs: 1 roleGraphInvalidationIsFatal: 1 num_nodes: 3 old_bin_version: last_continuous diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_old_new.yml index 14d2327e9de20..295e825e543ad 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_new_old_new.yml @@ -23,6 +23,8 @@ executor: oplogSize: 1024 set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + queryAnalysisWriterIntervalSecs: 1 roleGraphInvalidationIsFatal: 1 num_nodes: 3 old_bin_version: last_continuous diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_old_new_new.yml index e1c4ab6f997b6..afb80f07cf140 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_old_new_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_continuous_old_new_new.yml @@ -23,6 +23,8 @@ executor: oplogSize: 1024 set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + queryAnalysisWriterIntervalSecs: 1 roleGraphInvalidationIsFatal: 1 num_nodes: 3 old_bin_version: last_continuous diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_new_old.yml index 33daa228cc190..3394b06632435 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_new_old.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_new_old.yml @@ -23,6 +23,8 @@ executor: oplogSize: 1024 set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + queryAnalysisWriterIntervalSecs: 1 roleGraphInvalidationIsFatal: 1 num_nodes: 3 old_bin_version: last_lts diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_old_new.yml index 
c4b70373bf7d7..0ccf44385ad72 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_new_old_new.yml @@ -23,6 +23,8 @@ executor: oplogSize: 1024 set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + queryAnalysisWriterIntervalSecs: 1 roleGraphInvalidationIsFatal: 1 num_nodes: 3 old_bin_version: last_lts diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_old_new_new.yml index 287fc21d3c303..c7677f3a944fd 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_old_new_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_replication_last_lts_old_new_new.yml @@ -23,6 +23,8 @@ executor: oplogSize: 1024 set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + queryAnalysisWriterIntervalSecs: 1 roleGraphInvalidationIsFatal: 1 num_nodes: 3 old_bin_version: last_lts diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_continuous_new_old_old_new.yml index 1ceb5f6c3db7c..efa47665c93aa 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_continuous_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_continuous_new_old_old_new.yml @@ -26,11 +26,12 @@ executor: mongod_options: set_parameters: enableTestCommands: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 + queryAnalysisWriterIntervalSecs: 1 roleGraphInvalidationIsFatal: 1 mongos_options: set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 num_mongos: 2 num_rs_nodes_per_shard: 2 num_shards: 2 @@ -39,12 +40,12 @@ executor: mongod_options: oplogSize: 1024 hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections - class: CleanupConcurrencyWorkloads matrix_suite: true @@ -81,11 +82,13 @@ selector: - jstests/concurrency/fsm_workloads/yield_and_hashed.js - jstests/concurrency/fsm_workloads/yield_and_sorted.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js - jstests/concurrency/fsm_workloads/create_timeseries_collection.js - jstests/concurrency/fsm_workloads/create_collection_and_view.js + - jstests/concurrency/fsm_workloads/map_reduce_drop.js exclude_with_any_tags: - requires_replication - assumes_balancer_on diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_lts_new_old_old_new.yml 
b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_lts_new_old_old_new.yml index d2f377dae017c..826c8a8d15128 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_lts_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/concurrency_sharded_replication_last_lts_new_old_old_new.yml @@ -26,11 +26,12 @@ executor: mongod_options: set_parameters: enableTestCommands: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 + queryAnalysisWriterIntervalSecs: 1 roleGraphInvalidationIsFatal: 1 mongos_options: set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 num_mongos: 2 num_rs_nodes_per_shard: 2 num_shards: 2 @@ -39,12 +40,12 @@ executor: mongod_options: oplogSize: 1024 hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections - class: CleanupConcurrencyWorkloads matrix_suite: true @@ -81,11 +82,13 @@ selector: - jstests/concurrency/fsm_workloads/yield_and_hashed.js - jstests/concurrency/fsm_workloads/yield_and_sorted.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js - jstests/concurrency/fsm_workloads/create_timeseries_collection.js - jstests/concurrency/fsm_workloads/create_collection_and_view.js + - jstests/concurrency/fsm_workloads/map_reduce_drop.js exclude_with_any_tags: - requires_replication - assumes_balancer_on diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_ese_gcm.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_ese_gcm.yml index e356846269ad8..f5e86ccf17926 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_ese_gcm.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_ese_gcm.yml @@ -35,6 +35,7 @@ matrix_suite: true selector: exclude_files: - jstests/core/txns/**/*.js + - jstests/core/queryable_encryption/**/*.js exclude_with_any_tags: - does_not_support_encrypted_storage_engine roots: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_min_batch_repeat_queries_ese_gsm.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_min_batch_repeat_queries_ese_gsm.yml index d4d1df4da3441..75094c6e83845 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_min_batch_repeat_queries_ese_gsm.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_min_batch_repeat_queries_ese_gsm.yml @@ -42,6 +42,7 @@ matrix_suite: true selector: exclude_files: - jstests/core/txns/**/*.js + - jstests/core/queryable_encryption/**/*.js - jstests/core/**/profile1.js - jstests/core/**/profile2.js - jstests/core/**/find9.js diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_minimum_batch_size.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_minimum_batch_size.yml index 44cfc82076c57..0bc508270977d 100644 --- 
a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_minimum_batch_size.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_minimum_batch_size.yml @@ -36,6 +36,7 @@ matrix_suite: true selector: exclude_files: - jstests/core/txns/**/*.js + - jstests/core/queryable_encryption/**/*.js - jstests/core/**/profile1.js - jstests/core/**/profile2.js - jstests/core/**/find9.js diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml index 3254155d4a44d..c455f4f1e78c4 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/core_repeat_queries.yml @@ -33,6 +33,7 @@ matrix_suite: true selector: exclude_files: - jstests/core/txns/**/*.js + - jstests/core/queryable_encryption/**/*.js exclude_with_any_tags: - does_not_support_repeated_reads - requires_profiling diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_continuous.yml deleted file mode 100644 index 12e35d0acd72c..0000000000000 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_continuous.yml +++ /dev/null @@ -1,26 +0,0 @@ -########################################################## -# THIS IS A GENERATED FILE -- DO NOT MODIFY. -# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE -# AND REGENERATE THE MATRIX SUITES. -# -# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_continuous.yml -# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites -########################################################## -executor: - archive: - tests: true - config: - shell_options: - global_vars: - TestData: - clusterType: standalone - internalQueryAppendIdToSetWindowFieldsSort: true - internalQueryMaxAllowedDensifyDocs: 1000 - traceExceptions: false - useRandomBinVersionsWithinReplicaSet: last-continuous - nodb: '' -matrix_suite: true -selector: - roots: - - jstestfuzz/out/*.js -test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_lts.yml deleted file mode 100644 index 6dd4fbd42d072..0000000000000 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/generational_fuzzer_last_lts.yml +++ /dev/null @@ -1,26 +0,0 @@ -########################################################## -# THIS IS A GENERATED FILE -- DO NOT MODIFY. -# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE -# AND REGENERATE THE MATRIX SUITES. 
-# -# matrix suite mapping file: buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_lts.yml -# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites -########################################################## -executor: - archive: - tests: true - config: - shell_options: - global_vars: - TestData: - clusterType: standalone - internalQueryAppendIdToSetWindowFieldsSort: true - internalQueryMaxAllowedDensifyDocs: 1000 - traceExceptions: false - useRandomBinVersionsWithinReplicaSet: last-lts - nodb: '' -matrix_suite: true -selector: - roots: - - jstestfuzz/out/*.js -test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_auth_future_git_tag.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_auth_future_git_tag.yml index e66071c2edbff..69b4e10574dc0 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_auth_future_git_tag.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_auth_future_git_tag.yml @@ -26,7 +26,6 @@ selector: exclude_files: - jstests/multiVersion/libs/*.js - jstests/multiVersion/targetedTestsLastContinuousFeatures/*.js - - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js - jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js - jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js - jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_future_git_tag.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_future_git_tag.yml index ad0315a7904d0..027e53ed8713d 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_future_git_tag.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_future_git_tag.yml @@ -19,7 +19,6 @@ selector: - jstests/multiVersion/libs/*.js - jstests/multiVersion/targetedTestsLastContinuousFeatures/*.js - jstests/multiVersion/targetedTestsLastLtsFeatures/*.js - - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js - jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js - jstests/multiVersion/genericSetFCVUsage/crash_mongos_against_upgraded_cluster.js exclude_with_any_tags: @@ -28,4 +27,5 @@ selector: roots: - jstests/multiVersion/**/*.js - src/mongo/db/modules/*/jstests/hot_backups/multiVersion/*.js + - src/mongo/db/modules/*/jstests/audit/multiVersion/*.js test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check.yml index ac95ff9541252..4e4dcef6d1d06 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mongod_options: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_new_old.yml index 877a942d4e40c..a92d992343b3a 100644 --- 
a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_new_old.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_new_old.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: new_new_old diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_old_new.yml index b67ec92d8bc7e..8eb95ae671da2 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_new_old_new.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: new_old_new diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_old_new_new.yml index efca99bdd3993..d02aaaae8f9d2 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_old_new_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_continuous_old_new_new.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: old_new_new diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_new_old.yml index 01a7b6b18bf76..608d3b6f8cdb8 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_new_old.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_new_old.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: new_new_old diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_old_new.yml index 3432c5dc7894f..4c117a655456b 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_new_old_new.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: new_old_new diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_old_new_new.yml 
b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_old_new_new.yml index b3bb9d3e5d0f3..7dc4e7ed24c48 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_old_new_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/multiversion_sanity_check_last_lts_old_new_new.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: old_new_new diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_new_old.yml index 7a284924c02c5..9f490b1336d73 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_new_old.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_new_old.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: new_new_old diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_old_new.yml index 2625d13d68e79..2033ca4fe566d 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_new_old_new.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: new_old_new diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_old_new_new.yml index 0e1ec0d866302..1d365c4581e13 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_old_new_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_continuous_old_new_new.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: old_new_new diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_new_old.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_new_old.yml index c0cc6f9692ac5..d95c4576ba254 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_new_old.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_new_old.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: 
testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: new_new_old diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_old_new.yml index 1e486636b9bd1..68c5ca18a1821 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_new_old_new.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: new_old_new diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_old_new_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_old_new_new.yml index 9d3429adf3ac9..5066c58c90d5c 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_old_new_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_jscore_passthrough_last_lts_old_new_new.yml @@ -16,7 +16,7 @@ executor: - ValidateCollections config: shell_options: - eval: testingReplication = true; + eval: globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mixed_bin_versions: old_new_new diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_kill_primary_jscore_passthrough.yml index d9f2b8d04af6d..2819fdd620c47 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_kill_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_kill_primary_jscore_passthrough.yml @@ -15,8 +15,8 @@ executor: tests: true config: shell_options: - eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); global_vars: @@ -111,7 +111,6 @@ selector: - requires_fastcount - requires_dbstats - requires_collstats - - requires_datasize - operations_longer_than_stepdown_interval - uses_parallel_shell roots: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_reconfig_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_reconfig_kill_primary_jscore_passthrough.yml index 159e3b130379d..049bb83d682c2 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_reconfig_kill_primary_jscore_passthrough.yml +++ 
b/buildscripts/resmokeconfig/matrix_suites/generated_suites/replica_sets_reconfig_kill_primary_jscore_passthrough.yml @@ -15,8 +15,8 @@ executor: tests: true config: shell_options: - eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); global_vars: @@ -117,7 +117,6 @@ selector: - requires_fastcount - requires_dbstats - requires_collstats - - requires_datasize - operations_longer_than_stepdown_interval - uses_parallel_shell roots: diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_continuous.yml index f435b3daa7bb4..a0ab97003b76b 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_continuous.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_continuous.yml @@ -15,8 +15,8 @@ executor: tests: true config: shell_options: - eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); global_vars: @@ -107,7 +107,6 @@ selector: - requires_fastcount - requires_dbstats - requires_collstats - - requires_datasize - operations_longer_than_stepdown_interval - uses_parallel_shell - cannot_run_during_upgrade_downgrade diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_lts.yml index bede1bc20109c..d5e5f7ff61c7d 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_lts.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/retryable_writes_downgrade_last_lts.yml @@ -15,8 +15,8 @@ executor: tests: true config: shell_options: - eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); 
load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); global_vars: @@ -107,7 +107,6 @@ selector: - requires_fastcount - requires_dbstats - requires_collstats - - requires_datasize - operations_longer_than_stepdown_interval - uses_parallel_shell - cannot_run_during_upgrade_downgrade diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_continuous_new_old_old_new.yml index f9675fc9510e6..a2d49f3c5579d 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_continuous_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_continuous_new_old_old_new.yml @@ -32,6 +32,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 matrix_suite: true diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_lts_new_old_old_new.yml index fd2562ac8b6c6..de194b1018bce 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_lts_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_collections_jscore_passthrough_last_lts_new_old_old_new.yml @@ -32,6 +32,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 matrix_suite: true diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_continuous_new_old_old_new.yml index 2887689392f34..844c85ce63074 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_continuous_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_continuous_new_old_old_new.yml @@ -15,8 +15,8 @@ executor: tests: true config: shell_options: - eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); global_vars: @@ -73,6 +73,7 @@ executor: - class: CheckReplOplogs - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN 
n: 20 matrix_suite: true @@ -115,6 +116,7 @@ selector: - jstests/core/**/geo_s2sparse.js - jstests/core/**/mixed_version_replica_set.js - jstests/core/timeseries/timeseries_merge.js + - jstests/core/**/command_let_variables.js exclude_with_any_tags: - assumes_against_mongod_not_mongos - assumes_standalone_mongod @@ -132,13 +134,10 @@ selector: - requires_fastcount - requires_dbstats - requires_collstats - - requires_datasize - operations_longer_than_stepdown_interval - uses_parallel_shell - cannot_run_during_upgrade_downgrade - requires_timeseries roots: - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_lts_new_old_old_new.yml index 5091d0f011dca..7251d5511a217 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_lts_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharded_retryable_writes_downgrade_last_lts_new_old_old_new.yml @@ -15,8 +15,8 @@ executor: tests: true config: shell_options: - eval: testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); + eval: globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); global_vars: @@ -73,6 +73,7 @@ executor: - class: CheckReplOplogs - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 matrix_suite: true @@ -115,6 +116,7 @@ selector: - jstests/core/**/geo_s2sparse.js - jstests/core/**/mixed_version_replica_set.js - jstests/core/timeseries/timeseries_merge.js + - jstests/core/**/command_let_variables.js exclude_with_any_tags: - assumes_against_mongod_not_mongos - assumes_standalone_mongod @@ -132,13 +134,10 @@ selector: - requires_fastcount - requires_dbstats - requires_collstats - - requires_datasize - operations_longer_than_stepdown_interval - uses_parallel_shell - cannot_run_during_upgrade_downgrade - requires_timeseries roots: - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js test_kind: js_test diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_auth_audit.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_auth_audit.yml index 5d92678604505..fe4e27b8b54e5 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_auth_audit.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_auth_audit.yml @@ -23,8 +23,6 @@ executor: keyFile: jstests/libs/authTestsKey keyFileData: Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly roleGraphInvalidationIsFatal: true - setParameters: - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 nodb: '' matrix_suite: true selector: @@ -47,6 +45,7 @@ selector: - 
jstests/sharding/movechunk_parallel.js - jstests/sharding/migration_critical_section_concurrency.js - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js + - jstests/sharding/cluster_time_across_add_shard.js - jstests/sharding/set_user_write_block_mode.js roots: - jstests/sharding/**/*.js diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_continuous_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_continuous_new_old_old_new.yml index d24bdf66855a4..f0335672780fd 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_continuous_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_continuous_new_old_old_new.yml @@ -31,6 +31,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 matrix_suite: true diff --git a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_lts_new_old_old_new.yml b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_lts_new_old_old_new.yml index 7a81e66c9471b..aeb4db92d4fba 100644 --- a/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_lts_new_old_old_new.yml +++ b/buildscripts/resmokeconfig/matrix_suites/generated_suites/sharding_jscore_passthrough_last_lts_new_old_old_new.yml @@ -31,6 +31,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 matrix_suite: true diff --git a/buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_continuous.yml similarity index 100% rename from buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_continuous.yml rename to buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_continuous.yml diff --git a/buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_lts.yml similarity index 100% rename from buildscripts/resmokeconfig/matrix_suites/mappings/generational_fuzzer_last_lts.yml rename to buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_expression_multiversion_fuzzer_last_lts.yml diff --git a/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_continuous.yml b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_continuous.yml new file mode 100644 index 0000000000000..8e38541e94515 --- /dev/null +++ b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_continuous.yml @@ -0,0 +1,3 @@ +base_suite: generational_fuzzer +overrides: + - "multiversion.replica_sets_multiversion_testdata_last_continuous" diff --git a/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_lts.yml b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_lts.yml new file mode 100644 index 0000000000000..d3863bfd6afbe --- /dev/null +++ 
b/buildscripts/resmokeconfig/matrix_suites/mappings/aggregation_multiversion_fuzzer_last_lts.yml @@ -0,0 +1,3 @@ +base_suite: generational_fuzzer +overrides: + - "multiversion.replica_sets_multiversion_testdata_last_lts" diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/audit.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/audit.yml index f04c78aaef68b..074d8e47f2773 100644 --- a/buildscripts/resmokeconfig/matrix_suites/overrides/audit.yml +++ b/buildscripts/resmokeconfig/matrix_suites/overrides/audit.yml @@ -7,5 +7,3 @@ global_vars: TestData: auditDestination: 'console' - setParameters: - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/change_streams.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/change_streams.yml index 4b06f4d4a66a4..87316cda12dfd 100644 --- a/buildscripts/resmokeconfig/matrix_suites/overrides/change_streams.yml +++ b/buildscripts/resmokeconfig/matrix_suites/overrides/change_streams.yml @@ -7,7 +7,7 @@ # We do not always want all of the eval statements from the base suite so we override # the ones we always want eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); - name: causal_consistency @@ -22,7 +22,7 @@ # this is not under the eval section on purpose, we want to override this # to get rid of causal consistency eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); - name: mongos_passthrough diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/kill_primary.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/kill_primary.yml index b632927e91e37..0bab07b64d532 100644 --- a/buildscripts/resmokeconfig/matrix_suites/overrides/kill_primary.yml +++ b/buildscripts/resmokeconfig/matrix_suites/overrides/kill_primary.yml @@ -9,9 +9,9 @@ config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/multiversion.yml b/buildscripts/resmokeconfig/matrix_suites/overrides/multiversion.yml index 11956227a03e0..a3c3d501b1718 100644 --- a/buildscripts/resmokeconfig/matrix_suites/overrides/multiversion.yml +++ b/buildscripts/resmokeconfig/matrix_suites/overrides/multiversion.yml @@ -80,9 +80,6 @@ # Exclude last-lts specific tests - jstests/multiVersion/targetedTestsLastLtsFeatures/*.js - # TODO: SERVER-21578 - - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js - # TODO: SERVER-28104 - jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js @@ -100,9 +97,6 @@ # Exclude last-continuous specific tests - jstests/multiVersion/targetedTestsLastContinuousFeatures/*.js - # TODO: SERVER-21578 - - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js - # TODO: SERVER-28104 - jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js diff --git a/buildscripts/resmokeconfig/matrix_suites/overrides/replica_sets_stepdown_selector.yml 
b/buildscripts/resmokeconfig/matrix_suites/overrides/replica_sets_stepdown_selector.yml index cca7e08ed840a..159658337cea8 100644 --- a/buildscripts/resmokeconfig/matrix_suites/overrides/replica_sets_stepdown_selector.yml +++ b/buildscripts/resmokeconfig/matrix_suites/overrides/replica_sets_stepdown_selector.yml @@ -178,8 +178,6 @@ - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize ## The next tag corresponds to long running-operations, as they may exhaust their number # of retries and result in a network error being thrown. - operations_longer_than_stepdown_interval diff --git a/buildscripts/resmokeconfig/powercycle/powercycle_tasks.yml b/buildscripts/resmokeconfig/powercycle/powercycle_tasks.yml index c5cceef6749c1..75946b4a634c2 100644 --- a/buildscripts/resmokeconfig/powercycle/powercycle_tasks.yml +++ b/buildscripts/resmokeconfig/powercycle/powercycle_tasks.yml @@ -28,7 +28,7 @@ tasks: crash_method: kill - name: powercycle_last_lts_fcv - fcv: "6.0" + fcv: "7.0" - name: powercycle_replication repl_set: powercycle diff --git a/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml index 770cc05268551..53b4b1044277d 100644 --- a/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/aggregation_mongos_passthrough.yml @@ -34,6 +34,7 @@ executor: hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml b/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml index eeb0e097e2f04..697fe52940459 100644 --- a/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml +++ b/buildscripts/resmokeconfig/suites/aggregation_one_shard_sharded_collections.yml @@ -36,6 +36,7 @@ executor: hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml index e0b510c5da9de..d3af2a9dc0ee2 100644 --- a/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/aggregation_read_concern_majority_passthrough.yml @@ -37,7 +37,7 @@ executor: defaultReadConcernLevel: majority enableMajorityReadConcern: '' eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied diff --git a/buildscripts/resmokeconfig/suites/aggregation_secondary_reads.yml b/buildscripts/resmokeconfig/suites/aggregation_secondary_reads.yml index 150de7ad3075b..2b9843776115c 100644 --- a/buildscripts/resmokeconfig/suites/aggregation_secondary_reads.yml +++ b/buildscripts/resmokeconfig/suites/aggregation_secondary_reads.yml @@ -44,7 +44,7 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; 
load('jstests/libs/override_methods/enable_causal_consistency.js'); load('jstests/libs/override_methods/detect_spawning_own_mongod.js'); hooks: diff --git a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_causally_consistent_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_causally_consistent_passthrough.yml index dbca71df6de3c..d62c66e4c061f 100644 --- a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_causally_consistent_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_causally_consistent_passthrough.yml @@ -58,7 +58,7 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_causal_consistency.js'); load('jstests/libs/override_methods/detect_spawning_own_mongod.js'); load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") @@ -69,6 +69,7 @@ executor: - class: CheckReplOplogs - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml index 384f812a21b50..c60be3bb2b68c 100644 --- a/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/aggregation_sharded_collections_passthrough.yml @@ -38,6 +38,7 @@ executor: hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/analyze_shard_key_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/analyze_shard_key_jscore_passthrough.yml index 48283fb067b66..893539d06e8d8 100644 --- a/buildscripts/resmokeconfig/suites/analyze_shard_key_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/analyze_shard_key_jscore_passthrough.yml @@ -103,6 +103,9 @@ selector: # The following test requires the collection to be unsharded. 
- jstests/core/txns/finished_transaction_error_handling.js + # The following test fails because configureQueryAnalyzer is not allowed on QE collections + - jstests/core/queryable_encryption/**/*.js + exclude_with_any_tags: - assumes_against_mongod_not_mongos - assumes_standalone_mongod @@ -142,6 +145,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: @@ -158,6 +162,8 @@ executor: queryAnalysisSamplerConfigurationRefreshSecs: 1 queryAnalysisWriterIntervalSecs: 5 analyzeShardKeyNumRanges: 10 + analyzeShardKeySplitPointExpirationSecs: 10 + ttlMonitorSleepSecs: 5 logComponentVerbosity: verbosity: 0 sharding: 2 diff --git a/buildscripts/resmokeconfig/suites/benchmarks.yml b/buildscripts/resmokeconfig/suites/benchmarks.yml index 88a8a3a48899e..594a928b1bf5b 100644 --- a/buildscripts/resmokeconfig/suites/benchmarks.yml +++ b/buildscripts/resmokeconfig/suites/benchmarks.yml @@ -18,6 +18,9 @@ selector: - build/install/bin/simple8b_bm* # Hash table benchmark is really slow, don't run on evergreen - build/install/bin/hash_table_bm* + # These benchmarks are being run as part of the benchmarks_query.yml + - build/install/bin/percentile_algo_bm* + - build/install/bin/window_function_percentile_bm* # These benchmarks are being run as part of the benchmarks_expression*.yml - build/install/bin/expression_bm* - build/install/bin/sbe_expression_bm* @@ -26,7 +29,8 @@ selector: # These benchmarks are being run as part of the benchmarks_streams.yml test suite. - build/install/bin/streams_operator_dag_bm* # These benchmarks are only run when modifying or upgrading the immutable library. - - build/install/bin/absl_comparison_bm* + - build/install/bin/immutable_absl_comparison_bm* + - build/install/bin/immutable_std_comparison_bm* # These benchmarks are being run as part of the benchmarks_replication.yml test suite. - build/install/bin/oplog_application_bm* diff --git a/buildscripts/resmokeconfig/suites/benchmarks_query.yml b/buildscripts/resmokeconfig/suites/benchmarks_query.yml new file mode 100644 index 0000000000000..d8e1fd3f93f62 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/benchmarks_query.yml @@ -0,0 +1,14 @@ +# Query related google micro-benchmarks unless run in separate dedicated suites. +test_kind: benchmark_test + +selector: + root: build/benchmarks.txt + include_files: + # The trailing asterisk is for handling the .exe extension on Windows. 
+ - build/install/bin/percentile_algo_bm* + - build/install/bin/window_function_percentile_bm* + +executor: + config: {} + hooks: + - class: CombineBenchmarkResults diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_hedged_reads_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_hedged_reads_jscore_passthrough.yml index 88052220fd0bd..fd4bf68c9bc6b 100644 --- a/buildscripts/resmokeconfig/suites/causally_consistent_hedged_reads_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/causally_consistent_hedged_reads_jscore_passthrough.yml @@ -73,6 +73,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml index c8a496bd521dc..4b75aad624de7 100644 --- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough.yml @@ -74,6 +74,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml index 3caf7b719e3b2..0811ffc4ee49b 100644 --- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml +++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_passthrough_auth.yml @@ -143,19 +143,25 @@ executor: shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions - class: CheckMetadataConsistencyInBackground shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions - class: ValidateCollections shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) + <<: *authOptions + - class: CheckOrphansDeleted + shell_options: + global_vars: + TestData: *TestData + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions - class: CleanEveryN n: 20 diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_txns_passthrough.yml index cc4f4ae090011..2a5d91d3fe60f 100644 --- a/buildscripts/resmokeconfig/suites/causally_consistent_jscore_txns_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/causally_consistent_jscore_txns_passthrough.yml @@ -28,7 +28,7 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set diff --git a/buildscripts/resmokeconfig/suites/causally_consistent_read_concern_snapshot_passthrough.yml b/buildscripts/resmokeconfig/suites/causally_consistent_read_concern_snapshot_passthrough.yml index 9a9d6a9193ed7..88b77b83ed19d 100644 --- a/buildscripts/resmokeconfig/suites/causally_consistent_read_concern_snapshot_passthrough.yml +++ 
b/buildscripts/resmokeconfig/suites/causally_consistent_read_concern_snapshot_passthrough.yml @@ -41,7 +41,7 @@ executor: TestData: defaultReadConcernLevel: snapshot eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load("jstests/libs/override_methods/enable_causal_consistency.js"); hooks: diff --git a/buildscripts/resmokeconfig/suites/change_streams.yml b/buildscripts/resmokeconfig/suites/change_streams.yml index 1b8715b573226..1da3a4c9abcd9 100644 --- a/buildscripts/resmokeconfig/suites/change_streams.yml +++ b/buildscripts/resmokeconfig/suites/change_streams.yml @@ -34,7 +34,7 @@ executor: # are bound to the oplog visibility rules. Using causal consistency forces the visibility # point to advance to the timestamp of the last write before doing a new read. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); hooks: diff --git a/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml b/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml index 6075956ce23fb..74c575b9be77c 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml @@ -329,8 +329,6 @@ selector: - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize # "Cowardly fail if startParallelShell is run with a mongod that had an unclean shutdown: ..." - uses_parallel_shell @@ -363,9 +361,9 @@ executor: # shutdown). Workaround by relying on the requires_fastcount/dbstats/collstats/datasize and # uses_parallel_shell tags to denylist tests that uses them unsafely. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -404,6 +402,7 @@ executor: global_vars: TestData: checkCollectionCounts: true + - class: CheckOrphansDeleted fixture: class: ShardedClusterFixture mongos_options: diff --git a/buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough.yml index 1d9f280832169..ed30992c76684 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_mongos_sessions_passthrough.yml @@ -35,13 +35,14 @@ executor: # Enable causal consistency for change streams suites using 1 node replica sets. See # change_streams.yml for detailed explanation. 
eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_mongos_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_mongos_passthrough.yml index 1626b64271984..4d07e1f3a8d89 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_mongos_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_mongos_passthrough.yml @@ -32,7 +32,7 @@ executor: wrapCRUDinTransactions: true # Enable the transactions passthrough. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); load('jstests/libs/override_methods/network_error_and_txn_override.js'); @@ -43,6 +43,7 @@ executor: - class: CheckReplOplogs - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_passthrough.yml index db5ffedc85097..4569f7045f6da 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_passthrough.yml @@ -30,7 +30,7 @@ executor: wrapCRUDinTransactions: true # Enable the transactions passthrough. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); load('jstests/libs/override_methods/network_error_and_txn_override.js'); diff --git a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough.yml index 17dea97fd4286..6ad5be5bc6330 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_multi_stmt_txn_sharded_collections_passthrough.yml @@ -33,7 +33,7 @@ executor: wrapCRUDinTransactions: true # Enable the transactions passthrough. 
eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); load('jstests/libs/override_methods/network_error_and_txn_override.js'); @@ -44,6 +44,7 @@ executor: - class: CheckReplOplogs - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/change_streams_multitenant_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multitenant_passthrough.yml index 17a29d4f9cfb3..68cbea7cea7b8 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_multitenant_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_multitenant_passthrough.yml @@ -7,8 +7,6 @@ selector: ## # TODO SERVER-68341: Implement enable/disable command for mongoQ in the serverless. - jstests/change_streams/projection_fakes_internal_event.js - # TODO SERVER-69959: Implement a majority-committed insert listener. - - jstests/change_streams/only_wake_getmore_for_relevant_changes.js ## # TODO SERVER-70760: This test creates its own sharded cluster and uses transaction. The support @@ -47,6 +45,10 @@ selector: # This test uses 'system' database and '$tenant' cannot be injected in 'system.$cmd' namespace. - jstests/change_streams/global_index.js + # Queryable encryption test requires an internal connection for the keyvault that does not + # inject a $tenant. + - jstests/change_streams/queryable_encryption_change_stream.js + exclude_with_any_tags: ## # The next tags correspond to the special errors thrown by the @@ -72,7 +74,7 @@ executor: # Enable causal consistency for change streams suites using 1 node replica sets. See # change_streams.yml for detailed explanation. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); load('jstests/libs/override_methods/inject_dollar_tenant.js'); diff --git a/buildscripts/resmokeconfig/suites/change_streams_multitenant_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_multitenant_sharded_collections_passthrough.yml index 73adfbc19b4e0..b7c3c0ced95ea 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_multitenant_sharded_collections_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_multitenant_sharded_collections_passthrough.yml @@ -4,8 +4,8 @@ selector: roots: - jstests/change_streams/**/*.js exclude_files: - # TODO SERVER-69959: Implement a majority-committed insert listener. - - jstests/change_streams/only_wake_getmore_for_relevant_changes.js + # TODO SERVER-68341: Implement enable/disable command for mongoQ in the serverless. + - jstests/change_streams/**/*.js # TODO SERVER-68341: Implement enable/disable command for mongoQ in the serverless. - jstests/change_streams/projection_fakes_internal_event.js # TODO SERVER-68557 This test list databases that does not work in the sharded-cluster. This test @@ -43,7 +43,7 @@ executor: # Enable causal consistency for change streams suites using 1 node replica sets. See # change_streams.yml for detailed explanation. 
eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); @@ -52,6 +52,7 @@ executor: - class: EnableChangeStream - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/change_streams_per_shard_cursor_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_per_shard_cursor_passthrough.yml index 09d056f1e5423..7790edeabfa4f 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_per_shard_cursor_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_per_shard_cursor_passthrough.yml @@ -33,7 +33,7 @@ executor: # Enable causal consistency for change streams suites using 1 node replica sets. See # change_streams.yml for detailed explanation. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); @@ -41,6 +41,7 @@ executor: hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_passthrough.yml deleted file mode 100644 index f94799973e24c..0000000000000 --- a/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_passthrough.yml +++ /dev/null @@ -1,70 +0,0 @@ -test_kind: js_test -selector: - roots: - - jstests/change_streams/**/*.js - exclude_files: - # This test explicitly compares v1 and v2 tokens, and must be able to generate the former. - - jstests/change_streams/generate_v1_resume_token.js - - # The following tests run in a sharded fixture where the mongod generates a new shard detected - # internal event, which needs to be swallowed by the mongos. This is not supported here, because - # this suite will return the event op name as 'kNewShardDetected', but the mongos expects the - # event op name to be 'migrateChunkToNewShard'. - - jstests/change_streams/create_event_from_chunk_migration.js - - jstests/change_streams/migrate_last_chunk_from_shard_event.js - - jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js - - jstests/change_streams/projection_fakes_internal_event.js - # The following test uses the '$changeStreamSplitLargeEvents' stage which requires v2 token. - - jstests/change_streams/split_large_event.js - - exclude_with_any_tags: - ## - # The next tags correspond to the special errors thrown by the - # set_read_and_write_concerns.js override when it refuses to replace the readConcern or - # writeConcern of a particular command. Above each tag are the message(s) that cause the tag to be - # warranted. - ## - # "Cowardly refusing to override write concern of command: ..." 
- - assumes_write_concern_unchanged - -executor: - archive: - hooks: - - CheckReplDBHash - - CheckReplOplogs - - ValidateCollections - config: - shell_options: - global_vars: - TestData: - defaultReadConcernLevel: null - enableMajorityReadConcern: '' - # Enable causal consistency for change streams suites using 1 node replica sets. Some tests - # rely on the assumption that a w:majority write will be visible immediately in a subsequently - # opened change stream. In 1 node replica sets, an operation that majority commits at - # timestamp T will force the majority snapshot to advance to T, but the oplog visibility point - # may not have advanced to T yet. Subsequent majority snapshot reads will see this write in - # the oplog, but speculative majority reads may not, since they read from a local snapshot and - # are bound to the oplog visibility rules. Using causal consistency forces the visibility - # point to advance to the timestamp of the last write before doing a new read. - eval: >- - var testingReplication = true; - load('jstests/libs/override_methods/set_read_and_write_concerns.js'); - load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); - load('jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js'); - hooks: - # The CheckReplDBHash hook waits until all operations have replicated to and have been applied - # on the secondaries, so we run the ValidateCollections hook after it to ensure we're - # validating the entire contents of the collection. - - class: CheckReplOplogs - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ReplicaSetFixture - mongod_options: - bind_ip_all: '' - set_parameters: - enableTestCommands: 1 - num_nodes: 2 diff --git a/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_sharded_collections_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_sharded_collections_passthrough.yml deleted file mode 100644 index d91b200e1fb2a..0000000000000 --- a/buildscripts/resmokeconfig/suites/change_streams_v1_resume_token_sharded_collections_passthrough.yml +++ /dev/null @@ -1,63 +0,0 @@ -test_kind: js_test -selector: - roots: - - jstests/change_streams/**/*.js - exclude_files: - # This test explicitly compares v1 and v2 tokens, and must be able to generate the former. - - jstests/change_streams/generate_v1_resume_token.js - # This test uses the '$changeStreamSplitLargeEvents' stage which requires v2 token. - - jstests/change_streams/split_large_event.js - - exclude_with_any_tags: - ## - # The next tags correspond to the special errors thrown by the - # set_read_and_write_concerns.js override when it refuses to replace the readConcern or - # writeConcern of a particular command. Above each tag are the message(s) that cause the tag to be - # warranted. - ## - # "Cowardly refusing to override write concern of command: ..." - - assumes_write_concern_unchanged - # Exclude any that assume sharding is disabled - - assumes_against_mongod_not_mongos - - assumes_unsharded_collection - -executor: - archive: - hooks: - - CheckReplDBHash - - ValidateCollections - config: - shell_options: - global_vars: - TestData: - defaultReadConcernLevel: null - enableMajorityReadConcern: '' - # Enable causal consistency for change streams suites using 1 node replica sets. See - # change_streams.yml for detailed explanation. 
- eval: >- - var testingReplication = true; - load('jstests/libs/override_methods/set_read_and_write_concerns.js'); - load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); - load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); - load('jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js'); - hooks: - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ShardedClusterFixture - mongos_options: - bind_ip_all: '' - set_parameters: - enableTestCommands: 1 - mongod_options: - bind_ip_all: '' - set_parameters: - enableTestCommands: 1 - writePeriodicNoops: 1 - periodicNoopIntervalSecs: 1 - coordinateCommitReturnImmediatelyAfterPersistingDecision: true - num_shards: 2 - enable_sharding: - - test diff --git a/buildscripts/resmokeconfig/suites/change_streams_whole_cluster_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_whole_cluster_passthrough.yml index 1a5763bf5de80..f9718ae9ad638 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_whole_cluster_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_whole_cluster_passthrough.yml @@ -30,7 +30,7 @@ executor: # Enable causal consistency for change streams suites using 1 node replica sets. See # change_streams.yml for detailed explanation. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/implicit_whole_cluster_changestreams.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); diff --git a/buildscripts/resmokeconfig/suites/change_streams_whole_db_passthrough.yml b/buildscripts/resmokeconfig/suites/change_streams_whole_db_passthrough.yml index 068fb6293d2ee..439d88f00e412 100644 --- a/buildscripts/resmokeconfig/suites/change_streams_whole_db_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/change_streams_whole_db_passthrough.yml @@ -31,7 +31,7 @@ executor: # Enable causal consistency for change streams suites using 1 node replica sets. See # change_streams.yml for detailed explanation. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/implicit_whole_db_changestreams.js'); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); diff --git a/buildscripts/resmokeconfig/suites/clustered_collection_passthrough.yml b/buildscripts/resmokeconfig/suites/clustered_collection_passthrough.yml index 2bbfaf7866e97..b01916e2fc9bc 100644 --- a/buildscripts/resmokeconfig/suites/clustered_collection_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/clustered_collection_passthrough.yml @@ -60,9 +60,11 @@ selector: - jstests/core/**/index_bounds_pipe.js # Expects an index on _id to cover the query. - jstests/core/**/covered_index_simple_id.js - # TODO (SERVER-61259): $text not supported: "No query solutions" + # TODO (SERVER-78045): $text not supported: "No query solutions" - jstests/core/**/fts6.js - jstests/core/**/fts_projection.js + # Assumes there is one collection that is not clustered. 
+ - jstests/core/find_with_resume_after_param.js exclude_with_any_tags: - assumes_standalone_mongod @@ -82,13 +84,11 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; fixture: class: ReplicaSetFixture mongod_options: set_parameters: enableTestCommands: 1 - # SBE is not compatible with clustered collections - internalQueryFrameworkControl: "forceClassicEngine" failpoint.clusterAllCollectionsByDefault: "{mode: 'alwaysOn'}" num_nodes: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml index 0b0d67e706f0e..78076c90ab1db 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency.yml @@ -3,6 +3,7 @@ test_kind: fsm_workload_test selector: roots: - jstests/concurrency/fsm_workloads/**/*.js + - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js exclude_files: ## # Disabled due to MongoDB restrictions and/or workload restrictions @@ -25,7 +26,6 @@ selector: # collStats is not causally consistent - requires_collstats - requires_dbstats - - requires_datasize - requires_sharding # Tests which use $currentOp. Running an aggregation with $currentOp and read preference # secondary doesn't make much sense, since there's no guarantee *which* secondary you get results diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml index 71fcdccb0cc8a..7371f582447c4 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_replication_causal_consistency_ubsan.yml @@ -25,7 +25,6 @@ selector: # collStats is not causally consistent - requires_collstats - requires_dbstats - - requires_datasize - requires_sharding # Tests which use $currentOp. Running an aggregation with $currentOp and read preference # secondary doesn't make much sense, since there's no guarantee *which* secondary you get results diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_cursor_sweeps.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_cursor_sweeps.yml index 5a67527cecb0e..52e711d7e31a3 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_cursor_sweeps.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_cursor_sweeps.yml @@ -53,6 +53,8 @@ executor: oplogSize: 1024 set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + queryAnalysisWriterIntervalSecs: 1 # Setting this parameter to "1" disables cursor caching in WiredTiger, and sets the cache # size to "1" in MongoDB. This forces all resources to be released when done. 
wiredTigerCursorCacheSize: 1 diff --git a/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_eviction_debug.yml b/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_eviction_debug.yml deleted file mode 100644 index c8c732b6990c8..0000000000000 --- a/buildscripts/resmokeconfig/suites/concurrency_replication_wiredtiger_eviction_debug.yml +++ /dev/null @@ -1,52 +0,0 @@ -test_kind: fsm_workload_test - -selector: - roots: - - jstests/concurrency/fsm_workloads/**/*.js - exclude_files: - ## - # Disabled due to MongoDB restrictions and/or workload restrictions - ## - # These workloads use >100MB of data, which can overwhelm test hosts. - - jstests/concurrency/fsm_workloads/agg_group_external.js - - jstests/concurrency/fsm_workloads/agg_sort_external.js - - # The findAndModify_update_grow.js workload can cause OOM kills on test hosts. - - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js - - # These workloads run the reIndex command, which is only allowed on a standalone node. - - jstests/concurrency/fsm_workloads/reindex.js - - jstests/concurrency/fsm_workloads/reindex_background.js - - jstests/concurrency/fsm_workloads/reindex_writeconflict.js - - exclude_with_any_tags: - - requires_sharding - -executor: - archive: - hooks: - - CheckReplDBHashInBackground - - ValidateCollectionsInBackground - - CheckReplDBHash - - ValidateCollections - tests: true - config: {} - hooks: - # The CheckReplDBHash hook waits until all operations have replicated to and have been applied - # on the secondaries, so we run the ValidateCollections hook after it to ensure we're - # validating the entire contents of the collection. - - class: CheckReplDBHashInBackground - - class: ValidateCollectionsInBackground - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanupConcurrencyWorkloads - fixture: - class: ReplicaSetFixture - mongod_options: - oplogSize: 1024 - set_parameters: - enableTestCommands: 1 - # Enable aggressive WiredTiger eviction. - wiredTigerEvictionDebugMode: true - roleGraphInvalidationIsFatal: 1 - num_nodes: 3 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml index 22a2a92c40cab..b85581b395d7a 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency.yml @@ -109,12 +109,12 @@ executor: runningWithCausalConsistency: true runningWithBalancer: false hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. 
- class: CleanupConcurrencyWorkloads fixture: @@ -131,7 +131,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml index e7c35cbf0d3df..08cae19b0aa54 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_causal_consistency_and_balancer.yml @@ -86,9 +86,15 @@ selector: # Time-series collections are not supported on mongos. - jstests/concurrency/fsm_workloads/create_timeseries_collection.js - # Performs finds with $where, which are slow. Because 'orphanCleanupDelaySecs' is set to 1 second, - # this may cause the finds to return incomplete results. + # Because 'orphanCleanupDelaySecs' is set to 1 second, queries issued by the workloads below may + # return incomplete results. + # TODO SERVER-77354: Allow the following tests to run in this suite after 'orphanCleanupDelaySecs' + # is increased. - jstests/concurrency/fsm_workloads/indexed_insert_where.js + - jstests/concurrency/fsm_workloads/agg_sort.js + + # TODO Undenylist (SERVER-71819). + - jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js exclude_with_any_tags: - does_not_support_causal_consistency @@ -119,12 +125,12 @@ executor: runningWithCausalConsistency: true runningWithBalancer: true hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - class: CleanupConcurrencyWorkloads fixture: @@ -140,7 +146,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml index 7a7c7f24b7d8d..afee7a780574d 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_clusterwide_ops_add_remove_shards.yml @@ -20,12 +20,12 @@ executor: tests: true config: {} hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last.
- class: CleanupConcurrencyWorkloads fixture: @@ -41,7 +41,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_initial_sync.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_initial_sync.yml index 972af5b6e69ec..f5c72ea9cff88 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_initial_sync.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_initial_sync.yml @@ -82,6 +82,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -124,6 +125,7 @@ selector: - jstests/concurrency/fsm_workloads/indexed_insert_upsert.js - jstests/concurrency/fsm_workloads/indexed_insert_where.js - jstests/concurrency/fsm_workloads/list_indexes.js + - jstests/concurrency/fsm_workloads/query_stats_concurrent.js - jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js - jstests/concurrency/fsm_workloads/reindex.js - jstests/concurrency/fsm_workloads/reindex_background.js @@ -158,6 +160,7 @@ selector: - jstests/concurrency/fsm_workloads/collmod_separate_collections.js - jstests/concurrency/fsm_workloads/collmod_writeconflict.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/invalidated_cursors.js - jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js - jstests/concurrency/fsm_workloads/view_catalog.js @@ -217,7 +220,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load("jstests/libs/override_methods/set_read_preference_secondary.js"); load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); global_vars: @@ -233,11 +236,11 @@ executor: - class: ContinuousInitialSync use_action_permitted_file: true sync_interval_secs: 15 + - class: CheckShardFilteringMetadata - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: CheckClusterIndexConsistency - class: ValidateCollections # Validation can interfere with other operations, so this goes last. 
shell_options: @@ -274,7 +277,6 @@ executor: enableTestCommands: 1 enableElectionHandoff: 0 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 collectionClonerBatchSize: 10 initialSyncOplogFetcherBatchSize: 10 queryAnalysisWriterIntervalSecs: 1 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml index 5e8f0258da127..13f5b7ebf1045 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_kill_primary_with_balancer.yml @@ -111,6 +111,7 @@ selector: - jstests/concurrency/fsm_workloads/indexed_insert_upsert.js - jstests/concurrency/fsm_workloads/indexed_insert_where.js - jstests/concurrency/fsm_workloads/list_indexes.js + - jstests/concurrency/fsm_workloads/query_stats_concurrent.js - jstests/concurrency/fsm_workloads/reindex.js - jstests/concurrency/fsm_workloads/reindex_background.js - jstests/concurrency/fsm_workloads/remove_multiple_documents.js @@ -139,6 +140,7 @@ selector: - jstests/concurrency/fsm_workloads/collmod.js - jstests/concurrency/fsm_workloads/collmod_separate_collections.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/invalidated_cursors.js - jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js - jstests/concurrency/fsm_workloads/view_catalog.js @@ -198,11 +200,11 @@ executor: shard_stepdown: true use_action_permitted_file: true kill: true + - class: CheckShardFilteringMetadata - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. shell_options: global_vars: @@ -237,7 +239,6 @@ executor: enableTestCommands: 1 enableElectionHandoff: 0 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml index 0def4bb83c0d5..9cf3a2eae31a1 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn.yml @@ -81,6 +81,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -167,12 +168,12 @@ executor: traceExceptions: false hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. 
- class: CleanupConcurrencyWorkloads fixture: @@ -189,7 +190,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml index 9010a3b3b2b99..da4f9be7d055a 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_local_read_write_multi_stmt_txn_with_balancer.yml @@ -81,6 +81,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -166,12 +167,12 @@ executor: traceExceptions: false runningWithBalancer: true hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - class: CleanupConcurrencyWorkloads fixture: @@ -187,7 +188,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml index c43514566b771..d5eaa754d3deb 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn.yml @@ -81,6 +81,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -157,12 +158,12 @@ executor: runningWithSessions: true traceExceptions: false hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. 
- class: CleanupConcurrencyWorkloads fixture: @@ -179,7 +180,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml index 63cdc532b5d27..ac5a47e830ad1 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_kill_primary.yml @@ -97,6 +97,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -153,6 +154,7 @@ selector: # Uses non retryable commands. - jstests/concurrency/fsm_workloads/agg_out.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/agg_sort.js - jstests/concurrency/fsm_workloads/collmod.js - jstests/concurrency/fsm_workloads/collmod_separate_collections.js @@ -179,6 +181,7 @@ selector: - jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js - jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js - jstests/concurrency/fsm_workloads/list_indexes.js + - jstests/concurrency/fsm_workloads/query_stats_concurrent.js # Uses non-retryable commands in the same state function as a command not supported in a # transaction. @@ -248,11 +251,11 @@ executor: shard_stepdown: true stepdown_interval_ms: 15000 use_action_permitted_file: true + - class: CheckShardFilteringMetadata - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. shell_options: global_vars: @@ -288,7 +291,6 @@ executor: enableTestCommands: 1 enableElectionHandoff: 0 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml index 73269fdedba1d..1f42d70170afa 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_terminate_primary.yml @@ -97,6 +97,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -153,6 +154,7 @@ selector: # Uses non retryable commands. 
- jstests/concurrency/fsm_workloads/agg_out.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/agg_sort.js - jstests/concurrency/fsm_workloads/collmod.js - jstests/concurrency/fsm_workloads/collmod_separate_collections.js @@ -179,6 +181,7 @@ selector: - jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js - jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js - jstests/concurrency/fsm_workloads/list_indexes.js + - jstests/concurrency/fsm_workloads/query_stats_concurrent.js # Uses non-retryable commands in the same state function as a command not supported in a # transaction. @@ -248,11 +251,11 @@ executor: stepdown_interval_ms: 15000 terminate: true use_action_permitted_file: true + - class: CheckShardFilteringMetadata - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - class: CleanupConcurrencyWorkloads fixture: @@ -283,7 +286,6 @@ executor: enableTestCommands: 1 enableElectionHandoff: 0 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml index 27fe0fb8a3a92..90756a7333006 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_balancer.yml @@ -81,6 +81,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -161,12 +162,12 @@ executor: traceExceptions: false runningWithBalancer: true hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - class: CleanupConcurrencyWorkloads fixture: @@ -182,7 +183,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml index af46e1e0b76a4..6bb88b3f35fea 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_multi_stmt_txn_with_stepdowns.yml @@ -83,6 +83,7 @@ selector: # TODO Undenylist (SERVER-38852). 
- jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -135,6 +136,7 @@ selector: # Uses non retryable commands. - jstests/concurrency/fsm_workloads/agg_out.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/agg_sort.js - jstests/concurrency/fsm_workloads/collmod.js - jstests/concurrency/fsm_workloads/collmod_separate_collections.js @@ -220,11 +222,11 @@ executor: config_stepdown: true shard_stepdown: true use_action_permitted_file: true + - class: CheckShardFilteringMetadata - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - class: CleanupConcurrencyWorkloads fixture: @@ -255,7 +257,6 @@ executor: enableTestCommands: 1 enableElectionHandoff: 0 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml index b629912ccb8e1..a3e8153c625dd 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication.yml @@ -74,6 +74,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -92,6 +93,9 @@ selector: # for dbCheck. TODO (SERVER-63951): Remove this exclusion. - jstests/concurrency/fsm_workloads/create_collection_and_view.js + # The test may spuriously fail when run against sharded clusters, due to limitations of the + # infrastructure. See SERVER-77039 for full details. + - jstests/concurrency/fsm_workloads/map_reduce_drop.js exclude_with_any_tags: - requires_replication @@ -116,6 +120,7 @@ executor: TestData: runningWithBalancer: false hooks: + - class: CheckShardFilteringMetadata # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back. # - class: RunDBCheckInBackground - class: CheckReplDBHashInBackground @@ -123,7 +128,6 @@ executor: - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. 
- class: CleanupConcurrencyWorkloads fixture: @@ -140,7 +144,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml index 29eeda51de542..439067088ec05 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_replication_with_balancer.yml @@ -80,6 +80,7 @@ selector: # TODO Undenylist (SERVER-38852). - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js # serverStatus does not include transaction metrics on mongos. - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -94,6 +95,13 @@ selector: # Time-series collections are not supported on mongos. - jstests/concurrency/fsm_workloads/create_timeseries_collection.js + # The test may spuriously fail when run against sharded clusters, due to limitations of the + # infrastructure. See SERVER-77039 for full details. + - jstests/concurrency/fsm_workloads/map_reduce_drop.js + + # TODO Undenylist (SERVER-71819). + - jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js + exclude_with_any_tags: - assumes_balancer_off - requires_replication @@ -115,12 +123,12 @@ executor: TestData: runningWithBalancer: true hooks: + - class: CheckShardFilteringMetadata - class: CheckReplDBHashInBackground - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. 
- class: CleanupConcurrencyWorkloads fixture: @@ -136,7 +144,6 @@ executor: set_parameters: enableTestCommands: 1 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml index a239ce66d5534..3e02ef81ec9ab 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_terminate_primary_with_balancer.yml @@ -111,6 +111,7 @@ selector: - jstests/concurrency/fsm_workloads/indexed_insert_upsert.js - jstests/concurrency/fsm_workloads/indexed_insert_where.js - jstests/concurrency/fsm_workloads/list_indexes.js + - jstests/concurrency/fsm_workloads/query_stats_concurrent.js - jstests/concurrency/fsm_workloads/reindex.js - jstests/concurrency/fsm_workloads/reindex_background.js - jstests/concurrency/fsm_workloads/remove_multiple_documents.js @@ -139,6 +140,7 @@ selector: - jstests/concurrency/fsm_workloads/collmod.js - jstests/concurrency/fsm_workloads/collmod_separate_collections.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/invalidated_cursors.js - jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js - jstests/concurrency/fsm_workloads/view_catalog.js @@ -198,11 +200,11 @@ executor: shard_stepdown: true use_action_permitted_file: true terminate: true + - class: CheckShardFilteringMetadata - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - class: CleanupConcurrencyWorkloads fixture: @@ -232,7 +234,6 @@ executor: enableTestCommands: 1 enableElectionHandoff: 0 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_catalog_shard.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_catalog_shard.yml deleted file mode 100644 index 6a867b9629a9f..0000000000000 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_catalog_shard.yml +++ /dev/null @@ -1,148 +0,0 @@ -test_kind: fsm_workload_test - -selector: - roots: - - jstests/concurrency/fsm_workloads/**/*.js - - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js - exclude_files: - # SERVER-13116 distinct isn't sharding aware - - jstests/concurrency/fsm_workloads/distinct.js - - jstests/concurrency/fsm_workloads/distinct_noindex.js - - jstests/concurrency/fsm_workloads/distinct_projection.js - - # SERVER-14669 Multi-removes that use $where miscount removed documents - - jstests/concurrency/fsm_workloads/remove_where.js - - # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded - # collections'. 
This bug is problematic for these workloads because they assert on count() - # values: - - jstests/concurrency/fsm_workloads/agg_match.js - - # Disabled due to MongoDB restrictions and/or workload restrictions - - # These workloads sometimes trigger 'Could not lock auth data update lock' - # errors because the AuthorizationManager currently waits for only five - # seconds to acquire the lock for authorization documents - - jstests/concurrency/fsm_workloads/auth_create_role.js - - jstests/concurrency/fsm_workloads/auth_create_user.js - - jstests/concurrency/fsm_workloads/auth_drop_role.js - - jstests/concurrency/fsm_workloads/auth_drop_user.js - - # uses >100MB of data, which can overwhelm test hosts - - jstests/concurrency/fsm_workloads/agg_group_external.js - - jstests/concurrency/fsm_workloads/agg_sort_external.js - - # compact can only be run against a standalone mongod - - jstests/concurrency/fsm_workloads/compact.js - - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js - - # convertToCapped can't be run on mongos processes - - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js - - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js - - # findAndModify requires a shard key - - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js - - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js - - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js - - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js - - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js - - # remove cannot be {} for findAndModify - - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js - - # can cause OOM kills on test hosts - - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js - - - # cannot createIndex after dropDatabase without sharding first - - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js - - # reIndex is not supported in mongos. - - jstests/concurrency/fsm_workloads/reindex.js - - jstests/concurrency/fsm_workloads/reindex_background.js - - jstests/concurrency/fsm_workloads/reindex_writeconflict.js - - # The WTWriteConflictException failpoint is not supported on mongos. - - jstests/concurrency/fsm_workloads/collmod_writeconflict.js - - # our .remove(query, {justOne: true}) calls lack shard keys - - jstests/concurrency/fsm_workloads/remove_single_document.js - - # SERVER-20361 Improve the behaviour of multi-update/delete against a sharded collection - - jstests/concurrency/fsm_workloads/update_where.js - - # cannot use upsert command with $where with sharded collections - - jstests/concurrency/fsm_workloads/upsert_where.js - - # stagedebug can only be run against a standalone mongod - - jstests/concurrency/fsm_workloads/yield_and_hashed.js - - jstests/concurrency/fsm_workloads/yield_and_sorted.js - - # TODO Undenylist (SERVER-38852). - - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js - - # serverStatus does not include transaction metrics on mongos. - - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js - - # Uses the same transaction id across different routers, which is not allowed because when either - # router tries to commit, it may not know the full participant list. - - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js - - # Inserts directly into system.views using applyOps, which is not available on mongos. 
- - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js - - # Time-series collections are not supported on mongos. - - jstests/concurrency/fsm_workloads/create_timeseries_collection.js - - exclude_with_any_tags: - - assumes_balancer_off - - requires_replication - # mongos has no system.profile collection. - - requires_profiling - - assumes_unsharded_collection - # TODO SERVER-73279: Remove after branching 7.0 and a catalog shard can be downgraded. - - multiversion_incompatible - -executor: - archive: - hooks: - - CheckReplDBHashInBackground - - CheckReplDBHash - # TODO (SERVER-74534): Enable when this will work with co-located configsvr. - # - CheckMetadataConsistencyInBackground - - ValidateCollections - tests: true - config: - shell_options: - global_vars: - TestData: - runningWithBalancer: true - hooks: - - class: CheckReplDBHashInBackground - - class: CheckReplDBHash - # TODO (SERVER-74534): Enable when this will work with co-located configsvr. - # - class: CheckMetadataConsistencyInBackground - - class: CheckOrphansDeleted - - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - - class: CleanupConcurrencyWorkloads - fixture: - class: ShardedClusterFixture - catalog_shard: "any" - mongos_options: - set_parameters: - enableTestCommands: 1 - queryAnalysisSamplerConfigurationRefreshSecs: 1 - shard_options: - mongod_options: - oplogSize: 1024 - mongod_options: - set_parameters: - enableTestCommands: 1 - roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 - queryAnalysisWriterIntervalSecs: 1 - num_rs_nodes_per_shard: 3 - num_shards: 2 - num_mongos: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_config_shard.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_config_shard.yml new file mode 100644 index 0000000000000..2456f25d9b2a6 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_balancer_and_config_shard.yml @@ -0,0 +1,147 @@ +test_kind: fsm_workload_test + +selector: + roots: + - jstests/concurrency/fsm_workloads/**/*.js + - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js + exclude_files: + # SERVER-13116 distinct isn't sharding aware + - jstests/concurrency/fsm_workloads/distinct.js + - jstests/concurrency/fsm_workloads/distinct_noindex.js + - jstests/concurrency/fsm_workloads/distinct_projection.js + + # SERVER-14669 Multi-removes that use $where miscount removed documents + - jstests/concurrency/fsm_workloads/remove_where.js + + # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded + # collections'. 
This bug is problematic for these workloads because they assert on count() + # values: + - jstests/concurrency/fsm_workloads/agg_match.js + + # Disabled due to MongoDB restrictions and/or workload restrictions + + # These workloads sometimes trigger 'Could not lock auth data update lock' + # errors because the AuthorizationManager currently waits for only five + # seconds to acquire the lock for authorization documents + - jstests/concurrency/fsm_workloads/auth_create_role.js + - jstests/concurrency/fsm_workloads/auth_create_user.js + - jstests/concurrency/fsm_workloads/auth_drop_role.js + - jstests/concurrency/fsm_workloads/auth_drop_user.js + + # uses >100MB of data, which can overwhelm test hosts + - jstests/concurrency/fsm_workloads/agg_group_external.js + - jstests/concurrency/fsm_workloads/agg_sort_external.js + + # compact can only be run against a standalone mongod + - jstests/concurrency/fsm_workloads/compact.js + - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js + + # convertToCapped can't be run on mongos processes + - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js + - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js + + # findAndModify requires a shard key + - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js + - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js + - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js + - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js + - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js + + # remove cannot be {} for findAndModify + - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js + + # can cause OOM kills on test hosts + - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js + + + # cannot createIndex after dropDatabase without sharding first + - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js + + # reIndex is not supported in mongos. + - jstests/concurrency/fsm_workloads/reindex.js + - jstests/concurrency/fsm_workloads/reindex_background.js + - jstests/concurrency/fsm_workloads/reindex_writeconflict.js + + # The WTWriteConflictException failpoint is not supported on mongos. + - jstests/concurrency/fsm_workloads/collmod_writeconflict.js + + # our .remove(query, {justOne: true}) calls lack shard keys + - jstests/concurrency/fsm_workloads/remove_single_document.js + + # SERVER-20361 Improve the behaviour of multi-update/delete against a sharded collection + - jstests/concurrency/fsm_workloads/update_where.js + + # cannot use upsert command with $where with sharded collections + - jstests/concurrency/fsm_workloads/upsert_where.js + + # stagedebug can only be run against a standalone mongod + - jstests/concurrency/fsm_workloads/yield_and_hashed.js + - jstests/concurrency/fsm_workloads/yield_and_sorted.js + + # TODO Undenylist (SERVER-38852). + - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js + + # serverStatus does not include transaction metrics on mongos. + - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js + + # Uses the same transaction id across different routers, which is not allowed because when either + # router tries to commit, it may not know the full participant list. 
+ - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js + + # Inserts directly into system.views using applyOps, which is not available on mongos. + - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js + + # Time-series collections are not supported on mongos. + - jstests/concurrency/fsm_workloads/create_timeseries_collection.js + + # TODO Undenylist (SERVER-71819). + - jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js + + exclude_with_any_tags: + - assumes_balancer_off + - requires_replication + # mongos has no system.profile collection. + - requires_profiling + - assumes_unsharded_collection + +executor: + archive: + hooks: + - CheckReplDBHashInBackground + - CheckReplDBHash + - CheckMetadataConsistencyInBackground + - ValidateCollections + tests: true + config: + shell_options: + global_vars: + TestData: + runningWithBalancer: true + hooks: + - class: CheckShardFilteringMetadata + - class: CheckReplDBHashInBackground + - class: CheckReplDBHash + - class: CheckMetadataConsistencyInBackground + - class: CheckOrphansDeleted + - class: CheckRoutingTableConsistency + - class: ValidateCollections # Validation can interfere with other operations, so this goes last. + - class: CleanupConcurrencyWorkloads + fixture: + class: ShardedClusterFixture + config_shard: "any" + mongos_options: + set_parameters: + enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + shard_options: + mongod_options: + oplogSize: 1024 + mongod_options: + set_parameters: + enableTestCommands: 1 + roleGraphInvalidationIsFatal: 1 + queryAnalysisWriterIntervalSecs: 1 + num_rs_nodes_per_shard: 3 + num_shards: 2 + num_mongos: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_catalog_shard.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_catalog_shard.yml deleted file mode 100644 index d087bd29819ad..0000000000000 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_catalog_shard.yml +++ /dev/null @@ -1,152 +0,0 @@ -test_kind: fsm_workload_test - -selector: - roots: - - jstests/concurrency/fsm_workloads/**/*.js - - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js - exclude_files: - # SERVER-13116 distinct isn't sharding aware - - jstests/concurrency/fsm_workloads/distinct.js - - jstests/concurrency/fsm_workloads/distinct_noindex.js - - jstests/concurrency/fsm_workloads/distinct_projection.js - - # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded - # collections'. 
This bug is problematic for these workloads because they assert on count() - # values: - - jstests/concurrency/fsm_workloads/agg_match.js - - # Disabled due to MongoDB restrictions and/or workload restrictions - - # These workloads sometimes trigger 'Could not lock auth data update lock' - # errors because the AuthorizationManager currently waits for only five - # seconds to acquire the lock for authorization documents - - jstests/concurrency/fsm_workloads/auth_create_role.js - - jstests/concurrency/fsm_workloads/auth_create_user.js - - jstests/concurrency/fsm_workloads/auth_drop_role.js - - jstests/concurrency/fsm_workloads/auth_drop_user.js - - # uses >100MB of data, which can overwhelm test hosts - - jstests/concurrency/fsm_workloads/agg_group_external.js - - jstests/concurrency/fsm_workloads/agg_sort_external.js - - # compact can only be run against a standalone mongod - - jstests/concurrency/fsm_workloads/compact.js - - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js - - # convertToCapped can't be run on mongos processes - - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js - - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js - - # findAndModify requires a shard key - - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js - - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js - - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js - - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js - - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js - - # remove cannot be {} for findAndModify - - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js - - # can cause OOM kills on test hosts - - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js - - - # cannot createIndex after dropDatabase without sharding first - - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js - - # reIndex is not supported in mongos. - - jstests/concurrency/fsm_workloads/reindex.js - - jstests/concurrency/fsm_workloads/reindex_background.js - - jstests/concurrency/fsm_workloads/reindex_writeconflict.js - - # The WTWriteConflictException failpoint is not supported on mongos. - - jstests/concurrency/fsm_workloads/collmod_writeconflict.js - - # our .remove(query, {justOne: true}) calls lack shard keys - - jstests/concurrency/fsm_workloads/remove_single_document.js - - # cannot use upsert command with $where with sharded collections - - jstests/concurrency/fsm_workloads/upsert_where.js - - # stagedebug can only be run against a standalone mongod - - jstests/concurrency/fsm_workloads/yield_and_hashed.js - - jstests/concurrency/fsm_workloads/yield_and_sorted.js - - # TODO Undenylist (SERVER-38852). - - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js - - # serverStatus does not include transaction metrics on mongos. - - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js - - # Uses the same transaction id across different routers, which is not allowed because when either - # router tries to commit, it may not know the full participant list. - - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js - - # Inserts directly into system.views using applyOps, which is not available on mongos. - - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js - - # Time-series collections are not supported on mongos. 
- - jstests/concurrency/fsm_workloads/create_timeseries_collection.js - - # This test concurrently creates views and collections on the same namespace, which causes issues - # for dbCheck. TODO (SERVER-63951): Remove this exclusion. - - jstests/concurrency/fsm_workloads/create_collection_and_view.js - - - exclude_with_any_tags: - - requires_replication - - assumes_balancer_on - # mongos has no system.profile collection. - - requires_profiling - - assumes_unsharded_collection - # TODO SERVER-73279: Remove after branching 7.0 and a catalog shard can be downgraded. - - multiversion_incompatible - -executor: - archive: - hooks: - # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back. - # - RunDBCheckInBackground - - CheckReplDBHashInBackground - - CheckReplDBHash - # TODO (SERVER-74534): Enable when this will work with co-located configsvr. - # - CheckMetadataConsistencyInBackground - - ValidateCollections - tests: true - config: - shell_options: - global_vars: - TestData: - runningWithBalancer: false - hooks: - # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back. - # - class: RunDBCheckInBackground - - class: CheckReplDBHashInBackground - - class: CheckReplDBHash - # TODO (SERVER-74534): Enable when this will work with co-located configsvr. - # - class: CheckMetadataConsistencyInBackground - - class: CheckOrphansDeleted - - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - - class: CleanupConcurrencyWorkloads - fixture: - class: ShardedClusterFixture - catalog_shard: "any" - enable_balancer: false - mongos_options: - set_parameters: - enableTestCommands: 1 - queryAnalysisSamplerConfigurationRefreshSecs: 1 - shard_options: - mongod_options: - oplogSize: 1024 - mongod_options: - set_parameters: - enableTestCommands: 1 - roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 - queryAnalysisWriterIntervalSecs: 1 - num_rs_nodes_per_shard: 3 - num_shards: 2 - num_mongos: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_config_shard.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_config_shard.yml new file mode 100644 index 0000000000000..fb8c1d97f0637 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_config_shard.yml @@ -0,0 +1,148 @@ +test_kind: fsm_workload_test + +selector: + roots: + - jstests/concurrency/fsm_workloads/**/*.js + - src/mongo/db/modules/*/jstests/concurrency/fsm_workloads/*.js + exclude_files: + # SERVER-13116 distinct isn't sharding aware + - jstests/concurrency/fsm_workloads/distinct.js + - jstests/concurrency/fsm_workloads/distinct_noindex.js + - jstests/concurrency/fsm_workloads/distinct_projection.js + + # Disabled due to SERVER-33753, '.count() without a predicate can be wrong on sharded + # collections'. 
This bug is problematic for these workloads because they assert on count() + # values: + - jstests/concurrency/fsm_workloads/agg_match.js + + # Disabled due to MongoDB restrictions and/or workload restrictions + + # These workloads sometimes trigger 'Could not lock auth data update lock' + # errors because the AuthorizationManager currently waits for only five + # seconds to acquire the lock for authorization documents + - jstests/concurrency/fsm_workloads/auth_create_role.js + - jstests/concurrency/fsm_workloads/auth_create_user.js + - jstests/concurrency/fsm_workloads/auth_drop_role.js + - jstests/concurrency/fsm_workloads/auth_drop_user.js + + # uses >100MB of data, which can overwhelm test hosts + - jstests/concurrency/fsm_workloads/agg_group_external.js + - jstests/concurrency/fsm_workloads/agg_sort_external.js + + # compact can only be run against a standalone mongod + - jstests/concurrency/fsm_workloads/compact.js + - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js + + # convertToCapped can't be run on mongos processes + - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js + - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js + + # findAndModify requires a shard key + - jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js + - jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js + - jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js + - jstests/concurrency/fsm_workloads/findAndModify_update_queue.js + - jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js + + # remove cannot be {} for findAndModify + - jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js + + # can cause OOM kills on test hosts + - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js + + + # cannot createIndex after dropDatabase without sharding first + - jstests/concurrency/fsm_workloads/plan_cache_drop_database.js + + # reIndex is not supported in mongos. + - jstests/concurrency/fsm_workloads/reindex.js + - jstests/concurrency/fsm_workloads/reindex_background.js + - jstests/concurrency/fsm_workloads/reindex_writeconflict.js + + # The WTWriteConflictException failpoint is not supported on mongos. + - jstests/concurrency/fsm_workloads/collmod_writeconflict.js + + # our .remove(query, {justOne: true}) calls lack shard keys + - jstests/concurrency/fsm_workloads/remove_single_document.js + + # cannot use upsert command with $where with sharded collections + - jstests/concurrency/fsm_workloads/upsert_where.js + + # stagedebug can only be run against a standalone mongod + - jstests/concurrency/fsm_workloads/yield_and_hashed.js + - jstests/concurrency/fsm_workloads/yield_and_sorted.js + + # TODO Undenylist (SERVER-38852). + - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js + + # serverStatus does not include transaction metrics on mongos. + - jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js + + # Uses the same transaction id across different routers, which is not allowed because when either + # router tries to commit, it may not know the full participant list. + - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js + + # Inserts directly into system.views using applyOps, which is not available on mongos. 
+ - jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js + + # Time-series collections are not supported on mongos. + - jstests/concurrency/fsm_workloads/create_timeseries_collection.js + + # This test concurrently creates views and collections on the same namespace, which causes issues + # for dbCheck. TODO (SERVER-63951): Remove this exclusion. + - jstests/concurrency/fsm_workloads/create_collection_and_view.js + + + exclude_with_any_tags: + - requires_replication + - assumes_balancer_on + # mongos has no system.profile collection. + - requires_profiling + - assumes_unsharded_collection + +executor: + archive: + hooks: + # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back. + # - RunDBCheckInBackground + - CheckReplDBHashInBackground + - CheckReplDBHash + - CheckMetadataConsistencyInBackground + - ValidateCollections + tests: true + config: + shell_options: + global_vars: + TestData: + runningWithBalancer: false + hooks: + - class: CheckShardFilteringMetadata + # TODO (SERVER-63855): remove 'RunDBCheckInBackground' or put it back. + # - class: RunDBCheckInBackground + - class: CheckReplDBHashInBackground + - class: CheckReplDBHash + - class: CheckMetadataConsistencyInBackground + - class: CheckOrphansDeleted + - class: CheckRoutingTableConsistency + - class: ValidateCollections # Validation can interfere with other operations, so this goes last. + - class: CleanupConcurrencyWorkloads + fixture: + class: ShardedClusterFixture + config_shard: "any" + enable_balancer: false + mongos_options: + set_parameters: + enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + shard_options: + mongod_options: + oplogSize: 1024 + mongod_options: + set_parameters: + enableTestCommands: 1 + roleGraphInvalidationIsFatal: 1 + queryAnalysisWriterIntervalSecs: 1 + num_rs_nodes_per_shard: 3 + num_shards: 2 + num_mongos: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml index 060071fed1c24..6579a095d8654 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns.yml @@ -97,6 +97,7 @@ selector: - jstests/concurrency/fsm_workloads/indexed_insert_upsert.js - jstests/concurrency/fsm_workloads/indexed_insert_where.js - jstests/concurrency/fsm_workloads/list_indexes.js + - jstests/concurrency/fsm_workloads/query_stats_concurrent.js - jstests/concurrency/fsm_workloads/reindex.js - jstests/concurrency/fsm_workloads/reindex_background.js - jstests/concurrency/fsm_workloads/reindex_writeconflict.js @@ -128,6 +129,7 @@ selector: - jstests/concurrency/fsm_workloads/collmod_separate_collections.js - jstests/concurrency/fsm_workloads/collmod_writeconflict.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/invalidated_cursors.js - jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js - jstests/concurrency/fsm_workloads/view_catalog.js @@ -154,6 +156,10 @@ selector: # Time-series collections are not supported on mongos. - jstests/concurrency/fsm_workloads/create_timeseries_collection.js + # The test may spuriously fail when run against sharded clusters, due to limitations of the + # infrastructure. See SERVER-77039 for full details. 
+ - jstests/concurrency/fsm_workloads/map_reduce_drop.js + exclude_with_any_tags: - requires_replication - requires_non_retryable_writes @@ -185,11 +191,11 @@ executor: config_stepdown: true shard_stepdown: true use_action_permitted_file: true + - class: CheckShardFilteringMetadata - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. - class: CleanupConcurrencyWorkloads fixture: @@ -220,7 +226,6 @@ executor: enableTestCommands: 1 enableElectionHandoff: 0 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml index 027380c37b08b..0d869984fc430 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_sharded_with_stepdowns_and_balancer.yml @@ -103,6 +103,7 @@ selector: - jstests/concurrency/fsm_workloads/indexed_insert_upsert.js - jstests/concurrency/fsm_workloads/indexed_insert_where.js - jstests/concurrency/fsm_workloads/list_indexes.js + - jstests/concurrency/fsm_workloads/query_stats_concurrent.js - jstests/concurrency/fsm_workloads/reindex.js - jstests/concurrency/fsm_workloads/reindex_background.js - jstests/concurrency/fsm_workloads/reindex_writeconflict.js @@ -133,6 +134,7 @@ selector: - jstests/concurrency/fsm_workloads/collmod_separate_collections.js - jstests/concurrency/fsm_workloads/collmod_writeconflict.js - jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js + - jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js - jstests/concurrency/fsm_workloads/invalidated_cursors.js - jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js - jstests/concurrency/fsm_workloads/view_catalog.js @@ -159,6 +161,10 @@ selector: # Time-series collections are not supported on mongos. - jstests/concurrency/fsm_workloads/create_timeseries_collection.js + # The test may spuriously fail when run against sharded clusters, due to limitations of the + # infrastructure. See SERVER-77039 for full details. + - jstests/concurrency/fsm_workloads/map_reduce_drop.js + exclude_with_any_tags: - assumes_balancer_off - requires_replication @@ -190,11 +196,11 @@ executor: config_stepdown: true shard_stepdown: true use_action_permitted_file: true + - class: CheckShardFilteringMetadata - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: CheckOrphansDeleted - class: CheckRoutingTableConsistency - - class: CheckShardFilteringMetadata - class: ValidateCollections # Validation can interfere with other operations, so this goes last. 
- class: CleanupConcurrencyWorkloads fixture: @@ -224,7 +230,6 @@ executor: enableTestCommands: 1 enableElectionHandoff: 0 roleGraphInvalidationIsFatal: 1 - receiveChunkWaitForRangeDeleterTimeoutMS: 90000 queryAnalysisWriterIntervalSecs: 1 num_rs_nodes_per_shard: 3 num_shards: 2 diff --git a/buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml b/buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml index 379505a99ea12..f5044cb0c3fbf 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_simultaneous.yml @@ -69,6 +69,6 @@ executor: # system running out of WiredTiger write tickets. We intentionally lower the number of # WiredTiger write tickets available to below the maximum number of database clients to # trigger this situation at least some of the time. - storageEngineConcurrencyAdjustmentAlgorithm: "" + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions" wiredTigerConcurrentWriteTransactions: 64 roleGraphInvalidationIsFatal: 1 diff --git a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication.yml b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication.yml index 7affb118495ce..c711806ffa9c3 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication.yml @@ -94,7 +94,7 @@ executor: # system running out of WiredTiger write tickets. We intentionally lower the number of # WiredTiger write tickets available to below the maximum number of database clients to # trigger this situation at least some of the time. - storageEngineConcurrencyAdjustmentAlgorithm: "" + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions" wiredTigerConcurrentWriteTransactions: 64 roleGraphInvalidationIsFatal: 1 num_nodes: 3 diff --git a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_cursor_sweeps.yml b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_cursor_sweeps.yml index 7fb977e6296f3..9853e9f22b632 100644 --- a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_cursor_sweeps.yml +++ b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_cursor_sweeps.yml @@ -97,7 +97,7 @@ executor: # system running out of WiredTiger write tickets. We intentionally lower the number of # WiredTiger write tickets available to below the maximum number of database clients to # trigger this situation at least some of the time. - storageEngineConcurrencyAdjustmentAlgorithm: "" + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions" wiredTigerConcurrentWriteTransactions: 64 # Setting this parameter to "1" disables cursor caching in WiredTiger, and sets the cache # size to "1" in MongoDB. This forces all resources to be released when done. 
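As a point of reference for the hunks above (and not part of the patch itself): several concurrency suites swap the previously empty `storageEngineConcurrencyAdjustmentAlgorithm` value for `"fixedConcurrentTransactions"` while keeping `wiredTigerConcurrentWriteTransactions` at 64, so the deliberately lowered write-ticket cap stays in force. The sketch below is illustrative only; the keys, fixture class, and values are copied from the hunks above, but the exact file layout is an assumption.

```yaml
# Illustrative sketch only -- not part of this patch.
# Rough shape of a concurrency suite fixture after the parameter change above.
executor:
  fixture:
    class: ReplicaSetFixture   # as used by the simultaneous-replication suites
    num_nodes: 3
    mongod_options:
      oplogSize: 1024
      set_parameters:
        enableTestCommands: 1
        # Pin a fixed ticket pool (instead of the previous empty value) so the
        # intentionally low write-ticket cap below continues to take effect.
        storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions"
        wiredTigerConcurrentWriteTransactions: 64
        roleGraphInvalidationIsFatal: 1
```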
diff --git a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_eviction_debug.yml b/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_eviction_debug.yml deleted file mode 100644 index 12d9de079df3c..0000000000000 --- a/buildscripts/resmokeconfig/suites/concurrency_simultaneous_replication_wiredtiger_eviction_debug.yml +++ /dev/null @@ -1,99 +0,0 @@ -test_kind: parallel_fsm_workload_test - -selector: - roots: - - jstests/concurrency/fsm_workloads/**/*.js - exclude_files: - # These workloads implicitly assume that their tid ranges are [0, $config.threadCount). This - # isn't guaranteed to be true when they are run in parallel with other workloads. - - jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js - - jstests/concurrency/fsm_workloads/list_indexes.js - - jstests/concurrency/fsm_workloads/secondary_reads.js - - jstests/concurrency/fsm_workloads/update_inc_capped.js - - jstests/concurrency/fsm_workloads/update_inc_pipeline.js - # These workloads implicitly assume that their tid ranges are [0, $config.threadCount), as above, - # but additionally require multiple threads to run, which also isn't guaranteed here. - - jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js - - jstests/concurrency/fsm_workloads/create_index_background_wildcard.js - - jstests/concurrency/fsm_workloads/create_index_background.js - - # These workloads uses >100MB of data, which can overwhelm test hosts. - - jstests/concurrency/fsm_workloads/agg_group_external.js - - jstests/concurrency/fsm_workloads/agg_sort_external.js - # The findAndModify_update_grow.js workload can cause OOM kills on test hosts. - - jstests/concurrency/fsm_workloads/findAndModify_update_grow.js - - # convertToCapped requires a global lock and any background operations on the database causes it - # to fail due to not finishing quickly enough. - - jstests/concurrency/fsm_workloads/convert_to_capped_collection.js - - jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js - - # This workload kills random cursors which takes a collection lock. - # TODO: SERVER-39939. - - jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js - - # This workload may restart running transactions on a different client, causing deadlock if - # there is a concurrent dropDatabase waiting for the global X lock. - # TODO: SERVER-37876 - - jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js - - # This workload assumes no locks are taken outside of the workload. - - jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js - - # SERVER-43053 These workloads set a failpoint that causes intermittent WriteConflict errors, - # which presently can cause other simultaneous workloads to fail. - - jstests/concurrency/fsm_workloads/collmod_writeconflict.js - - # These workloads run the compact command, which takes the checkpoint mutex, thus slowing - # checkpoints. This suite also makes checkpoints slower, and the combination can result in - # timeouts. - - jstests/concurrency/fsm_workloads/compact.js - - jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js - - jstests/concurrency/fsm_workloads/reindex_writeconflict.js - - # These workloads run the reIndex command, which is only allowed on a standalone node. 
- - jstests/concurrency/fsm_workloads/reindex.js - - jstests/concurrency/fsm_workloads/reindex_background.js - - exclude_with_any_tags: - - requires_sharding - - kills_random_sessions - - incompatible_with_concurrency_simultaneous - - group_size: 10 - group_count_multiplier: 1.0 - -executor: - archive: - hooks: - - CheckReplDBHashInBackground - - CheckReplDBHash - - ValidateCollections - tests: true - config: - shell_options: - global_vars: - TestData: - skipDropDatabaseOnDatabaseDropPending: true - setShellParameter: skipShellCursorFinalize=true - hooks: - # The CheckReplDBHash hook waits until all operations have replicated to and have been applied - # on the secondaries, so we run the ValidateCollections hook after it to ensure we're - # validating the entire contents of the collection. - - class: CheckReplDBHashInBackground - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanupConcurrencyWorkloads - fixture: - class: ReplicaSetFixture - mongod_options: - oplogSize: 1024 - set_parameters: - # Increase the timeout of the cursor so that the cursor will continue to stay alive even - # when there is a delay in lock acquisition during a getMore command. - cursorTimeoutMillis: 3600000 - enableTestCommands: 1 - # Enable aggressive WiredTiger eviction. - wiredTigerEvictionDebugMode: true - roleGraphInvalidationIsFatal: 1 - num_nodes: 3 diff --git a/buildscripts/resmokeconfig/suites/core.yml b/buildscripts/resmokeconfig/suites/core.yml index 23a08a1b47dae..23f5f30a7ae23 100644 --- a/buildscripts/resmokeconfig/suites/core.yml +++ b/buildscripts/resmokeconfig/suites/core.yml @@ -8,6 +8,8 @@ selector: # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests in the # 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite. - jstests/core/txns/**/*.js + # Queryable encryption is not supported on standalone + - jstests/core/queryable_encryption/**/*.js executor: archive: hooks: diff --git a/buildscripts/resmokeconfig/suites/core_auth.yml b/buildscripts/resmokeconfig/suites/core_auth.yml index f416c77fa0b5f..5359129be969a 100644 --- a/buildscripts/resmokeconfig/suites/core_auth.yml +++ b/buildscripts/resmokeconfig/suites/core_auth.yml @@ -20,6 +20,8 @@ selector: - jstests/core/**/*[aA]uth*.js # Commands using UUIDs are not compatible with name-based auth - jstests/core/**/commands_with_uuid.js + # Queryable encryption is not supported on standalone + - jstests/core/queryable_encryption/**/*.js exclude_with_any_tags: # Multiple users cannot be authenticated on one connection within a session. 
- creates_and_authenticates_user diff --git a/buildscripts/resmokeconfig/suites/core_column_store_indexes.yml b/buildscripts/resmokeconfig/suites/core_column_store_indexes.yml index ccef09e4a248e..b5fba6c7f39a4 100644 --- a/buildscripts/resmokeconfig/suites/core_column_store_indexes.yml +++ b/buildscripts/resmokeconfig/suites/core_column_store_indexes.yml @@ -29,7 +29,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load("jstests/libs/override_methods/hide_column_store_indexes_from_get_indexes.js"); hooks: - class: ValidateCollections diff --git a/buildscripts/resmokeconfig/suites/core_ese.yml b/buildscripts/resmokeconfig/suites/core_ese.yml index 355dba8f6b6b9..114d6e6a7e712 100644 --- a/buildscripts/resmokeconfig/suites/core_ese.yml +++ b/buildscripts/resmokeconfig/suites/core_ese.yml @@ -10,6 +10,9 @@ selector: exclude_files: # Transactions are not supported on MongoDB standalone nodes. - jstests/core/txns/**/*.js + # Queryable encryption is not supported on standalone + - jstests/core/queryable_encryption/**/*.js + exclude_with_any_tags: - does_not_support_encrypted_storage_engine executor: diff --git a/buildscripts/resmokeconfig/suites/core_txns.yml b/buildscripts/resmokeconfig/suites/core_txns.yml index c396fd7027b2b..a6d73f5785f0c 100644 --- a/buildscripts/resmokeconfig/suites/core_txns.yml +++ b/buildscripts/resmokeconfig/suites/core_txns.yml @@ -19,7 +19,7 @@ executor: - ValidateCollections config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set # node. diff --git a/buildscripts/resmokeconfig/suites/core_txns_large_txns_format.yml b/buildscripts/resmokeconfig/suites/core_txns_large_txns_format.yml index a0121a51f3501..e3d5a3be14a3d 100644 --- a/buildscripts/resmokeconfig/suites/core_txns_large_txns_format.yml +++ b/buildscripts/resmokeconfig/suites/core_txns_large_txns_format.yml @@ -20,7 +20,7 @@ executor: - ValidateCollections config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set # node. diff --git a/buildscripts/resmokeconfig/suites/core_wildcard_indexes.yml b/buildscripts/resmokeconfig/suites/core_wildcard_indexes.yml index f13dc111743fa..29342de466d1a 100644 --- a/buildscripts/resmokeconfig/suites/core_wildcard_indexes.yml +++ b/buildscripts/resmokeconfig/suites/core_wildcard_indexes.yml @@ -30,6 +30,9 @@ selector: - jstests/core/index/wildcard/compound_wildcard_index_hint.js # This test expects a certain number of indexes at the start. - jstests/core/administrative/check_shard_index.js + # This test checks explain, and creating an implicit index results in the following failure: + # "IndexOptionsConflict" that generates an unexpected plan. + - jstests/core/query/or_use_clustered_collection.js # Latency histogram statistics are affected by the creation of an implicit index. 
- jstests/core/top.js # Creating an implicit index results in the following failure: "add index fails, too many indexes @@ -58,7 +61,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load("jstests/libs/override_methods/implicit_wildcard_indexes.js"); hooks: - class: ValidateCollections diff --git a/buildscripts/resmokeconfig/suites/cqf.yml b/buildscripts/resmokeconfig/suites/cqf.yml index 67a5281be5c1e..71718dc170413 100644 --- a/buildscripts/resmokeconfig/suites/cqf.yml +++ b/buildscripts/resmokeconfig/suites/cqf.yml @@ -30,4 +30,5 @@ executor: set_parameters: enableTestCommands: 1 featureFlagCommonQueryFramework: true + internalQueryCardinalityEstimatorMode: "sampling" internalQueryFrameworkControl: "forceBonsai" diff --git a/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml b/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml index 3dc5fbb67ee72..df27b8b9511ae 100644 --- a/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml +++ b/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml @@ -33,7 +33,7 @@ executor: set_parameters: enableTestCommands: 1 featureFlagCommonQueryFramework: true - # This flag disables the fallback path that may hide bugs in CQF. + internalQueryCardinalityEstimatorMode: "sampling" internalQueryFrameworkControl: "forceBonsai" failpoint.disablePipelineOptimization: mode: alwaysOn diff --git a/buildscripts/resmokeconfig/suites/cqf_experimental_aggregation_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_experimental_aggregation_passthrough.yml new file mode 100644 index 0000000000000..df0e8f7d7c9b6 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/cqf_experimental_aggregation_passthrough.yml @@ -0,0 +1,32 @@ +# This is equivalent to the aggregation suite, but runs with experimental CQF features enabled. + +test_kind: js_test + +selector: + roots: + - jstests/aggregation/**/*.js + exclude_files: + - jstests/aggregation/extras/*.js + - jstests/aggregation/data/*.js + exclude_with_any_tags: + - cqf_experimental_incompatible + - cqf_incompatible + +executor: + archive: + hooks: + - ValidateCollections + config: + shell_options: + eval: | + load("jstests/libs/override_methods/detect_spawning_own_mongod.js"); + load("jstests/libs/set_try_bonsai_experimental.js"); + hooks: + - class: ValidateCollections + - class: CleanEveryN + n: 20 + fixture: + class: MongoDFixture + mongod_options: + set_parameters: + enableTestCommands: 1 diff --git a/buildscripts/resmokeconfig/suites/cqf_experimental_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_experimental_jscore_passthrough.yml new file mode 100644 index 0000000000000..538506c7ab296 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/cqf_experimental_jscore_passthrough.yml @@ -0,0 +1,42 @@ +# This is equivalent to the core suite, but runs with experimental CQF features enabled. + +test_kind: js_test + +selector: + roots: + - jstests/core/**/*.js + - jstests/core_standalone/**/*.js + exclude_files: + # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests in the + # 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite. 
+ - jstests/core/txns/**/*.js + # Queryable encryption is not supported on standalone + - jstests/core/queryable_encryption/**/*.js + exclude_with_any_tags: + - cqf_experimental_incompatible + - cqf_incompatible + +executor: + archive: + hooks: + - ValidateCollections + config: + shell_options: + crashOnInvalidBSONError: "" + objcheck: "" + eval: | + load("jstests/libs/override_methods/detect_spawning_own_mongod.js"); + load("jstests/libs/set_try_bonsai_experimental.js"); + hooks: + - class: ValidateCollections + shell_options: + global_vars: + TestData: + skipValidationOnNamespaceNotFound: false + - class: CleanEveryN + n: 20 + fixture: + class: MongoDFixture + mongod_options: + set_parameters: + enableTestCommands: 1 diff --git a/buildscripts/resmokeconfig/suites/cqf_experimental_no_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_experimental_no_passthrough.yml new file mode 100644 index 0000000000000..1b1aa6785d6b2 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/cqf_experimental_no_passthrough.yml @@ -0,0 +1,33 @@ +# This is equivalent to the noPassthrough suite, but runs with experimental CQF features enabled. + +test_kind: js_test + +selector: + roots: + - jstests/noPassthrough/**/*.js + - src/mongo/db/modules/*/jstests/hot_backups/*.js + - src/mongo/db/modules/*/jstests/live_import/*.js + - src/mongo/db/modules/*/jstests/no_passthrough/*.js + + # Self-tests for the Concurrency testing framework are run as part of this test suite. + - jstests/concurrency/*.js + exclude_files: + - jstests/noPassthrough/libs/*.js + # Disable inmem_full as per SERVER-27014 + - jstests/noPassthrough/inmem_full.js + exclude_with_any_tags: + - cqf_experimental_incompatible + - cqf_incompatible + +# noPassthrough tests start their own mongod's. +executor: + archive: + tests: + - jstests/noPassthrough/backup*.js + - jstests/noPassthrough/oplog_writes_only_permitted_on_standalone.js + - jstests/noPassthrough/wt_unclean_shutdown.js + - src/mongo/db/modules/enterprise/jstests/hot_backups/*.js + config: + shell_options: + nodb: '' + eval: load("jstests/libs/set_try_bonsai_experimental.js"); diff --git a/buildscripts/resmokeconfig/suites/cqf_parallel.yml b/buildscripts/resmokeconfig/suites/cqf_parallel.yml index 9c8456853c6b9..bed4f702a8359 100644 --- a/buildscripts/resmokeconfig/suites/cqf_parallel.yml +++ b/buildscripts/resmokeconfig/suites/cqf_parallel.yml @@ -29,8 +29,9 @@ executor: set_parameters: enableTestCommands: 1 featureFlagCommonQueryFramework: true + internalQueryCardinalityEstimatorMode: "sampling" internalQueryFrameworkControl: "forceBonsai" internalQueryDefaultDOP: 5 # TODO: SERVER-75423: Allow exchange to work independently on the storage concurrency settings. - storageEngineConcurrencyAdjustmentAlgorithm: "" + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions" storageEngineConcurrentReadTransactions: 128 diff --git a/buildscripts/resmokeconfig/suites/cqf_passthrough.yml b/buildscripts/resmokeconfig/suites/cqf_passthrough.yml deleted file mode 100644 index 3443f8a7522c5..0000000000000 --- a/buildscripts/resmokeconfig/suites/cqf_passthrough.yml +++ /dev/null @@ -1,37 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/aggregation/**/*.js - - jstests/core/**/*.js - exclude_with_any_tags: - - cqf_incompatible - exclude_files: - # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests in the - # 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite. 
- - jstests/core/txns/**/*.js - -executor: - archive: - hooks: - - ValidateCollections - config: - shell_options: - crashOnInvalidBSONError: "" - objcheck: "" - eval: load("jstests/libs/override_methods/detect_spawning_own_mongod.js"); - hooks: - - class: ValidateCollections - shell_options: - global_vars: - TestData: - skipValidationOnNamespaceNotFound: false - - class: CleanEveryN - n: 20 - fixture: - class: MongoDFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - featureFlagCommonQueryFramework: true - internalQueryFrameworkControl: "tryBonsai" diff --git a/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml index 0bb4882ae5cab..3e91db56e7484 100755 --- a/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/cst_jscore_passthrough.yml @@ -14,6 +14,9 @@ selector: # 'core' suite. Instead we run them against a 1-node replica set in the 'core_txns' suite. - jstests/core/txns/**/*.js + # Queryable encryption is not supported on standalone + - jstests/core/queryable_encryption/**/*.js + # These tests produce different error codes depending on which parser implementation. - jstests/core/**/sort_with_meta_operator.js diff --git a/buildscripts/resmokeconfig/suites/cwrwc_passthrough.yml b/buildscripts/resmokeconfig/suites/cwrwc_passthrough.yml index fc20fd90aef37..aa2c3ea414223 100644 --- a/buildscripts/resmokeconfig/suites/cwrwc_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/cwrwc_passthrough.yml @@ -18,7 +18,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; hooks: - class: ValidateCollections - class: CleanEveryN diff --git a/buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml index a75f65281ea43..0c80e5610749e 100644 --- a/buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/cwrwc_rc_majority_passthrough.yml @@ -36,7 +36,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied # on the secondaries, so we run the ValidateCollections hook after it to ensure we're diff --git a/buildscripts/resmokeconfig/suites/cwrwc_wc_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/cwrwc_wc_majority_passthrough.yml index 5941432e45f93..8eadccbde7c18 100644 --- a/buildscripts/resmokeconfig/suites/cwrwc_wc_majority_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/cwrwc_wc_majority_passthrough.yml @@ -46,7 +46,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_preference_secondary.js'); hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied diff --git a/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_replica_sets_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_replica_sets_jscore_passthrough.yml new file mode 100644 index 0000000000000..435ba271cef78 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_replica_sets_jscore_passthrough.yml @@ -0,0 +1,116 @@ +test_kind: js_test + +# Cloned from buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml +# to run FCV 
upgrade downgrade in the background + +selector: + roots: + - jstests/core/**/*.js + - jstests/fle2/**/*.js + - jstests/aggregation/**/*.js + - src/mongo/db/modules/*/jstests/fle/**/*.js + exclude_with_any_tags: + - assumes_standalone_mongod + # columnstore indexes are under development and cannot be used without enabling the feature flag + - featureFlagColumnstoreIndexes + # TODO SERVER-68303 Remove this tag. + - featureFlagCompoundWildcardIndexes + # TODO SERVER-52419 Remove this tag. + - featureFlagBulkWriteCommand + - featureFlagFLE2CleanupCommand + # Transactions are aborted upon fcv upgrade or downgrade. + - uses_transactions + # Exclude tests with the latest fcv. + - requires_fcv_71 + + exclude_files: + # The set_param1.js test attempts to compare the response from running the {getParameter: "*"} + # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds" + # server parameter. + - jstests/core/**/set_param1.js + # Different explain format + - jstests/core/**/or_to_in.js + # SERVER-34772 Tailable Cursors are not allowed with snapshot readconcern. + - jstests/core/**/awaitdata_getmore_cmd.js + - jstests/core/**/getmore_cmd_maxtimems.js + - jstests/core/**/tailable_cursor_invalidation.js + - jstests/core/**/tailable_getmore_batch_size.js + + # TODO (SERVER-78220): Investigate failing api version tests in the fcv jscore passthrough suite. + - jstests/core/api/api_version_unstable_indexes.js + + # TODO (SERVER-78202): Investigate failing timeseries tests in the fcv jscore passthrough suite. + - jstests/core/timeseries/timeseries_update.js + - jstests/core/timeseries/timeseries_update_concurrent.js + - jstests/core/timeseries/timeseries_update_one.js + - jstests/core/timeseries/timeseries_update_multi.js + - jstests/core/timeseries/timeseries_find_and_modify_update.js + - jstests/core/timeseries/timeseries_delete_compressed_buckets.js + - jstests/core/timeseries/timeseries_bucket_limit_count.js + + # These use "columnstore indexes are under development and cannot be used without enabling the feature flag" + - jstests/core/query/null_query_semantics.js + - jstests/core/query/project/projection_semantics.js + - jstests/core/index/hidden_index.js + + # TODO: Remove after fixing SERVER-78201: the following received command without explicit readConcern. + - jstests/aggregation/sources/densify/internal_parse.js + - jstests/aggregation/api_version_stage_allowance_checks.js + + # TODO (SERVER-78200): The tests below sometimes hang when they run concurrently with a setFCV command. + - src/mongo/db/modules/enterprise/jstests/fle/fle_admin_e2e.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_implicit_encryption.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_use_cases.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_drivers_integration.js + + # The test uses a resumeToken from previous calls so FCV change will make the token invalid and fail. + - jstests/core/resume_query_from_non_existent_record.js + - jstests/core/resume_query.js + - jstests/core/find_with_resume_after_param.js + + # TODO (SERVER-78417): Mongod invariant while running bucket_timestamp_rounding.js with new fcv upgrade downgrade suite. + - jstests/core/timeseries/bucket_timestamp_rounding.js + - jstests/core/timeseries/timeseries_filter_extended_range.js + + # Expected failure due to command count being not precise due to potential retry of index build. 
+ - jstests/core/operation_latency_histogram.js + + # Expected failures due to unexpected query execution stats from restarted operations from fcv upgrade. + - jstests/aggregation/sources/lookup/lookup_query_stats.js + - jstests/aggregation/sources/facet/facet_stats.js + - jstests/aggregation/sources/unionWith/unionWith_query_stats.js + + # The tests below use transactions (which are aborted on fcv upgrade/downgrade) and thus are expected to fail. + - jstests/core/role_management_helpers.js + - jstests/core/roles_info.js + - jstests/core/views/views_all_commands.js + + # Queryable encryption uses internal transactions (which are aborted on fcv upgrade/downgrade) + - jstests/core/queryable_encryption/**/*.js + +executor: + archive: + hooks: + - CheckReplDBHash + - CheckReplOplogs + - ValidateCollections + - FCVUpgradeDowngradeInBackground + config: + shell_options: + eval: >- + globalThis.testingReplication = true; + load("jstests/libs/override_methods/retry_aborted_db_and_index_creation.js"); + hooks: + - class: CheckReplOplogs + - class: CheckReplDBHash + - class: ValidateCollections + - class: FCVUpgradeDowngradeInBackground + - class: CleanEveryN + n: 20 + fixture: + class: ReplicaSetFixture + mongod_options: + set_parameters: + enableTestCommands: 1 + disableTransitionFromLatestToLastContinuous: False + num_nodes: 2 diff --git a/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharded_collections_jscore_passthrough.yml new file mode 100644 index 0000000000000..33ace6259369d --- /dev/null +++ b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharded_collections_jscore_passthrough.yml @@ -0,0 +1,206 @@ +test_kind: js_test + +# Cloned from buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml +# to run FCV upgrade downgrade in the background + +selector: + roots: + - jstests/core/**/*.js + - jstests/fle2/**/*.js + - jstests/aggregation/**/*.js + - src/mongo/db/modules/*/jstests/fle/**/*.js + + exclude_files: + # The following tests fail because a certain command or functionality is not supported on + # mongos. This command or functionality is placed in a comment next to the failing test. + - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. + - jstests/core/**/awaitdata_getmore_cmd.js # capped collections. + - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted + - jstests/core/**/check_shard_index.js # checkShardingIndex. + - jstests/core/**/collection_truncate.js # emptycapped. + - jstests/core/**/compact_keeps_indexes.js # compact. + - jstests/core/**/currentop.js # uses fsync. + - jstests/core/**/dbhash.js # dbhash. + - jstests/core/**/dbhash2.js # dbhash. + - jstests/core/**/fsync.js # uses fsync. + - jstests/core/**/geo_s2cursorlimitskip.js # profiling. + - jstests/core/**/geo_update_btree2.js # notablescan. + - jstests/core/**/index9.js # "local" database. + - jstests/core/**/queryoptimizera.js # "local" database. + - jstests/core/**/stages*.js # stageDebug. + - jstests/core/**/startup_log.js # "local" database. + - jstests/core/**/tailable_cursor_invalidation.js # capped collections. + - jstests/core/**/tailable_getmore_batch_size.js # capped collections. + - jstests/core/**/tailable_skip_limit.js # capped collections. + - jstests/core/**/top.js # top. + # The following tests fail because mongos behaves differently from mongod when testing certain + # functionality. 
The differences are in a comment next to the failing test. + - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047. + - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). + - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). + - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). + - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. + # The following tests fail because they count indexes. These counts do not take into account the + # additional hashed shard key indexes that are automatically added by this passthrough. + - jstests/core/**/apitest_dbcollection.js + - jstests/core/**/bad_index_plugin.js + - jstests/core/**/create_indexes.js + - jstests/core/**/list_indexes_non_existent_ns.js + - jstests/core/**/mr_preserve_indexes.js + # The following tests fail because they expect no databases to be created. However a DB is created + # automatically when we shard a collection. + - jstests/core/**/dbcase.js + - jstests/core/**/dbcase2.js + - jstests/core/**/no_db_created.js + - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. + # These tests fail because sharded clusters do not clean up correctly after failed index builds. + # See SERVER-33207 as an example. + - jstests/core/**/geo_borders.js + # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded + # queries with a limit or for distinct commands. + - jstests/core/**/distinct_index1.js + - jstests/core/**/explain1.js + - jstests/core/**/explain4.js + - jstests/core/**/sortk.js + # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is + # incorrect on sharded collections. + - jstests/core/**/explain_count.js + - jstests/core/**/explain_server_params.js + # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output. + - jstests/core/**/expr_index_use.js + - jstests/core/**/index_multikey.js + - jstests/core/**/optimized_match_explain.js + - jstests/core/**/sort_array.js + + # Excludes from fcv_upgrade_downgrade_jscore_passthrough.yml + # + # TODO (SERVER-78220): Investigate failing api version tests in the fcv jscore passthrough suite. + - jstests/core/api/api_version_unstable_indexes.js + + # TODO (SERVER-78202): Investigate failing timeseries tests in the fcv jscore passthrough suite. + - jstests/core/timeseries/timeseries_update.js + - jstests/core/timeseries/timeseries_update_concurrent.js + - jstests/core/timeseries/timeseries_update_one.js + - jstests/core/timeseries/timeseries_update_multi.js + - jstests/core/timeseries/timeseries_find_and_modify_update.js + - jstests/core/timeseries/timeseries_delete_compressed_buckets.js + - jstests/core/timeseries/timeseries_bucket_limit_count.js + + # These use "columnstore indexes are under development and cannot be used without enabling the feature flag" + - jstests/core/query/null_query_semantics.js + - jstests/core/query/project/projection_semantics.js + - jstests/core/index/hidden_index.js + + # TODO: Remove after fixing SERVER-78201: the following received command without explicit readConcern. + - jstests/aggregation/sources/densify/internal_parse.js + - jstests/aggregation/api_version_stage_allowance_checks.js + + # TODO (SERVER-78200): The tests below sometimes hang when they run concurrently with a setFCV command. 
+ - src/mongo/db/modules/enterprise/jstests/fle/fle_admin_e2e.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_implicit_encryption.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_use_cases.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_drivers_integration.js + + # The test uses a resumeToken from previous calls so FCV change will make the token invalid and fail. + - jstests/core/resume_query_from_non_existent_record.js + - jstests/core/resume_query.js + - jstests/core/find_with_resume_after_param.js + + # TODO (SERVER-78417): Mongod invariant while running bucket_timestamp_rounding.js with new fcv upgrade downgrade suite. + - jstests/core/timeseries/bucket_timestamp_rounding.js + - jstests/core/timeseries/timeseries_explicit_unpack_bucket.js + + # Expected failure due to command count being not precise due to potential retry of index build. + - jstests/core/operation_latency_histogram.js + + # Expected failures due to unexpected query execution stats from restarted operations from fcv upgrade. + - jstests/aggregation/sources/lookup/lookup_query_stats.js + - jstests/aggregation/sources/facet/facet_stats.js + - jstests/aggregation/sources/unionWith/unionWith_query_stats.js + + # The tests below use transactions (which are aborted on fcv upgrade/downgrade) and thus are expected to fail. + - jstests/core/role_management_helpers.js + - jstests/core/roles_info.js + - jstests/core/views/views_all_commands.js + + # Sharding specific failed: + # + # $unionWith explain output does not check whether the collection is sharded in a sharded + # cluster. + - jstests/aggregation/sources/unionWith/unionWith_explain.js + # Cannot specify runtime constants option to a mongos + - jstests/aggregation/expressions/internal_js_emit_with_scope.js + - jstests/aggregation/accumulators/internal_js_reduce_with_scope.js + # The tests below depend on internal transactions which are expected to get interrupted during an FCV upgrade or downgrade, but they do not have the 'uses_transactions' tag so are not excluded by default in this suite. + - jstests/aggregation/sources/lookup/lookup_non_correlated.js + - jstests/core/ddl/collection_uuid_index_commands.js + # Queryable encryption tests create internal transactions which are expected to fail. + - jstests/core/queryable_encryption/*.js + # TODO (SERVER-78753): setFeatureCompatibilityVersion times out waiting for replication + - jstests/core/transaction_too_large_for_cache.js + # TODO (SERVER-77910): Find out why spill_to_disk.js fails to spill to disk after FCV down. + - jstests/aggregation/spill_to_disk.js + # TODO (SERVER-32311): These tests use getAggPlanStage(), which can't handle sharded explain output. + - jstests/aggregation/match_swapping_renamed_fields.js + - jstests/aggregation/use_query_project_and_sort.js + - jstests/aggregation/use_query_projection.js + - jstests/aggregation/use_query_sort.js + # TODO: Remove when SERVER-23229 is fixed. + - jstests/aggregation/bugs/groupMissing.js + - jstests/aggregation/sources/graphLookup/variables.js + # TODO (SERVER-77935): Investigate timeout from fcv downgrade in jstests/core/query/push/push2.js. + - jstests/core/query/push/push2.js + - jstests/core/write/update/update_addToSet2.js + + exclude_with_any_tags: + - assumes_standalone_mongod + - assumes_against_mongod_not_mongos + # Tests tagged with the following will fail because they assume collections are not sharded. 
+ - assumes_no_implicit_collection_creation_after_drop + - assumes_no_implicit_index_creation + - assumes_unsharded_collection + - cannot_create_unique_index_when_using_hashed_shard_key + # system.profile collection doesn't exist on mongos. + - requires_profiling + # columnstore indexes are under development and cannot be used without enabling the feature flag + - featureFlagColumnstoreIndexes + # TODO SERVER-52419 Remove this tag. + - featureFlagBulkWriteCommand + - featureFlagFLE2CleanupCommand + # Transactions are aborted upon fcv upgrade or downgrade. + - uses_transactions + # Exclude tests with the latest fcv. + - requires_fcv_71 + +executor: + archive: + hooks: + - CheckReplDBHash + - CheckMetadataConsistencyInBackground + - ValidateCollections + - FCVUpgradeDowngradeInBackground + config: + shell_options: + eval: >- + load("jstests/libs/override_methods/retry_aborted_db_and_index_creation.js"); + load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js"); + hooks: + - class: CheckReplDBHash + - class: CheckMetadataConsistencyInBackground + - class: ValidateCollections + - class: FCVUpgradeDowngradeInBackground + - class: CleanEveryN + n: 20 + fixture: + class: ShardedClusterFixture + num_shards: 2 + enable_balancer: false + mongos_options: + set_parameters: + enableTestCommands: 1 + disableTransitionFromLatestToLastContinuous: False + mongod_options: + set_parameters: + enableTestCommands: 1 + disableTransitionFromLatestToLastContinuous: False + num_rs_nodes_per_shard: 2 diff --git a/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharding_jscore_passthrough.yml new file mode 100644 index 0000000000000..314d1a504d9aa --- /dev/null +++ b/buildscripts/resmokeconfig/suites/fcv_upgrade_downgrade_sharding_jscore_passthrough.yml @@ -0,0 +1,160 @@ +test_kind: js_test + +# Cloned from buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml +# to run FCV upgrade downgrade in the background + +selector: + roots: + - jstests/core/**/*.js + - jstests/fle2/**/*.js + - jstests/aggregation/**/*.js + - src/mongo/db/modules/*/jstests/fle/**/*.js + + exclude_files: + # The following tests fail because a certain command or functionality is not supported on + # mongos. This command or functionality is placed in a comment next to the failing test. + - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. + - jstests/core/**/check_shard_index.js # checkShardingIndex. + - jstests/core/**/collection_truncate.js # emptycapped. + - jstests/core/**/compact_keeps_indexes.js # compact. + - jstests/core/**/currentop.js # uses fsync. + - jstests/core/**/dbhash.js # dbhash. + - jstests/core/**/dbhash2.js # dbhash. + - jstests/core/**/fsync.js # uses fsync. + - jstests/core/**/geo_s2cursorlimitskip.js # profiling. + - jstests/core/**/geo_update_btree2.js # notablescan. + - jstests/core/**/index9.js # "local" database. + - jstests/core/**/queryoptimizera.js # "local" database. + - jstests/core/**/stages*.js # stageDebug. + - jstests/core/**/startup_log.js # "local" database. + - jstests/core/**/top.js # top. + # The following tests fail because mongos behaves differently from mongod when testing certain + # functionality. The differences are in a comment next to the failing test. + - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos. + - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). 
+ - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). + - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). + - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. + - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. + - jstests/core/**/or_to_in.js # queryPlanner in different spot in explain() + # The following tests fail because of divergent dropCollection behavior between standalones and + # sharded clusters. These tests expect a second drop command to error, whereas in sharded clusters + # we expect a second drop to return status OK. + - jstests/core/**/explain_upsert.js + + # Excludes from fcv_upgrade_downgrade_jscore_passthrough.yml + # + # TODO (SERVER-78220): Investigate failing api version tests in the fcv jscore passthrough suite. + - jstests/core/api/api_version_unstable_indexes.js + + # TODO (SERVER-78202): Investigate failing timeseries tests in the fcv jscore passthrough suite. + - jstests/core/timeseries/timeseries_update.js + - jstests/core/timeseries/timeseries_update_concurrent.js + - jstests/core/timeseries/timeseries_update_one.js + - jstests/core/timeseries/timeseries_update_multi.js + - jstests/core/timeseries/timeseries_find_and_modify_update.js + - jstests/core/timeseries/timeseries_delete_compressed_buckets.js + - jstests/core/timeseries/timeseries_bucket_limit_count.js + + # These use "columnstore indexes are under development and cannot be used without enabling the feature flag" + - jstests/core/query/null_query_semantics.js + - jstests/core/query/project/projection_semantics.js + - jstests/core/index/hidden_index.js + + # TODO: Remove after fixing SERVER-78201: the following received command without explicit readConcern. + - jstests/aggregation/sources/densify/internal_parse.js + - jstests/aggregation/api_version_stage_allowance_checks.js + + # TODO (SERVER-78200): The tests below sometimes hang when they run concurrently with a setFCV command. + - src/mongo/db/modules/enterprise/jstests/fle/fle_admin_e2e.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_implicit_encryption.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_use_cases.js + - src/mongo/db/modules/enterprise/jstests/fle/fle_drivers_integration.js + + # The test uses a resumeToken from previous calls so FCV change will make the token invalid and fail. + - jstests/core/resume_query_from_non_existent_record.js + - jstests/core/resume_query.js + - jstests/core/find_with_resume_after_param.js + + # TODO (SERVER-78417): Mongod invariant while running bucket_timestamp_rounding.js with new fcv upgrade downgrade suite. + - jstests/core/timeseries/bucket_timestamp_rounding.js + - jstests/core/timeseries/timeseries_explicit_unpack_bucket.js + + # Expected failure due to command count being not precise due to potential retry of index build. + - jstests/core/operation_latency_histogram.js + + # Expected failures due to unexpected query execution stats from restarted operations from fcv upgrade. + - jstests/aggregation/sources/lookup/lookup_query_stats.js + - jstests/aggregation/sources/facet/facet_stats.js + - jstests/aggregation/sources/unionWith/unionWith_query_stats.js + + # The tests below use transactions (which are aborted on fcv upgrade/downgrade) and thus are expected to fail. 
+ - jstests/core/role_management_helpers.js + - jstests/core/roles_info.js + - jstests/core/views/views_all_commands.js + + # Sharding specific failed: + # + # $unionWith explain output does not check whether the collection is sharded in a sharded + # cluster. + - jstests/aggregation/sources/unionWith/unionWith_explain.js + # Cannot specify runtime constants option to a mongos + - jstests/aggregation/expressions/internal_js_emit_with_scope.js + - jstests/aggregation/accumulators/internal_js_reduce_with_scope.js + # The tests below depend on internal transactions which are expected to get interrupted during an FCV upgrade or downgrade, but they do not have the 'uses_transactions' tag so are not excluded by default in this suite. + - jstests/aggregation/sources/lookup/lookup_non_correlated.js + - jstests/core/ddl/collection_uuid_index_commands.js + # Queryable encryption tests create internal transactions which are expected to fail. + - jstests/core/queryable_encryption/*.js + # TODO (SERVER-78753): setFeatureCompatibilityVersion times out waiting for replication + - jstests/core/transaction_too_large_for_cache.js + # TODO (SERVER-77910): Find out why spill_to_disk.js fails to spill to disk after FCV down. + - jstests/aggregation/spill_to_disk.js + + exclude_with_any_tags: + - assumes_standalone_mongod + - assumes_against_mongod_not_mongos + # system.profile collection doesn't exist on mongos. + - requires_profiling + # columnstore indexes are under development and cannot be used without enabling the feature flag + - featureFlagColumnstoreIndexes + # TODO SERVER-52419 Remove this tag. + - featureFlagBulkWriteCommand + - featureFlagFLE2CleanupCommand + # Transactions are aborted upon fcv upgrade or downgrade. + - uses_transactions + # Exclude tests with the latest fcv. + - requires_fcv_71 + +executor: + archive: + hooks: + - CheckReplDBHash + - CheckMetadataConsistencyInBackground + - ValidateCollections + - FCVUpgradeDowngradeInBackground + config: + shell_options: + eval: >- + load("jstests/libs/override_methods/retry_aborted_db_and_index_creation.js"); + hooks: + - class: CheckReplDBHash + - class: CheckMetadataConsistencyInBackground + - class: ValidateCollections + - class: FCVUpgradeDowngradeInBackground + - class: CleanEveryN + n: 20 + fixture: + class: ShardedClusterFixture + num_shards: 1 + mongos_options: + set_parameters: + enableTestCommands: 1 + disableTransitionFromLatestToLastContinuous: False + mongod_options: + set_parameters: + enableTestCommands: 1 + disableTransitionFromLatestToLastContinuous: False + num_rs_nodes_per_shard: 2 + enable_sharding: + - test diff --git a/buildscripts/resmokeconfig/suites/fle2.yml b/buildscripts/resmokeconfig/suites/fle2.yml index c109b6542e4eb..fa8d55e2f6b83 100644 --- a/buildscripts/resmokeconfig/suites/fle2.yml +++ b/buildscripts/resmokeconfig/suites/fle2.yml @@ -11,10 +11,6 @@ executor: archive: hooks: - ValidateCollections - config: - shell_options: - eval: "testingReplication = true; testingFLE2Range = true;" - setShellParameter: featureFlagFLE2Range=true hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set # node. @@ -26,6 +22,5 @@ executor: mongod_options: set_parameters: enableTestCommands: 1 - featureFlagFLE2Range: true # Use a 2-node replica set. 
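The three new fcv_upgrade_downgrade_*_jscore_passthrough suites above share the same wiring: each clones an existing passthrough, adds the FCVUpgradeDowngradeInBackground hook so FCV upgrades and downgrades run in the background while tests execute, loads jstests/libs/override_methods/retry_aborted_db_and_index_creation.js in the shell, and excludes anything that cannot survive an FCV transition (uses_transactions, requires_fcv_71, plus the per-ticket lists above). Below is a minimal sketch of that common wiring for the replica-set variant, distilled from the hunks above; it is illustrative only, not part of this patch.

```yaml
# Illustrative sketch (not part of this patch): the wiring common to the new
# fcv_upgrade_downgrade_* suites. The background hook changes the FCV while the
# tests run, and the shell override retries db/index creation aborted by it.
test_kind: js_test

selector:
  roots:
    - jstests/core/**/*.js
  exclude_with_any_tags:
    - uses_transactions   # transactions are aborted upon FCV upgrade or downgrade
    - requires_fcv_71     # excludes tests that require the latest FCV

executor:
  archive:
    hooks:
      - ValidateCollections
      - FCVUpgradeDowngradeInBackground
  config:
    shell_options:
      eval: >-
        globalThis.testingReplication = true;
        load("jstests/libs/override_methods/retry_aborted_db_and_index_creation.js");
  hooks:
    - class: ValidateCollections
    - class: FCVUpgradeDowngradeInBackground
    - class: CleanEveryN
      n: 20
  fixture:
    class: ReplicaSetFixture
    mongod_options:
      set_parameters:
        enableTestCommands: 1
        disableTransitionFromLatestToLastContinuous: False
    num_nodes: 2
```

The sharded variants swap in ShardedClusterFixture, add the CheckMetadataConsistencyInBackground hook, and apply the same set_parameters to both mongos_options and mongod_options, as the two sharded hunks above show.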
num_nodes: 2 diff --git a/buildscripts/resmokeconfig/suites/fle2_high_cardinality.yml b/buildscripts/resmokeconfig/suites/fle2_high_cardinality.yml index d9176f718b838..8b1582ec19f7f 100644 --- a/buildscripts/resmokeconfig/suites/fle2_high_cardinality.yml +++ b/buildscripts/resmokeconfig/suites/fle2_high_cardinality.yml @@ -12,10 +12,6 @@ executor: archive: hooks: - ValidateCollections - config: - shell_options: - eval: "testingReplication = true; testingFLE2Range = true;" - setShellParameter: featureFlagFLE2Range=true hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set # node. @@ -27,7 +23,6 @@ executor: mongod_options: set_parameters: enableTestCommands: 1 - featureFlagFLE2Range: true internalQueryFLEAlwaysUseEncryptedCollScanMode: 1 # Use a 2-node replica set. num_nodes: 2 diff --git a/buildscripts/resmokeconfig/suites/fle2_query_analysis.yml b/buildscripts/resmokeconfig/suites/fle2_query_analysis.yml index 894cef89d1d37..cabc1903d9526 100644 --- a/buildscripts/resmokeconfig/suites/fle2_query_analysis.yml +++ b/buildscripts/resmokeconfig/suites/fle2_query_analysis.yml @@ -18,5 +18,3 @@ executor: global_vars: TestData: useFle2Protocol: true - setParametersMongocryptd: - featureFlagFLE2Range: true diff --git a/buildscripts/resmokeconfig/suites/fle2_sharding.yml b/buildscripts/resmokeconfig/suites/fle2_sharding.yml index c4658aedca13c..76c6f9a061fec 100644 --- a/buildscripts/resmokeconfig/suites/fle2_sharding.yml +++ b/buildscripts/resmokeconfig/suites/fle2_sharding.yml @@ -3,6 +3,8 @@ selector: roots: - jstests/fle2/*.js - src/mongo/db/modules/*/jstests/fle2/**/*.js + exclude_files: + - src/mongo/db/modules/enterprise/jstests/fle2/bulk_write_insert.js # TODO SERVER-77497 to support mongos exclude_with_any_tags: # Don't run tests that require the encrypted collscan mode in this suite. - requires_fle2_encrypted_collscan @@ -12,13 +14,10 @@ executor: hooks: - CheckReplDBHash - ValidateCollections - config: - shell_options: - eval: "testingReplication = false; testingFLE2Range = true;" - setShellParameter: featureFlagFLE2Range=true hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: @@ -29,7 +28,6 @@ executor: mongod_options: set_parameters: enableTestCommands: 1 - featureFlagFLE2Range: true num_rs_nodes_per_shard: 2 enable_sharding: - test diff --git a/buildscripts/resmokeconfig/suites/fle2_sharding_high_cardinality.yml b/buildscripts/resmokeconfig/suites/fle2_sharding_high_cardinality.yml index 8760caa26bc71..dd29e251b3d59 100644 --- a/buildscripts/resmokeconfig/suites/fle2_sharding_high_cardinality.yml +++ b/buildscripts/resmokeconfig/suites/fle2_sharding_high_cardinality.yml @@ -3,23 +3,23 @@ selector: roots: - jstests/fle2/*.js - src/mongo/db/modules/*/jstests/fle2/**/*.js + exclude_files: + - src/mongo/db/modules/enterprise/jstests/fle2/bulk_write_insert.js # TODO SERVER-77497 to support mongos exclude_with_any_tags: # Not compatible with tests the expect fle to always using $in in queries, # i.e. 
verify explain output - requires_fle2_in_always + - fle2_no_mongos executor: archive: hooks: - CheckReplDBHash - ValidateCollections - config: - shell_options: - eval: "testingReplication = false; testingFLE2Range = true;" - setShellParameter: featureFlagFLE2Range=true hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: @@ -32,7 +32,6 @@ executor: set_parameters: enableTestCommands: 1 internalQueryFLEAlwaysUseEncryptedCollScanMode: 1 - featureFlagFLE2Range: true num_rs_nodes_per_shard: 2 enable_sharding: - test diff --git a/buildscripts/resmokeconfig/suites/integration_tests_replset_ssl_auth.yml b/buildscripts/resmokeconfig/suites/integration_tests_replset_ssl_auth.yml index b6bae6b1ab4c6..98f1a82048fd4 100644 --- a/buildscripts/resmokeconfig/suites/integration_tests_replset_ssl_auth.yml +++ b/buildscripts/resmokeconfig/suites/integration_tests_replset_ssl_auth.yml @@ -34,19 +34,19 @@ executor: shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions - class: CheckReplDBHash shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions - class: ValidateCollections shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions fixture: class: ReplicaSetFixture diff --git a/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml b/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml index 7aabaa4d96e74..7c86fd80cf518 100644 --- a/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml +++ b/buildscripts/resmokeconfig/suites/integration_tests_sharded.yml @@ -16,6 +16,7 @@ executor: hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted fixture: class: ShardedClusterFixture mongod_options: diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_100ms_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_replication_100ms_refresh_jscore_passthrough.yml deleted file mode 100644 index 02b9116d197c0..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_100ms_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,58 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # The set_param1.js test attempts to compare the response from running the {getParameter: "*"} - # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds" - # server parameter. - - jstests/core/**/set_param1.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # The awaitdata_getmore_cmd.js test tails the oplog and waits for the getMore batch size to equal - # zero. The CheckReplDBHashInBackground hook consistently runs and creates sessions. At the same - # time, the logical session cache refresh thread will flush these sessions to disk, creating more - # opLog entries. To avoid this infinite loop, we will denylist the test from this suite. 
- - jstests/core/**/awaitdata_getmore_cmd.js - # These tests verify that an expected number of update operations were tracked in the server - # status metrics, but the logical session cache refresh causes additional updates to be recorded. - - jstests/core/**/find_and_modify_metrics.js - - jstests/core/**/update_metrics.js - - exclude_with_any_tags: - - assumes_standalone_mongod - -executor: - archive: - hooks: - - CheckReplDBHashInBackground - - CheckReplDBHash - - CheckReplOplogs - - ValidateCollections - config: - shell_options: - eval: "testingReplication = true;" - hooks: - # The CheckReplDBHash hook waits until all operations have replicated to and have been applied - # on the secondaries, so we run the ValidateCollections hook after it to ensure we're - # validating the entire contents of the collection. - - class: CheckReplDBHashInBackground - - class: CheckReplOplogs - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ReplicaSetFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 100 - num_nodes: 3 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_10sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_replication_10sec_refresh_jscore_passthrough.yml deleted file mode 100644 index 1cb8d42ce2d2a..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_10sec_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,58 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # The set_param1.js test attempts to compare the response from running the {getParameter: "*"} - # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds" - # server parameter. - - jstests/core/**/set_param1.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # The awaitdata_getmore_cmd.js test tails the oplog and waits for the getMore batch size to equal - # zero. The CheckReplDBHashInBackground hook consistently runs and creates sessions. At the same - # time, the logical session cache refresh thread will flush these sessions to disk, creating more - # opLog entries. To avoid this infinite loop, we will denylist the test from this suite. - - jstests/core/**/awaitdata_getmore_cmd.js - # These tests verify that an expected number of update operations were tracked in the server - # status metrics, but the logical session cache refresh causes additional updates to be recorded. - - jstests/core/**/find_and_modify_metrics.js - - jstests/core/**/update_metrics.js - - exclude_with_any_tags: - - assumes_standalone_mongod - -executor: - archive: - hooks: - - CheckReplDBHashInBackground - - CheckReplDBHash - - CheckReplOplogs - - ValidateCollections - config: - shell_options: - eval: "testingReplication = true;" - hooks: - # The CheckReplDBHash hook waits until all operations have replicated to and have been applied - # on the secondaries, so we run the ValidateCollections hook after it to ensure we're - # validating the entire contents of the collection. 
- - class: CheckReplDBHashInBackground - - class: CheckReplOplogs - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ReplicaSetFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 10000 - num_nodes: 3 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_1sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_replication_1sec_refresh_jscore_passthrough.yml deleted file mode 100644 index 95e276ed112cf..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_1sec_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,58 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # The set_param1.js test attempts to compare the response from running the {getParameter: "*"} - # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds" - # server parameter. - - jstests/core/**/set_param1.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # The awaitdata_getmore_cmd.js test tails the oplog and waits for the getMore batch size to equal - # zero. The CheckReplDBHashInBackground hook consistently runs and creates sessions. At the same - # time, the logical session cache refresh thread will flush these sessions to disk, creating more - # opLog entries. To avoid this infinite loop, we will denylist the test from this suite. - - jstests/core/**/awaitdata_getmore_cmd.js - # These tests verify that an expected number of update operations were tracked in the server - # status metrics, but the logical session cache refresh causes additional updates to be recorded. - - jstests/core/**/find_and_modify_metrics.js - - jstests/core/**/update_metrics.js - - exclude_with_any_tags: - - assumes_standalone_mongod - -executor: - archive: - hooks: - - CheckReplDBHashInBackground - - CheckReplDBHash - - CheckReplOplogs - - ValidateCollections - config: - shell_options: - eval: "testingReplication = true;" - hooks: - # The CheckReplDBHash hook waits until all operations have replicated to and have been applied - # on the secondaries, so we run the ValidateCollections hook after it to ensure we're - # validating the entire contents of the collection. 
- - class: CheckReplDBHashInBackground - - class: CheckReplOplogs - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ReplicaSetFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 1000 - num_nodes: 3 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_default_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_replication_default_refresh_jscore_passthrough.yml deleted file mode 100644 index a8434414a2282..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_replication_default_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,59 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # The set_param1.js test attempts to compare the response from running the {getParameter: "*"} - # command multiple times, which may observe the change to the "transactionLifetimeLimitSeconds" - # server parameter. - - jstests/core/**/set_param1.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # The awaitdata_getmore_cmd.js test tails the oplog and waits for the getMore batch size to equal - # zero. The CheckReplDBHashInBackground hook consistently runs and creates sessions. At the same - # time, the logical session cache refresh thread will flush these sessions to disk, creating more - # opLog entries. To prevent this infinite loop, we will denylist the test from this suite. - - jstests/core/**/awaitdata_getmore_cmd.js - # These tests verify that an expected number of update operations were tracked in the server - # status metrics, but the logical session cache refresh causes additional updates to be recorded. - - jstests/core/**/find_and_modify_metrics.js - - jstests/core/**/update_metrics.js - - exclude_with_any_tags: - - assumes_standalone_mongod - -executor: - archive: - hooks: - - CheckReplDBHashInBackground - - ValidateCollectionsInBackground - - CheckReplDBHash - - CheckReplOplogs - - ValidateCollections - config: - shell_options: - eval: "testingReplication = true;" - hooks: - # The CheckReplDBHash hook waits until all operations have replicated to and have been applied - # on the secondaries, so we run the ValidateCollections hook after it to ensure we're - # validating the entire contents of the collection. 
- - class: CheckReplDBHashInBackground - - class: ValidateCollectionsInBackground - - class: CheckReplOplogs - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ReplicaSetFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - num_nodes: 3 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_passthrough.yml deleted file mode 100644 index 0e9f1c4d1a099..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,116 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # These tests are run in logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml. - - jstests/core/txns/**/*.js - # The following tests fail because a certain command or functionality is not supported by - # mongos. This command or functionality is placed in a comment next to the failing test. - - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. - - jstests/core/**/awaitdata_getmore_cmd.js # capped collections. - - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted - - jstests/core/**/check_shard_index.js # checkShardingIndex. - - jstests/core/**/collection_truncate.js # emptycapped. - - jstests/core/**/compact_keeps_indexes.js # compact. - - jstests/core/**/currentop.js # uses fsync. - - jstests/core/**/dbhash.js # dbhash. - - jstests/core/**/dbhash2.js # dbhash. - - jstests/core/**/fsync.js # uses fsync. - - jstests/core/**/geo_s2cursorlimitskip.js # profiling. - - jstests/core/**/geo_update_btree2.js # notablescan. - - jstests/core/**/index9.js # "local" database. - - jstests/core/**/queryoptimizera.js # "local" database. - - jstests/core/**/stages*.js # stageDebug. - - jstests/core/**/startup_log.js # "local" database. - - jstests/core/**/tailable_cursor_invalidation.js # capped collections. - - jstests/core/**/tailable_getmore_batch_size.js # capped collections. - - jstests/core/**/tailable_skip_limit.js # capped collections. - - jstests/core/**/top.js # top. - # The following tests fail because mongos behaves differently from mongod when testing certain - # functionality. The differences are in a comment next to the failing test. - - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047. - - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). - - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). - - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). - - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. - # The following tests fail because they count indexes. These counts do not take into account the - # additional hashed shard key indexes that are automatically added by this passthrough. - - jstests/core/**/apitest_dbcollection.js - - jstests/core/**/bad_index_plugin.js - - jstests/core/**/create_indexes.js - - jstests/core/**/list_indexes_non_existent_ns.js - - jstests/core/**/mr_preserve_indexes.js - # The following tests fail because they expect no databases to be created. 
However a DB is created - # automatically when we shard a collection. - - jstests/core/**/dbcase.js - - jstests/core/**/dbcase2.js - - jstests/core/**/no_db_created.js - - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. - # These tests fail because sharded clusters do not clean up correctly after failed index builds. - # See SERVER-33207 as an example. - - jstests/core/**/geo_borders.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded - # queries with a limit or for distinct commands. - - jstests/core/**/distinct_index1.js - - jstests/core/**/explain1.js - - jstests/core/**/explain4.js - - jstests/core/**/sortk.js - # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is - # incorrect on sharded collections. - - jstests/core/**/explain_count.js - - jstests/core/**/explain_server_params.js - # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output. - - jstests/core/**/expr_index_use.js - - jstests/core/**/index_multikey.js - - jstests/core/**/optimized_match_explain.js - - jstests/core/**/sort_array.js - - exclude_with_any_tags: - - assumes_standalone_mongod - - assumes_against_mongod_not_mongos - # Tests tagged with the following will fail because they assume collections are not sharded. - - assumes_no_implicit_collection_creation_after_drop - - assumes_no_implicit_index_creation - - assumes_unsharded_collection - - cannot_create_unique_index_when_using_hashed_shard_key - # system.profile collection doesn't exist on mongos. - - requires_profiling - -executor: - archive: - hooks: - - CheckReplDBHash - - CheckMetadataConsistencyInBackground - - ValidateCollections - config: - shell_options: - eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") - hooks: - - class: CheckReplDBHash - - class: CheckMetadataConsistencyInBackground - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ShardedClusterFixture - num_shards: 2 - enable_balancer: false - mongos_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 100 - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 100 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml deleted file mode 100644 index 3464f572f79be..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml +++ /dev/null @@ -1,72 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/txns/**/*.js - exclude_files: - # Profile can only be run against the admin database on mongos. - - jstests/core/txns/transactions_profiling.js - - jstests/core/txns/transactions_profiling_with_drops.js - - # Implicitly creates a database through a collection rename, which does not work in a sharded - # cluster. - - jstests/core/txns/transactions_block_ddl.js - - # transactionLifetimeLimitSeconds parameter is not available in mongos. 
- - jstests/core/txns/abort_expired_transaction.js - - jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js - - jstests/core/txns/kill_op_on_txn_expiry.js - - # Uses hangAfterCollectionInserts failpoint not available on mongos. - - jstests/core/txns/speculative_snapshot_includes_all_writes.js - - # View tests aren't expected to work when collections are implicitly sharded. - - jstests/core/txns/view_reads_in_transaction.js - - # These workloads explicitly create collections inside multi-document transactions. These are - # non-idempotent operations, and the implicit collection sharding logic upon collection access - # results in premature collection creation, causing the workloads to fail. - - jstests/core/txns/create_collection.js - - jstests/core/txns/create_collection_parallel.js - - jstests/core/txns/create_indexes.js - - jstests/core/txns/create_indexes_parallel.js - - exclude_with_any_tags: - - assumes_against_mongod_not_mongos - # Tests tagged with the following will fail because they assume collections are not sharded. - - assumes_no_implicit_collection_creation_after_drop - - assumes_no_implicit_index_creation - - assumes_unsharded_collection - - cannot_create_unique_index_when_using_hashed_shard_key - # Transactions are not allowed to operate on capped collections. - - requires_capped - # Prepare is not a command on mongos. - - uses_prepare_transaction - -executor: - archive: - hooks: - - CheckReplDBHash - - ValidateCollections - config: - shell_options: - eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") - hooks: - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ShardedClusterFixture - num_shards: 2 - enable_balancer: false - mongos_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 100 - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 100 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_10sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_10sec_refresh_jscore_passthrough.yml deleted file mode 100644 index 076717acd11f0..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_10sec_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,116 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # These tests are run in logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml. - - jstests/core/txns/**/*.js - # The following tests fail because a certain command or functionality is not supported by - # mongos. This command or functionality is placed in a comment next to the failing test. - - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. - - jstests/core/**/awaitdata_getmore_cmd.js # capped collections. - - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted - - jstests/core/**/check_shard_index.js # checkShardingIndex. - - jstests/core/**/collection_truncate.js # emptycapped. - - jstests/core/**/compact_keeps_indexes.js # compact. - - jstests/core/**/currentop.js # uses fsync. - - jstests/core/**/dbhash.js # dbhash. - - jstests/core/**/dbhash2.js # dbhash. - - jstests/core/**/fsync.js # uses fsync. 
- - jstests/core/**/geo_s2cursorlimitskip.js # profiling. - - jstests/core/**/geo_update_btree2.js # notablescan. - - jstests/core/**/index9.js # "local" database. - - jstests/core/**/queryoptimizera.js # "local" database. - - jstests/core/**/stages*.js # stageDebug. - - jstests/core/**/startup_log.js # "local" database. - - jstests/core/**/tailable_cursor_invalidation.js # capped collections. - - jstests/core/**/tailable_getmore_batch_size.js # capped collections. - - jstests/core/**/tailable_skip_limit.js # capped collections. - - jstests/core/**/top.js # top. - # The following tests fail because mongos behaves differently from mongod when testing certain - # functionality. The differences are in a comment next to the failing test. - - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047. - - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). - - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). - - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). - - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. - # The following tests fail because they count indexes. These counts do not take into account the - # additional hashed shard key indexes that are automatically added by this passthrough. - - jstests/core/**/apitest_dbcollection.js - - jstests/core/**/bad_index_plugin.js - - jstests/core/**/create_indexes.js - - jstests/core/**/list_indexes_non_existent_ns.js - - jstests/core/**/mr_preserve_indexes.js - # The following tests fail because they expect no databases to be created. However a DB is created - # automatically when we shard a collection. - - jstests/core/**/dbcase.js - - jstests/core/**/dbcase2.js - - jstests/core/**/no_db_created.js - - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. - # These tests fail because sharded clusters do not clean up correctly after failed index builds. - # See SERVER-33207 as an example. - - jstests/core/**/geo_borders.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded - # queries with a limit or for distinct commands. - - jstests/core/**/distinct_index1.js - - jstests/core/**/explain1.js - - jstests/core/**/explain4.js - - jstests/core/**/sortk.js - # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is - # incorrect on sharded collections. - - jstests/core/**/explain_count.js - - jstests/core/**/explain_server_params.js - # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output. - - jstests/core/**/expr_index_use.js - - jstests/core/**/index_multikey.js - - jstests/core/**/optimized_match_explain.js - - jstests/core/**/sort_array.js - - exclude_with_any_tags: - - assumes_standalone_mongod - - assumes_against_mongod_not_mongos - # Tests tagged with the following will fail because they assume collections are not sharded. - - assumes_no_implicit_collection_creation_after_drop - - assumes_no_implicit_index_creation - - assumes_unsharded_collection - - cannot_create_unique_index_when_using_hashed_shard_key - # system.profile collection doesn't exist on mongos. 
- - requires_profiling - -executor: - archive: - hooks: - - CheckReplDBHash - - CheckMetadataConsistencyInBackground - - ValidateCollections - config: - shell_options: - eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") - hooks: - - class: CheckReplDBHash - - class: CheckMetadataConsistencyInBackground - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ShardedClusterFixture - num_shards: 2 - enable_balancer: false - mongos_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 10000 - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 10000 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_1sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_1sec_refresh_jscore_passthrough.yml deleted file mode 100644 index 26838b46c0445..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_1sec_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,116 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # These tests are run in logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml. - - jstests/core/txns/**/*.js - # The following tests fail because a certain command or functionality is not supported by - # mongos. This command or functionality is placed in a comment next to the failing test. - - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. - - jstests/core/**/awaitdata_getmore_cmd.js # capped collections. - - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted - - jstests/core/**/check_shard_index.js # checkShardingIndex. - - jstests/core/**/collection_truncate.js # emptycapped. - - jstests/core/**/compact_keeps_indexes.js # compact. - - jstests/core/**/currentop.js # uses fsync. - - jstests/core/**/dbhash.js # dbhash. - - jstests/core/**/dbhash2.js # dbhash. - - jstests/core/**/fsync.js # uses fsync. - - jstests/core/**/geo_s2cursorlimitskip.js # profiling. - - jstests/core/**/geo_update_btree2.js # notablescan. - - jstests/core/**/index9.js # "local" database. - - jstests/core/**/queryoptimizera.js # "local" database. - - jstests/core/**/stages*.js # stageDebug. - - jstests/core/**/startup_log.js # "local" database. - - jstests/core/**/tailable_cursor_invalidation.js # capped collections. - - jstests/core/**/tailable_getmore_batch_size.js # capped collections. - - jstests/core/**/tailable_skip_limit.js # capped collections. - - jstests/core/**/top.js # top. - # The following tests fail because mongos behaves differently from mongod when testing certain - # functionality. The differences are in a comment next to the failing test. - - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047. - - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). - - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). - - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). - - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. - # The following tests fail because they count indexes. 
These counts do not take into account the - # additional hashed shard key indexes that are automatically added by this passthrough. - - jstests/core/**/apitest_dbcollection.js - - jstests/core/**/bad_index_plugin.js - - jstests/core/**/create_indexes.js - - jstests/core/**/list_indexes_non_existent_ns.js - - jstests/core/**/mr_preserve_indexes.js - # The following tests fail because they expect no databases to be created. However a DB is created - # automatically when we shard a collection. - - jstests/core/**/dbcase.js - - jstests/core/**/dbcase2.js - - jstests/core/**/no_db_created.js - - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. - # These tests fail because sharded clusters do not clean up correctly after failed index builds. - # See SERVER-33207 as an example. - - jstests/core/**/geo_borders.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded - # queries with a limit or for distinct commands. - - jstests/core/**/distinct_index1.js - - jstests/core/**/explain1.js - - jstests/core/**/explain4.js - - jstests/core/**/sortk.js - # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is - # incorrect on sharded collections. - - jstests/core/**/explain_count.js - - jstests/core/**/explain_server_params.js - # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output. - - jstests/core/**/expr_index_use.js - - jstests/core/**/index_multikey.js - - jstests/core/**/optimized_match_explain.js - - jstests/core/**/sort_array.js - - exclude_with_any_tags: - - assumes_standalone_mongod - - assumes_against_mongod_not_mongos - # Tests tagged with the following will fail because they assume collections are not sharded. - - assumes_no_implicit_collection_creation_after_drop - - assumes_no_implicit_index_creation - - assumes_unsharded_collection - - cannot_create_unique_index_when_using_hashed_shard_key - # system.profile collection doesn't exist on mongos. 
- - requires_profiling - -executor: - archive: - hooks: - - CheckReplDBHash - - CheckMetadataConsistencyInBackground - - ValidateCollections - config: - shell_options: - eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") - hooks: - - class: CheckReplDBHash - - class: CheckMetadataConsistencyInBackground - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ShardedClusterFixture - num_shards: 2 - enable_balancer: false - mongos_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 1000 - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 1000 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_default_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_default_refresh_jscore_passthrough.yml deleted file mode 100644 index fbc8e2736e30f..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_sharding_default_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,114 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # These tests are run in logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough.yml. - - jstests/core/txns/**/*.js - # The following tests fail because a certain command or functionality is not supported by - # mongos. This command or functionality is placed in a comment next to the failing test. - - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. - - jstests/core/**/awaitdata_getmore_cmd.js # capped collections. - - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted - - jstests/core/**/check_shard_index.js # checkShardingIndex. - - jstests/core/**/collection_truncate.js # emptycapped. - - jstests/core/**/compact_keeps_indexes.js # compact. - - jstests/core/**/currentop.js # uses fsync. - - jstests/core/**/dbhash.js # dbhash. - - jstests/core/**/dbhash2.js # dbhash. - - jstests/core/**/fsync.js # uses fsync. - - jstests/core/**/geo_s2cursorlimitskip.js # profiling. - - jstests/core/**/geo_update_btree2.js # notablescan. - - jstests/core/**/index9.js # "local" database. - - jstests/core/**/queryoptimizera.js # "local" database. - - jstests/core/**/stages*.js # stageDebug. - - jstests/core/**/startup_log.js # "local" database. - - jstests/core/**/tailable_cursor_invalidation.js # capped collections. - - jstests/core/**/tailable_getmore_batch_size.js # capped collections. - - jstests/core/**/tailable_skip_limit.js # capped collections. - - jstests/core/**/top.js # top. - # The following tests fail because mongos behaves differently from mongod when testing certain - # functionality. The differences are in a comment next to the failing test. - - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047. - - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). - - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). - - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). - - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. - # The following tests fail because they count indexes. 
These counts do not take into account the - # additional hashed shard key indexes that are automatically added by this passthrough. - - jstests/core/**/apitest_dbcollection.js - - jstests/core/**/bad_index_plugin.js - - jstests/core/**/create_indexes.js - - jstests/core/**/list_indexes_non_existent_ns.js - - jstests/core/**/mr_preserve_indexes.js - # The following tests fail because they expect no databases to be created. However a DB is created - # automatically when we shard a collection. - - jstests/core/**/dbcase.js - - jstests/core/**/dbcase2.js - - jstests/core/**/no_db_created.js - - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. - # These tests fail because sharded clusters do not clean up correctly after failed index builds. - # See SERVER-33207 as an example. - - jstests/core/**/geo_borders.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded - # queries with a limit or for distinct commands. - - jstests/core/**/distinct_index1.js - - jstests/core/**/explain1.js - - jstests/core/**/explain4.js - - jstests/core/**/sortk.js - # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is - # incorrect on sharded collections. - - jstests/core/**/explain_count.js - - jstests/core/**/explain_server_params.js - # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output. - - jstests/core/**/expr_index_use.js - - jstests/core/**/index_multikey.js - - jstests/core/**/optimized_match_explain.js - - jstests/core/**/sort_array.js - - exclude_with_any_tags: - - assumes_standalone_mongod - - assumes_against_mongod_not_mongos - # Tests tagged with the following will fail because they assume collections are not sharded. - - assumes_no_implicit_collection_creation_after_drop - - assumes_no_implicit_index_creation - - assumes_unsharded_collection - - cannot_create_unique_index_when_using_hashed_shard_key - # system.profile collection doesn't exist on mongos. - - requires_profiling - -executor: - archive: - hooks: - - CheckReplDBHash - - CheckMetadataConsistencyInBackground - - ValidateCollections - config: - shell_options: - eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") - hooks: - - class: CheckReplDBHash - - class: CheckMetadataConsistencyInBackground - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ShardedClusterFixture - num_shards: 2 - enable_balancer: false - mongos_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_100ms_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_100ms_refresh_jscore_passthrough.yml deleted file mode 100644 index 2b61effb3a172..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_100ms_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,37 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - exclude_files: - # Transactions are not supported on MongoDB standalone nodes. 
- - jstests/core/txns/**/*.js - # This test expects a fixed number of operations. The logical session cache will perform its own - # operations, inflating the number of operations and causing the test to fail. - - jstests/core/**/opcounters_write_cmd.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # These tests verify that an expected number of update operations were tracked in the server - # status metrics, but the logical session cache refresh causes additional updates to be recorded. - - jstests/core/**/find_and_modify_metrics.js - - jstests/core/**/update_metrics.js - -executor: - archive: - hooks: - - ValidateCollections - config: {} - hooks: - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: MongoDFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 100 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_10sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_10sec_refresh_jscore_passthrough.yml deleted file mode 100644 index 4d3ef6b270192..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_10sec_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,37 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - exclude_files: - # Transactions are not supported on MongoDB standalone nodes. - - jstests/core/txns/**/*.js - # This test expects a fixed number of operations. The logical session cache will perform its own - # operations, inflating the number of operations and causing the test to fail. - - jstests/core/**/opcounters_write_cmd.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # These tests verify that an expected number of update operations were tracked in the server - # status metrics, but the logical session cache refresh causes additional updates to be recorded. - - jstests/core/**/find_and_modify_metrics.js - - jstests/core/**/update_metrics.js - -executor: - archive: - hooks: - - ValidateCollections - config: {} - hooks: - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: MongoDFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 10000 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_1sec_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_1sec_refresh_jscore_passthrough.yml deleted file mode 100644 index fa7b9c13daa97..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_1sec_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,37 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - exclude_files: - # Transactions are not supported on MongoDB standalone nodes. - - jstests/core/txns/**/*.js - # This test expects a fixed number of operations. 
The logical session cache will perform its own - # operations, inflating the number of operations and causing the test to fail. - - jstests/core/**/opcounters_write_cmd.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # These tests verify that an expected number of update operations were tracked in the server - # status metrics, but the logical session cache refresh causes additional updates to be recorded. - - jstests/core/**/find_and_modify_metrics.js - - jstests/core/**/update_metrics.js - -executor: - archive: - hooks: - - ValidateCollections - config: {} - hooks: - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: MongoDFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false - logicalSessionRefreshMillis: 1000 diff --git a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_default_refresh_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_default_refresh_jscore_passthrough.yml deleted file mode 100644 index 5f91f3b17f3b4..0000000000000 --- a/buildscripts/resmokeconfig/suites/logical_session_cache_standalone_default_refresh_jscore_passthrough.yml +++ /dev/null @@ -1,36 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - exclude_files: - # Transactions are not supported on MongoDB standalone nodes. - - jstests/core/txns/**/*.js - # This test expects a fixed number of operations. The logical session cache will perform its own - # operations, inflating the number of operations and causing the test to fail. - - jstests/core/**/opcounters_write_cmd.js - # These tests expect the logical session cache refresh thread to be turned off, so that refreshes - # can be triggered deterministically. - - jstests/core/**/list_all_local_sessions.js - - jstests/core/**/list_all_sessions.js - - jstests/core/**/list_sessions.js - # These tests verify that an expected number of update operations were tracked in the server - # status metrics, but the logical session cache refresh causes additional updates to be recorded. 
- - jstests/core/**/find_and_modify_metrics.js - - jstests/core/**/update_metrics.js - -executor: - archive: - hooks: - - ValidateCollections - config: {} - hooks: - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: MongoDFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - disableLogicalSessionCacheRefresh: false diff --git a/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml index 83e2b67dc4ab7..3bdb9d440c66d 100644 --- a/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/multi_shard_local_read_write_multi_stmt_txn_jscore_passthrough.yml @@ -287,7 +287,7 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -313,6 +313,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml index c8b08e5fe5ac8..77fa867b7bcd0 100644 --- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_jscore_passthrough.yml @@ -304,7 +304,7 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -330,6 +330,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml index 407bb4c391365..42fccf797d7c9 100644 --- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_kill_primary_jscore_passthrough.yml @@ -326,8 +326,6 @@ selector: - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize # "Cowardly fail if startParallelShell is run with a mongod that had an unclean shutdown: ..." - uses_parallel_shell # system.profile collection doesn't exist on mongos. Also, transactions are not allowed to operate @@ -358,9 +356,9 @@ executor: # shutdown). Workaround by relying on the requires_fastcount/dbstats/collstats/datasize tags # to denylist tests that uses them unsafely. 
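Many of the hunks that follow make the same mechanical change to the suites' shell `eval` snippets: the implicit `var testingReplication = true;` and bare `db = connect(...)` assignments become explicit `globalThis` properties. A minimal sketch of the resulting eval body is shown here; the identifiers are copied from the hunks themselves, while the stated motivation (keeping these shared globals explicit regardless of how the test code is loaded) is an assumption rather than something the diff spells out.

```js
// Sketch of the post-change shell eval body (identifiers taken from the hunks below).
// Writing through globalThis makes the shared test globals explicit instead of relying
// on top-level `var` bindings created inside the eval'd script.
globalThis.testingReplication = true;
load('jstests/libs/override_methods/network_error_and_txn_override.js');
globalThis.db = connect(TestData.connectionString);
```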
eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -395,6 +393,7 @@ executor: - class: CheckReplOplogs - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground + - class: CheckOrphansDeleted - class: ValidateCollections shell_options: global_vars: diff --git a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml index 8e9f5447f228c..f1894ffc00098 100644 --- a/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/multi_shard_multi_stmt_txn_stepdown_primary_jscore_passthrough.yml @@ -326,8 +326,6 @@ selector: - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize # "Cowardly fail if startParallelShell is run with a mongod that had an unclean shutdown: ..." - uses_parallel_shell # system.profile collection doesn't exist on mongos. @@ -356,9 +354,9 @@ executor: # shutdown). Workaround by relying on the requires_fastcount/dbstats/collstats/datasize tags # to denylist tests that uses them unsafely. eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -393,6 +391,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml b/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml index 1dbf0f1f49710..dfb40cab4a249 100644 --- a/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml +++ b/buildscripts/resmokeconfig/suites/multi_stmt_txn_jscore_passthrough_with_migration.yml @@ -315,7 +315,7 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); ImplicitlyShardAccessCollSettings.setMode(ImplicitlyShardAccessCollSettings.Modes.kHashedMoveToSingleShard); load('jstests/libs/override_methods/enable_sessions.js'); @@ -337,6 +337,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: DropShardedCollections - class: CleanEveryN n: 20 diff --git a/buildscripts/resmokeconfig/suites/multiversion.yml 
b/buildscripts/resmokeconfig/suites/multiversion.yml index aa7939823f52d..b7d4e95b7516c 100644 --- a/buildscripts/resmokeconfig/suites/multiversion.yml +++ b/buildscripts/resmokeconfig/suites/multiversion.yml @@ -4,13 +4,11 @@ selector: roots: - jstests/multiVersion/**/*.js - src/mongo/db/modules/*/jstests/hot_backups/multiVersion/*.js + - src/mongo/db/modules/*/jstests/audit/multiVersion/*.js exclude_files: # Do not execute files with helper functions. - jstests/multiVersion/libs/*.js - # TODO: SERVER-21578 - - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js - # TODO: SERVER-28104 - jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js diff --git a/buildscripts/resmokeconfig/suites/multiversion_auth.yml b/buildscripts/resmokeconfig/suites/multiversion_auth.yml index aacf5eb8d943c..6d47fbe873d52 100644 --- a/buildscripts/resmokeconfig/suites/multiversion_auth.yml +++ b/buildscripts/resmokeconfig/suites/multiversion_auth.yml @@ -12,9 +12,6 @@ selector: # Do not execute files with helper functions. - jstests/multiVersion/libs/*.js - # TODO: SERVER-21578 - - jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js - # TODO: SERVER-28104 - jstests/multiVersion/genericBinVersion/minor_version_tags_new_old_new.js diff --git a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml index 13cc52a24cbbd..ccbc43594328d 100644 --- a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_dollar_tenant_jscore_passthrough.yml @@ -74,17 +74,15 @@ selector: # These tests create a new thread, so $tenant won't be properly injected. - jstests/core/txns/transactions_block_ddl.js - jstests/core/txns/write_conflicts_with_non_txns.js + - jstests/core/txns/kill_op_on_txn_expiry.js # TODO SERVER-72357: cannot get the expected error due to an authorization contract issue. - jstests/core/txns/multi_statement_transaction_command_args.js - # TODO SERVER-72187: bulkWrite command does not support Tenant ID command - - jstests/core/write/bulk/bulk_write_insert_cursor.js - - jstests/core/write/bulk/bulk_write_update_cursor.js - # TODO SERVER-73023 The tenantId is not attached to the namespace provided to failcommand - # failpoint - - jstests/core/failcommand_failpoint.js # This test looks for the presence of a log line that contains a db name. Injecting a tenantId in # the requests causes the test to fails due to a mismatch. - jstests/core/api//apitest_db_profile_level.js + # Queryable encryption test requires an internal connection for the keyvault that does not + # inject a $tenant. 
+ - jstests/core/queryable_encryption/**/*.js executor: archive: @@ -99,7 +97,7 @@ executor: <<: *authOptions eval: | jsTest.authenticate(db.getMongo()); - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_dollar_tenant.js'); global_vars: TestData: &TestData @@ -117,19 +115,19 @@ executor: shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions - class: CheckReplDBHash shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions - class: ValidateCollections shell_options: global_vars: TestData: *TestData - eval: jsTest.authenticate(db.getMongo()) + eval: jsTest.authenticate(db.getMongo()) <<: *authOptions - class: CleanEveryN n: 20 diff --git a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml index fa88a3527d5e8..aca599e2ccf48 100644 --- a/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/native_tenant_data_isolation_with_security_token_jscore_passthrough.yml @@ -66,6 +66,9 @@ selector: - jstests/core/**/list_catalog.js # This test uses '_hashBSONElement' command that cannot be run with security token. - jstests/core/**/index_key_expression.js + # Queryable encryption test performs implicit encryption which issues commands that don't + # include the security token. + - jstests/core/queryable_encryption/**/*.js executor: archive: @@ -77,7 +80,7 @@ executor: config: shell_options: eval: | - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_security_token.js'); hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied @@ -96,7 +99,7 @@ executor: enableTestCommands: 1 multitenancySupport: true featureFlagSecurityToken: true - # TODO SERVER-74284: remove featureFlagRequireTenantID from the parameters and have the + # TODO SERVER-78300: remove featureFlagRequireTenantID from the parameters and have the # inject_security_token override to be able to test both tenant-prefixed request and non-tenant-prefixed request. # Currently, we only test non-tenant-prefixed request and enable the featureFlagRequireTenantID # to have mongod return non-tenant-prefixed response too. diff --git a/buildscripts/resmokeconfig/suites/pretty-printer-tests.yml b/buildscripts/resmokeconfig/suites/pretty-printer-tests.yml new file mode 100644 index 0000000000000..fa101e6cfcc9d --- /dev/null +++ b/buildscripts/resmokeconfig/suites/pretty-printer-tests.yml @@ -0,0 +1,7 @@ +test_kind: pretty_printer_test + +selector: + root: build/pretty_printer_tests.txt + +executor: + config: {} diff --git a/buildscripts/resmokeconfig/suites/query_golden_classic.yml b/buildscripts/resmokeconfig/suites/query_golden_classic.yml index 44915921403df..a3c72c852c03a 100644 --- a/buildscripts/resmokeconfig/suites/query_golden_classic.yml +++ b/buildscripts/resmokeconfig/suites/query_golden_classic.yml @@ -18,7 +18,7 @@ executor: eval: | // Keep in sync with query_golden_cqf.yml. 
load("jstests/libs/override_methods/detect_spawning_own_mongod.js"); - load("jstests/libs/golden_test.js"); + await import("jstests/libs/override_methods/golden_overrides.js"); _openGoldenData(jsTestName(), {relativePath: "jstests/query_golden/expected_output"}); hooks: - class: ValidateCollections diff --git a/buildscripts/resmokeconfig/suites/query_golden_cqf.yml b/buildscripts/resmokeconfig/suites/query_golden_cqf.yml index 578f720f3da36..3418d0d70759b 100644 --- a/buildscripts/resmokeconfig/suites/query_golden_cqf.yml +++ b/buildscripts/resmokeconfig/suites/query_golden_cqf.yml @@ -17,7 +17,7 @@ executor: // Keep in sync with query_golden_classic.yml. load("jstests/libs/override_methods/detect_spawning_own_mongod.js"); load("jstests/libs/set_force_bonsai.js"); - load("jstests/libs/golden_test.js"); + await import("jstests/libs/override_methods/golden_overrides.js"); _openGoldenData(jsTestName(), {relativePath: "jstests/query_golden/expected_output"}); hooks: - class: ValidateCollections @@ -34,4 +34,5 @@ executor: set_parameters: enableTestCommands: 1 featureFlagCommonQueryFramework: true + internalQueryCardinalityEstimatorMode: "sampling" internalQueryFrameworkControl: "forceBonsai" diff --git a/buildscripts/resmokeconfig/suites/query_stats_passthrough.yml b/buildscripts/resmokeconfig/suites/query_stats_passthrough.yml new file mode 100644 index 0000000000000..088a46966e450 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/query_stats_passthrough.yml @@ -0,0 +1,41 @@ +test_kind: js_test +description: | + This suite enables the collection of query stats metrics on a mongod server, then runs the tests in + core and aggregation as normal. This should cause each query or aggregation to compute a query + shape and query stats key, and record in-memory some metrics like execution time and number of + scanned documents. Then it uses the 'RunQueryStats' hook to have a background thread ask to collect + the query stats every one second. It doesn't assert anything about the collected query stats, it is + just meant to make sure nothing is going seriously awry (e.g. crashing). + +selector: + roots: + - jstests/core/**/*.js + # - jstests/aggregation/**/*.js # TODO: SERVER-75596 enable aggregation tests in the full passthrough. + exclude_files: + # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests. + - jstests/core/txns/**/*.js + - jstests/core/views/invalid_system_views.js # TODO SERVER-78025 reenable coverage on this test + # Queryable encryption is not supported on standalone + - jstests/core/queryable_encryption/**/*.js + exclude_with_any_tags: + # Running $queryStats will increment these counters which can screw up some test assertions. + - inspects_command_opcounters + +executor: + archive: + hooks: + - ValidateCollections + hooks: + # Be sure to run the hooks which depend on the fixture being alive before the CleanEveryN hook. + # That way the fixture restart can't cause any trouble for the other hooks. 
+ - class: RunQueryStats + - class: ValidateCollections + - class: CleanEveryN + n: 20 + fixture: + class: MongoDFixture + mongod_options: + set_parameters: + enableTestCommands: 1 + internalQueryStatsRateLimit: -1 + internalQueryStatsErrorsAreCommandFatal: true diff --git a/buildscripts/resmokeconfig/suites/query_stats_passthrough_writeonly.yml b/buildscripts/resmokeconfig/suites/query_stats_passthrough_writeonly.yml new file mode 100644 index 0000000000000..331c26145f612 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/query_stats_passthrough_writeonly.yml @@ -0,0 +1,28 @@ +# TODO: SERVER-75596 delete this suite and run aggregation through RunQueryStats hook as well. +test_kind: js_test +description: | + This suite enables the collection of query stats metrics on a mongod server, then runs the tests in + aggregation as normal. This should cause each query or aggregation to compute a query + shape and query stats key, and record in-memory some metrics like execution time and number of + scanned documents. + +selector: + roots: + - jstests/aggregation/**/*.js + +executor: + archive: + hooks: + - ValidateCollections + hooks: + # Be sure to run the hooks which depend on the fixture being alive before the CleanEveryN hook. + # That way the fixture restart can't cause any trouble for the other hooks. + - class: ValidateCollections + - class: CleanEveryN + n: 20 + fixture: + class: MongoDFixture + mongod_options: + set_parameters: + enableTestCommands: 1 + internalQueryStatsRateLimit: -1 diff --git a/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml b/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml index c84deae1fa223..e0fd4bc2b3b8c 100644 --- a/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/read_concern_linearizable_passthrough.yml @@ -41,7 +41,7 @@ executor: TestData: defaultReadConcernLevel: linearizable eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied diff --git a/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml index 78cd645be67d0..a7a95d5b8a998 100644 --- a/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/read_concern_majority_passthrough.yml @@ -36,7 +36,7 @@ executor: TestData: defaultReadConcernLevel: majority eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied diff --git a/buildscripts/resmokeconfig/suites/replica_sets_api_version_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_api_version_jscore_passthrough.yml index 3809405d3f781..d81a69346e9df 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_api_version_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_api_version_jscore_passthrough.yml @@ -26,7 +26,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_api_version.js'); hooks: # The CheckReplDBHash hook waits until all operations have 
replicated to and have been applied diff --git a/buildscripts/resmokeconfig/suites/replica_sets_fcbis_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_fcbis_jscore_passthrough.yml index 51be09c6f421b..4025689dad977 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_fcbis_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_fcbis_jscore_passthrough.yml @@ -48,7 +48,7 @@ executor: - BackgroundInitialSync config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: - class: BackgroundInitialSync n: *run_hook_interval diff --git a/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml index 9ae8f19f6f750..3fbf8730a3059 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_initsync_jscore_passthrough.yml @@ -47,7 +47,7 @@ executor: - BackgroundInitialSync config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: - class: BackgroundInitialSync n: *run_hook_interval diff --git a/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml index 35fd9456b932b..fee8c8cc34bbc 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_initsync_static_jscore_passthrough.yml @@ -21,7 +21,7 @@ executor: - IntermediateInitialSync config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: - class: IntermediateInitialSync n: *run_hook_interval diff --git a/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml index 9bcd88b16730f..5ffdf8251cdc3 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_jscore_passthrough.yml @@ -31,7 +31,7 @@ executor: - ValidateCollections config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied # on the secondaries, so we run the ValidateCollections hook after it to ensure we're diff --git a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml index 6df62268b9f05..2a70d88d335be 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_kill_secondaries_jscore_passthrough.yml @@ -37,7 +37,7 @@ executor: - PeriodicKillSecondaries config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: - class: PeriodicKillSecondaries fixture: diff --git a/buildscripts/resmokeconfig/suites/replica_sets_large_txns_format_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_large_txns_format_jscore_passthrough.yml index 9fb34b66fc61e..56aa1a77cb9c1 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_large_txns_format_jscore_passthrough.yml +++ 
b/buildscripts/resmokeconfig/suites/replica_sets_large_txns_format_jscore_passthrough.yml @@ -23,7 +23,7 @@ executor: - ValidateCollections config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied # on the secondaries, so we run the ValidateCollections hook after it to ensure we're diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml index 47406b42bc388..ef35798901c43 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_jscore_passthrough.yml @@ -245,7 +245,7 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); load('jstests/libs/override_methods/network_error_and_txn_override.js'); diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml index 53b8d8e3de771..0237313a7f2e3 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_kill_primary_jscore_passthrough.yml @@ -277,8 +277,6 @@ selector: - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize ## The next tag corresponds to long running-operations, as they may exhaust their number # of retries and result in a network error being thrown. 
- operations_longer_than_stepdown_interval @@ -311,9 +309,9 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml index bb59e2d0b099c..b3820ce194300 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_stepdown_jscore_passthrough.yml @@ -291,9 +291,9 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); global_vars: diff --git a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml index 6233179e62715..c25433031f946 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_multi_stmt_txn_terminate_primary_jscore_passthrough.yml @@ -300,9 +300,9 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); diff --git a/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_passthrough.yml index 9baa473f3e46d..df3e2048a8cdf 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_passthrough.yml @@ -46,7 +46,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); load("jstests/libs/override_methods/enable_sessions.js"); global_vars: diff --git a/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_stepdown_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_stepdown_passthrough.yml index 8fd0180ba922b..58299ecb3b17b 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_stepdown_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_reconfig_jscore_stepdown_passthrough.yml @@ 
-116,7 +116,6 @@ selector: # collStats is not causally consistent - requires_collstats - requires_dbstats - - requires_datasize - requires_sharding # Operations in the main test shell aren't guaranteed to be causally consistent with operations # performed earlier in a parallel shell if multiple nodes are electable because the latest @@ -133,9 +132,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); global_vars: diff --git a/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml index 3df5d47056154..e1ed7ace28304 100644 --- a/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/replica_sets_terminate_primary_jscore_passthrough.yml @@ -106,9 +106,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_start_parallel_shell.js"); diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_downgrade.yml b/buildscripts/resmokeconfig/suites/retryable_writes_downgrade.yml index 8097a74ffc360..2123fbdecd2ac 100644 --- a/buildscripts/resmokeconfig/suites/retryable_writes_downgrade.yml +++ b/buildscripts/resmokeconfig/suites/retryable_writes_downgrade.yml @@ -101,8 +101,6 @@ selector: - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize ## The next tag corresponds to long running-operations, as they may exhaust their number # of retries and result in a network error being thrown. 
- operations_longer_than_stepdown_interval @@ -125,9 +123,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml index 35caa128159f8..711e4b0b28e28 100644 --- a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_passthrough.yml @@ -56,7 +56,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/retry_writes_at_least_once.js"); global_vars: diff --git a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml index d6b9a27e561a6..f941f415095bc 100644 --- a/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/retryable_writes_jscore_stepdown_passthrough.yml @@ -105,9 +105,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); global_vars: diff --git a/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml b/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml index 03553d7dfd1f7..dea2b7851fcb6 100644 --- a/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/secondary_reads_passthrough.yml @@ -57,7 +57,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_preference_secondary.js'); load('jstests/libs/override_methods/enable_causal_consistency.js'); hooks: diff --git a/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml index f9d0fa83f26eb..908cc2be5d759 100644 --- a/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/session_jscore_passthrough.yml @@ -7,6 +7,9 @@ selector: # Transactions are not supported on MongoDB standalone nodes. - jstests/core/txns/**/*.js + # Queryable encryption is not supported on standalone + - jstests/core/queryable_encryption/**/*.js + # These test run commands using legacy queries, which are not supported on sessions. 
- jstests/core/**/comment_field.js - jstests/core/**/exhaust.js diff --git a/buildscripts/resmokeconfig/suites/shard_merge_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_merge_causally_consistent_jscore_passthrough.yml index cdcd52a7d6e9d..5dd408cac638e 100644 --- a/buildscripts/resmokeconfig/suites/shard_merge_causally_consistent_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_merge_causally_consistent_jscore_passthrough.yml @@ -264,7 +264,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -331,9 +331,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. - storeFindAndModifyImagesInSideCollection: false minSnapshotHistoryWindowInSeconds: 30 tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem diff --git a/buildscripts/resmokeconfig/suites/shard_merge_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_merge_jscore_passthrough.yml index 9153cafd46432..f810b70efba24 100644 --- a/buildscripts/resmokeconfig/suites/shard_merge_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_merge_jscore_passthrough.yml @@ -53,7 +53,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); jsTest.authenticate(db.getMongo()); global_vars: @@ -111,9 +111,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. - storeFindAndModifyImagesInSideCollection: false minSnapshotHistoryWindowInSeconds: 30 tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem diff --git a/buildscripts/resmokeconfig/suites/shard_merge_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_merge_multi_stmt_txn_jscore_passthrough.yml index 3e828cd582960..4e3b5ce567233 100644 --- a/buildscripts/resmokeconfig/suites/shard_merge_multi_stmt_txn_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_merge_multi_stmt_txn_jscore_passthrough.yml @@ -264,7 +264,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -328,9 +328,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. 
- storeFindAndModifyImagesInSideCollection: false minSnapshotHistoryWindowInSeconds: 30 tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem diff --git a/buildscripts/resmokeconfig/suites/shard_split_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_causally_consistent_jscore_passthrough.yml index 6b5af0d852954..fc4e960736d47 100644 --- a/buildscripts/resmokeconfig/suites/shard_split_causally_consistent_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_split_causally_consistent_jscore_passthrough.yml @@ -119,7 +119,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); load('jstests/libs/override_methods/enable_causal_consistency.js'); load('jstests/libs/override_methods/enable_sessions.js'); diff --git a/buildscripts/resmokeconfig/suites/shard_split_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_jscore_passthrough.yml index bbf4f369ee437..20aaa9f37ec7b 100644 --- a/buildscripts/resmokeconfig/suites/shard_split_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_split_jscore_passthrough.yml @@ -55,7 +55,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); jsTest.authenticate(db.getMongo()); global_vars: diff --git a/buildscripts/resmokeconfig/suites/shard_split_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_kill_primary_jscore_passthrough.yml index b25bda49b404d..e528effec0186 100644 --- a/buildscripts/resmokeconfig/suites/shard_split_kill_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_split_kill_primary_jscore_passthrough.yml @@ -97,8 +97,6 @@ selector: - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize # Due to background shard splits, operations in the main test shell are not guaranteed to # be causally consistent with operations in a parallel shell. 
The reason is that # TenantMigrationCommitted error is only thrown when the client does a write or a atClusterTime/ @@ -135,9 +133,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load('jstests/libs/override_methods/inject_tenant_prefix.js'); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); @@ -180,7 +178,7 @@ executor: - class: ContinuousShardSplit shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); global_vars: diff --git a/buildscripts/resmokeconfig/suites/shard_split_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_multi_stmt_txn_jscore_passthrough.yml index f6a94169ea9f4..5bc0bc14f88cc 100644 --- a/buildscripts/resmokeconfig/suites/shard_split_multi_stmt_txn_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_split_multi_stmt_txn_jscore_passthrough.yml @@ -259,7 +259,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); diff --git a/buildscripts/resmokeconfig/suites/shard_split_stepdown_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_stepdown_jscore_passthrough.yml index eef468bda3e48..0b33f27d51474 100644 --- a/buildscripts/resmokeconfig/suites/shard_split_stepdown_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_split_stepdown_jscore_passthrough.yml @@ -127,9 +127,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load('jstests/libs/override_methods/inject_tenant_prefix.js'); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); @@ -169,7 +169,7 @@ executor: - class: ContinuousShardSplit shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); global_vars: diff --git a/buildscripts/resmokeconfig/suites/shard_split_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/shard_split_terminate_primary_jscore_passthrough.yml index 8b4e5132be92e..f93aa05bf2298 100644 --- a/buildscripts/resmokeconfig/suites/shard_split_terminate_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/shard_split_terminate_primary_jscore_passthrough.yml @@ -127,9 +127,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); 
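# A note on the globalThis changes in these eval snippets: assigning via globalThis makes
# testingReplication and db explicit properties of the global object, so the overrides keep
# working even where a bare assignment to an undeclared name would throw (strict-mode or
# module evaluation), whereas a top-level `var` would stay scoped to the evaluated script.
# Minimal sketch of the shell_options shape these suites use (values illustrative only):
#
#   config:
#     shell_options:
#       eval: >-
#         globalThis.testingReplication = true;
#         globalThis.db = connect(TestData.connectionString);
#         load('jstests/libs/override_methods/network_error_and_txn_override.js');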
load('jstests/libs/override_methods/inject_tenant_prefix.js'); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); @@ -170,7 +170,7 @@ executor: - class: ContinuousShardSplit shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); global_vars: diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml index ddcbd2c22bfcb..54b941b97392f 100644 --- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_passthrough.yml @@ -91,6 +91,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml index ba6868bdc0753..67dea09960b86 100644 --- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_jscore_txns_passthrough.yml @@ -41,12 +41,13 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set # node. 
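# On the CheckOrphansDeleted hook being added to these suites' executor.hooks lists below:
# the class name comes from this patch; presumably it waits for range deletions to finish
# and asserts that no orphaned documents remain on any shard after the tests run (suites
# that cannot tolerate the check appear to opt out via TestData.skipCheckOrphans).
# Sketch of the resulting hook list (ordering illustrative):
#
#   hooks:
#     - class: ValidateCollections
#     - class: CheckOrphansDeleted
#     - class: CleanEveryN
#       n: 20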
- class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_read_concern_snapshot_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_read_concern_snapshot_passthrough.yml index fb183730ac584..233d8e6a2a6eb 100644 --- a/buildscripts/resmokeconfig/suites/sharded_causally_consistent_read_concern_snapshot_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_causally_consistent_read_concern_snapshot_passthrough.yml @@ -93,13 +93,14 @@ executor: defaultReadConcernLevel: snapshot disallowSnapshotDistinct: true eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load("jstests/libs/override_methods/enable_causal_consistency.js"); load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js"); hooks: - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml index 0d90f77f3e9fd..cf1fa432b3e66 100644 --- a/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_collections_causally_consistent_jscore_txns_passthrough.yml @@ -55,13 +55,14 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_causal_consistency_without_read_pref.js'); load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set # node. - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml index a3bfb2adea7bb..c19a1bb33f8b0 100644 --- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough.yml @@ -94,6 +94,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml deleted file mode 100644 index 11c8f5e1a83ef..0000000000000 --- a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_catalog_shard.yml +++ /dev/null @@ -1,109 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # These tests run in the jscore_txn passthrough suites. - - jstests/core/txns/**/*.js - - # The following tests fail because a certain command or functionality is not supported by - # mongos. This command or functionality is placed in a comment next to the failing test. 
- - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. - - jstests/core/**/awaitdata_getmore_cmd.js # capped collections. - - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted - - jstests/core/**/check_shard_index.js # checkShardingIndex. - - jstests/core/**/collection_truncate.js # emptycapped. - - jstests/core/**/compact_keeps_indexes.js # compact. - - jstests/core/**/currentop.js # uses fsync. - - jstests/core/**/dbhash.js # dbhash. - - jstests/core/**/dbhash2.js # dbhash. - - jstests/core/**/fsync.js # uses fsync. - - jstests/core/**/geo_s2cursorlimitskip.js # profiling. - - jstests/core/**/geo_update_btree2.js # notablescan. - - jstests/core/**/index9.js # "local" database. - - jstests/core/**/queryoptimizera.js # "local" database. - - jstests/core/**/stages*.js # stageDebug. - - jstests/core/**/startup_log.js # "local" database. - - jstests/core/**/tailable_cursor_invalidation.js # capped collections. - - jstests/core/**/tailable_getmore_batch_size.js # capped collections. - - jstests/core/**/tailable_skip_limit.js # capped collections. - - jstests/core/**/top.js # top. - # The following tests fail because mongos behaves differently from mongod when testing certain - # functionality. The differences are in a comment next to the failing test. - - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047. - - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). - - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). - - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). - - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. - # The following tests fail because they count indexes. These counts do not take into account the - # additional hashed shard key indexes that are automatically added by this passthrough. - - jstests/core/**/apitest_dbcollection.js - - jstests/core/**/bad_index_plugin.js - - jstests/core/**/create_indexes.js - - jstests/core/**/list_indexes_non_existent_ns.js - - jstests/core/**/mr_preserve_indexes.js - # The following tests fail because they expect no databases to be created. However a DB is created - # automatically when we shard a collection. - - jstests/core/**/dbcase.js - - jstests/core/**/dbcase2.js - - jstests/core/**/no_db_created.js - - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. - # These tests fail because sharded clusters do not clean up correctly after failed index builds. - # See SERVER-33207 as an example. - - jstests/core/**/geo_borders.js - # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded - # queries with a limit or for distinct commands. - - jstests/core/**/distinct_index1.js - - jstests/core/**/explain1.js - - jstests/core/**/explain4.js - - jstests/core/**/sortk.js - # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is - # incorrect on sharded collections. - - jstests/core/**/explain_count.js - - jstests/core/**/explain_server_params.js - # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output. 
- - jstests/core/**/expr_index_use.js - - jstests/core/**/index_multikey.js - - jstests/core/**/optimized_match_explain.js - - jstests/core/**/sort_array.js - - exclude_with_any_tags: - - assumes_standalone_mongod - - assumes_against_mongod_not_mongos - # Tests tagged with the following will fail because they assume collections are not sharded. - - assumes_no_implicit_collection_creation_after_drop - - assumes_no_implicit_index_creation - - assumes_unsharded_collection - - cannot_create_unique_index_when_using_hashed_shard_key - # system.profile collection doesn't exist on mongos. - - requires_profiling - - catalog_shard_incompatible - - temporary_catalog_shard_incompatible - -executor: - archive: - hooks: - - CheckReplDBHash - - ValidateCollections - config: - shell_options: - eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") - hooks: - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ShardedClusterFixture - catalog_shard: "any" - num_shards: 2 - enable_balancer: false - mongos_options: - set_parameters: - enableTestCommands: 1 - mongod_options: - set_parameters: - enableTestCommands: 1 diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_config_shard.yml b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_config_shard.yml new file mode 100644 index 0000000000000..4ff75632b9b45 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/sharded_collections_jscore_passthrough_with_config_shard.yml @@ -0,0 +1,109 @@ +test_kind: js_test + +selector: + roots: + - jstests/core/**/*.js + - jstests/fle2/**/*.js + - src/mongo/db/modules/*/jstests/fle2/**/*.js + exclude_files: + # These tests run in the jscore_txn passthrough suites. + - jstests/core/txns/**/*.js + + # The following tests fail because a certain command or functionality is not supported by + # mongos. This command or functionality is placed in a comment next to the failing test. + - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. + - jstests/core/**/awaitdata_getmore_cmd.js # capped collections. + - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted + - jstests/core/**/check_shard_index.js # checkShardingIndex. + - jstests/core/**/collection_truncate.js # emptycapped. + - jstests/core/**/compact_keeps_indexes.js # compact. + - jstests/core/**/currentop.js # uses fsync. + - jstests/core/**/dbhash.js # dbhash. + - jstests/core/**/dbhash2.js # dbhash. + - jstests/core/**/fsync.js # uses fsync. + - jstests/core/**/geo_s2cursorlimitskip.js # profiling. + - jstests/core/**/geo_update_btree2.js # notablescan. + - jstests/core/**/index9.js # "local" database. + - jstests/core/**/queryoptimizera.js # "local" database. + - jstests/core/**/stages*.js # stageDebug. + - jstests/core/**/startup_log.js # "local" database. + - jstests/core/**/tailable_cursor_invalidation.js # capped collections. + - jstests/core/**/tailable_getmore_batch_size.js # capped collections. + - jstests/core/**/tailable_skip_limit.js # capped collections. + - jstests/core/**/top.js # top. + # The following tests fail because mongos behaves differently from mongod when testing certain + # functionality. The differences are in a comment next to the failing test. + - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047. + - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). 
+ - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). + - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). + - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. + # The following tests fail because they count indexes. These counts do not take into account the + # additional hashed shard key indexes that are automatically added by this passthrough. + - jstests/core/**/apitest_dbcollection.js + - jstests/core/**/bad_index_plugin.js + - jstests/core/**/create_indexes.js + - jstests/core/**/list_indexes_non_existent_ns.js + - jstests/core/**/mr_preserve_indexes.js + # The following tests fail because they expect no databases to be created. However a DB is created + # automatically when we shard a collection. + - jstests/core/**/dbcase.js + - jstests/core/**/dbcase2.js + - jstests/core/**/no_db_created.js + - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. + # These tests fail because sharded clusters do not clean up correctly after failed index builds. + # See SERVER-33207 as an example. + - jstests/core/**/geo_borders.js + # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded + # queries with a limit or for distinct commands. + - jstests/core/**/distinct_index1.js + - jstests/core/**/explain1.js + - jstests/core/**/explain4.js + - jstests/core/**/sortk.js + # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is + # incorrect on sharded collections. + - jstests/core/**/explain_count.js + - jstests/core/**/explain_server_params.js + # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output. + - jstests/core/**/expr_index_use.js + - jstests/core/**/index_multikey.js + - jstests/core/**/optimized_match_explain.js + - jstests/core/**/sort_array.js + + exclude_with_any_tags: + - assumes_standalone_mongod + - assumes_against_mongod_not_mongos + # Tests tagged with the following will fail because they assume collections are not sharded. + - assumes_no_implicit_collection_creation_after_drop + - assumes_no_implicit_index_creation + - assumes_unsharded_collection + - cannot_create_unique_index_when_using_hashed_shard_key + # system.profile collection doesn't exist on mongos. 
+ - requires_profiling + - config_shard_incompatible + +executor: + archive: + hooks: + - CheckReplDBHash + - ValidateCollections + config: + shell_options: + eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") + hooks: + - class: CheckReplDBHash + - class: ValidateCollections + - class: CheckOrphansDeleted + - class: CleanEveryN + n: 20 + fixture: + class: ShardedClusterFixture + config_shard: "any" + num_shards: 2 + enable_balancer: false + mongos_options: + set_parameters: + enableTestCommands: 1 + mongod_options: + set_parameters: + enableTestCommands: 1 diff --git a/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml new file mode 100644 index 0000000000000..65a5f1d9f194d --- /dev/null +++ b/buildscripts/resmokeconfig/suites/sharded_collections_single_writes_without_shard_key_jscore_passthrough.yml @@ -0,0 +1,321 @@ +test_kind: js_test + +selector: + roots: + - jstests/core/**/*.js + exclude_files: + # The following tests fail because a certain command or functionality is not supported by mongos. + # This command or functionality is placed in a comment next to the failing test. + - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. + - jstests/core/**/awaitdata_getmore_cmd.js # capped collections. + - jstests/core/**/bypass_doc_validation.js # sharded $out output not permitted + - jstests/core/**/check_shard_index.js # checkShardingIndex. + - jstests/core/**/collection_truncate.js # emptycapped. + - jstests/core/**/compact_keeps_indexes.js # compact. + - jstests/core/**/currentop.js # uses fsync. + - jstests/core/**/dbhash.js # dbhash. + - jstests/core/**/dbhash2.js # dbhash. + - jstests/core/**/fsync.js # uses fsync. + - jstests/core/**/geo_s2cursorlimitskip.js # profiling. + - jstests/core/**/geo_update_btree2.js # notablescan. + - jstests/core/**/index9.js # "local" database. + - jstests/core/**/queryoptimizera.js # "local" database. + - jstests/core/**/stages*.js # stageDebug. + - jstests/core/**/startup_log.js # "local" database. + - jstests/core/**/tailable_cursor_invalidation.js # capped collections. + - jstests/core/**/tailable_getmore_batch_size.js # capped collections. + - jstests/core/**/tailable_skip_limit.js # capped collections. + - jstests/core/**/top.js # top. + - jstests/core/**/collection_truncate.js # emptycapped. + - jstests/core/**/index_many.js # renameCollection. + - jstests/core/**/fts_index2.js # renameCollection. + - jstests/core/**/list_indexes_invalidation.js # renameCollection. + - jstests/core/**/long_index_rename.js # renameCollection. + # The following tests fail because mongos behaves differently from mongod when testing certain + # functionality. The differences are in a comment next to the failing test. + - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos, SERVER-18047. + - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). + - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). + - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). + - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. + + # The following tests fail because they count indexes. 
These counts do not take into account the + # additional hashed shard key indexes that are automatically added by this passthrough. + - jstests/core/**/apitest_dbcollection.js + - jstests/core/**/bad_index_plugin.js + - jstests/core/**/create_indexes.js + - jstests/core/**/list_indexes_non_existent_ns.js + - jstests/core/**/mr_preserve_indexes.js + + # The following tests fail because they expect no databases to be created. However a DB is created + # automatically when we shard a collection. + - jstests/core/**/dbcase.js + - jstests/core/**/dbcase2.js + - jstests/core/**/no_db_created.js + - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. + # These tests fail because sharded clusters do not clean up correctly after failed index builds. + # See SERVER-33207 as an example. + - jstests/core/**/geo_borders.js + + # TODO: Remove after fixing SERVER-29449. executionStats.nReturned is incorrect for sharded + # queries with a limit or for distinct commands. + - jstests/core/**/distinct_index1.js + - jstests/core/**/explain1.js + - jstests/core/**/explain4.js + - jstests/core/**/sortk.js + + # TODO: Remove after fixing SERVER-32563. The implementation of explain for the count command is + # incorrect on sharded collections. + - jstests/core/**/explain_count.js + - jstests/core/**/explain_server_params.js + + # TODO SERVER-32311: These tests use plan stage helpers which can't handle sharded explain output. + - jstests/core/**/expr_index_use.js + - jstests/core/**/index_multikey.js + - jstests/core/**/optimized_match_explain.js + - jstests/core/**/sort_array.js + + # These tests create secondary unique: true indexes without the shard key prefix. + - jstests/core/**/batch_write_command_update.js + - jstests/core/**/batch_write_command_w0.js + - jstests/core/**/bulk_api_unordered.js + - jstests/core/**/bulk_api_ordered.js + - jstests/core/**/compound_index_max_fields.js + + # Test assertions expect a certain document is deleted whereas updateOnes, deleteOnes, and + # findAndModify without shard key can pick and modify any matching document. + - jstests/core/**/crud_api.js + + # Aggregation does not support $where. + - jstests/core/**/find_and_modify_concurrent_update.js + - jstests/core/**/find_and_modify_where.js + + # {multi: true} upsert requires specifying the full shard key. + - jstests/core/**/update_multi_halts.js + - jstests/core/**/updatei.js + - jstests/core/**/server1470.js + + # {multi: true} update testing behavior not applicable to sharded clusters + - jstests/core/**/updatej.js + - jstests/core/**/write_result.js + + # Expects to validate that findandmodify alias is not valid in the stable api, mongos + # implementation of findAndModify does not support that currently. + - jstests/core/**/api_version_find_and_modify.js + + # Capped collections cannot be sharded. + - jstests/core/**/capped*.js + - jstests/core/**/rename_collection_capped.js + + # Queries on a sharded collection are not able to be covered when they aren't on the shard key + # since the document needs to be fetched in order to apply the SHARDING_FILTER stage. 
+ - jstests/core/**/coveredIndex1.js + - jstests/core/**/coveredIndex2.js + - jstests/core/**/covered_index_compound_1.js + - jstests/core/**/covered_index_simple_1.js + - jstests/core/**/covered_index_simple_2.js + - jstests/core/**/covered_index_simple_3.js + - jstests/core/**/covered_index_sort_1.js + - jstests/core/**/covered_index_sort_3.js + - jstests/core/**/covered_index_sort_no_fetch_optimization.js + - jstests/core/**/covered_query_with_sort.js + - jstests/core/**/return_key.js + + # $near, $nearSphere are not supported in aggregate (which underlies the two phase write + # protocol). + - jstests/core/**/geo_update.js + - jstests/core/**/geo_update_dedup.js + + # These tests assert on query plans expected from unsharded collections. + - jstests/core/**/hashed_index_collation.js + - jstests/core/**/hashed_index_covered_queries.js + - jstests/core/**/hashed_index_sort.js + - jstests/core/**/index_bounds_code.js + - jstests/core/**/index_bounds_maxkey.js + - jstests/core/**/index_bounds_minkey.js + - jstests/core/**/index_check6.js + - jstests/core/**/index_decimal.js + - jstests/core/**/index_filter_commands_invalidate_plan_cache_entries.js + - jstests/core/**/wildcard_index_collation.js + - jstests/core/**/wildcard_index_count.js + - jstests/core/**/wildcard_index_covered_queries.js + - jstests/core/**/covered_multikey.js + - jstests/core/**/distinct_multikey_dotted_path.js + - jstests/core/**/distinct_with_hashed_index.js + - jstests/core/**/explain_multikey.js + - jstests/core/**/explain_plan_scores.js + - jstests/core/**/explain_shell_helpers.js + - jstests/core/**/explain_sort_type.js + - jstests/core/**/explain_winning_plan.js + - jstests/core/**/find_covered_projection.js + - jstests/core/**/or_to_in.js + - jstests/core/**/partial_index_logical.js + - jstests/core/**/cached_plan_trial_does_not_discard_work.js + - jstests/core/**/collation_plan_cache.js + - jstests/core/**/plan_cache*.js + - jstests/core/**/projection_dotted_paths.js + - jstests/core/**/regex6.js + - jstests/core/**/sbe_plan_cache_autoparameterize_ixscan.js + - jstests/core/**/index_bounds_object.js + - jstests/core/**/column_scan_skip_row_store_projection.js + - jstests/core/**/sbe_explain_rejected_plans.js + - jstests/core/**/sbe_plan_cache_autoparameterize_collscan.js + - jstests/core/**/sparse_index_supports_ne_null.js + - jstests/core/**/update_hint.js + - jstests/core/**/delete_hint.js + - jstests/core/**/find_and_modify_hint.js + - jstests/core/**/index_stats.js + - jstests/core/**/index_partial_read_ops.js + - jstests/core/**/explain_upsert.js + - jstests/core/**/explain_multi_plan.js + + # Test not applicable for sharded collections. + - jstests/core/**/add_skip_stage_before_fetch.js + + # Aggregation pipeline does not support the use of sharded collections as the output collection. + - jstests/core/**/explain_agg_write_concern.js + + # Can't shard collection with invalid dbName. + - jstests/core/**/invalid_db_name.js + + # Cannot output to a non-sharded collection because sharded collection exists already. + - jstests/core/**/mr_bigobject_replace.js + - jstests/core/**/mr_merge.js + - jstests/core/**/mr_reduce.js + + # Cannot implicitly shard accessed collections because mapReduce cannot replace a sharded + # collection as output. + - jstests/core/**/mr_compute_avg.js + - jstests/core/**/mr_replace_into_other_db.js + + # Cannot implicitly shard accessed collections because the "limit" option to the "mapReduce" + # command cannot be used on a sharded collection. 
+ - jstests/core/**/mr_sort.js + + # This test expects a function stored in the system.js collection to be available for an operation + # which may not be the case if it is implicitly sharded in a passthrough. + - jstests/core/**/mr_stored.js + - jstests/core/**/where_system_js.js + - jstests/core/**/system_js_access.js + - jstests/core/**/system_js_drop.js + + # Test expects failure, but two phase write protocol exits early with OK status if there are no + # matching documents. + - jstests/core/**/rename_operator.js + - jstests/core/**/field_name_validation.js + + # Operation is not supported on a view. + - jstests/core/views/**/*.js + + # Operation not supported in a transaction. + - jstests/core/**/create_collection_not_blocked_by_txn.js + - jstests/core/**/drop_collection_not_blocked_by_txn.js + - jstests/core/**/indexing_not_blocked_by_txn.js + - jstests/core/**/listcollections_autocomplete.js + - jstests/core/**/rename_collection_not_blocked_by_txn.js + + # $natural not supported in $sort for aggregation pipelines. + - jstests/core/**/natural_validation.js + + # Test expects no index to be created, but shardCollection implicitly creates one. + - jstests/core/**/timeseries_id_range.js + + # Test relies on keeping the test collection unsharded. + - jstests/core/**/command_let_variables_merge_only.js + - jstests/core/**/illegal_cmd_namespace.js + + # Cannot implicitly shard accessed collections because the error response from the shard about + # using the empty string as the out collection name is converted to an error and no longer retains + # the "code" property. + - jstests/core/**/commands_namespace_parsing.js + + # Cannot implicitly shard accessed collections because the "dataSize" command returns an + # "keyPattern must equal shard key" error response. + - jstests/core/**/datasize2.js + - jstests/core/**/datasize_validation.js + + # Cannot implicitly shard accessed collections because of following error: GridFS fs.chunks + # collection must be sharded on either {files_id:1} or {files_id:1, n:1} + - jstests/core/**/filemd5.js + + # This test assumes that timestamps inserted within the same second will have increasing increment + # values, which may not be the case if the inserts are into a sharded collection. + - jstests/core/**/ts1.js + + # Cannot implicitly shard accessed collections because the "splitVector" command cannot be run + # on a sharded collection + - jstests/core/**/splitvector.js + + # Profile can only be run against the admin database on mongos. + - jstests/core/txns/transactions_profiling.js + - jstests/core/txns/transactions_profiling_with_drops.js + + # Implicitly creates a database through a collection rename, which does not work in a sharded + # cluster. + - jstests/core/txns/transactions_block_ddl.js + + # Set the transactionLifetimeLimitSeconds parameter, which is not on mongos. + - jstests/core/txns/abort_expired_transaction.js + - jstests/core/txns/abort_transaction_thread_does_not_block_on_locks.js + - jstests/core/txns/kill_op_on_txn_expiry.js + + # Uses hangAfterCollectionInserts failpoint not available on mongos. + - jstests/core/txns/speculative_snapshot_includes_all_writes.js + + # View tests aren't expected to work when collections are implicitly sharded. + - jstests/core/txns/view_reads_in_transaction.js + + # Does not use the transactions shell helpers so afterClusterTime read concern is incorrectly + # attached to statements in a transaction beyond the first one. 
+ - jstests/core/txns/non_transactional_operations_on_session_with_transaction.js + + # These workloads explicitly create collections inside multi-document transactions. These are + # non-idempotent operations, and the implicit collection sharding logic upon collection access + # results in premature collection creation, causing the workloads to fail. + - jstests/core/txns/create_collection.js + - jstests/core/txns/create_collection_parallel.js + - jstests/core/txns/create_indexes.js + - jstests/core/txns/create_indexes_parallel.js + - jstests/core/txns/commands_in_txns_read_concern.js + + exclude_with_any_tags: + - assumes_unsharded_collection + - assumes_standalone_mongod + - assumes_against_mongod_not_mongos + # Tests tagged with the following will fail because they assume collections are not sharded. + - assumes_no_implicit_collection_creation_after_drop + - assumes_no_implicit_index_creation + - cannot_create_unique_index_when_using_hashed_shard_key + # system.profile collection doesn't exist on mongos. + - requires_profiling + # Transactions are not allowed to operate on capped collections. + - requires_capped + # Prepare is not a command on mongos. + - uses_prepare_transaction + +executor: + archive: + hooks: + - CheckReplDBHash + - CheckMetadataConsistencyInBackground + - ValidateCollections + config: + shell_options: + eval: load("jstests/libs/override_methods/implicitly_shard_accessed_collections.js") + hooks: + - class: CheckReplDBHash + - class: CheckMetadataConsistencyInBackground + - class: ValidateCollections + - class: CheckOrphansDeleted + - class: CleanEveryN + n: 20 + fixture: + class: ShardedClusterFixture + num_shards: 2 + enable_balancer: false + mongos_options: + set_parameters: + enableTestCommands: 1 + mongod_options: + set_parameters: + enableTestCommands: 1 diff --git a/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml b/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml index af1def051dd37..20b294caa4c35 100644 --- a/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml +++ b/buildscripts/resmokeconfig/suites/sharded_jscore_txns.yml @@ -35,11 +35,12 @@ executor: - ValidateCollections config: shell_options: - eval: "testingReplication = true;" + eval: "globalThis.testingReplication = true;" hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set # node. - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml b/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml index 3e2fc25d61e5e..440cf2c06543a 100644 --- a/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml +++ b/buildscripts/resmokeconfig/suites/sharded_jscore_txns_sharded_collections.yml @@ -50,12 +50,13 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/implicitly_shard_accessed_collections.js'); hooks: # We don't execute dbHash or oplog consistency checks since there is only a single replica set # node. 
- class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml index 1d62448b10db7..23860c730f8cb 100644 --- a/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharded_multi_stmt_txn_jscore_passthrough.yml @@ -269,7 +269,7 @@ executor: config: shell_options: eval: >- - var testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/network_error_and_txn_override.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -290,6 +290,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharded_retryable_writes_downgrade.yml b/buildscripts/resmokeconfig/suites/sharded_retryable_writes_downgrade.yml index daa760ca855ed..fcf6a1b40d79c 100644 --- a/buildscripts/resmokeconfig/suites/sharded_retryable_writes_downgrade.yml +++ b/buildscripts/resmokeconfig/suites/sharded_retryable_writes_downgrade.yml @@ -3,8 +3,6 @@ test_kind: js_test selector: roots: - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js exclude_files: # Transactions do not support retryability of individual operations. # TODO: Remove this once it is supported (SERVER-33952). @@ -74,6 +72,9 @@ selector: # TODO SERVER-61050 - jstests/core/timeseries/timeseries_merge.js + # Explain doesn't support read concern majority in sharding. + - jstests/core/**/command_let_variables.js + exclude_with_any_tags: - assumes_against_mongod_not_mongos - assumes_standalone_mongod @@ -129,8 +130,6 @@ selector: - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize ## The next tag corresponds to long running-operations, as they may exhaust their number # of retries and result in a network error being thrown. 
- operations_longer_than_stepdown_interval @@ -154,9 +153,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); @@ -189,6 +188,7 @@ executor: - class: CheckReplOplogs - class: CheckReplDBHash - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharding_api_strict_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_api_strict_passthrough.yml index ca81b50ad6cf7..1154afc753e8d 100644 --- a/buildscripts/resmokeconfig/suites/sharding_api_strict_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharding_api_strict_passthrough.yml @@ -36,6 +36,6 @@ executor: config: shell_options: eval: >- - testingReplication = false; + globalThis.testingReplication = false; load('jstests/libs/override_methods/set_api_strict.js'); nodb: '' diff --git a/buildscripts/resmokeconfig/suites/sharding_api_version_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_api_version_jscore_passthrough.yml index 300a779e6ff07..d5a06c7eb1e4e 100644 --- a/buildscripts/resmokeconfig/suites/sharding_api_version_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharding_api_version_jscore_passthrough.yml @@ -87,7 +87,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_api_version.js'); hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied @@ -97,6 +97,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharding_auth.yml b/buildscripts/resmokeconfig/suites/sharding_auth.yml index 2b2862e5a8e48..26d78a585f79b 100644 --- a/buildscripts/resmokeconfig/suites/sharding_auth.yml +++ b/buildscripts/resmokeconfig/suites/sharding_auth.yml @@ -32,6 +32,7 @@ selector: - jstests/sharding/migration_critical_section_concurrency.js # SERVER-21713 # Runs with auth enabled. - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js + - jstests/sharding/cluster_time_across_add_shard.js # Skip because this suite implicitly authenticates as __system, which allows bypassing user write # blocking. - jstests/sharding/set_user_write_block_mode.js diff --git a/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml deleted file mode 100644 index 459b2debd5a02..0000000000000 --- a/buildscripts/resmokeconfig/suites/sharding_auth_catalog_shard.yml +++ /dev/null @@ -1,59 +0,0 @@ -# Section that is ignored by resmoke.py. -config_variables: -- &keyFile jstests/libs/authTestsKey -- &keyFileData Thiskeyisonlyforrunningthesuitewithauthenticationdontuseitinanytestsdirectly - -test_kind: js_test - -selector: - roots: - - jstests/sharding/**/*.js - exclude_files: - - jstests/sharding/**/libs/**/*.js - # Skip any tests that run with auth explicitly. 
- - jstests/sharding/*[aA]uth*.js - - jstests/sharding/analyze_shard_key/*[aA]uth*.js - - jstests/sharding/query/*[aA]uth*.js - - jstests/sharding/change_streams/*[aA]uth*.js - - - jstests/sharding/advance_cluster_time_action_type.js - - jstests/sharding/query/aggregation_currentop.js - - jstests/sharding/internal_txns/internal_client_restrictions.js - - jstests/sharding/kill_sessions.js - # Skip these additional tests when running with auth enabled. - - jstests/sharding/parallel.js - # Skip the testcases that do not have auth bypass when running ops in parallel. - - jstests/sharding/migration_ignore_interrupts_1.js # SERVER-21713 - - jstests/sharding/migration_ignore_interrupts_2.js # SERVER-21713 - - jstests/sharding/migration_server_status.js # SERVER-21713 - - jstests/sharding/migration_sets_fromMigrate_flag.js # SERVER-21713 - - jstests/sharding/migration_with_source_ops.js # SERVER-21713 - - jstests/sharding/movechunk_parallel.js # SERVER-21713 - - jstests/sharding/migration_critical_section_concurrency.js # SERVER-21713 - # Runs with auth enabled. - - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js - # Skip because this suite implicitly authenticates as __system, which allows bypassing user write - # blocking. - - jstests/sharding/set_user_write_block_mode.js - exclude_with_any_tags: - - catalog_shard_incompatible - - temporary_catalog_shard_incompatible - -executor: - archive: - tests: - - jstests/sharding/*reshard*.js - config: - shell_options: - global_vars: - TestData: - auth: true - authMechanism: SCRAM-SHA-256 - catalogShard: true - keyFile: *keyFile - keyFileData: *keyFileData - roleGraphInvalidationIsFatal: true - # TODO (SERVER-74534): Enable the metadata consistency check when it will work with - # co-located configsvr. - skipCheckMetadataConsistency: true - nodb: '' diff --git a/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml deleted file mode 100644 index b988b4581fdf3..0000000000000 --- a/buildscripts/resmokeconfig/suites/sharding_catalog_shard.yml +++ /dev/null @@ -1,24 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/sharding/**/*.js - exclude_files: - - jstests/sharding/**/libs/**/*.js - exclude_with_any_tags: - - catalog_shard_incompatible - - temporary_catalog_shard_incompatible - -executor: - archive: - tests: - - jstests/sharding/*reshard*.js - config: - shell_options: - global_vars: - TestData: - catalogShard: true - # TODO (SERVER-74534): Enable the metadata consistency check when it will work with - # co-located configsvr. 
- skipCheckMetadataConsistency: true - nodb: '' diff --git a/buildscripts/resmokeconfig/suites/sharding_config_shard.yml b/buildscripts/resmokeconfig/suites/sharding_config_shard.yml new file mode 100644 index 0000000000000..c92994bc7d3f1 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/sharding_config_shard.yml @@ -0,0 +1,20 @@ +test_kind: js_test + +selector: + roots: + - jstests/sharding/**/*.js + exclude_files: + - jstests/sharding/**/libs/**/*.js + exclude_with_any_tags: + - config_shard_incompatible + +executor: + archive: + tests: + - jstests/sharding/*reshard*.js + config: + shell_options: + global_vars: + TestData: + configShard: true + nodb: '' diff --git a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml deleted file mode 100644 index a4468831cc0f6..0000000000000 --- a/buildscripts/resmokeconfig/suites/sharding_continuous_config_stepdown.yml +++ /dev/null @@ -1,260 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/sharding/**/*.js - exclude_files: - - jstests/sharding/**/libs/**/*.js - # Skip any tests that run with auth explicitly. - # Auth tests require authentication on the stepdown thread's connection - - jstests/sharding/*[aA]uth*.js - - jstests/sharding/analyze_shard_key/*[aA]uth*.js - - jstests/sharding/query/*[aA]uth*.js - - jstests/sharding/change_streams/*[aA]uth*.js - - jstests/sharding/internal_txns/internal_client_restrictions.js - - jstests/sharding/internal_txns/non_retryable_writes_during_migration.js - - jstests/sharding/internal_txns/retry_on_transient_error_validation.js - - jstests/sharding/internal_txns/retryable_findAndModify_during_migration_oplog.js - - jstests/sharding/internal_txns/retryable_findAndModify_during_migration_side_coll.js - - jstests/sharding/internal_txns/retryable_writes_aborted_during_migration.js - - jstests/sharding/internal_txns/retryable_writes_committed_during_migration.js - - - jstests/sharding/localhostAuthBypass.js - - jstests/sharding/kill_sessions.js - - jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js - - jstests/sharding/query/mrShardedOutputAuth.js - - jstests/sharding/query/aggregation_currentop.js - - jstests/sharding/advance_cluster_time_action_type.js - - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js - # Count/write/aggregate commands against the config shard do not support retries yet - - jstests/sharding/addshard1.js - - jstests/sharding/addshard2.js - - jstests/sharding/basic_merge.js - - jstests/sharding/count1.js - - jstests/sharding/count2.js - - jstests/sharding/query/current_op_with_drop_shard.js - - jstests/sharding/cursor1.js - - jstests/sharding/diffservers1.js - - jstests/sharding/findandmodify1.js - - jstests/sharding/query/geo_near_sharded.js - - jstests/sharding/hash_basic.js - - jstests/sharding/hash_shard1.js - - jstests/sharding/hash_shard_non_empty.js - - jstests/sharding/hash_shard_num_chunks.js - - jstests/sharding/hash_single_shard.js - - jstests/sharding/key_many.js - - jstests/sharding/key_string.js - - jstests/sharding/large_chunk.js - - jstests/sharding/limit_push.js - - jstests/sharding/merge_with_drop_shard.js - - jstests/sharding/merge_with_move_primary.js - - jstests/sharding/move_chunk_basic.js - - jstests/sharding/movePrimary1.js - - jstests/sharding/names.js - - jstests/sharding/prefix_shard_key.js - - jstests/sharding/query_config.js - - jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js - - jstests/sharding/remove1.js 
- - jstests/sharding/rename_across_mongos.js - - jstests/sharding/shard2.js - - jstests/sharding/shard3.js - - jstests/sharding/shard_collection_basic.js - - jstests/sharding/tag_range.js - - jstests/sharding/count_config_servers.js - - jstests/sharding/split_large_key.js - - jstests/sharding/balancer_window.js - - jstests/sharding/zone_changes_compound.js - - jstests/sharding/zone_changes_hashed.js - - jstests/sharding/zone_changes_range.js - # No retries on direct writes to the config/admin databases on the config servers - - jstests/sharding/listDatabases.js - - jstests/sharding/bulk_insert.js - - jstests/sharding/printShardingStatus.js - - jstests/sharding/refresh_sessions.js - - jstests/sharding/shard_collection_existing_zones.js - - jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js - # Balancer writes (direct write to config database with no retries) - - jstests/sharding/convert_to_and_from_sharded.js - - jstests/sharding/remove2.js - - jstests/sharding/features3.js - - jstests/sharding/in_memory_sort_limit.js - - jstests/sharding/parallel.js - - jstests/sharding/migrateBig.js - - jstests/sharding/sharding_rs1.js - - jstests/sharding/move_primary_fails_without_database_version.js - # Calls the config server primary directly (not through mongos) - - jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js - - jstests/sharding/analyze_shard_key/invalid_config_docs.js - - jstests/sharding/analyze_shard_key/persist_sampled_diffs.js - - jstests/sharding/analyze_shard_key/persist_sampled_read_queries.js - - jstests/sharding/analyze_shard_key/persist_sampled_write_queries.js - - jstests/sharding/analyze_shard_key/refresh_sample_rates.js - - jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js - - jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js - - jstests/sharding/read_after_optime.js - - jstests/sharding/server_status.js - - jstests/sharding/drop_configdb.js - - jstests/sharding/shard_identity_config_update.js - - jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js - - jstests/sharding/key_rotation.js - - jstests/sharding/keys_rotation_interval_sec.js - - jstests/sharding/migration_coordinator_basic.js # sets a failpoint on the config primary - - jstests/sharding/migration_coordinator_abort_failover.js # sets a failpoint on the config primary - - jstests/sharding/migration_coordinator_commit_failover.js # sets a failpoint on the config primary - - jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js - - jstests/sharding/move_chunk_insert_with_write_retryability.js - - jstests/sharding/move_chunk_remove_with_write_retryability.js - - jstests/sharding/move_chunk_update_with_write_retryability.js - - jstests/sharding/refine_collection_shard_key_atomic.js # sets a failpoint on the config primary - - jstests/sharding/restart_transactions.js - - jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js - - jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js - - jstests/sharding/txn_two_phase_commit_failover.js - - jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js - # Runs setShardVersion/getShardVersion against the config server and we don't support retries - # for this command - - jstests/sharding/major_version_check.js - # Runs replSetGetStatus -- via awaitLastOpCommitted -- directly against the config server: - # retries aren't supported. 
- - jstests/sharding/coll_epoch_test1.js - - jstests/sharding/move_stale_mongos.js - - jstests/sharding/shard4.js - - jstests/sharding/shard5.js - - jstests/sharding/split_stale_mongos.js - - jstests/sharding/stale_mongos_updates_and_removes.js - - jstests/sharding/zero_shard_version.js - # Already stop or blackholes the primary of the CSRS config shard - - jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js - - jstests/sharding/all_config_servers_blackholed_from_mongos.js - - jstests/sharding/batch_write_command_sharded.js - - jstests/sharding/config_rs_no_primary.js - - jstests/sharding/startup_with_all_configs_down.js - - jstests/sharding/lagged_config_secondary.js - - jstests/sharding/autodiscover_config_rs_from_secondary.js - - jstests/sharding/rs_stepdown_and_pooling.js - - jstests/sharding/mongos_no_replica_set_refresh.js - - jstests/sharding/primary_config_server_blackholed_from_mongos.js - - jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js - - jstests/sharding/health_monitor/config_server_health_observer_crash.js - # Nothing is affected by config server step down - - jstests/sharding/basic_sharding_params.js - # ShardingTest is never used, so continuous step down thread never starts - - jstests/sharding/config_rs_change.js - - jstests/sharding/empty_cluster_init.js - # Temporarily denylisted until more robust - # Expects same secondaries for entire test - - jstests/sharding/commands_that_write_accept_wc_configRS.js - - jstests/sharding/commands_that_write_accept_wc_shards.js - - jstests/sharding/move_chunk_wc.js - # Expects that connections to all shards/config servers will never close - - jstests/sharding/shard6.js - # Stepping down the primary can make the balancer rerun a migration that was designed to fail - # earlier, but can potentially pass or have different side effects on the second try - - jstests/sharding/migration_ignore_interrupts_1.js - - jstests/sharding/migration_sets_fromMigrate_flag.js - - jstests/sharding/migration_waits_for_majority_commit.js - # listCollections is not retryable - - jstests/sharding/sessions_collection_auto_healing.js - # shardCollection is not retryable - - jstests/sharding/shard_collection_config_db.js - # creates collection, does movePrimary, then shards the collection and moves a chunk to the old - # primary (SERVER-31909) - - jstests/sharding/mongos_validate_writes.js - # Test expects a specific chunk distribution after shardCollection and it can be broken when - # a step down occurs. - - jstests/sharding/regex_targeting.js - # Calls movePrimary after data has been inserted into an unsharded collection, so will fail if - # a stepdown causes the command to be sent again. - - jstests/sharding/move_primary_clone.js - - jstests/sharding/mongos_validate_writes.js - - jstests/sharding/movePrimary1.js - # Asserts that the _flushDatabaseCacheUpdates at the end of _configsvrCreateDatabase is sent, but - # it may not be sent if the config server primary steps down just before sending it. 
- - jstests/sharding/database_versioning_all_commands.js - # Calls removeShard/removeshard which is not idempotent and these tests expect it to be run an exact number of times - - jstests/sharding/addshard5.js - - jstests/sharding/auth_add_shard.js - - jstests/sharding/remove3.js - - jstests/sharding/authCommands.js - # - jstests/sharding/addshard2.js - # - jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js - # - jstests/sharding/convert_to_and_from_sharded.js - # - jstests/sharding/names.js - # - jstests/sharding/remove1.js - # - jstests/sharding/remove2.js - - # Moves a chunk before continuing a transaction, which can lead to snapshot errors if the - # CSRS failovers are sufficiently slow. - - jstests/sharding/transactions_reject_writes_for_moved_chunks.js - - jstests/sharding/snapshot_reads_target_at_point_in_time.js - # Tests that rely on shards becoming aware of collection drops regardless of config stepdowns. - # (SERVER-34760) - - jstests/sharding/merge_requires_unique_index.js - - jstests/sharding/query/merge_stale_on_fields.js - - jstests/sharding/query/out_fails_to_replace_sharded_collection.js - # In this suite the cluster may end up in a state where each shard believes the - # collection is sharded and the mongos believes it is unsharded. $merge is not - # prepared to work correctly in this situation. This should be fixed by a future - # improvement in sharding infrastructure, and can be undenylisted by SERVER-40172. - - jstests/sharding/query/merge_to_existing.js - # Sets a failpoint on find commands which can lead to a hang when a config steps down. - - jstests/sharding/sharding_statistics_server_status.js - # setShardVersion is not robust during config server stepdown. - - jstests/sharding/mongos_no_detect_sharding.js - # Runs commands directly on the config server primary and is not robust to the primary changing. - - jstests/sharding/read_write_concern_defaults_commands_api.js - - jstests/sharding/read_write_concern_defaults_propagation.js - - jstests/sharding/live_shard_startup_recovery_config_server.js - - jstests/sharding/live_shard_logical_initial_sync_config_server.js - - # On stepdown there is not gurantee that changelog entries have been inserted [SERVER-45655] - - jstests/sharding/refine_collection_shard_key_changelog.js - # This is expected to fail if the config server steps down during moveChunk. - - jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js - - jstests/sharding/move_chunk_critical_section_non_internal_client_abort.js - # Runs commands on mongos which target the primary configsvr, and then checks the outcome using - # profiling/logging (on the configsvr primary), so cannot tolerate the configsvr primary changing. - - jstests/sharding/read_write_concern_defaults_application.js - - # SERVER-48537 addShard is not idempotent for retries - - jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js - - jstests/sharding/move_primary_with_writes.js - - # Expects same CSRS primary and shard primary throughout the test - - jstests/sharding/api_params_nontransaction_sharded.js - - jstests/sharding/api_params_nontransaction_unsharded.js - - jstests/sharding/api_params_transaction_sharded.js - - jstests/sharding/api_params_transaction_unsharded.js - - # TODO SERVER-51495: Re-enable these tests after reshardCollection is resilient to config server - # primary failovers. 
- - jstests/sharding/*reshard*.js - - # SERVER-51805 splitChunk op is not idempotent - - jstests/sharding/mongos_get_shard_version.js - - # Expects reshardCollection executes without config server stepdown - - jstests/sharding/shard_encrypted_collection.js - - # Runs commands against mongos which target the config server primary and may fail with - # FailedToSatisfyReadPreference when electing a new primary of the config server replica - # set takes a while. - - jstests/sharding/move_chunk_respects_maxtimems.js - - # TODO (SERVER-75863): Investigate the timeout issue for read_and_write_distribution.js in the - # config stepdown suite - - jstests/sharding/analyze_shard_key/read_and_write_distribution.js - - exclude_with_any_tags: - - does_not_support_stepdowns - -executor: - config: - shell_options: - global_vars: - TestData: - # TODO: SERVER-45994 remove - skipCheckingCatalogCacheConsistencyWithShardingCatalog: true - skipCheckOrphans: true - eval: "load('jstests/libs/override_methods/sharding_continuous_config_stepdown.js');" - nodb: '' diff --git a/buildscripts/resmokeconfig/suites/sharding_csrs_continuous_config_stepdown.yml b/buildscripts/resmokeconfig/suites/sharding_csrs_continuous_config_stepdown.yml new file mode 100644 index 0000000000000..b77b79ea6a455 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/sharding_csrs_continuous_config_stepdown.yml @@ -0,0 +1,257 @@ +test_kind: js_test + +selector: + roots: + - jstests/sharding/**/*.js + exclude_files: + - jstests/sharding/**/libs/**/*.js + # Skip any tests that run with auth explicitly. + # Auth tests require authentication on the stepdown thread's connection + - jstests/sharding/*[aA]uth*.js + - jstests/sharding/analyze_shard_key/*[aA]uth*.js + - jstests/sharding/query/*[aA]uth*.js + - jstests/sharding/change_streams/*[aA]uth*.js + - jstests/sharding/cluster_time_across_add_shard.js + + - jstests/sharding/internal_txns/internal_client_restrictions.js + - jstests/sharding/internal_txns/non_retryable_writes_during_migration.js + - jstests/sharding/internal_txns/retry_on_transient_error_validation.js + - jstests/sharding/internal_txns/retryable_findAndModify_during_migration_side_coll.js + - jstests/sharding/internal_txns/retryable_writes_aborted_during_migration.js + - jstests/sharding/internal_txns/retryable_writes_committed_during_migration.js + + - jstests/sharding/localhostAuthBypass.js + - jstests/sharding/kill_sessions.js + - jstests/sharding/mongos_rs_auth_shard_failure_tolerance.js + - jstests/sharding/query/mrShardedOutputAuth.js + - jstests/sharding/query/aggregation_currentop.js + - jstests/sharding/advance_cluster_time_action_type.js + - jstests/sharding/mongod_returns_no_cluster_time_without_keys.js + # Count/write/aggregate commands against the config shard do not support retries yet + - jstests/sharding/addshard1.js + - jstests/sharding/addshard2.js + - jstests/sharding/basic_merge.js + - jstests/sharding/count1.js + - jstests/sharding/count2.js + - jstests/sharding/query/current_op_with_drop_shard.js + - jstests/sharding/cursor1.js + - jstests/sharding/diffservers1.js + - jstests/sharding/findandmodify1.js + - jstests/sharding/query/geo_near_sharded.js + - jstests/sharding/hash_basic.js + - jstests/sharding/hash_shard1.js + - jstests/sharding/hash_shard_non_empty.js + - jstests/sharding/hash_shard_num_chunks.js + - jstests/sharding/hash_single_shard.js + - jstests/sharding/key_many.js + - jstests/sharding/key_string.js + - jstests/sharding/large_chunk.js + - jstests/sharding/limit_push.js + - 
jstests/sharding/merge_with_drop_shard.js + - jstests/sharding/merge_with_move_primary.js + - jstests/sharding/move_chunk_basic.js + - jstests/sharding/movePrimary1.js + - jstests/sharding/names.js + - jstests/sharding/prefix_shard_key.js + - jstests/sharding/query_config.js + - jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js + - jstests/sharding/remove1.js + - jstests/sharding/rename_across_mongos.js + - jstests/sharding/shard2.js + - jstests/sharding/shard3.js + - jstests/sharding/shard_collection_basic.js + - jstests/sharding/tag_range.js + - jstests/sharding/count_config_servers.js + - jstests/sharding/split_large_key.js + - jstests/sharding/balancer_window.js + - jstests/sharding/zone_changes_compound.js + - jstests/sharding/zone_changes_hashed.js + - jstests/sharding/zone_changes_range.js + # No retries on direct writes to the config/admin databases on the config servers + - jstests/sharding/listDatabases.js + - jstests/sharding/bulk_insert.js + - jstests/sharding/printShardingStatus.js + - jstests/sharding/refresh_sessions.js + - jstests/sharding/shard_collection_existing_zones.js + - jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js + # Balancer writes (direct write to config database with no retries) + - jstests/sharding/convert_to_and_from_sharded.js + - jstests/sharding/remove2.js + - jstests/sharding/features3.js + - jstests/sharding/in_memory_sort_limit.js + - jstests/sharding/parallel.js + - jstests/sharding/migrateBig.js + - jstests/sharding/sharding_rs1.js + - jstests/sharding/move_primary_fails_without_database_version.js + # Calls the config server primary directly (not through mongos) + - jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js + - jstests/sharding/analyze_shard_key/invalid_config_docs.js + - jstests/sharding/analyze_shard_key/persist_sampled_diffs.js + - jstests/sharding/analyze_shard_key/persist_sampled_read_queries.js + - jstests/sharding/analyze_shard_key/persist_sampled_write_queries.js + - jstests/sharding/analyze_shard_key/refresh_sample_rates.js + - jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js + - jstests/sharding/read_after_optime.js + - jstests/sharding/server_status.js + - jstests/sharding/drop_configdb.js + - jstests/sharding/shard_identity_config_update.js + - jstests/sharding/mongos_does_not_gossip_logical_time_without_keys.js + - jstests/sharding/key_rotation.js + - jstests/sharding/keys_rotation_interval_sec.js + - jstests/sharding/migration_coordinator_basic.js # sets a failpoint on the config primary + - jstests/sharding/migration_coordinator_abort_failover.js # sets a failpoint on the config primary + - jstests/sharding/migration_coordinator_commit_failover.js # sets a failpoint on the config primary + - jstests/sharding/move_chunk_find_and_modify_with_write_retryability.js + - jstests/sharding/move_chunk_insert_with_write_retryability.js + - jstests/sharding/move_chunk_remove_with_write_retryability.js + - jstests/sharding/move_chunk_update_with_write_retryability.js + - jstests/sharding/refine_collection_shard_key_atomic.js # sets a failpoint on the config primary + - jstests/sharding/restart_transactions.js + - jstests/sharding/txn_two_phase_commit_commands_basic_requirements.js + - jstests/sharding/txn_two_phase_commit_coordinator_shutdown_and_restart.js + - jstests/sharding/txn_two_phase_commit_failover.js + - jstests/sharding/txn_two_phase_commit_wait_for_majority_commit_after_stepup.js + # Runs setShardVersion/getShardVersion against the 
config server and we don't support retries + # for this command + - jstests/sharding/major_version_check.js + # Runs replSetGetStatus -- via awaitLastOpCommitted -- directly against the config server: + # retries aren't supported. + - jstests/sharding/coll_epoch_test1.js + - jstests/sharding/move_stale_mongos.js + - jstests/sharding/shard4.js + - jstests/sharding/shard5.js + - jstests/sharding/split_stale_mongos.js + - jstests/sharding/stale_mongos_updates_and_removes.js + - jstests/sharding/zero_shard_version.js + # Already stop or blackholes the primary of the CSRS config shard + - jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js + - jstests/sharding/all_config_servers_blackholed_from_mongos.js + - jstests/sharding/batch_write_command_sharded.js + - jstests/sharding/config_rs_no_primary.js + - jstests/sharding/startup_with_all_configs_down.js + - jstests/sharding/lagged_config_secondary.js + - jstests/sharding/autodiscover_config_rs_from_secondary.js + - jstests/sharding/rs_stepdown_and_pooling.js + - jstests/sharding/mongos_no_replica_set_refresh.js + - jstests/sharding/primary_config_server_blackholed_from_mongos.js + - jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js + - jstests/sharding/health_monitor/config_server_health_observer_crash.js + # Nothing is affected by config server step down + - jstests/sharding/basic_sharding_params.js + # ShardingTest is never used, so continuous step down thread never starts + - jstests/sharding/config_rs_change.js + - jstests/sharding/empty_cluster_init.js + # Temporarily denylisted until more robust + # Expects same secondaries for entire test + - jstests/sharding/commands_that_write_accept_wc_configRS.js + - jstests/sharding/commands_that_write_accept_wc_shards.js + - jstests/sharding/move_chunk_wc.js + # Expects that connections to all shards/config servers will never close + - jstests/sharding/shard6.js + # Stepping down the primary can make the balancer rerun a migration that was designed to fail + # earlier, but can potentially pass or have different side effects on the second try + - jstests/sharding/migration_ignore_interrupts_1.js + - jstests/sharding/migration_sets_fromMigrate_flag.js + - jstests/sharding/migration_waits_for_majority_commit.js + # listCollections is not retryable + - jstests/sharding/sessions_collection_auto_healing.js + # shardCollection is not retryable + - jstests/sharding/shard_collection_config_db.js + # creates collection, does movePrimary, then shards the collection and moves a chunk to the old + # primary (SERVER-31909) + - jstests/sharding/mongos_validate_writes.js + # Test expects a specific chunk distribution after shardCollection and it can be broken when + # a step down occurs. + - jstests/sharding/regex_targeting.js + # Calls movePrimary after data has been inserted into an unsharded collection, so will fail if + # a stepdown causes the command to be sent again. + - jstests/sharding/move_primary_clone.js + - jstests/sharding/mongos_validate_writes.js + - jstests/sharding/movePrimary1.js + # Asserts that the _flushDatabaseCacheUpdates at the end of _configsvrCreateDatabase is sent, but + # it may not be sent if the config server primary steps down just before sending it. 
+ - jstests/sharding/database_versioning_all_commands.js + # Calls removeShard/removeshard which is not idempotent and these tests expect it to be run an exact number of times + - jstests/sharding/addshard5.js + - jstests/sharding/auth_add_shard.js + - jstests/sharding/remove3.js + - jstests/sharding/authCommands.js + # - jstests/sharding/addshard2.js + # - jstests/sharding/configsvr_metadata_commands_require_majority_write_concern.js + # - jstests/sharding/convert_to_and_from_sharded.js + # - jstests/sharding/names.js + # - jstests/sharding/remove1.js + # - jstests/sharding/remove2.js + + # Moves a chunk before continuing a transaction, which can lead to snapshot errors if the + # CSRS failovers are sufficiently slow. + - jstests/sharding/transactions_reject_writes_for_moved_chunks.js + - jstests/sharding/snapshot_reads_target_at_point_in_time.js + # Tests that rely on shards becoming aware of collection drops regardless of config stepdowns. + # (SERVER-34760) + - jstests/sharding/merge_requires_unique_index.js + - jstests/sharding/query/merge_stale_on_fields.js + - jstests/sharding/query/out_fails_to_replace_sharded_collection.js + # Sets a failpoint on find commands which can lead to a hang when a config steps down. + - jstests/sharding/sharding_statistics_server_status.js + # setShardVersion is not robust during config server stepdown. + - jstests/sharding/mongos_no_detect_sharding.js + # Runs commands directly on the config server primary and is not robust to the primary changing. + - jstests/sharding/read_write_concern_defaults_commands_api.js + - jstests/sharding/read_write_concern_defaults_propagation.js + - jstests/sharding/live_shard_startup_recovery_config_server.js + - jstests/sharding/live_shard_logical_initial_sync_config_server.js + + # On stepdown there is no guarantee that changelog entries have been inserted [SERVER-45655] + - jstests/sharding/refine_collection_shard_key_changelog.js + # This is expected to fail if the config server steps down during moveChunk. + - jstests/sharding/index_operations_abort_concurrent_outgoing_migrations.js + - jstests/sharding/move_chunk_critical_section_non_internal_client_abort.js + # Runs commands on mongos which target the primary configsvr, and then checks the outcome using + # profiling/logging (on the configsvr primary), so cannot tolerate the configsvr primary changing. + - jstests/sharding/read_write_concern_defaults_application.js + + # SERVER-48537 addShard is not idempotent for retries + - jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js + - jstests/sharding/move_primary_with_writes.js + + # Expects same CSRS primary and shard primary throughout the test + - jstests/sharding/api_params_nontransaction_sharded.js + - jstests/sharding/api_params_nontransaction_unsharded.js + - jstests/sharding/api_params_transaction_sharded.js + - jstests/sharding/api_params_transaction_unsharded.js + + # TODO SERVER-51495: Re-enable these tests after reshardCollection is resilient to config server + # primary failovers. + - jstests/sharding/*reshard*.js + + # SERVER-51805 splitChunk op is not idempotent + - jstests/sharding/mongos_get_shard_version.js + + # Expects reshardCollection to execute without a config server stepdown + - jstests/sharding/shard_encrypted_collection.js + + # Runs commands against mongos which target the config server primary and may fail with + # FailedToSatisfyReadPreference when electing a new primary of the config server replica + # set takes a while.
+ - jstests/sharding/move_chunk_respects_maxtimems.js + + # This test verifies that the number of queries that each mongos or shardsvr mongod samples is + # proportional to the number of queries it routes. This is enforced via the sample rate assignment + # by the configsvr primary based on the traffic distribution information it has in memory. So the + # test doesn't pass reliably when there is continuous stepdown on the config server. + - jstests/sharding/analyze_shard_key/sample_rates_sharded.js + + exclude_with_any_tags: + - does_not_support_stepdowns + +executor: + config: + shell_options: + global_vars: + TestData: + # TODO: SERVER-45994 remove + skipCheckingCatalogCacheConsistencyWithShardingCatalog: true + skipCheckOrphans: true + eval: "load('jstests/libs/override_methods/sharding_csrs_continuous_config_stepdown.js');" + nodb: '' diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml index a16c47a3e1f01..4608fd9684827 100644 --- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough.yml @@ -56,6 +56,7 @@ executor: - class: CheckReplDBHash - class: CheckMetadataConsistencyInBackground - class: ValidateCollections + - class: CheckOrphansDeleted - class: CleanEveryN n: 20 fixture: diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml deleted file mode 100644 index 9550b5331d934..0000000000000 --- a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_catalog_shard.yml +++ /dev/null @@ -1,72 +0,0 @@ -test_kind: js_test - -selector: - roots: - - jstests/core/**/*.js - - jstests/fle2/**/*.js - - src/mongo/db/modules/*/jstests/fle2/**/*.js - exclude_files: - # These tests are run in sharded_jscore_txns. - - jstests/core/txns/**/*.js - # The following tests fail because a certain command or functionality is not supported on - # mongos. This command or functionality is placed in a comment next to the failing test. - - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. - - jstests/core/**/check_shard_index.js # checkShardingIndex. - - jstests/core/**/collection_truncate.js # emptycapped. - - jstests/core/**/compact_keeps_indexes.js # compact. - - jstests/core/**/currentop.js # uses fsync. - - jstests/core/**/dbhash.js # dbhash. - - jstests/core/**/dbhash2.js # dbhash. - - jstests/core/**/fsync.js # uses fsync. - - jstests/core/**/geo_s2cursorlimitskip.js # profiling. - - jstests/core/**/geo_update_btree2.js # notablescan. - - jstests/core/**/index9.js # "local" database. - - jstests/core/**/queryoptimizera.js # "local" database. - - jstests/core/**/stages*.js # stageDebug. - - jstests/core/**/startup_log.js # "local" database. - - jstests/core/**/top.js # top. - # The following tests fail because mongos behaves differently from mongod when testing certain - # functionality. The differences are in a comment next to the failing test. - - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos. - - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). - - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). - - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). 
- - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. - - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. - - jstests/core/**/or_to_in.js # queryPlanner in different spot in explain() - # The following tests fail because of divergent dropCollection behavior between standalones and - # sharded clusters. These tests expect a second drop command to error, whereas in sharded clusters - # we expect a second drop to return status OK. - - jstests/core/**/explain_upsert.js - - exclude_with_any_tags: - - assumes_standalone_mongod - - assumes_against_mongod_not_mongos - # system.profile collection doesn't exist on mongos. - - requires_profiling - - catalog_shard_incompatible - - temporary_catalog_shard_incompatible - -executor: - archive: - hooks: - - CheckReplDBHash - - ValidateCollections - config: {} - hooks: - - class: CheckReplDBHash - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: ShardedClusterFixture - catalog_shard: "any" - mongos_options: - set_parameters: - enableTestCommands: 1 - mongod_options: - set_parameters: - enableTestCommands: 1 - num_rs_nodes_per_shard: 1 - enable_sharding: - - test diff --git a/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_config_shard.yml b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_config_shard.yml new file mode 100644 index 0000000000000..99cfc9c7e1a71 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/sharding_jscore_passthrough_with_config_shard.yml @@ -0,0 +1,72 @@ +test_kind: js_test + +selector: + roots: + - jstests/core/**/*.js + - jstests/fle2/**/*.js + - src/mongo/db/modules/*/jstests/fle2/**/*.js + exclude_files: + # These tests are run in sharded_jscore_txns. + - jstests/core/txns/**/*.js + # The following tests fail because a certain command or functionality is not supported on + # mongos. This command or functionality is placed in a comment next to the failing test. + - jstests/core/**/apitest_db.js # serverStatus output doesn't have storageEngine. + - jstests/core/**/check_shard_index.js # checkShardingIndex. + - jstests/core/**/collection_truncate.js # emptycapped. + - jstests/core/**/compact_keeps_indexes.js # compact. + - jstests/core/**/currentop.js # uses fsync. + - jstests/core/**/dbhash.js # dbhash. + - jstests/core/**/dbhash2.js # dbhash. + - jstests/core/**/fsync.js # uses fsync. + - jstests/core/**/geo_s2cursorlimitskip.js # profiling. + - jstests/core/**/geo_update_btree2.js # notablescan. + - jstests/core/**/index9.js # "local" database. + - jstests/core/**/queryoptimizera.js # "local" database. + - jstests/core/**/stages*.js # stageDebug. + - jstests/core/**/startup_log.js # "local" database. + - jstests/core/**/top.js # top. + # The following tests fail because mongos behaves differently from mongod when testing certain + # functionality. The differences are in a comment next to the failing test. + - jstests/core/**/explain_missing_database.js # Behavior with no db different on mongos. + - jstests/core/**/geo_2d_explain.js # executionSuccess in different spot in explain(). + - jstests/core/**/geo_s2explain.js # inputStage in different spot in explain(). + - jstests/core/**/geo_s2sparse.js # keysPerIndex in different spot in validate(). + - jstests/core/**/operation_latency_histogram.js # Stats are counted differently on mongos, SERVER-24880. + - jstests/core/**/killop_drop_collection.js # Uses fsyncLock. 
+ - jstests/core/**/or_to_in.js # queryPlanner in different spot in explain() + # The following tests fail because of divergent dropCollection behavior between standalones and + # sharded clusters. These tests expect a second drop command to error, whereas in sharded clusters + # we expect a second drop to return status OK. + - jstests/core/**/explain_upsert.js + + exclude_with_any_tags: + - assumes_standalone_mongod + - assumes_against_mongod_not_mongos + # system.profile collection doesn't exist on mongos. + - requires_profiling + - config_shard_incompatible + +executor: + archive: + hooks: + - CheckReplDBHash + - ValidateCollections + config: {} + hooks: + - class: CheckReplDBHash + - class: ValidateCollections + - class: CheckOrphansDeleted + - class: CleanEveryN + n: 20 + fixture: + class: ShardedClusterFixture + config_shard: "any" + mongos_options: + set_parameters: + enableTestCommands: 1 + mongod_options: + set_parameters: + enableTestCommands: 1 + num_rs_nodes_per_shard: 1 + enable_sharding: + - test diff --git a/buildscripts/resmokeconfig/suites/simulate_crash_concurrency_replication.yml b/buildscripts/resmokeconfig/suites/simulate_crash_concurrency_replication.yml index a9f0a03cbd941..562152fcce1f5 100644 --- a/buildscripts/resmokeconfig/suites/simulate_crash_concurrency_replication.yml +++ b/buildscripts/resmokeconfig/suites/simulate_crash_concurrency_replication.yml @@ -40,4 +40,6 @@ executor: syncdelay: 10 set_parameters: enableTestCommands: 1 + queryAnalysisSamplerConfigurationRefreshSecs: 1 + queryAnalysisWriterIntervalSecs: 1 num_nodes: 3 diff --git a/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_jscore_passthrough.yml index 5c7fb0c2a604b..e2402782812f3 100644 --- a/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_jscore_passthrough.yml @@ -33,7 +33,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; hooks: # The CheckReplDBHash hook waits until all operations have replicated to and have been applied # on the secondaries, so we run the ValidateCollections hook after it to ensure we're diff --git a/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_kill_primary_jscore_passthrough.yml index 11d26f61e8dd9..05626cf8ce085 100644 --- a/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_kill_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/talk_directly_to_shardsvrs_kill_primary_jscore_passthrough.yml @@ -139,9 +139,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); load("jstests/libs/override_methods/fail_unclean_shutdown_incompatible_commands.js"); diff --git a/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml b/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml deleted file mode 100644 index 1aa2a490a5f8e..0000000000000 --- a/buildscripts/resmokeconfig/suites/telemetry_passthrough.yml +++ /dev/null @@ 
-1,30 +0,0 @@ -test_kind: js_test -description: | - This suite enables the collection of telemetry metrics on a mongod server, then runs the tests in - core and aggregation as normal. This should cause each query or aggregation to compute a query - shape and telemetry key, and record in-memory some metrics like execution time and number of - scanned documents. It doesn't assert anything about the collected telemetry, it is just meant to - make sure nothing is going seriously awry (e.g. crashing). - -selector: - roots: - - jstests/aggregation/**/*.js - - jstests/core/**/*.js - exclude_files: - # Transactions are not supported on MongoDB standalone nodes, so we do not run these tests. - - jstests/core/txns/**/*.js - -executor: - archive: - hooks: - - ValidateCollections - hooks: - - class: ValidateCollections - - class: CleanEveryN - n: 20 - fixture: - class: MongoDFixture - mongod_options: - set_parameters: - enableTestCommands: 1 - internalQueryConfigureTelemetrySamplingRate: -1 diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_causally_consistent_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_causally_consistent_jscore_passthrough.yml index c67d77742a988..43887e1b8332d 100644 --- a/buildscripts/resmokeconfig/suites/tenant_migration_causally_consistent_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/tenant_migration_causally_consistent_jscore_passthrough.yml @@ -78,7 +78,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); load('jstests/libs/override_methods/enable_causal_consistency.js'); load('jstests/libs/override_methods/enable_sessions.js'); @@ -138,9 +138,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. - storeFindAndModifyImagesInSideCollection: false tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem tlsAllowInvalidHostnames: '' diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml index ad61ab8662a4c..2b8bccbc121be 100644 --- a/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/tenant_migration_jscore_passthrough.yml @@ -51,7 +51,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); jsTest.authenticate(db.getMongo()); global_vars: @@ -103,9 +103,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. 
- storeFindAndModifyImagesInSideCollection: false tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem tlsAllowInvalidHostnames: '' diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_kill_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_kill_primary_jscore_passthrough.yml index 994837fffaccd..196474da76eba 100644 --- a/buildscripts/resmokeconfig/suites/tenant_migration_kill_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/tenant_migration_kill_primary_jscore_passthrough.yml @@ -117,8 +117,6 @@ selector: - requires_dbstats # "Cowardly fail if collStats is run with a mongod that had an unclean shutdown: ..." - requires_collstats - # "Cowardly fail if unbounded dataSize is run with a mongod that had an unclean shutdown: ..." - - requires_datasize # Due to background tenant migrations, operations in the main test shell are not guaranteed to # be causally consistent with operations in a parallel shell. The reason is that # TenantMigrationCommitted error is only thrown when the client does a write or a atClusterTime/ @@ -155,9 +153,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load('jstests/libs/override_methods/inject_tenant_prefix.js'); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); @@ -195,7 +193,7 @@ executor: - class: ContinuousTenantMigration shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); global_vars: @@ -242,9 +240,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. - storeFindAndModifyImagesInSideCollection: false tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem tlsAllowInvalidHostnames: '' diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml index e3c9ad3cb9825..a7ae31167c453 100644 --- a/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/tenant_migration_multi_stmt_txn_jscore_passthrough.yml @@ -262,7 +262,7 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/inject_tenant_prefix.js'); load('jstests/libs/override_methods/enable_sessions.js'); load('jstests/libs/override_methods/txn_passthrough_cmd_massage.js'); @@ -323,9 +323,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. 
- storeFindAndModifyImagesInSideCollection: false tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem tlsAllowInvalidHostnames: '' diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_stepdown_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_stepdown_jscore_passthrough.yml index 02b9b194ce3fb..9e6193b1bf7dd 100644 --- a/buildscripts/resmokeconfig/suites/tenant_migration_stepdown_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/tenant_migration_stepdown_jscore_passthrough.yml @@ -147,9 +147,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load('jstests/libs/override_methods/inject_tenant_prefix.js'); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); @@ -184,7 +184,7 @@ executor: - class: ContinuousTenantMigration shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); global_vars: @@ -229,9 +229,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. - storeFindAndModifyImagesInSideCollection: false tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem tlsAllowInvalidHostnames: '' diff --git a/buildscripts/resmokeconfig/suites/tenant_migration_terminate_primary_jscore_passthrough.yml b/buildscripts/resmokeconfig/suites/tenant_migration_terminate_primary_jscore_passthrough.yml index 8848be9298c6c..3e6208604364a 100644 --- a/buildscripts/resmokeconfig/suites/tenant_migration_terminate_primary_jscore_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/tenant_migration_terminate_primary_jscore_passthrough.yml @@ -147,9 +147,9 @@ executor: config: shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); - db = connect(TestData.connectionString); + globalThis.db = connect(TestData.connectionString); load('jstests/libs/override_methods/inject_tenant_prefix.js'); load("jstests/libs/override_methods/enable_sessions.js"); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); @@ -185,7 +185,7 @@ executor: - class: ContinuousTenantMigration shell_options: eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/network_error_and_txn_override.js'); load("jstests/libs/override_methods/set_read_and_write_concerns.js"); global_vars: @@ -230,9 +230,6 @@ executor: # so. Therefore, the garbage collection delay doesn't need to be large. tenantMigrationGarbageCollectionDelayMS: 1 ttlMonitorSleepSecs: 1 - # Tenant migrations is not currently compatible with implicitly replicated retryable - # findAndModify images. 
- storeFindAndModifyImagesInSideCollection: false tlsMode: allowTLS tlsCAFile: jstests/libs/ca.pem tlsAllowInvalidHostnames: '' diff --git a/buildscripts/resmokeconfig/suites/vector_search.yml b/buildscripts/resmokeconfig/suites/vector_search.yml new file mode 100644 index 0000000000000..35174e5a00396 --- /dev/null +++ b/buildscripts/resmokeconfig/suites/vector_search.yml @@ -0,0 +1,10 @@ +test_kind: js_test + +selector: + roots: + - src/mongo/db/modules/*/jstests/vector_search/*.js + +executor: + config: + shell_options: + nodb: '' diff --git a/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml index 1ff025d34f796..422a1911f1c96 100644 --- a/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml +++ b/buildscripts/resmokeconfig/suites/write_concern_majority_passthrough.yml @@ -46,7 +46,7 @@ executor: TestData: defaultReadConcernLevel: local eval: >- - testingReplication = true; + globalThis.testingReplication = true; load('jstests/libs/override_methods/set_read_and_write_concerns.js'); load('jstests/libs/override_methods/set_read_preference_secondary.js'); hooks: diff --git a/buildscripts/resmokelib/cli.py b/buildscripts/resmokelib/cli.py index 1a72d6e638904..0302272726a8b 100644 --- a/buildscripts/resmokelib/cli.py +++ b/buildscripts/resmokelib/cli.py @@ -4,8 +4,6 @@ import time import os import psutil -from mongo_tooling_metrics.client import get_mongo_metrics_client -from mongo_tooling_metrics.errors import ExternalHostException from mongo_tooling_metrics.lib.top_level_metrics import ResmokeToolingMetrics from buildscripts.resmokelib import parser @@ -25,20 +23,10 @@ def main(argv): usage="Resmoke is MongoDB's correctness testing orchestrator.\n" "For more information, see the help message for each subcommand.\n" "For example: resmoke.py run -h\n" - "Note: bisect and setup-multiversion subcommands have been moved to db-contrib-tool (https://github.com/10gen/db-contrib-tool#readme).\n" + "Note: bisect, setup-multiversion and symbolize subcommands have been moved to db-contrib-tool (https://github.com/10gen/db-contrib-tool#readme).\n" + ) + ResmokeToolingMetrics.register_metrics( + utc_starttime=datetime.utcfromtimestamp(__start_time), + parser=parser.get_parser(), ) - try: - metrics_client = get_mongo_metrics_client() - metrics_client.register_metrics( - ResmokeToolingMetrics, - utc_starttime=datetime.utcfromtimestamp(__start_time), - parser=parser.get_parser(), - ) - except ExternalHostException as _: - pass - except Exception as _: # pylint: disable=broad-except - print( - "This MongoDB Virtual Workstation could not connect to the internal cluster\nThis is a non-issue, but if this message persists feel free to reach out in #server-dev-platform" - ) - subcommand.execute() diff --git a/buildscripts/resmokelib/config.py b/buildscripts/resmokelib/config.py index 7643c05da7ac3..368a1a5df36bd 100644 --- a/buildscripts/resmokelib/config.py +++ b/buildscripts/resmokelib/config.py @@ -57,7 +57,7 @@ "base_port": 20000, "backup_on_restart_dir": None, "buildlogger_url": "https://logkeeper2.build.10gen.cc", - "catalog_shard": None, + "config_shard": None, "continue_on_failure": False, "dbpath_prefix": None, "dbtest_executable": None, @@ -67,6 +67,7 @@ "flow_control_tickets": None, "force_excluded_tests": False, "fuzz_mongod_configs": None, + "fuzz_mongos_configs": None, "config_fuzz_seed": None, "genny_executable": None, "include_with_any_tags": None, @@ -83,6 +84,7 @@ "mrlog": None, 
"no_journal": False, "num_clients_per_fixture": 1, + "origin_suite": None, "perf_report_file": None, "cedar_report_file": None, "repeat_suites": 1, @@ -300,8 +302,8 @@ def all_options(cls): # actually running them). DRY_RUN = None -# If set, specifies which node is the catalog shard. Can also be set to 'any'. -CATALOG_SHARD = None +# If set, specifies which node is the config shard. Can also be set to 'any'. +CONFIG_SHARD = None # URL to connect to the Evergreen service. EVERGREEN_URL = None @@ -357,7 +359,13 @@ def all_options(cls): # If true, then a test failure or error will cause resmoke.py to exit and not run any more tests. FAIL_FAST = None +# Defines how to fuzz mongod parameters FUZZ_MONGOD_CONFIGS = None + +# Defines how to fuzz mongos parameters +FUZZ_MONGOS_CONFIGS = None + +# This seeds the random number generator used to fuzz mongod and mongos parameters CONFIG_FUZZ_SEED = None # Executable file for genny, passed in as a command line arg. @@ -420,6 +428,9 @@ def all_options(cls): # If set, then each fixture runs tests with the specified number of clients. NUM_CLIENTS_PER_FIXTURE = None +# Indicates the name of the test suite prior to the suite being split up by uite generation +ORIGIN_SUITE = None + # Report file for the Evergreen performance plugin. PERF_REPORT_FILE = None @@ -570,12 +581,13 @@ def all_options(cls): DEFAULT_UNIT_TEST_LIST = "build/unittests.txt" DEFAULT_INTEGRATION_TEST_LIST = "build/integration_tests.txt" DEFAULT_LIBFUZZER_TEST_LIST = "build/libfuzzer_tests.txt" +DEFAULT_PRETTY_PRINTER_TEST_LIST = "build/pretty_printer_tests.txt" # External files or executables, used as suite selectors, that are created during the build and # therefore might not be available when creating a test membership map. EXTERNAL_SUITE_SELECTORS = (DEFAULT_BENCHMARK_TEST_LIST, DEFAULT_UNIT_TEST_LIST, DEFAULT_INTEGRATION_TEST_LIST, DEFAULT_DBTEST_EXECUTABLE, - DEFAULT_LIBFUZZER_TEST_LIST) + DEFAULT_LIBFUZZER_TEST_LIST, DEFAULT_PRETTY_PRINTER_TEST_LIST) # Where to look for logging and suite configuration files CONFIG_DIR = None diff --git a/buildscripts/resmokelib/configure_resmoke.py b/buildscripts/resmokelib/configure_resmoke.py index ce9019c90b8ff..f3d3042476eb0 100644 --- a/buildscripts/resmokelib/configure_resmoke.py +++ b/buildscripts/resmokelib/configure_resmoke.py @@ -14,13 +14,14 @@ import shlex import pymongo.uri_parser +import yaml from buildscripts.idl import gen_all_feature_flag_list from buildscripts.idl.lib import ALL_FEATURE_FLAG_FILE from buildscripts.resmokelib import config as _config from buildscripts.resmokelib import utils -from buildscripts.resmokelib import mongod_fuzzer_configs +from buildscripts.resmokelib import mongo_fuzzer_configs from buildscripts.resmokelib.suitesconfig import SuiteFinder @@ -238,6 +239,11 @@ def setup_feature_flags(): _config.EXCLUDE_WITH_ANY_TAGS.extend( utils.default_if_none(_tags_from_list(config.pop("exclude_with_any_tags")), [])) + force_disabled_flags = yaml.safe_load( + open("buildscripts/resmokeconfig/fully_disabled_feature_flags.yml")) + + _config.EXCLUDE_WITH_ANY_TAGS.extend(force_disabled_flags) + if _config.RUN_NO_FEATURE_FLAG_TESTS: # Don't run any feature flag tests. 
_config.EXCLUDE_WITH_ANY_TAGS.extend(all_feature_flags) @@ -301,6 +307,7 @@ def _merge_set_params(param_list): _config.MONGOD_SET_PARAMETERS = _merge_set_params(mongod_set_parameters) _config.FUZZ_MONGOD_CONFIGS = config.pop("fuzz_mongod_configs") + _config.FUZZ_MONGOS_CONFIGS = config.pop("fuzz_mongos_configs") _config.CONFIG_FUZZ_SEED = config.pop("config_fuzz_seed") if _config.FUZZ_MONGOD_CONFIGS: @@ -309,7 +316,7 @@ def _merge_set_params(param_list): else: _config.CONFIG_FUZZ_SEED = int(_config.CONFIG_FUZZ_SEED) _config.MONGOD_SET_PARAMETERS, _config.WT_ENGINE_CONFIG, _config.WT_COLL_CONFIG, \ - _config.WT_INDEX_CONFIG = mongod_fuzzer_configs.fuzz_set_parameters( + _config.WT_INDEX_CONFIG = mongo_fuzzer_configs.fuzz_mongod_set_parameters( _config.FUZZ_MONGOD_CONFIGS, _config.CONFIG_FUZZ_SEED, _config.MONGOD_SET_PARAMETERS) _config.EXCLUDE_WITH_ANY_TAGS.extend(["uses_compact"]) _config.EXCLUDE_WITH_ANY_TAGS.extend(["requires_emptycapped"]) @@ -318,6 +325,15 @@ def _merge_set_params(param_list): mongos_set_parameters = config.pop("mongos_set_parameters") _config.MONGOS_SET_PARAMETERS = _merge_set_params(mongos_set_parameters) + if _config.FUZZ_MONGOS_CONFIGS: + if not _config.CONFIG_FUZZ_SEED: + _config.CONFIG_FUZZ_SEED = random.randrange(sys.maxsize) + else: + _config.CONFIG_FUZZ_SEED = int(_config.CONFIG_FUZZ_SEED) + + _config.MONGOS_SET_PARAMETERS = mongo_fuzzer_configs.fuzz_mongos_set_parameters( + _config.FUZZ_MONGOS_CONFIGS, _config.CONFIG_FUZZ_SEED, _config.MONGOS_SET_PARAMETERS) + _config.MONGOCRYPTD_SET_PARAMETERS = _merge_set_params(config.pop("mongocryptd_set_parameters")) _config.MRLOG = config.pop("mrlog") @@ -325,8 +341,9 @@ def _merge_set_params(param_list): _config.NUM_CLIENTS_PER_FIXTURE = config.pop("num_clients_per_fixture") _config.NUM_REPLSET_NODES = config.pop("num_replset_nodes") _config.NUM_SHARDS = config.pop("num_shards") - _config.CATALOG_SHARD = utils.pick_catalog_shard_node( - config.pop("catalog_shard"), _config.NUM_SHARDS) + _config.CONFIG_SHARD = utils.pick_catalog_shard_node( + config.pop("config_shard"), _config.NUM_SHARDS) + _config.ORIGIN_SUITE = config.pop("origin_suite") _config.PERF_REPORT_FILE = config.pop("perf_report_file") _config.CEDAR_REPORT_FILE = config.pop("cedar_report_file") _config.RANDOM_SEED = config.pop("seed") diff --git a/buildscripts/resmokelib/core/programs.py b/buildscripts/resmokelib/core/programs.py index a82383f1958c9..f0de0044f4c4e 100644 --- a/buildscripts/resmokelib/core/programs.py +++ b/buildscripts/resmokelib/core/programs.py @@ -252,8 +252,8 @@ def basename(filepath): test_data["undoRecorderPath"] = config.UNDO_RECORDER_PATH - if "catalogShard" not in test_data and config.CATALOG_SHARD is not None: - test_data["catalogShard"] = config.CATALOG_SHARD + if "configShard" not in test_data and config.CONFIG_SHARD is not None: + test_data["configShard"] = True # There's a periodic background thread that checks for and aborts expired transactions. # "transactionLifetimeLimitSeconds" specifies for how long a transaction can run before expiring @@ -279,7 +279,7 @@ def basename(filepath): eval_sb.append(str(kwargs.pop("eval"))) # Load a callback to check that the cluster-wide metadata is consistent. - eval_sb.append("load('jstests/libs/override_methods/check_metadata_consistency.js');") + eval_sb.append("await import('jstests/libs/override_methods/check_metadata_consistency.js');") # Load this file to allow a callback to validate collections before shutting down mongod. 
eval_sb.append("load('jstests/libs/override_methods/validate_collections_on_shutdown.js');") @@ -297,7 +297,8 @@ def basename(filepath): # Load a callback to check that the info stored in config.collections and config.chunks is # semantically correct before shutting down a ShardingTest. - eval_sb.append("load('jstests/libs/override_methods/check_routing_table_consistency.js');") + eval_sb.append( + "await import('jstests/libs/override_methods/check_routing_table_consistency.js');") # Load a callback to check that all shards have correct filtering information before shutting # down a ShardingTest. @@ -454,6 +455,11 @@ def _set_keyfile_permissions(opts): We can't permanently set the keyfile permissions because git is not aware of them. """ + for keysuffix in ["1", "2", "ForRollover"]: + keyfile = "jstests/libs/key%s" % keysuffix + if os.path.exists(keyfile): + os.chmod(keyfile, stat.S_IRUSR | stat.S_IWUSR) + if "keyFile" in opts: os.chmod(opts["keyFile"], stat.S_IRUSR | stat.S_IWUSR) if "encryptionKeyFile" in opts: diff --git a/buildscripts/resmokelib/errors.py b/buildscripts/resmokelib/errors.py index f323782ca9bc3..d95cb4ef7911f 100644 --- a/buildscripts/resmokelib/errors.py +++ b/buildscripts/resmokelib/errors.py @@ -86,3 +86,9 @@ class InvalidMatrixSuiteError(ResmokeError): """Exception raised when validating a matrix suite mapping file.""" pass + + +class TagFileDoesNotExistError(ResmokeError): + """Exception raised when a tag file is passed into resmoke that does not exist.""" + + pass diff --git a/buildscripts/resmokelib/flags.py b/buildscripts/resmokelib/flags.py new file mode 100644 index 0000000000000..6aff696166684 --- /dev/null +++ b/buildscripts/resmokelib/flags.py @@ -0,0 +1,5 @@ +"""Global flags used by resmoke.""" + +import threading + +HANG_ANALYZER_CALLED = threading.Event() diff --git a/buildscripts/resmokelib/hang_analyzer/extractor.py b/buildscripts/resmokelib/hang_analyzer/extractor.py index 2e8870064aa03..23fc02f431d49 100644 --- a/buildscripts/resmokelib/hang_analyzer/extractor.py +++ b/buildscripts/resmokelib/hang_analyzer/extractor.py @@ -7,19 +7,22 @@ from buildscripts.resmokelib.setup_multiversion.download import DownloadError from buildscripts.resmokelib.run import compare_start_time from buildscripts.resmokelib.utils.filesystem import build_hygienic_bin_path +from buildscripts.resmokelib.symbolizer import Symbolizer _DEBUG_FILE_BASE_NAMES = ['mongo', 'mongod', 'mongos'] -def download_debug_symbols(root_logger, symbolizer): +def download_debug_symbols(root_logger, symbolizer: Symbolizer, retry_secs: int = 10, + download_timeout_secs: int = 10 * 60): """ Extract debug symbols. Idempotent. :param root_logger: logger to use :param symbolizer: pre-configured instance of symbolizer for downloading symbols. + :param retry_secs: seconds before retrying to download symbols + :param download_timeout_secs: timeout in seconds before failing to download :return: None """ - retry_secs = 10 # Check if the files are already there. They would be on *SAN builds. 
sym_files = _get_symbol_files() @@ -32,17 +35,18 @@ def download_debug_symbols(root_logger, symbolizer): while True: try: symbolizer.execute() + root_logger.info("Debug symbols successfully downloaded") break except (tarfile.ReadError, DownloadError): - root_logger.info("Debug symbols unavailable after %s secs, retrying in %s secs", - compare_start_time(time.time()), retry_secs) + root_logger.warn( + "Debug symbols unavailable after %s secs, retrying in %s secs, waiting for a total of %s secs", + compare_start_time(time.time()), retry_secs, download_timeout_secs) time.sleep(retry_secs) - ten_min = 10 * 60 - if compare_start_time(time.time()) > ten_min: - root_logger.info( + if compare_start_time(time.time()) > download_timeout_secs: + root_logger.warn( 'Debug-symbols archive-file does not exist after %s secs; ' - 'Hang-Analyzer may not complete successfully.', ten_min) + 'Hang-Analyzer may not complete successfully.', download_timeout_secs) break diff --git a/buildscripts/resmokelib/hang_analyzer/hang_analyzer.py b/buildscripts/resmokelib/hang_analyzer/hang_analyzer.py index 190aa9824da78..28dc4248467c0 100755 --- a/buildscripts/resmokelib/hang_analyzer/hang_analyzer.py +++ b/buildscripts/resmokelib/hang_analyzer/hang_analyzer.py @@ -68,6 +68,12 @@ def configure_task_id(): self._configure_processes() self._setup_logging(logger) + def kill_rogue_processes(self): + """Kill any processes that are currently being analyzed.""" + processes = process_list.get_processes(self.process_ids, self.interesting_processes, + self.options.process_match, self.root_logger) + process.teardown_processes(self.root_logger, processes, dump_pids={}) + def execute(self): """ Execute hang analysis. @@ -193,13 +199,12 @@ def _configure_processes(self): def _setup_logging(self, logger): if logger is None: self.root_logger = logging.Logger("hang_analyzer", level=logging.DEBUG) + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter(logging.Formatter(fmt="%(message)s")) + self.root_logger.addHandler(handler) else: self.root_logger = logger - handler = logging.StreamHandler(sys.stdout) - handler.setFormatter(logging.Formatter(fmt="%(message)s")) - self.root_logger.addHandler(handler) - def _log_system_info(self): self.root_logger.info("Python Version: %s", sys.version) self.root_logger.info("OS: %s", platform.platform()) diff --git a/buildscripts/resmokelib/mongo_fuzzer_configs.py b/buildscripts/resmokelib/mongo_fuzzer_configs.py new file mode 100644 index 0000000000000..2e91f9e76e0e7 --- /dev/null +++ b/buildscripts/resmokelib/mongo_fuzzer_configs.py @@ -0,0 +1,191 @@ +"""Generator functions for all parameters that we fuzz when invoked with --fuzzMongodConfigs.""" + +import random +from buildscripts.resmokelib import utils + + +def generate_eviction_configs(rng, mode): + """Generate random configurations for wiredTigerEngineConfigString parameter.""" + eviction_checkpoint_target = rng.randint(1, 99) + eviction_target = rng.randint(50, 95) + eviction_trigger = rng.randint(eviction_target + 1, 99) + + # Fuzz eviction_dirty_target and trigger both as relative and absolute values + target_bytes_min = 50 * 1024 * 1024 # 50MB # 5% of 1GB default cache size on Evergreen + target_bytes_max = 256 * 1024 * 1024 # 256MB # 1GB default cache size on Evergreen + eviction_dirty_target = rng.choice( + [rng.randint(5, 50), rng.randint(target_bytes_min, target_bytes_max)]) + trigger_max = 75 if eviction_dirty_target <= 50 else target_bytes_max + eviction_dirty_trigger = rng.randint(eviction_dirty_target + 1, trigger_max) + + 
assert eviction_dirty_trigger > eviction_dirty_target + assert eviction_dirty_trigger <= trigger_max + + # Fuzz eviction_updates_target and eviction_updates_trigger. These are by default half the + # values of the corresponding eviction dirty target and trigger. They need to stay less than the + # dirty equivalents. The default updates target is 2.5% of the cache, so let's start fuzzing + # from 2%. + updates_target_min = 2 if eviction_dirty_target <= 100 else 20 * 1024 * 1024 # 2% of 1GB cache + eviction_updates_target = rng.randint(updates_target_min, eviction_dirty_target - 1) + eviction_updates_trigger = rng.randint(eviction_updates_target + 1, eviction_dirty_trigger - 1) + + # Fuzz File manager settings + close_idle_time_secs = rng.randint(1, 100) + close_handle_minimum = rng.randint(0, 1000) + close_scan_interval = rng.randint(1, 100) + + # The debug_mode for WiredTiger offers some settings to change internal behavior that could help + # find bugs. Settings to fuzz: + # eviction - Turns aggressive eviction on/off + # realloc_exact - Finds more memory bugs by allocating the memory for the exact size asked + # rollback_error - Forces WiredTiger to return a rollback error every Nth call + # slow_checkpoint - Adds internal delays in processing internal leaf pages during a checkpoint + dbg_eviction = rng.choice(['true', 'false']) + dbg_realloc_exact = rng.choice(['true', 'false']) + # Rollback every Nth transaction. The values have been tuned after looking at how many + # WiredTiger transactions happen per second for the config-fuzzed jstests. + # The setting is triggering bugs, disabled until they get resolved. + # dbg_rollback_error = rng.choice([0, rng.randint(250, 1500)]) + dbg_rollback_error = 0 + dbg_slow_checkpoint = 'false' if mode != 'stress' else rng.choice(['true', 'false']) + + return "debug_mode=(eviction={0},realloc_exact={1},rollback_error={2}, slow_checkpoint={3}),"\ + "eviction_checkpoint_target={4},eviction_dirty_target={5},eviction_dirty_trigger={6},"\ + "eviction_target={7},eviction_trigger={8},eviction_updates_target={9},"\ + "eviction_updates_trigger={10},file_manager=(close_handle_minimum={11},"\ + "close_idle_time={12},close_scan_interval={13})".format(dbg_eviction, + dbg_realloc_exact, + dbg_rollback_error, + dbg_slow_checkpoint, + eviction_checkpoint_target, + eviction_dirty_target, + eviction_dirty_trigger, + eviction_target, + eviction_trigger, + eviction_updates_target, + eviction_updates_trigger, + close_handle_minimum, + close_idle_time_secs, + close_scan_interval) + + +def generate_table_configs(rng): + """Generate random configurations for WiredTiger tables.""" + + internal_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024 + leaf_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024 + leaf_value_max = rng.choice([1, 32, 128, 256]) * 1024 * 1024 + + memory_page_max_lower_bound = leaf_page_max + # Assume WT cache size of 1GB as most MDB tests specify this as the cache size.
+ memory_page_max_upper_bound = round( + (rng.randint(256, 1024) * 1024 * 1024) / 10) # cache_size / 10 + memory_page_max = rng.randint(memory_page_max_lower_bound, memory_page_max_upper_bound) + + split_pct = rng.choice([50, 60, 75, 100]) + prefix_compression = rng.choice(["true", "false"]) + block_compressor = rng.choice(["none", "snappy", "zlib", "zstd"]) + + return "block_compressor={0},internal_page_max={1},leaf_page_max={2},leaf_value_max={3},"\ + "memory_page_max={4},prefix_compression={5},split_pct={6}".format(block_compressor, + internal_page_max, + leaf_page_max, + leaf_value_max, + memory_page_max, + prefix_compression, + split_pct) + + +def generate_flow_control_parameters(rng): + """Generate parameters related to flow control and returns a dictionary.""" + configs = {} + configs["enableFlowControl"] = rng.choice([True, False]) + if not configs["enableFlowControl"]: + return configs + + configs["flowControlTargetLagSeconds"] = rng.randint(1, 1000) + configs["flowControlThresholdLagPercentage"] = rng.random() + configs["flowControlMaxSamples"] = rng.randint(1, 1000 * 1000) + configs["flowControlSamplePeriod"] = rng.randint(1, 1000 * 1000) + configs["flowControlMinTicketsPerSecond"] = rng.randint(1, 10 * 1000) + + return configs + + +def generate_mongod_parameters(rng, mode): + """Return a dictionary with values for each mongod parameter.""" + ret = {} + ret["analyzeShardKeySplitPointExpirationSecs"] = rng.randint(1, 300) + ret["chunkMigrationConcurrency"] = rng.choice([1, 4, 16]) + ret["disableLogicalSessionCacheRefresh"] = rng.choice([True, False]) + ret["initialServiceExecutorUseDedicatedThread"] = rng.choice([True, False]) + # TODO (SERVER-75632): Uncomment this to enable passthrough testing. + # ret["lockCodeSegmentsInMemory"] = rng.choice([True, False]) + if not ret["disableLogicalSessionCacheRefresh"]: + ret["logicalSessionRefreshMillis"] = rng.choice([100, 1000, 10000, 100000]) + ret["maxNumberOfTransactionOperationsInSingleOplogEntry"] = rng.randint(1, 10) * rng.choice( + [1, 10, 100]) + ret["minSnapshotHistoryWindowInSeconds"] = rng.choice([300, rng.randint(30, 600)]) + ret["mirrorReads"] = {"samplingRate": rng.random()} + ret["queryAnalysisWriterMaxMemoryUsageBytes"] = rng.randint(1, 100) * 1024 * 1024 + ret["syncdelay"] = rng.choice([60, rng.randint(15, 180)]) + ret["wiredTigerCursorCacheSize"] = rng.randint(-100, 100) + ret["wiredTigerSessionCloseIdleTimeSecs"] = rng.randint(0, 300) + ret["storageEngineConcurrencyAdjustmentAlgorithm"] = rng.choices( + ["throughputProbing", "fixedConcurrentTransactions"], weights=[10, 1])[0] + ret["throughputProbingStepMultiple"] = rng.uniform(0.1, 0.5) + ret["throughputProbingInitialConcurrency"] = rng.randint(4, 128) + ret["throughputProbingMinConcurrency"] = rng.randint(4, + ret["throughputProbingInitialConcurrency"]) + ret["throughputProbingMaxConcurrency"] = rng.randint(ret["throughputProbingInitialConcurrency"], + 128) + ret["throughputProbingReadWriteRatio"] = rng.uniform(0, 1) + ret["throughputProbingConcurrencyMovingAverageWeight"] = 1 - rng.random() + + ret["wiredTigerConcurrentWriteTransactions"] = rng.randint(5, 32) + ret["wiredTigerConcurrentReadTransactions"] = rng.randint(5, 32) + ret["wiredTigerStressConfig"] = False if mode != 'stress' else rng.choice([True, False]) + + # We need a higher timeout to account for test slowness + ret["receiveChunkWaitForRangeDeleterTimeoutMS"] = 300000 + return ret + + +def generate_mongos_parameters(rng, mode): + """Return a dictionary with values for each mongos parameter.""" + ret = 
{} + ret["initialServiceExecutorUseDedicatedThread"] = rng.choice([True, False]) + ret["opportunisticSecondaryTargeting"] = rng.choice([True, False]) + return ret + + +def fuzz_mongod_set_parameters(mode, seed, user_provided_params): + """Randomly generate mongod configurations and wiredTigerConnectionString.""" + rng = random.Random(seed) + + ret = {} + params = [generate_flow_control_parameters(rng), generate_mongod_parameters(rng, mode)] + for dct in params: + for key, value in dct.items(): + ret[key] = value + + for key, value in utils.load_yaml(user_provided_params).items(): + ret[key] = value + + return utils.dump_yaml(ret), generate_eviction_configs(rng, mode), generate_table_configs(rng), \ + generate_table_configs(rng) + + +def fuzz_mongos_set_parameters(mode, seed, user_provided_params): + """Randomly generate mongos configurations.""" + rng = random.Random(seed) + + ret = {} + params = generate_mongos_parameters(rng, mode) + for key, value in params.items(): + ret[key] = value + + for key, value in utils.load_yaml(user_provided_params).items(): + ret[key] = value + + return utils.dump_yaml(ret) diff --git a/buildscripts/resmokelib/mongod_fuzzer_configs.py b/buildscripts/resmokelib/mongod_fuzzer_configs.py deleted file mode 100644 index be00b89931d2c..0000000000000 --- a/buildscripts/resmokelib/mongod_fuzzer_configs.py +++ /dev/null @@ -1,150 +0,0 @@ -"""Generator functions for all parameters that we fuzz when invoked with --fuzzMongodConfigs.""" - -import random -from buildscripts.resmokelib import utils - - -def generate_eviction_configs(rng, mode): - """Generate random configurations for wiredTigerEngineConfigString parameter.""" - eviction_checkpoint_target = rng.randint(1, 99) - eviction_target = rng.randint(50, 95) - eviction_trigger = rng.randint(eviction_target + 1, 99) - - # Fuzz eviction_dirty_target and trigger both as relative and absolute values - target_bytes_min = 50 * 1024 * 1024 # 50MB # 5% of 1GB default cache size on Evergreen - target_bytes_max = 256 * 1024 * 1024 # 256MB # 1GB default cache size on Evergreen - eviction_dirty_target = rng.choice( - [rng.randint(5, 50), rng.randint(target_bytes_min, target_bytes_max)]) - trigger_max = 75 if eviction_dirty_target <= 50 else target_bytes_max - eviction_dirty_trigger = rng.randint(eviction_dirty_target + 1, trigger_max) - - assert eviction_dirty_trigger > eviction_dirty_target - assert eviction_dirty_trigger <= trigger_max - - # Fuzz eviction_updates_target and eviction_updates_trigger. These are by default half the - # values of the corresponding eviction dirty target and trigger. They need to stay less than the - # dirty equivalents. The default updates target is 2.5% of the cache, so let's start fuzzing - # from 2%. - updates_target_min = 2 if eviction_dirty_target <= 100 else 20 * 1024 * 1024 # 2% of 1GB cache - eviction_updates_target = rng.randint(updates_target_min, eviction_dirty_target - 1) - eviction_updates_trigger = rng.randint(eviction_updates_target + 1, eviction_dirty_trigger - 1) - - # Fuzz File manager settings - close_idle_time_secs = rng.randint(1, 100) - close_handle_minimum = rng.randint(0, 1000) - close_scan_interval = rng.randint(1, 100) - - # The debug_mode for WiredTiger offers some settings to change internal behavior that could help - # find bugs. 
Settings to fuzz: - # eviction - Turns aggressive eviction on/off - # realloc_exact - Finds more memory bugs by allocating the memory for the exact size asked - # rollback_error - Forces WiredTiger to return a rollback error every Nth call - # slow_checkpoint - Adds internal delays in processing internal leaf pages during a checkpoint - dbg_eviction = rng.choice(['true', 'false']) - dbg_realloc_exact = rng.choice(['true', 'false']) - # Rollback every Nth transaction. The values have been tuned after looking at how many - # WiredTiger transactions happen per second for the config-fuzzed jstests. - # The setting is trigerring bugs, disabled until they get resolved. - # dbg_rollback_error = rng.choice([0, rng.randint(250, 1500)]) - dbg_rollback_error = 0 - dbg_slow_checkpoint = 'false' if mode != 'stress' else rng.choice(['true', 'false']) - - return "debug_mode=(eviction={0},realloc_exact={1},rollback_error={2}, slow_checkpoint={3}),"\ - "eviction_checkpoint_target={4},eviction_dirty_target={5},eviction_dirty_trigger={6},"\ - "eviction_target={7},eviction_trigger={8},eviction_updates_target={9},"\ - "eviction_updates_trigger={10},file_manager=(close_handle_minimum={11},"\ - "close_idle_time={12},close_scan_interval={13})".format(dbg_eviction, - dbg_realloc_exact, - dbg_rollback_error, - dbg_slow_checkpoint, - eviction_checkpoint_target, - eviction_dirty_target, - eviction_dirty_trigger, - eviction_target, - eviction_trigger, - eviction_updates_target, - eviction_updates_trigger, - close_handle_minimum, - close_idle_time_secs, - close_scan_interval) - - -def generate_table_configs(rng): - """Generate random configurations for WiredTiger tables.""" - - internal_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024 - leaf_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024 - leaf_value_max = rng.choice([1, 32, 128, 256]) * 1024 * 1024 - - memory_page_max_lower_bound = leaf_page_max - # Assume WT cache size of 1GB as most MDB tests specify this as the cache size. 
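For illustration, a minimal standalone sketch of the memory_page_max bounds that generate_table_configs works with, assuming the 256MB-1GB Evergreen cache size mentioned in the comment above; the fixed seed and variable names here are placeholders only.

import random

rng = random.Random(0)  # placeholder seed
leaf_page_max = rng.choice([4, 8, 12, 1024, 10 * 1024]) * 1024
# Upper bound is a tenth of an assumed 256MB-1GB cache, per the comment in the diff.
memory_page_max_upper_bound = round((rng.randint(256, 1024) * 1024 * 1024) / 10)
memory_page_max = rng.randint(leaf_page_max, memory_page_max_upper_bound)
assert leaf_page_max <= memory_page_max <= memory_page_max_upper_bound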
- memory_page_max_upper_bound = round( - (rng.randint(256, 1024) * 1024 * 1024) / 10) # cache_size / 10 - memory_page_max = rng.randint(memory_page_max_lower_bound, memory_page_max_upper_bound) - - split_pct = rng.choice([50, 60, 75, 100]) - prefix_compression = rng.choice(["true", "false"]) - block_compressor = rng.choice(["none", "snappy", "zlib", "zstd"]) - - return "block_compressor={0},internal_page_max={1},leaf_page_max={2},leaf_value_max={3},"\ - "memory_page_max={4},prefix_compression={5},split_pct={6}".format(block_compressor, - internal_page_max, - leaf_page_max, - leaf_value_max, - memory_page_max, - prefix_compression, - split_pct) - - -def generate_flow_control_parameters(rng): - """Generate parameters related to flow control and returns a dictionary.""" - configs = {} - configs["enableFlowControl"] = rng.choice([True, False]) - if not configs["enableFlowControl"]: - return configs - - configs["flowControlTargetLagSeconds"] = rng.randint(1, 1000) - configs["flowControlThresholdLagPercentage"] = rng.random() - configs["flowControlMaxSamples"] = rng.randint(1, 1000 * 1000) - configs["flowControlSamplePeriod"] = rng.randint(1, 1000 * 1000) - configs["flowControlMinTicketsPerSecond"] = rng.randint(1, 10 * 1000) - - return configs - - -def generate_independent_parameters(rng, mode): - """Return a dictionary with values for each independent parameter.""" - ret = {} - ret["wiredTigerCursorCacheSize"] = rng.randint(-100, 100) - ret["wiredTigerSessionCloseIdleTimeSecs"] = rng.randint(0, 300) - ret["storageEngineConcurrencyAdjustmentAlgorithm"] = "" - ret["wiredTigerConcurrentWriteTransactions"] = rng.randint(5, 32) - ret["wiredTigerConcurrentReadTransactions"] = rng.randint(5, 32) - ret["wiredTigerStressConfig"] = False if mode != 'stress' else rng.choice([True, False]) - if rng.choice(3 * [True] + [False]): - # The old retryable writes format is used by other variants. Weight towards turning on the - # new retryable writes format on in this one. - ret["storeFindAndModifyImagesInSideCollection"] = True - ret["syncdelay"] = rng.choice([60, rng.randint(15, 180)]) - ret["minSnapshotHistoryWindowInSeconds"] = rng.choice([300, rng.randint(5, 600)]) - # TODO (SERVER-75632): Uncomment this to enable passthrough testing. 
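All of the fuzzed values are drawn from a single seeded random.Random, so re-running with the same --configFuzzSeed reproduces the same choices; a small sketch of that property, using two parameter names and ranges taken from the diff (the seed value and helper name are arbitrary).

import random

def sample_parameters(seed):
    rng = random.Random(seed)
    return {
        "flowControlTargetLagSeconds": rng.randint(1, 1000),
        "wiredTigerConcurrentWriteTransactions": rng.randint(5, 32),
    }

# Same seed, same fuzzed values: a config-fuzzed failure seen in CI can be replayed locally.
assert sample_parameters(20230501) == sample_parameters(20230501)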
- # ret["lockCodeSegmentsInMemory"] = rng.choice([True, False]) - - return ret - - -def fuzz_set_parameters(mode, seed, user_provided_params): - """Randomly generate mongod configurations and wiredTigerConnectionString.""" - rng = random.Random(seed) - - ret = {} - params = [generate_flow_control_parameters(rng), generate_independent_parameters(rng, mode)] - for dct in params: - for key, value in dct.items(): - ret[key] = value - - for key, value in utils.load_yaml(user_provided_params).items(): - ret[key] = value - - return utils.dump_yaml(ret), generate_eviction_configs(rng, mode), generate_table_configs(rng), \ - generate_table_configs(rng) diff --git a/buildscripts/resmokelib/multiversionconstants.py b/buildscripts/resmokelib/multiversionconstants.py index 6d499728a337c..57dbbc349150d 100644 --- a/buildscripts/resmokelib/multiversionconstants.py +++ b/buildscripts/resmokelib/multiversionconstants.py @@ -2,8 +2,10 @@ import os import shutil from subprocess import DEVNULL, STDOUT, CalledProcessError, call, check_output +import requests import structlog +import buildscripts.resmokelib.config as _config from buildscripts.resmokelib.multiversion.multiversion_service import ( MongoReleases, MongoVersion, MultiversionService, MONGO_VERSION_YAML, RELEASES_YAML) @@ -13,6 +15,11 @@ LAST_LTS = "last_lts" LAST_CONTINUOUS = "last_continuous" +# We use the "releases.yml" file from "master" because it is guaranteed to be up-to-date +# with the latest EOL versions. If a "last-continuous" version is EOL, we don't include +# it in the multiversion config and therefore don't test against it. +MASTER_RELEASES_FILE = "https://raw.githubusercontent.com/mongodb/mongo/master/src/mongo/util/version/releases.yml" + LOGGER = structlog.getLogger(__name__) @@ -32,15 +39,20 @@ def generate_mongo_version_file(): def generate_releases_file(): """Generate the releases constants file.""" - # Copy the 'releases.yml' file from the source tree. - releases_yaml_path = os.path.join("src", "mongo", "util", "version", "releases.yml") - if not os.path.isfile(releases_yaml_path): - LOGGER.info( - 'Skipping yml file generation because file .resmoke_mongo_release_values.yml does not exist at path {}.' 
- .format(releases_yaml_path)) - return - - shutil.copyfile(releases_yaml_path, RELEASES_YAML) + try: + # Get the latest releases.yml from github + with open(RELEASES_YAML, "wb") as file: + file.write(requests.get(MASTER_RELEASES_FILE).content) + except Exception as exc: + LOGGER.warning(f"Could not get releases.yml file: {MASTER_RELEASES_FILE}") + + # If this fails in CI we want to be aware and fix this + if _config.EVERGREEN_TASK_ID: + raise exc + + # Fallback to the current releases.yml + releases_yaml_path = os.path.join("src", "mongo", "util", "version", "releases.yml") + shutil.copyfile(releases_yaml_path, RELEASES_YAML) def in_git_root_dir(): diff --git a/buildscripts/resmokelib/parser.py b/buildscripts/resmokelib/parser.py index e09b76ec29288..80e647a9906c3 100644 --- a/buildscripts/resmokelib/parser.py +++ b/buildscripts/resmokelib/parser.py @@ -11,7 +11,6 @@ from buildscripts.resmokelib.multiversion import MultiversionPlugin from buildscripts.resmokelib.powercycle import PowercyclePlugin from buildscripts.resmokelib.run import RunPlugin -from buildscripts.resmokelib.symbolizer import SymbolizerPlugin from buildscripts.resmokelib.undodb import UndoDbPlugin _PLUGINS = [ @@ -19,7 +18,6 @@ HangAnalyzerPlugin(), UndoDbPlugin(), PowercyclePlugin(), - SymbolizerPlugin(), GenerateFCVConstantsPlugin(), DiscoveryPlugin(), MultiversionPlugin(), diff --git a/buildscripts/resmokelib/run/__init__.py b/buildscripts/resmokelib/run/__init__.py index 2afd9aa64e1a2..76b3dde383258 100644 --- a/buildscripts/resmokelib/run/__init__.py +++ b/buildscripts/resmokelib/run/__init__.py @@ -15,6 +15,7 @@ import pkg_resources import psutil +from buildscripts.ciconfig.evergreen import parse_evergreen_file from buildscripts.resmokelib import parser as main_parser from buildscripts.resmokelib import config from buildscripts.resmokelib import configure_resmoke @@ -31,6 +32,7 @@ from buildscripts.resmokelib.run import list_tags from buildscripts.resmokelib.run.runtime_recorder import compare_start_time from buildscripts.resmokelib.suitesconfig import get_suite_files +from buildscripts.resmokelib.utils.dictionary import get_dict_value _INTERNAL_OPTIONS_TITLE = "Internal Options" _MONGODB_SERVER_OPTIONS_TITLE = "MongoDB Server Options" @@ -259,21 +261,81 @@ def _run_suite(self, suite): def _log_local_resmoke_invocation(self): """Log local resmoke invocation example.""" + + # Do not log local args if this is not being run in evergreen + if not config.EVERGREEN_TASK_ID: + print("Skipping local invocation because evergreen task id was not provided.") + return + + evg_conf = parse_evergreen_file("etc/evergreen.yml") + + suite = self._get_suites()[0] + suite_name = config.ORIGIN_SUITE or suite.get_name() + + # try to find the evergreen task from the resmoke suite name + task = evg_conf.get_task(suite_name) or evg_conf.get_task(f"{suite_name}_gen") + + multiversion_bin_version = None + # Some evergreen task names do not reflect the suite names they run.
+ # The suite names should be in the evergreen functions in this case + if task is None: + for current_task in evg_conf.tasks: + func = current_task.find_func_command("run tests") \ + or current_task.find_func_command("generate resmoke tasks") + if func and get_dict_value(func, ["vars", "suite"]) == suite_name: + task = current_task + break + + func = current_task.find_func_command("initialize multiversion tasks") + if not func: + continue + for subtask in func["vars"]: + if subtask == suite_name: + task = current_task + multiversion_bin_version = func["vars"][subtask] + break + + if task: + break + + if task is None: + raise RuntimeError(f"Error: Could not find evergreen task definition for {suite_name}") + + is_multiversion = "multiversion" in task.tags + generate_func = task.find_func_command("generate resmoke tasks") + is_jstestfuzz = False + if generate_func: + is_jstestfuzz = get_dict_value(generate_func, ["vars", "is_jstestfuzz"]) == "true" + local_args = to_local_args() + local_args = strip_fuzz_config_params(local_args) local_resmoke_invocation = ( f"{os.path.join('buildscripts', 'resmoke.py')} {' '.join(local_args)}") + using_config_fuzzer = False if config.FUZZ_MONGOD_CONFIGS: - local_args = strip_fuzz_config_params(local_args) - local_resmoke_invocation = ( - f"{os.path.join('buildscripts', 'resmoke.py')} {' '.join(local_args)}" - f" --fuzzMongodConfigs={config.FUZZ_MONGOD_CONFIGS} --configFuzzSeed={str(config.CONFIG_FUZZ_SEED)}" - ) + using_config_fuzzer = True + local_resmoke_invocation += f" --fuzzMongodConfigs={config.FUZZ_MONGOD_CONFIGS}" self._resmoke_logger.info("Fuzzed mongodSetParameters:\n%s", config.MONGOD_SET_PARAMETERS) self._resmoke_logger.info("Fuzzed wiredTigerConnectionString: %s", config.WT_ENGINE_CONFIG) + + if config.FUZZ_MONGOS_CONFIGS: + using_config_fuzzer = True + local_resmoke_invocation += f" --fuzzMongosConfigs={config.FUZZ_MONGOS_CONFIGS}" + + self._resmoke_logger.info("Fuzzed mongosSetParameters:\n%s", + config.MONGOS_SET_PARAMETERS) + + if using_config_fuzzer: + local_resmoke_invocation += f" --configFuzzSeed={str(config.CONFIG_FUZZ_SEED)}" + + if multiversion_bin_version: + default_tag_file = config.DEFAULTS["exclude_tags_file_path"] + local_resmoke_invocation += f" --tagFile={default_tag_file}" + resmoke_env_options = '' if os.path.exists('resmoke_env_options.txt'): with open('resmoke_env_options.txt') as fin: @@ -282,26 +344,60 @@ def _log_local_resmoke_invocation(self): self._resmoke_logger.info("resmoke.py invocation for local usage: %s %s", resmoke_env_options, local_resmoke_invocation) - suite = self._get_suites()[0] + lines = [] + + if is_multiversion: + lines.append("# DISCLAIMER:") + lines.append( + "# The `db-contrib-tool` command downloads the latest last-continuous/lts mongo shell binaries available in CI." + ) + if multiversion_bin_version: + lines.append( + "# The generated `multiversion_exclude_tags.yml` is dependent on the `backports_required_for_multiversion_tests.yml` file of the last-continuous/lts mongo shell binary git commit." + ) + lines.append( + "# If there have been new commits to last-continuous/lts, the excluded tests & binaries may be slightly different on this task vs locally." + ) + if is_jstestfuzz: + lines.append( + "# This is a jstestfuzz suite and is dependent on the generated tests specific to this task execution." 
+ ) + if suite.get_description(): - self._resmoke_logger.info("'%s' suite description:\n\n%s\n", suite.get_name(), - suite.get_description()) + lines.append(f"# {suite.get_description()}") - if suite.is_matrix_suite(): - self._resmoke_logger.info( - "This suite is a matrix suite. To view the generated matrix suite run python3 ./buildscripts/resmoke.py suiteconfig %s", - suite.get_name()) - - if config.EVERGREEN_TASK_ID: - with open("local-resmoke-invocation.txt", "w") as fh: - lines = [f"{resmoke_env_options} {local_resmoke_invocation}"] - if suite.get_description(): - lines.append(f"{suite.get_name()}: {suite.get_description()}") - if suite.is_matrix_suite(): - lines.append( - f"This suite is a matrix suite. To view the generated matrix suite run python3 ./buildscripts/resmoke.py suiteconfig {suite.get_name()}" - ) - fh.write("\n".join(lines)) + lines.append( + "# Having trouble reproducing your failure with this? Feel free to reach out in #server-testing." + ) + lines.append("") + if is_multiversion: + if not os.path.exists("local-db-contrib-tool-invocation.txt"): + raise RuntimeError( + "ERROR: local-db-contrib-tool-invocation.txt does not exist for multiversion task" + ) + + with open("local-db-contrib-tool-invocation.txt", "r") as fh: + db_contrib_tool_invocation = fh.read().strip() + " && \\" + lines.append(db_contrib_tool_invocation) + + if multiversion_bin_version: + generate_tag_file_invocation = f"buildscripts/resmoke.py generate-multiversion-exclude-tags --oldBinVersion={multiversion_bin_version} && \\" + lines.append(generate_tag_file_invocation) + + if is_jstestfuzz: + download_url = f"https://mciuploads.s3.amazonaws.com/{config.EVERGREEN_PROJECT_NAME}/{config.EVERGREEN_VARIANT_NAME}/{config.EVERGREEN_REVISION}/jstestfuzz/{config.EVERGREEN_TASK_ID}-{config.EVERGREEN_EXECUTION}.tgz" + jstestfuzz_dir = "jstestfuzz/" + jstests_tar = "jstests.tgz" + lines.append(f"mkdir -p {jstestfuzz_dir} && \\") + lines.append(f"rm -rf {jstestfuzz_dir}* && \\") + lines.append(f"wget '{download_url}' -O {jstests_tar} && \\") + lines.append(f"tar -xf {jstests_tar} -C {jstestfuzz_dir} && \\") + lines.append(f"rm {jstests_tar} && \\") + + lines.append(local_resmoke_invocation) + + with open("local-resmoke-invocation.txt", "w") as fh: + fh.write("\n".join(lines)) def _check_for_mongo_processes(self): """Check for existing mongo processes as they could interfere with running the tests.""" @@ -442,6 +538,9 @@ def _get_suites(self): self._resmoke_logger.error("Failed to parse YAML suite definition: %s", str(err)) self.list_suites() self.exit(1) + except errors.InvalidMatrixSuiteError as err: + self._resmoke_logger.error("Failed to get matrix suite: %s", str(err)) + self.exit(1) except errors.ResmokeError as err: self._resmoke_logger.error( "Cannot run excluded test in suite config. 
Use '--force-excluded-tests' to override: %s", @@ -850,6 +949,9 @@ def _add_run(cls, subparsers): parser.add_argument("--maxTestQueueSize", type=int, dest="max_test_queue_size", help=argparse.SUPPRESS) + parser.add_argument("--tagFile", action="append", dest="tag_files", metavar="TAG_FILES", + help="One or more YAML files that associate tests and tags.") + mongodb_server_options = parser.add_argument_group( title=_MONGODB_SERVER_OPTIONS_TITLE, description=("Options related to starting a MongoDB cluster that are forwarded from" @@ -933,18 +1035,23 @@ def _add_run(cls, subparsers): mongodb_server_options.add_argument( "--fuzzMongodConfigs", dest="fuzz_mongod_configs", - help="Randomly chooses server parameters that were not specified. Use 'stress' to fuzz " + help="Randomly chooses mongod parameters that were not specified. Use 'stress' to fuzz " "all configs including stressful storage configurations that may significantly " "slow down the server. Use 'normal' to only fuzz non-stressful configurations. ", metavar="MODE", choices=('normal', 'stress')) - mongodb_server_options.add_argument("--configFuzzSeed", dest="config_fuzz_seed", - metavar="PATH", - help="Sets the seed used by storage config fuzzer") + mongodb_server_options.add_argument( + "--fuzzMongosConfigs", dest="fuzz_mongos_configs", + help="Randomly chooses mongos parameters that were not specified", metavar="MODE", + choices=('normal', )) + + mongodb_server_options.add_argument( + "--configFuzzSeed", dest="config_fuzz_seed", metavar="PATH", + help="Sets the seed used by mongod and mongos config fuzzers") mongodb_server_options.add_argument( - "--catalogShard", dest="catalog_shard", metavar="CONFIG", - help="If set, specifies which node is the catalog shard. Can also be set to 'any'.") + "--configShard", dest="config_shard", metavar="CONFIG", + help="If set, specifies which node is the config shard. Can also be set to 'any'.") internal_options = parser.add_argument_group( title=_INTERNAL_OPTIONS_TITLE, @@ -1062,10 +1169,6 @@ def _add_run(cls, subparsers): metavar="REVISION_ORDER_ID", help="Sets the chronological order number of this commit.") - evergreen_options.add_argument("--tagFile", action="append", dest="tag_files", - metavar="TAG_FILES", - help="One or more YAML files that associate tests and tags.") - evergreen_options.add_argument( "--taskName", dest="task_name", metavar="TASK_NAME", help="Sets the name of the Evergreen task running the tests.") @@ -1208,6 +1311,9 @@ def to_local_args(input_args=None): run_parser = command_subparser.choices.get("run") + # arguments that are in the standard run parser that we do not want to include in the local invocation + skipped_args = ["install_dir", "tag_files"] + suites_arg = None storage_engine_arg = None other_local_args = [] @@ -1219,7 +1325,10 @@ def format_option(option_name, option_value): This function assumes that 'option_name' is always "--" prefix and isn't "-" prefixed. """ - return f"{option_name}={option_value}" + if " " not in str(option_value): + return f"{option_name}={option_value}" + else: + return f"'{option_name}={option_value}'" # Trim the argument namespace of any args we don't want to return. 
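To show why format_option now quotes values containing spaces (so the logged local invocation can be pasted straight into a shell), a self-contained example; the option values below are made up.

def format_option(option_name, option_value):
    # Quote only when the value contains whitespace, e.g. a fuzzed setParameter blob.
    if " " not in str(option_value):
        return f"{option_name}={option_value}"
    return f"'{option_name}={option_value}'"

print(format_option("--storageEngine", "wiredTiger"))
# --storageEngine=wiredTiger
print(format_option("--mongodSetParameters", "{mirrorReads: {samplingRate: 0.5}}"))
# '--mongodSetParameters={mirrorReads: {samplingRate: 0.5}}'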
for group in run_parser._action_groups: # pylint: disable=protected-access @@ -1245,6 +1354,8 @@ def format_option(option_name, option_value): _INTERNAL_OPTIONS_TITLE, _EVERGREEN_ARGUMENT_TITLE, _CEDAR_ARGUMENT_TITLE ]: continue + elif arg_dest in skipped_args: + continue elif group.title == 'positional arguments': positional_args.extend(arg_value) # Keep all remaining args. @@ -1286,7 +1397,7 @@ def strip_fuzz_config_params(input_args): ret = [] for arg in input_args: - if "--fuzzMongodConfigs" not in arg and "--fuzzConfigSeed" not in arg: + if not arg.startswith(("--fuzzMongodConfigs", "--fuzzMongosConfigs", "--configFuzzSeed")): ret.append(arg) return ret diff --git a/buildscripts/resmokelib/run/generate_multiversion_exclude_tags.py b/buildscripts/resmokelib/run/generate_multiversion_exclude_tags.py index 98be3db0fa862..1266806285c54 100755 --- a/buildscripts/resmokelib/run/generate_multiversion_exclude_tags.py +++ b/buildscripts/resmokelib/run/generate_multiversion_exclude_tags.py @@ -79,8 +79,7 @@ def generate_exclude_yaml(old_bin_version: str, output: str, logger: logging.Log output = os.path.abspath(output) location, _ = os.path.split(output) if not os.path.isdir(location): - logger.info(f"Cannot write to {output}. Not generating tag file.") - return + os.makedirs(location) backports_required_latest = read_yaml_file(os.path.join(ETC_DIR, BACKPORTS_REQUIRED_FILE)) diff --git a/buildscripts/resmokelib/selector.py b/buildscripts/resmokelib/selector.py index b26ac4a374b65..332dc3891b1d2 100644 --- a/buildscripts/resmokelib/selector.py +++ b/buildscripts/resmokelib/selector.py @@ -126,7 +126,9 @@ def parse_tag_files(test_kind, tag_files=None, tagged_tests=None): if tag_files is None: tag_files = [] for tag_file in tag_files: - if tag_file and os.path.exists(tag_file): + if not tag_file: + continue + if os.path.exists(tag_file): tags_conf = _tags.TagsConfig.from_file(tag_file) tagged_roots = tags_conf.get_test_patterns(test_kind) for tagged_root in tagged_roots: @@ -137,6 +139,12 @@ def parse_tag_files(test_kind, tag_files=None, tagged_tests=None): # A test could have a tag in more than one place, due to wildcards in the # selector. tagged_tests[test].extend(test_tags) + else: + # TODO SERVER-77265 always validate tag file input when mongo-task-generator + # no longer passes in invalid tag files + if not config.EVERGREEN_TASK_ID: + raise errors.TagFileDoesNotExistError(f"A tag file was not found at {tag_file}") + return tagged_tests @@ -604,6 +612,39 @@ def select(self, selector_config): return _Selector.select(self, selector_config) +class _PrettyPrinterTestSelectorConfig(_SelectorConfig): + """_SelectorConfig subclass for pretty-printer-tests.""" + + def __init__(self, root=config.DEFAULT_INTEGRATION_TEST_LIST, roots=None, include_files=None, + exclude_files=None): + """Initialize _PrettyPrinterTestSelectorConfig.""" + if roots: + # The 'roots' argument is only present when tests are specified on the command line + # and in that case they take precedence over the tests in the root file. 
+ _SelectorConfig.__init__(self, roots=roots, include_files=include_files, + exclude_files=exclude_files) + else: + _SelectorConfig.__init__(self, root=root, include_files=include_files, + exclude_files=exclude_files) + + +class _PrettyPrinterTestSelector(_Selector): + """_Selector subclass for pretty-printer-tests.""" + + def __init__(self, test_file_explorer): + """Initialize _PrettyPrinterTestSelector.""" + _Selector.__init__(self, test_file_explorer) + + def select(self, selector_config): + """Return selected tests.""" + if selector_config.roots: + # Tests have been specified on the command line. We use them without additional + # filtering. + test_list = _TestList(self._test_file_explorer, selector_config.roots) + return test_list.get_tests() + return _Selector.select(self, selector_config) + + class _DbTestSelectorConfig(_SelectorConfig): """_Selector config subclass for db_test tests.""" @@ -715,6 +756,7 @@ def __init__(self, test_file_explorer): _SELECTOR_REGISTRY = { "cpp_integration_test": (_CppTestSelectorConfig, _CppTestSelector), "cpp_unit_test": (_CppTestSelectorConfig, _CppTestSelector), + "pretty_printer_test": (_PrettyPrinterTestSelectorConfig, _PrettyPrinterTestSelector), "benchmark_test": (_CppTestSelectorConfig, _CppTestSelector), "sdam_json_test": (_FileBasedSelectorConfig, _Selector), "server_selection_json_test": (_FileBasedSelectorConfig, _Selector), diff --git a/buildscripts/resmokelib/setup_multiversion/setup_multiversion.py b/buildscripts/resmokelib/setup_multiversion/setup_multiversion.py index 5df6e47725a3b..b26277099ed6c 100644 --- a/buildscripts/resmokelib/setup_multiversion/setup_multiversion.py +++ b/buildscripts/resmokelib/setup_multiversion/setup_multiversion.py @@ -29,25 +29,6 @@ SUBCOMMAND = "setup-multiversion" -LOGGER = structlog.getLogger(__name__) - - -def setup_logging(debug=False): - """Enable logging.""" - log_level = logging.DEBUG if debug else logging.INFO - logging.basicConfig( - format="[%(asctime)s - %(name)s - %(levelname)s] %(message)s", - level=log_level, - stream=sys.stdout, - ) - logging.getLogger("urllib3").setLevel(logging.WARNING) - logging.getLogger("s3transfer").setLevel(logging.WARNING) - logging.getLogger("botocore").setLevel(logging.WARNING) - logging.getLogger("boto3").setLevel(logging.WARNING) - logging.getLogger("evergreen").setLevel(logging.WARNING) - logging.getLogger("github").setLevel(logging.WARNING) - structlog.configure(logger_factory=structlog.stdlib.LoggerFactory()) - def infer_platform(edition=None, version=None): """Infer platform for popular OS.""" @@ -70,16 +51,16 @@ def infer_platform(edition=None, version=None): return pltf -def get_merge_base_commit(version: str) -> Optional[str]: +def get_merge_base_commit(version: str, logger: logging.Logger) -> Optional[str]: """Get merge-base commit hash between origin/master and version.""" cmd = ["git", "merge-base", "origin/master", f"origin/v{version}"] result = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False) if result.returncode: - LOGGER.warning("Git merge-base command failed. Falling back to latest master", cmd=cmd, + logger.warning("Git merge-base command failed. 
Falling back to latest master", cmd=cmd, error=result.stderr.decode("utf-8").strip()) return None commit_hash = result.stdout.decode("utf-8").strip() - LOGGER.info("Found merge-base commit.", cmd=cmd, commit=commit_hash) + logger.info("Found merge-base commit.", cmd=cmd, commit=commit_hash) return commit_hash @@ -93,16 +74,31 @@ class EvgURLInfo(NamedTuple): class SetupMultiversion(Subcommand): """Main class for the setup multiversion subcommand.""" - def __init__(self, download_options, install_dir="", link_dir="", mv_platform=None, - edition=None, architecture=None, use_latest=None, versions=None, variant=None, - install_last_lts=None, install_last_continuous=None, evergreen_config=None, - github_oauth_token=None, debug=None, ignore_failed_push=False, - evg_versions_file=None): + def __init__( + self, + download_options, + install_dir="", + link_dir="", + mv_platform=None, + edition=None, + architecture=None, + use_latest=None, + versions=None, + variant=None, + install_last_lts=None, + install_last_continuous=None, + evergreen_config=None, + github_oauth_token=None, + debug=None, + ignore_failed_push=False, + evg_versions_file=None, + logger: Optional[logging.Logger] = None, + ): """Initialize.""" - setup_logging(debug) + + self.logger = logger or self.setup_logger() self.install_dir = os.path.abspath(install_dir) self.link_dir = os.path.abspath(link_dir) - self.edition = edition.lower() if edition else None self.platform = mv_platform.lower() if mv_platform else None self.inferred_platform = bool(self.platform is None) @@ -132,6 +128,29 @@ def __init__(self, download_options, install_dir="", link_dir="", mv_platform=No self._is_windows = is_windows() self._windows_bin_install_dirs = [] + @staticmethod + def setup_logger(debug=False) -> logging.Logger: + """ + Setup logger. + + :param debug: Whether to enable debugging or not. + :return: Logger instance. + """ + logging.getLogger("urllib3").setLevel(logging.WARNING) + logging.getLogger("s3transfer").setLevel(logging.WARNING) + logging.getLogger("botocore").setLevel(logging.WARNING) + logging.getLogger("boto3").setLevel(logging.WARNING) + logging.getLogger("evergreen").setLevel(logging.WARNING) + logging.getLogger("github").setLevel(logging.WARNING) + + log_level = logging.DEBUG if debug else logging.INFO + logger = logging.Logger("SetupMultiversion", level=log_level) + handler = logging.StreamHandler(sys.stdout) + handler.setFormatter( + logging.Formatter(fmt="[%(asctime)s - %(name)s - %(levelname)s] %(message)s")) + logger.addHandler(handler) + return logger + @staticmethod def _get_bin_suffix(version, evg_project_id): """Get the multiversion bin suffix from the evergreen project ID.""" @@ -146,27 +165,29 @@ def _get_bin_suffix(version, evg_project_id): # Use the Evergreen project ID as fallback. 
return re.search(r"(\d+\.\d+$)", evg_project_id).group(0) - @staticmethod - def _get_release_versions(install_last_lts: Optional[bool], + def _get_release_versions(self, install_last_lts: Optional[bool], install_last_continuous: Optional[bool]) -> List[str]: """Return last-LTS and/or last-continuous versions.""" out = [] if not os.path.isfile( os.path.join(os.getcwd(), "buildscripts", "resmokelib", "multiversionconstants.py")): - LOGGER.error("This command should be run from the root of the mongo repo.") - LOGGER.error("If you're running it from the root of the mongo repo and still seeing" - " this error, please reach out in #server-testing slack channel.") + self.logger.error("This command should be run from the root of the mongo repo.") + self.logger.error( + "If you're running it from the root of the mongo repo and still seeing" + " this error, please reach out in #server-testing slack channel.") exit(1) try: import buildscripts.resmokelib.multiversionconstants as multiversionconstants except ImportError: - LOGGER.error("Could not import `buildscripts.resmokelib.multiversionconstants`.") - LOGGER.error("If you're passing `--installLastLTS` and/or `--installLastContinuous`" - " flags, this module is required to automatically calculate last-LTS" - " and/or last-continuous versions.") - LOGGER.error("Try omitting these flags if you don't need the automatic calculation." - " Otherwise please reach out in #server-testing slack channel.") + self.logger.error("Could not import `buildscripts.resmokelib.multiversionconstants`.") + self.logger.error( + "If you're passing `--installLastLTS` and/or `--installLastContinuous`" + " flags, this module is required to automatically calculate last-LTS" + " and/or last-continuous versions.") + self.logger.error( + "Try omitting these flags if you don't need the automatic calculation." + " Otherwise please reach out in #server-testing slack channel.") exit(1) else: releases = { @@ -181,14 +202,15 @@ def execute(self): """Execute setup multiversion mongodb.""" if self.install_last_lts or self.install_last_continuous: self.versions.extend( - self._get_release_versions(self.install_last_lts, self.install_last_continuous)) + self._get_release_versions(self, self.install_last_lts, + self.install_last_continuous)) self.versions = list(set(self.versions)) downloaded_versions = [] for version in self.versions: - LOGGER.info("Setting up version.", version=version) - LOGGER.info("Fetching download URL from Evergreen.") + self.logger.info("Setting up version. 
version=%s", version) + self.logger.info("Fetching download URL from Evergreen.") try: self.platform = infer_platform(self.edition, @@ -197,18 +219,18 @@ def execute(self): if self.use_latest: urls_info = self.get_latest_urls(version) if self.use_latest and not urls_info.urls: - LOGGER.warning("Latest URL is not available, falling back" - " to getting the URL from 'mongodb-mongo-master'" - " project preceding the merge-base commit.") - merge_base_revision = get_merge_base_commit(version) + self.logger.warning("Latest URL is not available, falling back" + " to getting the URL from 'mongodb-mongo-master'" + " project preceding the merge-base commit.") + merge_base_revision = get_merge_base_commit(version, self.logger) urls_info = self.get_latest_urls("master", merge_base_revision) if not urls_info.urls: - LOGGER.warning("Latest URL is not available or not requested," - " falling back to getting the URL for a specific" - " version.") + self.logger.warning("Latest URL is not available or not requested," + " falling back to getting the URL for a specific" + " version.") urls_info = self.get_urls(version, self.variant) if not urls_info: - LOGGER.error("URL is not available for the version.", version=version) + self.logger.error("URL is not available for the version. version=%s", version) exit(1) urls = urls_info.urls @@ -219,21 +241,21 @@ def execute(self): # Give each version a unique install dir install_dir = os.path.join(self.install_dir, version) - self.download_and_extract_from_urls(urls, bin_suffix, install_dir) + self.download_and_extract_from_urls(self, urls, bin_suffix, install_dir) except (github_conn.GithubConnError, evergreen_conn.EvergreenConnError, download.DownloadError) as ex: - LOGGER.error(ex) + self.logger.error(ex) exit(1) else: - LOGGER.info("Setup version completed.", version=version) - LOGGER.info("-" * 50) + self.logger.info("Setup version completed. 
version=%s", version) + self.logger.info("-" * 50) if self._is_windows: - self._write_windows_install_paths(self._windows_bin_install_dirs) + self._write_windows_install_paths(self, self._windows_bin_install_dirs) if self.evg_versions_file: - self._write_evg_versions_file(self.evg_versions_file, downloaded_versions) + self._write_evg_versions_file(self, self.evg_versions_file, downloaded_versions) def download_and_extract_from_urls(self, urls, bin_suffix, install_dir): """Download and extract values indicated in `urls`.""" @@ -269,22 +291,21 @@ def download_and_extract_from_urls(self, urls, bin_suffix, install_dir): install_dir, bin_suffix, link_dir=self.link_dir, install_dir_list=self._windows_bin_install_dirs) - @staticmethod - def _write_windows_install_paths(paths): + def _write_windows_install_paths(self, paths): with open(config.WINDOWS_BIN_PATHS_FILE, "a") as out: if os.stat(config.WINDOWS_BIN_PATHS_FILE).st_size > 0: out.write(os.pathsep) out.write(os.pathsep.join(paths)) - LOGGER.info(f"Finished writing binary paths on Windows to {config.WINDOWS_BIN_PATHS_FILE}") + self.logger.info("Finished writing binary paths on Windows to %s", + config.WINDOWS_BIN_PATHS_FILE) - @staticmethod - def _write_evg_versions_file(file_name: str, versions: List[str]): + def _write_evg_versions_file(self, file_name: str, versions: List[str]): with open(file_name, "a") as out: out.write("\n".join(versions)) - LOGGER.info( - f"Finished writing downloaded Evergreen versions to {os.path.abspath(file_name)}") + self.logger.info("Finished writing downloaded Evergreen versions to %s", + os.path.abspath(file_name)) def get_latest_urls(self, version: str, start_from_revision: Optional[str] = None) -> EvgURLInfo: @@ -308,14 +329,14 @@ def get_latest_urls(self, version: str, return EvgURLInfo() buildvariant_name = self.get_buildvariant_name(version) - LOGGER.debug("Found buildvariant.", buildvariant_name=buildvariant_name) + self.logger.debug("Found buildvariant. buildvariant_name=%s", buildvariant_name) found_start_revision = start_from_revision is None for evg_version in chain(iter([evg_version]), evg_versions): # Skip all versions until we get the revision we should start looking from if found_start_revision is False and evg_version.revision != start_from_revision: - LOGGER.warning("Skipping evergreen version.", evg_version=evg_version) + self.logger.warning("Skipping evergreen version. evg_version=%s", evg_version) continue else: found_start_revision = True @@ -341,14 +362,15 @@ def get_urls(self, version: str, buildvariant_name: Optional[str] = None) -> Evg if evg_version is None: git_tag, commit_hash = github_conn.get_git_tag_and_commit(self.github_oauth_token, version) - LOGGER.info("Found git attributes.", git_tag=git_tag, commit_hash=commit_hash) + self.logger.info("Found git attributes. git_tag=%s, commit_hash=%s", git_tag, + commit_hash) evg_version = evergreen_conn.get_evergreen_version(self.evg_api, commit_hash) if evg_version is None: return EvgURLInfo() if not buildvariant_name: evg_project = evg_version.project_identifier - LOGGER.debug("Found evergreen project.", evergreen_project=evg_project) + self.logger.debug("Found evergreen project. 
evergreen_project=%s", evg_project) try: major_minor_version = re.findall(r"\d+\.\d+", evg_project)[-1] @@ -356,7 +378,7 @@ def get_urls(self, version: str, buildvariant_name: Optional[str] = None) -> Evg major_minor_version = "master" buildvariant_name = self.get_buildvariant_name(major_minor_version) - LOGGER.debug("Found buildvariant.", buildvariant_name=buildvariant_name) + self.logger.debug("Found buildvariant. buildvariant_name=%s", buildvariant_name) if buildvariant_name not in evg_version.build_variants_map: raise ValueError( @@ -369,8 +391,7 @@ def get_urls(self, version: str, buildvariant_name: Optional[str] = None) -> Evg return EvgURLInfo(urls=urls, evg_version_id=evg_version.version_id) - @staticmethod - def setup_mongodb(artifacts_url, binaries_url, symbols_url, python_venv_url, install_dir, + def setup_mongodb(self, artifacts_url, binaries_url, symbols_url, python_venv_url, install_dir, bin_suffix=None, link_dir=None, install_dir_list=None): """Download, extract and symlink.""" @@ -385,8 +406,8 @@ def try_download(download_url): try: try_download(url) except Exception as err: # pylint: disable=broad-except - LOGGER.warning("Setting up tarball failed with error, retrying once...", - error=err) + self.logger.warning( + "Setting up tarball failed with error, retrying once... error=%s", err) time.sleep(1) try_download(url) @@ -397,7 +418,7 @@ def try_download(download_url): if not is_windows(): link_dir = download.symlink_version(bin_suffix, install_dir, link_dir) else: - LOGGER.info( + self.logger.info( "Linking to install_dir on Windows; executable have to live in different working" " directories to avoid DLLs for different versions clobbering each other") link_dir = download.symlink_version(bin_suffix, install_dir, None) @@ -450,7 +471,7 @@ def parse(self, subcommand, parser, parsed_args, **kwargs): install_last_continuous=args.install_last_continuous, download_options=download_options, evergreen_config=args.evergreen_config, github_oauth_token=args.github_oauth_token, ignore_failed_push=(not args.require_push), evg_versions_file=args.evg_versions_file, - debug=args.debug) + debug=args.debug, logger=SetupMultiversion.setup_logger(parsed_args.debug)) @classmethod def _add_args_to_parser(cls, parser): diff --git a/buildscripts/resmokelib/sighandler.py b/buildscripts/resmokelib/sighandler.py index 5df67812d06cc..609ea8a6e3e5f 100644 --- a/buildscripts/resmokelib/sighandler.py +++ b/buildscripts/resmokelib/sighandler.py @@ -10,10 +10,10 @@ import psutil +from buildscripts.resmokelib.flags import HANG_ANALYZER_CALLED from buildscripts.resmokelib import reportfile from buildscripts.resmokelib import testing from buildscripts.resmokelib import config -from buildscripts.resmokelib.hang_analyzer import hang_analyzer from buildscripts.resmokelib import parser _IS_WINDOWS = (sys.platform == "win32") @@ -32,8 +32,8 @@ def _handle_sigusr1(signum, frame): # pylint: disable=unused-argument log suite summaries. 
""" + HANG_ANALYZER_CALLED.set() header_msg = "Dumping stacks due to SIGUSR1 signal" - _dump_and_log(header_msg) def _handle_set_event(event_handle): @@ -53,6 +53,7 @@ def _handle_set_event(event_handle): except win32event.error as err: logger.error("Exception from win32event.WaitForSingleObject with error: %s" % err) else: + HANG_ANALYZER_CALLED.set() header_msg = "Dumping stacks due to signal from win32event.SetEvent" _dump_and_log(header_msg) @@ -159,4 +160,26 @@ def _analyze_pids(logger, pids): if not os.getenv('ASAN_OPTIONS'): hang_analyzer_args.append('-c') _hang_analyzer = parser.parse_command_line(hang_analyzer_args, logger=logger) - _hang_analyzer.execute() + + # Evergreen has a 15 minute timeout for task timeout commands + # Limit the hang analyzer to 12 minutes so there is time for other tasks. + hang_analyzer_hard_timeout = None + if config.EVERGREEN_TASK_ID: + hang_analyzer_hard_timeout = 60 * 12 + logger.info( + "Limit the resmoke invoked hang analyzer to 12 minutes so there is time for resmoke to finish up." + ) + + hang_analyzer_thread = threading.Thread(target=_hang_analyzer.execute, daemon=True) + hang_analyzer_thread.start() + hang_analyzer_thread.join(hang_analyzer_hard_timeout) + + if hang_analyzer_thread.is_alive(): + logger.warning( + "Resmoke invoked hang analyzer thread did not finish, but will continue running in the background. The thread may be disruputed and may show extraneous output." + ) + logger.warning("Cleaning up resmoke child processes so that resmoke can fail gracefully.") + _hang_analyzer.kill_rogue_processes() + + else: + logger.info("Done running resmoke invoked hang analyzer thread.") diff --git a/buildscripts/resmokelib/suitesconfig.py b/buildscripts/resmokelib/suitesconfig.py index 0fe935b05aaf1..a6ea326288b95 100644 --- a/buildscripts/resmokelib/suitesconfig.py +++ b/buildscripts/resmokelib/suitesconfig.py @@ -2,6 +2,7 @@ import collections import copy import os +import pathlib from threading import Lock from typing import Dict, List @@ -128,7 +129,7 @@ def _make_suite_roots(files): def _get_suite_config(suite_name_or_path): """Attempt to read YAML configuration from 'suite_path' for the suite.""" - return SuiteFinder.get_config_obj(suite_name_or_path) + return SuiteFinder.get_config_obj_no_verify(suite_name_or_path) def generate(): @@ -139,7 +140,7 @@ class SuiteConfigInterface: """Interface for suite configs.""" @classmethod - def get_config_obj(cls, suite_name): + def get_config_obj_no_verify(cls, suite_name): """Get the config object given the suite name, which can be a path.""" pass @@ -161,7 +162,7 @@ class ExplicitSuiteConfig(SuiteConfigInterface): _named_suites = {} @classmethod - def get_config_obj(cls, suite_name): + def get_config_obj_no_verify(cls, suite_name): """Get the suite config object in the given file.""" if suite_name in cls.get_named_suites(): # Check if is a named suite first for efficiency. 
@@ -226,36 +227,39 @@ def get_suites_dir(): return os.path.join(_config.CONFIG_DIR, "matrix_suites") @classmethod - def get_config_obj(cls, suite_name): + def get_config_obj_and_verify(cls, suite_name): """Get the suite config object in the given file and verify it matches the generated file.""" - config = cls._get_config_obj_no_verify(suite_name) + config = cls.get_config_obj_no_verify(suite_name) if not config: return None - # TODO: SERVER-75688 add validation back - # generated_path = cls.get_generated_suite_path(suite_name) - # if not os.path.exists(generated_path): - # raise errors.InvalidMatrixSuiteError( - # f"No generated suite file was found for {suite_name}" + - # "To (re)generate the matrix suite files use `python3 buildscripts/resmoke.py generate-matrix-suites`" - # ) - - # new_text = cls.generate_matrix_suite_text(suite_name) - # with open(generated_path, "r") as file: - # old_text = file.read() - # if new_text != old_text: - # raise errors.InvalidMatrixSuiteError( - # f"The generated file found on disk did not match the mapping file for {suite_name}. " - # + - # "To (re)generate the matrix suite files use `python3 buildscripts/resmoke.py generate-matrix-suites`" - # ) + generated_path = cls.get_generated_suite_path(suite_name) + if not os.path.exists(generated_path): + raise errors.InvalidMatrixSuiteError( + f"No generated suite file was found for {suite_name}" + + "To (re)generate the matrix suite files use `python3 buildscripts/resmoke.py generate-matrix-suites`" + ) + + new_text = cls.generate_matrix_suite_text(suite_name) + with open(generated_path, "r") as file: + old_text = file.read() + if new_text != old_text: + loggers.ROOT_EXECUTOR_LOGGER.error("Generated file on disk:") + loggers.ROOT_EXECUTOR_LOGGER.error(old_text) + loggers.ROOT_EXECUTOR_LOGGER.error("Generated text from mapping file:") + loggers.ROOT_EXECUTOR_LOGGER.error(new_text) + raise errors.InvalidMatrixSuiteError( + f"The generated file found on disk did not match the mapping file for {suite_name}. 
" + + + "To (re)generate the matrix suite files use `python3 buildscripts/resmoke.py generate-matrix-suites`" + ) return config @classmethod - def _get_config_obj_no_verify(cls, suite_name): + def get_config_obj_no_verify(cls, suite_name): """Get the suite config object in the given file.""" suites_dir = cls.get_suites_dir() matrix_suite = cls.parse_mappings_file(suites_dir, suite_name) @@ -275,7 +279,7 @@ def process_overrides(cls, suite, overrides, suite_name): eval_names = suite.get("eval", None) description = suite.get("description") - base_suite = ExplicitSuiteConfig.get_config_obj(base_suite_name) + base_suite = ExplicitSuiteConfig.get_config_obj_no_verify(base_suite_name) if base_suite is None: raise ValueError(f"Unknown base suite {base_suite_name} for matrix suite {suite_name}") @@ -412,12 +416,14 @@ def generate_matrix_suite_text(cls, suite_name): if os.path.exists(path): mapping_path = path - matrix_suite = cls._get_config_obj_no_verify(suite_name) + matrix_suite = cls.get_config_obj_no_verify(suite_name) if not matrix_suite or not mapping_path: print(f"Could not find mappings file for {suite_name}") return None + # This path needs to output the same text on both windows and linux/mac + mapping_path = pathlib.PurePath(mapping_path) yml = yaml.safe_dump(matrix_suite) comments = [ "##########################################################", @@ -425,7 +431,7 @@ def generate_matrix_suite_text(cls, suite_name): "# IF YOU WISH TO MODIFY THIS SUITE, MODIFY THE CORRESPONDING MATRIX SUITE MAPPING FILE", "# AND REGENERATE THE MATRIX SUITES.", "#", - f"# matrix suite mapping file: {mapping_path}", + f"# matrix suite mapping file: {mapping_path.as_posix()}", "# regenerate matrix suites: buildscripts/resmoke.py generate-matrix-suites", "##########################################################", ] @@ -451,10 +457,10 @@ class SuiteFinder(object): """Utility/Factory class for getting polymorphic suite classes given a directory.""" @staticmethod - def get_config_obj(suite_path): + def get_config_obj_no_verify(suite_path): """Get the suite config object in the given file.""" - explicit_suite = ExplicitSuiteConfig.get_config_obj(suite_path) - matrix_suite = MatrixSuiteConfig.get_config_obj(suite_path) + explicit_suite = ExplicitSuiteConfig.get_config_obj_no_verify(suite_path) + matrix_suite = MatrixSuiteConfig.get_config_obj_no_verify(suite_path) if not (explicit_suite or matrix_suite): raise errors.SuiteNotFound("Unknown suite '%s'" % suite_path) diff --git a/buildscripts/resmokelib/symbolizer/__init__.py b/buildscripts/resmokelib/symbolizer/__init__.py index 4381fa97a88bb..60acf7f8cd96a 100644 --- a/buildscripts/resmokelib/symbolizer/__init__.py +++ b/buildscripts/resmokelib/symbolizer/__init__.py @@ -115,6 +115,7 @@ def _get_compile_artifacts(self): urlinfo = self.multiversion_setup.get_urls(version=version_id, buildvariant_name=buildvariant_name) + self.logger.info("Found urls to download and extract %s", urlinfo.urls) self.multiversion_setup.download_and_extract_from_urls(urlinfo.urls, bin_suffix=None, install_dir=self.dest_dir) diff --git a/buildscripts/resmokelib/testing/fixtures/_builder.py b/buildscripts/resmokelib/testing/fixtures/_builder.py index 429d2253eaa03..7fa3fe7543481 100644 --- a/buildscripts/resmokelib/testing/fixtures/_builder.py +++ b/buildscripts/resmokelib/testing/fixtures/_builder.py @@ -1,13 +1,9 @@ """Utilities for constructing fixtures that may span multiple versions.""" -import io import logging -import os import threading from abc import ABC, abstractmethod from 
typing import Any, Dict, List, Optional, Tuple, Type -from git import Repo - import buildscripts.resmokelib.config as config import buildscripts.resmokelib.utils.registry as registry from buildscripts.resmokelib import errors @@ -24,12 +20,11 @@ FIXTURE_DIR = "buildscripts/resmokelib/testing/fixtures" RETRIEVE_DIR = "build/multiversionfixtures" RETRIEVE_LOCK = threading.Lock() -MULTIVERSION_CLASS_SUFFIX = "_multiversion_class_suffix" _BUILDERS = {} # type: ignore -def make_fixture(class_name, logger, job_num, *args, **kwargs): +def make_fixture(class_name, logger, job_num, *args, enable_feature_flags=True, **kwargs): """Provide factory function for creating Fixture instances.""" fixturelib = FixtureLib() @@ -43,7 +38,7 @@ def make_fixture(class_name, logger, job_num, *args, **kwargs): # Special case MongoDFixture or _MongosFixture for now since we only add one option. # If there's more logic, we should add a builder class for them. - if class_name in ["MongoDFixture", "_MongoSFixture"]: + if class_name in ["MongoDFixture", "_MongoSFixture"] and enable_feature_flags: return _FIXTURES[class_name](logger, job_num, fixturelib, *args, add_feature_flags=bool(config.ENABLED_FEATURE_FLAGS), **kwargs) @@ -162,7 +157,7 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F self._mutate_kwargs(kwargs) mixed_bin_versions, old_bin_version = _extract_multiversion_options(kwargs) self._validate_multiversion_options(kwargs, mixed_bin_versions) - mongod_classes, mongod_executables, mongod_binary_versions = self._get_mongod_assets( + mongod_class, mongod_executables, mongod_binary_versions = self._get_mongod_assets( kwargs, mixed_bin_versions, old_bin_version) replset = _FIXTURES[self.REGISTERED_NAME](logger, job_num, fixturelib, *args, **kwargs) @@ -180,7 +175,7 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F return replset for node_index in range(replset.num_nodes): - node = self._new_mongod(replset, node_index, mongod_executables, mongod_classes, + node = self._new_mongod(replset, node_index, mongod_executables, mongod_class, mongod_binary_versions[node_index], is_multiversion) replset.install_mongod(node) @@ -188,7 +183,7 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F if not replset.initial_sync_node: replset.initial_sync_node_idx = replset.num_nodes replset.initial_sync_node = self._new_mongod(replset, replset.initial_sync_node_idx, - mongod_executables, mongod_classes, + mongod_executables, mongod_class, BinVersionEnum.NEW, is_multiversion) return replset @@ -240,19 +235,13 @@ def _get_mongod_assets( and the list of binary versions """ executables = {BinVersionEnum.NEW: kwargs["mongod_executable"]} - classes = {BinVersionEnum.NEW: cls.LATEST_MONGOD_CLASS} + _class = cls.LATEST_MONGOD_CLASS # Default to NEW for all bin versions; may be overridden below. 
binary_versions = [BinVersionEnum.NEW for _ in range(kwargs["num_nodes"])] if mixed_bin_versions is not None: from buildscripts.resmokelib import multiversionconstants - old_shell_version = { - config.MultiversionOptions.LAST_LTS: - multiversionconstants.LAST_LTS_MONGO_BINARY, - config.MultiversionOptions.LAST_CONTINUOUS: - multiversionconstants.LAST_CONTINUOUS_MONGO_BINARY, - }[old_bin_version] old_mongod_version = { config.MultiversionOptions.LAST_LTS: @@ -262,13 +251,9 @@ def _get_mongod_assets( }[old_bin_version] executables[BinVersionEnum.OLD] = old_mongod_version - classes[BinVersionEnum.OLD] = f"{cls.LATEST_MONGOD_CLASS}{MULTIVERSION_CLASS_SUFFIX}" binary_versions = [x for x in mixed_bin_versions] - load_version(version_path_suffix=MULTIVERSION_CLASS_SUFFIX, - shell_path=old_shell_version) - - return classes, executables, binary_versions + return _class, executables, binary_versions @staticmethod def _get_fcv(is_multiversion: bool, old_bin_version: Optional[str]) -> str: @@ -293,7 +278,7 @@ def _get_fcv(is_multiversion: bool, old_bin_version: Optional[str]) -> str: @staticmethod def _new_mongod(replset: ReplicaSetFixture, replset_node_index: int, - executables: Dict[str, str], classes: Dict[str, str], cur_version: str, + executables: Dict[str, str], _class: str, cur_version: str, is_multiversion: bool) -> FixtureContainer: """Make a fixture container with configured mongod fixture(s) in it. @@ -303,7 +288,7 @@ def _new_mongod(replset: ReplicaSetFixture, replset_node_index: int, :param replset: replica set fixture :param replset_node_index: the index of node in replica set :param executables: dict with a new and the old (if multiversion) mongod executable names - :param classes: dict with a new and the old (if multiversion) mongod fixture names + :param _class: str with the mongod fixture name :param cur_version: old or new version :param is_multiversion: whether we are in multiversion mode :return: fixture container with configured mongod fixture(s) in it @@ -315,10 +300,11 @@ def _new_mongod(replset: ReplicaSetFixture, replset_node_index: int, old_fixture = None if is_multiversion: - old_fixture = make_fixture(classes[BinVersionEnum.OLD], mongod_logger, replset.job_num, - mongod_executable=executables[BinVersionEnum.OLD], - mongod_options=mongod_options, - preserve_dbpath=replset.preserve_dbpath) + # We do not run old versions with feature flags enabled + old_fixture = make_fixture( + _class, mongod_logger, replset.job_num, enable_feature_flags=False, + mongod_executable=executables[BinVersionEnum.OLD], mongod_options=mongod_options, + preserve_dbpath=replset.preserve_dbpath) # Assign the same port for old and new fixtures so upgrade/downgrade can be done without # changing the replicaset config. 
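A simplified model of the builder change above: one mongod fixture class now serves both binaries, and only the new binary is started with feature flags. The class, executable, and flag names below are stand-ins, not the real fixture API.

from dataclasses import dataclass

@dataclass
class MongodFixtureStub:
    executable: str
    add_feature_flags: bool

def build_node(executables, is_multiversion, enabled_feature_flags):
    old = None
    if is_multiversion:
        # Mirrors the diff: old versions are never run with feature flags enabled.
        old = MongodFixtureStub(executables["old"], add_feature_flags=False)
    new = MongodFixtureStub(executables["new"],
                            add_feature_flags=bool(enabled_feature_flags))
    return new, old

new, old = build_node({"new": "mongod", "old": "mongod-last-lts"}, True, ["featureFlagToaster"])
assert new.add_feature_flags and not old.add_feature_flags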
@@ -326,7 +312,7 @@ def _new_mongod(replset: ReplicaSetFixture, replset_node_index: int, new_fixture_mongod_options = replset.get_options_for_mongod(replset_node_index) - new_fixture = make_fixture(classes[BinVersionEnum.NEW], mongod_logger, replset.job_num, + new_fixture = make_fixture(_class, mongod_logger, replset.job_num, mongod_executable=executables[BinVersionEnum.NEW], mongod_options=new_fixture_mongod_options, preserve_dbpath=replset.preserve_dbpath, port=new_fixture_port) @@ -343,40 +329,6 @@ def get_package_name(dir_path: str) -> str: return dir_path.replace('/', '.').replace("\\", ".") -def load_version(version_path_suffix=None, shell_path=None): - """Load the last_lts/last_continuous fixtures.""" - with RETRIEVE_LOCK, registry.suffix(version_path_suffix): - # Only one thread needs to retrieve the fixtures. - retrieve_dir = os.path.relpath(os.path.join(RETRIEVE_DIR, version_path_suffix)) - if not os.path.exists(retrieve_dir): - try: - # Avoid circular import - import buildscripts.resmokelib.run.generate_multiversion_exclude_tags as gen_tests - commit = gen_tests.get_backports_required_hash_for_shell_version( - mongo_shell_path=shell_path) - except FileNotFoundError as err: - print("Error running the mongo shell, please ensure it's in your $PATH: ", err) - raise - retrieve_fixtures(retrieve_dir, commit) - - package_name = get_package_name(retrieve_dir) - autoloader.load_all_modules(name=package_name, path=[retrieve_dir]) # type: ignore - - -def retrieve_fixtures(directory, commit): - """Populate a directory with the fixture files corresponding to a commit.""" - repo = Repo(MONGO_REPO_LOCATION) - real_commit = repo.commit(commit) - tree = real_commit.tree / FIXTURE_DIR - - os.makedirs(directory, exist_ok=True) - - for blob in tree.blobs: - output = os.path.join(directory, blob.name) - with io.BytesIO(blob.data_stream.read()) as retrieved, open(output, "w") as file: - file.write(retrieved.read().decode("utf-8")) - - class ShardedClusterBuilder(FixtureBuilder): """Builder class for sharded cluster fixtures.""" @@ -395,8 +347,8 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F self._mutate_kwargs(kwargs) mixed_bin_versions, old_bin_version = _extract_multiversion_options(kwargs) self._validate_multiversion_options(kwargs, mixed_bin_versions) - mongos_classes, mongos_executables = self._get_mongos_assets(kwargs, mixed_bin_versions, - old_bin_version) + mongos_class, mongos_executables = self._get_mongos_assets(kwargs, mixed_bin_versions, + old_bin_version) sharded_cluster = _FIXTURES[self.REGISTERED_NAME](logger, job_num, fixturelib, *args, **kwargs) @@ -408,16 +360,16 @@ def build_fixture(self, logger: logging.Logger, job_num: int, fixturelib: Type[F rs_shard_index, kwargs["num_rs_nodes_per_shard"]) sharded_cluster.install_rs_shard(rs_shard) - catalog_shard = kwargs["catalog_shard"] + config_shard = kwargs["config_shard"] config_svr = None - if catalog_shard is None: + if config_shard is None: config_svr = self._new_configsvr(sharded_cluster, is_multiversion, old_bin_version) else: - config_svr = sharded_cluster.shards[catalog_shard] + config_svr = sharded_cluster.shards[config_shard] sharded_cluster.install_configsvr(config_svr) for mongos_index in range(kwargs["num_mongos"]): - mongos = self._new_mongos(sharded_cluster, mongos_executables, mongos_classes, + mongos = self._new_mongos(sharded_cluster, mongos_executables, mongos_class, mongos_index, kwargs["num_mongos"], is_multiversion) sharded_cluster.install_mongos(mongos) @@ -445,9 +397,9 @@ def 
_mutate_kwargs(kwargs: Dict[str, Any]) -> None: config.DEFAULT_MONGOS_EXECUTABLE) kwargs["mongos_executable"] = mongos_executable - catalog_shard = pick_catalog_shard_node( - kwargs.pop("catalog_shard", config.CATALOG_SHARD), num_shards) - kwargs["catalog_shard"] = catalog_shard + config_shard = pick_catalog_shard_node( + kwargs.pop("config_shard", config.CONFIG_SHARD), num_shards) + kwargs["config_shard"] = config_shard @staticmethod def _validate_multiversion_options(kwargs: Dict[str, Any], @@ -478,16 +430,10 @@ def _get_mongos_assets(cls, kwargs: Dict[str, Any], mixed_bin_versions: Optional """ executables = {BinVersionEnum.NEW: kwargs["mongos_executable"]} - classes = {BinVersionEnum.NEW: cls.LATEST_MONGOS_CLASS} + _class = cls.LATEST_MONGOS_CLASS if mixed_bin_versions is not None: from buildscripts.resmokelib import multiversionconstants - old_shell_version = { - config.MultiversionOptions.LAST_LTS: - multiversionconstants.LAST_LTS_MONGO_BINARY, - config.MultiversionOptions.LAST_CONTINUOUS: - multiversionconstants.LAST_CONTINUOUS_MONGO_BINARY, - }[old_bin_version] old_mongos_version = { config.MultiversionOptions.LAST_LTS: @@ -497,12 +443,7 @@ def _get_mongos_assets(cls, kwargs: Dict[str, Any], mixed_bin_versions: Optional }[old_bin_version] executables[BinVersionEnum.OLD] = old_mongos_version - classes[BinVersionEnum.OLD] = f"{cls.LATEST_MONGOS_CLASS}{MULTIVERSION_CLASS_SUFFIX}" - - load_version(version_path_suffix=MULTIVERSION_CLASS_SUFFIX, - shell_path=old_shell_version) - - return classes, executables + return _class, executables @staticmethod def _new_configsvr(sharded_cluster: ShardedClusterFixture, is_multiversion: bool, @@ -556,7 +497,7 @@ def _new_rs_shard(sharded_cluster: ShardedClusterFixture, @staticmethod def _new_mongos(sharded_cluster: ShardedClusterFixture, executables: Dict[str, str], - classes: Dict[str, str], mongos_index: int, total: int, + _class: str, mongos_index: int, total: int, is_multiversion: bool) -> FixtureContainer: """Make a fixture container with configured mongos fixture(s) in it. @@ -565,7 +506,7 @@ def _new_mongos(sharded_cluster: ShardedClusterFixture, executables: Dict[str, s :param sharded_cluster: sharded cluster fixture we are configuring mongos for :param executables: dict with a new and the old (if multiversion) mongos executable names - :param classes: dict with a new and the old (if multiversion) mongos fixture names + :param _class: str with the mongos fixture name :param mongos_index: the index of mongos :param total: total number of mongos :param is_multiversion: whether we are in multiversion mode @@ -578,15 +519,16 @@ def _new_mongos(sharded_cluster: ShardedClusterFixture, executables: Dict[str, s old_fixture = None if is_multiversion: + # We do not run old versions with feature flags enabled old_fixture = make_fixture( - classes[BinVersionEnum.OLD], mongos_logger, sharded_cluster.job_num, + _class, mongos_logger, sharded_cluster.job_num, enable_feature_flags=False, mongos_executable=executables[BinVersionEnum.OLD], **mongos_kwargs) # We can't restart mongos since explicit ports are not supported. 
new_fixture_mongos_kwargs = sharded_cluster.get_mongos_kwargs() - new_fixture = make_fixture( - classes[BinVersionEnum.NEW], mongos_logger, sharded_cluster.job_num, - mongos_executable=executables[BinVersionEnum.NEW], **new_fixture_mongos_kwargs) + new_fixture = make_fixture(_class, mongos_logger, sharded_cluster.job_num, + mongos_executable=executables[BinVersionEnum.NEW], + **new_fixture_mongos_kwargs) # Always spin up an old mongos if in multiversion mode given mongos is the last thing in the update path. return FixtureContainer(new_fixture, old_fixture, diff --git a/buildscripts/resmokelib/testing/fixtures/replicaset.py b/buildscripts/resmokelib/testing/fixtures/replicaset.py index 61e7d01d7fc57..9ea3d2fa0a6fe 100644 --- a/buildscripts/resmokelib/testing/fixtures/replicaset.py +++ b/buildscripts/resmokelib/testing/fixtures/replicaset.py @@ -47,7 +47,7 @@ def __init__(self, logger, job_num, fixturelib, mongod_executable=None, mongod_o replset_config_options=None, voting_secondaries=True, all_nodes_electable=False, use_replica_set_connection_string=None, linear_chain=False, default_read_concern=None, default_write_concern=None, shard_logging_prefix=None, - replicaset_logging_prefix=None, replset_name=None): + replicaset_logging_prefix=None, replset_name=None, config_shard=None): """Initialize ReplicaSetFixture.""" interface.ReplFixture.__init__(self, logger, job_num, fixturelib, @@ -110,6 +110,7 @@ def __init__(self, logger, job_num, fixturelib, mongod_executable=None, mongod_o self.replset_name = self.mongod_options.setdefault("replSet", "rs") self.initial_sync_node = None self.initial_sync_node_idx = -1 + self.config_shard = config_shard def setup(self): """Set up the replica set.""" diff --git a/buildscripts/resmokelib/testing/fixtures/shard_split.py b/buildscripts/resmokelib/testing/fixtures/shard_split.py index 5be3e390efa0e..e707254b011df 100644 --- a/buildscripts/resmokelib/testing/fixtures/shard_split.py +++ b/buildscripts/resmokelib/testing/fixtures/shard_split.py @@ -247,44 +247,46 @@ def add_recipient_nodes(self, recipient_set_name, recipient_tag_name=None): # Reconfig the donor to add the recipient nodes as non-voting members donor_client = self._create_client(self.get_donor_rs()) - repl_config = with_naive_retry(lambda: donor_client.admin.command({"replSetGetConfig": 1})[ - "config"]) - repl_members = repl_config["members"] - - for recipient_node in recipient_nodes: - # It is possible for the reconfig below to fail with a retryable error code like - # 'InterruptedDueToReplStateChange'. In these cases, we need to run the reconfig - # again, but some or all of the recipient nodes might have already been added to - # the member list. Only add recipient nodes which have not yet been added on a - # retry. 
- recipient_host = recipient_node.get_internal_connection_string() - recipient_entry = { - "host": recipient_host, "votes": 0, "priority": 0, "hidden": True, - "tags": {recipient_tag_name: str(ObjectId())} - } - member_exists = False - for index, member in enumerate(repl_members): - if member["host"] == recipient_host: - repl_members[index] = recipient_entry - member_exists = True - - if not member_exists: - repl_members.append(recipient_entry) - - # Re-index all members from 0 - for idx, member in enumerate(repl_members): - member["_id"] = idx - - # Prepare the new config - repl_config["version"] = repl_config["version"] + 1 - repl_config["members"] = repl_members - - self.logger.info( - f"Reconfiguring donor replica set to add non-voting recipient nodes: {repl_config}") - with_naive_retry(lambda: donor_client.admin.command({ - "replSetReconfig": repl_config, "maxTimeMS": self.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000 - })) - + def reconfig_add_node_rs(client): + repl_config = client.admin.command({"replSetGetConfig": 1})["config"] + repl_members = repl_config["members"] + + for recipient_node in recipient_nodes: + # It is possible for the reconfig below to fail with a retryable error code like + # 'InterruptedDueToReplStateChange'. In these cases, we need to run the reconfig + # again, but some or all of the recipient nodes might have already been added to + # the member list. Only add recipient nodes which have not yet been added on a + # retry. + recipient_host = recipient_node.get_internal_connection_string() + recipient_entry = { + "host": recipient_host, "votes": 0, "priority": 0, "hidden": True, + "tags": {recipient_tag_name: str(ObjectId())} + } + member_exists = False + for index, member in enumerate(repl_members): + if member["host"] == recipient_host: + repl_members[index] = recipient_entry + member_exists = True + + if not member_exists: + repl_members.append(recipient_entry) + + # Re-index all members from 0 + for idx, member in enumerate(repl_members): + member["_id"] = idx + + # Prepare the new config + repl_config["version"] = repl_config["version"] + 1 + repl_config["members"] = repl_members + + self.logger.info( + f"Reconfiguring donor replica set to add non-voting recipient nodes: {repl_config}") + client.admin.command({ + "replSetReconfig": repl_config, + "maxTimeMS": self.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000 + }) + + with_naive_retry(lambda: reconfig_add_node_rs(donor_client)) # Wait for recipient nodes to become secondaries self._await_recipient_nodes() @@ -334,30 +336,37 @@ def remove_recipient_nodes(self, recipient_tag_name=None): self.fixtures = [donor_rs] donor_client = self._create_client(self.get_donor_rs()) - repl_config = with_naive_retry(lambda: donor_client.admin.command({"replSetGetConfig": 1})[ - "config"]) - repl_members = [ - member for member in repl_config["members"] - if not 'tags' in member or not recipient_tag_name in member["tags"] - ] - # Re-index all members from 0 - for idx, member in enumerate(repl_members): - member["_id"] = idx - - # Prepare the new config - repl_config["version"] = repl_config["version"] + 1 - repl_config["members"] = repl_members - - # It's possible that the recipient config has been removed in a previous remove attempt. 
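The shard split rewrite above moves the entire read-modify-write sequence into `reconfig_add_node_rs` and retries that whole function, so every attempt re-reads the replica set config (and its version) instead of replaying a stale `replSetReconfig`. A simplified sketch of the pattern, assuming a pymongo-style `client`; the retry loop below is a deliberately naive stand-in for resmoke's helper, which only retries specific retryable error codes.

```python
import time


def naive_retry(func, timeout_secs=100, delay_secs=0.1):
    """Deliberately simple retry loop; the real helper only retries retryable pymongo errors."""
    deadline = time.monotonic() + timeout_secs
    last_exc = None
    while time.monotonic() < deadline:
        try:
            return func()
        except Exception as exc:  # illustrative only
            last_exc = exc
            time.sleep(delay_secs)
    raise last_exc


def add_members(client, new_entries):
    """Read-modify-write reconfig; safe to retry because the config is re-read each attempt."""
    config = client.admin.command({"replSetGetConfig": 1})["config"]
    members = config["members"]
    for entry in new_entries:
        if not any(member["host"] == entry["host"] for member in members):
            members.append(entry)
    # Re-index all members from 0 and bump the config version.
    for idx, member in enumerate(members):
        member["_id"] = idx
    config["version"] += 1
    client.admin.command({"replSetReconfig": config, "maxTimeMS": 60000})


# Usage shape (requires a live donor replica set):
# naive_retry(lambda: add_members(donor_client, recipient_entries))
```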
- if "recipientConfig" in repl_config: - del repl_config["recipientConfig"] - - self.logger.info( - f"Reconfiguring donor '{donor_rs_name}' to remove recipient nodes: {repl_config}") - with_naive_retry(lambda: donor_client.admin.command({ - "replSetReconfig": repl_config, "maxTimeMS": self.AWAIT_REPL_TIMEOUT_MINS * 60 * 1000 - })) + def reconfig_rs(client): + repl_config = client.admin.command({"replSetGetConfig": 1})["config"] + repl_members = [ + member for member in repl_config["members"] + if not 'tags' in member or not recipient_tag_name in member["tags"] + ] + + if "recipientConfig" in repl_config: + del repl_config["recipientConfig"] + elif repl_members == repl_config["members"]: + # The recipientConfig and recipient nodes have already been cleaned, no need to + # reconfig. + return + + # Re-index all members from 0 + for idx, member in enumerate(repl_members): + member["_id"] = idx + + # Prepare the new config + repl_config["version"] = repl_config["version"] + 1 + repl_config["members"] = repl_members + + self.logger.info( + f"Reconfiguring donor '{donor_rs_name}' to remove recipient nodes: {repl_config}") + donor_client.admin.command({ + "replSetReconfig": repl_config, + "maxTimeMS": self.AWAIT_READY_TIMEOUT_SECS * 60 * 1000 + }) + + with_naive_retry(func=lambda: reconfig_rs(donor_client)) self.logger.info("Tearing down recipient nodes and removing data directories.") for recipient_node in reversed(recipient_nodes): diff --git a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py index ba2e27099e10c..a561c44f471ba 100644 --- a/buildscripts/resmokelib/testing/fixtures/shardedcluster.py +++ b/buildscripts/resmokelib/testing/fixtures/shardedcluster.py @@ -24,7 +24,7 @@ def __init__(self, logger, job_num, fixturelib, mongos_executable=None, mongos_o preserve_dbpath=False, num_shards=1, num_rs_nodes_per_shard=1, num_mongos=1, enable_sharding=None, enable_balancer=True, auth_options=None, configsvr_options=None, shard_options=None, cluster_logging_prefix=None, - catalog_shard=None): + config_shard=None): """Initialize ShardedClusterFixture with different options for the cluster processes.""" interface.Fixture.__init__(self, logger, job_num, fixturelib, dbpath_prefix=dbpath_prefix) @@ -42,7 +42,7 @@ def __init__(self, logger, job_num, fixturelib, mongos_executable=None, mongos_o mongod_options.get("set_parameters", {})).copy() self.mongod_options["set_parameters"]["migrationLockAcquisitionMaxWaitMS"] = \ self.mongod_options["set_parameters"].get("migrationLockAcquisitionMaxWaitMS", 30000) - self.catalog_shard = catalog_shard + self.config_shard = config_shard self.preserve_dbpath = preserve_dbpath self.num_shards = num_shards self.num_rs_nodes_per_shard = num_rs_nodes_per_shard @@ -91,7 +91,7 @@ def pids(self): def setup(self): """Set up the sharded cluster.""" - if self.catalog_shard is None: + if self.config_shard is None: self.configsvr.setup() # Start up each of the shards @@ -139,11 +139,11 @@ def await_ready(self): # Turn off the balancer if it is not meant to be enabled. if not self.enable_balancer: - self.stop_balancer() + self.stop_balancer(join_migrations=False) # Inform mongos about each of the shards for idx, shard in enumerate(self.shards): - self._add_shard(client, shard, self.catalog_shard == idx) + self._add_shard(client, shard, self.config_shard == idx) # Ensure that all CSRS nodes are up to date. This is strictly needed for tests that use # multiple mongoses. 
In those cases, the first mongos initializes the contents of the config @@ -192,13 +192,18 @@ def _await_mongod_sharding_initialization(self): .format(port, interface.Fixture.AWAIT_READY_TIMEOUT_SECS)) time.sleep(0.1) - def stop_balancer(self, timeout_ms=60000): + # TODO SERVER-76343 remove the join_migrations parameter and the if clause depending on it. + def stop_balancer(self, timeout_ms=300000, join_migrations=True): """Stop the balancer.""" client = interface.build_client(self, self.auth_options) client.admin.command({"balancerStop": 1}, maxTimeMS=timeout_ms) + if join_migrations: + for shard in self.shards: + shard_client = interface.build_client(shard.get_primary(), self.auth_options) + shard_client.admin.command({"_shardsvrJoinMigrations": 1}) self.logger.info("Stopped the balancer") - def start_balancer(self, timeout_ms=60000): + def start_balancer(self, timeout_ms=300000): """Start the balancer.""" client = interface.build_client(self, self.auth_options) client.admin.command({"balancerStart": 1}, maxTimeMS=timeout_ms) @@ -329,8 +334,8 @@ def get_rs_shard_kwargs(self, index): auth_options = shard_options.pop("auth_options", self.auth_options) preserve_dbpath = shard_options.pop("preserve_dbpath", self.preserve_dbpath) - replset_config_options = self.fixturelib.make_historic( - shard_options.pop("replset_config_options", {})) + replset_config_options = shard_options.pop("replset_config_options", {}) + replset_config_options = replset_config_options.copy() replset_config_options["configsvr"] = False mongod_options = self.mongod_options.copy() @@ -340,16 +345,24 @@ def get_rs_shard_kwargs(self, index): mongod_options["dbpath"] = os.path.join(self._dbpath_prefix, "shard{}".format(index)) mongod_options["replSet"] = self._SHARD_REPLSET_NAME_PREFIX + str(index) - if self.catalog_shard == index: + if self.config_shard == index: del mongod_options["shardsvr"] mongod_options["configsvr"] = "" replset_config_options["configsvr"] = True - mongod_options["set_parameters"]["featureFlagCatalogShard"] = "true" mongod_options["set_parameters"]["featureFlagTransitionToCatalogShard"] = "true" + mongod_options["storageEngine"] = "wiredTiger" configsvr_options = self.configsvr_options.copy() + + if "mongod_options" in configsvr_options: + mongod_options = self.fixturelib.merge_mongo_option_dicts( + mongod_options, configsvr_options["mongod_options"]) + if "replset_config_options" in configsvr_options: + replset_config_options = self.fixturelib.merge_mongo_option_dicts( + replset_config_options, configsvr_options["replset_config_options"]) + for option, value in configsvr_options.items(): - if option == "num_nodes": + if option in ("num_nodes", "mongod_options", "replset_config_options"): continue if option in shard_options: if shard_options[option] != value: @@ -363,8 +376,8 @@ def get_rs_shard_kwargs(self, index): return { "mongod_options": mongod_options, "mongod_executable": self.mongod_executable, "auth_options": auth_options, "preserve_dbpath": preserve_dbpath, - "replset_config_options": replset_config_options, - "shard_logging_prefix": shard_logging_prefix, **shard_options + "replset_config_options": replset_config_options, "shard_logging_prefix": + shard_logging_prefix, "config_shard": self.config_shard, **shard_options } def install_rs_shard(self, rs_shard): @@ -390,7 +403,7 @@ def install_mongos(self, mongos): """Install a mongos. 
Called by a builder.""" self.mongos.append(mongos) - def _add_shard(self, client, shard, is_catalog_shard): + def _add_shard(self, client, shard, is_config_shard): """ Add the specified program as a shard by executing the addShard command. @@ -398,9 +411,9 @@ def _add_shard(self, client, shard, is_catalog_shard): """ connection_string = shard.get_internal_connection_string() - if is_catalog_shard: - self.logger.info("Adding %s as catalog shard...", connection_string) - client.admin.command({"transitionToCatalogShard": 1}) + if is_config_shard: + self.logger.info("Adding %s as config shard...", connection_string) + client.admin.command({"transitionFromDedicatedConfigServer": 1}) else: self.logger.info("Adding %s as a shard...", connection_string) client.admin.command({"addShard": connection_string}) diff --git a/buildscripts/resmokelib/testing/fixtures/standalone.py b/buildscripts/resmokelib/testing/fixtures/standalone.py index 9c88a64c45a9e..055f06077c5d1 100644 --- a/buildscripts/resmokelib/testing/fixtures/standalone.py +++ b/buildscripts/resmokelib/testing/fixtures/standalone.py @@ -236,6 +236,16 @@ def launch_mongod_program(self, logger, job_num, executable=None, process_kwargs if self.config.MONGOD_SET_PARAMETERS is not None: suite_set_parameters.update(yaml.safe_load(self.config.MONGOD_SET_PARAMETERS)) + # Some storage options are both a mongod option (as in config file option and its equivalent + # "--xyz" command line parameter) and a "--setParameter". In case of conflict, for instance + # due to the config fuzzer adding "xyz" as a "--setParameter" when the "--xyz" option is + # already defined in the suite's YAML, the "--setParameter" value shall be preserved and the + # "--xyz" option discarded to avoid hitting an error due to conflicting definitions. + mongod_option_and_set_parameter_conflicts = ["syncdelay", "journalCommitInterval"] + for key in mongod_option_and_set_parameter_conflicts: + if (key in mongod_options and key in suite_set_parameters): + del mongod_options[key] + # Set default log verbosity levels if none were specified. if "logComponentVerbosity" not in suite_set_parameters: suite_set_parameters[ @@ -257,6 +267,15 @@ def launch_mongod_program(self, logger, job_num, executable=None, process_kwargs and "orphanCleanupDelaySecs" not in suite_set_parameters): suite_set_parameters["orphanCleanupDelaySecs"] = 1 + # receiveChunkWaitForRangeDeleterTimeoutMS controls the amount of time an incoming migration + # will wait for an intersecting range with data in it to be cleared up before failing. The + # default is 10 seconds, but in some slower variants this is not enough time for the range + # deleter to finish so we increase it here to 90 seconds. Setting a value for this parameter + # in the .yml file overrides this. + if (("shardsvr" in mongod_options or "configsvr" in mongod_options) + and "receiveChunkWaitForRangeDeleterTimeoutMS" not in suite_set_parameters): + suite_set_parameters["receiveChunkWaitForRangeDeleterTimeoutMS"] = 90000 + # The LogicalSessionCache does automatic background refreshes in the server. This is # race-y for tests, since tests trigger their own immediate refreshes instead. Turn off # background refreshing for tests. Set in the .yml file to override this. 
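The two additions to `launch_mongod_program` above guard against duplicate server settings: knobs such as `syncdelay` and `journalCommitInterval` exist both as top-level mongod options and as `--setParameter` names, so when the config fuzzer injects the setParameter form the top-level copy is dropped; similarly, `receiveChunkWaitForRangeDeleterTimeoutMS` is only defaulted for shard and config servers when the suite YAML has not set it. A small, self-contained sketch of that precedence logic; the option dicts are illustrative.

```python
# Knobs that can be spelled either as a mongod option or as a --setParameter.
MONGOD_OPTION_AND_SET_PARAMETER_CONFLICTS = ["syncdelay", "journalCommitInterval"]


def resolve_mongod_options(mongod_options: dict, set_parameters: dict) -> None:
    """Prefer the setParameter spelling whenever both forms of the same knob are present."""
    for key in MONGOD_OPTION_AND_SET_PARAMETER_CONFLICTS:
        if key in mongod_options and key in set_parameters:
            del mongod_options[key]

    # Give shard/config servers a longer range-deleter wait unless the suite overrides it.
    if (("shardsvr" in mongod_options or "configsvr" in mongod_options)
            and "receiveChunkWaitForRangeDeleterTimeoutMS" not in set_parameters):
        set_parameters["receiveChunkWaitForRangeDeleterTimeoutMS"] = 90000


opts = {"shardsvr": "", "syncdelay": 60}
params = {"syncdelay": 15}
resolve_mongod_options(opts, params)
assert "syncdelay" not in opts
assert params["receiveChunkWaitForRangeDeleterTimeoutMS"] == 90000
```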
diff --git a/buildscripts/resmokelib/testing/hook_test_archival.py b/buildscripts/resmokelib/testing/hook_test_archival.py index 38909056ce379..23ebfda8de34e 100644 --- a/buildscripts/resmokelib/testing/hook_test_archival.py +++ b/buildscripts/resmokelib/testing/hook_test_archival.py @@ -6,6 +6,7 @@ from buildscripts.resmokelib import config from buildscripts.resmokelib import errors from buildscripts.resmokelib import utils +from buildscripts.resmokelib.flags import HANG_ANALYZER_CALLED from buildscripts.resmokelib.utils import globstar @@ -105,5 +106,9 @@ def _archive_hook_or_test(self, logger, test_name, test, manager): else: logger.info("Archive succeeded for %s: %s", test_name, message) - if not manager.setup_fixture(logger): + if HANG_ANALYZER_CALLED.is_set(): + logger.info("Hang Analyzer has been called. Fixtures will not be restarted.") + raise errors.StopExecution( + "Hang analyzer has been called. Stopping further execution of tests.") + elif not manager.setup_fixture(logger): raise errors.StopExecution("Error while restarting test fixtures after archiving.") diff --git a/buildscripts/resmokelib/testing/hooks/aggregate_metrics_background.py b/buildscripts/resmokelib/testing/hooks/aggregate_metrics_background.py index d392c6ae0b8f9..c6b415087359a 100644 --- a/buildscripts/resmokelib/testing/hooks/aggregate_metrics_background.py +++ b/buildscripts/resmokelib/testing/hooks/aggregate_metrics_background.py @@ -4,70 +4,69 @@ internally sleep for 1 second between runs. """ -import os.path +import pymongo +import random -from buildscripts.resmokelib import errors -from buildscripts.resmokelib.testing.hooks import jsfile -from buildscripts.resmokelib.testing.hooks.background_job import _BackgroundJob, _ContinuousDynamicJSTestCase +from buildscripts.resmokelib.testing.hooks.bghook import BGHook -class AggregateResourceConsumptionMetricsInBackground(jsfile.JSHook): +class AggregateResourceConsumptionMetricsInBackground(BGHook): """A hook to run $operationMetrics stage in the background.""" - IS_BACKGROUND = True - def __init__(self, hook_logger, fixture, shell_options=None): """Initialize AggregateResourceConsumptionMetricsInBackground.""" - description = "Run background $operationMetrics on all mongods while a test is running" - js_filename = os.path.join("jstests", "hooks", "run_aggregate_metrics_background.js") - jsfile.JSHook.__init__(self, hook_logger, fixture, js_filename, description, - shell_options=shell_options) - self._background_job = None - - def before_suite(self, test_report): - """Start the background thread.""" - self._background_job = _BackgroundJob("AggregateResourceConsumptionMetricsInBackground") - self.logger.info("Starting the background aggregate metrics thread.") - self._background_job.start() - - def after_suite(self, test_report, teardown_flag=None): - """Signal the background aggregate metrics thread to exit, and wait until it does.""" - if self._background_job is None: - return - - self.logger.info("Stopping the background aggregate metrics thread.") - self._background_job.stop() - - def before_test(self, test, test_report): - """Instruct the background aggregate metrics thread to run while 'test' is also running.""" - if self._background_job is None: - return - hook_test_case = _ContinuousDynamicJSTestCase.create_before_test( - test.logger, test, self, self._js_filename, self._shell_options) - hook_test_case.configure(self.fixture) - - self.logger.info("Resuming the background aggregate metrics thread.") - self._background_job.resume(hook_test_case, 
test_report) - - def after_test(self, test, test_report): # noqa: D205,D400 - """Instruct the background aggregate metrics thread to stop running now that 'test' has - finished running. - """ - if self._background_job is None: - return - - self.logger.info("Pausing the background aggregate metrics thread.") - self._background_job.pause() - - if self._background_job.exc_info is not None: - if isinstance(self._background_job.exc_info[1], errors.TestFailure): - # If the mongo shell process running the JavaScript file exited with a non-zero - # return code, then we raise an errors.ServerFailure exception to cause resmoke.py's - # test execution to stop. - raise errors.ServerFailure(self._background_job.exc_info[1].args[0]) - else: - self.logger.error( - "Encountered an error inside the background aggregate metrics thread.", - exc_info=self._background_job.exc_info) - raise self._background_job.exc_info[1] + description = "Run background $operationMetrics on all mongods while a test is running" + super().__init__(hook_logger, fixture, description, tests_per_cycle=None, + loop_delay_ms=1000) + + def run_action(self): + """Collects $operationMetrics on all non-arbiter nodes in the fixture.""" + for node_info in self.fixture.get_node_info(): + conn = pymongo.MongoClient(port=node_info.port) + # Filter out arbiters. + if "arbiterOnly" in conn.admin.command({"isMaster": 1}): + self.logger.info( + "Skipping background aggregation against test node: %s because it is an " + + "arbiter and has no data.", node_info.full_name) + return + + # Clear the metrics about 10% of the time. + clear_metrics = random.random() < 0.1 + self.logger.info("Running $operationMetrics with {clearMetrics: %s} on host: %s", + clear_metrics, node_info.full_name) + with conn.admin.aggregate( + [{"$operationMetrics": {"clearMetrics": clear_metrics}}]) as cursor: + for doc in cursor: + try: + self.verify_metrics(doc) + except: + self.logger.info( + "caught exception while verifying that all expected fields are in the" + + " metrics output: ", doc) + raise + + def verify_metrics(self, doc): + """Checks whether the output from $operatiomMetrics has the schema we expect.""" + + top_level_fields = [ + "docBytesWritten", "docUnitsWritten", "idxEntryBytesWritten", "idxEntryUnitsWritten", + "totalUnitsWritten", "cpuNanos", "db", "primaryMetrics", "secondaryMetrics" + ] + read_fields = [ + "docBytesRead", "docUnitsRead", "idxEntryBytesRead", "idxEntryUnitsRead", "keysSorted", + "docUnitsReturned" + ] + + for key in top_level_fields: + assert key in doc, ("The metrics output is missing the property: " + key) + + primary_metrics = doc["primaryMetrics"] + for key in read_fields: + assert key in primary_metrics, ( + "The metrics output is missing the property: primaryMetrics." + key) + + secondary_metrics = doc["secondaryMetrics"] + for key in read_fields: + assert key in secondary_metrics, ( + "The metrics output is missing the property: secondaryMetrics." 
+ key) diff --git a/buildscripts/resmokelib/testing/hooks/background_job.py b/buildscripts/resmokelib/testing/hooks/background_job.py index b01196ba462ac..2adba99b97e76 100644 --- a/buildscripts/resmokelib/testing/hooks/background_job.py +++ b/buildscripts/resmokelib/testing/hooks/background_job.py @@ -3,6 +3,7 @@ import sys import threading +from buildscripts.resmokelib import errors from buildscripts.resmokelib.testing.hooks import jsfile diff --git a/buildscripts/resmokelib/testing/hooks/bghook.py b/buildscripts/resmokelib/testing/hooks/bghook.py index b61f37b4e2d8e..1b3fed3b10427 100644 --- a/buildscripts/resmokelib/testing/hooks/bghook.py +++ b/buildscripts/resmokelib/testing/hooks/bghook.py @@ -13,11 +13,13 @@ class BGJob(threading.Thread): BGJob will call 'run_action' without any delay and expects the 'run_action' function to add some form of delay. """ - def __init__(self, hook): + def __init__(self, hook, loop_delay_ms=None): """Initialize the background job.""" threading.Thread.__init__(self, name=f"BGJob-{hook.__class__.__name__}") + self._loop_delay_ms = loop_delay_ms self.daemon = True self._hook = hook + self._interrupt_event = threading.Event() self.__is_alive = True self.err = None @@ -29,6 +31,14 @@ def run(self): try: self._hook.run_action() + if self._loop_delay_ms is not None: + # The configured loop delay asked us to wait before running the action again. Do + # that wait, but listen to see if we finish running the test or are killed in + # the meantime. + interrupted = self._interrupt_event.wait(self._loop_delay_ms / 1000.0) + if interrupted: + self._hook.logger.info("interrupted") + break except Exception as err: # pylint: disable=broad-except self._hook.logger.error("Background thread caught exception: %s.", err) self.err = err @@ -37,6 +47,7 @@ def run(self): def kill(self): """Kill the background job.""" self.__is_alive = False + self._interrupt_event.set() class BGHook(interface.Hook): @@ -46,8 +57,13 @@ class BGHook(interface.Hook): # By default, we continuously run the background hook for the duration of the suite. DEFAULT_TESTS_PER_CYCLE = math.inf - def __init__(self, hook_logger, fixture, desc, tests_per_cycle=None): - """Initialize the background hook.""" + def __init__(self, hook_logger, fixture, desc, tests_per_cycle=None, loop_delay_ms=None): + """ + Initialize the background hook. + + 'tests_per_cycle' or 'loop_delay_ms' can be used to configure how often the background job + is restarted, and how often run_action() is called, respectively. + """ interface.Hook.__init__(self, hook_logger, fixture, desc) self.logger = hook_logger @@ -57,15 +73,21 @@ def __init__(self, hook_logger, fixture, desc, tests_per_cycle=None): self._test_num = 0 # The number of tests we execute before restarting the background hook. self._tests_per_cycle = self.DEFAULT_TESTS_PER_CYCLE if tests_per_cycle is None else tests_per_cycle + self._loop_delay_ms = loop_delay_ms def run_action(self): - """Perform an action. This function will be called continuously in the BgJob.""" + """ + Perform an action. This function will be called continuously in the BgJob. + + If a sleep_delay_ms was given, that many milliseconds of sleep will happen between each + invocation. 
+ """ raise NotImplementedError def before_suite(self, test_report): """Start the background thread.""" self.logger.info("Starting the background thread.") - self._background_job = BGJob(self) + self._background_job = BGJob(self, self._loop_delay_ms) self._background_job.start() def after_suite(self, test_report, teardown_flag=None): @@ -86,7 +108,7 @@ def before_test(self, test, test_report): return self.logger.info("Restarting the background thread.") - self._background_job = BGJob(self) + self._background_job = BGJob(self, self._loop_delay_ms) self._background_job.start() def after_test(self, test, test_report): diff --git a/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py b/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py index e391f8e56644e..1c1caaf3cb553 100644 --- a/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py +++ b/buildscripts/resmokelib/testing/hooks/cleanup_concurrency_workloads.py @@ -68,11 +68,6 @@ def run_test(self): exclude_dbs.append(same_db_name) self.logger.info("Dropping all databases except for %s", exclude_dbs) - is_sharded_fixture = isinstance(self._hook.fixture, shardedcluster.ShardedClusterFixture) - # Stop the balancer. - if is_sharded_fixture and self._hook.fixture.enable_balancer: - self._hook.fixture.stop_balancer() - for db_name in [db for db in db_names if db not in exclude_dbs]: self.logger.info("Dropping database %s", db_name) try: @@ -93,7 +88,3 @@ def run_test(self): self.logger.exception("Encountered an error while dropping db % collection %s.", same_db_name, coll) raise - - # Start the balancer. - if is_sharded_fixture and self._hook.fixture.enable_balancer: - self._hook.fixture.start_balancer() diff --git a/buildscripts/resmokelib/testing/hooks/configshard_transition.py b/buildscripts/resmokelib/testing/hooks/configshard_transition.py new file mode 100644 index 0000000000000..1b66933de28e5 --- /dev/null +++ b/buildscripts/resmokelib/testing/hooks/configshard_transition.py @@ -0,0 +1,186 @@ +"""Test hook that periodically transitions the config server in/out of config shard mode.""" + +import time +import threading +import random + +from buildscripts.resmokelib import errors +from buildscripts.resmokelib.testing.hooks import interface +from buildscripts.resmokelib.testing.hooks import lifecycle as lifecycle_interface +from buildscripts.resmokelib.testing.fixtures import shardedcluster +from buildscripts.resmokelib.testing.fixtures import interface as fixture_interface + + +class ContinuousConfigShardTransition(interface.Hook): + DESCRIPTION = ( + "Continuous config shard transition (transitions in/out of config shard mode at regular" + " intervals)") + + IS_BACKGROUND = True + + STOPS_FIXTURE = False + + def __init__(self, hook_logger, fixture, auth_options=None): + interface.Hook.__init__(self, hook_logger, fixture, + ContinuousConfigShardTransition.DESCRIPTION) + self._fixture = fixture + self._transition_thread = None + self._auth_options = auth_options + + def before_suite(self, test_report): + """Before suite.""" + lifecycle = lifecycle_interface.FlagBasedThreadLifecycle() + + if not isinstance(self._fixture, shardedcluster.ShardedClusterFixture): + msg = "Can only transition config shard mode for sharded cluster fixtures." 
+ self.logger.error(msg) + raise errors.ServerFailure(msg) + + self._transition_thread = _TransitionThread(self.logger, lifecycle, self._fixture, + self._auth_options) + self.logger.info("Starting the transition thread.") + self._transition_thread.start() + + def after_suite(self, test_report, teardown_flag=None): + """After suite.""" + self.logger.info("Stopping the transition thread.") + self._transition_thread.stop() + self.logger.info("Transition thread stopped.") + + def before_test(self, test, test_report): + """Before test.""" + self.logger.info("Resuming the transition thread.") + self._transition_thread.pause() + self._transition_thread.resume() + + def after_test(self, test, test_report): + """After test.""" + self.logger.info("Pausing the transition thread.") + self._transition_thread.pause() + self.logger.info("Paused the transition thread.") + + +class _TransitionThread(threading.Thread): + CONFIG_SHARD = "config shard mode" + DEDICATED = "dedicated config server mode" + + def __init__(self, logger, stepdown_lifecycle, fixture, auth_options): + threading.Thread.__init__(self, name="TransitionThread") + self.logger = logger + self.__lifecycle = stepdown_lifecycle + self._fixture = fixture + self._auth_options = auth_options + self._client = fixture_interface.build_client(self._fixture, self._auth_options) + self._current_mode = self._current_fixture_mode() + + self._last_exec = time.time() + # Event set when the thread has been stopped using the 'stop()' method. + self._is_stopped_evt = threading.Event() + # Event set when the thread is not performing stepdowns. + self._is_idle_evt = threading.Event() + self._is_idle_evt.set() + + def _current_fixture_mode(self): + res = self._client.admin.command({"listShards": 1}) + + for shard_info in res["shards"]: + if shard_info["_id"] == "config": + return self.CONFIG_SHARD + + return self.DEDICATED + + def run(self): + try: + while True: + self._is_idle_evt.set() + + permitted = self.__lifecycle.wait_for_action_permitted() + if not permitted: + break + + self._is_idle_evt.clear() + secs = float(10) + now = time.time() + if now - self._last_exec > secs: + self.logger.info("Starting transition from " + self._current_mode) + if self._current_mode is self.CONFIG_SHARD: + self._transition_to_dedicated() + self._current_mode = self.DEDICATED + else: + self._transition_to_config_shard() + self._current_mode = self.CONFIG_SHARD + self._last_exec = time.time() + self.logger.info("Completed transition to %s in %0d ms", self._current_mode, + (self._last_exec - now) * 1000) + except Exception: # pylint: disable=W0703 + # Proactively log the exception when it happens so it will be + # flushed immediately. + self.logger.exception("Transition Thread threw exception") + # The event should be signaled whenever the thread is not performing stepdowns. + self._is_idle_evt.set() + + def stop(self): + """Stop the thread.""" + self.__lifecycle.stop() + self._is_stopped_evt.set() + # Unpause to allow the thread to finish. + self.resume() + self.join() + + def pause(self): + """Pause the thread.""" + self.__lifecycle.mark_test_finished() + + # Wait until we are no longer executing stepdowns. + self._is_idle_evt.wait() + # Check if the thread is alive in case it has thrown an exception while running. + self._check_thread() + + def resume(self): + """Resume the thread.""" + self.__lifecycle.mark_test_started() + + def _check_thread(self): + if not self.is_alive(): + msg = "The transition thread is not running." 
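The transition thread above follows the same pause/resume protocol as the other background hooks: it only performs work while the lifecycle permits it, and it signals an idle event so `pause()` can block until any in-flight transition finishes before the next test starts. A stripped-down, runnable sketch of that idle-event handshake with hypothetical names and no MongoDB calls; like the real thread, the handshake is best-effort at action boundaries.

```python
import threading
import time


class PausableWorker(threading.Thread):
    """Illustrative worker that can be paused only between actions."""

    def __init__(self):
        super().__init__(daemon=True)
        self._permitted = threading.Event()   # set while a test is running
        self._idle = threading.Event()        # set whenever no action is in flight
        self._stopped = threading.Event()
        self._idle.set()

    def run(self):
        while not self._stopped.is_set():
            self._idle.set()
            if not self._permitted.wait(timeout=0.05):
                continue
            self._idle.clear()
            time.sleep(0.01)  # the real thread would run one transition here

    def resume(self):
        self._permitted.set()

    def pause(self):
        self._permitted.clear()
        self._idle.wait()  # block until the current action has finished

    def stop(self):
        self._stopped.set()
        self.resume()
        self.join()


worker = PausableWorker()
worker.start()
worker.resume()
time.sleep(0.1)
worker.pause()
worker.stop()
```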
+ self.logger.error(msg) + raise errors.ServerFailure(msg) + + def _transition_to_dedicated(self): + res = None + start_time = time.time() + while True: + res = self._client.admin.command({"transitionToDedicatedConfigServer": 1}) + + if res["state"] == "completed": + break + elif res["state"] == "ongoing" and res["dbsToMove"]: + non_config_shard_id = self._get_non_config_shard_id() + for db in res["dbsToMove"]: + msg = "running movePrimary for: " + str(db) + self.logger.info(msg) + self._client.admin.command({"movePrimary": db, "to": non_config_shard_id}) + + time.sleep(1) + + if time.time() - start_time > float(300): + msg = "Could not transition to dedicated config server. with last response: " + str( + res) + self.logger.error(msg) + raise errors.ServerFailure(msg) + + def _transition_to_config_shard(self): + self._client.admin.command({"transitionFromDedicatedConfigServer": 1}) + + def _get_non_config_shard_id(self): + res = self._client.admin.command({"listShards": 1}) + + if len(res["shards"]) < 2: + msg = "Did not find a non-config shard" + self.logger.error(msg) + raise errors.ServerFailure(msg) + + possible_choices = [ + shard_info["_id"] for shard_info in res["shards"] if shard_info["_id"] != "config" + ] + return random.choice(possible_choices) diff --git a/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py b/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py index b2328e1fe0628..a8b679904c2fd 100644 --- a/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py +++ b/buildscripts/resmokelib/testing/hooks/continuous_initial_sync.py @@ -106,7 +106,8 @@ def _add_fixture(self, fixture): for shard_fixture in fixture.shards: self._add_fixture(shard_fixture) - self._add_fixture(fixture.configsvr) + if fixture.config_shard is None: + self._add_fixture(fixture.configsvr) for mongos_fixture in fixture.mongos: self._mongos_fixtures.append(mongos_fixture) diff --git a/buildscripts/resmokelib/testing/hooks/fcv_upgrade_downgrade.py b/buildscripts/resmokelib/testing/hooks/fcv_upgrade_downgrade.py new file mode 100644 index 0000000000000..d8d7c21cad603 --- /dev/null +++ b/buildscripts/resmokelib/testing/hooks/fcv_upgrade_downgrade.py @@ -0,0 +1,75 @@ +"""Test hook for running background FCV upgrade and downgrade commands. + +This hook runs continously in a background thread while the test is running. + +This can be run against a replicaset or sharded cluster. 
+""" + +import os.path + +from buildscripts.resmokelib import errors +from buildscripts.resmokelib.testing.hooks import jsfile +from buildscripts.resmokelib.testing.hooks.background_job import _BackgroundJob, _ContinuousDynamicJSTestCase + + +class FCVUpgradeDowngradeInBackground(jsfile.JSHook): + """A hook to run background FCV upgrade and downgrade against test servers while a test is running.""" + + IS_BACKGROUND = True + + def __init__(self, hook_logger, fixture, shell_options=None): + """Initialize FCVUpgradeDowngradeInBackground.""" + description = "Run background FCV upgrade/downgrade while a test is running" + js_filename = os.path.join("jstests", "hooks", "run_fcv_upgrade_downgrade_background.js") + jsfile.JSHook.__init__(self, hook_logger, fixture, js_filename, description, + shell_options=shell_options) + + self._background_job = None + + def before_suite(self, test_report): + """Start the background thread.""" + self._background_job = _BackgroundJob("FCVUpgradeDowngradeInBackground") + self.logger.info("Starting the background FCV upgrade/downgrade thread.") + self._background_job.start() + + def after_suite(self, test_report, teardown_flag=None): + """Signal the background FCV upgrade/downgrade thread to exit, and wait until it does.""" + if self._background_job is None: + return + + self.logger.info("Stopping the background FCV upgrade/downgrade thread.") + self._background_job.stop() + + def before_test(self, test, test_report): + """Instruct the background FCV upgrade/downgrade thread to run while 'test' is also running.""" + if self._background_job is None: + return + + hook_test_case = _ContinuousDynamicJSTestCase.create_before_test( + test.logger, test, self, self._js_filename, self._shell_options) + hook_test_case.configure(self.fixture) + + self.logger.info("Resuming the background FCV upgrade/downgrade thread.") + self._background_job.resume(hook_test_case, test_report) + + def after_test(self, test, test_report): # noqa: D205,D400 + """Instruct the background FCV upgrade/downgrade thread to stop running now that 'test' has + finished running. + """ + if self._background_job is None: + return + + self.logger.info("Pausing the background FCV upgrade/downgrade thread.") + self._background_job.pause() + + if self._background_job.exc_info is not None: + if isinstance(self._background_job.exc_info[1], errors.TestFailure): + # If the mongo shell process running the JavaScript file exited with a non-zero + # return code, then we raise an errors.ServerFailure exception to cause resmoke.py's + # test execution to stop. 
+ raise errors.ServerFailure(self._background_job.exc_info[1].args[0]) + else: + self.logger.error( + "Encountered an error inside the background FCV upgrade/downgrade thread.", + exc_info=self._background_job.exc_info) + raise self._background_job.exc_info[1] diff --git a/buildscripts/resmokelib/testing/hooks/orphans.py b/buildscripts/resmokelib/testing/hooks/orphans.py index 38d62f51010c7..2d2d3947aecc5 100644 --- a/buildscripts/resmokelib/testing/hooks/orphans.py +++ b/buildscripts/resmokelib/testing/hooks/orphans.py @@ -22,16 +22,3 @@ def __init__(self, hook_logger, fixture, shell_options=None): js_filename = os.path.join("jstests", "hooks", "run_check_orphans_are_deleted.js") super().__init__(hook_logger, fixture, js_filename, description, shell_options=shell_options) - - def after_test(self, test, test_report): - """Run the run_check_orphans_are_deleted.js hook.""" - - # We temporarily disable the balancer so more work isn't generated for the range deleter - # while the hook is running. - if self.fixture.enable_balancer: - self.fixture.stop_balancer() - - super().after_test(test, test_report) - - if self.fixture.enable_balancer: - self.fixture.start_balancer() diff --git a/buildscripts/resmokelib/testing/hooks/run_query_stats.py b/buildscripts/resmokelib/testing/hooks/run_query_stats.py new file mode 100644 index 0000000000000..06cb182e422fc --- /dev/null +++ b/buildscripts/resmokelib/testing/hooks/run_query_stats.py @@ -0,0 +1,37 @@ +""" +Test hook for verifying $queryStats collects expected metrics and can redact query shapes. + +This runs in the background as other tests are ongoing. +""" + +from buildscripts.resmokelib.testing.hooks.interface import Hook +from bson import binary + + +class RunQueryStats(Hook): + """Runs $queryStats after every test, and clears the query stats store before every test.""" + + IS_BACKGROUND = False + + def __init__(self, hook_logger, fixture): + description = "Read query stats data after each test." + super().__init__(hook_logger, fixture, description) + self.client = self.fixture.mongo_client() + self.hmac_key = binary.Binary(("0" * 32).encode('utf-8')) + + def verify_query_stats(self, querystats_spec): + """Verify a $queryStats call has all the right properties.""" + with self.client.admin.aggregate([{"$queryStats": querystats_spec}]) as cursor: + for operation in cursor: + assert "key" in operation + assert "metrics" in operation + assert "asOf" in operation + + def after_test(self, test, test_report): + self.verify_query_stats({}) + self.verify_query_stats( + {"transformIdentifiers": {"algorithm": "hmac-sha-256", "hmacKey": self.hmac_key}}) + + def before_test(self, test, test_report): + self.client.admin.command("setParameter", 1, internalQueryStatsCacheSize="0%") + self.client.admin.command("setParameter", 1, internalQueryStatsCacheSize="1%") diff --git a/buildscripts/resmokelib/testing/hooks/stepdown.py b/buildscripts/resmokelib/testing/hooks/stepdown.py index 1d274c5e1fece..ac73bb01bbf15 100644 --- a/buildscripts/resmokelib/testing/hooks/stepdown.py +++ b/buildscripts/resmokelib/testing/hooks/stepdown.py @@ -55,6 +55,10 @@ def __init__(self, hook_logger, fixture, config_stepdown=True, shard_stepdown=Tr interface.Hook.__init__(self, hook_logger, fixture, ContinuousStepdown.DESCRIPTION) self._fixture = fixture + if hasattr(fixture, "config_shard") and fixture.config_shard is not None and shard_stepdown: + # If the config server is a shard, shard_stepdown implies config_stepdown. 
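With a config shard, the config server is itself one of the shards, so the stepdown hook change above treats `shard_stepdown` as implying `config_stepdown` and avoids registering the config server twice. A small sketch of the resulting target selection, using plain data in place of fixture objects; the names and the per-shard check are illustrative rather than the exact resmoke structure.

```python
def stepdown_targets(shards, config_shard_index, shard_stepdown, config_stepdown):
    """Return which replica sets a stepdown thread should act on.

    `shards` is a list of shard names; `config_shard_index` is the index of the shard
    acting as the config server, or None when there is a dedicated config server.
    """
    if config_shard_index is not None and shard_stepdown:
        # A config shard means shard stepdowns necessarily hit the config server too.
        config_stepdown = True

    targets = []
    if shard_stepdown:
        for idx, shard in enumerate(shards):
            if idx != config_shard_index or config_stepdown:
                targets.append(shard)
    if config_stepdown and config_shard_index is None:
        targets.append("configsvr")
    return targets


# Dedicated config server: step down the shards plus the separate config server.
assert stepdown_targets(["rs0", "rs1"], None, True, True) == ["rs0", "rs1", "configsvr"]
# Config shard on rs0: shard stepdown covers the config server, nothing extra to add.
assert stepdown_targets(["rs0", "rs1"], 0, True, False) == ["rs0", "rs1"]
```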
+ config_stepdown = shard_stepdown + self._config_stepdown = config_stepdown self._shard_stepdown = shard_stepdown self._stepdown_interval_secs = float(stepdown_interval_ms) / 1000 @@ -128,8 +132,9 @@ def _add_fixture(self, fixture): elif isinstance(fixture, shardedcluster.ShardedClusterFixture): if self._shard_stepdown: for shard_fixture in fixture.shards: - self._add_fixture(shard_fixture) - if self._config_stepdown: + if shard_fixture.config_shard is None or self._config_stepdown: + self._add_fixture(shard_fixture) + if self._config_stepdown and fixture.config_shard is None: self._add_fixture(fixture.configsvr) for mongos_fixture in fixture.mongos: self._mongos_fixtures.append(mongos_fixture) diff --git a/buildscripts/resmokelib/testing/hooks/tenant_migration.py b/buildscripts/resmokelib/testing/hooks/tenant_migration.py index 6b805fa137ee1..a629d1206dca4 100644 --- a/buildscripts/resmokelib/testing/hooks/tenant_migration.py +++ b/buildscripts/resmokelib/testing/hooks/tenant_migration.py @@ -9,6 +9,7 @@ from bson.binary import Binary, UUID_SUBTYPE from pymongo.errors import OperationFailure, PyMongoError +from functools import partial from buildscripts.resmokelib import errors from buildscripts.resmokelib.testing.fixtures import tenant_migration @@ -234,6 +235,7 @@ class _TenantMigrationThread(threading.Thread): WAIT_SECS_RANGES = [[0.05, 0.1], [0.1, 0.5], [1, 5], [5, 15]] POLL_INTERVAL_SECS = 0.1 + MIGRATION_ABORTED_ERR_CODE = 325 NO_SUCH_MIGRATION_ERR_CODE = 327 INTERNAL_ERR_CODE = 1 INVALID_SYNC_SOURCE_ERR_CODE = 119 @@ -364,6 +366,10 @@ def _is_fail_point_abort_reason(self, abort_reason): return abort_reason["code"] == self.INTERNAL_ERR_CODE and abort_reason[ "errmsg"] == "simulate a tenant migration error" + def _is_recipient_failover_abort_reason(self, abort_reason): + return abort_reason["code"] == self.MIGRATION_ABORTED_ERR_CODE and abort_reason[ + "errmsg"].find("Recipient failover happened during migration") + def _create_migration_opts(self, donor_rs_index, recipient_rs_index): donor_rs = self._tenant_migration_fixture.get_replset(donor_rs_index) recipient_rs = self._tenant_migration_fixture.get_replset(recipient_rs_index) @@ -432,7 +438,11 @@ def _run_migration(self, migration_opts): # noqa: D205,D400 return True abort_reason = res["abortReason"] - if self._is_fail_point_abort_reason(abort_reason): + if self._is_recipient_failover_abort_reason(abort_reason): + self.logger.info("Tenant migration '%s' aborted due to recipient failover: %s", + migration_opts.migration_id, str(res)) + return False + elif self._is_fail_point_abort_reason(abort_reason): self.logger.info( "Tenant migration '%s' with donor replica set '%s' aborted due to failpoint: " + "%s.", migration_opts.migration_id, migration_opts.get_donor_name(), str(res)) @@ -590,7 +600,7 @@ def _drop_tenant_databases_on_recipient(self, migration_opts): for database in res["databases"]: db_name = database["name"] if db_name.startswith(self._tenant_id + "_"): - recipient_client.drop_database(db_name) + with_naive_retry(partial(recipient_client.drop_database, db_name)) except PyMongoError as err: self.logger.exception( f"Error dropping databases for tenant '{self._tenant_id}' on replica set '{migration_opts.get_recipient_name()}': '{str(err)}'." 
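In the tenant migration hunk above, `recipient_client.drop_database(db_name)` is now wrapped in `with_naive_retry`, with the database name bound up front via `functools.partial` so the retry helper receives a zero-argument callable. A tiny illustration of that shape; the retry helper below is a trivial stand-in, and a list append takes the place of the real drop call.

```python
from functools import partial


def with_retry(func, attempts=3):
    """Trivial stand-in for resmoke's with_naive_retry: takes a zero-argument callable."""
    for attempt in range(attempts):
        try:
            return func()
        except Exception:  # illustrative only
            if attempt == attempts - 1:
                raise


dropped = []

# partial(f, arg) binds the argument immediately, producing the zero-argument callable
# the retry helper expects; equivalent here to `lambda: dropped.append(db_name)`.
for db_name in ["tenantA_db0", "tenantA_db1"]:
    with_retry(partial(dropped.append, db_name))

assert dropped == ["tenantA_db0", "tenantA_db1"]
```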
diff --git a/buildscripts/resmokelib/testing/hooks/wait_for_replication.py b/buildscripts/resmokelib/testing/hooks/wait_for_replication.py index 7e3129b746cfc..6b587f6e1f347 100644 --- a/buildscripts/resmokelib/testing/hooks/wait_for_replication.py +++ b/buildscripts/resmokelib/testing/hooks/wait_for_replication.py @@ -25,9 +25,9 @@ def after_test(self, test, test_report): start_time = time.time() client_conn = self.fixture.get_driver_connection_url() js_cmds = """ - conn = '{}'; + const conn = '{}'; try {{ - rst = new ReplSetTest(conn); + const rst = new ReplSetTest(conn); rst.awaitReplication(); }} catch (e) {{ jsTestLog("WaitForReplication got error: " + tojson(e)); diff --git a/buildscripts/resmokelib/testing/job.py b/buildscripts/resmokelib/testing/job.py index db29bb06a870c..3379dd51777fa 100644 --- a/buildscripts/resmokelib/testing/job.py +++ b/buildscripts/resmokelib/testing/job.py @@ -7,6 +7,7 @@ from buildscripts.resmokelib import config from buildscripts.resmokelib import errors from buildscripts.resmokelib.testing import testcases +from buildscripts.resmokelib.testing.fixtures import shardedcluster from buildscripts.resmokelib.testing.fixtures.interface import create_fixture_table from buildscripts.resmokelib.testing.testcases import fixture as _fixture from buildscripts.resmokelib.utils import queue as _queue @@ -282,7 +283,13 @@ def _run_hooks_after_tests(self, test, hook_failure_flag, background=False): @param test: the test after which we run the hooks. @param background: whether to run background hooks. """ + suite_with_balancer = isinstance( + self.fixture, shardedcluster.ShardedClusterFixture) and self.fixture.enable_balancer + try: + if not background and suite_with_balancer: + self.logger.info("Stopping the balancer before running end-test hooks") + self.fixture.stop_balancer() for hook in self.hooks: if hook.IS_BACKGROUND == background: self._run_hook(hook, hook.after_test, test, hook_failure_flag) @@ -307,6 +314,10 @@ def _run_hooks_after_tests(self, test, hook_failure_flag, background=False): self.report.setError(test, sys.exc_info()) raise + if not background and suite_with_balancer: + self.logger.info("Resuming the balancer after running end-test hooks") + self.fixture.start_balancer() + def _fail_test(self, test, exc_info, return_code=1): """Provide helper to record a test as a failure with the provided return code. diff --git a/buildscripts/resmokelib/testing/report.py b/buildscripts/resmokelib/testing/report.py index cc98664fabeb2..5dd2cbe870b4e 100644 --- a/buildscripts/resmokelib/testing/report.py +++ b/buildscripts/resmokelib/testing/report.py @@ -146,13 +146,9 @@ def stopTest(self, test): try: # check if there are stacktrace files, if so, invoke the symbolizer here. - # If there are no stacktrace files for this job, we do not need to invoke the symbolizer at all. - # Take a lock to download the debug symbols if it hasn't already been downloaded. 
# log symbolized output to test.logger.info() - symbolizer = ResmokeSymbolizer() symbolizer.symbolize_test_logs(test) - # symbolization completed unittest.TestResult.stopTest(self, test) @@ -404,7 +400,7 @@ def __init__(self, test_id, test_file, dynamic): self.evergreen_status = None self.return_code = None self.url_endpoint = None - self.exception_extractors = None + self.exception_extractors = [] self.error = None diff --git a/buildscripts/resmokelib/testing/retry.py b/buildscripts/resmokelib/testing/retry.py index 3b5287c4b6155..9d722caf9154b 100644 --- a/buildscripts/resmokelib/testing/retry.py +++ b/buildscripts/resmokelib/testing/retry.py @@ -22,17 +22,17 @@ ] -def is_retryable_error(exc): +def is_retryable_error(exc, retryable_error_codes): if isinstance(exc, ConnectionFailure): return True if exc.has_error_label("RetryableWriteError"): return True - if isinstance(exc, OperationFailure) and exc.code in retryable_codes: + if isinstance(exc, OperationFailure) and exc.code in retryable_error_codes: return True return False -def with_naive_retry(func, timeout=100): +def with_naive_retry(func, timeout=100, extra_retryable_error_codes=None): """ Retry execution of a provided function naively for up to `timeout` seconds. @@ -41,8 +41,13 @@ def with_naive_retry(func, timeout=100): :param func: The function to execute :param timeout: The maximum amount of time to retry execution + :param extra_retryable_error_codes: List of additional error codes that should be considered retryable """ + retryable_error_codes = set(retryable_codes) + if extra_retryable_error_codes: + retryable_error_codes.update(extra_retryable_error_codes) + last_exc = None start = time.monotonic() while time.monotonic() - start < timeout: @@ -50,7 +55,7 @@ def with_naive_retry(func, timeout=100): return func() except PyMongoError as exc: last_exc = exc - if not is_retryable_error(exc): + if not is_retryable_error(exc, retryable_error_codes): raise time.sleep(0.1) diff --git a/buildscripts/resmokelib/testing/symbolizer_service.py b/buildscripts/resmokelib/testing/symbolizer_service.py index 767e8cb50923a..b4468ef317598 100644 --- a/buildscripts/resmokelib/testing/symbolizer_service.py +++ b/buildscripts/resmokelib/testing/symbolizer_service.py @@ -11,6 +11,7 @@ from typing import List, Optional, NamedTuple, Set from buildscripts.resmokelib import config as _config +from buildscripts.resmokelib.flags import HANG_ANALYZER_CALLED from buildscripts.resmokelib.testing.testcases.interface import TestCase # This lock prevents different resmoke jobs from symbolizing stacktraces concurrently, @@ -57,6 +58,15 @@ def is_windows() -> bool: """ return sys.platform == "win32" or sys.platform == "cygwin" + @staticmethod + def is_macos() -> bool: + """ + Whether we are on MacOS. + + :return: True if on MacOS. + """ + return sys.platform == "darwin" + class ResmokeSymbolizer: """Symbolize stacktraces inside test logs.""" @@ -135,6 +145,16 @@ def should_symbolize(self, test: TestCase) -> bool: test.logger.info("Running on Windows, skipping symbolization") return False + if self.config.is_macos(): + test.logger.info("Running on MacOS, skipping symbolization") + return False + + if HANG_ANALYZER_CALLED.is_set(): + test.logger.info( + "Hang analyzer has been called, skipping symbolization to meet timeout constraints." 
+ ) + return False + return True def get_stacktrace_dir(self, test: TestCase) -> Optional[str]: @@ -305,8 +325,8 @@ def run_symbolizer_script(full_file_path: str, retry_timeout_secs: int) -> str: """ symbolizer_args = [ - "python", - "buildscripts/mongosymb.py", + "db-contrib-tool", + "symbolize", "--client-secret", _config.SYMBOLIZER_CLIENT_SECRET, "--client-id", diff --git a/buildscripts/resmokelib/testing/testcases/pretty_printer_testcase.py b/buildscripts/resmokelib/testing/testcases/pretty_printer_testcase.py new file mode 100644 index 0000000000000..15cb9cff96d36 --- /dev/null +++ b/buildscripts/resmokelib/testing/testcases/pretty_printer_testcase.py @@ -0,0 +1,25 @@ +"""The unittest.TestCase for pretty printer tests.""" +import os + +from buildscripts.resmokelib import config +from buildscripts.resmokelib import core +from buildscripts.resmokelib import utils +from buildscripts.resmokelib.testing.testcases import interface + + +class PrettyPrinterTestCase(interface.ProcessTestCase): + """A pretty printer test to execute.""" + + REGISTERED_NAME = "pretty_printer_test" + + def __init__(self, logger, program_executable, program_options=None): + """Initialize the PrettyPrinterTestCase with the executable to run.""" + + interface.ProcessTestCase.__init__(self, logger, "pretty printer test", program_executable) + + self.program_executable = program_executable + self.program_options = utils.default_if_none(program_options, {}).copy() + + def _make_process(self): + return core.programs.make_process(self.logger, [self.program_executable], + **self.program_options) diff --git a/buildscripts/resmokelib/utils/__init__.py b/buildscripts/resmokelib/utils/__init__.py index c1054af800c02..2f5475ddb9426 100644 --- a/buildscripts/resmokelib/utils/__init__.py +++ b/buildscripts/resmokelib/utils/__init__.py @@ -92,19 +92,18 @@ def get_task_name_without_suffix(task_name, variant_name): return re.sub(fr"(_[0-9]+)?(_{variant_name})?$", "", task_name) -def pick_catalog_shard_node(catalog_shard, num_shards): - """Get catalog_shard node index or None if no catalog_shard.""" - if catalog_shard is None: +def pick_catalog_shard_node(config_shard, num_shards): + """Get config_shard node index or None if no config_shard.""" + if config_shard is None: return None - if num_shards is None or int(num_shards) <= 0: - raise ValueError("Num shards > 0 for catalog shard to exist") + if config_shard == "any": + if num_shards is None or num_shards == 0: + return 0 + return random.randint(0, num_shards - 1) - if catalog_shard == "any": - return random.randrange(0, num_shards) + config_shard_index = int(config_shard) + if config_shard_index < 0 or config_shard_index >= num_shards: + raise ValueError("Config shard value must be in range 0..num_shards-1 or \"any\"") - catalog_shard_index = int(catalog_shard) - if catalog_shard_index < 0 or catalog_shard_index >= num_shards: - raise ValueError("Catalog shard value must be in range 0..num_shards-1 or \"any\"") - - return catalog_shard_index + return config_shard_index diff --git a/buildscripts/tests/resmoke_end2end/test_resmoke.py b/buildscripts/tests/resmoke_end2end/test_resmoke.py index 787a4bb51946b..74b83410dece7 100644 --- a/buildscripts/tests/resmoke_end2end/test_resmoke.py +++ b/buildscripts/tests/resmoke_end2end/test_resmoke.py @@ -13,7 +13,9 @@ import yaml -from buildscripts.resmokelib import core +from buildscripts.ciconfig.evergreen import parse_evergreen_file +from buildscripts.resmokelib import config, core, suitesconfig +from 
buildscripts.resmokelib.utils.dictionary import get_dict_value # pylint: disable=unsupported-membership-test @@ -64,8 +66,13 @@ def setUpClass(cls): cls.archival_file = "test_archival.txt" def test_archival_on_task_failure(self): + # The --originSuite argument is to trick the resmoke local invocation into passing + # because when we pass --taskId into resmoke it thinks that it is being ran in evergreen + # and cannot normally find an evergreen task associated with + # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_failure.yml resmoke_args = [ "--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_failure.yml", + "--originSuite=resmoke_end2end_tests", "--taskId=123", "--internalParam=test_archival", "--repeatTests=2", @@ -79,9 +86,14 @@ def test_archival_on_task_failure(self): self.assert_dir_file_count(self.test_dir, self.archival_file, archival_dirs_to_expect) def test_archival_on_task_failure_no_passthrough(self): + # The --originSuite argument is to trick the resmoke local invocation into passing + # because when we pass --taskId into resmoke it thinks that it is being ran in evergreen + # and cannot normally find an evergreen task associated with + # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_failure_no_passthrough.yml resmoke_args = [ "--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_failure_no_passthrough.yml", "--taskId=123", + "--originSuite=resmoke_end2end_tests", "--internalParam=test_archival", "--repeatTests=2", "--jobs=2", @@ -146,9 +158,14 @@ def execute_resmoke(self, resmoke_args, sleep_secs=30, **kwargs): # pylint: dis self.signal_resmoke() def test_task_timeout(self): + # The --originSuite argument is to trick the resmoke local invocation into passing + # because when we pass --taskId into resmoke it thinks that it is being ran in evergreen + # and cannot normally find an evergreen task associated with + # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_timeout.yml resmoke_args = [ "--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_timeout.yml", "--taskId=123", + "--originSuite=resmoke_end2end_tests", "--internalParam=test_archival", "--internalParam=test_analysis", "--repeatTests=2", @@ -163,9 +180,14 @@ def test_task_timeout(self): self.assert_dir_file_count(self.test_dir, self.analysis_file, analysis_pids_to_expect) def test_task_timeout_no_passthrough(self): + # The --originSuite argument is to trick the resmoke local invocation into passing + # because when we pass --taskId into resmoke it thinks that it is being ran in evergreen + # and cannot normally find an evergreen task associated with + # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_timeout_no_passthrough.yml resmoke_args = [ "--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_task_timeout_no_passthrough.yml", "--taskId=123", + "--originSuite=resmoke_end2end_tests", "--internalParam=test_archival", "--internalParam=test_analysis", "--repeatTests=2", @@ -181,9 +203,14 @@ def test_task_timeout_no_passthrough(self): # Test scenarios where an resmoke-launched process launches resmoke. 
def test_nested_timeout(self): + # The --originSuite argument is to trick the resmoke local invocation into passing + # because when we pass --taskId into resmoke it thinks that it is being run in evergreen + # and cannot normally find an evergreen task associated with + # buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_nested_timeout.yml resmoke_args = [ "--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_nested_timeout.yml", "--taskId=123", + "--originSuite=resmoke_end2end_tests", "--internalParam=test_archival", "--internalParam=test_analysis", "jstests/resmoke_selftest/end2end/timeout/nested/top_level_timeout.js", @@ -541,3 +568,78 @@ def test_random_shell_seed(self): self.assertTrue( len(random_seeds) > 1, msg="Resmoke generated the same random seed 10 times in a row.") + + +# In resmoke we expect certain parts of the evergreen config to be a certain way +# These tests will fail if something is not as expected and also needs to change somewhere else in resmoke +class TestEvergreenYML(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.evg_conf = parse_evergreen_file("etc/evergreen.yml") + config.CONFIG_DIR = "buildscripts/resmokeconfig" + + def validate_jstestfuzz_selector(self, suite_name): + suite_config = suitesconfig.get_suite(suite_name).get_config() + expected_selector = ["jstestfuzz/out/*.js"] + self.assertEqual( + suite_config["selector"]["roots"], expected_selector, + msg=f"The jstestfuzz selector for {suite_name} did not match 'jstestfuzz/out/*.js'") + + # This test asserts that the jstestfuzz tasks upload to the URL we expect them to + # If the remote url changes, also change it in the _log_local_resmoke_invocation method + # before fixing this test to the correct url + def test_jstestfuzz_download_url(self): + functions = self.evg_conf.functions + run_jstestfuzz = functions["run jstestfuzz"] + contains_correct_url = False + for item in run_jstestfuzz: + if item["command"] != "s3.put": + continue + + remote_url = item["params"]["remote_file"] + if remote_url == "${project}/${build_variant}/${revision}/jstestfuzz/${task_id}-${execution}.tgz": + contains_correct_url = True + break + + self.assertTrue( + contains_correct_url, msg= + "The 'run jstestfuzz' function in evergreen did not contain the remote_url that was expected" + ) + + # This test asserts that the way implicit multiversion tasks are defined has not changed + # If this fails, you will need to correct the _log_local_resmoke_invocation method before fixing + # this test + def test_implicit_multiversion_tasks(self): + multiversion_task_names = self.evg_conf.get_task_names_by_tag("multiversion") + implicit_multiversion_count = 0 + for multiversion_task_name in multiversion_task_names: + task_config = self.evg_conf.get_task(multiversion_task_name) + func = task_config.find_func_command("initialize multiversion tasks") + if func is not None: + implicit_multiversion_count += 1 + + self.assertNotEqual(0, implicit_multiversion_count, + msg="Could not find any implicit multiversion tasks in evergreen") + + # This test asserts that the way jstestfuzz tasks are defined has not changed + # It also asserts that the selector for jstestfuzz tasks always points to jstestfuzz/out/*.js + # If this fails, you will need to correct the _log_local_resmoke_invocation method before fixing + # this test + def test_jstestfuzz_tasks(self): + jstestfuzz_count = 0 + for task in self.evg_conf.tasks: + generate_func = task.find_func_command("generate resmoke tasks") + if (generate_func is None + or 
get_dict_value(generate_func, ["vars", "is_jstestfuzz"]) != "true"): + continue + + jstestfuzz_count += 1 + + multiversion_func = task.find_func_command("initialize multiversion tasks") + if multiversion_func is not None: + for subtask in multiversion_func["vars"]: + self.validate_jstestfuzz_selector(subtask) + else: + self.validate_jstestfuzz_selector(task.get_suite_name()) + + self.assertNotEqual(0, jstestfuzz_count, msg="Could not find any jstestfuzz tasks") diff --git a/buildscripts/tests/resmoke_proxy/test_resmoke_proxy.py b/buildscripts/tests/resmoke_proxy/test_resmoke_proxy.py index d075b60fb4df9..dbd1bf476c700 100644 --- a/buildscripts/tests/resmoke_proxy/test_resmoke_proxy.py +++ b/buildscripts/tests/resmoke_proxy/test_resmoke_proxy.py @@ -1,4 +1,4 @@ -"""Unit tests for resmoke_proxy.py""" +"""Unit tests for resmoke_proxy.py.""" import unittest from unittest.mock import MagicMock diff --git a/buildscripts/tests/resmoke_validation/test_find_suites.py b/buildscripts/tests/resmoke_validation/test_find_suites.py new file mode 100644 index 0000000000000..6f9bc8bd7fead --- /dev/null +++ b/buildscripts/tests/resmoke_validation/test_find_suites.py @@ -0,0 +1,13 @@ +import subprocess +import unittest +import glob + + +class TestFindSuites(unittest.TestCase): + def test_find_suites(self): + jstests = glob.glob("jstests/core/*.js") + resmoke_process = subprocess.run( + ["python3", "buildscripts/resmoke.py", "find-suites", jstests[0]]) + + self.assertEqual(0, resmoke_process.returncode, + msg="find-suites subcommand did not execute successfully.") diff --git a/buildscripts/tests/resmoke_validation/test_generated_matrix_suites.py b/buildscripts/tests/resmoke_validation/test_generated_matrix_suites.py index df93b2369e998..2484ef3ecadef 100644 --- a/buildscripts/tests/resmoke_validation/test_generated_matrix_suites.py +++ b/buildscripts/tests/resmoke_validation/test_generated_matrix_suites.py @@ -18,11 +18,12 @@ def test_generated_suites(self): for suite_name in suite_names: try: - suite = self.matrix_suite_config.get_config_obj(suite_name) + suite = self.matrix_suite_config.get_config_obj_and_verify(suite_name) self.assertIsNotNone( suite, msg= f"{suite_name} was not found. This means either MatrixSuiteConfig.get_named_suites() " - + "or MatrixSuiteConfig.get_config_obj() are not working as intended.") + + + "or MatrixSuiteConfig.get_config_obj_and_verify() are not working as intended.") except Exception as ex: self.fail(repr(ex)) diff --git a/buildscripts/tests/resmoke_validation/test_matrix_suite_generation.py b/buildscripts/tests/resmoke_validation/test_matrix_suite_generation.py index c4b47ff74ab15..6c5ec15bef660 100644 --- a/buildscripts/tests/resmoke_validation/test_matrix_suite_generation.py +++ b/buildscripts/tests/resmoke_validation/test_matrix_suite_generation.py @@ -25,12 +25,12 @@ def verify_suite_generation(self): InvalidMatrixSuiteError, msg= f"{tested_suite} suite should have failed because the generated suite does not exist." 
): - self.matrix_suite_config.get_config_obj(tested_suite) + self.matrix_suite_config.get_config_obj_and_verify(tested_suite) self.matrix_suite_config.generate_matrix_suite_file(tested_suite) try: - suite = self.matrix_suite_config.get_config_obj(tested_suite) + suite = self.matrix_suite_config.get_config_obj_and_verify(tested_suite) self.assertIsNotNone(suite, msg=f"{suite} was not found.") except Exception as ex: self.fail(repr(ex)) @@ -45,13 +45,13 @@ def verify_altered_generated_suite(self): with self.assertRaises( InvalidMatrixSuiteError, msg= f"{tested_suite} suite should have failed because the generated suite was edited."): - self.matrix_suite_config.get_config_obj(tested_suite) + self.matrix_suite_config.get_config_obj_and_verify(tested_suite) # restore original file back self.matrix_suite_config.generate_matrix_suite_file(tested_suite) try: - suite = self.matrix_suite_config.get_config_obj(tested_suite) + suite = self.matrix_suite_config.get_config_obj_and_verify(tested_suite) self.assertIsNotNone(suite, msg=f"{suite} was not found.") except Exception as ex: self.fail(repr(ex)) diff --git a/buildscripts/tests/resmokelib/core/test_redirect.py b/buildscripts/tests/resmokelib/core/test_redirect.py index b08514811ac93..0f827a96c816e 100644 --- a/buildscripts/tests/resmokelib/core/test_redirect.py +++ b/buildscripts/tests/resmokelib/core/test_redirect.py @@ -15,9 +15,10 @@ class TestStdoutRedirect(unittest.TestCase): is_windows = os.name == "nt" def test_process_pipes(self): - """Write a string, one word per line into the beginning of a chain of processes. The input - will be tee'd into a temporary file and grepped. Verify the contents of the tee'd file and - the final output of the grep. + """Write a string, one word per line into the beginning of a chain of processes. + + The input will be tee'd into a temporary file and grepped. Verify the contents of + the tee'd file and the final output of the grep. """ if self.is_windows: diff --git a/buildscripts/tests/resmokelib/powercycle/test_remote_operations.py b/buildscripts/tests/resmokelib/powercycle/test_remote_operations.py index 7c00760412695..89f37a0e9d2d7 100755 --- a/buildscripts/tests/resmokelib/powercycle/test_remote_operations.py +++ b/buildscripts/tests/resmokelib/powercycle/test_remote_operations.py @@ -1,8 +1,10 @@ #!/usr/bin/env python3 -"""Unit test for buildscripts/remote_operations.py. +""" +Unit test for buildscripts/remote_operations.py. - Note - Tests require sshd to be enabled on localhost with paswordless login - and can fail otherwise.""" +Note - Tests require sshd to be enabled on localhost with paswordless login +and can fail otherwise. +""" import os import shutil diff --git a/buildscripts/tests/resmokelib/run/test_generate_multiversion_exclude_tags.py b/buildscripts/tests/resmokelib/run/test_generate_multiversion_exclude_tags.py index 56195f468c778..f601caf2d91de 100644 --- a/buildscripts/tests/resmokelib/run/test_generate_multiversion_exclude_tags.py +++ b/buildscripts/tests/resmokelib/run/test_generate_multiversion_exclude_tags.py @@ -25,9 +25,7 @@ def assert_contents(self, expected): self.assertEqual(actual, expected) def patch_and_run(self, latest, old, old_bin_version): - """ - Helper to patch and run the test. 
- """ + """Helper to patch and run the test.""" mock_multiversion_methods = { 'get_backports_required_hash_for_shell_version': MagicMock(), 'get_old_yaml': MagicMock(return_value=old) diff --git a/buildscripts/tests/resmokelib/test_selector.py b/buildscripts/tests/resmokelib/test_selector.py index 589a99aa2d25d..e94c6c097bcbf 100644 --- a/buildscripts/tests/resmokelib/test_selector.py +++ b/buildscripts/tests/resmokelib/test_selector.py @@ -460,7 +460,7 @@ def test_multi_js_test_selector_normal(self): @unittest.skip("Known broken. SERVER-48969 tracks re-enabling.") def test_multi_js_test_selector_one_group(self): - """Test we return only one group if the group size equals number of files""" + """Test we return only one group if the group size equals number of files.""" num_files = MockTestFileExplorer.NUM_JS_FILES config = selector._MultiJSTestSelectorConfig(roots=["dir/**/*.js"], group_size=num_files, group_count_multiplier=9999999) diff --git a/buildscripts/tests/resmokelib/testing/fixtures/test_builder.py b/buildscripts/tests/resmokelib/testing/fixtures/test_builder.py index 29ac90572c9e8..fe29b52b17c86 100644 --- a/buildscripts/tests/resmokelib/testing/fixtures/test_builder.py +++ b/buildscripts/tests/resmokelib/testing/fixtures/test_builder.py @@ -1,34 +1,15 @@ """Unit tests for the resmokelib.testing.fixtures._builder module.""" # pylint: disable=protected-access,invalid-name import unittest -import filecmp -import os from unittest.mock import MagicMock from buildscripts.resmokelib import logging, parser, config from buildscripts.resmokelib.core import network from buildscripts.resmokelib.testing.fixtures import _builder as under_test -TEST_COMMIT = "1de5826097917875f48ca1ea4f2e53b40139f9ff" -TEST_SUFFIX = "_unittest_suffix" -TEST_RETRIEVE_DIR = os.path.join(under_test.RETRIEVE_DIR, TEST_SUFFIX) SET_PARAMS = "set_parameters" -class TestRetrieveFixtures(unittest.TestCase): - """Class that test retrieve_fixtures methods.""" - - def test_retrieve_fixtures(self): - """function to test retrieve_fixtures""" - expected_standalone = os.path.join("buildscripts", "tests", "resmokelib", "testing", - "fixtures", "retrieved_fixture.txt") - under_test.retrieve_fixtures(TEST_RETRIEVE_DIR, TEST_COMMIT) - retrieved_standalone = os.path.join(TEST_RETRIEVE_DIR, "standalone.py") - self.assertTrue( - filecmp.cmpfiles(retrieved_standalone, expected_standalone, - ["standalone.py", "retrieved_fixture.txt"], shallow=False)) - - class TestGetPackageName(unittest.TestCase): def test_get_package_name_from_posix_path(self): path = "build/multiversionfixtures/_unittest_suffix" @@ -49,20 +30,12 @@ class TestBuildShardedCluster(unittest.TestCase): @classmethod def setUpClass(cls): - under_test.retrieve_fixtures(TEST_RETRIEVE_DIR, TEST_COMMIT) - cls.original_constants["MULTIVERSION_CLASS_SUFFIX"] = under_test.MULTIVERSION_CLASS_SUFFIX - under_test.MULTIVERSION_CLASS_SUFFIX = TEST_SUFFIX - cls.mock_logger = MagicMock(spec=logging.Logger) logging.loggers._FIXTURE_LOGGER_REGISTRY[cls.job_num] = cls.mock_logger def tearDown(self): network.PortAllocator.reset() - @classmethod - def tearDownClass(cls): - under_test.MULTIVERSION_CLASS_SUFFIX = cls.original_constants["MULTIVERSION_CLASS_SUFFIX"] - def test_build_sharded_cluster_simple(self): parser.set_run_options() fixture_config = {"mongod_options": {SET_PARAMS: {"enableTestCommands": 1}}} diff --git a/buildscripts/tests/resmokelib/testing/fixtures/test_fixturelib.py b/buildscripts/tests/resmokelib/testing/fixtures/test_fixturelib.py index b492e74ce2ee8..6b9495c5207b5 
100644 --- a/buildscripts/tests/resmokelib/testing/fixtures/test_fixturelib.py +++ b/buildscripts/tests/resmokelib/testing/fixtures/test_fixturelib.py @@ -1,4 +1,4 @@ -"""Unittest for the resmokelib.testing.fixturelib.utils module""" +"""Unittest for the resmokelib.testing.fixturelib.utils module.""" import copy import unittest diff --git a/buildscripts/tests/resmokelib/testing/test_symbolizer_service.py b/buildscripts/tests/resmokelib/testing/test_symbolizer_service.py index ea8ac1003ea86..28519e56391e1 100644 --- a/buildscripts/tests/resmokelib/testing/test_symbolizer_service.py +++ b/buildscripts/tests/resmokelib/testing/test_symbolizer_service.py @@ -15,6 +15,7 @@ def mock_resmoke_symbolizer_config(): config_mock.client_id = "client_id" config_mock.client_secret = "client_secret" config_mock.is_windows.return_value = False + config_mock.is_macos.return_value = False return config_mock @@ -85,6 +86,11 @@ def test_should_not_symbolize_if_on_windows(self): ret = self.resmoke_symbolizer.should_symbolize(MagicMock()) self.assertFalse(ret) + def test_should_not_symbolize_if_on_macos(self): + self.config_mock.is_macos.return_value = True + ret = self.resmoke_symbolizer.should_symbolize(MagicMock()) + self.assertFalse(ret) + def test_should_symbolize_return_true(self): ret = self.resmoke_symbolizer.should_symbolize(MagicMock()) self.assertTrue(ret) diff --git a/buildscripts/tests/resmokelib/utils/test_archival.py b/buildscripts/tests/resmokelib/utils/test_archival.py index 96820b9bc3613..e6bee0b473a2f 100644 --- a/buildscripts/tests/resmokelib/utils/test_archival.py +++ b/buildscripts/tests/resmokelib/utils/test_archival.py @@ -1,4 +1,4 @@ -""" Unit tests for archival. """ +"""Unit tests for archival.""" import logging import os @@ -15,14 +15,14 @@ def create_random_file(file_name, num_chars_mb): - """ Creates file with random characters, which will have minimal compression. """ + """Creates file with random characters, which will have minimal compression.""" with open(file_name, "wb") as fileh: for _ in range(num_chars_mb * 1024 * 1024): fileh.write(chr(random.randint(0, 255))) class MockS3Client(object): - """ Class to mock the S3 client. 
""" + """Class to mock the S3 client.""" def __init__(self, logger): self.logger = logger diff --git a/buildscripts/tests/resmokelib/utils/test_scheduler.py b/buildscripts/tests/resmokelib/utils/test_scheduler.py index 48ffb77117af4..052a5e758064e 100644 --- a/buildscripts/tests/resmokelib/utils/test_scheduler.py +++ b/buildscripts/tests/resmokelib/utils/test_scheduler.py @@ -12,6 +12,7 @@ def noop(): class TestScheduler(unittest.TestCase): """Unit tests for the Scheduler class.""" + scheduler = _scheduler.Scheduler def setUp(self): diff --git a/buildscripts/tests/test_burn_in_tests.py b/buildscripts/tests/test_burn_in_tests.py index 5c358b5095391..7e10973ea4bcb 100644 --- a/buildscripts/tests/test_burn_in_tests.py +++ b/buildscripts/tests/test_burn_in_tests.py @@ -16,6 +16,7 @@ import buildscripts.burn_in_tests as under_test from buildscripts.ciconfig.evergreen import parse_evergreen_file, VariantTask import buildscripts.resmokelib.parser as _parser + _parser.set_run_options() # pylint: disable=protected-access diff --git a/buildscripts/tests/test_errorcodes.py b/buildscripts/tests/test_errorcodes.py index 17c102454b5da..5f640b73d98ac 100644 --- a/buildscripts/tests/test_errorcodes.py +++ b/buildscripts/tests/test_errorcodes.py @@ -48,9 +48,13 @@ def test_generate_next_code(self): self.assertEqual(22, next(generator)) def test_generate_next_server_code(self): - """ This call to `read_error_codes` technically has no bearing on `get_next_code` when a + """ + Test `generate_next_server_code`. + + This call to `read_error_codes` technically has no bearing on `get_next_code` when a `server_ticket` is passed in. But it maybe makes sense for the test to do so in case a - future patch changes that relationship.""" + future patch changes that relationship. 
+ """ _, _, seen = errorcodes.read_error_codes(TESTDATA_DIR + 'generate_next_server_code/') print("Seen: " + str(seen)) generator = errorcodes.get_next_code(seen, server_ticket=12301) diff --git a/buildscripts/tests/test_evergreen_activate_gen_tasks.py b/buildscripts/tests/test_evergreen_activate_gen_tasks.py index 1632fab75eaeb..f1c85a9f2221d 100644 --- a/buildscripts/tests/test_evergreen_activate_gen_tasks.py +++ b/buildscripts/tests/test_evergreen_activate_gen_tasks.py @@ -17,17 +17,39 @@ def build_mock_task_list(num_tasks): return [build_mock_task(f"task_{i}", f"id_{i}") for i in range(num_tasks)] -def build_mock_evg_api(mock_tasks_list, variant_map_side_effects=None): +class MockVariantData(): + """An object to help create a mock evg api.""" + + def __init__(self, build_id, variant_name, task_list): + self.build_id = build_id + self.variant_name = variant_name + self.task_list = task_list + + +def build_mock_evg_api(variant_data_list): class VersionPatchedSpec(Version): """A patched `Version` with instance properties included for magic mock spec.""" + build_variants_map = MagicMock() mock_version = MagicMock(spec_set=VersionPatchedSpec) - mock_version.build_variants_map.side_effect = variant_map_side_effects + mock_version.build_variants_map = { + variant_data.variant_name: variant_data.build_id + for variant_data in variant_data_list + } mock_evg_api = MagicMock(spec_set=EvergreenApi) mock_evg_api.version_by_id.return_value = mock_version - mock_evg_api.tasks_by_build.side_effect = mock_tasks_list + + build_id_mapping = { + variant_data.build_id: variant_data.task_list + for variant_data in variant_data_list + } + + def tasks_by_build_side_effect(build_id): + return build_id_mapping[build_id] + + mock_evg_api.tasks_by_build.side_effect = tasks_by_build_side_effect return mock_evg_api @@ -39,7 +61,8 @@ def test_task_with_display_name_is_activated(self): "task_name": "task_3_gen", }) mock_task_list = build_mock_task_list(5) - mock_evg_api = build_mock_evg_api([mock_task_list]) + mock_evg_api = build_mock_evg_api( + [MockVariantData("build_id", "non-burn-in-bv", mock_task_list)]) under_test.activate_task(expansions, mock_evg_api) @@ -52,25 +75,27 @@ def test_task_with_no_matching_name(self): "task_name": "not_an_existing_task", }) mock_task_list = build_mock_task_list(5) - mock_evg_api = build_mock_evg_api([mock_task_list]) + mock_evg_api = build_mock_evg_api( + [MockVariantData("build_id", "non-burn-in-bv", mock_task_list)]) under_test.activate_task(expansions, mock_evg_api) mock_evg_api.configure_task.assert_not_called() def test_burn_in_tags_tasks_are_activated(self): - expansions = under_test.EvgExpansions( - **{ - "build_id": "build_id", - "version_id": "version_id", - "task_name": "burn_in_tags_gen", - "burn_in_tag_buildvariants": "build_variant_2 build_variant_3", - }) + expansions = under_test.EvgExpansions(**{ + "build_id": "build_id", + "version_id": "version_id", + "task_name": "burn_in_tags_gen", + }) mock_task_list_2 = build_mock_task_list(5) mock_task_list_2.append(build_mock_task("burn_in_tests", "burn_in_tests_id_2")) mock_task_list_3 = build_mock_task_list(5) mock_task_list_3.append(build_mock_task("burn_in_tests", "burn_in_tests_id_3")) - mock_evg_api = build_mock_evg_api([mock_task_list_2, mock_task_list_3]) + mock_evg_api = build_mock_evg_api([ + MockVariantData("1", "variant1-generated-by-burn-in-tags", mock_task_list_2), + MockVariantData("2", "variant2-generated-by-burn-in-tags", mock_task_list_3) + ]) under_test.activate_task(expansions, mock_evg_api) @@ -80,25 
+105,6 @@ def test_burn_in_tags_tasks_are_activated(self): ]) def test_burn_in_tags_task_skips_non_existing_build_variant(self): - expansions = under_test.EvgExpansions( - **{ - "build_id": "build_id", - "version_id": "version_id", - "task_name": "burn_in_tags_gen", - "burn_in_tag_buildvariants": "not_an_existing_build_variant build_variant_2", - }) - mock_task_list_1 = build_mock_task_list(5) - mock_task_list_1.append(build_mock_task("burn_in_tags_gen", "burn_in_tags_gen_id_1")) - mock_task_list_2 = build_mock_task_list(5) - mock_task_list_2.append(build_mock_task("burn_in_tests", "burn_in_tests_id_2")) - mock_evg_api = build_mock_evg_api([mock_task_list_1, mock_task_list_2], - [None, KeyError, None]) - - under_test.activate_task(expansions, mock_evg_api) - - mock_evg_api.configure_task.assert_called_once_with("burn_in_tests_id_2", activated=True) - - def test_burn_in_tags_task_with_missing_burn_in_tag_buildvariants_expansion(self): expansions = under_test.EvgExpansions(**{ "build_id": "build_id", "version_id": "version_id", @@ -106,8 +112,13 @@ def test_burn_in_tags_task_with_missing_burn_in_tag_buildvariants_expansion(self }) mock_task_list_1 = build_mock_task_list(5) mock_task_list_1.append(build_mock_task("burn_in_tags_gen", "burn_in_tags_gen_id_1")) - mock_evg_api = build_mock_evg_api(mock_task_list_1) + mock_task_list_2 = build_mock_task_list(5) + mock_task_list_2.append(build_mock_task("burn_in_tests", "burn_in_tests_id_2")) + mock_evg_api = build_mock_evg_api([ + MockVariantData("1", "variant1-non-burn-in", mock_task_list_1), + MockVariantData("2", "variant2-generated-by-burn-in-tags", mock_task_list_2) + ]) under_test.activate_task(expansions, mock_evg_api) - mock_evg_api.configure_task.assert_not_called() + mock_evg_api.configure_task.assert_called_once_with("burn_in_tests_id_2", activated=True) diff --git a/buildscripts/tests/test_evergreen_task_timeout.py b/buildscripts/tests/test_evergreen_task_timeout.py index 4e681e47f529e..72c6c5f8cc1a5 100644 --- a/buildscripts/tests/test_evergreen_task_timeout.py +++ b/buildscripts/tests/test_evergreen_task_timeout.py @@ -110,7 +110,7 @@ def test_looking_up_an_idle_override_should_work(self): class TestDetermineExecTimeout(unittest.TestCase): def _validate_exec_timeout(self, idle_timeout, exec_timeout, historic_timeout, evg_alias, - build_variant, timeout_override, expected_timeout): + build_variant, display_name, timeout_override, expected_timeout): task_name = "task_name" variant = build_variant overrides = {} @@ -121,8 +121,9 @@ def _validate_exec_timeout(self, idle_timeout, exec_timeout, historic_timeout, e orchestrator = under_test.TaskTimeoutOrchestrator( timeout_service=MagicMock(spec_set=TimeoutService), - timeout_overrides=mock_timeout_overrides, - evg_project_config=MagicMock(spec_set=EvergreenProjectConfig)) + timeout_overrides=mock_timeout_overrides, evg_project_config=MagicMock( + spec_set=EvergreenProjectConfig, + get_variant=MagicMock(return_value=MagicMock(display_name=display_name)))) actual_timeout = orchestrator.determine_exec_timeout( task_name, variant, idle_timeout, exec_timeout, evg_alias, historic_timeout) @@ -132,78 +133,83 @@ def _validate_exec_timeout(self, idle_timeout, exec_timeout, historic_timeout, e def test_timeout_used_if_specified(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=timedelta(seconds=42), historic_timeout=None, evg_alias=None, build_variant="variant", - timeout_override=None, expected_timeout=timedelta(seconds=42)) + display_name="not required", timeout_override=None, + 
expected_timeout=timedelta(seconds=42)) def test_default_is_returned_with_no_timeout(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=None, - evg_alias=None, build_variant="variant", timeout_override=None, + evg_alias=None, build_variant="variant", + display_name="not required", timeout_override=None, expected_timeout=under_test.DEFAULT_NON_REQUIRED_BUILD_TIMEOUT) def test_default_is_returned_with_timeout_at_zero(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=timedelta(seconds=0), historic_timeout=None, evg_alias=None, build_variant="variant", - timeout_override=None, + display_name="not required", timeout_override=None, expected_timeout=under_test.DEFAULT_NON_REQUIRED_BUILD_TIMEOUT) def test_default_required_returned_on_required_variants(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=None, evg_alias=None, build_variant="variant-required", - timeout_override=None, + display_name="! required", timeout_override=None, expected_timeout=under_test.DEFAULT_REQUIRED_BUILD_TIMEOUT) def test_override_on_required_should_use_override(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=None, evg_alias=None, build_variant="variant-required", - timeout_override=3 * 60, + display_name="! required", timeout_override=3 * 60, expected_timeout=timedelta(minutes=3 * 60)) def test_task_specific_timeout(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=timedelta(seconds=0), historic_timeout=None, evg_alias=None, build_variant="variant", - timeout_override=60, expected_timeout=timedelta(minutes=60)) + display_name="not required", timeout_override=60, + expected_timeout=timedelta(minutes=60)) def test_commit_queue_items_use_commit_queue_timeout(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=None, evg_alias=under_test.COMMIT_QUEUE_ALIAS, - build_variant="variant", timeout_override=None, + build_variant="variant", display_name="not required", + timeout_override=None, expected_timeout=under_test.COMMIT_QUEUE_TIMEOUT) def test_use_idle_timeout_if_greater_than_exec_timeout(self): self._validate_exec_timeout( idle_timeout=timedelta(hours=2), exec_timeout=timedelta(minutes=10), - historic_timeout=None, evg_alias=None, build_variant="variant", timeout_override=None, - expected_timeout=timedelta(hours=2)) + historic_timeout=None, evg_alias=None, build_variant="variant", + display_name="not required", timeout_override=None, expected_timeout=timedelta(hours=2)) def test_historic_timeout_should_be_used_if_given(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=timedelta(minutes=15), evg_alias=None, - build_variant="variant", timeout_override=None, - expected_timeout=timedelta(minutes=15)) + build_variant="variant", display_name="not required", + timeout_override=None, expected_timeout=timedelta(minutes=15)) def test_commit_queue_should_override_historic_timeouts(self): self._validate_exec_timeout( idle_timeout=None, exec_timeout=None, historic_timeout=timedelta(minutes=15), - evg_alias=under_test.COMMIT_QUEUE_ALIAS, build_variant="variant", timeout_override=None, + evg_alias=under_test.COMMIT_QUEUE_ALIAS, build_variant="variant", + display_name="not required", timeout_override=None, expected_timeout=under_test.COMMIT_QUEUE_TIMEOUT) def test_override_should_override_historic_timeouts(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, 
historic_timeout=timedelta(minutes=15), evg_alias=None, - build_variant="variant", timeout_override=33, - expected_timeout=timedelta(minutes=33)) + build_variant="variant", display_name="not required", + timeout_override=33, expected_timeout=timedelta(minutes=33)) def test_historic_timeout_should_not_be_overridden_by_required_bv(self): self._validate_exec_timeout(idle_timeout=None, exec_timeout=None, historic_timeout=timedelta(minutes=15), evg_alias=None, - build_variant="variant-required", timeout_override=None, - expected_timeout=timedelta(minutes=15)) + build_variant="variant-required", display_name="! required", + timeout_override=None, expected_timeout=timedelta(minutes=15)) def test_historic_timeout_should_not_be_increase_required_bv_timeout(self): self._validate_exec_timeout( idle_timeout=None, exec_timeout=None, historic_timeout=under_test.DEFAULT_REQUIRED_BUILD_TIMEOUT + timedelta(minutes=30), - evg_alias=None, build_variant="variant-required", timeout_override=None, - expected_timeout=under_test.DEFAULT_REQUIRED_BUILD_TIMEOUT) + evg_alias=None, build_variant="variant-required", display_name="! required", + timeout_override=None, expected_timeout=under_test.DEFAULT_REQUIRED_BUILD_TIMEOUT) class TestDetermineIdleTimeout(unittest.TestCase): diff --git a/buildscripts/tests/test_packager.py b/buildscripts/tests/test_packager.py index 022c14c6c78c3..975a85dee3d80 100644 --- a/buildscripts/tests/test_packager.py +++ b/buildscripts/tests/test_packager.py @@ -6,14 +6,15 @@ class TestPackager(TestCase): - """Test packager.py""" + """Test packager.py.""" def test_is_nightly(self) -> None: """Test is_nightly.""" @dataclass class Case: - """Test case data""" + """Test case data.""" + name: str version: str want: bool diff --git a/buildscripts/tests/tooling_metrics_e2e/test_ninja_tooling_metrics.py b/buildscripts/tests/tooling_metrics_e2e/test_ninja_tooling_metrics.py index 2033b2461fab1..5a8c05c9128ac 100644 --- a/buildscripts/tests/tooling_metrics_e2e/test_ninja_tooling_metrics.py +++ b/buildscripts/tests/tooling_metrics_e2e/test_ninja_tooling_metrics.py @@ -5,8 +5,7 @@ import unittest from unittest.mock import patch from mock import MagicMock -from mongo_tooling_metrics import client -from mongo_tooling_metrics.base_metrics import TopLevelMetrics +from mongo_tooling_metrics.lib.top_level_metrics import NinjaToolingMetrics import ninja as under_test BUILD_DIR = os.path.join(os.getcwd(), 'build') @@ -34,9 +33,8 @@ "fast.ninja", "install-platform", ]) -@patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True)) class TestNinjaAtExitMetricsCollection(unittest.TestCase): - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True)) + @patch.object(NinjaToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True)) @patch.object(atexit, "register", MagicMock()) def test_at_exit_metrics_collection(self): with self.assertRaises(SystemExit) as _: @@ -44,11 +42,10 @@ def test_at_exit_metrics_collection(self): atexit_functions = [ call for call in atexit.register.call_args_list - if call[0][0].__name__ == '_verbosity_enforced_save_metrics' + if call[0][0].__name__ == '_safe_save_metrics' ] - generate_metrics = atexit_functions[0][0][1].generate_metrics kwargs = atexit_functions[0][1] - metrics = generate_metrics(**kwargs) + metrics = NinjaToolingMetrics.generate_metrics(**kwargs) assert not metrics.is_malformed() assert len(metrics.build_info.build_artifacts) > 0 @@ -64,10 +61,10 @@ def test_at_exit_metrics_collection(self): assert 
metrics.command_info.options['f'] == "fast.ninja" assert metrics.command_info.options['j'] == "400" - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=False)) + @patch.object(NinjaToolingMetrics, 'should_collect_metrics', MagicMock(return_value=False)) @patch.object(atexit, "register", MagicMock()) def test_no_at_exit_metrics_collection(self): with self.assertRaises(SystemExit) as _: under_test.ninja() atexit_functions = [call[0][0].__name__ for call in atexit.register.call_args_list] - assert "_verbosity_enforced_save_metrics" not in atexit_functions + assert "_safe_save_metrics" not in atexit_functions diff --git a/buildscripts/tests/tooling_metrics_e2e/test_resmoke_tooling_metrics.py b/buildscripts/tests/tooling_metrics_e2e/test_resmoke_tooling_metrics.py index 1073e1a76a6f2..be7a34b79e639 100644 --- a/buildscripts/tests/tooling_metrics_e2e/test_resmoke_tooling_metrics.py +++ b/buildscripts/tests/tooling_metrics_e2e/test_resmoke_tooling_metrics.py @@ -2,8 +2,8 @@ import unittest from unittest.mock import patch from mock import MagicMock -from mongo_tooling_metrics import client -from mongo_tooling_metrics.base_metrics import TopLevelMetrics +from mongo_tooling_metrics.lib.top_level_metrics import ResmokeToolingMetrics +import mongo_tooling_metrics.lib.utils as metrics_utils import buildscripts.resmoke as under_test @@ -16,18 +16,16 @@ @patch("atexit.register") class TestResmokeAtExitMetricsCollection(unittest.TestCase): @patch("sys.argv", ['buildscripts/resmoke.py', 'list-suites']) - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True)) - @patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True)) + @patch.object(ResmokeToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True)) def test_resmoke_at_exit_metrics_collection(self, mock_atexit_register): under_test.entrypoint() atexit_functions = [ call for call in mock_atexit_register.call_args_list - if call[0][0].__name__ == '_verbosity_enforced_save_metrics' + if call[0][0].__name__ == '_safe_save_metrics' ] - generate_metrics = atexit_functions[0][0][1].generate_metrics kwargs = atexit_functions[0][1] - metrics = generate_metrics(**kwargs) + metrics = ResmokeToolingMetrics.generate_metrics(**kwargs) assert not metrics.is_malformed() assert metrics.command_info.command == ['buildscripts/resmoke.py', 'list-suites'] @@ -35,24 +33,21 @@ def test_resmoke_at_exit_metrics_collection(self, mock_atexit_register): assert metrics.command_info.positional_args == [] @patch("sys.argv", ['buildscripts/resmoke.py', 'list-suites']) - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True)) - @patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=False)) + @patch.object(ResmokeToolingMetrics, 'should_collect_metrics', MagicMock(return_value=False)) def test_no_resmoke_at_exit_metrics_collection(self, mock_atexit_register): under_test.entrypoint() atexit_functions = [call[0][0].__name__ for call in mock_atexit_register.call_args_list] - assert "_verbosity_enforced_save_metrics" not in atexit_functions + assert "_safe_save_metrics" not in atexit_functions @patch("sys.argv", ['buildscripts/resmoke.py', 'list-suites']) - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=False)) - @patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True)) + @patch.object(metrics_utils, '_is_virtual_workstation', MagicMock(return_value=False)) def 
test_resmoke_no_metric_collection_non_vw(self, mock_atexit_register): under_test.entrypoint() atexit_functions = [call[0][0].__name__ for call in mock_atexit_register.call_args_list] - assert "_verbosity_enforced_save_metrics" not in atexit_functions + assert "_safe_save_metrics" not in atexit_functions @patch("sys.argv", ['buildscripts/resmoke.py', 'run', '--suite', 'buildscripts_test']) - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True)) - @patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True)) + @patch.object(ResmokeToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True)) @patch("buildscripts.resmokelib.testing.executor.TestSuiteExecutor._run_tests", side_effect=Exception()) def test_resmoke_at_exit_metrics_collection_exc(self, mock_exc_method, mock_atexit_register): @@ -61,11 +56,10 @@ def test_resmoke_at_exit_metrics_collection_exc(self, mock_exc_method, mock_atex atexit_functions = [ call for call in mock_atexit_register.call_args_list - if call[0][0].__name__ == '_verbosity_enforced_save_metrics' + if call[0][0].__name__ == '_safe_save_metrics' ] - generate_metrics = atexit_functions[0][0][1].generate_metrics kwargs = atexit_functions[0][1] - metrics = generate_metrics(**kwargs) + metrics = ResmokeToolingMetrics.generate_metrics(**kwargs) assert not metrics.is_malformed() assert metrics.command_info.command == [ diff --git a/buildscripts/tests/tooling_metrics_e2e/test_scons_tooling_metrics.py b/buildscripts/tests/tooling_metrics_e2e/test_scons_tooling_metrics.py index 9d103e6e5f5ae..fef7c13f9c9db 100644 --- a/buildscripts/tests/tooling_metrics_e2e/test_scons_tooling_metrics.py +++ b/buildscripts/tests/tooling_metrics_e2e/test_scons_tooling_metrics.py @@ -4,8 +4,7 @@ import unittest from unittest.mock import patch from mock import MagicMock -from mongo_tooling_metrics import client -from mongo_tooling_metrics.base_metrics import TopLevelMetrics +from mongo_tooling_metrics.lib.top_level_metrics import SConsToolingMetrics import buildscripts.scons as under_test BUILD_DIR = os.path.join(os.getcwd(), 'build') @@ -27,9 +26,8 @@ @patch("sys.argv", ARGS) -@patch.object(TopLevelMetrics, 'should_collect_metrics', MagicMock(return_value=True)) class TestSconsAtExitMetricsCollection(unittest.TestCase): - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True)) + @patch.object(SConsToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True)) @patch.object(atexit, "register", MagicMock()) def test_at_exit_metrics_collection(self): with self.assertRaises(SystemExit) as _: @@ -37,11 +35,10 @@ def test_at_exit_metrics_collection(self): atexit_functions = [ call for call in atexit.register.call_args_list - if call[0][0].__name__ == '_verbosity_enforced_save_metrics' + if call[0][0].__name__ == '_safe_save_metrics' ] - generate_metrics = atexit_functions[0][0][1].generate_metrics kwargs = atexit_functions[0][1] - metrics = generate_metrics(**kwargs) + metrics = SConsToolingMetrics.generate_metrics(**kwargs) assert not metrics.is_malformed() assert len(metrics.build_info.build_artifacts) > 0 @@ -50,15 +47,15 @@ def test_at_exit_metrics_collection(self): f"VARIANT_DIR={VARIANT_DIR}", "install-platform" ] - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=False)) + @patch.object(SConsToolingMetrics, 'should_collect_metrics', MagicMock(return_value=False)) @patch.object(atexit, "register", MagicMock()) def test_no_at_exit_metrics_collection(self): with 
self.assertRaises(SystemExit) as _: under_test.entrypoint() atexit_functions = [call[0][0].__name__ for call in atexit.register.call_args_list] - assert "_verbosity_enforced_save_metrics" not in atexit_functions + assert "_safe_save_metrics" not in atexit_functions - @patch.object(client, 'should_collect_internal_metrics', MagicMock(return_value=True)) + @patch.object(SConsToolingMetrics, 'should_collect_metrics', MagicMock(return_value=True)) @patch("buildscripts.moduleconfig.get_module_sconscripts", MagicMock(side_effect=Exception())) @patch.object(atexit, "register", MagicMock()) def test_at_exit_metrics_collection_exc(self): @@ -67,11 +64,10 @@ def test_at_exit_metrics_collection_exc(self): atexit_functions = [ call for call in atexit.register.call_args_list - if call[0][0].__name__ == '_verbosity_enforced_save_metrics' + if call[0][0].__name__ == '_safe_save_metrics' ] - generate_metrics = atexit_functions[0][0][1].generate_metrics kwargs = atexit_functions[0][1] - metrics = generate_metrics(**kwargs) + metrics = SConsToolingMetrics.generate_metrics(**kwargs) assert not metrics.is_malformed() assert metrics.command_info.command == ARGS diff --git a/buildscripts/tests/util/test_teststats.py b/buildscripts/tests/util/test_teststats.py index 55b5da308bea6..191e4b0b0208b 100644 --- a/buildscripts/tests/util/test_teststats.py +++ b/buildscripts/tests/util/test_teststats.py @@ -155,12 +155,14 @@ def test_get_stats_from_s3_returns_data(self, mock_get): "num_pass": 74, "num_fail": 0, "avg_duration_pass": 23.16216216216216, + "max_duration_pass": 27.123, }, { "test_name": "shell_advance_cluster_time:ValidateCollections", "num_pass": 74, "num_fail": 0, "avg_duration_pass": 1.662162162162162, + "max_duration_pass": 100.0987, }, ] mock_get.return_value = mock_response @@ -173,12 +175,14 @@ def test_get_stats_from_s3_returns_data(self, mock_get): num_pass=74, num_fail=0, avg_duration_pass=23.16216216216216, + max_duration_pass=27.123, ), under_test.HistoricalTestInformation( test_name="shell_advance_cluster_time:ValidateCollections", num_pass=74, num_fail=0, avg_duration_pass=1.662162162162162, + max_duration_pass=100.0987, ), ]) diff --git a/buildscripts/util/teststats.py b/buildscripts/util/teststats.py index c0934f2582f32..11e2e605b9cba 100644 --- a/buildscripts/util/teststats.py +++ b/buildscripts/util/teststats.py @@ -21,12 +21,14 @@ class HistoricalTestInformation(NamedTuple): avg_duration_pass: Average of runtime of test that passed. num_pass: Number of times the test has passed. num_fail: Number of times the test has failed. + max_duration_pass: Maximum runtime of the test when it passed. """ test_name: str num_pass: int num_fail: int avg_duration_pass: float + max_duration_pass: Optional[float] = None class TestRuntime(NamedTuple): diff --git a/distsrc/LICENSE.OpenSSL b/distsrc/LICENSE.OpenSSL index 84300a4257671..b1ac5b7c7cf2d 100644 --- a/distsrc/LICENSE.OpenSSL +++ b/distsrc/LICENSE.OpenSSL @@ -113,4 +113,4 @@ Original SSLeay License * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] 
- */ \ No newline at end of file + */ diff --git a/distsrc/THIRD-PARTY-NOTICES b/distsrc/THIRD-PARTY-NOTICES index 5f47c9cb329f9..8c46b92676fce 100644 --- a/distsrc/THIRD-PARTY-NOTICES +++ b/distsrc/THIRD-PARTY-NOTICES @@ -1495,32 +1495,6 @@ all derivative works of the Software, unless such copies or derivative works are solely in the form of machine-executable object code generated by a source language processor. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT -SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE -FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, -ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER -DEALINGS IN THE SOFTWARE. - - 24) License notice for MPark.Variant -------------------------------------- -Boost Software License - Version 1.0 - August 17th, 2003 - -Permission is hereby granted, free of charge, to any person or organization -obtaining a copy of the software and accompanying documentation covered by -this license (the "Software") to use, reproduce, display, distribute, -execute, and transmit the Software, and to prepare derivative works of the -Software, and to permit third-parties to whom the Software is furnished to -do so, all subject to the following: - -The copyright notices in the Software and this entire statement, including -the above license grant, this restriction and the following disclaimer, -must be included in all copies of the Software, in whole or in part, and -all derivative works of the Software, unless such copies or derivative -works are solely in the form of machine-executable object code generated by -a source language processor. - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT @@ -2441,4 +2415,4 @@ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -END \ No newline at end of file +END diff --git a/docs/building.md b/docs/building.md index 63fbde131593a..9cc95c2d77bc9 100644 --- a/docs/building.md +++ b/docs/building.md @@ -7,7 +7,7 @@ way to get started, rather than building from source. To build MongoDB, you will need: -* A modern C++ compiler capable of compiling C++17. One of the following is required: +* A modern C++ compiler capable of compiling C++20. 
One of the following is required: * GCC 11.3 or newer * Clang 12.0 (or Apple XCode 13.0 Clang) or newer * Visual Studio 2022 version 17.0 or newer (See Windows section below for details) diff --git a/docs/command_dispatch.md b/docs/command_dispatch.md index eb8ff324f6be1..4b2391afbd241 100644 --- a/docs/command_dispatch.md +++ b/docs/command_dispatch.md @@ -82,4 +82,4 @@ For details on transport internals, including ingress networking, see [this docu [transaction_router_h]: ../src/mongo/s/transaction_router.h [commands_h]: ../src/mongo/db/commands.h [template_method_pattern]: https://en.wikipedia.org/wiki/Template_method_pattern -[transport_internals]: ../src/mongo/transport/README.md \ No newline at end of file +[transport_internals]: ../src/mongo/transport/README.md diff --git a/docs/egress_networking.md b/docs/egress_networking.md index cd5d4c098f41c..65108b479f7b6 100644 --- a/docs/egress_networking.md +++ b/docs/egress_networking.md @@ -38,4 +38,4 @@ document][transport_internals]. [network_interface_h]: ../src/mongo/executor/network_interface.h [dbclient_rs_h]: ../src/mongo/client/dbclient_rs.h [automatic_failover]: https://docs.mongodb.com/manual/replication/#automatic-failover -[transport_internals]: ../src/mongo/transport/README.md \ No newline at end of file +[transport_internals]: ../src/mongo/transport/README.md diff --git a/docs/evergreen-testing/burn_in_tags.md b/docs/evergreen-testing/burn_in_tags.md index 579f432efc43f..9600645a1fcba 100644 --- a/docs/evergreen-testing/burn_in_tags.md +++ b/docs/evergreen-testing/burn_in_tags.md @@ -17,3 +17,11 @@ will be generated, each of which will have a `burn_in_tests` task generated by t [generated task](task_generation.md), may have multiple sub-tasks which run the test suites only for the new or changed javascript tests (note that a javascript test can be included in multiple test suites). Each of those tests will be run 2 times minimum, and 1000 times maximum or for 10 minutes, whichever is reached first. + +## ! Run All Affected JStests +The `! Run All Affected JStests` variant has a single `burn_in_tags_gen` task. This task will create & +activate [`burn_in_tests`](burn_in_tests.md) tasks for all required and suggested +variants. The end result is that any jstests that have been modified in the patch will +run on all required and suggested variants. This should give users a clear signal on +whether their jstests changes have introduced a failure that could potentially lead +to a revert or follow-up bug fix commit. diff --git a/docs/evergreen-testing/multiversion.md b/docs/evergreen-testing/multiversion.md index 3498b696aa6ea..4bda73b7081d0 100644 --- a/docs/evergreen-testing/multiversion.md +++ b/docs/evergreen-testing/multiversion.md @@ -51,9 +51,9 @@ versions. In such context we refer to `last-lts` and `last-continuous` versions version and to `latest` as a `new` version. A `new` version is compiled in the same way as for non-multiversion tasks. The `old` versions of -compiled binaries are downloaded from the old branch projects with the following script: -[evergreen/multiversion_setup.sh](https://github.com/mongodb/mongo/blob/e91cda950e50aa4c707efbdd0be208481493fc96/evergreen/multiversion_setup.sh). -The script searches for the latest available compiled binaries on the old branch projects in +compiled binaries are downloaded from the old branch projects with +[`db-contrib-tool`](https://github.com/10gen/db-contrib-tool). 
+`db-contrib-tool` searches for the latest available compiled binaries on the old branch projects in Evergreen. @@ -64,17 +64,19 @@ Multiversion suites can be explicit and implicit. * Explicit - JS tests are aware of the binary versions they are running, e.g. [multiversion.yml](https://github.com/mongodb/mongo/blob/e91cda950e50aa4c707efbdd0be208481493fc96/buildscripts/resmokeconfig/suites/multiversion.yml). The version of binaries is explicitly set in JS tests, -e.g. [jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js](https://github.com/mongodb/mongo/blob/e91cda950e50aa4c707efbdd0be208481493fc96/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js#L33-L42): +e.g. [jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js](https://github.com/mongodb/mongo/blob/397c8da541940b3fbe6257243f97a342fe7e0d3b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js#L33-L44): ```js const versions = [ - {binVersion: '4.0', featureCompatibilityVersion: '4.0', testCollection: 'four_zero'}, - {binVersion: '4.2', featureCompatibilityVersion: '4.2', testCollection: 'four_two'}, {binVersion: '4.4', featureCompatibilityVersion: '4.4', testCollection: 'four_four'}, {binVersion: '5.0', featureCompatibilityVersion: '5.0', testCollection: 'five_zero'}, {binVersion: '6.0', featureCompatibilityVersion: '6.0', testCollection: 'six_zero'}, - {binVersion: 'last-lts', testCollection: 'last_lts'}, - {binVersion: 'last-continuous', testCollection: 'last_continuous'}, + {binVersion: 'last-lts', featureCompatibilityVersion: lastLTSFCV, testCollection: 'last_lts'}, + { + binVersion: 'last-continuous', + featureCompatibilityVersion: lastContinuousFCV, + testCollection: 'last_continuous' + }, {binVersion: 'latest', featureCompatibilityVersion: latestFCV, testCollection: 'latest'}, ]; ``` @@ -151,7 +153,7 @@ of shell fixture configuration override: In implicit multiversion suites the same set of tests may run in similar suites that are using various mixed version combinations. Those version combinations depend on the type of resmoke -fixture the suite is running with: +fixture the suite is running with. These are the recommended version combinations to test against based on the suite fixtures: * Replica set fixture combinations: * `last-lts new-new-old` (i.e. 
suite runs the replica set fixture that spins up the `latest` and @@ -162,6 +164,13 @@ the `last-lts` versions in a 3-node replica set where the 1st node is the `lates * `last-continuous new-new-old` * `last-continuous new-old-new` * `last-continuous old-new-new` + * Ex: [change_streams](https://github.com/10gen/mongo/blob/88d59bfe9d5ee2c9938ae251f7a77a8bf1250a6b/buildscripts/resmokeconfig/suites/change_streams.yml) uses a [`ReplicaSetFixture`](https://github.com/10gen/mongo/blob/88d59bfe9d5ee2c9938ae251f7a77a8bf1250a6b/buildscripts/resmokeconfig/suites/change_streams.yml#L50) so the corresponding multiversion suites are + * [`change_streams_last_continuous_new_new_old`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_continuous_new_new_old.yml) + * [`change_streams_last_continuous_new_old_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_continuous_new_old_new.yml) + * [`change_streams_last_continuous_old_new_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_continuous_old_new_new.yml) + * [`change_streams_last_lts_new_new_old`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_lts_new_new_old.yml) + * [`change_streams_last_lts_new_old_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_lts_new_old_new.yml) + * [`change_streams_last_lts_old_new_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_last_lts_old_new_new.yml) * Sharded cluster fixture combinations: * `last-lts new-old-old-new` (i.e. suite runs the sharded cluster fixture that spins up the @@ -169,14 +178,21 @@ the `last-lts` versions in a 3-node replica set where the 1st node is the `lates replica sets per shard where the 1st node of the 1st shard is the `latest`, 2nd node of 1st shard - `last-lts`, 1st node of 2nd shard - `last-lts`, 2nd node of 2nd shard - `latest`, etc.) * `last-continuous new-old-old-new` + * Ex: [change_streams_downgrade](https://github.com/10gen/mongo/blob/a96b83b2fa7010a5823fefac2469b4a06a697cf1/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml) uses a [`ShardedClusterFixture`](https://github.com/10gen/mongo/blob/a96b83b2fa7010a5823fefac2469b4a06a697cf1/buildscripts/resmokeconfig/suites/change_streams_downgrade.yml#L408) so the corresponding multiversion suites are + * [`change_streams_downgrade_last_continuous_new_old_old_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_downgrade_last_continuous_new_old_old_new.yml) + * [`change_streams_downgrade_last_lts_new_old_old_new`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/change_streams_downgrade_last_lts_new_old_old_new.yml) * Shell fixture combinations: * `last-lts` (i.e. suite runs the shell fixture that spins up `last-lts` as the `old` versions, etc.) 
* `last-continuous` + * Ex: [initial_sync_fuzzer](https://github.com/10gen/mongo/blob/908625ffdec050a71aa2ce47c35788739f629c60/buildscripts/resmokeconfig/suites/initial_sync_fuzzer.yml) uses a Shell Fixture, so the corresponding multiversion suites are + * [`initial_sync_fuzzer_last_lts`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/initial_sync_fuzzer_last_lts.yml) + * [`initial_sync_fuzzer_last_continuous`](https://github.com/10gen/mongo/blob/612814f4ce56282c47d501817ba28337c26d7aba/buildscripts/resmokeconfig/matrix_suites/mappings/initial_sync_fuzzer_last_continuous.yml) -If `last-lts` and `last-continuous` versions happen to be the same, we skip `last-continuous` and -run multiversion suites with only `last-lts` combinations in Evergreen. + +If `last-lts` and `last-continuous` versions happen to be the same, or last-continuous is EOL, we skip `last-continuous` +and run multiversion suites with only `last-lts` combinations in Evergreen. ## Working with multiversion tasks in Evergreen @@ -212,6 +228,13 @@ below the specified FCV version, e.g. when the `latest` version is `6.2`, `last- tasks that run `latest` with `last-lts`, but will run in multiversion tasks that run `lastest` with `last-continuous`. +In addition to disabling multiversion tests based on FCV, there is no need to run in-development `featureFlagXYZ` tests +(featureFlags that have `default: false`) because these tests will most likely fail on older versions that +have not implemented this feature. For multiversion tasks, we pass the `--runNoFeatureFlagTests` flag to avoid these +failures on `all feature flag` variants. + +For more info on FCV, take a look at [FCV_AND_FEATURE_FLAG_README.md](https://github.com/10gen/mongo/blob/master/src/mongo/db/repl/FCV_AND_FEATURE_FLAG_README.md). + Another common case could be that the changes on master branch are breaking multiversion tests, but with those changes backported to the older branches the multiversion tests should work. In order to temporarily disable the test from running in multiversion it can be added to the diff --git a/docs/exception_architecture.md b/docs/exception_architecture.md index 3e5203f60befb..05c6eb752a1d7 100644 --- a/docs/exception_architecture.md +++ b/docs/exception_architecture.md @@ -19,7 +19,7 @@ __Note__: Calling C function `assert` is not allowed. Use one of the above inste The following types of assertions are deprecated: -- `verify` +- `MONGO_verify` - Checks per-operation invariants. A synonym for massert but doesn't require an error code. Process fatal in debug mode. Do not use for new code; use invariant or fassert instead. - `dassert` @@ -39,7 +39,7 @@ Some assertions will increment an assertion counter. The `serverStatus` command "asserts" section including these counters: - `regular` - - Incremented by `verify`. + - Incremented by `MONGO_verify`. - `warning` - Always 0. Nothing increments this anymore. - `msg` @@ -55,7 +55,7 @@ Some assertions will increment an assertion counter. The `serverStatus` command ## Considerations When per-operation invariant checks fail, the current operation fails, but the process and -connection persist. This means that `massert`, `uassert`, `iassert` and `verify` only +connection persist. This means that `massert`, `uassert`, `iassert` and `MONGO_verify` only terminate the current operation, not the whole process. Be careful not to corrupt process state by mistakenly using these assertions midway through mutating process state. 
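To make the distinction drawn in the exception_architecture.md hunk above concrete, here is a minimal illustrative sketch (not part of this patch) of operation-fatal versus process-fatal assertions. `uassert`, `invariant`, `ErrorCodes::BadValue`, and `mongo/util/assert_util.h` are the real names that document discusses; the helper function, its parameters, and the error message are hypothetical.

```cpp
#include <algorithm>

#include "mongo/base/error_codes.h"
#include "mongo/util/assert_util.h"

namespace mongo {

// Hypothetical helper: validate an untrusted, user-supplied batch size.
// uassert only fails the current operation (it throws a DBException carrying
// the given error code), so it is the right tool for rejecting bad user input.
long long clampBatchSize(long long requested, long long maxAllowed) {
    uassert(ErrorCodes::BadValue, "batchSize may not be negative", requested >= 0);

    long long result = std::min(requested, maxAllowed);

    // invariant is process-fatal. Reserve it for conditions that can only be
    // false because of a programming error, never for anything a client can send.
    invariant(result >= 0 && result <= maxAllowed);
    return result;
}

}  // namespace mongo
```

Consistent with the guidance in that document, the sketch never calls the C `assert`, and the process-fatal check guards an internal postcondition rather than user input.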
diff --git a/docs/images/shard_merge_diagram.png b/docs/images/shard_merge_diagram.png new file mode 100644 index 0000000000000..0578f7a39499d Binary files /dev/null and b/docs/images/shard_merge_diagram.png differ diff --git a/docs/libfuzzer.md b/docs/libfuzzer.md new file mode 100644 index 0000000000000..3ab836e641d18 --- /dev/null +++ b/docs/libfuzzer.md @@ -0,0 +1,87 @@ +--- +title: LibFuzzer +--- + +LibFuzzer is a tool for performing coverage-guided fuzzing of C/C++ +code. LibFuzzer will try to trigger AUBSAN failures in a function you +provide, by repeatedly calling it with a carefully crafted byte array as +input. Each input will be assigned a "score". Byte arrays which exercise +new or more regions of code will score better. LibFuzzer will merge and +mutate high scoring inputs in order to gradually cover more and more +possible behavior. + +# When to use LibFuzzer + +LibFuzzer is great for testing functions which accept an opaque blob of +untrusted user-provided data. + +# How to use LibFuzzer + +LibFuzzer implements `int main`, and expects to be linked with an object +file which provides the function under test. You will achieve this by +writing a cpp file which implements + +``` cpp +extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { + // Your code here +} +``` + +`LLVMFuzzerTestOneInput` will be called repeatedly, with fuzzer +generated bytes in `Data`. `Size` will always truthfully tell your +implementation how many bytes are in `Data`. If your function crashes or +induces an AUBSAN fault, LibFuzzer will consider that to be a finding +worth reporting. + +Keep in mind that your function will often "just" be adapting `Data` to +whatever format our internal C++ functions require. However, you have a +lot of freedom in exactly what you choose to do. Just make sure your +function crashes or fails an invariant when something interesting +happens! As just a few ideas: + +- You might choose to call multiple implementations of a single + operation, and validate that they produce the same output when + presented the same input (see the sketch near the end of this page). +- You could tease out individual bytes from `Data` and provide them as + different arguments to the function under test. + +Finally, your cpp file will need a SCons target. There is a method which +defines fuzzer targets, much like how we define unittests. For example: + +``` python + env.CppLibfuzzerTest( + target='op_msg_fuzzer', + source=[ + 'op_msg_fuzzer.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/base', + 'op_msg_fuzzer_fixture', + ], +) +``` + +# Running LibFuzzer + +Your test's object file and **all** of its dependencies must be compiled +with the "fuzzer" sanitizer, plus a set of sanitizers which might +produce interesting runtime errors like AUBSAN. Evergreen has a build +variant whose name will include the string "FUZZER", which will compile +and run all of the fuzzer tests. + +The fuzzers can be built locally, for development and debugging. Check +our Evergreen configuration for the current SCons arguments. + +LibFuzzer binaries will accept a path to a directory containing its +"corpus". A corpus is a list of examples known to produce interesting +outputs. LibFuzzer will start producing interesting results more quickly +if it starts off with a set of inputs which it can begin mutating. When it's +done, it will write down any new inputs it discovered into its corpus. +Re-using a corpus across executions is a good way to make LibFuzzer +return more results in less time. Our Evergreen tasks will try to +acquire and re-use a corpus from an earlier commit, if they can.
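To make the "multiple implementations" idea above concrete, here is a minimal sketch of a differential harness; the two parser functions are hypothetical stand-ins, and only `LLVMFuzzerTestOneInput` is LibFuzzer's real entry point:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Hypothetical stand-ins for two implementations of the same operation.
int parseWithReferenceImpl(const uint8_t* data, size_t size);
int parseWithFastImpl(const uint8_t* data, size_t size);

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    // Feed the same fuzzer-generated bytes to both implementations.
    const int a = parseWithReferenceImpl(data, size);
    const int b = parseWithFastImpl(data, size);

    // Abort, which LibFuzzer reports as a finding, if they ever disagree.
    if (a != b) {
        std::abort();
    }
    return 0;
}
```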
+ +# References + +- [LibFuzzer's official + documentation](https://llvm.org/docs/LibFuzzer.html) diff --git a/docs/linting.md b/docs/linting.md index a499505442d44..6149808671dfb 100644 --- a/docs/linting.md +++ b/docs/linting.md @@ -29,12 +29,12 @@ assertion codes are distinct. You can see the usage by running the following com Ex: `buildscripts/errorcodes.py` -### `quickcpplint.py` -The `buildscripts/quickcpplint.py` script runs a simple MongoDB C++ linter. You can see the usage -by running the following command: `buildscripts/quickcpplint.py --help`. You can take a look at -`buildscripts/linter/simplecpplint.py` to better understand the rules for this linter. +### `quickmongolint.py` +The `buildscripts/quickmongolint.py` script runs a simple MongoDB C++ linter. You can see the usage +by running the following command: `buildscripts/quickmongolint.py --help`. You can take a look at +`buildscripts/linter/mongolint.py` to better understand the rules for this linter. -Ex: `buildscripts/quickcpplint.py lint` +Ex: `buildscripts/quickmongolint.py lint` ## Javascript Linters The `buildscripts/eslint.py` wrapper script runs the `eslint` javascript linter. You can see the @@ -87,11 +87,11 @@ Here are some examples: | SCons Target | Linter(s) | Example | | --- | --- | --- | -| `lint` | `clang-format` `errorcodes.py` `quickcpplint.py` `eslint` `pylint` `mypy` `pydocstyle` `yapf` | `buildscripts/scons.py lint` | +| `lint` | `clang-format` `errorcodes.py` `quickmongolint.py` `eslint` `pylint` `mypy` `pydocstyle` `yapf` | `buildscripts/scons.py lint` | | `lint-fast` | `clang-format` `errorcodes.py` `eslint` `pylint` `mypy` `pydocstyle` `yapf` | `buildscripts/scons.py lint-fast` | | `lint-clang-format` | `clang-format` | `buildscripts/scons.py lint-clang-format` | | `lint-errorcodes` | `errorcodes.py` | `buildscripts/scons.py lint-errorcodes` | -| `lint-lint.py` | `quickcpplint.py` | `buildscripts/scons.py lint-lint.py` | +| `lint-lint.py` | `quickmongolint.py` | `buildscripts/scons.py lint-lint.py` | | `lint-eslint` | `eslint` | `buildscripts/scons.py lint-eslint` | | `lint-pylinters` | `pylint` `mypy` `pydocstyle` `yapf` | `buildscripts/scons.py lint-pylinters` | | `lint-sconslinters` | `yapf` | `buildscripts/scons.py lint-sconslinters` | diff --git a/docs/primary_only_service.md b/docs/primary_only_service.md index 415d9da51006d..269af5671c0a6 100644 --- a/docs/primary_only_service.md +++ b/docs/primary_only_service.md @@ -103,4 +103,4 @@ responsible for deleting its state document, such logic needs to be careful as t document is deleted, the corresponding PrimaryOnlyService is no longer keeping that Instance alive. If an Instance has any additional logic or internal state to update after deleting its state document, it must extend its own lifetime by capturing a shared_ptr to itself by calling -shared_from_this() before deleting its state document. \ No newline at end of file +shared_from_this() before deleting its state document.
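A rough sketch of that lifetime-extension pattern follows; the class and method names are hypothetical, and a real Instance derives from its service's typed Instance class rather than directly from `std::enable_shared_from_this`:

```cpp
#include <memory>

// Hypothetical Instance type. Real instances derive from their
// PrimaryOnlyService's typed Instance class, which provides shared_from_this().
class MyServiceInstance : public std::enable_shared_from_this<MyServiceInstance> {
public:
    void completeAndCleanUp() {
        // Keep this Instance alive past the deletion of its state document,
        // since the PrimaryOnlyService stops holding it once the document is gone.
        std::shared_ptr<MyServiceInstance> self = shared_from_this();
        deleteStateDocument();
        // Still safe to touch members: 'self' keeps the object alive.
        recordCompletionMetrics();
    }

private:
    void deleteStateDocument() { /* remove the durable state document */ }
    void recordCompletionMetrics() { /* any post-deletion bookkeeping */ }
};
```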
diff --git a/docs/testing/fsm_concurrency_testing_framework.md b/docs/testing/fsm_concurrency_testing_framework.md index c8dd4b0d29007..3ff2ce8f5e0f6 100644 --- a/docs/testing/fsm_concurrency_testing_framework.md +++ b/docs/testing/fsm_concurrency_testing_framework.md @@ -116,29 +116,29 @@ $config = (function() { function getRand() { return Random.randInt(10); } - + function init(db, collName) { this.start = getRand() * this.tid; } - + function scanGT(db, collName) { db[collName].find({ _id: { $gt: this.start } }).itcount(); } - + function scanLTE(db, collName) { db[collName].find({ _id: { $lte: this.start } }).itcount(); } - - + + return { init: init, scanGT: scanGT, scanLTE: scanLTE }; })(); - + /* ... */ - + return { /* ... */ states: states, @@ -204,7 +204,7 @@ $config = (function() { printjson(db.serverCmdLineOpts()); }); } - + function teardown(db, collName, cluster) { cluster.executeOnMongodNodes(function(db) { db.adminCommand({ setParameter: 1, internalQueryExecYieldIterations: 128 }); @@ -288,7 +288,7 @@ engine, and work as you would expect. One thing to note is that before calling either isMMAPv1 or isWiredTiger, first verify isMongod. When special casing functionality for sharded environments or storage engines, try to special case a test for the exceptionality while still leaving in place assertions for either -case. +case. #### indexed_noindex.js @@ -300,12 +300,11 @@ workload you are extending has a function in its data object called "getIndexSpec" that returns the spec for the index to be removed. ```javascript - -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex -load('jstests/concurrency/fsm_workloads/workload_with_index.js'); //for $config - -$config = extendWorkload($config, indexedNoIndex); +import {$config as $baseConfig} from 'jstests/concurrency/fsm_workloads/workload_with_index.js'; + +export const $config = extendWorkload($baseConfig, indexedNoIndex); ``` #### drop_utils.js @@ -336,7 +335,7 @@ will always correspond to the mongod the mongo shell initially connected to. Serial is the simplest of all three modes and basically works as explained above. Setup is run single threaded, data is copied into multiple threads where the states are executed, and once all the threads have finished a teardown -function is run and the runner moves onto the next workload. +function is run and the runner moves onto the next workload. ![fsm_serial_example.png](../images/testing/fsm_serial_example.png) @@ -393,7 +392,7 @@ runWorkloads functions, the third argument, can contain the following options Runs all workloads serially. For each workload, `$config.threadCount` threads are spawned and each thread runs for exactly `$config.iterations` steps starting at `$config.startState` and transitioning to other states based on the -transition probabilities defined in $config.transitions. +transition probabilities defined in $config.transitions. #### fsm_all_composed.js @@ -408,7 +407,7 @@ composition of workloads. By default, each workload in each subset is run between 2 and 3 times. The number of threads used during composition equals the sum of the `$config.threadCount` values for each workload in each subset. -#### fsm_all_simultaneous.js +#### fsm_all_simultaneous.js options: numSubsets, subsetSize @@ -533,4 +532,4 @@ and OwnColl assertions. 
fewer than 20% of the threads fail while spawning we allow the non-failed threads to continue with the test. The 20% threshold is somewhat arbitrary; the goal is to abort if "mostly all" of the threads failed but to tolerate "a - few" threads failing. \ No newline at end of file + few" threads failing. diff --git a/etc/backports_required_for_multiversion_tests.yml b/etc/backports_required_for_multiversion_tests.yml index e0cb1e6b7b49e..58e005f2d58e4 100644 --- a/etc/backports_required_for_multiversion_tests.yml +++ b/etc/backports_required_for_multiversion_tests.yml @@ -4,19 +4,20 @@ # # Usage: # Add the server ticket number and the path to the test file for the test you intend to denylist -# under the appropriate suite. Any test in a (ticket, test_file) pair that appears in this file but +# under the appropriate multiversion branch. Any test in a (ticket, test_file) pair that appears in this file but # not in the last-lts or last-continuous branch version of this file indicates that a commit has # not yet been backported to the last-lts or last-continuous branch and will be excluded from the # multiversion suite corresponding to the root level suite key. # -# Example: To prevent 'my_test_file.js' from running in the 'replica_sets_multiversion' suite with the last-continuous binary -# replica_sets_multiversion: -# - ticket: SERVER-1000 -# test_file: jstests/core/my_test_file.js +# Example: To prevent 'my_test_file.js' from running with the last-continuous binary +# last-continuous: +# all: +# - test_file: jstests/core/my_test_file.js +# ticket: SERVER-1000 # # The above example will denylist jstests/core/my_test_file.js from the -# 'replica_sets_multiversion_gen' task until this file has been updated with the same -# (ticket, test_file) pair on the last-lts branch. +# last-continuous branch until this file has been updated with the same +# (ticket, test_file) pair on the last-continuous branch. 
# last-continuous: all: @@ -200,6 +201,8 @@ last-continuous: ticket: SERVER-65022 - test_file: jstests/sharding/database_versioning_all_commands.js ticket: SERVER-65101 + - test_file: jstests/sharding/database_versioning_all_commands.js + ticket: SERVER-75911 - test_file: jstests/sharding/sessions_collection_auto_healing.js ticket: SERVER-65188 - test_file: jstests/replsets/sessions_collection_auto_healing.js @@ -265,7 +268,7 @@ last-continuous: - test_file: jstests/sharding/collection_uuid_shard_capped_collection.js ticket: SERVER-67885 - test_file: jstests/sharding/prepare_transaction_then_migrate.js - ticket: SERVER-68361 + ticket: SERVER-71219 - test_file: jstests/core/txns/txn_ops_allowed_on_buckets_coll.js ticket: SERVER-68556 - test_file: jstests/core/txns/no_writes_to_system_collections_in_txn.js @@ -328,8 +331,6 @@ last-continuous: ticket: SERVER-72224 - test_file: jstests/sharding/internal_txns/incomplete_transaction_history_during_migration.js ticket: SERVER-73938 - - test_file: jstests/sharding/shard_keys_with_dollar_sign.js - ticket: SERVER-74124 - test_file: jstests/core/query/partial_index_logical.js ticket: SERVER-68434 - test_file: jstests/core/timeseries/timeseries_collmod.js @@ -344,6 +345,8 @@ last-continuous: ticket: SERVER-67105 - test_file: jstests/core/clustered/clustered_collection_bounded_scan.js ticket: SERVER-67105 + - test_file: jstests/noPassthrough/clustered_collection_sorted_scan.js + ticket: SERVER-76102 - test_file: src/mongo/db/modules/enterprise/jstests/fle2/collection_coll_stats.js ticket: SERVER-74461 - test_file: src/mongo/db/modules/enterprise/jstests/fle2/top_command.js @@ -362,9 +365,61 @@ last-continuous: ticket: SERVER-75517 - test_file: jstests/replsets/startup_recovery_for_restore_needs_rollback.js ticket: SERVER-67180 + - test_file: jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js + ticket: SERVER-75886 + - test_file: jstests/core/clustered/clustered_collection_hint.js + ticket: SERVER-73482 + - test_file: jstests/core/command_let_variables.js + ticket: SERVER-75356 + - test_file: jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js + ticket: SERVER-76311 + - test_file: jstests/sharding/refine_collection_shard_key_basic.js + ticket: SERVER-76394 + - test_file: jstests/sharding/shard_drain_works_with_chunks_of_any_size.js + ticket: SERVER-76550 + - test_file: jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_compound.js + ticket: SERVER-76719 + - test_file: jstests/sharding/ddl_commits_with_two_phase_oplog_notification.js + ticket: SERVER-76908 + - test_file: jstests/sharding/shard_keys_with_dollar_sign.js + ticket: SERVER-76948 + - test_file: jstests/sharding/merge_let_params_size_estimation.js + ticket: SERVER-74806 + - test_file: jstests/sharding/resharding_with_multi_deletes_reduced_ticket_pool_size.js + ticket: SERVER-77097 + - test_file: jstests/sharding/move_primary_donor_cleaned_up_if_coordinator_steps_up_aborted.js + ticket: SERVER-76872 + - test_file: jstests/sharding/resharding_update_tag_zones_large.js + ticket: SERVER-76988 + - test_file: jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js + ticket: SERVER-77247 + - test_file: jstests/core/timeseries/timeseries_create_collection.js + ticket: SERVER-77382 + - test_file: jstests/replsets/tenant_migration_retryable_internal_transaction.js + ticket: SERVER-77237 + - test_file: jstests/sharding/cluster_time_across_add_shard.js + ticket: SERVER-60466 + - test_file: jstests/sharding/move_chunk_deferred_lookup.js + 
ticket: SERVER-78050 + - test_file: jstests/replsets/tenant_migrations_back_to_back_2.js + ticket: SERVER-78176 + - test_file: jstests/sharding/transfer_mods_large_batches.js + ticket: SERVER-78414 + - test_file: jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js + ticket: SERVER-74954 + - test_file: jstests/core/find_with_resume_after_param.js + ticket: SERVER-77386 + - test_file: jstests/core/timeseries/timeseries_resume_after.js + ticket: SERVER-77386 + - test_file: jstests/sharding/analyze_shard_key/timeseries.js + ticket: SERVER-78595 + - test_file: jstests/replsets/config_txns_reaping_interrupt.js + ticket: SERVER-78187 suites: null last-lts: all: + - test_file: jstests/sharding/database_versioning_all_commands.js + ticket: SERVER-75911 - test_file: jstests/core/query/null_query_semantics.js ticket: SERVER-21929 - test_file: jstests/core/query/or/or_to_in.js @@ -434,7 +489,7 @@ last-lts: - test_file: jstests/core/txns/errors_on_committed_transaction.js ticket: SERVER-52547 - test_file: jstests/sharding/prepare_transaction_then_migrate.js - ticket: SERVER-52906 + ticket: SERVER-71219 - test_file: jstests/sharding/migration_waits_for_majority_commit.js ticket: SERVER-52906 - test_file: jstests/sharding/migration_ignore_interrupts_1.js @@ -703,8 +758,6 @@ last-lts: ticket: SERVER-67723 - test_file: jstests/sharding/collection_uuid_shard_capped_collection.js ticket: SERVER-67885 - - test_file: jstests/sharding/prepare_transaction_then_migrate.js - ticket: SERVER-68361 - test_file: jstests/core/txns/txn_ops_allowed_on_buckets_coll.js ticket: SERVER-68556 - test_file: jstests/core/txns/no_writes_to_system_collections_in_txn.js @@ -765,8 +818,6 @@ last-lts: ticket: SERVER-72224 - test_file: jstests/sharding/internal_txns/incomplete_transaction_history_during_migration.js ticket: SERVER-73938 - - test_file: jstests/sharding/shard_keys_with_dollar_sign.js - ticket: SERVER-74124 - test_file: jstests/core/timeseries/timeseries_filter_extended_range.js ticket: SERVER-69952 - test_file: jstests/replsets/log_ddl_ops.js @@ -775,6 +826,8 @@ last-lts: ticket: SERVER-67105 - test_file: jstests/core/clustered/clustered_collection_bounded_scan.js ticket: SERVER-67105 + - test_file: jstests/noPassthrough/clustered_collection_sorted_scan.js + ticket: SERVER-76102 - test_file: src/mongo/db/modules/enterprise/jstests/fle2/collection_coll_stats.js ticket: SERVER-74461 - test_file: src/mongo/db/modules/enterprise/jstests/fle2/top_command.js @@ -791,4 +844,78 @@ last-lts: ticket: SERVER-75517 - test_file: jstests/replsets/startup_recovery_for_restore_needs_rollback.js ticket: SERVER-67180 + - test_file: jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js + ticket: SERVER-76012 + - test_file: jstests/replsets/transactions_committed_with_tickets_exhausted.js + ticket: SERVER-76012 + - test_file: jstests/replsets/transactions_reaped_with_tickets_exhausted.js + ticket: SERVER-76012 + - test_file: jstests/sharding/cancel_coordinate_txn_commit_with_tickets_exhausted.js + ticket: SERVER-76012 + - test_file: jstests/sharding/coordinate_txn_commit_with_tickets_exhausted.js + ticket: SERVER-76012 + - test_file: jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js + ticket: SERVER-75886 + - test_file: jstests/core/clustered/clustered_collection_hint.js + ticket: SERVER-73482 + - test_file: jstests/core/command_let_variables.js + ticket: SERVER-75356 + - test_file: jstests/sharding/invalid_shard_identity_doc.js + ticket: SERVER-76310 + - test_file: 
jstests/sharding/auth_catalog_shard_localhost_exception.js + ticket: SERVER-76310 + - test_file: jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js + ticket: SERVER-76310 + - test_file: jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js + ticket: SERVER-76311 + - test_file: jstests/sharding/refine_collection_shard_key_basic.js + ticket: SERVER-76394 + - test_file: jstests/sharding/shard_drain_works_with_chunks_of_any_size.js + ticket: SERVER-76550 + - test_file: jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_compound.js + ticket: SERVER-76719 + - test_file: jstests/sharding/implicit_create_collection_triggered_by_DDLs.js + ticket: SERVER-76489 + - test_file: jstests/sharding/ddl_commits_with_two_phase_oplog_notification.js + ticket: SERVER-76908 + - test_file: jstests/sharding/shard_keys_with_dollar_sign.js + ticket: SERVER-76948 + - test_file: jstests/sharding/merge_let_params_size_estimation.js + ticket: SERVER-74806 + - test_file: jstests/sharding/resharding_with_multi_deletes_reduced_ticket_pool_size.js + ticket: SERVER-77097 + - test_file: jstests/sharding/move_primary_donor_cleaned_up_if_coordinator_steps_up_aborted.js + ticket: SERVER-76872 + - test_file: jstests/sharding/resharding_update_tag_zones_large.js + ticket: SERVER-76988 + - test_file: jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js + ticket: SERVER-77247 + - test_file: jstests/core/timeseries/timeseries_create_collection.js + ticket: SERVER-77382 + - test_file: jstests/replsets/tenant_migration_retryable_internal_transaction.js + ticket: SERVER-77237 + - test_file: jstests/sharding/cluster_time_across_add_shard.js + ticket: SERVER-60466 + - test_file: jstests/sharding/move_chunk_deferred_lookup.js + ticket: SERVER-78050 + - test_file: jstests/core/find_with_resume_after_param.js + ticket: SERVER-77386 + - test_file: jstests/core/timeseries/timeseries_resume_after.js + ticket: SERVER-77386 + - test_file: jstests/replsets/tenant_migrations_back_to_back_2.js + ticket: SERVER-78176 + - test_file: jstests/core/index/wildcard/compound_wildcard_index_or.js + ticket: SERVER-78307 + - test_file: jstests/core/index/wildcard/compound_wildcard_index_unbounded.js + ticket: SERVER-78307 + - test_file: jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js + ticket: SERVER-78307 + - test_file: jstests/sharding/transfer_mods_large_batches.js + ticket: SERVER-78414 + - test_file: jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js + ticket: SERVER-74954 + - test_file: jstests/sharding/analyze_shard_key/timeseries.js + ticket: SERVER-78595 + - test_file: jstests/replsets/config_txns_reaping_interrupt.js + ticket: SERVER-78187 suites: null diff --git a/etc/evergreen.yml b/etc/evergreen.yml index 28313f81151b3..45c69e99f8f6b 100644 --- a/etc/evergreen.yml +++ b/etc/evergreen.yml @@ -53,27 +53,22 @@ # - func: "set up venv" include: -- filename: etc/evergreen_yml_components/project_and_distro_settings.yml - filename: etc/evergreen_yml_components/definitions.yml - filename: etc/evergreen_yml_components/variants/task_generation.yml - filename: etc/evergreen_yml_components/variants/sanitizer.yml - filename: etc/evergreen_yml_components/variants/in_memory.yml - filename: etc/evergreen_yml_components/variants/ninja.yml - filename: etc/evergreen_yml_components/variants/compile_static_analysis.yml +- filename: etc/evergreen_yml_components/variants/config_shard.yml variables: -- &libfuzzertests - name: libfuzzertests! 
- execution_tasks: - - compile_and_archive_libfuzzertests - - fetch_and_run_libfuzzertests - # Common compile variant dependency specifications. +# THIS WAS COPIED TO config_shard.yml - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE. - &linux_x86_dynamic_compile_variant_dependency depends_on: - name: archive_dist_test_debug - variant: &linux_x86_dynamic_compile_variant_name linux-x86-dynamic-compile-required + variant: &linux_x86_dynamic_compile_variant_name linux-x86-dynamic-compile - name: version_gen variant: generate-tasks-for-version # This is added because of EVG-18211. @@ -89,8 +84,6 @@ variables: # This is added because of EVG-18211. # Without this we are adding extra dependencies on evergreen and it is causing strain omit_generated_tasks: true - - name: version_burn_in_gen - variant: generate-tasks-for-version - &linux_x86_dynamic_debug_compile_variant_dependency depends_on: @@ -121,9 +114,8 @@ variables: # This is added because of EVG-18211. # Without this we are adding extra dependencies on evergreen and it is causing strain omit_generated_tasks: true - - name: version_burn_in_gen - variant: generate-tasks-for-version +# THIS WAS COPIED TO config_shard.yml - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE. - &linux_x86_generic_expansions multiversion_platform: rhel80 multiversion_edition: enterprise @@ -145,7 +137,7 @@ variables: # If you add anything to san_options, make sure the appropriate changes are # also made to SConstruct. -# and also to the san_options in compile_static_analysis.yml +# and also to the san_options in compile_static_analysis.yml and sanitizer.yml - aubsan_options: &aubsan_options >- UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" @@ -190,6 +182,11 @@ variables: distros: - windows-vsCurrent-large - name: burn_in_tests_gen + depends_on: + - name: version_burn_in_gen + variant: generate-tasks-for-version + omit_generated_tasks: true + - name: archive_dist_test_debug - name: .aggfuzzer .common !.feature_flag_guarded - name: audit - name: auth_audit_gen @@ -203,7 +200,6 @@ variables: - windows-2016-dc - name: .jscore .common !.sharding - name: .jstestfuzz .common - - name: .logical_session_cache - name: replica_sets_auth_gen - name: sasl - name: sharding_auth_audit_gen @@ -263,7 +259,6 @@ variables: - name: jsCore_txns_large_txns_format - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: .ocsp - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough @@ -280,8 +275,8 @@ variables: - name: test_packages distros: - ubuntu2004-package - # TODO: BF-24515 re-enable when build failure cause determined and resolved - # - name: selinux_rhel7_enterprise + - name: vector_search + - name: selinux_rhel7_enterprise - name: generate_buildid_to_debug_symbols_mapping @@ -390,120 +385,6 @@ buildvariants: tasks: - name: tla_plus -- &enterprise-rhel80-dynamic-gcc-debug-experimental-template - name: &enterprise-rhel80-dynamic-gcc-cxx20-debug-experimental enterprise-rhel80-dynamic-gcc-cxx20-debug-experimental - display_name: "~ Shared Library Enterprise RHEL 8.0 Toolchain GCC C++20 DEBUG" - cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. 
- modules: - - enterprise - run_on: - - rhel80-small - expansions: &enterprise-rhel80-dynamic-gcc-debug-experimental-expansions - additional_package_targets: >- - archive-mongocryptd - archive-mongocryptd-debug - archive-mh - archive-mh-debug - compile_flags: >- - --dbg=on - --opt=on - -j$(grep -c ^processor /proc/cpuinfo) - --link-model=dynamic - --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - --variables-files=etc/scons/scons_experimental_scheduler.vars - --cxx-std=20 - has_packages: false - scons_cache_scope: shared - scons_cache_mode: all - large_distro_name: rhel80-medium - num_scons_link_jobs_available: 0.99 - compile_variant: *enterprise-rhel80-dynamic-gcc-cxx20-debug-experimental - tasks: &enterprise-rhel80-dynamic-gcc-debug-experimental-tasks - - name: compile_test_and_package_parallel_core_stream_TG - distros: - - rhel80-xlarge - - name: compile_test_and_package_parallel_unittest_stream_TG - distros: - - rhel80-xlarge - - name: compile_test_and_package_parallel_dbtest_stream_TG - distros: - - rhel80-xlarge - - name: compile_integration_and_test_parallel_stream_TG - distros: - - rhel80-large - - name: test_api_version_compatibility - - name: .aggfuzzer !.multiversion !.feature_flag_guarded - - name: .aggregation !.multiversion !.feature_flag_guarded - - name: audit - - name: .auth !.multiversion - - name: .causally_consistent !.sharding - - name: .change_streams !.multiversion - - name: .misc_js !.multiversion - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.multiversion - - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.multiversion - distros: - - rhel80-xlarge - - name: disk_wiredtiger - - name: .encrypt !.multiversion - - name: idl_tests - - name: jsCore - distros: - - rhel80-xlarge - - name: .jscore .common !jsCore !.multiversion - - name: jsCore_min_batch_repeat_queries_ese_gsm - - name: jsCore_txns_large_txns_format - - name: json_schema - - name: libunwind_tests - - name: mqlrun - - name: .multi_shard !.multiversion - - name: multi_stmt_txn_jscore_passthrough_with_migration_gen - - name: .read_write_concern .large !.multiversion - distros: - - rhel80-xlarge - - name: .read_write_concern !.large !.multiversion - - name: .replica_sets !.encrypt !.auth !.multiversion - distros: - - rhel80-xlarge - - name: replica_sets_api_version_jscore_passthrough_gen - - name: replica_sets_reconfig_jscore_passthrough_gen - - name: retryable_writes_jscore_passthrough_gen - - name: .read_only !.multiversion - - name: sasl - - name: search - - name: search_auth - - name: search_pinned_connections_auth - - name: search_ssl - - name: session_jscore_passthrough - - name: .sharding .jscore !.wo_snapshot !.multi_stmt !.multiversion - - name: sharding_api_version_jscore_passthrough_gen - - name: .sharding .txns !.multiversion - - name: .sharding .common !.multiversion !.jstestfuzz - - name: .stitch - - name: secondary_reads_passthrough_gen - - name: server_discovery_and_monitoring_json_test_TG - - name: .serverless !.multiversion - distros: - - rhel80-xlarge - - name: server_selection_json_test_TG - distros: - - rhel80-xlarge - - name: generate_buildid_to_debug_symbols_mapping - -- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template - name: &enterprise-rhel80-dynamic-clang-cxx20-debug-experimental enterprise-rhel80-dynamic-clang-cxx20-debug-experimental - display_name: "~ Shared Library Enterprise RHEL 8.0 Toolchain Clang C++20 DEBUG" - expansions: - <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions - compile_flags: >- - --dbg=on - 
--opt=on - -j$(grep -c ^processor /proc/cpuinfo) - --link-model=dynamic - --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars - --variables-files=etc/scons/scons_experimental_scheduler.vars - --cxx-std=20 - compile_variant: *enterprise-rhel80-dynamic-clang-cxx20-debug-experimental - - name: &enterprise-rhel-80-64-bit-coverage enterprise-rhel-80-64-bit-coverage display_name: "~ Enterprise RHEL 8.0 DEBUG Code Coverage" modules: @@ -603,140 +484,6 @@ buildvariants: tasks: *enterprise-rhel-80-64-bit-coverage-tasks -- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template - name: enterprise-rhel80-dynamic-gcc-cxx20-debug-pm-1328-experimental - display_name: "~ Shared Library Enterprise RHEL 8.0 GCC C++20 DEBUG + PM-1328" - cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter - expansions: - <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions - compile_flags: >- - --dbg=on - --opt=on - -j$(grep -c ^processor /proc/cpuinfo) - --link-model=dynamic - --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - --cxx-std=20 - --experimental-optimization=* - --experimental-runtime-hardening=* - --disable-warnings-as-errors - -- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template - name: &enterprise-rhel80-dynamic-clang-cxx20-debug-pm-1328-experimental enterprise-rhel80-dynamic-clang-cxx20-debug-pm-1328-experimental - display_name: "~ Shared Library Enterprise RHEL 8.0 Clang C++20 DEBUG + PM-1328" - cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter - expansions: - <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions - compile_flags: >- - --dbg=on - --opt=on - -j$(grep -c ^processor /proc/cpuinfo) - --link-model=dynamic - --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars - --cxx-std=20 - --experimental-optimization=* - --experimental-runtime-hardening=* - compile_variant: *enterprise-rhel80-dynamic-clang-cxx20-debug-pm-1328-experimental - -- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template - name: &enterprise-rhel80-gcc-cxx20-pm-1328-experimental enterprise-rhel80-gcc-cxx20-pm-1328-experimental - display_name: "~ Enterprise RHEL 8.0 Toolchain GCC C++20 + PM-1328" - cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter - expansions: - <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions - compile_flags: >- - --opt=on - -j$(grep -c ^processor /proc/cpuinfo) - --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - --cxx-std=20 - --experimental-optimization=* - --experimental-runtime-hardening=* - --disable-warnings-as-errors - compile_variant: *enterprise-rhel80-gcc-cxx20-pm-1328-experimental - tasks: &enterprise-rhel80-dynamic-gcc-debug-experimental-tasks-no-unittests - - name: compile_test_and_package_parallel_core_stream_TG - distros: - - rhel80-xlarge - - name: compile_test_and_package_parallel_dbtest_stream_TG - distros: - - rhel80-xlarge - - name: compile_integration_and_test_parallel_stream_TG - distros: - - rhel80-large - - name: test_api_version_compatibility - - name: .aggfuzzer !.multiversion !.feature_flag_guarded - - name: .aggregation !.multiversion !.feature_flag_guarded - - name: audit - - name: .auth !.multiversion - - name: .causally_consistent !.sharding - - name: .change_streams !.multiversion - - name: .misc_js !.multiversion - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.multiversion - - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.multiversion - distros: - - rhel80-xlarge - - name: disk_wiredtiger - - name: .encrypt 
!.multiversion - - name: idl_tests - - name: jsCore - distros: - - rhel80-xlarge - - name: .jscore .common !jsCore !.multiversion - - name: jsCore_min_batch_repeat_queries_ese_gsm - - name: jsCore_txns_large_txns_format - - name: json_schema - - name: libunwind_tests - - name: mqlrun - - name: .multi_shard !.multiversion - - name: multi_stmt_txn_jscore_passthrough_with_migration_gen - - name: .read_write_concern .large !.multiversion - distros: - - rhel80-xlarge - - name: .read_write_concern !.large !.multiversion - - name: .replica_sets !.encrypt !.auth !.multiversion - distros: - - rhel80-xlarge - - name: replica_sets_api_version_jscore_passthrough_gen - - name: replica_sets_reconfig_jscore_passthrough_gen - - name: retryable_writes_jscore_passthrough_gen - - name: .read_only !.multiversion - - name: sasl - - name: search - - name: search_auth - - name: search_pinned_connections_auth - - name: search_ssl - - name: session_jscore_passthrough - - name: .sharding .jscore !.wo_snapshot !.multi_stmt !.multiversion - - name: sharding_api_version_jscore_passthrough_gen - - name: .sharding .txns !.multiversion - - name: .sharding .common !.multiversion - - name: .stitch - - name: secondary_reads_passthrough_gen - - name: server_discovery_and_monitoring_json_test_TG - - name: .serverless !.multiversion - distros: - - rhel80-xlarge - - name: server_selection_json_test_TG - distros: - - rhel80-xlarge - - name: generate_buildid_to_debug_symbols_mapping - - -- <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-template - name: &enterprise-rhel80-clang-cxx20-pm-1328-experimental enterprise-rhel80-clang-cxx20-pm-1328-experimental - display_name: "~ Enterprise RHEL 8.0 Toolchain Clang C++20 + PM-1328" - cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter - expansions: - <<: *enterprise-rhel80-dynamic-gcc-debug-experimental-expansions - compile_flags: >- - --opt=on - -j$(grep -c ^processor /proc/cpuinfo) - --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars - --cxx-std=20 - --experimental-optimization=* - --experimental-runtime-hardening=* - compile_variant: *enterprise-rhel80-clang-cxx20-pm-1328-experimental - tasks: *enterprise-rhel80-dynamic-gcc-debug-experimental-tasks-no-unittests - - name: &stm-daily-cron stm-daily-cron modules: - enterprise @@ -765,11 +512,20 @@ buildvariants: tasks: - name: blackduck_scanner -- name: tooling-metrics - display_name: "* Tooling Metrics" +- name: tooling-metrics-x86 + display_name: "* Tooling Metrics x86" + cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. + run_on: + - ubuntu2204-small + stepback: false + tasks: + - name: tooling_metrics_test + +- name: tooling-metrics-arm64 + display_name: "* Tooling Metrics ARM64" cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. 
run_on: - - ubuntu1804-small + - ubuntu2204-arm64-small stepback: false tasks: - name: tooling_metrics_test @@ -818,12 +574,19 @@ buildvariants: - name: .causally_consistent !.sharding - name: .change_streams - name: .misc_js !.non_win_dbg - - name: .concurrency .debug_only + - name: .concurrency !.ubsan + distros: + - windows-vsCurrent-large + - name: .config_fuzzer !.large !.linux_only + - name: .config_fuzzer .large !.linux_only !.sharded distros: - windows-vsCurrent-large - name: disk_wiredtiger - name: free_monitoring - name: initial_sync_fuzzer_gen + - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen - name: .jscore .common !.auth !.sharding - name: jsCore_txns_large_txns_format - name: json_schema @@ -881,7 +644,6 @@ buildvariants: test_flags: *windows_common_test_excludes external_auth_jobs_max: 1 tasks: - - name: burn_in_tests_gen - name: audit - name: auth_audit_gen - name: causally_consistent_jscore_txns_passthrough @@ -942,12 +704,15 @@ buildvariants: tasks: - name: cqf - name: cqf_disabled_pipeline_opt - - name: cqf_passthrough - name: cqf_parallel - name: query_golden_classic - - name: query_golden_cqf - - name: burn_in_tests_gen # - name: burn_in_tasks_gen + # depends_on: + # - name: version_burn_in_gen + # variant: generate-tasks-for-version + # omit_generated_tasks: true + # - name: archive_dist_test_debug + # variant: *windows_compile_variant_name - name: audit - name: auth_audit_gen - name: buildscripts_test @@ -977,77 +742,10 @@ buildvariants: - name: .sharding .txns - name: sharding_auth_audit_gen - name: sharding_max_mirroring_opportunistic_secondary_targeting_ese_gen - - name: telemetry_passthrough + - name: query_stats_passthrough + - name: query_stats_passthrough_writeonly - name: unittest_shell_hang_analyzer_gen -- name: &enterprise-windows-cxx20-debug-experimental enterprise-windows-cxx20-debug-experimental - display_name: "~ Enterprise Windows C++20 DEBUG" - cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. - modules: - - enterprise - run_on: - - windows-vsCurrent-small - expansions: - exe: ".exe" - compile_variant: *enterprise-windows-cxx20-debug-experimental - additional_package_targets: >- - archive-mongocryptd - archive-mongocryptd-debug - msi - archive-mh - archive-mh-debug - content_type: application/zip - compile_flags: >- - --dbg=on - --opt=on - --ssl - MONGO_DISTMOD=windows - CPPPATH="c:/sasl/include" - LIBPATH="c:/sasl/lib" - -j$(bc <<< "$(grep -c '^processor' /proc/cpuinfo) / 1.5") - --win-version-min=win10 - --cxx-std=20 - num_scons_link_jobs_available: 0.25 - python: '/cygdrive/c/python/python37/python.exe' - ext: zip - scons_cache_scope: shared - multiversion_platform: windows - multiversion_edition: enterprise - jstestfuzz_num_generated_files: 35 - target_resmoke_time: 20 - max_sub_suites: 3 - large_distro_name: windows-vsCurrent-large - test_flags: *windows_common_test_excludes - exec_timeout_secs: 14400 # 3 hour timeout - tasks: - - name: compile_test_and_package_serial_TG - distros: - - windows-vsCurrent-large - - name: .aggfuzzer !.feature_flag_guarded - - name: .aggregation !.auth !.encrypt !.unwind !.feature_flag_guarded - - name: auth_gen - - name: causally_consistent_jscore_txns_passthrough - - name: .misc_js - # Some concurrency workloads require a lot of memory, so we use machines - # with more RAM for these suites. 
- - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common !.debug_only - distros: - - windows-vsCurrent-large - - name: .concurrency .common - - name: disk_wiredtiger - - name: .jscore .common !.auth - - name: json_schema - - name: .query_fuzzer - - name: .read_write_concern - - name: replica_sets_gen - - name: replica_sets_jscore_passthrough_gen - - name: .sharding .jscore !.wo_snapshot !.multi_stmt !.multiversion - - name: .sharding .txns !.multiversion - - name: .sharding .common !.csrs !.multiversion !.gcm - - name: .ssl - - name: .stitch - - name: .updatefuzzer !.multiversion - - name: &enterprise-windows-debug-unoptimized enterprise-windows-debug-unoptimized display_name: "Enterprise Windows DEBUG (Unoptimized)" cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. @@ -1128,8 +826,6 @@ buildvariants: - name: replica_sets_max_mirroring_large_txns_format_gen - name: .ssl - name: .stitch - - name: unittest_shell_hang_analyzer_gen - - name: generate_buildid_to_debug_symbols_mapping - name: &enterprise-macos-rosetta-2 enterprise-macos-rosetta-2 display_name: "Enterprise macOS Via Rosetta 2" @@ -1156,24 +852,22 @@ buildvariants: - name: causally_consistent_jscore_txns_passthrough - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore !.ssl - name: .jscore .common !.decimal !.sharding - - name: .logical_session_cache .one_sec - name: mqlrun # TODO(SERVER-64009): Re-enable replica_sets_auth_gen. # - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough_gen - name: sasl - name: .crypt - - name: generate_buildid_to_debug_symbols_mapping -- name: &enterprise-macos-cxx20 enterprise-macos-cxx20 - display_name: "Enterprise macOS C++20 DEBUG" +- name: &enterprise-macos enterprise-macos + display_name: "Enterprise macOS DEBUG" cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. 
modules: - enterprise run_on: - macos-1100 expansions: - compile_variant: *enterprise-macos-cxx20 + compile_variant: *enterprise-macos test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm compile_env: DEVELOPER_DIR=/Applications/Xcode13.app compile_flags: >- @@ -1183,7 +877,6 @@ buildvariants: -j$(sysctl -n hw.logicalcpu) --libc++ --variables-files=etc/scons/xcode_macosx.vars - --cxx-std=20 resmoke_jobs_max: 6 num_scons_link_jobs_available: 0.99 tasks: @@ -1191,16 +884,13 @@ buildvariants: - name: audit - name: auth_audit_gen - name: causally_consistent_jscore_txns_passthrough - # TODO: SERVER-66945 Re-enable ESE on enterprise macos - # - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore !.ssl + - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore !.ssl - name: .jscore .common !.decimal !.sharding - - name: .logical_session_cache .one_sec - name: mqlrun - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough_gen - name: sasl - name: .crypt - - name: generate_buildid_to_debug_symbols_mapping - name: &enterprise-macos-arm64 enterprise-macos-arm64 display_name: "~ Enterprise macOS arm64" @@ -1226,18 +916,47 @@ buildvariants: - name: causally_consistent_jscore_txns_passthrough - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore - name: .jscore .common !.decimal !.sharding - - name: .logical_session_cache .one_sec - name: mqlrun - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough - name: sasl - name: .crypt - - name: generate_buildid_to_debug_symbols_mapping ########################################### # Redhat buildvariants # ########################################### +- name: run-all-affected-jstests + display_name: "! Run All Affected JStests" + patch_only: true + run_on: + - rhel80-medium + expansions: + large_distro_name: rhel80-large + burn_in_tag_include_all_required_and_suggested: true + burn_in_tag_exclude_build_variants: >- + macos-debug-suggested + burn_in_tag_include_build_variants: >- + enterprise-rhel-80-64-bit-inmem + enterprise-rhel-80-64-bit-multiversion + burn_in_tag_compile_task_dependency: archive_dist_test_debug + compile_variant: *amazon_linux2_arm64_compile_variant_name + depends_on: + - name: archive_dist_test_debug + variant: *amazon_linux2_arm64_compile_variant_name + - name: version_gen + variant: generate-tasks-for-version + # This is added because of EVG-18211. + # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true + - name: version_burn_in_gen + variant: generate-tasks-for-version + # This is added because of EVG-18211. + # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true + tasks: + - name: burn_in_tags_gen + - &enterprise-rhel-80-64-bit-dynamic-template <<: *linux_x86_dynamic_compile_variant_dependency name: &enterprise-rhel-80-64-bit-dynamic enterprise-rhel-80-64-bit-dynamic @@ -1247,6 +966,7 @@ buildvariants: - enterprise run_on: - rhel80-small + # THIS WAS COPIED TO config_shard.yml - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE. 
expansions: &enterprise-rhel-80-64-bit-dynamic-expansions <<: *linux_x86_generic_expansions scons_cache_scope: shared @@ -1259,10 +979,6 @@ buildvariants: idle_timeout_factor: 1.5 exec_timeout_factor: 1.5 large_distro_name: rhel80-medium - burn_in_tag_buildvariants: >- - enterprise-rhel-80-64-bit-inmem - enterprise-rhel-80-64-bit-multiversion - burn_in_tag_compile_task_dependency: archive_dist_test_debug depends_on: - name: archive_dist_test_debug variant: *linux_x86_dynamic_compile_variant_name @@ -1271,10 +987,7 @@ buildvariants: # This is added because of EVG-18211. # Without this we are adding extra dependencies on evergreen and it is causing strain omit_generated_tasks: true - - name: version_burn_in_gen - variant: generate-tasks-for-version tasks: - - name: burn_in_tests_gen - name: .aggfuzzer !.feature_flag_guarded - name: .aggregation !.feature_flag_guarded - name: aggregation_repeat_queries @@ -1286,8 +999,8 @@ buildvariants: - name: .change_streams - name: .change_stream_fuzzer - name: .misc_js - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only - - name: .concurrency .large !.ubsan !.no_txns !.debug_only + - name: .concurrency !.large !.ubsan !.no_txns + - name: .concurrency .large !.ubsan !.no_txns distros: - rhel80-medium - name: .config_fuzzer !.large @@ -1298,6 +1011,9 @@ buildvariants: - name: .encrypt - name: idl_tests - name: initial_sync_fuzzer_gen + - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen - name: jsCore distros: - rhel80-xlarge @@ -1340,6 +1056,7 @@ buildvariants: - name: search_pinned_connections_auth - name: search_ssl - name: session_jscore_passthrough + - name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen - name: .sharding .jscore !.wo_snapshot !.multi_stmt - name: sharding_api_version_jscore_passthrough_gen - name: sharding_api_strict_passthrough_gen @@ -1350,6 +1067,7 @@ buildvariants: - name: .serverless distros: - rhel80-xlarge + - name: vector_search - <<: *enterprise-rhel-80-64-bit-dynamic-template name: &enterprise-rhel-80-64-bit-dynamic-debug-mode enterprise-rhel-80-64-bit-dynamic-debug-mode @@ -1393,8 +1111,8 @@ buildvariants: - name: .change_streams !.no_debug_mode - name: .change_stream_fuzzer - name: .misc_js !.no_debug_mode - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.no_debug_mode - - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.no_debug_mode + - name: .concurrency !.large !.ubsan !.no_txns !.no_debug_mode + - name: .concurrency .large !.ubsan !.no_txns !.no_debug_mode distros: - rhel80-medium - name: .config_fuzzer !.large @@ -1408,6 +1126,10 @@ buildvariants: - name: .encrypt !.no_debug_mode - name: idl_tests - name: initial_sync_fuzzer_gen + - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + # TODO (SERVER-78417) reenable after ticket is complete + #- name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + #- name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen - name: jsCore distros: - rhel80-xlarge @@ -1466,12 +1188,13 @@ buildvariants: distros: - rhel80-xlarge - name: streams + - name: vector_search - name: generate_buildid_to_debug_symbols_mapping -- &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template +- &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-template <<: *linux_x86_dynamic_compile_variant_dependency - name: 
&enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required - display_name: "! Shared Library Enterprise RHEL 8.0 (all feature flags)" + name: &enterprise-rhel-80-64-bit-dynamic-all-feature-flags enterprise-rhel-80-64-bit-dynamic-all-feature-flags + display_name: "* Shared Library Enterprise RHEL 8.0 (all feature flags)" cron: "0 */4 * * *" # From the ${project_required_suggested_cron} parameter modules: - enterprise @@ -1497,21 +1220,21 @@ buildvariants: # This is added because of EVG-18211. # Without this we are adding extra dependencies on evergreen and it is causing strain omit_generated_tasks: true - - name: version_burn_in_gen - variant: generate-tasks-for-version tasks: - name: analyze_shard_key_jscore_passthrough_gen - name: cqf - name: cqf_disabled_pipeline_opt - - name: cqf_passthrough - name: cqf_parallel - name: query_golden_classic - - name: query_golden_cqf - name: lint_fuzzer_sanity_patch - name: test_api_version_compatibility - - name: burn_in_tests_gen - - name: burn_in_tags_gen # - name: burn_in_tasks_gen + # depends_on: + # - name: version_burn_in_gen + # variant: generate-tasks-for-version + # omit_generated_tasks: true + # - name: archive_dist_test_debug + # variant: *linux_x86_dynamic_compile_variant_name - name: check_feature_flag_tags - name: check_for_todos - name: .aggfuzzer @@ -1525,21 +1248,30 @@ buildvariants: - name: .causally_consistent !.sharding - name: .change_streams - name: .change_stream_fuzzer + # TODO SERVER-57866: Remove the explicit mentions of change stream multitenant suites. - name: change_streams_multitenant_passthrough - name: change_streams_multitenant_sharded_collections_passthrough - name: .misc_js - name: .clustered_collections - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only - - name: .concurrency .large !.ubsan !.no_txns !.debug_only + - name: .concurrency !.large !.ubsan !.no_txns + - name: .concurrency .large !.ubsan !.no_txns distros: - rhel80-medium - - name: sharding_auth_catalog_shard_gen - name: .config_fuzzer !.large + - name: .config_fuzzer .large + distros: + - rhel80-medium + - name: .config_fuzzer_stress + distros: + - rhel80-large - name: disk_wiredtiger - name: .encrypt - name: feature_flag_multiversion_gen - name: idl_tests - name: initial_sync_fuzzer_gen + - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen - name: .jscore .common - name: jsCore_column_store_indexes - name: jsCore_min_batch_repeat_queries_ese_gsm @@ -1588,6 +1320,7 @@ buildvariants: - name: sharding_api_strict_passthrough_gen - name: .sharding .txns - name: .sharding .common + - name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen - name: sharded_multi_stmt_txn_jscore_passthrough - name: .serverless - name: sharding_max_mirroring_opportunistic_secondary_targeting_ese_gcm_gen @@ -1596,7 +1329,9 @@ buildvariants: - name: .shard_split - name: .shard_merge - name: streams - - name: telemetry_passthrough + - name: vector_search + - name: query_stats_passthrough + - name: query_stats_passthrough_writeonly - &enterprise-rhel-80-64-bit-dynamic-classic-engine <<: *linux_x86_dynamic_compile_variant_dependency @@ -1617,10 +1352,6 @@ buildvariants: test_flags: >- --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}" large_distro_name: rhel80-medium - burn_in_tag_buildvariants: >- - 
enterprise-rhel-80-64-bit-inmem - enterprise-rhel-80-64-bit-multiversion - burn_in_tag_compile_task_dependency: archive_dist_test_debug depends_on: - name: archive_dist_test_debug variant: *linux_x86_dynamic_compile_variant_name @@ -1629,8 +1360,6 @@ buildvariants: # This is added because of EVG-18211. # Without this we are adding extra dependencies on evergreen and it is causing strain omit_generated_tasks: true - - name: version_burn_in_gen - variant: generate-tasks-for-version tasks: - name: .aggfuzzer !.sbe_only - name: .aggregation !.sbe_only @@ -1638,8 +1367,8 @@ buildvariants: - name: .causally_consistent !.sharding - name: .change_stream_fuzzer - name: .change_streams - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.compute_mode - - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.compute_mode + - name: .concurrency !.large !.ubsan !.no_txns !.compute_mode + - name: .concurrency .large !.ubsan !.no_txns !.compute_mode distros: - rhel80-medium - name: .encrypt @@ -1648,6 +1377,7 @@ buildvariants: - name: .misc_js - name: .multi_shard - name: .query_fuzzer + - name: query_golden_classic - name: .random_multiversion_ds - name: .read_only - name: .read_write_concern !.large @@ -1667,19 +1397,26 @@ buildvariants: - name: .updatefuzzer - name: aggregation_repeat_queries - name: audit - - name: burn_in_tags_gen - name: burn_in_tests_gen + depends_on: + - name: version_burn_in_gen + variant: generate-tasks-for-version + omit_generated_tasks: true + - name: archive_dist_test_debug + variant: *linux_x86_dynamic_compile_variant_name - name: check_feature_flag_tags - name: check_for_todos - name: disk_wiredtiger - name: initial_sync_fuzzer_gen + - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen - name: jsCore distros: - rhel80-xlarge - name: jsCore_min_batch_repeat_queries_ese_gsm - name: jsCore_txns_large_txns_format - name: json_schema - - name: lint_fuzzer_sanity_patch - name: mqlrun - name: multi_stmt_txn_jscore_passthrough_with_migration_gen - name: multiversion_gen @@ -1704,26 +1441,154 @@ buildvariants: - name: sharding_api_version_jscore_passthrough_gen - name: test_api_version_compatibility - name: unittest_shell_hang_analyzer_gen + - name: vector_search -- <<: *linux_x86_dynamic_compile_variant_dependency - name: &enterprise-rhel-80-64-bit-large-txns-format enterprise-rhel-80-64-bit-large-txns-format - display_name: "Enterprise RHEL 8.0 (large transactions format)" - cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter - modules: - - enterprise - run_on: - - rhel80-small +# The CQF feature flag is currently part of the always-disabled feature flags list, so it is not +# enabled in all-feature-flags variants besides this one. This variant allows us to get some initial +# coverage for CQF without disrupting coverage for other feature flags (in particular, SBE). Once +# CQF is enabled by default, it will be tested in the release variants and all-feature-flags +# variants, and we will no longer need this dedicated variant to test it. At that point, we will +# replace this variant with a dedicated variant for SBE stage builders, similar in spirit to the +# Classic Engine variant above. +# TODO SERVER-71163: Replace this variant with a dedicated variant for stage builders once the CQF +# feature flag is not always-disabled. 
+- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-template + name: enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-and-cqf-enabled + display_name: "Shared Library Enterprise RHEL 8.0 Query (all feature flags and CQF enabled)" + cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. expansions: - <<: *linux_x86_generic_expansions + <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-expansions test_flags: >- - --mongodSetParameters="{maxNumberOfTransactionOperationsInSingleOplogEntry: 2}" - --excludeWithAnyTags=exclude_from_large_txns + --additionalFeatureFlagsFile all_feature_flags.txt + --excludeWithAnyTags=incompatible_with_shard_merge + --excludeWithAnyTags=cqf_incompatible + --mongosSetParameters="{featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: 'tryBonsai'}" + --mongodSetParameters="{featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: 'tryBonsai'}" tasks: - - name: auth_gen - - name: auth_audit_gen - - name: causally_consistent_jscore_txns_passthrough - - name: change_streams - - name: change_streams_whole_db_passthrough + - name: analyze_shard_key_jscore_passthrough_gen + - name: .cqf + - name: lint_fuzzer_sanity_patch + - name: test_api_version_compatibility + # - name: burn_in_tasks_gen + # depends_on: + # - name: version_burn_in_gen + # variant: generate-tasks-for-version + # omit_generated_tasks: true + # - name: archive_dist_test_debug + # variant: *linux_x86_dynamic_compile_variant_name + - name: check_feature_flag_tags + - name: check_for_todos + - name: .aggfuzzer + - name: .aggregation + - name: aggregation_repeat_queries + - name: audit + - name: .auth + - name: buildscripts_test + - name: resmoke_end2end_tests + - name: unittest_shell_hang_analyzer_gen + - name: .causally_consistent !.sharding + - name: .change_streams + - name: .change_stream_fuzzer + # TODO SERVER-57866: Remove the explicit mentions of change stream multitenant suites. + - name: change_streams_multitenant_passthrough + - name: change_streams_multitenant_sharded_collections_passthrough + - name: .misc_js + - name: .clustered_collections + - name: .concurrency !.large !.ubsan !.no_txns + - name: .concurrency .large !.ubsan !.no_txns + distros: + - rhel80-medium + - name: .config_fuzzer !.large + - name: .config_fuzzer .large + distros: + - rhel80-medium + - name: .config_fuzzer_stress + distros: + - rhel80-large + - name: disk_wiredtiger + - name: .encrypt + - name: feature_flag_multiversion_gen + - name: idl_tests + - name: initial_sync_fuzzer_gen + - name: .jscore .common + - name: jsCore_column_store_indexes + - name: jsCore_min_batch_repeat_queries_ese_gsm + - name: jsCore_txns_large_txns_format + - name: jsCore_wildcard_indexes + - name: json_schema + - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer. 
+ - name: libunwind_tests + - name: .multiversion_sanity_check + - name: mqlrun + - name: .multi_shard + - name: multi_stmt_txn_jscore_passthrough_with_migration_gen + - name: multiversion_gen + - name: powercycle_smoke + - name: .query_fuzzer + - name: .random_multiversion_ds + - name: .read_write_concern .large + distros: + - rhel80-medium + - name: .read_write_concern !.large + - name: .replica_sets !.encrypt !.auth + distros: + - rhel80-medium + - name: replica_sets_api_version_jscore_passthrough_gen + - name: replica_sets_reconfig_jscore_passthrough_gen + - name: replica_sets_reconfig_jscore_stepdown_passthrough_gen + distros: + - rhel80-medium + - name: replica_sets_reconfig_kill_primary_jscore_passthrough_gen + distros: + - rhel80-medium + - name: retryable_writes_jscore_passthrough_gen + - name: retryable_writes_jscore_stepdown_passthrough_gen + distros: + - rhel80-medium + - name: .read_only + - name: .rollbackfuzzer + - name: sasl + - name: search + - name: search_auth + - name: search_pinned_connections_auth + - name: search_ssl + - name: session_jscore_passthrough + - name: .sharding .jscore !.wo_snapshot !.multi_stmt + - name: sharding_api_version_jscore_passthrough_gen + - name: sharding_api_strict_passthrough_gen + - name: .sharding .txns + - name: .sharding .common + - name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen + - name: sharded_multi_stmt_txn_jscore_passthrough + - name: .serverless + - name: sharding_max_mirroring_opportunistic_secondary_targeting_ese_gcm_gen + - name: .updatefuzzer + - name: secondary_reads_passthrough_gen + - name: .shard_split + - name: .shard_merge + - name: streams + - name: query_stats_passthrough + - name: query_stats_passthrough_writeonly + +- <<: *linux_x86_dynamic_compile_variant_dependency + name: &enterprise-rhel-80-64-bit-large-txns-format enterprise-rhel-80-64-bit-large-txns-format + display_name: "Enterprise RHEL 8.0 (large transactions format)" + cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter + modules: + - enterprise + run_on: + - rhel80-small + expansions: + <<: *linux_x86_generic_expansions + test_flags: >- + --mongodSetParameters="{maxNumberOfTransactionOperationsInSingleOplogEntry: 2}" + --excludeWithAnyTags=exclude_from_large_txns + tasks: + - name: auth_gen + - name: auth_audit_gen + - name: causally_consistent_jscore_txns_passthrough + - name: change_streams + - name: change_streams_whole_db_passthrough - name: change_streams_whole_cluster_passthrough - name: concurrency_replication_gen - name: concurrency_replication_multi_stmt_txn_gen @@ -1739,9 +1604,11 @@ buildvariants: - name: concurrency_sharded_with_stepdowns_and_balancer_gen - name: concurrency_sharded_initial_sync_gen - name: initial_sync_fuzzer_gen + - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen - name: jsCore - name: jsCore_txns - - name: .logical_session_cache .repl - name: .multi_shard - name: multi_stmt_txn_jscore_passthrough_with_migration_gen - name: multiversion_auth_gen @@ -1791,43 +1658,55 @@ buildvariants: # No feature flag tests since they aren't compatible with the older binaries. 
test_flags: >- --runNoFeatureFlagTests - --excludeWithAnyTags=incompatible_with_shard_merge + --excludeWithAnyTags=incompatible_with_shard_merge, -- <<: *linux-x86-multiversion-template +# This variant exists because this is the only way to test future multiversion tags +# version_expansions_gen will pretend we are upgrading to "bv_future_git_tag" +# which is like simulating a branching task +- &enterprise-rhel-80-64-bit-future-git-tag-multiversion-template + <<: *linux-x86-multiversion-template name: &enterprise-rhel-80-64-bit-future-git-tag-multiversion enterprise-rhel-80-64-bit-future-git-tag-multiversion display_name: "Enterprise RHEL 8.0 (future git tag multiversion)" expansions: - compile_flags: >- - -j$(grep -c ^processor /proc/cpuinfo) - --ssl - --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - MONGO_DISTMOD=rhel80 - --link-model=dynamic - multiversion_platform: rhel80 - multiversion_edition: enterprise - repo_edition: enterprise + <<: *linux_x86_generic_expansions scons_cache_scope: shared scons_cache_mode: all - num_scons_link_jobs_available: 0.99 - tooltags: "ssl sasl gssapi" - build_mongoreplay: true - large_distro_name: rhel80-medium - resmoke_jobs_factor: 0.25 + resmoke_jobs_factor: 0.5 bv_future_git_tag: r100.0.0-9999 + compile_variant: linux-x86-dynamic-compile-future-tag-multiversion test_flags: >- --excludeWithAnyTags=future_git_tag_incompatible - compile_variant: *enterprise-rhel-80-64-bit-future-git-tag-multiversion + depends_on: + - name: version_expansions_gen + variant: &enterprise-rhel-80-64-bit-future-git-tag-multiversion-version-gen enterprise-rhel-80-64-bit-future-git-tag-multiversion-version-gen + - name: version_gen + variant: generate-tasks-for-version + # This is added because of EVG-18211. + # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true + - name: archive_dist_test + variant: linux-x86-dynamic-compile-future-tag-multiversion + tasks: + - name: .multiversion !.future_git_tag_incompatible + - name: .multiversion_future_git_tag + # This task does not work because it depends on archive_dist_test_debug + # Because we override the task dependencies in the future git tag variants we can't have + # multiple tasks in one variant depend on multiple different compile variant tasks + # If we decide we need this task we can add it to its own variant that depends on archive_dist_test_debug + # - name: generate_buildid_to_debug_symbols_mapping + +# This variant exists because this is the only way to correctly have +# enterprise-rhel-80-64-bit-future-git-tag-multiversion depend on the "correct" version_expansions_gen task +# Without this extra variant, depending on version_expansions_gen will yield the version_expansions_gen task in version_gen +# Adding this variant removes that race condition +- <<: *enterprise-rhel-80-64-bit-future-git-tag-multiversion-template + name: *enterprise-rhel-80-64-bit-future-git-tag-multiversion-version-gen + display_name: "Enterprise RHEL 8.0 (future git tag multiversion) (version gen)" depends_on: null tasks: - name: version_expansions_gen distros: - rhel80-small - - name: compile_test_and_package_parallel_core_stream_TG - distros: - - rhel80-xlarge - - name: .multiversion !.future_git_tag_incompatible - - name: .multiversion_future_git_tag - - name: generate_buildid_to_debug_symbols_mapping - name: &enterprise-rhel-80-64-bit-suggested enterprise-rhel-80-64-bit-suggested display_name: "* Enterprise RHEL 8.0" @@ -1873,7 +1752,6 @@ buildvariants: - name: .jscore
.common !.decimal !.sharding - name: jsCore_txns_large_txns_format - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: libunwind_tests - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough @@ -1889,8 +1767,8 @@ buildvariants: - name: test_packages distros: - ubuntu2004-package - # TODO: BF-24515 re-enable when build failure cause determined and resolved - #- name: selinux_rhel8_enterprise + - name: vector_search + - name: selinux_rhel8_enterprise - name: generate_buildid_to_debug_symbols_mapping - name: &enterprise-rhel-80-64-bit-build-metrics enterprise-rhel-80-64-bit-build-metrics @@ -1919,14 +1797,14 @@ buildvariants: modules: - enterprise run_on: - - amazon2022-arm64-large + - amazon2023.0-arm64-large expansions: compile_flags: >- --ssl MONGO_DISTMOD=rhel82 -j$(grep -c ^processor /proc/cpuinfo) repo_edition: enterprise - large_distro_name: amazon2022-arm64-large + large_distro_name: amazon2023.0-arm64-large num_scons_link_jobs_available: 0.1 tasks: - name: build_metrics_gen_TG @@ -1996,35 +1874,6 @@ buildvariants: - name: noPassthroughHotBackups_gen - name: generate_buildid_to_debug_symbols_mapping -- name: &ubuntu1804-container ubuntu1804-container - display_name: "Ubuntu 18.04 Container" - cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. - run_on: - - ubuntu1804-container-server - expansions: - resmoke_jobs_factor: 1 - compile_variant: *ubuntu1804-container - disable_shared_scons_cache: true - compile_flags: >- - MONGO_DISTMOD=ubuntu1804 - --opt=on - -j$(grep -c ^processor /proc/cpuinfo) - --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - tooltags: "" - build_mongoreplay: true - test_flags: >- - --excludeWithAnyTags=requires_os_access - tasks: - - name: compile_and_archive_dist_test_then_package_TG - distros: - - ubuntu1804-build - - name: jsCore - - name: sharding_gen - - name: replica_sets_gen - - name: generate_buildid_to_debug_symbols_mapping - distros: - - ubuntu1804-build - - name: &enterprise-rhel-72-s390x-compile enterprise-rhel-72-s390x-compile display_name: "Enterprise RHEL 7.2 s390x Compile" modules: @@ -2097,8 +1946,8 @@ buildvariants: - &enterprise-amazon-linux2-arm64-all-feature-flags-template <<: *amazon_linux2_arm64_compile_variant_dependency name: &enterprise-amazon-linux2-arm64-all-feature-flags enterprise-amazon-linux2-arm64-all-feature-flags - display_name: "* Amazon Linux 2 arm64 (all feature flags)" - cron: "0 4 * * *" # From the ${project_required_suggested_cron} parameter + display_name: "! 
Amazon Linux 2 arm64 (all feature flags)" + cron: "0 */4 * * *" # From the ${project_required_suggested_cron} parameter modules: - enterprise run_on: @@ -2119,15 +1968,10 @@ buildvariants: --additionalFeatureFlagsFile all_feature_flags.txt --excludeWithAnyTags=incompatible_with_amazon_linux,incompatible_with_shard_merge,requires_external_data_source tasks: - - name: cqf - - name: cqf_disabled_pipeline_opt - - name: cqf_passthrough - - name: cqf_parallel + - name: analyze_shard_key_jscore_passthrough_gen - name: query_golden_classic - - name: query_golden_cqf - name: lint_fuzzer_sanity_patch - name: test_api_version_compatibility - - name: burn_in_tests_gen - name: check_feature_flag_tags - name: check_for_todos - name: .aggfuzzer @@ -2135,19 +1979,22 @@ buildvariants: - name: aggregation_repeat_queries - name: audit - name: .auth - #- name: burn_in_tags_gen - name: buildscripts_test - name: resmoke_end2end_tests - name: unittest_shell_hang_analyzer_gen - name: .causally_consistent !.sharding - name: .change_streams - name: .change_stream_fuzzer + # TODO SERVER-57866: Remove the explicit mentions of change stream multitenant suites. - name: change_streams_multitenant_passthrough - name: change_streams_multitenant_sharded_collections_passthrough + - name: cqf + - name: cqf_disabled_pipeline_opt + - name: cqf_parallel - name: .misc_js - name: .clustered_collections - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only - - name: .concurrency .large !.ubsan !.no_txns !.debug_only + - name: .concurrency !.large !.ubsan !.no_txns + - name: .concurrency .large !.ubsan !.no_txns - name: .config_fuzzer !.large - name: .config_fuzzer .large distros: @@ -2178,7 +2025,8 @@ buildvariants: - name: .random_multiversion_ds - name: .read_write_concern .large - name: .read_write_concern !.large - - name: .replica_sets !.encrypt !.auth + - name: .replica_sets !.encrypt !.auth !.ignore_non_generated_replica_sets_jscore_passthrough + - name: replica_sets_jscore_passthrough_gen - name: replica_sets_api_version_jscore_passthrough_gen - name: replica_sets_reconfig_jscore_passthrough_gen - name: replica_sets_reconfig_jscore_stepdown_passthrough_gen @@ -2192,112 +2040,31 @@ buildvariants: - name: search_auth - name: search_pinned_connections_auth - name: search_ssl + - name: secondary_reads_passthrough_gen + - name: .serverless + distros: + - amazon2-arm64-large - name: session_jscore_passthrough + - name: .shard_split + - name: .shard_merge - name: .sharding .jscore !.wo_snapshot !.multi_stmt - name: sharding_api_version_jscore_passthrough_gen + - name: sharding_api_strict_passthrough_gen - name: .sharding .txns - name: .sharding .common - - name: sharded_multi_stmt_txn_jscore_passthrough - - name: .serverless - distros: - - amazon2-arm64-large - name: sharding_max_mirroring_opportunistic_secondary_targeting_ese_gcm_gen + - name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen + - name: sharded_multi_stmt_txn_jscore_passthrough + - name: streams - name: .updatefuzzer - - name: secondary_reads_passthrough_gen - - name: .shard_split - - name: .shard_merge - - name: telemetry_passthrough - + - name: vector_search + - name: query_stats_passthrough + - name: query_stats_passthrough_writeonly ########################################### # Experimental buildvariants # ########################################### -- name: &rhel80-debug-asan-classic-engine rhel80-debug-asan-classic-engine - display_name: "* ASAN Enterprise RHEL 8.0 DEBUG (Classic Engine)" - cron: "0 4 * * *" # From the 
${project_nightly_cron} parameter. - modules: - - enterprise - run_on: - - rhel80-build - stepback: false - expansions: - additional_package_targets: >- - archive-mongocryptd - archive-mongocryptd-debug - lang_environment: LANG=C - san_options: *aubsan_options - compile_flags: >- - --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars - --dbg=on - --opt=on - --allocator=system - --sanitize=address - --ssl - --ocsp-stapling=off - --enable-free-mon=on - -j$(grep -c ^processor /proc/cpuinfo) - compile_variant: *rhel80-debug-asan-classic-engine - test_flags: >- - --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}" - --excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling - multiversion_platform: rhel80 - multiversion_edition: enterprise - resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under ASAN build. - hang_analyzer_dump_core: false - scons_cache_scope: shared - exec_timeout_secs: 14400 # 3 hour timeout - separate_debug: off - large_distro_name: rhel80-build - tasks: - - name: compile_test_benchmark_and_package_serial_TG - - name: .aggregation !.sbe_only - - name: .auth - - name: audit - - name: .benchmarks - - name: .causally_consistent !.wo_snapshot - - name: .change_streams - - name: .misc_js - - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.compute_mode - - name: .encrypt - - name: free_monitoring - - name: external_auth - - name: external_auth_aws - - name: external_auth_oidc - - name: initial_sync_fuzzer_gen - - name: compile_integration_and_test_parallel_stream_TG - distros: - - rhel80-large - - name: .jscore .common !.sbe_only - - name: jsCore_min_batch_repeat_queries_ese_gsm - - name: jsCore_txns_large_txns_format - - name: json_schema - - name: .logical_session_cache - - name: .multi_shard .common - - name: .query_fuzzer - - name: .read_write_concern - - name: replica_sets_large_txns_format_jscore_passthrough - - name: .replica_sets !.multi_oplog - - name: .replica_sets .encrypt - - name: .resharding_fuzzer - - name: .retry - - name: .read_only - - name: .rollbackfuzzer - - name: .updatefuzzer - - name: sasl - - name: secondary_reads_passthrough_gen - - name: session_jscore_passthrough - - name: .sharding .jscore !.wo_snapshot - - name: .sharding .common !.csrs !.jstestfuzz - - name: .watchdog - - name: .stitch - - name: .serverless - - name: unittest_shell_hang_analyzer_gen - - name: .updatefuzzer - - name: server_discovery_and_monitoring_json_test_TG - - name: server_selection_json_test_TG - - name: generate_buildid_to_debug_symbols_mapping - - &rhel80-debug-ubsan-all-feature-flags-template name: &rhel80-debug-ubsan-all-feature-flags rhel80-debug-ubsan-all-feature-flags display_name: "* Shared Library UBSAN Enterprise RHEL 8.0 DEBUG (all feature flags)" @@ -2327,7 +2094,7 @@ buildvariants: # To force disable feature flags even on the all feature flags variant, please use this file: # buildscripts/resmokeconfig/fully_disabled_feature_flags.yml test_flags: >- - --excludeWithAnyTags=requires_ocsp_stapling + --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits --excludeWithAnyTags=incompatible_with_shard_merge --additionalFeatureFlagsFile all_feature_flags.txt multiversion_platform: rhel80 @@ -2342,62 +2109,66 @@ buildvariants: - name: disk_wiredtiger - name: generate_buildid_to_debug_symbols_mapping -- name: &rhel80-debug-ubsan-classic-engine rhel80-debug-ubsan-classic-engine - display_name: "* UBSAN Enterprise RHEL 8.0 DEBUG (Classic Engine)" - cron: "0 4 * * *" # From the 
${project_nightly_cron} parameter. +- &rhel80-debug-aubsan-lite-template + <<: *linux_debug_aubsan_compile_variant_dependency + name: &rhel80-debug-aubsan-lite rhel80-debug-aubsan-lite + display_name: "* Shared Library {A,UB}SAN Enterprise RHEL 8.0 DEBUG" + cron: "0 */4 * * *" # From the ${project_required_suggested_cron} parameter modules: - enterprise run_on: - rhel80-build - stepback: false - expansions: - additional_package_targets: >- - archive-mongocryptd - archive-mongocryptd-debug + expansions: &aubsan-lite-required-expansions + compile_variant: *linux_debug_aubsan_compile_variant_name lang_environment: LANG=C san_options: *aubsan_options - compile_variant: *rhel80-debug-ubsan-classic-engine - compile_flags: >- - --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars - --dbg=on - --opt=on - --sanitize=undefined - --ssl - --ocsp-stapling=off - --enable-free-mon=on - -j$(grep -c ^processor /proc/cpuinfo) - test_flags: >- - --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}" - --excludeWithAnyTags=requires_ocsp_stapling + test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits + resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under {A,UB}SAN build. + hang_analyzer_dump_core: false + max_sub_suites: 3 + num_scons_link_jobs_available: 0.99 + large_distro_name: rhel80-build multiversion_platform: rhel80 multiversion_edition: enterprise - resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under UBSAN build. - scons_cache_scope: shared - separate_debug: off - large_distro_name: rhel80-build + gcov_tool: /opt/mongodbtoolchain/v4/bin/gcov + + tasks: + - name: jsCore + - name: jsCore_txns + +- <<: *rhel80-debug-aubsan-lite-template + name: &rhel80-debug-aubsan-classic-engine rhel80-debug-aubsan-classic-engine + display_name: "* {A,UB}SAN Enterprise RHEL 8.0 DEBUG (Classic Engine)" + cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. 
+ expansions: + <<: *aubsan-lite-required-expansions + test_flags: >- + --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}" + --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits tasks: - - name: compile_test_benchmark_and_package_serial_TG - name: .aggregation !.sbe_only - name: .auth - name: audit - - name: .benchmarks - name: .causally_consistent !.wo_snapshot - name: .change_streams + # - name: disk_wiredtiger - name: .misc_js - - name: .concurrency !.no_txns !.repl !.kill_terminate !.compute_mode - - name: disk_wiredtiger + - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.compute_mode - name: .encrypt - name: free_monitoring + - name: external_auth + - name: external_auth_aws + - name: external_auth_oidc - name: initial_sync_fuzzer_gen - - name: compile_integration_and_test_parallel_stream_TG - distros: - - rhel80-large + - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen - name: .jscore .common !.sbe_only - name: jsCore_min_batch_repeat_queries_ese_gsm - name: jsCore_txns_large_txns_format - name: json_schema - - name: .logical_session_cache .one_sec - name: .multi_shard .common + - name: .query_fuzzer - name: .read_write_concern - name: replica_sets_large_txns_format_jscore_passthrough - name: .replica_sets !.multi_oplog @@ -2411,41 +2182,12 @@ buildvariants: - name: session_jscore_passthrough - name: .sharding .jscore !.wo_snapshot - name: .sharding .common !.csrs !.jstestfuzz - - name: .stitch - name: .updatefuzzer - name: .serverless - - name: watchdog_wiredtiger - - name: server_discovery_and_monitoring_json_test_TG - - name: server_selection_json_test_TG + - name: unittest_shell_hang_analyzer_gen + - name: .watchdog - name: generate_buildid_to_debug_symbols_mapping -- &rhel80-debug-aubsan-lite-template - <<: *linux_debug_aubsan_compile_variant_dependency - name: &rhel80-debug-aubsan-lite rhel80-debug-aubsan-lite - display_name: "* Shared Library {A,UB}SAN Enterprise RHEL 8.0 DEBUG" - cron: "0 */4 * * *" # From the ${project_required_suggested_cron} parameter - modules: - - enterprise - run_on: - - rhel80-build - expansions: &aubsan-lite-required-expansions - compile_variant: *linux_debug_aubsan_compile_variant_name - lang_environment: LANG=C - san_options: *aubsan_options - test_flags: --excludeWithAnyTags=requires_ocsp_stapling - resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under {A,UB}SAN build. - hang_analyzer_dump_core: false - max_sub_suites: 3 - num_scons_link_jobs_available: 0.99 - large_distro_name: rhel80-build - multiversion_platform: rhel80 - multiversion_edition: enterprise - gcov_tool: /opt/mongodbtoolchain/v4/bin/gcov - - tasks: - - name: jsCore - - name: jsCore_txns - - <<: *rhel80-debug-aubsan-lite-template name: &rhel80-debug-aubsan-lite-all-feature-flags-required rhel80-debug-aubsan-lite-all-feature-flags-required display_name: "! 
Shared Library {A,UB}SAN Enterprise RHEL 8.0 DEBUG (all feature flags)" @@ -2480,33 +2222,37 @@ buildvariants: - name: audit - name: .aggregation - name: .auth + - name: .concurrency !.no_txns !.repl !.kill_terminate - name: .config_fuzzer - name: .config_fuzzer_stress - name: cqf - name: cqf_disabled_pipeline_opt - - name: cqf_passthrough - name: cqf_parallel - name: .causally_consistent !.wo_snapshot - name: .change_streams + # TODO SERVER-57866: Remove the explicit mentions of change stream multitenant suites. - name: change_streams_multitenant_passthrough - name: change_streams_multitenant_sharded_collections_passthrough + # - name: disk_wiredtiger - name: external_auth - name: external_auth_aws - name: external_auth_oidc - name: .encrypt - name: free_monitoring + - name: generate_buildid_to_debug_symbols_mapping - name: initial_sync_fuzzer_gen + - name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + - name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen - name: .jscore .common - name: jsCore_column_store_indexes - name: jsCore_min_batch_repeat_queries_ese_gsm - name: jsCore_txns_large_txns_format - name: jsCore_wildcard_indexes - name: json_schema - - name: .logical_session_cache - name: .misc_js - name: .multi_shard .common - name: query_golden_classic - - name: query_golden_cqf - name: .query_fuzzer - name: .read_write_concern - name: replica_sets_large_txns_format_jscore_passthrough @@ -2525,7 +2271,8 @@ buildvariants: - name: .serverless - name: .shard_split - name: .shard_merge - - name: telemetry_passthrough + - name: query_stats_passthrough + - name: query_stats_passthrough_writeonly - name: .updatefuzzer - name: .watchdog @@ -2575,38 +2322,43 @@ buildvariants: large_distro_name: rhel80-medium multiversion_platform: rhel80 multiversion_edition: enterprise + test_flags: >- + --excludeWithAnyTags=tsan_incompatible tasks: - name: compile_test_and_package_serial_TG # - name: compile_integration_and_test_parallel_stream_TG # Not passing # - name: test_api_version_compatibility # Not relevant for TSAN - # - name: burn_in_tests_gen # No burn in tests needed - name: check_feature_flag_tags - # - name: .aggfuzzer !.feature_flag_guarded # Not passing + - name: .aggfuzzer !.feature_flag_guarded # - name: .aggregation !.feature_flag_guarded # Not passing - name: audit # - name: .auth # Not passing # - name: burn_in_tags_gen # No burn in tests needed + # depends_on: + # - name: version_burn_in_gen + # variant: generate-tasks-for-version + # omit_generated_tasks: true + # - name: archive_dist_test_debug + # variant: *enterprise-rhel80-debug-tsan - name: buildscripts_test - # - name: resmoke_end2end_tests # Not passing # - name: unittest_shell_hang_analyzer_gen # Not passing # - name: .config_fuzzer # Not passing - name: config_fuzzer_jsCore - name: cqf - name: cqf_disabled_pipeline_opt - - name: cqf_passthrough - name: cqf_parallel # - name: .causally_consistent !.sharding # Not passing # - name: .change_streams # Not passing # - name: .change_stream_fuzzer # Not passing # - name: .misc_js # Not passing - # - name: .concurrency !.large !.ubsan !.no_txns !.debug_only # Not passing - # - name: .concurrency .large !.ubsan !.no_txns !.debug_only # Not passing + # - name: .concurrency !.large !.ubsan !.no_txns # Not passing + # - name: .concurrency .large !.ubsan !.no_txns # Not passing # distros: # - rhel80-medium - name: disk_wiredtiger # - name: .encrypt # Not passing # - name: idl_tests # Not 
relevant for TSAN - # - name: initial_sync_fuzzer_gen # Not passing + - name: initial_sync_fuzzer_gen # distros: # - rhel80-medium - name: jsCore @@ -2616,7 +2368,7 @@ buildvariants: - name: jsCore_min_batch_repeat_queries_ese_gsm - name: jsCore_txns_large_txns_format - name: json_schema - # - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer. # Not passing + - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer. # - name: libunwind_tests # Cant be used because tsan does not use libunwind # - name: .multiversion_sanity_check # Multiversion does not pass yet, also making this work is going to be pretty tricky - name: mqlrun @@ -2625,7 +2377,6 @@ buildvariants: # - name: multiversion_gen # Multiversion does not pass yet, also making this work is going to be pretty tricky - name: .query_fuzzer - name: query_golden_classic - - name: query_golden_cqf # - name: .random_multiversion_ds # Multiversion does not pass yet, also making this work is going to be pretty tricky # - name: .read_write_concern .large # Not passing # distros: @@ -2646,9 +2397,8 @@ buildvariants: # - name: retryable_writes_jscore_stepdown_passthrough_gen # Not passing # distros: # - rhel80-medium - - name: read_only # TODO: replace with .read_only after read_only_sharded is fixed - # - name: .read_only # Not passing, see above - # - name: .rollbackfuzzer # Not passing + - name: .read_only + - name: .rollbackfuzzer - name: sasl - name: search - name: search_auth @@ -2673,6 +2423,7 @@ buildvariants: - name: server_selection_json_test_TG distros: - rhel80-xlarge + - name: vector_search - name: generate_buildid_to_debug_symbols_mapping - &enterprise-rhel80-debug-complete-tsan-template @@ -2691,24 +2442,20 @@ buildvariants: tasks: - name: compile_test_and_package_serial_TG - name: compile_integration_and_test_parallel_stream_TG - - name: .aggfuzzer !.feature_flag_guarded - name: .aggregation !.feature_flag_guarded - name: .auth - - name: resmoke_end2end_tests - name: unittest_shell_hang_analyzer_gen - name: .causally_consistent !.sharding - name: .change_streams - name: .change_stream_fuzzer - name: .misc_js - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only !.compute_mode - - name: .concurrency .large !.ubsan !.no_txns !.debug_only !.compute_mode + - name: .concurrency !.large !.ubsan !.no_txns !.compute_mode + - name: .concurrency .large !.ubsan !.no_txns !.compute_mode distros: - rhel80-large - name: .config_fuzzer - name: .encrypt - - name: initial_sync_fuzzer_gen - name: .jscore .common !jsCore - - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer. 
# - name: libunwind_tests # Cant be used because tsan does not use libunwind # - name: .multiversion_sanity_check # Multiversion does not pass yet, also making this work is going to be pretty tricky - name: .multi_shard @@ -2734,8 +2481,6 @@ buildvariants: - name: retryable_writes_jscore_stepdown_passthrough_gen distros: - rhel80-medium - - name: .read_only - - name: .rollbackfuzzer - name: .sharding .jscore !.wo_snapshot !.multi_stmt - name: sharding_api_version_jscore_passthrough_gen - name: .sharding .txns @@ -2853,7 +2598,6 @@ buildvariants: - name: .jscore .common - name: noPassthrough_gen - name: noPassthroughWithMongod_gen - - name: .logical_session_cache .one_sec - name: .sharding .jscore !.wo_snapshot !.multi_stmt - name: .sharding .common !.csrs !.encrypt - name: sharding_max_mirroring_opportunistic_secondary_targeting_gen @@ -2897,7 +2641,6 @@ buildvariants: - name: .sharding .causally_consistent !.wo_snapshot - name: .concurrency .common !.kill_terminate - name: .jscore .common - - name: .logical_session_cache .one_sec - name: .sharding .jscore !.wo_snapshot !.multi_stmt - name: .sharding .common !.csrs !.encrypt - name: sharding_max_mirroring_opportunistic_secondary_targeting_gen @@ -2940,7 +2683,6 @@ buildvariants: - name: .sharding .causally_consistent !.wo_snapshot - name: .concurrency .common !.kill_terminate - name: .jscore .common - - name: .logical_session_cache .one_sec - name: .sharding .jscore !.wo_snapshot !.multi_stmt - name: .sharding .common !.csrs !.encrypt - name: sharding_max_mirroring_opportunistic_secondary_targeting_gen @@ -2988,7 +2730,6 @@ buildvariants: - name: .sharding .causally_consistent !.wo_snapshot - name: .concurrency .common !.kill_terminate - name: .jscore .common - - name: .logical_session_cache .one_sec - name: .sharding .jscore !.wo_snapshot !.multi_stmt - name: .sharding .common !.csrs !.encrypt - name: sharding_max_mirroring_opportunistic_secondary_targeting_gen @@ -3036,7 +2777,6 @@ buildvariants: - name: .sharding .causally_consistent !.wo_snapshot - name: .concurrency .common !.kill_terminate - name: .jscore .common - - name: .logical_session_cache .one_sec - name: .sharding .jscore !.wo_snapshot !.multi_stmt - name: .sharding .common !.csrs !.encrypt - name: sharding_max_mirroring_opportunistic_secondary_targeting_gen @@ -3083,8 +2823,6 @@ buildvariants: patch_only: true - name: .jscore .common patch_only: true - - name: .logical_session_cache .one_sec - patch_only: true - name: .sharding .jscore !.wo_snapshot !.multi_stmt patch_only: true - name: .sharding .common !.csrs !.encrypt @@ -3112,20 +2850,20 @@ buildvariants: tasks: - name: win_shared_scons_cache_pruning -- <<: *enterprise-rhel-80-64-bit-dynamic-template +- <<: *enterprise-amazon-linux2-arm64-all-feature-flags-template name: &commit-queue commit-queue display_name: "~ Commit Queue" cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter stepback: false expansions: - <<: *linux_x86_generic_expansions + <<: *linux_arm64_generic_expansions scons_cache_scope: shared scons_cache_mode: all - commit_queue_alternate_cache: linux-x86-dynamic-compile-required + commit_queue_alternate_cache: amazon-linux2-arm64-compile has_packages: false compile_flags: >- --ssl - MONGO_DISTMOD=rhel80 + MONGO_DISTMOD=amazon2 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --link-model=dynamic @@ -3139,26 +2877,28 @@ buildvariants: depends_on: [] tasks: - name: compile_ninja_quick_TG - distros: - - rhel80-xlarge-commitqueue - name: 
compile_test_and_package_parallel_core_stream_TG distros: - - rhel80-xlarge-commitqueue + - amazon2-arm64-xlarge-commitqueue - name: compile_test_and_package_parallel_unittest_stream_TG distros: - - rhel80-xlarge-commitqueue + - amazon2-arm64-xlarge-commitqueue - name: compile_test_and_package_parallel_dbtest_stream_TG distros: - - rhel80-xlarge-commitqueue + - amazon2-arm64-xlarge-commitqueue - name: jsCore distros: - - rhel80-xlarge-commitqueue + - amazon2-arm64-large - name: .lint - name: test_api_version_compatibility - name: validate_commit_message + - name: lint_large_files_check - name: check_feature_flag_tags - name: compile_venv_deps_check - name: resmoke_validation_tests + - name: version_gen_validation + distros: + - ubuntu2004-small - name: &windows-dynamic-visibility-test windows-dynamic-visibility-test display_name: "~ Shared Library Windows (visibility test)" @@ -3185,38 +2925,6 @@ buildvariants: ### QO & QE Patch-Specific Build Variants ### -# The CQF feature flag is currently part of the always-disabled feature flags list, so it is not -# enabled in all-feature-flags variants besides this one. This variant allows us to get some initial -# coverage for CQF without disrupting coverage for other feature flags (in particular, SBE). -# TODO SERVER-71163: Remove this variant once the CQF feature flag is not always-disabled. -- <<: *linux_x86_dynamic_compile_variant_dependency - name: enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-and-cqf-enabled-patch-only - display_name: "Shared Library Enterprise RHEL 8.0 Query Patch Only (all feature flags and CQF enabled)" - cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history. - modules: - - enterprise - run_on: - - rhel80-small - stepback: false - expansions: - <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-expansions - test_flags: >- - --excludeWithAnyTags=cqf_incompatible - --excludeWithAnyTags=resource_intensive - --mongosSetParameters="{featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: 'tryBonsai'}" - --mongodSetParameters="{featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: 'tryBonsai'}" - tasks: - - name: .aggregation .common - - name: causally_consistent_jscore_txns_passthrough - - name: cqf - - name: cqf_disabled_pipeline_opt - - name: cqf_parallel - - name: .jscore .common - - name: .jstestfuzz !.flow_control # Flow control jstestfuzz take longer. 
- - name: noPassthrough_gen - - name: query_golden_cqf - - name: retryable_writes_jscore_passthrough_gen - - <<: *enterprise-rhel-80-64-bit-dynamic-classic-engine name: &enterprise-rhel-80-64-bit-dynamic-classic-engine-query-patch-only enterprise-rhel-80-64-bit-dynamic-classic-engine-query-patch-only display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (Classic Engine)" @@ -3231,7 +2939,7 @@ buildvariants: --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}" --excludeWithAnyTags=resource_intensive -- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template +- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-template name: &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-query-patch-only enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-query-patch-only display_name: "~ Shared Library Enterprise RHEL 8.0 Query Patch Only (all feature flags)" cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history. @@ -3246,7 +2954,8 @@ buildvariants: --excludeWithAnyTags=resource_intensive --excludeWithAnyTags=incompatible_with_shard_merge -- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-template +### Security Patch-Specific Build Variants ### +- <<: *enterprise-rhel-80-64-bit-dynamic-all-feature-flags-template name: &enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-security-patch-only enterprise-rhel-80-64-bit-dynamic-all-feature-flags-required-security-patch-only display_name: "~ Shared Library Enterprise RHEL 8.0 Security Patch Only (all feature flags)" cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history. @@ -3256,14 +2965,33 @@ buildvariants: max_sub_suites: 15 tasks: - name: burn_in_tests_gen + depends_on: + - name: version_burn_in_gen + variant: generate-tasks-for-version + omit_generated_tasks: true + - name: archive_dist_test_debug + variant: *linux_x86_dynamic_compile_variant_name - name: .audit .patch_build - - name: .sasl .patch_build - name: .encrypt .patch_build + - name: .sasl .patch_build - name: external_auth - name: external_auth_aws - name: external_auth_oidc - name: lint_fuzzer_sanity_patch +- <<: *enterprise-windows-template + name: &windows-compile-security-patch-only windows-compile-security-patch-only + display_name: "~ Windows Security Patch Only" + cron: "0 4 * * 0" # From the ${project_weekly_cron} parameter # This is a patch-only variant but we run on mainline to pick up task history. + expansions: + <<: *windows_required_expansions + exe: ".exe" + tasks: + - name: .encrypt .patch_build + - name: .sasl .patch_build + - name: external_auth_aws + - name: external_auth_oidc + - name: &enterprise-ubuntu1804-64-libvoidstar enterprise-ubuntu1804-64-libvoidstar display_name: "~ Enterprise Ubuntu 18.04 w/ libvoidstar" modules: @@ -3329,43 +3057,3 @@ buildvariants: - name: compile_dist_test_TG distros: - windows-vsCurrent-large - -- &rhel80-debug-aubsan-lite_fuzzer-template - name: &rhel80-debug-aubsan-lite_fuzzer rhel80-debug-aubsan-lite_fuzzer - display_name: "{A,UB}SAN Enterprise RHEL 8.0 FUZZER" - cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. 
- modules: - - enterprise - run_on: - - rhel80-build - stepback: false - expansions: - additional_package_targets: >- - archive-mongocryptd - archive-mongocryptd-debug - lang_environment: LANG=C - toolchain_version: stable - # If you add anything to san_options, make sure the appropriate changes are - # also made to SConstruct. - san_options: *aubsan_options - compile_flags: >- - LINKFLAGS=-nostdlib++ - LIBS=stdc++ - --variables-files=etc/scons/mongodbtoolchain_${toolchain_version}_clang.vars - --dbg=on - --opt=on - --allocator=system - --sanitize=undefined,address,fuzzer - --ssl - --ocsp-stapling=off - -j$(grep -c ^processor /proc/cpuinfo) - test_flags: --excludeWithAnyTags=requires_ocsp_stapling - resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under {A,UB}SAN build. - hang_analyzer_dump_core: false - scons_cache_scope: shared - separate_debug: off - compile_variant: *rhel80-debug-aubsan-lite_fuzzer - display_tasks: - - *libfuzzertests - tasks: - - name: compile_archive_and_run_libfuzzertests_TG diff --git a/etc/evergreen_nightly.yml b/etc/evergreen_nightly.yml index 548a69d6ecb93..4736899719170 100644 --- a/etc/evergreen_nightly.yml +++ b/etc/evergreen_nightly.yml @@ -5,7 +5,7 @@ include: - filename: etc/evergreen_yml_components/variants/task_generation.yml - filename: etc/evergreen_yml_components/variants/atlas.yml - filename: etc/evergreen_yml_components/variants/misc_release.yml -### Comment out when using this file for a LTS or Rapid release branch. ### +### Comment out when using this file for a Rapid release branch. ### - filename: etc/evergreen_yml_components/variants/ibm.yml ### Uncomment when using this file for a LTS release branch. ### # - filename: etc/evergreen_yml_components/variants/in_memory.yml @@ -13,6 +13,10 @@ include: # - filename: etc/evergreen_yml_components/variants/sanitizer.yml ### Uncomment when using this file for a LTS or Rapid release branch. ### # - filename: etc/evergreen_yml_components/variants/ninja.yml +### Uncomment when using this file for a LTS or Rapid release branch. ### +# - filename: etc/evergreen_yml_components/variants/classic_engine.yml +### Uncomment when using this file for a LTS or Rapid release branch. ### +# - filename: etc/evergreen_yml_components/variants/config_shard.yml parameters: diff --git a/etc/evergreen_timeouts.yml b/etc/evergreen_timeouts.yml index c86d764470582..e7e35fe26f65a 100644 --- a/etc/evergreen_timeouts.yml +++ b/etc/evergreen_timeouts.yml @@ -12,41 +12,37 @@ overrides: enterprise-macos: - - task: concurrency - idle_timeout: 15 - task: replica_sets_jscore_passthrough exec_timeout: 150 # 2.5 hours enterprise-macos-arm64: - - task: concurrency - idle_timeout: 15 - task: replica_sets_jscore_passthrough exec_timeout: 150 # 2.5 hours - enterprise-rhel-80-64-bit-coverage: - - task: replica_sets_jscore_passthrough - exec_timeout: 150 # 2.5 hours. 
+ enterprise-rhel80-debug-complete-tsan: + - task: aggregation_timeseries_fuzzer + exec_timeout: 150 # 2.5 hours + + enterprise-rhel80-debug-tsan: + - task: aggregation_timeseries_fuzzer + exec_timeout: 150 # 2.5 hours macos: - - task: concurrency - idle_timeout: 15 - task: replica_sets_jscore_passthrough exec_timeout: 150 # 2.5 hours macos-arm64: - - task: concurrency - idle_timeout: 15 - task: replica_sets_jscore_passthrough exec_timeout: 150 # 2.5 hours + rhel80-asan: + - task: aggregation_timeseries_fuzzer + exec_timeout: 150 # 2.5 hours + rhel80-debug-ubsan: - task: update_timeseries_fuzzer exec_timeout: 150 # 2.5 hours - rhel80-debug-suggested: - - task: replica_sets_jscore_passthrough - exec_timeout: 180 # 3 hours. - rhel80-debug-ubsan-classic-engine: - task: update_timeseries_fuzzer exec_timeout: 150 # 2.5 hours @@ -54,3 +50,7 @@ overrides: rhel80-debug-aubsan-all-feature-flags: - task: update_timeseries_fuzzer exec_timeout: 150 # 2.5 hours + + ubuntu1804-asan: + - task: aggregation_timeseries_fuzzer + exec_timeout: 150 # 2.5 hours diff --git a/etc/evergreen_yml_components/definitions.yml b/etc/evergreen_yml_components/definitions.yml index b2cd5bc1bdd11..175ba9caaed2b 100644 --- a/etc/evergreen_yml_components/definitions.yml +++ b/etc/evergreen_yml_components/definitions.yml @@ -45,6 +45,9 @@ parameters: - key: antithesis_image_tag description: "The docker tag to use when pushing images to Antithesis" +- key: build_patch_id + description: "Patch id of evergreen patch to pull binaries from for testing." + ## Cron parameters. - key: project_required_suggested_cron value: "0 */4 * * *" # Every 4 hours starting at 0000 UTC @@ -96,6 +99,9 @@ variables: depends_on: - name: version_gen variant: generate-tasks-for-version + # This is added because of EVG-18211. + # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true - name: archive_dist_test_debug commands: - func: "generate resmoke tasks" @@ -105,10 +111,11 @@ variables: - &gen_burn_in_task_template name: gen_burn_in_task_template depends_on: - - name: version_gen - variant: generate-tasks-for-version - name: version_burn_in_gen variant: generate-tasks-for-version + # This is added because of EVG-18211. + # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true - name: archive_dist_test_debug commands: - func: "generate resmoke tasks" @@ -158,7 +165,12 @@ variables: is_jstestfuzz: true num_files: 15 num_tasks: 5 # Upperbound by `max_sub_suites` specified through the variant with each task still running `num_files` files. - resmoke_args: --help # resmoke_args needs to be overridden to specify one of the jstestfuzz suites + # It is error prone to require each fuzzer-related Evergreen task to need to override the + # resmoke_args variable. However, the resmoke_args variable must be defined as a string in the + # task generation configuration to satisfy mongodb/mongo-task-generator. We therefore specify an + # empty string for the variable to reflect there are no additional arguments provided to resmoke + # by default for the fuzzer-related tasks. + resmoke_args: "" resmoke_jobs_max: 1 should_shuffle: false continue_on_failure: false @@ -175,6 +187,9 @@ variables: depends_on: - name: version_gen variant: generate-tasks-for-version + # This is added because of EVG-18211. 
+ # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true - archive_dist_test_debug commands: - func: "generate resmoke tasks" @@ -240,7 +255,6 @@ variables: # "set up venv". - func: "set up venv" - func: "upload pip requirements" - - func: "get all modified patch files" - func: "f_expansions_write" - func: "configure evergreen api credentials" - func: "get buildnumber" @@ -291,8 +305,8 @@ variables: - mongocryptd_variants: &mongocryptd_variants - enterprise-amazon2 - enterprise-amazon2-arm64 - - enterprise-amazon2022 - - enterprise-amazon2022-arm64 + - enterprise-amazon2023 + - enterprise-amazon2023-arm64 - enterprise-debian10-64 - enterprise-debian11-64 - enterprise-linux-64-amazon-ami @@ -338,8 +352,8 @@ variables: - enterprise-rhel-90-64-bit - enterprise-rhel-90-arm64 - enterprise-amazon2-arm64 - - enterprise-amazon2022 - - enterprise-amazon2022-arm64 + - enterprise-amazon2023 + - enterprise-amazon2023-arm64 - enterprise-ubuntu1804-64 - enterprise-ubuntu2004-64 - enterprise-ubuntu2204-64 @@ -350,12 +364,12 @@ variables: - amazon - enterprise-linux-64-amazon-ami - amazon2 - - amazon2022 - - amazon2022-arm64 + - amazon2023 + - amazon2023-arm64 - enterprise-amazon2 - enterprise-amazon2-arm64 - - enterprise-amazon2022 - - enterprise-amazon2022-arm64 + - enterprise-amazon2023 + - enterprise-amazon2023-arm64 - debian10 - debian11 - enterprise-debian10-64 @@ -529,7 +543,12 @@ functions: params: binary: bash args: - - "src/evergreen/functions/binaries_extract.sh" + - "src/evergreen/run_python_script.sh" + - "evergreen/functions/binaries_extract.py" + - "--tarball=mongo-binaries.tgz" + - "--extraction-command=${decompress}" + - "--change-dir=${extraction_change_dir}" + - "${move_outputs}" "get version expansions": &get_version_expansions command: s3.get @@ -567,16 +586,6 @@ functions: extract_to: src/corpora remote_file: ${mongo_fuzzer_corpus} - "fetch legacy corpus": &fetch_legacy_corpus - command: s3.get - params: - aws_key: ${s3_access_key_id} - aws_secret: ${s3_secret_access_key} - bucket: fuzzer-artifacts - # Extract the legacy corpora to the merge directory to synthesize together until we burn in. - extract_to: src/corpora-merged - remote_file: ${project}/corpus/mongo-${build_variant}-latest.tgz - "archive new corpus": &archive_new_corpus command: archive.targz_pack params: @@ -732,23 +741,6 @@ functions: args: - "./src/evergreen/functions/shared_scons_directory_umount.sh" - "get all modified patch files": - - *f_expansions_write - - command: subprocess.exec - params: - binary: bash - args: - - "./src/evergreen/functions/modified_patch_files_get_all.sh" - - # This function should only be called from patch-build-only tasks. 
- "get added and modified patch files": - - *f_expansions_write - - command: subprocess.exec - params: - binary: bash - args: - - "./src/evergreen/functions/added_and_modified_patch_files_get.sh" - "determine resmoke jobs": &determine_resmoke_jobs command: subprocess.exec params: @@ -1084,6 +1076,30 @@ functions: files: - src/generated_resmoke_config/*.json + "generate version validation": + - *f_expansions_write + - *configure_evergreen_api_credentials + - command: subprocess.exec + type: test + params: + binary: bash + args: + - "./src/evergreen/generate_version.sh" + - command: archive.targz_pack + params: + target: generate_tasks_config.tgz + source_dir: src/generated_resmoke_config + include: + - "*" + - command: subprocess.exec + type: test + params: + binary: bash + args: + - "src/evergreen/run_python_script.sh" + - "buildscripts/validate_file_size.py" + - "generate_tasks_config.tgz" + "generate version burn in": - *f_expansions_write - *configure_evergreen_api_credentials @@ -1124,6 +1140,11 @@ functions: files: - src/generated_resmoke_config/*.json + "initialize multiversion tasks": &initialize_multiversion_tasks + - command: shell.exec + params: + script: "echo 'noop'" + "generate resmoke tasks": - *fetch_artifacts - *f_expansions_write @@ -1477,7 +1498,8 @@ functions: params: binary: bash args: - - "./src/evergreen/lint_fuzzer_sanity_patch.sh" + - "src/evergreen/run_python_script.sh" + - "evergreen/lint_fuzzer_sanity_patch.py" "lint fuzzer sanity all": - *f_expansions_write @@ -1516,8 +1538,7 @@ functions: local_file: jstests.tgz remote_file: ${project}/${build_variant}/${revision}/jstestfuzz/${task_id}-${execution}.tgz bucket: mciuploads - permissions: private - visibility: signed + permissions: public-read content_type: application/gzip display_name: Generated Tests - Execution ${execution} @@ -1571,6 +1592,18 @@ functions: visibility: signed content_type: text/javascript display_name: Minimized jstestfuzz Test - Execution ${execution} + - command: s3.put + params: + optional: true + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: src/statistics-report.json + remote_file: ${project}/${build_variant}/${revision}/artifacts/statistics-report-${task_id}-${execution}.json + bucket: mciuploads + permissions: private + visibility: signed + content_type: application/json + display_name: Statistics Report - Execution ${execution} - *f_expansions_write - command: subprocess.exec params: @@ -1725,8 +1758,8 @@ functions: args: - "./src/evergreen/selinux_run_test.sh" env: - test_list: ${test_list} - user: ec2-user + TEST_LIST: ${test_list} + SELINUX_USER: ec2-user ### Process & archive remote EC2 artifacts ### @@ -2349,7 +2382,7 @@ functions: bucket: mciuploads permissions: public-read content_type: text/plain - display_name: Generated multiversion exclude tags options + display_name: multiversion_exclude_tags.yml from resmoke invocation # Pre task steps pre: @@ -2410,6 +2443,21 @@ tasks: task_compile_flags: >- PREFIX=dist-test +## compile - build all scons targets except unittests ## +- name: compile_dist_test_half + tags: [] + depends_on: + - name: version_expansions_gen + variant: generate-tasks-for-version + commands: + - func: "scons compile" + vars: + targets: >- + compile_first_half_non_test_source + ${additional_compile_targets|} + task_compile_flags: >- + PREFIX=dist-test + - name: compile_upload_benchmarks tags: [] depends_on: @@ -2770,6 +2818,16 @@ tasks: targets: install-core +- name: iwyu_self_test + tags: [] + commands: + - command: subprocess.exec + params: + 
binary: bash + args: + - "src/evergreen/run_python_script.sh" + - "buildscripts/iwyu/test/run_tests.py" + - name: libdeps_graph_linting tags: [] depends_on: @@ -2938,6 +2996,46 @@ tasks: suite: unittests install_dir: build/install/bin +## pretty_printer ## +- <<: *task_template + name: run_pretty_printer_tests + tags: [] + commands: + - func: "git get project and add git tag" + - *f_expansions_write + - *kill_processes + - *cleanup_environment + - func: "set up venv" + - func: "upload pip requirements" + - func: "configure evergreen api credentials" + - func: "do setup" + vars: + extraction_change_dir: build/install/ + - command: s3.get + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + remote_file: ${mongo_debugsymbols} + bucket: mciuploads + local_file: src/mongo-debugsymbols.tgz + optional: true + - command: subprocess.exec + params: + binary: bash + args: + - "src/evergreen/run_python_script.sh" + - "evergreen/functions/binaries_extract.py" + - "--tarball=mongo-debugsymbols.tgz" + - "--extraction-command=${decompress}" + - "--change-dir=build/install/" + - "--move-output=build/install/dist-test/pretty_printer_tests.txt:build/" + optional: true + - func: "run tests" + vars: + suite: pretty-printer-tests + install_dir: build/install/dist-test/bin + + ## run_unittests with UndoDB live-record ## #- name: run_unittests_with_recording # depends_on: @@ -2995,8 +3093,6 @@ tasks: - name: fetch_and_run_libfuzzertests tags: [] commands: - - func: "fetch corpus" - - func: "fetch legacy corpus" - func: "run tests" vars: suite: libfuzzer @@ -3075,6 +3171,7 @@ tasks: vars: targets: install-integration-tests compiling_for_test: true + separate_debug: off - name: compile_visibility_test tags: [] @@ -3484,7 +3581,7 @@ tasks: - func: "run tests" vars: suite: benchmarks - exec_timeout_secs: 18000 # 5 hour timeout. + exec_timeout_secs: 21600 # 6 hour timeout. 
resmoke_jobs_max: 1 - func: "send benchmark results" # - func: "analyze benchmark results" @@ -3527,6 +3624,17 @@ tasks: resmoke_jobs_max: 1 - func: "send benchmark results" +- <<: *benchmark_template + name: benchmarks_query + tags: ["benchmarks"] + commands: + - func: "do benchmark setup" + - func: "run tests" + vars: + suite: benchmarks_query + resmoke_jobs_max: 1 + - func: "send benchmark results" + - <<: *benchmark_template name: benchmarks_expression tags: ["benchmarks"] @@ -3690,6 +3798,10 @@ tasks: name: initial_sync_multiversion_fuzzer_gen tags: ["multiversion_fuzzer", "require_npm", "random_name", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + initial_sync_fuzzer_last_lts: last_lts + initial_sync_fuzzer_last_continuous: last_continuous - func: "generate resmoke tasks" vars: <<: *jstestfuzz_config_vars @@ -3719,12 +3831,15 @@ tasks: name: aggregation_multiversion_fuzzer_gen tags: ["aggfuzzer", "common", "multiversion", "require_npm", "random_name", "future_git_tag_incompatible"] commands: + - func: "initialize multiversion tasks" + vars: + aggregation_expression_multiversion_fuzzer_last_lts: last_lts + aggregation_expression_multiversion_fuzzer_last_continuous: last_continuous - func: "generate resmoke tasks" vars: <<: *jstestfuzz_config_vars num_files: 5 num_tasks: 5 - suite: generational_fuzzer resmoke_args: "--mongodSetParameters='{logComponentVerbosity: {command: 2}}'" npm_command: agg-fuzzer run_no_feature_flag_tests: "true" @@ -3734,12 +3849,15 @@ tasks: name: aggregation_expression_multiversion_fuzzer_gen tags: ["aggfuzzer", "multiversion", "require_npm", "random_name"] commands: + - func: "initialize multiversion tasks" + vars: + aggregation_multiversion_fuzzer_last_lts: last_lts + aggregation_multiversion_fuzzer_last_continuous: last_continuous - func: "generate resmoke tasks" vars: <<: *jstestfuzz_config_vars num_files: 5 num_tasks: 5 - suite: generational_fuzzer resmoke_args: "--mongodSetParameters='{logComponentVerbosity: {command: 2}}'" npm_command: agg-expr-fuzzer run_no_feature_flag_tests: "true" @@ -3884,13 +4002,16 @@ tasks: name: update_fuzzer_gen tags: ["updatefuzzer", "require_npm", "random_name", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + update_fuzzer_last_lts: last_lts + update_fuzzer_last_continuous: last_continuous - func: "generate resmoke tasks" vars: <<: *jstestfuzz_config_vars num_files: 5 num_tasks: 5 npm_command: update-fuzzer - suite: update_fuzzer resmoke_args: "--mongodSetParameters='{logComponentVerbosity: {command: 2}}'" run_no_feature_flag_tests: "true" @@ -3914,13 +4035,16 @@ tasks: name: update_fuzzer_replication_gen tags: ["updatefuzzer", "require_npm", "random_name", "multiversion", "no_debug_mode"] commands: + - func: "initialize multiversion tasks" + vars: + update_fuzzer_replication_last_lts: last_lts + update_fuzzer_replication_last_continuous: last_continuous - func: "generate resmoke tasks" vars: <<: *jstestfuzz_config_vars num_files: 5 num_tasks: 5 npm_command: update-fuzzer - suite: update_fuzzer_replication resmoke_args: "--mongodSetParameters='{logComponentVerbosity: {command: 2}}'" run_no_feature_flag_tests: "true" @@ -3929,6 +4053,10 @@ tasks: name: rollback_multiversion_fuzzer_gen tags: ["multiversion_fuzzer", "require_npm", "random_name", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + rollback_fuzzer_last_lts: last_lts + rollback_fuzzer_last_continuous: last_continuous - func: "generate resmoke tasks" vars: <<: 
*jstestfuzz_config_vars @@ -4245,6 +4373,14 @@ tasks: name: jstestfuzz_replication_multiversion_gen tags: ["multiversion_fuzzer", "require_npm", "random_name", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + jstestfuzz_replication_last_continuous_new_new_old: last_continuous + jstestfuzz_replication_last_continuous_new_old_new: last_continuous + jstestfuzz_replication_last_continuous_old_new_new: last_continuous + jstestfuzz_replication_last_lts_new_new_old: last_lts + jstestfuzz_replication_last_lts_new_old_new: last_lts + jstestfuzz_replication_last_lts_old_new_new: last_lts - func: "generate resmoke tasks" vars: <<: *jstestfuzz_config_vars @@ -4312,6 +4448,10 @@ tasks: name: jstestfuzz_sharded_multiversion_gen tags: ["multiversion_fuzzer", "require_npm", "random_name", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + jstestfuzz_sharded_last_continuous_new_old_old_new: last_continuous + jstestfuzz_sharded_last_lts_new_old_old_new: last_lts - func: "generate resmoke tasks" vars: <<: *jstestfuzz_config_vars @@ -4445,6 +4585,14 @@ tasks: name: multiversion_sanity_check_gen tags: ["multiversion", "multiversion_sanity_check"] commands: + - func: "initialize multiversion tasks" + vars: + multiversion_sanity_check_last_continuous_new_new_old: last_continuous + multiversion_sanity_check_last_continuous_new_old_new: last_continuous + multiversion_sanity_check_last_continuous_old_new_new: last_continuous + multiversion_sanity_check_last_lts_new_new_old: last_lts + multiversion_sanity_check_last_lts_new_old_new: last_lts + multiversion_sanity_check_last_lts_old_new_new: last_lts - func: "generate resmoke tasks" vars: run_no_feature_flag_tests: "true" @@ -4453,6 +4601,14 @@ tasks: name: replica_sets_jscore_multiversion_gen tags: ["multiversion", "multiversion_passthrough"] commands: + - func: "initialize multiversion tasks" + vars: + replica_sets_jscore_passthrough_last_continuous_new_new_old: last_continuous + replica_sets_jscore_passthrough_last_continuous_new_old_new: last_continuous + replica_sets_jscore_passthrough_last_continuous_old_new_new: last_continuous + replica_sets_jscore_passthrough_last_lts_new_new_old: last_lts + replica_sets_jscore_passthrough_last_lts_new_old_new: last_lts + replica_sets_jscore_passthrough_last_lts_old_new_new: last_lts - func: "generate resmoke tasks" vars: suite: replica_sets_jscore_passthrough @@ -4470,7 +4626,6 @@ tasks: - *kill_processes - *cleanup_environment - *set_up_venv - - func: "get added and modified patch files" - func: "setup jstestfuzz" - func: "lint fuzzer sanity patch" @@ -4641,6 +4796,18 @@ tasks: - func: "upload pip requirements" - func: "generate version" +- name: version_gen_validation + commands: + - command: manifest.load + - *git_get_project + - *f_expansions_write + - *add_git_tag + - *kill_processes + - *cleanup_environment + - func: "set up venv" + - func: "upload pip requirements" + - func: "generate version validation" + - name: version_burn_in_gen commands: - command: manifest.load @@ -4679,17 +4846,18 @@ tasks: - func: "do setup" - func: "run tests" -- <<: *task_template - name: change_streams_v1_resume_token_passthrough - tags: ["change_streams"] - commands: - - func: "do setup" - - func: "run tests" - - <<: *gen_task_template name: change_streams_multiversion_gen tags: ["multiversion", "multiversion_passthrough"] commands: + - func: "initialize multiversion tasks" + vars: + change_streams_last_continuous_new_new_old: last_continuous + 
change_streams_last_continuous_new_old_new: last_continuous + change_streams_last_continuous_old_new_new: last_continuous + change_streams_last_lts_new_new_old: last_lts + change_streams_last_lts_new_old_new: last_lts + change_streams_last_lts_old_new_new: last_lts - func: "generate resmoke tasks" vars: suite: change_streams @@ -4699,6 +4867,10 @@ tasks: name: change_streams_downgrade_gen tags: ["multiversion_passthrough", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + change_streams_downgrade_last_continuous_new_old_old_new: last_continuous + change_streams_downgrade_last_lts_new_old_old_new: last_lts - func: "generate resmoke tasks" vars: run_no_feature_flag_tests: "true" @@ -4721,19 +4893,14 @@ tasks: - func: "do setup" - func: "run tests" -- <<: *task_template - name: change_streams_v1_resume_token_sharded_collections_passthrough - tags: ["change_streams"] - depends_on: - - name: change_streams - commands: - - func: "do setup" - - func: "run tests" - - <<: *gen_task_template name: change_streams_sharded_collections_multiversion_gen tags: ["multiversion_passthrough", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + change_streams_sharded_collections_passthrough_last_continuous_new_old_old_new: last_continuous + change_streams_sharded_collections_passthrough_last_lts_new_old_old_new: last_lts - func: "generate resmoke tasks" vars: suite: change_streams_sharded_collections_passthrough @@ -4741,7 +4908,7 @@ tasks: - <<: *gen_task_template name: multiversion_future_git_tag_gen - tags: ["multiversion", "no_version_combination", "multiversion_future_git_tag"] + tags: ["multiversion", "no_multiversion_generate_tasks", "multiversion_future_git_tag"] commands: - func: "generate resmoke tasks" vars: @@ -4749,7 +4916,7 @@ tasks: - <<: *gen_task_template name: multiversion_auth_future_git_tag_gen - tags: ["auth", "multiversion", "no_version_combination", "multiversion_future_git_tag"] + tags: ["auth", "multiversion", "no_multiversion_generate_tasks", "multiversion_future_git_tag"] commands: - func: "generate resmoke tasks" vars: @@ -4827,14 +4994,14 @@ tasks: - <<: *task_template name: change_streams_multitenant_passthrough - tags: ["change_streams"] + tags: [] # TODO SERVER-57866: Add the "change_streams" tag here. commands: - func: "do setup" - func: "run tests" - <<: *task_template name: change_streams_multitenant_sharded_collections_passthrough - tags: ["change_streams"] + tags: [] # TODO SERVER-57866: Add the "change_streams" tag here. 
commands: - func: "do setup" - func: "run tests" @@ -4998,6 +5165,8 @@ tasks: args: - "src/evergreen/external_auth_oidc_setup.sh" - func: "run tests" + vars: + resmoke_jobs_max: ${external_auth_oidc_jobs_max|1} - <<: *task_template name: external_auth_windows @@ -5044,7 +5213,7 @@ tasks: - <<: *task_template name: config_fuzzer_simulate_crash_concurrency_replication_gen - tags: ["config_fuzzer", "large"] + tags: ["config_fuzzer", "large", "linux_only"] commands: - func: "generate resmoke tasks" vars: @@ -5066,15 +5235,28 @@ tasks: --excludeWithAnyTags=does_not_support_config_fuzzer use_large_distro: "true" +- <<: *task_template + name: config_fuzzer_concurrency_simultaneous_replication_gen + tags: ["config_fuzzer", "large"] + commands: + - func: "generate resmoke tasks" + vars: + suite: concurrency_simultaneous_replication + resmoke_args: >- + --fuzzMongodConfigs=normal + --excludeWithAnyTags=does_not_support_config_fuzzer + use_large_distro: "true" + - <<: *gen_task_template name: config_fuzzer_concurrency_sharded_replication_gen - tags: ["config_fuzzer", "large"] + tags: ["config_fuzzer", "large", "sharded"] commands: - func: "generate resmoke tasks" vars: suite: concurrency_sharded_replication resmoke_args: >- --fuzzMongodConfigs=normal + --fuzzMongosConfigs=normal --excludeWithAnyTags=does_not_support_config_fuzzer use_large_distro: "true" @@ -5101,6 +5283,7 @@ tasks: suite: concurrency_sharded_replication resmoke_args: >- --fuzzMongodConfigs=stress + --fuzzMongosConfigs=normal --excludeWithAnyTags=does_not_support_config_fuzzer use_large_distro: "true" exec_timeout_secs: 14400 # 4 hours @@ -5127,6 +5310,7 @@ tasks: suite: sharding_jscore_passthrough resmoke_args: >- --fuzzMongodConfigs=normal + --fuzzMongosConfigs=normal --excludeWithAnyTags=does_not_support_config_fuzzer use_large_distro: "true" @@ -5404,13 +5588,13 @@ tasks: - <<: *gen_task_template name: multiversion_auth_gen - tags: ["auth", "multiversion", "no_version_combination", "future_git_tag_incompatible"] + tags: ["auth", "multiversion", "no_multiversion_generate_tasks", "future_git_tag_incompatible"] commands: - func: "generate resmoke tasks" - <<: *gen_task_template name: multiversion_gen - tags: ["multiversion", "no_version_combination", "future_git_tag_incompatible"] + tags: ["multiversion", "no_multiversion_generate_tasks", "future_git_tag_incompatible"] commands: - func: "generate resmoke tasks" @@ -5419,7 +5603,7 @@ tasks: # build variants that enable this task. 
- <<: *gen_task_template name: feature_flag_multiversion_gen - tags: ["multiversion", "no_version_combination"] + tags: ["multiversion", "no_multiversion_generate_tasks"] commands: - func: "generate resmoke tasks" @@ -5485,17 +5669,20 @@ tasks: commands: - func: "generate resmoke tasks" -- <<: *task_template - name: sharded_collections_jscore_passthrough_with_catalog_shard +- <<: *gen_task_template + name: sharded_collections_jscore_passthrough_with_config_shard_gen tags: ["sharding", "jscore"] commands: - - func: "do setup" - - func: "run tests" + - func: "generate resmoke tasks" - <<: *gen_task_template name: sharded_collections_jscore_multiversion_gen tags: ["multiversion_passthrough", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + sharded_collections_jscore_passthrough_last_continuous_new_old_old_new: last_continuous + sharded_collections_jscore_passthrough_last_lts_new_old_old_new: last_lts - func: "generate resmoke tasks" vars: suite: sharded_collections_jscore_passthrough @@ -5508,17 +5695,20 @@ tasks: - func: "do setup" - func: "run tests" -- <<: *task_template - name: sharding_jscore_passthrough_with_catalog_shard +- <<: *gen_task_template + name: sharding_jscore_passthrough_with_config_shard_gen tags: ["sharding", "jscore", "common"] commands: - - func: "do setup" - - func: "run tests" + - func: "generate resmoke tasks" - <<: *gen_task_template name: sharding_jscore_multiversion_gen tags: ["multiversion_passthrough", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + sharding_jscore_passthrough_last_lts_new_old_old_new: last_lts + sharding_jscore_passthrough_last_continuous_new_old_old_new: last_continuous - func: "generate resmoke tasks" vars: suite: sharding_jscore_passthrough @@ -5647,7 +5837,14 @@ tasks: use_large_distro: "true" - <<: *task_template - name: telemetry_passthrough + name: query_stats_passthrough + tags: [] + commands: + - func: "do setup" + - func: "run tests" + +- <<: *task_template + name: query_stats_passthrough_writeonly tags: [] commands: - func: "do setup" @@ -5812,6 +6009,14 @@ tasks: name: concurrency_replication_multiversion_gen tags: ["multiversion", "multiversion_passthrough"] commands: + - func: "initialize multiversion tasks" + vars: + concurrency_replication_last_continuous_new_new_old: last_continuous + concurrency_replication_last_continuous_new_old_new: last_continuous + concurrency_replication_last_continuous_old_new_new: last_continuous + concurrency_replication_last_lts_new_new_old: last_lts + concurrency_replication_last_lts_new_old_new: last_lts + concurrency_replication_last_lts_old_new_new: last_lts - func: "generate resmoke tasks" vars: suite: concurrency_replication @@ -5864,15 +6069,7 @@ tasks: - <<: *gen_task_template name: concurrency_replication_wiredtiger_cursor_sweeps_gen - tags: ["concurrency", "repl"] - commands: - - func: "generate resmoke tasks" - vars: - resmoke_jobs_max: 1 - -- <<: *gen_task_template - name: concurrency_replication_wiredtiger_eviction_debug_gen - tags: ["concurrency", "repl", "debug_only"] + tags: ["concurrency", "repl", "cursor_sweeps"] commands: - func: "generate resmoke tasks" vars: @@ -5888,7 +6085,7 @@ tasks: resmoke_jobs_max: 1 - <<: *gen_task_template - name: concurrency_sharded_with_catalog_shard_gen + name: concurrency_sharded_with_config_shard_gen tags: ["concurrency", "common", "read_concern_maj", "large", "sharded", "no_debug_mode"] commands: - func: "generate resmoke tasks" @@ -5900,6 +6097,10 @@ tasks: name: 
concurrency_sharded_replication_multiversion_gen tags: ["multiversion_passthrough", "sharded", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + concurrency_sharded_replication_last_continuous_new_old_old_new: last_continuous + concurrency_sharded_replication_last_lts_new_old_old_new: last_lts - func: "generate resmoke tasks" vars: suite: concurrency_sharded_replication @@ -5915,7 +6116,7 @@ tasks: resmoke_jobs_max: 1 - <<: *gen_task_template - name: concurrency_sharded_with_balancer_and_catalog_shard_gen + name: concurrency_sharded_with_balancer_and_config_shard_gen tags: ["concurrency", "common", "read_concern_maj", "large", "sharded", "no_debug_mode"] commands: - func: "generate resmoke tasks" @@ -6098,16 +6299,7 @@ tasks: - <<: *task_template name: concurrency_simultaneous_replication_wiredtiger_cursor_sweeps - tags: ["concurrency", "repl", "random_name"] - commands: - - func: "do setup" - - func: "run tests" - vars: - resmoke_jobs_max: 1 - -- <<: *task_template - name: concurrency_simultaneous_replication_wiredtiger_eviction_debug - tags: ["concurrency", "repl", "debug_only", "random_name"] + tags: ["concurrency", "repl", "cursor_sweeps"] commands: - func: "do setup" - func: "run tests" @@ -6208,6 +6400,10 @@ tasks: name: replica_sets_multiversion_gen tags: ["random_multiversion_ds", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + replica_sets_last_lts: last_lts + replica_sets_last_continuous: last_continuous - func: "generate resmoke tasks" vars: suite: replica_sets @@ -6236,7 +6432,7 @@ tasks: use_large_distro: "true" - <<: *gen_task_template - name: sharding_catalog_shard_gen + name: sharding_config_shard_gen tags: ["sharding", "common"] commands: - func: "generate resmoke tasks" @@ -6247,6 +6443,10 @@ tasks: name: sharding_multiversion_gen tags: ["random_multiversion_ds", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + sharding_last_continuous: last_continuous + sharding_last_lts: last_lts - func: "generate resmoke tasks" vars: use_large_distro: "true" @@ -6259,7 +6459,6 @@ tasks: commands: - func: "generate resmoke tasks" vars: - suite: sharding_continuous_config_stepdown use_large_distro: "true" # This is a subset of sharding_max_mirroring_opportunistic_secondary_targeting_ese_gen and @@ -6297,14 +6496,6 @@ tasks: vars: use_large_distro: "true" -- <<: *gen_task_template - name: sharding_auth_catalog_shard_gen - tags: [] - commands: - - func: "generate resmoke tasks" - vars: - use_large_distro: "true" - - <<: *gen_task_template name: sharding_auth_audit_gen tags: ["sharding", "auth", "audit", "non_live_record", "no_debug_mode"] @@ -6331,13 +6522,15 @@ tasks: resmoke_jobs_max: 1 - <<: *gen_task_template - name: sharding_last_lts_mongos_and_mixed_shards_gen - tags: ["sharding", "common", "multiversion", "no_version_combination"] + name: sharding_mongos_and_mixed_shards_gen + tags: ["sharding", "common", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + sharding_last_lts_mongos_and_mixed_shards: last_lts - func: "generate resmoke tasks" vars: use_large_distro: "true" - multiversion_exclude_tags_version: last_lts run_no_feature_flag_tests: "true" - <<: *gen_task_template @@ -6346,6 +6539,12 @@ tasks: commands: - func: "generate resmoke tasks" +- <<: *gen_task_template + name: sharded_collections_single_writes_without_shard_key_jscore_passthrough_gen + tags: ["sharding"] + commands: + - func: "generate resmoke tasks" + - <<: *gen_task_template name: ssl_gen tags: 
["encrypt", "ssl", "patch_build"] @@ -6370,6 +6569,24 @@ tasks: vars: suite: ssl_x509 +- <<: *gen_task_template + name: fcv_upgrade_downgrade_replica_sets_jscore_passthrough_gen + tags: ["jscore"] + commands: + - func: "generate resmoke tasks" + +- <<: *gen_task_template + name: fcv_upgrade_downgrade_sharding_jscore_passthrough_gen + tags: ["sharding"] + commands: + - func: "generate resmoke tasks" + +- <<: *gen_task_template + name: fcv_upgrade_downgrade_sharded_collections_jscore_passthrough_gen + tags: ["sharding"] + commands: + - func: "generate resmoke tasks" + - <<: *task_template name: jsCore_decimal tags: ["jscore", "common", "decimal"] @@ -6438,112 +6655,30 @@ tasks: vars: use_large_distro: "true" -# Use explicit task definitions for retryable_writes_downgrade suites to avoid running -# with all Repl multiversion combinations. - <<: *gen_task_template - name: retryable_writes_downgrade_last_continuous_gen - tags: ["multiversion_passthrough", "multiversion", "no_version_combination"] + name: retryable_writes_downgrade_gen + tags: ["multiversion_passthrough", "multiversion"] commands: - - func: "generate resmoke tasks" + - func: "initialize multiversion tasks" vars: - multiversion_exclude_tags_version: last_continuous - run_no_feature_flag_tests: "true" - -- <<: *gen_task_template - name: retryable_writes_downgrade_last_lts_gen - tags: ["multiversion_passthrough", "multiversion", "no_version_combination"] - commands: + retryable_writes_downgrade_last_lts: last_lts + retryable_writes_downgrade_last_continuous: last_continuous - func: "generate resmoke tasks" vars: - multiversion_exclude_tags_version: last_lts run_no_feature_flag_tests: "true" - <<: *gen_task_template name: sharded_retryable_writes_downgrade_gen tags: ["multiversion_passthrough", "multiversion"] commands: + - func: "initialize multiversion tasks" + vars: + sharded_retryable_writes_downgrade_last_continuous_new_old_old_new: last_continuous + sharded_retryable_writes_downgrade_last_lts_new_old_old_new: last_lts - func: "generate resmoke tasks" vars: run_no_feature_flag_tests: "true" -- <<: *gen_task_template - name: logical_session_cache_replication_default_refresh_jscore_passthrough_gen - tags: ["logical_session_cache", "repl"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_replication_100ms_refresh_jscore_passthrough_gen - tags: ["logical_session_cache", "repl"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_replication_1sec_refresh_jscore_passthrough_gen - tags: ["logical_session_cache", "one_sec", "repl"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_replication_10sec_refresh_jscore_passthrough_gen - tags: ["logical_session_cache", "repl"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_sharding_default_refresh_jscore_passthrough_gen - tags: ["logical_session_cache"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_sharding_100ms_refresh_jscore_passthrough_gen - tags: ["logical_session_cache"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_sharding_100ms_refresh_jscore_txns_passthrough_gen - tags: ["logical_session_cache"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_sharding_1sec_refresh_jscore_passthrough_gen - 
tags: ["logical_session_cache", "one_sec"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_sharding_10sec_refresh_jscore_passthrough_gen - tags: ["logical_session_cache"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_standalone_default_refresh_jscore_passthrough_gen - tags: ["logical_session_cache"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_standalone_100ms_refresh_jscore_passthrough_gen - tags: ["logical_session_cache"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_standalone_1sec_refresh_jscore_passthrough_gen - tags: ["logical_session_cache", "one_sec"] - commands: - - func: "generate resmoke tasks" - -- <<: *gen_task_template - name: logical_session_cache_standalone_10sec_refresh_jscore_passthrough_gen - tags: ["logical_session_cache"] - commands: - - func: "generate resmoke tasks" - - <<: *gen_task_template name: retryable_writes_jscore_stepdown_passthrough_gen tags: ["retry"] @@ -6801,7 +6936,6 @@ tasks: - func: "run powercycle test" timeout_secs: 1800 # 30 minute timeout for no output - - name: selinux_rhel8_org tags: [] depends_on: @@ -6836,6 +6970,8 @@ tasks: - func: "run selinux tests" vars: distro: rhel80-selinux + test_list: jstests/selinux/*.js src/mongo/db/modules/enterprise/jstests/selinux/*.js + - name: selinux_rhel9_org tags: [] depends_on: @@ -6983,9 +7119,13 @@ tasks: - <<: *task_template name: resmoke_validation_tests tags: [] - depends_on: [] + depends_on: + - name: archive_dist_test commands: - - func: "do non-compile setup" + - func: "do setup" + vars: + extraction_change_dir: build/install/ + move_outputs: "--move-output=build/install/dist-test/pretty_printer_tests.txt:build/" - func: "run tests" - name: test_packages @@ -7063,8 +7203,8 @@ tasks: content_type: ${content_type|application/gzip} display_name: Source tarball # We only need to upload the source tarball from one of the build variants - # because it should be the same everywhere, so just use rhel70/windows. - build_variants: [rhel70, windows] + # because it should be the same everywhere, so just use rhel80/windows. + build_variants: [rhel80, windows] - command: s3.put params: optional: true @@ -7173,7 +7313,7 @@ tasks: - name: publish_packages - run_on: rhel80-small + run_on: rhel8.7-small tags: ["publish"] # This should prevent this task from running in patch builds, where we # don't want to publish packages. 
@@ -7202,14 +7342,27 @@ tasks: aws_secret_remote: ${repo_aws_secret} - func: "set up notary client credentials" - *f_expansions_write + - command: shell.exec + params: + shell: bash + script: | + set -oe + podman login --username ${release_tools_container_registry_username} --password ${release_tools_container_registry_password} ${release_tools_container_registry} - command: subprocess.exec params: binary: bash + env: + AWS_ACCESS_KEY_ID: ${upload_lock_access_key_id} + AWS_SECRET_ACCESS_KEY: ${upload_lock_secret_access_key} + UPLOAD_LOCK_IMAGE: ${upload_lock_image} + UPLOAD_BUCKET: ${upload_lock_bucket} + AWS_REGION: ${upload_lock_region} + EVERGREEN_TASK_ID: ${task_id} args: - "./src/evergreen/packages_publish.sh" - name: push - run_on: rhel80-small + run_on: rhel8.7-small tags: ["publish"] patchable: false depends_on: @@ -7579,6 +7732,70 @@ tasks: content_type: text/plain remote_file: ${push_path}-STAGE/${push_name}/mongodb-${push_name}-${push_arch}-${suffix}-${task_id}-signed.msi.md5 + - command: subprocess.exec + params: + continue_on_err: true + binary: bash + env: + SERVER_TARBALL_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz} + SERVER_TARBALL_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz} + CRYPTD_TARBALL_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz} + CRYPTD_TARBALL_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz} + MONGOHOUSE_TARBALL_PATH: src/mh-${push_name}-${push_arch}-${suffix}.${ext|tgz} + MONGOHOUSE_TARBALL_KEY: ${version_id}/${build_id}/push/${push_path}/mh-${push_name}-${push_arch}-${suffix}.${ext|tgz} + SOURCE_TARBALL_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz} + SOURCE_TARBALL_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz} + DEBUG_SYMBOLS_TARBALL_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz} + DEBUG_SYMBOLS_TARBALL_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz} + SERVER_TARBALL_SIGNATURE_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig + SERVER_TARBALL_SIGNATURE_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig + CRYPTD_TARBALL_SIGNATURE_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig + CRYPTD_TARBALL_SIGNATURE_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sig + SOURCE_TARBALL_SIGNATURE_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}.sig + SOURCE_TARBALL_SIGNATURE_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}.sig + DEBUG_SYMBOLS_TARBALL_SIGNATURE_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sig + DEBUG_SYMBOLS_TARBALL_SIGNATURE_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sig + MSI_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.msi + MSI_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}-signed.msi + SERVER_TARBALL_SHA1_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1 + SERVER_TARBALL_SHA1_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1 + CRYPTD_TARBALL_SHA1_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1 + 
CRYPTD_TARBALL_SHA1_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha1 + SOURCE_TARBALL_SHA1_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}.sha1 + SOURCE_TARBALL_SHA1_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}.sha1 + DEBUG_SYMBOLS_TARBALL_SHA1_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha1 + DEBUG_SYMBOLS_TARBALL_SHA1_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha1 + MSI_SHA1_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.msi.sha1 + MSI_SHA1_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}-signed.msi.sha1 + SERVER_TARBALL_SHA256_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256 + SERVER_TARBALL_SHA256_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256 + CRYPTD_TARBALL_SHA256_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256 + CRYPTD_TARBALL_SHA256_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.sha256 + SOURCE_TARBALL_SHA256_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}.sha256 + SOURCE_TARBALL_SHA256_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}.sha256 + DEBUG_SYMBOLS_TARBALL_SHA256_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha256 + DEBUG_SYMBOLS_TARBALL_SHA256_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.sha256 + MSI_SHA256_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.msi.sha256 + MSI_SHA256_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}-signed.msi.sha256 + SERVER_TARBALL_MD5_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5 + SERVER_TARBALL_MD5_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5 + CRYPTD_TARBALL_MD5_PATH: src/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5 + CRYPTD_TARBALL_MD5_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext|tgz}.md5 + SOURCE_TARBALL_MD5_PATH: src/mongodb-src-${src_suffix}.${ext|tar.gz}.md5 + SOURCE_TARBALL_MD5_KEY: ${version_id}/${build_id}/push/src/mongodb-src-${src_suffix}.${ext|tar.gz}.md5 + DEBUG_SYMBOLS_TARBALL_MD5_PATH: src/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.md5 + DEBUG_SYMBOLS_TARBALL_MD5_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.md5 + MSI_MD5_PATH: src/mongodb-${push_name}-${push_arch}-${suffix}.msi.md5 + MSI_MD5_KEY: ${version_id}/${build_id}/push/${push_path}/mongodb-${push_name}-${push_arch}-${suffix}-signed.msi.md5 + AWS_ACCESS_KEY_ID: ${upload_lock_access_key_id} + AWS_SECRET_ACCESS_KEY: ${upload_lock_secret_access_key} + UPLOAD_LOCK_IMAGE: ${upload_lock_image} + UPLOAD_BUCKET: ${upload_lock_bucket} + AWS_REGION: ${upload_lock_region} + EVERGREEN_TASK_ID: ${task_id} + args: + - "./src/evergreen/run_upload_lock_push.sh" + - command: s3Copy.copy params: aws_key: ${aws_key} @@ -7704,7 +7921,7 @@ tasks: 'destination': {'path': '${push_path}/mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext|tgz}.md5', 'bucket': '${push_bucket}'}} - name: crypt_push - 
run_on: rhel80-small + run_on: rhel8.7-small tags: ["publish_crypt"] patchable: false stepback: false @@ -7741,13 +7958,18 @@ tasks: aws_key_remote: ${repo_aws_key} aws_secret_remote: ${repo_aws_secret} - func: "f_expansions_write" - - func: "set up notary client credentials" + # login to container registry + - command: shell.exec + params: + shell: bash + script: | + set -oe + podman login --username ${release_tools_container_registry_username} --password ${release_tools_container_registry_password} ${release_tools_container_registry} - command: subprocess.exec - type: test params: binary: bash args: - - "./src/evergreen/notary_client_crypt_run.sh" + - "./src/evergreen/garasign_gpg_crypt_sign.sh" # Put the crypt tarball/zipfile - command: s3.put params: @@ -7853,33 +8075,58 @@ tasks: resmoke_jobs_max: 1 - <<: *task_template - name: cqf + name: vector_search tags: [] commands: - func: "do setup" - func: "run tests" + vars: + resmoke_jobs_max: 1 - <<: *task_template - name: cqf_disabled_pipeline_opt - tags: [] + name: cqf + tags: [cqf] commands: - func: "do setup" - func: "run tests" - <<: *task_template - name: cqf_passthrough - tags: [] + name: cqf_disabled_pipeline_opt + tags: [cqf] commands: - func: "do setup" - func: "run tests" - <<: *task_template name: cqf_parallel - tags: [] + tags: [cqf] + commands: + - func: "do setup" + - func: "run tests" + +- <<: *task_template + name: cqf_experimental_jscore_passthrough + tags: [cqf] + commands: + - func: "do setup" + - func: "run tests" + +- <<: *task_template + name: cqf_experimental_aggregation_passthrough + tags: [cqf] commands: - func: "do setup" - func: "run tests" +- <<: *gen_task_template + name: cqf_experimental_no_passthrough_gen + tags: ["cqf"] + commands: + - func: "generate resmoke tasks" + vars: + suite: cqf_experimental_no_passthrough + use_large_distro: "true" + - <<: *task_template name: streams tags: [] @@ -7938,6 +8185,28 @@ tasks: JIRA_AUTH_CONSUMER_KEY: ${jira_auth_consumer_key} JIRA_AUTH_KEY_CERT: ${jira_auth_key_cert} +- name: lint_large_files_check + tags: [] + exec_timeout_secs: 600 # 10 minute timeout + commands: + - command: manifest.load + - func: "git get project and add git tag" + - *f_expansions_write + - *kill_processes + - *cleanup_environment + - func: "set up venv" + - func: "upload pip requirements" + - func: "configure evergreen api credentials" + - command: subprocess.exec + type: test + params: + binary: bash + args: + - "./src/evergreen/run_python_script.sh" + - "buildscripts/large_file_check.py" + - "--exclude" + - "src/third_party/*" + - name: check_for_todos tags: [] exec_timeout_secs: 600 # 10 minute timeout @@ -8055,7 +8324,7 @@ tasks: - <<: *task_template name: query_golden_cqf - tags: [] + tags: [cqf] commands: - func: "do setup" - func: "run tests" @@ -8119,6 +8388,11 @@ task_groups: tasks: - libdeps_graph_linting +- <<: *compile_task_group_template + name: iwyu_self_test_TG + tasks: + - iwyu_self_test + - <<: *compile_task_group_template name: compile_ninja_TG tasks: @@ -8190,6 +8464,28 @@ task_groups: - <<: *compile_task_group_template name: compile_archive_and_run_libfuzzertests_TG + setup_group_can_fail_task: false + setup_group: + - command: manifest.load + - func: "git get project and add git tag" + - func: "set task expansion macros" + - func: "f_expansions_write" + - func: "kill processes" + - func: "cleanup environment" + # The python virtual environment is installed in ${workdir}, which is created in + # "set up venv". 
+ - func: "set up venv" + - func: "upload pip requirements" + - func: "f_expansions_write" + - func: "configure evergreen api credentials" + - func: "get buildnumber" + - func: "f_expansions_write" + - func: "set up credentials" + - func: "use WiredTiger develop" # noop if ${use_wt_develop} is not "true" + - func: "set up win mount script" + - func: "generate compile expansions" + - func: "f_expansions_write" + - func: "fetch corpus" tasks: - compile_and_archive_libfuzzertests - fetch_and_run_libfuzzertests @@ -8223,6 +8519,29 @@ task_groups: - compile_all_but_not_unittests - package +# SERVER-76006 +# This is a compile stream meant for non-cached and/or underpowered systems. +# It joins most of the compile tasks together under a single host spread out +# across different tasks. +- <<: *compile_task_group_template + name: small_compile_test_and_package_serial_no_unittests_TG + tasks: + - compile_dist_test_half + - compile_dist_test + - archive_dist_test + - archive_dist_test_debug + - compile_integration_test + - integration_tests_standalone + - integration_tests_standalone_audit + - integration_tests_replset + - integration_tests_replset_ssl_auth + - integration_tests_sharded + - compile_dbtest + - run_dbtest + - archive_dbtest + - compile_all_but_not_unittests + - package + - <<: *compile_task_group_template name: compile_test_benchmark_and_package_serial_TG tasks: diff --git a/etc/evergreen_yml_components/project_and_distro_settings.yml b/etc/evergreen_yml_components/project_and_distro_settings.yml deleted file mode 100644 index 83e3d74b2bfbc..0000000000000 --- a/etc/evergreen_yml_components/project_and_distro_settings.yml +++ /dev/null @@ -1,76 +0,0 @@ -### -# Definitions for project and distro settings associated with this Evergreen project. -### - - -## Aliases. -patch_aliases: - - alias: "embedded" - variant: ".*" - task: "embedded_.*" - variant_tags: [] - task_tags: [] - - alias: "hourly" - variant: "^(enterprise-windows-all-feature-flags-required|linux-64-debug|ubuntu1804-debug-aubsan-lite|enterprise-rhel-70-64-bit|ubuntu1604-debug|macos-debug|windows-debug )$" - task: ".*" - variant_tags: [] - task_tags: [] - - alias: "query" - variant: "^(.*query-patch-only|enterprise-ubuntu-dynamic-1804-clang-tidy-required)$" - task: ".*" - variant_tags: [] - task_tags: [] - - alias: "required" - variant: "^(.*-required$|linux-64-debug|ubuntu1804-debug-aubsan-lite)$" - task: ".*" - variant_tags: [] - task_tags: [] - - alias: "security" - variant: "^(.*security-patch-only)$" - task: ".*" - variant_tags: [] - task_tags: [] - -commit_queue_aliases: - - variant: "commit-queue" - task: "^(run_.*|compile_.*|lint_.*|validate_commit_message|test_api_version_compatibility|jsCore)$" - variant_tags: [] - task_tags: [] - - variant: "^(enterprise-ubuntu-dynamic-1804-clang-tidy-required|linux-x86-dynamic-compile-required)$" - task: "clang_tidy" - variant_tags: [] - task_tags: [] - -github_pr_aliases: -- variant: "^(.*query-patch-only|enterprise-ubuntu-dynamic-1804-clang-tidy-required)$" - task: ".*" - variant_tags: [] - task_tags: [] - -git_tag_aliases: [] - -github_checks_aliases: [] - -## Virtual Workstation Commands. -workstation_config: - git_clone: no - setup_commands: - - command: "git clone git@github.com:mongodb/server-workflow-tool.git" - directory: "" - - command: "bash virtual_workstation_setup.sh" - directory: "server-workflow-tool" - - -## Task Sync. -task_sync: - config_enabled: true - patch_enabled: true - - -## Build Baron. 
-build_baron_settings: - ticket_create_project: BF - ticket_search_projects: - - BF - - WT - - SERVER diff --git a/etc/evergreen_yml_components/variants/atlas.yml b/etc/evergreen_yml_components/variants/atlas.yml index bb1b3b05cc1c2..04f51c9bbb441 100644 --- a/etc/evergreen_yml_components/variants/atlas.yml +++ b/etc/evergreen_yml_components/variants/atlas.yml @@ -18,10 +18,12 @@ buildvariants: push_bucket: downloads.10gen.com push_name: linux push_arch: x86_64-enterprise-rhel70 + test_flags: --excludeWithAnyTags=requires_latch_analyzer compile_flags: >- --ssl MONGO_DISTMOD=rhel70 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -54,7 +56,6 @@ buildvariants: - name: jsCore_txns_large_txns_format - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: .ocsp - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough @@ -72,7 +73,7 @@ buildvariants: - name: test_packages distros: - ubuntu2004-package - # TODO: BF-24515 restore when BF is resolved - #- name: selinux_rhel7_enterprise + - name: vector_search + - name: selinux_rhel7_enterprise - name: .publish - name: generate_buildid_to_debug_symbols_mapping diff --git a/etc/evergreen_yml_components/variants/classic_engine.yml b/etc/evergreen_yml_components/variants/classic_engine.yml new file mode 100644 index 0000000000000..437c282c4ef2d --- /dev/null +++ b/etc/evergreen_yml_components/variants/classic_engine.yml @@ -0,0 +1,327 @@ +# Build variants for testing the classic engine. + +variables: +- &linux_x86_dynamic_compile_variant_dependency + depends_on: + - name: archive_dist_test_debug + variant: &linux_x86_dynamic_compile_variant_name linux-x86-dynamic-compile + - name: version_gen + variant: generate-tasks-for-version + # This is added because of EVG-18211. + # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true + +- &linux_x86_generic_expansions + multiversion_platform: rhel80 + multiversion_edition: enterprise + repo_edition: enterprise + large_distro_name: rhel80-medium + num_scons_link_jobs_available: 0.99 + compile_variant: *linux_x86_dynamic_compile_variant_name + +- &enterprise-rhel-80-64-bit-dynamic-expansions + <<: *linux_x86_generic_expansions + scons_cache_scope: shared + scons_cache_mode: all + has_packages: false + jstestfuzz_num_generated_files: 40 + jstestfuzz_concurrent_num_files: 10 + target_resmoke_time: 10 + max_sub_suites: 5 + idle_timeout_factor: 1.5 + exec_timeout_factor: 1.5 + large_distro_name: rhel80-medium + burn_in_tag_buildvariants: >- + enterprise-rhel-80-64-bit-inmem + enterprise-rhel-80-64-bit-multiversion + burn_in_tag_compile_task_dependency: archive_dist_test_debug + +# If you add anything to san_options, make sure the appropriate changes are +# also made to SConstruct. 
+# and also to the san_options in compile_static_analysis.yml +- aubsan_options: &aubsan_options + >- + UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" + LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1" + ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:strict_string_checks=true:detect_invalid_pointer_pairs=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" + +buildvariants: +- &enterprise-rhel-80-64-bit-dynamic-classic-engine + <<: *linux_x86_dynamic_compile_variant_dependency + name: enterprise-rhel-80-64-bit-dynamic-classic-engine + display_name: "Shared Library Enterprise RHEL 8.0 (Classic Engine)" + cron: "0 0 * * 0" # once a week (Sunday midnight UTC) + modules: + - enterprise + run_on: + - rhel80-small + stepback: false + expansions: + <<: *enterprise-rhel-80-64-bit-dynamic-expansions + jstestfuzz_num_generated_files: 40 + jstestfuzz_concurrent_num_files: 10 + target_resmoke_time: 10 + max_sub_suites: 5 + test_flags: >- + --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}" + large_distro_name: rhel80-medium + burn_in_tag_buildvariants: >- + enterprise-rhel-80-64-bit-inmem + enterprise-rhel-80-64-bit-multiversion + burn_in_tag_compile_task_dependency: archive_dist_test_debug + depends_on: + - name: archive_dist_test_debug + variant: *linux_x86_dynamic_compile_variant_name + - name: version_gen + variant: generate-tasks-for-version + # This is added because of EVG-18211. + # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true + tasks: + - name: .aggfuzzer !.sbe_only + - name: .aggregation !.sbe_only + - name: .auth + - name: .causally_consistent !.sharding + - name: .change_stream_fuzzer + - name: .change_streams + - name: .concurrency !.large !.ubsan !.no_txns !.compute_mode + - name: .concurrency .large !.ubsan !.no_txns !.compute_mode + distros: + - rhel80-medium + - name: .encrypt + - name: .jscore .common !jsCore !.sbe_only + - name: .jstestfuzz !.flow_control + - name: .misc_js + - name: .multi_shard + - name: .query_fuzzer + - name: .random_multiversion_ds + - name: .read_only + - name: .read_write_concern !.large + - name: .read_write_concern .large + distros: + - rhel80-medium + - name: .replica_sets !.encrypt !.auth + distros: + - rhel80-xlarge + - name: .rollbackfuzzer + - name: .sharding .common + - name: .sharding .jscore !.wo_snapshot !.multi_stmt + - name: .sharding .txns + - name: .serverless + distros: + - rhel80-xlarge + - name: .updatefuzzer + - name: aggregation_repeat_queries + - name: audit + - name: burn_in_tags_gen + depends_on: + - name: version_burn_in_gen + variant: generate-tasks-for-version + omit_generated_tasks: true + - name: archive_dist_test_debug + variant: *linux_x86_dynamic_compile_variant_name + - name: burn_in_tests_gen + depends_on: + - name: version_burn_in_gen + variant: generate-tasks-for-version + omit_generated_tasks: true + - name: archive_dist_test_debug + variant: *linux_x86_dynamic_compile_variant_name + - name: check_feature_flag_tags + - name: check_for_todos + - name: disk_wiredtiger + - name: initial_sync_fuzzer_gen + - name: jsCore + distros: + - rhel80-xlarge + - name: jsCore_min_batch_repeat_queries_ese_gsm + - name: jsCore_txns_large_txns_format + - name: json_schema + - name: lint_fuzzer_sanity_patch + - name: mqlrun + - name: 
multi_stmt_txn_jscore_passthrough_with_migration_gen + - name: multiversion_gen + - name: .multiversion_sanity_check + - name: replica_sets_api_version_jscore_passthrough_gen + - name: replica_sets_reconfig_jscore_passthrough_gen + - name: replica_sets_reconfig_jscore_stepdown_passthrough_gen + distros: + - rhel80-xlarge + - name: replica_sets_reconfig_kill_primary_jscore_passthrough_gen + distros: + - rhel80-xlarge + - name: retryable_writes_jscore_passthrough_gen + - name: retryable_writes_jscore_stepdown_passthrough_gen + - name: sasl + - name: search + - name: search_auth + - name: search_pinned_connections_auth + - name: search_ssl + - name: secondary_reads_passthrough_gen + - name: session_jscore_passthrough + - name: sharding_api_version_jscore_passthrough_gen + - name: test_api_version_compatibility + - name: unittest_shell_hang_analyzer_gen + - name: vector_search + +- name: &rhel80-debug-asan-classic-engine rhel80-debug-asan-classic-engine + display_name: "* ASAN Enterprise RHEL 8.0 DEBUG (Classic Engine)" + cron: "0 0 * * 2" # once a week (Tuesday midnight UTC) + modules: + - enterprise + run_on: + - rhel80-build + stepback: false + expansions: + additional_package_targets: >- + archive-mongocryptd + archive-mongocryptd-debug + lang_environment: LANG=C + san_options: *aubsan_options + compile_flags: >- + --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars + --dbg=on + --opt=on + --allocator=system + --sanitize=address + --ssl + --ocsp-stapling=off + --enable-free-mon=on + -j$(grep -c ^processor /proc/cpuinfo) + compile_variant: *rhel80-debug-asan-classic-engine + test_flags: >- + --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}" + --excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling,requires_increased_memlock_limits + multiversion_platform: rhel80 + multiversion_edition: enterprise + resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under ASAN build. 
hang_analyzer_dump_core: false
+    scons_cache_scope: shared
+    exec_timeout_secs: 14400 # 4 hour timeout
+    separate_debug: off
+    large_distro_name: rhel80-build
+  tasks:
+  - name: compile_test_benchmark_and_package_serial_TG
+  - name: .aggregation !.sbe_only
+  - name: .auth
+  - name: audit
+  - name: .benchmarks
+  - name: .causally_consistent !.wo_snapshot
+  - name: .change_streams
+  - name: .misc_js
+  - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.compute_mode
+  - name: .encrypt
+  - name: free_monitoring
+  - name: external_auth
+  - name: external_auth_aws
+  - name: external_auth_oidc
+  - name: initial_sync_fuzzer_gen
+  - name: compile_integration_and_test_parallel_stream_TG
+    distros:
+    - rhel80-large
+  - name: .jscore .common !.sbe_only
+  - name: jsCore_min_batch_repeat_queries_ese_gsm
+  - name: jsCore_txns_large_txns_format
+  - name: json_schema
+  - name: .logical_session_cache
+  - name: .multi_shard .common
+  - name: .query_fuzzer
+  - name: .read_write_concern
+  - name: replica_sets_large_txns_format_jscore_passthrough
+  - name: .replica_sets !.multi_oplog
+  - name: .replica_sets .encrypt
+  - name: .resharding_fuzzer
+  - name: .retry
+  - name: .read_only
+  - name: .rollbackfuzzer
+  - name: .updatefuzzer
+  - name: sasl
+  - name: secondary_reads_passthrough_gen
+  - name: session_jscore_passthrough
+  - name: .sharding .jscore !.wo_snapshot
+  - name: .sharding .common !.csrs !.jstestfuzz
+  - name: .watchdog
+  - name: .stitch
+  - name: .serverless
+  - name: unittest_shell_hang_analyzer_gen
+  - name: .updatefuzzer
+  - name: server_discovery_and_monitoring_json_test_TG
+  - name: server_selection_json_test_TG
+  - name: generate_buildid_to_debug_symbols_mapping
+
+- name: &rhel80-debug-ubsan-classic-engine rhel80-debug-ubsan-classic-engine
+  display_name: "* UBSAN Enterprise RHEL 8.0 DEBUG (Classic Engine)"
+  cron: "0 0 * * 4" # once a week (Thursday midnight UTC)
+  modules:
+  - enterprise
+  run_on:
+  - rhel80-build
+  stepback: false
+  expansions:
+    additional_package_targets: >-
+      archive-mongocryptd
+      archive-mongocryptd-debug
+    lang_environment: LANG=C
+    san_options: *aubsan_options
+    compile_variant: *rhel80-debug-ubsan-classic-engine
+    compile_flags: >-
+      --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars
+      --dbg=on
+      --opt=on
+      --sanitize=undefined
+      --ssl
+      --ocsp-stapling=off
+      --enable-free-mon=on
+      -j$(grep -c ^processor /proc/cpuinfo)
+    test_flags: >-
+      --mongodSetParameters="{internalQueryFrameworkControl: forceClassicEngine}"
+      --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits
+    multiversion_platform: rhel80
+    multiversion_edition: enterprise
+    resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under UBSAN build.
+ scons_cache_scope: shared + separate_debug: off + large_distro_name: rhel80-build + tasks: + - name: compile_test_benchmark_and_package_serial_TG + - name: .aggregation !.sbe_only + - name: .auth + - name: audit + - name: .benchmarks + - name: .causally_consistent !.wo_snapshot + - name: .change_streams + - name: .misc_js + - name: .concurrency !.no_txns !.repl !.kill_terminate !.compute_mode + - name: disk_wiredtiger + - name: .encrypt + - name: free_monitoring + - name: initial_sync_fuzzer_gen + - name: compile_integration_and_test_parallel_stream_TG + distros: + - rhel80-large + - name: .jscore .common !.sbe_only + - name: jsCore_min_batch_repeat_queries_ese_gsm + - name: jsCore_txns_large_txns_format + - name: json_schema + - name: .logical_session_cache .one_sec + - name: .multi_shard .common + - name: .read_write_concern + - name: replica_sets_large_txns_format_jscore_passthrough + - name: .replica_sets !.multi_oplog + - name: .replica_sets .encrypt + - name: .resharding_fuzzer + - name: .retry + - name: .rollbackfuzzer + - name: .read_only + - name: sasl + - name: secondary_reads_passthrough_gen + - name: session_jscore_passthrough + - name: .sharding .jscore !.wo_snapshot + - name: .sharding .common !.csrs !.jstestfuzz + - name: .stitch + - name: .updatefuzzer + - name: .serverless + - name: watchdog_wiredtiger + - name: server_discovery_and_monitoring_json_test_TG + - name: server_selection_json_test_TG + - name: generate_buildid_to_debug_symbols_mapping diff --git a/etc/evergreen_yml_components/variants/compile_static_analysis.yml b/etc/evergreen_yml_components/variants/compile_static_analysis.yml index cd82501355638..36c4fb2ca77ab 100644 --- a/etc/evergreen_yml_components/variants/compile_static_analysis.yml +++ b/etc/evergreen_yml_components/variants/compile_static_analysis.yml @@ -57,7 +57,7 @@ variables: # If you add anything to san_options, make sure the appropriate changes are # also made to SConstruct. -# and also to the san_options in evergreen.yml +# and also to the san_options in evergreen.yml and sanitizer.yml - aubsan_options: &aubsan_options >- UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" @@ -67,12 +67,11 @@ variables: buildvariants: - <<: *linux-x86-dynamic-compile-params - name: &linux-x86-dynamic-compile-required linux-x86-dynamic-compile-required - display_name: "! 
Linux x86 Shared Library Compile & Static Analysis" + name: &linux-x86-dynamic-compile linux-x86-dynamic-compile + display_name: "* Linux x86 Shared Library" expansions: <<: *linux-x86-dynamic-compile-expansions - clang_tidy_toolchain: v4 - compile_variant: *linux-x86-dynamic-compile-required + compile_variant: *linux-x86-dynamic-compile tasks: - name: compile_ninja_quick_TG - name: compile_test_and_package_parallel_unittest_stream_TG @@ -80,10 +79,7 @@ buildvariants: - name: compile_test_and_package_parallel_dbtest_stream_TG - name: compile_integration_and_test_parallel_stream_TG - name: generate_buildid_to_debug_symbols_mapping - - name: .lint - - name: clang_tidy_TG - distros: - - rhel80-xxlarge + - name: run_pretty_printer_tests - name: server_discovery_and_monitoring_json_test_TG distros: - rhel80-large @@ -91,6 +87,21 @@ buildvariants: distros: - rhel80-large +- <<: *generic_linux_compile_params + name: &linux-x86-dynamic-compile-future-tag-multiversion linux-x86-dynamic-compile-future-tag-multiversion + display_name: "Linux x86 Shared Library Compile (future git tag multiversion)" + modules: + - enterprise + expansions: + <<: *linux-x86-dynamic-compile-expansions + bv_future_git_tag: r100.0.0-9999 + compile_variant: *linux-x86-dynamic-compile-future-tag-multiversion + depends_on: + - name: version_expansions_gen + variant: enterprise-rhel-80-64-bit-future-git-tag-multiversion-version-gen + tasks: + - name: compile_test_and_package_serial_TG + - <<: *generic_linux_compile_params name: &linux-x86-dynamic-debug-compile-required linux-x86-dynamic-debug-compile-required # TODO: replace with Sanitizer. display_name: "! Linux x86 Shared Library DEBUG Compile" @@ -156,15 +167,15 @@ buildvariants: - name: .stitch - <<: *linux-x86-dynamic-compile-params - name: &linux-crypt-compile-required linux-crypt-compile-required - display_name: "! Linux x86 Crypt Enterprise Compile" + name: &linux-crypt-compile linux-crypt-compile + display_name: "* Linux x86 Crypt Enterprise Compile" expansions: <<: *linux-x86-dynamic-compile-expansions crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" - compile_variant: *linux-crypt-compile-required + compile_variant: *linux-crypt-compile tasks: - name: .crypt - name: crypt_build_debug_and_test @@ -209,22 +220,31 @@ buildvariants: - <<: *linux-arm64-dynamic-compile-params name: &amazon-linux2-arm64-compile amazon-linux2-arm64-compile - display_name: "* Amazon Linux 2 arm64 Shared Library Compile" + display_name: "! Amazon Linux 2 arm64 Shared Library Compile & Static Analysis" expansions: <<: *linux-arm64-dynamic-compile-expansions + clang_tidy_toolchain: v4 compile_variant: *amazon-linux2-arm64-compile tasks: + - name: clang_tidy_TG + distros: + - amazon2-arm64-xlarge + - name: compile_ninja_quick_TG - name: compile_test_and_package_parallel_unittest_stream_TG - name: compile_test_and_package_parallel_core_stream_TG - name: compile_test_and_package_parallel_dbtest_stream_TG - name: compile_integration_and_test_parallel_stream_TG - name: generate_buildid_to_debug_symbols_mapping + - name: iwyu_self_test_TG + - name: .lint + - name: resmoke_validation_tests - name: server_discovery_and_monitoring_json_test_TG - name: server_selection_json_test_TG + - name: run_pretty_printer_tests - <<: *linux-arm64-dynamic-compile-params name: &amazon-linux2-arm64-crypt-compile amazon-linux2-arm64-crypt-compile - display_name: "* Amazon Linux 2 arm64 Crypt Compile" + display_name: "! 
Amazon Linux 2 arm64 Crypt Compile" expansions: <<: *linux-arm64-dynamic-compile-expansions compile_variant: *amazon-linux2-arm64-crypt-compile diff --git a/etc/evergreen_yml_components/variants/config_shard.yml b/etc/evergreen_yml_components/variants/config_shard.yml new file mode 100644 index 0000000000000..17d423b1437e1 --- /dev/null +++ b/etc/evergreen_yml_components/variants/config_shard.yml @@ -0,0 +1,88 @@ +# This build variant is used to test suites that use sharded cluster fixture with config shard mode. +# TODO (SERVER-75884): Remove this once we switch to config shard as the default. + +# THIS WAS COPIED FROM EVERGREEN.YML - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE. +variables: +- &linux_x86_dynamic_compile_variant_dependency + depends_on: + - name: archive_dist_test_debug + variant: &linux_x86_dynamic_compile_variant_name linux-x86-dynamic-compile + - name: version_gen + variant: generate-tasks-for-version + # This is added because of EVG-18211. + # Without this we are adding extra dependencies on evergreen and it is causing strain + omit_generated_tasks: true + +# THIS WAS COPIED FROM EVERGREEN.YML - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE. +- &linux_x86_generic_expansions + multiversion_platform: rhel80 + multiversion_edition: enterprise + repo_edition: enterprise + large_distro_name: rhel80-medium + num_scons_link_jobs_available: 0.99 + compile_variant: *linux_x86_dynamic_compile_variant_name + +# THIS WAS COPIED FROM EVERGREEN.YML - ANY MODIFICATIONS HERE SHOULD ALSO BE MADE IN THAT FILE. +- &enterprise-rhel-80-64-bit-dynamic-expansions + <<: *linux_x86_generic_expansions + scons_cache_scope: shared + scons_cache_mode: all + has_packages: false + jstestfuzz_num_generated_files: 40 + jstestfuzz_concurrent_num_files: 10 + target_resmoke_time: 10 + max_sub_suites: 5 + idle_timeout_factor: 1.5 + exec_timeout_factor: 1.5 + large_distro_name: rhel80-medium + +buildvariants: +- &enterprise-rhel-80-64-bit-dynamic-config-shard + <<: *linux_x86_dynamic_compile_variant_dependency + name: enterprise-rhel-80-64-bit-dynamic-config-shard + display_name: "* Shared Library Enterprise RHEL 8.0 (Config Shard)" + cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. 
+ modules: + - enterprise + run_on: + - rhel80-small + stepback: false + expansions: &enterprise-rhel-80-64-bit-dynamic-config-shard-expansions + <<: *enterprise-rhel-80-64-bit-dynamic-expansions + test_flags: >- + --configShard=any + --excludeWithAnyTags=config_shard_incompatible + tasks: + - name: aggregation_mongos_passthrough + - name: aggregation_one_shard_sharded_collections + - name: aggregation_sharded_collections_causally_consistent_passthrough + - name: aggregation_sharded_collections_passthrough + - name: causally_consistent_hedged_reads_jscore_passthrough_gen + - name: causally_consistent_jscore_passthrough_auth_gen + - name: causally_consistent_jscore_passthrough_gen + - name: change_streams + - name: change_streams_mongos_sessions_passthrough + - name: change_streams_multi_stmt_txn_mongos_passthrough + - name: change_streams_multi_stmt_txn_sharded_collections_passthrough + - name: change_streams_per_shard_cursor_passthrough + - name: fle2_sharding_high_cardinality + - name: fle2_sharding + - name: jstestfuzz_sharded_causal_consistency_gen + - name: jstestfuzz_sharded_continuous_stepdown_gen + - name: jstestfuzz_sharded_gen + - name: jstestfuzz_sharded_session_gen + - name: sharded_causally_consistent_jscore_passthrough_gen + - name: sharded_causally_consistent_read_concern_snapshot_passthrough_gen + - name: sharding_auth_gen + # Explicitly include instead of using tags to avoid pulling in replica_sets_multiversion_gen. This + # variant will be removed when config shards become the default, so this is only temporary. + - name: sharding_multiversion_gen + - name: .sharding .txns + # Skip csrs stepdown suite because most tests can't handle the first shard stepping down. + - name: .sharding .common !.csrs + - name: .sharding .jscore !.wo_snapshot !.multi_stmt + - name: .concurrency .sharded !.large + - name: .concurrency .sharded .large + distros: + - rhel80-medium + - name: .multi_shard diff --git a/etc/evergreen_yml_components/variants/ibm.yml b/etc/evergreen_yml_components/variants/ibm.yml index f21592344f2ae..02546b982ef2b 100644 --- a/etc/evergreen_yml_components/variants/ibm.yml +++ b/etc/evergreen_yml_components/variants/ibm.yml @@ -14,12 +14,14 @@ buildvariants: additional_package_targets: >- archive-mongocryptd archive-mongocryptd-debug + test_flags: --excludeWithAnyTags=requires_latch_analyzer # We need to compensate for SMT8 setting the cpu count very high and lower the amount of parallelism down compile_flags: >- --ssl MONGO_DISTMOD=rhel81 -j$(echo "$(grep -c processor /proc/cpuinfo)/2" | bc) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" @@ -74,6 +76,7 @@ buildvariants: MONGO_DISTMOD=rhel81 -j$(echo "$(grep -c processor /proc/cpuinfo)/2" | bc) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off tasks: - name: compile_test_and_package_serial_TG distros: @@ -98,7 +101,10 @@ buildvariants: -j3 --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --linker=gold - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: >- + --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits + --excludeWithAnyTags=requires_latch_analyzer,incompatible_with_s390x crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" @@ -118,10 +124,7 @@ buildvariants: multiversion_edition: enterprise compile_variant: 
enterprise-rhel-72-s390x tasks: - - name: compile_test_and_package_serial_no_unittests_TG - distros: - - rhel72-zseries-build - - name: compile_integration_and_test_no_audit_parallel_stream_TG + - name: small_compile_test_and_package_serial_no_unittests_TG distros: - rhel72-zseries-build - name: .aggregation .common @@ -142,7 +145,8 @@ buildvariants: - name: .publish - name: generate_buildid_to_debug_symbols_mapping -- name: enterprise-rhel-83-s390x +- &enterprise-rhel-83-s390x-template + name: enterprise-rhel-83-s390x display_name: Enterprise RHEL 8.3 s390x modules: - enterprise @@ -150,17 +154,19 @@ buildvariants: - rhel83-zseries-small cron: "0 4 * * 0" stepback: false - expansions: + expansions: &enterprise-rhel-83-s390x-expansions-template additional_package_targets: >- archive-mongocryptd archive-mongocryptd-debug release_buid: true + test_flags: --excludeWithAnyTags=incompatible_with_s390x,requires_latch_analyzer compile_flags: >- --ssl MONGO_DISTMOD=rhel83 -j3 --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --linker=gold + --use-diagnostic-latches=off crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -179,10 +185,7 @@ buildvariants: multiversion_edition: enterprise compile_variant: enterprise-rhel-83-s390x tasks: - - name: compile_test_and_package_serial_TG - distros: - - rhel83-zseries-large - - name: compile_integration_and_test_no_audit_parallel_stream_TG + - name: small_compile_test_and_package_serial_no_unittests_TG distros: - rhel83-zseries-large - name: .aggregation .common @@ -202,3 +205,21 @@ buildvariants: - name: .publish_crypt - name: .publish - name: generate_buildid_to_debug_symbols_mapping + +- <<: *enterprise-rhel-83-s390x-template + name: enterprise-rhel-83-s390x-dynamic + display_name: Enterprise RHEL 8.3 s390x Shared + expansions: + <<: *enterprise-rhel-83-s390x-expansions-template + compile_flags: >- + --link-model=dynamic + --ssl + MONGO_DISTMOD=rhel83 + -j$(echo "$(grep -c processor /proc/cpuinfo)/2" | bc) + --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --linker=gold + --use-diagnostic-latches=off + tasks: + - name: compile_test_and_package_serial_TG + distros: + - rhel83-zseries-large diff --git a/etc/evergreen_yml_components/variants/in_memory.yml b/etc/evergreen_yml_components/variants/in_memory.yml index 547676d8cee2d..ac56df2ee645d 100644 --- a/etc/evergreen_yml_components/variants/in_memory.yml +++ b/etc/evergreen_yml_components/variants/in_memory.yml @@ -40,14 +40,13 @@ buildvariants: - name: .change_streams - name: .change_stream_fuzzer - name: .misc_js - - name: .concurrency !.ubsan !.no_txns !.debug_only !.kill_terminate !.requires_wt + - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.requires_wt distros: - rhel80-medium # Some workloads require a lot of memory, use a bigger machine for this suite. 
- name: initial_sync_fuzzer_gen - name: .jscore .common !.decimal - name: jsCore_txns_large_txns_format - name: .jstestfuzz !.initsync - - name: .logical_session_cache - name: .multi_shard .common - name: multi_stmt_txn_jscore_passthrough_with_migration_gen - name: .read_write_concern !.durable_history diff --git a/etc/evergreen_yml_components/variants/misc_release.yml b/etc/evergreen_yml_components/variants/misc_release.yml index d4f7e3863621e..f956cf40b146b 100644 --- a/etc/evergreen_yml_components/variants/misc_release.yml +++ b/etc/evergreen_yml_components/variants/misc_release.yml @@ -8,7 +8,7 @@ buildvariants: - amazon2-test expansions: test_flags: >- - --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,requires_external_data_source + --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,requires_external_data_source,requires_latch_analyzer push_path: linux push_bucket: downloads.mongodb.org push_name: linux @@ -18,6 +18,7 @@ buildvariants: MONGO_DISTMOD=amazon2 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off multiversion_platform: amazon2 multiversion_edition: targeted has_packages: true @@ -45,7 +46,6 @@ buildvariants: - name: .jscore .common - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: multiversion_gen - name: replica_sets_gen - name: .replica_sets .common @@ -74,7 +74,7 @@ buildvariants: archive-mongocryptd-debug # TODO BUILD-13887 should fix uses_pykmip incompatibility. test_flags: >- - --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,uses_pykmip,requires_external_data_source + --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,uses_pykmip,requires_external_data_source,requires_latch_analyzer push_path: linux push_bucket: downloads.10gen.com push_name: linux @@ -84,6 +84,7 @@ buildvariants: MONGO_DISTMOD=amazon2 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -109,7 +110,6 @@ buildvariants: - name: .jscore .common - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: noPassthrough_gen - name: noPassthroughWithMongod_gen - name: .replica_sets .common @@ -142,7 +142,8 @@ buildvariants: MONGO_DISTMOD=amazon2 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source,requires_latch_analyzer has_packages: true packager_script: packager.py packager_arch: aarch64 @@ -168,7 +169,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -204,11 +204,12 @@ buildvariants: MONGO_DISTMOD=amazon2 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" # TODO BUILD-13887 should fix uses_pykmip incompatibility. 
- test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source + test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source,requires_latch_analyzer has_packages: true multiversion_platform: amazon2 multiversion_edition: enterprise @@ -235,8 +236,8 @@ buildvariants: - name: .change_streams - name: .change_stream_fuzzer - name: .misc_js - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only - - name: .concurrency .large !.ubsan !.no_txns !.debug_only + - name: .concurrency !.large !.ubsan !.no_txns + - name: .concurrency .large !.ubsan !.no_txns distros: - amazon2-arm64-large - name: .config_fuzzer !.large @@ -301,46 +302,46 @@ buildvariants: - name: test_packages distros: - ubuntu1804-arm64-build + - name: vector_search - name: .publish - name: generate_buildid_to_debug_symbols_mapping -- name: amazon2022 - display_name: Amazon Linux 2022 +- name: amazon2023 + display_name: Amazon Linux 2023 cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. run_on: - - amazon2022-small + - amazon2023.0-small expansions: push_path: linux push_bucket: downloads.mongodb.org push_name: linux - push_arch: x86_64-amazon2022 - compile_flags: --ssl MONGO_DISTMOD=amazon2022 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source + push_arch: x86_64-amazon2023 + compile_flags: --ssl MONGO_DISTMOD=amazon2023 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source,requires_latch_analyzer has_packages: true packager_script: packager.py packager_arch: x86_64 - packager_distro: amazon2022 + packager_distro: amazon2023 repo_edition: org scons_cache_scope: shared - large_distro_name: amazon2022-large - compile_variant: amazon2022 + large_distro_name: amazon2023.0-large + compile_variant: amazon2023 tasks: - name: compile_test_and_package_serial_no_unittests_TG distros: - - amazon2022-large + - amazon2023.0-large - name: aggregation - name: .auth !.audit !.multiversion - name: causally_consistent_jscore_txns_passthrough - name: .misc_js - name: .concurrency .common distros: - - amazon2022-large + - amazon2023.0-large - name: concurrency_replication_causal_consistency_gen - name: disk_wiredtiger - name: free_monitoring - name: .jscore .common - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -354,37 +355,37 @@ buildvariants: - name: .publish - name: generate_buildid_to_debug_symbols_mapping -- name: enterprise-amazon2022 - display_name: "Enterprise Amazon Linux 2022" +- name: enterprise-amazon2023 + display_name: "Enterprise Amazon Linux 2023" cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. 
modules: - enterprise run_on: - - amazon2022-small + - amazon2023.0-small expansions: additional_package_targets: archive-mongocryptd archive-mongocryptd-debug archive-mh archive-mh-debug push_path: linux push_bucket: downloads.10gen.com push_name: linux - push_arch: x86_64-enterprise-amazon2022 - compile_flags: --ssl MONGO_DISTMOD=amazon2022 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + push_arch: x86_64-enterprise-amazon2023 + compile_flags: --ssl MONGO_DISTMOD=amazon2023 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off crypt_task_compile_flags: SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" # TODO BUILD-13887 should fix uses_pykmip incompatibility. - test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source + test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source,requires_latch_analyzer has_packages: true - multiversion_platform: amazon2022 + multiversion_platform: amazon2023 multiversion_edition: enterprise multiversion_architecture: x86_64 packager_script: packager_enterprise.py packager_arch: x86_64 - packager_distro: amazon2022 + packager_distro: amazon2023 repo_edition: enterprise scons_cache_scope: shared - compile_variant: enterprise-amazon2022 + compile_variant: enterprise-amazon2023 tasks: - name: compile_test_and_package_serial_no_unittests_TG distros: - - amazon2022-large + - amazon2023.0-large - name: test_api_version_compatibility - name: .aggfuzzer !.feature_flag_guarded !.multiversion - name: .aggregation !.feature_flag_guarded @@ -394,10 +395,10 @@ buildvariants: - name: .change_streams - name: .change_stream_fuzzer - name: .misc_js - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only - - name: .concurrency .large !.ubsan !.no_txns !.debug_only + - name: .concurrency !.large !.ubsan !.no_txns + - name: .concurrency .large !.ubsan !.no_txns distros: - - amazon2022-large + - amazon2023.0-large - name: config_fuzzer_concurrency - name: config_fuzzer_jsCore - name: disk_wiredtiger @@ -406,7 +407,7 @@ buildvariants: - name: initial_sync_fuzzer_gen - name: jsCore distros: - - amazon2022-large + - amazon2023.0-large - name: .jscore .common !jsCore !.feature_flag_guarded - name: jsCore_txns_large_txns_format - name: json_schema @@ -418,17 +419,17 @@ buildvariants: - name: .query_fuzzer - name: .read_write_concern .large distros: - - amazon2022-large + - amazon2023.0-large - name: .read_write_concern !.large - name: .replica_sets !.encrypt !.auth distros: - - amazon2022-large + - amazon2023.0-large - name: replica_sets_api_version_jscore_passthrough_gen - name: replica_sets_reconfig_jscore_passthrough_gen - name: retryable_writes_jscore_passthrough_gen - name: retryable_writes_jscore_stepdown_passthrough_gen distros: - - amazon2022-large + - amazon2023.0-large - name: .read_only - name: .rollbackfuzzer - name: sasl @@ -444,46 +445,47 @@ buildvariants: - name: .stitch - name: .crypt distros: - - amazon2022-large + - amazon2023.0-large - name: .publish_crypt - name: secondary_reads_passthrough_gen - name: server_discovery_and_monitoring_json_test_TG - name: .serverless distros: - - amazon2022-large + - amazon2023.0-large - name: server_selection_json_test_TG distros: - - amazon2022-large + - amazon2023.0-large - name: test_packages distros: - 
ubuntu2204-large + - name: vector_search - name: .publish - name: generate_buildid_to_debug_symbols_mapping -- name: amazon2022-arm64 - display_name: Amazon Linux 2022 arm64 +- name: amazon2023-arm64 + display_name: Amazon Linux 2023 arm64 cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. run_on: - - amazon2022-arm64-small + - amazon2023.0-arm64-small expansions: push_path: linux push_bucket: downloads.mongodb.org push_name: linux - push_arch: aarch64-amazon2022 - compile_flags: --ssl MONGO_DISTMOD=amazon2022 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source + push_arch: aarch64-amazon2023 + compile_flags: --ssl MONGO_DISTMOD=amazon2023 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_external_data_source,requires_latch_analyzer has_packages: true packager_script: packager.py packager_arch: aarch64 - packager_distro: amazon2022 + packager_distro: amazon2023 repo_edition: org scons_cache_scope: shared - large_distro_name: amazon2022-arm64-large - compile_variant: amazon2022-arm64 + large_distro_name: amazon2023.0-arm64-large + compile_variant: amazon2023-arm64 tasks: - name: compile_test_and_package_serial_no_unittests_TG distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: aggregation !.feature_flag_guarded - name: .auth !.audit !.multiversion - name: sharding_auth_gen @@ -491,13 +493,12 @@ buildvariants: - name: .misc_js - name: .concurrency .common distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: concurrency_replication_causal_consistency_gen - name: disk_wiredtiger - name: free_monitoring - name: .jscore .common - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -511,37 +512,37 @@ buildvariants: - name: .publish - name: generate_buildid_to_debug_symbols_mapping -- name: enterprise-amazon2022-arm64 - display_name: "Enterprise Amazon Linux 2022 arm64" +- name: enterprise-amazon2023-arm64 + display_name: "Enterprise Amazon Linux 2023 arm64" cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. modules: - enterprise run_on: - - amazon2022-arm64-small + - amazon2023.0-arm64-small expansions: additional_package_targets: archive-mongocryptd archive-mongocryptd-debug archive-mh archive-mh-debug push_path: linux push_bucket: downloads.10gen.com push_name: linux - push_arch: aarch64-enterprise-amazon2022 - compile_flags: --ssl MONGO_DISTMOD=amazon2022 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + push_arch: aarch64-enterprise-amazon2023 + compile_flags: --ssl MONGO_DISTMOD=amazon2023 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off crypt_task_compile_flags: SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" # TODO BUILD-13887 should fix uses_pykmip incompatibility. 
- test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source + test_flags: --excludeWithAnyTags=incompatible_with_amazon_linux,requires_ldap_pool,uses_pykmip,requires_v4_0,requires_external_data_source,requires_latch_analyzer has_packages: true - multiversion_platform: amazon2022 + multiversion_platform: amazon2023 multiversion_edition: enterprise multiversion_architecture: aarch64 packager_script: packager_enterprise.py packager_arch: aarch64 - packager_distro: amazon2022 + packager_distro: amazon2023 repo_edition: enterprise scons_cache_scope: shared - compile_variant: enterprise-amazon2022-arm64 + compile_variant: enterprise-amazon2023-arm64 tasks: - name: compile_test_and_package_serial_no_unittests_TG distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: test_api_version_compatibility - name: .aggfuzzer !.feature_flag_guarded !.multiversion - name: .aggregation !.feature_flag_guarded @@ -551,21 +552,21 @@ buildvariants: - name: .change_streams - name: .change_stream_fuzzer - name: .misc_js - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only - - name: .concurrency .large !.ubsan !.no_txns !.debug_only + - name: .concurrency !.large !.ubsan !.no_txns + - name: .concurrency .large !.ubsan !.no_txns distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: .config_fuzzer !.large - name: .config_fuzzer .large distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: disk_wiredtiger - name: .encrypt - name: idl_tests - name: initial_sync_fuzzer_gen - name: jsCore distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: .jscore .common !jsCore !.feature_flag_guarded - name: jsCore_min_batch_repeat_queries_ese_gsm - name: jsCore_txns_large_txns_format @@ -578,17 +579,17 @@ buildvariants: - name: .query_fuzzer - name: .read_write_concern .large distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: .read_write_concern !.large - name: .replica_sets !.encrypt !.auth distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: replica_sets_api_version_jscore_passthrough_gen - name: replica_sets_reconfig_jscore_passthrough_gen - name: retryable_writes_jscore_passthrough_gen - name: retryable_writes_jscore_stepdown_passthrough_gen distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: .read_only - name: .rollbackfuzzer - name: sasl @@ -604,19 +605,20 @@ buildvariants: - name: .stitch - name: .crypt distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: .publish_crypt - name: secondary_reads_passthrough_gen - name: server_discovery_and_monitoring_json_test_TG - name: .serverless distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: server_selection_json_test_TG distros: - - amazon2022-arm64-large + - amazon2023.0-arm64-large - name: test_packages distros: - ubuntu2204-arm64-large + - name: vector_search - name: .publish - name: generate_buildid_to_debug_symbols_mapping @@ -630,12 +632,13 @@ buildvariants: push_bucket: downloads.mongodb.org push_name: linux push_arch: x86_64-debian10 - test_flags: --excludeWithAnyTags=requires_external_data_source + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer compile_flags: >- --ssl MONGO_DISTMOD=debian10 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off multiversion_platform: debian10 multiversion_edition: 
targeted has_packages: true @@ -663,7 +666,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common !.decimal !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: multiversion_gen - name: replica_sets_gen - name: .replica_sets .common @@ -700,7 +702,8 @@ buildvariants: MONGO_DISTMOD=debian10 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" @@ -728,7 +731,6 @@ buildvariants: - debian10-large - name: .jscore .common !.decimal !.sharding !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough - name: sasl @@ -757,7 +759,8 @@ buildvariants: MONGO_DISTMOD=debian11 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer multiversion_platform: debian11 multiversion_edition: targeted has_packages: true @@ -785,7 +788,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common !.decimal !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: multiversion_gen - name: replica_sets_gen - name: .replica_sets .common @@ -821,7 +823,8 @@ buildvariants: --ssl MONGO_DISTMOD=debian11 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -845,7 +848,6 @@ buildvariants: - name: .encrypt !.replica_sets !.aggregation !.sharding !.jscore - name: .jscore .common !.decimal !.sharding !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough - name: sasl @@ -873,7 +875,8 @@ buildvariants: --ssl MONGO_DISTMOD=rhel70 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer multiversion_platform: rhel70 multiversion_edition: targeted has_packages: true @@ -901,7 +904,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: multiversion_gen - name: replica_sets_gen - name: .replica_sets .common @@ -913,8 +915,7 @@ buildvariants: - name: test_packages distros: - ubuntu2004-package - #TODO: BF-24515 renable once fixed - #- name: selinux_rhel7_org + - name: selinux_rhel7_org - name: .publish - name: generate_buildid_to_debug_symbols_mapping @@ -925,10 +926,12 @@ buildvariants: run_on: - rhel76-test expansions: + test_flags: --excludeWithAnyTags=requires_latch_analyzer compile_flags: >- --ssl MONGO_DISTMOD=rhel70 -j$(grep 
-c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off compile_variant: rhel76_compile_rhel70 tasks: - name: compile_and_archive_dist_test_then_package_TG @@ -952,10 +955,11 @@ buildvariants: --opt=on -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off tooltags: "" build_mongoreplay: true test_flags: >- - --excludeWithAnyTags=requires_os_access + --excludeWithAnyTags=requires_os_access,requires_latch_analyzer compile_variant: ubi8 tasks: - name: compile_and_archive_dist_test_then_package_TG @@ -982,7 +986,8 @@ buildvariants: --ssl MONGO_DISTMOD=rhel80 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer multiversion_platform: rhel80 multiversion_edition: targeted has_packages: true @@ -1010,7 +1015,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: multiversion_gen - name: replica_sets_gen - name: .replica_sets .common @@ -1022,6 +1026,7 @@ buildvariants: - name: test_packages distros: - ubuntu2004-package + - name: selinux_rhel8_org - name: .publish - name: generate_buildid_to_debug_symbols_mapping @@ -1046,7 +1051,8 @@ buildvariants: --ssl MONGO_DISTMOD=rhel80 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CFLAGS="-fno-gnu-unique" @@ -1080,8 +1086,8 @@ buildvariants: - name: .change_streams - name: .change_stream_fuzzer - name: .misc_js - - name: .concurrency !.large !.ubsan !.no_txns !.debug_only - - name: .concurrency .large !.ubsan !.no_txns !.debug_only + - name: .concurrency !.large !.ubsan !.no_txns + - name: .concurrency .large !.ubsan !.no_txns distros: - rhel80-medium - name: disk_wiredtiger @@ -1148,9 +1154,8 @@ buildvariants: - name: test_packages distros: - ubuntu2004-package - - #TODO: BF-24515 renable once fixed - #- name: selinux_rhel8_enterprise + - name: vector_search + - name: selinux_rhel8_enterprise - name: .publish - name: generate_buildid_to_debug_symbols_mapping @@ -1168,7 +1173,8 @@ buildvariants: --ssl MONGO_DISTMOD=rhel82 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer has_packages: true packager_script: packager.py packager_arch: aarch64 @@ -1194,7 +1200,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -1229,7 +1234,8 @@ buildvariants: --ssl MONGO_DISTMOD=rhel82 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off 
+ test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" @@ -1256,7 +1262,6 @@ buildvariants: - name: .jscore .common !.decimal !.sharding !.feature_flag_guarded - name: jsCore_txns_large_txns_format - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough - name: .replica_sets .multi_oplog @@ -1274,6 +1279,7 @@ buildvariants: - name: test_packages distros: - ubuntu1804-arm64-build + - name: vector_search - name: .publish - name: generate_buildid_to_debug_symbols_mapping @@ -1287,8 +1293,8 @@ buildvariants: push_bucket: downloads.mongodb.org push_name: linux push_arch: x86_64-rhel90 - compile_flags: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + compile_flags: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer multiversion_platform: rhel90 multiversion_edition: targeted has_packages: true @@ -1316,7 +1322,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common - name: .jstestfuzz .common !.multiversion - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -1327,8 +1332,7 @@ buildvariants: - name: test_packages distros: - ubuntu2204-large - #TODO: BF-24515 renable once fixed - #- name: selinux_rhel9_org + - name: selinux_rhel9_org - name: .publish - name: generate_buildid_to_debug_symbols_mapping @@ -1345,8 +1349,8 @@ buildvariants: push_bucket: downloads.10gen.com push_name: linux push_arch: x86_64-enterprise-rhel90 - compile_flags: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + compile_flags: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" multiversion_platform: rhel90 multiversion_edition: enterprise @@ -1380,7 +1384,6 @@ buildvariants: - name: jsCore_txns_large_txns_format - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: .ocsp - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough @@ -1398,9 +1401,8 @@ buildvariants: - name: test_packages distros: - ubuntu2204-large - - #TODO: BF-24515 renable once fixed - #- name: selinux_rhel9_enterprise + - name: vector_search + - name: selinux_rhel9_enterprise - name: .publish - name: generate_buildid_to_debug_symbols_mapping @@ -1419,7 +1421,8 @@ buildvariants: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer has_packages: true packager_script: packager.py packager_arch: 
aarch64 @@ -1445,7 +1448,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -1459,7 +1461,7 @@ buildvariants: - name: .publish - name: generate_buildid_to_debug_symbols_mapping -- name: enterprise-rhel90-arm64 +- name: enterprise-rhel-90-arm64 display_name: "Enterprise RHEL 9.0 arm64" cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. modules: @@ -1480,7 +1482,8 @@ buildvariants: --ssl MONGO_DISTMOD=rhel90 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" @@ -1492,7 +1495,7 @@ buildvariants: repo_edition: enterprise scons_cache_scope: shared large_distro_name: rhel90-arm64-large - compile_variant: enterprise-rhel90-arm64 + compile_variant: enterprise-rhel-90-arm64 tasks: - name: compile_test_and_package_serial_no_unittests_TG distros: @@ -1508,7 +1511,6 @@ buildvariants: - name: .jscore .common !.decimal !.sharding - name: jsCore_txns_large_txns_format - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough - name: .replica_sets .multi_oplog @@ -1527,6 +1529,7 @@ buildvariants: - name: test_packages distros: - ubuntu2204-arm64-small + - name: vector_search - name: .publish - name: generate_buildid_to_debug_symbols_mapping @@ -1549,7 +1552,8 @@ buildvariants: -j$(echo $(grep -c ^processor /proc/cpuinfo) / 2 | bc) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --linker=gold - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer multiversion_platform: suse12 multiversion_edition: targeted has_packages: true @@ -1580,7 +1584,6 @@ buildvariants: - name: .jscore .common !.decimal !.feature_flag_guarded - name: .jstestfuzz .common - name: multiversion_gen - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -1618,7 +1621,8 @@ buildvariants: -j$(echo $(grep -c ^processor /proc/cpuinfo) / 2 | bc) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --linker=gold - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -1641,7 +1645,6 @@ buildvariants: - name: .encrypt !.replica_sets !.aggregation !.sharding !.jscore - name: .jscore .common !.decimal !.sharding !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough - name: sasl @@ -1677,7 +1680,8 @@ buildvariants: MONGO_DISTMOD=suse15 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: 
--excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -1698,7 +1702,6 @@ buildvariants: - name: .encrypt !.replica_sets !.aggregation !.sharding !.jscore - name: .jscore .common !.decimal !.sharding !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough - name: sasl @@ -1727,7 +1730,8 @@ buildvariants: MONGO_DISTMOD=suse15 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer multiversion_platform: suse15 multiversion_edition: targeted has_packages: true @@ -1754,7 +1758,6 @@ buildvariants: - name: free_monitoring - name: .jscore .common !.decimal !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: multiversion_gen - name: replica_sets_gen - name: .replica_sets .common @@ -1786,9 +1789,10 @@ buildvariants: MONGO_DISTMOD=ubuntu1804 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off multiversion_platform: ubuntu1804 multiversion_edition: targeted - test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source + test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source,requires_latch_analyzer has_packages: true packager_script: packager.py packager_arch: x86_64 @@ -1816,7 +1820,6 @@ buildvariants: - name: .jscore .common - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: multiversion_gen - name: .powercycle - name: replica_sets_gen @@ -1857,13 +1860,14 @@ buildvariants: MONGO_DISTMOD=ubuntu1804 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" multiversion_platform: ubuntu1804 multiversion_edition: enterprise - test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source + test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source,requires_latch_analyzer has_packages: true packager_script: packager_enterprise.py packager_arch: x86_64 @@ -1896,7 +1900,6 @@ buildvariants: - name: jsCore_auth - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: .multiversion_fuzzer - name: .multiversion_passthrough - name: .ocsp @@ -1933,11 +1936,12 @@ buildvariants: MONGO_DISTMOD=ubuntu1804 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" - test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source + test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer resmoke_jobs_max: 4 # Avoid starting too many mongod's on ARM test servers has_packages: true packager_script: packager_enterprise.py @@ -1962,7 
+1966,6 @@ buildvariants: - name: fle - name: .jscore .common !.auth !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -1994,7 +1997,8 @@ buildvariants: MONGO_DISTMOD=ubuntu1804 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_external_data_source,requires_latch_analyzer resmoke_jobs_max: 8 # Avoid starting too many mongod's on ARM test servers has_packages: true packager_script: packager.py @@ -2032,7 +2036,8 @@ buildvariants: MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_increased_memlock_limits,requires_latch_analyzer multiversion_platform: ubuntu2204 multiversion_edition: targeted has_packages: true @@ -2059,7 +2064,6 @@ buildvariants: - name: .jscore .common - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec # - name: multiversion_gen - name: replica_sets_gen - name: replica_sets_jscore_passthrough @@ -2089,7 +2093,8 @@ buildvariants: MONGO_DISTMOD=ubuntu2004 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer multiversion_platform: ubuntu2004 multiversion_edition: targeted has_packages: true @@ -2116,7 +2121,6 @@ buildvariants: - name: .jscore .common - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec # - name: multiversion_gen - name: replica_sets_gen - name: replica_sets_jscore_passthrough @@ -2153,7 +2157,8 @@ buildvariants: --ssl MONGO_DISTMOD=ubuntu2004 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -2185,7 +2190,6 @@ buildvariants: - name: jsCore_auth - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: .ocsp - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough @@ -2220,7 +2224,8 @@ buildvariants: MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -2252,7 +2257,6 @@ buildvariants: - name: jsCore_auth - name: .jstestfuzz .common - name: libunwind_tests - - name: .logical_session_cache .one_sec - name: .ocsp - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough @@ -2286,7 +2290,8 @@ 
buildvariants: --ssl MONGO_DISTMOD=ubuntu2004 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: >- SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" @@ -2314,7 +2319,6 @@ buildvariants: - name: fle - name: .jscore .common !.auth !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -2345,7 +2349,8 @@ buildvariants: MONGO_DISTMOD=ubuntu2004 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer resmoke_jobs_max: 8 # Avoid starting too many mongod's on ARM test servers has_packages: true packager_script: packager.py @@ -2382,8 +2387,8 @@ buildvariants: push_bucket: downloads.10gen.com push_name: linux push_arch: aarch64-enterprise-ubuntu2204 - compile_flags: --ssl MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + compile_flags: --ssl MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer crypt_task_compile_flags: SHLINKFLAGS_EXTRA="-Wl,-Bsymbolic -Wl,--no-gnu-unique" CCFLAGS="-fno-gnu-unique" resmoke_jobs_max: 4 # Avoid starting too many mongod's on ARM test servers has_packages: true @@ -2409,7 +2414,6 @@ buildvariants: - name: fle - name: .jscore .common !.auth !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: replica_sets_gen - name: .replica_sets .common - name: .sharding .txns @@ -2438,8 +2442,8 @@ buildvariants: push_bucket: downloads.mongodb.org push_name: linux push_arch: aarch64-ubuntu2204 - compile_flags: --ssl MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars - test_flags: --excludeWithAnyTags=requires_external_data_source + compile_flags: --ssl MONGO_DISTMOD=ubuntu2204 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars --use-diagnostic-latches=off + test_flags: --excludeWithAnyTags=requires_external_data_source,requires_latch_analyzer resmoke_jobs_max: 8 # Avoid starting too many mongod's on ARM test servers has_packages: true packager_script: packager.py @@ -2474,9 +2478,7 @@ buildvariants: push_bucket: downloads.mongodb.org push_name: windows push_arch: x86_64 - multiversion_platform: windows_x86_64-2008plus-ssl - multiversion_platform_42_or_later: windows_x86_64-2012plus - multiversion_platform_44_or_later: windows + multiversion_platform: windows multiversion_edition: base content_type: application/zip compile_flags: >- @@ -2484,12 +2486,13 @@ buildvariants: MONGO_DISTMOD=windows -j$(bc <<< "$(grep -c '^processor' /proc/cpuinfo) / 1.5") --win-version-min=win10 + --use-diagnostic-latches=off num_scons_link_jobs_available: 0.25 python: 
'/cygdrive/c/python/python37/python.exe' ext: zip scons_cache_scope: shared large_distro_name: windows-vsCurrent-large - test_flags: &windows_common_test_excludes --excludeWithAnyTags=incompatible_with_windows_tls,requires_external_data_source + test_flags: &windows_common_test_excludes --excludeWithAnyTags=incompatible_with_windows_tls,requires_external_data_source,requires_latch_analyzer compile_variant: windows tasks: - name: compile_test_and_package_serial_no_unittests_TG @@ -2502,7 +2505,7 @@ buildvariants: - name: .misc_js # Some concurrency workloads require a lot of memory, so we use machines # with more RAM for these suites. - - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common !.debug_only + - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common distros: - windows-vsCurrent-large - name: .concurrency .common @@ -2549,6 +2552,7 @@ buildvariants: LIBPATH="c:/sasl/lib" -j$(bc <<< "$(grep -c '^processor' /proc/cpuinfo) / 1.5") --win-version-min=win10 + --use-diagnostic-latches=off num_scons_link_jobs_available: 0.25 python: '/cygdrive/c/python/python37/python.exe' ext: zip @@ -2577,7 +2581,7 @@ buildvariants: - name: .misc_js # Some concurrency workloads require a lot of memory, so we use machines # with more RAM for these suites. - - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common !.debug_only + - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.common distros: - windows-vsCurrent-large - name: .concurrency .common @@ -2612,7 +2616,7 @@ buildvariants: run_on: - macos-1100 expansions: - test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_external_data_source + test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_external_data_source,requires_latch_analyzer push_path: osx push_bucket: downloads.mongodb.org push_name: macos @@ -2623,6 +2627,7 @@ buildvariants: -j$(sysctl -n hw.logicalcpu) --libc++ --variables-files=etc/scons/xcode_macosx.vars + --use-diagnostic-latches=off resmoke_jobs_max: 6 compile_variant: macos tasks: @@ -2633,7 +2638,8 @@ buildvariants: - name: .causally_consistent !.sharding - name: .change_streams - name: .misc_js - - name: .concurrency !.ubsan !.no_txns !.debug_only !.kill_terminate + # TODO(SERVER-78135): remove !.cursor_sweeps. 
+ - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.cursor_sweeps - name: disk_wiredtiger - name: free_monitoring - name: initial_sync_fuzzer_gen @@ -2643,7 +2649,6 @@ buildvariants: - name: .jstestfuzz .interrupt - name: .jstestfuzz .common - name: .jstestfuzz .session - - name: .logical_session_cache .one_sec - name: .query_fuzzer - name: .read_write_concern !.linearize - name: replica_sets_gen @@ -2657,9 +2662,7 @@ buildvariants: - name: .sharding .txns - name: .ssl - name: .stitch - - name: unittest_shell_hang_analyzer_gen - name: push - - name: generate_buildid_to_debug_symbols_mapping - name: macos-arm64 display_name: macOS arm64 @@ -2667,7 +2670,7 @@ buildvariants: run_on: - macos-1100-arm64 expansions: - test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_external_data_source + test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_external_data_source,requires_latch_analyzer push_path: osx push_bucket: downloads.mongodb.org push_name: macos @@ -2678,6 +2681,7 @@ buildvariants: -j$(sysctl -n hw.logicalcpu) --libc++ --variables-files=etc/scons/xcode_macosx_arm.vars + --use-diagnostic-latches=off resmoke_jobs_max: 6 compile_variant: macos-arm64 tasks: @@ -2688,7 +2692,8 @@ buildvariants: - name: .causally_consistent !.sharding - name: .change_streams - name: .misc_js - - name: .concurrency !.ubsan !.no_txns !.debug_only !.kill_terminate + # TODO(SERVER-78135): remove !.cursor_sweeps. + - name: .concurrency !.ubsan !.no_txns !.kill_terminate !.cursor_sweeps - name: disk_wiredtiger - name: free_monitoring - name: initial_sync_fuzzer_gen @@ -2698,7 +2703,6 @@ buildvariants: - name: .jstestfuzz .interrupt - name: .jstestfuzz .common - name: .jstestfuzz .session - - name: .logical_session_cache .one_sec - name: .query_fuzzer - name: .read_write_concern !.linearize - name: replica_sets_gen @@ -2712,9 +2716,7 @@ buildvariants: - name: .sharding .txns - name: .ssl - name: .stitch - - name: unittest_shell_hang_analyzer_gen - name: push - - name: generate_buildid_to_debug_symbols_mapping - name: enterprise-macos display_name: Enterprise macOS @@ -2724,7 +2726,7 @@ buildvariants: run_on: - macos-1100 expansions: - test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm,requires_external_data_source + test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm,requires_external_data_source,requires_latch_analyzer additional_package_targets: >- archive-mongocryptd archive-mongocryptd-debug @@ -2740,6 +2742,7 @@ buildvariants: -j$(sysctl -n hw.logicalcpu) --libc++ --variables-files=etc/scons/xcode_macosx.vars + --use-diagnostic-latches=off resmoke_jobs_max: 6 compile_variant: enterprise-macos tasks: @@ -2748,11 +2751,9 @@ buildvariants: - name: audit - name: auth_audit_gen - name: causally_consistent_jscore_txns_passthrough - # TODO: SERVER-66945 Re-enable ESE on enterprise macos - # - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore + - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore - name: .jscore .common !.decimal !.sharding !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: mqlrun - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough @@ -2760,7 +2761,6 @@ buildvariants: - name: push - name: .crypt - name: .publish_crypt - - name: generate_buildid_to_debug_symbols_mapping - name: enterprise-macos-arm64 display_name: Enterprise macOS arm64 @@ -2770,7 +2770,7 @@ buildvariants: run_on: - macos-1100-arm64 expansions: - test_flags: 
--excludeWithAnyTags=incompatible_with_macos,requires_gcm,requires_external_data_source + test_flags: --excludeWithAnyTags=incompatible_with_macos,requires_gcm,requires_external_data_source,requires_latch_analyzer additional_package_targets: >- archive-mongocryptd archive-mongocryptd-debug @@ -2786,6 +2786,7 @@ buildvariants: -j$(sysctl -n hw.logicalcpu) --libc++ --variables-files=etc/scons/xcode_macosx_arm.vars + --use-diagnostic-latches=off resmoke_jobs_max: 6 compile_variant: enterprise-macos-arm64 tasks: @@ -2797,7 +2798,6 @@ buildvariants: - name: .encrypt !.replica_sets !.sharding !.aggregation !.jscore - name: .jscore .common !.decimal !.sharding !.feature_flag_guarded - name: .jstestfuzz .common - - name: .logical_session_cache .one_sec - name: mqlrun - name: replica_sets_auth_gen - name: replica_sets_jscore_passthrough @@ -2805,7 +2805,6 @@ buildvariants: - name: push - name: .crypt - name: .publish_crypt - - name: generate_buildid_to_debug_symbols_mapping - name: enterprise-rhel-82-arm64-grpc display_name: "Enterprise RHEL 8.2 arm64 GRPC" @@ -2816,12 +2815,14 @@ buildvariants: - rhel82-arm64-large stepback: false expansions: + test_flags: --excludeWithAnyTags=requires_latch_analyzer compile_flags: >- --ssl --dbg=on MONGO_DISTMOD=rhel80 -j$(grep -c ^processor /proc/cpuinfo) --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off --link-model=dynamic ENABLE_GRPC_BUILD=1 --use-libunwind=off @@ -2832,3 +2833,39 @@ buildvariants: - name: compile_test_and_package_parallel_unittest_stream_TG - name: compile_test_and_package_parallel_core_stream_TG - name: compile_test_and_package_parallel_dbtest_stream_TG + +- name: enterprise-amazon2-streams + display_name: "Amazon Linux 2 enterprise build with streams" + cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. + modules: + - enterprise + run_on: + - amazon2-test + expansions: + test_flags: >- + --excludeWithAnyTags=SERVER-34286,incompatible_with_amazon_linux,uses_pykmip,requires_external_data_source + push_path: linux + push_bucket: downloads.10gen.com + push_name: linux + push_arch: x86_64-enterprise-amazon2-streams + compile_flags: >- + --ssl + MONGO_DISTMOD=amazon2 + -j$(grep -c ^processor /proc/cpuinfo) + --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --streams-release-build + multiversion_platform: amazon2 + multiversion_edition: enterprise-streams + has_packages: true + packager_script: packager_enterprise.py + packager_arch: x86_64 + packager_distro: amazon2 + repo_edition: enterprise + scons_cache_scope: shared + compile_variant: enterprise-amazon2-streams + tasks: + - name: compile_test_and_package_serial_no_unittests_TG + distros: + - amazon2-build + - name: .publish + - name: generate_buildid_to_debug_symbols_mapping diff --git a/etc/evergreen_yml_components/variants/sanitizer.yml b/etc/evergreen_yml_components/variants/sanitizer.yml index 8358e7c175b6a..29c5eea251e35 100644 --- a/etc/evergreen_yml_components/variants/sanitizer.yml +++ b/etc/evergreen_yml_components/variants/sanitizer.yml @@ -1,6 +1,16 @@ # Build variant definitions for vanilla sanitizers that can be used across # release and dev environments. +variables: +# If you add anything to san_options, make sure the appropriate changes are +# also made to SConstruct. 
+# and also to the san_options in evergreen.yml and compile_static_analysis.yml +- aubsan_options: &aubsan_options + >- + UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" + LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1" + ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:strict_string_checks=true:detect_invalid_pointer_pairs=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" + buildvariants: - name: rhel80-asan @@ -11,9 +21,7 @@ buildvariants: stepback: true expansions: lang_environment: LANG=C - san_options: >- - LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" - ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:strict_string_checks=true:detect_invalid_pointer_pairs=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" + san_options: *aubsan_options compile_flags: >- --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars --opt=on @@ -50,11 +58,7 @@ buildvariants: archive-mongocryptd archive-mongocryptd-debug lang_environment: LANG=C - # If you add anything to san_options, make sure the appropriate changes are - # also made to SConstruct. - san_options: >- - LSAN_OPTIONS="suppressions=etc/lsan.suppressions:report_objects=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" - ASAN_OPTIONS="detect_leaks=1:check_initialization_order=true:strict_init_order=true:abort_on_error=1:disable_coredump=0:handle_abort=1:strict_string_checks=true:detect_invalid_pointer_pairs=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" + san_options: *aubsan_options compile_flags: >- --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars --dbg=on @@ -65,7 +69,7 @@ buildvariants: --ocsp-stapling=off --enable-free-mon=on -j$(grep -c ^processor /proc/cpuinfo) - test_flags: --excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling + test_flags: --excludeWithAnyTags=requires_fast_memory,requires_ocsp_stapling,requires_increased_memlock_limits multiversion_platform: rhel80 multiversion_edition: enterprise resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under ASAN build. @@ -98,7 +102,6 @@ buildvariants: - name: jsCore_min_batch_repeat_queries_ese_gsm - name: jsCore_txns_large_txns_format - name: json_schema - - name: .logical_session_cache - name: .multi_shard .common - name: multiversion_gen - name: .multiversion_fuzzer @@ -141,9 +144,7 @@ buildvariants: archive-mongocryptd archive-mongocryptd-debug lang_environment: LANG=C - # If you add anything to san_options, make sure the appropriate changes are - # also made to SConstruct. - san_options: UBSAN_OPTIONS="print_stacktrace=1:external_symbolizer_path=/opt/mongodbtoolchain/v4/bin/llvm-symbolizer" + san_options: *aubsan_options compile_flags: >- --variables-files=etc/scons/mongodbtoolchain_stable_clang.vars --dbg=on @@ -153,7 +154,7 @@ buildvariants: --ocsp-stapling=off --enable-free-mon=on -j$(grep -c ^processor /proc/cpuinfo) - test_flags: --excludeWithAnyTags=requires_ocsp_stapling + test_flags: --excludeWithAnyTags=requires_ocsp_stapling,requires_increased_memlock_limits multiversion_platform: rhel80 multiversion_edition: enterprise resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under UBSAN build. 
@@ -166,6 +167,7 @@ buildvariants: - name: compile_integration_and_test_parallel_stream_TG distros: - rhel80-large + - name: run_pretty_printer_tests - name: .aggregation !.feature_flag_guarded - name: .auth - name: audit @@ -182,7 +184,6 @@ buildvariants: - name: jsCore_min_batch_repeat_queries_ese_gsm - name: jsCore_txns_large_txns_format - name: json_schema - - name: .logical_session_cache .one_sec - name: .multi_shard .common - name: multiversion_gen - name: .multiversion_fuzzer @@ -208,3 +209,46 @@ buildvariants: - name: server_discovery_and_monitoring_json_test_TG - name: server_selection_json_test_TG - name: generate_buildid_to_debug_symbols_mapping + +- &rhel80-debug-aubsan-lite_fuzzer-template + name: &rhel80-debug-aubsan-lite_fuzzer rhel80-debug-aubsan-lite_fuzzer + display_name: "{A,UB}SAN Enterprise RHEL 8.0 FUZZER" + cron: "0 4 * * *" # From the ${project_nightly_cron} parameter. + modules: + - enterprise + run_on: + - rhel80-build + stepback: false + expansions: + additional_package_targets: >- + archive-mongocryptd + archive-mongocryptd-debug + lang_environment: LANG=C + toolchain_version: stable + # If you add anything to san_options, make sure the appropriate changes are + # also made to SConstruct. + san_options: *aubsan_options + compile_flags: >- + LINKFLAGS=-nostdlib++ + LIBS=stdc++ + --variables-files=etc/scons/mongodbtoolchain_${toolchain_version}_clang.vars + --dbg=on + --opt=on + --allocator=system + --sanitize=undefined,address,fuzzer + --ssl + --ocsp-stapling=off + -j$(grep -c ^processor /proc/cpuinfo) + test_flags: --excludeWithAnyTags=requires_ocsp_stapling + resmoke_jobs_factor: 0.3 # Avoid starting too many mongod's under {A,UB}SAN build. + hang_analyzer_dump_core: false + scons_cache_scope: shared + separate_debug: off + compile_variant: *rhel80-debug-aubsan-lite_fuzzer + display_tasks: + - name: libfuzzertests! 
+ execution_tasks: + - compile_and_archive_libfuzzertests + - fetch_and_run_libfuzzertests + tasks: + - name: compile_archive_and_run_libfuzzertests_TG diff --git a/etc/evergreen_yml_components/variants/task_generation.yml b/etc/evergreen_yml_components/variants/task_generation.yml index 169561518b387..ae952462c2a3f 100644 --- a/etc/evergreen_yml_components/variants/task_generation.yml +++ b/etc/evergreen_yml_components/variants/task_generation.yml @@ -11,5 +11,9 @@ buildvariants: - rhel80-medium tasks: - name: version_gen + distros: + - ubuntu2004-medium - name: version_burn_in_gen + distros: + - ubuntu2004-medium - name: version_expansions_gen diff --git a/etc/generate_subtasks_config.yml b/etc/generate_subtasks_config.yml index bf2471d40125d..b212c99a5730d 100644 --- a/etc/generate_subtasks_config.yml +++ b/etc/generate_subtasks_config.yml @@ -1,20 +1,19 @@ build_variant_large_distro_exceptions: - amazon - amazon2 - - amazon2022 - - amazon2022-arm64 + - amazon2023 + - amazon2023-arm64 - debian10 - debian11 - enterprise-amazon2 - enterprise-amazon2-arm64 - - enterprise-amazon2022 - - enterprise-amazon2022-arm64 + - enterprise-amazon2023 + - enterprise-amazon2023-arm64 - enterprise-debian10-64 - enterprise-debian11-64 - enterprise-linux-64-amazon-ami - enterprise-macos - enterprise-macos-rosetta-2 - - enterprise-macos-cxx20 - enterprise-macos-arm64 - enterprise-rhel-67-s390x - enterprise-rhel-70-64-bit diff --git a/etc/iwyu_mapping.imp b/etc/iwyu_mapping.imp new file mode 100644 index 0000000000000..75dcd55d6196d --- /dev/null +++ b/etc/iwyu_mapping.imp @@ -0,0 +1,21 @@ +[ + {include: ["\"mongo/platform/compiler_gcc.h\"", "private", "\"mongo/platform/compiler.h\"", "public"]}, + {include: ["\"float.h\"", "private", "", "public"]}, + {include: ["\"limits.h\"", "private", "", "public"]}, + {include: ["\"stdarg.h\"", "private", "", "public"]}, + + {include: ["", "private", "", "public"]}, + {include: ["\"boost/smart_ptr/detail/operator_bool.hpp\"", "private", "", "public"]}, + {include: ["", "private", "", "public"]}, + {include: ["\"boost/optional/detail/optional_relops.hpp\"", "private", "", "public"]}, + {include: ["", "private", "", "public"]}, + {include: ["\"boost/optional/detail/optional_reference_spec.hpp\"", "private", "", "public"]}, + {include: ["", "private", "", "public"]}, + {include: ["\"boost/tuple/detail/tuple_basic.hpp\"", "private", "", "public"]}, + {include: ["", "private", "", "public"]}, + {include: ["\"boost/program_options/detail/value_semantic.hpp\"", "private", "", "public"]}, + {include: ["", "private", "", "public"]}, + {include: ["\"boost/optional/detail/optional_swap.hpp\"", "private", "", "public"]}, + {include: ["", "private", "", "public"]}, + {include: ["\"boost/preprocessor/iteration/detail/iter/limits/forward1_256.hpp\"", "private", "", "public"]}, +] diff --git a/etc/lsan.suppressions b/etc/lsan.suppressions index 80d58180cae1e..2c88c8cce9c43 100644 --- a/etc/lsan.suppressions +++ b/etc/lsan.suppressions @@ -9,4 +9,9 @@ leak:mongo::Interruptible::installWaitListener # The singleton must live throughout the lifetime of all SSL threads leak::mongo::SSLThreadInfo::ThreadIDManager::idManager +# Thread names leak from threads that are never terminated. 
+leak:mongo::setThreadName +leak:mongo::getThreadName +leak:__cxa_thread_atexit_impl + leak:glob64 diff --git a/etc/pip/components/build_metrics.req b/etc/pip/components/build_metrics.req index 05066289e1858..288240c805b4b 100644 --- a/etc/pip/components/build_metrics.req +++ b/etc/pip/components/build_metrics.req @@ -2,4 +2,4 @@ psutil jsonschema memory_profiler puremagic -tabulate \ No newline at end of file +tabulate diff --git a/etc/pip/components/evergreen.req b/etc/pip/components/evergreen.req index 986f727df9d02..dbf22a50bd10b 100644 --- a/etc/pip/components/evergreen.req +++ b/etc/pip/components/evergreen.req @@ -3,4 +3,4 @@ dataclasses; python_version < "3.7" inject ~= 4.3.1 GitPython ~= 3.1.7 pydantic ~= 1.8.2 -structlog ~= 19.2.0 +structlog ~= 23.1.0 diff --git a/etc/pip/components/idl.req b/etc/pip/components/idl.req index 0b0ef8924b208..13ad323f0cdad 100644 --- a/etc/pip/components/idl.req +++ b/etc/pip/components/idl.req @@ -1,3 +1,2 @@ unittest-xml-reporting >= 2.2.0, <= 3.0.4 -typing <= 3.7.4.3 packaging <= 21.3 diff --git a/etc/pip/components/lint.req b/etc/pip/components/lint.req index 3437ce33961e8..d216c950f158d 100644 --- a/etc/pip/components/lint.req +++ b/etc/pip/components/lint.req @@ -1,13 +1,14 @@ # Linters # Note: These versions are checked by python modules in buildscripts/linter/ -GitPython ~= 3.1.7 -mypy ~= 0.942 -pydocstyle == 6.1.1 -pylint == 2.7.2 -structlog ~= 19.2.0 -typing <= 3.7.4.3 -yamllint == 1.15.0 -yapf == 0.26.0 -evergreen-lint == 0.1.3 -types-setuptools == 57.4.12 -types-requests == 2.26.3 +GitPython ~= 3.1.31 +mypy ~= 1.3.0 +pydocstyle == 6.3.0 +pylint == 2.7.2 # latest is 2.17.4, but that causes pip install requirements to fail +structlog ~= 23.1.0 +yamllint == 1.32.0 +yapf == 0.26.0 # latest is 0.40.1, but that causes CI failures +evergreen-lint == 0.1.4 +types-setuptools == 57.4.12 # latest is 68.0.0.0, but that causes pip install requirements to fail +types-requests == 2.31.0.1 +tqdm +colorama diff --git a/etc/pip/components/platform.req b/etc/pip/components/platform.req index 2d2f7893ad3dc..e14765ddf0afd 100644 --- a/etc/pip/components/platform.req +++ b/etc/pip/components/platform.req @@ -5,5 +5,5 @@ pywin32>=225; sys_platform == "win32" and python_version > "3" cryptography == 2.3; platform_machine == "s390x" or platform_machine == "ppc64le" # Needed for oauthlib to use RSAAlgorithm # Version locked - see SERVER-36618 cryptography == 36.0.2; platform_machine != "s390x" and platform_machine != "ppc64le" -mongo-ninja-python == 1.11.1.4; platform_machine == "x86_64" and sys_platform == "linux" -ninja >= 1.10.0; platform_machine != "x86_64" or sys_platform != "linux" +mongo-ninja-python == 1.11.1.5; (platform_machine == "x86_64" or platform_machine == "aarch64") and sys_platform == "linux" +ninja >= 1.10.0; (platform_machine != "x86_64" and platform_machine != "aarch64") or sys_platform != "linux" diff --git a/etc/pip/components/testing.req b/etc/pip/components/testing.req index 616235a4235ac..2841fd4b30f9e 100644 --- a/etc/pip/components/testing.req +++ b/etc/pip/components/testing.req @@ -25,3 +25,4 @@ mongomock == 4.1.2 pyjwt selenium geckodriver-autoinstaller +pipx==1.2.0 diff --git a/etc/pip/components/tooling_metrics.req b/etc/pip/components/tooling_metrics.req index 0583e72f5c7cb..09c4ba3f77e5d 100644 --- a/etc/pip/components/tooling_metrics.req +++ b/etc/pip/components/tooling_metrics.req @@ -1 +1 @@ -mongo-tooling-metrics == 1.0.7 +mongo-tooling-metrics == 1.0.8 diff --git a/etc/repo_config.yaml b/etc/repo_config.yaml index 
53f6340b618a7..3579077c2eaed 100644 --- a/etc/repo_config.yaml +++ b/etc/repo_config.yaml @@ -128,7 +128,7 @@ repos: repos: - yum/amazon/2/mongodb-org - - name: amazon2022 + - name: amazon2023 type: rpm edition: org bucket: repo.mongodb.org @@ -376,7 +376,7 @@ repos: repos: - yum/amazon/2/mongodb-enterprise - - name: amazon2022 + - name: amazon2023 type: rpm edition: enterprise bucket: repo.mongodb.com diff --git a/etc/system_perf.yml b/etc/system_perf.yml index 8ca61ff128999..c4e29cfe972ed 100755 --- a/etc/system_perf.yml +++ b/etc/system_perf.yml @@ -45,10 +45,10 @@ variables: # _skip_compile_rhel70: &_compile_rhel70 # - name: schedule_global_auto_tasks # variant: task_generation -# _skip_compile_amazon_linux2_arm64: &_compile_amazon_linux2_arm64 +# _skip_compile_amazon_linux2_arm64: &_compile_amazon_linux2_arm64 # - name: schedule_global_auto_tasks # variant: task_generation -# _skip_compile_amazon_linux2_arm64_with_mongocrypt_shlib: &_compile_amazon_linux2_arm64_with_mongocrypt_shlib +# _skip_compile_amazon_linux2_arm64_with_mongocrypt_shlib: &_compile_amazon_linux2_arm64_with_mongocrypt_shlib # - name: schedule_global_auto_tasks # variant: task_generation # _skip_expansions: &_expansion_updates @@ -132,10 +132,11 @@ modules: repo: git@github.com:10gen/mongo-enterprise-modules.git prefix: src/mongo/db/modules branch: master +# Pinned to version 100.7.2 - name: mongo-tools repo: git@github.com:mongodb/mongo-tools.git prefix: mongo-tools/src/github.com/mongodb - branch: master + branch: db8c5c4 - name: PrivateWorkloads repo: git@github.com:10gen/PrivateWorkloads.git prefix: ${workdir}/src @@ -344,7 +345,7 @@ functions: set -o igncr fi; - # set_goenv provides set_goenv(), print_ldflags() and print_tags() used below + # set_goenv provides set_goenv() . ./set_goenv.sh GOROOT="" set_goenv || exit go version @@ -354,7 +355,7 @@ functions: build_tools="$build_tools mongoreplay" fi for i in $build_tools; do - go build -ldflags "$(print_ldflags)" ${args} -tags "$(print_tags ${tooltags})" -o "../../../../../mongodb/bin/$i${exe|}" $i/main/$i.go + go build -o "../../../../../mongodb/bin/$i${exe|}" $i/main/$i.go "../../../../../mongodb/bin/$i${exe|}" --version done - command: shell.exec @@ -391,6 +392,9 @@ functions: EOF fi tar czf mongodb${compile_variant|}.tar.gz mongodb + # Put all matching mongo debug from the build directory in an archive in the same location + # as the library archive (i.e. mongodb/bin). 
+ tar czvf mongodb${compile_variant|}-debugsymbols.tar.gz $(find ./build/cached -name mongo\*.debug -type f) --xform 's:^.*/:mongodb/bin/:' - command: s3.put params: aws_key: ${aws_key} @@ -401,6 +405,16 @@ functions: permissions: public-read content_type: ${content_type|application/x-gzip} display_name: mongodb${compile_variant|}.tar.gz + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: src/mongodb${compile_variant|}-debugsymbols.tar.gz + remote_file: ${project_dir}/${version_id}/${revision}/${platform}/mongodb${compile_variant|}-${version_id}-debugsymbols.tar.gz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: mongo-debugsymbols.tgz ### ### @@ -414,6 +428,9 @@ functions: set -o verbose source "${workdir}/compile_venv/bin/activate" python ./buildscripts/scons.py ${compile_flags|} ${scons_cache_args|} $extra_args SPLIT_DWARF=0 archive-mongo-crypt-dev MONGO_VERSION=${version} DESTDIR=$(pwd)/crypt-lib-${version} PKGDIR=$(pwd) ${patch_compile_flags|} + # Put all matching mongo .debug from the build directory in an archive in the same location + # as the library archive (i.e. lib). + tar czvf mongo-crypt-dev-debugsymbols.tar.gz $(find ./build/cached -name mongo\*.debug -type f) --xform 's:^.*/:lib/:' - command: s3.put params: aws_key: ${aws_key} @@ -424,6 +441,16 @@ functions: permissions: public-read content_type: ${content_type|application/x-gzip} display_name: mongo_crypt_shared_v1-${version|}-${compile_variant|}.tgz + - command: s3.put + params: + aws_key: ${aws_key} + aws_secret: ${aws_secret} + local_file: src/mongo-crypt-dev-debugsymbols.tar.gz + remote_file: ${project_dir}/${version_id}/${revision}/${platform}/mongo_crypt_shared_v1-${compile_variant|}-${version_id}-debugsymbols.tgz + bucket: mciuploads + permissions: public-read + content_type: ${content_type|application/x-gzip} + display_name: mongo_crypt_shared_v1-debugsymbols.tgz ### ## Schedule Tasks ## @@ -1120,7 +1147,7 @@ tasks: test_control: "initialsync-logkeeper" mongodb_setup: "initialsync-logkeeper-short" # Logkeeper dataset with FCV set to 6.0 - mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-6.3.tgz" + mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-7.0.tgz" - name: initialsync-logkeeper-short-fcbis priority: 5 @@ -1130,7 +1157,7 @@ tasks: test_control: "initialsync-logkeeper" mongodb_setup: "initialsync-logkeeper-short-fcbis" # Logkeeper dataset with FCV set to 6.0 - mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-6.3.tgz" + mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-7.0.tgz" - name: initialsync-logkeeper priority: 5 @@ -1162,7 +1189,7 @@ tasks: test_control: "initialsync-logkeeper-short-s3-update" mongodb_setup: "initialsync-logkeeper-short-s3-update" # Update this to Logkeeper dataset with FCV set to latest after each LTS release. 
- mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-6.3.tgz" + mongodb_dataset: "https://dsi-donot-remove.s3-us-west-2.amazonaws.com/InitialSyncLogKeeper/logkeeper-slice-data-mongodb-7.0.tgz" - name: initialsync-logkeeper-snapshot-update priority: 5 @@ -1187,13 +1214,6 @@ tasks: test_control: "initialsync-large" mongodb_setup: "replica-2node-fcbis" - - name: change_streams_throughput - priority: 5 - commands: - - func: f_run_dsi_workload - vars: - test_control: "change_streams_throughput" - - name: change_streams_latency priority: 5 commands: @@ -1264,6 +1284,33 @@ tasks: threads: "1 4", read_cmd: 'true', share_dataset: 'true'} + - name: fast_running_queries + commands: + - func: f_run_dsi_workload + vars: + test_control: mongo-perf.2023-02 + # We can see tests start to peak throughput around 4 threads, but we go much higher to + # purposefully stress out the system. We can see that we can get pretty high concurrency + # levels and maintain throughput, so we want to test this still holds on experimental + # variants like those testing query shape stats. + test_control_params: | + {include_filter_1: fast_running_query, + include_filter_2: core regression, + exclude_filter: single_threaded, + threads: "128", + read_cmd: 'true'} + - name: fast_running_queries_large_dataset + commands: + - func: f_run_dsi_workload + vars: + test_control: mongo-perf.2023-02 + test_control_params: | + {include_filter_1: fast_running_query, + include_filter_2: query_large_dataset, + exclude_filter: none, + threads: "128", + read_cmd: 'true', + share_dataset: 'true'} - name: big_collection commands: - func: f_run_dsi_workload @@ -1554,6 +1601,7 @@ buildvariants: -j$(grep -c ^processor /proc/cpuinfo) --release --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off install-mongocryptd run_on: - "amazon2-xlarge" @@ -1586,6 +1634,7 @@ buildvariants: -j$(grep -c ^processor /proc/cpuinfo) --release --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off --allocator=system --enable-free-mon=off --enterprise-features=fle,search @@ -1602,8 +1651,7 @@ buildvariants: - name: linux-intel-standalone-classic-query-engine display_name: Linux Intel Standalone (Classic Query Engine) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -1641,8 +1689,7 @@ buildvariants: - name: linux-intel-standalone-sbe display_name: Linux Intel Standalone (SBE) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -1661,8 +1708,7 @@ buildvariants: - name: linux-intel-1-node-replSet-classic-query-engine display_name: Linux Intel 1-Node ReplSet (Classic Query Engine) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. 
+ cron: &linux-1-node-repl-cron "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -1686,8 +1732,7 @@ buildvariants: - name: linux-intel-1-node-replSet-sbe display_name: Linux Intel 1-Node ReplSet (SBE) - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -1747,6 +1792,7 @@ buildvariants: -j$(grep -c ^processor /proc/cpuinfo) --release --variables-files=etc/scons/mongodbtoolchain_stable_gcc.vars + --use-diagnostic-latches=off compile_variant: -rhel70 run_on: - rhel70-large @@ -1755,11 +1801,13 @@ buildvariants: - name: atlas-like-replica.2022-10 display_name: M60-like-replica.2022-10 3-Node ReplSet - cron: "0 0 * * 0,4" # 00:00 on Sunday,Thursday + # TODO SERVER-74399 Reduce frequency back to baseline. + # cron: &atlas-like-repl-cron "0 0 * * 0,4" # 00:00 on Sunday,Thursday + cron: &atlas-like-repl-cron "0 0 * * *" # Every day at 00:00 modules: *modules expansions: mongodb_setup: atlas-like-replica.2022-10 - infrastructure_provisioning: M60-like-replica.2022-10 + infrastructure_provisioning: M60-like-replica.2023-04 infrastructure_provisioning_release: 2022-11 workload_setup: 2022-11 platform: linux @@ -1780,6 +1828,67 @@ buildvariants: - name: linkbench - name: linkbench2 + - name: atlas-like-replica-query-stats.2022-10 + display_name: M60-like-replica.2022-10 3-Node ReplSet (Query Stats) + cron: *atlas-like-repl-cron + modules: *modules + expansions: + mongodb_setup: atlas-like-replica-query-stats.2022-10 + infrastructure_provisioning: M60-like-replica.2023-04 + infrastructure_provisioning_release: 2022-11 + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: -arm64 + run_on: + - "rhel70-perf-M60-like" + depends_on: *_compile_amazon_linux2_arm64 + tasks: # Cannot use *3nodetasks because secondary_performance uses a special mongodb setup. 
+ - name: schedule_patch_auto_tasks + - name: schedule_variant_auto_tasks + - name: industry_benchmarks + - name: ycsb_60GB + - name: tpcc + - name: tpcc_majority + - name: linkbench + - name: linkbench2 + + - name: atlas-M30-real + display_name: M30-Atlas ReplSet AWS + cron: "0 0 * * 0,4" # 00:00 on Sunday, Thursday + modules: *modules + expansions: + mongodb_setup: atlas + canaries: none + atlas_setup: M30-repl + use_custom_build: true + infrastructure_provisioning: workload_client_arm.2023-04 + infrastructure_provisioning_release: 2022-11 + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-atlas-large" + depends_on: + - name: compile + variant: compile-amazon2 + - name: schedule_global_auto_tasks + variant: task_generation + - name: compile + variant: compile-amazon-linux2-arm64 + - name: schedule_global_auto_tasks + variant: task_generation + tasks: # Cannot use *3nodetasks because secondary_performance uses a special mongodb setup + - name: schedule_patch_auto_tasks + - name: schedule_variant_auto_tasks + - name: industry_benchmarks + - name: tpcc + - name: linkbench2 + - name: atlas-M60-real display_name: M60-Atlas ReplSet AWS cron: "0 0 * * 0,4" # 00:00 on Sunday, Thursday @@ -1789,14 +1898,13 @@ buildvariants: canaries: none atlas_setup: M60-repl use_custom_build: true - infrastructure_provisioning: workload_client + infrastructure_provisioning: workload_client_arm.2023-04 infrastructure_provisioning_release: 2022-11 workload_setup: 2022-11 platform: linux project_dir: *project_dir storageEngine: wiredTiger compile_variant: "-arm64" - client_compile_variant: "" # Explicitly set this. Otherwise it will use the server version run_on: - "rhel70-perf-atlas-large" depends_on: @@ -1828,7 +1936,7 @@ buildvariants: atlas_setup: M60-repl-azure use_custom_build_azure: true compile_variant: -rhel70 - infrastructure_provisioning: workload_client + infrastructure_provisioning: workload_client_intel.2023-04 infrastructure_provisioning_release: 2022-11 workload_setup: 2022-11 platform: linux @@ -1866,7 +1974,9 @@ buildvariants: - name: linux-standalone.2022-11 display_name: Linux Standalone 2022-11 - cron: "0 0 * * 2,4,6" # Tuesday, Thursday and Saturday at 00:00 + # TODO SERVER-74399 Reduce frequency back to baseline. + # cron: &linux-standalone-cron "0 0 * * 2,4,6" # Tuesday, Thursday and Saturday at 00:00 + cron: &linux-standalone-cron "0 0 * * *" # Everyday at 00:00 modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -1958,8 +2068,7 @@ buildvariants: - name: linux-standalone-classic-query-engine.2022-11 display_name: Linux Standalone (Classic Query Engine) 2022-11 - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -1979,8 +2088,7 @@ buildvariants: - name: linux-standalone-sbe.2022-11 display_name: Linux Standalone (SBE) 2022-11 - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. 
+ cron: "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -1998,14 +2106,13 @@ buildvariants: depends_on: *_compile_amazon_linux2_arm64 tasks: *classic_engine_tasks - - name: linux-1-node-replSet-classic-query-engine.2022-11 - display_name: Linux 1-Node ReplSet (Classic Query Engine) 2022-11 - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). + - name: linux-standalone-sampling-bonsai.2022-11 + display_name: Linux Standalone (Bonsai with Sampling CE) 2022-11 cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. modules: *modules expansions: mongodb_setup_release: 2022-11 - mongodb_setup: single-replica-classic-query-engine + mongodb_setup: standalone-sampling-bonsai infrastructure_provisioning_release: 2022-11 infrastructure_provisioning: single workload_setup: 2022-11 @@ -2017,16 +2124,34 @@ buildvariants: run_on: - "rhel70-perf-single" depends_on: *_compile_amazon_linux2_arm64 - tasks: *classic_engine_1nodereplset_tasks + tasks: *classic_engine_tasks - - name: linux-1-node-replSet-sbe.2022-11 - display_name: Linux 1-Node ReplSet (SBE) 2022-11 - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). + - name: linux-intel-standalone-sampling-bonsai + display_name: Linux Intel Standalone (Bonsai with Sampling CE) cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. modules: *modules expansions: mongodb_setup_release: 2022-11 - mongodb_setup: single-replica-sbe + mongodb_setup: standalone-sampling-bonsai + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: single-intel + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + run_on: + - "rhel70-perf-single" + depends_on: *_compile_amazon2 + tasks: *classic_engine_tasks + + - name: linux-standalone-heuristic-bonsai.2022-11 + display_name: Linux Standalone (Bonsai with Heuristic CE) 2022-11 + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: standalone-heuristic-bonsai infrastructure_provisioning_release: 2022-11 infrastructure_provisioning: single workload_setup: 2022-11 @@ -2038,17 +2163,53 @@ buildvariants: run_on: - "rhel70-perf-single" depends_on: *_compile_amazon_linux2_arm64 + tasks: *classic_engine_tasks + + - name: linux-intel-standalone-heuristic-bonsai + display_name: Linux Intel Standalone (Bonsai with Heuristic CE) + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. 
+ modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: standalone-heuristic-bonsai + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: single-intel + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + run_on: + - "rhel70-perf-single" + depends_on: *_compile_amazon2 + tasks: *classic_engine_tasks + + - name: linux-intel-1-node-replSet-sampling-bonsai + display_name: Linux Intel 1-Node ReplSet (Bonsai with Sampling CE) + cron: *linux-1-node-repl-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: single-replica-sampling-bonsai + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: single-intel + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + run_on: + - "rhel70-perf-single" + depends_on: *_compile_amazon2 tasks: *classic_engine_1nodereplset_tasks - - name: linux-standalone-telemetry - display_name: Linux Standalone 2022-11 (Telemetry) - # Run it twice a day. - # Will make it less frequent when perf is finished. - cron: "0 3,15 * * 0,1,2,3,4,5,6" + - name: linux-1-node-replSet-sampling-bonsai.2022-11 + display_name: Linux 1-Node ReplSet (Bonsai with Sampling CE) 2022-11 + cron: *linux-1-node-repl-cron modules: *modules expansions: mongodb_setup_release: 2022-11 - mongodb_setup: standalone-telemetry + mongodb_setup: single-replica-sampling-bonsai infrastructure_provisioning_release: 2022-11 infrastructure_provisioning: single workload_setup: 2022-11 @@ -2060,23 +2221,34 @@ buildvariants: run_on: - "rhel70-perf-single" depends_on: *_compile_amazon_linux2_arm64 - tasks: - - name: schedule_patch_auto_tasks - - name: schedule_variant_auto_tasks - - name: ycsb_60GB - - name: ycsb_60GB.long - - name: crud_workloads_majority - - name: tpcc + tasks: *classic_engine_1nodereplset_tasks + + - name: linux-intel-1-node-replSet-heuristic-bonsai + display_name: Linux Intel 1-Node ReplSet (Bonsai with Heuristic CE) + cron: *linux-1-node-repl-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: single-replica-heuristic-bonsai + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: single-intel + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + run_on: + - "rhel70-perf-single" + depends_on: *_compile_amazon2 + tasks: *classic_engine_1nodereplset_tasks - - name: linux-1-node-replSet-telemetry - display_name: Linux 1-Node ReplSet 2022-11 (Telemetry) - # Run it twice a day. - # Will make it less frequent when perf is finished. 
- cron: "0 3,15 * * 0,1,2,3,4,5,6" + - name: linux-1-node-replSet-heuristic-bonsai.2022-11 + display_name: Linux 1-Node ReplSet (Bonsai with Heuristic CE) 2022-11 + cron: *linux-1-node-repl-cron modules: *modules expansions: mongodb_setup_release: 2022-11 - mongodb_setup: single-replica-telemetry + mongodb_setup: single-replica-heuristic-bonsai infrastructure_provisioning_release: 2022-11 infrastructure_provisioning: single workload_setup: 2022-11 @@ -2088,26 +2260,17 @@ buildvariants: run_on: - "rhel70-perf-single" depends_on: *_compile_amazon_linux2_arm64 - tasks: - - name: schedule_patch_auto_tasks - - name: schedule_variant_auto_tasks - - name: ycsb_60GB - - name: ycsb_60GB.long - - name: crud_workloads_majority - - name: mixed_workloads - - name: tpcc + tasks: *classic_engine_1nodereplset_tasks - - name: linux-3-node-replSet-telemetry - display_name: Linux 3-Node ReplSet 2022-11 (Telemetry) - # Run it twice a day. - # Will make it less frequent when perf is finished. - cron: "0 3,15 * * 0,1,2,3,4,5,6" + - name: linux-1-node-replSet-classic-query-engine.2022-11 + display_name: Linux 1-Node ReplSet (Classic Query Engine) 2022-11 + cron: "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup_release: 2022-11 - mongodb_setup: replica-telemetry + mongodb_setup: single-replica-classic-query-engine infrastructure_provisioning_release: 2022-11 - infrastructure_provisioning: replica + infrastructure_provisioning: single workload_setup: 2022-11 platform: linux project_dir: *project_dir @@ -2115,29 +2278,68 @@ buildvariants: storageEngine: wiredTiger compile_variant: "-arm64" run_on: - - "rhel70-perf-replset" + - "rhel70-perf-single" + depends_on: *_compile_amazon_linux2_arm64 + tasks: *classic_engine_1nodereplset_tasks + + - name: linux-1-node-replSet-sbe.2022-11 + display_name: Linux 1-Node ReplSet (SBE) 2022-11 + cron: "0 0 * * 4" # 00:00 on Thursday + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: single-replica-sbe + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: single + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-single" + depends_on: *_compile_amazon_linux2_arm64 + tasks: *classic_engine_1nodereplset_tasks + + - name: linux-standalone-query-stats + display_name: Linux Standalone 2022-11 (QueryStats) + # Match the baseline non-query-stats cron + cron: *linux-standalone-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: standalone-query-stats + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: single + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-single" depends_on: *_compile_amazon_linux2_arm64 tasks: - name: schedule_patch_auto_tasks - name: schedule_variant_auto_tasks + - name: industry_benchmarks - name: ycsb_60GB - name: ycsb_60GB.long - name: crud_workloads_majority - - name: crud_workloads_w1 - - name: mixed_workloads - name: tpcc - - name: linux-3-shard-telemetry - display_name: Linux 3-Shard Cluster 2022-11 (Telemetry) - # Run it twice a day. - # Will make it less frequent when perf is finished. 
- cron: "0 3,15 * * 0,1,2,3,4,5,6" + - name: linux-standalone-limited-query-stats + display_name: Linux Standalone 2022-11 (Rate Limited QueryStats) + # Match the baseline non-query-stats cron + cron: *linux-standalone-cron modules: *modules expansions: mongodb_setup_release: 2022-11 - mongodb_setup: shard-telemetry + mongodb_setup: standalone-query-stats-small-rate-limit infrastructure_provisioning_release: 2022-11 - infrastructure_provisioning: shard + infrastructure_provisioning: single workload_setup: 2022-11 platform: linux project_dir: *project_dir @@ -2145,14 +2347,11 @@ buildvariants: storageEngine: wiredTiger compile_variant: "-arm64" run_on: - - "rhel70-perf-shard" + - "rhel70-perf-single" depends_on: *_compile_amazon_linux2_arm64 tasks: - name: schedule_patch_auto_tasks - name: schedule_variant_auto_tasks - - name: crud_workloads_majority - - name: crud_workloads_w1 - - name: mixed_workloads - name: linux-1-node-replSet-all-feature-flags.2022-11 display_name: Linux 1-Node ReplSet (all feature flags) 2022-11 @@ -2180,7 +2379,7 @@ buildvariants: - name: linux-1-node-replSet.2022-11 display_name: Linux 1-Node ReplSet 2022-11 - cron: "0 0 * * 4" # 00:00 on Thursday + cron: *linux-1-node-repl-cron modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -2213,7 +2412,6 @@ buildvariants: - name: bestbuy_agg_merge_same_db - name: bestbuy_agg_merge_wordcount - name: bestbuy_query - - name: change_streams_throughput - name: change_streams_latency - name: change_streams_listen_throughput - name: snapshot_reads @@ -2295,6 +2493,36 @@ buildvariants: depends_on: *_compile_amazon_linux2_arm64 tasks: *audit-tasks + - name: linux-1-node-replSet-query-stats + display_name: Linux 1-Node ReplSet 2022-11 (QueryStats) + cron: *linux-1-node-repl-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: single-replica-query-stats + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: single + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-single" + depends_on: *_compile_amazon_linux2_arm64 + tasks: + - name: schedule_patch_auto_tasks + - name: schedule_variant_auto_tasks + - name: industry_benchmarks + - name: ycsb_60GB + - name: ycsb_60GB.long + - name: crud_workloads_majority + - name: mixed_workloads + - name: tpcc + - name: linkbench + - name: linkbench2 + - name: linux-shard-lite-fle.2022-11 display_name: Linux Shard Lite FLE 2022-11 cron: "0 0 * * 0,4" # 00:00 on Sunday,Thursday @@ -2471,7 +2699,7 @@ buildvariants: - name: linux-3-shard.2022-11 display_name: Linux 3-Shard Cluster 2022-11 - cron: "0 0 * * 4" # 00:00 on Thursday + cron: &linux-3-shard-cron "0 0 * * 4" # 00:00 on Thursday modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -2487,7 +2715,7 @@ buildvariants: run_on: - "rhel70-perf-shard" depends_on: *_compile_amazon_linux2_arm64 - tasks: + tasks: &linux_3_shard_tasks - name: schedule_patch_auto_tasks - name: schedule_variant_auto_tasks - name: industry_benchmarks @@ -2500,7 +2728,6 @@ buildvariants: - name: smoke_test - name: mongos_workloads - name: mongos_large_catalog_workloads - - name: change_streams_throughput - name: change_streams_latency - name: change_streams_listen_throughput - name: change_streams_multi_mongos @@ -2509,6 +2736,52 @@ buildvariants: - name: tsbs_query_sharded_balancer - name: tsbs_query_finance_sharded_balancer + - name: linux-3-shard-query-stats + 
display_name: Linux 3-Shard Cluster 2022-11 (QueryStats) + cron: *linux-3-shard-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: shard-query-stats + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: shard + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-shard" + depends_on: *_compile_amazon_linux2_arm64 + tasks: + - name: schedule_patch_auto_tasks + - name: schedule_variant_auto_tasks + - name: industry_benchmarks + - name: crud_workloads_majority + - name: crud_workloads_w1 + - name: mixed_workloads + + - name: linux-3-shard-heuristic-bonsai.2022-11 + display_name: Linux 3-Shard Cluster 2022-11 (Bonsai with Heuristic CE) + cron: *linux-3-shard-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: shard-heuristic-bonsai + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: shard + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-shard" + depends_on: *_compile_amazon_linux2_arm64 + tasks: *linux_3_shard_tasks + - name: linux-shard-lite-audit.2022-11 display_name: Linux Shard Lite Cluster Audit 2022-11 cron: "0 0 * * 0,4" # 00:00 on Sunday,Thursday @@ -2559,7 +2832,6 @@ buildvariants: - name: bestbuy_query - name: change_streams_latency - name: change_streams_preimage_latency - - name: change_streams_throughput - name: change_streams_preimage_throughput - name: change_streams_listen_throughput - name: industry_benchmarks @@ -2679,7 +2951,7 @@ buildvariants: - name: linux-3-node-replSet.2022-11 display_name: Linux 3-Node ReplSet 2022-11 - cron: "0 0 * * 1,2,3,4,5,6" # Everyday except Sunday at 00:00 + cron: &linux-3-node-cron "0 0 * * 1,2,3,4,5,6" # Everyday except Sunday at 00:00 modules: *modules expansions: mongodb_setup_release: 2022-11 @@ -2717,7 +2989,6 @@ buildvariants: - name: bestbuy_agg_merge_same_db - name: bestbuy_agg_merge_wordcount - name: bestbuy_query - - name: change_streams_throughput - name: change_streams_preimage_throughput - name: change_streams_latency - name: change_streams_preimage_latency @@ -2807,7 +3078,6 @@ buildvariants: - name: bestbuy_agg_merge_same_db - name: bestbuy_agg_merge_wordcount - name: bestbuy_query - - name: change_streams_throughput - name: change_streams_latency - name: change_streams_listen_throughput - name: snapshot_reads @@ -2866,7 +3136,6 @@ buildvariants: - name: bestbuy_agg_merge_same_db - name: bestbuy_agg_merge_wordcount - name: bestbuy_query - - name: change_streams_throughput - name: change_streams_latency - name: change_streams_listen_throughput - name: snapshot_reads @@ -2925,7 +3194,6 @@ buildvariants: - name: bestbuy_agg_merge_same_db - name: bestbuy_agg_merge_wordcount - name: bestbuy_query - - name: change_streams_throughput - name: change_streams_latency - name: change_streams_listen_throughput - name: change_streams_preimage_throughput @@ -3027,6 +3295,28 @@ buildvariants: - name: initialsync-large - name: initialsync-large-fcbis + - name: linux-3-node-replSet-cpu-cycle-metrics.2023-06 + display_name: Linux 3-Node ReplSet CPU Cycle Metrics 2023-06 + cron: "0 0 * * 4" # 00:00 on Thursday + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: replica-ipc-counters.2023-06 + infrastructure_provisioning_release: 2022-11 + 
infrastructure_provisioning: replica + workload_setup: 2022-11 + platform: linux + authentication: disabled + storageEngine: wiredTiger + compile_variant: "-arm64" + project_dir: *project_dir + depends_on: *_compile_amazon_linux2_arm64 + run_on: + - "rhel70-perf-replset" + tasks: + - name: schedule_patch_auto_tasks + - name: schedule_variant_auto_tasks + - name: linux-replSet-initialsync-logkeeper.2022-11 display_name: Linux ReplSet Initial Sync LogKeeper 2022-11 cron: "0 0 * * 4" # 00:00 on Thursday @@ -3038,7 +3328,7 @@ buildvariants: infrastructure_provisioning: initialsync-logkeeper workload_setup: 2022-11 # EBS logkeeper snapshot with FCV set to 6.0 - snapshotId: snap-0e28e73fe0f1c503a + snapshotId: snap-0eca13ca4935455a2 platform: linux authentication: disabled storageEngine: wiredTiger @@ -3065,7 +3355,7 @@ buildvariants: # infrastructure_provisioning_release: 2022-11 # infrastructure_provisioning: initialsync-logkeeper-snapshot-update # # Update this to latest snapshot after each LTS release. - # snapshotId: snap-0e28e73fe0f1c503a + # snapshotId: snap-0eca13ca4935455a2 # platform: linux # authentication: disabled # storageEngine: wiredTiger @@ -3124,10 +3414,95 @@ buildvariants: - name: schedule_patch_auto_tasks - name: schedule_variant_auto_tasks + - name: linux-3-node-replSet-query-stats + display_name: Linux 3-Node ReplSet 2022-11 (QueryStats) + cron: *linux-3-node-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: replica-query-stats + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: replica + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-replset" + depends_on: *_compile_amazon_linux2_arm64 + tasks: + - name: schedule_patch_auto_tasks + - name: schedule_variant_auto_tasks + - name: industry_benchmarks + - name: ycsb_60GB + - name: ycsb_60GB.long + - name: crud_workloads_majority + - name: crud_workloads_w1 + - name: mixed_workloads + - name: tpcc + - name: linkbench + - name: linkbench2 + + - name: linux-3-node-replSet-disable-execution-control + display_name: Linux 3-Node ReplSet 2022-11 (Execution Control Off) + cron: *linux-3-node-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: replica-disable-execution-control + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: replica + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-replset" + depends_on: *_compile_amazon_linux2_arm64 + tasks: + - name: schedule_patch_auto_tasks + - name: schedule_variant_auto_tasks + - name: bestbuy_agg_merge_wordcount + - name: industry_benchmarks + - name: linkbench2 + - name: misc_workloads + - name: mixed_workloads + - name: snapshot_reads + - name: tpcc + - name: ycsb_60GB + + - name: linux-3-node-replSet-250mbwtcache.2023-05 + display_name: Linux 3-Node ReplSet 250 MB WiredTiger Cache 2023-05 + cron: *linux-3-node-cron + modules: *modules + expansions: + mongodb_setup_release: 2022-11 + mongodb_setup: replica-250mbwtcache.2023-05 + infrastructure_provisioning_release: 2022-11 + infrastructure_provisioning: replica + workload_setup: 2022-11 + platform: linux + project_dir: *project_dir + authentication: enabled + storageEngine: wiredTiger + compile_variant: "-arm64" + run_on: + - "rhel70-perf-replset" + 
depends_on: *_compile_amazon_linux2_arm64 + tasks: + - name: schedule_patch_auto_tasks + - name: schedule_variant_auto_tasks + - &linux-microbenchmarks-standalone-arm name: linux-microbenchmarks-standalone-arm.2023-01 display_name: MicroBenchmarks Arm Standalone inMemory.2023-01 - cron: "0 */4 * * *" # Every 4 hours starting at midnight + # TODO SERVER-74399 Reduce frequency back to baseline. + # cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: &linux-microbench-cron "0 0 * * *" # Everyday at 00:00 modules: *modules expansions: &standalone-arm-expansions mongodb_setup_release: 2022-11 @@ -3142,7 +3517,7 @@ buildvariants: project_dir: *project_dir compile_variant: "-arm64" run_on: - - "rhel70-perf-single" + - "rhel70-perf-microbenchmarks" depends_on: *_compile_amazon_linux2_arm64 tasks: - name: big_collection @@ -3173,10 +3548,56 @@ buildvariants: - name: compound_wildcard_index_write_commands - name: compound_wildcard_index_read_commands +# Specialized Temporary Variants to understand the impact of query stats better. + - <<: *linux-microbenchmarks-standalone-arm + name: fast-query-microbenchmarks-baseline.2023-06 + display_name: Fast Query Microbenchmarks (Baseline) + # TODO SERVER-78295 Fold these tasks in to the regular variants on the normal schedule. + cron: &fast-query-cron 0 */4 * * * # Every 4 hours, temporarily. + tasks: + - name: fast_running_queries + - name: fast_running_queries_large_dataset + - <<: *linux-microbenchmarks-standalone-arm + name: fast-query-microbenchmarks-query-stats.2023-06 + display_name: Fast Query Microbenchmarks (QueryStats) + cron: *fast-query-cron + tasks: + - name: fast_running_queries + - name: fast_running_queries_large_dataset + expansions: + <<: *standalone-arm-expansions + mongodb_setup: mongo-perf-standalone-query-stats + - <<: *linux-microbenchmarks-standalone-arm + name: fast-query-microbenchmarks-limited-query-stats.2023-06 + display_name: Fast Query Microbenchmarks (Limited QueryStats) + cron: *fast-query-cron + tasks: + - name: fast_running_queries + - name: fast_running_queries_large_dataset + expansions: + <<: *standalone-arm-expansions + mongodb_setup: mongo-perf-standalone-query-stats-small-rate-limit + +# Variant: Microbenchmarks with QueryStats + - <<: *linux-microbenchmarks-standalone-arm + name: linux-microbenchmarks-standalone-arm-query-stats.2023-01 + display_name: MicroBenchmarks Arm Standalone inMemory.2023-01 (QueryStats) + expansions: + <<: *standalone-arm-expansions + mongodb_setup: mongo-perf-standalone-query-stats + +# Variant: Microbenchmarks with rate limited QueryStats + - <<: *linux-microbenchmarks-standalone-arm + name: linux-microbenchmarks-standalone-arm-limited-query-stats.2023-01 + display_name: MicroBenchmarks (Rate Limited QueryStats) + expansions: + <<: *standalone-arm-expansions + mongodb_setup: mongo-perf-standalone-query-stats-small-rate-limit + - &linux-microbenchmarks-repl-arm name: linux-microbenchmarks-repl-arm.2023-01 display_name: MicroBenchmarks Arm 1-Node ReplSet inMemory.2023-01 - cron: "0 */4 * * *" # Every 4 hours starting at midnight + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday.
modules: *modules expansions: &repl-arm-expansions mongodb_setup_release: 2022-11 @@ -3191,7 +3612,7 @@ buildvariants: project_dir: *project_dir compile_variant: "-arm64" run_on: - - "rhel70-perf-single" + - "rhel70-perf-microbenchmarks" depends_on: *_compile_amazon_linux2_arm64 tasks: - name: genny_scale_InsertRemove @@ -3205,7 +3626,7 @@ buildvariants: - <<: *linux-microbenchmarks-standalone-arm name: linux-microbenchmarks-standalone-all-feature-flags-arm.2023-01 display_name: MicroBenchmarks Arm Standalone inMemory (all feature flags).2023-01 - cron: "0 */4 * * *" # Every 4 hours starting at midnight + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. expansions: <<: *standalone-arm-expansions mongodb_setup: mongo-perf-standalone-all-feature-flags.2023-02 @@ -3242,8 +3663,7 @@ buildvariants: - <<: *linux-microbenchmarks-standalone-arm name: linux-microbenchmarks-standalone-classic-query-engine-arm.2023-01 display_name: MicroBenchmarks Arm Standalone inMemory (Classic Query Engine).2023-01 - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday expansions: <<: *standalone-arm-expansions mongodb_setup: mongo-perf-standalone-classic-query-engine.2023-02 @@ -3254,8 +3674,7 @@ buildvariants: - <<: *linux-microbenchmarks-standalone-arm name: linux-microbenchmarks-standalone-sbe-arm.2023-01 display_name: MicroBenchmarks Arm Standalone inMemory (SBE).2023-01 - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday expansions: <<: *standalone-arm-expansions mongodb_setup: mongo-perf-standalone-sbe.2023-02 @@ -3263,10 +3682,32 @@ buildvariants: # Add tasks to the anchor that this variant references # If diverging from that list, add the entire list of desired tasks here + - <<: *linux-microbenchmarks-standalone-arm + name: linux-microbenchmarks-standalone-sampling-bonsai-arm.2023-01 + display_name: MicroBenchmarks Arm Standalone inMemory (Bonsai with Sampling CE).2023-01 + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + expansions: + <<: *standalone-arm-expansions + mongodb_setup: mongo-perf-standalone-sampling-bonsai.2023-02 + # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE. + # Add tasks to the anchor that this variant references + # If diverging from that list, add the entire list of desired tasks here + + - <<: *linux-microbenchmarks-standalone-arm + name: linux-microbenchmarks-standalone-heuristic-bonsai-arm.2023-01 + display_name: MicroBenchmarks Arm Standalone inMemory (Bonsai with Heuristic CE).2023-01 + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + expansions: + <<: *standalone-arm-expansions + mongodb_setup: mongo-perf-standalone-heuristic-bonsai.2023-02 + # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE. + # Add tasks to the anchor that this variant references + # If diverging from that list, add the entire list of desired tasks here + - <<: *linux-microbenchmarks-repl-arm name: linux-microbenchmarks-repl-all-feature-flags-arm.2023-01 display_name: MicroBenchmarks Arm 1-Node ReplSet inMemory (all feature flags).2023-01 - cron: "0 */4 * * *" # Every 4 hours starting at midnight + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. 
expansions: <<: *repl-arm-expansions mongodb_setup: mongo-perf-replica-all-feature-flags.2023-02 @@ -3278,13 +3719,13 @@ buildvariants: <<: *linux-microbenchmarks-standalone-arm name: linux-microbenchmarks-standalone-intel.2023-01 display_name: MicroBenchmarks Intel Standalone inMemory.2023-01 - cron: "0 */12 * * *" # Every 12 hours starting at midnight + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. expansions: &standalone-intel-expansions <<: *standalone-arm-expansions infrastructure_provisioning: workload_client_mongod_combined_intel.2023-01 compile_variant: "" run_on: - - "rhel70-perf-single" + - "rhel70-perf-microbenchmarks" depends_on: *_compile_amazon2 # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE. # Add tasks to the anchor that this variant references @@ -3294,13 +3735,13 @@ buildvariants: <<: *linux-microbenchmarks-repl-arm name: linux-microbenchmarks-repl-intel.2023-01 display_name: MicroBenchmarks Intel 1-Node ReplSet inMemory.2023-01 - cron: "0 */12 * * *" # Every 12 hours starting at midnight + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. expansions: &repl-intel-expansions <<: *repl-arm-expansions infrastructure_provisioning: workload_client_mongod_combined_intel.2023-01 compile_variant: "" run_on: - - "rhel70-perf-single" + - "rhel70-perf-microbenchmarks" depends_on: *_compile_amazon2 # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE. # Add tasks to the anchor that this variant references @@ -3309,7 +3750,7 @@ buildvariants: - <<: *linux-microbenchmarks-standalone-intel name: linux-microbenchmarks-standalone-all-feature-flags.2023-01 display_name: MicroBenchmarks Intel Standalone inMemory (all feature flags).2023-01 - cron: "0 */12 * * *" # Every 12 hours starting at midnight + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. expansions: <<: *standalone-intel-expansions mongodb_setup: mongo-perf-standalone-all-feature-flags.2023-02 @@ -3321,8 +3762,7 @@ buildvariants: - <<: *linux-microbenchmarks-standalone-intel name: linux-microbenchmarks-standalone-classic-query-engine.2023-01 display_name: MicroBenchmarks Intel Standalone inMemory (Classic Query Engine).2023-01 - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday expansions: <<: *standalone-intel-expansions mongodb_setup: mongo-perf-standalone-classic-query-engine.2023-02 @@ -3333,8 +3773,7 @@ buildvariants: - <<: *linux-microbenchmarks-standalone-intel name: linux-microbenchmarks-standalone-sbe.2023-01 display_name: MicroBenchmarks Intel Standalone inMemory (SBE).2023-01 - # Will make it less frequent when the current SBE perf improvement is finished (SERVER-69799). - cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + cron: "0 0 * * 4" # 00:00 on Thursday expansions: <<: *standalone-intel-expansions mongodb_setup: mongo-perf-standalone-sbe.2023-02 @@ -3342,10 +3781,32 @@ buildvariants: # Add tasks to the anchor that this variant references # If diverging from that list, add the entire list of desired tasks here + - <<: *linux-microbenchmarks-standalone-intel + name: linux-microbenchmarks-standalone-sampling-bonsai.2023-01 + display_name: MicroBenchmarks Intel Standalone inMemory (Bonsai with Sampling CE).2023-01 + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. 
+ expansions: + <<: *standalone-intel-expansions + mongodb_setup: mongo-perf-standalone-sampling-bonsai.2023-02 + # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE. + # Add tasks to the anchor that this variant references + # If diverging from that list, add the entire list of desired tasks here + + - <<: *linux-microbenchmarks-standalone-intel + name: linux-microbenchmarks-standalone-heuristic-bonsai.2023-01 + display_name: MicroBenchmarks Intel Standalone inMemory (Bonsai with Heuristic CE).2023-01 + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. + expansions: + <<: *standalone-intel-expansions + mongodb_setup: mongo-perf-standalone-heuristic-bonsai.2023-02 + # yaml does not nicely merge arrays, so DO NOT ADD INDIVIDUAL TASKS HERE. + # Add tasks to the anchor that this variant references + # If diverging from that list, add the entire list of desired tasks here + - <<: *linux-microbenchmarks-repl-intel name: linux-microbenchmarks-repl-all-feature-flags.2023-01 display_name: MicroBenchmarks Intel 1-Node ReplSet inMemory (all feature flags).2023-01 - cron: "0 */12 * * *" # Every 12 hours starting at midnight + cron: "0 0 * * 0,2,3,4,5" # Run it every day except Saturday and Monday. expansions: <<: *repl-intel-expansions mongodb_setup: mongo-perf-replica-all-feature-flags.2023-02 diff --git a/etc/third_party_components.yml b/etc/third_party_components.yml index 8ef3575924b6b..cf88f09c00176 100644 --- a/etc/third_party_components.yml +++ b/etc/third_party_components.yml @@ -103,6 +103,13 @@ components: local_directory_path: src/third_party/SafeInt team_owner: "Service Architecture" + derickr/timelib: + homepage_url: https://github.com/derickr/timelib + open_hub_url: N/A + release_monitoring_id: -1 + local_directory_path: src/third_party/timelib + team_owner: "Query" + discover-python: homepage_url: https://pypi.org/project/discover/ open_hub_url: N/A @@ -248,13 +255,6 @@ components: team_owner: "Query" upgrade_suppression: TODO SERVER-64574 - "mpark-variant-devel": - homepage_url: https://github.com/mpark/variant - open_hub_url: N/A - release_monitoring_id: 18301 - local_directory_path: src/third_party/variant-1.4.0 - team_owner: "Service Architecture" - nlohmann.json.decomposed: homepage_url: https://github.com/nlohmann/json open_hub_url: https://www.openhub.net/p/nlohmann_json @@ -359,14 +359,6 @@ components: is_test_only: true team_owner: "Wiredtiger" - timelib: - homepage_url: https://github.com/derickr/timelib - open_hub_url: N/A - release_monitoring_id: -1 - local_directory_path: src/third_party/timelib - team_owner: "Query" - # Note: Not in Black Duck - unicode: homepage_url: http://www.unicode.org open_hub_url: N/A diff --git a/etc/tsan.suppressions b/etc/tsan.suppressions index b9fd2d6960385..323d4383a8a6c 100644 --- a/etc/tsan.suppressions +++ b/etc/tsan.suppressions @@ -17,7 +17,7 @@ race:tzset_internal # that false positives are more likely, we're deferring them until we have # fixed the ones we know are real. # TODO: https://jira.mongodb.org/browse/SERVER-48599 -race:src/third_party/wiredtiger/* +called_from_lib:libwiredtiger.so # These functions call malloc() down the line while inside a signal handler. 
# Since we've never had problems with any of the allocators we use, and since diff --git a/evergreen/build_metric_cedar_report.py b/evergreen/build_metric_cedar_report.py index 6c89e733ccb6f..25b1356df8c39 100644 --- a/evergreen/build_metric_cedar_report.py +++ b/evergreen/build_metric_cedar_report.py @@ -16,65 +16,82 @@ pull_cache_metrics_json = args.cache_pull_metrics cedar_report = [] + def single_metric_test(test_name, metric_name, value): return { - "info": { - "test_name": test_name, - }, - "metrics": [ - { - "name": metric_name, - "value": round(value, 2) - }, - ] + "info": {"test_name": test_name, }, + "metrics": [{"name": metric_name, "value": round(value, 2)}, ] } + with open(clean_build_metrics_json) as f: aggregated_build_tasks = {} build_metrics = json.load(f) for task in build_metrics['build_tasks']: - outputs_key = ' '.join(task['outputs']) - if outputs_key in aggregated_build_tasks: - aggregated_build_tasks[outputs_key]['mem_usage'] += task['mem_usage'] - aggregated_build_tasks[outputs_key]['time'] += (task['end_time'] - task['start_time']) - else: - aggregated_build_tasks[outputs_key] = { - 'mem_usage': task['mem_usage'], - 'time': task['end_time'] - task['start_time'], - } + if task['builder'] in [ + 'SharedLibrary', + 'StaticLibrary', + 'Program', + 'Object', + 'SharedObject', + 'StaticObject', + ]: + outputs_key = ' '.join(task['outputs']) + if outputs_key in aggregated_build_tasks: + if aggregated_build_tasks[outputs_key]['mem_usage'] < task['mem_usage']: + aggregated_build_tasks[outputs_key]['mem_usage'] = task['mem_usage'] + aggregated_build_tasks[outputs_key]['time'] += ( + task['end_time'] - task['start_time']) + else: + aggregated_build_tasks[outputs_key] = { + 'mem_usage': task['mem_usage'], + 'time': task['end_time'] - task['start_time'], + } for output_files in aggregated_build_tasks: cedar_report.append({ - "info": { - "test_name": output_files, - }, - "metrics": [ + "info": {"test_name": output_files, }, "metrics": [ { - "name": "seconds", - "value": round(aggregated_build_tasks[output_files]['time'] / (10.0**9.0), 2) + "name": "seconds", "value": round( + aggregated_build_tasks[output_files]['time'] / (10.0**9.0), 2) }, { - "name": "MBs", - "value": round(aggregated_build_tasks[output_files]['mem_usage'] / 1024.0 / 1024.0, 2) + "name": + "MBs", "value": + round( + aggregated_build_tasks[output_files]['mem_usage'] / 1024.0 / 1024.0, + 2) }, ] }) try: - cedar_report.append(single_metric_test("SCons memory usage", "MBs", build_metrics['scons_metrics']['memory']['post_build'] / 1024.0 / 1024.0)) + cedar_report.append( + single_metric_test( + "SCons memory usage", "MBs", + build_metrics['scons_metrics']['memory']['post_build'] / 1024.0 / 1024.0)) except KeyError: if sys.platform == 'darwin': # MacOS has known memory reporting issues, although this is not directly related to scons which does not use # psutil for this case, I think both use underlying OS calls to determine the memory: https://github.com/giampaolo/psutil/issues/1908 pass - - cedar_report.append(single_metric_test("System Memory Peak", "MBs", build_metrics['system_memory']['max'] / 1024.0 / 1024.0)) - cedar_report.append(single_metric_test("Total Build time", "seconds", build_metrics['scons_metrics']['time']['total'])) - cedar_report.append(single_metric_test("Total Build output size", "MBs", build_metrics['artifact_metrics']['total_artifact_size'] / 1024.0 / 1024.0)) + + cedar_report.append( + single_metric_test("System Memory Peak", "MBs", + build_metrics['system_memory']['max'] / 1024.0 / 1024.0)) 
+ cedar_report.append( + single_metric_test("Total Build time", "seconds", + build_metrics['scons_metrics']['time']['total'])) + cedar_report.append( + single_metric_test( + "Total Build output size", "MBs", + build_metrics['artifact_metrics']['total_artifact_size'] / 1024.0 / 1024.0)) try: - cedar_report.append(single_metric_test("Transitive Libdeps Edges", "edges", build_metrics['libdeps_metrics']['TRANS_EDGE'])) + cedar_report.append( + single_metric_test("Transitive Libdeps Edges", "edges", + build_metrics['libdeps_metrics']['TRANS_EDGE'])) except KeyError: pass @@ -87,20 +104,17 @@ def single_metric_test(test_name, metric_name, value): break if mongod_metrics and mongod_metrics.get('bin_metrics'): - cedar_report.append(single_metric_test("Mongod debug info size", "MBs", mongod_metrics['bin_metrics']['debug']['filesize'] / 1024.0 / 1024.0)) + cedar_report.append( + single_metric_test( + "Mongod debug info size", "MBs", + mongod_metrics['bin_metrics']['debug']['filesize'] / 1024.0 / 1024.0)) with open(populate_cache_metrics_json) as f: build_metrics = json.load(f) cedar_report.append({ - "info": { - "test_name": "cache_push_time", - }, - "metrics": [ - { - "name": "seconds", - "value": build_metrics["cache_metrics"]['push_time'] / (10.0**9.0) - }, + "info": {"test_name": "cache_push_time", }, "metrics": [ + {"name": "seconds", "value": build_metrics["cache_metrics"]['push_time'] / (10.0**9.0)}, ] }) @@ -108,18 +122,12 @@ def single_metric_test(test_name, metric_name, value): build_metrics = json.load(f) cedar_report.append({ - "info": { - "test_name": "cache_pull_time", - }, - "metrics": [ - { - "name": "seconds", - "value": build_metrics["cache_metrics"]['pull_time'] / (10.0**9.0) - }, + "info": {"test_name": "cache_pull_time", }, "metrics": [ + {"name": "seconds", "value": build_metrics["cache_metrics"]['pull_time'] / (10.0**9.0)}, ] }) +print(f"Generated Cedar Report with {len(cedar_report)} perf results.") + with open("build_metrics_cedar_report.json", "w") as fh: json.dump(cedar_report, fh) - - diff --git a/evergreen/compiled_binaries_get.sh b/evergreen/compiled_binaries_get.sh index 6c0f8d751467a..0a03489eafd9b 100755 --- a/evergreen/compiled_binaries_get.sh +++ b/evergreen/compiled_binaries_get.sh @@ -8,7 +8,7 @@ set -o verbose # activate_venv will make sure we are using python 3 activate_venv -setup_db_contrib_tool_venv +setup_db_contrib_tool rm -rf /data/install /data/multiversion @@ -16,26 +16,6 @@ edition="${multiversion_edition}" platform="${multiversion_platform}" architecture="${multiversion_architecture}" -if [ ! -z "${multiversion_edition_42_or_later}" ]; then - edition="${multiversion_edition_42_or_later}" -fi -if [ ! -z "${multiversion_platform_42_or_later}" ]; then - platform="${multiversion_platform_42_or_later}" -fi -if [ ! -z "${multiversion_architecture_42_or_later}" ]; then - architecture="${multiversion_architecture_42_or_later}" -fi - -if [ ! -z "${multiversion_edition_44_or_later}" ]; then - edition="${multiversion_edition_44_or_later}" -fi -if [ ! -z "${multiversion_platform_44_or_later}" ]; then - platform="${multiversion_platform_44_or_later}" -fi -if [ ! 
-z "${multiversion_architecture_44_or_later}" ]; then - architecture="${multiversion_architecture_44_or_later}" -fi - version=${project#mongodb-mongo-} version=${version#v} diff --git a/evergreen/external_auth_oidc_setup.sh b/evergreen/external_auth_oidc_setup.sh old mode 100644 new mode 100755 index c082304a02230..c49e7caaa41ec --- a/evergreen/external_auth_oidc_setup.sh +++ b/evergreen/external_auth_oidc_setup.sh @@ -8,6 +8,9 @@ set -o errexit # Should output contents to new file in home directory. cat << EOF > $HOME/oidc_e2e_setup.json { + "tD548GwE1@outlook.com" : "${oidc_azure_test_user_account_one_secret}", + "tD548GwE2@outlook.com" : "${oidc_azure_test_user_account_two_secret}", + "tD548GwE3@outlook.com" : "${oidc_azure_test_user_account_three_secret}", "testserversecurityone@okta-test.com" : "${oidc_okta_test_user_account_one_secret}", "testserversecuritytwo@okta-test.com" : "${oidc_okta_test_user_account_two_secret}", "testserversecuritythree@okta-test.com" : "${oidc_okta_test_user_account_three_secret}" diff --git a/evergreen/failed_unittests_gather.sh b/evergreen/failed_unittests_gather.sh index 7a3ff6b7a0f9e..72668fe55c35f 100644 --- a/evergreen/failed_unittests_gather.sh +++ b/evergreen/failed_unittests_gather.sh @@ -6,7 +6,9 @@ cd src set -eou pipefail # Only run on unit test tasks so we don't target mongod binaries from cores. -if [ "${task_name}" != "run_unittests" ] && [ "${task_name}" != "run_dbtest" ] && [ "${task_name}" != "run_unittests_with_recording" ]; then +if [ "${task_name}" != "run_unittests" ] && [ "${task_name}" != "run_dbtest" ] \ + && [ "${task_name}" != "run_unittests_with_recording" ] \ + && [[ ${task_name} != integration_tests* ]]; then exit 0 fi @@ -15,7 +17,7 @@ mkdir -p $unittest_bin_dir || true # Find all core files core_files=$(/usr/bin/find -H . \( -name "dump_*.core" -o -name "*.mdmp" \) 2> /dev/null) -for core_file in $core_files; do +while read -r core_file; do # A core file name does not always have the executable name that generated it. # See http://stackoverflow.com/questions/34801353/core-dump-filename-gets-thread-name-instead-of-executable-name-with-core-pattern # On platforms with GDB, we get the binary name from core file @@ -63,7 +65,7 @@ for core_file in $core_files; do fi done -done +done <<< "${core_files}" # For recorded tests, use the text file to copy them over instead of relying on core dumps. has_recorded_failures="" diff --git a/evergreen/functions/added_and_modified_patch_files_get.sh b/evergreen/functions/added_and_modified_patch_files_get.sh deleted file mode 100755 index 608b53787efdf..0000000000000 --- a/evergreen/functions/added_and_modified_patch_files_get.sh +++ /dev/null @@ -1,14 +0,0 @@ -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" -. "$DIR/../prelude.sh" - -cd src - -set -o verbose -set -o errexit - -git diff --name-only origin/${branch_name}... --line-prefix="${workdir}/src/" --diff-filter=d >> modified_and_created_patch_files.txt -if [ -d src/mongo/db/modules/enterprise ]; then - pushd src/mongo/db/modules/enterprise - git diff HEAD --name-only --line-prefix="${workdir}/src/src/mongo/db/modules/enterprise/" --diff-filter=d >> ~1/modified_and_created_patch_files.txt - popd -fi diff --git a/evergreen/functions/binaries_extract.py b/evergreen/functions/binaries_extract.py new file mode 100644 index 0000000000000..356c9f15668d2 --- /dev/null +++ b/evergreen/functions/binaries_extract.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +# +# Copyright 2020 MongoDB Inc. 
+# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + +import argparse +import subprocess +import os +import sys +import pathlib +import shutil +import glob + +parser = argparse.ArgumentParser() + +parser.add_argument('--change-dir', type=str, action='store', + help="The directory to change into to perform the extraction.") +parser.add_argument('--extraction-command', type=str, action='store', + help="The command to use for the extraction.") +parser.add_argument('--tarball', type=str, action='store', + help="The tarball to perform the extraction on.") +parser.add_argument( + '--move-output', type=str, action='append', help= + "Move an extracted entry to a new location after extraction. Format is colon separated, e.g. '--move-output=file/to/move:path/to/destination'. Can accept glob like wildcards." +) +args = parser.parse_args() + +if args.change_dir: + working_dir = pathlib.Path(args.change_dir).as_posix() + tarball = pathlib.Path(args.tarball).resolve().as_posix() + print(f"Switching to {working_dir} to perform the extraction in.") + os.makedirs(working_dir, exist_ok=True) +else: + working_dir = None + tarball = pathlib.Path(args.tarball).as_posix() + +shell = os.environ.get('SHELL', '/bin/bash') + +if sys.platform == 'win32': + proc = subprocess.run(['C:/cygwin/bin/cygpath.exe', '-w', shell], text=True, + capture_output=True) + bash = pathlib.Path(proc.stdout.strip()) + cmd = [bash.as_posix(), '-c', f"{args.extraction_command} {tarball}"] +else: + cmd = [shell, '-c', f"{args.extraction_command} {tarball}"] + +print(f"Extracting: {' '.join(cmd)}") +proc = subprocess.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + cwd=working_dir) + +print(proc.stdout) + +if args.move_output: + for arg in args.move_output: + try: + src, dst = arg.split(':') + print(f"Moving {src} to {dst}...") + files_to_move = glob.glob(src, recursive=True) + for file in files_to_move: + result_dst = shutil.move(file, dst) + print(f"Moved {file} to {result_dst}") + except ValueError as exc: + print(f"Bad format, needs to be glob like paths in the from 'src:dst', got: {arg}") + raise exc + +sys.exit(proc.returncode) diff --git a/evergreen/functions/binaries_extract.sh b/evergreen/functions/binaries_extract.sh deleted file mode 100755 index ec7b8dd3f8f06..0000000000000 --- a/evergreen/functions/binaries_extract.sh +++ /dev/null @@ -1,7 +0,0 @@ -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" -. 
"$DIR/../prelude.sh" - -cd src - -set -o errexit -${decompress} mongo-binaries.tgz diff --git a/evergreen/functions/task_timeout_determine.sh b/evergreen/functions/task_timeout_determine.sh index 49dda4dd4a992..b307586792db2 100644 --- a/evergreen/functions/task_timeout_determine.sh +++ b/evergreen/functions/task_timeout_determine.sh @@ -31,8 +31,15 @@ else evg_alias="evg-alias-absent" fi +resmoke_test_flags="" +if [[ -n "${test_flags}" ]]; then + resmoke_test_flags="--test-flags='${test_flags}'" +fi + activate_venv -PATH=$PATH:$HOME:/ $python buildscripts/evergreen_task_timeout.py $timeout_factor \ +PATH=$PATH:$HOME:/ eval $python buildscripts/evergreen_task_timeout.py \ + $timeout_factor \ + $resmoke_test_flags \ --install-dir "${install_dir}" \ --task-name ${task_name} \ --suite-name ${suite_name} \ diff --git a/evergreen/garasign_gpg_crypt_sign.sh b/evergreen/garasign_gpg_crypt_sign.sh new file mode 100644 index 0000000000000..378e63b138541 --- /dev/null +++ b/evergreen/garasign_gpg_crypt_sign.sh @@ -0,0 +1,31 @@ +DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" +. "$DIR/prelude.sh" + +cd src + +set -o errexit +set -o verbose + +ext="${ext:-tgz}" + +crypt_file_name=mongo_crypt_shared_v1-${push_name}-${push_arch}-${suffix}.${ext} +mv "mongo_crypt_shared_v1.$ext" $crypt_file_name + +# generating checksums +shasum -a 1 $crypt_file_name | tee $crypt_file_name.sha1 +shasum -a 256 $crypt_file_name | tee $crypt_file_name.sha256 +md5sum $crypt_file_name | tee $crypt_file_name.md5 + +# signing crypt linux artifact with gpg +cat << EOF >> gpg_signing_commands.sh +gpgloader # loading gpg keys. +gpg --yes -v --armor -o $crypt_file_name.sig --detach-sign $crypt_file_name +EOF + +podman run \ + -e GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_70} \ + -e GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_70} \ + --rm \ + -v $(pwd):$(pwd) -w $(pwd) \ + ${garasign_gpg_image} \ + /bin/bash -c "$(cat ./gpg_signing_commands.sh)" diff --git a/evergreen/garasign_gpg_sign.sh b/evergreen/garasign_gpg_sign.sh index a75a6042118da..27e7f7916f35b 100644 --- a/evergreen/garasign_gpg_sign.sh +++ b/evergreen/garasign_gpg_sign.sh @@ -55,8 +55,8 @@ sign mongodb-cryptd-$push_name-$push_arch-$suffix.$ext EOF podman run \ - -e GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username} \ - -e GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password} \ + -e GRS_CONFIG_USER1_USERNAME=${garasign_gpg_username_70} \ + -e GRS_CONFIG_USER1_PASSWORD=${garasign_gpg_password_70} \ --rm \ -v $(pwd):$(pwd) -w $(pwd) \ ${garasign_gpg_image} \ diff --git a/evergreen/garasign_jsign_sign.sh b/evergreen/garasign_jsign_sign.sh index 3ae3176cae9a8..474a517631849 100644 --- a/evergreen/garasign_jsign_sign.sh +++ b/evergreen/garasign_jsign_sign.sh @@ -9,15 +9,6 @@ cd src msi_filename=mongodb-${push_name}-${push_arch}-${suffix}.msi /usr/bin/find build/ -type f | grep msi$ | xargs -I original_filename cp original_filename $msi_filename || true -# generating checksums -if [ -e $msi_filename ]; then - shasum -a 1 $msi_filename | tee $msi_filename.sha1 - shasum -a 256 $msi_filename | tee $msi_filename.sha256 - md5sum $msi_filename | tee $msi_filename.md5 -else - echo "$msi_filename does not exist. 
Skipping checksum generation" -fi - # signing windows artifacts with jsign cat << 'EOF' > jsign_signing_commands.sh function sign(){ @@ -40,3 +31,12 @@ podman run \ -v $(pwd):$(pwd) -w $(pwd) \ ${garasign_jsign_image} \ /bin/bash -c "$(cat ./jsign_signing_commands.sh)" + +# generating checksums +if [ -e $msi_filename ]; then + shasum -a 1 $msi_filename | tee $msi_filename.sha1 + shasum -a 256 $msi_filename | tee $msi_filename.sha256 + md5sum $msi_filename | tee $msi_filename.md5 +else + echo "$msi_filename does not exist. Skipping checksum generation" +fi diff --git a/evergreen/generate_buildid_debug_symbols_mapping.sh b/evergreen/generate_buildid_debug_symbols_mapping.sh index d1866e961a555..142614ce821bd 100644 --- a/evergreen/generate_buildid_debug_symbols_mapping.sh +++ b/evergreen/generate_buildid_debug_symbols_mapping.sh @@ -6,10 +6,16 @@ cd src set -o errexit set -o verbose +is_san_variant_arg="" +if [[ -n "${san_options}" ]]; then + is_san_variant_arg="--is-san-variant" +fi + activate_venv $python buildscripts/debugsymb_mapper.py \ --version "${version_id}" \ --client-id "${symbolizer_client_id}" \ --client-secret "${symbolizer_client_secret}" \ - --variant "${build_variant}" + --variant "${build_variant}" \ + $is_san_variant_arg diff --git a/evergreen/generate_version_burn_in.sh b/evergreen/generate_version_burn_in.sh index b7d2ff2471463..b92b9fcfd16d7 100644 --- a/evergreen/generate_version_burn_in.sh +++ b/evergreen/generate_version_burn_in.sh @@ -8,6 +8,7 @@ set -o verbose setup_mongo_task_generator activate_venv +$python buildscripts/burn_in_tests.py generate-test-membership-map-file-for-ci PATH=$PATH:$HOME:/ ./mongo-task-generator \ --expansion-file ../expansions.yml \ --evg-auth-file ./.evergreen.yml \ diff --git a/evergreen/jepsen_docker/list-append.sh b/evergreen/jepsen_docker/list-append.sh index 4378b1bcadc94..2204a6a2e9b24 100644 --- a/evergreen/jepsen_docker/list-append.sh +++ b/evergreen/jepsen_docker/list-append.sh @@ -25,7 +25,6 @@ cd src activate_venv $python buildscripts/jepsen_report.py --start_time=$start_time --end_time=$end_time --elapsed=$elapsed_secs --emit_status_files --store ./jepsen-mongodb jepsen-mongodb/jepsen_${task_name}_${execution}.log exit_code=$? -cat report.json if [ -f "jepsen_system_fail.txt" ]; then mv jepsen_system_fail.txt jepsen-mongodb/jepsen_system_failure_${task_name}_${execution} diff --git a/evergreen/lint_fuzzer_sanity_patch.py b/evergreen/lint_fuzzer_sanity_patch.py new file mode 100644 index 0000000000000..3047ec0eb99df --- /dev/null +++ b/evergreen/lint_fuzzer_sanity_patch.py @@ -0,0 +1,96 @@ +import os +import sys +import shutil +import subprocess +import glob +from concurrent import futures +from pathlib import Path +import time +from typing import List, Tuple + +# Get relative imports to work when the package is not installed on the PYTHONPATH. 
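+# (When executed directly, __package__ is unset, so we append the repo root -- two
+# dirname() calls up from this file -- to sys.path so that the `from buildscripts ...`
+# imports below resolve.)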
+if __name__ == "__main__" and __package__ is None: + sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(os.path.realpath(__file__))))) + +# pylint: disable=wrong-import-position +from buildscripts.linter.filediff import gather_changed_files_for_lint +from buildscripts import simple_report + +# pylint: enable=wrong-import-position + + +def is_js_file(filename: str) -> bool: + # return True + return (filename.startswith("jstests") or filename.startswith("src/mongo/db/modules/enterprise/jstests")) and filename.endswith(".js") + + +diffed_files = [Path(f) for f in gather_changed_files_for_lint(is_js_file)] +num_changed_files = len(diffed_files) + +if num_changed_files == 0: + print("No js files had changes in them. Exiting.") + sys.exit(0) + +INPUT_DIR = "jstestfuzzinput" +OUTPUT_DIR = "jstestfuzzoutput" +os.makedirs(INPUT_DIR, exist_ok=True) +os.makedirs(OUTPUT_DIR, exist_ok=True) + +for file in diffed_files: + copy_dest = INPUT_DIR / file + os.makedirs(copy_dest.parent, exist_ok=True) + shutil.copy(file, copy_dest) + +OUTPUT_FULL_DIR = Path(os.getcwd()) / OUTPUT_DIR +INPUT_FULL_DIR = Path(os.getcwd()) / INPUT_DIR + +subprocess.run([ + "./src/scripts/npm_run.sh", "jstestfuzz", "--", "--jsTestsDir", INPUT_FULL_DIR, "--out", + OUTPUT_FULL_DIR, "--numSourceFiles", + str(min(num_changed_files, 250)), "--numGeneratedFiles", "250" +], check=True, cwd="jstestfuzz") + + +def _parse_jsfile(jsfile: Path) -> simple_report.Result: + """ + Takes in a path to be attempted to parse + Returns what should be added to the report given to evergreen + """ + print(f"Trying to parse jsfile {jsfile}") + start_time = time.time() + proc = subprocess.run(["./src/scripts/npm_run.sh", "parse-jsfiles", "--", + str(jsfile)], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, + cwd="jstestfuzz") + end_time = time.time() + status = "pass" if proc.returncode == 0 else "fail" + npm_run_output = proc.stdout.decode("UTF-8") + if proc.returncode == 0: + print(f"Successfully to parsed jsfile {jsfile}") + else: + print(f"Failed to parsed jsfile {jsfile}") + print(npm_run_output) + return simple_report.Result(status=status, exit_code=proc.returncode, start=start_time, + end=end_time, test_file=jsfile.name, log_raw=npm_run_output) + + +report = simple_report.Report(failures=0, results=[]) + +with futures.ThreadPoolExecutor() as executor: + parse_jsfiles_futures = [ + executor.submit(_parse_jsfile, Path(jsfile)) + for jsfile in glob.iglob(str(OUTPUT_FULL_DIR / "**"), recursive=True) + if os.path.isfile(jsfile) + ] + + for future in futures.as_completed(parse_jsfiles_futures): + result = future.result() + report["results"].append(result) + report["failures"] += 1 if result["exit_code"] != 0 else 0 + +simple_report.put_report(report) +if report["failures"] > 0: + print("Had at least one failure, exiting with 1") + sys.exit(1) + +print("No failures, exiting success") +sys.exit(0) diff --git a/evergreen/lint_fuzzer_sanity_patch.sh b/evergreen/lint_fuzzer_sanity_patch.sh deleted file mode 100644 index 85a387c66317a..0000000000000 --- a/evergreen/lint_fuzzer_sanity_patch.sh +++ /dev/null @@ -1,40 +0,0 @@ -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" -. 
"$DIR/prelude.sh" - -cd src - -set -o pipefail -set -o verbose - -activate_venv - -mkdir -p jstestfuzzinput jstestfuzzoutput - -# We need to be the jstestfuzz repo for node to install/run -cd jstestfuzz - -indir="$(pwd)/../jstestfuzzinput" -outdir="$(pwd)/../jstestfuzzoutput" - -# Grep all the js files from modified_and_created_patch_files.txt and put them into $indir. -(grep -v "\.tpl\.js$" ../modified_and_created_patch_files.txt | grep ".*jstests/.*\.js$" | xargs -I {} cp {} $indir || true) - -# Count the number of files in $indir. -if [[ "$(ls -A $indir)" ]]; then - num_files=$(ls -A $indir | wc -l) - - # Only fetch 50 files to generate jsfuzz testing files. - if [[ $num_files -gt 50 ]]; then - num_files=50 - fi - - ./src/scripts/npm_run.sh jstestfuzz -- --jsTestsDir $indir --out $outdir --numSourceFiles $num_files --numGeneratedFiles 50 - - # Run parse-jsfiles on 50 files at a time with 32 processes in parallel. - ls -1 -d $outdir/* | xargs -P 32 -L 50 ./src/scripts/npm_run.sh parse-jsfiles -- 2>&1 | tee lint_fuzzer_sanity.log - exit_code=$? - - # Exit out of the jstestfuzz directory - cd .. - $python ./buildscripts/simple_report.py --test-name lint_fuzzer_sanity_patch --log-file jstestfuzz/lint_fuzzer_sanity.log --exit-code $exit_code -fi diff --git a/evergreen/multiversion_setup.sh b/evergreen/multiversion_setup.sh index 4690f713457f0..aab4e86c7bc6e 100644 --- a/evergreen/multiversion_setup.sh +++ b/evergreen/multiversion_setup.sh @@ -7,8 +7,7 @@ set -o errexit set -o verbose activate_venv - -setup_db_contrib_tool_venv +setup_db_contrib_tool export PIPX_HOME="${workdir}/pipx" export PIPX_BIN_DIR="${workdir}/pipx/bin" @@ -20,49 +19,6 @@ edition="${multiversion_edition}" platform="${multiversion_platform}" architecture="${multiversion_architecture}" -# The platform and architecture for how some of the binaries are reported in -# https://downloads.mongodb.org/full.json changed between MongoDB 4.0 and MongoDB 4.2. -# Certain build variants define additional multiversion_*_42_or_later expansions in order to -# be able to fetch a complete set of versions. - -if [ ! -z "${multiversion_edition_42_or_later}" ]; then - edition="${multiversion_edition_42_or_later}" -fi - -if [ ! -z "${multiversion_platform_42_or_later}" ]; then - platform="${multiversion_platform_42_or_later}" -fi - -if [ ! -z "${multiversion_architecture_42_or_later}" ]; then - architecture="${multiversion_architecture_42_or_later}" -fi - -db-contrib-tool setup-repro-env \ - --installDir /data/install \ - --linkDir /data/multiversion \ - --edition $edition \ - --platform $platform \ - --architecture $architecture \ - --debug \ - 4.2 - -# The platform and architecture for how some of the binaries are reported in -# https://downloads.mongodb.org/full.json changed between MongoDB 4.2 and MongoDB 4.4. -# Certain build variants define additional multiversion_*_44_or_later expansions in order to -# be able to fetch a complete set of versions. - -if [ ! -z "${multiversion_edition_44_or_later}" ]; then - edition="${multiversion_edition_44_or_later}" -fi - -if [ ! -z "${multiversion_platform_44_or_later}" ]; then - platform="${multiversion_platform_44_or_later}" -fi - -if [ ! 
-z "${multiversion_architecture_44_or_later}" ]; then - architecture="${multiversion_architecture_44_or_later}" -fi - last_lts_arg="--installLastLTS" last_continuous_arg="--installLastContinuous" @@ -74,14 +30,23 @@ if [[ -n "${last_continuous_evg_version_id}" ]]; then last_continuous_arg="${last_continuous_evg_version_id}" fi -db-contrib-tool setup-repro-env \ - --installDir /data/install \ +base_command="db-contrib-tool setup-repro-env" +evergreen_args="--installDir /data/install \ --linkDir /data/multiversion \ - --edition $edition \ --platform $platform \ - --architecture $architecture \ + --architecture $architecture" +local_args="--edition $edition \ --fallbackToMaster \ - --resmokeCmd "python buildscripts/resmoke.py" \ + --resmokeCmd \"python buildscripts/resmoke.py\" \ --debug \ - $last_lts_arg \ - $last_continuous_arg 4.4 5.0 + ${last_lts_arg} \ + ${last_continuous_arg} 4.4 5.0 6.0" + +remote_invocation="${base_command} ${evergreen_args} ${local_args}" +eval "${remote_invocation}" +echo "Verbatim db-contrib-tool invocation: ${remote_invocation}" + +local_invocation="${base_command} ${local_args}" +echo "Local db-contrib-tool invocation: ${local_invocation}" + +echo "${local_invocation}" > local-db-contrib-tool-invocation.txt diff --git a/evergreen/notary_client_crypt_run.sh b/evergreen/notary_client_crypt_run.sh deleted file mode 100644 index 4d73c3fed988f..0000000000000 --- a/evergreen/notary_client_crypt_run.sh +++ /dev/null @@ -1,20 +0,0 @@ -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" -. "$DIR/prelude.sh" - -cd src - -. ./notary_env.sh - -set -o errexit -set -o verbose - -ext="${ext:-tgz}" - -mv "mongo_crypt_shared_v1.$ext" mongo_crypt_shared_v1-${push_name}-${push_arch}-${suffix}.${ext} - -/usr/local/bin/notary-client.py \ - --key-name "server-7.0" \ - --auth-token-file ${workdir}/src/signing_auth_token \ - --comment "Evergreen Automatic Signing ${revision} - ${build_variant} - ${branch_name}" \ - --notary-url http://notary-service.build.10gen.cc:5000 \ - mongo_crypt_shared_v1-${push_name}-${push_arch}-${suffix}.${ext} diff --git a/evergreen/notary_client_run.sh b/evergreen/notary_client_run.sh deleted file mode 100644 index 41173e36fb1b6..0000000000000 --- a/evergreen/notary_client_run.sh +++ /dev/null @@ -1,23 +0,0 @@ -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" -. "$DIR/prelude.sh" - -cd src - -. 
./notary_env.sh - -set -o errexit -set -o verbose - -long_ext=${ext} -if [ "$long_ext" == "tgz" ]; then - long_ext="tar.gz" -fi - -mv mongo-binaries.tgz mongodb-${push_name}-${push_arch}-${suffix}.${ext} -mv mongo-cryptd.tgz mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext} || true -mv mh.tgz mh-${push_name}-${push_arch}-${suffix}.${ext} || true -mv mongo-debugsymbols.tgz mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext} || true -mv distsrc.${ext} mongodb-src-${src_suffix}.${long_ext} || true -/usr/bin/find build/ -type f | grep msi$ | xargs -I original_filename cp original_filename mongodb-${push_name}-${push_arch}-${suffix}.msi || true - -/usr/local/bin/notary-client.py --key-name "server-7.0" --auth-token-file ${workdir}/src/signing_auth_token --comment "Evergreen Automatic Signing ${revision} - ${build_variant} - ${branch_name}" --notary-url http://notary-service.build.10gen.cc:5000 --skip-missing mongodb-${push_name}-${push_arch}-${suffix}.${ext} mongodb-${push_name}-${push_arch}-debugsymbols-${suffix}.${ext} mongodb-${push_name}-${push_arch}-${suffix}.msi mongodb-src-${src_suffix}.${long_ext} mongodb-cryptd-${push_name}-${push_arch}-${suffix}.${ext} diff --git a/evergreen/packages_publish.sh b/evergreen/packages_publish.sh index 960bc3e0a9246..60686adcb0710 100644 --- a/evergreen/packages_publish.sh +++ b/evergreen/packages_publish.sh @@ -1,12 +1,24 @@ DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" . "$DIR/prelude.sh" +set -o verbose + +packagesfile=packages.tgz + +curl https://s3.amazonaws.com/mciuploads/${project}/${build_variant}/${revision}/artifacts/${build_id}-packages.tgz >> $packagesfile + +podman run \ + -v $(pwd):$(pwd) \ + -w $(pwd) \ + --env-host \ + ${UPLOAD_LOCK_IMAGE} \ + -key=${version_id}/${build_id}/packages/${packagesfile} -tag=task-id=${EVERGREEN_TASK_ID} ${packagesfile} + cd src . ./notary_env.sh set -o errexit -set -o verbose CURATOR_RELEASE=${curator_release} curl -L -O http://boxes.10gen.com/build/curator/curator-dist-rhel70-$CURATOR_RELEASE.tar.gz diff --git a/evergreen/prelude_db_contrib_tool.sh b/evergreen/prelude_db_contrib_tool.sh index 8e9ad1a3995dd..40e0e3458b2fc 100644 --- a/evergreen/prelude_db_contrib_tool.sh +++ b/evergreen/prelude_db_contrib_tool.sh @@ -1,11 +1,13 @@ -function setup_db_contrib_tool_venv { +function setup_db_contrib_tool { - mkdir ${workdir}/pipx + mkdir -p ${workdir}/pipx export PIPX_HOME="${workdir}/pipx" export PIPX_BIN_DIR="${workdir}/pipx/bin" export PATH="$PATH:$PIPX_BIN_DIR" python -m pip --disable-pip-version-check install "pip==21.0.1" "wheel==0.37.0" || exit 1 - python -m pip --disable-pip-version-check install "pipx" || exit 1 - pipx install "db-contrib-tool==0.6.0" || exit 1 + # We force reinstall here because when we download the previous venv the shebang + # in pipx still points to the old machines python location. + python -m pip --disable-pip-version-check install --force-reinstall --no-deps "pipx==1.2.0" || exit 1 + pipx install "db-contrib-tool==0.6.5" || exit 1 } diff --git a/evergreen/prelude_mongo_task_generator.sh b/evergreen/prelude_mongo_task_generator.sh index 94c363f4e4c4a..bda9892144637 100644 --- a/evergreen/prelude_mongo_task_generator.sh +++ b/evergreen/prelude_mongo_task_generator.sh @@ -1,6 +1,6 @@ function setup_mongo_task_generator { if [ ! 
-f mongo-task-generator ]; then - curl -L https://github.com/mongodb/mongo-task-generator/releases/download/v0.7.3/mongo-task-generator --output mongo-task-generator + curl -L https://github.com/mongodb/mongo-task-generator/releases/download/v0.7.8/mongo-task-generator --output mongo-task-generator chmod +x mongo-task-generator fi } diff --git a/evergreen/prelude_venv.sh b/evergreen/prelude_venv.sh index 6b97a8a9cfb75..a4e9318375e83 100644 --- a/evergreen/prelude_venv.sh +++ b/evergreen/prelude_venv.sh @@ -22,8 +22,24 @@ function activate_venv { if [ "Windows_NT" = "$OS" ]; then export PYTHONPATH="$PYTHONPATH;$(cygpath -w ${workdir}/src)" + elif [ "$(uname)" = "Darwin" ]; then + #SERVER-75626 After activating the virtual environment under the mocos host. the PYTHONPATH setting + #is incorrect, and the site-packages directory of the virtual environment cannot be found in the sys.path. + python_version=$($python -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")') + export PYTHONPATH="${workdir}/venv/lib/python${python_version}/site-packages:${PYTHONPATH}:${workdir}/src" else - export PYTHONPATH="$PYTHONPATH:${workdir}/src" + python_version=$($python -c 'import sys; print(f"{sys.version_info.major}.{sys.version_info.minor}")') + site_packages="${workdir}/venv/lib/python${python_version}/site-packages" + python -c "import sys; print(sys.path)" + + # Check if site_packages is already in sys.path + in_sys_path=$($python -c "import sys; print('$site_packages' in sys.path)") + if [ "$in_sys_path" = "False" ]; then + export PYTHONPATH="${site_packages}:${PYTHONPATH}:${workdir}/src" + else + export PYTHONPATH="$PYTHONPATH:${workdir}/src" + fi + python -c "import sys; print(sys.path)" fi echo "python set to $(which $python)" diff --git a/evergreen/resmoke_tests_execute.sh b/evergreen/resmoke_tests_execute.sh index 37dc3773e7175..6a812a9e32e61 100644 --- a/evergreen/resmoke_tests_execute.sh +++ b/evergreen/resmoke_tests_execute.sh @@ -14,6 +14,11 @@ if [[ ${disable_unit_tests} = "false" && ! 
-f ${skip_tests} ]]; then # activate the virtualenv if it has been set up activate_venv + # Install db-contrib-tool to symbolize crashes during resmoke suite runs + # This is not supported on Windows and MacOS, so doing it only on Linux + if [ "$(uname)" == "Linux" ]; then + setup_db_contrib_tool + fi if [[ -f "patch_test_tags.tgz" ]]; then tags_build_variant="${build_variant}" diff --git a/evergreen/run_python_script.sh b/evergreen/run_python_script.sh index 35181dec3c1be..746c229e9d8ad 100644 --- a/evergreen/run_python_script.sh +++ b/evergreen/run_python_script.sh @@ -8,4 +8,5 @@ set -o verbose cd src activate_venv -$python $@ +echo $python $@ +$python "$@" diff --git a/evergreen/run_upload_lock_push.sh b/evergreen/run_upload_lock_push.sh new file mode 100755 index 0000000000000..7b69d921e0980 --- /dev/null +++ b/evergreen/run_upload_lock_push.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash + +# in the future we will want to errexit, but only once we remove +# continue_on_err from the command + +# executables and source archive are always expected on every build +# source archives should be fine to be uploaded by whichever variant gets +# there first +declare -A ARTIFACTS=( + [${SERVER_TARBALL_PATH}]=${SERVER_TARBALL_KEY} + [${SOURCE_TARBALL_PATH}]=${SOURCE_TARBALL_KEY} + [${SERVER_TARBALL_SIGNATURE_PATH}]=${SERVER_TARBALL_SIGNATURE_KEY} + [${SOURCE_TARBALL_SIGNATURE_PATH}]=${SOURCE_TARBALL_SIGNATURE_KEY} + [${SERVER_TARBALL_SHA1_PATH}]=${SERVER_TARBALL_SHA1_KEY} + [${SOURCE_TARBALL_SHA1_PATH}]=${SOURCE_TARBALL_SHA1_KEY} + [${SERVER_TARBALL_SHA256_PATH}]=${SERVER_TARBALL_SHA256_KEY} + [${SOURCE_TARBALL_SHA256_PATH}]=${SOURCE_TARBALL_SHA256_KEY} + [${SERVER_TARBALL_MD5_PATH}]=${SERVER_TARBALL_MD5_KEY} + [${SOURCE_TARBALL_MD5_PATH}]=${SOURCE_TARBALL_MD5_KEY} +) + +# mongocryptd is only built for enterprise variants +if [ -f "${CRYPTD_TARBALL_PATH}" ]; then + ARTIFACTS[${CRYPTD_TARBALL_PATH}]=${CRYPTD_TARBALL_KEY} + ARTIFACTS[${CRYPTD_TARBALL_SIGNATURE_PATH}]=${CRYPTD_TARBALL_SIGNATURE_KEY} + ARTIFACTS[${CRYPTD_TARBALL_SHA1_PATH}]=${CRYPTD_TARBALL_SHA1_KEY} + ARTIFACTS[${CRYPTD_TARBALL_SHA256_PATH}]=${CRYPTD_TARBALL_SHA256_KEY} + ARTIFACTS[${CRYPTD_TARBALL_MD5_PATH}]=${CRYPTD_TARBALL_MD5_KEY} +fi + +# mongohouse only built sometimes +# we do not sign mongohouse, so no detached signature and no checksums +if [ -f "${MONGOHOUSE_TARBALL_PATH}" ]; then + ARTIFACTS[${MONGOHOUSE_TARBALL_PATH}]=${MONGOHOUSE_TARBALL_KEY} +fi + +# debug symbols are only built sometimes +# not clear which variants that is the case for +if [ -f "${DEBUG_SYMBOLS_TARBALL_PATH}" ]; then + ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_PATH}]=${DEBUG_SYMBOLS_TARBALL_KEY} + ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_SIGNATURE_PATH}]=${DEBUG_SYMBOLS_TARBALL_SIGNATURE_KEY} + ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_SHA1_PATH}]=${DEBUG_SYMBOLS_TARBALL_SHA1_KEY} + ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_SHA256_PATH}]=${DEBUG_SYMBOLS_TARBALL_SHA256_KEY} + ARTIFACTS[${DEBUG_SYMBOLS_TARBALL_MD5_PATH}]=${DEBUG_SYMBOLS_TARBALL_MD5_KEY} +fi + +# MSIs are only built on windows +# note there is no detached signature file +if [ -f "${MSI_PATH}" ]; then + ARTIFACTS[${MSI_PATH}]=${MSI_KEY} + ARTIFACTS[${MSI_SHA1_PATH}]=${MSI_SHA1_KEY} + ARTIFACTS[${MSI_SHA256_PATH}]=${MSI_SHA256_KEY} + ARTIFACTS[${MSI_MD5_PATH}]=${MSI_MD5_KEY} +fi + +set -o verbose + +for path in "${!ARTIFACTS[@]}"; do + + key=${ARTIFACTS[${path}]} + podman run \ + -v $(pwd):$(pwd) \ + -w $(pwd) \ + --env-host \ + ${UPLOAD_LOCK_IMAGE} \ + -key=${key} -tag=task-id=${EVERGREEN_TASK_ID} ${path} + +done diff --git 
a/evergreen/scons_compile.sh b/evergreen/scons_compile.sh index 2da927b9039cc..0fe4cc99c08be 100755 --- a/evergreen/scons_compile.sh +++ b/evergreen/scons_compile.sh @@ -60,6 +60,64 @@ if [ "${generating_for_ninja}" = "true" ] && [ "Windows_NT" = "$OS" ]; then fi activate_venv +# if build_patch_id is passed, try to download binaries from specified +# evergreen patch. +# This is purposfully before the venv setup so we do not touch the venv deps +if [ -n "${build_patch_id}" ]; then + echo "build_patch_id detected, trying to skip task" + if [ "${task_name}" = "compile_dist_test" ] || [ "${task_name}" = "compile_dist_test_half" ]; then + echo "Skipping ${task_name} compile without downloading any files" + exit 0 + fi + + # On windows we change the extension to zip + if [ -z "${ext}" ]; then + ext="tgz" + fi + + extra_db_contrib_args="" + + # get the platform of the dist archive. This is needed if + # db-contrib-tool cannot autodetect the platform of the ec2 instance. + regex='MONGO_DISTMOD=([a-z0-9]*)' + if [[ ${compile_flags} =~ ${regex} ]]; then + extra_db_contrib_args="${extra_db_contrib_args} --platform=${BASH_REMATCH[1]}" + fi + + if [ "${task_name}" = "archive_dist_test" ]; then + file_name="mongodb-binaries.${ext}" + invocation="db-contrib-tool setup-repro-env ${build_patch_id} \ + --variant=${compile_variant} --extractDownloads=False \ + --binariesName=${file_name} --installDir=./ ${extra_db_contrib_args}" + fi + + if [ "${task_name}" = "archive_dist_test_debug" ]; then + file_name="mongo-debugsymbols.${ext}" + invocation="db-contrib-tool setup-repro-env ${build_patch_id} \ + --variant=${compile_variant} --extractDownloads=False \ + --debugsymbolsName=${file_name} --installDir=./ \ + --skipBinaries --downloadSymbols ${extra_db_contrib_args}" + fi + + if [ -n "${invocation}" ]; then + setup_db_contrib_tool + + echo "db-contrib-tool invocation: ${invocation}" + eval ${invocation} + if [ $? -ne 0 ]; then + echo "Could not retrieve files with db-contrib-tool" + exit 1 + fi + echo "Downloaded: ${file_name}" + mv "${build_patch_id}/${file_name}" "${file_name}" + echo "Moved ${file_name} to the correct location" + echo "Skipping ${task_name} compile" + exit 0 + fi + + echo "Could not skip ${task_name} compile, compiling as normal" +fi + set -o pipefail eval ${compile_env} $python ./buildscripts/scons.py \ ${compile_flags} ${task_compile_flags} ${task_compile_flags_extra} \ diff --git a/evergreen/selinux_run_test.sh b/evergreen/selinux_run_test.sh index 318d73adbe2f3..c7ec9502a4645 100755 --- a/evergreen/selinux_run_test.sh +++ b/evergreen/selinux_run_test.sh @@ -3,76 +3,139 @@ # Notes on how to run this manually: # - repo must be unpacked into source tree # -# export ssh_key=$HOME/.ssh/id_rsa -# export hostname=ec2-3-91-230-150.compute-1.amazonaws.com -# export user=ec2-user -# export bypass_prelude=yes +# export SSH_KEY=$HOME/.ssh/id_rsa +# export SELINUX_HOSTNAME=ec2-3-91-230-150.compute-1.amazonaws.com +# export SELINUX_USER=ec2-user +# export BYPASS_PRELUDE=yes +# export SRC="$(basename $(pwd) | tee /dev/stderr)" +# export TEST_LIST='jstests/selinux/*.js' # export workdir="$(dirname $(pwd) | tee /dev/stderr)" -# export src="$(basename $(pwd) | tee /dev/stderr)" -# export test_list='jstests/selinux/*.js' -# export pkg_variant=mongodb-enterprise # evergreen/selinux_run_test.sh set -o errexit -DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" -if [ "$bypass_prelude" != "yes" ]; then - . 
"$DIR/prelude.sh" +readonly k_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)" + +if [ "$BYPASS_PRELUDE" != "yes" ]; then + . "$k_dir/prelude.sh" activate_venv - src="src" + readonly k_src="src" +else + readonly k_src="$SRC" fi -set -o xtrace - -if [ "$hostname" == "" ]; then - hostname="$(tr -d '"[]{}' < "$workdir"/$src/hosts.yml | cut -d , -f 1 | awk -F : '{print $2}')" +# If no selinux hostname is defined by external env, then we are running through evergreen, which has dumped spawn host +# properties about this host into hosts.yml via host.list +# (https://github.com/evergreen-ci/evergreen/blob/main/docs/Project-Configuration/Project-Commands.md#hostlist), +# from which we can derive the hostname of the remote host +# Also note that $workdir here is a built-in expansion from evergreen: see more info at +# https://github.com/evergreen-ci/evergreen/blob/main/docs/Project-Configuration/Project-Configuration-Files.md#default-expansions +if [ "$SELINUX_HOSTNAME" == "" ]; then + readonly k_selinux_hostname="$(tr -d '"[]{}' < "$workdir"/$k_src/hosts.yml | cut -d , -f 1 | awk -F : '{print $2}')" + cat "$workdir"/$k_src/hosts.yml +else + readonly k_selinux_hostname="$SELINUX_HOSTNAME" fi -if [ "$user" == "" ]; then - user=$USER -fi +# SELINUX_USER injected from evergreen config, do not change +readonly k_host="${SELINUX_USER}@${k_selinux_hostname}" + +# Obtain the ssh key and properties from expansions.yml, output from evergreen via the expansions.write command +# (https://github.com/evergreen-ci/evergreen/blob/main/docs/Project-Configuration/Project-Commands.md#expansionswrite) +if [ "$SSH_KEY" == "" ]; then + readonly k_ssh_key="$workdir/selinux.pem" -host="${user}@${hostname}" -python="${python:-python3}" + "$workdir"/$k_src/buildscripts/yaml_key_value.py --yamlFile="$workdir"/expansions.yml \ + --yamlKey=__project_aws_ssh_key_value > "$k_ssh_key" + + chmod 600 "$k_ssh_key" + + result="$(openssl rsa -in "$k_ssh_key" -check -noout | tee /dev/stderr)" -if [ "$ssh_key" == "" ]; then - ssh_key="$workdir/selinux.pem" - "$workdir"/$src/buildscripts/yaml_key_value.py --yamlFile="$workdir"/expansions.yml \ - --yamlKey=__project_aws_ssh_key_value > "$ssh_key" - chmod 600 "$ssh_key" - result="$(openssl rsa -in "$ssh_key" -check -noout | tee /dev/stderr)" if [ "$result" != "RSA key ok" ]; then exit 1 fi +else + readonly k_ssh_key="$SSH_KEY" fi -attempts=0 -connection_attempts=50 - -# Check for remote connectivity -set +o errexit -ssh_options="-i $ssh_key -o IdentitiesOnly=yes -o StrictHostKeyChecking=no" -while ! ssh $ssh_options -o ConnectTimeout=10 "$host" echo "I am working"; do - if [ "$attempts" -ge "$connection_attempts" ]; then exit 1; fi - ((attempts++)) - printf "SSH connection attempt %d/%d failed. Retrying...\n" "$attempts" "$connection_attempts" - sleep 10 -done +readonly k_ssh_options="-i $k_ssh_key -o IdentitiesOnly=yes -o StrictHostKeyChecking=no" + +function copy_sources_to_target() { + + rsync -ar -e "ssh $k_ssh_options" \ + --exclude 'tmp' --exclude 'build' --exclude '.*' \ + "$workdir"/$k_src/* "$k_host": + + return $? +} + +function configure_target_machine() { + ssh $k_ssh_options "$k_host" evergreen/selinux_test_setup.sh + return $? +} + +function execute_tests_on_target() { + ssh $k_ssh_options "$k_host" evergreen/selinux_test_executor.sh "$1" + return $? +} + +function check_remote_connectivity() { + ssh -q $k_ssh_options -o ConnectTimeout=10 "$k_host" echo "I am working" + return $? 
+} + +function retry_command() { + + local connection_attempts=$1 + local cmd="$2" + shift 2 #eat the first 2 parameters to pass on any remaining to the calling function + + local attempts=0 + set +o errexit + + while true; do + "$cmd" "$@" + + local result=$? + + if [[ $result -eq 0 ]]; then + set -o errexit + return $result + fi + + if [[ $attempts -ge $connection_attempts ]]; then + printf "%s failed after %d attempts with final error code %s.\n" "$cmd" "$attempts" "$result" + exit 1 + fi + + sleep 10 + ((attempts++)) + + done +} + +echo "===> Checking for remote connectivity..." +retry_command 20 check_remote_connectivity -set -o errexit echo "===> Copying sources to target..." -rsync -ar -e "ssh $ssh_options" \ - --exclude 'tmp' --exclude 'build' --exclude '.*' \ - "$workdir"/$src/* "$host": +retry_command 5 copy_sources_to_target echo "===> Configuring target machine..." -ssh $ssh_options "$host" evergreen/selinux_test_setup.sh +retry_command 5 configure_target_machine echo "===> Executing tests..." -list="$( +readonly list="$( cd src - for x in $test_list; do echo "$x"; done + + # $TEST_LIST defined in evegreen "run selinux tests" function, do not change + for x in $TEST_LIST; do echo "$x"; done )" + for test in $list; do - ssh $ssh_options "$host" evergreen/selinux_test_executor.sh "$test" + execute_tests_on_target "$test" + res="$?" + if [[ $res -ne 0 ]]; then + exit "$res" + fi done diff --git a/evergreen/selinux_test_executor.sh b/evergreen/selinux_test_executor.sh index e4f5c8bc1bb62..481cc2c519931 100755 --- a/evergreen/selinux_test_executor.sh +++ b/evergreen/selinux_test_executor.sh @@ -1,108 +1,175 @@ #!/bin/bash +set +o errexit -set -o errexit -set -o xtrace +readonly k_log_path="/var/log/mongodb/mongod.log" +readonly k_mongo="$(pwd)/dist-test/bin/mongo" +readonly k_test_path="$1" +return_code=1 -mongo="$(pwd)/dist-test/bin/mongo" -export PATH="$(dirname "$mongo"):$PATH" -if [ ! -f "$mongo" ]; then - echo "Mongo shell at $mongo is missing" - exit 1 -fi +export PATH="$(dirname "$k_mongo"):$PATH" -function print() { +function print_err() { echo "$@" >&2 } function monitor_log() { - sed "s!^!mongod| $(date '+%F %H-%M-%S') !" <(sudo --non-interactive tail -f /var/log/mongodb/mongod.log) + sed "s!^!mongod| $(date '+%F %H-%M-%S') !" <(sudo --non-interactive tail -f $k_log_path) } -TEST_PATH="$1" -if [ ! -f "$TEST_PATH" ]; then - print "No test supplied or test file not found. Run:" - print " $(basename "${BASH_SOURCE[0]}") " - exit 1 -fi +function output_ausearch() { + local cmd_parameters="AVC,USER_AVC,SELINUX_ERR,USER_SELINUX_ERR" + + echo "" + echo "====== SELinux errors (ausearch -m $cmd_parameters): ======" + sudo --non-interactive ausearch -m $cmd_parameters -ts $1 +} -# test file is even good before going on -if ! 
"$mongo" --nodb --norc --quiet "$TEST_PATH"; then - print "File $TEST_PATH has syntax errors" +function output_journalctl() { + echo "" + echo "============================== journalctl =========================================" + sudo --non-interactive journalctl --no-pager --catalog --since="$1" | grep -i mongo +} + +function fail_and_exit_err() { + + echo "" + echo "===================================================================================" + echo "++++++++ Test failed, outputting last 5 seconds of additional log info ++++++++++++" + echo "===================================================================================" + output_ausearch "$(date --utc --date='5 seconds ago' '+%x %H:%M:%S')" + output_journalctl "$(date --utc --date='5 seconds ago' +'%Y-%m-%d %H:%M:%S')" + + echo "" + echo "==== FAIL: $1 ====" exit 1 -fi +} -# stop mongod, zero mongo log, clean up database, set all booleans to off -sudo --non-interactive bash -c ' - systemctl stop mongod +function create_mongo_config() { + echo "Writing /etc/mongod.conf for $k_test_path:" + "$k_mongo" --nodb --norc --quiet --eval=' + assert(load("'"$k_test_path"'")); + const test = new TestDefinition(); + print(JSON.stringify(test.config, null, 2)); - rm -f /var/log/mongodb/mongod.log - touch /var/log/mongodb/mongod.log - chown mongod /var/log/mongodb/mongod.log + ' | sudo --non-interactive tee /etc/mongod.conf +} - rm -rf /var/lib/mongo/* +function start_mongod() { + # Start mongod and if it won't come up, fail and exit - rm -rf /etc/sysconfig/mongod /etc/mongod + sudo --non-interactive systemctl start mongod \ + && sudo --non-interactive systemctl status mongod || ( + fail_and_exit_err "systemd failed to start mongod server!" + ) +} - setsebool mongod_can_connect_ldap off - setsebool mongod_can_use_kerberos off -' +function wait_for_mongod_to_accept_connections() { + # Once the mongod process starts via systemd, it can still take a couple of seconds + # to set up and accept connections... we will wait for log id 23016 to show up + # indicating that the server is ready to accept incoming connections before starting the tests -# create mongo config -"$mongo" --nodb --norc --quiet --eval=' - assert(load("'"$TEST_PATH"'")); - const test = new TestDefinition(); - print(typeof(test.config) === "string" ? test.config : JSON.stringify(test.config, null, 2)); -' | sudo --non-interactive tee /etc/mongod.conf + local server_ready=0 + local wait_seconds=2 + local wait_retries_max=30 + local wait_retries=0 -# setup -"$mongo" --nodb --norc --quiet --eval=' - assert(load("'"$TEST_PATH"'")); - const test = new TestDefinition(); - jsTest.log("Running setup()"); - test.setup(); -' + while [[ $wait_retries -le $wait_retries_max ]]; do + local server_status="$(grep 23016 $k_log_path || echo "")" -# start log monitor, also kill it on exit -monitor_log & -MONITORPID="$!" 
-trap "sudo --non-interactive pkill -P $MONITORPID" SIGINT SIGTERM ERR EXIT - -# start mongod and if it won't come up, log SELinux errors -ts="$(date --utc --date='1 seconds ago' '+%x %H:%M:%S')" -tsj="$(date --utc --date='1 seconds ago' +'%Y-%m-%d %H:%M:%S')" -sudo --non-interactive systemctl start mongod \ - && sudo --non-interactive systemctl status mongod || ( - set +o errexit - echo "================== SELinux errors: ==================" - sudo --non-interactive ausearch -m AVC,USER_AVC,SELINUX_ERR,USER_SELINUX_ERR -ts $ts - echo "================== journalctl ==================" - sudo --non-interactive journalctl --no-pager --catalog --since="$tsj" | grep -i mongo - echo "================== /var/log/mongodb/mongod.log ==================" - sudo --non-interactive cat /var/log/mongodb/mongod.log - echo "==== FAIL: mongod service was not started successfully" + if [ "$server_status" != "" ]; then + server_ready=1 + break + fi + + sleep $wait_seconds + ((wait_retries++)) + done + + if [ ! $server_ready ]; then + fail_and_exit_err "failed to connect to mongod server after waiting for $(($wait_seconds * $wait_retries)) seconds!" + fi +} + +function clear_mongo_config() { + # stop mongod, zero mongo log, clean up database, set all booleans to off + sudo --non-interactive bash -c ' + systemctl stop mongod + + rm -f '"$k_log_path"' + touch '"$k_log_path"' + chown mongod '"$k_log_path"' + + rm -rf /var/lib/mongo/* + + rm -rf /etc/sysconfig/mongod /etc/mongod + + setsebool mongod_can_connect_ldap off + setsebool mongod_can_use_kerberos off + ' +} + +function exit_with_code() { + exit $return_code +} + +function setup_test_definition() { + "$k_mongo" --nodb --norc --quiet --eval=' + assert(load("'"$k_test_path"'")); + (() => { + const test = new TestDefinition(); + print("Running setup() for '"$k_test_path"'"); + test.setup(); + })(); + ' +} + +function run_test() { + "$k_mongo" --norc --gssapiServiceName=mockservice --eval=' + assert(load("'"$k_test_path"'")); + print("Running test '"$k_test_path"'"); + + const test = new TestDefinition(); + + try { + await test.run(); + } finally { + test.teardown(); + } + ' || fail_and_exit_err "Test failed" + + echo "SUCCESS: $k_test_path" +} + +if [ ! -f "$k_mongo" ]; then + print_err "Mongo shell at $k_mongo is missing" exit 1 -) - -# run test and teardown -"$mongo" --norc --gssapiServiceName=mockservice --eval=' - assert(load("'"$TEST_PATH"'")); - // name is such to prevent collisions - const test_812de7ce = new TestDefinition(); - try { - jsTest.log("Running test"); - test_812de7ce.run(); - } finally { - test_812de7ce.teardown(); - } -' || ( - echo "==== FAIL: test returned result: $?" - echo "=== SELinux errors:" - set +o errexit - sudo --non-interactive ausearch -m AVC,USER_AVC,SELINUX_ERR,USER_SELINUX_ERR -ts $ts - echo "=== /var/log/mongodb/mongod.log:" - sudo --non-interactive cat /var/log/mongodb/mongod.log +fi + +if [ ! -f "$k_test_path" ]; then + print_err "No test supplied or test file not found. Run:" + print_err "$(basename "${BASH_SOURCE[0]}") " + exit 1 +fi + +# Ensure file containing tests is valid before executing +if ! "$k_mongo" --nodb --norc --quiet "$k_test_path"; then + print_err "File $k_test_path has syntax errors" exit 1 -) +fi + +echo "STARTING TEST: $k_test_path" + +clear_mongo_config +create_mongo_config +setup_test_definition + +# start log monitor, also kill it on exit +monitor_log & +monitor_pid="$!" 
+trap "sudo --non-interactive pkill -P $monitor_pid; exit_with_code" SIGINT SIGTERM ERR EXIT + +start_mongod +wait_for_mongod_to_accept_connections +run_test -set +o xtrace -echo "SUCCESS: $TEST_PATH" +return_code=0 diff --git a/jstests/aggregation/accumulators/first_n_last_n.js b/jstests/aggregation/accumulators/first_n_last_n.js index 87c35c35c77f5..926535eb1ffee 100644 --- a/jstests/aggregation/accumulators/first_n_last_n.js +++ b/jstests/aggregation/accumulators/first_n_last_n.js @@ -4,6 +4,7 @@ (function() { "use strict"; +load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. load("jstests/aggregation/extras/utils.js"); const coll = db[jsTestName()]; @@ -20,17 +21,21 @@ const kMaxSales = 20; let expectedFirstThree = []; let expectedLastThree = []; let expectedAllResults = []; +let expectedFirstNWithInitExpr = []; +let expectedLastNWithInitExpr = []; for (const states of [{state: 'AZ', sales: 3}, {state: 'CA', sales: 2}, {state: 'NY', sales: kMaxSales}]) { let allResults = []; let firstThree = []; let lastThree = []; + let firstWithInitExpr = []; + let lastWithInitExpr = []; const state = states['state']; const sales = states['sales']; for (let i = 0; i < kMaxSales; ++i) { const salesAmt = i * 10; if (i < sales) { - docs.push({state: state, sales: salesAmt}); + docs.push({state: state, sales: salesAmt, stateObj: {"st": state}, n: 3}); // First N candidate. if (i < defaultN) { @@ -40,12 +45,21 @@ for (const states if (i + defaultN >= sales) { lastThree.push(salesAmt); } + + if (i == 0 || (state == 'AZ' && i < defaultN)) { + firstWithInitExpr.push(salesAmt); + } + if (i + 1 == sales || (state == 'AZ' && i + defaultN >= sales)) { + lastWithInitExpr.push(salesAmt); + } allResults.push(salesAmt); } } expectedFirstThree.push({_id: state, sales: firstThree}); expectedLastThree.push({_id: state, sales: lastThree}); expectedAllResults.push({_id: state, sales: allResults}); + expectedFirstNWithInitExpr.push({_id: state, sales: firstWithInitExpr}); + expectedLastNWithInitExpr.push({_id: state, sales: lastWithInitExpr}); } assert.commandWorked(coll.insert(docs)); @@ -65,6 +79,54 @@ function runFirstLastN(n, expectedFirstNResults, expectedLastNResults) { () => "expected " + tojson(expectedFirstNResults) + " actual " + tojson(actualFirstNResults)); + const firstNResultsWithInitExpr = + coll.aggregate([ + {$sort: {_id: 1}}, + { + $group: { + _id: {"st": "$state"}, + sales: { + $firstN: { + input: "$sales", + n: {$cond: {if: {$eq: ["$st", 'AZ']}, then: defaultN, else: 1}} + } + } + } + }, + ]) + .toArray(); + + let expectedResult = []; + expectedFirstNWithInitExpr.forEach( + i => expectedResult.push({'_id': {'st': i['_id']}, sales: i['sales']})); + assert(arrayEq(expectedResult, firstNResultsWithInitExpr), + () => "expected " + tojson(expectedResult) + " actual " + + tojson(firstNResultsWithInitExpr)); + + const firstNResultsWithInitExprAndVariableGroupId = + coll.aggregate([ + {$sort: {_id: 1}}, + { + $group: { + _id: "$stateObj", + sales: { + $firstN: { + input: "$sales", + n: {$cond: {if: {$eq: ["$st", 'AZ']}, then: defaultN, else: 1}} + } + } + } + }, + ]) + .toArray(); + + expectedResult = []; + expectedFirstNWithInitExpr.forEach( + i => expectedResult.push({'_id': {'st': i['_id']}, sales: i['sales']})); + assert(arrayEq(expectedResult, firstNResultsWithInitExprAndVariableGroupId), + () => "expected " + tojson(expectedResult) + " actual " + + tojson(firstNResultsWithInitExprAndVariableGroupId)); + const actualLastNResults = coll.aggregate([ {$sort: {_id: 1}}, @@ 
-75,6 +137,54 @@ function runFirstLastN(n, expectedFirstNResults, expectedLastNResults) { arrayEq(expectedLastNResults, actualLastNResults), () => "expected " + tojson(expectedLastNResults) + " actual " + tojson(actualLastNResults)); + const lastNResultsWithInitExpr = + coll.aggregate([ + {$sort: {_id: 1}}, + { + $group: { + _id: {"st": "$state"}, + sales: { + $lastN: { + input: "$sales", + n: {$cond: {if: {$eq: ["$st", 'AZ']}, then: defaultN, else: 1}} + } + } + } + }, + ]) + .toArray(); + + expectedResult = []; + expectedLastNWithInitExpr.forEach( + i => expectedResult.push({'_id': {'st': i['_id']}, sales: i['sales']})); + assert( + arrayEq(expectedResult, lastNResultsWithInitExpr), + () => "expected " + tojson(expectedResult) + " actual " + tojson(lastNResultsWithInitExpr)); + + const lastNResultsWithInitExprAndVariableGroupId = + coll.aggregate([ + {$sort: {_id: 1}}, + { + $group: { + _id: "$stateObj", + sales: { + $lastN: { + input: "$sales", + n: {$cond: {if: {$eq: ["$st", 'AZ']}, then: defaultN, else: 1}} + } + } + } + }, + ]) + .toArray(); + + expectedResult = []; + expectedLastNWithInitExpr.forEach( + i => expectedResult.push({'_id': {'st': i['_id']}, sales: i['sales']})); + assert(arrayEq(expectedResult, lastNResultsWithInitExprAndVariableGroupId), + () => "expected " + tojson(expectedResult) + " actual " + + tojson(lastNResultsWithInitExprAndVariableGroupId)); + function reorderBucketResults(bucketResults) { // Using a computed projection will put the fields out of order. As such, we re-order them // below. @@ -213,4 +323,4 @@ assert.commandFailedWithCode( "aggregate", {pipeline: [{$group: {_id: {'st': '$state'}, sales: {$firstN: {n: 2}}}}], cursor: {}}), 5787907); -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/accumulators/median_approx.js b/jstests/aggregation/accumulators/median_approx.js index 0e0b1d6dbfaaa..166172d0661f9 100644 --- a/jstests/aggregation/accumulators/median_approx.js +++ b/jstests/aggregation/accumulators/median_approx.js @@ -3,7 +3,6 @@ * field 'p':[0.5]. * @tags: [ * requires_fcv_70, - * featureFlagApproxPercentiles * ] */ (function() { diff --git a/jstests/aggregation/accumulators/min_n_max_n.js b/jstests/aggregation/accumulators/min_n_max_n.js index c7660fc5df265..e7a4ea329cf3d 100644 --- a/jstests/aggregation/accumulators/min_n_max_n.js +++ b/jstests/aggregation/accumulators/min_n_max_n.js @@ -4,6 +4,8 @@ (function() { "use strict"; +load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. + const coll = db[jsTestName()]; coll.drop(); diff --git a/jstests/aggregation/accumulators/percentiles_approx.js b/jstests/aggregation/accumulators/percentiles_approx.js index ee6b8db778a4f..29bad4f376a54 100644 --- a/jstests/aggregation/accumulators/percentiles_approx.js +++ b/jstests/aggregation/accumulators/percentiles_approx.js @@ -2,7 +2,6 @@ * Tests for the approximate percentile accumulator semantics. * @tags: [ * requires_fcv_70, - * featureFlagApproxPercentiles * ] */ (function() { @@ -16,10 +15,11 @@ const coll = db[jsTestName()]; * Tests for correctness without grouping. Each group gets its own accumulator so we can validate * the basic $percentile functionality using a single group. 
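+ * The optional 'letSpec' argument is forwarded to aggregate() as its 'let' option, so the
+ * percentile specs in these tests can reference $$variables.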
*/ -function testWithSingleGroup({docs, percentileSpec, expectedResult, msg}) { +function testWithSingleGroup({docs, percentileSpec, letSpec, expectedResult, msg}) { coll.drop(); coll.insertMany(docs); - const res = coll.aggregate([{$group: {_id: null, p: percentileSpec}}]).toArray(); + const res = + coll.aggregate([{$group: {_id: null, p: percentileSpec}}], {let : letSpec}).toArray(); // For $percentile the result should be ordered to match the spec, so assert exact equality. assert.eq(expectedResult, res[0].p, msg + `; Result: ${tojson(res)}`); @@ -67,6 +67,41 @@ testWithSingleGroup({ msg: "Multiple percentiles" }); +testWithSingleGroup({ + docs: [{x: 0}, {x: 1}, {x: 2}], + percentileSpec: {$percentile: {p: "$$ps", input: "$x", method: "approximate"}}, + letSpec: {ps: [0.5, 0.9, 0.1]}, + expectedResult: [1, 2, 0], + msg: "Multiple percentiles using variable in the percentile spec for the whole array" +}); + +testWithSingleGroup({ + docs: [{x: 0}, {x: 1}, {x: 2}], + percentileSpec: {$percentile: {p: ["$$p90"], input: "$x", method: "approximate"}}, + letSpec: {p90: 0.9}, + expectedResult: [2], + msg: "Single percentile using variable in the percentile spec for the array elements" +}); + +testWithSingleGroup({ + docs: [{x: 0}, {x: 1}, {x: 2}], + percentileSpec: { + $percentile: + {p: {$concatArrays: [[0.1, 0.5], ["$$p90"]]}, input: "$x", method: "approximate"} + }, + letSpec: {p90: 0.9}, + expectedResult: [0, 1, 2], + msg: "Multiple percentiles using const expression in the percentile spec" +}); + +testWithSingleGroup({ + docs: [{x: 0}, {x: 1}, {x: 2}], + percentileSpec: {$percentile: {p: "$$ps", input: {$add: [42, "$x"]}, method: "approximate"}}, + letSpec: {ps: [0.5, 0.9, 0.1]}, + expectedResult: [42 + 1, 42 + 2, 42 + 0], + msg: "Multiple percentiles using expression as input" +}); + function testWithMultipleGroups({docs, percentileSpec, expectedResult, msg}) { coll.drop(); coll.insertMany(docs); diff --git a/jstests/aggregation/accumulators/percentiles_syntax.js b/jstests/aggregation/accumulators/percentiles_syntax.js index c1aa4a050fed6..b93954cbe67f5 100644 --- a/jstests/aggregation/accumulators/percentiles_syntax.js +++ b/jstests/aggregation/accumulators/percentiles_syntax.js @@ -2,7 +2,6 @@ * Tests for the $percentile accumulator syntax. * @tags: [ * requires_fcv_70, - * featureFlagApproxPercentiles * ] */ (function() { @@ -16,91 +15,218 @@ coll.drop(); // order to check its format. coll.insert({x: 42}); -/** - * Tests to check that invalid $percentile specifications are rejected. 
- */ -function assertInvalidSyntax(percentileSpec, msg) { - assert.commandFailed( - coll.runCommand("aggregate", - {pipeline: [{$group: {_id: null, p: percentileSpec}}], cursor: {}}), - msg); +function assertInvalidSyntax({pSpec, letSpec, msg}) { + let command = {pipeline: [{$group: {_id: null, p: pSpec}}], let : letSpec, cursor: {}}; + assert.commandFailed(coll.runCommand("aggregate", command), msg); } -assertInvalidSyntax({$percentile: 0.5}, "Should fail if $percentile is not an object"); - -assertInvalidSyntax({$percentile: {input: "$x", method: "approximate"}}, - "Should fail if $percentile is missing 'p' field"); - -assertInvalidSyntax({$percentile: {p: [0.5], method: "approximate"}}, - "Should fail if $percentile is missing 'input' field"); +function assertValidSyntax({pSpec, letSpec, msg}) { + let command = {pipeline: [{$group: {_id: null, p: pSpec}}], let : letSpec, cursor: {}}; + assert.commandWorked(coll.runCommand("aggregate", command), msg); +} -assertInvalidSyntax({$percentile: {p: [0.5], input: "$x"}}, - "Should fail if $percentile is missing 'method' field"); +/** + * Test missing or unexpected fields in $percentile spec. + */ +assertInvalidSyntax( + {pSpec: {$percentile: 0.5}, msg: "Should fail if $percentile is not an object"}); -assertInvalidSyntax({$percentile: {p: [0.5], input: "$x", method: "approximate", extras: 42}}, - "Should fail if $percentile contains an unexpected field"); +assertInvalidSyntax({ + pSpec: {$percentile: {input: "$x", method: "approximate"}}, + msg: "Should fail if $percentile is missing 'p' field" +}); -assertInvalidSyntax({$percentile: {p: 0.5, input: "$x", method: "approximate"}}, - "Should fail if 'p' field in $percentile isn't array"); +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5], method: "approximate"}}, + msg: "Should fail if $percentile is missing 'input' field" +}); -assertInvalidSyntax({$percentile: {p: [], input: "$x", method: "approximate"}}, - "Should fail if 'p' field in $percentile is an empty array"); +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5], input: "$x"}}, + msg: "Should fail if $percentile is missing 'method' field" +}); -assertInvalidSyntax( - {$percentile: {p: [0.5, "foo"], input: "$x", method: "approximate"}}, - "Should fail if 'p' field in $percentile is an array with a non-numeric element"); +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5], input: "$x", method: "approximate", extras: 42}}, + msg: "Should fail if $percentile contains an unexpected field" +}); -assertInvalidSyntax( - {$percentile: {p: [0.5, 10], input: "$x", method: "approximate"}}, - "Should fail if 'p' field in $percentile is an array with any value outside of [0, 1] range"); +/** + * Test invalid 'p' field, specified as a constant. 
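+ * Every case below goes through assertInvalidSyntax() above, which runs the aggregate
+ * command and expects it to fail.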
+ */ +assertInvalidSyntax({ + pSpec: {$percentile: {p: 0.5, input: "$x", method: "approximate"}}, + msg: "Should fail if 'p' field in $percentile isn't array" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [], input: "$x", method: "approximate"}}, + msg: "Should fail if 'p' field in $percentile is an empty array" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, "foo"], input: "$x", method: "approximate"}}, + msg: "Should fail if 'p' field in $percentile is an array with a non-numeric element" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 10], input: "$x", method: "approximate"}}, + msg: + "Should fail if 'p' field in $percentile is an array with any value outside of [0, 1] range" +}); -assertInvalidSyntax({$percentile: {p: [0.5, 0.7], input: "$x", method: 42}}, - "Should fail if 'method' field isn't a string"); +/** + * Test invalid 'p' field, specified as an expression. + */ +assertInvalidSyntax({ + pSpec: {$percentile: {p: ["$x"], input: "$x", method: "approximate"}}, + msg: "'p' should not accept non-const expressions" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: {$add: [0.1, 0.5]}, input: "$x", method: "approximate"}}, + msg: "'p' should not accept expressions that evaluate to a non-array" +}); + +assertInvalidSyntax({ + pSpec: { + $percentile: + {p: {$concatArrays: [[0.01, 0.1], ["foo"]]}, input: "$x", method: "approximate"} + }, + msg: "'p' should not accept expressions that evaluate to an array with non-numeric elements" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: "$$pvals", input: "$x", method: "approximate"}}, + letSpec: {pvals: 0.5}, + msg: "'p' should not accept variables that evaluate to a non-array" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: "$$pvals", input: "$x", method: "approximate"}}, + letSpec: {pvals: [0.5, "foo"]}, + msg: "'p' should not accept variables that evaluate to an array with non-numeric elements" +}); -assertInvalidSyntax({$percentile: {p: [0.5, 0.7], input: "$x", method: "fancy"}}, - "Should fail if 'method' isn't one of _predefined_ strings"); +/** + * Test invalid 'method' field. + */ +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.7], input: "$x", method: 42}}, + msg: "$percentile should fail if 'method' field isn't a string" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.7], input: "$x", method: "fancy"}}, + msg: "$percentile should fail if 'method' isn't one of _predefined_ strings" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.7], input: "$x", method: "discrete"}}, + msg: "$percentile should fail because discrete 'method' isn't supported yet" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.7], input: "$x", method: "continuous"}}, + msg: "$percentile should fail because continuous 'method' isn't supported yet" +}); /** - * Tests for $median. $median desugars to $percentile with the field p:[0.5] added, and therefore - * has similar syntax to $percentile. + * Tests for invalid $median. 
*/ +assertInvalidSyntax({ + pSpec: {$median: {p: [0.5], input: "$x", method: "approximate"}}, + msg: "$median should fail if 'p' is defined" +}); -assertInvalidSyntax({$median: {p: [0.5], input: "$x", method: "approximate"}}, - "Should fail if 'p' is defined"); +assertInvalidSyntax({ + pSpec: {$median: {method: "approximate"}}, + msg: "$median should fail if 'input' field is missing" +}); + +assertInvalidSyntax( + {pSpec: {$median: {input: "$x"}}, msg: "Median should fail if 'method' field is missing"}); -assertInvalidSyntax({$median: {method: "approximate"}}, - "Should fail if $median is missing 'input' field"); +assertInvalidSyntax({ + pSpec: {$median: {input: "$x", method: "approximate", extras: 42}}, + msg: "$median should fail if there is an unexpected field" +}); -assertInvalidSyntax({$median: {input: "$x"}}, "Should fail if $median is missing 'method' field"); +assertInvalidSyntax({ + pSpec: {$median: {input: "$x", method: "fancy"}}, + msg: "$median should fail if 'method' isn't one of the _predefined_ strings" +}); + +assertInvalidSyntax({ + pSpec: {$median: {input: "$x", method: "discrete"}}, + msg: "$median should fail because discrete 'method' isn't supported yet" +}); + +assertInvalidSyntax({ + pSpec: {$median: {input: "$x", method: "continuous"}}, + msg: "$median should fail because continuous 'method' isn't supported yet" +}); -assertInvalidSyntax({$median: {input: "$x", method: "approximate", extras: 42}}, - "Should fail if $median contains an unexpected field"); /** * Test that valid $percentile specifications are accepted. The results, i.e. semantics, are tested * elsewhere and would cover all of the cases below, we are providing them here nonetheless for * completeness. */ -function assertValidSyntax(percentileSpec, msg) { - assert.commandWorked( - coll.runCommand("aggregate", - {pipeline: [{$group: {_id: null, p: percentileSpec}}], cursor: {}}), - msg); -} - -assertValidSyntax( - {$percentile: {p: [0.0, 0.0001, 0.5, 0.995, 1.0], input: "$x", method: "approximate"}}, - "Should be able to specify an array of percentiles"); - -assertValidSyntax( - {$percentile: {p: [0.5, 0.9], input: {$divide: ["$x", 2]}, method: "approximate"}}, - "Should be able to specify 'input' as an expression"); - -assertValidSyntax({$percentile: {p: [0.5, 0.9], input: "x", method: "approximate"}}, - "Non-numeric inputs should be gracefully ignored"); +assertValidSyntax({ + pSpec: {$percentile: {p: [0.0, 0.0001, 0.5, 0.995, 1.0], input: "$x", method: "approximate"}}, + msg: "Should be able to specify an array of percentiles" +}); + +assertValidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.9], input: {$divide: ["$x", 2]}, method: "approximate"}}, + msg: "Should be able to specify 'input' as an expression" +}); + +assertValidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.9], input: "x", method: "approximate"}}, + msg: "Non-numeric inputs should be gracefully ignored" +}); + +assertValidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.9], input: {$add: [2, "$x"]}, method: "approximate"}}, + msg: "'input' should be able to use expressions" +}); + +assertValidSyntax({ + pSpec: { + $percentile: {p: [0.5, 0.9], input: {$concatArrays: [[2], ["$x"]]}, method: "approximate"} + }, + msg: "'input' should be able to use expressions even if the result of their eval is non-numeric" +}); + +assertValidSyntax({ + pSpec: { + $percentile: + {p: {$concatArrays: [[0.01, 0.1], [0.9, 0.99]]}, input: "$x", method: "approximate"} + }, + msg: "'p' should be able to use expressions that evaluate to an array" +}); + +assertValidSyntax({ 
+ pSpec: {$percentile: {p: [{$add: [0.1, 0.5]}], input: "$x", method: "approximate"}}, + msg: "'p' should be able to use expressions for the array elements" +}); + +assertValidSyntax({ + pSpec: {$percentile: {p: "$$pvals", input: "$x", method: "approximate"}}, + letSpec: {pvals: [0.5, 0.9]}, + msg: "'p' should be able to use variables for the array" +}); + +assertValidSyntax({ + pSpec: {$percentile: {p: ["$$p1", "$$p2"], input: "$x", method: "approximate"}}, + letSpec: {p1: 0.5, p2: 0.9}, + msg: "'p' should be able to use variables for the array elements" +}); /** - * Tests for $median. $median desugars to $percentile with the field p:[0.5] added. + * Tests for valid $median. */ - -assertValidSyntax({$median: {input: "$x", method: "approximate"}}, "Simple base case for $median."); +assertValidSyntax( + {pSpec: {$median: {input: "$x", method: "approximate"}}, msg: "Simple base case for $median."}); })(); diff --git a/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js b/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js index d0844a4acacae..4e33a4c42b215 100644 --- a/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js +++ b/jstests/aggregation/accumulators/top_bottom_top_n_bottom_n.js @@ -4,6 +4,8 @@ (function() { "use strict"; +load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. + const coll = db[jsTestName()]; coll.drop(); @@ -392,4 +394,17 @@ const testOperatorText = (op) => { // most relevant results first. testOperatorText("$bottomN"); testOperatorText("$topN"); + +// Test constant output and sortBy. +assert(coll.drop()); +assert.commandWorked(coll.insertMany([{a: 1}, {a: 2}, {a: 3}])); +const testConstantOutputAndSort = (op) => { + const results = + coll.aggregate([{$group: {_id: null, result: {[op]: {n: 3, output: "abc", sortBy: {}}}}}]) + .toArray(); + assert.eq(results.length, 1, results); + assert.docEq(results[0], {_id: null, result: ["abc", "abc", "abc"]}, results); +}; +testConstantOutputAndSort("$topN"); +testConstantOutputAndSort("$bottomN"); })(); diff --git a/jstests/aggregation/add_with_date.js b/jstests/aggregation/add_with_date.js index 4d76a6908d7cd..00b4a3ddb2b34 100644 --- a/jstests/aggregation/add_with_date.js +++ b/jstests/aggregation/add_with_date.js @@ -1,9 +1,5 @@ // Test $add with date -(function() { -"use strict"; - load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. const coll = db.getSiblingDB(jsTestName()).coll; coll.drop(); @@ -132,5 +128,4 @@ assert.eq(ISODate("2019-01-30T07:30:10.958Z"), getResultOfExpression({ "$doubleVal", NumberLong("-2397083434877565864") ] - })); -}()); + })); \ No newline at end of file diff --git a/jstests/aggregation/api_version_stage_allowance_checks.js b/jstests/aggregation/api_version_stage_allowance_checks.js index 3e6ec804b69a8..50b40ce4a3a30 100644 --- a/jstests/aggregation/api_version_stage_allowance_checks.js +++ b/jstests/aggregation/api_version_stage_allowance_checks.js @@ -140,18 +140,6 @@ result = testDB.runCommand({ }); assert.commandWorked(result); -// Tests that the internal '$_generateV2ResumeTokens' option does not fail with 'apiStrict: true'. 
-result = testDB.runCommand({ - aggregate: collName, - pipeline: [{$project: {_id: 0}}], - cursor: {}, - writeConcern: {w: "majority"}, - $_generateV2ResumeTokens: false, - apiVersion: "1", - apiStrict: true -}); -assert.commandWorked(result); - // Tests that time-series collection can be queried (invoking $_internalUnpackBucket stage) // from an external client with 'apiStrict'. (function testInternalUnpackBucketAllowance() { diff --git a/jstests/aggregation/bugs/exclusion_projection_does_not_affect_field_order.js b/jstests/aggregation/bugs/exclusion_projection_does_not_affect_field_order.js index 09d2239389a6c..135b33e794f56 100644 --- a/jstests/aggregation/bugs/exclusion_projection_does_not_affect_field_order.js +++ b/jstests/aggregation/bugs/exclusion_projection_does_not_affect_field_order.js @@ -33,4 +33,9 @@ assert.eq( {$sort: {_id: 1}} ]) .toArray()); + +assert.commandWorked(coll.insert({_id: 4, c: {y: 11, z: 22, a: 33}, a: 1})); + +assert.eq([{_id: 1}, {_id: 2, c: 1}, {_id: 3, y: 1, z: 1}, {_id: 4, c: {y: 11, a: 33}, a: 1}], + coll.aggregate([{$project: {"c.z": 0}}, {$sort: {_id: 1}}]).toArray()); }()); diff --git a/jstests/aggregation/bugs/groupMissing.js b/jstests/aggregation/bugs/groupMissing.js index 2bbad19baafba..d54d5a7cc05b4 100644 --- a/jstests/aggregation/bugs/groupMissing.js +++ b/jstests/aggregation/bugs/groupMissing.js @@ -6,8 +6,6 @@ // covered, which will not happen if the $sort is within a $facet stage. // @tags: [ // do_not_wrap_aggregations_in_facets, -// # TODO SERVER-67550: Equality to null does not match undefined in CQF. -// cqf_incompatible, // ] load('jstests/aggregation/extras/utils.js'); // For assertArrayEq. @@ -83,20 +81,28 @@ coll.insert({a: 1}); let collScanResult = coll.aggregate({$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}).toArray(); assertArrayEq({actual: collScanResult, expected: [{"a": 1, "b": null}, {"a": 1}]}); -// After creating the index, the plan will use PROJECTION_COVERED, and the index will incorrectly -// provide a null for the missing "b" value. -coll.createIndex({a: 1, b: 1}); -// Assert that the bug SERVER-23229 is still present. -assertArrayEq({ - actual: coll.aggregate({$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}).toArray(), - expected: [{"a": 1, "b": null}, {"a": 1, "b": null}] -}); -// Correct behavior after SERVER-23229 is fixed. -if (0) { - assertArrayEq({ - actual: coll.aggregate({$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}).toArray(), - expected: collScanResult - }); +// After creating the index, the classic plan will use PROJECTION_COVERED, and the index will +// incorrectly provide a null for the missing "b" value. Bonsai does not exhibit SERVER-23229. So, +// either the new engine is used and the correct results (collScanResult) are seen, or we see the +// incorrect result, where all values of "b" are null. +assert.commandWorked(coll.createIndex({a: 1, b: 1})); +const possibleResults = [collScanResult, [{"a": 1, "b": null}, {"a": 1, "b": null}]]; + +function checkActualMatchesAnExpected(actual) { + let foundMatch = false; + for (let i = 0; i < possibleResults.length; i++) { + foundMatch |= arrayEq(actual, possibleResults[i]); + } + assert(foundMatch, + `Expected actual results to match one of the possible results. actual=${ + tojson(actual)}, possibleResults=${tojson(possibleResults)}`); } + +// Check behavior with and without a hint. 
+checkActualMatchesAnExpected( + coll.aggregate([{$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}]).toArray()); +checkActualMatchesAnExpected( + coll.aggregate([{$match: {a: 1}}, {$project: {_id: 0, a: 1, b: 1}}], {hint: {a: 1, b: 1}}) + .toArray()); }()); diff --git a/jstests/aggregation/bugs/hash_lookup_spill_large_and_small_documents_correctly.js b/jstests/aggregation/bugs/hash_lookup_spill_large_and_small_documents_correctly.js new file mode 100644 index 0000000000000..de8f79bbea7b7 --- /dev/null +++ b/jstests/aggregation/bugs/hash_lookup_spill_large_and_small_documents_correctly.js @@ -0,0 +1,74 @@ +// Regression test to check that different document sizes work correctly with $lookup. +// @tags: [ +// requires_fcv_71, +// ] +(function() { +'use strict'; + +load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers' + +const localColl = db.lookup_spill_local; +const foreignColl = db.lookup_spill_foreign; +localColl.drop(); +foreignColl.drop(); + +const memoryLimit = 128; // Spill at 128 bytes + +function setHashLookupMemoryLimit(memoryLimit) { + const commandResArr = FixtureHelpers.runCommandOnEachPrimary({ + db: db.getSiblingDB("admin"), + cmdObj: { + setParameter: 1, + internalQuerySlotBasedExecutionHashLookupApproxMemoryUseInBytesBeforeSpill: memoryLimit, + } + }); + assert.gt(commandResArr.length, 0, "Setting memory limit on primaries failed"); + assert.commandWorked(commandResArr[0]); +} + +function runHashLookupSpill() { + const smallStr = "small"; + const bigStr = Array(memoryLimit).toString(); + const localDoc = {_id: 1, a: 2}; + const foreignDocs = [ + {_id: 0, b: 1, padding: smallStr}, + {_id: 1, b: 2, padding: bigStr}, + {_id: 2, b: 1, padding: smallStr}, + {_id: 3, b: 2, padding: bigStr}, + {_id: 4, b: 1, padding: smallStr}, + {_id: 5, b: 2, padding: bigStr}, + {_id: 6, b: 1, padding: smallStr}, + {_id: 7, b: 2, padding: bigStr}, + {_id: 8, b: 1, padding: smallStr}, + ]; + + assert.commandWorked(localColl.insert(localDoc)); + assert.commandWorked(foreignColl.insertMany(foreignDocs)); + const pipeline = [ + {$lookup: {from: foreignColl.getName(), localField: "a", foreignField: "b", as: "matched"}}, + {$sort: {_id: 1}} + ]; + + const result = localColl.aggregate(pipeline).toArray(); + assert.eq(result.length, 1, result); + assert.eq(result[0].matched.length, 4, result); + for (let matched of result[0].matched) { + assert.eq(matched.padding, bigStr); + } +} + +const oldMemoryLimit = + assert + .commandWorked(db.adminCommand({ + getParameter: 1, + internalQuerySlotBasedExecutionHashLookupApproxMemoryUseInBytesBeforeSpill: 1 + })) + .internalQuerySlotBasedExecutionHashLookupApproxMemoryUseInBytesBeforeSpill; + +try { + setHashLookupMemoryLimit(memoryLimit); + runHashLookupSpill(); +} finally { + setHashLookupMemoryLimit(oldMemoryLimit); +} +})(); diff --git a/jstests/aggregation/bugs/optimize_text.js b/jstests/aggregation/bugs/optimize_text.js index 9dcede0c57a1c..6909003c392f9 100644 --- a/jstests/aggregation/bugs/optimize_text.js +++ b/jstests/aggregation/bugs/optimize_text.js @@ -4,10 +4,7 @@ // # because the shard doesn't know whether the merger needs the textScore metadata. 
// assumes_unsharded_collection, // ] -(function() { -'use strict'; - -load("jstests/libs/analyze_plan.js"); +import {planHasStage} from "jstests/libs/analyze_plan.js"; const coll = db.optimize_text; assert.commandWorked(coll.createIndex({"$**": "text"})); @@ -37,5 +34,4 @@ assert(!planHasStage(db, aggExplain, 'TEXT_OR'), aggExplain); // Non-blocking $text plans with just one search term do not need an OR stage, as a further // optimization. assert(!planHasStage(db, findSingleTermExplain, 'OR'), findSingleTermExplain); -assert(!planHasStage(db, findSingleTermExplain, 'TEXT_OR'), findSingleTermExplain); -})(); +assert(!planHasStage(db, findSingleTermExplain, 'TEXT_OR'), findSingleTermExplain); \ No newline at end of file diff --git a/jstests/aggregation/bugs/server14670.js b/jstests/aggregation/bugs/server14670.js index adadb154da030..c8422cf9be7c0 100644 --- a/jstests/aggregation/bugs/server14670.js +++ b/jstests/aggregation/bugs/server14670.js @@ -1,21 +1,49 @@ -// SERVER-14670 introduced the $strLenBytes and $strLenCP aggregation expressions. In this file, we -// test the error cases for these expressions. -load("jstests/aggregation/extras/utils.js"); // For assertErrorCode. +/** + * SERVER-14670 introduced the $strLenBytes and $strLenCP aggregation expressions. In this file, we + * test their expected behaviour. + * */ (function() { "use strict"; -var coll = db.substr; -coll.drop(); +load("jstests/aggregation/extras/utils.js"); // For assertErrorCode. +load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. -// Need an empty document for the pipeline. -coll.insert({}); +var coll = db.substr; +assert(coll.drop()); -assertErrorCode(coll, - [{$project: {strLen: {$strLenBytes: 1}}}], - 34473, - "$strLenBytes requires a string argument."); +assert.commandWorked( + coll.insert({strField: "MyString", intField: 1, nullField: null, specialCharField: "é"})); assertErrorCode( coll, [{$project: {strLen: {$strLenCP: 1}}}], 34471, "$strLenCP requires a string argument."); + +assert.eq({"strLen": 8}, + coll.aggregate({$project: {_id: 0, strLen: {$strLenBytes: "$strField"}}}).toArray()[0]); + +assertErrorCode(coll, + [{$project: {strLen: {$strLenBytes: "$intField"}}}], + 5155800, + "$strLenBytes requires a string argument"); + +assertErrorCode(coll, + [{$project: {strLen: {$strLenBytes: "$nullField"}}}], + 5155800, + "$strLenBytes requires a string argument"); + +assertErrorCode(coll, + [{$project: {strLen: {$strLenBytes: "$b"}}}], + 5155800, + "$strLenBytes requires a string argument"); + +// Checks that strLenBytes and strLenCP return different things for multi-byte characters. 
+assert.eq({"strLenBytes": 2, "strLenCP": 1}, + coll.aggregate({ + $project: { + _id: 0, + strLenBytes: {$strLenBytes: "$specialCharField"}, + strLenCP: {$strLenCP: "$specialCharField"} + } + }) + .toArray()[0]); }()); diff --git a/jstests/aggregation/bugs/server22093.js b/jstests/aggregation/bugs/server22093.js index 1adc279571d75..65430a4673ff0 100644 --- a/jstests/aggregation/bugs/server22093.js +++ b/jstests/aggregation/bugs/server22093.js @@ -11,10 +11,7 @@ // assumes_unsharded_collection, // do_not_wrap_aggregations_in_facets, // ] -load('jstests/libs/analyze_plan.js'); - -(function() { -"use strict"; +import {aggPlanHasStage, getAggPlanStage, planHasStage} from "jstests/libs/analyze_plan.js"; var coll = db.countscan; coll.drop(); @@ -32,10 +29,20 @@ var simpleGroup = coll.aggregate([{$group: {_id: null, count: {$sum: 1}}}]).toAr assert.eq(simpleGroup.length, 1); assert.eq(simpleGroup[0]["count"], 15); +// Retrieve the query plain from explain, whose shape varies depending on the query and the +// engines used (classic/sbe). +const getQueryPlan = function(explain) { + if (explain.stages) { + explain = explain.stages[0].$cursor; + } + let winningPlan = explain.queryPlanner.winningPlan; + return winningPlan.queryPlan ? winningPlan.queryPlan : winningPlan; +}; + var explained = coll.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$group: {_id: null, count: {$sum: 1}}}]); -assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN")); +assert(planHasStage(db, getQueryPlan(explained), "COUNT_SCAN")); explained = coll.explain().aggregate([ {$match: {foo: {$gt: 0}}}, @@ -43,15 +50,15 @@ explained = coll.explain().aggregate([ {$group: {_id: null, count: {$sum: 1}}} ]); -assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN")); +assert(planHasStage(db, getQueryPlan(explained), "COUNT_SCAN")); // Make sure a $count stage can use the COUNT_SCAN optimization. explained = coll.explain().aggregate([{$match: {foo: {$gt: 0}}}, {$count: "count"}]); -assert(planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN")); +assert(planHasStage(db, getQueryPlan(explained), "COUNT_SCAN")); // A $match that is not a single range cannot use the COUNT_SCAN optimization. explained = coll.explain().aggregate([{$match: {foo: {$in: [0, 1]}}}, {$count: "count"}]); -assert(!planHasStage(db, explained.stages[0].$cursor.queryPlanner.winningPlan, "COUNT_SCAN")); +assert(!planHasStage(db, getQueryPlan(explained), "COUNT_SCAN")); // Test that COUNT_SCAN can be used when there is a $sort. explained = coll.explain().aggregate([{$sort: {foo: 1}}, {$count: "count"}]); @@ -67,8 +74,8 @@ assert.eq(true, countScan.indexBounds.startKeyInclusive, explained); assert.eq({foo: MaxKey}, countScan.indexBounds.endKey, explained); assert.eq(true, countScan.indexBounds.endKeyInclusive, explained); -// Test that the inclusivity/exclusivity of the index bounds for COUNT_SCAN are correct when there -// is a $sort in the opposite direction of the index. +// Test that the inclusivity/exclusivity of the index bounds for COUNT_SCAN are correct when +// there is a $sort in the opposite direction of the index. 
explained = coll.explain().aggregate( [{$match: {foo: {$gte: 0, $lt: 10}}}, {$sort: {foo: -1}}, {$count: "count"}]); countScan = getAggPlanStage(explained, "COUNT_SCAN"); @@ -77,4 +84,3 @@ assert.eq({foo: 0}, countScan.indexBounds.startKey, explained); assert.eq(true, countScan.indexBounds.startKeyInclusive, explained); assert.eq({foo: 10}, countScan.indexBounds.endKey, explained); assert.eq(false, countScan.indexBounds.endKeyInclusive, explained); -}()); diff --git a/jstests/aggregation/bugs/server4638.js b/jstests/aggregation/bugs/server4638.js index ee6f7cfd6df1e..1b0fe18f0dd2a 100644 --- a/jstests/aggregation/bugs/server4638.js +++ b/jstests/aggregation/bugs/server4638.js @@ -1,13 +1,13 @@ // SERVER-4638 - this tests explicit undefined values // This case is marked as a dup of SERVER-4674 -t = db.server4638; +let t = db.server4638; t.drop(); t.insert({_id: 0, x: 0, undef: undefined}); // Make sure having an undefined doesn't break pipelines not using the field -res = t.aggregate({$project: {x: 1}}).toArray(); +let res = t.aggregate({$project: {x: 1}}).toArray(); assert.eq(res[0].x, 0); // Make sure having an undefined doesn't break pipelines that do use the field diff --git a/jstests/aggregation/bugs/server4656.js b/jstests/aggregation/bugs/server4656.js index 185f74bec548c..f0451a5c3f548 100644 --- a/jstests/aggregation/bugs/server4656.js +++ b/jstests/aggregation/bugs/server4656.js @@ -3,7 +3,7 @@ var c = db.c; c.drop(); -NUM_OBJS = 100; +let NUM_OBJS = 100; var randoms = {}; function generateRandom() { diff --git a/jstests/aggregation/bugs/server4738.js b/jstests/aggregation/bugs/server4738.js index 7a482ab0042bb..e1ad3fead386d 100644 --- a/jstests/aggregation/bugs/server4738.js +++ b/jstests/aggregation/bugs/server4738.js @@ -1,5 +1,5 @@ // test to make sure we accept all numeric types for inclusion -c = db.blah; +let c = db.blah; c.drop(); c.save({key: 4, v: 3, x: 2}); diff --git a/jstests/aggregation/bugs/server6120.js b/jstests/aggregation/bugs/server6120.js index c66b296a5a7a7..d6ca0a129fdd1 100644 --- a/jstests/aggregation/bugs/server6120.js +++ b/jstests/aggregation/bugs/server6120.js @@ -1,6 +1,6 @@ // Value::coerceToBool() is consistent with BSONElement::trueValue(). SERVER-6120 -t = db.jstests_aggregation_server6120; +let t = db.jstests_aggregation_server6120; t.drop(); t.save({object: {a: 1}}); diff --git a/jstests/aggregation/bugs/server6125.js b/jstests/aggregation/bugs/server6125.js index 6414a2eae4581..00e6c9530fe2a 100644 --- a/jstests/aggregation/bugs/server6125.js +++ b/jstests/aggregation/bugs/server6125.js @@ -10,7 +10,7 @@ // to make results array nested (problem 2) function nestArray(nstArray) { - for (x = 0; x < nstArray.length; x++) { + for (let x = 0; x < nstArray.length; x++) { nstArray[x].a = {b: nstArray[x].a}; } } diff --git a/jstests/aggregation/bugs/server6131.js b/jstests/aggregation/bugs/server6131.js index 640eea2723e17..67f662e10101c 100644 --- a/jstests/aggregation/bugs/server6131.js +++ b/jstests/aggregation/bugs/server6131.js @@ -1,6 +1,6 @@ // $unwind applied to an empty array field drops the field from the source document. 
SERVER-6131 -t = db.jstests_aggregation_server6131; +let t = db.jstests_aggregation_server6131; t.drop(); function assertAggregationResults(expected, aggregation) { diff --git a/jstests/aggregation/bugs/server6181.js b/jstests/aggregation/bugs/server6181.js index d48a5dbfe02b2..d894962015665 100644 --- a/jstests/aggregation/bugs/server6181.js +++ b/jstests/aggregation/bugs/server6181.js @@ -1,11 +1,11 @@ // SERVER-6181 Correctly support an expression for _id -c = db.c; +let c = db.c; c.drop(); c.save({a: 2}); -res = c.aggregate({$project: {_id: '$a'}}); +let res = c.aggregate({$project: {_id: '$a'}}); assert.eq(res.toArray(), [{_id: 2}]); res = c.aggregate({$project: {_id: {$add: [1, '$a']}}}); diff --git a/jstests/aggregation/bugs/server6184.js b/jstests/aggregation/bugs/server6184.js index bc2ce8c0f675f..8e18e3efb46bc 100644 --- a/jstests/aggregation/bugs/server6184.js +++ b/jstests/aggregation/bugs/server6184.js @@ -1,13 +1,13 @@ // SERVER-6184 Support mixing nested and dotted fields with common prefixes // @tags: [tests_projection_field_order] -c = db.c; +let c = db.c; c.drop(); c.save({a: 'missing', b: {c: 'bar', a: 'baz', z: 'not there'}}); function test(projection) { - res = c.aggregate({$project: projection}); + let res = c.aggregate({$project: projection}); assert.eq(res.toArray()[0], {b: {c: 'bar', a: 'baz'}}); } @@ -16,8 +16,6 @@ test({_id: 0, 'b.c': 1, b: {a: 1}}); // Synthetic fields should be in the order they appear in the $project -one = { - $add: [1] -}; -res = c.aggregate({$project: {_id: 0, 'A.Z': one, A: {Y: one, A: one}, 'A.B': one}}); +let one = {$add: [1]}; +let res = c.aggregate({$project: {_id: 0, 'A.Z': one, A: {Y: one, A: one}, 'A.B': one}}); assert.eq(res.toArray()[0], {A: {Z: 1, Y: 1, A: 1, B: 1}}); diff --git a/jstests/aggregation/bugs/server6186.js b/jstests/aggregation/bugs/server6186.js index 391643b1d2493..58bb1afcce2cd 100644 --- a/jstests/aggregation/bugs/server6186.js +++ b/jstests/aggregation/bugs/server6186.js @@ -1,6 +1,6 @@ // $substr returns an empty string if the position argument is out of bounds. 
SERVER-6186 -t = db.jstests_aggregation_server6186; +let t = db.jstests_aggregation_server6186; t.drop(); t.save({}); @@ -23,8 +23,8 @@ function assertSubstr(string, pos, n) { } function checkVariousSubstrings(string) { - for (pos = 0; pos < 5; ++pos) { - for (n = -2; n < 7; ++n) { + for (let pos = 0; pos < 5; ++pos) { + for (let n = -2; n < 7; ++n) { assertSubstr(string, pos, n); } } diff --git a/jstests/aggregation/bugs/server6189.js b/jstests/aggregation/bugs/server6189.js index 13385aa0443ee..d13417da6462b 100644 --- a/jstests/aggregation/bugs/server6189.js +++ b/jstests/aggregation/bugs/server6189.js @@ -1,6 +1,6 @@ // server6189 - Support date operators with dates before 1970 -c = db.c; +let c = db.c; function test(date, testSynthetics) { print("testing " + date); diff --git a/jstests/aggregation/bugs/server6190.js b/jstests/aggregation/bugs/server6190.js index 1eebb5af73c22..b750f4b2aaa76 100644 --- a/jstests/aggregation/bugs/server6190.js +++ b/jstests/aggregation/bugs/server6190.js @@ -3,7 +3,7 @@ load('jstests/aggregation/extras/utils.js'); load("jstests/libs/sbe_assert_error_override.js"); -t = db.jstests_aggregation_server6190; +let t = db.jstests_aggregation_server6190; t.drop(); t.save({}); diff --git a/jstests/aggregation/bugs/server6192_server6193.js b/jstests/aggregation/bugs/server6192_server6193.js index 114a967598799..e38b03cdd86a3 100644 --- a/jstests/aggregation/bugs/server6192_server6193.js +++ b/jstests/aggregation/bugs/server6192_server6193.js @@ -11,10 +11,7 @@ // do_not_wrap_aggregations_in_facets, // requires_pipeline_optimization, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'getPlanStage'. +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const t = db.jstests_aggregation_server6192; t.drop(); @@ -71,5 +68,4 @@ assertNotOptimized({$or: [0, '$x']}); assertNotOptimized({$and: ['$x', '$x']}); assertNotOptimized({$or: ['$x', '$x']}); assertNotOptimized({$and: ['$x']}); -assertNotOptimized({$or: ['$x']}); -}()); +assertNotOptimized({$or: ['$x']}); \ No newline at end of file diff --git a/jstests/aggregation/bugs/server6194.js b/jstests/aggregation/bugs/server6194.js index 53c23f60c1f48..731735d7a68c2 100644 --- a/jstests/aggregation/bugs/server6194.js +++ b/jstests/aggregation/bugs/server6194.js @@ -1,11 +1,9 @@ // make sure $concat doesn't optimize constants to the end -c = db.c; +let c = db.c; c.drop(); c.save({x: '3'}); -project = { - $project: {a: {$concat: ['1', {$concat: ['foo', '$x', 'bar']}, '2']}} -}; +let project = {$project: {a: {$concat: ['1', {$concat: ['foo', '$x', 'bar']}, '2']}}}; assert.eq('1foo3bar2', c.aggregate(project).toArray()[0].a); diff --git a/jstests/aggregation/bugs/server6195.js b/jstests/aggregation/bugs/server6195.js index 156489e9c374a..e007429fe58e4 100644 --- a/jstests/aggregation/bugs/server6195.js +++ b/jstests/aggregation/bugs/server6195.js @@ -3,7 +3,7 @@ load('jstests/aggregation/extras/utils.js'); load('jstests/libs/sbe_assert_error_override.js'); // Override error-code-checking APIs. 
-c = db.s6570; +let c = db.s6570; c.drop(); c.save({v: "$", w: ".", x: "foo", y: "bar", z: "z\0z"}); diff --git a/jstests/aggregation/bugs/server6232.js b/jstests/aggregation/bugs/server6232.js index 21ed599af7345..73ab0e50317c4 100644 --- a/jstests/aggregation/bugs/server6232.js +++ b/jstests/aggregation/bugs/server6232.js @@ -5,7 +5,7 @@ db.s6232.drop(); db.s6232.save({}); // case where an empty object is evaluated -result = db.s6232.aggregate({$project: {a: {$and: [{}]}}}); +let result = db.s6232.aggregate({$project: {a: {$and: [{}]}}}); assert.eq(result.toArray()[0].a, true); // case where result should contain a new empty object diff --git a/jstests/aggregation/bugs/server6238.js b/jstests/aggregation/bugs/server6238.js index ddc29ec33d8b1..90192cb626369 100644 --- a/jstests/aggregation/bugs/server6238.js +++ b/jstests/aggregation/bugs/server6238.js @@ -1,7 +1,7 @@ // do not allow creation of fields with a $ prefix load('jstests/aggregation/extras/utils.js'); -c = db.c; +const c = db.c; c.drop(); c.insert({a: 1}); diff --git a/jstests/aggregation/bugs/server6269.js b/jstests/aggregation/bugs/server6269.js index c92245f619836..b6fb2f809cc8b 100644 --- a/jstests/aggregation/bugs/server6269.js +++ b/jstests/aggregation/bugs/server6269.js @@ -1,6 +1,6 @@ // Correct skipping behavior when $skip is applied after $unwind. SERVER-6269 -c = db.jstests_aggregation_server6269; +let c = db.jstests_aggregation_server6269; c.drop(); c.save({_id: 0, a: [1, 2, 3]}); diff --git a/jstests/aggregation/bugs/server6275.js b/jstests/aggregation/bugs/server6275.js index 39feeb2552ee0..dccba755d5073 100644 --- a/jstests/aggregation/bugs/server6275.js +++ b/jstests/aggregation/bugs/server6275.js @@ -1,5 +1,5 @@ // confirm that undefined no longer counts as 0 in $avg -c = db.c; +let c = db.c; c.drop(); c.save({a: 1}); c.save({a: 4}); diff --git a/jstests/aggregation/bugs/server6468.js b/jstests/aggregation/bugs/server6468.js index 09515c746fafb..ab5a89d5ca2e4 100644 --- a/jstests/aggregation/bugs/server6468.js +++ b/jstests/aggregation/bugs/server6468.js @@ -1,11 +1,11 @@ // SERVER-6468 nested and dotted projections should be treated the same -c = db.c; +let c = db.c; c.drop(); c.save({a: 'foo', b: {c: 'bar', z: 'not there'}}); function test(projection) { - res = c.aggregate({$project: projection}); + let res = c.aggregate({$project: projection}); assert.eq(res.toArray()[0], {b: {c: 'bar'}}); } diff --git a/jstests/aggregation/bugs/server6531.js b/jstests/aggregation/bugs/server6531.js index 5f5ebee3836d6..cc6d05079c9c4 100644 --- a/jstests/aggregation/bugs/server6531.js +++ b/jstests/aggregation/bugs/server6531.js @@ -1,6 +1,6 @@ // SERVER-6531 support $within in $match aggregation operations -c = db.s6531; +let c = db.s6531; c.drop(); for (var x = 0; x < 10; x++) { @@ -10,10 +10,10 @@ for (var x = 0; x < 10; x++) { } function test(variant) { - query = {loc: {$within: {$center: [[5, 5], 3]}}}; - sort = {_id: 1}; - aggOut = c.aggregate({$match: query}, {$sort: sort}); - cursor = c.find(query).sort(sort); + let query = {loc: {$within: {$center: [[5, 5], 3]}}}; + let sort = {_id: 1}; + let aggOut = c.aggregate({$match: query}, {$sort: sort}); + let cursor = c.find(query).sort(sort); assert.eq(aggOut.toArray(), cursor.toArray()); } diff --git a/jstests/aggregation/bugs/server6556.js b/jstests/aggregation/bugs/server6556.js index a6d1a0c483765..ef31a8111d1a8 100644 --- a/jstests/aggregation/bugs/server6556.js +++ b/jstests/aggregation/bugs/server6556.js @@ -1,6 +1,6 @@ // ensure strings containing null characters 
dont end at that null -c = db.s6556; +let c = db.s6556; c.drop(); c.save({foo: "as\0df"}); diff --git a/jstests/aggregation/bugs/server6570.js b/jstests/aggregation/bugs/server6570.js index b12a83967bab0..c423a1c60c758 100644 --- a/jstests/aggregation/bugs/server6570.js +++ b/jstests/aggregation/bugs/server6570.js @@ -2,7 +2,7 @@ load('jstests/aggregation/extras/utils.js'); load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. -c = db.s6570; +let c = db.s6570; c.drop(); c.save({x: 17, y: "foo"}); diff --git a/jstests/aggregation/bugs/server6861.js b/jstests/aggregation/bugs/server6861.js index e6748bd07da68..ee012e705c941 100644 --- a/jstests/aggregation/bugs/server6861.js +++ b/jstests/aggregation/bugs/server6861.js @@ -2,7 +2,7 @@ // SERVER-6861 load('jstests/aggregation/extras/utils.js'); -t = db.jstests_server6861; +let t = db.jstests_server6861; t.drop(); t.save({a: 1}); diff --git a/jstests/aggregation/bugs/server72651.js b/jstests/aggregation/bugs/server72651.js index b4100bdc32c4f..e1f0b57ffcd1f 100644 --- a/jstests/aggregation/bugs/server72651.js +++ b/jstests/aggregation/bugs/server72651.js @@ -9,4 +9,4 @@ assert.commandWorked(c.insert({_id: 0, a: 1})); assert.eq( [], c.aggregate([{$project: {"b": 1}}, {$match: {$expr: {$getField: {$literal: "a"}}}}]).toArray()); -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/bugs/server75670.js b/jstests/aggregation/bugs/server75670.js index 932c04d7b4db8..60bdcbb48d265 100644 --- a/jstests/aggregation/bugs/server75670.js +++ b/jstests/aggregation/bugs/server75670.js @@ -27,4 +27,4 @@ assert(resultsEq( ]) .toArray(), )); -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/bugs/server7768.js b/jstests/aggregation/bugs/server7768.js index b7ce2669e3adb..e33739ea2416f 100644 --- a/jstests/aggregation/bugs/server7768.js +++ b/jstests/aggregation/bugs/server7768.js @@ -20,4 +20,4 @@ let res = db.runCommand({ assert.commandWorked(res); assert.eq(res.cursor.firstBatch, [{foo: 1}]); -}()); \ No newline at end of file +}()); diff --git a/jstests/aggregation/bugs/server7900.js b/jstests/aggregation/bugs/server7900.js index 20bf085c7a1d9..103ebd0b7260d 100644 --- a/jstests/aggregation/bugs/server7900.js +++ b/jstests/aggregation/bugs/server7900.js @@ -1,10 +1,10 @@ // server-7900 - $sort + $limit ignores limit when using index for sort -c = db.s7900; +let c = db.s7900; c.drop(); for (var i = 0; i < 5; i++) c.insert({_id: i}); -res = c.aggregate({$sort: {_id: -1}}, {$limit: 2}); // uses index for sort +let res = c.aggregate({$sort: {_id: -1}}, {$limit: 2}); // uses index for sort assert.eq(res.toArray(), [{_id: 4}, {_id: 3}]); diff --git a/jstests/aggregation/bugs/server8581.js b/jstests/aggregation/bugs/server8581.js index ae616cb21d316..0045425d39723 100644 --- a/jstests/aggregation/bugs/server8581.js +++ b/jstests/aggregation/bugs/server8581.js @@ -1,7 +1,7 @@ // Check $redact pipeline stage. 
load('jstests/aggregation/extras/utils.js'); -t = db.jstests_aggregation_redact; +let t = db.jstests_aggregation_redact; t.drop(); // this document will always be present but its content will change @@ -42,15 +42,15 @@ t.save({ level: 4, }); -a1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$DESCEND", "$$PRUNE"]}}); -a2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$DESCEND", "$$PRUNE"]}}); -a3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$DESCEND", "$$PRUNE"]}}); -a4 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 4]}, "$$DESCEND", "$$PRUNE"]}}); -a5 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 5]}, "$$DESCEND", "$$PRUNE"]}}); +let a1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$DESCEND", "$$PRUNE"]}}); +let a2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$DESCEND", "$$PRUNE"]}}); +let a3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$DESCEND", "$$PRUNE"]}}); +let a4 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 4]}, "$$DESCEND", "$$PRUNE"]}}); +let a5 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 5]}, "$$DESCEND", "$$PRUNE"]}}); -a1result = [{_id: 1, level: 1, l: {}, o: [], q: 14}]; +let a1result = [{_id: 1, level: 1, l: {}, o: [], q: 14}]; -a2result = [{ +let a2result = [{ _id: 1, level: 1, h: { @@ -61,7 +61,7 @@ a2result = [{ q: 14 }]; -a3result = [{ +let a3result = [{ _id: 1, level: 1, b: { @@ -77,7 +77,7 @@ a3result = [{ q: 14 }]; -a4result = [ +let a4result = [ { _id: 1, level: 1, @@ -97,7 +97,7 @@ a4result = [ } ]; -a5result = [ +let a5result = [ { _id: 1, level: 1, @@ -139,13 +139,13 @@ t.drop(); // entire document should be present at 2 and beyond t.save({_id: 1, level: 2, b: {level: 3, c: 2}, d: {level: 1, e: 8}, f: 9}); -b1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$KEEP", "$$PRUNE"]}}); -b2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$KEEP", "$$PRUNE"]}}); -b3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$KEEP", "$$PRUNE"]}}); +let b1 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 1]}, "$$KEEP", "$$PRUNE"]}}); +let b2 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 2]}, "$$KEEP", "$$PRUNE"]}}); +let b3 = t.aggregate({$redact: {$cond: [{$lte: ['$level', 3]}, "$$KEEP", "$$PRUNE"]}}); -b1result = []; +let b1result = []; -b23result = [{_id: 1, level: 2, b: {level: 3, c: 2}, d: {level: 1, e: 8}, f: 9}]; +let b23result = [{_id: 1, level: 2, b: {level: 3, c: 2}, d: {level: 1, e: 8}, f: 9}]; assert.eq(b1.toArray(), b1result); assert.eq(b2.toArray(), b23result); diff --git a/jstests/aggregation/bugs/skip_limit_overflow.js b/jstests/aggregation/bugs/skip_limit_overflow.js index 2ca22a0c3e561..e5448bad3df7f 100644 --- a/jstests/aggregation/bugs/skip_limit_overflow.js +++ b/jstests/aggregation/bugs/skip_limit_overflow.js @@ -12,10 +12,7 @@ * requires_pipeline_optimization, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStages' and other explain helpers. 
+import {aggPlanHasStage, getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.server39788; coll.drop(); @@ -217,5 +214,4 @@ testPipeline( $skip: {path: "$skip", expectedValue: [NumberLong("9223372036854775807")]}, SKIP: {path: "skipAmount", expectedValue: [10]} }, - ["$sort"]); -})(); + ["$sort"]); \ No newline at end of file diff --git a/jstests/aggregation/bugs/strcasecmp.js b/jstests/aggregation/bugs/strcasecmp.js index 736e7ec0dac19..44a2668bcc435 100644 --- a/jstests/aggregation/bugs/strcasecmp.js +++ b/jstests/aggregation/bugs/strcasecmp.js @@ -1,6 +1,6 @@ // Aggregation $strcasecmp tests. -t = db.jstests_aggregation_strcasecmp; +let t = db.jstests_aggregation_strcasecmp; t.drop(); t.save({}); diff --git a/jstests/aggregation/explain.js b/jstests/aggregation/explain.js index 6ec350d96e56e..1cba7ca539940 100644 --- a/jstests/aggregation/explain.js +++ b/jstests/aggregation/explain.js @@ -1,10 +1,7 @@ // Tests the behavior of explain() when used with the aggregation pipeline. // - Explain() should not read or modify the plan cache. // - The result should always include serverInfo. -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); // For getAggPlanStage(). +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; let coll = db.explain; coll.drop(); @@ -34,5 +31,4 @@ assert.eq(null, getAggPlanStage(result, "CACHED_PLAN")); // that this implementation also includes serverInfo. result = coll.explain().aggregate([{$lookup: {from: 'other_coll', pipeline: [], as: 'docs'}}]); assert(result.hasOwnProperty('serverInfo'), result); -assert.hasFields(result.serverInfo, ['host', 'port', 'version', 'gitVersion']); -})(); +assert.hasFields(result.serverInfo, ['host', 'port', 'version', 'gitVersion']); \ No newline at end of file diff --git a/jstests/aggregation/explain_limit.js b/jstests/aggregation/explain_limit.js index 5e017f9b74e76..171e6f6e2fbf6 100644 --- a/jstests/aggregation/explain_limit.js +++ b/jstests/aggregation/explain_limit.js @@ -8,11 +8,8 @@ // # Implicit index creation may change the plan/engine used. // assumes_no_implicit_index_creation, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; let coll = db.explain_limit; @@ -100,4 +97,3 @@ checkResults({results: execLevel, verbosity: "executionStats"}); allPlansExecLevel = coll.explain("allPlansExecution").aggregate(pipeline); checkResults({results: allPlansExecLevel, verbosity: "allPlansExecution"}); -})(); diff --git a/jstests/aggregation/explain_per_stage_exec_stats.js b/jstests/aggregation/explain_per_stage_exec_stats.js index f7f65121a2dae..4c47ac14bf338 100644 --- a/jstests/aggregation/explain_per_stage_exec_stats.js +++ b/jstests/aggregation/explain_per_stage_exec_stats.js @@ -3,10 +3,8 @@ * execution time (executionTimeMillisEstimate) when explain is run with verbosities * "executionStats" and "allPlansExecution". */ -(function() { -"use strict"; +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). load("jstests/libs/fixture_helpers.js"); // For isReplSet(). 
const coll = db.explain_per_stage_exec_stats; @@ -156,5 +154,4 @@ assert.eq(numberOfDocsReturnedByMatchStage(coll.explain("executionStats").aggreg } }; checkResults(result, assertOutputBytesSize); -})(); -}()); +})(); \ No newline at end of file diff --git a/jstests/aggregation/explain_writing_aggs.js b/jstests/aggregation/explain_writing_aggs.js index 67b71d017fd07..9a74584c2c371 100644 --- a/jstests/aggregation/explain_writing_aggs.js +++ b/jstests/aggregation/explain_writing_aggs.js @@ -8,11 +8,8 @@ * assumes_write_concern_unchanged, * ] */ -(function() { -"use strict"; - -load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos(). -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage(). +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos(). +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode(). let sourceColl = db.explain_writing_aggs_source; @@ -71,5 +68,4 @@ withEachMergeMode(function({whenMatchedMode, whenNotMatchedMode}) { assert.eq(mergeExplain.$merge.whenNotMatched, whenNotMatchedMode, mergeExplain); assert.eq(mergeExplain.$merge.on, "_id", mergeExplain); assert.eq(targetColl.find().itcount(), 0, explain); -}); -}()); +}); \ No newline at end of file diff --git a/jstests/aggregation/expressions/add.js b/jstests/aggregation/expressions/add.js index cc074a9ae08d4..c411c3f213bab 100644 --- a/jstests/aggregation/expressions/add.js +++ b/jstests/aggregation/expressions/add.js @@ -1,5 +1,7 @@ (function() { "use strict"; +load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and assertErrMsgContains. +load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. // In SERVER-63012, translation of $add expression into sbe now defaults the translation of $add // with no operands to a zero integer constant. 
@@ -43,4 +45,49 @@ let addResult = coll.aggregate([{$project: {add: {$add: queryArr}}}]).toArray(); let sumResult = coll.aggregate([{$project: {sum: {$sum: queryArr}}}]).toArray(); assert.neq(addResult[0]["add"], sumResult[0]["sum"]); assert.eq(addResult[0]["add"], arr.reduce((a, b) => a + b)); + +assert.eq(true, coll.drop()); +// Doubles are rounded to int64 when added to Date +assert.commandWorked(coll.insert({_id: 0, lhs: new Date(1683794065002), rhs: 0.5})); +assert.commandWorked(coll.insert({_id: 1, lhs: new Date(1683794065002), rhs: 1.4})); +assert.commandWorked(coll.insert({_id: 2, lhs: new Date(1683794065002), rhs: 1.5})); +assert.commandWorked(coll.insert({_id: 3, lhs: new Date(1683794065002), rhs: 1.7})); +// Decimals are rounded to int64, when tie rounded to even, when added to Date +assert.commandWorked( + coll.insert({_id: 4, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.4")})); +assert.commandWorked( + coll.insert({_id: 5, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.5")})); +assert.commandWorked( + coll.insert({_id: 6, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.7")})); +assert.commandWorked( + coll.insert({_id: 7, lhs: new Date(1683794065002), rhs: new NumberDecimal("2.5")})); + +let result1 = + coll.aggregate([{$project: {sum: {$add: ["$lhs", "$rhs"]}}}, {$sort: {_id: 1}}]).toArray(); +assert.eq(result1[0].sum, new Date(1683794065003)); +assert.eq(result1[1].sum, new Date(1683794065003)); +assert.eq(result1[2].sum, new Date(1683794065004)); +assert.eq(result1[3].sum, new Date(1683794065004)); +assert.eq(result1[4].sum, new Date(1683794065003)); +assert.eq(result1[5].sum, new Date(1683794065004)); +assert.eq(result1[6].sum, new Date(1683794065004)); +assert.eq(result1[7].sum, new Date(1683794065004)); + +coll.drop(); + +assert.commandWorked(coll.insert([{ + _id: 0, + veryBigPositiveLong: NumberLong("9223372036854775806"), + veryBigPositiveDouble: 9223372036854775806, + veryBigPositiveDecimal: NumberDecimal("9223372036854775806") +}])); + +let pipeline = [{$project: {res: {$add: [new Date(10), "$veryBigPositiveLong"]}}}]; +assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow"); + +pipeline = [{$project: {res: {$add: [new Date(10), "$veryBigPositiveDouble"]}}}]; +assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow"); + +pipeline = [{$project: {res: {$add: [new Date(10), "$veryBigPositiveDecimal"]}}}]; +assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow"); }()); diff --git a/jstests/aggregation/expressions/arith_overflow.js b/jstests/aggregation/expressions/arith_overflow.js new file mode 100644 index 0000000000000..a391224ff8469 --- /dev/null +++ b/jstests/aggregation/expressions/arith_overflow.js @@ -0,0 +1,31 @@ +// Tests for $add, $subtract and $multiply aggregation expression type promotion on overflow +// @tags: [require_fcv_71] +(function() { +"use strict"; + +const coll = db.arith_overflow; + +function runTest(operator, expectedResults) { + const result = + coll.aggregate([{$project: {res: {[operator]: ["$lhs", "$rhs"]}}}, {$sort: {_id: 1}}]) + .toArray() + .map(r => r.res); + assert.eq(result, expectedResults); +} + +// $add +coll.drop(); +assert.commandWorked(coll.insert({_id: 0, lhs: NumberInt(2e+9), rhs: NumberInt(2e+9)})); +assert.commandWorked(coll.insert({_id: 1, lhs: NumberLong(9e+18), rhs: NumberLong(9e+18)})); + +runTest("$add", [NumberLong(4e+9), 1.8e+19]); + +// $subtract +coll.drop(); +assert.commandWorked(coll.insert({_id: 0, lhs: 
NumberInt(2e+9), rhs: NumberInt(-2e+9)})); +assert.commandWorked(coll.insert({_id: 1, lhs: NumberLong(9e+18), rhs: NumberLong(-9e+18)})); + +runTest("$subtract", [NumberLong(4e+9), 1.8e+19]); +// $multiply uses same arguments +runTest("$multiply", [NumberLong(-4e+18), -8.1e+37]); +}()); diff --git a/jstests/aggregation/expressions/array_expression.js b/jstests/aggregation/expressions/array_expression.js new file mode 100644 index 0000000000000..8eb9c06c0e274 --- /dev/null +++ b/jstests/aggregation/expressions/array_expression.js @@ -0,0 +1,33 @@ +// Tests for $array expression. +(function() { +"use strict"; + +let coll = db.array_expr; +coll.drop(); + +function assertArray(expArray, ...inputs) { + assert(coll.drop()); + if (inputs.length == 0) { + assert.commandWorked(coll.insert({})); + } else if (inputs.length == 1) { + assert.commandWorked(coll.insert({a: inputs[0]})); + } else { + assert.commandWorked(coll.insert({a: inputs[0], b: inputs[1]})); + } + const result = coll.aggregate([{$project: {out: ["$a", "$b"]}}]).toArray()[0].out; + assert.eq(result, expArray); +} + +assertArray([1, 2], 1, 2); +assertArray([null, null], null, null); +assertArray(["TestInput", null], "TestInput", null); +assertArray([{a: 1, b: 2}, [1, 2]], {a: 1, b: 2}, [1, 2]); +assertArray(["TestInput", null], "TestInput"); +assertArray([null, null]); + +// no arg +assert(coll.drop()); +assert.commandWorked(coll.insert({})); +let result = coll.aggregate([{$project: {out: []}}]).toArray()[0].out; +assert.eq(result, []); +}()); diff --git a/jstests/aggregation/expressions/collation_optimize_fetch.js b/jstests/aggregation/expressions/collation_optimize_fetch.js new file mode 100644 index 0000000000000..4ce827aca686d --- /dev/null +++ b/jstests/aggregation/expressions/collation_optimize_fetch.js @@ -0,0 +1,95 @@ +/** + * The combination of collation, index scan, sorting and fetching needs close consideration to + * ensure optimal ordering of the operations. If the collation of the query is the same as the + * collation of the index, the index can be used to satisfy group, sort and limiting before fetching + * the data to return non ICU encoded values. The before mentioned operations can operate on ICU + * encoded values. This testsuite analyzes the number of documents fetched from the collated + * collection in combination with a limit operator. This optimization was added with SERVER-63132. 
+ * + * @tags: [ + * requires_fcv_71, + * assumes_no_implicit_collection_creation_after_drop, + * ] + */ + +(function() { +"use strict"; + +var results; +const caseInsensitiveUS = { + locale: "en", + strength: 2 +}; +const caseInsensitiveDE = { + locale: "de_AT", + strength: 2 +}; +const documents = [ + {_id: 0, a: 'A', b: 'B', c: 'A', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 1, a: 'a', b: 'B', c: 'b', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 2, a: 'A', b: 'B', c: 'C', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 3, a: 'a', b: 'B', c: 'D', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 4, a: 'A', b: 'B', c: 'e', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 5, a: 'a', b: 'b', c: 'F', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 6, a: 'A', b: 'b', c: 'g', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 7, a: 'a', b: 'b', c: 'H', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 8, a: 'A', b: 'b', c: 'I', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, + {_id: 9, a: 'a', b: 'b', c: 'j', d: ['x', 'y', 'z', 'h', 't'], e: {a: 'ae', b: 'be'}}, +]; +const indexes = [{a: 1, b: 1, c: 1}, {a: 1, d: 1}, {"e.a": 1, "e.b": 1}]; + +function initCollection(collectionCollation, indexCollation) { + db.collation_optimize_fetch.drop(); + + // Setup the collection. + assert.commandWorked(db.createCollection( + "collation_optimize_fetch", collectionCollation ? {collation: collectionCollation} : "")); + + // Setup the indexes. + indexes.forEach(idx => (assert.commandWorked(db.collation_optimize_fetch.createIndex( + idx, indexCollation ? {collation: indexCollation} : "")))); + + // Insert docs. + assert.commandWorked(db.collation_optimize_fetch.insert(documents)); +} + +function runTest(expectedDocumentCount) { + // Run the tests with the provided indexes. + assert.eq(expectedDocumentCount, + db.collation_optimize_fetch.explain("executionStats") + .find({a: 'a'}) + .sort({c: 1}) + .limit(5) + .next() + .executionStats.totalDocsExamined); + assert.eq(expectedDocumentCount, + db.collation_optimize_fetch.explain("executionStats") + .find({a: 'a'}) + .sort({d: 1}) + .limit(5) + .next() + .executionStats.totalDocsExamined); + assert.eq(expectedDocumentCount, + db.collation_optimize_fetch.explain("executionStats") + .find({"e.a": 'ae'}) + .sort({"e.b": 1}) + .limit(5) + .next() + .executionStats.totalDocsExamined); +} + +// Only 5 documents should be fetched as the sort and limit can be satisfied by the IDX. +initCollection(caseInsensitiveUS); +runTest(5); + +// 10 documents need to be fetched as the IDX has a different collation than the query. +initCollection(null, caseInsensitiveUS); +runTest(10); + +// Two different collations on the index and collection requires to fetch all 10 documents. +initCollection(caseInsensitiveDE, caseInsensitiveUS); +runTest(10); + +// Cleanup. +db.collation_optimize_fetch.drop(); +})(); diff --git a/jstests/aggregation/expressions/concat_arrays.js b/jstests/aggregation/expressions/concat_arrays.js index 3c52f31cd08c9..02648550eda03 100644 --- a/jstests/aggregation/expressions/concat_arrays.js +++ b/jstests/aggregation/expressions/concat_arrays.js @@ -11,12 +11,9 @@ // # tests from implicit index creation suites. // assumes_no_implicit_index_creation, // ] -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For assertArrayEq. load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. 
-load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const coll = db.projection_expr_concat_arrays; coll.drop(); @@ -174,5 +171,4 @@ runAndAssert(["$arr1", [1, 2, 3], "$arr2"], [ [1, 2, 3], null, null -]); -}()); +]); \ No newline at end of file diff --git a/jstests/aggregation/expressions/date_to_string.js b/jstests/aggregation/expressions/date_to_string.js index 5ee2873288227..3aa293287392b 100644 --- a/jstests/aggregation/expressions/date_to_string.js +++ b/jstests/aggregation/expressions/date_to_string.js @@ -189,7 +189,11 @@ assert.eq( .toArray()); /* --------------------------------------------------------------------------------------- */ -/* Test that the default format is "%Y-%m-%dT%H:%M:%S.%LZ" if none specified. */ +/* Test that the default format is +/* "%Y-%m-%dT%H:%M:%S.%LZ" if no timezone is specified or UTC is explicitly specified +/* "%Y-%m-%dT%H:%M:%S.%L" if a non-UTC timezone is explicitly specified +/* The last case also verifies the Daylight Savings Time change versus UTC. + */ coll.drop(); assert.commandWorked(coll.insert([ @@ -198,11 +202,35 @@ assert.commandWorked(coll.insert([ {_id: 2, date: new ISODate("2017-12-04T15:09:14.911Z")}, ])); +// No timezone specified. Defaults to UTC time, and the format includes the 'Z' (UTC) suffix. assert.eq( [ - {_id: 0, date: "2017-01-04T10:08:51.911Z"}, - {_id: 1, date: "2017-07-04T11:09:12.911Z"}, - {_id: 2, date: "2017-12-04T10:09:14.911Z"}, + {_id: 0, date: "2017-01-04T15:08:51.911Z"}, + {_id: 1, date: "2017-07-04T15:09:12.911Z"}, + {_id: 2, date: "2017-12-04T15:09:14.911Z"}, + ], + coll.aggregate([{$project: {date: {$dateToString: {date: "$date"}}}}, {$sort: {_id: 1}}]) + .toArray()); + +// UTC timezone explicitly specified. Gives UTC time, and the format includes the 'Z' (UTC) suffix. +assert.eq( + [ + {_id: 0, date: "2017-01-04T15:08:51.911Z"}, + {_id: 1, date: "2017-07-04T15:09:12.911Z"}, + {_id: 2, date: "2017-12-04T15:09:14.911Z"}, + ], + coll.aggregate([ + {$project: {date: {$dateToString: {date: "$date", timezone: "UTC"}}}}, + {$sort: {_id: 1}} + ]) + .toArray()); + +// Non-UTC timezone explicitly specified. Gives the requested time, and the format omits 'Z'. +assert.eq( + [ + {_id: 0, date: "2017-01-04T10:08:51.911"}, + {_id: 1, date: "2017-07-04T11:09:12.911"}, + {_id: 2, date: "2017-12-04T10:09:14.911"}, ], coll.aggregate([ {$project: {date: {$dateToString: {date: "$date", timezone: "America/New_York"}}}}, diff --git a/jstests/aggregation/expressions/median_expression_approx.js b/jstests/aggregation/expressions/median_expression_approx.js index dd05a9106ba09..92964e0754b29 100644 --- a/jstests/aggregation/expressions/median_expression_approx.js +++ b/jstests/aggregation/expressions/median_expression_approx.js @@ -3,7 +3,6 @@ * with the field 'p':[0.5]. 
* @tags: [ * requires_fcv_70, - * featureFlagApproxPercentiles * ] */ (function() { diff --git a/jstests/aggregation/expressions/multiply.js b/jstests/aggregation/expressions/multiply.js index da4c612ff7771..101a0e365a08b 100644 --- a/jstests/aggregation/expressions/multiply.js +++ b/jstests/aggregation/expressions/multiply.js @@ -55,6 +55,7 @@ const binaryTestCases = [ }, {document: {left: NumberDecimal("12.5"), right: null}, expected: null}, + // Test null {document: {left: null, right: NumberInt(2)}, expected: null}, {document: {left: null, right: 2.55}, expected: null}, {document: {left: null, right: NumberLong("2")}, expected: null}, diff --git a/jstests/aggregation/expressions/n_expressions.js b/jstests/aggregation/expressions/n_expressions.js index 9798488900e13..10213dc9683fa 100644 --- a/jstests/aggregation/expressions/n_expressions.js +++ b/jstests/aggregation/expressions/n_expressions.js @@ -56,4 +56,4 @@ testExpr({$minN: args}, [3, 4]); testExpr({$maxN: args}, [5, 4]); testExpr({$firstN: args}, [3, 4]); testExpr({$lastN: args}, [4, 5]); -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/expressions/percentile_expression_approx.js b/jstests/aggregation/expressions/percentile_expression_approx.js index 34673489fdca4..498e864a31541 100644 --- a/jstests/aggregation/expressions/percentile_expression_approx.js +++ b/jstests/aggregation/expressions/percentile_expression_approx.js @@ -2,7 +2,6 @@ * Tests for the approximate percentile expression semantics. * @tags: [ * requires_fcv_70, - * featureFlagApproxPercentiles * ] */ (function() { @@ -12,10 +11,10 @@ load("jstests/aggregation/extras/utils.js"); const coll = db[jsTestName()]; -function testWithProject({doc, percentileSpec, expectedResult, msg}) { +function testWithProject({doc, percentileSpec, letSpec, expectedResult, msg}) { coll.drop(); coll.insert(doc); - const res = coll.aggregate([{$project: {p: percentileSpec}}]).toArray(); + const res = coll.aggregate([{$project: {p: percentileSpec}}], {let : letSpec}).toArray(); // For $percentile the result should be ordered to match the spec, so assert exact equality. assert.eq(expectedResult, res[0].p, msg + ` result: ${tojson(res)}`); } @@ -102,6 +101,27 @@ testWithProject({ msg: "Multiple percentiles when single input expression resolves to a non-numeric scalar" }); +testWithProject({ + doc: {x: [2, 1], y: 3}, + percentileSpec: { + $percentile: { + p: [0.5, 0.9], + input: {$concatArrays: ["$x", [{$add: [42, "$y"]}]]}, + method: "approximate" + } + }, + expectedResult: [2, 42 + 3], + msg: "Input as complex expression" +}); + +testWithProject({ + doc: {x: [2, 3, 1]}, + percentileSpec: {$percentile: {p: "$$ps", input: "$x", method: "approximate"}}, + letSpec: {ps: [0.1, 0.5, 0.9]}, + expectedResult: [1, 2, 3], + msg: "'p' specified as a variable" +}); + /** * 'rand()' generates a uniform distribution from [0.0, 1.0] so we can check accuracy of the result * in terms of values rather than in terms of rank. diff --git a/jstests/aggregation/expressions/percentile_expression_syntax.js b/jstests/aggregation/expressions/percentile_expression_syntax.js index a156b599eec7e..65d112a67e4a4 100644 --- a/jstests/aggregation/expressions/percentile_expression_syntax.js +++ b/jstests/aggregation/expressions/percentile_expression_syntax.js @@ -2,7 +2,6 @@ * Tests for the $percentile expression syntax. 
* @tags: [ * requires_fcv_70, - * featureFlagApproxPercentiles * ] */ (function() { @@ -15,101 +14,220 @@ coll.drop(); assert.commandWorked(coll.insert([{_id: 0, k1: 3, k2: 2, k3: "hi", k4: [1, 2, 3]}])); -/** - * Tests to check that invalid $percentile specifications are rejected. - */ -function assertInvalidSyntax(percentileSpec, msg) { - assert.commandFailed( - coll.runCommand("aggregate", {pipeline: [{$project: {p: percentileSpec}}], cursor: {}}), - msg); +function assertInvalidSyntax({pSpec, letSpec, msg}) { + let command = {pipeline: [{$project: {p: pSpec}}], let : letSpec, cursor: {}}; + assert.commandFailed(coll.runCommand("aggregate", command), msg); } -assertInvalidSyntax({$percentile: 0.5}, "Should fail if $percentile is not an object"); - -assertInvalidSyntax({$percentile: {input: ["$k1", "$k2"], method: "approximate"}}, - "Should fail if $percentile is missing 'p' field"); - -assertInvalidSyntax({$percentile: {p: [0.5], method: "approximate"}}, - "Should fail if $percentile is missing 'input' field"); - -assertInvalidSyntax({$percentile: {p: [0.5], input: "$k1"}}, - "Should fail if $percentile is missing 'method' field"); +function assertValidSyntax({pSpec, letSpec, msg}) { + let command = {pipeline: [{$project: {p: pSpec}}], let : letSpec, cursor: {}}; + assert.commandWorked(coll.runCommand("aggregate", command), msg); +} +/** + * Test missing or unexpected fields in $percentile spec. + */ assertInvalidSyntax( - {$percentile: {p: [0.5], input: ["$k1", "$k2"], method: "approximate", extras: 42}}, - "Should fail if $percentile contains an unexpected field"); - -assertInvalidSyntax({$percentile: {p: 0.5, input: ["$k1", "$k2"], method: "approximate"}}, - "Should fail if 'p' field in $percentile isn't array"); + {pSpec: {$percentile: 0.5}, msg: "Should fail if $percentile is not an object"}); -assertInvalidSyntax({$percentile: {p: [], input: ["$k1", "$k2"], method: "approximate"}}, - "Should fail if 'p' field in $percentile is an empty array"); +assertInvalidSyntax({ + pSpec: {$percentile: {input: ["$k1", "$k2"], method: "approximate"}}, + msg: "Should fail if $percentile is missing 'p' field" +}); -assertInvalidSyntax({$percentile: {p: [0.5], input: [], method: "approximate"}}, - "Should fail if 'input' field in $percentile is an empty array"); +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5], method: "approximate"}}, + msg: "Should fail if $percentile is missing 'input' field" +}); -assertInvalidSyntax( - {$percentile: {p: [0.5, "foo"], input: ["$k1", "$k2"], method: "approximate"}}, - "Should fail if 'p' field in $percentile is an array with a non-numeric element"); +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5], input: "$k1"}}, + msg: "Should fail if $percentile is missing 'method' field" +}); -assertInvalidSyntax( - {$percentile: {p: [0.5, 10], input: ["$k1", "$k2"], method: "approximate"}}, - "Should fail if 'p' field in $percentile is an array with any value outside of [0, 1] range"); - -assertInvalidSyntax({$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: 42}}, - "Should fail if 'method' field isn't a string"); - -assertInvalidSyntax({$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: "fancy"}}, - "Should fail if 'method' isn't one of _predefined_ strings"); +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5], input: ["$k1", "$k2"], method: "approximate", extras: 42}}, + msg: "Should fail if $percentile contains an unexpected field" +}); /** - * Tests for $median. 
$median desugars to $percentile with the field p:[0.5] added, and therefore - * has similar syntax to $percentile. + * Test invalid 'p' field, specified as a constant. */ +assertInvalidSyntax({ + pSpec: {$percentile: {p: 0.5, input: ["$k1", "$k2"], method: "approximate"}}, + msg: "Should fail if 'p' field in $percentile isn't array" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [], input: ["$k1", "$k2"], method: "approximate"}}, + msg: "Should fail if 'p' field in $percentile is an empty array" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, "foo"], input: ["$k1", "$k2"], method: "approximate"}}, + msg: "Should fail if 'p' field in $percentile is an array with a non-numeric element" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 10], input: ["$k1", "$k2"], method: "approximate"}}, + msg: + "Should fail if 'p' field in $percentile is an array with any value outside of [0, 1] range" +}); -assertInvalidSyntax({$median: {p: [0.5], input: "$k4", method: "approximate"}}, - "Should fail if 'p' is defined"); - -assertInvalidSyntax({$median: {method: "approximate"}}, - "Should fail if $median is missing 'input' field"); - -assertInvalidSyntax({$median: {input: [], method: "approximate"}}, - "Should fail if $median has an empty array as its 'input' field"); +/** + * Test invalid 'p' field, specified as an expression. + */ +assertInvalidSyntax({ + pSpec: {$percentile: {p: ["$x"], input: ["$k1", "$k2"], method: "approximate"}}, + msg: "'p' should not accept non-const expressions" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: {$add: [0.1, 0.5]}, input: ["$k1", "$k2"], method: "approximate"}}, + msg: "'p' should not accept expressions that evaluate to a non-array" +}); + +assertInvalidSyntax({ + pSpec: { + $percentile: { + p: {$concatArrays: [[0.01, 0.1], ["foo"]]}, + input: ["$k1", "$k2"], + method: "approximate" + } + }, + msg: "'p' should not accept expressions that evaluate to an array with non-numeric elements" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: "$$pvals", input: ["$k1", "$k2"], method: "approximate"}}, + letSpec: {pvals: 0.5}, + msg: "'p' should not accept variables that evaluate to a non-array" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: "$$pvals", input: ["$k1", "$k2"], method: "approximate"}}, + letSpec: {pvals: [0.5, "foo"]}, + msg: "'p' should not accept variables that evaluate to an array with non-numeric elements" +}); -assertInvalidSyntax({$median: {input: ["$k1", "$k2"]}}, - "Should fail if $median is missing 'method' field"); +/** + * Test invalid 'method' field. + */ +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: 42}}, + msg: "$percentile should fail if 'method' field isn't a string" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: "fancy"}}, + msg: "$percentile should fail if 'method' isn't one of the _predefined_ strings" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: "continuous"}}, + msg: "$percentile should fail because continuous 'method' isn't supported yet" +}); + +assertInvalidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.7], input: ["$k1", "$k2"], method: "discrete"}}, + msg: "$percentile should fail because discrete 'method' isn't supported yet" +}); -assertInvalidSyntax({$median: {input: "$x", method: "approximate", extras: 42}}, - "Should fail if $median contains an unexpected field"); +/** + * Tests for $median. 
+ */ +assertInvalidSyntax({ + pSpec: {$median: {p: [0.5], input: "$k4", method: "approximate"}}, + msg: "Should fail if 'p' is defined" +}); + +assertInvalidSyntax({ + pSpec: {$median: {method: "approximate"}}, + msg: "Should fail if $median is missing 'input' field" +}); + +assertInvalidSyntax({ + pSpec: {$median: {input: ["$k1", "$k2"]}}, + msg: "Should fail if $median is missing 'method' field" +}); + +assertInvalidSyntax({ + pSpec: {$median: {input: "$x", method: "approximate", extras: 42}}, + msg: "Should fail if $median contains an unexpected field" +}); + +assertInvalidSyntax({ + pSpec: {$median: {input: ["$k1", "$k2"], method: "fancy"}}, + msg: "$median should fail if 'method' isn't one of the _predefined_ strings" +}); + +assertInvalidSyntax({ + pSpec: {$median: {input: ["$k1", "$k2"], method: "continuous"}}, + msg: "$median should fail because continuous 'method' isn't supported yet" +}); + +assertInvalidSyntax({ + pSpec: {$median: {input: ["$k1", "$k2"], method: "discrete"}}, + msg: "$median should fail because discrete 'method' isn't supported yet" +}); /** * Test that valid $percentile specifications are accepted. The results, i.e. semantics, are * tested elsewhere and would cover all of the cases below, we are providing them here * nonetheless for completeness. */ -function assertValidSyntax(percentileSpec, msg) { - assert.commandWorked( - coll.runCommand("aggregate", {pipeline: [{$project: {p: percentileSpec}}], cursor: {}}), - msg); -} - -assertValidSyntax( - {$percentile: {p: [0.0, 0.0001, 0.5, 0.995, 1.0], input: ["$k1"], method: "approximate"}}, - "Should be able to specify an array of percentiles"); - -assertValidSyntax({$percentile: {p: [0.5, 0.9], input: ["k3"], method: "approximate"}}, - "Non-numeric expressions in input array should be gracefully ignored"); - -assertValidSyntax({$percentile: {p: [0.5], input: "$k4", method: "approximate"}}, - "Should work if 'input' field in $percentile is a single expression"); +assertValidSyntax({ + pSpec: { + $percentile: + {p: [0.0, 0.0001, 0.5, 0.995, 1.0], input: ["$k1", "$k2"], method: "approximate"} + }, + msg: "Should be able to specify an array of percentiles" +}); /** - * Tests for $median. $median desugars to $percentile with the field p:[0.5] added. + * Test valid 'input' field (even if they don't make sense). 
*/ +assertValidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.9], input: "something", method: "approximate"}}, + msg: "Non-array 'input' field should be gracefully ignored" +}); + +assertValidSyntax({ + pSpec: {$percentile: {p: [0.5], input: [], method: "approximate"}}, + msg: "Empty array in the 'input' should be ignored" +}); + +assertValidSyntax({ + pSpec: {$percentile: {p: [0.5, 0.9], input: ["k3"], method: "approximate"}}, + msg: "Non-numeric expressions in the 'input' array should be gracefully ignored" +}); + +assertValidSyntax({ + pSpec: {$percentile: {p: [0.5], input: "$k4", method: "approximate"}}, + msg: "Should work if 'input' field in $percentile is a simple expression" +}); + +assertValidSyntax({ + pSpec: { + $percentile: { + p: [0.5], + input: {$concatArrays: [["$k1", "$k2"], [{$add: [2, "$k1"]}], "$k4"]}, + method: "approximate" + } + }, + msg: "Should work if 'input' field in $percentile is a complex expression" +}); -assertValidSyntax({$median: {input: "$k4", method: "approximate"}}, - "Simple base case for $median with single expression input field"); - -assertValidSyntax({$median: {input: ["$k1", "$k2"], method: "approximate"}}, - "Simple base case for $median with array input field"); +/** + * Tests for $median. + */ +assertValidSyntax({ + pSpec: {$median: {input: "$k4", method: "approximate"}}, + msg: "Simple base case for $median with single expression input field" +}); + +assertValidSyntax({ + pSpec: {$median: {input: ["$k1", "$k2"], method: "approximate"}}, + msg: "Simple base case for $median with array input field" +}); })(); diff --git a/jstests/aggregation/expressions/rand.js b/jstests/aggregation/expressions/rand.js index d4d3559bc1adf..a5086b61ea756 100644 --- a/jstests/aggregation/expressions/rand.js +++ b/jstests/aggregation/expressions/rand.js @@ -1,11 +1,6 @@ /** * Test the $rand expression. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage(). - const coll = db.expression_rand; coll.drop(); @@ -28,5 +23,4 @@ print("Average: ", avg); // Test certainty within 10 standard deviations. const err = 10.0 / Math.sqrt(12.0 * N); assert.lte(0.5 - err, avg); -assert.gte(0.5 + err, avg); -}()); +assert.gte(0.5 + err, avg); \ No newline at end of file diff --git a/jstests/aggregation/expressions/subtract.js b/jstests/aggregation/expressions/subtract.js index 91e46c08b72ce..1be9f731cbbfd 100644 --- a/jstests/aggregation/expressions/subtract.js +++ b/jstests/aggregation/expressions/subtract.js @@ -1,7 +1,11 @@ +load("jstests/aggregation/extras/utils.js"); // For assertErrorCode and assertErrMsgContains. +load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. 
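
// A minimal illustrative sketch (separate from the patch hunks), mirroring the Date - double
// rounding case added below (_id: 9): the double operand is rounded to int64 before it is
// subtracted from the Date. The collection name and values are assumptions for illustration.
const subtractSketch = db.subtract_rounding_sketch;
subtractSketch.drop();
assert.commandWorked(subtractSketch.insert({_id: 0, when: new Date(1683794065002), delta: 1.5}));
const roundedDiff =
    subtractSketch.aggregate([{$project: {d: {$subtract: ["$when", "$delta"]}}}]).toArray();
// 1.5 rounds to 2, so two milliseconds are subtracted.
assert.eq(roundedDiff[0].d, new Date(1683794065000));
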
+ +// Tests for $subtract aggregation expression (function() { "use strict"; -const coll = db.add_coll; +const coll = db.subtract_coll; coll.drop(); assert.commandWorked(coll.insert({_id: 0, lhs: 1, rhs: 1})); @@ -14,8 +18,20 @@ assert.commandWorked( assert.commandWorked(coll.insert({_id: 5, lhs: new Date(1912392670000), rhs: 70000})); assert.commandWorked( coll.insert({_id: 6, lhs: new Date(1912392670000), rhs: new Date(1912392600000)})); -assert.commandWorked(coll.insert( - {_id: 7, lhs: NumberLong("9000000000000000000"), rhs: NumberLong("-9000000000000000000")})); +// Doubles are rounded to int64 when subtracted from Date +assert.commandWorked(coll.insert({_id: 7, lhs: new Date(1683794065002), rhs: 0.5})); +assert.commandWorked(coll.insert({_id: 8, lhs: new Date(1683794065002), rhs: 1.4})); +assert.commandWorked(coll.insert({_id: 9, lhs: new Date(1683794065002), rhs: 1.5})); +assert.commandWorked(coll.insert({_id: 10, lhs: new Date(1683794065002), rhs: 1.7})); +// Decimals are rounded to int64, when tie rounded to even, when subtracted from Date +assert.commandWorked( + coll.insert({_id: 11, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.4")})); +assert.commandWorked( + coll.insert({_id: 12, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.5")})); +assert.commandWorked( + coll.insert({_id: 13, lhs: new Date(1683794065002), rhs: new NumberDecimal("1.7")})); +assert.commandWorked( + coll.insert({_id: 14, lhs: new Date(1683794065002), rhs: new NumberDecimal("2.5")})); const result = coll.aggregate([{$project: {diff: {$subtract: ["$lhs", "$rhs"]}}}, {$sort: {_id: 1}}]) @@ -27,8 +43,39 @@ assert.eq(result[3].diff, 10.0); assert.eq(result[4].diff, NumberDecimal("9990.00005")); assert.eq(result[5].diff, new Date(1912392600000)); assert.eq(result[6].diff, 70000); +assert.eq(result[7].diff, new Date(1683794065001)); +assert.eq(result[8].diff, new Date(1683794065001)); +assert.eq(result[9].diff, new Date(1683794065000)); +assert.eq(result[10].diff, new Date(1683794065000)); +assert.eq(result[11].diff, new Date(1683794065001)); +assert.eq(result[12].diff, new Date(1683794065000)); +assert.eq(result[13].diff, new Date(1683794065000)); +assert.eq(result[14].diff, new Date(1683794065000)); + +// Following cases will report overflow error +coll.drop(); + +assert.commandWorked(coll.insert([{ + _id: 0, + veryBigNegativeLong: NumberLong("-9223372036854775808"), + veryBigNegativeDouble: -9223372036854775808, + veryBigNegativeDecimal: NumberDecimal("-9223372036854775808"), + doubleNaN: NaN, + decimalNaN: NumberDecimal("NaN"), +}])); + +let pipeline = [{$project: {res: {$subtract: [new Date(10), "$veryBigNegativeLong"]}}}]; +assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow"); + +pipeline = [{$project: {res: {$subtract: [new Date(10), "$veryBigNegativeDouble"]}}}]; +assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow"); + +pipeline = [{$project: {res: {$subtract: [new Date(10), "$veryBigNegativeDecimal"]}}}]; +assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow"); + +pipeline = [{$project: {res: {$subtract: [new Date(-1), "$doubleNaN"]}}}]; +assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow"); -// TODO WRITING-10039 After type promotion algorithm is fixed, we need to use more strict assert -// to check type promotion -assert.eq(bsonWoCompare(result[7].diff, 1.8e+19), 0); +pipeline = [{$project: {res: {$subtract: [new Date(-1), "$decimalNaN"]}}}]; 
+assertErrCodeAndErrMsgContains(coll, pipeline, ErrorCodes.Overflow, "date overflow"); }()); diff --git a/jstests/aggregation/expressions/trim.js b/jstests/aggregation/expressions/trim.js index 821e15ea4a38f..472c37c15e578 100644 --- a/jstests/aggregation/expressions/trim.js +++ b/jstests/aggregation/expressions/trim.js @@ -5,6 +5,7 @@ "use strict"; load("jstests/aggregation/extras/utils.js"); // For assertErrorCode, testExpression and // testExpressionWithCollation. +load("jstests/libs/sbe_assert_error_override.js"); const coll = db.trim_expressions; @@ -81,10 +82,48 @@ assert.eq( {_id: 4, proof: null}, ]); -// Test that errors are reported correctly. -assertErrorCode(coll, [{$project: {x: {$trim: " x "}}}], 50696); -assertErrorCode(coll, [{$project: {x: {$trim: {input: 4}}}}], 50699); -assertErrorCode(coll, [{$project: {x: {$trim: {input: {$add: [4, 2]}}}}}], 50699); -assertErrorCode(coll, [{$project: {x: {$trim: {input: "$_id"}}}}], 50699); -assertErrorCode(coll, [{$project: {x: {$trim: {input: " x ", chars: "$_id"}}}}], 50700); +// Semantically same as the tests above but non-constant input for 'chars' +coll.drop(); +assert.commandWorked(coll.insert([ + {_id: 0, proof: "Left as an exercise for the reader∎", extra: "∎"}, + {_id: 1, proof: "∎∃ proof∎", extra: "∎"}, + { + _id: 2, + proof: "Just view the problem as a continuous DAG whose elements are taylor series∎", + extra: "∎" + }, + {_id: 3, proof: null}, + {_id: 4}, +])); +assert.eq( + coll.aggregate( + [{$sort: {_id: 1}}, {$project: {proof: {$rtrim: {input: "$proof", chars: "$extra"}}}}]) + .toArray(), + [ + {_id: 0, proof: "Left as an exercise for the reader"}, + {_id: 1, proof: "∎∃ proof"}, + { + _id: 2, + proof: "Just view the problem as a continuous DAG whose elements are taylor series" + }, + {_id: 3, proof: null}, + {_id: 4, proof: null}, + ]); + +coll.drop(); +assert.commandWorked(coll.insert([ + {_id: 0, nonObject: " x "}, + {_id: 1, constantNum: 4}, +])); + +// Test that errors are reported correctly (for all of $trim, $ltrim, $rtrim). 
+for (const op of ["$trim", "$ltrim", "$rtrim"]) { + assertErrorCode(coll, [{$project: {x: {[op]: {}}}}], 50695); + assertErrorCode(coll, [{$project: {x: {[op]: "$nonObject"}}}], 50696); + assertErrorCode(coll, [{$project: {x: {[op]: {input: "$constantNum"}}}}], 50699); + assertErrorCode( + coll, [{$project: {x: {[op]: {input: {$add: ["$constantNum", "$constantNum"]}}}}}], 50699); + assertErrorCode(coll, [{$project: {x: {[op]: {input: "$_id"}}}}], 50699); + assertErrorCode(coll, [{$project: {x: {[op]: {input: "$nonObject", chars: "$_id"}}}}], 50700); +} }()); diff --git a/jstests/aggregation/expressions/unary_numeric.js b/jstests/aggregation/expressions/unary_numeric.js index 4c01bbdc750e4..90527cc4a54dc 100644 --- a/jstests/aggregation/expressions/unary_numeric.js +++ b/jstests/aggregation/expressions/unary_numeric.js @@ -315,4 +315,4 @@ assertErrorCode(coll, [{$project: {a: {$exp: "$x"}}}], 28765); assertErrorCode(coll, [{$project: {a: {$log10: "$x"}}}], 28765); assertErrorCode(coll, [{$project: {a: {$ln: "$x"}}}], 28765); assertErrorCode(coll, [{$project: {a: {$sqrt: "$x"}}}], 28765); -}()); \ No newline at end of file +}()); diff --git a/jstests/aggregation/extras/merge_helpers.js b/jstests/aggregation/extras/merge_helpers.js index 0475f3d3d1be0..4ef88541ab6b3 100644 --- a/jstests/aggregation/extras/merge_helpers.js +++ b/jstests/aggregation/extras/merge_helpers.js @@ -86,4 +86,4 @@ function assertMergeSucceedsWithExpectedUniqueIndex( // recreation in the sharded collections passthrough suites. function dropWithoutImplicitRecreate(collName) { db.runCommand({drop: collName}); -} \ No newline at end of file +} diff --git a/jstests/aggregation/extras/window_function_helpers.js b/jstests/aggregation/extras/window_function_helpers.js index 49e2af29c4a60..f30b5c0c1e955 100644 --- a/jstests/aggregation/extras/window_function_helpers.js +++ b/jstests/aggregation/extras/window_function_helpers.js @@ -1,10 +1,10 @@ load("jstests/aggregation/extras/utils.js"); // arrayEq -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; /** * Create a collection of tickers and prices. */ -function seedWithTickerData(coll, docsPerTicker) { +export function seedWithTickerData(coll, docsPerTicker) { for (let i = 0; i < docsPerTicker; i++) { assert.commandWorked( coll.insert({_id: i, partIndex: i, ticker: "T1", price: (500 - i * 10)})); @@ -14,13 +14,13 @@ function seedWithTickerData(coll, docsPerTicker) { } } -function forEachPartitionCase(callback) { +export function forEachPartitionCase(callback) { callback(null); callback("$ticker"); callback({$toLower: "$ticker"}); } -const documentBounds = [ +export const documentBounds = [ ["unbounded", 0], ["unbounded", -1], ["unbounded", 1], @@ -39,7 +39,7 @@ const documentBounds = [ [-2, 3], ]; -function forEachDocumentBoundsCombo(callback) { +export function forEachDocumentBoundsCombo(callback) { documentBounds.forEach(function(bounds, index) { let boundsCombo = [bounds]; for (let j = index + 1; j < documentBounds.length; j++) { @@ -74,7 +74,7 @@ function forEachDocumentBoundsCombo(callback) { * Note that this function assumes that the data in 'coll' has been seeded with the documents from * the seedWithTickerData() method above. */ -function computeAsGroup({ +export function computeAsGroup({ coll, partitionKey, accumSpec, @@ -105,7 +105,7 @@ function computeAsGroup({ /** * Helper to calculate the correct skip based on the lowerBound given. 
*/ -function calculateSkip(lowerBound, indexInPartition) { +export function calculateSkip(lowerBound, indexInPartition) { let skipValueToUse = 0; if (lowerBound === "current") { skipValueToUse = indexInPartition; @@ -123,7 +123,7 @@ function calculateSkip(lowerBound, indexInPartition) { /** * Helper to calculate the correct limit based on the bounds given. */ -function calculateLimit(lowerBound, upperBound, indexInPartition) { +export function calculateLimit(lowerBound, upperBound, indexInPartition) { let limitValueToUse = "unbounded"; if (upperBound === "current") { if (lowerBound === "unbounded") { @@ -160,7 +160,7 @@ function calculateLimit(lowerBound, upperBound, indexInPartition) { return limitValueToUse; } -function assertResultsEqual(wfRes, index, groupRes, accum) { +export function assertResultsEqual(wfRes, index, groupRes, accum) { // On DEBUG builds, the computed $group may be slightly different due to precision // loss when spilling to disk. // TODO SERVER-42616: Enable the exact check for $stdDevPop/Samp. @@ -180,7 +180,7 @@ function assertResultsEqual(wfRes, index, groupRes, accum) { "Window function result for index " + index + ": " + tojson(wfRes)); } -function assertExplainResult(explainResult) { +export function assertExplainResult(explainResult) { const stages = getAggPlanStages(explainResult, "$_internalSetWindowFields"); for (let stage of stages) { assert(stage.hasOwnProperty("$_internalSetWindowFields"), stage); @@ -209,7 +209,7 @@ function assertExplainResult(explainResult) { * Note that this function assumes that the documents in 'coll' were initialized using the * seedWithTickerData() method above. */ -function testAccumAgainstGroup(coll, accum, onNoResults = null, accumArgs = "$price") { +export function testAccumAgainstGroup(coll, accum, onNoResults = null, accumArgs = "$price") { const accumSpec = {[accum]: accumArgs}; forEachPartitionCase(function(partition) { documentBounds.forEach(function(bounds, index) { diff --git a/jstests/aggregation/group_conversion_to_distinct_scan.js b/jstests/aggregation/group_conversion_to_distinct_scan.js index 546f8238f44ec..240cd94f92e0b 100644 --- a/jstests/aggregation/group_conversion_to_distinct_scan.js +++ b/jstests/aggregation/group_conversion_to_distinct_scan.js @@ -18,10 +18,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const coll = db.group_conversion_to_distinct_scan; coll.drop(); @@ -1067,5 +1064,4 @@ assertPipelineResultsAndExplain({ ], expectedOutput: expectedResult, validateExplain: (explain) => assertPlanUsesDistinctScan(explain, {_id: 1}) -}); -}()); +}); \ No newline at end of file diff --git a/jstests/aggregation/large_bson_mid_pipeline.js b/jstests/aggregation/large_bson_mid_pipeline.js index 0604bff86c27d..13fcbfb2a0d02 100644 --- a/jstests/aggregation/large_bson_mid_pipeline.js +++ b/jstests/aggregation/large_bson_mid_pipeline.js @@ -2,11 +2,6 @@ * Tests that extra-large BSON objects (>16MB) can be materialized for the '$match' stage in the * middle of the query plan without throwing 'BSONObjectTooLarge' exception. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStage()'. 
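
// A minimal illustrative sketch (separate from the patch hunks) of the module-style layout
// that the load()/IIFE removals in these hunks converge on. The import path and helper are
// the ones this patch already uses elsewhere; the log message is an assumption.
import {checkSBEEnabled} from "jstests/libs/sbe_util.js";

if (!checkSBEEnabled(db)) {
    jsTestLog("Skipping sketch: SBE is not enabled");
    quit();  // module-style tests call quit() where the old IIFE wrapper used 'return'
}
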
- const testDB = db.getSiblingDB("jsTestName"); assert.commandWorked(testDB.dropDatabase()); @@ -23,5 +18,4 @@ const pipeline = [ {$project: {a: 1}} ]; -assert.doesNotThrow(() => coll.aggregate(pipeline).toArray()); -})(); +assert.doesNotThrow(() => coll.aggregate(pipeline).toArray()); \ No newline at end of file diff --git a/jstests/aggregation/match_no_swap_rand.js b/jstests/aggregation/match_no_swap_rand.js index 3206f6fa792e8..3aad6d86c53f3 100644 --- a/jstests/aggregation/match_no_swap_rand.js +++ b/jstests/aggregation/match_no_swap_rand.js @@ -10,11 +10,8 @@ * requires_pipeline_optimization, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); +import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; function getWinningPlanForPipeline({coll, pipeline}) { const explain = assert.commandWorked(coll.explain().aggregate(pipeline)); @@ -161,4 +158,3 @@ function assertScanFilterEq({coll, pipeline, filter}) { }); } } -}()); diff --git a/jstests/aggregation/match_swapping_renamed_fields.js b/jstests/aggregation/match_swapping_renamed_fields.js index 1657282e73522..7adf4aab69d32 100644 --- a/jstests/aggregation/match_swapping_renamed_fields.js +++ b/jstests/aggregation/match_swapping_renamed_fields.js @@ -6,10 +6,7 @@ * requires_pipeline_optimization, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStage, getAggPlanStages} from "jstests/libs/analyze_plan.js"; let coll = db.match_swapping_renamed_fields; coll.drop(); @@ -203,5 +200,4 @@ explain = coll.explain().aggregate(pipeline); // We expect that the $match stage has been split into two, since one predicate has an // applicable rename that allows swapping, while the other does not. let matchStages = getAggPlanStages(explain, "$match"); -assert.eq(2, matchStages.length); -}()); +assert.eq(2, matchStages.length); \ No newline at end of file diff --git a/jstests/aggregation/no_output_to_system.js b/jstests/aggregation/no_output_to_system.js index 2f966e0bea4af..594db28232cf0 100644 --- a/jstests/aggregation/no_output_to_system.js +++ b/jstests/aggregation/no_output_to_system.js @@ -50,4 +50,4 @@ if (!FixtureHelpers.isMongos(db)) { // $out allows for the source collection to be the same as the destination collection. assertErrorCode(outputToLocal, {$out: outputToLocal.getName()}, 31321); } -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/optimize_away_pipeline.js b/jstests/aggregation/optimize_away_pipeline.js index 8dfe834018206..f4fde7b8e01ff 100644 --- a/jstests/aggregation/optimize_away_pipeline.js +++ b/jstests/aggregation/optimize_away_pipeline.js @@ -14,13 +14,18 @@ // requires_pipeline_optimization, // requires_profiling, // ] -(function() { -"use strict"; - load("jstests/concurrency/fsm_workload_helpers/server_types.js"); // For isWiredTiger. -load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers. + +import { + getPlanStages, + getAggPlanStage, + aggPlanHasStage, + planHasStage, + isAggregationPlan, + isQueryPlan, +} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fixture_helpers.js"); // For 'isMongos' and 'isSharded'. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const coll = db.optimize_away_pipeline; coll.drop(); @@ -799,4 +804,3 @@ if (!FixtureHelpers.isMongos(db) && isWiredTiger(db)) { [{op: "query", ns: view.getFullName()}, {op: "getmore", ns: view.getFullName()}]); } } -}()); diff --git a/jstests/aggregation/sources/collStats/query_exec_stats.js b/jstests/aggregation/sources/collStats/query_exec_stats.js index 76c1b46ac7cf1..fa63a65f0c278 100644 --- a/jstests/aggregation/sources/collStats/query_exec_stats.js +++ b/jstests/aggregation/sources/collStats/query_exec_stats.js @@ -2,8 +2,6 @@ // @tags: [ // assumes_no_implicit_collection_creation_after_drop, // does_not_support_repeated_reads, -// # TODO SERVER-67640: Verify 'top' and $collStats work correctly for queries in CQF. -// cqf_incompatible, // ] (function() { "use strict"; diff --git a/jstests/aggregation/sources/densify/densify_sort_opt_comparison.js b/jstests/aggregation/sources/densify/densify_sort_opt_comparison.js index 9ad9ee37bfa0d..63de418760983 100644 --- a/jstests/aggregation/sources/densify/densify_sort_opt_comparison.js +++ b/jstests/aggregation/sources/densify/densify_sort_opt_comparison.js @@ -7,12 +7,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/feature_flag_util.js"); // For isEnabled. load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. load("jstests/libs/fail_point_util.js"); // For configureFailPoint @@ -124,5 +119,4 @@ FixtureHelpers.runCommandOnEachPrimary({ } }); assert.commandWorked( - db.adminCommand({'configureFailPoint': 'disablePipelineOptimization', 'mode': 'off'})); -})(); + db.adminCommand({'configureFailPoint': 'disablePipelineOptimization', 'mode': 'off'})); \ No newline at end of file diff --git a/jstests/aggregation/sources/densify/densify_sort_optimization.js b/jstests/aggregation/sources/densify/densify_sort_optimization.js index ddc18ede260ef..38c8e8c518e56 100644 --- a/jstests/aggregation/sources/densify/densify_sort_optimization.js +++ b/jstests/aggregation/sources/densify/densify_sort_optimization.js @@ -7,11 +7,7 @@ * ] */ -(function() { -"use strict"; - load("jstests/libs/fixture_helpers.js"); -load("jstests/libs/feature_flag_util.js"); // For isEnabled. load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation. const coll = db[jsTestName()]; @@ -289,5 +285,4 @@ for (let i = 0; i < testCases.length; i++) { assert(anyEq(result, testCases[i][1]), "Test case " + i + " failed.\n" + "Expected:\n" + tojson(testCases[i][1]) + "\nGot:\n" + tojson(result)); -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/densify/explicit_range.js b/jstests/aggregation/sources/densify/explicit_range.js index 842ae79d5bfb0..74a92e2b88e40 100644 --- a/jstests/aggregation/sources/densify/explicit_range.js +++ b/jstests/aggregation/sources/densify/explicit_range.js @@ -104,4 +104,24 @@ for (let i = 0; i < densifyUnits.length; i++) { runDensifyRangeTest({step, bounds: [10, 45]}); } } + +// Run a test where there are no documents in the range to ensure we don't generate anything before +// the range. 
+coll.drop(); +let documents = [ + {"date": ISODate("2022-10-29T23:00:00Z")}, +]; +coll.insert(documents); +let stage = { + field: "date", + range: { + step: 1, + unit: "month", + bounds: [ + ISODate("2022-10-31T23:00:00.000Z"), + ISODate("2022-11-30T23:00:00.000Z"), + ], + }, +}; +testDensifyStage(stage, coll, "Ensure no docs before range"); })(); diff --git a/jstests/aggregation/sources/fill/fill.js b/jstests/aggregation/sources/fill/fill.js index 332bc7f7e4c68..ff64803d7dcc1 100644 --- a/jstests/aggregation/sources/fill/fill.js +++ b/jstests/aggregation/sources/fill/fill.js @@ -6,12 +6,7 @@ * ] */ -(function() { - -"use strict"; - load("jstests/libs/fixture_helpers.js"); -load("jstests/libs/feature_flag_util.js"); // For isEnabled. load("jstests/aggregation/extras/utils.js"); // For arrayEq. const coll = db[jsTestName()]; @@ -382,5 +377,4 @@ for (let i = 0; i < testCases.length; i++) { const result = coll.aggregate(testCases[i][0]).toArray(); assertArrayEq( {actual: result, expected: testCases[i][1], extraErrorMsg: " during testCase " + i}); -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/fill/fill_and_densify.js b/jstests/aggregation/sources/fill/fill_and_densify.js index e374481e5a226..0e9de5d340d36 100644 --- a/jstests/aggregation/sources/fill/fill_and_densify.js +++ b/jstests/aggregation/sources/fill/fill_and_densify.js @@ -5,10 +5,7 @@ * ] */ -(function() { -"use strict"; load("jstests/libs/fixture_helpers.js"); -load("jstests/libs/feature_flag_util.js"); // For isEnabled. load("jstests/aggregation/extras/utils.js"); // For arrayEq. const coll = db[jsTestName()]; @@ -153,5 +150,4 @@ expected = [ {"part": 2, "val": 8, "toFill": 13, "possible": 6.833333333333334}, {"val": 9, "toFill": 16, "possible": 8, "part": 2} ]; -assertArrayEq({actual: result, expected: expected}); -})(); +assertArrayEq({actual: result, expected: expected}); \ No newline at end of file diff --git a/jstests/aggregation/sources/fill/fill_parse.js b/jstests/aggregation/sources/fill/fill_parse.js index ccb7f460b9280..e07abf626cc09 100644 --- a/jstests/aggregation/sources/fill/fill_parse.js +++ b/jstests/aggregation/sources/fill/fill_parse.js @@ -7,9 +7,7 @@ * ] */ -(function() { load("jstests/libs/fixture_helpers.js"); -load("jstests/libs/feature_flag_util.js"); // For isEnabled. load("jstests/aggregation/extras/utils.js"); // For anyEq and desugarSingleStageAggregation. 
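
// A minimal illustrative sketch (separate from the patch hunks) of the behaviour the new
// explicit_range.js case above exercises: with explicit 'bounds', $densify must not generate
// documents before the lower bound even when no existing document falls inside the range.
// The collection name is an assumption for illustration.
const densifySketch = db.densify_bounds_sketch;
densifySketch.drop();
assert.commandWorked(densifySketch.insert({date: ISODate("2022-10-29T23:00:00Z")}));
const lowerBound = ISODate("2022-10-31T23:00:00.000Z");
const densified = densifySketch.aggregate([{
    $densify: {
        field: "date",
        range: {step: 1, unit: "month", bounds: [lowerBound, ISODate("2022-11-30T23:00:00.000Z")]}
    }
}]).toArray();
// Nothing should have been generated between the original document and the lower bound.
const generatedBeforeRange = densified.filter(
    d => d.date < lowerBound && d.date.getTime() !== ISODate("2022-10-29T23:00:00Z").getTime());
assert.eq(0, generatedBeforeRange.length, tojson(densified));
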
const coll = db[jsTestName()]; @@ -255,5 +253,4 @@ for (let i = 0; i < testCases.length; i++) { assert(anyEq(result, testCases[i][1], false, null, "UUIDPLACEHOLDER"), "Test case " + i + " failed.\n" + "Expected:\n" + tojson(testCases[i][1]) + "\nGot:\n" + tojson(result)); -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/graphLookup/graphlookup_rewrite.js b/jstests/aggregation/sources/graphLookup/graphlookup_rewrite.js index 2f10862d94d38..bdbfb96373e8f 100644 --- a/jstests/aggregation/sources/graphLookup/graphlookup_rewrite.js +++ b/jstests/aggregation/sources/graphLookup/graphlookup_rewrite.js @@ -6,42 +6,31 @@ // do_not_wrap_aggregations_in_facets, // requires_pipeline_optimization, // ] -(function() { -"use strict"; - load('jstests/aggregation/extras/utils.js'); -load("jstests/libs/analyze_plan.js"); +import {aggPlanHasStage} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fixture_helpers.js"); const coll = db.graphlookup_rewrite; coll.drop(); assert.commandWorked(coll.insertMany([ - {"from": "a", "foo": 1}, - {"from": "b", "to": "a", "foo": 2}, - {"from": "c", "to": "b", "foo": 3}, - {"from": "d", "to": "b", "foo": 4}, - {"from": "e", "to": "c", "foo": 5}, - {"from": "f", "to": "d", "foo": 6} + {"_id": 1, "from": "a", "foo": 1}, + {"_id": 2, "from": "b", "to": "a", "foo": 2}, + {"_id": 3, "from": "c", "to": "b", "foo": 3}, + {"_id": 4, "from": "d", "to": "b", "foo": 4}, + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 6, "from": "f", "to": "d", "foo": 6} ])); -const admin = db.getSiblingDB("admin"); - -const setPipelineOptimizationMode = (mode) => { - FixtureHelpers.runCommandOnEachPrimary( - {db: admin, cmdObj: {configureFailPoint: "disablePipelineOptimization", mode}}); -}; - -// Get initial optimization mode. -const pipelineOptParameter = assert.commandWorked( - db.adminCommand({getParameter: 1, "failpoint.disablePipelineOptimization": 1})); -const oldMode = - pipelineOptParameter["failpoint.disablePipelineOptimization"].mode ? 'alwaysOn' : 'off'; - -function assertStagesAndOutput( - {pipeline = [], expectedStages = [], optimizedAwayStages = [], fieldsToSkip = [], msg = ""}) { - setPipelineOptimizationMode("off"); - +function assertStagesAndOutput({ + pipeline = [], + expectedStages = [], + optimizedAwayStages = [], + expectedOutput = [], + orderedArrayComparison = true, + fieldsToSkip = [], + msg = "" +}) { const explain = coll.explain().aggregate(pipeline); const output = coll.aggregate(pipeline).toArray(); @@ -54,10 +43,10 @@ function assertStagesAndOutput( `${msg}: stage ${stage} not optimized away: ${tojson(explain)}`); } - setPipelineOptimizationMode("alwaysOn"); - - const expectedOutput = coll.aggregate(pipeline).toArray(); - assert(orderedArrayEq(output, expectedOutput, true, fieldsToSkip), msg); + const res = orderedArrayComparison + ? 
orderedArrayEq(output, expectedOutput, false, fieldsToSkip) + : arrayEq(output, expectedOutput, false, null /*valueComparator*/, fieldsToSkip); + assert(res, `actual=${tojson(output)}, expected=t${tojson(expectedOutput)}`); } const graphLookup = { @@ -74,6 +63,48 @@ assertStagesAndOutput({ pipeline: [graphLookup, {$sort: {foo: 1}}], expectedStages: ["SORT", "COLLSCAN", "$graphLookup"], optimizedAwayStages: ["$sort"], + expectedOutput: [ + { + "_id": 1, + "from": "a", + "foo": 1, + "out": [ + {"_id": 2, "from": "b", "to": "a", "foo": 2}, + {"_id": 3, "from": "c", "to": "b", "foo": 3}, + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 6, "from": "f", "to": "d", "foo": 6}, + {"_id": 4, "from": "d", "to": "b", "foo": 4} + ] + }, + { + "_id": 2, + "from": "b", + "to": "a", + "foo": 2, + "out": [ + {"_id": 6, "from": "f", "to": "d", "foo": 6}, + {"_id": 3, "from": "c", "to": "b", "foo": 3}, + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 4, "from": "d", "to": "b", "foo": 4} + ] + }, + { + "_id": 3, + "from": "c", + "to": "b", + "foo": 3, + "out": [{"_id": 5, "from": "e", "to": "c", "foo": 5}] + }, + { + "_id": 4, + "from": "d", + "to": "b", + "foo": 4, + "out": [{"_id": 6, "from": "f", "to": "d", "foo": 6}] + }, + {"_id": 5, "from": "e", "to": "c", "foo": 5, "out": []}, + {"_id": 6, "from": "f", "to": "d", "foo": 6, "out": []} + ], msg: "$graphLookup should swap with $sort if there is no internal $unwind" }); @@ -81,6 +112,49 @@ assertStagesAndOutput({ pipeline: [graphLookup, {$limit: 100}], expectedStages: ["LIMIT", "COLLSCAN", "$graphLookup"], optimizedAwayStages: ["$limit"], + orderedArrayComparison: false, + expectedOutput: [ + { + "_id": 1, + "from": "a", + "foo": 1, + "out": [ + {"_id": 2, "from": "b", "to": "a", "foo": 2}, + {"_id": 3, "from": "c", "to": "b", "foo": 3}, + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 6, "from": "f", "to": "d", "foo": 6}, + {"_id": 4, "from": "d", "to": "b", "foo": 4} + ] + }, + { + "_id": 2, + "from": "b", + "to": "a", + "foo": 2, + "out": [ + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 3, "from": "c", "to": "b", "foo": 3}, + {"_id": 6, "from": "f", "to": "d", "foo": 6}, + {"_id": 4, "from": "d", "to": "b", "foo": 4} + ] + }, + { + "_id": 3, + "from": "c", + "to": "b", + "foo": 3, + "out": [{"_id": 5, "from": "e", "to": "c", "foo": 5}] + }, + { + "_id": 4, + "from": "d", + "to": "b", + "foo": 4, + "out": [{"_id": 6, "from": "f", "to": "d", "foo": 6}] + }, + {"_id": 5, "from": "e", "to": "c", "foo": 5, "out": []}, + {"_id": 6, "from": "f", "to": "d", "foo": 6, "out": []} + ], msg: "$graphLookup should swap with $limit if there is no internal $unwind" }); @@ -88,6 +162,7 @@ assertStagesAndOutput({ pipeline: [graphLookup, {$skip: 100}], expectedStages: ["SKIP", "COLLSCAN", "$graphLookup"], optimizedAwayStages: ["$skip"], + expectedOutput: [], msg: "$graphLookup should swap with $skip if there is no internal $unwind" }); @@ -95,23 +170,152 @@ assertStagesAndOutput({ pipeline: [graphLookup, {$sort: {foo: 1}}, {$limit: 100}], expectedStages: ["SORT", "COLLSCAN", "$graphLookup"], optimizedAwayStages: ["LIMIT", "$limit"], + expectedOutput: [ + { + "_id": 1, + "from": "a", + "foo": 1, + "out": [ + {"_id": 6, "from": "f", "to": "d", "foo": 6}, + {"_id": 2, "from": "b", "to": "a", "foo": 2}, + {"_id": 4, "from": "d", "to": "b", "foo": 4}, + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 3, "from": "c", "to": "b", "foo": 3} + ] + }, + { + "_id": 2, + "from": "b", + "to": "a", + "foo": 2, + "out": [ + {"_id": 6, "from": 
"f", "to": "d", "foo": 6}, + {"_id": 4, "from": "d", "to": "b", "foo": 4}, + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 3, "from": "c", "to": "b", "foo": 3} + ] + }, + { + "_id": 3, + "from": "c", + "to": "b", + "foo": 3, + "out": [{"_id": 5, "from": "e", "to": "c", "foo": 5}] + }, + { + "_id": 4, + "from": "d", + "to": "b", + "foo": 4, + "out": [{"_id": 6, "from": "f", "to": "d", "foo": 6}] + }, + {"_id": 5, "from": "e", "to": "c", "foo": 5, "out": []}, + {"_id": 6, "from": "f", "to": "d", "foo": 6, "out": []} + ], msg: "$graphLookup should swap with $limit and $sort, and $sort should absorb $limit if " + "there is no internal $unwind" }); assertStagesAndOutput({ - pipeline: [graphLookup, {$sort: {out: 1}}], + pipeline: [graphLookup, {$sort: {out: 1, foo: 1}}], expectedStages: ["COLLSCAN", "$graphLookup", "$sort"], + expectedOutput: [ + {"_id": 5, "from": "e", "to": "c", "foo": 5, "out": []}, + {"_id": 6, "from": "f", "to": "d", "foo": 6, "out": []}, + { + "_id": 1, + "from": "a", + "foo": 1, + "out": [ + {"_id": 6, "from": "f", "to": "d", "foo": 6}, + {"_id": 2, "from": "b", "to": "a", "foo": 2}, + {"_id": 4, "from": "d", "to": "b", "foo": 4}, + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 3, "from": "c", "to": "b", "foo": 3} + ] + }, + { + "_id": 2, + "from": "b", + "to": "a", + "foo": 2, + "out": [ + {"_id": 6, "from": "f", "to": "d", "foo": 6}, + {"_id": 4, "from": "d", "to": "b", "foo": 4}, + {"_id": 5, "from": "e", "to": "c", "foo": 5}, + {"_id": 3, "from": "c", "to": "b", "foo": 3} + ] + }, + { + "_id": 3, + "from": "c", + "to": "b", + "foo": 3, + "out": [{"_id": 5, "from": "e", "to": "c", "foo": 5}] + }, + { + "_id": 4, + "from": "d", + "to": "b", + "foo": 4, + "out": [{"_id": 6, "from": "f", "to": "d", "foo": 6}] + } + ], msg: "$graphLookup should not swap with $sort if sort uses fields created by $graphLookup" }); assertStagesAndOutput({ pipeline: [graphLookup, {$unwind: "$out"}, {$sort: {foo: 1}}], expectedStages: ["COLLSCAN", "$graphLookup", "$sort"], + expectedOutput: [ + {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 6, "from": "f", "to": "d", "foo": 6}}, + {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 2, "from": "b", "to": "a", "foo": 2}}, + {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 4, "from": "d", "to": "b", "foo": 4}}, + {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 5, "from": "e", "to": "c", "foo": 5}}, + {"_id": 1, "from": "a", "foo": 1, "out": {"_id": 3, "from": "c", "to": "b", "foo": 3}}, + { + "_id": 2, + "from": "b", + "to": "a", + "foo": 2, + "out": {"_id": 6, "from": "f", "to": "d", "foo": 6} + }, + { + "_id": 2, + "from": "b", + "to": "a", + "foo": 2, + "out": {"_id": 4, "from": "d", "to": "b", "foo": 4} + }, + { + "_id": 2, + "from": "b", + "to": "a", + "foo": 2, + "out": {"_id": 5, "from": "e", "to": "c", "foo": 5} + }, + { + "_id": 2, + "from": "b", + "to": "a", + "foo": 2, + "out": {"_id": 3, "from": "c", "to": "b", "foo": 3} + }, + { + "_id": 3, + "from": "c", + "to": "b", + "foo": 3, + "out": {"_id": 5, "from": "e", "to": "c", "foo": 5} + }, + { + "_id": 4, + "from": "d", + "to": "b", + "foo": 4, + "out": {"_id": 6, "from": "f", "to": "d", "foo": 6} + } + ], msg: "$graphLookup with an internal $unwind should not swap with $sort", fieldsToSkip: ["out"] -}); - -// Reset optimization mode. 
-setPipelineOptimizationMode(oldMode); -})(); +}); \ No newline at end of file diff --git a/jstests/aggregation/sources/group/group_large_documents.js b/jstests/aggregation/sources/group/group_large_documents.js index 324da85e5d6c2..d15ffccedd2f7 100644 --- a/jstests/aggregation/sources/group/group_large_documents.js +++ b/jstests/aggregation/sources/group/group_large_documents.js @@ -34,4 +34,4 @@ for (let preventProjectPushdown of [false, true]) { assert(arrayEq(results, [{a: 2}]), "Pipeline:\n" + tojson(pipeline) + "Actual results:\n" + tojson(results)); } -}()); \ No newline at end of file +}()); diff --git a/jstests/aggregation/sources/indexStats/verify_index_stats_output.js b/jstests/aggregation/sources/indexStats/verify_index_stats_output.js index f19ee681b60b5..c8d7300bef61d 100644 --- a/jstests/aggregation/sources/indexStats/verify_index_stats_output.js +++ b/jstests/aggregation/sources/indexStats/verify_index_stats_output.js @@ -110,4 +110,4 @@ let finishedOutput = coll.aggregate([{$indexStats: {}}, {$match: {name: indexNam for (const indexStats of finishedOutput) { assert(!indexStats.hasOwnProperty("building"), tojson(indexStats)); } -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/sources/lookup/lookup_collation.js b/jstests/aggregation/sources/lookup/lookup_collation.js index 4c23cb4a54287..1eb834a314c68 100644 --- a/jstests/aggregation/sources/lookup/lookup_collation.js +++ b/jstests/aggregation/sources/lookup/lookup_collation.js @@ -13,11 +13,7 @@ * 2. 'collation' option overrides local collection's collation */ load("jstests/aggregation/extras/utils.js"); // For anyEq. -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages, getWinningPlan. - -(function() { - -"use strict"; +import {getWinningPlan, getAggPlanStages} from "jstests/libs/analyze_plan.js"; const testDB = db.getSiblingDB(jsTestName()); assert.commandWorked(testDB.dropDatabase()); @@ -217,4 +213,3 @@ let explain; assertIndexJoinStrategy(explain); } })(); -})(); diff --git a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_hj.js b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_hj.js index c490314901387..58ad84899bf2d 100644 --- a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_hj.js +++ b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_hj.js @@ -1,15 +1,15 @@ /** * Tests for $lookup with localField/foreignField syntax using hash join algorithm. */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"); // For runTests. +import { + JoinAlgorithm, + runTests +} from "jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (!checkSBEEnabled(db)) { jsTestLog("Skipping the test because it only applies to $lookup in SBE"); - return; + quit(); } runTests({ @@ -17,4 +17,3 @@ runTests({ foreignColl: db.lookup_arrays_semantics_foreign_hj, currentJoinAlgorithm: JoinAlgorithm.HJ }); -})(); diff --git a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_inlj.js b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_inlj.js index aca8b222acea6..fde36478984e3 100644 --- a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_inlj.js +++ b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_inlj.js @@ -1,11 +1,12 @@ /** * Tests for $lookup with localField/foreignField syntax using indexed nested loop join algorithm. 
*/ -(function() { -"use strict"; - -load("jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"); // For runTests and - // runTest_*. +import { + JoinAlgorithm, + runTest_SingleForeignRecord, + runTest_SingleLocalRecord, + runTests, +} from "jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"; /** * Run the tests with sorted ascending/descending indexes. @@ -148,4 +149,3 @@ runTests({ }); })(); })(); -})(); diff --git a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js index b4016587fb651..2b2679ce4cfe4 100644 --- a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js +++ b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js @@ -15,10 +15,10 @@ */ load("jstests/aggregation/extras/utils.js"); -load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages()' -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; -const JoinAlgorithm = { +export const JoinAlgorithm = { HJ: {name: "HJ", strategy: "HashJoin"}, NLJ: {name: "NLJ", strategy: "NestedLoopJoin"}, INLJ_Asc: {name: "INLJ_Asc", indexType: 1, strategy: "IndexedLoopJoin"}, @@ -26,7 +26,7 @@ const JoinAlgorithm = { INLJ_Hashed: {name: "INLJ_Hashed", indexType: "hashed", strategy: "IndexedLoopJoin"} }; -function setupCollections(testConfig, localRecords, foreignRecords, foreignField) { +export function setupCollections(testConfig, localRecords, foreignRecords, foreignField) { const {localColl, foreignColl, currentJoinAlgorithm} = testConfig; localColl.drop(); assert.commandWorked(localColl.insert(localRecords)); @@ -44,7 +44,7 @@ function setupCollections(testConfig, localRecords, foreignRecords, foreignField * Checks that the expected join algorithm has been used (to sanity check that the tests do provide * the intended coverage). */ -function checkJoinConfiguration(testConfig, explain) { +export function checkJoinConfiguration(testConfig, explain) { const {currentJoinAlgorithm} = testConfig; const eqLookupNodes = getAggPlanStages(explain, "EQ_LOOKUP"); if (checkSBEEnabled(db)) { @@ -66,7 +66,7 @@ function checkJoinConfiguration(testConfig, explain) { * content of the "as" field but only that it's not empty for local records with ids in * 'idsExpectToMatch'. */ -function runTest_SingleForeignRecord( +export function runTest_SingleForeignRecord( testConfig, {testDescription, localRecords, localField, foreignRecord, foreignField, idsExpectedToMatch}) { const {localColl, foreignColl, currentJoinAlgorithm} = testConfig; @@ -115,7 +115,7 @@ function runTest_SingleForeignRecord( * Executes $lookup with exactly one record in the local collection and checks that the "as" field * for it contains documents with ids from `idsExpectedToMatch`. */ -function runTest_SingleLocalRecord( +export function runTest_SingleLocalRecord( testConfig, {testDescription, localRecord, localField, foreignRecords, foreignField, idsExpectedToMatch}) { const {localColl, foreignColl, currentJoinAlgorithm} = testConfig; @@ -154,7 +154,7 @@ function runTest_SingleLocalRecord( /** * Executes $lookup and expects it to fail with the specified 'expectedErrorCode`. 
*/ -function runTest_ExpectFailure( +export function runTest_ExpectFailure( testConfig, {testDescription, localRecords, localField, foreignRecords, foreignField, expectedErrorCode}) { const {localColl, foreignColl, currentJoinAlgorithm} = testConfig; @@ -182,7 +182,7 @@ function runTest_ExpectFailure( /** * Tests. */ -function runTests(testConfig) { +export function runTests(testConfig) { const {localColl, foreignColl, currentJoinAlgorithm} = testConfig; // Sanity-test that the join is configured correctly. diff --git a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_nlj.js b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_nlj.js index 1ed304a0a18da..e5f711b3a8697 100644 --- a/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_nlj.js +++ b/jstests/aggregation/sources/lookup/lookup_equijoin_semantics_nlj.js @@ -1,14 +1,13 @@ /** * Tests for $lookup with localField/foreignField syntax using nested loop join algorithm. */ -(function() { -"use strict"; - -load("jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"); // For runTests. +import { + JoinAlgorithm, + runTests +} from "jstests/aggregation/sources/lookup/lookup_equijoin_semantics_lib.js"; runTests({ localColl: db.lookup_arrays_semantics_local_nlj, foreignColl: db.lookup_arrays_semantics_foreign_nlj, currentJoinAlgorithm: JoinAlgorithm.NLJ }); -})(); diff --git a/jstests/aggregation/sources/lookup/lookup_large_documents.js b/jstests/aggregation/sources/lookup/lookup_large_documents.js index f645fbbdc2613..9778b04dc79ea 100644 --- a/jstests/aggregation/sources/lookup/lookup_large_documents.js +++ b/jstests/aggregation/sources/lookup/lookup_large_documents.js @@ -35,4 +35,4 @@ for (let preventProjectPushdown of [false, true]) { assert(arrayEq(results, [{foo: 3}]), "Pipeline:\n" + tojson(pipeline) + "Actual results:\n" + tojson(results)); } -}()); \ No newline at end of file +}()); diff --git a/jstests/aggregation/sources/lookup/lookup_numeric_field.js b/jstests/aggregation/sources/lookup/lookup_numeric_field.js new file mode 100644 index 0000000000000..a40f68e961f79 --- /dev/null +++ b/jstests/aggregation/sources/lookup/lookup_numeric_field.js @@ -0,0 +1,256 @@ +// Tests that numeric field components in $lookup and $graphLookup arguments behave correctly. This +// includes $lookup 'localField' and $graphLookup 'startsWith', 'connectFromField', and +// 'connectToField'. +// @tags: [ +// # Using a column scan removes the transformBy we search for. +// assumes_no_implicit_index_creation, +// ] +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; + +const local = db.local; +const foreign = db.foreign; + +foreign.drop(); +assert.commandWorked(foreign.insert({y: 3, z: 4})); + +function testFieldTraversal(pipeline, localDoc, shouldMatchDoc, prefix) { + local.drop(); + assert.commandWorked(local.insert(localDoc)); + + // Test correctness. + const results = db.local.aggregate(pipeline).toArray(); + if (shouldMatchDoc) { + assert.eq(results, [{count: 1}]); + } else { + assert.eq(results.length, 0); + } + + // Look for the transformBy. + const explain = db.local.explain().aggregate(pipeline); + const projStages = [ + ...getAggPlanStages(explain, "PROJECTION_SIMPLE"), + ...getAggPlanStages(explain, "PROJECTION_DEFAULT") + ]; + assert.gt(projStages.length, 0, explain); + + for (const projStage of projStages) { + // We have the stage, now make sure we have the correct projection. 
+ let transform = projStage.transformBy; + if (transform.hasOwnProperty(prefix.join("."))) { + transform = transform[prefix.join(".")]; + } else { + for (const field of prefix) { + transform = transform[field]; + } + } + assert.eq(transform, true, explain); + } +} + +function testLookupLocalField(localField, localDoc, shouldMatchDoc, prefix) { + // Some prefix of the localField argument gets pushed down to find as a "transformBy" since it's + // the only field we need for this pipeline. + // We should see + // {transformBy: {prefix: true, _id: false}} + const pipeline = [ + {$lookup: {from: "foreign", localField: localField, foreignField: "y", as: "docs"}}, + {$match: {"docs.0.z": 4}}, + {$count: "count"} + ]; + testFieldTraversal(pipeline, localDoc, shouldMatchDoc, prefix); +} + +function testGraphLookupStartsWith(localField, localDoc, shouldMatchDoc, prefix) { + // Similar to the lookup transformBy case, but for $graphLookup. + const pipeline = [ + {$graphLookup: { + from: "foreign", + startWith: localField, + connectFromField: "z", + connectToField: "y", + maxDepth: 0, + as: "docs" + }}, + {$match: {"docs.0.z": 4}}, + {$count: "count"} + ]; + testFieldTraversal(pipeline, localDoc, shouldMatchDoc, prefix); +} + +function testGraphLookupToFromField(foreignDocs, fromField, toField, expectedDocs) { + foreign.drop(); + assert.commandWorked(foreign.insert(foreignDocs)); + + const pipeline = [ + {$graphLookup: { + from: "foreign", + startWith: 0, + connectFromField: fromField, + connectToField: toField, + as: "docs" + }}, + {$project: {docs: {$sortArray: {input: "$docs", sortBy: {_id: 1}}}}} + ]; + + const result = local.aggregate(pipeline).toArray(); + assert.eq(result.length, 1); + assert.eq(result[0].docs, expectedDocs); +} + +// Test the $lookup 'localField' field. +{ + // Non-numeric cases shouldn't be affected. + testLookupLocalField("a", {a: 3}, true, ["a"]); + testLookupLocalField("a", {a: 1}, false, ["a"]); + testLookupLocalField("a.b", {a: {b: 3}}, true, ["a", "b"]); + testLookupLocalField("a.b.0", {a: {b: [3]}}, true, ["a", "b"]); + + // Basic numeric cases. + testLookupLocalField("a.0", {a: [3, 2, 1]}, true, ["a"]); + testLookupLocalField("a.0", {a: {"0": 3, "1": 2, "3": 1}}, true, ["a"]); + testLookupLocalField("a.1", {a: [3, 2, 1]}, false, ["a"]); + testLookupLocalField("a.3", {a: [3, 2, 1]}, false, ["a"]); + testLookupLocalField("b.3", {a: [3, 2, 1]}, false, ["b"]); + + // Consecutive numeric fields. + testLookupLocalField("c.1.0", {c: [0, [3, 4, 3], [1, 2]]}, true, ["c"]); + testLookupLocalField("c.1.2", {c: [0, [3, 4, 3], [1, 2]]}, true, ["c"]); + testLookupLocalField("c.0.0", {c: [0, [3, 4, 3], [1, 2]]}, false, ["c"]); + testLookupLocalField("b.2.1", {a: [0, [3, 4, 3], [1, 2]]}, false, ["b"]); + + // Mix numeric and regular fields. + testLookupLocalField("a.2.b.1", {a: [{}, {b: [2]}, {b: [1, 3]}]}, true, ["a"]); + testLookupLocalField("a.2.b.1", {a: {"2": {b: [1, 3]}}}, true, ["a"]); + testLookupLocalField("a.2.b.2", {a: [{}, {b: [2]}, {b: [1, 3]}]}, false, ["a"]); + testLookupLocalField("a.1.b.1", {a: [{}, {b: [2]}, {b: [1, 3]}]}, false, ["a"]); + testLookupLocalField("a.1.b.2", {a: [{}, {b: [2]}, {b: [1, 3]}]}, false, ["a"]); + + // Test two regular fields then a numeric to make sure "transformBy" has "a.b" instead of just + // "a". + testLookupLocalField("a.b.0", {a: {b: [3]}}, true, ["a", "b"]); + testLookupLocalField("a.b.c.1", {a: {b: {c: [1, 3]}}}, true, ["a", "b", "c"]); + + // Verify that $lookup does not treat 0-prefixed numeric fields as array indices. 
+ testLookupLocalField("a.00", {a: [3]}, false, ["a"]); + testLookupLocalField("a.b.01", {a: {b: [1, 3]}}, false, ["a", "b"]); + testLookupLocalField("a.00.b", {a: [{b: 3}]}, false, ["a"]); + + // Verify that $lookup always treats 0-prefixed numeric fields as field names. + testLookupLocalField("a.00", {a: {"00": 3}}, true, ["a"]); + testLookupLocalField("a.b.01", {a: {b: {"01": 3}}}, true, ["a", "b"]); + testLookupLocalField("a.00.b", {a: {"00": {b: 3}}}, true, ["a"]); + + // Regular index fields shouldn't match "00"-type fields. + testLookupLocalField("a.0", {a: {"00": 3}}, false, ["a"]); + testLookupLocalField("a.b.1", {a: {b: {"01": 3}}}, false, ["a", "b"]); + testLookupLocalField("a.0.b", {a: {"00": {b: 3}}}, false, ["a"]); +} + +// Test the $graphLookup 'startsWith' field. +{ + // Non-numeric cases shouldn't be affected. + testGraphLookupStartsWith("$a", {a: 3}, true, ["a"]); + testGraphLookupStartsWith("$a", {a: 1}, false, ["a"]); + testGraphLookupStartsWith("$a.b", {a: {b: 3}}, true, ["a", "b"]); + testGraphLookupStartsWith("$a.b.0", {a: {b: {"0": 3}}}, true, ["a", "b", "0"]); + testGraphLookupStartsWith("$a.b.0", {a: {b: [{"0": 3}]}}, true, ["a", "b", "0"]); + testGraphLookupStartsWith("$a.b.0", {a: {b: [3]}}, false, ["a", "b", "0"]); + testGraphLookupStartsWith("$a.0", {a: {"0": 3}}, true, ["a", "0"]); + testGraphLookupStartsWith("$a.0", {a: {"0": 2}}, false, ["a", "0"]); + testGraphLookupStartsWith("$a.0", {a: [3, 2, 1]}, false, ["a", "0"]); + + // Should traverse once. + testGraphLookupStartsWith("$a.0", {a: [{"0": 3}]}, true, ["a", "0"]); + testGraphLookupStartsWith("$a.0", {a: [[{"0": 3}]]}, false, ["a", "0"]); + + // Consecutive numeric fields. + testGraphLookupStartsWith("$c.1.0", {c: {"1": {"0": 3}}}, true, ["c", "1", "0"]); + testGraphLookupStartsWith("$c.1.0", {c: {"01": {"0": 3}}}, false, ["c", "1", "0"]); + testGraphLookupStartsWith("$c.1.0", {c: {"1": {"00": 3}}}, false, ["c", "1", "0"]); + testGraphLookupStartsWith("$c.1.0", {c: {"0": {"1": 3}}}, false, ["c", "1", "0"]); + + // Mix numeric and regular fields. + testGraphLookupStartsWith("$a.2.b.1", {a: {"2": {b: {"1": 3}}}}, true, ["a", "2", "b", "1"]); + testGraphLookupStartsWith( + "$a.2.b.1", {a: [{}, {b: [2]}, {b: [1, 3]}]}, false, ["a", "2", "b", "1"]); + + testGraphLookupStartsWith("$a.00", {a: {"00": 3}}, true, ["a", "00"]); + testGraphLookupStartsWith("$a.00", {a: [{"00": 3}]}, true, ["a", "00"]); + testGraphLookupStartsWith("$a.00", {a: {"00": [3]}}, true, ["a", "00"]); + testGraphLookupStartsWith("$a.00", {a: [{"00": [3]}]}, false, ["a", "00"]); + testGraphLookupStartsWith("$a.00", {a: [3]}, false, ["a", "00"]); +} + +local.drop(); +foreign.drop(); + +assert.commandWorked(local.insert({_id: 0})); + +// Test the $graphLookup 'connectFromField' field. +const fromSpecs = [ + // Finding a value of "1" should match the next document. + {singleField: "0", doubleField: "00", array: [1, 2]}, + {singleField: "1", doubleField: "01", array: [2, 1]} +]; +for (const spec of fromSpecs) { + // "00"-type fields should act as field names. + testGraphLookupToFromField([{_id: 1, to: 0, from: {[spec.doubleField]: 1}}, {_id: 2, to: 1}], + "from." + spec.doubleField, + "to", + [{_id: 1, to: 0, from: {[spec.doubleField]: 1}}, {_id: 2, to: 1}]); + // "00"-type fields should not act as an index into an array. + testGraphLookupToFromField([{_id: 1, to: 0, from: spec.array}, {_id: 2, to: 1}], + "from." + spec.doubleField, + "to", + [{_id: 1, to: 0, from: spec.array}]); + // Regular numeric fields should not match "00"-type fields. 
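    // (For example, with from: {"00": 1}, a connectFromField of "from.0" finds no value to
    // follow, so only the directly-connected document is returned and {_id: 2} is never reached.)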
+ testGraphLookupToFromField([{_id: 1, to: 0, from: {[spec.doubleField]: 1}}, {_id: 2, to: 1}], + "from." + spec.singleField, + "to", + [{_id: 1, to: 0, from: {[spec.doubleField]: 1}}]); + // Regular numeric fields can act as an array index. + testGraphLookupToFromField([{_id: 1, to: 0, from: spec.array}, {_id: 2, to: 1}], + "from." + spec.singleField, + "to", + [{_id: 1, to: 0, from: spec.array}, {_id: 2, to: 1}]); + // "00"-type fields should not match "0"-type field names. + testGraphLookupToFromField([{_id: 1, to: 0, from: {[spec.singleField]: 1}}, {_id: 2, to: 1}], + "from." + spec.doubleField, + "to", + [{_id: 1, to: 0, from: {[spec.singleField]: 1}}]); + // Regular numeric fields can match themselves as field names. + testGraphLookupToFromField([{_id: 1, to: 0, from: {[spec.singleField]: 1}}, {_id: 2, to: 1}], + "from." + spec.singleField, + "to", + [{_id: 1, to: 0, from: {[spec.singleField]: 1}}, {_id: 2, to: 1}]); +} + +// Test the $graphLookup 'connectToField' field. +const toSpecs = [ + // Finding a value of "0" should match the document. + {singleField: "0", doubleField: "00", array: [0, 2]}, + {singleField: "1", doubleField: "01", array: [2, 0]} +]; +for (const spec of toSpecs) { + // "00"-type fields should act as field names. + testGraphLookupToFromField([{_id: 1, to: {[spec.doubleField]: 0}}], + "from", + "to." + spec.doubleField, + [{_id: 1, to: {[spec.doubleField]: 0}}]); + // "00"-type fields should not act as an index into an array. + testGraphLookupToFromField([{_id: 1, to: spec.array}], "from", "to." + spec.doubleField, []); + // Regular numeric fields should not match "00"-type fields. + testGraphLookupToFromField( + [{_id: 1, to: {[spec.doubleField]: 0}}], "from", "to." + spec.singleField, []); + // Regular numeric fields can act as an array index. + testGraphLookupToFromField( + [{_id: 1, to: spec.array}], "from", "to." + spec.singleField, [{_id: 1, to: spec.array}]); + // "00"-type fields should not match "0"-type field names. + testGraphLookupToFromField( + [{_id: 1, to: {[spec.singleField]: 0}}], "from", "to." + spec.doubleField, []); + // Regular numeric fields can match themselves as field names. + testGraphLookupToFromField([{_id: 1, to: {[spec.singleField]: 0}}], + "from", + "to." + spec.singleField, + [{_id: 1, to: {[spec.singleField]: 0}}]); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/lookup/lookup_query_stats.js b/jstests/aggregation/sources/lookup/lookup_query_stats.js index 8c598b6c2f2d9..8f413d2652877 100644 --- a/jstests/aggregation/sources/lookup/lookup_query_stats.js +++ b/jstests/aggregation/sources/lookup/lookup_query_stats.js @@ -14,13 +14,12 @@ * requires_pipeline_optimization * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages' -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages and - // getQueryInfoAtTopLevelOrFirstStage. +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; +import { + getQueryInfoAtTopLevelOrFirstStage, + getSbePlanStages +} from "jstests/libs/sbe_explain_helpers.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const isSBELookupEnabled = checkSBEEnabled(db); const testDB = db.getSiblingDB("lookup_query_stats"); @@ -404,4 +403,3 @@ testQueryExecutorStatsWithIndexScan({withUnwind: false}); // taking place within the lookup stage. 
testQueryExecutorStatsWithCollectionScan({withUnwind: true}); testQueryExecutorStatsWithIndexScan({withUnwind: true}); -}()); diff --git a/jstests/aggregation/sources/lookup/lookup_sort_limit.js b/jstests/aggregation/sources/lookup/lookup_sort_limit.js index f100c4bb66c81..58f29ba9728bb 100644 --- a/jstests/aggregation/sources/lookup/lookup_sort_limit.js +++ b/jstests/aggregation/sources/lookup/lookup_sort_limit.js @@ -2,11 +2,6 @@ * Test that a $lookup correctly optimizes a foreign pipeline containing a $sort and a $limit. This * test is designed to reproduce SERVER-36715. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). - const testDB = db.getSiblingDB("lookup_sort_limit"); testDB.dropDatabase(); @@ -45,5 +40,4 @@ res = localColl }]) .toArray(); -assert.eq({_id: 0, result: [{_id: 9, foreignField: 9}]}, res[0]); -}()); +assert.eq({_id: 0, result: [{_id: 9, foreignField: 9}]}, res[0]); \ No newline at end of file diff --git a/jstests/aggregation/sources/lookup/profile_lookup.js b/jstests/aggregation/sources/lookup/profile_lookup.js index edc94e0b99079..0b77b137a10a5 100644 --- a/jstests/aggregation/sources/lookup/profile_lookup.js +++ b/jstests/aggregation/sources/lookup/profile_lookup.js @@ -4,10 +4,7 @@ // requires_profiling, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages. +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const localColl = db.local; const foreignColl = db.foreign; @@ -53,5 +50,4 @@ const eqLookupNodes = getAggPlanStages(localColl.explain().aggregate(pipeline), if (eqLookupNodes.length === 0) { expectedCount += 3; } -assert.eq(expectedCount, actualCount); -}()); +assert.eq(expectedCount, actualCount); \ No newline at end of file diff --git a/jstests/aggregation/sources/match/trivial_match_expr.js b/jstests/aggregation/sources/match/trivial_match_expr.js index 93be40bfeeaac..dcfd05148f498 100644 --- a/jstests/aggregation/sources/match/trivial_match_expr.js +++ b/jstests/aggregation/sources/match/trivial_match_expr.js @@ -10,10 +10,7 @@ // # Explicitly testing optimization. 
// requires_pipeline_optimization, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan} from "jstests/libs/analyze_plan.js"; const coll = db.trivial_match_expr; coll.drop(); @@ -85,5 +82,4 @@ const explainFind = coll.explain().find({$and: [{$expr: "foo"}, {$expr: "$foo"}] assert.eq(getWinningPlan(explainFind.queryPlanner).filter, {$expr: "$foo"}, "$expr truthy constant expression should be optimized away when used " + - "in conjunction with $expr containing non-constant expression"); -})(); + "in conjunction with $expr containing non-constant expression"); \ No newline at end of file diff --git a/jstests/aggregation/sources/merge/merge_to_same_collection.js b/jstests/aggregation/sources/merge/merge_to_same_collection.js index 4e90036f64636..cc2fd7940a253 100644 --- a/jstests/aggregation/sources/merge/merge_to_same_collection.js +++ b/jstests/aggregation/sources/merge/merge_to_same_collection.js @@ -30,4 +30,4 @@ assert.doesNotThrow(() => coll.aggregate(pipeline)); assertArrayEq( {actual: coll.find().toArray(), expected: [{_id: 0, a: 3}, {_id: 1, a: 1}, {_id: 2, a: 2}]}); -}()); \ No newline at end of file +}()); diff --git a/jstests/aggregation/sources/merge/merge_with_dollar_fields.js b/jstests/aggregation/sources/merge/merge_with_dollar_fields.js new file mode 100644 index 0000000000000..b79b3a278dd6e --- /dev/null +++ b/jstests/aggregation/sources/merge/merge_with_dollar_fields.js @@ -0,0 +1,135 @@ +// Tests $merge over documents with $-field in it. +// +// Sharded collections have special requirements on the join field. +// @tags: [assumes_unsharded_collection] + +(function() { +"use strict"; + +load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection. + +const sourceName = 'merge_with_dollar_fields_source'; +const source = db[sourceName]; +const targetName = 'merge_with_dollar_fields_target'; +const target = db[targetName]; + +const joinField = 'joinField'; +const sourceDoc = { + $dollar: 1, + joinField +}; +const targetDoc = { + a: 1, + joinField +}; +assertDropCollection(db, sourceName); +assert.commandWorked(source.insert(sourceDoc)); + +function runTest({whenMatched, whenNotMatched}, targetDocs) { + assertDropCollection(db, targetName); + assert.commandWorked(target.createIndex({joinField: 1}, {unique: true})); + assert.commandWorked(target.insert(targetDocs)); + source.aggregate([ + {$project: {_id: 0}}, + { + $merge: { + into: targetName, + on: joinField, + whenMatched, + whenNotMatched, + } + } + ]); + return target.findOne({}, {_id: 0}); +} + +function runTestMatched(mode) { + return runTest(mode, [targetDoc]); +} + +function runTestNotMatched(mode) { + return runTest(mode, []); +} + +// TODO: SERVER-76999: Currently $merge may throw 'FailedToParse' error due to non-local updates. +// We should return consistent results for dollar field documents. 
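// (As a concrete illustration of the helpers above: runTestMatched({whenMatched: 'merge',
// whenNotMatched: 'insert'}) seeds the target with targetDoc and then runs
//     source.aggregate([{$project: {_id: 0}},
//                       {$merge: {into: targetName, on: joinField,
//                                 whenMatched: 'merge', whenNotMatched: 'insert'}}]);
// which tries to add the $-prefixed source field to the matched target document and is therefore
// expected to fail with DollarPrefixedFieldName, as asserted below.)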
+ +// whenMatched: 'replace', whenNotMatched: 'insert' +assert.throwsWithCode(() => runTestMatched({whenMatched: 'replace', whenNotMatched: 'insert'}), + [ErrorCodes.DollarPrefixedFieldName, ErrorCodes.FailedToParse]); + +try { + assert.docEq(sourceDoc, runTestNotMatched({whenMatched: 'replace', whenNotMatched: 'insert'})); +} catch (error) { + assert.commandFailedWithCode(error, ErrorCodes.FailedToParse); +} + +// whenMatched: 'replace', whenNotMatched: 'fail' +assert.throwsWithCode(() => runTestMatched({whenMatched: 'replace', whenNotMatched: 'fail'}), + [ErrorCodes.DollarPrefixedFieldName, ErrorCodes.FailedToParse]); + +assert.throwsWithCode(() => runTestNotMatched({whenMatched: 'replace', whenNotMatched: 'fail'}), + [ErrorCodes.MergeStageNoMatchingDocument, ErrorCodes.FailedToParse]); + +// whenMatched: 'replace', whenNotMatched: 'discard' +assert.throwsWithCode(() => runTestMatched({whenMatched: 'replace', whenNotMatched: 'discard'}), + [ErrorCodes.DollarPrefixedFieldName, ErrorCodes.FailedToParse]); + +try { + assert.eq(null, runTestNotMatched({whenMatched: 'replace', whenNotMatched: 'discard'})); +} catch (error) { + assert.commandFailedWithCode(error, ErrorCodes.FailedToParse); +} + +// whenMatched: 'merge', whenNotMatched: 'insert' +assert.throwsWithCode(() => runTestMatched({whenMatched: 'merge', whenNotMatched: 'insert'}), + ErrorCodes.DollarPrefixedFieldName); + +assert.docEq(sourceDoc, runTestNotMatched({whenMatched: 'merge', whenNotMatched: 'insert'})); + +// whenMatched: 'merge', whenNotMatched: 'fail' +assert.throwsWithCode(() => runTestMatched({whenMatched: 'merge', whenNotMatched: 'fail'}), + ErrorCodes.DollarPrefixedFieldName); + +assert.throwsWithCode(() => runTestNotMatched({whenMatched: 'merge', whenNotMatched: 'fail'}), + ErrorCodes.MergeStageNoMatchingDocument); + +// whenMatched: 'merge', whenNotMatched: 'discard' +assert.throwsWithCode(() => runTestMatched({whenMatched: 'merge', whenNotMatched: 'discard'}), + ErrorCodes.DollarPrefixedFieldName); + +assert.eq(null, runTestNotMatched({whenMatched: 'merge', whenNotMatched: 'discard'})); + +// whenMatched: 'keepExisting', whenNotMatched: 'insert' +assert.docEq(targetDoc, runTestMatched({whenMatched: 'keepExisting', whenNotMatched: 'insert'})); + +assert.docEq(sourceDoc, runTestNotMatched({whenMatched: 'keepExisting', whenNotMatched: 'insert'})); + +// whenMatched: 'fail', whenNotMatched: 'insert' +assert.throwsWithCode(() => runTestMatched({whenMatched: 'fail', whenNotMatched: 'insert'}), + ErrorCodes.DuplicateKey); + +assert.docEq(sourceDoc, runTestNotMatched({whenMatched: 'fail', whenNotMatched: 'insert'})); + +// whenMatched: 'pipeline', whenNotMatched: 'insert' +const pipeline = [{$addFields: {b: 1}}]; +const targetDocAddFields = { + ...targetDoc, + b: 1 +}; +assert.docEq(targetDocAddFields, runTestMatched({whenMatched: pipeline, whenNotMatched: 'insert'})); + +assert.docEq(sourceDoc, runTestNotMatched({whenMatched: pipeline, whenNotMatched: 'insert'})); + +// whenMatched: 'pipeline', whenNotMatched: 'fail' +assert.docEq(targetDocAddFields, runTestMatched({whenMatched: pipeline, whenNotMatched: 'fail'})); + +assert.throwsWithCode(() => runTestNotMatched({whenMatched: pipeline, whenNotMatched: 'fail'}), + ErrorCodes.MergeStageNoMatchingDocument); + +// whenMatched: 'pipeline', whenNotMatched: 'discard' +assert.docEq(targetDocAddFields, + runTestMatched({whenMatched: pipeline, whenNotMatched: 'discard'})); + +assert.eq(null, runTestNotMatched({whenMatched: pipeline, whenNotMatched: 'discard'})); +}()); diff --git 
a/jstests/aggregation/sources/project/remove_redundant_projects.js b/jstests/aggregation/sources/project/remove_redundant_projects.js index 512efdd254602..ae19b320b5435 100644 --- a/jstests/aggregation/sources/project/remove_redundant_projects.js +++ b/jstests/aggregation/sources/project/remove_redundant_projects.js @@ -4,12 +4,14 @@ // do_not_wrap_aggregations_in_facets, // requires_pipeline_optimization, // ] -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For orderedArrayEq. -load('jstests/libs/analyze_plan.js'); // For planHasStage(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import { + getWinningPlan, + planHasStage, + isAggregationPlan, + isQueryPlan +} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; let coll = db.remove_redundant_projects; coll.drop(); @@ -175,5 +177,4 @@ assertResultsMatch({ index: indexSpec, pipelineOptimizedAway: true, removedProjectStage: {'_id.a': 1}, -}); -}()); +}); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/add_to_set.js b/jstests/aggregation/sources/setWindowFields/add_to_set.js index a6370f4c46ef3..ebf6fbe90d38d 100644 --- a/jstests/aggregation/sources/setWindowFields/add_to_set.js +++ b/jstests/aggregation/sources/setWindowFields/add_to_set.js @@ -1,10 +1,10 @@ /** * Test that $addToSet works as a window function. */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -14,5 +14,4 @@ const nDocsPerTicker = 10; seedWithTickerData(coll, nDocsPerTicker); // Run the suite of partition and bounds tests against the $addToSet function. -testAccumAgainstGroup(coll, "$addToSet", []); -})(); +testAccumAgainstGroup(coll, "$addToSet", []); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/avg.js b/jstests/aggregation/sources/setWindowFields/avg.js index da5ee8925ce6c..a48b5c0114386 100644 --- a/jstests/aggregation/sources/setWindowFields/avg.js +++ b/jstests/aggregation/sources/setWindowFields/avg.js @@ -1,10 +1,11 @@ /** * Test that $avg works as a window function. 
*/ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + computeAsGroup, + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -53,5 +54,4 @@ for (let index = 0; index < results.length; index++) { defaultValue: null }); assert.eq(groupRes, results[index].runningAvgLead); -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/count.js b/jstests/aggregation/sources/setWindowFields/count.js index 13fbe83c537a7..89e2245704f60 100644 --- a/jstests/aggregation/sources/setWindowFields/count.js +++ b/jstests/aggregation/sources/setWindowFields/count.js @@ -124,4 +124,4 @@ verifyResults(result, function(num, baseObj) { } return baseObj; }); -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/sources/setWindowFields/covariance.js b/jstests/aggregation/sources/setWindowFields/covariance.js index 8f2b3e2319458..7dd617b39d573 100644 --- a/jstests/aggregation/sources/setWindowFields/covariance.js +++ b/jstests/aggregation/sources/setWindowFields/covariance.js @@ -1,10 +1,7 @@ /** * Test that $covariance(Pop/Samp) works as a window function. */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import {documentBounds} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -137,5 +134,4 @@ function compareCovarianceOfflineAndOnline(bounds) { } // Test various type of window. -documentBounds.forEach(compareCovarianceOfflineAndOnline); -})(); +documentBounds.forEach(compareCovarianceOfflineAndOnline); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/exp_moving_avg.js b/jstests/aggregation/sources/setWindowFields/exp_moving_avg.js index a7d0028583f4c..f9987b9a6daff 100644 --- a/jstests/aggregation/sources/setWindowFields/exp_moving_avg.js +++ b/jstests/aggregation/sources/setWindowFields/exp_moving_avg.js @@ -1,10 +1,7 @@ /** * Test that exponential moving average works as a window function. */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import {seedWithTickerData} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -285,5 +282,4 @@ assert.commandWorked(db.runCommand({ }, ], cursor: {}, -})); -})(); +})); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/explain.js b/jstests/aggregation/sources/setWindowFields/explain.js index cae112cb9e5e9..9650493135d49 100644 --- a/jstests/aggregation/sources/setWindowFields/explain.js +++ b/jstests/aggregation/sources/setWindowFields/explain.js @@ -6,10 +6,7 @@ * * @tags: [assumes_against_mongod_not_mongos] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). 
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db[jsTestName()]; coll.drop(); @@ -213,5 +210,4 @@ function checkExplainResult(pipeline, expectedFunctionMemUsages, expectedTotalMe checkExplainResult(pipeline, expectedFunctionMemUsages, expectedTotal, "executionStats"); checkExplainResult(pipeline, expectedFunctionMemUsages, expectedTotal, "allPlansExecution"); -})(); -}()); +})(); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/first.js b/jstests/aggregation/sources/setWindowFields/first.js index 599bddf35cd08..93b71f0702e9d 100644 --- a/jstests/aggregation/sources/setWindowFields/first.js +++ b/jstests/aggregation/sources/setWindowFields/first.js @@ -1,10 +1,10 @@ /** * Test the behavior of $first. */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -90,5 +90,4 @@ result = coll.runCommand({ ] } }); -assert.commandFailedWithCode(result, ErrorCodes.FailedToParse, "'window' field must be an object"); -})(); +assert.commandFailedWithCode(result, ErrorCodes.FailedToParse, "'window' field must be an object"); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/integral.js b/jstests/aggregation/sources/setWindowFields/integral.js index bb0e962c504e4..42aa154486e6d 100644 --- a/jstests/aggregation/sources/setWindowFields/integral.js +++ b/jstests/aggregation/sources/setWindowFields/integral.js @@ -1,10 +1,7 @@ /** * Test the behavior of $integral. */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import {documentBounds} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db.setWindowFields_integral; @@ -270,5 +267,4 @@ assert.sameMembers(runRangeBasedIntegral([-6, 6]), [ {time: ISODate("2020-01-01T00:00:10.000Z"), y: 5.6, integral: 24.0}, // Empty window. {time: ISODate("2020-01-01T00:00:18.000Z"), y: 6.8, integral: 0.0}, -]); -})(); +]); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/last.js b/jstests/aggregation/sources/setWindowFields/last.js index 6cd69979add76..f5fc3e74db3b7 100644 --- a/jstests/aggregation/sources/setWindowFields/last.js +++ b/jstests/aggregation/sources/setWindowFields/last.js @@ -1,10 +1,10 @@ /** * Test the behavior of $last. 
*/ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -90,5 +90,4 @@ result = coll.runCommand({ ] } }); -assert.commandFailedWithCode(result, ErrorCodes.FailedToParse, "'window' field must be an object"); -})(); +assert.commandFailedWithCode(result, ErrorCodes.FailedToParse, "'window' field must be an object"); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/linear_fill.js b/jstests/aggregation/sources/setWindowFields/linear_fill.js index 2f742db70f215..ac0a9c7507606 100644 --- a/jstests/aggregation/sources/setWindowFields/linear_fill.js +++ b/jstests/aggregation/sources/setWindowFields/linear_fill.js @@ -4,12 +4,7 @@ * requires_fcv_52, * ] */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/feature_flag_util.js"); // For isEnabled. const coll = db.linear_fill; coll.drop(); @@ -516,5 +511,4 @@ assert.commandFailedWithCode(db.runCommand({ ], cursor: {} }), - ErrorCodes.TypeMismatch); -})(); + ErrorCodes.TypeMismatch); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/locf.js b/jstests/aggregation/sources/setWindowFields/locf.js index dcc8ddb6d4cd5..1995803f100b2 100644 --- a/jstests/aggregation/sources/setWindowFields/locf.js +++ b/jstests/aggregation/sources/setWindowFields/locf.js @@ -5,12 +5,7 @@ * requires_fcv_52, * ] */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/feature_flag_util.js"); // For isEnabled. const coll = db[jsTestName()]; coll.drop(); @@ -152,5 +147,4 @@ result = coll.aggregate([{ }]) .toArray(); -assertArrayEq({actual: result, expected: expected}); -})(); +assertArrayEq({actual: result, expected: expected}); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/min_max.js b/jstests/aggregation/sources/setWindowFields/min_max.js index d15d6e119913f..457bd3612d412 100644 --- a/jstests/aggregation/sources/setWindowFields/min_max.js +++ b/jstests/aggregation/sources/setWindowFields/min_max.js @@ -1,10 +1,10 @@ /** * Test that $min/max works as a window function. */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -38,5 +38,4 @@ let results = for (let index = 0; index < results.length; index++) { assert.eq("hiya", results[index].minStr); assert.eq("hiya", results[index].maxStr); -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/n_accumulators.js b/jstests/aggregation/sources/setWindowFields/n_accumulators.js index 4fb2c1242be69..1fa1244ab3db5 100644 --- a/jstests/aggregation/sources/setWindowFields/n_accumulators.js +++ b/jstests/aggregation/sources/setWindowFields/n_accumulators.js @@ -1,10 +1,10 @@ /** * Test that the 'n' family of accumulators work as window functions. 
*/ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -138,5 +138,4 @@ for (const acc of Object.keys(nAccumulators)) { // Missing sortBy. testError({[acc]: {output}, window: {documents: [-1, 1]}}, 5788005); } -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/optimize.js b/jstests/aggregation/sources/setWindowFields/optimize.js index 0b250c2980fe6..b08138b94e7c9 100644 --- a/jstests/aggregation/sources/setWindowFields/optimize.js +++ b/jstests/aggregation/sources/setWindowFields/optimize.js @@ -7,10 +7,7 @@ * requires_pipeline_optimization, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); +import {aggPlanHasStage, getAggPlanStages} from "jstests/libs/analyze_plan.js"; // Find how many stages of the plan are 'stageName'. function numberOfStages(explain, stageName) { @@ -224,5 +221,4 @@ const explain13 = coll.explain().aggregate([ }, {$sort: {a: {$meta: "textScore"}}}, ]); -assert.eq(2, numberOfStages(explain13, '$sort'), explain13); -})(); +assert.eq(2, numberOfStages(explain13, '$sort'), explain13); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/partition.js b/jstests/aggregation/sources/setWindowFields/partition.js index db0b5bd39dc25..dd5e08c168ae9 100644 --- a/jstests/aggregation/sources/setWindowFields/partition.js +++ b/jstests/aggregation/sources/setWindowFields/partition.js @@ -88,4 +88,4 @@ assert(resultsEq(res.toArray(), [ {int_field: 0, count: 1}, {other_field: 0, count: 1} ])); -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/sources/setWindowFields/percentile.js b/jstests/aggregation/sources/setWindowFields/percentile.js index 2121d9c7e5c47..7447f72c904c6 100644 --- a/jstests/aggregation/sources/setWindowFields/percentile.js +++ b/jstests/aggregation/sources/setWindowFields/percentile.js @@ -2,13 +2,12 @@ * Test that $percentile and $median work as window functions. * @tags: [ * requires_fcv_70, - * featureFlagApproxPercentiles * ] */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -30,20 +29,22 @@ testAccumAgainstGroup( coll, "$percentile", [null, null], {p: [0.1, 0.6], input: "$price", method: "approximate"}); testAccumAgainstGroup(coll, "$median", null, {input: "$price", method: "approximate"}); -function runSetWindowStage(percentileSpec, medianSpec) { +function runSetWindowStage(percentileSpec, medianSpec, letSpec) { return coll - .aggregate([ - {$addFields: {str: "hiya"}}, - { - $setWindowFields: { - sortBy: {_id: 1}, - output: { - runningPercentile: percentileSpec, - runningMedian: medianSpec, + .aggregate( + [ + {$addFields: {str: "hiya"}}, + { + $setWindowFields: { + sortBy: {_id: 1}, + output: { + runningPercentile: percentileSpec, + runningMedian: medianSpec, + } } } - } - ]) + ], + {let : letSpec}) .toArray(); } @@ -69,6 +70,27 @@ results = assertResultEqToVal( {resultArray: results, percentile: [minDoc.price, maxDoc.price], median: medianDoc.price}); +// Test that an expression can be used for 'input'. 
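// (The 'input' expression is evaluated per document, so wrapping "$price" in
// {$add: [42, "$price"]} should shift both percentile bounds and the median by exactly 42,
// which is what the assertions below check.)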
+results = runSetWindowStage( + {$percentile: {p: [0.01, 0.99], input: {$add: [42, "$price"]}, method: "approximate"}}, + {$median: {input: {$add: [42, "$price"]}, method: "approximate"}}); +// Since our percentiles are 0.01 and 0.99 and our collection is small, we will always return the +// minimum and maximum value in the collection. +assertResultEqToVal({ + resultArray: results, + percentile: [42 + minDoc.price, 42 + maxDoc.price], + median: 42 + medianDoc.price +}); + +// Test that a variable can be used for 'p'. +results = runSetWindowStage({$percentile: {p: "$$ps", input: "$price", method: "approximate"}}, + {$median: {input: "$price", method: "approximate"}}, + {ps: [0.01, 0.99]}); +// Since our percentiles are 0.01 and 0.99 and our collection is small, we will always return the +// minimum and maximum value in the collection. +assertResultEqToVal( + {resultArray: results, percentile: [minDoc.price, maxDoc.price], median: medianDoc.price}); + // Test that a removable window calculates $percentile and $median correctly using an approximate // method. results = runSetWindowStage( @@ -85,14 +107,15 @@ for (let index = 0; index < results.length; index++) { assert.eq(minVal, results[index].runningMedian, results[index]); } -function testError(percentileSpec, expectedCode) { +function testError(percentileSpec, expectedCode, letSpec) { assert.throwsWithCode(() => coll.aggregate([{ - $setWindowFields: { - partitionBy: "$ticket", - sortBy: {ts: 1}, - output: {outputField: percentileSpec}, - } - }]), + $setWindowFields: { + partitionBy: "$ticket", + sortBy: {ts: 1}, + output: {outputField: percentileSpec}, + } + }], + {let : letSpec}), expectedCode); } @@ -118,9 +141,20 @@ testError({$median: "not an object"}, 7436100); testError({$percentile: {p: [0.1, 0.6], input: "$str", method: false}}, ErrorCodes.TypeMismatch); testError({$median: {input: "$str", method: false}}, ErrorCodes.TypeMismatch); +testError({$percentile: {p: [0.1, 0.6], input: "$str", method: "discrete"}}, ErrorCodes.BadValue); +testError({$median: {input: "$str", method: "discrete"}}, ErrorCodes.BadValue); +testError({$percentile: {p: [0.1, 0.6], input: "$str", method: "continuous"}}, ErrorCodes.BadValue); +testError({$median: {input: "$str", method: "continuous"}}, ErrorCodes.BadValue); + +// invalid expressions or variables for 'p' +testError({$percentile: {p: "$$ps", input: "$price", method: "continuous"}}, + ErrorCodes.BadValue /* non-numeric 'p' value in the variable */, + {ps: "foo"} /* letSpec */); + +testError({$percentile: {p: ["$price"], input: "$str", method: "continuous"}}, + ErrorCodes.BadValue /* non-const 'p' expression */); testError({$percentile: {input: "$str", method: "approximate"}}, 40414 /* IDL required field error */); testError({$median: {p: [0.1, 0.6], input: "$str", method: "approximate"}}, - 40415 /* IDL unknown field error */); -})(); + 40415 /* IDL unknown field error */); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/push.js b/jstests/aggregation/sources/setWindowFields/push.js index d512def432906..ce579fd7ecf59 100644 --- a/jstests/aggregation/sources/setWindowFields/push.js +++ b/jstests/aggregation/sources/setWindowFields/push.js @@ -1,10 +1,10 @@ /** * Test that $push works as a window function. 
*/ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -14,5 +14,4 @@ const nDocsPerTicker = 10; seedWithTickerData(coll, nDocsPerTicker); // Run the suite of partition and bounds tests against the $push function. -testAccumAgainstGroup(coll, "$push", []); -})(); +testAccumAgainstGroup(coll, "$push", []); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/range.js b/jstests/aggregation/sources/setWindowFields/range.js index d31386761de98..350eab0d455ea 100644 --- a/jstests/aggregation/sources/setWindowFields/range.js +++ b/jstests/aggregation/sources/setWindowFields/range.js @@ -1,11 +1,6 @@ /** * Test range-based window bounds. */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); - const coll = db.setWindowFields_range; coll.drop(); @@ -275,5 +270,4 @@ const pipeline = [{ output: {min: {$min: "$temp", window: {range: [-1, 0], unit: "hour"}}} } }]; -assert.commandWorked(db.runCommand({aggregate: coll.getName(), pipeline: pipeline, cursor: {}})); -})(); +assert.commandWorked(db.runCommand({aggregate: coll.getName(), pipeline: pipeline, cursor: {}})); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/rank.js b/jstests/aggregation/sources/setWindowFields/rank.js index 75662829d606f..34fb5b6fd0e95 100644 --- a/jstests/aggregation/sources/setWindowFields/rank.js +++ b/jstests/aggregation/sources/setWindowFields/rank.js @@ -110,4 +110,4 @@ verifyResults(result, function(num, baseObj) { }); result = runRankBasedAccumulator({double: 1}, {$documentNumber: {}}); verifyResults(result, noTieFunc); -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/sources/setWindowFields/shift.js b/jstests/aggregation/sources/setWindowFields/shift.js index 6c3f38a822688..27aeaf9a6a140 100644 --- a/jstests/aggregation/sources/setWindowFields/shift.js +++ b/jstests/aggregation/sources/setWindowFields/shift.js @@ -334,4 +334,4 @@ assert.commandFailedWithCode(coll.runCommand({ cursor: {} }), ErrorCodes.FailedToParse); -})(); \ No newline at end of file +})(); diff --git a/jstests/aggregation/sources/setWindowFields/spill_to_disk.js b/jstests/aggregation/sources/setWindowFields/spill_to_disk.js index 2f687ae6ffc20..b2c214d181bfd 100644 --- a/jstests/aggregation/sources/setWindowFields/spill_to_disk.js +++ b/jstests/aggregation/sources/setWindowFields/spill_to_disk.js @@ -5,25 +5,25 @@ * requires_profiling, * assumes_read_concern_unchanged, * do_not_wrap_aggregations_in_facets, - * featureFlagApproxPercentiles * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts. load("jstests/libs/discover_topology.js"); // For findNonConfigNodes. -load("jstests/aggregation/extras/window_function_helpers.js"); -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; load("jstests/aggregation/extras/utils.js"); // arrayEq. load("jstests/libs/profiler.js"); // getLatestProfileEntry. 
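The rewritten test below drives spilling with the 'overrideMemoryLimitForSpill' failpoint instead of tuning internalDocumentSourceSetWindowFieldsMaxMemoryBytes directly. As a rough sketch of what the new changeSpillLimit() helper issues, written against a single node for illustration (the failpoint name and the maxDocsBeforeSpill field are taken from the helper itself):

// Force $setWindowFields to start spilling once 5 documents are buffered.
assert.commandWorked(db.adminCommand({
    configureFailPoint: 'overrideMemoryLimitForSpill',
    mode: 'alwaysOn',
    data: {maxDocsBeforeSpill: 5},
}));

// ... run the window-function workload under test with {allowDiskUse: true} ...

// Restore the default spilling behaviour afterwards.
assert.commandWorked(
    db.adminCommand({configureFailPoint: 'overrideMemoryLimitForSpill', mode: 'off'}));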
-const origParamValue = assert.commandWorked(db.adminCommand({ - getParameter: 1, - internalDocumentSourceSetWindowFieldsMaxMemoryBytes: 1 -}))["internalDocumentSourceSetWindowFieldsMaxMemoryBytes"]; +// Doc size was found through logging the size in the SpillableCache. Partition sizes were chosen +// arbitrarily. +const avgDocSize = 171; +const smallPartitionSize = 6; +const largePartitionSize = 21; const coll = db[jsTestName()]; -coll.drop(); +const admin = db.getSiblingDB("admin"); function checkProfilerForDiskWrite(dbToCheck, expectedFirstStage) { if (!FixtureHelpers.isMongos(dbToCheck)) { @@ -46,123 +46,145 @@ function resetProfiler(db) { FixtureHelpers.runCommandOnEachPrimary({db: db, cmdObj: {profile: 2}}); } -// Doc size was found through logging the size in the SpillableCache. Partition sizes were chosen -// arbitrarily. -let avgDocSize = 171; -let smallPartitionSize = 6; -let largePartitionSize = 21; -// The number 600 was chosen by observing how much memory is required for the accumulators to run -// on all windows (~1600 bytes). -setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), - "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - avgDocSize * smallPartitionSize + 600); - -seedWithTickerData(coll, 10); - -// Run $sum test with memory limits that cause spilling to disk. -testAccumAgainstGroup(coll, "$sum", 0); - -// Run a $percentile test that fails since we go over the memory limit allowed and can't spill. -let errorPipeline = [ - { - $setWindowFields: { - partitionBy: "$partition", - sortBy: {partition: 1}, - output: { - p: { - $percentile: {p: [0.9], input: "$price", method: "approximate"}, - window: {documents: [0, "unbounded"]} - } - } +function changeSpillLimit({mode, maxDocs}) { + FixtureHelpers.runCommandOnEachPrimary({ + db: admin, + cmdObj: { + configureFailPoint: 'overrideMemoryLimitForSpill', + mode: mode, + 'data': {maxDocsBeforeSpill: maxDocs} } - }, - {$sort: {_id: 1}} -]; -assert.commandFailedWithCode( - db.runCommand( - {aggregate: coll.getName(), pipeline: errorPipeline, allowDiskUse: false, cursor: {}}), - 5643011); - -// Run $percentile test with memory limits that cause spilling to disk and assert it succeeds. -// In the test suite below, we will run a query identical to the one that failed above. -resetProfiler(db); -testAccumAgainstGroup( - coll, "$percentile", [null], {p: [0.9], input: "$price", method: "approximate"}); -// Confirm that spilling did occur. -checkProfilerForDiskWrite(db, "$setWindowFields"); - -// Run $median test with memory limits that cause spilling to disk. -resetProfiler(db); -testAccumAgainstGroup(coll, "$median", null, {input: "$price", method: "approximate"}); -// Confirm that spilling did occur. -checkProfilerForDiskWrite(db, "$setWindowFields"); - -// Test that a query that spills to disk succeeds across getMore requests. -// The next test uses less memory. Reduce memory limit to ensure spilling occurs. The number 70 was -// chosen by observing how much memory is required for the test to run. 
-setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), - "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - avgDocSize * smallPartitionSize + 70); -resetProfiler(db); -const wfResults = - coll.aggregate( - [ - { - $setWindowFields: { - sortBy: {_id: 1}, - output: {res: {$sum: "$price", window: {documents: ["unbounded", 5]}}} - }, - }, - ], - {allowDiskUse: true, cursor: {batchSize: 1}}) - .toArray(); -assert.eq(wfResults.length, 20); -checkProfilerForDiskWrite(db, "$setWindowFields"); - -// Test a small, in memory, partition and a larger partition that requires spilling to disk. -coll.drop(); -// Create small partition. -for (let i = 0; i < smallPartitionSize; i++) { - assert.commandWorked(coll.insert({_id: i, val: i, partition: 1})); + }); } -// Create large partition. -for (let i = 0; i < largePartitionSize; i++) { - assert.commandWorked(coll.insert({_id: i + smallPartitionSize, val: i, partition: 2})); + +function testSingleAccumulator(accumulator, nullValue, spec) { + resetProfiler(db); + testAccumAgainstGroup(coll, accumulator, nullValue, spec); + checkProfilerForDiskWrite(db, "$setWindowFields"); } -// Run an aggregation that will keep all documents in the cache for all documents. -resetProfiler(db); -let results = - coll.aggregate( - [ - { - $setWindowFields: { - partitionBy: "$partition", - sortBy: {partition: 1}, - output: { - sum: { - $sum: "$val", - window: {documents: [-largePartitionSize, largePartitionSize]} +// Assert that spilling to disk doesn't affect the correctness of different accumulators. +function testSpillWithDifferentAccumulators() { + coll.drop(); + seedWithTickerData(coll, 10); + + // Spill to disk after 5 documents. + changeSpillLimit({mode: 'alwaysOn', maxDocs: 5}); + + testSingleAccumulator("$sum", 0, "$price"); + testSingleAccumulator( + "$percentile", [null], {p: [0.9], input: "$price", method: "approximate"}); + testSingleAccumulator("$median", null, {input: "$price", method: "approximate"}); + + // Assert that spilling works across 'getMore' commands + resetProfiler(db); + const wfResults = + coll.aggregate( + [ + { + $setWindowFields: { + sortBy: {_id: 1}, + output: {res: {$sum: "$price", window: {documents: ["unbounded", 5]}}} + }, + }, + ], + {allowDiskUse: true, cursor: {batchSize: 1}}) + .toArray(); + assert.eq(wfResults.length, 20); + checkProfilerForDiskWrite(db, "$setWindowFields"); + + // Turn off the failpoint for future tests. + changeSpillLimit({mode: 'off', maxDocs: null}); +} + +// Assert a small, in memory, partition and a larger partition that requires spilling to disk +// returns correct results. +function testSpillWithDifferentPartitions() { + // Spill to disk after 5 documents. This number should be less than 'smallPartitionSize'. + changeSpillLimit({mode: 'alwaysOn', maxDocs: 5}); + + coll.drop(); + // Create small partition. + for (let i = 0; i < smallPartitionSize; i++) { + assert.commandWorked(coll.insert({_id: i, val: i, partition: 1})); + } + // Create large partition. + for (let i = 0; i < largePartitionSize; i++) { + assert.commandWorked(coll.insert({_id: i + smallPartitionSize, val: i, partition: 2})); + } + // Run an aggregation that will keep all documents in the cache for all documents. 
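    // (The window [-largePartitionSize, largePartitionSize] spans every document in a partition,
    // and maxDocs: 5 is below both partition sizes, so both partitions take the spill path; the
    // expected full-partition sums are 15 for vals 0..5 and 210 for vals 0..20.)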
+ resetProfiler(db); + let results = + coll.aggregate( + [ + { + $setWindowFields: { + partitionBy: "$partition", + sortBy: {partition: 1}, + output: { + sum: { + $sum: "$val", + window: {documents: [-largePartitionSize, largePartitionSize]} + } } } - } - }, - {$sort: {_id: 1}} - ], - {allowDiskUse: true}) - .toArray(); -for (let i = 0; i < results.length; i++) { - if (results[i].partition === 1) { - assert.eq(results[i].sum, 15, "Unexepected result in first partition at position " + i); - } else { - assert.eq(results[i].sum, 210, "Unexepcted result in second partition at position " + i); + }, + {$sort: {_id: 1}} + ], + {allowDiskUse: true}) + .toArray(); + for (let i = 0; i < results.length; i++) { + if (results[i].partition === 1) { + assert.eq(results[i].sum, 15, "Unexpected result in first partition at position " + i); + } else { + assert.eq( + results[i].sum, 210, "Unexpected result in second partition at position " + i); + } + } + checkProfilerForDiskWrite(db, "$setWindowFields"); + + // Run an aggregation that will store too many documents in the function and force a spill. + // Spill to disk after 10 documents. + changeSpillLimit({mode: 'alwaysOn', maxDocs: 10}); + resetProfiler(db); + results = coll.aggregate( + [ + { + $setWindowFields: { + partitionBy: "$partition", + sortBy: {partition: 1}, + output: {arr: {$push: "$val", window: {documents: [-25, 25]}}} + } + }, + {$sort: {_id: 1}} + ], + {allowDiskUse: true}) + .toArray(); + checkProfilerForDiskWrite(db, "$setWindowFields"); + for (let i = 0; i < results.length; i++) { + if (results[i].partition === 1) { + assert(arrayEq(results[i].arr, [0, 1, 2, 3, 4, 5]), + "Unexpected result in first partition at position " + i); + } else { + assert( + arrayEq(results[i].arr, + [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), + "Unexpected result in second partition at position " + i); + } } + + // Turn off the failpoint for future tests. + changeSpillLimit({mode: 'off', maxDocs: null}); } -checkProfilerForDiskWrite(db, "$setWindowFields"); -// We don't execute setWindowFields in a sharded explain. -if (!FixtureHelpers.isMongos(db)) { - // Test that an explain that executes the query reports usedDisk correctly. +// Assert that 'usedDisk' is correctly set in an explain query. +function testUsedDiskAppearsInExplain() { + // Don't drop the collection, since the set up in spillWithDifferentPartitions() is valid. + + // Spill after 10 documents. This number should be bigger than the window size. + changeSpillLimit({mode: 'alwaysOn', maxDocs: 10}); + + // Run an explain query where 'usedDisk' should be true. let explainPipeline = [ { $setWindowFields: { @@ -178,126 +200,65 @@ if (!FixtureHelpers.isMongos(db)) { coll.explain("allPlansExecution").aggregate(explainPipeline, {allowDiskUse: true}), "$_internalSetWindowFields"); assert(stages[0]["usedDisk"], stages); - setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), - "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - avgDocSize * largePartitionSize * 2); - explainPipeline = [ - { - $setWindowFields: { - partitionBy: "$partition", - sortBy: {partition: 1}, - output: {arr: {$sum: "$val", window: {documents: [0, 0]}}} - } - }, - {$sort: {_id: 1}} - ]; + // Run an explain query with the default memory limit, so 'usedDisk' should be false. 
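    // (With the failpoint off, the server falls back to the default
    // internalDocumentSourceSetWindowFieldsMaxMemoryBytes limit, which is far larger than this
    // small collection, so the same pipeline no longer reports usedDisk.)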
+ changeSpillLimit({mode: 'off', maxDocs: null}); stages = getAggPlanStages( coll.explain("allPlansExecution").aggregate(explainPipeline, {allowDiskUse: true}), "$_internalSetWindowFields"); assert(!stages[0]["usedDisk"], stages); } -// Run an aggregation that will store too many documents in the function and force a spill. Set the -// memory limit to be over the size of the large partition. -setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), - "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - largePartitionSize * avgDocSize + 1); -resetProfiler(db); -results = coll.aggregate( - [ - { - $setWindowFields: { - partitionBy: "$partition", - sortBy: {partition: 1}, - output: {arr: {$push: "$val", window: {documents: [-25, 25]}}} - } - }, - {$sort: {_id: 1}} - ], - {allowDiskUse: true}) - .toArray(); -checkProfilerForDiskWrite(db, "$setWindowFields"); -for (let i = 0; i < results.length; i++) { - if (results[i].partition === 1) { - assert(arrayEq(results[i].arr, [0, 1, 2, 3, 4, 5]), - "Unexepected result in first partition at position " + i); - } else { - assert(arrayEq(results[i].arr, - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), - "Unexepcted result in second partition at position " + i); +// Assert that situations that would require a large spill successfully write to disk. +function testLargeSpill() { + coll.drop(); + + let numDocs = 1111; + let batchArr = []; + for (let docNum = 0; docNum < numDocs; docNum++) { + batchArr.push({_id: docNum, val: docNum, partition: 1}); } -} + assert.commandWorked(coll.insert(batchArr)); + // Spill to disk after 1000 documents. + changeSpillLimit({mode: 'alwaysOn', maxDocs: 1000}); -// Check that if function memory limit exceeds we fail even though the partition iterator spilled. -// $push uses about ~950 to store all the values in the second partition. -setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), - "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - avgDocSize * 2); + // Run a document window over the whole collection to keep everything in the cache. + resetProfiler(db); + const results = + coll.aggregate( + [ + { + $setWindowFields: { + sortBy: {partition: 1}, + output: {arr: {$sum: "$val", window: {documents: [-numDocs, numDocs]}}} + } + }, + {$sort: {_id: 1}} + ], + {allowDiskUse: true}) + .toArray(); + checkProfilerForDiskWrite(db, "$setWindowFields"); + // Check that the command succeeded. + assert.eq(results.length, numDocs); + for (let i = 0; i < numDocs; i++) { + assert.eq(results[i].arr, 616605, results); + } -function runExceedMemoryLimitTest(spec) { - assert.commandFailedWithCode(db.runCommand({ - aggregate: coll.getName(), - pipeline: [ - {$setWindowFields: {partitionBy: "$partition", sortBy: {partition: 1}, output: spec}}, - {$sort: {_id: 1}} - ], - allowDiskUse: true, - cursor: {} - }), - 5414201); + // Turn off the failpoint for future tests. + changeSpillLimit({mode: 'off', maxDocs: null}); } -runExceedMemoryLimitTest({arr: {$push: "$val", window: {documents: [-21, 21]}}}); -runExceedMemoryLimitTest({ - percentile: { - $percentile: {p: [0.6, 0.7], input: "$price", method: "approximate"}, - window: {documents: [-21, 21]} +// Assert that usedDisk true is set to true if spilling occurs inside $lookup subpipline. 
+function testUsedDiskInLookupPipeline() { + coll.drop(); + for (let i = 0; i < largePartitionSize; i++) { + assert.commandWorked(coll.insert({_id: i, val: i})); } -}); - -coll.drop(); -// Test that situations that would require a large spill successfully write to disk. -// Set the limit to spill after ~1000 documents since that is the batch size when we write to disk. -setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), - "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - 1000 * avgDocSize); -let numDocs = 1111; -let batchArr = []; -for (let docNum = 0; docNum < numDocs; docNum++) { - batchArr.push({_id: docNum, val: docNum, partition: 1}); -} -assert.commandWorked(coll.insert(batchArr)); -// Run a document window over the whole collection to keep everything in the cache. -resetProfiler(db); -results = - coll.aggregate( - [ - { - $setWindowFields: { - // partitionBy: "$partition", - sortBy: {partition: 1}, - output: {arr: {$sum: "$val", window: {documents: [-numDocs, numDocs]}}} - } - }, - {$sort: {_id: 1}} - ], - {allowDiskUse: true}) - .toArray(); -checkProfilerForDiskWrite(db, "$setWindowFields"); -// Check that the command succeeded. -assert.eq(results.length, numDocs); -for (let i = 0; i < numDocs; i++) { - assert.eq(results[i].arr, 616605, results); -} + // Spill to disk after 5 documents. + changeSpillLimit({mode: 'alwaysOn', maxDocs: 5}); -// Test that usedDisk true is set when spilling occurs inside $lookup subpipline. -// Lower the memory limit to ensure spilling occurs. -setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), - "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - 500); -resetProfiler(db); -coll.aggregate( + resetProfiler(db); + coll.aggregate( [ { $lookup: { @@ -313,12 +274,70 @@ coll.aggregate( }], { allowDiskUse: true, cursor: {} }) .toArray(); -checkProfilerForDiskWrite(db, "$lookup"); + checkProfilerForDiskWrite(db, "$lookup"); + + // Turn off the failpoint for future tests. + changeSpillLimit({mode: 'off', maxDocs: null}); +} + +function runSingleErrorTest({spec, errorCode, diskUse}) { + assert.commandFailedWithCode(db.runCommand({ + aggregate: coll.getName(), + pipeline: [ + {$setWindowFields: {partitionBy: "$partition", sortBy: {partition: 1}, output: spec}}, + {$sort: {_id: 1}} + ], + allowDiskUse: diskUse, + cursor: {} + }), + errorCode); +} + +// Assert that an error is raised when the pipeline exceeds the memory limit or disk use is not +// allowed. +function testErrorsWhenCantSpill() { + // Don't drop the collection, since the set up in testUsedDiskInLookupPipeline() is valid. + + const origParamValue = assert.commandWorked(db.adminCommand({ + getParameter: 1, + internalDocumentSourceSetWindowFieldsMaxMemoryBytes: 1 + }))["internalDocumentSourceSetWindowFieldsMaxMemoryBytes"]; + // Decrease the maximum memory limit allowed. $push uses about ~950 to store all the values in + // the second partition. + setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), + "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", + avgDocSize * 2); + + // Assert the pipeline errors when exceeding maximum memory, even though the data spilled. + runSingleErrorTest({ + spec: {arr: {$push: "$val", window: {documents: [-21, 21]}}}, + errorCode: 5414201, + diskUse: true + }); + // Assert the pipeline errors when exceeding the maximum memory, even though the data spilled. 
+ let percentileSpec = { + $percentile: {p: [0.6, 0.7], input: "$price", method: "approximate"}, + window: {documents: [-21, 21]} + }; + runSingleErrorTest({spec: {percentile: percentileSpec}, errorCode: 5414201, diskUse: true}); + // Assert the pipeline fails when trying to spill, but 'allowDiskUse' is set to false. + runSingleErrorTest({spec: {percentile: percentileSpec}, errorCode: 5643011, diskUse: false}); + // Reset the memory limit for other tests. + setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), + "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", + origParamValue); +} + +// Run the tests. +testSpillWithDifferentAccumulators(); +testSpillWithDifferentPartitions(); +// We don't execute setWindowFields in a sharded explain. +if (!FixtureHelpers.isMongos(db)) { + testUsedDiskAppearsInExplain(); +} +testLargeSpill(); +testUsedDiskInLookupPipeline(); +testErrorsWhenCantSpill(); -// Reset limit for other tests. -setParameterOnAllHosts(DiscoverTopology.findNonConfigNodes(db.getMongo()), - "internalDocumentSourceSetWindowFieldsMaxMemoryBytes", - origParamValue); // Reset profiler. -FixtureHelpers.runCommandOnEachPrimary({db: db, cmdObj: {profile: 0}}); -})(); +FixtureHelpers.runCommandOnEachPrimary({db: db, cmdObj: {profile: 0}}); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/stddev.js b/jstests/aggregation/sources/setWindowFields/stddev.js index caa76b4926804..7b3612afe4bb3 100644 --- a/jstests/aggregation/sources/setWindowFields/stddev.js +++ b/jstests/aggregation/sources/setWindowFields/stddev.js @@ -1,10 +1,10 @@ /** * Test that standard deviation works as a window function. */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; const coll = db[jsTestName()]; coll.drop(); @@ -38,5 +38,4 @@ let results = for (let index = 0; index < results.length; index++) { assert.eq(null, results[index].stdDevPop); assert.eq(null, results[index].stdDevSamp); -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/sum.js b/jstests/aggregation/sources/setWindowFields/sum.js index d8267a84af086..064200fec62ed 100644 --- a/jstests/aggregation/sources/setWindowFields/sum.js +++ b/jstests/aggregation/sources/setWindowFields/sum.js @@ -1,10 +1,11 @@ /** * Test that $sum works as a window function. */ -(function() { -"use strict"; +import { + seedWithTickerData, + testAccumAgainstGroup +} from "jstests/aggregation/extras/window_function_helpers.js"; -load("jstests/aggregation/extras/window_function_helpers.js"); load("jstests/aggregation/extras/utils.js"); // documentEq const coll = db[jsTestName()]; @@ -276,5 +277,4 @@ verifyResults(result, function(num, baseObj) { baseObj.mixedTypeSum += (i % 2) ? 0 : i; } return baseObj; -}); -})(); +}); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/time.js b/jstests/aggregation/sources/setWindowFields/time.js index e4f7d7cfa6c62..1e07bfebdc794 100644 --- a/jstests/aggregation/sources/setWindowFields/time.js +++ b/jstests/aggregation/sources/setWindowFields/time.js @@ -1,11 +1,6 @@ /** * Test time-based window bounds. 
*/ -(function() { -"use strict"; - -load("jstests/aggregation/extras/window_function_helpers.js"); - const coll = db.setWindowFields_time; coll.drop(); @@ -195,5 +190,4 @@ assert.commandWorked(coll.insert([ error = assert.throws(() => { run([range('unbounded', 'unbounded')]); }); -assert.commandFailedWithCode(error, 5429513); -})(); +assert.commandFailedWithCode(error, 5429513); \ No newline at end of file diff --git a/jstests/aggregation/sources/setWindowFields/window_functions_on_timeseries_coll.js b/jstests/aggregation/sources/setWindowFields/window_functions_on_timeseries_coll.js index dcfdb0ec6b410..58ac3db6d5b0d 100644 --- a/jstests/aggregation/sources/setWindowFields/window_functions_on_timeseries_coll.js +++ b/jstests/aggregation/sources/setWindowFields/window_functions_on_timeseries_coll.js @@ -11,11 +11,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage(). +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const coll = db.window_functions_on_timeseries_coll; @@ -327,5 +324,4 @@ assertExplainBehaviorAndCorrectResults( {_id: 1, rank: 1}, {_id: 5, rank: 2}, {_id: 3, rank: 3}, - ]); -})(); + ]); \ No newline at end of file diff --git a/jstests/aggregation/sources/sort/explain_sort.js b/jstests/aggregation/sources/sort/explain_sort.js index 7d76527e1e6ec..f196683b3c187 100644 --- a/jstests/aggregation/sources/sort/explain_sort.js +++ b/jstests/aggregation/sources/sort/explain_sort.js @@ -5,10 +5,7 @@ // # Asserts on the number of documents examined in an explain plan. // assumes_no_implicit_index_creation // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). +import {getAggPlanStages, isQueryPlan} from "jstests/libs/analyze_plan.js"; const coll = db.explain_sort; coll.drop(); @@ -69,5 +66,4 @@ for (let verbosity of ["queryPlanner", "executionStats", "allPlansExecution"]) { pipeline = [{$project: {_id: 1}}, {$limit: 5}]; checkResults(coll.explain(verbosity).aggregate(pipeline), verbosity, optimizeDisabled ? 10 : 5); -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/sources/unionWith/unionWith_explain.js b/jstests/aggregation/sources/unionWith/unionWith_explain.js index a1db4d975e74c..e1ca53d7d5b42 100644 --- a/jstests/aggregation/sources/unionWith/unionWith_explain.js +++ b/jstests/aggregation/sources/unionWith/unionWith_explain.js @@ -6,12 +6,9 @@ * ] */ -(function() { -"use strict"; load("jstests/aggregation/extras/utils.js"); // arrayEq, documentEq load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
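The same load()-to-import conversion recurs in the remaining files. For reference, a minimal sketch of the resulting module-style test skeleton (the collection name here is made up for illustration; the helper import is real):

import {getAggPlanStage} from "jstests/libs/analyze_plan.js";

// Module files get their own scope, so the old (function() { "use strict"; ... })() wrapper and
// the load() calls for ES-module helpers are no longer needed.
const coll = db.es_module_skeleton_example;  // Hypothetical collection name.
coll.drop();
assert.commandWorked(coll.insert({_id: 0, val: 1}));

// Explain helpers are imported by name instead of being pulled into the global scope by load().
const explain = coll.explain().aggregate([{$match: {val: 1}}]);
jsTestLog(tojson(getAggPlanStage(explain, "COLLSCAN")));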
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const testDB = db.getSiblingDB(jsTestName()); const collA = testDB.A; @@ -114,8 +111,8 @@ function assertExplainEq(union, regular) { } else { assert(false, "Don't know how to compare following explains.\n" + - "regular: " + tojson(regularExplain) + "\n" + - "union: " + tojson(unionSubExplain) + "\n"); + "regular: " + tojson(regular) + "\n" + + "union: " + tojson(union) + "\n"); } } @@ -259,5 +256,4 @@ if (!res["failpoint.disablePipelineOptimization"].mode) { .aggregate([{$unionWith: indexedColl.getName()}, {$match: {val: {$gt: 2}}}]); expectedResult = indexedColl.explain("executionStats").aggregate([{$match: {val: {$gt: 2}}}]); assertExplainMatch(result, expectedResult); -} -})(); +} \ No newline at end of file diff --git a/jstests/aggregation/spill_to_disk.js b/jstests/aggregation/spill_to_disk.js index 8d37777c75b12..445e097ad7ab7 100644 --- a/jstests/aggregation/spill_to_disk.js +++ b/jstests/aggregation/spill_to_disk.js @@ -12,14 +12,11 @@ // requires_pipeline_optimization, // requires_persistence, // ] -(function() { -'use strict'; - load("jstests/aggregation/extras/utils.js"); load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers' load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. -load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const coll = db.spill_to_disk; coll.drop(); @@ -564,4 +561,3 @@ const oldMemSettings = setHashLookupParameters(oldMemSettings); } })(); -})(); diff --git a/jstests/aggregation/split_match_and_swap_with_sort.js b/jstests/aggregation/split_match_and_swap_with_sort.js index 53b5bcf97d427..ecc382adda71d 100644 --- a/jstests/aggregation/split_match_and_swap_with_sort.js +++ b/jstests/aggregation/split_match_and_swap_with_sort.js @@ -11,10 +11,7 @@ // # Don't disable the thing we are specifically testing for! // requires_pipeline_optimization, // ] -load('jstests/libs/analyze_plan.js'); - -(function() { -"use strict"; +import {getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js"; const coll = db.getSiblingDB("split_match_and_swap_with_sort")["test"]; coll.drop(); @@ -66,4 +63,3 @@ assert.commandWorked( collScanStage.filter, collScanStage); } -}()); \ No newline at end of file diff --git a/jstests/aggregation/unwind.js b/jstests/aggregation/unwind.js index ffd2a3da9c68c..7db316de1e1bc 100644 --- a/jstests/aggregation/unwind.js +++ b/jstests/aggregation/unwind.js @@ -1,6 +1,6 @@ // SERVER-8088: test $unwind with a scalar -t = db.agg_unwind; +let t = db.agg_unwind; t.drop(); t.insert({_id: 1}); diff --git a/jstests/aggregation/use_query_project_and_sort.js b/jstests/aggregation/use_query_project_and_sort.js index dbe5b6a9f6f92..f65e6ce61efbe 100644 --- a/jstests/aggregation/use_query_project_and_sort.js +++ b/jstests/aggregation/use_query_project_and_sort.js @@ -7,10 +7,7 @@ // @tags: [ // do_not_wrap_aggregations_in_facets, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers. 
+import {hasRejectedPlans, isQueryPlan, planHasStage} from "jstests/libs/analyze_plan.js"; const coll = db.use_query_project_and_sort; coll.drop(); @@ -58,5 +55,4 @@ assertQueryCoversProjectionAndSort( assertQueryCoversProjectionAndSort( [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 0, a: 1, x: 1}}]); assertQueryCoversProjectionAndSort( - [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, x: 1, a: 1}}]); -}()); + [{$match: {x: "string"}}, {$sort: {x: 1, a: -1, _id: 1}}, {$project: {_id: 1, x: 1, a: 1}}]); \ No newline at end of file diff --git a/jstests/aggregation/use_query_projection.js b/jstests/aggregation/use_query_projection.js index 1f92b30d7b346..15cc420c85bc5 100644 --- a/jstests/aggregation/use_query_projection.js +++ b/jstests/aggregation/use_query_projection.js @@ -7,10 +7,13 @@ // @tags: [ // do_not_wrap_aggregations_in_facets, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers. +import { + aggPlanHasStage, + hasRejectedPlans, + isAggregationPlan, + isQueryPlan, + planHasStage, +} from "jstests/libs/analyze_plan.js"; const coll = db.use_query_projection; coll.drop(); @@ -91,5 +94,4 @@ assert.commandWorked(coll.insert({x: ["an", "array!"]})); assertQueryDoesNotCoverProjection( {pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1}}]}); assertQueryDoesNotCoverProjection( - {pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}]}); -}()); + {pipeline: [{$match: {x: "string"}}, {$project: {_id: 1, x: 1, a: 1}}]}); \ No newline at end of file diff --git a/jstests/aggregation/use_query_sort.js b/jstests/aggregation/use_query_sort.js index ca8c6f3bd7703..4c601f49f3573 100644 --- a/jstests/aggregation/use_query_sort.js +++ b/jstests/aggregation/use_query_sort.js @@ -6,10 +6,13 @@ // @tags: [ // do_not_wrap_aggregations_in_facets, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'aggPlanHasStage' and other explain helpers. +import { + aggPlanHasStage, + hasRejectedPlans, + isAggregationPlan, + isQueryPlan, + planHasStage, +} from "jstests/libs/analyze_plan.js"; const coll = db.use_query_sort; coll.drop(); @@ -86,5 +89,4 @@ assertHasBlockingQuerySort( // Verify that meta-sort on "randVal" can be pushed into the query layer. Although "randVal" $meta // sort is currently a supported way to randomize the order of the data, it shouldn't preclude // pushdown of the sort into the plan stage layer. -assertHasBlockingQuerySort([{$sort: {key: {$meta: "randVal"}}}], false); -}()); +assertHasBlockingQuerySort([{$sort: {key: {$meta: "randVal"}}}], false); \ No newline at end of file diff --git a/jstests/auth/auth1.js b/jstests/auth/auth1.js index 65e12f0442561..e784fcd1e0bab 100644 --- a/jstests/auth/auth1.js +++ b/jstests/auth/auth1.js @@ -5,6 +5,7 @@ // Multiple users cannot be authenticated on one connection within a session. 
TestData.disableImplicitSessions = true; +let baseName; function setupTest() { print("START auth1.js"); baseName = "jstests_auth_auth1"; @@ -16,14 +17,14 @@ function setupTest() { function runTest(m) { // these are used by read-only user db = m.getDB("test"); - mro = new Mongo(m.host); - dbRO = mro.getDB("test"); - tRO = dbRO[baseName]; + let mro = new Mongo(m.host); + let dbRO = mro.getDB("test"); + let tRO = dbRO[baseName]; db.getSiblingDB("admin").createUser({user: "root", pwd: "root", roles: ["root"]}); db.getSiblingDB("admin").auth("root", "root"); - t = db[baseName]; + let t = db[baseName]; t.drop(); db.dropAllUsers(); @@ -51,7 +52,7 @@ function runTest(m) { assert(!db.auth("eliot", "eliot"), "auth succeeded with wrong password"); assert(db.auth("eliot", "eliot2"), "auth failed"); - for (i = 0; i < 1000; ++i) { + for (let i = 0; i < 1000; ++i) { t.save({i: i}); } assert.eq(1000, t.count(), "A1"); diff --git a/jstests/auth/auth2.js b/jstests/auth/auth2.js index baf57753cef0d..077acf07c0f2c 100644 --- a/jstests/auth/auth2.js +++ b/jstests/auth/auth2.js @@ -1,6 +1,6 @@ // test read/write permissions -m = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"}); +let m = MongoRunner.runMongod({auth: "", bind_ip: "127.0.0.1"}); db = m.getDB("admin"); // These statements throw because the localhost exception does not allow @@ -26,7 +26,7 @@ assert.throws(function() { db.auth("eliot", "eliot"); -users = db.getCollection("system.users"); +let users = db.getCollection("system.users"); assert.eq(1, users.count()); db.shutdownServer(); diff --git a/jstests/auth/authn_session_abandoned.js b/jstests/auth/authn_session_abandoned.js new file mode 100644 index 0000000000000..d5298ecbe02eb --- /dev/null +++ b/jstests/auth/authn_session_abandoned.js @@ -0,0 +1,31 @@ +// Test for auth counters in serverStatus. + +(function() { +'use strict'; +load('jstests/libs/parallel_shell_helpers.js'); + +const kFailedToAuthMsgId = 5286307; + +const mongod = MongoRunner.runMongod(); + +try { + mongod.getDB("admin").createUser( + {"user": "admin", "pwd": "pwd", roles: ['root'], mechanisms: ["SCRAM-SHA-256"]}); + + const shellCmd = () => { + // base64 encoded: 'n,,n=admin,r=deadbeefcafeba11'; + const kClientPayload = 'biwsbj1hZG1pbixyPWRlYWRiZWVmY2FmZWJhMTE='; + + db.getSiblingDB("admin").runCommand( + {saslStart: 1, mechanism: "SCRAM-SHA-256", payload: kClientPayload}); + }; + + startParallelShell(shellCmd, mongod.port)(); + + assert.soon(() => checkLog.checkContainsOnceJson( + mongod, kFailedToAuthMsgId, {"result": ErrorCodes.AuthenticationAbandoned})); + +} finally { + MongoRunner.stopMongod(mongod); +} +})(); diff --git a/jstests/auth/bulk_write_mongod.js b/jstests/auth/bulk_write_mongod.js index 987a48ff47b3d..5a072a5bda51f 100644 --- a/jstests/auth/bulk_write_mongod.js +++ b/jstests/auth/bulk_write_mongod.js @@ -1,12 +1,8 @@ /* * Auth test for the bulkWrite command on mongods. 
*/ -(function() { -'use strict'; - -load("jstests/auth/lib/bulk_write_base.js"); +import {runTest} from "jstests/auth/lib/bulk_write_base.js"; const mongod = MongoRunner.runMongod({auth: ""}); runTest(mongod); MongoRunner.stopMongod(mongod); -})(); diff --git a/jstests/auth/change_stream_change_collection_role_auth.js b/jstests/auth/change_stream_change_collection_role_auth.js index 4d174543d66e0..4ed7464cdc898 100644 --- a/jstests/auth/change_stream_change_collection_role_auth.js +++ b/jstests/auth/change_stream_change_collection_role_auth.js @@ -6,7 +6,8 @@ * assumes_read_preference_unchanged, * requires_replication, * requires_fcv_62, - * __TEMPORARILY_DISABLED__ + * # TODO SERVER-74811: Re-enable this test. + * __TEMPORARILY_DISABLED__, * ] */ (function() { diff --git a/jstests/auth/change_stream_pre_image_coll_role_auth.js b/jstests/auth/change_stream_pre_image_coll_role_auth.js index fcbeec157b624..a32eb716614fe 100644 --- a/jstests/auth/change_stream_pre_image_coll_role_auth.js +++ b/jstests/auth/change_stream_pre_image_coll_role_auth.js @@ -199,4 +199,4 @@ assertActionAuthorized(removePreImage.bind(null, rootPrimary), true); rootPrimary.logout(); replSetTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/auth/check_metadata_consistency.js b/jstests/auth/check_metadata_consistency.js deleted file mode 100644 index 84dd5f89e88f2..0000000000000 --- a/jstests/auth/check_metadata_consistency.js +++ /dev/null @@ -1,207 +0,0 @@ -/* - * Tests to validate the privileges of checkMetadataConsistency command. - * - * @tags: [ - * featureFlagCheckMetadataConsistency, - * requires_fcv_70, - * # TODO SERVER-74445: Remove tag once the command will be compatible with catalog shard - * temporary_catalog_shard_incompatible, - * ] - */ - -(function() { -"use strict"; - -const kClusterLevel = "clusterLevel"; -const kDatabaseLevel = "databaseLevel"; -const kCollectionLevel = "collectionLevel"; - -// Helper function to assert that the checkMetadataConsistency command succeeds -function assertAuthCommandWorked(adminDb, conn, user, level) { - assert(adminDb.logout()); - assert(adminDb.auth(user, "pwd")); - const cmd = () => { - if (level === kClusterLevel || level === kDatabaseLevel) { - return conn.checkMetadataConsistency().toArray(); - } else { - return conn.coll.checkMetadataConsistency().toArray(); - } - }; - const inconsistencies = cmd(); - assert.eq(1, inconsistencies.length); - assert.eq("MisplacedCollection", inconsistencies[0].type); -} - -// Helper function to assert that the checkMetadataConsistency command fails -function assertAuthCommandFailed(adminDb, conn, user, level) { - assert(adminDb.logout()); - assert(adminDb.auth(user, "pwd")); - - const cmd = () => { - if (level === kClusterLevel || level === kDatabaseLevel) { - return conn.runCommand({checkMetadataConsistency: 1}); - } else { - return conn.runCommand({checkMetadataConsistency: "coll"}); - } - }; - - assert.commandFailedWithCode( - cmd(), - ErrorCodes.Unauthorized, - "user should no longer have privileges to execute checkMetadataConsistency command."); -} - -// Configure initial sharding cluster -const st = new ShardingTest({keyFile: "jstests/libs/key1", useHostname: false}); - -const shardAdmin = st.shard0.getDB("admin"); -shardAdmin.createUser({user: "admin", pwd: "x", roles: ["root"]}); -shardAdmin.auth("admin", "x"); - -const adminDb = st.s.getDB("admin"); -adminDb.createUser({user: "admin", pwd: "x", roles: ["root"]}); -adminDb.auth("admin", "x"); - -const dbName = "testCheckMetadataConsistencyDB"; 
-const db = st.s.getDB(dbName); - -// Insert a hidden unsharded collection inconsistency. -assert.commandWorked( - adminDb.adminCommand({enableSharding: dbName, primaryShard: st.shard1.shardName})); -assert.commandWorked( - st.shard0.getDB(dbName).runCommand({insert: "coll", documents: [{_id: "foo"}]})); - -(function createRolesToTest() { - assert.commandWorked(adminDb.runCommand({ - createRole: "clusterLevelRole", - roles: [], - privileges: [{resource: {cluster: true}, actions: ["checkMetadataConsistency"]}] - })); - - assert.commandWorked(adminDb.runCommand({ - createRole: "databaseLevelRole", - roles: [], - privileges: - [{resource: {db: dbName, collection: ""}, actions: ["checkMetadataConsistency"]}] - })); - - assert.commandWorked(adminDb.runCommand({ - createRole: "collectionLevelRole", - roles: [], - privileges: - [{resource: {db: dbName, collection: "coll"}, actions: ["checkMetadataConsistency"]}] - })); - - assert.commandWorked(adminDb.runCommand({ - createRole: "roleWithAllNonSystemCollectionsPrivileges", - roles: [], - privileges: [{resource: {db: "", collection: ""}, actions: ["checkMetadataConsistency"]}] - })); - - assert.commandWorked(adminDb.runCommand({ - createRole: "roleWithNotRelatedAction", - roles: [], - privileges: [{resource: {cluster: true}, actions: ["allCollectionStats"]}] - })); -})(); - -(function createUsersToTest() { - assert.commandWorked(adminDb.runCommand({ - createUser: "clusterManagerUser", - pwd: "pwd", - roles: [{role: "clusterManager", db: "admin"}] - })); - - assert.commandWorked(adminDb.runCommand({ - createUser: "clusterAdminUser", - pwd: "pwd", - roles: [{role: "clusterAdmin", db: "admin"}] - })); - - assert.commandWorked(adminDb.runCommand({ - createUser: "userWithClusterLevelRole", - pwd: "pwd", - roles: [{role: "clusterLevelRole", db: "admin"}] - })); - - assert.commandWorked(adminDb.runCommand({ - createUser: "userWithDatabaseLevelRole", - pwd: "pwd", - roles: [{role: "databaseLevelRole", db: "admin"}] - })); - - assert.commandWorked(adminDb.runCommand({ - createUser: "userWithCollectionLevelRole", - pwd: "pwd", - roles: [{role: "collectionLevelRole", db: "admin"}] - })); - - assert.commandWorked(adminDb.runCommand({ - createUser: "userWithAllNonSystemCollectionsPrivileges", - pwd: "pwd", - roles: [{role: "roleWithAllNonSystemCollectionsPrivileges", db: "admin"}] - })); - - assert.commandWorked(adminDb.runCommand({ - createUser: "userWithUnrelatedRole", - pwd: "pwd", - roles: [{role: "hostManager", db: "admin"}] - })); - - assert.commandWorked(adminDb.runCommand({ - createUser: "userWithUnrelatedAction", - pwd: "pwd", - roles: [{role: "roleWithNotRelatedAction", db: "admin"}] - })); - - assert.commandWorked( - adminDb.runCommand({createUser: "userWithNoRoles", pwd: "pwd", roles: []})); -})(); - -shardAdmin.logout(); -adminDb.logout(); - -(function testClusterLevelModePrivileges() { - assertAuthCommandWorked(adminDb, adminDb, "clusterManagerUser", kClusterLevel); - assertAuthCommandWorked(adminDb, adminDb, "clusterAdminUser", kClusterLevel); - assertAuthCommandWorked(adminDb, adminDb, "userWithClusterLevelRole", kClusterLevel); - - assertAuthCommandFailed( - adminDb, adminDb, "userWithAllNonSystemCollectionsPrivileges", kClusterLevel); - assertAuthCommandFailed(adminDb, adminDb, "userWithDatabaseLevelRole", kClusterLevel); - assertAuthCommandFailed(adminDb, adminDb, "userWithCollectionLevelRole", kClusterLevel); - assertAuthCommandFailed(adminDb, adminDb, "userWithUnrelatedAction", kClusterLevel); - assertAuthCommandFailed(adminDb, adminDb, 
"userWithUnrelatedRole", kClusterLevel); - assertAuthCommandFailed(adminDb, adminDb, "userWithNoRoles", kClusterLevel); -})(); - -(function testDatabaseLevelModePrivileges() { - assertAuthCommandWorked(adminDb, db, "clusterManagerUser", kDatabaseLevel); - assertAuthCommandWorked(adminDb, db, "clusterAdminUser", kDatabaseLevel); - assertAuthCommandWorked(adminDb, db, "userWithClusterLevelRole", kDatabaseLevel); - assertAuthCommandWorked(adminDb, db, "userWithDatabaseLevelRole", kDatabaseLevel); - assertAuthCommandWorked( - adminDb, db, "userWithAllNonSystemCollectionsPrivileges", kDatabaseLevel); - - assertAuthCommandFailed(adminDb, db, "userWithCollectionLevelRole", kDatabaseLevel); - assertAuthCommandFailed(adminDb, db, "userWithUnrelatedAction", kDatabaseLevel); - assertAuthCommandFailed(adminDb, db, "userWithUnrelatedRole", kDatabaseLevel); - assertAuthCommandFailed(adminDb, db, "userWithNoRoles", kDatabaseLevel); -})(); - -(function testCollectionLevelModePrivileges() { - assertAuthCommandWorked(adminDb, db, "clusterManagerUser", kCollectionLevel); - assertAuthCommandWorked(adminDb, db, "clusterAdminUser", kCollectionLevel); - assertAuthCommandWorked(adminDb, db, "userWithClusterLevelRole", kCollectionLevel); - assertAuthCommandWorked(adminDb, db, "userWithDatabaseLevelRole", kCollectionLevel); - assertAuthCommandWorked(adminDb, db, "userWithCollectionLevelRole", kCollectionLevel); - assertAuthCommandWorked( - adminDb, db, "userWithAllNonSystemCollectionsPrivileges", kCollectionLevel); - - assertAuthCommandFailed(adminDb, db, "userWithUnrelatedAction", kCollectionLevel); - assertAuthCommandFailed(adminDb, db, "userWithUnrelatedRole", kCollectionLevel); - assertAuthCommandFailed(adminDb, db, "userWithNoRoles", kCollectionLevel); -})(); - -st.stop(); -})(); diff --git a/jstests/auth/currentop_cursors_auth.js b/jstests/auth/currentop_cursors_auth.js index b51325de61a76..d0fc30f236ff1 100644 --- a/jstests/auth/currentop_cursors_auth.js +++ b/jstests/auth/currentop_cursors_auth.js @@ -21,16 +21,22 @@ Random.setRandomSeed(); const pass = "a" + Random.rand(); // Create one root user and one regular user on the given connection. -function createUsers(conn) { +function createUsers(conn, grantDirectShardOperationsRole) { const adminDB = conn.getDB("admin"); + adminDB.createUser({user: "ted", pwd: pass, roles: ["root"]}); assert(adminDB.auth("ted", pass), "Authentication 1 Failed"); - adminDB.createUser({user: "yuta", pwd: pass, roles: ["readWriteAnyDatabase"]}); + + let yutaRoles = ["readWriteAnyDatabase"]; + if (grantDirectShardOperationsRole) + yutaRoles.push("directShardOperations"); + + adminDB.createUser({user: "yuta", pwd: pass, roles: yutaRoles}); } // Create the necessary users at both cluster and shard-local level. -createUsers(shardConn); -createUsers(mongosConn); +createUsers(shardConn, /* grantDirectShardOperationsRole */ true); +createUsers(mongosConn, /* grantDirectShardOperationsRole */ false); // Run the various auth tests on the given shard or mongoS connection. function runCursorTests(conn) { diff --git a/jstests/auth/getMore.js b/jstests/auth/getMore.js index d7588d73dc648..8480568cd4c1f 100644 --- a/jstests/auth/getMore.js +++ b/jstests/auth/getMore.js @@ -8,9 +8,6 @@ TestData.disableImplicitSessions = true; function runTest(conn) { const adminDB = conn.getDB("admin"); - const hello = adminDB.runCommand("hello"); - assert.commandWorked(hello); - const isMongos = (hello.msg === "isdbgrid"); // Create the admin user. 
assert.commandWorked(adminDB.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]})); diff --git a/jstests/auth/iteration_count_control.js b/jstests/auth/iteration_count_control.js index d003347bdbc2d..0111ecf3edd98 100644 --- a/jstests/auth/iteration_count_control.js +++ b/jstests/auth/iteration_count_control.js @@ -3,7 +3,7 @@ (function() { 'use strict'; -load('./jstests/multiVersion/libs/auth_helpers.js'); +load('jstests/multiVersion/libs/auth_helpers.js'); const conn = MongoRunner.runMongod({auth: ''}); const adminDB = conn.getDB('admin'); diff --git a/jstests/auth/killop_own_ops.js b/jstests/auth/killop_own_ops.js index ae1058bca4641..d31d4629cdc04 100644 --- a/jstests/auth/killop_own_ops.js +++ b/jstests/auth/killop_own_ops.js @@ -4,7 +4,8 @@ * Theory of operation: Create a long running operation from a user which does not have the killOp * or inProg privileges. Using the same user, run currentOp to get the opId, and then run killOp * against it. - * @tags: [requires_sharding] + * TODO SERVER-78101: Investigate the test failure and re-enable the test with CQF enabled. + * @tags: [requires_sharding, cqf_incompatible] */ (function() { diff --git a/jstests/auth/lib/automated_idp_authn_simulator.py b/jstests/auth/lib/automated_idp_authn_simulator.py deleted file mode 100644 index dc34d294bb286..0000000000000 --- a/jstests/auth/lib/automated_idp_authn_simulator.py +++ /dev/null @@ -1,92 +0,0 @@ -#! /usr/bin/env python3 -""" -Simulates a human authenticating to an identity provider on the Web, specifically with the -device authorization grant flow. - -Given a device authorization endpoint, a username, and a file with necessary setup information, it -will simulate automatically logging in as a human would. - -""" -import argparse -import os -import json - -import geckodriver_autoinstaller -from pathlib import Path -from selenium import webdriver -from selenium.webdriver.common.by import By -from selenium.webdriver.firefox.options import Options -from selenium.webdriver.support import expected_conditions as EC -from selenium.webdriver.support.ui import WebDriverWait - -def authenticate_okta(activation_endpoint, username, test_credentials): - # Install GeckoDriver if needed. - geckodriver_autoinstaller.install() - - # Launch headless Firefox to the device authorization endpoint. - firefox_options = Options() - firefox_options.add_argument('-headless') - driver = webdriver.Firefox(options=firefox_options) - driver.get(activation_endpoint) - - try: - # User code will be pre-populated, so wait for next button to load and click. - next_button = WebDriverWait(driver, 30).until( - EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Next']")) - ) - next_button.click() - - # Wait for the username prompt and next button to load. - username_input_box = WebDriverWait(driver, 30).until( - EC.presence_of_element_located((By.XPATH, "//input[@name='username']")) - ) - next_button = WebDriverWait(driver, 30).until( - EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Next']")) - ) - - # Enter username. - username_input_box.send_keys(username) - next_button.click() - - # Wait for the password prompt and next button to load. 
- password_input_box = WebDriverWait(driver, 30).until( - EC.presence_of_element_located((By.XPATH, "//input[@name='password']")) - ) - verify_button = WebDriverWait(driver, 30).until( - EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Sign In']")) - ) - - # Enter password. - password_input_box.send_keys(test_credentials[username]) - verify_button.click() - - # Assert that the landing page contains the "Device activated" text, indicating successful auth. - landing_header = WebDriverWait(driver, 30).until( - EC.presence_of_element_located((By.XPATH, "//h2[@class='okta-form-title o-form-head'][contains(text(), 'Device activated')]")) - ) - assert landing_header is not None - - except Exception as e: - print(e) - else: - print('Success') - finally: - driver.quit() - -def main(): - parser = argparse.ArgumentParser(description='Okta Automated Authentication Simulator') - - parser.add_argument('-e', '--activationEndpoint', type=str, help="Endpoint to start activation at with code filled in") - parser.add_argument('-u', '--username', type=str, help="Username to authenticate as") - parser.add_argument('-s', '--setupFile', type=str, help="File containing information generated during test setup, relative to home directory") - - args = parser.parse_args() - - with open(Path.home() / args.setupFile) as setup_file: - setup_information = json.load(setup_file) - assert args.username in setup_information - - authenticate_okta(args.activationEndpoint, args.username, setup_information) - -if __name__ == '__main__': - main() diff --git a/jstests/auth/lib/automated_idp_authn_simulator_azure.py b/jstests/auth/lib/automated_idp_authn_simulator_azure.py new file mode 100644 index 0000000000000..f931981a3ae68 --- /dev/null +++ b/jstests/auth/lib/automated_idp_authn_simulator_azure.py @@ -0,0 +1,106 @@ +#! /usr/bin/env python3 +""" +Simulates a human authenticating to azure on the Web, specifically with the +device authorization grant flow. + +Given a device authorization endpoint, a username, a user code and a file with necessary setup information, it +will simulate automatically logging in as a human would. + +""" +import argparse +import os +import json + +import geckodriver_autoinstaller +from pathlib import Path +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.firefox.options import Options +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support.ui import WebDriverWait + +def authenticate_azure(activation_endpoint, userCode, username, test_credentials): + # Install GeckoDriver if needed. + geckodriver_autoinstaller.install() + + # Launch headless Firefox to the device authorization endpoint. + firefox_options = Options() + firefox_options.add_argument('-headless') + driver = webdriver.Firefox(options=firefox_options) + driver.get(activation_endpoint) + + try: + # User code will be added to the input box. + # Wait for the user code prompt and next button to load. + user_code_input_box = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@name='otc']")) + ) + next_button = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@type='submit'][@value='Next']")) + ) + + # Enter usercode. + user_code_input_box.send_keys(userCode) + next_button.click() + + # Wait for the username prompt and next button to load. 
+ username_input_box = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@name='loginfmt']")) + ) + next_button = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@type='submit'][@value='Next']")) + ) + + # Enter username. + username_input_box.send_keys(username) + next_button.click() + + # Wait for the password prompt and next button to load. + password_input_box = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@name='passwd'][@placeholder='Password']")) + ) + verify_button = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@type='submit'][@value='Sign in']")) + ) + + # Enter password. + password_input_box.send_keys(test_credentials[username]) + verify_button.click() + + # Assert 'Are you trying to sign in to OIDC_EVG_TESTING?' message. + continue_button = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@type='submit'][@value='Continue']")) + ) + continue_button.click() + + # Assert that the landing page contains the "You have signed in to the OIDC_EVG_TESTING application on your device" text, indicating successful auth. + landing_header = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//p[@id='message'][@class='text-block-body no-margin-top']")) + ) + assert landing_header is not None and "You have signed in" in landing_header.text + + except Exception as e: + print(e) + else: + print('Success') + finally: + driver.quit() + +def main(): + parser = argparse.ArgumentParser(description='Azure Automated Authentication Simulator') + + parser.add_argument('-e', '--activationEndpoint', type=str, help="Endpoint to start activation at") + parser.add_argument('-c', '--userCode', type=str, help="Code to be added in the endpoint to authenticate") + parser.add_argument('-u', '--username', type=str, help="Username to authenticate as") + parser.add_argument('-s', '--setupFile', type=str, help="File containing information generated during test setup, relative to home directory") + + args = parser.parse_args() + + with open(Path.home() / args.setupFile) as setup_file: + setup_information = json.load(setup_file) + assert args.username in setup_information + + authenticate_azure(args.activationEndpoint, args.userCode, args.username, setup_information) + +if __name__ == '__main__': + main() diff --git a/jstests/auth/lib/automated_idp_authn_simulator_okta.py b/jstests/auth/lib/automated_idp_authn_simulator_okta.py new file mode 100644 index 0000000000000..208ebe530272a --- /dev/null +++ b/jstests/auth/lib/automated_idp_authn_simulator_okta.py @@ -0,0 +1,99 @@ +#! /usr/bin/env python3 +""" +Simulates a human authenticating to an identity provider on the Web, specifically with the +device authorization grant flow. + +Given a device authorization endpoint, a username, and a file with necessary setup information, it +will simulate automatically logging in as a human would. + +""" +import argparse +import os +import json + +import geckodriver_autoinstaller +from pathlib import Path +from selenium import webdriver +from selenium.webdriver.common.by import By +from selenium.webdriver.firefox.options import Options +from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support.ui import WebDriverWait + +def authenticate_okta(activation_endpoint, userCode, username, test_credentials): + # Install GeckoDriver if needed. 
+ geckodriver_autoinstaller.install() + + # Launch headless Firefox to the device authorization endpoint. + firefox_options = Options() + firefox_options.add_argument('-headless') + driver = webdriver.Firefox(options=firefox_options) + driver.get(activation_endpoint) + + try: + # Wait for activation code input box and next button to load and click. + activationCode_input_box = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@name='userCode']")) + ) + next_button = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Next']")) + ) + + # Enter user activation code. + activationCode_input_box.send_keys(userCode) + next_button.click() + + # Wait for the username prompt and next button to load. + username_input_box = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@name='username']")) + ) + next_button = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Next']")) + ) + + # Enter username. + username_input_box.send_keys(username) + next_button.click() + + # Wait for the password prompt and next button to load. + password_input_box = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@name='password']")) + ) + verify_button = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//input[@class='button button-primary'][@value='Sign In']")) + ) + + # Enter password. + password_input_box.send_keys(test_credentials[username]) + verify_button.click() + + # Assert that the landing page contains the "Device activated" text, indicating successful auth. + landing_header = WebDriverWait(driver, 30).until( + EC.presence_of_element_located((By.XPATH, "//h2[@class='okta-form-title o-form-head'][contains(text(), 'Device activated')]")) + ) + assert landing_header is not None + + except Exception as e: + print(e) + else: + print('Success') + finally: + driver.quit() + +def main(): + parser = argparse.ArgumentParser(description='Okta Automated Authentication Simulator') + + parser.add_argument('-e', '--activationEndpoint', type=str, help="Endpoint to start activation at") + parser.add_argument('-c', '--userCode', type=str, help="Code to be added in the endpoint to authenticate") + parser.add_argument('-u', '--username', type=str, help="Username to authenticate as") + parser.add_argument('-s', '--setupFile', type=str, help="File containing information generated during test setup, relative to home directory") + + args = parser.parse_args() + + with open(Path.home() / args.setupFile) as setup_file: + setup_information = json.load(setup_file) + assert args.username in setup_information + + authenticate_okta(args.activationEndpoint, args.userCode, args.username, setup_information) + +if __name__ == '__main__': + main() diff --git a/jstests/auth/lib/bulk_write_base.js b/jstests/auth/lib/bulk_write_base.js index eeb851dea5e3a..fdca6bbff83a4 100644 --- a/jstests/auth/lib/bulk_write_base.js +++ b/jstests/auth/lib/bulk_write_base.js @@ -1,10 +1,8 @@ -'use strict'; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; // Auth test the BulkWrite command. // These test cover privilege combination scenarios that commands_lib.js format cannot. 
-function runTest(mongod) { - load("jstests/libs/feature_flag_util.js"); - +export function runTest(mongod) { const admin = mongod.getDB('admin'); admin.createUser({user: 'admin', pwd: 'pass', roles: jsTest.adminUserRoles}); assert(admin.auth('admin', 'pass')); diff --git a/jstests/auth/lib/commands_lib.js b/jstests/auth/lib/commands_lib.js index de2782a5ac251..fc9cc64b359b7 100644 --- a/jstests/auth/lib/commands_lib.js +++ b/jstests/auth/lib/commands_lib.js @@ -425,29 +425,23 @@ export const authCommandsLib = { ] }, { - testname: 'transitionToCatalogShard', - command: {transitionToCatalogShard: 1}, + testname: 'transitionFromDedicatedConfigServer', + command: {transitionFromDedicatedConfigServer: 1}, skipUnlessSharded: true, - skipTest: (conn) => { - return !TestData.setParameters.featureFlagCatalogShard; - }, testcases: [ { - runOnDb: adminDbName, - roles: roles_clusterManager, - privileges: [{resource: {cluster: true}, actions: ["transitionToCatalogShard"]}] + runOnDb: adminDbName, + roles: roles_clusterManager, + privileges: [{resource: {cluster: true}, actions: ["transitionFromDedicatedConfigServer"]}] }, {runOnDb: firstDbName, roles: {}}, {runOnDb: secondDbName, roles: {}} ] }, { - testname: "_configsvrTransitionToCatalogShard", - command: {_configsvrTransitionToCatalogShard: 1}, + testname: "_configsvrTransitionFromDedicatedConfigServer", + command: {_configsvrTransitionFromDedicatedConfigServer: 1}, skipSharded: true, - skipTest: (conn) => { - return !TestData.setParameters.featureFlagCatalogShard; - }, testcases: [ { runOnDb: adminDbName, @@ -3074,6 +3068,43 @@ export const authCommandsLib = { } ] }, + { + testname: "cleanupStructuredEncryptionData", + command: {cleanupStructuredEncryptionData: "foo", cleanupTokens : {}}, + skipSharded: true, + skipUnlessReplicaSet: true, + setup: function(db) { + assert.commandWorked(db.createCollection("foo", { + encryptedFields: { + "fields": [ + { + "path": "firstName", + "keyId": UUID("11d58b8a-0c6c-4d69-a0bd-70c6d9befae9"), + "bsonType": "string", + "queries": {"queryType": "equality"} + }, + ] + } + })); + }, + teardown: function(db) { + assert.commandWorked(db.dropDatabase()); + }, + testcases: [ + { + runOnDb: firstDbName, + roles: { readWrite : 1, readWriteAnyDatabase : 1, dbOwner : 1, root : 1, __system : 1 }, + privileges: + [{resource: {db: firstDbName, collection: "foo"}, actions: ["cleanupStructuredEncryptionData"]}] + }, + { + runOnDb: secondDbName, + roles: { readWriteAnyDatabase : 1, root : 1, __system : 1 }, + privileges: + [{resource: {db: secondDbName, collection: "foo"}, actions: ["cleanupStructuredEncryptionData"]}] + } + ] + }, { testname: "connectionStatus", command: {connectionStatus: 1}, @@ -3264,6 +3295,148 @@ export const authCommandsLib = { {runOnDb: adminDbName, roles: {__system: 1}, expectFail: true}, ] }, + { + testname: "checkClusterMetadataConsistency", + command: {checkMetadataConsistency: 1}, + skipUnlessSharded: true, + setup: function(db) { + assert.commandWorked(db.getSiblingDB("test").createCollection("coll")); + }, + teardown: function(db) { + assert.commandWorked(db.getSiblingDB("test").dropDatabase()); + }, + testcases: [ + { + runOnDb: adminDbName, + roles: {clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1} + }, + { + runOnDb: adminDbName, + privileges: [{resource: {cluster: true}, actions: ["checkMetadataConsistency"]}] + }, + { + runOnDb: adminDbName, + privileges: [ + {resource: {db: "", collection: ""}, actions: ["checkMetadataConsistency"]} + ], + expectAuthzFailure: true + }, + { + 
runOnDb: adminDbName, + privileges: [{ + resource: {db: adminDbName, collection: ""}, + actions: ["checkMetadataConsistency"] + }], + expectAuthzFailure: true + }, + { + runOnDb: adminDbName, + privileges: [{ + resource: {db: adminDbName, collection: "coll"}, + actions: ["checkMetadataConsistency"] + }], + expectAuthzFailure: true + }, + { + runOnDb: adminDbName, + privileges: [{resource: {cluster: true}, actions: ["allCollectionStats"]}], + expectAuthzFailure: true + } + ] + }, + { + testname: "checkDatabaseMetadataConsistency", + command: {checkMetadataConsistency: 1}, + skipUnlessSharded: true, + setup: function(db) { + assert.commandWorked(db.getSiblingDB("test").createCollection("coll")); + }, + teardown: function(db) { + assert.commandWorked(db.getSiblingDB("test").dropDatabase()); + }, + testcases: [ + { + runOnDb: "test", + roles: {clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1} + }, + { + runOnDb: "test", + privileges: [{resource: {cluster: true}, actions: ["checkMetadataConsistency"]}] + }, + { + runOnDb: "test", + privileges: [ + {resource: {db: "", collection: ""}, actions: ["checkMetadataConsistency"]} + ] + }, + { + runOnDb: "test", + privileges: [{ + resource: {db: "test", collection: ""}, + actions: ["checkMetadataConsistency"] + }] + }, + { + runOnDb: "test", + privileges: [{ + resource: {db: "test", collection: "coll"}, + actions: ["checkMetadataConsistency"] + }], + expectAuthzFailure: true + }, + { + runOnDb: "test", + privileges: [{resource: {cluster: true}, actions: ["allCollectionStats"]}], + expectAuthzFailure: true + } + ] + }, + { + testname: "checkCollectionMetadataConsistency", + command: {checkMetadataConsistency: "coll"}, + skipUnlessSharded: true, + setup: function(db) { + assert.commandWorked(db.getSiblingDB("test").createCollection("coll")); + }, + teardown: function(db) { + assert.commandWorked(db.getSiblingDB("test").dropDatabase()); + }, + testcases: [ + { + runOnDb: "test", + roles: {clusterManager: 1, clusterAdmin: 1, root: 1, __system: 1} + }, + { + runOnDb: "test", + privileges: [{resource: {cluster: true}, actions: ["checkMetadataConsistency"]}] + }, + { + runOnDb: "test", + privileges: [ + {resource: {db: "", collection: ""}, actions: ["checkMetadataConsistency"]} + ] + }, + { + runOnDb: "test", + privileges: [{ + resource: {db: "test", collection: ""}, + actions: ["checkMetadataConsistency"] + }] + }, + { + runOnDb: "test", + privileges: [{ + resource: {db: "test", collection: "coll"}, + actions: ["checkMetadataConsistency"] + }] + }, + { + runOnDb: "test", + privileges: [{resource: {cluster: true}, actions: ["allCollectionStats"]}], + expectAuthzFailure: true + } + ] + }, { testname: "clusterCount", command: {clusterCount: "x"}, @@ -4807,6 +4980,25 @@ export const authCommandsLib = { } ] }, + { + testname: "clusterBulkWrite", + command: { + clusterBulkWrite: 1, + ops: [ + {insert: 0, document: {skey: "MongoDB"}}, + {insert: 1, document: {skey: "MongoDB"}}], + nsInfo: [{ns: firstDbName + ".coll"}, {ns: secondDbName + ".coll1"}], + }, + skipSharded: true, + testcases: [ + { + runOnDb: adminDbName, + roles: {__system: 1}, + privileges: [{resource: {cluster: true}, actions: ["internal"]}], + expectFail: true, + }, + ] + }, { testname: "clusterDelete", command: {clusterDelete: "foo", deletes: [{q: {}, limit: 1}]}, @@ -5478,14 +5670,14 @@ export const authCommandsLib = { testcases: [ { runOnDb: firstDbName, - roles: Object.extend({restore: 1}, roles_dbAdmin), + roles: Object.extend({restore: 1}, roles_writeDbAdmin), privileges: [{resource: 
{db: firstDbName, collection: "foo"}, actions: ["updateSearchIndex"]}], expectFail: true, }, { runOnDb: secondDbName, - roles: Object.extend({restore: 1}, roles_dbAdminAny), + roles: Object.extend({restore: 1}, roles_writeDbAdminAny), privileges: [{resource: {db: secondDbName, collection: "foo"}, actions: ["updateSearchIndex"]}], expectFail: true, @@ -5522,9 +5714,39 @@ export const authCommandsLib = { {runOnDb: secondDbName, roles: {}} ] }, + { + testname: "s_moveRange", + command: {moveRange: "test.x", min: {x:1}, toShard:"a"}, + skipUnlessSharded: true, + testcases: [ + { + runOnDb: adminDbName, + roles: roles_clusterManager, + privileges: [{resource: {db: "test", collection: "x"}, actions: ["moveChunk"]}], + expectFail: true + }, + {runOnDb: firstDbName, roles: {}}, + {runOnDb: secondDbName, roles: {}} + ] + }, + { + testname: "d_moveRange", + command: {_shardsvrMoveRange: "test.x", fromShard: "a", toShard: "b", min: {}, max: {}, maxChunkSizeBytes: 1024}, + skipSharded: true, + testcases: [ + { + runOnDb: adminDbName, + roles: {__system: 1}, + privileges: [{resource: {cluster: true}, actions: ["internal"]}], + expectFail: true + }, + {runOnDb: firstDbName, roles: {}}, + {runOnDb: secondDbName, roles: {}} + ] + }, { testname: "movePrimary", - command: {movePrimary: "x"}, + command: {movePrimary: "x", to: "a"}, skipUnlessSharded: true, testcases: [ { @@ -5558,8 +5780,7 @@ export const authCommandsLib = { // Only enterprise knows of this command. skipTest: (conn) => { - return !getBuildInfo().modules.includes("enterprise") - || !TestData.setParameters.featureFlagOIDC; + return !getBuildInfo().modules.includes("enterprise"); }, testcases: [ { @@ -5576,8 +5797,7 @@ export const authCommandsLib = { // Only enterprise knows of this command. skipTest: (conn) => { - return !getBuildInfo().modules.includes("enterprise") - || !TestData.setParameters.featureFlagOIDC; + return !getBuildInfo().modules.includes("enterprise"); }, testcases: [ { @@ -5958,9 +6178,9 @@ export const authCommandsLib = { skipUnlessSharded: true, testcases: [ { - runOnDb: adminDbName, - roles: roles_clusterManager, - expectFail: true, + runOnDb: adminDbName, + roles: roles_clusterManager, + expectFail: true, privileges: [{resource: {cluster: true}, actions: ["transitionToDedicatedConfigServer"]}] }, {runOnDb: firstDbName, roles: {}}, @@ -6422,12 +6642,12 @@ export const authCommandsLib = { ] }, { - // Test that only clusterManager has permission to run $telemetry + // Test that only clusterManager has permission to run $queryStats testname: "testTelemetryReadPrivilege", - command: {aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}, + command: {aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}, skipSharded: false, skipTest: (conn) => { - return !TestData.setParameters.featureFlagTelemetry; + return !TestData.setParameters.featureFlagQueryStats; }, testcases: [{runOnDb: adminDbName, roles: roles_clusterManager}] }, diff --git a/jstests/auth/list_all_local_sessions.js b/jstests/auth/list_all_local_sessions.js index 3b90d01b545e9..20fd0f7e021fa 100644 --- a/jstests/auth/list_all_local_sessions.js +++ b/jstests/auth/list_all_local_sessions.js @@ -1,5 +1,5 @@ // Auth tests for the $listLocalSessions {allUsers:true} aggregation stage. 
-// @tags: [requires_fcv_70, requires_sharding] +// @tags: [requires_sharding] (function() { 'use strict'; diff --git a/jstests/auth/listcommands_preauth_mongod.js b/jstests/auth/listcommands_preauth_mongod.js index f049e75de7abf..bf7ca437c6fee 100644 --- a/jstests/auth/listcommands_preauth_mongod.js +++ b/jstests/auth/listcommands_preauth_mongod.js @@ -9,4 +9,4 @@ load("jstests/auth/listcommands_preauth_base.js"); const mongod = MongoRunner.runMongod({auth: ""}); runTest(mongod); MongoRunner.stopMongod(mongod); -})(); \ No newline at end of file +})(); diff --git a/jstests/auth/listcommands_preauth_sharded_cluster.js b/jstests/auth/listcommands_preauth_sharded_cluster.js index 76115d26f5e1b..325f27cf47964 100644 --- a/jstests/auth/listcommands_preauth_sharded_cluster.js +++ b/jstests/auth/listcommands_preauth_sharded_cluster.js @@ -11,4 +11,4 @@ const st = new ShardingTest({shards: 1, mongos: 1, config: 1, other: {keyFile: 'jstests/libs/key1'}}); runTest(st.s0); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/auth/logout_reconnect.js b/jstests/auth/logout_reconnect.js index fa5d8a8fcdb2c..614a89e7857a0 100644 --- a/jstests/auth/logout_reconnect.js +++ b/jstests/auth/logout_reconnect.js @@ -30,7 +30,7 @@ conn = MongoRunner.runMongod({restart: conn, noCleanData: true}); // expect to fail on first attempt since the socket is no longer valid try { - val = testDB.foo.findOne(); + testDB.foo.findOne(); } catch (err) { } diff --git a/jstests/auth/mr_auth.js b/jstests/auth/mr_auth.js index 9c398af1ba1b8..35c8ad1215f22 100644 --- a/jstests/auth/mr_auth.js +++ b/jstests/auth/mr_auth.js @@ -4,26 +4,26 @@ // This test requires users to persist across a restart. // @tags: [requires_persistence] -baseName = "jstests_mr_auth"; -dbName = "test"; -out = baseName + "_out"; +let baseName = "jstests_mr_auth"; +let dbName = "test"; +let out = baseName + "_out"; -map = function() { +let map = function() { emit(this.x, this.y); }; -red = function(k, vs) { +let red = function(k, vs) { var s = 0; for (var i = 0; i < vs.length; i++) s += vs[i]; return s; }; -red2 = function(k, vs) { +let red2 = function(k, vs) { return 42; }; // make sure writing is allowed when started without --auth enabled -dbms = MongoRunner.runMongod({bind_ip: "127.0.0.1"}); +let dbms = MongoRunner.runMongod({bind_ip: "127.0.0.1"}); var d = dbms.getDB(dbName); var t = d[baseName]; diff --git a/jstests/auth/readIndex.js b/jstests/auth/readIndex.js index 53c5d63ecba00..517415d865d77 100644 --- a/jstests/auth/readIndex.js +++ b/jstests/auth/readIndex.js @@ -19,4 +19,4 @@ var indexList = testDB.foo.getIndexes().filter(function(idx) { }); assert.eq(1, indexList.length, tojson(indexList)); assert.docEq(indexList[0].key, {a: 1}, tojson(indexList)); -MongoRunner.stopMongod(conn, null, {user: 'root', pwd: 'password'}); \ No newline at end of file +MongoRunner.stopMongod(conn, null, {user: 'root', pwd: 'password'}); diff --git a/jstests/auth/rename_encrypted_collection.js b/jstests/auth/rename_encrypted_collection.js index b9240bf650c95..311ee357eae35 100644 --- a/jstests/auth/rename_encrypted_collection.js +++ b/jstests/auth/rename_encrypted_collection.js @@ -6,11 +6,6 @@ * requires_fcv_61, * ] */ -load("jstests/fle2/libs/encrypted_client_util.js"); - -(function() { -'use strict'; - function runTestWithAuth(conn, allowsRename, verifyFunction) { const db = conn.getDB("test"); const srcDbName = 'rename_encrypted_collection_src_db'; @@ -125,5 +120,4 @@ jsTestLog("Sharding: Testing fle2 collection rename"); runTest(st.s); 
st.stop(); -} -}()); +} \ No newline at end of file diff --git a/jstests/auth/rename_system_buckets_collection.js b/jstests/auth/rename_system_buckets_collection.js new file mode 100644 index 0000000000000..2a6fa9cfe3e72 --- /dev/null +++ b/jstests/auth/rename_system_buckets_collection.js @@ -0,0 +1,128 @@ +// Tests renaming the system.buckets collection. +(function() { +"use strict"; + +// Set up the test database. +const dbName = "test"; +const collName = "mongosync.tmp.UUID123"; +const bucketsCollName = `system.buckets.${collName}`; +const targetBucketsCollName = "system.buckets.manual"; + +function renameBucketsCollection(adminDB, username, shouldSucceed) { + // Create collection under admin user + assert.eq(1, adminDB.auth("admin", "admin")); + + const testDB = adminDB.getSiblingDB(dbName); + + testDB[bucketsCollName].drop(); + testDB[targetBucketsCollName].drop(); + + assert.commandWorked( + testDB.createCollection(bucketsCollName, {timeseries: {timeField: "time"}})); + adminDB.logout(); + + // Try rename with test users + jsTestLog("Testing system.buckets renaming with username: " + username); + assert(adminDB.auth(username, 'password')); + + // No privilege grants the ability to rename a system.buckets collection to a non-bucket + // namespace. + assert.commandFailed(testDB.adminCommand({ + renameCollection: `${testDB}.${bucketsCollName}`, + to: `${testDB}.${collName}`, + dropTarget: false + })); + + const res = testDB.adminCommand({ + renameCollection: `${testDB}.${bucketsCollName}`, + to: `${testDB}.${targetBucketsCollName}`, + dropTarget: true + }); + + assert.eq((shouldSucceed) ? 1 : 0, + res.ok, + "Rename collection failed or succeeded unexpectedly:" + tojson(res)); + + adminDB.logout(); +} + +function runTest(conn) { + const adminDB = conn.getDB("admin"); + + // Create the admin user. + adminDB.createUser({user: 'admin', pwd: 'admin', roles: ['root']}); + assert.eq(1, adminDB.auth("admin", "admin")); + + // Create roles with ability to rename system.buckets collections. + adminDB.createRole({ + role: "renameBucketsOnly", + privileges: [{ + resource: {db: '', system_buckets: ''}, + actions: [ + "createIndex", + "dropCollection", + "find", + "insert", + ] + }], + roles: [] + }); + + // Create test users. + adminDB.createUser( + {user: 'userAdmin', pwd: 'password', roles: ['userAdminAnyDatabase', 'renameBucketsOnly']}); + + // Create read and write users. + adminDB.createUser({ + user: 'readWriteAdmin', + pwd: 'password', + roles: ['readWriteAnyDatabase', 'renameBucketsOnly'] + }); + + // Create strong users. + adminDB.createUser({user: 'restore', pwd: 'password', roles: ['restore', 'renameBucketsOnly']}); + adminDB.createUser({user: 'root', pwd: 'password', roles: ['root', 'renameBucketsOnly']}); + adminDB.createUser( + {user: 'rootier', pwd: 'password', roles: ['__system', 'renameBucketsOnly']}); + adminDB.createUser( + {user: 'reader', pwd: 'password', roles: ['readAnyDatabase', 'renameBucketsOnly']}); + + adminDB.logout(); + + // Expect renaming system.buckets collection to succeed. + renameBucketsCollection(adminDB, 'restore', true); + renameBucketsCollection(adminDB, 'root', true); + renameBucketsCollection(adminDB, 'rootier', true); + + // Second test case should fail for user with inadequate role. 
+ renameBucketsCollection(adminDB, 'reader', false); + renameBucketsCollection(adminDB, 'readWriteAdmin', false); + renameBucketsCollection(adminDB, 'userAdmin', false); +} + +jsTestLog("ReplicaSet: Testing rename timeseries collection"); +{ + const rst = new ReplSetTest({nodes: 1, auth: "", keyFile: 'jstests/libs/key1'}); + rst.startSet(); + + rst.initiate(); + rst.awaitReplication(); + runTest(rst.getPrimary()); + rst.stopSet(); +} + +jsTestLog("Sharding: Testing rename timeseries collection"); +{ + const st = new ShardingTest({ + shards: 1, + mongos: 1, + config: 1, + keyFile: "jstests/libs/key1", + other: {shardOptions: {auth: ""}} + }); + + runTest(st.s); + + st.stop(); +} +})(); diff --git a/jstests/auth/repl.js b/jstests/auth/repl.js index 6f5b7ed0dcb8f..b4c3ae7aa6f81 100644 --- a/jstests/auth/repl.js +++ b/jstests/auth/repl.js @@ -172,7 +172,7 @@ var AuthReplTest = function(spec) { }; jsTest.log("1 test replica sets"); -var rs = new ReplSetTest({name: rsName, nodes: 2}); +const rs = new ReplSetTest({name: rsName, nodes: 2}); var nodes = rs.startSet(mongoOptions); rs.initiate(); authutil.asCluster(nodes, "jstests/libs/key1", function() { diff --git a/jstests/auth/repl_require_keyfile.js b/jstests/auth/repl_require_keyfile.js index fc5977a2d1da3..6753c8282d34b 100644 --- a/jstests/auth/repl_require_keyfile.js +++ b/jstests/auth/repl_require_keyfile.js @@ -14,4 +14,4 @@ const mongoOutput = rawMongoProgramOutput(); assert(mongoOutput.indexOf( "security.keyFile is required when authorization is enabled with replica sets") >= 0, "Expected error message about missing keyFile on startup"); -})(); \ No newline at end of file +})(); diff --git a/jstests/auth/restore_role_create_collection_via_apply_ops.js b/jstests/auth/restore_role_create_collection_via_apply_ops.js index be30be7db4767..bcfc57b03b56a 100644 --- a/jstests/auth/restore_role_create_collection_via_apply_ops.js +++ b/jstests/auth/restore_role_create_collection_via_apply_ops.js @@ -58,4 +58,4 @@ function runTest(conn) { const standalone = MongoRunner.runMongod({auth: ''}); runTest(standalone); MongoRunner.stopMongod(standalone); -})(); \ No newline at end of file +})(); diff --git a/jstests/auth/sbe_plan_cache_user_roles.js b/jstests/auth/sbe_plan_cache_user_roles.js index 8c4313a4ee57a..20e24cf220836 100644 --- a/jstests/auth/sbe_plan_cache_user_roles.js +++ b/jstests/auth/sbe_plan_cache_user_roles.js @@ -2,14 +2,11 @@ * Test $$USER_ROLES works correctly with the SBE plan cache. The same query should return the * updated user role info when a different user logs in. * @tags: [ - * featureFlagUserRoles, * # Multiple servers can mess up the plan cache list. * assumes_standalone_mongod, * ] */ -(function() { -"use strict"; -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const mongod = MongoRunner.runMongod(); const dbName = "test"; @@ -54,5 +51,4 @@ assert.eq(results[0].roles, [{_id: "test.readWrite", role: "readWrite", db: "tes verifyPlanCache("test.readWrite"); db.logout(); -MongoRunner.stopMongod(mongod); -})(); +MongoRunner.stopMongod(mongod); \ No newline at end of file diff --git a/jstests/auth/secondary_invalidation.js b/jstests/auth/secondary_invalidation.js index 16f2938ebbf78..d44dec85ca103 100644 --- a/jstests/auth/secondary_invalidation.js +++ b/jstests/auth/secondary_invalidation.js @@ -20,7 +20,7 @@ admin.auth('admin', 'password'); primary.getDB('foo').createUser({user: 'foo', pwd: 'foopwd', roles: []}, {w: NUM_NODES}); -secondaryFoo = secondary.getDB('foo'); +let secondaryFoo = secondary.getDB('foo'); secondaryFoo.auth('foo', 'foopwd'); assert.throws(function() { secondaryFoo.col.findOne(); diff --git a/jstests/auth/speculative-auth-replset.js b/jstests/auth/speculative-auth-replset.js index 9f36444a0204d..3bd960780d6e0 100644 --- a/jstests/auth/speculative-auth-replset.js +++ b/jstests/auth/speculative-auth-replset.js @@ -27,8 +27,9 @@ function countAuthInLog(conn) { } } else if (entry.id === kAuthenticationFailedLogId) { // Authentication can fail legitimately because the secondary abandons the connection - // during shutdown. - assert.eq(entry.attr.error.code, ErrorCodes.AuthenticationAbandoned); + // during shutdown - if we do encounter an authentication failure in the log, make sure + // that it is only of this type, fail anything else + assert.eq(entry.attr.result, ErrorCodes.AuthenticationAbandoned); } else { // Irrelevant. return; diff --git a/jstests/auth/system_buckets_invalid_nss.js b/jstests/auth/system_buckets_invalid_nss.js new file mode 100644 index 0000000000000..dd182c4858462 --- /dev/null +++ b/jstests/auth/system_buckets_invalid_nss.js @@ -0,0 +1,21 @@ +// Validate that *.system.buckets.system.buckets.* is an invalid namespace + +(function() { +"use strict"; + +function runTest(conn) { + const admin = conn.getDB('admin'); + assert.commandWorked(admin.runCommand({createUser: 'admin', pwd: 'admin', roles: ['root']})); + + assert.commandFailedWithCode(admin.system.buckets.system.buckets.foo.insert({x: 1}), + [ErrorCodes.Unauthorized]); + + assert(admin.auth('admin', 'admin')); + assert.commandFailedWithCode(admin.system.buckets.system.buckets.foo.insert({x: 1}), + [ErrorCodes.InvalidNamespace]); +} + +const mongod = MongoRunner.runMongod({auth: ''}); +runTest(mongod); +MongoRunner.stopMongod(mongod); +}()); diff --git a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js index 4c0ff0c8518be..fe00d5fbc219e 100644 --- a/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js +++ b/jstests/auth/upgrade_noauth_to_keyfile_with_sharding.js @@ -11,6 +11,7 @@ load('jstests/ssl/libs/ssl_helpers.js'); TestData.skipCheckingIndexesConsistentAcrossCluster = true; TestData.skipCheckOrphans = true; TestData.skipCheckShardFilteringMetadata = true; +TestData.skipCheckRoutingTableConsistency = true; // Disable auth explicitly var noAuthOptions = {noauth: ''}; diff --git a/jstests/auth/user_cache_acquisition_stats.js b/jstests/auth/user_cache_acquisition_stats.js index a2f9996d127b0..2a2e36d73b7ce 100644 --- a/jstests/auth/user_cache_acquisition_stats.js +++ b/jstests/auth/user_cache_acquisition_stats.js @@ -150,4 +150,4 @@ function runTest(conn, mode) { jsTest.log('SUCCESS user_cache_acquisition_stats.js Sharding'); 
st.stop(); } -})(); \ No newline at end of file +})(); diff --git a/jstests/auth/user_defined_roles_on_secondaries.js b/jstests/auth/user_defined_roles_on_secondaries.js index d27fbb7d522a4..5e4733e9d6736 100644 --- a/jstests/auth/user_defined_roles_on_secondaries.js +++ b/jstests/auth/user_defined_roles_on_secondaries.js @@ -48,7 +48,7 @@ function assertListContainsRole(list, role, msg) { if (list[i].role == role.role && list[i].db == role.db) return; } - doassert("Could not find value " + tojson(val) + " in " + tojson(list) + + doassert("Could not find value " + tojson(role) + " in " + tojson(list) + (msg ? ": " + msg : "")); } diff --git a/jstests/auth/user_roles_disable_parameter.js b/jstests/auth/user_roles_disable_parameter.js index 1f93968832eeb..c5fe17a304b10 100644 --- a/jstests/auth/user_roles_disable_parameter.js +++ b/jstests/auth/user_roles_disable_parameter.js @@ -1,5 +1,5 @@ // Tests that $$USER_ROLES is not available when the server parameter is set to false. -// @tags: [featureFlagUserRoles, requires_fcv_70] +// @tags: [requires_fcv_70] (function() { "use strict"; diff --git a/jstests/auth/user_roles_empty.js b/jstests/auth/user_roles_empty.js index f450ad42d1b8d..938e22404d75a 100644 --- a/jstests/auth/user_roles_empty.js +++ b/jstests/auth/user_roles_empty.js @@ -1,6 +1,6 @@ // Tests that $$USER_ROLES works as expected in a find command when the array returned by // $$USER_ROLES is empty and when mongod was started with auth disabled. -// @tags: [featureFlagUserRoles, requires_fcv_70] +// @tags: [requires_fcv_70] (function() { "use strict"; diff --git a/jstests/auth/user_roles_find_agg.js b/jstests/auth/user_roles_find_agg.js index 0e49f67134db6..860ac3f8cc50c 100644 --- a/jstests/auth/user_roles_find_agg.js +++ b/jstests/auth/user_roles_find_agg.js @@ -1,6 +1,6 @@ // Tests that $$USER_ROLES works as expected in a find command and an aggregate command (on both a // standalone mongod and a sharded cluster). -// @tags: [featureFlagUserRoles, requires_fcv_70] +// @tags: [requires_fcv_70] (function() { "use strict"; diff --git a/jstests/auth/user_roles_update.js b/jstests/auth/user_roles_update.js deleted file mode 100644 index b01a3a60d5542..0000000000000 --- a/jstests/auth/user_roles_update.js +++ /dev/null @@ -1,28 +0,0 @@ -// Tests that $$USER_ROLES is not able to be accessed within an update command. -// @tags: [featureFlagUserRoles, requires_fcv_70] - -(function() { -"use strict"; - -const dbName = "test"; -const collName = "coll"; -const varNotAvailableErr = 51144; - -const mongod = MongoRunner.runMongod({auth: ""}); - -// Create a user on the admin database. -let admin = mongod.getDB("admin"); -assert.commandWorked(admin.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]})); -admin.auth("admin", "admin"); - -const db = mongod.getDB(dbName); -let coll = db.getCollection(collName); -assert.commandWorked(coll.insert({a: 1})); - -// Check that $$USER_ROLES is not available within an update command. -assert.commandFailedWithCode(coll.update({$expr: {$in: ["root", '$$USER_ROLES.role']}}, {a: 2}), - varNotAvailableErr); - -db.logout(); -MongoRunner.stopMongod(mongod); -}()); diff --git a/jstests/auth/user_roles_update_findAndModify.js b/jstests/auth/user_roles_update_findAndModify.js new file mode 100644 index 0000000000000..3faf74a08a1a8 --- /dev/null +++ b/jstests/auth/user_roles_update_findAndModify.js @@ -0,0 +1,174 @@ +// Tests that $$USER_ROLES is able to be used in an "update" and "findAndModify" commands. 
+// @tags: [requires_fcv_70] + +(function() { +"use strict"; + +const dbName = "test"; +const collName = "coll"; + +function initialize(db) { + let engDoc = { + _id: 0, + allowedRoles: ["eng-app-prod", "eng-app-stg", "read"], + allowedRole: "read", + comment: "only for engineering team", + teamMembers: ["John", "Ashley", "Gina"], + yearlyEduBudget: 15000, + yearlyTnEBudget: 2000, + salesWins: 1000 + }; + + let salesDoc = { + _id: 1, + allowedRoles: ["sales-person"], + allowedRole: "observe", + comment: "only for sales team", + salesWins: 1000 + }; + + let testUpdate = {_id: 2, allowedRole: "test", teamMembersRights: ["testUpdate"]}; + + let testFindAndModify = { + _id: 3, + allowedRole: "write", + teamMembersRights: ["testFindAndModify"] + }; + + let coll = db.getCollection(collName); + assert.commandWorked(coll.insertMany([engDoc, salesDoc, testUpdate, testFindAndModify])); +} + +// Test accessing $$USER_ROLES in the query portion of "update" command. +function runUpdateQuery(db) { + let coll = db.getCollection(collName); + + let pre = coll.findOne( + {$expr: {$eq: [{$setIntersection: ["$allowedRoles", "$$USER_ROLES.role"]}, []]}}); + var preSalesWins = pre.salesWins; + + assert.commandWorked(coll.update( + {$expr: {$eq: [{$setIntersection: ["$allowedRoles", "$$USER_ROLES.role"]}, []]}}, + {$inc: {salesWins: 1000}}, + {multi: true})); + + let post = coll.findOne( + {$expr: {$eq: [{$setIntersection: ["$allowedRoles", "$$USER_ROLES.role"]}, []]}}); + var postSalesWins = post.salesWins; + + assert.eq(postSalesWins, preSalesWins + 1000); +} + +// Test accessing $$USER_ROLES in the update portion of "update" command. +function runUpdateUpdate(db) { + let coll = db.getCollection(collName); + + assert.commandWorked( + coll.update({_id: 2}, [{$set: {"teamMembersRights": "$$USER_ROLES.role"}}])); + + let post = coll.findOne({_id: 2}); + + let expectedResult = { + _id: 2, + allowedRole: "test", + teamMembersRights: ["readWriteAnyDatabase", "read"] + }; + + assert.eq(post, expectedResult); +} + +// Test accessing $$USER_ROLES in the query portion of "findAndModify" command. +function runFindAndModifyQuery(db) { + let coll = db.getCollection(collName); + + let pre = coll.findOne({$expr: {allowedRole: "$$USER_ROLES.role"}}); + var preSalesWins = pre.salesWins; + + db.coll.findAndModify({ + query: {allowedRole: "read", $expr: {allowedRole: "$$USER_ROLES.role"}}, + update: {$inc: {salesWins: 1000}} + }); + + let post = coll.findOne({$expr: {allowedRole: "$$USER_ROLES.role"}}); + var postSalesWins = post.salesWins; + + assert.eq(postSalesWins, preSalesWins + 1000); +} + +// Test accessing $$USER_ROLES in the update portion of "findAndModify" command. +function runFindAndModifyUpdate(db) { + let coll = db.getCollection(collName); + + coll.findAndModify({ + query: {allowedRole: "write"}, + update: [{$set: {"teamMembersRights": "$$USER_ROLES.role"}}] + }); + + let post = coll.findOne({_id: 3}); + + let expectedResult = { + _id: 3, + allowedRole: "write", + teamMembersRights: ["readWriteAnyDatabase", "read"] + }; + + assert.eq(post, expectedResult); +} + +function runTest(conn, st = null) { + // Create a user on the admin database. + let admin = conn.getDB("admin"); + assert.commandWorked(admin.runCommand({createUser: "admin", pwd: "admin", roles: ["root"]})); + admin.auth("admin", "admin"); + + if (st) { + // Shard the collection that will be used in the update and findAndModify commands. 
+ assert.commandWorked(conn.getDB("admin").runCommand({enableSharding: dbName})); + st.shardColl(conn.getDB(dbName).getCollection(collName), {allowedRole: 1}); + } + + const db = conn.getDB(dbName); + let coll = db.getCollection(collName); + + // Create a user that has roles on more than one database. The readWriteAnyDatabase is + // necessary for the inserts that follow to work. + assert.commandWorked(db.runCommand({ + createUser: "user", + pwd: "pwd", + roles: [{role: "readWriteAnyDatabase", db: "admin"}, {role: "read", db: dbName}] + })); + + // Logout of the admin user so that we can log into the other user so we can access those + // roles with $$USER_ROLES below. + admin.logout(); + db.auth("user", "pwd"); + + initialize(db); + + runUpdateQuery(db); + + runUpdateUpdate(db); + + runFindAndModifyQuery(db); + + runFindAndModifyUpdate(db); + + db.logout(); +} + +jsTest.log("Test standalone"); +const mongod = MongoRunner.runMongod({auth: ""}); +runTest(mongod); +MongoRunner.stopMongod(mongod); + +jsTest.log("Test sharded cluster"); +const st = new ShardingTest({ + mongos: 1, + config: 1, + shards: 2, + keyFile: 'jstests/libs/key1', +}); + +runTest(st.s, st); +st.stop(); +}()); diff --git a/jstests/auth/user_roles_view.js b/jstests/auth/user_roles_view.js index a21d3dae8720c..87dc0650fe3f2 100644 --- a/jstests/auth/user_roles_view.js +++ b/jstests/auth/user_roles_view.js @@ -1,6 +1,6 @@ // Tests that $$USER_ROLES works as expected in view creation and queries on the view (on both a // standalone mongod and a sharded cluster). -// @tags: [featureFlagUserRoles, requires_fcv_70] +// @tags: [requires_fcv_70] (function() { "use strict"; diff --git a/jstests/change_streams/change_stream.js b/jstests/change_streams/change_stream.js index a7a100ea12430..38b39bb5529b9 100644 --- a/jstests/change_streams/change_stream.js +++ b/jstests/change_streams/change_stream.js @@ -10,8 +10,6 @@ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and // assert[Valid|Invalid]ChangeStreamNss. -const isMongos = FixtureHelpers.isMongos(db); - // Drop and recreate the collections to be used in this set of tests. assertDropAndRecreateCollection(db, "t1"); assertDropAndRecreateCollection(db, "t2"); @@ -33,7 +31,7 @@ checkArgFails([1, 2, "invalid", {x: 1}]); assertInvalidChangeStreamNss("admin", "testColl"); assertInvalidChangeStreamNss("config", "testColl"); // Not allowed to access 'local' database through mongos. -if (!isMongos) { +if (!FixtureHelpers.isMongos(db)) { assertInvalidChangeStreamNss("local", "testColl"); } diff --git a/jstests/change_streams/change_stream_pre_image_lookup_whole_db_whole_cluster.js b/jstests/change_streams/change_stream_pre_image_lookup_whole_db_whole_cluster.js index f2ca07b1c54bc..fc96b63e5c5f8 100644 --- a/jstests/change_streams/change_stream_pre_image_lookup_whole_db_whole_cluster.js +++ b/jstests/change_streams/change_stream_pre_image_lookup_whole_db_whole_cluster.js @@ -13,7 +13,7 @@ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. 
-const testDB = db.getSiblingDB(jsTestName()); +const testDB = db.getSiblingDB("preImage_lookup_whole_db_whole_cluster"); const adminDB = db.getSiblingDB("admin"); const collWithPreImageName = "coll_with_pre_images"; const collWithNoPreImageName = "coll_with_no_pre_images"; diff --git a/jstests/change_streams/ddl_create_drop_index_events.js b/jstests/change_streams/ddl_create_drop_index_events.js index 7119dbc140d7b..de17ea51409d1 100644 --- a/jstests/change_streams/ddl_create_drop_index_events.js +++ b/jstests/change_streams/ddl_create_drop_index_events.js @@ -7,14 +7,11 @@ * assumes_against_mongod_not_mongos, * ] */ -(function() { -"use strict"; - load('jstests/libs/collection_drop_recreate.js'); // For 'assertDropAndRecreateCollection' and // 'assertDropCollection'. load('jstests/libs/change_stream_util.js'); // For 'ChangeStreamTest' and // 'assertChangeStreamEventEq'. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {safeToCreateColumnStoreIndex} from "jstests/libs/columnstore_util.js"; const testDB = db.getSiblingDB(jsTestName()); @@ -180,4 +177,3 @@ runTest((() => cst.startWatchingChanges({pipeline, collection: 1})), true); // Run the test using a single collection change stream on a non-empty collection. runTest((() => cst.startWatchingChanges({pipeline, collection: collName})), true); -}()); diff --git a/jstests/change_streams/ddl_create_event.js b/jstests/change_streams/ddl_create_event.js index 72276e2ce4c62..de1c3ca24ee0f 100644 --- a/jstests/change_streams/ddl_create_event.js +++ b/jstests/change_streams/ddl_create_event.js @@ -4,14 +4,10 @@ * * @tags: [ requires_fcv_60, ] */ -(function() { -"use strict"; - load('jstests/libs/collection_drop_recreate.js'); // For 'assertDropAndRecreateCollection' and // 'assertDropCollection'. load('jstests/libs/change_stream_util.js'); // For 'ChangeStreamTest' and // 'assertChangeStreamEventEq'. -load("jstests/libs/feature_flag_util.js"); const testDB = db.getSiblingDB(jsTestName()); @@ -69,14 +65,7 @@ function runTest(startChangeStream) { assertDropCollection(testDB, collName); // With capped collection parameters. - let expectedSize; - - // TODO SERVER-74653: Remove feature flag check. 
- if (FeatureFlagUtil.isPresentAndEnabled(testDB, "CappedCollectionsRelaxedSize")) { - expectedSize = 1000; - } else { - expectedSize = 1024; - } + let expectedSize = 1000; validateExpectedEventAndDropCollection({create: collName, capped: true, size: 1000, max: 1000}, { operationType: "create", @@ -198,4 +187,3 @@ function runTest(startChangeStream) { const pipeline = [{$changeStream: {showExpandedEvents: true}}]; runTest(() => test.startWatchingChanges({pipeline, collection: 1})); runTest(() => test.startWatchingChanges({pipeline, collection: collName})); -}()); diff --git a/jstests/change_streams/error_label.js b/jstests/change_streams/error_label.js index 31b38f2f7cbd9..f117e9d2184a8 100644 --- a/jstests/change_streams/error_label.js +++ b/jstests/change_streams/error_label.js @@ -30,4 +30,4 @@ const err = assert.throws(function() { assert.commandFailedWithCode(err, ErrorCodes.ChangeStreamFatalError); assert("errorLabels" in err, err); assert.contains("NonResumableChangeStreamError", err.errorLabels, err); -}()); \ No newline at end of file +}()); diff --git a/jstests/change_streams/expanded_update_description.js b/jstests/change_streams/expanded_update_description.js index 51ee93381b92d..9cd45eb2e45c1 100644 --- a/jstests/change_streams/expanded_update_description.js +++ b/jstests/change_streams/expanded_update_description.js @@ -1,10 +1,5 @@ /** * Test change stream 'updateDescription' with 'showExpandedEvents'. - * - * @tags: [ - * requires_fcv_61, - * featureFlagChangeStreamsFurtherEnrichedEvents, - * ] */ (function() { diff --git a/jstests/change_streams/generate_v1_resume_token.js b/jstests/change_streams/generate_v1_resume_token.js deleted file mode 100644 index 05ebc2f6d9295..0000000000000 --- a/jstests/change_streams/generate_v1_resume_token.js +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Test that the $_generateV2ResumeTokens parameter can be used to force change streams to return v1 - * tokens. - * @tags: [ - * requires_fcv_61 - * ] - */ -(function() { -"use strict"; - -load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection. - -const coll = assertDropAndRecreateCollection(db, jsTestName()); - -// Create one stream that returns v2 tokens, the default. -const v2Stream = coll.watch([]); - -// Create a second stream that explicitly requests v1 tokens. -const v1Stream = coll.watch([], {$_generateV2ResumeTokens: false}); - -// Insert a test document into the collection. -assert.commandWorked(coll.insert({_id: 1})); - -// Wait until both streams have encountered the insert operation. -assert.soon(() => v1Stream.hasNext() && v2Stream.hasNext()); -const v1Event = v1Stream.next(); -const v2Event = v2Stream.next(); - -// Confirm that the streams see the same event, but the resume tokens differ. 
-const v1ResumeToken = v1Event._id; -const v2ResumeToken = v2Event._id; - -delete v1Event._id; -delete v2Event._id; - -assert.docEq(v1Event, v2Event); -assert.neq(v1ResumeToken, v2ResumeToken, {v1ResumeToken, v2ResumeToken}); -})(); \ No newline at end of file diff --git a/jstests/change_streams/lookup_pit_pre_and_post_image_in_transaction.js b/jstests/change_streams/lookup_pit_pre_and_post_image_in_transaction.js index 7c5b274cd4135..0f3be92b5c873 100644 --- a/jstests/change_streams/lookup_pit_pre_and_post_image_in_transaction.js +++ b/jstests/change_streams/lookup_pit_pre_and_post_image_in_transaction.js @@ -145,4 +145,4 @@ if (!FixtureHelpers.isMongos(testDB)) { {_id: 6, operationType: "delete", preImage: {_id: 6, a: 1}}, ]); } -})(); \ No newline at end of file +})(); diff --git a/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js index ec45e81871ac0..9e6bd6285f762 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_basic_match_pushdown_rewrite.js @@ -10,10 +10,11 @@ // assumes_unsharded_collection, // assumes_read_preference_unchanged // ] -(function() { -"use strict"; - -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. +import { + assertNumChangeStreamDocsReturnedFromShard, + assertNumMatchingOplogEventsForShard, + createShardedCollection, +} from "jstests/libs/change_stream_rewrite_util.js"; const dbName = "change_stream_match_pushdown_and_rewrite"; const collName = "coll1"; @@ -74,15 +75,15 @@ const stats = coll.explain("executionStats").aggregate([ // Verify the number of documents seen from each shard by the mongoS pipeline. Because we expect // the $match to be pushed down to the shards, we expect to only see the 1 "insert" operation on // each shard. All other operations should be filtered out on the shards. -assertNumChangeStreamDocsReturnedFromShard(stats, st.rs0.name, 1); -assertNumChangeStreamDocsReturnedFromShard(stats, st.rs1.name, 1); +assertNumChangeStreamDocsReturnedFromShard(stats, st.shard0.shardName, 1); +assertNumChangeStreamDocsReturnedFromShard(stats, st.shard1.shardName, 1); // Because it is possible to rewrite the {operationType: "insert"} predicate so that it applies // to the oplog entry, we expect the $match to get pushed all the way to the initial oplog // query. This query executes in an internal "$cursor" stage, and we expect to see exactly 1 // document from this stage on each shard. -assertNumMatchingOplogEventsForShard(stats, st.rs0.name, 1); -assertNumMatchingOplogEventsForShard(stats, st.rs1.name, 1); +assertNumMatchingOplogEventsForShard(stats, st.shard0.shardName, 1); +assertNumMatchingOplogEventsForShard(stats, st.shard1.shardName, 1); // Generate another 7 oplog events, this time within a transaction. One of the events is in a // different collection, to validate that events from outside the watched namespace get filtered @@ -134,11 +135,11 @@ const txnStatsAfterEvent2 = coll.explain("executionStats").aggregate([ // Verify the number of documents seen from each shard by the mongoS pipeline. As before, we expect // that everything except the inserts will be filtered on the shard, limiting the number of events // the mongoS needs to retrieve. 
-assertNumChangeStreamDocsReturnedFromShard(txnStatsAfterEvent2, st.rs0.name, 1); +assertNumChangeStreamDocsReturnedFromShard(txnStatsAfterEvent2, st.shard0.shardName, 1); // Note that the event we are resuming from is sent to the mongoS from shard 2, even though it gets // filtered out, which is why we see 2 events here. -assertNumChangeStreamDocsReturnedFromShard(txnStatsAfterEvent2, st.rs1.name, 2); +assertNumChangeStreamDocsReturnedFromShard(txnStatsAfterEvent2, st.shard1.shardName, 2); // Generate a second transaction. session.startTransaction({readConcern: {level: "majority"}}); @@ -161,8 +162,8 @@ const txnStatsAfterEvent1 = coll.explain("executionStats").aggregate([ // The "lsid" and "txnNumber" filters should get pushed all the way to the initial oplog query // in the $cursor stage, meaning that every oplog entry gets filtered out except the // 'commitTransaction' on each shard for the one transaction we select with our filter. -assertNumMatchingOplogEventsForShard(txnStatsAfterEvent1, st.rs0.name, 1); -assertNumMatchingOplogEventsForShard(txnStatsAfterEvent1, st.rs1.name, 1); +assertNumMatchingOplogEventsForShard(txnStatsAfterEvent1, st.shard0.shardName, 1); +assertNumMatchingOplogEventsForShard(txnStatsAfterEvent1, st.shard1.shardName, 1); // Ensure that optimization does not attempt to create a filter that disregards the collation. const collationChangeStream = coll.aggregate( @@ -183,5 +184,4 @@ assert.eq(stringValues.slice(0, 2), ["Value", "vAlue"]); // transaction, they effectively occur at exactly the same time. assert.sameMembers(stringValues.slice(2, 4), ["vaLue", "valUe"]); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_documentKey_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_documentKey_rewrite.js index 646a6c581325c..3058b9e65018f 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_documentKey_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_documentKey_rewrite.js @@ -11,11 +11,11 @@ // assumes_unsharded_collection, // assumes_read_preference_unchanged // ] -(function() { -"use strict"; - -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. -load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +import { + createShardedCollection, + verifyChangeStreamOnWholeCluster +} from "jstests/libs/change_stream_rewrite_util.js"; +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. const dbName = "change_stream_match_pushdown_documentKey_rewrite"; const collName = "change_stream_match_pushdown_documentKey_rewrite"; @@ -201,4 +201,3 @@ verifyOnWholeCluster({$match: {operationType: "drop"}}, [1, 0] /* expectedChangeStreamDocsForEachShard */); st.stop(); -})(); diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocumentBeforeChange_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocumentBeforeChange_rewrite.js index 7b1884a11f08a..9272d9a61f5f0 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocumentBeforeChange_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocumentBeforeChange_rewrite.js @@ -12,10 +12,10 @@ // assumes_unsharded_collection, // assumes_read_preference_unchanged // ] -(function() { -"use strict"; - -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. 
+import { + createShardedCollection, + verifyChangeStreamOnWholeCluster +} from "jstests/libs/change_stream_rewrite_util.js"; const dbName = "change_stream_match_pushdown_fullDocumentBeforeChange_rewrite"; const collName = "change_stream_match_pushdown_fullDocumentBeforeChange_rewrite"; @@ -289,5 +289,4 @@ verifyOnWholeCluster({$match: {operationType: "drop"}}, [1, 0] /* expectedOplogRetDocsForEachShard */, [1, 0] /* expectedChangeStreamDocsForEachShard */); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js index b149335a06956..e26903f68f250 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_fullDocument_rewrite.js @@ -11,10 +11,10 @@ // assumes_unsharded_collection, // assumes_read_preference_unchanged // ] -(function() { -"use strict"; - -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. +import { + createShardedCollection, + verifyChangeStreamOnWholeCluster +} from "jstests/libs/change_stream_rewrite_util.js"; const dbName = "change_stream_match_pushdown_fullDocument_rewrite"; const collName = "change_stream_match_pushdown_fullDocument_rewrite"; @@ -279,5 +279,4 @@ runVerifyOpsTestcases("delete"); assert(coll.drop()); runVerifyOpsTestcases("drop"); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js index af9b0b9c3b6a3..1294c30d5dfff 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_namespace_rewrite.js @@ -10,11 +10,11 @@ // assumes_unsharded_collection, // assumes_read_preference_unchanged // ] -(function() { -"use strict"; - -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. -load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +import { + createShardedCollection, + verifyChangeStreamOnWholeCluster +} from "jstests/libs/change_stream_rewrite_util.js"; +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. const dbName = "change_stream_match_pushdown_and_rewrite"; const otherDbName = "other_db"; @@ -1094,4 +1094,3 @@ verifyOnWholeCluster(thirdResumeAfterToken, 1 /* expectedOplogRetDocsForEachShard */); st.stop(); -})(); diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_operation_type_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_operation_type_rewrite.js index 5287c5c033628..a117949ffefb4 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_operation_type_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_operation_type_rewrite.js @@ -11,10 +11,10 @@ // assumes_unsharded_collection, // assumes_read_preference_unchanged // ] -(function() { -"use strict"; - -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. 
+import { + createShardedCollection, + verifyChangeStreamOnWholeCluster +} from "jstests/libs/change_stream_rewrite_util.js"; const dbName = "change_stream_match_pushdown_and_rewrite"; const collName = "coll1"; @@ -305,5 +305,4 @@ verifyOnWholeCluster({ }, [6, 5] /* expectedOplogRetDocsForEachShard */); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_to_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_to_rewrite.js index a421a4375dc9f..df4f20117a05a 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_to_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_to_rewrite.js @@ -10,11 +10,11 @@ // assumes_unsharded_collection, // assumes_read_preference_unchanged // ] -(function() { -"use strict"; - -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. -load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +import { + createShardedCollection, + verifyChangeStreamOnWholeCluster +} from "jstests/libs/change_stream_rewrite_util.js"; +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. const dbName = "change_stream_match_pushdown_and_rewrite"; @@ -672,4 +672,3 @@ verifyOnWholeCluster({ 4 /* expectedOplogRetDocsForEachShard*/); st.stop(); -})(); diff --git a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_updateDescription_rewrite.js b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_updateDescription_rewrite.js index e1affb18167ca..f5bd897cba333 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_updateDescription_rewrite.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_match_pushdown_updateDescription_rewrite.js @@ -11,10 +11,10 @@ // assumes_unsharded_collection, // assumes_read_preference_unchanged // ] -(function() { -"use strict"; - -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. +import { + createShardedCollection, + verifyChangeStreamOnWholeCluster +} from "jstests/libs/change_stream_rewrite_util.js"; const dbName = "change_stream_match_pushdown_updateDescription_rewrite"; const collName = "change_stream_match_pushdown_updateDescription_rewrite"; @@ -266,5 +266,4 @@ verifyOnWholeCluster( [1, 0] /* expectedOplogRetDocsForEachShard*/, [1, 0] /*expectedChangeStreamDocsForEachShard*/); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/change_streams/oplog_rewrite/change_stream_null_existence_eq_rewrite_test.js b/jstests/change_streams/oplog_rewrite/change_stream_null_existence_eq_rewrite_test.js index 8e0869ad243df..7e9fc9b604358 100644 --- a/jstests/change_streams/oplog_rewrite/change_stream_null_existence_eq_rewrite_test.js +++ b/jstests/change_streams/oplog_rewrite/change_stream_null_existence_eq_rewrite_test.js @@ -8,12 +8,13 @@ * uses_change_streams * ] */ -(function() { -"use strict"; +import { + generateChangeStreamWriteWorkload, + getAllChangeStreamEvents, + isPlainObject +} from "jstests/libs/change_stream_rewrite_util.js"; -load("jstests/libs/change_stream_rewrite_util.js"); // For rewrite helpers. - -const dbName = "change_stream_rewrite_null_existence_test"; +const dbName = "rewrite_null_existence_test"; const collName = "coll1"; const testDB = db.getSiblingDB(dbName); @@ -210,5 +211,4 @@ for (let csConfig of [{fullDocument: "updateLookup", showExpandedEvents: true}]) } // Assert that there were no failed test cases. 
-assert(failedTestCases.length == 0, failedTestCases);
-})();
+assert(failedTestCases.length == 0, failedTestCases);
\ No newline at end of file
diff --git a/jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js b/jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js
index b49ce4d721d6e..c74b8cf599d37 100644
--- a/jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js
+++ b/jstests/change_streams/oplog_rewrite/match_pushdown_namespace_rewrite_with_expanded_events.js
@@ -11,11 +11,11 @@
 // assumes_unsharded_collection,
 // assumes_read_preference_unchanged
 // ]
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js");  // For rewrite helpers.
-load("jstests/libs/fixture_helpers.js");             // For FixtureHelpers.
+import {
+    createShardedCollection,
+    verifyChangeStreamOnWholeCluster
+} from "jstests/libs/change_stream_rewrite_util.js";
+load("jstests/libs/fixture_helpers.js");  // For FixtureHelpers.
 
 const dbName = "change_stream_match_pushdown_and_rewrite";
 const shard0Only = "shard0Only";
@@ -860,4 +860,3 @@ verifyOnWholeCluster(thirdResumeAfterToken,
                     [9, 4] /* expectedOplogRetDocsForEachShard */);
 
 st.stop();
-})();
diff --git a/jstests/change_streams/oplog_rewrite/projection_changes_type.js b/jstests/change_streams/oplog_rewrite/projection_changes_type.js
index b463444cea952..cb9b8cae08157 100644
--- a/jstests/change_streams/oplog_rewrite/projection_changes_type.js
+++ b/jstests/change_streams/oplog_rewrite/projection_changes_type.js
@@ -3,10 +3,11 @@
  * change stream framework to throw exceptions. Exercises the fix for SERVER-65497.
  * @tags: [ requires_fcv_60 ]
  */
-(function() {
-"use strict";
-
-load("jstests/libs/change_stream_rewrite_util.js");  // For rewrite helpers.
+import {
+    generateChangeStreamWriteWorkload,
+    getAllChangeStreamEvents,
+    isPlainObject
+} from "jstests/libs/change_stream_rewrite_util.js";
 
 const dbName = jsTestName();
 const collName = "coll1";
@@ -83,5 +84,4 @@ for (let fieldName of fieldsToInclude) {
 
    // Test projection of all accumulated fields.
    assertProjection(Object.assign(accumulatedProjection, currentFieldProjection));
-}
-})();
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/jstests/change_streams/pipeline_style_updates.js b/jstests/change_streams/pipeline_style_updates.js
index 4b9f56842dfcb..75c0e5fb1cf0c 100644
--- a/jstests/change_streams/pipeline_style_updates.js
+++ b/jstests/change_streams/pipeline_style_updates.js
@@ -72,4 +72,4 @@ expected = {
 testPipelineStyleUpdate(updatePipeline, expected, "update");
 
 cst.cleanUp();
-}());
\ No newline at end of file
+}());
diff --git a/jstests/change_streams/projection_fakes_internal_event.js b/jstests/change_streams/projection_fakes_internal_event.js
index 4c95ac78fe575..cbc8bbdb40e55 100644
--- a/jstests/change_streams/projection_fakes_internal_event.js
+++ b/jstests/change_streams/projection_fakes_internal_event.js
@@ -2,7 +2,10 @@
  * Tests that a user projection which fakes an internal topology-change event is handled gracefully
  * in a sharded cluster.
  * TODO SERVER-65778: rework this test when we can handle faked internal events more robustly.
- * @tags: [assumes_read_preference_unchanged]
+ *
+ * Tests that nothing crashes if a user fakes an internal event with a projection, so this is not
+ * valuable to test with a config shard.
+ * @tags: [assumes_read_preference_unchanged, config_shard_incompatible] */ (function() { "use strict"; @@ -212,4 +215,4 @@ testProjection = { assertChangeStreamBehaviour(testProjection, null, ErrorCodes.TypeMismatch); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/change_streams/queryable_encryption_change_stream.js b/jstests/change_streams/queryable_encryption_change_stream.js new file mode 100644 index 0000000000000..d4f6b7b6234af --- /dev/null +++ b/jstests/change_streams/queryable_encryption_change_stream.js @@ -0,0 +1,405 @@ +// +// Basic $changeStream tests for operations that perform queryable encryption. +// +// @tags: [ +// change_stream_does_not_expect_txns, +// assumes_unsharded_collection, +// featureFlagFLE2CleanupCommand +// ] +// +load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest and + // assert[Valid|Invalid]ChangeStreamNss. +import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js"; + +if (!buildInfo().modules.includes("enterprise")) { + jsTestLog("Skipping test as it requires the enterprise module"); + quit(); +} + +const dbName = "qetestdb"; +const collName = "qetestcoll"; +const initialConn = db.getMongo(); +const testDb = db.getSiblingDB(dbName); +const placeholderBinData0 = BinData(0, "WMdGo/tcDkE4UL6bgGYTN6oKFitgLXvhyhB9sbKxprk="); +const placeholderBinData6 = BinData(6, "WMdGo/tcDkE4UL6bgGYTN6oKFitgLXvhyhB9sbKxprk="); +const placeholderOID = ObjectId(); + +const origCanonicalizeEventForTesting = canonicalizeEventForTesting; + +function replaceRandomDataWithPlaceholders(event) { + for (let field in event) { + if (!Object.prototype.hasOwnProperty.call(event, field)) { + continue; + } + if (event[field] instanceof BinData) { + if (event[field].subtype() === 6) { + event[field] = placeholderBinData6; + } else if (event[field].subtype() === 0) { + event[field] = placeholderBinData0; + } + } else if (event[field] instanceof ObjectId) { + event[field] = placeholderOID; + } else if (typeof event[field] === "object") { + replaceRandomDataWithPlaceholders(event[field]); + } + } +} +canonicalizeEventForTesting = function(event, expected) { + if (event.hasOwnProperty("fullDocument") || event.hasOwnProperty("documentKey")) { + replaceRandomDataWithPlaceholders(event); + } + return origCanonicalizeEventForTesting(event, expected); +}; + +testDb.dropDatabase(); + +let encryptedClient = new EncryptedClient(initialConn, dbName); +assert.commandWorked(encryptedClient.createEncryptionCollection(collName, { + encryptedFields: { + "fields": [ + { + "path": "first", + "bsonType": "string", + /* contention: 0 is required for the cleanup tests to work */ + "queries": {"queryType": "equality", "contention": 0} + }, + ] + } +})); + +const cst = new ChangeStreamTest(testDb); +const ecoll = encryptedClient.getDB()[collName]; +const [escName, ecocName] = (() => { + let names = encryptedClient.getStateCollectionNamespaces(collName); + return [names.esc, names.ecoc]; +})(); + +const escInsertChange = { + documentKey: {_id: placeholderBinData0}, + fullDocument: {_id: placeholderBinData0}, + ns: {db: dbName, coll: escName}, + operationType: "insert", +}; +const ecocInsertChange = { + documentKey: {_id: placeholderOID}, + fullDocument: {_id: placeholderOID, fieldName: "first", value: placeholderBinData0}, + ns: {db: dbName, coll: ecocName}, + operationType: "insert", +}; +function expectedEDCInsertChange(id, last, implicitShardKey = undefined) { + let expected = { + documentKey: {_id: id}, + fullDocument: { + _id: id, + first: 
placeholderBinData6, + last: last, + "__safeContent__": [placeholderBinData0] + }, + ns: {db: dbName, coll: collName}, + operationType: "insert", + }; + if (encryptedClient.useImplicitSharding && implicitShardKey) { + expected.documentKey = Object.assign(expected.documentKey, implicitShardKey); + } + return expected; +} + +let expectedChange = undefined; +const testValues = [ + ["frodo", "baggins"], + ["merry", "brandybuck"], + ["pippin", "took"], + ["sam", "gamgee"], + ["rosie", "gamgee"], + ["paladin", "took"], +]; + +let cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); +let cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + +// Test that if there are no changes, we return an empty batch. +assert.eq(0, cursor.firstBatch.length, "Cursor had changes: " + tojson(cursor)); +assert.eq(0, cursordb.firstBatch.length, "Cursor had changes: " + tojson(cursordb)); + +jsTestLog("Testing single insert"); +{ + assert.commandWorked(ecoll.insert({_id: 0, first: "frodo", last: "baggins"})); + expectedChange = expectedEDCInsertChange(0, "baggins", {last: "baggins"}); + + cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]}); + cst.assertNextChangesEqualUnordered( + {cursor: cursordb, expectedChanges: [expectedChange, escInsertChange, ecocInsertChange]}); + cst.assertNoChange(cursor); + cst.assertNoChange(cursordb); +} + +jsTestLog("Testing second insert"); +{ + cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); + cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + + assert.commandWorked(ecoll.insert({_id: 1, first: "merry", last: "brandybuck"})); + expectedChange = expectedEDCInsertChange(1, "brandybuck", {last: "brandybuck"}); + + cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]}); + cst.assertNextChangesEqualUnordered( + {cursor: cursordb, expectedChanges: [expectedChange, escInsertChange, ecocInsertChange]}); + cst.assertNoChange(cursor); + cst.assertNoChange(cursordb); +} + +jsTestLog("Testing replacement update"); +{ + cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); + cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + + assert.commandWorked( + ecoll.update({last: "baggins"}, {first: "pippin", last: "took", location: "shire"})); + expectedChange = expectedEDCInsertChange(0, "took", {last: "baggins"}); + expectedChange.operationType = "replace"; + expectedChange.fullDocument.location = "shire"; + + cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]}); + cst.assertNextChangesEqualUnordered( + {cursor: cursordb, expectedChanges: [expectedChange, escInsertChange, ecocInsertChange]}); +} + +jsTestLog("Testing upsert"); +{ + cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); + cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + + assert.commandWorked( + ecoll.update({last: "gamgee"}, {_id: 2, first: "sam", last: "gamgee"}, {upsert: true})); + + expectedChange = expectedEDCInsertChange(2, "gamgee", {last: "gamgee"}); + cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]}); + cst.assertNextChangesEqualUnordered( + {cursor: cursordb, expectedChanges: [expectedChange, escInsertChange, ecocInsertChange]}); + cst.assertNoChange(cursor); + 
cst.assertNoChange(cursordb); +} + +jsTestLog("Testing modification update"); +{ + cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); + cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + assert.commandWorked(ecoll.update({last: "gamgee"}, {$set: {first: "rosie"}})); + expectedChange = { + documentKey: {_id: 2}, + ns: {db: dbName, coll: collName}, + operationType: "update", + updateDescription: { + removedFields: [], + updatedFields: {first: placeholderBinData6, "__safeContent__.1": placeholderBinData0}, + truncatedArrays: [] + }, + }; + let safeContentPullChange = { + documentKey: {_id: 2}, + ns: {db: dbName, coll: collName}, + operationType: "update", + updateDescription: { + removedFields: [], + updatedFields: {"__safeContent__": [placeholderBinData0]}, + truncatedArrays: [] + }, + }; + if (encryptedClient.useImplicitSharding) { + expectedChange.documentKey.last = "gamgee"; + safeContentPullChange.documentKey.last = "gamgee"; + } + + cst.assertNextChangesEqual( + {cursor: cursor, expectedChanges: [expectedChange, safeContentPullChange]}); + cst.assertNextChangesEqualUnordered({ + cursor: cursordb, + expectedChanges: [expectedChange, escInsertChange, ecocInsertChange, safeContentPullChange] + }); + cst.assertNoChange(cursor); + cst.assertNoChange(cursordb); +} + +jsTestLog("Testing findAndModify"); +{ + cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); + cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + assert.commandWorked(ecoll.runCommand({ + findAndModify: ecoll.getName(), + query: {last: "took"}, + update: {$set: {first: "paladin"}, $unset: {location: ""}}, + })); + expectedChange = { + documentKey: {_id: 0}, + ns: {db: dbName, coll: collName}, + operationType: "update", + updateDescription: { + removedFields: ["location"], + updatedFields: {first: placeholderBinData6, "__safeContent__.1": placeholderBinData0}, + truncatedArrays: [] + }, + }; + let safeContentPullChange = { + documentKey: {_id: 0}, + ns: {db: dbName, coll: collName}, + operationType: "update", + updateDescription: { + removedFields: [], + updatedFields: {"__safeContent__": [placeholderBinData0]}, + truncatedArrays: [] + }, + }; + if (encryptedClient.useImplicitSharding) { + expectedChange.documentKey.last = "took"; + safeContentPullChange.documentKey.last = "took"; + } + + cst.assertNextChangesEqual( + {cursor: cursor, expectedChanges: [expectedChange, safeContentPullChange]}); + cst.assertNextChangesEqualUnordered({ + cursor: cursordb, + expectedChanges: [expectedChange, escInsertChange, ecocInsertChange, safeContentPullChange] + }); + cst.assertNoChange(cursor); + cst.assertNoChange(cursordb); +} + +jsTestLog("Testing delete"); +{ + cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); + cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + assert.commandWorked(ecoll.remove({last: "gamgee"})); + expectedChange = { + documentKey: {_id: 2}, + ns: {db: dbName, coll: collName}, + operationType: "delete", + }; + if (encryptedClient.useImplicitSharding) { + expectedChange.documentKey.last = "gamgee"; + } + cst.assertNextChangesEqual({cursor: cursor, expectedChanges: [expectedChange]}); + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [expectedChange]}); + cst.assertNoChange(cursor); + cst.assertNoChange(cursordb); +} + +const ecocRenameChange = { + 
operationType: "rename", + ns: {db: dbName, coll: ecocName}, + to: {db: dbName, coll: ecocName + ".compact"}, +}; +const escDeleteChange = { + operationType: "delete", + ns: {db: dbName, coll: escName}, + documentKey: {_id: placeholderBinData0}, +}; +const escDeletesDropChange = { + operationType: "drop", + ns: {db: dbName, coll: escName + ".deletes"}, +}; +const ecocCompactDropChange = { + operationType: "drop", + ns: {db: dbName, coll: ecocName + ".compact"}, +}; + +jsTestLog("Testing compact"); +{ + // all non-anchors will be deleted by compact + const deleteCount = testDb[escName].countDocuments({value: {$exists: false}}); + const numUniqueValues = testValues.length; + + encryptedClient.assertEncryptedCollectionCounts(collName, 2, numUniqueValues, numUniqueValues); + + cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); + cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + + assert.commandWorked(ecoll.compact()); + encryptedClient.assertEncryptedCollectionCounts(collName, 2, numUniqueValues, 0); + const anchorCount = testDb[escName].countDocuments({value: {$exists: true}}); + const nonAnchorCount = testDb[escName].countDocuments({value: {$exists: false}}); + assert.eq(anchorCount, numUniqueValues); + assert.eq(nonAnchorCount, 0); + + cst.assertNoChange(cursor); + + escInsertChange.fullDocument.value = placeholderBinData0; + // temp ecoc rename + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [ecocRenameChange]}); + // normal anchor inserts + for (let i = 0; i < numUniqueValues; i++) { + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [escInsertChange]}); + } + // non-anchor deletes + for (let i = 0; i < deleteCount; i++) { + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [escDeleteChange]}); + } + // temp ecoc drop + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [ecocCompactDropChange]}); + cst.assertNoChange(cursordb); +} + +jsTestLog("Testing cleanup"); +{ + // insert new documents for each test value, so the ECOC & ESC have documents to clean up + for (let val of testValues) { + assert.commandWorked(ecoll.insert({first: val[0], last: val[1]})); + } + // ESC doesn't have null anchors yet, so the total delete count == ESC count before cleanup + const deleteCount = testDb[escName].countDocuments({}); + const nonAnchorCount = testDb[escName].countDocuments({value: {$exists: false}}); + const anchorCount = deleteCount - nonAnchorCount; + const numUniqueValues = testValues.length; + + encryptedClient.assertEncryptedCollectionCounts( + collName, 2 + numUniqueValues, numUniqueValues * 2, numUniqueValues); + assert.eq(anchorCount, numUniqueValues); + assert.eq(nonAnchorCount, numUniqueValues); + + cursor = + cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: testDb[collName]}); + cursordb = cst.startWatchingChanges({pipeline: [{$changeStream: {}}], collection: 1}); + + assert.commandWorked(ecoll.cleanup()); + encryptedClient.assertEncryptedCollectionCounts( + collName, 2 + numUniqueValues, numUniqueValues, 0); + encryptedClient.assertESCNonAnchorCount(collName, 0); + + cst.assertNoChange(cursor); + + const escDeletesInsertChange = { + documentKey: {_id: placeholderBinData0}, + fullDocument: {_id: placeholderBinData0}, + ns: {db: dbName, coll: escName + ".deletes"}, + operationType: "insert", + }; + + // temp ecoc rename + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [ecocRenameChange]}); + // each null 
anchor insert is followed by a single insert to esc.deletes + escInsertChange.fullDocument.value = placeholderBinData0; + for (let i = 0; i < anchorCount; i++) { + cst.assertNextChangesEqual( + {cursor: cursordb, expectedChanges: [escInsertChange, escDeletesInsertChange]}); + } + // non-anchors and regular anchors are deleted from ESC + for (let i = 0; i < deleteCount; i++) { + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [escDeleteChange]}); + } + // temp esc.deletes drop + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [escDeletesDropChange]}); + // temp ecoc.compact drop + cst.assertNextChangesEqual({cursor: cursordb, expectedChanges: [ecocCompactDropChange]}); + cst.assertNoChange(cursordb); +} + +cst.cleanUp(); + +canonicalizeEventForTesting = origCanonicalizeEventForTesting; diff --git a/jstests/change_streams/refine_collection_shard_key_event.js b/jstests/change_streams/refine_collection_shard_key_event.js index 5acde86c27bea..330caa302ea58 100644 --- a/jstests/change_streams/refine_collection_shard_key_event.js +++ b/jstests/change_streams/refine_collection_shard_key_event.js @@ -2,13 +2,11 @@ * Test that change streams returns refineCollectionShardKey events. * * @tags: [ - * requires_fcv_61, * requires_sharding, * uses_change_streams, * change_stream_does_not_expect_txns, * assumes_unsharded_collection, * assumes_read_preference_unchanged, - * featureFlagChangeStreamsFurtherEnrichedEvents * ] */ diff --git a/jstests/change_streams/reshard_collection_event.js b/jstests/change_streams/reshard_collection_event.js index f55e081322cd8..6992a66c63850 100644 --- a/jstests/change_streams/reshard_collection_event.js +++ b/jstests/change_streams/reshard_collection_event.js @@ -2,13 +2,11 @@ * Test that change streams returns reshardCollection events. * * @tags: [ - * requires_fcv_61, * requires_sharding, * uses_change_streams, * change_stream_does_not_expect_txns, * assumes_unsharded_collection, * assumes_read_preference_unchanged, - * featureFlagChangeStreamsFurtherEnrichedEvents * ] */ diff --git a/jstests/change_streams/resume_from_high_water_mark_token.js b/jstests/change_streams/resume_from_high_water_mark_token.js index e0004c2dd2589..4d5c59eced077 100644 --- a/jstests/change_streams/resume_from_high_water_mark_token.js +++ b/jstests/change_streams/resume_from_high_water_mark_token.js @@ -263,4 +263,4 @@ assert.soon(() => { return csCursor.hasNext() && csCursor.next().operationType === "invalidate"; }); csCursor.close(); -})(); \ No newline at end of file +})(); diff --git a/jstests/change_streams/shell_helper.js b/jstests/change_streams/shell_helper.js index bb13609746795..f43f5fa655c19 100644 --- a/jstests/change_streams/shell_helper.js +++ b/jstests/change_streams/shell_helper.js @@ -124,8 +124,7 @@ checkNextChange(changeStreamCursor, expected); jsTestLog("Testing watch() with batchSize"); // Only test mongod because mongos uses batch size 0 for aggregate commands internally to // establish cursors quickly. GetMore on mongos doesn't respect batch size due to SERVER-31992. -const isMongos = FixtureHelpers.isMongos(db); -if (!isMongos) { +if (!FixtureHelpers.isMongos(db)) { // Increase a field by 5 times and verify the batch size is respected. 
for (let i = 0; i < 5; i++) { assert.commandWorked(coll.update({_id: 1}, {$inc: {x: 1}})); diff --git a/jstests/change_streams/show_expanded_events.js b/jstests/change_streams/show_expanded_events.js index b3ae9c6faafca..cba41827548c5 100644 --- a/jstests/change_streams/show_expanded_events.js +++ b/jstests/change_streams/show_expanded_events.js @@ -2,11 +2,9 @@ * Tests the behavior of change streams in the presence of 'showExpandedEvents' flag. * * @tags: [ - * requires_fcv_61, * # The test assumes certain ordering of the events. The chunk migrations on a sharded collection * # could break the test. * assumes_unsharded_collection, - * featureFlagChangeStreamsFurtherEnrichedEvents, * ] */ (function() { diff --git a/jstests/change_streams/show_resharding_system_events.js b/jstests/change_streams/show_resharding_system_events.js index d931500746a3f..698a890db49ec 100644 --- a/jstests/change_streams/show_resharding_system_events.js +++ b/jstests/change_streams/show_resharding_system_events.js @@ -4,8 +4,6 @@ * operate in a sharded cluster. * * @tags: [ - * requires_fcv_61, - * featureFlagChangeStreamsFurtherEnrichedEvents, * requires_sharding, * uses_change_streams, * change_stream_does_not_expect_txns, @@ -13,9 +11,7 @@ * assumes_read_preference_unchanged, * ] */ -(function() { -"use strict"; - +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load('jstests/libs/change_stream_util.js'); // For 'assertChangeStreamEventEq'. // Create a single-shard cluster for this test. @@ -74,7 +70,7 @@ const origNs = { db: testDB.getName(), coll: testColl.getName() }; -const expectedReshardingEvents = [ +let expectedReshardingEvents = [ {ns: reshardingNs, collectionUUID: newUUID, operationType: "create"}, { ns: reshardingNs, @@ -118,6 +114,52 @@ const expectedReshardingEvents = [ }, ]; +if (FeatureFlagUtil.isEnabled(st.s, "ReshardingImprovements")) { + expectedReshardingEvents = [ + {ns: reshardingNs, collectionUUID: newUUID, operationType: "create"}, + { + ns: reshardingNs, + collectionUUID: newUUID, + operationType: "shardCollection", + operationDescription: {shardKey: {a: 1}} + }, + { + ns: reshardingNs, + collectionUUID: newUUID, + operationType: "insert", + fullDocument: {_id: 0, a: 0}, + documentKey: {a: 0, _id: 0} + }, + { + ns: reshardingNs, + collectionUUID: newUUID, + operationType: "insert", + fullDocument: {_id: 1, a: 1}, + documentKey: {a: 1, _id: 1} + }, + { + ns: reshardingNs, + collectionUUID: newUUID, + operationType: "createIndexes", + operationDescription: {indexes: [{v: 2, key: {a: 1}, name: "a_1"}]} + }, + { + ns: origNs, + collectionUUID: oldUUID, + operationType: "reshardCollection", + operationDescription: + {reshardUUID: newUUID, shardKey: {a: 1}, oldShardKey: {_id: 1}, unique: false} + }, + { + ns: origNs, + collectionUUID: newUUID, + operationType: "insert", + fullDocument: {_id: 2, a: 2}, + documentKey: {a: 2, _id: 2} + }, + ]; +} + // Helper to confirm the sequence of events observed in the change stream. function assertChangeStreamEventSequence(csConfig, expectedEvents) { // Open a change stream on the test DB using the given configuration. 
@@ -141,4 +183,3 @@ const nonSystemEvents = assertChangeStreamEventSequence({showSystemEvents: false}, nonSystemEvents); st.stop(); -}()); diff --git a/jstests/change_streams/timeseries.js b/jstests/change_streams/timeseries.js index a279bfc14ad04..e285e26a92769 100644 --- a/jstests/change_streams/timeseries.js +++ b/jstests/change_streams/timeseries.js @@ -7,11 +7,8 @@ * requires_fcv_61, * ] */ -(function() { -"use strict"; - load("jstests/libs/change_stream_util.js"); // For ChangeStreamTest. -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; let testDB = db.getSiblingDB(jsTestName()); testDB.dropDatabase(); @@ -43,12 +40,12 @@ let curNoEvents = testDB.watch([], {showExpandedEvents: true}); assert.commandWorked(testDB.createCollection( jsTestName(), - {timeseries: {timeField: "ts", metaField: "meta"}})); // on buckets ns and view ns -coll.createIndex({ts: 1, "meta.b": 1}, {name: "dropMe"}); // on buckets ns -coll.insertOne({_id: 1, ts: new Date(1000), meta: {a: 1}}); // on buckets ns -coll.insertOne({_id: 1, ts: new Date(1000), meta: {a: 1}}); // on buckets ns -coll.update({"meta.a": 1}, {$set: {"meta.b": 2}}); // on buckets ns -coll.remove({"meta.a": 1}); // on buckets ns + {timeseries: {timeField: "ts", metaField: "meta"}})); // on buckets ns and view ns +coll.createIndex({ts: 1, "meta.b": 1}, {name: "dropMe"}); // on buckets ns +coll.insertOne({_id: 1, ts: new Date(1000), meta: {a: 1}}); // on buckets ns +coll.insertOne({_id: 1, ts: new Date(1000), meta: {a: 1}}); // on buckets ns +coll.update({"meta.a": 1}, {$set: {"meta.b": 2}}, {multi: true}); // on buckets ns +coll.remove({"meta.a": 1}); // on buckets ns // collMod granularity. on both buckets ns and view ns assert.commandWorked(testDB.runCommand({collMod: collName, timeseries: {granularity: "hours"}})); // collMod expiration. just on buckets ns @@ -163,6 +160,16 @@ let expectedChanges = [ {"data._id.1": ["data", "_id", "1"], "data.ts.1": ["data", "ts", "1"]} } }, + { + "operationType": "update", + "ns": {"db": dbName, "coll": bucketsCollName}, + "updateDescription": { + "updatedFields": {"meta.b": 2}, + "removedFields": [], + "truncatedArrays": [], + "disambiguatedPaths": {} + } + }, {"operationType": "delete", "ns": {"db": dbName, "coll": bucketsCollName}}, { "operationType": "modify", @@ -313,5 +320,4 @@ let curWithEventsNormal = new DBCommandCursor(testDB, {ok: 1, cursor: curWithEve assertNoMoreBucketsEvents(curWithEventsNormal); // No events cursor should have no system.buckets events. -assertNoMoreBucketsEvents(curNoEvents); -}()); +assertNoMoreBucketsEvents(curNoEvents); \ No newline at end of file diff --git a/jstests/change_streams/whole_cluster_metadata_notifications.js b/jstests/change_streams/whole_cluster_metadata_notifications.js index e960affc2ef8c..02164b0b68259 100644 --- a/jstests/change_streams/whole_cluster_metadata_notifications.js +++ b/jstests/change_streams/whole_cluster_metadata_notifications.js @@ -12,7 +12,8 @@ load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Col load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. // Define two databases. We will conduct our tests by creating one collection in each. 
-const testDB1 = db.getSiblingDB(jsTestName()), testDB2 = db.getSiblingDB(jsTestName() + "_other"); +const testDB1 = db.getSiblingDB("whole_cluster_metadata"), + testDB2 = db.getSiblingDB("whole_cluster_metadata_other"); const adminDB = db.getSiblingDB("admin"); assert.commandWorked(testDB1.dropDatabase()); @@ -153,7 +154,7 @@ for (let collToInvalidate of [db1Coll, db2Coll]) { // passthrough suites since we cannot guarantee the primary shard of the target database // and renameCollection requires the source and destination to be on the same shard. if (!FixtureHelpers.isMongos(testDB)) { - const otherDB = testDB.getSiblingDB(testDB.getName() + "_rename_target"); + const otherDB = testDB.getSiblingDB(testDB.getName() + "_target"); // Ensure the target database exists. const collOtherDB = assertDropAndRecreateCollection(otherDB, "test"); assertDropCollection(otherDB, collOtherDB.getName()); diff --git a/jstests/change_streams/whole_db_metadata_notifications.js b/jstests/change_streams/whole_db_metadata_notifications.js index 35b75cbc05ca6..29fe3507c7bac 100644 --- a/jstests/change_streams/whole_db_metadata_notifications.js +++ b/jstests/change_streams/whole_db_metadata_notifications.js @@ -10,8 +10,10 @@ load('jstests/replsets/libs/two_phase_drops.js'); // For 'TwoPhaseDropCollectio load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. -const testDB = db.getSiblingDB(jsTestName()); +const testDB = db.getSiblingDB("whole_db_metadata_notifs"); +const otherDB = testDB.getSiblingDB("whole_db_metadata_notifs_other"); testDB.dropDatabase(); +otherDB.dropDatabase(); let cst = new ChangeStreamTest(testDB); // Write a document to the collection and test that the change stream returns it @@ -55,7 +57,6 @@ assert.commandWorked(testDB.runCommand( {aggregate: 1, pipeline: [{$changeStream: {resumeAfter: resumeToken}}], cursor: {}})); // Test that invalidation entries for other databases are filtered out. 
-const otherDB = testDB.getSiblingDB(jsTestName() + "other"); const otherDBColl = otherDB[collName + "_other"]; assert.commandWorked(otherDBColl.insert({_id: 0})); diff --git a/jstests/client_encrypt/fle_auto_decrypt.js b/jstests/client_encrypt/fle_auto_decrypt.js index 650125e0fb7da..60c20b899d27f 100644 --- a/jstests/client_encrypt/fle_auto_decrypt.js +++ b/jstests/client_encrypt/fle_auto_decrypt.js @@ -71,4 +71,4 @@ const clientSideFLEOptionsBypassAutoEncrypt = { test(conn, clientSideFLEOptionsBypassAutoEncrypt, keyId); MongoRunner.stopMongod(conn); -}()); \ No newline at end of file +}()); diff --git a/jstests/client_encrypt/fle_key_faults.js b/jstests/client_encrypt/fle_key_faults.js index 204ee277ec5e4..c57f5ff372b22 100644 --- a/jstests/client_encrypt/fle_key_faults.js +++ b/jstests/client_encrypt/fle_key_faults.js @@ -82,4 +82,4 @@ testFaults((keyId, shell) => { }); MongoRunner.stopMongod(conn); -}()); \ No newline at end of file +}()); diff --git a/jstests/client_encrypt/fle_keys.js b/jstests/client_encrypt/fle_keys.js index 93fb00004784b..75eaa0889b876 100644 --- a/jstests/client_encrypt/fle_keys.js +++ b/jstests/client_encrypt/fle_keys.js @@ -75,4 +75,4 @@ keyVault.createKey("local", ['mongoKey3']); assert.eq(3, keyVault.getKeys().itcount()); MongoRunner.stopMongod(conn); -}()); \ No newline at end of file +}()); diff --git a/jstests/concurrency/fsm_example.js b/jstests/concurrency/fsm_example.js index fb012462a158f..d4cd9683fdaf1 100644 --- a/jstests/concurrency/fsm_example.js +++ b/jstests/concurrency/fsm_example.js @@ -6,7 +6,7 @@ * Includes documentation of each property on $config. * Serves as a template for new workloads. */ -var $config = (function() { +export const $config = (function() { // 'data' is passed (copied) to each of the worker threads. var data = {}; diff --git a/jstests/concurrency/fsm_example_inheritance.js b/jstests/concurrency/fsm_example_inheritance.js index dd6364b2d87a7..7352980c5488d 100644 --- a/jstests/concurrency/fsm_example_inheritance.js +++ b/jstests/concurrency/fsm_example_inheritance.js @@ -1,10 +1,8 @@ -'use strict'; - -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_example.js'); // for $config +import {$config as $baseConfig} from 'jstests/concurrency/fsm_example.js'; +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; // extendWorkload takes a $config object and a callback, and returns an extended $config object. -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // In the callback, $super is the base workload definition we're // extending, // and $config is the extended workload definition we're creating. 
diff --git a/jstests/concurrency/fsm_libs/cluster.js b/jstests/concurrency/fsm_libs/cluster.js index 44fe572bff077..a78f6a8c76791 100644 --- a/jstests/concurrency/fsm_libs/cluster.js +++ b/jstests/concurrency/fsm_libs/cluster.js @@ -217,7 +217,7 @@ var Cluster = function(options) { i = 0; while (st.rs(i)) { - var rs = st.rs(i++); + const rs = st.rs(i++); this._addReplicaSetConns(rs); replSets.push(rs); } @@ -459,6 +459,12 @@ var Cluster = function(options) { return cluster; }; + this.getReplicaSets = function getReplicaSets() { + assert(initialized, 'cluster must be initialized first'); + assert(this.isReplication() || this.isSharded()); + return replSets; + }; + this.isBalancerEnabled = function isBalancerEnabled() { return this.isSharded() && options.sharded.enableBalancer; }; @@ -594,10 +600,10 @@ var Cluster = function(options) { }; /* - * Returns true if this cluster has a catalog shard. - * Catalog shard always have shard ID equal to "config". + * Returns true if this cluster has a config shard. + * Config shard always have shard ID equal to "config". */ - this.hasCatalogShard = function hasCatalogShard() { + this.hasConfigShard = function hasConfigShard() { if (!this.isSharded()) { return false; } diff --git a/jstests/concurrency/fsm_libs/extend_workload.js b/jstests/concurrency/fsm_libs/extend_workload.js index 84d094cb36e00..ced5511d06520 100644 --- a/jstests/concurrency/fsm_libs/extend_workload.js +++ b/jstests/concurrency/fsm_libs/extend_workload.js @@ -1,5 +1,3 @@ -'use strict'; - load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig /** @@ -13,7 +11,7 @@ load('jstests/concurrency/fsm_libs/parse_config.js'); // for parseConfig * return $config; * }); */ -function extendWorkload($config, callback) { +export function extendWorkload($config, callback) { assert.eq(2, arguments.length, 'extendWorkload must be called with 2 arguments: $config and callback'); diff --git a/jstests/concurrency/fsm_libs/resmoke_runner.js b/jstests/concurrency/fsm_libs/resmoke_runner.js index 2ca66fee68b45..bf3461cda6347 100644 --- a/jstests/concurrency/fsm_libs/resmoke_runner.js +++ b/jstests/concurrency/fsm_libs/resmoke_runner.js @@ -1,4 +1,3 @@ -(function() { 'use strict'; load('jstests/concurrency/fsm_libs/runner.js'); // for runner.internals @@ -28,8 +27,8 @@ function cleanupWorkload(workload, context, cluster, errors, header) { return true; } -function runWorkloads(workloads, - {cluster: clusterOptions = {}, execution: executionOptions = {}} = {}) { +async function runWorkloads(workloads, + {cluster: clusterOptions = {}, execution: executionOptions = {}} = {}) { assert.gt(workloads.length, 0, 'need at least one workload to run'); const executionMode = {serial: true}; @@ -53,7 +52,7 @@ function runWorkloads(workloads, const context = {}; const applyMultipliers = true; - loadWorkloadContext(workloads, context, executionOptions, applyMultipliers); + await loadWorkloadContext(workloads, context, executionOptions, applyMultipliers); // Constructing a Cluster instance calls its internal validateClusterOptions() function, // which fills in any properties that aren't explicitly present in 'clusterOptions'. 
We do @@ -287,5 +286,4 @@ if (Object.keys(sessionOptions).length > 0 || TestData.runningWithSessions) { executionOptions.sessionOptions = sessionOptions; } -runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions}); -})(); +await runWorkloads(workloads, {cluster: clusterOptions, execution: executionOptions}); diff --git a/jstests/concurrency/fsm_libs/runner.js b/jstests/concurrency/fsm_libs/runner.js index 272c58b1fd704..b9001815361fb 100644 --- a/jstests/concurrency/fsm_libs/runner.js +++ b/jstests/concurrency/fsm_libs/runner.js @@ -400,16 +400,17 @@ var runner = (function() { config.data, 'threadCount', {enumerable: true, value: config.threadCount}); } - function loadWorkloadContext(workloads, context, executionOptions, applyMultipliers) { - workloads.forEach(function(workload) { - load(workload); // for $config + async function loadWorkloadContext(workloads, context, executionOptions, applyMultipliers) { + for (const workload of workloads) { + const {$config} = await import(workload); assert.neq('undefined', typeof $config, '$config was not defined by ' + workload); + print(tojson($config)); context[workload] = {config: parseConfig($config)}; if (applyMultipliers) { context[workload].config.iterations *= executionOptions.iterationMultiplier; context[workload].config.threadCount *= executionOptions.threadMultiplier; } - }); + } } function printWorkloadSchedule(schedule) { @@ -591,7 +592,7 @@ var runner = (function() { 'after workload-group teardown and data clean-up'); } - function runWorkloads( + async function runWorkloads( workloads, clusterOptions, executionMode, executionOptions, cleanupOptions) { assert.gt(workloads.length, 0, 'need at least one workload to run'); @@ -625,7 +626,8 @@ var runner = (function() { globalAssertLevel = assertLevel; var context = {}; - loadWorkloadContext(workloads, context, executionOptions, true /* applyMultipliers */); + await loadWorkloadContext( + workloads, context, executionOptions, true /* applyMultipliers */); var threadMgr = new ThreadManager(clusterOptions, executionMode); var cluster = new Cluster(clusterOptions); @@ -694,32 +696,34 @@ var runner = (function() { } return { - serial: function serial(workloads, clusterOptions, executionOptions, cleanupOptions) { + serial: async function serial(workloads, clusterOptions, executionOptions, cleanupOptions) { clusterOptions = clusterOptions || {}; executionOptions = executionOptions || {}; cleanupOptions = cleanupOptions || {}; - runWorkloads( + await runWorkloads( workloads, clusterOptions, {serial: true}, executionOptions, cleanupOptions); }, - parallel: function parallel(workloads, clusterOptions, executionOptions, cleanupOptions) { - clusterOptions = clusterOptions || {}; - executionOptions = executionOptions || {}; - cleanupOptions = cleanupOptions || {}; - - runWorkloads( - workloads, clusterOptions, {parallel: true}, executionOptions, cleanupOptions); - }, - - composed: function composed(workloads, clusterOptions, executionOptions, cleanupOptions) { - clusterOptions = clusterOptions || {}; - executionOptions = executionOptions || {}; - cleanupOptions = cleanupOptions || {}; - - runWorkloads( - workloads, clusterOptions, {composed: true}, executionOptions, cleanupOptions); - }, + parallel: + async function parallel(workloads, clusterOptions, executionOptions, cleanupOptions) { + clusterOptions = clusterOptions || {}; + executionOptions = executionOptions || {}; + cleanupOptions = cleanupOptions || {}; + + await runWorkloads( + workloads, clusterOptions, {parallel: true}, 
executionOptions, cleanupOptions); + }, + + composed: + async function composed(workloads, clusterOptions, executionOptions, cleanupOptions) { + clusterOptions = clusterOptions || {}; + executionOptions = executionOptions || {}; + cleanupOptions = cleanupOptions || {}; + + await runWorkloads( + workloads, clusterOptions, {composed: true}, executionOptions, cleanupOptions); + }, internals: { validateExecutionOptions, diff --git a/jstests/concurrency/fsm_libs/worker_thread.js b/jstests/concurrency/fsm_libs/worker_thread.js index 3a4623f317d59..deeeb83e14191 100644 --- a/jstests/concurrency/fsm_libs/worker_thread.js +++ b/jstests/concurrency/fsm_libs/worker_thread.js @@ -20,7 +20,7 @@ var workerThread = (function() { // args.errorLatch = CountDownLatch instance that threads count down when they error // args.sessionOptions = the options to start a session with // run = callback that takes a map of workloads to their associated $config - function main(workloads, args, run) { + async function main(workloads, args, run) { var myDB; var configs = {}; var connectionString = 'mongodb://' + args.host + '/?appName=tid:' + args.tid; @@ -172,8 +172,8 @@ var workerThread = (function() { load('jstests/libs/override_methods/set_read_and_write_concerns.js'); } - workloads.forEach(function(workload) { - load(workload); // for $config + for (const workload of workloads) { + const {$config} = await import(workload); var config = parseConfig($config); // to normalize // Copy any modifications that were made to $config.data @@ -213,7 +213,7 @@ var workerThread = (function() { tid: args.tid, transitions: config.transitions }; - }); + } args.latch.countDown(); diff --git a/jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js b/jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js index 5861d5dc48b21..4be2fa55007a3 100644 --- a/jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js +++ b/jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js @@ -10,17 +10,17 @@ load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js'); // In-memory representation of the documents owned by this thread for all given collections. Used to // verify the expected documents are deleted in the collection. let expectedDocuments = {}; - -// The number of "groups" each document within those assigned to a thread can belong to for a given -// collection. Entire groups will be deleted at once by the multiDelete state function, so this is -// effectively the number of times that stage can be meaningfully run per thread. -const numGroupsWithinThread = $config.data.partitionSize / 5; let nextGroupId = {}; /** * Returns the next groupId for the multiDelete state function to use. */ -function getNextGroupIdForDelete(collName) { +function getNextGroupIdForDelete(collName, partitionSize) { + // The number of "groups" each document within those assigned to a thread can belong to for a + // given collection. Entire groups will be deleted at once by the multiDelete state function, so + // this is effectively the number of times that stage can be meaningfully run per thread. + const numGroupsWithinThread = partitionSize / 5; + const nextId = nextGroupId[collName]; nextGroupId[collName] = (nextGroupId[collName] + 1) % numGroupsWithinThread; return nextId; @@ -65,7 +65,7 @@ function exactIdDelete(db, collName, session) { * Sends a multi=true delete without the shard key that targets all documents assigned to this * thread, which should be sent to all shards. 
*/ -function multiDelete(db, collName, session, tid) { +function multiDelete(db, collName, session, tid, partitionSize) { // If no documents remain in our partition, there is nothing to do. if (!expectedDocuments[collName].length) { print('This thread owns no more documents for collection ' + db[collName] + @@ -74,7 +74,7 @@ function multiDelete(db, collName, session, tid) { } // Delete a group of documents within those assigned to this thread. - const groupIdToDelete = getNextGroupIdForDelete(collName); + const groupIdToDelete = getNextGroupIdForDelete(collName, partitionSize); const collection = session.getDatabase(db.getName()).getCollection(collName); withTxnAndAutoRetry(session, () => { @@ -119,7 +119,12 @@ function verifyDocuments(db, collName, tid) { * Gives each document assigned to this thread a group id for multi=true deletes, and loads each * document into memory. */ -function initDeleteInTransactionStates(db, collName, tid) { +function initDeleteInTransactionStates(db, collName, tid, partitionSize) { + // The number of "groups" each document within those assigned to a thread can belong to for a + // given collection. Entire groups will be deleted at once by the multiDelete state function, so + // this is effectively the number of times that stage can be meaningfully run per thread. + const numGroupsWithinThread = partitionSize / 5; + // Assign each document owned by this thread to a different "group" so they can be multi // deleted by group later. let nextGroupIdForInit = nextGroupId[collName] = 0; diff --git a/jstests/concurrency/fsm_workload_helpers/kill_session.js b/jstests/concurrency/fsm_workload_helpers/kill_session.js index f440f25d8d55a..004db48ecacd1 100644 --- a/jstests/concurrency/fsm_workload_helpers/kill_session.js +++ b/jstests/concurrency/fsm_workload_helpers/kill_session.js @@ -23,7 +23,10 @@ function killSession(db, collName) { ourSessionWasKilled = true; continue; } else { - assertAlways.commandFailedWithCode(res, ErrorCodes.DuplicateKey); + assertAlways.commandFailedWithCode( + res, + [ErrorCodes.DuplicateKey, ErrorCodes.WriteConcernFailed], + 'unexpected error code: ' + res.code + ': ' + res.message); } const sessionToKill = db.getSiblingDB("config").system.sessions.aggregate([ diff --git a/jstests/concurrency/fsm_workloads/CRUD_and_commands.js b/jstests/concurrency/fsm_workloads/CRUD_and_commands.js index 047c22bb6829c..95768079b8f95 100644 --- a/jstests/concurrency/fsm_workloads/CRUD_and_commands.js +++ b/jstests/concurrency/fsm_workloads/CRUD_and_commands.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform CRUD operations, some of which may implicitly create collections, in parallel with * collection-dropping operations. 
@@ -7,14 +5,13 @@ * @tags: [ * ] */ -var $config = (function() { - const data = {numIds: 10}; +export const $config = (function() { + const data = {numIds: 10, docValue: "mydoc"}; const states = { init: function init(db, collName) { this.session = db.getMongo().startSession({causalConsistency: true}); this.sessionDb = this.session.getDatabase(db.getName()); - this.docValue = "mydoc"; }, insertDocs: function insertDocs(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/CRUD_and_commands_with_createindexes.js b/jstests/concurrency/fsm_workloads/CRUD_and_commands_with_createindexes.js index 890225b15fed7..148d2db19e1a9 100644 --- a/jstests/concurrency/fsm_workloads/CRUD_and_commands_with_createindexes.js +++ b/jstests/concurrency/fsm_workloads/CRUD_and_commands_with_createindexes.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform CRUD operations, some of which may implicitly create collections. Also perform index * creations which may implicitly create collections. Performs these in parallel with collection- @@ -8,13 +6,13 @@ * @tags: [ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/CRUD_and_commands.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/CRUD_and_commands.js"; // TODO(SERVER-46971) combine with CRUD_and_commands.js and remove `local` readConcern. TestData.defaultTransactionReadConcernLevel = "local"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { const origStates = Object.keys($config.states); $config.states = Object.extend({ createIndex: function createIndex(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/CRUD_clustered_collection.js b/jstests/concurrency/fsm_workloads/CRUD_clustered_collection.js index 351bd0bfbb1ec..fce942e3fa4b8 100644 --- a/jstests/concurrency/fsm_workloads/CRUD_clustered_collection.js +++ b/jstests/concurrency/fsm_workloads/CRUD_clustered_collection.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform CRUD operations in parallel on a clustered collection. Disallows dropping the collection * to prevent implicit creation of a non-clustered collection. @@ -9,10 +7,10 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/CRUD_and_commands.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/CRUD_and_commands.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Exclude dropCollection to prevent implicit collection creation of a non-clustered // collection. 
const newStates = $super.states; diff --git a/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js b/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js index 36bddb4c7d6f0..72f18b0eff507 100644 --- a/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js +++ b/jstests/concurrency/fsm_workloads/access_collection_in_transaction_after_catalog_changes.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Transactions with local (and majority) readConcern perform untimestamped reads and do not check * the min visible snapshot for collections, so they can access collections whose catalog @@ -14,7 +12,7 @@ * @tags: [uses_transactions, requires_replication] */ -var $config = (function() { +export const $config = (function() { var states = (function() { function init(db, collName) { this.session = db.getMongo().startSession(); diff --git a/jstests/concurrency/fsm_workloads/agg_base.js b/jstests/concurrency/fsm_workloads/agg_base.js index 0b3ad43241855..9495c6cc53518 100644 --- a/jstests/concurrency/fsm_workloads/agg_base.js +++ b/jstests/concurrency/fsm_workloads/agg_base.js @@ -1,12 +1,10 @@ -'use strict'; - /** * agg_base.js * * Base workload for aggregation. Inserts a bunch of documents in its setup, * then each thread does an aggregation with an empty $match. */ -var $config = (function() { +export const $config = (function() { var data = { numDocs: 1000, // Use 12KB documents by default. This number is useful because 12,000 documents each of diff --git a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js index e503873619e5d..71bfbb3bcadea 100644 --- a/jstests/concurrency/fsm_workloads/agg_graph_lookup.js +++ b/jstests/concurrency/fsm_workloads/agg_graph_lookup.js @@ -1,11 +1,9 @@ -'use strict'; - /** * agg_graph_lookup.js * * Runs a $graphLookup aggregation simultaneously with updates. */ -var $config = (function() { +export const $config = (function() { const data = {numDocs: 1000}; const states = (function() { diff --git a/jstests/concurrency/fsm_workloads/agg_group_external.js b/jstests/concurrency/fsm_workloads/agg_group_external.js index adb7a787e20d5..c61378bcafc63 100644 --- a/jstests/concurrency/fsm_workloads/agg_group_external.js +++ b/jstests/concurrency/fsm_workloads/agg_group_external.js @@ -1,5 +1,3 @@ -'use strict'; - /** * agg_group_external.js * @@ -8,10 +6,10 @@ * The data passed to the $group is greater than 100MB, which should force * disk to be used. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // use enough docs to exceed 100MB, the in-memory limit for $sort and $group $config.data.numDocs = 24 * 1000; var MB = 1024 * 1024; // bytes diff --git a/jstests/concurrency/fsm_workloads/agg_lookup.js b/jstests/concurrency/fsm_workloads/agg_lookup.js index cfc2cfc8086fe..a688094fa1ac4 100644 --- a/jstests/concurrency/fsm_workloads/agg_lookup.js +++ b/jstests/concurrency/fsm_workloads/agg_lookup.js @@ -1,11 +1,9 @@ -'use strict'; - /** * agg_lookup.js * * Runs a $lookup aggregation simultaneously with updates. */ -var $config = (function() { +export const $config = (function() { const data = {numDocs: 100}; const states = (function() { diff --git a/jstests/concurrency/fsm_workloads/agg_match.js b/jstests/concurrency/fsm_workloads/agg_match.js index 25aa5a62eee41..78b54f7a9340a 100644 --- a/jstests/concurrency/fsm_workloads/agg_match.js +++ b/jstests/concurrency/fsm_workloads/agg_match.js @@ -1,14 +1,12 @@ -'use strict'; - /** * agg_match.js * * Runs an aggregation with a $match that returns half the documents. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.getOutCollName = function getOutCollName(collName) { return collName + '_out_agg_match'; }; diff --git a/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js b/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js index 06587f20be08f..93db7dfa87cdf 100644 --- a/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js +++ b/jstests/concurrency/fsm_workloads/agg_merge_when_matched_replace_with_new.js @@ -1,5 +1,3 @@ -'use strict'; - /** * agg_merge_when_matched_replace_with_new.js * @@ -12,10 +10,12 @@ * requires_non_retryable_writes, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Set the collection to run concurrent moveChunk operations as the output collection. 
$config.data.collWithMigrations = "agg_merge_when_matched_replace_with_new"; $config.data.threadRunCount = 0; diff --git a/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js b/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js index 739a7dc50e01d..0f9cf18e2b1fa 100644 --- a/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js +++ b/jstests/concurrency/fsm_workloads/agg_merge_when_not_matched_insert.js @@ -1,5 +1,3 @@ -'use strict'; - /** * agg_merge_when_not_matched_insert.js * @@ -11,18 +9,20 @@ * assumes_balancer_off, * requires_non_retryable_writes, * incompatible_with_gcov, - * # The config fuzzer causes certain commands to time out periodically. - * does_not_support_config_fuzzer, *] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Set the collection to run concurrent moveChunk operations as the output collection. $config.data.collWithMigrations = "agg_merge_when_not_matched_insert"; $config.data.threadRunCount = 0; + let initialMaxCatchUpPercentageBeforeBlockingWrites = null; + $config.states.aggregate = function aggregate(db, collName, connCache) { const res = db[collName].aggregate([ { @@ -52,5 +52,47 @@ var $config = extendWorkload($config, function($config, $super) { this.threadRunCount += 1; }; + // This test is sensitive to low values of the parameter + // maxCatchUpPercentageBeforeBlockingWrites, which can be set by the config server. We set a min + // bound for this parameter here. + $config.setup = function setup(db, collName, cluster) { + $super.setup.apply(this, [db, collName, cluster]); + + cluster.executeOnMongodNodes((db) => { + const param = assert.commandWorked( + db.adminCommand({getParameter: 1, maxCatchUpPercentageBeforeBlockingWrites: 1})); + if (param.hasOwnProperty("maxCatchUpPercentageBeforeBlockingWrites")) { + const defaultValue = 10; + if (param.maxCatchUpPercentageBeforeBlockingWrites < defaultValue) { + jsTest.log( + "Parameter `maxCatchUpPercentageBeforeBlockingWrites` value too low: " + + param.maxCatchUpPercentageBeforeBlockingWrites + + ". 
Setting value to default: " + defaultValue + "."); + initialMaxCatchUpPercentageBeforeBlockingWrites = + param.maxCatchUpPercentageBeforeBlockingWrites; + assert.commandWorked(db.adminCommand( + {setParameter: 1, maxCatchUpPercentageBeforeBlockingWrites: defaultValue})); + } + } + }); + }; + + $config.teardown = function teardown(db, collName, cluster) { + if (initialMaxCatchUpPercentageBeforeBlockingWrites) { + jsTest.log( + "Resetting parameter `maxCatchUpPercentageBeforeBlockingWrites` to original value: " + + initialMaxCatchUpPercentageBeforeBlockingWrites); + cluster.executeOnMongodNodes((db) => { + assert.commandWorked(db.adminCommand({ + setParameter: 1, + maxCatchUpPercentageBeforeBlockingWrites: + initialMaxCatchUpPercentageBeforeBlockingWrites + })); + }); + } + + $super.teardown.apply(this, [db, collName, cluster]); + }; + return $config; }); diff --git a/jstests/concurrency/fsm_workloads/agg_out.js b/jstests/concurrency/fsm_workloads/agg_out.js index 86ebca8801a9e..cbec48e16929a 100644 --- a/jstests/concurrency/fsm_workloads/agg_out.js +++ b/jstests/concurrency/fsm_workloads/agg_out.js @@ -1,5 +1,3 @@ -'use strict'; - /** * agg_out.js * @@ -14,11 +12,11 @@ * * @tags: [requires_capped] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js"; load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use a smaller document size, but more iterations. The smaller documents will ensure each // operation is faster, giving us time to do more operations and thus increasing the likelihood // that any two operations will be happening concurrently. @@ -29,6 +27,7 @@ var $config = extendWorkload($config, function($config, $super) { // because it is assumed to be unique. $config.data.indexSpecs = [{rand: -1, randInt: 1}, {randInt: -1}, {flag: 1}, {padding: 'text'}]; + $config.data.shardKey = {_id: 'hashed'}; // We'll use document validation so that we can change the collection options in the middle of // an $out, to test that the $out stage will notice this and error. 
This validator is not very @@ -144,7 +143,7 @@ var $config = extendWorkload($config, function($config, $super) { if (isMongos(db) && this.tid === 0) { assertWhenOwnDB.commandWorked(db.adminCommand({enableSharding: db.getName()})); assertWhenOwnDB.commandWorked(db.adminCommand( - {shardCollection: db[this.outputCollName].getFullName(), key: {_id: 'hashed'}})); + {shardCollection: db[this.outputCollName].getFullName(), key: this.shardKey})); } }; diff --git a/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js b/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js index d6bdebb5d570a..44434a35bc80e 100644 --- a/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js +++ b/jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js @@ -10,11 +10,10 @@ * * @tags: [uses_curop_agg_stage] */ -'use strict'; -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.aggregate = function aggregate(db, collName) { // $out to the same collection so that concurrent aggregate commands would cause congestion. db[collName].runCommand( diff --git a/jstests/concurrency/fsm_workloads/agg_sort.js b/jstests/concurrency/fsm_workloads/agg_sort.js index 757ecf76097ff..9628c3bee9e1d 100644 --- a/jstests/concurrency/fsm_workloads/agg_sort.js +++ b/jstests/concurrency/fsm_workloads/agg_sort.js @@ -1,15 +1,13 @@ -'use strict'; - /** * agg_sort.js * * Runs an aggregation with a $match that returns half the documents followed * by a $sort on a field containing a random float. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.getOutputCollPrefix = function getOutputCollPrefix(collName) { return collName + '_out_agg_sort_'; }; diff --git a/jstests/concurrency/fsm_workloads/agg_sort_external.js b/jstests/concurrency/fsm_workloads/agg_sort_external.js index b8cbad826bbe7..94ed43cbb5f58 100644 --- a/jstests/concurrency/fsm_workloads/agg_sort_external.js +++ b/jstests/concurrency/fsm_workloads/agg_sort_external.js @@ -1,5 +1,3 @@ -'use strict'; - /** * agg_sort_external.js * @@ -8,10 +6,10 @@ * * The data returned by the $match is greater than 100MB, which should force an external sort. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/agg_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // use enough docs to exceed 100MB, the in-memory limit for $sort and $group $config.data.numDocs = 24 * 1000; var MB = 1024 * 1024; // bytes diff --git a/jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js b/jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js index c2992479aa751..82276ba7ef305 100644 --- a/jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js +++ b/jstests/concurrency/fsm_workloads/agg_unionWith_interrupt_cleanup.js @@ -6,11 +6,12 @@ * uses_curop_agg_stage * ] */ -'use strict'; -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.commentStr = "agg_unionWith_interrupt_cleanup"; $config.states.aggregate = function aggregate(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/agg_union_with_chunk_migrations.js b/jstests/concurrency/fsm_workloads/agg_union_with_chunk_migrations.js index e5a2178d44645..a4c3225f96ff1 100644 --- a/jstests/concurrency/fsm_workloads/agg_union_with_chunk_migrations.js +++ b/jstests/concurrency/fsm_workloads/agg_union_with_chunk_migrations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * agg_union_with_chunk_migrations.js * @@ -17,10 +15,12 @@ * requires_non_retryable_writes, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.collWithMigrations = "union_ns"; $config.states.aggregate = function aggregate(db, collName, connCache) { diff --git a/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js b/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js index 1d7685ee75189..f16c3dd7c5e31 100644 --- a/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js +++ b/jstests/concurrency/fsm_workloads/agg_with_chunk_migrations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * agg_with_chunk_migrations.js * @@ -16,10 +14,12 @@ * requires_non_retryable_writes, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from 
"jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // The base setup will insert 'partitionSize' number of documents per thread, evenly // distributing across the chunks. Documents will only have the "_id" field. $config.data.partitionSize = 50; diff --git a/jstests/concurrency/fsm_workloads/analyze_shard_key.js b/jstests/concurrency/fsm_workloads/analyze_shard_key.js index 6ee75f0124a90..35fbacb70a4ef 100644 --- a/jstests/concurrency/fsm_workloads/analyze_shard_key.js +++ b/jstests/concurrency/fsm_workloads/analyze_shard_key.js @@ -1,54 +1,45 @@ -'use strict'; - /** * Tests that the analyzeShardKey command returns correct metrics. * * This workload implicitly assumes that its tid range is [0, $config.threadCount). This isn't * guaranteed to be true when it is run in parallel with other workloads. * - * TODO (SERVER-75532): Investigate the high variability of the runtime of analyze_shard_key.js in - * suites with chunk migration and/or stepdown/kill/terminate. * @tags: [ - * requires_fcv_70, - * featureFlagUpdateOneWithoutShardKey, + * requires_fcv_71, * uses_transactions, * resource_intensive, * incompatible_with_concurrency_simultaneous, - * does_not_support_stepdowns, - * assumes_balancer_off * ] */ -load("jstests/concurrency/fsm_libs/extend_workload.js"); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; + load("jstests/concurrency/fsm_workload_helpers/server_types.js"); // for isMongos load("jstests/libs/fail_point_util.js"); +load("jstests/libs/retryable_writes_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); const aggregateInterruptErrors = [ErrorCodes.CursorNotFound, ErrorCodes.CursorKilled, ErrorCodes.QueryPlanKilled]; -if ($config === undefined) { - // There is no workload to extend. Define a noop base workload to make the 'extendWorkload' call - // below still work. - $config = { - threadCount: 1, - iterations: 1, - startState: "init", - data: {}, - states: {init: function(db, collName) {}}, - transitions: {init: {init: 1}}, - setup: function(db, collName) {}, - teardown: function(db, collName) {}, - }; -} - -var $config = extendWorkload($config, function($config, $super) { +const kBaseConfig = { + threadCount: 1, + iterations: 1, + startState: "init", + data: {}, + states: {init: function(db, collName) {}}, + transitions: {init: {init: 1}}, + setup: function(db, collName) {}, + teardown: function(db, collName) {}, +}; + +export const $config = extendWorkload(kBaseConfig, function($config, $super) { $config.threadCount = 10; $config.iterations = 500; // The sample rate range for query sampling. - $config.data.minSampleRate = 1000; - $config.data.maxSampleRate = 1500; + $config.data.minSamplesPerSecond = 1000; + $config.data.maxSamplesPerSecond = 1500; // The comment to attached to queries in the read and write states below to mark them as // eligible for sampling. 
Queries such as the aggregate queries for looking up documents to // update will not have this comment attached since they do not follow the query patterns @@ -102,11 +93,6 @@ var $config = extendWorkload($config, function($config, $super) { key: {[this.currentShardKeyFieldName]: 1, [this.candidateShardKeyFieldName]: 1}, unique: isUnique }]; - // For a compound hashed shard key, the exact shard key index is needed for determining - // the monotonicity. - if (isHashed) { - indexSpecs.push({name: "exact_index", key: shardKey}); - } } else { shardKey = {[this.candidateShardKeyFieldName]: isHashed ? "hashed" : 1}; indexSpecs = [{ @@ -196,6 +182,7 @@ var $config = extendWorkload($config, function($config, $super) { /** * Generates and inserts initial documents. */ + $config.data.insertBatchSize = 1000; $config.data.generateInitialDocuments = function generateInitialDocuments( db, collName, cluster) { this.numInitialDocuments = 0; @@ -230,12 +217,17 @@ var $config = extendWorkload($config, function($config, $super) { assert.commandWorked( db.runCommand({createIndexes: collName, indexes: this.shardKeyOptions.indexSpecs})); - assert.commandWorked(db.runCommand({insert: collName, documents: docs})); - - // Wait for the documents to get replicated to all nodes so that a analyzeShardKey command - // runs immediately after this can assert on the metrics regardless of which nodes it - // targets. - cluster.awaitReplication(); + // To reduce the insertion order noise caused by parallel oplog application on + // secondaries, insert the documents in multiple batches. + let currIndex = 0; + while (currIndex < docs.length) { + const endIndex = currIndex + this.insertBatchSize; + assert.commandWorked(db.runCommand( + {insert: collName, documents: docs.slice(currIndex, endIndex), ordered: true})); + currIndex = endIndex; + // Wait for secondaries to have replicated the writes. + cluster.awaitReplication(); + } print(`Set up collection that have the following shard key to analyze ${tojson({ shardKeyOptions: this.shardKeyOptions, @@ -463,9 +455,10 @@ var $config = extendWorkload($config, function($config, $super) { * ranges. */ $config.data.assertKeyCharacteristicsMetrics = function assertKeyCharacteristicsMetrics( - metrics) { + res, isSampling) { // Perform basic validation of the metrics. - AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(metrics); + AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(res); + const metrics = res.keyCharacteristics; assert.eq(metrics.isUnique, this.shardKeyOptions.isUnique, metrics); // Validate the cardinality metrics. Due to the concurrent writes by other threads, it is @@ -479,7 +472,7 @@ var $config = extendWorkload($config, function($config, $super) { // not unique, they are calculated using an aggregation with readConcern "available" (i.e. // it opts out of shard versioning and filtering). If the shard key is unique, they are // inferred from fast count of the documents. - if (metrics.numDistinctValues < this.numInitialDistinctValues) { + if (!isSampling && (metrics.numDistinctValues < this.numInitialDistinctValues)) { if (!TestData.runningWithBalancer) { assert(this.shardKeyOptions.isUnique, metrics); if (!TestData.runningWithShardStepdowns) { @@ -500,7 +493,7 @@ var $config = extendWorkload($config, function($config, $super) { // since chunk migration deletes documents from the donor shard and re-inserts them on the // recipient shard so there is no guarantee that the insertion order from the client is // preserved. 
- if (!TestData.runningWithBalancer) { + if (!isSampling && !TestData.runningWithBalancer) { assert.eq(metrics.monotonicity.type, this.shardKeyOptions.isMonotonic && !this.shardKeyOptions.isHashed ? "monotonic" @@ -513,23 +506,89 @@ var $config = extendWorkload($config, function($config, $super) { // distribution. $config.data.intermediateReadDistributionMetricsMaxDiff = 20; $config.data.intermediateWriteDistributionMetricsMaxDiff = 20; - // The final diff windows are larger when the reads and writes are run inside transactions or - // with stepdown/kill/terminate in the background due to the presence of retries from the - // external client. - $config.data.finalReadDistributionMetricsMaxDiff = - (TestData.runInsideTransaction || TestData.runningWithShardStepdowns) ? 15 : 12; - $config.data.finalWriteDistributionMetricsMaxDiff = - (TestData.runInsideTransaction || TestData.runningWithShardStepdowns) ? 15 : 12; + $config.data.finalReadDistributionMetricsMaxDiff = 15; + $config.data.finalWriteDistributionMetricsMaxDiff = 15; // The minimum number of sampled queries to wait for before verifying the read and write // distribution metrics. $config.data.numSampledQueriesThreshold = 1500; + // The diff window for the sample size for each command for the sample population to be + // considered as matching the mock query pattern. + $config.data.sampleSizePercentageMaxDiff = 5; + + // The number of sampled queries returned by the latest analyzeShardKey command. + $config.data.previousNumSampledQueries = 0; + + $config.data.isAcceptableSampleSize = function isAcceptableSampleSize( + part, whole, expectedPercentage) { + return Math.abs(AnalyzeShardKeyUtil.calculatePercentage(part, whole) - expectedPercentage) < + this.sampleSizePercentageMaxDiff; + }; + + $config.data.shouldValidateReadDistribution = function shouldValidateReadDistribution( + sampleSize) { + if (sampleSize.total < this.numSampledQueriesThreshold) { + return false; + } + + // There are 4 read states (i.e. find, aggregate, count and distinct) and they have the + // same incoming and outgoing state transition probabilities. + const isAcceptable = this.isAcceptableSampleSize( + sampleSize.find, sampleSize.total, 25 /* expectedPercentage */) && + this.isAcceptableSampleSize( + sampleSize.aggregate, sampleSize.total, 25 /* expectedPercentage */) && + this.isAcceptableSampleSize( + sampleSize.count, sampleSize.total, 25 /* expectedPercentage */) && + this.isAcceptableSampleSize( + sampleSize.distinct, sampleSize.total, 25 /* expectedPercentage */); + + if (!isAcceptable) { + print( + `Skip validating the read distribution metrics because the sample ` + + `population does not match the mock query patterns: ${tojsononeline(sampleSize)}`); + // The sample population should always match the mock query patterns unless there are + // retries. + assert(TestData.runningWithShardStepdowns || TestData.runningWithBalancer || + TestData.runInsideTransaction); + } + return isAcceptable; + }; + + $config.data.shouldValidateWriteDistribution = function shouldValidateWriteDistribution( + sampleSize) { + if (sampleSize.total < this.numSampledQueriesThreshold) { + return false; + } + + // There are 4 write states (i.e. update, remove, findAndModifyUpdate and + // findAndModifyRemove) and they have the same incoming and outgoing state transition + // probabilities. 
+ const isAcceptable = + this.isAcceptableSampleSize( + sampleSize.update, sampleSize.total, 25 /* expectedPercentage */) && + this.isAcceptableSampleSize( + sampleSize.delete, sampleSize.total, 25 /* expectedPercentage */) && + this.isAcceptableSampleSize( + sampleSize.findAndModify, sampleSize.total, 50 /* expectedPercentage */); + + if (!isAcceptable) { + print( + `Skip validating the write distribution metrics because the sample ` + + `population does not match the mock query patterns: ${tojsononeline(sampleSize)}`); + // The sample population should always match the mock query patterns unless there are + // retries. + assert(TestData.runningWithShardStepdowns || TestData.runningWithBalancer || + TestData.runInsideTransaction); + } + return isAcceptable; + }; + /** * Verifies that the metrics about the read and write distribution are within acceptable ranges. */ $config.data.assertReadWriteDistributionMetrics = function assertReadWriteDistributionMetrics( - metrics, isFinal) { - AnalyzeShardKeyUtil.assertContainReadWriteDistributionMetrics(metrics); + res, isFinal) { + AnalyzeShardKeyUtil.assertContainReadWriteDistributionMetrics(res); let assertReadMetricsDiff = (actual, expected) => { const maxDiff = isFinal ? this.finalReadDistributionMetricsMaxDiff @@ -542,32 +601,34 @@ var $config = extendWorkload($config, function($config, $super) { assert.lt(Math.abs(actual - expected), maxDiff, {actual, expected}); }; - if (metrics.readDistribution.sampleSize.total > this.numSampledQueriesThreshold) { - assertReadMetricsDiff(metrics.readDistribution.percentageOfSingleShardReads, + const currentNumSampledQueries = + res.readDistribution.sampleSize.total + res.writeDistribution.sampleSize.total; + this.previousNumSampledQueries = currentNumSampledQueries; + + if (this.shouldValidateReadDistribution(res.readDistribution.sampleSize)) { + assertReadMetricsDiff(res.readDistribution.percentageOfSingleShardReads, this.readDistribution.percentageOfSingleShardReads); - assertReadMetricsDiff(metrics.readDistribution.percentageOfMultiShardReads, + assertReadMetricsDiff(res.readDistribution.percentageOfMultiShardReads, this.readDistribution.percentageOfMultiShardReads); - assertReadMetricsDiff(metrics.readDistribution.percentageOfScatterGatherReads, + assertReadMetricsDiff(res.readDistribution.percentageOfScatterGatherReads, this.readDistribution.percentageOfScatterGatherReads); - assert.eq(metrics.readDistribution.numReadsByRange.length, - this.analyzeShardKeyNumRanges); + assert.eq(res.readDistribution.numReadsByRange.length, this.analyzeShardKeyNumRanges); } - if (metrics.writeDistribution.sampleSize.total > this.numSampledQueriesThreshold) { - assertWriteMetricsDiff(metrics.writeDistribution.percentageOfSingleShardWrites, + + if (this.shouldValidateWriteDistribution(res.writeDistribution.sampleSize)) { + assertWriteMetricsDiff(res.writeDistribution.percentageOfSingleShardWrites, this.writeDistribution.percentageOfSingleShardWrites); - assertWriteMetricsDiff(metrics.writeDistribution.percentageOfMultiShardWrites, + assertWriteMetricsDiff(res.writeDistribution.percentageOfMultiShardWrites, this.writeDistribution.percentageOfMultiShardWrites); - assertWriteMetricsDiff(metrics.writeDistribution.percentageOfScatterGatherWrites, + assertWriteMetricsDiff(res.writeDistribution.percentageOfScatterGatherWrites, this.writeDistribution.percentageOfScatterGatherWrites); - assertWriteMetricsDiff(metrics.writeDistribution.percentageOfShardKeyUpdates, + 
assertWriteMetricsDiff(res.writeDistribution.percentageOfShardKeyUpdates, this.writeDistribution.percentageOfShardKeyUpdates); - assertWriteMetricsDiff( - metrics.writeDistribution.percentageOfSingleWritesWithoutShardKey, - this.writeDistribution.percentageOfSingleWritesWithoutShardKey); - assertWriteMetricsDiff(metrics.writeDistribution.percentageOfMultiWritesWithoutShardKey, + assertWriteMetricsDiff(res.writeDistribution.percentageOfSingleWritesWithoutShardKey, + this.writeDistribution.percentageOfSingleWritesWithoutShardKey); + assertWriteMetricsDiff(res.writeDistribution.percentageOfMultiWritesWithoutShardKey, this.writeDistribution.percentageOfMultiWritesWithoutShardKey); - assert.eq(metrics.writeDistribution.numWritesByRange.length, - this.analyzeShardKeyNumRanges); + assert.eq(res.writeDistribution.numWritesByRange.length, this.analyzeShardKeyNumRanges); } }; @@ -589,8 +650,11 @@ var $config = extendWorkload($config, function($config, $super) { // non-duplicate document using a random cursor. 4952606 is the error that the sampling // based split policy throws if it fails to find the specified number of split points. print( - `Failed to analyze the shard key due to duplicate keys returned by random cursor ${ - tojsononeline(err)}`); + `Failed to analyze the shard key due to duplicate keys returned by random ` + + `cursor. Skipping the next ${this.numAnalyzeShardKeySkipsAfterRandomCursorError} ` + + `analyzeShardKey states since the analyzeShardKey command is likely to fail with ` + + `this error again. ${tojsononeline(err)}`); + this.numAnalyzeShardKeySkips = this.numAnalyzeShardKeySkipsAfterRandomCursorError; return true; } if (this.expectedAggregateInterruptErrors.includes(err.code)) { @@ -599,6 +663,31 @@ var $config = extendWorkload($config, function($config, $super) { tojsononeline(err)}`); return true; } + if (err.code == 7559401) { + print(`Failed to analyze the shard key because one of the shards fetched the split ` + + `point documents after the TTL deletions had started. ${tojsononeline(err)}`); + return true; + } + if (err.code == 7588600) { + print(`Failed to analyze the shard key because the document for one of the most ` + + `common shard key values got deleted while the command was running. ${ + tojsononeline(err)}`); + return true; + } + if (err.code == 7826501) { + print(`Failed to analyze the shard key because $collStats indicates that the ` + + `collection is empty. ${tojsononeline(err)}`); + // Inaccurate fast count is only expected when there is unclean shutdown. + return TestData.runningWithShardStepdowns; + } + if (err.code == ErrorCodes.IllegalOperation && err.errmsg && + err.errmsg.includes("monotonicity") && err.errmsg.includes("empty collection")) { + print(`Failed to analyze the shard key because the fast count during the ` + + `step for calculating the monotonicity metrics indicates that collection ` + + `is empty. ${tojsononeline(err)}`); + // Inaccurate fast count is only expected when there is unclean shutdown. + return TestData.runningWithShardStepdowns; + } return false; }; @@ -634,6 +723,139 @@ var $config = extendWorkload($config, function($config, $super) { return truncatedRes; }; + /** + * Runs $listSampledQueries and asserts that the number of sampled queries is greater or equal + * to the number of sampled queries returned by the latest analyzeShardKey command. + */ + $config.data.listSampledQueries = function listSampledQueries(db, collName) { + const ns = db.getName() + "." 
+ collName; + let docs; + try { + docs = db.getSiblingDB("admin") + .aggregate( + [{$listSampledQueries: {namespace: ns}}], + // The network override does not support issuing getMore commands since + // if a network error occurs during it then it won't know whether the + // cursor was advanced or not. To allow this workload to run in a suite + // with network error, use a large batch size so that no getMore commands + // would be issued. + {cursor: TestData.runningWithShardStepdowns ? {batchSize: 100000} : {}}) + .toArray(); + } catch (e) { + if (this.expectedAggregateInterruptErrors.includes(e.code)) { + return; + } + throw e; + } + assert.gte(docs.length, this.previousNumSampledQueries); + }; + + // To avoid leaving a lot of config.analyzeShardKeySplitPoints documents around which could + // make restart recovery take a long time, overwrite the values of the + // 'analyzeShardKeySplitPointExpirationSecs' and 'ttlMonitorSleepSecs' server parameters to make + // the clean up occur as the workload runs, and then restore the original values during + // teardown(). + $config.data.splitPointExpirationSecs = 10; + $config.data.ttlMonitorSleepSecs = 5; + $config.data.originalSplitPointExpirationSecs = {}; + $config.data.originalTTLMonitorSleepSecs = {}; + + $config.data.overrideSplitPointExpiration = function overrideSplitPointExpiration(cluster) { + cluster.executeOnMongodNodes((db) => { + const res = assert.commandWorked(db.adminCommand({ + setParameter: 1, + analyzeShardKeySplitPointExpirationSecs: this.splitPointExpirationSecs, + })); + this.originalSplitPointExpirationSecs[db.getMongo().host] = res.was; + }); + }; + + $config.data.overrideTTLMonitorSleepSecs = function overrideTTLMonitorSleepSecs(cluster) { + cluster.executeOnMongodNodes((db) => { + const res = assert.commandWorked( + db.adminCommand({setParameter: 1, ttlMonitorSleepSecs: this.ttlMonitorSleepSecs})); + this.originalTTLMonitorSleepSecs[db.getMongo().host] = res.was; + }); + }; + + $config.data.restoreSplitPointExpiration = function restoreSplitPointExpiration(cluster) { + cluster.executeOnMongodNodes((db) => { + assert.commandWorked(db.adminCommand({ + setParameter: 1, + analyzeShardKeySplitPointExpirationSecs: + this.originalSplitPointExpirationSecs[db.getMongo().host], + })); + }); + }; + + $config.data.restoreTTLMonitorSleepSecs = function restoreTTLMonitorSleepSecs(cluster) { + cluster.executeOnMongodNodes((db) => { + assert.commandWorked(db.adminCommand({ + setParameter: 1, + ttlMonitorSleepSecs: this.originalTTLMonitorSleepSecs[db.getMongo().host], + })); + }); + }; + + /** + * Returns the number of documents that match the given filter in the given collection. + */ + $config.data.getNumDocuments = function getNumDocuments(db, collName, filter) { + const firstBatch = assert + .commandWorked(db.runCommand({ + aggregate: collName, + pipeline: [{$match: filter}, {$count: "count"}], + cursor: {} + })) + .cursor.firstBatch; + return firstBatch.length == 0 ? 0 : firstBatch[0].count; + }; + + // To avoid leaving unnecessary documents in config database after this workload finishes, + // remove all the sampled query documents and split point documents during teardown(). + $config.data.removeSampledQueryAndSplitPointDocuments = + function removeSampledQueryAndSplitPointDocuments(db, collName, cluster) { + const ns = db.getName() + "." 
+ collName; + cluster.getReplicaSets().forEach(rst => { + while (true) { + try { + const configDb = rst.getPrimary().getDB("config"); + jsTest.log("Removing sampled query documents and split points documents"); + jsTest.log( + "The counts before removing " + tojsononeline({ + sampledQueries: this.getNumDocuments(configDb, "sampledQueries", {ns}), + sampledQueriesDiff: + this.getNumDocuments(configDb, "sampledQueriesDiff", {ns}), + analyzeShardKeySplitPoints: + this.getNumDocuments(configDb, "analyzeShardKeySplitPoints", {ns}), + + })); + + assert.commandWorked(configDb.sampledQueries.remove({})); + assert.commandWorked(configDb.sampledQueriesDiff.remove({})); + assert.commandWorked(configDb.analyzeShardKeySplitPoints.remove({})); + + jsTest.log( + "The counts after removing " + tojsononeline({ + sampledQueries: this.getNumDocuments(configDb, "sampledQueries", {ns}), + sampledQueriesDiff: + this.getNumDocuments(configDb, "sampledQueriesDiff", {ns}), + analyzeShardKeySplitPoints: + this.getNumDocuments(configDb, "analyzeShardKeySplitPoints", {ns}), + + })); + return; + } catch (e) { + if (RetryableWritesUtil.isRetryableCode(e.code)) { + print("Retry documents removal after error: " + tojson(e)); + continue; + } + throw e; + } + } + }); + }; + //// // The body of the workload. @@ -674,6 +896,9 @@ var $config = extendWorkload($config, function($config, $super) { {comment: this.eligibleForSamplingComment}); }); + this.overrideSplitPointExpiration(cluster); + this.overrideTTLMonitorSleepSecs(cluster); + // On a sharded cluster, running an aggregate command by default involves running getMore // commands since the cursor establisher in sharding is pessimistic about the router being // stale so it always makes a cursor with {batchSize: 0} on the shards and then run getMore @@ -712,6 +937,16 @@ var $config = extendWorkload($config, function($config, $super) { print("Doing final validation of read and write distribution metrics " + tojson(this.truncateAnalyzeShardKeyResponseForLogging(metrics))); this.assertReadWriteDistributionMetrics(metrics, true /* isFinal */); + + print("Listing sampled queries " + + tojsononeline({lastNumSampledQueries: this.previousNumSampledQueries})); + assert.gt(this.previousNumSampledQueries, 0); + this.listSampledQueries(db, collName); + + print("Cleaning up"); + this.restoreSplitPointExpiration(cluster); + this.restoreTTLMonitorSleepSecs(cluster); + this.removeSampledQueryAndSplitPointDocuments(db, collName, cluster); }; $config.states.init = function init(db, collName) { @@ -719,15 +954,37 @@ var $config = extendWorkload($config, function($config, $super) { this.metricsDocId = new UUID(this.metricsDocIdString); }; + $config.data.numAnalyzeShardKeySkipsAfterRandomCursorError = 5; + // Set to a positive value when the analyzeShardKey command fails with an error that is likely + // to occur again upon the next try. + $config.data.numAnalyzeShardKeySkips = 0; + $config.states.analyzeShardKey = function analyzeShardKey(db, collName) { - print("Starting analyzeShardKey state"); + if (this.numAnalyzeShardKeySkips > 0) { + print("Skipping the analyzeShardKey state"); + this.numAnalyzeShardKeySkips--; + return; + } + const ns = db.getName() + "." 
+ collName; - const res = db.adminCommand({analyzeShardKey: ns, key: this.shardKeyOptions.shardKey}); + const cmdObj = {analyzeShardKey: ns, key: this.shardKeyOptions.shardKey}; + const rand = Math.random(); + if (rand < 0.25) { + cmdObj.sampleRate = Math.random() * 0.5 + 0.5; + } else if (rand < 0.5) { + cmdObj.sampleSize = + NumberLong(Math.floor(Math.random() * 1.5 * this.numInitialDocuments)); + } + const isSampling = + cmdObj.hasOwnProperty("sampleRate") || cmdObj.hasOwnProperty("sampleSize"); + + print("Starting analyzeShardKey state " + tojsononeline(cmdObj)); + const res = db.adminCommand(cmdObj); try { assert.commandWorked(res); print("Metrics: " + tojsononeline({res: this.truncateAnalyzeShardKeyResponseForLogging(res)})); - this.assertKeyCharacteristicsMetrics(res); + this.assertKeyCharacteristicsMetrics(res, isSampling); this.assertReadWriteDistributionMetrics(res, false /* isFinal */); // Persist the metrics so we can do the final validation during teardown. assert.commandWorked( @@ -748,7 +1005,8 @@ var $config = extendWorkload($config, function($config, $super) { assert.commandWorked(db.adminCommand({ configureQueryAnalyzer: ns, mode: "full", - sampleRate: AnalyzeShardKeyUtil.getRandInteger(this.minSampleRate, this.maxSampleRate) + samplesPerSecond: AnalyzeShardKeyUtil.getRandInteger(this.minSamplesPerSecond, + this.maxSamplesPerSecond) })); print("Finished enableQuerySampling state"); }; @@ -756,14 +1014,16 @@ var $config = extendWorkload($config, function($config, $super) { $config.states.disableQuerySampling = function disableQuerySampling(db, collName) { print("Starting disableQuerySampling state"); const ns = db.getName() + "." + collName; - // If query sampling is off, this command is expected to fail with an IllegalOperation - // error. - assert.commandWorkedOrFailedWithCode( - db.adminCommand({configureQueryAnalyzer: ns, mode: "off"}), - ErrorCodes.IllegalOperation); + assert.commandWorked(db.adminCommand({configureQueryAnalyzer: ns, mode: "off"})); print("Finished disableQuerySampling state"); }; + $config.states.listSampledQueries = function listSampledQueries(db, collName) { + print("Starting listSampledQueries state"); + this.listSampledQueries(db, collName); + print("Finished listSampledQueries state"); + }; + $config.states.find = function find(db, collName) { const cmdObj = { find: collName, @@ -841,7 +1101,7 @@ var $config = extendWorkload($config, function($config, $super) { } catch (e) { if (!this.isAcceptableUpdateError(res) && !(res.hasOwnProperty("writeErrors") && - isAcceptableUpdateError(res.writeErrors[0]))) { + this.isAcceptableUpdateError(res.writeErrors[0]))) { throw e; } } @@ -863,6 +1123,7 @@ var $config = extendWorkload($config, function($config, $super) { print("Starting remove state " + tojsononeline(cmdObj)); const res = assert.commandWorked(db.runCommand(cmdObj)); assert.eq(res.n, 1, {cmdObj, res}); + // Insert a random document to restore the original number of documents. assert.commandWorked( db.runCommand({insert: collName, documents: [this.generateRandomDocument(this.tid)]})); @@ -911,6 +1172,7 @@ var $config = extendWorkload($config, function($config, $super) { print("Starting findAndModifyRemove state " + tojsononeline(cmdObj)); const res = assert.commandWorked(db.runCommand(cmdObj)); assert.eq(res.lastErrorObject.n, 1, {cmdObj, res}); + // Insert a random document to restore the original number of documents. 
assert.commandWorked( db.runCommand({insert: collName, documents: [this.generateRandomDocument(this.tid)]})); @@ -936,6 +1198,11 @@ var $config = extendWorkload($config, function($config, $super) { originalDisableQuerySampling.call(this, db, collName); }; + const originalListSampledQueries = $config.states.listSampledQueries; + $config.states.listSampledQueries = function(db, collName, connCache) { + originalListSampledQueries.call(this, db, collName); + }; + const originalAnalyzeShardKey = $config.states.analyzeShardKey; $config.states.analyzeShardKey = function(db, collName, connCache) { originalAnalyzeShardKey.call(this, db, collName); @@ -992,8 +1259,9 @@ var $config = extendWorkload($config, function($config, $super) { enableQuerySampling: 1, }, analyzeShardKey: { - enableQuerySampling: 0.18, + enableQuerySampling: 0.15, disableQuerySampling: 0.02, + listSampledQueries: 0.03, find: 0.1, aggregate: 0.1, count: 0.1, @@ -1004,8 +1272,9 @@ var $config = extendWorkload($config, function($config, $super) { findAndModifyRemove: 0.1, }, enableQuerySampling: { - analyzeShardKey: 0.18, + analyzeShardKey: 0.15, disableQuerySampling: 0.02, + listSampledQueries: 0.03, find: 0.1, aggregate: 0.1, count: 0.1, @@ -1019,7 +1288,7 @@ var $config = extendWorkload($config, function($config, $super) { analyzeShardKey: 0.05, enableQuerySampling: 0.95, }, - find: { + listSampledQueries: { analyzeShardKey: 0.2, enableQuerySampling: 0.1, aggregate: 0.1, @@ -1030,9 +1299,22 @@ var $config = extendWorkload($config, function($config, $super) { findAndModifyUpdate: 0.1, findAndModifyRemove: 0.1, }, + find: { + analyzeShardKey: 0.15, + enableQuerySampling: 0.1, + listSampledQueries: 0.05, + aggregate: 0.1, + count: 0.1, + distinct: 0.1, + update: 0.1, + remove: 0.1, + findAndModifyUpdate: 0.1, + findAndModifyRemove: 0.1, + }, aggregate: { - analyzeShardKey: 0.2, + analyzeShardKey: 0.15, enableQuerySampling: 0.1, + listSampledQueries: 0.05, find: 0.1, count: 0.1, distinct: 0.1, @@ -1042,8 +1324,9 @@ var $config = extendWorkload($config, function($config, $super) { findAndModifyRemove: 0.1, }, count: { - analyzeShardKey: 0.2, + analyzeShardKey: 0.15, enableQuerySampling: 0.1, + listSampledQueries: 0.05, find: 0.1, aggregate: 0.1, distinct: 0.1, @@ -1053,8 +1336,9 @@ var $config = extendWorkload($config, function($config, $super) { findAndModifyRemove: 0.1, }, distinct: { - analyzeShardKey: 0.2, + analyzeShardKey: 0.15, enableQuerySampling: 0.1, + listSampledQueries: 0.05, find: 0.1, aggregate: 0.1, count: 0.1, @@ -1064,8 +1348,9 @@ var $config = extendWorkload($config, function($config, $super) { findAndModifyRemove: 0.1, }, update: { - analyzeShardKey: 0.2, + analyzeShardKey: 0.15, enableQuerySampling: 0.1, + listSampledQueries: 0.05, find: 0.1, aggregate: 0.1, count: 0.1, @@ -1075,8 +1360,9 @@ var $config = extendWorkload($config, function($config, $super) { findAndModifyRemove: 0.1, }, remove: { - analyzeShardKey: 0.2, + analyzeShardKey: 0.15, enableQuerySampling: 0.1, + listSampledQueries: 0.05, find: 0.1, aggregate: 0.1, count: 0.1, @@ -1086,8 +1372,9 @@ var $config = extendWorkload($config, function($config, $super) { findAndModifyRemove: 0.1, }, findAndModifyUpdate: { - analyzeShardKey: 0.2, + analyzeShardKey: 0.15, enableQuerySampling: 0.1, + listSampledQueries: 0.05, find: 0.1, aggregate: 0.1, count: 0.1, @@ -1097,8 +1384,9 @@ var $config = extendWorkload($config, function($config, $super) { findAndModifyRemove: 0.1, }, findAndModifyRemove: { - analyzeShardKey: 0.2, + analyzeShardKey: 0.15, 
enableQuerySampling: 0.1, + listSampledQueries: 0.05, find: 0.1, aggregate: 0.1, count: 0.1, diff --git a/jstests/concurrency/fsm_workloads/auth_create_role.js b/jstests/concurrency/fsm_workloads/auth_create_role.js index 05ec4a9fb32eb..9dec84d40fdfa 100644 --- a/jstests/concurrency/fsm_workloads/auth_create_role.js +++ b/jstests/concurrency/fsm_workloads/auth_create_role.js @@ -1,5 +1,3 @@ -'use strict'; - /** * auth_create_role.js * @@ -10,7 +8,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRole // UMC commands are not supported in transactions. TestData.runInsideTransaction = false; -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the role name, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/auth_create_user.js b/jstests/concurrency/fsm_workloads/auth_create_user.js index 26c327dd0cde0..fcb4930fd14b1 100644 --- a/jstests/concurrency/fsm_workloads/auth_create_user.js +++ b/jstests/concurrency/fsm_workloads/auth_create_user.js @@ -1,5 +1,3 @@ -'use strict'; - /** * auth_create_user.js * @@ -10,7 +8,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropUser // UMC commands are not supported in transactions. TestData.runInsideTransaction = false; -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the username, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/auth_drop_role.js b/jstests/concurrency/fsm_workloads/auth_drop_role.js index 6a5ebc2240dd2..43bf23bef6d7f 100644 --- a/jstests/concurrency/fsm_workloads/auth_drop_role.js +++ b/jstests/concurrency/fsm_workloads/auth_drop_role.js @@ -1,5 +1,3 @@ -'use strict'; - /** * auth_drop_role.js * @@ -12,7 +10,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRole // UMC commands are not supported in transactions. TestData.runInsideTransaction = false; -var $config = (function() { +export const $config = (function() { const kMaxCmdTimeMs = 60000; const kMaxTxnLockReqTimeMs = 100; const kDefaultTxnLockReqTimeMs = 5; diff --git a/jstests/concurrency/fsm_workloads/auth_drop_user.js b/jstests/concurrency/fsm_workloads/auth_drop_user.js index 248e0139287ac..75fe9e61fdbd8 100644 --- a/jstests/concurrency/fsm_workloads/auth_drop_user.js +++ b/jstests/concurrency/fsm_workloads/auth_drop_user.js @@ -1,5 +1,3 @@ -'use strict'; - // UMC commands are not supported in transactions. TestData.runInsideTransaction = false; @@ -10,7 +8,7 @@ TestData.runInsideTransaction = false; * drops the user from the database. * @tags: [incompatible_with_concurrency_simultaneous] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the username, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/auth_privilege_cache_miss.js b/jstests/concurrency/fsm_workloads/auth_privilege_cache_miss.js index 3d50438cc1313..5a91ad52def05 100644 --- a/jstests/concurrency/fsm_workloads/auth_privilege_cache_miss.js +++ b/jstests/concurrency/fsm_workloads/auth_privilege_cache_miss.js @@ -1,5 +1,3 @@ -'use strict'; - /** * auth_privilege_cache_miss.js * @@ -9,10 +7,12 @@ */ // Use the auth_privilege_consistency workload as a base. 
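Most of the fsm_workloads hunks that follow apply the same mechanical conversion: the legacy load()-based inclusion of a base workload is replaced with ES module imports, and `var $config` becomes `export const $config` (the `'use strict';` pragmas are dropped because ES modules are strict by default). A minimal sketch of the resulting pattern for a workload that extends a base config, with illustrative file and prefix names that are not part of this patch:

    // hypothetical_workload.js -- illustrative sketch only, not a file in this patch
    import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
    import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/some_base_workload.js";

    export const $config = extendWorkload($baseConfig, function($config, $super) {
        // Override or add data/states here; $super exposes the base workload's members.
        $config.data.prefix = 'hypothetical_workload';
        return $config;
    });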
-load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/auth_privilege_consistency.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/auth_privilege_consistency.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Override setup() to also set cache-miss and slow load failpoints. const kResolveRolesDelayMS = 100; diff --git a/jstests/concurrency/fsm_workloads/auth_privilege_consistency.js b/jstests/concurrency/fsm_workloads/auth_privilege_consistency.js index 4997d47ad2eeb..474ed1fcbb52f 100644 --- a/jstests/concurrency/fsm_workloads/auth_privilege_consistency.js +++ b/jstests/concurrency/fsm_workloads/auth_privilege_consistency.js @@ -1,5 +1,3 @@ -'use strict'; - /** * auth_privilege_consistency.js * @@ -12,7 +10,7 @@ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRole // UMC commands are not supported in transactions. TestData.runInsideTransaction = false; -var $config = (function() { +export const $config = (function() { const kTestUserPassword = 'secret'; const kMaxCmdTimeMs = 60000; const kMaxTxnLockReqTimeMs = 100; diff --git a/jstests/concurrency/fsm_workloads/auth_role_consistency.js b/jstests/concurrency/fsm_workloads/auth_role_consistency.js index 4ce7d3300f697..506ba1b0d98d9 100644 --- a/jstests/concurrency/fsm_workloads/auth_role_consistency.js +++ b/jstests/concurrency/fsm_workloads/auth_role_consistency.js @@ -1,5 +1,3 @@ -'use strict'; - /** * auth_role_consistency.js * @@ -8,7 +6,7 @@ */ load('jstests/concurrency/fsm_workload_helpers/drop_utils.js'); // for dropRoles -var $config = (function() { +export const $config = (function() { const kRoleNamePrefix = 'auth_role_consistency'; const states = (function() { diff --git a/jstests/concurrency/fsm_workloads/batched_multi_deletes_with_write_conflicts.js b/jstests/concurrency/fsm_workloads/batched_multi_deletes_with_write_conflicts.js index ec01224dda36f..5965adcca8bd2 100644 --- a/jstests/concurrency/fsm_workloads/batched_multi_deletes_with_write_conflicts.js +++ b/jstests/concurrency/fsm_workloads/batched_multi_deletes_with_write_conflicts.js @@ -1,6 +1,3 @@ -'use strict'; -load("jstests/libs/analyze_plan.js"); - /** * batched_multi_deletes_with_write_conflicts.js * @@ -13,8 +10,9 @@ load("jstests/libs/analyze_plan.js"); * requires_fcv_61, * ] */ +import {getPlanStage, getPlanStages} from "jstests/libs/analyze_plan.js"; -var $config = (function() { +export const $config = (function() { // 'data' is passed (copied) to each of the worker threads. var data = { // Defines the number of subsets of data, which are randomly picked to create conflicts. diff --git a/jstests/concurrency/fsm_workloads/cleanupOrphanedWhileMigrating.js b/jstests/concurrency/fsm_workloads/cleanupOrphanedWhileMigrating.js index c6c287082f822..7c07d20d31f94 100644 --- a/jstests/concurrency/fsm_workloads/cleanupOrphanedWhileMigrating.js +++ b/jstests/concurrency/fsm_workloads/cleanupOrphanedWhileMigrating.js @@ -1,16 +1,16 @@ -'use strict'; - /** * Performs range deletions while chunks are being moved. 
* * @tags: [requires_sharding, assumes_balancer_on, antithesis_incompatible] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js"; load('jstests/concurrency/fsm_workload_helpers/balancer.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 5; $config.iterations = 50; diff --git a/jstests/concurrency/fsm_workloads/collection_defragmentation.js b/jstests/concurrency/fsm_workloads/collection_defragmentation.js index f91b62a67412c..ec56ee992a701 100644 --- a/jstests/concurrency/fsm_workloads/collection_defragmentation.js +++ b/jstests/concurrency/fsm_workloads/collection_defragmentation.js @@ -1,5 +1,3 @@ -'use strict'; - /** * collection_defragmentation.js * @@ -51,7 +49,7 @@ function getAllChunks(configDB, ns, keyPattern) { return chunkArray; } -var $config = (function() { +export const $config = (function() { var states = { init: function init(db, collName, connCache) { // Initialize defragmentation @@ -223,14 +221,14 @@ var $config = (function() { const fullNs = dbName + "." + collPrefix + j; const numChunks = Random.randInt(30); const numZones = Random.randInt(numChunks / 2); - const docSizeBytes = Random.randInt(1024 * 1024) + 50; + const docSizeBytesRange = [50, 1024 * 1024]; defragmentationUtil.createFragmentedCollection( mongos, fullNs, numChunks, 5 /* maxChunkFillMB */, numZones, - docSizeBytes, + docSizeBytesRange, 1000 /* chunkSpacing */, true /* disableCollectionBalancing*/); } diff --git a/jstests/concurrency/fsm_workloads/collection_uuid.js b/jstests/concurrency/fsm_workloads/collection_uuid.js index 0151d65d1d450..9eb1fed6ea055 100644 --- a/jstests/concurrency/fsm_workloads/collection_uuid.js +++ b/jstests/concurrency/fsm_workloads/collection_uuid.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Tests running operations with 'collectionUUID' parameter while the collection is being renamed * concurrently, and makes sure that all operations will succeed eventually when using the correct @@ -133,7 +131,7 @@ const verifyFailingWithCollectionUUIDMismatch = function( assert.eq(res.actualCollection, actualCollection); }; -const testCommand = function( +export const testCommand = function( db, namespace, cmdName, cmdObj, data, expectedNonRetryableErrors = []) { verifyFailingWithCollectionUUIDMismatch( db, cmdName, cmdObj, data.sameDbCollUUID, sameDbCollName, namespace, data); @@ -152,7 +150,7 @@ const testCommand = function( runCommandInLoop(db, namespace, cmdName, cmdObj, data, expectedNonRetryableErrors); }; -var $config = (function() { +export const $config = (function() { const data = {}; const states = (function() { diff --git a/jstests/concurrency/fsm_workloads/collection_uuid_sharded.js b/jstests/concurrency/fsm_workloads/collection_uuid_sharded.js index 7fbaba0357f65..3dd02cbb3fdf5 100644 --- a/jstests/concurrency/fsm_workloads/collection_uuid_sharded.js +++ b/jstests/concurrency/fsm_workloads/collection_uuid_sharded.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Tests running sharding operations with 'collectionUUID' parameter while the sharded collection is * being renamed concurrenlty. 
@@ -11,10 +9,13 @@ * requires_sharding, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/collection_uuid.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig, + testCommand +} from "jstests/concurrency/fsm_workloads/collection_uuid.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { const origStates = Object.keys($config.states); $config.states = Object.extend({ shardingCommands: function shardingCommands(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/collmod.js b/jstests/concurrency/fsm_workloads/collmod.js index 4fb7945ae5231..c512ff7a84074 100644 --- a/jstests/concurrency/fsm_workloads/collmod.js +++ b/jstests/concurrency/fsm_workloads/collmod.js @@ -1,5 +1,3 @@ -'use strict'; - /** * collmod.js * @@ -10,7 +8,7 @@ * * All threads update the same TTL index on the same collection. */ -var $config = (function() { +export const $config = (function() { var data = { numDocs: 1000, maxTTL: 5000 // max time to live diff --git a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js index 615af927c1ba9..de459d97bba3b 100644 --- a/jstests/concurrency/fsm_workloads/collmod_separate_collections.js +++ b/jstests/concurrency/fsm_workloads/collmod_separate_collections.js @@ -1,5 +1,3 @@ -'use strict'; - /** * collmod_separate_collections.js * @@ -9,10 +7,10 @@ * * Each thread updates a TTL index on a separate collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/collmod.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.prefix = 'collmod_separate_collections'; $config.data.shardKey = {createdAt: 1}; diff --git a/jstests/concurrency/fsm_workloads/collmod_writeconflict.js b/jstests/concurrency/fsm_workloads/collmod_writeconflict.js index 32241ef4ab015..6d27d4c3abb97 100644 --- a/jstests/concurrency/fsm_workloads/collmod_writeconflict.js +++ b/jstests/concurrency/fsm_workloads/collmod_writeconflict.js @@ -1,14 +1,12 @@ -'use strict'; - /** * collmod_writeconflict.js * * Ensures collMod successfully handles WriteConflictExceptions. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/collmod.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/collmod.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.prefix = 'collmod_writeconflict'; $config.setup = function setup(db, collName, cluster) { $super.setup.apply(this, arguments); diff --git a/jstests/concurrency/fsm_workloads/compact.js b/jstests/concurrency/fsm_workloads/compact.js index e373cfe6063e0..784a9d24189fe 100644 --- a/jstests/concurrency/fsm_workloads/compact.js +++ b/jstests/concurrency/fsm_workloads/compact.js @@ -1,5 +1,3 @@ -'use strict'; - /** * compact.js * @@ -11,13 +9,13 @@ * with wiredTiger LSM variants. Bypass this command for the wiredTiger LSM variant * until a fix is available for WT-2523. * - * @tags: [does_not_support_wiredtiger_lsm, requires_compact] + * @tags: [does_not_support_wiredtiger_lsm, incompatible_with_macos, requires_compact] */ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js"); -var $config = (function() { +export const $config = (function() { var data = { nDocumentsToInsert: 1000, nIndexes: 3 + 1, // The number of indexes created in createIndexes + 1 for { _id: 1 } diff --git a/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js b/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js index 0a74749a138de..86a64786a1859 100644 --- a/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js +++ b/jstests/concurrency/fsm_workloads/compact_while_creating_indexes.js @@ -1,5 +1,3 @@ -'use strict'; - /** * WiredTiger allows online compaction of its collections so it does not require an exclusive lock. * This workload is meant to test the behavior of the locking changes done in SERVER-16413. To @@ -11,7 +9,7 @@ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isEphemeral -var $config = (function() { +export const $config = (function() { var states = (function() { function init(db, collName) { insertDocuments.call(this, db, collName); @@ -38,7 +36,7 @@ var $config = (function() { } function createIndex(db, collName) { - db[collName].createIndex({x: 1}, {background: true}); + db[collName].createIndex({x: 1}); } function dropIndex(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js index e8c84f35fac67..48cc7e0afef39 100644 --- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js +++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection.js @@ -12,9 +12,7 @@ * @tags: [requires_collstats, requires_capped] */ -var $config = (function() { - load("jstests/libs/feature_flag_util.js"); - +export const $config = (function() { // TODO: This workload may fail if an iteration multiplier is specified. 
var data = {prefix: 'convert_to_capped_collection'}; @@ -23,10 +21,6 @@ var $config = (function() { return prefix + '_' + tid; } - function isMultiple256(num) { - return num % 256 === 0; - } - function init(db, collName) { this.threadCollName = uniqueCollectionName(this.prefix, this.tid); @@ -42,9 +36,6 @@ var $config = (function() { assertWhenOwnDB(!db[this.threadCollName].isCapped()); assertWhenOwnDB.commandWorked(db[this.threadCollName].convertToCapped(this.size)); assertWhenOwnDB(db[this.threadCollName].isCapped()); - if (!FeatureFlagUtil.isPresentAndEnabled(db, "CappedCollectionsRelaxedSize")) { - assertWhenOwnDB(isMultiple256(db[this.threadCollName].stats().maxSize)); - } } function convertToCapped(db, collName) { @@ -54,9 +45,6 @@ var $config = (function() { assertWhenOwnDB.commandWorked(db[this.threadCollName].convertToCapped(this.size)); assertWhenOwnDB(db[this.threadCollName].isCapped()); - if (!FeatureFlagUtil.isPresentAndEnabled(db, "CappedCollectionsRelaxedSize")) { - assertWhenOwnDB(isMultiple256(db[this.threadCollName].stats().maxSize)); - } // only the _id index should remain after running convertToCapped var indexKeys = db[this.threadCollName].getIndexKeys(); diff --git a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js index be4414f642dc6..91e1e3d35318f 100644 --- a/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js +++ b/jstests/concurrency/fsm_workloads/convert_to_capped_collection_index.js @@ -15,10 +15,12 @@ * * @tags: [requires_collstats, requires_capped] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/convert_to_capped_collection.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/convert_to_capped_collection.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.convertToCapped = function convertToCapped(db, collName) { assertWhenOwnDB.commandWorked(db[this.threadCollName].createIndex({i: 1, rand: 1})); assertWhenOwnDB.eq(2, db[this.threadCollName].getIndexes().length); diff --git a/jstests/concurrency/fsm_workloads/count.js b/jstests/concurrency/fsm_workloads/count.js index 517941e97470c..f9f6229ffa709 100644 --- a/jstests/concurrency/fsm_workloads/count.js +++ b/jstests/concurrency/fsm_workloads/count.js @@ -1,5 +1,3 @@ -'use strict'; - /** * count.js * @@ -13,7 +11,7 @@ */ load("jstests/libs/fixture_helpers.js"); // For isMongos. -var $config = (function() { +export const $config = (function() { var data = { randRange: function randRange(low, high) { // return random number in range [low, high] diff --git a/jstests/concurrency/fsm_workloads/count_indexed.js b/jstests/concurrency/fsm_workloads/count_indexed.js index 9887d1a113ece..77aa22ab9db82 100644 --- a/jstests/concurrency/fsm_workloads/count_indexed.js +++ b/jstests/concurrency/fsm_workloads/count_indexed.js @@ -1,5 +1,3 @@ -'use strict'; - /** * count_indexed.js * @@ -10,10 +8,10 @@ * and then inserts 'modulus * countPerNum' documents. [250, 1000] * Each thread inserts docs into a unique collection. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/count.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/count.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.prefix = 'count_fsm'; $config.data.shardKey = {tid: 1, i: 1}; diff --git a/jstests/concurrency/fsm_workloads/count_limit_skip.js b/jstests/concurrency/fsm_workloads/count_limit_skip.js index 99a2149cc1f91..931e0539164f2 100644 --- a/jstests/concurrency/fsm_workloads/count_limit_skip.js +++ b/jstests/concurrency/fsm_workloads/count_limit_skip.js @@ -1,5 +1,3 @@ -'use strict'; - /** * count_limit_skip.js * @@ -10,11 +8,11 @@ * and then inserts 'modulus * countPerNum' documents. [250, 1000] * Each thread inserts docs into a unique collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/count.js'); // for $config -load("jstests/libs/fixture_helpers.js"); // For isMongos. +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/count.js"; +load("jstests/libs/fixture_helpers.js"); // For isMongos. -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.prefix = 'count_fsm_q_l_s'; $config.data.getCount = function getCount(db, predicate) { diff --git a/jstests/concurrency/fsm_workloads/count_odd.js b/jstests/concurrency/fsm_workloads/count_odd.js index 666f65fe66c51..abd99ca4dd5a2 100644 --- a/jstests/concurrency/fsm_workloads/count_odd.js +++ b/jstests/concurrency/fsm_workloads/count_odd.js @@ -1,5 +1,3 @@ -'use strict'; - /** * count_odd.js * @@ -10,7 +8,7 @@ * ] * */ -var $config = (function() { +export const $config = (function() { var states = (function() { function init(db, collName) { } diff --git a/jstests/concurrency/fsm_workloads/create_and_drop_collection.js b/jstests/concurrency/fsm_workloads/create_and_drop_collection.js index fd23d5e153ec3..366fa95efc72a 100644 --- a/jstests/concurrency/fsm_workloads/create_and_drop_collection.js +++ b/jstests/concurrency/fsm_workloads/create_and_drop_collection.js @@ -1,5 +1,3 @@ -'use strict'; - /** * create_and_drop_collection.js * @@ -7,7 +5,7 @@ * * @tags: [requires_sharding] */ -var $config = (function() { +export const $config = (function() { var data = {}; var states = (function() { diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection.js b/jstests/concurrency/fsm_workloads/create_capped_collection.js index 942af8a823100..59af04e65eb0b 100644 --- a/jstests/concurrency/fsm_workloads/create_capped_collection.js +++ b/jstests/concurrency/fsm_workloads/create_capped_collection.js @@ -1,5 +1,3 @@ -'use strict'; - /** * create_capped_collection.js * @@ -9,7 +7,7 @@ * @tags: [requires_capped] */ -var $config = (function() { +export const $config = (function() { // Returns a document of the form { _id: ObjectId(...), field: '...' } // with specified BSON size. 
function makeDocWithSize(targetSize) { diff --git a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js index 3f435b9eb438a..f4dc6a11a29cb 100644 --- a/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js +++ b/jstests/concurrency/fsm_workloads/create_capped_collection_maxdocs.js @@ -1,5 +1,3 @@ -'use strict'; - /** * create_capped_collection_maxdocs.js * @@ -14,10 +12,12 @@ * * @tags: [does_not_support_stepdowns, requires_capped] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/create_capped_collection.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/create_capped_collection.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. $config.data.prefix = 'create_capped_collection_maxdocs'; diff --git a/jstests/concurrency/fsm_workloads/create_collection.js b/jstests/concurrency/fsm_workloads/create_collection.js index 61db8d1082460..2886c29fc6a03 100644 --- a/jstests/concurrency/fsm_workloads/create_collection.js +++ b/jstests/concurrency/fsm_workloads/create_collection.js @@ -1,12 +1,10 @@ -'use strict'; - /** * create_collection.js * * Repeatedly creates a collection. */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/create_collection_and_view.js b/jstests/concurrency/fsm_workloads/create_collection_and_view.js index 29bcddee65f46..083fa6d0e185d 100644 --- a/jstests/concurrency/fsm_workloads/create_collection_and_view.js +++ b/jstests/concurrency/fsm_workloads/create_collection_and_view.js @@ -7,7 +7,7 @@ * @tags: [catches_command_failures, antithesis_incompatible] */ -var $config = (function() { +export const $config = (function() { const prefix = "create_collection_and_view"; // We'll use a single unique collection for all operations executed by this test. 
The diff --git a/jstests/concurrency/fsm_workloads/create_database.js b/jstests/concurrency/fsm_workloads/create_database.js index 332303f7100bd..0e4a8acf171c5 100644 --- a/jstests/concurrency/fsm_workloads/create_database.js +++ b/jstests/concurrency/fsm_workloads/create_database.js @@ -1,5 +1,3 @@ -'use strict'; - /** * create_database.js * @@ -14,7 +12,7 @@ load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js"); -var $config = (function() { +export const $config = (function() { let data = { checkCommandResult: function checkCommandResult(mayFailWithDatabaseDifferCase, res) { if (mayFailWithDatabaseDifferCase) diff --git a/jstests/concurrency/fsm_workloads/create_index_background.js b/jstests/concurrency/fsm_workloads/create_index_background.js index 2fd28949bc3ec..67e7f72288a36 100644 --- a/jstests/concurrency/fsm_workloads/create_index_background.js +++ b/jstests/concurrency/fsm_workloads/create_index_background.js @@ -1,5 +1,3 @@ -'use strict'; - /** * create_index_background.js * @@ -15,7 +13,7 @@ */ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos -var $config = (function() { +export const $config = (function() { var data = { nDocumentsToSeed: 1000, nDocumentsToCreate: 200, @@ -68,7 +66,7 @@ var $config = (function() { return coll.find({crud: {$exists: true}}).itcount() > 0; }, 'No documents with "crud" field have been inserted or updated', 60 * 1000); - let createOptions = {background: true}; + let createOptions = {}; let filter = this.getPartialFilterExpression(); if (filter !== undefined) { createOptions['partialFilterExpression'] = filter; diff --git a/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js b/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js index f352ece062607..e8e78d6cd0ea6 100644 --- a/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js +++ b/jstests/concurrency/fsm_workloads/create_index_background_partial_filter.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Executes the create_index_background.js workload, but with a partial filter expression on the * indexed field. @@ -9,10 +7,10 @@ * creates_background_indexes * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // For extendWorkload. -load('jstests/concurrency/fsm_workloads/create_index_background.js'); // For $config. 
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/create_index_background.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { const fieldName = "isIndexed"; $config.data.getIndexSpec = function() { diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique.js b/jstests/concurrency/fsm_workloads/create_index_background_unique.js index 9f43db15b8843..6cd31867b3f31 100644 --- a/jstests/concurrency/fsm_workloads/create_index_background_unique.js +++ b/jstests/concurrency/fsm_workloads/create_index_background_unique.js @@ -1,5 +1,3 @@ -'use strict'; - /** * create_index_background_unique.js * @@ -15,7 +13,8 @@ * ] */ load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js"); -var $config = (function() { + +export const $config = (function() { var data = { prefix: "create_index_background_unique_", numDocsToLoad: 5000, @@ -40,7 +39,7 @@ var $config = (function() { const res = db.runCommand({ createIndexes: this.getCollectionNameForThread(this.tid), - indexes: [{key: {x: 1}, name: "x_1", unique: true, background: true}] + indexes: [{key: {x: 1}, name: "x_1", unique: true}] }); // Multi-statement Transactions can fail with SnapshotUnavailable if there are // pending catalog changes as of the transaction start (see SERVER-43018). diff --git a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js index c1f0da117f25c..2c0677128a4cc 100644 --- a/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js +++ b/jstests/concurrency/fsm_workloads/create_index_background_unique_capped.js @@ -1,5 +1,3 @@ -'use strict'; - /** * create_index_background_unique_capped.js * @@ -8,10 +6,12 @@ * @tags: [creates_background_indexes, requires_capped] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/create_index_background_unique.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/create_index_background_unique.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.prefix = "create_index_background_unique_capped_"; $config.data.getCollectionOptions = function() { // We create an 8MB capped collection, as it will comfortably fit the collection data diff --git a/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js b/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js index f3d5f972cb1cb..80e4f31c33c68 100644 --- a/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js +++ b/jstests/concurrency/fsm_workloads/create_index_background_wildcard.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Executes the create_index_background.js workload, but with a wildcard index. * @@ -8,10 +6,10 @@ * creates_background_indexes * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // For extendWorkload. -load('jstests/concurrency/fsm_workloads/create_index_background.js'); // For $config. 
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/create_index_background.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.getIndexSpec = function() { return {"$**": 1}; }; diff --git a/jstests/concurrency/fsm_workloads/create_timeseries_collection.js b/jstests/concurrency/fsm_workloads/create_timeseries_collection.js index c85d9946feb4d..8a2d32ceb42e7 100644 --- a/jstests/concurrency/fsm_workloads/create_timeseries_collection.js +++ b/jstests/concurrency/fsm_workloads/create_timeseries_collection.js @@ -8,7 +8,7 @@ * does_not_support_stepdowns, * ] */ -var $config = (function() { +export const $config = (function() { var data = {prefix: "create_timeseries_collection"}; var states = (function() { diff --git a/jstests/concurrency/fsm_workloads/dbhash_test.js b/jstests/concurrency/fsm_workloads/dbhash_test.js new file mode 100644 index 0000000000000..a493f5569f55e --- /dev/null +++ b/jstests/concurrency/fsm_workloads/dbhash_test.js @@ -0,0 +1,87 @@ +/** + * Tests dbHash collisions in WT with full validation. + * dbHash should not experience races on data, or EBUSY errors in the storage engine. + * @tags: [ + * requires_wiredtiger, + * requires_replication, + * uses_full_validation, + * ] + */ + +"use strict"; + +load("jstests/concurrency/fsm_workload_helpers/state_transition_utils.js"); + +export const dbPrefix = jsTestName() + '_db_'; + +export const $config = (function() { + let states = { + init: function(db, collName) { + jsTestLog("init tid: " + this.tid); + }, + dbHash: function(db, collName) { + jsTestLog("dbHash: " + db + "." + collName + " tid: " + this.tid); + let opTime = + assert + .commandWorked(db.runCommand( + {insert: collName, documents: [{x: 1}], writeConcern: {w: "majority"}})) + .operationTime; + jsTestLog("dbHash opTime:" + tojson(opTime)); + jsTestLog("dbHash begin opTime:" + tojson(opTime)); + let dbHashRes = assert.commandWorked(db.collName.runCommand( + {dbHash: 1, $_internalReadAtClusterTime: Timestamp(opTime['t'], opTime['i'])})); + jsTestLog("dbHash done" + dbHashRes.timeMillis); + }, + fullValidation: function(db, collName) { + jsTestLog("fullValidation: " + db + "." + collName + " tid: " + this.tid); + let res = assert.commandWorked(db.collName.validate({full: true})); + jsTestLog("fullValidation done: " + db + "." + collName + " " + this.tid); + assert(res.valid); + }, + }; + + const setSyncDelay = function(db, delay) { + jsTestLog("setSyncDelay: ", delay); + assert.commandWorked(db.adminCommand({setParameter: 1, syncdelay: delay})); + }; + + const setup = function(db, collName) { + jsTestLog("Creating:" + db + "." + collName + " tid: " + this.tid); + let x = 'x'.repeat(20 * 1024); // 20KB + + let bulk = db.collName.initializeOrderedBulkOp(); + for (let i = 0; i < 80; i++) { + bulk.insert({_id: x + i.toString()}); + } + assertAlways.commandWorked(bulk.execute()); + + // Avoid filling the cache by flushing on a shorter interval + setSyncDelay(db, 10); + + jsTestLog("Creating done:" + db + "." 
+ collName); + }; + + const teardown = function(db, collName) { + setSyncDelay(db, 60); + }; + + const standardTransition = { + dbHash: 0.5, + fullValidation: 0.5, + }; + + const transitions = { + init: standardTransition, + dbHash: {dbHash: 0.8, fullValidation: 0.2}, + fullValidation: {dbHash: 0.2, fullValidation: 0.8}, + }; + + return { + threadCount: 5, + iterations: 2, + setup: setup, + states: states, + teardown: teardown, + transitions: transitions, + }; +})(); diff --git a/jstests/concurrency/fsm_workloads/distinct.js b/jstests/concurrency/fsm_workloads/distinct.js index 2c56372c7d7b5..aaf5ca3f2ae1a 100644 --- a/jstests/concurrency/fsm_workloads/distinct.js +++ b/jstests/concurrency/fsm_workloads/distinct.js @@ -1,5 +1,3 @@ -'use strict'; - /** * distinct.js * @@ -8,7 +6,7 @@ * Each thread operates on a separate collection. */ -var $config = (function() { +export const $config = (function() { var data = {numDocs: 1000, prefix: 'distinct_fsm', shardKey: {i: 1}}; var states = (function() { diff --git a/jstests/concurrency/fsm_workloads/distinct_noindex.js b/jstests/concurrency/fsm_workloads/distinct_noindex.js index b2f2f69bcdd0d..e741abda56461 100644 --- a/jstests/concurrency/fsm_workloads/distinct_noindex.js +++ b/jstests/concurrency/fsm_workloads/distinct_noindex.js @@ -1,5 +1,3 @@ -'use strict'; - /** * distinct_noindex.js * @@ -7,7 +5,7 @@ * The field contains non-unique values. * Each thread operates on the same collection. */ -var $config = (function() { +export const $config = (function() { var data = { randRange: function randRange(low, high) { assertAlways.gt(high, low); diff --git a/jstests/concurrency/fsm_workloads/distinct_projection.js b/jstests/concurrency/fsm_workloads/distinct_projection.js index cf287cdb2106f..633fb5c099b7f 100644 --- a/jstests/concurrency/fsm_workloads/distinct_projection.js +++ b/jstests/concurrency/fsm_workloads/distinct_projection.js @@ -1,5 +1,3 @@ -'use strict'; - /** * distinct_projection.js * @@ -7,10 +5,10 @@ * The indexed field contains unique values. * Each thread operates on a separate collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/distinct.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/distinct.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.prefix = 'distinct_projection_fsm'; $config.states.distinct = function distinct(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/drop_collection.js b/jstests/concurrency/fsm_workloads/drop_collection.js index 950c9f3d5ed28..b6a5e3c0a7495 100644 --- a/jstests/concurrency/fsm_workloads/drop_collection.js +++ b/jstests/concurrency/fsm_workloads/drop_collection.js @@ -1,11 +1,9 @@ -'use strict'; - /** * drop_collection.js * * Repeatedly creates and drops a collection. */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. @@ -33,13 +31,5 @@ var $config = (function() { var transitions = {init: {createAndDrop: 1}, createAndDrop: {createAndDrop: 1}}; - // This test performs dropCollection concurrently from many threads, and dropCollection on a - // sharded cluster takes a distributed lock. 
Since a distributed lock is acquired by repeatedly - // attempting to grab the lock every half second for 20 seconds (a max of 40 attempts), it's - // possible that some thread will be starved by the other threads and fail to grab the lock - // after 40 attempts. To reduce the likelihood of this, we choose threadCount and iterations so - // that threadCount * iterations < 40. - // The threadCount and iterations can be increased once PM-697 ("Remove all usages of - // distributed lock") is complete. - return {threadCount: 5, iterations: 5, data: data, states: states, transitions: transitions}; + return {threadCount: 10, iterations: 10, data: data, states: states, transitions: transitions}; })(); diff --git a/jstests/concurrency/fsm_workloads/drop_collection_sharded.js b/jstests/concurrency/fsm_workloads/drop_collection_sharded.js index 4f840353b653d..2cc36253521ec 100644 --- a/jstests/concurrency/fsm_workloads/drop_collection_sharded.js +++ b/jstests/concurrency/fsm_workloads/drop_collection_sharded.js @@ -7,8 +7,6 @@ * requires_sharding, * ] */ -'use strict'; - const dbPrefix = jsTestName() + '_DB_'; const dbCount = 2; const collPrefix = 'sharded_coll_'; @@ -22,7 +20,7 @@ function getRandomCollection(db) { return getRandomDb(db)[collPrefix + Random.randInt(collCount)]; } -var $config = (function() { +export const $config = (function() { var setup = function(db, collName, cluster) { // Initialize databases for (var i = 0; i < dbCount; i++) { diff --git a/jstests/concurrency/fsm_workloads/drop_database.js b/jstests/concurrency/fsm_workloads/drop_database.js index dcf72f8ec9611..8e7df183e9e19 100644 --- a/jstests/concurrency/fsm_workloads/drop_database.js +++ b/jstests/concurrency/fsm_workloads/drop_database.js @@ -1,5 +1,3 @@ -'use strict'; - /** * drop_database.js * @@ -8,7 +6,7 @@ * @tags: [ * ] */ -var $config = (function() { +export const $config = (function() { var states = { init: function init(db, collName) { this.uniqueDBName = db.getName() + 'drop_database' + this.tid; diff --git a/jstests/concurrency/fsm_workloads/drop_database_sharded.js b/jstests/concurrency/fsm_workloads/drop_database_sharded.js index ece356a23e0b2..41cec6cde8a02 100644 --- a/jstests/concurrency/fsm_workloads/drop_database_sharded.js +++ b/jstests/concurrency/fsm_workloads/drop_database_sharded.js @@ -5,8 +5,6 @@ * requires_sharding, * ] */ -'use strict'; - const dbPrefix = jsTestName() + '_DB_'; const dbCount = 2; const collPrefix = 'sharded_coll_'; @@ -47,7 +45,7 @@ function getDropDbStateResults(db) { return {ok: countOK, notOK: countNotOK}; } -var $config = (function() { +export const $config = (function() { var states = (function() { function init(db, collName) { } diff --git a/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js b/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js index 6b604abdc81c2..99bf723dc0f5c 100644 --- a/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js +++ b/jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Repeatedly creates and drops a database in concurrency with FCV changes * @@ -10,10 +8,10 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/drop_database_sharded.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/drop_database_sharded.js"; -var $config = extendWorkload($config, function($config, $super) { +export 
const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.setFCV = function(db, collName) { const fcvValues = [lastLTSFCV, lastContinuousFCV, latestFCV]; const targetFCV = fcvValues[Random.randInt(3)]; diff --git a/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js b/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js index 05b15c755ffbd..3e679be8869b8 100644 --- a/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js +++ b/jstests/concurrency/fsm_workloads/drop_index_during_lookup.js @@ -1,12 +1,10 @@ -'use strict'; - /** * drop_index_during_lookup.js * * Sets up a situation where index join strategy will be chosen for $lookup while while running * concurrent dropIndexes against the index chosen for the foreign side. */ -var $config = (function() { +export const $config = (function() { let data = { collName: 'localColl', foreignCollName: 'foreignColl', diff --git a/jstests/concurrency/fsm_workloads/drop_index_during_replan.js b/jstests/concurrency/fsm_workloads/drop_index_during_replan.js index f4f68e0ffb36f..36b9406308b52 100644 --- a/jstests/concurrency/fsm_workloads/drop_index_during_replan.js +++ b/jstests/concurrency/fsm_workloads/drop_index_during_replan.js @@ -1,5 +1,3 @@ -'use strict'; - /** * drop_index_during_replan.js * @@ -8,7 +6,7 @@ * time, other threads may be dropping {b: 1}. This tests that the replanning process is robust to * index drops. */ -var $config = (function() { +export const $config = (function() { let data = { collName: 'drop_index_during_replan', indexSpecs: [ diff --git a/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js b/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js index d5b17b67b0740..5883d35b789b5 100644 --- a/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js +++ b/jstests/concurrency/fsm_workloads/drop_sharded_timeseries_collection.js @@ -8,8 +8,6 @@ * requires_fcv_51, * ] */ -'use strict'; - const dbPrefix = 'fsm_db_for_sharded_timeseries_collection_'; const dbCount = 2; const collPrefix = 'sharded_timeseries_collection_'; @@ -17,8 +15,6 @@ const collCount = 2; const timeField = 'time'; const metaField = 'hostId'; -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. - function getRandomDb(db) { return db.getSiblingDB(dbPrefix + Random.randInt(dbCount)); } @@ -27,23 +23,8 @@ function getRandomTimeseriesView(db) { return getRandomDb(db)[collPrefix + Random.randInt(collCount)]; } -var $config = (function() { +export const $config = (function() { const setup = function(db, collName, cluster) { - // Check that necessary feature flags are enabled on each of the mongods. - let isEnabled = true; - cluster.executeOnMongodNodes(function(db) { - if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(db)) { - isEnabled = false; - } - }); - this.isShardedTimeseriesEnabled = isEnabled; - - if (!this.isShardedTimeseriesEnabled) { - jsTestLog( - "Feature flags for sharded time-series collections are not enabled. This test will do nothing."); - return; - } - // Enable sharding for the test databases. 
for (var i = 0; i < dbCount; i++) { const dbName = dbPrefix + i; @@ -54,10 +35,6 @@ var $config = (function() { const states = { init: function(db, collName) {}, create: function(db, collName) { - if (!this.isShardedTimeseriesEnabled) { - return; - } - const coll = getRandomTimeseriesView(db); jsTestLog("Executing create state on: " + coll.getFullName()); assertAlways.commandWorked(db.adminCommand({ @@ -67,10 +44,6 @@ var $config = (function() { })); }, dropView: function(db, collName) { - if (!this.isShardedTimeseriesEnabled) { - return; - } - const coll = getRandomTimeseriesView(db); jsTestLog("Executing dropView state on: " + coll.getFullName()); assertAlways.commandWorked(coll.getDB().runCommand({drop: coll.getName()})); diff --git a/jstests/concurrency/fsm_workloads/explain.js b/jstests/concurrency/fsm_workloads/explain.js index 135e7720bee10..b666bacfbaceb 100644 --- a/jstests/concurrency/fsm_workloads/explain.js +++ b/jstests/concurrency/fsm_workloads/explain.js @@ -1,5 +1,3 @@ -'use strict'; - /** * explain.js * @@ -7,9 +5,9 @@ * */ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod -load('jstests/libs/analyze_plan.js'); +import {getWinningPlan} from "jstests/libs/analyze_plan.js"; -var $config = (function() { +export const $config = (function() { var data = { collNotExist: 'donotexist__', nInserted: 0, diff --git a/jstests/concurrency/fsm_workloads/explain_aggregate.js b/jstests/concurrency/fsm_workloads/explain_aggregate.js index b59524c2abb0a..44f2a4762df35 100644 --- a/jstests/concurrency/fsm_workloads/explain_aggregate.js +++ b/jstests/concurrency/fsm_workloads/explain_aggregate.js @@ -1,15 +1,13 @@ -'use strict'; - /** * explain_aggregate.js * * Runs explain() and aggregate() on a collection. * */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/explain.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { function assertCursorStages(num, obj) { assertAlways(obj.stages, tojson(obj)); assertAlways.eq(num, obj.stages.length, tojson(obj.stages)); diff --git a/jstests/concurrency/fsm_workloads/explain_count.js b/jstests/concurrency/fsm_workloads/explain_count.js index fe5a71e3dfbb1..5eb5ac98cced6 100644 --- a/jstests/concurrency/fsm_workloads/explain_count.js +++ b/jstests/concurrency/fsm_workloads/explain_count.js @@ -1,16 +1,14 @@ -'use strict'; - /** * explain_count.js * * Runs explain() and count() on a collection. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/explain.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js"; load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos -load('jstests/libs/analyze_plan.js'); // for planHasStage +import {planHasStage} from "jstests/libs/analyze_plan.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { function assertNCounted(num, obj, db) { var stage = obj.executionStats.executionStages; // get sharded stage(s) if counting on mongos diff --git a/jstests/concurrency/fsm_workloads/explain_distinct.js b/jstests/concurrency/fsm_workloads/explain_distinct.js index 2786a7f80ecd0..6d8a66c464c64 100644 --- a/jstests/concurrency/fsm_workloads/explain_distinct.js +++ b/jstests/concurrency/fsm_workloads/explain_distinct.js @@ -1,15 +1,13 @@ -'use strict'; - /** * explain_distinct.js * * Runs explain() and distinct() on a collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/explain.js'); // for $config -load('jstests/libs/analyze_plan.js'); // for planHasStage +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js"; +import {planHasStage} from "jstests/libs/analyze_plan.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states = Object.extend({ explainBasicDistinct: function(db, collName) { var res = db[collName].explain().distinct('i'); diff --git a/jstests/concurrency/fsm_workloads/explain_find.js b/jstests/concurrency/fsm_workloads/explain_find.js index 65378c9ac9009..dc8ae4e921c60 100644 --- a/jstests/concurrency/fsm_workloads/explain_find.js +++ b/jstests/concurrency/fsm_workloads/explain_find.js @@ -1,16 +1,14 @@ -'use strict'; - /** * explain_find.js * * Runs explain() and find() on a collection. * */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/explain.js'); // for $config -load('jstests/libs/analyze_plan.js'); // for planHasStage and isIxscan +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js"; +import {isIxscan, planHasStage} from "jstests/libs/analyze_plan.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states = Object.extend({ explainLimit: function explainLimit(db, collName) { var res = db[collName].find().limit(3).explain(); diff --git a/jstests/concurrency/fsm_workloads/explain_remove.js b/jstests/concurrency/fsm_workloads/explain_remove.js index fe47554a83fa6..6465c14430963 100644 --- a/jstests/concurrency/fsm_workloads/explain_remove.js +++ b/jstests/concurrency/fsm_workloads/explain_remove.js @@ -1,14 +1,12 @@ -'use strict'; - /** * explain_remove.js * * Runs explain() and remove() on a collection. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/explain.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states = Object.extend({ explainSingleRemove: function explainSingleRemove(db, collName) { var res = db[collName] diff --git a/jstests/concurrency/fsm_workloads/explain_update.js b/jstests/concurrency/fsm_workloads/explain_update.js index 5176f8d6c6267..91ff4894d79ff 100644 --- a/jstests/concurrency/fsm_workloads/explain_update.js +++ b/jstests/concurrency/fsm_workloads/explain_update.js @@ -1,15 +1,13 @@ -'use strict'; - /** * explain_update.js * * Runs explain() and update() on a collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/explain.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/explain.js"; load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states = Object.extend({ explainBasicUpdate: function explainBasicUpdate(db, collName) { var res = diff --git a/jstests/concurrency/fsm_workloads/findAndModify_flip_location.js b/jstests/concurrency/fsm_workloads/findAndModify_flip_location.js deleted file mode 100644 index cfd2871737f70..0000000000000 --- a/jstests/concurrency/fsm_workloads/findAndModify_flip_location.js +++ /dev/null @@ -1,163 +0,0 @@ -'use strict'; - -/** - * Each thread uses its own LSID and performs `findAndModify`s with retries on documents while the - * `storeFindAndModifyImagesInSideCollection` server parameter gets flipped. - * - * @tags: [requires_replication, requires_non_retryable_commands, uses_transactions]; - */ -var $config = (function() { - var data = { - numDocs: 100, - }; - - var states = (function() { - function init(db, collName) { - this._lastTxnId = 0; - this._lsid = UUID(); - } - - function findAndModifyUpsert(db, collName) { - // `auto_retry_transactions` is not compatible with explicitly testing retryable writes. - // This avoids issues regarding the multi_stmt tasks. - fsm.forceRunningOutsideTransaction(this); - - this._lastTxnId += 1; - this._lastCmd = { - findandmodify: collName, - lsid: {id: this._lsid}, - txnNumber: NumberLong(this._lastTxnId), - stmtId: NumberInt(1), - query: {_id: Math.round(Math.random() * this.numDocs)}, - new: Math.random() > 0.5, - upsert: true, - update: {$inc: {counter: 1}}, - }; - this._lastResponse = assert.commandWorked(db.runCommand(this._lastCmd)); - } - - function findAndModifyUpdate(db, collName) { - // `auto_retry_transactions` is not compatible with explicitly testing retryable writes. - // This avoids issues regarding the multi_stmt tasks. 
- fsm.forceRunningOutsideTransaction(this); - - this._lastTxnId += 1; - this._lastCmd = { - findandmodify: collName, - lsid: {id: this._lsid}, - txnNumber: NumberLong(this._lastTxnId), - stmtId: NumberInt(1), - query: {_id: Math.round(Math.random() * this.numDocs)}, - new: Math.random() > 0.5, - upsert: false, - update: {$inc: {counter: 1}}, - }; - this._lastResponse = assert.commandWorked(db.runCommand(this._lastCmd)); - } - - function findAndModifyDelete(db, collName) { - // `auto_retry_transactions` is not compatible with explicitly testing retryable writes. - // This avoids issues regarding the multi_stmt tasks. - fsm.forceRunningOutsideTransaction(this); - - this._lastTxnId += 1; - this._lastCmd = { - findandmodify: collName, - lsid: {id: this._lsid}, - txnNumber: NumberLong(this._lastTxnId), - stmtId: NumberInt(1), - query: {_id: Math.round(Math.random() * this.numDocs)}, - // Deletes may not ask for the postImage - new: false, - remove: true, - }; - this._lastResponse = assert.commandWorked(db.runCommand(this._lastCmd)); - } - - function findAndModifyRetry(db, collName) { - // `auto_retry_transactions` is not compatible with explicitly testing retryable writes. - // This avoids issues regarding the multi_stmt tasks. - fsm.forceRunningOutsideTransaction(this); - - assert(this._lastCmd); - assert(this._lastResponse); - - let response = assert.commandWorked(db.runCommand(this._lastCmd)); - let debugMsg = { - "TID": this.tid, - "LastCmd": this._lastCmd, - "LastResponse": this._lastResponse, - "Response": response - }; - assert.eq(this._lastResponse.hasOwnProperty("lastErrorObject"), - response.hasOwnProperty("lastErrorObject"), - debugMsg); - if (response.hasOwnProperty("lastErrorObject") && - // If the original command affected `n=1` document, all retries must return - // identical results. If an original command receives `n=0`, then a retry may find a - // match and return `n=1`. Only compare `lastErrorObject` and `value` when retries - // must be identical. - this._lastResponse["lastErrorObject"].n === 1) { - assert.eq( - this._lastResponse["lastErrorObject"], response["lastErrorObject"], debugMsg); - } - assert.eq(this._lastResponse.hasOwnProperty("value"), - response.hasOwnProperty("value"), - debugMsg); - if (response.hasOwnProperty("value") && this._lastResponse["lastErrorObject"].n === 1) { - assert.eq(this._lastResponse["value"], response["value"], debugMsg); - } - - // Have all workers participate in creating some chaos. 
- assert.commandWorked(db.adminCommand({ - setParameter: 1, - storeFindAndModifyImagesInSideCollection: Math.random() > 0.5, - })); - } - - return { - init: init, - findAndModifyUpsert: findAndModifyUpsert, - findAndModifyUpdate: findAndModifyUpdate, - findAndModifyDelete: findAndModifyDelete, - findAndModifyRetry: findAndModifyRetry - }; - })(); - - var transitions = { - init: {findAndModifyUpsert: 1.0}, - findAndModifyUpsert: { - findAndModifyRetry: 3.0, - findAndModifyUpsert: 1.0, - findAndModifyUpdate: 1.0, - findAndModifyDelete: 1.0 - }, - findAndModifyUpdate: { - findAndModifyRetry: 3.0, - findAndModifyUpsert: 1.0, - findAndModifyUpdate: 1.0, - findAndModifyDelete: 1.0 - }, - findAndModifyDelete: { - findAndModifyRetry: 3.0, - findAndModifyUpsert: 1.0, - findAndModifyUpdate: 1.0, - findAndModifyDelete: 1.0 - }, - findAndModifyRetry: { - findAndModifyRetry: 1.0, - findAndModifyUpsert: 1.0, - findAndModifyUpdate: 1.0, - findAndModifyDelete: 1.0 - }, - }; - - return { - threadCount: 10, - iterations: 100, - data: data, - states: states, - transitions: transitions, - setup: function() {}, - }; -})(); diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc.js b/jstests/concurrency/fsm_workloads/findAndModify_inc.js index 4b823bfb9c335..489694f3b165e 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_inc.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_inc.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_inc.js * @@ -14,7 +12,7 @@ // For isMongod. load('jstests/concurrency/fsm_workload_helpers/server_types.js'); -var $config = (function() { +export const $config = (function() { let data = { getUpdateArgument: function(fieldName) { return {$inc: {[fieldName]: 1}}; diff --git a/jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js b/jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js index c275ac35021e1..05423a687baa4 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_inc_pipeline.js @@ -1,15 +1,13 @@ -'use strict'; - /** * findAndModify_inc_pipeline.js * * This is the same workload as findAndModify_inc.js, but substitutes a $mod-style update with a * pipeline-style one. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/findAndModify_inc.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/findAndModify_inc.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.getUpdateArgument = function getUpdateArgument(fieldName) { return [{$addFields: {[fieldName]: {$add: ["$" + fieldName, 1]}}}]; }; diff --git a/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js index 4faefca2fd11f..e1cdfd33173a4 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_mixed_queue_unindexed.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_mixed_queue_unindexed.js * @@ -15,13 +13,13 @@ * * This workload was designed to reproduce SERVER-21434. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config - -// For isMongod. -load('jstests/concurrency/fsm_workload_helpers/server_types.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js"; +load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod. -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as the database name, since the workload name is assumed to be // unique. $config.data.uniqueDBName = 'findAndModify_mixed_queue_unindexed'; diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove.js b/jstests/concurrency/fsm_workloads/findAndModify_remove.js index 3c20a7ae61f1b..d1a2902ec8264 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_remove.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_remove.js @@ -1,12 +1,10 @@ -'use strict'; - /** * findAndModify_remove.js * * Each thread repeatedly inserts a document, and subsequently performs * the findAndModify command to remove it. */ -var $config = (function() { +export const $config = (function() { var data = {shardKey: {tid: 1}}; var states = (function() { diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js index 68796a2000a4c..25fcf95421612 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_remove_queue.js * @@ -14,7 +12,7 @@ // For isMongod. load('jstests/concurrency/fsm_workload_helpers/server_types.js'); -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as the database name, since the workload name is assumed to be // unique. diff --git a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js index 981568904ad42..5a86dd399250d 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_remove_queue_unindexed.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_remove_queue_unindexed.js * @@ -13,10 +11,12 @@ * * This workload was designed to reproduce SERVER-21434. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as the database name, since the workload // name is assumed to be unique. 
$config.data.uniqueDBName = 'findAndModify_remove_queue_unindexed'; diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update.js b/jstests/concurrency/fsm_workloads/findAndModify_update.js index e97281b5a8a94..7e7f72bddeaab 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_update.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_update.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_update.js * @@ -8,7 +6,7 @@ * selected based on 'query' and 'sort' specifications, and updated * using either the $min or $max operator. */ -var $config = (function() { +export const $config = (function() { var data = { numDocsPerThread: 3, // >1 for 'sort' to be meaningful shardKey: {tid: 1} diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js index aba96be9648c4..7dec62d6bcf5c 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_update_collscan.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_update_collscan.js * @@ -10,10 +8,10 @@ * * Attempts to force a collection scan by not creating an index. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/findAndModify_update.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/findAndModify_update.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Do not create the { tid: 1, value: 1 } index so that a // collection // scan is performed for the query and sort operations. diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js index 0eebb29cb5904..e068fa8c6bd3b 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_update_grow.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_update_grow.js * @@ -10,7 +8,7 @@ */ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod -var $config = (function() { +export const $config = (function() { var data = { shardKey: {tid: 1}, }; diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js index 6937756ed0f76..398fb5a912063 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_update_queue.js * @@ -11,13 +9,13 @@ * This workload was designed to reproduce an issue similar to SERVER-18304 for update operations * using the findAndModify command where the old version of the document is returned. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js'); // for $config - -// For isMongod. 
-load('jstests/concurrency/fsm_workload_helpers/server_types.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/findAndModify_remove_queue.js"; +load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongod. -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as the database name, since the workload name is assumed to be // unique. $config.data.uniqueDBName = 'findAndModify_update_queue'; diff --git a/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js b/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js index c70b80058f0d2..d7384994104f4 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_update_queue_unindexed.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_update_queue_unindexed.js * @@ -13,10 +11,12 @@ * * This workload was designed to reproduce SERVER-21434. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/findAndModify_update_queue.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/findAndModify_update_queue.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as the database name, since the workload // name is assumed to be unique. $config.data.uniqueDBName = 'findAndModify_update_queue_unindexed'; diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js index e9b06e7afb72e..b12b83380b109 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_upsert.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_upsert.js * @@ -8,7 +6,7 @@ * created) based on the 'query' specification, and updated using the * $push operator. */ -var $config = (function() { +export const $config = (function() { var data = {sort: false, shardKey: {tid: 1}}; var states = (function() { diff --git a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js index 0cbfbd3ab2145..fb6f5ced90df8 100644 --- a/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js +++ b/jstests/concurrency/fsm_workloads/findAndModify_upsert_collscan.js @@ -1,5 +1,3 @@ -'use strict'; - /** * findAndModify_upsert_collscan.js * @@ -10,10 +8,10 @@ * * Forces 'sort' to perform a collection scan by using $natural. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/findAndModify_upsert.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/findAndModify_upsert.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.sort = {$natural: 1}; return $config; diff --git a/jstests/concurrency/fsm_workloads/find_cmd_with_indexes_timeseries.js b/jstests/concurrency/fsm_workloads/find_cmd_with_indexes_timeseries.js index 901b45520001e..b48bb62c9e5f7 100644 --- a/jstests/concurrency/fsm_workloads/find_cmd_with_indexes_timeseries.js +++ b/jstests/concurrency/fsm_workloads/find_cmd_with_indexes_timeseries.js @@ -1,5 +1,3 @@ -'use strict'; - /** * This test verifies that neither index creation nor find cmd operation on a time-series collection * leads to incorrect data results. @@ -17,7 +15,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { // Hardcode time-series collection information so that the threads can all obtain it and run on // the same fields and indexes. const timeFieldName = "tm"; diff --git a/jstests/concurrency/fsm_workloads/find_flip_sbe_enabled.js b/jstests/concurrency/fsm_workloads/find_flip_sbe_enabled.js index 32c9352a62510..dc1c90061c03d 100644 --- a/jstests/concurrency/fsm_workloads/find_flip_sbe_enabled.js +++ b/jstests/concurrency/fsm_workloads/find_flip_sbe_enabled.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Sets the internalQueryFrameworkControl flag to "forceClassicEngine" and "trySbeEngine", and * asserts that find queries using the plan cache produce the correct results. @@ -11,7 +9,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { let data = {originalParamValues: {}}; function getCollectionName(collName) { diff --git a/jstests/concurrency/fsm_workloads/global_index.js b/jstests/concurrency/fsm_workloads/global_index.js index e41a5f77a99c4..e5f0b6ecd92fb 100644 --- a/jstests/concurrency/fsm_workloads/global_index.js +++ b/jstests/concurrency/fsm_workloads/global_index.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform global index CRUD operations, with create and drop commands. * @@ -9,7 +7,7 @@ * requires_replication * ] */ -var $config = (function() { +export const $config = (function() { const data = { uuidArr: ["47b5c083-8d60-4920-90e2-ba3ff668c371", "8acc9ba2-2d8f-4b01-b835-8f1818c1df1c"], range: 5 diff --git a/jstests/concurrency/fsm_workloads/globally_managed_cursors.js b/jstests/concurrency/fsm_workloads/globally_managed_cursors.js index 42e4abfa3e2b1..91b56e5d92bdc 100644 --- a/jstests/concurrency/fsm_workloads/globally_managed_cursors.js +++ b/jstests/concurrency/fsm_workloads/globally_managed_cursors.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs a variety commands which need to interact with the global cursor manager. This test was * designed to reproduce SERVER-33959. @@ -7,10 +5,12 @@ * The "grandparent test," invalidated_cursors.js, uses $currentOp. 
* @tags: [uses_curop_agg_stage, state_functions_share_cursor] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.listCollections = function listCollections(unusedDB, _) { const db = unusedDB.getSiblingDB(this.uniqueDBName); const cmdRes = diff --git a/jstests/concurrency/fsm_workloads/index_build_abort.js b/jstests/concurrency/fsm_workloads/index_build_abort.js new file mode 100644 index 0000000000000..bbf58ab93e9df --- /dev/null +++ b/jstests/concurrency/fsm_workloads/index_build_abort.js @@ -0,0 +1,145 @@ +/** + * Build indexes with different abort causes. + * - Indexing error. + * - Abort due to dropIndexes. + * - Abort due to killOp on primary. + * + * Abort due to DiskSpaceMonitor is not tested as it would interfere with other concurrent tests + * creating index builds. Similarly, killOp on secondary nodes is not tested as it can result in a + * node crash, interfering with other tests. + * + * @tags: [ + * creates_background_indexes, + * # The test uses $currentOp, which is not supported in transactions. + * does_not_support_transactions, + * requires_fcv_71, + * requires_replication + * ] + */ + +load("jstests/libs/fail_point_util.js"); +load("jstests/noPassthrough/libs/index_build.js"); + +export const $config = (function() { + const data = { + prefix: "index_build_abort_", + nCollections: 3, + nDocuments: 25000, + expectedErrorCodes: [ErrorCodes.IndexBuildAborted, ErrorCodes.Interrupted, 13026], + mutexColl: "index_build_abort_mutexes", + }; + + function randInt(max) { + return Math.floor(Math.random() * max); + } + + function getRandCollectionName() { + return data.prefix + randInt(data.nCollections); + } + + function getCollMutexName(collName) { + return collName + "_mutex"; + } + + function mutexTryLock(db, collName) { + const collMutex = getCollMutexName(collName); + let doc = db[data.mutexColl].findAndModify( + {query: {mutex: collMutex, locked: 0}, update: {$set: {locked: 1}}}); + if (doc === null) { + return false; + } + return true; + } + + function mutexUnlock(db, collName) { + const collMutex = getCollMutexName(collName); + db[data.mutexColl].update({mutex: collMutex}, {$set: {locked: 0}}); + } + + const states = { + dropCollAndCreateIndexBuild: function dropCollAndCreateIndexBuild(db, collName) { + const randomColl = getRandCollectionName(); + var coll = db[randomColl]; + if (mutexTryLock(db, randomColl)) { + try { + // Having the collection drop outside the lock to allow a drop concurrent to an + // index build might be more interesting, but we'd also be allowing a drop in + // the middle of bulk insert, or before the createIndexes starts. 
+                    coll.drop();
+                    var bulk = coll.initializeUnorderedBulkOp();
+                    const failDocumentIndex = randInt(this.nDocuments);
+                    for (var i = 0; i < this.nDocuments; ++i) {
+                        if (failDocumentIndex == i) {
+                            bulk.insert({a: [0, "a"]});
+                        } else {
+                            bulk.insert({a: [0, 0]});
+                        }
+                    }
+                    let bulkRes = bulk.execute();
+                    assertAlways.commandWorked(bulkRes);
+                    assertAlways.eq(this.nDocuments, bulkRes.nInserted, tojson(bulkRes));
+                    assertAlways.commandFailedWithCode(coll.createIndexes([{a: "2d"}]),
+                                                       this.expectedErrorCodes);
+                } finally {
+                    mutexUnlock(db, randomColl);
+                }
+            }
+        },
+        dropIndexes: function dropIndexes(db, collName) {
+            assertAlways.commandWorkedOrFailedWithCode(
+                db.runCommand({dropIndexes: getRandCollectionName(), index: "*"}),
+                ErrorCodes.NamespaceNotFound);
+        },
+        killOpIndexBuild: function killOpIndexBuild(db, collName) {
+            let nTry = 0;
+            while (nTry++ < 10) {
+                try {
+                    const opId = IndexBuildTest.getIndexBuildOpId(db, getRandCollectionName());
+                    if (opId != -1) {
+                        db.killOp(opId);
+                        break;
+                    }
+                } catch (e) {
+                    jsTestLog("Suppressed exception during killOp attempt: " + e);
+                }
+            }
+        }
+    };
+
+    const transitionToAllStates = {
+        dropCollAndCreateIndexBuild: 1,
+        dropIndexes: 1,
+        killOpIndexBuild: 1,
+    };
+    const transitions = {
+        dropCollAndCreateIndexBuild: transitionToAllStates,
+        dropIndexes: transitionToAllStates,
+        killOpIndexBuild: transitionToAllStates
+    };
+
+    const setup = function(db, collName, cluster) {
+        for (let coll = 0; coll < this.nCollections; ++coll) {
+            const mutexName = getCollMutexName(data.prefix + coll);
+            db[data.mutexColl].insert({mutex: mutexName, locked: 0});
+        }
+    };
+
+    const teardown = function(db, collName, cluster) {
+        for (let coll = 0; coll < this.nCollections; ++coll) {
+            const collName = data.prefix + coll;
+            db[collName].drop();
+            db[getCollMutexName(collName)].drop();
+        }
+    };
+
+    return {
+        threadCount: 12,
+        iterations: 200,
+        startState: 'dropCollAndCreateIndexBuild',
+        states: states,
+        transitions: transitions,
+        setup: setup,
+        teardown: teardown,
+        data: data,
+    };
+})();
diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
index b264b6561a5dd..a6c7dfb49decf 100644
--- a/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
+++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char.js
@@ -1,5 +1,3 @@
-'use strict';
-
 /**
  * indexed_insert_1char.js
  *
  * Inserts multiple documents into an indexed collection. Asserts that all
  * documents appear in both a collection scan and an index scan. The indexed
  * value is a 1-character string based on the thread's id.
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_1char'; $config.data.shardKey = {}; $config.data.shardKey[$config.data.indexedField] = 1; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js index 1a8165cf82ed0..161261cab54cf 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_1char_noindex.js @@ -1,12 +1,10 @@ -'use strict'; - /** * indexed_insert_1char_noindex.js * * Executes the indexed_insert_1char.js workload after dropping its index. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_1char.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_1char.js"; load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex -var $config = extendWorkload($config, indexedNoindex); +export const $config = extendWorkload($baseConfig, indexedNoindex); diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js index d4ec75e992d78..91f35686c39c5 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_2d.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_2d.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_2d.js * @@ -7,10 +5,10 @@ * appear in both a collection scan and an index scan. The indexed value is a * legacy coordinate pair, indexed with a 2d index. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_2d'; // Remove the shard key for 2d indexes, as they are not supported delete $config.data.shardKey; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js index 20ac7b4b5883a..9cd60c6d7b9b1 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_2dsphere.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_2dsphere.js * @@ -7,10 +5,10 @@ * appear in both a collection scan and an index scan. The indexed value is a * legacy coordinate pair, indexed with a 2dsphere index. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_2d.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_2d.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_2dsphere'; $config.data.getIndexSpec = function getIndexSpec() { diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base.js b/jstests/concurrency/fsm_workloads/indexed_insert_base.js index 6b8649b4dbe65..b5d719b8d41ff 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_base.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_base.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_base.js * @@ -7,7 +5,7 @@ * documents appear in both a collection scan and an index scan. The indexed * value is the thread's id. */ -var $config = (function() { +export const $config = (function() { function makeSortSpecFromIndexSpec(ixSpec) { var sort = {}; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js b/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js index e03d5359ab103..a5f508e3c18c0 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_base_capped.js @@ -1,16 +1,14 @@ -'use strict'; - /** * indexed_insert_base_capped.js * * Executes the indexed_insert_base.js workload on a capped collection. * @tags: [requires_capped] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped -var $config = extendWorkload($config, makeCapped); +export const $config = extendWorkload($baseConfig, makeCapped); // Remove the shard key for capped collections, as they cannot be sharded delete $config.data.shardKey; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js index 5e4a2f0f6099a..1d4ec58b1e21b 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_base_noindex.js @@ -1,12 +1,10 @@ -'use strict'; - /** * indexed_insert_base_noindex.js * * Executes the indexed_insert_base.js workload after dropping its index. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex -var $config = extendWorkload($config, indexedNoindex); +export const $config = extendWorkload($baseConfig, indexedNoindex); diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js index f8b44cfd97819..95808e9090f87 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_compound.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_compound.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_compound.js * @@ -7,10 +5,10 @@ * appear in both a collection scan and an index scan. The collection is indexed * with a compound index on three different fields. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.init = function init(db, collName) { $super.states.init.apply(this, arguments); }; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js index 5306facb834b2..48dbcecedc75f 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_heterogeneous.js * @@ -7,10 +5,10 @@ * documents appear in both a collection scan and an index scan. The indexed * value is a different BSON type, depending on the thread's id. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_heterogeneous'; $config.data.shardKey = {}; $config.data.shardKey[$config.data.indexedField] = 1; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js index f8adab70ffc0d..73308989fc86c 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous_noindex.js @@ -1,12 +1,12 @@ -'use strict'; - /** * indexed_insert_heterogeneous_noindex.js * * Executes the indexed_insert_heterogeneous.js workload after dropping its index. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js'); // for $config -load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/indexed_insert_heterogeneous.js"; +load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex -var $config = extendWorkload($config, indexedNoindex); +export const $config = extendWorkload($baseConfig, indexedNoindex); diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large.js b/jstests/concurrency/fsm_workloads/indexed_insert_large.js index 55dd1daf4dc26..cc87a318059e4 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_large.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_large.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_large.js * @@ -8,10 +6,10 @@ * value is a string large enough to make the whole index key be 1K, which is * the maximum. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_large'; // Remove the shard key, since it cannot be greater than 512 bytes diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js index 625de8a387ee0..02c99f0b94550 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_large_noindex.js @@ -1,12 +1,10 @@ -'use strict'; - /** * indexed_insert_large_noindex.js * * Executes the indexed_insert_large.js workload after dropping its index. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_large.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_large.js"; load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex -var $config = extendWorkload($config, indexedNoindex); +export const $config = extendWorkload($baseConfig, indexedNoindex); diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js index 06e92f5907fbd..c1373ab08932a 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_long_fieldname.js * @@ -7,10 +5,10 @@ * documents appear in both a collection scan and an index scan. The indexed * field name is a long string. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // The indexedField must be limited such that the namespace and indexedField does not // exceed 128 characters. The namespace defaults to // "test_fsmdb.fsmcoll", // where i, j & k are increasing integers for each test, workload and thread. diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js index f8960c40b4d02..e9619b11842c2 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname_noindex.js @@ -1,12 +1,12 @@ -'use strict'; - /** * indexed_insert_long_fieldname_noindex.js * * Executes the indexed_insert_long_fieldname.js workload after dropping its index. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js'); // for $config -load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/indexed_insert_long_fieldname.js"; +load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex -var $config = extendWorkload($config, indexedNoindex); +export const $config = extendWorkload($baseConfig, indexedNoindex); diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js index 4343de2d2bea9..d938c0bac7c76 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_multikey.js * @@ -7,10 +5,10 @@ * documents appear in both a collection scan and an index scan. The indexed * value is an array of numbers. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_multikey'; // Remove the shard key, since it cannot be a multikey index delete $config.data.shardKey; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js b/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js index 8995d209a47d8..459a786a8dfcf 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_multikey_noindex.js @@ -1,12 +1,10 @@ -'use strict'; - /** * indexed_insert_multikey_noindex.js * * Executes the indexed_insert_multikey.js workload after dropping its index. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_multikey.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_multikey.js"; load('jstests/concurrency/fsm_workload_modifiers/indexed_noindex.js'); // for indexedNoindex -var $config = extendWorkload($config, indexedNoindex); +export const $config = extendWorkload($baseConfig, indexedNoindex); diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js index 148c77edbfb87..c65bdc71dcda0 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_ordered_bulk.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_ordered_bulk.js * @@ -8,10 +6,10 @@ * * Uses an ordered, bulk operation to perform the inserts. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_ordered_bulk'; $config.data.shardKey = {}; $config.data.shardKey[$config.data.indexedField] = 1; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text.js b/jstests/concurrency/fsm_workloads/indexed_insert_text.js index 0e2a815f8dbc4..2ac6deed0a4d0 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_text.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_text.js @@ -1,11 +1,9 @@ -'use strict'; - /** * indexed_insert_text.js * * Inserts some documents into a collection with a text index. 
*/ -var $config = (function() { +export const $config = (function() { var states = { init: function init(db, collName) { // noop diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js index a665b7e61acee..5c197fc967445 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_text_multikey.js @@ -1,14 +1,12 @@ -'use strict'; - /** * indexed_insert_text_multikey.js * * like indexed_insert_text.js but the indexed value is an array of strings */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_text.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_text.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.init = function init(db, collName) { $super.states.init.apply(this, arguments); }; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js index 12a7529e1108b..375fee0c5215e 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_ttl.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_ttl.js * @@ -12,7 +10,7 @@ load('jstests/concurrency/fsm_workload_helpers/balancer.js'); -var $config = (function() { +export const $config = (function() { var states = { init: function init(db, collName) { var res = db[collName].insert({indexed_insert_ttl: new ISODate(), first: true}); diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js index e8fe91483d425..09ce0918d4831 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_unordered_bulk.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_unordered_bulk.js * @@ -8,10 +6,10 @@ * * Uses an unordered, bulk operation to perform the inserts. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_unordered_bulk'; $config.data.shardKey = {}; $config.data.shardKey[$config.data.indexedField] = 1; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js index 627fad30832c4..f3a9fee78a13d 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_upsert.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_upsert.js * @@ -10,10 +8,10 @@ * Instead of inserting via coll.insert(), this workload inserts using an * upsert. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.indexedField = 'indexed_insert_upsert'; $config.data.shardKey = {}; $config.data.shardKey[$config.data.indexedField] = 1; diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_where.js b/jstests/concurrency/fsm_workloads/indexed_insert_where.js index 4cd751c1e1ccc..05a5f3bff1f27 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_where.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_where.js @@ -1,5 +1,3 @@ -'use strict'; - /** * indexed_insert_where.js * @@ -8,7 +6,7 @@ * data.insertedDocuments is used as a counter by all of those workloads for their own checks. */ -var $config = (function() { +export const $config = (function() { var data = { documentsToInsert: 100, insertedDocuments: 0, diff --git a/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js b/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js index 05cd8fa8494e3..178381951cca6 100644 --- a/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js +++ b/jstests/concurrency/fsm_workloads/indexed_insert_wildcard.js @@ -1,15 +1,13 @@ -'use strict'; - /** * indexed_insert_wildcard.js * * Inserts documents into an indexed collection and asserts that the documents appear in both a * collection scan and an index scan. The collection is indexed with a wildcard index. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // For extendWorkload(). -load('jstests/concurrency/fsm_workloads/indexed_insert_base.js'); // For $config(). 
+import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.setup = function init(db, collName) { $super.setup.apply(this, arguments); }; diff --git a/jstests/concurrency/fsm_workloads/insert_duplicates_unique_index.js b/jstests/concurrency/fsm_workloads/insert_duplicates_unique_index.js index 5ca0c603b9ae9..e6d155c3821f9 100644 --- a/jstests/concurrency/fsm_workloads/insert_duplicates_unique_index.js +++ b/jstests/concurrency/fsm_workloads/insert_duplicates_unique_index.js @@ -11,7 +11,7 @@ "use strict"; -var $config = (function() { +export const $config = (function() { const initData = { getCollectionName: function(collName) { return "insert_duplicates_unique_index_" + collName; diff --git a/jstests/concurrency/fsm_workloads/insert_ttl_retry_writes_timeseries.js b/jstests/concurrency/fsm_workloads/insert_ttl_retry_writes_timeseries.js index e916c366f457b..49f3d77054cfd 100644 --- a/jstests/concurrency/fsm_workloads/insert_ttl_retry_writes_timeseries.js +++ b/jstests/concurrency/fsm_workloads/insert_ttl_retry_writes_timeseries.js @@ -1,5 +1,3 @@ -'use strict'; - /** * insert_ttl_retry_writes_timeseries.js * @@ -13,10 +11,10 @@ * uses_ttl, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.getCollectionName = function getCollectionName(collName) { return "insert_ttl_retry_writes_timeseries_" + collName; }; diff --git a/jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js b/jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js index 891df4042c34e..d16d80975631f 100644 --- a/jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js +++ b/jstests/concurrency/fsm_workloads/insert_ttl_timeseries.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Creates a time-series collection with a short expireAfterSeconds value. Each thread does an * insert on each iteration with a time, a metadata field, 'tid', and random measurement, 'data'. 
At @@ -15,7 +13,7 @@ load('jstests/concurrency/fsm_workload_helpers/balancer.js'); -var $config = (function() { +export const $config = (function() { const initData = { getCollectionName: function(collName) { return "insert_ttl_timeseries_" + collName; diff --git a/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js b/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js index b7b4fb6806b53..94859ac588562 100644 --- a/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js +++ b/jstests/concurrency/fsm_workloads/insert_with_data_size_aware_balancing.js @@ -1,5 +1,3 @@ -'use strict'; - /** * - Shard several collections with different (random) configured maxChunkSize * - Perform continuous inserts of random amounts of data into the collections @@ -31,7 +29,7 @@ function getRandomCollName(tid) { return collNames[Random.randInt(tid * tid) % collNames.length]; } -var $config = (function() { +export const $config = (function() { let states = { /* * Insert into a test collection a random amount of documents (up to 10MB per iteration) diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_kill_sessions.js b/jstests/concurrency/fsm_workloads/internal_transactions_kill_sessions.js index cefe90d143b8a..5b59a9bcdd07a 100644 --- a/jstests/concurrency/fsm_workloads/internal_transactions_kill_sessions.js +++ b/jstests/concurrency/fsm_workloads/internal_transactions_kill_sessions.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs insert, update, delete and findAndModify commands in internal transactions using all the * available client session settings, and occasionally kills a random session. @@ -11,12 +9,14 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js"; load('jstests/concurrency/fsm_workload_helpers/kill_session.js'); // for killSession -load('jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js'); load('jstests/libs/override_methods/retry_on_killed_session.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.retryOnKilledSession = true; // Insert initial documents during setup instead of the init state, otherwise the insert could diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_move_chunk.js b/jstests/concurrency/fsm_workloads/internal_transactions_move_chunk.js index 457124bfdaf09..e9ed5551da169 100644 --- a/jstests/concurrency/fsm_workloads/internal_transactions_move_chunk.js +++ b/jstests/concurrency/fsm_workloads/internal_transactions_move_chunk.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs insert, update, delete and findAndModify commands against a sharded collection inside * single-shard and cross-shard internal transactions using all the available client session @@ -14,10 +12,12 @@ * does_not_support_config_fuzzer, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/internal_transactions_sharded.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/internal_transactions_sharded.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, 
function($config, $super) { $config.transitions = { init: { moveChunk: 0.2, diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js b/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js index 949b5cf32d3e6..9423e791a150b 100644 --- a/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js +++ b/jstests/concurrency/fsm_workloads/internal_transactions_resharding.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs insert, update, delete and findAndModify commands against a sharded collection inside * single-shard and cross-shard internal transactions using all client session configurations, and @@ -12,12 +10,13 @@ * antithesis_incompatible, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/internal_transactions_sharded.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/internal_transactions_sharded.js"; load('jstests/libs/fail_point_util.js'); -load("jstests/libs/feature_flag_util.js"); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // reshardingMinimumOperationDurationMillis is set to 30 seconds when there are stepdowns. // So in order to limit the overall time for the test, we limit the number of resharding // operations to maxReshardingExecutions. @@ -87,13 +86,6 @@ var $config = extendWorkload($config, function($config, $super) { } assert(res.hasOwnProperty("code")); - if (!FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - // Expected error prior to the PointInTimeCatalogLookups project. - if (res.code === ErrorCodes.SnapshotUnavailable) { - return true; - } - } - // Race to retry. 
if (res.code === ErrorCodes.ReshardCollectionInProgress) { return false; diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_sharded.js b/jstests/concurrency/fsm_workloads/internal_transactions_sharded.js index 0ba8b429730d1..eb4cb39d04415 100644 --- a/jstests/concurrency/fsm_workloads/internal_transactions_sharded.js +++ b/jstests/concurrency/fsm_workloads/internal_transactions_sharded.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs insert, update, delete and findAndModify commands against a sharded collection inside * single-shard and cross-shard internal transactions using all the available client session @@ -12,13 +10,19 @@ * antithesis_incompatible, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); -load('jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + extendWithInternalTransactionsUnsharded +} from "jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js"; +import {$config as $baseConfig} from 'jstests/concurrency/fsm_workloads/random_moveChunk_base.js'; load('jstests/concurrency/fsm_workload_helpers/balancer.js'); load('jstests/libs/fail_point_util.js'); -var $config = extendWorkload($config, function($config, $super) { +const parsedBaseConfig = parseConfig($baseConfig); +const $extendedBaseConfig = extendWithInternalTransactionsUnsharded( + Object.extend({}, parsedBaseConfig, true), parsedBaseConfig); + +export const $config = extendWorkload($extendedBaseConfig, function($config, $super) { $config.data.getQueryForDocument = function getQueryForDocument(doc) { // The query for a write command against a sharded collection must contain the shard key. const query = $super.data.getQueryForDocument.apply(this, arguments); diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js b/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js index f2ae41e3ac5c0..bc3acb7170517 100644 --- a/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js +++ b/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs insert, update, delete and findAndModify commands against a sharded collection inside * single-shard and cross-shard internal transactions started on a shard using all the available @@ -12,11 +10,13 @@ * antithesis_incompatible * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/internal_transactions_sharded.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/internal_transactions_sharded.js"; load('jstests/libs/fixture_helpers.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.expectDirtyDocs = { // The client is either not using a session or is using a session without retryable writes // enabled. 
Therefore, when a write is interrupted due to stepdown/kill/terminate, they diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod_kill_sessions.js b/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod_kill_sessions.js index 013850a86d6ff..2d1e74b684e13 100644 --- a/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod_kill_sessions.js +++ b/jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod_kill_sessions.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs insert, update, delete and findAndModify commands against a sharded collection inside * single-shard and cross-shard internal transactions started on a shard using all the available @@ -14,11 +12,13 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/internal_transactions_sharded_from_mongod.js"; load('jstests/libs/override_methods/retry_on_killed_session.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.retryOnKilledSession = true; // Insert initial documents during setup instead of the init state, otherwise the insert could diff --git a/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js b/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js index 383de45585657..b87dc3fad104f 100644 --- a/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js +++ b/jstests/concurrency/fsm_workloads/internal_transactions_unsharded.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs insert, update, delete and findAndModify commands in internal transactions using all the * available client session settings. This workload works on both standalone replica sets and @@ -14,29 +12,14 @@ * assumes_unsharded_collection, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js'); load("jstests/libs/override_methods/retry_writes_at_least_once.js"); // This workload involves running commands outside a session. TestData.disableImplicitSessions = true; -if ($config === undefined) { - // There is no workload to extend. Define a noop base workload to make the 'extendWorkload' call - // below still work. 
- $config = { - threadCount: 1, - iterations: 1, - startState: "init", - data: {}, - states: {init: function(db, collName) {}}, - transitions: {init: {init: 1}}, - setup: function(db, collName) {}, - teardown: function(db, collName) {}, - }; -} - -var $config = extendWorkload($config, function($config, $super) { +export function extendWithInternalTransactionsUnsharded($config, $super) { $config.threadCount = 5; $config.iterations = 50; @@ -749,4 +732,17 @@ var $config = extendWorkload($config, function($config, $super) { }; return $config; -}); +} + +const kBaseConfig = { + threadCount: 1, + iterations: 1, + startState: "init", + data: {}, + states: {init: function(db, collName) {}}, + transitions: {init: {init: 1}}, + setup: function(db, collName) {}, + teardown: function(db, collName) {}, +}; + +export const $config = extendWorkload(kBaseConfig, extendWithInternalTransactionsUnsharded); diff --git a/jstests/concurrency/fsm_workloads/invalidated_cursors.js b/jstests/concurrency/fsm_workloads/invalidated_cursors.js index 3c9ffcf55d7ff..70ffc1706802c 100644 --- a/jstests/concurrency/fsm_workloads/invalidated_cursors.js +++ b/jstests/concurrency/fsm_workloads/invalidated_cursors.js @@ -1,5 +1,3 @@ -'use strict'; - /** * invalidated_cursors.js * @@ -13,7 +11,7 @@ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js"); -var $config = (function() { +export const $config = (function() { let data = { chooseRandomlyFrom: function chooseRandomlyFrom(arr) { if (!Array.isArray(arr)) { diff --git a/jstests/concurrency/fsm_workloads/kill_aggregation.js b/jstests/concurrency/fsm_workloads/kill_aggregation.js index 8d658d0ae5b2e..2197849b36523 100644 --- a/jstests/concurrency/fsm_workloads/kill_aggregation.js +++ b/jstests/concurrency/fsm_workloads/kill_aggregation.js @@ -1,5 +1,3 @@ -'use strict'; - /** * kill_aggregation.js * @@ -10,10 +8,10 @@ * This workload was designed to reproduce SERVER-25039. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/kill_rooted_or.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/kill_rooted_or.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as the collection name, since the workload name is assumed to be // unique. Note that we choose our own collection name instead of using the collection provided // by the concurrency framework, because this workload drops its collection. diff --git a/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js b/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js index 484d4febdecc2..caae498baae41 100644 --- a/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js +++ b/jstests/concurrency/fsm_workloads/kill_multicollection_aggregation.js @@ -1,5 +1,3 @@ -'use strict'; - /** * kill_multicollection_aggregation.js * @@ -12,10 +10,10 @@ * The parent test, invalidated_cursors.js, uses $currentOp. 
* @tags: [uses_curop_agg_stage, state_functions_share_cursor] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/invalidated_cursors.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/invalidated_cursors.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /** * Runs the specified aggregation pipeline and stores the resulting cursor (if the command * is successful) in 'this.cursor'. diff --git a/jstests/concurrency/fsm_workloads/kill_rooted_or.js b/jstests/concurrency/fsm_workloads/kill_rooted_or.js index 53e745128634e..f95774d0fefb0 100644 --- a/jstests/concurrency/fsm_workloads/kill_rooted_or.js +++ b/jstests/concurrency/fsm_workloads/kill_rooted_or.js @@ -1,5 +1,3 @@ -'use strict'; - /** * kill_rooted_or.js * @@ -10,7 +8,8 @@ * This workload was designed to reproduce SERVER-24761. */ load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js"); -var $config = (function() { + +export const $config = (function() { // Use the workload name as the collection name, since the workload name is assumed to be // unique. Note that we choose our own collection name instead of using the collection provided // by the concurrency framework, because this workload drops its collection. diff --git a/jstests/concurrency/fsm_workloads/list_indexes.js b/jstests/concurrency/fsm_workloads/list_indexes.js index 687f871a37830..4c135a2d4f041 100644 --- a/jstests/concurrency/fsm_workloads/list_indexes.js +++ b/jstests/concurrency/fsm_workloads/list_indexes.js @@ -1,12 +1,10 @@ -'use strict'; - /** * list_indexes.js * * Checks that the listIndexes command can tolerate concurrent modifications to the * index catalog. */ -var $config = (function() { +export const $config = (function() { var states = (function() { // Picks a random index to drop and recreate. 
function modifyIndices(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/map_reduce_drop.js b/jstests/concurrency/fsm_workloads/map_reduce_drop.js index 0a287f2f0054e..bad68e7eda11c 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_drop.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_drop.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_drop.js * @@ -16,7 +14,7 @@ * does_not_support_causal_consistency, * ] */ -var $config = (function() { +export const $config = (function() { var data = { mapper: function mapper() { emit(this.key, 1); @@ -61,8 +59,8 @@ var $config = (function() { var res = bulk.execute(); assertAlways.commandWorked(res); } catch (ex) { - assert.writeErrorWithCode(ex, ErrorCodes.DatabaseDropPending); assert.eq(true, ex instanceof BulkWriteError, tojson(ex)); + assert.writeErrorWithCode(ex, ErrorCodes.DatabaseDropPending); } var options = { diff --git a/jstests/concurrency/fsm_workloads/map_reduce_inline.js b/jstests/concurrency/fsm_workloads/map_reduce_inline.js index 96b71c191f42e..b37b0515cb540 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_inline.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_inline.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_inline.js * @@ -13,7 +11,7 @@ * does_not_support_causal_consistency * ] */ -var $config = (function() { +export const $config = (function() { function mapper() { if (this.hasOwnProperty('key') && this.hasOwnProperty('value')) { var obj = {}; diff --git a/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js b/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js index 6113d7c949149..45cdbf7c6b8de 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_interrupt.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_interrupt.js * @@ -13,10 +11,12 @@ * uses_curop_agg_stage * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.prefix = 'map_reduce_interrupt'; $config.states.killOp = function killOp(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/map_reduce_merge.js b/jstests/concurrency/fsm_workloads/map_reduce_merge.js index 05fd0d549100c..b4eaf035f7b63 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_merge.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_merge.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_merge.js * @@ -17,10 +15,10 @@ * does_not_support_causal_consistency * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_inline.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as the database name, // since the workload name is assumed to be unique. 
var uniqueDBName = 'map_reduce_merge'; diff --git a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js index 20328d6da0b5a..ac85c1ca5e629 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_reduce.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_reduce.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_reduce.js * @@ -15,10 +13,10 @@ * does_not_support_causal_consistency, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_inline.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. var prefix = 'map_reduce_reduce'; diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace.js b/jstests/concurrency/fsm_workloads/map_reduce_replace.js index 074dad1732a9c..be9f5c9fe8ede 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_replace.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_replace.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_replace.js * @@ -15,10 +13,10 @@ * does_not_support_causal_consistency * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_inline.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. var prefix = 'map_reduce_replace'; diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js index f328b9f5358e4..e6f99540ed0b5 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_nonexistent.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_replace_nonexistent.js * @@ -14,10 +12,10 @@ * does_not_support_causal_consistency * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/map_reduce_inline.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_inline.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. 
$config.data.prefix = 'map_reduce_replace_nonexistent'; diff --git a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js index 60e29554027a9..d58ba35804237 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_replace_remove.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_replace_remove.js * @@ -15,10 +13,10 @@ * does_not_support_causal_consistency * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/map_reduce_replace.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/map_reduce_replace.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.remove = function remove(db, collName) { for (var i = 0; i < 20; ++i) { var res = db[collName].remove({_id: Random.randInt(this.numDocs)}, {justOne: true}); diff --git a/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js b/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js index 1c11384d0e9c1..fd0717a5faa64 100644 --- a/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js +++ b/jstests/concurrency/fsm_workloads/map_reduce_with_chunk_migrations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * map_reduce_with_chunk_migrations.js * @@ -17,10 +15,12 @@ * does_not_support_causal_consistency * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // The base setup will insert 'partitionSize' number of documents per thread, evenly // distributing across the chunks. Documents will only have the "_id" field. $config.data.partitionSize = 50; diff --git a/jstests/concurrency/fsm_workloads/move_primary_with_crud.js b/jstests/concurrency/fsm_workloads/move_primary_with_crud.js index a9f50d57debaa..44f9d46400ada 100644 --- a/jstests/concurrency/fsm_workloads/move_primary_with_crud.js +++ b/jstests/concurrency/fsm_workloads/move_primary_with_crud.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Randomly performs a series of CRUD and movePrimary operations on unsharded collections, checking * for data consistency as a consequence of these operations. @@ -10,9 +8,9 @@ * ] */ -load('jstests/libs/feature_flag_util.js'); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -const $config = (function() { +export const $config = (function() { const kCollNamePrefix = 'unsharded_coll_'; const kInitialCollSize = 100; const kBatchSizeForDocsLookup = kInitialCollSize * 2; @@ -167,9 +165,7 @@ const $config = (function() { // Due to a stepdown of the donor during the cloning phase, the movePrimary // operation failed. It is not automatically recovered, but any orphaned data on // the recipient has been deleted. - 7120202, - // Same as the above, but due to a stepdown of the recipient. 
- ErrorCodes.MovePrimaryAborted + 7120202 ]); }, checkDatabaseMetadataConsistency: function(db, collName, connCache) { @@ -220,9 +216,7 @@ const $config = (function() { let setup = function(db, collName, cluster) { this.skipMetadataChecks = // TODO SERVER-70396: remove this flag - !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency') || - // TODO SERVER-74445: re-enable metadata checks on catalog shard deployments - cluster.hasCatalogShard(); + !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency'); }; const standardTransition = { diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js index 81cc596e228c1..c0eac298a79fd 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js @@ -1,12 +1,11 @@ -'use strict'; - /** * Runs findAndModify, update, delete, find, and getMore within a transaction. * * @tags: [uses_transactions, state_functions_share_transaction] */ load('jstests/concurrency/fsm_workload_helpers/cleanup_txns.js'); -var $config = (function() { + +export const $config = (function() { function quietly(func) { const printOriginal = print; try { diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js index 2ecd324f04d6d..caa0055a14b40 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands_same_session.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs update, findAndModify, delete, find, and getMore in a transaction with all threads using the * same session. @@ -7,11 +5,12 @@ * @tags: [uses_transactions, state_functions_share_transaction, assumes_snapshot_transactions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js'); // for - // $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.setup = function(db, collName, cluster) { $super.setup.apply(this, arguments); this.lsid = tojson({id: UUID()}); diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js index 4c5e8e30b8fef..70c5caa3a78d9 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js @@ -1,5 +1,3 @@ -'use strict'; - /** * multi_statement_transaction_atomicity_isolation.js * @@ -52,7 +50,7 @@ load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js'); // For arrayEq. 
load("jstests/aggregation/extras/utils.js"); -var $config = (function() { +export const $config = (function() { function checkTransactionCommitOrder(documents) { const graph = new Graph(); diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js index fb53631212911..cbfe20679be53 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_metrics_test.js @@ -1,5 +1,3 @@ -'use strict'; - /** * This test checks high level invariants of various transaction related metrics reported in * serverStatus and currentOp. @@ -7,13 +5,14 @@ * @tags: [uses_transactions, uses_prepare_transaction, assumes_snapshot_transactions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js"; load('jstests/concurrency/fsm_workload_helpers/check_transaction_server_status_invariants.js'); -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js'); load('jstests/core/txns/libs/prepare_helpers.js'); -// for $config -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.setup = function(db, collName, cluster) { $super.setup.apply(this, arguments); this.prepareProbability = 0.5; diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js index 7946b83b2a2cd..5255727173703 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_multi_db.js @@ -1,15 +1,15 @@ -'use strict'; - /** * Test transactions atomicity and isolation guarantees for transactions across multiple DBs. * * @tags: [uses_transactions, assumes_snapshot_transactions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js"; -var $config = extendWorkload($config, ($config, $super) => { +export const $config = extendWorkload($baseConfig, ($config, $super) => { // Number of unique collections and number of unique databases. The square root is used // here to ensure the total number of namespaces (coll * db) is roughly equal to the // number of threads. 
diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js index 72661493d5d94..135082f6f1bed 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_repeated_reads.js @@ -1,16 +1,15 @@ -'use strict'; - /** * Performs repeated reads of the documents in the collection to test snapshot isolation. * * @tags: [uses_transactions, assumes_snapshot_transactions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js'); -// for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.numReads = 5; $config.states.repeatedRead = function repeatedRead(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js index 9f4ff6a31e19e..7b5dbbbc1519a 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation_server_status_mongos.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Verifies the transactions server status metrics on mongos while running transactions. * Temporarily disabled for BF-24311. @@ -8,11 +6,13 @@ * uses_transactions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js"; load('jstests/concurrency/fsm_workload_helpers/check_transaction_server_status_invariants.js'); -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.teardown = function(db, collName, cluster) { // Check the server-wide invariants one last time with only a single sample, since all user // operations should have finished. diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_current_op.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_current_op.js index c8e69017336be..e3eb5e3e897bd 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_current_op.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_current_op.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs update, findAndModify, delete, find, and getMore in a transaction with all threads using the * same session. 
@@ -13,11 +11,12 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js'); // for - // $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_all_commands.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.verifyMongosSessionsWithTxns = function verifyMongosSessionsWithTxns(sessions) { const acceptableReadConcernLevels = ['snapshot', 'local']; sessions.forEach((session) => { diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_kill_sessions_atomicity_isolation.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_kill_sessions_atomicity_isolation.js index a186164622009..b1797c3723cb1 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_kill_sessions_atomicity_isolation.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_kill_sessions_atomicity_isolation.js @@ -1,16 +1,16 @@ -'use strict'; - /** * Tests periodically killing sessions that are running transactions. * * @tags: [uses_transactions, assumes_snapshot_transactions, kills_random_sessions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js"; load('jstests/concurrency/fsm_workload_helpers/kill_session.js'); // for killSession -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_atomicity_isolation.js'); -var $config = extendWorkload($config, ($config, $super) => { +export const $config = extendWorkload($baseConfig, ($config, $super) => { $config.data.retryOnKilledSession = true; $config.states.killSession = killSession; diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js index ef3178f9dc998..00421f70edb51 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Creates several bank accounts. On each iteration, each thread: * - chooses two accounts and amount of money being transfer @@ -11,7 +9,7 @@ // For withTxnAndAutoRetry. load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js'); -var $config = (function() { +export const $config = (function() { function computeTotalOfAllBalances(documents) { return documents.reduce((total, account) => total + account.balance, 0); } diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_kill_sessions.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_kill_sessions.js index 94b595825cc74..453ad7b680717 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_kill_sessions.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_kill_sessions.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Tests periodically killing sessions that are running transactions. 
The base workload runs * transactions with two writes, which will require two phase commit in a sharded cluster if each @@ -8,11 +6,13 @@ * @tags: [uses_transactions, assumes_snapshot_transactions, kills_random_sessions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js"; load('jstests/concurrency/fsm_workload_helpers/kill_session.js'); // for killSession -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js'); // for $config -var $config = extendWorkload($config, ($config, $super) => { +export const $config = extendWorkload($baseConfig, ($config, $super) => { $config.data.retryOnKilledSession = true; $config.states.killSession = killSession; diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js index ed01e10ad88c6..542cb491fbc80 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_majority_writes.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs concurrent majority writes alongside transactions to verify both will eventually * complete as expected. @@ -8,11 +6,13 @@ * @tags: [uses_transactions, assumes_snapshot_transactions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js'); // for $config -load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js"; +load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.majorityWriteCollName = 'majority_writes'; $config.data.counter = 0; diff --git a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js index 93d89bbce0f20..25ede737e2855 100644 --- a/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js +++ b/jstests/concurrency/fsm_workloads/multi_statement_transaction_simple_repeated_reads.js @@ -1,15 +1,15 @@ -'use strict'; - /** * Performs repeated reads of the documents in the collection to test snapshot isolation. 
* * @tags: [uses_transactions, assumes_snapshot_transactions] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/multi_statement_transaction_simple.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.numReads = 5; $config.states.repeatedRead = function repeatedRead(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js index acca1470b3f02..61547bf708f13 100644 --- a/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js +++ b/jstests/concurrency/fsm_workloads/plan_cache_drop_database.js @@ -1,5 +1,3 @@ -'use strict'; - /** * plan_cache_drop_database.js * @@ -8,7 +6,7 @@ * events triggers the concurrent destruction of a Collection object and * the updating of said object's PlanCache (SERVER-17117). */ -var $config = (function() { +export const $config = (function() { function populateData(db, collName) { var coll = db[collName]; diff --git a/jstests/concurrency/fsm_workloads/profile_command.js b/jstests/concurrency/fsm_workloads/profile_command.js index dd85296aa8d40..1374c00031c50 100644 --- a/jstests/concurrency/fsm_workloads/profile_command.js +++ b/jstests/concurrency/fsm_workloads/profile_command.js @@ -1,5 +1,3 @@ -'use strict'; - /** * profile_command.js * @@ -13,7 +11,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { const data = { numDocs: 1000, checkProfileResult: function(result) { diff --git a/jstests/concurrency/fsm_workloads/query_stats_concurrent.js b/jstests/concurrency/fsm_workloads/query_stats_concurrent.js new file mode 100644 index 0000000000000..a44b269c35f03 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/query_stats_concurrent.js @@ -0,0 +1,157 @@ +/** + * query_stats_concurrent.js + * + * Stresses $queryStats running concurrently with queries. + * + * @tags: [ + * featureFlagQueryStats, + * does_not_support_causal_consistency, + * ] + * + */ +export const $config = (function() { + var states = (function() { + function init(db, collName) { + } + + function reInit(db, collName) { + } + + // Runs one find query so that the queryStatsEntry is updated. + function findOneShape(db, collName) { + assertWhenOwnColl.gt(db[collName].find({i: {$lt: 50}}).itcount(), 0); + } + + // Runs one agg query so that the queryStatsEntry is updated. + function aggOneShape(db, collName) { + assertWhenOwnColl.gt(db[collName].aggregate([{$match: {i: {$gt: 900}}}]).itcount(), 0); + } + + // Runs many queries with different shapes to ensure eviction occurs in the queryStats + // store. + function multipleShapes(db, collName) { + for (var i = 0; i < 10000; i++) { + let query = {}; + query["foo" + i] = "bar"; + db[collName].aggregate([{$match: query}]).itcount(); + } + const evictedAfter = db.serverStatus().metrics.queryStats.numEvicted; + assertAlways.gt(evictedAfter, 0); + } + + // Runs queryStats with transformation. 
+ function runQueryStatsWithHmac(db, collName) { + let response = db.adminCommand({ + aggregate: 1, + pipeline: [{ + $queryStats: { + transformIdentifiers: { + algorithm: "hmac-sha-256", + hmacKey: BinData(0, "MjM0NTY3ODkxMDExMTIxMzE0MTUxNjE3MTgxOTIwMjE=") + } + } + }], + // Use a small batch size to ensure these operations open up a cursor and use + // multiple getMores. + cursor: {batchSize: 1} + }); + assertAlways.commandWorked(response); + const cursor = new DBCommandCursor(db.getSiblingDB("admin"), response); + assertAlways.gt(cursor.itcount(), 0); + } + + // Runs queryStats without transformation. + function runQueryStatsWithoutHmac(db, collName) { + let response = db.adminCommand({ + aggregate: 1, + pipeline: [{$queryStats: {}}], + // Use a small batch size to ensure these operations open up a cursor and use + // multiple getMores. + cursor: {batchSize: 1} + }); + assertAlways.commandWorked(response); + const cursor = new DBCommandCursor(db.getSiblingDB("admin"), response); + assertAlways.gt(cursor.itcount(), 0); + } + + return { + init: init, + reInit: reInit, + findOneShape: findOneShape, + multipleShapes: multipleShapes, + aggOneShape: aggOneShape, + runQueryStatsWithHmac: runQueryStatsWithHmac, + runQueryStatsWithoutHmac: runQueryStatsWithoutHmac + }; + })(); + + var internalQueryStatsRateLimit; + var internalQueryStatsCacheSize; + + let setup = function(db, collName, cluster) { + const setQueryStatsParams = (db) => { + var res; + res = db.adminCommand({setParameter: 1, internalQueryStatsRateLimit: -1}); + assertAlways.commandWorked(res); + internalQueryStatsRateLimit = res.was; + + res = db.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "1MB"}); + assertAlways.commandWorked(res); + internalQueryStatsCacheSize = res.was; + }; + + cluster.executeOnMongodNodes(setQueryStatsParams); + cluster.executeOnMongosNodes(setQueryStatsParams); + + assert.commandWorked(db[collName].createIndex({i: 1})); + const bulk = db[collName].initializeUnorderedBulkOp(); + for (let i = 0; i < 1000; ++i) { + bulk.insert({i: i}); + } + assert.commandWorked(bulk.execute()); + }; + + let teardown = function(db, collName, cluster) { + const resetQueryStatsParams = (db) => assert.commandWorked(db.adminCommand({ + setParameter: 1, + internalQueryStatsRateLimit: internalQueryStatsRateLimit, + internalQueryStatsCacheSize: internalQueryStatsCacheSize + })); + + cluster.executeOnMongodNodes(resetQueryStatsParams); + cluster.executeOnMongosNodes(resetQueryStatsParams); + + db[collName].drop(); + }; + + let transitions = { + // To start, add some $queryStats data so that it is never empty. 
+ init: { + aggOneShape: 0.33, + findOneShape: 0.33, + multipleShapes: 0.34, + }, + // From then on, choose evenly among all possibilities: + reInit: { + aggOneShape: 0.2, + findOneShape: 0.2, + multipleShapes: 0.2, + runQueryStatsWithHmac: 0.2, + runQueryStatsWithoutHmac: 0.2 + }, + findOneShape: {reInit: 1}, + multipleShapes: {reInit: 1}, + runQueryStatsWithHmac: {reInit: 1}, + runQueryStatsWithoutHmac: {reInit: 1}, + aggOneShape: {reInit: 1} + }; + + return { + threadCount: 10, + iterations: 10, + states: states, + setup: setup, + teardown: teardown, + transitions: transitions + }; +})(); diff --git a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js index d0f2df581821c..d2a15b4483b03 100644 --- a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js +++ b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs a series of CRUD operations while DDL commands are running in the background * and verifies guarantees are not broken. @@ -19,9 +17,9 @@ load("jstests/concurrency/fsm_workload_helpers/state_transition_utils.js"); load("jstests/libs/uuid_util.js"); -load('jstests/libs/feature_flag_util.js'); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -var $config = (function() { +export const $config = (function() { function threadCollectionName(prefix, tid) { return prefix + tid; } @@ -78,13 +76,39 @@ var $config = (function() { tid = Random.randInt(this.threadCount); const targetThreadColl = threadCollectionName(collName, tid); - const coll = db[threadCollectionName(collName, tid)]; + const coll = db[targetThreadColl]; const fullNs = coll.getFullName(); jsTestLog('create state tid:' + tid + ' currentTid:' + this.tid + ' collection:' + targetThreadColl); - assertAlways.commandWorked( - db.adminCommand({shardCollection: fullNs, key: {_id: 1}, unique: false})); - jsTestLog('create state finished'); + // Add necessary indexes for resharding. + assertAlways.commandWorked(db.adminCommand({ + createIndexes: targetThreadColl, + indexes: [ + {key: {[`tid_${tid}_0`]: 1}, name: `tid_${tid}_0_1`, unique: false}, + {key: {[`tid_${tid}_1`]: 1}, name: `tid_${tid}_1_1`, unique: false} + ], + writeConcern: {w: 'majority'} + })); + try { + assertAlways.commandWorked(db.adminCommand( + {shardCollection: fullNs, key: {[`tid_${tid}_0`]: 1}, unique: false})); + } catch (e) { + const exceptionCode = e.code; + if (exceptionCode) { + if (exceptionCode == ErrorCodes.AlreadyInitialized || + exceptionCode == ErrorCodes.InvalidOptions) { + // It is fine for a shardCollection to throw AlreadyInitialized, a + // resharding state might have changed the shard key for the namespace. It + // is also fine to fail with InvalidOptions, a drop state could've removed + // the indexes and the CRUD state might have added some documents, forcing + // the need to manually create indexes. + return; + } + } + throw e; + } finally { + jsTestLog('create state finished'); + } }, drop: function(db, collName, connCache) { let tid = this.tid; @@ -155,6 +179,34 @@ var $config = (function() { jsTestLog('rename state finished'); } }, + resharding: function(db, collName, connCache) { + let tid = this.tid; + // Pick a tid at random until we pick one that doesn't target this thread's collection. 
+ while (tid === this.tid) + tid = Random.randInt(this.threadCount); + const fullNs = db[threadCollectionName(collName, tid)].getFullName(); + let newKey = 'tid_' + tid + '_' + Random.randInt(2); + try { + jsTestLog('resharding state tid:' + tid + ' currentTid:' + this.tid + + ' collection:' + fullNs + ' newKey ' + newKey); + assertAlways.commandWorked( + db.adminCommand({reshardCollection: fullNs, key: {[`${newKey}`]: 1}})); + } catch (e) { + const exceptionCode = e.code; + if (exceptionCode == ErrorCodes.ConflictingOperationInProgress || + exceptionCode == ErrorCodes.ReshardCollectionInProgress || + exceptionCode == ErrorCodes.NamespaceNotSharded) { + // It is fine for a resharding operation to throw ConflictingOperationInProgress + // if a concurrent resharding with the same collection is ongoing. + // It is also fine for a resharding operation to throw NamespaceNotSharded, + // because a drop state could've happened recently. + return; + } + throw e; + } finally { + jsTestLog('resharding state finished'); + } + }, checkDatabaseMetadataConsistency: function(db, collName, connCache) { if (this.skipMetadataChecks) { return; @@ -188,7 +240,6 @@ var $config = (function() { jsTestLog('CRUD state tid:' + tid + ' currentTid:' + this.tid + ' collection:' + targetThreadColl); const coll = db[targetThreadColl]; - const fullNs = coll.getFullName(); const generation = new Date().getTime(); // Insert Data @@ -196,7 +247,8 @@ var $config = (function() { let insertBulkOp = coll.initializeUnorderedBulkOp(); for (let i = 0; i < numDocs; ++i) { - insertBulkOp.insert({generation: generation, count: i, tid: tid}); + insertBulkOp.insert( + {generation: generation, count: i, [`tid_${tid}_0`]: i, [`tid_${tid}_1`]: i}); } mutexLock(db, tid, targetThreadColl); @@ -247,9 +299,7 @@ var $config = (function() { let setup = function(db, collName, cluster) { this.skipMetadataChecks = // TODO SERVER-70396: remove this flag - !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency') || - // TODO SERVER-74445: re-enable metadata checks on catalog shard deployments - cluster.hasCatalogShard(); + !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency'); for (let tid = 0; tid < this.threadCount; ++tid) { db[data.CRUDMutex].insert({tid: tid, mutex: 0}); diff --git a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_setFCV_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_setFCV_operations.js index 3cd6110c939f5..0fc9f11645a5d 100644 --- a/jstests/concurrency/fsm_workloads/random_DDL_CRUD_setFCV_operations.js +++ b/jstests/concurrency/fsm_workloads/random_DDL_CRUD_setFCV_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Concurrently performs CRUD operations, DDL commands and FCV changes and verifies guarantees are * not broken.
@@ -19,10 +17,12 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/random_DDL_CRUD_operations.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.setFCV = function(db, collName, connCache) { const fcvValues = [lastLTSFCV, lastContinuousFCV, latestFCV]; const targetFCV = fcvValues[Random.randInt(3)]; diff --git a/jstests/concurrency/fsm_workloads/random_DDL_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_operations.js index 6749900610b83..47f9ca6d7af0e 100644 --- a/jstests/concurrency/fsm_workloads/random_DDL_operations.js +++ b/jstests/concurrency/fsm_workloads/random_DDL_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Concurrently performs DDL commands and verifies guarantees are not broken. * @@ -11,7 +9,7 @@ */ load("jstests/concurrency/fsm_workload_helpers/state_transition_utils.js"); -load('jstests/libs/feature_flag_util.js'); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const dbPrefix = jsTestName() + '_DB_'; const dbCount = 2; @@ -31,7 +29,7 @@ function getRandomShard(connCache) { return shards[Random.randInt(shards.length)]; } -var $config = (function() { +export const $config = (function() { let states = { create: function(db, collName, connCache) { db = getRandomDb(db); @@ -65,10 +63,6 @@ var $config = (function() { ]); }, movePrimary: function(db, collName, connCache) { - if (this.skipMovePrimary) { - return; - } - db = getRandomDb(db); const shardId = getRandomShard(connCache); @@ -77,10 +71,7 @@ var $config = (function() { db.adminCommand({movePrimary: db.getName(), to: shardId}), [ ErrorCodes.ConflictingOperationInProgress, // The cloning phase has failed (e.g. as a result of a stepdown). When a failure - // occurs at this phase, the movePrimary operation does not recover. Either of - // the following error codes could be seen depending on if the failover was on - // the donor or recipient node. - ErrorCodes.MovePrimaryAborted, + // occurs at this phase, the movePrimary operation does not recover. 7120202 ]); }, @@ -116,14 +107,9 @@ var $config = (function() { }; let setup = function(db, collName, cluster) { - // TODO (SERVER-71309): Remove once 7.0 becomes last LTS. Prevent non-resilient movePrimary - // operations from being executed in multiversion suites. 
- this.skipMovePrimary = !FeatureFlagUtil.isEnabled(db.getMongo(), 'ResilientMovePrimary'); this.skipMetadataChecks = // TODO SERVER-70396: remove this flag - !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency') || - // TODO SERVER-74445: re-enable metadata checks on catalog shard deployments - cluster.hasCatalogShard(); + !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency'); for (var i = 0; i < dbCount; i++) { const dbName = dbPrefix + i; diff --git a/jstests/concurrency/fsm_workloads/random_DDL_resetPlacementHistory_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_resetPlacementHistory_operations.js new file mode 100644 index 0000000000000..ab9ca9eaf925b --- /dev/null +++ b/jstests/concurrency/fsm_workloads/random_DDL_resetPlacementHistory_operations.js @@ -0,0 +1,211 @@ +/** + * Performs a series of placement-changing commands (DDLs and chunk migrations) while + * resetPlacementHistory may be run in parallel. After tearing down the test, the + * check_routing_table_consistency hook will verify that the content of config.placementHistory will + * still be consistent with the rest of the catalog. + * + * @tags: [ + * requires_fcv_71, + * requires_sharding, + * assumes_balancer_off, + * does_not_support_causal_consistency, + * does_not_support_add_remove_shards, + * # The mechanism to pick a random collection is not resilient to stepdowns + * does_not_support_stepdowns, + * does_not_support_transactions, + * ] + */ + +load("jstests/libs/uuid_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load('jstests/concurrency/fsm_workload_helpers/chunks.js'); + +export const $config = (function() { + const testCollectionsState = 'testCollectionsState'; + const resetPlacementHistoryState = 'resetPlacementHistoryState'; + const resetPlacementHistoryStateId = 'x'; + const numThreads = 12; + const numTestCollections = numThreads + 5; + + function getConfig(db) { + return db.getSiblingDB('config'); + } + + /** + * Used to guarantee that a namespace isn't targeted by multiple FSM threads at the same time. + */ + function acquireCollectionName(db, mustBeAlreadyCreated = true) { + let acquiredCollDoc = null; + assertAlways.soon(function() { + const query = {acquired: false}; + if (mustBeAlreadyCreated) { + query.created = true; + } + acquiredCollDoc = db[testCollectionsState].findAndModify({ + query: query, + sort: {lastAcquired: 1}, + update: {$set: {acquired: true, lastAcquired: new Date()}} + }); + return acquiredCollDoc !== null; + }); + return acquiredCollDoc.collName; + } + + function releaseCollectionName(db, collName, wasDropped = false) { + // In case the collection was dropped, leave a chance of reusing the same name during the next + // shardCollection + const newExtension = wasDropped && Math.random() < 0.5 ? 'e' : ''; + const match = db[testCollectionsState].findAndModify({ + query: {collName: collName, acquired: true}, + update: + {$set: {collName: collName + newExtension, acquired: false, created: !wasDropped}} + }); + assertAlways(match !== null); + } + + let states = { + shardCollection: function(db, _, connCache) { + // To avoid starvation problems during the execution of the FSM, it is OK to pick + // up an already sharded collection.
+ const collName = acquireCollectionName(db, false /*mustBeAlreadyCreated*/); + try { + jsTestLog(`Beginning shardCollection state for ${collName}`); + assertAlways.commandWorked( + db.adminCommand({shardCollection: db[collName].getFullName(), key: {_id: 1}})); + jsTestLog(`shardCollection state for ${collName} completed`); + } catch (e) { + throw e; + } finally { + releaseCollectionName(db, collName); + } + }, + + dropCollection: function(db, _, connCache) { + // To avoid starvation problems during the execution of the FSM, it is OK to pick + // up an already dropped collection. + const collName = acquireCollectionName(db, false /*mustBeAlreadyCreated*/); + try { + jsTestLog(`Beginning dropCollection state for ${collName}`); + // Avoid checking the outcome, as the drop may result into a no-op. + db[collName].drop(); + jsTestLog(`dropCollection state for ${collName} completed`); + } catch (e) { + throw e; + } finally { + releaseCollectionName(db, collName, true /*wasDropped*/); + } + }, + + renameCollection: function(db, _, connCache) { + const collName = acquireCollectionName(db); + const renamedCollName = collName + '_renamed'; + try { + jsTestLog(`Beginning renameCollection state for ${collName}`); + assertAlways.commandWorked(db[collName].renameCollection(renamedCollName)); + // reverse the rename before leaving the state. + assertAlways.commandWorked(db[renamedCollName].renameCollection(collName)); + jsTestLog(`renameCollection state for ${collName} completed`); + } catch (e) { + throw e; + } finally { + releaseCollectionName(db, collName); + } + }, + + moveChunk: function(db, _, connCache) { + const collName = acquireCollectionName(db); + try { + jsTestLog(`Beginning moveChunk state for ${collName}`); + const collUUID = + getConfig(db).collections.findOne({_id: db[collName].getFullName()}).uuid; + assertAlways(collUUID); + const shards = getConfig(db).shards.find().toArray(); + const chunkToMove = getConfig(db).chunks.findOne({uuid: collUUID}); + const destination = shards.filter( + s => s._id !== + chunkToMove.shard)[Math.floor(Math.random() * (shards.length - 1))]; + ChunkHelper.moveChunk( + db, collName, [chunkToMove.min, chunkToMove.max], destination._id, true); + jsTestLog(`moveChunk state for ${collName} completed`); + } catch (e) { + throw e; + } finally { + releaseCollectionName(db, collName); + } + }, + + resetPlacementHistory: function(db, collName, connCache) { + jsTestLog(`Beginning resetPlacementHistory state`); + assertAlways.commandWorked(db.adminCommand({resetPlacementHistory: 1})); + jsTestLog(`resetPlacementHistory state completed`); + }, + + }; + + let transitions = { + shardCollection: { + shardCollection: 0.22, + dropCollection: 0.22, + renameCollection: 0.22, + moveChunk: 0.22, + resetPlacementHistory: 0.12 + }, + dropCollection: { + shardCollection: 0.22, + dropCollection: 0.22, + renameCollection: 0.22, + moveChunk: 0.22, + resetPlacementHistory: 0.12 + }, + renameCollection: { + shardCollection: 0.22, + dropCollection: 0.22, + renameCollection: 0.22, + moveChunk: 0.22, + resetPlacementHistory: 0.12 + }, + moveChunk: { + shardCollection: 0.22, + dropCollection: 0.22, + renameCollection: 0.22, + moveChunk: 0.22, + resetPlacementHistory: 0.12 + }, + resetPlacementHistory: { + shardCollection: 0.22, + dropCollection: 0.22, + renameCollection: 0.22, + moveChunk: 0.22, + }, + }; + + let setup = function(db, _, cluster) { + this.skipMetadataChecks = + // TODO SERVER-70396: remove this flag + !FeatureFlagUtil.isEnabled(db.getMongo(), 'CheckMetadataConsistency'); + + 
for (let i = 0; i < numTestCollections; ++i) { + db[testCollectionsState].insert({ + collName: `testColl_${i}`, + acquired: false, + lastAcquired: new Date(), + created: false + }); + } + + db[resetPlacementHistoryState].insert({_id: resetPlacementHistoryStateId, ongoing: false}); + }; + + let teardown = function(db, collName, cluster) {}; + + return { + threadCount: numThreads, + iterations: 32, + startState: 'shardCollection', + states: states, + transitions: transitions, + setup: setup, + teardown: teardown, + passConnectionCache: true + }; +})(); diff --git a/jstests/concurrency/fsm_workloads/random_DDL_setFCV_operations.js b/jstests/concurrency/fsm_workloads/random_DDL_setFCV_operations.js index b6740c8cf89cc..87ee5c7297ed7 100644 --- a/jstests/concurrency/fsm_workloads/random_DDL_setFCV_operations.js +++ b/jstests/concurrency/fsm_workloads/random_DDL_setFCV_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Concurrently performs DDL commands and FCV changes and verifies guarantees are * not broken. @@ -13,10 +11,10 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_DDL_operations.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_DDL_operations.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.setFCV = function(db, collName, connCache) { const fcvValues = [lastLTSFCV, lastContinuousFCV, latestFCV]; const targetFCV = fcvValues[Random.randInt(3)]; diff --git a/jstests/concurrency/fsm_workloads/random_internal_transactions_setFCV_operations.js b/jstests/concurrency/fsm_workloads/random_internal_transactions_setFCV_operations.js index 7e08e5b4e1d60..429bff0ca21f0 100644 --- a/jstests/concurrency/fsm_workloads/random_internal_transactions_setFCV_operations.js +++ b/jstests/concurrency/fsm_workloads/random_internal_transactions_setFCV_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs updates that will change a document's shard key across chunks while simultaneously * changing the FCV. @@ -13,14 +11,16 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js"; // Transactions that run concurrently with a setFCV may get interrupted due to setFCV issuing for a // killSession any open sessions during an FCV change. We want to have to retryability support for // such scenarios. load('jstests/libs/override_methods/retry_on_killed_session.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Sessions of open transactions can be killed and throw "Interrupted" if we run it concurrently // with a setFCV command, so we want to be able to catch those as acceptable killSession errors. 
$config.data.retryOnKilledSession = true; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_base.js b/jstests/concurrency/fsm_workloads/random_moveChunk_base.js index 38644ab1d6333..927f9ae1c0244 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_base.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_base.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Shards a collection by 'skey' and creates one chunk per thread, filling each chunk with * documents, and assigning each document to a random thread. Meant to be extended by workloads that @@ -10,10 +8,12 @@ * assumes_balancer_off, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 1; $config.iterations = 1; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js index bc60f549c7298..293f6b4f0b3d7 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_delete_transaction.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs deletes in transactions without the shard key while chunks are being moved. This * includes multi=true deletes and multi=false deletes with exact _id queries. @@ -11,11 +9,11 @@ * uses_transactions, * ]; */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js"; load('jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 5; $config.iterations = 50; @@ -37,7 +35,7 @@ var $config = extendWorkload($config, function($config, $super) { exactIdDelete(db, collName, this.session); }; $config.states.multiDelete = function(db, collName, connCache) { - multiDelete(db, collName, this.session, this.tid); + multiDelete(db, collName, this.session, this.tid, this.partitionSize); }; $config.states.verifyDocuments = function(db, collName, connCache) { verifyDocuments(db, collName, this.tid); @@ -50,7 +48,7 @@ var $config = extendWorkload($config, function($config, $super) { $config.states.init = function init(db, collName, connCache) { $super.states.init.apply(this, arguments); this.session = db.getMongo().startSession({causalConsistency: false}); - initDeleteInTransactionStates(db, collName, this.tid); + initDeleteInTransactionStates(db, collName, this.tid, this.partitionSize); }; $config.transitions = { diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js index 18557281a315a..e059cb07ed796 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js +++ 
b/jstests/concurrency/fsm_workloads/random_moveChunk_broadcast_update_transaction.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs updates in transactions without the shard key while chunks are being moved. This * includes multi=true updates and multi=false updates with exact _id queries. @@ -11,11 +9,11 @@ * uses_transactions, * ]; */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js"; load('jstests/concurrency/fsm_workload_helpers/update_in_transaction_states.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 5; $config.iterations = 50; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js b/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js index e5db4d61d5362..7989de521c1e7 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_index_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs a series of index operations while chunk migrations are running in the background * and verifies that indexes are not left in an inconsistent state. @@ -13,7 +11,7 @@ load("jstests/concurrency/fsm_workload_helpers/chunks.js"); // for chunk helpers load("jstests/sharding/libs/sharded_index_util.js"); // for findInconsistentIndexesAcrossShards -var $config = (function() { +export const $config = (function() { function threadCollectionName(prefix, tid) { return prefix + tid; } diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js b/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js index cc72d684522ab..16fbc727b08d2 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_multi_update_delete_change_streams.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs a series of {multi: true} updates/deletes while moving chunks, and checks that the * expected change stream events are received and that no events are generated to writes on orphan @@ -11,10 +9,10 @@ * uses_change_streams * ]; */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 5; $config.iterations = 50; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js b/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js index c9eb00ac7c761..523fb4fde048e 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_multiple_collections.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform continuous moveChunk on multiple collections/databases. 
* @@ -9,8 +7,8 @@ * does_not_support_add_remove_shards, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js"; const dbNames = ['db0', 'db1', 'db2']; const collNames = ['collA', 'collB', 'collC']; @@ -44,7 +42,7 @@ const runWithManualRetriesIfInStepdownSuite = (fn) => { } }; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = dbNames.length * collNames.length; $config.iterations = 64; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js index c397518ecc53d..f8b93ae941c41 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs updates in transactions without the shard key while chunks are being moved. This * includes multi=true updates and multi=false updates with exact _id queries. @@ -11,10 +9,10 @@ * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 5; $config.iterations = 50; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js index 85eac01c61871..49a5fb438dff6 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs these actions in parallel: * 1. Refine a collection's shard key. 
@@ -19,18 +17,24 @@ * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js"; load('jstests/concurrency/fsm_workload_helpers/delete_in_transaction_states.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.iterations = 10; $config.states.exactIdDelete = function(db, collName, connCache) { exactIdDelete(db, this.getCurrentOrPreviousLatchCollName(collName), this.session); }; $config.states.multiDelete = function(db, collName, connCache) { - multiDelete(db, this.getCurrentOrPreviousLatchCollName(collName), this.session, this.tid); + multiDelete(db, + this.getCurrentOrPreviousLatchCollName(collName), + this.session, + this.tid, + this.partitionSize); }; $config.states.verifyDocuments = function(db, collName, connCache) { verifyDocuments(db, this.getCurrentOrPreviousLatchCollName(collName), this.tid); @@ -48,7 +52,7 @@ var $config = extendWorkload($config, function($config, $super) { for (let i = this.latchCount; i >= 0; --i) { const latchCollName = collName + '_' + i; - initDeleteInTransactionStates(db, latchCollName, this.tid); + initDeleteInTransactionStates(db, latchCollName, this.tid, this.partitionSize); } }; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js index 984b713d36a25..db31f2f9e9eb8 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs these actions in parallel: * 1. Refine a collection's shard key. @@ -19,11 +17,13 @@ * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key.js"; load('jstests/concurrency/fsm_workload_helpers/update_in_transaction_states.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 5; $config.iterations = 10; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_delete_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_delete_transaction.js index 9e5df7168f737..22b7c2b5f6a45 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_delete_transaction.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_delete_transaction.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Same as the base workload, but refines to a nested shard key. 
* @@ -15,11 +13,13 @@ * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load( - 'jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from + "jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_delete_transaction.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.newShardKey = {a: 1, "b.c": 1}; $config.data.newShardKeyFields = ["a", "b.c"]; return $config; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_update_transaction.js b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_update_transaction.js index b796aee38e140..8b12af42b2c1b 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_update_transaction.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_nested_broadcast_update_transaction.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Same as the base workload, but refines to a nested shard key. * @@ -15,11 +13,13 @@ * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load( - 'jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from + "jstests/concurrency/fsm_workloads/random_moveChunk_refine_collection_shard_key_broadcast_update_transaction.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.newShardKey = {a: 1, "b.c": 1}; $config.data.newShardKeyFields = ["a", "b.c"]; return $config; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js index 3dd6588ef6838..08f748980dcf3 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_deletes.js @@ -9,13 +9,13 @@ * requires_fcv_51, * ] */ -'use strict'; +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from 'jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js'; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -load('jstests/core/timeseries/libs/timeseries.js'); // For 'TimeseriesTest' helpers. -// Load parent workload for extending below. 
-load('jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js'); - -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.generateMetaFieldValueForInitialInserts = () => { let meta = {}; // Insert a document with a field for every thread to test concurrent deletes of the @@ -38,30 +38,37 @@ var $config = extendWorkload($config, function($config, $super) { $config.states.init = function init(db, collName, connCache) { $super.states.init.call(this, db, collName, connCache); - this.featureFlagDisabled = this.featureFlagDisabled || - !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db); - if (this.featureFlagDisabled) { - jsTestLog( - "Skipping executing this test as the requisite feature flags are not enabled."); - } + this.arbitraryDeletesEnabled = + FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesDeletesSupport"); }; $config.states.doDelete = function doDelete(db, collName, connCache) { - if (this.featureFlagDisabled) { - return; - } - + // Alternate between filtering on the meta field and filtering on a data field. This will + // cover both the timeseries batch delete and arbitrary delete paths. + const filterFieldName = !this.arbitraryDeletesEnabled || Random.randInt(2) == 0 + ? "m.tid" + this.tid + : "f.tid" + this.tid; const filter = { - m: { - ["tid" + this.tid]: { - $gte: Random.randInt($config.data.numMetaCount), - }, + [filterFieldName]: { + $gte: Random.randInt($config.data.numMetaCount), }, }; assertAlways.commandWorked(db[collName].deleteMany(filter)); assertAlways.commandWorked(db[this.nonShardCollName].deleteMany(filter)); }; + $config.data.validateCollection = function validate(db, collName) { + // Since we can't use a 'snapshot' read concern for timeseries deletes, deletes on the + // sharded collection may not see the exact same records as the non-sharded, so the + // validation needs to be more lenient. + const count = db[collName].find().itcount(); + const countNonSharded = db[this.nonShardCollName].find().itcount(); + assertAlways.gte( + count, + countNonSharded, + "Expected sharded collection to have the same or more records than unsharded"); + }; + $config.transitions = { init: {insert: 1}, insert: {insert: 3, doDelete: 3, moveChunk: 1}, diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js index 4f95d508df058..e43b3c8b53427 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js @@ -11,14 +11,14 @@ * requires_fcv_51, * ] */ -'use strict'; - +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from 'jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js'; load('jstests/concurrency/fsm_workload_helpers/chunks.js'); // for chunk helpers -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. 
-load("jstests/libs/analyze_plan.js"); // for 'getPlanStages' -load('jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js'); +import {getPlanStages} from "jstests/libs/analyze_plan.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.nonShardCollName = "unsharded"; // A random non-round start value was chosen so that we can verify the rounding behavior that @@ -32,12 +32,10 @@ var $config = extendWorkload($config, function($config, $super) { $config.data.numInitialDocs = 60 * 24 * 30; $config.data.numMetaCount = 30; - $config.data.featureFlagDisabled = true; - $config.data.bucketPrefix = "system.buckets."; - $config.data.timeField = 't'; $config.data.metaField = 'm'; + $config.data.timeField = 't'; $config.data.generateMetaFieldValueForInitialInserts = () => { return Math.floor(Random.rand() * $config.data.numMetaCount); @@ -52,18 +50,16 @@ var $config = extendWorkload($config, function($config, $super) { $config.startState = "init"; $config.states.insert = function insert(db, collName, connCache) { - if (this.featureFlagDisabled) { - return; - } - for (let i = 0; i < 10; i++) { // Generate a random timestamp between 'startTime' and largest timestamp we inserted. const timer = this.startTime + Math.floor(Random.rand() * this.numInitialDocs * this.increment); + const metaVal = this.generateMetaFieldValueForInsertStage(this.tid); const doc = { _id: new ObjectId(), + [this.metaField]: metaVal, [this.timeField]: new Date(timer), - [this.metaField]: this.generateMetaFieldValueForInsertStage(this.tid), + f: metaVal, }; assertAlways.commandWorked(db[collName].insert(doc)); assertAlways.commandWorked(db[this.nonShardCollName].insert(doc)); @@ -74,10 +70,6 @@ var $config = extendWorkload($config, function($config, $super) { * Moves a random chunk in the target collection. 
*/ $config.states.moveChunk = function moveChunk(db, collName, connCache) { - if (this.featureFlagDisabled) { - return; - } - const configDB = db.getSiblingDB('config'); const ns = db[this.bucketPrefix + collName].getFullName(); const chunks = findChunksUtil.findChunksByNs(configDB, ns).toArray(); @@ -100,11 +92,7 @@ var $config = extendWorkload($config, function($config, $super) { waitForDelete); }; - $config.states.init = function init(db, collName, connCache) { - if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo())) { - this.featureFlagDisabled = false; - } - }; + $config.states.init = function init(db, collName, connCache) {}; $config.transitions = { init: {insert: 1}, @@ -112,31 +100,33 @@ var $config = extendWorkload($config, function($config, $super) { moveChunk: {insert: 1, moveChunk: 0} }; - $config.teardown = function teardown(db, collName, cluster) { - if (this.featureFlagDisabled) { - return; - } + $config.data.validateCollection = function validate(db, collName) { + const pipeline = + [{$project: {_id: "$_id", m: "$m", t: "$t"}}, {$sort: {m: 1, t: 1, _id: 1}}]; + const diff = DataConsistencyChecker.getDiff(db[collName].aggregate(pipeline), + db[this.nonShardCollName].aggregate(pipeline)); + assertAlways.eq( + diff, {docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []}); + }; + $config.teardown = function teardown(db, collName, cluster) { const numBuckets = db[this.bucketPrefix + collName].find({}).itcount(); const numInitialDocs = db[collName].find().itcount(); jsTestLog("NumBuckets " + numBuckets + ", numDocs on sharded cluster" + db[collName].find().itcount() + "numDocs on unsharded collection " + db[this.nonShardCollName].find({}).itcount()); - const pipeline = - [{$project: {_id: "$_id", m: "$m", t: "$t"}}, {$sort: {m: 1, t: 1, _id: 1}}]; - const diff = DataConsistencyChecker.getDiff(db[collName].aggregate(pipeline), - db[this.nonShardCollName].aggregate(pipeline)); - assertAlways.eq( - diff, {docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []}); + + // Validate the contents of the collection. + this.validateCollection(db, collName); // Make sure that queries using various indexes on time-series buckets collection return // buckets with all documents. const verifyBucketIndex = (bucketIndex) => { const unpackStage = { "$_internalUnpackBucket": { - "timeField": this.timeField, "metaField": this.metaField, + "timeField": this.timeField, "bucketMaxSpanSeconds": NumberInt(3600) } }; @@ -157,17 +147,11 @@ var $config = extendWorkload($config, function($config, $super) { }; $config.setup = function setup(db, collName, cluster) { - if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo())) { - this.featureFlagDisabled = false; - } else { - return; - } - db[collName].drop(); db[this.nonShardCollName].drop(); assertAlways.commandWorked(db.createCollection( - collName, {timeseries: {timeField: this.timeField, metaField: this.metaField}})); + collName, {timeseries: {metaField: this.metaField, timeField: this.timeField}})); cluster.shardCollection(db[collName], {t: 1}, false); // Create indexes to verify index integrity during the teardown state. 
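
The validateCollection and teardown logic above compares the sharded collection against its unsharded twin with DataConsistencyChecker.getDiff over identically sorted aggregations. As a hedged sketch of that kind of comparison, keyed on _id and not the shell's actual DataConsistencyChecker implementation:

```js
// Editorial sketch only -- not the shell's DataConsistencyChecker. Compares two
// result sets expected to contain the same documents, keyed by _id.
function diffCollectionsById(firstCursor, secondCursor) {
    const toKey = (doc) => tojson(doc._id);
    const secondById = new Map(secondCursor.toArray().map((doc) => [toKey(doc), doc]));

    const diff = {docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []};
    for (const doc of firstCursor.toArray()) {
        const other = secondById.get(toKey(doc));
        if (other === undefined) {
            diff.docsMissingOnSecond.push(doc);
        } else {
            if (tojson(doc) !== tojson(other)) {
                diff.docsWithDifferentContents.push({first: doc, second: other});
            }
            secondById.delete(toKey(doc));
        }
    }
    diff.docsMissingOnFirst = [...secondById.values()];
    return diff;
}
```

An empty diff ({docsWithDifferentContents: [], docsMissingOnFirst: [], docsMissingOnSecond: []}) is what the assertions above expect when the two collections hold identical data.
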
@@ -185,10 +169,12 @@ var $config = extendWorkload($config, function($config, $super) { for (let i = 0; i < this.numInitialDocs; ++i) { currentTimeStamp += this.increment; + const metaVal = this.generateMetaFieldValueForInitialInserts(i); const doc = { _id: new ObjectId(), + [this.metaField]: metaVal, [this.timeField]: new Date(currentTimeStamp), - [this.metaField]: this.generateMetaFieldValueForInitialInserts(i), + f: metaVal, }; bulk.insert(doc); bulkUnsharded.insert(doc); diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_updates.js b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_updates.js index 7046ebac1edef..5cac3c4aa9ed5 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_updates.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_updates.js @@ -11,23 +11,15 @@ * requires_fcv_51, * ] */ -'use strict'; +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from 'jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js'; const numValues = 10; -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. -load('jstests/concurrency/fsm_workloads/random_moveChunk_timeseries_inserts.js'); - -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.init = function(db, collName, connCache) { - if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(db) && - TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db)) { - this.featureFlagDisabled = false; - } else { - jsTestLog( - "Skipping executing this test as the requisite feature flags are not enabled."); - } - $super.states.init(db, collName); }; @@ -46,26 +38,27 @@ var $config = extendWorkload($config, function($config, $super) { }; $config.states.update = function(db, collName, connCache) { - if (this.featureFlagDisabled) { - return; - } - const shardedColl = db[collName]; - const unshardedColl = db[this.nonShardCollName]; - const updateField = "tid" + this.tid; + const updateField = this.metaField + ".tid" + this.tid; const oldValue = Random.randInt(numValues); // Updates some measurements along the field owned by this thread in both sharded and // unsharded ts collections. jsTestLog("Executing update state on: " + collName + " on field " + updateField); - assertAlways.commandWorked( - shardedColl.update({[this.metaField]: {[updateField]: {$gte: oldValue}}}, - {$inc: {[this.metaField + "." + updateField]: 1}}, - {multi: true})); - assertAlways.commandWorked( - unshardedColl.update({[this.metaField]: {[updateField]: {$gte: oldValue}}}, - {$inc: {[this.metaField + "." + updateField]: 1}}, - {multi: true})); + assertAlways.commandWorked(shardedColl.update( + {[updateField]: {$gte: oldValue}}, {$inc: {[updateField]: 1}}, {multi: true})); + }; + + $config.data.validateCollection = function validate(db, collName) { + // Since we can't use a 'snapshot' read concern for timeseries updates, updates on the + // sharded collection may not see the exact same records as the non-sharded, so the + // validation needs to be more lenient. 
+ const count = db[collName].find().itcount(); + const countNonSharded = db[this.nonShardCollName].find().itcount(); + assertAlways.eq( + count, + countNonSharded, + "Expected sharded collection to have the same number of records as unsharded"); }; $config.transitions = { @@ -75,5 +68,14 @@ var $config = extendWorkload($config, function($config, $super) { moveChunk: {insert: 0.4, moveChunk: 0.1, update: 0.5}, }; + // Reduced iteration and document counts to avoid timeouts. + $config.iterations = 20; + + // Five minutes. + $config.data.increment = 1000 * 60 * 5; + + // This should generate documents for a span of one month. + $config.data.numInitialDocs = 12 * 24 * 30; + return $config; }); diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js index cd7831128f7bf..26fa4f8be1c1d 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs updates that will change a document's shard key while migrating chunks. Uses both * retryable writes and multi-statement transactions. @@ -10,11 +8,11 @@ * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js"; load('jstests/concurrency/fsm_workload_helpers/auto_retry_transaction.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 5; $config.iterations = 50; diff --git a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key_kill_sessions.js b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key_kill_sessions.js index 1f258d7614deb..f769dd4563bab 100644 --- a/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key_kill_sessions.js +++ b/jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key_kill_sessions.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Performs updates that will change a document's shard key while migrating chunks and killing * sessions. Only runs updates that cause a document to change shards to increase the odds of @@ -11,9 +9,11 @@ * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js"; load('jstests/concurrency/fsm_workload_helpers/kill_session.js'); // for killSession -load('jstests/concurrency/fsm_workloads/random_moveChunk_update_shard_key.js'); load('jstests/libs/override_methods/retry_on_killed_session.js'); // By default retry_on_killed_session.js will only retry known retryable operations like reads and @@ -21,7 +21,7 @@ load('jstests/libs/override_methods/retry_on_killed_session.js'); // into always retrying killed operations. 
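
Several of these workloads tolerate operations being killed mid-run (for example by the killSessions issued during setFCV) by loading retry_on_killed_session.js and opting in via TestData. The override's real mechanism is not shown here; the snippet below is only an editorial illustration of the retry-on-interruption idea, with runWithRetryOnInterrupted being a hypothetical helper:

```js
// Hypothetical helper, for illustration only -- not the code in
// retry_on_killed_session.js. Re-runs a command a few times if it fails
// because its session or operation was killed.
function runWithRetryOnInterrupted(db, cmdObj, maxAttempts = 3) {
    let res;
    for (let attempt = 1; attempt <= maxAttempts; ++attempt) {
        res = db.runCommand(cmdObj);
        if (res.ok || res.code !== ErrorCodes.Interrupted) {
            return res;
        }
        jsTestLog(`Retrying ${tojson(cmdObj)} after Interrupted (attempt ${attempt})`);
    }
    return res;
}
```
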
TestData.alwaysRetryOnKillSessionErrors = true; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.retryOnKilledSession = true; // The base workload uses connCache, so wrap killSessions so the fsm runner doesn't complain diff --git a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js index 87b71758213d8..ea01ba0808e81 100644 --- a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js +++ b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs refineCollectionShardKey and CRUD operations concurrently. * @@ -8,7 +6,7 @@ load('jstests/libs/parallelTester.js'); -var $config = (function() { +export const $config = (function() { // The organization of documents in every collection is as follows: // // (i) Reserved for find: {tid: tid, a: 0, b: 0} -->> {tid: tid, a: 24, b: 24} diff --git a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_crud_ops.js b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_crud_ops.js index fc8181fda9b8f..7f70e9d13f5e0 100644 --- a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_crud_ops.js +++ b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_crud_ops.js @@ -1,15 +1,15 @@ -'use strict'; - /** * Same as the base workload, but refines to a nested shard key. * * @tags: [requires_persistence, requires_sharding] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/refine_collection_shard_key_crud_ops.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { // Note the base workload assumes this is the nested key when constructing the crud ops. $config.data.newShardKey = {a: 1, "b.c": 1}; $config.data.usingNestedKey = true; diff --git a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_zone_ops.js b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_zone_ops.js index 5123475ecbfbf..535bfa67eeea4 100644 --- a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_zone_ops.js +++ b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_nested_zone_ops.js @@ -1,15 +1,15 @@ -'use strict'; - /** * Same as the base workload, but refines to a nested shard key. 
* * @tags: [requires_persistence, requires_sharding] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.newShardKey = {a: 1, "b.c": 1}; $config.data.newShardKeyFields = ["a", "b.c"]; return $config; diff --git a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js index 9323b64edb1ca..79fb1416fa1a6 100644 --- a/jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js +++ b/jstests/concurrency/fsm_workloads/refine_collection_shard_key_zone_ops.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Runs refineCollectionShardKey and zone operations concurrently. * @@ -20,7 +18,7 @@ load('jstests/libs/parallelTester.js'); -var $config = (function() { +export const $config = (function() { var data = { oldShardKeyField: 'a', newShardKeyFields: ['a', 'b'], diff --git a/jstests/concurrency/fsm_workloads/reindex.js b/jstests/concurrency/fsm_workloads/reindex.js index 1fe4393fe35e7..ce62e01798985 100644 --- a/jstests/concurrency/fsm_workloads/reindex.js +++ b/jstests/concurrency/fsm_workloads/reindex.js @@ -1,5 +1,3 @@ -'use strict'; - /** * reindex.js * @@ -10,7 +8,7 @@ */ load("jstests/concurrency/fsm_workload_helpers/assert_handle_fail_in_transaction.js"); -var $config = (function() { +export const $config = (function() { var data = { nIndexes: 4 + 1, // 4 created and 1 for _id. nDocumentsToInsert: 1000, diff --git a/jstests/concurrency/fsm_workloads/reindex_background.js b/jstests/concurrency/fsm_workloads/reindex_background.js index 575cf89400eba..336edc1a2638a 100644 --- a/jstests/concurrency/fsm_workloads/reindex_background.js +++ b/jstests/concurrency/fsm_workloads/reindex_background.js @@ -1,5 +1,3 @@ -'use strict'; - /** * reindex_background.js * @@ -11,24 +9,23 @@ * @tags: [SERVER-40561, creates_background_indexes] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/reindex.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/reindex.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.prefix = 'reindex_background'; $config.states.createIndexes = function createIndexes(db, collName) { const coll = db[this.threadCollName]; - const options = {background: true}; // The number of indexes created here is also stored in data.nIndexes. 
- assertWorkedHandleTxnErrors(coll.createIndex({text: 'text'}, options), + assertWorkedHandleTxnErrors(coll.createIndex({text: 'text'}), ErrorCodes.IndexBuildAlreadyInProgress); - assertWorkedHandleTxnErrors(coll.createIndex({geo: '2dsphere'}, options), + assertWorkedHandleTxnErrors(coll.createIndex({geo: '2dsphere'}), ErrorCodes.IndexBuildAlreadyInProgress); - assertWorkedHandleTxnErrors(coll.createIndex({integer: 1}, options), + assertWorkedHandleTxnErrors(coll.createIndex({integer: 1}), ErrorCodes.IndexBuildAlreadyInProgress); - assertWorkedHandleTxnErrors(coll.createIndex({"$**": 1}, options), + assertWorkedHandleTxnErrors(coll.createIndex({"$**": 1}), ErrorCodes.IndexBuildAlreadyInProgress); }; diff --git a/jstests/concurrency/fsm_workloads/reindex_writeconflict.js b/jstests/concurrency/fsm_workloads/reindex_writeconflict.js index 36818b04fe0df..20d5f3533e29f 100644 --- a/jstests/concurrency/fsm_workloads/reindex_writeconflict.js +++ b/jstests/concurrency/fsm_workloads/reindex_writeconflict.js @@ -1,11 +1,9 @@ -'use strict'; - /** * reindex_writeconflict.js * * Ensures reIndex successfully handles WriteConflictExceptions. */ -var $config = (function() { +export const $config = (function() { var states = { reIndex: function reIndex(db, collName) { var res = db[collName].reIndex(); diff --git a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js index 8ef93b538c9d4..1abb9530460c0 100644 --- a/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js +++ b/jstests/concurrency/fsm_workloads/remove_and_bulk_insert.js @@ -1,5 +1,3 @@ -'use strict'; - /** * remove_and_bulk_insert.js * @@ -9,7 +7,7 @@ * This workload was designed to reproduce SERVER-20512, where a record in an evicted page was * accessed after a WriteConflictException occurred in Collection::deleteDocument(). 
*/ -var $config = (function() { +export const $config = (function() { var states = { insert: function insert(db, collName) { var bulk = db[collName].initializeUnorderedBulkOp(); diff --git a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js index c349dc2087487..3ddbebd3ea097 100644 --- a/jstests/concurrency/fsm_workloads/remove_multiple_documents.js +++ b/jstests/concurrency/fsm_workloads/remove_multiple_documents.js @@ -1,5 +1,3 @@ -'use strict'; - /** * remove_multiple_documents.js * @@ -12,7 +10,7 @@ * * @tags: [assumes_balancer_off] */ -var $config = (function() { +export const $config = (function() { var states = { init: function init(db, collName) { this.numDocs = 200; diff --git a/jstests/concurrency/fsm_workloads/remove_single_document.js b/jstests/concurrency/fsm_workloads/remove_single_document.js index ce5660c9fe065..148d087a3e367 100644 --- a/jstests/concurrency/fsm_workloads/remove_single_document.js +++ b/jstests/concurrency/fsm_workloads/remove_single_document.js @@ -1,5 +1,3 @@ -'use strict'; - /** * remove_single_document.js * @@ -7,7 +5,7 @@ * * @tags: [assumes_balancer_off] */ -var $config = (function() { +export const $config = (function() { var states = { remove: function remove(db, collName) { // try removing a random document diff --git a/jstests/concurrency/fsm_workloads/remove_where.js b/jstests/concurrency/fsm_workloads/remove_where.js index 3565a77d62f00..e342a775644d6 100644 --- a/jstests/concurrency/fsm_workloads/remove_where.js +++ b/jstests/concurrency/fsm_workloads/remove_where.js @@ -1,5 +1,3 @@ -'use strict'; - /** * remove_where.js * @@ -13,10 +11,10 @@ * @tags: [assumes_balancer_off] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_where.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.randomBound = 10; $config.data.generateDocumentToInsert = function generateDocumentToInsert() { return {tid: this.tid, x: Random.randInt(this.randomBound)}; diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js index 1725314a07704..865cf85ebaff0 100644 --- a/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js +++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_chain.js @@ -1,5 +1,3 @@ -'use strict'; - /** * rename_capped_collection_chain.js * @@ -10,7 +8,7 @@ * @tags: [requires_capped] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. 
diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js index 5110802cd20aa..bdb080f60054b 100644 --- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js +++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_chain.js @@ -1,5 +1,3 @@ -'use strict'; - /** * rename_capped_collection_dbname_chain.js * @@ -14,7 +12,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js index 8933f45b12474..026e31b92c049 100644 --- a/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js +++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_dbname_droptarget.js @@ -1,5 +1,3 @@ -'use strict'; - /** * rename_capped_collection_dbname_droptarget.js * @@ -14,7 +12,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js index f27afe5120802..b724766c5a7fc 100644 --- a/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js +++ b/jstests/concurrency/fsm_workloads/rename_capped_collection_droptarget.js @@ -1,5 +1,3 @@ -'use strict'; - /** * rename_capped_collection_droptarget.js * @@ -10,7 +8,7 @@ * @tags: [requires_capped] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/rename_collection_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_chain.js index c9a34ce850a34..fe54fe130238d 100644 --- a/jstests/concurrency/fsm_workloads/rename_collection_chain.js +++ b/jstests/concurrency/fsm_workloads/rename_collection_chain.js @@ -1,5 +1,3 @@ -'use strict'; - /** * rename_collection_chain.js * @@ -12,7 +10,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js index e12bc1e4eef05..1c66aad7e48ba 100644 --- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js +++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_chain.js @@ -1,5 +1,3 @@ -'use strict'; - /** * rename_collection_dbname_chain.js * @@ -13,7 +11,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. 
diff --git a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js index 51b6a82ec9d7a..f718698ad61d8 100644 --- a/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js +++ b/jstests/concurrency/fsm_workloads/rename_collection_dbname_droptarget.js @@ -1,5 +1,3 @@ -'use strict'; - /** * rename_collection_dbname_droptarget.js * @@ -12,7 +10,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js index 9888f6a040d84..98098d84e7064 100644 --- a/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js +++ b/jstests/concurrency/fsm_workloads/rename_collection_droptarget.js @@ -1,5 +1,3 @@ -'use strict'; - /** * rename_collection_droptarget.js * @@ -12,7 +10,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the collection name, // since the workload name is assumed to be unique. diff --git a/jstests/concurrency/fsm_workloads/rename_sharded_collection.js b/jstests/concurrency/fsm_workloads/rename_sharded_collection.js index 49152c3dcbbc1..594dcc1a519da 100644 --- a/jstests/concurrency/fsm_workloads/rename_sharded_collection.js +++ b/jstests/concurrency/fsm_workloads/rename_sharded_collection.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform continuous renames on 3 collections per database, with the objective to verify that: * - Upon successful renames, no data are lost @@ -97,7 +95,7 @@ function checkExceptionHasBeenThrown(db, exceptionCode) { assert.gte(count, 1, 'No exception with error code ' + exceptionCode + ' has been thrown'); } -var $config = (function() { +export const $config = (function() { let states = { rename: function(db, collName, connCache) { const dbName = getRandomDbName(this.threadCount); diff --git a/jstests/concurrency/fsm_workloads/reshard_collection_crud_ops.js b/jstests/concurrency/fsm_workloads/reshard_collection_crud_ops.js index 28a0fb258a5de..a39ab11fe0fef 100644 --- a/jstests/concurrency/fsm_workloads/reshard_collection_crud_ops.js +++ b/jstests/concurrency/fsm_workloads/reshard_collection_crud_ops.js @@ -1,12 +1,10 @@ -'use strict'; - /** * Runs reshardCollection and CRUD operations concurrently. 
* * @tags: [requires_sharding] */ -var $config = (function() { +export const $config = (function() { const shardKeys = [ {a: 1}, {b: 1}, diff --git a/jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js b/jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js index 4e6d69da704d2..a088972dfdebc 100644 --- a/jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js +++ b/jstests/concurrency/fsm_workloads/schema_validator_with_expr_variables.js @@ -11,7 +11,7 @@ "use strict"; -var $config = (function() { +export const $config = (function() { function setup(db, collName) { for (let i = 0; i < 200; ++i) { assertAlways.commandWorked( diff --git a/jstests/concurrency/fsm_workloads/secondary_reads.js b/jstests/concurrency/fsm_workloads/secondary_reads.js index 403773c2e4a45..1d6498f0ab5a0 100644 --- a/jstests/concurrency/fsm_workloads/secondary_reads.js +++ b/jstests/concurrency/fsm_workloads/secondary_reads.js @@ -1,5 +1,3 @@ -'use strict'; - /** * secondary_reads.js * @@ -20,7 +18,7 @@ * @tags: [requires_replication, uses_write_concern] */ -var $config = (function() { +export const $config = (function() { // Use the workload name as the collection name. var uniqueCollectionName = 'secondary_reads'; diff --git a/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js b/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js index b3d7224a40d48..067a5721e2ccb 100644 --- a/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js +++ b/jstests/concurrency/fsm_workloads/secondary_reads_with_catalog_changes.js @@ -1,8 +1,3 @@ -'use strict'; - -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/secondary_reads.js'); // for $config - /** * secondary_reads_with_catalog_changes.js * @@ -27,7 +22,11 @@ load('jstests/concurrency/fsm_workloads/secondary_reads.js'); // for $config * uses_write_concern, * ] */ -var $config = extendWorkload($config, function($config, $super) { +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/secondary_reads.js"; +load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. + +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.buildIndex = function buildIndex(db, spec) { // Index must be built eventually. 
assertWhenOwnColl.soon(() => { diff --git a/jstests/concurrency/fsm_workloads/server_status.js b/jstests/concurrency/fsm_workloads/server_status.js index 0c95bfe0ff371..d0ffe0403ee8c 100644 --- a/jstests/concurrency/fsm_workloads/server_status.js +++ b/jstests/concurrency/fsm_workloads/server_status.js @@ -1,11 +1,9 @@ -'use strict'; - /** * server_status.js * * Simply checks that the serverStatus command works */ -var $config = (function() { +export const $config = (function() { var states = { status: function status(db, collName) { var opts = diff --git a/jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js b/jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js index 5d60c79b58054..8b342a79b2a60 100644 --- a/jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js +++ b/jstests/concurrency/fsm_workloads/server_status_with_time_out_cursors.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Run serverStatus() while running a large number of queries which are expected to reach maxTimeMS * and time out. @@ -12,7 +10,7 @@ */ load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // for isMongos -var $config = (function() { +export const $config = (function() { const states = { /** * This is a no-op, used only as a transition state. diff --git a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js index a466f3bb18ba9..b8ebca24fe281 100644 --- a/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js +++ b/jstests/concurrency/fsm_workloads/sharded_base_partitioned.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Provides an init state that partitions the data space into chunks evenly across threads. * @@ -27,7 +25,7 @@ load('jstests/concurrency/fsm_workload_helpers/chunks.js'); // for chunk helpers load("jstests/sharding/libs/find_chunks_util.js"); -var $config = (function() { +export const $config = (function() { var data = { partitionSize: 1, // We use a non-hashed shard key of { _id: 1 } so that documents reside on their expected diff --git a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js index 5b29d413bbf04..9fa5ad5fb8138 100644 --- a/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js +++ b/jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Extends sharded_base_partitioned.js. 
* @@ -12,11 +10,13 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js"; load("jstests/sharding/libs/find_chunks_util.js"); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.iterations = 8; $config.threadCount = 5; diff --git a/jstests/concurrency/fsm_workloads/sharded_mergeSplitChunks_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_mergeSplitChunks_partitioned.js index 9ce7dc23a7948..1d5247b9fe2f6 100644 --- a/jstests/concurrency/fsm_workloads/sharded_mergeSplitChunks_partitioned.js +++ b/jstests/concurrency/fsm_workloads/sharded_mergeSplitChunks_partitioned.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Extends sharded_mergeChunks_partitioned.js. * @@ -9,12 +7,14 @@ * @tags: [requires_sharding, assumes_balancer_off, does_not_support_stepdowns] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/sharded_mergeChunks_partitioned.js"; load("jstests/sharding/libs/find_chunks_util.js"); load("jstests/concurrency/fsm_workload_helpers/state_transition_utils.js"); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.iterations = 6; $config.threadCount = 5; diff --git a/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js index d5313a25776df..e3983380abd3e 100644 --- a/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js +++ b/jstests/concurrency/fsm_workloads/sharded_moveChunk_partitioned.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Extends sharded_base_partitioned.js. * @@ -12,10 +10,12 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.iterations = 5; $config.threadCount = 5; diff --git a/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js b/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js index 2f20353213824..746380a2e4367 100644 --- a/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js +++ b/jstests/concurrency/fsm_workloads/sharded_splitChunk_partitioned.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Extends sharded_base_partitioned.js. 
* @@ -12,10 +10,12 @@ * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/sharded_base_partitioned.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/sharded_base_partitioned.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.iterations = 5; $config.threadCount = 5; diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_crud_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_crud_operations.js index 78cc71e87a7f3..635a70fb20ebe 100644 --- a/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_crud_operations.js +++ b/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_crud_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform point-in-time snapshot reads that span a 'find' and multiple 'getmore's concurrently with * CRUD operations, after initial insert operations. This tests that the effects of concurrent CRUD @@ -17,7 +15,8 @@ */ load('jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js'); -var $config = (function() { + +export const $config = (function() { const data = {numIds: 100, numDocsToInsertPerThread: 5, batchSize: 10}; const states = { diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_ddl_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_ddl_operations.js index 9f3ae16a19eb9..e6eda9a57b820 100644 --- a/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_ddl_operations.js +++ b/jstests/concurrency/fsm_workloads/snapshot_read_at_cluster_time_ddl_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform point-in-time snapshot reads that span a 'find' and multiple 'getmore's concurrently with * CRUD operations. @@ -13,7 +11,8 @@ */ load('jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js'); -var $config = (function() { + +export const $config = (function() { const data = {numIds: 100, numDocsToInsertPerThread: 5, batchSize: 10}; const states = { @@ -78,7 +77,7 @@ var $config = (function() { }, createIndex: function createIndex(db, collName) { - db[collName].createIndex({a: 1}, {background: true}); + db[collName].createIndex({a: 1}); }, dropIndex: function dropIndex(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js index 5b89d01d29fdf..253c53a089302 100644 --- a/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js +++ b/jstests/concurrency/fsm_workloads/snapshot_read_catalog_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Perform snapshot reads that span a find and a getmore concurrently with CRUD operations. The * snapshot reads and CRUD operations will all contend for locks on db and collName. 
Since the @@ -11,7 +9,8 @@ */ load('jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js'); -var $config = (function() { + +export const $config = (function() { const data = {numIds: 100, numDocsToInsertPerThread: 5, valueToBeInserted: 1, batchSize: 50}; const states = { @@ -106,7 +105,7 @@ var $config = (function() { }, createIndex: function createIndex(db, collName) { - db[collName].createIndex({value: 1}, {background: true}); + db[collName].createIndex({value: 1}); }, dropIndex: function dropIndex(db, collName) { diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js b/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js index fbf9de53ad8b3..94328bac6648d 100644 --- a/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js +++ b/jstests/concurrency/fsm_workloads/snapshot_read_kill_op_only.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Test a snapshot read spanning a find and getmore that runs concurrently with * killOp and txnNumber change. @@ -9,10 +7,12 @@ * @tags: [uses_transactions, state_functions_share_transaction] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.transitions = { init: {snapshotFind: 1.0}, snapshotFind: {incrementTxnNumber: 0.33, killOp: 0.34, snapshotGetMore: 0.33}, diff --git a/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js b/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js index ccefd552a3f0b..fedbca1fc7f69 100644 --- a/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js +++ b/jstests/concurrency/fsm_workloads/snapshot_read_kill_operations.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Test a snapshot read spanning a find and getmore that runs concurrently with killSessions, * killOp, killCursors, and txnNumber change. @@ -9,7 +7,7 @@ load('jstests/concurrency/fsm_workload_helpers/snapshot_read_utils.js'); -var $config = (function() { +export const $config = (function() { const data = {numIds: 100, batchSize: 50}; const states = { diff --git a/jstests/concurrency/fsm_workloads/timeseries_agg_out.js b/jstests/concurrency/fsm_workloads/timeseries_agg_out.js new file mode 100644 index 0000000000000..fc4b194fc31c1 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/timeseries_agg_out.js @@ -0,0 +1,125 @@ +/** + * This test runs many concurrent aggregations using $out, writing to the same time-series + * collection. While this is happening, other threads may be creating or dropping indexes, changing + * the collection options, or sharding the collection. We expect an aggregate with a $out stage to + * fail if another client executed one of these changes between the creation of $out's temporary + * collection and the eventual rename to the target collection. + * + * Unfortunately, there aren't very many assertions we can make here, so this is mostly to test that + * the server doesn't deadlock or crash, and that temporary namespaces are cleaned up. 
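For reference, the command shape this new workload drives is an aggregate whose $out stage names a time-series target; a distilled sketch (collection and field names as in the workload below, values illustrative, and only one of the allowed error codes shown):

```js
// $out into a time-series collection, as issued by the workload's query state.
const res = db.runCommand({
    aggregate: collName,
    pipeline: [
        {$set: {time: new Date()}},
        {
            $out: {
                db: db.getName(),
                coll: 'timeseries_agg_out',
                timeseries: {timeField: 'time', metaField: 'tag'}
            }
        }
    ],
    cursor: {}
});
// Concurrent DDL can legitimately fail the final rename, so the workload only
// checks the response against an allowed set of error codes.
assertWhenOwnDB.commandWorkedOrFailedWithCode(res, [ErrorCodes.CommandFailed]);
```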
+ * + * @tags: [ + * requires_timeseries, + * does_not_support_transactions, + * does_not_support_stepdowns, + * requires_fcv_71, + * featureFlagAggOutTimeseries + * ] + */ +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from 'jstests/concurrency/fsm_workloads/agg_out.js'; + +export const $config = extendWorkload($baseConfig, function($config, $super) { + const timeFieldName = 'time'; + const metaFieldName = 'tag'; + const numDocs = 100; + $config.data.outputCollName = 'timeseries_agg_out'; + $config.data.shardKey = {[metaFieldName]: 1}; + + /** + * Runs an aggregate with a $out with time-series into '$config.data.outputCollName'. + */ + $config.states.query = function query(db, collName) { + const res = db[collName].runCommand({ + aggregate: collName, + pipeline: [ + {$set: {"time": new Date()}}, + { + $out: { + db: db.getName(), + coll: this.outputCollName, + timeseries: {timeField: timeFieldName, metaField: metaFieldName} + } + } + ], + cursor: {} + }); + + const allowedErrorCodes = [ + ErrorCodes.CommandFailed, // indexes of target collection changed during processing. + ErrorCodes.IllegalOperation, // $out is not supported to an existing *sharded* output + // collection. + 17152, // namespace is capped so it can't be used for $out. + 28769, // $out collection cannot be sharded. + ErrorCodes.NamespaceExists, // $out tries to create a view when a buckets collection + // already exists. This error is not caught because the + // view is being dropped by a previous thread. + ]; + assertWhenOwnDB.commandWorkedOrFailedWithCode(res, allowedErrorCodes); + if (res.ok) { + const cursor = new DBCommandCursor(db, res); + assertAlways.eq(0, cursor.itcount()); // No matter how many documents were in the + // original input stream, $out should never return any results. + } + }; + + /** + * Changes the 'expireAfterSeconds' value for the time-series collection. + */ + $config.states.collMod = function collMod(db, unusedCollName) { + let expireAfterSeconds = "off"; + if (Random.rand() < 0.5) { + // Change the expireAfterSeconds + expireAfterSeconds = Random.rand(); + } + + assertWhenOwnDB.commandWorkedOrFailedWithCode( + db.runCommand({collMod: this.outputCollName, expireAfterSeconds: expireAfterSeconds}), + [ErrorCodes.ConflictingOperationInProgress, ErrorCodes.NamespaceNotFound]); + }; + + /** + * 'convertToCapped' should always fail with a 'CommandNotSupportedOnView' error. + */ + $config.states.convertToCapped = function convertToCapped(db, unusedCollName) { + if (isMongos(db)) { + return; // convertToCapped can't be run against a mongos. + } + assertWhenOwnDB.commandFailedWithCode( + db.runCommand({convertToCapped: this.outputCollName, size: 100000}), + ErrorCodes.CommandNotSupportedOnView); + }; + + $config.teardown = function teardown(db) { + const collNames = db.getCollectionNames(); + // Ensure that a temporary collection is not left behind. + assertAlways.eq(db.getCollectionNames() + .filter(col => col.includes('system.buckets.tmp.agg_out')) + .length, + 0); + + // Ensure that for the buckets collection there is a corresponding view. + assertAlways(!(collNames.includes('system.buckets.timeseries_agg_out') && + !collNames.includes('timeseries_agg_out'))); + }; + + /** + * Create a time-series collection and insert 100 documents. 
+ */ + $config.setup = function setup(db, collName, cluster) { + db[collName].drop(); + assertWhenOwnDB.commandWorked(db.createCollection( + collName, {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); + const docs = []; + for (let i = 0; i < numDocs; ++i) { + docs.push({ + [timeFieldName]: ISODate(), + [metaFieldName]: (this.tid * numDocs) + i, + }); + } + assertWhenOwnDB.commandWorked( + db.runCommand({insert: collName, documents: docs, ordered: false})); + }; + + return $config; +}); diff --git a/jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js b/jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js new file mode 100644 index 0000000000000..bf1c01c5f2b94 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/timeseries_agg_out_interrupt_cleanup.js @@ -0,0 +1,93 @@ +/** + * Tests $out stage of aggregate command with time-series collections concurrently with killOp. + * Ensures that all the temporary collections created during the aggregate command are deleted and + * that all buckets collection have a corresponding view. This workloads extends + * 'agg_out_interrupt_cleanup'. + * + * @tags: [ + * requires_timeseries, + * does_not_support_transactions, + * does_not_support_stepdowns, + * uses_curop_agg_stage, + * requires_fcv_71, + * featureFlagAggOutTimeseries + * ] + */ +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/agg_out_interrupt_cleanup.js"; + +export const $config = extendWorkload($baseConfig, function($config, $super) { + const timeFieldName = 'time'; + const metaFieldName = 'tag'; + const numDocs = 100; + + $config.states.aggregate = function aggregate(db, collName) { + // drop the view to ensure that each time a buckets collection is made, the view will also + // be made or both be destroyed. + assert(db["interrupt_temp_out"].drop()); + // $out to the same collection so that concurrent aggregate commands would cause congestion. + db[collName].runCommand({ + aggregate: collName, + pipeline: [{ + $out: { + db: db.getName(), + coll: "interrupt_temp_out", + timeseries: {timeField: timeFieldName, metaField: metaFieldName} + } + }], + cursor: {} + }); + }; + + $config.states.killOp = function killOp(db, collName) { + // The aggregate command could be running different commands internally (renameCollection, + // insertDocument, etc.) depending on which stage of execution it is in. So, get all the + // operations that are running against the input, output or temp collections. + $super.data.killOpsMatchingFilter(db, { + op: "command", + active: true, + $or: [ + {"ns": db.getName() + ".interrupt_temp_out"}, // For the view. + {"ns": db.getName() + "." + collName}, // For input collection. + // For the tmp collection. + {"ns": {$regex: "^" + db.getName() + "\.system.buckets\.tmp\.agg_out.*"}} + ], + "command.drop": { + $exists: false + } // Exclude 'drop' command from the filter to make sure that we don't kill the the + // drop command which is responsible for dropping the temporary collection. + }); + }; + + $config.teardown = function teardown(db) { + const collNames = db.getCollectionNames(); + // Ensure that a temporary collection is not left behind. + assertAlways.eq( + collNames.filter(coll => coll.includes('system.buckets.tmp.agg_out')).length, 0); + + // Ensure that for the buckets collection there is a corresponding view. 
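Both $out workloads assert the same teardown invariant; distilled, with collection names taken from the interrupt-cleanup variant:

```js
// No temporary buckets collection may survive the workload, and a buckets
// collection must always be paired with its view.
const collNames = db.getCollectionNames();
assertAlways.eq(
    collNames.filter(c => c.includes('system.buckets.tmp.agg_out')).length, 0);
assertAlways(!(collNames.includes('system.buckets.interrupt_temp_out') &&
               !collNames.includes('interrupt_temp_out')));
```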
+ assertAlways(!(collNames.includes('system.buckets.interrupt_temp_out') && + !collNames.includes('interrupt_temp_out'))); + }; + + /** + * Create a time-series collection and insert 100 documents. + */ + $config.setup = function setup(db, collName, cluster) { + db[collName].drop(); + assert.commandWorked(db.createCollection( + collName, {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); + const docs = []; + for (let i = 0; i < numDocs; ++i) { + docs.push({ + [timeFieldName]: ISODate(), + [metaFieldName]: (this.tid * numDocs) + i, + }); + } + assert.commandWorked(db.runCommand({insert: collName, documents: docs, ordered: false})); + }; + + return $config; +}); diff --git a/jstests/concurrency/fsm_workloads/timeseries_collmod_granularity_update.js b/jstests/concurrency/fsm_workloads/timeseries_collmod_granularity_update.js index d406cd34802d9..67efd3dfb9ee6 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_collmod_granularity_update.js +++ b/jstests/concurrency/fsm_workloads/timeseries_collmod_granularity_update.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Tests read and write operations concurrent with granularity updates on sharded time-series * collection. @@ -11,9 +9,9 @@ * ] */ -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; -var $config = (function() { +export const $config = (function() { const shardedCollName = i => `sharded_${i}`; const unshardedCollName = i => `unsharded_${i}`; const collCount = 50; diff --git a/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js b/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js index c766b42265ada..fdca16127bec1 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js +++ b/jstests/concurrency/fsm_workloads/timeseries_deletes_and_inserts.js @@ -1,5 +1,3 @@ -'use strict'; - /** * timeseries_deletes_and_inserts.js * @@ -17,7 +15,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { const data = { logColl: "deletes_and_inserts_log", nReadingsPerSensor: 100, diff --git a/jstests/concurrency/fsm_workloads/timeseries_insert_idle_bucket_expiration.js b/jstests/concurrency/fsm_workloads/timeseries_insert_idle_bucket_expiration.js index eeceb57ae5109..2e7f96c1e34c3 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_insert_idle_bucket_expiration.js +++ b/jstests/concurrency/fsm_workloads/timeseries_insert_idle_bucket_expiration.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Tests concurrent time-series inserts, with enough batches and data to force buckets to be closed * due to the memory usage threshold. @@ -8,15 +6,10 @@ * requires_timeseries, * # Timeseries do not support multi-document transactions with inserts. * does_not_support_transactions, - * # Stepdowns can cause inserts to fail in some cases in sharded passthroughs and not be - * # automatically retried. We aren't sure of the root cause yet, but we are excluding this tests - * # from those suites for now. - * # TODO (SERVER-67609): Remove this tag, or update the explanation above. 
- * does_not_support_stepdowns, * ] */ -var $config = (function() { +export const $config = (function() { const timeFieldName = 'time'; const metaFieldName = 'tag'; const numDocs = 100; diff --git a/jstests/concurrency/fsm_workloads/timeseries_insert_kill_op.js b/jstests/concurrency/fsm_workloads/timeseries_insert_kill_op.js index 16c21363d9470..682865258cb58 100644 --- a/jstests/concurrency/fsm_workloads/timeseries_insert_kill_op.js +++ b/jstests/concurrency/fsm_workloads/timeseries_insert_kill_op.js @@ -1,5 +1,3 @@ -'use strict'; - /** * Tests killing time-series inserts. * @@ -14,7 +12,7 @@ * ] */ -var $config = (function() { +export const $config = (function() { const timeFieldName = 'time'; const metaFieldName = 'tag'; diff --git a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js index e07300d6c5c62..c1a5dcc7a226a 100644 --- a/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js +++ b/jstests/concurrency/fsm_workloads/update_and_bulk_insert.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_and_bulk_insert.js * @@ -10,7 +8,7 @@ * we attempted to make a copy of a record after a WriteConflictException occurred in * Collection::updateDocument(). */ -var $config = (function() { +export const $config = (function() { var states = { insert: function insert(db, collName) { var bulk = db[collName].initializeUnorderedBulkOp(); diff --git a/jstests/concurrency/fsm_workloads/update_array.js b/jstests/concurrency/fsm_workloads/update_array.js index e0b802f4e611c..d3f77daf40133 100644 --- a/jstests/concurrency/fsm_workloads/update_array.js +++ b/jstests/concurrency/fsm_workloads/update_array.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_array.js * @@ -13,7 +11,7 @@ // For isMongod. load('jstests/concurrency/fsm_workload_helpers/server_types.js'); -var $config = (function() { +export const $config = (function() { var states = (function() { // db: explicitly passed to avoid accidentally using the global `db` // res: WriteResult diff --git a/jstests/concurrency/fsm_workloads/update_array_noindex.js b/jstests/concurrency/fsm_workloads/update_array_noindex.js index c61116843807d..f6b7b31677f1e 100644 --- a/jstests/concurrency/fsm_workloads/update_array_noindex.js +++ b/jstests/concurrency/fsm_workloads/update_array_noindex.js @@ -1,13 +1,11 @@ -'use strict'; - /** * update_array_noindex.js * * Executes the update_array.js workload after dropping all non-_id indexes on * the collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_array.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_array.js"; load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes -var $config = extendWorkload($config, dropAllIndexes); +export const $config = extendWorkload($baseConfig, dropAllIndexes); diff --git a/jstests/concurrency/fsm_workloads/update_check_index.js b/jstests/concurrency/fsm_workloads/update_check_index.js index f960f2abf831f..6c03bef33d1dc 100644 --- a/jstests/concurrency/fsm_workloads/update_check_index.js +++ b/jstests/concurrency/fsm_workloads/update_check_index.js @@ -1,12 +1,10 @@ -'use strict'; - /** * update_check_index.js * * Ensures that concurrent multi updates cannot produce duplicate index entries. Regression test * for SERVER-17132. 
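extendWorkload is used in two forms in this patch: with an inline callback that edits the copied config (as sketched earlier), and with a ready-made modifier such as dropAllIndexes. The *_noindex workloads use the modifier form, which reduces to:

```js
// Modifier form: the second argument is a pre-built modifier function rather
// than an inline callback.
import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js";
import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_array.js";
load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js');  // for dropAllIndexes

export const $config = extendWorkload($baseConfig, dropAllIndexes);
```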
*/ -var $config = (function() { +export const $config = (function() { var states = (function() { function multiUpdate(db, collName) { // Set 'c' to some random value. diff --git a/jstests/concurrency/fsm_workloads/update_inc.js b/jstests/concurrency/fsm_workloads/update_inc.js index 5e71dd40d7081..1ae5dcaa05b21 100644 --- a/jstests/concurrency/fsm_workloads/update_inc.js +++ b/jstests/concurrency/fsm_workloads/update_inc.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_inc.js * @@ -12,7 +10,7 @@ // For isMongod. load('jstests/concurrency/fsm_workload_helpers/server_types.js'); -var $config = (function() { +export const $config = (function() { var data = { // uses the workload name as _id on the document. // assumes this name will be unique. @@ -34,6 +32,7 @@ var $config = (function() { var updateDoc = this.getUpdateArgument(this.fieldName); var res = db[collName].update({_id: this.id}, updateDoc); + assert.commandWorked(res); assertAlways.eq(0, res.nUpserted, tojson(res)); if (isMongod(db)) { diff --git a/jstests/concurrency/fsm_workloads/update_inc_capped.js b/jstests/concurrency/fsm_workloads/update_inc_capped.js index fb37c1ee027b4..37285c7b7635d 100644 --- a/jstests/concurrency/fsm_workloads/update_inc_capped.js +++ b/jstests/concurrency/fsm_workloads/update_inc_capped.js @@ -1,13 +1,11 @@ -'use strict'; - /** * update_inc_capped.js * * Executes the update_inc.js workload on a capped collection. * @tags: [requires_capped] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_inc.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_inc.js"; load('jstests/concurrency/fsm_workload_modifiers/make_capped.js'); // for makeCapped -var $config = extendWorkload($config, makeCapped); +export const $config = extendWorkload($baseConfig, makeCapped); diff --git a/jstests/concurrency/fsm_workloads/update_inc_pipeline.js b/jstests/concurrency/fsm_workloads/update_inc_pipeline.js index 95fdc674ab1ad..712d70a2fd126 100644 --- a/jstests/concurrency/fsm_workloads/update_inc_pipeline.js +++ b/jstests/concurrency/fsm_workloads/update_inc_pipeline.js @@ -1,15 +1,13 @@ -'use strict'; - /** * update_inc_pipeline.js * * This is the same workload as update_inc.js, but substitutes a $mod-style update with a * pipeline-style one. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_inc.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_inc.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.getUpdateArgument = function getUpdateArgument(fieldName) { return [{$set: {[fieldName]: {$add: ["$" + fieldName, 1]}}}]; }; diff --git a/jstests/concurrency/fsm_workloads/update_multifield.js b/jstests/concurrency/fsm_workloads/update_multifield.js index 02b2e6962abab..a657d63e9735c 100644 --- a/jstests/concurrency/fsm_workloads/update_multifield.js +++ b/jstests/concurrency/fsm_workloads/update_multifield.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_multifield.js * @@ -10,7 +8,7 @@ // For isMongod. 
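update_inc_pipeline only swaps the update argument supplied by getUpdateArgument; a sketch of the two equivalent increments, assuming the base workload's classic update uses $inc (the field name is illustrative):

```js
// Classic modifier-style increment (assumed shape used by update_inc.js).
db[collName].update({_id: id}, {$inc: {t0: 1}});
// Pipeline-style increment substituted in by update_inc_pipeline.js.
db[collName].update({_id: id}, [{$set: {t0: {$add: ["$t0", 1]}}}]);
```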
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); -var $config = (function() { +export const $config = (function() { function makeQuery(options) { var query = {}; if (!options.multi) { diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js index c33451fe28e2b..f82882449fd55 100644 --- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js +++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js @@ -1,39 +1,49 @@ -'use strict'; - /** * update_multifield_multiupdate.js * * Does updates that affect multiple fields on multiple documents. * The collection has an index for each field, and a multikey index for all fields. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config - -// For isMongod -load('jstests/concurrency/fsm_workload_helpers/server_types.js'); - -var $config = extendWorkload($config, function($config, $super) { - $config.data.multi = true; - - $config.data.assertResult = function(res, db, collName, query) { - assertAlways.eq(0, res.nUpserted, tojson(res)); - - if (isMongod(db)) { - // If a document's RecordId cannot change, then we should not have updated any document - // more than once, since the update stage internally de-duplicates based on RecordId. - assertWhenOwnColl.lte(this.numDocs, res.nMatched, tojson(res)); - } else { // mongos - assertAlways.gte(res.nMatched, 0, tojson(res)); - } - - assertWhenOwnColl.eq(res.nMatched, res.nModified, tojson(res)); - - var docs = db[collName].find().toArray(); - docs.forEach(function(doc) { - assertWhenOwnColl.eq('number', typeof doc.z); - assertWhenOwnColl.gt(doc.z, 0); - }); - }; - - return $config; -}); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_multifield.js"; +load('jstests/concurrency/fsm_workload_helpers/server_types.js'); // For isMongod + +export const $config = extendWorkload($baseConfig, + function($config, $super) { + $config.data.multi = true; + + $config.data.assertResult = function( + res, db, collName, query) { + assertAlways.eq(0, res.nUpserted, tojson(res)); + + if (isMongod(db)) { + // If a document's RecordId cannot change, then we + // should not have updated any document more than + // once, since the update stage internally + // de-duplicates based on RecordId. + assertWhenOwnColl.lte( + this.numDocs, res.nMatched, tojson(res)); + } else { // mongos + assertAlways.gte(res.nMatched, 0, tojson(res)); + } + + assertWhenOwnColl.eq( + res.nMatched, res.nModified, tojson(res)); + + if (TestData.runningWithBalancer !== true) { + var docs = db[collName].find().toArray(); + docs.forEach(function(doc) { + assertWhenOwnColl.eq( +'number', +typeof doc.z, +`The query is ${tojson(query)}, and doc is ${ +tojson(doc)}, the number of all docs is ${ +docs.length}. 
The response of update is ${tojson(res)}, and config multi is ${ +$config.data.multi.toString()}`); + assertWhenOwnColl.gt(doc.z, 0); + }); + } + }; + + return $config; + }); diff --git a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js index ea0cc4fc4b770..4075d424299ff 100644 --- a/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js +++ b/jstests/concurrency/fsm_workloads/update_multifield_multiupdate_noindex.js @@ -1,13 +1,13 @@ -'use strict'; - /** * update_multifield_multiupdate_noindex.js * * Executes the update_multifield_multiupdate.js workload after dropping all * non-_id indexes on the collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js'); // for $config -load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/update_multifield_multiupdate.js"; +load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes -var $config = extendWorkload($config, dropAllIndexes); +export const $config = extendWorkload($baseConfig, dropAllIndexes); diff --git a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js index 0f15037c56874..e56c348ba63c6 100644 --- a/jstests/concurrency/fsm_workloads/update_multifield_noindex.js +++ b/jstests/concurrency/fsm_workloads/update_multifield_noindex.js @@ -1,13 +1,11 @@ -'use strict'; - /** * update_multifield_noindex.js * * Executes the update_multifield.js workload after dropping all non-_id indexes * on the collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_multifield.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_multifield.js"; load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes -var $config = extendWorkload($config, dropAllIndexes); +export const $config = extendWorkload($baseConfig, dropAllIndexes); diff --git a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js index 863a9deac7c66..f6af33ebc2088 100644 --- a/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js +++ b/jstests/concurrency/fsm_workloads/update_ordered_bulk_inc.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_ordered_bulk_inc.js * @@ -14,7 +12,7 @@ // For isMongod. load('jstests/concurrency/fsm_workload_helpers/server_types.js'); -var $config = (function() { +export const $config = (function() { var states = { init: function init(db, collName) { this.fieldName = 't' + this.tid; diff --git a/jstests/concurrency/fsm_workloads/update_rename.js b/jstests/concurrency/fsm_workloads/update_rename.js index 26c52371ac90a..b6c22732b3cbc 100644 --- a/jstests/concurrency/fsm_workloads/update_rename.js +++ b/jstests/concurrency/fsm_workloads/update_rename.js @@ -1,11 +1,9 @@ -'use strict'; - /** * update_rename.js * * Each thread does a $rename to cause documents to jump between indexes. 
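A sketch of the kind of $rename update_rename.js performs, assuming two of the field names from its fieldNames list (the exact query document is not shown in this hunk, so the filter here is illustrative):

```js
// Move a value between indexed fields so documents hop between indexes.
db[collName].update({update_rename_x: {$exists: true}},
                    {$rename: {update_rename_x: 'update_rename_y'}});
```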
*/ -var $config = (function() { +export const $config = (function() { var fieldNames = ['update_rename_x', 'update_rename_y', 'update_rename_z']; function choose(array) { diff --git a/jstests/concurrency/fsm_workloads/update_rename_noindex.js b/jstests/concurrency/fsm_workloads/update_rename_noindex.js index 96af5a8f1cc2c..34be43de205ca 100644 --- a/jstests/concurrency/fsm_workloads/update_rename_noindex.js +++ b/jstests/concurrency/fsm_workloads/update_rename_noindex.js @@ -1,13 +1,11 @@ -'use strict'; - /** * update_rename_noindex.js * * Executes the update_rename.js workload after dropping all non-_id indexes on * the collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_rename.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_rename.js"; load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes -var $config = extendWorkload($config, dropAllIndexes); +export const $config = extendWorkload($baseConfig, dropAllIndexes); diff --git a/jstests/concurrency/fsm_workloads/update_replace.js b/jstests/concurrency/fsm_workloads/update_replace.js index aad7feb171695..0602c99670c01 100644 --- a/jstests/concurrency/fsm_workloads/update_replace.js +++ b/jstests/concurrency/fsm_workloads/update_replace.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_replace.js * @@ -10,7 +8,7 @@ // For isMongod. load('jstests/concurrency/fsm_workload_helpers/server_types.js'); -var $config = (function() { +export const $config = (function() { // explicitly pass db to avoid accidentally using the global `db` function assertResult(db, res) { assertAlways.eq(0, res.nUpserted, tojson(res)); diff --git a/jstests/concurrency/fsm_workloads/update_replace_noindex.js b/jstests/concurrency/fsm_workloads/update_replace_noindex.js index 14dc0b16e2a5b..90ae75dc52e81 100644 --- a/jstests/concurrency/fsm_workloads/update_replace_noindex.js +++ b/jstests/concurrency/fsm_workloads/update_replace_noindex.js @@ -1,13 +1,11 @@ -'use strict'; - /** * update_replace_noindex.js * * Executes the update_replace.js workload after dropping all non-_id indexes * on the collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_replace.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_replace.js"; load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes -var $config = extendWorkload($config, dropAllIndexes); +export const $config = extendWorkload($baseConfig, dropAllIndexes); diff --git a/jstests/concurrency/fsm_workloads/update_simple.js b/jstests/concurrency/fsm_workloads/update_simple.js index f530adbe78e27..3b952e8699a9e 100644 --- a/jstests/concurrency/fsm_workloads/update_simple.js +++ b/jstests/concurrency/fsm_workloads/update_simple.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_simple.js * @@ -12,7 +10,7 @@ // For isMongod. 
load('jstests/concurrency/fsm_workload_helpers/server_types.js'); -var $config = (function() { +export const $config = (function() { var states = { set: function set(db, collName) { this.setOrUnset(db, collName, true, this.numDocs); diff --git a/jstests/concurrency/fsm_workloads/update_simple_noindex.js b/jstests/concurrency/fsm_workloads/update_simple_noindex.js index f255967b6145e..f636b9871297b 100644 --- a/jstests/concurrency/fsm_workloads/update_simple_noindex.js +++ b/jstests/concurrency/fsm_workloads/update_simple_noindex.js @@ -1,13 +1,11 @@ -'use strict'; - /** * update_simple_noindex.js * * Executes the update_simple.js workload after dropping all non-_id indexes on * the collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_simple.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_simple.js"; load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes -var $config = extendWorkload($config, dropAllIndexes); +export const $config = extendWorkload($baseConfig, dropAllIndexes); diff --git a/jstests/concurrency/fsm_workloads/update_upsert.js b/jstests/concurrency/fsm_workloads/update_upsert.js index f0b4da763eafb..d31df50eba554 100644 --- a/jstests/concurrency/fsm_workloads/update_upsert.js +++ b/jstests/concurrency/fsm_workloads/update_upsert.js @@ -1,12 +1,10 @@ -'use strict'; - /** * update_upsert_multi.js * * Tests updates that specify upsert=true. */ -var $config = (function() { +export const $config = (function() { let states = { update: function update(db, collName) { const docId = Random.randInt(5) * 4; diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi.js b/jstests/concurrency/fsm_workloads/update_upsert_multi.js index 12825d51702fc..7a93b41892509 100644 --- a/jstests/concurrency/fsm_workloads/update_upsert_multi.js +++ b/jstests/concurrency/fsm_workloads/update_upsert_multi.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_upsert_multi.js * @@ -11,7 +9,7 @@ * * @tags: [requires_non_retryable_writes] */ -var $config = (function() { +export const $config = (function() { var states = { insert: function insert(db, collName) { var query, update, options; diff --git a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js index be943cf7b02cb..86b76a323abe4 100644 --- a/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js +++ b/jstests/concurrency/fsm_workloads/update_upsert_multi_noindex.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_upsert_multi_noindex.js * @@ -8,8 +6,8 @@ * * @tags: [requires_non_retryable_writes] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/update_upsert_multi.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/update_upsert_multi.js"; load('jstests/concurrency/fsm_workload_modifiers/drop_all_indexes.js'); // for dropAllIndexes -var $config = extendWorkload($config, dropAllIndexes); +export const $config = extendWorkload($baseConfig, dropAllIndexes); diff --git a/jstests/concurrency/fsm_workloads/update_where.js b/jstests/concurrency/fsm_workloads/update_where.js index c5526f8205c69..0add009504a4c 100644 --- 
a/jstests/concurrency/fsm_workloads/update_where.js +++ b/jstests/concurrency/fsm_workloads/update_where.js @@ -1,5 +1,3 @@ -'use strict'; - /** * update_where.js * @@ -7,10 +5,10 @@ * thread and updates them. Also queries by the thread that created the documents to verify counts. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_where.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.randomBound = 10; $config.data.generateDocumentToInsert = function generateDocumentToInsert() { return {tid: this.tid, x: Random.randInt(this.randomBound)}; diff --git a/jstests/concurrency/fsm_workloads/upsert_unique_index.js b/jstests/concurrency/fsm_workloads/upsert_unique_index.js index 15a7d1b14fcf0..61c39f3dfeba6 100644 --- a/jstests/concurrency/fsm_workloads/upsert_unique_index.js +++ b/jstests/concurrency/fsm_workloads/upsert_unique_index.js @@ -1,11 +1,9 @@ -'use strict'; - /** * Performs concurrent upsert and delete operations against a small set of documents with a unique * index in place. One specific scenario this test exercises is upsert retry in the case where an * upsert generates an insert, which then fails due to another operation inserting first. */ -var $config = (function() { +export const $config = (function() { const data = { numDocs: 4, getDocValue: function() { diff --git a/jstests/concurrency/fsm_workloads/upsert_where.js b/jstests/concurrency/fsm_workloads/upsert_where.js index 522fef6b32f9d..8ad43bed5085a 100644 --- a/jstests/concurrency/fsm_workloads/upsert_where.js +++ b/jstests/concurrency/fsm_workloads/upsert_where.js @@ -1,5 +1,3 @@ -'use strict'; - /** * upsert_where.js * @@ -7,10 +5,10 @@ * updates it, and queries by the thread that created the documents to verify counts. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/indexed_insert_where.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/indexed_insert_where.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.data.randomBound = 10; $config.data.generateDocumentToInsert = function generateDocumentToInsert() { return {tid: this.tid, x: Random.randInt(this.randomBound)}; diff --git a/jstests/concurrency/fsm_workloads/view_catalog.js b/jstests/concurrency/fsm_workloads/view_catalog.js index 9557a9f93879e..5d09f22d4e4e1 100644 --- a/jstests/concurrency/fsm_workloads/view_catalog.js +++ b/jstests/concurrency/fsm_workloads/view_catalog.js @@ -1,5 +1,3 @@ -'use strict'; - /** * view_catalog.js * @@ -7,7 +5,7 @@ * built on a shared underlying collection. */ -var $config = (function() { +export const $config = (function() { var data = { // Use the workload name as a prefix for the view name, since the workload name is assumed // to be unique. 
@@ -80,18 +78,9 @@ var $config = (function() { assertAlways.commandWorked(bulk.execute()); }; - // This test performs createCollection concurrently from many threads, and createCollection on a - // sharded cluster takes a distributed lock. Since a distributed lock is acquired by repeatedly - // attempting to grab the lock every half second for 20 seconds (a max of 40 attempts), it's - // possible that some thread will be starved by the other threads and fail to grab the lock - // after 40 attempts. To reduce the likelihood of this, we choose threadCount and iterations so - // that threadCount * iterations < 40. - // The threadCount and iterations can be increased once PM-697 ("Remove all usages of - // distributed lock") is complete. - return { - threadCount: 5, - iterations: 5, + threadCount: 10, + iterations: 10, data: data, setup: setup, states: states, diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js index 64d4a81b1f6f9..192ac75b565e7 100644 --- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js +++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_lookup.js @@ -1,5 +1,3 @@ -'use strict'; - /** * view_catalog_cycle_lookup.js * @@ -10,7 +8,7 @@ * @tags: [requires_fcv_51] */ -var $config = (function() { +export const $config = (function() { // Use the workload name as a prefix for the view names, since the workload name is assumed // to be unique. const prefix = 'view_catalog_cycle_lookup_'; diff --git a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js index ef5bd2d6028da..658fa64e397e6 100644 --- a/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js +++ b/jstests/concurrency/fsm_workloads/view_catalog_cycle_with_drop.js @@ -1,5 +1,3 @@ -'use strict'; - /** * view_catalog_cycle_with_drop.js * @@ -7,7 +5,7 @@ * underlying collection. */ -var $config = (function() { +export const $config = (function() { // Use the workload name as a prefix for the view names, since the workload name is assumed // to be unique. const prefix = 'view_catalog_cycle_with_drop_'; @@ -98,18 +96,9 @@ var $config = (function() { } } - // This test performs createCollection concurrently from many threads, and createCollection on a - // sharded cluster takes a distributed lock. Since a distributed lock is acquired by repeatedly - // attempting to grab the lock every half second for 20 seconds (a max of 40 attempts), it's - // possible that some thread will be starved by the other threads and fail to grab the lock - // after 40 attempts. To reduce the likelihood of this, we choose threadCount and iterations so - // that threadCount * iterations < 40. - // The threadCount and iterations can be increased once PM-697 ("Remove all usages of - // distributed lock") is complete. 
- return { - threadCount: 5, - iterations: 5, + threadCount: 10, + iterations: 10, data: data, states: states, startState: 'readFromView', diff --git a/jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js b/jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js index 6531268d2ed9e..52d0d8d844357 100644 --- a/jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js +++ b/jstests/concurrency/fsm_workloads/view_catalog_direct_system_writes.js @@ -1,5 +1,3 @@ -'use strict'; - /** * view_catalog_direct_system_writes.js * @@ -7,10 +5,10 @@ * does so via direct writes to system.views instead of using the collMod or drop commands. Each * worker operates on their own view, built on a shared underlying collection. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/view_catalog.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/view_catalog.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.create = function create(db, collName) { this.counter++; let pipeline = [{$match: {_id: this.counter}}]; diff --git a/jstests/concurrency/fsm_workloads/write_without_shard_key_base.js b/jstests/concurrency/fsm_workloads/write_without_shard_key_base.js index f601c9990d830..a9b2f1bbd4968 100644 --- a/jstests/concurrency/fsm_workloads/write_without_shard_key_base.js +++ b/jstests/concurrency/fsm_workloads/write_without_shard_key_base.js @@ -1,29 +1,26 @@ -'use strict'; - /** * Runs updateOne, deleteOne, and findAndModify without shard key against a sharded cluster. * * @tags: [ - * featureFlagUpdateOneWithoutShardKey, - * requires_fcv_70, + * requires_fcv_71, * requires_sharding, * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); - +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/random_moveChunk_base.js"; // This workload does not make use of random moveChunks, but other workloads that extend this base // workload may. -load('jstests/concurrency/fsm_workloads/random_moveChunk_base.js'); // for $config load('jstests/concurrency/fsm_workload_helpers/balancer.js'); -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.threadCount = 10; - $config.iterations = 10; + $config.iterations = 50; $config.startState = "init"; // Inherited from random_moveChunk_base.js. - $config.data.partitionSize = 100; + $config.data.partitionSize = 50; $config.data.secondaryDocField = 'y'; + $config.data.tertiaryDocField = 'tertiaryField'; $config.data.runningWithStepdowns = TestData.runningWithConfigStepdowns || TestData.runningWithShardStepdowns; @@ -34,6 +31,13 @@ var $config = extendWorkload($config, function($config, $super) { return Math.floor(Math.random() * (max - min + 1)) + min; }; + /** + * Returns a random boolean. + */ + $config.data.generateRandomBool = function generateRandomBool() { + return Math.random() > 0.5; + }; + /** * Generates a random document. */ @@ -67,7 +71,7 @@ var $config = extendWorkload($config, function($config, $super) { * mean the query could target a variable number of shards. 
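The new query type added in the hunk below (queryType === 2) deliberately matches no documents, which, combined with the new upsert:true path, exercises the insert branch of updateOne and findAndModify; a sketch with an illustrative value:

```js
// Equality on a field no document carries, scoped to this thread's partition.
const query = {tertiaryField: {$eq: 123}, tid: this.tid};
// With {upsert: true} this matches nothing, so matchedCount is 0 and
// upsertedId identifies the newly inserted document, which the workload
// then deletes to keep the data set stable.
```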
*/ $config.data.generateRandomQuery = function generateRandomQuery(db, collName) { - const queryType = this.generateRandomInt(0, 2); + const queryType = this.generateRandomInt(0, 3); if (queryType === 0 /* Range query on shard key field. */) { return { [this.defaultShardKeyField]: @@ -78,6 +82,8 @@ var $config = extendWorkload($config, function($config, $super) { [this.secondaryDocField]: {$gte: this.partition.lower, $lte: this.partition.upper - 1} }; + } else if (queryType === 2 /* Equality query on a field that does not exist */) { + return {[this.tertiaryDocField]: {$eq: this.generateRandomInt(0, 500)}, tid: this.tid}; } else { /* Query any document in the partition. */ return {tid: this.tid}; } @@ -93,10 +99,18 @@ var $config = extendWorkload($config, function($config, $super) { const newValue = this.generateRandomInt(this.partition.lower, this.partition.upper - 1); const updateType = this.generateRandomInt(0, 2); const doShardKeyUpdate = this.generateRandomInt(0, 1); + const doUpsert = this.generateRandomBool(); // Used for validation after running the write operation. const containsMatchedDocs = db[collName].findOne(query) != null; + jsTestLog("updateOne state running with the following parameters: \n" + + "query: " + tojson(query) + "\n" + + "updateType: " + updateType + "\n" + + "doShardKeyUpdate: " + doShardKeyUpdate + "\n" + + "doUpsert: " + doUpsert + "\n" + + "containsMatchedDocs: " + containsMatchedDocs); + let res; try { if (updateType === 0 /* Update operator document */) { @@ -104,15 +118,17 @@ var $config = extendWorkload($config, function($config, $super) { [doShardKeyUpdate ? this.defaultShardKeyField : this.secondaryDocField]: newValue }; - res = db[collName].updateOne(query, {$set: update}); + res = db[collName].updateOne(query, {$set: update}, {upsert: doUpsert}); } else if (updateType === 1 /* Replacement Update */) { // Always including a shard key update for replacement documents in order to keep // the new document within the current thread's partition. - res = db[collName].replaceOne(query, { - [this.defaultShardKeyField]: newValue, - [this.secondaryDocField]: newValue, - tid: this.tid - }); + res = db[collName].replaceOne(query, + { + [this.defaultShardKeyField]: newValue, + [this.secondaryDocField]: newValue, + tid: this.tid + }, + {upsert: doUpsert}); } else { /* Aggregation pipeline update */ const update = { [doShardKeyUpdate ? this.defaultShardKeyField : this.secondaryDocField]: @@ -121,7 +137,8 @@ var $config = extendWorkload($config, function($config, $super) { // The $unset will result in a no-op since 'z' is not a field populated in any of // the documents. - res = db[collName].updateOne(query, [{$set: update}, {$unset: "z"}]); + res = db[collName].updateOne( + query, [{$set: update}, {$unset: "z"}], {upsert: doUpsert}); } } catch (err) { if (this.shouldSkipWriteResponseValidation(err)) { @@ -136,6 +153,14 @@ var $config = extendWorkload($config, function($config, $super) { assert.eq(res.matchedCount, 1, query); } else { assert.eq(res.matchedCount, 0, res); + + if (doUpsert) { + assert.neq(res.upsertedId, null, res); + assert.eq(db[collName].find({"_id": res.upsertedId}).itcount(), 1); + + // Clean up, remove upserted document. 
+ assert.commandWorked(db[collName].deleteOne({"_id": res.upsertedId})); + } } assert.contains(res.modifiedCount, [0, 1], res); @@ -158,6 +183,8 @@ var $config = extendWorkload($config, function($config, $super) { ErrorCodes.IncompleteTransactionHistory, ErrorCodes.NoSuchTransaction, ErrorCodes.StaleConfig, + ErrorCodes.ShardCannotRefreshDueToLocksHeld, + ErrorCodes.WriteConflict ]; // If we're running in a stepdown suite, then attempting to update the shard key may @@ -190,10 +217,12 @@ var $config = extendWorkload($config, function($config, $super) { } // This is a possible transient transaction error issue that could occur with - // concurrent moveChunks and transactions (if we happen to run a + // concurrent moveChunks and/or reshardings and transactions (if we happen to run a // WouldChangeOwningShard update). if (res.code === ErrorCodes.LockTimeout || res.code === ErrorCodes.StaleConfig || - res.code === ErrorCodes.ConflictingOperationInProgress) { + res.code === ErrorCodes.ConflictingOperationInProgress || + res.code === ErrorCodes.ShardCannotRefreshDueToLocksHeld || + res.code == ErrorCodes.WriteConflict) { if (!msg.includes(otherErrorsInChangeShardKeyMsg)) { return false; } @@ -241,34 +270,55 @@ var $config = extendWorkload($config, function($config, $super) { // Used for validation after running the write operation. const containsMatchedDocs = db[collName].findOne(query) != null; + // Only test sort when there are matching documents in the collection. + const doSort = containsMatchedDocs && this.generateRandomBool(); + let sortDoc, sortVal; + + // If sorting, ensure that the correct document is modified. + if (doSort) { + sortVal = {[this.secondaryDocField]: this.generateRandomInt(0, 1) === 0 ? -1 : 1}; + sortDoc = db[collName].find(query).sort(sortVal)[0]; + } + let res; const findAndModifyType = this.generateRandomInt(0, 1); if (findAndModifyType === 0 /* Update */) { const newValue = this.generateRandomInt(this.partition.lower, this.partition.upper - 1); const updateType = this.generateRandomInt(0, 2); const doShardKeyUpdate = this.generateRandomInt(0, 1); + const doUpsert = this.generateRandomBool(); + + jsTestLog("findAndModifyUpdate state running with the following parameters: \n" + + "query: " + tojson(query) + "\n" + + "updateType: " + updateType + "\n" + + "doShardKeyUpdate: " + doShardKeyUpdate + "\n" + + "doUpsert: " + doUpsert + "\n" + + "doSort: " + doSort + "\n" + + "containsMatchedDocs: " + containsMatchedDocs); + + const cmdObj = { + findAndModify: collName, + query: query, + upsert: doUpsert, + }; + Object.assign(cmdObj, doSort && {sort: sortVal}); + if (updateType === 0 /* Update operator document */) { const update = { [doShardKeyUpdate ? this.defaultShardKeyField : this.secondaryDocField]: newValue }; - res = db.runCommand({ - findAndModify: collName, - query: query, - update: {$set: update}, - }); + cmdObj.update = {$set: update}; + res = db.runCommand(cmdObj); } else if (updateType === 1 /* Replacement Update */) { // Always including a shard key update for replacement documents in order to // keep the new document within the current thread's partition. 
- res = db.runCommand({ - findAndModify: collName, - query: query, - update: { - [this.defaultShardKeyField]: newValue, - [this.secondaryDocField]: newValue, - tid: this.tid - }, - }); + cmdObj.update = { + [this.defaultShardKeyField]: newValue, + [this.secondaryDocField]: newValue, + tid: this.tid + }; + res = db.runCommand(cmdObj); } else { /* Aggregation pipeline update */ const update = { [doShardKeyUpdate ? this.defaultShardKeyField : this.secondaryDocField]: @@ -277,11 +327,8 @@ var $config = extendWorkload($config, function($config, $super) { // The $unset will result in a no-op since 'z' is not a field populated in any // of the documents. - res = db.runCommand({ - findAndModify: collName, - query: query, - update: [{$set: update}, {$unset: "z"}], - }); + cmdObj.update = [{$set: update}, {$unset: "z"}]; + res = db.runCommand(cmdObj); } if (this.shouldSkipWriteResponseValidation(res)) { @@ -292,18 +339,35 @@ var $config = extendWorkload($config, function($config, $super) { if (containsMatchedDocs) { assert.eq(res.lastErrorObject.n, 1, res); assert.eq(res.lastErrorObject.updatedExisting, true, res); + } else if (doUpsert) { + assert.eq(res.lastErrorObject.n, 1, res); + assert.eq(res.lastErrorObject.updatedExisting, false, res); + assert.neq(res.lastErrorObject.upserted, null, res); + assert.eq(db[collName].find({"_id": res.lastErrorObject.upserted}).itcount(), 1); + + // Clean up, remove upserted document. + assert.commandWorked(db[collName].deleteOne({"_id": res.lastErrorObject.upserted})); } else { assert.eq(res.lastErrorObject.n, 0, res); assert.eq(res.lastErrorObject.updatedExisting, false, res); } } else { /* Remove */ const numMatchedDocsBefore = db[collName].find(query).itcount(); - - res = assert.commandWorked(db.runCommand({ + const cmdObj = { findAndModify: collName, query: query, remove: true, - })); + }; + if (doSort) { + cmdObj.sort = sortVal; + } + + jsTestLog("findAndModifyDelete state running with the following parameters: \n" + + "query: " + tojson(query) + "\n" + + "numMatchedDocsBefore: " + numMatchedDocsBefore + "\n" + + "containsMatchedDocs: " + containsMatchedDocs); + + res = assert.commandWorked(db.runCommand(cmdObj)); const numMatchedDocsAfter = db[collName].find(query).itcount(); @@ -317,11 +381,18 @@ var $config = extendWorkload($config, function($config, $super) { assert.eq(numMatchedDocsAfter, numMatchedDocsBefore); } } + + if (doSort) { + // Ensure correct document was modified by comparing sort field of the sortDoc and + // response image. + assert.eq(sortDoc.secondaryDocField, res.value.secondaryDocField, res); + } }; $config.states.updateOne = function updateOne(db, collName, connCache) { jsTestLog("Running updateOne state"); this.generateAndRunRandomUpdateOp(db, collName); + jsTestLog("Finished updateOne state"); }; $config.states.deleteOne = function deleteOne(db, collName, connCache) { @@ -332,6 +403,10 @@ var $config = extendWorkload($config, function($config, $super) { const containsMatchedDocs = db[collName].findOne(query) != null; const numMatchedDocsBefore = db[collName].find(query).itcount(); + jsTestLog("deleteOne state running with query: " + tojson(query) + "\n" + + "containsMatchedDocs: " + containsMatchedDocs + "\n" + + "numMatchedDocsBefore: " + numMatchedDocsBefore); + let res = assert.commandWorked(db[collName].deleteOne(query)); const numMatchedDocsAfter = db[collName].find(query).itcount(); @@ -345,11 +420,13 @@ var $config = extendWorkload($config, function($config, $super) { // The count should both be 0. 
assert.eq(numMatchedDocsAfter, numMatchedDocsBefore); } + jsTestLog("Finished deleteOne state"); }; $config.states.findAndModify = function findAndModify(db, collName, connCache) { jsTestLog("Running findAndModify state"); this.generateAndRunRandomFindAndModifyOp(db, collName); + jsTestLog("Finished findAndModify state"); }; $config.setup = function setup(db, collName, cluster) { diff --git a/jstests/concurrency/fsm_workloads/write_without_shard_key_with_move_chunk.js b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_move_chunk.js index cd1bed6b593ff..1ac1c5e96e474 100644 --- a/jstests/concurrency/fsm_workloads/write_without_shard_key_with_move_chunk.js +++ b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_move_chunk.js @@ -1,21 +1,20 @@ -'use strict'; - /** * Runs updateOne, deleteOne, and findAndModify without shard key against a sharded cluster while * there are concurrent chunk migrations. * * @tags: [ - * featureFlagUpdateOneWithoutShardKey, - * requires_fcv_70, + * requires_fcv_71, * requires_sharding, * uses_transactions, * ] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); -load('jstests/concurrency/fsm_workloads/write_without_shard_key_base.js'); +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/write_without_shard_key_base.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.startState = "init"; $config.transitions = { init: {moveChunk: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2}, diff --git a/jstests/concurrency/fsm_workloads/write_without_shard_key_with_refine_collection_shard_key.js b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_refine_collection_shard_key.js new file mode 100644 index 0000000000000..42106a22da3b1 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_refine_collection_shard_key.js @@ -0,0 +1,148 @@ +/** + * Runs updateOne, deleteOne, and findAndModify without shard key against a sharded cluster while + * concurrently refining the collection's shard key. + * + * @tags: [ + * requires_fcv_71, + * requires_sharding, + * uses_transactions, + * ] + */ + +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/write_without_shard_key_base.js"; + +export const $config = extendWorkload($baseConfig, function($config, $super) { + $config.startState = "init"; + + // Use a CountDownLatch as if it were a std::atomic shared between all of the + // threads. The collection name is suffixed with the current this.latch.getCount() value + // when concurrent CRUD operations are run against it. With every refineCollectionShardKey, + // call this.latch.countDown() and run CRUD operations against the new collection suffixed + // with this.latch.getCount(). This bypasses the need to drop and reshard the current + // collection with every refineCollectionShardKey since it cannot be achieved in an atomic + // fashion under the FSM infrastructure (meaning CRUD operations would fail). 
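A sketch of the latch-as-counter naming scheme described above; CountDownLatch, getCount, and countDown are the shell helpers the workload itself uses, and the collection name and count here are illustrative:

```js
// Each successful refineCollectionShardKey retires the current collection by
// decrementing the latch; CRUD states always target the collection whose
// suffix is the current count.
const latch = new CountDownLatch(10);
const currentCollName = collName => collName + '_' + latch.getCount();  // e.g. "coll_10"
// ...after refineCollectionShardKey succeeds:
latch.countDown();  // CRUD traffic now targets "coll_9"
```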
+ $config.data.latchCount = $config.iterations; + $config.data.latch = new CountDownLatch($config.data.latchCount); + + $config.data.shardKey = {a: 1}; + $config.data.defaultShardKeyField = 'a'; + $config.data.defaultShardKey = {a: 1}; + + // The variables used by the random_moveChunk_base config in order to move chunks. + $config.data.newShardKey = {a: 1, b: 1}; + $config.data.newShardKeyFields = ["a", "b"]; + + $config.setup = function setup(db, collName, cluster) { + // Proactively create and shard all possible collections suffixed with this.latch.getCount() + // that could receive CRUD operations over the course of the FSM workload. This prevents the + // race that could occur between sharding a collection and creating an index on the new + // shard key (if this step were done after every refineCollectionShardKey). + for (let i = this.latchCount; i >= 0; --i) { + const latchCollName = collName + '_' + i; + let coll = db.getCollection(latchCollName); + assertAlways.commandWorked( + db.adminCommand({shardCollection: coll.getFullName(), key: this.defaultShardKey})); + assertAlways.commandWorked(coll.createIndex(this.newShardKey)); + $super.setup.apply(this, [db, latchCollName, cluster]); + } + }; + + // Occasionally flush the router's cached metadata to verify the metadata for the refined + // collections can be successfully loaded. + $config.states.flushRouterConfig = function flushRouterConfig(db, collName, connCache) { + jsTestLog("Running flushRouterConfig state"); + assert.commandWorked(db.adminCommand({flushRouterConfig: db.getName()})); + }; + + $config.data.getCurrentLatchCollName = function(collName) { + return collName + '_' + this.latch.getCount().toString(); + }; + + $config.states.refineCollectionShardKey = function refineCollectionShardKey( + db, collName, connCache) { + jsTestLog("Running refineCollectionShardKey state."); + const latchCollName = this.getCurrentLatchCollName(collName); + + try { + const cmdObj = { + refineCollectionShardKey: db.getCollection(latchCollName).getFullName(), + key: this.newShardKey + }; + + assertAlways.commandWorked(db.adminCommand(cmdObj)); + } catch (e) { + // There is a race that could occur where two threads run refineCollectionShardKey + // concurrently on the same collection. Since the epoch of the collection changes, + // the later thread may receive a StaleEpoch error, which is an acceptable error. + // + // It is also possible to receive a LockBusy error if refineCollectionShardKey is unable + // to acquire the distlock before timing out due to ongoing migrations acquiring the + // distlock first. 
+ // TODO SERVER-68551: Remove lockbusy error since the balancer won't acquire anymore the + // DDL lock for migrations + if (e.code == ErrorCodes.StaleEpoch || e.code == ErrorCodes.LockBusy) { + print("Ignoring acceptable refineCollectionShardKey error: " + tojson(e)); + return; + } + throw e; + } + + this.shardKeyField[latchCollName] = this.newShardKeyFields; + this.latch.countDown(); + }; + + $config.states.findAndModify = function findAndModify(db, collName, connCache) { + $super.states.findAndModify.apply(this, + [db, this.getCurrentLatchCollName(collName), connCache]); + }; + + $config.states.updateOne = function findAndModify(db, collName, connCache) { + $super.states.updateOne.apply(this, + [db, this.getCurrentLatchCollName(collName), connCache]); + }; + + $config.states.deleteOne = function findAndModify(db, collName, connCache) { + $super.states.deleteOne.apply(this, + [db, this.getCurrentLatchCollName(collName), connCache]); + }; + + $config.transitions = { + init: + {refineCollectionShardKey: 0.25, updateOne: 0.25, deleteOne: 0.25, findAndModify: 0.25}, + updateOne: { + refineCollectionShardKey: 0.2, + updateOne: 0.2, + deleteOne: 0.2, + findAndModify: 0.2, + flushRouterConfig: 0.2 + }, + deleteOne: { + refineCollectionShardKey: 0.2, + updateOne: 0.2, + deleteOne: 0.2, + findAndModify: 0.2, + flushRouterConfig: 0.2 + }, + findAndModify: { + refineCollectionShardKey: 0.2, + updateOne: 0.2, + deleteOne: 0.2, + findAndModify: 0.2, + flushRouterConfig: 0.2 + }, + refineCollectionShardKey: { + refineCollectionShardKey: 0.2, + updateOne: 0.2, + deleteOne: 0.2, + findAndModify: 0.2, + flushRouterConfig: 0.2 + }, + flushRouterConfig: + {refineCollectionShardKey: 0.25, updateOne: 0.25, deleteOne: 0.25, findAndModify: 0.25}, + }; + + return $config; +}); diff --git a/jstests/concurrency/fsm_workloads/write_without_shard_key_with_resharding.js b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_resharding.js new file mode 100644 index 0000000000000..fd23c2d4a4300 --- /dev/null +++ b/jstests/concurrency/fsm_workloads/write_without_shard_key_with_resharding.js @@ -0,0 +1,129 @@ +/** + * Runs updateOne, deleteOne, and findAndModify without shard key against a sharded cluster while + * the collection reshards concurrently. + * + * @tags: [ + * requires_fcv_71, + * requires_sharding, + * uses_transactions, + * ] + */ + +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import { + $config as $baseConfig +} from "jstests/concurrency/fsm_workloads/write_without_shard_key_base.js"; + +export const $config = extendWorkload($baseConfig, function($config, $super) { + $config.startState = "init"; + + // reshardingMinimumOperationDurationMillis is set to 30 seconds when there are stepdowns. + // So in order to limit the overall time for the test, we limit the number of resharding + // operations to maxReshardingExecutions. + const maxReshardingExecutions = TestData.runningWithShardStepdowns ? 
4 : $config.iterations; + const customShardKeyFieldName = "customShardKey"; + + $config.data.shardKeys = []; + $config.data.currentShardKeyIndex = -1; + $config.data.reshardingCount = 0; + + $config.states.init = function init(db, collName, connCache) { + $super.states.init.apply(this, arguments); + this.shardKeys.push({[this.defaultShardKeyField]: 1}); + this.shardKeys.push({[customShardKeyFieldName]: 1}); + this.currentShardKeyIndex = 0; + }; + + $config.data.generateRandomDocument = function generateRandomDocument(tid, partition) { + const doc = $super.data.generateRandomDocument.apply(this, arguments); + assert.neq(partition, null); + doc[customShardKeyFieldName] = this.generateRandomInt(partition.lower, partition.upper - 1); + return doc; + }; + + /** + * Returns a random boolean. + */ + $config.data.generateRandomBool = function generateRandomBool() { + return Math.random() > 0.5; + }; + + $config.data.shouldSkipWriteResponseValidation = function shouldSkipWriteResponseValidation( + res) { + let shouldSkip = $super.data.shouldSkipWriteResponseValidation.apply(this, arguments); + + // This workload does in-place resharding so a retry that is sent + // reshardingMinimumOperationDurationMillis after resharding completes is expected to fail + // with IncompleteTransactionHistory. + if (!shouldSkip && (res.code == ErrorCodes.IncompleteTransactionHistory)) { + return res.errmsg.includes("Incomplete history detected for transaction"); + } + + return shouldSkip; + }; + + $config.states.reshardCollection = function reshardCollection(db, collName, connCache) { + const collection = db.getCollection(collName); + const ns = collection.getFullName(); + jsTestLog("Running reshardCollection state on: " + tojson(ns)); + + if (this.tid === 0 && (this.reshardingCount <= maxReshardingExecutions)) { + const newShardKeyIndex = (this.currentShardKeyIndex + 1) % this.shardKeys.length; + const newShardKey = this.shardKeys[newShardKeyIndex]; + const reshardCollectionCmdObj = { + reshardCollection: ns, + key: newShardKey, + }; + + print(`Started resharding collection ${ns}: ${tojson({newShardKey})}`); + if (TestData.runningWithShardStepdowns) { + assert.soon(function() { + var res = db.adminCommand(reshardCollectionCmdObj); + if (res.ok) { + return true; + } + assert(res.hasOwnProperty("code")); + + // Race to retry. + if (res.code === ErrorCodes.ReshardCollectionInProgress) { + return false; + } + // Unexpected error. + doassert(`Failed with unexpected ${tojson(res)}`); + }, "Reshard command failed", 10 * 1000); + } else { + assert.commandWorked(db.adminCommand(reshardCollectionCmdObj)); + } + print(`Finished resharding collection ${ns}: ${tojson({newShardKey})}`); + + // If resharding fails with SnapshotUnavailable, then this will be incorrect. But + // its fine since reshardCollection will succeed if the new shard key matches the + // existing one. + this.currentShardKeyIndex = newShardKeyIndex; + this.reshardingCount += 1; + + db.printShardingStatus(); + + connCache.mongos.forEach(mongos => { + if (this.generateRandomBool()) { + // Without explicitly refreshing mongoses, retries of retryable write statements + // would always be routed to the donor shards. Non-deterministically refreshing + // enables us to have test coverage for retrying against both the donor and + // recipient shards. 
+ assert.commandWorked(mongos.adminCommand({flushRouterConfig: 1})); + } + }); + } + }; + + $config.transitions = { + init: {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2}, + updateOne: {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2}, + deleteOne: {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2}, + findAndModify: {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2}, + reshardCollection: + {reshardCollection: 0.3, updateOne: 0.3, deleteOne: 0.2, findAndModify: 0.2} + }; + + return $config; +}); diff --git a/jstests/concurrency/fsm_workloads/yield.js b/jstests/concurrency/fsm_workloads/yield.js index 5202e5e4ee879..101bdc0aec1d1 100644 --- a/jstests/concurrency/fsm_workloads/yield.js +++ b/jstests/concurrency/fsm_workloads/yield.js @@ -1,12 +1,10 @@ -'use strict'; - /** * yield.js * * Designed to execute queries and make them yield as much as possible while also updating and * removing documents that they operate on. */ -var $config = (function() { +export const $config = (function() { // The explain used to build the assertion message in advanceCursor() is the only command not // allowed in a transaction used in the query state function. With shard stepdowns, getMores // aren't allowed outside a transaction, so if the explain runs when the suite is configured to diff --git a/jstests/concurrency/fsm_workloads/yield_and_hashed.js b/jstests/concurrency/fsm_workloads/yield_and_hashed.js index 21d43a6d536da..2d8a432b333d3 100644 --- a/jstests/concurrency/fsm_workloads/yield_and_hashed.js +++ b/jstests/concurrency/fsm_workloads/yield_and_hashed.js @@ -1,15 +1,13 @@ -'use strict'; - /* * yield_and_hashed.js (extends yield_rooted_or.js) * * Intersperse queries which use the AND_HASH stage with updates and deletes of documents they may * match. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_rooted_or.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /* * Issue a query that will use the AND_HASH stage. This is a little tricky, so use * stagedebug to force it to happen. Unfortunately this means it can't be batched. diff --git a/jstests/concurrency/fsm_workloads/yield_and_sorted.js b/jstests/concurrency/fsm_workloads/yield_and_sorted.js index 480b9258d7863..465491153c335 100644 --- a/jstests/concurrency/fsm_workloads/yield_and_sorted.js +++ b/jstests/concurrency/fsm_workloads/yield_and_sorted.js @@ -1,15 +1,13 @@ -'use strict'; - /* * yield_and_sorted.js (extends yield_rooted_or.js) * * Intersperse queries which use the AND_SORTED stage with updates and deletes of documents they * may match. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_rooted_or.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /* * Issue a query that will use the AND_SORTED stage. 
This is a little tricky, so use * stagedebug to force it to happen. Unfortunately this means it can't be batched. diff --git a/jstests/concurrency/fsm_workloads/yield_fetch.js b/jstests/concurrency/fsm_workloads/yield_fetch.js index 7b5a000704235..b74c06b9cf7ca 100644 --- a/jstests/concurrency/fsm_workloads/yield_fetch.js +++ b/jstests/concurrency/fsm_workloads/yield_fetch.js @@ -1,15 +1,13 @@ -'use strict'; - /* * yield_fetch.js (extends yield_rooted_or.js) * * Intersperse queries which use the FETCH stage with updates and deletes of documents they may * match. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield_rooted_or.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_rooted_or.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /* * Issue a query that will use the FETCH stage. */ diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near.js b/jstests/concurrency/fsm_workloads/yield_geo_near.js index f3016c5332e90..6a95d952e9c76 100644 --- a/jstests/concurrency/fsm_workloads/yield_geo_near.js +++ b/jstests/concurrency/fsm_workloads/yield_geo_near.js @@ -1,13 +1,11 @@ -'use strict'; - /* * Intersperses $geoNear aggregations with updates and deletes of documents they may match. * @tags: [requires_non_retryable_writes] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.query = function geoNear(db, collName) { // This distance gets about 80 docs around the origin. There is one doc inserted // every 1m^2 and the area scanned by a 5m radius is PI*(5m)^2 ~ 79. diff --git a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js index 7d3af9d2c2477..78d84660ecb75 100644 --- a/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js +++ b/jstests/concurrency/fsm_workloads/yield_geo_near_dedup.js @@ -1,13 +1,11 @@ -'use strict'; - /* * Intersperses $geoNear aggregations with updates of non-geo fields to test deduplication. 
* @tags: [requires_non_retryable_writes] */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield_geo_near.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_geo_near.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { $config.states.remove = function remove(db, collName) { var id = Random.randInt(this.nDocs); var doc = db[collName].findOne({_id: id}); diff --git a/jstests/concurrency/fsm_workloads/yield_id_hack.js b/jstests/concurrency/fsm_workloads/yield_id_hack.js index a0ba2ffcf1606..310d0cb62ec87 100644 --- a/jstests/concurrency/fsm_workloads/yield_id_hack.js +++ b/jstests/concurrency/fsm_workloads/yield_id_hack.js @@ -1,15 +1,13 @@ -'use strict'; - /* * yield_id_hack.js (extends yield.js) * * Intersperse queries which use the ID_HACK stage with updates and deletes of documents they may * match. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /* * Issue a query that will use the ID_HACK stage. This cannot be * batched, so issue a diff --git a/jstests/concurrency/fsm_workloads/yield_rooted_or.js b/jstests/concurrency/fsm_workloads/yield_rooted_or.js index 7b5cd4b3cddb5..09bc1fd4b2c97 100644 --- a/jstests/concurrency/fsm_workloads/yield_rooted_or.js +++ b/jstests/concurrency/fsm_workloads/yield_rooted_or.js @@ -1,5 +1,3 @@ -'use strict'; - /* * yield_rooted_or.js (extends yield.js) * @@ -7,10 +5,10 @@ * match. * Other workloads that need an index on c and d can inherit from this. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /* * Issue a query with an or stage as the root. */ diff --git a/jstests/concurrency/fsm_workloads/yield_sort.js b/jstests/concurrency/fsm_workloads/yield_sort.js index af8fef20510fb..fba02611c2337 100644 --- a/jstests/concurrency/fsm_workloads/yield_sort.js +++ b/jstests/concurrency/fsm_workloads/yield_sort.js @@ -1,15 +1,13 @@ -'use strict'; - /* * yield_sort.js (extends yield_sort_merge.js) * * Intersperse queries which use the SORT stage with updates and deletes of documents they may * match. 
*/ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield_sort_merge.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield_sort_merge.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /* * Execute a query that will use the SORT stage. */ diff --git a/jstests/concurrency/fsm_workloads/yield_sort_merge.js b/jstests/concurrency/fsm_workloads/yield_sort_merge.js index dd57841d75e49..b17a16d1307c0 100644 --- a/jstests/concurrency/fsm_workloads/yield_sort_merge.js +++ b/jstests/concurrency/fsm_workloads/yield_sort_merge.js @@ -1,5 +1,3 @@ -'use strict'; - /* * yield_sort_merge.js (extends yield_fetch.js) * @@ -8,10 +6,10 @@ * * Other workloads that need an index { a: 1, b: 1 } can extend this. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /* * Execute a query that will use the SORT_MERGE stage. */ diff --git a/jstests/concurrency/fsm_workloads/yield_text.js b/jstests/concurrency/fsm_workloads/yield_text.js index 2bd4a5608ffe8..e129a0222d136 100644 --- a/jstests/concurrency/fsm_workloads/yield_text.js +++ b/jstests/concurrency/fsm_workloads/yield_text.js @@ -1,15 +1,13 @@ -'use strict'; - /* * yield_text.js (extends yield.js) * * Intersperse queries which use the TEXT stage with updates and deletes of documents they may * match. */ -load('jstests/concurrency/fsm_libs/extend_workload.js'); // for extendWorkload -load('jstests/concurrency/fsm_workloads/yield.js'); // for $config +import {extendWorkload} from "jstests/concurrency/fsm_libs/extend_workload.js"; +import {$config as $baseConfig} from "jstests/concurrency/fsm_workloads/yield.js"; -var $config = extendWorkload($config, function($config, $super) { +export const $config = extendWorkload($baseConfig, function($config, $super) { /* * Pick a random word and search for it using full text search. */ diff --git a/jstests/concurrency/fsm_workloads/yield_with_drop.js b/jstests/concurrency/fsm_workloads/yield_with_drop.js index c2172f8c4869b..da84e737b7f74 100644 --- a/jstests/concurrency/fsm_workloads/yield_with_drop.js +++ b/jstests/concurrency/fsm_workloads/yield_with_drop.js @@ -4,7 +4,7 @@ * Executes query operations that can yield while the source collection is dropped and recreated. 
*/ -var $config = (function() { +export const $config = (function() { const data = { kAllowedErrors: [ ErrorCodes.ConflictingOperationInProgress, diff --git a/jstests/concurrency/fsm_workloads_add_remove_shards/clusterwide_ops_with_add_remove_shards.js b/jstests/concurrency/fsm_workloads_add_remove_shards/clusterwide_ops_with_add_remove_shards.js index b16ddcd319b7c..fc023262fc0d4 100644 --- a/jstests/concurrency/fsm_workloads_add_remove_shards/clusterwide_ops_with_add_remove_shards.js +++ b/jstests/concurrency/fsm_workloads_add_remove_shards/clusterwide_ops_with_add_remove_shards.js @@ -8,7 +8,7 @@ "use strict"; -var $config = (function() { +export const $config = (function() { // The 'setup' function is run once by the parent thread after the cluster has been initialized, // before the worker threads have been spawned. The 'this' argument is bound as '$config.data'. function setup(db, collName, cluster) { diff --git a/jstests/concurrency/fsm_workloads_no_passthrough_with_mongod/external_data_source.js b/jstests/concurrency/fsm_workloads_no_passthrough_with_mongod/external_data_source.js index 75ffc15938953..2e67203928e31 100644 --- a/jstests/concurrency/fsm_workloads_no_passthrough_with_mongod/external_data_source.js +++ b/jstests/concurrency/fsm_workloads_no_passthrough_with_mongod/external_data_source.js @@ -5,7 +5,7 @@ * * Runs multiple aggregations with $_externalDataSources option concurrently. */ -var $config = (function() { +export const $config = (function() { var data = (() => { Random.setRandomSeed(); diff --git a/jstests/core/administrative/auth1.js b/jstests/core/administrative/auth1.js index 7633644c0da0d..4b41c7e21bb91 100644 --- a/jstests/core/administrative/auth1.js +++ b/jstests/core/administrative/auth1.js @@ -15,7 +15,7 @@ var mydb = db.getSiblingDB('auth1_db'); mydb.dropAllUsers(); -pass = "a" + Math.random(); +let pass = "a" + Math.random(); // print( "password [" + pass + "]" ); mydb.createUser({user: "eliot", pwd: pass, roles: jsTest.basicUserRoles}); @@ -23,7 +23,7 @@ mydb.createUser({user: "eliot", pwd: pass, roles: jsTest.basicUserRoles}); assert(mydb.auth("eliot", pass), "auth failed"); assert(!mydb.auth("eliot", pass + "a"), "auth should have failed"); -pass2 = "b" + Math.random(); +let pass2 = "b" + Math.random(); mydb.changeUserPassword("eliot", pass2); assert(!mydb.auth("eliot", pass), "failed to change password failed"); diff --git a/jstests/core/administrative/auth2.js b/jstests/core/administrative/auth2.js index eb2b47f696f95..4851ac1057503 100644 --- a/jstests/core/administrative/auth2.js +++ b/jstests/core/administrative/auth2.js @@ -8,7 +8,7 @@ // SERVER-724 db.runCommand({logout: 1}); -x = db.runCommand({logout: 1}); +let x = db.runCommand({logout: 1}); assert.eq(1, x.ok, "A"); x = db.logout(); diff --git a/jstests/core/administrative/check_shard_index.js b/jstests/core/administrative/check_shard_index.js index f6bb9f3ee7a4d..bf12d6e4417c6 100644 --- a/jstests/core/administrative/check_shard_index.js +++ b/jstests/core/administrative/check_shard_index.js @@ -11,7 +11,7 @@ // CHECKSHARDINGINDEX TEST UTILS // ------------------------- -f = db.jstests_shardingindex; +let f = db.jstests_shardingindex; f.drop(); // ------------------------- @@ -22,7 +22,8 @@ f.drop(); f.createIndex({x: 1, y: 1}); assert.eq(0, f.count(), "1. 
initial count should be zero"); -res = db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}}); +let res = + db.runCommand({checkShardingIndex: "test.jstests_shardingindex", keyPattern: {x: 1, y: 1}}); assert.eq(true, res.ok, "1a"); f.save({x: 1, y: 1}); diff --git a/jstests/core/administrative/current_op/currentop_cursors.js b/jstests/core/administrative/current_op/currentop_cursors.js index 2d3608b21abb3..483d2d3eb62ba 100644 --- a/jstests/core/administrative/current_op/currentop_cursors.js +++ b/jstests/core/administrative/current_op/currentop_cursors.js @@ -10,8 +10,6 @@ * # This test contains assertions for the hostname that operations run on. * tenant_migration_incompatible, * docker_incompatible, - * # TODO SERVER-70142: Populate planSummary field which is shown in output of $currentOp. - * cqf_incompatible, * ] */ diff --git a/jstests/core/administrative/current_op/currentop_waiting_for_latch.js b/jstests/core/administrative/current_op/currentop_waiting_for_latch.js index b02f219fe02ed..d744050fc9c00 100644 --- a/jstests/core/administrative/current_op/currentop_waiting_for_latch.js +++ b/jstests/core/administrative/current_op/currentop_waiting_for_latch.js @@ -7,7 +7,9 @@ * not_allowed_with_security_token, * assumes_read_concern_unchanged, * assumes_read_preference_unchanged, - * no_selinux + * no_selinux, + * multiversion_incompatible, + * requires_latch_analyzer * ] */ (function() { diff --git a/jstests/core/administrative/getlog1.js b/jstests/core/administrative/getlog1.js index a5989e87c75db..07c623573ef75 100644 --- a/jstests/core/administrative/getlog1.js +++ b/jstests/core/administrative/getlog1.js @@ -5,7 +5,7 @@ // to run: // ./mongo jstests/ -contains = function(arr, obj) { +let contains = function(arr, obj) { var i = arr.length; while (i--) { if (arr[i] === obj) { diff --git a/jstests/core/administrative/list_all_local_sessions.js b/jstests/core/administrative/list_all_local_sessions.js index 434e8ed660c2a..5bd896301a923 100644 --- a/jstests/core/administrative/list_all_local_sessions.js +++ b/jstests/core/administrative/list_all_local_sessions.js @@ -5,6 +5,9 @@ // # former operation must be routed to the primary in a replica set, whereas the latter may be // # routed to a secondary. // assumes_read_preference_unchanged, +// # The config fuzzer may run logical session cache refreshes in the background, which interferes +// # with this test. +// does_not_support_config_fuzzer, // # Sessions are asynchronously flushed to disk, so a stepdown immediately after calling // # startSession may cause this test to fail to find the returned sessionId. // does_not_support_stepdowns, diff --git a/jstests/core/administrative/list_all_sessions.js b/jstests/core/administrative/list_all_sessions.js index 7ecaefd00f9f6..a6b23c8340d3c 100644 --- a/jstests/core/administrative/list_all_sessions.js +++ b/jstests/core/administrative/list_all_sessions.js @@ -1,6 +1,9 @@ // Sessions are asynchronously flushed to disk, so a stepdown immediately after calling // startSession may cause this test to fail to find the returned sessionId. // @tags: [ +// # The config fuzzer may run logical session cache refreshes in the background, which interferes +// # with this test. 
+// does_not_support_config_fuzzer, // does_not_support_stepdowns, // uses_testing_only_commands, // no_selinux, diff --git a/jstests/core/administrative/list_sessions.js b/jstests/core/administrative/list_sessions.js index 18f684ef11ded..316962e9b3ee0 100644 --- a/jstests/core/administrative/list_sessions.js +++ b/jstests/core/administrative/list_sessions.js @@ -6,6 +6,9 @@ // does_not_support_stepdowns, // uses_testing_only_commands, // no_selinux, +// # The config fuzzer may run logical session cache refreshes in the background, which interferes +// # with this test. +// does_not_support_config_fuzzer, // ] // Basic tests for the $listSessions aggregation stage. diff --git a/jstests/core/administrative/profile/profile3.js b/jstests/core/administrative/profile/profile3.js index 68334e72b084f..1ab42d622168e 100644 --- a/jstests/core/administrative/profile/profile3.js +++ b/jstests/core/administrative/profile/profile3.js @@ -14,10 +14,10 @@ var stddb = db; var db = db.getSiblingDB("profile3"); db.dropAllUsers(); -t = db.profile3; +let t = db.profile3; t.drop(); -profileCursor = function(query) { +let profileCursor = function(query) { print("----"); query = query || {}; Object.extend(query, {user: username + "@" + db.getName()}); @@ -25,7 +25,7 @@ profileCursor = function(query) { }; try { - username = "jstests_profile3_user"; + var username = "jstests_profile3_user"; db.createUser({user: username, pwd: "password", roles: jsTest.basicUserRoles}); db.logout(); diff --git a/jstests/core/administrative/profile/profile_agg.js b/jstests/core/administrative/profile/profile_agg.js index 1f273d47c006b..6e673a3fe43b8 100644 --- a/jstests/core/administrative/profile/profile_agg.js +++ b/jstests/core/administrative/profile/profile_agg.js @@ -4,6 +4,7 @@ // does_not_support_stepdowns, // requires_fcv_70, // requires_profiling, +// references_foreign_collection, // ] // Confirms that profiled aggregation execution contains all expected metrics with proper values. diff --git a/jstests/core/administrative/profile/profile_getmore.js b/jstests/core/administrative/profile/profile_getmore.js index 26040af1934f2..cc682dde58a7b 100644 --- a/jstests/core/administrative/profile/profile_getmore.js +++ b/jstests/core/administrative/profile/profile_getmore.js @@ -5,7 +5,8 @@ // requires_getmore, // requires_fcv_70, // requires_profiling, -// cqf_incompatible, +// # TODO: SERVER-70142 populate planSummary. +// cqf_experimental_incompatible, // ] // Confirms that profiled getMore execution contains all expected metrics with proper values. diff --git a/jstests/core/administrative/profile/profile_query_hash.js b/jstests/core/administrative/profile/profile_query_hash.js index d8ddaf06f36fd..735942250f370 100644 --- a/jstests/core/administrative/profile/profile_query_hash.js +++ b/jstests/core/administrative/profile/profile_query_hash.js @@ -8,7 +8,7 @@ // does_not_support_stepdowns, // requires_profiling, // # TODO SERVER-67607: Test plan cache with CQF enabled. -// cqf_incompatible, +// cqf_experimental_incompatible, // ] (function() { "use strict"; diff --git a/jstests/core/administrative/set_param1.js b/jstests/core/administrative/set_param1.js index dfe71da85d22a..35eaa68ca447f 100644 --- a/jstests/core/administrative/set_param1.js +++ b/jstests/core/administrative/set_param1.js @@ -9,6 +9,7 @@ // # migration hook. // tenant_migration_incompatible, // ] +load("jstests/libs/fixture_helpers.js"); // Tests for accessing logLevel server parameter using getParameter/setParameter commands // and shell helpers. 
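For context on what set_param1.js exercises, here is a minimal sketch of the getParameter/setParameter round trip on logLevel, with mongod-only parameters guarded the same way the patch now guards them (a sketch only, assuming a live mongo shell connection with FixtureHelpers loaded as above):

    // Read the current verbosity, bump it (the previous value is reported in 'was'),
    // then restore it.
    const before = assert.commandWorked(db.adminCommand({getParameter: 1, logLevel: 1})).logLevel;
    const raised = assert.commandWorked(db.adminCommand({setParameter: 1, logLevel: 2}));
    assert.eq(raised.was, before);
    assert.commandWorked(db.adminCommand({setParameter: 1, logLevel: before}));

    // Parameters that only exist on mongod (e.g. oplogFetcherSteadyStateMaxFetcherRestarts)
    // are skipped on mongos via the shared helper rather than an ad hoc hello() check:
    if (!FixtureHelpers.isMongos(db)) {
        // ... mongod-only getParameter/setParameter assertions ...
    }
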
@@ -165,8 +166,7 @@ assert(!result, assert.commandWorked( db.adminCommand({"setParameter": 1, redactEncryptedFields: old.redactEncryptedFields})); -const isMongos = (db.hello().msg === 'isdbgrid'); -if (!isMongos) { +if (!FixtureHelpers.isMongos(db)) { // // oplogFetcherSteadyStateMaxFetcherRestarts // diff --git a/jstests/core/api/api_version_create.js b/jstests/core/api/api_version_create.js index e3a4ed00f6671..f42dfd2c1b59d 100644 --- a/jstests/core/api/api_version_create.js +++ b/jstests/core/api/api_version_create.js @@ -36,4 +36,4 @@ assert.commandFailedWithCode(testDB.runCommand({ apiStrict: true, }), ErrorCodes.InvalidOptions); -})(); \ No newline at end of file +})(); diff --git a/jstests/core/api/api_version_new_50_language_features.js b/jstests/core/api/api_version_new_50_language_features.js index 14c4f7e6aa84b..e3a8255e29e12 100644 --- a/jstests/core/api/api_version_new_50_language_features.js +++ b/jstests/core/api/api_version_new_50_language_features.js @@ -11,9 +11,10 @@ "use strict"; load("jstests/libs/api_version_helpers.js"); // For 'APIVersionHelpers'. +const testDb = db.getSiblingDB(jsTestName()); const collName = "api_version_new_50_language_features"; const viewName = collName + "_view"; -const coll = db[collName]; +const coll = testDb[collName]; coll.drop(); assert.commandWorked(coll.insert({a: 1, date: new ISODate()})); @@ -35,7 +36,7 @@ for (let pipeline of stablePipelines) { APIVersionHelpers.assertViewSucceedsWithAPIStrict(pipeline, viewName, collName); // Assert error is not thrown when running without apiStrict=true. - assert.commandWorked(db.runCommand({ + assert.commandWorked(testDb.runCommand({ aggregate: coll.getName(), pipeline: pipeline, apiVersion: "1", @@ -62,12 +63,12 @@ APIVersionHelpers.assertViewSucceedsWithAPIStrict(setWindowFieldsPipeline, viewN // Creating a collection with dotted paths is allowed with apiStrict:true. -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ create: 'new_50_features_validator', validator: {$expr: {$eq: [{$getField: {input: "$$ROOT", field: "dotted.path"}}, 2]}}, apiVersion: "1", apiStrict: true })); -assert.commandWorked(db.runCommand({drop: 'new_50_features_validator'})); +assert.commandWorked(testDb.runCommand({drop: 'new_50_features_validator'})); })(); diff --git a/jstests/core/api/api_version_new_51_language_features.js b/jstests/core/api/api_version_new_51_language_features.js index 48e73d7e052b0..6e4f7280f9748 100644 --- a/jstests/core/api/api_version_new_51_language_features.js +++ b/jstests/core/api/api_version_new_51_language_features.js @@ -11,9 +11,10 @@ "use strict"; load("jstests/libs/api_version_helpers.js"); // For 'APIVersionHelpers'. +const testDb = db.getSiblingDB(jsTestName()); const collName = "api_version_new_51_language_features"; const viewName = collName + "_view"; -const coll = db[collName]; +const coll = testDb[collName]; coll.drop(); assert.commandWorked(coll.insert({a: 1, date: new ISODate()})); @@ -30,7 +31,7 @@ for (let pipeline of stablePipelines) { APIVersionHelpers.assertViewSucceedsWithAPIStrict(pipeline, viewName, collName); // Assert error is not thrown when running without apiStrict=true. 
- assert.commandWorked(db.runCommand({ + assert.commandWorked(testDb.runCommand({ aggregate: coll.getName(), pipeline: pipeline, apiVersion: "1", diff --git a/jstests/core/api/api_version_new_52_language_features.js b/jstests/core/api/api_version_new_52_language_features.js index b1ab2107fbf96..e2073d5ed2453 100644 --- a/jstests/core/api/api_version_new_52_language_features.js +++ b/jstests/core/api/api_version_new_52_language_features.js @@ -13,9 +13,10 @@ "use strict"; load("jstests/libs/api_version_helpers.js"); // For 'APIVersionHelpers'. +const testDb = db.getSiblingDB(jsTestName()); const collName = "api_version_new_52_language_features"; const viewName = collName + "_view"; -const coll = db[collName]; +const coll = testDb[collName]; coll.drop(); assert.commandWorked(coll.insert({a: 1, arr: [2, 1, 4]})); @@ -89,7 +90,7 @@ for (const pipeline of stablePipelines) { APIVersionHelpers.assertViewSucceedsWithAPIStrict(pipeline, viewName, collName); // Assert error is not thrown when running without apiStrict=true. - assert.commandWorked(db.runCommand({ + assert.commandWorked(testDb.runCommand({ aggregate: coll.getName(), pipeline: pipeline, apiVersion: "1", diff --git a/jstests/core/api/api_version_parameters.js b/jstests/core/api/api_version_parameters.js index cb397395d7ff7..2d41aadf5f674 100644 --- a/jstests/core/api/api_version_parameters.js +++ b/jstests/core/api/api_version_parameters.js @@ -12,74 +12,77 @@ (function() { "use strict"; +const testDb = db.getSiblingDB(jsTestName()); + // Test parsing logic on command included in API V1. // If the client passed apiStrict, they must also pass apiVersion. -assert.commandFailedWithCode(db.runCommand({ping: 1, apiStrict: true}), +assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiStrict: true}), 4886600, "Provided apiStrict without passing apiVersion"); // If the client passed apiDeprecationErrors, they must also pass apiVersion. -assert.commandFailedWithCode(db.runCommand({ping: 1, apiDeprecationErrors: false}), +assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiDeprecationErrors: false}), 4886600, "Provided apiDeprecationErrors without passing apiVersion"); // If the client passed apiVersion, it must be of type string. -assert.commandFailedWithCode(db.runCommand({ping: 1, apiVersion: 1}), +assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiVersion: 1}), ErrorCodes.TypeMismatch, "apiVersion' is the wrong type 'double', expected type 'string'"); // If the client passed apiVersion, its value must be "1". -assert.commandFailedWithCode(db.runCommand({ping: 1, apiVersion: "2"}), +assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiVersion: "2"}), ErrorCodes.APIVersionError, "API version must be \"1\""); // If the client passed apiStrict, it must be of type boolean. -assert.commandFailedWithCode(db.runCommand({ping: 1, apiVersion: "1", apiStrict: "true"}), +assert.commandFailedWithCode(testDb.runCommand({ping: 1, apiVersion: "1", apiStrict: "true"}), ErrorCodes.TypeMismatch, "apiStrict' is the wrong type 'string', expected type 'boolean'"); // If the client passed apiDeprecationErrors, it must be of type boolean. assert.commandFailedWithCode( - db.runCommand({ping: 1, apiVersion: "1", apiDeprecationErrors: "false"}), + testDb.runCommand({ping: 1, apiVersion: "1", apiDeprecationErrors: "false"}), ErrorCodes.TypeMismatch, "apiDeprecationErrors' is the wrong type 'string', expected type 'boolean'"); // Sanity check that command works with proper parameters. 
assert.commandWorked( - db.runCommand({ping: 1, apiVersion: "1", apiStrict: true, apiDeprecationErrors: true})); + testDb.runCommand({ping: 1, apiVersion: "1", apiStrict: true, apiDeprecationErrors: true})); assert.commandWorked( - db.runCommand({ping: 1, apiVersion: "1", apiStrict: false, apiDeprecationErrors: false})); -assert.commandWorked(db.runCommand({ping: 1, apiVersion: "1"})); + testDb.runCommand({ping: 1, apiVersion: "1", apiStrict: false, apiDeprecationErrors: false})); +assert.commandWorked(testDb.runCommand({ping: 1, apiVersion: "1"})); // Test parsing logic on command not included in API V1. -assert.commandWorked(db.runCommand({listCommands: 1, apiVersion: "1"})); +assert.commandWorked(testDb.runCommand({listCommands: 1, apiVersion: "1"})); // If the client passed apiStrict: true, but the command is not in V1, reply with // APIStrictError. -assert.commandFailedWithCode(db.runCommand({listCommands: 1, apiVersion: "1", apiStrict: true}), +assert.commandFailedWithCode(testDb.runCommand({listCommands: 1, apiVersion: "1", apiStrict: true}), ErrorCodes.APIStrictError); -assert.commandFailedWithCode(db.runCommand({isMaster: 1, apiVersion: "1", apiStrict: true}), +assert.commandFailedWithCode(testDb.runCommand({isMaster: 1, apiVersion: "1", apiStrict: true}), ErrorCodes.APIStrictError); -assert.commandWorked(db.runCommand({listCommands: 1, apiVersion: "1", apiDeprecationErrors: true})); +assert.commandWorked( + testDb.runCommand({listCommands: 1, apiVersion: "1", apiDeprecationErrors: true})); // Test parsing logic of command deprecated in API V1. -assert.commandWorked(db.runCommand({testDeprecation: 1, apiVersion: "1"})); -assert.commandWorked(db.runCommand({testDeprecation: 1, apiVersion: "1", apiStrict: true})); +assert.commandWorked(testDb.runCommand({testDeprecation: 1, apiVersion: "1"})); +assert.commandWorked(testDb.runCommand({testDeprecation: 1, apiVersion: "1", apiStrict: true})); // If the client passed apiDeprecationErrors: true, but the command is // deprecated in API Version 1, reply with APIDeprecationError. assert.commandFailedWithCode( - db.runCommand({testDeprecation: 1, apiVersion: "1", apiDeprecationErrors: true}), + testDb.runCommand({testDeprecation: 1, apiVersion: "1", apiDeprecationErrors: true}), ErrorCodes.APIDeprecationError, "Provided apiDeprecationErrors: true, but the invoked command's deprecatedApiVersions() does not include \"1\""); // Assert APIStrictError message for unsupported commands contains link to docs site var err = assert.commandFailedWithCode( - db.runCommand({buildInfo: 1, apiStrict: true, apiVersion: "1"}), ErrorCodes.APIStrictError); + testDb.runCommand({buildInfo: 1, apiStrict: true, apiVersion: "1"}), ErrorCodes.APIStrictError); assert.includes(err.errmsg, 'buildInfo'); assert.includes(err.errmsg, 'dochub.mongodb.org'); // Test writing to system.js fails. 
assert.commandFailedWithCode( - db.runCommand({ + testDb.runCommand({ insert: "system.js", documents: [{ _id: "shouldntExist", @@ -93,7 +96,7 @@ assert.commandFailedWithCode( ErrorCodes.APIStrictError, "Provided apiStrict:true, but the command insert attempts to write to system.js"); assert.commandFailedWithCode( - db.runCommand({ + testDb.runCommand({ update: "system.js", updates: [{ q: { @@ -115,7 +118,7 @@ assert.commandFailedWithCode( ErrorCodes.APIStrictError, "Provided apiStrict:true, but the command update attempts to write to system.js"); assert.commandFailedWithCode( - db.runCommand({ + testDb.runCommand({ delete: "system.js", deletes: [{ q: { @@ -132,7 +135,7 @@ assert.commandFailedWithCode( ErrorCodes.APIStrictError, "Provided apiStrict:true, but the command delete attempts to write to system.js"); assert.commandFailedWithCode( - db.runCommand({ + testDb.runCommand({ findAndModify: "system.js", query: { _id: "shouldExist", @@ -147,6 +150,6 @@ assert.commandFailedWithCode( ErrorCodes.APIStrictError, "Provided apiStrict:true, but the command findAndModify attempts to write to system.js"); // Test reading from system.js succeeds. -assert.commandWorked(db.runCommand( +assert.commandWorked(testDb.runCommand( {find: "system.js", filter: {_id: "shouldExist"}, apiVersion: "1", apiStrict: true})); })(); diff --git a/jstests/core/api/api_version_pipeline_stages.js b/jstests/core/api/api_version_pipeline_stages.js index c9772e4fa0c01..f86274d783060 100644 --- a/jstests/core/api/api_version_pipeline_stages.js +++ b/jstests/core/api/api_version_pipeline_stages.js @@ -13,8 +13,9 @@ (function() { "use strict"; +const testDb = db.getSiblingDB(jsTestName()); const collName = "api_version_pipeline_stages"; -const coll = db[collName]; +const coll = testDb[collName]; coll.drop(); coll.insert({a: 1}); @@ -32,7 +33,7 @@ const unstablePipelines = [ ]; function assertAggregateFailsWithAPIStrict(pipeline) { - assert.commandFailedWithCode(db.runCommand({ + assert.commandFailedWithCode(testDb.runCommand({ aggregate: collName, pipeline: pipeline, cursor: {}, @@ -47,7 +48,7 @@ for (let pipeline of unstablePipelines) { assertAggregateFailsWithAPIStrict(pipeline); // Assert error thrown when creating a view on a pipeline with stages not in API Version 1. - assert.commandFailedWithCode(db.runCommand({ + assert.commandFailedWithCode(testDb.runCommand({ create: 'api_version_pipeline_stages_should_fail', viewOn: collName, pipeline: pipeline, @@ -67,14 +68,14 @@ assertAggregateFailsWithAPIStrict([{$collStats: {latencyStats: {}, queryExecStat assertAggregateFailsWithAPIStrict( [{$collStats: {latencyStats: {}, storageStats: {scale: 1024}, queryExecStats: {}}}]); -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ aggregate: collName, pipeline: [{$collStats: {}}], cursor: {}, apiVersion: "1", apiStrict: true })); -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ aggregate: collName, pipeline: [{$collStats: {count: {}}}], cursor: {}, @@ -86,7 +87,7 @@ assert.commandWorked(db.runCommand({ // compute the count, we get back a single result in the first batch - no getMore is required. // This test is meant to mimic a drivers test and serve as a warning if we may be making a breaking // change for the drivers. 
-const cmdResult = assert.commandWorked(db.runCommand({ +const cmdResult = assert.commandWorked(testDb.runCommand({ aggregate: collName, pipeline: [{$collStats: {count: {}}}, {$group: {_id: 1, count: {$sum: "$count"}}}], cursor: {}, diff --git a/jstests/core/api/api_version_test_expression.js b/jstests/core/api/api_version_test_expression.js index 41bbd9c040270..23edac27c4db2 100644 --- a/jstests/core/api/api_version_test_expression.js +++ b/jstests/core/api/api_version_test_expression.js @@ -8,17 +8,19 @@ * assumes_unsharded_collection, * uses_api_parameters, * no_selinux, + * references_foreign_collection, * ] */ (function() { "use strict"; +const testDb = db.getSiblingDB(jsTestName()); const collName = "api_version_test_expression"; -const coll = db[collName]; +const coll = testDb[collName]; coll.drop(); const collForeignName = collName + "_foreign"; -const collForeign = db[collForeignName]; +const collForeign = testDb[collForeignName]; collForeign.drop(); for (let i = 0; i < 5; i++) { @@ -30,14 +32,14 @@ for (let i = 0; i < 5; i++) { // true}. let pipeline = [{$project: {v: {$_testApiVersion: {unstable: true}}}}]; assert.commandFailedWithCode( - db.runCommand( + testDb.runCommand( {aggregate: collName, pipeline: pipeline, cursor: {}, apiStrict: true, apiVersion: "1"}), ErrorCodes.APIStrictError); // Assert error thrown when command specifies {apiDeprecationErrors: true} and expression specifies // {deprecated: true} pipeline = [{$project: {v: {$_testApiVersion: {deprecated: true}}}}]; -assert.commandFailedWithCode(db.runCommand({ +assert.commandFailedWithCode(testDb.runCommand({ aggregate: collName, pipeline: pipeline, cursor: {}, @@ -49,7 +51,7 @@ assert.commandFailedWithCode(db.runCommand({ // Assert error thrown when the command specifies apiStrict:true and an inner pipeline contains an // unstable expression. const unstableInnerPipeline = [{$project: {v: {$_testApiVersion: {unstable: true}}}}]; -assert.commandFailedWithCode(db.runCommand({ +assert.commandFailedWithCode(testDb.runCommand({ aggregate: collName, pipeline: [{$lookup: {from: collForeignName, as: "output", pipeline: unstableInnerPipeline}}], cursor: {}, @@ -57,7 +59,7 @@ assert.commandFailedWithCode(db.runCommand({ apiVersion: "1" }), ErrorCodes.APIStrictError); -assert.commandFailedWithCode(db.runCommand({ +assert.commandFailedWithCode(testDb.runCommand({ aggregate: collName, pipeline: [{$unionWith: {coll: collForeignName, pipeline: unstableInnerPipeline}}], cursor: {}, @@ -68,14 +70,14 @@ assert.commandFailedWithCode(db.runCommand({ // Assert command worked when the command specifies apiStrict:false and an inner pipeline contains // an unstable expression. -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ aggregate: collName, pipeline: [{$lookup: {from: collForeignName, as: "output", pipeline: unstableInnerPipeline}}], cursor: {}, apiStrict: false, apiVersion: "1" })); -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ aggregate: collName, pipeline: [{$unionWith: {coll: collForeignName, pipeline: unstableInnerPipeline}}], cursor: {}, @@ -86,7 +88,7 @@ assert.commandWorked(db.runCommand({ // Assert error thrown when the command specifies apiDeprecationErrors:true and an inner pipeline // contains a deprecated expression. 
const deprecatedInnerPipeline = [{$project: {v: {$_testApiVersion: {deprecated: true}}}}]; -assert.commandFailedWithCode(db.runCommand({ +assert.commandFailedWithCode(testDb.runCommand({ aggregate: collName, pipeline: [{$lookup: {from: collForeignName, as: "output", pipeline: deprecatedInnerPipeline}}], cursor: {}, @@ -94,7 +96,7 @@ assert.commandFailedWithCode(db.runCommand({ apiVersion: "1" }), ErrorCodes.APIDeprecationError); -assert.commandFailedWithCode(db.runCommand({ +assert.commandFailedWithCode(testDb.runCommand({ aggregate: collName, pipeline: [{$unionWith: {coll: collForeignName, pipeline: deprecatedInnerPipeline}}], cursor: {}, @@ -105,14 +107,14 @@ assert.commandFailedWithCode(db.runCommand({ // Assert command worked when the command specifies apiDeprecationErrors:false and an inner pipeline // contains a deprecated expression. -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ aggregate: collName, pipeline: [{$lookup: {from: collForeignName, as: "output", pipeline: deprecatedInnerPipeline}}], cursor: {}, apiDeprecationErrors: false, apiVersion: "1" })); -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ aggregate: collName, pipeline: [{$unionWith: {coll: collForeignName, pipeline: deprecatedInnerPipeline}}], cursor: {}, @@ -122,24 +124,24 @@ assert.commandWorked(db.runCommand({ // Test that command successfully runs to completion without any API parameters. pipeline = [{$project: {v: {$_testApiVersion: {unstable: true}}}}]; -assert.commandWorked(db.runCommand({aggregate: collName, pipeline: pipeline, cursor: {}})); +assert.commandWorked(testDb.runCommand({aggregate: collName, pipeline: pipeline, cursor: {}})); // Create a view with {apiStrict: true}. -db.view.drop(); -assert.commandWorked(db.runCommand( +testDb.view.drop(); +assert.commandWorked(testDb.runCommand( {create: "view", viewOn: collName, pipeline: [], apiStrict: true, apiVersion: "1"})); // find() on views should work normally if 'apiStrict' is true. -assert.commandWorked(db.runCommand({find: "view", apiStrict: true, apiVersion: "1"})); +assert.commandWorked(testDb.runCommand({find: "view", apiStrict: true, apiVersion: "1"})); // This command will work because API parameters are not inherited from views. -assert.commandWorked(db.runCommand({aggregate: "view", pipeline: pipeline, cursor: {}})); +assert.commandWorked(testDb.runCommand({aggregate: "view", pipeline: pipeline, cursor: {}})); assert.commandFailedWithCode( - db.runCommand( + testDb.runCommand( {aggregate: "view", pipeline: pipeline, cursor: {}, apiVersion: "1", apiStrict: true}), ErrorCodes.APIStrictError); // Create a view with 'unstable' parameter should fail with 'apiStrict'. -db.unstableView.drop(); -assert.commandFailedWithCode(db.runCommand({ +testDb.unstableView.drop(); +assert.commandFailedWithCode(testDb.runCommand({ create: "unstableView", viewOn: collName, pipeline: pipeline, @@ -149,18 +151,18 @@ assert.commandFailedWithCode(db.runCommand({ ErrorCodes.APIStrictError); // Create a view with 'unstable' should be allowed without 'apiStrict'. -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ create: "unstableView", viewOn: collName, pipeline: pipeline, apiVersion: "1", apiStrict: false })); -assert.commandWorked(db.runCommand({aggregate: "unstableView", pipeline: [], cursor: {}})); +assert.commandWorked(testDb.runCommand({aggregate: "unstableView", pipeline: [], cursor: {}})); // This commmand will fail even with the empty pipeline because of the view. 
assert.commandFailedWithCode( - db.runCommand( + testDb.runCommand( {aggregate: "unstableView", pipeline: [], cursor: {}, apiVersion: "1", apiStrict: true}), ErrorCodes.APIStrictError); @@ -169,33 +171,33 @@ let validator = {$expr: {$_testApiVersion: {unstable: true}}}; let validatedCollName = collName + "_validated"; // Creating a collection with the unstable validator is not allowed with apiStrict:true. -db[validatedCollName].drop(); +testDb[validatedCollName].drop(); assert.commandFailedWithCode( - db.runCommand( + testDb.runCommand( {create: validatedCollName, validator: validator, apiVersion: "1", apiStrict: true}), ErrorCodes.APIStrictError); // Run create and insert commands without apiStrict:true and verify that it is successful. -assert.commandWorked(db.runCommand( +assert.commandWorked(testDb.runCommand( {create: validatedCollName, validator: validator, apiVersion: "1", apiStrict: false})); assert.commandWorked( - db[validatedCollName].runCommand({insert: validatedCollName, documents: [{num: 1}]})); + testDb[validatedCollName].runCommand({insert: validatedCollName, documents: [{num: 1}]})); // Specifying apiStrict: true results in an error. assert.commandFailedWithCode( - db[validatedCollName].runCommand( + testDb[validatedCollName].runCommand( {insert: validatedCollName, documents: [{num: 1}], apiVersion: "1", apiStrict: true}), ErrorCodes.APIStrictError); // Recreate the validator containing a deprecated test expression. -db[validatedCollName].drop(); +testDb[validatedCollName].drop(); validator = { $expr: {$_testApiVersion: {deprecated: true}} }; // Creating a collection with the deprecated validator is not allowed with // apiDeprecationErrors:true. -assert.commandFailedWithCode(db.runCommand({ +assert.commandFailedWithCode(testDb.runCommand({ create: validatedCollName, validator: validator, apiVersion: "1", @@ -205,17 +207,17 @@ assert.commandFailedWithCode(db.runCommand({ // Run create and insert commands without apiDeprecationErrors:true and verify that it is // successful. -assert.commandWorked(db.runCommand({ +assert.commandWorked(testDb.runCommand({ create: validatedCollName, validator: validator, apiVersion: "1", apiDeprecationErrors: false, })); assert.commandWorked( - db[validatedCollName].runCommand({insert: validatedCollName, documents: [{num: 1}]})); + testDb[validatedCollName].runCommand({insert: validatedCollName, documents: [{num: 1}]})); // Specifying apiDeprecationErrors: true results in an error. -assert.commandFailedWithCode(db[validatedCollName].runCommand({ +assert.commandFailedWithCode(testDb[validatedCollName].runCommand({ insert: validatedCollName, documents: [{num: 1}], apiVersion: "1", @@ -226,12 +228,13 @@ assert.commandFailedWithCode(db[validatedCollName].runCommand({ // Test that API version parameters are inherited into the inner command of the explain command. function checkExplainInnerCommandGetsAPIVersionParameters(explainedCmd, errCode) { assert.commandFailedWithCode( - db.runCommand( + testDb.runCommand( {explain: explainedCmd, apiVersion: "1", apiDeprecationErrors: true, apiStrict: true}), errCode); // If 'apiStrict: false' the inner aggregate command will execute successfully. 
- const explainRes = db.runCommand({explain: explainedCmd, apiVersion: "1", apiStrict: false}); + const explainRes = + testDb.runCommand({explain: explainedCmd, apiVersion: "1", apiStrict: false}); assert(explainRes.hasOwnProperty('executionStats'), explainRes); assert.eq(explainRes['executionStats']['executionSuccess'], true, explainRes); } @@ -257,6 +260,6 @@ findCmd = { }; checkExplainInnerCommandGetsAPIVersionParameters(findCmd, ErrorCodes.APIDeprecationError); -db[validatedCollName].drop(); -db.unstableView.drop(); +testDb[validatedCollName].drop(); +testDb.unstableView.drop(); })(); diff --git a/jstests/core/api/api_version_unstable_fields.js b/jstests/core/api/api_version_unstable_fields.js index c34189c759df9..768665c3e476e 100644 --- a/jstests/core/api/api_version_unstable_fields.js +++ b/jstests/core/api/api_version_unstable_fields.js @@ -12,8 +12,9 @@ (function() { "use strict"; +const testDb = db.getSiblingDB(jsTestName()); const collName = "api_version_unstable_fields"; -assert.commandWorked(db[collName].insert({a: 1})); +assert.commandWorked(testDb[collName].insert({a: 1})); const unstableFieldsForAggregate = { isMapReduceCommand: false, @@ -44,8 +45,9 @@ function testCommandWithUnstableFields(command, containsUnstableFields) { const cmd = JSON.parse(JSON.stringify(command)); const cmdWithUnstableField = Object.assign(cmd, {[field]: containsUnstableFields[field]}); - assert.commandFailedWithCode( - db.runCommand(cmdWithUnstableField), ErrorCodes.APIStrictError, cmdWithUnstableField); + assert.commandFailedWithCode(testDb.runCommand(cmdWithUnstableField), + ErrorCodes.APIStrictError, + cmdWithUnstableField); } } @@ -73,16 +75,16 @@ let createIndexesCmd = { apiStrict: true, }; assert.commandFailedWithCode( - db.runCommand(createIndexesCmd), ErrorCodes.APIStrictError, createIndexesCmd); + testDb.runCommand(createIndexesCmd), ErrorCodes.APIStrictError, createIndexesCmd); createIndexesCmd["indexes"] = [{key: {a: "geoHaystack"}, name: "a_1"}]; assert.commandFailedWithCode( - db.runCommand(createIndexesCmd), ErrorCodes.CannotCreateIndex, createIndexesCmd); + testDb.runCommand(createIndexesCmd), ErrorCodes.CannotCreateIndex, createIndexesCmd); // Test that collMod command with an unstable field ('prepareUnique') in an inner struct throws when // 'apiStrict' is set to true. assert.commandWorked( - db.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]})); + testDb.runCommand({createIndexes: collName, indexes: [{key: {a: 1}, name: "a_1"}]})); let collModCommand = {collMod: "col", apiVersion: "1", apiStrict: true}; testCommandWithUnstableFields(collModCommand, {index: {name: "a_1", prepareUnique: true}}); }()); diff --git a/jstests/core/api/api_version_unstable_indexes.js b/jstests/core/api/api_version_unstable_indexes.js index cbd612654349c..5064e6edc0671 100644 --- a/jstests/core/api/api_version_unstable_indexes.js +++ b/jstests/core/api/api_version_unstable_indexes.js @@ -14,15 +14,13 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'getWinningPlan'. -load("jstests/libs/fixture_helpers.js"); // For 'isMongos'. -load("jstests/libs/columnstore_util.js"); // For 'setUpServerForColumnStoreIndexTest'. +import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; +load("jstests/libs/fixture_helpers.js"); // For 'isMongos'. 
+import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; +const testDb = db.getSiblingDB(jsTestName()); const collName = "api_verision_unstable_indexes"; -const coll = db[collName]; +const coll = testDb[collName]; coll.drop(); assert.commandWorked(coll.insert([ @@ -36,14 +34,14 @@ assert.commandWorked(coll.createIndex({subject: "text"})); assert.commandWorked(coll.createIndex({"views": 1}, {sparse: true})); // The "text" index, "subject_text", can be used normally. -if (!FixtureHelpers.isMongos(db)) { +if (!FixtureHelpers.isMongos(testDb)) { const explainRes = assert.commandWorked( - db.runCommand({explain: {"find": collName, "filter": {$text: {$search: "coffee"}}}})); + testDb.runCommand({explain: {"find": collName, "filter": {$text: {$search: "coffee"}}}})); assert.eq(getWinningPlan(explainRes.queryPlanner).indexName, "subject_text", explainRes); } // No "text" index can be used for $text search as the "text" index is excluded from API version 1. -assert.commandFailedWithCode(db.runCommand({ +assert.commandFailedWithCode(testDb.runCommand({ explain: {"find": collName, "filter": {$text: {$search: "coffee"}}}, apiVersion: "1", apiStrict: true @@ -51,7 +49,7 @@ assert.commandFailedWithCode(db.runCommand({ ErrorCodes.NoQueryExecutionPlans); // Can not hint a sparse index which is excluded from API version 1 with 'apiStrict: true'. -assert.commandFailedWithCode(db.runCommand({ +assert.commandFailedWithCode(testDb.runCommand({ "find": collName, "filter": {views: 50}, "hint": {views: 1}, @@ -60,15 +58,15 @@ assert.commandFailedWithCode(db.runCommand({ }), ErrorCodes.BadValue); -if (!FixtureHelpers.isMongos(db)) { - const explainRes = assert.commandWorked( - db.runCommand({explain: {"find": collName, "filter": {views: 50}, "hint": {views: 1}}})); +if (!FixtureHelpers.isMongos(testDb)) { + const explainRes = assert.commandWorked(testDb.runCommand( + {explain: {"find": collName, "filter": {views: 50}, "hint": {views: 1}}})); assert.eq(getWinningPlan(explainRes.queryPlanner).inputStage.indexName, "views_1", explainRes); } -if (setUpServerForColumnStoreIndexTest(db)) { +if (setUpServerForColumnStoreIndexTest(testDb)) { // Column store indexes cannot be created with apiStrict: true. - assert.commandFailedWithCode(db.runCommand({ + assert.commandFailedWithCode(testDb.runCommand({ createIndexes: coll.getName(), indexes: [{key: {"$**": "columnstore"}, name: "$**_columnstore"}], apiVersion: "1", @@ -82,14 +80,14 @@ if (setUpServerForColumnStoreIndexTest(db)) { const projection = {_id: 0, x: 1}; // Sanity check that this query can use column scan. - assert(planHasStage(db, coll.find({}, projection).explain(), "COLUMN_SCAN")); + assert(planHasStage(testDb, coll.find({}, projection).explain(), "COLUMN_SCAN")); // No hint should work (but redirect to coll scan). - assert.commandWorked(db.runCommand( + assert.commandWorked(testDb.runCommand( {find: coll.getName(), projection: {_id: 0, x: 1}, apiVersion: "1", apiStrict: true})); // Hint should fail. 
- assert.commandFailedWithCode(db.runCommand({ + assert.commandFailedWithCode(testDb.runCommand({ find: coll.getName(), projection: projection, hint: {"$**": "columnstore"}, @@ -98,4 +96,3 @@ if (setUpServerForColumnStoreIndexTest(db)) { }), ErrorCodes.BadValue); } -})(); diff --git a/jstests/core/api/apitest_db.js b/jstests/core/api/apitest_db.js index 805e2c8f99051..a2f293400884a 100644 --- a/jstests/core/api/apitest_db.js +++ b/jstests/core/api/apitest_db.js @@ -8,7 +8,7 @@ assert("test" == db, "wrong database currently not test"); -dd = function(x) { +let dd = function(x) { // print( x ); }; diff --git a/jstests/core/capped/capped.js b/jstests/core/capped/capped.js index 6c62ba755606d..1a26f5a1e22a8 100644 --- a/jstests/core/capped/capped.js +++ b/jstests/core/capped/capped.js @@ -9,7 +9,7 @@ db.jstests_capped.drop(); db.createCollection("jstests_capped", {capped: true, size: 30000}); -t = db.jstests_capped; +let t = db.jstests_capped; assert.eq(1, t.getIndexes().length, "expected a count of one index for new capped collection"); t.save({x: 1}); diff --git a/jstests/core/capped/capped1.js b/jstests/core/capped/capped1.js index df9b12ddf8989..5a02fb6c55f26 100644 --- a/jstests/core/capped/capped1.js +++ b/jstests/core/capped/capped1.js @@ -6,11 +6,11 @@ * ] */ -t = db.capped1; +let t = db.capped1; t.drop(); db.createCollection("capped1", {capped: true, size: 1024}); -v = t.validate(); +let v = t.validate(); assert(v.valid, "A : " + tojson(v)); // SERVER-485 t.save({x: 1}); diff --git a/jstests/core/capped/capped5.js b/jstests/core/capped/capped5.js index a276baf043d33..d2b704c6cb97b 100644 --- a/jstests/core/capped/capped5.js +++ b/jstests/core/capped/capped5.js @@ -6,9 +6,9 @@ * ] */ -tn = "capped5"; +let tn = "capped5"; -t = db[tn]; +let t = db[tn]; t.drop(); db.createCollection(tn, {capped: true, size: 1024 * 1024 * 1}); diff --git a/jstests/core/capped/capped_empty.js b/jstests/core/capped/capped_empty.js index e0515967ca84d..020be638c61b2 100644 --- a/jstests/core/capped/capped_empty.js +++ b/jstests/core/capped/capped_empty.js @@ -15,7 +15,7 @@ * ] */ -t = db.capped_empty; +let t = db.capped_empty; t.drop(); db.createCollection(t.getName(), {capped: true, size: 100}); diff --git a/jstests/core/capped/capped_resize.js b/jstests/core/capped/capped_resize.js index e1ebac19ac1c5..17125589817ce 100644 --- a/jstests/core/capped/capped_resize.js +++ b/jstests/core/capped/capped_resize.js @@ -10,9 +10,6 @@ * assumes_unsharded_collection, * ] */ -(function() { -load("jstests/libs/feature_flag_util.js"); - const testDB = db.getSiblingDB(jsTestName()); const cappedColl = testDB["capped_coll"]; @@ -106,12 +103,9 @@ let verifyLimitUpdate = function(updates) { // We modify the collection to have a size multiple of 256, then // we modify the collection to have a size non multiple of 256 and finally // we modify the collection to have a size multiple of 256 - // TODO SERVER-74653: Remove feature flag check. 
- if (FeatureFlagUtil.isPresentAndEnabled(testDB, "CappedCollectionsRelaxedSize")) { - verifyLimitUpdate({cappedSize: 25 * 1024}); - verifyLimitUpdate({cappedSize: 50 * 1023}); - verifyLimitUpdate({cappedSize: 50 * 1024}); - } + verifyLimitUpdate({cappedSize: 25 * 1024}); + verifyLimitUpdate({cappedSize: 50 * 1023}); + verifyLimitUpdate({cappedSize: 50 * 1024}); })(); (function updateMaxLimit() { @@ -169,5 +163,4 @@ let verifyLimitUpdate = function(updates) { stats = assert.commandWorked(cappedColl.stats()); assert.eq(stats.count, initialDocSize); assert.lte(stats.size, maxSize); -})(); -}()); +})(); \ No newline at end of file diff --git a/jstests/core/capped/cappeda.js b/jstests/core/capped/cappeda.js index 3ec0074eefe43..816f5be4e0d3a 100644 --- a/jstests/core/capped/cappeda.js +++ b/jstests/core/capped/cappeda.js @@ -6,13 +6,13 @@ * ] */ -t = db.scan_capped_id; +let t = db.scan_capped_id; t.drop(); -x = t.runCommand("create", {capped: true, size: 10000}); +let x = t.runCommand("create", {capped: true, size: 10000}); assert(x.ok); -for (i = 0; i < 100; i++) +for (let i = 0; i < 100; i++) t.insert({_id: i, x: 1}); function q() { diff --git a/jstests/core/clustered/clustered_collection_bounded_scan.js b/jstests/core/clustered/clustered_collection_bounded_scan.js index 8a6a0a1a9d595..90abe0ba8bf25 100644 --- a/jstests/core/clustered/clustered_collection_bounded_scan.js +++ b/jstests/core/clustered/clustered_collection_bounded_scan.js @@ -8,15 +8,13 @@ * assumes_unsharded_collection, * ] */ -(function() { -"use strict"; - load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js"); +import { + testClusteredCollectionBoundedScan +} from "jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js"; const replicatedDB = db.getSiblingDB(jsTestName()); const collName = "coll"; const replicatedColl = replicatedDB[collName]; -testClusteredCollectionBoundedScan(replicatedColl, {_id: 1}); -})(); +testClusteredCollectionBoundedScan(replicatedColl, {_id: 1}); \ No newline at end of file diff --git a/jstests/core/clustered/clustered_collection_collation.js b/jstests/core/clustered/clustered_collection_collation.js index 92c032c3b80e0..f09c35c150e80 100644 --- a/jstests/core/clustered/clustered_collection_collation.js +++ b/jstests/core/clustered/clustered_collection_collation.js @@ -10,12 +10,12 @@ * ] */ -(function() { -"use strict"; - +import {getWinningPlan} from "jstests/libs/analyze_plan.js"; load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection. load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/clustered_collections/clustered_collection_hint_common.js"); +import { + validateClusteredCollectionHint +} from "jstests/libs/clustered_collections/clustered_collection_hint_common.js"; const collatedName = 'clustered_collection_with_collation'; const collated = db[collatedName]; @@ -121,8 +121,9 @@ const verifyHasBoundsAndFindsN = function(coll, expected, predicate, queryCollat const res = queryCollation === undefined ? 
assert.commandWorked(coll.find(predicate).explain()) : assert.commandWorked(coll.find(predicate).collation(queryCollation).explain()); - const min = assert(res.queryPlanner.winningPlan.minRecord, "No min bound"); - const max = assert(res.queryPlanner.winningPlan.maxRecord, "No max bound"); + const queryPlan = getWinningPlan(res.queryPlanner); + const min = assert(queryPlan.minRecord, "No min bound"); + const max = assert(queryPlan.maxRecord, "No max bound"); assert.eq(min, max, "COLLSCAN bounds are not equal"); assert.eq(expected, coll.find(predicate).count(), "Didn't find the expected records"); }; @@ -131,8 +132,9 @@ const verifyNoBoundsAndFindsN = function(coll, expected, predicate, queryCollati const res = queryCollation === undefined ? assert.commandWorked(coll.find(predicate).explain()) : assert.commandWorked(coll.find(predicate).collation(queryCollation).explain()); - assert.eq(null, res.queryPlanner.winningPlan.minRecord, "There's a min bound"); - assert.eq(null, res.queryPlanner.winningPlan.maxRecord, "There's a max bound"); + const queryPlan = getWinningPlan(res.queryPlanner); + assert.eq(null, queryPlan.minRecord, "There's a min bound"); + assert.eq(null, queryPlan.maxRecord, "There's a max bound"); assert.eq(expected, coll.find(predicate).count(), "Didn't find the expected records"); }; @@ -140,8 +142,9 @@ const verifyNoTightBoundsAndFindsN = function(coll, expected, predicate, queryCo const res = queryCollation === undefined ? assert.commandWorked(coll.find(predicate).explain()) : assert.commandWorked(coll.find(predicate).collation(queryCollation).explain()); - const min = res.queryPlanner.winningPlan.minRecord; - const max = res.queryPlanner.winningPlan.maxRecord; + const queryPlan = getWinningPlan(res.queryPlanner); + const min = queryPlan.minRecord; + const max = queryPlan.maxRecord; assert.neq(null, min, "No min bound"); assert.neq(null, max, "No max bound"); assert(min !== max, "COLLSCAN bounds are equal"); @@ -330,4 +333,3 @@ validateClusteredCollectionHint(noncollated, { expectedWinningPlanStats: {stage: "CLUSTERED_IXSCAN", direction: "forward", minRecord: 5, maxRecord: 11} }); -})(); diff --git a/jstests/core/clustered/clustered_collection_hint.js b/jstests/core/clustered/clustered_collection_hint.js index ba7820b24635a..b1f24fc39f9c6 100644 --- a/jstests/core/clustered/clustered_collection_hint.js +++ b/jstests/core/clustered/clustered_collection_hint.js @@ -8,14 +8,13 @@ * requires_non_retryable_writes, * ] */ -(function() { -"use strict"; load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/clustered_collections/clustered_collection_hint_common.js"); +import { + testClusteredCollectionHint +} from "jstests/libs/clustered_collections/clustered_collection_hint_common.js"; const replicatedDB = db.getSiblingDB(jsTestName()); const collName = "coll"; const replicatedColl = replicatedDB[collName]; testClusteredCollectionHint(replicatedColl, {_id: 1}, "_id_"); -})(); diff --git a/jstests/core/collation.js b/jstests/core/collation.js index df72d6bd6177a..5b9b1b9130b64 100644 --- a/jstests/core/collation.js +++ b/jstests/core/collation.js @@ -18,10 +18,14 @@ // ] // Integration tests for the collation feature. -(function() { -'use strict'; +import { + getPlanStage, + getWinningPlan, + isCollscan, + isIxscan, + planHasStage +} from "jstests/libs/analyze_plan.js"; -load("jstests/libs/analyze_plan.js"); load("jstests/libs/index_catalog_helpers.js"); // For isWiredTiger. 
load("jstests/concurrency/fsm_workload_helpers/server_types.js"); @@ -1860,5 +1864,4 @@ assert.throws(() => coll.find({}, {_id: 0}) res = testDb.runCommand({create: 'view', viewOn: 'coll'}); assert(res.ok == 1 || res.errmsg == ErrorCodes.NamespaceExists); res = testDb.runCommand({create: 'view', viewOn: 'coll', collation: {locale: 'en'}}); -assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists); -})(); +assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists); \ No newline at end of file diff --git a/jstests/core/columnstore/column_scan_skip_row_store_projection.js b/jstests/core/columnstore/column_scan_skip_row_store_projection.js index 8cfee05c075a7..eedb619e7d5f4 100644 --- a/jstests/core/columnstore/column_scan_skip_row_store_projection.js +++ b/jstests/core/columnstore/column_scan_skip_row_store_projection.js @@ -21,24 +21,22 @@ * not_allowed_with_security_token, * ] */ -(function() { -"use strict"; - load('jstests/aggregation/extras/utils.js'); // For assertArrayEq. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -// For areAllCollectionsClustered. -load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +load( + "jstests/libs/clustered_collections/clustered_collection_util.js"); // For + // areAllCollectionsClustered. +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */); if (!columnstoreEnabled) { jsTestLog("Skipping columnstore index test since the feature flag is not enabled."); - return; + quit(); } if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const indexedColl = db.column_scan_skip_row_store_projection_indexed; @@ -254,4 +252,3 @@ function runAllAggregations() { setupCollections(); runAllAggregations(); -}()); diff --git a/jstests/core/columnstore/column_store_index_compression.js b/jstests/core/columnstore/column_store_index_compression.js index f59a78d5696c7..a022797ae1260 100644 --- a/jstests/core/columnstore/column_store_index_compression.js +++ b/jstests/core/columnstore/column_store_index_compression.js @@ -17,24 +17,21 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; load("jstests/libs/discover_topology.js"); // For findNonConfigNodes load("jstests/libs/fixture_helpers.js"); // For isMongos load("jstests/libs/index_catalog_helpers.js"); // For IndexCatalogHelpers -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */); if (!columnstoreEnabled) { jsTestLog("Skipping columnstore index test since the feature flag is not enabled."); - return; + quit(); } if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll = db.column_store_index_compression; @@ -178,4 +175,3 @@ for (let {node, indexDetails} of reader.statsForEachMongod(coll, zstdIndex)) { "zstd", {node, indexDetails}); } -}()); diff --git a/jstests/core/columnstore/column_store_projection.js b/jstests/core/columnstore/column_store_projection.js index 65a3f78388ffd..48dbe03da264d 100644 --- a/jstests/core/columnstore/column_store_projection.js +++ b/jstests/core/columnstore/column_store_projection.js @@ -15,16 +15,13 @@ * not_allowed_with_security_token, * ] */ -(function() { -"use strict"; - load("jstests/libs/fail_point_util.js"); -load("jstests/libs/analyze_plan.js"); // For "planHasStage." +import {planHasStage} from "jstests/libs/analyze_plan.js"; load("jstests/aggregation/extras/utils.js"); // For "resultsEq." -load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest." +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } // @@ -209,5 +206,4 @@ runTestWithDocsAndIndexes("sibling_paths_5", {keys: {"a.$**": "columnstore"}}); // Note that this test does not drop any of its test collections or indexes, so that they will be -// available to follow-on index validation tests. -})(); +// available to follow-on index validation tests. \ No newline at end of file diff --git a/jstests/core/columnstore/columnstore_eligibility.js b/jstests/core/columnstore/columnstore_eligibility.js index 0ae6e9b0e01a1..02679fa844383 100644 --- a/jstests/core/columnstore/columnstore_eligibility.js +++ b/jstests/core/columnstore/columnstore_eligibility.js @@ -17,15 +17,12 @@ * requires_fcv_70, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. -load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos. +import {aggPlanHasStage, planHasStage} from "jstests/libs/analyze_plan.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.isMongos. if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll = db.columnstore_eligibility; @@ -254,4 +251,3 @@ assert.commandFailedWithCode(db.runCommand({ hint: {"a.$**": "columnstore"} }), 6714002); -}()); diff --git a/jstests/core/columnstore/columnstore_index.js b/jstests/core/columnstore/columnstore_index.js index 895f93ae93f93..e261b974b96bc 100644 --- a/jstests/core/columnstore/columnstore_index.js +++ b/jstests/core/columnstore/columnstore_index.js @@ -21,16 +21,14 @@ * tenant_migration_incompatible, * does_not_support_stepdowns, * not_allowed_with_security_token, + * uses_full_validation, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. 
+import {planHasStage} from "jstests/libs/analyze_plan.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll = db.columnstore_index; @@ -127,4 +125,3 @@ assert( {v: 2, key: {"$**": "columnstore"}, name: "$**_columnstore", columnstoreProjection: {x: 1}}, listIndexesResult), listIndexesResult); -}()); diff --git a/jstests/core/columnstore/columnstore_index_correctness.js b/jstests/core/columnstore/columnstore_index_correctness.js index f6fc1621fb3af..b3362fb324fd5 100644 --- a/jstests/core/columnstore/columnstore_index_correctness.js +++ b/jstests/core/columnstore/columnstore_index_correctness.js @@ -12,18 +12,16 @@ * tenant_migration_incompatible, * does_not_support_stepdowns, * not_allowed_with_security_token, + * uses_full_validation, * ] */ -(function() { -"use strict"; - load("jstests/libs/fail_point_util.js"); -load("jstests/libs/analyze_plan.js"); // For "planHasStage." +import {getPlanStages, aggPlanHasStage, planHasStage} from "jstests/libs/analyze_plan.js"; load("jstests/aggregation/extras/utils.js"); // For "resultsEq." -load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest." +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll = db.columnstore_index_correctness; @@ -180,102 +178,102 @@ const coll = db.columnstore_index_correctness; // Multiple tests in this file use the same dataset. Intentionally not using _id as the unique // identifier, to avoid getting IDHACK plans when we query by it. const docs = [ - {num: 0}, - {num: 1, a: null}, - {num: 2, a: "scalar"}, - {num: 3, a: {}}, - {num: 4, a: {x: 1, b: "scalar"}}, - {num: 5, a: {b: {}}}, - {num: 6, a: {x: 1, b: {}}}, - {num: 7, a: {x: 1, b: {x: 1}}}, - {num: 8, a: {b: {c: "scalar"}}}, - {num: 9, a: {b: {c: null}}}, - {num: 10, a: {b: {c: [[1, 2], [{}], 2]}}}, - {num: 11, a: {x: 1, b: {x: 1, c: ["scalar"]}}}, - {num: 12, a: {x: 1, b: {c: {x: 1}}}}, - {num: 13, a: {b: []}}, - {num: 14, a: {b: [null]}}, - {num: 15, a: {b: ["scalar"]}}, - {num: 16, a: {b: [[]]}}, - {num: 17, a: {b: [1, {}, 2]}}, - {num: 18, a: {b: [[1, 2], [{}], 2]}}, - {num: 19, a: {x: 1, b: [[1, 2], [{}], 2]}}, - {num: 20, a: {b: [{c: "scalar"}]}}, - {num: 21, a: {b: [{c: "scalar"}, {c: "scalar2"}]}}, - {num: 22, a: {b: [{c: [[1, 2], [{}], 2]}]}}, - {num: 23, a: {b: [1, {c: "scalar"}, 2]}}, - {num: 24, a: {b: [1, {c: [[1, 2], [{}], 2]}, 2]}}, - {num: 25, a: {x: 1, b: [1, {c: [[1, 2], [{}], 2]}, 2]}}, - {num: 26, a: {b: [[1, 2], [{c: "scalar"}], 2]}}, - {num: 27, a: {b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}}, - {num: 28, a: {x: 1, b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}}, - {num: 29, a: []}, - {num: 30, a: [null]}, - {num: 31, a: ["scalar"]}, - {num: 32, a: [[]]}, - {num: 33, a: [{}]}, - {num: 34, a: [1, {}, 2]}, - {num: 35, a: [[1, 2], [{}], 2]}, - {num: 36, a: [{b: "scalar"}]}, - {num: 37, a: [{b: null}]}, - {num: 38, a: [1, {b: "scalar"}, 2]}, - {num: 39, a: [1, {b: []}, 2]}, - {num: 40, a: [1, {b: [null]}, 2]}, - {num: 41, a: [1, {b: ["scalar"]}, 2]}, - {num: 42, a: [1, {b: [[]]}, 2]}, - {num: 43, a: [{b: []}]}, - {num: 44, a: [{b: ["scalar"]}]}, - {num: 45, a: [{b: [[]]}]}, - {num: 46, a: [{b: {}}]}, - {num: 47, a: [{b: {c: "scalar"}}]}, - {num: 48, a: [{b: {c: [[1, 2], [{}], 2]}}]}, - {num: 49, a: [{b: {x: 1}}]}, - {num: 50, a: [{b: {x: 1, c: "scalar"}}]}, - {num: 51, a: [{b: [{c: "scalar"}]}]}, - 
{num: 52, a: [{b: [{c: ["scalar"]}]}]}, - {num: 53, a: [{b: [1, {c: ["scalar"]}, 2]}]}, - {num: 54, a: [{b: [{}]}]}, - {num: 55, a: [{b: [[1, 2], [{}], 2]}]}, - {num: 56, a: [{b: [[1, 2], [{c: "scalar"}], 2]}]}, - {num: 57, a: [{b: [[1, 2], [{c: ["scalar"]}], 2]}]}, - {num: 58, a: [1, {b: {}}, 2]}, - {num: 59, a: [1, {b: {c: "scalar"}}, 2]}, - {num: 60, a: [1, {b: {c: {x: 1}}}, 2]}, - {num: 61, a: [1, {b: {c: [1, {}, 2]}}, 2]}, - {num: 62, a: [1, {b: {x: 1}}, 2]}, - {num: 63, a: [1, {b: {x: 1, c: "scalar"}}, 2]}, - {num: 64, a: [1, {b: {x: 1, c: [[]]}}, 2]}, - {num: 65, a: [1, {b: {x: 1, c: [1, {}, 2]}}, 2]}, - {num: 66, a: [1, {b: [{}]}, 2]}, - {num: 67, a: [1, {b: [{c: "scalar"}]}, 2]}, - {num: 68, a: [1, {b: [{c: {x: 1}}]}, 2]}, - {num: 69, a: [1, {b: [{c: [1, {}, 2]}]}, 2]}, - {num: 70, a: [1, {b: [1, {}, 2]}, 2]}, - {num: 71, a: [1, {b: [1, {c: null}, 2]}, 2]}, - {num: 72, a: [1, {b: [1, {c: "scalar"}, 2]}, 2]}, - {num: 73, a: [1, {b: [1, {c: [1, {}, 2]}, 2]}, 2]}, - {num: 74, a: [1, {b: [[1, 2], [{}], 2]}, 2]}, - {num: 75, a: [1, {b: [[1, 2], [{c: "scalar"}], 2]}, 2]}, - {num: 76, a: [1, {b: [[1, 2], [{c: [1, {}, 2]}], 2]}, 2]}, - {num: 77, a: [[1, 2], [{b: "scalar"}], 2]}, - {num: 78, a: [[1, 2], [{b: {x: 1, c: "scalar"}}], 2]}, - {num: 79, a: [[1, 2], [{b: {x: 1, c: [1, {}, 2]}}], 2]}, - {num: 80, a: [[1, 2], [{b: []}], 2]}, - {num: 81, a: [[1, 2], [{b: [1, {c: "scalar"}, 2]}], 2]}, - {num: 82, a: [[1, 2], [{b: [[1, 2], [{c: "scalar"}], 2]}], 2]}, - {num: 83, a: [[1, 2], [{b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}], 2]}, - {num: 84, a: [{b: [{c: 1}, {}]}]}, - {num: 85, a: [{b: [{c: 1}, {d: 1}]}]}, - {num: 86, a: [{b: {c: 1}}, {b: {}}]}, - {num: 87, a: [{b: {c: 1}}, {b: {d: 1}}]}, - {num: 88, a: [{b: {c: 1}}, {}]}, - {num: 89, a: [{b: {c: 1}}, {b: null}]}, - {num: 90, a: [{b: {c: 1}}, {b: []}]}, - {num: 91, a: [{b: []}, {b: []}]}, - {num: 92, a: {b: [{c: [1, 2]}]}}, - {num: 93, a: {b: {c: [1, 2]}}}, - {num: 94, a: [[1, 2], [{b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}], 2]}, - {num: 95, a: [{m: 1, n: 2}, {m: 2, o: 1}]}, + {_num: 0}, + {_num: 1, a: null}, + {_num: 2, a: "scalar"}, + {_num: 3, a: {}}, + {_num: 4, a: {x: 1, b: "scalar"}}, + {_num: 5, a: {b: {}}}, + {_num: 6, a: {x: 1, b: {}}}, + {_num: 7, a: {x: 1, b: {x: 1}}}, + {_num: 8, a: {b: {c: "scalar"}}}, + {_num: 9, a: {b: {c: null}}}, + {_num: 10, a: {b: {c: [[1, 2], [{}], 2]}}}, + {_num: 11, a: {x: 1, b: {x: 1, c: ["scalar"]}}}, + {_num: 12, a: {x: 1, b: {c: {x: 1}}}}, + {_num: 13, a: {b: []}}, + {_num: 14, a: {b: [null]}}, + {_num: 15, a: {b: ["scalar"]}}, + {_num: 16, a: {b: [[]]}}, + {_num: 17, a: {b: [1, {}, 2]}}, + {_num: 18, a: {b: [[1, 2], [{}], 2]}}, + {_num: 19, a: {x: 1, b: [[1, 2], [{}], 2]}}, + {_num: 20, a: {b: [{c: "scalar"}]}}, + {_num: 21, a: {b: [{c: "scalar"}, {c: "scalar2"}]}}, + {_num: 22, a: {b: [{c: [[1, 2], [{}], 2]}]}}, + {_num: 23, a: {b: [1, {c: "scalar"}, 2]}}, + {_num: 24, a: {b: [1, {c: [[1, 2], [{}], 2]}, 2]}}, + {_num: 25, a: {x: 1, b: [1, {c: [[1, 2], [{}], 2]}, 2]}}, + {_num: 26, a: {b: [[1, 2], [{c: "scalar"}], 2]}}, + {_num: 27, a: {b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}}, + {_num: 28, a: {x: 1, b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}}, + {_num: 29, a: []}, + {_num: 30, a: [null]}, + {_num: 31, a: ["scalar"]}, + {_num: 32, a: [[]]}, + {_num: 33, a: [{}]}, + {_num: 34, a: [1, {}, 2]}, + {_num: 35, a: [[1, 2], [{}], 2]}, + {_num: 36, a: [{b: "scalar"}]}, + {_num: 37, a: [{b: null}]}, + {_num: 38, a: [1, {b: "scalar"}, 2]}, + {_num: 39, a: [1, {b: []}, 2]}, + {_num: 40, a: [1, {b: [null]}, 
2]}, + {_num: 41, a: [1, {b: ["scalar"]}, 2]}, + {_num: 42, a: [1, {b: [[]]}, 2]}, + {_num: 43, a: [{b: []}]}, + {_num: 44, a: [{b: ["scalar"]}]}, + {_num: 45, a: [{b: [[]]}]}, + {_num: 46, a: [{b: {}}]}, + {_num: 47, a: [{b: {c: "scalar"}}]}, + {_num: 48, a: [{b: {c: [[1, 2], [{}], 2]}}]}, + {_num: 49, a: [{b: {x: 1}}]}, + {_num: 50, a: [{b: {x: 1, c: "scalar"}}]}, + {_num: 51, a: [{b: [{c: "scalar"}]}]}, + {_num: 52, a: [{b: [{c: ["scalar"]}]}]}, + {_num: 53, a: [{b: [1, {c: ["scalar"]}, 2]}]}, + {_num: 54, a: [{b: [{}]}]}, + {_num: 55, a: [{b: [[1, 2], [{}], 2]}]}, + {_num: 56, a: [{b: [[1, 2], [{c: "scalar"}], 2]}]}, + {_num: 57, a: [{b: [[1, 2], [{c: ["scalar"]}], 2]}]}, + {_num: 58, a: [1, {b: {}}, 2]}, + {_num: 59, a: [1, {b: {c: "scalar"}}, 2]}, + {_num: 60, a: [1, {b: {c: {x: 1}}}, 2]}, + {_num: 61, a: [1, {b: {c: [1, {}, 2]}}, 2]}, + {_num: 62, a: [1, {b: {x: 1}}, 2]}, + {_num: 63, a: [1, {b: {x: 1, c: "scalar"}}, 2]}, + {_num: 64, a: [1, {b: {x: 1, c: [[]]}}, 2]}, + {_num: 65, a: [1, {b: {x: 1, c: [1, {}, 2]}}, 2]}, + {_num: 66, a: [1, {b: [{}]}, 2]}, + {_num: 67, a: [1, {b: [{c: "scalar"}]}, 2]}, + {_num: 68, a: [1, {b: [{c: {x: 1}}]}, 2]}, + {_num: 69, a: [1, {b: [{c: [1, {}, 2]}]}, 2]}, + {_num: 70, a: [1, {b: [1, {}, 2]}, 2]}, + {_num: 71, a: [1, {b: [1, {c: null}, 2]}, 2]}, + {_num: 72, a: [1, {b: [1, {c: "scalar"}, 2]}, 2]}, + {_num: 73, a: [1, {b: [1, {c: [1, {}, 2]}, 2]}, 2]}, + {_num: 74, a: [1, {b: [[1, 2], [{}], 2]}, 2]}, + {_num: 75, a: [1, {b: [[1, 2], [{c: "scalar"}], 2]}, 2]}, + {_num: 76, a: [1, {b: [[1, 2], [{c: [1, {}, 2]}], 2]}, 2]}, + {_num: 77, a: [[1, 2], [{b: "scalar"}], 2]}, + {_num: 78, a: [[1, 2], [{b: {x: 1, c: "scalar"}}], 2]}, + {_num: 79, a: [[1, 2], [{b: {x: 1, c: [1, {}, 2]}}], 2]}, + {_num: 80, a: [[1, 2], [{b: []}], 2]}, + {_num: 81, a: [[1, 2], [{b: [1, {c: "scalar"}, 2]}], 2]}, + {_num: 82, a: [[1, 2], [{b: [[1, 2], [{c: "scalar"}], 2]}], 2]}, + {_num: 83, a: [[1, 2], [{b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}], 2]}, + {_num: 84, a: [{b: [{c: 1}, {}]}]}, + {_num: 85, a: [{b: [{c: 1}, {d: 1}]}]}, + {_num: 86, a: [{b: {c: 1}}, {b: {}}]}, + {_num: 87, a: [{b: {c: 1}}, {b: {d: 1}}]}, + {_num: 88, a: [{b: {c: 1}}, {}]}, + {_num: 89, a: [{b: {c: 1}}, {b: null}]}, + {_num: 90, a: [{b: {c: 1}}, {b: []}]}, + {_num: 91, a: [{b: []}, {b: []}]}, + {_num: 92, a: {b: [{c: [1, 2]}]}}, + {_num: 93, a: {b: {c: [1, 2]}}}, + {_num: 94, a: [[1, 2], [{b: [[1, 2], [{c: [[1, 2], [{}], 2]}], 2]}], 2]}, + {_num: 95, a: [{m: 1, n: 2}, {m: 2, o: 1}]}, ]; coll.drop(); @@ -283,7 +281,7 @@ let bulk = coll.initializeUnorderedBulkOp(); for (let doc of docs) { let insertObj = {}; Object.assign(insertObj, doc); - if (doc.num % 2 == 0) { + if (doc._num % 2 == 0) { insertObj.optionalField = "foo"; } bulk.insert(insertObj); @@ -293,7 +291,7 @@ bulk.execute(); assert.commandWorked(coll.createIndex({"$**": "columnstore"})); (function testProjectionOfIndependentPaths() { - const kProjection = {_id: 0, "a.b.c": 1, num: 1, optionalField: 1}; + const kProjection = {_id: 0, _num: 1, "a.b.c": 1, optionalField: 1}; let explain = coll.find({}, kProjection).explain(); assert(planHasStage(db, explain, "COLUMN_SCAN"), @@ -303,15 +301,16 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"})); assert.eq(results.length, docs.length, "With no filter should have returned all docs"); for (let res of results) { - const trueResult = coll.find({num: res.num}, kProjection).hint({$natural: 1}).toArray()[0]; - const originalDoc = coll.findOne({num: res.num}); + const trueResult = + 
coll.find({_num: res._num}, kProjection).hint({$natural: 1}).toArray()[0]; + const originalDoc = coll.findOne({_num: res._num}); assert.docEq(res, trueResult, "Mismatched projection of " + tojson(originalDoc)); } })(); // Run a similar query that projects multiple fields with a shared parent object. (function testProjectionOfSiblingPaths() { - const kSiblingProjection = {_id: 0, "a.m": 1, "a.n": 1, num: 1}; + const kSiblingProjection = {_id: 0, _num: 1, "a.m": 1, "a.n": 1}; let explain = coll.find({}, kSiblingProjection).explain(); assert(planHasStage(db, explain, "COLUMN_SCAN"), @@ -322,15 +321,15 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"})); for (let res of results) { const trueResult = - coll.find({num: res.num}, kSiblingProjection).hint({$natural: 1}).toArray()[0]; - const originalDoc = coll.findOne({num: res.num}); + coll.find({_num: res._num}, kSiblingProjection).hint({$natural: 1}).toArray()[0]; + const originalDoc = coll.findOne({_num: res._num}); assert.eq(res, trueResult, "Mismatched projection of " + tojson(originalDoc)); } })(); // Run a query that tests the SERVER-67742 fix. (function testPrefixPath() { - const kPrefixProjection = {_id: 0, "a": 1, num: 1}; + const kPrefixProjection = {_id: 0, _num: 1, "a": 1}; // Have to use the index hint because SERVER-67264 blocks selection of CSI. let explain = coll.find({"a.m": 1}, kPrefixProjection).hint({"$**": "columnstore"}).explain(); @@ -346,8 +345,8 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"})); for (let res of results) { const trueResult = - coll.find({num: res.num}, kPrefixProjection).hint({$natural: 1}).toArray()[0]; - const originalDoc = coll.findOne({num: res.num}); + coll.find({_num: res._num}, kPrefixProjection).hint({$natural: 1}).toArray()[0]; + const originalDoc = coll.findOne({_num: res._num}); assert.eq(res, trueResult, "Mismatched projection of " + tojson(originalDoc)); } })(); @@ -357,7 +356,7 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"})); (function testGroup() { // Sanity check that we are comparing the plans we expect to be. let pipeline = [ - {$group: {_id: "$a.b.c", docs: {$push: "$num"}}}, + {$group: {_id: "$a.b.c", docs: {$push: "$_num"}}}, {$set: {docs: {$sortArray: {input: "$docs", sortBy: 1}}}} ]; let naturalExplain = coll.explain().aggregate(pipeline, {hint: {$natural: 1}}); @@ -485,4 +484,3 @@ assert.commandWorked(coll.createIndex({"$**": "columnstore"})); runTest({locale: "en", strength: 3}, 1); // case sensitive runTest({locale: "en", strength: 2}, 3); // case insensitive })(); -})(); diff --git a/jstests/core/columnstore/columnstore_index_per_path_filters.js b/jstests/core/columnstore/columnstore_index_per_path_filters.js index 32b457c6b6a61..c96fff5aa699c 100644 --- a/jstests/core/columnstore/columnstore_index_per_path_filters.js +++ b/jstests/core/columnstore/columnstore_index_per_path_filters.js @@ -16,16 +16,12 @@ * not_allowed_with_security_token, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For "resultsEq." -load("jstests/libs/analyze_plan.js"); // For "planHasStage." -load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages. -load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest." 
+import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll_filters = db.columnstore_index_per_path_filters; @@ -1060,5 +1056,4 @@ function testInExpr(test) { assert.lt(x.numSeeks + y.numSeeks, 2 * expectedToMatchCount, "Number of seeks in filtered columns should be small"); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/columnstore/columnstore_large_array_index_correctness.js b/jstests/core/columnstore/columnstore_large_array_index_correctness.js index 3c15b62e24712..8f01cdcb4065d 100644 --- a/jstests/core/columnstore/columnstore_large_array_index_correctness.js +++ b/jstests/core/columnstore/columnstore_large_array_index_correctness.js @@ -12,30 +12,27 @@ * not_allowed_with_security_token, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For "planHasStage." -load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest." +import {planHasStage} from "jstests/libs/analyze_plan.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll = db.columnstore_large_array_index_correctness; coll.drop(); const uint8 = { - num: 0, - a: Array.from({length: 50}, (_, i) => ({b: [2 * i, 2 * i + 1]})) + _num: 0, + o: Array.from({length: 50}, (_, i) => ({b: [2 * i, 2 * i + 1]})), }; const uint16 = { - num: 1, - a: Array.from({length: 150}, (_, i) => ({b: [2 * i, 2 * i + 1]})) + _num: 1, + o: Array.from({length: 150}, (_, i) => ({b: [2 * i, 2 * i + 1]})), }; const uint32 = { - num: 2, - a: Array.from({length: 15000}, (_, i) => ({b: [2 * i, 2 * i + 1]})) + _num: 2, + o: Array.from({length: 15000}, (_, i) => ({b: [2 * i, 2 * i + 1]})), }; const docs = [uint8, uint16, uint32]; @@ -47,27 +44,26 @@ for (let doc of docs) { assert.commandWorked(coll.createIndex({"$**": "columnstore"})); const kProjection = { _id: 0, - "a.b": 1, - num: 1, + _num: 1, + "o.b": 1, }; // Ensure this test is exercising the column scan. -let explain = coll.find({}, kProjection).sort({num: 1}).explain(); +let explain = coll.find({}, kProjection).sort({_num: 1}).explain(); assert(planHasStage(db, explain, "COLUMN_SCAN"), explain); // Run a query getting all of the results using the column index. 
-let results = coll.find({}, kProjection).sort({num: 1}).toArray(); +let results = coll.find({}, kProjection).sort({_num: 1}).toArray(); assert.gt(results.length, 0); // Run a query getting all results without column index -let trueResults = coll.find({}, kProjection).hint({$natural: 1}).sort({num: 1}).toArray(); +let trueResults = coll.find({}, kProjection).hint({$natural: 1}).sort({_num: 1}).toArray(); assert.eq(results.length, trueResults.length); for (let i = 0; i < results.length; i++) { - const originalDoc = coll.findOne({num: results[i].num}); + const originalDoc = coll.findOne({_num: results[i]._num}); assert.eq(results[i], trueResults[i], () => - `column store index output number: ${results[i].num}, collection scan output number: ${trueResults[i].num}, - original document number was: ${originalDoc.num}`); + `column store index output number: ${results[i]._num}, collection scan output number: ${trueResults[i]._num}, + original document number was: ${originalDoc._num}`); } -})(); diff --git a/jstests/core/columnstore/columnstore_validindex.js b/jstests/core/columnstore/columnstore_validindex.js index 421ea4a6edc06..8015a891d342e 100644 --- a/jstests/core/columnstore/columnstore_validindex.js +++ b/jstests/core/columnstore/columnstore_validindex.js @@ -13,15 +13,12 @@ * not_allowed_with_security_token, * ] */ -(function() { -"use strict"; - load("jstests/libs/index_catalog_helpers.js"); // For "IndexCatalogHelpers." load("jstests/libs/collection_drop_recreate.js"); // For "assertDropCollection." -load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest." +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const kCollectionName = "columnstore_validindex"; @@ -35,12 +32,6 @@ const kKeyPattern = { // Can create a valid columnstore index. IndexCatalogHelpers.createIndexAndVerifyWithDrop(coll, kKeyPattern, {name: kIndexName}); -// Can create a columnstore index with foreground & background construction. -IndexCatalogHelpers.createIndexAndVerifyWithDrop( - coll, kKeyPattern, {background: false, name: kIndexName}); -IndexCatalogHelpers.createIndexAndVerifyWithDrop( - coll, kKeyPattern, {background: true, name: kIndexName}); - // Test that you cannot create a columnstore index with a collation - either with the argument or // because the collection has a default collation specified. assert.commandFailedWithCode( @@ -197,5 +188,4 @@ assert.commandFailedWithCode( ErrorCodes.InvalidIndexSpecificationOption); assert.commandFailedWithCode( db.runCommand({create: clusteredCollName, clusteredIndex: {key: kKeyPattern, unique: false}}), - 5979700); -})(); + 5979700); \ No newline at end of file diff --git a/jstests/core/command_let_variables.js b/jstests/core/command_let_variables.js index 1e4286dbc1908..a408b3a247c52 100644 --- a/jstests/core/command_let_variables.js +++ b/jstests/core/command_let_variables.js @@ -3,16 +3,16 @@ // @tags: [ // ] // -(function() { -"use strict"; +import {getPlanStage} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fixture_helpers.js"); // For 'isMongos' and 'isSharded'. 
const testDB = db.getSiblingDB("command_let_variables"); const coll = testDB.command_let_variables; -const targetColl = testDB.command_let_variables_target; +coll.drop(); -assert.commandWorked(testDB.dropDatabase()); +const isMongos = FixtureHelpers.isMongos(testDB); +const isCollSharded = FixtureHelpers.isSharded(coll); const testDocs = [ { @@ -82,7 +82,20 @@ expectedResults = [ assert.eq(coll.aggregate(pipeline, {let : {target_trend: "weak decline"}}).toArray(), expectedResults); -if (!FixtureHelpers.isMongos(testDB)) { +// Test that running explain on the agg command works as expected. +let explain = assert.commandWorked(testDB.runCommand({ + explain: + {aggregate: coll.getName(), pipeline, let : {target_trend: "weak decline"}, cursor: {}}, + verbosity: "executionStats" +})); +if (!isMongos) { + assert(explain.hasOwnProperty("stages"), explain); + assert.neq(explain.stages.length, 0, explain); + let lastStage = explain.stages[explain.stages.length - 1]; + assert.eq(lastStage.nReturned, 2, explain); +} + +if (!isMongos) { // Test that if runtimeConstants and let are both specified, both will coexist. // Runtime constants are not allowed on mongos passthroughs. let constants = { @@ -262,9 +275,23 @@ expectedResults = { assert.eq(result.length, 1); assert.eq(expectedResults, result[0]); -// Delete tests with let params will delete a record, assert that a point-wise find yields an empty -// result, and then restore the collection state for further tests down the line. We can't exercise -// a multi-delete here (limit: 0) because of failures in sharded txn passthrough tests. +// Test that let parameters work as expected when the find is run as an explain. +explain = assert.commandWorked(testDB.runCommand({ + explain: { + find: coll.getName(), + let : {target_species: "Song Thrush (Turdus philomelos)"}, + filter: {$expr: {$eq: ["$Species", "$$target_species"]}}, + projection: {_id: 0} + }, + verbosity: "executionStats" +})); +if (!isMongos) { + assert.eq(explain.executionStats.nReturned, 1, explain); +} + +// Delete tests with let params will delete a record, assert that a point-wise find yields an +// empty result, and then restore the collection state for further tests down the line. We can't +// exercise a multi-delete here (limit: 0) because of failures in sharded txn passthrough tests. assert.commandWorked(testDB.runCommand({ delete: coll.getName(), let : {target_species: "Song Thrush (Turdus philomelos)"}, @@ -277,8 +304,24 @@ result = assert .cursor.firstBatch; assert.eq(result.length, 0); -// Test that the .remove() shell helper supports let parameters. assert.commandWorked(coll.insert({_id: 4, Species: "bird_to_remove"})); + +// Test that explain of a delete command works as expected with 'let' parameters. +explain = assert.commandWorked(testDB.runCommand({ + explain: { + delete: coll.getName(), + let : {target_species: "bird_to_remove"}, + deletes: + [{q: {$and: [{_id: 4}, {$expr: {$eq: ["$Species", "$$target_species"]}}]}, limit: 1}] + }, + verbosity: "executionStats" +})); +if (!isMongos) { + let deleteStage = getPlanStage(explain.executionStats.executionStages, "DELETE"); + assert.eq(deleteStage.nWouldDelete, 1, explain); +} + +// Test that the .remove() shell helper supports let parameters. 
result = assert.commandWorked( coll.remove({$and: [{_id: 4}, {$expr: {$eq: ["$Species", "$$target_species"]}}]}, {justOne: true, let : {target_species: "bird_to_remove"}})); @@ -333,30 +376,49 @@ assert.commandWorked(testDB.runCommand({ cursor: {} })); -// Test that findAndModify works correctly with let parameter arguments. assert.commandWorked(coll.insert({_id: 5, Species: "spy_bird"})); -result = testDB.runCommand({ + +// Test that explain of findAndModify works correctly with let parameters. +explain = assert.commandWorked(testDB.runCommand({ + explain: { + findAndModify: coll.getName(), + let : {target_species: "spy_bird"}, + // Querying on _id field for sharded collection passthroughs. + query: {$and: [{_id: 5}, {$expr: {$eq: ["$Species", "$$target_species"]}}]}, + update: {Species: "questionable_bird"}, + new: true + }, + verbosity: "executionStats" +})); +if (!isMongos) { + let updateStage = getPlanStage(explain.executionStats.executionStages, "UPDATE"); + assert.eq(updateStage.nMatched, 1, explain); + assert.eq(updateStage.nWouldModify, 1, explain); +} + +// Test that findAndModify works correctly with let parameter arguments. +result = assert.commandWorked(testDB.runCommand({ findAndModify: coll.getName(), let : {target_species: "spy_bird"}, // Querying on _id field for sharded collection passthroughs. query: {$and: [{_id: 5}, {$expr: {$eq: ["$Species", "$$target_species"]}}]}, update: {Species: "questionable_bird"}, new: true -}); +})); expectedResults = { _id: 5, Species: "questionable_bird" }; assert.eq(expectedResults, result.value, result); -result = testDB.runCommand({ +result = assert.commandWorked(testDB.runCommand({ findAndModify: coll.getName(), let : {species_name: "not_a_bird", realSpecies: "dino"}, // Querying on _id field for sharded collection passthroughs. query: {$and: [{_id: 5}, {$expr: {$eq: ["$Species", "questionable_bird"]}}]}, update: [{$project: {Species: "$$species_name"}}, {$addFields: {suspect: "$$realSpecies"}}], new: true -}); +})); expectedResults = { _id: 5, Species: "not_a_bird", @@ -364,12 +426,31 @@ expectedResults = { }; assert.eq(expectedResults, result.value, result); +// Test that explain of update works correctly with let parameters. +explain = assert.commandWorked(testDB.runCommand({ + explain: { + update: coll.getName(), + updates: [{ + q: {_id: 3, $expr: {$eq: ["$Species", "$$target_species"]}}, + u: [{$set: {Species: "$$new_name"}}], + }], + let : {target_species: "Chaffinch (Fringilla coelebs)", new_name: "Chaffinch"} + }, + verbosity: "executionStats" +})); +if (!isMongos) { + let updateStage = getPlanStage(explain.executionStats.executionStages, "UPDATE"); + assert.eq(updateStage.nMatched, 1, explain); + assert.eq(updateStage.nWouldModify, 1, explain); +} + // Test that update respects different parameters in both the query and update part. 
result = assert.commandWorked(testDB.runCommand({ update: coll.getName(), - updates: [ - {q: {$expr: {$eq: ["$Species", "$$target_species"]}}, u: [{$set: {Species: "$$new_name"}}]} - ], + updates: [{ + q: {_id: 3, $expr: {$eq: ["$Species", "$$target_species"]}}, + u: [{$set: {Species: "$$new_name"}}], + }], let : {target_species: "Chaffinch (Fringilla coelebs)", new_name: "Chaffinch"} })); assert.eq(result.n, 1); @@ -387,8 +468,8 @@ assert.eq(result.cursor.firstBatch.length, 1); result = assert.commandWorked(testDB.runCommand({ update: coll.getName(), updates: [{ - q: {$expr: {$eq: ["$Species", "$$target_species"]}}, - u: [{$set: {Timestamp: "$$NOW"}}, {$set: {Species: "$$new_name"}}] + q: {_id: 3, $expr: {$eq: ["$Species", "$$target_species"]}}, + u: [{$set: {Timestamp: "$$NOW"}}, {$set: {Species: "$$new_name"}}], }], let : {target_species: "Chaffinch", new_name: "Pied Piper"} })); @@ -403,6 +484,12 @@ result = assert.commandWorked( testDB.runCommand({find: coll.getName(), filter: {$expr: {$eq: ["$Species", "Pied Piper"]}}})); assert.eq(result.cursor.firstBatch.length, 1, result); +// This forces a multi-statement transaction to commit if this test is running in one of the +// multi-statement transaction passthrough suites. We need to do this to ensure the updates +// above commit before running an update that will fail, as the failed update aborts the entire +// transaction and rolls back the updates above. +assert.commandWorked(testDB.runCommand({ping: 1})); + // Test that undefined let params in the update's query part fail gracefully. assert.commandFailedWithCode(testDB.runCommand({ update: coll.getName(), @@ -418,8 +505,8 @@ assert.commandFailedWithCode(testDB.runCommand({ assert.commandFailedWithCode(testDB.runCommand({ update: coll.getName(), updates: [{ - q: {$expr: {$eq: ["$Species", "Chaffinch (Fringilla coelebs)"]}}, - u: [{$set: {Species: "$$new_name"}}] + q: {_id: 3, $expr: {$eq: ["$Species", "Chaffinch (Fringilla coelebs)"]}}, + u: [{$set: {Species: "$$new_name"}}], }], let : {cat: "not_a_bird"} }), @@ -427,7 +514,7 @@ assert.commandFailedWithCode(testDB.runCommand({ // Test that the .update() shell helper supports let parameters. result = assert.commandWorked( - coll.update({$expr: {$eq: ["$Species", "$$target_species"]}}, + coll.update({_id: 3, $expr: {$eq: ["$Species", "$$target_species"]}}, [{$set: {Species: "$$new_name"}}], {let : {target_species: "Pied Piper", new_name: "Chaffinch"}})); assert.eq(result.nMatched, 1); @@ -519,7 +606,10 @@ assert.between(0, result, 1); } // Test that the expressions are evaluated once up front. -{ +// +// TODO SERVER-75927: This does not work as expected when the collection is sharded. Once the bug +// is fixed, we should re-enable this test case when the collection is sharded. 
+if (!isCollSharded) { const values = assert .commandWorked(testDB.runCommand({ find: coll.getName(), @@ -539,5 +629,4 @@ assert.between(0, result, 1); result = coll.aggregate([{$match: {$expr: {$eq: ["$_id", 2]}}}, {$project: {a: "$$b"}}], {let : {b: {$literal: "$notAFieldPath"}}}) .toArray(); -assert.eq(result, [{_id: 2, a: "$notAFieldPath"}]); -}()); +assert.eq(result, [{_id: 2, a: "$notAFieldPath"}]); \ No newline at end of file diff --git a/jstests/core/command_let_variables_merge_only.js b/jstests/core/command_let_variables_merge_only.js index 884533314c996..4bb6bfe7fac5f 100644 --- a/jstests/core/command_let_variables_merge_only.js +++ b/jstests/core/command_let_variables_merge_only.js @@ -6,6 +6,7 @@ * does_not_support_stepdowns, * does_not_support_causal_consistency, * uses_$out, + * references_foreign_collection, * ] */ (function() { diff --git a/jstests/core/commands_namespace_parsing.js b/jstests/core/commands_namespace_parsing.js index d352a0e3576c9..b48801beef590 100644 --- a/jstests/core/commands_namespace_parsing.js +++ b/jstests/core/commands_namespace_parsing.js @@ -19,6 +19,7 @@ // does_not_support_causal_consistency, // uses_compact, // ] +load("jstests/libs/fixture_helpers.js"); // This file tests that commands namespace parsing rejects embedded null bytes. // Note that for each command, a properly formatted command object must be passed to the helper @@ -63,9 +64,7 @@ function assertFailsWithInvalidNamespacesForField( } } -const hello = db.runCommand("hello"); -assert.commandWorked(hello); -const isMongos = (hello.msg === "isdbgrid"); +const runningOnMongos = FixtureHelpers.isMongos(db); db.commands_namespace_parsing.drop(); assert.commandWorked(db.commands_namespace_parsing.insert({a: 1})); @@ -182,7 +181,7 @@ assertFailsWithInvalidNamespacesForField("collection", isNotFullyQualified, isNotAdminCommand); -if (!isMongos) { +if (!runningOnMongos) { // Test godinsert fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( "godinsert", {godinsert: "", obj: {_id: 1}}, isNotFullyQualified, isNotAdminCommand); @@ -206,13 +205,13 @@ assertFailsWithInvalidNamespacesForField( assertFailsWithInvalidNamespacesForField( "planCacheClear", {planCacheClear: ""}, isNotFullyQualified, isNotAdminCommand); -if (!isMongos) { +if (!runningOnMongos) { // Test cleanupOrphaned fails with an invalid collection name. assertFailsWithInvalidNamespacesForField( "cleanupOrphaned", {cleanupOrphaned: ""}, isFullyQualified, isAdminCommand); } -if (isMongos) { +if (runningOnMongos) { // Test enableSharding fails with an invalid database name. assertFailsWithInvalidNamespacesForField( "enableSharding", {enableSharding: ""}, isNotFullyQualified, isAdminCommand); @@ -266,7 +265,7 @@ assertFailsWithInvalidNamespacesForField( assertFailsWithInvalidNamespacesForField( "create", {create: ""}, isNotFullyQualified, isNotAdminCommand); -if (!isMongos) { +if (!runningOnMongos) { // Test cloneCollectionAsCapped fails with an invalid source collection name. assertFailsWithInvalidNamespacesForField( "cloneCollectionAsCapped", @@ -308,7 +307,7 @@ assertFailsWithInvalidNamespacesForField( assertFailsWithInvalidNamespacesForField( "dropIndexes", {dropIndexes: "", index: "*"}, isNotFullyQualified, isNotAdminCommand); -if (!isMongos) { +if (!runningOnMongos) { // Test compact fails with an invalid collection name. 
assertFailsWithInvalidNamespacesForField( "compact", {compact: ""}, isNotFullyQualified, isNotAdminCommand); @@ -322,7 +321,7 @@ assertFailsWithInvalidNamespacesForField( isNotAdminCommand); // Test reIndex fails with an invalid collection name. -if (!isMongos) { +if (!runningOnMongos) { assertFailsWithInvalidNamespacesForField( "reIndex", {reIndex: ""}, isNotFullyQualified, isNotAdminCommand); } diff --git a/jstests/core/commands_with_uuid.js b/jstests/core/commands_with_uuid.js index b0260e0bc3180..3744e038992f5 100644 --- a/jstests/core/commands_with_uuid.js +++ b/jstests/core/commands_with_uuid.js @@ -10,6 +10,7 @@ * assumes_no_implicit_index_creation * ] */ +load("jstests/libs/fixture_helpers.js"); (function() { 'use strict'; @@ -29,10 +30,7 @@ if (uuid == null) { } // No support for UUIDs on mongos. -const hello = db.runCommand("hello"); -assert.commandWorked(hello); -const isMongos = (hello.msg === "isdbgrid"); -if (isMongos) { +if (FixtureHelpers.isMongos(db)) { return; } diff --git a/jstests/core/compound_wildcard_index_validation.js b/jstests/core/compound_wildcard_index_validation.js index e10b0d8883690..c64074342aad7 100644 --- a/jstests/core/compound_wildcard_index_validation.js +++ b/jstests/core/compound_wildcard_index_validation.js @@ -7,6 +7,7 @@ * does_not_support_stepdowns, * featureFlagCompoundWildcardIndexes, * requires_fcv_70, + * uses_full_validation, * ] */ diff --git a/jstests/core/connection_string_validation.js b/jstests/core/connection_string_validation.js index 6236e445a775d..43a7373b97301 100644 --- a/jstests/core/connection_string_validation.js +++ b/jstests/core/connection_string_validation.js @@ -5,7 +5,7 @@ // ] // Related to SERVER-8030. -port = "27017"; +let port = "27017"; if (db.getMongo().host.indexOf(":") >= 0) { var idx = db.getMongo().host.indexOf(":"); diff --git a/jstests/core/cover_null_queries.js b/jstests/core/cover_null_queries.js index 30b8025ecc936..7ac2d5d6d746f 100644 --- a/jstests/core/cover_null_queries.js +++ b/jstests/core/cover_null_queries.js @@ -10,11 +10,8 @@ * cqf_incompatible, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages() and getPlanStages(). +import {getPlanStages, getAggPlanStages} from "jstests/libs/analyze_plan.js"; load("jstests/libs/clustered_collections/clustered_collection_util.js"); const coll = db.cover_null_queries; @@ -1020,5 +1017,4 @@ validateGroupCountAggCmdOutputAndPlan({ filter: {"a.b": {$in: [null, []]}}, expectedCount: 10, expectedStages: {"IXSCAN": 1, "FETCH": 1}, -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/covered_query_with_sort.js b/jstests/core/covered_query_with_sort.js index 2f299c55d13a5..1aa32308331cc 100644 --- a/jstests/core/covered_query_with_sort.js +++ b/jstests/core/covered_query_with_sort.js @@ -7,10 +7,7 @@ // assumes_unsharded_collection, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'isIndexOnly', 'getPlanStage' and 'getWinningPlan'. 
+import {getPlanStage, getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js"; const coll = db.covered_query_with_sort; coll.drop(); @@ -39,5 +36,4 @@ const ixScanStage = getPlanStage(projectionCoveredStage, "IXSCAN"); assert.neq(ixScanStage, null, plan); const results = buildQuery().toArray(); -assert.eq(results, [{y: 0, x: 1}, {y: 0, x: 0}], results); -}()); \ No newline at end of file +assert.eq(results, [{y: 0, x: 1}, {y: 0, x: 0}], results); \ No newline at end of file diff --git a/jstests/core/dbcase.js b/jstests/core/dbcase.js index 23b5bccd30503..78d6e05f8abd4 100644 --- a/jstests/core/dbcase.js +++ b/jstests/core/dbcase.js @@ -5,15 +5,15 @@ // multiple_tenants_incompatible, // ] -a = db.getSiblingDB("dbcasetest_dbnamea"); -b = db.getSiblingDB("dbcasetest_dbnameA"); +let a = db.getSiblingDB("dbcasetest_dbnamea"); +let b = db.getSiblingDB("dbcasetest_dbnameA"); a.dropDatabase(); b.dropDatabase(); assert.commandWorked(a.foo.save({x: 1})); -res = b.foo.save({x: 1}); +let res = b.foo.save({x: 1}); assert.writeError(res); assert.neq(-1, db.getMongo().getDBNames().indexOf(a.getName())); @@ -23,8 +23,8 @@ printjson(db.getMongo().getDBs().databases); a.dropDatabase(); b.dropDatabase(); -ai = db.getMongo().getDBNames().indexOf(a.getName()); -bi = db.getMongo().getDBNames().indexOf(b.getName()); +let ai = db.getMongo().getDBNames().indexOf(a.getName()); +let bi = db.getMongo().getDBNames().indexOf(b.getName()); // One of these dbs may exist if there is a secondary active, but they must // not both exist. assert(ai == -1 || bi == -1); diff --git a/jstests/core/dbhash.js b/jstests/core/dbhash.js index c601c721f5be9..33ee736e4b114 100644 --- a/jstests/core/dbhash.js +++ b/jstests/core/dbhash.js @@ -3,15 +3,15 @@ // not_allowed_with_security_token, // ] -a = db.dbhasha; -b = db.dbhashb; +let a = db.dbhasha; +let b = db.dbhashb; a.drop(); b.drop(); // debug SERVER-761 db.getCollectionNames().forEach(function(x) { - v = db[x].validate(); + let v = db[x].validate(); if (!v.valid) { print(x); printjson(v); @@ -45,8 +45,8 @@ assert.neq(gh(a), gh(b), "A2"); b.insert({_id: 5}); assert.eq(gh(a), gh(b), "A3"); -dba = db.getSiblingDB("dbhasha"); -dbb = db.getSiblingDB("dbhashb"); +let dba = db.getSiblingDB("dbhasha"); +let dbb = db.getSiblingDB("dbhashb"); dba.dropDatabase(); dbb.dropDatabase(); diff --git a/jstests/core/dbhash2.js b/jstests/core/dbhash2.js index 93ec2abf99c3b..cf86a1a6d031d 100644 --- a/jstests/core/dbhash2.js +++ b/jstests/core/dbhash2.js @@ -4,20 +4,20 @@ // assumes_superuser_permissions, // ] -mydb = db.getSiblingDB("config"); +let mydb = db.getSiblingDB("config"); -t = mydb.foo; +let t = mydb.foo; t.drop(); assert.commandWorked(t.insert({x: 1})); -res1 = mydb.runCommand("dbhash"); -res2 = mydb.runCommand("dbhash"); +let res1 = mydb.runCommand("dbhash"); +let res2 = mydb.runCommand("dbhash"); assert.commandWorked(res1); assert.commandWorked(res2); assert.eq(res1.collections.foo, res2.collections.foo); assert.commandWorked(t.insert({x: 2})); -res3 = mydb.runCommand("dbhash"); +let res3 = mydb.runCommand("dbhash"); assert.commandWorked(res3); assert.neq(res1.collections.foo, res3.collections.foo); diff --git a/jstests/core/ddl/background_unique_indexes.js b/jstests/core/ddl/background_unique_indexes.js index fb1d0d9aee7ef..819dd1be89183 100644 --- a/jstests/core/ddl/background_unique_indexes.js +++ b/jstests/core/ddl/background_unique_indexes.js @@ -52,12 +52,10 @@ for (let iteration = 0; iteration < nIterations; iteration++) { assert.commandWorked(testDB.runCommand({update: 
collName, updates: updates})); // Create a background unique index on the collection. - assert.commandWorked(testDB.runCommand({ - createIndexes: collName, - indexes: [{key: {x: 1}, name: "x_1", background: true, unique: true}] - })); + assert.commandWorked(testDB.runCommand( + {createIndexes: collName, indexes: [{key: {x: 1}, name: "x_1", unique: true}]})); - // Generate updates that increment x on each document backwards by _id to avoid conficts + // Generate updates that increment x on each document backwards by _id to avoid conflicts // when applied in-order. updates = []; for (let i = 0; i < nOps; i++) { diff --git a/jstests/core/ddl/bad_index_plugin.js b/jstests/core/ddl/bad_index_plugin.js index c9cd549cc03e0..cb52ef0c1abcf 100644 --- a/jstests/core/ddl/bad_index_plugin.js +++ b/jstests/core/ddl/bad_index_plugin.js @@ -1,5 +1,5 @@ // SERVER-5826 ensure you can't build an index with a non-existent plugin -t = db.bad_index_plugin; +let t = db.bad_index_plugin; assert.commandWorked(t.createIndex({good: 1})); assert.eq(t.getIndexes().length, 2); // good + _id diff --git a/jstests/core/ddl/capped_convertToCapped1.js b/jstests/core/ddl/capped_convertToCapped1.js index 137705c8661c2..4f51edc6c247e 100644 --- a/jstests/core/ddl/capped_convertToCapped1.js +++ b/jstests/core/ddl/capped_convertToCapped1.js @@ -17,21 +17,21 @@ * ] */ -source = db.capped_convertToCapped1; -dest = db.capped_convertToCapped1_clone; +let source = db.capped_convertToCapped1; +let dest = db.capped_convertToCapped1_clone; source.drop(); dest.drop(); -N = 1000; +let N = 1000; -for (i = 0; i < N; ++i) { +for (let i = 0; i < N; ++i) { source.save({i: i}); } assert.eq(N, source.count()); // should all fit -res = db.runCommand( +let res = db.runCommand( {cloneCollectionAsCapped: source.getName(), toCollection: dest.getName(), size: 100000}); assert.commandWorked(res); assert.eq(source.count(), dest.count()); diff --git a/jstests/core/ddl/collection_uuid_index_commands.js b/jstests/core/ddl/collection_uuid_index_commands.js index 85c9f0a91247f..fae47998e33a2 100644 --- a/jstests/core/ddl/collection_uuid_index_commands.js +++ b/jstests/core/ddl/collection_uuid_index_commands.js @@ -31,7 +31,7 @@ const validateErrorResponse = function( }; const testCommand = function(cmd, cmdObj) { - const testDB = db.getSiblingDB(jsTestName()); + const testDB = db.getSiblingDB("coll_uuid_index_cmds"); assert.commandWorked(testDB.dropDatabase()); const coll = testDB['coll']; assert.commandWorked(coll.insert({x: 1, y: 2})); @@ -104,4 +104,4 @@ const testCommand = function(cmd, cmdObj) { testCommand("createIndexes", {createIndexes: "", indexes: [{name: "x_1", key: {x: 1}}]}); testCommand("dropIndexes", {dropIndexes: "", index: {y: 1}}); -})(); \ No newline at end of file +})(); diff --git a/jstests/core/ddl/collection_uuid_rename_collection.js b/jstests/core/ddl/collection_uuid_rename_collection.js index bc294fd7aabc6..81d695cc03929 100644 --- a/jstests/core/ddl/collection_uuid_rename_collection.js +++ b/jstests/core/ddl/collection_uuid_rename_collection.js @@ -11,7 +11,7 @@ (function() { 'use strict'; -const testDB = db.getSiblingDB(jsTestName()); +const testDB = db.getSiblingDB("coll_uuid_rename_coll"); assert.commandWorked(testDB.dropDatabase()); const coll = testDB.coll; diff --git a/jstests/core/ddl/collmod_convert_index_uniqueness.js b/jstests/core/ddl/collmod_convert_index_uniqueness.js index ab2d82377889d..738b20a7d9e27 100644 --- a/jstests/core/ddl/collmod_convert_index_uniqueness.js +++ 
b/jstests/core/ddl/collmod_convert_index_uniqueness.js @@ -14,14 +14,11 @@ * ] */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (!FeatureFlagUtil.isEnabled(db, "CollModIndexUnique")) { jsTestLog('Skipping test because the collMod unique index feature flag is disabled.'); - return; + quit(); } const collName = 'collmod_convert_to_unique'; @@ -183,5 +180,4 @@ if (db.getMongo().isMongos()) { } // Tests the index now accepts duplicate keys. -assert.commandWorked(coll.insert({_id: 100, a: 100})); -})(); +assert.commandWorked(coll.insert({_id: 100, a: 100})); \ No newline at end of file diff --git a/jstests/core/ddl/collmod_convert_to_unique_apply_ops.js b/jstests/core/ddl/collmod_convert_to_unique_apply_ops.js index 658382999941d..ce7548a7fa75f 100644 --- a/jstests/core/ddl/collmod_convert_to_unique_apply_ops.js +++ b/jstests/core/ddl/collmod_convert_to_unique_apply_ops.js @@ -20,14 +20,11 @@ * ] */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (!FeatureFlagUtil.isEnabled(db, "CollModIndexUnique")) { jsTestLog('Skipping test because the collMod unique index feature flag is disabled.'); - return; + quit(); } const collName = 'collmod_convert_to_unique_apply_ops'; @@ -97,5 +94,4 @@ assert.sameMembers([true], result.results, tojson(result)); assert.eq(countUnique({a: 1}), 1, 'index should be unique now: ' + tojson(coll.getIndexes())); // Test uniqueness constraint. -assert.commandFailedWithCode(coll.insert({_id: 100, a: 100}), ErrorCodes.DuplicateKey); -})(); +assert.commandFailedWithCode(coll.insert({_id: 100, a: 100}), ErrorCodes.DuplicateKey); \ No newline at end of file diff --git a/jstests/core/ddl/collmod_convert_to_unique_violations.js b/jstests/core/ddl/collmod_convert_to_unique_violations.js index 53b8abeec6e65..02d055dfa3bf2 100644 --- a/jstests/core/ddl/collmod_convert_to_unique_violations.js +++ b/jstests/core/ddl/collmod_convert_to_unique_violations.js @@ -15,15 +15,13 @@ * ] */ -(function() { -'use strict'; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -load("jstests/libs/feature_flag_util.js"); load("jstests/libs/fixture_helpers.js"); // For 'isMongos' if (!FeatureFlagUtil.isEnabled(db, "CollModIndexUnique")) { jsTestLog('Skipping test because the collMod unique index feature flag is disabled.'); - return; + quit(); } function sortViolationsArray(arr) { @@ -97,5 +95,4 @@ assert.commandWorked(coll.insert({_id: 9, a: 101, b: 4})); assertFailedWithViolations({a: 1, b: 1}, [{ids: [4, 9]}, {ids: [6, 7, 8]}]); assert.commandWorked(coll.insert({_id: "10", a: 101, b: 4})); -assertFailedWithViolations({a: 1, b: 1}, [{ids: [4, 9, "10"]}, {ids: [6, 7, 8]}]); -})(); +assertFailedWithViolations({a: 1, b: 1}, [{ids: [4, 9, "10"]}, {ids: [6, 7, 8]}]); \ No newline at end of file diff --git a/jstests/core/ddl/create_collection.js b/jstests/core/ddl/create_collection.js index a38d9efb97004..e0a09b70b8aab 100644 --- a/jstests/core/ddl/create_collection.js +++ b/jstests/core/ddl/create_collection.js @@ -12,13 +12,8 @@ load("jstests/libs/index_catalog_helpers.js"); load("jstests/libs/clustered_collections/clustered_collection_util.js"); -// TODO SERVER-73934: Change assertions on 'drop' command results throughout this file to -// always expect the command worked. Currently, they can return NamespaceNotFound on -// server versions < 7.0. - // "create" command rejects invalid options. 
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode(db.createCollection("create_collection", {unknown: 1}), 40415); // Cannot create a collection with null characters. @@ -29,8 +24,7 @@ assert.commandFailedWithCode(db.createCollection("ab\0"), ErrorCodes.InvalidName // The collection name length limit was upped in 4.4, try creating a collection with a longer // name than previously allowed. const longCollName = 'a'.repeat(200); -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: longCollName}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: longCollName})); assert.commandWorked(db.createCollection(longCollName)); // @@ -38,8 +32,7 @@ assert.commandWorked(db.createCollection(longCollName)); // // "idIndex" field not allowed with "viewOn". -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandWorked(db.createCollection("create_collection")); assert.commandFailedWithCode(db.runCommand({ create: "create_view", @@ -49,42 +42,36 @@ assert.commandFailedWithCode(db.runCommand({ ErrorCodes.InvalidOptions); // "idIndex" field not allowed with "autoIndexId". -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode( db.createCollection("create_collection", {autoIndexId: false, idIndex: {key: {_id: 1}, name: "_id_"}}), ErrorCodes.InvalidOptions); // "idIndex" field must be an object. -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: 1}), ErrorCodes.TypeMismatch); // "idIndex" field cannot be empty. -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode(db.createCollection("create_collection", {idIndex: {}}), ErrorCodes.FailedToParse); // "idIndex" field must be a specification for an _id index. -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode( db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "a_1"}}), ErrorCodes.BadValue); // "idIndex" field must have "key" equal to {_id: 1}. -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode( db.createCollection("create_collection", {idIndex: {key: {a: 1}, name: "_id_"}}), ErrorCodes.BadValue); // The name of an _id index gets corrected to "_id_". 
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandWorked( db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "a_1"}})); var indexSpec = IndexCatalogHelpers.findByKeyPattern(db.create_collection.getIndexes(), {_id: 1}); @@ -92,16 +79,14 @@ assert.neq(indexSpec, null); assert.eq(indexSpec.name, "_id_", tojson(indexSpec)); // "idIndex" field must only contain fields that are allowed for an _id index. -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode( db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", sparse: true}}), ErrorCodes.InvalidIndexSpecificationOption); // "create" creates v=2 _id index when "v" is not specified in "idIndex". -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandWorked( db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_"}})); indexSpec = IndexCatalogHelpers.findByName(db.create_collection.getIndexes(), "_id_"); @@ -109,8 +94,7 @@ assert.neq(indexSpec, null); assert.eq(indexSpec.v, 2, tojson(indexSpec)); // "create" creates v=1 _id index when "idIndex" has "v" equal to 1. -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandWorked( db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 1}})); indexSpec = IndexCatalogHelpers.findByName(db.create_collection.getIndexes(), "_id_"); @@ -118,8 +102,7 @@ assert.neq(indexSpec, null); assert.eq(indexSpec.v, 1, tojson(indexSpec)); // "create" creates v=2 _id index when "idIndex" has "v" equal to 2. -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandWorked( db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", v: 2}})); indexSpec = IndexCatalogHelpers.findByName(db.create_collection.getIndexes(), "_id_"); @@ -127,31 +110,27 @@ assert.neq(indexSpec, null); assert.eq(indexSpec.v, 2, tojson(indexSpec)); // "collation" field of "idIndex" must match collection default collation. 
-assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode( db.createCollection("create_collection", {idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}}}), ErrorCodes.BadValue); -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode(db.createCollection("create_collection", { collation: {locale: "fr_CA"}, idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}} }), ErrorCodes.BadValue); -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandFailedWithCode(db.createCollection("create_collection", { collation: {locale: "fr_CA"}, idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "simple"}} }), ErrorCodes.BadValue); -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandWorked(db.createCollection("create_collection", { collation: {locale: "en_US", strength: 3}, idIndex: {key: {_id: 1}, name: "_id_", collation: {locale: "en_US"}} @@ -162,8 +141,7 @@ assert.eq(indexSpec.collation.locale, "en_US", tojson(indexSpec)); // If "collation" field is not present in "idIndex", _id index inherits collection default // collation. -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); assert.commandWorked(db.createCollection( "create_collection", {collation: {locale: "en_US"}, idIndex: {key: {_id: 1}, name: "_id_"}})); indexSpec = IndexCatalogHelpers.findByName(db.create_collection.getIndexes(), "_id_"); @@ -179,14 +157,11 @@ assert.commandFailedWithCode(db.createCollection('capped_no_size_no_max', {cappe ErrorCodes.InvalidOptions); assert.commandFailedWithCode(db.createCollection('capped_no_size', {capped: true, max: 10}), ErrorCodes.InvalidOptions); -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "no_capped"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "no_capped"})); assert.commandWorked(db.createCollection('no_capped'), {capped: false}); -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "capped_no_max"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "capped_no_max"})); assert.commandWorked(db.createCollection('capped_no_max', {capped: true, size: 256})); -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "capped_with_max_and_size"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "capped_with_max_and_size"})); assert.commandWorked( db.createCollection('capped_with_max_and_size', {capped: true, max: 10, size: 256})); @@ -203,14 +178,7 @@ if (ClusteredCollectionUtil.areAllCollectionsClustered(db.getMongo())) { return; } -// The remainder of this test will not work on server versions < 7.0 as the 'create' command -// is not idempotent there. TODO SERVER-74062: remove this. 
-if (db.version().split('.')[0] < 7) { - return; -} - -assert.commandWorkedOrFailedWithCode(db.runCommand({drop: "create_collection"}), - ErrorCodes.NamespaceNotFound); +assert.commandWorked(db.runCommand({drop: "create_collection"})); // Creating a collection that already exists with no options specified reports success. assert.commandWorked(db.createCollection("create_collection")); diff --git a/jstests/core/ddl/create_index_helper_validation.js b/jstests/core/ddl/create_index_helper_validation.js index 1b11a50c6ca83..854c82be064cc 100644 --- a/jstests/core/ddl/create_index_helper_validation.js +++ b/jstests/core/ddl/create_index_helper_validation.js @@ -10,20 +10,17 @@ assert.throws(() => coll.createIndexes( /* keys */[{a: 1}], /* options */ {}, /* commitQuorum */ "majority", - {background: true}, {unique: true})); assert.throws(() => coll.createIndex( /* keys */ {a: 1}, /* options */ {}, /* commitQuorum */ "majority", - {background: true}, {unique: true})); assert.throws(() => coll.createIndex( /* keys */ {a: 1}, /* options */ {}, /* commitQuorum */ "majority", - {background: true}, {unique: true})); -}()); \ No newline at end of file +}()); diff --git a/jstests/core/ddl/create_indexes.js b/jstests/core/ddl/create_indexes.js index 6459883846642..3ab29ffb6a40a 100644 --- a/jstests/core/ddl/create_indexes.js +++ b/jstests/core/ddl/create_indexes.js @@ -6,12 +6,13 @@ */ (function() { 'use strict'; +load("jstests/libs/fixture_helpers.js"); const kUnknownIDLFieldError = 40415; -const isMongos = ("isdbgrid" == db.runCommand("hello").msg); +const runningOnMongos = FixtureHelpers.isMongos(db); const extractResult = function(obj) { - if (!isMongos) + if (!runningOnMongos) return obj; // Sample mongos format: diff --git a/jstests/core/ddl/drop3.js b/jstests/core/ddl/drop3.js index 78d4872a6c01a..7829b9cf3108f 100644 --- a/jstests/core/ddl/drop3.js +++ b/jstests/core/ddl/drop3.js @@ -4,8 +4,8 @@ // does_not_support_causal_consistency, // ] -t = db.jstests_drop3; -sub = t.sub; +let t = db.jstests_drop3; +let sub = t.sub; t.drop(); sub.drop(); diff --git a/jstests/core/ddl/drop_collection.js b/jstests/core/ddl/drop_collection.js index faf4a2e64d2cf..35df17b10ffe8 100644 --- a/jstests/core/ddl/drop_collection.js +++ b/jstests/core/ddl/drop_collection.js @@ -28,9 +28,7 @@ const coll = db[jsTestName() + "_coll"]; jsTest.log("Drop Unexistent collection."); { // Drop the collection - // TODO (SERVER-73934): NamespaceNotFound will be returned by mongod versions earlier than 7.0. - assert.commandWorkedOrFailedWithCode(db.runCommand({drop: coll.getName()}), - ErrorCodes.NamespaceNotFound); + assert.commandWorked(db.runCommand({drop: coll.getName()})); assertCollectionDropped(coll.getFullName()); } @@ -45,9 +43,7 @@ jsTest.log("Drop existing collection."); assertCollectionDropped(coll.getFullName()); // Test idempotency - // TODO (SERVER-73934): NamespaceNotFound will be returned by mongod versions earlier than 7.0. 
- assert.commandWorkedOrFailedWithCode(db.runCommand({drop: coll.getName()}), - ErrorCodes.NamespaceNotFound); + assert.commandWorked(db.runCommand({drop: coll.getName()})); assertCollectionDropped(coll.getFullName()); } diff --git a/jstests/core/ddl/drop_index.js b/jstests/core/ddl/drop_index.js index 3f3e815d47dde..ed11b3a5e4677 100644 --- a/jstests/core/ddl/drop_index.js +++ b/jstests/core/ddl/drop_index.js @@ -3,6 +3,7 @@ // @tags: [assumes_no_implicit_index_creation] (function() { 'use strict'; +load("jstests/libs/fixture_helpers.js"); const t = db.drop_index; t.drop(); @@ -51,7 +52,7 @@ assertIndexes(['b_1', 'c_1', 'd_1', 'e_1'], 'dropping {a: 1} by name'); assert.commandWorked(t.dropIndex({b: 1})); assertIndexes(['c_1', 'd_1', 'e_1'], 'dropping {b: 1} by key pattern'); -const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"; +const runningOnMongos = FixtureHelpers.isMongos(db); // Not allowed to drop _id index. for (const dropIndexArg of ['_id_', {_id: 1}]) { @@ -59,7 +60,7 @@ for (const dropIndexArg of ['_id_', {_id: 1}]) { jsTestLog(`Reply to dropIndexes with arg ${tojson(dropIndexArg)}: ${tojson(dropIdIndexReply)}`); assert.commandFailedWithCode(dropIdIndexReply, ErrorCodes.InvalidOptions); assert(dropIdIndexReply.hasOwnProperty('errmsg')); - if (isMongos) { + if (runningOnMongos) { assert(dropIdIndexReply.hasOwnProperty('raw')); } } diff --git a/jstests/core/ddl/index_prepareUnique.js b/jstests/core/ddl/index_prepareUnique.js index 7e47db9840a2d..fa66168201ca6 100644 --- a/jstests/core/ddl/index_prepareUnique.js +++ b/jstests/core/ddl/index_prepareUnique.js @@ -3,14 +3,11 @@ * * @tags: [assumes_no_implicit_collection_creation_after_drop] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (!FeatureFlagUtil.isEnabled(db, "CollModIndexUnique")) { jsTestLog('Skipping test because the collMod unique index feature flag is disabled.'); - return; + quit(); } const collName_prefix = "index_prepareUnique"; @@ -79,5 +76,4 @@ assert.commandWorked(coll4.createIndex({a: 1}, {unique: true, prepareUnique: fal indexesWithPrepareUnique = coll4.getIndexes().filter(function(doc) { return friendlyEqual(doc.prepareUnique, true); }); -assert.eq(0, indexesWithPrepareUnique.length); -})(); +assert.eq(0, indexesWithPrepareUnique.length); \ No newline at end of file diff --git a/jstests/core/ddl/killop_drop_collection.js b/jstests/core/ddl/killop_drop_collection.js index 17e0154fbfbba..a3df160c99112 100644 --- a/jstests/core/ddl/killop_drop_collection.js +++ b/jstests/core/ddl/killop_drop_collection.js @@ -22,7 +22,7 @@ collection.drop(); for (let i = 0; i < 1000; i++) { assert.commandWorked(collection.insert({x: i})); } -assert.commandWorked(collection.createIndex({x: 1}, {background: true})); +assert.commandWorked(collection.createIndex({x: 1})); // Attempt to fsyncLock the database, aborting early if the storage engine doesn't support it. 
const storageEngine = jsTest.options().storageEngine; diff --git a/jstests/core/ddl/rename_collection_capped.js b/jstests/core/ddl/rename_collection_capped.js index 670e5ce611b29..e0eb2efa9c317 100644 --- a/jstests/core/ddl/rename_collection_capped.js +++ b/jstests/core/ddl/rename_collection_capped.js @@ -13,9 +13,9 @@ * ] */ -a = db.jstests_rename_a; -b = db.jstests_rename_b; -c = db.jstests_rename_c; +let a = db.jstests_rename_a; +let b = db.jstests_rename_b; +let c = db.jstests_rename_c; a.drop(); b.drop(); @@ -26,14 +26,14 @@ c.drop(); // note we use floats to make sure numbers are represented as doubles for SpiderMonkey, since test // relies on record size db.createCollection("jstests_rename_a", {capped: true, size: 10000}); -for (i = 0.1; i < 10; ++i) { +for (let i = 0.1; i < 10; ++i) { a.save({i: i}); } assert.commandWorked( db.adminCommand({renameCollection: "test.jstests_rename_a", to: "test.jstests_rename_b"})); assert.eq(1, b.countDocuments({i: 9.1})); printjson(b.stats()); -for (i = 10.1; i < 1000; ++i) { +for (var i = 10.1; i < 1000; ++i) { b.save({i: i}); } printjson(b.stats()); diff --git a/jstests/core/ddl/rename_collection_staytemp.js b/jstests/core/ddl/rename_collection_staytemp.js index 5db125f1a23db..5107b39962742 100644 --- a/jstests/core/ddl/rename_collection_staytemp.js +++ b/jstests/core/ddl/rename_collection_staytemp.js @@ -12,8 +12,8 @@ * ] */ -orig = 'rename_stayTemp_orig'; -dest = 'rename_stayTemp_dest'; +let orig = 'rename_stayTemp_orig'; +let dest = 'rename_stayTemp_dest'; db[orig].drop(); db[dest].drop(); diff --git a/jstests/core/delx.js b/jstests/core/delx.js index 331f6a18b677a..b78e3051f8fdd 100644 --- a/jstests/core/delx.js +++ b/jstests/core/delx.js @@ -1,11 +1,11 @@ // @tags: [assumes_against_mongod_not_mongos, requires_getmore, requires_non_retryable_writes] -a = db.getSiblingDB("delxa"); -b = db.getSiblingDB("delxb"); +let a = db.getSiblingDB("delxa"); +let b = db.getSiblingDB("delxb"); function setup(mydb) { mydb.dropDatabase(); - for (i = 0; i < 100; i++) { + for (let i = 0; i < 100; i++) { mydb.foo.insert({_id: i}); } } @@ -16,8 +16,8 @@ setup(b); assert.eq(100, a.foo.find().itcount(), "A1"); assert.eq(100, b.foo.find().itcount(), "A2"); -x = a.foo.find().sort({_id: 1}).batchSize(60); -y = b.foo.find().sort({_id: 1}).batchSize(60); +let x = a.foo.find().sort({_id: 1}).batchSize(60); +let y = b.foo.find().sort({_id: 1}).batchSize(60); x.next(); y.next(); @@ -27,7 +27,7 @@ a.foo.remove({_id: {$gt: 50}}); assert.eq(51, a.foo.find().itcount(), "B1"); assert.eq(100, b.foo.find().itcount(), "B2"); -xCount = x.itcount(); +let xCount = x.itcount(); assert(xCount == 59 || xCount == 99, "C1 : " + xCount); // snapshot or not is ok assert.eq( 99, diff --git a/jstests/core/doc_validation/bypass_doc_validation.js b/jstests/core/doc_validation/bypass_doc_validation.js index 4ba40069ee9c0..dff3a22847e87 100644 --- a/jstests/core/doc_validation/bypass_doc_validation.js +++ b/jstests/core/doc_validation/bypass_doc_validation.js @@ -12,6 +12,7 @@ // tenant_migration_incompatible, // # This test has statements that do not support non-local read concern. 
// does_not_support_causal_consistency, +// references_foreign_collection, // ] /** diff --git a/jstests/core/embedded_dollar_prefixed_field_validation.js b/jstests/core/embedded_dollar_prefixed_field_validation.js new file mode 100644 index 0000000000000..2e58ccd057ebd --- /dev/null +++ b/jstests/core/embedded_dollar_prefixed_field_validation.js @@ -0,0 +1,61 @@ +/** + * SERVER-75880 Test that _id cannot be an object with a deep nested element that has a $-prefixed + * field name. + * + * @tags: [ + * assumes_unsharded_collection, + * requires_fcv_71, + * ] + */ +(function() { +"use strict"; + +const coll = db.field_name_validation; +coll.drop(); + +// Insert command field name validation +assert.writeErrorWithCode(coll.insert({_id: {a: {$b: 1}}, x: 1}), + ErrorCodes.DollarPrefixedFieldName); + +// Update commands with $set +coll.drop(); +assert.commandWorked(coll.insert({_id: 0, a: 1})); +assert.writeErrorWithCode(coll.update({"_id.a.$b": 1}, {$set: {x: 1}}, {upsert: true}), + ErrorCodes.DollarPrefixedFieldName); +assert.writeErrorWithCode(coll.update({x: 1}, {$set: {_id: {a: {$b: 1}}}}, {upsert: true}), + ErrorCodes.DollarPrefixedFieldName); +assert.writeErrorWithCode(coll.update({a: 1}, {$set: {_id: {a: {$b: 1}}}}, {upsert: true}), + [ErrorCodes.DollarPrefixedFieldName, ErrorCodes.ImmutableField]); + +// Replacement-style updates +coll.drop(); +assert.commandWorked(coll.insert({_id: 0, a: 1})); +assert.writeErrorWithCode(coll.update({_id: 0}, {_id: {a: {$b: 1}}}), + ErrorCodes.DollarPrefixedFieldName); +assert.writeErrorWithCode(coll.update({"_id.a.$b": 1}, {_id: {a: {$b: 1}}}, {upsert: true}), + ErrorCodes.NotExactValueField); +assert.writeErrorWithCode(coll.update({_id: {a: {$b: 1}}}, {_id: {a: {$b: 1}}}, {upsert: true}), + ErrorCodes.DollarPrefixedFieldName); + +// Pipeline-style updates with $replaceWith +coll.drop(); +assert.commandWorked(coll.insert({_id: 0, a: 1})); +assert.writeErrorWithCode(coll.update({_id: 0}, [{$replaceWith: {$literal: {_id: {a: {$b: 1}}}}}]), + ErrorCodes.DollarPrefixedFieldName); +assert.writeErrorWithCode( + coll.update({"_id.a.$b": 1}, [{$replaceWith: {$literal: {a: {$a: 1}}}}], {upsert: true}), + ErrorCodes.DollarPrefixedFieldName); + +// FindAndModify field name validation +coll.drop(); +assert.commandWorked(coll.insert({_id: 0, a: 1})); +assert.throwsWithCode(() => { + coll.findAndModify({query: {_id: 0}, update: {_id: {a: {$b: 1}}}}); +}, ErrorCodes.DollarPrefixedFieldName); +assert.throwsWithCode(() => { + coll.findAndModify({query: {"_id.a.$b": 1}, update: {_id: {a: {$b: 1}}}, upsert: true}); +}, ErrorCodes.NotExactValueField); +assert.throwsWithCode(() => { + coll.findAndModify({query: {_id: {a: {$b: 1}}}, update: {_id: {a: {$b: 1}}}, upsert: true}); +}, ErrorCodes.DollarPrefixedFieldName); +})(); diff --git a/jstests/core/error5.js b/jstests/core/error5.js index edcfa059d25bf..fe4efef15e3bb 100644 --- a/jstests/core/error5.js +++ b/jstests/core/error5.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.error5; +let t = db.error5; t.drop(); assert.throws(function() { diff --git a/jstests/core/exhaust.js b/jstests/core/exhaust.js index 19b6c04d0fc5d..a2e9028bdd1dd 100644 --- a/jstests/core/exhaust.js +++ b/jstests/core/exhaust.js @@ -3,6 +3,7 @@ // # This test uses exhaust which does not use runCommand (required by the inject_tenant_prefix.js // # override). 
// tenant_migration_incompatible, +// no_selinux // ] (function() { diff --git a/jstests/core/find_with_resume_after_param.js b/jstests/core/find_with_resume_after_param.js new file mode 100644 index 0000000000000..628a9e3a86f58 --- /dev/null +++ b/jstests/core/find_with_resume_after_param.js @@ -0,0 +1,133 @@ +/** + * Tests that the internal parameter "$_resumeAfter" validates the type of the 'recordId' for + * clustered and non-clustered collections. + * @tags: [ + * # Queries on mongoS may not request or provide a resume token. + * assumes_against_mongod_not_mongos, + * ] + */ + +load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +const clustered = db.clusteredColl; +const nonClustered = db.normalColl; +const clusteredName = clustered.getName(); +const nonClusteredName = nonClustered.getName(); + +assertDropCollection(db, clusteredName); +assertDropCollection(db, nonClusteredName); + +db.createCollection(clusteredName, {clusteredIndex: {key: {_id: 1}, unique: true}}); +db.createCollection(nonClusteredName); + +// Insert some documents. +const docs = [{_id: 1, a: 1}, {_id: 2, a: 2}, {_id: 3, a: 3}]; +assert.commandWorked(clustered.insertMany(docs)); +assert.commandWorked(nonClustered.insertMany(docs)); + +function validateFailedResumeAfterInFind({collName, resumeAfterSpec, errorCode, explainFail}) { + const spec = { + find: collName, + filter: {}, + $_requestResumeToken: true, + $_resumeAfter: resumeAfterSpec, + hint: {$natural: 1} + }; + assert.commandFailedWithCode(db.runCommand(spec), errorCode); + // Run the same query under an explain. + if (explainFail) { + assert.commandFailedWithCode(db.runCommand({explain: spec}), errorCode); + } else { + assert.commandWorked(db.runCommand({explain: spec})); + } +} + +function validateFailedResumeAfterInAggregate({collName, resumeAfterSpec, errorCode, explainFail}) { + const spec = { + aggregate: collName, + pipeline: [], + $_requestResumeToken: true, + $_resumeAfter: resumeAfterSpec, + hint: {$natural: 1}, + cursor: {} + }; + assert.commandFailedWithCode(db.runCommand(spec), errorCode); + // Run the same query under an explain. + if (explainFail) { + assert.commandFailedWithCode(db.runCommand({explain: spec}), errorCode); + } else { + assert.commandWorked(db.runCommand({explain: spec})); + } +} + +function testResumeAfter(validateFunction) { + // Confirm $_resumeAfter will fail for clustered collections if the recordId is Long. + validateFunction({ + collName: clusteredName, + resumeAfterSpec: {'$recordId': NumberLong(2)}, + errorCode: 7738600, + explainFail: true + }); + + // Confirm $_resumeAfter will fail with 'KeyNotFound' if given a non-existent recordId. + validateFunction({ + collName: clusteredName, + resumeAfterSpec: {'$recordId': BinData(5, '1234')}, + errorCode: ErrorCodes.KeyNotFound + }); + + // Confirm $_resumeAfter will fail for normal collections if it is of type BinData. + validateFunction({ + collName: nonClusteredName, + resumeAfterSpec: {'$recordId': BinData(5, '1234')}, + errorCode: 7738600, + explainFail: true + }); + + // Confirm $_resumeAfter token will fail with 'KeyNotFound' if given a non-existent recordId. + validateFunction({ + collName: nonClusteredName, + resumeAfterSpec: {'$recordId': NumberLong(8)}, + errorCode: ErrorCodes.KeyNotFound + }); + + if (checkSBEEnabled(db)) { + // This case really means that 'forceClassicEngine' has not been set.
It does not mean any + // SBE-specific feature flags are turned on. + validateFunction({ + collName: nonClusteredName, + resumeAfterSpec: {'$recordId': null}, + errorCode: ErrorCodes.KeyNotFound + }); + } else { + assert.commandWorked(db.runCommand({ + find: nonClusteredName, + filter: {}, + $_requestResumeToken: true, + $_resumeAfter: {'$recordId': null}, + hint: {$natural: 1} + })); + } + + // Confirm $_resumeAfter will fail to parse if collection does not exist. + validateFunction({ + collName: "random", + resumeAfterSpec: {'$recordId': null, "anotherField": null}, + errorCode: ErrorCodes.BadValue, + explainFail: true + }); + validateFunction({ + collName: "random", + resumeAfterSpec: "string", + errorCode: ErrorCodes.TypeMismatch, + explainFail: true + }); +} + +testResumeAfter(validateFailedResumeAfterInFind); +// TODO(SERVER-77873): remove "featureFlagReshardingImprovements" +if (FeatureFlagUtil.isPresentAndEnabled(db, "ReshardingImprovements")) { + testResumeAfter(validateFailedResumeAfterInAggregate); +} \ No newline at end of file diff --git a/jstests/core/geo_parse_err.js b/jstests/core/geo_parse_err.js index 73bc451bd7ccd..a03d194932a68 100644 --- a/jstests/core/geo_parse_err.js +++ b/jstests/core/geo_parse_err.js @@ -121,4 +121,4 @@ err = t.insert({ }); assert.includes(err.getWriteError().errmsg, "Element 1 of \"geometries\" must be an object, instead got type double:"); -})(); \ No newline at end of file +})(); diff --git a/jstests/core/hashed_partial_and_sparse_index.js b/jstests/core/hashed_partial_and_sparse_index.js index 4c57f3e1eee38..5036d999bbae8 100644 --- a/jstests/core/hashed_partial_and_sparse_index.js +++ b/jstests/core/hashed_partial_and_sparse_index.js @@ -6,11 +6,8 @@ * assumes_read_concern_local, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand(). +import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js"; const coll = db.hashed_partial_index; coll.drop(); @@ -91,5 +88,4 @@ testSparseHashedIndex({a: "hashed", b: 1}); // Verify that index is used if the query predicate matches the 'partialFilterExpression'. 
validateFindCmdOutputAndPlan( {filter: {b: 6}, expectedOutput: [{a: 1, b: 6}], expectedStages: ["IXSCAN", "FETCH"]}); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/hint1.js b/jstests/core/hint1.js index d584144e64170..09d328350c3da 100644 --- a/jstests/core/hint1.js +++ b/jstests/core/hint1.js @@ -1,4 +1,4 @@ -p = db.jstests_hint1; +let p = db.jstests_hint1; p.drop(); p.save({ts: new Date(1), cls: "entry", verticals: "alleyinsider", live: true}); diff --git a/jstests/core/id1.js b/jstests/core/id1.js index 79e26e33e90e2..a3fbaea3bfcb8 100644 --- a/jstests/core/id1.js +++ b/jstests/core/id1.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.id1; +let t = db.id1; t.drop(); t.save({_id: {a: 1, b: 2}, x: "a"}); diff --git a/jstests/core/index/bindata_indexonly.js b/jstests/core/index/bindata_indexonly.js index 3fc83d0f2fd6e..d9386370672ad 100644 --- a/jstests/core/index/bindata_indexonly.js +++ b/jstests/core/index/bindata_indexonly.js @@ -6,10 +6,7 @@ * assumes_read_concern_local, * ] */ -(function() { -'use strict'; - -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.jstests_bindata_indexonly; @@ -73,5 +70,4 @@ assert(isIndexOnly(db, explain), "indexonly.$gte.3 - must be index-only"); assert.eq( 2, explain.executionStats.nReturned, "correctcount.$gte.3 - not returning correct documents"); -coll.drop(); -})(); +coll.drop(); \ No newline at end of file diff --git a/jstests/core/index/covered/coveredIndex1.js b/jstests/core/index/covered/coveredIndex1.js index 0ea6b523a26fa..ea4c223fa85e3 100644 --- a/jstests/core/index/covered/coveredIndex1.js +++ b/jstests/core/index/covered/coveredIndex1.js @@ -11,15 +11,12 @@ * assumes_no_implicit_index_creation, * ] */ -(function() { -"use strict"; +// Include helpers for analyzing explain output. +import {getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js"; const coll = db["jstests_coveredIndex1"]; coll.drop(); -// Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); - assert.commandWorked(coll.insert({order: 0, fn: "john", ln: "doe"})); assert.commandWorked(coll.insert({order: 1, fn: "jack", ln: "doe"})); assert.commandWorked(coll.insert({order: 2, fn: "john", ln: "smith"})); @@ -90,4 +87,3 @@ assertIfQueryIsCovered({obj: {a: 1, b: "blah"}}, {obj: 1, _id: 0}, true); assert.commandWorked(coll.dropIndex({obj: 1})); assert.commandWorked(coll.createIndex({"obj.a": 1, "obj.b": 1})); assertIfQueryIsCovered({"obj.a": 1}, {obj: 1}, false); -}()); diff --git a/jstests/core/index/covered/coveredIndex2.js b/jstests/core/index/covered/coveredIndex2.js index 72724dede1fab..e3fb2a5936007 100644 --- a/jstests/core/index/covered/coveredIndex2.js +++ b/jstests/core/index/covered/coveredIndex2.js @@ -8,15 +8,12 @@ // # plans. // assumes_no_implicit_index_creation, // ] -(function() { -"use strict"; +// Include helpers for analyzing explain output. +import {getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js"; const t = db["jstests_coveredIndex2"]; t.drop(); -// Include helpers for analyzing explain output. 
-load("jstests/libs/analyze_plan.js"); - assert.commandWorked(t.insert({a: 1})); assert.commandWorked(t.insert({a: 2})); assert.eq(t.findOne({a: 1}).a, 1, "Cannot find right record"); @@ -38,4 +35,3 @@ assert.commandWorked(t.insert({a: [3, 4]})); plan = t.find({a: 1}, {a: 1, _id: 0}).explain(); assert(!isIndexOnly(db, getWinningPlan(plan.queryPlanner)), "Find is using covered index even after multikey insert"); -}()); diff --git a/jstests/core/index/covered/coveredIndex3.js b/jstests/core/index/covered/coveredIndex3.js index 8b15b40e86259..dd44f2544eaef 100644 --- a/jstests/core/index/covered/coveredIndex3.js +++ b/jstests/core/index/covered/coveredIndex3.js @@ -6,8 +6,8 @@ if (0) { // SERVER-4975 - t = db.jstests_coveredIndex3; - t2 = db.jstests_coveredIndex3_other; + let t = db.jstests_coveredIndex3; + let t2 = db.jstests_coveredIndex3_other; t.drop(); t2.drop(); @@ -15,30 +15,30 @@ if (0) { // SERVER-4975 // Insert an array, which will make the { a:1 } index multikey and should disable covered // index // matching. - p1 = startParallelShell( + let p1 = startParallelShell( 'for( i = 0; i < 60; ++i ) { \ db.jstests_coveredIndex3.save( { a:[ 2000, 2001 ] } ); \ sleep( 300 ); \ }'); // Frequent writes cause the find operation to yield. - p2 = startParallelShell( + let p2 = startParallelShell( 'for( i = 0; i < 1800; ++i ) { \ db.jstests_coveredIndex3_other.save( {} ); \ sleep( 10 ); \ }'); - for (i = 0; i < 30; ++i) { + for (let i = 0; i < 30; ++i) { t.drop(); t.createIndex({a: 1}); - for (j = 0; j < 1000; ++j) { + for (let j = 0; j < 1000; ++j) { t.save({a: j}); } - c = t.find({}, {_id: 0, a: 1}).hint({a: 1}).batchSize(batchSize); + let c = t.find({}, {_id: 0, a: 1}).hint({a: 1}).batchSize(batchSize); while (c.hasNext()) { - o = c.next(); + let o = c.next(); // If o contains a high numeric 'a' value, it must come from an array saved in p1. assert(!(o.a > 1500), 'improper object returned ' + tojson(o)); } diff --git a/jstests/core/index/covered/coveredIndex4.js b/jstests/core/index/covered/coveredIndex4.js index 7433ed7b9d60f..b415328474e0f 100644 --- a/jstests/core/index/covered/coveredIndex4.js +++ b/jstests/core/index/covered/coveredIndex4.js @@ -5,14 +5,14 @@ // Test covered index projection with $or clause, specifically in getMore // SERVER-4980 -t = db.jstests_coveredIndex4; +let t = db.jstests_coveredIndex4; t.drop(); t.createIndex({a: 1}); t.createIndex({b: 1}); -orClause = []; -for (i = 0; i < 200; ++i) { +let orClause = []; +for (let i = 0; i < 200; ++i) { if (i % 2 == 0) { t.save({a: i}); orClause.push({a: i}); @@ -22,11 +22,11 @@ for (i = 0; i < 200; ++i) { } } -c = t.find({$or: orClause}, {_id: 0, a: 1}); +let c = t.find({$or: orClause}, {_id: 0, a: 1}); // No odd values of a were saved, so we should not see any in the results. while (c.hasNext()) { - o = c.next(); + let o = c.next(); if (o.a) { assert.eq(0, o.a % 2, 'unexpected result: ' + tojson(o)); } @@ -36,7 +36,7 @@ c = t.find({$or: orClause}, {_id: 0, b: 1}); // No even values of b were saved, so we should not see any in the results. 
while (c.hasNext()) { - o = c.next(); + let o = c.next(); if (o.b) { assert.eq(1, o.b % 2, 'unexpected result: ' + tojson(o)); } diff --git a/jstests/core/index/covered/covered_index_compound_1.js b/jstests/core/index/covered/covered_index_compound_1.js index 0efff371059a1..27da21d5cebb9 100644 --- a/jstests/core/index/covered/covered_index_compound_1.js +++ b/jstests/core/index/covered/covered_index_compound_1.js @@ -8,11 +8,11 @@ // Compound index covered query tests // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.getCollection("covered_compound_1"); coll.drop(); -for (i = 0; i < 100; i++) { +for (let i = 0; i < 100; i++) { coll.insert({a: i, b: "strvar_" + (i % 13), c: NumberInt(i % 10)}); } coll.createIndex({a: 1, b: -1, c: 1}); diff --git a/jstests/core/index/covered/covered_index_negative_1.js b/jstests/core/index/covered/covered_index_negative_1.js index 2e2179d908a9d..826fa19e37ad1 100644 --- a/jstests/core/index/covered/covered_index_negative_1.js +++ b/jstests/core/index/covered/covered_index_negative_1.js @@ -8,10 +8,7 @@ // assumes_balancer_off, // does_not_support_stepdowns, // ] -(function() { -'use strict'; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, isIndexOnly} from "jstests/libs/analyze_plan.js"; const coll = db.covered_negative_1; coll.drop(); @@ -95,5 +92,4 @@ assert.neq(0, plan.executionStats.totalDocsExamined, "negative.1.8 - nscannedObjects should not be 0 for a non covered query"); -print('all tests passed'); -})(); +print('all tests passed'); \ No newline at end of file diff --git a/jstests/core/index/covered/covered_index_simple_1.js b/jstests/core/index/covered/covered_index_simple_1.js index 16a6e3cdc6a2b..688b4c4b9a00d 100644 --- a/jstests/core/index/covered/covered_index_simple_1.js +++ b/jstests/core/index/covered/covered_index_simple_1.js @@ -8,17 +8,17 @@ // Simple covered index query test // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.getCollection("covered_simple_1"); coll.drop(); -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { coll.insert({foo: i}); } -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { coll.insert({foo: i}); } -for (i = 0; i < 5; i++) { +for (let i = 0; i < 5; i++) { coll.insert({bar: i}); } coll.insert({foo: "string"}); diff --git a/jstests/core/index/covered/covered_index_simple_2.js b/jstests/core/index/covered/covered_index_simple_2.js index cf04f940ad0d9..fa59e70caeb4f 100644 --- a/jstests/core/index/covered/covered_index_simple_2.js +++ b/jstests/core/index/covered/covered_index_simple_2.js @@ -8,11 +8,11 @@ // Simple covered index query test with unique index // Include helpers for analyzing explain output. 
-load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.getCollection("covered_simple_2"); coll.drop(); -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { coll.insert({foo: i}); } coll.insert({foo: "string"}); diff --git a/jstests/core/index/covered/covered_index_simple_3.js b/jstests/core/index/covered/covered_index_simple_3.js index 4beff2b3c5a50..1bc1070be9f79 100644 --- a/jstests/core/index/covered/covered_index_simple_3.js +++ b/jstests/core/index/covered/covered_index_simple_3.js @@ -8,14 +8,14 @@ // Simple covered index query test with a unique sparse index // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.getCollection("covered_simple_3"); coll.drop(); -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { coll.insert({foo: i}); } -for (i = 0; i < 5; i++) { +for (let i = 0; i < 5; i++) { coll.insert({bar: i}); } coll.insert({foo: "string"}); diff --git a/jstests/core/index/covered/covered_index_simple_id.js b/jstests/core/index/covered/covered_index_simple_id.js index 59efcd7f13aec..818398433c418 100644 --- a/jstests/core/index/covered/covered_index_simple_id.js +++ b/jstests/core/index/covered/covered_index_simple_id.js @@ -5,11 +5,11 @@ // Simple covered index query test // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.getCollection("covered_simple_id"); coll.drop(); -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { coll.insert({_id: i}); } coll.insert({_id: "string"}); diff --git a/jstests/core/index/covered/covered_index_sort_1.js b/jstests/core/index/covered/covered_index_sort_1.js index 499bff128e7ec..e08e9061df660 100644 --- a/jstests/core/index/covered/covered_index_sort_1.js +++ b/jstests/core/index/covered/covered_index_sort_1.js @@ -8,17 +8,17 @@ // Simple covered index query test with sort // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.getCollection("covered_sort_1"); coll.drop(); -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { coll.insert({foo: i}); } -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { coll.insert({foo: i}); } -for (i = 0; i < 5; i++) { +for (let i = 0; i < 5; i++) { coll.insert({bar: i}); } coll.insert({foo: "1"}); diff --git a/jstests/core/index/covered/covered_index_sort_2.js b/jstests/core/index/covered/covered_index_sort_2.js index 736a48bb4526e..a2acff2ec5cce 100644 --- a/jstests/core/index/covered/covered_index_sort_2.js +++ b/jstests/core/index/covered/covered_index_sort_2.js @@ -5,11 +5,11 @@ // ] // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.getCollection("covered_sort_2"); coll.drop(); -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { coll.insert({_id: i}); } coll.insert({_id: "1"}); diff --git a/jstests/core/index/covered/covered_index_sort_3.js b/jstests/core/index/covered/covered_index_sort_3.js index 9e57a79a2a2e6..1066103162531 100644 --- a/jstests/core/index/covered/covered_index_sort_3.js +++ b/jstests/core/index/covered/covered_index_sort_3.js @@ -8,11 +8,11 @@ // Compound index covered query tests with sort // Include helpers for analyzing explain output. 
-load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.getCollection("covered_sort_3"); coll.drop(); -for (i = 0; i < 100; i++) { +for (let i = 0; i < 100; i++) { coll.insert({a: i, b: "strvar_" + (i % 13), c: NumberInt(i % 10)}); } diff --git a/jstests/core/index/covered/covered_index_sort_no_fetch_optimization.js b/jstests/core/index/covered/covered_index_sort_no_fetch_optimization.js index 416549acb8753..ad2d11ecf8248 100644 --- a/jstests/core/index/covered/covered_index_sort_no_fetch_optimization.js +++ b/jstests/core/index/covered/covered_index_sort_no_fetch_optimization.js @@ -9,10 +9,7 @@ * assumes_unsharded_collection, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, isIndexOnly, planHasStage} from "jstests/libs/analyze_plan.js"; const collName = "covered_index_sort_no_fetch_optimization"; const coll = db.getCollection(collName); @@ -239,5 +236,4 @@ findCmd = { }; expected = [{"b": {"c": 1}}, {"b": {"c": 2}}, {"b": {"c": 3}}, {"b": {"c": "A"}}, {"b": {"c": "a"}}]; -assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); -})(); +assertExpectedResult(findCmd, expected, kIsCovered, kBlockingSort); \ No newline at end of file diff --git a/jstests/core/index/elemmatch_index.js b/jstests/core/index/elemmatch_index.js index a1941620a484a..b8012988a30d7 100644 --- a/jstests/core/index/elemmatch_index.js +++ b/jstests/core/index/elemmatch_index.js @@ -6,10 +6,7 @@ * assumes_read_concern_local, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js"; const coll = db.elemMatch_index; coll.drop(); @@ -111,5 +108,4 @@ assert.eq(count, 1); const explain = coll.find(query).hint({"arr.x": 1, a: 1}).explain("executionStats"); assert.commandWorked(explain); assert.eq(count, explain.executionStats.totalKeysExamined, explain); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/index/fts/fts2.js b/jstests/core/index/fts/fts2.js index 79be057fed835..418a8ca321529 100644 --- a/jstests/core/index/fts/fts2.js +++ b/jstests/core/index/fts/fts2.js @@ -1,6 +1,6 @@ load("jstests/libs/fts.js"); -t = db.text2; +let t = db.text2; t.drop(); t.save({_id: 1, x: "az b x", y: "c d m", z: 1}); diff --git a/jstests/core/index/fts/fts3.js b/jstests/core/index/fts/fts3.js index 9b89cda029c64..b28227e57bbe9 100644 --- a/jstests/core/index/fts/fts3.js +++ b/jstests/core/index/fts/fts3.js @@ -1,6 +1,6 @@ load("jstests/libs/fts.js"); -t = db.text3; +let t = db.text3; t.drop(); t.save({_id: 1, x: "az b x", y: "c d m", z: 1}); diff --git a/jstests/core/index/fts/fts4.js b/jstests/core/index/fts/fts4.js index bb19fba22211b..3e92b6f057458 100644 --- a/jstests/core/index/fts/fts4.js +++ b/jstests/core/index/fts/fts4.js @@ -1,6 +1,6 @@ load("jstests/libs/fts.js"); -t = db.text4; +let t = db.text4; t.drop(); t.save({_id: 1, x: ["az", "b", "x"], y: ["c", "d", "m"], z: 1}); diff --git a/jstests/core/index/fts/fts5.js b/jstests/core/index/fts/fts5.js index 28d9b48d957ea..13bde38545b34 100644 --- a/jstests/core/index/fts/fts5.js +++ b/jstests/core/index/fts/fts5.js @@ -1,6 +1,6 @@ load("jstests/libs/fts.js"); -t = db.text5; +let t = db.text5; t.drop(); t.save({_id: 1, x: [{a: "az"}, {a: "b"}, {a: "x"}], y: ["c", "d", "m"], z: 1}); diff --git a/jstests/core/index/fts/fts_blog.js b/jstests/core/index/fts/fts_blog.js index 5208c16625846..5b7b7559512e6 100644 --- a/jstests/core/index/fts/fts_blog.js +++ 
b/jstests/core/index/fts/fts_blog.js @@ -1,4 +1,4 @@ -t = db.text_blog; +let t = db.text_blog; t.drop(); t.save({_id: 1, title: "my blog post", text: "this is a new blog i am writing. yay"}); @@ -9,7 +9,7 @@ t.save({_id: 3, title: "knives are Fun", text: "this is a new blog i am writing. // specify weights if you want a field to be more meaningull t.createIndex({"title": "text", text: "text"}, {weights: {title: 10}}); -res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}}).sort({ +let res = t.find({"$text": {"$search": "blog"}}, {score: {"$meta": "textScore"}}).sort({ score: {"$meta": "textScore"} }); assert.eq(3, res.length()); diff --git a/jstests/core/index/fts/fts_blogwild.js b/jstests/core/index/fts/fts_blogwild.js index f449b6b827e89..ffb28533e117a 100644 --- a/jstests/core/index/fts/fts_blogwild.js +++ b/jstests/core/index/fts/fts_blogwild.js @@ -4,7 +4,7 @@ // assumes_no_implicit_index_creation, // ] -t = db.text_blogwild; +let t = db.text_blogwild; t.drop(); t.save({_id: 1, title: "my blog post", text: "this is a new blog i am writing. yay eliot"}); @@ -21,7 +21,7 @@ t.createIndex({dummy: "text"}, {weights: "$**"}); // ensure listIndexes can handle a string-valued "weights" assert.eq(2, t.getIndexes().length); -res = t.find({"$text": {"$search": "blog"}}); +let res = t.find({"$text": {"$search": "blog"}}); assert.eq(3, res.length(), "A1"); res = t.find({"$text": {"$search": "write"}}); diff --git a/jstests/core/index/fts/fts_index.js b/jstests/core/index/fts/fts_index.js index c78301509f15b..be5035d0babbb 100644 --- a/jstests/core/index/fts/fts_index.js +++ b/jstests/core/index/fts/fts_index.js @@ -78,6 +78,17 @@ assert.eq(0, }) .length); +// $-prefixed fields cannot be indexed. +coll = db.getCollection(collNamePrefix + collCount++); +coll.drop(); +assert.commandFailed(coll.createIndex({"a.$custom": "text"}, {name: indexName})); +assert.eq(0, + coll.getIndexes() + .filter(function(z) { + return z.name == indexName; + }) + .length); + // SERVER-19519 Spec fails if '_fts' is specified on a non-text index. coll = db.getCollection(collNamePrefix + collCount++); coll.drop(); @@ -141,7 +152,7 @@ assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1})); assert.eq(2, coll.getIndexes().length); assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1})); assert.eq(2, coll.getIndexes().length); -assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1}, {background: true})); +assert.commandWorked(coll.createIndex({a: 1, b: "text", c: 1})); assert.eq(2, coll.getIndexes().length); assert.commandFailedWithCode(coll.createIndex({a: 1, b: 1, c: "text"}), ErrorCodes.CannotCreateIndex); diff --git a/jstests/core/index/fts/fts_index3.js b/jstests/core/index/fts/fts_index3.js index ac4730d0bd0a7..4d0efb2c1d949 100644 --- a/jstests/core/index/fts/fts_index3.js +++ b/jstests/core/index/fts/fts_index3.js @@ -110,6 +110,11 @@ assert.commandWorked(coll.update({}, {$set: {"a.language": "en"}})); assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount()); assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount()); +// SERVER-78238: index with a dotted path should not index fields with a dot inside that make it +// look like a dotted path. +assert.commandWorked(coll.insert({"a.b": "ignored"})); +assert.eq(0, coll.find({$text: {$search: "ignored"}}).itcount()); + // 10) Same as #9, but with a wildcard text index. 
coll = db.getCollection(collNamePrefix + collCount++); coll.drop(); @@ -121,6 +126,11 @@ assert.commandWorked(coll.update({}, {$set: {"a.language": "en"}})); assert.eq(0, coll.find({$text: {$search: "testing", $language: "es"}}).itcount()); assert.eq(1, coll.find({$text: {$search: "testing", $language: "en"}}).itcount()); +// SERVER-78238: index with a wildcard should not index fields with a dot inside or starting with $. +assert.commandWorked(coll.insert({"a.b": "ignored"})); +assert.commandWorked(coll.insert({"$personal": "ignored"})); +assert.eq(0, coll.find({$text: {$search: "ignored"}}).itcount()); + // 11) Create a text index on a single field with a custom language override, insert a document, // update the language of the document (so as to change the stemming), and verify that $text with // the new language returns the document. diff --git a/jstests/core/index/fts/fts_partition1.js b/jstests/core/index/fts/fts_partition1.js index 4a26a3ad62957..3791c461f7ddf 100644 --- a/jstests/core/index/fts/fts_partition1.js +++ b/jstests/core/index/fts/fts_partition1.js @@ -1,6 +1,6 @@ load("jstests/libs/fts.js"); -t = db.text_parition1; +let t = db.text_parition1; t.drop(); t.insert({_id: 1, x: 1, y: "foo"}); @@ -19,7 +19,7 @@ assert.throws(function() { assert.eq([1], queryIDS(t, "foo", {x: 1})); -res = t.find({"$text": {"$search": "foo"}, x: 1}, {score: {"$meta": "textScore"}}); +let res = t.find({"$text": {"$search": "foo"}, x: 1}, {score: {"$meta": "textScore"}}); assert(res[0].score > 0, tojson(res.toArray())); // repeat "search" with "language" specified, SERVER-8999 diff --git a/jstests/core/index/fts/fts_partition_no_multikey.js b/jstests/core/index/fts/fts_partition_no_multikey.js index b819c3abfbd83..c8adead6100c6 100644 --- a/jstests/core/index/fts/fts_partition_no_multikey.js +++ b/jstests/core/index/fts/fts_partition_no_multikey.js @@ -1,4 +1,4 @@ -t = db.fts_partition_no_multikey; +let t = db.fts_partition_no_multikey; t.drop(); t.createIndex({x: 1, y: "text"}); diff --git a/jstests/core/index/fts/fts_phrase.js b/jstests/core/index/fts/fts_phrase.js index 1a3d747032682..31356e8a4ea05 100644 --- a/jstests/core/index/fts/fts_phrase.js +++ b/jstests/core/index/fts/fts_phrase.js @@ -1,4 +1,4 @@ -t = db.text_phrase; +let t = db.text_phrase; t.drop(); t.save({_id: 1, title: "my blog post", text: "i am writing a blog. yay"}); @@ -7,7 +7,7 @@ t.save({_id: 3, title: "knives are Fun", text: "this is a new blog i am writing. 
t.createIndex({"title": "text", text: "text"}, {weights: {title: 10}}); -res = t.find({"$text": {"$search": "blog write"}}, {score: {"$meta": "textScore"}}).sort({ +let res = t.find({"$text": {"$search": "blog write"}}, {score: {"$meta": "textScore"}}).sort({ score: {"$meta": "textScore"} }); assert.eq(3, res.length()); diff --git a/jstests/core/index/fts/fts_projection.js b/jstests/core/index/fts/fts_projection.js index 6e4c99b92308d..67e69179e8549 100644 --- a/jstests/core/index/fts/fts_projection.js +++ b/jstests/core/index/fts/fts_projection.js @@ -3,10 +3,7 @@ // assumes_read_concern_local, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; var t = db.getSiblingDB("test").getCollection("fts_projection"); t.drop(); @@ -121,5 +118,4 @@ assert.eq(results.length, 2); assert(results[0].score, "invalid text score for " + tojson(results[0], '', true) + " when $text is in $or"); assert(results[1].score, - "invalid text score for " + tojson(results[0], '', true) + " when $text is in $or"); -})(); + "invalid text score for " + tojson(results[0], '', true) + " when $text is in $or"); \ No newline at end of file diff --git a/jstests/core/index/geo/geo1.js b/jstests/core/index/geo/geo1.js index 8a80f59d69237..126aae3505d82 100644 --- a/jstests/core/index/geo/geo1.js +++ b/jstests/core/index/geo/geo1.js @@ -5,13 +5,10 @@ // requires_fastcount, // ] -t = db.geo1; +let t = db.geo1; t.drop(); -idx = { - loc: "2d", - zip: 1 -}; +let idx = {loc: "2d", zip: 1}; t.insert({zip: "06525", loc: [41.352964, 73.01212]}); t.insert({zip: "10024", loc: [40.786387, 73.97709]}); @@ -31,7 +28,7 @@ assert.eq(3, t.count(), "B3"); // test normal access -wb = t.findOne({zip: "06525"}); +let wb = t.findOne({zip: "06525"}); assert(wb, "C1"); assert.eq("06525", t.find({loc: wb.loc}).hint({"$natural": 1})[0].zip, "C2"); diff --git a/jstests/core/index/geo/geo10.js b/jstests/core/index/geo/geo10.js index 45463711406f5..adbf0862cbedc 100644 --- a/jstests/core/index/geo/geo10.js +++ b/jstests/core/index/geo/geo10.js @@ -6,7 +6,7 @@ // Test for SERVER-2746 -coll = db.geo10; +let coll = db.geo10; coll.drop(); assert.commandWorked(db.geo10.createIndex({c: '2d', t: 1}, {min: 0, max: Math.pow(2, 40)})); diff --git a/jstests/core/index/geo/geo2.js b/jstests/core/index/geo/geo2.js index 34588acac9e43..e4521b5b45e00 100644 --- a/jstests/core/index/geo/geo2.js +++ b/jstests/core/index/geo/geo2.js @@ -3,11 +3,11 @@ // requires_fastcount, // ] -t = db.geo2; +let t = db.geo2; t.drop(); -n = 1; -arr = []; +let n = 1; +let arr = []; for (var x = -100; x < 100; x += 2) { for (var y = -100; y < 100; y += 2) { arr.push({_id: n++, loc: [x, y]}); diff --git a/jstests/core/index/geo/geo3.js b/jstests/core/index/geo/geo3.js index 7e54fd7dc5f91..381ec12f3991d 100644 --- a/jstests/core/index/geo/geo3.js +++ b/jstests/core/index/geo/geo3.js @@ -4,11 +4,11 @@ // ] (function() { -t = db.geo3; +let t = db.geo3; t.drop(); -n = 1; -arr = []; +let n = 1; +let arr = []; for (var x = -100; x < 100; x += 2) { for (var y = -100; y < 100; y += 2) { arr.push({_id: n++, loc: [x, y], a: Math.abs(x) % 5, b: Math.abs(y) % 5}); @@ -67,9 +67,7 @@ assert.commandWorked(t.createIndex({loc: "2d", b: 1})); testFiltering("loc and b"); -q = { - loc: {$near: [50, 50]} -}; +let q = {loc: {$near: [50, 50]}}; assert.eq(100, t.find(q).limit(100).itcount(), "D1"); assert.eq(100, t.find(q).limit(100).size(), "D2"); diff --git a/jstests/core/index/geo/geo6.js 
b/jstests/core/index/geo/geo6.js index 8d32c066c88e2..98022ad7940ba 100644 --- a/jstests/core/index/geo/geo6.js +++ b/jstests/core/index/geo/geo6.js @@ -1,4 +1,4 @@ -t = db.geo6; +let t = db.geo6; t.drop(); t.createIndex({loc: "2d"}); diff --git a/jstests/core/index/geo/geo9.js b/jstests/core/index/geo/geo9.js index 6b1bfb6063143..701c6bd6680f1 100644 --- a/jstests/core/index/geo/geo9.js +++ b/jstests/core/index/geo/geo9.js @@ -1,4 +1,4 @@ -t = db.geo9; +let t = db.geo9; t.drop(); t.save({_id: 1, a: [10, 10], b: [50, 50]}); @@ -15,7 +15,7 @@ t.createIndex({b: "2d"}); function check(field) { var q = {}; q[field] = {$near: [11, 11]}; - arr = t.find(q).limit(3).map(function(z) { + let arr = t.find(q).limit(3).map(function(z) { return Geo.distance([11, 11], z[field]); }); assert.eq(2 * Math.sqrt(2), Array.sum(arr), "test " + field); diff --git a/jstests/core/index/geo/geo_2d_explain.js b/jstests/core/index/geo/geo_2d_explain.js index 36adad19a6c60..c115b958186ac 100644 --- a/jstests/core/index/geo/geo_2d_explain.js +++ b/jstests/core/index/geo/geo_2d_explain.js @@ -1,11 +1,10 @@ // @tags: [ // assumes_balancer_off, // ] +import {getPlanStages} from "jstests/libs/analyze_plan.js"; var t = db.geo_2d_explain; -load("jstests/libs/analyze_plan.js"); - t.drop(); var n = 1000; diff --git a/jstests/core/index/geo/geo_allowedcomparisons.js b/jstests/core/index/geo/geo_allowedcomparisons.js index 2f689f4be6870..80ea1e25fee09 100644 --- a/jstests/core/index/geo/geo_allowedcomparisons.js +++ b/jstests/core/index/geo/geo_allowedcomparisons.js @@ -1,33 +1,27 @@ // A test for what geometries can interact with what other geometries. -t = db.geo_allowedcomparisons; +let t = db.geo_allowedcomparisons; // Any GeoJSON object can intersect with any geojson object. -geojsonPoint = { - "type": "Point", - "coordinates": [0, 0] -}; -oldPoint = [0, 0]; +let geojsonPoint = {"type": "Point", "coordinates": [0, 0]}; +let oldPoint = [0, 0]; // GeoJSON polygons can contain any geojson object and OLD points. -geojsonPoly = { +let geojsonPoly = { "type": "Polygon", "coordinates": [[[-5, -5], [-5, 5], [5, 5], [5, -5], [-5, -5]]] }; // This can be contained by GJ polygons, intersected by anything GJ and old points. -geojsonLine = { - "type": "LineString", - "coordinates": [[0, 0], [1, 1]] -}; +let geojsonLine = {"type": "LineString", "coordinates": [[0, 0], [1, 1]]}; // $centerSphere can contain old or new points. -oldCenterSphere = [[0, 0], Math.PI / 180]; +let oldCenterSphere = [[0, 0], Math.PI / 180]; // $box can contain old points. -oldBox = [[-5, -5], [5, 5]]; +let oldBox = [[-5, -5], [5, 5]]; // $polygon can contain old points. -oldPolygon = [[-5, -5], [-5, 5], [5, 5], [5, -5], [-5, -5]]; +let oldPolygon = [[-5, -5], [-5, 5], [5, 5], [5, -5], [-5, -5]]; // $center can contain old points. -oldCenter = [[0, 0], 1]; +let oldCenter = [[0, 0], 1]; t.drop(); t.createIndex({geo: "2d"}); @@ -47,10 +41,7 @@ assert.writeError(t.insert({geo: oldCenter})); // Verify that even if we can't index them, we can use them in a matcher. t.insert({gj: geojsonLine}); t.insert({gj: geojsonPoly}); -geojsonPoint2 = { - "type": "Point", - "coordinates": [0, 0.001] -}; +let geojsonPoint2 = {"type": "Point", "coordinates": [0, 0.001]}; t.insert({gjp: geojsonPoint2}); // We convert between old and new style points. 
diff --git a/jstests/core/index/geo/geo_array0.js b/jstests/core/index/geo/geo_array0.js index 0c9dc096afb79..29798952d6074 100644 --- a/jstests/core/index/geo/geo_array0.js +++ b/jstests/core/index/geo/geo_array0.js @@ -5,7 +5,7 @@ // ] // Make sure the very basics of geo arrays are sane by creating a few multi location docs -t = db.geoarray; +let t = db.geoarray; function test(index) { t.drop(); diff --git a/jstests/core/index/geo/geo_array2.js b/jstests/core/index/geo/geo_array2.js index 7da56a576eb6e..7e09608c07ebe 100644 --- a/jstests/core/index/geo/geo_array2.js +++ b/jstests/core/index/geo/geo_array2.js @@ -1,6 +1,6 @@ // Check the semantics of near calls with multiple locations -t = db.geoarray2; +let t = db.geoarray2; t.drop(); var numObjs = 10; @@ -12,7 +12,7 @@ Random.setRandomSeed(); for (var i = -1; i < 2; i++) { for (var j = -1; j < 2; j++) { - locObj = []; + let locObj = []; if (i != 0 || j != 0) locObj.push({x: i * 50 + Random.rand(), y: j * 50 + Random.rand()}); @@ -28,7 +28,7 @@ assert.commandWorked(t.createIndex({loc: "2d", type: 1})); print("Starting testing phase... "); -for (var t = 0; t < 2; t++) { +for (let t = 0; t < 2; t++) { var type = t == 0 ? "A" : "B"; for (var i = -1; i < 2; i++) { diff --git a/jstests/core/index/geo/geo_big_polygon2.js b/jstests/core/index/geo/geo_big_polygon2.js index 389bc9ef7bcf3..a4dc09a132e88 100644 --- a/jstests/core/index/geo/geo_big_polygon2.js +++ b/jstests/core/index/geo/geo_big_polygon2.js @@ -461,9 +461,9 @@ function nGonGenerator(N, D, clockwise, LON, LAT) { lat = (-D / 2); } lon = Math.sqrt((D / 2) * (D / 2) - (lat * lat)); - newlat = lat + LAT; - newlon = lon + LON; - conjugateLon = LON - lon; + let newlat = lat + LAT; + let newlon = lon + LON; + let conjugateLon = LON - lon; pts[i] = [newlon, newlat]; pts[N - i] = [conjugateLon, newlat]; } diff --git a/jstests/core/index/geo/geo_borders.js b/jstests/core/index/geo/geo_borders.js index 897697a88cb33..09b89c0d753d5 100644 --- a/jstests/core/index/geo/geo_borders.js +++ b/jstests/core/index/geo/geo_borders.js @@ -4,16 +4,16 @@ * ] */ -t = db.borders; +let t = db.borders; t.drop(); -epsilon = 0.0001; +let epsilon = 0.0001; // For these tests, *required* that step ends exactly on max -min = -1; -max = 1; -step = 1; -numItems = 0; +let min = -1; +let max = 1; +let step = 1; +let numItems = 0; for (var x = min; x <= max; x += step) { for (var y = min; y <= max; y += step) { @@ -22,8 +22,8 @@ for (var x = min; x <= max; x += step) { } } -overallMin = -1; -overallMax = 1; +let overallMin = -1; +let overallMax = 1; // Create a point index slightly smaller than the points we have var res = @@ -113,14 +113,14 @@ assert.eq(numItems, t.find({ // Circle tests // ************** -center = (overallMax + overallMin) / 2; +let center = (overallMax + overallMin) / 2; center = [center, center]; -radius = overallMax; +let radius = overallMax; -offCenter = [center[0] + radius, center[1] + radius]; -onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon]; -offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon]; -onBoundsNeg = [-onBounds[0], -onBounds[1]]; +let offCenter = [center[0] + radius, center[1] + radius]; +let onBounds = [offCenter[0] + epsilon, offCenter[1] + epsilon]; +let offBounds = [onBounds[0] + epsilon, onBounds[1] + epsilon]; +let onBoundsNeg = [-onBounds[0], -onBounds[1]]; // Make sure we can get all points when radius is exactly at full bounds assert.lt(0, t.find({loc: {$within: {$center: [center, radius + epsilon]}}}).count()); @@ -133,7 +133,7 @@ assert.lt(0, t.find({loc: 
{$within: {$center: [offCenter, radius + 2 * epsilon]} // Make sure we get correct corner point when center is in bounds // (x bounds wrap, so could get other corner) -cornerPt = t.findOne({loc: {$within: {$center: [offCenter, step / 2]}}}); +let cornerPt = t.findOne({loc: {$within: {$center: [offCenter, step / 2]}}}); assert.eq(cornerPt.loc.y, overallMax); // Make sure we get correct corner point when center is on bounds diff --git a/jstests/core/index/geo/geo_box1.js b/jstests/core/index/geo/geo_box1.js index d0c4ff50bdff6..40a754d64b3a8 100644 --- a/jstests/core/index/geo/geo_box1.js +++ b/jstests/core/index/geo/geo_box1.js @@ -2,32 +2,32 @@ // requires_getmore, // ] -t = db.geo_box1; +let t = db.geo_box1; t.drop(); -num = 0; -for (x = 0; x <= 20; x++) { - for (y = 0; y <= 20; y++) { - o = {_id: num++, loc: [x, y]}; +let num = 0; +for (let x = 0; x <= 20; x++) { + for (let y = 0; y <= 20; y++) { + let o = {_id: num++, loc: [x, y]}; t.save(o); } } t.createIndex({loc: "2d"}); -searches = [ +let searches = [ [[1, 2], [4, 5]], [[1, 1], [2, 2]], [[0, 2], [4, 5]], [[1, 1], [2, 8]], ]; -for (i = 0; i < searches.length; i++) { - b = searches[i]; +for (let i = 0; i < searches.length; i++) { + let b = searches[i]; // printjson( b ); - q = {loc: {$within: {$box: b}}}; - numWanetd = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]); + let q = {loc: {$within: {$box: b}}}; + let numWanetd = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]); assert.eq(numWanetd, t.find(q).itcount(), "itcount: " + tojson(q)); printjson(t.find(q).explain()); } diff --git a/jstests/core/index/geo/geo_box1_noindex.js b/jstests/core/index/geo/geo_box1_noindex.js index 879f17e300288..3da72a96b4478 100644 --- a/jstests/core/index/geo/geo_box1_noindex.js +++ b/jstests/core/index/geo/geo_box1_noindex.js @@ -3,28 +3,28 @@ // ] // SERVER-7343: allow $within without a geo index. -t = db.geo_box1_noindex; +let t = db.geo_box1_noindex; t.drop(); -num = 0; -for (x = 0; x <= 20; x++) { - for (y = 0; y <= 20; y++) { - o = {_id: num++, loc: [x, y]}; +let num = 0; +for (let x = 0; x <= 20; x++) { + for (let y = 0; y <= 20; y++) { + let o = {_id: num++, loc: [x, y]}; t.save(o); } } -searches = [ +let searches = [ [[1, 2], [4, 5]], [[1, 1], [2, 2]], [[0, 2], [4, 5]], [[1, 1], [2, 8]], ]; -for (i = 0; i < searches.length; i++) { - b = searches[i]; - q = {loc: {$within: {$box: b}}}; - numWanted = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]); +for (let i = 0; i < searches.length; i++) { + let b = searches[i]; + let q = {loc: {$within: {$box: b}}}; + let numWanted = (1 + b[1][0] - b[0][0]) * (1 + b[1][1] - b[0][1]); assert.eq(numWanted, t.find(q).itcount(), "itcount: " + tojson(q)); printjson(t.find(q).explain()); } diff --git a/jstests/core/index/geo/geo_box2.js b/jstests/core/index/geo/geo_box2.js index 7120cfd3e4a16..60c76603b5b3a 100644 --- a/jstests/core/index/geo/geo_box2.js +++ b/jstests/core/index/geo/geo_box2.js @@ -1,9 +1,9 @@ -t = db.geo_box2; +let t = db.geo_box2; t.drop(); -for (i = 1; i < 10; i++) { - for (j = 1; j < 10; j++) { +for (let i = 1; i < 10; i++) { + for (let j = 1; j < 10; j++) { t.insert({loc: [i, j]}); } } diff --git a/jstests/core/index/geo/geo_box3.js b/jstests/core/index/geo/geo_box3.js index 4a91ffb0d1d19..3ca3d4b82e35d 100644 --- a/jstests/core/index/geo/geo_box3.js +++ b/jstests/core/index/geo/geo_box3.js @@ -4,7 +4,7 @@ // bounding box. // This is the bug reported in SERVER-994. 
-t = db.geo_box3; +let t = db.geo_box3; t.drop(); t.insert({point: {x: -15000000, y: 10000000}}); t.createIndex({point: "2d"}, {min: -21000000, max: 21000000}); diff --git a/jstests/core/index/geo/geo_center_sphere1.js b/jstests/core/index/geo/geo_center_sphere1.js index 2b37c7c3f96ff..c3cdb5aa7d076 100644 --- a/jstests/core/index/geo/geo_center_sphere1.js +++ b/jstests/core/index/geo/geo_center_sphere1.js @@ -3,13 +3,14 @@ // requires_fastcount, // ] -t = db.geo_center_sphere1; +let t = db.geo_center_sphere1; function test(index) { t.drop(); - skip = 8; // lower for more rigor, higher for more speed (tested with .5, .678, 1, 2, 3, and 4) + let skip = + 8; // lower for more rigor, higher for more speed (tested with .5, .678, 1, 2, 3, and 4) - searches = [ + let searches = [ // x , y rad [[5, 0], 0.05], // ~200 miles [[135, 0], 0.05], @@ -23,18 +24,18 @@ function test(index) { [[-20, 60], 0.25], [[-20, -70], 0.25], ]; - correct = searches.map(function(z) { + let correct = searches.map(function(z) { return []; }); - num = 0; + let num = 0; var bulk = t.initializeUnorderedBulkOp(); - for (x = -179; x <= 179; x += skip) { - for (y = -89; y <= 89; y += skip) { - o = {_id: num++, loc: [x, y]}; + for (let x = -179; x <= 179; x += skip) { + for (let y = -89; y <= 89; y += skip) { + let o = {_id: num++, loc: [x, y]}; bulk.insert(o); - for (i = 0; i < searches.length; i++) { + for (let i = 0; i < searches.length; i++) { if (Geo.sphereDistance([x, y], searches[i][0]) <= searches[i][1]) correct[i].push(o); } @@ -47,10 +48,10 @@ function test(index) { t.createIndex({loc: index}); } - for (i = 0; i < searches.length; i++) { + for (let i = 0; i < searches.length; i++) { print('------------'); print(tojson(searches[i]) + "\t" + correct[i].length); - q = {loc: {$within: {$centerSphere: searches[i]}}}; + let q = {loc: {$within: {$centerSphere: searches[i]}}}; // correct[i].forEach( printjson ) // printjson( q ); @@ -69,8 +70,8 @@ function test(index) { return z._id; }); - missing = []; - epsilon = 0.001; // allow tenth of a percent error due to conversions + let missing = []; + let epsilon = 0.001; // allow tenth of a percent error due to conversions for (var j = 0; j < x.length; j++) { if (!Array.contains(y, x[j])) { missing.push(x[j]); diff --git a/jstests/core/index/geo/geo_center_sphere2.js b/jstests/core/index/geo/geo_center_sphere2.js index 1c59850d84182..ecf3622217c9e 100644 --- a/jstests/core/index/geo/geo_center_sphere2.js +++ b/jstests/core/index/geo/geo_center_sphere2.js @@ -18,8 +18,8 @@ function computexscandist(y, maxDistDegrees) { } function pointIsOK(startPoint, radius) { - yscandist = rad2deg(radius) + 0.01; - xscandist = computexscandist(startPoint[1], yscandist); + let yscandist = rad2deg(radius) + 0.01; + let xscandist = computexscandist(startPoint[1], yscandist); return (startPoint[0] + xscandist < 180) && (startPoint[0] - xscandist > -180) && (startPoint[1] + yscandist < 90) && (startPoint[1] - yscandist > -90); } diff --git a/jstests/core/index/geo/geo_circle2.js b/jstests/core/index/geo/geo_circle2.js index d7947f96502eb..1d228db988ca8 100644 --- a/jstests/core/index/geo/geo_circle2.js +++ b/jstests/core/index/geo/geo_circle2.js @@ -1,4 +1,4 @@ -t = db.geo_circle2; +let t = db.geo_circle2; t.drop(); t.createIndex({loc: "2d", categories: 1}, {"name": "placesIdx", "min": -100, "max": 100}); diff --git a/jstests/core/index/geo/geo_circle3.js b/jstests/core/index/geo/geo_circle3.js index da7a9af6b34fe..9466f1e0c5f96 100644 --- a/jstests/core/index/geo/geo_circle3.js +++ 
b/jstests/core/index/geo/geo_circle3.js @@ -1,7 +1,7 @@ // SERVER-848 and SERVER-1191. db.places.drop(); -n = 0; +let n = 0; db.places.save({"_id": n++, "loc": {"x": 4.9999, "y": 52}}); db.places.save({"_id": n++, "loc": {"x": 5, "y": 52}}); db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 52}}); @@ -12,8 +12,8 @@ db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 52.0001}}); db.places.save({"_id": n++, "loc": {"x": 4.9999, "y": 51.9999}}); db.places.save({"_id": n++, "loc": {"x": 5.0001, "y": 51.9999}}); db.places.createIndex({loc: "2d"}); -radius = 0.0001; -center = [5, 52]; +let radius = 0.0001; +let center = [5, 52]; // print(db.places.find({"loc" : {"$within" : {"$center" : [center, radius]}}}).count()) // FIXME: we want an assert, e.g., that there be 5 answers in the find(). db.places.find({"loc": {"$within": {"$center": [center, radius]}}}).forEach(printjson); diff --git a/jstests/core/index/geo/geo_circle4.js b/jstests/core/index/geo/geo_circle4.js index c2194142795e0..50cff15512ffe 100644 --- a/jstests/core/index/geo/geo_circle4.js +++ b/jstests/core/index/geo/geo_circle4.js @@ -2,8 +2,8 @@ function test(index) { db.server848.drop(); - radius = 0.0001; - center = [5, 52]; + let radius = 0.0001; + let center = [5, 52]; db.server848.save({"_id": 1, "loc": {"x": 4.9999, "y": 52}}); db.server848.save({"_id": 2, "loc": {"x": 5, "y": 52}}); @@ -17,12 +17,12 @@ function test(index) { if (index) { db.server848.createIndex({loc: "2d"}); } - r = db.server848.find({"loc": {"$within": {"$center": [center, radius]}}}, {_id: 1}); + let r = db.server848.find({"loc": {"$within": {"$center": [center, radius]}}}, {_id: 1}); assert.eq(5, r.count(), "A1"); // FIXME: surely code like this belongs in utils.js. - a = r.toArray(); - x = []; - for (k in a) { + let a = r.toArray(); + let x = []; + for (let k in a) { x.push(a[k]["_id"]); } x.sort(); diff --git a/jstests/core/index/geo/geo_circle5.js b/jstests/core/index/geo/geo_circle5.js index 27b973a8edeb1..4f43d8b3becbb 100644 --- a/jstests/core/index/geo/geo_circle5.js +++ b/jstests/core/index/geo/geo_circle5.js @@ -12,9 +12,9 @@ db.server1238.createIndex({loc: "2d"}, {min: -21000000, max: 21000000}); db.server1238.save({loc: [5000000, 900000], id: 3}); db.server1238.save({loc: [5000000, 900000], id: 4}); -c1 = db.server1238.find({"loc": {"$within": {"$center": [[5000000, 900000], 1.0]}}}).count(); +let c1 = db.server1238.find({"loc": {"$within": {"$center": [[5000000, 900000], 1.0]}}}).count(); -c2 = db.server1238.find({"loc": {"$within": {"$center": [[5000001, 900000], 5.0]}}}).count(); +let c2 = db.server1238.find({"loc": {"$within": {"$center": [[5000001, 900000], 5.0]}}}).count(); assert.eq(4, c1, "A1"); assert.eq(c1, c2, "B1"); diff --git a/jstests/core/index/geo/geo_exactfetch.js b/jstests/core/index/geo/geo_exactfetch.js index 43ef46fb55845..6ced564107f8c 100644 --- a/jstests/core/index/geo/geo_exactfetch.js +++ b/jstests/core/index/geo/geo_exactfetch.js @@ -1,5 +1,5 @@ // SERVER-7322 -t = db.geo_exactfetch; +let t = db.geo_exactfetch; t.drop(); function test(indexname) { diff --git a/jstests/core/index/geo/geo_fiddly_box.js b/jstests/core/index/geo/geo_fiddly_box.js index 9f5a9e8d6c41f..84e58aa60e32a 100644 --- a/jstests/core/index/geo/geo_fiddly_box.js +++ b/jstests/core/index/geo/geo_fiddly_box.js @@ -8,7 +8,7 @@ // "expand" portion of the geo-lookup expands the 2d range in only one // direction (so points are required on either side of the expanding range) -t = db.geo_fiddly_box; +let t = db.geo_fiddly_box; t.drop(); 
t.createIndex({loc: "2d"}); @@ -28,11 +28,11 @@ assert.eq( // Test normal lookup of a small square of points as a sanity check. -epsilon = 0.0001; -min = -1; -max = 1; -step = 1; -numItems = 0; +let epsilon = 0.0001; +let min = -1; +let max = 1; +let step = 1; +let numItems = 0; t.drop(); t.createIndex({loc: "2d"}, {max: max + epsilon / 2, min: min - epsilon / 2}); diff --git a/jstests/core/index/geo/geo_fiddly_box2.js b/jstests/core/index/geo/geo_fiddly_box2.js index a0f87203163ec..c71c4778603bf 100644 --- a/jstests/core/index/geo/geo_fiddly_box2.js +++ b/jstests/core/index/geo/geo_fiddly_box2.js @@ -4,7 +4,7 @@ // required to do // exact lookups on the points to get correct results. -t = db.geo_fiddly_box2; +let t = db.geo_fiddly_box2; t.drop(); t.insert({"letter": "S", "position": [-3, 0]}); @@ -17,7 +17,7 @@ t.insert({"letter": "L", "position": [3, 0]}); t.insert({"letter": "E", "position": [4, 0]}); t.createIndex({position: "2d"}); -result = t.find({"position": {"$within": {"$box": [[-3, -1], [0, 1]]}}}); +let result = t.find({"position": {"$within": {"$box": [[-3, -1], [0, 1]]}}}); assert.eq(4, result.count()); t.dropIndex({position: "2d"}); diff --git a/jstests/core/index/geo/geo_invalid_polygon.js b/jstests/core/index/geo/geo_invalid_polygon.js index 0eab7ca5406cc..7736f26073185 100644 --- a/jstests/core/index/geo/geo_invalid_polygon.js +++ b/jstests/core/index/geo/geo_invalid_polygon.js @@ -1,6 +1,6 @@ // With invalid geometry, error message should include _id // SERVER-8992 -t = db.geo_invalid_polygon; +let t = db.geo_invalid_polygon; t.drop(); // Self-intersecting polygon, triggers diff --git a/jstests/core/index/geo/geo_max.js b/jstests/core/index/geo/geo_max.js index 3ef6e39dcaa06..8db0cbb5858c5 100644 --- a/jstests/core/index/geo/geo_max.js +++ b/jstests/core/index/geo/geo_max.js @@ -16,10 +16,10 @@ test.t.insert({loc: [-180, 0]}); test.t.insert({loc: [179.999, 0]}); test.t.insert({loc: [-179.999, 0]}); -assertXIsNegative = function(obj) { +let assertXIsNegative = function(obj) { assert.lt(obj.loc[0], 0); }; -assertXIsPositive = function(obj) { +let assertXIsPositive = function(obj) { assert.gt(obj.loc[0], 0); }; diff --git a/jstests/core/index/geo/geo_multikey0.js b/jstests/core/index/geo/geo_multikey0.js index 27075a779ee90..8296dcd6854c6 100644 --- a/jstests/core/index/geo/geo_multikey0.js +++ b/jstests/core/index/geo/geo_multikey0.js @@ -1,6 +1,6 @@ // Multikey geo values tests - SERVER-3793. -t = db.jstests_geo_multikey0; +let t = db.jstests_geo_multikey0; t.drop(); // Check that conflicting constraints are satisfied by parallel array elements. diff --git a/jstests/core/index/geo/geo_multikey1.js b/jstests/core/index/geo/geo_multikey1.js index e6d0ec086da91..3bdbea422726a 100644 --- a/jstests/core/index/geo/geo_multikey1.js +++ b/jstests/core/index/geo/geo_multikey1.js @@ -1,11 +1,11 @@ // Multikey geo index tests with parallel arrays. -t = db.jstests_geo_multikey1; +let t = db.jstests_geo_multikey1; t.drop(); -locArr = []; -arr = []; -for (i = 0; i < 10; ++i) { +let locArr = []; +let arr = []; +for (let i = 0; i < 10; ++i) { locArr.push([i, i + 1]); arr.push(i); } diff --git a/jstests/core/index/geo/geo_multinest0.js b/jstests/core/index/geo/geo_multinest0.js index 746a530e19b0d..11111f17a5515 100644 --- a/jstests/core/index/geo/geo_multinest0.js +++ b/jstests/core/index/geo/geo_multinest0.js @@ -6,7 +6,7 @@ // Make sure nesting of location arrays also works. 
-t = db.geonest; +let t = db.geonest; t.drop(); t.insert({zip: "10001", data: [{loc: [10, 10], type: "home"}, {loc: [50, 50], type: "work"}]}); diff --git a/jstests/core/index/geo/geo_multinest1.js b/jstests/core/index/geo/geo_multinest1.js index b6aa53cae168c..4cc829e79ba6c 100644 --- a/jstests/core/index/geo/geo_multinest1.js +++ b/jstests/core/index/geo/geo_multinest1.js @@ -6,7 +6,7 @@ // Test distance queries with interleaved distances -t = db.multinest; +let t = db.multinest; t.drop(); t.insert({zip: "10001", data: [{loc: [10, 10], type: "home"}, {loc: [29, 29], type: "work"}]}); diff --git a/jstests/core/index/geo/geo_near_random1.js b/jstests/core/index/geo/geo_near_random1.js index 1573e72c1f816..b1f5457bdef52 100644 --- a/jstests/core/index/geo/geo_near_random1.js +++ b/jstests/core/index/geo/geo_near_random1.js @@ -18,9 +18,7 @@ test.testPt(test.mkPt()); test.testPt(test.mkPt()); test.testPt(test.mkPt()); -opts = { - sphere: 1 -}; +let opts = {sphere: 1}; // Test $nearSphere with a 2d index test.testPt([0, 0], opts); diff --git a/jstests/core/index/geo/geo_near_random2.js b/jstests/core/index/geo/geo_near_random2.js index a9e242edbcca2..257b27f4f314d 100644 --- a/jstests/core/index/geo/geo_near_random2.js +++ b/jstests/core/index/geo/geo_near_random2.js @@ -18,10 +18,7 @@ test.insertPts(5000); // distances are in increasing order. The test runs in O(N^2). // Test $near with 2d index -opts = { - sphere: 0, - nToTest: test.nPts * 0.01 -}; +let opts = {sphere: 0, nToTest: test.nPts * 0.01}; test.testPt([0, 0], opts); test.testPt(test.mkPt(), opts); test.testPt(test.mkPt(), opts); diff --git a/jstests/core/index/geo/geo_or.js b/jstests/core/index/geo/geo_or.js index cee91d4929dfc..a8fe7ae7b5e1c 100644 --- a/jstests/core/index/geo/geo_or.js +++ b/jstests/core/index/geo/geo_or.js @@ -1,6 +1,6 @@ // multiple geo clauses with $or -t = db.geoor; +let t = db.geoor; t.drop(); diff --git a/jstests/core/index/geo/geo_poly_line.js b/jstests/core/index/geo/geo_poly_line.js index 85dc927415827..58a1ae215f899 100644 --- a/jstests/core/index/geo/geo_poly_line.js +++ b/jstests/core/index/geo/geo_poly_line.js @@ -1,6 +1,6 @@ // Test that weird polygons work SERVER-3725 -t = db.geo_polygon5; +let t = db.geo_polygon5; t.drop(); t.insert({loc: [0, 0]}); diff --git a/jstests/core/index/geo/geo_polygon1_noindex.js b/jstests/core/index/geo/geo_polygon1_noindex.js index 5f43f736b45b4..8b441c81dc409 100644 --- a/jstests/core/index/geo/geo_polygon1_noindex.js +++ b/jstests/core/index/geo/geo_polygon1_noindex.js @@ -1,22 +1,22 @@ // SERVER-7343: allow $within without a geo index. 
-t = db.geo_polygon1_noindex; +let t = db.geo_polygon1_noindex; t.drop(); -num = 0; -for (x = 1; x < 9; x++) { - for (y = 1; y < 9; y++) { - o = {_id: num++, loc: [x, y]}; +let num = 0; +for (let x = 1; x < 9; x++) { + for (let y = 1; y < 9; y++) { + let o = {_id: num++, loc: [x, y]}; t.save(o); } } -triangle = [[0, 0], [1, 1], [0, 2]]; +let triangle = [[0, 0], [1, 1], [0, 2]]; // Look at only a small slice of the data within a triangle assert.eq(1, t.find({loc: {"$within": {"$polygon": triangle}}}).count(), "Triangle Test"); -boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]]; +let boxBounds = [[0, 0], [0, 10], [10, 10], [10, 0]]; assert.eq(num, t.find({loc: {"$within": {"$polygon": boxBounds}}}).count(), "Bounding Box Test"); @@ -33,7 +33,7 @@ assert.eq( t.drop(); -pacman = [ +let pacman = [ [0, 2], [0, 4], [2, 6], diff --git a/jstests/core/index/geo/geo_polygon2.js b/jstests/core/index/geo/geo_polygon2.js index 7a6ad0aedb53e..0452b28f5631b 100644 --- a/jstests/core/index/geo/geo_polygon2.js +++ b/jstests/core/index/geo/geo_polygon2.js @@ -29,8 +29,8 @@ for (var test = 0; test < numTests; test++) { x = x[0]; } - xp = x * Math.cos(rotation) - y * Math.sin(rotation); - yp = y * Math.cos(rotation) + x * Math.sin(rotation); + let xp = x * Math.cos(rotation) - y * Math.sin(rotation); + let yp = y * Math.cos(rotation) + x * Math.sin(rotation); var scaleX = (bounds[1] - bounds[0]) / 360; var scaleY = (bounds[1] - bounds[0]) / 360; @@ -121,12 +121,13 @@ for (var test = 0; test < numTests; test++) { } } - turtlePaths = []; + let turtlePaths = []; for (var t = 0; t < numTurtles; t++) { - turtlePath = []; + let turtlePath = []; var nextSeg = function(currTurtle, prevTurtle) { var pathX = currTurtle[0]; + let pathY; if (currTurtle[1] < prevTurtle[1]) { pathX = currTurtle[0] + 1; @@ -150,15 +151,15 @@ for (var test = 0; test < numTests; test++) { }; for (var s = 1; s < turtles[t].length; s++) { - currTurtle = turtles[t][s]; - prevTurtle = turtles[t][s - 1]; + let currTurtle = turtles[t][s]; + let prevTurtle = turtles[t][s - 1]; turtlePath.push(nextSeg(currTurtle, prevTurtle)); } for (var s = turtles[t].length - 2; s >= 0; s--) { - currTurtle = turtles[t][s]; - prevTurtle = turtles[t][s + 1]; + let currTurtle = turtles[t][s]; + let prevTurtle = turtles[t][s + 1]; turtlePath.push(nextSeg(currTurtle, prevTurtle)); } @@ -169,7 +170,7 @@ for (var test = 0; test < numTests; test++) { var lastTurtle = turtles[t][turtles[t].length - 1]; grid[lastTurtle[0]][lastTurtle[1]] = undefined; - fixedTurtlePath = []; + let fixedTurtlePath = []; for (var s = 1; s < turtlePath.length; s++) { if (turtlePath[s - 1][0] == turtlePath[s][0] && turtlePath[s - 1][1] == turtlePath[s][1]) { @@ -236,7 +237,7 @@ for (var test = 0; test < numTests; test++) { t.insert({loc: allPointsIn}); t.insert({loc: allPointsOut}); - allPoints = allPointsIn.concat(allPointsOut); + let allPoints = allPointsIn.concat(allPointsOut); t.insert({loc: allPoints}); print("Points : "); diff --git a/jstests/core/index/geo/geo_queryoptimizer.js b/jstests/core/index/geo/geo_queryoptimizer.js index 199cedf5330f9..a9c3378925f0a 100644 --- a/jstests/core/index/geo/geo_queryoptimizer.js +++ b/jstests/core/index/geo/geo_queryoptimizer.js @@ -1,4 +1,4 @@ -t = db.geo_qo1; +let t = db.geo_qo1; t.drop(); t.createIndex({loc: "2d"}); diff --git a/jstests/core/index/geo/geo_regex0.js b/jstests/core/index/geo/geo_regex0.js index 7629c193ea2a9..1a7ef2ffde68e 100644 --- a/jstests/core/index/geo/geo_regex0.js +++ b/jstests/core/index/geo/geo_regex0.js @@ -1,22 +1,15 @@ // 
From SERVER-2247 // Tests to make sure regex works with geo indices -t = db.regex0; +let t = db.regex0; t.drop(); t.createIndex({point: '2d', words: 1}); t.insert({point: [1, 1], words: ['foo', 'bar']}); -regex = { - words: /^f/ -}; -geo = { - point: {$near: [1, 1]} -}; -both = { - point: {$near: [1, 1]}, - words: /^f/ -}; +let regex = {words: /^f/}; +let geo = {point: {$near: [1, 1]}}; +let both = {point: {$near: [1, 1]}, words: /^f/}; assert.eq(1, t.find(regex).count()); assert.eq(1, t.find(geo).count()); diff --git a/jstests/core/index/geo/geo_s2dedupnear.js b/jstests/core/index/geo/geo_s2dedupnear.js index ad1674f1e6e93..89a1c705c7194 100644 --- a/jstests/core/index/geo/geo_s2dedupnear.js +++ b/jstests/core/index/geo/geo_s2dedupnear.js @@ -1,6 +1,6 @@ // Make sure that we don't return several of the same result due to faulty // assumptions about the btree cursor. That is, don't return duplicate results. -t = db.geo_s2dedupnear; +let t = db.geo_s2dedupnear; t.drop(); t.createIndex({geo: "2dsphere"}); @@ -9,5 +9,5 @@ var x = { "coordinates": [[[100.0, 0.0], [101.0, 0.0], [101.0, 1.0], [100.0, 1.0], [100.0, 0.0]]] }; t.insert({geo: x}); -res = t.find({geo: {$geoNear: {"type": "Point", "coordinates": [31, 41]}}}); +let res = t.find({geo: {$geoNear: {"type": "Point", "coordinates": [31, 41]}}}); assert.eq(res.itcount(), 1); diff --git a/jstests/core/index/geo/geo_s2dupe_points.js b/jstests/core/index/geo/geo_s2dupe_points.js index 9f94f69caaa0d..54f9b3b3fc80f 100644 --- a/jstests/core/index/geo/geo_s2dupe_points.js +++ b/jstests/core/index/geo/geo_s2dupe_points.js @@ -2,7 +2,7 @@ // s2 rejects shapes with duplicate adjacent points as invalid, but they are // valid in GeoJSON. We store the duplicates, but internally remove them // before indexing or querying. 
-t = db.geo_s2dupe_points; +let t = db.geo_s2dupe_points; t.drop(); t.createIndex({geo: "2dsphere"}); diff --git a/jstests/core/index/geo/geo_s2edgecases.js b/jstests/core/index/geo/geo_s2edgecases.js index 6e4633f58c8c3..b083ea88ae092 100644 --- a/jstests/core/index/geo/geo_s2edgecases.js +++ b/jstests/core/index/geo/geo_s2edgecases.js @@ -1,51 +1,42 @@ -t = db.geo_s2edgecases; +let t = db.geo_s2edgecases; t.drop(); -roundworldpoint = { - "type": "Point", - "coordinates": [180, 0] -}; +let roundworldpoint = {"type": "Point", "coordinates": [180, 0]}; // Opposite the equator -roundworld = { +let roundworld = { "type": "Polygon", "coordinates": [[[179, 1], [-179, 1], [-179, -1], [179, -1], [179, 1]]] }; t.insert({geo: roundworld}); -roundworld2 = { +let roundworld2 = { "type": "Polygon", "coordinates": [[[179, 1], [179, -1], [-179, -1], [-179, 1], [179, 1]]] }; t.insert({geo: roundworld2}); // North pole -santapoint = { - "type": "Point", - "coordinates": [180, 90] -}; -santa = { +let santapoint = {"type": "Point", "coordinates": [180, 90]}; +let santa = { "type": "Polygon", "coordinates": [[[179, 89], [179, 90], [-179, 90], [-179, 89], [179, 89]]] }; t.insert({geo: santa}); -santa2 = { +let santa2 = { "type": "Polygon", "coordinates": [[[179, 89], [-179, 89], [-179, 90], [179, 90], [179, 89]]] }; t.insert({geo: santa2}); // South pole -penguinpoint = { - "type": "Point", - "coordinates": [0, -90] -}; -penguin1 = { +let penguinpoint = {"type": "Point", "coordinates": [0, -90]}; +let penguin1 = { "type": "Polygon", "coordinates": [[[0, -89], [0, -90], [179, -90], [179, -89], [0, -89]]] }; t.insert({geo: penguin1}); -penguin2 = { +let penguin2 = { "type": "Polygon", "coordinates": [[[0, -89], [179, -89], [179, -90], [0, -90], [0, -89]]] }; @@ -53,7 +44,7 @@ t.insert({geo: penguin2}); t.createIndex({geo: "2dsphere", nonGeo: 1}); -res = t.find({"geo": {"$geoIntersects": {"$geometry": roundworldpoint}}}); +let res = t.find({"geo": {"$geoIntersects": {"$geometry": roundworldpoint}}}); assert.eq(res.count(), 2); res = t.find({"geo": {"$geoIntersects": {"$geometry": santapoint}}}); assert.eq(res.count(), 2); diff --git a/jstests/core/index/geo/geo_s2exact.js b/jstests/core/index/geo/geo_s2exact.js index 92ce551d87320..eb563498c07d4 100644 --- a/jstests/core/index/geo/geo_s2exact.js +++ b/jstests/core/index/geo/geo_s2exact.js @@ -1,5 +1,5 @@ // Queries on exact geometry should return the exact geometry. -t = db.geo_s2exact; +let t = db.geo_s2exact; t.drop(); function test(geometry) { @@ -10,20 +10,11 @@ function test(geometry) { t.dropIndex({geo: "2dsphere"}); } -pointA = { - "type": "Point", - "coordinates": [40, 5] -}; +let pointA = {"type": "Point", "coordinates": [40, 5]}; test(pointA); -someline = { - "type": "LineString", - "coordinates": [[40, 5], [41, 6]] -}; +let someline = {"type": "LineString", "coordinates": [[40, 5], [41, 6]]}; test(someline); -somepoly = { - "type": "Polygon", - "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]] -}; +let somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]}; test(somepoly); diff --git a/jstests/core/index/geo/geo_s2index.js b/jstests/core/index/geo/geo_s2index.js index af4475a79cbaf..9ffdc2c02a0fe 100644 --- a/jstests/core/index/geo/geo_s2index.js +++ b/jstests/core/index/geo/geo_s2index.js @@ -1,56 +1,36 @@ -t = db.geo_s2index; +let t = db.geo_s2index; t.drop(); // We internally drop adjacent duplicate points in lines. 
-someline = { - "type": "LineString", - "coordinates": [[40, 5], [40, 5], [40, 5], [41, 6], [41, 6]] -}; +let someline = {"type": "LineString", "coordinates": [[40, 5], [40, 5], [40, 5], [41, 6], [41, 6]]}; t.insert({geo: someline, nonGeo: "someline"}); t.createIndex({geo: "2dsphere"}); -foo = t.find({geo: {$geoIntersects: {$geometry: {type: "Point", coordinates: [40, 5]}}}}).next(); +let foo = + t.find({geo: {$geoIntersects: {$geometry: {type: "Point", coordinates: [40, 5]}}}}).next(); assert.eq(foo.geo, someline); t.dropIndex({geo: "2dsphere"}); -pointA = { - "type": "Point", - "coordinates": [40, 5] -}; +let pointA = {"type": "Point", "coordinates": [40, 5]}; t.insert({geo: pointA, nonGeo: "pointA"}); -pointD = { - "type": "Point", - "coordinates": [41.001, 6.001] -}; +let pointD = {"type": "Point", "coordinates": [41.001, 6.001]}; t.insert({geo: pointD, nonGeo: "pointD"}); -pointB = { - "type": "Point", - "coordinates": [41, 6] -}; +let pointB = {"type": "Point", "coordinates": [41, 6]}; t.insert({geo: pointB, nonGeo: "pointB"}); -pointC = { - "type": "Point", - "coordinates": [41, 6] -}; +let pointC = {"type": "Point", "coordinates": [41, 6]}; t.insert({geo: pointC}); // Add a point within the polygon but not on the border. Don't want to be on // the path of the polyline. -pointE = { - "type": "Point", - "coordinates": [40.6, 5.4] -}; +let pointE = {"type": "Point", "coordinates": [40.6, 5.4]}; t.insert({geo: pointE}); // Make sure we can index this without error. t.insert({nonGeo: "noGeoField!"}); -somepoly = { - "type": "Polygon", - "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]] -}; +let somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]}; t.insert({geo: somepoly, nonGeo: "somepoly"}); var res = t.createIndex({geo: "2dsphere", nonGeo: 1}); diff --git a/jstests/core/index/geo/geo_s2indexoldformat.js b/jstests/core/index/geo/geo_s2indexoldformat.js index ffe472464d4af..ad1008115dddb 100644 --- a/jstests/core/index/geo/geo_s2indexoldformat.js +++ b/jstests/core/index/geo/geo_s2indexoldformat.js @@ -1,6 +1,6 @@ // Make sure that the 2dsphere index can deal with non-GeoJSON points. // 2dsphere does not accept legacy shapes, only legacy points. -t = db.geo_s2indexoldformat; +let t = db.geo_s2indexoldformat; t.drop(); t.insert({geo: [40, 5], nonGeo: ["pointA"]}); @@ -11,7 +11,7 @@ t.insert({geo: {x: 40.6, y: 5.4}}); t.createIndex({geo: "2dsphere", nonGeo: 1}); -res = t.find({"geo": {"$geoIntersects": {"$geometry": {x: 40, y: 5}}}}); +let res = t.find({"geo": {"$geoIntersects": {"$geometry": {x: 40, y: 5}}}}); assert.eq(res.count(), 1); res = t.find({"geo": {"$geoIntersects": {"$geometry": [41, 6]}}}); diff --git a/jstests/core/index/geo/geo_s2largewithin.js b/jstests/core/index/geo/geo_s2largewithin.js index e1eed8a2e6a98..d94ebc5f0a774 100644 --- a/jstests/core/index/geo/geo_s2largewithin.js +++ b/jstests/core/index/geo/geo_s2largewithin.js @@ -1,20 +1,17 @@ // If our $within is enormous, create a coarse covering for the search so it // doesn't take forever. 
-t = db.geo_s2largewithin; +let t = db.geo_s2largewithin; t.drop(); t.createIndex({geo: "2dsphere"}); -testPoint = { - name: "origin", - geo: {type: "Point", coordinates: [0.0, 0.0]} -}; +let testPoint = {name: "origin", geo: {type: "Point", coordinates: [0.0, 0.0]}}; -testHorizLine = { +let testHorizLine = { name: "horiz", geo: {type: "LineString", coordinates: [[-2.0, 10.0], [2.0, 10.0]]} }; -testVertLine = { +let testVertLine = { name: "vert", geo: {type: "LineString", coordinates: [[10.0, -2.0], [10.0, 2.0]]} }; @@ -25,12 +22,12 @@ t.insert(testVertLine); // Test a poly that runs horizontally along the equator. -longPoly = { +let longPoly = { type: "Polygon", coordinates: [[[30.0, 1.0], [-30.0, 1.0], [-30.0, -1.0], [30.0, -1.0], [30.0, 1.0]]] }; -result = t.find({geo: {$geoWithin: {$geometry: longPoly}}}); +let result = t.find({geo: {$geoWithin: {$geometry: longPoly}}}); assert.eq(result.itcount(), 1); result = t.find({geo: {$geoWithin: {$geometry: longPoly}}}); assert.eq("origin", result[0].name); diff --git a/jstests/core/index/geo/geo_s2meridian.js b/jstests/core/index/geo/geo_s2meridian.js index 99eb0c63dc439..a0196bb5a41aa 100644 --- a/jstests/core/index/geo/geo_s2meridian.js +++ b/jstests/core/index/geo/geo_s2meridian.js @@ -1,4 +1,4 @@ -t = db.geo_s2meridian; +let t = db.geo_s2meridian; t.drop(); t.createIndex({geo: "2dsphere"}); @@ -8,18 +8,15 @@ t.createIndex({geo: "2dsphere"}); * that runs along the meridian. */ -meridianCrossingLine = { +let meridianCrossingLine = { geo: {type: "LineString", coordinates: [[-178.0, 10.0], [178.0, 10.0]]} }; assert.commandWorked(t.insert(meridianCrossingLine)); -lineAlongMeridian = { - type: "LineString", - coordinates: [[180.0, 11.0], [180.0, 9.0]] -}; +let lineAlongMeridian = {type: "LineString", coordinates: [[180.0, 11.0], [180.0, 9.0]]}; -result = t.find({geo: {$geoIntersects: {$geometry: lineAlongMeridian}}}); +let result = t.find({geo: {$geoIntersects: {$geometry: lineAlongMeridian}}}); assert.eq(result.itcount(), 1); t.drop(); @@ -29,21 +26,15 @@ t.createIndex({geo: "2dsphere"}); * on the meridian, and immediately on either side, and confirm that a poly * covering all of them returns them all. */ -pointOnNegativeSideOfMeridian = { - geo: {type: "Point", coordinates: [-179.0, 1.0]} -}; -pointOnMeridian = { - geo: {type: "Point", coordinates: [180.0, 1.0]} -}; -pointOnPositiveSideOfMeridian = { - geo: {type: "Point", coordinates: [179.0, 1.0]} -}; +let pointOnNegativeSideOfMeridian = {geo: {type: "Point", coordinates: [-179.0, 1.0]}}; +let pointOnMeridian = {geo: {type: "Point", coordinates: [180.0, 1.0]}}; +let pointOnPositiveSideOfMeridian = {geo: {type: "Point", coordinates: [179.0, 1.0]}}; t.insert(pointOnMeridian); t.insert(pointOnNegativeSideOfMeridian); t.insert(pointOnPositiveSideOfMeridian); -meridianCrossingPoly = { +let meridianCrossingPoly = { type: "Polygon", coordinates: [[[-178.0, 10.0], [178.0, 10.0], [178.0, -10.0], [-178.0, -10.0], [-178.0, 10.0]]] }; @@ -58,15 +49,9 @@ t.createIndex({geo: "2dsphere"}); * closer, but across the meridian, and confirm they both come back, and * that the order is correct. 
*/ -pointOnNegativeSideOfMerid = { - name: "closer", - geo: {type: "Point", coordinates: [-179.0, 0.0]} -}; +let pointOnNegativeSideOfMerid = {name: "closer", geo: {type: "Point", coordinates: [-179.0, 0.0]}}; -pointOnPositiveSideOfMerid = { - name: "farther", - geo: {type: "Point", coordinates: [176.0, 0.0]} -}; +let pointOnPositiveSideOfMerid = {name: "farther", geo: {type: "Point", coordinates: [176.0, 0.0]}}; t.insert(pointOnNegativeSideOfMerid); t.insert(pointOnPositiveSideOfMerid); diff --git a/jstests/core/index/geo/geo_s2multi.js b/jstests/core/index/geo/geo_s2multi.js index 858dfd1efa1d9..3da229ad743ad 100644 --- a/jstests/core/index/geo/geo_s2multi.js +++ b/jstests/core/index/geo/geo_s2multi.js @@ -1,22 +1,19 @@ -t = db.geo_s2multi; +let t = db.geo_s2multi; t.drop(); t.createIndex({geo: "2dsphere"}); // Let's try the examples in the GeoJSON spec. -multiPointA = { - "type": "MultiPoint", - "coordinates": [[100.0, 0.0], [101.0, 1.0]] -}; +let multiPointA = {"type": "MultiPoint", "coordinates": [[100.0, 0.0], [101.0, 1.0]]}; assert.commandWorked(t.insert({geo: multiPointA})); -multiLineStringA = { +let multiLineStringA = { "type": "MultiLineString", "coordinates": [[[100.0, 0.0], [101.0, 1.0]], [[102.0, 2.0], [103.0, 3.0]]] }; assert.commandWorked(t.insert({geo: multiLineStringA})); -multiPolygonA = { +let multiPolygonA = { "type": "MultiPolygon", "coordinates": [ [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]], @@ -61,7 +58,7 @@ assert.eq( // Polygon contains itself and the multipoint. assert.eq(2, t.find({geo: {$geoWithin: {$geometry: multiPolygonA}}}).itcount()); -partialPolygonA = { +let partialPolygonA = { "type": "Polygon", "coordinates": [[[102.0, 2.0], [103.0, 2.0], [103.0, 3.0], [102.0, 3.0], [102.0, 2.0]]] }; diff --git a/jstests/core/index/geo/geo_s2near.js b/jstests/core/index/geo/geo_s2near.js index 2cd732da67a2a..7c584992e8a77 100644 --- a/jstests/core/index/geo/geo_s2near.js +++ b/jstests/core/index/geo/geo_s2near.js @@ -4,17 +4,14 @@ // Test 2dsphere near search, called via find and $geoNear. (function() { -t = db.geo_s2near; +let t = db.geo_s2near; t.drop(); // Make sure that geoNear gives us back loc -goldenPoint = { - type: "Point", - coordinates: [31.0, 41.0] -}; +let goldenPoint = {type: "Point", coordinates: [31.0, 41.0]}; t.insert({geo: goldenPoint}); t.createIndex({geo: "2dsphere"}); -resNear = +let resNear = t.aggregate([ {$geoNear: {near: [30, 40], distanceField: "d", spherical: true, includeLocs: "loc"}}, {$limit: 1} @@ -25,31 +22,22 @@ assert.eq(resNear[0].loc, goldenPoint); // FYI: // One degree of long @ 0 is 111km or so. // One degree of lat @ 0 is 110km or so. -lat = 0; -lng = 0; -points = 10; +let lat = 0; +let lng = 0; +let points = 10; for (var x = -points; x < points; x += 1) { for (var y = -points; y < points; y += 1) { t.insert({geo: {"type": "Point", "coordinates": [lng + x / 1000.0, lat + y / 1000.0]}}); } } -origin = { - "type": "Point", - "coordinates": [lng, lat] -}; +let origin = {"type": "Point", "coordinates": [lng, lat]}; t.createIndex({geo: "2dsphere"}); // Near only works when the query is a point. 
-someline = { - "type": "LineString", - "coordinates": [[40, 5], [41, 6]] -}; -somepoly = { - "type": "Polygon", - "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]] -}; +let someline = {"type": "LineString", "coordinates": [[40, 5], [41, 6]]}; +let somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]}; assert.throws(function() { return t.find({"geo": {"$near": {"$geometry": someline}}}).count(); }); @@ -70,7 +58,7 @@ assert.commandFailedWithCode(db.runCommand({ 2); // Do some basic near searches. -res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10); +let res = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 2000}}}).limit(10); resNear = t.aggregate([ {$geoNear: {near: [0, 0], distanceField: "dis", maxDistance: Math.PI, spherical: true}}, {$limit: 10}, @@ -109,29 +97,30 @@ assert.eq(res.itcount(), resNear.itcount(), ((2 * points) * (2 * points) + 4).to function testRadAndDegreesOK(distance) { // Distance for old style points is radians. - resRadians = t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}}); + let resRadians = + t.find({geo: {$nearSphere: [0, 0], $maxDistance: (distance / (6378.1 * 1000))}}); // Distance for new style points is meters. - resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}}); + let resMeters = t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: distance}}}); // And we should get the same # of results no matter what. assert.eq(resRadians.itcount(), resMeters.itcount()); // Also, $geoNear should behave the same way. - resGNMeters = t.aggregate({ - $geoNear: { - near: origin, - distanceField: "dis", - maxDistance: distance, - spherical: true, - } - }).toArray(); - resGNRadians = t.aggregate({ - $geoNear: { - near: [0, 0], - distanceField: "dis", - maxDistance: (distance / (6378.1 * 1000)), - spherical: true, - } - }).toArray(); + let resGNMeters = t.aggregate({ + $geoNear: { + near: origin, + distanceField: "dis", + maxDistance: distance, + spherical: true, + } + }).toArray(); + let resGNRadians = t.aggregate({ + $geoNear: { + near: [0, 0], + distanceField: "dis", + maxDistance: (distance / (6378.1 * 1000)), + spherical: true, + } + }).toArray(); const errmsg = `$geoNear using meter distances returned ${tojson(resGNMeters)}, but ` + `$geoNear using radian distances returned ${tojson(resGNRadians)}`; assert.eq(resGNRadians.length, resGNMeters.length, errmsg); diff --git a/jstests/core/index/geo/geo_s2nearComplex.js b/jstests/core/index/geo/geo_s2nearComplex.js index 22fe3112499d6..df978d45e6114 100644 --- a/jstests/core/index/geo/geo_s2nearComplex.js +++ b/jstests/core/index/geo/geo_s2nearComplex.js @@ -18,7 +18,7 @@ var atan2 = Math.atan2; var originGeo = {type: "Point", coordinates: [20.0, 20.0]}; // Center point for all tests. -var origin = {name: "origin", geo: originGeo}; +let origin = {name: "origin", geo: originGeo}; /* * Convenience function for checking that coordinates match. 
threshold let's you @@ -114,7 +114,7 @@ function uniformPointsWithClusters( origin, count, minDist, maxDist, numberOfClusters, minClusterSize, maxClusterSize, distRatio) { distRatio = distRatio || 10; var points = uniformPoints(origin, count, minDist, maxDist); - for (j = 0; j < numberOfClusters; j++) { + for (let j = 0; j < numberOfClusters; j++) { var randomPoint = points[Math.floor(random() * points.length)]; var clusterSize = (random() * (maxClusterSize - minClusterSize)) + minClusterSize; uniformPoints(randomPoint, clusterSize, minDist / distRatio, maxDist / distRatio); @@ -146,18 +146,18 @@ function validateOrdering(query) { var near30 = t.find(query).limit(30); var near40 = t.find(query).limit(40); - for (i = 0; i < 10; i++) { + for (let i = 0; i < 10; i++) { assert(coordinateEqual(near10[i], near20[i])); assert(coordinateEqual(near10[i], near30[i])); assert(coordinateEqual(near10[i], near40[i])); } - for (i = 0; i < 20; i++) { + for (let i = 0; i < 20; i++) { assert(coordinateEqual(near20[i], near30[i])); assert(coordinateEqual(near20[i], near40[i])); } - for (i = 0; i < 30; i++) { + for (let i = 0; i < 30; i++) { assert(coordinateEqual(near30[i], near40[i])); } } @@ -281,7 +281,7 @@ origin = { uniformPoints(origin, 10, 89, 90); -cur = t.find({geo: {$near: {$geometry: originGeo}}}); +let cur = t.find({geo: {$near: {$geometry: originGeo}}}); assert.eq(cur.itcount(), 10); cur = t.find({geo: {$near: {$geometry: originGeo}}}); @@ -290,5 +290,5 @@ print("Near search on very distant points:"); print(t.find({geo: {$geoNear: {$geometry: originGeo}}}) .explain("executionStats") .executionStats.executionTimeMillis); -pt = cur.next(); +let pt = cur.next(); assert(pt); diff --git a/jstests/core/index/geo/geo_s2nearcorrect.js b/jstests/core/index/geo/geo_s2nearcorrect.js index 80ece4223d2c8..b21782140be99 100644 --- a/jstests/core/index/geo/geo_s2nearcorrect.js +++ b/jstests/core/index/geo/geo_s2nearcorrect.js @@ -2,17 +2,11 @@ // A geometry may have several covers, one of which is in a search ring and the other of which is // not. If we see the cover that's not in the search ring, we can't mark the object as 'seen' for // this ring. -t = db.geo_s2nearcorrect; +let t = db.geo_s2nearcorrect; t.drop(); -longline = { - "type": "LineString", - "coordinates": [[0, 0], [179, 89]] -}; +let longline = {"type": "LineString", "coordinates": [[0, 0], [179, 89]]}; t.insert({geo: longline}); t.createIndex({geo: "2dsphere"}); -origin = { - "type": "Point", - "coordinates": [45, 45] -}; +let origin = {"type": "Point", "coordinates": [45, 45]}; assert.eq(1, t.find({"geo": {"$near": {"$geometry": origin, $maxDistance: 20000000}}}).count()); diff --git a/jstests/core/index/geo/geo_s2nongeoarray.js b/jstests/core/index/geo/geo_s2nongeoarray.js index f5830e8702d60..3e2b0d5eb52cd 100644 --- a/jstests/core/index/geo/geo_s2nongeoarray.js +++ b/jstests/core/index/geo/geo_s2nongeoarray.js @@ -1,8 +1,8 @@ // Explode arrays when indexing non-geo fields in 2dsphere, and make sure that // we find them with queries. 
-t = db.geo_s2nongeoarray; +let t = db.geo_s2nongeoarray; -oldPoint = [40, 5]; +let oldPoint = [40, 5]; var data = {geo: oldPoint, nonGeo: [123, 456], otherNonGeo: [{b: [1, 2]}, {b: [3, 4]}]}; diff --git a/jstests/core/index/geo/geo_s2nonstring.js b/jstests/core/index/geo/geo_s2nonstring.js index a76a7cd073e5b..b7f9496a3b976 100644 --- a/jstests/core/index/geo/geo_s2nonstring.js +++ b/jstests/core/index/geo/geo_s2nonstring.js @@ -1,5 +1,5 @@ // Added to make sure that S2 indexing's string AND non-string keys work. -t = db.geo_s2nonstring; +let t = db.geo_s2nonstring; t.drop(); t.createIndex({geo: '2dsphere', x: 1}); diff --git a/jstests/core/index/geo/geo_s2nopoints.js b/jstests/core/index/geo/geo_s2nopoints.js index 7b3a3b970a76f..b27c2ea20343f 100644 --- a/jstests/core/index/geo/geo_s2nopoints.js +++ b/jstests/core/index/geo/geo_s2nopoints.js @@ -1,5 +1,5 @@ // See SERVER-7794. -t = db.geo_s2nopoints; +let t = db.geo_s2nopoints; t.drop(); t.createIndex({loc: "2dsphere", x: 1}); diff --git a/jstests/core/index/geo/geo_s2oddshapes.js b/jstests/core/index/geo/geo_s2oddshapes.js index db30d431caa53..e6610dea12a47 100644 --- a/jstests/core/index/geo/geo_s2oddshapes.js +++ b/jstests/core/index/geo/geo_s2oddshapes.js @@ -96,7 +96,7 @@ outsidePoint = { t.insert(insidePoint); t.insert(outsidePoint); -smallPoly = { +let smallPoly = { type: "Polygon", coordinates: [[[0.0, -0.01], [0.015, -0.01], [0.015, 0.01], [0.0, 0.01], [0.0, -0.01]]] }; diff --git a/jstests/core/index/geo/geo_s2twofields.js b/jstests/core/index/geo/geo_s2twofields.js index 9f769f6897f57..af7f07f64ebc1 100644 --- a/jstests/core/index/geo/geo_s2twofields.js +++ b/jstests/core/index/geo/geo_s2twofields.js @@ -33,7 +33,7 @@ for (var i = 0; i < maxPoints; ++i) { arr.push( {from: {type: "Point", coordinates: fromCoord}, to: {type: "Point", coordinates: toCoord}}); } -res = t.insert(arr); +let res = t.insert(arr); assert.commandWorked(res); assert.eq(t.count(), maxPoints); diff --git a/jstests/core/index/geo/geo_s2within.js b/jstests/core/index/geo/geo_s2within.js index 04915c77dcb80..6950cb3e482e6 100644 --- a/jstests/core/index/geo/geo_s2within.js +++ b/jstests/core/index/geo/geo_s2within.js @@ -1,18 +1,15 @@ // Test some cases that might be iffy with $within, mostly related to polygon w/holes. -t = db.geo_s2within; +let t = db.geo_s2within; t.drop(); t.createIndex({geo: "2dsphere"}); -somepoly = { - "type": "Polygon", - "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]] -}; +let somepoly = {"type": "Polygon", "coordinates": [[[40, 5], [40, 6], [41, 6], [41, 5], [40, 5]]]}; t.insert({geo: {"type": "LineString", "coordinates": [[40.1, 5.1], [40.2, 5.2]]}}); // This is only partially contained within the polygon. 
t.insert({geo: {"type": "LineString", "coordinates": [[40.1, 5.1], [42, 7]]}}); -res = t.find({"geo": {"$within": {"$geometry": somepoly}}}); +let res = t.find({"geo": {"$within": {"$geometry": somepoly}}}); assert.eq(res.itcount(), 1); t.drop(); diff --git a/jstests/core/index/geo/geo_sort1.js b/jstests/core/index/geo/geo_sort1.js index 4d32c2f4ae20e..5f90ea50d60a0 100644 --- a/jstests/core/index/geo/geo_sort1.js +++ b/jstests/core/index/geo/geo_sort1.js @@ -1,21 +1,21 @@ -t = db.geo_sort1; +let t = db.geo_sort1; t.drop(); -for (x = 0; x < 10; x++) { - for (y = 0; y < 10; y++) { +for (let x = 0; x < 10; x++) { + for (let y = 0; y < 10; y++) { t.insert({loc: [x, y], foo: x * x * y}); } } t.createIndex({loc: "2d", foo: 1}); -q = t.find({loc: {$near: [5, 5]}, foo: {$gt: 20}}); -m = function(z) { +let q = t.find({loc: {$near: [5, 5]}, foo: {$gt: 20}}); +let m = function(z) { return z.foo; }; -a = q.clone().map(m); -b = q.clone().sort({foo: 1}).map(m); +let a = q.clone().map(m); +let b = q.clone().sort({foo: 1}).map(m); assert.neq(a, b, "A"); a.sort(); diff --git a/jstests/core/index/geo/geo_uniqueDocs.js b/jstests/core/index/geo/geo_uniqueDocs.js index 9e0a811e7e77e..418f48691ed69 100644 --- a/jstests/core/index/geo/geo_uniqueDocs.js +++ b/jstests/core/index/geo/geo_uniqueDocs.js @@ -1,8 +1,8 @@ // Test uniqueDocs option for $within queries and the $geoNear aggregation stage. SERVER-3139 // SERVER-12120 uniqueDocs is deprecated. Server always returns unique documents. -collName = 'geo_uniqueDocs_test'; -t = db.geo_uniqueDocs_test; +let collName = 'geo_uniqueDocs_test'; +let t = db.geo_uniqueDocs_test; t.drop(); assert.commandWorked(t.save({locs: [[0, 2], [3, 4]]})); @@ -21,7 +21,8 @@ assert.eq(2, t.aggregate({$geoNear: {near: [0, 0], distanceField: "dis", uniqueDocs: true}}) .toArray() .length); -results = t.aggregate([{$geoNear: {near: [0, 0], distanceField: "dis"}}, {$limit: 2}]).toArray(); +let results = + t.aggregate([{$geoNear: {near: [0, 0], distanceField: "dis"}}, {$limit: 2}]).toArray(); assert.eq(2, results.length); assert.close(2, results[0].dis); assert.close(10, results[1].dis); diff --git a/jstests/core/index/geo/geo_update1.js b/jstests/core/index/geo/geo_update1.js index f982966afd769..67b40a66ffb07 100644 --- a/jstests/core/index/geo/geo_update1.js +++ b/jstests/core/index/geo/geo_update1.js @@ -3,7 +3,7 @@ // requires_non_retryable_writes, // ] -t = db.geo_update1; +let t = db.geo_update1; t.drop(); for (var x = 0; x < 10; x++) { diff --git a/jstests/core/index/geo/geo_update2.js b/jstests/core/index/geo/geo_update2.js index 280023ffe442d..4073a5ccd9cf4 100644 --- a/jstests/core/index/geo/geo_update2.js +++ b/jstests/core/index/geo/geo_update2.js @@ -3,7 +3,7 @@ // requires_non_retryable_writes, // ] -t = db.geo_update2; +let t = db.geo_update2; t.drop(); for (var x = 0; x < 10; x++) { diff --git a/jstests/core/index/geo/geo_update_btree.js b/jstests/core/index/geo/geo_update_btree.js index 7a58362fe18cf..403af8e5f0c95 100644 --- a/jstests/core/index/geo/geo_update_btree.js +++ b/jstests/core/index/geo/geo_update_btree.js @@ -27,7 +27,7 @@ var parallelInsert = startParallelShell( " db.jstests_geo_update_btree.insert(doc);" + "}"); -for (i = 0; i < 1000; i++) { +for (let i = 0; i < 1000; i++) { coll.update({ loc: {$within: {$center: [[Random.rand() * 180, Random.rand() * 180], Random.rand() * 50]}} }, diff --git a/jstests/core/index/geo/geo_withinquery.js b/jstests/core/index/geo/geo_withinquery.js index 13f20c1433fad..c4069d93f4db1 100644 --- 
a/jstests/core/index/geo/geo_withinquery.js +++ b/jstests/core/index/geo/geo_withinquery.js @@ -3,13 +3,13 @@ // ] // SERVER-7343: allow $within without a geo index. -t = db.geo_withinquery; +let t = db.geo_withinquery; t.drop(); -num = 0; -for (x = 0; x <= 20; x++) { - for (y = 0; y <= 20; y++) { - o = {_id: num++, loc: [x, y]}; +let num = 0; +for (let x = 0; x <= 20; x++) { + for (let y = 0; y <= 20; y++) { + let o = {_id: num++, loc: [x, y]}; t.save(o); } } diff --git a/jstests/core/index/geo/geoa.js b/jstests/core/index/geo/geoa.js index 78cf6c960c944..26bfc10b8cfe3 100644 --- a/jstests/core/index/geo/geoa.js +++ b/jstests/core/index/geo/geoa.js @@ -1,4 +1,4 @@ -t = db.geoa; +let t = db.geoa; t.drop(); t.save({_id: 1, a: {loc: [5, 5]}}); @@ -7,5 +7,5 @@ t.save({_id: 3, a: {loc: [7, 7]}}); t.createIndex({"a.loc": "2d"}); -cur = t.find({"a.loc": {$near: [6, 6]}}); +let cur = t.find({"a.loc": {$near: [6, 6]}}); assert.eq(2, cur.next()._id, "A1"); diff --git a/jstests/core/index/geo/geoc.js b/jstests/core/index/geo/geoc.js index 8875cd44614c5..5d8752b95a3e9 100644 --- a/jstests/core/index/geo/geoc.js +++ b/jstests/core/index/geo/geoc.js @@ -2,10 +2,10 @@ // requires_getmore, // ] -t = db.geoc; +let t = db.geoc; t.drop(); -N = 1000; +let N = 1000; for (var i = 0; i < N; i++) t.insert({loc: [100 + Math.random(), 100 + Math.random()], z: 0}); diff --git a/jstests/core/index/geo/geod.js b/jstests/core/index/geo/geod.js index 8586d64e3981e..27272929f65bb 100644 --- a/jstests/core/index/geo/geod.js +++ b/jstests/core/index/geo/geod.js @@ -5,13 +5,13 @@ t.save({loc: [0.5, 0]}); t.createIndex({loc: "2d"}); // do a few geoNears with different maxDistances. The first iteration // should match no points in the dataset. -dists = [.49, .51, 1.0]; -for (idx in dists) { - b = db.geod - .aggregate([ - {$geoNear: {near: [1, 0], distanceField: "d", maxDistance: dists[idx]}}, - {$limit: 2}, - ]) - .toArray(); +let dists = [.49, .51, 1.0]; +for (let idx in dists) { + let b = db.geod + .aggregate([ + {$geoNear: {near: [1, 0], distanceField: "d", maxDistance: dists[idx]}}, + {$limit: 2}, + ]) + .toArray(); assert.eq(b.length, idx, "B" + idx); } diff --git a/jstests/core/index/geo/geoe.js b/jstests/core/index/geo/geoe.js index 84bc34f7a74c4..eac049e057436 100644 --- a/jstests/core/index/geo/geoe.js +++ b/jstests/core/index/geo/geoe.js @@ -4,7 +4,7 @@ // the end of the btree and not reverse direction (leaving the rest of // the search always looking at some random non-matching point). 
-t = db.geo_box; +let t = db.geo_box; t.drop(); t.insert({"_id": 1, "geo": [33, -11.1]}); @@ -24,7 +24,7 @@ t.insert({"_id": 14, "geo": [-122.289505, 37.695774]}); t.createIndex({geo: "2d"}); -c = t.find({geo: {"$within": {"$box": [[-125.078461, 36.494473], [-120.320648, 38.905199]]}}}); +let c = t.find({geo: {"$within": {"$box": [[-125.078461, 36.494473], [-120.320648, 38.905199]]}}}); assert.eq(11, c.count(), "A1"); c = t.find({geo: {"$within": {"$box": [[-124.078461, 36.494473], [-120.320648, 38.905199]]}}}); diff --git a/jstests/core/index/geo/geof.js b/jstests/core/index/geo/geof.js index 1f1d9e0cc670b..8dce3f68b6f0d 100644 --- a/jstests/core/index/geo/geof.js +++ b/jstests/core/index/geo/geof.js @@ -1,4 +1,4 @@ -t = db.geof; +let t = db.geof; t.drop(); // corners (dist ~0.98) diff --git a/jstests/core/index/geo/geonear_cmd_input_validation.js b/jstests/core/index/geo/geonear_cmd_input_validation.js index 5b247759db116..7b936c9359a8b 100644 --- a/jstests/core/index/geo/geonear_cmd_input_validation.js +++ b/jstests/core/index/geo/geonear_cmd_input_validation.js @@ -10,7 +10,7 @@ t.createIndex({loc: "2dsphere"}); // 2dsphere index with legacy coordinate pair and spherical=false. var indexTypes = ['2d', '2dsphere'], pointTypes = [{type: 'Point', coordinates: [0, 0]}, [0, 0]], sphericalOptions = [true, false], optionNames = ['minDistance', 'maxDistance'], - badNumbers = [-1, undefined, 'foo']; + badNumbers = [-1, undefined, 'foo'], unknownArg = 'foo'; indexTypes.forEach(function(indexType) { t.drop(); @@ -100,6 +100,12 @@ indexTypes.forEach(function(indexType) { command['distanceMultiplier'] = badNumber; assert.commandFailed(db.runCommand(command), msg); }); + + // Unknown argument + var msg = ("geoNear should've failed with unknown arg " + unknownArg); + var command = makeCommand(1); + command[unknownArg] = "unknown"; + assert.commandFailed(db.runCommand(command), msg); }); }); }); diff --git a/jstests/core/index/geo/geonear_key.js b/jstests/core/index/geo/geonear_key.js index 1ecce018b081f..6a3cde1c65194 100644 --- a/jstests/core/index/geo/geonear_key.js +++ b/jstests/core/index/geo/geonear_key.js @@ -1,11 +1,6 @@ /** * Tests for the 'key' field accepted by the $geoNear aggregation stage. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); - const coll = db.jstests_geonear_key; coll.drop(); @@ -97,5 +92,4 @@ assertGeoNearFails({near: {type: "Point", coordinates: [0, 0]}, key: "b.c"}, // -- spherical=false. // -- The search point is a legacy coordinate pair. assertGeoNearFails({near: [0, 0], key: "b.d"}, ErrorCodes.NoQueryExecutionPlans); -assertGeoNearFails({near: [0, 0], key: "b.d", spherical: false}, ErrorCodes.NoQueryExecutionPlans); -}()); +assertGeoNearFails({near: [0, 0], key: "b.d", spherical: false}, ErrorCodes.NoQueryExecutionPlans); \ No newline at end of file diff --git a/jstests/core/index/hashed/hashed_index_collation.js b/jstests/core/index/hashed/hashed_index_collation.js index ead8b69bbfc3e..d0f92e6978e0f 100644 --- a/jstests/core/index/hashed/hashed_index_collation.js +++ b/jstests/core/index/hashed/hashed_index_collation.js @@ -5,11 +5,8 @@ * assumes_unsharded_collection, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand(). 
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js"; const coll = db.hashed_index_collation; coll.drop(); @@ -110,5 +107,4 @@ validateFindCmdOutputAndPlan({ projection: {"a.e": 1, _id: 0}, expectedStages: ["IXSCAN", "FETCH"], expectedOutput: [{a: {e: 5}}, {a: {e: 5}}] -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/index/hashed/hashed_index_covered_queries.js b/jstests/core/index/hashed/hashed_index_covered_queries.js index fa7753a89a56e..31e4acf704ac0 100644 --- a/jstests/core/index/hashed/hashed_index_covered_queries.js +++ b/jstests/core/index/hashed/hashed_index_covered_queries.js @@ -8,11 +8,8 @@ * assumes_no_implicit_index_creation, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand(). +import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js"; const coll = db.compound_hashed_index; coll.drop(); @@ -103,5 +100,4 @@ validateCountCmdOutputAndPlan( // Verify that a count operation with range query on a non-hashed prefix field can use // COUNT_SCAN. validateCountCmdOutputAndPlan( - {filter: {a: {$gt: 25, $lt: 29}}, expectedStages: ["COUNT_SCAN"], expectedOutput: 3}); -})(); + {filter: {a: {$gt: 25, $lt: 29}}, expectedStages: ["COUNT_SCAN"], expectedOutput: 3}); \ No newline at end of file diff --git a/jstests/core/index/hashed/hashed_index_queries.js b/jstests/core/index/hashed/hashed_index_queries.js index 54044855cc0b0..024cd311a655b 100644 --- a/jstests/core/index/hashed/hashed_index_queries.js +++ b/jstests/core/index/hashed/hashed_index_queries.js @@ -5,10 +5,8 @@ * assumes_read_concern_local, * ] */ -(function() { -"use strict"; load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand(). +import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js"; const collNamePrefix = 'hashed_index_queries_'; let collCount = 0; @@ -132,5 +130,4 @@ validateCountCmdOutputAndPlan({ filter: {a: {$gt: 25, $lt: 29}, b: 0}, expectedOutput: 1, expectedStages: ["IXSCAN", "FETCH"] -}); -})(); \ No newline at end of file +}); \ No newline at end of file diff --git a/jstests/core/index/hashed/hashed_index_queries_with_logical_operators.js b/jstests/core/index/hashed/hashed_index_queries_with_logical_operators.js index 79d5c33759f97..ff112e74bb0fa 100644 --- a/jstests/core/index/hashed/hashed_index_queries_with_logical_operators.js +++ b/jstests/core/index/hashed/hashed_index_queries_with_logical_operators.js @@ -7,11 +7,8 @@ * assumes_read_concern_local, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand(). 
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js"; const coll = db.hashed_index_queries_with_logical_operators; coll.drop(); @@ -151,5 +148,4 @@ validateFindCmdOutputAndPlan({ filter: {a: {$not: {$gt: 12}}, b: 12}, expectedOutput: [{a: 12, b: 12}, {a: null, b: 12}, {b: 12}], expectedStages: ["IXSCAN"] -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/index/hashed/hashed_index_sort.js b/jstests/core/index/hashed/hashed_index_sort.js index 189e40a156902..64d53cbdfa8e0 100644 --- a/jstests/core/index/hashed/hashed_index_sort.js +++ b/jstests/core/index/hashed/hashed_index_sort.js @@ -6,10 +6,7 @@ * assumes_unsharded_collection, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand(). +import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js"; const coll = db.hashed_index_sort; coll.drop(); @@ -193,5 +190,4 @@ validateFindCmdOutputAndPlan({ sort: {c: 1}, expectedOutput: [{c: 2}], expectedStages: ["IXSCAN", "FETCH", "SORT"] -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/index/hashed/hashed_index_with_arrays.js b/jstests/core/index/hashed/hashed_index_with_arrays.js index 8d6cc157eb6c3..b332d7d10abc4 100644 --- a/jstests/core/index/hashed/hashed_index_with_arrays.js +++ b/jstests/core/index/hashed/hashed_index_with_arrays.js @@ -73,4 +73,4 @@ assert.commandFailedWithCode(coll.insert({a: [1], b: 6}), 16766); // Array insertion allowed when the document doesn't match the partial filter predication. assert.commandWorked(coll.insert({a: [1], b: 1})); -})(); \ No newline at end of file +})(); diff --git a/jstests/core/index/hidden_index.js b/jstests/core/index/hidden_index.js index 248b3c7b87511..ac140238c469b 100644 --- a/jstests/core/index/hidden_index.js +++ b/jstests/core/index/hidden_index.js @@ -15,14 +15,12 @@ * ] */ -(function() { -'use strict'; -load("jstests/libs/analyze_plan.js"); // For getPlanStages. +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. load("jstests/libs/index_catalog_helpers.js"); // For IndexCatalogHelpers.findByName. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const columnstoreEnabled = checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"], true /* checkAllNodes */) && @@ -176,4 +174,3 @@ assert(idxSpec.hidden); assert.commandWorked(coll.unhideIndex("y")); idxSpec = IndexCatalogHelpers.findByName(coll.getIndexes(), "y"); assert.eq(idxSpec.hidden, undefined); -})(); diff --git a/jstests/core/index/index1.js b/jstests/core/index/index1.js index 4c06bfe03fd4f..e3805a6d4e8c3 100644 --- a/jstests/core/index/index1.js +++ b/jstests/core/index/index1.js @@ -1,13 +1,10 @@ // @tags: [requires_non_retryable_writes] -t = db.embeddedIndexTest; +let t = db.embeddedIndexTest; t.remove({}); -o = { - name: "foo", - z: {a: 17, b: 4} -}; +let o = {name: "foo", z: {a: 17, b: 4}}; t.save(o); assert(t.findOne().z.a == 17); diff --git a/jstests/core/index/index13.js b/jstests/core/index/index13.js index 97a3a85f7738a..ff2cb55332ef7 100644 --- a/jstests/core/index/index13.js +++ b/jstests/core/index/index13.js @@ -21,7 +21,7 @@ // SERVER-3104 implementation, the index constraints become [3,3] on the 'a.b' field _and_ [3,3] on // the 'a.c' field. -t = db.jstests_index13; +let t = db.jstests_index13; t.drop(); function assertConsistentResults(query) { @@ -30,16 +30,13 @@ function assertConsistentResults(query) { } function assertResults(query) { - explain = t.find(query).hint(index).explain(); + let explain = t.find(query).hint(index).explain(); // printjson( explain ); // debug assertConsistentResults(query); } // Cases with single dotted index fied names. -index = { - 'a.b': 1, - 'a.c': 1 -}; +let index = {'a.b': 1, 'a.c': 1}; t.createIndex(index); t.save({a: [{b: 1}, {c: 1}]}); t.save({a: [{b: 1, c: 1}]}); diff --git a/jstests/core/index/index4.js b/jstests/core/index/index4.js index 179bcdd97267f..d577186213174 100644 --- a/jstests/core/index/index4.js +++ b/jstests/core/index/index4.js @@ -6,7 +6,7 @@ // index4.js -t = db.index4; +let t = db.index4; t.drop(); t.save({name: "alleyinsider", instances: [{pool: "prod1"}, {pool: "dev1"}]}); @@ -21,7 +21,7 @@ t.createIndex({"instances.pool": 1}); sleep(10); -a = t.find({instances: {pool: "prod1"}}); +let a = t.find({instances: {pool: "prod1"}}); assert(a.length() == 1, "len1"); assert(a[0].name == "alleyinsider", "alley"); diff --git a/jstests/core/index/index5.js b/jstests/core/index/index5.js index 908b433c29985..8175d5bcc545a 100644 --- a/jstests/core/index/index5.js +++ b/jstests/core/index/index5.js @@ -4,17 +4,17 @@ function validate() { assert.eq(2, t.find().count()); - f = t.find().sort({a: 1}); + let f = t.find().sort({a: 1}); assert.eq(2, t.count()); assert.eq(1, f[0].a); assert.eq(2, f[1].a); - r = t.find().sort({a: -1}); + let r = t.find().sort({a: -1}); assert.eq(2, r.count()); assert.eq(2, r[0].a); assert.eq(1, r[1].a); } -t = db.index5; +let t = db.index5; t.drop(); t.save({a: 1}); diff --git a/jstests/core/index/index6.js b/jstests/core/index/index6.js index 4626e9359eae9..3edefedfc1bc4 100644 --- a/jstests/core/index/index6.js +++ b/jstests/core/index/index6.js @@ -1,6 +1,6 @@ // index6.js Test indexes on array subelements. 
-r = db.ed.db.index6; +let r = db.ed.db.index6; r.drop(); r.save({comments: [{name: "eliot", foo: 1}]}); diff --git a/jstests/core/index/index8.js b/jstests/core/index/index8.js index 3887906dddc2a..2b62040ca5033 100644 --- a/jstests/core/index/index8.js +++ b/jstests/core/index/index8.js @@ -12,14 +12,14 @@ // Test key uniqueness (function() { -t = db.jstests_index8; +let t = db.jstests_index8; t.drop(); t.createIndex({a: 1}); t.createIndex({b: 1}, true); t.createIndex({c: 1}, [false, "cIndex"]); -checkIndexes = function(num) { +let checkIndexes = function(num) { const indexes = t.getIndexes(); assert.eq(4, indexes.length); diff --git a/jstests/core/index/index9.js b/jstests/core/index/index9.js index 7bf7ec5ac438e..ed7da66285810 100644 --- a/jstests/core/index/index9.js +++ b/jstests/core/index/index9.js @@ -2,7 +2,7 @@ // expected. Also, the primary node cannot change because we use the local database in this test. // @tags: [assumes_no_implicit_collection_creation_after_drop, does_not_support_stepdowns] -t = db.jstests_index9; +let t = db.jstests_index9; t.drop(); assert.commandWorked(db.createCollection("jstests_index9")); diff --git a/jstests/core/index/index_arr1.js b/jstests/core/index/index_arr1.js index d6db3e621cd61..749b54543d583 100644 --- a/jstests/core/index/index_arr1.js +++ b/jstests/core/index/index_arr1.js @@ -2,7 +2,7 @@ // collection. // @tags: [assumes_no_implicit_index_creation] -t = db.index_arr1; +let t = db.index_arr1; t.drop(); t.insert({_id: 1, a: 5, b: [{x: 1}]}); diff --git a/jstests/core/index/index_arr2.js b/jstests/core/index/index_arr2.js index 999508804228a..ae977f5a514c8 100644 --- a/jstests/core/index/index_arr2.js +++ b/jstests/core/index/index_arr2.js @@ -1,27 +1,27 @@ // @tags: [assumes_balancer_off, requires_multi_updates, requires_non_retryable_writes] -NUM = 20; -M = 5; +let NUM = 20; +let M = 5; -t = db.jstests_arr2; +let t = db.jstests_arr2; function test(withIndex) { t.drop(); // insert a bunch of items to force queries to use the index. - newObject = {_id: 1, a: [{b: {c: 1}}]}; + let newObject = {_id: 1, a: [{b: {c: 1}}]}; - now = (new Date()).getTime() / 1000; - for (created = now - NUM; created <= now; created++) { + let now = (new Date()).getTime() / 1000; + for (let created = now - NUM; created <= now; created++) { newObject['created'] = created; t.insert(newObject); newObject['_id']++; } // change the last M items. - query = {'created': {'$gte': now - M}}; + let query = {'created': {'$gte': now - M}}; - Z = t.find(query).count(); + let Z = t.find(query).count(); if (withIndex) { // t.createIndex( { 'a.b.c' : 1, 'created' : -1 } ) @@ -35,7 +35,7 @@ function test(withIndex) { // now see how many were actually updated. query['a.b.c'] = 0; - count = t.count(query); + let count = t.count(query); assert.eq(Z, count, "count after withIndex:" + withIndex); } diff --git a/jstests/core/index/index_bounds_code.js b/jstests/core/index/index_bounds_code.js index 0bc80201eb47d..b96fa1f7cfe44 100644 --- a/jstests/core/index/index_bounds_code.js +++ b/jstests/core/index/index_bounds_code.js @@ -3,10 +3,7 @@ // assumes_unsharded_collection, // requires_non_retryable_writes, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. 
+import {assertCoveredQueryAndCount} from "jstests/libs/analyze_plan.js"; const coll = db.index_bounds_code; coll.drop(); @@ -49,5 +46,4 @@ assert.commandWorked(coll.insert({a: MaxKey})); assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: func}}, project: proj, count: 0}); assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: func}}, project: proj, count: 0}); assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: func}}, project: proj, count: 0}); -assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: func}}, project: proj, count: 0}); -})(); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: func}}, project: proj, count: 0}); \ No newline at end of file diff --git a/jstests/core/index/index_bounds_maxkey.js b/jstests/core/index/index_bounds_maxkey.js index 1b59340fffe77..ea2fce6f5801b 100644 --- a/jstests/core/index/index_bounds_maxkey.js +++ b/jstests/core/index/index_bounds_maxkey.js @@ -3,10 +3,7 @@ // assumes_unsharded_collection, // requires_non_retryable_writes, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. +import {assertCoveredQueryAndCount} from "jstests/libs/analyze_plan.js"; const coll = db.index_bounds_maxkey; coll.drop(); @@ -33,5 +30,4 @@ assert.commandWorked(coll.insert({})); assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MaxKey}}, project: proj, count: 0}); assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MaxKey}}, project: proj, count: 0}); assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MaxKey}}, project: proj, count: 3}); -assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 3}); -})(); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MaxKey}}, project: proj, count: 3}); \ No newline at end of file diff --git a/jstests/core/index/index_bounds_minkey.js b/jstests/core/index/index_bounds_minkey.js index 78efd322b1212..c22a9bcb4b624 100644 --- a/jstests/core/index/index_bounds_minkey.js +++ b/jstests/core/index/index_bounds_minkey.js @@ -3,10 +3,7 @@ // assumes_unsharded_collection, // requires_non_retryable_writes, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. 
+import {assertCoveredQueryAndCount} from "jstests/libs/analyze_plan.js"; const coll = db.index_bounds_minkey; coll.drop(); @@ -33,5 +30,4 @@ assert.commandWorked(coll.insert({})); assertCoveredQueryAndCount({collection: coll, query: {a: {$gt: MinKey}}, project: proj, count: 3}); assertCoveredQueryAndCount({collection: coll, query: {a: {$gte: MinKey}}, project: proj, count: 3}); assertCoveredQueryAndCount({collection: coll, query: {a: {$lt: MinKey}}, project: proj, count: 0}); -assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 0}); -})(); +assertCoveredQueryAndCount({collection: coll, query: {a: {$lte: MinKey}}, project: proj, count: 0}); \ No newline at end of file diff --git a/jstests/core/index/index_bounds_number_edge_cases.js b/jstests/core/index/index_bounds_number_edge_cases.js index 3da4e53e2d0ad..6f08c6632463c 100644 --- a/jstests/core/index/index_bounds_number_edge_cases.js +++ b/jstests/core/index/index_bounds_number_edge_cases.js @@ -2,7 +2,7 @@ // should handle numerical extremes // such as Number.MAX_VALUE and Infinity -t = db.indexboundsnumberedgecases; +let t = db.indexboundsnumberedgecases; t.drop(); diff --git a/jstests/core/index/index_bounds_object.js b/jstests/core/index/index_bounds_object.js index 255fb55f117dc..79b77f7308a60 100644 --- a/jstests/core/index/index_bounds_object.js +++ b/jstests/core/index/index_bounds_object.js @@ -3,10 +3,11 @@ // assumes_unsharded_collection, // requires_non_retryable_writes, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For assertCoveredQueryAndCount. +import { + assertCoveredQueryAndCount, + getWinningPlan, + isIndexOnly +} from "jstests/libs/analyze_plan.js"; const coll = db.index_bounds_object; coll.drop(); @@ -58,5 +59,4 @@ assert.commandWorked(coll.insert({a: []})); assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$gt: {}}}, proj).explain().queryPlanner))); assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$gte: {}}}, proj).explain().queryPlanner))); assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$lt: {}}}, proj).explain().queryPlanner))); -assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$lte: {}}}, proj).explain().queryPlanner))); -})(); +assert(!isIndexOnly(db, getWinningPlan(coll.find({a: {$lte: {}}}, proj).explain().queryPlanner))); \ No newline at end of file diff --git a/jstests/core/index/index_bounds_pipe.js b/jstests/core/index/index_bounds_pipe.js index f94c5a748fa12..e8ec773fc5af5 100644 --- a/jstests/core/index/index_bounds_pipe.js +++ b/jstests/core/index/index_bounds_pipe.js @@ -5,10 +5,7 @@ * assumes_read_concern_local, * ] */ -(function() { -'use strict'; - -load('jstests/libs/analyze_plan.js'); +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; const collName = 'index_bounds_pipe'; const coll = db.getCollection(collName); @@ -113,5 +110,4 @@ assertIndexBoundsAndResult({ regex: /^\Q\|\E/, bounds: ['["\\|", "\\}")', '[/^\\Q\\|\\E/, /^\\Q\\|\\E/]'], results: [{_id: '\\|'}] -}); -}()); +}); \ No newline at end of file diff --git a/jstests/core/index/index_bounds_timestamp.js b/jstests/core/index/index_bounds_timestamp.js index 1edf62b929eec..ca6ccca9653f2 100644 --- a/jstests/core/index/index_bounds_timestamp.js +++ b/jstests/core/index/index_bounds_timestamp.js @@ -5,10 +5,7 @@ // assumes_read_concern_local, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {assertExplainCount, isIndexOnly} from "jstests/libs/analyze_plan.js"; // Setup the test 
collection. let coll = db.index_bounds_timestamp; @@ -137,5 +134,4 @@ plan = coll.explain("executionStats") .find({ts: {$gte: Timestamp(0, 1), $lte: Timestamp(1, 0)}}, {ts: 1, _id: 0}) .finish(); assert(isIndexOnly(db, plan.queryPlanner.winningPlan), - "ts $gte, $lte find with project should be a covered query"); -})(); + "ts $gte, $lte find with project should be a covered query"); \ No newline at end of file diff --git a/jstests/core/index/index_check2.js b/jstests/core/index/index_check2.js index e296e3b558af1..c5c05c5e6d5af 100644 --- a/jstests/core/index/index_check2.js +++ b/jstests/core/index/index_check2.js @@ -3,11 +3,11 @@ // requires_getmore // ] -t = db.index_check2; +let t = db.index_check2; t.drop(); // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js"; for (var i = 0; i < 1000; i++) { var a = []; @@ -17,15 +17,9 @@ for (var i = 0; i < 1000; i++) { t.save({num: i, tags: a}); } -q1 = { - tags: "tag6" -}; -q2 = { - tags: "tag12" -}; -q3 = { - tags: {$all: ["tag6", "tag12"]} -}; +let q1 = {tags: "tag6"}; +let q2 = {tags: "tag12"}; +let q3 = {tags: {$all: ["tag6", "tag12"]}}; assert.eq(120, t.find(q1).itcount(), "q1 a"); assert.eq(120, t.find(q2).itcount(), "q2 a"); @@ -42,9 +36,9 @@ assert(isIxscan(db, getWinningPlan(t.find(q1).explain().queryPlanner)), "e1"); assert(isIxscan(db, getWinningPlan(t.find(q2).explain().queryPlanner)), "e2"); assert(isIxscan(db, getWinningPlan(t.find(q3).explain().queryPlanner)), "e3"); -scanned1 = t.find(q1).explain("executionStats").executionStats.totalKeysExamined; -scanned2 = t.find(q2).explain("executionStats").executionStats.totalKeysExamined; -scanned3 = t.find(q3).explain("executionStats").executionStats.totalKeysExamined; +let scanned1 = t.find(q1).explain("executionStats").executionStats.totalKeysExamined; +let scanned2 = t.find(q2).explain("executionStats").executionStats.totalKeysExamined; +let scanned3 = t.find(q3).explain("executionStats").executionStats.totalKeysExamined; // print( "scanned1: " + scanned1 + " scanned2: " + scanned2 + " scanned3: " + scanned3 ); diff --git a/jstests/core/index/index_check5.js b/jstests/core/index/index_check5.js index 8921e014fcd05..62f72c0e61282 100644 --- a/jstests/core/index/index_check5.js +++ b/jstests/core/index/index_check5.js @@ -1,5 +1,5 @@ -t = db.index_check5; +let t = db.index_check5; t.drop(); t.save({ diff --git a/jstests/core/index/index_count_scan.js b/jstests/core/index/index_count_scan.js new file mode 100644 index 0000000000000..f0870de08a027 --- /dev/null +++ b/jstests/core/index/index_count_scan.js @@ -0,0 +1,88 @@ +// Test that an index can be used to accelerate count commands, as well as the $count agg +// stage. +// +// The collection cannot be sharded, since the requirement to SHARD_FILTER precludes the planner +// from generating a COUNT_SCAN plan. Further, we do not allow stepdowns, since the code responsible +// for retrying on interrupt is not prepared to handle aggregation explain. +// @tags: [ +// assumes_unsharded_collection, +// does_not_support_stepdowns, +// # Test may fail with "index already exists". +// assumes_no_implicit_index_creation, +// # Explain for the aggregate command cannot run within a multi-document transaction. 
+// does_not_support_transactions, +// ] +import {getPlanStage} from "jstests/libs/analyze_plan.js"; + +const coll = db.index_count; +coll.drop(); + +assert.commandWorked(coll.insert([ + {a: 1}, + {a: 1, b: 1}, + {a: 2}, + {a: 3}, + {a: null}, + {a: [-1, 0]}, + {a: [4, -3, 5]}, + {}, + {a: {b: 4}}, + {a: []}, + {a: [[], {}]}, + {a: {}}, +])); + +const runTest = function(indexPattern, indexOption = {}) { + assert.commandWorked(coll.createIndex(indexPattern, indexOption)); + + assert.eq(5, coll.count({a: {$gt: 0}})); + assert.eq(5, coll.find({a: {$gt: 0}}).itcount()); + + // Retrieve the query plain from explain, whose shape varies depending on the query and the + // engines used (classic/sbe). + const getQueryPlan = function(explain) { + if (explain.stages) { + explain = explain.stages[0].$cursor; + } + let winningPlan = explain.queryPlanner.winningPlan; + return winningPlan.queryPlan ? [winningPlan.queryPlan, winningPlan.slotBasedPlan] + : [winningPlan, null]; + }; + + // Verify that this query uses a COUNT_SCAN. + const runAndVerify = function(expectedCount, pipeline) { + assert.eq(expectedCount, coll.aggregate(pipeline).next().count); + let explain = coll.explain().aggregate(pipeline); + const [queryPlan, sbePlan] = getQueryPlan(explain); + let countScan = getPlanStage(queryPlan, "COUNT_SCAN"); + assert.neq(null, countScan, explain); + if (sbePlan) { + assert.eq(true, sbePlan.stages.includes("ixseek"), sbePlan); + } + }; + + runAndVerify(2, [{$match: {a: 1}}, {$count: "count"}]); + // Run more times to ensure the query is cached. + runAndVerify(2, [{$match: {a: 1}}, {$count: "count"}]); + runAndVerify(2, [{$match: {a: 1}}, {$count: "count"}]); + // Make sure query is parameterized correctly for count scan index keys. + runAndVerify(1, [{$match: {a: 2}}, {$count: "count"}]); + if (indexPattern.b) { + runAndVerify(1, [{$match: {a: 1, b: 1}}, {$count: "count"}]); + } + runAndVerify(2, [{$match: {a: {}}}, {$count: "count"}]); + runAndVerify(3, [{$match: {a: {$gt: 1}}}, {$count: "count"}]); + // Add a $project stage between $match and $count to avoid pushdown. + runAndVerify(2, [{$match: {a: 1}}, {$project: {_id: 0, a: 0}}, {$count: "count"}]); + if (indexPattern.a) { + runAndVerify(12, [{$sort: {a: 1}}, {$count: "count"}]); + runAndVerify(12, [{$sort: {a: -1}}, {$count: "count"}]); + runAndVerify(12, [{$sort: {a: -1}}, {$group: {_id: null, count: {$sum: 1}}}]); + } + + assert.commandWorked(coll.dropIndex(indexPattern)); +}; + +runTest({a: 1}); +runTest({"$**": 1}); +runTest({"$**": -1, b: -1}, {wildcardProjection: {b: 0}}); \ No newline at end of file diff --git a/jstests/core/index/index_decimal.js b/jstests/core/index/index_decimal.js index 01cd343b2a415..ffa50b8077543 100644 --- a/jstests/core/index/index_decimal.js +++ b/jstests/core/index/index_decimal.js @@ -6,11 +6,8 @@ // ] // Test indexing of decimal numbers -(function() { -'use strict'; - // Include helpers for analyzing explain output. 
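// A standalone sketch of the behaviour the new index_count_scan.js test (above) asserts: when an
// index covers the counted predicate and no shard filtering is needed, the count is answered by a
// COUNT_SCAN stage. Names below are illustrative; the winningPlan/queryPlan unwrapping mirrors the
// getQueryPlan() helper in that test, which accounts for the SBE explain shape.
import {getPlanStage} from "jstests/libs/analyze_plan.js";

const countSketchColl = db.count_scan_sketch;
countSketchColl.drop();
assert.commandWorked(countSketchColl.createIndex({a: 1}));
assert.commandWorked(countSketchColl.insert([{a: 1}, {a: 1}, {a: 2}]));

const countExplain = countSketchColl.explain().count({a: 1});
let winningPlan = countExplain.queryPlanner.winningPlan;
// Under SBE the classic plan shape is nested one level deeper.
winningPlan = winningPlan.queryPlan ? winningPlan.queryPlan : winningPlan;
assert.neq(null, getPlanStage(winningPlan, "COUNT_SCAN"), countExplain);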
-load('jstests/libs/analyze_plan.js'); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var t = db.decimal_indexing; t.drop(); @@ -54,5 +51,4 @@ assert.eq(t.find({y: {$lt: NumberDecimal('0.3')}}, {y: 1, _id: 0}).toArray(), 'querying for double less than decimal 0.3 should return double 0.3'); assert.eq(t.find({_id: 0}, {_id: 1}).toArray(), [{_id: NumberDecimal('0E3')}], - 'querying for zero does not return the correct decimal'); -})(); + 'querying for zero does not return the correct decimal'); \ No newline at end of file diff --git a/jstests/core/index/index_diag.js b/jstests/core/index/index_diag.js index 769e5575bf168..bb9cd4ed3239f 100644 --- a/jstests/core/index/index_diag.js +++ b/jstests/core/index/index_diag.js @@ -3,14 +3,14 @@ // ] load("jstests/libs/fixture_helpers.js"); -t = db.index_diag; +let t = db.index_diag; t.drop(); assert.commandWorked(t.createIndex({x: 1})); -all = []; -ids = []; -xs = []; +let all = []; +let ids = []; +let xs = []; function r(a) { var n = []; @@ -19,8 +19,8 @@ function r(a) { return n; } -for (i = 1; i < 4; i++) { - o = {_id: i, x: -i}; +for (let i = 1; i < 4; i++) { + let o = {_id: i, x: -i}; t.insert(o); all.push(o); ids.push({_id: i}); diff --git a/jstests/core/index/index_filter_catalog_independent.js b/jstests/core/index/index_filter_catalog_independent.js index 2c244fdb0707b..bf0992c1ef326 100644 --- a/jstests/core/index/index_filter_catalog_independent.js +++ b/jstests/core/index/index_filter_catalog_independent.js @@ -13,10 +13,7 @@ * tenant_migration_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanStages. +import {getPlanStages, getWinningPlan, isCollscan} from "jstests/libs/analyze_plan.js"; const collName = "index_filter_catalog_independent"; const coll = db[collName]; @@ -88,5 +85,4 @@ assertOneIndexFilter({x: 3}, [{x: 1, y: 1}]); explain = assert.commandWorked(coll.find({x: 3}).explain()); checkIndexFilterSet(explain, true); -assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1, y: 1}); -})(); +assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1, y: 1}); \ No newline at end of file diff --git a/jstests/core/index/index_filter_collation.js b/jstests/core/index/index_filter_collation.js index b1b3edaa3d039..8b6abc11ce4d2 100644 --- a/jstests/core/index/index_filter_collation.js +++ b/jstests/core/index/index_filter_collation.js @@ -13,10 +13,7 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanStages. +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; const collName = "index_filter_collation"; const coll = db[collName]; @@ -117,5 +114,4 @@ assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1, y: 1}); explain = coll.explain().distinct("_id", {x: 3}, {collation: caseInsensitive}); checkIndexFilterSet(explain, true); -assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1}); -})(); +assertIsIxScanOnIndex(getWinningPlan(explain.queryPlanner), {x: 1}); \ No newline at end of file diff --git a/jstests/core/index/index_filter_commands.js b/jstests/core/index/index_filter_commands.js index 429a79adea818..0872f264d9ee4 100644 --- a/jstests/core/index/index_filter_commands.js +++ b/jstests/core/index/index_filter_commands.js @@ -35,14 +35,23 @@ * does_not_support_stepdowns, * # The SBE plan cache was first enabled in 6.3. 
* requires_fcv_63, + * references_foreign_collection, * ] */ -(function() { -load("jstests/libs/analyze_plan.js"); +import { + getPlanCacheKeyFromPipeline, + getPlanCacheKeyFromShape, + getPlanStage, + getWinningPlan, + isClusteredIxscan, + isCollscan, + isIdhack, + isIxscan, +} from "jstests/libs/analyze_plan.js"; load("jstests/libs/clustered_collections/clustered_collection_util.js"); load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const coll = db.jstests_index_filter_commands; @@ -187,7 +196,7 @@ assert.eq(null, planCacheEntryForQuery(shape), coll.getPlanCache().list()); // Check details of winning plan in plan cache after setting filter and re-executing query. assert.eq(1, coll.find(queryA1, projectionA1).sort(sortA1).itcount(), 'unexpected document count'); -planAfterSetFilter = planCacheEntryForQuery(shape); +let planAfterSetFilter = planCacheEntryForQuery(shape); assert.neq(null, planAfterSetFilter, coll.getPlanCache().list()); // Check 'indexFilterSet' field in plan details assert.eq(true, planAfterSetFilter.indexFilterSet, planAfterSetFilter); @@ -441,7 +450,7 @@ if (checkSBEEnabled(db)) { assert.eq(lookupStage.strategy, "IndexedLoopJoin", explain); assert.eq(lookupStage.indexName, "foreign_a_1"); - ixscanStage = getPlanStage(explain, "IXSCAN"); + let ixscanStage = getPlanStage(explain, "IXSCAN"); assert.neq(null, ixscanStage, explain); assert.eq(ixscanStage.indexName, "main_a_1_c_1", explain); @@ -474,4 +483,3 @@ if (checkSBEEnabled(db)) { planCacheEntry = planCacheEntryForPipeline(pipeline); assert.eq(null, planCacheEntry, coll.getPlanCache().list()); } -}()); diff --git a/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js b/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js index bc2dc74c0e1cd..d5a888d2f1c5b 100644 --- a/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js +++ b/jstests/core/index/index_filter_commands_invalidate_plan_cache_entries.js @@ -16,14 +16,11 @@ * # Plan cache state is node-local and will not get migrated alongside tenant data. * tenant_migration_incompatible, * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // For testing convenience this variable is made an integer "1" if SBE is fully enabled, because the // expected amount of plan cache entries differs between the SBE plan cache and the classic one. @@ -109,4 +106,3 @@ assert.commandWorked( db.runCommand({planCacheSetFilter: collName, query: {a: 1, b: 1}, indexes: [{a: 1}]})); assert(!existsInPlanCache({a: 1, b: 1}, {}, {}, coll)); assert(existsInPlanCache({a: 1, b: 1}, {}, {}, collOther)); -})(); diff --git a/jstests/core/index/index_filter_on_hidden_index.js b/jstests/core/index/index_filter_on_hidden_index.js index 770802b8b3e73..85b71acf33a05 100644 --- a/jstests/core/index/index_filter_on_hidden_index.js +++ b/jstests/core/index/index_filter_on_hidden_index.js @@ -20,10 +20,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'getPlanStages' and 'isCollscan'. 
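// The index_filter_commands tests above exercise the planCacheSetFilter / planCacheClearFilters
// commands. A minimal sketch of setting a filter and seeing it reflected in explain output; the
// collection name 'index_filter_sketch' is illustrative only:
const filterSketchColl = db.index_filter_sketch;
filterSketchColl.drop();
assert.commandWorked(filterSketchColl.insert({a: 1, b: 1}));
assert.commandWorked(filterSketchColl.createIndexes([{a: 1}, {a: 1, b: 1}]));

// Restrict the planner to the {a: 1, b: 1} index for this query shape.
assert.commandWorked(db.runCommand({
    planCacheSetFilter: filterSketchColl.getName(),
    query: {a: 1},
    indexes: [{a: 1, b: 1}],
}));

// Explain reports that an index filter applies to the shape.
const filterExplain = filterSketchColl.find({a: 1}).explain();
assert.eq(true, filterExplain.queryPlanner.indexFilterSet, filterExplain);

// Remove the filter so it cannot leak into other query shapes or tests.
assert.commandWorked(db.runCommand({planCacheClearFilters: filterSketchColl.getName()}));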
+import {getPlanStages, getWinningPlan, isCollscan} from "jstests/libs/analyze_plan.js"; const collName = 'hidden_indexes_remain_visible_in_index_filters'; db[collName].drop(); @@ -119,5 +116,4 @@ validateIxscanOrCollscanUsed(queryShape, null); // Unhiding the index should make it able to be used. assert.commandWorked(coll.unhideIndex("a_1")); -validateIxscanOrCollscanUsed(queryShape, "a_1"); -})(); +validateIxscanOrCollscanUsed(queryShape, "a_1"); \ No newline at end of file diff --git a/jstests/core/index/index_id_options.js b/jstests/core/index/index_id_options.js index ca19627a48e3a..49b2128407a2a 100644 --- a/jstests/core/index/index_id_options.js +++ b/jstests/core/index/index_id_options.js @@ -44,7 +44,6 @@ assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", unique: true})); assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", sparse: false})); assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", partialFilterExpression: {a: 1}})); assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", expireAfterSeconds: 3600})); -assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", background: false})); assert.commandFailed(coll.createIndex({_id: 1}, {name: "_id_", unknown: true})); assert.commandWorked(coll.createIndex( {_id: 1}, {name: "_id_", ns: coll.getFullName(), v: 2, collation: {locale: "simple"}})); diff --git a/jstests/core/index/index_many.js b/jstests/core/index/index_many.js index cdd559c9be5d9..ee1f71477c01d 100644 --- a/jstests/core/index/index_many.js +++ b/jstests/core/index/index_many.js @@ -4,7 +4,7 @@ /* test using lots of indexes on one collection */ -t = db.many; +let t = db.many; function f() { t.drop(); @@ -13,10 +13,10 @@ function f() { t.save({x: 9, y: 99}); t.save({x: 19, y: 99}); - x = 2; + let x = 2; var lastErr = null; while (x < 70) { - patt = {}; + let patt = {}; patt[x] = 1; if (x == 20) patt = {x: 1}; @@ -29,7 +29,7 @@ function f() { assert.commandFailed(lastErr, "should have got an error 'too many indexes'"); // 40 is the limit currently - lim = t.getIndexes().length; + let lim = t.getIndexes().length; if (lim != 64) { print("# of indexes should be 64 but is : " + lim); return; diff --git a/jstests/core/index/index_multikey.js b/jstests/core/index/index_multikey.js index 9dc26c9c13bd0..dd7c022f27298 100644 --- a/jstests/core/index/index_multikey.js +++ b/jstests/core/index/index_multikey.js @@ -5,11 +5,8 @@ * assumes_read_concern_local, * ] */ -(function() { -"use strict"; - // For making assertions about explain output. -load("jstests/libs/analyze_plan.js"); +import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js"; const coll = db.getCollection("index_multikey"); coll.drop(); @@ -36,5 +33,4 @@ assert.eq(ixscan.isMultiKey, "index should have been marked as multikey after insert; plan: " + tojson(ixscan)); assert.eq(ixscan.multiKeyPaths, {a: ["a"], b: []}, - "index has wrong multikey paths after insert; plan: " + ixscan); -})(); + "index has wrong multikey paths after insert; plan: " + ixscan); \ No newline at end of file diff --git a/jstests/core/index/index_partial_create_drop.js b/jstests/core/index/index_partial_create_drop.js index c0a095b30df71..c61c5502f54c6 100644 --- a/jstests/core/index/index_partial_create_drop.js +++ b/jstests/core/index/index_partial_create_drop.js @@ -13,10 +13,8 @@ // Test partial index creation and drops. 
-load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -(function() { -"use strict"; var coll = db.index_partial_create_drop; var getNumKeys = function(idxName) { @@ -77,8 +75,7 @@ assert.commandWorked(coll.dropIndex({x: 1})); assert.eq(1, coll.getIndexes().length); // Create partial index in background. -assert.commandWorked( - coll.createIndex({x: 1}, {background: true, partialFilterExpression: {a: {$lt: 5}}})); +assert.commandWorked(coll.createIndex({x: 1}, {partialFilterExpression: {a: {$lt: 5}}})); assert.eq(5, getNumKeys("x_1")); assert.commandWorked(coll.dropIndex({x: 1})); assert.eq(1, coll.getIndexes().length); @@ -120,4 +117,3 @@ numIndexesBefore = coll.getIndexes().length; assert.commandFailedWithCode(coll.dropIndex({x: 1}), ErrorCodes.AmbiguousIndexKeyPattern); assert.commandWorked(coll.dropIndex("partialIndex2")); assert.eq(coll.getIndexes().length, numIndexesBefore - 1); -})(); diff --git a/jstests/core/index/index_partial_read_ops.js b/jstests/core/index/index_partial_read_ops.js index 2bc6578479e3b..339134e5e2a09 100644 --- a/jstests/core/index/index_partial_read_ops.js +++ b/jstests/core/index/index_partial_read_ops.js @@ -11,11 +11,9 @@ // Read ops tests for partial indexes. // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); +import {getRejectedPlans, getWinningPlan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -(function() { -"use strict"; let explain; const coll = db.index_partial_read_ops; @@ -137,7 +135,7 @@ const coll = db.index_partial_read_ops; if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { jsTest.log( "Skipping partialFilterExpression testing for $in, $or and non-top level $and as timeseriesMetricIndexesEnabled is false"); - return; + quit(); } (function testFilterWithInExpression() { @@ -195,4 +193,3 @@ if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { assert.eq(1, explain.executionStats.nReturned); assert(isCollscan(db, getWinningPlan(explain.queryPlanner))); })(); -})(); diff --git a/jstests/core/index/index_signature.js b/jstests/core/index/index_signature.js index 9d015bfa2d042..bc3796ce4e185 100644 --- a/jstests/core/index/index_signature.js +++ b/jstests/core/index/index_signature.js @@ -9,11 +9,8 @@ * requires_non_retryable_writes, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" -load("jstests/libs/fixture_helpers.js"); // For 'isSharded'. +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/libs/fixture_helpers.js"); // For 'isSharded'. const testDB = db.getSiblingDB(jsTestName()); const coll = testDB.test; @@ -143,7 +140,7 @@ assert.commandFailedWithCode(coll.createIndex(keyPattern, partialFilterUnsortedL ErrorCodes.IndexKeySpecsConflict); // Verifies that non-signature options cannot distinguish a new index from an existing index. -const nonSignatureOptions = [{expireAfterSeconds: 10}, {background: true}]; +const nonSignatureOptions = [{expireAfterSeconds: 10}]; // Builds a new, basic index on {a: 1}, since some of the options we intend to test are not // compatible with the partialFilterExpression on the existing {a: 1} indexes. 
@@ -249,4 +246,3 @@ if (allowCompoundWildcardIndexes) { {name: "cwi_a_sub_b_c_1", wildcardProjection: {"a.c": 1, "a.b": 1}}), ErrorCodes.IndexOptionsConflict); } -})(); diff --git a/jstests/core/index/index_sparse1.js b/jstests/core/index/index_sparse1.js index 58bc5baa9b8a6..0aec94e8ab221 100644 --- a/jstests/core/index/index_sparse1.js +++ b/jstests/core/index/index_sparse1.js @@ -2,7 +2,7 @@ // collection. // @tags: [assumes_no_implicit_index_creation, requires_non_retryable_writes, requires_fastcount] -t = db.index_sparse1; +let t = db.index_sparse1; t.drop(); t.insert({_id: 1, x: 1}); diff --git a/jstests/core/index/index_sparse2.js b/jstests/core/index/index_sparse2.js index 324b46d82163a..c7a9b0fac0461 100644 --- a/jstests/core/index/index_sparse2.js +++ b/jstests/core/index/index_sparse2.js @@ -2,7 +2,7 @@ // collection. // @tags: [assumes_no_implicit_index_creation, requires_fastcount] -t = db.index_sparse2; +let t = db.index_sparse2; t.drop(); t.insert({_id: 1, x: 1, y: 1}); diff --git a/jstests/core/index/index_stats.js b/jstests/core/index/index_stats.js index b7e3e64d3242a..c3d13b7d1e103 100644 --- a/jstests/core/index/index_stats.js +++ b/jstests/core/index/index_stats.js @@ -16,16 +16,14 @@ // # errors. // tenant_migration_incompatible, // # TODO SERVER-67639: Verify $indexStats works for queries that are eligible for CQF. -// cqf_incompatible, +// cqf_experimental_incompatible, // # Uses mapReduce command. // requires_scripting, +// references_foreign_collection, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getAggPlanStage, getPlanStages} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; var colName = "jstests_index_stats"; var col = db[colName]; @@ -360,4 +358,3 @@ assert.commandWorked(col.unhideIndex("a_1")); res = col.findOne({a: 1}); assert(1, res); assert.eq(1, getUsageCount("a_1")); -})(); diff --git a/jstests/core/index/index_type_change.js b/jstests/core/index/index_type_change.js index 455d9b6067d77..294170ad933a4 100644 --- a/jstests/core/index/index_type_change.js +++ b/jstests/core/index/index_type_change.js @@ -10,10 +10,7 @@ * will update the index entries associated with that document. */ -load("jstests/libs/analyze_plan.js"); // For 'isIndexOnly'. 
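// index_stats.js above reads per-index usage counters through the $indexStats aggregation stage.
// A minimal sketch of the shape of that output; the collection name is illustrative, and
// 'accesses.ops' is the counter the test's getUsageCount() helper is presumed to read:
const indexStatsSketchColl = db.index_stats_sketch;
indexStatsSketchColl.drop();
assert.commandWorked(indexStatsSketchColl.insert({a: 1}));
assert.commandWorked(indexStatsSketchColl.createIndex({a: 1}));

indexStatsSketchColl.findOne({a: 1});  // Bump the usage counter for the a_1 index.

const usage = indexStatsSketchColl.aggregate([{$indexStats: {}}]).toArray();
printjson(usage.map(entry => ({name: entry.name, ops: entry.accesses.ops})));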
- -(function() { -"use strict"; +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var coll = db.index_type_change; coll.drop(); @@ -40,4 +37,3 @@ assert(isIndexOnly(db, explain)); var updated = coll.findOne({a: 2}, {_id: 0, a: 1}); assert(updated.a instanceof NumberLong, "Index entry did not change type"); -})(); diff --git a/jstests/core/index/indexa.js b/jstests/core/index/indexa.js index 01fde035621c5..937dde52df180 100644 --- a/jstests/core/index/indexa.js +++ b/jstests/core/index/indexa.js @@ -5,7 +5,7 @@ // unique index constraint test for updates // case where object doesn't grow tested here -t = db.indexa; +let t = db.indexa; t.drop(); t.createIndex({x: 1}, true); @@ -18,8 +18,8 @@ assert.eq(2, t.count(), "indexa 1"); t.update({x: 'B'}, {x: 'A'}); -a = t.find().toArray(); -u = Array.unique(a.map(function(z) { +let a = t.find().toArray(); +let u = Array.unique(a.map(function(z) { return z.x; })); assert.eq(2, t.count(), "indexa 2"); diff --git a/jstests/core/index/indexb.js b/jstests/core/index/indexb.js index 59546f7fae273..5c5177fd23974 100644 --- a/jstests/core/index/indexb.js +++ b/jstests/core/index/indexb.js @@ -8,15 +8,13 @@ // see indexa.js for the test case for an update with dup id check // when it doesn't move -t = db.indexb; +let t = db.indexb; t.drop(); t.createIndex({a: 1}, true); t.insert({a: 1}); -x = { - a: 2 -}; +let x = {a: 2}; t.save(x); { diff --git a/jstests/core/index/indexc.js b/jstests/core/index/indexc.js index bf5735380faad..b65d5a6e89123 100644 --- a/jstests/core/index/indexc.js +++ b/jstests/core/index/indexc.js @@ -1,5 +1,5 @@ -t = db.indexc; +let t = db.indexc; t.drop(); const startMillis = new Date().getTime(); @@ -7,7 +7,7 @@ for (var i = 1; i < 100; i++) { var d = new Date(startMillis + i); t.save({a: i, ts: d, cats: [i, i + 1, i + 2]}); if (i == 51) - mid = d; + var mid = d; } assert.eq(50, t.find({ts: {$lt: mid}}).itcount(), "A"); diff --git a/jstests/core/index/indexe.js b/jstests/core/index/indexe.js index a307882adc3aa..aaafc5bda3bb0 100644 --- a/jstests/core/index/indexe.js +++ b/jstests/core/index/indexe.js @@ -1,11 +1,11 @@ // @tags: [requires_getmore, requires_fastcount] -t = db.indexe; +let t = db.indexe; t.drop(); var num = 1000; -for (i = 0; i < num; i++) { +for (let i = 0; i < num; i++) { t.insert({a: "b"}); } diff --git a/jstests/core/index/indexf.js b/jstests/core/index/indexf.js index 37c279672293d..8837b9e2571d0 100644 --- a/jstests/core/index/indexf.js +++ b/jstests/core/index/indexf.js @@ -1,5 +1,5 @@ -t = db.indexf; +let t = db.indexf; t.drop(); t.createIndex({x: 1}); diff --git a/jstests/core/index/indexg.js b/jstests/core/index/indexg.js index 486f4be0ebe9c..47f7a587d9e8a 100644 --- a/jstests/core/index/indexg.js +++ b/jstests/core/index/indexg.js @@ -1,13 +1,13 @@ -f = db.jstests_indexg; +let f = db.jstests_indexg; f.drop(); f.save({list: [1, 2]}); f.save({list: [1, 3]}); -doit = function() { +let doit = function() { assert.eq(1, f.count({list: {$in: [1], $ne: 3}})); assert.eq(1, f.count({list: {$in: [1], $not: {$in: [3]}}})); }; doit(); f.createIndex({list: 1}); -doit(); \ No newline at end of file +doit(); diff --git a/jstests/core/index/indexj.js b/jstests/core/index/indexj.js index 93034fec923ec..11b70ff2d4c36 100644 --- a/jstests/core/index/indexj.js +++ b/jstests/core/index/indexj.js @@ -12,10 +12,8 @@ // requires_fcv_63, // ] -(function() { -"use strict"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'. 
const t = db[jsTestName()]; t.drop(); @@ -93,5 +91,4 @@ assert.commandWorked(t.insert({a: 1, b: 1.5})); // both sets of bounds being scanned. expectedKeys = isSBEEnabled ? 1 : 4; numKeys = keysExamined({a: {$in: [1, 2]}, b: {$gt: 1, $lt: 2}}, {a: 1, b: 1}); -assert.eq(numKeys, expectedKeys, errMsg(numKeys)); -})(); +assert.eq(numKeys, expectedKeys, errMsg(numKeys)); \ No newline at end of file diff --git a/jstests/core/index/indexl.js b/jstests/core/index/indexl.js index cde169eda71dd..d0ba873e4377a 100644 --- a/jstests/core/index/indexl.js +++ b/jstests/core/index/indexl.js @@ -1,6 +1,6 @@ // Check nonoverlapping $in/$all with multikeys SERVER-2165 -t = db.jstests_indexl; +let t = db.jstests_indexl; function test(t) { t.save({a: [1, 2]}); @@ -24,4 +24,4 @@ t.drop(); test(t); t.drop(); t.createIndex({a: 1}); -test(t); \ No newline at end of file +test(t); diff --git a/jstests/core/index/indexm.js b/jstests/core/index/indexm.js index 7613b12535739..2fc042632733d 100644 --- a/jstests/core/index/indexm.js +++ b/jstests/core/index/indexm.js @@ -1,6 +1,6 @@ // Check proper range combinations with or clauses overlapping non or portion of query SERVER-2302 -t = db.jstests_indexm; +let t = db.jstests_indexm; t.drop(); t.save({a: [{x: 1}, {x: 2}, {x: 3}, {x: 4}]}); diff --git a/jstests/core/index/indexn.js b/jstests/core/index/indexn.js index a292ae2e5d310..ddd7dc5169d50 100644 --- a/jstests/core/index/indexn.js +++ b/jstests/core/index/indexn.js @@ -4,7 +4,7 @@ // assumes_read_concern_local, // ] -t = db.jstests_indexn; +let t = db.jstests_indexn; t.drop(); t.save({a: 1, b: [1, 2]}); diff --git a/jstests/core/index/indexr.js b/jstests/core/index/indexr.js index d3ff5f7e02ebd..a056d29281f12 100644 --- a/jstests/core/index/indexr.js +++ b/jstests/core/index/indexr.js @@ -4,7 +4,7 @@ // Check multikey index cases with parallel nested fields SERVER-958. -t = db.jstests_indexr; +let t = db.jstests_indexr; t.drop(); // Check without indexes. diff --git a/jstests/core/index/indexs.js b/jstests/core/index/indexs.js index 6ee625071e2de..6cbabbb8f821b 100644 --- a/jstests/core/index/indexs.js +++ b/jstests/core/index/indexs.js @@ -1,7 +1,7 @@ // Test index key generation issue with parent and nested fields in same index and array containing // subobject SERVER-3005. -t = db.jstests_indexs; +let t = db.jstests_indexs; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/index/indext.js b/jstests/core/index/indext.js index 1ac92d6be5d02..13e0ce0fb57f5 100644 --- a/jstests/core/index/indext.js +++ b/jstests/core/index/indext.js @@ -1,6 +1,6 @@ // Sparse indexes with arrays SERVER-3216 -t = db.jstests_indext; +let t = db.jstests_indext; t.drop(); t.createIndex({'a.b': 1}, {sparse: true}); diff --git a/jstests/core/index/partial_index_optimization.js b/jstests/core/index/partial_index_optimization.js new file mode 100644 index 0000000000000..0e8fa2ccd1a60 --- /dev/null +++ b/jstests/core/index/partial_index_optimization.js @@ -0,0 +1,225 @@ +/** + * Tests for classic query optimization with partial indices: do not generate fetch stage in the + * plan if a query predicate is satisfied by the filter expression of the chosen partial index. If + * the fetch phase is needed for another reason, make sure that the predicate is not in the fetch + * filter. 
+ * + * @tags: [ + * # the test conflicts with hidden wildcard indexes + * assumes_no_implicit_index_creation, + * does_not_support_stepdowns, + * multiversion_incompatible, + * requires_fcv_70, + * ] + */ + +import { + assertFetchFilter, + assertNoFetchFilter, + assertStagesForExplainOfCommand, + getWinningPlan, + isCollscan, +} from "jstests/libs/analyze_plan.js"; + +function flagVal(n) { + return (n % 5 > 3) ? true : false; +} + +function stateVal(n) { + const states = ["open", "closed", "unknown"]; + return states[n % 3]; +} + +function getDocs(len, start = 0) { + return Array.from({length: len}, (_, i) => ({ + _id: start + i, + a: i, + b: i + 3, + c: [i, i + 5], + flag: flagVal(i), + state: stateVal(i), + array: [{a: i, state: stateVal(i)}, {b: i}] + })); +} + +const coll = db.partial_index_opt; +coll.drop(); +assert.commandWorked(coll.insertMany(getDocs(100))); +assert.commandWorked(coll.insertMany([ + { + _id: 100, + a: 100, + state: "open", + array: [{a: 100, state: "closed"}, {a: 101, state: "closed"}] + }, + {_id: 101, a: 101, state: "open", array: [{a: 101, state: "open"}]}, + {_id: 102, a: 102, state: "closed", array: [{a: 102, state: "open"}]} +])); + +const expectedStagesCount = ["COUNT", "COUNT_SCAN"]; + +assert.commandWorked(coll.createIndex({a: 1}, {"partialFilterExpression": {flag: true}})); +let cmdObj = {find: coll.getName(), filter: {flag: true, a: 4}, projection: {_id: 0, a: 1}}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +// The following plan has a fetch phase because of the projection, but no filter on it. +cmdObj = { + find: coll.getName(), + filter: {flag: true, a: 4}, + projection: {a: 1} +}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +// Count command. +cmdObj = { + count: coll.getName(), + query: {flag: true, a: 4} +}; +assertStagesForExplainOfCommand({ + coll: coll, + cmdObj: cmdObj, + expectedStages: expectedStagesCount, + stagesNotExpected: ["FETCH"] +}); + +// Partial index with filter expression with conjunction. +assert.commandWorked(coll.createIndex( + {a: 1}, {name: "a_1_range", "partialFilterExpression": {a: {$gte: 20, $lte: 40}}})); +cmdObj = { + find: coll.getName(), + filter: {a: {$gte: 20, $lte: 40}}, + projection: {_id: 0, a: 1} +}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +cmdObj = { + find: coll.getName(), + filter: {a: {$gte: 25, $lte: 30}}, + projection: {_id: 0, a: 1} +}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +// Partial index with compound key. +assert.commandWorked(coll.createIndex({a: 1, b: 1}, {"partialFilterExpression": {flag: true}})); +cmdObj = { + find: coll.getName(), + filter: {a: {$gte: 50}, b: {$in: [55, 57, 59, 62]}, flag: true}, + projection: {_id: 0, a: 1, b: 1} +}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +// Filter expression with conjunction on multiple fields. +assert.commandWorked(coll.createIndex( + {b: 1}, {name: "b_1_state_open", "partialFilterExpression": {state: "open", b: {$gt: 50}}})); + +cmdObj = { + find: coll.getName(), + filter: {state: "open", b: {$gt: 80}}, + projection: {_id: 0, b: 1} +}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +cmdObj = { + count: coll.getName(), + query: {state: "open", b: {$gt: 50}} +}; +assertStagesForExplainOfCommand({ + coll: coll, + cmdObj: cmdObj, + expectedStages: expectedStagesCount, + stagesNotExpected: ["FETCH"] +}); + +// Index filter expression with $exists. 
+assert.commandWorked(coll.createIndex( + {a: 1}, {name: "a_1_b_exists", "partialFilterExpression": {b: {$exists: true}}})); + +cmdObj = { + find: coll.getName(), + filter: {a: {$gte: 90}, b: {$exists: true}}, + projection: {_id: 0, a: 1} +}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +// Filter expression in a multi-key index. +assert.commandWorked( + coll.createIndex({c: 1}, {name: "c_1_a", partialFilterExpression: {a: {$lte: 30}}})); + +cmdObj = { + count: coll.getName(), + query: {c: {$lte: 50}, a: {$lte: 30}} +}; +assertStagesForExplainOfCommand({ + coll: coll, + cmdObj: cmdObj, + expectedStages: expectedStagesCount, + stagesNotExpected: ["FETCH"] +}); + +// The following plan has a fetch phase, but no filter on 'a'. +cmdObj = { + find: coll.getName(), + filter: {c: {$lte: 50}, a: {$lte: 30}}, + projection: {_id: 0, c: 1} +}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +// Test that the same filter expression under $elemMatch will not be removed from the fetch filter. +assert.commandWorked( + coll.createIndex({a: 1}, {name: "a_1_state_open", "partialFilterExpression": {state: "open"}})); + +let predicate = { + a: {$gte: 100}, + state: "open", + array: {$elemMatch: {$and: [{a: {$gte: 100}}, {state: "open"}]}} +}; +let fetchFilter = { + "array": {"$elemMatch": {"$and": [{"a": {"$gte": 100}}, {"state": {"$eq": "open"}}]}} +}; +assertFetchFilter({coll: coll, predicate: predicate, expectedFilter: fetchFilter, nReturned: 1}); + +// Index on $elemMatch predicate. Test that the index filter predicate is removed from the fetch +// filter while $elemMatch predicate is preserved. +assert.commandWorked(coll.createIndex( + {"array.a": 1}, {name: "array_a_1_state_open", "partialFilterExpression": {state: "open"}})); + +predicate = { + state: "open", + array: {$elemMatch: {$and: [{a: {$gte: 100}}, {state: "open"}]}} +}; +fetchFilter = { + "array": {"$elemMatch": {"$and": [{"a": {"$gte": 100}}, {"state": {"$eq": "open"}}]}} +}; +assertFetchFilter({coll: coll, predicate: predicate, expectedFilter: fetchFilter, nReturned: 1}); + +// Test for index filter expression over nested field. +assert.commandWorked(coll.createIndex( + {"array.a": 1}, + {name: "array_a_1_array_state_open", "partialFilterExpression": {"array.state": "open"}})); + +cmdObj = { + find: coll.getName(), + filter: {$and: [{"array.a": {$gte: 100}}, {"array.state": "open"}]}, + projection: {_id: 0, array: 1} +}; +assertNoFetchFilter({coll: coll, cmdObj: cmdObj}); + +// Tests that the query predicate is not removed if it is a subset of an $or index filter. +assert.commandWorked(coll.createIndex( + {a: 1}, {name: "a_1_or", "partialFilterExpression": {$or: [{b: {$gte: 80}}, {flag: "true"}]}})); + +predicate = { + $and: [{a: {$gte: 75}}, {b: {$gte: 80}}] +}; +fetchFilter = { + "b": {"$gte": 80} +}; +assertFetchFilter({coll: coll, predicate: predicate, expectedFilter: fetchFilter, nReturned: 23}); + +// Possible optimization: the following query could use a bounded index scan on 'a' and remove the +// $or sub-predicate as it is covered by the partial index filter. Currently, the index is not +// considered and a collection scan is used instead. 
+const exp = + coll.find({$and: [{a: {$gte: 90}}, {$or: [{b: {$gte: 80}}, {flag: "true"}]}]}).explain(); +assert(isCollscan(db, exp), + "Expected collection scan, got " + tojson(getWinningPlan(exp.queryPlanner))); diff --git a/jstests/core/index/sparse_index_internal_expr.js b/jstests/core/index/sparse_index_internal_expr.js index 9ac47a5c1006f..2f1c6f58a2064 100644 --- a/jstests/core/index/sparse_index_internal_expr.js +++ b/jstests/core/index/sparse_index_internal_expr.js @@ -5,13 +5,11 @@ * * @tags: [ * multiversion_incompatible, + * does_not_support_transaction, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; const coll = db.sparse_index_internal_expr; coll.drop(); @@ -47,7 +45,8 @@ assert.docEq(res[0], {a: 1}); // Drop the non-sparse index and create a sparse index with the same key pattern. assert.commandWorked(coll.dropIndex("missing_1")); -assert.commandWorked(coll.createIndex({'missing': 1}, {'sparse': true})); +assert.commandWorked( + coll.createIndex({'missing': 1}, {'sparse': true, 'name': 'missing_1_sparse'})); // Run the same query to test that a COLLSCAN plan is used rather than an indexed plan. const collScans = @@ -58,13 +57,12 @@ assert.gt(collScans.length, 0, collScans); // Test that a sparse index can be hinted to answer $expr query but incomplete results in returned, // because the document is not indexed by the sparse index. -res = coll.find(exprQuery, {_id: 0}).hint("missing_1").toArray(); +res = coll.find(exprQuery, {_id: 0}).hint("missing_1_sparse").toArray(); assert.eq(res.length, 0); ixScans = getPlanStages( - getWinningPlan(coll.find(exprQuery).hint("missing_1").explain().queryPlanner), "IXSCAN"); + getWinningPlan(coll.find(exprQuery).hint("missing_1_sparse").explain().queryPlanner), "IXSCAN"); assert.gt(ixScans.length, 0, ixScans); -assert.eq("missing_1", ixScans[0].indexName, ixScans); -assert.eq(true, ixScans[0].isSparse, ixScans); -}()); +assert.eq("missing_1_sparse", ixScans[0].indexName, ixScans); +assert.eq(true, ixScans[0].isSparse, ixScans); \ No newline at end of file diff --git a/jstests/core/index/useindexonobjgtlt.js b/jstests/core/index/useindexonobjgtlt.js index 7b393de7c2fd7..bcdf36f3279f6 100644 --- a/jstests/core/index/useindexonobjgtlt.js +++ b/jstests/core/index/useindexonobjgtlt.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.factories; +let t = db.factories; t.drop(); t.insert({name: "xyz", metro: {city: "New York", state: "NY"}}); t.createIndex({metro: 1}); @@ -9,4 +9,4 @@ assert(db.factories.find().count()); assert.eq(1, db.factories.find({metro: {city: "New York", state: "NY"}}).hint({metro: 1}).count()); -assert.eq(1, db.factories.find({metro: {$gte: {city: "New York"}}}).hint({metro: 1}).count()); \ No newline at end of file +assert.eq(1, db.factories.find({metro: {$gte: {city: "New York"}}}).hint({metro: 1}).count()); diff --git a/jstests/core/index/wildcard/compound_wildcard_index_filter.js b/jstests/core/index/wildcard/compound_wildcard_index_filter.js index c9f2324c116d4..508be239c4d5c 100644 --- a/jstests/core/index/wildcard/compound_wildcard_index_filter.js +++ b/jstests/core/index/wildcard/compound_wildcard_index_filter.js @@ -10,10 +10,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/wildcard_index_helpers.js"); +import {WildcardIndexHelpers} from "jstests/libs/wildcard_index_helpers.js"; /** * Utility function to find an index filter by keyPattern or index name in the given filterList. 
@@ -189,5 +186,4 @@ for (const cwiFilter of cwiFilterList) { for (const cwiFilter of cwiFilterList) { assertExpectedIndexAnswersQueryWithFilter( coll, cwiFilter.query, [cwiFilter.keyPattern], cwiFilter.query, cwiFilter.indexName); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/compound_wildcard_index_hiding.js b/jstests/core/index/wildcard/compound_wildcard_index_hiding.js index 968baeff12308..b5cbfe68c41ab 100644 --- a/jstests/core/index/wildcard/compound_wildcard_index_hiding.js +++ b/jstests/core/index/wildcard/compound_wildcard_index_hiding.js @@ -5,16 +5,13 @@ * @tags: [ * not_allowed_with_security_token, * does_not_support_stepdowns, + * does_not_support_transaction, * featureFlagCompoundWildcardIndexes, * requires_fcv_70, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/wildcard_index_helpers.js"); +import {WildcardIndexHelpers} from "jstests/libs/wildcard_index_helpers.js"; const collectionName = "compound_wildcard_index_hiding"; const cwiList = [ @@ -50,17 +47,6 @@ const cwiList = [ }, ]; -function validateIndex(coll, indexSpec) { - const index = findIndex(coll, indexSpec); - assert.neq(null, index); - - if (indexSpec.hidden) { - assert.eq(true, index.hidden); - } else { - assert.neq(true, index.hidden); - } -} - function setIndexVisibilityByKeyPattern(collectionName, keyPattern, hidden) { assert.commandWorked(db.runCommand({collMod: collectionName, index: {keyPattern, hidden}})); } @@ -168,5 +154,4 @@ testCompoundWildcardIndexesHiding(cwiList, collectionName); ///////////////////////////////////////////////////////////////////////// // 3. Test that queries do not use hidden Compound Wildcard Indexes. -assertHiddenIndexesIsNotUsed(cwiList, collectionName); -})(); +assertHiddenIndexesIsNotUsed(cwiList, collectionName); \ No newline at end of file diff --git a/jstests/core/index/wildcard/compound_wildcard_index_hint.js b/jstests/core/index/wildcard/compound_wildcard_index_hint.js index 552b385f5bb79..e15059dc6bcfd 100644 --- a/jstests/core/index/wildcard/compound_wildcard_index_hint.js +++ b/jstests/core/index/wildcard/compound_wildcard_index_hint.js @@ -7,9 +7,7 @@ * ] */ -(function() { - -load("jstests/libs/wildcard_index_helpers.js"); +import {WildcardIndexHelpers} from "jstests/libs/wildcard_index_helpers.js"; const cwiList = [ // Note: 'wildcardProjection' cannot be specified if the wildcard field is not "$**". @@ -74,5 +72,4 @@ for (const testCase of cwiList) { const explain = assert.commandWorked( coll.find(testCase.query).hint(testCase.keyPattern).explain('executionStats')); WildcardIndexHelpers.assertExpectedIndexIsUsed(explain, testCase.indexName); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/compound_wildcard_index_or.js b/jstests/core/index/wildcard/compound_wildcard_index_or.js index 1eea064888b1c..b479bd76fba39 100644 --- a/jstests/core/index/wildcard/compound_wildcard_index_or.js +++ b/jstests/core/index/wildcard/compound_wildcard_index_or.js @@ -11,11 +11,8 @@ * requires_fcv_70, * ] */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/wildcard_index_helpers.js"); // For WildcardIndexHelpers. +load("jstests/aggregation/extras/utils.js"); // For arrayEq(). 
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
 
 const documentList = [
     {
@@ -63,15 +60,15 @@ const idxResult = wild.aggregate(pipeline).toArray();
 assertArrayEq({expected: documentList, actual: noIdxResult});
 assertArrayEq({expected: noIdxResult, actual: idxResult});
 
-const explain = assert.commandWorked(wild.explain('executionStats').aggregate(pipeline));
+let explain = assert.commandWorked(wild.explain('executionStats').aggregate(pipeline));
 
 // We want to make sure that the correct expanded CWI key pattern was used. The CWI,
 // {"str": -1, "obj.obj.obj.obj.$**": -1}, could be expanded internally to two key patterns:
 // 1) {"str": -1, "obj.obj.obj.obj.obj": -1} for predicates including "obj.obj.obj.obj.obj".
 // 2) {"str": -1, "$_path": -1} for queries only on the prefix field 'str'.
 // The latter key pattern should be used for the predicate with {"str": {$regex: /^Chicken/}}.
-const winningPlan = getWinningPlan(explain.queryPlanner);
-const planStages = getPlanStages(winningPlan, 'IXSCAN');
+let winningPlan = getWinningPlan(explain.queryPlanner);
+let planStages = getPlanStages(winningPlan, 'IXSCAN');
 
 let idxUsedCnt = 0;
 for (const stage of planStages) {
@@ -79,10 +76,11 @@ for (const stage of planStages) {
     if (stage.indexName === "str_-1_obj.obj.obj.obj.$**_-1") {
         idxUsedCnt++;
 
-        // This key pattern should contain "$_path" rather than any specific field.
         const expectedKeyPattern = {"str": -1, "$_path": 1};
         assert.eq(stage.keyPattern, expectedKeyPattern, stage);
-        assert.eq(stage.indexBounds["$_path"], ["[MinKey, MaxKey]"], stage);
+        // The index bounds of "$_path" should always be expanded to "all-value" bounds regardless
+        // of whether the CWI's key pattern is expanded to a known field or not.
+        assert.eq(stage.indexBounds["$_path"], ["[MinKey, MinKey]", "[\"\", {})"], stage);
     }
     if (stage.indexName === "obj.obj.obj.$**_1") {
         idxUsedCnt++;
@@ -99,5 +97,91 @@ for (const stage of planStages) {
         assert.eq(stage.indexBounds["obj.obj.obj.obj.obj"], ["[MinKey, MaxKey]"], stage);
     }
 }
-assert.eq(idxUsedCnt, 2);
-})();
+assert.eq(idxUsedCnt, 2, winningPlan);
+
+// Test that two different CWI can be used to answer a $or query.
+const collTwoCWI = db[jsTestName() + "_wild_2"];
+const docs = [
+    {num: 1, sub: {num: 1, str: 'aa'}, str: '1'},
+    {num: 2, sub: {num: 2, str: 'bb'}, str: '2'},
+    {num: 3, sub: {num: 3, str: 'cc'}, str: '3'},
+];
+collTwoCWI.drop();
+assert.commandWorked(collTwoCWI.insertMany(docs));
+assert.commandWorked(collTwoCWI.createIndexes([{num: 1, "sub.$**": 1}, {"sub.$**": 1, num: 1}]));
+
+explain = assert.commandWorked(
+    collTwoCWI.find({$or: [{num: {$gte: 1}}, {'sub.str': 'aa'}]}).explain("executionStats"));
+winningPlan = getWinningPlan(explain.queryPlanner);
+planStages = getPlanStages(winningPlan, 'IXSCAN');
+
+idxUsedCnt = 0;
+for (const stage of planStages) {
+    assert(stage.hasOwnProperty('indexName'), stage);
+    if (stage.indexName === "sub.$**_1_num_1") {
+        idxUsedCnt++;
+
+        const expectedKeyPattern = {"$_path": 1, "sub.str": 1, "num": 1};
+        assert.eq(stage.keyPattern, expectedKeyPattern, stage);
+        // The "$_path" field shouldn't be expanded because this CWI is wildcard-field-prefixed.
+        assert.eq(stage.indexBounds["$_path"], ["[\"sub.str\", \"sub.str\"]"], stage);
+    }
+    if (stage.indexName === "num_1_sub.$**_1") {
+        idxUsedCnt++;
+
+        // The CWI used to answer a $or query should be expanded to a generic CWI with "$_path"
+        // field being the wildcard field.
+ const expectedKeyPattern = {"num": 1, "$_path": 1}; + assert.eq(stage.keyPattern, expectedKeyPattern, stage); + assert.eq(stage.indexBounds["num"], ["[1.0, inf.0]"], stage); + // The CWI used to answer a $or query should be expanded to include all paths and all keys + // for the wildcard field. + assert.eq(stage.indexBounds["$_path"], ["[MinKey, MinKey]", "[\"\", {})"], stage); + } +} +assert.eq(idxUsedCnt, 2, winningPlan); + +collTwoCWI.dropIndexes(); +assert.commandWorked(collTwoCWI.createIndexes([{num: 1, "sub.$**": 1}, {str: 1, "sub.$**": 1}])); + +// Test a filter with nested $and under a $or. +explain = assert.commandWorked( + collTwoCWI + .find({$or: [{$and: [{num: 1}, {"sub.num": {$gt: 4}}]}, {str: '1', "sub.num": {$lt: 10}}]}) + .explain("executionStats")); +winningPlan = getWinningPlan(explain.queryPlanner); +planStages = getPlanStages(winningPlan, 'IXSCAN'); + +idxUsedCnt = 0; +for (const stage of planStages) { + assert(stage.hasOwnProperty('indexName'), stage); + if (stage.indexName === "num_1_sub.$**_1") { + idxUsedCnt++; + + // If the IndexScan stage has a filter on field 'sub.num', then this CWI's key pattern + // cannot be overwritten. + if (stage.hasOwnProperty("filter") && stage["filter"].hasOwnProperty("sub.num")) { + const expectedKeyPattern = {"num": 1, "$_path": 1, "sub.num": 1}; + assert.eq(stage.keyPattern, expectedKeyPattern, stage); + } else { + const expectedKeyPattern = {"num": 1, "$_path": 1}; + assert.eq(stage.keyPattern, expectedKeyPattern, stage); + assert.eq(stage.indexBounds["$_path"], ["[MinKey, MinKey]", "[\"\", {})"], stage); + } + } + if (stage.indexName === "str_1_sub.$**_1") { + idxUsedCnt++; + + // If the IndexScan stage has a filter on field 'sub.num', then this CWI's key pattern + // cannot be overwritten. + if (stage.hasOwnProperty("filter") && stage["filter"].hasOwnProperty("sub.num")) { + const expectedKeyPattern = {"num": 1, "$_path": 1, "sub.num": 1}; + assert.eq(stage.keyPattern, expectedKeyPattern, stage); + } else { + const expectedKeyPattern = {"str": 1, "$_path": 1}; + assert.eq(stage.keyPattern, expectedKeyPattern, stage); + assert.eq(stage.indexBounds["$_path"], ["[MinKey, MinKey]", "[\"\", {})"], stage); + } + } +} +assert.eq(idxUsedCnt, 2, winningPlan); diff --git a/jstests/core/index/wildcard/compound_wildcard_index_prefix.js b/jstests/core/index/wildcard/compound_wildcard_index_prefix.js index 41e98fe74157b..68077ae55f68f 100644 --- a/jstests/core/index/wildcard/compound_wildcard_index_prefix.js +++ b/jstests/core/index/wildcard/compound_wildcard_index_prefix.js @@ -9,12 +9,9 @@ * requires_fcv_70, * ] */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For getPlanStages(). -load("jstests/libs/wildcard_index_helpers.js"); // For WildcardIndexHelpers. +load("jstests/aggregation/extras/utils.js"); // For arrayEq(). 
+import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js";
+import {WildcardIndexHelpers} from "jstests/libs/wildcard_index_helpers.js";
 
 const coll = db.query_on_prefix_of_compound_wildcard_index;
 
@@ -98,5 +95,4 @@ for (const query of supportedQueries) {
         const expected = coll.find(query).sort(sortOrder).hint({$natural: 1}).toArray();
         assertArrayEq({actual, expected});
     }
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/compound_wildcard_index_unbounded.js b/jstests/core/index/wildcard/compound_wildcard_index_unbounded.js
new file mode 100644
index 0000000000000..ed7d3399697ab
--- /dev/null
+++ b/jstests/core/index/wildcard/compound_wildcard_index_unbounded.js
@@ -0,0 +1,55 @@
+/**
+ * Tests that unbounded compound wildcard index scans, which also include multikey metadata
+ * entries, do not cause any errors.
+ *
+ * @tags: [
+ *   featureFlagCompoundWildcardIndexes,
+ *   requires_fcv_70,
+ *   # explain does not support majority read concern
+ *   assumes_read_concern_local,
+ * ]
+ */
+import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js";
+
+const coll = db.compound_wildcard_index_unbounded;
+coll.drop();
+const keyPattern = {
+    a: 1,
+    "$**": 1
+};
+const keyProjection = {
+    wildcardProjection: {a: 0}
+};
+assert.commandWorked(coll.createIndex(keyPattern, keyProjection));
+assert.commandWorked(coll.insert({a: 1, b: 1}));
+// Add an array field in order to make the wildcard index insert a multikey path metadata entry.
+assert.commandWorked(coll.insert({b: [1, 2]}));
+
+const query = {
+    a: {$exists: true}
+};
+const explain = coll.find(query).hint(keyPattern).explain('executionStats');
+const plan = getWinningPlan(explain.queryPlanner);
+const ixscans = getPlanStages(plan, "IXSCAN");
+// Asserting that we have unbounded index scans on $_path so that multikey metadata will also be
+// included in the scan.
+assert.gt(ixscans.length, 0, explain);
+ixscans.forEach(ixscan => {
+    assert.eq({a: 1, $_path: 1}, ixscan.keyPattern, explain);
+    assert.eq({a: ["[MinKey, MaxKey]"], $_path: ["[MinKey, MinKey]", "[\"\", {})"]},
+              ixscan.indexBounds,
+              explain);
+});
+
+const assertNoIndexCorruption = (executionStats) => {
+    if (typeof executionStats === 'object') {
+        if ("executionSuccess" in executionStats) {
+            // The execution should succeed rather than spot any index corruption.
+            assert.eq(true, executionStats.executionSuccess, explain);
+        }
+        assert.eq(executionStats.nReturned, 1, executionStats);
+    } else if (Array.isArray(executionStats)) {
+        executionStats.forEach(stats => assertNoIndexCorruption(stats));
+    }
+};
+assertNoIndexCorruption(explain.executionStats);
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/compound_wildcard_sort.js b/jstests/core/index/wildcard/compound_wildcard_sort.js
index a9b8a0b8adb4f..acdab8dfe6c32 100644
--- a/jstests/core/index/wildcard/compound_wildcard_sort.js
+++ b/jstests/core/index/wildcard/compound_wildcard_sort.js
@@ -5,17 +5,19 @@
  * # We may choose a different plan if other indexes are created, which would break the test.
  * assumes_no_implicit_index_creation,
  * assumes_read_concern_local,
+ * # Some expected query plans require the multi-planner to choose the optimal plan that uses a
+ * # more efficient CWI (non-generic). Sharded suites could mislead the multi-planner into
+ * # choosing a worse CWI because the planner may not run sufficient trials if there aren't
+ * # enough docs in some shard.
+ * assumes_unsharded_collection, * does_not_support_stepdowns, * featureFlagCompoundWildcardIndexes, * requires_fcv_70, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For getWinningPlan(), getPlanStages(). -load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection(). +import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js"; +load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection(). const coll = db.compound_wildcard_sort; coll.drop(); @@ -177,22 +179,18 @@ function testIndexesForWildcardField(wildcardField, subFields) { const valid = getValidKeyPatternPrefixesForSort(keyPattern); for (const kp of valid) { - // CWI with regular prefix cannot provide blocking sort for sort orders containing the - // wildcard field. - if (!keyPattern.hasOwnProperty('pre')) { - { - // Test sort on compound fields + first wildcard field (number). - const sort = replaceFieldWith(kp, wildcardField, [subFields[0]]); - const wildFieldPred = {[subFields[0]]: {$lte: 43}}; - runSortTestForWildcardField({index: keyPattern, sort, wildFieldPred}); - } + { + // Test sort on compound fields + first wildcard field (number). + const sort = replaceFieldWith(kp, wildcardField, [subFields[0]]); + const wildFieldPred = {[subFields[0]]: {$lte: 43}}; + runSortTestForWildcardField({index: keyPattern, sort, wildFieldPred}); + } - { - // Test sort on compound fields + second wildcard field (string). - const sort = replaceFieldWith(kp, wildcardField, [subFields[1]]); - const wildFieldPred = {[subFields[1]]: {$gt: ""}}; - runSortTestForWildcardField({index: keyPattern, sort, wildFieldPred}); - } + { + // Test sort on compound fields + second wildcard field (string). + const sort = replaceFieldWith(kp, wildcardField, [subFields[1]]); + const wildFieldPred = {[subFields[1]]: {$gt: ""}}; + runSortTestForWildcardField({index: keyPattern, sort, wildFieldPred}); } { @@ -217,5 +215,4 @@ function testIndexesForWildcardField(wildcardField, subFields) { initializeDocs(); testIndexesForWildcardField("wild.$**", ["wild.num1", "wild.str1"]); -testIndexesForWildcardField("$**", ["num2", "str2"]); -})(); +testIndexesForWildcardField("$**", ["num2", "str2"]); \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_and_text_indexes.js b/jstests/core/index/wildcard/wildcard_and_text_indexes.js index c3cb8ef0bff7d..6ea4026a21155 100644 --- a/jstests/core/index/wildcard/wildcard_and_text_indexes.js +++ b/jstests/core/index/wildcard/wildcard_and_text_indexes.js @@ -6,13 +6,15 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/analyze_plan.js"); // For getPlanStages and planHasStage. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" -load("jstests/libs/fixture_helpers.js"); // For isMongos. +import { + getWinningPlan, + getPlanStages, + getRejectedPlans, + planHasStage +} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/libs/fixture_helpers.js"); // For isMongos. const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); @@ -78,9 +80,6 @@ if (allowCompoundWildcardIndexes) { assertWildcardQuery({_fts: {$gt: 0, $lt: 4}}, {'_fts': 1}, false /* isCompound */); if (allowCompoundWildcardIndexes) { - // The expanded CWI key pattern shouldn't have '_fts'. 
The query is a $and query and 'pre' field
-    // is the prefix of the CWI, so it's basically a query on the non-wildcard prefix field of a
-    // CWI. The only eligible expanded CWI is with key pattern {"pre": 1, "$_path": 1}.
     assertWildcardQuery({_fts: 10, pre: 1}, {'pre': 1, '$_path': 1}, true /* isCompound */);
 }
 
@@ -128,5 +127,4 @@ for (let textIndex of [{'$**': 'text'}, {a: 1, '$**': 'text'}]) {
 
     // Drop the index so that a different text index can be created.
     assert.commandWorked(coll.dropIndex("textIndex"));
-}
-})();
+}
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_and_with_not.js b/jstests/core/index/wildcard/wildcard_and_with_not.js
index 1571ef56c94d6..1e3935e369976 100644
--- a/jstests/core/index/wildcard/wildcard_and_with_not.js
+++ b/jstests/core/index/wildcard/wildcard_and_with_not.js
@@ -12,15 +12,13 @@
  *   does_not_support_transactions,
  *   featureFlagCompoundWildcardIndexes,
  *   requires_fcv_70,
+ *   references_foreign_collection,
  * ]
  */
 
-(function() {
-'use strict';
-
 load("jstests/aggregation/extras/utils.js");  // For assertArrayEq.
-load("jstests/libs/analyze_plan.js");         // For getWinningPlan(), getAggPlanStages().
-load("jstests/libs/fixture_helpers.js");      // For numberOfShardsForCollection().
+import {getAggPlanStages} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/fixture_helpers.js");  // For numberOfShardsForCollection().
 
 const documentList = [
     {
@@ -130,5 +128,4 @@ testAndMatches(false /* useCollScan */);
 // Create a compound wildcard index with obj.date as a suffix (always ineligible).
 assert.commandWorked(that.dropIndexes());
 assert.commandWorked(that.createIndex({"obj.obj.obj.$**": 1, "obj.date": 1}, {}));
-testAndMatches(true /* useCollScan */);
-})();
+testAndMatches(true /* useCollScan */);
\ No newline at end of file
diff --git a/jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js b/jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js
index c9ef6bfa50210..fd02c968030ef 100644
--- a/jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js
+++ b/jstests/core/index/wildcard/wildcard_index_basic_index_bounds.js
@@ -7,14 +7,22 @@
  * @tags: [
  *   assumes_balancer_off,
  *   does_not_support_stepdowns,
+ *   # Some expected index bounds require the multi-planner to choose the optimal plan that uses a
+ *   # more efficient CWI (non-generic). Sharded suites could mislead the multi-planner into
+ *   # choosing a worse CWI because the planner may not run sufficient trials if there aren't
+ *   # enough docs in some shard.
+ *   assumes_unsharded_collection,
+ *   featureFlagCompoundWildcardIndexes,
  * ]
  */
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");       // For getPlanStages.
-load("jstests/libs/fixture_helpers.js");    // For isMongos and numberOfShardsForCollection.
-load("jstests/libs/feature_flag_util.js");  // For "FeatureFlagUtil"
+import {
+    getPlanStages,
+    getRejectedPlan,
+    getRejectedPlans,
+    getWinningPlan
+} from "jstests/libs/analyze_plan.js";
+load("jstests/libs/fixture_helpers.js");    // For isMongos and numberOfShardsForCollection.
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
 
 // Asserts that the given cursors produce identical result sets.
 function assertResultsEq(cursor1, cursor2) {
@@ -35,17 +43,19 @@ const allowCompoundWildcardIndexes =
 // Template document which defines the 'schema' of the documents in the test collection.
const templateDoc = { a: 0, - b: {c: 0, d: {e: 0}, f: {}} + b: {c: 0, d: {e: 0, g: 1}, f: {}, arr: [1]} }; const pathList = ['a', 'b.c', 'b.d.e', 'b.f']; // Insert a set of documents into the collection, based on the template document and populated // with an increasing sequence of values. This is to ensure that the range of values present for // each field in the dataset is not entirely homogeneous. -for (let i = 0; i < 10; i++) { +for (let i = 0; i < 100; i++) { (function populateDoc(doc, value) { for (let key in doc) { - if (typeof doc[key] === 'object') + if (Array.isArray(doc[key])) { + doc[key].push(value); + } else if (typeof doc[key] === 'object') value = populateDoc(doc[key], value); else doc[key] = value++; @@ -88,34 +98,39 @@ const operationList = [ // In principle we could have tighter bounds for this. See SERVER-36765. {expression: {$eq: null, $exists: true}, bounds: ['[MinKey, MaxKey]'], subpathBounds: true}, {expression: {$eq: []}, bounds: ['[undefined, undefined]', '[[], []]']}, - ]; // Operations for compound wildcard indexes. const operationListCompound = [ { query: {'a': 3, 'b.c': {$gte: 3}}, - bounds: {'a': ['[3.0, 3.0]'], '$_path': ['[MinKey, MaxKey]'], 'c': ['[MinKey, MaxKey]']}, - path: '$_path', - expectedKeyPattern: {'a': 1, '$_path': 1, 'c': 1} + bounds: {'a': ['[3.0, 3.0]'], 'b.c': ['[3.0, inf.0]'], 'c': ['[MinKey, MaxKey]']}, + path: 'b.c', + subpathBounds: false, + expectedKeyPattern: {'a': 1, '$_path': 1, 'b.c': 1, 'c': 1} }, { query: {'a': 3, 'b.c': {$gte: 3}, 'c': {$lt: 3}}, - bounds: {'a': ['[3.0, 3.0]'], '$_path': ['[MinKey, MaxKey]'], 'c': ['[MinKey, MaxKey]']}, - path: '$_path', - expectedKeyPattern: {'a': 1, '$_path': 1, 'c': 1} + bounds: {'a': ['[3.0, 3.0]'], 'b.c': ['[3.0, inf.0]'], 'c': ['[-inf.0, 3.0)']}, + path: 'b.c', + subpathBounds: false, + expectedKeyPattern: {'a': 1, '$_path': 1, 'b.c': 1, 'c': 1} }, { - query: {'a': 3, 'b.c': {$in: [1, 2]}}, - bounds: {'a': ['[3.0, 3.0]'], '$_path': ['[MinKey, MaxKey]'], 'c': ['[MinKey, MaxKey]']}, - path: '$_path', + query: {'a': 3, 'b.c': {$in: [1]}}, + bounds: {'a': ['[3.0, 3.0]'], 'b.c': ['[1.0, 1.0]'], 'c': ['[MinKey, MaxKey]']}, + path: 'b.c', subpathBounds: false, - expectedKeyPattern: {'a': 1, '$_path': 1, 'c': 1} + expectedKeyPattern: {'a': 1, '$_path': 1, 'b.c': 1, 'c': 1} }, { query: {'a': 3, 'b.c': {$exists: true}, 'c': {$lt: 3}}, - bounds: {'a': ['[3.0, 3.0]'], '$_path': ['[MinKey, MaxKey]'], 'c': ['[MinKey, MaxKey]']}, + bounds: { + 'a': ['[3.0, 3.0]'], + '$_path': ["[MinKey, MinKey]", "[\"\", {})"], + 'c': ['[MinKey, MaxKey]'] + }, path: '$_path', subpathBounds: false, expectedKeyPattern: {'a': 1, '$_path': 1, 'c': 1} @@ -304,14 +319,17 @@ function runCompoundWildcardIndexTest(keyPattern, pathProjection) { // Verify that the winning plan uses the compound wildcard index with the expected bounds. assert.eq(ixScans.length, FixtureHelpers.numberOfShardsForCollection(coll)); // Use "tojson()" in order to make ordering of fields matter. - assert.docEq(tojson(op.expectedKeyPattern), tojson(ixScans[0].keyPattern)); - assert.docEq(tojson(expectedBounds), tojson(ixScans[0].indexBounds)); + assert.docEq(tojson(op.expectedKeyPattern), tojson(ixScans[0].keyPattern), explainRes); + if (tojson(expectedBounds) != tojson(ixScans[0].indexBounds)) { + assert.docEq(expectedBounds, ixScans[0].indexBounds, explainRes); + } // Verify that the results obtained from the compound wildcard index are identical to a // COLLSCAN. 
We must explicitly hint the wildcard index, because we also sort on {_id: 1} to // ensure that both result sets are in the same order. assertResultsEq(coll.find(op.query).sort({_id: 1}).hint(keyPattern), - coll.find(op.query).sort({_id: 1}).hint({$natural: 1})); + coll.find(op.query).sort({_id: 1}).hint({$natural: 1}), + explainRes); } } @@ -338,4 +356,3 @@ runWildcardIndexTest({'$**': 1}, {a: 0, 'b.d': 0}, ['b.c', 'b.f']); // Test a compound wildcard index. runCompoundWildcardIndexTest({'a': 1, 'b.$**': 1, 'c': 1}, null); runCompoundWildcardIndexTest({'a': 1, '$**': 1, 'c': 1}, {'a': 0, 'c': 0}); -})(); diff --git a/jstests/core/index/wildcard/wildcard_index_cached_plans.js b/jstests/core/index/wildcard/wildcard_index_cached_plans.js index cc19b56a669c9..d9c961815bfbb 100644 --- a/jstests/core/index/wildcard/wildcard_index_cached_plans.js +++ b/jstests/core/index/wildcard/wildcard_index_cached_plans.js @@ -20,14 +20,17 @@ * assumes_no_implicit_index_creation, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); // For getPlanStage(). +import { + getPlanCacheKeyFromExplain, + getPlanCacheKeyFromShape, + getPlanStage, + getPlanStages, + getWinningPlan, +} from "jstests/libs/analyze_plan.js"; load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load('jstests/libs/fixture_helpers.js'); // For getPrimaryForNodeHostingDatabase and isMongos. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const coll = db.wildcard_cached_plans; @@ -192,4 +195,3 @@ for (const indexSpec of wildcardIndexes) { getPlanCacheKeyFromExplain(queryUnindexedExplain, db)); } } -})(); diff --git a/jstests/core/index/wildcard/wildcard_index_collation.js b/jstests/core/index/wildcard/wildcard_index_collation.js index 29be78dbc3579..e65e4c3327da5 100644 --- a/jstests/core/index/wildcard/wildcard_index_collation.js +++ b/jstests/core/index/wildcard/wildcard_index_collation.js @@ -12,12 +12,9 @@ * requires_non_retryable_writes, * ] */ -(function() { -"user strict"; - -load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/analyze_plan.js"); // For getPlanStages. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +load("jstests/aggregation/extras/utils.js"); // For arrayEq. +import {getWinningPlan, getPlanStages, isIndexOnly} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/index_catalog_helpers.js"); // For IndexCatalogHelpers. load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. load("jstests/libs/fixture_helpers.js"); // For isMongos. 
@@ -141,4 +138,3 @@ for (const indexSpec of wildcardIndexes) { coll = assertDropAndRecreateCollection( db, "wildcard_collation", {collation: {locale: "en_US", strength: 1}}); } -})(); diff --git a/jstests/core/index/wildcard/wildcard_index_count.js b/jstests/core/index/wildcard/wildcard_index_count.js index 6be792353bba3..646ccfef4c823 100644 --- a/jstests/core/index/wildcard/wildcard_index_count.js +++ b/jstests/core/index/wildcard/wildcard_index_count.js @@ -8,11 +8,8 @@ // assumes_unsharded_collection, // does_not_support_stepdowns, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {getAggPlanStage, getPlanStage, isCollscan} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const coll = db.wildcard_index_count; coll.drop(); @@ -120,4 +117,3 @@ for (const indexSpec of wildcardIndexes) { assert.commandWorked(coll.dropIndex(indexSpec.keyPattern)); } -}()); diff --git a/jstests/core/index/wildcard/wildcard_index_covered_queries.js b/jstests/core/index/wildcard/wildcard_index_covered_queries.js index 7cade95f88fc9..cfb62203e0217 100644 --- a/jstests/core/index/wildcard/wildcard_index_covered_queries.js +++ b/jstests/core/index/wildcard/wildcard_index_covered_queries.js @@ -10,12 +10,9 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/analyze_plan.js"); // For getPlanStages and isIndexOnly. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {getWinningPlan, getPlanStages, isIndexOnly} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const assertArrayEq = (l, r) => assert(arrayEq(l, r)); @@ -109,5 +106,4 @@ for (const indexSpec of wildcardIndexes) { // Verify that predicates which produce inexact-fetch bounds are not covered by a $** index. assertWildcardProvidesCoveredSolution( {d: {$elemMatch: {$eq: 50}}}, {_id: 0, d: 1}, shouldFailToCover); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_dedup.js b/jstests/core/index/wildcard/wildcard_index_dedup.js index da27f6d8f7873..accb038d5c79b 100644 --- a/jstests/core/index/wildcard/wildcard_index_dedup.js +++ b/jstests/core/index/wildcard/wildcard_index_dedup.js @@ -9,10 +9,7 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const coll = db.wildcard_index_dedup; coll.drop(); @@ -50,5 +47,4 @@ if (allowCompoundWildcardIndexes) { // Test compound wildcard indexes do not return duplicates. assert.eq(1, coll.find({"a.c": {$exists: true}, post: 1}).hint(compoundKeyPattern).itcount()); assert.eq(1, coll.find({"a.h": {$exists: true}, post: 1}).hint(compoundKeyPattern).itcount()); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_distinct_scan.js b/jstests/core/index/wildcard/wildcard_index_distinct_scan.js index 6d578a8fae385..407ffdf657e40 100644 --- a/jstests/core/index/wildcard/wildcard_index_distinct_scan.js +++ b/jstests/core/index/wildcard/wildcard_index_distinct_scan.js @@ -6,12 +6,9 @@ * no_selinux, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. 
-load("jstests/libs/analyze_plan.js"); // For planHasStage and getPlanStages. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {getWinningPlan, getPlanStages, planHasStage} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); @@ -224,5 +221,4 @@ for (let testCase of testCases) { expectedResults: distinctValues, expectedPath: null }); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_dup_predicates.js b/jstests/core/index/wildcard/wildcard_index_dup_predicates.js index 5ec6e6d1a6f8c..9afe35dd2811e 100644 --- a/jstests/core/index/wildcard/wildcard_index_dup_predicates.js +++ b/jstests/core/index/wildcard/wildcard_index_dup_predicates.js @@ -5,11 +5,8 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanStages. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const coll = db.wildcard_index_dup_predicates; coll.drop(); @@ -27,9 +24,9 @@ const allowCompoundWildcardIndexes = // Inserts the given document and runs the given query to confirm that: // (1) query matches the given document // (2) the winning plan does a wildcard index scan -function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match, excludeCWI = false) { +function assertExpectedDocAnswersWildcardIndexQuery(doc, query, match) { for (const indexSpec of wildcardIndexes) { - if ((!allowCompoundWildcardIndexes || excludeCWI) && indexSpec.wildcardProjection) { + if (!allowCompoundWildcardIndexes && indexSpec.wildcardProjection) { continue; } coll.drop(); @@ -77,12 +74,11 @@ assertExpectedDocAnswersWildcardIndexQuery( {a: {b: "foo"}}, {$and: [{a: {$gt: {}}}, {a: {$gt: {}}}, {"a.b": "foo"}]}, true); assertExpectedDocAnswersWildcardIndexQuery( - {a: {b: "foo"}}, {$and: [{a: {$ne: 3}}, {a: {$ne: 3}}, {"a.b": "foo"}]}, true, true); + {a: {b: "foo"}}, {$and: [{a: {$ne: 3}}, {a: {$ne: 3}}, {"a.b": "foo"}]}, true); assertExpectedDocAnswersWildcardIndexQuery( {a: {b: "foo"}}, {$and: [{a: {$nin: [3, 4, 5]}}, {a: {$nin: [3, 4, 5]}}, {"a.b": "foo"}]}, - true, true); assertExpectedDocAnswersWildcardIndexQuery( @@ -92,4 +88,3 @@ assertExpectedDocAnswersWildcardIndexQuery( {a: {b: "foo"}}, {$and: [{a: {$elemMatch: {$gt: {}}}}, {a: {$elemMatch: {$gt: {}}}}, {"a.b": "foo"}]}, false); -})(); diff --git a/jstests/core/index/wildcard/wildcard_index_empty_arrays.js b/jstests/core/index/wildcard/wildcard_index_empty_arrays.js index 2471ad1d40f42..e3b7e0b5bb2fd 100644 --- a/jstests/core/index/wildcard/wildcard_index_empty_arrays.js +++ b/jstests/core/index/wildcard/wildcard_index_empty_arrays.js @@ -5,11 +5,8 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const coll = db.wildcard_empty_arrays; coll.drop(); @@ -57,5 +54,4 @@ for (const indexSpec of wildcardIndexes) { // $** index matches empty array nested within an array. 
assertArrayEq(coll.find({"b": []}, {_id: 0}).hint(indexSpec.keyPattern).toArray(), [{a: 2, b: [[]], c: 1, d: 4}]); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_equality_to_empty_obj.js b/jstests/core/index/wildcard/wildcard_index_equality_to_empty_obj.js index 861ecca09646b..b6b551759b44e 100644 --- a/jstests/core/index/wildcard/wildcard_index_equality_to_empty_obj.js +++ b/jstests/core/index/wildcard/wildcard_index_equality_to_empty_obj.js @@ -5,10 +5,7 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const coll = db.wildcard_index_equality_to_empty_obj; coll.drop(); @@ -100,5 +97,4 @@ for (const indexSpec of wildcardIndexes) { assert.eq(results, coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).hint({$natural: 1}).toArray()); assert.eq(results, coll.find({"a.b": {$eq: {}}}, {_id: 1}).sort({_id: 1}).toArray()); -} -}()); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_filter.js b/jstests/core/index/wildcard/wildcard_index_filter.js index 879e245c42247..d9badc369a0dc 100644 --- a/jstests/core/index/wildcard/wildcard_index_filter.js +++ b/jstests/core/index/wildcard/wildcard_index_filter.js @@ -10,10 +10,8 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; -load("jstests/libs/analyze_plan.js"); load("jstests/libs/fixture_helpers.js"); // For 'isMongos()'. const coll = db.wildcard_index_filter; @@ -111,5 +109,4 @@ const indexAWildcard = { assert.commandWorked(coll.createIndex(indexAWildcard)); // Filtering on a path specified $** index. Check that the $** is used over other indices. -assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexAWildcard], {a: "a"}, "a.$**_1"); -})(); +assertExpectedIndexAnswersQueryWithFilter({a: "a"}, [indexAWildcard], {a: "a"}, "a.$**_1"); \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_hint.js b/jstests/core/index/wildcard/wildcard_index_hint.js index 91e1cae3c5c02..2665f59b1e5d4 100644 --- a/jstests/core/index/wildcard/wildcard_index_hint.js +++ b/jstests/core/index/wildcard/wildcard_index_hint.js @@ -5,11 +5,8 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/analyze_plan.js"); // For getPlanStages. +import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.wildcard_hint; coll.drop(); @@ -97,5 +94,4 @@ assertExpectedIndexAnswersQueryWithHint( // Hint a $** index by name. assertExpectedIndexAnswersQueryWithHint( - {"a": 1}, "$**_1", "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); -})(); + {"a": 1}, "$**_1", "$**_1", [{a: 1, b: 1, c: {d: 1, e: 1}}, {a: 1, b: 2, c: {d: 2, e: 2}}]); \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_minmax.js b/jstests/core/index/wildcard/wildcard_index_minmax.js index f1ee35669ef03..ecd13ecdf9f27 100644 --- a/jstests/core/index/wildcard/wildcard_index_minmax.js +++ b/jstests/core/index/wildcard/wildcard_index_minmax.js @@ -5,11 +5,8 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. 
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const coll = db.wildcard_index_minmax; coll.drop(); @@ -103,5 +100,4 @@ for (const indexSpec of wildcardIndexes) { // $** index does not interfere with valid min/max. assertArrayEq(coll.find({}, {_id: 0}).min({"a": 0.5}).max({"a": 1.5}).hint({a: 1}).toArray(), [{a: 1, b: 1}, {a: 1, b: 2}]); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_multikey.js b/jstests/core/index/wildcard/wildcard_index_multikey.js index e91ea7223e4ff..b7d87fb324b2c 100644 --- a/jstests/core/index/wildcard/wildcard_index_multikey.js +++ b/jstests/core/index/wildcard/wildcard_index_multikey.js @@ -6,12 +6,9 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq. -load("jstests/libs/analyze_plan.js"); // For getPlanStages. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const assertArrayEq = (l, r) => assert(arrayEq(l, r), tojson(l) + " != " + tojson(r)); @@ -387,4 +384,3 @@ for (const indexSpec of wildcardIndexes) { assertWildcardQuery({"a.3.4": {$exists: true}}, null, {"executionStats.nReturned": 0}); assertWildcardQuery({"a.3.4.b": {$exists: true}}, null, {"executionStats.nReturned": 0}); } -}()); diff --git a/jstests/core/index/wildcard/wildcard_index_nonblocking_sort.js b/jstests/core/index/wildcard/wildcard_index_nonblocking_sort.js index 7aa328d4e3ece..ac894b968fe20 100644 --- a/jstests/core/index/wildcard/wildcard_index_nonblocking_sort.js +++ b/jstests/core/index/wildcard/wildcard_index_nonblocking_sort.js @@ -3,13 +3,10 @@ // assumes_read_concern_local, // does_not_support_stepdowns, // ] -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For getPlanStages(). -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" -load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection(). +import {getWinningPlan, getPlanStages} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection(). // TODO SERVER-68303: Remove the feature flag and update corresponding tests. const allowCompoundWildcardIndexes = @@ -114,5 +111,4 @@ if (allowCompoundWildcardIndexes) { runSortTests(dir, proj, {a: dir, excludedField: dir}, true /* isCompound */); } } -} -})(); +} \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_partial_index.js b/jstests/core/index/wildcard/wildcard_index_partial_index.js index d07b03e919848..220399f428425 100644 --- a/jstests/core/index/wildcard/wildcard_index_partial_index.js +++ b/jstests/core/index/wildcard/wildcard_index_partial_index.js @@ -5,11 +5,8 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For isIxScan, isCollscan. 
-load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {getWinningPlan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const coll = db.wildcard_partial_index; @@ -86,4 +83,3 @@ for (let i = 0; i < 2; ++i) { // should match the document in the collection (but would fail to match if it incorrectly indexed // the $eq:null predicate using the wildcard index). assert.eq(1, coll.find({x: 1, y: null}).itcount()); -})(); diff --git a/jstests/core/index/wildcard/wildcard_index_projection.js b/jstests/core/index/wildcard/wildcard_index_projection.js index b21ffa74afd34..d33a3f6d9bec4 100644 --- a/jstests/core/index/wildcard/wildcard_index_projection.js +++ b/jstests/core/index/wildcard/wildcard_index_projection.js @@ -8,11 +8,8 @@ * ] */ -(function() { -"use strict"; - load("jstests/libs/fixture_helpers.js"); // For isMongos. -load("jstests/libs/analyze_plan.js"); // For getRejectedPlan helper to analyze explain() output. +import {getWinningPlan, getRejectedPlan} from "jstests/libs/analyze_plan.js"; const collName = jsTestName(); const coll = db[collName]; @@ -66,5 +63,4 @@ assert.eq(winningPlan.inputStage.keyPattern, {$_path: 1, _id: 1}, winningPlan.in // Test that the results are correct. const hintedResults = coll.find({_id: {$eq: 1}}).hint("$**_1").toArray(); assert.eq(hintedResults.length, 1, hintedResults); -assert.eq(hintedResults[0]._id, 1, hintedResults); -})(); +assert.eq(hintedResults[0]._id, 1, hintedResults); \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_type.js b/jstests/core/index/wildcard/wildcard_index_type.js index 205a318bd92e3..966fbac59003c 100644 --- a/jstests/core/index/wildcard/wildcard_index_type.js +++ b/jstests/core/index/wildcard/wildcard_index_type.js @@ -5,11 +5,8 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanStages. -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const coll = db.wildcard_index_type; coll.drop(); @@ -175,4 +172,3 @@ assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "date"}} // A $type of 'timestamp' won't match a date value. 
assertExpectedDocAnswersWildcardIndexQuery({a: new Date()}, {a: {$type: "timestamp"}}, false); -})(); diff --git a/jstests/core/index/wildcard/wildcard_index_update.js b/jstests/core/index/wildcard/wildcard_index_update.js index 9f0353ea7b27f..6964243a26320 100644 --- a/jstests/core/index/wildcard/wildcard_index_update.js +++ b/jstests/core/index/wildcard/wildcard_index_update.js @@ -4,13 +4,11 @@ * @tags: [ * requires_fcv_63, * does_not_support_stepdowns, + * uses_full_validation, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const collName = jsTestName(); const coll = db[collName]; @@ -51,5 +49,4 @@ assert.commandWorked(coll.update({_id: 0}, {$set: {"pre": 1}})); validate(); assert.commandWorked(coll.update({_id: 0}, {$set: {"other": 1}})); -validate(); -})(); +validate(); \ No newline at end of file diff --git a/jstests/core/index/wildcard/wildcard_index_validindex.js b/jstests/core/index/wildcard/wildcard_index_validindex.js index 3a694e524eeea..c410fa9d024c3 100644 --- a/jstests/core/index/wildcard/wildcard_index_validindex.js +++ b/jstests/core/index/wildcard/wildcard_index_validindex.js @@ -4,14 +4,12 @@ * # Uses index building in background * requires_background_index, * does_not_support_stepdowns, + * does_not_support_transactions, * ] */ -(function() { -"use strict"; - load("jstests/libs/index_catalog_helpers.js"); // For "IndexCatalogHelpers." load("jstests/libs/collection_drop_recreate.js"); // For "assertDropCollection." -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const kCollectionName = "wildcard_validindex"; const coll = db.getCollection(kCollectionName); @@ -32,12 +30,6 @@ IndexCatalogHelpers.createIndexAndVerifyWithDrop(coll, {"a.$**": 1}, {name: kInd IndexCatalogHelpers.createIndexAndVerifyWithDrop( coll, {"$**": 1}, {name: kIndexName, partialFilterExpression: {a: {"$gt": 0}}}); -// Can create a wildcard index with foreground & background construction. -IndexCatalogHelpers.createIndexAndVerifyWithDrop( - coll, {"$**": 1}, {background: false, name: kIndexName}); -IndexCatalogHelpers.createIndexAndVerifyWithDrop( - coll, {"$**": 1}, {background: true, name: kIndexName}); - // Can create a wildcard index with index level collation. IndexCatalogHelpers.createIndexAndVerifyWithDrop( coll, {"$**": 1}, {collation: {locale: "fr"}, name: kIndexName}); @@ -173,5 +165,4 @@ assert.commandFailedWithCode( 40414); // Need to specify 'unique'. 
assert.commandFailedWithCode( db.runCommand({create: clusteredCollName, clusteredIndex: {key: {"$**": 1}, unique: true}}), - ErrorCodes.InvalidIndexSpecificationOption); -})(); + ErrorCodes.InvalidIndexSpecificationOption); \ No newline at end of file diff --git a/jstests/core/json1.js b/jstests/core/json1.js index 731bef9fcdcf6..4f026e7ba32f5 100644 --- a/jstests/core/json1.js +++ b/jstests/core/json1.js @@ -1,8 +1,6 @@ -x = { - quotes: "a\"b", - nulls: null -}; +let x = {quotes: "a\"b", nulls: null}; +let y; eval("y = " + tojson(x)); assert.eq(tojson(x), tojson(y), "A"); assert.eq(typeof (x.nulls), typeof (y.nulls), "B"); @@ -79,4 +77,4 @@ x = { assert.eq( JSON.stringify(x), - '{"data_binary":{"$binary":"VG8gYmUgb3Igbm90IHRvIGJlLi4uIFRoYXQgaXMgdGhlIHF1ZXN0aW9uLg==","$type":"00"},"data_timestamp":{"$timestamp":{"t":987654321,"i":0}},"data_regex":{"$regex":"^acme","$options":"i"},"data_oid":{"$oid":"579a70d9e249393f153b5bc1"},"data_ref":{"$ref":"test","$id":"579a70d9e249393f153b5bc1"},"data_minkey":{"$minKey":1},"data_maxkey":{"$maxKey":1},"data_numberlong":{"$numberLong":"12345"},"data_numberint":5,"data_numberdecimal":{"$numberDecimal":"3.14000000000000"}}'); \ No newline at end of file + '{"data_binary":{"$binary":"VG8gYmUgb3Igbm90IHRvIGJlLi4uIFRoYXQgaXMgdGhlIHF1ZXN0aW9uLg==","$type":"00"},"data_timestamp":{"$timestamp":{"t":987654321,"i":0}},"data_regex":{"$regex":"^acme","$options":"i"},"data_oid":{"$oid":"579a70d9e249393f153b5bc1"},"data_ref":{"$ref":"test","$id":"579a70d9e249393f153b5bc1"},"data_minkey":{"$minKey":1},"data_maxkey":{"$maxKey":1},"data_numberlong":{"$numberLong":"12345"},"data_numberint":5,"data_numberdecimal":{"$numberDecimal":"3.14000000000000"}}'); diff --git a/jstests/core/json_schema/json_schema.js b/jstests/core/json_schema/json_schema.js index c1e063444989f..0b3c01fb5e6b1 100644 --- a/jstests/core/json_schema/json_schema.js +++ b/jstests/core/json_schema/json_schema.js @@ -7,11 +7,8 @@ /** * Tests for JSON Schema document validation. */ -(function() { -"use strict"; - load("jstests/libs/assert_schema_match.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const isSBEEnabled = checkSBEEnabled(db); @@ -343,5 +340,4 @@ if (!isSBEEnabled) { assert.eq(1, coll.find({$_internalSchemaMaxProperties: 3, b: 2}).itcount()); } assert.eq(1, coll.find({$alwaysTrue: 1, b: 2}).itcount()); -assert.eq(0, coll.find({$alwaysFalse: 1, b: 2}).itcount()); -}()); +assert.eq(0, coll.find({$alwaysFalse: 1, b: 2}).itcount()); \ No newline at end of file diff --git a/jstests/core/json_schema/misc_validation.js b/jstests/core/json_schema/misc_validation.js index 4821c0643f98c..7e95ed729ecfe 100644 --- a/jstests/core/json_schema/misc_validation.js +++ b/jstests/core/json_schema/misc_validation.js @@ -19,6 +19,7 @@ * requires_replication, * # This test depends on hardcoded database name equality. * tenant_migration_incompatible, + * references_foreign_collection, * ] */ (function() { @@ -38,8 +39,6 @@ assert.commandWorked(testDB.createCollection(testName)); const coll = testDB.getCollection(testName); coll.drop(); -const isMongos = (testDB.runCommand("hello").msg === "isdbgrid"); - // Test that $jsonSchema is rejected in an $elemMatch projection. 
assert.throws(function() { coll.find({}, {a: {$elemMatch: {$jsonSchema: {}}}}).itcount(); diff --git a/jstests/core/jssymbol.js b/jstests/core/jssymbol.js index 714ea48870a2e..1f805feac0c38 100644 --- a/jstests/core/jssymbol.js +++ b/jstests/core/jssymbol.js @@ -2,6 +2,8 @@ // // @tags: [ // no_selinux, +// # TODO SERVER-77024 enable on sharded passthrough suites when orphans hook will be supported +// assumes_unsharded_collection, // ] (function() { @@ -16,9 +18,9 @@ assert(db[Symbol.species] != 1); assert(db[Symbol.toPrimitive] != 1); // Exercise Symbol.toPrimitive on BSON objects -col1 = db.jssymbol_col; +let col1 = db.jssymbol_col; col1.insert({}); -a = db.getCollection("jssymbol_col").getIndexes()[0]; +let a = db.getCollection("jssymbol_col").getIndexes()[0]; assert(isNaN(+a)); assert(+a.v >= 1); diff --git a/jstests/core/latch_analyzer.js b/jstests/core/latch_analyzer.js index 73aa652c6c181..96c20d9c32083 100644 --- a/jstests/core/latch_analyzer.js +++ b/jstests/core/latch_analyzer.js @@ -1,7 +1,7 @@ /** * Verify that the LatchAnalyzer is working to expectations * - * @tags: [multiversion_incompatible, no_selinux] + * @tags: [multiversion_incompatible, no_selinux, requires_latch_analyzer] */ (function() { diff --git a/jstests/core/loadserverscripts.js b/jstests/core/loadserverscripts.js index db2e15fb05bbd..4dfed2188ffdf 100644 --- a/jstests/core/loadserverscripts.js +++ b/jstests/core/loadserverscripts.js @@ -5,6 +5,9 @@ // uses_parallel_shell, // # This test has statements that do not support non-local read concern. // does_not_support_causal_consistency, +// # DB.prototype.loadServerScripts does not behave as expected in module mode, and the SELinux +// # test runner loads scripts with dynamic load. +// no_selinux // ] // Test db.loadServerScripts() diff --git a/jstests/core/logprocessdetails.js b/jstests/core/logprocessdetails.js index 4c72f802262f5..fe9c8a99b1c75 100644 --- a/jstests/core/logprocessdetails.js +++ b/jstests/core/logprocessdetails.js @@ -14,7 +14,7 @@ * Checks an array for match against regex. * Returns true if regex matches a string in the array */ -doesLogMatchRegex = function(logArray, regex) { +let doesLogMatchRegex = function(logArray, regex) { for (var i = (logArray.length - 1); i >= 0; i--) { var regexInLine = regex.exec(logArray[i]); if (regexInLine != null) { @@ -24,7 +24,7 @@ doesLogMatchRegex = function(logArray, regex) { return false; }; -doTest = function() { +let doTest = function() { var log = db.adminCommand({getLog: 'global'}); // this regex will need to change if output changes var re = new RegExp(".*conn.*options.*"); diff --git a/jstests/core/mod_overflow.js b/jstests/core/mod_overflow.js index 77f0a68ef3b74..8481988b4038b 100644 --- a/jstests/core/mod_overflow.js +++ b/jstests/core/mod_overflow.js @@ -42,4 +42,4 @@ for (let divisor of [-1.0, NumberInt("-1"), NumberLong("-1"), NumberDecimal("-1" .aggregate([{$project: {val: 1, modVal: {$mod: ["$val", divisor]}}}, {$sort: {_id: 1}}]) .toArray()); } -})(); \ No newline at end of file +})(); diff --git a/jstests/core/mr_single_reduce.js b/jstests/core/mr_single_reduce.js index 371837e92ce4b..779f2f650e7c2 100644 --- a/jstests/core/mr_single_reduce.js +++ b/jstests/core/mr_single_reduce.js @@ -1,5 +1,7 @@ // The test runs commands that are not allowed with security token: mapReduce. // @tags: [ +// # Step-down can cause mapReduce to fail. +// does_not_support_stepdowns, // not_allowed_with_security_token, // # Uses mapReduce command. 
// requires_scripting, diff --git a/jstests/core/multi.js b/jstests/core/multi.js index ac961ed7a9c69..58cb327f86f0e 100644 --- a/jstests/core/multi.js +++ b/jstests/core/multi.js @@ -1,4 +1,4 @@ -t = db.jstests_multi; +let t = db.jstests_multi; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/multi2.js b/jstests/core/multi2.js index 64473e4de987f..cf9dd924d5163 100644 --- a/jstests/core/multi2.js +++ b/jstests/core/multi2.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.multi2; +let t = db.multi2; t.drop(); t.save({x: 1, a: [1]}); diff --git a/jstests/core/notablescan.js b/jstests/core/notablescan.js index f9addc7809918..ff9a0b91f2d6b 100644 --- a/jstests/core/notablescan.js +++ b/jstests/core/notablescan.js @@ -16,7 +16,7 @@ // tenant_migration_incompatible, // ] -t = db.test_notablescan; +let t = db.test_notablescan; t.drop(); try { diff --git a/jstests/core/notablescan_capped.js b/jstests/core/notablescan_capped.js index cc7405ff88bae..43ac4d43f945f 100644 --- a/jstests/core/notablescan_capped.js +++ b/jstests/core/notablescan_capped.js @@ -17,14 +17,14 @@ // tenant_migration_incompatible, // ] -t = db.test_notablescan_capped; +let t = db.test_notablescan_capped; t.drop(); assert.commandWorked(db.createCollection(t.getName(), {capped: true, size: 100})); try { assert.commandWorked(db._adminCommand({setParameter: 1, notablescan: true})); - err = assert.throws(function() { + let err = assert.throws(function() { t.find({a: 1}).tailable(true).next(); }); assert.includes(err.toString(), "tailable"); diff --git a/jstests/core/opcounters_write_cmd.js b/jstests/core/opcounters_write_cmd.js index 6c604a9348a36..e3ec59863fa27 100644 --- a/jstests/core/opcounters_write_cmd.js +++ b/jstests/core/opcounters_write_cmd.js @@ -4,6 +4,10 @@ // not_allowed_with_security_token, // uses_multiple_connections, // assumes_standalone_mongod, +// # The config fuzzer may run logical session cache refreshes in the background, which modifies +// # some serverStatus metrics read in this test. +// does_not_support_config_fuzzer, +// inspects_command_opcounters, // does_not_support_repeated_reads, // ] diff --git a/jstests/core/operation_latency_histogram.js b/jstests/core/operation_latency_histogram.js index cd220deae48ba..703d5da8f6bd7 100644 --- a/jstests/core/operation_latency_histogram.js +++ b/jstests/core/operation_latency_histogram.js @@ -27,6 +27,7 @@ (function() { "use strict"; +load("jstests/libs/fixture_helpers.js"); load("jstests/libs/stats.js"); var name = "operationalLatencyHistogramTest"; @@ -145,8 +146,7 @@ lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); // Reindex (Only standalone mode supports the reIndex command.) const hello = db.runCommand({hello: 1}); -const isMongos = (hello.msg === "isdbgrid"); -const isStandalone = !isMongos && !hello.hasOwnProperty('setName'); +const isStandalone = !FixtureHelpers.isMongos(db) && !hello.hasOwnProperty('setName'); if (isStandalone) { assert.commandWorked(testColl.reIndex()); lastHistogram = assertHistogramDiffEq(testColl, lastHistogram, 0, 0, 1); diff --git a/jstests/core/optimized_match_explain.js b/jstests/core/optimized_match_explain.js index bf4893519bdd9..dd5216a79bd7a 100644 --- a/jstests/core/optimized_match_explain.js +++ b/jstests/core/optimized_match_explain.js @@ -5,9 +5,7 @@ /** * Tests that the explain output for $match reflects any optimizations. 
*/ -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const coll = db.match_explain; coll.drop(); @@ -23,5 +21,4 @@ let explain = coll.explain().aggregate( [{$sort: {b: -1}}, {$addFields: {c: {$mod: ["$a", 4]}}}, {$match: {$and: [{c: 1}]}}]); assert.commandWorked(explain); -assert.eq(getAggPlanStage(explain, "$match"), {$match: {c: {$eq: 1}}}); -}()); +assert.eq(getAggPlanStage(explain, "$match"), {$match: {c: {$eq: 1}}}); \ No newline at end of file diff --git a/jstests/core/partialFilterExpression_with_geoWithin.js b/jstests/core/partialFilterExpression_with_geoWithin.js index f3ce022636b63..4cc244b4e7ad9 100644 --- a/jstests/core/partialFilterExpression_with_geoWithin.js +++ b/jstests/core/partialFilterExpression_with_geoWithin.js @@ -1,10 +1,8 @@ // @tags: [requires_non_retryable_writes, requires_fcv_51] -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); +import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -(function() { -"use strict"; const coll = db.partialFilterExpression_with_geoWithin; coll.drop(); @@ -166,4 +164,3 @@ if (FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { // inside the limits of our polygon (or in other words, inside the UWS of Manhattan ). assert.eq(results.length, 1); } -})(); diff --git a/jstests/core/query/add_skip_stage_before_fetch.js b/jstests/core/query/add_skip_stage_before_fetch.js index bef29a795e6e6..4d5255198f358 100644 --- a/jstests/core/query/add_skip_stage_before_fetch.js +++ b/jstests/core/query/add_skip_stage_before_fetch.js @@ -8,10 +8,7 @@ // operations_longer_than_stepdown_interval_in_txns, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; const coll = db.add_skip_stage_before_fetch; @@ -65,5 +62,4 @@ assert(isIndexOnly(db, explainResult.queryPlanner.winningPlan)); explainResult = coll.find({a: 0, b: 2}).hint(testIndex).sort({d: 1}).skip(2400).explain("executionStats"); assert.gte(explainResult.executionStats.totalKeysExamined, 2500); -assert.eq(explainResult.executionStats.totalDocsExamined, 2500); -})(); +assert.eq(explainResult.executionStats.totalDocsExamined, 2500); \ No newline at end of file diff --git a/jstests/core/query/agg_hint.js b/jstests/core/query/agg_hint.js index 8bc2748e228fa..071a6d165887e 100644 --- a/jstests/core/query/agg_hint.js +++ b/jstests/core/query/agg_hint.js @@ -11,10 +11,7 @@ // where agg execution differs from query. It also includes confirmation that hint works for find // command against views, which is converted to a hinted aggregation on execution. -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage. 
+import {getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js"; const testDB = db.getSiblingDB("agg_hint"); assert.commandWorked(testDB.dropDatabase()); @@ -261,5 +258,4 @@ confirmCommandUsesIndex({ command: {count: "view", query: {x: 3}}, hintKeyPattern: {_id: 1}, expectedKeyPattern: {_id: 1} -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/query/all/all.js b/jstests/core/query/all/all.js index a718e2615e8dd..9132c6f7c51ed 100644 --- a/jstests/core/query/all/all.js +++ b/jstests/core/query/all/all.js @@ -1,7 +1,7 @@ -t = db.jstests_all; +let t = db.jstests_all; t.drop(); -doTest = function() { +let doTest = function() { assert.commandWorked(t.save({a: [1, 2, 3]})); assert.commandWorked(t.save({a: [1, 2, 4]})); assert.commandWorked(t.save({a: [1, 8, 5]})); diff --git a/jstests/core/query/all/all2.js b/jstests/core/query/all/all2.js index b0c6d40cf68ef..738b5f746d001 100644 --- a/jstests/core/query/all/all2.js +++ b/jstests/core/query/all/all2.js @@ -1,11 +1,11 @@ -t = db.all2; +let t = db.all2; t.drop(); t.save({a: [{x: 1}, {x: 2}]}); t.save({a: [{x: 2}, {x: 3}]}); t.save({a: [{x: 3}, {x: 4}]}); -state = "no index"; +let state = "no index"; function check(n, q, e) { assert.eq(n, t.find(q).count(), tojson(q) + " " + e + " count " + state); diff --git a/jstests/core/query/all/all3.js b/jstests/core/query/all/all3.js index 37cb6c690b24f..ee14e3a1102ef 100644 --- a/jstests/core/query/all/all3.js +++ b/jstests/core/query/all/all3.js @@ -2,7 +2,7 @@ // Check that $all matching null is consistent with $in - SERVER-3820 -t = db.jstests_all3; +let t = db.jstests_all3; t.drop(); t.save({}); diff --git a/jstests/core/query/all/all4.js b/jstests/core/query/all/all4.js index eb97928949653..d3486b467ad4a 100644 --- a/jstests/core/query/all/all4.js +++ b/jstests/core/query/all/all4.js @@ -1,6 +1,6 @@ // Test $all/$elemMatch with missing field - SERVER-4492 -t = db.jstests_all4; +let t = db.jstests_all4; t.drop(); function checkQuery(query, val) { diff --git a/jstests/core/query/all/all5.js b/jstests/core/query/all/all5.js index a5faaa1767f0a..ff14671d52480 100644 --- a/jstests/core/query/all/all5.js +++ b/jstests/core/query/all/all5.js @@ -1,6 +1,6 @@ // Test $all/$elemMatch/null matching - SERVER-4517 -t = db.jstests_all5; +let t = db.jstests_all5; t.drop(); function checkMatch(doc) { diff --git a/jstests/core/query/and/and.js b/jstests/core/query/and/and.js index 20fd583c31fa5..c68f8381886a9 100644 --- a/jstests/core/query/and/and.js +++ b/jstests/core/query/and/and.js @@ -4,7 +4,7 @@ // requires_scripting // ] -t = db.jstests_and; +let t = db.jstests_and; t.drop(); t.save({a: [1, 2]}); diff --git a/jstests/core/query/and/and2.js b/jstests/core/query/and/and2.js index 5a946c2cb76dc..79d4fc590c4fb 100644 --- a/jstests/core/query/and/and2.js +++ b/jstests/core/query/and/and2.js @@ -5,7 +5,7 @@ // Test dollar sign operator with $and SERVER-1089 -t = db.jstests_and2; +let t = db.jstests_and2; t.drop(); t.save({a: [1, 2]}); diff --git a/jstests/core/query/and/and3.js b/jstests/core/query/and/and3.js index 5256237bc33d6..1dfecc87ffa51 100644 --- a/jstests/core/query/and/and3.js +++ b/jstests/core/query/and/and3.js @@ -6,7 +6,7 @@ // assumes_read_concern_local, // ] -t = db.jstests_and3; +let t = db.jstests_and3; t.drop(); t.save({a: 1}); diff --git a/jstests/core/query/and/andor.js b/jstests/core/query/and/andor.js index 5bac12d83ca19..b8ae2df86a906 100644 --- a/jstests/core/query/and/andor.js +++ b/jstests/core/query/and/andor.js @@ -1,6 +1,6 @@ // SERVER-1089 Test 
and/or/nor nesting -t = db.jstests_andor; +let t = db.jstests_andor; t.drop(); // not ok @@ -10,7 +10,7 @@ function ok(q) { t.save({a: 1}); -test = function() { +let test = function() { ok({a: 1}); ok({$and: [{a: 1}]}); diff --git a/jstests/core/query/array/array3.js b/jstests/core/query/array/array3.js index 42acdfb6d3e54..16d03b880ed76 100644 --- a/jstests/core/query/array/array3.js +++ b/jstests/core/query/array/array3.js @@ -2,6 +2,6 @@ assert.eq(5, Array.sum([1, 4]), "A"); assert.eq(2.5, Array.avg([1, 4]), "B"); -arr = [2, 4, 4, 4, 5, 5, 7, 9]; +let arr = [2, 4, 4, 4, 5, 5, 7, 9]; assert.eq(5, Array.avg(arr), "C"); assert.eq(2, Array.stdDev(arr), "D"); diff --git a/jstests/core/query/array/array_match1.js b/jstests/core/query/array/array_match1.js index 0c56e8d4c3484..e21d0a9e7c986 100644 --- a/jstests/core/query/array/array_match1.js +++ b/jstests/core/query/array/array_match1.js @@ -1,4 +1,4 @@ -t = db.array_match1; +let t = db.array_match1; t.drop(); t.insert({_id: 1, a: [5, 5]}); diff --git a/jstests/core/query/array/array_match2.js b/jstests/core/query/array/array_match2.js index 3e0dde8f5f5dc..fa4034d5c3c9e 100644 --- a/jstests/core/query/array/array_match2.js +++ b/jstests/core/query/array/array_match2.js @@ -1,6 +1,6 @@ // @tags: [requires_non_retryable_writes] -t = db.jstests_array_match2; +let t = db.jstests_array_match2; t.drop(); t.save({a: [{1: 4}, 5]}); diff --git a/jstests/core/query/array/array_match3.js b/jstests/core/query/array/array_match3.js index 4990bdd90fde6..ad362e5e43bc1 100644 --- a/jstests/core/query/array/array_match3.js +++ b/jstests/core/query/array/array_match3.js @@ -1,6 +1,6 @@ // SERVER-2902 Test indexing of numerically referenced array elements. -t = db.jstests_array_match3; +let t = db.jstests_array_match3; t.drop(); // Test matching numericallly referenced array element. 
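
The hunks above are instances of the tree-wide change from implicit global assignments (`t = db.foo`) to explicit `let` declarations. A minimal sketch of why the declaration matters once these tests run as strict-mode ES modules (illustrative names only, not part of the patch):

```js
// Minimal sketch (illustrative names, not from the patch): ES modules are always
// strict, so assigning to an undeclared name throws instead of silently creating
// a global, which is why these legacy tests now declare their handles with let.
"use strict";
try {
    coll = db.jstests_example;  // ReferenceError: coll is not defined
} catch (e) {
    print("implicit global rejected: " + e);
}
let coll2 = db.jstests_example;  // an explicit declaration works in both modes
```
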
diff --git a/jstests/core/query/array/arrayfind1.js b/jstests/core/query/array/arrayfind1.js index 4e9330549ae7b..b50ecae78097e 100644 --- a/jstests/core/query/array/arrayfind1.js +++ b/jstests/core/query/array/arrayfind1.js @@ -2,7 +2,7 @@ // requires_fastcount, // ] -t = db.arrayfind1; +let t = db.arrayfind1; t.drop(); t.save({a: [{x: 1}]}); diff --git a/jstests/core/query/array/arrayfind2.js b/jstests/core/query/array/arrayfind2.js index 38de844f6f412..14eb2411e7c70 100644 --- a/jstests/core/query/array/arrayfind2.js +++ b/jstests/core/query/array/arrayfind2.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.arrayfind2; +let t = db.arrayfind2; t.drop(); function go(prefix) { diff --git a/jstests/core/query/array/arrayfind3.js b/jstests/core/query/array/arrayfind3.js index 6dba0bf625ff0..619ed2e1046e4 100644 --- a/jstests/core/query/array/arrayfind3.js +++ b/jstests/core/query/array/arrayfind3.js @@ -1,4 +1,4 @@ -t = db.arrayfind3; +let t = db.arrayfind3; t.drop(); t.save({a: [1, 2]}); diff --git a/jstests/core/query/array/arrayfind4.js b/jstests/core/query/array/arrayfind4.js index 2d7c0e0366844..231abee50dd77 100644 --- a/jstests/core/query/array/arrayfind4.js +++ b/jstests/core/query/array/arrayfind4.js @@ -4,7 +4,7 @@ // Test query empty array SERVER-2258 -t = db.jstests_arrayfind4; +let t = db.jstests_arrayfind4; t.drop(); t.save({a: []}); diff --git a/jstests/core/query/array/arrayfind5.js b/jstests/core/query/array/arrayfind5.js index 004231e08939e..6238284f89a2a 100644 --- a/jstests/core/query/array/arrayfind5.js +++ b/jstests/core/query/array/arrayfind5.js @@ -4,7 +4,7 @@ // cqf_incompatible, // ] -t = db.jstests_arrayfind5; +let t = db.jstests_arrayfind5; t.drop(); function check(nullElemMatch) { diff --git a/jstests/core/query/array/arrayfind6.js b/jstests/core/query/array/arrayfind6.js index bd91859c9dc4e..2788fa6d07e5d 100644 --- a/jstests/core/query/array/arrayfind6.js +++ b/jstests/core/query/array/arrayfind6.js @@ -1,6 +1,6 @@ // Check index bound determination for $not:$elemMatch queries. SERVER-5740 -t = db.jstests_arrayfind6; +let t = db.jstests_arrayfind6; t.drop(); t.save({a: [{b: 1, c: 2}]}); diff --git a/jstests/core/query/array/arrayfind7.js b/jstests/core/query/array/arrayfind7.js index be2061c5c0d92..2d3fe47179716 100644 --- a/jstests/core/query/array/arrayfind7.js +++ b/jstests/core/query/array/arrayfind7.js @@ -1,6 +1,6 @@ // Nested $elemMatch clauses. SERVER-5741 -t = db.jstests_arrayfind7; +let t = db.jstests_arrayfind7; t.drop(); t.save({a: [{b: [{c: 1, d: 2}]}]}); diff --git a/jstests/core/query/array/arrayfind9.js b/jstests/core/query/array/arrayfind9.js index 5406ae8c1d1d3..88a781e999950 100644 --- a/jstests/core/query/array/arrayfind9.js +++ b/jstests/core/query/array/arrayfind9.js @@ -1,6 +1,6 @@ // Assorted $elemMatch behavior checks. 
-t = db.jstests_arrayfind9; +let t = db.jstests_arrayfind9; t.drop(); // Top level field $elemMatch:$not matching @@ -27,8 +27,8 @@ t.drop(); t.save({a: [{b: [0, 2]}]}); t.createIndex({a: 1}); t.createIndex({'a.b': 1}); -plans = [{$natural: 1}, {a: 1}, {'a.b': 1}]; -for (i in plans) { - p = plans[i]; +let plans = [{$natural: 1}, {a: 1}, {'a.b': 1}]; +for (let i in plans) { + let p = plans[i]; assert.eq(1, t.find({a: {$elemMatch: {b: {$gte: 1, $lte: 1}}}}).hint(p).itcount()); } diff --git a/jstests/core/query/array/arrayfinda.js b/jstests/core/query/array/arrayfinda.js index 163af3d8d29ea..f79c7bf7a379c 100644 --- a/jstests/core/query/array/arrayfinda.js +++ b/jstests/core/query/array/arrayfinda.js @@ -1,6 +1,6 @@ // Assorted $elemMatch matching behavior checks. -t = db.jstests_arrayfinda; +let t = db.jstests_arrayfinda; t.drop(); // $elemMatch only matches elements within arrays (a descriptive, not a normative test). diff --git a/jstests/core/query/awaitdata_getmore_cmd.js b/jstests/core/query/awaitdata_getmore_cmd.js index 5efcf6590efac..2deaa5a518984 100644 --- a/jstests/core/query/awaitdata_getmore_cmd.js +++ b/jstests/core/query/awaitdata_getmore_cmd.js @@ -19,39 +19,48 @@ load("jstests/libs/fixture_helpers.js"); load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology. -var cmdRes; -var cursorId; -var defaultBatchSize = 101; -var collName = 'await_data'; -var coll = db[collName]; +let collName = 'await_data_non_capped'; +let coll = db[collName]; // Create a non-capped collection with 10 documents. +jsTestLog('Create a non-capped collection with 10 documents.'); coll.drop(); -for (var i = 0; i < 10; i++) { - assert.commandWorked(coll.insert({a: i})); +let docs = []; +for (let i = 0; i < 10; i++) { + docs.push({a: i}); } +assert.commandWorked(coll.insert(docs)); // Find with tailable flag set should fail for a non-capped collection. -cmdRes = db.runCommand({find: collName, tailable: true}); +jsTestLog('Find with tailable flag set should fail for a non-capped collection.'); +let cmdRes = db.runCommand({find: collName, tailable: true}); assert.commandFailed(cmdRes); // Should also fail in the non-capped case if both the tailable and awaitData flags are set. +jsTestLog( + 'Should also fail in the non-capped case if both the tailable and awaitData flags are set.'); cmdRes = db.runCommand({find: collName, tailable: true, awaitData: true}); assert.commandFailed(cmdRes); // With a non-existent collection, should succeed but return no data and a closed cursor. +jsTestLog('With a non-existent collection, should succeed but return no data and a closed cursor.'); +collName = 'await_data_missing'; +coll = db[collName]; coll.drop(); cmdRes = assert.commandWorked(db.runCommand({find: collName, tailable: true})); assert.eq(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.firstBatch.length, 0); // Create a capped collection with 10 documents. +jsTestLog('Create a capped collection with 10 documents.'); +collName = 'await_data'; // collection name must match parallel shell task. +coll = db[collName]; +coll.drop(); assert.commandWorked(db.createCollection(collName, {capped: true, size: 2048})); -for (var i = 0; i < 10; i++) { - assert.commandWorked(coll.insert({a: i})); -} +assert.commandWorked(coll.insert(docs)); // GetMore should succeed if query has awaitData but no maxTimeMS is supplied. 
+jsTestLog('getMore should succeed if query has awaitData but no maxTimeMS is supplied.'); cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true}); assert.commandWorked(cmdRes); assert.gt(cmdRes.cursor.id, NumberLong(0)); @@ -63,6 +72,7 @@ assert.gt(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.ns, coll.getFullName()); // Should also succeed if maxTimeMS is supplied on the original find. +jsTestLog('Should also succeed if maxTimeMS is supplied on the original find.'); const sixtyMinutes = 60 * 60 * 1000; cmdRes = db.runCommand( {find: collName, batchSize: 2, awaitData: true, tailable: true, maxTimeMS: sixtyMinutes}); @@ -76,6 +86,7 @@ assert.gt(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.ns, coll.getFullName()); // Check that we can set up a tailable cursor over the capped collection. +jsTestLog('Check that we can set up a tailable cursor over the capped collection.'); cmdRes = db.runCommand({find: collName, batchSize: 5, awaitData: true, tailable: true}); assert.commandWorked(cmdRes); assert.gt(cmdRes.cursor.id, NumberLong(0)); @@ -84,6 +95,8 @@ assert.eq(cmdRes.cursor.firstBatch.length, 5); // Check that tailing the capped collection with awaitData eventually ends up returning an empty // batch after hitting the timeout. +jsTestLog('Check that tailing the capped collection with awaitData eventually ends up returning ' + + 'an empty batch after hitting the timeout.'); cmdRes = db.runCommand({find: collName, batchSize: 2, awaitData: true, tailable: true}); assert.commandWorked(cmdRes); assert.gt(cmdRes.cursor.id, NumberLong(0)); @@ -91,6 +104,7 @@ assert.eq(cmdRes.cursor.ns, coll.getFullName()); assert.eq(cmdRes.cursor.firstBatch.length, 2); // Issue getMore until we get an empty batch of results. +jsTestLog('Issue getMore until we get an empty batch of results.'); cmdRes = db.runCommand({ getMore: cmdRes.cursor.id, collection: coll.getName(), @@ -102,8 +116,10 @@ assert.gt(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.ns, coll.getFullName()); // Keep issuing getMore until we get an empty batch after the timeout expires. +jsTestLog('Keep issuing getMore until we get an empty batch after the timeout expires.'); +let now; while (cmdRes.cursor.nextBatch.length > 0) { - var now = new Date(); + now = new Date(); cmdRes = db.runCommand({ getMore: cmdRes.cursor.id, collection: coll.getName(), @@ -111,6 +127,7 @@ while (cmdRes.cursor.nextBatch.length > 0) { maxTimeMS: 4000 }); assert.commandWorked(cmdRes); + jsTestLog('capped collection tailing cursor getMore: ' + now + ': ' + tojson(cmdRes)); assert.gt(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.ns, coll.getFullName()); } @@ -118,38 +135,55 @@ assert.gte((new Date()) - now, 2000); // Repeat the test, this time tailing the oplog rather than a user-created capped collection. // The oplog tailing in not possible on mongos. 
+jsTestLog( + 'Repeat the test, this time tailing the oplog rather than a user-created capped collection.'); if (FixtureHelpers.isReplSet(db)) { - var localDB = db.getSiblingDB("local"); - var oplogColl = localDB.oplog.rs; + const localDB = db.getSiblingDB("local"); + const oplogColl = localDB.oplog.rs; - cmdRes = localDB.runCommand( - {find: oplogColl.getName(), batchSize: 2, awaitData: true, tailable: true}); + jsTestLog('Check that tailing the oplog with awaitData eventually ends up returning ' + + 'an empty batch after hitting the timeout.'); + cmdRes = localDB.runCommand({ + find: oplogColl.getName(), + batchSize: 2, + awaitData: true, + tailable: true, + filter: {ns: {$ne: "config.system.sessions"}} + }); assert.commandWorked(cmdRes); + jsTestLog('Oplog tailing result: ' + tojson(cmdRes)); if (cmdRes.cursor.id > NumberLong(0)) { assert.eq(cmdRes.cursor.ns, oplogColl.getFullName()); assert.eq(cmdRes.cursor.firstBatch.length, 2); + jsTestLog('Issue getMore on the oplog until we get an empty batch of results.'); cmdRes = localDB.runCommand( {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 1000}); assert.commandWorked(cmdRes); assert.gt(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.ns, oplogColl.getFullName()); - while (cmdRes.cursor.nextBatch.length > 0) { + jsTestLog('Keep issuing getMore on the oplog until we get an empty batch after the ' + + 'timeout expires.'); + assert.soon(() => { now = new Date(); cmdRes = localDB.runCommand( {getMore: cmdRes.cursor.id, collection: oplogColl.getName(), maxTimeMS: 4000}); assert.commandWorked(cmdRes); + jsTestLog('oplog tailing cursor getMore: ' + now + ': ' + tojson(cmdRes)); assert.gt(cmdRes.cursor.id, NumberLong(0)); assert.eq(cmdRes.cursor.ns, oplogColl.getFullName()); - } + return cmdRes.cursor.nextBatch.length == 0; + }); assert.gte((new Date()) - now, 2000); } } -let originalCmdLogLevel = assert.commandWorked(db.setLogLevel(5, 'command')).was.command.verbosity; -let originalQueryLogLevel = assert.commandWorked(db.setLogLevel(5, 'query')).was.query.verbosity; +const originalCmdLogLevel = + assert.commandWorked(db.setLogLevel(5, 'command')).was.command.verbosity; +const originalQueryLogLevel = assert.commandWorked(db.setLogLevel(5, 'query')).was.query.verbosity; +jsTestLog('Test filtered inserts while writing to a capped collection.'); try { // Test filtered inserts while writing to a capped collection. // Find with a filter which doesn't match any documents in the collection. 
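
The awaitdata_getmore_cmd.js hunks above replace an open-ended `while` loop over getMore with `assert.soon`. Condensed, the tailing pattern the patch adopts looks roughly like this (a sketch, assuming `coll` is the capped collection and `cmdRes` holds the response that opened the tailable, awaitData cursor):

```js
// Condensed sketch of the polling pattern adopted above (assumes 'coll' and an
// already-established tailable, awaitData cursor in 'cmdRes'). assert.soon
// retries the getMore until a batch finally comes back empty, instead of
// hanging the test in a bare while loop.
assert.soon(() => {
    cmdRes = db.runCommand({
        getMore: cmdRes.cursor.id,
        collection: coll.getName(),
        batchSize: 2,
        maxTimeMS: 4000
    });
    assert.commandWorked(cmdRes);
    assert.gt(cmdRes.cursor.id, NumberLong(0));   // tailable cursor stays open
    return cmdRes.cursor.nextBatch.length === 0;  // done once a batch is empty
});
```
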
diff --git a/jstests/core/query/basic1.js b/jstests/core/query/basic1.js index cc2917fb5a32c..6ed22a1422cea 100644 --- a/jstests/core/query/basic1.js +++ b/jstests/core/query/basic1.js @@ -1,11 +1,9 @@ // @tags: [does_not_support_stepdowns] -t = db.getCollection("basic1"); +let t = db.getCollection("basic1"); t.drop(); -o = { - a: 1 -}; +let o = {a: 1}; t.save(o); assert.eq(1, t.findOne().a, "first"); diff --git a/jstests/core/query/basic2.js b/jstests/core/query/basic2.js index 3500d9fbdb5eb..d4501e3474fb0 100644 --- a/jstests/core/query/basic2.js +++ b/jstests/core/query/basic2.js @@ -3,12 +3,10 @@ // requires_non_retryable_writes, // ] -t = db.getCollection("basic2"); +let t = db.getCollection("basic2"); t.drop(); -o = { - n: 2 -}; +let o = {n: 2}; t.save(o); assert.eq(1, t.find().count()); diff --git a/jstests/core/query/basic4.js b/jstests/core/query/basic4.js index 4b2cf6f96be7a..e94c5868c80f5 100644 --- a/jstests/core/query/basic4.js +++ b/jstests/core/query/basic4.js @@ -1,4 +1,4 @@ -t = db.getCollection("basic4"); +let t = db.getCollection("basic4"); t.drop(); t.save({a: 1, b: 1.0}); diff --git a/jstests/core/query/basic5.js b/jstests/core/query/basic5.js index 7ec41ef7872e0..e1a0df35bbbeb 100644 --- a/jstests/core/query/basic5.js +++ b/jstests/core/query/basic5.js @@ -1,4 +1,4 @@ -t = db.getCollection("basic5"); +let t = db.getCollection("basic5"); t.drop(); t.save({a: 1, b: [1, 2, 3]}); diff --git a/jstests/core/query/basic6.js b/jstests/core/query/basic6.js index 89aef4acc2e44..fb8d85b2f794e 100644 --- a/jstests/core/query/basic6.js +++ b/jstests/core/query/basic6.js @@ -4,7 +4,7 @@ * ] */ -t = db.basic6; +let t = db.basic6; t.findOne(); t.a.findOne(); diff --git a/jstests/core/query/basic7.js b/jstests/core/query/basic7.js index dc6f18cc95bd6..334033803555f 100644 --- a/jstests/core/query/basic7.js +++ b/jstests/core/query/basic7.js @@ -1,5 +1,5 @@ -t = db.basic7; +let t = db.basic7; t.drop(); t.save({a: 1}); diff --git a/jstests/core/query/basic8.js b/jstests/core/query/basic8.js index 4a35de6963b3f..58ab069fa3bb7 100644 --- a/jstests/core/query/basic8.js +++ b/jstests/core/query/basic8.js @@ -1,10 +1,10 @@ // @tags: [requires_fastcount] -t = db.basic8; +let t = db.basic8; t.drop(); t.save({a: 1}); -o = t.findOne(); +let o = t.findOne(); o.b = 2; t.save(o); diff --git a/jstests/core/query/basica.js b/jstests/core/query/basica.js index 1fe8b7c5de4f3..569b41ee90d10 100644 --- a/jstests/core/query/basica.js +++ b/jstests/core/query/basica.js @@ -1,11 +1,11 @@ -t = db.basica; +let t = db.basica; t.drop(); t.save({a: 1, b: [{x: 2, y: 2}, {x: 3, y: 3}]}); -x = t.findOne(); +let x = t.findOne(); x.b["0"].x = 4; x.b["0"].z = 4; x.b[0].m = 9; diff --git a/jstests/core/query/bittest.js b/jstests/core/query/bittest.js index a4a7272ae28fe..77aa3f46cab60 100644 --- a/jstests/core/query/bittest.js +++ b/jstests/core/query/bittest.js @@ -4,10 +4,7 @@ * assumes_read_concern_local, * ] */ -(function() { -'use strict'; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, isCollscan} from "jstests/libs/analyze_plan.js"; var coll = db.jstests_bitwise; @@ -217,5 +214,4 @@ assertQueryCorrect({ }, 4); -assert(coll.drop()); -})(); +assert(coll.drop()); \ No newline at end of file diff --git a/jstests/core/query/count/count3.js b/jstests/core/query/count/count3.js index d93df020f0b0d..51fb5d5d4c84b 100644 --- a/jstests/core/query/count/count3.js +++ b/jstests/core/query/count/count3.js @@ -1,5 +1,5 @@ -t = db.count3; +let t = db.count3; t.drop(); diff --git 
a/jstests/core/query/count/count5.js b/jstests/core/query/count/count5.js index ceedf62b33625..81fd8786bfc91 100644 --- a/jstests/core/query/count/count5.js +++ b/jstests/core/query/count/count5.js @@ -1,15 +1,13 @@ // @tags: [requires_fastcount] -t = db.count5; +let t = db.count5; t.drop(); -for (i = 0; i < 100; i++) { +for (let i = 0; i < 100; i++) { t.save({x: i}); } -q = { - x: {$gt: 25, $lte: 75} -}; +let q = {x: {$gt: 25, $lte: 75}}; assert.eq(50, t.find(q).count(), "A"); assert.eq(50, t.find(q).itcount(), "B"); diff --git a/jstests/core/query/count/count6.js b/jstests/core/query/count/count6.js index 78735c89c2659..8814b2b76a0fa 100644 --- a/jstests/core/query/count/count6.js +++ b/jstests/core/query/count/count6.js @@ -2,7 +2,7 @@ // // @tags: [requires_fastcount] -t = db.jstests_count6; +let t = db.jstests_count6; function checkCountForObject(obj) { t.drop(); diff --git a/jstests/core/query/count/count7.js b/jstests/core/query/count/count7.js index 443134474a868..cef47b46c2c04 100644 --- a/jstests/core/query/count/count7.js +++ b/jstests/core/query/count/count7.js @@ -2,7 +2,7 @@ // Check normal count matching and deduping. -t = db.jstests_count7; +let t = db.jstests_count7; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/query/count/count9.js b/jstests/core/query/count/count9.js index 062f099e513de..250ada94cb1dd 100644 --- a/jstests/core/query/count/count9.js +++ b/jstests/core/query/count/count9.js @@ -1,6 +1,6 @@ // Test fast mode count with multikey entries. -t = db.jstests_count9; +let t = db.jstests_count9; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/query/count/countb.js b/jstests/core/query/count/countb.js index 869825b25a1c2..cbce10c7e4e4b 100644 --- a/jstests/core/query/count/countb.js +++ b/jstests/core/query/count/countb.js @@ -4,7 +4,7 @@ // requires_scripting, // ] -t = db.jstests_countb; +let t = db.jstests_countb; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/query/covered_multikey.js b/jstests/core/query/covered_multikey.js index 9270a2b2c20b0..19bfd54c4f6b3 100644 --- a/jstests/core/query/covered_multikey.js +++ b/jstests/core/query/covered_multikey.js @@ -8,11 +8,8 @@ /** * Test covering behavior for queries over a multikey index. */ -(function() { -"use strict"; - // For making assertions about explain output. -load("jstests/libs/analyze_plan.js"); +import {getPlanStage, getWinningPlan, isIxscan, planHasStage} from "jstests/libs/analyze_plan.js"; let coll = db.covered_multikey; coll.drop(); @@ -116,5 +113,4 @@ explainRes = coll.explain().find().hint({"a.b": 1, c: "2dsphere"}).finish(); winningPlan = getWinningPlan(explainRes.queryPlanner); ixscanStage = getPlanStage(winningPlan, "IXSCAN"); assert.neq(null, ixscanStage); -assert.eq(true, ixscanStage.isMultiKey); -}()); +assert.eq(true, ixscanStage.isMultiKey); \ No newline at end of file diff --git a/jstests/core/query/cursor/aggregation_accepts_write_concern.js b/jstests/core/query/cursor/aggregation_accepts_write_concern.js index 2c764414a1d7b..8117c296e03d5 100644 --- a/jstests/core/query/cursor/aggregation_accepts_write_concern.js +++ b/jstests/core/query/cursor/aggregation_accepts_write_concern.js @@ -1,7 +1,11 @@ /** * Confirms that the aggregate command accepts writeConcern regardless of whether the pipeline * writes or is read-only. 
- * @tags: [assumes_write_concern_unchanged, does_not_support_stepdowns] + * @tags: [ + * assumes_write_concern_unchanged, + * does_not_support_stepdowns, + * references_foreign_collection + * ] */ (function() { "use strict"; diff --git a/jstests/core/query/cursor/cursor1.js b/jstests/core/query/cursor/cursor1.js index 1cb00cc82fb0d..458787bf6d0c6 100644 --- a/jstests/core/query/cursor/cursor1.js +++ b/jstests/core/query/cursor/cursor1.js @@ -1,13 +1,13 @@ // @tags: [requires_getmore, requires_fastcount] -t = db.cursor1; +let t = db.cursor1; t.drop(); -big = ""; +let big = ""; while (big.length < 50000) big += "asdasdasdasdsdsdadsasdasdasD"; -num = Math.ceil(10000000 / big.length); +let num = Math.ceil(10000000 / big.length); for (var i = 0; i < num; i++) { t.save({num: i, str: big}); diff --git a/jstests/core/query/cursor/cursor3.js b/jstests/core/query/cursor/cursor3.js index 8e5672d4b3b27..acf1339c345b7 100644 --- a/jstests/core/query/cursor/cursor3.js +++ b/jstests/core/query/cursor/cursor3.js @@ -4,19 +4,19 @@ // assumes_read_concern_local, // ] -testNum = 1; +let testNum = 1; function checkResults(expected, cursor, testNum) { assert.eq(expected.length, cursor.count(), "testNum: " + testNum + " A : " + tojson(cursor.toArray()) + " " + tojson(cursor.explain())); - for (i = 0; i < expected.length; ++i) { + for (let i = 0; i < expected.length; ++i) { assert.eq(expected[i], cursor[i]["a"], "testNum: " + testNum + " B"); } } -t = db.cursor3; +let t = db.cursor3; t.drop(); t.save({a: 0}); diff --git a/jstests/core/query/cursor/cursor4.js b/jstests/core/query/cursor/cursor4.js index d0440d329f1bf..32688ef7c558b 100644 --- a/jstests/core/query/cursor/cursor4.js +++ b/jstests/core/query/cursor/cursor4.js @@ -2,21 +2,21 @@ function checkResults(expected, cursor) { assert.eq(expected.length, cursor.count()); - for (i = 0; i < expected.length; ++i) { + for (let i = 0; i < expected.length; ++i) { assert.eq(expected[i].a, cursor[i].a); assert.eq(expected[i].b, cursor[i].b); } } function testConstrainedFindMultiFieldSorting(db) { - r = db.ed_db_cursor4_cfmfs; + let r = db.ed_db_cursor4_cfmfs; r.drop(); - entries = [{a: 0, b: 0}, {a: 0, b: 1}, {a: 1, b: 1}, {a: 1, b: 1}, {a: 2, b: 0}]; - for (i = 0; i < entries.length; ++i) + let entries = [{a: 0, b: 0}, {a: 0, b: 1}, {a: 1, b: 1}, {a: 1, b: 1}, {a: 2, b: 0}]; + for (let i = 0; i < entries.length; ++i) r.save(entries[i]); r.createIndex({a: 1, b: 1}); - reverseEntries = entries.slice(); + let reverseEntries = entries.slice(); reverseEntries.reverse(); checkResults(entries.slice(2, 4), r.find({a: 1, b: 1}).sort({a: 1, b: 1}).hint({a: 1, b: 1})); diff --git a/jstests/core/query/cursor/cursor5.js b/jstests/core/query/cursor/cursor5.js index aab03473255ee..eab5e5c5cc527 100644 --- a/jstests/core/query/cursor/cursor5.js +++ b/jstests/core/query/cursor/cursor5.js @@ -2,7 +2,7 @@ function checkResults(expected, cursor) { assert.eq(expected.length, cursor.count()); - for (i = 0; i < expected.length; ++i) { + for (let i = 0; i < expected.length; ++i) { assert.eq(expected[i].a.b, cursor[i].a.b); assert.eq(expected[i].a.c, cursor[i].a.c); assert.eq(expected[i].a.d, cursor[i].a.d); @@ -11,10 +11,10 @@ function checkResults(expected, cursor) { } function testBoundsWithSubobjectIndexes(db) { - r = db.ed_db_cursor5_bwsi; + let r = db.ed_db_cursor5_bwsi; r.drop(); - z = [ + let z = [ {a: {b: 1, c: 2, d: 3}, e: 4}, {a: {b: 1, c: 2, d: 3}, e: 5}, {a: {b: 1, c: 2, d: 4}, e: 4}, @@ -22,11 +22,11 @@ function testBoundsWithSubobjectIndexes(db) { {a: {b: 2, c: 2, d: 3}, e: 
4}, {a: {b: 2, c: 2, d: 3}, e: 5} ]; - for (i = 0; i < z.length; ++i) + for (let i = 0; i < z.length; ++i) r.save(z[i]); - idx = {"a.d": 1, a: 1, e: -1}; - rIdx = {"a.d": -1, a: -1, e: 1}; - r.createIndex(idx); + let idx = {"a.d": 1, a: 1, e: -1}; + let rIdx = {"a.d": -1, a: -1, e: 1}; + assert.commandWorked(r.createIndex(idx)); checkResults([z[0], z[4], z[2]], r.find({e: 4}).sort(idx).hint(idx)); checkResults([z[1], z[3]], r.find({e: {$gt: 4}, "a.b": 1}).sort(idx).hint(idx)); diff --git a/jstests/core/query/cursor/cursor6.js b/jstests/core/query/cursor/cursor6.js index dde1f9069cb3a..ad55dad6d2579 100644 --- a/jstests/core/query/cursor/cursor6.js +++ b/jstests/core/query/cursor/cursor6.js @@ -13,7 +13,7 @@ function check(indexed) { hint = {$natural: 1}; } - f = r.find().sort({a: 1, b: 1}).hint(hint); + let f = r.find().sort({a: 1, b: 1}).hint(hint); eq(z[0], f[0]); eq(z[1], f[1]); eq(z[2], f[2]); @@ -50,11 +50,11 @@ function check(indexed) { eq(z[0], f[3]); } -r = db.ed_db_cursor6; +let r = db.ed_db_cursor6; r.drop(); -z = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 2, b: 1}, {a: 2, b: 2}]; -for (i = 0; i < z.length; ++i) +let z = [{a: 1, b: 1}, {a: 1, b: 2}, {a: 2, b: 1}, {a: 2, b: 2}]; +for (let i = 0; i < z.length; ++i) r.save(z[i]); r.createIndex({a: 1, b: -1}); diff --git a/jstests/core/query/cursor/cursor7.js b/jstests/core/query/cursor/cursor7.js index 4a21c4202740f..9e75ff605fff8 100644 --- a/jstests/core/query/cursor/cursor7.js +++ b/jstests/core/query/cursor/cursor7.js @@ -2,21 +2,21 @@ function checkResults(expected, cursor) { assert.eq(expected.length, cursor.count()); - for (i = 0; i < expected.length; ++i) { + for (let i = 0; i < expected.length; ++i) { assert.eq(expected[i].a, cursor[i].a); assert.eq(expected[i].b, cursor[i].b); } } function testMultipleInequalities(db) { - r = db.ed_db_cursor_mi; + let r = db.ed_db_cursor_mi; r.drop(); - z = [{a: 1, b: 2}, {a: 3, b: 4}, {a: 5, b: 6}, {a: 7, b: 8}]; - for (i = 0; i < z.length; ++i) + let z = [{a: 1, b: 2}, {a: 3, b: 4}, {a: 5, b: 6}, {a: 7, b: 8}]; + for (let i = 0; i < z.length; ++i) r.save(z[i]); - idx = {a: 1, b: 1}; - rIdx = {a: -1, b: -1}; + let idx = {a: 1, b: 1}; + let rIdx = {a: -1, b: -1}; r.createIndex(idx); checkResults([z[2], z[3]], r.find({a: {$gt: 3}}).sort(idx).hint(idx)); diff --git a/jstests/core/query/cursor/tailable_cursor_invalidation.js b/jstests/core/query/cursor/tailable_cursor_invalidation.js index d59e852d63df2..1cd82554ef564 100644 --- a/jstests/core/query/cursor/tailable_cursor_invalidation.js +++ b/jstests/core/query/cursor/tailable_cursor_invalidation.js @@ -6,6 +6,7 @@ // # This test has statements that do not support non-local read concern. // does_not_support_causal_consistency, // ] +load("jstests/libs/fixture_helpers.js"); // Tests for the behavior of tailable cursors when a collection is dropped or the cursor is // otherwise invalidated. @@ -26,8 +27,7 @@ const emptyBatchCursorId = assert .commandWorked(db.runCommand( {find: collName, tailable: true, awaitData: true, batchSize: 0})) .cursor.id; -const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid; -if (isMongos) { +if (FixtureHelpers.isMongos(db)) { // Mongos will let you establish a cursor with batch size 0 and return to you before it // realizes the shard's cursor is exhausted. The next getMore should return a 0 cursor id // though. 
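
The tailable_cursor_invalidation.js change above swaps a hand-rolled `isdbgrid` probe for the shared `FixtureHelpers.isMongos(db)` helper (loaded from jstests/libs/fixture_helpers.js, as in the hunk). Side by side, the two equivalent checks look roughly like this sketch:

```js
// Rough sketch of the consolidation above: rather than each test probing the
// server with the isdbgrid command, the shared helper answers the same question.
load("jstests/libs/fixture_helpers.js");

const viaCommand = !!db.adminCommand({isdbgrid: 1}).isdbgrid;  // older, per-test probe
const viaHelper = FixtureHelpers.isMongos(db);                 // helper used in the hunk

assert.eq(viaCommand, viaHelper);  // both report whether this connection is a mongos
if (viaHelper) {
    jsTestLog("Connected to a mongos; a batchSize-0 cursor may exhaust on the next getMore.");
}
```
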
diff --git a/jstests/core/query/date/date1.js b/jstests/core/query/date/date1.js index 65449c662b578..f5e608bbaeb6a 100644 --- a/jstests/core/query/date/date1.js +++ b/jstests/core/query/date/date1.js @@ -1,5 +1,5 @@ -t = db.date1; +let t = db.date1; function go(d, msg) { t.drop(); diff --git a/jstests/core/query/date/date2.js b/jstests/core/query/date/date2.js index a398058d7716e..b1d8c3d8b2ffb 100644 --- a/jstests/core/query/date/date2.js +++ b/jstests/core/query/date/date2.js @@ -1,6 +1,6 @@ // Check that it's possible to compare a Date to a Timestamp, but they are never equal - SERVER-3304 -t = db.jstests_date2; +let t = db.jstests_date2; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/query/date/date3.js b/jstests/core/query/date/date3.js index e3eaea620ecf1..06f8af3fce758 100644 --- a/jstests/core/query/date/date3.js +++ b/jstests/core/query/date/date3.js @@ -2,12 +2,12 @@ // // @tags: [requires_fastcount] -t = db.date3; +let t = db.date3; t.drop(); -d1 = new Date(-1000); -dz = new Date(0); -d2 = new Date(1000); +let d1 = new Date(-1000); +let dz = new Date(0); +let d2 = new Date(1000); t.save({x: 3, d: dz}); t.save({x: 2, d: d2}); diff --git a/jstests/core/query/dbref/dbref1.js b/jstests/core/query/dbref/dbref1.js index b5bb06f230d2d..da88f8d552548 100644 --- a/jstests/core/query/dbref/dbref1.js +++ b/jstests/core/query/dbref/dbref1.js @@ -1,6 +1,6 @@ -a = db.dbref1a; -b = db.dbref1b; +let a = db.dbref1a; +let b = db.dbref1b; a.drop(); b.drop(); diff --git a/jstests/core/query/dbref/dbref2.js b/jstests/core/query/dbref/dbref2.js index be0deefeb2d9f..4efddccbbac4a 100644 --- a/jstests/core/query/dbref/dbref2.js +++ b/jstests/core/query/dbref/dbref2.js @@ -1,6 +1,6 @@ -a = db.dbref2a; -b = db.dbref2b; -c = db.dbref2c; +let a = db.dbref2a; +let b = db.dbref2b; +let c = db.dbref2c; a.drop(); b.drop(); diff --git a/jstests/core/query/dbref/ref.js b/jstests/core/query/dbref/ref.js index 0d1160482fc50..696b44c7d9174 100644 --- a/jstests/core/query/dbref/ref.js +++ b/jstests/core/query/dbref/ref.js @@ -10,7 +10,7 @@ assert.throws(function() { }); db.things.save({name: "abc"}); -x = db.things.findOne(); +let x = db.things.findOne(); x.o = new DBPointer("otherthings", other._id); db.things.save(x); diff --git a/jstests/core/query/dbref/ref2.js b/jstests/core/query/dbref/ref2.js index 6b284b1f59f2e..a17b084e8b108 100644 --- a/jstests/core/query/dbref/ref2.js +++ b/jstests/core/query/dbref/ref2.js @@ -1,16 +1,10 @@ // @tags: [requires_fastcount] -t = db.ref2; +let t = db.ref2; t.drop(); -a = { - $ref: "foo", - $id: 1 -}; -b = { - $ref: "foo", - $id: 2 -}; +let a = {$ref: "foo", $id: 1}; +let b = {$ref: "foo", $id: 2}; t.save({name: "a", r: a}); t.save({name: "b", r: b}); diff --git a/jstests/core/query/dbref/ref3.js b/jstests/core/query/dbref/ref3.js index 4406863d89966..f73c7d0fad404 100644 --- a/jstests/core/query/dbref/ref3.js +++ b/jstests/core/query/dbref/ref3.js @@ -8,7 +8,7 @@ var other = {s: "other thing", n: 1}; db.otherthings3.save(other); db.things3.save({name: "abc"}); -x = db.things3.findOne(); +let x = db.things3.findOne(); x.o = new DBRef("otherthings3", other._id); db.things3.save(x); diff --git a/jstests/core/query/dbref/ref4.js b/jstests/core/query/dbref/ref4.js index 882253f38837a..45519b8cac168 100644 --- a/jstests/core/query/dbref/ref4.js +++ b/jstests/core/query/dbref/ref4.js @@ -1,6 +1,6 @@ -a = db.ref4a; -b = db.ref4b; +let a = db.ref4a; +let b = db.ref4b; a.drop(); b.drop(); @@ -11,7 +11,7 @@ b.save(other); a.save({name: "abc", others: [new DBRef("ref4b", 
other._id), new DBPointer("ref4b", other._id)]}); assert(a.findOne().others[0].fetch().n == 17, "dbref broken 1"); -x = Array.fetchRefs(a.findOne().others); +let x = Array.fetchRefs(a.findOne().others); assert.eq(2, x.length, "A"); assert.eq(17, x[0].n, "B"); assert.eq(17, x[1].n, "C"); diff --git a/jstests/core/query/distinct/distinct2.js b/jstests/core/query/distinct/distinct2.js index fc6ff7779b75a..9b886e56b433e 100644 --- a/jstests/core/query/distinct/distinct2.js +++ b/jstests/core/query/distinct/distinct2.js @@ -1,5 +1,5 @@ -t = db.distinct2; +let t = db.distinct2; t.drop(); t.save({a: null}); diff --git a/jstests/core/query/distinct/distinct3.js b/jstests/core/query/distinct/distinct3.js index c2aaaad79e5a5..eb3a0077d9c38 100644 --- a/jstests/core/query/distinct/distinct3.js +++ b/jstests/core/query/distinct/distinct3.js @@ -6,35 +6,35 @@ // Yield and delete test case for query optimizer cursor. SERVER-4401 -t = db.jstests_distinct3; +let t = db.jstests_distinct3; t.drop(); t.createIndex({a: 1}); t.createIndex({b: 1}); var bulk = t.initializeUnorderedBulkOp(); -for (i = 0; i < 50; ++i) { - for (j = 0; j < 2; ++j) { +for (let i = 0; i < 50; ++i) { + for (let j = 0; j < 2; ++j) { bulk.insert({a: i, c: i, d: j}); } } -for (i = 0; i < 100; ++i) { +for (let i = 0; i < 100; ++i) { bulk.insert({b: i, c: i + 50}); } assert.commandWorked(bulk.execute()); // Attempt to remove the last match for the {a:1} index scan while distinct is yielding. -p = startParallelShell('for( i = 0; i < 100; ++i ) { ' + - ' var bulk = db.jstests_distinct3.initializeUnorderedBulkOp();' + - ' bulk.find( { a:49 } ).remove(); ' + - ' for( j = 0; j < 20; ++j ) { ' + - ' bulk.insert( { a:49, c:49, d:j } ); ' + - ' } ' + - ' assert.commandWorked(bulk.execute()); ' + - '} '); +let p = startParallelShell('for( i = 0; i < 100; ++i ) { ' + + ' var bulk = db.jstests_distinct3.initializeUnorderedBulkOp();' + + ' bulk.find( { a:49 } ).remove(); ' + + ' for( j = 0; j < 20; ++j ) { ' + + ' bulk.insert( { a:49, c:49, d:j } ); ' + + ' } ' + + ' assert.commandWorked(bulk.execute()); ' + + '} '); -for (i = 0; i < 100; ++i) { - count = t.distinct('c', {$or: [{a: {$gte: 0}, d: 0}, {b: {$gte: 0}}]}).length; +for (let i = 0; i < 100; ++i) { + let count = t.distinct('c', {$or: [{a: {$gte: 0}, d: 0}, {b: {$gte: 0}}]}).length; assert.gt(count, 100); } diff --git a/jstests/core/query/distinct/distinct_array1.js b/jstests/core/query/distinct/distinct_array1.js index cb82c25bbff14..a812c1c23f0ad 100644 --- a/jstests/core/query/distinct/distinct_array1.js +++ b/jstests/core/query/distinct/distinct_array1.js @@ -1,4 +1,4 @@ -t = db.distinct_array1; +let t = db.distinct_array1; t.drop(); t.save({a: [1, 2, 3]}); @@ -7,7 +7,7 @@ t.save({a: [3, 4, 5]}); t.save({a: 9}); // Without index. -res = t.distinct("a").sort(); +let res = t.distinct("a").sort(); assert.eq("1,2,3,4,5,9", res.toString()); // Array element 0 without index. diff --git a/jstests/core/query/distinct/distinct_compound_index.js b/jstests/core/query/distinct/distinct_compound_index.js index dfca2d712f35b..d7ec41e39c7f7 100644 --- a/jstests/core/query/distinct/distinct_compound_index.js +++ b/jstests/core/query/distinct/distinct_compound_index.js @@ -4,11 +4,12 @@ // # Asserts that some queries use a collection scan. // assumes_no_implicit_index_creation, // ] -(function() { -"use strict"; - load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. -load("jstests/libs/analyze_plan.js"); // For planHasStage. 
+import { + getWinningPlan, + planHasStage, + assertStagesForExplainOfCommand +} from "jstests/libs/analyze_plan.js"; const coll = db.distinct_multikey_index; @@ -55,10 +56,12 @@ assertStagesForExplainOfCommand({ assert.commandWorked(coll.dropIndexes()); assert.commandWorked(coll.createIndex({a: 1, b: 1, text: "text"})); -assertStagesForExplainOfCommand({ +// TODO SERVER-76084: build a test similar to this to check that the distinct output contains the +// prefix according to expectPrefix value/presence ie. +// if (!expectPrefix) assert.eq(result["queryPlanner"]["namespace"], "test.distinct_multikey_index") +let result = assertStagesForExplainOfCommand({ coll: coll, cmdObj: cmdObj, expectedStages: ["COLLSCAN"], stagesNotExpected: ["DISTINCT_SCAN"] -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/query/distinct/distinct_hint.js b/jstests/core/query/distinct/distinct_hint.js new file mode 100644 index 0000000000000..3b8749e60e5fc --- /dev/null +++ b/jstests/core/query/distinct/distinct_hint.js @@ -0,0 +1,70 @@ +/** + * This test ensures that hint on the distinct command works. + * + * @tags: [ + * assumes_unsharded_collection, + * requires_fcv_71, + * ] + */ + +import {getPlanStage} from "jstests/libs/analyze_plan.js"; + +const collName = "jstests_explain_distinct_hint"; +const coll = db[collName]; + +coll.drop(); + +// Insert the data to perform distinct() on. +assert.commandWorked(db.coll.insert({a: 1, b: 2})); +assert.commandWorked(db.coll.insert({a: 1, b: 2, c: 3})); +assert.commandWorked(db.coll.insert({a: 2, b: 2, d: 3})); +assert.commandWorked(db.coll.insert({a: 1, b: 2})); +assert.commandWorked(db.coll.createIndex({a: 1})); +assert.commandWorked(db.coll.createIndex({b: 1})); +assert.commandWorked(db.coll.createIndex({x: 1}, {sparse: true})); + +// Use .explain() to make sure the index we specify is being used when we use a hint. +let explain = db.coll.explain().distinct("a", {a: 1, b: 2}); +assert.eq(getPlanStage(explain, "IXSCAN").indexName, "a_1"); + +explain = db.coll.explain().distinct("a", {a: 1, b: 2}, {hint: {b: 1}}); +let ixScanStage = getPlanStage(explain, "IXSCAN"); +assert(ixScanStage, tojson(explain)); +assert.eq(ixScanStage.indexName, "b_1", tojson(ixScanStage)); +assert.eq(explain.command.hint, {"b": 1}); + +explain = db.coll.explain().distinct("a", {a: 1, b: 2}, {hint: "b_1"}); +ixScanStage = getPlanStage(explain, "IXSCAN"); +assert(ixScanStage, tojson(explain)); +assert.eq(ixScanStage.indexName, "b_1"); +assert.eq(explain.command.hint, "b_1"); + +// Make sure the hint produces the right values when the query is run. 
+let cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {a: 1}}); +assert.eq(1, cmdObj.values); + +cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: "a_1"}); +assert.eq(1, cmdObj.values); + +cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {b: 1}}); +assert.eq(1, cmdObj.values); + +cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {x: 1}}); +assert.eq([], cmdObj.values); + +cmdObj = db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: "x_1"}); +assert.eq([], cmdObj.values); + +assert.throws(function() { + db.coll.explain().distinct("a", {a: 1, b: 2}, {hint: {bad: 1, hint: 1}}); +}); + +assert.throws(function() { + db.coll.explain().distinct("a", {a: 1, b: 2}, {hint: "BAD HINT"}); +}); + +let cmdRes = + db.coll.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {bad: 1, hint: 1}}); +assert.commandFailedWithCode(cmdRes, ErrorCodes.BadValue, cmdRes); +var regex = new RegExp("hint provided does not correspond to an existing index"); +assert(regex.test(cmdRes.errmsg)); diff --git a/jstests/core/query/distinct/distinct_index1.js b/jstests/core/query/distinct/distinct_index1.js index 75a7f8adc223e..44c99d2087b9c 100644 --- a/jstests/core/query/distinct/distinct_index1.js +++ b/jstests/core/query/distinct/distinct_index1.js @@ -5,8 +5,7 @@ * assumes_read_concern_local, * ] */ -(function() { -load("jstests/libs/analyze_plan.js"); // For getPlanStage. +import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js"; const coll = db.distinct_index1; coll.drop(); @@ -80,5 +79,4 @@ assert.commandWorked(coll.createIndex({a: "hashed"})); explain = getDistinctExplainWithExecutionStats("a", {$or: [{a: 3}, {a: 5}]}); assert.eq(188, explain.executionStats.nReturned); const indexScanStage = getPlanStage(getWinningPlan(explain.queryPlanner), "IXSCAN"); -assert.eq("hashed", indexScanStage.keyPattern.a); -})(); +assert.eq("hashed", indexScanStage.keyPattern.a); \ No newline at end of file diff --git a/jstests/core/query/distinct/distinct_multikey.js b/jstests/core/query/distinct/distinct_multikey.js index 21d060bddf2ca..ca98b6e1d32c5 100644 --- a/jstests/core/query/distinct/distinct_multikey.js +++ b/jstests/core/query/distinct/distinct_multikey.js @@ -4,10 +4,7 @@ * assumes_read_concern_local, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; let coll = db.jstest_distinct_multikey; coll.drop(); @@ -110,5 +107,4 @@ assert.eq([1, 7, 8], result.sort()); explain = coll.explain("queryPlanner").distinct("b.c", {a: 3}); winningPlan = getWinningPlan(explain.queryPlanner); assert(planHasStage(db, winningPlan, "PROJECTION_DEFAULT")); -assert(planHasStage(db, winningPlan, "DISTINCT_SCAN")); -}()); +assert(planHasStage(db, winningPlan, "DISTINCT_SCAN")); \ No newline at end of file diff --git a/jstests/core/query/distinct/distinct_multikey_dotted_path.js b/jstests/core/query/distinct/distinct_multikey_dotted_path.js index c8530fe679997..3bbbfd92a8b5e 100644 --- a/jstests/core/query/distinct/distinct_multikey_dotted_path.js +++ b/jstests/core/query/distinct/distinct_multikey_dotted_path.js @@ -11,9 +11,7 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); // For planHasStage(). 
+import {getAggPlanStages, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; const coll = db.distinct_multikey; coll.drop(); @@ -205,5 +203,4 @@ assert.commandWorked(coll.insert({a: {b: {c: []}}})); // Will not attempt the equivalent query with aggregation, since $group by "a.b.0" will // only treat '0' as a field name (not array index). -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/query/distinct/distinct_with_hashed_index.js b/jstests/core/query/distinct/distinct_with_hashed_index.js index 8476c5c5f14a3..203867652aa66 100644 --- a/jstests/core/query/distinct/distinct_with_hashed_index.js +++ b/jstests/core/query/distinct/distinct_with_hashed_index.js @@ -8,9 +8,14 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); // For planHasStage(). +import { + getAggPlanStage, + getWinningPlan, + isCollscan, + isIndexOnly, + isIxscan, + planHasStage, +} from "jstests/libs/analyze_plan.js"; const coll = db.distinct_with_hashed_index; coll.drop(); @@ -150,5 +155,4 @@ pipeline = [{$group: {_id: "$b"}}]; assert.eq(26, coll.aggregate(pipeline).itcount()); explainPlan = coll.explain().aggregate(pipeline); assert.eq(null, getAggPlanStage(explainPlan, "DISTINCT_SCAN"), explainPlan); -assert.neq(null, getAggPlanStage(explainPlan, "COLLSCAN"), explainPlan); -})(); +assert.neq(null, getAggPlanStage(explainPlan, "COLLSCAN"), explainPlan); \ No newline at end of file diff --git a/jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js b/jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js new file mode 100644 index 0000000000000..9d5b73b5b3999 --- /dev/null +++ b/jstests/core/query/elemmatch/elemmatch_or_pushdown_paths.js @@ -0,0 +1,69 @@ +/** + * Test OR-pushdown fixes for elemMatch based on SERVER-74954. + */ +load("jstests/aggregation/extras/utils.js"); // for "arrayEq". + +const coll = db.jstests_elemmatch_or_pushdown_paths; + +coll.drop(); + +assert.commandWorked(coll.insert([ + {a: 1, b: [{c: 1}]}, + {a: 2, b: [{c: 1}]}, + {a: 3, b: [{c: 1}]}, + {a: 4, b: [{c: 1}]}, +])); +assert.commandWorked(coll.createIndex({"b.c": 1, a: 1})); + +// Test exact bounds. +assert(arrayEq(coll.find({ + $and: [ + {$or: [{a: {$lt: 2}}, {a: {$gt: 3}}]}, + {b: {$elemMatch: {c: {$eq: 1, $exists: true}}}} + ] + }, + {_id: 0}) + .hint({"b.c": 1, a: 1}) + .toArray(), + [ + {a: 1, b: [{c: 1}]}, + {a: 4, b: [{c: 1}]}, + ])); + +// Similar test, but use $mod instead of $exists. +const results = coll.find({ + $and: [ + {$or: [{a: {$lt: 2}}, {a: {$gt: 3}}]}, + {b: {$elemMatch: {c: {$eq: 1, $mod: [2, 1]}}}} + ] + }, + {_id: 0}) + .toArray(); + +assert(arrayEq(results, + [ + {a: 1, b: [{c: 1}]}, + {a: 4, b: [{c: 1}]}, + ]), + results); + +assert(coll.drop()); +assert.commandWorked(coll.insert([ + {a: 5, b: [{c: 5, d: 6, e: 7}]}, + {a: 5, b: [{c: 5, d: 6, e: 8}]}, + {a: 5, b: [{c: 5, d: 5, e: 7}]}, + {a: 4, b: [{c: 5, d: 6, e: 7}]}, +])); +assert.commandWorked(coll.createIndex({"b.d": 1, "b.c": 1})); +assert.commandWorked(coll.createIndex({"b.e": 1, "b.c": 1})); + +// Test OR within elemmatch. 
+assert(arrayEq( + coll.find({$and: [{a: 5}, {b: {$elemMatch: {$and: [{c: 5}, {$or: [{d: 6}, {e: 7}]}]}}}]}, + {_id: 0}) + .toArray(), + [ + {a: 5, b: [{c: 5, d: 6, e: 7}]}, + {a: 5, b: [{c: 5, d: 6, e: 8}]}, + {a: 5, b: [{c: 5, d: 5, e: 7}]}, + ])); \ No newline at end of file diff --git a/jstests/core/query/exists/exists.js b/jstests/core/query/exists/exists.js index b85d80c36a7b6..f29320b002eb4 100644 --- a/jstests/core/query/exists/exists.js +++ b/jstests/core/query/exists/exists.js @@ -2,7 +2,7 @@ // requires_fastcount, // ] -t = db.jstests_exists; +let t = db.jstests_exists; t.drop(); t.save({}); diff --git a/jstests/core/query/exists/exists2.js b/jstests/core/query/exists/exists2.js index 0764d859c3b51..6d175584eec5a 100644 --- a/jstests/core/query/exists/exists2.js +++ b/jstests/core/query/exists/exists2.js @@ -1,4 +1,4 @@ -t = db.exists2; +let t = db.exists2; t.drop(); t.save({a: 1, b: 1}); diff --git a/jstests/core/query/exists/exists3.js b/jstests/core/query/exists/exists3.js index 510d63c37526c..1b5939f07ceb7 100644 --- a/jstests/core/query/exists/exists3.js +++ b/jstests/core/query/exists/exists3.js @@ -1,6 +1,6 @@ // Check exists with non empty document, based on SERVER-2470 example. -t = db.jstests_exists3; +let t = db.jstests_exists3; t.drop(); t.insert({a: 1, b: 2}); diff --git a/jstests/core/query/exists/exists4.js b/jstests/core/query/exists/exists4.js index a533ca53e9ca6..4960fe32ff75c 100644 --- a/jstests/core/query/exists/exists4.js +++ b/jstests/core/query/exists/exists4.js @@ -1,6 +1,6 @@ // Check various exists cases, based on SERVER-1735 example. -t = db.jstests_exists4; +let t = db.jstests_exists4; t.drop(); t.createIndex({date: -1, country_code: 1, user_id: 1}, {unique: 1, background: 1}); diff --git a/jstests/core/query/exists/exists5.js b/jstests/core/query/exists/exists5.js index 2f4b1a9b8de41..112e8243b99ea 100644 --- a/jstests/core/query/exists/exists5.js +++ b/jstests/core/query/exists/exists5.js @@ -1,6 +1,6 @@ // Test some $not/$exists cases. -t = db.jstests_exists5; +let t = db.jstests_exists5; t.drop(); t.save({a: 1}); diff --git a/jstests/core/query/exists/exists6.js b/jstests/core/query/exists/exists6.js index 736574db8953d..71fd20d3c2669 100644 --- a/jstests/core/query/exists/exists6.js +++ b/jstests/core/query/exists/exists6.js @@ -1,6 +1,6 @@ // SERVER-393 Test indexed matching with $exists. -t = db.jstests_exists6; +let t = db.jstests_exists6; t.drop(); t.createIndex({b: 1}); diff --git a/jstests/core/query/exists/exists7.js b/jstests/core/query/exists/exists7.js index 285559e82f8f7..1f25db5fedf69 100644 --- a/jstests/core/query/exists/exists7.js +++ b/jstests/core/query/exists/exists7.js @@ -2,7 +2,7 @@ // Test that non boolean value types are allowed with $exists spec. 
SERVER-2322 -t = db.jstests_exists7; +let t = db.jstests_exists7; t.drop(); function testIntegerExistsSpec() { diff --git a/jstests/core/query/exists/exists8.js b/jstests/core/query/exists/exists8.js index f22a3be44f49e..3c506cd578108 100644 --- a/jstests/core/query/exists/exists8.js +++ b/jstests/core/query/exists/exists8.js @@ -2,7 +2,7 @@ // Test $exists with array element field names SERVER-2897 -t = db.jstests_exists8; +let t = db.jstests_exists8; t.drop(); t.save({a: [1]}); diff --git a/jstests/core/query/exists/exists9.js b/jstests/core/query/exists/exists9.js index c187bb3a10193..d822ae95f0af5 100644 --- a/jstests/core/query/exists/exists9.js +++ b/jstests/core/query/exists/exists9.js @@ -1,6 +1,6 @@ // SERVER-393 Test exists with various empty array and empty object cases. -t = db.jstests_exists9; +let t = db.jstests_exists9; t.drop(); // Check existence of missing nested field. diff --git a/jstests/core/query/exists/existsb.js b/jstests/core/query/exists/existsb.js index 64ee3cf9a889a..b327311a4efda 100644 --- a/jstests/core/query/exists/existsb.js +++ b/jstests/core/query/exists/existsb.js @@ -20,7 +20,7 @@ // everything but {} will have an index entry. // Let's make sure we handle this properly! -t = db.jstests_existsb; +let t = db.jstests_existsb; t.drop(); t.save({}); diff --git a/jstests/core/query/explain/explain1.js b/jstests/core/query/explain/explain1.js index 2de3e2f89d208..cdbd8751ddae5 100644 --- a/jstests/core/query/explain/explain1.js +++ b/jstests/core/query/explain/explain1.js @@ -2,16 +2,14 @@ // assumes_read_concern_local, // ] -t = db.explain1; +let t = db.explain1; t.drop(); for (var i = 0; i < 100; i++) { t.save({x: i}); } -q = { - x: {$gt: 50} -}; +let q = {x: {$gt: 50}}; assert.eq(49, t.find(q).count(), "A"); assert.eq(49, t.find(q).itcount(), "B"); diff --git a/jstests/core/query/explain/explain4.js b/jstests/core/query/explain/explain4.js index e49b188cb65f8..f3e2b460c5e1c 100644 --- a/jstests/core/query/explain/explain4.js +++ b/jstests/core/query/explain/explain4.js @@ -3,16 +3,16 @@ // assumes_read_concern_local, // ] -t = db.jstests_explain4; +let t = db.jstests_explain4; t.drop(); t.createIndex({a: 1}); -for (i = 0; i < 10; ++i) { +for (let i = 0; i < 10; ++i) { t.save({a: i, b: 0}); } -explain = t.find({a: {$gte: 0}, b: 0}).sort({a: 1}).hint({a: 1}).limit(5).explain(true); +let explain = t.find({a: {$gte: 0}, b: 0}).sort({a: 1}).hint({a: 1}).limit(5).explain(true); // Five results are expected, matching the limit spec. assert.eq(5, explain.executionStats.nReturned); diff --git a/jstests/core/query/explain/explain6.js b/jstests/core/query/explain/explain6.js index fbb6ecf6d7655..73bfced511673 100644 --- a/jstests/core/query/explain/explain6.js +++ b/jstests/core/query/explain/explain6.js @@ -6,7 +6,7 @@ // Basic test which checks the number of documents returned, keys examined, and documents // examined as reported by explain. 
-t = db.jstests_explain6; +let t = db.jstests_explain6; t.drop(); t.createIndex({a: 1, b: 1}); @@ -15,7 +15,7 @@ t.createIndex({b: 1, a: 1}); t.save({a: 0, b: 1}); t.save({a: 1, b: 0}); -explain = t.find({a: {$gte: 0}, b: {$gte: 0}}).explain(true); +let explain = t.find({a: {$gte: 0}, b: {$gte: 0}}).explain(true); assert.eq(2, explain.executionStats.nReturned); assert.eq(2, explain.executionStats.totalKeysExamined); diff --git a/jstests/core/query/explain/explain_agg_write_concern.js b/jstests/core/query/explain/explain_agg_write_concern.js index 9ff556489fa7f..ec246140abef3 100644 --- a/jstests/core/query/explain/explain_agg_write_concern.js +++ b/jstests/core/query/explain/explain_agg_write_concern.js @@ -5,6 +5,7 @@ // assumes_unsharded_collection, // assumes_write_concern_unchanged, // does_not_support_stepdowns, +// references_foreign_collection, // requires_non_retryable_commands, // ] diff --git a/jstests/core/query/explain/explain_batch_size.js b/jstests/core/query/explain/explain_batch_size.js index d31b54b3f16ff..aec1124f171ba 100644 --- a/jstests/core/query/explain/explain_batch_size.js +++ b/jstests/core/query/explain/explain_batch_size.js @@ -8,11 +8,11 @@ // requires_fastcount, // ] -t = db.explain_batch_size; +let t = db.explain_batch_size; t.drop(); var n = 3; -for (i = 0; i < n; i++) { +for (let i = 0; i < n; i++) { t.save({x: i}); } diff --git a/jstests/core/query/explain/explain_count.js b/jstests/core/query/explain/explain_count.js index 8dc0854ab154c..92c960bd9acb5 100644 --- a/jstests/core/query/explain/explain_count.js +++ b/jstests/core/query/explain/explain_count.js @@ -2,7 +2,7 @@ // // @tags: [requires_fastcount] -load("jstests/libs/analyze_plan.js"); // For assertExplainCount. +import {assertExplainCount, getPlanStage} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fixture_helpers.js"); // For isMongos and isSharded. var collName = "jstests_explain_count"; diff --git a/jstests/core/query/explain/explain_distinct.js b/jstests/core/query/explain/explain_distinct.js index 19059b8b1bd90..eae3bdd34fe16 100644 --- a/jstests/core/query/explain/explain_distinct.js +++ b/jstests/core/query/explain/explain_distinct.js @@ -7,10 +7,7 @@ /** * This test ensures that explain on the distinct command works. 
*/ -(function() { -'use strict'; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage, getWinningPlan, isCollscan, planHasStage} from "jstests/libs/analyze_plan.js"; const collName = "jstests_explain_distinct"; const coll = db[collName]; @@ -99,5 +96,4 @@ winningPlan = getWinningPlan(explain.queryPlanner); assert.eq(1, explain.executionStats.nReturned); assert(!planHasStage(db, winningPlan, "FETCH")); assert(planHasStage(db, winningPlan, "PROJECTION_COVERED")); -assert(planHasStage(db, winningPlan, "DISTINCT_SCAN")); -})(); +assert(planHasStage(db, winningPlan, "DISTINCT_SCAN")); \ No newline at end of file diff --git a/jstests/core/query/explain/explain_multi_plan_count.js b/jstests/core/query/explain/explain_multi_plan_count.js index 2ac52019cf36f..066a3b835f3c0 100644 --- a/jstests/core/query/explain/explain_multi_plan_count.js +++ b/jstests/core/query/explain/explain_multi_plan_count.js @@ -8,10 +8,14 @@ // assumes_unsharded_collection, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import { + assertExplainCount, + getRejectedPlan, + getRejectedPlans, + getWinningPlan, + isIndexOnly, + isIxscan, +} from "jstests/libs/analyze_plan.js"; const coll = db.explain_multi_plan_count; coll.drop(); @@ -38,5 +42,4 @@ for (let curRejectedPlan of rejectedPlans) { isIxscan(db, rejectedPlan); } -assert(coll.drop()); -}()); +assert(coll.drop()); \ No newline at end of file diff --git a/jstests/core/query/explain/explain_multikey.js b/jstests/core/query/explain/explain_multikey.js index 9bea359edb44a..7b34492178081 100644 --- a/jstests/core/query/explain/explain_multikey.js +++ b/jstests/core/query/explain/explain_multikey.js @@ -5,10 +5,7 @@ // @tags: [ // assumes_unsharded_collection, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; const coll = db.explain_multikey; const keyPattern = { @@ -81,5 +78,4 @@ verifyMultikeyInfoInExplainOutput({ verifyMultikeyInfoInExplainOutput({ commandObj: {distinct: coll.getName(), key: "a"}, stage: "DISTINCT_SCAN", -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/query/explain/explain_plan_scores.js b/jstests/core/query/explain/explain_plan_scores.js index 80788bd455856..ae00c00f0bce2 100644 --- a/jstests/core/query/explain/explain_plan_scores.js +++ b/jstests/core/query/explain/explain_plan_scores.js @@ -9,10 +9,7 @@ // assumes_against_mongod_not_mongos, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getRejectedPlan, getRejectedPlans, getWinningPlan} from "jstests/libs/analyze_plan.js"; const coll = db.explain_plan_scores; coll.drop(); @@ -51,5 +48,4 @@ assert.commandWorked(coll.createIndex({a: 1, b: 1})); const explain = coll.find({a: {$gte: 0}}).explain(verbosity); assert.commandWorked(explain); checkExplainOutput(explain, verbosity); -}); -}()); +}); \ No newline at end of file diff --git a/jstests/core/query/explain/explain_sample.js b/jstests/core/query/explain/explain_sample.js index efb6d2b6f8ac6..2b001ec838595 100644 --- a/jstests/core/query/explain/explain_sample.js +++ b/jstests/core/query/explain/explain_sample.js @@ -4,10 +4,7 @@ * assumes_read_concern_local, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.explain_sample; coll.drop(); @@ -32,5 +29,4 @@ assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.advanced, 0), 
tojson(multiIteratorStages)); assert.gt(multiIteratorStages.reduce((acc, stage) => acc + stage.works, 0), 0, - tojson(multiIteratorStages)); -}()); + tojson(multiIteratorStages)); \ No newline at end of file diff --git a/jstests/core/query/explain/explain_shell_helpers.js b/jstests/core/query/explain/explain_shell_helpers.js index 67b13b0b858d9..1223bffd22191 100644 --- a/jstests/core/query/explain/explain_shell_helpers.js +++ b/jstests/core/query/explain/explain_shell_helpers.js @@ -12,13 +12,12 @@ */ // Tests for the .explain() shell helper, which provides syntactic sugar for the explain command. +// Include helpers for analyzing explain output. +import {getPlanStage, getWinningPlan, isIxscan, planHasStage} from "jstests/libs/analyze_plan.js"; var t = db.jstests_explain_helpers; t.drop(); -// Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); - var explain; var stage; diff --git a/jstests/core/query/explain/explain_sort_type.js b/jstests/core/query/explain/explain_sort_type.js index df4d91c0bbf9c..df1ba7b1248e1 100644 --- a/jstests/core/query/explain/explain_sort_type.js +++ b/jstests/core/query/explain/explain_sort_type.js @@ -10,10 +10,7 @@ * requires_non_retryable_writes, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js"; const coll = db.explain_sort_type; coll.drop(); @@ -99,5 +96,4 @@ explain = winningPlan = getWinningPlan(explain.queryPlanner); sortStage = getPlanStage(winningPlan, "SORT"); assert.neq(null, sortStage, explain); -assert.eq("default", sortStage.type, explain); -}()); +assert.eq("default", sortStage.type, explain); \ No newline at end of file diff --git a/jstests/core/query/explain/explain_writecmd_nonexistent_collection.js b/jstests/core/query/explain/explain_writecmd_nonexistent_collection.js index 2496f4b63a092..79431aa20727c 100644 --- a/jstests/core/query/explain/explain_writecmd_nonexistent_collection.js +++ b/jstests/core/query/explain/explain_writecmd_nonexistent_collection.js @@ -2,10 +2,7 @@ // // @tags: [requires_non_retryable_writes, requires_fastcount, // assumes_no_implicit_collection_creation_after_drop] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {planHasStage} from "jstests/libs/analyze_plan.js"; function assertCollectionDoesNotExist(collName) { const collectionList = db.getCollectionInfos({name: collName}); @@ -36,5 +33,4 @@ explain = assert.commandWorked(db.runCommand( {explain: {update: collName, updates: [{q: {a: 1}, u: {$set: {b: 1}}, upsert: true}]}})); assert(planHasStage(db, explain.queryPlanner.winningPlan, "EOF"), explain); assert(!planHasStage(db, explain.queryPlanner.winningPlan, "UPDATE"), explain); -assertCollectionDoesNotExist(collName); -}()); +assertCollectionDoesNotExist(collName); \ No newline at end of file diff --git a/jstests/core/query/explode_for_sort_equality_to_array.js b/jstests/core/query/explode_for_sort_equality_to_array.js index 9f976f89ef88f..94d5644b37107 100644 --- a/jstests/core/query/explode_for_sort_equality_to_array.js +++ b/jstests/core/query/explode_for_sort_equality_to_array.js @@ -1,31 +1,27 @@ -/** - * Test that a query eligible for the "explode for sort" optimization works correctly when the query - * involves an equality-to-array predicate. Specifically, we use an `$all` where the constants - * inside the `$all` list are singleton arrays rather than scalars. - * - * This test was originally designed to reproduce SERVER-75304. 
- * - * @tags: [ - * # explain does not support majority read concern - * assumes_read_concern_local, - * ] - */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); - -const testDB = db.getSiblingDB(jsTestName()); -assert.commandWorked(testDB.dropDatabase()); -const coll = testDB.explode_for_sort_equality_to_array; - -assert.commandWorked(coll.createIndex({array: -1, num: 1})); -assert.commandWorked(coll.insert({array: [[1], [2]]})); -assert.commandWorked(coll.insert({array: [[1]]})); -assert.commandWorked(coll.insert({array: [[2]]})); -const explain = assert.commandWorked( - coll.find({array: {$all: [[1], [2]]}}).sort({num: 1}).explain('executionStats')); -assert.gt( - getPlanStages(getWinningPlan(explain.queryPlanner), "SORT_MERGE").length, 0, tojson(explain)); -assert.eq(1, explain.executionStats.nReturned, tojson(explain)); -}()); +/** + * Test that a query eligible for the "explode for sort" optimization works correctly when the query + * involves an equality-to-array predicate. Specifically, we use an `$all` where the constants + * inside the `$all` list are singleton arrays rather than scalars. + * + * This test was originally designed to reproduce SERVER-75304. + * + * @tags: [ + * # explain does not support majority read concern + * assumes_read_concern_local, + * ] + */ +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; + +const testDB = db.getSiblingDB(jsTestName()); +assert.commandWorked(testDB.dropDatabase()); +const coll = testDB.explode_for_sort_equality_to_array; + +assert.commandWorked(coll.createIndex({array: -1, num: 1})); +assert.commandWorked(coll.insert({array: [[1], [2]]})); +assert.commandWorked(coll.insert({array: [[1]]})); +assert.commandWorked(coll.insert({array: [[2]]})); +const explain = assert.commandWorked( + coll.find({array: {$all: [[1], [2]]}}).sort({num: 1}).explain('executionStats')); +assert.gt( + getPlanStages(getWinningPlan(explain.queryPlanner), "SORT_MERGE").length, 0, tojson(explain)); +assert.eq(1, explain.executionStats.nReturned, tojson(explain)); \ No newline at end of file diff --git a/jstests/core/query/explode_for_sort_plan_cache.js b/jstests/core/query/explode_for_sort_plan_cache.js index 53d343b35d8ff..b630610a93066 100644 --- a/jstests/core/query/explode_for_sort_plan_cache.js +++ b/jstests/core/query/explode_for_sort_plan_cache.js @@ -22,11 +22,12 @@ * does_not_support_repeated_reads, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); +import { + getPlanCacheKeyFromShape, + getPlanStages, + getWinningPlan +} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const isSBEEnabled = checkSBEEnabled(db); const coll = db.explode_for_sort_plan_cache; @@ -216,4 +217,3 @@ assertExplodeForSortCacheParameterizedCorrectly({ newQueryCount: 0, reuseEntry: false, }); -}()); diff --git a/jstests/core/query/expr/expr.js b/jstests/core/query/expr/expr.js index 737cab39f156b..81ebf13a8d3a6 100644 --- a/jstests/core/query/expr/expr.js +++ b/jstests/core/query/expr/expr.js @@ -15,13 +15,10 @@ "use strict"; load("jstests/libs/sbe_assert_error_override.js"); // For 'assert.errorCodeEq'. +load("jstests/libs/fixture_helpers.js"); const coll = db.expr; -const hello = db.runCommand("hello"); -assert.commandWorked(hello); -const isMongos = (hello.msg === "isdbgrid"); - // // $expr in aggregate. // @@ -126,7 +123,7 @@ assert.throws(function() { // 'executionSuccess' field. 
let explain = coll.find({$expr: {$divide: [1, "$a"]}}).explain("executionStats"); // Accommodate format differences between explain via mongos and explain directly on a mongod. -if (!isMongos) { +if (!FixtureHelpers.isMongos(db)) { assert(explain.hasOwnProperty("executionStats"), explain); assert.eq(explain.executionStats.executionSuccess, false, explain); assert.errorCodeEq(explain.executionStats.errorCode, [16609, ErrorCodes.TypeMismatch], explain); diff --git a/jstests/core/query/expr/expr_index_use.js b/jstests/core/query/expr/expr_index_use.js index f7a04aed48237..2a2f096b0d4f6 100644 --- a/jstests/core/query/expr/expr_index_use.js +++ b/jstests/core/query/expr/expr_index_use.js @@ -4,11 +4,8 @@ // requires_fcv_63, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getAggPlanStage, getPlanStage, hasRejectedPlans} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const coll = db.expr_index_use; coll.drop(); @@ -330,4 +327,3 @@ confirmExpectedExprExecution({$lt: ["$w", {z: undefined, u: ["array"]}]}, {nReturned: 6, expectedIndex: {w: 1}}); confirmExpectedExprExecution({$lte: ["$w", {z: undefined, u: ["array"]}]}, {nReturned: 7, expectedIndex: {w: 1}}); -})(); diff --git a/jstests/core/query/expr/expr_valid_positions.js b/jstests/core/query/expr/expr_valid_positions.js index cd3ae2bf91753..83529eb55726a 100644 --- a/jstests/core/query/expr/expr_valid_positions.js +++ b/jstests/core/query/expr/expr_valid_positions.js @@ -20,4 +20,4 @@ assert.throws(function() { assert.throws(function() { coll.find({a: {$_internalSchemaObjectMatch: {$expr: {$eq: ["$foo", "$bar"]}}}}).itcount(); }); -}()); \ No newline at end of file +}()); diff --git a/jstests/core/query/find/find2.js b/jstests/core/query/find/find2.js index a793c60b804f1..747775bd4b0cd 100644 --- a/jstests/core/query/find/find2.js +++ b/jstests/core/query/find/find2.js @@ -3,13 +3,13 @@ // @tags: [requires_fastcount] function testObjectIdFind(db) { - r = db.ed_db_find2_oif; + let r = db.ed_db_find2_oif; r.drop(); - for (i = 0; i < 3; ++i) + for (let i = 0; i < 3; ++i) r.save({}); - f = r.find().sort({_id: 1}); + let f = r.find().sort({_id: 1}); assert.eq(3, f.count()); assert(f[0]._id < f[1]._id); assert(f[1]._id < f[2]._id); diff --git a/jstests/core/query/find/find6.js b/jstests/core/query/find/find6.js index d76cc1d5fb515..4b90cfe8cf753 100644 --- a/jstests/core/query/find/find6.js +++ b/jstests/core/query/find/find6.js @@ -4,7 +4,7 @@ // requires_scripting, // ] -t = db.find6; +let t = db.find6; t.drop(); t.save({a: 1}); @@ -16,7 +16,7 @@ assert.eq(1, t.find("function() { return this.b == null; }").itcount(), "C"); assert.eq(1, t.find("function() { return this.b == null; }").count(), "D"); /* test some stuff with dot array notation */ -q = db.find6a; +let q = db.find6a; q.drop(); q.insert({"a": [{"0": 1}]}); q.insert({"a": [{"0": 2}]}); diff --git a/jstests/core/query/find/find7.js b/jstests/core/query/find/find7.js index ed18dcbb0ff0a..56fbd859299ba 100644 --- a/jstests/core/query/find/find7.js +++ b/jstests/core/query/find/find7.js @@ -1,10 +1,7 @@ -t = db.find7; +let t = db.find7; t.drop(); -x = { - "_id": {"d": 3649, "w": "signed"}, - "u": {"3649": 5} -}; +let x = {"_id": {"d": 3649, "w": "signed"}, "u": {"3649": 5}}; t.insert(x); assert.eq(x, t.findOne(), "A1"); assert.eq(x, t.findOne({_id: x._id}), "A2"); diff --git a/jstests/core/query/find/find8.js b/jstests/core/query/find/find8.js index 
14930a056e72c..a64f89496f5d3 100644 --- a/jstests/core/query/find/find8.js +++ b/jstests/core/query/find/find8.js @@ -1,6 +1,6 @@ // SERVER-1932 Test unindexed matching of a range that is only valid in a multikey context. -t = db.jstests_find8; +let t = db.jstests_find8; t.drop(); t.save({a: [1, 10]}); diff --git a/jstests/core/query/find/find9.js b/jstests/core/query/find/find9.js index be6bfdb2ccf77..e4858e094905d 100644 --- a/jstests/core/query/find/find9.js +++ b/jstests/core/query/find/find9.js @@ -2,11 +2,11 @@ // Test that the MaxBytesToReturnToClientAtOnce limit is enforced. -t = db.jstests_find9; +let t = db.jstests_find9; t.drop(); -big = new Array(500000).toString(); -for (i = 0; i < 60; ++i) { +let big = new Array(500000).toString(); +for (let i = 0; i < 60; ++i) { t.save({a: i, b: big}); } @@ -18,12 +18,12 @@ assert.gt(60, t.find().objsLeftInBatch()); assert.eq(60, t.find({}, {a: 1}).batchSize(80).objsLeftInBatch()); assert.gt(60, t.find().batchSize(80).objsLeftInBatch()); -for (i = 0; i < 60; ++i) { +for (let i = 0; i < 60; ++i) { t.save({a: i, b: big}); } // Check size limit with get more. -c = t.find().batchSize(80); +let c = t.find().batchSize(80); while (c.hasNext()) { assert.gt(60, c.objsLeftInBatch()); c.next(); diff --git a/jstests/core/query/find/find_project_sort.js b/jstests/core/query/find/find_project_sort.js index 3d359fd9e41cb..16fb1193da38d 100644 --- a/jstests/core/query/find/find_project_sort.js +++ b/jstests/core/query/find/find_project_sort.js @@ -33,9 +33,6 @@ const documents = [ ]; assert.commandWorked(coll.insert(documents)); -assert.commandWorked(coll.createIndex({a: 1})); -assert.commandWorked(coll.createIndex({z: 1})); - function checkQuery( {expected = [], query = {}, proj = {}, sort = null, limit = null, skip = null, desc = null}, hint) { @@ -803,6 +800,10 @@ runIDHackTest(); runCollScanTests(); runFindTestsWithHint({$natural: 1}); + +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({z: 1})); + runFindTestsWithHint({a: 1}); runFindTestsWithHint({z: 1}); // Multi-key }()); diff --git a/jstests/core/query/find/finda.js b/jstests/core/query/find/finda.js index 2e1a93cbad8de..2a3799f72bf2d 100644 --- a/jstests/core/query/find/finda.js +++ b/jstests/core/query/find/finda.js @@ -4,10 +4,10 @@ // Tests where the QueryOptimizerCursor enters takeover mode during a query rather than a get more. 
-t = db.jstests_finda; +let t = db.jstests_finda; t.drop(); -numDocs = 200; +let numDocs = 200; function clearQueryPlanCache() { t.createIndex({c: 1}); @@ -16,12 +16,12 @@ function clearQueryPlanCache() { function assertAllFound(matches) { // printjson( matches ); - found = new Array(numDocs); + let found = new Array(numDocs); for (var i = 0; i < numDocs; ++i) { found[i] = false; } for (var i in matches) { - m = matches[i]; + let m = matches[i]; found[m._id] = true; } for (var i = 0; i < numDocs; ++i) { @@ -34,7 +34,7 @@ function makeCursor(query, projection, sort, batchSize, returnKey) { printjson(query); print("proj:"); printjson(projection); - cursor = t.find(query, projection); + let cursor = t.find(query, projection); if (sort) { cursor.sort(sort); print("sort:"); @@ -53,7 +53,7 @@ function makeCursor(query, projection, sort, batchSize, returnKey) { function checkCursorWithBatchSizeProjection( query, projection, sort, batchSize, expectedLeftInBatch) { clearQueryPlanCache(); - cursor = makeCursor(query, projection, sort, batchSize); + let cursor = makeCursor(query, projection, sort, batchSize); if (TestData.batchSize && batchSize == null) { expectedLeftInBatch = Math.min(TestData.batchSize, expectedLeftInBatch); } @@ -71,10 +71,10 @@ function checkCursorWithBatchSize(query, sort, batchSize, expectedLeftInBatch) { // from the a,_id index. clearQueryPlanCache(); if (expectedLeftInBatch > 110) { - cursor = makeCursor(query, {}, sort, batchSize, true); - lastNonAIndexResult = -1; + let cursor = makeCursor(query, {}, sort, batchSize, true); + let lastNonAIndexResult = -1; for (var i = 0; i < expectedLeftInBatch; ++i) { - next = cursor.next(); + let next = cursor.next(); // Identify the query plan used by checking the fields of a returnKey query. if (!friendlyEqual(['a', '_id'], Object.keySet(next))) { lastNonAIndexResult = i; diff --git a/jstests/core/query/idhack.js b/jstests/core/query/idhack.js index 1ddab70d4cd2a..0ab6622ebd67e 100644 --- a/jstests/core/query/idhack.js +++ b/jstests/core/query/idhack.js @@ -3,16 +3,13 @@ // requires_multi_updates, // requires_non_retryable_writes, // ] -(function() { -"use strict"; +// Include helpers for analyzing explain output. +import {getWinningPlan, isIdhack} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const t = db.idhack; t.drop(); -// Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
- assert.commandWorked(t.insert({_id: {x: 1}, z: 1})); assert.commandWorked(t.insert({_id: {x: 2}, z: 2})); assert.commandWorked(t.insert({_id: {x: 3}, z: 3})); @@ -114,4 +111,3 @@ assert.eq(0, t.find({_id: 1}).hint({_id: 1}).min({_id: 2}).itcount()); explain = t.find({_id: 2}).hint({_id: 1}).min({_id: 1}).max({_id: 3}).explain(); winningPlan = getWinningPlan(explain.queryPlanner); assert(!isIdhack(db, winningPlan), winningPlan); -})(); diff --git a/jstests/core/query/in/in.js b/jstests/core/query/in/in.js index ca8cd77b1895e..f2a3ab6fcff3c 100644 --- a/jstests/core/query/in/in.js +++ b/jstests/core/query/in/in.js @@ -1,5 +1,5 @@ -t = db.in1; +let t = db.in1; t.drop(); t.save({a: 1}); diff --git a/jstests/core/query/in/in2.js b/jstests/core/query/in/in2.js index 6682bbc79c187..48f94f6aed64f 100644 --- a/jstests/core/query/in/in2.js +++ b/jstests/core/query/in/in2.js @@ -1,5 +1,5 @@ -t = db.in2; +let t = db.in2; function go(name, index) { t.drop(); diff --git a/jstests/core/query/in/in3.js b/jstests/core/query/in/in3.js index bd64329ded498..5bc2b353b45e1 100644 --- a/jstests/core/query/in/in3.js +++ b/jstests/core/query/in/in3.js @@ -1,6 +1,6 @@ // SERVER-2829 Test arrays matching themselves within a $in expression. -t = db.jstests_in8; +let t = db.jstests_in8; t.drop(); t.save({key: [1]}); diff --git a/jstests/core/query/in/in4.js b/jstests/core/query/in/in4.js index e916ca7c82d93..8d18a8b4287af 100644 --- a/jstests/core/query/in/in4.js +++ b/jstests/core/query/in/in4.js @@ -2,7 +2,7 @@ // SERVER-2343 Test $in empty array matching. -t = db.jstests_in9; +let t = db.jstests_in9; t.drop(); function someData() { diff --git a/jstests/core/query/in/in5.js b/jstests/core/query/in/in5.js index 80f37e6b473f7..5a8e63d188881 100644 --- a/jstests/core/query/in/in5.js +++ b/jstests/core/query/in/in5.js @@ -4,28 +4,28 @@ // assumes_no_implicit_index_creation, // ] -t = db.in5; +let t = db.in5; function go(fn) { t.drop(); - o = {}; + let o = {}; o[fn] = {a: 1, b: 2}; t.insert(o); - x = {}; + let x = {}; x[fn] = {a: 1, b: 2}; assert.eq(1, t.find(x).itcount(), "A1 - " + fn); - y = {}; + let y = {}; y[fn] = {$in: [{a: 1, b: 2}]}; assert.eq(1, t.find(y).itcount(), "A2 - " + fn); - z = {}; + let z = {}; z[fn + ".a"] = 1; z[fn + ".b"] = {$in: [2]}; assert.eq(1, t.find(z).itcount(), "A3 - " + fn); // SERVER-1366 - i = {}; + let i = {}; i[fn] = 1; t.createIndex(i); diff --git a/jstests/core/query/in/in6.js b/jstests/core/query/in/in6.js index ab8322cfe0a00..8584e1ee95435 100644 --- a/jstests/core/query/in/in6.js +++ b/jstests/core/query/in/in6.js @@ -1,4 +1,4 @@ -t = db.jstests_in6; +let t = db.jstests_in6; t.drop(); t.save({}); diff --git a/jstests/core/query/in/in7.js b/jstests/core/query/in/in7.js index 2f6c9e3ff1aa7..ee2de08c0f48f 100644 --- a/jstests/core/query/in/in7.js +++ b/jstests/core/query/in/in7.js @@ -1,6 +1,6 @@ // Uassert when $elemMatch is attempted within $in SERVER-3545 -t = db.jstests_ina; +let t = db.jstests_ina; t.drop(); t.save({}); @@ -20,4 +20,4 @@ assert.throws(function() { // NOTE Above we don't check cases like {b:2,$elemMatch:{b:3,4}} - generally // we assume that the first key is $elemMatch if any key is, and validating -// every key is expensive in some cases. \ No newline at end of file +// every key is expensive in some cases. 
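For reference, the recurring change in the hunks above and below replaces the legacy `load("jstests/libs/analyze_plan.js")` preamble and the `(function() { "use strict"; ... })();` wrapper with named ES module imports and block-scoped `let`/`const` declarations. A minimal sketch of the resulting test style, assuming a hypothetical collection name and query that are not taken from this patch:

    import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js";

    // Hypothetical collection used only to illustrate the converted style.
    const coll = db.import_style_sketch;
    coll.drop();
    assert.commandWorked(coll.createIndex({a: 1}));
    assert.commandWorked(coll.insert({a: 1}));

    // Explain helpers are now imported by name instead of being pulled in via load().
    const explain = coll.find({a: 1}).explain();
    assert(isIxscan(db, getWinningPlan(explain.queryPlanner)));

No IIFE or "use strict" directive is needed because ES modules are strict by default, which is also why the converted tests in these hunks drop those lines.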
diff --git a/jstests/core/query/in/in8.js b/jstests/core/query/in/in8.js index f1b6188347c8a..42a99407ce0a3 100644 --- a/jstests/core/query/in/in8.js +++ b/jstests/core/query/in/in8.js @@ -1,6 +1,6 @@ // Test $in regular expressions with overlapping index bounds. SERVER-4677 -t = db.jstests_inb; +let t = db.jstests_inb; t.drop(); function checkResults(query) { diff --git a/jstests/core/query/inc/inc1.js b/jstests/core/query/inc/inc1.js index 551d15e0f63b5..2470b146d70f7 100644 --- a/jstests/core/query/inc/inc1.js +++ b/jstests/core/query/inc/inc1.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.inc1; +let t = db.inc1; t.drop(); function test(num, name) { diff --git a/jstests/core/query/inc/inc2.js b/jstests/core/query/inc/inc2.js index debdfde82cb0a..1d253723b2896 100644 --- a/jstests/core/query/inc/inc2.js +++ b/jstests/core/query/inc/inc2.js @@ -1,5 +1,5 @@ -t = db.inc2; +let t = db.inc2; t.drop(); t.save({_id: 1, x: 1}); diff --git a/jstests/core/query/inc/inc3.js b/jstests/core/query/inc/inc3.js index e24165876c697..51a9c9c1587ee 100644 --- a/jstests/core/query/inc/inc3.js +++ b/jstests/core/query/inc/inc3.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.inc3; +let t = db.inc3; t.drop(); t.save({_id: 1, z: 1, a: 1}); diff --git a/jstests/core/query/index_key_expression.js b/jstests/core/query/index_key_expression.js index fcdfe066c281d..6b255722910a2 100644 --- a/jstests/core/query/index_key_expression.js +++ b/jstests/core/query/index_key_expression.js @@ -6,10 +6,7 @@ * requires_fcv_63, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); // For "FeatureFlagUtil" +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const collection = db.index_key_expression; @@ -1094,5 +1091,4 @@ testScenarios.forEach(testScenario => { assert.throwsWithCode(() => collection.aggregate(pipeline).toArray(), testScenario.expectedErrorCode); } -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/query/internal_hash_eq/expr_rewrites.js b/jstests/core/query/internal_hash_eq/expr_rewrites.js index b95284dc9c67d..e71de20f3d914 100644 --- a/jstests/core/query/internal_hash_eq/expr_rewrites.js +++ b/jstests/core/query/internal_hash_eq/expr_rewrites.js @@ -2,12 +2,13 @@ * Tests that $expr with equality of $toHashedIndexKey to a NumberLong results in an IXSCAN plan * with a point bound. This is because we rewrite this structure to a $_internalEqHash expression * and generate a tight index bound. - * @tags: [requires_fcv_70] + * @tags: [ + * # explain doesn't support read concern + * assumes_read_concern_unchanged, + * requires_fcv_70, + * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getExecutionStages, getPlanStages, isIxscan} from "jstests/libs/analyze_plan.js"; const collName = jsTestName(); const coll = db.getCollection(collName); @@ -165,5 +166,4 @@ function assertExplainIxscan(explainPlan, expectedIndexSpec, expectedKeysExamine // We couldn't create a tight bound for the index scan as the index is not hashed. 
assertExplainIxscan(explainPlan, indexSpec, 3 /* keyExamined */); -})(); })(); \ No newline at end of file diff --git a/jstests/core/query/internal_hash_eq/lookup_using_hash_key.js b/jstests/core/query/internal_hash_eq/lookup_using_hash_key.js index 8e46265133790..9343e5f0e564b 100644 --- a/jstests/core/query/internal_hash_eq/lookup_using_hash_key.js +++ b/jstests/core/query/internal_hash_eq/lookup_using_hash_key.js @@ -7,12 +7,9 @@ * @tags: [ * does_not_support_transactions, * requires_fcv_70, + * references_foreign_collection, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For 'isCollscan()' and similar. load("jstests/aggregation/extras/utils.js"); // For 'resultsEq().' const coll = db.lookup_using_hash_key; @@ -45,5 +42,4 @@ let results = coll.aggregate( // We essentially just looked up ourselves for each document. let expected = allDocs.map(doc => Object.merge(doc, {relookup: [doc]})); -assert(resultsEq(results, expected, true), [results, expected]); -}()); +assert(resultsEq(results, expected, true), [results, expected]); \ No newline at end of file diff --git a/jstests/core/query/internal_hash_eq/match_internal_eq_hash.js b/jstests/core/query/internal_hash_eq/match_internal_eq_hash.js index d5135f57587dc..2795c8973a874 100644 --- a/jstests/core/query/internal_hash_eq/match_internal_eq_hash.js +++ b/jstests/core/query/internal_hash_eq/match_internal_eq_hash.js @@ -1,11 +1,13 @@ /** * Basic tests for the $_internalEqHash match expression. - * @tags: [requires_fcv_70] + * @tags: [ + * # explain doesn't support read concern + * assumes_read_concern_unchanged, + * requires_fcv_70, + * ] */ -(function() { -"use strict"; +import {isCollscan, isIxscan} from "jstests/libs/analyze_plan.js"; -load("jstests/libs/analyze_plan.js"); // For 'isCollscan()' and similar. load("jstests/aggregation/extras/utils.js"); // For 'resultsEq().' const coll = db.match_internal_eq_hash; @@ -144,5 +146,4 @@ const coll = db.match_internal_eq_hash; assert.commandFailedWithCode( db.runCommand({find: "match_internal_eq_hash", filter: {a: {$_internalEqHash: v}}}), 2); }); -})(); -}()); +})(); \ No newline at end of file diff --git a/jstests/core/query/introspect_hidden_index_plan_cache_entries.js b/jstests/core/query/introspect_hidden_index_plan_cache_entries.js index 1d56012bcbff9..6d5afe613ecde 100644 --- a/jstests/core/query/introspect_hidden_index_plan_cache_entries.js +++ b/jstests/core/query/introspect_hidden_index_plan_cache_entries.js @@ -13,9 +13,7 @@ * ] */ -(function() { -'use strict'; -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape. +import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; const collName = 'introspect_hidden_index_plan_cache_entries'; const collNotAffectedName = 'introspect_hidden_index_plan_cache_entries_unaffected'; @@ -97,5 +95,4 @@ assert.gt(cachedPlan.length, 0); // Unhide an index. 
assert.commandWorked(coll.unhideIndex("b_1")); cachedPlan = getPlansForCacheEntry(queryShape, coll); -assert.eq(0, cachedPlan.length); -})(); +assert.eq(0, cachedPlan.length); \ No newline at end of file diff --git a/jstests/core/query/js/js1.js b/jstests/core/query/js/js1.js index f569606ba5417..0a9254c015419 100644 --- a/jstests/core/query/js/js1.js +++ b/jstests/core/query/js/js1.js @@ -4,7 +4,7 @@ // requires_scripting, // ] -t = db.jstests_js1; +let t = db.jstests_js1; t.remove({}); t.save({z: 1}); diff --git a/jstests/core/query/js/js2.js b/jstests/core/query/js/js2.js index a278a520a8841..05daee8394004 100644 --- a/jstests/core/query/js/js2.js +++ b/jstests/core/query/js/js2.js @@ -2,10 +2,10 @@ // requires_non_retryable_writes //] -t = db.jstests_js2; +let t = db.jstests_js2; t.remove({}); -t2 = db.jstests_js2_2; +let t2 = db.jstests_js2_2; t2.remove({}); assert.eq(0, t2.find().length(), "A"); diff --git a/jstests/core/query/js/js3.js b/jstests/core/query/js/js3.js index c9239bb417685..cb2b03e778789 100644 --- a/jstests/core/query/js/js3.js +++ b/jstests/core/query/js/js3.js @@ -7,13 +7,13 @@ // requires_scripting, // ] -t = db.jstests_js3; +let t = db.jstests_js3; -debug = function(s) { +let debug = function(s) { // printjson( s ); }; -for (z = 0; z < 2; z++) { +for (let z = 0; z < 2; z++) { debug(z); t.drop(); @@ -23,7 +23,7 @@ for (z = 0; z < 2; z++) { t.createIndex({i: 1}); } - for (i = 0; i < 1000; i++) + for (let i = 0; i < 1000; i++) t.save({ i: i, z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" @@ -57,17 +57,17 @@ for (z = 0; z < 2; z++) { debug("before indexed find"); - arr = t.find({ - $where: function() { - return obj.i == 7 || obj.i == 8; - } - }).toArray(); + let arr = t.find({ + $where: function() { + return obj.i == 7 || obj.i == 8; + } + }).toArray(); debug(arr); assert.eq(2, arr.length); debug("after indexed find"); - for (i = 1000; i < 2000; i++) + for (let i = 1000; i < 2000; i++) t.save({ i: i, z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" diff --git a/jstests/core/query/js/js4.js b/jstests/core/query/js/js4.js index a8beab4a8f6f9..11e67b2cd2cb6 100644 --- a/jstests/core/query/js/js4.js +++ b/jstests/core/query/js/js4.js @@ -3,17 +3,10 @@ // requires_scripting, // ] -t = db.jstests_js4; +let t = db.jstests_js4; t.drop(); -real = { - a: 1, - b: "abc", - c: /abc/i, - d: new Date(111911100111), - e: null, - f: true -}; +let real = {a: 1, b: "abc", c: /abc/i, d: new Date(111911100111), e: null, f: true}; t.save(real); diff --git a/jstests/core/query/js/js5.js b/jstests/core/query/js/js5.js index 05071a2b6a69b..f41a4de78d4aa 100644 --- a/jstests/core/query/js/js5.js +++ b/jstests/core/query/js/js5.js @@ -3,7 +3,7 @@ // requires_scripting // ] -t = db.jstests_js5; +let t = db.jstests_js5; t.drop(); t.save({a: 1}); diff --git a/jstests/core/query/js/js8.js b/jstests/core/query/js/js8.js index 4bdf942e2c671..1954464923670 100644 --- a/jstests/core/query/js/js8.js +++ b/jstests/core/query/js/js8.js @@ -3,7 +3,7 @@ // requires_scripting, // ] -t = db.jstests_js8; +let t = db.jstests_js8; t.drop(); t.save({a: 1, b: [2, 3, 4]}); diff --git a/jstests/core/query/js/js_jit.js b/jstests/core/query/js/js_jit.js index 72290d457589d..110d07c03128c 100644 --- a/jstests/core/query/js/js_jit.js +++ b/jstests/core/query/js/js_jit.js @@ -37,4 +37,4 @@ function testDBQuery() { testDBCollection(); testDB(); testDBQuery(); -})(); \ No newline at end of file +})(); diff --git 
a/jstests/core/query/mr/mr_agg_explain.js b/jstests/core/query/mr/mr_agg_explain.js index 801c2793b61be..cc224f439464f 100644 --- a/jstests/core/query/mr/mr_agg_explain.js +++ b/jstests/core/query/mr/mr_agg_explain.js @@ -8,10 +8,7 @@ * requires_scripting, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanStages. +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.mr_explain; coll.drop(); @@ -61,5 +58,4 @@ runTest("out_collection"); runTest({out: {inline: 1}}); // Explain on mapReduce fails when the 3rd 'optionsOrOutString' argument is missing. -assert.throws(() => coll.explain().mapReduce(mapFunc, reduceFunc)); -}()); +assert.throws(() => coll.explain().mapReduce(mapFunc, reduceFunc)); \ No newline at end of file diff --git a/jstests/core/query/mr/mr_bigobject.js b/jstests/core/query/mr/mr_bigobject.js index 83089935f3550..57d8283af235e 100644 --- a/jstests/core/query/mr/mr_bigobject.js +++ b/jstests/core/query/mr/mr_bigobject.js @@ -37,7 +37,7 @@ assert.eq([{_id: 1, value: 1}], outputColl.find().toArray()); // The reduce function processes the expected amount of data. reduceFn = function(k, v) { - total = 0; + let total = 0; for (let i = 0; i < v.length; i++) { const x = v[i]; if (typeof (x) == "number") diff --git a/jstests/core/query/mr/mr_bigobject_replace.js b/jstests/core/query/mr/mr_bigobject_replace.js index db7d0bc3712cf..ba534bed0961b 100644 --- a/jstests/core/query/mr/mr_bigobject_replace.js +++ b/jstests/core/query/mr/mr_bigobject_replace.js @@ -59,13 +59,19 @@ function runTest(testOptions) { }, testOptions)); - // In most cases we expect this to fail because it tries to insert a document that is too large. + // In most cases we expect this to fail because it tries to insert a document that is too large, + // or we see a particular error code which happens when the input is too large to reduce. + // // In some cases we may see the javascript execution interrupted because it takes longer than // our default time limit, so we allow that possibility. - assert.commandFailedWithCode(res, - [ErrorCodes.BadValue, ErrorCodes.Interrupted], - "creating a document larger than 16MB didn't fail"); - if (res.code != ErrorCodes.Interrupted) { + const kCannotReduceLargeObjCode = 31392; + assert.commandFailedWithCode( + res, + [ErrorCodes.BadValue, ErrorCodes.Interrupted, kCannotReduceLargeObjCode], + "creating a document larger than 16MB didn't fail"); + // If we see 'BadValue', make sure the message indicates it's the kind of error we were + // expecting. 
+ if (res.code === ErrorCodes.BadValue) { assert.lte( 0, res.errmsg.indexOf("object to insert too large"), diff --git a/jstests/core/query/mr/mr_reduce_merge_other_db.js b/jstests/core/query/mr/mr_reduce_merge_other_db.js index 5c730ada182cd..78559f6788e4c 100644 --- a/jstests/core/query/mr/mr_reduce_merge_other_db.js +++ b/jstests/core/query/mr/mr_reduce_merge_other_db.js @@ -23,7 +23,7 @@ const outDb = db.getMongo().getDB(outDbStr); const outColl = outDb[outCollStr]; const mapFn = function() { - for (i = 0; i < this.a.length; i++) + for (let i = 0; i < this.a.length; i++) emit(this.a[i], 1); }; const reduceFn = function(k, vs) { diff --git a/jstests/core/query/mr/mr_replace_into_other_db.js b/jstests/core/query/mr/mr_replace_into_other_db.js index 010b9d114afe9..a807b01f9686b 100644 --- a/jstests/core/query/mr/mr_replace_into_other_db.js +++ b/jstests/core/query/mr/mr_replace_into_other_db.js @@ -25,7 +25,7 @@ assert.commandWorked(outDb.random_coll.insert({val: 1})); const outColl = outDb[outCollStr]; const mapFn = function() { - for (i = 0; i < this.a.length; i++) + for (let i = 0; i < this.a.length; i++) emit(this.a[i], 1); }; const reduceFn = function(k, vs) { diff --git a/jstests/core/query/mr/mr_sort.js b/jstests/core/query/mr/mr_sort.js index 11ced9f2722f8..b01eb832e7168 100644 --- a/jstests/core/query/mr/mr_sort.js +++ b/jstests/core/query/mr/mr_sort.js @@ -11,7 +11,7 @@ // requires_scripting, // ] -t = db.mr_sort; +let t = db.mr_sort; t.drop(); t.createIndex({x: 1}); @@ -27,15 +27,15 @@ t.insert({x: 7}); t.insert({x: 5}); t.insert({x: 6}); -m = function() { +let m = function() { emit("a", this.x); }; -r = function(k, v) { +let r = function(k, v) { return Array.sum(v); }; -out = db.mr_sort_out; +let out = db.mr_sort_out; assert.commandWorked(t.mapReduce(m, r, out.getName())); assert.eq([{_id: "a", value: 55}], out.find().toArray(), "A1"); out.drop(); diff --git a/jstests/core/query/ne/ne1.js b/jstests/core/query/ne/ne1.js index 5069637eb30ee..5135294a43323 100644 --- a/jstests/core/query/ne/ne1.js +++ b/jstests/core/query/ne/ne1.js @@ -1,4 +1,4 @@ -t = db.ne1; +let t = db.ne1; t.drop(); t.save({x: 1}); diff --git a/jstests/core/query/ne/ne2.js b/jstests/core/query/ne/ne2.js index 8f2b3d52f4c63..4c8654560d3d2 100644 --- a/jstests/core/query/ne/ne2.js +++ b/jstests/core/query/ne/ne2.js @@ -3,7 +3,7 @@ // assumes_read_concern_local, // ] -t = db.jstests_ne2; +let t = db.jstests_ne2; t.drop(); t.createIndex({a: 1}); @@ -12,7 +12,7 @@ t.save({a: 0}); t.save({a: 0}); t.save({a: 0.5}); -e = t.find({a: {$ne: 0}}).explain(true); +let e = t.find({a: {$ne: 0}}).explain(true); assert.eq(2, e.executionStats.nReturned, 'A'); e = t.find({a: {$gt: -1, $lt: 1, $ne: 0}}).explain(true); diff --git a/jstests/core/query/ne/ne3.js b/jstests/core/query/ne/ne3.js index 5c38858c019dd..ccdb06a357f51 100644 --- a/jstests/core/query/ne/ne3.js +++ b/jstests/core/query/ne/ne3.js @@ -1,6 +1,6 @@ // don't allow most operators with regex -t = db.jstests_ne3; +let t = db.jstests_ne3; t.drop(); assert.throws(function() { diff --git a/jstests/core/query/nin/nin.js b/jstests/core/query/nin/nin.js index b9887e4882899..f327bb1315ba7 100644 --- a/jstests/core/query/nin/nin.js +++ b/jstests/core/query/nin/nin.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.jstests_nin; +let t = db.jstests_nin; t.drop(); function checkEqual(name, key, value) { @@ -16,7 +16,7 @@ function checkEqual(name, key, value) { " != " + t.find().count()); } -doTest = function(n) { +let doTest = function(n) { t.save({a: [1, 2, 3]}); 
t.save({a: [1, 2, 4]}); t.save({a: [1, 8, 5]}); diff --git a/jstests/core/query/nin/nin2.js b/jstests/core/query/nin/nin2.js index d134f5ad4a496..de373c7119059 100644 --- a/jstests/core/query/nin/nin2.js +++ b/jstests/core/query/nin/nin2.js @@ -2,13 +2,13 @@ // Check that $nin is the opposite of $in SERVER-3264 -t = db.jstests_nin2; +let t = db.jstests_nin2; t.drop(); // Check various operator types. function checkOperators(array, inMatches) { - inCount = inMatches ? 1 : 0; - notInCount = 1 - inCount; + let inCount = inMatches ? 1 : 0; + let notInCount = 1 - inCount; assert.eq(inCount, t.count({foo: {$in: array}})); assert.eq(notInCount, t.count({foo: {$not: {$in: array}}})); assert.eq(notInCount, t.count({foo: {$nin: array}})); diff --git a/jstests/core/query/not/not1.js b/jstests/core/query/not/not1.js index 0726895ebbd83..8ccfeaf7d8769 100644 --- a/jstests/core/query/not/not1.js +++ b/jstests/core/query/not/not1.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.not1; +let t = db.not1; t.drop(); t.insert({a: 1}); diff --git a/jstests/core/query/null_field_name.js b/jstests/core/query/null_field_name.js index f90ce65cc872e..58d6441ce1cb0 100644 --- a/jstests/core/query/null_field_name.js +++ b/jstests/core/query/null_field_name.js @@ -5,4 +5,4 @@ assert.throws(function() { assert.throws(function() { Object.bsonsize({"\0asdf": 1}); -}, [], "null char in field name"); \ No newline at end of file +}, [], "null char in field name"); diff --git a/jstests/core/query/null_query_semantics.js b/jstests/core/query/null_query_semantics.js index 71cd4b7c84fcc..2a1369a44caa4 100644 --- a/jstests/core/query/null_query_semantics.js +++ b/jstests/core/query/null_query_semantics.js @@ -11,12 +11,9 @@ // not_allowed_with_security_token, // ] // -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For 'resultsEq'. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; function extractAValues(results) { return results.map(function(res) { @@ -817,5 +814,4 @@ coll = db.getCollection(collNamePrefix + collCount++); coll.drop(); assert.commandFailedWithCode( coll.createIndex({a: 1}, {partialFilterExpression: {a: {$elemMatch: {$ne: null}}}}), - ErrorCodes.CannotCreateIndex); -}()); + ErrorCodes.CannotCreateIndex); \ No newline at end of file diff --git a/jstests/core/query/number/numberint.js b/jstests/core/query/number/numberint.js index 55c923aea7943..f6ed7b5b8d7f4 100644 --- a/jstests/core/query/number/numberint.js +++ b/jstests/core/query/number/numberint.js @@ -1,14 +1,14 @@ assert.eq.automsg("0", "new NumberInt()"); -n = new NumberInt(4); +let n = new NumberInt(4); assert.eq.automsg("4", "n"); assert.eq.automsg("4", "n.toNumber()"); assert.eq.automsg("8", "n + 4"); assert.eq.automsg("'NumberInt(4)'", "n.toString()"); assert.eq.automsg("'NumberInt(4)'", "tojson( n )"); -a = {}; +let a = {}; a.a = n; -p = tojson(a); +let p = tojson(a); assert.eq.automsg("'{ \"a\" : NumberInt(4) }'", "p"); assert.eq.automsg("NumberInt(4 )", "eval( tojson( NumberInt( 4 ) ) )"); @@ -63,12 +63,10 @@ assert(NumberInt(1), "to bool a"); // assert( ! 
NumberInt( 0 ) , "to bool b" ); // create doc with int value in db -t = db.getCollection("numberint"); +let t = db.getCollection("numberint"); t.drop(); -o = { - a: NumberInt(42) -}; +let o = {a: NumberInt(42)}; t.save(o); assert.eq(42, t.findOne().a, "save doc 1"); @@ -76,7 +74,7 @@ assert.eq(1, t.find({a: {$type: 16}}).count(), "save doc 2"); assert.eq(0, t.find({a: {$type: 1}}).count(), "save doc 3"); // roundtripping -mod = t.findOne({a: 42}); +let mod = t.findOne({a: 42}); mod.a += 10; mod.b = "foo"; delete mod._id; diff --git a/jstests/core/query/number/numberlong.js b/jstests/core/query/number/numberlong.js index a7dfd014539e1..d3477fd9ce0b8 100644 --- a/jstests/core/query/number/numberlong.js +++ b/jstests/core/query/number/numberlong.js @@ -1,14 +1,14 @@ assert.eq.automsg("0", "new NumberLong()"); -n = new NumberLong(4); +let n = new NumberLong(4); assert.eq.automsg("4", "n"); assert.eq.automsg("4", "n.toNumber()"); assert.eq.automsg("8", "n + 4"); assert.eq.automsg("'NumberLong(4)'", "n.toString()"); assert.eq.automsg("'NumberLong(4)'", "tojson( n )"); -a = {}; +let a = {}; a.a = n; -p = tojson(a); +let p = tojson(a); assert.eq.automsg("'{ \"a\" : NumberLong(4) }'", "p"); assert.eq.automsg("NumberLong(4 )", "eval( tojson( NumberLong( 4 ) ) )"); diff --git a/jstests/core/query/number/numberlong2.js b/jstests/core/query/number/numberlong2.js index be254027b7919..6d04ddd84e97b 100644 --- a/jstests/core/query/number/numberlong2.js +++ b/jstests/core/query/number/numberlong2.js @@ -4,7 +4,7 @@ // Test precision of NumberLong values with v1 index code SERVER-3717 -t = db.jstests_numberlong2; +let t = db.jstests_numberlong2; t.drop(); t.createIndex({x: 1}); @@ -24,8 +24,8 @@ chk(NumberLong("4503599627370497")); t.remove({}); -s = "11235399833116571"; -for (i = 99; i >= 0; --i) { +let s = "11235399833116571"; +for (let i = 99; i >= 0; --i) { t.save({x: NumberLong(s + i)}); } diff --git a/jstests/core/query/number/numberlong3.js b/jstests/core/query/number/numberlong3.js index 0dabdec2a0506..ac89075ee7503 100644 --- a/jstests/core/query/number/numberlong3.js +++ b/jstests/core/query/number/numberlong3.js @@ -1,26 +1,26 @@ // Test sorting with long longs and doubles - SERVER-3719 -t = db.jstests_numberlong3; +let t = db.jstests_numberlong3; t.drop(); -s = "11235399833116571"; -for (i = 10; i >= 0; --i) { - n = NumberLong(s + i); +let s = "11235399833116571"; +for (let i = 10; i >= 0; --i) { + let n = NumberLong(s + i); t.save({x: n}); if (0) { // SERVER-3719 t.save({x: n.floatApprox}); } } -ret = t.find().sort({x: 1}).toArray().filter(function(x) { +let ret = t.find().sort({x: 1}).toArray().filter(function(x) { return typeof (x.x.floatApprox) != 'undefined'; }); // printjson( ret ); -for (i = 1; i < ret.length; ++i) { - first = ret[i - 1].x.toString(); - second = ret[i].x.toString(); +for (let i = 1; i < ret.length; ++i) { + let first = ret[i - 1].x.toString(); + let second = ret[i].x.toString(); if (first.length == second.length) { assert.lte(ret[i - 1].x.toString(), ret[i].x.toString()); } diff --git a/jstests/core/query/number/numberlong4.js b/jstests/core/query/number/numberlong4.js index d7d73898b34b9..2cd381cc61c9a 100644 --- a/jstests/core/query/number/numberlong4.js +++ b/jstests/core/query/number/numberlong4.js @@ -1,7 +1,7 @@ // Test handling of comparison between long longs and their double approximations in btrees - // SERVER-3719. 
-t = db.jstests_numberlong4; +let t = db.jstests_numberlong4; t.drop(); if (0) { // SERVER-3719 @@ -10,9 +10,9 @@ if (0) { // SERVER-3719 Random.setRandomSeed(); - s = "11235399833116571"; - for (i = 0; i < 10000; ++i) { - n = NumberLong(s + Random.randInt(10)); + let s = "11235399833116571"; + for (let i = 0; i < 10000; ++i) { + let n = NumberLong(s + Random.randInt(10)); t.insert({x: (Random.randInt(2) ? n : n.floatApprox)}); } diff --git a/jstests/core/query/objid/objid1.js b/jstests/core/query/objid/objid1.js index d08089c26dbd9..5c252a9531db3 100644 --- a/jstests/core/query/objid/objid1.js +++ b/jstests/core/query/objid/objid1.js @@ -1,18 +1,16 @@ -t = db.objid1; +let t = db.objid1; t.drop(); -b = new ObjectId(); +let b = new ObjectId(); assert(b.str, "A"); -a = new ObjectId(b.str); +let a = new ObjectId(b.str); assert.eq(a.str, b.str, "B"); t.save({a: a}); assert(t.findOne().a.isObjectId, "C"); assert.eq(a.str, t.findOne().a.str, "D"); -x = { - a: new ObjectId() -}; +let x = {a: new ObjectId()}; eval(" y = " + tojson(x)); assert.eq(x.a.str, y.a.str, "E"); diff --git a/jstests/core/query/objid/objid2.js b/jstests/core/query/objid/objid2.js index 247843b587b3f..d25b1a6d9dc76 100644 --- a/jstests/core/query/objid/objid2.js +++ b/jstests/core/query/objid/objid2.js @@ -1,4 +1,4 @@ -t = db.objid2; +let t = db.objid2; t.drop(); t.save({_id: 517, a: "hello"}); diff --git a/jstests/core/query/objid/objid3.js b/jstests/core/query/objid/objid3.js index 12d45530e52cf..79e495e25cb49 100644 --- a/jstests/core/query/objid/objid3.js +++ b/jstests/core/query/objid/objid3.js @@ -1,4 +1,4 @@ -t = db.objid3; +let t = db.objid3; t.drop(); t.save({a: "bob", _id: 517}); diff --git a/jstests/core/query/objid/objid4.js b/jstests/core/query/objid/objid4.js index 7513e07702967..de82d397b5830 100644 --- a/jstests/core/query/objid/objid4.js +++ b/jstests/core/query/objid/objid4.js @@ -1,13 +1,11 @@ - - -o = new ObjectId(); +let o = new ObjectId(); assert(o.str); -a = new ObjectId(o.str); +let a = new ObjectId(o.str); assert.eq(o.str, a.str); assert.eq(a.str, a.str.toString()); -b = ObjectId(o.str); +let b = ObjectId(o.str); assert.eq(o.str, b.str); assert.eq(b.str, b.str.toString()); diff --git a/jstests/core/query/objid/objid5.js b/jstests/core/query/objid/objid5.js index c656b286f5bde..c8599509c864b 100644 --- a/jstests/core/query/objid/objid5.js +++ b/jstests/core/query/objid/objid5.js @@ -1,11 +1,11 @@ -t = db.objid5; +let t = db.objid5; t.drop(); t.save({_id: 5.5}); assert.eq(18, Object.bsonsize(t.findOne()), "A"); -x = db.runCommand({features: 1}); -y = db.runCommand({features: 1, oidReset: 1}); +let x = db.runCommand({features: 1}); +let y = db.runCommand({features: 1, oidReset: 1}); if (!x.ok) print("x: " + tojson(x)); diff --git a/jstests/core/query/objid/objid7.js b/jstests/core/query/objid/objid7.js index 4c3505f8965b1..2fc289b600054 100644 --- a/jstests/core/query/objid/objid7.js +++ b/jstests/core/query/objid/objid7.js @@ -1,7 +1,6 @@ - -a = new ObjectId("4c1a478603eba73620000000"); -b = new ObjectId("4c1a478603eba73620000000"); -c = new ObjectId(); +let a = new ObjectId("4c1a478603eba73620000000"); +let b = new ObjectId("4c1a478603eba73620000000"); +let c = new ObjectId(); assert.eq(a.toString(), b.toString(), "A"); assert.eq(a.toString(), "ObjectId(\"4c1a478603eba73620000000\")", "B"); diff --git a/jstests/core/query/or/or1.js b/jstests/core/query/or/or1.js index d90947d5f8716..df3f1eb0ac415 100644 --- a/jstests/core/query/or/or1.js +++ b/jstests/core/query/or/or1.js @@ -1,22 +1,22 @@ -t = 
db.jstests_or1; +let t = db.jstests_or1; t.drop(); -checkArrs = function(a, b) { +let checkArrs = function(a, b) { assert.eq(a.length, b.length); - aStr = []; - bStr = []; + let aStr = []; + let bStr = []; a.forEach(function(x) { aStr.push(tojson(x)); }); b.forEach(function(x) { bStr.push(tojson(x)); }); - for (i = 0; i < aStr.length; ++i) { + for (let i = 0; i < aStr.length; ++i) { assert.neq(-1, bStr.indexOf(aStr[i])); } }; -doTest = function() { +let doTest = function() { t.save({_id: 0, a: 1}); t.save({_id: 1, a: 2}); t.save({_id: 2, b: 1}); @@ -36,10 +36,10 @@ doTest = function() { t.find({$or: ["a"]}).toArray(); }); - a1 = t.find({$or: [{a: 1}]}).toArray(); + let a1 = t.find({$or: [{a: 1}]}).toArray(); checkArrs([{_id: 0, a: 1}, {_id: 4, a: 1, b: 1}, {_id: 5, a: 1, b: 2}], a1); - a1b2 = t.find({$or: [{a: 1}, {b: 2}]}).toArray(); + let a1b2 = t.find({$or: [{a: 1}, {b: 2}]}).toArray(); checkArrs( [ {_id: 0, a: 1}, diff --git a/jstests/core/query/or/or2.js b/jstests/core/query/or/or2.js index 03acfc32174ec..e572255afd1cc 100644 --- a/jstests/core/query/or/or2.js +++ b/jstests/core/query/or/or2.js @@ -2,11 +2,8 @@ // assumes_read_concern_local, // ] -(function() { -"use strict"; - // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js"; const t = db.jstests_or2; t.drop(); @@ -82,5 +79,4 @@ doTest(); assert(t.drop()); assert.commandWorked(t.createIndex({x: 1, a: 1, b: 1})); -doTest(); -})(); +doTest(); \ No newline at end of file diff --git a/jstests/core/query/or/or3.js b/jstests/core/query/or/or3.js index 57b151d8f4bc0..7502afee49cc6 100644 --- a/jstests/core/query/or/or3.js +++ b/jstests/core/query/or/or3.js @@ -2,11 +2,8 @@ // assumes_read_concern_local, // ] -(function() { -"use strict"; - // Include helpers for analyzing explain output. 
-load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js"; const t = db.jstests_or3; t.drop(); @@ -81,5 +78,4 @@ doTest(); assert(t.drop()); assert.commandWorked(t.createIndex({x: 1, a: 1, b: 1})); -doTest(); -})(); +doTest(); \ No newline at end of file diff --git a/jstests/core/query/or/or5.js b/jstests/core/query/or/or5.js index 1843c3d6cb770..ba4f529359c5c 100644 --- a/jstests/core/query/or/or5.js +++ b/jstests/core/query/or/or5.js @@ -2,7 +2,7 @@ // requires_getmore, // ] -t = db.jstests_or5; +let t = db.jstests_or5; t.drop(); t.createIndex({a: 1}); @@ -28,7 +28,7 @@ assert.eq.automsg("6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).toArray().length"); assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).toArray().length"); assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:3},{c:6}]} ).toArray().length"); -for (i = 2; i <= 7; ++i) { +for (var i = 2; i <= 7; ++i) { assert.eq.automsg("7", "t.find( {$or:[{a:2},{b:3},{c:4}]} ).batchSize( i ).toArray().length"); assert.eq.automsg("6", "t.find( {$or:[{a:6},{b:3},{c:4}]} ).batchSize( i ).toArray().length"); assert.eq.automsg("6", "t.find( {$or:[{a:2},{b:6},{c:4}]} ).batchSize( i ).toArray().length"); diff --git a/jstests/core/query/or/or7.js b/jstests/core/query/or/or7.js index e639a1957392c..414fbff858e3c 100644 --- a/jstests/core/query/or/or7.js +++ b/jstests/core/query/or/or7.js @@ -1,6 +1,6 @@ // @tags: [requires_non_retryable_writes] -t = db.jstests_or7; +let t = db.jstests_or7; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/query/or/or8.js b/jstests/core/query/or/or8.js index f33ef3146ce2d..00d1ea0cbed12 100644 --- a/jstests/core/query/or/or8.js +++ b/jstests/core/query/or/or8.js @@ -4,7 +4,7 @@ // missing collection -t = db.jstests_or8; +let t = db.jstests_or8; t.drop(); t.find({"$or": [{"PropA": {"$lt": "b"}}, {"PropA": {"$lt": "b", "$gt": "a"}}]}).toArray(); diff --git a/jstests/core/query/or/or9.js b/jstests/core/query/or/or9.js index 4938e84ca0443..bcd527bb601cd 100644 --- a/jstests/core/query/or/or9.js +++ b/jstests/core/query/or/or9.js @@ -2,7 +2,7 @@ // index skipping and previous index range negation -t = db.jstests_or9; +let t = db.jstests_or9; t.drop(); t.createIndex({a: 1, b: 1}); diff --git a/jstests/core/query/or/or_to_in.js b/jstests/core/query/or/or_to_in.js index 332a2e2e55936..2f8ce52353fa1 100644 --- a/jstests/core/query/or/or_to_in.js +++ b/jstests/core/query/or/or_to_in.js @@ -4,14 +4,11 @@ // This test is not prepared to handle explain output for sharded collections. // @tags: [ // assumes_unsharded_collection, -// requires_fcv_63, +// requires_fcv_70, // ] -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For assertArrayEq. 
-load("jstests/libs/analyze_plan.js"); +import {getWinningPlan} from "jstests/libs/analyze_plan.js"; var coll = db.orToIn; coll.drop(); @@ -168,5 +165,4 @@ coll.drop(); assert.commandWorked(db.createCollection("orToIn", {collation: {locale: 'de'}})); coll = db.orToIn; assert.commandWorked(coll.insert(data)); -testOrToIn(positiveTestQueries); -}()); +testOrToIn(positiveTestQueries); \ No newline at end of file diff --git a/jstests/core/query/or/orb.js b/jstests/core/query/or/orb.js index aa75bb97e0be9..1c4a65ad91256 100644 --- a/jstests/core/query/or/orb.js +++ b/jstests/core/query/or/orb.js @@ -15,4 +15,4 @@ t.createIndex({a: 1, b: -1}); assert.eq.automsg("1", "t.count( {$or: [ { a: { $gt:0,$lt:2 } }, { a: { $gt:-1,$lt:3 } } ] } )"); assert.eq.automsg( - "1", "t.count( {$or: [ { a:1, b: { $gt:0,$lt:2 } }, { a:1, b: { $gt:-1,$lt:3 } } ] } )"); \ No newline at end of file + "1", "t.count( {$or: [ { a:1, b: { $gt:0,$lt:2 } }, { a:1, b: { $gt:-1,$lt:3 } } ] } )"); diff --git a/jstests/core/query/or/orc.js b/jstests/core/query/or/orc.js index 001d6f4bc9fe1..bb632e0ce6cb7 100644 --- a/jstests/core/query/or/orc.js +++ b/jstests/core/query/or/orc.js @@ -1,7 +1,7 @@ // @tags: [requires_non_retryable_writes] // test that or duplicates are dropped in certain special cases -t = db.jstests_orc; +let t = db.jstests_orc; t.drop(); // The goal here will be to ensure the full range of valid values is scanned for each or clause, in diff --git a/jstests/core/query/or/ore.js b/jstests/core/query/or/ore.js index 756db6215c59f..775d5e3b2c113 100644 --- a/jstests/core/query/or/ore.js +++ b/jstests/core/query/or/ore.js @@ -1,7 +1,7 @@ // verify that index direction is considered when deduping based on an earlier // index -t = db.jstests_ore; +let t = db.jstests_ore; t.drop(); t.createIndex({a: -1}); diff --git a/jstests/core/query/or/org.js b/jstests/core/query/or/org.js index 0c6808330c8ee..4780c54582bf2 100644 --- a/jstests/core/query/or/org.js +++ b/jstests/core/query/or/org.js @@ -2,7 +2,7 @@ // SERVER-2282 $or de duping with sparse indexes -t = db.jstests_org; +let t = db.jstests_org; t.drop(); t.createIndex({a: 1}, {sparse: true}); diff --git a/jstests/core/query/or/orh.js b/jstests/core/query/or/orh.js index 91ce121e5a4d3..9a870cf27b10c 100644 --- a/jstests/core/query/or/orh.js +++ b/jstests/core/query/or/orh.js @@ -2,7 +2,7 @@ // SERVER-2831 Demonstration of sparse index matching semantics in a multi index $or query. -t = db.jstests_orh; +let t = db.jstests_orh; t.drop(); t.createIndex({a: 1}, {sparse: true}); diff --git a/jstests/core/query/or/orj.js b/jstests/core/query/or/orj.js index 6aabb3c39c7f4..9ff2288dc6bcc 100644 --- a/jstests/core/query/or/orj.js +++ b/jstests/core/query/or/orj.js @@ -1,6 +1,6 @@ // Test nested $or clauses SERVER-2585 SERVER-3192 -t = db.jstests_orj; +let t = db.jstests_orj; t.drop(); t.save({a: 1, b: 2}); diff --git a/jstests/core/query/or/ork.js b/jstests/core/query/or/ork.js index 8ce2346b6f322..525181f9061b7 100644 --- a/jstests/core/query/or/ork.js +++ b/jstests/core/query/or/ork.js @@ -1,6 +1,6 @@ // SERVER-2585 Test $or clauses within indexed top level $or clauses. -t = db.jstests_ork; +let t = db.jstests_ork; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/query/or/oro.js b/jstests/core/query/or/oro.js index 5ceda73818ec9..43bd0b87a811d 100644 --- a/jstests/core/query/or/oro.js +++ b/jstests/core/query/or/oro.js @@ -5,26 +5,26 @@ // Test $or query with several clauses on separate indexes. 
-t = db.jstests_oro; +let t = db.jstests_oro; t.drop(); -orClauses = []; -for (idxKey = 'a'; idxKey <= 'aaaaaaaaaa'; idxKey += 'a') { - idx = {}; +let orClauses = []; +for (let idxKey = 'a'; idxKey <= 'aaaaaaaaaa'; idxKey += 'a') { + let idx = {}; idx[idxKey] = 1; t.createIndex(idx); - for (i = 0; i < 200; ++i) { + for (let i = 0; i < 200; ++i) { t.insert(idx); } orClauses.push(idx); } printjson(t.find({$or: orClauses}).explain()); -c = t.find({$or: orClauses}).batchSize(100); -count = 0; +let c = t.find({$or: orClauses}).batchSize(100); +let count = 0; while (c.hasNext()) { - for (i = 0; i < 50 && c.hasNext(); ++i, c.next(), ++count) + for (let i = 0; i < 50 && c.hasNext(); ++i, c.next(), ++count) ; // Interleave with another operation. t.stats(); diff --git a/jstests/core/query/or_use_clustered_collection.js b/jstests/core/query/or_use_clustered_collection.js new file mode 100644 index 0000000000000..fce3f3347cd4d --- /dev/null +++ b/jstests/core/query/or_use_clustered_collection.js @@ -0,0 +1,373 @@ +/** + * Verifies that $or queries on clustered collections produce plans with IXSCAN and + * CLUSTERED_IXSCAN stages when possible. + * @tags: [ + * requires_fcv_71, + * # Explain for the aggregate command cannot run within a multi-document transaction. + * does_not_support_transactions, + * # Refusing to run a test that issues an aggregation command with explain because it may return + * # incomplete results if interrupted by a stepdown. + * does_not_support_stepdowns + * ] + */ + +import { + getAggPlanStages, + getPlanStage, + getPlanStages, + getWinningPlan +} from "jstests/libs/analyze_plan.js"; +load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; + +const coll = db.or_use_clustered_collection; +assertDropCollection(db, coll.getName()); + +// Create a clustered collection and create indexes. +assert.commandWorked( + db.createCollection(coll.getName(), {clusteredIndex: {key: {_id: 1}, unique: true}})); +assert.commandWorked(coll.createIndex({a: 1})); +assert.commandWorked(coll.createIndex({c: 1})); +assert.commandWorked(coll.createIndex({b: "text"})); + +// Insert documents, and store them to be used later in the test. +const docs = []; +const textFields = ["foo", "one", "two", "three", "four", "foo", "foo", "seven", "eight", "nine"]; +const numDocs = textFields.length; +for (let i = 0; i < numDocs; i++) { + docs.push({b: textFields[i], a: i, _id: i, c: i * 2, d: [{e: i * 2}, {g: i / 2}], noIndex: i}); +} +assert.commandWorked(coll.insertMany(docs)); + +function haveShardMergeStage(winningPlan, stage = "SHARD_MERGE") { + let shardMergeStage = getPlanStages(winningPlan, stage); + return shardMergeStage.length > 0; +} + +function assertCorrectResults({query, expectedDocIds, projection, limit, skip}) { + // Test different find queries. With and without a sort, and with and without a projection. + let results = query.toArray(); + let expectedResults = []; + // Create the document set that we expect. + if (skip) { + // Confirm we only skipped 1 document. + assert.eq(results.length, expectedDocIds.length - 1); + // Remove the document that was skipped. + expectedDocIds = expectedDocIds.filter(id => results.some(el => el["_id"] == id)); + } + expectedDocIds.forEach(id => projection + ? 
expectedResults.push({"_id": docs[id]["_id"], "a": docs[id]["a"]}) + : expectedResults.push(docs[id])); + if (limit) { + assert.eq(results.length, 2); + assert.neq(results[0]["_id"], results[1]["_id"]); + for (let i = 0; i < results.length; ++i) { + let doc = expectedResults.filter(r => r["_id"] == results[i]["_id"]); + assert.eq(1, doc.length); + assert.docEq(doc[0], results[i]); + } + return; + } + + assert.sameMembers(results, expectedResults); +} + +// $or query which uses a clustered collection scan plan for one branch and secondary index plan for +// the other, and returns no matching documents. +assertCorrectResults({query: coll.find({$or: [{_id: 123}, {a: 11}]}), expectedDocIds: []}); + +//$or query which uses a clustered collection scan plan and secondary index plan, and each predicate +// matches some of the documents. +assertCorrectResults( + {query: coll.find({$or: [{_id: 9}, {a: {$lte: 3}}]}), expectedDocIds: [0, 1, 2, 3, 9]}); + +// $or query which uses a clustered collection scan plan and secondary index plan, and some +// documents match both predicates. +assertCorrectResults( + {query: coll.find({$or: [{_id: {$lt: 2}}, {a: {$lte: 3}}]}), expectedDocIds: [0, 1, 2, 3]}); + +// $or query that uses two clustered collection scan plans. +assertCorrectResults( + {query: coll.find({$or: [{_id: {$lt: 2}}, {_id: {$gt: 8}}]}), expectedDocIds: [0, 1, 9]}); + +// $or query that uses two secondary index scan plans. +assertCorrectResults( + {query: coll.find({$or: [{a: {$lt: 2}}, {a: {$gt: 8}}]}), expectedDocIds: [0, 1, 9]}); + +function validateQueryPlan({query, expectedStageCount, expectedDocIds, noFetchWithCount}) { + // TODO SERVER-77601 add coll.find(query).sort({_id: 1}) to 'queries'. + const testCases = [ + { + explainQuery: coll.explain().find(query).finish(), + additionalStages: {}, + actualQuery: coll.find(query) + }, + { + explainQuery: coll.explain().find(query, {_id: 1, a: 1}).limit(2).finish(), + additionalStages: {"LIMIT": 1, "PROJECTION_SIMPLE": 1}, + actualQuery: coll.find(query, {_id: 1, a: 1}).limit(2), + }, + { + explainQuery: coll.explain().find(query).limit(2).finish(), + additionalStages: {"LIMIT": 1}, + actualQuery: coll.find(query).limit(2), + }, + { + explainQuery: coll.explain().find(query).skip(1).finish(), + additionalStages: {"SKIP": 1}, + actualQuery: coll.find(query).skip(1), + }, + { + explainQuery: coll.explain().aggregate([{$match: query}, {$project: {_id: 1, a: 1}}]), + additionalStages: {"PROJECTION_SIMPLE": 1}, + actualQuery: coll.aggregate([{$match: query}, {$project: {_id: 1, a: 1}}]), + aggregate: true, + }, + { + explainQuery: coll.explain().aggregate( + [{$match: query}, {$group: {_id: null, count: {$sum: 1}}}]), + additionalStages: {"GROUP": 1}, + actualQuery: coll.aggregate([{$match: query}, {$group: {_id: null, count: {$sum: 1}}}]), + aggregate: true + }, + { + explainQuery: coll.explain().find(query).count(), + additionalStages: {"COUNT": 1}, + actualQuery: coll.find(query).count(), + } + ]; + + testCases.forEach(test => { + const explain = test.explainQuery; + + // If there is a 'SHARD_MERGE' stage or 'shards', then we should expect more than our + // 'expectedStageCount', since each stage will appear for each shard. + const shardMergeStage = getPlanStage(explain, "SHARD_MERGE"); + const shards = "shards" in explain; + + // There won't be a 'FETCH' stage if we have a 'COUNT' or 'GROUP' stage with just index scan + // plans. 
+ const count = test.additionalStages.hasOwnProperty('COUNT'); + const fetch = expectedStageCount.hasOwnProperty('FETCH'); + const group = test.additionalStages.hasOwnProperty('GROUP'); + if (noFetchWithCount && (count || group) && fetch) { + expectedStageCount["FETCH"] = 0; + } + + // Classic engine doesn't have a GROUP stage like SBE for $group. + if (group && !checkSBEEnabled(db)) { + test.additionalStages["GROUP"] = 0; + } + + // Validate all the stages appear the correct number of times in the winning plan. + const expectedStages = Object.assign({}, expectedStageCount, test.additionalStages); + for (let stage in expectedStages) { + let planStages = + test.aggregate ? getAggPlanStages(explain, stage) : getPlanStages(explain, stage); + assert(planStages, tojson(explain)); + if (shardMergeStage || shards) { + assert.gte(planStages.length, + expectedStages[stage], + "Expected " + stage + " to appear, but got plan: " + tojson(explain)); + } else { + assert.eq(planStages.length, + expectedStages[stage], + "Expected " + stage + " to appear, but got plan: " + tojson(explain)); + } + } + + const projection = test.additionalStages.hasOwnProperty('PROJECTION_SIMPLE'); + const limit = test.additionalStages.hasOwnProperty('LIMIT'); + const skip = test.additionalStages.hasOwnProperty('SKIP'); + if (count || group) { + // If we have GROUP stage we are in an aggregation pipeline. + let results = group ? test.actualQuery.toArray()[0]["count"] : test.actualQuery; + assert.eq(expectedDocIds.length, + results, + "Expected " + expectedDocIds.length.toString() + " number of docs, but got " + + tojson(test.actualQuery)); + } else { + assertCorrectResults({ + query: test.actualQuery, + expectedDocIds: expectedDocIds, + projection: projection, + limit: limit, + skip: skip, + }); + } + }); +} + +// Validates that we use an OR stage with the correct plans for each child branch. +function validateQueryOR({query, expectedStageCount, expectedDocIds, noFetchWithCount}) { + expectedStageCount["OR"] = 1; + validateQueryPlan({ + query: query, + expectedStageCount: expectedStageCount, + expectedDocIds: expectedDocIds, + noFetchWithCount: noFetchWithCount + }); +} + +// $or with a CLUSTERED_IXSCAN stage and a IXSCAN stage. +validateQueryOR({ + query: {$or: [{_id: {$lt: 2}}, {a: 5}]}, + expectedStageCount: {"CLUSTERED_IXSCAN": 1, "IXSCAN": 1, "FETCH": 1}, + expectedDocIds: [0, 1, 5], +}); + +validateQueryOR({ + query: {$or: [{_id: 5}, {a: 6}]}, + expectedStageCount: {"CLUSTERED_IXSCAN": 1, "IXSCAN": 1, "FETCH": 1}, + expectedDocIds: [5, 6], +}); + +// $or with two IXSCAN stages. +validateQueryOR({ + query: {$or: [{c: {$gte: 10}}, {a: 0}]}, + expectedStageCount: {"IXSCAN": 2, "FETCH": 1}, + expectedDocIds: [0, 5, 6, 7, 8, 9], + // This is an optimization for IXSCAN for count queries that does not exist for plans with + // clustered indexes. + noFetchWithCount: true +}); + +// $or with 2 CLUSTERED_IXSCAN stages. +validateQueryOR({ + query: {$or: [{_id: {$lt: 1}}, {_id: {$gt: 8}}]}, + expectedStageCount: {"CLUSTERED_IXSCAN": 2}, + expectedDocIds: [0, 9] +}); + +validateQueryOR({ + query: {$or: [{_id: {$gt: 5}}, {_id: 8}]}, + expectedStageCount: {"CLUSTERED_IXSCAN": 2}, + expectedDocIds: [6, 7, 8, 9] +}); + +// $or with many children branches that are either IXSCAN or CLUSTERED_IXSCAN stages. Note that we +// expect our IXSCAN nodes to be optimized down to one stage. 
+validateQueryOR({
+    query: {$or: [{_id: {$gt: 5}}, {_id: 8}, {a: 1}, {a: 1}, {a: {$gte: 8}}]},
+    expectedStageCount: {"CLUSTERED_IXSCAN": 2, "IXSCAN": 1},
+    expectedDocIds: [1, 6, 7, 8, 9]
+});
+
+// $or with many children branches that are either IXSCAN or CLUSTERED_IXSCAN stages.
+validateQueryOR({
+    query: {$or: [{_id: {$gt: 7}}, {_id: 8}, {a: 1}, {a: {$gte: 8}}, {c: {$lt: 10}}]},
+    expectedStageCount: {"CLUSTERED_IXSCAN": 2, "IXSCAN": 2},
+    expectedDocIds: [0, 1, 2, 3, 4, 8, 9]
+});
+
+// $or query where the branch of the clustered collection scan is not a leaf node.
+validateQueryOR({
+    query: {$or: [{a: 1}, {$and: [{_id: {$gt: 7}}, {_id: {$lt: 10}}]}]},
+    expectedStageCount: {"CLUSTERED_IXSCAN": 1, "IXSCAN": 1, "FETCH": 1},
+    expectedDocIds: [1, 8, 9]
+});
+
+// $or inside an $and should not change, and still use a FETCH with an IXSCAN.
+validateQueryPlan({
+    query: {$and: [{a: {$gte: 8}}, {$or: [{_id: 2}, {c: {$gt: 10}}]}]},
+    expectedStageCount: {"FETCH": 1, "IXSCAN": 1, "OR": 0},
+    expectedDocIds: [8, 9],
+});
+
+// $or that can't use the clustered collection or another index should still fall back to COLLSCAN.
+validateQueryPlan({
+    query: {$or: [{noIndex: 3}, {_id: 1}]},
+    expectedStageCount: {"COLLSCAN": 1, "OR": 0},
+    expectedDocIds: [1, 3],
+});
+
+validateQueryPlan({
+    query: {$or: [{noIndex: 3}, {a: 1}]},
+    expectedStageCount: {"COLLSCAN": 1, "OR": 0},
+    expectedDocIds: [1, 3],
+});
+
+// $or inside an $elemMatch that is not indexed should not change, and still use a COLLSCAN.
+validateQueryPlan({
+    query: {d: {$elemMatch: {$or: [{e: 6}, {g: 2}]}}},
+    expectedStageCount: {"COLLSCAN": 1, "OR": 0},
+    expectedDocIds: [3, 4]
+});
+
+// $or inside an $elemMatch that is indexed should use only IXSCAN.
+assert.commandWorked(coll.createIndex({"d.e": 1}));
+assert.commandWorked(coll.createIndex({"d.g": 1}));
+validateQueryOR({
+    query: {d: {$elemMatch: {$or: [{e: 10}, {g: 4}]}}},
+    expectedStageCount: {"IXSCAN": 2, "COLLSCAN": 0},
+    expectedDocIds: [5, 8],
+});
+
+// TODO SERVER-77601 remove this function once this is supported in SBE.
+// MERGE_SORT plans with clustered collection scans are not allowed, so the plan should
+// fall back to using a collection scan.
+function validateQuerySort() {
+    let explain =
+        coll.explain().find({$or: [{_id: {$lt: 1}}, {_id: {$gt: 8}}]}).sort({_id: 1}).finish();
+    const winningPlan = getWinningPlan(explain.queryPlanner);
+    let expectedStageCount = {"MERGE_SORT": 0, "COLLSCAN": 1, "CLUSTERED_IXSCAN": 0, "OR": 0};
+    const shardMergeStage = haveShardMergeStage(winningPlan, "SHARD_MERGE_SORT");
+    const shards = "shards" in winningPlan;
+    for (var stage in expectedStageCount) {
+        let planStages = getPlanStages(winningPlan, stage);
+        assert(planStages, tojson(winningPlan));
+        if (shardMergeStage || shards) {
+            assert.gte(planStages.length,
+                       expectedStageCount[stage],
+                       "Expected " + stage + " to appear, but got plan: " + tojson(winningPlan));
+        } else {
+            assert.eq(planStages.length,
+                      expectedStageCount[stage],
+                      "Expected " + stage + " to appear, but got plan: " + tojson(winningPlan));
+        }
+    }
+    assertCorrectResults({
+        query: coll.find({$or: [{_id: {$lt: 1}}, {_id: {$gt: 8}}]}).sort({_id: 1}),
+        expectedDocIds: [0, 9]
+    });
+}
+validateQuerySort();
+
+//
+// These tests validate that $or queries with a text index work.
+//
+
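+// A minimal, read-only illustration of what the following cases verify (it simply restates the
+// stage expectations of the first case below and adds no new coverage; "foo" is the search term
+// the documents in this test are set up to match):
+{
+    const textOrExplain =
+        coll.explain().find({$or: [{$text: {$search: "foo"}}, {_id: 1}]}).finish();
+    assert.gte(getPlanStages(textOrExplain, "TEXT_MATCH").length, 1, tojson(textOrExplain));
+    assert.gte(getPlanStages(textOrExplain, "CLUSTERED_IXSCAN").length, 1, tojson(textOrExplain));
+}
+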
+// Basic case: $or with text and a clustered collection scan.
+validateQueryOR({
+    query: {$or: [{$text: {$search: "foo"}}, {_id: 1}]},
+    expectedStageCount: {"CLUSTERED_IXSCAN": 1, "TEXT_MATCH": 1, "IXSCAN": 1},
+    expectedDocIds: [0, 1, 5, 6]
+});
+
+// $or with a text index works with a clustered collection scan plan and a secondary index scan plan.
+// We expect 2 IXSCAN nodes because the TEXT_MATCH stage has an IXSCAN node child, and there is an
+// index scan plan for the {a: 9} predicate.
+validateQueryOR({
+    query: {$or: [{$text: {$search: "foo"}}, {_id: {$lt: 2}}, {a: 9}]},
+    expectedStageCount: {"CLUSTERED_IXSCAN": 1, "TEXT_MATCH": 1, "IXSCAN": 2},
+    expectedDocIds: [0, 1, 5, 6, 9]
+});
+
+// $or inside an $and with a text index works.
+validateQueryPlan({
+    query: {$and: [{a: {$gte: 8}}, {$or: [{$text: {$search: "foo"}}, {c: {$gt: 10}}]}]},
+    expectedStageCount: {"FETCH": 2, "IXSCAN": 2, "TEXT_MATCH": 1},
+    expectedDocIds: [8, 9],
+});
+
+// $or inside an $or with a text index works.
+validateQueryOR({
+    query: {$or: [{_id: {$gte: 8}}, {$or: [{$text: {$search: "foo"}}, {c: {$gt: 10}}]}]},
+    expectedStageCount: {"FETCH": 2, "IXSCAN": 2, "TEXT_MATCH": 1, "CLUSTERED_IXSCAN": 1},
+    expectedDocIds: [0, 5, 6, 7, 8, 9],
+});
+
+// $or with a text index and an unindexed field should still fail.
+const err =
+    assert.throws(() => coll.find({$or: [{$text: {$search: "foo"}}, {noIndex: 1}]}).toArray());
+assert.commandFailedWithCode(err, ErrorCodes.NoQueryExecutionPlans);
diff --git a/jstests/core/query/partial_index_logical.js b/jstests/core/query/partial_index_logical.js
index fad4d87136676..b9f3f210b8b29 100644
--- a/jstests/core/query/partial_index_logical.js
+++ b/jstests/core/query/partial_index_logical.js
@@ -3,8 +3,6 @@
  * containing logical expressions ($and, $or).
  *
  * @tags: [
- * # TODO SERVER-67607: Test plan cache with CQF enabled.
- * cqf_incompatible,
  * # Since the plan cache is per-node state, this test assumes that all operations are happening
  * # against the same mongod.
  * assumes_read_preference_unchanged,
@@ -18,10 +16,7 @@
  * tenant_migration_incompatible,
  * ]
  */
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");  // For getPlanCacheKeyFromShape.
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
 
 const coll = db[jsTestName()];
 coll.drop();
@@ -174,5 +169,4 @@ coll.drop();
 // a very similar shape, however the predicate parameters are not satisfied by the partial
 // filter expression.
 assert.eq(2, coll.find({num: {$gt: 0, $lt: 12}}).itcount());
-})();
-})();
+})();
\ No newline at end of file
diff --git a/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js b/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
index 941ec0106b597..42d63a43d894d 100644
--- a/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
+++ b/jstests/core/query/plan_cache/cached_plan_trial_does_not_discard_work.js
@@ -18,20 +18,17 @@
 // # Plan cache state is node-local and will not get migrated alongside tenant data.
 // tenant_migration_incompatible,
 // # TODO SERVER-67607: Test plan cache with CQF enabled.
-// cqf_incompatible,
+// cqf_experimental_incompatible,
 // ]
-(function() {
-'use strict';
-
 load("jstests/libs/profiler.js");  // getLatestProfileEntry.
-load("jstests/libs/sbe_util.js");  // For checkSBEEnabled.
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is disabled"); - return; + quit(); } -const testDb = db.getSiblingDB('cached_plan_trial_does_not_discard_work'); +const testDb = db.getSiblingDB('trial_does_not_discard_work'); assert.commandWorked(testDb.dropDatabase()); const coll = testDb.getCollection('test'); @@ -129,5 +126,4 @@ assert.eq(numResults, 0); const replanProfileEntry = getLatestProfilerEntry( testDb, {'command.find': coll.getName(), 'command.comment': lastComment}); -assert(replanProfileEntry.replanned, replanProfileEntry); -}()); +assert(replanProfileEntry.replanned, replanProfileEntry); \ No newline at end of file diff --git a/jstests/core/query/plan_cache/collation_plan_cache.js b/jstests/core/query/plan_cache/collation_plan_cache.js index 99e983dd2fa86..9a96ae27a6634 100644 --- a/jstests/core/query/plan_cache/collation_plan_cache.js +++ b/jstests/core/query/plan_cache/collation_plan_cache.js @@ -9,16 +9,16 @@ // # former operation may be routed to a secondary in the replica set, whereas the latter must be // # routed to the primary. // assumes_read_preference_unchanged, +// # Make sure to obtain stable counts. Background tasks may create plan cache entries. +// assumes_standalone_mongod, // assumes_unsharded_collection, // does_not_support_stepdowns, // requires_fcv_61, // # Plan cache state is node-local and will not get migrated alongside tenant data. // tenant_migration_incompatible, // ] -(function() { -'use strict'; -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromExplain. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getPlanCacheKeyFromExplain} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; var coll = db.collation_plan_cache; coll.drop(); @@ -227,4 +227,3 @@ assert.commandWorked(coll.runCommand('planCacheClearFilters', assert.eq(0, coll.runCommand('planCacheListFilters').filters.length, 'unexpected number of plan cache filters'); -})(); diff --git a/jstests/core/query/plan_cache/plan_cache_clear.js b/jstests/core/query/plan_cache/plan_cache_clear.js index d03330ab08ec7..12d8c48137207 100644 --- a/jstests/core/query/plan_cache/plan_cache_clear.js +++ b/jstests/core/query/plan_cache/plan_cache_clear.js @@ -18,23 +18,71 @@ // # The SBE plan cache was first enabled in 6.3. // requires_fcv_63, // # TODO SERVER-67607: Test plan cache with CQF enabled. -// cqf_incompatible, +// cqf_experimental_incompatible, +// references_foreign_collection, // ] -(function() { -'use strict'; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getPlanCacheKeyFromPipeline, getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +load("jstests/libs/fixture_helpers.js"); const coll = db.jstests_plan_cache_clear; coll.drop(); -function numPlanCacheEntries(coll) { - return coll.aggregate([{$planCacheStats: {}}]).itcount(); +function planCacheContainsQuerySet(curCache, collArg, expectedQuerySetSize) { + const keyHashes = Array.from(curCache); + const res = + collArg.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: {$in: keyHashes}}}]) + .toArray(); + return (res.length == curCache.size && curCache.size == expectedQuerySetSize); +} + +// Run query 'queryArg' against collection 'collArg' and add it to the map 'curCache'. +// Check that the query produced 'resCount' results, and that 'curCache' contains +// 'numCachedQueries' entries. 
+// Essentially, this function runs a query and adds it both to the query cache and to
+// 'curCache', which should mirror the queries in the query cache.
+// This allows the test to keep curCache in sync with the query cache.
+function addToQueryCache(
+    {queryArg = {}, projectArg = {}, collArg, resCount, curCache, numCachedQueries}) {
+    let keyHash = '';
+    if (queryArg instanceof Array) {
+        assert.eq(resCount, collArg.aggregate(queryArg).toArray().length);
+        keyHash = getPlanCacheKeyFromPipeline(queryArg, collArg, db);
+    } else {
+        assert.eq(resCount, collArg.find(queryArg, projectArg).itcount());
+        keyHash = getPlanCacheKeyFromShape(
+            {query: queryArg, projection: projectArg, collection: collArg, db: db});
+    }
+    curCache.add(keyHash);
+    assert.eq(curCache.size, numCachedQueries);
+}
+
+// Remove a query both from the query cache and from curCache.
+// In this way both are kept in sync.
+function deleteFromQueryCache(queryArg, collArg, curCache) {
+    const beforeClearKeys =
+        collArg.aggregate([{$planCacheStats: {}}, {$project: {planCacheKey: 1}}])
+            .toArray()
+            .map(k => k.planCacheKey);
+    assert.commandWorked(collArg.runCommand('planCacheClear', {query: queryArg}));
+    const afterClearKeys = collArg.aggregate([{$planCacheStats: {}}, {$project: {planCacheKey: 1}}])
+                               .toArray()
+                               .map(k => k.planCacheKey);
+    for (let key of beforeClearKeys) {
+        if (!afterClearKeys.includes(key)) {
+            curCache.delete(key);
+        }
+    }
+}
+
+function clearQueryCaches(collArg, curCache) {
+    assert.commandWorked(collArg.runCommand('planCacheClear', {}));
+    curCache.clear();
 }
 
-function dumpPlanCacheState(coll) {
-    return coll.aggregate([{$planCacheStats: {}}]).toArray();
+function dumpPlanCacheState(collArg) {
+    return collArg.aggregate([{$planCacheStats: {}}]).toArray();
 }
 
 assert.commandWorked(coll.insert({a: 1, b: 1}));
@@ -46,42 +94,72 @@ assert.commandWorked(coll.insert({a: 2, b: 2}));
 assert.commandWorked(coll.createIndex({a: 1}));
 assert.commandWorked(coll.createIndex({a: 1, b: 1}));
 
+// The queries in this set are expected to be in the query cache at any time.
+const cachedQueries = new Set();
+
 // Run a query so that an entry is inserted into the cache.
-assert.eq(1, coll.find({a: 1, b: 1}).itcount());
+addToQueryCache({
+    queryArg: {a: 1, b: 1},
+    collArg: coll,
+    resCount: 1,
+    curCache: cachedQueries,
+    numCachedQueries: 1
+});
 
 // Invalid key should be a no-op.
-assert.commandWorked(coll.runCommand('planCacheClear', {query: {unknownfield: 1}}));
-assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+deleteFromQueryCache({unknownfield: 1}, coll, cachedQueries);
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
 
 // Introduce a second plan cache entry.
-assert.eq(0, coll.find({a: 1, b: 1, c: 1}).itcount());
-assert.eq(2, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+addToQueryCache({
+    queryArg: {a: 1, b: 1, c: 1},
+    collArg: coll,
+    resCount: 0,
+    curCache: cachedQueries,
+    numCachedQueries: 2
+});
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 2), dumpPlanCacheState(coll));
 
 // Drop one of the two shapes from the cache.
-assert.commandWorked(coll.runCommand('planCacheClear', {query: {a: 1, b: 1}}),
-                     dumpPlanCacheState(coll));
-assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll));
+deleteFromQueryCache({a: 1, b: 1}, coll, cachedQueries);
+assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll));
 
 // Drop the second shape from the cache.
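+// (For reference: deleteFromQueryCache() above works by diffing the planCacheKey values reported
+// by $planCacheStats before and after the planCacheClear command; any key that disappeared from
+// the server's plan cache is evicted from the local set as well. A compact sketch of that
+// reconciliation, using the function's own beforeClearKeys/afterClearKeys arrays, is:
+//     beforeClearKeys.filter(key => !afterClearKeys.includes(key))
+//         .forEach(key => curCache.delete(key));
+// The call below relies on that bookkeeping.)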
-assert.commandWorked(coll.runCommand('planCacheClear', {query: {a: 1, b: 1, c: 1}}), - dumpPlanCacheState(coll)); -assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); +deleteFromQueryCache({a: 1, b: 1, c: 1}, coll, cachedQueries); +assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 0), dumpPlanCacheState(coll)); // planCacheClear can clear $expr queries. -assert.eq(1, coll.find({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}).itcount()); -assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); -assert.commandWorked( - coll.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}})); -assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); +addToQueryCache({ + queryArg: {a: 1, b: 1, $expr: {$eq: ['$a', 1]}}, + collArg: coll, + resCount: 1, + curCache: cachedQueries, + numCachedQueries: 1 +}); +deleteFromQueryCache({a: 1, b: 1, $expr: {$eq: ['$a', 1]}}, coll, cachedQueries); +assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 0), dumpPlanCacheState(coll)); // planCacheClear fails with an $expr query with an unbound variable. assert.commandFailed( coll.runCommand('planCacheClear', {query: {a: 1, b: 1, $expr: {$eq: ['$a', '$$unbound']}}})); // Insert two more shapes into the cache. -assert.eq(1, coll.find({a: 1, b: 1}).itcount()); -assert.eq(1, coll.find({a: 1, b: 1}, {_id: 0, a: 1}).itcount()); -assert.eq(2, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); +addToQueryCache({ + queryArg: {a: 1, b: 1}, + collArg: coll, + resCount: 1, + curCache: cachedQueries, + numCachedQueries: 1 +}); +addToQueryCache({ + queryArg: {a: 1, b: 1}, + projectArg: {_id: 0, a: 1}, + collArg: coll, + resCount: 1, + curCache: cachedQueries, + numCachedQueries: 2 +}); +assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 2), dumpPlanCacheState(coll)); // Error cases. assert.commandFailedWithCode(coll.runCommand('planCacheClear', {query: 12345}), @@ -98,8 +176,7 @@ assert.commandFailedWithCode(coll.runCommand('planCacheClear', {projection: {_id ErrorCodes.BadValue); // Drop query cache. This clears all cached queries in the collection. -assert.commandWorked(coll.runCommand('planCacheClear')); -assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); +clearQueryCaches(coll, cachedQueries); // Clearing the plan cache for a non-existent collection should succeed. const nonExistentColl = db.plan_cache_clear_nonexistent; @@ -111,6 +188,7 @@ if (checkSBEEnabled(db)) { // collections, when $lookup is pushed down into SBE. const foreignColl = db.plan_cache_clear_foreign; foreignColl.drop(); + const foreignCachedQueries = new Set(); // We need two indices so that the multi-planner is executed. assert.commandWorked(foreignColl.createIndex({b: 1})); @@ -126,86 +204,151 @@ if (checkSBEEnabled(db)) { // Test case 1: clear plan cache on the main collection. // // Run a query against the 'foreignColl' and make sure it's cached. - assert.eq(0, foreignColl.find({b: 1, c: 1}).itcount()); - assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl)); + addToQueryCache({ + queryArg: {a: 1, b: 1}, + collArg: foreignColl, + resCount: 0, + curCache: foreignCachedQueries, + numCachedQueries: 1 + }); + assert.eq(true, + planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1), + dumpPlanCacheState(foreignColl)); // Run the '$lookup' query and make sure it's cached. 
- let results = coll.aggregate(pipeline).toArray(); - assert.eq(3, results.length, results); - assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); + addToQueryCache({ + queryArg: pipeline, + collArg: coll, + resCount: 3, + curCache: cachedQueries, + numCachedQueries: 1 + }); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); // Drop query cache on the main collection. This clears all cached queries in the main // collection only. - assert.commandWorked(coll.runCommand("planCacheClear")); - assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); - assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl)); + clearQueryCaches(coll, cachedQueries); + assert.eq(true, + planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1), + dumpPlanCacheState(foreignColl)); // Test case 2: clear plan cache on the foreign collection. // // Run the '$lookup' query again and make sure it's cached. - results = coll.aggregate(pipeline).toArray(); - assert.eq(3, results.length, results); - assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); + addToQueryCache({ + queryArg: pipeline, + collArg: coll, + resCount: 3, + curCache: cachedQueries, + numCachedQueries: 1 + }); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); // Drop query cache on the foreign collection. Make sure that the plan cache on the main // collection is not affected. - assert.commandWorked(foreignColl.runCommand("planCacheClear")); - assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); - assert.eq(0, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl)); + clearQueryCaches(foreignColl, foreignCachedQueries); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); // Test case 3: clear plan cache on the main collection by query shape. // // Run a query against the 'foreignColl' and make sure it's cached. - assert.eq(0, foreignColl.find({b: 1, c: 1}).itcount()); - assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl)); + addToQueryCache({ + queryArg: {b: 1, c: 1}, + collArg: foreignColl, + resCount: 0, + curCache: foreignCachedQueries, + numCachedQueries: 1 + }); + assert.eq(true, + planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1), + dumpPlanCacheState(foreignColl)); // Run the '$lookup' query and make sure it's cached. - results = coll.aggregate(pipeline).toArray(); - assert.eq(3, results.length, results); - assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); + addToQueryCache({ + queryArg: pipeline, + collArg: coll, + resCount: 3, + curCache: cachedQueries, + numCachedQueries: 1 + }); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); // Drop query cache by the query shape. This clears all cached queries in the main // collection only. - assert.commandWorked(coll.runCommand("planCacheClear", {query: {a: 1}})); - assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); - assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl)); + deleteFromQueryCache(pipeline[0].$match, coll, cachedQueries); + assert.eq(true, + planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1), + dumpPlanCacheState(foreignColl)); // Test case 4: clear plan cache on the foreign collection by (empty) query shape. // // Run two queries against the 'foreignColl' and make sure they're cached. 
- assert.eq(2, foreignColl.find({}).itcount()); - assert.eq(0, foreignColl.find({b: 1, c: 1}).itcount()); - assert.eq(2, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl)); + addToQueryCache({ + queryArg: {}, + collArg: foreignColl, + resCount: 2, + curCache: foreignCachedQueries, + numCachedQueries: 2 + }); + addToQueryCache({ + queryArg: {b: 1, c: 1}, + collArg: foreignColl, + resCount: 0, + curCache: foreignCachedQueries, + numCachedQueries: 2 + }); + assert.eq(true, + planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 2), + dumpPlanCacheState(foreignColl)); // Run the '$lookup' query and make sure it's cached. - results = coll.aggregate(pipeline).toArray(); - assert.eq(3, results.length, results); - assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); + addToQueryCache({ + queryArg: pipeline, + collArg: coll, + resCount: 3, + curCache: cachedQueries, + numCachedQueries: 1 + }); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); // Drop query cache on the foreign collection by the query shape. This clears one cached // query in the foreign collection only. - assert.commandWorked(foreignColl.runCommand("planCacheClear", {query: {}})); - assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); - assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl)); + deleteFromQueryCache({}, foreignColl, foreignCachedQueries); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); + assert.eq(true, + planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1), + dumpPlanCacheState(foreignColl)); // Test case 5: clear by query shape which matches $lookup and non-$lookup queries. // // Run the query on the main collection whose plan cache key command shape matches the shape of // the $lookup query. - results = coll.aggregate({$match: {a: 1}}).toArray(); - assert.eq(3, results.length, results); - assert.eq(2, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); + addToQueryCache({ + queryArg: [{$match: {a: 1}}], + collArg: coll, + resCount: 3, + curCache: cachedQueries, + numCachedQueries: 2 + }); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 2), dumpPlanCacheState(coll)); // Run another query on the main collection with a totally different shape. - results = coll.aggregate({$match: {a: {$in: [1, 2]}}}).toArray(); - assert.eq(4, results.length, results); - assert.eq(3, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); + addToQueryCache({ + queryArg: [{$match: {a: {$in: [1, 2]}}}], + collArg: coll, + resCount: 4, + curCache: cachedQueries, + numCachedQueries: 3 + }); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 3), dumpPlanCacheState(coll)); // Drop query cache on the main collection by the query shape. This clears two cached queries in // the main collection which match the query shape. - assert.commandWorked(coll.runCommand("planCacheClear", {query: {a: 1}})); - assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); - assert.eq(1, numPlanCacheEntries(foreignColl), dumpPlanCacheState(foreignColl)); + deleteFromQueryCache({a: 1}, coll, cachedQueries); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); + assert.eq(true, + planCacheContainsQuerySet(foreignCachedQueries, foreignColl, 1), + dumpPlanCacheState(foreignColl)); } // @@ -219,8 +362,7 @@ if (checkSBEEnabled(db)) { // // Make sure the cache is emtpy. 
-assert.commandWorked(coll.runCommand('planCacheClear')); -assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); +clearQueryCaches(coll, cachedQueries); // Case 1: The reIndex rebuilds the index. // Steps: @@ -228,13 +370,18 @@ assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); // Run reIndex on the collection. // Confirm that cache is empty. // (Only standalone mode supports the reIndex command.) -const isMongos = db.adminCommand({isdbgrid: 1}).isdbgrid; -const isStandalone = !isMongos && !db.runCommand({hello: 1}).hasOwnProperty('setName'); +const isStandalone = + !FixtureHelpers.isMongos(db) && !db.runCommand({hello: 1}).hasOwnProperty('setName'); if (isStandalone) { - assert.eq(1, coll.find({a: 1, b: 1}).itcount()); - assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); + addToQueryCache({ + queryArg: {a: 1, b: 1}, + collArg: coll, + resCount: 1, + curCache: cachedQueries, + numCachedQueries: 1 + }); + assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); assert.commandWorked(coll.reIndex()); - assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); } // Case 2: You add or drop an index. @@ -242,11 +389,17 @@ if (isStandalone) { // Populate the cache with 1 entry. // Add an index. // Confirm that cache is empty. -assert.eq(1, coll.find({a: 1, b: 1}).itcount()); -assert.eq(1, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); +clearQueryCaches(coll, cachedQueries); +addToQueryCache({ + queryArg: {a: 1, b: 1}, + collArg: coll, + resCount: 1, + curCache: cachedQueries, + numCachedQueries: 1 +}); +assert.eq(true, planCacheContainsQuerySet(cachedQueries, coll, 1), dumpPlanCacheState(coll)); assert.commandWorked(coll.createIndex({b: 1})); -assert.eq(0, numPlanCacheEntries(coll), dumpPlanCacheState(coll)); +assert.eq(false, planCacheContainsQuerySet(cachedQueries, coll, 0), dumpPlanCacheState(coll)); // Case 3: The mongod process restarts // Not applicable. -})(); diff --git a/jstests/core/query/plan_cache/plan_cache_list_plans.js b/jstests/core/query/plan_cache/plan_cache_list_plans.js index b0f7a24c61530..a0b42a9b2347e 100644 --- a/jstests/core/query/plan_cache/plan_cache_list_plans.js +++ b/jstests/core/query/plan_cache/plan_cache_list_plans.js @@ -17,14 +17,16 @@ // # Plan cache state is node-local and will not get migrated alongside tenant data. // tenant_migration_incompatible, // # TODO SERVER-67607: Test plan cache with CQF enabled. -// cqf_incompatible, +// cqf_experimental_incompatible, +// references_foreign_collection, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import { + getPlanCacheKeyFromPipeline, + getPlanCacheKeyFromShape, + getPlanStage +} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; let coll = db.jstests_plan_cache_list_plans; coll.drop(); @@ -192,4 +194,3 @@ if (!isSbeEnabled) { const res = foreignColl.aggregate([{$planCacheStats: {}}]).toArray(); assert.eq(0, res.length, dumpPlanCacheState()); } -})(); diff --git a/jstests/core/query/plan_cache/plan_cache_list_shapes.js b/jstests/core/query/plan_cache/plan_cache_list_shapes.js index 48535eae0c383..e34b4edc52b43 100644 --- a/jstests/core/query/plan_cache/plan_cache_list_shapes.js +++ b/jstests/core/query/plan_cache/plan_cache_list_shapes.js @@ -14,15 +14,13 @@ // # Plan cache state is node-local and will not get migrated alongside tenant data. 
// tenant_migration_incompatible, // # TODO SERVER-67607: Test plan cache with CQF enabled. -// cqf_incompatible, +// cqf_experimental_incompatible, // ] -(function() { -'use strict'; -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (checkSBEEnabled(db)) { jsTest.log("Skipping test because SBE is enabled."); - return; + quit(); } const coll = db.jstests_plan_cache_list_shapes; @@ -89,5 +87,4 @@ regexQuery.s.$options = 'mi'; // There is one more result since the query is now case sensitive. assert.eq(6, coll.find(regexQuery).itcount()); shapes = getCachedQueryShapes(); -assert.eq(4, shapes.length, shapes); -})(); +assert.eq(4, shapes.length, shapes); \ No newline at end of file diff --git a/jstests/core/query/plan_cache/plan_cache_sbe.js b/jstests/core/query/plan_cache/plan_cache_sbe.js index 2a35b786d70c4..5adb96f51b216 100644 --- a/jstests/core/query/plan_cache/plan_cache_sbe.js +++ b/jstests/core/query/plan_cache/plan_cache_sbe.js @@ -23,10 +23,7 @@ * assumes_no_implicit_index_creation, * ] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const coll = db.plan_cache_sbe; coll.drop(); @@ -67,5 +64,4 @@ if (isSbeEnabled) { } else { assert(!stats.cachedPlan.hasOwnProperty("queryPlan"), stats); assert(!stats.cachedPlan.hasOwnProperty("slotBasedPlan"), stats); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/query/plan_cache/plan_cache_sbe_hinted.js b/jstests/core/query/plan_cache/plan_cache_sbe_hinted.js index 7db4e8ba504d0..22b3550aaeb27 100644 --- a/jstests/core/query/plan_cache/plan_cache_sbe_hinted.js +++ b/jstests/core/query/plan_cache/plan_cache_sbe_hinted.js @@ -12,17 +12,14 @@ * # Multiple servers can mess up the plan cache list. * assumes_standalone_mongod, * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (!checkSBEEnabled(db)) { jsTest.log("Skip running the test because SBE is not enabled"); - return; + quit(); } const coll = db.plan_cache_sbe; @@ -48,5 +45,4 @@ queryAndVerify({}, 2); // Hinted query cached is reused. queryAndVerify({a: 1}, 2); // Query with different hint. -queryAndVerify({a: 1, b: 1}, 3); -})(); +queryAndVerify({a: 1, b: 1}, 3); \ No newline at end of file diff --git a/jstests/core/query/plan_cache/plan_cache_shell_helpers.js b/jstests/core/query/plan_cache/plan_cache_shell_helpers.js index a102d8a001d9d..3a1384488fe66 100644 --- a/jstests/core/query/plan_cache/plan_cache_shell_helpers.js +++ b/jstests/core/query/plan_cache/plan_cache_shell_helpers.js @@ -13,13 +13,11 @@ // # Plan cache state is node-local and will not get migrated alongside tenant data. // tenant_migration_incompatible, // # TODO SERVER-67607: Test plan cache with CQF enabled. -// cqf_incompatible, +// cqf_experimental_incompatible, // ] -(function() { -'use strict'; load('jstests/aggregation/extras/utils.js'); // For assertArrayEq. -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const isSbeEnabled = checkSBEEnabled(db); var coll = db.jstests_plan_cache_shell_helpers; @@ -178,5 +176,4 @@ assertCacheLength(0); // Verify that explaining a find command does not write to the plan cache. planCache.clear(); const explain = coll.find(queryB, projectionB).sort(sortC).explain(true); -assertCacheLength(0); -}()); +assertCacheLength(0); \ No newline at end of file diff --git a/jstests/core/query/plan_cache/plan_cache_stats_shard_and_host.js b/jstests/core/query/plan_cache/plan_cache_stats_shard_and_host.js index 69a2214e6b93a..a73722425f095 100644 --- a/jstests/core/query/plan_cache/plan_cache_stats_shard_and_host.js +++ b/jstests/core/query/plan_cache/plan_cache_stats_shard_and_host.js @@ -9,12 +9,11 @@ // # Plan cache state is node-local and will not get migrated alongside tenant data. // tenant_migration_incompatible, // # TODO SERVER-67607: Test plan cache with CQF enabled. -// cqf_incompatible, +// cqf_experimental_incompatible, +// requires_fcv_71, // ] -(function() { -"use strict"; - load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. +import {getPlanCacheKeyFromExplain} from "jstests/libs/analyze_plan.js"; const coll = db.plan_cache_stats_shard_and_host; coll.drop(); @@ -26,8 +25,20 @@ assert.commandWorked(coll.createIndex({b: 1})); assert.commandWorked(coll.insert({a: 2, b: 3})); assert.eq(1, coll.find({a: 2, b: 3}).itcount()); -// List the contents of the plan cache for the collection. -let planCacheContents = planCache.list(); +const explain = coll.find({a: 2, b: 3}).explain(); +const planCacheKey = getPlanCacheKeyFromExplain(explain, db); + +function filterPlanCacheEntriesByKey(planCacheKey, planCacheContents) { + let filteredPlanCacheEntries = []; + for (const entry of planCacheContents) { + if (entry.planCacheKey === planCacheKey) { + filteredPlanCacheEntries.push(entry); + } + } + return filteredPlanCacheEntries; +} + +let planCacheContents = filterPlanCacheEntriesByKey(planCacheKey, planCache.list()); // We expect every shard that has a chunk for the collection to have produced a plan cache entry. assert.eq( @@ -49,11 +60,16 @@ for (const entry of planCacheContents) { // shard/host. As a future improvement, we should return plan cache information from every host in // every shard. But for now, we use regular host targeting to choose a particular host in each // shard. -planCacheContents = planCache.list([{$group: {_id: "$shard", count: {$sum: 1}}}]); +planCacheContents = filterPlanCacheEntriesByKey( + planCacheKey, planCache.list([{$group: {_id: "$shard", count: {$sum: 1}}}])); + for (const entry of planCacheContents) { assert.eq(entry.count, 1, entry); } -planCacheContents = planCache.list([{$group: {_id: "$host", count: {$sum: 1}}}]); + +planCacheContents = filterPlanCacheEntriesByKey( + planCacheKey, planCache.list([{$group: {_id: "$host", count: {$sum: 1}}}])); + for (const entry of planCacheContents) { assert.eq(entry.count, 1, entry); } @@ -61,5 +77,4 @@ for (const entry of planCacheContents) { // Clear the plan cache and verify that attempting to list the plan cache now returns an empty // array. 
coll.getPlanCache().clear(); -assert.eq([], planCache.list()); -}()); +assert.eq([], filterPlanCacheEntriesByKey(planCacheKey, planCache.list())); \ No newline at end of file diff --git a/jstests/core/query/project/proj_key1.js b/jstests/core/query/project/proj_key1.js index c4834ffad49f4..0d6423156e0a8 100644 --- a/jstests/core/query/project/proj_key1.js +++ b/jstests/core/query/project/proj_key1.js @@ -2,12 +2,12 @@ // requires_getmore, // ] -t = db.proj_key1; +let t = db.proj_key1; t.drop(); -as = []; +let as = []; -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { as.push({a: i}); t.insert({a: i, b: i}); } diff --git a/jstests/core/query/project/projection_dotted_paths.js b/jstests/core/query/project/projection_dotted_paths.js index 7f6eb1bfef2d5..63170fc2bcacb 100644 --- a/jstests/core/query/project/projection_dotted_paths.js +++ b/jstests/core/query/project/projection_dotted_paths.js @@ -8,10 +8,7 @@ * Test projections with dotted field paths. Also test that such projections result in covered plans * when appropriate. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan, isIdhack, isIndexOnly, isIxscan} from "jstests/libs/analyze_plan.js"; load("jstests/aggregation/extras/utils.js"); // arrayEq let coll = db["projection_dotted_paths"]; @@ -129,4 +126,3 @@ assert.eq(resultDoc, {x: {y: {y: null, z: null}, z: null}}); assert(arrayEq(coll.find({}, {_id: 0, "a.x": "$a.x", "a.b.x": "$a.x"}).toArray(), [{a: {x: 1, b: {x: 1}}}])); } -}()); diff --git a/jstests/core/query/project/projection_semantics.js b/jstests/core/query/project/projection_semantics.js index 1f811c84b36ec..c284916f3f8e7 100644 --- a/jstests/core/query/project/projection_semantics.js +++ b/jstests/core/query/project/projection_semantics.js @@ -14,19 +14,16 @@ * not_allowed_with_security_token, * ] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (!checkSBEEnabled(db, ["featureFlagColumnstoreIndexes"])) { jsTestLog("Skipping test since columnstore Indexes are not enabled"); - return; + quit(); } if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll = db.projection_semantics; @@ -720,5 +717,4 @@ function testInputOutput({input, projection, expectedOutput, interestingIndexes projection: {measurements: {humidity: 0, time: 0}, _id: 0}, expectedOutput: {measurements: {temperature: 20, pressure: 0.7}}, }); -}()); -}()); +}()); \ No newline at end of file diff --git a/jstests/core/query/pull/pull.js b/jstests/core/query/pull/pull.js index 612c65f9e6502..c5d1519f2aa47 100644 --- a/jstests/core/query/pull/pull.js +++ b/jstests/core/query/pull/pull.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.jstests_pull; +let t = db.jstests_pull; t.drop(); t.save({a: [1, 2, 3]}); diff --git a/jstests/core/query/pull/pull2.js b/jstests/core/query/pull/pull2.js index a1b79955bb0e0..f3c04d517ca67 100644 --- a/jstests/core/query/pull/pull2.js +++ b/jstests/core/query/pull/pull2.js @@ -3,7 +3,7 @@ // key. 
// @tags: [assumes_unsharded_collection, requires_fastcount] -t = db.pull2; +let t = db.pull2; t.drop(); t.save({a: [{x: 1}, {x: 1, b: 2}]}); diff --git a/jstests/core/query/pull/pull_or.js b/jstests/core/query/pull/pull_or.js index 9ef1e091d2942..42b0d93bb1587 100644 --- a/jstests/core/query/pull/pull_or.js +++ b/jstests/core/query/pull/pull_or.js @@ -3,13 +3,10 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.pull_or; +let t = db.pull_or; t.drop(); -doc = { - _id: 1, - a: {b: [{x: 1}, {y: 'y'}, {x: 2}, {z: 'z'}]} -}; +let doc = {_id: 1, a: {b: [{x: 1}, {y: 'y'}, {x: 2}, {z: 'z'}]}}; t.insert(doc); diff --git a/jstests/core/query/pull/pull_remove1.js b/jstests/core/query/pull/pull_remove1.js index ab9368bab3bbc..307d84f237182 100644 --- a/jstests/core/query/pull/pull_remove1.js +++ b/jstests/core/query/pull/pull_remove1.js @@ -3,13 +3,10 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.pull_remove1; +let t = db.pull_remove1; t.drop(); -o = { - _id: 1, - a: [1, 2, 3, 4, 5, 6, 7, 8] -}; +let o = {_id: 1, a: [1, 2, 3, 4, 5, 6, 7, 8]}; t.insert(o); assert.eq(o, t.findOne(), "A1"); diff --git a/jstests/core/query/pull/pullall.js b/jstests/core/query/pull/pullall.js index 2cd0872b0799c..7679bc6db71e0 100644 --- a/jstests/core/query/pull/pullall.js +++ b/jstests/core/query/pull/pullall.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.jstests_pullall; +let t = db.jstests_pullall; t.drop(); t.save({a: [1, 2, 3]}); diff --git a/jstests/core/query/pull/pullall2.js b/jstests/core/query/pull/pullall2.js index 10e8c89caa16b..e1bc26cb0bd9d 100644 --- a/jstests/core/query/pull/pullall2.js +++ b/jstests/core/query/pull/pullall2.js @@ -3,14 +3,11 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.pullall2; +let t = db.pullall2; t.drop(); -o = { - _id: 1, - a: [] -}; -for (i = 0; i < 5; i++) +let o = {_id: 1, a: []}; +for (let i = 0; i < 5; i++) o.a.push({x: i, y: i}); t.insert(o); diff --git a/jstests/core/query/push/push2.js b/jstests/core/query/push/push2.js index 10669aa258165..0e00617b1ec6a 100644 --- a/jstests/core/query/push/push2.js +++ b/jstests/core/query/push/push2.js @@ -1,14 +1,14 @@ (function() { -t = db.push2; +let t = db.push2; t.drop(); t.save({_id: 1, a: []}); -s = new Array(700000).toString(); +let s = new Array(700000).toString(); -gotError = null; +let gotError = null; -for (x = 0; x < 100; x++) { +for (let x = 0; x < 100; x++) { print(x + " pushes"); var res = t.update({}, {$push: {a: s}}); gotError = res.hasWriteError(); diff --git a/jstests/core/query/push/push_sort.js b/jstests/core/query/push/push_sort.js index 2d74a3909f8f7..9ff12d5698c39 100644 --- a/jstests/core/query/push/push_sort.js +++ b/jstests/core/query/push/push_sort.js @@ -7,7 +7,7 @@ // test exercises such $sort clause from the shell user's perspective. 
// -t = db.push_sort; +let t = db.push_sort; t.drop(); // diff --git a/jstests/core/query/query1.js b/jstests/core/query/query1.js index 7e16a03a8fcc0..08a31d8850273 100644 --- a/jstests/core/query/query1.js +++ b/jstests/core/query/query1.js @@ -1,14 +1,14 @@ // @tags: [requires_fastcount] -t = db.query1; +let t = db.query1; t.drop(); t.save({num: 1}); t.save({num: 3}); t.save({num: 4}); -num = 0; -total = 0; +let num = 0; +let total = 0; t.find().forEach(function(z) { num++; diff --git a/jstests/core/query/query_hash_stability.js b/jstests/core/query/query_hash_stability.js index c358f1c7d3455..bd26ac525d3bd 100644 --- a/jstests/core/query/query_hash_stability.js +++ b/jstests/core/query/query_hash_stability.js @@ -3,6 +3,10 @@ * across catalog changes. * @tags: [ * assumes_read_concern_local, + * # This test expects query shapes and plans to stay the same at the beginning and + * # at the end of test run. That's just wrong expectation when chunks are moving + * # randomly across shards. + * assumes_balancer_off, * requires_fcv_51, * # The test expects the plan cache key on a given node to remain stable. However, the plan * # cache key is allowed to change between versions. Therefore, this test cannot run in @@ -10,10 +14,8 @@ * cannot_run_during_upgrade_downgrade, * ] */ -(function() { -"use strict"; load('jstests/libs/fixture_helpers.js'); // For and isMongos(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const collName = "query_hash_stability"; const coll = db[collName]; @@ -120,5 +122,4 @@ if (!checkSBEEnabled(db)) { planCacheField: 'planCacheKey', expectedToMatch: true }); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/query/queryoptimizera.js b/jstests/core/query/queryoptimizera.js index 2f3f226684731..42ca0032ea11f 100644 --- a/jstests/core/query/queryoptimizera.js +++ b/jstests/core/query/queryoptimizera.js @@ -9,8 +9,8 @@ // constraint is printed at appropriate times. SERVER-5353 function numWarnings() { - logs = db.adminCommand({getLog: "global"}).log; - ret = 0; + let logs = db.adminCommand({getLog: "global"}).log; + let ret = 0; logs.forEach(function(x) { if (x.match(warningMatchRegexp)) { ++ret; @@ -19,23 +19,23 @@ function numWarnings() { return ret; } -collectionNameIndex = 0; +let collectionNameIndex = 0; // Generate a collection name not already present in the log. do { - testCollectionName = 'jstests_queryoptimizera__' + collectionNameIndex++; - warningMatchString = + var testCollectionName = 'jstests_queryoptimizera__' + collectionNameIndex++; + var warningMatchString = 'unindexed _id query on capped collection.*collection: test.' 
+ testCollectionName; - warningMatchRegexp = new RegExp(warningMatchString); + var warningMatchRegexp = new RegExp(warningMatchString); } while (numWarnings() > 0); -t = db[testCollectionName]; +let t = db[testCollectionName]; t.drop(); -notCappedCollectionName = testCollectionName + '_notCapped'; +let notCappedCollectionName = testCollectionName + '_notCapped'; -notCapped = db.getSiblingDB("local").getCollection(notCappedCollectionName); +let notCapped = db.getSiblingDB("local").getCollection(notCappedCollectionName); notCapped.drop(); assert.commandWorked(db.createCollection(testCollectionName, {capped: true, size: 1000})); @@ -45,14 +45,14 @@ assert.commandWorked( t.insert({}); notCapped.insert({}); -oldNumWarnings = 0; +let oldNumWarnings = 0; function assertNoNewWarnings() { assert.eq(oldNumWarnings, numWarnings()); } function assertNewWarning() { - newNumWarnings = numWarnings(); + let newNumWarnings = numWarnings(); // Ensure that newNumWarnings > oldNumWarnings. It's not safe to test that oldNumWarnings + 1 // == newNumWarnings, because a (simulated) page fault exception may cause multiple messages to // be logged instead of only one. diff --git a/jstests/core/query/regex/regex.js b/jstests/core/query/regex/regex.js index 363a03db20cff..409c69b0b59f7 100644 --- a/jstests/core/query/regex/regex.js +++ b/jstests/core/query/regex/regex.js @@ -1,16 +1,13 @@ // @tags: [ // assumes_read_concern_local, // ] +load("jstests/libs/fixture_helpers.js"); (function() { 'use strict'; const t = db.jstests_regex; -const hello = db.runCommand("hello"); -assert.commandWorked(hello); -const isMongos = (hello.msg === "isdbgrid"); - t.drop(); assert.commandWorked(t.save({a: "bcd"})); assert.eq(1, t.count({a: /b/}), "A"); @@ -48,7 +45,7 @@ assert.eq(1, t.count(query)); const result = t.find(query).explain(); assert.commandWorked(result); -if (!isMongos) { +if (!FixtureHelpers.isMongos(db)) { assert(result.hasOwnProperty("queryPlanner")); assert(result.queryPlanner.hasOwnProperty("parsedQuery"), tojson(result)); assert.eq(result.queryPlanner.parsedQuery, query); diff --git a/jstests/core/query/regex/regex2.js b/jstests/core/query/regex/regex2.js index 6ed1f2d290c3b..4801c7b807058 100644 --- a/jstests/core/query/regex/regex2.js +++ b/jstests/core/query/regex/regex2.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.regex2; +let t = db.regex2; t.drop(); assert.commandWorked(t.save({a: "test"})); @@ -15,8 +15,8 @@ assert.eq(2, t.find({a: /test/i}).count(), "F"); t.drop(); -a = "\u0442\u0435\u0441\u0442"; -b = "\u0422\u0435\u0441\u0442"; +let a = "\u0442\u0435\u0441\u0442"; +let b = "\u0422\u0435\u0441\u0442"; assert((new RegExp(a)).test(a), "B 1"); assert(!(new RegExp(a)).test(b), "B 2"); diff --git a/jstests/core/query/regex/regex3.js b/jstests/core/query/regex/regex3.js index b21a7a81435ae..c84f64020e4ab 100644 --- a/jstests/core/query/regex/regex3.js +++ b/jstests/core/query/regex/regex3.js @@ -3,7 +3,7 @@ // assumes_read_concern_local, // ] -t = db.regex3; +let t = db.regex3; t.drop(); assert.commandWorked(t.save({name: "eliot"})); diff --git a/jstests/core/query/regex/regex4.js b/jstests/core/query/regex/regex4.js index 0f7963fdd1e9d..d07614b199466 100644 --- a/jstests/core/query/regex/regex4.js +++ b/jstests/core/query/regex/regex4.js @@ -3,7 +3,7 @@ // assumes_read_concern_local, // ] -t = db.regex4; +let t = db.regex4; t.drop(); assert.commandWorked(t.save({name: "eliot"})); diff --git a/jstests/core/query/regex/regex5.js b/jstests/core/query/regex/regex5.js index d0836a6268666..ea32b4864df96 
100644 --- a/jstests/core/query/regex/regex5.js +++ b/jstests/core/query/regex/regex5.js @@ -1,5 +1,5 @@ -t = db.regex5; +let t = db.regex5; t.drop(); // Add filler data to make sure that indexed solutions are @@ -11,10 +11,10 @@ for (var i = 0; i < 10; i++) { t.save({x: ["abc", "xyz1"]}); t.save({x: ["ac", "xyz2"]}); -a = /.*b.*c/; -x = /.*y.*/; +let a = /.*b.*c/; +let x = /.*y.*/; -doit = function() { +let doit = function() { assert.eq(1, t.find({x: a}).count(), "A"); assert.eq(2, t.find({x: x}).count(), "B"); assert.eq(2, t.find({x: {$in: [x]}}).count(), "C"); // SERVER-322 diff --git a/jstests/core/query/regex/regex6.js b/jstests/core/query/regex/regex6.js index cc7b507f610cb..168826bbae1af 100644 --- a/jstests/core/query/regex/regex6.js +++ b/jstests/core/query/regex/regex6.js @@ -4,7 +4,7 @@ // @tags: [ // assumes_unsharded_collection, // ] -t = db.regex6; +let t = db.regex6; t.drop(); t.save({name: "eliot"}); diff --git a/jstests/core/query/regex/regex7.js b/jstests/core/query/regex/regex7.js index c9c5454dbc547..9a7e6153f1a06 100644 --- a/jstests/core/query/regex/regex7.js +++ b/jstests/core/query/regex/regex7.js @@ -1,4 +1,4 @@ -t = db.regex_matches_self; +let t = db.regex_matches_self; t.drop(); t.insert({r: /^a/}); @@ -23,4 +23,4 @@ assert.eq(/^b/, t.findOne({r: /^b/}).r, '3 1 b'); assert.eq(1, t.count({r: /^b/}), '3 2 b'); t.insert({r: "a"}); -assert.eq(2, t.count({r: /^a/}), 'c'); \ No newline at end of file +assert.eq(2, t.count({r: /^a/}), 'c'); diff --git a/jstests/core/query/regex/regex8.js b/jstests/core/query/regex/regex8.js index 20164acf464f1..e57d01db067d7 100644 --- a/jstests/core/query/regex/regex8.js +++ b/jstests/core/query/regex/regex8.js @@ -1,4 +1,4 @@ -t = db.regex8; +let t = db.regex8; t.drop(); t.insert({_id: 1, a: "abc"}); diff --git a/jstests/core/query/regex/regex9.js b/jstests/core/query/regex/regex9.js index 96188d689dc71..497b1cbdabc93 100644 --- a/jstests/core/query/regex/regex9.js +++ b/jstests/core/query/regex/regex9.js @@ -1,5 +1,5 @@ -t = db.regex9; +let t = db.regex9; t.drop(); t.insert({_id: 1, a: ["a", "b", "c"]}); diff --git a/jstests/core/query/regex/regex_distinct.js b/jstests/core/query/regex/regex_distinct.js index 7852950853c72..7611628aa6fc8 100644 --- a/jstests/core/query/regex/regex_distinct.js +++ b/jstests/core/query/regex/regex_distinct.js @@ -7,9 +7,7 @@ * ] */ -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); // For getPlanStages. +import {getPlanStages} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fixture_helpers.js"); // For numberOfShardsForCollection. 
const coll = db.regex_distinct; @@ -24,4 +22,3 @@ const distinctScanStages = getPlanStages(coll.explain().distinct("a", {a: {"$regex": "^ab.*"}}), "DISTINCT_SCAN"); assert.eq(distinctScanStages.length, FixtureHelpers.numberOfShardsForCollection(coll)); -})(); diff --git a/jstests/core/query/regex/regex_embed1.js b/jstests/core/query/regex/regex_embed1.js index 11d92a6f3dc64..97a0cae65525f 100644 --- a/jstests/core/query/regex/regex_embed1.js +++ b/jstests/core/query/regex/regex_embed1.js @@ -1,4 +1,4 @@ -t = db.regex_embed1; +let t = db.regex_embed1; t.drop(); diff --git a/jstests/core/query/regex/regexa.js b/jstests/core/query/regex/regexa.js index 694436e87a888..24ea8f906865d 100644 --- a/jstests/core/query/regex/regexa.js +++ b/jstests/core/query/regex/regexa.js @@ -1,6 +1,6 @@ // Test simple regex optimization with a regex | (bar) present - SERVER-3298 -t = db.jstests_regexa; +let t = db.jstests_regexa; t.drop(); function check() { diff --git a/jstests/core/query/regex/regexb.js b/jstests/core/query/regex/regexb.js index 09e3518728b6f..398b9c5b83659 100644 --- a/jstests/core/query/regex/regexb.js +++ b/jstests/core/query/regex/regexb.js @@ -1,6 +1,6 @@ // Test more than four regex expressions in a query -- SERVER-969 -t = db.jstests_regexb; +let t = db.jstests_regexb; t.drop(); t.save({a: 'a', b: 'b', c: 'c', d: 'd', e: 'e'}); diff --git a/jstests/core/query/rename/rename_operator.js b/jstests/core/query/rename/rename_operator.js index e86cc077e65df..71965f269bd3c 100644 --- a/jstests/core/query/rename/rename_operator.js +++ b/jstests/core/query/rename/rename_operator.js @@ -11,7 +11,7 @@ * ] */ -t = db.jstests_rename_operator; +let t = db.jstests_rename_operator; t.drop(); function bad(f) { diff --git a/jstests/core/query/rename/rename_operator_missing_source.js b/jstests/core/query/rename/rename_operator_missing_source.js index ff219cda56378..d9c087af9aae3 100644 --- a/jstests/core/query/rename/rename_operator_missing_source.js +++ b/jstests/core/query/rename/rename_operator_missing_source.js @@ -8,7 +8,7 @@ * ] */ -t = db.jstests_rename5; +let t = db.jstests_rename5; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/query/sbe_plan_cache_autoparameterize_ixscan.js b/jstests/core/query/sbe_plan_cache_autoparameterize_ixscan.js index 4280d54a700d4..0591eb29408d5 100644 --- a/jstests/core/query/sbe_plan_cache_autoparameterize_ixscan.js +++ b/jstests/core/query/sbe_plan_cache_autoparameterize_ixscan.js @@ -12,20 +12,16 @@ * requires_fcv_63, * # Plan cache state is node-local and will not get migrated alongside tenant data. * tenant_migration_incompatible, - * cqf_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'. -load("jstests/libs/analyze_plan.js"); // For 'getQueryHashFromExplain'. +import {getPlanCacheKeyFromExplain, getQueryHashFromExplain} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // This test is specifically verifying the behavior of the SBE plan cache, which is only enabled // when SBE is enabled. if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); - return; + quit(); } const coll = db[jsTestName()]; @@ -87,5 +83,4 @@ assert.eq(newCacheEntry.planCacheKey, planCacheKey, newCacheEntry); assert.eq(newCacheEntry.queryHash, queryHash, newCacheEntry); // The query should also return the same results as before. 
-assert.eq(results, cacheResults); -}()); +assert.eq(results, cacheResults); \ No newline at end of file diff --git a/jstests/core/query/set/set1.js b/jstests/core/query/set/set1.js index bae41fc5803f3..6914ddc217f92 100644 --- a/jstests/core/query/set/set1.js +++ b/jstests/core/query/set/set1.js @@ -1,5 +1,5 @@ -t = db.set1; +let t = db.set1; t.drop(); t.insert({_id: 1, emb: {}}); diff --git a/jstests/core/query/set/set2.js b/jstests/core/query/set/set2.js index c5b6e1c95534e..9f37dd1e6b382 100644 --- a/jstests/core/query/set/set2.js +++ b/jstests/core/query/set/set2.js @@ -1,5 +1,5 @@ -t = db.set2; +let t = db.set2; t.drop(); t.save({_id: 1, x: true, y: {x: true}}); diff --git a/jstests/core/query/set/set3.js b/jstests/core/query/set/set3.js index 4af579fa0b9a8..6dab2e3df2ba5 100644 --- a/jstests/core/query/set/set3.js +++ b/jstests/core/query/set/set3.js @@ -3,12 +3,12 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.set3; +let t = db.set3; t.drop(); t.insert({"test1": {"test2": {"abcdefghijklmnopqrstu": {"id": 1}}}}); t.update({}, {"$set": {"test1.test2.abcdefghijklmnopqrstuvwxyz": {"id": 2}}}); -x = t.findOne(); +let x = t.findOne(); assert.eq(1, x.test1.test2.abcdefghijklmnopqrstu.id, "A"); assert.eq(2, x.test1.test2.abcdefghijklmnopqrstuvwxyz.id, "B"); diff --git a/jstests/core/query/set/set4.js b/jstests/core/query/set/set4.js index 99c0913b977d7..c1ccb87d1a046 100644 --- a/jstests/core/query/set/set4.js +++ b/jstests/core/query/set/set4.js @@ -3,13 +3,10 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.set4; +let t = db.set4; t.drop(); -orig = { - _id: 1, - a: [{x: 1}] -}; +let orig = {_id: 1, a: [{x: 1}]}; t.insert(orig); t.update({}, {$set: {'a.0.x': 2, 'foo.bar': 3}}); diff --git a/jstests/core/query/set/set5.js b/jstests/core/query/set/set5.js index a848899f4affc..fbaa832c3a0cd 100644 --- a/jstests/core/query/set/set5.js +++ b/jstests/core/query/set/set5.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.set5; +let t = db.set5; t.drop(); function check(want, err) { diff --git a/jstests/core/query/set/set6.js b/jstests/core/query/set/set6.js index 2f82eb40a68f8..ac96cee0228fc 100644 --- a/jstests/core/query/set/set6.js +++ b/jstests/core/query/set/set6.js @@ -3,13 +3,10 @@ // key. 
// @tags: [assumes_unsharded_collection] -t = db.set6; +let t = db.set6; t.drop(); -x = { - _id: 1, - r: new DBRef("foo", new ObjectId()) -}; +let x = {_id: 1, r: new DBRef("foo", new ObjectId())}; t.insert(x); assert.eq(x, t.findOne(), "A"); diff --git a/jstests/core/query/set/set7.js b/jstests/core/query/set/set7.js index e1cdd0f3bf266..232e3de5f8d19 100644 --- a/jstests/core/query/set/set7.js +++ b/jstests/core/query/set/set7.js @@ -9,7 +9,7 @@ // test $set with array indicies -t = db.jstests_set7; +let t = db.jstests_set7; var res; t.drop(); diff --git a/jstests/core/query/sort/sort10.js b/jstests/core/query/sort/sort10.js index 6819c76fa5b33..4369a179fcfbd 100644 --- a/jstests/core/query/sort/sort10.js +++ b/jstests/core/query/sort/sort10.js @@ -2,7 +2,7 @@ // // @tags: [requires_fastcount] -t = db.sort10; +let t = db.sort10; function checkSorting1(opts) { t.drop(); @@ -20,25 +20,26 @@ checkSorting1({}); checkSorting1({"background": true}); function checkSorting2(dates, sortOrder) { - cur = t.find().sort({x: sortOrder}); + let cur = t.find().sort({x: sortOrder}); assert.eq(dates.length, cur.count(), "Incorrect number of results returned"); - index = 0; + let index = 0; while (cur.hasNext()) { - date = cur.next().x; + let date = cur.next().x; assert.eq(dates[index].valueOf(), date.valueOf()); index++; } } t.drop(); -dates = [new Date(-5000000000000), new Date(5000000000000), new Date(0), new Date(5), new Date(-5)]; +let dates = + [new Date(-5000000000000), new Date(5000000000000), new Date(0), new Date(5), new Date(-5)]; for (var i = 0; i < dates.length; i++) { t.insert({x: dates[i]}); } dates.sort(function(a, b) { return a - b; }); -reverseDates = dates.slice(0).reverse(); +let reverseDates = dates.slice(0).reverse(); checkSorting2(dates, 1); checkSorting2(reverseDates, -1); diff --git a/jstests/core/query/sort/sort2.js b/jstests/core/query/sort/sort2.js index 1d373193fccbb..38dd69921c882 100644 --- a/jstests/core/query/sort/sort2.js +++ b/jstests/core/query/sort/sort2.js @@ -1,5 +1,5 @@ // test sorting, mainly a test ver simple with no index -t = db.sort2; +let t = db.sort2; t.drop(); t.save({x: 1, y: {a: 5, b: 4}}); diff --git a/jstests/core/query/sort/sort6.js b/jstests/core/query/sort/sort6.js index f2658bbcbada5..9198c2fc1361b 100644 --- a/jstests/core/query/sort/sort6.js +++ b/jstests/core/query/sort/sort6.js @@ -1,4 +1,4 @@ -t = db.sort6; +let t = db.sort6; function get(x) { return t.find().sort({c: x}).map(function(z) { diff --git a/jstests/core/query/sort/sort8.js b/jstests/core/query/sort/sort8.js index fa0b0040de11d..ab40d4578e300 100644 --- a/jstests/core/query/sort/sort8.js +++ b/jstests/core/query/sort/sort8.js @@ -1,15 +1,15 @@ // Check sorting of arrays indexed by key SERVER-2884 -t = db.jstests_sort8; +let t = db.jstests_sort8; t.drop(); t.save({a: [1, 10]}); t.save({a: 5}); -unindexedForward = t.find().sort({a: 1}).toArray(); -unindexedReverse = t.find().sort({a: -1}).toArray(); +let unindexedForward = t.find().sort({a: 1}).toArray(); +let unindexedReverse = t.find().sort({a: -1}).toArray(); t.createIndex({a: 1}); -indexedForward = t.find().sort({a: 1}).hint({a: 1}).toArray(); -indexedReverse = t.find().sort({a: -1}).hint({a: 1}).toArray(); +let indexedForward = t.find().sort({a: 1}).hint({a: 1}).toArray(); +let indexedReverse = t.find().sort({a: -1}).hint({a: 1}).toArray(); assert.eq(unindexedForward, indexedForward); assert.eq(unindexedReverse, indexedReverse); diff --git a/jstests/core/query/sort/sort9.js b/jstests/core/query/sort/sort9.js index 
57496b40da15d..a606ec237fbbd 100644 --- a/jstests/core/query/sort/sort9.js +++ b/jstests/core/query/sort/sort9.js @@ -1,6 +1,6 @@ // Unindexed array sorting SERVER-2884 -t = db.jstests_sort9; +let t = db.jstests_sort9; t.drop(); t.save({a: []}); diff --git a/jstests/core/query/sort/sort_array.js b/jstests/core/query/sort/sort_array.js index fccfe744a7139..3dd52164c84e2 100644 --- a/jstests/core/query/sort/sort_array.js +++ b/jstests/core/query/sort/sort_array.js @@ -6,10 +6,7 @@ /** * Tests for sorting documents by fields that contain arrays. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {aggPlanHasStage, isQueryPlan, planHasStage} from "jstests/libs/analyze_plan.js"; let coll = db.jstests_array_sort; @@ -307,5 +304,4 @@ testAggAndFindSort({ hint: {"a.x": 1}, expected: [{_id: 1}, {_id: 0}, {_id: 2}], expectBlockingSort: false -}); -}()); +}); \ No newline at end of file diff --git a/jstests/core/query/sort/sort_dotted_paths_collation.js b/jstests/core/query/sort/sort_dotted_paths_collation.js index b0b91f7126d46..fd9dafacfb994 100644 --- a/jstests/core/query/sort/sort_dotted_paths_collation.js +++ b/jstests/core/query/sort/sort_dotted_paths_collation.js @@ -11,6 +11,9 @@ * @tags: [ * does_not_support_transactions, * assumes_no_implicit_collection_creation_after_drop, + * # Fixes behavior which was buggy in 7.0, so multiversion incompatible for now. + * # TODO SERVER-76127: Remove this tag. + * multiversion_incompatible, * ] */ (function() { @@ -286,4 +289,42 @@ testSortAndSortWithLimit( testSortAndSortWithLimit( {"a.b.c": -1, _id: -1}, [{_id: 8}, {_id: 6}, {_id: 9}, {_id: 5}, {_id: 1}, {_id: 4}, {_id: 7}, {_id: 3}, {_id: 2}]); + +// Tests for a case where all values are scalars and the sort components do not have a common +// parent path. +assert(coll.drop()); +assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); +assert.commandWorked(coll.insert({_id: 1, a: "a", b: "X"})); +assert.commandWorked(coll.insert({_id: 2, a: "a", b: "y"})); +assert.commandWorked(coll.insert({_id: 3, a: "a", b: "Z"})); +assert.commandWorked(coll.insert({_id: 4, a: "b", b: "x"})); +assert.commandWorked(coll.insert({_id: 5, a: "B", b: "Y"})); +assert.commandWorked(coll.insert({_id: 6, a: "B", b: "Z"})); +testSortAndSortWithLimit({"a": 1, "b": 1}, + [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}]); +testSortAndSortWithLimit({"a": 1, "b": -1}, + [{_id: 3}, {_id: 2}, {_id: 1}, {_id: 6}, {_id: 5}, {_id: 4}]); +testSortAndSortWithLimit({"a": -1, "b": 1}, + [{_id: 4}, {_id: 5}, {_id: 6}, {_id: 1}, {_id: 2}, {_id: 3}]); +testSortAndSortWithLimit({"a": -1, "b": -1}, + [{_id: 6}, {_id: 5}, {_id: 4}, {_id: 3}, {_id: 2}, {_id: 1}]); + +// Tests for a case where all values are scalar and the sort components have a common parent +// path. 
+assert(coll.drop()); +assert.commandWorked(db.createCollection(coll.getName(), caseInsensitive)); +assert.commandWorked(coll.insert({_id: 1, obj: {a: "a", b: "X"}})); +assert.commandWorked(coll.insert({_id: 2, obj: {a: "a", b: "y"}})); +assert.commandWorked(coll.insert({_id: 3, obj: {a: "a", b: "Z"}})); +assert.commandWorked(coll.insert({_id: 4, obj: {a: "b", b: "x"}})); +assert.commandWorked(coll.insert({_id: 5, obj: {a: "B", b: "Y"}})); +assert.commandWorked(coll.insert({_id: 6, obj: {a: "B", b: "Z"}})); +testSortAndSortWithLimit({"obj.a": 1, "obj.b": 1}, + [{_id: 1}, {_id: 2}, {_id: 3}, {_id: 4}, {_id: 5}, {_id: 6}]); +testSortAndSortWithLimit({"obj.a": 1, "obj.b": -1}, + [{_id: 3}, {_id: 2}, {_id: 1}, {_id: 6}, {_id: 5}, {_id: 4}]); +testSortAndSortWithLimit({"obj.a": -1, "obj.b": 1}, + [{_id: 4}, {_id: 5}, {_id: 6}, {_id: 1}, {_id: 2}, {_id: 3}]); +testSortAndSortWithLimit({"obj.a": -1, "obj.b": -1}, + [{_id: 6}, {_id: 5}, {_id: 4}, {_id: 3}, {_id: 2}, {_id: 1}]); })(); diff --git a/jstests/core/query/sort/sort_merge.js b/jstests/core/query/sort/sort_merge.js index fe6e24917d784..2206f7b03ae4e 100644 --- a/jstests/core/query/sort/sort_merge.js +++ b/jstests/core/query/sort/sort_merge.js @@ -5,10 +5,7 @@ * assumes_read_concern_local, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage, getPlanStages, isIndexOnly, isIxscan} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fixture_helpers.js"); // For 'isMongos'. const collNamePrefix = 'sort_merge_'; @@ -114,7 +111,7 @@ function runTest(sorts, filters, verifyCallback) { // Check that there are no duplicates. let ids = new Set(); for (let doc of res) { - assert(!ids.has(doc._id), () => "Duplicate _id: " + tojson(_id)); + assert(!ids.has(doc._id), () => "Duplicate _id: " + tojson(doc._id)); ids.add(doc._id); } } @@ -332,4 +329,3 @@ function runTest(sorts, filters, verifyCallback) { }; runTest([kSortPattern], [kCoveredFilter], verifyCoveredPlan); })(); -})(); diff --git a/jstests/core/query/sort/sort_merge_collation.js b/jstests/core/query/sort/sort_merge_collation.js index 38e6f33d9da64..015b41fedd835 100644 --- a/jstests/core/query/sort/sort_merge_collation.js +++ b/jstests/core/query/sort/sort_merge_collation.js @@ -5,10 +5,7 @@ * assumes_no_implicit_collection_creation_after_drop, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStages} from "jstests/libs/analyze_plan.js"; const numericOrdering = { collation: {locale: "en_US", numericOrdering: true} @@ -74,7 +71,7 @@ function runTest(sorts, filters) { // Check that there are no duplicates. 
let ids = new Set(); for (let doc of res) { - assert(!ids.has(doc._id), () => "Duplicate _id: " + tojson(_id)); + assert(!ids.has(doc._id), () => "Duplicate _id: " + tojson(doc._id)); ids.add(doc._id); } } @@ -160,5 +157,4 @@ const kSorts = [ ]; runTest(kSorts, kFilterPredicates); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/query/sort/sort_numeric.js b/jstests/core/query/sort/sort_numeric.js index 5ff3e71a06654..908a9e7184a6b 100644 --- a/jstests/core/query/sort/sort_numeric.js +++ b/jstests/core/query/sort/sort_numeric.js @@ -1,5 +1,5 @@ -t = db.sort_numeric; +let t = db.sort_numeric; t.drop(); // there are two numeric types int he db; make sure it handles them right @@ -18,7 +18,7 @@ for (var pass = 0; pass < 2; pass++) { var c = t.find().sort({a: 1}); var last = 0; while (c.hasNext()) { - current = c.next(); + let current = c.next(); assert(current.a > last); last = current.a; } diff --git a/jstests/core/query/sort/sortb.js b/jstests/core/query/sort/sortb.js index 7c6abe340b478..1ec0851d4c9eb 100644 --- a/jstests/core/query/sort/sortb.js +++ b/jstests/core/query/sort/sortb.js @@ -5,31 +5,40 @@ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. -const t = db.jstests_sortb; +const t = db[jsTestName()]; t.drop(); -t.createIndex({b: 1}); +assert.commandWorked(t.createIndex({b: 1})); -let i; -for (i = 0; i < 100; ++i) { - t.save({a: i, b: i}); -} +let docs = Array.from({length: 100}, (_, i) => { + return {a: i, b: i}; +}); +assert.commandWorked(t.insert(docs)); const numShards = FixtureHelpers.numberOfShardsForCollection(t); const numLargeDocumentsToWrite = 120 * numShards; +jsTestLog('numShards = ' + numShards + '; numLargeDocumentsToWrite = ' + numLargeDocumentsToWrite); + // These large documents will not be part of the initial set of "top 100" matches, and they will // not be part of the final set of "top 100" matches returned to the client. However, they are // an intermediate set of "top 100" matches and should trigger an in memory sort capacity // exception. const big = new Array(1024 * 1024).toString(); -for (; i < 100 + numLargeDocumentsToWrite; ++i) { - t.save({a: i, b: i, big: big}); -} +docs = Array.from({length: numLargeDocumentsToWrite}, (_, i) => { + const k = 100 + i; + return {a: k, b: k, big: big}; +}); +assert.commandWorked(t.insert(docs)); + +docs = Array.from({length: 100}, (_, i) => { + const k = 100 + numLargeDocumentsToWrite + i; + return {a: k, b: k}; +}); +assert.commandWorked(t.insert(docs)); -for (; i < 200 + numLargeDocumentsToWrite; ++i) { - t.save({a: i, b: i}); -} +jsTestLog('Collection ' + t.getFullName() + ' populated with ' + t.countDocuments({}) + + ' documents. Checking allowDiskUse=false behavior.'); assert.throwsWithCode( () => t.find().sort({a: -1}).allowDiskUse(false).hint({b: 1}).limit(100).itcount(), @@ -38,5 +47,4 @@ assert.throwsWithCode( () => t.find().sort({a: -1}).allowDiskUse(false).hint({b: 1}).showDiskLoc().limit(100).itcount(), ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed); -t.drop(); })(); diff --git a/jstests/core/query/sort/sortd.js b/jstests/core/query/sort/sortd.js index 525f08ea9470e..39aacb7733111 100644 --- a/jstests/core/query/sort/sortd.js +++ b/jstests/core/query/sort/sortd.js @@ -4,12 +4,12 @@ // Test sorting with dups and multiple candidate query plans. 
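Aside on the sortb.js rewrite above: that test depends on the blocking-sort memory limit. The hinted {b: 1} index cannot provide the {a: -1} order, so a SORT stage is required, and with allowDiskUse(false) the server must fail with QueryExceededMemoryLimitNoDiskUseAllowed once the sort exceeds its in-memory budget instead of spilling to disk. A minimal sketch of that behavior, with an assumed collection name and illustrative sizes:

```javascript
// Minimal sketch of the allowDiskUse(false) failure mode exercised by sortb.js.
// Collection name and document counts are illustrative, not taken from the test.
const c = db.sortb_sketch;
c.drop();
assert.commandWorked(c.createIndex({b: 1}));

// A ~1MB filler string; a few hundred of these comfortably exceed the
// default in-memory sort budget.
const big = new Array(1024 * 1024).toString();
const docs = Array.from({length: 200}, (_, i) => ({a: i, b: i, big: big}));
assert.commandWorked(c.insert(docs));

// The hinted {b: 1} index cannot provide the {a: -1} order, so a blocking SORT
// is needed; with allowDiskUse(false) it must fail rather than spill to disk.
assert.throwsWithCode(
    () => c.find().sort({a: -1}).allowDiskUse(false).hint({b: 1}).limit(100).itcount(),
    ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed);
```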
-t = db.jstests_sortd; +let t = db.jstests_sortd; function checkNumSorted(n, query) { - docs = query.toArray(); + let docs = query.toArray(); assert.eq(n, docs.length); - for (i = 1; i < docs.length; ++i) { + for (let i = 1; i < docs.length; ++i) { assert.lte(docs[i - 1].a, docs[i].a); } } @@ -31,10 +31,10 @@ t.drop(); t.save({a: 1}); t.save({a: 10}); -for (i = 2; i <= 9; ++i) { +for (let i = 2; i <= 9; ++i) { t.save({a: i}); } -for (i = 0; i < 30; ++i) { +for (let i = 0; i < 30; ++i) { t.save({a: 100}); } t.createIndex({a: 1}); @@ -49,10 +49,10 @@ t.drop(); t.save({a: 1}); t.save({a: 200}); -for (i = 2; i <= 199; ++i) { +for (let i = 2; i <= 199; ++i) { t.save({a: i}); } -for (i = 0; i < 30; ++i) { +for (let i = 0; i < 30; ++i) { t.save({a: 2000}); } t.createIndex({a: 1}); @@ -65,7 +65,7 @@ checkNumSorted(200, t.find({a: {$gte: 0, $lte: 200}, b: null}).sort({a: 1})); t.drop(); -for (i = 399; i >= 0; --i) { +for (let i = 399; i >= 0; --i) { t.save({a: i}); } t.createIndex({a: 1}); diff --git a/jstests/core/query/sort/sortf.js b/jstests/core/query/sort/sortf.js index 47c6a27ed9f4e..8efa55c794e3a 100644 --- a/jstests/core/query/sort/sortf.js +++ b/jstests/core/query/sort/sortf.js @@ -5,20 +5,20 @@ // Unsorted plan on {a:1}, sorted plan on {b:1}. The unsorted plan exhausts its memory limit before // the sorted plan is chosen by the query optimizer. -t = db.jstests_sortf; +let t = db.jstests_sortf; t.drop(); t.createIndex({a: 1}); t.createIndex({b: 1}); -for (i = 0; i < 100; ++i) { +for (let i = 0; i < 100; ++i) { t.save({a: 0, b: 0}); } -big = new Array(10 * 1000 * 1000).toString(); -for (i = 0; i < 5; ++i) { +let big = new Array(10 * 1000 * 1000).toString(); +for (let i = 0; i < 5; ++i) { t.save({a: 1, b: 1, big: big}); } assert.eq(5, t.find({a: 1}).sort({b: 1}).itcount()); -t.drop(); \ No newline at end of file +t.drop(); diff --git a/jstests/core/query/sort/sorti.js b/jstests/core/query/sort/sorti.js index e30739b4867d6..4816f83be7a8d 100644 --- a/jstests/core/query/sort/sorti.js +++ b/jstests/core/query/sort/sorti.js @@ -1,6 +1,6 @@ // Check that a projection is applied after an in memory sort. -t = db.jstests_sorti; +let t = db.jstests_sorti; t.drop(); t.save({a: 1, b: 0}); @@ -9,10 +9,10 @@ t.save({a: 2, b: 2}); t.save({a: 4, b: 3}); function checkBOrder(query) { - arr = query.toArray(); - order = []; - for (i in arr) { - a = arr[i]; + let arr = query.toArray(); + let order = []; + for (let i in arr) { + let a = arr[i]; order.push(a.b); } assert.eq([0, 2, 1, 3], order); diff --git a/jstests/core/query/sort/sortk.js b/jstests/core/query/sort/sortk.js index 1561d61675e11..adb829d182d7d 100644 --- a/jstests/core/query/sort/sortk.js +++ b/jstests/core/query/sort/sortk.js @@ -8,7 +8,7 @@ // requires_scripting, // ] -t = db.jstests_sortk; +let t = db.jstests_sortk; t.drop(); function resetCollection() { @@ -25,7 +25,7 @@ resetCollection(); t.createIndex({a: 1, b: 1}); function simpleQuery(extraFields, sort, hint) { - query = {a: {$in: [1, 2]}}; + let query = {a: {$in: [1, 2]}}; Object.extend(query, extraFields); sort = sort || {b: 1}; hint = hint || {a: 1, b: 1}; @@ -67,7 +67,7 @@ assert.eq(0, simpleQuery({}, {a: -1, b: 1}).limit(-1)[0].b); // Without a hint, multiple cursors are attempted. 
assert.eq(0, t.find({a: {$in: [1, 2]}}).sort({b: 1}).limit(-1)[0].b); -explain = t.find({a: {$in: [1, 2]}}).sort({b: 1}).limit(-1).explain(true); +let explain = t.find({a: {$in: [1, 2]}}).sort({b: 1}).limit(-1).explain(true); assert.eq(1, explain.executionStats.nReturned); // The expected first result now comes from the first interval. diff --git a/jstests/core/query/type/type1.js b/jstests/core/query/type/type1.js index 8066de2a5c4b9..49674d834eab4 100644 --- a/jstests/core/query/type/type1.js +++ b/jstests/core/query/type/type1.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.type1; +let t = db.type1; t.drop(); t.save({x: 1.1}); diff --git a/jstests/core/query/type/type2.js b/jstests/core/query/type/type2.js index d93d313d60f8d..701d99f816e51 100644 --- a/jstests/core/query/type/type2.js +++ b/jstests/core/query/type/type2.js @@ -1,6 +1,6 @@ // SERVER-1735 $type:10 matches null value, not missing value. -t = db.jstests_type2; +let t = db.jstests_type2; t.drop(); t.save({a: null}); @@ -16,4 +16,4 @@ function test() { test(); t.createIndex({a: 1}); -test(); \ No newline at end of file +test(); diff --git a/jstests/core/query/type/type3.js b/jstests/core/query/type/type3.js index 8b4858662dd04..14c7697bc6628 100644 --- a/jstests/core/query/type/type3.js +++ b/jstests/core/query/type/type3.js @@ -4,7 +4,7 @@ // Check query type bracketing SERVER-3222 -t = db.jstests_type3; +let t = db.jstests_type3; t.drop(); t.createIndex({a: 1}); diff --git a/jstests/core/query/type/type_operator_on_missing_values.js b/jstests/core/query/type/type_operator_on_missing_values.js index 9a67b23b882e1..10130a930fba1 100644 --- a/jstests/core/query/type/type_operator_on_missing_values.js +++ b/jstests/core/query/type/type_operator_on_missing_values.js @@ -27,4 +27,4 @@ for (const type of bsonTypes) { results = coll.find({a: {$not: {$type: type}}}).sort({_id: 1}).toArray(); assert.eq(results, documentList); } -}()); \ No newline at end of file +}()); diff --git a/jstests/core/query/unset/unset.js b/jstests/core/query/unset/unset.js index 14e18229723fd..c8e6ca501ef10 100644 --- a/jstests/core/query/unset/unset.js +++ b/jstests/core/query/unset/unset.js @@ -1,10 +1,7 @@ -t = db.unset; +let t = db.unset; t.drop(); -orig = { - _id: 1, - emb: {} -}; +let orig = {_id: 1, emb: {}}; t.insert(orig); t.update({_id: 1}, {$unset: {'emb.a': 1}}); diff --git a/jstests/core/query/unset/unset2.js b/jstests/core/query/unset/unset2.js index e120ae2b6747d..4d192c5211e81 100644 --- a/jstests/core/query/unset/unset2.js +++ b/jstests/core/query/unset/unset2.js @@ -5,7 +5,7 @@ var res; -t = db.unset2; +let t = db.unset2; t.drop(); t.save({a: ["a", "b", "c", "d"]}); diff --git a/jstests/core/query/where/where1.js b/jstests/core/query/where/where1.js index 1082bb902e8bc..d093388c3d146 100644 --- a/jstests/core/query/where/where1.js +++ b/jstests/core/query/where/where1.js @@ -3,7 +3,7 @@ // requires_scripting, // ] -t = db.getCollection("where1"); +let t = db.getCollection("where1"); t.drop(); t.save({a: 1}); diff --git a/jstests/core/query/where/where2.js b/jstests/core/query/where/where2.js index 6561c829353c7..12ba7382e3cb9 100644 --- a/jstests/core/query/where/where2.js +++ b/jstests/core/query/where/where2.js @@ -3,7 +3,7 @@ // requires_scripting, // ] -t = db.getCollection("where2"); +let t = db.getCollection("where2"); t.drop(); t.save({a: 1}); diff --git a/jstests/core/query/where/where3.js b/jstests/core/query/where/where3.js index ffbe690312c7f..3b48bab56f535 100644 --- a/jstests/core/query/where/where3.js +++ 
b/jstests/core/query/where/where3.js @@ -3,7 +3,7 @@ // requires_scripting // ] -t = db.where3; +let t = db.where3; t.drop(); t.save({returned_date: 5}); diff --git a/jstests/core/queryable_encryption/basic_crud.js b/jstests/core/queryable_encryption/basic_crud.js new file mode 100644 index 0000000000000..86d63b3b29638 --- /dev/null +++ b/jstests/core/queryable_encryption/basic_crud.js @@ -0,0 +1,152 @@ +/** + * Tests basic CRUD operations with queryable encrypted fields. + * + * @tags: [ + * no_selinux, + * tenant_migration_incompatible, + * does_not_support_transactions, + * does_not_support_stepdowns, + * ] + */ +import { + assertIsIndexedEncryptedField, + EncryptedClient, + kSafeContentField +} from "jstests/fle2/libs/encrypted_client_util.js"; + +if (!(buildInfo().modules.includes("enterprise"))) { + jsTestLog("Skipping test as it requires the enterprise module"); + quit(); +} + +const dbName = "qetestdb"; +const collName = "qetestcoll"; +const initialConn = db.getMongo(); +const localKMS = { + key: BinData( + 0, + "/tu9jUCBqZdwCelwE/EAm/4WqdxrSMi04B8e9uAV+m30rI1J2nhKZZtQjdvsSCwuI4erR6IEcEK+5eGUAODv43NDNIR9QheT2edWFewUfHKsl9cnzTc86meIzOmYl6dr") +}; + +// Some tests silently change the DB name to prefix it with a tenant ID, but we +// need to pass the real DB name for the keyvault when setting up the auto encryption, +// so that the internal connection for the key vault will target the right DB name. +const kvDbName = (typeof (initialConn.getDbNameWithTenantPrefix) === "function") + ? initialConn.getDbNameWithTenantPrefix(dbName) + : dbName; +jsTestLog("Using key vault db " + kvDbName); + +const clientSideFLEOptions = { + kmsProviders: {local: localKMS}, + keyVaultNamespace: kvDbName + ".keystore", + schemaMap: {}, +}; + +db.getSiblingDB(dbName).dropDatabase(); + +assert(initialConn.setAutoEncryption(clientSideFLEOptions)); +initialConn.toggleAutoEncryption(true); + +let encryptedClient = new EncryptedClient(initialConn, dbName); +assert.commandWorked(encryptedClient.createEncryptionCollection(collName, { + encryptedFields: { + "fields": [ + {"path": "first", "bsonType": "string", "queries": {"queryType": "equality"}}, + ] + } +})); + +function runIndexedEqualityEncryptedCRUDTest(client, iterations) { + let conn = client.getDB().getMongo(); + let ecoll = client.getDB()[collName]; + let values = + [["frodo", "baggins"], ["sam", "gamgee"], ["pippin", "took"], ["merry", "brandybuck"]]; + let count = 0; + let escCount = 0; + let ecocCount = 0; + + // Do encrypted inserts + for (let it = 0; it < iterations; it++) { + for (let val of values) { + assert.commandWorked(ecoll.insert({_id: count, first: val[0], last: val[1]})); + count++; + client.assertEncryptedCollectionCounts(collName, count, count, count); + } + } + escCount = count; + ecocCount = count; + + // Do finds using unencrypted connection + { + conn.toggleAutoEncryption(false); + + let rawDocs = ecoll.find().toArray(); + assert.eq(rawDocs.length, count); + for (let rawDoc of rawDocs) { + assertIsIndexedEncryptedField(rawDoc.first); + assert(rawDoc[kSafeContentField] !== undefined); + } + conn.toggleAutoEncryption(true); + } + + // Do encrypted queries using encrypted connection + for (let mod = 0; mod < values.length; mod++) { + let docs = ecoll.find({last: values[mod][1]}).toArray(); + + for (let doc of docs) { + assert.eq(doc._id % values.length, mod); + assert.eq(doc.first, values[mod][0]); + assert(doc[kSafeContentField] !== undefined); + } + } + + // Do updates on encrypted fields + for (let it = 0; it < iterations; it++) { + 
let res = assert.commandWorked(ecoll.updateOne( + {$and: [{last: "baggins"}, {first: "frodo"}]}, {$set: {first: "bilbo"}})); + assert.eq(res.matchedCount, 1); + assert.eq(res.modifiedCount, 1); + escCount++; + ecocCount++; + client.assertEncryptedCollectionCounts(collName, count, escCount, ecocCount); + + res = assert.commandWorked( + ecoll.replaceOne({last: "took"}, {first: "paladin", last: "took"})); + assert.eq(res.matchedCount, 1); + assert.eq(res.modifiedCount, 1); + escCount++; + ecocCount++; + client.assertEncryptedCollectionCounts(collName, count, escCount, ecocCount); + } + + // Do findAndModifies + for (let it = 0; it < iterations; it++) { + let res = assert.commandWorked(ecoll.runCommand({ + findAndModify: ecoll.getName(), + query: {$and: [{last: "gamgee"}, {first: "sam"}]}, + update: {$set: {first: "rosie"}}, + })); + print(tojson(res)); + assert.eq(res.value.first, "sam"); + assert(res.value[kSafeContentField] !== undefined); + escCount++; + ecocCount++; + client.assertEncryptedCollectionCounts(collName, count, escCount, ecocCount); + } + + // Do deletes + for (let it = 0; it < iterations; it++) { + let res = assert.commandWorked( + ecoll.deleteOne({last: "brandybuck"}, {writeConcern: {w: "majority"}})); + assert.eq(res.deletedCount, 1); + count--; + client.assertEncryptedCollectionCounts(collName, count, escCount, ecocCount); + } + assert.eq(ecoll.find({last: "brandybuck"}).count(), 0); +} + +// Test CRUD on indexed equality encrypted fields +runIndexedEqualityEncryptedCRUDTest(encryptedClient, 10); + +encryptedClient = undefined; +initialConn.unsetAutoEncryption(); diff --git a/jstests/core/record_store_count.js b/jstests/core/record_store_count.js index da64870dfe164..222f3e82a47d3 100644 --- a/jstests/core/record_store_count.js +++ b/jstests/core/record_store_count.js @@ -6,12 +6,9 @@ * ] */ -load("jstests/libs/analyze_plan.js"); // For 'planHasStage'. +import {planHasStage} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fixture_helpers.js"); // For isMongos and isSharded. -(function() { -"use strict"; - var coll = db.record_store_count; coll.drop(); @@ -63,7 +60,7 @@ if (!isMongos(db) || !FixtureHelpers.isSharded(coll)) { // In an unsharded collection we can use the COUNT_SCAN stage. testExplainAndExpectStage( {expectedStages: ["COUNT_SCAN"], unexpectedStages: [], hintIndex: {x: 1}}); - return; + quit(); } // The remainder of the test is only relevant for sharded clusters. 
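For context on the new basic_crud.js above: the queryable-encryption setup it relies on is an auto-encrypting connection backed by a local KMS master key plus a collection created with an encryptedFields map. A condensed sketch of that setup, using only helpers the test itself imports (enterprise module required; the local key literal is elided here and appears in full in the test):

```javascript
// Condensed sketch of the setup used by basic_crud.js (enterprise-only helpers).
// 'localKMS' stands for the local master key literal defined in the test above.
import {EncryptedClient, kSafeContentField} from "jstests/fle2/libs/encrypted_client_util.js";

const conn = db.getMongo();
assert(conn.setAutoEncryption({
    kmsProviders: {local: localKMS},
    keyVaultNamespace: "qetestdb.keystore",
    schemaMap: {},
}));
conn.toggleAutoEncryption(true);

const client = new EncryptedClient(conn, "qetestdb");
assert.commandWorked(client.createEncryptionCollection("qetestcoll", {
    encryptedFields:
        {fields: [{path: "first", bsonType: "string", queries: {queryType: "equality"}}]}
}));

// Inserts and equality queries on 'first' now go through the encrypted connection;
// reading the same documents over a plain connection yields ciphertext plus the
// __safeContent__ metadata array (kSafeContentField) instead of the original values.
```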
@@ -87,4 +84,3 @@ testExplainAndExpectStage({ unexpectedStages: ["FETCH"], hintIndex: kNewIndexSpec }); -})(); diff --git a/jstests/core/resume_query.js b/jstests/core/resume_query.js index 711c19980a16e..18fa29f4ce187 100644 --- a/jstests/core/resume_query.js +++ b/jstests/core/resume_query.js @@ -11,66 +11,136 @@ * ] */ -(function() { -"use strict"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + const testName = TestData.testName; -const testDb = db.getSiblingDB(testName); -assert.commandWorked(testDb.dropDatabase()); - -jsTestLog("Setting up the data."); -const testData = [{_id: 0, a: 1}, {_id: 1, b: 2}, {_id: 2, c: 3}, {_id: 3, d: 4}]; -assert.commandWorked(testDb.test.insert(testData)); - -jsTestLog("Running the initial query."); -let res = assert.commandWorked(testDb.runCommand( - {find: "test", hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true})); -assert.eq(1, res.cursor.firstBatch.length); -assert.contains(res.cursor.firstBatch[0], testData); -let queryData = res.cursor.firstBatch; -assert.hasFields(res.cursor, ["postBatchResumeToken"]); -let resumeToken = res.cursor.postBatchResumeToken; - -// Kill the cursor before attempting to resume. -assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]})); - -jsTestLog("Running the second query after killing the cursor."); -res = assert.commandWorked(testDb.runCommand({ - find: "test", - hint: {$natural: 1}, - batchSize: 1, - $_requestResumeToken: true, - $_resumeAfter: resumeToken -})); -assert.eq(1, res.cursor.firstBatch.length); -// The return value should not be the same as the one before. -assert.neq(queryData[0], res.cursor.firstBatch[0]); -assert.contains(res.cursor.firstBatch[0], testData); -queryData.push(res.cursor.firstBatch[0]); -let cursorId = res.cursor.id; - -jsTestLog("Running getMore."); -res = - assert.commandWorked(testDb.runCommand({getMore: cursorId, collection: "test", batchSize: 1})); -queryData.push(res.cursor.nextBatch[0]); -assert.hasFields(res.cursor, ["postBatchResumeToken"]); -resumeToken = res.cursor.postBatchResumeToken; - -// Kill the cursor before attempting to resume. -assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]})); - -jsTestLog("Testing resume from getMore"); -res = assert.commandWorked(testDb.runCommand({ - find: "test", - hint: {$natural: 1}, - batchSize: 10, - $_requestResumeToken: true, - $_resumeAfter: resumeToken -})); -assert.eq(1, res.cursor.firstBatch.length); -// This should have exhausted the collection. -assert.eq(0, res.cursor.id); -queryData.push(res.cursor.firstBatch[0]); - -assert.sameMembers(testData, queryData); -})(); +const testFindCmd = function() { + const testDb = db.getSiblingDB(testName); + assert.commandWorked(testDb.dropDatabase()); + + jsTestLog("[Find] Setting up the data."); + const testData = [{_id: 0, a: 1}, {_id: 1, b: 2}, {_id: 2, c: 3}, {_id: 3, d: 4}]; + assert.commandWorked(testDb.test.insert(testData)); + jsTestLog("[Find] Running the initial query."); + let res = assert.commandWorked(testDb.runCommand( + {find: "test", hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true})); + assert.eq(1, res.cursor.firstBatch.length); + assert.contains(res.cursor.firstBatch[0], testData); + let queryData = res.cursor.firstBatch; + assert.hasFields(res.cursor, ["postBatchResumeToken"]); + let resumeToken = res.cursor.postBatchResumeToken; + + // Kill the cursor before attempting to resume. 
+ assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]})); + + jsTestLog("[Find] Running the second query after killing the cursor."); + res = assert.commandWorked(testDb.runCommand({ + find: "test", + hint: {$natural: 1}, + batchSize: 1, + $_requestResumeToken: true, + $_resumeAfter: resumeToken + })); + assert.eq(1, res.cursor.firstBatch.length); + // The return value should not be the same as the one before. + assert.neq(queryData[0], res.cursor.firstBatch[0]); + assert.contains(res.cursor.firstBatch[0], testData); + queryData.push(res.cursor.firstBatch[0]); + let cursorId = res.cursor.id; + + jsTestLog("[Find] Running getMore."); + res = assert.commandWorked( + testDb.runCommand({getMore: cursorId, collection: "test", batchSize: 1})); + queryData.push(res.cursor.nextBatch[0]); + assert.hasFields(res.cursor, ["postBatchResumeToken"]); + resumeToken = res.cursor.postBatchResumeToken; + + // Kill the cursor before attempting to resume. + assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]})); + + jsTestLog("[Find] Testing resume from getMore"); + res = assert.commandWorked(testDb.runCommand({ + find: "test", + hint: {$natural: 1}, + batchSize: 10, + $_requestResumeToken: true, + $_resumeAfter: resumeToken + })); + assert.eq(1, res.cursor.firstBatch.length); + // This should have exhausted the collection. + assert.eq(0, res.cursor.id); + queryData.push(res.cursor.firstBatch[0]); + + assert.sameMembers(testData, queryData); +}; + +const testAggregateCmd = function() { + if (!FeatureFlagUtil.isEnabled(db, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled."); + return; + } + const testDb = db.getSiblingDB(testName); + assert.commandWorked(testDb.dropDatabase()); + + jsTestLog("[Aggregate] Setting up the data."); + const testData = [{_id: 0, a: 1}, {_id: 1, b: 2}, {_id: 2, c: 3}, {_id: 3, d: 4}]; + assert.commandWorked(testDb.test.insert(testData)); + jsTestLog("[Aggregate] Running the initial query."); + let res = assert.commandWorked(testDb.runCommand({ + aggregate: "test", + pipeline: [], + hint: {$natural: 1}, + cursor: {batchSize: 1}, + $_requestResumeToken: true + })); + assert.eq(1, res.cursor.firstBatch.length); + assert.contains(res.cursor.firstBatch[0], testData); + let queryData = res.cursor.firstBatch; + assert.hasFields(res.cursor, ["postBatchResumeToken"]); + let resumeToken = res.cursor.postBatchResumeToken; + + assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]})); + + jsTestLog("[Aggregate] Running the second query after killing the cursor."); + res = assert.commandWorked(testDb.runCommand({ + aggregate: "test", + pipeline: [], + hint: {$natural: 1}, + cursor: {batchSize: 1}, + $_requestResumeToken: true, + $_resumeAfter: resumeToken + })); + assert.eq(1, res.cursor.firstBatch.length); + assert.neq(queryData[0], res.cursor.firstBatch[0]); + assert.contains(res.cursor.firstBatch[0], testData); + queryData.push(res.cursor.firstBatch[0]); + let cursorId = res.cursor.id; + + jsTestLog("[Aggregate] Running getMore."); + res = assert.commandWorked( + testDb.runCommand({getMore: cursorId, collection: "test", batchSize: 1})); + queryData.push(res.cursor.nextBatch[0]); + assert.hasFields(res.cursor, ["postBatchResumeToken"]); + resumeToken = res.cursor.postBatchResumeToken; + + assert.commandWorked(testDb.runCommand({killCursors: "test", cursors: [res.cursor.id]})); + + jsTestLog("[Aggregate] Testing resume from getMore"); + 
res = assert.commandWorked(testDb.runCommand({ + aggregate: "test", + pipeline: [], + hint: {$natural: 1}, + cursor: {batchSize: 10}, + $_requestResumeToken: true, + $_resumeAfter: resumeToken + })); + assert.eq(1, res.cursor.firstBatch.length); + assert.eq(0, res.cursor.id); + queryData.push(res.cursor.firstBatch[0]); + + assert.sameMembers(testData, queryData); +}; + +testFindCmd(); +testAggregateCmd(); \ No newline at end of file diff --git a/jstests/core/resume_query_from_non_existent_record.js b/jstests/core/resume_query_from_non_existent_record.js index 8dba152a97926..62bc8481bd56f 100644 --- a/jstests/core/resume_query_from_non_existent_record.js +++ b/jstests/core/resume_query_from_non_existent_record.js @@ -15,59 +15,121 @@ * ] */ -(function() { -"use strict"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const collName = "resume_query_from_non_existent_record"; const coll = db[collName]; -coll.drop(); - -const testData = [{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}]; -assert.commandWorked(coll.insert(testData)); - -// Run the initial query and request to return a resume token. We're interested only in a single -// document, so 'batchSize' is set to 1. -let res = assert.commandWorked( - db.runCommand({find: collName, hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true})); -assert.eq(1, res.cursor.firstBatch.length); -assert.contains(res.cursor.firstBatch[0], testData); -const savedData = res.cursor.firstBatch; - -// Make sure the query returned a resume token which will be used to resume the query from. -assert.hasFields(res.cursor, ["postBatchResumeToken"]); -const resumeToken = res.cursor.postBatchResumeToken; - -// Kill the cursor before attempting to resume. -assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]})); - -// Try to resume the query from the saved resume token. -res = assert.commandWorked(db.runCommand({ - find: collName, - hint: {$natural: 1}, - batchSize: 1, - $_requestResumeToken: true, - $_resumeAfter: resumeToken -})); -assert.eq(1, res.cursor.firstBatch.length); -assert.contains(res.cursor.firstBatch[0], testData); -assert.neq(savedData[0], res.cursor.firstBatch[0]); - -// Kill the cursor before attempting to resume. -assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]})); - -// Delete a document which corresponds to the saved resume token, so that we can guarantee it does -// not exist. -assert.commandWorked(coll.remove({_id: savedData[0]._id}, {justOne: true})); - -// Try to resume the query from the same token and check that it fails to position the cursor to -// the record specified in the resume token. -assert.commandFailedWithCode(db.runCommand({ - find: collName, - hint: {$natural: 1}, - batchSize: 1, - $_requestResumeToken: true, - $_resumeAfter: resumeToken -}), - ErrorCodes.KeyNotFound); -})(); +const testFindCmd = function() { + coll.drop(); + + const testData = [{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}]; + assert.commandWorked(coll.insert(testData)); + + jsTestLog("[Find] request a resumeToken then use it to resume."); + // Run the initial query and request to return a resume token. We're interested only in a single + // document, so 'batchSize' is set to 1. 
+ let res = assert.commandWorked(db.runCommand( + {find: collName, hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true})); + assert.eq(1, res.cursor.firstBatch.length); + assert.contains(res.cursor.firstBatch[0], testData); + const savedData = res.cursor.firstBatch; + + // Make sure the query returned a resume token which will be used to resume the query from. + assert.hasFields(res.cursor, ["postBatchResumeToken"]); + const resumeToken = res.cursor.postBatchResumeToken; + + // Kill the cursor before attempting to resume. + assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]})); + + // Try to resume the query from the saved resume token. + res = assert.commandWorked(db.runCommand({ + find: collName, + hint: {$natural: 1}, + batchSize: 1, + $_requestResumeToken: true, + $_resumeAfter: resumeToken + })); + assert.eq(1, res.cursor.firstBatch.length); + assert.contains(res.cursor.firstBatch[0], testData); + assert.neq(savedData[0], res.cursor.firstBatch[0]); + + // Kill the cursor before attempting to resume. + assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]})); + + jsTestLog( + "[Find] Delete the document which corresponds to the saved resume token, then resumeAfter should fail."); + // Delete a document which corresponds to the saved resume token, so that we can guarantee it + // does not exist. + assert.commandWorked(coll.remove({_id: savedData[0]._id}, {justOne: true})); + + // Try to resume the query from the same token and check that it fails to position the cursor to + // the record specified in the resume token. + assert.commandFailedWithCode(db.runCommand({ + find: collName, + hint: {$natural: 1}, + batchSize: 1, + $_requestResumeToken: true, + $_resumeAfter: resumeToken + }), + ErrorCodes.KeyNotFound); +}; + +const testAggregateCmd = function() { + if (!FeatureFlagUtil.isEnabled(db, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled."); + return; + } + coll.drop(); + + const testData = [{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}]; + assert.commandWorked(coll.insert(testData)); + + jsTestLog("[Aggregate] request a resumeToken then use it to resume."); + let res = assert.commandWorked(db.runCommand({ + aggregate: collName, + pipeline: [], + hint: {$natural: 1}, + cursor: {batchSize: 1}, + $_requestResumeToken: true + })); + assert.eq(1, res.cursor.firstBatch.length); + assert.contains(res.cursor.firstBatch[0], testData); + const savedData = res.cursor.firstBatch; + + assert.hasFields(res.cursor, ["postBatchResumeToken"]); + const resumeToken = res.cursor.postBatchResumeToken; + + assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]})); + + res = assert.commandWorked(db.runCommand({ + aggregate: collName, + pipeline: [], + hint: {$natural: 1}, + cursor: {batchSize: 1}, + $_requestResumeToken: true, + $_resumeAfter: resumeToken + })); + assert.eq(1, res.cursor.firstBatch.length); + assert.contains(res.cursor.firstBatch[0], testData); + assert.neq(savedData[0], res.cursor.firstBatch[0]); + + assert.commandWorked(db.runCommand({killCursors: collName, cursors: [res.cursor.id]})); + + jsTestLog( + "[Aggregate] Delete the document which corresponds to the saved resume token, then resumeAfter should fail."); + assert.commandWorked(coll.remove({_id: savedData[0]._id}, {justOne: true})); + + assert.commandFailedWithCode(db.runCommand({ + aggregate: collName, + pipeline: [], + hint: {$natural: 1}, + cursor: {batchSize: 1}, + 
$_requestResumeToken: true, + $_resumeAfter: resumeToken + }), + ErrorCodes.KeyNotFound); +}; + +testFindCmd(); +testAggregateCmd(); \ No newline at end of file diff --git a/jstests/core/return_key.js b/jstests/core/return_key.js index 58c48360597a3..c23ff7800c0d8 100644 --- a/jstests/core/return_key.js +++ b/jstests/core/return_key.js @@ -8,10 +8,7 @@ /** * Tests for returnKey. */ -load("jstests/libs/analyze_plan.js"); - -(function() { -'use strict'; +import {isIndexOnly} from "jstests/libs/analyze_plan.js"; var results; var explain; @@ -84,4 +81,3 @@ assert.eq(results, [{a: 3, c: [1], d: [1]}, {a: 2, c: [2], d: [2]}, {a: 1, c: [3 results = coll.find({}, {"c.d": {$meta: "sortKey"}}).hint({a: 1}).sort({b: 1}).returnKey().toArray(); assert.eq(results, [{a: 3, c: {d: [1]}}, {a: 2, c: {d: [2]}}, {a: 1, c: {d: [3]}}]); -})(); diff --git a/jstests/core/role_management_helpers.js b/jstests/core/role_management_helpers.js index 9fd3d28ce5551..05e93f7a70de7 100644 --- a/jstests/core/role_management_helpers.js +++ b/jstests/core/role_management_helpers.js @@ -13,7 +13,7 @@ // It is not a comprehensive test of the functionality of the role manipulation commands function assertHasRole(rolesArray, roleName, roleDB) { - for (i in rolesArray) { + for (let i in rolesArray) { var curRole = rolesArray[i]; if (curRole.role == roleName && curRole.db == roleDB) { return; @@ -23,7 +23,7 @@ function assertHasRole(rolesArray, roleName, roleDB) { } function assertHasPrivilege(privilegeArray, privilege) { - for (i in privilegeArray) { + for (let i in privilegeArray) { var curPriv = privilegeArray[i]; if (curPriv.resource.cluster == privilege.resource.cluster && curPriv.resource.anyResource == privilege.resource.anyResource && @@ -31,7 +31,7 @@ function assertHasPrivilege(privilegeArray, privilege) { curPriv.resource.collection == privilege.resource.collection) { // Same resource assert.eq(curPriv.actions.length, privilege.actions.length); - for (k in curPriv.actions) { + for (let k in curPriv.actions) { assert.eq(curPriv.actions[k], privilege.actions[k]); } return; diff --git a/jstests/core/sbe/from_plan_cache_flag.js b/jstests/core/sbe/from_plan_cache_flag.js index 87cb795e0acb7..a7612e8049a74 100644 --- a/jstests/core/sbe/from_plan_cache_flag.js +++ b/jstests/core/sbe/from_plan_cache_flag.js @@ -8,16 +8,12 @@ // # TODO SERVER-67607: Test plan cache with CQF enabled. // cqf_incompatible, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. if (!checkSBEEnabled(db)) { jsTest.log("Skip running the test because SBE is not enabled"); - return; + quit(); } const testDB = db.getSiblingDB("from_plan_cache_flag"); assert.commandWorked(testDB.dropDatabase()); @@ -42,4 +38,3 @@ assert.eq(!!profileObj.fromPlanCache, true, profileObj); coll.aggregate([{$match: {a: 3}}], {comment}).toArray(); profileObj = getLatestProfilerEntry(testDB, {"command.comment": comment}); assert.eq(!!profileObj.fromPlanCache, true, profileObj); -}()); diff --git a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js index f4a90d42dc159..3f27b34780e54 100644 --- a/jstests/core/sbe/plan_cache_sbe_with_or_queries.js +++ b/jstests/core/sbe/plan_cache_sbe_with_or_queries.js @@ -12,18 +12,15 @@ // # Plan cache state is node-local and will not get migrated alongside tenant data. 
// tenant_migration_incompatible, // # TODO SERVER-67607: Test plan cache with CQF enabled. -// cqf_incompatible, +// cqf_experimental_incompatible, // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getPlanCacheKeyFromShape, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (!checkSBEEnabled(db)) { jsTest.log("Skip running the test because SBE is not enabled"); - return; + quit(); } function getPlanCacheEntries(query, collection, db) { @@ -59,4 +56,3 @@ assert.eq(true, planCacheEntries[0].isPinned, planCacheEntries); assert.eq(true, planCacheEntries[0].isActive, planCacheEntries); // Works is always 0 for pinned plan cache entries. assert.eq(0, planCacheEntries[0].works, planCacheEntries); -}()); diff --git a/jstests/core/sbe/sbe_explain_rejected_plans.js b/jstests/core/sbe/sbe_explain_rejected_plans.js index a6ae25751868d..0dc66d494f030 100644 --- a/jstests/core/sbe/sbe_explain_rejected_plans.js +++ b/jstests/core/sbe/sbe_explain_rejected_plans.js @@ -5,17 +5,20 @@ * requires_fcv_63, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import { + getExecutionStages, + getPlanStages, + getRejectedPlan, + getRejectedPlans, + getWinningPlan, +} from "jstests/libs/analyze_plan.js"; load("jstests/libs/collection_drop_recreate.js"); -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const isSBEEnabled = checkSBEEnabled(db); if (!isSBEEnabled) { jsTestLog("Skipping test because SBE is disabled"); - return; + quit(); } const coll = assertDropAndRecreateCollection(db, "sbe_explain_rejected_plans"); @@ -66,4 +69,3 @@ for (let rejectedPlan of getRejectedPlans(explain)) { rejectedPlan.slotBasedPlan.stages.includes("@\"b_1\""), explain); } -})(); diff --git a/jstests/core/sbe/sbe_ixscan_explain.js b/jstests/core/sbe/sbe_ixscan_explain.js index d79b8a209fc57..21062a5848ae0 100644 --- a/jstests/core/sbe/sbe_ixscan_explain.js +++ b/jstests/core/sbe/sbe_ixscan_explain.js @@ -7,16 +7,13 @@ // requires_fcv_63, // ] -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); // For getPlanStages -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getPlanStages, getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const isSBEEnabled = checkSBEEnabled(db); if (!isSBEEnabled) { jsTestLog("Skipping test because SBE is disabled"); - return; + quit(); } function assertStageContainsIndexName(stage) { @@ -45,4 +42,3 @@ assert(ixscanStages.length !== 0); for (let ixscanStage of ixscanStages) { assertStageContainsIndexName(ixscanStage); } -}()); diff --git a/jstests/core/sbe_plan_cache_autoparameterize_collscan.js b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js index c1aaea782be6b..7f0ac77d90f5e 100644 --- a/jstests/core/sbe_plan_cache_autoparameterize_collscan.js +++ b/jstests/core/sbe_plan_cache_autoparameterize_collscan.js @@ -18,17 +18,14 @@ * requires_scripting, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); +import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // This test is specifically verifying the behavior of the SBE plan cache, which is only enabled // when SBE is enabled. 
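Stepping back to the resume_query.js and resume_query_from_non_existent_record.js rewrites above: both exercise the same resumable-scan protocol. The client requests a token with $_requestResumeToken, remembers cursor.postBatchResumeToken, and passes it back through $_resumeAfter to continue a $natural-order scan after the last returned record; if that record has since been removed, the resume fails with KeyNotFound. A condensed sketch of the round trip (collection name assumed):

```javascript
// Condensed sketch of the resume-token round trip used by the resume_query tests.
const coll = db.resume_sketch;  // assumed name
coll.drop();
assert.commandWorked(coll.insert([{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}]));

// Initial query: natural order, one document per batch, resume token requested.
let res = assert.commandWorked(db.runCommand(
    {find: coll.getName(), hint: {$natural: 1}, batchSize: 1, $_requestResumeToken: true}));
const resumeToken = res.cursor.postBatchResumeToken;
assert.commandWorked(db.runCommand({killCursors: coll.getName(), cursors: [res.cursor.id]}));

// Resume after the record identified by the token.
res = assert.commandWorked(db.runCommand({
    find: coll.getName(),
    hint: {$natural: 1},
    batchSize: 1,
    $_requestResumeToken: true,
    $_resumeAfter: resumeToken
}));

// Had the record behind the token been deleted first, the same command would fail
// with ErrorCodes.KeyNotFound (see resume_query_from_non_existent_record.js).
```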
if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); - return; + quit(); } const coll = db.sbe_plan_cache_autoparameterize_collscan; @@ -432,4 +429,3 @@ runTest({query: {a: {$type: ["string", "regex"]}}, projection: {_id: 1}}, {query: {a: {$type: ["string", "array"]}}, projection: {_id: 1}}, [{_id: 5}, {_id: 6}, {_id: 8}, {_id: 11}, {_id: 12}, {_id: 13}, {_id: 15}], false); -}()); diff --git a/jstests/core/server1470.js b/jstests/core/server1470.js index c3c7d47aaeb7c..bbca047d8ee7c 100644 --- a/jstests/core/server1470.js +++ b/jstests/core/server1470.js @@ -3,15 +3,12 @@ // key. // @tags: [assumes_unsharded_collection, requires_multi_updates, requires_non_retryable_writes] -t = db.server1470; +let t = db.server1470; t.drop(); -q = { - "name": "first", - "pic": {"$ref": "foo", "$id": ObjectId("4c48d04cd33a5a92628c9af6")} -}; +let q = {"name": "first", "pic": {"$ref": "foo", "$id": ObjectId("4c48d04cd33a5a92628c9af6")}}; t.update(q, {$set: {x: 1}}, true, true); -ref = t.findOne().pic; +let ref = t.findOne().pic; assert.eq("object", typeof (ref)); assert.eq(q.pic["$ref"], ref["$ref"]); assert.eq(q.pic["$id"], ref["$id"]); diff --git a/jstests/core/server22053.js b/jstests/core/server22053.js index d803c732b869c..5c93f60faaece 100644 --- a/jstests/core/server22053.js +++ b/jstests/core/server22053.js @@ -16,4 +16,4 @@ assert.eq(3, doc['mys'][2]); assert.eq(undefined, doc['mys'][3]); assert.eq(undefined, doc['mys'][4]); assert.eq(6, doc['mys'][5]); -}()); \ No newline at end of file +}()); diff --git a/jstests/core/server5346.js b/jstests/core/server5346.js index 18f2f019e5e1f..f9ad685296104 100644 --- a/jstests/core/server5346.js +++ b/jstests/core/server5346.js @@ -1,11 +1,8 @@ -t = db.server5346; +let t = db.server5346; t.drop(); -x = { - _id: 1, - versions: {} -}; +let x = {_id: 1, versions: {}}; t.insert(x); t.update({_id: 1}, {$inc: {"versions.2_01": 1}}); diff --git a/jstests/core/server7756.js b/jstests/core/server7756.js index 844c3a40d4d40..574e5587845f8 100644 --- a/jstests/core/server7756.js +++ b/jstests/core/server7756.js @@ -1,5 +1,5 @@ -t = db.server7756; +let t = db.server7756; t.drop(); t.save({a: [{1: 'x'}, 'y']}); diff --git a/jstests/core/server9385.js b/jstests/core/server9385.js index 4eb11076139d2..3e8cd4c96bd44 100644 --- a/jstests/core/server9385.js +++ b/jstests/core/server9385.js @@ -2,11 +2,11 @@ // // @tags: [requires_fastcount] -t = db.server9385; +let t = db.server9385; t.drop(); t.insert({_id: 1, x: 1}); -x = t.findOne(); +let x = t.findOne(); x._id = 2; t.save(x); diff --git a/jstests/core/shell/autocomplete.js b/jstests/core/shell/autocomplete.js index 6b8fb6ffa78ff..3ffa0db77cf77 100644 --- a/jstests/core/shell/autocomplete.js +++ b/jstests/core/shell/autocomplete.js @@ -1,6 +1,11 @@ /** * Validate auto complete works for various javascript types implemented by C++. 
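The two SBE plan-cache tests above both locate cache entries by plan cache key. Their own helper bodies are not shown in these hunks, so the following is only an assumed but conventional formulation: compute the key for a query shape with getPlanCacheKeyFromShape from analyze_plan.js, then filter the $planCacheStats output on it.

```javascript
// Sketch of looking up a plan cache entry by key; collection name and query shape
// are illustrative, not taken from the tests above.
import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";

const coll = db.plan_cache_sketch;  // assumed name
const query = {$or: [{a: 1}, {b: 1}]};

// Run the query (with suitable indexes in place it can produce a cache entry),
// then read the cache back through $planCacheStats filtered on the computed key.
coll.find(query).itcount();
const planCacheKey = getPlanCacheKeyFromShape({query: query, collection: coll, db: db});
const entries =
    coll.aggregate([{$planCacheStats: {}}, {$match: {planCacheKey: planCacheKey}}]).toArray();
printjson(entries);  // pinned SBE entries report isPinned: true and works: 0
```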
+ * @tags: [ + * # TODO SERVER-77024 enable on sharded passthrough suites when orphans hook will be supported + * assumes_unsharded_collection, + * ] */ + (function() { 'use strict'; diff --git a/jstests/core/shell/collection_save.js b/jstests/core/shell/collection_save.js index d375aa9f31eaf..fb3e53b756d09 100644 --- a/jstests/core/shell/collection_save.js +++ b/jstests/core/shell/collection_save.js @@ -32,4 +32,4 @@ assert.throws(() => coll.save("The answer to life, the universe and everything") assert.throws(() => coll.save([{"fruit": "mango"}, {"fruit": "orange"}]), [], "saving an array must throw an error"); -})(); \ No newline at end of file +})(); diff --git a/jstests/core/shell/shell1.js b/jstests/core/shell/shell1.js index 4fc4c3a1c15e2..cbe52f144019d 100644 --- a/jstests/core/shell/shell1.js +++ b/jstests/core/shell/shell1.js @@ -1,4 +1,4 @@ -x = 1; +let x = 1; shellHelper("show", "tables;"); shellHelper("show", "tables"); diff --git a/jstests/core/shell/shellkillop.js b/jstests/core/shell/shellkillop.js index 0ac2ad681ed6f..b6c3746e05225 100644 --- a/jstests/core/shell/shellkillop.js +++ b/jstests/core/shell/shellkillop.js @@ -1,15 +1,15 @@ -baseName = "jstests_shellkillop"; +let baseName = "jstests_shellkillop"; // 'retry' should be set to true in contexts where an exception should cause the test to be retried // rather than to fail. -retry = false; +let retry = false; function testShellAutokillop() { if (true) { // toggle to disable test db[baseName].drop(); print("shellkillop.js insert data"); - for (i = 0; i < 100000; ++i) { + for (let i = 0; i < 100000; ++i) { db[baseName].insert({i: 1}); } assert.eq(100000, db[baseName].count()); @@ -19,7 +19,7 @@ function testShellAutokillop() { var evalStr = "print('SKO subtask started'); db." + baseName + ".update( {}, {$set:{i:'abcdefghijkl'}}, false, true ); db." + baseName + ".count();"; print("shellkillop.js evalStr:" + evalStr); - spawn = startMongoProgramNoConnect( + let spawn = startMongoProgramNoConnect( "mongo", "--autokillop", "--port", myPort(), "--eval", evalStr); sleep(100); @@ -35,7 +35,7 @@ function testShellAutokillop() { print("count abcdefghijkl:" + db[baseName].find({i: 'abcdefghijkl'}).count()); var inprog = db.currentOp().inprog; - for (i in inprog) { + for (let i in inprog) { if (inprog[i].ns == "test." + baseName) throw Error("shellkillop.js op is still running: " + tojson(inprog[i])); } diff --git a/jstests/core/show_record_id.js b/jstests/core/show_record_id.js index 3af12d74d61f5..6bf75a42d372d 100644 --- a/jstests/core/show_record_id.js +++ b/jstests/core/show_record_id.js @@ -8,8 +8,8 @@ var t = db.show_record_id; t.drop(); function checkResults(arr) { - for (i in arr) { - a = arr[i]; + for (let i in arr) { + let a = arr[i]; assert(a['$recordId']); } } diff --git a/jstests/core/single_field_hashed_index.js b/jstests/core/single_field_hashed_index.js index a05400c4d581f..6f64a8f0dc3ad 100644 --- a/jstests/core/single_field_hashed_index.js +++ b/jstests/core/single_field_hashed_index.js @@ -7,9 +7,7 @@ * requires_fastcount, * ] */ -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); // For isIxscan(). 
+import {getWinningPlan, isIxscan} from "jstests/libs/analyze_plan.js"; const t = db.single_field_hashed_index; t.drop(); @@ -112,5 +110,4 @@ assert.commandFailedWithCode(t.insert({field1: {field2: {0: []}}}), 16766); assert.commandFailedWithCode(t.insert({field1: [{field2: {0: []}}]}), 16766); assert.commandFailedWithCode(t.insert({field1: {field2: {0: {field4: []}}}}), 16766); assert.commandWorked(t.insert({field1: {field2: {0: {otherField: []}}}})); -assert.commandWorked(t.insert({field1: {field2: {0: {field4: 1}}}})); -})(); +assert.commandWorked(t.insert({field1: {field2: {0: {field4: 1}}}})); \ No newline at end of file diff --git a/jstests/core/sparse_index_supports_ne_null.js b/jstests/core/sparse_index_supports_ne_null.js index 4ad53e72996de..a8c115dba2a19 100644 --- a/jstests/core/sparse_index_supports_ne_null.js +++ b/jstests/core/sparse_index_supports_ne_null.js @@ -9,9 +9,7 @@ * assumes_unsharded_collection, * ] */ -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); // For getPlanStages. +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; const coll = db.sparse_index_supports_ne_null; coll.drop(); @@ -198,5 +196,4 @@ checkQuery({ nResultsExpected: 0, indexKeyPattern: keyPattern }); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/splitvector.js b/jstests/core/splitvector.js index 5c3c1b2e559c4..54d8112ab97ae 100644 --- a/jstests/core/splitvector.js +++ b/jstests/core/splitvector.js @@ -26,14 +26,14 @@ // e.g. 20000 // @param maxChunkSize is in MBs. // -assertChunkSizes = function(splitVec, numDocs, maxChunkSize, msg) { +let assertChunkSizes = function(splitVec, numDocs, maxChunkSize, msg) { splitVec = [{x: -1}].concat(splitVec); splitVec.push({x: numDocs + 1}); - for (i = 0; i < splitVec.length - 1; i++) { - min = splitVec[i]; - max = splitVec[i + 1]; + for (let i = 0; i < splitVec.length - 1; i++) { + let min = splitVec[i]; + let max = splitVec[i + 1]; var avgObjSize = db.jstests_splitvector.stats().avgObjSize; - size = db.runCommand({datasize: "test.jstests_splitvector", min: min, max: max}).size; + let size = db.runCommand({datasize: "test.jstests_splitvector", min: min, max: max}).size; // It is okay for the last chunk to be smaller. A collection's size does not // need to be exactly a multiple of maxChunkSize. @@ -90,7 +90,7 @@ let bulkInsertDocsFixedX = function(coll, numDocs, filler, xVal) { // ------------------------- // TESTS START HERE // ------------------------- -f = db.jstests_splitvector; +let f = db.jstests_splitvector; resetCollection(); // ------------------------- @@ -125,29 +125,30 @@ assert.eq( resetCollection(); f.createIndex({x: 1}); +let filler; var case4 = function() { // Get baseline document size filler = ""; while (filler.length < 500) filler += "a"; f.save({x: 0, y: filler}); - docSize = db.runCommand({datasize: "test.jstests_splitvector"}).size; + let docSize = db.runCommand({datasize: "test.jstests_splitvector"}).size; assert.gt(docSize, 500, "4a"); // Fill collection and get split vector for 1MB maxChunkSize let numDocs = 4500; bulkInsertDocs(f, numDocs - 1, filler); // 1 document was already inserted. 
- res = db.runCommand( + let res = db.runCommand( {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1}); // splitVector aims at getting half-full chunks after split - factor = 0.5; + let factor = 0.5; assert.eq(true, res.ok, "4b"); assert.close( numDocs * docSize / ((1 << 20) * factor), res.splitKeys.length, "num split keys", -1); assertChunkSizes(res.splitKeys, numDocs, (1 << 20) * factor, "4d"); - for (i = 0; i < res.splitKeys.length; i++) { + for (let i = 0; i < res.splitKeys.length; i++) { assertFieldNamesMatch(res.splitKeys[i], {x: 1}); } }; @@ -162,7 +163,7 @@ f.createIndex({x: 1}); var case5 = function() { // Fill collection and get split vector for 1MB maxChunkSize bulkInsertDocs(f, 4499, filler); - res = db.runCommand({ + let res = db.runCommand({ splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1, @@ -171,7 +172,7 @@ var case5 = function() { assert.eq(true, res.ok, "5a"); assert.eq(1, res.splitKeys.length, "5b"); - for (i = 0; i < res.splitKeys.length; i++) { + for (let i = 0; i < res.splitKeys.length; i++) { assertFieldNamesMatch(res.splitKeys[i], {x: 1}); } }; @@ -186,7 +187,7 @@ f.createIndex({x: 1}); var case6 = function() { // Fill collection and get split vector for 1MB maxChunkSize bulkInsertDocs(f, 1999, filler); - res = db.runCommand({ + let res = db.runCommand({ splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1, @@ -195,7 +196,7 @@ var case6 = function() { assert.eq(true, res.ok, "6a"); assert.eq(3, res.splitKeys.length, "6b"); - for (i = 0; i < res.splitKeys.length; i++) { + for (let i = 0; i < res.splitKeys.length; i++) { assertFieldNamesMatch(res.splitKeys[i], {x: 1}); } }; @@ -212,12 +213,12 @@ var case7 = function() { // Fill collection and get split vector for 1MB maxChunkSize bulkInsertDocsFixedX(f, 2099, filler, 1); bulkInsertDocsFixedX(f, 9, filler, 2); - res = db.runCommand( + let res = db.runCommand( {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1}); assert.eq(true, res.ok, "7a"); assert.eq(2, res.splitKeys[0].x, "7b"); - for (i = 0; i < res.splitKeys.length; i++) { + for (let i = 0; i < res.splitKeys.length; i++) { assertFieldNamesMatch(res.splitKeys[i], {x: 1}); } }; @@ -234,14 +235,14 @@ var case8 = function() { bulkInsertDocsFixedX(f, 9, filler, 1); bulkInsertDocsFixedX(f, 2099, filler, 2); bulkInsertDocsFixedX(f, 9, filler, 3); - res = db.runCommand( + let res = db.runCommand( {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, maxChunkSize: 1}); assert.eq(true, res.ok, "8a"); assert.eq(2, res.splitKeys.length, "8b"); assert.eq(2, res.splitKeys[0].x, "8c"); assert.eq(3, res.splitKeys[1].x, "8d"); - for (i = 0; i < res.splitKeys.length; i++) { + for (let i = 0; i < res.splitKeys.length; i++) { assertFieldNamesMatch(res.splitKeys[i], {x: 1}); } }; @@ -262,7 +263,7 @@ var case9 = function() { assert.eq(3, f.count()); print(f.getFullName()); - res = db.runCommand({splitVector: f.getFullName(), keyPattern: {x: 1}, force: true}); + let res = db.runCommand({splitVector: f.getFullName(), keyPattern: {x: 1}, force: true}); assert.eq(true, res.ok, "9a"); assert.eq(1, res.splitKeys.length, "9b"); @@ -275,7 +276,7 @@ var case9 = function() { assert.eq(true, res.ok, "9a: " + tojson(res)); assert.eq(1, res.splitKeys.length, "9b: " + tojson(res)); assert.eq(2, res.splitKeys[0].x, "9c: " + tojson(res)); - for (i = 0; i < res.splitKeys.length; i++) { + for (let i = 0; i < res.splitKeys.length; i++) { assertFieldNamesMatch(res.splitKeys[i], {x: 1}); } } 
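All of the splitvector.js cases above drive a single command: splitVector walks the {x: 1} index and returns split points such that each resulting chunk stays under maxChunkSize (given in MB), aiming for roughly half-full chunks after the split, while force: true ignores the size limit and returns the midpoint of the range. A minimal invocation in the same style as those cases:

```javascript
// Minimal splitVector invocations, mirroring the cases above.
let res = db.runCommand({
    splitVector: "test.jstests_splitvector",
    keyPattern: {x: 1},
    maxChunkSize: 1  // in MB; splitVector aims for chunks about half this size
});
assert.eq(true, res.ok);
// Roughly numDocs * avgObjSize / (0.5 * 1MB) split keys are expected.
printjson(res.splitKeys);

// With force: true the size limit is ignored and the middle key of the range is returned.
res = db.runCommand(
    {splitVector: "test.jstests_splitvector", keyPattern: {x: 1}, force: true});
assert.eq(true, res.ok);
assert.eq(1, res.splitKeys.length);
```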
diff --git a/jstests/core/stage_debug/stages_and_hash.js b/jstests/core/stage_debug/stages_and_hash.js index 4e75dace007df..741a6778ad6a1 100644 --- a/jstests/core/stage_debug/stages_and_hash.js +++ b/jstests/core/stage_debug/stages_and_hash.js @@ -6,7 +6,7 @@ // no_selinux, // ] -t = db.stages_and_hashed; +let t = db.stages_and_hashed; t.drop(); var collname = "stages_and_hashed"; @@ -21,7 +21,7 @@ t.createIndex({bar: 1}); t.createIndex({baz: 1}); // Scan foo <= 20 -ixscan1 = { +let ixscan1 = { ixscan: { args: { name: "stages_and_hashed", @@ -36,7 +36,7 @@ ixscan1 = { }; // Scan bar >= 40 -ixscan2 = { +let ixscan2 = { ixscan: { args: { name: "stages_and_hashed", @@ -52,15 +52,13 @@ ixscan2 = { // bar = 50 - foo // Intersection is (foo=0 bar=50, foo=1 bar=49, ..., foo=10 bar=40) -andix1ix2 = { - andHash: {args: {nodes: [ixscan1, ixscan2]}} -}; -res = db.runCommand({stageDebug: {plan: andix1ix2, collection: collname}}); +let andix1ix2 = {andHash: {args: {nodes: [ixscan1, ixscan2]}}}; +let res = db.runCommand({stageDebug: {plan: andix1ix2, collection: collname}}); assert.eq(res.ok, 1); assert.eq(res.results.length, 11); // Filter predicates from 2 indices. Tests that we union the idx info. -andix1ix2filter = { +let andix1ix2filter = { fetch: { filter: {bar: {$in: [45, 46, 48]}, foo: {$in: [4, 5, 6]}}, args: {node: {andHash: {args: {nodes: [ixscan1, ixscan2]}}}} diff --git a/jstests/core/stage_debug/stages_and_sorted.js b/jstests/core/stage_debug/stages_and_sorted.js index 34826baaa3234..55c5abd8676db 100644 --- a/jstests/core/stage_debug/stages_and_sorted.js +++ b/jstests/core/stage_debug/stages_and_sorted.js @@ -6,7 +6,7 @@ // no_selinux, // ] -t = db.stages_and_sorted; +let t = db.stages_and_sorted; t.drop(); var collname = "stages_and_sorted"; @@ -31,7 +31,7 @@ t.createIndex({bar: 1}); t.createIndex({baz: 1}); // Scan foo == 1 -ixscan1 = { +let ixscan1 = { ixscan: { args: { name: "stages_and_sorted", @@ -46,7 +46,7 @@ ixscan1 = { }; // Scan bar == 1 -ixscan2 = { +let ixscan2 = { ixscan: { args: { name: "stages_and_sorted", @@ -61,7 +61,7 @@ ixscan2 = { }; // Scan baz == 12 -ixscan3 = { +let ixscan3 = { ixscan: { args: { name: "stages_and_sorted", @@ -76,18 +76,14 @@ ixscan3 = { }; // Intersect foo==1 with bar==1 with baz==12. -andix1ix2 = { - andSorted: {args: {nodes: [ixscan1, ixscan2, ixscan3]}} -}; -res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2}}); +let andix1ix2 = {andSorted: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}}; +let res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2}}); printjson(res); assert.eq(res.ok, 1); assert.eq(res.results.length, N); // Might as well make sure that hashed does the same thing. -andix1ix2hash = { - andHash: {args: {nodes: [ixscan1, ixscan2, ixscan3]}} -}; +let andix1ix2hash = {andHash: {args: {nodes: [ixscan1, ixscan2, ixscan3]}}}; res = db.runCommand({stageDebug: {collection: collname, plan: andix1ix2hash}}); assert.eq(res.ok, 1); assert.eq(res.results.length, N); diff --git a/jstests/core/stage_debug/stages_collection_scan.js b/jstests/core/stage_debug/stages_collection_scan.js index 08b3eb2eeb63a..ce072ad68b4ec 100644 --- a/jstests/core/stage_debug/stages_collection_scan.js +++ b/jstests/core/stage_debug/stages_collection_scan.js @@ -8,7 +8,7 @@ // ] // Test basic query stage collection scan functionality. 
-t = db.stages_collection_scan; +let t = db.stages_collection_scan; t.drop(); var collname = "stages_collection_scan"; @@ -17,37 +17,29 @@ for (var i = 0; i < N; ++i) { t.insert({foo: i}); } -forward = { - cscan: {args: {direction: 1}} -}; -res = db.runCommand({stageDebug: {collection: collname, plan: forward}}); +let forward = {cscan: {args: {direction: 1}}}; +let res = db.runCommand({stageDebug: {collection: collname, plan: forward}}); assert.eq(res.ok, 1); assert.eq(res.results.length, N); assert.eq(res.results[0].foo, 0); assert.eq(res.results[49].foo, 49); // And, backwards. -backward = { - cscan: {args: {direction: -1}} -}; +let backward = {cscan: {args: {direction: -1}}}; res = db.runCommand({stageDebug: {collection: collname, plan: backward}}); assert.eq(res.ok, 1); assert.eq(res.results.length, N); assert.eq(res.results[0].foo, 49); assert.eq(res.results[49].foo, 0); -forwardFiltered = { - cscan: {args: {direction: 1}, filter: {foo: {$lt: 25}}} -}; +let forwardFiltered = {cscan: {args: {direction: 1}, filter: {foo: {$lt: 25}}}}; res = db.runCommand({stageDebug: {collection: collname, plan: forwardFiltered}}); assert.eq(res.ok, 1); assert.eq(res.results.length, 25); assert.eq(res.results[0].foo, 0); assert.eq(res.results[24].foo, 24); -backwardFiltered = { - cscan: {args: {direction: -1}, filter: {foo: {$lt: 25}}} -}; +let backwardFiltered = {cscan: {args: {direction: -1}, filter: {foo: {$lt: 25}}}}; res = db.runCommand({stageDebug: {collection: collname, plan: backwardFiltered}}); assert.eq(res.ok, 1); assert.eq(res.results.length, 25); diff --git a/jstests/core/stage_debug/stages_fetch.js b/jstests/core/stage_debug/stages_fetch.js index bdec393ce1c93..30a7ef61ac6fb 100644 --- a/jstests/core/stage_debug/stages_fetch.js +++ b/jstests/core/stage_debug/stages_fetch.js @@ -7,7 +7,7 @@ // ] // Test basic fetch functionality. -t = db.stages_fetch; +let t = db.stages_fetch; t.drop(); var collname = "stages_fetch"; @@ -20,7 +20,7 @@ t.createIndex({foo: 1}); // 20 <= foo <= 30 // bar == 25 (not covered, should error.) -ixscan1 = { +let ixscan1 = { ixscan: { args: { keyPattern: {foo: 1}, @@ -33,11 +33,11 @@ ixscan1 = { filter: {bar: 25} } }; -res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}}); +let res = db.runCommand({stageDebug: {collection: collname, plan: ixscan1}}); assert.eq(res.ok, 0); // Now, add a fetch. We should be able to filter on the non-covered field since we fetched the obj. -ixscan2 = { +let ixscan2 = { ixscan: { args: { keyPattern: {foo: 1}, @@ -49,9 +49,7 @@ ixscan2 = { } } }; -fetch = { - fetch: {args: {node: ixscan2}, filter: {bar: 25}} -}; +let fetch = {fetch: {args: {node: ixscan2}, filter: {bar: 25}}}; res = db.runCommand({stageDebug: {collection: collname, plan: fetch}}); printjson(res); assert.eq(res.ok, 1); diff --git a/jstests/core/stage_debug/stages_ixscan.js b/jstests/core/stage_debug/stages_ixscan.js index de345fabb02a8..eb4d3c641d1b6 100644 --- a/jstests/core/stage_debug/stages_ixscan.js +++ b/jstests/core/stage_debug/stages_ixscan.js @@ -7,7 +7,7 @@ // ] // Test basic query stage index scan functionality. 
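// Editor's note (illustrative sketch, not part of the patch): a self-contained stageDebug
// invocation mirroring the cscan plans in stages_collection_scan.js above. The collection name
// "stage_debug_cscan_example" is an assumption for illustration only, and, like the test above,
// the ordering checks rely on documents coming back in (reverse) insertion order.
{
    const exampleColl = db.stage_debug_cscan_example;
    exampleColl.drop();
    for (let i = 0; i < 50; ++i) {
        exampleColl.insert({foo: i});
    }
    // Backward collection scan that keeps only foo >= 40.
    const examplePlan = {cscan: {args: {direction: -1}, filter: {foo: {$gte: 40}}}};
    const exampleRes =
        db.runCommand({stageDebug: {collection: "stage_debug_cscan_example", plan: examplePlan}});
    assert.eq(exampleRes.ok, 1);
    assert.eq(exampleRes.results.length, 10);  // foo 40..49
    assert.eq(exampleRes.results[0].foo, 49);  // backward scan sees the last inserted doc first
}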
-t = db.stages_ixscan; +let t = db.stages_ixscan; t.drop(); var collname = "stages_ixscan"; @@ -40,7 +40,7 @@ assert.commandFailed(db.runCommand({ })); // foo <= 20 -ixscan1 = { +let ixscan1 = { ixscan: { args: { keyPattern: {foo: 1}, diff --git a/jstests/core/stage_debug/stages_limit_skip.js b/jstests/core/stage_debug/stages_limit_skip.js index 623e403c50445..680ae9d682d30 100644 --- a/jstests/core/stage_debug/stages_limit_skip.js +++ b/jstests/core/stage_debug/stages_limit_skip.js @@ -7,7 +7,7 @@ // ] // Test limit and skip -t = db.stages_limit_skip; +let t = db.stages_limit_skip; t.drop(); var collname = "stages_limit_skip"; @@ -20,7 +20,7 @@ t.createIndex({foo: 1}); // foo <= 20, decreasing // Limit of 5 results. -ixscan1 = { +let ixscan1 = { ixscan: { args: { keyPattern: {foo: 1}, @@ -32,10 +32,8 @@ ixscan1 = { } } }; -limit1 = { - limit: {args: {node: ixscan1, num: 5}} -}; -res = db.runCommand({stageDebug: {collection: collname, plan: limit1}}); +let limit1 = {limit: {args: {node: ixscan1, num: 5}}}; +let res = db.runCommand({stageDebug: {collection: collname, plan: limit1}}); assert.eq(res.ok, 1); assert.eq(res.results.length, 5); assert.eq(res.results[0].foo, 20); @@ -43,9 +41,7 @@ assert.eq(res.results[4].foo, 16); // foo <= 20, decreasing // Skip 5 results. -skip1 = { - skip: {args: {node: ixscan1, num: 5}} -}; +let skip1 = {skip: {args: {node: ixscan1, num: 5}}}; res = db.runCommand({stageDebug: {collection: collname, plan: skip1}}); assert.eq(res.ok, 1); assert.eq(res.results.length, 16); diff --git a/jstests/core/stage_debug/stages_mergesort.js b/jstests/core/stage_debug/stages_mergesort.js index c202a3ca82729..b3203b98dacd0 100644 --- a/jstests/core/stage_debug/stages_mergesort.js +++ b/jstests/core/stage_debug/stages_mergesort.js @@ -7,7 +7,7 @@ // ] // Test query stage merge sorting. -t = db.stages_mergesort; +let t = db.stages_mergesort; t.drop(); var collname = "stages_mergesort"; @@ -22,7 +22,7 @@ t.createIndex({baz: 1, bar: 1}); // foo == 1 // We would (internally) use "": MinKey and "": MaxKey for the bar index bounds. -ixscan1 = { +let ixscan1 = { ixscan: { args: { keyPattern: {foo: 1, bar: 1}, @@ -35,7 +35,7 @@ ixscan1 = { } }; // baz == 1 -ixscan2 = { +let ixscan2 = { ixscan: { args: { keyPattern: {baz: 1, bar: 1}, @@ -48,10 +48,8 @@ ixscan2 = { } }; -mergesort = { - mergeSort: {args: {nodes: [ixscan1, ixscan2], pattern: {bar: 1}}} -}; -res = db.runCommand({stageDebug: {plan: mergesort, collection: collname}}); +let mergesort = {mergeSort: {args: {nodes: [ixscan1, ixscan2], pattern: {bar: 1}}}}; +let res = db.runCommand({stageDebug: {plan: mergesort, collection: collname}}); assert.eq(res.ok, 1); assert.eq(res.results.length, 2 * N); assert.eq(res.results[0].bar, 0); diff --git a/jstests/core/stage_debug/stages_or.js b/jstests/core/stage_debug/stages_or.js index 6eb2e9812fb0b..3c674c5f0178d 100644 --- a/jstests/core/stage_debug/stages_or.js +++ b/jstests/core/stage_debug/stages_or.js @@ -7,7 +7,7 @@ // ] // Test basic OR functionality -t = db.stages_or; +let t = db.stages_or; t.drop(); var collname = "stages_or"; @@ -21,7 +21,7 @@ t.createIndex({bar: 1}); t.createIndex({baz: 1}); // baz >= 40 -ixscan1 = { +let ixscan1 = { ixscan: { args: { keyPattern: {baz: 1}, @@ -34,7 +34,7 @@ ixscan1 = { } }; // foo >= 40 -ixscan2 = { +let ixscan2 = { ixscan: { args: { keyPattern: {foo: 1}, @@ -48,17 +48,13 @@ ixscan2 = { }; // OR of baz and foo. Baz == foo and we dedup. 
-orix1ix2 = { - or: {args: {nodes: [ixscan1, ixscan2], dedup: true}} -}; -res = db.runCommand({stageDebug: {collection: collname, plan: orix1ix2}}); +let orix1ix2 = {or: {args: {nodes: [ixscan1, ixscan2], dedup: true}}}; +let res = db.runCommand({stageDebug: {collection: collname, plan: orix1ix2}}); assert.eq(res.ok, 1); assert.eq(res.results.length, 10); // No deduping, 2x the results. -orix1ix2nodd = { - or: {args: {nodes: [ixscan1, ixscan2], dedup: false}} -}; +let orix1ix2nodd = {or: {args: {nodes: [ixscan1, ixscan2], dedup: false}}}; res = db.runCommand({stageDebug: {collection: collname, plan: orix1ix2nodd}}); assert.eq(res.ok, 1); assert.eq(res.results.length, 20); diff --git a/jstests/core/stage_debug/stages_sort.js b/jstests/core/stage_debug/stages_sort.js index 2fd682edc3c4b..a79b64bca0abb 100644 --- a/jstests/core/stage_debug/stages_sort.js +++ b/jstests/core/stage_debug/stages_sort.js @@ -3,7 +3,7 @@ // @tags: [not_allowed_with_security_token] if (false) { - t = db.stages_sort; + let t = db.stages_sort; t.drop(); var N = 50; @@ -14,7 +14,7 @@ if (false) { t.createIndex({foo: 1}); // Foo <= 20, descending. - ixscan1 = { + let ixscan1 = { ixscan: { args: { name: "stages_sort", @@ -29,8 +29,8 @@ if (false) { }; // Sort with foo ascending. - sort1 = {sort: {args: {node: ixscan1, pattern: {foo: 1}}}}; - res = db.runCommand({stageDebug: sort1}); + let sort1 = {sort: {args: {node: ixscan1, pattern: {foo: 1}}}}; + let res = db.runCommand({stageDebug: sort1}); assert.eq(res.ok, 1); assert.eq(res.results.length, 21); assert.eq(res.results[0].foo, 0); diff --git a/jstests/core/string_with_nul_bytes.js b/jstests/core/string_with_nul_bytes.js index e72cc0b6dc1e5..718a13646663d 100644 --- a/jstests/core/string_with_nul_bytes.js +++ b/jstests/core/string_with_nul_bytes.js @@ -1,9 +1,9 @@ // SERVER-6649 - issues round-tripping strings with embedded NUL bytes -t = db.string_with_nul_bytes.js; +let t = db.string_with_nul_bytes.js; t.drop(); -string = "string with a NUL (\0) byte"; +let string = "string with a NUL (\0) byte"; t.insert({str: string}); assert.eq(t.findOne().str, string); assert.eq(t.findOne().str.length, string.length); // just to be sure diff --git a/jstests/core/sub1.js b/jstests/core/sub1.js index d42677f32662a..f38c5ccc4a01e 100644 --- a/jstests/core/sub1.js +++ b/jstests/core/sub1.js @@ -1,16 +1,13 @@ // sub1.js -t = db.sub1; +let t = db.sub1; t.drop(); -x = { - a: 1, - b: {c: {d: 2}} -}; +let x = {a: 1, b: {c: {d: 2}}}; t.save(x); -y = t.findOne(); +let y = t.findOne(); assert.eq(1, y.a); assert.eq(2, y.b.c.d); diff --git a/jstests/core/testminmax.js b/jstests/core/testminmax.js index 3ebf0a13f3a36..712b00f5151c8 100644 --- a/jstests/core/testminmax.js +++ b/jstests/core/testminmax.js @@ -1,6 +1,6 @@ // @tags: [requires_fastcount] -t = db.minmaxtest; +let t = db.minmaxtest; t.drop(); t.insert({ "_id": "IBM.N|00001264779918428889", diff --git a/jstests/core/text_covered_matching.js b/jstests/core/text_covered_matching.js index df4fced499ab1..446502e876370 100644 --- a/jstests/core/text_covered_matching.js +++ b/jstests/core/text_covered_matching.js @@ -12,10 +12,8 @@ // assumes_read_concern_local, // ] -load("jstests/libs/analyze_plan.js"); +import {getPlanStages, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; -(function() { -"use strict"; const coll = db.text_covered_matching; coll.drop(); @@ -216,4 +214,3 @@ assert.gt(explainResult.executionStats.totalDocsExamined, assert.eq(explainResult.executionStats.nReturned, 1, "Unexpected number of results 
returned: " + tojson(explainResult)); -})(); diff --git a/jstests/core/timeseries/bucket_span_and_rounding_seconds.js b/jstests/core/timeseries/bucket_span_and_rounding_seconds.js index 2b339b810c758..3a827938f4eb4 100644 --- a/jstests/core/timeseries/bucket_span_and_rounding_seconds.js +++ b/jstests/core/timeseries/bucket_span_and_rounding_seconds.js @@ -10,15 +10,12 @@ * requires_timeseries, * ] */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db.getMongo())) { jsTestLog( "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled."); - return; + quit(); } const testDB = db.getSiblingDB(jsTestName()); @@ -209,5 +206,4 @@ const verifyCreateCommandFails = function(secondsOptions = {}, errorCode) { verifyCreateCommandFails( {bucketRoundingSeconds: bucketingValueMax + 1, bucketMaxSpanSeconds: bucketingValueMax + 1}, idlInvalidValueError); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/timeseries/bucket_unpacking_with_limit.js b/jstests/core/timeseries/bucket_unpacking_with_limit.js new file mode 100644 index 0000000000000..de6649168b240 --- /dev/null +++ b/jstests/core/timeseries/bucket_unpacking_with_limit.js @@ -0,0 +1,160 @@ +/** + * Tests that the bucket unpacking with limit rewrite is performed and pushes the limit before + * unpacking all buckets, while ensuring no incorrect results are created + + * @tags: [ + * # This test depends on certain writes ending up in the same bucket. Stepdowns may result in + * # writes splitting between two primaries, and thus different buckets. + * does_not_support_stepdowns, + * # We need a timeseries collection. + * requires_timeseries, + * # Explain of a resolved view must be executed by mongos. + * directly_against_shardsvrs_incompatible, + * # This complicates aggregation extraction. + * do_not_wrap_aggregations_in_facets, + * # Refusing to run a test that issues an aggregation command with explain because it may + * # return incomplete results if interrupted by a stepdown. + * does_not_support_stepdowns, + * requires_fcv_71 + * ] + */ + +load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation + +const collName = "timeseries_bucket_unpacking_with_limit"; +const coll = db[collName]; +const metaCollName = "timeseries_bucket_unpacking_with_limit_meta"; +const metaColl = db[metaCollName]; + +// Helper function to set up collections. +const setupColl = (coll, collName, usesMeta) => { + coll.drop(); + + // If usesMeta is true, we want the collection to have a onlyMeta field + if (usesMeta) { + assert.commandWorked( + db.createCollection(collName, {timeseries: {timeField: "t", metaField: "m"}})); + } else { + assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "t"}})); + } + const bucketsColl = db.getCollection('system.buckets.' + collName); + assert.contains(bucketsColl.getName(), db.getCollectionNames()); + + let docs = []; + // If usesMeta is true, we push 10 documents with all different onlyMeta field. This tests the + // case when documents come from multiple different buckets. If usesMeta is false, we generate + // 20 documents that all go into the same bucket. 
+ for (let i = 0; i < 10; ++i) { + if (usesMeta) { + docs.push({m: {"sensorId": i, "type": "temperature"}, t: new Date(i), _id: i}); + } else { + docs.push({t: new Date(i), _id: i}); + docs.push({t: new Date(i * 10), _id: i * 10}); + } + } + assert.commandWorked(coll.insert(docs)); + return docs; +}; + +// Helper function to check the PlanStage. +const assertPlanStagesInPipeline = + ({pipeline, expectedStages, expectedResults = [], onlyMeta = false}) => { + // If onlyMeta is set to true, we only want to include the collection with onlyMeta field + // specified to ensure sort can be done on the onlyMeta field + var colls = onlyMeta ? [metaColl] : [coll, metaColl]; + for (const c of colls) { + const aggRes = c.explain().aggregate(pipeline); + const planStage = + getExplainedPipelineFromAggregation(db, c, pipeline, {inhibitOptimization: false}); + // We check index at i in the PlanStage against the i'th index in expectedStages + // Should rewrite [{$_unpack}, {$limit: x}] pipeline as [{$limit: + // x}, {$_unpack}, {$limit: x}] + assert(expectedStages.length == planStage.length); + for (var i = 0; i < expectedStages.length; i++) { + assert(planStage[i].hasOwnProperty(expectedStages[i]), tojson(aggRes)); + } + + if (expectedResults.length != 0) { + const result = c.aggregate(pipeline).toArray(); + assert(expectedResults.length == result.length); + for (var i = 0; i < expectedResults.length; i++) { + assert.docEq(result[i], expectedResults[i], tojson(result)); + } + } + } + }; + +// Helper function to test correctness. +const testLimitCorrectness = (size) => { + for (const c of [coll, metaColl]) { + const res = c.aggregate([{$limit: size}]).toArray(); + const allElements = c.find().toArray(); + // Checks that the result length is correct, and that each element is unique + assert.eq(res.length, Math.min(size, allElements.length), tojson(res)); + assert.eq(res.length, new Set(res).size, tojson(res)); + // checks that each element in the result is actually from the collection + for (var i = 0; i < res.length; i++) { + assert.contains(res[i], allElements, tojson(res)); + } + } +}; + +setupColl(coll, collName, false); +const metaDocs = setupColl(metaColl, metaCollName, true); + +// Simple limit test. Because the pushed down limit is in the PlanStage now, +// getExplainedPipelineFromAggregation does not display it and we don't see the first limit / sort +// stage. The presence of the pushed limit is tested in unit tests. +assertPlanStagesInPipeline( + {pipeline: [{$limit: 2}], expectedStages: ["$_internalUnpackBucket", "$limit"]}); +// Test that when two limits are present, they get squashed into 1 taking limit of the smaller +// (tighter) value +assertPlanStagesInPipeline( + {pipeline: [{$limit: 2}, {$limit: 10}], expectedStages: ["$_internalUnpackBucket", "$limit"]}); +// Adding another stage after $limit to make sure that is also executed +assertPlanStagesInPipeline({ + pipeline: [{$limit: 2}, {$match: {"temp": 11}}], + expectedStages: ["$_internalUnpackBucket", "$limit", "$match"] +}); + +// Correctness test +testLimitCorrectness(2); +testLimitCorrectness(10); +testLimitCorrectness(20); + +// Test that sort absorbs the limits following it. 
+assertPlanStagesInPipeline({ + pipeline: [{$sort: {'m.sensorId': 1}}, {$limit: 2}], + expectedStages: ["$_internalUnpackBucket", "$limit"], + expectedResults: [metaDocs[0], metaDocs[1]], + onlyMeta: true +}); +assertPlanStagesInPipeline({ + pipeline: [{$sort: {"m.sensorId": -1}}, {$limit: 10}, {$limit: 2}], + expectedStages: ["$_internalUnpackBucket", "$limit"], + expectedResults: [metaDocs[9], metaDocs[8]], + onlyMeta: true +}); +assertPlanStagesInPipeline({ + pipeline: [{$sort: {"m.sensorId": 1}}, {$limit: 10}, {$limit: 50}], + expectedStages: ["$_internalUnpackBucket", "$limit"], + expectedResults: [ + metaDocs[0], + metaDocs[1], + metaDocs[2], + metaDocs[3], + metaDocs[4], + metaDocs[5], + metaDocs[6], + metaDocs[7], + metaDocs[8], + metaDocs[9] + ], + onlyMeta: true +}); +// Test limit comes before sort. +assertPlanStagesInPipeline({ + pipeline: [{$limit: 2}, {$sort: {"m.sensorId": 1}}], + expectedStages: ["$_internalUnpackBucket", "$limit", "$sort"], + onlyMeta: true +}); diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort.js b/jstests/core/timeseries/bucket_unpacking_with_sort.js index 13248964ec830..1e7cdbc5194b1 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort.js @@ -14,18 +14,10 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. -load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation. -load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage - -if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) { - jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled."); - return; -} +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const collName = "bucket_unpacking_with_sort"; const coll = db[collName]; @@ -665,4 +657,3 @@ runDoesntRewriteTest({t: 1}, runDoesntRewriteTest({t: 1}, null, {m: 1, t: 1}, csStringColl, [{$match: {m: 'a'}}]); runDoesntRewriteTest({t: 1}, null, {m: 1, t: 1}, ciStringColl, [{$match: {m: 'a'}}]); } -})(); diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js b/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js index ad4f6cf868986..ae3b0dc262b34 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort_extended_range.js @@ -15,17 +15,8 @@ * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For getExplainedPipelineFromAggregation. 
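// Editor's note (illustrative sketch, not part of the patch): a quick way to see the limit
// pushdown asserted by bucket_unpacking_with_limit.js above. The collection name
// "ts_limit_example" and its fields are assumptions for illustration only. Per the test's own
// comments, the bucket-level limit is pushed into the PlanStage, so the resolved pipeline in the
// explain output shows $_internalUnpackBucket followed by $limit.
{
    const tsExample = db.ts_limit_example;
    tsExample.drop();
    assert.commandWorked(
        db.createCollection("ts_limit_example", {timeseries: {timeField: "t", metaField: "m"}}));
    assert.commandWorked(tsExample.insert([{t: new Date(), m: 1}, {t: new Date(), m: 2}]));
    const exampleExplain = tsExample.explain().aggregate([{$limit: 1}]);
    printjson(exampleExplain);  // inspect for $_internalUnpackBucket and $limit
}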
-load("jstests/core/timeseries/libs/timeseries.js"); -load('jstests/libs/analyze_plan.js'); - -if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) { - jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled."); - return; -} +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const timeFieldName = "t"; @@ -262,4 +253,3 @@ function runTest(ascending) { runTest(false); // descending runTest(true); // ascending -})(); diff --git a/jstests/core/timeseries/bucket_unpacking_with_sort_plan_cache.js b/jstests/core/timeseries/bucket_unpacking_with_sort_plan_cache.js index cca7187ff61d9..05dc167c7882d 100644 --- a/jstests/core/timeseries/bucket_unpacking_with_sort_plan_cache.js +++ b/jstests/core/timeseries/bucket_unpacking_with_sort_plan_cache.js @@ -22,18 +22,13 @@ * tenant_migration_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/profiler.js"); // For getLatestProfileEntry. -load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. -load("jstests/libs/analyze_plan.js"); // For planHasStage. -load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest. - -if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) { - jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled."); - return; -} +load("jstests/libs/profiler.js"); // For getLatestProfileEntry. +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. +import { + getAggPlanStages, + getAggPlanStage, + getPlanCacheKeyFromExplain +} from "jstests/libs/analyze_plan.js"; const fields = ["a", "b", "i"]; @@ -146,4 +141,3 @@ for (const sortDirection of [-1, 1]) { testBoundedSorterPlanCache(sortDirection, indexDirection); } } -})(); diff --git a/jstests/core/timeseries/clustered_index_crud.js b/jstests/core/timeseries/clustered_index_crud.js index ae5a3c528611d..be5eb35e0e0e8 100644 --- a/jstests/core/timeseries/clustered_index_crud.js +++ b/jstests/core/timeseries/clustered_index_crud.js @@ -6,11 +6,6 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); - const collName = 'system.buckets.clustered_index_crud'; const coll = db[collName]; coll.drop(); @@ -47,5 +42,4 @@ assert.eq(1, coll.find({_id: null}).itcount()); assert.commandWorked(coll.insert({_id: 'x'.repeat(100), a: 10})); assert.commandWorked(coll.createIndex({a: 1})); -assert.commandWorked(coll.dropIndex({a: 1})); -})(); +assert.commandWorked(coll.dropIndex({a: 1})); \ No newline at end of file diff --git a/jstests/core/timeseries/libs/geo.js b/jstests/core/timeseries/libs/geo.js index 3c3e5b6db919e..90d59f8aa5096 100644 --- a/jstests/core/timeseries/libs/geo.js +++ b/jstests/core/timeseries/libs/geo.js @@ -22,4 +22,4 @@ function randomLongLat() { const lat = latRadians * 180 / Math.PI; return [long, lat]; } -} \ No newline at end of file +} diff --git a/jstests/core/timeseries/libs/timeseries.js b/jstests/core/timeseries/libs/timeseries.js index ed82d6cbda667..03b3640249a7a 100644 --- a/jstests/core/timeseries/libs/timeseries.js +++ b/jstests/core/timeseries/libs/timeseries.js @@ -2,10 +2,10 @@ // The test runs commands that are not allowed with security token: movechunk, split. 
// @tags: [not_allowed_with_security_token] -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/aggregation/extras/utils.js"); -var TimeseriesTest = class { +export var TimeseriesTest = class { static getBucketMaxSpanSecondsFromGranularity(granularity) { switch (granularity) { case 'seconds': @@ -40,27 +40,14 @@ var TimeseriesTest = class { return FeatureFlagUtil.isPresentAndEnabled(conn, "TimeseriesScalabilityImprovements"); } - /** - * Returns whether sharded time-series updates and deletes are supported. - * TODO SERVER-69320 remove this helper. - */ - static shardedTimeseriesUpdatesAndDeletesEnabled(conn) { - return FeatureFlagUtil.isPresentAndEnabled(conn, "ShardedTimeSeriesUpdateDelete"); - } - - // TODO SERVER-69320 remove this helper. - static shardedtimeseriesCollectionsEnabled(conn) { - return FeatureFlagUtil.isPresentAndEnabled(conn, "ShardedTimeSeries"); - } - // TODO SERVER-65082 remove this helper. static timeseriesMetricIndexesEnabled(conn) { return FeatureFlagUtil.isPresentAndEnabled(conn, "TimeseriesMetricIndexes"); } - // TODO SERVER-69324 remove this helper. - static bucketUnpackWithSortEnabled(conn) { - return FeatureFlagUtil.isPresentAndEnabled(conn, "BucketUnpackWithSort"); + // TODO SERVER-68058 remove this helper. + static arbitraryUpdatesEnabled(conn) { + return FeatureFlagUtil.isPresentAndEnabled(conn, "TimeseriesUpdatesSupport"); } /** diff --git a/jstests/core/timeseries/libs/timeseries_agg_helpers.js b/jstests/core/timeseries/libs/timeseries_agg_helpers.js index a2a2f74393fbf..f3d549bc22a81 100644 --- a/jstests/core/timeseries/libs/timeseries_agg_helpers.js +++ b/jstests/core/timeseries/libs/timeseries_agg_helpers.js @@ -1,9 +1,9 @@ -load("jstests/core/timeseries/libs/timeseries.js"); - /** * Helper class for aggregate tests with time-series collection. */ -var TimeseriesAggTests = class { +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; + +export var TimeseriesAggTests = class { /** * Gets a test db object based on the test suite name. */ @@ -20,14 +20,15 @@ var TimeseriesAggTests = class { * @returns An array of a time-series collection and a non time-series collection, * respectively in this order. */ - static prepareInputCollections(numHosts, numIterations, includeIdleMeasurements = true) { + static prepareInputCollections(numHosts, + numIterations, + includeIdleMeasurements = true, + testDB = TimeseriesAggTests.getTestDb()) { const timeseriesCollOption = {timeseries: {timeField: "time", metaField: "tags"}}; Random.setRandomSeed(); const hosts = TimeseriesTest.generateHosts(numHosts); - const testDB = TimeseriesAggTests.getTestDb(); - // Creates a time-series input collection. const inColl = testDB.getCollection("in"); inColl.drop(); @@ -37,7 +38,7 @@ var TimeseriesAggTests = class { const observerInColl = testDB.getCollection("observer_in"); observerInColl.drop(); assert.commandWorked(testDB.createCollection(observerInColl.getName())); - const currTime = new Date(); + let currTime = new Date(); // Inserts exactly the same random measurement to both inColl and observerInColl. 
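        // (Editor's note, not part of the patch: observerInColl is a plain, non-time-series
        // collection that receives an identical copy of the data, so a test can run the same
        // pipeline against both collections and treat the plain collection's output as the
        // expected result.)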
for (let i = 0; i < numIterations; i++) { @@ -45,7 +46,7 @@ var TimeseriesAggTests = class { const userUsage = TimeseriesTest.getRandomUsage(); let newMeasurement = { tags: host.tags, - time: new Date(currTime + i), + time: new Date(currTime++), usage_guest: TimeseriesTest.getRandomUsage(), usage_guest_nice: TimeseriesTest.getRandomUsage(), usage_idle: TimeseriesTest.getRandomUsage(), @@ -63,7 +64,7 @@ var TimeseriesAggTests = class { if (includeIdleMeasurements && (i % 2)) { let idleMeasurement = { tags: host.tags, - time: new Date(currTime + i), + time: new Date(currTime++), idle_user: 100 - userUsage }; assert.commandWorked(inColl.insert(idleMeasurement)); @@ -78,11 +79,11 @@ var TimeseriesAggTests = class { /** * Gets an output collection object with the name 'outCollname'. */ - static getOutputCollection(outCollName) { - const testDB = TimeseriesAggTests.getTestDb(); - + static getOutputCollection(outCollName, shouldDrop, testDB = TimeseriesAggTests.getTestDb()) { let outColl = testDB.getCollection(outCollName); - outColl.drop(); + if (shouldDrop) { + outColl.drop(); + } return outColl; } @@ -96,21 +97,35 @@ var TimeseriesAggTests = class { * Executes 'prepareAction' before executing 'pipeline'. 'prepareAction' takes a collection * parameter and returns nothing. * + * If 'shouldDrop' is set to false, the output collection will not be dropped before executing + * 'pipeline'. + * + * If 'testDB' is set, that database will be used in the aggregation pipeline. + * * Returns sorted data by "time" field. The sorted result data will help simplify comparison * logic. */ - static getOutputAggregateResults(inColl, pipeline, prepareAction = null) { + static getOutputAggregateResults(inColl, + pipeline, + prepareAction = null, + shouldDrop = true, + testDB = TimeseriesAggTests.getTestDb()) { // Figures out the output collection name from the last pipeline stage. var outCollName = "out"; if (pipeline[pipeline.length - 1]["$out"] != undefined) { - // If the last stage is "$out", gets the output collection name from it. - outCollName = pipeline[pipeline.length - 1]["$out"]; + // If the last stage is "$out", gets the output collection name from the string or + // object input. + if (typeof pipeline[pipeline.length - 1]["$out"] == 'string') { + outCollName = pipeline[pipeline.length - 1]["$out"]; + } else { + outCollName = pipeline[pipeline.length - 1]["$out"]["coll"]; + } } else if (pipeline[pipeline.length - 1]["$merge"] != undefined) { // If the last stage is "$merge", gets the output collection name from it. outCollName = pipeline[pipeline.length - 1]["$merge"].into; } - let outColl = TimeseriesAggTests.getOutputCollection(outCollName); + let outColl = TimeseriesAggTests.getOutputCollection(outCollName, shouldDrop, testDB); if (prepareAction != null) { prepareAction(outColl); } @@ -122,4 +137,22 @@ var TimeseriesAggTests = class { .sort({"time": 1}) .toArray(); } + + static verifyResults(actualResults, expectedResults) { + // Verifies that the number of measurements is same as expected. + assert.eq(actualResults.length, expectedResults.length, actualResults); + + // Verifies that every measurement is same as expected. 
+ for (var i = 0; i < expectedResults.length; ++i) { + assert.eq(actualResults[i], expectedResults[i], actualResults); + } + } + + static generateOutPipeline(collName, dbName, options, aggStage = null) { + let outStage = {$out: {db: dbName, coll: collName, timeseries: options}}; + if (aggStage) { + return [aggStage, outStage]; + } + return [outStage]; + } }; diff --git a/jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js b/jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js index 200b257084ca0..af28500c152a3 100644 --- a/jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js +++ b/jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js @@ -3,19 +3,19 @@ */ load("jstests/aggregation/extras/utils.js"); -load("jstests/core/timeseries/libs/timeseries_agg_helpers.js"); -load("jstests/libs/analyze_plan.js"); +import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js"; +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; // These are functions instead of const variables to avoid tripping up the parallel jstests. -function getEquivalentStrings() { +export function getEquivalentStrings() { return ['a', 'A', 'b', 'B']; } -function getEquivalentNumbers() { +export function getEquivalentNumbers() { return [7, NumberInt(7), NumberLong(7), NumberDecimal(7)]; } -function verifyLastpoint({tsColl, observerColl, pipeline, precedingFilter, expectStage}) { +export function verifyLastpoint({tsColl, observerColl, pipeline, precedingFilter, expectStage}) { // Verify lastpoint optmization. const explain = tsColl.explain().aggregate(pipeline); expectStage({explain, precedingFilter}); @@ -26,7 +26,7 @@ function verifyLastpoint({tsColl, observerColl, pipeline, precedingFilter, expec assertArrayEq({actual, expected}); } -function createBoringCollections(includeIdleMeasurements = false) { +export function createBoringCollections(includeIdleMeasurements = false) { // Prepare collections. Note: we usually test without idle measurements (all meta subfields are // non-null). If we allow the insertion of idle measurements, we will obtain multiple lastpoints // per bucket, and may have different results on the observer and timeseries collections. @@ -37,7 +37,7 @@ function createBoringCollections(includeIdleMeasurements = false) { } // Generate interesting values. -function generateInterestingValues() { +export function generateInterestingValues() { const epoch = ISODate('1970-01-01'); // Pick values with interesting equality behavior. @@ -94,7 +94,7 @@ function generateInterestingValues() { return docs; } -function getMapInterestingValuesToEquivalentsStage() { +export function getMapInterestingValuesToEquivalentsStage() { const firstElemInId = {$arrayElemAt: ["$_id", 0]}; const isIdArray = {$isArray: "$_id"}; const equivalentStrings = getEquivalentStrings(); @@ -135,7 +135,7 @@ function getMapInterestingValuesToEquivalentsStage() { }; } -function createInterestingCollections() { +export function createInterestingCollections() { const testDB = TimeseriesAggTests.getTestDb(); const collation = {locale: 'en_US', strength: 2}; @@ -164,7 +164,7 @@ function createInterestingCollections() { return [tsColl, observerColl]; } -function expectDistinctScan({explain}) { +export function expectDistinctScan({explain}) { // The query can utilize DISTINCT_SCAN. 
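    // (Editor's note, not part of the patch: DISTINCT_SCAN answers the lastpoint query by
    // skipping between index keys instead of examining every entry, which is also why the
    // check below expects no separate blocking SORT stage in the plan.)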
assert.neq(getAggPlanStage(explain, "DISTINCT_SCAN"), null, explain); @@ -172,7 +172,7 @@ function expectDistinctScan({explain}) { assert.eq(getAggPlanStage(explain, "SORT"), null, explain); } -function expectCollScan({explain, precedingFilter, noSortInCursor}) { +export function expectCollScan({explain, precedingFilter, noSortInCursor}) { if (noSortInCursor) { // We need a separate sort stage. assert.eq(getAggPlanStage(explain, "SORT"), null, explain); @@ -189,7 +189,7 @@ function expectCollScan({explain, precedingFilter, noSortInCursor}) { } } -function expectIxscan({explain, noSortInCursor}) { +export function expectIxscan({explain, noSortInCursor}) { if (noSortInCursor) { // We can rely on the index without a cursor $sort. assert.eq(getAggPlanStage(explain, "SORT"), null, explain); @@ -209,7 +209,7 @@ function expectIxscan({explain, noSortInCursor}) { 3. Lastpoint queries on indexes with ascending time and $last/$bottom and an additional secondary index so that we can use the DISTINCT_SCAN optimization. */ -function testAllTimeMetaDirections(tsColl, observerColl, getTestCases) { +export function testAllTimeMetaDirections(tsColl, observerColl, getTestCases) { const testDB = TimeseriesAggTests.getTestDb(); const testCases = [ {time: -1, useBucketsIndex: false}, diff --git a/jstests/core/timeseries/libs/timeseries_writes_util.js b/jstests/core/timeseries/libs/timeseries_writes_util.js new file mode 100644 index 0000000000000..4b6c345372dd4 --- /dev/null +++ b/jstests/core/timeseries/libs/timeseries_writes_util.js @@ -0,0 +1,1032 @@ +/** + * Helpers for testing timeseries arbitrary writes. + */ + +import {getExecutionStages, getPlanStage} from "jstests/libs/analyze_plan.js"; + +export const timeFieldName = "time"; +export const metaFieldName = "tag"; +export const sysCollNamePrefix = "system.buckets."; + +export const closedBucketFilter = { + "control.closed": {$not: {$eq: true}} +}; + +// The split point is between the 'A' and 'B' meta values which is _id: 4. [1, 3] goes to the +// primary shard and [4, 7] goes to the other shard. +export const splitMetaPointBetweenTwoShards = { + meta: "B" +}; + +// This split point is the same as the 'splitMetaPointBetweenTwoShards'. +export const splitTimePointBetweenTwoShards = { + [`control.min.${timeFieldName}`]: ISODate("2003-06-30") +}; + +export function generateTimeValue(index) { + return ISODate(`${2000 + index}-01-01`); +} + +// Defines sample data set for testing. +export const doc1_a_nofields = { + _id: 1, + [timeFieldName]: generateTimeValue(1), + [metaFieldName]: "A", +}; + +export const doc2_a_f101 = { + _id: 2, + [timeFieldName]: generateTimeValue(2), + [metaFieldName]: "A", + f: 101 +}; + +export const doc3_a_f102 = { + _id: 3, + [timeFieldName]: generateTimeValue(3), + [metaFieldName]: "A", + f: 102 +}; + +export const doc4_b_f103 = { + _id: 4, + [timeFieldName]: generateTimeValue(4), + [metaFieldName]: "B", + f: 103 +}; + +export const doc5_b_f104 = { + _id: 5, + [timeFieldName]: generateTimeValue(5), + [metaFieldName]: "B", + f: 104 +}; + +export const doc6_c_f105 = { + _id: 6, + [timeFieldName]: generateTimeValue(6), + [metaFieldName]: "C", + f: 105 +}; + +export const doc7_c_f106 = { + _id: 7, + [timeFieldName]: generateTimeValue(7), + [metaFieldName]: "C", + f: 106, +}; + +export let testDB = null; +export let st = null; +export let primaryShard = null; +export let otherShard = null; +export let mongos0DB = null; +export let mongos1DB = null; + +/** + * Composes and returns a bucket-level filter for timeseries arbitrary writes. 
+ * + * The bucket-level filter is composed of the closed bucket filter and the given filter(s) which + * are ANDed together. The closed bucket filter is always the first element of the AND array. + * Zero or more filters can be passed in as arguments. + */ +export function makeBucketFilter(...args) { + if (!args.length) { + return closedBucketFilter; + } + + return {$and: [closedBucketFilter].concat(Array.from(args))}; +} + +export function getTestDB() { + if (!testDB) { + testDB = db.getSiblingDB(jsTestName()); + assert.commandWorked(testDB.dropDatabase()); + } + return testDB; +} + +export function prepareCollection({dbToUse, collName, initialDocList}) { + if (!dbToUse) { + dbToUse = getTestDB(); + } + const coll = dbToUse.getCollection(collName); + coll.drop(); + assert.commandWorked(dbToUse.createCollection( + coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); + assert.commandWorked(coll.insert(initialDocList)); + + return coll; +} + +export function prepareShardedCollection( + {dbToUse, collName, initialDocList, includeMeta = true, shardKey, splitPoint}) { + if (!dbToUse) { + assert.neq( + null, testDB, "testDB must be initialized before calling prepareShardedCollection"); + dbToUse = testDB; + } + + const coll = dbToUse.getCollection(collName); + const sysCollName = sysCollNamePrefix + coll.getName(); + coll.drop(); + + const tsOptions = includeMeta + ? {timeseries: {timeField: timeFieldName, metaField: metaFieldName}} + : {timeseries: {timeField: timeFieldName}}; + assert.commandWorked(dbToUse.createCollection(coll.getName(), tsOptions)); + assert.commandWorked(coll.insert(initialDocList)); + + if (!shardKey) { + shardKey = includeMeta ? {[metaFieldName]: 1} : {[timeFieldName]: 1}; + } + assert.commandWorked(coll.createIndex(shardKey)); + assert.commandWorked( + dbToUse.adminCommand({shardCollection: coll.getFullName(), key: shardKey})); + + if (!splitPoint) { + splitPoint = includeMeta ? splitMetaPointBetweenTwoShards : splitTimePointBetweenTwoShards; + } + // [MinKey, splitPoint) and [splitPoint, MaxKey) are the two chunks after the split. 
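    // (Editor's note, not part of the patch: the moveChunk below then places the
    // [splitPoint, MaxKey) chunk on the other shard, so the two shards split ownership of the
    // inserted measurements between them.)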
+ assert.commandWorked( + dbToUse.adminCommand({split: dbToUse[sysCollName].getFullName(), middle: splitPoint})); + + assert.commandWorked(dbToUse.adminCommand({ + moveChunk: dbToUse[sysCollName].getFullName(), + find: splitPoint, + to: otherShard.shardName, + _waitForDelete: true + })); + + return coll; +} + +export function makeFindOneAndRemoveCommand(coll, filter, fields, sort, collation) { + let findAndModifyCmd = {findAndModify: coll.getName(), query: filter, remove: true}; + if (fields) { + findAndModifyCmd["fields"] = fields; + } + if (sort) { + findAndModifyCmd["sort"] = sort; + } + if (collation) { + findAndModifyCmd["collation"] = collation; + } + + return findAndModifyCmd; +} + +export function makeFindOneAndUpdateCommand( + coll, filter, update, returnNew, upsert, fields, sort, collation) { + assert(filter !== undefined && update !== undefined); + let findAndModifyCmd = {findAndModify: coll.getName(), query: filter, update: update}; + if (returnNew !== undefined) { + findAndModifyCmd["new"] = returnNew; + } + if (upsert !== undefined) { + findAndModifyCmd["upsert"] = upsert; + } + if (fields !== undefined) { + findAndModifyCmd["fields"] = fields; + } + if (sort !== undefined) { + findAndModifyCmd["sort"] = sort; + } + if (collation !== undefined) { + findAndModifyCmd["collation"] = collation; + } + + return findAndModifyCmd; +} + +/** + * Returns the name of the caller of the function that called this function using the stack trace. + * + * This is useful for generating unique collection names. If the return function name is not unique + * and the caller needs to generate a unique collection name, the caller can append a unique suffix. + */ +export function getCallerName(callDepth = 2) { + return `${new Error().stack.split('\n')[callDepth].split('@')[0]}`; +} + +export function verifyResultDocs(coll, initialDocList, expectedResultDocs, nDeleted) { + let resultDocs = coll.find().toArray(); + assert.eq(resultDocs.length, initialDocList.length - nDeleted, tojson(resultDocs)); + + // Validate the collection's exact contents if we were given the expected results. We may skip + // this step in some cases, if the delete doesn't pinpoint a specific document. 
+ if (expectedResultDocs) { + assert.eq(expectedResultDocs.length, resultDocs.length, tojson(resultDocs)); + assert.sameMembers(expectedResultDocs, resultDocs, tojson(resultDocs)); + } +} + +export function verifyExplain({ + explain, + rootStageName, + opType, + bucketFilter, + residualFilter, + nBucketsUnpacked, + nReturned, + nMatched, + nModified, + nUpserted, +}) { + jsTestLog(`Explain: ${tojson(explain)}`); + assert(opType === "updateOne" || opType === "deleteOne" || opType === "updateMany" || + opType === "deleteMany"); + + if (!rootStageName) { + rootStageName = "TS_MODIFY"; + } + assert("PROJECTION_DEFAULT" === rootStageName || "TS_MODIFY" === rootStageName, + "Only PROJECTION_DEFAULT or TS_MODIFY is allowed"); + + let foundStage = getPlanStage(explain.queryPlanner.winningPlan, rootStageName); + assert.neq(null, + foundStage, + `The root ${rootStageName} stage not found in the plan: ${tojson(explain)}`); + if (rootStageName === "PROJECTION_DEFAULT") { + assert.eq("TS_MODIFY", + foundStage.inputStage.stage, + `TS_MODIFY is not a child of ${rootStageName} in the plan: ${tojson(explain)}`); + foundStage = foundStage.inputStage; + } + + assert.eq(opType, foundStage.opType, `TS_MODIFY opType is wrong: ${tojson(foundStage)}`); + assert.eq(bucketFilter, + foundStage.bucketFilter, + `TS_MODIFY bucketFilter is wrong: ${tojson(foundStage)}`); + assert.eq(residualFilter, + foundStage.residualFilter, + `TS_MODIFY residualFilter is wrong: ${tojson(foundStage)}`); + + const execStages = getExecutionStages(explain); + assert.eq(rootStageName, execStages[0].stage, `The root stage is wrong: ${tojson(execStages)}`); + let tsModifyStage = execStages[0]; + if (tsModifyStage.stage === "PROJECTION_DEFAULT") { + tsModifyStage = tsModifyStage.inputStage; + } + assert.eq( + "TS_MODIFY", tsModifyStage.stage, `Can't find TS_MODIFY stage: ${tojson(execStages)}`); + + if (nBucketsUnpacked !== undefined) { + assert.eq(nBucketsUnpacked, + tsModifyStage.nBucketsUnpacked, + `Got wrong nBucketsUnpacked ${tojson(tsModifyStage)}`); + } + if (nReturned !== undefined) { + assert.eq( + nReturned, tsModifyStage.nReturned, `Got wrong nReturned ${tojson(tsModifyStage)}`); + } + if (nMatched !== undefined) { + assert.eq(nMatched, + tsModifyStage.nMeasurementsMatched, + `Got wrong nMeasurementsMatched ${tojson(tsModifyStage)}`); + } + if (nModified !== undefined) { + if (opType.startsWith("update")) { + assert.eq(nModified, + tsModifyStage.nMeasurementsUpdated, + `Got wrong nMeasurementsModified ${tojson(tsModifyStage)}`); + } else { + assert.eq(nModified, + tsModifyStage.nMeasurementsDeleted, + `Got wrong nMeasurementsModified ${tojson(tsModifyStage)}`); + } + } + if (nUpserted !== undefined) { + assert.eq(nUpserted, + tsModifyStage.nMeasurementsUpserted, + `Got wrong nMeasurementsUpserted ${tojson(tsModifyStage)}`); + } +} + +/** + * Verifies that a deleteOne returns the expected set of documents. + * + * - initialDocList: The initial documents in the collection. + * - filter: The filter for the deleteOne command. + * - expectedResultDocs: The expected documents in the collection after the delete. + * - nDeleted: The expected number of documents deleted. 
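 *
 * Example (editor's illustration, not part of the patch):
 *   testDeleteOne({initialDocList: [doc1_a_nofields, doc2_a_f101],
 *                  filter: {f: 101},
 *                  expectedResultDocs: [doc1_a_nofields],
 *                  nDeleted: 1});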
+ */ +export function testDeleteOne({initialDocList, filter, expectedResultDocs, nDeleted}) { + const callerName = getCallerName(); + jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`); + + const coll = prepareCollection({collName: callerName, initialDocList: initialDocList}); + + const res = assert.commandWorked(coll.deleteOne(filter)); + assert.eq(nDeleted, res.deletedCount); + + verifyResultDocs(coll, initialDocList, expectedResultDocs, nDeleted); +} + +export function getBucketCollection(coll) { + return coll.getDB()[sysCollNamePrefix + coll.getName()]; +} + +/** + * Ensure the updateOne command operates correctly by examining documents after the update. + */ +export function testUpdateOne({ + initialDocList, + updateQuery, + updateObj, + c, + resultDocList, + nMatched, + nModified = nMatched, + upsert = false, + upsertedDoc, + failCode +}) { + const collName = getCallerName(); + jsTestLog(`Running ${collName}(${tojson(arguments[0])})`); + + const testDB = getTestDB(); + const coll = testDB.getCollection(collName); + prepareCollection({collName, initialDocList}); + + let upd = {q: updateQuery, u: updateObj, multi: false, upsert: upsert}; + if (c) { + upd["c"] = c; + upd["upsertSupplied"] = true; + } + const updateCommand = { + update: coll.getName(), + updates: [upd], + }; + + const res = failCode ? assert.commandFailedWithCode(testDB.runCommand(updateCommand), failCode) + : assert.commandWorked(testDB.runCommand(updateCommand)); + if (!failCode) { + if (upsertedDoc) { + assert.eq(1, res.n, tojson(res)); + assert.eq(0, res.nModified, tojson(res)); + assert(res.hasOwnProperty("upserted"), tojson(res)); + assert.eq(1, res.upserted.length); + + if (upsertedDoc.hasOwnProperty("_id")) { + assert.eq(upsertedDoc._id, res.upserted[0]._id); + } else { + upsertedDoc["_id"] = res.upserted[0]._id; + } + resultDocList.push(upsertedDoc); + } else { + assert.eq(nMatched, res.n, tojson(res)); + assert.eq(nModified, res.nModified, tojson(res)); + assert(!res.hasOwnProperty("upserted"), tojson(res)); + } + } + + if (resultDocList) { + assert.sameMembers(resultDocList, + coll.find().toArray(), + "Collection contents did not match expected after update"); + } +} + +/** + * Verifies that a findAndModify remove returns the expected result(s) 'res'. + * + * - initialDocList: The initial documents in the collection. + * - cmd.filter: The filter for the findAndModify command. + * - cmd.fields: The projection for the findAndModify command. + * - cmd.sort: The sort option for the findAndModify command. + * - cmd.collation: The collation option for the findAndModify command. + * - res.errorCode: If errorCode is set, we expect the command to fail with that code and other + * fields of 'res' are ignored. + * - res.expectedResultDocs: The expected documents in the collection after the delete. + * - res.nDeleted: The expected number of documents deleted. + * - res.deletedDoc: The expected document returned by the findAndModify command. + * - res.rootStage: The expected root stage of the explain plan. + * - res.bucketFilter: The expected bucket filter of the TS_MODIFY stage. + * - res.residualFilter: The expected residual filter of the TS_MODIFY stage. + * - res.nBucketsUnpacked: The expected number of buckets unpacked by the TS_MODIFY stage. + * - res.nReturned: The expected number of documents returned by the TS_MODIFY stage. 
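 *
 * Example (editor's illustration, not part of the patch):
 *   testFindOneAndRemove({
 *       initialDocList: [doc1_a_nofields, doc2_a_f101],
 *       cmd: {filter: {f: 101}},
 *       res: {expectedResultDocs: [doc1_a_nofields], nDeleted: 1, deletedDoc: doc2_a_f101},
 *   });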
+ */ +export function testFindOneAndRemove({ + initialDocList, + cmd: {filter, fields, sort, collation}, + res: { + errorCode, + expectedResultDocs, + nDeleted, + deletedDoc, + rootStage, + bucketFilter, + residualFilter, + nBucketsUnpacked, + nReturned, + }, +}) { + const callerName = getCallerName(); + jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`); + + const coll = prepareCollection({collName: callerName, initialDocList: initialDocList}); + + const findAndModifyCmd = makeFindOneAndRemoveCommand(coll, filter, fields, sort, collation); + jsTestLog(`Running findAndModify remove: ${tojson(findAndModifyCmd)}`); + + const session = coll.getDB().getSession(); + const shouldRetryWrites = session.getOptions().shouldRetryWrites(); + // TODO SERVER-76583: Remove this check and always verify the result or verify the 'errorCode'. + if (coll.getDB().getSession().getOptions().shouldRetryWrites()) { + assert.commandFailedWithCode( + testDB.runCommand(findAndModifyCmd), 7308305, `cmd = ${tojson(findAndModifyCmd)}`); + return; + } + + if (errorCode) { + assert.commandFailedWithCode( + testDB.runCommand(findAndModifyCmd), errorCode, `cmd = ${tojson(findAndModifyCmd)}`); + return; + } + + if (bucketFilter !== undefined) { + const explainRes = assert.commandWorked( + coll.runCommand({explain: findAndModifyCmd, verbosity: "executionStats"})); + verifyExplain({ + explain: explainRes, + rootStageName: rootStage, + opType: "deleteOne", + bucketFilter: bucketFilter, + residualFilter: residualFilter, + nBucketsUnpacked: nBucketsUnpacked, + nReturned: nReturned, + }); + } + + const res = assert.commandWorked(testDB.runCommand(findAndModifyCmd)); + jsTestLog(`findAndModify remove result: ${tojson(res)}`); + assert.eq(nDeleted, res.lastErrorObject.n, tojson(res)); + if (deletedDoc) { + assert.docEq(deletedDoc, res.value, tojson(res)); + } else if (nDeleted === 1) { + assert.neq(null, res.value, tojson(res)); + } else if (nDeleted === 0) { + assert.eq(null, res.value, tojson(res)); + } + + verifyResultDocs(coll, initialDocList, expectedResultDocs, nDeleted); +} + +/** + * Verifies that a findAndModify update returns the expected result(s) 'res'. + * + * - initialDocList: The initial documents in the collection. + * - cmd.filter: The 'query' spec for the findAndModify command. + * - cmd.update: The 'update' spec for the findAndModify command. + * - cmd.returnNew: The 'new' option for the findAndModify command. + * - cmd.upsert: The 'upsert' option for the findAndModify command. + * - cmd.fields: The projection for the findAndModify command. + * - cmd.sort: The sort option for the findAndModify command. + * - cmd.collation: The collation option for the findAndModify command. + * - res.errorCode: If errorCode is set, we expect the command to fail with that code and other + * fields of 'res' are ignored. + * - res.resultDocList: The expected documents in the collection after the update. + * - res.nModified: The expected number of documents deleted. + * - res.returnDoc: The expected document returned by the findAndModify command. + * - res.rootStage: The expected root stage of the explain plan. + * - res.bucketFilter: The expected bucket filter of the TS_MODIFY stage. + * - res.residualFilter: The expected residual filter of the TS_MODIFY stage. + * - res.nBucketsUnpacked: The expected number of buckets unpacked by the TS_MODIFY stage. + * - res.nMatched: The expected number of documents matched by the TS_MODIFY stage. + * - res.nModified: The expected number of documents modified by the TS_MODIFY stage. 
+ * - res.nUpserted: The expected number of documents upserted by the TS_MODIFY stage. + */ +export function testFindOneAndUpdate({ + initialDocList, + cmd: {filter, update, returnNew, upsert, fields, sort, collation}, + res: { + errorCode, + resultDocList, + returnDoc, + rootStage, + bucketFilter, + residualFilter, + nBucketsUnpacked, + nMatched, + nModified, + nUpserted, + }, +}) { + const collName = getCallerName(); + jsTestLog(`Running ${collName}(${tojson(arguments[0])})`); + + const testDB = getTestDB(); + const coll = testDB.getCollection(collName); + prepareCollection({collName, initialDocList}); + + const findAndModifyCmd = makeFindOneAndUpdateCommand( + coll, filter, update, returnNew, upsert, fields, sort, collation); + jsTestLog(`Running findAndModify update: ${tojson(findAndModifyCmd)}`); + + // TODO SERVER-76583: Remove this check and always verify the result or verify the 'errorCode'. + if (coll.getDB().getSession().getOptions().shouldRetryWrites()) { + assert.commandFailedWithCode(testDB.runCommand(findAndModifyCmd), 7314600); + return; + } + + if (errorCode) { + assert.commandFailedWithCode(testDB.runCommand(findAndModifyCmd), errorCode); + return; + } + + if (bucketFilter !== undefined) { + const explainRes = assert.commandWorked( + coll.runCommand({explain: findAndModifyCmd, verbosity: "executionStats"})); + verifyExplain({ + explain: explainRes, + rootStageName: rootStage, + opType: "updateOne", + bucketFilter: bucketFilter, + residualFilter: residualFilter, + nBucketsUnpacked: nBucketsUnpacked, + nReturned: returnDoc ? 1 : 0, + nMatched: nMatched, + nModified: nModified, + nUpserted: nUpserted, + }); + } + + const res = assert.commandWorked(testDB.runCommand(findAndModifyCmd)); + jsTestLog(`findAndModify update result: ${tojson(res)}`); + if (upsert) { + assert(nUpserted !== undefined && (nUpserted === 0 || nUpserted === 1), + "nUpserted must be 0 or 1"); + + assert.eq(1, res.lastErrorObject.n, tojson(res)); + if (returnNew !== undefined) { + assert(returnDoc, "returnDoc must be provided when upsert are true"); + assert.docEq(returnDoc, res.value, tojson(res)); + } + + if (nUpserted === 1) { + assert(res.lastErrorObject.upserted, `Expected upserted ObjectId: ${tojson(res)}`); + assert.eq(false, res.lastErrorObject.updatedExisting, tojson(res)); + } else { + assert(!res.lastErrorObject.upserted, `Expected no upserted ObjectId: ${tojson(res)}`); + assert.eq(true, res.lastErrorObject.updatedExisting, tojson(res)); + } + } else { + if (returnDoc !== undefined && returnDoc !== null) { + assert.eq(1, res.lastErrorObject.n, tojson(res)); + assert.eq(true, res.lastErrorObject.updatedExisting, tojson(res)); + assert.docEq(returnDoc, res.value, tojson(res)); + } else { + assert.eq(0, res.lastErrorObject.n, tojson(res)); + assert.eq(false, res.lastErrorObject.updatedExisting, tojson(res)); + assert.eq(null, res.value, tojson(res)); + } + } + + if (resultDocList !== undefined) { + assert.sameMembers(resultDocList, + coll.find().toArray(), + "Collection contents did not match expected after update"); + } +} + +export function getRelevantProfilerEntries(db, coll, requestType) { + const sysCollName = sysCollNamePrefix + coll.getName(); + const profilerFilter = { + $or: [ + // Potential two-phase protocol cluster query. + { + "op": "command", + "ns": `${db.getName()}.${sysCollName}`, + "command.aggregate": `${sysCollName}`, + "command.$_isClusterQueryWithoutShardKeyCmd": true, + // Filters out events recorded because of StaleConfig error. 
+ "ok": {$ne: 0}, + }, + // Potential two-phase protocol write command. + { + "op": "command", + "ns": `${db.getName()}.${sysCollName}`, + [`command.${requestType}`]: `${sysCollName}`, + }, + // Targeted write command. + { + "op": "command", + "ns": `${db.getName()}.${sysCollName}`, + [`command.${requestType}`]: `${coll.getName()}`, + } + ] + }; + return db.system.profile.find(profilerFilter).toArray(); +} + +export function verifyThatRequestIsRoutedToCorrectShard( + coll, requestType, writeType, dataBearingShard) { + assert(primaryShard && otherShard, "The sharded cluster must be initialized"); + assert(dataBearingShard === "primary" || dataBearingShard === "other" || + dataBearingShard === "none" || dataBearingShard === "any", + "Invalid shard: " + dataBearingShard); + assert(writeType === "twoPhaseProtocol" || writeType === "targeted", + "Invalid write type: " + writeType); + assert(requestType === "findAndModify" || requestType === "delete" || requestType === "update", + "Invalid request type: " + requestType); + + const primaryDB = primaryShard.getDB(testDB.getName()); + const otherDB = otherShard.getDB(testDB.getName()); + + const primaryEntries = getRelevantProfilerEntries(primaryDB, coll, requestType); + const otherEntries = getRelevantProfilerEntries(otherDB, coll, requestType); + + /* + * The profiler entries for the two-phase protocol are expected to be in the following order: + * On the data bearing shard: + * 1. Cluster query. + * 2. Targeted request. + * + * On the non-data bearing shard: + * 1. Cluster query. + * + * The profiler entries for the targeted write are expected to be in the following order: + * On the data bearing shard: + * 1. Targeted request. + */ + + if (dataBearingShard === "none") { + // If dataBearingShard is "none", the writeType must be "twoPhaseProtocol". So, no shards + // should get the targeted request after the cluster query for the case of "none". + + assert.eq("twoPhaseProtocol", + writeType, + "Expected data bearing shard to be 'none' only for 'twoPhaseProtocol' mode"); + + assert.eq(1, primaryEntries.length, "Expected one profiler entry on primary shard"); + // The entry must be for the cluster query. + assert(primaryEntries[0].command.hasOwnProperty("aggregate"), + "Unexpected profile entries: " + tojson(primaryEntries)); + + assert.eq(1, otherEntries.length, "Expected one profiler entry on other shard"); + // The entry must be for the cluster query. + assert(otherEntries[0].command.hasOwnProperty("aggregate"), + "Unexpected profile entries: " + tojson(otherEntries)); + return; + } + + const [dataBearingShardEntries, nonDataBearingShardEntries] = (() => { + if (dataBearingShard === "any") { + assert.eq("twoPhaseProtocol", + writeType, + "Expected data bearing shard to be 'any' only for 'twoPhaseProtocol' mode"); + return primaryEntries.length === 2 ? [primaryEntries, otherEntries] + : [otherEntries, primaryEntries]; + } + + return dataBearingShard === "primary" ? [primaryEntries, otherEntries] + : [otherEntries, primaryEntries]; + })(); + + if (writeType === "twoPhaseProtocol") { + // At this point, we know that the data bearing shard is either primary or other. So, we + // expect two profiler entries on the data bearing shard and one on the non-data bearing + // shard. + + assert.eq( + 2, + dataBearingShardEntries.length, + `Expected two profiler entries for data bearing shard in 'twoPhaseProtocol' mode but + got: ${tojson(dataBearingShardEntries)}`); + // The first entry must be for the cluster query. 
+ assert(dataBearingShardEntries[0].command.hasOwnProperty("aggregate"),
+ "Unexpected profile entries: " + tojson(dataBearingShardEntries));
+ // The second entry must be the findAndModify command.
+ assert(dataBearingShardEntries[1].command.hasOwnProperty(requestType),
+ "Unexpected profile entries: " + tojson(dataBearingShardEntries));
+
+ assert.eq(
+ 1,
+ nonDataBearingShardEntries.length,
+ `Expected one profiler entry for non data bearing shard in 'twoPhaseProtocol' mode but
+ got: ${tojson(nonDataBearingShardEntries)}`);
+ // The first entry must be for the cluster query.
+ assert(nonDataBearingShardEntries[0].command.hasOwnProperty("aggregate"),
+ "Unexpected profile entries: " + tojson(nonDataBearingShardEntries));
+ } else {
+ // This is the targeted write case. So, we expect one profiler entry on the data bearing
+ // shard and none on the non-data bearing shard.
+
+ assert.eq(1, dataBearingShardEntries.length, tojson(dataBearingShardEntries));
+ // The first entry must be the findAndModify command.
+ assert(dataBearingShardEntries[0].command.hasOwnProperty(requestType),
+ "Unexpected profile entries: " + tojson(dataBearingShardEntries));
+
+ assert.eq(0, nonDataBearingShardEntries.length, tojson(nonDataBearingShardEntries));
+ }
+}
+
+export function restartProfiler() {
+ assert(primaryShard && otherShard, "The sharded cluster must be initialized");
+
+ const primaryDB = primaryShard.getDB(testDB.getName());
+ const otherDB = otherShard.getDB(testDB.getName());
+
+ primaryDB.setProfilingLevel(0);
+ primaryDB.system.profile.drop();
+ primaryDB.setProfilingLevel(2);
+ otherDB.setProfilingLevel(0);
+ otherDB.system.profile.drop();
+ otherDB.setProfilingLevel(2);
+}
+
+/**
+ * Verifies that a findAndModify remove on a sharded timeseries collection returns the expected
+ * result(s) 'res'.
+ *
+ * - initialDocList: The initial documents in the collection.
+ * - cmd.filter: The filter for the findAndModify command.
+ * - cmd.fields: The projection for the findAndModify command.
+ * - cmd.sort: The sort option for the findAndModify command.
+ * - cmd.collation: The collation option for the findAndModify command.
+ * - res.errorCode: If errorCode is set, we expect the command to fail with that code and other
+ fields of 'res' are ignored.
+ * - res.nDeleted: The expected number of documents deleted.
+ * - res.deletedDoc: The expected document returned by the findAndModify command.
+ * - res.writeType: "twoPhaseProtocol" or "targeted". On a sharded time-series collection, we route
+ queries to specific shards if the queries contain the shard key. "twoPhaseProtocol" means
+ that we cannot target a specific data-bearing shard from the query and should use
+ the scatter-gather-like two-phase protocol. On the other hand, "targeted" means
+ we can target a specific shard from the query.
+ * - res.dataBearingShard: "primary", "other", "none", or "any". For "none" and "any", only
+ the "twoPhaseProtocol" is allowed.
+ * - res.rootStage: The expected root stage of the explain plan.
+ * - res.bucketFilter: The expected bucket filter of the TS_MODIFY stage.
+ * - res.residualFilter: The expected residual filter of the TS_MODIFY stage.
+ * - res.nBucketsUnpacked: The expected number of buckets unpacked by the TS_MODIFY stage.
+ * - res.nReturned: The expected number of documents returned by the TS_MODIFY stage.
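+ *
+ * A minimal invocation sketch (hypothetical; the document, filter, and expected values below
+ * are illustrative assumptions only and are not taken from an actual test case):
+ *
+ *   testFindOneAndRemoveOnShardedCollection({
+ *       initialDocList: [{_id: 1, time: ISODate(), tag: "A", f: 103}],
+ *       cmd: {filter: {f: 103}},
+ *       res: {nDeleted: 1, writeType: "twoPhaseProtocol", dataBearingShard: "any"},
+ *   });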
+ */ +export function testFindOneAndRemoveOnShardedCollection({ + initialDocList, + includeMeta = true, + cmd: {filter, fields, sort, collation}, + res: { + errorCode, + nDeleted, + deletedDoc, + writeType, + dataBearingShard, + rootStage, + bucketFilter, + residualFilter, + nBucketsUnpacked, + nReturned, + }, +}) { + const callerName = getCallerName(); + jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`); + + const coll = prepareShardedCollection( + {collName: callerName, initialDocList: initialDocList, includeMeta: includeMeta}); + + const findAndModifyCmd = makeFindOneAndRemoveCommand(coll, filter, fields, sort, collation); + jsTestLog(`Running findAndModify remove: ${tojson(findAndModifyCmd)}`); + + const session = coll.getDB().getSession(); + const shouldRetryWrites = session.getOptions().shouldRetryWrites(); + // TODO SERVER-76583: Remove this check and always verify the result or verify the 'errorCode'. + if (!shouldRetryWrites && !errorCode) { + if (bucketFilter) { + // Due to the limitation of two-phase write protocol, the TS_MODIFY stage's execution + // stats can't really show the results close to real execution. We can just verify + // plan part. + assert(writeType !== "twoPhaseProtocol" || (!nBucketsUnpacked && !nReturned), + "Can't verify nBucketsUnpacked and nReturned for the two-phase protocol."); + + const explainRes = assert.commandWorked( + coll.runCommand({explain: findAndModifyCmd, verbosity: "executionStats"})); + verifyExplain({ + explain: explainRes, + rootStageName: rootStage, + opType: "deleteOne", + bucketFilter: bucketFilter, + residualFilter: residualFilter, + nBucketsUnpacked: nBucketsUnpacked, + nReturned: nReturned, + }); + } + + restartProfiler(); + const res = assert.commandWorked(testDB.runCommand(findAndModifyCmd)); + jsTestLog(`findAndModify remove result: ${tojson(res)}`); + assert.eq(nDeleted, res.lastErrorObject.n, tojson(res)); + let expectedResultDocs = initialDocList; + if (deletedDoc) { + // Note: To figure out the expected result documents, we need to know the _id of the + // deleted document. + assert(deletedDoc.hasOwnProperty("_id"), + `deletedDoc must have _id but got ${tojson(deletedDoc)}`); + assert.docEq(deletedDoc, res.value, tojson(res)); + expectedResultDocs = initialDocList.filter(doc => doc._id !== deletedDoc._id); + } else if (nDeleted === 1) { + // Note: To figure out the expected result documents, we need to know the _id of the + // deleted document. And so we don't allow 'fields' to be specified because it might + // exclude _id field. + assert(!fields, `Must specify deletedDoc when fields are specified: ${tojson(fields)}`); + assert.neq(null, res.value, tojson(res)); + expectedResultDocs = initialDocList.filter(doc => doc._id !== res.value._id); + } else if (nDeleted === 0) { + assert.eq(null, res.value, tojson(res)); + } + + verifyResultDocs(coll, initialDocList, expectedResultDocs, nDeleted); + verifyThatRequestIsRoutedToCorrectShard(coll, "findAndModify", writeType, dataBearingShard); + } else if (errorCode) { + assert.commandFailedWithCode( + testDB.runCommand(findAndModifyCmd), errorCode, `cmd = ${tojson(findAndModifyCmd)}`); + } else { + // TODO SERVER-76583: Remove this test. + assert.commandFailedWithCode( + testDB.runCommand(findAndModifyCmd), 7308305, `cmd = ${tojson(findAndModifyCmd)}`); + } +} + +/** + * Verifies that a findAndModify update on a sharded timeseries collection returns the expected + * result(s) 'res'. + * + * - initialDocList: The initial documents in the collection. 
+ * - cmd.filter: The 'query' spec for the findAndModify command.
+ * - cmd.update: The 'update' spec for the findAndModify command.
+ * - cmd.returnNew: The 'new' option for the findAndModify command.
+ * - cmd.upsert: The 'upsert' option for the findAndModify command.
+ * - cmd.fields: The projection for the findAndModify command.
+ * - cmd.sort: The sort option for the findAndModify command.
+ * - cmd.collation: The collation option for the findAndModify command.
+ * - res.errorCode: If errorCode is set, we expect the command to fail with that code and other
+ fields of 'res' are ignored.
+ * - res.resultDocList: The expected documents in the collection after the update.
+ * - res.returnDoc: The expected document returned by the findAndModify command.
+ * - res.writeType: "twoPhaseProtocol" or "targeted". On a sharded time-series collection, we route
+ queries to specific shards if the queries contain the shard key. "twoPhaseProtocol" means
+ that we cannot target a specific data-bearing shard from the query and should use
+ the scatter-gather-like two-phase protocol. On the other hand, "targeted" means
+ we can target a specific shard from the query.
+ * - res.dataBearingShard: "primary", "other", "none", or "any". For "none" and "any", only
+ the "twoPhaseProtocol" is allowed.
+ * - res.rootStage: The expected root stage of the explain plan.
+ * - res.bucketFilter: The expected bucket filter of the TS_MODIFY stage.
+ * - res.residualFilter: The expected residual filter of the TS_MODIFY stage.
+ * - res.nBucketsUnpacked: The expected number of buckets unpacked by the TS_MODIFY stage.
+ * - res.nMatched: The expected number of documents matched by the TS_MODIFY stage.
+ * - res.nModified: The expected number of documents modified by the TS_MODIFY stage.
+ * - res.nUpserted: The expected number of documents upserted by the TS_MODIFY stage.
+ */
+export function testFindOneAndUpdateOnShardedCollection({
+ initialDocList,
+ startTxn = false,
+ includeMeta = true,
+ cmd: {filter, update, returnNew, upsert, fields, sort, collation},
+ res: {
+ errorCode,
+ resultDocList,
+ returnDoc,
+ writeType,
+ dataBearingShard,
+ rootStage,
+ bucketFilter,
+ residualFilter,
+ nBucketsUnpacked,
+ nMatched,
+ nModified,
+ nUpserted,
+ },
+}) {
+ const callerName = getCallerName();
+ jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`);
+
+ const coll = prepareShardedCollection(
+ {collName: callerName, initialDocList: initialDocList, includeMeta: includeMeta});
+
+ const findAndModifyCmd = makeFindOneAndUpdateCommand(
+ coll, filter, update, returnNew, upsert, fields, sort, collation);
+ jsTestLog(`Running findAndModify update: ${tojson(findAndModifyCmd)}`);
+
+ if (errorCode) {
+ assert.commandFailedWithCode(coll.runCommand(findAndModifyCmd), errorCode);
+ assert.sameMembers(initialDocList,
+ coll.find().toArray(),
+ "Collection contents did not match expected after update failure.");
+ return;
+ }
+
+ // Explain can't be run inside a transaction.
+ if (!startTxn && bucketFilter) {
+ // Due to the limitations of the two-phase write protocol, the TS_MODIFY stage's execution
+ // stats can't closely reflect the real execution. We can only verify the plan part.
+ assert(writeType !== "twoPhaseProtocol" ||
+ (nBucketsUnpacked === undefined && nMatched === undefined &&
+ nModified === undefined),
+ "Can't verify stats for the two-phase protocol.");
+
+ const explainRes = assert.commandWorked(
+ coll.runCommand({explain: findAndModifyCmd, verbosity: "executionStats"}));
+ verifyExplain({
+ explain: explainRes,
+ rootStageName: rootStage,
+ opType: "updateOne",
+ bucketFilter: bucketFilter,
+ residualFilter: residualFilter,
+ nBucketsUnpacked: nBucketsUnpacked,
+ nReturned: returnDoc ? 1 : 0,
+ nMatched: nMatched,
+ nModified: nModified,
+ nUpserted: nUpserted,
+ });
+ }
+
+ restartProfiler();
+ const res = (() => {
+ if (!startTxn) {
+ return assert.commandWorked(testDB.runCommand(findAndModifyCmd));
+ }
+
+ const session = coll.getDB().getMongo().startSession();
+ const sessionDb = session.getDatabase(coll.getDB().getName());
+ session.startTransaction();
+ const res = assert.commandWorked(sessionDb.runCommand(findAndModifyCmd));
+ session.commitTransaction();
+
+ return res;
+ })();
+ jsTestLog(`findAndModify update result: ${tojson(res)}`);
+ if (upsert) {
+ assert(nUpserted !== undefined && (nUpserted === 0 || nUpserted === 1),
+ "nUpserted must be 0 or 1");
+
+ assert.eq(1, res.lastErrorObject.n, tojson(res));
+ if (returnNew !== undefined) {
+ assert(returnDoc, "returnDoc must be provided when upsert is true");
+ assert.docEq(returnDoc, res.value, tojson(res));
+ }
+
+ if (nUpserted === 1) {
+ assert(res.lastErrorObject.upserted, `Expected upserted ObjectId: ${tojson(res)}`);
+ assert.eq(false, res.lastErrorObject.updatedExisting, tojson(res));
+ } else {
+ assert(!res.lastErrorObject.upserted, `Expected no upserted ObjectId: ${tojson(res)}`);
+ assert.eq(true, res.lastErrorObject.updatedExisting, tojson(res));
+ }
+ } else {
+ if (returnDoc !== undefined && returnDoc !== null) {
+ assert.eq(1, res.lastErrorObject.n, tojson(res));
+ assert.eq(true, res.lastErrorObject.updatedExisting, tojson(res));
+ assert.docEq(returnDoc, res.value, tojson(res));
+ } else {
+ assert.eq(0, res.lastErrorObject.n, tojson(res));
+ assert.eq(false, res.lastErrorObject.updatedExisting, tojson(res));
+ assert.eq(null, res.value, tojson(res));
+ }
+ }
+
+ if (resultDocList !== undefined) {
+ assert.sameMembers(resultDocList,
+ coll.find().toArray(),
+ "Collection contents did not match expected after update");
+ }
+
+ verifyThatRequestIsRoutedToCorrectShard(coll, "findAndModify", writeType, dataBearingShard);
+}
+
+/**
+ * Sets up a sharded cluster. 'nMongos' is the number of mongos in the cluster.
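+ *
+ * A minimal usage sketch (hypothetical, for illustration only):
+ *
+ *   setUpShardedCluster({nMongos: 2});
+ *   // ... run sharded time-series tests via the module-level testDB / mongos0DB / mongos1DB ...
+ *   tearDownShardedCluster();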
+ */ +export function setUpShardedCluster({nMongos} = { + nMongos: 1 +}) { + assert.eq(null, st, "A sharded cluster must not be initialized yet"); + assert.eq(null, primaryShard, "The primary shard must not be initialized yet"); + assert.eq(null, otherShard, "The other shard must not be initialized yet"); + assert.eq(null, testDB, "testDB must be not initialized yet"); + assert.eq(null, mongos0DB, "mongos0DB must be not initialized yet"); + assert.eq(null, mongos1DB, "mongos1DB must be not initialized yet"); + assert(nMongos === 1 || nMongos === 2, "nMongos must be 1 or 2"); + + st = new ShardingTest({mongos: nMongos, shards: 2, rs: {nodes: 2}}); + + testDB = st.s.getDB(jsTestName()); + assert.commandWorked(testDB.dropDatabase()); + assert.commandWorked(testDB.adminCommand({enableSharding: testDB.getName()})); + primaryShard = st.getPrimaryShard(testDB.getName()); + st.ensurePrimaryShard(testDB.getName(), primaryShard.shardName); + otherShard = st.getOther(primaryShard); + mongos0DB = st.s0.getDB(testDB.getName()); + if (nMongos > 1) { + mongos1DB = st.s1.getDB(testDB.getName()); + } +} + +/** + * Tears down the sharded cluster created by setUpShardedCluster(). + */ +export function tearDownShardedCluster() { + assert.neq(null, st, "A sharded cluster must be initialized"); + st.stop(); +} diff --git a/jstests/core/timeseries/nondefault_collation.js b/jstests/core/timeseries/nondefault_collation.js index f101ed4a323fb..d4686a448d62d 100644 --- a/jstests/core/timeseries/nondefault_collation.js +++ b/jstests/core/timeseries/nondefault_collation.js @@ -1,6 +1,10 @@ /** - * Test ensures that users can specify non-default collation when querying on time-series - * collections. + * Correctness tests for TS collections with collation that might not match the explicit collation, + * specified in the query. + * + * Queries on timeseries attempt various optimizations to avoid unpacking of buckets. These rely on + * the meta field and the control data (currently, min and max), computed for each bucket. + * Collection's collation might affect the computed control values. * * @tags: [ * # TODO (SERVER-73322): remove @@ -15,131 +19,195 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {aggPlanHasStage} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_nondefault_collation; const bucketsColl = db.getCollection('system.buckets.' + coll.getName()); -coll.drop(); // implicitly drops bucketsColl. - -const timeFieldName = 'time'; -const metaFieldName = 'meta'; - const numericOrdering = { - collation: {locale: "en_US", numericOrdering: true} + locale: "en_US", + numericOrdering: true, + strength: 1 // case and diacritics ignored }; - const caseSensitive = { - collation: {locale: "en_US", strength: 1, caseLevel: true, numericOrdering: true} + locale: "en_US", + strength: 1, + caseLevel: true }; - const diacriticSensitive = { - collation: {locale: "en_US", strength: 2} + locale: "en_US", + strength: 2, + caseLevel: false }; - -const englishCollation = { - locale: 'en', +const insensitive = { + locale: "en_US", strength: 1 }; -const simpleCollation = { - locale: "simple" -}; +// Find on meta field isn't different from a find on any other view, but let's check it anyway. 
+(function testFind_MetaField() { + coll.drop(); -assert.commandWorked(db.createCollection(coll.getName(), { - timeseries: {timeField: timeFieldName, metaField: metaFieldName}, - collation: englishCollation -})); -assert.contains(bucketsColl.getName(), db.getCollectionNames()); - -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "1", name: 'A', name2: "á"})); -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "2", name: 'a', name2: "á"})); -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "5", name: 'A', name2: "á"})); -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "10", name: 'a', name2: "á"})); -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "20", name: 'A', name2: "a"})); -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "50", name: 'B', name2: "a"})); -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "100", name: 'b', name2: "a"})); -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "200", name: 'B', name2: "a"})); -assert.commandWorked( - coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: "500", name: 'b', name2: "a"})); - -// Default collation is case and diacretic insensitive. -assert.eq(2, coll.aggregate([{$sortByCount: "$name"}]).itcount()); -assert.eq(1, coll.aggregate([{$sortByCount: "$name2"}]).itcount()); - -// Test that a explicit collation different from collection's default passes for a timeseries -// collection. -let results = - coll.aggregate([{$bucket: {groupBy: "$meta", boundaries: ["1", "10", "100", "1000"]}}], - numericOrdering) - .toArray(); -assert.eq(3, results.length); -assert.eq({_id: "1", count: 3}, results[0]); -assert.eq({_id: "10", count: 3}, results[1]); -assert.eq({_id: "100", count: 3}, results[2]); - -assert.eq(4, coll.aggregate([{$sortByCount: "$name"}], caseSensitive).itcount()); -assert.eq(2, coll.aggregate([{$sortByCount: "$name2"}], diacriticSensitive).itcount()); - -coll.drop(); -const defaultCollation = { - locale: "en", - numericOrdering: true, - caseLevel: true, - strength: 2 -}; -assert.commandWorked(db.createCollection(coll.getName(), { - timeseries: {timeField: timeFieldName, metaField: metaFieldName}, - collation: defaultCollation -})); -assert.contains(bucketsColl.getName(), db.getCollectionNames()); -assert.commandWorked(coll.createIndex({[metaFieldName]: 1}, {collation: {locale: "simple"}})); - -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: 1, name: 'A', name2: "á", value: "1"})); -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: 2, name: 'a', name2: "á", value: "11"})); -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: 1, name: 'A', name2: "á", value: "50"})); -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: 1, name: 'a', name2: "á", value: "100"})); -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: "2", name: 'A', name2: "a", value: "3"})); -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: "5", name: 'B', name2: "a", value: "-100"})); -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: "1", name: 'b', name2: "a", value: "-200"})); -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: "2", name: 'B', name2: "a", 
value: "1000"})); -assert.commandWorked(coll.insert( - {[timeFieldName]: ISODate(), [metaFieldName]: "5", name: 'b', name2: "a", value: "4"})); - -// This collection has been created using non simple collation. The collection was then indexed on -// its metadata using simple collation. These tests confirm that queries on the indexed field using -// nondefault (simple) collation use the index. They also confirm that queries that don't involve -// strings but do use default collation, on indexed fields, also use the index. -const nonDefaultCollationQuery = coll.find({meta: 2}).collation(englishCollation).explain(); -assert(aggPlanHasStage(nonDefaultCollationQuery, "IXSCAN"), nonDefaultCollationQuery); - -const simpleNonDefaultCollationQuery = coll.find({meta: 2}).collation(simpleCollation).explain(); -assert(aggPlanHasStage(simpleNonDefaultCollationQuery, "IXSCAN"), simpleNonDefaultCollationQuery); - -const defaultCollationQuery = coll.find({meta: 1}).collation(defaultCollation).explain(); -assert(aggPlanHasStage(defaultCollationQuery, "IXSCAN"), defaultCollationQuery); - -// This test guarantees that the bucket's min/max matches the query's min/max regardless of -// collation. -results = coll.find({value: {$gt: "4"}}).collation(simpleCollation); -assert.eq(1, results.itcount()); + assert.commandWorked(db.createCollection( + coll.getName(), + {timeseries: {timeField: 'time', metaField: 'meta'}, collation: numericOrdering})); + assert.contains(bucketsColl.getName(), db.getCollectionNames()); + + assert.commandWorked(coll.insert({time: ISODate(), meta: "1", value: 42})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "10", value: 42})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "5", value: 42})); + + // Use the collection's collation with numeric ordering. + let res1 = coll.find({meta: {$gt: "4"}}); + assert.eq(2, res1.itcount(), res1.toArray()); // should match "5" and "10" + + // Use explicit collation with lexicographic ordering. + let res2 = coll.find({meta: {$gt: "4"}}).collation(insensitive); + assert.eq(1, res2.itcount(), res2.toArray()); // should match only "5" +}()); + +// For the measurement fields each bucket computes additional "control values", such as min/max and +// might use them to avoid unpacking. +(function testFind_MeasurementField() { + coll.drop(); + + assert.commandWorked(db.createCollection( + coll.getName(), + {timeseries: {timeField: 'time', metaField: 'meta'}, collation: numericOrdering})); + assert.contains(bucketsColl.getName(), db.getCollectionNames()); + + // The 'numericOrdering' on the collection means that the max of the bucket with the three docs + // below is "10" (while the lexicographic max is "5"). + assert.commandWorked(coll.insert({time: ISODate(), meta: 42, value: "1"})); + assert.commandWorked(coll.insert({time: ISODate(), meta: 42, value: "10"})); + assert.commandWorked(coll.insert({time: ISODate(), meta: 42, value: "5"})); + + // A query with default collation would use the bucket's min/max and find the matches. We are + // not checking the unpacking optimizations here as it's not a concern of collation per se. + let res1 = coll.find({value: {$gt: "4"}}); + assert.eq(2, res1.itcount(), res1.toArray()); // should match "5" and "10" + + // If a query with 'insensitive' collation, which doesn't do numeric ordering, used the bucket's + // min/max it would miss the bucket. Check, that it doesn't. 
+ let res2 = coll.find({value: {$gt: "4"}}).collation(insensitive); + assert.eq(1, res2.itcount(), res2.toArray()); // should match only "5" +}()); + +(function testAgg_GroupByMetaField() { + coll.drop(); + + assert.commandWorked(db.createCollection( + coll.getName(), + {timeseries: {timeField: 'time', metaField: 'meta'}, collation: numericOrdering})); + assert.contains(bucketsColl.getName(), db.getCollectionNames()); + + assert.commandWorked(coll.insert({time: ISODate(), meta: "1", val: 1})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "5", val: 1})); + + // Using collection's collation with numeric ordering. + let res1 = + coll.aggregate([{$bucket: {groupBy: "$meta", boundaries: ["1", "10", "50"]}}]).toArray(); + assert.eq(1, res1.length); + assert.eq({_id: "1", count: 2}, res1[0]); + + // Using explicit collation with lexicographic ordering. + let res2 = coll.aggregate([{$bucket: {groupBy: "$meta", boundaries: ["1", "10", "50"]}}], + {collation: insensitive}) + .toArray(); + assert.eq(2, res2.length); + assert.eq({_id: "1", count: 1}, res2[0]); // "1" goes here + assert.eq({_id: "10", count: 1}, res2[1]); // "5" goes here +}()); + +(function testAgg_GroupByMeasurementField() { + coll.drop(); + + assert.commandWorked(db.createCollection( + coll.getName(), + {timeseries: {timeField: 'time', metaField: 'meta'}, collation: insensitive})); + assert.contains(bucketsColl.getName(), db.getCollectionNames()); + + // Cause two different buckets with various case/diacritics in each for the measurement 'name'. + assert.commandWorked(coll.insert({time: ISODate(), meta: "a", name: 'A'})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "a", name: 'a'})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "a", name: 'á'})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "b", name: 'A'})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "b", name: 'a'})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "b", name: 'ä'})); + + // Test with the collection's collation, which is case and diacritic insensitive. + assert.eq(1, coll.aggregate([{$sortByCount: "$name"}]).itcount()); + + // Test with explicit collation that is different from the collection's. + assert.eq(2, coll.aggregate([{$sortByCount: "$name"}], {collation: caseSensitive}).itcount()); + assert.eq(3, + coll.aggregate([{$sortByCount: "$name"}], {collation: diacriticSensitive}).itcount()); +}()); + +// For $group queries that would put whole buckets into the same group, it might be possible to +// avoid unpacking if the information the group is computing is exposed in the control data of each +// bucket. Currently, we only do this optimization for min/max with the meta as the group key. +(function testAgg_MinMaxOptimization() { + coll.drop(); + + assert.commandWorked(db.createCollection( + coll.getName(), + {timeseries: {timeField: 'time', metaField: 'meta'}, collation: numericOrdering})); + assert.contains(bucketsColl.getName(), db.getCollectionNames()); + + // These two docs will be placed in the same bucket, and the max for the bucket will be computed + // using collection's collation, that is, it should be "10". + assert.commandWorked(coll.insert({time: ISODate(), meta: 42, val: "10"})); + assert.commandWorked(coll.insert({time: ISODate(), meta: 42, val: "5"})); + + // Let's check our understanding of what happens with the bucketing as otherwise the tests below + // won't be testing what we think they are. 
+ let buckets = bucketsColl.find().toArray(); + assert.eq(1, buckets.length, "All docs should be placed into the same bucket"); + assert.eq("10", buckets[0].control.max.val, "Computed max control for 'val' measurement"); + + // Use the collection's collation with numeric ordering. + let res1 = coll.aggregate([{$group: {_id: "$meta", v: {$max: "$val"}}}]).toArray(); + assert.eq("10", res1[0].v, "max val in numeric ordering per the collection's collation"); + + // Use the collection's collation with lexicographic ordering. + let res2 = + coll.aggregate([{$group: {_id: "$meta", v: {$max: "$val"}}}], {collation: insensitive}) + .toArray(); + assert.eq("5", res2[0].v, "max val in lexicographic ordering per the query collation"); }()); + +(function testFind_IndexWithDifferentCollation() { + coll.drop(); + + assert.commandWorked(db.createCollection( + coll.getName(), + {timeseries: {timeField: 'time', metaField: 'meta'}, collation: diacriticSensitive})); + assert.contains(bucketsColl.getName(), db.getCollectionNames()); + + // Create index with a different collation. + assert.commandWorked(coll.createIndex({meta: 1}, {collation: insensitive})); + + // We only check that the correct plan is chosen so the contents of the collection don't matter + // as long as it's not empty. + assert.commandWorked(coll.insert({time: ISODate(), meta: 42})); + assert.commandWorked(coll.insert({time: ISODate(), meta: "the answer"})); + + // Queries that don't specify explicit collation should use the collection's default collation + // which isn't compatible with the index, so the index should NOT be used. + let query = coll.find({meta: "str"}).explain(); + assert(!aggPlanHasStage(query, "IXSCAN"), query); + + // Queries with an explicit collation which isn't compatible with the index, should NOT do + // index scan. + query = coll.find({meta: "str"}).collation(caseSensitive).explain(); + assert(!aggPlanHasStage(query, "IXSCAN"), query); + + // Queries with the same collation as in the index, should do index scan. + query = coll.find({meta: "str"}).collation(insensitive).explain(); + assert(aggPlanHasStage(query, "IXSCAN"), query); + + // Numeric queries that don't rely on collation should do index scan. + query = coll.find({meta: 1}).explain(); + assert(aggPlanHasStage(query, "IXSCAN"), query); +}()); \ No newline at end of file diff --git a/jstests/core/timeseries/partialFilterExpression_with_internalBucketGeoWithin.js b/jstests/core/timeseries/partialFilterExpression_with_internalBucketGeoWithin.js index d0a94ff3affad..50e20a5b6555c 100644 --- a/jstests/core/timeseries/partialFilterExpression_with_internalBucketGeoWithin.js +++ b/jstests/core/timeseries/partialFilterExpression_with_internalBucketGeoWithin.js @@ -11,12 +11,11 @@ * ] */ -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); +import {getWinningPlan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/fixture_helpers.js"); // For isSharded. 
load('jstests/noPassthrough/libs/index_build.js'); -(function() { if (FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { const timeFieldName = "timestamp"; @@ -183,4 +182,3 @@ if (FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { .explain()); assert(isCollscan(db, getWinningPlan(findAndExplain.queryPlanner))); } -})(); diff --git a/jstests/core/timeseries/timeseries_bucket_index.js b/jstests/core/timeseries/timeseries_bucket_index.js index 1a816b7c84d1a..093302190892c 100644 --- a/jstests/core/timeseries/timeseries_bucket_index.js +++ b/jstests/core/timeseries/timeseries_bucket_index.js @@ -9,11 +9,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/analyze_plan.js"); // For 'planHasStage' helper. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {planHasStage} from "jstests/libs/analyze_plan.js"; TimeseriesTest.run((insert) => { const coll = db.timeseries_bucket_index; @@ -54,4 +51,3 @@ TimeseriesTest.run((insert) => { assert.commandWorked(bucketsColl.remove({_id: bucketId})); assert.docEq([], bucketsColl.find().toArray()); }); -})(); diff --git a/jstests/core/timeseries/timeseries_bucket_limit_count.js b/jstests/core/timeseries/timeseries_bucket_limit_count.js index a6942d3f42cda..c60d540d662de 100644 --- a/jstests/core/timeseries/timeseries_bucket_limit_count.js +++ b/jstests/core/timeseries/timeseries_bucket_limit_count.js @@ -9,10 +9,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; TimeseriesTest.run((insert) => { const collNamePrefix = 'timeseries_bucket_limit_count_'; @@ -70,9 +68,15 @@ TimeseriesTest.run((insert) => { assert.eq(bucketMaxCount - 1, bucketDocs[0].control.max.x, 'invalid control.max for x in first bucket: ' + tojson(bucketDocs)); - assert.eq(2, - bucketDocs[0].control.version, - 'unexpected control.version in first bucket: ' + tojson(bucketDocs)); + if (FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) { + assert.eq(1, + bucketDocs[0].control.version, + 'unexpected control.version in first bucket: ' + tojson(bucketDocs)); + } else { + assert.eq(2, + bucketDocs[0].control.version, + 'unexpected control.version in first bucket: ' + tojson(bucketDocs)); + } assert(!bucketDocs[0].control.hasOwnProperty("closed"), 'unexpected control.closed in first bucket: ' + tojson(bucketDocs)); @@ -100,4 +104,3 @@ TimeseriesTest.run((insert) => { runTest(numDocs / 2); runTest(numDocs); }); -})(); diff --git a/jstests/core/timeseries/timeseries_bucket_limit_time_range.js b/jstests/core/timeseries/timeseries_bucket_limit_time_range.js index 23454d6068e97..2509ad01ca21c 100644 --- a/jstests/core/timeseries/timeseries_bucket_limit_time_range.js +++ b/jstests/core/timeseries/timeseries_bucket_limit_time_range.js @@ -8,10 +8,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. 
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const isTimeseriesScalabilityImprovementsEnabled = @@ -124,5 +121,4 @@ TimeseriesTest.run((insert) => { runTest(1); runTest(numDocs); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_bucket_manual_removal.js b/jstests/core/timeseries/timeseries_bucket_manual_removal.js index 698aed1104fa7..dd439bfff0d2a 100644 --- a/jstests/core/timeseries/timeseries_bucket_manual_removal.js +++ b/jstests/core/timeseries/timeseries_bucket_manual_removal.js @@ -10,10 +10,7 @@ * requires_timeseries, * ] */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const coll = db.timeseries_bucket_manual_removal; @@ -68,5 +65,4 @@ TimeseriesTest.run((insert) => { buckets = bucketsColl.find().toArray(); assert.eq(buckets.length, 1, 'Expected one bucket but found ' + tojson(buckets)); assert.neq(buckets[0]._id, bucketId); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_bucket_rename.js b/jstests/core/timeseries/timeseries_bucket_rename.js deleted file mode 100644 index d219d10480211..0000000000000 --- a/jstests/core/timeseries/timeseries_bucket_rename.js +++ /dev/null @@ -1,27 +0,0 @@ -/** - * Tests that a system.buckets collection cannot be renamed. - * - * @tags: [ - * # We need a timeseries collection. - * requires_timeseries, - * ] - */ -(function() { -'use strict'; - -const coll = db.timeseries_bucket_rename; -const bucketsColl = db.getCollection('system.buckets.' + coll.getName()); - -const timeFieldName = 'time'; - -coll.drop(); -assert.commandWorked(db.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}})); -assert.contains(bucketsColl.getName(), db.getCollectionNames()); - -assert.commandFailedWithCode(db.adminCommand({ - renameCollection: bucketsColl.getFullName(), - to: db.getName() + ".otherColl", - dropTarget: false -}), - ErrorCodes.IllegalOperation); -})(); diff --git a/jstests/core/timeseries/timeseries_collation.js b/jstests/core/timeseries/timeseries_collation.js index 76ef7fca54a6f..f86402953dea5 100644 --- a/jstests/core/timeseries/timeseries_collation.js +++ b/jstests/core/timeseries/timeseries_collation.js @@ -9,10 +9,7 @@ * requires_timeseries, * ] */ -(function() { -'use strict'; - -load('jstests/core/timeseries/libs/timeseries.js'); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const coll = db.timeseries_collation; @@ -92,5 +89,4 @@ TimeseriesTest.run((insert) => { assert.eq(buckets[2].control.min.y, null); assert.eq(buckets[2].control.max.x, null); assert.eq(buckets[2].control.max.y, null); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_collmod.js b/jstests/core/timeseries/timeseries_collmod.js index 18ae48a421f27..503b100697914 100644 --- a/jstests/core/timeseries/timeseries_collmod.js +++ b/jstests/core/timeseries/timeseries_collmod.js @@ -11,10 +11,7 @@ * ] */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const collName = "timeseries_collmod"; const coll = db.getCollection(collName); @@ -214,5 +211,4 @@ if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db.getMongo())) { // No-op 
command should succeed with empty time-series options. assert.commandWorked(db.runCommand({"collMod": collName, "timeseries": {}})); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_compact.js b/jstests/core/timeseries/timeseries_compact.js index b5a3b22f1d7d6..549f3d801131d 100644 --- a/jstests/core/timeseries/timeseries_compact.js +++ b/jstests/core/timeseries/timeseries_compact.js @@ -13,10 +13,7 @@ * uses_compact, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run(() => { const coll = db.timeseries_compact; @@ -33,5 +30,4 @@ TimeseriesTest.run(() => { assert.commandWorked(db.runCommand({compact: coll.getName(), force: true})); assert.commandWorked(db.runCommand({compact: "system.buckets." + coll.getName(), force: true})); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_create_collection.js b/jstests/core/timeseries/timeseries_create_collection.js index 04d37eff5c09f..0cea8021481b0 100644 --- a/jstests/core/timeseries/timeseries_create_collection.js +++ b/jstests/core/timeseries/timeseries_create_collection.js @@ -19,6 +19,15 @@ assert.commandWorked(testDB.dropDatabase()); const timeFieldName = 'time'; const coll = testDB.t; +// Fails to create a time-series collection with null-embedded timeField or metaField. +assert.commandFailedWithCode( + testDB.createCollection(coll.getName(), {timeseries: {timeField: '\0time'}}), + ErrorCodes.BadValue); +assert.commandFailedWithCode( + testDB.createCollection(coll.getName(), + {timeseries: {timeField: timeFieldName, metaField: 't\0ag'}}), + ErrorCodes.BadValue); + // Create a timeseries collection, listCollection should show view and bucket collection assert.commandWorked( testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}})); diff --git a/jstests/core/timeseries/timeseries_create_invalid_view.js b/jstests/core/timeseries/timeseries_create_invalid_view.js deleted file mode 100644 index 54254e8c3cada..0000000000000 --- a/jstests/core/timeseries/timeseries_create_invalid_view.js +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Verify we cannot create a view on a system.buckets collection. - * - * @tags: [ - * # This restriction was added in 6.1. - * requires_fcv_61, - * # We need a timeseries collection. - * requires_timeseries, - * ] - */ -(function() { -'use strict'; - -const testDB = db.getSiblingDB(jsTestName()); -assert.commandWorked(testDB.dropDatabase()); - -const timeFieldName = 'time'; -const coll = testDB.t; - -// Create a timeseries collection, listCollection should show view and bucket collection -assert.commandWorked( - testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}})); -let collections = assert.commandWorked(testDB.runCommand({listCollections: 1})).cursor.firstBatch; -jsTestLog('Checking listCollections result: ' + tojson(collections)); -assert(collections.find(entry => entry.name === 'system.buckets.' + coll.getName())); -assert(collections.find(entry => entry.name === coll.getName())); - -// Ensure we cannot create a view on a system.buckets collection. -assert.commandFailedWithCode(testDB.createView("badView", "system.buckets." 
+ coll.getName(), []), - ErrorCodes.InvalidNamespace); -})(); diff --git a/jstests/core/timeseries/timeseries_delete_collation.js b/jstests/core/timeseries/timeseries_delete_collation.js index 913654b63439f..b808f21e52408 100644 --- a/jstests/core/timeseries/timeseries_delete_collation.js +++ b/jstests/core/timeseries/timeseries_delete_collation.js @@ -6,12 +6,13 @@ * # We need a timeseries collection. * requires_timeseries, * requires_non_retryable_writes, - * requires_fcv_70, + * requires_fcv_71, * ] */ -(function() { -"use strict"; +import {getPlanStage} from "jstests/libs/analyze_plan.js"; + +load("jstests/libs/fixture_helpers.js"); // For 'isMongos' const timeFieldName = "time"; const metaFieldName = "tag"; @@ -33,6 +34,9 @@ const caseInsensitive = { const simple = { locale: "simple" }; +const closedBucketFilter = { + "control.closed": {"$not": {"$eq": true}} +}; const docs = [ {_id: 0, [timeFieldName]: dateTime, [metaFieldName]: "A", str: "HELLO"}, @@ -50,9 +54,22 @@ const docs = [ ]; /** - * Confirms that a set of deletes returns the expected set of documents. + * Confirms that a set of deletes returns the expected set of documents and runs the correct delete + * stage and bucket query. */ -function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted}) { +function runTest({ + deleteFilter, + queryCollation, + collectionCollation, + nDeleted, + expectedBucketQuery, + expectedDeleteStage +}) { + jsTestLog(`Running ${tojson(deleteFilter)} with queryCollation: ${ + tojson(queryCollation)} and collectionCollation: ${tojson(collectionCollation)}`); + + assert(expectedDeleteStage === "TS_MODIFY" || expectedDeleteStage === "DELETE"); + const coll = testDB.getCollection(collNamePrefix + testCaseId++); assert.commandWorked(testDB.createCollection(coll.getName(), { timeseries: {timeField: timeFieldName, metaField: metaFieldName}, @@ -60,40 +77,186 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted}) })); assert.commandWorked(coll.insert(docs)); - const res = assert.commandWorked(coll.deleteMany(deleteFilter, {collation: queryCollation})); - assert.eq(nDeleted, res.deletedCount); + const deleteCommand = { + delete: coll.getName(), + deletes: [{q: deleteFilter, limit: 0, collation: queryCollation}] + }; + const explain = testDB.runCommand({explain: deleteCommand, verbosity: "queryPlanner"}); + const parsedQuery = FixtureHelpers.isMongos(testDB) + ? explain.queryPlanner.winningPlan.shards[0].parsedQuery + : explain.queryPlanner.parsedQuery; + + assert.eq(expectedBucketQuery, parsedQuery, `Got wrong parsedQuery: ${tojson(explain)}`); + assert.neq(null, + getPlanStage(explain.queryPlanner.winningPlan, expectedDeleteStage), + `${expectedDeleteStage} stage not found in the plan: ${tojson(explain)}`); + + const res = assert.commandWorked(testDB.runCommand(deleteCommand)); + assert.eq(nDeleted, res.n); } (function testNoCollation() { // Residual filter. 
- runTest({deleteFilter: {str: "Hello"}, nDeleted: 0}); - runTest({deleteFilter: {str: "hello"}, nDeleted: 3}); + runTest({ + deleteFilter: {str: "Hello"}, + nDeleted: 0, + expectedBucketQuery: { + $and: [ + closedBucketFilter, + {"control.max.str": {$_internalExprGte: "Hello"}}, + {"control.min.str": {$_internalExprLte: "Hello"}} + ] + }, + expectedDeleteStage: "TS_MODIFY" + }); + runTest({ + deleteFilter: {str: "hello"}, + nDeleted: 3, + expectedBucketQuery: { + $and: [ + closedBucketFilter, + {"control.max.str": {$_internalExprGte: "hello"}}, + {"control.min.str": {$_internalExprLte: "hello"}} + ] + }, + expectedDeleteStage: "TS_MODIFY" + }); // Bucket filter. - runTest({deleteFilter: {[metaFieldName]: "a"}, nDeleted: 0}); - runTest({deleteFilter: {[metaFieldName]: "A"}, nDeleted: 4}); + runTest({ + deleteFilter: {[metaFieldName]: "a"}, + nDeleted: 0, + expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" + }); + runTest({ + deleteFilter: {[metaFieldName]: "A"}, + nDeleted: 4, + expectedBucketQuery: { + $and: [ + {meta: {$eq: "A"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" + }); })(); (function testQueryLevelCollation() { // Residual filter. - runTest({deleteFilter: {str: "Hello"}, queryCollation: caseSensitive, nDeleted: 0}); - runTest({deleteFilter: {str: "Hello"}, queryCollation: caseInsensitive, nDeleted: 6}); + runTest({ + deleteFilter: {str: "Hello"}, + queryCollation: caseSensitive, + nDeleted: 0, + expectedBucketQuery: { + $and: [ + closedBucketFilter, + {"control.max.str": {$_internalExprGte: "Hello"}}, + {"control.min.str": {$_internalExprLte: "Hello"}} + ] + }, + expectedDeleteStage: "TS_MODIFY" + }); + runTest({ + deleteFilter: {str: "Hello"}, + queryCollation: caseInsensitive, + nDeleted: 6, + expectedBucketQuery: { + $and: [ + closedBucketFilter, + {"control.max.str": {$_internalExprGte: "Hello"}}, + {"control.min.str": {$_internalExprLte: "Hello"}} + ] + }, + expectedDeleteStage: "TS_MODIFY" + }); // Bucket filter. - runTest({deleteFilter: {[metaFieldName]: "a"}, queryCollation: caseSensitive, nDeleted: 0}); - runTest({deleteFilter: {[metaFieldName]: "a"}, queryCollation: caseInsensitive, nDeleted: 4}); + runTest({ + deleteFilter: {[metaFieldName]: "a"}, + queryCollation: caseSensitive, + nDeleted: 0, + expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" + }); + runTest({ + deleteFilter: {[metaFieldName]: "a"}, + queryCollation: caseInsensitive, + nDeleted: 4, + expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" + }); })(); (function testCollectionLevelCollation() { // Residual filter. 
- runTest({deleteFilter: {str: "Hello"}, collectionCollation: caseSensitive, nDeleted: 0}); - runTest({deleteFilter: {str: "Hello"}, collectionCollation: caseInsensitive, nDeleted: 6}); + runTest({ + deleteFilter: {str: "Hello"}, + collectionCollation: caseSensitive, + nDeleted: 0, + expectedBucketQuery: { + $and: [ + closedBucketFilter, + {"control.max.str": {$_internalExprGte: "Hello"}}, + {"control.min.str": {$_internalExprLte: "Hello"}} + ] + }, + expectedDeleteStage: "TS_MODIFY" + }); + runTest({ + deleteFilter: {str: "Hello"}, + collectionCollation: caseInsensitive, + nDeleted: 6, + expectedBucketQuery: { + $and: [ + closedBucketFilter, + {"control.max.str": {$_internalExprGte: "Hello"}}, + {"control.min.str": {$_internalExprLte: "Hello"}} + ] + }, + expectedDeleteStage: "TS_MODIFY" + }); // Bucket filter. - runTest( - {deleteFilter: {[metaFieldName]: "a"}, collectionCollation: caseSensitive, nDeleted: 0}); - runTest( - {deleteFilter: {[metaFieldName]: "a"}, collectionCollation: caseInsensitive, nDeleted: 4}); + runTest({ + deleteFilter: {[metaFieldName]: "a"}, + collectionCollation: caseSensitive, + nDeleted: 0, + expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" + }); + runTest({ + deleteFilter: {[metaFieldName]: "a"}, + collectionCollation: caseInsensitive, + nDeleted: 4, + expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" + }); })(); (function testQueryLevelCollationOverridesDefault() { @@ -102,13 +265,40 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted}) deleteFilter: {str: "Hello"}, queryCollation: caseInsensitive, collectionCollation: caseInsensitive, - nDeleted: 6 + nDeleted: 6, + expectedBucketQuery: { + $and: [ + closedBucketFilter, + {"control.max.str": {$_internalExprGte: "Hello"}}, + {"control.min.str": {$_internalExprLte: "Hello"}} + ] + }, + expectedDeleteStage: "TS_MODIFY" }); runTest({ deleteFilter: {str: "Hello"}, queryCollation: caseInsensitive, collectionCollation: caseSensitive, - nDeleted: 6 + nDeleted: 6, + // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level + // collation overrides the collection level collation. + expectedBucketQuery: closedBucketFilter, + expectedDeleteStage: "TS_MODIFY" + }); + runTest({ + deleteFilter: {[metaFieldName]: "A", str: "Hello"}, + queryCollation: caseInsensitive, + collectionCollation: caseSensitive, + nDeleted: 2, + // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level + // collation overrides the collection level collation. + expectedBucketQuery: { + $and: [ + {meta: {$eq: "A"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "TS_MODIFY" }); // Bucket filter. @@ -116,13 +306,29 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted}) deleteFilter: {[metaFieldName]: "a"}, queryCollation: caseInsensitive, collectionCollation: caseInsensitive, - nDeleted: 4 + nDeleted: 4, + // We can push down bucket filter for DELETE stage with the query level collation. + expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" }); runTest({ deleteFilter: {[metaFieldName]: "a"}, queryCollation: caseInsensitive, collectionCollation: caseSensitive, - nDeleted: 4 + nDeleted: 4, + // We can push down bucket filter for DELETE stage with the query level collation. 
+ expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" }); })(); @@ -132,13 +338,51 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted}) deleteFilter: {str: "Hello"}, queryCollation: simple, collectionCollation: caseInsensitive, - nDeleted: 0 + nDeleted: 0, + // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level + // collation overrides the collection level collation. + expectedBucketQuery: closedBucketFilter, + expectedDeleteStage: "TS_MODIFY" }); runTest({ deleteFilter: {str: "hello"}, queryCollation: simple, collectionCollation: caseInsensitive, - nDeleted: 3 + nDeleted: 3, + // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level + // collation overrides the collection level collation. + expectedBucketQuery: closedBucketFilter, + expectedDeleteStage: "TS_MODIFY" + }); + runTest({ + deleteFilter: {[metaFieldName]: "a", str: "hello"}, + queryCollation: simple, + collectionCollation: caseInsensitive, + nDeleted: 0, + // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level + // collation overrides the collection level collation. + expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "TS_MODIFY" + }); + runTest({ + deleteFilter: {[metaFieldName]: "A", str: "HELLO"}, + queryCollation: simple, + collectionCollation: caseInsensitive, + nDeleted: 1, + // We cannot push down bucket metric predicate for TS_MODIFY stage when the query level + // collation overrides the collection level collation. + expectedBucketQuery: { + $and: [ + {meta: {$eq: "A"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "TS_MODIFY" }); // Bucket filter. @@ -146,13 +390,28 @@ function runTest({deleteFilter, queryCollation, collectionCollation, nDeleted}) deleteFilter: {[metaFieldName]: "a"}, queryCollation: simple, collectionCollation: caseInsensitive, - nDeleted: 0 + nDeleted: 0, + // We can push down bucket filter for DELETE stage with the query level collation. + expectedBucketQuery: { + $and: [ + {meta: {$eq: "a"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" }); runTest({ deleteFilter: {[metaFieldName]: "A"}, queryCollation: simple, collectionCollation: caseInsensitive, - nDeleted: 4 + nDeleted: 4, + // We can push down bucket filter for DELETE stage with the query level collation. + expectedBucketQuery: { + $and: [ + {meta: {$eq: "A"}}, + closedBucketFilter, + ] + }, + expectedDeleteStage: "DELETE" }); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_delete_compressed_buckets.js b/jstests/core/timeseries/timeseries_delete_compressed_buckets.js index 0b290d9240113..d159e70cf8d23 100644 --- a/jstests/core/timeseries/timeseries_delete_compressed_buckets.js +++ b/jstests/core/timeseries/timeseries_delete_compressed_buckets.js @@ -10,8 +10,13 @@ * ] */ -(function() { -"use strict"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +// TODO SERVER-77454: Investigate re-enabling this. 
+if (FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) { + jsTestLog("Skipping test as the always use compressed buckets feature is enabled"); + quit(); +} const timeFieldName = "time"; const metaFieldName = "tag"; @@ -74,5 +79,4 @@ if (FeatureFlagUtil.isPresentAndEnabled(db, "UpdateOneWithoutShardKey")) { assert.eq(coll.countDocuments({f: {$lt: 100}}), 100 - 50 - 1, // 100 records to start + 50 deleted above + 1 more deleted "Expected records matching the filter to be deleted."); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_delete_hint.js b/jstests/core/timeseries/timeseries_delete_hint.js index 187173f48d412..0e8b47216047c 100644 --- a/jstests/core/timeseries/timeseries_delete_hint.js +++ b/jstests/core/timeseries/timeseries_delete_hint.js @@ -17,11 +17,7 @@ * uses_parallel_shell, * ] */ -(function() { -"use strict"; - load("jstests/libs/curop_helpers.js"); -load("jstests/libs/feature_flag_util.js"); load('jstests/libs/parallel_shell_helpers.js'); const timeFieldName = "time"; @@ -68,7 +64,7 @@ const validateDeleteIndex = (docsToInsert, : assert.commandWorked( testDB.runCommand({delete: coll.getName(), deletes: deleteQuery})); assert.eq(res["n"], expectedNRemoved); - assert.docEq(expectedRemainingDocs, coll.find({}, {_id: 0}).toArray()); + assert.sameMembers(expectedRemainingDocs, coll.find({}, {_id: 0}).toArray()); assert(coll.drop()); }, docsToInsert, @@ -207,5 +203,4 @@ validateDeleteIndex([objA, objB, objC], [{q: {[metaFieldName]: {c: "C"}}, limit: 0, hint: {"test_hint": 1}}], [{[metaFieldName]: -1}, {[timeFieldName]: 1}], "IXSCAN { control.min.time: 1, control.max.time: 1 }", - {expectedErrorCode: ErrorCodes.BadValue}); -})(); + {expectedErrorCode: ErrorCodes.BadValue}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_delete_one.js b/jstests/core/timeseries/timeseries_delete_one.js index 39ad152a7d215..deeab37afb791 100644 --- a/jstests/core/timeseries/timeseries_delete_one.js +++ b/jstests/core/timeseries/timeseries_delete_one.js @@ -4,97 +4,24 @@ * @tags: [ * # We need a timeseries collection. * requires_timeseries, - * featureFlagUpdateOneWithoutShardKey, + * requires_fcv_71 * ] */ -(function() { -"use strict"; - -const timeFieldName = "time"; -const metaFieldName = "tag"; -const dateTime = ISODate("2021-07-12T16:00:00Z"); -const collNamePrefix = "timeseries_delete_one_"; -let testCaseId = 0; - -const testDB = db.getSiblingDB(jsTestName()); -assert.commandWorked(testDB.dropDatabase()); - -/** - * Confirms that a deleteOne() returns the expected set of documents. - */ -function testDeleteOne({initialDocList, filter, expectedResultDocs, nDeleted}) { - const coll = testDB.getCollection(collNamePrefix + testCaseId++); - assert.commandWorked(testDB.createCollection( - coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); - - assert.commandWorked(coll.insert(initialDocList)); - - const res = assert.commandWorked(coll.deleteOne(filter)); - assert.eq(nDeleted, res.deletedCount); - - const resultDocs = coll.find().toArray(); - assert.eq(resultDocs.length, initialDocList.length - nDeleted, tojson(resultDocs)); - - // Validate the collection's exact contents if we were given the expected results. We may skip - // this step in some cases, if the delete doesn't pinpoint a specific document. 
- if (expectedResultDocs) { - assert.eq(expectedResultDocs.length, resultDocs.length, resultDocs); - expectedResultDocs.forEach(expectedDoc => { - assert.docEq( - expectedDoc, - coll.findOne({_id: expectedDoc._id}), - `Expected document (_id = ${expectedDoc._id}) not found in result collection: ${ - tojson(resultDocs)}`); - }); - } -} - -const doc1_a_nofields = { - _id: 1, - [timeFieldName]: dateTime, - [metaFieldName]: "A", -}; -const doc2_a_f101 = { - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: "A", - f: 101 -}; -const doc3_a_f102 = { - _id: 3, - [timeFieldName]: dateTime, - [metaFieldName]: "A", - f: 102 -}; -const doc4_b_f103 = { - _id: 4, - [timeFieldName]: dateTime, - [metaFieldName]: "B", - f: 103 -}; -const doc5_b_f104 = { - _id: 5, - [timeFieldName]: dateTime, - [metaFieldName]: "B", - f: 104 -}; -const doc6_c_f105 = { - _id: 6, - [timeFieldName]: dateTime, - [metaFieldName]: "C", - f: 105 -}; -const doc7_c_f106 = { - _id: 7, - [timeFieldName]: dateTime, - [metaFieldName]: "C", - f: 106, -}; +import { + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, + metaFieldName, + testDeleteOne +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; // Query on the 'f' field leads to zero measurement delete. (function testZeroMeasurementDelete() { - jsTestLog("Running testZeroMeasurementDelete()"); testDeleteOne({ initialDocList: [doc1_a_nofields, doc4_b_f103, doc6_c_f105], filter: {f: 17}, @@ -105,7 +32,6 @@ const doc7_c_f106 = { // Query on the 'f' field leads to a partial bucket delete. (function testPartialBucketDelete() { - jsTestLog("Running testPartialBucketDelete()"); testDeleteOne({ initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102], filter: {f: 101}, @@ -116,7 +42,6 @@ const doc7_c_f106 = { // Query on the 'f' field leads to a full (single document) bucket delete. (function testFullBucketDelete() { - jsTestLog("Running testFullBucketDelete()"); testDeleteOne({ initialDocList: [doc2_a_f101], filter: {f: 101}, @@ -127,7 +52,6 @@ const doc7_c_f106 = { // Query on the 'tag' field matches all docs and deletes one. (function testMatchFullBucketOnlyDeletesOne() { - jsTestLog("Running testMatchFullBucketOnlyDeletesOne()"); testDeleteOne({ initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102], filter: {[metaFieldName]: "A"}, @@ -138,7 +62,6 @@ const doc7_c_f106 = { // Query on the 'tag' and metric field. (function testMetaAndMetricFilterOnlyDeletesOne() { - jsTestLog("Running testMetaAndMetricFilterOnlyDeletesOne()"); testDeleteOne({ initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102], filter: {[metaFieldName]: "A", f: {$gt: 100}}, @@ -149,7 +72,6 @@ const doc7_c_f106 = { // Query on the 'f' field matches docs in multiple buckets but only deletes from one. (function testMatchMultiBucketOnlyDeletesOne() { - jsTestLog("Running testMatchMultiBucketOnlyDeletesOne()"); testDeleteOne({ initialDocList: [ doc1_a_nofields, @@ -168,7 +90,6 @@ const doc7_c_f106 = { // Empty filter matches all docs but only deletes one. 
(function testEmptyFilterOnlyDeletesOne() { - jsTestLog("Running testEmptyFilterOnlyDeletesOne()"); testDeleteOne({ initialDocList: [ doc1_a_nofields, @@ -184,4 +105,3 @@ const doc7_c_f106 = { nDeleted: 1, }); })(); -})(); diff --git a/jstests/core/timeseries/timeseries_delete_with_meta.js b/jstests/core/timeseries/timeseries_delete_with_meta.js index 4f3becabcceca..f376babcb1535 100644 --- a/jstests/core/timeseries/timeseries_delete_with_meta.js +++ b/jstests/core/timeseries/timeseries_delete_with_meta.js @@ -10,19 +10,10 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. -load("jstests/libs/analyze_plan.js"); // For planHasStage(). -load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. - -if (FixtureHelpers.isMongos(db) && - TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo()) && - !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db.getMongo())) { - jsTestLog("Skipping test because the sharded time-series feature flag is disabled"); - return; -} +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {planHasStage} from "jstests/libs/analyze_plan.js"; +load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const testDB = db.getSiblingDB(jsTestName()); assert.commandWorked(testDB.dropDatabase()); @@ -181,4 +172,3 @@ TimeseriesTest.run((insert) => { includeMetaField: false }); }); -})(); diff --git a/jstests/core/timeseries/timeseries_delete_with_meta_concurrent.js b/jstests/core/timeseries/timeseries_delete_with_meta_concurrent.js index f5d752d80a463..7f8ff3c95eeb5 100644 --- a/jstests/core/timeseries/timeseries_delete_with_meta_concurrent.js +++ b/jstests/core/timeseries/timeseries_delete_with_meta_concurrent.js @@ -20,10 +20,6 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); load("jstests/libs/curop_helpers.js"); load('jstests/libs/parallel_shell_helpers.js'); @@ -114,5 +110,4 @@ validateDeleteIndex([objA], validateDeleteIndex([objA], [{q: {[metaFieldName]: {a: "A"}}, limit: 0}], ErrorCodes.NamespaceNotFound, - testCases.REPLACE_COLLECTION); -})(); + testCases.REPLACE_COLLECTION); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_explain_delete.js b/jstests/core/timeseries/timeseries_explain_delete.js index bbee36a45ab37..1021ef77811cb 100644 --- a/jstests/core/timeseries/timeseries_explain_delete.js +++ b/jstests/core/timeseries/timeseries_explain_delete.js @@ -5,16 +5,14 @@ * # We need a timeseries collection. * requires_timeseries, * # To avoid multiversion tests - * requires_fcv_70, + * requires_fcv_71, * # To avoid burn-in tests in in-memory build variants * requires_persistence, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanStage() and getExecutionStages(). 
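The removed `load()` above and the `import` added just below are one instance of the mechanical migration applied throughout this change: legacy `(function() { "use strict"; load(...); ... })();` wrappers become ES modules with `import`, and early exits switch from `return` to `quit()` because module top-level code has no enclosing function to return from. A generic illustration, not taken verbatim from any single file:

```js
// Before: IIFE wrapper, load(), and an early 'return' to skip the test.
(function() {
"use strict";
load("jstests/core/timeseries/libs/timeseries.js");  // For 'TimeseriesTest'.

if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) {
    jsTestLog("Skipping test: the feature flag is not enabled.");
    return;
}
// ... test body ...
})();
```

```js
// After: ES module import; quit() ends the test early since there is no enclosing function.
import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";

if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) {
    jsTestLog("Skipping test: the feature flag is not enabled.");
    quit();
}
// ... test body ...
```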
+import {getExecutionStages, getPlanStage} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const timeFieldName = "time"; const metaFieldName = "tag"; @@ -31,18 +29,21 @@ const docs = [ {_id: 3, [timeFieldName]: dateTime, [metaFieldName]: 2}, {_id: 4, [timeFieldName]: dateTime, [metaFieldName]: 2}, ]; +const closedBucketFilter = { + "control.closed": {$not: {$eq: true}} +}; function testDeleteExplain({ singleDeleteOp, expectedDeleteStageName, - expectedOpType, + expectedOpType = null, expectedBucketFilter, - expectedResidualFilter, + expectedResidualFilter = null, expectedNumDeleted, - expectedNumUnpacked, + expectedNumUnpacked = null, expectedUsedIndexName = null }) { - assert(expectedDeleteStageName === "TS_MODIFY" || expectedDeleteStageName === "BATCHED_DELETE"); + assert(expectedDeleteStageName === "TS_MODIFY" || expectedDeleteStageName === "DELETE"); // Prepares a timeseries collection. const coll = testDB.getCollection(collNamePrefix + testCaseId++); @@ -62,7 +63,6 @@ function testDeleteExplain({ const innerDeleteCommand = {delete: coll.getName(), deletes: [singleDeleteOp]}; const deleteExplainPlanCommand = {explain: innerDeleteCommand, verbosity: "queryPlanner"}; let explain = assert.commandWorked(testDB.runCommand(deleteExplainPlanCommand)); - jsTestLog(tojson(explain)); const deleteStage = getPlanStage(explain.queryPlanner.winningPlan, expectedDeleteStageName); assert.neq(null, deleteStage, @@ -71,25 +71,22 @@ function testDeleteExplain({ assert.eq(expectedOpType, deleteStage.opType, `TS_MODIFY opType is wrong: ${tojson(deleteStage)}`); - - if (Object.keys(expectedBucketFilter).length) { - expectedBucketFilter = { - "$and": [expectedBucketFilter, {"control.closed": {$not: {$eq: true}}}] - }; - } else { - expectedBucketFilter = {"control.closed": {$not: {$eq: true}}}; - } assert.eq(expectedBucketFilter, deleteStage.bucketFilter, `TS_MODIFY bucketFilter is wrong: ${tojson(deleteStage)}`); assert.eq(expectedResidualFilter, deleteStage.residualFilter, `TS_MODIFY residualFilter is wrong: ${tojson(deleteStage)}`); + } else { + const collScanStage = getPlanStage(explain.queryPlanner.winningPlan, "COLLSCAN"); + assert.neq(null, collScanStage, `COLLSCAN stage not found in the plan: ${tojson(explain)}`); + assert.eq(expectedBucketFilter, + collScanStage.filter, + `COLLSCAN filter is wrong: ${tojson(collScanStage)}`); } if (expectedUsedIndexName) { const ixscanStage = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN"); - jsTestLog(tojson(ixscanStage)); assert.eq(expectedUsedIndexName, ixscanStage.indexName, `Wrong index used: ${tojson(ixscanStage)}`); @@ -98,7 +95,6 @@ function testDeleteExplain({ // Verifies the TS_MODIFY stage in the execution stats. const deleteExplainStatsCommand = {explain: innerDeleteCommand, verbosity: "executionStats"}; explain = assert.commandWorked(testDB.runCommand(deleteExplainStatsCommand)); - jsTestLog(tojson(explain)); const execStages = getExecutionStages(explain); assert.gt(execStages.length, 0, `No execution stages found: ${tojson(explain)}`); assert.eq(expectedDeleteStageName, @@ -127,12 +123,11 @@ function testDeleteExplain({ q: {}, limit: 0, }, - // If the delete query is empty, we should use the BATCHED_DELETE plan. - expectedDeleteStageName: "BATCHED_DELETE", + // If the delete query is empty, we should use the DELETE plan. 
+ expectedDeleteStageName: "DELETE", expectedOpType: "deleteMany", - expectedBucketFilter: {}, - expectedResidualFilter: {}, - expectedNumDeleted: 2, + expectedBucketFilter: closedBucketFilter, + expectedNumDeleted: 4, }); })(); @@ -146,94 +141,137 @@ function testDeleteExplain({ }, expectedDeleteStageName: "TS_MODIFY", expectedOpType: "deleteMany", - // The bucket filter is the one with metaFieldName translated to 'meta'. - // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3" - expectedBucketFilter: {meta: {$eq: 2}}, + expectedBucketFilter: { + $and: + [closedBucketFilter, {meta: {$eq: 2}}, {"control.max._id": {$_internalExprGte: 3}}] + }, expectedResidualFilter: {_id: {$gte: 3}}, expectedNumDeleted: 2, expectedNumUnpacked: 1 }); })(); -(function testDeleteManyWithBucketFilterAndIndexHint() { +(function testDeleteManyWithBucketMetricFilterOnly() { testDeleteExplain({ singleDeleteOp: { // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so // 'expectedNumUnpacked' is exactly 1. - q: {[metaFieldName]: 2, _id: {$gte: 3}}, + q: {_id: {$lte: 3}}, limit: 0, - hint: {[metaFieldName]: 1} }, expectedDeleteStageName: "TS_MODIFY", expectedOpType: "deleteMany", - // The bucket filter is the one with metaFieldName translated to 'meta'. - // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3" - expectedBucketFilter: {meta: {$eq: 2}}, - expectedResidualFilter: {_id: {$gte: 3}}, - expectedNumDeleted: 2, - expectedNumUnpacked: 1, - expectedUsedIndexName: metaFieldName + "_1" - }); -})(); - -// TODO SERVER-75518: Enable following three test cases. -/* -(function testDeleteOneWithEmptyBucketFilter() { - testDeleteExplain({ - singleDeleteOp: { - // The non-meta field filter leads to a COLLSCAN below the TS_MODIFY stage and so - // 'expectedNumUnpacked' is 2. - q: {_id: 3}, - limit: 1, - }, - expectedDeleteStageName: "TS_MODIFY", - expectedOpType: "deleteOne", - // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3" - expectedBucketFilter: {}, - expectedResidualFilter: {_id: {$eq: 3}}, - expectedNumDeleted: 1, + expectedBucketFilter: + {$and: [closedBucketFilter, {"control.min._id": {$_internalExprLte: 3}}]}, + expectedResidualFilter: {_id: {$lte: 3}}, + expectedNumDeleted: 3, expectedNumUnpacked: 2 }); })(); -(function testDeleteOneWithBucketFilter() { - testDeleteExplain({ - singleDeleteOp: { - // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so - // 'expectedNumUnpacked' is exactly 1. - q: {[metaFieldName]: 2, _id: {$gte: 1}}, - limit: 1, - }, - expectedDeleteStageName: "TS_MODIFY", - expectedOpType: "deleteOne", - // The bucket filter is the one with metaFieldName translated to 'meta'. - // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 2" - expectedBucketFilter: {meta: {$eq: 2}}, - expectedResidualFilter: {_id: {$gte: 1}}, - expectedNumDeleted: 1, - expectedNumUnpacked: 1 - }); -})(); - -(function testDeleteOneWithBucketFilterAndIndexHint() { +(function testDeleteManyWithBucketFilterAndIndexHint() { testDeleteExplain({ singleDeleteOp: { // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so // 'expectedNumUnpacked' is exactly 1. 
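The expected bucket filters rewritten in this file all follow the same shape: the closed-bucket guard AND-ed with a meta predicate and/or `control.min`/`control.max` bounds derived from the measurement predicate, while the original measurement predicate is kept as the residual filter applied after unpacking. For instance, one deleteMany expectation above pairs a meta filter of 2 with a measurement predicate `_id: {$gte: 3}` and decomposes as:

```js
// Values taken from the expectation above. Only buckets whose control.max._id can reach 3 may
// contain a measurement with _id >= 3, so the bucket-level bound is on control.max.
const closedBucketFilter = {"control.closed": {$not: {$eq: true}}};

const expectedBucketFilter = {
    $and: [
        closedBucketFilter,                           // never target already-closed buckets
        {meta: {$eq: 2}},                             // the metaField ('tag') is stored as 'meta'
        {"control.max._id": {$_internalExprGte: 3}},  // bucket-level bound for _id >= 3
    ]
};
const expectedResidualFilter = {_id: {$gte: 3}};      // re-checked per measurement after unpacking
```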
- q: {[metaFieldName]: 2, _id: {$gte: 1}}, - limit: 1, + q: {[metaFieldName]: 2, _id: 3}, + limit: 0, hint: {[metaFieldName]: 1} }, expectedDeleteStageName: "TS_MODIFY", - expectedOpType: "deleteOne", - // The bucket filter is the one with metaFieldName translated to 'meta'. - // TODO SERVER-75424: The bucket filter should be further optimized to "control.min._id: 3" - expectedBucketFilter: {meta: {$eq: 2}}, - expectedResidualFilter: {_id: {$gte: 1}}, + expectedOpType: "deleteMany", + expectedBucketFilter: { + $and: [ + closedBucketFilter, + {meta: {$eq: 2}}, + { + $and: [ + {"control.min._id": {$_internalExprLte: 3}}, + {"control.max._id": {$_internalExprGte: 3}} + ] + } + ] + }, + expectedResidualFilter: {_id: {$eq: 3}}, expectedNumDeleted: 1, expectedNumUnpacked: 1, expectedUsedIndexName: metaFieldName + "_1" }); })(); -*/ -})(); + +if (FeatureFlagUtil.isPresentAndEnabled(db, "UpdateOneWithoutShardKey")) { + (function testDeleteOneWithEmptyBucketFilter() { + testDeleteExplain({ + singleDeleteOp: { + // The non-meta field filter leads to a COLLSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is 2. + q: {_id: 3}, + limit: 1, + }, + expectedDeleteStageName: "TS_MODIFY", + expectedOpType: "deleteOne", + expectedBucketFilter: { + $and: [ + closedBucketFilter, + { + $and: [ + {"control.min._id": {$_internalExprLte: 3}}, + {"control.max._id": {$_internalExprGte: 3}} + ] + } + ] + }, + expectedResidualFilter: {_id: {$eq: 3}}, + expectedNumDeleted: 1, + expectedNumUnpacked: 1 + }); + })(); + + (function testDeleteOneWithBucketFilter() { + testDeleteExplain({ + singleDeleteOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. + q: {[metaFieldName]: 2, _id: {$gte: 1}}, + limit: 1, + }, + expectedDeleteStageName: "TS_MODIFY", + expectedOpType: "deleteOne", + expectedBucketFilter: { + $and: [ + closedBucketFilter, + {meta: {$eq: 2}}, + {"control.max._id": {$_internalExprGte: 1}} + ] + }, + expectedResidualFilter: {_id: {$gte: 1}}, + expectedNumDeleted: 1, + expectedNumUnpacked: 1 + }); + })(); + + (function testDeleteOneWithBucketFilterAndIndexHint() { + testDeleteExplain({ + singleDeleteOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. + q: {[metaFieldName]: 2, _id: {$gte: 1}}, + limit: 1, + hint: {[metaFieldName]: 1} + }, + expectedDeleteStageName: "TS_MODIFY", + expectedOpType: "deleteOne", + expectedBucketFilter: { + $and: [ + closedBucketFilter, + {meta: {$eq: 2}}, + {"control.max._id": {$_internalExprGte: 1}} + ] + }, + expectedResidualFilter: {_id: {$gte: 1}}, + expectedNumDeleted: 1, + expectedNumUnpacked: 1, + expectedUsedIndexName: metaFieldName + "_1" + }); + })(); +} diff --git a/jstests/core/timeseries/timeseries_explain_update.js b/jstests/core/timeseries/timeseries_explain_update.js new file mode 100644 index 0000000000000..2ec666996c52c --- /dev/null +++ b/jstests/core/timeseries/timeseries_explain_update.js @@ -0,0 +1,293 @@ +/** + * Tests whether the explain works for a single update operation on a timeseries collection. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * # To avoid multiversion tests + * requires_fcv_71, + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * # TODO SERVER-66393 Remove this tag. + * featureFlagTimeseriesUpdatesSupport, + * # TODO SERVER-73726 Remove this tag. 
+ * assumes_unsharded_collection, + * ] + */ + +import { + getCallerName, + getTestDB, + makeBucketFilter, + metaFieldName, + prepareCollection, + timeFieldName +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; +import {getExecutionStages, getPlanStage} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +const dateTime = ISODate("2021-07-12T16:00:00Z"); + +const testDB = getTestDB(); + +const docs = [ + {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: 1}, + {_id: 2, [timeFieldName]: dateTime, [metaFieldName]: 1}, + {_id: 3, [timeFieldName]: dateTime, [metaFieldName]: 2}, + {_id: 4, [timeFieldName]: dateTime, [metaFieldName]: 2}, +]; + +function testUpdateExplain({ + singleUpdateOp, + expectedUpdateStageName, + expectedOpType = null, + expectedBucketFilter, + expectedResidualFilter = null, + expectedNumUpdated, + expectedNumMatched = expectedNumUpdated, + expectedNumUpserted = 0, + expectedNumUnpacked = null, + expectedUsedIndexName = null +}) { + assert(expectedUpdateStageName === "TS_MODIFY" || expectedUpdateStageName === "UPDATE"); + + // Prepares a timeseries collection. + const collName = getCallerName(); + const coll = testDB.getCollection(collName); + prepareCollection({collName, initialDocList: docs}); + + // Creates an index same as the one in the hint so as to verify that the index hint is honored. + if (singleUpdateOp.hasOwnProperty("hint")) { + assert.commandWorked(coll.createIndex(singleUpdateOp.hint)); + } + + // Verifies the TS_MODIFY stage in the plan. + const innerUpdateCommand = {update: coll.getName(), updates: [singleUpdateOp]}; + const updateExplainPlanCommand = {explain: innerUpdateCommand, verbosity: "queryPlanner"}; + let explain = assert.commandWorked(testDB.runCommand(updateExplainPlanCommand)); + const updateStage = getPlanStage(explain.queryPlanner.winningPlan, expectedUpdateStageName); + assert.neq(null, + updateStage, + `${expectedUpdateStageName} stage not found in the plan: ${tojson(explain)}`); + if (expectedUpdateStageName === "TS_MODIFY") { + assert.eq(expectedOpType, + updateStage.opType, + `TS_MODIFY opType is wrong: ${tojson(updateStage)}`); + assert.eq(expectedBucketFilter, + updateStage.bucketFilter, + `TS_MODIFY bucketFilter is wrong: ${tojson(updateStage)}`); + assert.eq(expectedResidualFilter, + updateStage.residualFilter, + `TS_MODIFY residualFilter is wrong: ${tojson(updateStage)}`); + } else { + const collScanStage = getPlanStage(explain.queryPlanner.winningPlan, "COLLSCAN"); + assert.neq(null, collScanStage, `COLLSCAN stage not found in the plan: ${tojson(explain)}`); + assert.eq(expectedBucketFilter, + collScanStage.filter, + `COLLSCAN filter is wrong: ${tojson(collScanStage)}`); + } + + if (expectedUsedIndexName) { + const ixscanStage = getPlanStage(explain.queryPlanner.winningPlan, "IXSCAN"); + assert.eq(expectedUsedIndexName, + ixscanStage.indexName, + `Wrong index used: ${tojson(ixscanStage)}`); + } + + // Verifies the TS_MODIFY stage in the execution stats. 
+ const updateExplainStatsCommand = {explain: innerUpdateCommand, verbosity: "executionStats"}; + explain = assert.commandWorked(testDB.runCommand(updateExplainStatsCommand)); + const execStages = getExecutionStages(explain); + assert.gt(execStages.length, 0, `No execution stages found: ${tojson(explain)}`); + assert.eq(expectedUpdateStageName, + execStages[0].stage, + `TS_MODIFY stage not found in executionStages: ${tojson(explain)}`); + if (expectedUpdateStageName === "TS_MODIFY") { + assert.eq(expectedNumUpdated, + execStages[0].nMeasurementsUpdated, + `Got wrong nMeasurementsUpdated: ${tojson(execStages[0])}`); + assert.eq(expectedNumMatched, + execStages[0].nMeasurementsMatched, + `Got wrong nMeasurementsMatched: ${tojson(execStages[0])}`); + assert.eq(expectedNumUpserted, + execStages[0].nMeasurementsUpserted, + `Got wrong nMeasurementsUpserted: ${tojson(execStages[0])}`); + assert.eq(expectedNumUnpacked, + execStages[0].nBucketsUnpacked, + `Got wrong nBucketsUnpacked: ${tojson(execStages[0])}`); + } else { + assert.eq(expectedNumUpdated, + execStages[0].nWouldModify, + `Got wrong nWouldModify: ${tojson(execStages[0])}`); + assert.eq(expectedNumMatched, + execStages[0].nMatched, + `Got wrong nMatched: ${tojson(execStages[0])}`); + } + + assert.sameMembers( + docs, coll.find().toArray(), "Explain command must not touch documents in the collection"); +} + +(function testUpdateManyWithEmptyQuery() { + testUpdateExplain({ + singleUpdateOp: { + q: {}, + u: {$set: {[metaFieldName]: 3}}, + multi: true, + }, + // If the update query is empty, we should use the UPDATE plan. + expectedUpdateStageName: "UPDATE", + expectedOpType: "updateMany", + expectedBucketFilter: makeBucketFilter(), + expectedNumUpdated: 4, + }); +})(); + +(function testUpdateManyWithBucketFilter() { + testUpdateExplain({ + singleUpdateOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. + q: {[metaFieldName]: 2, _id: {$gte: 3}}, + u: {$set: {[metaFieldName]: 2}}, + multi: true, + }, + expectedUpdateStageName: "TS_MODIFY", + expectedOpType: "updateMany", + expectedBucketFilter: + makeBucketFilter({meta: {$eq: 2}}, {"control.max._id": {$_internalExprGte: 3}}), + expectedResidualFilter: {_id: {$gte: 3}}, + expectedNumUpdated: 0, + expectedNumMatched: 2, + expectedNumUnpacked: 1 + }); +})(); + +(function testUpdateManyWithBucketFilterAndIndexHint() { + testUpdateExplain({ + singleUpdateOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. 
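This new test (and the findAndModify tests added later in this change) expresses its expected bucket filters through `makeBucketFilter` from `timeseries_writes_util.js`, which is not shown in this diff. Judging only from the call sites, and from the equivalent inline `closedBucketFilter` composition in `timeseries_explain_delete.js` above, the helper presumably ANDs its arguments with the closed-bucket guard, roughly:

```js
// Hypothetical sketch inferred from the call sites; the real helper in
// timeseries_writes_util.js may be implemented differently.
const closedBucketFilter = {"control.closed": {$not: {$eq: true}}};

function makeBucketFilter(...args) {
    // makeBucketFilter()          -> the closed-bucket guard alone
    // makeBucketFilter(a)         -> {$and: [closedBucketFilter, a]}
    // makeBucketFilter(a, b, ...) -> {$and: [closedBucketFilter, a, b, ...]}
    return args.length === 0 ? closedBucketFilter : {$and: [closedBucketFilter, ...args]};
}
```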
+ q: {[metaFieldName]: 2, _id: 3}, + u: {$set: {[metaFieldName]: 3}}, + multi: true, + hint: {[metaFieldName]: 1} + }, + expectedUpdateStageName: "TS_MODIFY", + expectedOpType: "updateMany", + expectedBucketFilter: makeBucketFilter({meta: {$eq: 2}}, { + $and: [ + {"control.min._id": {$_internalExprLte: 3}}, + {"control.max._id": {$_internalExprGte: 3}} + ] + }), + expectedResidualFilter: {_id: {$eq: 3}}, + expectedNumUpdated: 1, + expectedNumUnpacked: 1, + expectedUsedIndexName: metaFieldName + "_1" + }); +})(); + +(function testUpsert() { + testUpdateExplain({ + singleUpdateOp: { + q: {[metaFieldName]: 100}, + u: {$set: {[timeFieldName]: dateTime}}, + multi: true, + upsert: true, + }, + expectedUpdateStageName: "TS_MODIFY", + expectedOpType: "updateMany", + expectedBucketFilter: makeBucketFilter({meta: {$eq: 100}}), + expectedResidualFilter: {}, + expectedNumUpdated: 0, + expectedNumMatched: 0, + expectedNumUnpacked: 0, + expectedNumUpserted: 1, + }); +})(); + +(function testUpsertNoop() { + testUpdateExplain({ + singleUpdateOp: { + q: {[metaFieldName]: 1}, + u: {$set: {f: 10}}, + multi: true, + upsert: true, + }, + expectedUpdateStageName: "TS_MODIFY", + expectedOpType: "updateMany", + expectedBucketFilter: makeBucketFilter({meta: {$eq: 1}}), + expectedResidualFilter: {}, + expectedNumUpdated: 2, + expectedNumMatched: 2, + expectedNumUnpacked: 1, + expectedNumUpserted: 0, + }); +})(); + +// TODO SERVER-73726 Reevaluate whether this exclusion is needed. +if (FeatureFlagUtil.isPresentAndEnabled(db, "UpdateOneWithoutShardKey")) { + (function testUpdateOneWithEmptyBucketFilter() { + testUpdateExplain({ + singleUpdateOp: { + // The non-meta field filter leads to a COLLSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is 2. + q: {_id: 3}, + u: {$set: {[metaFieldName]: 3}}, + multi: false, + }, + expectedUpdateStageName: "TS_MODIFY", + expectedOpType: "updateOne", + expectedBucketFilter: makeBucketFilter({ + $and: [ + {"control.min._id": {$_internalExprLte: 3}}, + {"control.max._id": {$_internalExprGte: 3}} + ] + }), + expectedResidualFilter: {_id: {$eq: 3}}, + expectedNumUpdated: 1, + expectedNumUnpacked: 1 + }); + })(); + + (function testUpdateOneWithBucketFilter() { + testUpdateExplain({ + singleUpdateOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. + q: {[metaFieldName]: 2, _id: {$gte: 1}}, + u: {$set: {[metaFieldName]: 3}}, + multi: false, + }, + expectedUpdateStageName: "TS_MODIFY", + expectedOpType: "updateOne", + expectedBucketFilter: + makeBucketFilter({meta: {$eq: 2}}, {"control.max._id": {$_internalExprGte: 1}}), + expectedResidualFilter: {_id: {$gte: 1}}, + expectedNumUpdated: 1, + expectedNumUnpacked: 1 + }); + })(); + + (function testUpdateOneWithBucketFilterAndIndexHint() { + testUpdateExplain({ + singleUpdateOp: { + // The meta field filter leads to a FETCH/IXSCAN below the TS_MODIFY stage and so + // 'expectedNumUnpacked' is exactly 1. 
+ q: {[metaFieldName]: 2, _id: {$gte: 1}}, + u: {$set: {[metaFieldName]: 3}}, + multi: false, + hint: {[metaFieldName]: 1} + }, + expectedUpdateStageName: "TS_MODIFY", + expectedOpType: "updateOne", + expectedBucketFilter: + makeBucketFilter({meta: {$eq: 2}}, {"control.max._id": {$_internalExprGte: 1}}), + expectedResidualFilter: {_id: {$gte: 1}}, + expectedNumUpdated: 1, + expectedNumUnpacked: 1, + expectedUsedIndexName: metaFieldName + "_1" + }); + })(); +} diff --git a/jstests/core/timeseries/timeseries_field_parsed_as_bson.js b/jstests/core/timeseries/timeseries_field_parsed_as_bson.js new file mode 100644 index 0000000000000..6e4d582825bc0 --- /dev/null +++ b/jstests/core/timeseries/timeseries_field_parsed_as_bson.js @@ -0,0 +1,68 @@ +/** + * Tests that timeseries timeField is parsed as bson. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * requires_fcv_71, + * ] + */ + +(function() { +'use strict'; + +const collName = "timeseries_field_parsed_as_bson"; +const coll = db.getCollection(collName); + +coll.drop(); +const timeField = "badInput']}}}}}}"; +assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: timeField}})); + +const timeseriesCollInfo = db.getCollectionInfos({name: "system.buckets." + collName})[0]; +jsTestLog("Timeseries system collection info: " + tojson(timeseriesCollInfo)); +const properties = {}; +properties[timeField] = { + "bsonType": "date" +}; +const expectedValidator = { + "$jsonSchema": { + "bsonType": "object", + "required": ["_id", "control", "data"], + "properties": { + "_id": {"bsonType": "objectId"}, + "control": { + "bsonType": "object", + "required": ["version", "min", "max"], + "properties": { + "version": {"bsonType": "number"}, + "min": + {"bsonType": "object", "required": [timeField], "properties": properties}, + "max": + {"bsonType": "object", "required": [timeField], "properties": properties}, + "closed": {"bsonType": "bool"}, + "count": {"bsonType": "number", "minimum": 1} + }, + "additionalProperties": false + }, + "data": {"bsonType": "object"}, + "meta": {} + }, + "additionalProperties": false + } +}; + +assert(timeseriesCollInfo.options); +assert.eq(timeseriesCollInfo.options.validator, expectedValidator); + +const doc = { + a: 1, + [timeField]: new Date("2021-01-01") +}; +assert.commandWorked(coll.insert(doc)); +assert.docEq([doc], coll.aggregate([{$match: {}}, {$project: {_id: 0}}]).toArray()); + +coll.drop(); +assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "\\"}})); +coll.drop(); +assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "\\\\"}})); +})(); diff --git a/jstests/core/timeseries/timeseries_find_and_modify_remove.js b/jstests/core/timeseries/timeseries_find_and_modify_remove.js new file mode 100644 index 0000000000000..6d334b2214a2e --- /dev/null +++ b/jstests/core/timeseries/timeseries_find_and_modify_remove.js @@ -0,0 +1,223 @@ +/** + * Tests findAndModify with remove: true on a timeseries collection. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * # findAndModify with remove: true on a timeseries collection is supported since 7.1 + * requires_fcv_71, + * # TODO SERVER-76583: Remove following two tags. 
+ * does_not_support_retryable_writes, + * requires_non_retryable_writes, + * ] + */ + +import { + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, + makeBucketFilter, + metaFieldName, + testFindOneAndRemove, + timeFieldName +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; + +// findAndModify with a sort option is not supported. +(function testSortOptionFails() { + testFindOneAndRemove({ + initialDocList: [doc1_a_nofields, doc4_b_f103, doc6_c_f105], + cmd: {filter: {f: {$gt: 100}}, sort: {f: 1}}, + res: {errorCode: ErrorCodes.InvalidOptions}, + }); +})(); + +// Query on the 'f' field leads to zero measurement delete. +(function testZeroMeasurementDelete() { + testFindOneAndRemove({ + initialDocList: [doc1_a_nofields, doc4_b_f103, doc6_c_f105], + cmd: {filter: {f: 17}}, + res: { + expectedDocList: [doc1_a_nofields, doc4_b_f103, doc6_c_f105], + nDeleted: 0, + bucketFilter: makeBucketFilter({ + $and: [ + {"control.min.f": {$_internalExprLte: 17}}, + {"control.max.f": {$_internalExprGte: 17}}, + ] + }), + residualFilter: {f: {$eq: 17}}, + nBucketsUnpacked: 0, + nReturned: 0, + }, + }); +})(); + +// Query on the 'f' field leads to a partial bucket delete. +(function testPartialBucketDelete() { + testFindOneAndRemove({ + initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102], + cmd: {filter: {f: 101}}, + res: + {expectedDocList: [doc1_a_nofields, doc3_a_f102], nDeleted: 1, deletedDoc: doc2_a_f101}, + }); +})(); + +// Query on the 'f' field leads to a partial bucket delete and 'fields' project the returned doc. +(function testPartialBucketDeleteWithFields() { + testFindOneAndRemove({ + initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102], + cmd: {filter: {f: 102}, fields: {f: 1, [metaFieldName]: 1, _id: 0}}, + res: { + expectedDocList: [doc1_a_nofields, doc2_a_f101], + nDeleted: 1, + deletedDoc: {f: 102, [metaFieldName]: "A"}, + rootStage: "PROJECTION_DEFAULT", + bucketFilter: makeBucketFilter({ + $and: [ + {"control.min.f": {$_internalExprLte: 102}}, + {"control.max.f": {$_internalExprGte: 102}}, + ] + }), + residualFilter: {f: {$eq: 102}}, + nBucketsUnpacked: 1, + nReturned: 1, + }, + }); +})(); + +// Query on the 'f' field leads to a full (single document) bucket delete. +(function testFullBucketDelete() { + testFindOneAndRemove({ + initialDocList: [doc2_a_f101], + cmd: {filter: {f: 101}}, + res: { + expectedDocList: [], + nDeleted: 1, + deletedDoc: doc2_a_f101, + bucketFilter: makeBucketFilter({ + $and: [ + {"control.min.f": {$_internalExprLte: 101}}, + {"control.max.f": {$_internalExprGte: 101}}, + ] + }), + residualFilter: {f: {$eq: 101}}, + nBucketsUnpacked: 1, + nReturned: 1, + }, + }); +})(); + +// Query on the 'tag' field matches all docs and deletes one. +(function testMatchFullBucketOnlyDeletesOne() { + testFindOneAndRemove({ + initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102], + cmd: {filter: {[metaFieldName]: "A"}}, + // Don't validate exact results as we could delete any doc. + res: { + nDeleted: 1, + bucketFilter: makeBucketFilter({meta: {$eq: "A"}}), + residualFilter: {}, + nBucketsUnpacked: 1, + nReturned: 1, + }, + }); +})(); + +// Query on the 'tag' and metric field. 
+(function testMetaAndMetricFilterOnlyDeletesOne() { + testFindOneAndRemove({ + initialDocList: [doc1_a_nofields, doc2_a_f101, doc3_a_f102], + cmd: {filter: {[metaFieldName]: "A", f: {$gt: 101}}}, + res: { + nDeleted: 1, + deletedDoc: doc3_a_f102, + bucketFilter: + makeBucketFilter({meta: {$eq: "A"}}, {"control.max.f": {$_internalExprGt: 101}}), + residualFilter: {f: {$gt: 101}}, + nBucketsUnpacked: 1, + nReturned: 1, + } + }); +})(); + +// Query on the 'f' field matches docs in multiple buckets but only deletes from one. +(function testMatchMultiBucketOnlyDeletesOne() { + testFindOneAndRemove({ + initialDocList: [ + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106 + ], + cmd: {filter: {f: {$gt: 101}}}, + // Don't validate exact results as we could delete one of a few docs. + res: { + nDeleted: 1, + bucketFilter: makeBucketFilter({"control.max.f": {$_internalExprGt: 101}}), + residualFilter: {f: {$gt: 101}}, + nBucketsUnpacked: 1, + nReturned: 1, + }, + }); +})(); + +// Empty filter matches all docs but only deletes one. +(function testEmptyFilterOnlyDeletesOne() { + testFindOneAndRemove({ + initialDocList: [ + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106 + ], + cmd: {filter: {}}, + // Don't validate exact results as we could delete any doc. + res: { + nDeleted: 1, + bucketFilter: makeBucketFilter({}), + residualFilter: {}, + nBucketsUnpacked: 1, + nReturned: 1 + }, + }); +})(); + +// Verifies that the collation is properly propagated to the bucket-level filter when the +// query-level collation overrides the collection default collation. +(function testFindAndRemoveWithCollation() { + testFindOneAndRemove({ + initialDocList: [ + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106 + ], + cmd: { + filter: {[metaFieldName]: "a", f: {$gt: 101}}, + /*caseInsensitive collation*/ + collation: {locale: "en", strength: 2} + }, + res: { + nDeleted: 1, + deletedDoc: doc3_a_f102, + bucketFilter: + makeBucketFilter({meta: {$eq: "a"}}, {"control.max.f": {$_internalExprGt: 101}}), + residualFilter: {f: {$gt: 101}}, + nBucketsUnpacked: 1, + nReturned: 1, + }, + }); +})(); diff --git a/jstests/core/timeseries/timeseries_find_and_modify_update.js b/jstests/core/timeseries/timeseries_find_and_modify_update.js new file mode 100644 index 0000000000000..c2c5bb007f9cb --- /dev/null +++ b/jstests/core/timeseries/timeseries_find_and_modify_update.js @@ -0,0 +1,538 @@ +/** + * Tests singleton updates on a time-series collection. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * featureFlagTimeseriesUpdatesSupport, + * # TODO SERVER-76454 Remove the following two tags. + * does_not_support_retryable_writes, + * requires_non_retryable_writes, + * ] + */ + +import { + makeBucketFilter, + metaFieldName, + testFindOneAndUpdate, + timeFieldName +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. + +/** + * Tests op-style updates. 
+ */ +{ + const doc_m1_a_b = + {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), [metaFieldName]: 1, _id: 1, a: 1, b: 1}; + const doc_a_b = {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), _id: 1, a: 1, b: 1}; + const doc_m1_b = + {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), [metaFieldName]: 1, _id: 1, b: 1}; + const doc_m2_b = + {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), [metaFieldName]: 2, _id: 1, b: 1}; + const doc_m1_arrayA_b = { + [timeFieldName]: ISODate("2023-02-06T19:19:01Z"), + [metaFieldName]: 1, + _id: 1, + a: ["arr", "ay"], + b: 1 + }; + const doc_stringM1_a_b = { + [timeFieldName]: ISODate("2023-02-06T19:19:01Z"), + [metaFieldName]: "1", + _id: 1, + a: 1, + b: 1 + }; + const doc_m1_c_d = + {[timeFieldName]: ISODate("2023-02-06T19:19:02Z"), [metaFieldName]: 1, _id: 2, c: 1, d: 1}; + const doc_m1_a_b_later = + {[timeFieldName]: ISODate("2023-02-07T19:19:01Z"), [metaFieldName]: 1, _id: 1, a: 1, b: 1}; + const query_m1_a1 = {a: {$eq: 1}, [metaFieldName]: {$eq: 1}}; + const query_m1_b1 = {b: {$eq: 1}, [metaFieldName]: {$eq: 1}}; + + // Verifies that sort option is rejected. + (function testSortOptionFails() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: {}, + update: {$unset: {a: ""}}, + sort: {_id: 1}, + }, + res: {errorCode: ErrorCodes.InvalidOptions}, + }); + })(); + + // Metric field update: unset field and return the old doc. + (function testUnsetMetricField() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: query_m1_a1, + update: {$unset: {a: ""}}, + }, + res: { + resultDocList: [doc_m1_b, doc_m1_c_d], + returnDoc: doc_m1_a_b, + bucketFilter: makeBucketFilter({meta: {$eq: 1}}, { + $and: [ + {"control.min.a": {$_internalExprLte: 1}}, + {"control.max.a": {$_internalExprGte: 1}} + ] + }), + residualFilter: {a: {$eq: 1}}, + nBucketsUnpacked: 1, + nMatched: 1, + nModified: 1, + }, + }); + })(); + + // Metric field update: add new field and return the new doc. + (function testAddNewMetricField() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_b, doc_m1_c_d], + cmd: {filter: query_m1_b1, update: {$set: {a: 1}}, returnNew: true}, + res: { + resultDocList: [doc_m1_a_b, doc_m1_c_d], + returnDoc: doc_m1_a_b, + bucketFilter: makeBucketFilter({meta: {$eq: 1}}, { + $and: [ + {"control.min.b": {$_internalExprLte: 1}}, + {"control.max.b": {$_internalExprGte: 1}} + ] + }), + residualFilter: {b: {$eq: 1}}, + nBucketsUnpacked: 1, + nMatched: 1, + nModified: 1, + }, + }); + })(); + + // Metric field update: change field type (integer to array) with 'fields' option. + (function testChangeFieldTypeWithFields() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: query_m1_a1, + update: {$set: {a: ["arr", "ay"]}}, + fields: {a: 1, b: 1, _id: 0}, + }, + res: {resultDocList: [doc_m1_arrayA_b, doc_m1_c_d], returnDoc: {a: 1, b: 1}}, + }); + })(); + + // Metric field update: no-op with non-existent field to unset. + (function testMatchOneNoopUpdate() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: query_m1_a1, + update: {$unset: {z: ""}}, + }, + res: { + resultDocList: [doc_m1_a_b, doc_m1_c_d], + returnDoc: doc_m1_a_b, + }, + }); + })(); + + // Metric field update: no-op with non-existent field to unset and returnNew. 
+ (function testMatchOneNoopUpdateWithReturnNew() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: query_m1_a1, + update: {$unset: {z: ""}}, + returnNew: true, + }, + res: { + resultDocList: [doc_m1_a_b, doc_m1_c_d], + // The return doc is the same as the original doc, since the update is a no-op. + returnDoc: doc_m1_a_b, + }, + }); + })(); + + // Metric field update: no-op with non-existent field to unset. + (function testMatchMultipleNoopUpdate() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: {}, + update: {$unset: {z: ""}}, + }, + res: { + resultDocList: [doc_m1_a_b, doc_m1_c_d], + returnDoc: doc_m1_a_b, + bucketFilter: makeBucketFilter({}), + residualFilter: {}, + nBucketsUnpacked: 1, + nMatched: 1, + nModified: 0, + nUpserted: 0, + }, + }); + })(); + + // Metric field update: match multiple docs, only update one, returning the new doc. + (function testMatchMultipleUpdateOne() { + const resultDoc = Object.assign({}, doc_a_b, {a: 100}); + testFindOneAndUpdate({ + initialDocList: [doc_a_b, doc_m1_a_b_later], + cmd: { + filter: {}, + update: {$set: {a: 100}}, + returnNew: true, + }, + res: { + resultDocList: [resultDoc, doc_m1_a_b_later], + returnDoc: resultDoc, + bucketFilter: makeBucketFilter({}), + residualFilter: {}, + nBucketsUnpacked: 1, + nMatched: 1, + nModified: 1, + }, + }); + })(); + + // Match and update zero docs. + (function testMatchNone() { + testFindOneAndUpdate({ + initialDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: {[metaFieldName]: {z: "Z"}}, + update: {$set: {a: 100}}, + }, + res: { + resultDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d], + bucketFilter: makeBucketFilter({meta: {$eq: {z: "Z"}}}), + residualFilter: {}, + nBucketsUnpacked: 0, + nMatched: 0, + nModified: 0, + nUpserted: 0, + }, + }); + })(); + + // Meta-only update only updates one. + (function testMetaOnlyUpdateOne() { + const returnDoc = Object.assign({}, doc_m1_a_b, {[metaFieldName]: 2}); + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: {[metaFieldName]: 1}, + update: {$set: {[metaFieldName]: 2}}, + returnNew: true, + }, + res: { + resultDocList: [doc_m1_c_d, returnDoc], + returnDoc: returnDoc, + bucketFilter: makeBucketFilter({meta: {$eq: 1}}), + residualFilter: {}, + nBucketsUnpacked: 1, + nMatched: 1, + nModified: 1, + nUpserted: 0, + }, + }); + })(); + + // Meta field update: remove meta field. + (function testRemoveMetaField() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: query_m1_a1, + update: {$unset: {[metaFieldName]: ""}}, + }, + res: {resultDocList: [doc_a_b, doc_m1_c_d], returnDoc: doc_m1_a_b}, + }); + })(); + + // Meta field update: add meta field. + (function testAddMetaField() { + testFindOneAndUpdate({ + initialDocList: [doc_a_b], + cmd: { + filter: {}, + update: {$set: {[metaFieldName]: 1}}, + }, + res: {resultDocList: [doc_m1_a_b], returnDoc: doc_a_b}, + }); + })(); + + // Meta field update: update meta field. + (function testUpdateMetaField() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_b], + cmd: { + filter: {}, + update: {$set: {[metaFieldName]: 2}}, + }, + res: {resultDocList: [doc_m2_b], returnDoc: doc_m1_b}, + }); + })(); + + // Meta field update: update meta field to different type (integer to string). 
+ (function testChangeMetaFieldType() { + testFindOneAndUpdate({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + cmd: { + filter: query_m1_a1, + update: {$set: {[metaFieldName]: "1"}}, + }, + res: {resultDocList: [doc_stringM1_a_b, doc_m1_c_d], returnDoc: doc_m1_a_b}, + }); + })(); +} + +/** + * Tests pipeline-style updates. + */ +{ + const timestamp2023 = ISODate("2023-02-06T19:19:00Z"); + const doc_2023_m1_a1 = {[timeFieldName]: timestamp2023, [metaFieldName]: 1, _id: 1, a: 1}; + const doc_2023_m2_a1_newField = + {[timeFieldName]: timestamp2023, [metaFieldName]: 2, _id: 1, a: 1, "newField": 42}; + + // Update metaField and add a new field. + (function testPipelineUpdateSetMultipleFields() { + testFindOneAndUpdate({ + initialDocList: [doc_2023_m1_a1], + cmd: { + filter: {a: {$eq: 1}, [metaFieldName]: {$eq: 1}}, + update: [ + {$set: {[metaFieldName]: 2}}, + {$set: {"newField": 42}}, + ], + }, + res: { + resultDocList: [doc_2023_m2_a1_newField], + returnDoc: doc_2023_m1_a1, + bucketFilter: makeBucketFilter({meta: {$eq: 1}}, { + $and: [ + {"control.min.a": {$_internalExprLte: 1}}, + {"control.max.a": {$_internalExprGte: 1}}, + ] + }), + residualFilter: {a: {$eq: 1}}, + nBucketsUnpacked: 1, + nMatched: 1, + nModified: 1, + nUpserted: 0, + }, + }); + })(); + + // Expect removal of the timeField to fail. + (function testPipelineRemoveTimeField() { + testFindOneAndUpdate({ + initialDocList: [doc_2023_m1_a1], + cmd: { + filter: {}, + update: [{$set: {[metaFieldName]: 2}}, {$unset: timeFieldName}], + }, + res: { + errorCode: ErrorCodes.BadValue, + resultDocList: [doc_2023_m1_a1], + }, + }); + })(); + + // Expect changing the type of the timeField to fail. + (function testPipelineChangeTimeFieldType() { + testFindOneAndUpdate({ + initialDocList: [doc_2023_m1_a1], + cmd: { + filter: {}, + update: [{$set: {[timeFieldName]: "string"}}], + }, + res: { + errorCode: ErrorCodes.BadValue, + resultDocList: [doc_2023_m1_a1], + }, + }); + })(); +} + +/** + * Tests full measurement replacement. + */ +{ + const timestamp2023 = ISODate("2023-02-06T19:19:00Z"); + const doc_t2023_m1_id1_a1 = {[timeFieldName]: timestamp2023, [metaFieldName]: 1, _id: 1, a: 1}; + const doc_t2023_m2_id2_a2 = {[timeFieldName]: timestamp2023, [metaFieldName]: 2, _id: 2, a: 2}; + const doc_t2023_m2_noId_a2 = {[timeFieldName]: timestamp2023, [metaFieldName]: 2, a: 2}; + + // Full measurement replacement: update every field in the document, including the _id. + (function testReplacementUpdateChangeId() { + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1], + cmd: { + filter: {}, + update: doc_t2023_m2_id2_a2, + }, + res: {resultDocList: [doc_t2023_m2_id2_a2], returnDoc: doc_t2023_m1_id1_a1}, + }); + })(); + + // Full measurement replacement: update every field in the document, except the _id. + (function testReplacementUpdateNoId() { + const returnDoc = {[timeFieldName]: timestamp2023, [metaFieldName]: 2, a: 2, _id: 1}; + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + cmd: { + filter: {_id: 1}, + update: doc_t2023_m2_noId_a2, + returnNew: true, + }, + res: { + resultDocList: [ + doc_t2023_m2_id2_a2, + returnDoc, + ], + returnDoc: returnDoc, + }, + }); + })(); + + // Replacement with no time field. 
+ (function testReplacementUpdateNoTimeField() { + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + cmd: { + filter: {_id: 1}, + update: {[metaFieldName]: 1, a: 1, _id: 10}, + }, + res: { + errorCode: ErrorCodes.BadValue, + resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + } + }); + })(); + + // Replacement with time field of the wrong type. + (function testReplacementUpdateWrongTypeTimeField() { + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + cmd: { + filter: {_id: 1}, + update: {[metaFieldName]: 1, a: 1, _id: 10, [timeFieldName]: "string"}, + }, + res: { + errorCode: ErrorCodes.BadValue, + resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + }, + }); + })(); + + // Replacement that results in two duplicate measurements. + (function testReplacementUpdateDuplicateIds() { + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + cmd: { + filter: {_id: 1}, + update: doc_t2023_m2_id2_a2, + }, + res: { + resultDocList: [doc_t2023_m2_id2_a2, doc_t2023_m2_id2_a2], + returnDoc: doc_t2023_m1_id1_a1, + }, + }); + })(); + + // Replacement that only references the meta field. Still fails because of the missing time + // field. + (function testReplacementMetaOnly() { + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + cmd: { + filter: {[metaFieldName]: 1}, + update: {[metaFieldName]: 3}, + }, + res: { + errorCode: ErrorCodes.BadValue, + resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + }, + }); + })(); + + // Tests upsert with full measurement & returnNew = false. + (function testUpsert() { + if (FixtureHelpers.isMongos(db)) { + jsTestLog("Skipping findAndModify upsert test on sharded cluster."); + return; + } + + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1], + cmd: { + filter: {[metaFieldName]: {$eq: 2}}, + update: doc_t2023_m2_id2_a2, + // returnNew defaults to false. + upsert: true, + }, + res: { + resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + returnDoc: null, + bucketFilter: makeBucketFilter({meta: {$eq: 2}}), + residualFilter: {}, + nBucketsUnpacked: 0, + nMatched: 0, + nModified: 0, + nUpserted: 1, + }, + }); + })(); + + // Tests upsert with full measurement & returnNew = true. + (function testUpsertWithReturnNew() { + if (FixtureHelpers.isMongos(db)) { + jsTestLog("Skipping findAndModify upsert test on sharded cluster."); + return; + } + + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1], + cmd: { + filter: {[metaFieldName]: {$eq: 2}}, + update: doc_t2023_m2_id2_a2, + returnNew: true, + upsert: true, + }, + res: { + resultDocList: [doc_t2023_m1_id1_a1, doc_t2023_m2_id2_a2], + returnDoc: doc_t2023_m2_id2_a2, + bucketFilter: makeBucketFilter({meta: {$eq: 2}}), + residualFilter: {}, + nBucketsUnpacked: 0, + nMatched: 0, + nModified: 0, + nUpserted: 1, + }, + }); + })(); + + // Tests upsert with full measurement: no-op when the query matches but update is a no-op. 
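The `cmd` objects passed to `testFindOneAndUpdate` in this file use helper-level option names (`filter`, `update`, `fields`, `sort`, `returnNew`, `upsert`). Assuming the shared helper forwards them to a raw findAndModify command unchanged (the helper itself is not in this diff), the upsert case above corresponds roughly to the following; the field names assume the util's time/meta fields are `time`/`tag` as in the other tests in this directory.

```js
// Illustrative only; requires the same server support the surrounding tests are gated on.
const exampleDB = db.getSiblingDB("timeseries_find_and_modify_upsert_example");
assert.commandWorked(exampleDB.dropDatabase());
assert.commandWorked(exampleDB.createCollection(
    "coll", {timeseries: {timeField: "time", metaField: "tag"}}));

const res = assert.commandWorked(exampleDB.runCommand({
    findAndModify: "coll",
    query: {tag: {$eq: 2}},                                               // helper: cmd.filter
    update: {time: ISODate("2023-02-06T19:19:00Z"), tag: 2, _id: 2, a: 2},
    upsert: true,
    new: true,                                                            // helper: cmd.returnNew
}));
assert.eq(1, res.lastErrorObject.n);
assert.eq(false, res.lastErrorObject.updatedExisting);  // nothing matched, so the document was upserted
```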
+ (function testNoopUpsert() { + if (FixtureHelpers.isMongos(db)) { + jsTestLog("Skipping findAndModify upsert test on sharded cluster."); + return; + } + + testFindOneAndUpdate({ + initialDocList: [doc_t2023_m1_id1_a1], + cmd: {filter: {}, update: {$unset: {z: ""}}, upsert: true}, + res: { + resultDocList: [doc_t2023_m1_id1_a1], + returnDoc: doc_t2023_m1_id1_a1, + nUpserted: 0, + }, + }); + })(); +} diff --git a/jstests/core/timeseries/timeseries_geonear_edge_case_measurements.js b/jstests/core/timeseries/timeseries_geonear_edge_case_measurements.js index b68ce61349956..1e01661f40a66 100644 --- a/jstests/core/timeseries/timeseries_geonear_edge_case_measurements.js +++ b/jstests/core/timeseries/timeseries_geonear_edge_case_measurements.js @@ -9,16 +9,13 @@ * ] */ -(function() { -"use strict"; - load("jstests/core/timeseries/libs/geo.js"); -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } Random.setRandomSeed(7813223789272959000); @@ -178,5 +175,4 @@ for (const minOrMax of ['maxDistance', 'minDistance']) { // Make sure the time-series results match. const tsResult = tsColl.aggregate(pipeline).toArray(); assert.sameMembers(result, tsResult); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_geonear_measurements.js b/jstests/core/timeseries/timeseries_geonear_measurements.js index a79b7ed98095d..fa8683b60757b 100644 --- a/jstests/core/timeseries/timeseries_geonear_measurements.js +++ b/jstests/core/timeseries/timeseries_geonear_measurements.js @@ -22,16 +22,13 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); +import {aggPlanHasStage, getAggPlanStage} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } Random.setRandomSeed(); @@ -655,4 +652,3 @@ function runExamples(coll, isTimeseries, has2dsphereIndex) { insertTestData(coll); runExamples(coll, true /* isTimeseries */, true /* has2dsphereIndex */); } -})(); diff --git a/jstests/core/timeseries/timeseries_geonear_random_measurements.js b/jstests/core/timeseries/timeseries_geonear_random_measurements.js index 3f1d59f4c6cca..a38d577fffef9 100644 --- a/jstests/core/timeseries/timeseries_geonear_random_measurements.js +++ b/jstests/core/timeseries/timeseries_geonear_random_measurements.js @@ -9,16 +9,13 @@ * ] */ -(function() { -"use strict"; - load("jstests/core/timeseries/libs/geo.js"); -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } Random.setRandomSeed(); @@ -116,5 +113,4 @@ for (const doc of docs) { assertSortedAscending(result.map(d => d.dist)); assertSortedAscending(tsResult.map(d => d.dist)); print('Got ' + result.length + ' results'); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_graph_lookup.js 
b/jstests/core/timeseries/timeseries_graph_lookup.js index 6b71e56ea9bea..bf43190ae9c29 100644 --- a/jstests/core/timeseries/timeseries_graph_lookup.js +++ b/jstests/core/timeseries/timeseries_graph_lookup.js @@ -4,12 +4,10 @@ * @tags: [ * # We need a timeseries collection. * requires_timeseries, + * references_foreign_collection, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const testDB = db.getSiblingDB(jsTestName()); @@ -148,5 +146,4 @@ TimeseriesTest.run((insert) => { collAOption = timeseriesCollOption; collBOption = timeseriesCollOption; testFunc(collAOption, collBOption); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_groupby_reorder.js b/jstests/core/timeseries/timeseries_groupby_reorder.js index c4c34bc12492e..1948810a9d483 100644 --- a/jstests/core/timeseries/timeseries_groupby_reorder.js +++ b/jstests/core/timeseries/timeseries_groupby_reorder.js @@ -8,11 +8,7 @@ * requires_fcv_61, * ] */ -(function() { -"use strict"; - load("jstests/libs/fixture_helpers.js"); -load("jstests/core/timeseries/libs/timeseries.js"); const coll = db.timeseries_groupby_reorder; coll.drop(); @@ -26,7 +22,7 @@ assert.commandWorked(coll.insert({_id: 0, t: t, b: 2, c: 2})); assert.commandWorked(coll.insert({_id: 0, t: t, b: 3, c: 3})); // Test reordering the groupby and internal unpack buckets. -if (!isMongos(db)) { +if (!FixtureHelpers.isMongos(db)) { const res = coll.explain("queryPlanner").aggregate([ {$group: {_id: '$meta', accmin: {$min: '$b'}, accmax: {$max: '$c'}}} ]); @@ -51,5 +47,4 @@ res = coll.aggregate([{ } }]) .toArray(); -assert.docEq([{"_id": null, "accmin": 2, "accmax": 6}], res); -})(); +assert.docEq([{"_id": null, "accmin": 2, "accmax": 6}], res); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_hint.js b/jstests/core/timeseries/timeseries_hint.js index 72a3d698a54d9..3c23a0407af35 100644 --- a/jstests/core/timeseries/timeseries_hint.js +++ b/jstests/core/timeseries/timeseries_hint.js @@ -13,11 +13,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_hint; coll.drop(); @@ -88,5 +84,4 @@ runTest({ }, expectedResult: docsDesc, expectedDirection: 'backward', -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_id_range.js b/jstests/core/timeseries/timeseries_id_range.js index 6b0048d52df9e..9917a492108be 100644 --- a/jstests/core/timeseries/timeseries_id_range.js +++ b/jstests/core/timeseries/timeseries_id_range.js @@ -15,11 +15,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js"; TimeseriesTest.run((insert) => { // These dates will all be inserted into individual buckets. 
@@ -239,5 +236,4 @@ TimeseriesTest.run((insert) => { expl = coll.explain("executionStats").aggregate(pipeline); assert.eq(3, expl.stages[0].$cursor.executionStats.totalDocsExamined); })(); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_index.js b/jstests/core/timeseries/timeseries_index.js index 4a2a8b3f027a3..81da8341746c6 100644 --- a/jstests/core/timeseries/timeseries_index.js +++ b/jstests/core/timeseries/timeseries_index.js @@ -9,11 +9,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/feature_flag_util.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/fixture_helpers.js"); TimeseriesTest.run((insert) => { @@ -309,4 +306,3 @@ TimeseriesTest.run((insert) => { testCreateIndexFailed({$natural: -1}); testCreateIndexFailed({$hint: 'my_index_name'}); }); -})(); diff --git a/jstests/core/timeseries/timeseries_index_build_failure.js b/jstests/core/timeseries/timeseries_index_build_failure.js index 8b6775b597be2..1a5929e42ef5e 100644 --- a/jstests/core/timeseries/timeseries_index_build_failure.js +++ b/jstests/core/timeseries/timeseries_index_build_failure.js @@ -10,10 +10,7 @@ * requires_timeseries, * ] */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const coll = db.timeseries_index_skipped_record_tracker; @@ -33,5 +30,4 @@ TimeseriesTest.run((insert) => { const bucketColl = db.getCollection("system.buckets." + coll.getName()); assert.commandFailedWithCode(bucketColl.createIndex({"control.min.time": "2dsphere"}), 16755); -}); -}()); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_index_collation.js b/jstests/core/timeseries/timeseries_index_collation.js index ae2a126889ae6..9791b5996b629 100644 --- a/jstests/core/timeseries/timeseries_index_collation.js +++ b/jstests/core/timeseries/timeseries_index_collation.js @@ -9,10 +9,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const timeFieldName = 'tm'; @@ -99,5 +96,4 @@ TimeseriesTest.run((insert) => { assert.eq(false, indexSpecsString[0].collation.numericOrdering, 'Invalid index spec for index_string: ' + tojson(indexSpecsString[0])); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_index_partial.js b/jstests/core/timeseries/timeseries_index_partial.js index dd2affb71ea84..001472eb38bbe 100644 --- a/jstests/core/timeseries/timeseries_index_partial.js +++ b/jstests/core/timeseries/timeseries_index_partial.js @@ -2,8 +2,6 @@ * Test creating and using partial indexes, on a time-series collection. * * @tags: [ - * # TODO (SERVER-73316): remove - * assumes_against_mongod_not_mongos, * # Explain of a resolved view must be executed by mongos. 
* directly_against_shardsvrs_incompatible, * # Refusing to run a test that issues an aggregation command with explain because it may return @@ -13,46 +11,58 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); +import { + getAggPlanStage, + getPlanStages, + getRejectedPlan, + getRejectedPlans +} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } const coll = db.timeseries_index_partial; const timeField = 'time'; const metaField = 'm'; - -coll.drop(); -assert.commandWorked(db.createCollection(coll.getName(), {timeseries: {timeField, metaField}})); - -const buckets = db.getCollection('system.buckets.' + coll.getName()); let extraIndexes = []; let extraBucketIndexes = []; -if (FixtureHelpers.isSharded(buckets)) { - // If the collection is sharded, expect an implicitly-created index on time. - // It will appear differently in listIndexes depending on whether you look at the time-series - // collection or the buckets collection. - extraIndexes.push({ - "v": 2, - "key": {"time": 1}, - "name": "control.min.time_1", - }); - extraBucketIndexes.push({ - "v": 2, - "key": {"control.min.time": 1}, - "name": "control.min.time_1", - }); -} +let buckets = []; -// TODO SERVER-66438: Remove feature flag check. -if (FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesScalabilityImprovements")) { +function resetCollection(collation) { + coll.drop(); + extraIndexes = []; + extraBucketIndexes = []; + + if (collation) { + assert.commandWorked(db.createCollection(coll.getName(), { + timeseries: {timeField, metaField}, + collation: collation, + })); + } else { + assert.commandWorked( + db.createCollection(coll.getName(), {timeseries: {timeField, metaField}})); + } + buckets = db.getCollection('system.buckets.' + coll.getName()); + // If the collection is sharded, expect an implicitly-created index on time. It will appear + // differently in listIndexes depending on whether you look at the time-series collection or + // the buckets collection. + // TODO SERVER-77112 fix this logic once this issue is fixed. + if (FixtureHelpers.isSharded(buckets)) { + extraIndexes.push({ + "v": 2, + "key": {"time": 1}, + "name": "control.min.time_1", + }); + extraBucketIndexes.push({ + "v": 2, + "key": {"control.min.time": 1}, + "name": "control.min.time_1", + }); + } // When enabled, the {meta: 1, time: 1} index gets built by default on the time-series // bucket collection. 
extraIndexes.push({ @@ -67,6 +77,8 @@ if (FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesScalabilityImprovements") }); } +resetCollection(); + assert.sameMembers(coll.getIndexes(), extraIndexes); assert.sameMembers(buckets.getIndexes(), extraBucketIndexes); @@ -109,10 +121,16 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression: // If scan is not present, check rejected plans if (scan === null) { const rejectedPlans = getRejectedPlans(getAggPlanStage(explain, "$cursor")["$cursor"]); - if (rejectedPlans.length === 1) { - const scans = getPlanStages(getRejectedPlan(rejectedPlans[0]), "IXSCAN"); - if (scans.length === 1) { - scan = scans[0]; + if (rejectedPlans.length === 2) { + let firstScan = getPlanStages(getRejectedPlan(rejectedPlans[0]), "IXSCAN"); + let secondScan = getPlanStages(getRejectedPlan(rejectedPlans[1]), "IXSCAN"); + // Both plans should have an "IXSCAN" stage and one stage should scan the index on + // the 'a' field. + if (firstScan.length === 1 && secondScan.length === 1) { + scan = firstScan[0]; + if (secondScan[0]["indexName"] == "a_1") { + scan = secondScan[0]; + } } } } else { @@ -129,7 +147,7 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression: const result = coll.aggregate([{$match: predicate}], {hint: {a: 1}}).toArray(); const unindexed = coll.aggregate([{$_internalInhibitOptimization: {}}, {$match: predicate}]).toArray(); - assert.docEq(result, unindexed); + assert.sameMembers(result, unindexed); } function checkPlanAndResults(predicate) { checkPlan(predicate); @@ -165,6 +183,12 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression: // Test some predicates on the time field. { + // TODO SERVER-77112 we can change this to assert.commandWorkedOrFailed, since the indexes + // made by 'createIndex' should be identical to the implicit index made by + // 'shardCollection'. + if (!FixtureHelpers.isSharded(buckets)) { + assert.commandWorked(coll.createIndex({[timeField]: 1})); + } const t0 = ISODate('2000-01-01T00:00:00Z'); const t1 = ISODate('2000-01-01T00:00:01Z'); const t2 = ISODate('2000-01-01T00:00:02Z'); @@ -192,6 +216,11 @@ assert.commandFailedWithCode(coll.createIndex({a: 1}, {partialFilterExpression: coll.createIndex({a: 1}, {partialFilterExpression: {[timeField]: {$gte: t1}}})); check({a: {$lt: 999}, [timeField]: {$gte: t1}}); check({a: {$lt: 999}, [timeField]: {$gte: t2}}); + + // Drop the index, so it doesn't interfere with other tests. + if (!FixtureHelpers.isSharded(buckets)) { + assert.commandWorked(coll.dropIndex({[timeField]: 1})); + } } assert.commandWorked(coll.dropIndex({a: 1})); @@ -253,12 +282,9 @@ assert.sameMembers(buckets.getIndexes(), extraBucketIndexes.concat([ // Test how partialFilterExpression interacts with collation. { + // Recreate the collection with a collation. 
const numericCollation = {locale: "en_US", numericOrdering: true}; - coll.drop(); - assert.commandWorked(db.createCollection(coll.getName(), { - timeseries: {timeField, metaField}, - collation: numericCollation, - })); + resetCollection(numericCollation); assert.commandWorked(coll.insert([ {[timeField]: ISODate(), [metaField]: {x: "1000", y: 1}, a: "120"}, @@ -601,4 +627,3 @@ assert.sameMembers(buckets.getIndexes(), extraBucketIndexes.concat([ } }); } -})(); diff --git a/jstests/core/timeseries/timeseries_index_spec.js b/jstests/core/timeseries/timeseries_index_spec.js index d2ac8e58687de..8ca6b5f06b104 100644 --- a/jstests/core/timeseries/timeseries_index_spec.js +++ b/jstests/core/timeseries/timeseries_index_spec.js @@ -9,11 +9,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/feature_flag_util.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; TimeseriesTest.run(() => { const collName = "timeseries_index_spec"; @@ -137,4 +134,3 @@ TimeseriesTest.run(() => { assert(foundIndex); } }); -}()); diff --git a/jstests/core/timeseries/timeseries_index_stats.js b/jstests/core/timeseries/timeseries_index_stats.js index 5ac1b4c10d4e8..d191ec24fa020 100644 --- a/jstests/core/timeseries/timeseries_index_stats.js +++ b/jstests/core/timeseries/timeseries_index_stats.js @@ -13,10 +13,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load("jstests/libs/fixture_helpers.js"); // For isSharded. TimeseriesTest.run((insert) => { @@ -97,4 +94,3 @@ TimeseriesTest.run((insert) => { assert.sameMembers( Object.keys(indexKeys), multiStageDocs[0].index_names, tojson(multiStageDocs)); }); -})(); diff --git a/jstests/core/timeseries/timeseries_index_ttl_partial.js b/jstests/core/timeseries/timeseries_index_ttl_partial.js index e0f64b6b8422f..03e3114e4400f 100644 --- a/jstests/core/timeseries/timeseries_index_ttl_partial.js +++ b/jstests/core/timeseries/timeseries_index_ttl_partial.js @@ -9,15 +9,12 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db.getMongo())) { jsTestLog( "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled."); - return; + quit(); } const collName = "timeseries_index_ttl_partial"; @@ -148,5 +145,4 @@ const resetTsColl = function(extraOptions = {}) { assert.commandFailedWithCode(coll.createIndex(timeAndDataSpec, filterOnMeta), ErrorCodes.CannotCreateIndex); } -}()); -})(); +}()); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_index_use.js b/jstests/core/timeseries/timeseries_index_use.js index ebcea9d8e6b41..82912c018b27c 100644 --- a/jstests/core/timeseries/timeseries_index_use.js +++ b/jstests/core/timeseries/timeseries_index_use.js @@ -13,11 +13,13 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/analyze_plan.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import { + getAggPlanStage, + getPlanStages, + getRejectedPlan, + getRejectedPlans +} from 
"jstests/libs/analyze_plan.js"; const generateTest = (useHint) => { return (insert) => { @@ -403,8 +405,7 @@ const generateTest = (useHint) => { {}, collation); - /*********************************** Tests $expr predicates - * *********************************/ + /*********************************** Tests $expr predicates *******************************/ resetCollections(); assert.commandWorked(insert(coll, [ {_id: 0, [timeFieldName]: ISODate('1990-01-01 00:00:00.000Z'), [metaFieldName]: 2}, @@ -433,5 +434,4 @@ const generateTest = (useHint) => { // Run the test twice, once without hinting the index, and again hinting the index by spec. TimeseriesTest.run(generateTest(false)); -TimeseriesTest.run(generateTest(true)); -})(); +TimeseriesTest.run(generateTest(true)); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_insert.js b/jstests/core/timeseries/timeseries_insert.js index bbb876a3adeaa..965a38151ba71 100644 --- a/jstests/core/timeseries/timeseries_insert.js +++ b/jstests/core/timeseries/timeseries_insert.js @@ -5,10 +5,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const coll = db.timeseries_insert; coll.drop(); @@ -31,5 +28,4 @@ for (let i = 0; i < 100; i++) { fields: host.fields, tags: host.tags, })); -} -})(); \ No newline at end of file +} \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_insert_after_delete.js b/jstests/core/timeseries/timeseries_insert_after_delete.js index cffe0c7c91d16..5c8ec7482f882 100644 --- a/jstests/core/timeseries/timeseries_insert_after_delete.js +++ b/jstests/core/timeseries/timeseries_insert_after_delete.js @@ -8,18 +8,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. -load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. - -if (FixtureHelpers.isMongos(db) && - !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db.getMongo())) { - jsTestLog( - "Skipping test because the sharded time-series updates and deletes feature flag is disabled"); - return; -} +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +load("jstests/libs/fixture_helpers.js"); // For 'FixtureHelpers'. 
TimeseriesTest.run((insert) => { const testDB = db.getSiblingDB(jsTestName()); @@ -43,4 +33,3 @@ TimeseriesTest.run((insert) => { assert.docEq([objB], docs); assert(coll.drop()); }); -})(); diff --git a/jstests/core/timeseries/timeseries_insert_after_update.js b/jstests/core/timeseries/timeseries_insert_after_update.js index 196ccae2149a0..eef90c2c63ea6 100644 --- a/jstests/core/timeseries/timeseries_insert_after_update.js +++ b/jstests/core/timeseries/timeseries_insert_after_update.js @@ -14,25 +14,9 @@ * assumes_read_preference_unchanged, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load("jstests/libs/fixture_helpers.js"); -if (FixtureHelpers.isMongos(db) && - !TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo())) { - jsTestLog("Skipping test because the time-series updates and deletes feature flag is disabled"); - return; -} - -if (FixtureHelpers.isMongos(db) && - !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db.getMongo())) { - jsTestLog( - "Skipping test because the sharded time-series updates and deletes feature flag is disabled"); - return; -} - TimeseriesTest.run((insert) => { const testDB = db.getSiblingDB(jsTestName()); assert.commandWorked(testDB.dropDatabase()); @@ -76,4 +60,3 @@ TimeseriesTest.run((insert) => { assert.eq(bucketsColl.find().itcount(), 3, bucketsColl.find().toArray()); } }); -})(); diff --git a/jstests/core/timeseries/timeseries_internal_bounded_sort.js b/jstests/core/timeseries/timeseries_internal_bounded_sort.js index 873c16b7421cb..d3cb5937914b2 100644 --- a/jstests/core/timeseries/timeseries_internal_bounded_sort.js +++ b/jstests/core/timeseries/timeseries_internal_bounded_sort.js @@ -10,16 +10,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); -load("jstests/core/timeseries/libs/timeseries.js"); - -if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) { - jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled."); - return; -} +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_internal_bounded_sort; const buckets = db['system.buckets.' + coll.getName()]; @@ -177,4 +169,3 @@ function runTest(ascending) { runTest(true); // ascending runTest(false); // descending -})(); diff --git a/jstests/core/timeseries/timeseries_internal_bounded_sort_compound.js b/jstests/core/timeseries/timeseries_internal_bounded_sort_compound.js index 92d77cf8fd609..1c01d4f353976 100644 --- a/jstests/core/timeseries/timeseries_internal_bounded_sort_compound.js +++ b/jstests/core/timeseries/timeseries_internal_bounded_sort_compound.js @@ -11,16 +11,8 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); -load("jstests/core/timeseries/libs/timeseries.js"); - -if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) { - jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled."); - return; -} +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_internal_bounded_sort_compound; const buckets = db['system.buckets.' 
+ coll.getName()]; @@ -226,4 +218,3 @@ runTest({m: +1, t: +1}); runTest({m: +1, t: -1}); runTest({m: -1, t: +1}); runTest({m: -1, t: -1}); -})(); diff --git a/jstests/core/timeseries/timeseries_internal_bounded_sort_compound_mixed_types.js b/jstests/core/timeseries/timeseries_internal_bounded_sort_compound_mixed_types.js index f8e435c8563fb..0134232804226 100644 --- a/jstests/core/timeseries/timeseries_internal_bounded_sort_compound_mixed_types.js +++ b/jstests/core/timeseries/timeseries_internal_bounded_sort_compound_mixed_types.js @@ -11,16 +11,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); -load("jstests/core/timeseries/libs/timeseries.js"); - -if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) { - jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled."); - return; -} +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_internal_bounded_sort_compound_mixed_types; const buckets = db['system.buckets.' + coll.getName()]; @@ -163,4 +154,3 @@ runTest({m: +1, t: +1}); runTest({m: +1, t: -1}); runTest({m: -1, t: +1}); runTest({m: -1, t: -1}); -})(); diff --git a/jstests/core/timeseries/timeseries_internal_bounded_sort_overflow.js b/jstests/core/timeseries/timeseries_internal_bounded_sort_overflow.js index 6a588b5ada2e3..9731b1311c712 100644 --- a/jstests/core/timeseries/timeseries_internal_bounded_sort_overflow.js +++ b/jstests/core/timeseries/timeseries_internal_bounded_sort_overflow.js @@ -11,16 +11,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); -load("jstests/core/timeseries/libs/timeseries.js"); - -if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) { - jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled."); - return; -} +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_internal_bounded_sort_overflow; const buckets = db['system.buckets.' + coll.getName()]; @@ -49,4 +40,3 @@ const result = buckets // Make sure the result is in order. assert.eq(result[0].t, docs[0].t); assert.eq(result[1].t, docs[1].t); -})(); diff --git a/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js b/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js index f4467bde490ce..09fd754a835ee 100644 --- a/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js +++ b/jstests/core/timeseries/timeseries_internal_bucket_geo_within.js @@ -4,8 +4,6 @@ * collection. * * @tags: [ - * # TODO (SERVER-73321): remove - * assumes_against_mongod_not_mongos, * # Explain of a resolved view must be executed by mongos. * directly_against_shardsvrs_incompatible, * # Time series geo functionality requires optimization. 
@@ -20,10 +18,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_internal_bucket_geo_within; coll.drop(); @@ -53,10 +48,6 @@ for (let collScanStage of collScanStages) { "field": "loc" } }; - // TODO SERVER-60373 Fix duplicate predicates for sharded time-series collection - if (FixtureHelpers.isSharded(bucketsColl)) { - expectedPredicate = {$and: [expectedPredicate, expectedPredicate]}; - } assert.docEq(expectedPredicate, collScanStage.filter, collScanStages); } @@ -319,5 +310,4 @@ assert.sameMembers(results, [ pipeline = [{$match: {loc: {$geoWithin: {$centerSphere: [[0, 80], 1], $center: [[0, 0], 5]}}}}]; err = assert.throws(() => coll.explain().aggregate(pipeline)); assert.eq(err.code, ErrorCodes.BadValue, err); -} -}()); +} \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_lastpoint.js b/jstests/core/timeseries/timeseries_lastpoint.js index f3ea1f5c09372..9433a2a0b780f 100644 --- a/jstests/core/timeseries/timeseries_lastpoint.js +++ b/jstests/core/timeseries/timeseries_lastpoint.js @@ -13,13 +13,17 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); -load("jstests/core/timeseries/libs/timeseries_agg_helpers.js"); -load("jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js"); -load("jstests/libs/analyze_plan.js"); +import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js"; +import { + createBoringCollections, + getMapInterestingValuesToEquivalentsStage, + createInterestingCollections, + expectDistinctScan, + expectCollScan, + expectIxscan, + testAllTimeMetaDirections, +} from "jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js"; const testDB = TimeseriesAggTests.getTestDb(); assert.commandWorked(testDB.dropDatabase()); @@ -141,4 +145,3 @@ function getGroupStage(time, extraFields = []) { ]; }); } -})(); diff --git a/jstests/core/timeseries/timeseries_lastpoint_top.js b/jstests/core/timeseries/timeseries_lastpoint_top.js index f6a93b7091980..028d3bd90f494 100644 --- a/jstests/core/timeseries/timeseries_lastpoint_top.js +++ b/jstests/core/timeseries/timeseries_lastpoint_top.js @@ -15,14 +15,17 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); -load("jstests/core/timeseries/libs/timeseries_agg_helpers.js"); -load("jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js"); -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); +import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js"; +import { + createBoringCollections, + getMapInterestingValuesToEquivalentsStage, + createInterestingCollections, + expectDistinctScan, + expectCollScan, + expectIxscan, + testAllTimeMetaDirections, +} from "jstests/core/timeseries/libs/timeseries_lastpoint_helpers.js"; const testDB = TimeseriesAggTests.getTestDb(); assert.commandWorked(testDB.dropDatabase()); @@ -30,7 +33,7 @@ assert.commandWorked(testDB.dropDatabase()); // TODO SERVER-73509 The test doesn't work yet, even though this feature flag is gone. 
if (true /* previously guarded by featureFlagLastPointQuery */) { jsTestLog("Skipping the test."); - return; + quit(); } /** @@ -165,4 +168,3 @@ function getGroupStage({time, sortBy, n, extraFields = []}) { ]); }); } -})(); diff --git a/jstests/core/timeseries/timeseries_list_collections.js b/jstests/core/timeseries/timeseries_list_collections.js index 318f50b8f3a00..47e582016716f 100644 --- a/jstests/core/timeseries/timeseries_list_collections.js +++ b/jstests/core/timeseries/timeseries_list_collections.js @@ -6,10 +6,7 @@ * requires_timeseries, * ] */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const testDB = db.getSiblingDB(jsTestName()); @@ -144,5 +141,4 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB)) { collation: {locale: 'ja'}, expireAfterSeconds: NumberLong(100), }); -} -})(); +} \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_list_collections_filter_name.js b/jstests/core/timeseries/timeseries_list_collections_filter_name.js index f95469caf4889..e8113b186df54 100644 --- a/jstests/core/timeseries/timeseries_list_collections_filter_name.js +++ b/jstests/core/timeseries/timeseries_list_collections_filter_name.js @@ -7,11 +7,6 @@ * requires_timeseries, * ] */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); - const timeFieldName = 'time'; const coll = db.timeseries_list_collections_filter_name; @@ -36,5 +31,4 @@ const collectionOptions = [{ info: {readOnly: false}, }]; -assert.eq(collections, collectionOptions); -})(); +assert.eq(collections, collectionOptions); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_lookup.js b/jstests/core/timeseries/timeseries_lookup.js index 62b4ffeeeb0a1..8d2a85558de83 100644 --- a/jstests/core/timeseries/timeseries_lookup.js +++ b/jstests/core/timeseries/timeseries_lookup.js @@ -7,12 +7,10 @@ * does_not_support_stepdowns, * # We need a timeseries collection. 
* requires_timeseries, + * references_foreign_collection, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const testDB = db.getSiblingDB(jsTestName()); @@ -245,5 +243,4 @@ TimeseriesTest.run((insert) => { testFunc(collAOption, collBOption); }); }); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_match_pushdown.js b/jstests/core/timeseries/timeseries_match_pushdown.js index 1b45895b46ea5..f044fecb41a82 100644 --- a/jstests/core/timeseries/timeseries_match_pushdown.js +++ b/jstests/core/timeseries/timeseries_match_pushdown.js @@ -9,10 +9,7 @@ * directly_against_shardsvrs_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_match_pushdown; coll.drop(); @@ -409,5 +406,4 @@ runTest({ ] }, expectedDocs: [], -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_match_pushdown_with_project.js b/jstests/core/timeseries/timeseries_match_pushdown_with_project.js index bec8998f9d4d3..2f90599583b5f 100644 --- a/jstests/core/timeseries/timeseries_match_pushdown_with_project.js +++ b/jstests/core/timeseries/timeseries_match_pushdown_with_project.js @@ -8,10 +8,7 @@ * directly_against_shardsvrs_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const coll = db.timeseries_match_pushdown_with_project; coll.drop(); @@ -126,5 +123,4 @@ runTest({ {[timeField]: aTime, a: 8, _id: 8}, {[timeField]: aTime, a: 9, _id: 9}, ], -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_merge.js b/jstests/core/timeseries/timeseries_merge.js index 041a88a24762f..75040c6c1e521 100644 --- a/jstests/core/timeseries/timeseries_merge.js +++ b/jstests/core/timeseries/timeseries_merge.js @@ -7,12 +7,10 @@ * does_not_support_stepdowns, * # We need a timeseries collection. * requires_timeseries, + * references_foreign_collection, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries_agg_helpers.js"); +import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js"; const testDB = TimeseriesAggTests.getTestDb(); assert.commandWorked(testDB.dropDatabase()); @@ -107,5 +105,4 @@ let runMergeOnTestCase = () => { runSimpleMergeTestCase(); runMergeOnErrorTestCase(); -runMergeOnTestCase(); -})(); +runMergeOnTestCase(); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_metadata.js b/jstests/core/timeseries/timeseries_metadata.js index 53a5dac863eb8..b82afdb38c648 100644 --- a/jstests/core/timeseries/timeseries_metadata.js +++ b/jstests/core/timeseries/timeseries_metadata.js @@ -9,10 +9,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. 
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const collNamePrefix = 'timeseries_metadata_'; @@ -172,5 +169,4 @@ TimeseriesTest.run((insert) => { {_id: 2, time: t[2], meta: {a: [2, 1, 3]}, x: 20}, {_id: 3, time: t[3], meta: {a: [2, 1, 3]}, x: 30}, ]); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_metric_index_2dsphere.js b/jstests/core/timeseries/timeseries_metric_index_2dsphere.js index 2fa470bbf59ed..5b7d8dd5e3bc3 100644 --- a/jstests/core/timeseries/timeseries_metric_index_2dsphere.js +++ b/jstests/core/timeseries/timeseries_metric_index_2dsphere.js @@ -16,15 +16,12 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { - return; + quit(); } TimeseriesTest.run((insert) => { @@ -179,4 +176,3 @@ TimeseriesTest.run((insert) => { assert.commandWorked(timeseriescoll.dropIndex(twoDSphereTimeseriesIndexSpec)); }); -})(); diff --git a/jstests/core/timeseries/timeseries_metric_index_ascending_descending.js b/jstests/core/timeseries/timeseries_metric_index_ascending_descending.js index c997bb587890c..e7f17a200f877 100644 --- a/jstests/core/timeseries/timeseries_metric_index_ascending_descending.js +++ b/jstests/core/timeseries/timeseries_metric_index_ascending_descending.js @@ -8,17 +8,14 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/feature_flag_util.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/fixture_helpers.js"); if (!FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesMetricIndexes")) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } TimeseriesTest.run((insert) => { @@ -168,4 +165,3 @@ TimeseriesTest.run((insert) => { bucketIndexes = bucketsColl.getIndexes(); assert.eq(13 + numExtraIndexes, bucketIndexes.length, tojson(bucketIndexes)); }); -}()); diff --git a/jstests/core/timeseries/timeseries_metric_index_compound.js b/jstests/core/timeseries/timeseries_metric_index_compound.js index 41e245bccab67..e5ae44d75f513 100644 --- a/jstests/core/timeseries/timeseries_metric_index_compound.js +++ b/jstests/core/timeseries/timeseries_metric_index_compound.js @@ -8,16 +8,13 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/feature_flag_util.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } TimeseriesTest.run((insert) => { @@ -203,4 +200,3 @@ TimeseriesTest.run((insert) => { testBadIndexForData({[metaFieldName + ".loc2"]: "2d", a: 1}); testBadIndexForData({[metaFieldName + ".r"]: "hashed", a: 1}); }); -}()); diff --git 
a/jstests/core/timeseries/timeseries_metric_index_hashed.js b/jstests/core/timeseries/timeseries_metric_index_hashed.js index fd9310fe65280..9de697e149e53 100644 --- a/jstests/core/timeseries/timeseries_metric_index_hashed.js +++ b/jstests/core/timeseries/timeseries_metric_index_hashed.js @@ -6,15 +6,12 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } TimeseriesTest.run((insert) => { @@ -46,5 +43,4 @@ TimeseriesTest.run((insert) => { testIndex({x: 1, y: "hashed"}); testIndex({[`${metaFieldName}.tag`]: 1, x: "hashed"}); testIndex({x: 1, [`${metaFieldName}.tag`]: -1, y: "hashed"}); -}); -}()); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_metric_index_wildcard.js b/jstests/core/timeseries/timeseries_metric_index_wildcard.js index eba9648f88033..2520a0058adcb 100644 --- a/jstests/core/timeseries/timeseries_metric_index_wildcard.js +++ b/jstests/core/timeseries/timeseries_metric_index_wildcard.js @@ -6,15 +6,12 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; if (!TimeseriesTest.timeseriesMetricIndexesEnabled(db.getMongo())) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } TimeseriesTest.run((insert) => { @@ -48,5 +45,4 @@ TimeseriesTest.run((insert) => { testIndex({"$**": -1, x: 1}); testIndex({[`${metaFieldName}.tag`]: 1, "x.$**": 1}); testIndex({"$**": 1, [`${metaFieldName}.tag`]: -1}); -}); -}()); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_min_max.js b/jstests/core/timeseries/timeseries_min_max.js index 4abf641df33b0..c2e5428f63b9a 100644 --- a/jstests/core/timeseries/timeseries_min_max.js +++ b/jstests/core/timeseries/timeseries_min_max.js @@ -10,10 +10,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const collNamePrefix = 'timeseries_min_max_'; @@ -126,5 +123,4 @@ TimeseriesTest.run((insert) => { runTest({a: NumberInt(1)}, {a: NumberInt(1)}, {a: NumberLong(2)}); runTest({a: NumberDecimal(2.5)}, {a: NumberInt(1)}, {a: NumberDecimal(2.5)}); runTest({a: Number(0.5)}, {a: Number(0.5)}, {a: NumberDecimal(2.5)}); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_out.js b/jstests/core/timeseries/timeseries_out.js deleted file mode 100644 index 46beccd99ede0..0000000000000 --- a/jstests/core/timeseries/timeseries_out.js +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Verifies that time-series collections work as expected with $out. - * - * - * @tags: [ - * # TimeseriesAggTests doesn't handle stepdowns. - * does_not_support_stepdowns, - * # We need a timeseries collection. 
- * requires_timeseries, - * ] - */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries_agg_helpers.js"); - -const testDB = TimeseriesAggTests.getTestDb(); -assert.commandWorked(testDB.dropDatabase()); -const numHosts = 10; -const numIterations = 20; - -let [inColl, observerInColl] = TimeseriesAggTests.prepareInputCollections(numHosts, numIterations); - -// Gets the expected results from non time-series observer input collection. -let expectedResults = - TimeseriesAggTests.getOutputAggregateResults(observerInColl, [{$out: "observer_out"}]); - -// Gets the actual results from time-series input collection. -let actualResults = TimeseriesAggTests.getOutputAggregateResults(inColl, [{$out: "out"}]); - -// Verifies that the number of measurements is same as expected. -assert.eq(actualResults.length, expectedResults.length, actualResults); - -// Verifies that every measurement is same as expected. -for (var i = 0; i < expectedResults.length; ++i) { - assert.eq(actualResults[i], expectedResults[i], actualResults); -} -})(); diff --git a/jstests/core/timeseries/timeseries_out_non_sharded.js b/jstests/core/timeseries/timeseries_out_non_sharded.js new file mode 100644 index 0000000000000..5654c5e5791ea --- /dev/null +++ b/jstests/core/timeseries/timeseries_out_non_sharded.js @@ -0,0 +1,167 @@ +/** + * Verifies that $out writes to a time-series collection from an unsharded collection. + * There is a test for sharded source collections in jstests/sharding/timeseries_out_sharded.js. + * + * @tags: [ + * references_foreign_collection, + * # TimeseriesAggTests doesn't handle stepdowns. + * does_not_support_stepdowns, + * # We need a timeseries collection. + * requires_timeseries, + * requires_fcv_71, + * featureFlagAggOutTimeseries, + * ] + */ +import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js"; + +const numHosts = 10; +const numIterations = 20; + +const testDB = TimeseriesAggTests.getTestDb(); +const dbName = testDB.getName(); +assert.commandWorked(testDB.dropDatabase()); +const targetCollName = "out_time"; + +let [inColl, observerInColl] = + TimeseriesAggTests.prepareInputCollections(numHosts, numIterations, true); + +function runTest({ + observer: observerPipeline, + timeseries: timeseriesPipeline, + drop: shouldDrop = true, + value: valueToCheck = null +}) { + let expectedTSOptions = null; + if (!shouldDrop) { + // To test if an index is preserved by $out when replacing an existing collection. + assert.commandWorked(testDB[targetCollName].createIndex({usage_guest: 1})); + // To test if $out preserves the original collection options. + let collections = testDB.getCollectionInfos({name: targetCollName}); + assert.eq(collections.length, 1, collections); + expectedTSOptions = collections[0]["options"]["timeseries"]; + } else { + expectedTSOptions = timeseriesPipeline[0]["$out"]["timeseries"]; + } + + // Gets the expected results from a non time-series observer input collection. + const expectedResults = TimeseriesAggTests.getOutputAggregateResults( + observerInColl, observerPipeline, null, shouldDrop); + + // Gets the actual results from a time-series input collection. + const actualResults = + TimeseriesAggTests.getOutputAggregateResults(inColl, timeseriesPipeline, null, shouldDrop); + + // Verifies that the number of measurements is same as expected. 
+ TimeseriesAggTests.verifyResults(actualResults, expectedResults); + if (valueToCheck) { + for (var i = 0; i < expectedResults.length; ++i) { + assert.eq(actualResults[i], {"time": valueToCheck}, actualResults); + } + } + + let collections = testDB.getCollectionInfos({name: targetCollName}); + assert.eq(collections.length, 1, collections); + + // Verifies a time-series collection was not made, if that is expected. + if (!expectedTSOptions) { + assert(!collections[0]["options"]["timeseries"], collections); + return; + } + + // Verifies the time-series options are correct, if a time-series collection is expected. + let actualOptions = collections[0]["options"]["timeseries"]; + for (let option in expectedTSOptions) { + // Must loop through each option, since 'actualOptions' will contain default fields and + // values that do not exist in 'expectedTSOptions'. + assert.eq(expectedTSOptions[option], actualOptions[option], actualOptions); + } + + // Verifies the original index is maintained, if $out is replacing an existing collection. + if (!shouldDrop) { + let indexSpecs = testDB[targetCollName].getIndexes(); + assert.eq(indexSpecs.filter(index => index.name == "usage_guest_1").length, 1); + } +} + +// Tests that $out works with a source time-series collections writing to a non-timeseries +// collection. +runTest({observer: [{$out: "observer_out"}], timeseries: [{$out: targetCollName}]}); + +// Tests that $out creates a time-series collection when the collection does not exist. +let timeseriesPipeline = TimeseriesAggTests.generateOutPipeline( + targetCollName, dbName, {timeField: "time", metaField: "tags"}); +runTest({observer: [{$out: "observer_out"}], timeseries: timeseriesPipeline}); + +// Test that $out can replace an existing time-series collection without the 'timeseries' option. +// Change an option in the existing time-series collections. +assert.commandWorked(testDB.runCommand({collMod: targetCollName, expireAfterSeconds: 360})); +// Run the $out stage. +timeseriesPipeline = [{$out: targetCollName}]; +runTest({observer: [{$out: "observer_out"}], timeseries: timeseriesPipeline, drop: false}); + +// Test that $out can replace an existing time-series collection with the 'timeseries' option. +let newDate = new Date('1999-09-30T03:24:00'); +let observerPipeline = [{$set: {"time": newDate}}, {$out: "observer_out"}]; +timeseriesPipeline = TimeseriesAggTests.generateOutPipeline( + targetCollName, dbName, {timeField: "time", metaField: "tags"}, {$set: {"time": newDate}}); +// Run the $out stage and confirm all the documents have the new value. +runTest({observer: observerPipeline, timeseries: timeseriesPipeline, drop: false, value: newDate}); + +// Test $out to time-series succeeds with a non-existent database. +const destDB = testDB.getSiblingDB("outDifferentDB"); +assert.commandWorked(destDB.dropDatabase()); +timeseriesPipeline = + TimeseriesAggTests.generateOutPipeline(targetCollName, destDB.getName(), {timeField: "time"}); +// TODO SERVER-75856 remove this conditional. +if (FixtureHelpers.isMongos(testDB)) { // this is not supported in mongos. + assert.throwsWithCode(() => inColl.aggregate(timeseriesPipeline), ErrorCodes.NamespaceNotFound); +} else { + inColl.aggregate(timeseriesPipeline); + assert.eq(300, destDB[targetCollName].find().itcount()); +} + +// Tests that an error is raised when trying to create a time-series collection from a non +// time-series collection. 
+let pipeline = TimeseriesAggTests.generateOutPipeline("observer_out", dbName, {timeField: "time"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7268700);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7268700);
+
+// Tests that an error is raised for invalid timeseries options.
+pipeline = TimeseriesAggTests.generateOutPipeline(
+    targetCollName, dbName, {timeField: "time", invalidField: "invalid"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 40415);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 40415);
+
+// Tests that an error is raised if the user changes the 'timeField'.
+pipeline =
+    TimeseriesAggTests.generateOutPipeline(targetCollName, dbName, {timeField: "usage_guest_nice"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103);
+
+// Tests that an error is raised if the user changes the 'metaField'.
+pipeline = TimeseriesAggTests.generateOutPipeline(
+    targetCollName, dbName, {timeField: "time", metaField: "usage_guest_nice"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103);
+
+// Tests that an error is raised if the user changes 'bucketMaxSpanSeconds'.
+pipeline = TimeseriesAggTests.generateOutPipeline(
+    targetCollName,
+    dbName,
+    {timeField: "time", bucketMaxSpanSeconds: 330, bucketRoundingSeconds: 330});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103);
+
+// Tests that an error is raised if the user changes 'granularity'.
+pipeline = TimeseriesAggTests.generateOutPipeline(
+    targetCollName, dbName, {timeField: "time", granularity: "minutes"});
+assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103);
+assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103);
+
+// Tests that an error is raised if a conflicting view exists.
+if (!FixtureHelpers.isMongos(testDB)) {  // cannot shard a view.
+ assert.commandWorked(testDB.createCollection("view_out", {viewOn: "out"})); + pipeline = TimeseriesAggTests.generateOutPipeline("view_out", dbName, {timeField: "time"}); + assert.throwsWithCode(() => inColl.aggregate(pipeline), 7268703); + assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7268703); +} \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_out_of_order.js b/jstests/core/timeseries/timeseries_out_of_order.js index ff84239377373..6cea3c5c87e8d 100644 --- a/jstests/core/timeseries/timeseries_out_of_order.js +++ b/jstests/core/timeseries/timeseries_out_of_order.js @@ -10,10 +10,7 @@ * requires_timeseries, * ] */ -(function() { -'use strict'; - -load('jstests/core/timeseries/libs/timeseries.js'); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const collNamePrefix = 'timeseries_out_of_order_'; @@ -67,5 +64,4 @@ TimeseriesTest.run((insert) => { assert.eq(buckets[1].control.min[timeFieldName], times[2]); assert.eq(buckets[1].control.max[timeFieldName], times[2]); }); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_partial_index_opt.js b/jstests/core/timeseries/timeseries_partial_index_opt.js new file mode 100644 index 0000000000000..40c649bb61f44 --- /dev/null +++ b/jstests/core/timeseries/timeseries_partial_index_opt.js @@ -0,0 +1,55 @@ +/** + * Test partial index optimization on a time-series collection. + * If a query expression is covered by the partial index filter, it is removed from the filter in + * the fetch stage. + * + * @tags: [ + * # Explain of a resolved view must be executed by mongos. + * directly_against_shardsvrs_incompatible, + * # Refusing to run a test that issues a command with explain because it may return + * # incomplete results if interrupted by a stepdown. + * does_not_support_stepdowns, + * # We need a timeseries collection. + * requires_timeseries, + * requires_fcv_70, + * ] + */ +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; + +const coll = db.timeseries_partial_index_opt; + +coll.drop(); +assert.commandWorked( + db.createCollection(coll.getName(), {timeseries: {timeField: "time", metaField: "tag"}})); + +assert.commandWorked(coll.insertMany([ + {_id: 0, time: new Date("2021-07-29T07:46:38.746Z"), tag: 2, a: 5}, + {_id: 1, time: new Date("2021-08-29T00:15:38.001Z"), tag: 1, a: 5, b: 8}, + {_id: 2, time: new Date("2021-11-29T12:20:34.821Z"), tag: 1, a: 7, b: 12}, + {_id: 3, time: new Date("2021-03-09T07:29:34.201Z"), tag: 2, a: 2, b: 7}, + {_id: 4, time: new Date("2021-10-09T07:29:34.201Z"), tag: 4, a: 8, b: 10} +])); + +// Check that the plan uses partial index scan with 'indexName' and the filter of the fetch +// stage does not contain the field in the partial filter expression. 
+function checkIndexScanAndFilter(coll, predicate, indexName, filterField) { + const explain = coll.find(predicate).explain(); + const scan = getAggPlanStage(explain, "IXSCAN"); + assert.eq(scan.indexName, indexName, scan); + + const fetch = getAggPlanStage(explain, "FETCH"); + if (fetch !== null && fetch.hasOwnProperty("filter")) { + const filter = fetch.filter; + assert(!filter.hasOwnProperty(filterField), + "Unexpected field " + filterField + " in fetch filter: " + tojson(filter)); + } +} + +const timeDate = ISODate("2021-10-01 00:00:00.000Z"); +assert.commandWorked( + coll.createIndex({time: 1}, {name: "time_1_tag", partialFilterExpression: {tag: {$gt: 1}}})); +checkIndexScanAndFilter(coll, {time: {$gte: timeDate}, tag: {$gt: 1}}, "time_1_tag", "tag"); + +assert.commandWorked( + coll.createIndex({tag: 1}, {name: "tag_1_b", partialFilterExpression: {b: {$gte: 10}}})); +checkIndexScanAndFilter(coll, {tag: {$gt: 1}, b: {$gte: 10}}, "tag_1_b", "b"); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_predicates.js b/jstests/core/timeseries/timeseries_predicates.js index d43f20a49e765..f356979bbcbb0 100644 --- a/jstests/core/timeseries/timeseries_predicates.js +++ b/jstests/core/timeseries/timeseries_predicates.js @@ -9,11 +9,6 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); - const coll = db.timeseries_predicates_normal; const tsColl = db.timeseries_predicates_timeseries; coll.drop(); @@ -380,5 +375,4 @@ checkAllBucketings({"mt.a": {$size: 1}}, [ {mt: {a: [{b: 2}]}}, {mt: {a: [{b: 3}]}}, {mt: {a: [{b: 2}, {b: 3}]}}, -]); -})(); +]); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_predicates_with_projections.js b/jstests/core/timeseries/timeseries_predicates_with_projections.js index 65b4ff155cb0d..c162204d1d94a 100644 --- a/jstests/core/timeseries/timeseries_predicates_with_projections.js +++ b/jstests/core/timeseries/timeseries_predicates_with_projections.js @@ -9,11 +9,6 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); - const coll = db.timeseries_predicates_with_projections_normal; const tsColl = db.timeseries_predicates_with_projections_timeseries; coll.drop(); @@ -63,5 +58,4 @@ checkPredicateResult({y: 1}, {x: {$lt: 0}}, [ checkPredicateResult({x: 1}, {"mm.x": {$lt: 0}}, [ {mm: {x: -1}}, {mm: {x: 1}}, -]); -})(); +]); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_project.js b/jstests/core/timeseries/timeseries_project.js index 85093904aba76..809d737f59a5b 100644 --- a/jstests/core/timeseries/timeseries_project.js +++ b/jstests/core/timeseries/timeseries_project.js @@ -7,11 +7,6 @@ * requires_fcv_62, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); - const coll = db.timeseries_project; coll.drop(); assert.commandWorked( @@ -128,5 +123,4 @@ pipeline = [{$project: {a: 1, _id: 0}}, {$project: {newMeta: "$x"}}]; tsDoc = tsColl.aggregate(pipeline).toArray(); regDoc = regColl.aggregate(pipeline).toArray(); assert.docEq(regDoc, tsDoc); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_reopened_bucket_insert.js b/jstests/core/timeseries/timeseries_reopened_bucket_insert.js index 5b4c5d2c8799d..ba515eb1a08eb 100644 --- a/jstests/core/timeseries/timeseries_reopened_bucket_insert.js +++ b/jstests/core/timeseries/timeseries_reopened_bucket_insert.js @@ -11,16 +11,14 @@ * 
assumes_read_preference_unchanged, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/fixture_helpers.js"); // For isSharded. if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) { jsTestLog( "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled."); - return; + quit(); } const testDB = db.getSiblingDB(jsTestName()); @@ -216,6 +214,45 @@ const expectToReopenArchivedBuckets = function() { jsTestLog("Exiting expectToReopenArchivedBuckets."); }(); +// TODO SERVER-77454: Investigate re-enabling this. +const expectToReopenCompressedBuckets = function() { + if (!FeatureFlagUtil.isPresentAndEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) { + return; + } + + jsTestLog("Entering expectToReopenCompressedBuckets..."); + resetCollection(); + + let initialMeasurements = []; + for (let i = 0; i < 5; ++i) { + initialMeasurements.push({ + [timeField]: ISODate("2022-08-26T19:19:00Z"), + [metaField]: "ReopenedBucket1", + }); + } + const forward = { + [timeField]: ISODate("2022-08-27T19:19:00Z"), + [metaField]: "ReopenedBucket1", + }; + const backward = { + [timeField]: ISODate("2022-08-26T19:19:00Z"), + [metaField]: "ReopenedBucket1", + }; + + for (let i = 0; i < initialMeasurements.length; ++i) { + checkIfBucketReopened( + initialMeasurements[i], /* willCreateBucket */ i == 0, /* willReopenBucket */ false); + } + // Time forwards will open a new bucket, and close and compress the old one. + checkIfBucketReopened(forward, /* willCreateBucket */ true, /* willReopenBucket */ false); + assert.eq(1, bucketsColl.find({"control.version": 2}).toArray().length); + + // We expect to reopen the compressed bucket with time backwards. + checkIfBucketReopened(backward, /* willCreateBucket */ false, /* willReopenBucket */ true); + + jsTestLog("Exiting expectToReopenCompressedBuckets."); +}; + const failToReopenNonSuitableBuckets = function() { jsTestLog("Entering failToReopenNonSuitableBuckets..."); resetCollection(); @@ -737,4 +774,3 @@ const reopenBucketsWhenSuitableIndexExistsNoMeta = function() { }(); coll.drop(); -})(); diff --git a/jstests/core/timeseries/timeseries_resume_after.js b/jstests/core/timeseries/timeseries_resume_after.js index e26b728a0e028..be1c5716ecc32 100644 --- a/jstests/core/timeseries/timeseries_resume_after.js +++ b/jstests/core/timeseries/timeseries_resume_after.js @@ -13,10 +13,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const timeFieldName = "time"; @@ -94,5 +91,24 @@ TimeseriesTest.run((insert) => { resumeToken = res.cursor.postBatchResumeToken; jsTestLog("Got resume token " + tojson(resumeToken)); -}); -})(); + + // Test that '$_resumeAfter' fails if the recordId is Long. + assert.commandFailedWithCode(db.runCommand({ + find: bucketsColl.getName(), + filter: {}, + $_requestResumeToken: true, + $_resumeAfter: {'$recordId': NumberLong(10)}, + hint: {$natural: 1} + }), + 7738600); + + // Test that '$_resumeAfter' fails if querying the time-series view. 
+ assert.commandFailedWithCode(db.runCommand({ + find: coll.getName(), + filter: {}, + $_requestResumeToken: true, + $_resumeAfter: {'$recordId': BinData(5, '1234')}, + hint: {$natural: 1} + }), + ErrorCodes.InvalidPipelineOperator); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_show_record_id.js b/jstests/core/timeseries/timeseries_show_record_id.js index 681933439184f..1c569d2d37ed5 100644 --- a/jstests/core/timeseries/timeseries_show_record_id.js +++ b/jstests/core/timeseries/timeseries_show_record_id.js @@ -6,10 +6,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const timeFieldName = "time"; @@ -58,5 +55,4 @@ TimeseriesTest.run((insert) => { const bucketsColl = db.getCollection("system.buckets." + coll.getName()); checkRecordId(bucketsColl.find().showRecordId().toArray()); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_simple.js b/jstests/core/timeseries/timeseries_simple.js index c86b2c2575087..43327d610314f 100644 --- a/jstests/core/timeseries/timeseries_simple.js +++ b/jstests/core/timeseries/timeseries_simple.js @@ -9,10 +9,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const coll = db.timeseries_simple; @@ -116,5 +113,4 @@ TimeseriesTest.run((insert) => { bucketDoc.data[key], 'invalid bucket data for field ' + key + ': ' + tojson(bucketDoc)); }); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_sparse.js b/jstests/core/timeseries/timeseries_sparse.js index 1362fc6eac48c..adf243ccc8d23 100644 --- a/jstests/core/timeseries/timeseries_sparse.js +++ b/jstests/core/timeseries/timeseries_sparse.js @@ -9,10 +9,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const collNamePrefix = 'timeseries_sparse_'; @@ -90,5 +87,4 @@ TimeseriesTest.run((insert) => { {_id: 2, time: t[2], b: 22, c: 20}, {_id: 3, time: t[3], c: 33, d: 30}, ]); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_sparse_index.js b/jstests/core/timeseries/timeseries_sparse_index.js index 16b2fef370422..c827f6c62199d 100644 --- a/jstests/core/timeseries/timeseries_sparse_index.js +++ b/jstests/core/timeseries/timeseries_sparse_index.js @@ -11,16 +11,13 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/feature_flag_util.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; if (!FeatureFlagUtil.isEnabled(db, "TimeseriesMetricIndexes")) { jsTestLog( "Skipped test as the featureFlagTimeseriesMetricIndexes feature flag is not enabled."); - return; + quit(); } TimeseriesTest.run((insert) => { @@ -137,4 +134,3 @@ TimeseriesTest.run((insert) => { {"meta.abc": 1, "control.max.tm": -1, "control.min.tm": -1}, 3); }); -}()); diff --git a/jstests/core/timeseries/timeseries_special_indexes_metadata.js 
b/jstests/core/timeseries/timeseries_special_indexes_metadata.js index 5b6b150e45927..678f6f40d678f 100644 --- a/jstests/core/timeseries/timeseries_special_indexes_metadata.js +++ b/jstests/core/timeseries/timeseries_special_indexes_metadata.js @@ -14,11 +14,8 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/analyze_plan.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fixture_helpers.js"); TimeseriesTest.run((insert) => { @@ -378,4 +375,3 @@ TimeseriesTest.run((insert) => { assert(planWildcardStage.multiKeyPaths.hasOwnProperty("meta.d.zip"), "Index has wrong multikey paths after insert; plan: " + tojson(planWildcardStage)); }); -})(); diff --git a/jstests/core/timeseries/timeseries_streaming_group.js b/jstests/core/timeseries/timeseries_streaming_group.js index 89dfbe95a7b49..60d52c187848e 100644 --- a/jstests/core/timeseries/timeseries_streaming_group.js +++ b/jstests/core/timeseries/timeseries_streaming_group.js @@ -10,10 +10,7 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; load("jstests/libs/fail_point_util.js"); // For configureFailPoint const ts = db.timeseries_streaming_group; @@ -64,6 +61,33 @@ for (let i = 0; i < numTimes; i++) { assert.commandWorked(ts.insert(documents)); assert.commandWorked(coll.insert(documents)); +// Incorrect use of $_internalStreamingGroup should return error +assert.commandFailedWithCode(db.runCommand({ + aggregate: "timeseires_streaming_group_regular_collection", + pipeline: [{ + $_internalStreamingGroup: { + _id: {symbol: "$symbol", time: "$time"}, + count: {$sum: 1}, + $monotonicIdFields: ["price"] + } + }], + cursor: {}, +}), + 7026705); +assert.commandFailedWithCode(db.runCommand({ + aggregate: "timeseires_streaming_group_regular_collection", + pipeline: [{$_internalStreamingGroup: {_id: null, count: {$sum: 1}}}], + cursor: {}, +}), + 7026702); +assert.commandFailedWithCode(db.runCommand({ + aggregate: "timeseires_streaming_group_regular_collection", + pipeline: + [{$_internalStreamingGroup: {_id: null, count: {$sum: 1}, $monotonicIdFields: ["_id"]}}], + cursor: {}, +}), + 7026708); + const runTest = function(pipeline, expectedMonotonicIdFields) { const explain = assert.commandWorked(ts.explain().aggregate(pipeline)); const streamingGroupStage = getAggPlanStage(explain, "$_internalStreamingGroup"); @@ -128,4 +152,3 @@ runTest( {$sort: {_id: 1}} ], ["_id"]); -})(); diff --git a/jstests/core/timeseries/timeseries_union_with.js b/jstests/core/timeseries/timeseries_union_with.js index f32fdd8289258..ab2437b1ef33d 100644 --- a/jstests/core/timeseries/timeseries_union_with.js +++ b/jstests/core/timeseries/timeseries_union_with.js @@ -7,12 +7,10 @@ * does_not_support_stepdowns, * # We need a timeseries collection. 
* requires_timeseries, + * references_foreign_collection, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { const testDB = db.getSiblingDB(jsTestName()); @@ -102,5 +100,4 @@ TimeseriesTest.run((insert) => { testFunc(collAOption, collBOption); }); }); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_update.js b/jstests/core/timeseries/timeseries_update.js index 9eca3ee44fd2d..a257eaa969fdc 100644 --- a/jstests/core/timeseries/timeseries_update.js +++ b/jstests/core/timeseries/timeseries_update.js @@ -10,24 +10,10 @@ * requires_timeseries, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load("jstests/libs/fixture_helpers.js"); -if (FixtureHelpers.isMongos(db) && - !TimeseriesTest.shardedtimeseriesCollectionsEnabled(db.getMongo())) { - jsTestLog("Skipping test because the sharded time-series feature flag is disabled"); - return; -} - -if (FixtureHelpers.isMongos(db) && - !TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(db.getMongo())) { - jsTestLog( - "Skipping test because the sharded time-series updates and deletes feature flag is disabled"); - return; -} +const arbitraryUpdatesEnabled = TimeseriesTest.arbitraryUpdatesEnabled(db); const timeFieldName = "time"; const metaFieldName = "tag"; @@ -95,13 +81,15 @@ TimeseriesTest.run((insert) => { const arrayDoc3 = {_id: 3, [timeFieldName]: dateTime, [metaFieldName]: [3, 6, 10]}; /************************************ multi:false updates ************************************/ - testUpdate({ - initialDocList: [doc1], - updateList: [{q: {[metaFieldName]: {b: "B"}}, u: {$set: {[metaFieldName]: {b: "C"}}}}], - resultDocList: [doc1], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1], + updateList: [{q: {[metaFieldName]: {b: "B"}}, u: {$set: {[metaFieldName]: {b: "C"}}}}], + resultDocList: [doc1], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + } /************************************ multi:true updates *************************************/ /************************** Tests updating with an update document ***************************/ @@ -206,58 +194,60 @@ TimeseriesTest.run((insert) => { }); // Query on a field that is not the metaField. - testUpdate({ - initialDocList: [doc1], - updateList: [{ - q: {measurement: "cpu"}, - u: {$set: {[metaFieldName]: {c: "C"}}}, - multi: true, - }], - resultDocList: [doc1], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); - - // Query on the metaField and modify a field that is not the metaField. - testUpdate({ - initialDocList: [doc2], - updateList: [{ - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$set: {f2: "f2"}}, - multi: true, - }], - resultDocList: [doc2], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); - - // Query on the metaField and a field that is not the metaField. 
- testUpdate({ - initialDocList: [doc1], - updateList: [ - { - q: {[metaFieldName]: {a: "A", b: "B"}, measurement: "cpu"}, + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1], + updateList: [{ + q: {measurement: "cpu"}, u: {$set: {[metaFieldName]: {c: "C"}}}, multi: true, - }, - ], - resultDocList: [doc1], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + }], + resultDocList: [doc1], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); - // Query on the metaField and modify the metaField and fields that are not the metaField. - testUpdate({ - initialDocList: [doc2], - updateList: [{ - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$set: {[metaFieldName]: {e: "E"}, f3: "f3"}, $inc: {f2: 3}, $unset: {f1: ""}}, - multi: true, - }], - resultDocList: [doc2], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + // Query on the metaField and modify a field that is not the metaField. + testUpdate({ + initialDocList: [doc2], + updateList: [{ + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$set: {f2: "f2"}}, + multi: true, + }], + resultDocList: [doc2], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + + // Query on the metaField and a field that is not the metaField. + testUpdate({ + initialDocList: [doc1], + updateList: [ + { + q: {[metaFieldName]: {a: "A", b: "B"}, measurement: "cpu"}, + u: {$set: {[metaFieldName]: {c: "C"}}}, + multi: true, + }, + ], + resultDocList: [doc1], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + + // Query on the metaField and modify the metaField and fields that are not the metaField. + testUpdate({ + initialDocList: [doc2], + updateList: [{ + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$set: {[metaFieldName]: {e: "E"}, f3: "f3"}, $inc: {f2: 3}, $unset: {f1: ""}}, + multi: true, + }], + resultDocList: [doc2], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + } // Compound query on the metaField using dot notation and modify the metaField. testUpdate({ @@ -301,17 +291,19 @@ TimeseriesTest.run((insert) => { }); // Query on a field that is not the metaField using dot notation and modify the metaField. - testUpdate({ - initialDocList: [doc1], - updateList: [{ - q: {"measurement.A": "cpu"}, - u: {$set: {[metaFieldName]: {c: "C"}}}, - multi: true, - }], - resultDocList: [doc1], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1], + updateList: [{ + q: {"measurement.A": "cpu"}, + u: {$set: {[metaFieldName]: {c: "C"}}}, + multi: true, + }], + resultDocList: [doc1], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + } // Query with an empty document (i.e update all documents in the collection). testUpdate({ @@ -346,17 +338,19 @@ TimeseriesTest.run((insert) => { }); // Rename the metaField. - testUpdate({ - initialDocList: [doc1, doc2, doc4], - updateList: [{ - q: {[metaFieldName + ".a"]: "A"}, - u: {$rename: {[metaFieldName]: "Z"}}, - multi: true, - }], - resultDocList: [doc1, doc2, doc4], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1, doc2, doc4], + updateList: [{ + q: {[metaFieldName + ".a"]: "A"}, + u: {$rename: {[metaFieldName]: "Z"}}, + multi: true, + }], + resultDocList: [doc1, doc2, doc4], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + } // Rename a subfield of the metaField. testUpdate({ @@ -372,17 +366,19 @@ TimeseriesTest.run((insert) => { }); // Rename a subfield of the metaField to something not in the metaField. 
- testUpdate({ - initialDocList: [doc1, doc2, doc4], - updateList: [{ - q: {[metaFieldName + ".a"]: "A"}, - u: {$rename: {[metaFieldName + ".a"]: "notMetaField.a"}}, - multi: true, - }], - resultDocList: [doc1, doc2, doc4], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1, doc2, doc4], + updateList: [{ + q: {[metaFieldName + ".a"]: "A"}, + u: {$rename: {[metaFieldName + ".a"]: "notMetaField.a"}}, + multi: true, + }], + resultDocList: [doc1, doc2, doc4], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + } // For all documents that have at least one 2 in its metaField array, update the first 2 // to be 100 using the positional $ operator. @@ -534,251 +530,253 @@ TimeseriesTest.run((insert) => { }); // Multiple updates, ordered: Query on the metaField and on a field that is not the metaField. - testUpdate({ - initialDocList: [doc1], - updateList: [ - { - q: {[metaFieldName]: {a: "A", b: "B"}}, - u: {$set: {[metaFieldName]: {c: "C", d: 1}}}, - multi: true, - }, - { - q: {measurement: "cpu", [metaFieldName + ".d"]: 1}, - u: {$set: {[metaFieldName + ".c"]: "CC"}}, - multi: true, - } - ], - resultDocList: [{_id: 1, [timeFieldName]: dateTime, [metaFieldName]: {c: "C", d: 1}}], - n: 1, - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1], + updateList: [ + { + q: {[metaFieldName]: {a: "A", b: "B"}}, + u: {$set: {[metaFieldName]: {c: "C", d: 1}}}, + multi: true, + }, + { + q: {measurement: "cpu", [metaFieldName + ".d"]: 1}, + u: {$set: {[metaFieldName + ".c"]: "CC"}}, + multi: true, + } + ], + resultDocList: [{_id: 1, [timeFieldName]: dateTime, [metaFieldName]: {c: "C", d: 1}}], + n: 1, + failCode: ErrorCodes.InvalidOptions, + }); - // Multiple updates, ordered: Query on the metaField and modify the metaField and a field that - // is not the metaField using dot notation. - testUpdate({ - initialDocList: [doc2], - updateList: [ - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$inc: {[metaFieldName + ".d"]: 6}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - } - ], - resultDocList: [{ - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {c: "C", d: 8}, - f: [{"k": "K", "v": "V"}], - }], - n: 1, - failCode: ErrorCodes.InvalidOptions, - }); + // Multiple updates, ordered: Query on the metaField and modify the metaField and a field + // that is not the metaField using dot notation. + testUpdate({ + initialDocList: [doc2], + updateList: [ + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$inc: {[metaFieldName + ".d"]: 6}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + } + ], + resultDocList: [{ + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C", d: 8}, + f: [{"k": "K", "v": "V"}], + }], + n: 1, + failCode: ErrorCodes.InvalidOptions, + }); - // Multiple updates, ordered: Query on the metaField and modify a field that is not the - // metaField using dot notation. - testUpdate({ - initialDocList: [doc2], - updateList: [ - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$inc: {[metaFieldName + ".d"]: 6}}, - multi: true, - } - ], - resultDocList: [doc2], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + // Multiple updates, ordered: Query on the metaField and modify a field that is not the + // metaField using dot notation. 
+ testUpdate({ + initialDocList: [doc2], + updateList: [ + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$inc: {[metaFieldName + ".d"]: 6}}, + multi: true, + } + ], + resultDocList: [doc2], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); - // Multiple updates, unordered: Query on the metaField and modify a field that is not the - // metaField using dot notation. - testUpdate({ - initialDocList: [doc2], - updateList: [ - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$inc: {[metaFieldName + ".d"]: 6}}, - multi: true, - } - ], - ordered: false, - resultDocList: [{ - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {c: "C", d: 8}, - f: [{"k": "K", "v": "V"}], - }], - n: 1, - failCode: ErrorCodes.InvalidOptions, - }); + // Multiple updates, unordered: Query on the metaField and modify a field that is not the + // metaField using dot notation. + testUpdate({ + initialDocList: [doc2], + updateList: [ + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$inc: {[metaFieldName + ".d"]: 6}}, + multi: true, + } + ], + ordered: false, + resultDocList: [{ + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C", d: 8}, + f: [{"k": "K", "v": "V"}], + }], + n: 1, + failCode: ErrorCodes.InvalidOptions, + }); - // Multiple updates, ordered: Modify the metaField, a field that is not the metaField, and the - // metaField. Only the first update should succeed. - testUpdate({ - initialDocList: [doc2], - updateList: [ - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$inc: {[metaFieldName + ".d"]: 6}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$inc: {[metaFieldName + ".d"]: 7}}, - multi: true, - } - ], - resultDocList: [{ - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {c: "C", d: 8}, - f: [{"k": "K", "v": "V"}], - }], - n: 1, - failCode: ErrorCodes.InvalidOptions, - }); + // Multiple updates, ordered: Modify the metaField, a field that is not the metaField, and + // the metaField. Only the first update should succeed. + testUpdate({ + initialDocList: [doc2], + updateList: [ + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$inc: {[metaFieldName + ".d"]: 6}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$inc: {[metaFieldName + ".d"]: 7}}, + multi: true, + } + ], + resultDocList: [{ + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C", d: 8}, + f: [{"k": "K", "v": "V"}], + }], + n: 1, + failCode: ErrorCodes.InvalidOptions, + }); - // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and the - // metaField. The first and last updates should succeed. 
- testUpdate({ - initialDocList: [doc2], - updateList: [ - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$inc: {[metaFieldName + ".d"]: 6}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$inc: {[metaFieldName + ".d"]: 7}}, - multi: true, - } - ], - resultDocList: [{ - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {c: "C", d: 15}, - f: [{"k": "K", "v": "V"}], - }], - ordered: false, - n: 2, - failCode: ErrorCodes.InvalidOptions, - }); + // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and + // the metaField. The first and last updates should succeed. + testUpdate({ + initialDocList: [doc2], + updateList: [ + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$inc: {[metaFieldName + ".d"]: 6}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$inc: {[metaFieldName + ".d"]: 7}}, + multi: true, + } + ], + resultDocList: [{ + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C", d: 15}, + f: [{"k": "K", "v": "V"}], + }], + ordered: false, + n: 2, + failCode: ErrorCodes.InvalidOptions, + }); - // Multiple updates, unordered: Query on the metaField and modify a field that is not the - // metaField using dot notation. - testUpdate({ - initialDocList: [doc2], - updateList: [ - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$inc: {[metaFieldName + ".d"]: 6}}, - multi: true, - } - ], - ordered: false, - resultDocList: [{ - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {c: "C", d: 8}, - f: [{"k": "K", "v": "V"}], - }], - n: 1, - failCode: ErrorCodes.InvalidOptions, - }); + // Multiple updates, unordered: Query on the metaField and modify a field that is not the + // metaField using dot notation. + testUpdate({ + initialDocList: [doc2], + updateList: [ + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$inc: {[metaFieldName + ".d"]: 6}}, + multi: true, + } + ], + ordered: false, + resultDocList: [{ + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C", d: 8}, + f: [{"k": "K", "v": "V"}], + }], + n: 1, + failCode: ErrorCodes.InvalidOptions, + }); - // Multiple updates, ordered: Modify the metaField, a field that is not the metaField, and the - // metaField. Only the first update should succeed. - testUpdate({ - initialDocList: [doc2], - updateList: [ - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$inc: {[metaFieldName + ".d"]: 6}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$inc: {[metaFieldName + ".d"]: 7}}, - multi: true, - } - ], - resultDocList: [{ - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {c: "C", d: 8}, - f: [{"k": "K", "v": "V"}], - }], - n: 1, - failCode: ErrorCodes.InvalidOptions, - }); + // Multiple updates, ordered: Modify the metaField, a field that is not the metaField, and + // the metaField. Only the first update should succeed. 
+ testUpdate({ + initialDocList: [doc2], + updateList: [ + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$inc: {[metaFieldName + ".d"]: 6}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$inc: {[metaFieldName + ".d"]: 7}}, + multi: true, + } + ], + resultDocList: [{ + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C", d: 8}, + f: [{"k": "K", "v": "V"}], + }], + n: 1, + failCode: ErrorCodes.InvalidOptions, + }); - // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and the - // metaField. - testUpdate({ - initialDocList: [doc2], - updateList: [ - { - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {$inc: {[metaFieldName + ".d"]: 6}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaFieldName]: {c: "C", d: 8}}, - u: {$inc: {[metaFieldName + ".d"]: 7}}, - multi: true, - } - ], - resultDocList: [{ - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {c: "C", d: 15}, - f: [{"k": "K", "v": "V"}], - }], - ordered: false, - n: 2, - failCode: ErrorCodes.InvalidOptions, - }); + // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and + // the metaField. + testUpdate({ + initialDocList: [doc2], + updateList: [ + { + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {$inc: {[metaFieldName + ".d"]: 6}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaFieldName]: {c: "C", d: 8}}, + u: {$inc: {[metaFieldName + ".d"]: 7}}, + multi: true, + } + ], + resultDocList: [{ + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C", d: 15}, + f: [{"k": "K", "v": "V"}], + }], + ordered: false, + n: 2, + failCode: ErrorCodes.InvalidOptions, + }); + } // Multiple unordered updates on multiple matching documents. testUpdate({ @@ -845,71 +843,78 @@ TimeseriesTest.run((insert) => { n: 2 }); - // Query for documents using $jsonSchema with the metaField in dot notation required. - testUpdate({ - initialDocList: [doc1, doc2, doc3], - updateList: [{ - q: {"$jsonSchema": {"required": [metaFieldName + ".a"]}}, - u: {$set: {[metaFieldName]: "a"}}, - multi: true - }], - resultDocList: [doc1, doc2, doc3], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + // Query for documents using $jsonSchema with the metaField in dot notation required. + testUpdate({ + initialDocList: [doc1, doc2, doc3], + updateList: [{ + q: {"$jsonSchema": {"required": [metaFieldName + ".a"]}}, + u: {$set: {[metaFieldName]: "a"}}, + multi: true + }], + resultDocList: [doc1, doc2, doc3], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + } // Query for documents using $jsonSchema with a field that is not the metaField required. 
- testUpdate({ - initialDocList: [doc1, doc2, doc3], - updateList: [{ - q: {"$jsonSchema": {"required": [metaFieldName, timeFieldName]}}, - u: {$set: {[metaFieldName]: "a"}}, - multi: true - }], - resultDocList: [doc1, doc2, doc3], - n: 0, - failCode: ErrorCodes.InvalidOptions - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1, doc2, doc3], + updateList: [{ + q: {"$jsonSchema": {"required": [metaFieldName, timeFieldName]}}, + u: {$set: {[metaFieldName]: "a"}}, + multi: true + }], + resultDocList: [doc1, doc2, doc3], + n: 0, + failCode: ErrorCodes.InvalidOptions + }); + } const nestedMetaObj = {_id: 6, [timeFieldName]: dateTime, [metaFieldName]: {[metaFieldName]: "A"}}; - - // Query for documents using $jsonSchema with the metaField required and a required subfield of - // the metaField with the same name as the metaField. - testUpdate({ - initialDocList: [doc1, nestedMetaObj], - updateList: [{ - q: { - "$jsonSchema": { - "required": [metaFieldName], - "properties": {[metaFieldName]: {"required": [metaFieldName]}} - } - }, - u: {$set: {[metaFieldName]: "a"}}, - multi: true - }], - resultDocList: [doc1, {_id: 6, [timeFieldName]: dateTime, [metaFieldName]: "a"}], - n: 1 - }); + if (!arbitraryUpdatesEnabled) { + // Query for documents using $jsonSchema with the metaField required and a required + // subfield of the metaField with the same name as the metaField. + testUpdate({ + initialDocList: [doc1, nestedMetaObj], + updateList: [{ + q: { + "$jsonSchema": { + "required": [metaFieldName], + "properties": {[metaFieldName]: {"required": [metaFieldName]}} + } + }, + u: {$set: {[metaFieldName]: "a"}}, + multi: true + }], + resultDocList: [doc1, {_id: 6, [timeFieldName]: dateTime, [metaFieldName]: "a"}], + n: 1 + }); + } // Query for documents using $jsonSchema with the metaField required and an optional field that // is not the metaField. - testUpdate({ - initialDocList: [doc1, nestedMetaObj], - updateList: [{ - q: { - "$jsonSchema": { - "required": [metaFieldName], - "properties": {"measurement": {description: "can be any value"}} - } - }, - u: {$set: {[metaFieldName]: "a"}}, - multi: true - }], - resultDocList: [doc1, nestedMetaObj], - n: 0, - failCode: ErrorCodes.InvalidOptions - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1, nestedMetaObj], + updateList: [{ + q: { + "$jsonSchema": { + "required": [metaFieldName], + "properties": {"measurement": {description: "can be any value"}} + } + }, + u: {$set: {[metaFieldName]: "a"}}, + multi: true + }], + resultDocList: [doc1, nestedMetaObj], + n: 0, + failCode: ErrorCodes.InvalidOptions + }); + } // Multiple updates, unordered: Modify the metaField of all documents using arrayFilters. testUpdate({ @@ -1030,18 +1035,20 @@ TimeseriesTest.run((insert) => { }); // Do the same test case as above but with upsert:true, which should fail. - testUpdate({ - initialDocList: [doc1, doc4, doc5], - updateList: [{ - q: {[metaFieldName]: "Z"}, - u: {$set: {[metaFieldName]: 5}}, - multi: true, - upsert: true, - }], - resultDocList: [doc1, doc4, doc5], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc1, doc4, doc5], + updateList: [{ + q: {[metaFieldName]: "Z"}, + u: {$set: {[metaFieldName]: 5}}, + multi: true, + upsert: true, + }], + resultDocList: [doc1, doc4, doc5], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + } // Variables defined in the let option can only be used in the update if the update is an // pipeline update. 
Since this update is an update document, the literal name of the variable @@ -1058,94 +1065,98 @@ TimeseriesTest.run((insert) => { n: 1, }); - /************************** Tests updating with an update pipeline **************************/ - // Modify the metaField, which should fail since update pipelines are not supported. - testUpdate({ - initialDocList: [doc1], - updateList: [{ - q: {}, - u: [ - {$addFields: {[metaFieldName + ".c"]: "C", [metaFieldName + ".e"]: "E"}}, - {$unset: metaFieldName + ".e"} - ], - multi: true, - }], - resultDocList: [doc1], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + /************************** Tests updating with an update pipeline ************************/ + // Modify the metaField, which should fail since update pipelines are not supported. + testUpdate({ + initialDocList: [doc1], + updateList: [{ + q: {}, + u: [ + {$addFields: {[metaFieldName + ".c"]: "C", [metaFieldName + ".e"]: "E"}}, + {$unset: metaFieldName + ".e"} + ], + multi: true, + }], + resultDocList: [doc1], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); - /************************ Tests updating with a replacement document *************************/ - // Replace a document to have no metaField, which should fail since updates with replacement - // documents are not supported. - testUpdate({ - initialDocList: [doc2], - updateList: [{ - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {f2: {e: "E", f: "F"}, f3: 7}, - multi: true, - }], - resultDocList: [doc2], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + /************************ Tests updating with a replacement document **********************/ + // Replace a document to have no metaField, which should fail since updates with replacement + // documents are not supported. + testUpdate({ + initialDocList: [doc2], + updateList: [{ + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {f2: {e: "E", f: "F"}, f3: 7}, + multi: true, + }], + resultDocList: [doc2], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); - // Replace a document with an empty document, which should fail since updates with replacement - // documents are not supported. - testUpdate({ - initialDocList: [doc2], - updateList: [{ - q: {[metaFieldName]: {c: "C", d: 2}}, - u: {}, - multi: true, - }], - resultDocList: [doc2], - n: 0, - failCode: ErrorCodes.InvalidOptions, - }); + // Replace a document with an empty document, which should fail since updates with + // replacement documents are not supported. + testUpdate({ + initialDocList: [doc2], + updateList: [{ + q: {[metaFieldName]: {c: "C", d: 2}}, + u: {}, + multi: true, + }], + resultDocList: [doc2], + n: 0, + failCode: ErrorCodes.InvalidOptions, + }); + } /*********************** Tests updating a collection with no metaField. **********************/ // Query on a field which is not the (nonexistent) metaField. - testUpdate({ - initialDocList: [doc3], - updateList: [{ - q: {f: "F"}, - u: {}, - multi: true, - }], - resultDocList: [doc3], - n: 0, - failCode: ErrorCodes.InvalidOptions, - hasMetaField: false, - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + initialDocList: [doc3], + updateList: [{ + q: {f: "F"}, + u: {$set: {f: "FF"}}, + multi: true, + }], + resultDocList: [doc3], + n: 0, + failCode: ErrorCodes.InvalidOptions, + hasMetaField: false, + }); - // Query on all documents and update them to be empty documents. 
- testUpdate({ - initialDocList: [doc3], - updateList: [{ - q: {}, - u: {}, - multi: true, - }], - resultDocList: [doc3], - n: 0, - failCode: ErrorCodes.InvalidOptions, - hasMetaField: false, - }); + // Query on all documents and update them to be empty documents. + testUpdate({ + initialDocList: [doc3], + updateList: [{ + q: {}, + u: {$set: {f: "FF"}}, + multi: true, + }], + resultDocList: [doc3], + n: 0, + failCode: ErrorCodes.InvalidOptions, + hasMetaField: false, + }); - // Query on all documents and update them to be nonempty documents. - testUpdate({ - initialDocList: [doc3], - updateList: [{ - q: {}, - u: {f: "FF"}, - multi: true, - }], - resultDocList: [doc3], - n: 0, - failCode: ErrorCodes.InvalidOptions, - hasMetaField: false, - }); + // Query on all documents and update them to be nonempty documents. + testUpdate({ + initialDocList: [doc3], + updateList: [{ + q: {}, + u: {$set: {f: "FF"}}, + multi: true, + }], + resultDocList: [doc3], + n: 0, + failCode: ErrorCodes.InvalidOptions, + hasMetaField: false, + }); + } /************************ Tests updating a collection using collation. ************************/ const collationDoc1 = {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: "café"}; @@ -1187,4 +1198,3 @@ TimeseriesTest.run((insert) => { n: 1, }); }); -}()); diff --git a/jstests/core/timeseries/timeseries_update_concurrent.js b/jstests/core/timeseries/timeseries_update_concurrent.js index c6a7b41aadf79..85cb4fa02c874 100644 --- a/jstests/core/timeseries/timeseries_update_concurrent.js +++ b/jstests/core/timeseries/timeseries_update_concurrent.js @@ -21,10 +21,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load("jstests/libs/curop_helpers.js"); load("jstests/libs/parallel_shell_helpers.js"); @@ -118,10 +115,11 @@ validateUpdateIndex( // Attempt to update a document in a collection that has been replaced with a new time-series // collection with a different metaField. -validateUpdateIndex( - docs, - [{q: {[metaFieldName]: {a: "B"}}, u: {$set: {[metaFieldName]: {c: "C"}}}, multi: true}], - testCases.REPLACE_METAFIELD, - ErrorCodes.InvalidOptions, - "meta"); -})(); +if (!TimeseriesTest.arbitraryUpdatesEnabled(db)) { + validateUpdateIndex( + docs, + [{q: {[metaFieldName]: {a: "B"}}, u: {$set: {[metaFieldName]: {c: "C"}}}, multi: true}], + testCases.REPLACE_METAFIELD, + ErrorCodes.InvalidOptions, + "meta"); +} diff --git a/jstests/core/timeseries/timeseries_update_hint.js b/jstests/core/timeseries/timeseries_update_hint.js index 98ecca8e872e9..65014827a3e12 100644 --- a/jstests/core/timeseries/timeseries_update_hint.js +++ b/jstests/core/timeseries/timeseries_update_hint.js @@ -19,10 +19,6 @@ * uses_parallel_shell, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); load("jstests/libs/curop_helpers.js"); load('jstests/libs/parallel_shell_helpers.js'); @@ -355,5 +351,4 @@ testUpdateHintFailed({ resultDocList: [hintDoc1, hintDoc2, hintDoc3], nModifiedBuckets: 0, failCode: ErrorCodes.BadValue, -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/timeseries/timeseries_update_multi.js b/jstests/core/timeseries/timeseries_update_multi.js index 5f1bc9ae0de1e..1cd26c3746236 100644 --- a/jstests/core/timeseries/timeseries_update_multi.js +++ b/jstests/core/timeseries/timeseries_update_multi.js @@ -6,9 +6,9 @@ * requires_multi_updates, * # We need a timeseries collection. 
* requires_timeseries, + * requires_non_retryable_writes, * featureFlagTimeseriesUpdatesSupport, - * # TODO (SERVER-73454): Re-enable the tests. - * __TEMPORARILY_DISABLED__, + * featureFlagLargeBatchedOperations, * ] */ @@ -27,14 +27,20 @@ assert.commandWorked(testDB.dropDatabase()); /** * Confirms that a set of updates returns the expected set of documents. + * + * If this is an upsert and we expect a document to be inserted, 'upsertedDoc' must be non-null. We + * will use the 'upsertedId' returned from the update command unioned with 'upsertedDoc' to + * construct the inserted document. This will be added to 'resultDocList' to validate the + * collection's contents. */ function testUpdate({ initialDocList, createCollectionWithMetaField = true, updateList, resultDocList, - n, - nModified = n, + nMatched, + nModified = nMatched, + upsertedDoc, failCode, }) { const coll = testDB.getCollection(collNamePrefix + count++); @@ -47,303 +53,867 @@ function testUpdate({ assert.commandWorked(coll.insert(initialDocList)); const updateCommand = {update: coll.getName(), updates: updateList}; - const res = failCode ? assert.commandFailedWithCode(testDB.runCommand(updateCommand), failCode) - : assert.commandWorked(testDB.runCommand(updateCommand)); + const res = failCode ? assert.commandFailedWithCode(coll.runCommand(updateCommand), failCode) + : assert.commandWorked(coll.runCommand(updateCommand)); + + if (!failCode) { + if (upsertedDoc) { + assert.eq(1, res.n, tojson(res)); + assert.eq(0, res.nModified, tojson(res)); + assert(res.hasOwnProperty("upserted"), tojson(res)); + assert.eq(1, res.upserted.length); + + if (upsertedDoc.hasOwnProperty("_id")) { + assert.eq(upsertedDoc._id, res.upserted[0]._id); + } else { + upsertedDoc["_id"] = res.upserted[0]._id; + } + resultDocList.push(upsertedDoc); + } else { + assert.eq(nMatched, res.n); + assert.eq(nModified, res.nModified); + assert(!res.hasOwnProperty("upserted"), tojson(res)); + } + } - assert.eq(n, res.n); - assert.eq(nModified, res.nModified); const resDocs = coll.find().toArray(); assert.eq(resDocs.length, resultDocList.length); - resultDocList.forEach(resultDoc => { - assert.docEq(resultDoc, - coll.findOne({_id: resultDoc._id}), - "Expected document " + resultDoc["_id"] + - " not found in result collection:" + tojson(resDocs)); - }); + assert.sameMembers( + resultDocList, resDocs, "Collection contents did not match expected after update"); } -const doc_a_b_no_metrics = { +const doc_id_1_a_b_no_metrics = { _id: 1, [timeFieldName]: dateTime, [metaFieldName]: {a: "A", b: "B"}, }; -const doc_a_b_array_metric = { +const doc_id_2_a_b_array_metric = { _id: 2, [timeFieldName]: dateTime, [metaFieldName]: {a: "A", b: "B"}, f: [{"k": "K", "v": "V"}], }; -const doc_a_b_string_metric = { +const doc_id_3_a_b_string_metric = { _id: 3, [timeFieldName]: dateTime, [metaFieldName]: {a: "A", b: "B"}, f: "F", }; -const doc_no_meta_string_metric = { +const doc_id_4_no_meta_string_metric = { _id: 4, [timeFieldName]: dateTime, f: "F", }; -const doc_a_c_array_metric_1 = { +const doc_id_5_a_c_array_metric = { _id: 5, [timeFieldName]: dateTime, [metaFieldName]: {a: "A", c: "C"}, f: [2, 3], }; -const doc_a_c_array_metric_2 = { +const doc_id_6_a_c_array_metric = { _id: 6, [timeFieldName]: dateTime, [metaFieldName]: {a: "A", c: "C"}, f: [1, 10], }; -const doc_no_meta_int_metric = { +const doc_id_7_no_meta_int_metric = { _id: 7, [timeFieldName]: dateTime, g: 1, }; +const doc_id_8_array_meta = { + _id: 8, + [timeFieldName]: dateTime, + [metaFieldName]: [1, 2, 3, 4], + f: [4, 3, 
2, 1], +}; /** * Tests op-style updates */ // Query on the _id field and modify the metaField. -testUpdate({ - initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric], - updateList: [{ - q: {_id: {$lt: 10}}, - u: {$set: {[metaFieldName]: {c: "C"}}}, - multi: true, - }], - resultDocList: [ - {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: {c: "C"}}, - { - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {c: "C"}, - f: [{"k": "K", "v": "V"}], - }, - ], - n: 2, -}); +(function testMetricFieldQueryMetaFieldUpdate() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + updateList: [{ + q: {_id: {$lt: 10}}, + u: {$set: {[metaFieldName]: {c: "C"}}}, + multi: true, + }], + resultDocList: [ + {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: {c: "C"}}, + { + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C"}, + f: [{"k": "K", "v": "V"}], + }, + ], + nMatched: 2, + }); +})(); + +// Query doesn't match any docs. +(function testZeroMeasurementUpdate() { + testUpdate({ + initialDocList: + [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric], + updateList: [{ + q: {someField: "doesNotExist"}, + u: {$set: {[metaFieldName]: {c: "C"}}}, + multi: true, + }], + resultDocList: + [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric], + nMatched: 0, + }); +})(); + +// No-op update. +(function testNoopUpdate() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + updateList: [{ + q: {}, + u: {$set: {[metaFieldName]: {a: "A", b: "B"}}}, + multi: true, + }], + resultDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + nMatched: 2, + nModified: 0 + }); +})(); // Query on the metaField and modify the timeField. -testUpdate({ - initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric], - updateList: [{ - q: {[metaFieldName]: {a: "A", b: "B"}}, - u: {$set: {[timeFieldName]: dateTimeUpdated}}, - multi: true, - }], - resultDocList: [ - { - _id: 1, - [timeFieldName]: dateTimeUpdated, - [metaFieldName]: {a: "A", b: "B"}, - }, - { - _id: 2, - [timeFieldName]: dateTimeUpdated, - [metaFieldName]: {a: "A", b: "B"}, - f: [{"k": "K", "v": "V"}], - }, - ], - n: 2, -}); +// Skip tests changing the shard key value in sharding. +if (!db.getMongo().isMongos()) { + (function testMetaFieldQueryTimeFieldUpdate() { + testUpdate({ + initialDocList: + [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", b: "B"}}, + u: {$set: {[timeFieldName]: dateTimeUpdated}}, + multi: true, + }], + resultDocList: [ + { + _id: 1, + [timeFieldName]: dateTimeUpdated, + [metaFieldName]: {a: "A", b: "B"}, + }, + { + _id: 2, + [timeFieldName]: dateTimeUpdated, + [metaFieldName]: {a: "A", b: "B"}, + f: [{"k": "K", "v": "V"}], + }, + doc_id_5_a_c_array_metric + ], + nMatched: 2, + }); + })(); +} + +// Query on the metaField and a metric field. +(function testMetaFieldQueryMetricFieldMetric() { + testUpdate({ + initialDocList: [doc_id_3_a_b_string_metric, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", b: "B"}, f: "F"}, + u: {$set: {[metaFieldName]: {c: "C"}}}, + multi: true, + }], + resultDocList: [ + { + _id: 3, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C"}, + f: "F", + }, + doc_id_2_a_b_array_metric + ], + nMatched: 1, + }); +})(); + +// Query on the metaField and modify the metaField and a metric field. 
+(function testMetaFieldQueryMetaAndMetricFieldUpdate() { + testUpdate({ + initialDocList: [doc_id_3_a_b_string_metric, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", b: "B"}}, + u: {$set: {[metaFieldName]: {c: "C"}, f: "FF"}}, + multi: true, + }], + resultDocList: [ + { + _id: 3, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C"}, + f: "FF", + }, + { + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {c: "C"}, + f: "FF", + } + ], + nMatched: 2, + }); +})(); -// Query on the metric field and remove the timeField. // This command will fail because all time-series collections require a time field. -testUpdate({ - initialDocList: [doc_a_b_string_metric, doc_a_c_array_metric_1], - updateList: [{ - q: {f: "F"}, - u: {$unset: {[timeFieldName]: ""}}, - multi: true, - }], - resultDocList: [ - doc_a_b_string_metric, - doc_a_c_array_metric_1, - ], - n: 0, - failCode: ErrorCodes.InvalidOptions, -}); +// Skip tests changing the shard key value in sharding. +if (!db.getMongo().isMongos()) { + (function testRemoveTimeField() { + testUpdate({ + initialDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric], + updateList: [{ + q: {f: "F"}, + u: {$unset: {[timeFieldName]: ""}}, + multi: true, + }], + resultDocList: [ + doc_id_3_a_b_string_metric, + doc_id_5_a_c_array_metric, + ], + nMatched: 0, + failCode: ErrorCodes.BadValue, + }); + })(); +} + +// This command will fail because the time field must be a timestamp. +// Skip tests changing the shard key value in sharding. +if (!db.getMongo().isMongos()) { + (function testChangeTimeFieldType() { + testUpdate({ + initialDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric], + updateList: [{ + q: {f: "F"}, + u: {$set: {[timeFieldName]: "hello"}}, + multi: true, + }], + resultDocList: [ + doc_id_3_a_b_string_metric, + doc_id_5_a_c_array_metric, + ], + nMatched: 0, + failCode: ErrorCodes.BadValue, + }); + })(); +} // Query on the time field and remove the metaField. -testUpdate({ - initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric, doc_a_c_array_metric_1], - updateList: [{ - q: {[timeField]: dateTime}, - u: {$unset: {[metaFieldName]: ""}, multi: true}, - multi: true, - }], - resultDocList: [ - { - _id: 1, - [timeFieldName]: dateTime, - }, - { - _id: 2, - [timeFieldName]: dateTime, - f: [{"k": "K", "v": "V"}], - }, - { - _id: 5, - [timeFieldName]: dateTime, - f: [2, 3], - }, - ], - n: 3, -}); +(function testTimeFieldQueryRemoveMetaField() { + testUpdate({ + initialDocList: + [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric], + updateList: [{ + q: {[timeFieldName]: dateTime}, + u: {$unset: {[metaFieldName]: ""}}, + multi: true, + }], + resultDocList: [ + { + _id: 1, + [timeFieldName]: dateTime, + }, + { + _id: 2, + [timeFieldName]: dateTime, + f: [{"k": "K", "v": "V"}], + }, + { + _id: 5, + [timeFieldName]: dateTime, + f: [2, 3], + }, + ], + nMatched: 3, + }); +})(); + +(function testRenameMetaField() { + // Rename the metaField. + testUpdate({ + initialDocList: [doc_id_3_a_b_string_metric], + updateList: [{ + q: {}, + u: {$rename: {[metaFieldName]: "Z"}}, + multi: true, + }], + resultDocList: [ + { + _id: 3, + [timeFieldName]: dateTime, + Z: {a: "A", b: "B"}, + f: "F", + }, + ], + nMatched: 1, + }); +})(); + +// Rename a subfield of the metaField to something not in the metaField. 
+(function testRenameMetaSubfield() { + testUpdate({ + initialDocList: [doc_id_3_a_b_string_metric], + updateList: [{ + q: {[metaFieldName + ".a"]: "A"}, + u: {$rename: {[metaFieldName + ".a"]: "Z.a"}}, + multi: true, + }], + resultDocList: [ + { + _id: 3, + [timeFieldName]: dateTime, + [metaFieldName]: {b: "B"}, + Z: {a: "A"}, + f: "F", + }, + ], + nMatched: 1, + }); +})(); // Expand a metric field. -testUpdate({ - initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric], - updateList: [{ - q: {[metaFieldName]: {a: "A", b: "B"}}, - u: {$set: {f: "x".repeat(5 * 1024 * 1024)}}, - multi: true, - }], - resultDocList: [ - { - _id: 1, - [timeFieldName]: dateTime, - [metaFieldName]: {a: "A", b: "B"}, - f: "x".repeat(5 * 1024 * 1024), - }, - { - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {a: "A", b: "B"}, - f: "x".repeat(5 * 1024 * 1024), - }, - ], - n: 2, -}); +(function testExpandMetricField() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", b: "B"}}, + u: {$set: {f: "x".repeat(3 * 1024 * 1024)}}, + multi: true, + }], + resultDocList: [ + { + _id: 1, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + f: "x".repeat(3 * 1024 * 1024), + }, + { + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + f: "x".repeat(3 * 1024 * 1024), + }, + ], + nMatched: 2, + }); +})(); // Change the type of an existing field. -testUpdate({ - initialDocList: [doc_a_b_array_metric, doc_a_b_string_metric], - updateList: [{ - q: {[metaFieldName]: {a: "A", b: "B"}}, - u: {$set: {f: "X"}}, - multi: true, - }], - resultDocList: [ - { - _id: 2, - [timeFieldName]: dateTime, - [metaFieldName]: {a: "A", b: "B"}, - f: "X", - }, - { - _id: 3, - [timeFieldName]: dateTime, - [metaFieldName]: {a: "A", b: "B"}, - f: "X", - }, - ], - n: 2, -}); +(function testChangeExistingFieldType() { + testUpdate({ + initialDocList: [doc_id_2_a_b_array_metric, doc_id_3_a_b_string_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", b: "B"}}, + u: {$set: {f: "X"}}, + multi: true, + }], + resultDocList: [ + { + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + f: "X", + }, + { + _id: 3, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + f: "X", + }, + ], + nMatched: 2, + }); +})(); // Add a new field. -testUpdate({ - initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric, doc_a_b_string_metric], - updateList: [{ - q: {_id: {$lt: 3}}, - u: {$set: {g: 42}}, - multi: true, - }], - resultDocList: [ - { - _id: 1, - [timeFieldName]: dateTime, - [metaFieldName]: {a: "A", b: "B"}, - g: 42, - }, - { - _id: 2, +(function testAddNewField() { + testUpdate({ + initialDocList: + [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_3_a_b_string_metric], + updateList: [{ + q: {_id: {$lt: 3}}, + u: {$set: {g: 42}}, + multi: true, + }], + resultDocList: [ + { + _id: 1, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + g: 42, + }, + { + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + f: [{"k": "K", "v": "V"}], + g: 42, + }, + doc_id_3_a_b_string_metric + ], + nMatched: 2, + }); +})(); + +// Update a metric field with a positional operator. 
+(function testArrayModifier() { + testUpdate({ + initialDocList: + [doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric, doc_id_6_a_c_array_metric], + updateList: [{ + q: {f: {$gt: 2}}, + u: {$set: {'f.$': 20}}, + multi: true, + }], + resultDocList: [ + doc_id_2_a_b_array_metric, + { + _id: 5, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", c: "C"}, + f: [2, 20], + }, + { + _id: 6, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", c: "C"}, + f: [1, 20], + } + ], + nMatched: 2, + }); +})(); + +// Update the meta field with a positional operator. +(function testMetaFieldArrayModifier() { + testUpdate({ + initialDocList: [doc_id_8_array_meta, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName]: {$gt: 2}}, + u: {$set: {[metaFieldName + '.$']: 20, f: 10}}, + multi: true, + }], + resultDocList: [ + {_id: 8, [timeFieldName]: dateTime, [metaFieldName]: [1, 2, 20, 4], f: 10}, + doc_id_2_a_b_array_metric + ], + nMatched: 1, + }); +})(); + +// Update meta and metric fields with a positional operator. +(function testMetaAndMetricFieldArrayModifier() { + testUpdate({ + initialDocList: [doc_id_8_array_meta, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName]: {$gt: 2}, f: {$gt: 2}}, + u: {$set: {[metaFieldName + '.$']: 20, 'f.$': 10}}, + multi: true, + }], + resultDocList: [ + {_id: 8, [timeFieldName]: dateTime, [metaFieldName]: [20, 2, 3, 4], f: [10, 3, 2, 1]}, + doc_id_2_a_b_array_metric + ], + nMatched: 1, + }); +})(); + +// Empty query and update a metric field using a positional operator. +(function testArrayModifierNoFilter() { + testUpdate({ + initialDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric], + updateList: [{ + q: {}, + u: {$set: {'f.$': 20}}, + multi: true, + }], + resultDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric], + failCode: ErrorCodes.BadValue, + }); +})(); + +// Query on the meta field and update a metric field using a positional operator. +(function testArrayModifierMetaFilter() { + testUpdate({ + initialDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", c: "C"}}, + u: {$set: {'f.$': 20}}, + multi: true, + }], + resultDocList: [doc_id_3_a_b_string_metric, doc_id_5_a_c_array_metric], + failCode: ErrorCodes.BadValue, + }); +})(); + +(function testChangeArrayElementType() { + testUpdate({ + initialDocList: + [doc_id_2_a_b_array_metric, doc_id_5_a_c_array_metric, doc_id_6_a_c_array_metric], + updateList: [{ + q: {f: {$lte: 2}}, + u: {$set: {'f.$': {k: "v"}}}, + multi: true, + }], + resultDocList: [ + doc_id_2_a_b_array_metric, + { + _id: 5, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", c: "C"}, + f: [{k: "v"}, 3], + }, + { + _id: 6, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", c: "C"}, + f: [{k: "v"}, 10], + } + ], + nMatched: 2, + }); +})(); + +(function testChangeMeasurementId() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateList: [{ + q: {}, + u: {$set: {_id: 10}}, + multi: true, + }], + resultDocList: [{ + _id: 10, [timeFieldName]: dateTime, [metaFieldName]: {a: "A", b: "B"}, - f: [{"k": "K", "v": "V"}], - g: 42, - }, - doc_a_b_string_metric - ], - n: 2, -}); + }], + nMatched: 1 + }); +})(); + +// Use a non-idempotent update to insert the updated measurement later in the index to verify +// handling of the Halloween Problem. 
+(function testHalloweenProblem() { + testUpdate({ + initialDocList: [doc_id_2_a_b_array_metric, doc_id_3_a_b_string_metric], + updateList: [{ + q: {}, + u: {$set: {[metaFieldName + '.a']: "B"}, $inc: {x: 1}}, + multi: true, + }], + resultDocList: [ + { + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "B", b: "B"}, + f: [{"k": "K", "v": "V"}], + x: 1, + }, + { + _id: 3, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "B", b: "B"}, + f: "F", + x: 1, + }, + ], + nMatched: 2, + }); +})(); /** * Tests pipeline-style updates */ // Add a field of the sum of an array field using aggregation pipeline. -testUpdate({ - initialDocList: [doc_a_c_array_metric_1, doc_a_c_array_metric_2], - updateList: [{ - q: {[metaFieldName]: {a: "A", c: "C"}}, - u: [{$set: {sumF: {$sum: "$f"}}}], - multi: true, - }], - resultDocList: [ - { - _id: 5, - [timeFieldName]: dateTime, - [metaFieldName]: {a: "A", c: "C"}, - f: [2, 3], - sumF: 5, - }, - { - _id: 6, - [timeFieldName]: dateTime, - [metaFieldName]: {a: "A", c: "C"}, - f: [5, 6], - sumF: 11, - }, - ], - n: 2, -}); +(function testUpdatePipelineArrayAggregation() { + testUpdate({ + initialDocList: [doc_id_5_a_c_array_metric, doc_id_6_a_c_array_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", c: "C"}}, + u: [{$set: {sumF: {$sum: "$f"}}}], + multi: true, + }], + resultDocList: [ + { + _id: 5, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", c: "C"}, + f: [2, 3], + sumF: 5, + }, + { + _id: 6, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", c: "C"}, + f: [1, 10], + sumF: 11, + }, + ], + nMatched: 2, + }); +})(); // Add a new field for all measurements. -testUpdate({ - initialDocList: [doc_no_meta_string_metric, doc_no_meta_int_metric], - createCollectionWithMetaField: false, - updateList: [{ - q: {}, - u: [{$set: {newField: true}}], - multi: true, - }], - resultDocList: [ - { - _id: 4, - [timeFieldName]: dateTime, - f: "F", - newField: true, - }, - { - _id: 7, - [timeFieldName]: dateTime, - g: 1, - newField: true, - }, - ], - n: 2, -}); +(function testUpdatePipelineAddNewField() { + testUpdate({ + initialDocList: [doc_id_4_no_meta_string_metric, doc_id_7_no_meta_int_metric], + createCollectionWithMetaField: false, + updateList: [{ + q: {}, + u: [{$set: {newField: true}}], + multi: true, + }], + resultDocList: [ + { + _id: 4, + [timeFieldName]: dateTime, + f: "F", + newField: true, + }, + { + _id: 7, + [timeFieldName]: dateTime, + g: 1, + newField: true, + }, + ], + nMatched: 2, + }); +})(); + +// Cause a bucket to be split into multiple new buckets by an update, i.e. update documents in the +// same bucket to belong in different buckets. +(function testSplitBucketWithUpdate() { + testUpdate({ + initialDocList: + [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric, doc_id_3_a_b_string_metric], + updateList: [{ + q: {}, + u: [{$set: {[metaFieldName]: "$f"}}], + multi: true, + }], + resultDocList: [ + { + _id: 1, + [timeFieldName]: dateTime, + }, + { + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: [{"k": "K", "v": "V"}], + f: [{"k": "K", "v": "V"}], + }, + { + _id: 3, + [timeFieldName]: dateTime, + [metaFieldName]: "F", + f: "F", + } + ], + nMatched: 3, + }); +})(); + +// Only touch the meta field in a pipeline update. 
+(function testUpdatePipelineOnlyTouchMetaField() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics, doc_id_6_a_c_array_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", b: "B"}}, + u: [{$set: {[metaFieldName]: "$" + metaFieldName + ".a"}}], + multi: true, + }], + resultDocList: + [{_id: 1, [timeFieldName]: dateTime, [metaFieldName]: "A"}, doc_id_6_a_c_array_metric], + nMatched: 1, + }); +})(); /** * Tests upsert with multi:true. */ -testUpdate({ - initialDocList: [doc_a_b_no_metrics, doc_a_b_array_metric], - updateList: [{ - q: {[metaFieldName]: {z: "Z"}}, - u: {$set: {[timeFieldName]: dateTime}}, - upsert: true, - multi: true, - }], - resultDocList: [ - doc_a_b_no_metrics, - doc_a_b_array_metric, - {[timeFieldName]: dateTime}, - ], - n: 1, - nModified: 0, -}); +// Run an upsert that doesn't include an _id. +// Skip upsert tests in sharding as the query has to be on the shard key field. +if (!db.getMongo().isMongos()) { + (function testUpsertWithNoId() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName]: {z: "Z"}}, + u: {$set: {[timeFieldName]: dateTime}}, + upsert: true, + multi: true, + }], + resultDocList: [ + doc_id_1_a_b_no_metrics, + doc_id_2_a_b_array_metric, + ], + upsertedDoc: {[metaFieldName]: {z: "Z"}, [timeFieldName]: dateTime}, + }); + })(); + // Run an upsert that includes an _id. + (function testUpsertWithId() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateList: [{ + q: {_id: 100}, + u: {$set: {[timeFieldName]: dateTime}}, + upsert: true, + multi: true, + }], + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + upsertedDoc: {_id: 100, [timeFieldName]: dateTime}, + }); + })(); + + // Run an upsert that updates documents and skips the upsert. + (function testUpsertUpdatesDocs() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName + ".a"]: "A"}, + u: {$set: {f: 10}}, + upsert: true, + multi: true, + }], + resultDocList: [ + { + _id: 1, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + f: 10, + }, + { + _id: 2, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + f: 10, + } + ], + nMatched: 2, + }); + })(); + + // Run an upsert that matches documents with no-op updates and skips the upsert. + (function testUpsertMatchesDocs() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName + ".a"]: "A"}, + u: {$set: {[timeFieldName]: dateTime}}, + upsert: true, + multi: true, + }], + resultDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + nMatched: 2, + nModified: 0, + }); + })(); + + // Run an upsert that matches a bucket but no documents in it, and inserts the document into a + // bucket with the same parameters. + (function testUpsertIntoMatchedBucket() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + updateList: [{ + q: {[metaFieldName]: {a: "A", b: "B"}, f: 111}, + u: {$set: {[timeFieldName]: dateTime}}, + upsert: true, + multi: true, + }], + upsertedDoc: {[metaFieldName]: {a: "A", b: "B"}, [timeFieldName]: dateTime, f: 111}, + resultDocList: [doc_id_1_a_b_no_metrics, doc_id_2_a_b_array_metric], + }); + })(); + + // Run an upsert that doesn't insert a time field. 
+ (function testUpsertNoTimeField() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateList: [{ + q: {[metaFieldName]: {z: "Z"}}, + u: {$set: {f: 10}}, + upsert: true, + multi: true, + }], + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + failCode: ErrorCodes.BadValue, + }); + })(); + + // Run an upsert where the time field is provided in the query. + (function testUpsertQueryOnTimeField() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateList: [{ + q: {[timeFieldName]: dateTimeUpdated}, + u: {$set: {f: 10}}, + upsert: true, + multi: true, + }], + upsertedDoc: { + [timeFieldName]: dateTimeUpdated, + f: 10, + }, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + }); + })(); + + // Run an upsert where a document to insert is supplied by the request. + (function testUpsertSupplyDoc() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateList: [{ + q: {[timeFieldName]: dateTimeUpdated}, + u: [{$set: {f: 10}}], + upsert: true, + multi: true, + upsertSupplied: true, + c: {new: {[timeFieldName]: dateTime, f: 100}} + }], + upsertedDoc: { + [timeFieldName]: dateTime, + f: 100, + }, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + }); + })(); + + // Run an upsert where a document to insert is supplied by the request and does not have a time + // field. + (function testUpsertSupplyDocNoTimeField() { + testUpdate({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateList: [{ + q: {[timeFieldName]: dateTimeUpdated}, + u: [{$set: {f: 10}}], + upsert: true, + multi: true, + upsertSupplied: true, + c: {new: {[metaFieldName]: {a: "A"}, f: 100}} + }], + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + failCode: ErrorCodes.BadValue, + }); + })(); +} })(); diff --git a/jstests/core/timeseries/timeseries_update_one.js b/jstests/core/timeseries/timeseries_update_one.js index b53f645c5a038..fa524a3910e7b 100644 --- a/jstests/core/timeseries/timeseries_update_one.js +++ b/jstests/core/timeseries/timeseries_update_one.js @@ -5,51 +5,19 @@ * # We need a timeseries collection. * requires_timeseries, * featureFlagTimeseriesUpdatesSupport, - * # TODO (SERVER-73726): Re-enable the time-series updateOne test. - * __TEMPORARILY_DISABLED__, + * # TODO SERVER-76454 Remove the following two tags. + * does_not_support_retryable_writes, + * requires_non_retryable_writes, * ] */ -(function() { -"use strict"; - -const timeFieldName = "time"; -const metaFieldName = "mm"; -const collNamePrefix = "timeseries_update_one_"; -let count = 0; -const testDB = db.getSiblingDB(jsTestName()); -assert.commandWorked(testDB.dropDatabase()); - -/** - * Ensure the updateOne command operates correctly by examining documents after the update. - */ -function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n, upsert = false}) { - const coll = testDB.getCollection(collNamePrefix + count++); - if (initialDocList) { - assert.commandWorked(testDB.createCollection( - coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); - assert.commandWorked(coll.insert(initialDocList)); - } - - const updateCommand = { - update: coll.getName(), - updates: [{q: updateQuery, u: updateObj, multi: false, upsert: upsert}] - }; - const res = assert.commandWorked(testDB.runCommand(updateCommand)); - assert.eq(n, res.n); - assert.eq((upsert) ? 
n - 1 : n, res.nModified); - - if (resultDocList) { - const resDocs = coll.find().toArray(); - assert.eq(resDocs.length, resultDocList.length); - resultDocList.forEach(resultDoc => { - assert.docEq(resultDoc, - coll.findOne({_id: resultDoc._id}), - "Expected document " + resultDoc["_id"] + - " not found in result collection:" + tojson(resDocs)); - }); - } -} +import { + getTestDB, + metaFieldName, + prepareCollection, + testUpdateOne, + timeFieldName +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; /** * Tests op-style updates. @@ -82,85 +50,138 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n const query_m1_b1 = {b: {$eq: 1}, [metaFieldName]: {$eq: 1}}; // Metric field update: unset field. - testUpdateOne({ - initialDocList: [doc_m1_a_b, doc_m1_c_d], - updateQuery: query_m1_a1, - updateObj: {$unset: {a: ""}}, - resultDocList: [doc_m1_b, doc_m1_c_d], - n: 1 - }); + (function testUnsetMetricField() { + testUpdateOne({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + updateQuery: query_m1_a1, + updateObj: {$unset: {a: ""}}, + resultDocList: [doc_m1_b, doc_m1_c_d], + nMatched: 1 + }); + })(); // Metric field update: add new field. - testUpdateOne({ - initialDocList: [doc_m1_b, doc_m1_c_d], - updateQuery: query_m1_b1, - updateObj: {$set: {a: 1}}, - resultDocList: [doc_m1_a_b, doc_m1_c_d], - n: 1 - }); + (function testAddNewMetricField() { + testUpdateOne({ + initialDocList: [doc_m1_b, doc_m1_c_d], + updateQuery: query_m1_b1, + updateObj: {$set: {a: 1}}, + resultDocList: [doc_m1_a_b, doc_m1_c_d], + nMatched: 1 + }); + })(); // Metric field update: change field type (integer to array). - testUpdateOne({ - initialDocList: [doc_m1_a_b, doc_m1_c_d], - updateQuery: query_m1_a1, - updateObj: {$set: {a: ["arr", "ay"]}}, - resultDocList: [doc_m1_arrayA_b, doc_m1_c_d], - n: 1 - }); + (function testChangeFieldType() { + testUpdateOne({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + updateQuery: query_m1_a1, + updateObj: {$set: {a: ["arr", "ay"]}}, + resultDocList: [doc_m1_arrayA_b, doc_m1_c_d], + nMatched: 1 + }); + })(); // Metric field update: no-op with non-existent field to unset. - testUpdateOne({ - initialDocList: [doc_m1_a_b, doc_m1_c_d], - updateQuery: query_m1_a1, - updateObj: {$unset: {z: ""}}, - resultDocList: [doc_m1_a_b, doc_m1_c_d], - n: 0 - }); + (function testMatchOneNoopUpdate() { + testUpdateOne({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + updateQuery: query_m1_a1, + updateObj: {$unset: {z: ""}}, + resultDocList: [doc_m1_a_b, doc_m1_c_d], + nMatched: 1, + nModified: 0 + }); + })(); // Metric field update: no-op with non-existent field to unset. - testUpdateOne({ - initialDocList: [doc_m1_a_b, doc_m1_c_d], - updateQuery: {}, - updateObj: {$unset: {z: ""}}, - resultDocList: [doc_m1_a_b, doc_m1_c_d], - n: 0 - }); + (function testMatchMultipleNoopUpdate() { + testUpdateOne({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + updateQuery: {}, + updateObj: {$unset: {z: ""}}, + resultDocList: [doc_m1_a_b, doc_m1_c_d], + nMatched: 1, + nModified: 0 + }); + })(); + + // Metric field update: match multiple docs, only update one. + (function testMatchMultipleUpdateOne() { + testUpdateOne({ + initialDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d], + updateQuery: {}, + updateObj: {$set: {a: 100}}, + // Don't validate exact results as we could update any doc. + nMatched: 1, + }); + })(); + + // Match and update zero docs. 
+ (function testMatchNone() { + testUpdateOne({ + initialDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d], + updateQuery: {[metaFieldName]: {z: "Z"}}, + updateObj: {$set: {a: 100}}, + resultDocList: [doc_a_b, doc_m1_a_b, doc_m1_c_d], + nMatched: 0, + }); + })(); + + // Meta-only update only updates one. + (function testMetaOnlyUpdateOne() { + testUpdateOne({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + updateQuery: {[metaFieldName]: 1}, + updateObj: {$set: {[metaFieldName]: 2}}, + // Don't validate exact results as we could update any doc. + nMatched: 1, + }); + })(); // Meta field update: remove meta field. - testUpdateOne({ - initialDocList: [doc_m1_a_b, doc_m1_c_d], - updateQuery: query_m1_a1, - updateObj: {$unset: {[metaFieldName]: ""}}, - resultDocList: [doc_a_b, doc_m1_c_d], - n: 1 - }); + (function testRemoveMetaField() { + testUpdateOne({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + updateQuery: query_m1_a1, + updateObj: {$unset: {[metaFieldName]: ""}}, + resultDocList: [doc_a_b, doc_m1_c_d], + nMatched: 1 + }); + })(); // Meta field update: add meta field. - testUpdateOne({ - initialDocList: [doc_a_b], - updateQuery: {}, - updateObj: {$set: {[metaFieldName]: 1}}, - resultDocList: [doc_m1_a_b], - n: 1 - }); + (function testAddMetaField() { + testUpdateOne({ + initialDocList: [doc_a_b], + updateQuery: {}, + updateObj: {$set: {[metaFieldName]: 1}}, + resultDocList: [doc_m1_a_b], + nMatched: 1 + }); + })(); - // Meta field update: add meta field. - testUpdateOne({ - initialDocList: [doc_m1_b], - updateQuery: {}, - updateObj: {$set: {[metaFieldName]: 2}}, - resultDocList: [doc_m2_b], - n: 1 - }); + // Meta field update: update meta field. + (function testUpdateMetaField() { + testUpdateOne({ + initialDocList: [doc_m1_b], + updateQuery: {}, + updateObj: {$set: {[metaFieldName]: 2}}, + resultDocList: [doc_m2_b], + nMatched: 1 + }); + })(); // Meta field update: update meta field to different type (integer to string). - testUpdateOne({ - initialDocList: [doc_m1_a_b, doc_m1_c_d], - updateQuery: query_m1_a1, - updateObj: {$set: {[metaFieldName]: "1"}}, - resultDocList: [doc_stringM1_a_b, doc_m1_c_d], - n: 1 - }); + (function testChangeMetaFieldType() { + testUpdateOne({ + initialDocList: [doc_m1_a_b, doc_m1_c_d], + updateQuery: query_m1_a1, + updateObj: {$set: {[metaFieldName]: "1"}}, + resultDocList: [doc_stringM1_a_b, doc_m1_c_d], + nMatched: 1 + }); + })(); } /** @@ -174,22 +195,44 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n {[timeFieldName]: timestamp2022, [metaFieldName]: 2, _id: 1, a: 1, "newField": 42}; // Update timeField, metaField and add a new field. - testUpdateOne({ - initialDocList: [doc_2023_m1_a1], - updateQuery: {a: {$eq: 1}, [metaFieldName]: {$eq: 1}}, - updateObj: [ - {$set: {[timeFieldName]: timestamp2022}}, - {$set: {[metaFieldName]: 2}}, - {$set: {"newField": 42}}, - ], - resultDocList: [doc_2022_m2_a1_newField], - n: 1 - }); + // Skip tests changing the shard key value in sharding. + if (!db.getMongo().isMongos()) { + (function testPipelineUpdateSetMultipleFields() { + testUpdateOne({ + initialDocList: [doc_2023_m1_a1], + updateQuery: {a: {$eq: 1}, [metaFieldName]: {$eq: 1}}, + updateObj: [ + {$set: {[timeFieldName]: timestamp2022}}, + {$set: {[metaFieldName]: 2}}, + {$set: {"newField": 42}}, + ], + resultDocList: [doc_2022_m2_a1_newField], + nMatched: 1 + }); + })(); + } // Expect removal of the timeField to fail. 
- assert.commandFailedWithCode( - regColl.updateOne({}, [{$set: {[metaFieldName]: 2}}, {$unset: {[timeFieldName]: ""}}]), - ErrorCodes.InvalidOptions); + (function testRemoveTimeField() { + testUpdateOne({ + initialDocList: [doc_2023_m1_a1], + updateQuery: {}, + updateObj: [{$set: {[metaFieldName]: 2}}, {$unset: timeFieldName}], + resultDocList: [doc_2023_m1_a1], + failCode: ErrorCodes.BadValue, + }); + })(); + + // Expect changing the type of the timeField to fail. + (function testChangeTimeFieldType() { + testUpdateOne({ + initialDocList: [doc_2023_m1_a1], + updateQuery: {}, + updateObj: [{$set: {[timeFieldName]: "string"}}], + resultDocList: [doc_2023_m1_a1], + failCode: ErrorCodes.BadValue, + }); + })(); } /** @@ -198,49 +241,264 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n { const timestamp2023 = ISODate("2023-02-06T19:19:00Z"); const timestamp2022 = ISODate("2022-02-06T19:19:00Z"); - const doc_t2023_m1_id_a = {[timeFieldName]: timestamp2023, [metaFieldName]: 1, _id: 1, a: 1}; - const doc_t2022_m2_stringId_stringA = - {[timeFieldName]: timestamp2022, [metaFieldName]: 2, "_id": 2, "a": 2}; - - // Full measurement replacement: update every field in the document. - testUpdateOne({ - initialDocList: [doc_t2023_m1_id_a], - updateQuery: {}, - updateObj: doc_t2022_m2_stringId_stringA, - resultDocList: [doc_t2022_m2_stringId_stringA], - n: 1 - }); + const doc_t2023_m1_id1_a1 = {[timeFieldName]: timestamp2023, [metaFieldName]: 1, _id: 1, a: 1}; + const doc_t2022_m2_id2_a2 = {[timeFieldName]: timestamp2022, [metaFieldName]: 2, _id: 2, a: 2}; + const doc_t2022_m2_noId_a2 = {[timeFieldName]: timestamp2022, [metaFieldName]: 2, a: 2}; + + // Skip tests changing the shard key value in sharding. + if (!db.getMongo().isMongos()) { + // Full measurement replacement: update every field in the document, including the _id. + (function testReplacementUpdateChangeId() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1], + updateQuery: {}, + updateObj: doc_t2022_m2_id2_a2, + resultDocList: [doc_t2022_m2_id2_a2], + nMatched: 1 + }); + })(); + + // Full measurement replacement: update every field in the document, except the _id. + (function testReplacementUpdateNoId() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2], + updateQuery: {_id: 1}, + updateObj: doc_t2022_m2_noId_a2, + resultDocList: [ + doc_t2022_m2_id2_a2, + {[timeFieldName]: timestamp2022, [metaFieldName]: 2, a: 2, _id: 1}, + ], + nMatched: 1 + }); + })(); + + // Replacement that results in two duplicate measurements. + (function testReplacementUpdateDuplicateIds() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2], + updateQuery: {_id: 1}, + updateObj: doc_t2022_m2_id2_a2, + resultDocList: [doc_t2022_m2_id2_a2, doc_t2022_m2_id2_a2], + nMatched: 1, + }); + })(); + } + + // Replacement with no time field. + (function testReplacementUpdateNoTimeField() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2], + updateQuery: {_id: 1}, + updateObj: {[metaFieldName]: 1, a: 1, _id: 10}, + resultDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2], + failCode: ErrorCodes.BadValue, + }); + })(); + + // Replacement with time field of the wrong type. 
+ (function testReplacementUpdateWrongTypeTimeField() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2], + updateQuery: {_id: 1}, + updateObj: {[metaFieldName]: 1, a: 1, _id: 10, [timeFieldName]: "string"}, + resultDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2], + failCode: ErrorCodes.BadValue, + }); + })(); + + // Replacement that only references the meta field. Still fails because of the missing time + // field. + (function testReplacementMetaOnly() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2], + updateQuery: {[metaFieldName]: 1}, + updateObj: {[metaFieldName]: 3}, + resultDocList: [doc_t2023_m1_id1_a1, doc_t2022_m2_id2_a2], + failCode: ErrorCodes.BadValue, + }); + })(); // Tests upsert with full measurement. - testUpdateOne({ - initialDocList: [doc_t2023_m1_id_a], - updateQuery: {[metaFieldName]: {$eq: 2}}, - updateObj: doc_t2022_m2_stringId_stringA, - resultDocList: [doc_t2023_m1_id_a, doc_t2022_m2_stringId_stringA], - n: 1, - upsert: true - }); - - // Tests upsert with full measurement: no-op when the query doesn't match and upsert is false. - testUpdateOne({ - initialDocList: [doc_t2023_m1_id_a], - updateQuery: {[metaFieldName]: {$eq: 2}}, - updateObj: doc_t2022_m2_stringId_stringA, - resultDocList: [doc_t2023_m1_id_a], - n: 0, - upsert: false - }); + (function testUpsert() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1], + updateQuery: {[metaFieldName]: {$eq: 2}}, + updateObj: doc_t2022_m2_id2_a2, + resultDocList: [doc_t2023_m1_id1_a1], + upsert: true, + upsertedDoc: doc_t2022_m2_id2_a2, + }); + })(); + + // Tests upsert with full measurement: no-op when the query matches but update is a no-op. + (function testNoopUpsert() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1], + updateQuery: {}, + updateObj: {$unset: {z: ""}}, + resultDocList: [doc_t2023_m1_id1_a1], + nMatched: 1, + nModified: 0, + upsert: true + }); + })(); + + // Run a replacement upsert that includes an _id in the query. + (function testReplacementUpsertWithId() { + testUpdateOne({ + initialDocList: [doc_t2023_m1_id1_a1], + updateQuery: {_id: 100}, + updateObj: {[timeFieldName]: ISODate("2023-02-06T19:19:01Z"), a: 5}, + upsert: true, + upsertedDoc: {_id: 100, [timeFieldName]: ISODate("2023-02-06T19:19:01Z"), a: 5}, + resultDocList: [doc_t2023_m1_id1_a1], + }); + })(); +} + +/** + * Tests upsert with multi:false. + */ +{ + const dateTime = ISODate("2021-07-12T16:00:00Z"); + const dateTimeUpdated = ISODate("2023-01-27T16:00:00Z"); + const doc_id_1_a_b_no_metrics = { + _id: 1, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + }; + + // Run an upsert that doesn't include an _id. + (function testUpsertWithNoId() { + testUpdateOne({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateQuery: {[metaFieldName]: {z: "Z"}}, + updateObj: {$set: {[timeFieldName]: dateTime}}, + upsert: true, + upsertedDoc: {[metaFieldName]: {z: "Z"}, [timeFieldName]: dateTime}, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + }); + })(); + + // Run an upsert that includes an _id. + (function testUpsertWithId() { + testUpdateOne({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateQuery: {_id: 100}, + updateObj: {$set: {[timeFieldName]: dateTime}}, + upsert: true, + upsertedDoc: {_id: 100, [timeFieldName]: dateTime}, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + }); + })(); + + // Run an upsert that updates documents and skips the upsert. 
+ (function testUpsertUpdatesDocs() { + testUpdateOne({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateQuery: {[metaFieldName + ".a"]: "A"}, + updateObj: {$set: {f: 10}}, + upsert: true, + resultDocList: [ + { + _id: 1, + [timeFieldName]: dateTime, + [metaFieldName]: {a: "A", b: "B"}, + f: 10, + }, + ], + nMatched: 1, + }); + })(); + + // Run an upsert that matches a bucket but no documents in it, and inserts the document into a + // bucket with the same parameters. + (function testUpsertIntoMatchedBucket() { + testUpdateOne({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateQuery: {[metaFieldName]: {a: "A", b: "B"}, f: 111}, + updateObj: {$set: {[timeFieldName]: dateTime}}, + upsert: true, + upsertedDoc: {[metaFieldName]: {a: "A", b: "B"}, [timeFieldName]: dateTime, f: 111}, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + }); + })(); + + // Run an upsert that doesn't insert a time field. + (function testUpsertNoTimeField() { + testUpdateOne({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateQuery: {[metaFieldName]: {z: "Z"}}, + updateObj: {$set: {f: 10}}, + upsert: true, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + failCode: ErrorCodes.BadValue, + }); + })(); + + // Run an upsert where the time field is provided in the query. + (function testUpsertQueryOnTimeField() { + testUpdateOne({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateQuery: {[timeFieldName]: dateTimeUpdated}, + updateObj: {$set: {f: 10}}, + upsert: true, + upsertedDoc: {[timeFieldName]: dateTimeUpdated, f: 10}, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + }); + })(); + + // Run an upsert where a document to insert is supplied by the request. + (function testUpsertSupplyDoc() { + testUpdateOne({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateQuery: {[timeFieldName]: dateTimeUpdated}, + updateObj: [{$set: {f: 10}}], + upsert: true, + upsertedDoc: {[timeFieldName]: dateTime, f: 100}, + c: {new: {[timeFieldName]: dateTime, f: 100}}, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + }); + })(); + + // Run an upsert where a document to insert is supplied by the request and does not have a time + // field. + (function testUpsertSupplyDocNoTimeField() { + testUpdateOne({ + initialDocList: [doc_id_1_a_b_no_metrics], + updateQuery: {[timeFieldName]: dateTimeUpdated}, + updateObj: [{$set: {f: 10}}], + upsert: true, + c: {new: {[metaFieldName]: {a: "A"}, f: 100}}, + resultDocList: [ + doc_id_1_a_b_no_metrics, + ], + failCode: ErrorCodes.BadValue, + }); + })(); } /** * Tests measurement modification that could exceed bucket size limit (default value of 128000 * bytes). */ -{ - const coll = testDB.getCollection(collNamePrefix + count++); - assert.commandWorked(testDB.createCollection( - coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); - count--; // Decrement count to access collection correctly in 'testUpdateOne' helper. +(function testUpdateExceedsBucketSizeLimit() { + const testDB = getTestDB(); + const collName = "testUpdateExceedsBucketSizeLimit"; + const coll = testDB.getCollection(collName); + prepareCollection({collName, initialDocList: []}); // Fill up a bucket to roughly 120000 bytes by inserting 4 batches of 30 documents sized at // 1000 bytes. 
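The upsert cases above all exercise the same rule: the document produced by a time-series upsert must end up with the collection's time field, either supplied by the update itself or inherited from an equality predicate in the query, otherwise the statement fails with BadValue. The following is a minimal standalone sketch of that behaviour, assuming a mongod with time-series updates enabled; the database, collection, and field names are illustrative and not taken from the patch.

```javascript
// Sketch only, not part of the patch. Assumes time-series updates are enabled on the
// target mongod; "timeseries_upsert_sketch" and "sketch_ts" are illustrative names.
const sketchDB = db.getSiblingDB("timeseries_upsert_sketch");
assert.commandWorked(sketchDB.dropDatabase());
assert.commandWorked(sketchDB.createCollection(
    "sketch_ts", {timeseries: {timeField: "time", metaField: "mm"}}));

// Nothing matches, and the upserted document would carry no "time" field, so the
// statement fails with BadValue (mirroring testUpsertNoTimeField above).
assert.commandFailedWithCode(sketchDB.runCommand({
    update: "sketch_ts",
    updates: [{q: {mm: {z: "Z"}}, u: {$set: {f: 10}}, upsert: true, multi: false}]
}),
                             ErrorCodes.BadValue);

// An equality predicate on the time field lets the upserted document inherit it, so
// the same update succeeds and inserts one measurement (mirroring
// testUpsertQueryOnTimeField above).
const res = assert.commandWorked(sketchDB.runCommand({
    update: "sketch_ts",
    updates: [{
        q: {time: ISODate("2023-01-27T16:00:00Z")},
        u: {$set: {f: 10}},
        upsert: true,
        multi: false
    }]
}));
assert.eq(1, res.n, tojson(res));
assert.eq(1, sketchDB.sketch_ts.countDocuments({f: 10}));
```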
@@ -248,7 +506,7 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n while (batchNum < 4) { let batch = []; for (let i = 0; i < 30; i++) { - const doc = {_id: i, [timeField]: ISODate(), value: "a".repeat(1000)}; + const doc = {_id: i, [timeFieldName]: ISODate(), value: "a".repeat(1000)}; batch.push(doc); } @@ -257,13 +515,10 @@ function testUpdateOne({initialDocList, updateQuery, updateObj, resultDocList, n } // Update any of the measurements with a document which will exceed the 128000 byte threshold. - const chunkyDoc = {_id: 128000, [timeField]: ISODate(), value: "a".repeat(10000)}; - testUpdateOne({ - // initialDocList: We manually inserted measurements. - updateQuery: {}, - updateObj: chunkyDoc, - // resultDocList: No need to check all of the measurements. - n: 1 - }); -} + const chunkyDoc = {_id: 128000, [timeFieldName]: ISODate(), value: "a".repeat(10000)}; + + const updateCommand = {update: collName, updates: [{q: {}, u: chunkyDoc, multi: false}]}; + const res = assert.commandWorked(testDB.runCommand(updateCommand)); + assert.eq(1, res.n, tojson(res)); + assert.eq(1, res.nModified, tojson(res)); })(); diff --git a/jstests/core/timeseries/timeseries_user_system_buckets.js b/jstests/core/timeseries/timeseries_user_system_buckets.js index 02ae0cb8f59d9..6835fca1864e8 100644 --- a/jstests/core/timeseries/timeseries_user_system_buckets.js +++ b/jstests/core/timeseries/timeseries_user_system_buckets.js @@ -13,8 +13,8 @@ * requires_fcv_63 * ] */ -userCollSystemBuckets = db.system.buckets.coll; -userColl = db.coll; +let userCollSystemBuckets = db.system.buckets.coll; +let userColl = db.coll; userCollSystemBuckets.drop(); userColl.drop(); @@ -25,10 +25,10 @@ assert.commandWorked(userCollSystemBuckets.insert({a: 1})); // A user collection with the same postfix should not be considered time series collection assert.commandWorked(userColl.insert({a: 2})); -docs = userColl.find().toArray(); +let docs = userColl.find().toArray(); assert.eq(1, docs.length); -docsSystemBuckets = userCollSystemBuckets.find().toArray(); +let docsSystemBuckets = userCollSystemBuckets.find().toArray(); assert.eq(1, docsSystemBuckets.length); userCollSystemBuckets.drop(); diff --git a/jstests/core/top.js b/jstests/core/top.js index 02c1a3e0d1c85..ea671c46ed879 100644 --- a/jstests/core/top.js +++ b/jstests/core/top.js @@ -15,8 +15,6 @@ * tenant_migration_incompatible, * does_not_support_repeated_reads, * requires_fcv_62, - * # TODO SERVER-67640: Verify 'top' and $collStats work correctly for queries in CQF. - * cqf_incompatible, * ] */ diff --git a/jstests/core/transaction_too_large_for_cache.js b/jstests/core/transaction_too_large_for_cache.js index dddcbed9f4b49..b06ead39b621b 100644 --- a/jstests/core/transaction_too_large_for_cache.js +++ b/jstests/core/transaction_too_large_for_cache.js @@ -7,6 +7,7 @@ * requires_persistence, * requires_non_retryable_writes, * requires_wiredtiger, + * no_selinux * ] */ diff --git a/jstests/core/txns/abort_expired_transaction.js b/jstests/core/txns/abort_expired_transaction.js index b461856280e4b..a4ed72cc7dbe6 100644 --- a/jstests/core/txns/abort_expired_transaction.js +++ b/jstests/core/txns/abort_expired_transaction.js @@ -35,6 +35,11 @@ try { const session = db.getMongo().startSession(sessionOptions); const sessionDb = session.getDatabase(testDBName); + // Number of passes made by the "abortExpiredTransactions" thread before the transaction + // expires. 
+ const abortExpiredTransactionsPassesPreAbort = + db.serverStatus().metrics.abortExpiredTransactions.passes; + let txnNumber = 0; jsTest.log("Insert a document starting a transaction."); @@ -66,6 +71,14 @@ try { "currentOp reports that the idle transaction still exists, it has not been " + "aborted as expected."); + assert.soon(() => { + // For this expired transaction to abort, the "abortExpiredTransactions" thread has to + // perform at least one pass. + const serverStatus = db.serverStatus(); + return abortExpiredTransactionsPassesPreAbort < + serverStatus.metrics.abortExpiredTransactions.passes; + }); + jsTest.log( "Attempt to do a write in the transaction, which should fail because the transaction " + "was aborted"); diff --git a/jstests/core/txns/aggregation_in_transaction.js b/jstests/core/txns/aggregation_in_transaction.js index 76195d0caa042..1c4da78ab2025 100644 --- a/jstests/core/txns/aggregation_in_transaction.js +++ b/jstests/core/txns/aggregation_in_transaction.js @@ -1,5 +1,5 @@ // Tests that aggregation is supported in transactions. -// @tags: [uses_transactions, uses_snapshot_read_concern] +// @tags: [uses_transactions, uses_snapshot_read_concern, references_foreign_collection] (function() { "use strict"; diff --git a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js index a32cc59e255a9..49f03c6fddcb6 100644 --- a/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js +++ b/jstests/core/txns/commands_banning_txnnumber_outside_transactions.js @@ -11,7 +11,7 @@ (function() { "use strict"; -const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"; +load("jstests/libs/fixture_helpers.js"); const session = db.getMongo().startSession(); const sessionDb = session.getDatabase("admin"); @@ -53,7 +53,7 @@ nonRetryableWriteCommands.forEach(function(command) { [50768, 50889]); }); -if (!isMongos) { +if (!FixtureHelpers.isMongos(db)) { nonRetryableWriteCommandsMongodOnly.forEach(function(command) { jsTest.log("Testing command: " + tojson(command)); assert.commandFailedWithCode( diff --git a/jstests/core/txns/commands_not_allowed_in_txn.js b/jstests/core/txns/commands_not_allowed_in_txn.js index a261b14a0be25..45352c2710921 100644 --- a/jstests/core/txns/commands_not_allowed_in_txn.js +++ b/jstests/core/txns/commands_not_allowed_in_txn.js @@ -29,7 +29,7 @@ const sessionOptions = { const session = db.getMongo().startSession(sessionOptions); const sessionDb = session.getDatabase(dbName); -const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"; +const runningOnMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"; assert.commandWorked(testDB.createCollection(testColl.getName(), {writeConcern: {w: "majority"}})); assert.commandWorked(testDB.runCommand({ @@ -69,7 +69,7 @@ function testCommand(command) { assert(res.errmsg.match(errmsgRegExp), res); // Mongos has special handling for commitTransaction to support commit recovery. - if (!isMongos) { + if (!runningOnMongos) { assert.commandFailedWithCode(sessionDb.adminCommand({ commitTransaction: 1, txnNumber: NumberLong(txnNumber), @@ -134,7 +134,7 @@ const commands = [ ]; // There is no applyOps command on mongos. 
-if (!isMongos) { +if (!runningOnMongos) { commands.push({ applyOps: [{ op: "u", @@ -164,7 +164,7 @@ assert.commandFailedWithCode(sessionDb.runCommand({ ErrorCodes.OperationNotSupportedInTransaction); // Mongos has special handling for commitTransaction to support commit recovery. -if (!isMongos) { +if (!runningOnMongos) { // The failed find should abort the transaction so a commit should fail. assert.commandFailedWithCode(sessionDb.adminCommand({ commitTransaction: 1, diff --git a/jstests/core/txns/concurrent_drops_and_creates.js b/jstests/core/txns/concurrent_drops_and_creates.js index 71b7b83651160..7c7bfd6d58730 100644 --- a/jstests/core/txns/concurrent_drops_and_creates.js +++ b/jstests/core/txns/concurrent_drops_and_creates.js @@ -10,13 +10,9 @@ * uses_transactions, * ] */ -(function() { -"use strict"; - // TODO (SERVER-39704): Remove the following load after SERVER-397074 is completed // For retryOnceOnTransientAndRestartTxnOnMongos. load('jstests/libs/auto_retry_transaction_in_sharding.js'); -load("jstests/libs/feature_flag_util.js"); const dbName1 = "test1"; const dbName2 = "test2"; @@ -63,11 +59,8 @@ retryOnceOnTransientAndRestartTxnOnMongos(session, () => { sessionOutsideTxn.advanceClusterTime(session.getClusterTime()); assert.commandWorked(testDB2.runCommand({drop: collNameB, writeConcern: {w: "majority"}})); -// This test cause a StaleConfig error on sharding so even with the PointInTimeCatalogLookups flag -// enabled no command will succeed. -// TODO SERVER-67289: Remove feature flag check. -if (FeatureFlagUtil.isPresentAndEnabled(db, "PointInTimeCatalogLookups") && - !session.getClient().isMongos()) { +// This test cause a StaleConfig error on sharding so no command will succeed. +if (!session.getClient().isMongos()) { // We can perform reads on the dropped collection as it existed when we started the transaction. assert.commandWorked(sessionDB2.runCommand({find: sessionCollB.getName()})); @@ -112,25 +105,13 @@ assert.commandWorked(sessionCollA.insert({})); sessionOutsideTxn.advanceClusterTime(session.getClusterTime()); assert.commandWorked(testDB2.runCommand({create: collNameB})); -// TODO SERVER-67289: Remove feature flag check. -if (FeatureFlagUtil.isPresentAndEnabled(db, "PointInTimeCatalogLookups")) { - // We can insert to collection B in the transaction as the transaction does not have a - // collection on this namespace (even as it exist at latest). A collection will be implicitly - // created and we will fail to commit this transaction with a WriteConflict error. - retryOnceOnTransientAndRestartTxnOnMongos(session, () => { - assert.commandWorked(sessionCollB.insert({})); - }, txnOptions); +// We can insert to collection B in the transaction as the transaction does not have a collection on +// this namespace (even as it exist at latest). A collection will be implicitly created and we will +// fail to commit this transaction with a WriteConflict error. +retryOnceOnTransientAndRestartTxnOnMongos(session, () => { + assert.commandWorked(sessionCollB.insert({})); +}, txnOptions); - assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.WriteConflict); -} else { - // We cannot write to collection B in the transaction, since it experienced catalog changes - // since the transaction's read timestamp. Since our implementation of the in-memory collection - // catalog always has the most recent collection metadata, we do not allow you to read from a - // collection at a time prior to its most recent catalog changes. 
- assert.commandFailedWithCode(sessionCollB.insert({}), ErrorCodes.SnapshotUnavailable); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); -} +assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.WriteConflict); -session.endSession(); -}()); +session.endSession(); \ No newline at end of file diff --git a/jstests/core/txns/create_collection_parallel.js b/jstests/core/txns/create_collection_parallel.js index cbbb968c88fef..e0f0c5bd42327 100644 --- a/jstests/core/txns/create_collection_parallel.js +++ b/jstests/core/txns/create_collection_parallel.js @@ -7,12 +7,8 @@ * uses_transactions, * ] */ -(function() { -"use strict"; - load("jstests/libs/create_collection_txn_helpers.js"); load("jstests/libs/auto_retry_transaction_in_sharding.js"); -load("jstests/libs/feature_flag_util.js"); const dbName = 'test_txns_create_collection_parallel'; @@ -66,23 +62,14 @@ function runParallelCollectionCreateTest(command, explicitCreate) { session.commitTransaction(); assert.eq(sessionColl.find({}).itcount(), 1); - // TODO SERVER-67289: Remove feature flag check. - if (FeatureFlagUtil.isPresentAndEnabled(db, "PointInTimeCatalogLookups")) { - // create cannot observe the collection created in the other transaction so the command - // will succeed and we will instead throw WCE when trying to commit the transaction. - retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => { - assert.commandWorked(secondSessionDB.runCommand({create: collName})); - }, {writeConcern: {w: "majority"}}); - - assert.commandFailedWithCode(secondSession.commitTransaction_forTesting(), - ErrorCodes.WriteConflict); - } else { - assert.commandFailedWithCode(secondSessionDB.runCommand({create: collName}), - ErrorCodes.NamespaceExists); + // create cannot observe the collection created in the other transaction so the command will + // succeed and we will instead throw WCE when trying to commit the transaction. 
+ retryOnceOnTransientAndRestartTxnOnMongos(secondSession, () => { + assert.commandWorked(secondSessionDB.runCommand({create: collName})); + }, {writeConcern: {w: "majority"}}); - assert.commandFailedWithCode(secondSession.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } + assert.commandFailedWithCode(secondSession.commitTransaction_forTesting(), + ErrorCodes.WriteConflict); assert.eq(distinctSessionColl.find({}).itcount(), 0); sessionColl.drop({writeConcern: {w: "majority"}}); @@ -195,5 +182,4 @@ runParallelCollectionCreateTest("insert", false /*explicitCreate*/); runParallelCollectionCreateTest("update", true /*explicitCreate*/); runParallelCollectionCreateTest("update", false /*explicitCreate*/); runParallelCollectionCreateTest("findAndModify", true /*explicitCreate*/); -runParallelCollectionCreateTest("findAndModify", false /*explicitCreate*/); -}()); +runParallelCollectionCreateTest("findAndModify", false /*explicitCreate*/); \ No newline at end of file diff --git a/jstests/core/txns/create_indexes_parallel.js b/jstests/core/txns/create_indexes_parallel.js index d04116482958d..7b6bc1fde0d15 100644 --- a/jstests/core/txns/create_indexes_parallel.js +++ b/jstests/core/txns/create_indexes_parallel.js @@ -7,12 +7,8 @@ * uses_transactions, * ] */ -(function() { -"use strict"; - load("jstests/libs/auto_retry_transaction_in_sharding.js"); load("jstests/libs/create_index_txn_helpers.js"); -load("jstests/libs/feature_flag_util.js"); let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyIndex) { const dbName = 'test_txns_create_indexes_parallel'; @@ -93,26 +89,14 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd assert.eq(secondSessionColl.find({}).itcount(), 1); assert.eq(secondSessionColl.getIndexes().length, 2); - // TODO SERVER-67289: Remove feature flag check. - if (FeatureFlagUtil.isPresentAndEnabled(db, "PointInTimeCatalogLookups")) { - // createIndexes cannot observe the index created in the other transaction so the command - // will succeed and we will instead throw WCE when trying to commit the transaction. - retryOnceOnTransientAndRestartTxnOnMongos(session, () => { - assert.commandWorked(sessionColl.runCommand( - {createIndexes: collName, indexes: [conflictingIndexSpecs]})); - }, {writeConcern: {w: "majority"}}); - - assert.commandFailedWithCode(session.commitTransaction_forTesting(), - ErrorCodes.WriteConflict); - } else { - // createIndexes takes minimum visible snapshots of new collections into consideration when - // checking for existing indexes. - assert.commandFailedWithCode( - sessionColl.runCommand({createIndexes: collName, indexes: [conflictingIndexSpecs]}), - ErrorCodes.SnapshotUnavailable); - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - } + // createIndexes cannot observe the index created in the other transaction so the command will + // succeed and we will instead throw WCE when trying to commit the transaction. 
+ retryOnceOnTransientAndRestartTxnOnMongos(session, () => { + assert.commandWorked( + sessionColl.runCommand({createIndexes: collName, indexes: [conflictingIndexSpecs]})); + }, {writeConcern: {w: "majority"}}); + + assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.WriteConflict); assert.eq(sessionColl.find({}).itcount(), 1); assert.eq(sessionColl.getIndexes().length, 2); @@ -207,5 +191,4 @@ let doParallelCreateIndexesTest = function(explicitCollectionCreate, multikeyInd doParallelCreateIndexesTest(false /*explicitCollectionCreate*/, false /*multikeyIndex*/); doParallelCreateIndexesTest(true /*explicitCollectionCreate*/, false /*multikeyIndex*/); doParallelCreateIndexesTest(false /*explicitCollectionCreate*/, true /*multikeyIndex*/); -doParallelCreateIndexesTest(true /*explicitCollectionCreate*/, true /*multikeyIndex*/); -}()); +doParallelCreateIndexesTest(true /*explicitCollectionCreate*/, true /*multikeyIndex*/); \ No newline at end of file diff --git a/jstests/core/txns/dbstats_not_blocked_by_txn.js b/jstests/core/txns/dbstats_not_blocked_by_txn.js index 6555a216e1497..3eaec86df82e3 100644 --- a/jstests/core/txns/dbstats_not_blocked_by_txn.js +++ b/jstests/core/txns/dbstats_not_blocked_by_txn.js @@ -7,6 +7,8 @@ */ (function() { "use strict"; +load("jstests/libs/fixture_helpers.js"); + var dbName = 'dbstats_not_blocked_by_txn'; var mydb = db.getSiblingDB(dbName); @@ -16,8 +18,7 @@ mydb.createCollection("foo", {writeConcern: {w: "majority"}}); var session = db.getMongo().startSession(); var sessionDb = session.getDatabase(dbName); -const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"; -if (isMongos) { +if (FixtureHelpers.isMongos(db)) { // Before starting the transaction below, access the collection so it can be implicitly // sharded and force all shards to refresh their database versions because the refresh // requires an exclusive lock and would block behind the transaction. diff --git a/jstests/core/txns/list_collections_not_blocked_by_txn.js b/jstests/core/txns/list_collections_not_blocked_by_txn.js index 1ef9bb17b386e..7b615c076f8ce 100644 --- a/jstests/core/txns/list_collections_not_blocked_by_txn.js +++ b/jstests/core/txns/list_collections_not_blocked_by_txn.js @@ -9,6 +9,7 @@ // TODO (SERVER-39704): Remove the following load after SERVER-397074 is completed // For withTxnAndAutoRetryOnMongos. load('jstests/libs/auto_retry_transaction_in_sharding.js'); +load("jstests/libs/fixture_helpers.js"); var dbName = 'list_collections_not_blocked'; var mydb = db.getSiblingDB(dbName); @@ -19,8 +20,7 @@ mydb.foo.drop({writeConcern: {w: "majority"}}); assert.commandWorked(mydb.createCollection("foo", {writeConcern: {w: "majority"}})); -const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"; -if (isMongos) { +if (FixtureHelpers.isMongos(db)) { // Before starting the transaction below, access the collection so it can be implicitly // sharded and force all shards to refresh their database versions because the refresh // requires an exclusive lock and would block behind the transaction. 
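Several of the transaction-test hunks in this region (commands_banning_txnnumber_outside_transactions.js, dbstats_not_blocked_by_txn.js, list_collections_not_blocked_by_txn.js, and more below) make the same mechanical change: the hand-rolled hello-based mongos probe is replaced by the shared helper in jstests/libs/fixture_helpers.js. A small sketch of the two checks the patch treats as interchangeable:

```javascript
// Sketch only, not part of the patch. Runs against whatever deployment `db` points at.
load("jstests/libs/fixture_helpers.js");

// Hand-rolled check being removed: mongos reports msg: "isdbgrid" in its "hello"
// response, while mongod does not.
const manualIsMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid";

// Shared helper the patch switches these tests to.
const helperIsMongos = FixtureHelpers.isMongos(db);

// The patch relies on the two being interchangeable.
assert.eq(manualIsMongos, helperIsMongos);
```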
diff --git a/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js b/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js index d3c26b49de884..81f6c0c579017 100644 --- a/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js +++ b/jstests/core/txns/non_transactional_operations_on_session_with_transaction.js @@ -112,4 +112,4 @@ assert.docEq(doc1, testColl.findOne(doc1)); assert.docEq(doc1, sessionColl.findOne(doc1)); assert.docEq(doc2, testColl.findOne(doc2)); assert.docEq(doc2, sessionColl.findOne(doc2)); -}()); \ No newline at end of file +}()); diff --git a/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js b/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js index 75310ac65d2ba..083a2b978cd2f 100644 --- a/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js +++ b/jstests/core/txns/noop_createIndexes_not_blocked_by_txn.js @@ -6,6 +6,7 @@ // TODO(SERVER-39704): Remove the following load after SERVER-39704 is completed // For withTxnAndAutoRetryOnMongos. load('jstests/libs/auto_retry_transaction_in_sharding.js'); +load("jstests/libs/fixture_helpers.js"); const dbName = 'noop_createIndexes_not_blocked'; const collName = 'test'; @@ -16,8 +17,7 @@ testDB.runCommand({drop: collName, writeConcern: {w: "majority"}}); const session = db.getMongo().startSession({causalConsistency: false}); const sessionDB = session.getDatabase(dbName); -const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"; -if (isMongos) { +if (FixtureHelpers.isMongos(db)) { // Access the collection before creating indexes so it can be implicitly sharded. assert.eq(sessionDB[collName].find().itcount(), 0); } diff --git a/jstests/core/txns/prepare_conflict.js b/jstests/core/txns/prepare_conflict.js index e338461dc14ce..4aec0afab33f4 100644 --- a/jstests/core/txns/prepare_conflict.js +++ b/jstests/core/txns/prepare_conflict.js @@ -7,9 +7,6 @@ * uses_prepare_transaction, * uses_transactions, * uses_parallel_shell, - * # TODO SERVER-70847: Snapshot reads do not succeed on non-conflicting documents while txn is - * # in prepare. - * cqf_incompatible, * requires_profiling, * ] */ diff --git a/jstests/core/txns/prepare_conflict_aggregation_behavior.js b/jstests/core/txns/prepare_conflict_aggregation_behavior.js index 17c5a79811a41..f1e159df32929 100644 --- a/jstests/core/txns/prepare_conflict_aggregation_behavior.js +++ b/jstests/core/txns/prepare_conflict_aggregation_behavior.js @@ -5,7 +5,11 @@ * * The test runs commands that are not allowed with security token: endSession, prepareTransaction. 
* @tags: [ - * not_allowed_with_security_token,uses_transactions, uses_prepare_transaction] + * not_allowed_with_security_token, + * references_foreign_collection, + * uses_transactions, + * uses_prepare_transaction, + * ] */ (function() { "use strict"; diff --git a/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js b/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js index e1d3b74595689..a9be6e6c7b2df 100644 --- a/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js +++ b/jstests/core/txns/prepared_transactions_do_not_block_non_conflicting_ddl.js @@ -6,9 +6,9 @@ "use strict"; load("jstests/core/txns/libs/prepare_helpers.js"); -const dbName = "prepared_transactions_do_not_block_non_conflicting_ddl"; +const dbName = "not_block_non_conflicting_ddl"; const collName = "transactions_collection"; -const otherDBName = "prepared_transactions_do_not_block_non_conflicting_ddl_other"; +const otherDBName = "not_block_non_conflicting_ddl_other"; const otherCollName = "transactions_collection_other"; const testDB = db.getSiblingDB(dbName); const otherDB = db.getSiblingDB(otherDBName); diff --git a/jstests/core/txns/read_concern.js b/jstests/core/txns/read_concern.js index fac40d9bcb540..460f705c7675d 100644 --- a/jstests/core/txns/read_concern.js +++ b/jstests/core/txns/read_concern.js @@ -113,4 +113,4 @@ assert.commandFailedWithCode( assert.commandWorked(session.commitTransaction_forTesting()); session.endSession(); -}()); \ No newline at end of file +}()); diff --git a/jstests/core/txns/statement_ids_accepted.js b/jstests/core/txns/statement_ids_accepted.js index b5a56b31870a5..14e64897cf2f8 100644 --- a/jstests/core/txns/statement_ids_accepted.js +++ b/jstests/core/txns/statement_ids_accepted.js @@ -8,6 +8,7 @@ // # Tenant migrations don't support applyOps. // tenant_migration_incompatible // ] +load("jstests/libs/fixture_helpers.js"); (function() { "use strict"; @@ -172,8 +173,7 @@ assert.commandWorked(sessionDb.adminCommand({ autocommit: false })); -const isMongos = assert.commandWorked(db.runCommand("hello")).msg === "isdbgrid"; -if (!isMongos) { +if (!FixtureHelpers.isMongos(db)) { // Skip commands that do not exist on mongos. jsTestLog("Check that prepareTransaction accepts a statement ID"); diff --git a/jstests/core/txns/timeseries_insert_in_txn.js b/jstests/core/txns/timeseries_insert_in_txn.js index 299a94dccc1dd..62ab51c3c38c8 100644 --- a/jstests/core/txns/timeseries_insert_in_txn.js +++ b/jstests/core/txns/timeseries_insert_in_txn.js @@ -5,10 +5,7 @@ * uses_transactions, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; TimeseriesTest.run((insert) => { // Use a custom database, to avoid conflict with other tests that use the system.js collection. 
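The timeseries_insert_in_txn.js hunk above, like several later hunks (txn_ops_allowed_on_buckets_coll.js, views_collation.js, views_count.js), migrates a test from the legacy load()-plus-IIFE pattern to an ES module import, which is why the (function() { ... })(); wrappers and "use strict" pragmas disappear. Below is a minimal sketch of the before and after shapes, using the TimeseriesTest helper these files import; it is illustrative rather than an exact copy of any one test.

```javascript
// Sketch only, not part of the patch.
//
// Legacy style removed by these hunks: the helper is pulled in with load() and the
// whole test is wrapped in an IIFE with an explicit "use strict" pragma.
//
//     (function() {
//     "use strict";
//     load("jstests/core/timeseries/libs/timeseries.js");  // defines TimeseriesTest
//     // ... test body ...
//     })();
//
// Migrated style added by these hunks: a top-level ES module import, no wrapper.
import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";

// The imported helper is used exactly as before; only how it enters scope changes.
assert.eq("function", typeof TimeseriesTest.run);
```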
@@ -28,5 +25,4 @@ TimeseriesTest.run((insert) => { ErrorCodes.OperationNotSupportedInTransaction); assert.commandFailedWithCode(session.abortTransaction_forTesting(), ErrorCodes.NoSuchTransaction); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js b/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js index 481a1693b8bf6..7e8832fec516a 100644 --- a/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js +++ b/jstests/core/txns/timestamped_reads_wait_for_prepare_oplog_visibility.js @@ -12,7 +12,7 @@ * command_not_supported_in_serverless, * # TODO SERVER-70847: Snapshot reads do not succeed on non-conflicting documents while txn is * # in prepare. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ (function() { diff --git a/jstests/core/txns/txn_ops_allowed_on_buckets_coll.js b/jstests/core/txns/txn_ops_allowed_on_buckets_coll.js index 4af433edefca6..fc8406f969202 100644 --- a/jstests/core/txns/txn_ops_allowed_on_buckets_coll.js +++ b/jstests/core/txns/txn_ops_allowed_on_buckets_coll.js @@ -6,10 +6,7 @@ * uses_snapshot_read_concern * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const session = db.getMongo().startSession(); @@ -58,5 +55,4 @@ assert.eq(systemColl.find().itcount(), 1); jsTestLog("Testing aggregate."); assert.eq(systemColl.aggregate([{$match: {}}]).itcount(), 1); -assert.commandWorked(session.commitTransaction_forTesting()); -}()); +assert.commandWorked(session.commitTransaction_forTesting()); \ No newline at end of file diff --git a/jstests/core/type_bracket.js b/jstests/core/type_bracket.js index 2381f6e149619..a51f5f2dfad46 100644 --- a/jstests/core/type_bracket.js +++ b/jstests/core/type_bracket.js @@ -2,7 +2,6 @@ "use strict"; load('jstests/aggregation/extras/utils.js'); // For assertArrayEq -load('jstests/libs/optimizer_utils.js'); // For checkCascadesOptimizerEnabled const t = db.type_bracket; t.drop(); @@ -115,27 +114,45 @@ let tests = [ {filter: {a: {$gte: new Code("")}}, expected: [docs[28]]}, {filter: {a: {$lte: new Code("")}}, expected: []}, + // MinKey/MaxKey + {filter: {a: {$lte: MinKey()}}, expected: [docs[0]]}, + {filter: {a: {$lt: MinKey()}}, expected: []}, + {filter: {a: {$gte: MaxKey()}}, expected: [docs[31]]}, + {filter: {a: {$gt: MaxKey()}}, expected: []} ]; -// Include Min/MaxKey type bracketing tests conditional on using CQF. -// TODO SERVER-68274: Always include these testcases once SBE correctly handles the semantics of +// Currently, depending on which query engine is used, documents which are missing 'a' may or may +// not be returned when comparing 'a' against MinKey/MaxKey. For example, for query +// {a: {$gte: MinKey()}}, classic and CQF correctly return documents missing 'a', but SBE does not. +// TODO SERVER-68274: Restrict these testcases once SBE correctly handles the semantics of // missing fields and type bracketing (missing field is implicitly null which is greater than // MinKey). 
-if (checkCascadesOptimizerEnabled(db)) { - tests.push( - // MinKey - {filter: {a: {$gte: MinKey()}}, expected: docs}, - {filter: {a: {$gt: MinKey()}}, expected: docs.slice(1)}, - {filter: {a: {$lte: MinKey()}}, expected: [docs[0]]}, - {filter: {a: {$lt: MinKey()}}, expected: []}, - // MaxKey - {filter: {a: {$lte: MaxKey()}}, expected: docs}, - {filter: {a: {$lt: MaxKey()}}, expected: docs.slice(0, 31)}, - {filter: {a: {$gte: MaxKey()}}, expected: [docs[31]]}, - {filter: {a: {$gt: MaxKey()}}, expected: []}); -} +let docsWithA = docs.slice(); +docsWithA.splice(29, 1); + +tests.push( + // MinKey + {filter: {a: {$gte: MinKey()}}, expectedList: [docs, docsWithA]}, + {filter: {a: {$gt: MinKey()}}, expectedList: [docs.slice(1), docsWithA.slice(1)]}, + + // MaxKey + {filter: {a: {$lte: MaxKey()}}, expectedList: [docs, docsWithA]}, + {filter: {a: {$lt: MaxKey()}}, expectedList: [docs.slice(0, 31), docsWithA.slice(0, 30)]}); for (const testData of tests) { - runTest(testData.filter, testData.expected); + if (testData.hasOwnProperty("expected")) { + runTest(testData.filter, testData.expected); + } else { + const result = t.aggregate({$match: testData.filter}).toArray(); + let foundMatch = false; + for (let i = 0; i < testData.expectedList.length; i++) { + const expected = testData.expectedList[i]; + foundMatch |= arrayEq(result, expected); + } + assert(foundMatch, + `Actual query result did not match any of the expected options. filter=${ + tojson(testData.filter)}, actual=${tojson(result)}, expectedList=${ + tojson(testData.expectedList)}`); + } } }()); diff --git a/jstests/core/views/invalid_system_views.js b/jstests/core/views/invalid_system_views.js index 8c90068c81e13..3fee8f63e0332 100644 --- a/jstests/core/views/invalid_system_views.js +++ b/jstests/core/views/invalid_system_views.js @@ -16,13 +16,15 @@ * # The drop of offending views may not happen on the donor after a committed migration. * tenant_migration_incompatible, * uses_compact, + * references_foreign_collection, * ] */ +load("jstests/libs/fixture_helpers.js"); (function() { "use strict"; -const isMongos = db.runCommand({isdbgrid: 1}).isdbgrid; -const isStandalone = !isMongos && !db.runCommand({hello: 1}).hasOwnProperty("setName"); +const runningOnMongos = FixtureHelpers.isMongos(db); +const isStandalone = !runningOnMongos && !db.runCommand({hello: 1}).hasOwnProperty("setName"); function runTest(badViewDefinition) { let viewsDB = db.getSiblingDB("invalid_system_views"); @@ -52,7 +54,7 @@ function runTest(badViewDefinition) { " in system.views"; } - if (!isMongos) { + if (!runningOnMongos) { // Commands that run on existing regular collections should not be impacted by the // presence of invalid views. However, applyOps doesn't work on mongos. assert.commandWorked( @@ -107,7 +109,7 @@ function runTest(badViewDefinition) { } const storageEngine = jsTest.options().storageEngine; - if (isMongos || storageEngine === "inMemory") { + if (runningOnMongos || storageEngine === "inMemory") { print("Not testing compact command on mongos or ephemeral storage engine"); } else { assert.commandWorked(viewsDB.runCommand({compact: "collection", force: true}), diff --git a/jstests/core/views/views_aggregation.js b/jstests/core/views/views_aggregation.js index 50b2edfd4a72d..235fbe40cb939 100644 --- a/jstests/core/views/views_aggregation.js +++ b/jstests/core/views/views_aggregation.js @@ -9,6 +9,7 @@ * requires_non_retryable_commands, * # Explain of a resolved view must be executed by mongos. 
* directly_against_shardsvrs_incompatible, + * references_foreign_collection, * ] */ (function() { diff --git a/jstests/core/views/views_all_commands.js b/jstests/core/views/views_all_commands.js index 5cb25656424fc..579ee1a1aadda 100644 --- a/jstests/core/views/views_all_commands.js +++ b/jstests/core/views/views_all_commands.js @@ -76,6 +76,7 @@ "use strict"; load('jstests/sharding/libs/last_lts_mongod_commands.js'); +load('jstests/sharding/libs/last_lts_mongos_commands.js'); // Pre-written reasons for skipping a test. const isAnInternalCommand = "internal command"; @@ -116,20 +117,21 @@ let viewsCommandTests = { _configsvrMoveRange: {skip: isAnInternalCommand}, _configsvrRefineCollectionShardKey: {skip: isAnInternalCommand}, _configsvrRenameCollection: {skip: isAnInternalCommand}, - _configsvrRenameCollectionMetadata: {skip: isAnInternalCommand}, _configsvrRemoveChunks: {skip: isAnInternalCommand}, _configsvrRemoveShard: {skip: isAnInternalCommand}, _configsvrRemoveShardFromZone: {skip: isAnInternalCommand}, _configsvrRemoveTags: {skip: isAnInternalCommand}, _configsvrRepairShardedCollectionChunksHistory: {skip: isAnInternalCommand}, + _configsvrResetPlacementHistory: {skip: isAnInternalCommand}, _configsvrReshardCollection: {skip: isAnInternalCommand}, _configsvrRunRestore: {skip: isAnInternalCommand}, _configsvrSetAllowMigrations: {skip: isAnInternalCommand}, _configsvrSetClusterParameter: {skip: isAnInternalCommand}, _configsvrSetUserWriteBlockMode: {skip: isAnInternalCommand}, - _configsvrTransitionToCatalogShard: {skip: isAnInternalCommand}, + _configsvrTransitionFromDedicatedConfigServer: {skip: isAnInternalCommand}, _configsvrTransitionToDedicatedConfigServer: {skip: isAnInternalCommand}, _configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand}, + _dropConnectionsToMongot: {skip: isAnInternalCommand}, _flushDatabaseCacheUpdates: {skip: isUnrelated}, _flushDatabaseCacheUpdatesWithWriteConcern: {skip: isUnrelated}, _flushReshardingStateChange: {skip: isUnrelated}, @@ -143,6 +145,7 @@ let viewsCommandTests = { _killOperations: {skip: isUnrelated}, _mergeAuthzCollections: {skip: isAnInternalCommand}, _migrateClone: {skip: isAnInternalCommand}, + _mongotConnPoolStats: {skip: isAnInternalCommand}, _movePrimary: {skip: isAnInternalCommand}, _movePrimaryRecipientAbortMigration: {skip: isAnInternalCommand}, _movePrimaryRecipientForgetMigration: {skip: isAnInternalCommand}, @@ -156,11 +159,10 @@ let viewsCommandTests = { _shardsvrAbortReshardCollection: {skip: isAnInternalCommand}, _shardsvrCheckMetadataConsistency: {skip: isAnInternalCommand}, _shardsvrCheckMetadataConsistencyParticipant: {skip: isAnInternalCommand}, + _shardsvrCleanupStructuredEncryptionData: {skip: isAnInternalCommand}, _shardsvrCloneCatalogData: {skip: isAnInternalCommand}, _shardsvrCompactStructuredEncryptionData: {skip: isAnInternalCommand}, _shardsvrDropCollection: {skip: isAnInternalCommand}, - // TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS. 
- _shardsvrDropCollectionIfUUIDNotMatching: {skip: isUnrelated}, _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern: {skip: isUnrelated}, _shardsvrDropCollectionParticipant: {skip: isAnInternalCommand}, _shardsvrDropIndexCatalogEntryParticipant: {skip: isAnInternalCommand}, @@ -207,7 +209,14 @@ let viewsCommandTests = { _shardsvrCollModParticipant: {skip: isAnInternalCommand}, _shardsvrParticipantBlock: {skip: isAnInternalCommand}, _shardsvrUnregisterIndex: {skip: isAnInternalCommand}, - _startStreamProcessor: {skip: isAnInternalCommand}, + streams_startStreamProcessor: {skip: isAnInternalCommand}, + streams_startStreamSample: {skip: isAnInternalCommand}, + streams_stopStreamProcessor: {skip: isAnInternalCommand}, + streams_listStreamProcessors: {skip: isAnInternalCommand}, + streams_getMoreStreamSample: {skip: isAnInternalCommand}, + streams_getStats: {skip: isAnInternalCommand}, + streams_testOnlyInsert: {skip: isAnInternalCommand}, + streams_getMetrics: {skip: isAnInternalCommand}, _transferMods: {skip: isAnInternalCommand}, _vectorClockPersist: {skip: isAnInternalCommand}, abortReshardCollection: {skip: isUnrelated}, @@ -256,12 +265,16 @@ let viewsCommandTests = { command: {captrunc: "view", n: 2, inc: false}, expectFailure: true, }, - checkMetadataConsistency: {skip: isUnrelated}, + checkMetadataConsistency: { + command: {checkMetadataConsistency: "view"}, + expectFailure: false, + }, checkShardingIndex: {skip: isUnrelated}, cleanupOrphaned: { skip: "Tested in views/views_sharded.js", }, cleanupReshardCollection: {skip: isUnrelated}, + cleanupStructuredEncryptionData: {skip: isUnrelated}, clearJumboFlag: { command: {clearJumboFlag: "test.view"}, skipStandalone: true, @@ -276,6 +289,7 @@ let viewsCommandTests = { }, clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"}, clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"}, + clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"}, clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"}, clusterCount: {skip: "already tested by 'count' tests on mongos"}, clusterDelete: {skip: "already tested by 'delete' tests on mongos"}, @@ -292,7 +306,7 @@ let viewsCommandTests = { configureFailPoint: {skip: isUnrelated}, configureCollectionBalancing: {skip: isUnrelated}, configureQueryAnalyzer: { - command: {configureQueryAnalyzer: "test.view", mode: "full", sampleRate: 1}, + command: {configureQueryAnalyzer: "test.view", mode: "full", samplesPerSecond: 1}, skipStandalone: true, expectFailure: true, isAdminCommand: true, @@ -611,6 +625,7 @@ let viewsCommandTests = { replSetTestEgress: {skip: isUnrelated}, replSetUpdatePosition: {skip: isUnrelated}, replSetResizeOplog: {skip: isUnrelated}, + resetPlacementHistory: {skip: isUnrelated}, reshardCollection: { command: {reshardCollection: "test.view", key: {_id: 1}}, setup: function(conn) { @@ -718,7 +733,7 @@ let viewsCommandTests = { testVersion2: {skip: isAnInternalCommand}, testVersions1And2: {skip: isAnInternalCommand}, top: {skip: "tested in views/views_stats.js"}, - transitionToCatalogShard: {skip: isUnrelated}, + transitionFromDedicatedConfigServer: {skip: isUnrelated}, transitionToDedicatedConfigServer: {skip: isUnrelated}, update: {command: {update: "view", updates: [{q: {x: 1}, u: {x: 2}}]}, expectFailure: true}, updateRole: { @@ -756,6 +771,10 @@ commandsRemovedFromMongodSinceLastLTS.forEach(function(cmd) { viewsCommandTests[cmd] = {skip: "must define test coverage for backwards 
compatibility"}; }); +commandsRemovedFromMongosSinceLastLTS.forEach(function(cmd) { + viewsCommandTests[cmd] = {skip: "must define test coverage for backwards compatibility"}; +}); + /** * Helper function for failing commands or writes that checks the result 'res' of either. * If 'code' is null we only check for failure, otherwise we confirm error code matches as diff --git a/jstests/core/views/views_collation.js b/jstests/core/views/views_collation.js index 169e930974044..c661c38b2cf4c 100644 --- a/jstests/core/views/views_collation.js +++ b/jstests/core/views/views_collation.js @@ -6,15 +6,13 @@ // requires_non_retryable_commands, // # Explain of a resolved view must be executed by mongos. // directly_against_shardsvrs_incompatible, +// references_foreign_collection, // ] /** * Tests the behavior of operations when interacting with a view's default collation. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; let viewsDB = db.getSiblingDB("views_collation"); assert.commandWorked(viewsDB.dropDatabase()); @@ -518,5 +516,4 @@ assert.commandWorked(findRes); assert.eq(3, findRes.cursor.firstBatch.length); explain = viewsDB.runCommand({explain: {find: "case_insensitive_view", filter: {f: "case"}}}); assert.neq(null, explain.queryPlanner, tojson(explain)); -assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain)); -}()); +assert.eq(1, explain.queryPlanner.collation.strength, tojson(explain)); \ No newline at end of file diff --git a/jstests/core/views/views_count.js b/jstests/core/views/views_count.js index 92b05ef43ea50..b9d465a3638d1 100644 --- a/jstests/core/views/views_count.js +++ b/jstests/core/views/views_count.js @@ -8,10 +8,7 @@ // requires_fcv_63, // ] -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const sbeEnabled = checkSBEEnabled(db); @@ -94,5 +91,4 @@ assert.commandWorked(viewsDB.runCommand({count: "identityView", hint: "_id_"})); assert.commandFailedWithCode( viewsDB.runCommand({count: "identityView", collation: {locale: "en_US"}}), - ErrorCodes.OptionNotSupportedOnView); -}()); + ErrorCodes.OptionNotSupportedOnView); \ No newline at end of file diff --git a/jstests/core/views/views_creation.js b/jstests/core/views/views_creation.js index fa6c8eb75941d..b06ff90bbb502 100644 --- a/jstests/core/views/views_creation.js +++ b/jstests/core/views/views_creation.js @@ -5,6 +5,7 @@ * assumes_superuser_permissions, * # TODO SERVER-73967: Remove this tag. * does_not_support_stepdowns, + * references_foreign_collection, * ] */ (function() { @@ -111,12 +112,6 @@ assert.commandFailedWithCode(viewsDB.runCommand({ }), 40600); -// The remainder of this test will not work on server versions < 7.0 as the 'create' command -// is not idempotent there. TODO SERVER-74062: remove this. -if (db.version().split('.')[0] < 7) { - return; -} - // Test that creating a view which already exists with identical options reports success. let repeatedCmd = { create: "existingViewTest", @@ -158,4 +153,8 @@ assert.commandFailedWithCode(viewsDB.runCommand({ // Test that creating a view when there is already a collection with the same name fails. assert.commandFailedWithCode(viewsDB.runCommand({create: "collection", viewOn: "collection"}), ErrorCodes.NamespaceExists); + +// Ensure we accept a view with a name of greater than 64 characters (the maximum dbname length). 
+assert.commandWorked(viewsDB.createView( + "longNamedView", "Queries_IdentityView_UnindexedLargeInMatching0_BackingCollection", [])); }()); diff --git a/jstests/core/views/views_distinct.js b/jstests/core/views/views_distinct.js index b5d019485cc5f..a5ef205fcb086 100644 --- a/jstests/core/views/views_distinct.js +++ b/jstests/core/views/views_distinct.js @@ -5,15 +5,14 @@ * assumes_unsharded_collection, * # Explain of a resolved view must be executed by mongos. * directly_against_shardsvrs_incompatible, + * requires_fcv_71, * ] */ -(function() { -"use strict"; - // For arrayEq. We don't use array.eq as it does an ordered comparison on arrays but we don't // care about order in the distinct response. load("jstests/aggregation/extras/utils.js"); +import {getWinningPlan, getPlanStage} from "jstests/libs/analyze_plan.js"; var viewsDB = db.getSiblingDB("views_distinct"); assert.commandWorked(viewsDB.dropDatabase()); @@ -93,6 +92,27 @@ assert(explainPlan.stages[0].$cursor.hasOwnProperty("executionStats")); assert.eq(explainPlan.stages[0].$cursor.executionStats.nReturned, 2); assert(explainPlan.stages[0].$cursor.executionStats.hasOwnProperty("allPlansExecution")); +// Distinct with hints work on views. +assert.commandWorked(viewsDB.coll.createIndex({state: 1})); + +explainPlan = largePopView.explain().distinct("pop", {}, {hint: {state: 1}}); +assert(getPlanStage(explainPlan.stages[0].$cursor, "FETCH")); +assert(getPlanStage(explainPlan.stages[0].$cursor, "IXSCAN")); + +explainPlan = largePopView.explain().distinct("pop"); +assert.neq(getWinningPlan(explainPlan.stages[0].$cursor.queryPlanner).stage, + "IXSCAN", + tojson(explainPlan)); + +// Make sure that the hint produces the right results. +assert(arrayEq([10, 7], largePopView.distinct("pop", {}, {hint: {state: 1}}))); + +explainPlan = + largePopView.runCommand("distinct", {"key": "a", query: {a: 1, b: 2}, hint: {bad: 1, hint: 1}}); +assert.commandFailedWithCode(explainPlan, ErrorCodes.BadValue, tojson(explainPlan)); +var regex = new RegExp("hint provided does not correspond to an existing index"); +assert(regex.test(explainPlan.errmsg)); + // Distinct commands fail when they try to change the collation of a view. 
assert.commandFailedWithCode( viewsDB.runCommand({distinct: "identityView", key: "state", collation: {locale: "en_US"}}), @@ -145,5 +165,4 @@ assert.commandWorked(coll.insert({a: "not leaf"})); assertIdentityViewDistinctMatchesCollection("a"); assertIdentityViewDistinctMatchesCollection("a.b"); assertIdentityViewDistinctMatchesCollection("a.b.c"); -assertIdentityViewDistinctMatchesCollection("a.b.c.d"); -}()); +assertIdentityViewDistinctMatchesCollection("a.b.c.d"); \ No newline at end of file diff --git a/jstests/core/views/views_stats.js b/jstests/core/views/views_stats.js index 3bda9eec2502a..623ef6b4dfd6e 100644 --- a/jstests/core/views/views_stats.js +++ b/jstests/core/views/views_stats.js @@ -17,6 +17,7 @@ (function() { "use strict"; load("jstests/libs/stats.js"); +load("jstests/libs/fixture_helpers.js"); let viewsDB = db.getSiblingDB("views_stats"); assert.commandWorked(viewsDB.dropDatabase()); @@ -42,9 +43,7 @@ lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0); assert.writeError(view.update({}, {})); lastHistogram = assertHistogramDiffEq(view, lastHistogram, 0, 1, 0); -let helloResponse = assert.commandWorked(viewsDB.runCommand("hello")); -const isMongos = (helloResponse.msg === "isdbgrid"); -if (isMongos) { +if (FixtureHelpers.isMongos(viewsDB)) { jsTest.log("Tests are being run on a mongos; skipping top tests."); return; } diff --git a/jstests/core/views/views_validation.js b/jstests/core/views/views_validation.js index 02c060fd50a82..4dea7c9491bed 100644 --- a/jstests/core/views/views_validation.js +++ b/jstests/core/views/views_validation.js @@ -2,6 +2,7 @@ // # Running getCollection on views in sharded suites tries to shard views, which fails. // assumes_unsharded_collection, // requires_non_retryable_commands, +// references_foreign_collection, // ] (function() { diff --git a/jstests/core/write/autoid.js b/jstests/core/write/autoid.js index 679b109fcc513..1797e3d7bafa9 100644 --- a/jstests/core/write/autoid.js +++ b/jstests/core/write/autoid.js @@ -3,14 +3,14 @@ // key. // @tags: [assumes_unsharded_collection] -f = db.jstests_autoid; +let f = db.jstests_autoid; f.drop(); f.save({z: 1}); -a = f.findOne({z: 1}); +let a = f.findOne({z: 1}); f.update({z: 1}, {z: 2}); -b = f.findOne({z: 2}); +let b = f.findOne({z: 2}); assert.eq(a._id.str, b._id.str); -c = f.update({z: 2}, {z: "abcdefgabcdefgabcdefg"}); +let c = f.update({z: 2}, {z: "abcdefgabcdefgabcdefg"}); c = f.findOne({}); assert.eq(a._id.str, c._id.str); diff --git a/jstests/core/write/batch_write_command_w0.js b/jstests/core/write/batch_write_command_w0.js index 7d5038120b60b..4b6bcb021684b 100644 --- a/jstests/core/write/batch_write_command_w0.js +++ b/jstests/core/write/batch_write_command_w0.js @@ -33,12 +33,8 @@ coll.drop(); // // Single document insert, w:0 write concern specified, missing ordered coll.drop(); -request = { - insert: coll.getName(), - documents: [{a: 1}], - writeConcern: {w: 0} -}; -result = coll.runCommand(request); +let request = {insert: coll.getName(), documents: [{a: 1}], writeConcern: {w: 0}}; +let result = coll.runCommand(request); assert.eq({ok: 1}, result); countEventually(coll, 1); diff --git a/jstests/core/write/bulk/bulk_write.js b/jstests/core/write/bulk/bulk_write.js index a48bd6d11213b..f29e4077eb211 100644 --- a/jstests/core/write/bulk/bulk_write.js +++ b/jstests/core/write/bulk/bulk_write.js @@ -1,26 +1,17 @@ /** - * Tests bulk write command for valid / invalid input. + * Tests bulk write command for valid input. 
* * The test runs commands that are not allowed with security token: bulkWrite. * @tags: [ * assumes_against_mongod_not_mongos, * not_allowed_with_security_token, - * # TODO SERVER-72988: Until bulkWrite is compatible with retryable writes. - * requires_non_retryable_writes, - * # Command is not yet compatible with tenant migration. - * tenant_migration_incompatible, + * command_not_supported_in_serverless, + * # TODO SERVER-52419 Remove this tag. + * featureFlagBulkWriteCommand, * ] */ (function() { "use strict"; -load("jstests/libs/feature_flag_util.js"); - -// Skip this test if the BulkWriteCommand feature flag is not enabled -// TODO SERVER-67711: Remove feature flag check. -if (!FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) { - jsTestLog('Skipping test because the BulkWriteCommand feature flag is disabled.'); - return; -} var coll = db.getCollection("coll"); var coll1 = db.getCollection("coll1"); @@ -35,52 +26,32 @@ assert.eq(coll.find().itcount(), 1); assert.eq(coll1.find().itcount(), 0); coll.drop(); -// Make sure non-adminDB request fails -assert.commandFailedWithCode(db.runCommand({ - bulkWrite: 1, - ops: [{insert: 0, document: {skey: "MongoDB"}}], - nsInfo: [{ns: "test.coll"}] -}), - [ErrorCodes.Unauthorized]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - // Make sure optional fields are accepted -assert.commandWorked(db.adminCommand({ +var res = db.adminCommand({ bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: [{ns: "test.coll"}], cursor: {batchSize: 1024}, bypassDocumentValidation: true, ordered: false -})); +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); assert.eq(coll.find().itcount(), 1); assert.eq(coll1.find().itcount(), 0); coll.drop(); -// Make sure invalid fields are not accepted -assert.commandFailedWithCode(db.adminCommand({ - bulkWrite: 1, - ops: [{insert: 0, document: {skey: "MongoDB"}}], - nsInfo: [{ns: "test.coll"}], - cursor: {batchSize: 1024}, - bypassDocumentValidation: true, - ordered: false, - fooField: 0 -}), - [40415]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - // Make sure ops and nsInfo can take arrays properly -assert.commandWorked(db.adminCommand({ +res = db.adminCommand({ bulkWrite: 1, ops: [{insert: 1, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}], nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}] -})); +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); assert.eq(coll.find().itcount(), 1); assert.eq(coll1.find().itcount(), 1); @@ -88,144 +59,32 @@ coll.drop(); coll1.drop(); // Test 2 inserts into the same namespace -assert.commandWorked(db.adminCommand({ +res = db.adminCommand({ bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}], nsInfo: [{ns: "test.coll"}] -})); - -assert.eq(coll.find().itcount(), 2); -assert.eq(coll1.find().itcount(), 0); -coll.drop(); - -// Make sure we fail if index out of range of nsInfo -assert.commandFailedWithCode(db.adminCommand({ - bulkWrite: 1, - ops: [{insert: 2, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}], - nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}] -}), - [ErrorCodes.BadValue]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - -// Missing ops -assert.commandFailedWithCode(db.adminCommand({bulkWrite: 1, nsInfo: [{ns: "mydb.coll"}]}), [40414]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - -// Missing nsInfo 
-assert.commandFailedWithCode( - db.adminCommand({bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}]}), [40414]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - -// Test valid arguments with invalid values -assert.commandFailedWithCode(db.adminCommand({ - bulkWrite: 1, - ops: [{insert: "test", document: {skey: "MongoDB"}}], - nsInfo: [{ns: "test.coll"}] -}), - [ErrorCodes.TypeMismatch]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); +}); -assert.commandFailedWithCode( - db.adminCommand( - {bulkWrite: 1, ops: [{insert: 0, document: "test"}], nsInfo: [{ns: "test.coll"}]}), - [ErrorCodes.TypeMismatch]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - -assert.commandFailedWithCode( - db.adminCommand( - {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: ["test"]}), - [ErrorCodes.TypeMismatch]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - -assert.commandFailedWithCode( - db.adminCommand({bulkWrite: 1, ops: "test", nsInfo: [{ns: "test.coll"}]}), - [ErrorCodes.TypeMismatch]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - -assert.commandFailedWithCode( - db.adminCommand( - {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: "test"}), - [ErrorCodes.TypeMismatch]); - -assert.eq(coll.find().itcount(), 0); -assert.eq(coll1.find().itcount(), 0); - -// Test 2 inserts into the same namespace -assert.commandWorked(db.adminCommand({ - bulkWrite: 1, - ops: [{insert: 0, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}], - nsInfo: [{ns: "test.coll"}] -})); +assert.commandWorked(res); +assert.eq(res.numErrors, 0); assert.eq(coll.find().itcount(), 2); assert.eq(coll1.find().itcount(), 0); coll.drop(); -// Test that a write can fail part way through a write and the write partially executes. 
-assert.commandWorked(db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 1, document: {skey: "MongoDB"}} - ], - nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}] -})); - -assert.eq(coll.find().itcount(), 1); -assert.eq(coll1.find().itcount(), 0); -coll.drop(); -coll1.drop(); - -assert.commandWorked(db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 1, document: {skey: "MongoDB"}} - ], - nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}], - ordered: false -})); - -assert.eq(coll.find().itcount(), 1); -assert.eq(coll1.find().itcount(), 1); -coll.drop(); -coll1.drop(); - // Test BypassDocumentValidator assert.commandWorked(coll.insert({_id: 1})); assert.commandWorked(db.runCommand({collMod: "coll", validator: {a: {$exists: true}}})); -assert.commandWorked(db.adminCommand({ - bulkWrite: 1, - ops: [{insert: 0, document: {_id: 3, skey: "MongoDB"}}], - nsInfo: [{ns: "test.coll"}], - bypassDocumentValidation: false, -})); - -assert.eq(0, coll.count({_id: 3})); - -assert.commandWorked(db.adminCommand({ +res = db.adminCommand({ bulkWrite: 1, ops: [{insert: 0, document: {_id: 3, skey: "MongoDB"}}], nsInfo: [{ns: "test.coll"}], bypassDocumentValidation: true, -})); +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); assert.eq(1, coll.count({_id: 3})); diff --git a/jstests/core/write/bulk/bulk_write_delete_cursor.js b/jstests/core/write/bulk/bulk_write_delete_cursor.js index 00e829d5a79e5..109e7bb7705de 100644 --- a/jstests/core/write/bulk/bulk_write_delete_cursor.js +++ b/jstests/core/write/bulk/bulk_write_delete_cursor.js @@ -5,35 +5,21 @@ * @tags: [ * assumes_against_mongod_not_mongos, * not_allowed_with_security_token, - * # TODO SERVER-72988: Until bulkWrite is compatible with retryable writes. - * requires_non_retryable_writes, - * # Command is not yet compatible with tenant migration. - * tenant_migration_incompatible, + * command_not_supported_in_serverless, + * # TODO SERVER-52419 Remove this tag. + * featureFlagBulkWriteCommand, * ] */ +load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator. + (function() { "use strict"; -load("jstests/libs/feature_flag_util.js"); - -// Skip this test if the BulkWriteCommand feature flag is not enabled. -// TODO SERVER-67711: Remove feature flag check. -if (!FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) { - jsTestLog('Skipping test because the BulkWriteCommand feature flag is disabled.'); - return; -} var coll = db.getCollection("coll"); var coll1 = db.getCollection("coll1"); coll.drop(); coll1.drop(); -const cursorEntryValidator = function(entry, expectedEntry) { - assert(entry.ok == expectedEntry.ok); - assert(entry.idx == expectedEntry.idx); - assert(entry.n == expectedEntry.n); - assert(entry.code == expectedEntry.code); -}; - // Test generic delete with no return. 
var res = db.adminCommand({ bulkWrite: 1, @@ -45,6 +31,7 @@ var res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); @@ -66,6 +53,7 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); @@ -88,6 +76,7 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); @@ -110,6 +99,7 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); @@ -120,27 +110,6 @@ assert.sameMembers(coll.find().toArray(), [{_id: 1, skey: "MongoDB"}]); coll.drop(); -// Test deletes multiple when multi is true. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 0, skey: "MongoDB"}}, - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {delete: 0, filter: {skey: "MongoDB"}, multi: true}, - ], - nsInfo: [{ns: "test.coll"}] -}); - -assert.commandWorked(res); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 2}); -assert(!res.cursor.firstBatch[3]); -assert(!coll.findOne()); - -coll.drop(); - // Test Insert outside of bulkWrite + delete in bulkWrite. coll.insert({_id: 1, skey: "MongoDB"}); @@ -153,6 +122,7 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB"}); @@ -174,6 +144,7 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 0}); assert(!res.cursor.firstBatch[0].value); @@ -184,59 +155,6 @@ assert.eq("MongoDB", coll1.findOne().skey); coll.drop(); coll1.drop(); -// Make sure multi:true + return fails the op. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {delete: 0, filter: {_id: 1}, multi: true, return: true}, - ], - nsInfo: [{ns: "test.coll"}] -}); - -assert.commandWorked(res); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidOptions}); -assert(!res.cursor.firstBatch[1]); - -// Test returnFields with return. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 0, skey: "MongoDB"}}, - {delete: 0, filter: {_id: 0}, returnFields: {_id: 0, skey: 1}, return: true}, - ], - nsInfo: [{ns: "test.coll"}] -}); - -assert.commandWorked(res); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); -assert.docEq(res.cursor.firstBatch[1].value, {skey: "MongoDB"}); -assert(!res.cursor.firstBatch[2]); - -assert(!coll.findOne()); - -coll.drop(); - -// Test providing returnFields without return option. 
-res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 0, skey: "MongoDB"}}, - {delete: 0, filter: {_id: 0}, returnFields: {_id: 1}}, - ], - nsInfo: [{ns: "test.coll"}] -}); - -assert.commandWorked(res); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.InvalidOptions}); -assert(!res.cursor.firstBatch[2]); - -coll.drop(); - // Test let matches specific document. res = db.adminCommand({ bulkWrite: 1, @@ -244,17 +162,14 @@ res = db.adminCommand({ {insert: 0, document: {_id: 0, skey: "MongoDB"}}, {insert: 0, document: {_id: 1, skey: "MongoDB2"}}, {insert: 0, document: {_id: 2, skey: "MongoDB3"}}, - { - delete: 0, - filter: {$expr: {$eq: ["$skey", "$$targetKey"]}}, - let : {targetKey: "MongoDB"}, - return: true - }, + {delete: 0, filter: {$expr: {$eq: ["$skey", "$$targetKey"]}}, return: true}, ], - nsInfo: [{ns: "test.coll"}] + nsInfo: [{ns: "test.coll"}], + let : {targetKey: "MongoDB"} }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); @@ -265,69 +180,5 @@ assert(!res.cursor.firstBatch[4]); assert.sameMembers(coll.find().toArray(), [{_id: 1, skey: "MongoDB2"}, {_id: 2, skey: "MongoDB3"}]); -coll.drop(); - -// Test write fails userAllowedWriteNS. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - { - delete: 0, - filter: {_id: 1}, - multi: true, - }, - ], - nsInfo: [{ns: "test.system.profile"}] -}); - -assert.commandWorked(res); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidNamespace}); -assert(!res.cursor.firstBatch[1]); - -// Test delete continues on error with ordered:false. -coll.insert({_id: 1, skey: "MongoDB"}); -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - { - delete: 0, - filter: {_id: 0}, - }, - {delete: 1, filter: {_id: 1}, return: true} - ], - nsInfo: [{ns: "test.system.profile"}, {ns: "test.coll"}], - ordered: false -}); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidNamespace}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); -assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"}); -assert(!res.cursor.firstBatch[2]); - -assert(!coll.findOne()); - -coll.drop(); - -// Test delete stop on error with ordered:true. -coll.insert({_id: 1, skey: "MongoDB"}); -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - { - delete: 0, - filter: {_id: 0}, - }, - {delete: 1, filter: {_id: 1}, return: true}, - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - ], - nsInfo: [{ns: "test.system.profile"}, {ns: "test.coll"}], -}); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidNamespace}); -assert(!res.cursor.firstBatch[1]); - -assert.eq(coll.findOne().skey, "MongoDB"); - coll.drop(); })(); diff --git a/jstests/core/write/bulk/bulk_write_getMore.js b/jstests/core/write/bulk/bulk_write_getMore.js new file mode 100644 index 0000000000000..28d4343077361 --- /dev/null +++ b/jstests/core/write/bulk/bulk_write_getMore.js @@ -0,0 +1,62 @@ +/** + * Tests bulk write command in conjunction with using getMore to obtain the rest + * of the cursor response. + * + * These tests are incompatible with various overrides due to using getMore. + * + * The test runs commands that are not allowed with security token: bulkWrite. 
+ * @tags: [ + * assumes_against_mongod_not_mongos, + * not_allowed_with_security_token, + * command_not_supported_in_serverless, + * does_not_support_retryable_writes, + * requires_non_retryable_writes, + * requires_getmore, + * # Contains commands that fail which will fail the entire transaction + * does_not_support_transactions, + * # TODO SERVER-52419 Remove this tag. + * featureFlagBulkWriteCommand, + * ] + */ +load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator. + +(function() { +"use strict"; + +var coll = db.getCollection("coll"); +var coll1 = db.getCollection("coll1"); +coll.drop(); +coll1.drop(); + +// The retryable write override does not append txnNumber to getMore since it is not a retryable +// command. + +// Test getMore by setting batch size to 1 and running 2 inserts. +// Should end up with 1 insert return per batch. +var res = db.adminCommand({ + bulkWrite: 1, + ops: [{insert: 1, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}], + nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}], + cursor: {batchSize: 1}, +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); + +assert(res.cursor.id != 0); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); +assert(!res.cursor.firstBatch[1]); + +// First batch only had 1 of 2 responses so run a getMore to get the next batch. +var getMoreRes = + assert.commandWorked(db.adminCommand({getMore: res.cursor.id, collection: "$cmd.bulkWrite"})); + +assert(getMoreRes.cursor.id == 0); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); +assert(!getMoreRes.cursor.nextBatch[1]); + +assert.eq(coll.find().itcount(), 1); +assert.eq(coll1.find().itcount(), 1); +coll.drop(); +coll1.drop(); +})(); diff --git a/jstests/core/write/bulk/bulk_write_insert_cursor.js b/jstests/core/write/bulk/bulk_write_insert_cursor.js index fcce9584d2ecf..674346a2038b6 100644 --- a/jstests/core/write/bulk/bulk_write_insert_cursor.js +++ b/jstests/core/write/bulk/bulk_write_insert_cursor.js @@ -5,40 +5,27 @@ * @tags: [ * assumes_against_mongod_not_mongos, * not_allowed_with_security_token, - * # TODO SERVER-72988: Until bulkWrite is compatible with retryable writes. - * requires_non_retryable_writes, - * # Command is not yet compatible with tenant migration. - * tenant_migration_incompatible, + * command_not_supported_in_serverless, + * # TODO SERVER-52419 Remove this tag. + * featureFlagBulkWriteCommand, * ] */ +load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator. + (function() { "use strict"; -load("jstests/libs/feature_flag_util.js"); - -// Skip this test if the BulkWriteCommand feature flag is not enabled. -// TODO SERVER-67711: Remove feature flag check. -if (!FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) { - jsTestLog('Skipping test because the BulkWriteCommand feature flag is disabled.'); - return; -} var coll = db.getCollection("coll"); var coll1 = db.getCollection("coll1"); coll.drop(); coll1.drop(); -const cursorEntryValidator = function(entry, expectedEntry) { - assert(entry.ok == expectedEntry.ok); - assert(entry.idx == expectedEntry.idx); - assert(entry.n == expectedEntry.n); - assert(entry.code == expectedEntry.code); -}; - // Make sure a properly formed request has successful result. 
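+// The response should also report numErrors, which is expected to be 0 when every op succeeds.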
var res = db.adminCommand( {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: [{ns: "test.coll"}]}); assert.commandWorked(res); +assert.eq(res.numErrors, 0); assert(res.cursor.id == 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); @@ -49,34 +36,6 @@ assert.eq(coll1.find().itcount(), 0); coll.drop(); -// Test getMore by setting batch size to 1 and running 2 inserts. -// Should end up with 1 insert return per batch. -res = db.adminCommand({ - bulkWrite: 1, - ops: [{insert: 1, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}], - nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}], - cursor: {batchSize: 1}, -}); - -assert.commandWorked(res); - -assert(res.cursor.id != 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); -assert(!res.cursor.firstBatch[1]); - -// First batch only had 1 of 2 responses so run a getMore to get the next batch. -var getMoreRes = - assert.commandWorked(db.adminCommand({getMore: res.cursor.id, collection: "$cmd.bulkWrite"})); - -assert(getMoreRes.cursor.id == 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); -assert(!getMoreRes.cursor.nextBatch[1]); - -assert.eq(coll.find().itcount(), 1); -assert.eq(coll1.find().itcount(), 1); -coll.drop(); -coll1.drop(); - // Test internal batch size > 1. res = db.adminCommand({ bulkWrite: 1, @@ -85,6 +44,7 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); assert(res.cursor.id == 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); @@ -93,127 +53,5 @@ assert(!res.cursor.firstBatch[2]); assert.eq(coll.find().itcount(), 2); assert.eq(coll1.find().itcount(), 0); -coll.drop(); - -// Test that a write can fail part way through a write and the write partially executes. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 1, document: {skey: "MongoDB"}} - ], - nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}] -}); - -assert.commandWorked(res); - -assert(res.cursor.id == 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: 11000}); -// Make sure that error extra info was correctly added -assert.docEq(res.cursor.firstBatch[1].keyPattern, {_id: 1}); -assert.docEq(res.cursor.firstBatch[1].keyValue, {_id: 1}); -assert(!res.cursor.firstBatch[2]); - -assert.eq(coll.find().itcount(), 1); -assert.eq(coll1.find().itcount(), 0); -coll.drop(); -coll1.drop(); - -// Test that we continue processing after an error for ordered:false. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 1, document: {skey: "MongoDB"}} - ], - nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}], - ordered: false -}); - -assert.commandWorked(res); - -assert(res.cursor.id == 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: 11000}); -cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, n: 1, idx: 2}); -assert(!res.cursor.firstBatch[3]); - -assert.eq(coll.find().itcount(), 1); -assert.eq(coll1.find().itcount(), 1); -coll.drop(); -coll1.drop(); - -// Test fixDocumentForInsert works properly by erroring out on >16MB size insert. 
-var targetSize = (16 * 1024 * 1024) + 1; -var doc = {_id: new ObjectId(), value: ''}; - -var size = Object.bsonsize(doc); -assert.gte(targetSize, size); - -// Set 'value' as a string with enough characters to make the whole document 'targetSize' -// bytes long. -doc.value = new Array(targetSize - size + 1).join('x'); -assert.eq(targetSize, Object.bsonsize(doc)); - -// Testing ordered:false continues on with other ops when fixDocumentForInsert fails. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 0, document: doc}, - {insert: 0, document: {_id: 2, skey: "MongoDB2"}}, - ], - nsInfo: [{ns: "test.coll"}], - ordered: false -}); - -assert.commandWorked(res); - -assert(res.cursor.id == 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); - -// In most cases we expect this to fail because it tries to insert a document that is too large. -// In some cases we may see the javascript execution interrupted because it takes longer than -// our default time limit, so we allow that possibility. -try { - cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.BadValue}); -} catch { - cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.Interrupted}); -} -cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, n: 1, idx: 2}); -assert(!res.cursor.firstBatch[3]); - -coll.drop(); - -// Testing ordered:true short circuits. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {insert: 0, document: doc}, - {insert: 0, document: {_id: 2, skey: "MongoDB2"}}, - ], - nsInfo: [{ns: "test.coll"}], - ordered: true -}); - -assert.commandWorked(res); - -assert(res.cursor.id == 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); - -// In most cases we expect this to fail because it tries to insert a document that is too large. -// In some cases we may see the javascript execution interrupted because it takes longer than -// our default time limit, so we allow that possibility. -try { - cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.BadValue}); -} catch { - cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.Interrupted}); -} -assert(!res.cursor.firstBatch[2]); - coll.drop(); })(); diff --git a/jstests/core/write/bulk/bulk_write_non_retryable_cursor.js b/jstests/core/write/bulk/bulk_write_non_retryable_cursor.js new file mode 100644 index 0000000000000..40ec1462f8ff0 --- /dev/null +++ b/jstests/core/write/bulk/bulk_write_non_retryable_cursor.js @@ -0,0 +1,175 @@ +/** + * Tests bulk write cursor response for correct responses. + * + * This file contains tests that are not compatible with retryable writes for various reasons. + * + * The test runs commands that are not allowed with security token: bulkWrite. + * @tags: [ + * assumes_against_mongod_not_mongos, + * does_not_support_retryable_writes, + * requires_non_retryable_writes, + * not_allowed_with_security_token, + * command_not_supported_in_serverless, + * # TODO SERVER-52419 Remove this tag. + * featureFlagBulkWriteCommand, + * ] + */ +load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator. + +(function() { +"use strict"; + +var coll = db.getCollection("coll"); +var coll1 = db.getCollection("coll1"); +coll.drop(); +coll1.drop(); + +// TODO SERVER-31242 findAndModify retry doesn't apply 'fields' to response. +// This causes _id to not get projected out and the assert fails. 
+// These tests should be moved back to `bulk_write_update_cursor.js` and +// `bulk_write_delete_cursor.js` if the above ticket is completed. + +// Test returnFields with return. +var res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + {delete: 0, filter: {_id: 0}, returnFields: {_id: 0, skey: 1}, return: true}, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +assert.docEq(res.cursor.firstBatch[1].value, {skey: "MongoDB"}); +assert(!res.cursor.firstBatch[2]); + +assert(!coll.findOne()); + +coll.drop(); + +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + { + update: 0, + filter: {_id: 0}, + updateMods: {$set: {skey: "MongoDB2"}}, + returnFields: {_id: 0, skey: 1}, + return: "post" + }, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1}); +assert.docEq(res.cursor.firstBatch[1].value, {skey: "MongoDB2"}); +assert(!res.cursor.firstBatch[2]); + +assert.eq("MongoDB2", coll.findOne().skey); + +coll.drop(); + +// Multi:true is not supported for retryable writes. + +// Test updates multiple when multi is true. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {update: 0, filter: {skey: "MongoDB"}, updateMods: {$set: {skey: "MongoDB2"}}, multi: true}, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 2, nModified: 2}); +assert(!res.cursor.firstBatch[2].value); +assert(!res.cursor.firstBatch[3]); +assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB2"}]); + +coll.drop(); + +// Test deletes multiple when multi is true. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {delete: 0, filter: {skey: "MongoDB"}, multi: true}, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 2}); +assert(!res.cursor.firstBatch[3]); +assert(!coll.findOne()); + +coll.drop(); + +// Test let for multiple updates and a delete, with constants shadowing in one update. 
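+// The op-level constants value for replacedKey1 ("MongoDB4") should take precedence over the
+// command-level let value for that op; the other ops still resolve their $$ variables from let.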
+res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + {insert: 0, document: {_id: 1, skey: "MongoDB2"}}, + { + update: 0, + filter: {$expr: {$eq: ["$skey", "$$targetKey1"]}}, + updateMods: [{$set: {skey: "$$replacedKey1"}}], + constants: {replacedKey1: "MongoDB4"}, + return: "post" + }, + { + update: 0, + filter: {$expr: {$eq: ["$skey", "$$targetKey2"]}}, + updateMods: [{$set: {skey: "MongoDB"}}], + return: "post" + }, + {delete: 0, filter: {$expr: {$eq: ["$skey", "$$replacedKey2"]}}, return: true} + ], + nsInfo: [{ns: "test.coll"}], + let : { + targetKey1: "MongoDB", + targetKey2: "MongoDB2", + replacedKey1: "MongoDB", + replacedKey2: "MongoDB4" + } +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[4], {ok: 1, idx: 4, n: 1}); +assert(!res.cursor.firstBatch[5]); + +assert.docEq(res.cursor.firstBatch[2].value, {_id: 0, skey: "MongoDB4"}); +assert.docEq(res.cursor.firstBatch[3].value, {_id: 1, skey: "MongoDB"}); +assert.docEq(res.cursor.firstBatch[4].value, {_id: 0, skey: "MongoDB4"}); + +assert.sameMembers(coll.find().toArray(), [{_id: 1, skey: "MongoDB"}]); + +coll.drop(); +})(); diff --git a/jstests/core/write/bulk/bulk_write_non_transaction.js b/jstests/core/write/bulk/bulk_write_non_transaction.js new file mode 100644 index 0000000000000..185a951102bdf --- /dev/null +++ b/jstests/core/write/bulk/bulk_write_non_transaction.js @@ -0,0 +1,675 @@ +/** + * Tests bulk write command for scenarios that cause the command to fail (ok: 0). + * + * These tests are incompatible with the transaction overrides since any failure + * will cause a transaction abortion which will make the overrides infinite loop. + * + * The test runs commands that are not allowed with security token: bulkWrite. + * @tags: [ + * assumes_against_mongod_not_mongos, + * not_allowed_with_security_token, + * command_not_supported_in_serverless, + * # Contains commands that fail which will fail the entire transaction + * does_not_support_transactions, + * # TODO SERVER-52419 Remove this tag. + * featureFlagBulkWriteCommand, + * ] + */ +load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator. + +(function() { +"use strict"; + +var coll = db.getCollection("coll"); +var coll1 = db.getCollection("coll1"); +coll.drop(); +coll1.drop(); + +var maxWriteBatchSize = db.hello().maxWriteBatchSize; +var insertOp = {insert: 0, document: {_id: 1, skey: "MongoDB"}}; + +// Make sure bulkWrite at maxWriteBatchSize is okay +let ops = []; +for (var i = 0; i < maxWriteBatchSize; ++i) { + ops.push(insertOp); +} + +var res = db.adminCommand({ + bulkWrite: 1, + ops: ops, + nsInfo: [{ns: "test.coll"}], +}); + +// It is also possible to see interruption here due to very large batch size. +if (!ErrorCodes.isInterruption(res.code)) { + assert.commandWorked(res); +} +coll.drop(); + +// Make sure bulkWrite above maxWriteBatchSize fails +ops = []; +for (var i = 0; i < maxWriteBatchSize + 1; ++i) { + ops.push(insertOp); +} + +res = db.adminCommand({ + bulkWrite: 1, + ops: ops, + nsInfo: [{ns: "test.coll"}], +}); + +// It is also possible to see interruption here due to very large batch size. 
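+// Otherwise, exceeding maxWriteBatchSize by a single op should fail the whole command
+// with InvalidLength.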
+if (!ErrorCodes.isInterruption(res.code)) { + assert.commandFailedWithCode(res, [ErrorCodes.InvalidLength]); +} + +// Make sure invalid fields are not accepted +assert.commandFailedWithCode(db.adminCommand({ + bulkWrite: 1, + ops: [{insert: 0, document: {skey: "MongoDB"}}], + nsInfo: [{ns: "test.coll"}], + cursor: {batchSize: 1024}, + bypassDocumentValidation: true, + ordered: false, + fooField: 0 +}), + [40415]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +// Make sure we fail if index out of range of nsInfo +assert.commandFailedWithCode(db.adminCommand({ + bulkWrite: 1, + ops: [{insert: 2, document: {skey: "MongoDB"}}, {insert: 0, document: {skey: "MongoDB"}}], + nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}] +}), + [ErrorCodes.BadValue]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +// Missing ops +assert.commandFailedWithCode(db.adminCommand({bulkWrite: 1, nsInfo: [{ns: "mydb.coll"}]}), [40414]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +// Missing nsInfo +assert.commandFailedWithCode( + db.adminCommand({bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}]}), [40414]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +// Test valid arguments with invalid values +assert.commandFailedWithCode(db.adminCommand({ + bulkWrite: 1, + ops: [{insert: "test", document: {skey: "MongoDB"}}], + nsInfo: [{ns: "test.coll"}] +}), + [ErrorCodes.TypeMismatch]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +assert.commandFailedWithCode( + db.adminCommand( + {bulkWrite: 1, ops: [{insert: 0, document: "test"}], nsInfo: [{ns: "test.coll"}]}), + [ErrorCodes.TypeMismatch]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +assert.commandFailedWithCode( + db.adminCommand( + {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: ["test"]}), + [ErrorCodes.TypeMismatch]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +assert.commandFailedWithCode( + db.adminCommand({bulkWrite: 1, ops: "test", nsInfo: [{ns: "test.coll"}]}), + [ErrorCodes.TypeMismatch]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +assert.commandFailedWithCode( + db.adminCommand( + {bulkWrite: 1, ops: [{insert: 0, document: {skey: "MongoDB"}}], nsInfo: "test"}), + [ErrorCodes.TypeMismatch]); + +assert.eq(coll.find().itcount(), 0); +assert.eq(coll1.find().itcount(), 0); + +// Make sure update multi:true + return fails the op. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + { + update: 0, + filter: {_id: 1}, + updateMods: {$set: {skey: "MongoDB2"}}, + multi: true, + return: "post" + }, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], + {ok: 0, idx: 0, n: 0, nModified: 0, code: ErrorCodes.InvalidOptions}); +assert(!res.cursor.firstBatch[1]); + +// Test update providing returnFields without return option. 
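+// returnFields is only valid together with the return option, so this op should fail with
+// InvalidOptions while the preceding insert still succeeds.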
+res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + { + update: 0, + filter: {_id: 0}, + updateMods: {$set: {skey: "MongoDB2"}}, + returnFields: {_id: 1} + }, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], + {ok: 0, idx: 1, n: 0, nModified: 0, code: ErrorCodes.InvalidOptions}); +assert(!res.cursor.firstBatch[2]); + +coll.drop(); + +// Test update fails userAllowedWriteNS. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + { + update: 0, + filter: {_id: 1}, + updateMods: {$set: {skey: "MongoDB2"}}, + }, + ], + nsInfo: [{ns: "test.system.profile"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], + {ok: 0, idx: 0, n: 0, nModified: 0, code: ErrorCodes.InvalidNamespace}); +assert(!res.cursor.firstBatch[1]); + +var coll2 = db.getCollection("coll2"); +coll2.drop(); + +// Test update continues on error with ordered:false. +assert.commandWorked(coll2.createIndex({x: 1}, {unique: true})); +assert.commandWorked(coll2.insert({x: 3})); +assert.commandWorked(coll2.insert({x: 4})); +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {update: 0, filter: {x: 3}, updateMods: {$inc: {x: 1}}, upsert: true}, + { + update: 1, + filter: {_id: 1}, + updateMods: {$set: {skey: "MongoDB2"}}, + upsert: true, + return: "post" + }, + ], + nsInfo: [{ns: "test.coll2"}, {ns: "test.coll"}], + ordered: false +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], + {ok: 0, idx: 0, n: 0, nModified: 0, code: ErrorCodes.DuplicateKey}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 0}); + +assert.docEq(res.cursor.firstBatch[1].upserted, {index: 0, _id: 1}); +assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"}); +assert(!res.cursor.firstBatch[2]); +coll.drop(); +coll2.drop(); + +// Test update stop on error with ordered:true. +assert.commandWorked(coll2.createIndex({x: 1}, {unique: true})); +assert.commandWorked(coll2.insert({x: 3})); +assert.commandWorked(coll2.insert({x: 4})); +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {update: 0, filter: {x: 3}, updateMods: {$inc: {x: 1}}, upsert: true}, + { + update: 1, + filter: {_id: 1}, + updateMods: {$set: {skey: "MongoDB2"}}, + upsert: true, + return: "post" + }, + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + ], + nsInfo: [{ns: "test.coll2"}, {ns: "test.coll"}], +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], + {ok: 0, idx: 0, n: 0, nModified: 0, code: ErrorCodes.DuplicateKey}); +assert(!res.cursor.firstBatch[1]); +coll.drop(); +coll2.drop(); + +// Test fixDocumentForInsert works properly by erroring out on >16MB size insert. +var targetSize = (16 * 1024 * 1024) + 1; +var doc = {_id: new ObjectId(), value: ''}; + +var size = Object.bsonsize(doc); +assert.gte(targetSize, size); + +// Set 'value' as a string with enough characters to make the whole document 'targetSize' +// bytes long. +doc.value = new Array(targetSize - size + 1).join('x'); +assert.eq(targetSize, Object.bsonsize(doc)); + +// Testing ordered:false continues on with other ops when fixDocumentForInsert fails. 
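+// Only the oversized document at index 1 should fail here, so the response is expected to
+// report numErrors: 1 while the inserts at indexes 0 and 2 still apply.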
+res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {insert: 0, document: doc}, + {insert: 0, document: {_id: 2, skey: "MongoDB2"}}, + ], + nsInfo: [{ns: "test.coll"}], + ordered: false +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +assert.eq(res.cursor.id, 0); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); + +// In most cases we expect this to fail because it tries to insert a document that is too large. +// In some cases we may see the javascript execution interrupted because it takes longer than +// our default time limit, so we allow that possibility. +try { + cursorEntryValidator(res.cursor.firstBatch[1], + {ok: 0, n: 0, idx: 1, code: ErrorCodes.BadValue}); +} catch { + cursorEntryValidator(res.cursor.firstBatch[1], + {ok: 0, n: 0, idx: 1, code: ErrorCodes.Interrupted}); +} +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, n: 1, idx: 2}); +assert(!res.cursor.firstBatch[3]); + +coll.drop(); + +// Testing ordered:true short circuits. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {insert: 0, document: doc}, + {insert: 0, document: {_id: 2, skey: "MongoDB2"}}, + ], + nsInfo: [{ns: "test.coll"}], + ordered: true +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +assert.eq(res.cursor.id, 0); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); + +// In most cases we expect this to fail because it tries to insert a document that is too large. +// In some cases we may see the javascript execution interrupted because it takes longer than +// our default time limit, so we allow that possibility. +try { + cursorEntryValidator(res.cursor.firstBatch[1], + {ok: 0, n: 0, idx: 1, code: ErrorCodes.BadValue}); +} catch { + cursorEntryValidator(res.cursor.firstBatch[1], + {ok: 0, n: 0, idx: 1, code: ErrorCodes.Interrupted}); +} +assert(!res.cursor.firstBatch[2]); + +coll.drop(); + +// Test that a write can fail part way through a write and the write partially executes. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {insert: 1, document: {skey: "MongoDB"}} + ], + nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +assert.eq(res.cursor.id, 0); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, n: 0, idx: 1, code: 11000}); +// Make sure that error extra info was correctly added +assert.docEq(res.cursor.firstBatch[1].keyPattern, {_id: 1}); +assert.docEq(res.cursor.firstBatch[1].keyValue, {_id: 1}); +assert(!res.cursor.firstBatch[2]); + +assert.eq(coll.find().itcount(), 1); +assert.eq(coll1.find().itcount(), 0); +coll.drop(); +coll1.drop(); + +// Test that we continue processing after an error for ordered:false. 
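+// The duplicate _id insert at index 1 should fail with code 11000 while the later insert into
+// test.coll1 still executes, leaving numErrors at 1.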
+res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {insert: 1, document: {skey: "MongoDB"}} + ], + nsInfo: [{ns: "test.coll"}, {ns: "test.coll1"}], + ordered: false +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +assert.eq(res.cursor.id, 0); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, n: 1, idx: 0}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, n: 0, idx: 1, code: 11000}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, n: 1, idx: 2}); +assert(!res.cursor.firstBatch[3]); + +assert.eq(coll.find().itcount(), 1); +assert.eq(coll1.find().itcount(), 1); +coll.drop(); +coll1.drop(); + +// Make sure delete multi:true + return fails the op. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {delete: 0, filter: {_id: 1}, multi: true, return: true}, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], + {ok: 0, n: 0, idx: 0, code: ErrorCodes.InvalidOptions}); +assert(!res.cursor.firstBatch[1]); + +// Test delete providing returnFields without return option. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + {delete: 0, filter: {_id: 0}, returnFields: {_id: 1}}, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], + {ok: 0, n: 0, idx: 1, code: ErrorCodes.InvalidOptions}); +assert(!res.cursor.firstBatch[2]); + +coll.drop(); + +// Test delete fails userAllowedWriteNS. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + { + delete: 0, + filter: {_id: 1}, + }, + ], + nsInfo: [{ns: "test.system.profile"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], + {ok: 0, idx: 0, n: 0, code: ErrorCodes.InvalidNamespace}); +assert(!res.cursor.firstBatch[1]); + +// Test delete continues on error with ordered:false. +coll.insert({_id: 1, skey: "MongoDB"}); +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + { + delete: 0, + filter: {_id: 0}, + }, + {delete: 1, filter: {_id: 1}, return: true} + ], + nsInfo: [{ns: "test.system.profile"}, {ns: "test.coll"}], + ordered: false +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], + {ok: 0, idx: 0, n: 0, code: ErrorCodes.InvalidNamespace}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"}); +assert(!res.cursor.firstBatch[2]); + +assert(!coll.findOne()); + +coll.drop(); + +// Test delete stop on error with ordered:true. +coll.insert({_id: 1, skey: "MongoDB"}); +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + { + delete: 0, + filter: {_id: 0}, + }, + {delete: 1, filter: {_id: 1}, return: true}, + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + ], + nsInfo: [{ns: "test.system.profile"}, {ns: "test.coll"}], +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], + {ok: 0, idx: 0, n: 0, code: ErrorCodes.InvalidNamespace}); +assert(!res.cursor.firstBatch[1]); + +assert.eq(coll.findOne().skey, "MongoDB"); + +coll.drop(); + +// Test running multiple findAndModify ops in a command. 
+// For normal commands this should succeed and for retryable writes the top level should fail. + +// Want to make sure both update + delete handle this correctly so test the following combinations +// of ops. update + delete, delete + update. This will prove that both ops set and check the flag +// correctly so doing update + update and delete + delete is redundant. + +// update + delete +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "pre"}, + {delete: 0, filter: {_id: 1}, return: true}, + ], + nsInfo: [{ns: "test.coll"}] +}); + +let processCursor = true; +try { + assert.commandWorked(res); + assert.eq(res.numErrors, 0); +} catch { + processCursor = false; + assert.commandFailedWithCode(res, [ErrorCodes.BadValue]); + assert.eq(res.errmsg, "BulkWrite can only support 1 op with a return for a retryable write"); +} + +if (processCursor) { + cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); + cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1}); + assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"}); + cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1}); + assert.docEq(res.cursor.firstBatch[2].value, {_id: 1, skey: "MongoDB2"}); + assert(!res.cursor.firstBatch[3]); +} + +coll.drop(); + +// delete + update +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {insert: 0, document: {_id: 2, skey: "MongoDB"}}, + {delete: 0, filter: {_id: 2}, return: true}, + {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "pre"}, + ], + nsInfo: [{ns: "test.coll"}] +}); + +processCursor = true; +try { + assert.commandWorked(res); + assert.eq(res.numErrors, 0); +} catch { + processCursor = false; + assert.commandFailedWithCode(res, [ErrorCodes.BadValue]); + assert.eq(res.errmsg, "BulkWrite can only support 1 op with a return for a retryable write"); +} + +if (processCursor) { + cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); + cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); + cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1}); + assert.docEq(res.cursor.firstBatch[2].value, {_id: 2, skey: "MongoDB"}); + cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1}); + assert.docEq(res.cursor.firstBatch[3].value, {_id: 1, skey: "MongoDB"}); + assert(!res.cursor.firstBatch[4]); +} + +coll.drop(); + +// Test BypassDocumentValidator +assert.commandWorked(coll.insert({_id: 1})); +assert.commandWorked(db.runCommand({collMod: "coll", validator: {a: {$exists: true}}})); + +res = db.adminCommand({ + bulkWrite: 1, + ops: [{insert: 0, document: {_id: 3, skey: "MongoDB"}}], + nsInfo: [{ns: "test.coll"}], + bypassDocumentValidation: false, +}); +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +assert.eq(0, coll.count({_id: 3})); +coll.drop(); + +// Test that we correctly count multiple errors for different write types when ordered=false. 
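+// Expect numErrors: 3, one each from the duplicate key insert, the delete against
+// test.system.profile, and the invalid $blah update operator.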
+res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1}}, + {insert: 0, document: {_id: 2}}, + // error 1: duplicate key error + {insert: 0, document: {_id: 1}}, + {delete: 0, filter: {_id: 2}}, + // error 2: user can't write to namespace + {delete: 1, filter: {_id: 0}}, + {update: 0, filter: {_id: 0}, updateMods: {$set: {x: 1}}}, + // error 3: invalid update operator + {update: 0, filter: {_id: 0}, updateMods: {$blah: {x: 1}}}, + ], + nsInfo: [{ns: "test.coll"}, {ns: "test.system.profile"}], + ordered: false +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 3); +coll.drop(); + +// Checking n and nModified on update success and failure. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}}, + {update: 0, filter: {_id: 1}, updateMods: {$set: {_id: 2}}}, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], + {ok: 0, idx: 2, n: 0, nModified: 0, code: ErrorCodes.ImmutableField}); +assert(!res.cursor.firstBatch[3]); +coll.drop(); + +// Test constants is not supported on non-pipeline update. +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + { + update: 0, + filter: {$expr: {$eq: ["$skey", "MongoDB"]}}, + updateMods: {skey: "$$targetKey"}, + constants: {targetKey: "MongoDB2"}, + return: "post" + }, + ], + nsInfo: [{ns: "test.coll"}], +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 1); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, n: 0, nModified: 0, code: 51198}); +assert.eq(res.cursor.firstBatch[0].errmsg, + "Constant values may only be specified for pipeline updates"); +assert(!res.cursor.firstBatch[1]); + +coll.drop(); +})(); diff --git a/jstests/core/write/bulk/bulk_write_update_cursor.js b/jstests/core/write/bulk/bulk_write_update_cursor.js index 140ebe27e6b93..1b2a75e6bd25a 100644 --- a/jstests/core/write/bulk/bulk_write_update_cursor.js +++ b/jstests/core/write/bulk/bulk_write_update_cursor.js @@ -5,36 +5,21 @@ * @tags: [ * assumes_against_mongod_not_mongos, * not_allowed_with_security_token, - * # TODO SERVER-72988: Until bulkWrite is compatible with retryable writes. - * requires_non_retryable_writes, - * # Command is not yet compatible with tenant migration. - * tenant_migration_incompatible, + * command_not_supported_in_serverless, + * # TODO SERVER-52419 Remove this tag. + * featureFlagBulkWriteCommand, * ] */ +load("jstests/libs/bulk_write_utils.js"); // For cursorEntryValidator. + (function() { "use strict"; -load("jstests/libs/feature_flag_util.js"); - -// Skip this test if the BulkWriteCommand feature flag is not enabled. -// TODO SERVER-67711: Remove feature flag check. 
-if (!FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) { - jsTestLog('Skipping test because the BulkWriteCommand feature flag is disabled.'); - return; -} var coll = db.getCollection("coll"); var coll1 = db.getCollection("coll1"); coll.drop(); coll1.drop(); -const cursorEntryValidator = function(entry, expectedEntry) { - assert(entry.ok == expectedEntry.ok); - assert(entry.idx == expectedEntry.idx); - assert(entry.n == expectedEntry.n); - assert(entry.nModified == expectedEntry.nModified); - assert(entry.code == expectedEntry.code); -}; - // Test generic update with no return. var res = db.adminCommand({ bulkWrite: 1, @@ -46,9 +31,10 @@ var res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1}); assert(!res.cursor.firstBatch[1].value); assert(!res.cursor.firstBatch[2]); @@ -68,9 +54,10 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1}); assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB"}); assert(!res.cursor.firstBatch[2]); @@ -90,9 +77,10 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1}); assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"}); assert(!res.cursor.firstBatch[2]); @@ -118,10 +106,11 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1}); assert.docEq(res.cursor.firstBatch[2].value, {_id: 1, skey: "MongoDB2"}); assert(!res.cursor.firstBatch[3]); assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB"}, {_id: 1, skey: "MongoDB2"}]); @@ -146,35 +135,68 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1}); assert.docEq(res.cursor.firstBatch[2].value, {_id: 0, skey: "MongoDB2"}); assert(!res.cursor.firstBatch[3]); assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB"}]); coll.drop(); -// Test updates multiple when multi is true. 
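+// The multi:true update and delete tests now live in bulk_write_non_retryable_cursor.js, since
+// multi is not supported for retryable writes.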
+// Test update with sort and not return +res = db.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + { + update: 0, + filter: {skey: "MongoDB"}, + updateMods: {$set: {skey: "MongoDB2"}}, + sort: {_id: -1} + }, + ], + nsInfo: [{ns: "test.coll"}] +}); + +assert.commandWorked(res); +assert.eq(res.numErrors, 0); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1}); +assert(!res.cursor.firstBatch[3]); +assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB"}, {_id: 1, skey: "MongoDB2"}]); + +coll.drop(); + +// Test update with sort and not return res = db.adminCommand({ bulkWrite: 1, ops: [ {insert: 0, document: {_id: 0, skey: "MongoDB"}}, {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {update: 0, filter: {skey: "MongoDB"}, updateMods: {$set: {skey: "MongoDB2"}}, multi: true}, + { + update: 0, + filter: {skey: "MongoDB"}, + updateMods: {$set: {skey: "MongoDB2"}}, + sort: {_id: 1} + }, ], nsInfo: [{ns: "test.coll"}] }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, nModified: 2}); -assert(!res.cursor.firstBatch[2].value); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1}); assert(!res.cursor.firstBatch[3]); -assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB2"}]); +assert.sameMembers(coll.find().toArray(), [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB"}]); coll.drop(); @@ -190,8 +212,9 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 1}); assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB2"}); assert(!res.cursor.firstBatch[1]); @@ -211,8 +234,9 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 0}); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 0, nModified: 0}); assert(!res.cursor.firstBatch[0].value); assert(!res.cursor.firstBatch[1]); @@ -237,8 +261,9 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 0}); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 0}); assert.docEq(res.cursor.firstBatch[0].upserted, {index: 0, _id: 1}); assert(!res.cursor.firstBatch[0].value); assert(!res.cursor.firstBatch[1]); @@ -263,8 +288,9 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 0}); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 0}); assert.docEq(res.cursor.firstBatch[0].upserted, {index: 0, _id: 1}); assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB2"}); assert(!res.cursor.firstBatch[1]); @@ -273,76 +299,6 @@ assert.eq("MongoDB2", coll.findOne().skey); coll.drop(); -// 
Make sure multi:true + return fails the op. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - { - update: 0, - filter: {_id: 1}, - updateMods: {$set: {skey: "MongoDB2"}}, - multi: true, - return: "post" - }, - ], - nsInfo: [{ns: "test.coll"}] -}); - -assert.commandWorked(res); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidOptions}); -assert(!res.cursor.firstBatch[1]); - -// Test returnFields with return. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 0, skey: "MongoDB"}}, - { - update: 0, - filter: {_id: 0}, - updateMods: {$set: {skey: "MongoDB2"}}, - returnFields: {_id: 0, skey: 1}, - return: "post" - }, - ], - nsInfo: [{ns: "test.coll"}] -}); - -assert.commandWorked(res); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1}); -assert.docEq(res.cursor.firstBatch[1].value, {skey: "MongoDB2"}); -assert(!res.cursor.firstBatch[2]); - -assert.eq("MongoDB2", coll.findOne().skey); - -coll.drop(); - -// Test providing returnFields without return option. -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {insert: 0, document: {_id: 0, skey: "MongoDB"}}, - { - update: 0, - filter: {_id: 0}, - updateMods: {$set: {skey: "MongoDB2"}}, - returnFields: {_id: 1} - }, - ], - nsInfo: [{ns: "test.coll"}] -}); - -assert.commandWorked(res); - -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 0, idx: 1, code: ErrorCodes.InvalidOptions}); -assert(!res.cursor.firstBatch[2]); - -coll.drop(); - // Test inc operator in updateMods. res = db.adminCommand({ bulkWrite: 1, @@ -354,9 +310,10 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1}); assert.docEq(res.cursor.firstBatch[1].value, {_id: 0, a: 3}); assert.eq(res.cursor.firstBatch[1].nModified, 1); assert(!res.cursor.firstBatch[2]); @@ -381,8 +338,9 @@ res = db.adminCommand({ }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 1}); assert.eq(res.cursor.firstBatch[0].nModified, 1); assert.docEq(res.cursor.firstBatch[0].value, {_id: 0, a: [{b: 6}, {b: 1}, {b: 2}]}); assert(!res.cursor.firstBatch[1]); @@ -400,19 +358,20 @@ res = db.adminCommand({ update: 0, filter: {$expr: {$eq: ["$skey", "$$targetKey"]}}, updateMods: {skey: "MongoDB2"}, - let : {targetKey: "MongoDB"}, return: "post" }, ], - nsInfo: [{ns: "test.coll"}] + nsInfo: [{ns: "test.coll"}], + let : {targetKey: "MongoDB"} }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, nModified: 1}); +cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1}); assert.docEq(res.cursor.firstBatch[3].value, {_id: 0, skey: "MongoDB2"}); assert(!res.cursor.firstBatch[4]); @@ -422,124 +381,118 @@ assert.sameMembers( coll.drop(); -// Test 
multiple updates on same namespace. +// Test that constants work in a pipeline update. res = db.adminCommand({ bulkWrite: 1, ops: [ - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, - {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "post"}, - {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB3"}}, return: "post"}, + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + {insert: 0, document: {_id: 1, skey: "MongoDB2"}}, + {insert: 0, document: {_id: 2, skey: "MongoDB3"}}, + { + update: 0, + filter: {$expr: {$eq: ["$skey", "$$targetKey"]}}, + updateMods: [{$set: {skey: "$$replacedKey"}}], + constants: {targetKey: "MongoDB", replacedKey: "MongoDB2"}, + return: "post" + }, ], - nsInfo: [{ns: "test.coll"}] + nsInfo: [{ns: "test.coll"}], }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 1}); -assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"}); -cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, nModified: 1}); -assert.docEq(res.cursor.firstBatch[2].value, {_id: 1, skey: "MongoDB3"}); -assert(!res.cursor.firstBatch[3]); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1}); +assert.docEq(res.cursor.firstBatch[3].value, {_id: 0, skey: "MongoDB2"}); +assert(!res.cursor.firstBatch[4]); -assert.eq("MongoDB3", coll.findOne().skey); +assert.sameMembers( + coll.find().toArray(), + [{_id: 0, skey: "MongoDB2"}, {_id: 1, skey: "MongoDB2"}, {_id: 2, skey: "MongoDB3"}]); coll.drop(); -// Test upsert with implicit collection creation. +// Test that let matches a specific document (targetKey) and that constants override let (replacedKey). res = db.adminCommand({ bulkWrite: 1, ops: [ + {insert: 0, document: {_id: 0, skey: "MongoDB"}}, + {insert: 0, document: {_id: 1, skey: "MongoDB2"}}, + {insert: 0, document: {_id: 2, skey: "MongoDB3"}}, { update: 0, - filter: {_id: 1}, - updateMods: {$set: {skey: "MongoDB2"}}, - upsert: true, + filter: {$expr: {$eq: ["$skey", "$$targetKey"]}}, + updateMods: [{$set: {skey: "$$replacedKey"}}], + constants: {replacedKey: "MongoDB4"}, return: "post" }, ], - nsInfo: [{ns: "test.coll2"}] + nsInfo: [{ns: "test.coll"}], + let : {targetKey: "MongoDB3", replacedKey: "MongoDB2"} }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, nModified: 0}); -assert.docEq(res.cursor.firstBatch[0].upserted, {index: 0, _id: 1}); -assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB2"}); -assert(!res.cursor.firstBatch[1]); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[3], {ok: 1, idx: 3, n: 1, nModified: 1}); +assert.docEq(res.cursor.firstBatch[3].value, {_id: 2, skey: "MongoDB4"}); +assert(!res.cursor.firstBatch[4]); -var coll2 = db.getCollection("coll2"); -coll2.drop(); +coll.drop(); -// Test write fails userAllowedWriteNS. +// Test multiple updates on same namespace. 
res = db.adminCommand({ bulkWrite: 1, ops: [ - { - update: 0, - filter: {_id: 1}, - updateMods: {$set: {skey: "MongoDB2"}}, - multi: true, - }, + {insert: 0, document: {_id: 1, skey: "MongoDB"}}, + {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, return: "post"}, + {update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB3"}}}, ], - nsInfo: [{ns: "test.system.profile"}] + nsInfo: [{ns: "test.coll"}] }); assert.commandWorked(res); +assert.eq(res.numErrors, 0); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.InvalidNamespace}); -assert(!res.cursor.firstBatch[1]); +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1}); +cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, n: 1, nModified: 1}); +assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"}); +cursorEntryValidator(res.cursor.firstBatch[2], {ok: 1, idx: 2, n: 1, nModified: 1}); +assert(!res.cursor.firstBatch[3]); -// Test update continues on error with ordered:false. -assert.commandWorked(coll2.createIndex({x: 1}, {unique: true})); -assert.commandWorked(coll2.insert({x: 3})); -assert.commandWorked(coll2.insert({x: 4})); -res = db.adminCommand({ - bulkWrite: 1, - ops: [ - {update: 0, filter: {x: 3}, updateMods: {$inc: {x: 1}}, upsert: true, return: "post"}, - { - update: 1, - filter: {_id: 1}, - updateMods: {$set: {skey: "MongoDB2"}}, - upsert: true, - return: "post" - }, - ], - nsInfo: [{ns: "test.coll2"}, {ns: "test.coll"}], - ordered: false -}); +assert.eq("MongoDB3", coll.findOne().skey); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.DuplicateKey}); -cursorEntryValidator(res.cursor.firstBatch[1], {ok: 1, idx: 1, nModified: 0}); -assert.docEq(res.cursor.firstBatch[1].upserted, {index: 0, _id: 1}); -assert.docEq(res.cursor.firstBatch[1].value, {_id: 1, skey: "MongoDB2"}); -assert(!res.cursor.firstBatch[2]); coll.drop(); -coll2.drop(); -// Test update stop on error with ordered:true. -assert.commandWorked(coll2.createIndex({x: 1}, {unique: true})); -assert.commandWorked(coll2.insert({x: 3})); -assert.commandWorked(coll2.insert({x: 4})); +// Test upsert with implicit collection creation. 
res = db.adminCommand({ bulkWrite: 1, ops: [ - {update: 0, filter: {x: 3}, updateMods: {$inc: {x: 1}}, upsert: true, return: "post"}, { - update: 1, + update: 0, filter: {_id: 1}, updateMods: {$set: {skey: "MongoDB2"}}, upsert: true, return: "post" }, - {insert: 0, document: {_id: 1, skey: "MongoDB"}}, ], - nsInfo: [{ns: "test.coll2"}, {ns: "test.coll"}], + nsInfo: [{ns: "test.coll2"}] }); -cursorEntryValidator(res.cursor.firstBatch[0], {ok: 0, idx: 0, code: ErrorCodes.DuplicateKey}); +assert.commandWorked(res); +assert.eq(res.numErrors, 0); + +cursorEntryValidator(res.cursor.firstBatch[0], {ok: 1, idx: 0, n: 1, nModified: 0}); +assert.docEq(res.cursor.firstBatch[0].upserted, {index: 0, _id: 1}); +assert.docEq(res.cursor.firstBatch[0].value, {_id: 1, skey: "MongoDB2"}); assert(!res.cursor.firstBatch[1]); -coll.drop(); + +var coll2 = db.getCollection("coll2"); coll2.drop(); })(); diff --git a/jstests/core/write/collection_uuid_write_commands.js b/jstests/core/write/collection_uuid_write_commands.js index ff9781ec09052..e12971e2c13e7 100644 --- a/jstests/core/write/collection_uuid_write_commands.js +++ b/jstests/core/write/collection_uuid_write_commands.js @@ -23,7 +23,7 @@ const validateErrorResponse = function( }; var testCommand = function(cmd, cmdObj) { - const testDB = db.getSiblingDB(jsTestName()); + const testDB = db.getSiblingDB("coll_uuid_write_cmds"); assert.commandWorked(testDB.dropDatabase()); const coll = testDB['coll']; assert.commandWorked(coll.insert({_id: 0})); diff --git a/jstests/core/write/delete/delete_hint.js b/jstests/core/write/delete/delete_hint.js index 6b944702fd97d..9d1e6eb88f536 100644 --- a/jstests/core/write/delete/delete_hint.js +++ b/jstests/core/write/delete/delete_hint.js @@ -10,10 +10,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage} from "jstests/libs/analyze_plan.js"; function assertCommandUsesIndex(command, expectedHintKeyPattern) { const out = assert.commandWorked(coll.runCommand({explain: command})); @@ -114,5 +111,4 @@ function failedHintTest() { normalIndexTest(); sparseIndexTest(); shellHelpersTest(); -failedHintTest(); -})(); +failedHintTest(); \ No newline at end of file diff --git a/jstests/core/write/delete/remove.js b/jstests/core/write/delete/remove.js index f08792aeca594..e3ecd6343212d 100644 --- a/jstests/core/write/delete/remove.js +++ b/jstests/core/write/delete/remove.js @@ -3,11 +3,11 @@ // remove.js // unit test for db remove -t = db.removetest; +let t = db.removetest; function f(n, dir) { t.createIndex({x: dir || 1}); - for (i = 0; i < n; i++) + for (let i = 0; i < n; i++) t.save({x: 3, z: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}); assert.eq(n, t.find().count()); diff --git a/jstests/core/write/delete/remove3.js b/jstests/core/write/delete/remove3.js index 6f0a94589e94e..c2ce5ffead8b4 100644 --- a/jstests/core/write/delete/remove3.js +++ b/jstests/core/write/delete/remove3.js @@ -1,9 +1,9 @@ // @tags: [requires_non_retryable_writes, requires_fastcount] -t = db.remove3; +let t = db.remove3; t.drop(); -for (i = 1; i <= 8; i++) { +for (let i = 1; i <= 8; i++) { t.save({_id: i, x: i}); } diff --git a/jstests/core/write/delete/remove4.js b/jstests/core/write/delete/remove4.js index 483de24bf7d77..43dcbc5807bfd 100644 --- a/jstests/core/write/delete/remove4.js +++ b/jstests/core/write/delete/remove4.js @@ -1,6 +1,6 @@ // @tags: [requires_non_retryable_writes] -t = db.remove4; +let t = db.remove4; t.drop(); t.save({a: 1, b: 1}); diff --git 
a/jstests/core/write/delete/remove6.js b/jstests/core/write/delete/remove6.js index f60200a8d8122..0546fa3f8b612 100644 --- a/jstests/core/write/delete/remove6.js +++ b/jstests/core/write/delete/remove6.js @@ -1,9 +1,9 @@ // @tags: [requires_non_retryable_writes, requires_fastcount] -t = db.remove6; +let t = db.remove6; t.drop(); -N = 1000; +let N = 1000; function pop() { t.drop(); diff --git a/jstests/core/write/delete/remove7.js b/jstests/core/write/delete/remove7.js index 9cc8632999090..9924a34362ca8 100644 --- a/jstests/core/write/delete/remove7.js +++ b/jstests/core/write/delete/remove7.js @@ -1,6 +1,6 @@ // @tags: [requires_non_retryable_writes] -t = db.remove7; +let t = db.remove7; t.drop(); function getTags(n) { @@ -14,13 +14,13 @@ function getTags(n) { return a; } -for (i = 0; i < 1000; i++) { +for (let i = 0; i < 1000; i++) { t.save({tags: getTags()}); } t.createIndex({tags: 1}); -for (i = 0; i < 200; i++) { +for (let i = 0; i < 200; i++) { for (var j = 0; j < 10; j++) t.save({tags: getTags(100)}); var q = {tags: {$in: getTags(10)}}; diff --git a/jstests/core/write/delete/remove8.js b/jstests/core/write/delete/remove8.js index 7a8263c21e448..3f34a753e92f3 100644 --- a/jstests/core/write/delete/remove8.js +++ b/jstests/core/write/delete/remove8.js @@ -4,10 +4,10 @@ // requires_fastcount, // ] -t = db.remove8; +let t = db.remove8; t.drop(); -N = 1000; +let N = 1000; function fill() { for (var i = 0; i < N; i++) { diff --git a/jstests/core/write/delete/remove_justone.js b/jstests/core/write/delete/remove_justone.js index f5345627e34cb..1ae630432db3d 100644 --- a/jstests/core/write/delete/remove_justone.js +++ b/jstests/core/write/delete/remove_justone.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection, requires_non_retryable_writes, requires_fastcount] -t = db.remove_justone; +let t = db.remove_justone; t.drop(); t.insert({x: 1}); diff --git a/jstests/core/write/delete/removec.js b/jstests/core/write/delete/removec.js index 560f7405de3ee..bd68d43201c42 100644 --- a/jstests/core/write/delete/removec.js +++ b/jstests/core/write/delete/removec.js @@ -6,36 +6,36 @@ // Sanity test for removing documents with adjacent index keys. SERVER-2008 -t = db.jstests_removec; +let t = db.jstests_removec; t.drop(); t.createIndex({a: 1}); /** @return an array containing a sequence of numbers from i to i + 10. */ function runStartingWith(i) { - ret = []; - for (j = 0; j < 11; ++j) { + let ret = []; + for (let j = 0; j < 11; ++j) { ret.push(i + j); } return ret; } // Insert some documents with adjacent index keys. -for (i = 0; i < 1100; i += 11) { +for (let i = 0; i < 1100; i += 11) { t.save({a: runStartingWith(i)}); } // Remove and then reinsert random documents in the background. -s = startParallelShell('t = db.jstests_removec;' + - 'Random.setRandomSeed();' + - 'for( j = 0; j < 1000; ++j ) {' + - ' o = t.findOne( { a:Random.randInt( 1100 ) } );' + - ' t.remove( { _id:o._id } );' + - ' t.insert( o );' + - '}'); +let s = startParallelShell('t = db.jstests_removec;' + + 'Random.setRandomSeed();' + + 'for( j = 0; j < 1000; ++j ) {' + + ' o = t.findOne( { a:Random.randInt( 1100 ) } );' + + ' t.remove( { _id:o._id } );' + + ' t.insert( o );' + + '}'); // Find operations are error free. Note that the cursor throws if it detects the $err // field in the returned document. 
-for (i = 0; i < 200; ++i) { +for (let i = 0; i < 200; ++i) { t.find({a: {$gte: 0}}).hint({a: 1}).itcount(); } diff --git a/jstests/core/write/find_and_modify/find_and_modify.js b/jstests/core/write/find_and_modify/find_and_modify.js index 56eae29456802..47885cf9c921c 100644 --- a/jstests/core/write/find_and_modify/find_and_modify.js +++ b/jstests/core/write/find_and_modify/find_and_modify.js @@ -6,7 +6,7 @@ // requires_fastcount, // ] -t = db.find_and_modify; +let t = db.find_and_modify; t.drop(); // fill db @@ -15,7 +15,8 @@ for (var i = 1; i <= 10; i++) { } // returns old -out = t.findAndModify({sort: {priority: 1}, update: {$set: {inprogress: true}, $inc: {value: 1}}}); +let out = + t.findAndModify({sort: {priority: 1}, update: {$set: {inprogress: true}, $inc: {value: 1}}}); assert.eq(out.value, 0); assert.eq(out.inprogress, false); t.update({_id: out._id}, {$set: {inprogress: false}}); @@ -133,7 +134,7 @@ runFindAndModify(true /* shouldMatch */, false /* upsert */, false /* new */); // t.drop(); -cmdRes = db.runCommand( +let cmdRes = db.runCommand( {findAndModify: t.getName(), query: {_id: "miss"}, update: {$inc: {y: 1}}, upsert: true}); assert.commandWorked(cmdRes); assert("value" in cmdRes); diff --git a/jstests/core/write/find_and_modify/find_and_modify3.js b/jstests/core/write/find_and_modify/find_and_modify3.js index a319aef7a2d9a..8aee710a0cf8e 100644 --- a/jstests/core/write/find_and_modify/find_and_modify3.js +++ b/jstests/core/write/find_and_modify/find_and_modify3.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.find_and_modify3; +let t = db.find_and_modify3; t.drop(); t.insert({_id: 0, other: 0, comments: [{i: 0, j: 0}, {i: 1, j: 1}]}); @@ -14,10 +14,10 @@ t.insert({ }); // this is the only one that gets modded t.insert({_id: 2, other: 2, comments: [{i: 0, j: 0}, {i: 1, j: 1}]}); -orig0 = t.findOne({_id: 0}); -orig2 = t.findOne({_id: 2}); +let orig0 = t.findOne({_id: 0}); +let orig2 = t.findOne({_id: 2}); -out = t.findAndModify({ +let out = t.findAndModify({ query: {_id: 1, 'comments.i': 0}, update: {$set: {'comments.$.j': 2}}, 'new': true, diff --git a/jstests/core/write/find_and_modify/find_and_modify4.js b/jstests/core/write/find_and_modify/find_and_modify4.js index d5b3ae23cb21f..8d23edb62a94c 100644 --- a/jstests/core/write/find_and_modify/find_and_modify4.js +++ b/jstests/core/write/find_and_modify/find_and_modify4.js @@ -3,7 +3,7 @@ // key. 
// @tags: [assumes_unsharded_collection, requires_fastcount] -t = db.find_and_modify4; +let t = db.find_and_modify4; t.drop(); // this is the best way to build auto-increment diff --git a/jstests/core/write/find_and_modify/find_and_modify_hint.js b/jstests/core/write/find_and_modify/find_and_modify_hint.js index 9298e92dd7495..0350000f533fe 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_hint.js +++ b/jstests/core/write/find_and_modify/find_and_modify_hint.js @@ -9,10 +9,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage} from "jstests/libs/analyze_plan.js"; function assertCommandUsesIndex(command, expectedHintKeyPattern) { const out = assert.commandWorked(coll.runCommand({explain: command})); @@ -151,5 +148,4 @@ const coll = db.jstests_find_and_modify_hint; hint: {badHint: 1} }; assert.commandFailedWithCode(coll.runCommand(famUpdateCmd), ErrorCodes.BadValue); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/core/write/find_and_modify/find_and_modify_metrics.js b/jstests/core/write/find_and_modify/find_and_modify_metrics.js index 37ba521ef7350..5a251525c08a0 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_metrics.js +++ b/jstests/core/write/find_and_modify/find_and_modify_metrics.js @@ -10,6 +10,9 @@ * # This test contains assertions on the number of executed operations, and tenant migrations * # passthrough suites automatically retry operations on TenantMigrationAborted errors. * tenant_migration_incompatible, + * # The config fuzzer may run logical session cache refreshes in the background, which modifies + * # some serverStatus metrics read in this test. + * does_not_support_config_fuzzer, * ] */ (function() { diff --git a/jstests/core/write/find_and_modify/find_and_modify_pipeline_update.js b/jstests/core/write/find_and_modify/find_and_modify_pipeline_update.js index 0340d29bd669a..38246e386fc7c 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_pipeline_update.js +++ b/jstests/core/write/find_and_modify/find_and_modify_pipeline_update.js @@ -2,11 +2,8 @@ * Tests the pipeline-style update is accepted by the findAndModify command. * @tags: [requires_non_retryable_writes] */ -(function() { -"use strict"; - load("jstests/libs/fixture_helpers.js"); // For isMongos. -load("jstests/libs/analyze_plan.js"); // For planHasStage(). +import {getPlanStage, planHasStage} from "jstests/libs/analyze_plan.js"; const coll = db.find_and_modify_pipeline_update; coll.drop(); @@ -93,5 +90,4 @@ if (!FixtureHelpers.isMongos(db)) { let err = assert.throws(() => coll.findAndModify( {query: {_id: 1}, update: [{$set: {y: 1}}], arrayFilters: [{"i.x": 4}]})); -assert.eq(err.code, ErrorCodes.FailedToParse); -}()); +assert.eq(err.code, ErrorCodes.FailedToParse); \ No newline at end of file diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6254.js b/jstests/core/write/find_and_modify/find_and_modify_server6254.js index 5a0dae9db22c4..2cb527446ba2e 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_server6254.js +++ b/jstests/core/write/find_and_modify/find_and_modify_server6254.js @@ -3,11 +3,11 @@ // key. 
// @tags: [assumes_unsharded_collection, requires_fastcount] -t = db.find_and_modify_server6254; +let t = db.find_and_modify_server6254; t.drop(); t.insert({x: 1}); -ret = t.findAndModify({query: {x: 1}, update: {$set: {x: 2}}, new: true}); +let ret = t.findAndModify({query: {x: 1}, update: {$set: {x: 2}}, new: true}); assert.eq(2, ret.x, tojson(ret)); assert.eq(1, t.count()); diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6582.js b/jstests/core/write/find_and_modify/find_and_modify_server6582.js index 7ad8aebee760d..2e5b38af2a23c 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_server6582.js +++ b/jstests/core/write/find_and_modify/find_and_modify_server6582.js @@ -3,11 +3,12 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.find_and_modify_server6582; +let t = db.find_and_modify_server6582; t.drop(); -x = t.runCommand("findAndModify", {query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true}); -le = x.lastErrorObject; +let x = + t.runCommand("findAndModify", {query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true}); +let le = x.lastErrorObject; assert.eq(le.updatedExisting, false); assert.eq(le.n, 1); assert.eq(le.upserted, x.value._id); diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6588.js b/jstests/core/write/find_and_modify/find_and_modify_server6588.js index 197c892ca67c5..344bb74a2416d 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_server6588.js +++ b/jstests/core/write/find_and_modify/find_and_modify_server6588.js @@ -3,25 +3,12 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.find_and_modify_sever6588; +let t = db.find_and_modify_sever6588; -initial = { - _id: 1, - a: [{b: 1}], - z: 1 -}; -up = { - "$set": {"a.$.b": 2} -}; -q = { - _id: 1, - "a.b": 1 -}; -correct = { - _id: 1, - a: [{b: 2}], - z: 1 -}; +let initial = {_id: 1, a: [{b: 1}], z: 1}; +let up = {"$set": {"a.$.b": 2}}; +let q = {_id: 1, "a.b": 1}; +let correct = {_id: 1, a: [{b: 2}], z: 1}; t.drop(); t.insert(initial); @@ -30,7 +17,7 @@ assert.eq(correct, t.findOne()); t.drop(); t.insert(initial); -x = t.findAndModify({query: q, update: up}); +let x = t.findAndModify({query: q, update: up}); assert.eq(correct, t.findOne()); t.drop(); diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6659.js b/jstests/core/write/find_and_modify/find_and_modify_server6659.js index 029a32d6a4988..091c9e93ebcab 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_server6659.js +++ b/jstests/core/write/find_and_modify/find_and_modify_server6659.js @@ -3,9 +3,9 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.find_and_modify_server6659; +let t = db.find_and_modify_server6659; t.drop(); -x = t.findAndModify({query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true}); +let x = t.findAndModify({query: {f: 1}, update: {$set: {f: 2}}, upsert: true, new: true}); assert.eq(2, x.f); assert.eq(2, t.findOne().f); diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6909.js b/jstests/core/write/find_and_modify/find_and_modify_server6909.js index c74e342822bab..24a62a9ba2b4f 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_server6909.js +++ b/jstests/core/write/find_and_modify/find_and_modify_server6909.js @@ -3,12 +3,12 @@ // key. 
// @tags: [assumes_unsharded_collection] -c = db.find_and_modify_server6906; +let c = db.find_and_modify_server6906; c.drop(); c.insert({_id: 5, a: {b: 1}}); -ret = c.findAndModify({ +let ret = c.findAndModify({ query: {'a.b': 1}, update: {$set: {'a.b': 2}}, // Ensure the query on 'a.b' no longer matches. new: true diff --git a/jstests/core/write/find_and_modify/find_and_modify_server6993.js b/jstests/core/write/find_and_modify/find_and_modify_server6993.js index 4d9b169700d48..eec75a9679b73 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_server6993.js +++ b/jstests/core/write/find_and_modify/find_and_modify_server6993.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection] -c = db.find_and_modify_server6993; +let c = db.find_and_modify_server6993; c.drop(); c.insert({a: [1, 2]}); diff --git a/jstests/core/write/find_and_modify/find_and_modify_server7660.js b/jstests/core/write/find_and_modify/find_and_modify_server7660.js index 7973279ddcf1b..3ec3d715405e0 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_server7660.js +++ b/jstests/core/write/find_and_modify/find_and_modify_server7660.js @@ -3,13 +3,13 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.find_and_modify_server7660; +let t = db.find_and_modify_server7660; t.drop(); -a = t.findAndModify( +let a = t.findAndModify( {query: {foo: 'bar'}, update: {$set: {bob: 'john'}}, sort: {foo: 1}, upsert: true, new: true}); -b = t.findOne(); +let b = t.findOne(); assert.eq(a, b); assert.eq("bar", a.foo); assert.eq("john", a.bob); diff --git a/jstests/core/write/find_and_modify/find_and_modify_where.js b/jstests/core/write/find_and_modify/find_and_modify_where.js index 62375da7aabe2..6d605f9d23afa 100644 --- a/jstests/core/write/find_and_modify/find_and_modify_where.js +++ b/jstests/core/write/find_and_modify/find_and_modify_where.js @@ -8,11 +8,11 @@ // requires_scripting, // ] -t = db.find_and_modify_where; +let t = db.find_and_modify_where; t.drop(); t.insert({_id: 1, x: 1}); -res = t.findAndModify({query: {$where: "return this.x == 1"}, update: {$set: {y: 1}}}); +let res = t.findAndModify({query: {$where: "return this.x == 1"}, update: {$set: {y: 1}}}); assert.eq(1, t.findOne().y); diff --git a/jstests/core/write/insert/insert_id_undefined.js b/jstests/core/write/insert/insert_id_undefined.js index 6d0bc38f8fb45..80873def9c658 100644 --- a/jstests/core/write/insert/insert_id_undefined.js +++ b/jstests/core/write/insert/insert_id_undefined.js @@ -3,7 +3,7 @@ // @tags: [assumes_no_implicit_collection_creation_after_drop, requires_fastcount] // ensure a document with _id undefined cannot be saved -t = db.insert_id_undefined; +let t = db.insert_id_undefined; t.drop(); t.insert({_id: undefined}); assert.eq(t.count(), 0); diff --git a/jstests/core/write/update/update2.js b/jstests/core/write/update/update2.js index 080875b50bd23..a026346e35574 100644 --- a/jstests/core/write/update/update2.js +++ b/jstests/core/write/update/update2.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection] -f = db.ed_db_update2; +let f = db.ed_db_update2; f.drop(); f.save({a: 4}); diff --git a/jstests/core/write/update/update3.js b/jstests/core/write/update/update3.js index 5a61b8bcfc956..884a65e9e0eef 100644 --- a/jstests/core/write/update/update3.js +++ b/jstests/core/write/update/update3.js @@ -5,7 +5,7 @@ // Update with mods corner cases. 
-f = db.jstests_update3; +let f = db.jstests_update3; f.drop(); f.save({a: 1}); @@ -30,4 +30,4 @@ assert.eq(0, f.findOne()._id, "D"); f.drop(); f.save({_id: 1, a: 1}); f.update({}, {$unset: {"a": 1, "b.c": 1}}); -assert.docEq({_id: 1}, f.findOne(), "E"); \ No newline at end of file +assert.docEq({_id: 1}, f.findOne(), "E"); diff --git a/jstests/core/write/update/update5.js b/jstests/core/write/update/update5.js index fafc0d72ce08f..2bdcaea1ef2bc 100644 --- a/jstests/core/write/update/update5.js +++ b/jstests/core/write/update/update5.js @@ -4,7 +4,7 @@ // // @tags: [assumes_unsharded_collection, requires_fastcount] -t = db.update5; +let t = db.update5; function go(key) { t.drop(); @@ -24,7 +24,7 @@ function go(key) { check(3, "C"); var ik = {}; - for (k in key) + for (let k in key) ik[k] = 1; t.createIndex(ik); diff --git a/jstests/core/write/update/update6.js b/jstests/core/write/update/update6.js index 8a1950b8d02ec..7704e921151bb 100644 --- a/jstests/core/write/update/update6.js +++ b/jstests/core/write/update/update6.js @@ -5,7 +5,7 @@ // assumes_unsharded_collection, // ] -t = db.update6; +let t = db.update6; t.drop(); t.save({a: 1, b: {c: 1, d: 1}}); diff --git a/jstests/core/write/update/update7.js b/jstests/core/write/update/update7.js index d3a7a5d1debeb..a7fcbbfddf05c 100644 --- a/jstests/core/write/update/update7.js +++ b/jstests/core/write/update/update7.js @@ -3,7 +3,7 @@ // key. // @tags: [assumes_unsharded_collection, requires_multi_updates, requires_non_retryable_writes] -t = db.update7; +let t = db.update7; t.drop(); function s() { @@ -119,6 +119,7 @@ assert.eq("4,7,", s(), "E1"); t.update({}, {$inc: {x: 1}}, false, true); assert.eq("5,8,1", s(), "E2"); +let i; for (i = 4; i < 8; i++) t.save({_id: i}); t.save({_id: i, x: 1}); diff --git a/jstests/core/write/update/update8.js b/jstests/core/write/update/update8.js index 596bc8695ddc7..7db54242cf399 100644 --- a/jstests/core/write/update/update8.js +++ b/jstests/core/write/update/update8.js @@ -1,5 +1,5 @@ -t = db.update8; +let t = db.update8; t.drop(); t.update({_id: 1, tags: {"$ne": "a"}}, {"$push": {tags: "a"}}, true); diff --git a/jstests/core/write/update/update9.js b/jstests/core/write/update/update9.js index d119681a09e45..e7f9da8ddb601 100644 --- a/jstests/core/write/update/update9.js +++ b/jstests/core/write/update/update9.js @@ -1,8 +1,8 @@ -t = db.update9; +let t = db.update9; t.drop(); -orig = { +let orig = { "_id": 1, "question": "a", "choices": {"1": {"choice": "b"}, "0": {"choice": "c"}}, diff --git a/jstests/core/write/update/update_addToSet2.js b/jstests/core/write/update/update_addToSet2.js index 44ba8bce671e8..2aabdb3078c95 100644 --- a/jstests/core/write/update/update_addToSet2.js +++ b/jstests/core/write/update/update_addToSet2.js @@ -1,10 +1,8 @@ -t = db.update_addToSet2; +let t = db.update_addToSet2; t.drop(); -o = { - _id: 1 -}; +let o = {_id: 1}; t.insert({_id: 1}); t.update({}, {$addToSet: {'kids': {'name': 'Bob', 'age': '4'}}}); diff --git a/jstests/core/write/update/update_addToSet3.js b/jstests/core/write/update/update_addToSet3.js index efd682cef4cf6..ee13b651233b5 100644 --- a/jstests/core/write/update/update_addToSet3.js +++ b/jstests/core/write/update/update_addToSet3.js @@ -1,6 +1,6 @@ // Test the use of $each in $addToSet -t = db.update_addToSet3; +let t = db.update_addToSet3; t.drop(); t.insert({_id: 1}); diff --git a/jstests/core/write/update/update_arraymatch1.js b/jstests/core/write/update/update_arraymatch1.js index 10b7e37e45127..dd88af731aa6b 100644 --- 
a/jstests/core/write/update/update_arraymatch1.js +++ b/jstests/core/write/update/update_arraymatch1.js @@ -3,19 +3,14 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.update_arraymatch1; +let t = db.update_arraymatch1; t.drop(); -o = { - _id: 1, - a: [{x: 1, y: 1}, {x: 2, y: 2}, {x: 3, y: 3}] -}; +let o = {_id: 1, a: [{x: 1, y: 1}, {x: 2, y: 2}, {x: 3, y: 3}]}; t.insert(o); assert.eq(o, t.findOne(), "A1"); -q = { - "a.x": 2 -}; +let q = {"a.x": 2}; t.update(q, {$set: {b: 5}}); o.b = 5; assert.eq(o, t.findOne(), "A2"); diff --git a/jstests/core/write/update/update_arraymatch2.js b/jstests/core/write/update/update_arraymatch2.js index 7610de7c96202..5ec6be9e3cf03 100644 --- a/jstests/core/write/update/update_arraymatch2.js +++ b/jstests/core/write/update/update_arraymatch2.js @@ -1,6 +1,6 @@ // @tags: [requires_multi_updates, requires_non_retryable_writes] -t = db.update_arraymatch2; +let t = db.update_arraymatch2; t.drop(); t.insert({}); diff --git a/jstests/core/write/update/update_arraymatch3.js b/jstests/core/write/update/update_arraymatch3.js index 36f7ab22430eb..e16a518fd80a8 100644 --- a/jstests/core/write/update/update_arraymatch3.js +++ b/jstests/core/write/update/update_arraymatch3.js @@ -1,13 +1,9 @@ // @tags: [requires_multi_updates, requires_non_retryable_writes] -t = db.update_arraymatch3; +let t = db.update_arraymatch3; t.drop(); -o = { - _id: 1, - title: "ABC", - comments: [{"by": "joe", "votes": 3}, {"by": "jane", "votes": 7}] -}; +let o = {_id: 1, title: "ABC", comments: [{"by": "joe", "votes": 3}, {"by": "jane", "votes": 7}]}; t.save(o); assert.eq(o, t.findOne(), "A1"); diff --git a/jstests/core/write/update/update_arraymatch4.js b/jstests/core/write/update/update_arraymatch4.js index 3c087e53ca5ad..6c8f378fad196 100644 --- a/jstests/core/write/update/update_arraymatch4.js +++ b/jstests/core/write/update/update_arraymatch4.js @@ -3,13 +3,10 @@ // key. 
// @tags: [assumes_unsharded_collection] -t = db.update_arraymatch4; +let t = db.update_arraymatch4; t.drop(); -x = { - _id: 1, - arr: ["A1", "B1", "C1"] -}; +let x = {_id: 1, arr: ["A1", "B1", "C1"]}; t.insert(x); assert.eq(x, t.findOne(), "A1"); diff --git a/jstests/core/write/update/update_arraymatch5.js b/jstests/core/write/update/update_arraymatch5.js index 1b4c967b38b7d..0d676f3e4e51e 100644 --- a/jstests/core/write/update/update_arraymatch5.js +++ b/jstests/core/write/update/update_arraymatch5.js @@ -4,7 +4,7 @@ // requires_non_retryable_writes, // ] -t = db.update_arraymatch5; +let t = db.update_arraymatch5; t.drop(); t.insert({abc: {visible: true}, testarray: [{foobar_id: 316, visible: true, xxx: 1}]}); diff --git a/jstests/core/write/update/update_arraymatch6.js b/jstests/core/write/update/update_arraymatch6.js index 1241753b86694..16563e8d3e5f4 100644 --- a/jstests/core/write/update/update_arraymatch6.js +++ b/jstests/core/write/update/update_arraymatch6.js @@ -4,7 +4,7 @@ // @tags: [assumes_unsharded_collection] var res; -t = db.jstests_update_arraymatch6; +let t = db.jstests_update_arraymatch6; t.drop(); function doTest() { @@ -17,4 +17,4 @@ function doTest() { doTest(); t.drop(); t.createIndex({'a.id': 1}); -doTest(); \ No newline at end of file +doTest(); diff --git a/jstests/core/write/update/update_arraymatch7.js b/jstests/core/write/update/update_arraymatch7.js index cded4ba56f493..552c8469dd19e 100644 --- a/jstests/core/write/update/update_arraymatch7.js +++ b/jstests/core/write/update/update_arraymatch7.js @@ -6,7 +6,7 @@ // Check that the positional operator works properly when an index only match is used for the update // query spec. SERVER-5067 -t = db.jstests_update_arraymatch7; +let t = db.jstests_update_arraymatch7; t.drop(); function testPositionalInc() { diff --git a/jstests/core/write/update/update_arraymatch8.js b/jstests/core/write/update/update_arraymatch8.js index e3aa91d642292..2f5365352e270 100644 --- a/jstests/core/write/update/update_arraymatch8.js +++ b/jstests/core/write/update/update_arraymatch8.js @@ -9,7 +9,7 @@ // SERVER-7511 // array.$.name -t = db.jstests_update_arraymatch8; +let t = db.jstests_update_arraymatch8; t.drop(); t.createIndex({'array.name': 1}); t.insert({'array': [{'name': 'old'}]}); @@ -51,7 +51,7 @@ assert(!t.findOne({'array.name': 'old'})); // // array.12.name t = db.jstests_update_arraymatch8; t.drop(); -arr = new Array(); +let arr = new Array(); for (var i = 0; i < 20; i++) { arr.push({'name': 'old'}); } diff --git a/jstests/core/write/update/update_blank1.js b/jstests/core/write/update/update_blank1.js index cd8f7433ebeb5..0e643f890b0e9 100644 --- a/jstests/core/write/update/update_blank1.js +++ b/jstests/core/write/update/update_blank1.js @@ -3,15 +3,10 @@ // key. 
// @tags: [assumes_unsharded_collection] -t = db.update_blank1; +let t = db.update_blank1; t.drop(); -orig = { - "": 1, - _id: 2, - "a": 3, - "b": 4 -}; +let orig = {"": 1, _id: 2, "a": 3, "b": 4}; t.insert(orig); var res = t.update({}, {$set: {"c": 5}}); print(res); diff --git a/jstests/core/write/update/update_dbref.js b/jstests/core/write/update/update_dbref.js index f3e461c737927..caaff55cc9174 100644 --- a/jstests/core/write/update/update_dbref.js +++ b/jstests/core/write/update/update_dbref.js @@ -8,7 +8,7 @@ // Test that we can update DBRefs, but not dbref fields outside a DBRef var res; -t = db.jstests_update_dbref; +let t = db.jstests_update_dbref; t.drop(); res = t.save({_id: 1, a: new DBRef("a", "b")}); diff --git a/jstests/core/write/update/update_hint.js b/jstests/core/write/update/update_hint.js index f0869073de0da..a834fd0bb469a 100644 --- a/jstests/core/write/update/update_hint.js +++ b/jstests/core/write/update/update_hint.js @@ -7,10 +7,7 @@ * @tags: [assumes_unsharded_collection, requires_multi_updates, requires_non_retryable_writes] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage} from "jstests/libs/analyze_plan.js"; function assertCommandUsesIndex(command, expectedHintKeyPattern) { const out = assert.commandWorked(coll.runCommand({explain: command})); @@ -162,5 +159,4 @@ function failedHintTest() { normalIndexTest(); sparseIndexTest(); shellHelpersTest(); -failedHintTest(); -})(); +failedHintTest(); \ No newline at end of file diff --git a/jstests/core/write/update/update_invalid1.js b/jstests/core/write/update/update_invalid1.js index bbda4cee53e94..7276d33e5ef58 100644 --- a/jstests/core/write/update/update_invalid1.js +++ b/jstests/core/write/update/update_invalid1.js @@ -2,7 +2,7 @@ // requires_fastcount, // ] -t = db.update_invalid1; +let t = db.update_invalid1; t.drop(); t.update({_id: 5}, {$set: {$inc: {x: 5}}}, true); diff --git a/jstests/core/write/update/update_metrics.js b/jstests/core/write/update/update_metrics.js index 8bf93e5009d95..a5e3938d47b2c 100644 --- a/jstests/core/write/update/update_metrics.js +++ b/jstests/core/write/update/update_metrics.js @@ -5,6 +5,9 @@ * @tags: [ * # The test is designed to work with an unsharded collection. * assumes_unsharded_collection, + * # The config fuzzer may run logical session cache refreshes in the background, which modifies + * # some serverStatus metrics read in this test. + * does_not_support_config_fuzzer, * # The test relies on the precise number of executions of commands. 
* requires_non_retryable_writes, * # This test contains assertions on the number of executed operations, and tenant migrations diff --git a/jstests/core/write/update/update_multi3.js b/jstests/core/write/update/update_multi3.js index 4c6769bc65a9e..463d4192ad97d 100644 --- a/jstests/core/write/update/update_multi3.js +++ b/jstests/core/write/update/update_multi3.js @@ -1,6 +1,6 @@ // @tags: [requires_multi_updates, requires_non_retryable_writes] -t = db.update_multi3; +let t = db.update_multi3; function test(useIndex) { t.drop(); @@ -8,7 +8,7 @@ function test(useIndex) { if (useIndex) t.createIndex({k: 1}); - for (i = 0; i < 10; i++) { + for (let i = 0; i < 10; i++) { t.save({_id: i, k: 'x', a: []}); } diff --git a/jstests/core/write/update/update_multi4.js b/jstests/core/write/update/update_multi4.js index cfe11616efc46..d0e868ee5ddd8 100644 --- a/jstests/core/write/update/update_multi4.js +++ b/jstests/core/write/update/update_multi4.js @@ -1,9 +1,9 @@ // @tags: [requires_multi_updates, requires_non_retryable_writes] -t = db.update_mulit4; +let t = db.update_mulit4; t.drop(); -for (i = 0; i < 1000; i++) { +for (let i = 0; i < 1000; i++) { t.insert({_id: i, k: i % 12, v: "v" + i % 12}); } diff --git a/jstests/core/write/update/update_multi6.js b/jstests/core/write/update/update_multi6.js index 023398534cdc0..217da4c002d34 100644 --- a/jstests/core/write/update/update_multi6.js +++ b/jstests/core/write/update/update_multi6.js @@ -2,7 +2,7 @@ var res; -t = db.update_multi6; +let t = db.update_multi6; t.drop(); t.update({_id: 1}, {_id: 1, x: 1, y: 2}, true, false); diff --git a/jstests/core/write/update/update_pipeline_shell_helpers.js b/jstests/core/write/update/update_pipeline_shell_helpers.js index 0ac1b35f4ce6e..d22abfdc0aa60 100644 --- a/jstests/core/write/update/update_pipeline_shell_helpers.js +++ b/jstests/core/write/update/update_pipeline_shell_helpers.js @@ -7,12 +7,9 @@ * requires_non_retryable_writes, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For 'arrayEq'. -load("jstests/libs/analyze_plan.js"); // For planHasStage(). -load("jstests/libs/fixture_helpers.js"); // For isMongos(). +import {getPlanStage, planHasStage} from "jstests/libs/analyze_plan.js"; +load("jstests/libs/fixture_helpers.js"); // For isMongos(). // Make sure that the test collection is empty before starting the test. 
const testColl = db.update_pipeline_shell_helpers_test; @@ -129,5 +126,4 @@ if (!FixtureHelpers.isMongos(db)) { assert.throws(() => testColl.replaceOne({_id: 1}, [{$replaceWith: {}}])); assert.throws(() => testColl.findOneAndReplace({_id: 1}, [{$replaceWith: {}}])); assert.throws(() => testColl.bulkWrite( - [{replaceOne: {filter: {_id: 1}, replacement: [{$replaceWith: {}}]}}])); -})(); + [{replaceOne: {filter: {_id: 1}, replacement: [{$replaceWith: {}}]}}])); \ No newline at end of file diff --git a/jstests/core/write/update/update_setOnInsert.js b/jstests/core/write/update/update_setOnInsert.js index 9457c69f325d7..6e3f757660fa3 100644 --- a/jstests/core/write/update/update_setOnInsert.js +++ b/jstests/core/write/update/update_setOnInsert.js @@ -1,5 +1,5 @@ // This tests that $setOnInsert works and allow setting the _id -t = db.update_setOnInsert; +let t = db.update_setOnInsert; var res; function dotest(useIndex) { diff --git a/jstests/core/write/update/update_with_large_hint.js b/jstests/core/write/update/update_with_large_hint.js index 0b2521337cecf..d812954130d2b 100644 --- a/jstests/core/write/update/update_with_large_hint.js +++ b/jstests/core/write/update/update_with_large_hint.js @@ -33,4 +33,4 @@ assert.commandFailedWithCode(coll.runCommand("delete", { deletes: [{q: {_id: 0}, limit: 1, hint: {[longHint]: 1}}], }), ErrorCodes.BadValue); -}()); \ No newline at end of file +}()); diff --git a/jstests/core/write/update/updatea.js b/jstests/core/write/update/updatea.js index 99938c433fa15..23dda37054b81 100644 --- a/jstests/core/write/update/updatea.js +++ b/jstests/core/write/update/updatea.js @@ -4,13 +4,10 @@ // @tags: [assumes_unsharded_collection] var res; -t = db.updatea; +let t = db.updatea; t.drop(); -orig = { - _id: 1, - a: [{x: 1, y: 2}, {x: 10, y: 11}] -}; +let orig = {_id: 1, a: [{x: 1, y: 2}, {x: 10, y: 11}]}; res = t.save(orig); assert.commandWorked(res); @@ -52,7 +49,7 @@ orig = { _id: 1, a: [] }; -for (i = 0; i < 12; i++) +for (let i = 0; i < 12; i++) orig.a.push(i); res = t.save(orig); diff --git a/jstests/core/write/update/updateb.js b/jstests/core/write/update/updateb.js index 1518e7f354637..cea484a27329d 100644 --- a/jstests/core/write/update/updateb.js +++ b/jstests/core/write/update/updateb.js @@ -3,15 +3,12 @@ // key. // @tags: [assumes_unsharded_collection] -t = db.updateb; +let t = db.updateb; t.drop(); t.update({"x.y": 2}, {$inc: {a: 7}}, true); -correct = { - a: 7, - x: {y: 2} -}; -got = t.findOne(); +let correct = {a: 7, x: {y: 2}}; +let got = t.findOne(); delete got._id; assert.docEq(correct, got, "A"); diff --git a/jstests/core/write/update/updatec.js b/jstests/core/write/update/updatec.js index 8ce8cf4ecdd69..e47ed3ff797c8 100644 --- a/jstests/core/write/update/updatec.js +++ b/jstests/core/write/update/updatec.js @@ -1,5 +1,5 @@ -t = db.updatec; +let t = db.updatec; t.drop(); t.update({"_id": 123}, {$set: {"v": {"i": 123, "a": 456}}, $push: {"f": 234}}, 1, 0); diff --git a/jstests/core/write/update/updated.js b/jstests/core/write/update/updated.js index 919d02610c7e6..d850181afb5a0 100644 --- a/jstests/core/write/update/updated.js +++ b/jstests/core/write/update/updated.js @@ -3,13 +3,10 @@ // key. 
// @tags: [assumes_unsharded_collection] -t = db.updated; +let t = db.updated; t.drop(); -o = { - _id: Math.random(), - items: [null, null, null, null] -}; +let o = {_id: Math.random(), items: [null, null, null, null]}; t.insert(o); assert.docEq(o, t.findOne(), "A1"); diff --git a/jstests/core/write/update/updatee.js b/jstests/core/write/update/updatee.js index fbbcac01c9a96..ed2c3a0d81c55 100644 --- a/jstests/core/write/update/updatee.js +++ b/jstests/core/write/update/updatee.js @@ -5,7 +5,7 @@ // big numeric updates (used to overflow) -t = db.updatee; +let t = db.updatee; t.drop(); var o = { diff --git a/jstests/core/write/update/updatef.js b/jstests/core/write/update/updatef.js index 6597484f78aa8..3a55a38d95495 100644 --- a/jstests/core/write/update/updatef.js +++ b/jstests/core/write/update/updatef.js @@ -7,24 +7,24 @@ // Test unsafe management of nsdt on update command yield SERVER-3208 -prefixNS = db.jstests_updatef; +let prefixNS = db.jstests_updatef; prefixNS.save({}); -t = db.jstests_updatef_actual; +let t = db.jstests_updatef_actual; t.drop(); t.save({a: 0, b: []}); -for (i = 0; i < 1000; ++i) { +for (let i = 0; i < 1000; ++i) { t.save({a: 100}); } t.save({a: 0, b: []}); // Repeatedly rename jstests_updatef to jstests_updatef_ and back. This will // invalidate the jstests_updatef_actual NamespaceDetailsTransient object. -s = startParallelShell( +let s = startParallelShell( "for( i=0; i < 100; ++i ) { db.jstests_updatef.renameCollection( 'jstests_updatef_' ); db.jstests_updatef_.renameCollection( 'jstests_updatef' ); }"); -for (i = 0; i < 20; ++i) { +for (let i = 0; i < 20; ++i) { t.update({a: 0}, {$push: {b: i}}, false, true); } diff --git a/jstests/core/write/update/updateg.js b/jstests/core/write/update/updateg.js index 8a849a0ce5939..b014547b3e2a0 100644 --- a/jstests/core/write/update/updateg.js +++ b/jstests/core/write/update/updateg.js @@ -5,7 +5,7 @@ // SERVER-3370 check modifiers with field name characters comparing less than '.' character. 
-t = db.jstests_updateg; +let t = db.jstests_updateg; t.drop(); t.update({}, {'$inc': {'all.t': 1, 'all-copy.t': 1}}, true); diff --git a/jstests/core/write/update/updateh.js b/jstests/core/write/update/updateh.js index 72d0d22c616e6..bafebc08ed77e 100644 --- a/jstests/core/write/update/updateh.js +++ b/jstests/core/write/update/updateh.js @@ -7,7 +7,7 @@ var res; -t = db.jstest_updateh; +let t = db.jstest_updateh; t.drop(); t.insert({x: 1}); diff --git a/jstests/core/write/update/updatei.js b/jstests/core/write/update/updatei.js index 599c9538a80f7..c5b7875df51bf 100644 --- a/jstests/core/write/update/updatei.js +++ b/jstests/core/write/update/updatei.js @@ -5,13 +5,13 @@ // Test new (optional) update syntax // SERVER-4176 -t = db.updatei; +let t = db.updatei; // Using a multi update t.drop(); -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { t.save({_id: i, k: "x", a: []}); } @@ -24,7 +24,7 @@ t.drop(); // Using a single update -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { t.save({_id: i, k: "x", a: []}); } @@ -35,7 +35,7 @@ t.drop(); // Using upsert, found -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { t.save({_id: i, k: "x", a: []}); } @@ -46,7 +46,7 @@ t.drop(); // Using upsert + multi, found -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { t.save({_id: i, k: "x", a: []}); } @@ -59,7 +59,7 @@ t.drop(); // Using upsert, not found -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { t.save({_id: i, k: "x", a: []}); } @@ -70,7 +70,7 @@ t.drop(); // Without upsert, found -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { t.save({_id: i, k: "x", a: []}); } @@ -81,7 +81,7 @@ t.drop(); // Without upsert, not found -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { t.save({_id: i, k: "x", a: []}); } diff --git a/jstests/core/write/update/updatej.js b/jstests/core/write/update/updatej.js index bab2a32f45fc2..77c5f52cbcf3d 100644 --- a/jstests/core/write/update/updatej.js +++ b/jstests/core/write/update/updatej.js @@ -5,7 +5,7 @@ // encounters an error. // @tags: [requires_multi_updates, requires_non_retryable_writes, assumes_unsharded_collection] -t = db.jstests_updatej; +let t = db.jstests_updatej; t.drop(); t.save({a: []}); diff --git a/jstests/core/write/update/updatek.js b/jstests/core/write/update/updatek.js index 923b4145d1d7d..4ef4fe8ddb982 100644 --- a/jstests/core/write/update/updatek.js +++ b/jstests/core/write/update/updatek.js @@ -5,7 +5,7 @@ // Test modifier operations on numerically equivalent string field names. SERVER-4776 -t = db.jstests_updatek; +let t = db.jstests_updatek; t.drop(); t.save({_id: 0, '1': {}, '01': {}}); diff --git a/jstests/core/write/update/updatel.js b/jstests/core/write/update/updatel.js index a663f30672214..332fad0c6ece8 100644 --- a/jstests/core/write/update/updatel.js +++ b/jstests/core/write/update/updatel.js @@ -8,7 +8,7 @@ // setinel ('$'), the update fails with an error. SERVER-6669 SERVER-4713 var res; -t = db.jstests_updatel; +let t = db.jstests_updatel; t.drop(); // The collection is empty, forcing an upsert. In this case the query has no array position match diff --git a/jstests/core/write/update/updatem.js b/jstests/core/write/update/updatem.js index 8e4af7e56c741..0dd0fcba0140e 100644 --- a/jstests/core/write/update/updatem.js +++ b/jstests/core/write/update/updatem.js @@ -7,7 +7,7 @@ // Tests that _id will exist in all updated docs. 
-t = db.jstests_updatem; +let t = db.jstests_updatem; t.drop(); // new _id from insert (upsert:true) diff --git a/jstests/core/write/update/upsert_and.js b/jstests/core/write/update/upsert_and.js index 1e45cbe8dc20b..c2bab3ab25cf3 100644 --- a/jstests/core/write/update/upsert_and.js +++ b/jstests/core/write/update/upsert_and.js @@ -5,7 +5,7 @@ // tests to ensure fields in $and conditions are created when using the query to do upsert var res; -coll = db.upsert4; +let coll = db.upsert4; coll.drop(); res = coll.update({_id: 1, $and: [{c: 1}, {d: 1}], a: 12}, {$inc: {y: 1}}, true); diff --git a/jstests/core/write/update/upsert_fields.js b/jstests/core/write/update/upsert_fields.js index 310bace490744..0433b15048bd2 100644 --- a/jstests/core/write/update/upsert_fields.js +++ b/jstests/core/write/update/upsert_fields.js @@ -12,7 +12,7 @@ coll.drop(); var upsertedResult = function(query, expr) { coll.drop(); - result = coll.update(query, expr, {upsert: true}); + let result = coll.update(query, expr, {upsert: true}); return result; }; diff --git a/jstests/core/write/update/upsert_shell.js b/jstests/core/write/update/upsert_shell.js index 3ab07b50c21b3..252a0f4139a55 100644 --- a/jstests/core/write/update/upsert_shell.js +++ b/jstests/core/write/update/upsert_shell.js @@ -5,7 +5,7 @@ // tests to make sure that the new _id is returned after the insert in the shell var l; -t = db.upsert1; +let t = db.upsert1; t.drop(); // make sure the new _id is returned when $mods are used diff --git a/jstests/core/write/validate_user_documents.js b/jstests/core/write/validate_user_documents.js index 2a30ed0fea8c9..65aa7cde07ea7 100644 --- a/jstests/core/write/validate_user_documents.js +++ b/jstests/core/write/validate_user_documents.js @@ -10,7 +10,7 @@ // Ensure that inserts and updates of the system.users collection validate the schema of inserted // documents. -mydb = db.getSiblingDB("validate_user_documents"); +let mydb = db.getSiblingDB("validate_user_documents"); function assertGLEOK(status) { assert(status.ok && status.err === null, "Expected OK status object; found " + tojson(status)); diff --git a/jstests/cqf/analyze/array_histogram.js b/jstests/cqf/analyze/array_histogram.js index e242277c5005b..4e854983cd759 100644 --- a/jstests/cqf/analyze/array_histogram.js +++ b/jstests/cqf/analyze/array_histogram.js @@ -1,10 +1,12 @@ /** * This test verifies array histograms are both generated and estimated correctly. */ -(function() { -"use strict"; - -load('jstests/libs/ce_stats_utils.js'); +import { + createAndValidateHistogram, + runHistogramsTest, + verifyCEForMatch +} from "jstests/libs/ce_stats_utils.js"; +import {forceCE} from "jstests/libs/optimizer_utils.js"; runHistogramsTest(function verifyArrayHistograms() { const coll = db.array_histogram; @@ -283,4 +285,3 @@ runHistogramsTest(function verifyArrayHistograms() { hint: idx }); }); -}()); diff --git a/jstests/cqf/analyze/ce_histogram.js b/jstests/cqf/analyze/ce_histogram.js index 47e86d7b1d010..2f45ecb5743c2 100644 --- a/jstests/cqf/analyze/ce_histogram.js +++ b/jstests/cqf/analyze/ce_histogram.js @@ -10,10 +10,12 @@ * change as a result of updates to estimation, since estimates for bucket boundaries should always * be accurate. 
*/ -(function() { -"use strict"; - -load('jstests/libs/ce_stats_utils.js'); +import { + createAndValidateHistogram, + runHistogramsTest, + verifyCEForMatch +} from "jstests/libs/ce_stats_utils.js"; +import {forceCE} from "jstests/libs/optimizer_utils.js"; const charCodeA = 65; const collName = "ce_histogram"; @@ -253,4 +255,3 @@ runHistogramsTest(function testScalarHistograms() { verifyCEForNDV(3); verifyCEForNDV(4); }); -}()); diff --git a/jstests/cqf/analyze/ce_sample_rate.js b/jstests/cqf/analyze/ce_sample_rate.js index 4af4c6fc1c074..6180266a0e4d9 100644 --- a/jstests/cqf/analyze/ce_sample_rate.js +++ b/jstests/cqf/analyze/ce_sample_rate.js @@ -2,10 +2,14 @@ * This is an integration test for histogram CE & statistics to ensure that we can estimate a * histogram appropriately for different sample sizes. */ -(function() { -"use strict"; - -load('jstests/libs/ce_stats_utils.js'); +import { + assertApproxEq, + createAndValidateHistogram, + createHistogram, + getRootCE, + runHistogramsTest +} from "jstests/libs/ce_stats_utils.js"; +import {forceCE, round2} from "jstests/libs/optimizer_utils.js"; const field = "sampled"; const numDocs = 1000; @@ -135,4 +139,3 @@ runHistogramsTest(function testSampleRates() { testSampleRatesForDocsWithPredicates(docs, expectedEst, expectedHistogram, sampleRates); } }); -})(); diff --git a/jstests/cqf/analyze/missing_histogram.js b/jstests/cqf/analyze/missing_histogram.js index 57d16836883cd..0f17cd4e8de56 100644 --- a/jstests/cqf/analyze/missing_histogram.js +++ b/jstests/cqf/analyze/missing_histogram.js @@ -2,10 +2,12 @@ * This test verifies that we gracefully handle the case where we do not have statistics or a * histogram available for a given path. It also tests empty collections are handled appropriately. */ -(function() { -"use strict"; - -load('jstests/libs/ce_stats_utils.js'); +import { + createAndValidateHistogram, + runHistogramsTest, + verifyCEForMatch +} from "jstests/libs/ce_stats_utils.js"; +import {forceCE} from "jstests/libs/optimizer_utils.js"; runHistogramsTest(function testEmptyAndMissingHistograms() { const emptyColl = db.missing_histogram_empty; @@ -45,4 +47,3 @@ runHistogramsTest(function testEmptyAndMissingHistograms() { hint: {notAField: 1}, }); }); -}()); diff --git a/jstests/cqf/analyze/scalar_histograms.js b/jstests/cqf/analyze/scalar_histograms.js index 83dfa7c0a42b3..d6b73fe7977a8 100644 --- a/jstests/cqf/analyze/scalar_histograms.js +++ b/jstests/cqf/analyze/scalar_histograms.js @@ -1,15 +1,12 @@ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
+import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } assert.commandWorked( - db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); + db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"})); const coll = db.cqf_analyze_scalar_hist; coll.drop(); @@ -58,4 +55,3 @@ testAnalyzeStats("c", docs, 37); assert.commandWorked( db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"})); -}()); diff --git a/jstests/cqf/analyze/type_counts.js b/jstests/cqf/analyze/type_counts.js index 2a20163e52d53..964c7a36a2157 100644 --- a/jstests/cqf/analyze/type_counts.js +++ b/jstests/cqf/analyze/type_counts.js @@ -3,10 +3,17 @@ * histogram with appropriate type counts and retrieve that histogram to estimate a simple match * predicate. Note that this tests predicates and histograms on several types. */ -(function() { -"use strict"; - -load('jstests/libs/ce_stats_utils.js'); +import { + createAndValidateHistogram, + runHistogramsTest, + verifyCEForMatch, + verifyCEForMatchNodes, +} from "jstests/libs/ce_stats_utils.js"; +import { + extractLogicalCEFromNode, + forceCE, + navigateToPlanPath +} from "jstests/libs/optimizer_utils.js"; runHistogramsTest(function testTypeCounts() { const coll = db.type_counts; @@ -871,4 +878,3 @@ runHistogramsTest(function testTypeCounts() { expected: [] }); }); -}()); diff --git a/jstests/cqf/disjunction.js b/jstests/cqf/disjunction.js deleted file mode 100644 index fa265d8c3f8db..0000000000000 --- a/jstests/cqf/disjunction.js +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Test that $or is translated to a SargableNode, and executed with correct results. - */ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. -if (!checkCascadesOptimizerEnabled(db)) { - jsTestLog("Skipping test because the optimizer is not enabled"); - return; -} - -const coll = db.cqf_disjunction; -coll.drop(); - -let docs = []; -for (let i = 0; i < 10; ++i) { - // Generate enough documents for an index to be preferable. - for (let a = 0; a < 10; ++a) { - for (let b = 0; b < 10; ++b) { - docs.push({a, b}); - } - } -} -assert.commandWorked(coll.insert(docs)); - -let result = coll.find({$or: [{a: 2}, {b: 3}]}).toArray(); -assert.eq(result.length, 190, result); -for (const doc of result) { - assert(doc.a === 2 || doc.b === 3, "Query returned a doc not matching the predicate: ${doc}"); -} - -assert.commandWorked(coll.createIndexes([ - {a: 1}, - {b: 1}, -])); - -result = coll.find({$or: [{a: 2}, {b: 3}]}).toArray(); -assert.eq(result.length, 190, result); -for (const doc of result) { - assert(doc.a === 2 || doc.b === 3, "Query returned a doc not matching the predicate: ${doc}"); -} - -// At time of writing, queries that compare to literal array or MinKey/MaxKey are translated to -// an ABT with a disjunction in it. -result = coll.find({arr: {$eq: [2]}}).toArray(); -assert.eq(result.length, 0, result); - -result = coll.find({arr: {$gt: MinKey()}}).toArray(); -assert.eq(result.length, docs.length, result); - -// Test a nested or/and where one leaf predicate ($exists) cannot be fully satisfied with index -// bounds. -result = coll.find({ - $or: [ - // 'b' exists on every doc so this should never happen. - {a: 5, b: {$exists: false}}, - // The field 'nope' doesn't exist so this also shouldn't happen. 
- {nope: 'nope'}, - ] - }) - .toArray(); -assert.eq(result.length, 0, result); - -// Test that adding an $or predicate doesn't inhibit the use of index scan for other predicates. -// The $or can just be a residual predicate. -{ - const res = runWithParams( - [ - {key: 'internalCascadesOptimizerExplainVersion', value: "v2"}, - {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true} - ], - () => coll.explain("executionStats") - .find({a: 2, $or: [{b: 2}, {no_such_field: 123}]}) - .finish()); - assert.eq(10, res.executionStats.nReturned); - - // We get an index scan on 'a' and some expression for the $or. - const expectedStr = - `Root [{scan_0}] -Filter [] -| BinaryOp [Or] -| | EvalFilter [] -| | | Variable [scan_0] -| | PathGet [no_such_field] -| | PathTraverse [1] -| | PathCompare [Eq] -| | Const [123] -| EvalFilter [] -| | Variable [scan_0] -| PathGet [b] -| PathCompare [Eq] -| Const [2] -NestedLoopJoin [joinType: Inner, {rid_1}] -| | Const [true] -| LimitSkip [limit: 1, skip: 0] -| Seek [ridProjection: rid_1, {'': scan_0}, cqf_disjunction_] -IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: a_1, interval: {=Const [2]}] -`; - const actualStr = removeUUIDsFromExplain(db, res); - assert.eq(expectedStr, actualStr); -} -}()); diff --git a/jstests/cqf/index_hints.js b/jstests/cqf/index_hints.js deleted file mode 100644 index 6594a4be09890..0000000000000 --- a/jstests/cqf/index_hints.js +++ /dev/null @@ -1,87 +0,0 @@ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. -if (!checkCascadesOptimizerEnabled(db)) { - jsTestLog("Skipping test because the optimizer is not enabled"); - return; -} - -const t = db.cqf_index_hints; -t.drop(); - -assert.commandWorked(t.insert({_id: 0, a: [1, 2, 3, 4]})); -assert.commandWorked(t.insert({_id: 1, a: [2, 3, 4]})); -assert.commandWorked(t.insert({_id: 2, a: [2]})); -assert.commandWorked(t.insert({_id: 3, a: 2})); -assert.commandWorked(t.insert({_id: 4, a: [1, 3]})); - -assert.commandWorked(t.createIndex({a: 1})); - -// There are too few documents, and an index is not preferable. -{ - let res = t.explain("executionStats").find({a: 2}).finish(); - assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); -} - -{ - let res = t.explain("executionStats").find({a: 2}).hint({a: 1}).finish(); - assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); -} - -{ - let res = t.explain("executionStats").find({a: 2}).hint("a_1").finish(); - assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); -} - -{ - let res = t.explain("executionStats").find({a: 2}).hint({$natural: 1}).finish(); - assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); - - res = t.find({a: 2}).hint({$natural: 1}).toArray(); - assert.eq(res[0]._id, 0, res); -} - -{ - let res = t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish(); - assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); - - res = t.find({a: 2}).hint({$natural: -1}).toArray(); - assert.eq(res[0]._id, 3, res); -} - -// Generate enough documents for index to be preferable. 
-for (let i = 0; i < 100; i++) { - assert.commandWorked(t.insert({a: i + 10})); -} - -{ - let res = t.explain("executionStats").find({a: 2}).finish(); - assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); -} - -{ - let res = t.explain("executionStats").find({a: 2}).hint({a: 1}).finish(); - assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); -} - -{ - let res = t.explain("executionStats").find({a: 2}).hint("a_1").finish(); - assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); -} -{ - let res = t.explain("executionStats").find({a: 2}).hint({$natural: 1}).finish(); - assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); - - res = t.find({a: 2}).hint({$natural: 1}).toArray(); - assert.eq(res[0]._id, 0, res); -} - -{ - let res = t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish(); - assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); - - res = t.find({a: 2}).hint({$natural: -1}).toArray(); - assert.eq(res[0]._id, 3, res); -} -}()); diff --git a/jstests/cqf/no_collection.js b/jstests/cqf/no_collection.js deleted file mode 100644 index 3c7ecae4c3271..0000000000000 --- a/jstests/cqf/no_collection.js +++ /dev/null @@ -1,15 +0,0 @@ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. -if (!checkCascadesOptimizerEnabled(db)) { - jsTestLog("Skipping test because the optimizer is not enabled"); - return; -} - -let t = db.cqf_no_collection; -t.drop(); - -const res = t.explain("executionStats").aggregate([{$match: {'a': 2}}]); -assert.eq(0, res.executionStats.nReturned); -}()); \ No newline at end of file diff --git a/jstests/cqf/array_index.js b/jstests/cqf/optimizer/array_index.js similarity index 94% rename from jstests/cqf/array_index.js rename to jstests/cqf/optimizer/array_index.js index b96fbb9413d6a..1dba4e9019881 100644 --- a/jstests/cqf/array_index.js +++ b/jstests/cqf/optimizer/array_index.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_array_index; @@ -61,4 +62,3 @@ assert.commandWorked(t.createIndex({a: 1})); res = t.explain("executionStats").aggregate([{$match: {a: {$gte: MaxKey}}}]); assert.eq(1, res.executionStats.nReturned); } -}()); diff --git a/jstests/cqf/array_match.js b/jstests/cqf/optimizer/array_match.js similarity index 95% rename from jstests/cqf/array_match.js rename to jstests/cqf/optimizer/array_match.js index 774361d0c4b7e..acee15d2b35b7 100644 --- a/jstests/cqf/array_match.js +++ b/jstests/cqf/optimizer/array_match.js @@ -1,10 +1,13 @@ -(function() { -"use strict"; +import { + assertValueOnPath, + assertValueOnPlanPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath, +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_array_match; @@ -79,4 +82,3 @@ assert.commandWorked(t.createIndex({b: 1, a: 1})); assertValueOnPath("IndexScan", indexUnionNode, "children.1.nodeType"); assertValueOnPath(2, indexUnionNode, "children.1.interval.lowBound.bound.1.value"); } -}()); diff --git a/jstests/cqf/array_size.js b/jstests/cqf/optimizer/array_size.js similarity index 87% rename from jstests/cqf/array_size.js rename to jstests/cqf/optimizer/array_size.js index 8f3b4fdf73307..9017e3466b74b 100644 --- a/jstests/cqf/array_size.js +++ b/jstests/cqf/optimizer/array_size.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_array_size; @@ -26,4 +24,3 @@ assert.eq(2, res.executionStats.nReturned); res = t.explain("executionStats").aggregate([{$match: {'a.b': {$size: 2}}}]); assert.eq(1, res.executionStats.nReturned); -}()); diff --git a/jstests/cqf/basic_agg.js b/jstests/cqf/optimizer/basic_agg.js similarity index 93% rename from jstests/cqf/basic_agg.js rename to jstests/cqf/optimizer/basic_agg.js index 678d896c290a1..67a7e1a4c3677 100644 --- a/jstests/cqf/basic_agg.js +++ b/jstests/cqf/optimizer/basic_agg.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_basic_index; @@ -45,4 +46,3 @@ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); res = coll.explain("executionStats").aggregate([{$match: {$and: [{'a.b': 2}]}}]); assert.eq(1, res.executionStats.nReturned); assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); -}()); diff --git a/jstests/cqf/basic_agg_expr.js b/jstests/cqf/optimizer/basic_agg_expr.js similarity index 97% rename from jstests/cqf/basic_agg_expr.js rename to jstests/cqf/optimizer/basic_agg_expr.js index 0578717f5fdee..a55ce8de5fc26 100644 --- a/jstests/cqf/basic_agg_expr.js +++ b/jstests/cqf/optimizer/basic_agg_expr.js @@ -1,12 +1,9 @@ -(function() { -"use strict"; - load('jstests/aggregation/extras/utils.js'); // For assertArrayEq. +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_agg_expr; @@ -136,4 +133,3 @@ const t = db.cqf_agg_expr; assertArrayEq({actual: res, expected: [{_id: 0, a: 1}, {_id: 1, a: 2}, {_id: 2, a: 3}]}); } } -}()); diff --git a/jstests/cqf/basic_find.js b/jstests/cqf/optimizer/basic_find.js similarity index 92% rename from jstests/cqf/basic_find.js rename to jstests/cqf/optimizer/basic_find.js index f41e4b1b9f7f7..fd0ad385841b2 100644 --- a/jstests/cqf/basic_find.js +++ b/jstests/cqf/optimizer/basic_find.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_basic_find; @@ -45,4 +46,3 @@ assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); res = coll.explain("executionStats").find({'': {$gt: 2}}).finish(); assert.eq(1, res.executionStats.nReturned); assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); -}()); diff --git a/jstests/cqf/basic_unwind.js b/jstests/cqf/optimizer/basic_unwind.js similarity index 80% rename from jstests/cqf/basic_unwind.js rename to jstests/cqf/optimizer/basic_unwind.js index 89f5c7ea5d8c0..0ed1064dc4b7e 100644 --- a/jstests/cqf/basic_unwind.js +++ b/jstests/cqf/optimizer/basic_unwind.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_basic_unwind; @@ -22,4 +23,3 @@ assert.commandWorked(coll.insert([ let res = coll.explain("executionStats").aggregate([{$unwind: '$x'}]); assert.eq(4, res.executionStats.nReturned); assertValueOnPlanPath("Unwind", res, "child.child.nodeType"); -}()); diff --git a/jstests/cqf/chess.js b/jstests/cqf/optimizer/chess.js similarity index 76% rename from jstests/cqf/chess.js rename to jstests/cqf/optimizer/chess.js index b210c52135423..47dee1f46453f 100644 --- a/jstests/cqf/chess.js +++ b/jstests/cqf/optimizer/chess.js @@ -1,10 +1,13 @@ -(function() { -"use strict"; +import { + assertValueOnPath, + assertValueOnPlanPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath, +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_chess; @@ -100,17 +103,14 @@ const res = coll.explain("executionStats").aggregate([ // TODO: verify expected results. -// Verify we are getting an intersection between "minutes" and either "turns" or "avgRating". -// The plan is currently not stable due to sampling. 
-{ - const indexNode = navigateToPlanPath(res, "child.child.leftChild.leftChild"); - assertValueOnPath("IndexScan", indexNode, "nodeType"); - assertValueOnPath("minutes_1", indexNode, "indexDefName"); -} -{ - const indexNode = navigateToPlanPath(res, "child.child.leftChild.rightChild.children.0.child"); - assertValueOnPath("IndexScan", indexNode, "nodeType"); - const indexName = navigateToPath(indexNode, "indexDefName"); - assert(indexName === "turns_1" || indexName === "avgRating_1"); -} -}()); +/** + * Demonstrate the following: + * 1. Limit is subsumed into the collation node above. + * 2. We have one index scan on minutes and the range is between 2 and 150 (we can encode both + * comparisons as a single index scan). + */ +assertValueOnPlanPath("Collation", res, "child.nodeType"); + +const indexNode = navigateToPlanPath(res, "child.child.leftChild"); +assertValueOnPath("IndexScan", indexNode, "nodeType"); +assertValueOnPath("minutes_1", indexNode, "indexDefName"); diff --git a/jstests/cqf/compond_index.js b/jstests/cqf/optimizer/compond_index.js similarity index 96% rename from jstests/cqf/compond_index.js rename to jstests/cqf/optimizer/compond_index.js index 526dd3c4eb0d9..4d757c63fac5d 100644 --- a/jstests/cqf/compond_index.js +++ b/jstests/cqf/optimizer/compond_index.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled, runWithParams} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_compound_index; @@ -91,4 +89,3 @@ assert.commandWorked(t.createIndex({a: 1, b: 1})); () => t.explain("executionStats").aggregate([{$match: {a: {$gte: 1, $lte: 3}}}])); assert.eq(30, res.executionStats.nReturned); } -}()); diff --git a/jstests/cqf/cost_model_override.js b/jstests/cqf/optimizer/cost_model_override.js similarity index 89% rename from jstests/cqf/cost_model_override.js rename to jstests/cqf/optimizer/cost_model_override.js index 5f03cbeeca272..cfef385899019 100644 --- a/jstests/cqf/cost_model_override.js +++ b/jstests/cqf/optimizer/cost_model_override.js @@ -3,13 +3,15 @@ * the cost of produced query plan changed. */ -(function() { -"use strict"; +import { + assertValueOnPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cost_model_override; @@ -50,5 +52,4 @@ function executeAndGetScanCost(scanIncrementalCost) { const scanCost1 = executeAndGetScanCost(0.2); const scanCost2 = executeAndGetScanCost(0.4); -assert.lt(scanCost1, scanCost2); -}()); +assert.lt(scanCost1, scanCost2); \ No newline at end of file diff --git a/jstests/cqf/count_optimize.js b/jstests/cqf/optimizer/count_optimize.js similarity index 91% rename from jstests/cqf/count_optimize.js rename to jstests/cqf/optimizer/count_optimize.js index 9ac7c14ea8f47..5597732780093 100644 --- a/jstests/cqf/count_optimize.js +++ b/jstests/cqf/optimizer/count_optimize.js @@ -1,10 +1,12 @@ -(function() { -"use strict"; +import { + checkCascadesOptimizerEnabled, + removeUUIDsFromExplain, + runWithParams +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_count_optimize; @@ -58,4 +60,3 @@ Filter [] PhysicalScan [{'a': evalTemp_0}, cqf_count_optimize_] `; assert.eq(expectedStr, removeUUIDsFromExplain(db, res)); -}()); diff --git a/jstests/cqf/optimizer/disjunction.js b/jstests/cqf/optimizer/disjunction.js new file mode 100644 index 0000000000000..529d61bbbbc74 --- /dev/null +++ b/jstests/cqf/optimizer/disjunction.js @@ -0,0 +1,310 @@ +/** + * Test that $or is translated to a SargableNode, and executed with correct results. + */ +import { + checkCascadesOptimizerEnabled, + removeUUIDsFromExplain, + runWithParams +} from "jstests/libs/optimizer_utils.js"; + +if (!checkCascadesOptimizerEnabled(db)) { + jsTestLog("Skipping test because the optimizer is not enabled"); + quit(); +} + +const coll = db.cqf_disjunction; +coll.drop(); + +let docs = []; +// Generate 100 documents with different pairs of a,b values. +for (let i = 0; i < 10; ++i) { + for (let a = 0; a < 10; ++a) { + for (let b = 0; b < 10; ++b) { + docs.push({a, b}); + } + } +} +// Generate extra non-matching documents to discourage collection scan. +for (let i = 0; i < 1000; ++i) { + docs.push({}); +} +assert.commandWorked(coll.insert(docs)); + +let result = coll.find({$or: [{a: 2}, {b: 3}]}).toArray(); +assert.eq(result.length, 190, result); +for (const doc of result) { + assert(doc.a === 2 || doc.b === 3, "Query returned a doc not matching the predicate: ${doc}"); +} + +assert.commandWorked(coll.createIndexes([ + {a: 1}, + {b: 1}, +])); + +{ + const query = {$or: [{a: 2}, {b: 3}]}; + + // Check the plan and count. + const res = runWithParams( + [ + {key: 'internalCascadesOptimizerExplainVersion', value: "v2"}, + {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true} + ], + () => coll.explain("executionStats").find(query).finish()); + assert.eq(190, res.executionStats.nReturned); + + // We should get a union of two indexes {a:1} and {b:1}. + const expectedStr = + `Root [{scan_0}] +NestedLoopJoin [joinType: Inner, {rid_1}] +| | Const [true] +| LimitSkip [limit: 1, skip: 0] +| Seek [ridProjection: rid_1, {'': scan_0}, cqf_disjunction_] +Unique [{rid_1}] +Union [{rid_1}] +| IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: b_1, interval: {=Const [3]}] +IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: a_1, interval: {=Const [2]}] +`; + const actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); + + // Check the full result. + const result = coll.find(query).toArray(); + assert.eq(result.length, 190, result); + for (const doc of result) { + assert(doc.a === 2 || doc.b === 3, + "Query returned a doc not matching the predicate: ${doc}"); + } +} + +// At time of writing, queries that compare to literal array or MinKey/MaxKey are translated to +// an ABT with a disjunction in it. +result = coll.find({arr: {$eq: [2]}}).toArray(); +assert.eq(result.length, 0, result); + +result = coll.find({arr: {$gt: MinKey()}}).toArray(); +assert.eq(result.length, docs.length, result); + +// Test a nested or/and where one leaf predicate ($exists) cannot be fully satisfied with index +// bounds. +result = coll.find({ + $or: [ + // 'b' exists on every doc so this should never happen. + {a: 5, b: {$exists: false}}, + // The field 'nope' doesn't exist so this also shouldn't happen. 
+ {nope: 'nope'}, + ] + }) + .toArray(); +assert.eq(result.length, 0, result); + +// Test that adding an $or predicate doesn't inhibit the use of index scan for other predicates. +// The $or can just be a residual predicate. +{ + const res = runWithParams( + [ + {key: 'internalCascadesOptimizerExplainVersion', value: "v2"}, + {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true} + ], + () => coll.explain("executionStats") + .find({a: 2, $or: [{b: 2}, {no_such_field: 123}]}) + .finish()); + assert.eq(10, res.executionStats.nReturned); + + // We get an index scan on 'a' and some expression for the $or. + const expectedStr = + `Root [{scan_0}] +Filter [] +| BinaryOp [Or] +| | EvalFilter [] +| | | Variable [scan_0] +| | PathGet [no_such_field] +| | PathTraverse [1] +| | PathCompare [Eq] +| | Const [123] +| EvalFilter [] +| | Variable [scan_0] +| PathGet [b] +| PathCompare [Eq] +| Const [2] +NestedLoopJoin [joinType: Inner, {rid_1}] +| | Const [true] +| LimitSkip [limit: 1, skip: 0] +| Seek [ridProjection: rid_1, {'': scan_0}, cqf_disjunction_] +IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: a_1, interval: {=Const [2]}] +`; + const actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); +} + +// Test that an $or containing multiple predicates on the same field groups the predicates under +// the shared field. +{ + const params = [ + {key: 'internalCascadesOptimizerExplainVersion', value: "v2"}, + {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true}, + {key: "internalCascadesOptimizerDisableIndexes", value: true} + ]; + + // + // Test $or where all predicates are on the same field. + // + let res = runWithParams( + params, + () => coll.explain("executionStats").find({$or: [{a: 1}, {a: 2}, {a: 3}]}).finish()); + + let expectedStr = + `Root [{scan_0}] +Filter [] +| EvalFilter [] +| | Variable [evalTemp_0] +| PathTraverse [1] +| PathCompare [EqMember] +| Const [[1, 2, 3]] +PhysicalScan [{'': scan_0, 'a': evalTemp_0}, cqf_disjunction_] +`; + assert.eq(300, res.executionStats.nReturned); + let actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); + + // The same query, but with nested $ors. + res = runWithParams( + params, + () => + coll.explain("executionStats").find({$or: [{$or: [{a: 1}, {a: 2}, {a: 3}]}]}).finish()); + assert.eq(300, res.executionStats.nReturned); + assert.eq(expectedStr, actualStr); + + res = runWithParams( + params, + () => + coll.explain("executionStats").find({$or: [{a: 1}, {$or: [{a: 2}, {a: 3}]}]}).finish()); + assert.eq(300, res.executionStats.nReturned); + assert.eq(expectedStr, actualStr); + + // + // Test $or where two predicates are on the same field and one is on a different field. + // + res = runWithParams( + params, + () => coll.explain("executionStats").find({$or: [{a: 1}, {a: 2}, {b: 3}]}).finish()); + + expectedStr = + `Root [{scan_0}] +Filter [] +| BinaryOp [Or] +| | EvalFilter [] +| | | Variable [evalTemp_1] +| | PathTraverse [1] +| | PathCompare [Eq] +| | Const [3] +| EvalFilter [] +| | Variable [evalTemp_0] +| PathTraverse [1] +| PathCompare [EqMember] +| Const [[1, 2]] +PhysicalScan [{'': scan_0, 'a': evalTemp_0, 'b': evalTemp_1}, cqf_disjunction_] +`; + assert.eq(280, res.executionStats.nReturned); + actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); + + // The same query, but with nested $ors. 
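+    // Nested $ors over the same field are expected to flatten into the single EqMember
+    // predicate over Const [[1, 2, 3]] shown above, so the plan and row count should not change.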
+ res = runWithParams( + params, + () => + coll.explain("executionStats").find({$or: [{$or: [{a: 1}, {a: 2}]}, {b: 3}]}).finish()); + assert.eq(280, res.executionStats.nReturned); + assert.eq(expectedStr, actualStr); + + res = runWithParams( + params, + () => + coll.explain("executionStats").find({$or: [{$or: [{a: 1}, {b: 3}]}, {a: 2}]}).finish()); + assert.eq(280, res.executionStats.nReturned); + assert.eq(expectedStr, actualStr); + + // + // Test $or where two predicates are on one field and two predicates are on another. + // + res = runWithParams( + params, + () => + coll.explain("executionStats").find({$or: [{a: 1}, {a: 2}, {b: 3}, {b: 4}]}).finish()); + + expectedStr = + `Root [{scan_0}] +Filter [] +| BinaryOp [Or] +| | EvalFilter [] +| | | Variable [evalTemp_1] +| | PathTraverse [1] +| | PathCompare [EqMember] +| | Const [[3, 4]] +| EvalFilter [] +| | Variable [evalTemp_0] +| PathTraverse [1] +| PathCompare [EqMember] +| Const [[1, 2]] +PhysicalScan [{'': scan_0, 'a': evalTemp_0, 'b': evalTemp_1}, cqf_disjunction_] +`; + assert.eq(360, res.executionStats.nReturned); + actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); + + // The same query, but with nested $ors. + runWithParams(params, + () => coll.explain("executionStats") + .find({$or: [{$or: [{a: 1}, {a: 2}]}, {$or: [{b: 3}, {b: 4}]}]}) + .finish()); + assert.eq(360, res.executionStats.nReturned); + assert.eq(expectedStr, actualStr); + + runWithParams(params, + () => coll.explain("executionStats") + .find({$or: [{$or: [{a: 1}, {b: 4}]}, {$or: [{b: 3}, {a: 2}]}]}) + .finish()); + assert.eq(360, res.executionStats.nReturned); + assert.eq(expectedStr, actualStr); +} + +// Test a union involving multikey indexes. +// First make {a:1} and {b:1} multikey. +assert.commandWorked(coll.insert({a: ['asdf'], b: ['qwer']})); +{ + const query = {$or: [{a: 2}, {b: 3}]}; + + // Check the plan and count. + const res = runWithParams( + [ + {key: 'internalCascadesOptimizerExplainVersion', value: "v2"}, + {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true} + ], + () => coll.explain("executionStats").find(query).finish()); + assert.eq(190, res.executionStats.nReturned); + + // We should get a union of two indexes {a:1} and {b:1}. + // Neither one needs its own Unique stage, because we have to have a Unique after the Union + // anyway. + const expectedStr = + `Root [{scan_0}] +NestedLoopJoin [joinType: Inner, {rid_1}] +| | Const [true] +| LimitSkip [limit: 1, skip: 0] +| Seek [ridProjection: rid_1, {'': scan_0}, cqf_disjunction_] +Unique [{rid_1}] +Union [{rid_1}] +| IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: b_1, interval: {=Const [3]}] +IndexScan [{'': rid_1}, scanDefName: cqf_disjunction_, indexDefName: a_1, interval: {=Const [2]}] +`; + const actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); + + // Check the full result. 
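+    // The one multikey document added above ({a: ['asdf'], b: ['qwer']}) matches neither
+    // branch of the $or, so the full result set is still the original 190 documents.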
+ const result = coll.find(query).toArray(); + assert.eq(result.length, 190, result); + for (const doc of result) { + assert(doc.a === 2 || doc.b === 3, + "Query returned a doc not matching the predicate: ${doc}"); + } +} \ No newline at end of file diff --git a/jstests/cqf/elemmatch_bounds.js b/jstests/cqf/optimizer/elemmatch_bounds.js similarity index 98% rename from jstests/cqf/elemmatch_bounds.js rename to jstests/cqf/optimizer/elemmatch_bounds.js index 09e9a44c45a39..4e17c45429a60 100644 --- a/jstests/cqf/elemmatch_bounds.js +++ b/jstests/cqf/optimizer/elemmatch_bounds.js @@ -7,10 +7,7 @@ * requires_cqf, * ] */ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton. +import {getPlanSkeleton} from "jstests/libs/optimizer_utils.js"; const coll = db.cqf_elemmatch_bounds; coll.drop(); @@ -204,5 +201,4 @@ result = run({ assertCount(result, numDuplicates, {c: [[1, 2, 3]]}); assertCount(result, numDuplicates, {c: [[1], [2], [3]]}); assertCount(result, numDuplicates, {c: [[2]]}); -assert.eq(result.length, numDuplicates * 3); -})(); +assert.eq(result.length, numDuplicates * 3); \ No newline at end of file diff --git a/jstests/cqf/empty_results.js b/jstests/cqf/optimizer/empty_results.js similarity index 76% rename from jstests/cqf/empty_results.js rename to jstests/cqf/optimizer/empty_results.js index 05fd8b24fa880..7c97a3d71f74d 100644 --- a/jstests/cqf/empty_results.js +++ b/jstests/cqf/optimizer/empty_results.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_empty_results; @@ -15,4 +16,3 @@ assert.commandWorked(t.insert([{a: 1}, {a: 2}])); const res = t.explain("executionStats").aggregate([{$match: {'a': 2}}, {$limit: 1}, {$skip: 10}]); assert.eq(0, res.executionStats.nReturned); assertValueOnPlanPath("CoScan", res, "child.child.child.nodeType"); -}()); diff --git a/jstests/cqf/explain_test.js b/jstests/cqf/optimizer/explain_test.js similarity index 87% rename from jstests/cqf/explain_test.js rename to jstests/cqf/optimizer/explain_test.js index 2c24cf32c2114..574df54a027cf 100644 --- a/jstests/cqf/explain_test.js +++ b/jstests/cqf/optimizer/explain_test.js @@ -1,11 +1,12 @@ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
+import { + checkCascadesOptimizerEnabled, + removeUUIDsFromExplain, + runWithParams +} from "jstests/libs/optimizer_utils.js"; if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_explain_test; @@ -40,4 +41,3 @@ PhysicalScan [{'': scan_0, 'a': evalTemp_2, 'b': evalTemp_3}, cqf_explain_ const actualStr = removeUUIDsFromExplain(db, res); assert.eq(expectedStr, actualStr); -}()); diff --git a/jstests/cqf/filter_order.js b/jstests/cqf/optimizer/filter_order.js similarity index 82% rename from jstests/cqf/filter_order.js rename to jstests/cqf/optimizer/filter_order.js index e33e45c661eb5..2f094be4f90f7 100644 --- a/jstests/cqf/filter_order.js +++ b/jstests/cqf/optimizer/filter_order.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_filter_order; @@ -19,4 +17,3 @@ assert.commandWorked(bulk.execute()); let res = coll.aggregate([{$match: {'a': {$eq: 1}, 'b': {$eq: 1}, 'c': {$eq: 1}}}]).toArray(); // TODO: verify plan that predicate on "c" is applied first (most selective), then "b", then "a". -}()); diff --git a/jstests/cqf/find_limit_skip.js b/jstests/cqf/optimizer/find_limit_skip.js similarity index 100% rename from jstests/cqf/find_limit_skip.js rename to jstests/cqf/optimizer/find_limit_skip.js diff --git a/jstests/cqf/find_sort.js b/jstests/cqf/optimizer/find_sort.js similarity index 87% rename from jstests/cqf/find_sort.js rename to jstests/cqf/optimizer/find_sort.js index 53d281dba0d59..f2b179808890a 100644 --- a/jstests/cqf/find_sort.js +++ b/jstests/cqf/optimizer/find_sort.js @@ -1,10 +1,12 @@ -(function() { -"use strict"; +import { + assertValueOnPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_find_sort; @@ -38,4 +40,3 @@ assert.eq(numResults, res.executionStats.nReturned); const indexScanNode = navigateToPlanPath(res, "child.child.child.leftChild.child.child"); assertValueOnPath("IndexScan", indexScanNode, "nodeType"); assertValueOnPath(5, indexScanNode, "interval.highBound.bound.0.value"); -}()); diff --git a/jstests/cqf/group.js b/jstests/cqf/optimizer/group.js similarity index 82% rename from jstests/cqf/group.js rename to jstests/cqf/optimizer/group.js index 7979a72f2eee8..814937a38ef01 100644 --- a/jstests/cqf/group.js +++ b/jstests/cqf/optimizer/group.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_group; @@ -24,4 +25,3 @@ const res = coll.explain("executionStats").aggregate([ ]); assertValueOnPlanPath("GroupBy", res, "child.child.nodeType"); assert.eq(4, res.executionStats.nReturned); -}()); diff --git a/jstests/cqf/optimizer/index_hints.js b/jstests/cqf/optimizer/index_hints.js new file mode 100644 index 0000000000000..70dc6a2ba5ae3 --- /dev/null +++ b/jstests/cqf/optimizer/index_hints.js @@ -0,0 +1,167 @@ +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled, + removeUUIDsFromExplain, + runWithParams, +} from "jstests/libs/optimizer_utils.js"; + +if (!checkCascadesOptimizerEnabled(db)) { + jsTestLog("Skipping test because the optimizer is not enabled"); + quit(); +} + +const t = db.cqf_index_hints; +t.drop(); + +assert.commandWorked(t.insert({_id: 0, b: 0, a: [1, 2, 3, 4]})); +assert.commandWorked(t.insert({_id: 1, b: 1, a: [2, 3, 4]})); +assert.commandWorked(t.insert({_id: 2, b: 2, a: [2]})); +assert.commandWorked(t.insert({_id: 3, b: 3, a: 2})); +assert.commandWorked(t.insert({_id: 4, b: 4, a: [1, 3]})); + +assert.commandWorked(t.createIndex({a: 1})); +assert.commandWorked(t.createIndex({b: 1})); + +// There are too few documents, and an index is not preferable. +{ + let res = t.explain("executionStats").find({a: 2}).finish(); + assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); +} + +{ + let res = t.explain("executionStats").find({a: 2}).hint({a: 1}).finish(); + assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); +} + +{ + let res = t.explain("executionStats").find({a: 2}).hint("a_1").finish(); + assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); +} + +{ + let res = t.explain("executionStats").find({a: 2}).hint({$natural: 1}).finish(); + assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); + + res = t.find({a: 2}).hint({$natural: 1}).toArray(); + assert.eq(res[0]._id, 0, res); +} + +{ + let res = t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish(); + assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); + + res = t.find({a: 2}).hint({$natural: -1}).toArray(); + assert.eq(res[0]._id, 3, res); +} + +// Generate enough documents for index to be preferable. +for (let i = 0; i < 100; i++) { + assert.commandWorked(t.insert({b: i + 5, a: i + 10})); +} + +{ + let res = t.explain("executionStats").find({a: 2}).finish(); + assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); +} + +{ + let res = t.explain("executionStats").find({a: 2}).hint({a: 1}).finish(); + assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); +} + +{ + let res = t.explain("executionStats").find({a: 2}).hint("a_1").finish(); + assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); +} +{ + let res = t.explain("executionStats").find({a: 2}).hint({$natural: 1}).finish(); + assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); + + res = t.find({a: 2}).hint({$natural: 1}).toArray(); + assert.eq(res[0]._id, 0, res); +} + +{ + let res = t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish(); + assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); + + res = t.find({a: 2}).hint({$natural: -1}).toArray(); + assert.eq(res[0]._id, 3, res); +} + +// Use index {a:1} multikeyness info, Cannot eliminate PathTraverse. 
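+// The documents above store arrays under 'a', so the {a: 1} index is multikey and the
+// filter in the expected plan below must keep PathTraverse [1] when matching {a: 2}.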
+{ + const res = runWithParams( + [ + {key: 'internalCascadesOptimizerExplainVersion', value: "v2"}, + {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true} + ], + () => t.explain("executionStats").find({a: 2}).hint({$natural: -1}).finish()); + + const expectedStr = + `Root [{scan_0}] +Filter [] +| EvalFilter [] +| | Variable [evalTemp_0] +| PathTraverse [1] +| PathCompare [Eq] +| Const [2] +PhysicalScan [{'': scan_0, 'a': evalTemp_0}, cqf_index_hints_] +`; + + const actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); +} + +// Hint collection scan to disable indexes. Check that index {b: 1} multikeyness info can eliminate +// PathTraverse. +{ + const res = runWithParams( + [ + {key: 'internalCascadesOptimizerExplainVersion', value: "v2"}, + {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true} + ], + () => t.explain("executionStats").find({b: 2}).hint({$natural: -1}).finish()); + + const expectedStr = + `Root [{scan_0}] +Filter [] +| EvalFilter [] +| | Variable [evalTemp_0] +| PathCompare [Eq] +| Const [2] +PhysicalScan [{'': scan_0, 'b': evalTemp_0}, cqf_index_hints_] +`; + + const actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); +} + +// Hint index {a: 1} to disable index {b:1}. Check that index {b: 1} multikeyness info can eliminate +// PathTraverse. +{ + const res = runWithParams( + [ + {key: 'internalCascadesOptimizerExplainVersion', value: "v2"}, + {key: "internalCascadesOptimizerUseDescriptiveVarNames", value: true} + ], + () => t.explain("executionStats").find({a: {$gt: 0}, b: 2}).hint("a_1").finish()); + + const expectedStr = + `Root [{scan_0}] +NestedLoopJoin [joinType: Inner, {rid_1}] +| | Const [true] +| Filter [] +| | EvalFilter [] +| | | Variable [evalTemp_4] +| | PathCompare [Eq] +| | Const [2] +| LimitSkip [limit: 1, skip: 0] +| Seek [ridProjection: rid_1, {'': scan_0, 'b': evalTemp_4}, cqf_index_hints_] +Unique [{rid_1}] +IndexScan [{'': rid_1}, scanDefName: cqf_index_hints_, indexDefName: a_1, interval: {(Const [0], Const [""])}] +`; + + const actualStr = removeUUIDsFromExplain(db, res); + assert.eq(expectedStr, actualStr); +} diff --git a/jstests/cqf/index_intersect.js b/jstests/cqf/optimizer/index_intersect.js similarity index 90% rename from jstests/cqf/index_intersect.js rename to jstests/cqf/optimizer/index_intersect.js index 8d89703cfb88e..29543e38aa912 100644 --- a/jstests/cqf/index_intersect.js +++ b/jstests/cqf/optimizer/index_intersect.js @@ -1,10 +1,13 @@ -(function() { -"use strict"; +import { + assertValueOnPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath, + runWithParams, +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_index_intersect; @@ -18,7 +21,7 @@ for (let i = 0; i < nMatches; i++) { documents.push({a: 4, b: 3, c: 2}); documents.push({a: 5, b: 5, c: 2}); -for (let i = 1; i < nMatches + 500; i++) { +for (let i = 1; i < nMatches + 1000; i++) { documents.push({a: i + nMatches, b: i + nMatches, c: i + nMatches}); } @@ -53,4 +56,3 @@ joinNode = navigateToPlanPath(res, "child.leftChild"); assertValueOnPath("HashJoin", joinNode, "nodeType"); assertValueOnPath("IndexScan", joinNode, "leftChild.nodeType"); assertValueOnPath("IndexScan", joinNode, "rightChild.children.0.child.nodeType"); -}()); diff --git a/jstests/cqf/index_intersect1.js b/jstests/cqf/optimizer/index_intersect1.js similarity index 89% rename from jstests/cqf/index_intersect1.js rename to jstests/cqf/optimizer/index_intersect1.js index 7c602dcb695c4..4c3d0b1704549 100644 --- a/jstests/cqf/index_intersect1.js +++ b/jstests/cqf/optimizer/index_intersect1.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_index_intersect1; @@ -32,4 +33,3 @@ res = t.explain("executionStats") .aggregate([{$project: {'_id': 0, 'a': 1}}, {$match: {'a': {$gt: 60, $lt: 100}}}]); assert.eq(2, res.executionStats.nReturned); assertValueOnPlanPath("IndexScan", res, "child.child.nodeType"); -}()); \ No newline at end of file diff --git a/jstests/cqf/index_subfield.js b/jstests/cqf/optimizer/index_subfield.js similarity index 86% rename from jstests/cqf/index_subfield.js rename to jstests/cqf/optimizer/index_subfield.js index edb8146e3b951..70b56a643cb1e 100644 --- a/jstests/cqf/index_subfield.js +++ b/jstests/cqf/optimizer/index_subfield.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_index_subfield; @@ -29,4 +30,3 @@ assert.commandWorked(t.createIndex({a: 1, b: 1})); t.explain("executionStats").find({a: 2, 'b.c': 3}, {_id: 0, a: 1}).hint("a_1_b_1").finish(); assertValueOnPlanPath("IndexScan", res, "child.child.child.nodeType"); } -}()); diff --git a/jstests/cqf/index_with_null.js b/jstests/cqf/optimizer/index_with_null.js similarity index 90% rename from jstests/cqf/index_with_null.js rename to jstests/cqf/optimizer/index_with_null.js index aa4125825f527..b6bd29ca565cb 100644 --- a/jstests/cqf/index_with_null.js +++ b/jstests/cqf/optimizer/index_with_null.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_index_with_null; @@ -45,4 +46,3 @@ t.createIndex({c: 1}); // Verify the query **is covered** by the index. 
assertValueOnPlanPath("IndexScan", res, "child.child.nodeType"); } -}()); diff --git a/jstests/cqf/lookup.js b/jstests/cqf/optimizer/lookup.js similarity index 96% rename from jstests/cqf/lookup.js rename to jstests/cqf/optimizer/lookup.js index 0b633766bf764..353bc0bf63087 100644 --- a/jstests/cqf/lookup.js +++ b/jstests/cqf/optimizer/lookup.js @@ -1,14 +1,16 @@ -(function() { -"use strict"; +import { + assertValueOnPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath +} from "jstests/libs/optimizer_utils.js"; + +load("jstests/aggregation/extras/utils.js"); -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } -load("jstests/aggregation/extras/utils.js"); - const collA = db.collA; collA.drop(); @@ -110,4 +112,3 @@ try { assert.commandWorked( db.adminCommand({'configureFailPoint': 'disablePipelineOptimization', 'mode': 'off'})); } -}()); diff --git a/jstests/cqf/match_expr.js b/jstests/cqf/optimizer/match_expr.js similarity index 90% rename from jstests/cqf/match_expr.js rename to jstests/cqf/optimizer/match_expr.js index ed2ffcdb4268c..7bcbc2452c274 100644 --- a/jstests/cqf/match_expr.js +++ b/jstests/cqf/optimizer/match_expr.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_match_expr; @@ -46,4 +47,3 @@ const numExpected = 1 * 5 * 5 + 4 * 1 * 1; assertValueOnPlanPath("Filter", res, "child.nodeType"); assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); } -}()); diff --git a/jstests/cqf/match_with_exists.js b/jstests/cqf/optimizer/match_with_exists.js similarity index 100% rename from jstests/cqf/match_with_exists.js rename to jstests/cqf/optimizer/match_with_exists.js diff --git a/jstests/cqf/match_with_in.js b/jstests/cqf/optimizer/match_with_in.js similarity index 97% rename from jstests/cqf/match_with_in.js rename to jstests/cqf/optimizer/match_with_in.js index ac52430ece6eb..73418c3a2dcc3 100644 --- a/jstests/cqf/match_with_in.js +++ b/jstests/cqf/optimizer/match_with_in.js @@ -3,10 +3,7 @@ */ load('jstests/aggregation/extras/utils.js'); // For assertArrayEq. 
-load('jstests/libs/optimizer_utils.js'); - -(function() { -"use strict"; +import {usedBonsaiOptimizer} from "jstests/libs/optimizer_utils.js"; const coll = db.cqf_match_with_in; coll.drop(); @@ -84,4 +81,3 @@ const tests = [ for (const testData of tests) { runTest(testData.filter, testData.expected); } -}()); diff --git a/jstests/cqf/optimizer/no_collection.js b/jstests/cqf/optimizer/no_collection.js new file mode 100644 index 0000000000000..9e6d0b88e8f82 --- /dev/null +++ b/jstests/cqf/optimizer/no_collection.js @@ -0,0 +1,12 @@ +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; + +if (!checkCascadesOptimizerEnabled(db)) { + jsTestLog("Skipping test because the optimizer is not enabled"); + quit(); +} + +let t = db.cqf_no_collection; +t.drop(); + +const res = t.explain("executionStats").aggregate([{$match: {'a': 2}}]); +assert.eq(0, res.executionStats.nReturned); diff --git a/jstests/cqf/nonselective_index.js b/jstests/cqf/optimizer/nonselective_index.js similarity index 83% rename from jstests/cqf/nonselective_index.js rename to jstests/cqf/optimizer/nonselective_index.js index 56ba933c0bada..52c8384cd7c97 100644 --- a/jstests/cqf/nonselective_index.js +++ b/jstests/cqf/optimizer/nonselective_index.js @@ -1,13 +1,14 @@ /** * Tests scenario related to SERVER-13065. */ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_nonselective_index; @@ -26,5 +27,4 @@ assert.commandWorked(t.createIndex({a: 1})); const res = t.explain("executionStats").aggregate([{$match: {a: {$gte: 0}}}]); assert.eq(nDocs, res.executionStats.nReturned); -assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); -}()); \ No newline at end of file +assertValueOnPlanPath("PhysicalScan", res, "child.child.nodeType"); \ No newline at end of file diff --git a/jstests/cqf/not_expr.js b/jstests/cqf/optimizer/not_expr.js similarity index 97% rename from jstests/cqf/not_expr.js rename to jstests/cqf/optimizer/not_expr.js index 980bc671c1aae..3b4233a96508d 100644 --- a/jstests/cqf/not_expr.js +++ b/jstests/cqf/optimizer/not_expr.js @@ -1,8 +1,5 @@ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For assertArrayEq. -load("jstests/libs/optimizer_utils.js"); // For assertValueOnPlanPath. +import {assertValueOnPlanPath} from "jstests/libs/optimizer_utils.js"; const c = db.cqf_not; c.drop(); @@ -270,4 +267,3 @@ assertArrayEq({ {a: [[3, 3], [3, 3]]}, ], }); -}()); diff --git a/jstests/cqf/null_missing.js b/jstests/cqf/optimizer/null_missing.js similarity index 87% rename from jstests/cqf/null_missing.js rename to jstests/cqf/optimizer/null_missing.js index 2797fc66e0ced..0b6e0c5a6b389 100644 --- a/jstests/cqf/null_missing.js +++ b/jstests/cqf/optimizer/null_missing.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_null_missing; @@ -32,4 +33,3 @@ assert.commandWorked(t.createIndex({'a.b': 1})); assert.eq(3, res.executionStats.nReturned); assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); } -}()); diff --git a/jstests/cqf/object_elemMatch.js b/jstests/cqf/optimizer/object_elemMatch.js similarity index 90% rename from jstests/cqf/object_elemMatch.js rename to jstests/cqf/optimizer/object_elemMatch.js index e1baf046e9b93..dbfd868fed4a5 100644 --- a/jstests/cqf/object_elemMatch.js +++ b/jstests/cqf/optimizer/object_elemMatch.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_object_elemMatch; @@ -35,4 +36,3 @@ assert.commandWorked(t.insert({a: [{"": [1, 2], c: [3, 4]}]})); assert.eq(1, res.executionStats.nReturned); assertValueOnPlanPath("PhysicalScan", res, "child.child.child.nodeType"); } -}()); diff --git a/jstests/cqf/partial_index.js b/jstests/cqf/optimizer/partial_index.js similarity index 88% rename from jstests/cqf/partial_index.js rename to jstests/cqf/optimizer/partial_index.js index ba1993a318cd1..fe708e4e72e01 100644 --- a/jstests/cqf/partial_index.js +++ b/jstests/cqf/optimizer/partial_index.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_partial_index; @@ -30,4 +28,3 @@ assert.eq(1, res.length); // TODO: verify with explain the plan should not use the index. res = t.aggregate([{$match: {'a': 3, 'b': 3}}]).toArray(); assert.eq(2, res.length); -}()); \ No newline at end of file diff --git a/jstests/cqf/project_expr_dependency.js b/jstests/cqf/optimizer/project_expr_dependency.js similarity index 84% rename from jstests/cqf/project_expr_dependency.js rename to jstests/cqf/optimizer/project_expr_dependency.js index 0c3d1510d1b35..806e46abe555f 100644 --- a/jstests/cqf/project_expr_dependency.js +++ b/jstests/cqf/optimizer/project_expr_dependency.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled, navigateToPlanPath} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_project_expr_dependency; @@ -23,4 +21,3 @@ const res = t.explain("executionStats").aggregate([ // Demonstrate we only need to read "b1" and "c1" from the collection. 
const scanNodeProjFieldMap = navigateToPlanPath(res, "child.child.fieldProjectionMap"); assert.eq(["b1", "c1"], Object.keys(scanNodeProjFieldMap)); -}()); diff --git a/jstests/cqf/projection.js b/jstests/cqf/optimizer/projection.js similarity index 99% rename from jstests/cqf/projection.js rename to jstests/cqf/optimizer/projection.js index 9bd12a477866e..841ca8ba942d4 100644 --- a/jstests/cqf/projection.js +++ b/jstests/cqf/optimizer/projection.js @@ -3,10 +3,8 @@ * Many of these tests are similar/repeats of core/projection_semantics.js */ -(function() { -"use strict"; load('jstests/aggregation/extras/utils.js'); // For assertArrayEq. -load('jstests/libs/optimizer_utils.js'); +import {usedBonsaiOptimizer} from "jstests/libs/optimizer_utils.js"; const coll = db.cqf_project; @@ -187,5 +185,4 @@ function testInputOutput({input, projection, expectedOutput, interestingIndexes [{_id: 0, b: {c: 1, d: 1}}, {_id: 1, b: {c: 2, d: 2}}, {_id: 2, b: {c: 3, d: 3}}], interestingIndexes: [], }); -}()); -}()); +}()); \ No newline at end of file diff --git a/jstests/cqf/range_descending.js b/jstests/cqf/optimizer/range_descending.js similarity index 97% rename from jstests/cqf/range_descending.js rename to jstests/cqf/optimizer/range_descending.js index 4c8fe22f9fc3c..fd790f5905d19 100644 --- a/jstests/cqf/range_descending.js +++ b/jstests/cqf/optimizer/range_descending.js @@ -7,10 +7,8 @@ * behavior, the index bounds are swapped when the corresponding index is descending. */ -(function() { -"use strict"; +import {assertValueOnPlanPath} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); const coll = db.cqf_range_descending; /* * This is the most basic case: a single range predicate with a descending index. @@ -84,4 +82,3 @@ const coll = db.cqf_range_descending; assertValueOnPlanPath("IndexScan", res, "child.child.leftChild.nodeType"); } } -}()); diff --git a/jstests/cqf/recursive_ix_nav.js b/jstests/cqf/optimizer/recursive_ix_nav.js similarity index 87% rename from jstests/cqf/recursive_ix_nav.js rename to jstests/cqf/optimizer/recursive_ix_nav.js index 529ec9d7eac22..8b9f59bd4fd1e 100644 --- a/jstests/cqf/recursive_ix_nav.js +++ b/jstests/cqf/optimizer/recursive_ix_nav.js @@ -1,10 +1,13 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath, + runWithParams, +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_recursive_ix_nav; @@ -152,7 +155,11 @@ assert.commandWorked(t.createIndex({a: 1, b: 1, c: 1, d: 1, e: 1})); [ {key: "internalCascadesOptimizerMinIndexEqPrefixes", value: 2}, {key: "internalCascadesOptimizerMaxIndexEqPrefixes", value: 2}, - {key: "internalCascadesOptimizerDisableScan", value: true} + {key: "internalCascadesOptimizerDisableScan", value: true}, + // Make Seek very expensive to discourage plans where we satisfy some predicates after + // the fetch. We want to test the plan where a,c,e predicates are all satisfied on the + // index side: a,c as equality prefixes and e as residual. 
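+            // The internalCostModelCoefficients override below bumps seekStartupCost to ~1e6,
+            // pricing out any plan that performs a Seek per matching document.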
+ {key: 'internalCostModelCoefficients', value: {"seekStartupCost": 1e6 + 0.1}} ], () => t.explain("executionStats").aggregate([ { @@ -170,12 +177,18 @@ assert.commandWorked(t.createIndex({a: 1, b: 1, c: 1, d: 1, e: 1})); // Assert we have two spool producers, one for each interval for "a" ([1, 3] and [6, 6]). assertValueOnPlanPath( - "SpoolProducer", res, "child.child.leftChild.child.children.0.leftChild.nodeType"); - assertValueOnPlanPath(7, res, "child.child.leftChild.child.children.0.leftChild.id"); + "SpoolProducer", res, "child.child.child.children.0.child.children.0.leftChild.nodeType"); + const leftNode = + navigateToPlanPath(res, 'child.child.child.children.0.child.children.0.leftChild'); assertValueOnPlanPath( - "SpoolProducer", res, "child.child.leftChild.child.children.1.leftChild.nodeType"); - assertValueOnPlanPath(8, res, "child.child.leftChild.child.children.1.leftChild.id"); + "SpoolProducer", res, "child.child.child.children.0.child.children.1.leftChild.nodeType"); + const rightNode = + navigateToPlanPath(res, 'child.child.child.children.0.child.children.1.leftChild'); + + assert.neq(leftNode.id, + rightNode.id, + `Expected different spool ids: ${tojson({leftNode, rightNode})}`); } { @@ -209,4 +222,3 @@ assert.commandWorked(t.createIndex({a: 1, b: 1, c: 1, d: 1, e: 1})); assertValueOnPlanPath("IndexScan", res, "child.leftChild.rightChild.nodeType"); assertValueOnPlanPath(false, res, "child.leftChild.rightChild.reversed"); } -}()); diff --git a/jstests/cqf/redundant_conditions.js b/jstests/cqf/optimizer/redundant_conditions.js similarity index 87% rename from jstests/cqf/redundant_conditions.js rename to jstests/cqf/optimizer/redundant_conditions.js index 75db76e4e3011..3593969321d44 100644 --- a/jstests/cqf/redundant_conditions.js +++ b/jstests/cqf/optimizer/redundant_conditions.js @@ -1,13 +1,15 @@ /** * Tests scenario related to SERVER-22857. */ -(function() { -"use strict"; +import { + checkCascadesOptimizerEnabled, + removeUUIDsFromExplain, + runWithParams +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_redundant_condition; @@ -39,5 +41,4 @@ PhysicalScan [{'': scan_0, 'a': evalTemp_2}, cqf_redundant_condition_] `; const actualStr = removeUUIDsFromExplain(db, res); assert.eq(expectedStr, actualStr); -} -}()); +} \ No newline at end of file diff --git a/jstests/cqf/residual_pred_costing.js b/jstests/cqf/optimizer/residual_pred_costing.js similarity index 87% rename from jstests/cqf/residual_pred_costing.js rename to jstests/cqf/optimizer/residual_pred_costing.js index 387c5804b0a0c..6c98749265e26 100644 --- a/jstests/cqf/residual_pred_costing.js +++ b/jstests/cqf/optimizer/residual_pred_costing.js @@ -1,13 +1,16 @@ /** * Tests scenario related to SERVER-21697. */ -(function() { -"use strict"; +import { + assertValueOnPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath, + runWithParams, +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_residual_pred_costing; @@ -33,5 +36,4 @@ assert.eq(nDocs * 0.1, res.executionStats.nReturned); // Demonstrate we can pick the indexing covering most fields. 
const indexNode = navigateToPlanPath(res, "child.leftChild"); assertValueOnPath("IndexScan", indexNode, "nodeType"); -assertValueOnPath("a_1_b_1_c_1_d_1", indexNode, "indexDefName"); -}()); +assertValueOnPath("a_1_b_1_c_1_d_1", indexNode, "indexDefName"); \ No newline at end of file diff --git a/jstests/cqf/sampling.js b/jstests/cqf/optimizer/sampling.js similarity index 86% rename from jstests/cqf/sampling.js rename to jstests/cqf/optimizer/sampling.js index 37dd0ae0e4411..fdd91cc17dd90 100644 --- a/jstests/cqf/sampling.js +++ b/jstests/cqf/optimizer/sampling.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_sampling; @@ -28,4 +26,3 @@ const props = res.queryPlanner.winningPlan.optimizerPlan.properties; // Verify the winning plan cardinality is within roughly 25% of the expected documents. assert.lt(nDocs * 0.2 * 0.75, props.adjustedCE); assert.gt(nDocs * 0.2 * 1.25, props.adjustedCE); -}()); diff --git a/jstests/cqf/selective_index.js b/jstests/cqf/optimizer/selective_index.js similarity index 81% rename from jstests/cqf/selective_index.js rename to jstests/cqf/optimizer/selective_index.js index 42113655472be..7e482658c29b7 100644 --- a/jstests/cqf/selective_index.js +++ b/jstests/cqf/optimizer/selective_index.js @@ -1,13 +1,15 @@ /** * Tests scenario related to SERVER-20616. */ -(function() { -"use strict"; +import { + assertValueOnPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_selective_index; @@ -30,5 +32,4 @@ assert.eq(1, res.executionStats.nReturned); // Demonstrate we can pick index on "b". const indexNode = navigateToPlanPath(res, "child.leftChild"); assertValueOnPath("IndexScan", indexNode, "nodeType"); -assertValueOnPath("b_1", indexNode, "indexDefName"); -}()); \ No newline at end of file +assertValueOnPath("b_1", indexNode, "indexDefName"); \ No newline at end of file diff --git a/jstests/cqf/sort.js b/jstests/cqf/optimizer/sort.js similarity index 81% rename from jstests/cqf/sort.js rename to jstests/cqf/optimizer/sort.js index 6d03b95c850df..f79fb45bc396c 100644 --- a/jstests/cqf/sort.js +++ b/jstests/cqf/optimizer/sort.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_sort; @@ -19,4 +17,3 @@ assert.commandWorked(t.insert({_id: 6, x: 4})); const res = t.aggregate([{$unwind: '$x'}, {$sort: {'x': 1}}]).toArray(); assert.eq(4, res.length); -}()); \ No newline at end of file diff --git a/jstests/cqf/sort1.js b/jstests/cqf/optimizer/sort1.js similarity index 89% rename from jstests/cqf/sort1.js rename to jstests/cqf/optimizer/sort1.js index fce60b2f4ae43..335b5e8a70842 100644 --- a/jstests/cqf/sort1.js +++ b/jstests/cqf/optimizer/sort1.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_sort1; @@ -34,4 +32,3 @@ t.createIndex(index); const resIndexScan = t.find(query).hint(index).sort({_id: 1}).toArray(); assert.eq(resCollScan, resIndexScan); } -}()); \ No newline at end of file diff --git a/jstests/cqf/sort2.js b/jstests/cqf/optimizer/sort2.js similarity index 80% rename from jstests/cqf/sort2.js rename to jstests/cqf/optimizer/sort2.js index 7961d82320dea..5023808b407b4 100644 --- a/jstests/cqf/sort2.js +++ b/jstests/cqf/optimizer/sort2.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_sort2; @@ -22,4 +20,3 @@ t.createIndex({a: 1}); const resIndexScan = t.find({a: {$gte: 1}}).sort({a: -1}).hint({a: 1}).toArray(); assert.eq(resCollScan, resIndexScan); } -}()); \ No newline at end of file diff --git a/jstests/cqf/sort3.js b/jstests/cqf/optimizer/sort3.js similarity index 82% rename from jstests/cqf/sort3.js rename to jstests/cqf/optimizer/sort3.js index 66a23b6beba10..59590cf48795c 100644 --- a/jstests/cqf/sort3.js +++ b/jstests/cqf/optimizer/sort3.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_sort3; @@ -23,4 +21,3 @@ t.createIndex({a: 1, b: -1}); const resIndexScan = t.find({a: {$gte: 2}}).sort({a: 1, b: -1}).hint({a: 1, b: -1}).toArray(); assert.eq(resCollScan, resIndexScan); } -}()); \ No newline at end of file diff --git a/jstests/cqf/sort_compound_pred.js b/jstests/cqf/optimizer/sort_compound_pred.js similarity index 94% rename from jstests/cqf/sort_compound_pred.js rename to jstests/cqf/optimizer/sort_compound_pred.js index a983890b36eb2..f4cfd8b90162f 100644 --- a/jstests/cqf/sort_compound_pred.js +++ b/jstests/cqf/optimizer/sort_compound_pred.js @@ -1,10 +1,12 @@ -(function() { -"use strict"; +import { + checkCascadesOptimizerEnabled, + removeUUIDsFromExplain, + runWithParams +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_sort_compound_pred; @@ -72,4 +74,3 @@ Union [{disjunction_0, rid_1}] IndexScan [{' 0': disjunction_0, '': rid_1}, scanDefName: cqf_sort_compound_pred_, indexDefName: a_1, interval: {=Const [1]}] `); } -}()); diff --git a/jstests/cqf/sort_match.js b/jstests/cqf/optimizer/sort_match.js similarity index 89% rename from jstests/cqf/sort_match.js rename to jstests/cqf/optimizer/sort_match.js index 4d53ec5f66925..55a5ab4f219f0 100644 --- a/jstests/cqf/sort_match.js +++ b/jstests/cqf/optimizer/sort_match.js @@ -1,13 +1,16 @@ /** * Tests scenario related to SERVER-12923. */ -(function() { -"use strict"; +import { + assertValueOnPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath, + runWithParams, +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_sort_match; @@ -49,5 +52,4 @@ assert.commandWorked(t.createIndex({b: 1})); } prev = current; } -} -}()); \ No newline at end of file +} \ No newline at end of file diff --git a/jstests/cqf/sort_project.js b/jstests/cqf/optimizer/sort_project.js similarity index 96% rename from jstests/cqf/sort_project.js rename to jstests/cqf/optimizer/sort_project.js index c3cd5b2b9dbac..5b413c87fc2ab 100644 --- a/jstests/cqf/sort_project.js +++ b/jstests/cqf/optimizer/sort_project.js @@ -1,10 +1,12 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled, + runWithParams +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } var coll = db.cqf_testCovIndxScan; @@ -78,4 +80,3 @@ const nDocs = 20; assert.eq(nDocs, res.executionStats.nReturned); assertValueOnPlanPath("IndexScan", res, "child.child.nodeType"); } -}()); diff --git a/jstests/cqf/sorted_merge.js b/jstests/cqf/optimizer/sorted_merge.js similarity index 95% rename from jstests/cqf/sorted_merge.js rename to jstests/cqf/optimizer/sorted_merge.js index 6c001e99b7807..5213f3717844f 100644 --- a/jstests/cqf/sorted_merge.js +++ b/jstests/cqf/optimizer/sorted_merge.js @@ -1,10 +1,12 @@ -(function() { -"use strict"; +import { + checkCascadesOptimizerEnabled, + removeUUIDsFromExplain, + runWithParams +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_sorted_merge; @@ -75,4 +77,3 @@ IndexScan [{'': rid_1}, scanDefName: cqf_sorted_merge_, indexDefName: a_1, `; assert.eq(removeUUIDsFromExplain(db, getExplain()), multikeyExplain); testCorrectness(); -}()); \ No newline at end of file diff --git a/jstests/cqf/type_bracket.js b/jstests/cqf/optimizer/type_bracket.js similarity index 94% rename from jstests/cqf/type_bracket.js rename to jstests/cqf/optimizer/type_bracket.js index 13abbc2a856c4..3fb986f80dbe7 100644 --- a/jstests/cqf/type_bracket.js +++ b/jstests/cqf/optimizer/type_bracket.js @@ -1,7 +1,4 @@ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. +import {assertValueOnPlanPath} from "jstests/libs/optimizer_utils.js"; const t = db.cqf_type_bracket; t.drop(); @@ -55,4 +52,3 @@ assert.commandWorked(t.createIndex({a: 1})); assert.eq(4, res.executionStats.nReturned); assertValueOnPlanPath("IndexScan", res, "child.leftChild.nodeType"); } -}()); \ No newline at end of file diff --git a/jstests/cqf/type_predicate.js b/jstests/cqf/optimizer/type_predicate.js similarity index 82% rename from jstests/cqf/type_predicate.js rename to jstests/cqf/optimizer/type_predicate.js index eb8de44b3f601..af80f58db75fa 100644 --- a/jstests/cqf/type_predicate.js +++ b/jstests/cqf/optimizer/type_predicate.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_type_predicate; @@ -23,4 +21,3 @@ for (let i = 0; i < 10; i++) { const res = t.explain("executionStats").aggregate([{$match: {a: {$type: "double"}}}]); assert.eq(10, res.executionStats.nReturned); } -}()); \ No newline at end of file diff --git a/jstests/cqf/unionWith.js b/jstests/cqf/optimizer/unionWith.js similarity index 93% rename from jstests/cqf/unionWith.js rename to jstests/cqf/optimizer/unionWith.js index 63dedc9d75018..70aaf970899ef 100644 --- a/jstests/cqf/unionWith.js +++ b/jstests/cqf/optimizer/unionWith.js @@ -1,10 +1,8 @@ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } load("jstests/aggregation/extras/utils.js"); @@ -51,4 +49,3 @@ assert.eq([{_id: 0, a: 1}, {a: 2}], res); res = collA.aggregate([{$unionWith: "collB"}, {$project: {_id: 0, a: 1}}]).toArray(); assert.eq(2, res.length); assert.eq([{a: 1}, {a: 2}], res); -}()); diff --git a/jstests/cqf/validate_internal_plan_with_rid_output.js b/jstests/cqf/optimizer/validate_internal_plan_with_rid_output.js similarity index 87% rename from jstests/cqf/validate_internal_plan_with_rid_output.js rename to jstests/cqf/optimizer/validate_internal_plan_with_rid_output.js index 37b1fe4d4875c..e94b278631692 100644 --- a/jstests/cqf/validate_internal_plan_with_rid_output.js +++ b/jstests/cqf/optimizer/validate_internal_plan_with_rid_output.js @@ -1,13 +1,11 @@ -(function() { -"use strict"; - // Validate that we can internally generate a special query which along with a document returns its // RecordID. -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; + if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_findone_rid; @@ -30,4 +28,3 @@ try { } finally { db.cqf_findone_rid_view.drop(); } -}()); diff --git a/jstests/cqf/value_elemMatch.js b/jstests/cqf/optimizer/value_elemMatch.js similarity index 93% rename from jstests/cqf/value_elemMatch.js rename to jstests/cqf/optimizer/value_elemMatch.js index 0afa14bb2d1e5..d57c3724bfa45 100644 --- a/jstests/cqf/value_elemMatch.js +++ b/jstests/cqf/optimizer/value_elemMatch.js @@ -1,7 +1,4 @@ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For assertValueOnPlanPath. +import {assertValueOnPlanPath} from "jstests/libs/optimizer_utils.js"; const t = db.cqf_value_elemMatch; t.drop(); @@ -42,4 +39,3 @@ assert.commandWorked(t.createIndex({a: 1})); assert.eq(0, res.executionStats.nReturned); assertValueOnPlanPath("CoScan", res, "child.child.child.nodeType"); } -}()); diff --git a/jstests/cqf/value_elemmatch_exists.js b/jstests/cqf/optimizer/value_elemmatch_exists.js similarity index 87% rename from jstests/cqf/value_elemmatch_exists.js rename to jstests/cqf/optimizer/value_elemmatch_exists.js index ed0a3e44ef78a..375ad0b6e3818 100644 --- a/jstests/cqf/value_elemmatch_exists.js +++ b/jstests/cqf/optimizer/value_elemmatch_exists.js @@ -1,13 +1,11 @@ /** * Tests scenario related to SERVER-74954. */ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_elemmatch_exists; @@ -33,4 +31,3 @@ const res = t.find({ assert.eq(2, res.length); assert.eq(1, res[0].a); assert.eq(4, res[1].a); -}()); diff --git a/jstests/cqf_parallel/basic_exchange.js b/jstests/cqf_parallel/optimizer/basic_exchange.js similarity index 81% rename from jstests/cqf_parallel/basic_exchange.js rename to jstests/cqf_parallel/optimizer/basic_exchange.js index 446357a966e1d..cf409bfe3dd1e 100644 --- a/jstests/cqf_parallel/basic_exchange.js +++ b/jstests/cqf_parallel/optimizer/basic_exchange.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_exchange; @@ -19,4 +20,3 @@ assert.commandWorked(t.insert({a: {b: 5}})); const res = t.explain("executionStats").aggregate([{$match: {'a.b': 2}}]); assert.eq(1, res.executionStats.nReturned); assertValueOnPlanPath("Exchange", res, "child.nodeType"); -}()); diff --git a/jstests/cqf_parallel/groupby.js b/jstests/cqf_parallel/optimizer/groupby.js similarity index 90% rename from jstests/cqf_parallel/groupby.js rename to jstests/cqf_parallel/optimizer/groupby.js index 9b23fb1546c15..a6ddaae7e21fc 100644 --- a/jstests/cqf_parallel/groupby.js +++ b/jstests/cqf_parallel/optimizer/groupby.js @@ -1,10 +1,12 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled, + runWithParams +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const t = db.cqf_exchange; @@ -36,4 +38,3 @@ assertValueOnPlanPath( "UnknownPartitioning", res, "child.child.child.child.child.properties.physicalProperties.distribution.type"); -}()); diff --git a/jstests/cqf_parallel/index.js b/jstests/cqf_parallel/optimizer/index.js similarity index 81% rename from jstests/cqf_parallel/index.js rename to jstests/cqf_parallel/optimizer/index.js index d56727d5b77a1..c4bd0246a83b8 100644 --- a/jstests/cqf_parallel/index.js +++ b/jstests/cqf_parallel/optimizer/index.js @@ -1,10 +1,11 @@ -(function() { -"use strict"; +import { + assertValueOnPlanPath, + checkCascadesOptimizerEnabled +} from "jstests/libs/optimizer_utils.js"; -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. 
if (!checkCascadesOptimizerEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); - return; + quit(); } const coll = db.cqf_parallel_index; @@ -21,4 +22,3 @@ assert.commandWorked(coll.createIndex({a: 1})); let res = coll.explain("executionStats").aggregate([{$match: {a: {$lt: 10}}}]); assert.eq(10, res.executionStats.nReturned); assertValueOnPlanPath("IndexScan", res, "child.child.leftChild.child.nodeType"); -}()); diff --git a/jstests/decimal/decimal128_test1.js b/jstests/decimal/decimal128_test1.js index 6cf083341e46c..b38e6ec7c0b95 100644 --- a/jstests/decimal/decimal128_test1.js +++ b/jstests/decimal/decimal128_test1.js @@ -160,4 +160,4 @@ testData.forEach(function(testCase) { assert.eq(output, `NumberDecimal("${testCase.input}")`); } }); -}()); \ No newline at end of file +}()); diff --git a/jstests/decimal/decimal128_test2.js b/jstests/decimal/decimal128_test2.js index ffbc37042259c..853af54876535 100644 --- a/jstests/decimal/decimal128_test2.js +++ b/jstests/decimal/decimal128_test2.js @@ -301,4 +301,4 @@ data.forEach(function(testCase) { assert.eq(output, `NumberDecimal("${testCase.input}")`); } }); -}()); \ No newline at end of file +}()); diff --git a/jstests/decimal/decimal128_test3.js b/jstests/decimal/decimal128_test3.js index b50f3e45bd14f..4ba0c276e149c 100644 --- a/jstests/decimal/decimal128_test3.js +++ b/jstests/decimal/decimal128_test3.js @@ -578,4 +578,4 @@ data.forEach(function(testCase) { assert.eq(output, `NumberDecimal("${testCase.input}")`); } }); -}()); \ No newline at end of file +}()); diff --git a/jstests/decimal/decimal128_test4.js b/jstests/decimal/decimal128_test4.js index 7ec4f14c303d8..8a945db386ba4 100644 --- a/jstests/decimal/decimal128_test4.js +++ b/jstests/decimal/decimal128_test4.js @@ -136,4 +136,4 @@ parseErrors.forEach(function(testCase) { } assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`); }); -}()); \ No newline at end of file +}()); diff --git a/jstests/decimal/decimal128_test6.js b/jstests/decimal/decimal128_test6.js index 07a52669e33b4..a66486b69df0d 100644 --- a/jstests/decimal/decimal128_test6.js +++ b/jstests/decimal/decimal128_test6.js @@ -46,4 +46,4 @@ parseErrors.forEach(function(testCase) { } assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`); }); -}()); \ No newline at end of file +}()); diff --git a/jstests/decimal/decimal128_test7.js b/jstests/decimal/decimal128_test7.js index d9ff5774ade84..0bf6c558ae590 100644 --- a/jstests/decimal/decimal128_test7.js +++ b/jstests/decimal/decimal128_test7.js @@ -415,4 +415,4 @@ parseErrors.forEach(function(testCase) { } assert.throws(test, [], `[Test - ${testCase.description}] should have failed with error.`); }); -}()); \ No newline at end of file +}()); diff --git a/jstests/disk/dbNoCreate.js b/jstests/disk/dbNoCreate.js index 785d7473f26b2..2d48afad92ea0 100644 --- a/jstests/disk/dbNoCreate.js +++ b/jstests/disk/dbNoCreate.js @@ -14,4 +14,4 @@ MongoRunner.stopMongod(m); m = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: m.dbpath}); assert.eq( -1, m.getDBNames().indexOf(baseName), "found " + baseName + " in " + tojson(m.getDBNames())); -MongoRunner.stopMongod(m); \ No newline at end of file +MongoRunner.stopMongod(m); diff --git a/jstests/disk/directoryperdb.js b/jstests/disk/directoryperdb.js index 84183eae7dfd1..e6eb3271200a4 100644 --- a/jstests/disk/directoryperdb.js +++ b/jstests/disk/directoryperdb.js @@ -7,7 +7,7 @@ var storageEngine = "wiredTiger"; var 
dbFileMatcher = /(collection|index)-.+\.wt$/; // Set up helper functions. -assertDocumentCount = function(db, count) { +let assertDocumentCount = function(db, count) { assert.eq(count, db[baseName].count(), 'Expected ' + count + ' documents in ' + db._name + '.' + baseName + '. ' + @@ -42,7 +42,7 @@ const waitForDatabaseDirectoryRemoval = function(dbName, dbDirPath) { /** * Returns the current connection which gets restarted with wiredtiger. */ -checkDBFilesInDBDirectory = function(conn, dbToCheck) { +let checkDBFilesInDBDirectory = function(conn, dbToCheck) { MongoRunner.stopMongod(conn); conn = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: '', restart: true}); @@ -58,9 +58,9 @@ checkDBFilesInDBDirectory = function(conn, dbToCheck) { dir = dbpath + Array(22).join('.229.135.166'); } - files = listFiles(dir); + let files = listFiles(dir); var fileCount = 0; - for (f in files) { + for (let f in files) { if (files[f].isDirectory) continue; fileCount += 1; @@ -74,13 +74,13 @@ checkDBFilesInDBDirectory = function(conn, dbToCheck) { /** * Returns the restarted connection with wiredtiger. */ -checkDBDirectoryNonexistent = function(conn, dbToCheck) { +let checkDBDirectoryNonexistent = function(conn, dbToCheck) { MongoRunner.stopMongod(conn); conn = MongoRunner.runMongod({dbpath: dbpath, directoryperdb: '', restart: true}); var files = listFiles(dbpath); // Check that there are no files in the toplevel dbpath. - for (f in files) { + for (let f in files) { if (!files[f].isDirectory) { assert(!dbFileMatcher.test(files[f].name), 'Database file' + files[f].name + diff --git a/jstests/disk/libs/wt_file_helper.js b/jstests/disk/libs/wt_file_helper.js index 6e819f889e776..dcf076d99befd 100644 --- a/jstests/disk/libs/wt_file_helper.js +++ b/jstests/disk/libs/wt_file_helper.js @@ -1,9 +1,9 @@ -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan} from "jstests/libs/analyze_plan.js"; /** * Get the URI of the wt collection file given the collection name. */ -let getUriForColl = function(coll) { +export let getUriForColl = function(coll) { assert(coll.exists()); // Collection must exist return coll.stats().wiredTiger.uri.split("table:")[1]; }; @@ -11,7 +11,7 @@ let getUriForColl = function(coll) { /** * Get the URI of the wt index file given the collection name and the index name. */ -let getUriForIndex = function(coll, indexName) { +export let getUriForIndex = function(coll, indexName) { assert(coll.exists()); // Collection must exist const ret = assert.commandWorked(coll.getDB().runCommand({collStats: coll.getName()})); return ret.indexDetails[indexName].uri.split("table:")[1]; @@ -20,7 +20,7 @@ let getUriForIndex = function(coll, indexName) { /** * 'Corrupt' the file by replacing it with an empty file. */ -let corruptFile = function(file) { +export let corruptFile = function(file) { removeFile(file); writeFile(file, ""); }; @@ -29,7 +29,7 @@ let corruptFile = function(file) { * Starts a mongod on the provided data path without clearing data. Accepts 'options' as parameters * to runMongod. 
*/ -let startMongodOnExistingPath = function(dbpath, options) { +export let startMongodOnExistingPath = function(dbpath, options) { let args = {dbpath: dbpath, noCleanData: true}; for (let attr in options) { if (options.hasOwnProperty(attr)) @@ -38,7 +38,7 @@ let startMongodOnExistingPath = function(dbpath, options) { return MongoRunner.runMongod(args); }; -let assertQueryUsesIndex = function(coll, query, indexName) { +export let assertQueryUsesIndex = function(coll, query, indexName) { let res = coll.find(query).explain(); assert.commandWorked(res); @@ -50,7 +50,7 @@ let assertQueryUsesIndex = function(coll, query, indexName) { /** * Assert that running MongoDB with --repair on the provided dbpath exits cleanly. */ -let assertRepairSucceeds = function(dbpath, port, opts) { +export let assertRepairSucceeds = function(dbpath, port, opts) { let args = ["mongod", "--repair", "--port", port, "--dbpath", dbpath, "--bind_ip_all"]; for (let a in opts) { if (opts.hasOwnProperty(a)) @@ -64,7 +64,7 @@ let assertRepairSucceeds = function(dbpath, port, opts) { assert.eq(0, runMongoProgram.apply(this, args)); }; -let assertRepairFailsWithFailpoint = function(dbpath, port, failpoint) { +export let assertRepairFailsWithFailpoint = function(dbpath, port, failpoint) { const param = "failpoint." + failpoint + "={'mode': 'alwaysOn'}"; jsTestLog("The node should fail to complete repair with --setParameter " + param); @@ -77,7 +77,7 @@ let assertRepairFailsWithFailpoint = function(dbpath, port, failpoint) { /** * Asserts that running MongoDB with --repair on the provided dbpath fails. */ -let assertRepairFails = function(dbpath, port) { +export let assertRepairFails = function(dbpath, port) { jsTestLog("The node should complete repairing the node but fails."); assert.neq(0, runMongoProgram("mongod", "--repair", "--port", port, "--dbpath", dbpath)); @@ -87,7 +87,7 @@ let assertRepairFails = function(dbpath, port) { * Assert that starting MongoDB with --replSet on an existing data path exits with a specific * error. */ -let assertErrorOnStartupWhenStartingAsReplSet = function(dbpath, port, rsName) { +export let assertErrorOnStartupWhenStartingAsReplSet = function(dbpath, port, rsName) { jsTestLog("The repaired node should fail to start up with the --replSet option"); clearRawMongoProgramOutput(); @@ -103,7 +103,7 @@ let assertErrorOnStartupWhenStartingAsReplSet = function(dbpath, port, rsName) { * Assert that starting MongoDB as a standalone on an existing data path exits with a specific * error because the previous repair failed. */ -let assertErrorOnStartupAfterIncompleteRepair = function(dbpath, port) { +export let assertErrorOnStartupAfterIncompleteRepair = function(dbpath, port) { jsTestLog("The node should fail to start up because a previous repair did not complete"); clearRawMongoProgramOutput(); @@ -119,7 +119,7 @@ let assertErrorOnStartupAfterIncompleteRepair = function(dbpath, port) { * Assert that starting MongoDB as a standalone on an existing data path succeeds. Uses a provided * testFunc to run any caller-provided checks on the started node. 
*/ -let assertStartAndStopStandaloneOnExistingDbpath = function(dbpath, port, testFunc) { +export let assertStartAndStopStandaloneOnExistingDbpath = function(dbpath, port, testFunc) { jsTestLog("The repaired node should start up and serve reads as a standalone"); let node = MongoRunner.runMongod({dbpath: dbpath, port: port, noCleanData: true}); assert(node); @@ -133,7 +133,8 @@ let assertStartAndStopStandaloneOnExistingDbpath = function(dbpath, port, testFu * * Returns the started node. */ -let assertStartInReplSet = function(replSet, originalNode, cleanData, expectResync, testFunc) { +export let assertStartInReplSet = function( + replSet, originalNode, cleanData, expectResync, testFunc) { jsTestLog("The node should rejoin the replica set. Clean data: " + cleanData + ". Expect resync: " + expectResync); // Skip clearing initial sync progress after a successful initial sync attempt so that we @@ -166,7 +167,7 @@ let assertStartInReplSet = function(replSet, originalNode, cleanData, expectResy /** * Assert certain error messages are thrown on startup when files are missing or corrupt. */ -let assertErrorOnStartupWhenFilesAreCorruptOrMissing = function( +export let assertErrorOnStartupWhenFilesAreCorruptOrMissing = function( dbpath, dbName, collName, deleteOrCorruptFunc, errmsgRegExp) { // Start a MongoDB instance, create the collection file. const mongod = MongoRunner.runMongod({dbpath: dbpath, cleanData: true}); @@ -187,11 +188,11 @@ let assertErrorOnStartupWhenFilesAreCorruptOrMissing = function( /** * Assert certain error messages are thrown on a specific request when files are missing or corrupt. */ -let assertErrorOnRequestWhenFilesAreCorruptOrMissing = function( +export let assertErrorOnRequestWhenFilesAreCorruptOrMissing = function( dbpath, dbName, collName, deleteOrCorruptFunc, requestFunc, errmsgRegExp) { // Start a MongoDB instance, create the collection file. - mongod = MongoRunner.runMongod({dbpath: dbpath, cleanData: true}); - testColl = mongod.getDB(dbName)[collName]; + let mongod = MongoRunner.runMongod({dbpath: dbpath, cleanData: true}); + let testColl = mongod.getDB(dbName)[collName]; const doc = {a: 1}; assert.commandWorked(testColl.insert(doc)); @@ -220,7 +221,7 @@ let assertErrorOnRequestWhenFilesAreCorruptOrMissing = function( /** * Runs the WiredTiger tool with the provided arguments. */ -let runWiredTigerTool = function(...args) { +export let runWiredTigerTool = function(...args) { const cmd = ['wt'].concat(args); // TODO (SERVER-67632): Check the return code on Windows variants again. if (_isWindows()) { @@ -234,7 +235,7 @@ let runWiredTigerTool = function(...args) { * Stops the given mongod, runs the truncate command on the given uri using the WiredTiger tool, and * starts mongod again on the same path. */ -let truncateUriAndRestartMongod = function(uri, conn, mongodOptions) { +export let truncateUriAndRestartMongod = function(uri, conn, mongodOptions) { MongoRunner.stopMongod(conn, null, {skipValidation: true}); runWiredTigerTool("-h", conn.dbpath, "truncate", uri); return startMongodOnExistingPath(conn.dbpath, mongodOptions); @@ -243,7 +244,7 @@ let truncateUriAndRestartMongod = function(uri, conn, mongodOptions) { /** * Stops the given mongod and runs the alter command to modify the index table's metadata. 
*/ -let alterIndexFormatVersion = function(uri, conn, formatVersion) { +export let alterIndexFormatVersion = function(uri, conn, formatVersion) { MongoRunner.stopMongod(conn, null, {skipValidation: true}); runWiredTigerTool( "-h", @@ -257,8 +258,9 @@ let alterIndexFormatVersion = function(uri, conn, formatVersion) { * Stops the given mongod, dumps the table with the uri, modifies the content, and loads it back to * the table. */ -let count = 0; -let rewriteTable = function(uri, conn, modifyData) { +export let count = 0; + +export let rewriteTable = function(uri, conn, modifyData) { MongoRunner.stopMongod(conn, null, {skipValidation: true}); const separator = _isWindows() ? '\\' : '/'; const tempDumpFile = conn.dbpath + separator + "temp_dump"; @@ -281,12 +283,12 @@ let rewriteTable = function(uri, conn, modifyData) { // In WiredTiger table dumps, the first seven lines are the header and key that we don't want to // modify. We will skip them and start from the line containing the first value. -const wtHeaderLines = 7; +export const wtHeaderLines = 7; /** * Inserts the documents with duplicate field names into the MongoDB server. */ -let insertDocDuplicateFieldName = function(coll, uri, conn, numDocs) { +export let insertDocDuplicateFieldName = function(coll, uri, conn, numDocs) { for (let i = 0; i < numDocs; ++i) { coll.insert({a: "aaaaaaa", b: "bbbbbbb"}); } @@ -304,7 +306,7 @@ let insertDocDuplicateFieldName = function(coll, uri, conn, numDocs) { rewriteTable(uri, conn, makeDuplicateFieldNames); }; -let insertDocSymbolField = function(coll, uri, conn, numDocs) { +export let insertDocSymbolField = function(coll, uri, conn, numDocs) { for (let i = 0; i < numDocs; ++i) { coll.insert({a: "aaaaaaa"}); } @@ -324,7 +326,7 @@ let insertDocSymbolField = function(coll, uri, conn, numDocs) { /** * Inserts array document with non-sequential indexes into the MongoDB server. */ -let insertNonSequentialArrayIndexes = function(coll, uri, conn, numDocs) { +export let insertNonSequentialArrayIndexes = function(coll, uri, conn, numDocs) { for (let i = 0; i < numDocs; ++i) { coll.insert({arr: [1, 2, [1, [1, 2], 2], 3]}); } @@ -343,7 +345,7 @@ let insertNonSequentialArrayIndexes = function(coll, uri, conn, numDocs) { /** * Inserts documents with invalid regex options into the MongoDB server. */ -let insertInvalidRegex = function(coll, mongod, nDocuments) { +export let insertInvalidRegex = function(coll, mongod, nDocuments) { const regex = "a*.conn"; const options = 'gimsuy'; @@ -377,7 +379,7 @@ let insertInvalidRegex = function(coll, mongod, nDocuments) { /** * Inserts document with invalid UTF-8 string into the MongoDB server. 
*/ -let insertInvalidUTF8 = function(coll, uri, conn, numDocs) { +export let insertInvalidUTF8 = function(coll, uri, conn, numDocs) { for (let i = 0; i < numDocs; ++i) { coll.insert({validString: "\x70"}); } @@ -392,4 +394,4 @@ let insertInvalidUTF8 = function(coll, uri, conn, numDocs) { } }; rewriteTable(uri, conn, makeInvalidUTF8); -}; \ No newline at end of file +}; diff --git a/jstests/disk/repair_clustered_collection.js b/jstests/disk/repair_clustered_collection.js index 001ac2dbeb241..8a227033c5dea 100644 --- a/jstests/disk/repair_clustered_collection.js +++ b/jstests/disk/repair_clustered_collection.js @@ -4,9 +4,12 @@ * * @tags: [requires_wiredtiger] */ -(function() { +import { + assertRepairSucceeds, + getUriForColl, + startMongodOnExistingPath +} from "jstests/disk/libs/wt_file_helper.js"; -load('jstests/disk/libs/wt_file_helper.js'); load("jstests/libs/collection_drop_recreate.js"); const dbName = jsTestName(); @@ -55,7 +58,7 @@ const runRepairTest = function runRepairTestOnMongoDInstance( // Ensure the orphaned collection is valid and the document is preserved. const orphanedImportantCollName = "orphan." + testCollUri.replace(/-/g, "_"); const localDb = mongod.getDB("local"); - orphanedCollection = localDb[orphanedImportantCollName]; + let orphanedCollection = localDb[orphanedImportantCollName]; assert(orphanedCollection.exists()); assert.eq(orphanedCollection.count(expectedOrphanDoc), 1, @@ -83,5 +86,4 @@ docToInsert = { "timestamp": ISODate("2021-05-18T00:00:00.000Z"), "temp": 12 }; -runRepairTest(clusteredCollOptions, docToInsert, isTimeseries); -})(); +runRepairTest(clusteredCollOptions, docToInsert, isTimeseries); \ No newline at end of file diff --git a/jstests/disk/repair_corrupt_document.js b/jstests/disk/repair_corrupt_document.js index 428bbf247566d..ffc5198ced560 100644 --- a/jstests/disk/repair_corrupt_document.js +++ b/jstests/disk/repair_corrupt_document.js @@ -2,9 +2,12 @@ * Tests that --repair deletes corrupt BSON documents. 
*/ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertQueryUsesIndex, + assertRepairSucceeds, + getUriForIndex, + startMongodOnExistingPath, +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "repair_corrupt_document"; const collName = "test"; @@ -83,5 +86,4 @@ let corruptDocumentOnInsert = function(db, coll) { MongoRunner.stopMongod(mongod); jsTestLog("Exiting runValidateWithRepairMode."); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/disk/repair_does_not_invalidate_config_on_standalone.js b/jstests/disk/repair_does_not_invalidate_config_on_standalone.js index 3560ce5331124..1b7d95ebedc17 100644 --- a/jstests/disk/repair_does_not_invalidate_config_on_standalone.js +++ b/jstests/disk/repair_does_not_invalidate_config_on_standalone.js @@ -4,9 +4,11 @@ * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertRepairSucceeds, + assertStartAndStopStandaloneOnExistingDbpath, + getUriForColl, +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "repair_does_not_invalidate_config_on_standalone"; const dbName = baseName; @@ -40,5 +42,4 @@ assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) { assert.eq(nodeDB[collName].find().itcount(), 0); assert(!nodeDB.getSiblingDB("local")["system.replset"].exists()); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/disk/repair_duplicate_keys.js b/jstests/disk/repair_duplicate_keys.js index 4b639b9b7ba2d..9e8bd5da2bc70 100644 --- a/jstests/disk/repair_duplicate_keys.js +++ b/jstests/disk/repair_duplicate_keys.js @@ -5,9 +5,12 @@ * @tags: [requires_wiredtiger] */ -(function() { +import { + assertQueryUsesIndex, + assertRepairSucceeds, + startMongodOnExistingPath +} from "jstests/disk/libs/wt_file_helper.js"; -load('jstests/disk/libs/wt_file_helper.js'); load("jstests/libs/uuid_util.js"); const baseName = "repair_duplicate_keys"; @@ -255,5 +258,4 @@ runRepairAndVerifyCollectionDocs(); MongoRunner.stopMongod(mongod); jsTestLog("Exiting checkLostAndFoundCollForDoubleDup."); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/disk/repair_failure_is_recoverable.js b/jstests/disk/repair_failure_is_recoverable.js index 2ede4bfe36db0..4b5a693f5e3d8 100644 --- a/jstests/disk/repair_failure_is_recoverable.js +++ b/jstests/disk/repair_failure_is_recoverable.js @@ -5,9 +5,12 @@ * This is not storage-engine specific. */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertErrorOnStartupAfterIncompleteRepair, + assertRepairFailsWithFailpoint, + assertRepairSucceeds, + assertStartAndStopStandaloneOnExistingDbpath, +} from "jstests/disk/libs/wt_file_helper.js"; const exitBeforeRepairParameter = "exitBeforeDataRepair"; const exitBeforeRepairInvalidatesConfigParameter = "exitBeforeRepairInvalidatesConfig"; @@ -58,5 +61,4 @@ assertStartAndStopStandaloneOnExistingDbpath(dbpath, port, function(node) { let nodeDB = node.getDB(dbName); assert(nodeDB[collName].exists()); assert.eq(nodeDB[collName].find().itcount(), 1); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/disk/repair_index_format_version.js b/jstests/disk/repair_index_format_version.js index 46922a98b0904..c18a29f962494 100644 --- a/jstests/disk/repair_index_format_version.js +++ b/jstests/disk/repair_index_format_version.js @@ -2,9 +2,11 @@ * Tests that mismatch of index type and index format version will be resolved during startup. 
*/ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + alterIndexFormatVersion, + getUriForIndex, + startMongodOnExistingPath +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "repair_index_format_version"; const collNamePrefix = "test_"; @@ -43,5 +45,4 @@ alterIndexFormatVersion(uri, mongod, 14); mongod = startMongodOnExistingPath(dbpath); checkLog.containsJson(mongod, 6818600); -MongoRunner.stopMongod(mongod, null, {skipValidation: true}); -})(); +MongoRunner.stopMongod(mongod, null, {skipValidation: true}); \ No newline at end of file diff --git a/jstests/disk/repair_invalidates_replica_set_config.js b/jstests/disk/repair_invalidates_replica_set_config.js index 8b3745f5d778a..256317a3e96d8 100644 --- a/jstests/disk/repair_invalidates_replica_set_config.js +++ b/jstests/disk/repair_invalidates_replica_set_config.js @@ -5,9 +5,13 @@ * @tags: [requires_wiredtiger, requires_replication] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertErrorOnStartupWhenStartingAsReplSet, + assertRepairSucceeds, + assertStartAndStopStandaloneOnExistingDbpath, + assertStartInReplSet, + getUriForColl, +} from "jstests/disk/libs/wt_file_helper.js"; // This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts. TestData.skipEnforceFastCountOnValidate = true; @@ -123,5 +127,4 @@ secondary = assertStartInReplSet( assert.eq(nodeDB[collName].find().itcount(), 1); }); -replSet.stopSet(); -})(); +replSet.stopSet(); \ No newline at end of file diff --git a/jstests/disk/repair_unfinished_indexes.js b/jstests/disk/repair_unfinished_indexes.js index 0f2a84fc68ab2..ae0e79b1aeff5 100644 --- a/jstests/disk/repair_unfinished_indexes.js +++ b/jstests/disk/repair_unfinished_indexes.js @@ -5,9 +5,15 @@ * @tags: [requires_wiredtiger, requires_replication] */ -(function() { +import { + assertErrorOnStartupWhenStartingAsReplSet, + assertRepairSucceeds, + assertStartInReplSet, + corruptFile, + getUriForColl, + startMongodOnExistingPath, +} from "jstests/disk/libs/wt_file_helper.js"; -load('jstests/disk/libs/wt_file_helper.js'); load('jstests/noPassthrough/libs/index_build.js'); const dbName = "repair_unfinished_indexes"; @@ -91,5 +97,4 @@ assertErrorOnStartupWhenStartingAsReplSet( newSecondary.getDB(dbName).getCollection(collName), 2, ["_id_", "a_1"]); })(); -replSet.stopSet(); -})(); +replSet.stopSet(); \ No newline at end of file diff --git a/jstests/disk/validate_bson_inconsistency.js b/jstests/disk/validate_bson_inconsistency.js index 7f23e3ff8c5fe..5740e3a1e5db4 100644 --- a/jstests/disk/validate_bson_inconsistency.js +++ b/jstests/disk/validate_bson_inconsistency.js @@ -4,9 +4,15 @@ * @tags: [requires_fcv_62] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + getUriForColl, + insertDocDuplicateFieldName, + insertDocSymbolField, + insertInvalidRegex, + insertInvalidUTF8, + insertNonSequentialArrayIndexes, + startMongodOnExistingPath, +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "validate_bson_inconsistency"; const collNamePrefix = "test_"; @@ -236,7 +242,7 @@ resetDbpath(dbpath); db = mongod.getDB(baseName); testColl = db[collName]; - res = assert.commandWorked(testColl.validate()); + let res = assert.commandWorked(testColl.validate()); assert(res.valid, tojson(res)); assert.eq(res.nNonCompliantDocuments, 10); assert.eq(res.warnings.length, 1); @@ -266,7 +272,7 @@ resetDbpath(dbpath); db = mongod.getDB(baseName); testColl = db[collName]; - res = 
assert.commandWorked(testColl.validate()); + let res = assert.commandWorked(testColl.validate()); assert(res.valid, tojson(res)); assert.eq(res.nNonCompliantDocuments, 0); assert.eq(res.warnings.length, 0); @@ -319,5 +325,4 @@ resetDbpath(dbpath); assert.eq(res.warnings.length, 1); MongoRunner.stopMongod(mongod, null, {skipValidation: true}); -})(); -})(); +})(); \ No newline at end of file diff --git a/jstests/disk/wt_corrupt_file_errors.js b/jstests/disk/wt_corrupt_file_errors.js index bd799a992ea99..46cb462125fd7 100644 --- a/jstests/disk/wt_corrupt_file_errors.js +++ b/jstests/disk/wt_corrupt_file_errors.js @@ -4,9 +4,13 @@ * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertErrorOnRequestWhenFilesAreCorruptOrMissing, + assertErrorOnStartupWhenFilesAreCorruptOrMissing, + corruptFile, + getUriForColl, + getUriForIndex, +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "wt_corrupt_file_errors"; const collName = "test"; @@ -80,5 +84,4 @@ assertErrorOnRequestWhenFilesAreCorruptOrMissing( testColl.insert({a: 1}); }); }, - new RegExp("Fatal assertion.*50882")); -})(); + new RegExp("Fatal assertion.*50882")); \ No newline at end of file diff --git a/jstests/disk/wt_missing_file_errors.js b/jstests/disk/wt_missing_file_errors.js index 85310fa82e901..e71220d7352ec 100644 --- a/jstests/disk/wt_missing_file_errors.js +++ b/jstests/disk/wt_missing_file_errors.js @@ -4,9 +4,12 @@ * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertErrorOnRequestWhenFilesAreCorruptOrMissing, + assertErrorOnStartupWhenFilesAreCorruptOrMissing, + getUriForColl, + getUriForIndex, +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "wt_missing_file_errors"; const collName = "test"; @@ -80,5 +83,4 @@ assertErrorOnRequestWhenFilesAreCorruptOrMissing( testColl.insert({a: 1}); }); }, - new RegExp("Fatal assertion.*50883")); -})(); + new RegExp("Fatal assertion.*50883")); \ No newline at end of file diff --git a/jstests/disk/wt_repair_corrupt_files.js b/jstests/disk/wt_repair_corrupt_files.js index 2c0dcd7c67543..4968fc87f3f5d 100644 --- a/jstests/disk/wt_repair_corrupt_files.js +++ b/jstests/disk/wt_repair_corrupt_files.js @@ -5,9 +5,16 @@ * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertErrorOnStartupAfterIncompleteRepair, + assertQueryUsesIndex, + assertRepairFails, + assertRepairSucceeds, + corruptFile, + getUriForColl, + getUriForIndex, + startMongodOnExistingPath, +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "wt_repair_corrupt_files"; const collName = "test"; @@ -169,5 +176,4 @@ let runTest = function(mongodOptions) { runTest({}); runTest({directoryperdb: ""}); -runTest({wiredTigerDirectoryForIndexes: ""}); -})(); +runTest({wiredTigerDirectoryForIndexes: ""}); \ No newline at end of file diff --git a/jstests/disk/wt_repair_corrupt_metadata.js b/jstests/disk/wt_repair_corrupt_metadata.js index 6e529bf5d90d9..0ef7eb8df0e49 100644 --- a/jstests/disk/wt_repair_corrupt_metadata.js +++ b/jstests/disk/wt_repair_corrupt_metadata.js @@ -5,9 +5,7 @@ * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import {assertRepairSucceeds, startMongodOnExistingPath} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "wt_repair_corrupt_metadata"; const collName = "test"; @@ -99,5 +97,4 @@ let runTest = function(mongodOptions) { 
MongoRunner.stopMongod(mongod); }; -runTest({}); -})(); +runTest({}); \ No newline at end of file diff --git a/jstests/disk/wt_repair_inconsistent_index.js b/jstests/disk/wt_repair_inconsistent_index.js index f11e03a5e0502..f409bd13decab 100644 --- a/jstests/disk/wt_repair_inconsistent_index.js +++ b/jstests/disk/wt_repair_inconsistent_index.js @@ -4,9 +4,14 @@ * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertQueryUsesIndex, + assertRepairSucceeds, + getUriForColl, + getUriForIndex, + startMongodOnExistingPath, + truncateUriAndRestartMongod, +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "wt_repair_inconsistent_index"; const collName = "test"; @@ -114,5 +119,4 @@ let runTest = function(mongodOptions) { runTest({}); runTest({directoryperdb: ""}); -runTest({wiredTigerDirectoryForIndexes: ""}); -})(); +runTest({wiredTigerDirectoryForIndexes: ""}); \ No newline at end of file diff --git a/jstests/disk/wt_repair_missing_files.js b/jstests/disk/wt_repair_missing_files.js index 7f3b8ce42d061..a97da2bfbad02 100644 --- a/jstests/disk/wt_repair_missing_files.js +++ b/jstests/disk/wt_repair_missing_files.js @@ -5,9 +5,13 @@ * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import { + assertQueryUsesIndex, + assertRepairSucceeds, + getUriForColl, + getUriForIndex, + startMongodOnExistingPath, +} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "wt_repair_missing_files"; const collName = "test"; @@ -167,5 +171,4 @@ testColl = mongod.getDB(baseName)[collName]; assert.eq(testColl.find(doc).itcount(), 1); assert.eq(testColl.count(), 1); -MongoRunner.stopMongod(mongod); -})(); +MongoRunner.stopMongod(mongod); \ No newline at end of file diff --git a/jstests/disk/wt_repair_orphaned_idents.js b/jstests/disk/wt_repair_orphaned_idents.js index 83d3bfee42473..606e2269f5553 100644 --- a/jstests/disk/wt_repair_orphaned_idents.js +++ b/jstests/disk/wt_repair_orphaned_idents.js @@ -4,9 +4,7 @@ * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import {getUriForColl} from "jstests/disk/libs/wt_file_helper.js"; const baseName = "wt_repair_orphaned_idents"; const dbpath = MongoRunner.dataPath + baseName + "/"; @@ -82,5 +80,4 @@ for (let entry of res.cursor.firstBatch) { assert(testDb[collName].drop()); } -MongoRunner.stopMongod(mongod); -})(); +MongoRunner.stopMongod(mongod); \ No newline at end of file diff --git a/jstests/disk/wt_startup_with_missing_user_collection.js b/jstests/disk/wt_startup_with_missing_user_collection.js index 23752fa20ba0e..f48cbc9f2f500 100644 --- a/jstests/disk/wt_startup_with_missing_user_collection.js +++ b/jstests/disk/wt_startup_with_missing_user_collection.js @@ -4,9 +4,12 @@ * * @tags: [requires_wiredtiger] */ -(function() { +import { + getUriForColl, + getUriForIndex, + startMongodOnExistingPath +} from "jstests/disk/libs/wt_file_helper.js"; -load('jstests/disk/libs/wt_file_helper.js'); load('jstests/noPassthrough/libs/index_build.js'); // This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts. 
@@ -74,5 +77,4 @@ assert.neq(null, mongod, "Failed to start"); testDB = mongod.getDB(dbName); assert(testDB.getCollection("a").drop()); -MongoRunner.stopMongod(mongod); -}()); +MongoRunner.stopMongod(mongod); \ No newline at end of file diff --git a/jstests/disk/wt_table_checks.js b/jstests/disk/wt_table_checks.js index cb8003c92811b..ce30ea921aae6 100644 --- a/jstests/disk/wt_table_checks.js +++ b/jstests/disk/wt_table_checks.js @@ -4,9 +4,7 @@ * * @tags: [requires_wiredtiger] */ -(function() { - -load('jstests/disk/libs/wt_file_helper.js'); +import {startMongodOnExistingPath} from "jstests/disk/libs/wt_file_helper.js"; function checkTableLogSettings(conn, enabled) { conn.getDBNames().forEach(function(d) { @@ -40,7 +38,7 @@ function checkTableLogSettings(conn, enabled) { function checkTableChecksFileRemoved(dbpath) { let files = listFiles(dbpath); - for (file of files) { + for (let file of files) { assert.eq(false, file.name.includes("_wt_table_checks")); } } @@ -123,5 +121,4 @@ checkLog.containsJson(conn, 22432); // Skipping table logging checks. assert(checkLog.checkContainsWithCountJson(conn, 5548302, undefined, 0)); checkTableLogSettings(conn, /*enabled=*/ true); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/disk/wt_validate_table_logging.js b/jstests/disk/wt_validate_table_logging.js index 939280feeb8ea..7659075266e8e 100644 --- a/jstests/disk/wt_validate_table_logging.js +++ b/jstests/disk/wt_validate_table_logging.js @@ -5,10 +5,7 @@ * requires_wiredtiger, * ] */ -(function() { -'use strict'; - -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; let conn = MongoRunner.runMongod(); @@ -92,5 +89,4 @@ if (csiEnabled) { {index: '$**_columnstore', uri: indexUri(conn, '$**_columnstore'), expected: true}); } -MongoRunner.stopMongod(conn, null, {skipValidation: true}); -}()); +MongoRunner.stopMongod(conn, null, {skipValidation: true}); \ No newline at end of file diff --git a/jstests/fle2/convert_encrypted_to_capped.js b/jstests/fle2/convert_encrypted_to_capped.js index 17be6f78c53cb..e977f2e5bf545 100644 --- a/jstests/fle2/convert_encrypted_to_capped.js +++ b/jstests/fle2/convert_encrypted_to_capped.js @@ -8,13 +8,10 @@ * requires_fcv_70 * ] */ -load("jstests/fle2/libs/encrypted_client_util.js"); - -(function() { -'use strict'; +import {isFLE2ReplicationEnabled} from "jstests/fle2/libs/encrypted_client_util.js"; if (!isFLE2ReplicationEnabled()) { - return; + quit(); } const dbTest = db.getSiblingDB('convert_encrypted_to_capped_db'); @@ -42,4 +39,3 @@ assert.commandFailedWithCode( dbTest.runCommand({cloneCollectionAsCapped: "basic", toCollection: "capped", size: 100000}), 6367302, "Clone encrypted collection as capped passed"); -}()); diff --git a/jstests/fle2/create_encrypted_collection.js b/jstests/fle2/create_encrypted_collection.js index b186f8cb7591a..6308abbb6db9e 100644 --- a/jstests/fle2/create_encrypted_collection.js +++ b/jstests/fle2/create_encrypted_collection.js @@ -6,11 +6,6 @@ * requires_fcv_70 * ] */ -load("jstests/fle2/libs/encrypted_client_util.js"); - -(function() { -'use strict'; - let dbTest = db.getSiblingDB('create_encrypted_collection_db'); dbTest.dropDatabase(); @@ -85,5 +80,4 @@ assert.commandWorked(dbTest.createCollection("basic_int32_cf", { "queries": {"queryType": "equality", contention: NumberInt(123)} }] } -})); -}()); +})); \ No newline at end of file diff --git 
a/jstests/fle2/create_encrypted_indexes.js b/jstests/fle2/create_encrypted_indexes.js index ce16988325e01..b1b0dd52ee274 100644 --- a/jstests/fle2/create_encrypted_indexes.js +++ b/jstests/fle2/create_encrypted_indexes.js @@ -6,11 +6,6 @@ * requires_fcv_70 * ] */ -load("jstests/fle2/libs/encrypted_client_util.js"); - -(function() { -'use strict'; - let dbTest = db.getSiblingDB('create_encrypted_indexes_db'); dbTest.basic.drop(); @@ -73,5 +68,4 @@ assert.commandFailedWithCode(res, 6346502, "Create compound index on encrypted f assert.commandWorked(dbTest.basic.createIndex({"paymentMethods.creditCards.notNumber": 1})); // A wildcard index on the entire document is allowed. -assert.commandWorked(dbTest.basic.createIndex({"$**": 1})); -}()); +assert.commandWorked(dbTest.basic.createIndex({"$**": 1})); \ No newline at end of file diff --git a/jstests/fle2/libs/encrypted_client_util.js b/jstests/fle2/libs/encrypted_client_util.js index a74099a2704a7..7bf8d0b81c2fc 100644 --- a/jstests/fle2/libs/encrypted_client_util.js +++ b/jstests/fle2/libs/encrypted_client_util.js @@ -1,13 +1,13 @@ load("jstests/concurrency/fsm_workload_helpers/server_types.js"); // For isMongos. -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; /** * Create a FLE client that has an unencrypted and encrypted client to the same database */ -var kSafeContentField = "__safeContent__"; +export var kSafeContentField = "__safeContent__"; -var EncryptedClient = class { +export var EncryptedClient = class { /** * Create a new encrypted FLE connection to the target server with a local KMS * @@ -21,6 +21,14 @@ var EncryptedClient = class { // use this.useImplicitSharding = !(typeof (ImplicitlyShardAccessCollSettings) === "undefined"); + if (conn.isAutoEncryptionEnabled()) { + this._keyVault = conn.getKeyVault(); + this._edb = conn.getDB(dbName); + this._db = undefined; + this._admindb = conn.getDB("admin"); + return; + } + const localKMS = { key: BinData( 0, @@ -69,6 +77,13 @@ var EncryptedClient = class { return this._edb; } + /** + * Creates a session on the encryptedClient. + */ + startSession() { + return this._edb.getMongo().startSession(); + } + /** * Return an encrypted database * @@ -158,21 +173,21 @@ var EncryptedClient = class { // All our tests use "last" as the key to query on so shard on "last" instead of "_id" if (this.useImplicitSharding) { - let resShard = this._db.adminCommand({enableSharding: this._db.getName()}); + let resShard = this._edb.adminCommand({enableSharding: this._edb.getName()}); // enableSharding may only be called once for a database. if (resShard.code !== ErrorCodes.AlreadyInitialized) { assert.commandWorked( - resShard, "enabling sharding on the '" + this._db.getName() + "' db failed"); + resShard, "enabling sharding on the '" + this._edb.getName() + "' db failed"); } let shardCollCmd = { - shardCollection: this._db.getName() + "." + name, + shardCollection: this._edb.getName() + "." 
+ name, key: {last: "hashed"}, collation: {locale: "simple"} }; - resShard = this._db.adminCommand(shardCollCmd); + resShard = this._edb.adminCommand(shardCollCmd); jsTestLog("Sharding: " + tojson(shardCollCmd)); } @@ -207,7 +222,7 @@ var EncryptedClient = class { if (tenantId) { Object.extend(listCollCmdObj, {"$tenant": tenantId}); } - const cis = assert.commandWorked(this._db.runCommand(listCollCmdObj)); + const cis = assert.commandWorked(this._edb.runCommand(listCollCmdObj)); assert.eq( cis.cursor.firstBatch.length, 1, `Expected to find one collection named '${name}'`); @@ -244,7 +259,7 @@ var EncryptedClient = class { const actualEcoc = countDocuments(sessionDB, ef.ecocCollection, tenantId); assert.eq(actualEcoc, - this.ecocCountMatchesEscCount ? expectedEsc : expectedEcoc, + expectedEcoc, `ECOC document count is wrong: Actual ${actualEcoc} vs Expected ${expectedEcoc}`); } @@ -258,7 +273,24 @@ var EncryptedClient = class { */ assertEncryptedCollectionCounts(name, expectedEdc, expectedEsc, expectedEcoc, tenantId) { this.assertEncryptedCollectionCountsByObject( - this._db, name, expectedEdc, expectedEsc, expectedEcoc, tenantId); + this._edb, name, expectedEdc, expectedEsc, expectedEcoc, tenantId); + } + + /** + * Assert the number of non-anchor documents in the ESC associated with the given EDC + * collection name matches the expected. + * + * @param {string} name Name of EDC + * @param {number} expectedCount Number of non-anchors expected in ESC + */ + assertESCNonAnchorCount(name, expectedCount) { + const escName = this.getStateCollectionNamespaces(name).esc; + const actualCount = + this._edb.getCollection(escName).countDocuments({"value": {"$exists": false}}); + assert.eq( + actualCount, + expectedCount, + `ESC non-anchor count is wrong: Actual ${actualCount} vs Expected ${expectedCount}`); } /** @@ -371,7 +403,10 @@ var EncryptedClient = class { assert.docEq(docs, onDiskDocs); } - assertStateCollectionsAfterCompact(collName, ecocExists, ecocTempExists = false) { + assertStateCollectionsAfterCompact(collName, + ecocExists, + ecocTempExists = false, + escDeletesExists = false) { const baseCollInfos = this._edb.getCollectionInfos({"name": collName}); assert.eq(baseCollInfos.length, 1); const baseCollInfo = baseCollInfos[0]; @@ -380,10 +415,11 @@ var EncryptedClient = class { const checkMap = {}; // Always expect the ESC collection, optionally expect ECOC. - // ECOC is not expected in sharded clusters. 
checkMap[baseCollInfo.options.encryptedFields.escCollection] = true; checkMap[baseCollInfo.options.encryptedFields.ecocCollection] = ecocExists; checkMap[baseCollInfo.options.encryptedFields.ecocCollection + ".compact"] = ecocTempExists; + checkMap[baseCollInfo.options.encryptedFields.escCollection + ".deletes"] = + escDeletesExists; const edb = this._edb; Object.keys(checkMap).forEach(function(coll) { @@ -394,7 +430,7 @@ var EncryptedClient = class { } }; -function runEncryptedTest(db, dbName, collName, encryptedFields, runTestsCallback) { +export function runEncryptedTest(db, dbName, collNames, encryptedFields, runTestsCallback) { const dbTest = db.getSiblingDB(dbName); dbTest.dropDatabase(); @@ -408,8 +444,14 @@ function runEncryptedTest(db, dbName, collName, encryptedFields, runTestsCallbac let client = new EncryptedClient(db.getMongo(), dbName); - assert.commandWorked( - client.createEncryptionCollection(collName, {encryptedFields: encryptedFields})); + if (typeof collNames === "string") { + collNames = [collNames]; + } + + for (let collName of collNames) { + assert.commandWorked( + client.createEncryptionCollection(collName, {encryptedFields: encryptedFields})); + } let edb = client.getDB(); runTestsCallback(edb, client); @@ -418,22 +460,21 @@ function runEncryptedTest(db, dbName, collName, encryptedFields, runTestsCallbac /** * @returns Returns true if talking to a replica set */ -function isFLE2ReplicationEnabled() { +export function isFLE2ReplicationEnabled() { return typeof (testingReplication) == "undefined" || testingReplication === true; } -// TODO SERVER-67760 remove once feature flag is gone /** - * @returns Returns true if featureFlagFLE2Range is enabled + * @returns Returns true if featureFlagFLE2CleanupCommand is enabled */ -function isFLE2RangeEnabled(db) { - return FeatureFlagUtil.isPresentAndEnabled(db, "FLE2Range"); +export function isFLE2CleanupEnabled(db) { + return FeatureFlagUtil.isEnabled(db, "FLE2CleanupCommand"); } /** * @returns Returns true if internalQueryFLEAlwaysUseEncryptedCollScanMode is enabled */ -function isFLE2AlwaysUseCollScanModeEnabled(db) { +export function isFLE2AlwaysUseCollScanModeEnabled(db) { const doc = assert.commandWorked( db.adminCommand({getParameter: 1, internalQueryFLEAlwaysUseEncryptedCollScanMode: 1})); return (doc.internalQueryFLEAlwaysUseEncryptedCollScanMode === true); @@ -445,7 +486,7 @@ function isFLE2AlwaysUseCollScanModeEnabled(db) { * * @param {BinData} value bindata value */ -function assertIsIndexedEncryptedField(value) { +export function assertIsIndexedEncryptedField(value) { assert(value instanceof BinData, "Expected BinData, found: " + value); assert.eq(value.subtype(), 6, "Expected Encrypted bindata: " + value); assert(value.hex().startsWith("0e") || value.hex().startsWith("0f"), @@ -457,7 +498,7 @@ function assertIsIndexedEncryptedField(value) { * * @param {BinData} value bindata value */ -function assertIsEqualityIndexedEncryptedField(value) { +export function assertIsEqualityIndexedEncryptedField(value) { assert(value instanceof BinData, "Expected BinData, found: " + value); assert.eq(value.subtype(), 6, "Expected Encrypted bindata: " + value); assert(value.hex().startsWith("0e"), @@ -469,7 +510,7 @@ function assertIsEqualityIndexedEncryptedField(value) { * * @param {BinData} value bindata value */ -function assertIsRangeIndexedEncryptedField(value) { +export function assertIsRangeIndexedEncryptedField(value) { assert(value instanceof BinData, "Expected BinData, found: " + value); assert.eq(value.subtype(), 6, 
"Expected Encrypted bindata: " + value); assert(value.hex().startsWith("0f"), @@ -481,7 +522,7 @@ function assertIsRangeIndexedEncryptedField(value) { * * @param {BinData} value bindata value */ -function assertIsUnindexedEncryptedField(value) { +export function assertIsUnindexedEncryptedField(value) { assert(value instanceof BinData, "Expected BinData, found: " + value); assert.eq(value.subtype(), 6, "Expected Encrypted bindata: " + value); assert(value.hex().startsWith("10"), diff --git a/jstests/fle2/libs/qe_state_collection_stats_tracker.js b/jstests/fle2/libs/qe_state_collection_stats_tracker.js new file mode 100644 index 0000000000000..004175f6130ea --- /dev/null +++ b/jstests/fle2/libs/qe_state_collection_stats_tracker.js @@ -0,0 +1,239 @@ +/** + * Class that tracks the document counts in the QE state collections for every unique + * field+value pair that exists in the encrypted data collection. + * + * NOTE: This tracker is only accurate if the encrypted fields being tracked all have + * a contention factor of 0. Also, the type of the encrypted value has to be a string. + */ +class QEStateCollectionStatsTracker { + constructor() { + /* fieldStats is a map of field names to a map of values mapped to objects + containing stats counters. For example: + { + "first" : { + "erwin" : { nonAnchors: 2, anchors: 0, nullAnchor: false, ecoc: 2, new: true}, + ... + }, + ... + } + */ + this.fieldStats = {}; + } + + /** + * Updates the stats after inserting a single encrypted document that contains the + * specified field (key) and value pair. + * Every insert of an encrypted field adds one non-anchor to the ESC and adds one + * entry in the ECOC. + * + * @param {string} key the field name + * @param {string} value the field value + */ + updateStatsPostInsert(key, value) { + if (!this.fieldStats.hasOwnProperty(key)) { + this.fieldStats[key] = {}; + } + + const field = this.fieldStats[key]; + if (field.hasOwnProperty(value)) { + field[value].nonAnchors++; + field[value].ecoc++; + } else { + field[value] = {nonAnchors: 1, anchors: 0, nullAnchor: false, ecoc: 1, new: true}; + } + } + + /** + * Updates the stats after compacting the collection where documents + * containing the specified encrypted fields exist. + * For every encrypted value that has been inserted for each field that has not been + * compacted/cleaned-up (i.e. has one or more ECOC entries), we update the stats for this + * field+value pair by adding one ESC anchor, and clearing the counts for non-anchors & ecoc. + * + * This assumes that all non-anchors & ecoc entries for this value have been deleted after + * compaction. + * + * @param {string} keys list of field names that were compacted + */ + updateStatsPostCompactForFields(...keys) { + keys.forEach(key => { + if (!this.fieldStats.hasOwnProperty(key)) { + print("Skipping field " + key + + " in updateStatsPostCompact because it is not tracked"); + return; + } + const field = this.fieldStats[key]; + Object.entries(field).forEach(([value, stats]) => { + if (stats.ecoc > 0) { + stats.anchors++; + stats.nonAnchors = 0; + stats.ecoc = 0; + } + stats.new = false; + }); + }); + } + + /** + * Updates the stats after cleanup of the encrypted collection where documents + * containing the specified encrypted fields exist. + * For every field+value pair that has been inserted but not yet compacted/cleaned-up + * (i.e. 
has one or more ECOC entries), we update the stats for this field+value pair + * by adding one ESC null anchor (if none exists yet), and clearing the + * counts for normal anchors, non-anchors, & ecoc. + * + * This assumes that all non-anchors and normal anchors for this value have been deleted + * from the ESC after cleanup. This also assumes all ECOC entries for this value have + * been deleted post-cleanup. + * + * @param {string} keys list of field names that were compacted + */ + updateStatsPostCleanupForFields(...keys) { + keys.forEach(key => { + if (!this.fieldStats.hasOwnProperty(key)) { + print("Skipping field " + key + + " in updateStatsPostCleanup because it is not tracked"); + return; + } + const field = this.fieldStats[key]; + Object.entries(field).forEach(([value, stats]) => { + if (stats.ecoc > 0) { + stats.nullAnchor = true; + stats.nonAnchors = 0; + stats.anchors = 0; + stats.ecoc = 0; + } + stats.new = false; + }); + }); + } + + /** + * Returns an object that contains the aggregated statistics for each + * field specified in keys. + * + * @param {string} keys list of field names that were compacted + * @returns {Object} + */ + calculateTotalStatsForFields(...keys) { + const totals = { + esc: 0, // # of ESC entries + escNonAnchors: 0, // # of ESC non-anchors + escAnchors: 0, // # of ESC anchors + escNullAnchors: 0, // # of ESC null anchors + escDeletableAnchors: 0, // # of ESC anchors that may be deleted in the next cleanup + escFutureNullAnchors: 0, // # of null anchors that may be inserted in the next cleanup + ecoc: 0, // # of ECOC entries + ecocUnique: 0, // # of ECOC entries that are unique + new: 0, // # of new values + }; + keys.forEach(key => { + if (!this.fieldStats.hasOwnProperty(key)) { + print("Skipping field " + key + " in stats aggregation because it is not tracked"); + return; + } + const field = this.fieldStats[key]; + Object.entries(field).forEach(([value, stats]) => { + totals.esc += (stats.nonAnchors + stats.anchors + (stats.nullAnchor ? 1 : 0)); + totals.escNonAnchors += stats.nonAnchors; + totals.escAnchors += stats.anchors; + totals.escNullAnchors += (stats.nullAnchor ? 1 : 0); + totals.escDeletableAnchors += ((stats.ecoc > 0) ? stats.anchors : 0); + totals.escFutureNullAnchors += ((stats.ecoc > 0 && stats.nullAnchor == 0) ? 1 : 0); + totals.ecoc += stats.ecoc; + totals.ecocUnique += ((stats.ecoc > 0) ? 1 : 0); + totals.new += (stats.new ? 1 : 0); + }); + }); + + return totals; + } + + _calculateEstimatedEmuBinaryReads(nAnchors, nNonAnchors, hasNullAnchor, escSize) { + let total = 0; + + // anchor binary hops + // + total += 1; // null anchor read for lambda + let rho = 2; + if (nAnchors > 0) { + rho = Math.pow(2, Math.floor(Math.log2(nAnchors)) + 1); + } + total += Math.log2(rho); // # reads to find rho + total += Math.log2(rho); // # reads in the binary search iterations + total += (nAnchors == 0 ? 1 : 0); // extra read if no anchors exist + + // binary hops + // + total += (nAnchors > 0 || hasNullAnchor) ? 1 : 0; // anchor read for lambda + rho = Math.max(2, escSize); + total += 1; // estimated # of reads to find final value of rho + total += Math.ceil(Math.log2(rho)); // estimated # of binary search iterations + total += (nNonAnchors == 0 ? 1 : 0); // extra read if no non-anchors exist + return total; + } + + /** + * Returns a lower-bound on how many ESC reads will be performed if a + * cleanup is performed on the current encrypted collection state. 
+ * NOTE: call this *before* calling cleanup and before updating the tracker + * with updateStatsPostCleanupForFields. + * + * @param {string} keys list of field names that have been added to the encrypted collection + * @returns {int} + */ + calculateEstimatedESCReadCountForCleanup(...keys) { + let totals = this.calculateTotalStatsForFields(keys); + let estimate = 0; + + estimate += totals.escNonAnchors; // # of reads into in-mem delete set + + keys.forEach(key => { + if (!this.fieldStats.hasOwnProperty(key)) { + return; + } + const field = this.fieldStats[key]; + Object.entries(field).forEach(([value, stats]) => { + if (stats.ecoc == 0) { + return; // value not compacted + } + estimate += 1; // null anchor read + estimate += this._calculateEstimatedEmuBinaryReads( + stats.anchors, stats.nonAnchors, stats.nullAnchor, totals.esc); + }); + }); + return estimate; + } + + /** + * Returns a lower-bound on how many ESC reads will be performed if a + * compact is performed on the current encrypted collection state. + * NOTE: call this *before* calling compact and before updating the tracker + * with updateStatsPostCompactForFields. + * + * @param {string} keys list of field names that have been added to the encrypted collection + * @returns {int} + */ + calculateEstimatedESCReadCountForCompact(...keys) { + let totals = this.calculateTotalStatsForFields(keys); + let estimate = 0; + + estimate += totals.escNonAnchors; // # of reads into in-mem delete set + + keys.forEach(key => { + if (!this.fieldStats.hasOwnProperty(key)) { + return; + } + const field = this.fieldStats[key]; + Object.entries(field).forEach(([value, stats]) => { + if (stats.ecoc == 0) { + return; // value not compacted + } + estimate += (stats.nullAnchor ? 1 : 0); // null anchor read + estimate += this._calculateEstimatedEmuBinaryReads( + stats.anchors, stats.nonAnchors, stats.nullAnchor, totals.esc); + }); + }); + return estimate; + } +} diff --git a/jstests/fle2/modify_encrypted_collection.js b/jstests/fle2/modify_encrypted_collection.js index 7cae63cdeeed0..a31d55f4ffb93 100644 --- a/jstests/fle2/modify_encrypted_collection.js +++ b/jstests/fle2/modify_encrypted_collection.js @@ -6,11 +6,6 @@ * requires_fcv_70 * ] */ -load("jstests/fle2/libs/encrypted_client_util.js"); - -(function() { -'use strict'; - let dbTest = db.getSiblingDB('modify_encrypted_collection_db'); dbTest.basic.drop(); @@ -38,5 +33,4 @@ assert.commandFailedWithCode(dbTest.runCommand({collMod: "basic", validationLeve ErrorCodes.BadValue); assert.commandWorked( - dbTest.runCommand({collMod: "basic", validationLevel: "strict", validationAction: "error"})); -}()); + dbTest.runCommand({collMod: "basic", validationLevel: "strict", validationAction: "error"})); \ No newline at end of file diff --git a/jstests/fle2/shard_collection.js b/jstests/fle2/shard_collection.js index e6ce9da60372c..be7cbce121e47 100644 --- a/jstests/fle2/shard_collection.js +++ b/jstests/fle2/shard_collection.js @@ -5,14 +5,11 @@ * requires_fcv_70 * ] */ -load("jstests/fle2/libs/encrypted_client_util.js"); - -(function() { -'use strict'; +import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js"; // Passthrough workaround if (!isMongos(db)) { - return; + quit(); } let dbName = 'shard_state'; @@ -38,4 +35,3 @@ assert.commandFailedWithCode( db.adminCommand({shardCollection: 'shard_state.enxcol_.basic.ecc', key: {_id: 1}}), 6464401); assert.commandFailedWithCode( db.adminCommand({shardCollection: 'shard_state.enxcol_.basic.ecoc', key: {_id: 1}}), 6464401); -}()); diff --git 
a/jstests/free_mon/free_mon_announce.js b/jstests/free_mon/free_mon_announce.js
index d78d0b58608d9..3c35e91cf40fa 100644
--- a/jstests/free_mon/free_mon_announce.js
+++ b/jstests/free_mon/free_mon_announce.js
@@ -14,20 +14,11 @@ const mongod = MongoRunner.runMongod({
 assert.neq(mongod, null, 'mongod not running');
 const admin = mongod.getDB('admin');
 
-function getConnectAnnounce() {
-    // Capture message as it'd be presented to a user.
-    clearRawMongoProgramOutput();
-    const exitCode = runMongoProgram(
-        'mongo', '--port', mongod.port, '--eval', "shellHelper( 'show', 'freeMonitoring' );");
-    assert.eq(exitCode, 0);
-    return rawMongoProgramOutput();
-}
-
 // state === 'enabled'.
 admin.enableFreeMonitoring();
 WaitForRegistration(mongod);
 const reminder = "To see your monitoring data";
-assert.neq(getConnectAnnounce().search(reminder), -1, 'userReminder not found');
+assert(FreeMonGetStatus(mongod).userReminder.includes(reminder), 'userReminder not found');
 
 // Cleanup.
 MongoRunner.stopMongod(mongod);
diff --git a/jstests/free_mon/free_mon_register_cmd.js b/jstests/free_mon/free_mon_register_cmd.js
index 295908663a591..006d022e6b2ad 100644
--- a/jstests/free_mon/free_mon_register_cmd.js
+++ b/jstests/free_mon/free_mon_register_cmd.js
@@ -21,9 +21,7 @@ assert.neq(null, conn, 'mongod was unable to start up');
 sleep(10 * 1000);
 
 // Then verify that no registrations happened since we haven't runtime enabled yet.
-assert.eq('undecided',
-          conn.getDB('admin').getFreeMonitoringStatus().state,
-          "Initial state should be 'undecided'");
+assert.eq('undecided', FreeMonGetStatus(conn).state, "Initial state should be 'undecided'");
 assert.eq(0, mock_web.queryStats().registers, "mongod registered without enabling free_mod");
 
 assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "enable"}));
@@ -31,8 +29,7 @@ assert.commandWorked(conn.adminCommand({setFreeMonitoring: 1, action: "enable"})
 WaitForFreeMonServerStatusState(conn, 'enabled');
 
 // The command should either timeout or succeed after registration is complete
-const retStatus1 = conn.adminCommand({getFreeMonitoringStatus: 1});
-assert.commandWorked(retStatus1);
+const retStatus1 = FreeMonGetStatus(conn);
 assert.eq(retStatus1.state, "enabled", tojson(retStatus1));
 
 const stats = mock_web.queryStats();
@@ -64,8 +61,7 @@ assert.soon(function() {
     return regDoc.state == "disabled";
 }, "Failed to unregister", 60 * 1000);
 
-const retStatus2 = conn.adminCommand({getFreeMonitoringStatus: 1});
-assert.commandWorked(retStatus2);
+const retStatus2 = FreeMonGetStatus(conn);
 assert.eq(retStatus2.state, "disabled", tojson(retStatus1));
 
 MongoRunner.stopMongod(conn);
diff --git a/jstests/free_mon/libs/free_mon.js b/jstests/free_mon/libs/free_mon.js
index 269f1e6530695..351f1a33e488f 100644
--- a/jstests/free_mon/libs/free_mon.js
+++ b/jstests/free_mon/libs/free_mon.js
@@ -288,7 +288,12 @@ function FreeMonGetStatus(conn) {
     'use strict';
 
     const admin = conn.getDB("admin");
-    return assert.commandWorked(admin.runCommand({getFreeMonitoringStatus: 1}));
+    const reply = assert.commandWorked(admin.runCommand({getFreeMonitoringStatus: 1}));
+    // FreeMonitoring has been deprecated and reports 'disabled' regardless of status.
+    assert.eq(reply.state, 'disabled', 'FreeMonitoring has been deprecated');
+
+    // Use the "true" state tucked into the 'debug' field if it's available.
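Editor's note (illustration only, not part of the patch): what the deprecation fallback above means for callers of FreeMonGetStatus(); the contents of the 'debug' document shown here are hypothetical.

    // The server now always reports 'disabled' at the top level; the pre-deprecation status,
    // when present, is nested under 'debug', and the helper returns that nested document.
    const reply = {ok: 1, state: 'disabled', debug: {state: 'enabled', userReminder: 'To see your monitoring data ...'}};
    const status = reply.debug || reply;
    assert.eq(status.state, 'enabled');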
+ return reply.debug || reply; } /** diff --git a/jstests/hooks/run_aggregate_metrics_background.js b/jstests/hooks/run_aggregate_metrics_background.js deleted file mode 100644 index 32335852fcd61..0000000000000 --- a/jstests/hooks/run_aggregate_metrics_background.js +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Runs the $operationMetrics stage and ensures that all the expected fields are present. - */ - -'use strict'; - -(function() { -load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology. - -if (typeof db === 'undefined') { - throw new Error( - "Expected mongo shell to be connected a server, but global 'db' object isn't defined"); -} - -// Disable implicit sessions so FSM workloads that kill random sessions won't interrupt the -// operations in this test that aren't resilient to interruptions. -TestData.disableImplicitSessions = true; - -const topology = DiscoverTopology.findConnectedNodes(db.getMongo()); - -const aggregateMetricsBackground = function(host) { - function verifyFields(doc) { - const kTopLevelFields = [ - "docBytesWritten", - "docUnitsWritten", - "idxEntryBytesWritten", - "idxEntryUnitsWritten", - "totalUnitsWritten", - "cpuNanos", - "db", - "primaryMetrics", - "secondaryMetrics" - ]; - const kReadFields = [ - "docBytesRead", - "docUnitsRead", - "idxEntryBytesRead", - "idxEntryUnitsRead", - "keysSorted", - "docUnitsReturned" - ]; - - for (let key of kTopLevelFields) { - assert(doc.hasOwnProperty(key), "The metrics output is missing the property: " + key); - } - let primaryMetrics = doc.primaryMetrics; - for (let key of kReadFields) { - assert(primaryMetrics.hasOwnProperty(key), - "The metrics output is missing the property: primaryMetrics." + key); - } - let secondaryMetrics = doc.secondaryMetrics; - for (let key of kReadFields) { - assert(secondaryMetrics.hasOwnProperty(key), - "The metrics output is missing the property: secondaryMetrics." + key); - } - } - - let conn = new Mongo(host); - conn.setSecondaryOk(); - - assert.neq( - null, conn, "Failed to connect to host '" + host + "' for background metrics collection"); - - // Filter out arbiters. - if (conn.adminCommand({isMaster: 1}).arbiterOnly) { - print("Skipping background aggregation against test node: " + host + - " because it is an arbiter and has no data."); - return; - } - - let db = conn.getDB("admin"); - let clearMetrics = Math.random() < 0.9 ? false : true; - print("Running $operationMetrics with {clearMetrics: " + clearMetrics + "} on host: " + host); - const cursor = db.aggregate([{$operationMetrics: {clearMetrics: clearMetrics}}]); - while (cursor.hasNext()) { - let doc = cursor.next(); - try { - verifyFields(doc); - } catch (e) { - print("caught exception while verifying that all expected fields are in the metrics " + - "output: " + tojson(doc)); - throw (e); - } - } -}; - -// This file is run continuously and is very fast so we want to impose some kind of rate limiting -// which is why we sleep for 1 second here. This sleep is here rather than in -// aggregate_metrics_background.py because the background job that file uses is designed to be run -// continuously so it is easier and cleaner to just sleep here. 
-sleep(1000); -if (topology.type === Topology.kStandalone) { - try { - aggregateMetricsBackground(topology.mongod); - } catch (e) { - print("background aggregate metrics against the standalone failed"); - throw e; - } -} else if (topology.type === Topology.kReplicaSet) { - for (let replicaMember of topology.nodes) { - try { - aggregateMetricsBackground(replicaMember); - } catch (e) { - print("background aggregate metrics was not successful against all replica set " + - "members"); - throw e; - } - } -} else { - throw new Error("Unsupported topology configuration: " + tojson(topology)); -} -})(); diff --git a/jstests/hooks/run_analyze_shard_key_background.js b/jstests/hooks/run_analyze_shard_key_background.js index 625eb6bc59e6d..96f361b45d28f 100644 --- a/jstests/hooks/run_analyze_shard_key_background.js +++ b/jstests/hooks/run_analyze_shard_key_background.js @@ -153,9 +153,15 @@ function getLatestSampleQueryDocument() { * of the resulting metrics. */ function analyzeShardKey(ns, shardKey, indexKey) { - jsTest.log(`Analyzing shard keys ${tojsononeline({ns, shardKey, indexKey})}`); - - const res = conn.adminCommand({analyzeShardKey: ns, key: shardKey}); + const cmdObj = {analyzeShardKey: ns, key: shardKey}; + const rand = Math.random(); + if (rand < 0.25) { + cmdObj.sampleRate = Math.random() * 0.5 + 0.5; + } else if (rand < 0.5) { + cmdObj.sampleSize = NumberLong(AnalyzeShardKeyUtil.getRandInteger(1000, 10000)); + } + jsTest.log(`Analyzing shard keys ${tojsononeline({shardKey, indexKey, cmdObj})}`); + const res = conn.adminCommand(cmdObj); if (res.code == ErrorCodes.BadValue || res.code == ErrorCodes.IllegalOperation || res.code == ErrorCodes.NamespaceNotFound || @@ -204,15 +210,37 @@ function analyzeShardKey(ns, shardKey, indexKey) { tojsononeline(res)}`); return res; } + if (res.code == 7559401) { + print(`Failed to analyze the shard key because one of the shards fetched the split ` + + `point documents after the TTL deletions had started. ${tojsononeline(res)}`); + return res; + } + if (res.code == 7588600) { + print(`Failed to analyze the shard key because the document for one of the most common ` + + `shard key values got deleted while the command was running. ${tojsononeline(res)}`); + return res; + } + if (res.code == 7826501) { + print(`Failed to analyze the shard key because $collStats indicates that the collection ` + + `is empty. ${tojsononeline(res)}`); + return res; + } + if (res.code == 7826505) { + print(`Failed to analyze the shard key because the collection becomes empty during the ` + + `step for calculating the monotonicity metrics. ${tojsononeline(res)}`); + return res; + } + if (res.code == 7826506 || res.code == 7826507) { + print(`Failed to analyze the shard key because the collection becomes empty during the ` + + `step for calculating the cardinality and frequency metrics. ${tojsononeline(res)}`); + return res; + } assert.commandWorked(res); jsTest.log(`Finished analyzing the shard key: ${tojsononeline(res)}`); - // The response should only contain the "numDocs" field if it also contains the fields about the - // characteristics of the shard key (e.g. "numDistinctValues" and "mostCommonValues") since the - // number of documents is just a supporting metric for those metrics. 
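Editor's note (illustration only, not part of the patch): the shapes of the analyzeShardKey commands the hook above now issues; the namespace, shard key, and numeric values are hypothetical, and any one invocation adds at most one of sampleRate or sampleSize.

    // About a quarter of invocations cap sampling by rate (drawn from [0.5, 1.0)),
    // another quarter by an absolute document count (between 1000 and 10000).
    const byRate = {analyzeShardKey: "test.coll", key: {a: 1}, sampleRate: 0.75};
    const bySize = {analyzeShardKey: "test.coll", key: {a: 1}, sampleSize: NumberLong(5000)};
    // The hook tolerates a list of benign or retriable error codes rather than asserting success outright.
    const res = conn.adminCommand(byRate);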
- if (res.hasOwnProperty("numDocs")) { - AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(res); + if (res.hasOwnProperty("keyCharacteristics")) { + AnalyzeShardKeyUtil.validateKeyCharacteristicsMetrics(res.keyCharacteristics); } else { AnalyzeShardKeyUtil.assertNotContainKeyCharacteristicsMetrics(res); } diff --git a/jstests/hooks/run_check_metadata_consistency.js b/jstests/hooks/run_check_metadata_consistency.js index 50ca85f3f9a54..80e410d335d49 100644 --- a/jstests/hooks/run_check_metadata_consistency.js +++ b/jstests/hooks/run_check_metadata_consistency.js @@ -1,8 +1,5 @@ -'use strict'; - -(function() { -load('jstests/libs/check_metadata_consistency_helpers.js'); // For MetadataConsistencyChecker. -load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers. +import {MetadataConsistencyChecker} from "jstests/libs/check_metadata_consistency_helpers.js"; +load('jstests/libs/fixture_helpers.js'); // For FixtureHelpers. assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a server?'); @@ -16,7 +13,7 @@ assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a jsTest.log( `Aborted metadata consistency check due to retriable error during topology discovery: ${ e}`); - return; + quit(); } else { throw e; } @@ -26,4 +23,3 @@ assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a const mongos = db.getMongo(); MetadataConsistencyChecker.run(mongos); -})(); diff --git a/jstests/hooks/run_check_routing_table_consistency.js b/jstests/hooks/run_check_routing_table_consistency.js index 305ac9550bc28..bfcbf4f12ffb5 100644 --- a/jstests/hooks/run_check_routing_table_consistency.js +++ b/jstests/hooks/run_check_routing_table_consistency.js @@ -1,7 +1,6 @@ -'use strict'; - -(function() { -load('jstests/libs/check_routing_table_consistency_helpers.js'); // For check implementation. +import { + RoutingTableConsistencyChecker +} from "jstests/libs/check_routing_table_consistency_helpers.js"; load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology. assert.neq(typeof db, 'undefined', 'No `db` object, is the shell connected to a server?'); @@ -15,4 +14,3 @@ if (topology.type !== Topology.kShardedCluster) { tojson(topology)); } RoutingTableConsistencyChecker.run(db.getMongo()); -})(); diff --git a/jstests/hooks/run_fcv_upgrade_downgrade_background.js b/jstests/hooks/run_fcv_upgrade_downgrade_background.js new file mode 100644 index 0000000000000..f1c5e02c8ecef --- /dev/null +++ b/jstests/hooks/run_fcv_upgrade_downgrade_background.js @@ -0,0 +1,92 @@ +/** + * Runs dbCheck in background. + * + * may need more checks, see: jstests/concurrency/fsm_workloads/drop_database_sharded_setFCV.js + */ +'use strict'; + +(function() { +load('jstests/libs/discover_topology.js'); // For Topology and DiscoverTopology. +load('jstests/libs/parallelTester.js'); // For Thread. + +if (typeof db === 'undefined') { + throw new Error( + "Expected mongo shell to be connected a server, but global 'db' object isn't defined"); +} + +// Disable implicit sessions so FSM workloads that kill random sessions won't interrupt the +// operations in this test that aren't resilient to interruptions. 
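Editor's note on the FCV hook introduced below (not part of the patch): getRandTimeIncInterval() returns a value in [currSleep, 2 * currSleep) and quadruples currSleep on every call, so successive sleeps are drawn from roughly 10-20 ms, 40-80 ms, 160-320 ms, 640-1280 ms, and so on, and the while loop exits once currSleep exceeds maxSleep (5 seconds). Short suites therefore still see a few FCV round trips, while long-running suites are not stalled long enough to time out.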
+TestData.disableImplicitSessions = true;
+
+const conn = db.getMongo();
+
+const sendFCVUpDown = function(ver) {
+    try {
+        print("Running adminCommand({setFeatureCompatibilityVersion: " + ver + "})");
+        const res = conn.adminCommand({setFeatureCompatibilityVersion: ver});
+        assert.commandWorked(res);
+    } catch (e) {
+        if (e.code === 332) {
+            // Cannot downgrade the cluster as collection xxx has 'encryptedFields' with range
+            // indexes.
+            jsTestLog('setFCV: Can not downgrade');
+            return;
+        }
+        if (e.code === 5147403) {
+            // Invalid fcv transition (e.g lastContinuous -> lastLTS).
+            jsTestLog('setFCV: Invalid transition');
+            return;
+        }
+        if (e.code === 7428200) {
+            // Cannot upgrade FCV if a previous FCV downgrade stopped in the middle of cleaning
+            // up internal server metadata.
+            assertAlways.eq(latestFCV, targetFCV);
+            jsTestLog(
+                'setFCV: Cannot upgrade FCV if a previous FCV downgrade stopped in the middle \
+                of cleaning up internal server metadata');
+            return;
+        }
+        throw e;
+    }
+};
+
+Random.setRandomSeed();
+let maxSleep = 5000; // 5 sec.
+let currSleep = 10;  // Start at 10ms.
+
+// Get time interval to sleep in ms.
+// Value returned will be between currSleep and 2 * currSleep.
+// Also increase currSleep in order to sleep for longer and longer intervals.
+// This type of exponential backoff ensures that we run (several times) for short tests,
+// but don't cause long tests to time out.
+const getRandTimeIncInterval = function() {
+    let ret = Random.randInt(currSleep) + currSleep;
+    currSleep *= 4;
+    return ret;
+};
+
+// Only go through the loop a few times, sleeping (by an increasing duration) between sendFCV
+// commands. This way even short-duration tests experience a few FCV changes, while long-running
+// tests do not time out (which could happen if the sleep duration were a fixed small value).
+while (currSleep <= maxSleep) {
+    // downgrade FCV
+    sleep(getRandTimeIncInterval());
+    sendFCVUpDown(lastLTSFCV);
+
+    // upgrade FCV
+    sleep(getRandTimeIncInterval());
+    sendFCVUpDown(latestFCV);
+    // At this point FCV is back to latestFCV.
+
+    if (lastLTSFCV !== lastContinuousFCV) {
+        // downgrade FCV
+        sleep(getRandTimeIncInterval());
+        sendFCVUpDown(lastContinuousFCV);
+
+        // upgrade FCV
+        sleep(getRandTimeIncInterval());
+        sendFCVUpDown(latestFCV);
+        // At this point FCV is back to latestFCV.
+    }
+}
+})();
diff --git a/jstests/hooks/validate_collections.js b/jstests/hooks/validate_collections.js
index 1ef1a9a761aea..70459c5914106 100644
--- a/jstests/hooks/validate_collections.js
+++ b/jstests/hooks/validate_collections.js
@@ -70,6 +70,13 @@ function CollectionValidator() {
                 print('Skipping collection validation for ' + coll.getFullName() +
                       ' since collection was not found');
                 continue;
+            } else if (res.codeName === "CommandNotSupportedOnView") {
+                // Even though we pass a filter to getCollectionInfos() to only fetch
+                // collections, nothing is preventing the collection from being dropped and
+                // recreated as a view.
+                print('Skipping collection validation for ' + coll.getFullName() +
+                      ' as it is a view');
+                continue;
             }
             const host = db.getMongo().host;
             print('Collection validation failed on host ' + host +
diff --git a/jstests/libs/analyze_plan.js b/jstests/libs/analyze_plan.js
index 308119d5262d5..4fc8cb5cc65af 100644
--- a/jstests/libs/analyze_plan.js
+++ b/jstests/libs/analyze_plan.js
@@ -7,7 +7,7 @@ load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers.
 /**
  * Returns a sub-element of the 'queryPlanner' explain output which represents a winning plan.
*/ -function getWinningPlan(queryPlanner) { +export function getWinningPlan(queryPlanner) { // The 'queryPlan' format is used when the SBE engine is turned on. If this field is present, // it will hold a serialized winning plan, otherwise it will be stored in the 'winningPlan' // field itself. @@ -18,7 +18,7 @@ function getWinningPlan(queryPlanner) { /** * Returns an element of explain output which represents a rejected candidate plan. */ -function getRejectedPlan(rejectedPlan) { +export function getRejectedPlan(rejectedPlan) { // The 'queryPlan' format is used when the SBE engine is turned on. If this field is present, // it will hold a serialized winning plan, otherwise it will be stored in the 'rejectedPlan' // element itself. @@ -28,7 +28,7 @@ function getRejectedPlan(rejectedPlan) { /** * Returns a sub-element of the 'cachedPlan' explain output which represents a query plan. */ -function getCachedPlan(cachedPlan) { +export function getCachedPlan(cachedPlan) { // The 'queryPlan' format is used when the SBE engine is turned on. If this field is present, it // will hold a serialized cached plan, otherwise it will be stored in the 'cachedPlan' field // itself. @@ -40,7 +40,7 @@ function getCachedPlan(cachedPlan) { * subdocuments whose stage is 'stage'. Returns an empty array if the plan does not have the * requested stage. if 'stage' is 'null' returns all the stages in 'root'. */ -function getPlanStages(root, stage) { +export function getPlanStages(root, stage) { var results = []; if (root.stage === stage || stage === undefined) { @@ -99,7 +99,7 @@ function getPlanStages(root, stage) { * Given the root stage of explain's JSON representation of a query plan ('root'), returns a list of * all the stages in 'root'. */ -function getAllPlanStages(root) { +export function getAllPlanStages(root) { return getPlanStages(root); } @@ -108,7 +108,7 @@ function getAllPlanStages(root) { * subdocument with its stage as 'stage'. Returns null if the plan does not have such a stage. * Asserts that no more than one stage is a match. */ -function getPlanStage(root, stage) { +export function getPlanStage(root, stage) { var planStageList = getPlanStages(root, stage); if (planStageList.length === 0) { @@ -124,7 +124,7 @@ function getPlanStage(root, stage) { /** * Returns the set of rejected plans from the given replset or sharded explain output. */ -function getRejectedPlans(root) { +export function getRejectedPlans(root) { if (root.queryPlanner.winningPlan.hasOwnProperty("shards")) { const rejectedPlans = []; for (let shard of root.queryPlanner.winningPlan.shards) { @@ -141,7 +141,7 @@ function getRejectedPlans(root) { * Given the root stage of explain's JSON representation of a query plan ('root'), returns true if * the query planner reports at least one rejected alternative plan, and false otherwise. */ -function hasRejectedPlans(root) { +export function hasRejectedPlans(root) { function sectionHasRejectedPlans(explainSection) { assert(explainSection.hasOwnProperty("rejectedPlans"), tojson(explainSection)); return explainSection.rejectedPlans.length !== 0; @@ -184,7 +184,7 @@ function hasRejectedPlans(root) { /** * Returns an array of execution stages from the given replset or sharded explain output. 
*/ -function getExecutionStages(root) { +export function getExecutionStages(root) { if (root.hasOwnProperty("executionStats") && root.executionStats.executionStages.hasOwnProperty("shards")) { const executionStages = []; @@ -216,7 +216,7 @@ function getExecutionStages(root) { * Returns an empty array if the plan does not have the requested stage. Asserts that agg explain * structure matches expected format. */ -function getAggPlanStages(root, stage, useQueryPlannerSection = false) { +export function getAggPlanStages(root, stage, useQueryPlannerSection = false) { let results = []; function getDocumentSources(docSourceArray) { @@ -309,7 +309,7 @@ function getAggPlanStages(root, stage, useQueryPlannerSection = false) { * If 'useQueryPlannerSection' is set to 'true', the 'queryPlanner' section of the explain output * will be used to lookup the given 'stage', even if 'executionStats' section is available. */ -function getAggPlanStage(root, stage, useQueryPlannerSection = false) { +export function getAggPlanStage(root, stage, useQueryPlannerSection = false) { let planStageList = getAggPlanStages(root, stage, useQueryPlannerSection); if (planStageList.length === 0) { @@ -329,7 +329,7 @@ function getAggPlanStage(root, stage, useQueryPlannerSection = false) { * explain plans, and it can search for a query planner stage like "FETCH" or an agg stage like * "$group." */ -function aggPlanHasStage(root, stage) { +export function aggPlanHasStage(root, stage) { return getAggPlanStages(root, stage).length > 0; } @@ -340,7 +340,7 @@ function aggPlanHasStage(root, stage) { * Expects that the stage appears once or zero times per node. If the stage appears more than once * on one node's query plan, an error will be thrown. */ -function planHasStage(db, root, stage) { +export function planHasStage(db, root, stage) { const matchingStages = getPlanStages(root, stage); // If we are executing against a mongos, we may get more than one occurrence of the stage. @@ -360,7 +360,7 @@ function planHasStage(db, root, stage) { * Given the root stage of explain's BSON representation of a query plan ('root'), * returns true if the plan is index only. Otherwise returns false. */ -function isIndexOnly(db, root) { +export function isIndexOnly(db, root) { return !planHasStage(db, root, "FETCH") && !planHasStage(db, root, "COLLSCAN"); } @@ -368,7 +368,7 @@ function isIndexOnly(db, root) { * Returns true if the BSON representation of a plan rooted at 'root' is using * an index scan, and false otherwise. */ -function isIxscan(db, root) { +export function isIxscan(db, root) { return planHasStage(db, root, "IXSCAN"); } @@ -376,7 +376,7 @@ function isIxscan(db, root) { * Returns true if the BSON representation of a plan rooted at 'root' is using * the idhack fast path, and false otherwise. */ -function isIdhack(db, root) { +export function isIdhack(db, root) { return planHasStage(db, root, "IDHACK"); } @@ -384,11 +384,11 @@ function isIdhack(db, root) { * Returns true if the BSON representation of a plan rooted at 'root' is using * a collection scan, and false otherwise. */ -function isCollscan(db, root) { +export function isCollscan(db, root) { return planHasStage(db, root, "COLLSCAN"); } -function isClusteredIxscan(db, root) { +export function isClusteredIxscan(db, root) { return planHasStage(db, root, "CLUSTERED_IXSCAN"); } @@ -396,7 +396,7 @@ function isClusteredIxscan(db, root) { * Returns true if the BSON representation of a plan rooted at 'root' is using the aggregation * framework, and false otherwise. 
*/ -function isAggregationPlan(root) { +export function isAggregationPlan(root) { if (root.hasOwnProperty("shards")) { const shards = Object.keys(root.shards); return shards.reduce( @@ -410,7 +410,7 @@ function isAggregationPlan(root) { * Returns true if the BSON representation of a plan rooted at 'root' is using just the query layer, * and false otherwise. */ -function isQueryPlan(root) { +export function isQueryPlan(root) { if (root.hasOwnProperty("shards")) { const shards = Object.keys(root.shards); return shards.reduce( @@ -424,7 +424,7 @@ function isQueryPlan(root) { * Get the "chunk skips" for a single shard. Here, "chunk skips" refer to documents excluded by the * shard filter. */ -function getChunkSkipsFromShard(shardPlan, shardExecutionStages) { +export function getChunkSkipsFromShard(shardPlan, shardExecutionStages) { const shardFilterPlanStage = getPlanStage(getWinningPlan(shardPlan), "SHARDING_FILTER"); if (!shardFilterPlanStage) { return 0; @@ -452,7 +452,7 @@ function getChunkSkipsFromShard(shardPlan, shardExecutionStages) { * Get the sum of "chunk skips" from all shards. Here, "chunk skips" refer to documents excluded by * the shard filter. */ -function getChunkSkipsFromAllShards(explainResult) { +export function getChunkSkipsFromAllShards(explainResult) { const shardPlanArray = explainResult.queryPlanner.winningPlan.shards; const shardExecutionStagesArray = explainResult.executionStats.executionStages.shards; assert.eq(shardPlanArray.length, shardExecutionStagesArray.length, explainResult); @@ -468,7 +468,7 @@ function getChunkSkipsFromAllShards(explainResult) { * Given explain output at executionStats level verbosity, confirms that the root stage is COUNT or * RECORD_STORE_FAST_COUNT and that the result of the count is equal to 'expectedCount'. */ -function assertExplainCount({explainResults, expectedCount}) { +export function assertExplainCount({explainResults, expectedCount}) { const execStages = explainResults.executionStats.executionStages; // If passed through mongos, then the root stage should be the mongos SINGLE_SHARD stage or @@ -500,7 +500,7 @@ function assertExplainCount({explainResults, expectedCount}) { /** * Verifies that a given query uses an index and is covered when used in a count command. */ -function assertCoveredQueryAndCount({collection, query, project, count}) { +export function assertCoveredQueryAndCount({collection, query, project, count}) { let explain = collection.find(query, project).explain(); assert(isIndexOnly(db, getWinningPlan(explain.queryPlanner)), "Winning plan was not covered: " + tojson(explain.queryPlanner.winningPlan)); @@ -517,7 +517,7 @@ function assertCoveredQueryAndCount({collection, query, project, count}) { * present exactly once in the plan returned. When 'stagesNotExpected' array is passed, also * verifies that none of those stages are present in the explain() plan. */ -function assertStagesForExplainOfCommand({coll, cmdObj, expectedStages, stagesNotExpected}) { +export function assertStagesForExplainOfCommand({coll, cmdObj, expectedStages, stagesNotExpected}) { const plan = assert.commandWorked(coll.runCommand({explain: cmdObj})); const winningPlan = getWinningPlan(plan.queryPlanner); for (let expectedStage of expectedStages) { @@ -534,7 +534,7 @@ function assertStagesForExplainOfCommand({coll, cmdObj, expectedStages, stagesNo /** * Utility to obtain a value from 'explainRes' using 'getValueCallback'. 
*/ -function getFieldValueFromExplain(explainRes, getValueCallback) { +export function getFieldValueFromExplain(explainRes, getValueCallback) { assert(explainRes.hasOwnProperty("queryPlanner"), explainRes); const plannerOutput = explainRes.queryPlanner; const fieldValue = getValueCallback(plannerOutput); @@ -545,7 +545,7 @@ function getFieldValueFromExplain(explainRes, getValueCallback) { /** * Get the 'planCacheKey' from 'explainRes'. */ -function getPlanCacheKeyFromExplain(explainRes, db) { +export function getPlanCacheKeyFromExplain(explainRes, db) { return getFieldValueFromExplain(explainRes, function(plannerOutput) { return FixtureHelpers.isMongos(db) && plannerOutput.hasOwnProperty("winningPlan") && plannerOutput.winningPlan.hasOwnProperty("shards") @@ -557,7 +557,7 @@ function getPlanCacheKeyFromExplain(explainRes, db) { /** * Get the 'queryHash' from 'explainRes'. */ -function getQueryHashFromExplain(explainRes, db) { +export function getQueryHashFromExplain(explainRes, db) { return getFieldValueFromExplain(explainRes, function(plannerOutput) { return FixtureHelpers.isMongos(db) ? plannerOutput.winningPlan.shards[0].queryHash : plannerOutput.queryHash; @@ -568,7 +568,7 @@ function getQueryHashFromExplain(explainRes, db) { * Helper to run a explain on the given query shape and get the "planCacheKey" from the explain * result. */ -function getPlanCacheKeyFromShape({ +export function getPlanCacheKeyFromShape({ query = {}, projection = {}, sort = {}, @@ -588,7 +588,7 @@ function getPlanCacheKeyFromShape({ * Helper to run a explain on the given pipeline and get the "planCacheKey" from the explain * result. */ -function getPlanCacheKeyFromPipeline(pipeline, collection, db) { +export function getPlanCacheKeyFromPipeline(pipeline, collection, db) { const explainRes = assert.commandWorked(collection.explain().aggregate(pipeline)); return getPlanCacheKeyFromExplain(explainRes, db); @@ -597,7 +597,7 @@ function getPlanCacheKeyFromPipeline(pipeline, collection, db) { /** * Given the winning query plan, flatten query plan tree into a list of plan stage names. */ -function flattenQueryPlanTree(winningPlan) { +export function flattenQueryPlanTree(winningPlan) { let stages = []; while (winningPlan) { stages.push(winningPlan.stage); @@ -606,3 +606,37 @@ function flattenQueryPlanTree(winningPlan) { stages.reverse(); return stages; } + +/** + * Assert that a command plan has no FETCH stage or if the stage is present, it has no filter. + */ +export function assertNoFetchFilter({coll, cmdObj}) { + const plan = assert.commandWorked(coll.runCommand({explain: cmdObj})); + const winningPlan = getWinningPlan(plan.queryPlanner); + const fetch = getPlanStage(winningPlan, "FETCH"); + assert((fetch === null || !fetch.hasOwnProperty("filter")), + "Unexpected fetch: " + tojson(fetch)); + return winningPlan; +} + +/** + * Assert that a find plan has a FETCH stage with expected filter and returns a specified number of + * results. 
+ */ +export function assertFetchFilter({coll, predicate, expectedFilter, nReturned}) { + const exp = coll.find(predicate).explain("executionStats"); + const plan = getWinningPlan(exp.queryPlanner); + const fetch = getPlanStage(plan, "FETCH"); + assert(fetch !== null, "Missing FETCH stage " + plan); + assert(fetch.hasOwnProperty("filter"), + "Expected filter in the fetch stage, got " + tojson(fetch)); + assert.eq(expectedFilter, + fetch.filter, + "Expected filter " + tojson(expectedFilter) + " got " + tojson(fetch.filter)); + + if (nReturned !== null) { + assert.eq(exp.executionStats.nReturned, + nReturned, + "Expected " + nReturned + " documents, got " + exp.executionStats.nReturned); + } +} diff --git a/jstests/libs/auto_retry_transaction_in_sharding.js b/jstests/libs/auto_retry_transaction_in_sharding.js index f81a366970a2d..7ccac0b7d5a66 100644 --- a/jstests/libs/auto_retry_transaction_in_sharding.js +++ b/jstests/libs/auto_retry_transaction_in_sharding.js @@ -94,4 +94,4 @@ var { retryOnceOnTransientOnMongos, retryOnceOnTransientAndRestartTxnOnMongos }; -})(); \ No newline at end of file +})(); diff --git a/jstests/libs/badSAN.pem b/jstests/libs/badSAN.pem index b36e5a4c72080..9e60d23d52162 100644 --- a/jstests/libs/badSAN.pem +++ b/jstests/libs/badSAN.pem @@ -3,53 +3,53 @@ # # Certificate with an otherwise permissible CommonName, but with an unmatchable SubjectAlternateName. -----BEGIN CERTIFICATE----- -MIIDwjCCAqqgAwIBAgIEbr2RhTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDwjCCAqqgAwIBAgIEJiElLDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjBvMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM2WhcNMjUwOTEwMTQyODM2WjBvMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDESMBAGA1UEAwwJ -MTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA3q91q4t8 -/v442v1IaL20H8mcxaNh2jE1X1+mPz9La1tKsjDN3BwIOt2PamMDUdJhT9YL0MDF -h+gUaPcAvJA8KMMGwnzrzbCSI2SkWyQ4+QKyDeKnNZ1sn/EmZNruHZ+meiXu4EtE -t0HrFRIgEYFZGj6iMhkQUfDan6Aed1eBgTrjHr61pTywCw5SJaDlI78uuZQvs7KB -UpsEHJDdvyZvK8TxmllL+mLqcP1D6aLmlNwC6Pnzc8wWrSuqI9v+YP1YsGBEyi/l -a/+q7QOOWcaruRXqH/nj4KkRixGBpRQy80OQse68pmtLA4FAJateFKtXYD6Vh/g4 -jI9FE1PmAni/vQIDAQABo2EwXzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV -HSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUBcxltaZPDRkxtSGwZvfNtbMfOMsw -EQYDVR0RBAowCIIGYmFkU0FOMA0GCSqGSIb3DQEBCwUAA4IBAQATShkmAdj1+FkA -BdACAz23f+V/ihcBHtCFpfi4SgS+vzmzS/BbP6pXD+DS78cpLfCtS0c4xJFMgyPo -82/Ii0CsK7Jm5jT/ZWg02CW6ecaSVmAhzlVybQpjQ6qrXP1zoqJeyyOhTl1Sfcbu -WFOkDh3d0SEuCctDne5pUGIHjZ3YDKiOmLPYNDUDKB3DX5P6t5PnqtPHpYPE3zG2 -6X96xs3OqpER0vUWHKSe1ofnVr9YmDt7mOYbOaa6MU1WUPedZ5BR0mOVwsxx7z5i -QdVARPyZqSd0BCIub9cwTo9cqPPBzSt/MBd5brMR7NwdWhIAHIhYGvT+437ZXjBq -RMvCdM5m +MTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAyxUzmqbz +gz667HetJB/MU8n3mCouCBfBLGJBTjkA7Z+iu//fTDU4t9BSqg+CC8viwuWHxmW3 +Iv0m6XYKLtYCJLl2tDdSkpn/3N/OdfybN4KncCnUVqQ2kK5IR4gvWMe3w2wqt1mL +Hxe8SFubXwbLv4soIpHtL7tx+hVOunKE0ANBZO3jijxJF7xz1QA1W/mkCAnrapva +JeZHmHcltIketndVNYziOKIimeiuQ9tZeW1OoH4galkdOAnd3CdEPHh4ZLDePu4n +6kor92Siav3PK073CmxbUl0H0PEP7d4HwiR/M/eq+Eqh9ykRVAwr3vJhLKuVcgen +t0ZrlutxCVmqSwIDAQABo2EwXzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV +HSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUgVc7HomlduDjgc2G95sshVuWTE4w +EQYDVR0RBAowCIIGYmFkU0FOMA0GCSqGSIb3DQEBCwUAA4IBAQCV8Wacy4CjWV1S +VyfeZ7OeNw/ggF6vaZrJbYeI69GtHU38wnklviKeG5zkBVv6MPrw8bkqM28Jtyjg 
++gXc0y0Po+m6+56mnfJAEUwtZbKQ/IXSPh0NcETO98Dk4rJyhk+/PQYtUqL7lJ/T +7zaQG8MgyiILkxQqRDqAcGu3nF4UBujFBNKqF3p2yC42pL9TYrgbmuUiWN7Qpyk0 +3ZqJivlUHBOX3D5K8xf+RUwHUquVFOZfX+U68yA/mZ3fsFNBhzhZowVgcS3NMFoT +xtnZRku+KHoodwVwyyIeTuFlpzekRDafFoQsW06rXo+I2Wl7pLfA5YAAbH53eZCX +8YbCofVu -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDer3Wri3z+/jja -/UhovbQfyZzFo2HaMTVfX6Y/P0trW0qyMM3cHAg63Y9qYwNR0mFP1gvQwMWH6BRo -9wC8kDwowwbCfOvNsJIjZKRbJDj5ArIN4qc1nWyf8SZk2u4dn6Z6Je7gS0S3QesV -EiARgVkaPqIyGRBR8NqfoB53V4GBOuMevrWlPLALDlIloOUjvy65lC+zsoFSmwQc -kN2/Jm8rxPGaWUv6Yupw/UPpouaU3ALo+fNzzBatK6oj2/5g/ViwYETKL+Vr/6rt -A45Zxqu5Feof+ePgqRGLEYGlFDLzQ5Cx7ryma0sDgUAlq14Uq1dgPpWH+DiMj0UT -U+YCeL+9AgMBAAECggEAKt0tUkr0UYUP3p9Ye8jgTioZwjVT1ZMZSfV4Y75EPzMA -atJYHeFLeEFDpN/QOMS9NIfoGFOy8hkrLJU3EBTXb6UyvOcc4Kf3SVbLCDwtt0jc -iy2wr/JQgW0CzfESWqNrgSGiHIyAeqH2pUSq5ZO3WgTqZahLaupouscwdhpi9EPM -fhPmuiFnJ+W8NA9DWRBiEZh+DyYFBad4+Dyk/R0OAAh0m5kgkFEVyOmE0t4APwiw -55ePv7bAR1QDbL+Q5bKV3vwr+gsylNjwZexYxAJQJZD2PyLjsftUH7E1W9RYM0Om -duSVEoXJZFCWtosx07B4pQHb6ryqGUS76ax0J9ZXIQKBgQD7t1fuJtKgj9Nagcij -J2Hz9ws7NZx/PLew1Br8a8j96jYHNfJvEG6gDVYoVW/qKO/UJIvdj557WSnVPDEJ -4h9Bdc3MV0IhiLpi4m63pHUKwEdiCooUcJm2OtQSVd3FXnNSJpjQrN5jg4VXe1kg -Bjok0G5YqEMOfd0uZcWrW9uApQKBgQDieaMdfwU8z32G/v4oSa/m2PfN11BjdZn5 -AQkvRvHTIP+hwunAX1uu0iTE6y4IO0fjrSTmpJbOsFslwiAYXmikUQk1F1I+s7FM -NEoQb3zueXbOs9K+WnepgMuMOgMbDm8qqoaGYmikU7mHZZb34nC0JSRA66DaoXsC -RmCDAsa/OQKBgQD2JKVRdplE9R/CQ2NFV/+LJ6rN7XDpYyBlRCRXbbcPxPWsO26k -eLcUv7Xenx1fJ0TOeCZlNEnPaLNllwSFG59gzae/CBzc2e0ZQT7vSVxCdR/YmWHI -9wr4jbJPc7P3ipLOZHvOoxycx0Ge+DmA/VXiJgehnnhkNWQSOOcA2ERfGQKBgQC9 -W46yvt2WNrZyBQpkjRfyID8xcHHadx3CQMd5LAxNxy12Bw6CtjhmN3Ze33ShlU9K -Yh6UadFeB75uF53WQjmkCc11AobVvlDjsmSq0UzX598afOgcGHAs3W9TU739BViV -h/bqraVooEhjmOFdaYtqVBO35EueAZ5kDIvtfojGoQKBgED/n9nPsVLjuDvgNyux -8fYlfGnIeQSoOnasOvsWh+hsw5MIAaFknUZ27rYB6BblJLrLAbUCVEeOZsyWtJWm -Kp3IlXPUz7cYt0aCFLcDhCSQevfCDzJ+8dexAMzCkbrdcD5Wl3xWjRmsgw67YtzS -dy3/1MEdNReJZZoJv80le9Ty +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDLFTOapvODPrrs +d60kH8xTyfeYKi4IF8EsYkFOOQDtn6K7/99MNTi30FKqD4ILy+LC5YfGZbci/Sbp +dgou1gIkuXa0N1KSmf/c3851/Js3gqdwKdRWpDaQrkhHiC9Yx7fDbCq3WYsfF7xI +W5tfBsu/iygike0vu3H6FU66coTQA0Fk7eOKPEkXvHPVADVb+aQICetqm9ol5keY +dyW0iR62d1U1jOI4oiKZ6K5D21l5bU6gfiBqWR04Cd3cJ0Q8eHhksN4+7ifqSiv3 +ZKJq/c8rTvcKbFtSXQfQ8Q/t3gfCJH8z96r4SqH3KRFUDCve8mEsq5VyB6e3RmuW +63EJWapLAgMBAAECggEARWcYfKjDnb3Jadi4bhkD3jquh+0aUB+2H7HweYgXnh2p +IrCeIlAp8v+Z0I5+NgPWmh6RGSdXv/hd3Pk/H6R5lQ1g2NPzPzCr9VLzHvVWIlFf +YYCaFYYHM8ir4O5Up/WRApMrwf8zAUw+R0gvP/l1lh87dx0BvwdDO2Eex68tXlyy +NDVaQIcPTWIh2HDMBQqEoINbzrLrl+RqJ3IY7APDIzCj9DQ3/T6jkIFnye7GLYem ++ucqdw0UH8EEda//t94MeVL+G3HARH1t5JTr4KSyIEGwF+6cwYMlIE594IbZrF1l +vyD4WoT0ebrSYYSbGhlMkt5ZJGT91MFAn4C5Vt+9wQKBgQD094SfcDmTcpN640Xb +lQxAhkHiAwoJVSf2NWU3o2Jh9a+wAgZnWa/ExRzcz23FPi5cuRi5dC9rRsTkn2Pi +DNHm71kzT+Ayy53SKzlhpy4OVrFUv0yCovweJcUFl5aNMwQM9QW7kZ3o6p3X0PAO +vlufFdImIILXh0p+we3LATiO4QKBgQDUOsIPrE/0xL91Y37uh78573EkNR7HoLPe +ylaQQkUjwA/zU+qPSorkWbeYT9V3SXSXjdQB0GoaCc7o3IQ6Qycv7rafwvhheXRf +iQrGWnzu8KrX2Qs91CCiTitut0ojwaWg6LJd6cNU5uyKDmLBk1ewyyj8/G58149S +ypLzStt6qwKBgENogRQmm5FZa//a3nRIFXEEAMkisPZUeoSjuNCQoxf1tXyncf+q +jFWHMCQK6FfofnWBca5GrG3GsZN/0Mp5YKE9p7wY9MbFhQ46WrXmfSPw8ycw5Orl +0p6xIgAh8Im2Sh0Op58vuNZJCVpD4msMMsYaCRP4ykhosDUlRDwif9/BAoGBAIad +EnRghoKpvQsXZe6kWc3Eq44cx34114FL9CYicrpfW36qzo++52Q55/MLSEoWZIaw +pjXUXUhQylX3cUOHTrbfgiTJxUQGhqMGSLhRswbXznWErNW11FE+pdvfFH6jmIv8 +rQ5WdNhIdOaIg2lnLOrtofz9nJNBIx6PcTAyXg5rAoGAG1MjzmYd705B9GNwulMp 
+sHAZAFDVDbkuDkBS0Jzv7vLSOarD4hal8aOAoLU/0Ya9f+bMwyt2c6bglqY2HoTY +7hXD6xxYt4ocka6phiuok97TMNzRAzIWd3TNotOtyDP8anXjXLzZ8dkHHIvdaUxE +9POtRW3Pjf+k4zKsP7khbJ8= -----END PRIVATE KEY----- diff --git a/jstests/libs/badSAN.pem.digest.sha1 b/jstests/libs/badSAN.pem.digest.sha1 index 98251b6edc657..ddd1a4df3fcd4 100644 --- a/jstests/libs/badSAN.pem.digest.sha1 +++ b/jstests/libs/badSAN.pem.digest.sha1 @@ -1 +1 @@ -ABC6CD0E2DBFF1D8CCF4F67BE7EA74049CA4219A \ No newline at end of file +E0A5C1F7BB4842CAB8B6FD12B9236C469514975F \ No newline at end of file diff --git a/jstests/libs/badSAN.pem.digest.sha256 b/jstests/libs/badSAN.pem.digest.sha256 index d2b1d0bfa9c90..b040870b002e0 100644 --- a/jstests/libs/badSAN.pem.digest.sha256 +++ b/jstests/libs/badSAN.pem.digest.sha256 @@ -1 +1 @@ -16C50CECD049C952B5BA9E2CDFCFCD9D999E7D5344BA85501BCEA05CCEE5529B \ No newline at end of file +4EF9CCD46DCBF7B038EC3CA9560A43E5B2EE24D3FB0B6612C031FF4265927AB8 \ No newline at end of file diff --git a/jstests/libs/bulk_write_utils.js b/jstests/libs/bulk_write_utils.js new file mode 100644 index 0000000000000..09109da99e330 --- /dev/null +++ b/jstests/libs/bulk_write_utils.js @@ -0,0 +1,11 @@ + +/** + * Helper function to check a BulkWrite cursorEntry. + */ +const cursorEntryValidator = function(entry, expectedEntry) { + assert.eq(entry.ok, expectedEntry.ok); + assert.eq(entry.idx, expectedEntry.idx); + assert.eq(entry.n, expectedEntry.n); + assert.eq(entry.nModified, expectedEntry.nModified); + assert.eq(entry.code, expectedEntry.code); +}; diff --git a/jstests/libs/ca-2019.pem b/jstests/libs/ca-2019.pem deleted file mode 100644 index d1a5689cf0f36..0000000000000 --- a/jstests/libs/ca-2019.pem +++ /dev/null @@ -1,48 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu -ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw -FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE -BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD -Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n -b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL -MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj -qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N -shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa -zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO -Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7 -SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb -WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF -BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS -8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP -b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY -8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2 -vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp -nOjaLwQJQgKejY62PiNcw7xC/nIxBeI= ------END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIIEowIBAAKCAQEAgcUl46gOcthDEJHJrywzYpBPPDoN/rKSd3setB1lT5PBHlze -vI8n2yi2LKAnARNqwQBPzbIVQSWvIq4dKP1E4dVzVuIhgrHzL2Gpl4h52jo5hlSo -3waI+qc0B9LitF+aJZt0ms2T5CywkfNcsbCusKCWTBU765RI8Y/0pg2fbVV1uvdm -wuIupxrzgkqYL/ZJrPOcThHdsW7gi1xQ1ymn12XeQCvonoJA7N2oe1VzBBPLG6hp -5agtZ7MuW6Qp2+2gvt2n+0ldlxBZUkE+ZME5pPQr0w8zPTu/EjvJzYRDhQS0BHRg -xndwUoRPHQnYdiU8t8yrG1nhPpsctJhMMTXxnwIDAQABAoIBAD5iGOnM800wO2Uu -wGbOd9FNEFoiinHDRHfdnw/1BavwmqjO+mBo7T8E3jarsrRosiwfyz1V+7O6uuuQ 
-CgKXZlKuOuksgfGDPCWt7EolWHaZAOhbsGaujJD6ah/MuCD/yGmFxtNYOl05QpSX -Cht9lSzhtf7TQl/og/xkOLbO27JB540ck/OCSOczXg9Z/O8AmIUyDn7AKb6G1Zhk -2IN//HQoAvDUMZLWrzy+L7YGbA8pBR3yiPsYBH0rX2Oc9INpiGA+B9Nf1HDDsxeZ -/o+5xLbRDDfIDtlYO0cekJ053W0zUQLrMEIn9991EpG2O/fPgs10NlKJtaFH8CmT -ExgVA9ECgYEA+6AjtUdxZ0BL3Wk773nmhesNH5/5unWFaGgWpMEaEM7Ou7i6QApL -KAbzOYItV3NNCbkcrejq7jsDGEmiwUOdXeQx6XN7/Gb2Byc/wezy5ALi0kcUwaur -6s9+Ah+T4vcU2AjfuCWXIpe46KLEbwORmCRQGwkCBCwRhHGt5sGGxTkCgYEAhAaw -voHI6Cb+4z3PNAKRnf2rExBYRyCz1KF16ksuwJyQSLzFleXRyRWFUEwLuVRL0+EZ -JXhMbtrILrc23dJGEsB8kOCFehSH/IuL5eB0QfKpDFA+e6pimsbVeggx/rZhcERB -WkcV3jN4O82gSL3EnIgvAT1/nwhmbmjvDhFJhZcCgYBaW4E3IbaZaz9S/O0m69Fa -GbQWvS3CRV1oxqgK9cTUcE9Qnd9UC949O3GwHw0FMERjz3N7B/8FGW/dEuQ9Hniu -NLmvqWbGlnqWywNcMihutJKbDCdp/Km5olUPkiNbB3sWsOkViXoiU/V0pK6BZvir -d67EZpGwydpogyH9kVVCEQKBgGHXc3Q7SmCBRbOyQrQQk0m6i+V8328W1S5m2bPg -M62aWXMOMn976ZRT1pBDSwz1Y5yJ3NDf7gTZLjEwpgCNrFCJRcc4HLL0NDL8V5js -VjvpUU5GyYdsJdb+M4ZUPHi/QEaqzqPQumwJSLlJEdfWirZWVj9dDA8XcpGwQjjy -psHRAoGBAJUTgeJYhjK7k5sgfh+PRqiRJP0msIH8FK7SenBGRUkelWrW6td2Riey -EcOCMFkRWBeDgnZN5xDyWLBgrzpw9iHQQIUyyBaFknQcRUYKHkCx+k+fr0KHHCUb -X2Kvf0rbeMucb4y/h7950HkBBq83AYKMAoI8Ql3cx7pKmyOLXRov ------END RSA PRIVATE KEY----- \ No newline at end of file diff --git a/jstests/libs/ca.pem b/jstests/libs/ca.pem index dfc69f542030f..4638aa633f480 100644 --- a/jstests/libs/ca.pem +++ b/jstests/libs/ca.pem @@ -3,73 +3,51 @@ # # Primary Root Certificate Authority Most Certificates are issued by this CA. -----BEGIN CERTIFICATE----- -MIIDeTCCAmGgAwIBAgIEe9SskzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDeTCCAmGgAwIBAgIESt5aGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQyWhcNMjQwNDMwMjE1OTQyWjB0MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM1WhcNMjUwOTEwMTQyODM1WjB0MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO -S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDf -vZIt82obTHnc3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24T -pItMW1N+zOaLHU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPu -Et2rFgGg3atR3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYq -BLGt00Wws4bpILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4 -vBdU0Kdt9FbTDEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRr -qmctCX4KQtOZ8dV3JQkNAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBAJnz4lK9GiCWhCXIPzghYRRheYWL8nhkZ+3+oC1B3/mGEf71 -2VOdND6fMPdHinD8jONH75mOpa7TanriVYX3KbrQ4WABFNJMX9uz09F+0A2D5tyc -iDkldnei+fiX4eSx80oCPgvaxdJWauiTsEi+fo2Do47PYkch9+BDXT9F/m3S3RRW -cia7URBAV8Itq6jj2BHcpS/dEqZcmN9kGWujVagcCorc0wBKSmkO/PZIjISid+TO -Db2g+AvqSBDU0lbdP7NXRSIxvZejDz4qMjcpSbhW9OS2BCYZcq5wgH2lwYkdPtmX -JkhxWKwsW11WJWDcmaXcffO3a6lDizxyjnTedoU= +S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCb +k/WPIqqv46Nv9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZSh +iO2o9iDC5O1Aedop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4n +NY00x9PkCcoq98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJX +qaeRNXS0/j8Wwp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX +5aJoBUDL22fLRhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHD +TFGBx0p17I1g0xjWNjMVAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBAIwWNyaQhZglJyKMIAUAwlvBL5REA99bua06xWfJwdmdlci9 +Bb6MgQzVk5K68rsNlcL0ma+Ri5FfU+j7gsYZh4pILYb9xqFxiKX7bxMZv99LR8Mi +0EImM7gz3S579qYBXWd4V6/1G864qln8neHv+X3MF/wk3O9IYqepWsC3xDRos1Zv 
+xQfb37Ol4pcHtue4wHXr5TV8+KPcUusfNcftnpsEHyEUHqPORdHB7xRpfhosRYvL +7WwMXNseuyHFcdA/rEhUVsca+SUeOMIW+8euuU/as3ZaEpv1ZmpHEYXHb2SlS6W+ +gTzUOtNXsKVDrm9uEcUHytp+xvp9l9NNM/IRGGA= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDfvZIt82obTHnc -3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24TpItMW1N+zOaL -HU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPuEt2rFgGg3atR -3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYqBLGt00Wws4bp -ILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4vBdU0Kdt9FbT -DEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRrqmctCX4KQtOZ -8dV3JQkNAgMBAAECggEBAImudWrzdGMSH5eL1hrc7xdO8QZqtn0KOSEiW5luWVkV -ATBOrCpPA+LQ5CleUsc0/w71XYcCWBIacvA+e4lsBiA/dfP1rcjNrgNAvN8JJAs6 -GQerYIpysUcwvRZBhdFykMRLNb9XTm84mXlKBQuaISZ5ticaMwT0v8xlYCCPi41S -0gfhW9fIKA45NYmTKUhS2l5M0kNqJ6JmONOHv0RaSgn4qxAVNrkAJCbb2npEKZiP -qIR7XL1MbE7b1lNnL3PpbA6LCQpLBhNOHXrAfquagvm209d6zx8fAcIfFXMwdIZj -S80DG9pYyXxCm9wfdEJHrpFwf/ueoXpr9p6JhVvenkECgYEA/MwD3zQ+8dF4X/z5 -UEbfXsmZ5olqH8bKithou5zDXkwkLi7VcOWK7TkNUpJ70ex37/Jf4CdRPYarzK8Y -QCWadiUfthCkQxZAGK6vy/MGL3ha+rzuZmpERiq275kmUK+qQ79rKh10j7b08PYE -Q9XZqDsWh3AyniZcjuf0KDH2J3ECgYEA4pNOmG0s7Xx6yJgLE4EKIIhHswnF01Tw -6v3HlQRymecl8D8DuX974dB9ihlM1scNPRcqz9kr0ZRn0gRUirI+VBMuilp8SGxF -CyUNoP9vnhYPJqeOIIv61XGno02HNzLjOC1oTFndYgAx5PH/wbbjqPCq64zVXpke -pi5zfhJrhV0CgYB3X1NYvBAZGoQQN6H3EXB7DxHYdf0iAjo0dFqIdU8gIS5YjI8H -n60Jtg4fdsJ3b4V3TemOLKLFPem0Xt5BtEALGB6wV41pjIE55otm8Fx0YA3+Jucg -f3+77oGyBIy+PyVUlnhhS8V187wYaOO8sKf2M+jAje+pFI5SgR+fN3B4UQKBgQDL -BJlBJVpDde2C/hG2qUJMwjAUseYLpam1ti32TaTuWFr7OUt6FEwJm8pRSrg6HuOy -4KZg1URZJ3Qbj/u8x1fn98QU/l0se68l/E6I+zilWzi6Nxq3+RWJ2awPPHGzOSq1 -9KHnAALJRElwzpKPnfPCNdvA8lFh0SvQVgDwb0relQKBgGCGh51jqQieVVowiYc5 -/1mPI4I3MLg4k1v/iAUQzINEX7xkCG+xx7tz5LzUM//nNeI/UiO/rVG6/dnLNhUP -ixVZG7LiRihtrPkUr+ecULCX+6RNDAi88X5z5EA8DSzZY41jpKrneowy2Wy16ZOq -LKaGtbH/tHj8UT/l8OYicgI1 +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCbk/WPIqqv46Nv +9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZShiO2o9iDC5O1A +edop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4nNY00x9PkCcoq +98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJXqaeRNXS0/j8W +wp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX5aJoBUDL22fL +Rhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHDTFGBx0p17I1g +0xjWNjMVAgMBAAECggEAa1GtWod3Zs2IuHm83h/feQPAggj4qksWB9EUJ1fBxnhN +zlF5clPUDu7m8KBMmPVBKgzipGLkRozUfF2R+Ldc3mjbDVaDE9K83aV66kU/7hT3 +5tCXxmNlVfADabpDiFCygCmYtogBypRWOT2hEtxrGVhdSJzDJNxSaPyEzAVWOW7P +w1Rf77yH7nMqxNUbJ/oGlEHOrWu/eBIKJnRK0P0yCkDFN3K4zK7TNg+HEQTbkCrk +NmQQLtmIcKPc+hC/MLxlw0PhlBJkS0ukPpIXGsRGdNBAzvyOQSZhrt4bz8mQvOzw +Ev2nJwKrRsCY+O97w3M95Le6z7ihpf2YnpJ80DRHIQKBgQDJMRmmArB04aMbAQwD +wFcxTFim1reE9+8DU3IbXXyg+h70CzuZ1DticZy4gB0bUamx4L18FQE6Cto6KVvd +DsaHo9e3phDlwvEkpN6WDDrp03vqkRTJiiijk0w2jB1LdQ1QRXGToCPOEvgdj+iv +artNlQnUZfBlWFbZAhDF5V9+gwKBgQDF9cylDEJsPfPgOalAiuw501JBMnuB/wKg +gtvmRmE9wTU2DBusLdlg6bBnhYZBY2qd419RSZPchuPyeqFcLAk2DGrZRXz7x0X0 +oS50CtAGgoSYiIObLr+vzRioiE66yTOfESUZW9zqLPBwC0WcPyE/BmhdFWMYlgvb +/E0Ex7LUhwKBgCu7DW0lJn+xT8ed2aOzGgCwLJDPGQLA9WXOrH3AO0euNi4rht/v +3pyOP1dnGWyHuo/cXrNqyWJ5W2fK95m8DXEMLuZPJVsbnViusWcB74hFnKimslMA +QccUTDuLBw8QuT0aaw6Af2fafa3HSvdeBqpdW86b/b25jt8KcOTi73fLAoGBAIYU +rzCXNiIkcBtOUtIRhmDAPjVEoRzygXAAUjsNLm3qgEUEPHYJc/PNNJtZeA6v7JXW +XEgtgsXaY2hoIQTSGscN8A0LoNTUKxC/Xzxf2nieTHsX87PXHSRQ0UPNVy3ye1Fh +BnS/oMjH+W6aY+Kpa7ZJe8SYaM9NSekNYYk8TWbfAoGBAMHSFnGo9V6BG1qh9xRl +9S+QpROy4xevFlIN6KGzPNcCjjzOf7WdePss2BZdHXRzxoKANI9qHX/5hNP8YwrU +4ofU8ObQ7YINk49we/VreTo+2VWKr7qtVHoi1rBUt57WcwhtZvdZ3hDLABQPHSfn 
+TVHLzAuMRhYfHg1uhdTa3HCB -----END PRIVATE KEY----- -# Certificate from jstests/libs/ca-2019.pem ------BEGIN CERTIFICATE----- -MIIDczCCAlugAwIBAgIBATANBgkqhkiG9w0BAQUFADB0MRcwFQYDVQQDEw5LZXJu -ZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25nb0RCMRYw -FAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazELMAkGA1UE -BhMCVVMwHhcNMTQwNzE3MTYwMDAwWhcNMjAwNzE3MTYwMDAwWjB0MRcwFQYDVQQD -Ew5LZXJuZWwgVGVzdCBDQTEPMA0GA1UECxMGS2VybmVsMRAwDgYDVQQKEwdNb25n -b0RCMRYwFAYDVQQHEw1OZXcgWW9yayBDaXR5MREwDwYDVQQIEwhOZXcgWW9yazEL -MAkGA1UEBhMCVVMwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCBxSXj -qA5y2EMQkcmvLDNikE88Og3+spJ3ex60HWVPk8EeXN68jyfbKLYsoCcBE2rBAE/N -shVBJa8irh0o/UTh1XNW4iGCsfMvYamXiHnaOjmGVKjfBoj6pzQH0uK0X5olm3Sa -zZPkLLCR81yxsK6woJZMFTvrlEjxj/SmDZ9tVXW692bC4i6nGvOCSpgv9kms85xO -Ed2xbuCLXFDXKafXZd5AK+iegkDs3ah7VXMEE8sbqGnlqC1nsy5bpCnb7aC+3af7 -SV2XEFlSQT5kwTmk9CvTDzM9O78SO8nNhEOFBLQEdGDGd3BShE8dCdh2JTy3zKsb -WeE+mxy0mEwxNfGfAgMBAAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEF -BQADggEBAANwbvhM5K/Jcl6yzUSqAawvyAypT5aWBob7rt9KFq/aemjMN0gY2nsS -8WTGd9jiXlxGc/TzrK6MOsJ904UAFE1L9uR//G1gIBa9dNbYoiii2Fc8b1xDVJEP -b23rl/+GAT6UTSY+YgEjeA4Jk6H9zotO07lSw06rbCQam5SdA5UiMvuLHWCo3BHY -8WzqLiW/uHlb4K5prF9yuTUBEIgkRvvvyOKXlRvm1Ed5UopT2hmwA86mffAfgJc2 -vSbm9/8Q00fYwO7mluB6mbEcnbquaqRLoB83k+WbwUAZ2yjWHXuXVMPwyaysazcp -nOjaLwQJQgKejY62PiNcw7xC/nIxBeI= ------END CERTIFICATE----- diff --git a/jstests/libs/ca.pem.digest.sha1 b/jstests/libs/ca.pem.digest.sha1 index dbe9e3898afc7..e1ec750dc4655 100644 --- a/jstests/libs/ca.pem.digest.sha1 +++ b/jstests/libs/ca.pem.digest.sha1 @@ -1 +1 @@ -F42B9419C2EF9D431D7C0E5061A82902D385203A \ No newline at end of file +D33E7C8B0748C66DBEEE6E24410FA72A47607DF3 \ No newline at end of file diff --git a/jstests/libs/ca.pem.digest.sha256 b/jstests/libs/ca.pem.digest.sha256 index 2cffe1b5da960..4ac5afdd90414 100644 --- a/jstests/libs/ca.pem.digest.sha256 +++ b/jstests/libs/ca.pem.digest.sha256 @@ -1 +1 @@ -21A1C6A87B31AF590F5074EE716F193522B8F540081A5D571B25AE5DF72863E3 \ No newline at end of file +6568E01751761F5EC6A07B050857C77DD2D2604CD05A70A62F7DDA14829C1077 \ No newline at end of file diff --git a/jstests/libs/catalog_shard_util.js b/jstests/libs/catalog_shard_util.js deleted file mode 100644 index b6bff02127c9c..0000000000000 --- a/jstests/libs/catalog_shard_util.js +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Utilities for testing config server catalog shard behaviors. 
- */ -var CatalogShardUtil = (function() { - load("jstests/libs/feature_flag_util.js"); - - function isEnabledIgnoringFCV(st) { - return FeatureFlagUtil.isEnabled( - st.configRS.getPrimary(), "CatalogShard", undefined /* user */, true /* ignoreFCV */); - } - - function isTransitionEnabledIgnoringFCV(st) { - return FeatureFlagUtil.isEnabled(st.configRS.getPrimary(), - "TransitionToCatalogShard", - undefined /* user */, - true /* ignoreFCV */); - } - - function transitionToDedicatedConfigServer(st, timeout) { - if (timeout == undefined) { - timeout = 10 * 60 * 1000; // 10 minutes - } - - assert.soon(function() { - const res = st.s.adminCommand({transitionToDedicatedConfigServer: 1}); - if (!res.ok && res.code === ErrorCodes.ShardNotFound) { - // If the config server primary steps down right after removing the config.shards - // doc for the shard but before responding with "state": "completed", the mongos - // would retry the _configsvrTransitionToDedicatedConfigServer command against the - // new config server primary, which would not find the removed shard in its - // ShardRegistry if it has done a ShardRegistry reload after the config.shards doc - // for the shard was removed. This would cause the command to fail with - // ShardNotFound. - return true; - } - assert.commandWorked(res); - return res.state == 'completed'; - }, "failed to transition to dedicated config server within " + timeout + "ms", timeout); - } - - function waitForRangeDeletions(conn) { - assert.soon(() => { - const rangeDeletions = conn.getCollection("config.rangeDeletions").find().toArray(); - if (rangeDeletions.length) { - print("Waiting for range deletions to complete: " + tojsononeline(rangeDeletions)); - sleep(100); - return false; - } - return true; - }); - } - - return { - isEnabledIgnoringFCV, - isTransitionEnabledIgnoringFCV, - transitionToDedicatedConfigServer, - waitForRangeDeletions, - }; -})(); diff --git a/jstests/libs/ce_stats_utils.js b/jstests/libs/ce_stats_utils.js index fa74012018274..daa5461c59e52 100644 --- a/jstests/libs/ce_stats_utils.js +++ b/jstests/libs/ce_stats_utils.js @@ -1,10 +1,16 @@ load('jstests/aggregation/extras/utils.js'); // For assertArrayEq. -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. + +import { + checkCascadesFeatureFlagEnabled, + extractLogicalCEFromNode, + getPlanSkeleton, + navigateToRootNode, +} from "jstests/libs/optimizer_utils.js"; /** * Returns a simplified skeleton of the physical plan including intervals & logical CE. */ -function summarizeExplainForCE(explain) { +export function summarizeExplainForCE(explain) { const node = getPlanSkeleton(navigateToRootNode(explain), { extraKeepKeys: ["interval", "properties"], printLogicalCE: true, @@ -15,7 +21,7 @@ function summarizeExplainForCE(explain) { /** * Extracts the cardinality estimate of the explain root node. */ -function getRootCE(explain) { +export function getRootCE(explain) { const rootNode = navigateToRootNode(explain); assert.neq(rootNode, null, tojson(explain)); assert.eq(rootNode.nodeType, "Root", tojson(rootNode)); @@ -25,7 +31,7 @@ function getRootCE(explain) { /** * Asserts that expected and actual are equal, within a small tolerance. 
*/ -function assertApproxEq(expected, actual, msg, tolerance = 0.01) { +export function assertApproxEq(expected, actual, msg, tolerance = 0.01) { assert(Math.abs(expected - actual) < tolerance, msg); } @@ -34,7 +40,7 @@ function assertApproxEq(expected, actual, msg, tolerance = 0.01) { * if the ce parameter is omitted, we expect our estimate to exactly match what the query actually * returns. */ -function verifyCEForMatch({coll, predicate, expected, ce, hint}) { +export function verifyCEForMatch({coll, predicate, expected, ce, hint}) { jsTestLog(`Verify CE for match ${tojson(predicate)}`); const CEs = ce ? [ce] : undefined; return verifyCEForMatchNodes( @@ -48,7 +54,7 @@ function verifyCEForMatch({coll, predicate, expected, ce, hint}) { * expected estimates should be defined in CEs, or it defaults to the number of documents expected * to be returned by the query. */ -function verifyCEForMatchNodes({coll, predicate, expected, getNodeCEs, CEs, hint}) { +export function verifyCEForMatchNodes({coll, predicate, expected, getNodeCEs, CEs, hint}) { // Run aggregation & verify query results. const options = hint ? {hint} : {}; const actual = coll.aggregate([{$match: predicate}], options).toArray(); @@ -76,11 +82,11 @@ function verifyCEForMatchNodes({coll, predicate, expected, getNodeCEs, CEs, hint /** * Creates a histogram for the given 'coll' along the input field 'key'. */ -function createHistogram(coll, key, options = {}) { +export function createHistogram(coll, key, options = {}) { // We can't use forceBonsai here because the new optimizer doesn't know how to handle the // analyze command. - assert.commandWorked( - coll.getDB().adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); + assert.commandWorked(coll.getDB().adminCommand( + {setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"})); // Set up histogram for test collection. const res = coll.getDB().runCommand(Object.assign({analyze: coll.getName(), key}, options)); @@ -90,7 +96,7 @@ function createHistogram(coll, key, options = {}) { /** * Validates that the generated histogram for the given "coll" has the expected type counters. */ -function createAndValidateHistogram({coll, expectedHistogram, empty = false, options = {}}) { +export function createAndValidateHistogram({coll, expectedHistogram, empty = false, options = {}}) { const field = expectedHistogram._id; createHistogram(coll, field, options); @@ -107,8 +113,8 @@ function createAndValidateHistogram({coll, expectedHistogram, empty = false, opt * estimation. This ensures that the appropriate flags/query knobs are set and ensures the state of * relevant flags is restored after the test. */ -function runHistogramsTest(test) { - if (!checkCascadesOptimizerEnabled(db)) { +export function runHistogramsTest(test) { + if (!checkCascadesFeatureFlagEnabled(db)) { jsTestLog("Skipping test because the optimizer is not enabled"); return; } @@ -140,7 +146,7 @@ function runHistogramsTest(test) { /** * Creates a single-field index for each field in the 'fields' array. */ -function createIndexes(coll, fields) { +export function createIndexes(coll, fields) { for (const field of fields) { assert.commandWorked(coll.createIndex({[field]: 1})); } @@ -149,17 +155,18 @@ function createIndexes(coll, fields) { /** * Creates statistics for each field in the 'fields' array. 
*/ -function analyzeFields(db, coll, fields, bucketCnt = 100) { +export function analyzeFields(db, coll, fields, bucketCnt = 100) { for (const field of fields) { assert.commandWorked( db.runCommand({analyze: coll.getName(), key: field, numberBuckets: bucketCnt})); } } + /** * Given a scalar histogram document print it combining bounds with the corresponding buckets. * hist = { buckets: [{boundaryCount: 1, rangeCount: 0, ...}], bounds: [100, 500]} */ -function printScalarHistogram(hist) { +export function printScalarHistogram(hist) { assert.eq(hist.buckets.length, hist.bounds.length); let i = 0; while (i < hist.buckets.length) { @@ -168,7 +175,7 @@ function printScalarHistogram(hist) { } } -function printHistogram(hist) { +export function printHistogram(hist) { jsTestLog(`Histogram on field: ${hist._id}`); print("Scalar Histogram:\n"); printScalarHistogram(hist.statistics.scalarHistogram); diff --git a/jstests/libs/change_stream_rewrite_util.js b/jstests/libs/change_stream_rewrite_util.js index a5b678ef3de07..574b848a1bfa2 100644 --- a/jstests/libs/change_stream_rewrite_util.js +++ b/jstests/libs/change_stream_rewrite_util.js @@ -7,7 +7,8 @@ load("jstests/libs/fixture_helpers.js"); // For isMongos. // Function which generates a write workload on the specified collection, including all events that // a change stream may consume. Assumes that the specified collection does not already exist. -function generateChangeStreamWriteWorkload(db, collName, numDocs, includInvalidatingEvents = true) { +export function generateChangeStreamWriteWorkload( + db, collName, numDocs, includInvalidatingEvents = true) { // If this is a sharded passthrough, make sure we shard on something other than _id so that a // non-id field appears in the documentKey. This will generate 'create' and 'shardCollection'. if (FixtureHelpers.isMongos(db)) { @@ -99,7 +100,8 @@ function generateChangeStreamWriteWorkload(db, collName, numDocs, includInvalida // Helper function to fully exhaust a change stream from the specified point and return all events. // Assumes that all relevant events can fit into a single 16MB batch. -function getAllChangeStreamEvents(db, extraPipelineStages = [], csOptions = {}, resumeToken) { +export function getAllChangeStreamEvents( + db, extraPipelineStages = [], csOptions = {}, resumeToken) { // Open a whole-cluster stream based on the supplied arguments. const csCursor = db.getMongo().watch( extraPipelineStages, @@ -121,12 +123,13 @@ function getAllChangeStreamEvents(db, extraPipelineStages = [], csOptions = {}, } // Helper function to check whether this value is a plain old javascript object. -function isPlainObject(value) { +export function isPlainObject(value) { return (value && typeof (value) == "object" && value.constructor === Object); } // Verifies the number of change streams events returned from a particular shard. -function assertNumChangeStreamDocsReturnedFromShard(stats, shardName, expectedTotalReturned) { +export function assertNumChangeStreamDocsReturnedFromShard( + stats, shardName, expectedTotalReturned) { assert(stats.shards.hasOwnProperty(shardName), stats); const stages = stats.shards[shardName].stages; const lastStage = stages[stages.length - 1]; @@ -134,7 +137,7 @@ function assertNumChangeStreamDocsReturnedFromShard(stats, shardName, expectedTo } // Verifies the number of oplog events read by a particular shard. 
-function assertNumMatchingOplogEventsForShard(stats, shardName, expectedTotalReturned) { +export function assertNumMatchingOplogEventsForShard(stats, shardName, expectedTotalReturned) { assert(stats.shards.hasOwnProperty(shardName), stats); assert.eq(Object.keys(stats.shards[shardName].stages[0])[0], "$cursor", stats); const executionStats = stats.shards[shardName].stages[0].$cursor.executionStats; @@ -145,7 +148,7 @@ function assertNumMatchingOplogEventsForShard(stats, shardName, expectedTotalRet } // Returns a newly created sharded collection sharded by caller provided shard key. -function createShardedCollection(shardingTest, shardKey, dbName, collName, splitAt) { +export function createShardedCollection(shardingTest, shardKey, dbName, collName, splitAt) { const db = shardingTest.s.getDB(dbName); assertDropAndRecreateCollection(db, collName); @@ -173,7 +176,7 @@ function createShardedCollection(shardingTest, shardKey, dbName, collName, split // 2. There are no additional events being returned other than the ones in the 'expectedResult'. // 3. the filtering is been done at oplog level, and each of the shard read only the // 'expectedOplogNReturnedPerShard' documents. -function verifyChangeStreamOnWholeCluster({ +export function verifyChangeStreamOnWholeCluster({ st, changeStreamSpec, userMatchExpr, @@ -235,13 +238,15 @@ function verifyChangeStreamOnWholeCluster({ verbosity: "executionStats" }); - assertNumMatchingOplogEventsForShard(stats, st.rs0.name, expectedOplogNReturnedPerShard[0]); - assertNumMatchingOplogEventsForShard(stats, st.rs1.name, expectedOplogNReturnedPerShard[1]); + assertNumMatchingOplogEventsForShard( + stats, st.shard0.shardName, expectedOplogNReturnedPerShard[0]); + assertNumMatchingOplogEventsForShard( + stats, st.shard1.shardName, expectedOplogNReturnedPerShard[1]); if (expectedChangeStreamDocsReturnedPerShard !== undefined) { assertNumChangeStreamDocsReturnedFromShard( - stats, st.rs0.name, expectedChangeStreamDocsReturnedPerShard[0]); + stats, st.shard0.shardName, expectedChangeStreamDocsReturnedPerShard[0]); assertNumChangeStreamDocsReturnedFromShard( - stats, st.rs1.name, expectedChangeStreamDocsReturnedPerShard[1]); + stats, st.shard1.shardName, expectedChangeStreamDocsReturnedPerShard[1]); } } diff --git a/jstests/libs/change_stream_util.js b/jstests/libs/change_stream_util.js index 20af14f28d38c..037a8227b4054 100644 --- a/jstests/libs/change_stream_util.js +++ b/jstests/libs/change_stream_util.js @@ -370,7 +370,7 @@ function ChangeStreamTest(_db, name = "ChangeStreamTest") { * If the current batch has a document in it, that one will be ignored. */ self.getOneChange = function(cursor, expectInvalidate = false) { - changes = self.getNextChanges(cursor, 1, true); + const changes = self.getNextChanges(cursor, 1, true); if (expectInvalidate) { assert(isInvalidated(changes[changes.length - 1]), diff --git a/jstests/libs/check_metadata_consistency_helpers.js b/jstests/libs/check_metadata_consistency_helpers.js index df619b3077c5b..303a6a18cd82c 100644 --- a/jstests/libs/check_metadata_consistency_helpers.js +++ b/jstests/libs/check_metadata_consistency_helpers.js @@ -1,8 +1,6 @@ -'use strict'; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -load('jstests/libs/feature_flag_util.js'); // For FeatureFlagUtil. 
-
-var MetadataConsistencyChecker = (function() {
+export var MetadataConsistencyChecker = (function() {
     const run = (mongos) => {
         const adminDB = mongos.getDB('admin');
 
@@ -17,6 +15,31 @@ var MetadataConsistencyChecker = (function() {
             return;
         }
 
+        // The isTransientError() function decides whether an error is transient; on a transient
+        // error the metadata consistency check is aborted, to be retried in the future.
+        const isTransientError = function(e) {
+            if (ErrorCodes.isRetriableError(e.code) || ErrorCodes.isInterruption(e.code)) {
+                return true;
+            }
+
+            // TODO SERVER-78117: Remove once checkMetadataConsistency command is robust to
+            // ShardNotFound
+            if (e.code === ErrorCodes.ShardNotFound) {
+                // Metadata consistency check can fail with ShardNotFound if the router's
+                // ShardRegistry reloads after choosing which shards to target and a chosen
+                // shard is no longer in the cluster.
+                return true;
+            }
+
+            if (e.code === ErrorCodes.FailedToSatisfyReadPreference) {
+                // Metadata consistency check can fail with FailedToSatisfyReadPreference error
+                // response when the primary of the shard is permanently down.
+                return true;
+            }
+
+            return false;
+        };
+
         const checkMetadataConsistency = function() {
             jsTest.log('Started metadata consistency check');
 
@@ -40,7 +63,7 @@ var MetadataConsistencyChecker = (function() {
         try {
             checkMetadataConsistency();
         } catch (e) {
-            if (ErrorCodes.isRetriableError(e.code) || ErrorCodes.isInterruption(e.code)) {
+            if (isTransientError(e)) {
                 jsTest.log(`Aborted metadata consistency check due to retriable error: ${e}`);
             } else {
                 throw e;
diff --git a/jstests/libs/check_orphans_are_deleted_helpers.js b/jstests/libs/check_orphans_are_deleted_helpers.js
index 6233872842ce4..0cee0c5c9d6c1 100644
--- a/jstests/libs/check_orphans_are_deleted_helpers.js
+++ b/jstests/libs/check_orphans_are_deleted_helpers.js
@@ -46,14 +46,12 @@ var CheckOrphansAreDeletedHelpers = (function() {
                             adminDB
                                 .aggregate([
                                     {$currentOp: {idleCursors: true, allUsers: true}},
-                                    {$match: {type: 'idleCursor', ns: ns}}
+                                    {$match: {type: 'idleCursor'}}
                                 ])
                                 .toArray();
-                        print("Idle cursors on " + ns + " @ " + shardId + ": " +
-                              tojson(idleCursors));
+                        print("Idle cursors on shard " + shardId + ": " + tojson(idleCursors));
                     } catch (e) {
-                        print("Failed to get idle cursors for " + ns + " @ " + shardId + ": " +
-                              tojson(e));
+                        print("Failed to get idle cursors on shard " + shardId + ": " + tojson(e));
                     }
 
                     return 'timed out waiting for rangeDeletions on ' + ns + ' to be empty @ ' +
diff --git a/jstests/libs/check_routing_table_consistency_helpers.js b/jstests/libs/check_routing_table_consistency_helpers.js
index 14bea76173e8d..071c6dc3f24f3 100644
--- a/jstests/libs/check_routing_table_consistency_helpers.js
+++ b/jstests/libs/check_routing_table_consistency_helpers.js
@@ -1,6 +1,6 @@
-'use strict';
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
 
-var RoutingTableConsistencyChecker = (function() {
+export var RoutingTableConsistencyChecker = (function() {
     const sameObjectFields = (lhsObjFields, rhsObjFields) => {
         if (lhsObjFields.length !== rhsObjFields.length) {
            return false;
@@ -81,6 +81,135 @@ var RoutingTableConsistencyChecker = (function() {
         return true;
     };
 
+    /**
+     * Reproduces the logic implemented in ShardingCatalogManager::initializePlacementHistory()
+     * to compute the placement of each existing collection and database by reading the content of
+     * - config.collections + config.chunks
+     * - config.databases.
+ * The output format follows the same schema of config.placementHistory; results are ordered by + * namespace. + **/ + const buildCurrentPlacementData = (mongos) => { + const pipeline = [ + { + $lookup: { + from: "chunks", + localField: "uuid", + foreignField: "uuid", + as: "timestampByShard", + pipeline: [ + { + $group: { + _id: "$shard", + value: { + $max: "$onCurrentShardSince" + } + } + } + ], + } + }, + { + $project: { + _id: 0, + nss: "$_id", + shards: "$timestampByShard._id", + uuid: 1, + timestamp: { + $max: "$timestampByShard.value" + }, + } + }, + { + $unionWith: { + coll: "databases", + pipeline: [ + { + $project: { + _id: 0, + nss: "$_id", + shards: [ + "$primary" + ], + timestamp: "$version.timestamp" + } + } + ] + } + }, + { + $sort: { + nss: 1 + } + } + ]; + + return mongos.getDB('config').collections.aggregate(pipeline); + }; + + /** + * Extracts from config.placementHistory the most recent document for each collection + * and database. Results are ordered by namespace. + */ + const getHistoricalPlacementData = (mongos, atClusterTime) => { + const kConfigPlacementHistoryInitializationMarker = ''; + const pipeline = [ + { + $match: { + // Skip documents containing initialization metadata + nss: {$ne: kConfigPlacementHistoryInitializationMarker}, + timestamp: {$lte: atClusterTime} + } + }, + { + $group: { + _id: "$nss", + placement: {$top: {output: "$$CURRENT", sortBy: {"timestamp": -1}}} + } + }, + // Disregard placement entries on dropped namespaces + {$match: {"placement.shards": {$not: {$size: 0}}}}, + {$replaceRoot: {newRoot: "$placement"}}, + {$sort: {nss: 1}} + ]; + return mongos.getDB('config').placementHistory.aggregate(pipeline); + }; + + const checkHistoricalPlacementMetadataConsistency = (mongos) => { + const placementDataFromRoutingTable = buildCurrentPlacementData(mongos); + const now = mongos.getDB('admin').runCommand({isMaster: 1}).operationTime; + const historicalPlacementData = getHistoricalPlacementData(mongos, now); + + placementDataFromRoutingTable.forEach(function(nssPlacementFromRoutingTable) { + assert(historicalPlacementData.hasNext(), + `Historical placement data on ${nssPlacementFromRoutingTable.nss} is missing`); + const historicalNssPlacement = historicalPlacementData.next(); + assert.eq(nssPlacementFromRoutingTable.nss, + historicalNssPlacement.nss, + 'Historical placement data does not contain the expected number of entries'); + assert.sameMembers(nssPlacementFromRoutingTable.shards, + historicalNssPlacement.shards, + `Inconsistent placement info detected: routing table ${ + tojson(nssPlacementFromRoutingTable)} VS placement history ${ + tojson(historicalNssPlacement)}`); + + assert.eq(nssPlacementFromRoutingTable.uuid, + historicalNssPlacement.uuid, + `Inconsistent placement info detected: routing table ${ + tojson(nssPlacementFromRoutingTable)} VS placement history ${ + tojson(historicalNssPlacement)}`); + // Timestamps are not compared, since they are expected to diverge if a chunk + // migration, collection rename or a movePrimary request have been executed during + // the test. + }); + + if (historicalPlacementData.hasNext()) { + assert(false, + `Unexpected historical placement entries: ${ + tojson(historicalPlacementData.toArray())}`); + } + }; + const run = (mongos) => { try { jsTest.log('Checking routing table consistency'); @@ -101,6 +230,7 @@ var RoutingTableConsistencyChecker = (function() { `Corrupted routing table detected for ${collData._id}! 
Details: ${
                            tojson(collData)}`);
             });
+            jsTest.log('Routing table consistency check completed');
         } catch (e) {
             if (e.code !== ErrorCodes.Unauthorized) {
                 throw e;
@@ -108,7 +238,19 @@ var RoutingTableConsistencyChecker = (function() {
             jsTest.log(
                 'Skipping check of routing table consistency - access to admin collections is not authorized');
         }
-        jsTest.log('Routing table consistency check completed');
+
+        try {
+            jsTest.log('Checking consistency of config.placementHistory against the routing table');
+            checkHistoricalPlacementMetadataConsistency(mongos);
+            jsTest.log('config.placementHistory consistency check completed');
+
+        } catch (e) {
+            if (e.code !== ErrorCodes.Unauthorized) {
+                throw e;
+            }
+            jsTest.log(
+                'Skipping consistency check of config.placementHistory - access to admin collections is not authorized');
+        }
     };
 
     return {
diff --git a/jstests/libs/check_shard_filtering_metadata_helpers.js b/jstests/libs/check_shard_filtering_metadata_helpers.js
index 970be5e081dfe..987fad37a3d1b 100644
--- a/jstests/libs/check_shard_filtering_metadata_helpers.js
+++ b/jstests/libs/check_shard_filtering_metadata_helpers.js
@@ -3,6 +3,10 @@ var CheckShardFilteringMetadataHelpers = (function() {
     function run(mongosConn, nodeConn, shardId, skipCheckShardedCollections = false) {
         function checkDatabase(configDatabasesEntry) {
+            // No shard other than the db-primary shard may believe it is the db-primary. Non
+            // db-primary shards are allowed to have a stale notion of the dbVersion, as long as
+            // they believe they are not the primary.
+
             const dbName = configDatabasesEntry._id;
             print(`CheckShardFilteringMetadata: checking database '${dbName}' on node '${
                 nodeConn.host}' of shard '${shardId}'`);
@@ -10,28 +14,59 @@ var CheckShardFilteringMetadataHelpers = (function() {
             const nodeMetadata =
                 assert.commandWorked(nodeConn.adminCommand({getDatabaseVersion: dbName}));
 
+            // Skip this test if isPrimaryShardForDb is not present. Multiversion incompatible.
+            if (nodeMetadata.dbVersion.isPrimaryShardForDb === undefined) {
+                return;
+            }
+
             if (nodeMetadata.dbVersion.timestamp === undefined) {
-                // Shards are allowed to not know the dbVersion.
+                // Node has no knowledge of the database.
                 return;
             }
 
-            assert.eq(nodeMetadata.dbVersion.uuid,
-                      configDatabasesEntry.version.uuid,
-                      `Unexpected dbVersion.uuid for db '${dbName}' on node '${nodeConn.host}'`);
-            assert.eq(timestampCmp(nodeMetadata.dbVersion.timestamp,
-                                   configDatabasesEntry.version.timestamp),
-                      0,
-                      `Unexpected dbVersion timestamp for db '${dbName}' on node '${
-                          nodeConn.host}'. Found '${
-                          tojson(nodeMetadata.dbVersion.timestamp)}'; expected '${
-                          tojson(configDatabasesEntry.version.timestamp)}'`);
-            assert.eq(nodeMetadata.dbVersion.lastMod,
-                      configDatabasesEntry.version.lastMod,
-                      `Unexpected dbVersion lastMod for db '${dbName}' on node '${nodeConn.host}'`);
+            assert.eq(
+                configDatabasesEntry.primary === shardId,
+                nodeMetadata.isPrimaryShardForDb,
+                `Unexpected isPrimaryShardForDb for db '${dbName}' on node '${nodeConn.host}'`);
+
+            // If the node is the primary shard for the database, it should know the correct
+            // database version.
+            if (configDatabasesEntry.primary === shardId) {
+                assert.eq(
+                    nodeMetadata.dbVersion.uuid,
+                    configDatabasesEntry.version.uuid,
+                    `Unexpected dbVersion.uuid for db '${dbName}' on node '${nodeConn.host}'`);
+                assert.eq(timestampCmp(nodeMetadata.dbVersion.timestamp,
+                                       configDatabasesEntry.version.timestamp),
+                          0,
+                          `Unexpected dbVersion timestamp for db '${dbName}' on node '${
+                              nodeConn.host}'. 
Found '${
+                              tojson(nodeMetadata.dbVersion.timestamp)}'; expected '${
+                              tojson(configDatabasesEntry.version.timestamp)}'`);
+                assert.eq(
+                    nodeMetadata.dbVersion.lastMod,
+                    configDatabasesEntry.version.lastMod,
+                    `Unexpected dbVersion lastMod for db '${dbName}' on node '${nodeConn.host}'`);
+            }
 
             print(`CheckShardFilteringMetadata: Database '${dbName}' on '${nodeConn.host}' OK`);
         }
 
+        function getPrimaryShardForDB(dbName) {
+            if (dbName == 'config') {
+                return 'config';
+            }
+
+            const configDB = mongosConn.getDB('config');
+
+            const dbEntry = configDB.databases.findOne({_id: dbName});
+            assert(dbEntry, `Couldn't find database '${dbName}' in 'config.databases'`);
+            assert(dbEntry.primary,
+                   `Database entry for db '${dbName}' does not contain primary shard: ${
+                       tojson(dbEntry)}`);
+            return dbEntry.primary;
+        }
+
         function checkShardedCollection(coll, nodeShardingState) {
             const ns = coll._id;
             print(`CheckShardFilteringMetadata: checking collection '${ns} ' on node '${
@@ -39,13 +74,13 @@ var CheckShardFilteringMetadataHelpers = (function() {
 
             const configDB = mongosConn.getDB('config');
 
+            const dbName = mongosConn.getCollection(ns).getDB().getName();
+            const primaryShardId = getPrimaryShardForDB(dbName);
             const highestChunkOnShard = configDB.chunks.find({uuid: coll.uuid, shard: shardId})
                                             .sort({lastmod: -1})
                                             .limit(1)
                                             .toArray()[0];
 
-            const expectedShardVersion =
-                highestChunkOnShard ? highestChunkOnShard.lastmod : Timestamp(0, 0);
             const expectedTimestamp = coll.timestamp;
 
             const collectionMetadataOnNode = nodeShardingState.versions[ns];
@@ -55,25 +90,29 @@ var CheckShardFilteringMetadataHelpers = (function() {
                 return;
             }
 
-            if (collectionMetadataOnNode.timestamp === undefined) {
-                // Versions earlier than v6.3 did not report the timestamp on shardingState command
-                // (SERVER-70790). This early exit can be removed after v6.0 is no longer tested in
-                // multiversion suites.
-                return;
-            }
-
-            if (timestampCmp(collectionMetadataOnNode.timestamp, Timestamp(0, 0)) === 0) {
-                // The metadata reflects an unsharded collection. It is okay for a node to have this
-                // stale metadata, as long as the node knows the correct dbVersion.
+            // TODO BACKPORT-15533: re-enable the following checks in multiversion suites
+            const isMultiversion = jsTest.options().shardMixedBinVersions ||
+                jsTest.options().useRandomBinVersionsWithinReplicaSet;
+            if (isMultiversion ||
+                (shardId != getPrimaryShardForDB(dbName) && !highestChunkOnShard)) {
+                // The shard is neither the primary shard for the database nor does it own
+                // any chunks for this collection.
+                // In this case the shard is allowed to have stale or wrong collection
+                // metadata, as long as it has the correct db version.
                 return;
             }
 
-            // If the node knows its filtering info, then assert that it is correct.
+            // Check that the timestamp is correct.
             assert.eq(timestampCmp(collectionMetadataOnNode.timestamp, expectedTimestamp),
                       0,
                       `Unexpected timestamp for ns '${ns}' on node '${nodeConn.host}'. Found '${
                           tojson(collectionMetadataOnNode.timestamp)}', expected '${
                           tojson(expectedTimestamp)}'`);
+
+            // Check that the placement version is correct.
+            const expectedShardVersion =
+                highestChunkOnShard ? highestChunkOnShard.lastmod : Timestamp(0, 0);
+
             // Only check the major version because some operations (such as resharding or
             // setAllowMigrations) bump the minor version without the shards knowing. This does not
             // affect placement, so it is okay.
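A minimal sketch of the "major version only" comparison described in the comment above; the helper name majorPlacementVersionsMatch is hypothetical and not part of this change, and it assumes the placement version is a shell Timestamp whose 't' field carries the major component and 'i' the minor increment:

function majorPlacementVersionsMatch(expectedVersion, versionOnNode) {
    // Compare only the major ('t') component; the minor ('i') increment may legitimately
    // differ between the config server and the shard (e.g. after setAllowMigrations).
    return expectedVersion.t === versionOnNode.t;
}
// Example: majorPlacementVersionsMatch(Timestamp(5, 0), Timestamp(5, 3)) returns true.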
@@ -87,10 +126,9 @@ var CheckShardFilteringMetadataHelpers = (function() { const configDB = mongosConn.getDB('config'); // Check shards know correct database versions. - // TODO: SERVER-73991 Reenable this check. - // configDB.databases.find({primary: shardId}).forEach(configDatabasesEntry => { - // checkDatabase(configDatabasesEntry); - // }); + configDB.databases.find().forEach(configDatabasesEntry => { + checkDatabase(configDatabasesEntry); + }); // Check that shards have correct filtering metadata for sharded collections. if (!skipCheckShardedCollections) { diff --git a/jstests/libs/check_unique_indexes.js b/jstests/libs/check_unique_indexes.js index abbb9bb536ff8..38e33baeb4bd0 100644 --- a/jstests/libs/check_unique_indexes.js +++ b/jstests/libs/check_unique_indexes.js @@ -12,7 +12,7 @@ function checkUniqueIndexFormatVersion(adminDB) { if (!isWiredTiger) return; - res = assert.commandWorked(adminDB.runCommand({"listDatabases": 1})); + let res = assert.commandWorked(adminDB.runCommand({"listDatabases": 1})); let databaseList = res.databases; databaseList.forEach(function(database) { diff --git a/jstests/libs/chunk_manipulation_util.js b/jstests/libs/chunk_manipulation_util.js index a4afda3e9374b..d27fd97e8eaf4 100644 --- a/jstests/libs/chunk_manipulation_util.js +++ b/jstests/libs/chunk_manipulation_util.js @@ -2,7 +2,7 @@ // Utilities for testing chunk manipulation: moveChunk, mergeChunks, etc. // -load('./jstests/libs/test_background_ops.js'); +load('jstests/libs/test_background_ops.js'); // // Start a background moveChunk. diff --git a/jstests/libs/client-all-the-oids.pem b/jstests/libs/client-all-the-oids.pem index 0ae372303c80a..4f65f2d9f466a 100644 --- a/jstests/libs/client-all-the-oids.pem +++ b/jstests/libs/client-all-the-oids.pem @@ -3,10 +3,10 @@ # # Client certificate with a long list of OIDs. Ensures the server functions well in unexpected circumstances. 
-----BEGIN CERTIFICATE----- -MIIG4zCCBcugAwIBAgIEUZgH9zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIG4zCCBcugAwIBAgIEZ+LS5zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjCCA/ExEDAO +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM2WhcNMjUwOTEwMTQyODM2WjCCA/ExEDAO BgNVBAMMB0RhdHVtLTMxEDAOBgNVBAQMB0RhdHVtLTQxEDAOBgNVBAUTB0RhdHVt LTUxCzAJBgNVBAYTAlVTMRAwDgYDVQQHDAdEYXR1bS03MQswCQYDVQQIDAJOWTEQ MA4GA1UECQwHRGF0dW0tOTERMA8GA1UECgwIRGF0dW0tMTAxETAPBgNVBAsMCERh @@ -28,44 +28,44 @@ ETAPBgNVBC8MCERhdHVtLTQ3MREwDwYDVQQwDAhEYXR1bS00ODERMA8GA1UEMQwI RGF0dW0tNDkxETAPBgNVBDIMCERhdHVtLTUwMREwDwYDVQQzDAhEYXR1bS01MTER MA8GA1UENAwIRGF0dW0tNTIxETAPBgNVBDUMCERhdHVtLTUzMREwDwYDVQQ2DAhE YXR1bS01NDERMA8GA1UEQQwIRGF0dW0tNjUxETAPBgNVBEgMCERhdHVtLTcyMIIB -IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvuPee340G2hE/gE8HHnzqJkH -Q4FqJv4TuzYcH1b4XyozDGAUTXaJo+/vJOcuh3WUJbSKfFRipq6JC85QZutRkfZX -v/+VzJz95KJwwMTfg6yTx36Hn0+MlTps1tuwsvOClqRsAZ+KMqbyO3gQy3LzykOE -BBHvYweC+qVa9oZdIO1/OqHySdHSPZKsKrJRny0f604wYFtB+zWHtO7nbNG4QCzr -vjyox4IidDX6KPJ1f4CsjeIxSn0qtldEKADGHmEfpzGILaLHo2G1xEj0jhdahfve -+BIpoJOTWbho0iDfAqE7P058ssM6XjX4y4dQ3tWhguRXI4pI5LsH5cubYJjA4wID -AQABMA0GCSqGSIb3DQEBCwUAA4IBAQA4NnTRTEWrEYHQQ4AL9x6m1iT2zr3k3N67 -uwDQRiXqggr2Vtqz75N6DekU4KvsTpUFvl74HWTAZUKjjtryS/1sQIs9nSld/oEv -iRYNoaXYTwI4Osng2LVC6uOZd5fAnqkbN3RdhbpqzwVBq/UPJgYC28mD2Wbn2axa -wBOxR+RfJ7e53jwiTBBVHv9cO+3MqFvLeu4yMswUenN6dywL5VtkmjUWtzvrvWMr -DL0eCmrdacYhbT/oRYRvgD6A72gI5SOBQ4sU+5t1fcYMkaRDr7woqh3/mY4LfthM -Ya0joUdTxUqxSbPg4DjQNvawAFeI/KtCjDxjQulMtl7gs7Kqgmsb +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuPLQteDh5gvx/70joj9m21HB +On6LdiuaOwDg9aK6lWSe0RU1uzGGmQPtLM6mArQZ/2RXwhl6WLvVJLC1PC6ZAmbp +fOgMnWyF4nNElQTfC6jQctc+TaRMtXyq6A1Mt9z6JSwmEgGnMInTeoSINNyDWzhF +Pez+BkPDdIYDv+qNUzM/fRflq7PoRgdABku+1XX4piJpJG3rTNJT5FmexlAZf7bc +KUtwtcdoyBwIZWICGz26yFTry8/X0TEWuVlsIutBwTgzSIP/yEilcdZU2IZZYlhM +Oeis2JngFD3JEIRhb1di5CHVHUBKhy8GkiZNtnjk1/aavyFvdw7rnyOOKT9tKwID +AQABMA0GCSqGSIb3DQEBCwUAA4IBAQBqPLE1+AEIb8wwcdChqjyMTc3dSg9jSPxj +IAWpRncsGR4OxzaJB1iDsWzyFVAJXz32L6ptjPLE/tk0DKnsvzFLttYj8jagLXMF +OCTI6LczZDl1aPUVUFp+2BdYUPstr84glY+ptA7ZR6xlZOYnzmcAyGOo+aSeJuIp +f0C7KeJNYsI2kO+FAJlOghwMsErQJzSF97Sb99nFm8t1lnk80q0Doo8mkuh1Dpot +iRz/iYCODRHyh0owGeazvSS2WO/urC/AL1siTvAdJiWuejXb67dR2KGk0v4mYS6y +7bOlbdnicaMNyz5T7/TOzCRJweYwTBlzBtdoa8OtXex5zSxnMwGb -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC+4957fjQbaET+ -ATwcefOomQdDgWom/hO7NhwfVvhfKjMMYBRNdomj7+8k5y6HdZQltIp8VGKmrokL -zlBm61GR9le//5XMnP3konDAxN+DrJPHfoefT4yVOmzW27Cy84KWpGwBn4oypvI7 -eBDLcvPKQ4QEEe9jB4L6pVr2hl0g7X86ofJJ0dI9kqwqslGfLR/rTjBgW0H7NYe0 -7uds0bhALOu+PKjHgiJ0Nfoo8nV/gKyN4jFKfSq2V0QoAMYeYR+nMYgtosejYbXE -SPSOF1qF+974Eimgk5NZuGjSIN8CoTs/TnyywzpeNfjLh1De1aGC5Fcjikjkuwfl -y5tgmMDjAgMBAAECggEAeHU7FBrTrufhgZgt4ppiU+YdW0/zOJ/ku6KtpGkxWnw6 -snh+11MSEE18T4FDL1/XGOQQ79wgVKaW/Mg4nHmqg708KoCSewgmf3yyQjL6CRGC -P8Vst+9u+0xfGkaP/p0DryQas/uFLemDultN3dSwWta4HAllUsyOrrRh7mdjpT6h -YyRqVN9eo8pLApsQ4Ahow4Ut00bdSi32eOEy1VRhLCMnNa/Z7Fb4evczN3AOH+qv -3CINABoMG0MJcluYN3W9qutSCz7vhG20r8Nxf3zlPg1CC0eJCP2vArlmgPHDNVN4 -F8QbNUCVdnNOfvtd35Y04Ud3jHtpxO0RDBgTlc2goQKBgQDzJ7ek394psAhw9ytp -3XFAm0zkjr/3voF9MmCKxCSS1p8WSfjYZ9pJ2GOdiY648oF42kwS9kaUTIKONeCr -m3pNz1ndZtMbzkmMBXUKEf0Kl78igyEY+4D0gsfY1lBkzVIiypppS3M0CUKudkFL -czYIkCRHYds//oodGC2K4CxDFQKBgQDI+VnjrZZWsxI/rWkfGjITFmAFnp7HGTOB 
-Md0A4WIOdL5rDXbycW2g+/5ya56h4j4evJALjA3nkhJ77Eza1Tf0CJCyfGyclXO4 -jjjOwm7Q0O4SMxGS4eSKe94bsXs5wxJZVVrxpXzfyRj+szQp2YkJoxGE0zh95Bf4 -lnGfAxpSFwKBgDQd/DfDoBuxVm3YHJ8JTr/5SYbnre/NDnYmORklJ22twNWHL2Y7 -BEe1sMxQcp3jpKqhp5Kc5M3ehFE07R37KkDJQ8q3wmIAWjU/6jEpX+JIWjhsgMiZ -B6/g5DLu5LZmZ8d7Q5N0D5JEtd0tDZu9awR02MHQEK1rwnCwAPr2R6ZtAoGBAKu/ -5KDdLRiEyjie8rJAycBkI8VJrDlMAScMvOjOs+TUwhJsnFVOuDiWgfm4nQWNejb8 -QEGN/CgFPkSnSXrOMpYbZ8UQ7iTW1+FFsaiIovlTQ9FL0V3sLEpo1wRlpYBUg+7S -MflGyrPYgMLR1Oda33Db6dHQTHvRnOa1cv5IQYsbAoGBANbD8okVET/Ct6FjO7xd -LCB/SLYQP6yZ2ASuOSCznxuCIUIpIehklTYolBvcaov36dFwJ4h2tvfOBWwUfkc3 -IDKdsxO8O6r6605EkbqPLRQwXJn4F9WXl7rgsFTRdP2Vx9KvZ/gfsNzaREuy73cQ -RE+ZlgSlISIgYJeBScwFU9UA +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC48tC14OHmC/H/ +vSOiP2bbUcE6fot2K5o7AOD1orqVZJ7RFTW7MYaZA+0szqYCtBn/ZFfCGXpYu9Uk +sLU8LpkCZul86AydbIXic0SVBN8LqNBy1z5NpEy1fKroDUy33PolLCYSAacwidN6 +hIg03INbOEU97P4GQ8N0hgO/6o1TMz99F+Wrs+hGB0AGS77VdfimImkkbetM0lPk +WZ7GUBl/ttwpS3C1x2jIHAhlYgIbPbrIVOvLz9fRMRa5WWwi60HBODNIg//ISKVx +1lTYhlliWEw56KzYmeAUPckQhGFvV2LkIdUdQEqHLwaSJk22eOTX9pq/IW93Duuf +I44pP20rAgMBAAECggEATcGAL8cxEgeZoXxCW/ct5i00AMStG2xvdjkDWyIkMaTR +BHRXV7EVtoUpFLp4nxBbEb19C9SsVFv722YTfA7XM2RS67mffBvhGfh+L+yRXZSG +tpF51yipO3ADZnYY+AAGhtRN2SoqwURgzdSkcxz2eMZqjgNyNO4OYZHqR6hz7DMb +ihA0rKhDhdSNkMPnS2gHtoT3bi/+tzGDHwxevXgbO2cka/9UzdgTyzAxzfdxJpY+ +TS0Qx9HYuA8GXveIa/4ajXdbgGAJUknMQZDHj5yBwZkjCwNSVKhYhtggFR4Kmucr +wMz3P2TjlhbIoiR6VpjfT2qJJ6rHFRYpBw20zkJMUQKBgQDfbYoiqcYtP8LuVmw7 +2ohMgvvW8p8UmFqwXLF+rWVHJ7oVBR3cjEFiwqKH+hsHTsDHM0JpY5B8XPPnUcV1 +kW7eYpoqMZau+Qmjo7LPD7/CxjkN/9rZxwGRi+DT0QgaMQs+h+be4f+ywZhcUTOF +1jXxciAMAvofd817uonrS48+aQKBgQDT6TO97TD4HPbreyQp5LgulJb3jPgXqssI +lbzT/qcLlPsGA3KO8F2CDmTuV3A0Hab3nr2zKx1uHJQ9VQieobQTD4NVO7lyH+39 +/hJXjpf9V2yMjzHtRSLa7hOym/8jTjl/cSgcDXIMzcVIEK37/7XRjB6Fkm1vdVNi +94m9g27EcwKBgBeQdgEx5rTfBLAebUviwH1RUz5YWG7Torz3CcFSiFv41Kdi4sNg +B4VjAcFVn4Qlyi84OMjkt1hmdVLwTOMZc5gAzkGk+T97BIk9up4cCx1/yoSvd2WA +l6nCFvRAF5qrkYQG0VKwg0dXI7qY1dZHwWi1oKujpL2s5P8nrWQjk/gBAoGAeqbD +V+5tJW4+TFVdvRGwuOUv6+AfyDMGiNWCEYGW1dFI6VYdfDYLKTeDm5/oRYHUHgY2 +7fjKc+z+r/EDqC0XuOCFt1N2JmPsr9Hac+sIdY2gOkq3LwmQ/v5FUF+R8LFZ/jVU +bcgqdtaylN9ylCSQ69QZ5l22EYjq1qFHi/UCq5kCgYEAiGUNKMCsYycBX/kkjkMg +pO/hl/pDEHSL2Dz5nFjF22u/zPRT2XjKYoBAPHGFJT4cYLmrJaV7W/mLUBSKwkI5 +74cEdgJwUmGanM+gu3+tTmKeUTqK/5U8j+bKLH5/IOvESdNeLydSTp0v4Sxxfwwz +FfhUTXW/SmdW3z1KRSWrO70= -----END PRIVATE KEY----- diff --git a/jstests/libs/client-all-the-oids.pem.digest.sha1 b/jstests/libs/client-all-the-oids.pem.digest.sha1 index f26c9661cee11..47fd432582441 100644 --- a/jstests/libs/client-all-the-oids.pem.digest.sha1 +++ b/jstests/libs/client-all-the-oids.pem.digest.sha1 @@ -1 +1 @@ -71FD2BBCC95D54BCD6BCA7AEB1E82A0605944A64 \ No newline at end of file +F01FF9A9A4126740BF256680D0E2AD86AE4543CF \ No newline at end of file diff --git a/jstests/libs/client-all-the-oids.pem.digest.sha256 b/jstests/libs/client-all-the-oids.pem.digest.sha256 index daf94063c5e22..2a9facbe494f5 100644 --- a/jstests/libs/client-all-the-oids.pem.digest.sha256 +++ b/jstests/libs/client-all-the-oids.pem.digest.sha256 @@ -1 +1 @@ -D4A432248459C54976437EAD1F2C970FE9448A687492BBE9989C13FAF3510065 \ No newline at end of file +F43F108A87484B8D256BF53482926E1B338E23CC14E7ACC3B5E11978321F4859 \ No newline at end of file diff --git a/jstests/libs/client-custom-oids.pem b/jstests/libs/client-custom-oids.pem index bd32651276be4..c417c95c16bb6 100644 --- a/jstests/libs/client-custom-oids.pem +++ b/jstests/libs/client-custom-oids.pem @@ -1,53 +1,53 @@ -# Autogenerated file, do not edit. 
-# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml client-custom-oids.pem +# Hand-generated file, do not regenerate. See jstests/ssl/x509/certs.yml for details. # -# Client certificate using non-standard OIDs. +# Client certificate using non-standard OIDs. DO NOT regenerate without consulting the Server Security team first. -----BEGIN CERTIFICATE----- -MIIDjDCCAnSgAwIBAgIEcLf2wDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDjjCCAnagAwIBAgIEUS1DzjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjCBmzELMAkG -A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD -aXR5MRAwDgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYD -VQQDDAZjbGllbnQxEzARBgMqAzgMClJhbmRvVmFsdWUxFDASBgMqAy0MC1ZhbHVl -LFJhbmRvMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAp+L/ayb/DnKq -X5wjq2N9gbw+XhJAOKw2LJiCkHhcYbC+4ARO0DqKU+ptBLncKZ0BpBYVCSFIm//u -gapPHEtXNLztBj81UV2aHOwF/XgotCIGwkJxVPKALO+87xYQ0zPMKBqP890XPidC -d0KY/ItV36JOAzKa8ZmNZR/ChZvDMClT4iHwpEQ6FCMGaXJTqBA+vNiIn+tIc7Y1 -ZHgA3iTww+ruKC0u2pQdla+O/ImL/EWxCDtYcwKC4V64MWJ3RliUPoP88EB9i34g -e1EH1r37QB9GRP4iX27TVZ07+cplHyNoOiVvZ+tVEzfQPDCZfxHYSsCOIEXxQqCJ -Bq2txdnl/wIDAQABMA0GCSqGSIb3DQEBCwUAA4IBAQA0+Wuquxc1ajfPWwTquuL7 -J7pHP25542R7IdBKHTluWbVFVuxaGJ6IiE3uwfYNhWOkixRglkVZKfWXQT5e5hco -DY2QE+pagJ50eTlG5g6BwH3O96q/HgeHegWwgB34IXh4n4m/B9+w+GDvtW1cdRzN -rVphbwko68EwMZ1gSdRxei9zbCafKLbaUC5/obGZDkGipyMjD4abHBXfKH3VMM1o -Kf6EdfXHJlrK8NhMrbgbLhu195bWwLKqOztsbvUsmP1u4lqeEX78miIVK+SgifWX -TVT5DDOOSH9Xr68v1GRRRml15E4252qhQtDdowxD4vihKN1DsgzESvXDEpQwUYtt +IFRlc3QgQ0EwIBcNMjMwNTA5MTgxMjMxWhgPNTE5MjAzMzEwMzU5MTBaMIGbMQsw +CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr +IENpdHkxEDAOBgNVBAoMB01vbmdvREIxEzARBgNVBAsMCktlcm5lbFVzZXIxDzAN +BgNVBAMMBmNsaWVudDETMBEGAyoDOAwKUmFuZG9WYWx1ZTEUMBIGAyoDLQwLVmFs +dWUsUmFuZG8wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKIw6RslLI +yML6K+UkSzJJvcWe2DkJsLrjxTehzJuP1fQL2uxh3NqBfN6/9ToBMZBRiQpxaPH8 +jbYcyddCP8qM6jXIpsxVMwc4Fa/+SVS3StnVD+Kne4i8EBhy11xnlUIfU8Vxvu61 +oiZnA/xLML2dLxi2gKNvVk+HIaqMqhdND6llEr0uJP8pQwToiSO0qHCI6XCQeeuI +6BAiTS/VDQBnjtRjDjn0PGlCz80EgINK+rsN+y7E9Dy5/P0a6Mqqg51ylh1shVpI +BShW1/ZItH6SNUaEDWIO6Zpf0fs5HyraGce3/wWpqpMbE1jj70aeFDAXDntkMQlZ +ruzstg7UqIwHAgMBAAEwDQYJKoZIhvcNAQELBQADggEBALAh4waS16TzAR+faIqw +1UqGPNGM2Zxvf+fbiYkNq39HipTvZOus5fNlKrakzbiK10uB6KxeCNzpg1TbLC7j +kD1D8UOcVc5E3Di39A76pZ8CssVsl6+BB4BZlN9gj/R7mw7oHYICsWii9lfm1KEy +XQp+Wz5C2lZiSm/7J3aJpfP/JVcif6YSx3Yn0XkDfR9Co3+YwdV7p6YEdqsrICO5 +JMZPgYHXVJhPvzx1WC1UCx5MY60vbldPNu75N4En/XnnbJLc2RjQYA6xuCjYVCQe +UtMRO5Pz9UxM0n9/oFa8SIxg7tVwp77SlK0j1kqpP2WIOcSqMAcSYJuNBD8bu7AP +TxE= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCn4v9rJv8Ocqpf -nCOrY32BvD5eEkA4rDYsmIKQeFxhsL7gBE7QOopT6m0EudwpnQGkFhUJIUib/+6B -qk8cS1c0vO0GPzVRXZoc7AX9eCi0IgbCQnFU8oAs77zvFhDTM8woGo/z3Rc+J0J3 -Qpj8i1Xfok4DMprxmY1lH8KFm8MwKVPiIfCkRDoUIwZpclOoED682Iif60hztjVk -eADeJPDD6u4oLS7alB2Vr478iYv8RbEIO1hzAoLhXrgxYndGWJQ+g/zwQH2LfiB7 -UQfWvftAH0ZE/iJfbtNVnTv5ymUfI2g6JW9n61UTN9A8MJl/EdhKwI4gRfFCoIkG -ra3F2eX/AgMBAAECggEATKgVP/PaWdp5eJZuov8We3pcb8+di0L2qX4pf5W1eNWf -EeJlqiZQquhLRvEmWcnayfVbPYP5B2HgoUwGO0EbXHU3NLb/vVsj1zjds1J+I2G1 -/FUiMktXwyzj87b7j7QC7/zQ5eR5jGOYsaOy/v0QBMCzJjqhDNez/Ax4YVEx2ncG -qFXQiVBwVtbmNFa6y4G/3SYbLjTmnkRUvjgUrWO+3Y8cHGbzugGMKhmfQKElMnRS -Tflrg4WhtHHaI2gaIpmVHuIw5MRoIs9rMUV9IVfLrcmG/KjxVha+LlRAJFi0tMCC 
-kKSb9YLS4E/uFAAWaW5qmDfGEU3gPT1oYwsAob/wAQKBgQDcwrxj3caDgnFWPh2z -vy9e08Pvh71V7kJDVDoqrZCRlduLg/9qOP0sPHSRzWARr8ND8eqZtBv0lDvhAtfW -w/flt0Hon0M1QTIMpAr9WMWrCz1g7RPvpgNwOVPmOi89Jgh4W6zICyRHm75NOSbT -zO9HhEzEiKWu9MUorbTly4DyAQKBgQDCr5iHwKGPxRCbBx4nXhyPTCowI5L1MpS8 -n8mZx2Iz+6vb9JqWL7qMGDJ05py3E7m4HK7E3O+dGc9SVDLW5WDsdm2BqINZRSI+ -VhZRVmdZOiRYM4ogHexKqxDZzGWpRy9WhOywT1bpB56euw4QNsyJQivwqY7LKFZz -rZGlESrX/wKBgQC1ErmphE/+sHC8F4kPVULKmhH9l64SHLKXPS/Ur7aD45JHlkZa -31GyBghrSzSAbVVRls0hs5y2eRvATS+yxdzAvBbAclmTBd3Cho9JDJZd3INEzFDs -4ZOyaiNKFPGIS3jpF/DZyGHCpplqB/W0BkVBuN+q2JZlsflBi3F0hv5GAQKBgFWR -omQAacJsJjlNbpZ1Ce591okkCaKV4cPqI0zg0qvwcQATSaK7Ra3vsLWq6rSZCeb/ -TF2gkfyl7HbOJS5I0Un9X3I+o8Sn2z40zkhfxrr0ECdoN8yX+lRzTcu8x+4YKJlJ -gVKY/KUdwT0ZEDxWKZ4mpHN5wxZOG3lqj3pSylb3AoGAVMf7yegFepBN0+4njyq/ -2XETNhk2MN/oplWy3XIQh8tL+KaFeFZCEmjPS1impLzg725CPwuL7/T+WOvx9zvx -aJUY2no2nqsEF5p/ov9r2/Yy9uJGHS/b7dgPwHcwf4/uPcrq5qlStRQNGI/yqR88 -kebD0hJpUTbpKfsb0j/WI0w= +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKIw6RslLIyML6 +K+UkSzJJvcWe2DkJsLrjxTehzJuP1fQL2uxh3NqBfN6/9ToBMZBRiQpxaPH8jbYc +yddCP8qM6jXIpsxVMwc4Fa/+SVS3StnVD+Kne4i8EBhy11xnlUIfU8Vxvu61oiZn +A/xLML2dLxi2gKNvVk+HIaqMqhdND6llEr0uJP8pQwToiSO0qHCI6XCQeeuI6BAi +TS/VDQBnjtRjDjn0PGlCz80EgINK+rsN+y7E9Dy5/P0a6Mqqg51ylh1shVpIBShW +1/ZItH6SNUaEDWIO6Zpf0fs5HyraGce3/wWpqpMbE1jj70aeFDAXDntkMQlZruzs +tg7UqIwHAgMBAAECggEBAKVtaWf9u9iMzV02fVJlpCNLhydHp+5xT5JG/g+RxyJB +JHhrU5eHyt+8VQReank9mfHQqNZF3/0j8Q3AdkXGtTr8FsY/E+0KVPcmcKE4XHeh +b4L6fDE7XZ0jww6BiCNRuQqwYV+EthG6QZl/XoQewJ+aQqxhvN/KkE10VQLH1Uf/ +e3kBs5JnRzIHBpvw2eWqz0AEaG/dGmlk0adS50OCd/9Sd5fj3GxtBNtIwcxBXCul +CJOlM8AiZ7cCAnDhtmlD13w5rCrv3m1dGQ9uddTMrEqzwLzvI7pHjpihuvpxe4AV +c6gGtJi9rrIYGq89UFkViZMzrXF3ZOB/Q+QVMr3fJcECgYEA74vZ8B4MuYNZxwIb +sQE1Pc28fMR7+ahxean1SoMgUnPVvPUVM4Rb5zmb+HM3qwuD8pjePeCFjI8JRMuY +BLwbS768AhN2PEDmHeZqGW2+ejy5miKWJQkw0P2kwNTFiXWGQMJudwCVuLLwPRZz +/+BF9+Y5LB+61s01x2yt57eC1ucCgYEA2AVnnKPn54SrvRYi4o3HMGhT04Z4ed3o +mHIH4NwtAyuUSYyW4sznzLycp5YwwNr7VUIAAVHDFJDgRJClzExvN6MJMz5Pm74p +1JyLJNJIAdOIeiuUQ72hCDIATnO8b8eSx3CyZk/pMAkX4jDdcdSHxYe4Q0lYY28w +r4igqlZyneECgYEA5dS5mm9KaCeFWLJGHVL6YTzm7pKaHFQn48JUjVQ4C8QFu5Et +8Uq53qTgZ8UsERkwVO+ks5uaptyJ2Q654TkVu1vaCOfhVjgyUVfETneD/MYMkb1D +b29c/feOPlEm4hb3Y2TpFZZjEF3mr8W+MC9fElU6X4JmAjfLtHYqeJsSltsCgYBu +1x8Z3XQRdB6Wr/QIYQzyhqV0ZIwOo9FD30axlue1t7enoW6OQusxPxn2V3b+jZ3m +Wi4cfzJkrA2WwM2BrTpnxszisTcxx3o5MHWo2AjAfySI4zF5LKSiyt0jY/ktNa7X +jLjNDHWvAwtxMPd+/7kGnqPqSokCxDur5aPioua+oQKBgHZpMxgTfBig2ZLQlb3F +5d9WDstdnzntJ+xh0Ibvpk/fvu2xdAtqxwdxBf/pfS62urJt9QrmkXrf6lS0OWHC +g3vzYidCoffJfPSXq+QE6E4cypyXVCZ3n8ZDVue50cZ6aqwmQ3VZqgG0Tj4z9lrb +bwPuHxWcD061D1qE/+PNx00Y -----END PRIVATE KEY----- diff --git a/jstests/libs/client-custom-oids.pem.digest.sha1 b/jstests/libs/client-custom-oids.pem.digest.sha1 index fb28c8fb8239a..d57daf29cb2a7 100644 --- a/jstests/libs/client-custom-oids.pem.digest.sha1 +++ b/jstests/libs/client-custom-oids.pem.digest.sha1 @@ -1 +1 @@ -FF12C0191181574BBD7C455E746D32A427D746C0 \ No newline at end of file +C13A7E8D8AE02F2D797C2764C4046D30CB7417E4 \ No newline at end of file diff --git a/jstests/libs/client-custom-oids.pem.digest.sha256 b/jstests/libs/client-custom-oids.pem.digest.sha256 index 6dc0e47eb01a5..f18e2d865c2c8 100644 --- a/jstests/libs/client-custom-oids.pem.digest.sha256 +++ b/jstests/libs/client-custom-oids.pem.digest.sha256 @@ -1 +1 @@ -95A5EF3F2960CACE68851F62DD1B991758A5858288A688278B72DFD7C1DD0DC9 \ No newline at end of file +722F166C90CF8633B2896D63272E4C908F6BAF2A1A57844D3B949805E63C12FA \ No newline at end of file diff --git 
a/jstests/libs/client-multivalue-rdn.pem b/jstests/libs/client-multivalue-rdn.pem index a97355e002f6d..cb28fc2f5e155 100644 --- a/jstests/libs/client-multivalue-rdn.pem +++ b/jstests/libs/client-multivalue-rdn.pem @@ -4,49 +4,50 @@ # Client certificate containing multivalue RDNs -----BEGIN CERTIFICATE----- -MIIDUzCCAjsCBES9zCwwDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP +MIIDUzCCAjsCBGucNjowDQYJKoZIhvcNAQELBQAwdDELMAkGA1UEBhMCVVMxETAP BgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQK DAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0 -IENBMB4XDTIyMDIwMzIxNTk0M1oXDTI0MDUwNzIxNTk0M1owaDEyMA0GA1UEAwwG +IENBMB4XDTIzMDYxNjE0MjgzN1oXDTI1MDkxNzE0MjgzN1owaDEyMA0GA1UEAwwG Y2xpZW50MA4GA1UECgwHTW9uZ29EQjARBgNVBAsMCktlcm5lbFVzZXIxMjAJBgNV BAYTAlVTMA8GA1UECAwITmV3IFlvcmswFAYDVQQHDA1OZXcgWW9yayBDaXR5MIIB -IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6Osw6+wVACn7qj/48i7+V+zm -h0c5IxH4ba8a640fHOnScUlpWz4UyNiBIyF36ITTH9KJoXdcQWH51S5iHUrgZUbq -GaSUuS+LO5PRDu+55W67L6+5Y1Qq3swCLeYi5rxOoMiHALY8ert6agAeoTSPv/AD -3Us9P9n/H83qneFBHdb8yjMeZj8hiCIt8mC62w79pJCLQyDHiKKeSFtEqBCAhD3h -yGkb45pjIAKUevbEDZvCIhpzT5FuwUgbhWVBuoEoX5apwA+49u0Ots+jbxcJSTkn -y1gFJmu9PF6lvqwNq30jATvarFxHYB2BZjLRbSKH4TE0ITBiIbVL9H7tKqhP5QID -AQABMA0GCSqGSIb3DQEBCwUAA4IBAQCPxLvlCIZLq7uW8ok74AC2MG+2TmmIyZTQ -m/FRA7xRfDtueMcN6Zq49DrbuTtOoLC6tNt4X3o+wU+RTGoiRwR4LBltOF6dUz24 -4KbludShxMo2AsWELfRJCKGnZOi3WP8sA+nenSNPoWRzZkw4Tn4QWSFyzGOGIS77 -eTCkJX4BnQwMoknINUkxWiE4/AITe06hafA+YBW1keJUd7ouOjdCP89EVriR+p28 -OyoQyqvFwW6+gL6/iL2V+o9seP6b4vn7Rn25sDRoHeJVzUP/SyZVka5wTh80Pk3x -OOZYWg8+enNza9SVAK2ReNNjaSlt+nByoZNjLOwnJQ6O7sDviINI +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4RDvOBSVYF9NjSpy1uOXXrUs +ggmy1MLmdNZRXl7zP/npT4Fa1PP1gfA/O2PJWeat3btD5MS9n6VnX7Z8qGuqENW3 +doMLuSWqb6FbHQk7o4qWmK6BfHjbHSrpshosO3Fz4oLw+wRQyJZqFueNLRTU9cMS +bOi45bzr3L1+0fhUI8Kifb1bM8TTeljgRXk7sukRTQsVHIheUxdfOP4d4kxKzTeU +nssb74RzO3ME5Jmk6+hLw0aaMrK+3Vv4wXg++6Xs9XLfOSlarFq57SkMvpyDFfEE +g8haNM8Mw5Nr8QZI4YDTfIXek7Hpvdym95mLh8Bj/8LgyotXaHxQTbLTuyIvOQID +AQABMA0GCSqGSIb3DQEBCwUAA4IBAQBNQ+33kFzFgHT87EHh9mynDI8Cmrki37vj +3WdrI+SnvnaVCk5AUX9TCHCGxCYUoXeLKqVTgZVYizWV9TrBO4qRUEO9ckBFgk3K +o7jIP435bsRMP5UiWMELjYaL01vhfIq4srVicNE9AiLJXzKKQQTpP8zD+KGxcOAY +3IHDZzd6muYoQ+bok7qQlc5VVu3rTSJyDDEOj96iuTTbuhNKPKjWi4BHSnW24t6W +dsA+S82CMzLYgylSz67Ik1J+rysAU0InHTudjuU5j8xqBLTLF14CnKyPivhDv1+i +OS3QkrUVQ2cVyiwdFmf1Zhw9KNk0BdGzdstElo9zZXARP2yu/2X1 -----END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA6Osw6+wVACn7qj/48i7+V+zmh0c5IxH4ba8a640fHOnScUlp -Wz4UyNiBIyF36ITTH9KJoXdcQWH51S5iHUrgZUbqGaSUuS+LO5PRDu+55W67L6+5 -Y1Qq3swCLeYi5rxOoMiHALY8ert6agAeoTSPv/AD3Us9P9n/H83qneFBHdb8yjMe -Zj8hiCIt8mC62w79pJCLQyDHiKKeSFtEqBCAhD3hyGkb45pjIAKUevbEDZvCIhpz -T5FuwUgbhWVBuoEoX5apwA+49u0Ots+jbxcJSTkny1gFJmu9PF6lvqwNq30jATva -rFxHYB2BZjLRbSKH4TE0ITBiIbVL9H7tKqhP5QIDAQABAoIBAHUsA/NbU4E+nY0b -G5hyDZ+b3KjHKrY6zxgWk9tVpgY8lpJJFQhCpeAYEnbi2liNoUwL9RLWYgG+URlu -eQs1ZMduMkxuICc9BLUW+n+iF4mU9/PYWdHfQKXOSXZfpMUgjAmUd13jT2+Kqt1y -a/Y64+nxy2/i7tAVUaPlShbcf46LZ8Pjz4z25W+iSkPN0ACwbLDNZu/BPDzXGVgd -yVBSZTDN2w4dOeeQm7HAGIdcuaRuAhPU3eonsWVHfobuD9p3XK6uUWoVvLoeei6r -OH7bVPFSmcSOZlxLL1yXfAffPGswRlQWKqL5mHmBgkjm6FEyOzp08StqreG6s3+E -P05pZw0CgYEA/jjO4GGgHph3lwGqmIkTAH7cFv4p7wDJjN8KQuH1riUW9YpEjHVK -7QldA3paPXjsKCsJrTwB9nYnqOUqzauXvMW5ib8eLhn5oaj6qWeNu7HlxAlwiH9E -AyEUrHho3jNASeRLe2/Ui9KIDSwqBk7EiyE0Bo4Xfr/FSr732hBj9BMCgYEA6ow9 -Lnl6nUo4751G/zoWqIIpFaU9UeEkLRloOnLkE1DFaaVICa7BPOe5quEPj+2yhlws -HaYATAS1P1nlt43dodClGcnlrd5E8nAV4cd2Q+NBEldwVeyVnD6gybQ/7/0S3gCJ 
-+pRPbRKelFDSn75FyeZ2a+51uQGqBKeuU9FTeycCgYEAzRP+rzuR85yDPKn3eL17 -OrcbzOCT613zOVTj7jhh/G2nK8Syr6wfGUCobBnTmitmNrEhSUJTQwLl03WrJeb6 -rSnEFr66Fe6WVjb+npIO4A8OjyoeQb6Imw2go9Eup7Eod3NXAOihyXm4jwfFjvkT -zDiow3D0uybwfO+4+YynVUkCgYEAjYIW15dLNuK0/zwwvYPab8g36WtMV74yCVOu -4rS2jkDJGjgyAkWBKLHV/xbSQM/0ScQKicjBnRuqDpK6Wcgp05sCQVDiVcgoaOzf -Bt1EqSjO9bXzfKPEkAPpFki92pkhbPd+R8R7Nx9otasdqGsvx+RXxj6UoA40+aIB -N2ivqTkCgYAzBFlob7cN6Dn0fSYkOGx5kVDE/A2X/qHMs9b6Z0qCc2hWrcEuVQra -3SMcrF/STCrdlkO84GuolzZ0j6h8it0hRUJXIw4PblMisozaD9lmfwFdzvbyU8+q -4kjthONkJjFsTyz3WOcRTXTAbj87U57WS3E3AApRw2lGtzdrkDPERQ== ------END RSA PRIVATE KEY----- +-----BEGIN PRIVATE KEY----- +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDhEO84FJVgX02N +KnLW45detSyCCbLUwuZ01lFeXvM/+elPgVrU8/WB8D87Y8lZ5q3du0PkxL2fpWdf +tnyoa6oQ1bd2gwu5JapvoVsdCTujipaYroF8eNsdKumyGiw7cXPigvD7BFDIlmoW +540tFNT1wxJs6LjlvOvcvX7R+FQjwqJ9vVszxNN6WOBFeTuy6RFNCxUciF5TF184 +/h3iTErNN5SeyxvvhHM7cwTkmaTr6EvDRpoysr7dW/jBeD77pez1ct85KVqsWrnt +KQy+nIMV8QSDyFo0zwzDk2vxBkjhgNN8hd6Tsem93Kb3mYuHwGP/wuDKi1dofFBN +stO7Ii85AgMBAAECggEAS9edjTvedHnZE9N0nHZYSXlq9K0Y/rpo14FrLQ55ERuB +pCGWfsFw/2b6n+RJ80oZV+llgR9NORGPasD/c+/IAOFL8BtL5YnMS87HedrlJoCq +q+vORagiSktuMe7A7g0WvINcenIjWNzbBjnkKIdnFiQ0vs+TkxNdNecDZ/UzQVMR +EQCVrLFxwGeeQs8mgPErl4BEexNH77Pi07i511/swFP06kmOzxSI7uknY83vaNsr +s4XrScpMxszOCcJf2857nUPYZSldnUJbvTjzDxKvn1sHpcvnlSdP5AsYa8i9UmM4 +d0FzUmywOCRmwhFzhPdNETDQVHNJj3nhD7rpw70bowKBgQDvcTkhniVYyQYLqFEi +LRRVDAUy+hQF2uToYpId4+wSD1tdXZr25uzBi1wAb3wkSIxig5MItYAQnMQxS8H4 +Z9hmtsTNe5RLUtkt+t5rKDa8MpEt/0u7sKfzteTM0SPlAz8fQV1xIfrnYL0R3jy+ +thidEUfn9f1EaNNvKh8TMbsm8wKBgQDwoTcCd8RNJ+dfSzEXYgz++AZmPilnebBR +BJjsZX/lF4y3V3ZMIB083DCnszb6l63nxMTBYlU2RRJ7C7AxxJVIHZF+WahW/Fq1 +TiGpkXZfyLkT5H1kNuH7IvcDVmAu/BfzsN5Ej6yC2wDpXEqWmV4TZbE1wQKApgy2 +t6VUyHO0IwKBgC7NSMeDa/Vgxae9rK4rdY/yG4oNS6ChWqD5s2nYRSp6ifdD9qhQ +FvL11HPZVsCY1afj4/eN0oxsuASStEVjtu7MxscLYr8eYIkWQidb4ucCU1JUVm9H +ZmzCnwhR0NxQuCPZ2PPmGm7jf9FgSStV2JXK7O0wLeMTQlC2QQC1dcl1AoGBAJOc +og0+gUo/f1zi2HOar5Q9fFd+LJIQgUvCATmLLkDQCH90BNrAHI94F9TYSJLDN2hl +ObnT46gOCT84NVbiXB1IHjefMnhiCBcOnfHSjQZHMNn2IcG3NTuFAHiopQpNlTfl +uQPgCxie1llRR2RJIv/NMz9hbnKS6luwHpj6+pd3AoGANwlsfpj8zn4mwauqsSK9 +13cauws+HcI5g/vUda0Y/LreJsN5ELrWL4UQCoMthQSI05hYpBAxkLcvZsMJ1hs5 +Y8e0QukC7n6to4KNMYY2yiRjBitdoHjFKurWgrrUFbtrq6IFiZ18pPVX/1JH+oBa +1IsF59mIpZ+JJCi9W/8cGJY= +-----END PRIVATE KEY----- diff --git a/jstests/libs/client-multivalue-rdn.pem.digest.sha1 b/jstests/libs/client-multivalue-rdn.pem.digest.sha1 index 6e49497ab0277..5a051aa7d53e6 100644 --- a/jstests/libs/client-multivalue-rdn.pem.digest.sha1 +++ b/jstests/libs/client-multivalue-rdn.pem.digest.sha1 @@ -1 +1 @@ -C544D25C0899ADC557BE6D274D47F49BD1686886 \ No newline at end of file +2B638F696EF373BCFF1E9F911661F251DD460511 \ No newline at end of file diff --git a/jstests/libs/client-multivalue-rdn.pem.digest.sha256 b/jstests/libs/client-multivalue-rdn.pem.digest.sha256 index 30404cb1f297b..2ff001b6501c4 100644 --- a/jstests/libs/client-multivalue-rdn.pem.digest.sha256 +++ b/jstests/libs/client-multivalue-rdn.pem.digest.sha256 @@ -1 +1 @@ -85548F4F4C885AEDC9B4C12C10EA3C80F1F2B3F77BD8DED940868C6B3F5573F9 \ No newline at end of file +BC73453E12FADF69B3D40D3B19804A534945247D54F9B38C5BA8653D6549A633 \ No newline at end of file diff --git a/jstests/libs/client-self-signed.pem b/jstests/libs/client-self-signed.pem index 6ce670e4d2e26..2f64f2d98cc39 100644 --- a/jstests/libs/client-self-signed.pem +++ b/jstests/libs/client-self-signed.pem @@ -3,54 +3,54 @@ # # A basic self-signed certificate. 
-----BEGIN CERTIFICATE----- -MIID8DCCAtigAwIBAgIEJzH3EjANBgkqhkiG9w0BAQsFADBwMQswCQYDVQQGEwJV +MIID8DCCAtigAwIBAgIEV2NvBzANBgkqhkiG9w0BAQsFADBwMQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxEzARBgNVBAsMCktlcm5lbFVzZXIxDzANBgNVBAMMBmNs -aWVudDAeFw0yMjAxMjcyMTU5NDZaFw0yNDA0MzAyMTU5NDZaMHAxCzAJBgNVBAYT +aWVudDAeFw0yMzA2MDkxNDI4NDRaFw0yNTA5MTAxNDI4NDRaMHAxCzAJBgNVBAYT AlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEQ MA4GA1UECgwHTW9uZ29EQjETMBEGA1UECwwKS2VybmVsVXNlcjEPMA0GA1UEAwwG -Y2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1n/0VkN2yib+ -jGaSkipaEn2jJ4KhmPXixLVzLj+YxXUN18mZNcA1IN1Lguci/sm2UQnj3Ulhfeff -IGgU7C2aKXb51OVd+CHSfYwL9Y+SYtojmDFQg9GyZHWOC9hoNhbonziMGRReqOEg -sI6onb88QwVo0nI3ADeIaiZXPVoxRBdmNg75sKjR3F5sFplfcx/ANGYKm/bjLZgj -181m8hdOZLdXlE168o/yUlTh4yUc7TodTf6ERjjiQk7yn52XOQQW7aI4qUGpiv03 -Qqa8P7PkMzWx1iT0Hg1TfyexltEKcwfFP6OBPr227Jhl9ZUaAplN35I1mEudRInx -U9qlWKxQaQIDAQABo4GRMIGOMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBMGA1Ud -JQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBRDkjlMAfHAtoiaTGvdpBNKnP9csjAS +Y2xpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx+wr8anR1E4r +QM5uHmtO4aEBJYc1hX2H/jtN7DNKxi6CmTNTN8IkT/awyTXxkEIimD58eLM5Fa21 +/t0esd4x+/h+AdZysnboDyiKGWWftrNEaBgVbwIqN2GI79yerRzdEfqKpxI5LiDM +67Cx4cOqvU36bY/C/ruCzGeskiBGRpVrWfAaMobuV1oO9oOxdE6OE8MvPWq9KN39 +fVA9xRCuKnpTPaC+jFejEJNaI44jgYlmUihReZJYNmnp+AiJOM0FOF2+wVUOwfF6 +JaFr4R5md3OZNS2MRrepGBRBOUQ9F+RGrCwoTONRUWgXEj8JcAI6QSpSsyv4PeW8 +xLokkHZNxQIDAQABo4GRMIGOMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBMGA1Ud +JQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBTCeps5ZImYVRzQlmQH5xBTaENN+zAS BgNVHSMECzAJoQSkAjAAggEAMCwGCWCGSAGG+EIBDQQfFh1PcGVuU1NMIEdlbmVy -YXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAQEAQHaO5NAKaGkUgQYn -wuQVfh5qGf41MRosL3gRZPDHaVjrAV4Rp5jytJTiB14H3lLS1pvEeN/06cSoXgxw -/P9JjrkYXnNt5h+vZBOomfhZAsGSVQT3eRNcgpNqbVZLaN3T9+wl9Uel6Njg+iwu -SW04b2L4x84+tJEX02+0u3yXq1vo42dQX+YY/VuEufmLZTLe+/AzB3gFKPzCs6ua -Lj9u2UVH7nLzn1IcqbUrZyD4MjoHj4brWkR5mWoIpjXSihS/g8EK2ttIJ9gHKi5J -7pA6ebfUjE8HLwKjY1DTrjt5oye6yqG+zzuNLj7qXLpM2iImG05K6WjcwTUy8Zv/ -7TyUtA== +YXRlZCBDZXJ0aWZpY2F0ZTANBgkqhkiG9w0BAQsFAAOCAQEAPXZo/1KXHHmRGVyb +KN4zLu6RRE9MrdK2K2EPEObYr32/3WTM9l8y/ioP3bhovmpK74/7Sa3WCAnfDKQ4 +est9SzEGbyWogYgtU90g7o4v/TA/knvzIXPw1iGeOYjn+v+W5pLwjgF1ay448xB+ +14sDSqS0LuHg0ZgJJ5dxSgm5LMe9HUAZ6nckI4epMV0+cSORSMpcvRvQp5Ql2Qbr +tQKOVd/t4WXdN+s//tKlcS7I91e8+rTNEavOqp1Gju5RwQEPRhU1+mgavprbgaBM +OUa0qbUoHPi/cm99O8FuhjHu7cHkp4CuxCClIHsJitotd4Nvf6D7xCwq5toIGeW+ +p5ob+A== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDWf/RWQ3bKJv6M -ZpKSKloSfaMngqGY9eLEtXMuP5jFdQ3XyZk1wDUg3UuC5yL+ybZRCePdSWF9598g -aBTsLZopdvnU5V34IdJ9jAv1j5Ji2iOYMVCD0bJkdY4L2Gg2FuifOIwZFF6o4SCw -jqidvzxDBWjScjcAN4hqJlc9WjFEF2Y2DvmwqNHcXmwWmV9zH8A0Zgqb9uMtmCPX -zWbyF05kt1eUTXryj/JSVOHjJRztOh1N/oRGOOJCTvKfnZc5BBbtojipQamK/TdC -prw/s+QzNbHWJPQeDVN/J7GW0QpzB8U/o4E+vbbsmGX1lRoCmU3fkjWYS51EifFT -2qVYrFBpAgMBAAECggEAV9fI4Xb5c+oqPqdXCSLtBjuLkIev1CuGddZ1WBBVaS/5 -vlBiPR/84fuei/pKW5uR3Xg2aA5ALcbCmsvlHZ/DTY3a3HQeWprHUAEFeMgWRANf -plkzxvgenYOC65jxfI0/MM2Amli8N18S+xtBNHD3pd7WQbDik2UqqYYQHiEMofxP -PvE51/kuPY3F1nzQot87N+EINSRrkvzT2S+4qC8+QJ8Kocn8i+ETuESoWiyXAAFT -4e5AsF7QiXcTaM9T/SjysRrzLkyXuGimNxhlygc1goaRVSwSdgwM81OckMYmBjcd -FhZGCDOn5LKSl5DAi//zwaKqZ54v7dgPbHx8YakkDQKBgQDsyy7BGwZgCNrsEaUH -JLVF8bG1jQPmXfmBGJrERfkxUwMcPzhBqXsKaJDIBv2yh/82XW4fytjfajxhT4ej -XZshOXmOKWd63sh4MFYWmbVpoWW5pll4PGcE0kpyGugzGM6Y15pZkf+x+GZvECKQ -KPn23TJBS2Kaf9wjUMgvYPbgvwKBgQDn5dvkhWP3YoWrzEV1PTgzNVHZDt3rhgvm 
-MUVojlb/A9xu+/NZkIcOJi2qZ+aodLK7y5Ugjae//x3hnguWFyFY4hIE67HzB4Dx -PukZiRBxpOq+uHdUmfHmGkX9XrchFJn+lQs5QkwSvjQge3HBWLxAwp3M9ic2+x+8 -5Rzs935w1wKBgCdchwMWiPLBxhJjxHjxgyiDSrURIcrTaDwraN6jew7V67hwUduo -XomWZCq77sQUkznoQfwK7g4FTNAoNjXTw4u8UBZvj4H/Mne4ITdUibFrYMuBeXHh -KIbGphVdn6eOwhjqDgBBoq1kyzI/Dl/ET/jXQBlWfKeOBXvhW8V0atVlAoGAZnyr -5h8dwyODuB2bROk9gUQ7XBa59XFUqPN1nXPq9uGZ9mLbdeXuCk7NN9abli+dHmon -CjAQx0XUyvWyYS7vyfx/wjT4fFQApJ4NHv/4iIE+TfPwqS8wPWW3MPc/MBuOw3jT -cQbf4Bi3qPNlnvG8oVJhs0fGpQHvUOhhFEl9VBkCgYEA2Ush5K/bG4b2DDnzDFxT -whAzUYrJO7/6u1N9cTvXmwqVkBFtnjA//sPJesPiYiz0nYFHtxco/H26hdY6vP8A -xrlZiUZu5rxF1BgMdjl13NQUWdyTB3VHxKsNFytUijRbpZsj1vBZgh2IiQNVuzf3 -4m6vQeGHRETrgkJuX2sCWSU= +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDH7CvxqdHUTitA +zm4ea07hoQElhzWFfYf+O03sM0rGLoKZM1M3wiRP9rDJNfGQQiKYPnx4szkVrbX+ +3R6x3jH7+H4B1nKydugPKIoZZZ+2s0RoGBVvAio3YYjv3J6tHN0R+oqnEjkuIMzr +sLHhw6q9Tfptj8L+u4LMZ6ySIEZGlWtZ8Boyhu5XWg72g7F0To4Twy89ar0o3f19 +UD3FEK4qelM9oL6MV6MQk1ojjiOBiWZSKFF5klg2aen4CIk4zQU4Xb7BVQ7B8Xol +oWvhHmZ3c5k1LYxGt6kYFEE5RD0X5EasLChM41FRaBcSPwlwAjpBKlKzK/g95bzE +uiSQdk3FAgMBAAECggEBAJSDELHY4RLBbsgZkRvvww1BOUTTiCK9+cQV8fDAuY83 +BbUgt3T4N1lffDw8YlIzjtdhs71GBeXgwQ2u3RpsYN043wImd9WzFvgLuWrcXySt +Tue03Fc2WH4DamIklYsrod21LEKn+uoVW6TKAZogbDlsL9grr0LjsGp/qWZzq0DK +dCg9FmtasYsEpTaIDbgtJZO4NjpjoayqGbBACKdAXTMNTckttdFvq2mi7MFuY7oa +RP/DdOaD70YGm2/89wbrV3B6Lnhv3lrj8P6CJiXmRvqK12DYtdCWgdS6RWoV00b4 +qoHUIg/BawoWMUo4b01x8P6xqWvKT4OJO93akFsOtIECgYEA/j/YZVF3dtpSzZCp +8EO6/g/ksy/99L61+sxylW0q8BAKBar1R1laMZlJOtdoQlczsTlAFtGzVaGmShHj +sqh7N3646vIYl1qzSJuMmBuiGljvb5zMbVEcGFARO0aUDgAYP2I+ofIkoSPRkGt6 +E1x7K7hz8SeYB0msKkC4welDKqkCgYEAyUyRE9fyb4dYJkTSOSoKejLeqeRbiPRp +TrcKaY/SCB+kUJFTh4aXPIne9uk7PlzY1euJBwzJNbl6m/kdNUdXbZ+jRsPzK7BX ++ze9QslafeFiDpGVxmwRNv4sPOLKcqCvGXhAQCUXRFzH6omxHliC4RkdfLaHqUhl +vvxIqBj5t70CgYBWlQtuRXX3ZZX5JyCYD5ioWGU7mEZViHSUefczZ7/NjMi88WEh +8Q3EAj1r6ls47FVQLvziSHtX1/q3EqyF9NYxKdhzOgqh0GNpgH86dX6YllzDl5QO +TibRKMMVeo2Ezwdy3lQR9lH/BiGhmtgxq7bORrxFDYS6Rp54rR29+1/CMQKBgFXp +8uV4PkWxi0LSDrNNKSmceoIiL5sVTbjF0JDbTDYhYxzr2a23GOdCpMHXK2zjbbxn +iZVTtLDUV+sn+Hpb14m3H5W9XhTgb7yNvp45mACv6Az1v+nvB63j73eRB/zCbdk+ +BJYb/oEz5DNKzyh3eGygLoCi2uW6O4q23D+6YSI1AoGBANGK6K4YhIf+uZQwtAJ7 +/vwgTiaBeidR/cjzI04JZiRAb+CW730WwX2WwkbwSwmbsDePN6nrppGr5Djek3L3 +mFoP7EdBgubg8LLmw7UPI76M3yn177OgkRBYSPzWITJdwXkIOy/LNgMUISY895PQ +mciJVC+m5TcFzm+WIycVahx1 -----END PRIVATE KEY----- diff --git a/jstests/libs/client-self-signed.pem.digest.sha1 b/jstests/libs/client-self-signed.pem.digest.sha1 index e4911a6c71052..e47e271418edf 100644 --- a/jstests/libs/client-self-signed.pem.digest.sha1 +++ b/jstests/libs/client-self-signed.pem.digest.sha1 @@ -1 +1 @@ -F65F9DD67A60BFB5AA7CE23A0E39869EBD23694D \ No newline at end of file +784240744D02ECF6DE86268DC036B34F1D5FE454 \ No newline at end of file diff --git a/jstests/libs/client-self-signed.pem.digest.sha256 b/jstests/libs/client-self-signed.pem.digest.sha256 index d979ba81508d7..68c51501b6e3a 100644 --- a/jstests/libs/client-self-signed.pem.digest.sha256 +++ b/jstests/libs/client-self-signed.pem.digest.sha256 @@ -1 +1 @@ -902810474F735BF234C986943328A7248C3EB2E83EF7D5528F4A051306129CF1 \ No newline at end of file +7070C6F79F11C2EB31E67AD0F3B11DA5257019CC6C60BA816416B478A8A10B9D \ No newline at end of file diff --git a/jstests/libs/client.pem b/jstests/libs/client.pem index 251cef82ff5a2..462eaa2ae72a0 100644 --- a/jstests/libs/client.pem +++ b/jstests/libs/client.pem @@ -3,52 +3,52 @@ # # General purpose client certificate. 
-----BEGIN CERTIFICATE----- -MIIDsDCCApigAwIBAgIEfxgIOTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDsDCCApigAwIBAgIEA6zcQDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjBwMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM3WhcNMjUwOTEwMTQyODM3WjBwMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxEzARBgNVBAsMCktlcm5lbFVzZXIxDzANBgNV -BAMMBmNsaWVudDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMvcl8IW -V298VV6n1Vwxx+vrW5UBgEsTucc2zT9so4QCxAWvBWDJEBgRoGXjEx0AFuwgaaqG -ahfBFcTbYTfyg/qwUbdQk3cBbuJu+kzvCzxUVz0GkIJ+oy0PO1z3lY9C3XRq5D7c -7C+7g18J91j5xBf0wKf6sjv0Cp+KG9UjTwHc3xu12tJPRU12wo4CqF5E4nyqTvz6 -Fs3L9Jq6n5nwScAjETE7jwV4W6WTdeUlrrxgHHR0yciyfELGYWSnLvvD/uLIE2PE -I2fwlwpKeZx+dFghx/xTvw+d6pdTD7dks/oSzAiaT53SVJkuvJpyQ45Racs1yxm2 -BKFBNPN2GQVSzHkCAwEAAaNOMEwwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYD -VR0lBAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFFB/REXzQ1TD3BT1LfdGSv1d+67w -MA0GCSqGSIb3DQEBCwUAA4IBAQBTP+81kpm+2/Po0QEn6ELI7fOFbCDktptVf9fY -gQbZgClAaNOzC6is8EIq7VPUwPimTTMAgzMs8AKpnnn1XlAxHnh4usXqAQWz2s3U -+WnIRplFj8bbev0GeDQWK3eQqqvlFVIkFSItgPmTeukmPZe1548s6Dh83mNpy6J+ -1YP3yBjBQSnQTVy/9HdojaaLHRIHf0I+EDDTfJ/zbHZOGlwvthcxlb3Kz/1Ykhyo -0FOAIaTC7V8+3fKV5lut4NV2JQbZ+r4ZcVZb0cJKE9SAnnhnWxowDlzOVaGya8AI -dMGWAhUI9p4fAOr1HKR+06unGzN9v+OF2EsFtU/iWYr6sMcI +BAMMBmNsaWVudDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOol5/l7 +63ZuThH0wFA4DwtvKjkHtoXU/YeUcJ8uzwRba6/KJxyK3gsfvSSkJ4QjoOLSd86q +KGpDzsmIHU7gWTrBpjdK1mbDamiNDPKVJLFpiRYrK4ypdrcKYrW0IZYCKkI94i6m +U5I3Evu73jTJ1Z5cusgb2PI8TLA+L5QHB2gWAEdE1qF4TYTt62dtHsJ7irIerxUO ++ZCgCMWRYvNt4T52B3DevltCGM2WkCl/guxL2BTCgDB4+VG88Jp7E1Gr+uSkhezH +dSWemuiwB0QjAuhtbhdyW5AnoDD5auaXPVuTOQyPOCtxHL3EkNA7O9Uexg8Uuy1Y +f11OEVR3i79wRSsCAwEAAaNOMEwwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYD +VR0lBAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFP48R6f3r85r+VmIL0FKOorNgEXV +MA0GCSqGSIb3DQEBCwUAA4IBAQCRvacDUQ6iweRiIDsaSkU2DlSPceILQ7B+5vn9 +lN2k2G61u08XbZIgETbwpsE5CvP79RjU147ooUYnHT7ifxrQT2ebk6xtffBKVKgw +gapXxgVEk9RaLiYHgymwG5hel44euj2hhgMYki4HWOeDS+W8SH20B4BCIpAf2a1Z +jPHzZsxtrwR4+/nqvvQnfKC5lAdsgmnpR4tOOnCAbyzA9eR4nSaxxzHTOq+vhxNj +HUZ871yL1BMMB9P+bDuqE3Hg6Vo+oohdFCr6vbzdGPMm95HfVpxD0c6UFZo1jpPR +Vtr1odloEyoKBfC2Eg/ecijeqcqcLJSXqfaPL2jmkpx5gaGM -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDL3JfCFldvfFVe -p9VcMcfr61uVAYBLE7nHNs0/bKOEAsQFrwVgyRAYEaBl4xMdABbsIGmqhmoXwRXE -22E38oP6sFG3UJN3AW7ibvpM7ws8VFc9BpCCfqMtDztc95WPQt10auQ+3Owvu4Nf -CfdY+cQX9MCn+rI79AqfihvVI08B3N8btdrST0VNdsKOAqheROJ8qk78+hbNy/Sa -up+Z8EnAIxExO48FeFulk3XlJa68YBx0dMnIsnxCxmFkpy77w/7iyBNjxCNn8JcK -SnmcfnRYIcf8U78PneqXUw+3ZLP6EswImk+d0lSZLryackOOUWnLNcsZtgShQTTz -dhkFUsx5AgMBAAECggEAPJH+RRx+PhGjC8yyCAKCdAYp38viYmwp9pbBxOZybvaj -Z0zpPCiBL6WNEri1JRixttaqjpABVa142lSUPhtAO2vH27+FEQbL+1sd413i6Lnm -catRpHQb53dvG+Az/6zOP5jC2CqrwkLkdYhwhW8wZC3EUCSccFPCFETkoB8tik+d -L7Ri+mnKuYeAo0gNzOkXs9jl4ztCeFovobulIf8y1kwHdcPme5TYK7LdS/32A+et -ZqglgzLGNglucAXbdknuUKXMA/khHpM3xQeI6oC4H57i6Iq9tIZm3fTwtCrJ/GRG -ocFkjeEc3KRVASfgkmihEYtxLegm0U7B0IIpNPlYAQKBgQD+dEYb7gdVTsHPPPxW -w1PgqnkwmGlGLItLrXXazHWlISAUJw0yeeLAXJ3INL5RIIis/EfifxFXohC+UUvK -LHBmOZo7CSVfobzJL838pm9UajONd771jMCib2tIBMtOvir3vfCraoISTCHgsxI9 -52t5Cuxv3QuDS3LJHzV6HXluyQKBgQDNGaM/ORiKWcKzOjE3dMbgNGtCNCtZhb2B -OUStsd2n6h9qZmB5lLDnNyUZnYjJEyRoVt3caRLSDa1f2T42ywxvtpZvQlyll6hB -plPqvSMS4g8CrTXV7oLj+GuwcGWsPTDIOOMx/PfE9bznMNRTIxiXAcCXRm4SWsgR 
-zrshBUPYMQKBgQDfxB7Xdvap9Y2IksgSAMnle+UBcDa8CHYjAhLva/hVVBEix8ja -R8e5hHkY1BE9xM+M2Hra2wXEO/hfdZyh2Xyq2SMhoYRlWhVsE4chFLg40wAs+05K -IBJThoogll64C5I7taRNCmgCcUMlmDSFjdq8YnfUU95JAvOZnMFpdzwPyQKBgFAj -T1TZs0wDjH7JIwffGgHqmWoxxiphhk0imkIf+FKuuP2y6Mk4nvKI6ncaxaKDVztp -jaCccZ0fePm1gYiZR8+ykQ8B5/9PxY95NNrIchbjuye5lAp1+jCnFTTIhgGJmzDw -gV070Xfk6J2Qx7WARhBiv+SbwVBspjXHB/j7/KlRAoGAPANseZ33ViApVcNM/Y/K -RzUeTP+sqLtZ/CUrSLFxmE+++BMU1fZ10Xn+al/4Unc7dMZxh5+mgKEmnBjYo1ey -NfvoxL+aNtccVkueNu1A/UsmGZwMW2ThQfUvwbrJHIT0XJcjdlSnvnqzNwLiejo3 -/G+YoOXeyZHGkIQfo560RK4= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDqJef5e+t2bk4R +9MBQOA8Lbyo5B7aF1P2HlHCfLs8EW2uvyiccit4LH70kpCeEI6Di0nfOqihqQ87J +iB1O4Fk6waY3StZmw2pojQzylSSxaYkWKyuMqXa3CmK1tCGWAipCPeIuplOSNxL7 +u940ydWeXLrIG9jyPEywPi+UBwdoFgBHRNaheE2E7etnbR7Ce4qyHq8VDvmQoAjF +kWLzbeE+dgdw3r5bQhjNlpApf4LsS9gUwoAwePlRvPCaexNRq/rkpIXsx3Ulnpro +sAdEIwLobW4XcluQJ6Aw+Wrmlz1bkzkMjzgrcRy9xJDQOzvVHsYPFLstWH9dThFU +d4u/cEUrAgMBAAECggEBALyy79BP3eMD4kb2SEZd50H/xLdRT/drPycUqe6fepa4 +VoDFGeAWyfuNCJGO+Ym0bORfWc5js05wdyZTW8tFYqPHjHzjAwauVgMMKXMWXwvb +UW9cOyyRJes9o29sS+ToucXIuY9+27rqR5I4RUulnVKSyLScPKp7jhI7C3zwbTej +eLI2FkiOS1iR3b1ItnE/e3pDovk7WEMDe0AZQ93soIDGq3WzLsRnpExK9MMzfvu/ +7IG4mw7ziWHe/mUx6POb0xoqkRmZ2+LFyZOqnlQsyUhxRbO+3LuM/H17c1av1RNy +3tYdewBRn5l8vsh6nEm8UimDJQQIZEGuRBsbFPGJlQECgYEA+4MrYKuohpH3TfKj +Z7CwPUDsyh83FbptuLkKk5ztlSytsGRh5liicYXmxr68oBvTJiEa5mlMxjt16OFA +E88sbHBYFPueiEUNc+R/g+BRfMCbFS+9agfe/5RgQNWEokWNcmdHVYQShNtujit2 +rBcff49bSwzGsRuy0O1hiGpXz9MCgYEA7lNsA0RZkY9Se+FUcCNyhWM9YrAN78mn +WPM5AXq3NjZaO+xPYGNM3O1Xhvaz/EFQtzekq1iDLDx4WiQsbV/jBgLF/Q8Kuxrg +0PcQFjPCfcErOy2jBs8Ks6OKHX4sX6pDkvPIEubmsqnRbvV0RZTGcxAtsiq90LBq +mSeTGsUCtkkCgYBFY8X28MUZA3JoZfXhKKUm8R+jEAOhkgKtgRfC6/u8OUxeKwO7 +il6e1WN7F6pwvdx+W4nRYeHVmxgHvQVxsam+7SvP9i+hxvNUMwlfN+cjdPwUV0x5 +0VwbxTLdEEt8fZXtp0LN/Bcj4mpY/PLLvcFp7wIv7YFv4YVvEN7kxPofEwKBgBTQ +vHJcmeYYun+PYqyYq/vyev4PmmgDGNawB56VdUMK5D6vmQ82HRR+tlJXYcj6e25F +MeC1Tl+iuHBHEIpNAasYuuorUiidZF1b9s+5nZcWNAxrI/4IP0sJUZrZ1k5UtKo1 +GJhuCmA4bM5gKdOZ9us90n/pM8LyZZ0S08pWwrHhAoGAG/FNpaeMZ9g63+39DrcY +OZKJfOMSREoG0Vux2nr8NwNB3Y9VRoHYJi2jOw010EPjvCiJN9ivnAHX2dhsnJkD +BhEEVfjwtEsMFrOTfVWUoh3fvS2imxJxmG1gwLmQAdjsgBFw1u91BWjtR77sb8Kv +OcI/pg4/nHDpMMulGK4WVJU= -----END PRIVATE KEY----- diff --git a/jstests/libs/client.pem.digest.sha1 b/jstests/libs/client.pem.digest.sha1 index bdc762f36c78e..22f90499415f4 100644 --- a/jstests/libs/client.pem.digest.sha1 +++ b/jstests/libs/client.pem.digest.sha1 @@ -1 +1 @@ -5894EAB5181D5411D4916F70B2CDFEEBA7445EE5 \ No newline at end of file +27AFCC0D1138C5A765A324057914ACF2435EE7EC \ No newline at end of file diff --git a/jstests/libs/client.pem.digest.sha256 b/jstests/libs/client.pem.digest.sha256 index 9de62d73900d3..d206d28deaaeb 100644 --- a/jstests/libs/client.pem.digest.sha256 +++ b/jstests/libs/client.pem.digest.sha256 @@ -1 +1 @@ -67A67B32FC253E12412FFC7D6A3C410F3BECDC10A45ACBC8BF422F790D4E818F \ No newline at end of file +6E5D1B2373C5DF89F78D1A1F2DD88A5C10218AA92885E4181CF44C90B08C42E8 \ No newline at end of file diff --git a/jstests/libs/client_email.pem b/jstests/libs/client_email.pem index 069a8a3b75767..7fd39cdfccc0e 100644 --- a/jstests/libs/client_email.pem +++ b/jstests/libs/client_email.pem @@ -3,53 +3,53 @@ # # Client certificate containing an email address. Includes authorizations for queryable backup. 
-----BEGIN CERTIFICATE----- -MIIDxzCCAq+gAwIBAgIEVcV3GzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDxzCCAq+gAwIBAgIEVWSODTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjCBlDELMAkG +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM3WhcNMjUwOTEwMTQyODM3WjCBlDELMAkG A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD aXR5MRAwDgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYD VQQDDAZjbGllbnQxIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAbW9uZ29kYi5jb20w -ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCh1x2IV4tVqxh2X/2FDKHE -ub6bTLnqj0kSVDty/c0egluLcwGxoN8x5Yb+aoRscwh+hHsn00rIPLMEqu/MbKAO -smKIdDnQXrQ5gbfvqcJbQyIYoFn8RafQXvDdzhTtEt9Pq4sgVGpEFytrQdjzV8eL -nKAc7B4PubAC++byH67T+BR5iRXcjw1aDIDpIS82/8r3R/lEVdQSvQi5vUy7xhH+ -ANpYJvreNdzG9MKHAYH3pxak//eZyDbUL1xheXaSyNnCbCnnxBnHqdiuSHzmXXgn -mjC++8gMlu2Bor+7FxGRBjqihACdcbnx7PF1fwiyBNaUJ3JWkve94kozn6TQV0d5 +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCreZvMNPhjJuSExgmpRghh +OlcfXYcUr96FqV1tL9PSXKF9MASRWN2YlGuPwHt/y1IlFauQb9SwHA2XDOODm9dc +1neOxfL6bcAUzcIW1iVJcKV1HdIFZu3QDAtRu5/deV1kp5ZSrsTgdX4gmuXTv7xb +/H8JcjLQ6MAMDeoVmoAa1y3g2mX1q8IeCJkZ270QreZDG+Z1u4Vz8bPFhFi2+0NX +0oR2uYKBIFDU2QSbMS2hp7kkbg+XW4uFI/IKV7Bg7/tcMJI/+3ru4zUJQoDVF4YC +zEbIITNGc5K1arEOXDkbl+8BTZLpKusjVtdNYWQNo+Zi83B1hUbWpNBD6UOCjbZz AgMBAAGjQDA+MDwGCysGAQQBgo4pAgEBBC0xKzAPDAZiYWNrdXAMBWFkbWluMBgM -D3JlYWRBbnlEYXRhYmFzZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBABEAJ6ns -/6f3uILOMtPkrBxR84TFrei1NTFiPhEg1b2fo5JFaOh08WasrA+fmiCoIqbXakS7 -T1one0Ww0IGwUv7A6twKQ6NJW7Edi+T8qisubNeG1YXNCCyETrI3B712P/gkayjf -/gu6pyTJqkFxQ0g85mg8leWN9TRKaIXYK1n3cnnY1L4Cndyp0JSFNg1gLTvj6yUb -xYJ/msQH50C3xgLgURGYBlY40JO6Q5N0gKXfmSBWewq3JhQ9BIyMy0HdAcs6mqwP -+WGKoUrYVo1h5GTB354AMHDkasu+6Rs92ovz3tjtiPsD2L+fFgfmThGSKEJrkZOW -fsLvGYWqU0PuSlE= +D3JlYWRBbnlEYXRhYmFzZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBADJLBoYC +mgu4D8P6sHPcyWeb2eIin6U+jalP5Ad8jGM5E6uh+LAL4qjtGiC8i4VqSQNHeJlW +nVS0ys2fM/bHc5Lz4SN2hMwP8He9ReXywudVw9ILUsukMYr0jRW1wsEXOg/pi1WI ++qVHLhOf72eaurH4e1wnPUiXqUCLcn/uCnQiiEzD+BnuJOI9INNXjdMp9EIkq3hh +DpGh8PXM8mqFB0scgMFBZ/K4DCWPlfTU3t8jUa8uie9/Ggwg6ljiphO0mNbzJCX4 +drbd9KM7EpoGq3X+3650K4TCz7X6jZtQb+GRIowFqN60HuZaLiUHBR8/mO3yy33V +1+xwd43H5eJMrpc= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCh1x2IV4tVqxh2 -X/2FDKHEub6bTLnqj0kSVDty/c0egluLcwGxoN8x5Yb+aoRscwh+hHsn00rIPLME -qu/MbKAOsmKIdDnQXrQ5gbfvqcJbQyIYoFn8RafQXvDdzhTtEt9Pq4sgVGpEFytr -QdjzV8eLnKAc7B4PubAC++byH67T+BR5iRXcjw1aDIDpIS82/8r3R/lEVdQSvQi5 -vUy7xhH+ANpYJvreNdzG9MKHAYH3pxak//eZyDbUL1xheXaSyNnCbCnnxBnHqdiu -SHzmXXgnmjC++8gMlu2Bor+7FxGRBjqihACdcbnx7PF1fwiyBNaUJ3JWkve94koz -n6TQV0d5AgMBAAECggEAQQw6JYOyUBN9uI5qUmC6YFybvMXA8AL5RrGuV1Clcf7J -Fqp54tAbBW4QrQ9Y/FHb9yX+bgphw0uKVHTz/wEl7+JI8jlsx1BZNcfqixx7Lr0z -5hwFLv08LucJ/syG4qa0NMxpFex37bg2Tlzf+yar4HRqclWA26cxlHF06JpNZYpr -3iDbnqCMzMIPs9boKnM+O3nRo1PCl7KrXOgeCTMg5tdCsMR9U2mNOZQAGRPrOwiZ -/cqh3rC0+Tkwpp6BWUM3Ftxsa3q3AX6WI5pQ6l7+2q1rysZpKmNCshO503mp4Lc+ -+jpaLcgzlgAsVqA5qJ8LBbnajDVeNDkCwmmrR4C4gQKBgQDQLCar3tQnmjZ1UeNk -STIhGYW/BG5mYxKj5shZqi4zFjH+KiymC2JfyOSHqZYqzYcbssGReWh0CJInskK4 -y1yiImeDWc05pHGsIJAfijeUfPSCwr3ulbr+1pE/+isyhX+fYg/rz3X7lm0HeLxT -1fGOrjWIvuOJif18T6MeIJ24EQKBgQDHBeV7k8Z34Gegc/JnnrBzxlYOsuHIXejQ -/0dztkFiFDYOw94RiEP50+bK56Soh6ZF2qaCQklkSqBEtl//Wm6OWpoweTG3rTgU -leEjp0Y7HlhzStCzDth2QBmxXZT2m/G/jmJoowDuBo+HVbrNa5gXF9yCXqWowIfk -zZmhuN7A6QKBgBzyVrpFdOjA19u+dEkoqHDT1LY4DoXsNtZVq/xT3rK53l/CS40X 
-PimljKmUmk3/YE8oryPkZvLjkjc04XRCyvG8qPopzZC7XhcaPBA2rv3V1kYsgC6h -4Wu4OGBWEBWpXJK0FxSqN3SxeR212zIpKLq3XLhUGt2wM3BKvprc3DGRAoGAdsEU -8kz18teq3bnxnVS6Ewr4lKK2SHmIjxSTzP6mOuC2dM74tdtqPCrtnorj3E+8rhfO -nRDye+5vfTCZTWPnbfev41adjOzF9rqL8VtBc8simgC9UOp9zOloq0Wcuh3I/TT3 -kVoaFu1BPU6xPRuDT9xEDmJtVKk1LRhlIHOQLukCgYEAtzYL+ZFrFrsbvgDUj17W -DcZXJA5f6XpPb4biMVAS7SuE3H9w9BaTLIIo7p10hOIKuiB7sG3yZ0oJKy+i9QWV -AvHGeFy4jUGIho4/TEhBclmgPQ0+vCEA+dy6c0oGz5rfQuBdZnQ1qyU28XhLR6/+ -mHIvte5PXur+ki4euVWLino= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCreZvMNPhjJuSE +xgmpRghhOlcfXYcUr96FqV1tL9PSXKF9MASRWN2YlGuPwHt/y1IlFauQb9SwHA2X +DOODm9dc1neOxfL6bcAUzcIW1iVJcKV1HdIFZu3QDAtRu5/deV1kp5ZSrsTgdX4g +muXTv7xb/H8JcjLQ6MAMDeoVmoAa1y3g2mX1q8IeCJkZ270QreZDG+Z1u4Vz8bPF +hFi2+0NX0oR2uYKBIFDU2QSbMS2hp7kkbg+XW4uFI/IKV7Bg7/tcMJI/+3ru4zUJ +QoDVF4YCzEbIITNGc5K1arEOXDkbl+8BTZLpKusjVtdNYWQNo+Zi83B1hUbWpNBD +6UOCjbZzAgMBAAECggEADVVfb/mOdudHTau4hJkVfbnznFTBdDJYszwAL8VQS0M4 +q2tYb5ThZkPkOUFWWRGOSaPtu0V23FrSE7TE3C76CBYFJezuf9qY3QVC2lO8goFn +3Dt9YT4nr82/85MLU0VkpbcCUVuVo35/WDmzNUvrcZuopStkGCqBBG7wjM1+uJdy +M18aB3jTCyWSO35AutrsHiJFjD8iY61G0UcXFF/Ue3CWuEdO6yUvoQrzUianTSfU +gOUCXAeRa1WAQyEbal03xGxXAIu1ZAnMN1IjYQnLGrW9kR+CyBmfNGva6EnfAhAV +uI+gNXng63f/B1pa2eCTHUjobK6CzTfHrGjLA813QQKBgQDbfvuKugdsprM3IQfs +qreeP7RuOdvYS/FHO7o2GxdUsfgMoxlozcme3ZHO5iLGHQajLMxszPhD1udLk/E8 +f2Se8BeIcuOuM9dlm5oDI3o4pDqm8+5yNBzyozNKywl5FM8jD5tUKMdqy3NnFc+a +8M/DxNjImn0Z/w7ll+GltSQPXQKBgQDH/iNOpTGrq/MNhd0/QzdwJgwOnOT4KtTl +LQ89w1RqqmvXbnMhNoxogpouN5G/VtutcOyf7Npdo3C26Hdnpm+GIr+l43ZYmVQi +9siBjZZDGJk/CDAlj+gkY6UlwN9x3Qi1ARnkeSZ+3nk3LqiVDpczM84CzWveSZdH +S/TBvbcQDwKBgC2Q1d09sy7eCFRIeiGqawXiCa3cOwRS95qnDogO5bACZhERhsPX +/KQLMSq3Yb6o5ejX8vQfNOa4ZTmuU6UQeS35f5km6JcQUgXY4IcIRWYeycJA3147 +7Up9kLdVIPlO1ZVctw0ojgVgiOt8fqWWmjr00WALmi7cYRZOilxhF0RNAoGBAMA+ +pYX1+fdCBRPcuD8Vx/bAPBrX6qo70gay99GxMDaQt7WQ8I7etr3HvZnrxOie//CL +eoqIzafxcmoAsLcsIExanstCCgNE8MHjY/5VMjoxLS6QGmghG0/PkXLnImN7y+Di +vBcJ9l1CUlcfPOJ78hBAHipHeQdmykq40wBh89U1AoGAdG0vbfHADzyJwcqQ9EM5 +M1dZAbGTRPkD9Wu5yWRx8JQobLhtmG6ow7QI1XGgpyBLkzvKga0b0JloPCX9fdj5 +gxsBE3is2LRbGqMKYeR3L/9PNR3OHJLTumyFuOIjiz4fVbEoDU7VtLLwI8aXNj4S +7uohF8n4KqyJFbZWsap+nK8= -----END PRIVATE KEY----- diff --git a/jstests/libs/client_email.pem.digest.sha1 b/jstests/libs/client_email.pem.digest.sha1 index da13d5bcbf589..cb8117946575c 100644 --- a/jstests/libs/client_email.pem.digest.sha1 +++ b/jstests/libs/client_email.pem.digest.sha1 @@ -1 +1 @@ -DE8636084C85D07B6DE32227BF5797F2B078AB10 \ No newline at end of file +A6976ABCFB735B643160A63B49A08DEF5C45253D \ No newline at end of file diff --git a/jstests/libs/client_email.pem.digest.sha256 b/jstests/libs/client_email.pem.digest.sha256 index 33a6d18c1029a..529d387907c03 100644 --- a/jstests/libs/client_email.pem.digest.sha256 +++ b/jstests/libs/client_email.pem.digest.sha256 @@ -1 +1 @@ -449F68F4701C634CF90C2E1ACABB550499B31471EEF89F5439D67FA5F4F2BC80 \ No newline at end of file +35C5F350766D572190D381F9F6F130E0A5615A141C20AEE6CFB4596FBF2C689D \ No newline at end of file diff --git a/jstests/libs/client_escape.pem b/jstests/libs/client_escape.pem index b262f09b075ac..ae61235397a68 100644 --- a/jstests/libs/client_escape.pem +++ b/jstests/libs/client_escape.pem @@ -3,51 +3,51 @@ # # Client certificate with reserved characters in subject name. Includes authorizations for queryable backup. 
-----BEGIN CERTIFICATE----- -MIIDhzCCAm+gAwIBAgIEDUtBFTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDhzCCAm+gAwIBAgIEIO+6PzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjBVMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM3WhcNMjUwOTEwMTQyODM3WjBVMQswCQYD VQQGEwIsKzEMMAoGA1UECAwDIlw8MQswCQYDVQQHDAIgPjELMAkGA1UECgwCOyAx DzANBgNVBAsMBkVzY2FwZTENMAsGA1UEAwwEVGVzdDCCASIwDQYJKoZIhvcNAQEB -BQADggEPADCCAQoCggEBALG3OMek73tAvuG8yZEtzu+rg47Y1Y0NQazAnJ1eQdsy -txLh1+CPgg2PrbDVjGdtcLrBbaOh8ETu9D8QMZFE4ypUSBuwwvCK0avcyVIjGs9P -A3BRp/aAVTEHyKw+rhhBUsEShUTNMU9xT6X3JzJXf0GhvDXuIRYkDC45/XK9Avvr -5tUj/9wEBy9dv7dAnbcja2YFhIATJPZuC7hyixs73DzAsth+lHOhSVl1VXfDV4tX -DZxgMfQhbX7Lgrv845WinSFaKCyeqBm3bOjVNpEc0lg5J3PbttnlYCR8exchvZzG -waZZoYCaWmcwqWJ9/kBp0liNEzf07Rwcj1vPoOA1FAkCAwEAAaNAMD4wPAYLKwYB +BQADggEPADCCAQoCggEBAJx4W+j5ckLQkyec4zTBprwDv+6Ba+DSjEmdmKEIA0dz +Ze7SvkzjXj1PTJnh65M2gF5OTGPhHWE+40GYPJEktehwVnrBxDRRP87S2Sr8ZsbV +ib0cYjJpW3iF4uWntER3ez4iO5zHpLyPg2ZykmEuE2QMExtUO5cqdEXGuHfkCkgG +3239bQ4B9K4kQ9ly7n3tIDObBiDFMu/PEgZrmHeyKdHfDrL3HDBEUeF6SL6nO+ws +MkDVLymVACZeCF3ms6dGGOuq3H/940Ick5SqkWFUm9Ggwvk3M0oiOO5kMvQzJuZa +Lxq8Gn+XTAfwsYVOhk8cQ/YJgBYpP2qgHdi37lTxXakCAwEAAaNAMD4wPAYLKwYB BAGCjikCAQEELTErMA8MBmJhY2t1cAwFYWRtaW4wGAwPcmVhZEFueURhdGFiYXNl -DAVhZG1pbjANBgkqhkiG9w0BAQsFAAOCAQEAJ8tGkfZwIR2yp+nmLXJHp/lJorC3 -DH61UhWtnmq7IB7hEx4ufUNB9KOshEO9nsbKBeuZXNbRQ8DTlvrboyv11xZmbFre -GMquaomxNZ4w3xax9Kkuv7vlFeBQ0WAfup+p09DY/kUxKy65w3eYhe99AzbCQBpd -nnlk2AkYZz5uwfQQffDkEknG6p1aifVzqRiY5GxaTeXmXRe37VuNfO39vAOGmKXk -+MVb4uQXwGq6ht2oY7sLS5yplVEmISbHAa1ljnX+X0A2MxhhIIzrJv/TG17sbczc -wwGQsPSDnqpxg1Kljtq/VFSAubEykZXHmwbsq385oYrXSA+zP4+9QconNg== +DAVhZG1pbjANBgkqhkiG9w0BAQsFAAOCAQEABv+zGbzRqf1UEeDluN6KUEAl/DwO +WxnwSlu7M0NVDJtgStElg7tI4QLiLaWCbHDfs3i+vjDy64a7AulJ3hj8KrCL8f9E +0V7b2+9azc9/fhRheNPEVUfpcFAtr5v9WmWHGaKSo0m9Y+S3Wy+Eyq1SJ44GXzFd +aSJBqvRCuMACKfXq6ZrBRcvKzrsyQqYIfUjj3Vc3exZvhpIlhfs/IRQsegRB2zEh +IucMsmSVDFxgL6UwxRCZ0Zcq7akVv3j2Unu08ZqA/8o4kICZM1jJZ8RLQBfFwRka +9PdibRufrRdX1rY93p4FfV8G8Sm7U3wXJp+yMr8Lmm38gR8BSoYEfmRzBA== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCxtzjHpO97QL7h -vMmRLc7vq4OO2NWNDUGswJydXkHbMrcS4dfgj4INj62w1YxnbXC6wW2jofBE7vQ/ -EDGRROMqVEgbsMLwitGr3MlSIxrPTwNwUaf2gFUxB8isPq4YQVLBEoVEzTFPcU+l -9ycyV39Bobw17iEWJAwuOf1yvQL76+bVI//cBAcvXb+3QJ23I2tmBYSAEyT2bgu4 -cosbO9w8wLLYfpRzoUlZdVV3w1eLVw2cYDH0IW1+y4K7/OOVop0hWigsnqgZt2zo -1TaRHNJYOSdz27bZ5WAkfHsXIb2cxsGmWaGAmlpnMKliff5AadJYjRM39O0cHI9b -z6DgNRQJAgMBAAECggEABc71PiE36pFdK8ed1u7mWpMCg8K2iBusz9ajx8jy4hUw -Qp+7DI3kNElD5gm0wxSioJf922FJlxcwSHdhB1x3CEUk7jLCzFXdMH7Q3axkDe6J -MDya/sWHA0k719Vcn1JTQBg43tzOBciwT75aO5z8fcqpeiHEVxLqiwA+NmfTFyS6 -vHMm1CbFsYwdunyv6x+jSOaRd1oXKtsfH/KGELAO38v5mbzJSmLu9mwvgIaZ7WMB -Dv/XQgZJn8yFjXrQhOWPVs7262Q8yx+0gY37fhJbPAYu6lCHi8h9XdoaNKnP/rEL -BPeVB3CgDKFqSteZncbeWvzk9q8UY9JPD43WIt2mAQKBgQDeAz7/jDsJRaWgVqwA -mE7YSN8VCXnCJWX8+T/LrRYhE/bYSexIkbWq7FCYpRncm/1fPUbocJsjNIx2mEZG -4GLkye6d2uGz+vWM27mLNMSztB+c9yVlUKJpDfpPWnM9/GgEDKmj8eNbDVljMO/5 -EyRBauNbaobDG/ojYxMeI/dO6QKBgQDM6/ZMKsiIRcSjfPmsrg/OFvP02dliObEN -qbpGk2vRpTiYnopJHO8LBb7rlmoVWV4rYhlBpFNrRAObUVLv7LG1bdo+Q/mlE/EC -FPAKfN7GogSiF24l/twLmoQTgOwW3xxTOzL6kSV9aHxVOL0N2zoTscGgbZMV0Hvz -wacI/FOoIQKBgQDKDLI86GaiHZyKu9Z2BCddd5RvFNyW8GpNPJnux2uoVv8EAOLl -eJaZI9CF21waBm0lTCNIT5MhCyX6mML8piHajlx3lUhsAC2RMDdAlZme4oMS22pR 
-Nn1YlrwDlBHli90uMkgBSJbdutxrBZKgX+dTEfAwtlZHIyMXKYewmpLsYQKBgQDM -0Yv71UdZ7WH9hoyG6/MgKhDsSlHcu40b+ukUYYUCpgFLiirtqN3ERpntHwopDInz -ErnrwjMeo3x3YXFkHVAB5yqb0ZVUSfo5+nNyCB0irA21dXXCxFlrv6UDWXif46CM -ED+D8k9maWjcRTmw/82soZ7Gmr7Irvk1SfmKIan8AQKBgBB68OsyhPFY2rpI+f5L -G3aF4S6EJpYtbJvq/UDLCLiXh8mgE1UMTephqD/ZxfM3fN64Ux2Ezh+hKSDUfZEw -xZU8PnhzFwicaZnGOCt267qO9fcylHIqJ3WMCCQyno9pWcMZDvKrTnZslfGeRWB1 -wKYreYBUywU7jSZVpFm2AQDJ +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCceFvo+XJC0JMn +nOM0waa8A7/ugWvg0oxJnZihCANHc2Xu0r5M4149T0yZ4euTNoBeTkxj4R1hPuNB +mDyRJLXocFZ6wcQ0UT/O0tkq/GbG1Ym9HGIyaVt4heLlp7REd3s+Ijucx6S8j4Nm +cpJhLhNkDBMbVDuXKnRFxrh35ApIBt9t/W0OAfSuJEPZcu597SAzmwYgxTLvzxIG +a5h3sinR3w6y9xwwRFHheki+pzvsLDJA1S8plQAmXghd5rOnRhjrqtx//eNCHJOU +qpFhVJvRoML5NzNKIjjuZDL0MybmWi8avBp/l0wH8LGFToZPHEP2CYAWKT9qoB3Y +t+5U8V2pAgMBAAECggEAdArfPx0gnGTLfelgp9OmsHGnel8Jfqcn28DBXUvwAqGW +y1QIRkWpspaJHObsKzWUZlxzUu+1NlXJfPBGmu466rPFryzdMl+/jsfYv4SJRJVB +nbrbKL/yUqQY7nE9xDlIcYA57cAycJWOwf/eh6wAPYam9PG3cBjQOp9yV5gL9/XY +AT9M+IqVztRgVHxcEYDuEnTJsQtfTH26RW2ylYbeoZsOZxrXVXff3cokm4sGF3aB +Jr5RdBud03aVBz2ilWyTyDKp/VhQRsOmWK74icTXhIwv9WUx54c5O2VKywHxdouL +KX6yaa3QrQNqlvjbN1qYs08ev6xIImGiZSbJLly0AQKBgQDLC62F8CMgH6HnFB3n +cYR0BhlLRCjLDanCdRfKe/P0VvSS/t+f6g0La0wfhbx6K05N0o1r9tp8HCdE48oc +lTQICoIzORORMJNdMWAgoqREnYPYOgHAUs7SncR1+IjkpfZ3UiJjWkHN7BOJ28L5 +X08gdZULRpKa83K1MQymPSF8YQKBgQDFRxMWlY+07QKA9iTB3yYplD3OISlO6xLB +l+abomf0a0Dc5qC7agPXNnA35eObKhQOM9cBGtpGx0HHmMXif/IewmhL/KM/uRIm +apgyYQiGg/aMkEilW4UzcUQ00UsBwlVBkHcEO4SBJhpBP2Kf4D5RICHGE8/9vRfv +qqIq+H6mSQKBgQCOtvaQXelSeulhclJSiwd+RYshzBagIkpf082VFOqzoyrk5yBn +Vis1C4XF4kpH1IiFSqj8adXHxkITucglrvmTbU92kXefZXUu27WlOqwbTluNb7gr +ZgjZIOslwDr3+27xD1n8W0RFaNmS3FR+0u7a8cqA0mnZX6QQlxk8/1q6AQKBgA7Z +Y7ludouyz15vqKKjLlcw3loWqupSzW2fBm+ukM6YCCDYhz60Iyfe5CGA/1ndl/bd +thBSOh3bv7rLaBG9ebcRARK/KHaScqhLm7snDKI7aqJ39c/kjKkrnGuxWUj/nLU7 +r4m1BStHd/BzWfQYx/gJSCGFukEqK5QRrvU3ESlpAoGBAKI//xqHWGulMRHPfTY3 +blriUuVxqkL6+uICw+zFLMgtQb+0c6uBFBD5Dewy4Cz6li4mtQ2KlxBfnDI6icU3 +8Ewk2oT6oXUdZl/WXHUaqDMcW87DgknBUldMZBh+7WA4xrvRktYCq3EvsVpIvncs +YGN+GU8JqkNNlw2fCZOvipiy -----END PRIVATE KEY----- diff --git a/jstests/libs/client_escape.pem.digest.sha1 b/jstests/libs/client_escape.pem.digest.sha1 index dbcee0140ddfc..161ce10160dcf 100644 --- a/jstests/libs/client_escape.pem.digest.sha1 +++ b/jstests/libs/client_escape.pem.digest.sha1 @@ -1 +1 @@ -F591F020AA95C50F153085D577CE8571F30D8FAC \ No newline at end of file +666CD448820851EB1B094B7F9D98B7861AC5F6B7 \ No newline at end of file diff --git a/jstests/libs/client_escape.pem.digest.sha256 b/jstests/libs/client_escape.pem.digest.sha256 index 2bdb65f770ad1..3e8f06cb4835c 100644 --- a/jstests/libs/client_escape.pem.digest.sha256 +++ b/jstests/libs/client_escape.pem.digest.sha256 @@ -1 +1 @@ -EA837C5070EEAB2FF64789387D7FA660D5639AE6AF262F43CC873A76F189176B \ No newline at end of file +95F5C4C32BFDA406D41616E23A07144C6629603E49A5C3A208E1C1CF1C2D3704 \ No newline at end of file diff --git a/jstests/libs/client_privatekey.pem b/jstests/libs/client_privatekey.pem index b883de2843b57..6c65b783090ed 100644 --- a/jstests/libs/client_privatekey.pem +++ b/jstests/libs/client_privatekey.pem @@ -3,53 +3,53 @@ # # General purpose client certificate with roles. 
-----BEGIN CERTIFICATE----- -MIID4DCCAsigAwIBAgIEL2OwKzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID4DCCAsigAwIBAgIEE2FVsjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQzWhcNMjQwNDMwMjE1OTQzWjCBrTELMAkG +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM3WhcNMjUwOTEwMTQyODM3WjCBrTELMAkG A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD aXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxDzANBgNVBAMM BmNsaWVudDEiMCAGCSqGSIb3DQEJARYTZXhhbXBsZUBtb25nb2RiLmNvbTEbMBkG A1UEDAwSQSBUZXN0IENlcnRpZmljYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAv6/hbwlJjz8L3KWV/UkhoUTPg37fRp78Wt8xQDNWTn5SfgJRYxwQ -Qz3zXwZBgqon7tYM5H+CE9SXtmzeQjW8ulgXSjFZNo7Q4C4owps6v7RVcMXgV+7d -vQvesZsD7m00z9oEpPTs9HbiVHuq7Acv3CjJefGvX74CaDM123soVhCZ3kGr+Nyf -zW/rkVtcMBjcg/d6g5umy0CQd606a/0QTkKt3bj3KK8hfp8m5R4VTtzW+gXNe1Y7 -+Jo6Owtu8S1m7KK08yXXeYyRHwo7Hy7x1JGh4nNwjtn4AJvnYpijsEBz/k3Lr+4n -GNSx8lMYAm7RSr3Jx96bXMeaZ6lyqJgiRQIDAQABo0AwPjA8BgsrBgEEAYKOKQIB +MIIBCgKCAQEAumqDjL1EEZdppEL/9adf4Tspw8Evs35mxVKWdYRTUMc/Zr8lbY6i +7bhHTyvVjfsCzNGW5AwqrDHVh9IRseSYvmkb2MIl/00Feix5yr8la9BxRsToMT1i +XRIuixygG2IedqC/FIHYJhkdFU6qsoFDgD1aCy4bnyg7JD/AVCwSsC+SMWRCSs0B +8OzSEzRjgaLXVezWXZ3w7Phwfv8TsG+tXX1Z17okUuRZSZ4hCwMSpoSn7SU1F/pu +saQDOJsS9fiLSV9f4jJdZxJ+4b1Yn5DzOfKBeJVqUvm9RxpZK1fTtU/Dv7SLGcCo +SdpBsUVBz8y7zjmUehtDwJN6yzkYicadGwIDAQABo0AwPjA8BgsrBgEEAYKOKQIB AQQtMSswDwwGYmFja3VwDAVhZG1pbjAYDA9yZWFkQW55RGF0YWJhc2UMBWFkbWlu -MA0GCSqGSIb3DQEBCwUAA4IBAQAEUbXUsfXgXsRtqLEz5h+JUEfn5i5Ac+PrK+zV -lamEOKTjDnLggkXwK0KktRJAxBxJBI/Flf92LiHgHeGVCb7KNP/EnSkzh0zJkbeE -W+j9uXhTZt0MjVAVEOkgblp90CJOqfMoXPZ7EU9roz+8VwdZvriYYoiElA/cb/SK -v+ezjc3oc2kX0ODCOhIoFRsy7TXdq9qQGjUHl0qKv9/9PTiItKkrbS8r3jwlyHUU -OmuDUu+lOW6nOLkBmolEOtPARp/FuKNssS54IyFscbndGDteqSRZ2LpUeO6bodms -h8VJvNInhCQBgHB/0+IyRiOR2CNhgMAbg1vTyckhtEAvqwls +MA0GCSqGSIb3DQEBCwUAA4IBAQA/jyavKT5je2l6rPnh/1nXh80dVrQsYuuy99HE +QESXVFjEFw+B5sCcaT3zVrgkvgD1F+lOEDYR7MG/uK6xxBJ3F+jJRWA9Z29U8ifw +Jrcje4d0tKoSQN6tz76LOMRpelRq9dRizPCygwDAdbfJasPHParnneCjMdy9caP2 +eWjdVnuZqGXDbYQB65L2G4hZGqTczJ/aJLhf5obwJkVwCWdsfZntA3b8AHVu54bu +tG2V9S8l1bHnU7C4DKmxuoAnRaYrQ/c0eT3mOVwD3IpOe8gIkU52T9U8BOKy0kzH +mCdsHFInfPI4CdqmReX8Rl266SigIZDVhJEV20G8fjDECLRg -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC/r+FvCUmPPwvc -pZX9SSGhRM+Dft9Gnvxa3zFAM1ZOflJ+AlFjHBBDPfNfBkGCqifu1gzkf4IT1Je2 -bN5CNby6WBdKMVk2jtDgLijCmzq/tFVwxeBX7t29C96xmwPubTTP2gSk9Oz0duJU -e6rsBy/cKMl58a9fvgJoMzXbeyhWEJneQav43J/Nb+uRW1wwGNyD93qDm6bLQJB3 -rTpr/RBOQq3duPcoryF+nyblHhVO3Nb6Bc17Vjv4mjo7C27xLWbsorTzJdd5jJEf -CjsfLvHUkaHic3CO2fgAm+dimKOwQHP+Tcuv7icY1LHyUxgCbtFKvcnH3ptcx5pn -qXKomCJFAgMBAAECggEBAJgKW1cFGNGEAlabCGMEdKZDvAzivxp21FcHnTr8/UH5 -NFk6YW+pPMT5CGagwalwaYgpQ8Sh1n0ALO3HYGtH0FBFuwNgLRD3KnoGYtIo6epQ -kUyHxzA4CK0AEzzwZafh+ve6R+DHXINzIIY+KQ5ZjP6lY1lT9/SK88HOjLNv9kh8 -NAEUoBtcMW9kDryDt92PAlXoguXNiGzd4JYnFg6iFLKn7tdgczz6dsrTPTIzmOtq -KrN6G/yFv8dG0MnizcmlLRKY+Zkvt7YcWatvWnpDwZle0ABjIemkpWV0hFjKPnhk -qOM4j9gw4y+Qga3agj8ZWXtJGX/8cOvbszejQlesPoECgYEA/IM39oU0y+NZOkUA -0qujGbRk1FgRiOGvZf+xK51fiSGUBjomUp6aLBGx4tPUAfOgoXIA0Vy2x7YRSK32 -ZlgNH9XKSitzsPvZHGvXgCm2hU/NxPXHhsJvNuzGqCri3TP3xUzcH/hTDO2y4Y+O -NYzIN3PjEQS8ba7t3hTEJBjgW/ECgYEAwlWbkzHwFIyeaLlOqqUi9Mr4C+AH2Gv2 -xE8SNV6Yd/kY02TkyPnJiChuzCHVLOu+HUsWCX4B4Ry6uRH7/N13q1lb94ePFNx0 -GD5CVRs+CU7qXja8Ey67i4jlPl0224VhHOQBKe+HMIFMLkKegp2LIzK6blVmWkbF -lADCBBgXj5UCgYEAtF6Ssf/Sh7Uc/ldd0B4UAf9uapOB6vGylTxAdLQUEuMuVghh 
-aXCrFcGJ/EltTfuViNzjIqmEUkGGNRE2SUKqFUxkE/jXydsL6ZZKt9yT6MPpasBZ -RrRKNsSI3oTLylAdswxEzH1X7Ys41I/zd+LY/WtFDdoBMqPae2D//fza/jECgYBP -SD+1OI1axNenn9ohMkflmJzDs31f5QQCqMOj9Fi9sWlYbPQNriJzIxO4wiDHN9IS -/1wZOUgo/+CJunWUfwHgbQ9hF/PereXLMjM7p2aSw5hIIYpvRQSMc6ga3kqQGoU5 -FSgIZMlBl65tvQ4P+ZgXHw4CD2M3d2IJ9JkytGWAVQKBgQD5F+eiPzXWwmUbBxXH -leIQ14QMfIkL+2XgqXYRndeZuoIiwEHjjIF9FDLas/gFWlu0lBi2v3oOVIZxQyG6 -jfwbneFu2xNZlD6mQhWPpTU2xqno7T1NEsop6Q8Vt6pPW6SgJ8gaqAlK3/LyX6Ej -Y7eFhnHjLCwgq3B0lBkcVeg4lQ== +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC6aoOMvUQRl2mk +Qv/1p1/hOynDwS+zfmbFUpZ1hFNQxz9mvyVtjqLtuEdPK9WN+wLM0ZbkDCqsMdWH +0hGx5Ji+aRvYwiX/TQV6LHnKvyVr0HFGxOgxPWJdEi6LHKAbYh52oL8UgdgmGR0V +TqqygUOAPVoLLhufKDskP8BULBKwL5IxZEJKzQHw7NITNGOBotdV7NZdnfDs+HB+ +/xOwb61dfVnXuiRS5FlJniELAxKmhKftJTUX+m6xpAM4mxL1+ItJX1/iMl1nEn7h +vVifkPM58oF4lWpS+b1HGlkrV9O1T8O/tIsZwKhJ2kGxRUHPzLvOOZR6G0PAk3rL +ORiJxp0bAgMBAAECggEAWX6fTybiEev2DKUZyu6iyjekElWht/N8FUlT8HEpqoMt +ff9QUauDrsqkeW7a5IHYU0pfvXXV2rtF/yGr2xKa/fbqJoL2yP1KHuBRTDO+HGeC +qm4H5vPIUFRVQfXpK5xMcbk0Yvz/LzMpUjxlGLOUG3HY07s8CYm/8uQkSoSqKCir +GmfWjvTkLsB3S0EtnEQY5ewrveTwXlKTMLExTR6bvKt14nK+HM+1Ys7om9u+fQF8 +eajsGk3sTUELWykMg2RhUwlcIZhLyv7EJlxtCB9bMg51uUrfFz0W1DzqBHKGTPxv +K6oT+nFG1E6GZ8YdBoT/aDYyj04ctFQpKhbTyPbGCQKBgQD287hAo9jCSPBQtqF7 +ZJo4mx+g3keiiDGGVXBME7sC4oDk2fs7spa75sLmEXNN2wMeCD3sQlsiHxJUeFIt +yP7wHiiRUiHET7+rVWg0QwT22JHMUglfoLbcqoSTp6/XujJ8lkEnLA0yi3qXJGts +BStfyfX7y6Vfqi3jZzf4Oe9cJQKBgQDBPv+mgAqffcTJMXqYhrrPF9emoJoW8OWX +6n8Vm0IWnN+NLo9UbDbpiCbhavdBLBIYvwhiycZ2pz2/zbss8+G8XuI55n7vOvUA +smHr3XNtXGzn9QgF9cCcXinhATTtceTNDENQfoVv/IJzx9xslsrOvXzMVAJeR0pY +oaewvLgwPwKBgQCkl4wWiR3PYA3Lve2i8EpZsApEjeHw0hUwE4HlMsFSCuUdoqtv +/ne4hPUhoj/XhEFvos6iyRc0hsEQy6D6IxzHrVIciUE4Nm7pIuNw8bo9S9rg80yM +D/HQ4VW7k+f/QHqlzv9dvF6PcacjVYLDXC6siU8Jo5F1UVeEWSdUqkh+gQKBgQCK +6URQZZ49Q+UfUyfzWHYUw+jK6IubjhMcfv5Xg8GgC6hPDncNkrRubua0B3YxLQLd +MwVOLgkx5cpng/XWvIE6LWKliGEaiuDvXUsyh3+fz16h5uubjSqlvLKSAZIQVVzJ +YNVKmYhGFdeYbsSucj7bGi8JmiIRr2FsENhAHYixdQKBgFekRxAEJOUy24hZgs0U +5hj1rs7kHMK0nq4MjaJ/p5t7zXhNTk5RMsevffLoFM9a59uxHiE7n/O5rWT41o/X +Js4zyPvWfVgRyT+UAPZoO2ABJ+rDzSjH+l2o9l4Y0O/mSZLRBZPrtNEHdYzWcHwb +7zJx0U9ZDtAE7wejbzA6rObX -----END PRIVATE KEY----- diff --git a/jstests/libs/client_privatekey.pem.digest.sha1 b/jstests/libs/client_privatekey.pem.digest.sha1 index 1402e8353765a..6af90649598ec 100644 --- a/jstests/libs/client_privatekey.pem.digest.sha1 +++ b/jstests/libs/client_privatekey.pem.digest.sha1 @@ -1 +1 @@ -C678FD6B6A52D0180CE518B4B74C9C92BA941869 \ No newline at end of file +9FECF1AF0B0E9ACB0FA0AF7FA82842BAC63A6523 \ No newline at end of file diff --git a/jstests/libs/client_privatekey.pem.digest.sha256 b/jstests/libs/client_privatekey.pem.digest.sha256 index 85a1cfdfcf854..29be0553086ef 100644 --- a/jstests/libs/client_privatekey.pem.digest.sha256 +++ b/jstests/libs/client_privatekey.pem.digest.sha256 @@ -1 +1 @@ -4F5C946F3C702A7675005C921D438A43D66DA45F540AF6945971001273604544 \ No newline at end of file +9A64EA9620FC26C9D5D55A00A6A01C6AF62CE4A345DBE172750F13EEF10FDC1E \ No newline at end of file diff --git a/jstests/libs/client_revoked.pem b/jstests/libs/client_revoked.pem index 7397be718381c..422a9918331c7 100644 --- a/jstests/libs/client_revoked.pem +++ b/jstests/libs/client_revoked.pem @@ -6,49 +6,49 @@ MIIDsTCCApmgAwIBAgIBBDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRl -c3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjB0MQswCQYDVQQG 
+c3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjB0MQswCQYDVQQG EwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkx EDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOY2xp -ZW50X3Jldm9rZWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDXLYp+ -lWykS2LLUxAp48lEHXPYpE6rlyJqr2Sk2lji8Tneo3AHPtpx+8pPWJP+sqtM4ZIu -42ag7AgORu9+J4xyWZ7mDAMS5NHOpVfbg3xDuTAC6dHDQq4YosjjFR4x8Ma67PKM -dCDuMXlbp51EwNVnBnOPKXFdIbVF7yITHju4jv1f6y4bhO5TyhRqgtovtEOR93oJ -v1m/wGAx4pmvr95alwKtRAZiti6nXE/CFLMjkCATljrLu8RDc3BndHSuCP57X5nn -FpYoMRwe7r09vTKYuelYs1uLiDbzqC/fktfvzprme/rBY+f8IiVyl0bRTF9zhQzT -ctPm1+hrljvd+8vbAgMBAAGjTjBMMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG -A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBRerq6d7jrJ8x0Gg5ESvbLX18Zh -3TANBgkqhkiG9w0BAQsFAAOCAQEAiZqiDWRHBjZQvwsKVLrs8Al9XoInB1aYjG2C -dlja6mwaGikNYKCgotKoIChBc8EJ1X2fEuFa8n1bGOamDCVJCu0utQLe+EEok86d -Y5uvUp6qt91ytBOlZk8bg+kDgP9Cd7F92CkNnaxW01/sHu115SUFM0VH7olBvdOR -ouxg76qvL2gaEC4amQAdgYpJ3/A1esg+CcCiCFyTK3nAdd6BHEad/KH1umoHic2X -S4eyd8D0GPRrRfpPQSiC3Y4pWvL1L40UbkzAx4bqSQMvY5hWsXmQiMQDpQN3sOOc -31Us59mICifxAtd5OpN1oZkIRn46wx4okqXofh+bttkNyWDunA== +ZW50X3Jldm9rZWQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKyLxu +r1YYp1NEuM789WhvGN1gYSi9gxIYensRvO2U3gSpLG0BGhhKwXeB+ej71B5hM150 +wpsKSsFuEgNvImywmKJJESKmHrRxpPco1/Vjl+3fEONhmT56C2UX6SjcgTDZDaL8 +xyL83tJtH8/FCuguggW9TEVHEULjykQgnyeZFG4VNSy4RPUkIh4HTPkdZC9dERcn +zxJv7zVJvnQJt/E8OJmNkRmD/YYNTPKWzjJhCKpDCtpxyh6RHz2PdYeXLvqNGgKJ +uLB3+bAkS1IMIDwtSodOKv0ld1NbiaOUAgdTekUJskyu01DUqlK52b+wew82egX8 +52RNPwW31nAXAfHjAgMBAAGjTjBMMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG +A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBRqXfMofLL++h8DYfshKWI5y9xn +wjANBgkqhkiG9w0BAQsFAAOCAQEAL7arHIv5jRUX5Xdp7iWmvg5iY50iaDhDTMw3 +fCzwBKzluyJj3nDBNuVLaMD647pvpmszSHKeo/gP6yf0GmDoke8fpmdQN2P+9JxQ +ETylOyWKQXJfkhLMdIzUHKIVZLkApsQ+NQO9yI5BbGUneZYL8DiS4qYCQTwfnH2p +04RNK+jDkbGvDEDZUa6qJv+pujPG6AjT4gefaPwXyk+1vz+an6ZmUt9gTQYSoPMi +ziB6GKZESjpMxUyloxRzS3AWFQ9MDI81+ip6ENJ7QOMhobdWpU4Z/gMe0LB0p1PT +jay6qHsD6sM7aPHIbq2vRcl2qV00iqqxw+z7fO9VlF73PvWSJQ== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDXLYp+lWykS2LL -UxAp48lEHXPYpE6rlyJqr2Sk2lji8Tneo3AHPtpx+8pPWJP+sqtM4ZIu42ag7AgO -Ru9+J4xyWZ7mDAMS5NHOpVfbg3xDuTAC6dHDQq4YosjjFR4x8Ma67PKMdCDuMXlb -p51EwNVnBnOPKXFdIbVF7yITHju4jv1f6y4bhO5TyhRqgtovtEOR93oJv1m/wGAx -4pmvr95alwKtRAZiti6nXE/CFLMjkCATljrLu8RDc3BndHSuCP57X5nnFpYoMRwe -7r09vTKYuelYs1uLiDbzqC/fktfvzprme/rBY+f8IiVyl0bRTF9zhQzTctPm1+hr -ljvd+8vbAgMBAAECggEBAKZBtMmbkKbc7IenNUz4iFEN5K2WPobMLrZ5sGzDnHw0 -4H7Kj0u537TEVIu8KBVaXYr4myeJYomh9ZN6cZ9q1VShNGoRC+r0S7u7+0dLr57w -Hk2R9hZFlhjI5ii2726s2BAj9kBZlfwU0+zidFxkFj8VOAJ8he/slJBldVGKQaCV -yAYykH+XAYlUnaG8TYOLsqJjB2yROLjDKX/55D6aHMWeYOihEgh/51O1sYcw5iiP -Fv/BWuZu0K3wP1FyzRjPm7rZfZs7Jgx/8CbBR2jPetvm2OibuyvHQr2guT2uYbcl -e5PRj8CcfiqwJSlvPoXfi3YKItU+rTnUfycYENBY/iECgYEA8kaqJWK0kb1TSnrl -vOdUUELZr9LYhH3yp/+/dJC587hA0PNlhH/q6RtKWy4Q51qc0VjYO8gXmlaFyR9i -Gl0Yw5rq4yuW6S/47S4jYs3hdWSztzg3jTOvbXoN9D8sBgR96YuEigbHFkKVOMzX -ESj8HS1lOjmnq3S0sS/UpfmFKysCgYEA413qhjnpNEce7D3QXyxKOHgX+Mx5IVqY -x/VfRSFlRi/sPyDd6ZqCbnv2geM6jnK9wDaOj6+k4ERLXNMGJqE3tpwt1QROOG9Y -LUn6VYR0lNZi0fzJtCXGKNq30aOb2eTt0m1wYePRyGGTmTouUhMJZeSF6S6h97uP -suAC4k48yhECgYBNlKdDVXow+QlE3lnWxdTP5rhCfyfqNVKQzWqHbxi8nJHU1zv4 -0+VrQ9vpmrS7AN8agnKrogU83Nv2bWBCxPD7Ig55NCoc/cmeWZnnN7osl1SdJRlU -0+onCmCRh/EW9DVFpfGJKWZzEFssVxu/WPpydWjm3jN8yeBapNBZXa9xBQKBgQCT -hWoXcReofcTIj0rVlY0KmpEjMrgdNgUPKFuKJSLqqUllpFOXsFKUkQXePKrPpg8L -2dohzFrz0Bv9vEWvZscZSrhKECMYZtYVfHxaZDioIXm4uYW9xwyNkWvXL4p9lBXi 
-hHN4cDK7CuSTdLZ9NjDQ1bc1FrasYBvaeZIld5HloQKBgQCmb5LTVXvcjHRKFDsC -9dmkaQl6ttOnbhvn+CjdfwgfiPdOLvHAp2Q3bad0xbMc8Cc7PYxbfEPuYIeLHxB5 -fc7WeRvwVLeS9F3ClkYVSq/D9ZU/U5tG/VsrxylUHcVAA1hK2aHaWN1zcJd7Azaf -hhu9hM1a/hdaN2MKcOBSYFob1A== +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKyLxur1YYp1NE +uM789WhvGN1gYSi9gxIYensRvO2U3gSpLG0BGhhKwXeB+ej71B5hM150wpsKSsFu +EgNvImywmKJJESKmHrRxpPco1/Vjl+3fEONhmT56C2UX6SjcgTDZDaL8xyL83tJt +H8/FCuguggW9TEVHEULjykQgnyeZFG4VNSy4RPUkIh4HTPkdZC9dERcnzxJv7zVJ +vnQJt/E8OJmNkRmD/YYNTPKWzjJhCKpDCtpxyh6RHz2PdYeXLvqNGgKJuLB3+bAk +S1IMIDwtSodOKv0ld1NbiaOUAgdTekUJskyu01DUqlK52b+wew82egX852RNPwW3 +1nAXAfHjAgMBAAECggEBAKwBlYQ1icwjskfqkviSmWETMEReZZZKpYDJ87ZaERRj +NoThQBzdK5nNfTds6ikiBhrg3dAgei5CtsxMz0jnZFZvQcjEliVeiiSTj2q/tFAk +QQsjYhwZRptzKu8bQSO5Gdwi6wuLRqxDvS3++9fhpow8ke290k2z4I8jtKIPHiGD +ZAC71sg93DYRbEiBGqzQSTc53LGPH61RhntaFq8mQpNF5naRBxDQkxrWY3/eKsEo +pjDDdD1pm3IaXUZ73ul352z6istyslYtkushWfFk5fxmBhWiT9o9AJprdqwqUhhs +u+6ab/Vov7/bN9uo2q16x03EOyQz+9bygvmDFzfX/RECgYEA/hUjax+GihYzX6kT +mRPwx7rmofNBYbfIL4t6p/WBcFfI2wFHXqk09kfl1M5v5DDg8TTmjQAU7iP+Njmk +Ki7wL0E3tK6jaJdK8JTwUclb5uFW3M9tb9h9zuuy8l0jtn7HfDaetICWrGRCWBi/ +x/SG1MZt9y462jZRx6Uaa8TuQSUCgYEAzFB+jOd6yPHHRqJ3Sl2QXvy8IM4LNi8B +dF+IzO4IKvQJWtEcESWEc66zpvy4oLvcDdtpxWd50NDLYDXq1YfltUVDDyus7d7l +bxYg3rRMXOFHODJsEiW5xzuIeTL1bFRP9NfydAfrN9HBJSiLb9pu8UvX9dO3Tljq +tcLC3bA4DGcCgYEApnDMcdkF9iES6EBGwUlJulzZxg8mrk+IyHRzFeGCOEiZH7XX +vc/UDN0OVngg3feS8w83U/hQvatAVN8vhh6XYi0zw51/F/27rpuyTbE9DaJhl2Ye +B11nFIxb/d47jcnA/cJ99joh+a33s+QKhX7OcEXINVrIXLemnMSv6RbpCC0CgYB+ +yyluCi1hzOHNTP/Yz63LtO6PMS82Bf/SF4OPId1BwsaXbJQNdAn5vJ5S8B0n6s2K +b/L5BdlMJHdCEtPUhgyg7QXLwWAFEsGxqbrmWl/VeBy5nFkC/hSHtsNf66bw/nXM +TcNyDIQF1Q8XH01BMf6NknLIZQYqAfZOrDpAV57A2wKBgCsnOWwoIdPHaoBUoFhP +aBmFqFx1x3aUWAhXAAZaB6/Z4RPh0oz1WRidzI3QCckgABjS1fE9WvrDMKc94Zir +Cm8Yf4XuwrYaV4SOB0IkKuC6Itr1E4W1h2j5myRoxRSSszyaMpcJJHkDY9KNVawV +CyGikkG42/FFg8tWWvjp7o4y -----END PRIVATE KEY----- diff --git a/jstests/libs/client_revoked.pem.digest.sha1 b/jstests/libs/client_revoked.pem.digest.sha1 index 954c131885578..93d6896fd9765 100644 --- a/jstests/libs/client_revoked.pem.digest.sha1 +++ b/jstests/libs/client_revoked.pem.digest.sha1 @@ -1 +1 @@ -B0AD6EF051052F1F5E25AD400848CAAEC237AC02 \ No newline at end of file +C8884AD029E9D121BBD51CA0DB4ACA122604A282 \ No newline at end of file diff --git a/jstests/libs/client_revoked.pem.digest.sha256 b/jstests/libs/client_revoked.pem.digest.sha256 index 4503fa28a3689..4217110212832 100644 --- a/jstests/libs/client_revoked.pem.digest.sha256 +++ b/jstests/libs/client_revoked.pem.digest.sha256 @@ -1 +1 @@ -619462638B204A36B830C672AD7596B15DE6653B731292C81F927A88A9AA6C99 \ No newline at end of file +2D2702C05BA308219BE37E5F2CCA1625E5538F51639C7900657F6C4B6E9017B2 \ No newline at end of file diff --git a/jstests/libs/client_roles.pem b/jstests/libs/client_roles.pem index 085e7ae2da74e..93b01f9e9c9af 100644 --- a/jstests/libs/client_roles.pem +++ b/jstests/libs/client_roles.pem @@ -3,52 +3,52 @@ # # General purpose client certificate with roles. 
-----BEGIN CERTIFICATE----- -MIIDtjCCAp6gAwIBAgIENm8bejANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDtjCCAp6gAwIBAgIEZVtXuTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjCBgzELMAkG +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjCBgzELMAkG A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD aXR5MRAwDgYDVQQKDAdNb25nb0RCMRUwEwYDVQQLDAxLZXJuZWwgVXNlcnMxIDAe BgNVBAMMF0tlcm5lbCBDbGllbnQgUGVlciBSb2xlMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAyPzl0H1+eLYJ92HXheslqxl9Ph+uI6VL+4fb0ES6ss8W -v5zUO0s5jXIbFzFnGffSfzvy2OAAm02dOVyU1pVRaPLzj1sOg3gWuwaNyHooBug2 -Cn1QhIglc9+tsik7KWFkSS9LkKT8W9U0Q2bJXeux/UdqtPbggxHpyW5Z9PGGht4/ -PLVtWCNgPpIcH98hh/MXOtWY9yB6cFHqWfRza1p5DPUju5FqL9HLBPCWDUrM9s0I -5p2+Sr1/jMrmSQ8TYAczQiniDzXFJUozSKsNQbICp+Qkmg7rQgkrOrKU8E/7KMlE -CmdzuW8r0XCCmRcurQcSOVMqy4NWmjlGzg7wQi3iQwIDAQABo0AwPjA8BgsrBgEE +AAOCAQ8AMIIBCgKCAQEAx1P0sz0dH97THiMIro1zs4ybwBrdUdJECY21BKeQwCom +mOq0sVADddyGByzDKwXfkSd7E0RJ5DrLxm/TmIJqD0/+oisIdCff8B2WAIo9U/D4 +vqPhl1Kx1Dcv6814sKPhxCI83Vbd6HHrbfeQqHMNyx4YA81SVG6cVzdUTy9aH7V5 +lgGbYnwrQzAh6s6oC7BSU6XelsXL0eGzI5z0aMLGbuSgw2ogqGwH7iG4qRSbuUGy +G73gFAR46b5FsGEHe2q/NWzDCmJz/DETM2g7DjSCtrraAAyeshp7PtOxJes2G6pb +RV7WxzZ1Y1UyX/B528/uBW7wCoe6OHQYOEjZIYkGawIDAQABo0AwPjA8BgsrBgEE AYKOKQIBAQQtMSswDwwGYmFja3VwDAVhZG1pbjAYDA9yZWFkQW55RGF0YWJhc2UM -BWFkbWluMA0GCSqGSIb3DQEBCwUAA4IBAQC/a2dpkqo0jHBb1roMaVzw2f65cPHr -I7MMC78+080GFqdLhc2h1o9UvvTS703NLVd/eu6VmSdrsLpbuun7/vXcONvMK38U -N38RhkC0tZCbm5BNBf/8QMuHbW1ZjdPbbpMaIGZP5k4W5YYsRUnk9tfZ9y3EkPaf -7poApuxDyzci0oPiPl1TK5KS6H4X3z9/l+9Mpv+UdG+/ckJGEoBf7B+s7CNv1nc4 -meLqea0iTA7+bdQLDpbQ1f6HSx64Bqb0w32hc/Y8541eaf/Z/wMjJ40iX+0MSKcW -cobRQr9pJ4tBvW8p0iqhZjN1EYXnxaAulBw7B/6hsY/eDmavg49qh3g0 +BWFkbWluMA0GCSqGSIb3DQEBCwUAA4IBAQAnq7yMpYjQx91xDGdriHz0XyvH+UhJ +Wi1vv9xLjdXuO2baYMTiPxTdFM+Fy3CEyXj/4bWyeCjPYg3+xBFstiAydPdUd6tz +4rzX/UbELFv6i5LFLIchUPZg0EJx5/+3M9On19ZX0LzyevpJENJ+AzDkCqpkkBhR +I/JlGDukTnlsI5CZk2nCzKX44QzefX76JaMeBI8QudEwFjHA2T6ZbdwyJ+TGzH8T +yjq8cRZUH6ex9Z1ENHYYFn5uXPJYVP+135LWz+wy9Mj/4W1fxr9STJxtg524zNmn +nuCMhFh9rKMNBe6L9jAKGnr7AoiJ3ME2mIiH8mWlU3qIDq3/TttAPT35 -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDI/OXQfX54tgn3 -YdeF6yWrGX0+H64jpUv7h9vQRLqyzxa/nNQ7SzmNchsXMWcZ99J/O/LY4ACbTZ05 -XJTWlVFo8vOPWw6DeBa7Bo3IeigG6DYKfVCEiCVz362yKTspYWRJL0uQpPxb1TRD -Zsld67H9R2q09uCDEenJbln08YaG3j88tW1YI2A+khwf3yGH8xc61Zj3IHpwUepZ -9HNrWnkM9SO7kWov0csE8JYNSsz2zQjmnb5KvX+MyuZJDxNgBzNCKeIPNcUlSjNI -qw1BsgKn5CSaDutCCSs6spTwT/soyUQKZ3O5byvRcIKZFy6tBxI5UyrLg1aaOUbO -DvBCLeJDAgMBAAECggEALAIkGAjCfo/tkrtbw7j/YH2OTOIG8UsqXgMbgHjsIGbz -5jTiy7DnAy+u+t8YYjk/YBiyTgaOW3MdMAgzluDPGJnJ9uPLQ8Ixx/XhabpFtW13 -F8jkroBZIwwhLleU2rS2jsfKVfuqPjlq3i9NfEE1ke3nUYSVGF39wNtm2xlfiXwR -IAA7y9fEegEA26sRErlNWZF6Lya1muK5LqqxbKcknpM9em7pW96GBToGqxy5DEGr -oOnQ0+FEkYVTIu3Lzb13k3KjeVh7CllbBa8O1sYgnZxwdqkjilrTm2miUqGKyT3Z -zuHNWF4UwGgA0tt2M0mjHegMfBJrobP1lPDjLJLsQQKBgQDpfXzlRw/yVXTYCxYF -IhZvt0uemW5ZeiYIwtWPlB8F7eleXS+rldk6G9ZQaVxzTEN8rTGk9pOV/TQdGBuh -Wvwj4z5rBnE7NUc4JioE0/9TwR/iJ97nxIZBw7ugYVF6nT+CEjWxD5EK2CRjCHtH -DmSs6+8i2nIJcfFRuUNhSeMT8QKBgQDcXUGrR+xIhcrCe0IfU80rDGP72nNtLhxl -JruOivmiVIgvKf9YJt8Fg/NY/nXt5ZLu+X6fBmqgea8JRFgMYJLwhwUH233Hk6qK -+crYKD+gvcmudrnW+KuWU8xb2Igp7+SdUEs7Qc0N10gIrdNLy07TxiUV4+K+uQo1 -gJMKSaO9cwKBgH+ezF5ReiDdnrDbVlzV5eeICzgT6uHTRuiw/aOttUs7X7vpOT4S 
-+JmvT3PCMvr+K0EBNUpSUBbCP/LLE4cdQQWolpxO+CMBmvzQt1TOPxebTn8Bhe+u -VieqAWrG5o8bVfQVWe1+cau4uiK7Jw6Oim6NwraNKZmKAvnFRlPt28UxAoGAHh/F -fWOsolFf4Ww2ItAy6OKdvG0lgFRgX1eHJjpLW+yQXVoxZNyTek6Kcqz+dBIBHxgO -PjEDXIKl8e9c1fxRuf3LK0LPE/xUwQDawOfnotKvAmGKkB+YXUmBU0DrhEgeCX9T -eXtKS80VNNKGYwMc+IhKw45OjeH/ykaUU0Cz2I8CgYEAwCLQxklGrlsTPehgC/CE -0ZKYRGACqF3T+jWIsUP8BUABHxQsIik3mXAu7tcqRcD+E8D0Ctlqc70xFanSEmYa -NN4gxX8rFBwKTmgQb7+25r4G7koGNqIIx2cGJVVF3RVokPTdbX7NEER81wQlZ4uK -RbnNju9uqfxBtl7itm+pGmM= +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDHU/SzPR0f3tMe +IwiujXOzjJvAGt1R0kQJjbUEp5DAKiaY6rSxUAN13IYHLMMrBd+RJ3sTREnkOsvG +b9OYgmoPT/6iKwh0J9/wHZYAij1T8Pi+o+GXUrHUNy/rzXiwo+HEIjzdVt3ocett +95Cocw3LHhgDzVJUbpxXN1RPL1oftXmWAZtifCtDMCHqzqgLsFJTpd6WxcvR4bMj +nPRowsZu5KDDaiCobAfuIbipFJu5QbIbveAUBHjpvkWwYQd7ar81bMMKYnP8MRMz +aDsONIK2utoADJ6yGns+07El6zYbqltFXtbHNnVjVTJf8Hnbz+4FbvAKh7o4dBg4 +SNkhiQZrAgMBAAECggEAT4mBwC/nujro1UK3iSdqfr/humM94xNbENDZ5ZSTnwpy +wlwNZB+AcfxgAubqrfU3A63UX5jFP2vyuikPLvEDLz8FZWJ0ih0LL74uh+KhnCgw +qamtL/vQd9TZq2ce/KU4RlI/JTcvbzxd7FB4Ca93VpCYlpPMgWJtwiOS4dIMTVAh +eK9AiD+6DK6kYXEbdtTE/qB3rzBhI81mkmVCONMC1DzVPLJaobZImQOpjG3wwqve +pPe+aEuFLvu0J4z+r9xVub7D/swh0KnnOhGgKZH59KMw++YWGyC1PjHc7YVE+url +sIMQ1ZGTu5nd7Zy/NnVSxrjcaH+8iUBxDb5AxkATGQKBgQDl9TWwzwe5+CMoFdHd +bTnL4AKOwxTU7f+CYL8uC4yFhzDpwfMdCv0iAsyxcKh0qec24c3OLsaohInOEoGq +/D99vOLL/8YzUQzcOrkQ3GAY/bY7z5OLG2NrvWiLeALhYu8ZhjrK4qMInHltWgxU +vfBeIBZSjzeb0KoZitdr1b9npwKBgQDd5r5qLLQtgg9OHfZAzkqNqZyS4JBGVBzT +IOdryu2Zyhy9WBCnay9/5GsY+i/lx2fcOGWlQOE7YK7zGzGUTpWe3CjqmoCF+A8K +os3cn0/Z1WVSAhStamuZ29JaxdIku41PR/9kVT8n2TymLpkpINgPqlb9JiUGoxpG +HjZ7AYSDnQKBgElDpqd+TmfhttG+oe56Lj/WTvpNDQYKDa3sRPzWkR65w8u2D07h +gWSkn7KowgLpXtENgBSAsqpeD8mn+8gONexJkbiM9QjEeeRwkSXeEH/l1XmY2Nkp +ELPy1KwawFAuxR7MtU6OhoLn14gPeH5HRZ23e2UyW/U4tfkNUzT4FNNhAoGAVhws +2FbwtJg8CDxoS03CcKbZM8YhNql9ZhDmvVXKTlu4O2HpVI6rqB2j0dWEsM5o53Sb +PT2oKbqjKYPsAXldZoBZAHFkXpicfawf5vWLryS4ZNKZ45Hgn5xGTSNnIQoHFDUE +TpMmJviOKI78sJEmRy37A9HBnIm+sGcZDTLRkaECgYAOWKr0DpV1IZpUneen98mc +XBkn4kz91d+RGOr7InZGB9h4wM8zlu2BI5WChK6YmNm81PJ10nCgce5yzcRgy4qd +DqDWLiQxMFhA3G+ioHfC/DSa5DR5T/e45LvNiljUC19VqyIvlFWVTBMMn7YwI4zR +BKNegkZ1VyooNu4FBkyoTQ== -----END PRIVATE KEY----- diff --git a/jstests/libs/client_roles.pem.digest.sha1 b/jstests/libs/client_roles.pem.digest.sha1 index 8a0cfa649e467..5ba43611afc58 100644 --- a/jstests/libs/client_roles.pem.digest.sha1 +++ b/jstests/libs/client_roles.pem.digest.sha1 @@ -1 +1 @@ -065A489D101C23DFA6C30BD1956D743DEDA4A422 \ No newline at end of file +1367B5639EE86E2CB4FF2DF5EBCC02A5B3A9CD2E \ No newline at end of file diff --git a/jstests/libs/client_roles.pem.digest.sha256 b/jstests/libs/client_roles.pem.digest.sha256 index e751586dd0078..7501be120029d 100644 --- a/jstests/libs/client_roles.pem.digest.sha256 +++ b/jstests/libs/client_roles.pem.digest.sha256 @@ -1 +1 @@ -AD26FEAF7FFA89165CEE4CCCEB4C2049B7DA4B8A39C65316A798B2DB5BCCB016 \ No newline at end of file +6B8325B29E3C061BD6EA615A2779BC4A4D9FB56630C4F526A29CE0ACBC6191A5 \ No newline at end of file diff --git a/jstests/libs/client_title.pem b/jstests/libs/client_title.pem index 6ea6036c4b8e7..32243b069e5d6 100644 --- a/jstests/libs/client_title.pem +++ b/jstests/libs/client_title.pem @@ -3,53 +3,53 @@ # # General purpose client certificate with roles. 
-----BEGIN CERTIFICATE----- -MIID5DCCAsygAwIBAgIEbhN+OTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID5DCCAsygAwIBAgIELjth/DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjCBsTELMAkG +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjCBsTELMAkG A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD aXR5MRAwDgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYD VQQDDAZjbGllbnQxIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVAbW9uZ29kYi5jb20x GzAZBgNVBAwMEkEgVGVzdCBDZXJ0aWZpY2F0ZTCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAM63JWBNHspk/W4Zkebwc4VXcnmqN6stOuwQfjEBjnixL+D9 -osIoE/FU6x/2TwHEOOYVW1FhP+rN/OQRcqY0T8z5Dk4zQACMZZQdNNMbBNmk1XIH -6pFThn+olAsq7KbeNsQEtlF9EWIRxrpc+C7Oj69o+3dCJbhRNdj64pw7BrPfOo95 -nM8G0nkCJyKQkpWouDinSssNaeYFujtQWHQ6aTn/Sj+6ta0iwzm8Js/QD9rG6SOd -2owC62yuK0eUrfkf6xADWe11decK9Y4oEZdbtFVZfHm494ovPpaEe++5kUSRp008 -rDvJBqmIZX9dnd5IGP7DU6PtSEB897Xmk/oiBEUCAwEAAaNAMD4wPAYLKwYBBAGC +ggEPADCCAQoCggEBAMu3CZSHz1GkrDsDeMC+2e7eTI4VncmjwPV7mTY93rRC/n1J +YEAqRyaX5u3yLNHb7rVVC7nQ/r0MpzMRThKl264YsVGtlrBDWiujyLk9Gim4aZqM +RKFzHEptyUVoXSNC+yq6Ve65Fxq6gBZcUaOJHFOSw0fYnncW7eeFrvHR+/ehUnyx +RUOGfxc4pbNthUqcE8PO/VS2iV9XtCUhZXoWccMxNCMKmK6+NRLLrzhIVCENwyVq +Geeyht5WnjlZF0XKuJi0m/ZLXJRtg3EjXxHiWYgPdKght3RCOt2raqV126ZzTcsC +k4Bjqc7fzd3dW7GQKuMt3zmNxFXi7opCBsDXPa8CAwEAAaNAMD4wPAYLKwYBBAGC jikCAQEELTErMA8MBmJhY2t1cAwFYWRtaW4wGAwPcmVhZEFueURhdGFiYXNlDAVh -ZG1pbjANBgkqhkiG9w0BAQsFAAOCAQEAcd974FGDy5YioMZGZMV0N1D5Nrima43E -uX0NhhYWuThs4NnKcklp5bH4Q6FvqkbARlALDUIf46AWGET2Zg98fSRUeFNf0QEu -OnoxPCjlm3f/YYbCsuEzcNpN0a26dEWAVU6jPc9Vz8zJOwmGOiH+x6WLhDPFzgn+ -5Btzo9672tGb+a8ZInKxaZNvpCZ+6zggWX9SxiI1ZG0MZTtHdiXopneKlB/8s+sh -K7qOOx98pXICEB3VFd41chlU22UkfAuVe4ql/iLr84lyF4njbbp7NLAJuAoC4Scg -L7jq3qaDOu7khniW+9TRLumpH98EDBO817G1y5BdcNuWs3iIxZiLKQ== +ZG1pbjANBgkqhkiG9w0BAQsFAAOCAQEAbLZVPVdApKR2ZHkzTD0wcl6dR7T55q9e +i4ibhHj2jbx55ctl8btEkUezcxdEtkk4t6TTXawfqG7NrQ2XEtueaOi4HbV6baD8 +wj/SecnQGdD+TLAB/PvYxL6jc8pisLCrghUzlIAdjeTdI/dXdRu18WKJ2ZI03alu +qTKIalDdmUfbCyGpHI8sjs32x4rPyMVujUClHwM49SIHyROisaKL9AqEZYo7mUi3 +rYeR7QZXjf9cxFJFNfXSYKJGLdh22xGXZKtX+s404fMyemd2YGjbk0KvcWIHtkqa +p2Z/KxJUDt+xWsyZTpUZPToLYwnc8/6h6/WgQTQr45Q2CYZsb2hRmQ== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDOtyVgTR7KZP1u -GZHm8HOFV3J5qjerLTrsEH4xAY54sS/g/aLCKBPxVOsf9k8BxDjmFVtRYT/qzfzk -EXKmNE/M+Q5OM0AAjGWUHTTTGwTZpNVyB+qRU4Z/qJQLKuym3jbEBLZRfRFiEca6 -XPguzo+vaPt3QiW4UTXY+uKcOwaz3zqPeZzPBtJ5AicikJKVqLg4p0rLDWnmBbo7 -UFh0Omk5/0o/urWtIsM5vCbP0A/axukjndqMAutsritHlK35H+sQA1ntdXXnCvWO -KBGXW7RVWXx5uPeKLz6WhHvvuZFEkadNPKw7yQapiGV/XZ3eSBj+w1Oj7UhAfPe1 -5pP6IgRFAgMBAAECggEAIVlguUmX1xU14aZUaIqQSInUGRbCBma0o9MBWMykfIox -bD3fHS20EfIeQLjPBDzw5QW7BsUGt83lp7G86l10JmBj02/nOm9sD9oqZIhuXuJ9 -4Piv+iQchcnfoHGbXkxpT3RnKXxg6o5tnjFThNGkUqtALnxIc5T+d/P5zTSkny3x -BnR/cHHOo+FPA9X4wUjdpKKH6WuRTmb+rPUTlrY7n5D9OF0JtcTMhPyAYGbe3H1I -iRVFie3+GIilMmFnUaN6kuL2iLMNRNR+OIKjabQuLrLS2x7B/1SaBya7qGnnU/d1 -P+akrGJeX5QBhXBupgzQGF5Ef0QbDoxrIq9D9bluHQKBgQDxvpq8IrwScFuzZ5Kt -MdIBC2yED1Fz7czOO40r4HrUAFhqwHz1aHzXGXBE834hqaRE8rp/n7Cfedf990Ku -HRA81nQRIdhgv3vDHj4GaQBcfsU1DDtnz5GQ7AJka2zjK/jF10Bz3a3G0e9bp0xp -XVdEB4ecldWgd56VD9NEaoML/wKBgQDa570m6ktm3cVZk8q4cfbAJb5iApGYaazl -/tkrkLnihEjWHkNwcZ9zW9vj32OZajNxv0mEcWGS//dUomknbAjMje18R5FptTV8 -8HZhwdJNbgE47PqwfItjoT2TJJLOWbXJC74sbhwmr8JFJ3YlNr37XNIUI7+Df5DK 
-o7FhHYu/uwKBgHP26SdCylFWEMc48gWfFoxlvIKFDp+7/TkZHBlmL8Wu/LsI52iZ -3PkaPN72pxTi2egxQAGkywdVXaV4jUYUrwtgHIFzaObQNEBfK4XLrN6x+Uv/OCgO -TFfmvycI2U0IdOgC3+o09v2fC2E+GcuZeRkrO4SD49x8RVhgJQp2xbJlAoGAMFDP -1gQGinjOczwElXtJ7BUolTdd7Vb9u3Hpew0hihaKgQADAJGDkRGoUf5fXGAtZKDE -2D+yOqDWdU63iOT2eDenQDQHq148pQvqBR+jjEWIbYkYt9V1apMPJSgAYx03210F -J4dSHVPTvDG+iO4xNobBM4LEZYDFc4R3xSihTLsCgYEAgt0K8VoBbsbzWT5AbtUt -Nj3K1SYNOekYo0tylmoxEMWU8XPVzzrALNUALet/0fW4RKG1g0/19ZaJdXCofVaX -WalZagr49BT3QMBnGQJSOJGFX3Zje5QbmBRZgHVrkADXuNgDMac3sBTcthR39b4E -+xCGpMK6P4VV1Bj10fyK3Mw= +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDLtwmUh89RpKw7 +A3jAvtnu3kyOFZ3Jo8D1e5k2Pd60Qv59SWBAKkcml+bt8izR2+61VQu50P69DKcz +EU4SpduuGLFRrZawQ1oro8i5PRopuGmajEShcxxKbclFaF0jQvsqulXuuRcauoAW +XFGjiRxTksNH2J53Fu3nha7x0fv3oVJ8sUVDhn8XOKWzbYVKnBPDzv1UtolfV7Ql +IWV6FnHDMTQjCpiuvjUSy684SFQhDcMlahnnsobeVp45WRdFyriYtJv2S1yUbYNx +I18R4lmID3SoIbd0Qjrdq2qlddumc03LApOAY6nO383d3VuxkCrjLd85jcRV4u6K +QgbA1z2vAgMBAAECggEBAMrEVXTR+Tl6fzP+MKMpVCK+gdSrD3M020t8yvOCgblL +NaPe9T7o1glECUeuVY5NKzyyeglqTSQ/A+2AvAUzsYWIbnauId8wKwBk9dXrS69b +L0H1fBp3SSGMRVoglxQSB45wCjijYqvoFrYrDdeJEfjdjCJnKtO+ru5T0d5A9Ft1 +HrXQsW3ztH3jcySdY4nB1i6ytINSArCY53oN1Z5/ehVkfcuTHkWPz6bciaZRY7Pl +7YTuehovozl3n1eSccUFMGRHF2W69WZ94pHKW+/Qix5X02CTfCv/wWIBsgc5GcOc +R9ArHNc1S1VhKk1DJwj8fhqyajBU44FG5/vX/wAg+QECgYEA+5n0fu2FWYcPUMab +5H1sLbpFrRMCOa1H0rlyXfxLDenF773n8/TuCuAeNkO99ljo4y9K8bzEhd+gfZiq +ubkFt9b0EUm0BuIKZhOpIO6rbsasL1ugLn0QxYb4kuNqim8n3nrX+0hngaFjyGLc +lLI2ex6T3hEw0fOzgZBJeLFgk2cCgYEAz0bEJ9sqI8x5AoLjTaNIw6AjQ9z2J+Ar +CZhu251bxrn3V4vPqFVq3K/2DqNvA0diV4lB4l9ijf0JSpNMsDd4wMxVoAki4EV+ +hNivn80EE3LTCEupq+ErYLx4npU6+NGcpFKqo0LG1lGaaePPyIUE4/tqk/PvnLZI +4fIfQSwInnkCgYBPYmYYXk0C0HBuphut3jzxuKIfV64OELRmsoh5Sw9LVoVTfXHg +MVmiKNCfgoQ/ZBInDFbzKwI+0y5KMo87hjtdo/7iLYUV3uA1EOL+Nw/0Jc9SKfDC +ekd+a3WmswZ8o1HLCqt254NxDUD0iuzaJdi2xBEXsgjbVa8/pAzqfdzg5QKBgFI/ +PVSnVYhQ+W9yqmlDMntVjsi94/scq1cqYkrInQM1BZ16f9LG2hlpxRQ367P8Xlxn +vXnq6Xt3/XjXDCYcTNEMA6n2Fh2x1as3JDEfs89Xz79J7rCiL4k6IA6lHnpfCm31 +03nm5GlkhgYgfHGPvsoMGFVPBTRE7JSjc51zsJPZAoGBAMoU0DbQJsdvOyReahd2 +0LSyT8mcLppUZp3lujXdIvQOdxIdXZThcz8OwEXrTP2SUmR5SAqmKFpCxryxwqex +09vLGsJShewHlyb5BFDkYu0mgdvfpVSUzGcUUd/z2wH3K7cEVhgj+jCUoHA/AbYe +Vxgkq989MMYc4gFUhOyzIVF/ -----END PRIVATE KEY----- diff --git a/jstests/libs/client_title.pem.digest.sha1 b/jstests/libs/client_title.pem.digest.sha1 index 9314837da5e80..cadd1783de934 100644 --- a/jstests/libs/client_title.pem.digest.sha1 +++ b/jstests/libs/client_title.pem.digest.sha1 @@ -1 +1 @@ -689F782F2CE3483FDE3CFB70E9E22800C604FF14 \ No newline at end of file +A8354AC4225473798EE2FA5BA6822731D1617F8E \ No newline at end of file diff --git a/jstests/libs/client_title.pem.digest.sha256 b/jstests/libs/client_title.pem.digest.sha256 index f465f2a44ed7a..407c95b086980 100644 --- a/jstests/libs/client_title.pem.digest.sha256 +++ b/jstests/libs/client_title.pem.digest.sha256 @@ -1 +1 @@ -183BD149346634BE397BACE2B8231CCDF8A435104CAC12C6E9C1814BFACFA8BB \ No newline at end of file +B9D245F1A0E281F0D2ACCFAD28246374F309A303550E1AA7730D423D97B4C865 \ No newline at end of file diff --git a/jstests/libs/client_utf8.pem b/jstests/libs/client_utf8.pem index eb34840b07639..05d890d2893d4 100644 --- a/jstests/libs/client_utf8.pem +++ b/jstests/libs/client_utf8.pem @@ -3,52 +3,52 @@ # # Client certificate with non latin-1 unicode characters. 
-----BEGIN CERTIFICATE----- -MIIDqjCCApKgAwIBAgIEd+OXqjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDqjCCApKgAwIBAgIEMzr3GDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjB4MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjB4MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxFTATBgNVBAsMDEtlcm5lbCBVc2VyczEVMBMG A1UEAwwM0JrQsNC70L7Rj9C9MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC -AQEAvMqJXShCsc4p+P7CQ0KMOb4h/tkAovPsv/5tWNZGaa1aCtHrkH31vTCdz5mQ -nRMm8tYCkn7ODHYbE7c6YMYshboPnVIdLSwGhGAmblF0N/IpGhrgHJcmnlLPVjW2 -5R/I1HLe7NtCZbO+rMG5zPJIyLqdSJiDLWX7vygsLx7PjgVFqymQKSrvtVi1v2Zx -siyfzrT31hT7mEvWhW11JXYNZLZxLHgNSnZL9FDe/RXnAHbF2gYhmzoOxSDceefD -vPylZXy9lqgT5puGvpE7nqXf1tcmoIsyPRTFf499E7g6Q3TcOTJQPWRytPvR8h0J -pvUQk5b7EvrPFCNbQHfOeQgVjwIDAQABo0AwPjA8BgsrBgEEAYKOKQIBAQQtMSsw +AQEA6t8zc4oEvXct4oub19eLtrGssh1yoydgd5CzR0PNL4yI+BVrx3y3J2y6NV9a +XmJhWuBozl+dgEvWMbH/BRCVQVq3cTlvN8/RSm+4Nb/7GEqK7dwv5XKn893vzX8u +MO+q3BlZnrSrhNA/PFvXJg+cAf3qCd1uZP6DQtEe7xTvWkRK3k6MGp1zRimD1Gb4 +NB78Nr+N+fNr8/1ke6yyV6rHPKLkTEX1miTIhZctAmsGc/UNU96nYdNTjhB+yWjZ +8lLTwdAV4P9W6GBtqPvWtUmClbs5hJQ8hSl1Bc3ItdkfzupbE9cT0bi6KJsATyS0 +DSlcUq6ICER3E1YiI1sI5GSzXwIDAQABo0AwPjA8BgsrBgEEAYKOKQIBAQQtMSsw DwwGYmFja3VwDAVhZG1pbjAYDA9yZWFkQW55RGF0YWJhc2UMBWFkbWluMA0GCSqG -SIb3DQEBCwUAA4IBAQBu+46q9zu+N0/XeVZftDwhizAg8eSvouryfixTXjXR5FuX -GHt+FGlEs6ZNDUIsK/wYnpocJl+YhdACP8sWRMDKXOVwnyNWuMR1wBJ5bEUYrrDW -HIXnJa/mtPq/oZd9bVA9W/aGfEpZN5tyfmCvlIN4n0fwVtnEeMoXGR8PWTs4s35g -NJ+SzE8JY7JIKKR8+dPeVv7qv1OlbQF6yXVx8CvPv9FuPIcboFOpWIpliLU8ROVR -N/Aq2Zf7MCXpK+8nZnA4O6mTNaeQZDlLpu017q5XGXqhgOqyFIKWOOJbWEVN14JZ -nkYA6KgzV9IczoBimE7Sc002KoPkFjX0UlbmX/MS +SIb3DQEBCwUAA4IBAQA7ItpDLulGN0Yo8vzs9eBn91WamTposa4lBosW2AHm+BU8 +QPWpNGLa1iOrKF/wdbPGAiJB5TlLZfv3qCUouFZUFWigP9ndrbsU3v426iuf1ufq +thecoD72N5BGUl5iRmmqcbmB0XYB3xoxj+KI0bg5zxypEipH5p1yGTQkEz0KVsMX +KL1oTHbIKOa+Rd3UIePRgA8ihNsSEMzK57z3Qzpu24QNC6S8baXNzKtQFjMH29Pb +yckGKAmmSClk2JStUFxRCyQH2VqaxSS1evmqGyzv8AP50caW9IdAzP0MTgvowhr3 +d32nfa+VEN5fpLrTQyuVLIgBi3j/E16l1UqNYa+n -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC8yoldKEKxzin4 -/sJDQow5viH+2QCi8+y//m1Y1kZprVoK0euQffW9MJ3PmZCdEyby1gKSfs4MdhsT -tzpgxiyFug+dUh0tLAaEYCZuUXQ38ikaGuAclyaeUs9WNbblH8jUct7s20Jls76s -wbnM8kjIup1ImIMtZfu/KCwvHs+OBUWrKZApKu+1WLW/ZnGyLJ/OtPfWFPuYS9aF -bXUldg1ktnEseA1Kdkv0UN79FecAdsXaBiGbOg7FINx558O8/KVlfL2WqBPmm4a+ -kTuepd/W1yagizI9FMV/j30TuDpDdNw5MlA9ZHK0+9HyHQmm9RCTlvsS+s8UI1tA -d855CBWPAgMBAAECggEAVOHTZPknE36YTIVunoMbPRMJgeXbbWiyt5gTXe/pdLYm -V9sOSMb5Z/il26M17AmiZ2hle6xbxCqN/g1RyBUSC4YeYyN6TscOnxID9XHWCRlt -9XjD7tbe3CxrYF2CP/5Mc4i6RlBGyCdKwa3Qdh8xLl8JPIyl3hGOYOIVC5KUy0s5 -W4aQhgh9AlqI+wv7QugKrC3ZAdcRT/FwakcpmC+C5sUN5RxkldhGuYwaBaC1z0Vh -8xk1EvvDf7oqoJRyUHbZW8hAXY3Ei/RrQnwTCU58Y24NBCJNxF7rV9ww1SwJ6HtC -ApKBwM6iVtJjzi1GkgAXIS96RJB73f2YNLy45C/iYQKBgQDr546tqBL7G8lIEa2r -nl+wsDjmpQPdeRxSgaSRgmHvFMVtuSKivVTD8w/xVjzi44D6fHf7hnt9sUPR72M9 -XKUTf3rjg45jQIfbwkRJQ1hpznLu5QMZWhD2VwUJAbCnzNw+yISpBOuU4W66FA1r -TPYhZdb1k95pJnQlWa6XU0BlNwKBgQDM35BI9xFKmoCls8uFDH3F2Wkiafj0f2PH -ikuyT/Wzdw4cyIITN7h9GCXgsQ70t9tALgoSlWODMtkBwn5iKCxA0RcIKEfOEZLQ -i2W6nrVEh4cNePzUa211IHKbiWmHRMTm7K+hmMQua7k0kU1QaMbr+G90MsoISQjV -g6F2Y17+aQKBgQDXSjIaJcN8y7T8QrX+Y8DsnUawp3RJkRfZ7FrmONlrucccBdOo -NaXAVnj8RTm3zuyMrT9Km0bkRPyiARjOjVhR3Qunyw4NYn0af8aWHhH4LeMSRop3 
-fozwZCZCO/qeiQWPfqwjHExrSPkmdNpyTIBrpmdxI4vc5q0k0R3XGLEyLQKBgQCs -DK+B5yK5USjqfyRSNpxFFACrqu0sfvLPdv983pOLRFcwqt45v1iKjUX5/Rd/Qgu5 -STCiTJyGaKQ/SxOR4QTQ5gb+jb1HfBCcXARxhajaxSoQkWNaDGGetEIrBmn99C2b -dHdGMSHHjiW51LBl76fmMPuATMrHzHXDwUhOQMNcyQKBgBwawHWxGAGm9Yzpjg86 -p+eK+KZX/INuLOtkFBNiaGhRbycvI05B0uWe1VGmhNaO5V5lrWniMuKuMQpNXIkZ -73kMEssOlCf/z4Q7TB/LXoLdCtFjb06ZB55if9uTglewy+260TaQHxtKlpiazI3G -JwDm0XRKm0TavwjfFinDJ0II +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDq3zNzigS9dy3i +i5vX14u2sayyHXKjJ2B3kLNHQ80vjIj4FWvHfLcnbLo1X1peYmFa4GjOX52AS9Yx +sf8FEJVBWrdxOW83z9FKb7g1v/sYSort3C/lcqfz3e/Nfy4w76rcGVmetKuE0D88 +W9cmD5wB/eoJ3W5k/oNC0R7vFO9aREreTowanXNGKYPUZvg0Hvw2v43582vz/WR7 +rLJXqsc8ouRMRfWaJMiFly0CawZz9Q1T3qdh01OOEH7JaNnyUtPB0BXg/1boYG2o ++9a1SYKVuzmElDyFKXUFzci12R/O6lsT1xPRuLoomwBPJLQNKVxSrogIRHcTViIj +WwjkZLNfAgMBAAECggEASpCNvxKEKrpZYOC9myqc+mUx1GIw8MchgxP3Npq522yc +V1ZFn1FivCtyeltJT0mgar9vxoTodcCdwa0mry7yk4r/TfzT6D4bzavmKXpHdxDD +Rnxu4EMkOvGsnerQeUy9S57oqs/k3AdY7AqkJTszZhZ8ZKFd2MZMWbwWPATDcD4+ +XYiqadNS7uZfNpdwh8sWWGAynNkMvm8T/Hp+XgTCb2tQd8tdLfNI3EELIUGyF7nk +ro09BSHxPVJ/kZlRAVq/1Qo3wTlP/GcQVFgxOByg8CVuA4nAoO77iI4jDTAXZ9To +89Xe/tD1odlAVcKy/HMYkQ92Way1nGRGMthNW3JfkQKBgQD8EMwIr7IZviZLKc2h +XaBwmGKU4UBF3ZfCljmb1vW0rfyNIRlCZbPgfyapYFpx7hl4XQXcAwbnhZS1hgCg +HcaEEbDLn3meC/6EIfX52CCr+UdxN6yaFec3d8CubbS/VfMfRPlLcZlSVZHojJSR +tPSWJE+vJrf5JOIPSwnKYNcdKwKBgQDuibOIoOnD9ZoRmegSaWLJGMITiZtVPkoK +ldxHJaSOA8nNbCR+VH/7M2AbqMwacsMK22jbSYy7LrQUOA1EKFFV0WqnzIV8WkP6 +wrELGWC8RU9PnAX5dzZGa2giMHq+n/D/gjOOljZVwH6epnKYU8BEGY0xHMSVPx7r +8zuP0p9wnQKBgQDj9DzQzjN+3Fu9Nbzk5csmiGj+wwZhKJkKPNk0eh4SnBX9e6Vg +SqF1sQfBvYWN6wnVtCqMYaBo2IE3+Euwgbz5yxXb0AkoSSiPTjcpD6/cVr+pP/Q0 +FQLZQs9Z75S6RHxuFu5sr/s8Xm5ppFjnvIJFFemKOLW/9v8JXY8pGpLvqQKBgDdz +lWPGnHEscAN1dsiI7Sj4c21dSNnZIBcz3UBA1O8anFf/ssAXXqUDvWzQoZnoYcTt +WoPXpjqBUeV9XkAg6z967kNRvRo6VaE+jy+YV6+MoTJNf+oNN3XJVCzWfkJAJiMP +nnKgIsNue0DgSrV0iraKBhOLr3tOcB7DrW3ytZPVAoGBANevYlW0zvAjg8YudaJE +jKRCR7yTthG8pZLxS13Zs/H9HKCrk5q0Hw9SnJ9CifV1oBJ1TVeLti41JNFQbFOT +K5xMVAE4cg2zhd7Af690B2UJEhi8IvUeKgDHuV7QOnuXNeRwqdHv8+9nJsuT5aUD +IMzPSf4+7qRTVVPaUANsWDOy -----END PRIVATE KEY----- diff --git a/jstests/libs/client_utf8.pem.digest.sha1 b/jstests/libs/client_utf8.pem.digest.sha1 index 41b06c70d103c..a44054184ab2f 100644 --- a/jstests/libs/client_utf8.pem.digest.sha1 +++ b/jstests/libs/client_utf8.pem.digest.sha1 @@ -1 +1 @@ -A70A1D8408B98911E5C98B8986581ED8CB630EDA \ No newline at end of file +6C41B2B5610C5C7B81AFC363D139004837066824 \ No newline at end of file diff --git a/jstests/libs/client_utf8.pem.digest.sha256 b/jstests/libs/client_utf8.pem.digest.sha256 index 1665d1eaecc4a..8d19e36d26696 100644 --- a/jstests/libs/client_utf8.pem.digest.sha256 +++ b/jstests/libs/client_utf8.pem.digest.sha256 @@ -1 +1 @@ -DD74B0EA2C4AE0AF8E0F57E2F095B4E7236FEA6959D82F5965956725C402BBC7 \ No newline at end of file +F102A76AF3E51980D5F5365C923B9173E2438CB539ED558B7EE7B4624739BBD1 \ No newline at end of file diff --git a/jstests/libs/cluster_cert.pem b/jstests/libs/cluster_cert.pem index d6cef5a0733c0..add923b572e38 100644 --- a/jstests/libs/cluster_cert.pem +++ b/jstests/libs/cluster_cert.pem @@ -3,51 +3,51 @@ # # Alternate cert for use in intra-cluster communication. 
-----BEGIN CERTIFICATE----- -MIIDYTCCAkmgAwIBAgIELTFGAzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDYTCCAkmgAwIBAgIEeEZt1zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBxMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM4WhcNMjUwOTEwMTQyODM4WjBxMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEUMBIGA1UEAwwL -Y2x1c3RlcnRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDheRo7 -28ofWqHTuHpzsunBG5MYcup2TsxxqbOeNSblGsHbKaP1sQqN+ZF2eVqYeRkDPWy8 -yA9iLbr8lz2dLSHdsZJUjsjggJjIKYHwPFV42Hu0yO6MYhElO0FHUBMPpw1k0ovL -zW6VpaFiE45qJdpFEppCoqBk2QvEGkQ5mCl+BcQ2Y8oIlz40SHIrqmvbmXd8UXdW -EV+9axIifb4oUWhpQ+ATJEGj0LL9c/PAtDw4c+fQjQ71HSB1s6EQNDlBqJIrgImZ -pQOqsd5vMqQRpQ1ZQBAStmDtvHnqidrvsJMB5drCsF6ru4yAuQeCNqdOKipItPvJ -3Ivf3C+nCSpkX1kVAgMBAAEwDQYJKoZIhvcNAQELBQADggEBADkg/GMf/49G8jCA -6RphR02g0COWsI1nEaBxnvTlb1+sBVWwqYqZN9WhUxg5YTtQg9vCLC2pFG3E+EZJ -hO/YdyOUuqR/V7QA1qucu+H3Sf8F95NCrc6dT75ibZsP1TtUgun8AWNRd+98Rrna -ezIs+T7mE8FrvJoZX+OMOw/JpEll2sSL7Y7BtbV/EBqLCfI8NTUgJfXem+prIuHa -/0ocxrS4cG+JArfasI3NtZWCLPXPTLBVxX/GL1xwVHE8G3GVXLxczNTaRzcKOG9P -U82mA3oLMNmJl66FYs88uIeOLm9ub9rP6BDsRw9hh7hOJy/YYByRpMM9Yan8/k45 -c1qrwPs= +Y2x1c3RlcnRlc3QwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDBYgSW +90TPI2bhEI/rgefJfV34FrHiqVGnUFrKPM1IId4Jkwp+uRexeHOf7GbPWlUiFB8k +Cpt9YEa2cr52NiK1xxbBCQr58Efm1dJEtO2sR21kg4tyqaATe6uTWHjxPRaOMeMP +Ponluixk+Fx2s2As48hBIlyrxpbbHphEThNzTa+8u5MevVsfozal1zvmDKg7Fgkn +UCC72ujO1T8cx7sF6fS48GtygSnIC6x4N8pAF/SaMFXDOJK5qBGCYcMQ1JY53dKg ++zfW/GEiLG9hllv/1JKfEgKwsKyODoPXcS+gn9GGZs8wG/kq/8W87wlL0dw1MR7G +U8N8JJFuNhUJ1X3FAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAIwMWBo9I3WWwF9c +AqzlQ3o45vraQYvB8qnIoflLI1T+VDEgcUk44pwGY1/GcvirMYm31aB0GJhk+hiS +9nxqLSEqZCNb/cNxA72z7OUTc+Sc+/IJBxY2vaEdUmqtXRb0NZWAVzO0lxOkpvqF +yAEl+y3Pzlq1f6ogLdeBVobajkWVkGUmk/YboGd1dEYkmrInwj0dsjJBdoiVQJp6 +k2YLn1PIS8VcTCRL10x7iwMV311KQJPOTIxoF2g0XE96cCMWCUt+LA++Vn2G9rtv +uz6P7Sj/hPmkEqQaYpd8MRTbCr/CS+v/r2NbsZU7bKb//N6UzypkUIwbrNh1bpt+ +YCQpMYs= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDheRo728ofWqHT -uHpzsunBG5MYcup2TsxxqbOeNSblGsHbKaP1sQqN+ZF2eVqYeRkDPWy8yA9iLbr8 -lz2dLSHdsZJUjsjggJjIKYHwPFV42Hu0yO6MYhElO0FHUBMPpw1k0ovLzW6VpaFi -E45qJdpFEppCoqBk2QvEGkQ5mCl+BcQ2Y8oIlz40SHIrqmvbmXd8UXdWEV+9axIi -fb4oUWhpQ+ATJEGj0LL9c/PAtDw4c+fQjQ71HSB1s6EQNDlBqJIrgImZpQOqsd5v -MqQRpQ1ZQBAStmDtvHnqidrvsJMB5drCsF6ru4yAuQeCNqdOKipItPvJ3Ivf3C+n -CSpkX1kVAgMBAAECggEAJwjiQ86vVXJJZfAgMvucgHQXqTnr4YOO3Xa1xAQLscpK -GXlCC30VCLNWPZ6Q0qjUbb0qoBw6nZKxNp2waw+vN0RQwxbdLBDXYn/dIQww9/Ty -pb/LnL41na5+hKwWQLV5GiVhUm1EYeAE6ofcNgBLo32u+y5QemUhJlgty5LOTCS8 -NRdoSdr+TUPRO9qCkBNuxQfzU4Y4S56sp5eJsZ6rqME7Mjb03ZxBsZxDEeXBWoQH -ABVFGoeEWOC5Xb1VAHNRy6Dcu5tT3sg0XgKLJr/OnWb+x7guyYcATAcVNA1dw6Mu -SbCLIKBLhpg1/SlIYSo1umx6kguAbb5MyG3CSL2nwQKBgQD3CovK40JnnSCE79/r -VVWiQGQPnFU2LOLQVPe4AppJdF+f1HGLpZ36gm5/x/5i2u8aD2zt+83y3CrZEPOe -Zpyrk+zF8WBLuTfdgevcXk5iQtKj8EhSMoUJr75Nklmpe1IbJdhe5gH/cc1rqLLL -1LM7qPy/B9qxm0CHrPYbi3WqTQKBgQDpplMRwR9nzNpvHRxxCHQk03HAdIWb6uXX -nYFNSPNV18PljdiAjQq1o/8qRvC12FIXzcEztKiZIE01mQaxBHcGP8rPVZieoAW7 -p0+CIdShIfGAj3pSBVF65r1DvQn5+VtAm73xZVk/paWyBU3C9f/HfD72FCKO7K+i -lyVc8v896QKBgA38HNnJb7Lp5DNiWOy7kfNzbbashO5iMBzXEobqLs3FtrwXjK35 -HC5YP+Uf7zitaCezg9kdDhWXfR14pjHrYScdYqP5/BeNTqpNdoqtFAbf/YimS5HL -plhcCIvfow/DGWzMAamtn6NUT+quTCDZ06Om91fhG/I99bM4iV4Z0PlpAoGAYDhT 
-q1MZyNhu+CVH6jCuC+BbzwLtZulUX/gIILizJR3nGajRiRcMWwM/eLygMnL0U8Mz -FkUGzZCk2za6r7mD/rnUno/Ee0axNbdQIeoms3jUCLqNiCuSg4d0V+oIqr4K20+H -6FpxZ6mi0+4lOO2vuQosr5BZLvy/07hk9aNIg8kCgYEAp0hCTHC6HTnF0/r2Gjev -OttVaF015nKE0h25xi24JpUXrjddU40QPiIYgTHt80daYz01TPwz3CPnN7Wxh8yE -z631hB9hBLr6ZY3cH7XcPPWx/ClAp9oyY+lceTRVhKJbWZcboEUO1k7HsD2Sgd+p -C9lp6d7zepnwPkiFv0BBjyI= +MIIEwAIBADANBgkqhkiG9w0BAQEFAASCBKowggSmAgEAAoIBAQDBYgSW90TPI2bh +EI/rgefJfV34FrHiqVGnUFrKPM1IId4Jkwp+uRexeHOf7GbPWlUiFB8kCpt9YEa2 +cr52NiK1xxbBCQr58Efm1dJEtO2sR21kg4tyqaATe6uTWHjxPRaOMeMPPonluixk ++Fx2s2As48hBIlyrxpbbHphEThNzTa+8u5MevVsfozal1zvmDKg7FgknUCC72ujO +1T8cx7sF6fS48GtygSnIC6x4N8pAF/SaMFXDOJK5qBGCYcMQ1JY53dKg+zfW/GEi +LG9hllv/1JKfEgKwsKyODoPXcS+gn9GGZs8wG/kq/8W87wlL0dw1MR7GU8N8JJFu +NhUJ1X3FAgMBAAECggEBAJdijAVCfPNK23bE6GWMxE7PfYiA+7BcrDc2iFxZTLpz +CMal/UlGzG15xcoj3jfkl1CeP+KPCzoS7EhXexI/PtzehcmHsrgGicMDECAJIyYC +pHhTIg8ZCt81qRMn50WydL3L/9wnMPxNygMhCgFCWr/JcTLMG4+9XssaIEkYddGE +Wki8ORGuE3N3KxjgQoFEP82AmhuotOc2jG2rEN3Ld41XlN1rIQ8cAxmIXJRViMj4 +q30zgNl/2KfD1Bg7C+SxaeaIWmL+9Qwq0cHw0GY6ddet3cvW5cDncZ+XKK7Uyg32 +ZGA1Te8TsJyZhu1nXUSKNMBaAZ79cExT1bYKsI2SpgECgYEA7PlUuhPSS1eg41Oq +0GY425ww6hh+vQiB0M2iZGfIJteCzzRosKbJACYsyMTDPvtQkHP/vQG441NK4S+i +0AqlFV0zYOP7jEKgIr40ed9HEpi/6ndivLb8Ioe/g9na2ZOrLnsr6swsZ1lbqxLm +U/dwm3xJGUUWtu2nrZL8WRsXoDECgYEA0Oi7vxDe0iOHs6Ul76nJ0HG0bBSDJUSO +SZpUg2ZqkyOrRvHePdahwdDCE9Hc6Aw03dUo0n5aB+bro95ETD6GS0qC64Og5wsK +OH+o0/temGQ6Wlgz4ENUZtxjy93TDGLwUGD3rY9Rz8WI8RLzDxZ34WIWwbexS+Yr +SR8LtvEARdUCgYEA5bpMLdu7WTjJCrf4dvEyG4vOS5KVgtH1byN6U1XczfLMp+yJ +tP7rCo73iWZeVPczQeaCPIun3hDIHYedkYtQQGbKwRoiqPWJ4kR8AM24S8ny+uzj +tki6IwtwWPTgWV7zaysTBxsJzOLun+jBixLsgn85Khs1Cv9XN0iwA/3kqHECgYEA +gBiEroZwqldg6RV1qnvopGhkIfV96McdnCIGej+9T5WKe0jpZe+KZeZUaoS/OIXr +kK4YhuE04S2GBYfPRxT7kYURu8mNSr4pOTWF1t2GRlkGssjsnjGKujue8a2FsE2m +XxLmK8T6fDT6YB+na/Px9AAKRiQVkZ2DoyVnMHicGzUCgYEAgycmQO5q09G7aPZR +7GyzerNlpuhqPiiH1cXIw0b1jTtTLgTm5pfsGw6Cip8MLBDbOiSyhl7wHJ7uAhUK +zjR6lEGhV9Xr8tI+OsI7nXVuprZsBK5ujjGK0L9T6/YpJYYJ4826jmP/SvQuRdnm +K0MRxiXWsoMefgwv5cD+jAZ3fas= -----END PRIVATE KEY----- diff --git a/jstests/libs/cluster_cert.pem.digest.sha1 b/jstests/libs/cluster_cert.pem.digest.sha1 index 611b67d97c28c..0e859909e72e5 100644 --- a/jstests/libs/cluster_cert.pem.digest.sha1 +++ b/jstests/libs/cluster_cert.pem.digest.sha1 @@ -1 +1 @@ -DCC5666EE339AB3F62BEFBA3D0DB7D94121B951C \ No newline at end of file +4136812A5C5A442FC49C4018862F98FFE54648C5 \ No newline at end of file diff --git a/jstests/libs/cluster_cert.pem.digest.sha256 b/jstests/libs/cluster_cert.pem.digest.sha256 index 84449a1793f2c..ee3203dfc2f42 100644 --- a/jstests/libs/cluster_cert.pem.digest.sha256 +++ b/jstests/libs/cluster_cert.pem.digest.sha256 @@ -1 +1 @@ -18CD5ACA178A2A7D8275976FA81341106BBC0F775F6DBF47035D846A50101CE6 \ No newline at end of file +5A35ED3B01DA3990EBFE0BF2ACC3B1DA4DDB35222A0A2964FCE1345F85051058 \ No newline at end of file diff --git a/jstests/libs/cluster_server_parameter_utils.js b/jstests/libs/cluster_server_parameter_utils.js index 60f370b61ae22..dc4f5df8ac9c6 100644 --- a/jstests/libs/cluster_server_parameter_utils.js +++ b/jstests/libs/cluster_server_parameter_utils.js @@ -16,9 +16,9 @@ * when the featureFlag is disabled. 
*/ -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; -const kNonTestOnlyClusterParameters = { +export const kNonTestOnlyClusterParameters = { changeStreamOptions: { default: {preAndPostImages: {expireAfterSeconds: 'off'}}, testValues: [ @@ -38,7 +38,7 @@ const kNonTestOnlyClusterParameters = { }, }; -const kTestOnlyClusterParameters = { +export const kTestOnlyClusterParameters = { cwspTestNeedsFeatureFlagClusterWideToaster: { default: {intData: 16}, testValues: [{intData: 17}, {intData: 18}], @@ -58,21 +58,25 @@ const kTestOnlyClusterParameters = { }, }; -const kAllClusterParameters = +export const kAllClusterParameters = Object.assign({}, kNonTestOnlyClusterParameters, kTestOnlyClusterParameters); -const kAllClusterParameterNames = Object.keys(kAllClusterParameters); -const kAllClusterParameterDefaults = kAllClusterParameterNames.map( + +export const kAllClusterParameterNames = Object.keys(kAllClusterParameters); + +export const kAllClusterParameterDefaults = kAllClusterParameterNames.map( (name) => Object.assign({_id: name}, kAllClusterParameters[name].default)); -const kAllClusterParameterValues1 = kAllClusterParameterNames.map( + +export const kAllClusterParameterValues1 = kAllClusterParameterNames.map( (name) => Object.assign({_id: name}, kAllClusterParameters[name].testValues[0])); -const kAllClusterParameterValues2 = kAllClusterParameterNames.map( + +export const kAllClusterParameterValues2 = kAllClusterParameterNames.map( (name) => Object.assign({_id: name}, kAllClusterParameters[name].testValues[1])); -const kNonTestOnlyClusterParameterDefaults = +export const kNonTestOnlyClusterParameterDefaults = Object.keys(kNonTestOnlyClusterParameters) .map((name) => Object.assign({_id: name}, kAllClusterParameters[name].default)); -function considerParameter(paramName, conn) { +export function considerParameter(paramName, conn) { // { featureFlag: 'name' } indicates that the CWSP should only be considered with the FF // enabled. { featureFlag: '!name' } indicates that the CWSP should only be considered with the // FF disabled. @@ -88,7 +92,7 @@ function considerParameter(paramName, conn) { // A dictionary of 'setParameters' that should be validated while considering the current CWSP. function validateSetParameter(cp) { if (cp.setParameters) { - for ([param, value] of Object.entries(cp.setParameters)) { + for (let [param, value] of Object.entries(cp.setParameters)) { const resp = conn.getDB("admin").runCommand({getParameter: 1, param: 1}); const hasParam = resp.hasOwnProperty(param) && resp[param] === value; if (!hasParam) { @@ -125,7 +129,7 @@ function considerParameter(paramName, conn) { validateStandalone(cp); } -function tenantCommand(command, tenantId) { +export function tenantCommand(command, tenantId) { if (tenantId === undefined) { return command; } else { @@ -134,12 +138,12 @@ function tenantCommand(command, tenantId) { } // Set the log level for get/setClusterParameter logging to appear. 
-function setupNode(conn) { +export function setupNode(conn) { const adminDB = conn.getDB('admin'); adminDB.setLogLevel(2); } -function setupReplicaSet(rst) { +export function setupReplicaSet(rst) { setupNode(rst.getPrimary()); rst.getSecondaries().forEach(function(secondary) { @@ -147,7 +151,7 @@ function setupReplicaSet(rst) { }); } -function setupSharded(st) { +export function setupSharded(st) { setupNode(st.s0); const shards = [st.rs0, st.rs1, st.rs2]; @@ -160,7 +164,7 @@ function setupSharded(st) { } // Upserts config.clusterParameters document with w:majority via setClusterParameter. -function runSetClusterParameter(conn, update, tenantId) { +export function runSetClusterParameter(conn, update, tenantId) { const paramName = update._id; if (!considerParameter(paramName, conn)) { return; @@ -179,7 +183,7 @@ function runSetClusterParameter(conn, update, tenantId) { // Runs getClusterParameter on a specific mongod or mongos node and returns true/false depending // on whether the expected values were returned. -function runGetClusterParameterNode( +export function runGetClusterParameterNode( conn, getClusterParameterArgs, expectedClusterParameters, tenantId) { const adminDB = conn.getDB('admin'); @@ -229,7 +233,7 @@ function runGetClusterParameterNode( // Runs getClusterParameter on each replica set node and asserts that the response matches the // expected parameter objects on at least a majority of nodes. -function runGetClusterParameterReplicaSet( +export function runGetClusterParameterReplicaSet( rst, getClusterParameterArgs, expectedClusterParameters, tenantId) { let numMatches = 0; const numTotalNodes = rst.getSecondaries().length + 1; @@ -250,7 +254,7 @@ function runGetClusterParameterReplicaSet( // Runs getClusterParameter on mongos, each mongod in each shard replica set, and each mongod in // the config server replica set. -function runGetClusterParameterSharded( +export function runGetClusterParameterSharded( st, getClusterParameterArgs, expectedClusterParameters, tenantId) { assert(runGetClusterParameterNode( st.s0, getClusterParameterArgs, expectedClusterParameters, tenantId)); @@ -265,7 +269,7 @@ function runGetClusterParameterSharded( } // Tests valid usages of set/getClusterParameter and verifies that the expected values are returned. -function testValidClusterParameterCommands(conn) { +export function testValidClusterParameterCommands(conn) { if (conn instanceof ReplSetTest) { // Run getClusterParameter in list format and '*' and ensure it returns all default values // on all nodes in the replica set. @@ -342,12 +346,12 @@ function testValidClusterParameterCommands(conn) { } } -const tenantId1 = ObjectId(); -const tenantId2 = ObjectId(); +export const tenantId1 = ObjectId(); +export const tenantId2 = ObjectId(); // Tests valid usages of set/getClusterParameter on a serverless replica set and verifies that the // expected values are returned. -function testValidServerlessClusterParameterCommands(conn) { +export function testValidServerlessClusterParameterCommands(conn) { // TODO SERVER-69663 Add serverless sharded cluster tests once supported. assert(conn instanceof ReplSetTest); assert( @@ -413,7 +417,7 @@ function testValidServerlessClusterParameterCommands(conn) { } // Assert that explicitly getting a disabled cluster server parameter fails on a node. 
-function testExplicitDisabledGetClusterParameter(conn, tenantId) { +export function testExplicitDisabledGetClusterParameter(conn, tenantId) { const adminDB = conn.getDB('admin'); assert.commandFailedWithCode(adminDB.runCommand(tenantCommand( {getClusterParameter: "testIntClusterParameter"}, tenantId)), @@ -426,7 +430,7 @@ function testExplicitDisabledGetClusterParameter(conn, tenantId) { // Tests that disabled cluster server parameters return errors or are filtered out as appropriate // by get/setClusterParameter. -function testDisabledClusterParameters(conn, tenantId) { +export function testDisabledClusterParameters(conn, tenantId) { if (conn instanceof ReplSetTest) { // Assert that explicitly setting a disabled cluster server parameter fails. const adminDB = conn.getPrimary().getDB('admin'); @@ -495,7 +499,7 @@ function testDisabledClusterParameters(conn, tenantId) { } // Tests that invalid uses of getClusterParameter fails on a given node. -function testInvalidGetClusterParameter(conn, tenantId) { +export function testInvalidGetClusterParameter(conn, tenantId) { const adminDB = conn.getDB('admin'); // Assert that specifying a nonexistent parameter returns an error. assert.commandFailedWithCode( @@ -514,7 +518,7 @@ function testInvalidGetClusterParameter(conn, tenantId) { } // Tests that invalid uses of set/getClusterParameter fail with the appropriate errors. -function testInvalidClusterParameterCommands(conn, tenantId) { +export function testInvalidClusterParameterCommands(conn, tenantId) { if (conn instanceof ReplSetTest) { const adminDB = conn.getPrimary().getDB('admin'); @@ -539,6 +543,13 @@ function testInvalidClusterParameterCommands(conn, tenantId) { // Assert that invalid uses of getClusterParameter fail on secondaries. testInvalidGetClusterParameter(secondary, tenantId); }); + + // Assert that invalid direct writes to _config.clusterParameters fail. + assert.commandFailed(conn.getPrimary().getDB("config").clusterParameters.insert({ + _id: 'testIntClusterParameter', + foo: 'bar', + clusterParameterTime: {"$timestamp": {t: 0, i: 0}} + })); } else if (conn instanceof ShardingTest) { const adminDB = conn.s0.getDB('admin'); @@ -594,6 +605,19 @@ function testInvalidClusterParameterCommands(conn, tenantId) { // Assert that invalid forms of getClusterParameter fail on configsvr secondaries. testInvalidGetClusterParameter(secondary, tenantId); }); + // Assert that invalid direct writes to _config.clusterParameters fail. + assert.commandFailed(configRS.getPrimary().getDB("config").clusterParameters.insert({ + _id: 'testIntClusterParameter', + foo: 'bar', + clusterParameterTime: {"$timestamp": {t: 0, i: 0}} + })); + shards.forEach(function(shard) { + assert.commandFailed(shard.getPrimary().getDB("config").clusterParameters.insert({ + _id: 'testIntClusterParameter', + foo: 'bar', + clusterParameterTime: {"$timestamp": {t: 0, i: 0}} + })); + }); } else { // Standalone const adminDB = conn.getDB('admin'); @@ -607,5 +631,12 @@ function testInvalidClusterParameterCommands(conn, tenantId) { // Assert that running setClusterParameter with a scalar value fails. assert.commandFailed(adminDB.runCommand( tenantCommand({setClusterParameter: {testIntClusterParameter: 5}}, tenantId))); + + // Assert that invalid direct writes to _config.clusterParameters fail. 
+ assert.commandFailed(conn.getDB("config").clusterParameters.insert({ + _id: 'testIntClusterParameter', + foo: 'bar', + clusterParameterTime: {"$timestamp": {t: 0, i: 0}} + })); } } diff --git a/jstests/libs/cluster_title_foo.pem b/jstests/libs/cluster_title_foo.pem index 51c5a1b0e118c..c304bb2e89955 100644 --- a/jstests/libs/cluster_title_foo.pem +++ b/jstests/libs/cluster_title_foo.pem @@ -3,52 +3,52 @@ # # Alternate certificate for intracluster auth including the title attribute set to foo. -----BEGIN CERTIFICATE----- -MIIDjzCCAnegAwIBAgIEWd0RDTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDjzCCAnegAwIBAgIEfAUPhzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjMwMzIyMDIzODIyWhcNMjUwNjIzMDIzODIyWjB/MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjB/MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEUMBIGA1UEAwwL Y2x1c3RlcnRlc3QxDDAKBgNVBAwMA2ZvbzCCASIwDQYJKoZIhvcNAQEBBQADggEP -ADCCAQoCggEBAKtYXLqpGhTggA2fItGZDqGwmPmUWpMpazBZ1vMxyvWeLQvso9Pk -Ubz+zXT0MP+XtjteoqUwNcfRViSiv2wiIttBD3VlGH3dGJXSnQaMMjE1MORkkjHJ -qeSZZA75QEpfyRhx7Tc+JEIwQx1Ptrrt1k9rQv58x1N8zN27Eqsqw3f9dq4XjpCs -XRBcOOSjVyHRKli5j1wxFLDNxBtr5+i5LfmWOgPY/KSQtE0cRqFXTxajHuMaRUtl -z9QMKRKc2uN3E7fA1Fa8IboT4mhG6mY9xO2rMf0cV4ZuMa3LimwG4KnTnii8cz8g -fXPDENvdI4/Wm6YuUlQRlfu6v77Mb0UEfW8CAwEAAaMeMBwwGgYDVR0RBBMwEYIJ -bG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCdZZaJZN0X1htNTL1I -/ENBqZ5NYXyQi24yMJbRz+hVTFaR0gaecAG5A388YbcjmO1yLZcpzI4oHDSInc0Z -1kS8Zsriqfkh4ZQsWeHV9LImclecpYK2l0VB6YOpTOS2f75+PEaRSEYiYWEwERrk -q5IVodd59c5Mn8GUUrJVlVpNiwX1w0J9+qiUtmuQqrORpt6hbV1DGwXrMQgpprys -tZiEOxRUEyGTTtMoxktsGbT0o6Z+YAQRl90UVB7rPCpzwuJECFi0JXH28cIfncnr -8HVnEhxsPg4HHQmb5Ykq/gxNbAWSQAey3fP2NKosGnRKDJbd1ivyvvQNWya3DLIW -dbnN +ADCCAQoCggEBAMRFDgHwS8ElNPJ+vzGP13Z6gu/jXcKuoL88uCxIamXPDKJMuGPu +pWy2YAZk3mYCFzEky4XFu8WMQsKUqFhvhQc2/YnV7bEQ0IBlRuAEoTJG3lP3TnhL +dxvNpkol0A35wK9QHfzwAFT4yBFM7evZeC46j55wEVSAl5CYrioh1v5Og/O4qixt +OhzEh7/kzB4pAfbwrL0NxqMSkwxWAJO2tlIMD0EgODRc/a6rFjZ6fhYwx1YehBRZ +mYwF777XxiyYogwxqfjaIIbmK9g93rcJA5xmKOlhlhnI+S2JmNQDp+60S4C4ZMGO +9Th03H/eGHEcC9Nn6ZXnS9OiARqcb8R2U8sCAwEAAaMeMBwwGgYDVR0RBBMwEYIJ +bG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQABsEvdQJ4UdLxUcy7o +gNoDYmddQrMwgkLoE0KzkWS471JcxAxY+9x+EA8PP5+7G3tR39UcDPus+dnLsm9Q +Y7cq5/SEV3LuUXlBtxlEfnOflikOkFyvyYfEikEhyqj/cLMyOgUGVZzQW0DBwu5l +lZvEatST8Ag3+M/kY124yY5Z8COnPvOtVTD0qYr7eFHtVPTmpqnZIvJzjBKQqmYd +wv4suZNbEYBetewUJ5Ko8FUwlna5iuMsE92ME7WpGT3rRr6PHQ+jJfAt0WFMN6RF +Bj0IHe7bq6IEBKo9iiV0EfX0zCSrWg94sQa22KSAGsa2IgNY4Xicr/YiZJMMNDBx +NbPQ -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCrWFy6qRoU4IAN -nyLRmQ6hsJj5lFqTKWswWdbzMcr1ni0L7KPT5FG8/s109DD/l7Y7XqKlMDXH0VYk -or9sIiLbQQ91ZRh93RiV0p0GjDIxNTDkZJIxyankmWQO+UBKX8kYce03PiRCMEMd -T7a67dZPa0L+fMdTfMzduxKrKsN3/XauF46QrF0QXDjko1ch0SpYuY9cMRSwzcQb -a+fouS35ljoD2PykkLRNHEahV08Wox7jGkVLZc/UDCkSnNrjdxO3wNRWvCG6E+Jo -RupmPcTtqzH9HFeGbjGty4psBuCp054ovHM/IH1zwxDb3SOP1pumLlJUEZX7ur++ -zG9FBH1vAgMBAAECggEAIQ63NTwS0BxQGFCvgwiojgFoQh6hKus+xuFOWzUsFx8h -Sb+qC+Ns8a2nLf0+xtEaU3H6pywZ9CcrG35auB4N44c12Exc4Uuaxq0Ppoe910iP -2kCdBAYIRRZi+5CTGsZIIfM49QOEM1DkYe9TLdVdF412K2sfebgGPnEtNODXPXrU -P0iLqxXRJmiWMZoxzbxNATMS8LkUG4gjfDeuGJZD1QFoun6hbCT4W6B2CgVnSpM1 -8Njys39V55wjAsfaKm4gpzeNRj5V0iw/G++G43uCVUQntTR/kzMABsfYFIWmfuOq 
-E50VteYwzlxskQZxqAPcw/7QmZNCANIfEEWFw3hmAQKBgQDgCiYlquODGUTcDydF -jzmZ3nnpacvBQ9KVO8IwpOg0v63EGXokvhVBigUszYTnqdlnihTJxkccXeCr21G7 -pL1tqq8qKQga46MH1B4DF7xYftwwYewEzXYIfYfMkJaxPw3Q9xrWU0y2pbHUY7zg -0odpV5IFkhyRpig3vdS15gqjQQKBgQDDydQx1XniYh72lX89Od5fXnfubzJJiG5J -GzSP0Z4GMusEX6cV4VTjZydDunv22nmHUj2yVtXIyFST1VJ5A2/OmSwjN8Dm/91E -/fTaSa3Eh/H5EzUV6EtuZXnIdYWBM/tQfQwViA2gph2mIMLUD7kxVo5G6y6wL+kw -kgDqWRnkrwKBgQC0v1thXkoo9VT5mPwdAVz+R1/hsSniZR5aqZiUeCaij9XX9Jn3 -VKd/daORLsm/wOcVwm/dDatHNnHRFKMPGOx+soqZH/ta/jYEVdxUsGySlN595jJs -+Xn1hZjur+PzYaR65zDuosusO2eJq2GxnAgFM9IpzmRgGUYvGmamzc3dQQKBgHrB -2iTgx4oUoXtUIrI9zVqYfbPmzm3id9uojh06fc0/MbHNU5LZdIMcUzcY/s65Dwe0 -nfBql6JLURRb5VjwubKcwVrXg0CS3qZ6YIJZPfWCk0nrLBavTlRKlcAFR47KC+Hc -da4uXvUCEobt9ZpGvYPc1FpM7ToU4C3O7XoCIcULAoGAD7W2C2tiHepHUlbLCiEt -fHoyoWVc1v1xPRdw/lNHVkopHyxB7Zg8nf2ei9kv+6ECdqmNk6qiYVtFMd+gxK3e -G5sgEZ2GazACraR9snz+iBOyYm+CoKJd1YzeyuFIs3hdq0++QQAm9XDaTu6C8HEM -bkhlGRJcQyaN32bPtRXkymY= +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDERQ4B8EvBJTTy +fr8xj9d2eoLv413CrqC/PLgsSGplzwyiTLhj7qVstmAGZN5mAhcxJMuFxbvFjELC +lKhYb4UHNv2J1e2xENCAZUbgBKEyRt5T9054S3cbzaZKJdAN+cCvUB388ABU+MgR +TO3r2XguOo+ecBFUgJeQmK4qIdb+ToPzuKosbTocxIe/5MweKQH28Ky9DcajEpMM +VgCTtrZSDA9BIDg0XP2uqxY2en4WMMdWHoQUWZmMBe++18YsmKIMMan42iCG5ivY +Pd63CQOcZijpYZYZyPktiZjUA6futEuAuGTBjvU4dNx/3hhxHAvTZ+mV50vTogEa +nG/EdlPLAgMBAAECggEAWhMLC665RYoK06OBoYBPNQuine9t71PvZ/S43XHaiqDM +PowRL6OtfcfxJ94RByW8eQxW7yzBMTdeyCgrXnLHzXjaQPDSJin+Vn6kWVkmlYla +rZRZCfIwVq05vNu/sTpGyO5u5M9Mh7KpNu9kXZIvip/Lm7345hwh8vVvMNzY61pU +A3suFgpZ24XmQO27/mLK+H9uG28uGCg/3lIu+b70Z+lIQKfAN1Li05tWptFJV6P+ +oQZ9B6wtXKCpPsIUNZcMbet5zM3ypxm4XkchihTmB1IQoMW3VKDxf4+NRWmjivmy +We6rEcZi1Xa3M65i2Z0wc4SnAy3txUDaHUj1SvXt+QKBgQD9OQmji4ks0Y/kxb72 +51GqncR/Z/0QKP/3qvseUw4V5D1K5WdHdKayNhVojXkV7SqiDxDBdMXe5wURXNj9 +n7nle5K9l5lS0W7eezmja5QQbLuLO1WpZ1eb+jbzbtW8IBtABZptJq7Hmq/sHKSI +8RFwpX5G20XRhbfBBUYvovz0XwKBgQDGbBzNXq0eoft3hNnghDZgn5WuKc610Y8w +esuf2uVdBOOk8WNtNjMbIU515mzHJilJiUwzNkEQsoT5/76Qg3IWWCgJi99PICeC +86GtvQfH2jOrGGllmYtdlnSozwMQ7/pUD8DEM0l6JPPbb/ywvM36cUEwVgi0m0Mb +bFc5cy64FQKBgGewd3X0qVMk7Nss7imERziqKdBR8Joxb78m0HV2ZQopz79feI8W +ATxwUQvjAnYsC40Yxi+xdWT1DGozrtcMFL8XftsLvMjg4ZlQtCVq49Jl89XrkgQw +QPup1d6QwAysyPvKT5XqhR9PBEKW5/j7XbzWx4KUP16wdrIfSsIu543ZAoGAOi3Z +BB6OhDzajuDVQY+CojIooTiA867OXVij++si9XNJjEN687rAoWPSrZ8ypfH6iSVI +wntV+J9ffi4OojDocsTGeIYapAi5jRwCe/7BGUhRfglaXf/3bSmAaz7Hl1/F9n/n +9Z9UHAxZtC3R2cCCZLxwcMvJIauksZXCvYWYXUECgYBct0htlUnnI/9fU2VjTQIg ++1+16hgbERvLt6hdfiV3UQhlFjNK+sU800C4e6/wETJJwYHh50SzLzO/Jnd4XdT2 +CI6VfkOQTLd+lzGYHiAKDEAEbs5r5xNAPuRfVyYzJWn/1vpeEof0NkFNEDeB23h3 +JVHMGEFxjKDjiq6NPSQHWw== -----END PRIVATE KEY----- diff --git a/jstests/libs/cluster_title_foo.pem.digest.sha1 b/jstests/libs/cluster_title_foo.pem.digest.sha1 index 2e6c630d0f444..05925e5d1890e 100644 --- a/jstests/libs/cluster_title_foo.pem.digest.sha1 +++ b/jstests/libs/cluster_title_foo.pem.digest.sha1 @@ -1 +1 @@ -AAA79606BF68AE2AFA2A0F37F4DCD09FFCFD8295 \ No newline at end of file +E4F45DC0E7557AECD0C092C0E3825AC18B2F3EFE \ No newline at end of file diff --git a/jstests/libs/cluster_title_foo.pem.digest.sha256 b/jstests/libs/cluster_title_foo.pem.digest.sha256 index 21bcc294a5239..0fb8281c5f66d 100644 --- a/jstests/libs/cluster_title_foo.pem.digest.sha256 +++ b/jstests/libs/cluster_title_foo.pem.digest.sha256 @@ -1 +1 @@ -63EF60AFA384EAE126790C8CE5EE438F5956C77378D8997AD1644DBCC310F3DB \ No newline at end of file +0E1A9DBF21569470B698A2FEC88766FA73A6F9C37493E456C2E1E491EFAB0EBB \ No newline at end of file diff --git 
a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem index b3dff850c96ee..a2ae24b730191 100644 --- a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem +++ b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem @@ -3,51 +3,51 @@ # # Alternate certificate for intracluster auth including the title attribute set to foo without O, OU, or DC. -----BEGIN CERTIFICATE----- -MIIDbDCCAlSgAwIBAgIER0TcWzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDbDCCAlSgAwIBAgIEMjcc+TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjMwMzIyMDIzODUyWhcNMjUwNjIzMDIzODUyWjBcMRQwEgYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjBcMRQwEgYD VQQDDAtjbHVzdGVydGVzdDEMMAoGA1UEDAwDZm9vMQswCQYDVQQGEwJVUzERMA8G A1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkwggEiMA0GCSqG -SIb3DQEBAQUAA4IBDwAwggEKAoIBAQCwpF7T1FoPnfAHotkAv5NRotQekIebAqHW -ohdeQiqmJoIMJ58qZOaTaNm+HMRiPo6/PYuKqup7w9nkbBO6xRK6+N8nn3IsrOVl -MOuERahyCcjEBRStZL/QiDSOK7FzBwLsnx/wSgNWisOzi840h0+OLmtpEK4kjxgg -sbH5GFEObfWX4OgHtjdf4MTn/EZkyb643MQT6aD8/qQ0/Ai0ptKCbuCfycondK6U -Tzps9vA61gXy/KwPhYfs9BVeWQAP7XHZvv7Lqgg5yciEC+qBwR3/pCYUhNqECndj -VY0Uffp/uH5snBRNfS1+/p9jIo+t0nq3UXVjfz+Fl1Uwndp8wtaNAgMBAAGjHjAc +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDcjlSKJhnubIEYuoVuJ1A7Kn2xOVZvp3oB +IAEScUY46vKoJOi4Hn+r3KXDejiGk8/s50DzObn9TjtNvJlrg4UCdiAcUZ6LCwDp +2mfNy8ysYVRJi1sRGHGcfZChpadMDXCvuPj+V/m/l1RO0Ic/OPEG+yvZ8b1hCozN +jhNePRlyaH6Tq7ZJWmmkyayeLkQ5PO6DY7CV2WEtpUs00IaUraMNnhIWOZFsBY7p +hDUhFF7Y/PBjKozgi/WOwACsNvyGo9BZEq8YRj4zmSOY+z1lYtcw4SK7NyAooF5F +szBY50arOCuz/q/oSb/yv4pFRQoMheV58tVgl7MIYmGDVxOna/afAgMBAAGjHjAc MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA -E4q9YQX7PJ3IwibFNDpmwnb/mDHXQyhYdJsh7eRmdrF60TEMiXdYV+NMpVf/j1qV -bXiV7TskcNkqIK+88wbgDeR0gen+MhAyHCSe5B7QwRsuQ+8elsN84urmu9fddSKw -XycjivcqpqTGSyndWy5FAIfJ2SepZswgUofKcYOju36y6Ai5UBCQA1lNTwQHpQ8L -nZbf/mcqtQ0Op9y+UaT8r+L/ju9rNTVw96fDq4oJNXHZQgFUKZrv73RsJJaj8v5X -w0rYEQn0i3hIlap9clp4dXqFeqwrRxa5nI838p6DvjyMzBagMC6RVEHHI/JHAfzF -yy9y0ma7HQ32Lg5XspPrGw== +e+CY9ogOd42JptjeYltsTitqpuSRtp8J8/zes/yxeG0DEFhyqoWhlO3zDTM/p3zS +uYYENre8dx5LBzTED/Bqz0/nV2Gn/vx3x3QWXDsG1yfNJEuVPexhTgqOKN2Mg88E +vYyXIV+P7/8/pJQnp7tgUsG+FYLq7AkGrgoIX0e8KbJ5aXtTIvTKJ9VCAmsdsIZG +Aw1x7AJUCmp16bvSX7CRkKVZGgFR1jTzDQENgaJChscprI+foohQ/JzHrXSxMO6k +4H0BoGPPWXObYb2FjmAiK/a2wKJHk9E+TM5rz0Esmo96WPA/HMVupP8+8l/ypQu8 +TrSMtoHM7xBDZP/9Ng72MA== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCwpF7T1FoPnfAH -otkAv5NRotQekIebAqHWohdeQiqmJoIMJ58qZOaTaNm+HMRiPo6/PYuKqup7w9nk -bBO6xRK6+N8nn3IsrOVlMOuERahyCcjEBRStZL/QiDSOK7FzBwLsnx/wSgNWisOz -i840h0+OLmtpEK4kjxggsbH5GFEObfWX4OgHtjdf4MTn/EZkyb643MQT6aD8/qQ0 -/Ai0ptKCbuCfycondK6UTzps9vA61gXy/KwPhYfs9BVeWQAP7XHZvv7Lqgg5yciE -C+qBwR3/pCYUhNqECndjVY0Uffp/uH5snBRNfS1+/p9jIo+t0nq3UXVjfz+Fl1Uw -ndp8wtaNAgMBAAECggEABZWsydW04zmDFTq40aU86x/SxQScxPHYXAjT5E8DOi2N -fwThq111TMPL3o7aRqDjsngnqUKuFyuh/+7K0OTaKr8jjwUjfvYYapKZX500LibR -CiF+/dxplBY6UyRef9yA4ypEwDwWzu2kMlEBO/frM/uTucalOtKrWJ1FmzKBnYse -8H9zLyKbc96xk6IiFYlBqe6O6JT6mZtBHwz59zVmuJ7eP0V8Se8ZTA1MEE3P+ORR -/9xLURQc0hvfDFwSnM/gKAuwB3tpnJsEUmRCX0WBBCEiEJ+FaQ5yAihmfRv9AH8c -dFR/7XuKEMN5jetR4khjB2eBY26SXRzTQ8qE9fujoQKBgQDonTDu6EAa0yoySjQ+ -q3KW2Ir4Egqw3kJfBQ3ZBjtsRvbBsl0S0rEfq2EfgusKvIz9sVDLEGhrOnn8zhKM -CWkaikZORwniRtGUpMdsbw7UfUHaSDi/12kqD7vKXs3bJWrQsRVl3yHMYaDHWAUF -L9q9rvD7AD12bFMH8cBnGjmuFwKBgQDCZp+0G8fUlVgACwmMNnToOT/mzQEjsUlG 
-4ReS/o889pPvtpm+Ul5XK1Pl1gvcwfSo2hkzXBht95Sj5t7L4qBkK2naoN1LgbfX -R/fLuMQLCYgUOs3UbOUfyOy1LfgEHINuDHVaK7RkiWhuHE3/a+VKZvOnffL8Copu -xo1LyUHK+wKBgD8Rh5fu/pqHUHSMK/gl8g62LY+vDJkB2gr7StLh3rCv2O2Rl6yn -1YBZrh6mF2Y00yFhtx8nlrgkBbkmgl7XmliozwEgP6zLOL3No4hh4Cp6v6UYWdKh -7BCMbYUkCTp2vaxRpxSU2AwbGEWUNuA+JlexnALiAMgf/K81u834jVUHAoGBAIBP -K+m8zFBLoiGlJ1AcQV1lLAAyHyZnxW2688xZqEEcntgBNcigpRPzzRROCtZSTiGE -kk2L47PxTXJA15zKoAJ9hQiAVI+ZtrWpEqyr7vk5+U8g4OnsVe58t39+L8zG5Ril -sG8rmY0iBINouzJzDIvnF7rdLpuceXJUKr5yv7IxAoGBALxfb5KmvbdmBe43a5zo -J+Ig8oURUXbaWPb+8rpp+GaK3Hqf0Asjqlq2Fulz6TlwtkoRPl1yyebkNqz23p+T -0K52WJWnpxmXi5dRqDTJie/E8Tvm8ff/Xey04jDdS+J56WAAnC0P5O+Lq9BD6iNG -U3G/2LmJ+zn2NPeSxPyW3PSf +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDcjlSKJhnubIEY +uoVuJ1A7Kn2xOVZvp3oBIAEScUY46vKoJOi4Hn+r3KXDejiGk8/s50DzObn9TjtN +vJlrg4UCdiAcUZ6LCwDp2mfNy8ysYVRJi1sRGHGcfZChpadMDXCvuPj+V/m/l1RO +0Ic/OPEG+yvZ8b1hCozNjhNePRlyaH6Tq7ZJWmmkyayeLkQ5PO6DY7CV2WEtpUs0 +0IaUraMNnhIWOZFsBY7phDUhFF7Y/PBjKozgi/WOwACsNvyGo9BZEq8YRj4zmSOY ++z1lYtcw4SK7NyAooF5FszBY50arOCuz/q/oSb/yv4pFRQoMheV58tVgl7MIYmGD +VxOna/afAgMBAAECggEBAMw/VpTU8gC1JNxUpwo6h/cyw1Gi6qPdsYNnMvb0RXai +RXNnMsiHHe53GF8tSTl/mvcltSVJWS72Cr9Tn7RMCJod8GCpSSw0VXU7GBQh3nno +bFjrH7t2KogkVBMOSB6K98cTgipwKE3AA4g1Xnoy7ipr4dEkKB+82GXnY5JK/MzP +tNoRD4YLWKbI60mgK1Po7DkeiAPub4dFwzpmdvfpHpB/vuuL7R+pieg9TeGAnrLg +tjk8xsVTszxAlBlbw81y1IT3FpKhvmxbfNPDWLNRm19gxqPoPQF+QVQF8XxFQ9jC +dmddjPpmZgkd/R3OPN7SLfN/fblXStTtHFyk0Ge3qLECgYEA/5ssCDQHzrf3ZTwc +vQMZfxW8hI/0WFyTuT1+z/UJ5SA+WE1wQUu8yZD3brQjq3o0cSqdCY6XIC1EgH29 +yfp9bFvpB/mMytW1PPy552dcx9N9ZjtbiUEtvhllfdHzN4EHjvtCaEPWzuXUMRir +meQ9jZQSqkbMamyUUyft6Rg39bcCgYEA3OVVBjLe9YmZ4wyhENFeAuR8bN/Kb/z1 ++SxwWh14EwH4NCD1LaQIkKEQ/qqH2Lk5yB9R91eqambepTiIvYyoXeyWV51MqnnE +mPzxBpi7wWblg6DX/vt6dZyDDupAnwsvhfLqbscrs1IfBe1IzE4y4wutIf2Kj4BI +OS4dwUt0xlkCgYB7T+AJQRi3KNdodnyizxkAz3q4NT67VkZKKpnAN8YDTO/m580N +Iz27vH0yYiOHOZiNM/K4xpqwAka2+nKSO49AtIKSv1imDj22Y3JIafw/xw8LP/2k +FNa9jEEDV0NP9qav2xWpeXjrLcOVwAIhZxQu9k2e6jL58NIJ4AyN1IpCtwKBgEhn +HuRGPlBKxtbl59EkXuUh0Sq6e5cTWehPSZAwApBwX9NuTg6kqm4FbRWb/aTqqWbP +5UTmRiRUDXwoOKGwfeszRd/33g4ulWLz3WilHT16JZZsXP/lm7D7GPYFkCLRPsVv +5qlivQ8sxqBhsy/MHd/PjvMKMKVyjbm4ROZ7fg4ZAoGBAP1oDVm7TkG9c5WQjhGi +mNLOICxHJgwbKvBohyNNSks/ghla8CzVaZrwuNNUhU6mD2vE254+GaFaIfT0pFVJ +cGO88GVJENIoQ28E4nvcmlyAAb5tep0TqWvAoYkMH6VZ2pjMOtpqt3HuvZKg1Znf +PEP+srnIlrQ1OgN7lpisW0e8 -----END PRIVATE KEY----- diff --git a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha1 b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha1 index 2550111023592..2b7e1e0bb1544 100644 --- a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha1 +++ b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha1 @@ -1 +1 @@ -ECA9EA58F05E2C92503D0F0B776BA5264A7D9D4B \ No newline at end of file +49FC3CB680E39E396B89E69712F90AE3881D5A12 \ No newline at end of file diff --git a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha256 b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha256 index 87c6d2980e612..ad839f370fced 100644 --- a/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha256 +++ b/jstests/libs/cluster_title_foo_no_o_ou_dc.pem.digest.sha256 @@ -1 +1 @@ -D2AAD57CB4C330806DA153860BD0E908E3CFE4C41061986C3F27DBC1DD80B2D2 \ No newline at end of file +20B576CDF1772A6CF29AE384ED6E0E2CF60D98A2B0DFE4F13B81054E118F242A \ No newline at end of file diff --git a/jstests/libs/clustered_collections/clustered_capped_utils.js b/jstests/libs/clustered_collections/clustered_capped_utils.js index 5045fa3d07e40..0ede4d5464408 100644 --- 
a/jstests/libs/clustered_collections/clustered_capped_utils.js +++ b/jstests/libs/clustered_collections/clustered_capped_utils.js @@ -1,6 +1,6 @@ -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; -var ClusteredCappedUtils = class { +export var ClusteredCappedUtils = class { // Validate TTL-based deletion on a clustered, capped collection. static testClusteredCappedCollectionWithTTL(db, collName, clusterKeyField) { jsTest.log("Validating TTL operation on capped clustered collection"); @@ -308,12 +308,7 @@ var ClusteredCappedUtils = class { {getParameter: 1, "ttlMonitorBatchDeletes": 1}))["ttlMonitorBatchDeletes"]; const ns = db.getName() + "." + collName; - const featureFlagBatchMultiDeletes = assert.commandWorked(db.adminCommand({ - getParameter: 1, - "featureFlagBatchMultiDeletes": 1 - }))["featureFlagBatchMultiDeletes"]["value"]; - - if (featureFlagBatchMultiDeletes && isBatched) { + if (isBatched) { const ops = db.getSiblingDB("local") .oplog.rs diff --git a/jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js b/jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js index f9495c913c5c8..fbfde4f31370d 100644 --- a/jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js +++ b/jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js @@ -1,10 +1,10 @@ /** * Validate bounded collection scans on a clustered collection. */ +import {getPlanStage} from "jstests/libs/analyze_plan.js"; -const testClusteredCollectionBoundedScan = function(coll, clusterKey) { +export const testClusteredCollectionBoundedScan = function(coll, clusterKey) { "use strict"; - load("jstests/libs/analyze_plan.js"); load("jstests/libs/collection_drop_recreate.js"); const batchSize = 100; @@ -23,6 +23,27 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) { assert.commandWorked(bulk.execute()); assert.eq(coll.find().itcount(), batchSize); } + + // Checks that the number of docs examined matches the expected number. There are separate + // expected args for Classic vs SBE because in Classic there is an extra cursor->next() call + // beyond the end of the range if EOF has not been hit, but in SBE there is not. This function + // also handles that this stat is in different places for the two engines: + // Classic: executionStats.executionStages.docsExamined + // SBE: executionStats.totalDocsExamined + function assertDocsExamined(executionStats, expectedClassic, expectedSbe) { + let sbe = false; + let docsExamined = executionStats.executionStages.docsExamined; + if (docsExamined == undefined) { + sbe = true; + docsExamined = executionStats.totalDocsExamined; + } + if (sbe) { + assert.eq(expectedSbe, docsExamined); + } else { + assert.eq(expectedClassic, docsExamined); + } + } + function testEq() { initAndPopulate(coll, clusterKey); @@ -36,11 +57,12 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) { assert.eq(5, getPlanStage(expl, "CLUSTERED_IXSCAN").maxRecord); assert.eq(1, expl.executionStats.executionStages.nReturned); - // Expect nReturned + 1 documents examined by design - additional cursor 'next' beyond - // the range. - assert.eq(2, expl.executionStats.executionStages.docsExamined); + // In Classic, expect nReturned + 1 documents examined by design - additional cursor 'next' + // beyond the range. In SBE, expect nReturned as it does not examine the extra document. 
+ assertDocsExamined(expl.executionStats, 2, 1); } - function testLT(op, val, expectedNReturned, expectedDocsExamined) { + + function testLT(op, val, expectedNReturned, expectedDocsExaminedClassic) { initAndPopulate(coll, clusterKey); const expl = assert.commandWorked(coll.getDB().runCommand({ @@ -55,9 +77,14 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) { assert.eq(NaN, getPlanStage(expl, "CLUSTERED_IXSCAN").minRecord); assert.eq(expectedNReturned, expl.executionStats.executionStages.nReturned); - assert.eq(expectedDocsExamined, expl.executionStats.executionStages.docsExamined); + + // In this case the scans do not hit EOF, so there is an extra cursor->next() call past the + // end of the range in Classic, making SBE expect one fewer doc examined than Classic. + assertDocsExamined( + expl.executionStats, expectedDocsExaminedClassic, expectedDocsExaminedClassic - 1); } - function testGT(op, val, expectedNReturned, expectedDocsExamined) { + + function testGT(op, val, expectedNReturned, expectedDocsExaminedClassic) { initAndPopulate(coll, clusterKey); const expl = assert.commandWorked(coll.getDB().runCommand({ @@ -72,9 +99,14 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) { assert.eq(89, getPlanStage(expl, "CLUSTERED_IXSCAN").minRecord); assert.eq(expectedNReturned, expl.executionStats.executionStages.nReturned); - assert.eq(expectedDocsExamined, expl.executionStats.executionStages.docsExamined); + + // In this case the scans hit EOF, so there is no extra cursor->next() call in Classic, + // making Classic and SBE expect the same number of docs examined. + assertDocsExamined( + expl.executionStats, expectedDocsExaminedClassic, expectedDocsExaminedClassic); } - function testRange(min, minVal, max, maxVal, expectedNReturned, expectedDocsExamined) { + + function testRange(min, minVal, max, maxVal, expectedNReturned, expectedDocsExaminedClassic) { initAndPopulate(coll, clusterKey); const expl = assert.commandWorked(coll.getDB().runCommand({ @@ -92,8 +124,13 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) { assert.eq(maxVal, getPlanStage(expl, "CLUSTERED_IXSCAN").maxRecord); assert.eq(expectedNReturned, expl.executionStats.executionStages.nReturned); - assert.eq(expectedDocsExamined, expl.executionStats.executionStages.docsExamined); + + // In this case the scans do not hit EOF, so there is an extra cursor->next() call past the + // end of the range in Classic, making SBE expect one fewer doc examined than Classic. + assertDocsExamined( + expl.executionStats, expectedDocsExaminedClassic, expectedDocsExaminedClassic - 1); } + function testIn() { initAndPopulate(coll, clusterKey); @@ -107,10 +144,12 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) { assert.eq(30, getPlanStage(expl, "CLUSTERED_IXSCAN").maxRecord); assert.eq(3, expl.executionStats.executionStages.nReturned); - // The range scanned is 21 documents + 1 extra document by design - additional cursor - // 'next' beyond the range. - assert.eq(22, expl.executionStats.executionStages.docsExamined); + // The range scanned is 21 documents. In Classic, expect 'docsExamined' to be one higher by + // design - additional cursor 'next' beyond the range. In SBE, expect 21 as it does not + // examine the extra document. 
+ assertDocsExamined(expl.executionStats, 22, 21); } + function testNonClusterKeyScan() { initAndPopulate(coll, clusterKey); @@ -127,6 +166,12 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) { function testBoundedScans(coll, clusterKey) { testEq(); + + // The last argument of the following calls, 'expectedDocsExaminedClassic', and the specific + // comments, are for Classic engine. SBE does not have the additional cursor->next() call + // beyond the range, so in calls to testLT() and testRange() its value will be one lower. + // This is accounted for by delegations to the assertDocsExamined() helper function. + // Expect docsExamined == nReturned + 2 due to the collection scan bounds being always // inclusive and due to the by-design additional cursor 'next' beyond the range. testLT("$lt", 10, 10, 12); @@ -139,7 +184,7 @@ const testClusteredCollectionBoundedScan = function(coll, clusterKey) { testGT("$gt", 89, 10, 11); // Expect docsExamined == nReturned. testGT("$gte", 89, 11, 11); - // docsExamined reflects the fact that collection scan bounds are always exclusive and + // docsExamined reflects the fact that collection scan bounds are always inclusive and // that by design we do an additional cursor 'next' beyond the range. testRange("$gt", 20, "$lt", 40, 19, 22); testRange("$gte", 20, "$lt", 40, 20, 22); diff --git a/jstests/libs/clustered_collections/clustered_collection_hint_common.js b/jstests/libs/clustered_collections/clustered_collection_hint_common.js index a65939698e2df..6315d5967f64b 100644 --- a/jstests/libs/clustered_collections/clustered_collection_hint_common.js +++ b/jstests/libs/clustered_collections/clustered_collection_hint_common.js @@ -1,12 +1,10 @@ /** * Validate $hint on a clustered collection. */ +import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js"; +load("jstests/libs/collection_drop_recreate.js"); -function testClusteredCollectionHint(coll, clusterKey, clusterKeyName) { - "use strict"; - load("jstests/libs/analyze_plan.js"); - load("jstests/libs/collection_drop_recreate.js"); - +export function testClusteredCollectionHint(coll, clusterKey, clusterKeyName) { const clusterKeyFieldName = Object.keys(clusterKey)[0]; const batchSize = 100; @@ -196,6 +194,91 @@ function testClusteredCollectionHint(coll, clusterKey, clusterKeyName) { } }); + // Find with $natural hints and sorts: we should scan the collection in the hinted + // direction regardless of sort direction, and provide a blocking sort if needed. + validateClusteredCollectionHint(coll, { + expectedNReturned: batchSize, + cmd: {find: collName, hint: {$natural: 1}, sort: {[clusterKeyFieldName]: 1}}, + expectedWinningPlanStats: { + stage: "COLLSCAN", + direction: "forward", + }, + unexpectedWinningPlanStats: ["SORT"] // We shouldn't need a blocking sort here. 
+ }); + validateClusteredCollectionHint(coll, { + expectedNReturned: batchSize, + cmd: {find: collName, hint: {$natural: -1}, sort: {[clusterKeyFieldName]: 1}}, + expectedWinningPlanStats: [ + {stage: "SORT", sortPattern: {[clusterKeyFieldName]: 1}}, + { + stage: "COLLSCAN", + direction: "backward", + } + ] + }); + validateClusteredCollectionHint(coll, { + expectedNReturned: batchSize, + cmd: {find: collName, hint: {$natural: 1}, sort: {[clusterKeyFieldName]: -1}}, + expectedWinningPlanStats: [ + {stage: "SORT", sortPattern: {[clusterKeyFieldName]: -1}}, + { + stage: "COLLSCAN", + direction: "forward", + } + ] + }); + validateClusteredCollectionHint(coll, { + expectedNReturned: batchSize, + cmd: {find: collName, hint: {$natural: -1}, sort: {[clusterKeyFieldName]: -1}}, + expectedWinningPlanStats: { + stage: "COLLSCAN", + direction: "backward", + }, + unexpectedWinningPlanStats: ["SORT"] // We shouldn't need a blocking sort here. + }); + + // We always need a blocking sort when the sort pattern does not match the provided sort for + // the clustered collection. + validateClusteredCollectionHint(coll, { + expectedNReturned: batchSize, + cmd: {find: collName, hint: {$natural: 1}, sort: {a: 1}}, + expectedWinningPlanStats: [ + {stage: "SORT", sortPattern: {a: 1}}, + { + stage: "COLLSCAN", + direction: "forward", + } + ] + }); + validateClusteredCollectionHint(coll, { + expectedNReturned: batchSize, + cmd: {find: collName, hint: {$natural: -1}, sort: {a: 1}}, + expectedWinningPlanStats: [ + {stage: "SORT", sortPattern: {a: 1}}, + { + stage: "COLLSCAN", + direction: "backward", + } + ] + }); + validateClusteredCollectionHint(coll, { + expectedNReturned: batchSize, + cmd: {find: collName, hint: {$natural: 1}, sort: {a: -1}}, + expectedWinningPlanStats: [ + {stage: "SORT", sortPattern: {a: -1}}, + { + stage: "COLLSCAN", + direction: "forward", + } + ] + }); + validateClusteredCollectionHint(coll, { + expectedNReturned: batchSize, + cmd: {find: collName, hint: {$natural: -1}, sort: {a: -1}}, + expectedWinningPlanStats: + [{stage: "SORT", sortPattern: {a: -1}}, {stage: "COLLSCAN", direction: "backward"}], + }); + // Find on a standard index. validateClusteredCollectionHint(coll, { expectedNReturned: batchSize, @@ -280,25 +363,39 @@ function testClusteredCollectionHint(coll, clusterKey, clusterKeyName) { return testHint(coll, clusterKey, clusterKeyName); } -function validateClusteredCollectionHint(coll, - {expectedNReturned, cmd, expectedWinningPlanStats = {}}) { +export function validateClusteredCollectionHint( + coll, + {expectedNReturned, cmd, expectedWinningPlanStats = {}, unexpectedWinningPlanStats = []}) { const explain = assert.commandWorked(coll.runCommand({explain: cmd})); assert.eq(explain.executionStats.nReturned, expectedNReturned, tojson(explain)); const actualWinningPlan = getWinningPlan(explain.queryPlanner); - const stageOfInterest = getPlanStage(actualWinningPlan, expectedWinningPlanStats.stage); - assert.neq(null, stageOfInterest); - for (const [key, value] of Object.entries(expectedWinningPlanStats)) { - assert(stageOfInterest[key] !== undefined, tojson(explain)); - assert.eq(stageOfInterest[key], value, tojson(explain)); + if (!Array.isArray(expectedWinningPlanStats)) { + expectedWinningPlanStats = [expectedWinningPlanStats]; } - // Explicitly check that the plan is not bounded by default. 
- if (!expectedWinningPlanStats.hasOwnProperty("minRecord")) { - assert(!actualWinningPlan.hasOwnProperty("minRecord"), tojson(explain)); + for (const excludedStage of unexpectedWinningPlanStats) { + const stageOfInterest = getPlanStage(actualWinningPlan, excludedStage); + assert.eq(null, stageOfInterest); } - if (!expectedWinningPlanStats.hasOwnProperty("maxRecord")) { - assert(!actualWinningPlan.hasOwnProperty("maxRecord"), tojson(explain)); + + for (const expectedWinningPlanStageStats of expectedWinningPlanStats) { + const stageOfInterest = + getPlanStage(actualWinningPlan, expectedWinningPlanStageStats.stage); + assert.neq(null, stageOfInterest); + + for (const [key, value] of Object.entries(expectedWinningPlanStageStats)) { + assert(stageOfInterest[key] !== undefined, tojson(explain)); + assert.eq(stageOfInterest[key], value, tojson(explain)); + } + + // Explicitly check that the plan is not bounded by default. + if (!expectedWinningPlanStageStats.hasOwnProperty("minRecord")) { + assert(!actualWinningPlan.hasOwnProperty("minRecord"), tojson(explain)); + } + if (!expectedWinningPlanStageStats.hasOwnProperty("maxRecord")) { + assert(!actualWinningPlan.hasOwnProperty("maxRecord"), tojson(explain)); + } } } diff --git a/jstests/libs/clustered_collections/clustered_collection_util.js b/jstests/libs/clustered_collections/clustered_collection_util.js index 0018721551b45..e53220858dbbf 100644 --- a/jstests/libs/clustered_collections/clustered_collection_util.js +++ b/jstests/libs/clustered_collections/clustered_collection_util.js @@ -2,7 +2,6 @@ * Utilities for testing clustered collections. */ -load("jstests/libs/analyze_plan.js"); load("jstests/libs/collection_drop_recreate.js"); var ClusteredCollectionUtil = class { @@ -193,4 +192,4 @@ var ClusteredCollectionUtil = class { assert.eq(1, coll.find({[clusterKey]: NumberLong("42")}).itcount()); coll.drop(); } -}; +}; \ No newline at end of file diff --git a/jstests/libs/columnstore_util.js b/jstests/libs/columnstore_util.js index 9d80d3ae87d11..58b6831ef18f5 100644 --- a/jstests/libs/columnstore_util.js +++ b/jstests/libs/columnstore_util.js @@ -6,7 +6,7 @@ load("jstests/libs/discover_topology.js"); // For findNonConfigNodes. // For areAllCollectionsClustered. load("jstests/libs/clustered_collections/clustered_collection_util.js"); load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For setParameterOnAllHosts. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; /** * Updates server parameters to disable column scan query planning heuristics so that column scan @@ -17,7 +17,7 @@ load("jstests/libs/sbe_util.js"); // For checkSBE * for the planning heuristics behavior is included in unit tests, no passthrough tests, and perf * tests. */ -function fullyEnableColumnScan(nodes) { +export function fullyEnableColumnScan(nodes) { // Since the CSI query planning heuristics are OR-ed together, we can set any one of // [internalQueryColumnScanMinAvgDocSizeBytes, internalQueryColumnScanMinCollectionSizeBytes, // internalQueryColumnScanMinNumColumnFilters] to zero in order to fully enable column scan. @@ -29,12 +29,12 @@ function fullyEnableColumnScan(nodes) { * expected to succeed. Otherwise, logs the reason why the test will not create column store indexes * and returns false. 
*/ -function safeToCreateColumnStoreIndex(db) { +export function safeToCreateColumnStoreIndex(db) { return safeToCreateColumnStoreIndexInCluster( DiscoverTopology.findNonConfigNodes(db.getMongo())); } -function safeToCreateColumnStoreIndexInCluster(nodes) { +export function safeToCreateColumnStoreIndexInCluster(nodes) { for (const node of nodes) { const conn = new Mongo(node); if (FixtureHelpers.isMongos(conn.getDB("admin"))) { @@ -79,7 +79,7 @@ function safeToCreateColumnStoreIndexInCluster(nodes) { * Checks if the test is eligible to run and sets the appropriate parameters to use column store * indexes. Returns true if setup was successful. */ -function setUpServerForColumnStoreIndexTest(db) { +export function setUpServerForColumnStoreIndexTest(db) { if (!checkSBEEnabled(db)) { jsTestLog("Skipping column store index test since SBE is disabled"); return false; diff --git a/jstests/libs/config_files/disable_moveparanoia.ini b/jstests/libs/config_files/disable_moveparanoia.ini deleted file mode 100644 index f21b50f9513c9..0000000000000 --- a/jstests/libs/config_files/disable_moveparanoia.ini +++ /dev/null @@ -1 +0,0 @@ -moveParanoia=false diff --git a/jstests/libs/config_files/disable_nomoveparanoia.ini b/jstests/libs/config_files/disable_nomoveparanoia.ini deleted file mode 100644 index 4696304134f36..0000000000000 --- a/jstests/libs/config_files/disable_nomoveparanoia.ini +++ /dev/null @@ -1 +0,0 @@ -noMoveParanoia=false diff --git a/jstests/libs/config_files/enable_paranoia.json b/jstests/libs/config_files/enable_paranoia.json deleted file mode 100644 index 218646b1662e0..0000000000000 --- a/jstests/libs/config_files/enable_paranoia.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "sharding" : { - "archiveMovedChunks" : true - } -} diff --git a/jstests/libs/config_files/set_shardingrole_configsvr.json b/jstests/libs/config_files/set_shardingrole_configsvr.json index f09d10d3f18a2..be6c3853e0bfe 100644 --- a/jstests/libs/config_files/set_shardingrole_configsvr.json +++ b/jstests/libs/config_files/set_shardingrole_configsvr.json @@ -5,4 +5,4 @@ "replication" : { "replSetName" : "dummy" } -} \ No newline at end of file +} diff --git a/jstests/libs/config_files/set_shardingrole_shardsvr.json b/jstests/libs/config_files/set_shardingrole_shardsvr.json index c605dce50cc9e..333a33528b9ad 100644 --- a/jstests/libs/config_files/set_shardingrole_shardsvr.json +++ b/jstests/libs/config_files/set_shardingrole_shardsvr.json @@ -5,4 +5,4 @@ "replication" : { "replSetName" : "dummy" } -} \ No newline at end of file +} diff --git a/jstests/libs/config_shard_util.js b/jstests/libs/config_shard_util.js new file mode 100644 index 0000000000000..a0c8f1584a405 --- /dev/null +++ b/jstests/libs/config_shard_util.js @@ -0,0 +1,53 @@ +/** + * Utilities for testing config server config shard behaviors. 
+ */ +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +export const ConfigShardUtil = (function() { + function isTransitionEnabledIgnoringFCV(st) { + return FeatureFlagUtil.isEnabled(st.configRS.getPrimary(), + "TransitionToCatalogShard", + undefined /* user */, + true /* ignoreFCV */); + } + + function transitionToDedicatedConfigServer(st, timeout) { + if (timeout == undefined) { + timeout = 10 * 60 * 1000; // 10 minutes + } + + assert.soon(function() { + const res = st.s.adminCommand({transitionToDedicatedConfigServer: 1}); + if (!res.ok && res.code === ErrorCodes.ShardNotFound) { + // If the config server primary steps down right after removing the config.shards + // doc for the shard but before responding with "state": "completed", the mongos + // would retry the _configsvrTransitionToDedicatedConfigServer command against the + // new config server primary, which would not find the removed shard in its + // ShardRegistry if it has done a ShardRegistry reload after the config.shards doc + // for the shard was removed. This would cause the command to fail with + // ShardNotFound. + return true; + } + assert.commandWorked(res); + return res.state == 'completed'; + }, "failed to transition to dedicated config server within " + timeout + "ms", timeout); + } + + function waitForRangeDeletions(conn) { + assert.soon(() => { + const rangeDeletions = conn.getCollection("config.rangeDeletions").find().toArray(); + if (rangeDeletions.length) { + print("Waiting for range deletions to complete: " + tojsononeline(rangeDeletions)); + sleep(100); + return false; + } + return true; + }); + } + + return { + isTransitionEnabledIgnoringFCV, + transitionToDedicatedConfigServer, + waitForRangeDeletions, + }; +})(); diff --git a/jstests/libs/conn_pool_helpers.js b/jstests/libs/conn_pool_helpers.js index 55977524f705d..9beb49da1d135 100644 --- a/jstests/libs/conn_pool_helpers.js +++ b/jstests/libs/conn_pool_helpers.js @@ -32,16 +32,17 @@ function launchFinds(mongos, threads, {times, readPref, shouldFail}) { } } -function assertHasConnPoolStats(mongos, allHosts, args, checkNum) { +function assertHasConnPoolStats(mongos, allHosts, args, checkNum, connPoolStatsCmd = undefined) { checkNum++; jsTestLog("Check #" + checkNum + ": " + tojson(args)); - var {ready = 0, pending = 0, active = 0, hosts = allHosts, isAbsent, checkStatsFunc} = args; + let {ready = 0, pending = 0, active = 0, hosts = allHosts, isAbsent, checkStatsFunc} = args; checkStatsFunc = checkStatsFunc ? checkStatsFunc : function(stats) { - return stats.available == ready && stats.refreshing == pending && stats.inUse == active; + return stats.available == ready && stats.refreshing == pending && + (stats.inUse + stats.leased) == active; }; function checkStats(res, host) { - var stats = res.hosts[host]; + let stats = res.hosts[host]; if (!stats) { jsTestLog("Connection stats for " + host + " are absent"); return isAbsent; @@ -52,7 +53,8 @@ function assertHasConnPoolStats(mongos, allHosts, args, checkNum) { } function checkAllStats() { - var res = mongos.adminCommand({connPoolStats: 1}); + let cmdName = connPoolStatsCmd ? 
connPoolStatsCmd : "connPoolStats"; + let res = mongos.adminCommand({[cmdName]: 1}); return hosts.map(host => checkStats(res, host)).every(x => x); } diff --git a/jstests/libs/crl.pem b/jstests/libs/crl.pem index c835a93e2ab5d..44fb2e5ff3e88 100644 --- a/jstests/libs/crl.pem +++ b/jstests/libs/crl.pem @@ -1,12 +1,12 @@ -----BEGIN X509 CRL----- MIIBujCBozANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwI TmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdv -REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIy -MDIwMzIyMDAyMVoXDTI0MDUwNjIyMDAyMVowDQYJKoZIhvcNAQELBQADggEBAHuY -cW87HAG5icgtgJ6X/z/uYf9oVQCCRU589Vr5HCETIobfkAWOqTM5yxO5Oa+o5G7C -ZgrEPT2JkDx/Us4kYwY3J3rQSpCSJhMqIAHQIKIojzkyQI6PguyS2x7JU9uEp/Z2 -qeM72ogBsFFX6Ior4YczeC+KAmD2OJS0B2Zed6nSqfmbk2WZf1q4i9a6BgU+46Hb -HBnfHQv8/utrmVQs2ibCnapBH4ihPz7ZZNRd+0cmv3C/P0rqJF7wGlcaseIZULyo -7GcA494HcpN+nj1U6Cjh5nscXzn/2hvt3miZ+P32Y7SVzezqoIacKinf26V5qxq7 -fpW0No+7nMxkW0zdRKg= +REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIz +MDYxNjE0MjkxNVoXDTI1MDkxNjE0MjkxNVowDQYJKoZIhvcNAQELBQADggEBAHfk +rEkGCjmhkghxGHuUwBXpHkntgsKNZQnUAUjRp6DCJBA+OpSrFOGOuRvVZcV/8C/7 +IS5H0qf0qybnEAqvXU0bZD9e8+ipLa73879YbODzHkBiyL9fLg2BGJLXPvi9ETV1 +UspQf25h7Wh23MqHj9yiqo1aKazcOmyvxUnYsRxnzXQNIMJ4QfAOa4hZuOs+qmDf +rEYqkrUNzmvrzVU2zvgTqT2fJyPUz/s2IDj7BJCXrmysGUPcPRftx45kmfjU9tdm +4Po3EspAjuUoAcYvGzpz68c2Y8CbenrEfJoDjAzm8mRL57xcXz/xtRLhOYMhKvlh +okBs7OL7fQCUxCC++Mc= -----END X509 CRL----- diff --git a/jstests/libs/crl.pem.digest.sha1 b/jstests/libs/crl.pem.digest.sha1 index 69b575b83e177..ddbb042f90a50 100644 --- a/jstests/libs/crl.pem.digest.sha1 +++ b/jstests/libs/crl.pem.digest.sha1 @@ -1 +1 @@ -2231880F51FB2A6A2E9B97AA5D7CE4FAD5DD3FC2 \ No newline at end of file +BA3069FA4495A4594EE9FED9A482ACD54DD31EC7 \ No newline at end of file diff --git a/jstests/libs/crl.pem.digest.sha256 b/jstests/libs/crl.pem.digest.sha256 index 91a9465bb2e7c..c5c2a80863e27 100644 --- a/jstests/libs/crl.pem.digest.sha256 +++ b/jstests/libs/crl.pem.digest.sha256 @@ -1 +1 @@ -2A185DD7786D83315C2CE2493BF1FB308686FC5C80D575D930C36AF2A0A6741F \ No newline at end of file +13E613C19075A68CAA7F40E479AFB2B8F0D59A3DFAB5171C16BB8298D37FB591 \ No newline at end of file diff --git a/jstests/libs/crl_client_revoked.pem b/jstests/libs/crl_client_revoked.pem index 17e3d19f57a4e..2eede2bdc4aa1 100644 --- a/jstests/libs/crl_client_revoked.pem +++ b/jstests/libs/crl_client_revoked.pem @@ -1,12 +1,12 @@ -----BEGIN X509 CRL----- MIIB0DCBuTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwI TmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdv -REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIy -MDIwMzIyMDAyMVoXDTI0MDUwNjIyMDAyMVowFDASAgEEFw0yMjAyMDMyMjAwMjFa -MA0GCSqGSIb3DQEBCwUAA4IBAQCOZeTs2vJh5Eg0u+0Dpebx6tzJXShp3+p8AbDr -vrxkyDhKP4GuIHmBgJ12KaJUmBWtm4MucDxinWtNiTp3CeL7nuwmX7TGR8YQNL8/ -pDy0SfkT4AKe9V32OtySIFtMFhzcqNNFDu9H2p+Um3lywxoSyw+H64M3NL93IyeZ -3Dy0q25fPxpiP1tz9y0Q1TVIIX/SWxHpaCdPfANRYVSTwD+gRM27u3dezJ4K3w7w -wYGYXK019wIln4QouAm5mvKb3TxRlCn4ggM0npHmX9e/dr3acmlQ1QnoD8+Q8eER -+9mbf/yyH2rw2XZN/V67Xgri3ctoT0tUpdh91xP63KLitu/v +REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIz +MDYxNjE0MjkxNVoXDTI1MDkxNjE0MjkxNVowFDASAgEEFw0yMzA2MTYxNDI5MTVa +MA0GCSqGSIb3DQEBCwUAA4IBAQBX1uEdyMk0u0n3wraQAXHME8WMDq5daV9FLysD +OVovDObVN9XHuqTNH74ncpsxs+uq2x/E2QHu3wFpQ8SptuqQ7My9h6nXgtKfCrAS +QpYzF/8ucPGlFaTdF4HbMdVWRkL1DpLBJYbmBzh5e9CKJB3QpI7htPFeL6B9zqCc +JtDdFCwGdqlbnowdonCVV9p+ii8Mjr25wSnyPkhsDKUy1bEDnr5VxFVrIJ6JdVYv 
+8oN7moeUPT3cOxmtfj8HologrL/A16/9px063yW0J4Acrz1ZanyyFGz/gLsWRlOc +AMGdGrCI2Cqw8dDDCuCDYGUc4heJhkN02H8Wgjstn2l85evQ -----END X509 CRL----- diff --git a/jstests/libs/crl_client_revoked.pem.digest.sha1 b/jstests/libs/crl_client_revoked.pem.digest.sha1 index 31500641823c2..11f70f48fb3c4 100644 --- a/jstests/libs/crl_client_revoked.pem.digest.sha1 +++ b/jstests/libs/crl_client_revoked.pem.digest.sha1 @@ -1 +1 @@ -38C199A9F96DBBD3FE16E14BAC0C31D27F25AD3C \ No newline at end of file +0CA30BC9C4C5159DEBEB97E5C16B44EF8F761F24 \ No newline at end of file diff --git a/jstests/libs/crl_client_revoked.pem.digest.sha256 b/jstests/libs/crl_client_revoked.pem.digest.sha256 index d5121c25a8b5e..b2bc5df045b07 100644 --- a/jstests/libs/crl_client_revoked.pem.digest.sha256 +++ b/jstests/libs/crl_client_revoked.pem.digest.sha256 @@ -1 +1 @@ -BE4CD9973F7EAD168485E79475BC615FA2D3156D1CFC87AA918C09372E714BFB \ No newline at end of file +A45552882279FAEF4079226CACA2E9934550ACFF0E6A2D45AD8D906335621433 \ No newline at end of file diff --git a/jstests/libs/crl_expired.pem b/jstests/libs/crl_expired.pem index fc72792780b93..08570617efbfa 100644 --- a/jstests/libs/crl_expired.pem +++ b/jstests/libs/crl_expired.pem @@ -1,12 +1,12 @@ -----BEGIN X509 CRL----- MIIBujCBozANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwI TmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdv -REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIy -MDIwMzIyMDAyMVoXDTIyMDIwMzIyMDAyMlowDQYJKoZIhvcNAQELBQADggEBAKhw -Ph7SF8TwsEk4K9L8WrEooJCMqm52SuSjqg23HdpnMxnGw8QyrYd8zXPBOOj+K4Oi -QoVYUjH1tsEZsWdpP8ixFPPzKZUx2e/40XbMWKyUMeebCUHe3VPkchCzKIdrP26Z -1ZcLPfr3qhJyr3Jy2Cs4z4ysNm0wRO5P0bgE8FhBhOXvyhLTvomvKpMSlaU4Wy8/ -O3GGUlPOwtZ3xgW6kzJibd+CqCKxCgPxB8dlY3/Bbx/ECGh/n/k9u2AE/rIwsx2G -mO94LD/phdfN7gvJoDhwEtGKrQDC8NynNzF0NPPTMcO7lNP4ydcMmhyhXTUxsTaU -/K6+UalKCs2thRGPCXg= +REIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0EXDTIz +MDYxNjE0MjkxNVoXDTIzMDYxNjE0MjkxNlowDQYJKoZIhvcNAQELBQADggEBAE9w +lGiE612zemJtKOdYx2OLQMQt7lsz4Sqvgwt3/NGcrCB+wTiAnb3e/fcGRnk7xwtA +JN54PC4PgYbwUn48IEWEn3hqzjMHef62q3N5s6b1M/uVN9V+QXbkOi/f8Q71J9M/ +cLcj3gItc/r9nnxSAJwEDyk8kv5STrWfPN9fPDT73xqcch0GsHyOC+VuVycvjH9q +CukTvdEVWqtfijjMlpeucnCrjc6OioWZBTrcAgbv9jQIHXg3AjwUvSXnnGh3S+li +32pWZOA6mqEdWLmqq0Z74erypsvhHCb+MDSHt7tPxCwp65uBKVjzfSY6TK+bbPLN +XnyoWkX1aKaNi5mmcKk= -----END X509 CRL----- diff --git a/jstests/libs/crl_expired.pem.digest.sha1 b/jstests/libs/crl_expired.pem.digest.sha1 index e73095bcc0b01..46cf7e611c34b 100644 --- a/jstests/libs/crl_expired.pem.digest.sha1 +++ b/jstests/libs/crl_expired.pem.digest.sha1 @@ -1 +1 @@ -DC31CEE7C62EFA0FBCB8C1FA0947C5F4A1DD0006 \ No newline at end of file +6DCD7E3784AA03399ABB9227AFDC64541F13F00D \ No newline at end of file diff --git a/jstests/libs/crl_expired.pem.digest.sha256 b/jstests/libs/crl_expired.pem.digest.sha256 index 08d3c8f417ed0..919fd64492583 100644 --- a/jstests/libs/crl_expired.pem.digest.sha256 +++ b/jstests/libs/crl_expired.pem.digest.sha256 @@ -1 +1 @@ -FDCDEE458365A80D4B422A2350F5CE37223197A07C6905D44B40895D5921551C \ No newline at end of file +F4BC10F4A4D00603B6EA5D3FEA2B338E34CB910E320F002E215F4B786C664BFE \ No newline at end of file diff --git a/jstests/libs/ecdsa-ca-ocsp.crt b/jstests/libs/ecdsa-ca-ocsp.crt index cb1ba34d06089..107d07f9de1f5 100644 --- a/jstests/libs/ecdsa-ca-ocsp.crt +++ b/jstests/libs/ecdsa-ca-ocsp.crt @@ -1,13 +1,14 @@ -----BEGIN CERTIFICATE----- -MIIB9TCCAZygAwIBAgIEZKr7xjAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER 
+MIICFjCCAbugAwIBAgIEVEMOBzAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl -c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjB6MQsw +c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjB6MQsw CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UE AwwUS2VybmVsIFRlc3QgRVNDREEgQ0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC -AAS1km6C590H9n4iIbd7X1BxsXzCcgSIksEGGXDiAWKskZH9VCAwO6wOB0wssrk0 -bzTzw2yFj60wQqMux1qm6CPXoxAwDjAMBgNVHRMEBTADAQH/MAoGCCqGSM49BAMC -A0cAMEQCIFClJcZHoOYm/V5B63yLTTGToc1AsUgDA70OOPdH4V26AiArujh92W2L -BIicxzY8674zDv4QbI+I7KO7ejyoh57ftg== +AARJ0xOKeXlNeEj8uXEjyD9EzQX9UuxfvagpA5TqPHaeMCB7u69usoJS3Wjfn74n +UzVYRtoN4NAmYQyMoaWtg+fNoy8wLTAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBRg +exXgwe8CGDS9DRVEaZUiJqV5bDAKBggqhkjOPQQDAgNJADBGAiEAgcN9+OxIxXxQ +Q7EfAMhOYxVJMEEy8YTRqzLUxqD8SvECIQDR2En79y1wRmnyu7KqgaVf0mBJD1z8 +fXA5eXg8cYp1vA== -----END CERTIFICATE----- diff --git a/jstests/libs/ecdsa-ca-ocsp.key b/jstests/libs/ecdsa-ca-ocsp.key index 3292d5b68899a..944ad4aa04763 100644 --- a/jstests/libs/ecdsa-ca-ocsp.key +++ b/jstests/libs/ecdsa-ca-ocsp.key @@ -1,5 +1,5 @@ -----BEGIN PRIVATE KEY----- -MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgtKR7JQBHBc471ZOg -vjKYMWrNnl4poaypv0J4HXtf0a6hRANCAAS1km6C590H9n4iIbd7X1BxsXzCcgSI -ksEGGXDiAWKskZH9VCAwO6wOB0wssrk0bzTzw2yFj60wQqMux1qm6CPX +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg3B+jo0PJFdvRJW5Z +eItzbqoEvvLI1WWmRu/AxE/QhZmhRANCAARJ0xOKeXlNeEj8uXEjyD9EzQX9Uuxf +vagpA5TqPHaeMCB7u69usoJS3Wjfn74nUzVYRtoN4NAmYQyMoaWtg+fN -----END PRIVATE KEY----- diff --git a/jstests/libs/ecdsa-ca-ocsp.pem b/jstests/libs/ecdsa-ca-ocsp.pem index 53911568ca665..eb9029a96c9f4 100644 --- a/jstests/libs/ecdsa-ca-ocsp.pem +++ b/jstests/libs/ecdsa-ca-ocsp.pem @@ -1,18 +1,19 @@ -----BEGIN CERTIFICATE----- -MIIB9TCCAZygAwIBAgIEZKr7xjAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER +MIICFjCCAbugAwIBAgIEVEMOBzAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl -c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjB6MQsw +c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjB6MQsw CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UE AwwUS2VybmVsIFRlc3QgRVNDREEgQ0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC -AAS1km6C590H9n4iIbd7X1BxsXzCcgSIksEGGXDiAWKskZH9VCAwO6wOB0wssrk0 -bzTzw2yFj60wQqMux1qm6CPXoxAwDjAMBgNVHRMEBTADAQH/MAoGCCqGSM49BAMC -A0cAMEQCIFClJcZHoOYm/V5B63yLTTGToc1AsUgDA70OOPdH4V26AiArujh92W2L -BIicxzY8674zDv4QbI+I7KO7ejyoh57ftg== +AARJ0xOKeXlNeEj8uXEjyD9EzQX9UuxfvagpA5TqPHaeMCB7u69usoJS3Wjfn74n +UzVYRtoN4NAmYQyMoaWtg+fNoy8wLTAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBRg +exXgwe8CGDS9DRVEaZUiJqV5bDAKBggqhkjOPQQDAgNJADBGAiEAgcN9+OxIxXxQ +Q7EfAMhOYxVJMEEy8YTRqzLUxqD8SvECIQDR2En79y1wRmnyu7KqgaVf0mBJD1z8 +fXA5eXg8cYp1vA== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgtKR7JQBHBc471ZOg -vjKYMWrNnl4poaypv0J4HXtf0a6hRANCAAS1km6C590H9n4iIbd7X1BxsXzCcgSI -ksEGGXDiAWKskZH9VCAwO6wOB0wssrk0bzTzw2yFj60wQqMux1qm6CPX +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQg3B+jo0PJFdvRJW5Z +eItzbqoEvvLI1WWmRu/AxE/QhZmhRANCAARJ0xOKeXlNeEj8uXEjyD9EzQX9Uuxf +vagpA5TqPHaeMCB7u69usoJS3Wjfn74nUzVYRtoN4NAmYQyMoaWtg+fN -----END PRIVATE KEY----- diff --git 
a/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha1 b/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha1 index 25678664c4fef..0dd82c6fc9832 100644 --- a/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha1 +++ b/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha1 @@ -1 +1 @@ -BD9FC892A958AE5A5753EBDA314675D528B05156 \ No newline at end of file +EB3A609AE91AE0373E050066CF9DCCC2992FB044 \ No newline at end of file diff --git a/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha256 b/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha256 index 98047d84bda96..56c7b038dddd9 100644 --- a/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha256 +++ b/jstests/libs/ecdsa-ca-ocsp.pem.digest.sha256 @@ -1 +1 @@ -31ED10340FB5812D5720BF34902548EFB6ED81E5B5AA11CA3647CDF079E25124 \ No newline at end of file +920526AD79D3CC2EC14E1C59ACB4BEE76BDB6803375C4785A457C3CB97B1B8F2 \ No newline at end of file diff --git a/jstests/libs/ecdsa-ca.pem b/jstests/libs/ecdsa-ca.pem index 0fbe387263f69..58cc22e890a96 100644 --- a/jstests/libs/ecdsa-ca.pem +++ b/jstests/libs/ecdsa-ca.pem @@ -4,23 +4,24 @@ # Root of ECDSA tree. -----BEGIN CERTIFICATE----- -MIIB9jCCAZygAwIBAgIEKUUq3TAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER +MIICFDCCAbugAwIBAgIEKbnGsDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl -c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjB6MQsw +c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjB6MQsw CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UE AwwUS2VybmVsIFRlc3QgRVNDREEgQ0EwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNC -AAS6/8jcHeK3U6THarZbEWw7UJYKA45ahLNpOQxTni6CwpFjyaFKRqnv8OUK09v5 -58X4U4yGKpTJi6MB4U8c7+8zoxAwDjAMBgNVHRMEBTADAQH/MAoGCCqGSM49BAMC -A0gAMEUCIQDe1DKE44zvfcc1PYUqTqtPNtVb513kSitIizNsRhkhRAIgHcbGd1j4 -cZ91P8FlKg8NizbShq7hrbpni7urkmNoFXQ= +AARFSZplmlIJErVVOGWJPbQ0Fv8cIrwaCkL31rQb3Dea0rCd+5vyFk7qOhX/OfZB +bC7Q+9ZhDDfNZVDZXGl1omdooy8wLTAMBgNVHRMEBTADAQH/MB0GA1UdDgQWBBTm +gV6I/ikYioQ9vv2qei0uiY5guTAKBggqhkjOPQQDAgNHADBEAiBJnsgI3P+AtpVh +8oDDvN61dGJkCw/9VEfzV5Px8yxixwIgd96Cm9aZDsDijlhfbkxvgAaSm7CxOJI6 +gn2fotwvt3E= -----END CERTIFICATE----- -----BEGIN EC PARAMETERS----- BggqhkjOPQMBBw== -----END EC PARAMETERS----- -----BEGIN EC PRIVATE KEY----- -MHcCAQEEIIqvI0YJXBaQAGdgoSL6MLBR2LbmKGpZciebA61vPdiYoAoGCCqGSM49 -AwEHoUQDQgAEuv/I3B3it1Okx2q2WxFsO1CWCgOOWoSzaTkMU54ugsKRY8mhSkap -7/DlCtPb+efF+FOMhiqUyYujAeFPHO/vMw== +MHcCAQEEIIuWnsOUBRrBqZ2Ceb9HavbN8VNJX3mv7QpFoqD5csGZoAoGCCqGSM49 +AwEHoUQDQgAERUmaZZpSCRK1VThliT20NBb/HCK8GgpC99a0G9w3mtKwnfub8hZO +6joV/zn2QWwu0PvWYQw3zWVQ2VxpdaJnaA== -----END EC PRIVATE KEY----- diff --git a/jstests/libs/ecdsa-ca.pem.digest.sha1 b/jstests/libs/ecdsa-ca.pem.digest.sha1 index 0f43c9996e023..8570461d56505 100644 --- a/jstests/libs/ecdsa-ca.pem.digest.sha1 +++ b/jstests/libs/ecdsa-ca.pem.digest.sha1 @@ -1 +1 @@ -BEAD07ADF2EC1A7D46C2094B72B03510101425E1 \ No newline at end of file +2E1DAC1D8204F57CE6F22BE35E634F898E9E4D7D \ No newline at end of file diff --git a/jstests/libs/ecdsa-ca.pem.digest.sha256 b/jstests/libs/ecdsa-ca.pem.digest.sha256 index 06559f08adfa8..be1c2dc8dc2cc 100644 --- a/jstests/libs/ecdsa-ca.pem.digest.sha256 +++ b/jstests/libs/ecdsa-ca.pem.digest.sha256 @@ -1 +1 @@ -9965666BB50F095320BFEA5B57535FDEF82060F38155AE7287C2742A0A24E4A5 \ No newline at end of file +91F2E4499375EA8F989DD56981FA945E8FBD23BF845DA3283BAF6FB37042CBF4 \ No newline at end of file diff --git a/jstests/libs/ecdsa-client.pem b/jstests/libs/ecdsa-client.pem 
index bb07191190076..b522826073300 100644 --- a/jstests/libs/ecdsa-client.pem +++ b/jstests/libs/ecdsa-client.pem @@ -4,22 +4,22 @@ # Client certificate for ECDSA tree. -----BEGIN CERTIFICATE----- -MIIB1jCCAXsCBGp/B5IwCgYIKoZIzj0EAwIwejELMAkGA1UEBhMCVVMxETAPBgNV +MIIB1jCCAXsCBA+/nyYwCgYIKoZIzj0EAwIwejELMAkGA1UEBhMCVVMxETAPBgNV BAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQKDAdN b25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxHTAbBgNVBAMMFEtlcm5lbCBUZXN0IEVT -Q0RBIENBMB4XDTIyMDIwMzIxNTk0OFoXDTI0MDUwNzIxNTk0OFowcDELMAkGA1UE +Q0RBIENBMB4XDTIzMDYxNjE0Mjg0OFoXDTI1MDkxNzE0Mjg0OFowcDELMAkGA1UE BhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5 MRAwDgYDVQQKDAdNb25nb0RCMRMwEQYDVQQLDApLZXJuZWxVc2VyMQ8wDQYDVQQD -DAZjbGllbnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAASTbCS0Taz+os56KvG/ -xJGEq4P8rPQ/QWJLjS0t/132O9/XamI90yoHscLOI8AY7RUvnzCGIYOeQyxMuBbZ -4LtYMAoGCCqGSM49BAMCA0kAMEYCIQDZGbOXz1ewIJ3yyVmxYpf7b3oOvtoGR3Hm -MPrcRAK36AIhAKASzLijFUTbtuQXI6+IIE9XdLUXDQSsjJ5TfvTmehX8 +DAZjbGllbnQwWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAQcFnNCI6lDtSbHUJfK +mSVs4MCvalE1xSc8uxPmcfpEfFhJoXEl7NyNxpytRA5Sv2dtrW/Y6EAOmIUrod7d +7+BqMAoGCCqGSM49BAMCA0kAMEYCIQDyoR6LmZiL5gD7tgNIza3q7GEzZXnLacLe +N+JWjYohRAIhAKYY6aNFJUCr4SR/Z8p5z+5/A2E5bNxr1OBuhu6aSPPB -----END CERTIFICATE----- -----BEGIN EC PARAMETERS----- BggqhkjOPQMBBw== -----END EC PARAMETERS----- -----BEGIN EC PRIVATE KEY----- -MHcCAQEEIDRLYogqXDUrJAEf4YxkZyDdcwdPev2538AArwQP/nfXoAoGCCqGSM49 -AwEHoUQDQgAEk2wktE2s/qLOeirxv8SRhKuD/Kz0P0FiS40tLf9d9jvf12piPdMq -B7HCziPAGO0VL58whiGDnkMsTLgW2eC7WA== +MHcCAQEEIK0mO6ZZ7h8qTWeJWGFYeO3E/JENne5OTfwwDZt6AYfaoAoGCCqGSM49 +AwEHoUQDQgAEHBZzQiOpQ7Umx1CXypklbODAr2pRNcUnPLsT5nH6RHxYSaFxJezc +jcacrUQOUr9nba1v2OhADpiFK6He3e/gag== -----END EC PRIVATE KEY----- diff --git a/jstests/libs/ecdsa-client.pem.digest.sha1 b/jstests/libs/ecdsa-client.pem.digest.sha1 index d7d0154a6974d..062dae859f0bb 100644 --- a/jstests/libs/ecdsa-client.pem.digest.sha1 +++ b/jstests/libs/ecdsa-client.pem.digest.sha1 @@ -1 +1 @@ -5C7CDA84DF0FAE87B0BF77C897DA1F70E6DCD84B \ No newline at end of file +DF341DE8AACD9B18F225CC5706428672D9877FA5 \ No newline at end of file diff --git a/jstests/libs/ecdsa-client.pem.digest.sha256 b/jstests/libs/ecdsa-client.pem.digest.sha256 index 6dc88a01efb65..ca899d9f6c9bf 100644 --- a/jstests/libs/ecdsa-client.pem.digest.sha256 +++ b/jstests/libs/ecdsa-client.pem.digest.sha256 @@ -1 +1 @@ -9523EFD9443380AAA6C9A618369E40E3C6D7E011D40C307749B89A0A332A2077 \ No newline at end of file +CA4DA0541E28DDB2ED59124FBE24213BD610AEAF4AA1661A2D4D821C24607AC5 \ No newline at end of file diff --git a/jstests/libs/ecdsa-ocsp-responder.crt b/jstests/libs/ecdsa-ocsp-responder.crt index 1e2b0c87f93be..1e9c469ec8e93 100644 --- a/jstests/libs/ecdsa-ocsp-responder.crt +++ b/jstests/libs/ecdsa-ocsp-responder.crt @@ -1,15 +1,16 @@ -----BEGIN CERTIFICATE----- -MIICVjCCAfygAwIBAgIEEsVJKDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER +MIICeDCCAh+gAwIBAgIEeHFXFDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl -c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjBsMQsw +c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjBsMQsw CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UE -AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEAbp6SVU9kZym6alv -ePcUYyze4KtdvTSCqrlCvhPQMJXkraG3DQdaADBGHUAhp6V6qFF19j0uP/rmgYv4 
-SV82l6N+MHwwCQYDVR0TBAIwADAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEw -HQYDVR0OBBYEFNZtZQGDw+WGi0SIQeORd5GjEQ25MAsGA1UdDwQEAwIF4DAnBgNV -HSUEIDAeBggrBgEFBQcDAQYIKwYBBQUHAwIGCCsGAQUFBwMJMAoGCCqGSM49BAMC -A0gAMEUCIQC4Uihq24KsC0qSx6OMaQyzVsbtuyL/sCvKtSAvfyG8wAIgEOdY/kVF -7vfW+oZ5Rlo6i281FvIDJFCMOqViN8voUks= +AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEPH7mFDsURQ4W7lPd +RCM8VdiRf42EzconV51yGoDpcZSPu4KQXdi++WkxT9p0idyOGI8/0cnS1UnFIaG8 +nRel46OBoDCBnTAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA +ATAdBgNVHQ4EFgQULQhUl0tD80eJzcBw9F3aBOMT8EwwCwYDVR0PBAQDAgXgMCcG +A1UdJQQgMB4GCCsGAQUFBwMBBggrBgEFBQcDAgYIKwYBBQUHAwkwHwYDVR0jBBgw +FoAUYHsV4MHvAhg0vQ0VRGmVIialeWwwCgYIKoZIzj0EAwIDRwAwRAIgA5qToMdE +qMQE11sqxPxpwjvdxhdA0GrN8nDbGlo478cCIAZoUMvgFv/mtpi9VxEC5YMTNOq+ +iJpZyIwDck79Mss9 -----END CERTIFICATE----- diff --git a/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha1 b/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha1 index cd03ff2284b56..8ebbeac1a43cd 100644 --- a/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha1 +++ b/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha1 @@ -1 +1 @@ -6B77279E773353A64975775754716390E0728404 \ No newline at end of file +3C0AECADD99DE1FC5B7C5A81144C0ECB26BB3519 \ No newline at end of file diff --git a/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha256 b/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha256 index bf65889e5aaa6..98cfab83b8872 100644 --- a/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha256 +++ b/jstests/libs/ecdsa-ocsp-responder.crt.digest.sha256 @@ -1 +1 @@ -4B61C7A3DE928F0D8F3E40B80856FD43762DB8285BB1434C5D6E167C145544E2 \ No newline at end of file +6D5091B5E09F162903FA1E55B83D002BF8BE56A4D9B0732410191219B7A077BA \ No newline at end of file diff --git a/jstests/libs/ecdsa-ocsp-responder.key b/jstests/libs/ecdsa-ocsp-responder.key index fc07aeee8e612..09996b46b40d6 100644 --- a/jstests/libs/ecdsa-ocsp-responder.key +++ b/jstests/libs/ecdsa-ocsp-responder.key @@ -1,5 +1,5 @@ -----BEGIN PRIVATE KEY----- -MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgX827/4CkJz/ML3eC -NCGnNJ/3r3GMfIkDXP7bTF8AD32hRANCAAQBunpJVT2RnKbpqW949xRjLN7gq129 -NIKquUK+E9AwleStobcNB1oAMEYdQCGnpXqoUXX2PS4/+uaBi/hJXzaX +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgqSXSuRsmntIYB4B3 +Unvh261XDac/D7atXcTLfvxHXPmhRANCAAQ8fuYUOxRFDhbuU91EIzxV2JF/jYTN +yidXnXIagOlxlI+7gpBd2L75aTFP2nSJ3I4Yjz/RydLVScUhobydF6Xj -----END PRIVATE KEY----- diff --git a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem index e8c538081e05f..da951a1291f0c 100644 --- a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem +++ b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem @@ -1,22 +1,23 @@ -----BEGIN CERTIFICATE----- -MIICyjCCAnCgAwIBAgIEcvbiOTAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER +MIIC7TCCApOgAwIBAgIEOdtbgzAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl -c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjBsMQsw +c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjBsMQsw CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UE -AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEG1jvLcSrSPHYE2k9 -AYUo2AcBnxhQSlWjc6qGdskFPraSzpcAmgJe93mIeqoOkMhp5WTd2VKh1wNnJQ/z -YMftz6OB8TCB7jAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA -ATAdBgNVHQ4EFgQUvihhabxAnZflf5IfG7fSRtq1JsowCwYDVR0PBAQDAgWgMB0G -A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBnBggrBgEFBQcBAQRbMFkwLQYI 
-KwYBBQUHMAGGIWh0dHA6Ly9sb2NhbGhvc3Q6OTAwMS9wb3dlci9sZXZlbDAoBggr -BgEFBQcwAYYcaHR0cDovL2xvY2FsaG9zdDo4MTAwL3N0YXR1czARBggrBgEFBQcB -GAQFMAMCAQUwCgYIKoZIzj0EAwIDSAAwRQIhAPVGh3LTQEQnQTxd6Op1cxoK10GA -Seev3F0madq2Upo0AiAib1IpBJbsUR6h9AAqQKqGUGKlMOvalRmb5fN9xjkl8A== +AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE3TkE9MiEGYhA5i5n +k7uiDcNaXeHhAtI6bM9I3wB81nqAtPwl/fL59GRCeR1el0Wu7W0c6xF+DWKIlxlU +zhwGYaOCARMwggEPMAkGA1UdEwQCMAAwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/ +AAABMB0GA1UdDgQWBBQ56EENV6yAt07fnIptwE4qIgFuMDALBgNVHQ8EBAMCBaAw +HQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMGcGCCsGAQUFBwEBBFswWTAt +BggrBgEFBQcwAYYhaHR0cDovL2xvY2FsaG9zdDo5MDAxL3Bvd2VyL2xldmVsMCgG +CCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3RhdHVzMBEGCCsGAQUF +BwEYBAUwAwIBBTAfBgNVHSMEGDAWgBRgexXgwe8CGDS9DRVEaZUiJqV5bDAKBggq +hkjOPQQDAgNIADBFAiEAxHccgj+Ko4fg9oL9Mo+MpU6zU1/G8HQ2r5PuUPbE8OcC +IAx08piNUhfVVt/zaZlfLX8gIXz8mmbmOBMbmMFjMx8N -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgh0QNI4XmNLg8gocs -fl16geJLuecIxjpvW9MY2JZ3vmWhRANCAAQbWO8txKtI8dgTaT0BhSjYBwGfGFBK -VaNzqoZ2yQU+tpLOlwCaAl73eYh6qg6QyGnlZN3ZUqHXA2clD/Ngx+3P +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgVRzSZON1ISgXftxt +H8VyOuD6jTdxbnO203MqWdPFid2hRANCAATdOQT0yIQZiEDmLmeTu6INw1pd4eEC +0jpsz0jfAHzWeoC0/CX98vn0ZEJ5HV6XRa7tbRzrEX4NYoiXGVTOHAZh -----END PRIVATE KEY----- diff --git a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha1 b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha1 index a83437d1a90cb..9ac8ecfad77ab 100644 --- a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha1 +++ b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha1 @@ -1 +1 @@ -8405431F55B0E4B422C0421B7FADC3160C72A494 \ No newline at end of file +52575E280DBCD17BE2BB16F99041B8B9B90FAD21 \ No newline at end of file diff --git a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha256 b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha256 index 574e330333d61..ea4da518fb056 100644 --- a/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha256 +++ b/jstests/libs/ecdsa-server-ocsp-mustStaple.pem.digest.sha256 @@ -1 +1 @@ -27893987C39D98357BBC3758EDBB7157E78E000BD373128ED38346E7F176C90F \ No newline at end of file +6EC581765A649E6D2D8E283E1181A7E32327FA9E9C68159E280D6675A4EE6422 \ No newline at end of file diff --git a/jstests/libs/ecdsa-server-ocsp.pem b/jstests/libs/ecdsa-server-ocsp.pem index 2fe18cc70f51e..065a27ec0b1bf 100644 --- a/jstests/libs/ecdsa-server-ocsp.pem +++ b/jstests/libs/ecdsa-server-ocsp.pem @@ -1,22 +1,23 @@ -----BEGIN CERTIFICATE----- -MIICtzCCAl2gAwIBAgIEQVAUIjAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER +MIIC2TCCAn6gAwIBAgIEc3mdQDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl -c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjBsMQsw +c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjBsMQsw CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UE -AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEJJWlKXhZYhkGm7KD -/4s5zG3FTZqxCkbF7LVTKUyEWU2oVE/X555ANKo7F4sNTh4kJYH9R9BNWm/ckr9F -F4lGRKOB3jCB2zAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA -ATAdBgNVHQ4EFgQUBA+bPOsot328qMyJD1GNNsM5MpEwCwYDVR0PBAQDAgWgMB0G +AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEGj966QCvYqydgxz5 +fX5JJZRb3UwGZiczFwBj0iTrpI6c1ISOCzWrbq3v0UQUlW38gE11/+M7iPfJ8z+K 
+HLra16OB/zCB/DAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA +ATAdBgNVHQ4EFgQUsbX4B4hzBcFM7ZQj66tB4VZZRG0wCwYDVR0PBAQDAgWgMB0G A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjBnBggrBgEFBQcBAQRbMFkwLQYI KwYBBQUHMAGGIWh0dHA6Ly9sb2NhbGhvc3Q6OTAwMS9wb3dlci9sZXZlbDAoBggr -BgEFBQcwAYYcaHR0cDovL2xvY2FsaG9zdDo4MTAwL3N0YXR1czAKBggqhkjOPQQD -AgNIADBFAiEA94hM6CSWcr65vB6E2+WaFe0MLzYm+tEWPlLedT4BQekCIDRk8Ww+ -jUPFWz7Pz2lOqmH/FZWxVn1GEbeDHZZljk3P +BgEFBQcwAYYcaHR0cDovL2xvY2FsaG9zdDo4MTAwL3N0YXR1czAfBgNVHSMEGDAW +gBRgexXgwe8CGDS9DRVEaZUiJqV5bDAKBggqhkjOPQQDAgNJADBGAiEA+W11lMP6 +iSS+01h1eAHPQzutZIdZ76rTCnMt0W82YxICIQC/UHjCg/YYEB1b+g+A7GR4TWjq +47Ex/m6/+Vs6918z2Q== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgOEISSH6wYpjSExnT -fpQW8Dan60vNlFYIL1Q2xFPFzluhRANCAAQklaUpeFliGQabsoP/iznMbcVNmrEK -RsXstVMpTIRZTahUT9fnnkA0qjsXiw1OHiQlgf1H0E1ab9ySv0UXiUZE +MIGHAgEAMBMGByqGSM49AgEGCCqGSM49AwEHBG0wawIBAQQgSoduw+gdLkgClffE +IFmoF0pj9aGdT1q8RcI05flCQzahRANCAAQaP3rpAK9irJ2DHPl9fkkllFvdTAZm +JzMXAGPSJOukjpzUhI4LNature/RRBSVbfyATXX/4zuI98nzP4ocutrX -----END PRIVATE KEY----- diff --git a/jstests/libs/ecdsa-server-ocsp.pem.digest.sha1 b/jstests/libs/ecdsa-server-ocsp.pem.digest.sha1 index a2ce1449f8009..f38fb8b9960a5 100644 --- a/jstests/libs/ecdsa-server-ocsp.pem.digest.sha1 +++ b/jstests/libs/ecdsa-server-ocsp.pem.digest.sha1 @@ -1 +1 @@ -2D09B949B8BA53416B40B64C193F1962E5BE3A0D \ No newline at end of file +6254B29BE13CB7F86C8A93C029B89D76DB705525 \ No newline at end of file diff --git a/jstests/libs/ecdsa-server-ocsp.pem.digest.sha256 b/jstests/libs/ecdsa-server-ocsp.pem.digest.sha256 index 048ddee7588e9..9fe4092b9b369 100644 --- a/jstests/libs/ecdsa-server-ocsp.pem.digest.sha256 +++ b/jstests/libs/ecdsa-server-ocsp.pem.digest.sha256 @@ -1 +1 @@ -98BC40243CB5D19B7DEB912AF02598F668DE83B2CD77EFCAA69E3C5A07E26B01 \ No newline at end of file +FD73A26040F9EB153BC74560125F89547F88CD83753FE2904E81F18C49A0D6C7 \ No newline at end of file diff --git a/jstests/libs/ecdsa-server.pem b/jstests/libs/ecdsa-server.pem index 9a5dfadfc7b10..194b98cf5a0cb 100644 --- a/jstests/libs/ecdsa-server.pem +++ b/jstests/libs/ecdsa-server.pem @@ -4,25 +4,26 @@ # Server certificate for ECDSA tree. 
-----BEGIN CERTIFICATE----- -MIICTTCCAfKgAwIBAgIEVfZqODAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER +MIICcDCCAhWgAwIBAgIERpwkVDAKBggqhkjOPQQDAjB6MQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEdMBsGA1UEAwwUS2VybmVsIFRl -c3QgRVNDREEgQ0EwHhcNMjIwMjAzMjE1OTQ4WhcNMjQwNTA3MjE1OTQ4WjBsMQsw +c3QgRVNDREEgQ0EwHhcNMjMwNjE2MTQyODQ4WhcNMjUwOTE3MTQyODQ4WjBsMQsw CQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3Jr IENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UE -AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEkIPr8LJPhlw1aQyN -6ZTN5zvdOVMBhKR66DFcyLsSjZTwdaYgrTdoHuRg4cY/mDfOjykn93QaXXiYELWO -FLGxFKN0MHIwCQYDVR0TBAIwADAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEw -HQYDVR0OBBYEFIQvX1T4/vSJNRIFuL1wFsP3e9SEMAsGA1UdDwQEAwIFoDAdBgNV -HSUEFjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwCgYIKoZIzj0EAwIDSQAwRgIhAKWy -vhMAorPe5oCXXM5bCHIyoq8lis9e8jU3ewBTnZ6+AiEA2hx0vQXogjX1T8zqn/2K -XcOWOCaP5UWWsZU9F3tOtJg= +AwwGc2VydmVyMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEbcTv1kEFjReoKnQr +kt6NUKFKouB/Fq8czR9xGNsEflTiqzC1gUnWXWCSZAGqsdNml4XnviRxHxu+FZsg +MciDD6OBljCBkzAJBgNVHRMEAjAAMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA +ATAdBgNVHQ4EFgQUyteUn3L8KvqutJkC1FiSPa5DHbYwCwYDVR0PBAQDAgWgMB0G +A1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAfBgNVHSMEGDAWgBTmgV6I/ikY +ioQ9vv2qei0uiY5guTAKBggqhkjOPQQDAgNJADBGAiEAriAXYcaueuIJitGKcuz1 +akx+Cp6KPL62V6hxCytuyqECIQC4SrxeMhrkvrfYE+HwSpSoM4adoeubvWO/Trcq +f/3c/g== -----END CERTIFICATE----- -----BEGIN EC PARAMETERS----- BggqhkjOPQMBBw== -----END EC PARAMETERS----- -----BEGIN EC PRIVATE KEY----- -MHcCAQEEIFpM+Xqp9Ga+tlNOh4qhoqooGCAI4dTgPn7bzOVjy/B3oAoGCCqGSM49 -AwEHoUQDQgAEkIPr8LJPhlw1aQyN6ZTN5zvdOVMBhKR66DFcyLsSjZTwdaYgrTdo -HuRg4cY/mDfOjykn93QaXXiYELWOFLGxFA== +MHcCAQEEIJrVwEvvtjj6vLrCDXK00GtzS3/sDyMNnr90GR7rdv9FoAoGCCqGSM49 +AwEHoUQDQgAEbcTv1kEFjReoKnQrkt6NUKFKouB/Fq8czR9xGNsEflTiqzC1gUnW +XWCSZAGqsdNml4XnviRxHxu+FZsgMciDDw== -----END EC PRIVATE KEY----- diff --git a/jstests/libs/ecdsa-server.pem.digest.sha1 b/jstests/libs/ecdsa-server.pem.digest.sha1 index c67f3477ce1d3..6c2b33a4d78b9 100644 --- a/jstests/libs/ecdsa-server.pem.digest.sha1 +++ b/jstests/libs/ecdsa-server.pem.digest.sha1 @@ -1 +1 @@ -3124F81F8BE74AFE7FB3EEC1CFA17BDF48A97A68 \ No newline at end of file +CD4C1D75223860723FA2C288418E9CB6F28C740D \ No newline at end of file diff --git a/jstests/libs/ecdsa-server.pem.digest.sha256 b/jstests/libs/ecdsa-server.pem.digest.sha256 index ea1854935ae70..0ae974bb709f0 100644 --- a/jstests/libs/ecdsa-server.pem.digest.sha256 +++ b/jstests/libs/ecdsa-server.pem.digest.sha256 @@ -1 +1 @@ -6AE713FACA4950A79F38272F40B72DF973206468DD88B6F528C7C2C236AE03B4 \ No newline at end of file +BBD9CEDC767FDB365178E0B4FC96ED56F91C8CBA2FA5127B93C8DE00AAFDE53D \ No newline at end of file diff --git a/jstests/libs/expired.pem b/jstests/libs/expired.pem index 4106ff57118cc..9349fddc0e1c5 100644 --- a/jstests/libs/expired.pem +++ b/jstests/libs/expired.pem @@ -3,52 +3,52 @@ # # A certificate which has passed its expiration date. 
-----BEGIN CERTIFICATE----- -MIIDkjCCAnqgAwIBAgIES0ioJDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDkjCCAnqgAwIBAgIENQFz0TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjExMDExMDQxMzA0WhcNMjIwMTIzMDgxMzA0WjBtMQswCQYD +IFRlc3QgQ0EwHhcNMjMwMjIwMjA0MTU5WhcNMjMwNjA1MDA0MTU5WjBtMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEQMA4GA1UEAwwH -ZXhwaXJlZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMOX7aXgIpMi -1lLqNmsqmjg6N+hg0Mf63N7ZCPqwOSaGUo4lqfgR27RdQR9SI5JIDpFu0XUWpDQi -+w+eH7i45VQJ3DE62bB1dW1WGnZOWOTH6uEfEX/kGjJBYjUInliERyvqINiyeAA+ -deqKIjzlny8APqzSgr2vnffKRxLzjN2JV4CLWPmXzQ0p8E6DmXgNHO3gcEviwqDg -OP6PEdWAAbPQlutk4P34LYdnvDoQMtaTd8s7wQYxoU7nXy+iRh1ZOQB6Dj1nMCdX -9sblA0cO1zVNRAJmlQBVlVmB0pfntRvs5OWywoXN7uVy9WI3dC4qU8k2APsnnWur -Y1PYAGalCjUCAwEAAaMzMDEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYDVR0RBBMw -EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQDetGkeZ2NsmsRm -y64CYd9bq1QRi72Px0lnL/uVwX6v/LQBX/2TnkFIXJ/ICW0FyjZppsd4inLoXEH3 -guOfTrWlkD8OPNZmdNzMZf7i1JBtzKQVSyoX3J8zllPiYdfmWwUuqd2GnBwebzo7 -Sz5zVPhb0eQR+kdWT0tA3yzs9Ox+6MyDeWsHN4aBEDfezi170t2Ax/HVU86HA58I -V8wPeXtRtN97bQYcOlGYElRA0VKygyAC2nVkxl1PjSKU7SEYTeHVp0qYCy2NhSA6 -n1DI3fz+ZccmAfKAxqy9ggvHmgnhRFoAtFeMQmAWB74/do2ZHCx3LE/EAv7sDXTk -2SqCkSng +ZXhwaXJlZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOpiZhK3zhWC +Rmo714SuRmGvB+vsGtkc8UIBwKoA7PrcvveyQtBobG1jgKZAwLVGb7MAfzgvZB8d +QWJpoHfjHMCKxW8PEvOi9RgpiMfAfMDVGLCSliGQ4TlP3PQCQHbVi2rjlYyBIL5D +/JubBM6XpZrXQSXFNTzrOyWuMrqtgAICfoSTZGQXj3v5sqcVjb1+pN0IeuHFiTP6 +UPZ5JLGy5VxU2I3XZ2yxh/iBJWNSiL/38TGJRb5wXEQ7hxrU8guYQvtIqmMxCOuJ +6qOMrPwsgQxwLfyICC6V/hCQJqo9aJAz+F6vrQ0DHe3stx9gVe9NvRXPQg9jnFQ+ +fJbYYSkiymMCAwEAAaMzMDEwEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYDVR0RBBMw +EYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAUrnrVrJn7Usuc +IKdK7FFJ26PAkxeRUPZIluwztdW/mQYMFctnaGgxkUr+iRQAfk8mwUikpsp2PpUL +oj+MvZwxaCzgRILdP5soMW+hfr9tpQRXHsMKStXxPLp9xaWTcRuVpuKO/cM1kBG0 +XiAupsf4chXrsWy2lJhUMTzbMcB2hhvnW9d5J8ducXb34WHB9E0zsvtVQ4t4zMoB +ulNzuUUu+A1Nh6Xb/SQxAmPRrwRV4DCE37Gp05EIPHiaatG5S8/oQoIqIXsweyQC +oJo2mD4a+cNsKRqzYldtJEjrkMVDZ/tH4XCUaFFlCvXTh16Y5zvWj5eynkfCnOVe +T7xIog+S -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDDl+2l4CKTItZS -6jZrKpo4OjfoYNDH+tze2Qj6sDkmhlKOJan4Edu0XUEfUiOSSA6RbtF1FqQ0IvsP -nh+4uOVUCdwxOtmwdXVtVhp2Tljkx+rhHxF/5BoyQWI1CJ5YhEcr6iDYsngAPnXq -iiI85Z8vAD6s0oK9r533ykcS84zdiVeAi1j5l80NKfBOg5l4DRzt4HBL4sKg4Dj+ -jxHVgAGz0JbrZOD9+C2HZ7w6EDLWk3fLO8EGMaFO518vokYdWTkAeg49ZzAnV/bG -5QNHDtc1TUQCZpUAVZVZgdKX57Ub7OTlssKFze7lcvViN3QuKlPJNgD7J51rq2NT -2ABmpQo1AgMBAAECggEAa1SkoUCkWG9mgutpX1dqd25jHY57IzIjH9qjVcV8JwrQ -pJZAdhN+p7QAt2pAgNYvwFi6cfExvDZx5LkVmS1FEt+ySAWOTYm3HX+BV+hYWXrn -BhAhbzfBtFAyzv1ivxofYBbDXYZZ2XBtDn4smZQ7zPPqcLW37tU+7ym95MVnSG1O -c383X+hnsLyMo10lQdnD+0GLGVoVtOEQkX3vDM4ckejwsn3GUsnsasXl4pwESG/l -e/lSqaAE5zv0jJ4Vd+pvmFA/QxnnbRzWFr5YAfZTq84VG7Ila9pAxQKsx+5hVzKH -eMS+BrjkG3ZjJihRAP1BYl8+uZLoa9fAEb/K5aGkBQKBgQDshRWLe1OVr4GrwGmA -M828l+ASvQgZr8iZfFC5sbB8CeDz5/6IFLXSLc3oAcUAdJzEq7jMLaqwvyNy4aeT -vOYS0K94njk3l+mGtY+H1NYdzK9D306zSjIeUqA9efUCEm/4v323IpMVCF4s5MMU -gLVQZ8hNIJKjjx+Nm1AMTWl0AwKBgQDTs+3w+kraf3JfG4EBYI6w5vNCBlJHDni+ -ZEOmmd5cgxR/sEZJiAbfsNs4pnHqKboQxFU+lgH2tcktkcN4rpFvN2bt865l2XCX -3NqR8gJbN3kroAi7m0ah9p6ZfZGYT3S3nsjjeKZaz7dkZNtrP/zxrC6IbsamAEeO -NnFa4rkfZwKBgQCN3UFP+TfoR23io7VkBS542Su4cZODPLF4hl0xFEhEDfTUtykv 
-XCS2nWyspSsRm+BQAqQEK7v+6ZaMmJbYAWyKiotFMLonA0I533rowRtwok3Zyv48 -gdtP3sVoOldf18k7jTgNeXp5GhzKlqgACzc1tBorUMdDvVvypt67kk8XGwKBgGw8 -qyOwpEKwNGaB5mQw3ON/QNH572ka8grNsnTkaHa+IeXi1xzTKkiF4J9HIijJqrLV -3ouAIILlkHT2+IykHAFTvxFbrEewc0uBhFJn/GJsq0vtp2lbyzIVqCLru1u3DAKx -cR4z2kHFv4rZsgFUltts3+GrWW8X533DQhNopaNzAoGAbekygXWWCXX6WeGhvshe -JTWf96Blmy7ZLDlOy7D7Ns3536ALcl/tPtL2wOqmz/9rBTHKX/v7k+yaaysI3vn4 -+79/ig8mSqEA1nHgA3JIILvV3C3r8RKoGsrw/6LZDynANrcHiiR+aHlrdaHsh2o1 -0FQr/GIVnWR/Z6gaB39T4xM= +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDqYmYSt84VgkZq +O9eErkZhrwfr7BrZHPFCAcCqAOz63L73skLQaGxtY4CmQMC1Rm+zAH84L2QfHUFi +aaB34xzAisVvDxLzovUYKYjHwHzA1RiwkpYhkOE5T9z0AkB21Ytq45WMgSC+Q/yb +mwTOl6Wa10ElxTU86zslrjK6rYACAn6Ek2RkF497+bKnFY29fqTdCHrhxYkz+lD2 +eSSxsuVcVNiN12dssYf4gSVjUoi/9/ExiUW+cFxEO4ca1PILmEL7SKpjMQjrieqj +jKz8LIEMcC38iAgulf4QkCaqPWiQM/her60NAx3t7LcfYFXvTb0Vz0IPY5xUPnyW +2GEpIspjAgMBAAECggEAHXON1e5ItAUxqempFB/98gpXWXrKiNuNKTklJ3eET6Vr +TZ6VrLBiZ4BoSvu5+01Ffd2jHsE9uHcpnx8crdPY6gzF8EWZoQmp+IZHjKoQQv7D +4nQY71DQLC5v2i1qQkRlB2JfiU80eRP76uWYvgjqI0HHCuLz5Jq+TObNsw1YF2hR +RCePiXwuIt/avNnZgb9WOdqSwd24jF6TCmkSMGKPUuJ0cjWHkLa1sLupxePiEC2h +XmGtLC5k4lHUhom6CVmIV/Hpu7mooWDWTLRxSOdyAKI73s2zQb+c7VoLU9ShN6HR +N+RqFzXUDL3SfZdbEokU1mr0WJyvY+E6obgUlqFGCQKBgQD1SDcvQd6vHmxn5+2E +DXdfISHc/KKszkv0I9QHGktv8qAS5bGJadLWVmBpWmhxjN3M6hfiGTmt61c7NFNm +haiXGDSlfISZVuch29Ze5AcgUGE1tXOaWHcHp+hj5uWm7glFMjW3ZpmjXoe20oMi +j6uFFZAzz1SjGK1HP+nqbvYqhQKBgQD0oEdTqNY/7LUAfUvWt74oMhFveAGtq786 +4GHDTRVWGDwFRVrQqtrDrlWBE7ZVH2cQid/C5lqdicTnUrYgRXHHgiQ5sp8+dlfA +Lu2rx3vtLrMHyfAGRbITUhZQxYUsONfzkEbKi1aB+OBIyCjAis744RKb3TuFWsJX +Oc/lV9nZxwKBgQDuRp4dUV7zPZnQArVwb+iDyFruI5ogRzPv0pJZ6Ahakxc/5FTo +iZ3gy/5Suhn2HQMm2k4jPaATvSh7giR21ublCQWzsVfa4locQURp4BiwIoWcuE26 +JbBUBqAx1I9J77mft9wI8ynTU7SBPNVZULotybnRKtssqX9DVOp27B5FRQKBgG1k +e2ELAHxnou2MGmzzkAKzmR5q4P1D72kpzC4k+2Sbw8Nrp1fm/5tAC5aENSZNu64m +qbyjfGQ0pqv3TeCSjXX1WbO/j/zDuSwFEF2gAGgpWQAjqsVzR1XigH4wRgRnixKt +MysCMnY+0DVb5PVtXW7oX4T54tMYqg2Nmc1qgs4jAoGBALizxO2B+2riBZ/7iuOL +WOdrCEPZFq5+q6lJVULu5SeSsbaTbng9jdYS+XYgRTj0uFz2GZtNFF9SzKBpnQeE +GiYokGidq0PduPEnq9d1KzF9gKCBprxa5jxogkSqTzbcecMwuXE904rdr+JXwStL +d8GuJjHcCJoZW9lGvVU/5YU+ -----END PRIVATE KEY----- diff --git a/jstests/libs/expired.pem.digest.sha1 b/jstests/libs/expired.pem.digest.sha1 index 4b8f78e141f25..fcebeaa05ee9f 100644 --- a/jstests/libs/expired.pem.digest.sha1 +++ b/jstests/libs/expired.pem.digest.sha1 @@ -1 +1 @@ -5325BDD0B9A3417A1C07B947C83168A1C189D1E4 \ No newline at end of file +8650F73BE93E97609540AD60B055C78721CDA0D4 \ No newline at end of file diff --git a/jstests/libs/expired.pem.digest.sha256 b/jstests/libs/expired.pem.digest.sha256 index e38411a855f89..838b2a6583de5 100644 --- a/jstests/libs/expired.pem.digest.sha256 +++ b/jstests/libs/expired.pem.digest.sha256 @@ -1 +1 @@ -7B1A1353749D953FB0A2A2FA036B2C44BAFB3AE1EFCFA9D3A9DB50ADDCA337C7 \ No newline at end of file +D815E7BA03F1220118893FFAEFD21C32C487EACB660E21AE9D570B0F9D025FA8 \ No newline at end of file diff --git a/jstests/libs/fail_point_util.js b/jstests/libs/fail_point_util.js index 1c3a698325b22..008c7c40b4620 100644 --- a/jstests/libs/fail_point_util.js +++ b/jstests/libs/fail_point_util.js @@ -3,6 +3,7 @@ */ var configureFailPoint; +var configureFailPointForRS; var kDefaultWaitForFailPointTimeout; (function() { @@ -15,8 +16,10 @@ if (configureFailPoint) { kDefaultWaitForFailPointTimeout = 5 * 60 * 1000; configureFailPoint = function(conn, failPointName, data = {}, failPointMode = "alwaysOn") { - const res = assert.commandWorked( - 
conn.adminCommand({configureFailPoint: failPointName, mode: failPointMode, data: data})); + const res = sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return conn.adminCommand( + {configureFailPoint: failPointName, mode: failPointMode, data: data}); + }, "Timed out enabling fail point " + failPointName); return { conn: conn, @@ -26,11 +29,13 @@ configureFailPoint = function(conn, failPointName, data = {}, failPointMode = "a // Can only be called once because this function does not keep track of the // number of times the fail point is entered between the time it returns // and the next time it gets called. - assert.commandWorked(conn.adminCommand({ - waitForFailPoint: failPointName, - timesEntered: this.timesEntered + timesEntered, - maxTimeMS: maxTimeMS - })); + sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return conn.adminCommand({ + waitForFailPoint: failPointName, + timesEntered: this.timesEntered + timesEntered, + maxTimeMS: maxTimeMS + }); + }, "Timed out waiting for failpoint " + failPointName); }, waitWithTimeout: function(timeoutMS) { // This function has three possible outcomes: @@ -38,17 +43,40 @@ configureFailPoint = function(conn, failPointName, data = {}, failPointMode = "a // 1) Returns true when the failpoint was hit. // 2) Returns false when the command returned a `MaxTimeMSExpired` response. // 3) Otherwise, this throws for an unexpected error. - let res = assert.commandWorkedOrFailedWithCode(conn.adminCommand({ - waitForFailPoint: failPointName, - timesEntered: this.timesEntered + 1, - maxTimeMS: timeoutMS - }), - ErrorCodes.MaxTimeMSExpired); - return res["ok"] === 1; + let res = sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return conn.adminCommand({ + waitForFailPoint: failPointName, + timesEntered: this.timesEntered + 1, + maxTimeMS: timeoutMS + }); + }, "Timed out waiting for failpoint " + failPointName, [ErrorCodes.MaxTimeMSExpired]); + return res !== undefined && res["ok"] === 1; }, off: function() { - assert.commandWorked( - conn.adminCommand({configureFailPoint: failPointName, mode: "off"})); + sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return conn.adminCommand({configureFailPoint: failPointName, mode: "off"}); + }, "Timed out disabling fail point " + failPointName); + } + }; +}; + +configureFailPointForRS = function(conns, failPointName, data = {}, failPointMode = "alwaysOn") { + conns.forEach((conn) => { + sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return conn.adminCommand( + {configureFailPoint: failPointName, mode: failPointMode, data: data}); + }, "Timed out setting failpoint " + failPointName); + }); + + return { + conns: conns, + failPointName: failPointName, + off: function() { + conns.forEach((conn) => { + sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return conn.adminCommand({configureFailPoint: failPointName, mode: "off"}); + }, "Timed out disabling fail point " + failPointName); + }); } }; }; diff --git a/jstests/libs/feature_flag_util.js b/jstests/libs/feature_flag_util.js index 12403b8fed8c5..4e75a65298aa9 100644 --- a/jstests/libs/feature_flag_util.js +++ b/jstests/libs/feature_flag_util.js @@ -1,11 +1,9 @@ -"use strict"; - load("jstests/libs/fixture_helpers.js"); /** * Utilities for feature flags. */ -var FeatureFlagUtil = (function() { +export var FeatureFlagUtil = (function() { // A JS attempt at an enum. 
const FlagStatus = { kEnabled: 'kEnabled', @@ -51,7 +49,7 @@ var FeatureFlagUtil = (function() { } else { // Some db-like objects (e.g ShardedClusterFixture) have a getSiblingDB method // instead of getDB, use that here to avoid an undefined error. - setConn(db.getSiblingDB(db.defaultDB)); + setConn(db.getSiblingDB(db.getMongo().defaultDB)); } } diff --git a/jstests/libs/fsm_serial_client.js b/jstests/libs/fsm_serial_client.js index 6d6c0a18f4e49..97fc766b5fce8 100644 --- a/jstests/libs/fsm_serial_client.js +++ b/jstests/libs/fsm_serial_client.js @@ -15,13 +15,13 @@ var denylist = workloadDenylist.map(function(file) { return workloadDir + '/' + file; }); -runWorkloadsSerially(workloadList.filter(function(file) { +await runWorkloadsSerially(workloadList.filter(function(file) { return !Array.contains(denylist, file); }), - {}, - {dbNamePrefix: dbNamePrefix}, - { - keepExistingDatabases: true, - dropDatabaseDenylist: fsmDbDenylist, - validateCollections: validateCollectionsOnCleanup - }); + {}, + {dbNamePrefix: dbNamePrefix}, + { + keepExistingDatabases: true, + dropDatabaseDenylist: fsmDbDenylist, + validateCollections: validateCollectionsOnCleanup + }); diff --git a/jstests/libs/ftdc.js b/jstests/libs/ftdc.js index 105769c5c93c0..b0326c1f14b7b 100644 --- a/jstests/libs/ftdc.js +++ b/jstests/libs/ftdc.js @@ -19,7 +19,7 @@ function setParameter(adminDb, obj) { /** * Verify that getDiagnosticData is working correctly. */ -function verifyGetDiagnosticData(adminDb) { +function verifyGetDiagnosticData(adminDb, logData = true) { // We need to retry a few times if run this test immediately after mongod is started as FTDC may // not have run yet. var foundGoodDocument = false; @@ -42,8 +42,9 @@ function verifyGetDiagnosticData(adminDb) { assert(data.hasOwnProperty("end"), "does not have 'end' in '" + tojson(data) + "'"); foundGoodDocument = true; - - jsTestLog("Got good getDiagnosticData: " + tojson(result)); + if (logData) { + jsTestLog("Got good getDiagnosticData: " + tojson(result)); + } } } diff --git a/jstests/libs/geo_near_random.js b/jstests/libs/geo_near_random.js index c45cec7e813fb..e6f8d24ef30b8 100644 --- a/jstests/libs/geo_near_random.js +++ b/jstests/libs/geo_near_random.js @@ -1,6 +1,6 @@ GeoNearRandomTest = function(name, dbToUse) { this.name = name; - this.db = (dbToUse || db); + this.db = (dbToUse || globalThis.db); this.t = this.db[name]; this.reset(); print("Starting getNear test: " + name); diff --git a/jstests/libs/golden_test.js b/jstests/libs/golden_test.js index f3e218f17954f..b9ef72a0e0c8b 100644 --- a/jstests/libs/golden_test.js +++ b/jstests/libs/golden_test.js @@ -1,5 +1,4 @@ - -function tojsonOnelineSortKeys(x) { +export function tojsonOnelineSortKeys(x) { let indent = " "; let nolint = true; let depth = undefined; @@ -11,23 +10,10 @@ function tojsonOnelineSortKeys(x) { // - Discards the field ordering, by recursively sorting the fields of each object. // - Discards the result-set ordering by sorting the array of normalized documents. // Returns a string. -function normalize(result) { +export function normalize(result) { return result.map(d => tojsonOnelineSortKeys(d)).sort().join('\n') + '\n'; } -// Override print to output to both stdout and the golden file. -// This affects everything that uses print: printjson, jsTestLog, etc. -print = (() => { - const original = print; - return function print(...args) { - // Imitate GlobalInfo::Functions::print::call. - const str = args.map(a => a == null ? 
'[unknown type]' : a).join(' '); - _writeGoldenData(str); - - return original(...args); - }; -})(); - // Takes an array or cursor, and prints a normalized version of it. // // Normalizing means ignoring: @@ -35,7 +21,7 @@ print = (() => { // - order of documents in the array/cursor. // // If running the query fails, this catches and prints the exception. -function show(cursorOrArray) { +export function show(cursorOrArray) { if (!Array.isArray(cursorOrArray)) { try { cursorOrArray = cursorOrArray.toArray(); @@ -52,6 +38,6 @@ function show(cursorOrArray) { // This function should be called from the suite definition, so that individual tests don't need // to remember to call it. This function should not be called from any libs/*.js file, because // it's surprising if load() has side effects (besides defining JS functions / values). -function beginGoldenTest() { +export function beginGoldenTest() { _openGoldenData(jsTestName()); } diff --git a/jstests/libs/intermediate-ca-chain.pem b/jstests/libs/intermediate-ca-chain.pem index 4d94be279ee59..c5a549e7e80e4 100644 --- a/jstests/libs/intermediate-ca-chain.pem +++ b/jstests/libs/intermediate-ca-chain.pem @@ -5,45 +5,45 @@ # Certificate from ca.pem -----BEGIN CERTIFICATE----- -MIIDeTCCAmGgAwIBAgIEe9SskzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDeTCCAmGgAwIBAgIESt5aGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQyWhcNMjQwNDMwMjE1OTQyWjB0MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM1WhcNMjUwOTEwMTQyODM1WjB0MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO -S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDf -vZIt82obTHnc3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24T -pItMW1N+zOaLHU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPu -Et2rFgGg3atR3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYq -BLGt00Wws4bpILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4 -vBdU0Kdt9FbTDEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRr -qmctCX4KQtOZ8dV3JQkNAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBAJnz4lK9GiCWhCXIPzghYRRheYWL8nhkZ+3+oC1B3/mGEf71 -2VOdND6fMPdHinD8jONH75mOpa7TanriVYX3KbrQ4WABFNJMX9uz09F+0A2D5tyc -iDkldnei+fiX4eSx80oCPgvaxdJWauiTsEi+fo2Do47PYkch9+BDXT9F/m3S3RRW -cia7URBAV8Itq6jj2BHcpS/dEqZcmN9kGWujVagcCorc0wBKSmkO/PZIjISid+TO -Db2g+AvqSBDU0lbdP7NXRSIxvZejDz4qMjcpSbhW9OS2BCYZcq5wgH2lwYkdPtmX -JkhxWKwsW11WJWDcmaXcffO3a6lDizxyjnTedoU= +S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCb +k/WPIqqv46Nv9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZSh +iO2o9iDC5O1Aedop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4n +NY00x9PkCcoq98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJX +qaeRNXS0/j8Wwp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX +5aJoBUDL22fLRhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHD +TFGBx0p17I1g0xjWNjMVAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBAIwWNyaQhZglJyKMIAUAwlvBL5REA99bua06xWfJwdmdlci9 +Bb6MgQzVk5K68rsNlcL0ma+Ri5FfU+j7gsYZh4pILYb9xqFxiKX7bxMZv99LR8Mi +0EImM7gz3S579qYBXWd4V6/1G864qln8neHv+X3MF/wk3O9IYqepWsC3xDRos1Zv +xQfb37Ol4pcHtue4wHXr5TV8+KPcUusfNcftnpsEHyEUHqPORdHB7xRpfhosRYvL +7WwMXNseuyHFcdA/rEhUVsca+SUeOMIW+8euuU/as3ZaEpv1ZmpHEYXHb2SlS6W+ +gTzUOtNXsKVDrm9uEcUHytp+xvp9l9NNM/IRGGA= -----END CERTIFICATE----- # Certificate from intermediate-ca.pem -----BEGIN CERTIFICATE----- 
-MIIDdzCCAl+gAwIBAgIERt21mjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDdzCCAl+gAwIBAgIEe6nR6DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB1MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjB1MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwP SW50ZXJtZWRpYXRlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA -uLEuQhQ7IQImAIKUMANKaR+4/WLf0F03p5RDlv/TCNdEXTLws03x7bppdv4Y8r6+ -oChy+8rtyA2ckblb0z0OeMlsJY5a04eUhrZYeG4OXn1QuvUqfXl++oBlHnWUD3xG -3v9oPKMxGf9nr6JJXBCeG3owLR9Lbr3QS6Pvz9WwNZGpUVDm/QQcKvbGHmB9fE7/ -RM6IgxtagZlug5WUCTT08tsLfb89UQchCAjO9eZvDcENofXcnsJWImJdTYDlquMM -DB54R9cqoLtDV9NiPVYsjCQ1BgXYMxeG0K/T1rWQY4uB132Y3oFy3RxaDT1BqdAO -O6BOo1AZNyIYIPbt0+Rp3QIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3 -DQEBCwUAA4IBAQCSOAjf6Qh0JnAOK4TqPYt2VG8HEmumxKk/HvIKrk9gbBc6RoVi -Z1M/gxRz82lnmJvIPbsJk4AmTsw8Pnech8Ujeahw0ybRJbLs7FA0YrHg1B7517Tr -eZl6TZn7o5+3HKKcCPpoh+P1XrNWP21NsY75T8sTN0BQ3im3aZDApgO4v0YFqWl0 -20YOdrLk81cJ8Znjdh+/ieR4uPH06CbXjAGPAbB+mnEWMNLlV2WGsJtDCHYM+wU0 -zd0wy2KvqMBbr014v/c4jmyeCBcmgxQ9Q8ATWbys7S5p0CFB6+zeV/2Mb32lwSCM -+Xeg/ms5ZGQJY5fIznwIg+Osg1zGvMF2Rsq9 +12Tv7dcfDmz2/A2bquC4GIPqMHHf1l1cRK8mOydwJRFmzbc4MEFgCmvhURLAE6ie +B4ghfCKpZqD2kO/GtDBK7isMxur14NbKKKFXnwPreSBknSTccJ+8iIvxK+wni+w0 +Ox/Avr4byocV0O6WJ6JEvvcyNbBk+IWsTfNbLZ32/A6WtraE5q2vIZpN2bNEtJe9 +JVu56wI95zcAZmnz3S1RtLVvT8XqmHnCUTpN+5oJWRBTr9pScQNjicpKo+GST03Y +j5KaI8B4cdNecldgqbmebcL0m3RGfBKgv8AEqsjdqg3hvD0rXUpeiGKoXMjyzmlv +OCsQtgP6azneIZRt6MQMsQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBWWBPrsmerwScxU3y1IVGIOoI3hEBCS9t+BzYHpxuvaSHjyYiZ +e1MLgxt4FAbHu6WMB5T1rkJgrGUluCdctxXMg7Ak6d+hVbuBAzAV44rEw/yVGLGV +7FvMOxYh9e+HFTq1iI8kSmgDCKsTww6kfE4fs+FI3fCXwhfy3zLlAlBYoqV67bVF ++Yd1E75kBNcAuyY6Zic1N1BI6f23npvY3plQp2qWjhdGEUb76CZSXrEZ3P9q817O +D27YiPP6uhy5ypVnna2jmTnJ5M2EZ01Sv0w94pz5jUXSi49FRATMc73wYl8bSvw+ +swyDhMJMHUeTPr1deiB8SVdzVsOZCd5LQeuz -----END CERTIFICATE----- diff --git a/jstests/libs/intermediate-ca-chain.pem.digest.sha1 b/jstests/libs/intermediate-ca-chain.pem.digest.sha1 index dbe9e3898afc7..e1ec750dc4655 100644 --- a/jstests/libs/intermediate-ca-chain.pem.digest.sha1 +++ b/jstests/libs/intermediate-ca-chain.pem.digest.sha1 @@ -1 +1 @@ -F42B9419C2EF9D431D7C0E5061A82902D385203A \ No newline at end of file +D33E7C8B0748C66DBEEE6E24410FA72A47607DF3 \ No newline at end of file diff --git a/jstests/libs/intermediate-ca-chain.pem.digest.sha256 b/jstests/libs/intermediate-ca-chain.pem.digest.sha256 index 2cffe1b5da960..4ac5afdd90414 100644 --- a/jstests/libs/intermediate-ca-chain.pem.digest.sha256 +++ b/jstests/libs/intermediate-ca-chain.pem.digest.sha256 @@ -1 +1 @@ -21A1C6A87B31AF590F5074EE716F193522B8F540081A5D571B25AE5DF72863E3 \ No newline at end of file +6568E01751761F5EC6A07B050857C77DD2D2604CD05A70A62F7DDA14829C1077 \ No newline at end of file diff --git a/jstests/libs/intermediate-ca.pem b/jstests/libs/intermediate-ca.pem index 97dcda1bda1ed..5edc2e61d476a 100644 --- a/jstests/libs/intermediate-ca.pem +++ b/jstests/libs/intermediate-ca.pem @@ -3,51 +3,51 @@ # # CA issues by the primary root CA, which then issues its own server cert. 
-----BEGIN CERTIFICATE----- -MIIDdzCCAl+gAwIBAgIERt21mjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDdzCCAl+gAwIBAgIEe6nR6DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB1MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjB1MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwP SW50ZXJtZWRpYXRlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA -uLEuQhQ7IQImAIKUMANKaR+4/WLf0F03p5RDlv/TCNdEXTLws03x7bppdv4Y8r6+ -oChy+8rtyA2ckblb0z0OeMlsJY5a04eUhrZYeG4OXn1QuvUqfXl++oBlHnWUD3xG -3v9oPKMxGf9nr6JJXBCeG3owLR9Lbr3QS6Pvz9WwNZGpUVDm/QQcKvbGHmB9fE7/ -RM6IgxtagZlug5WUCTT08tsLfb89UQchCAjO9eZvDcENofXcnsJWImJdTYDlquMM -DB54R9cqoLtDV9NiPVYsjCQ1BgXYMxeG0K/T1rWQY4uB132Y3oFy3RxaDT1BqdAO -O6BOo1AZNyIYIPbt0+Rp3QIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3 -DQEBCwUAA4IBAQCSOAjf6Qh0JnAOK4TqPYt2VG8HEmumxKk/HvIKrk9gbBc6RoVi -Z1M/gxRz82lnmJvIPbsJk4AmTsw8Pnech8Ujeahw0ybRJbLs7FA0YrHg1B7517Tr -eZl6TZn7o5+3HKKcCPpoh+P1XrNWP21NsY75T8sTN0BQ3im3aZDApgO4v0YFqWl0 -20YOdrLk81cJ8Znjdh+/ieR4uPH06CbXjAGPAbB+mnEWMNLlV2WGsJtDCHYM+wU0 -zd0wy2KvqMBbr014v/c4jmyeCBcmgxQ9Q8ATWbys7S5p0CFB6+zeV/2Mb32lwSCM -+Xeg/ms5ZGQJY5fIznwIg+Osg1zGvMF2Rsq9 +12Tv7dcfDmz2/A2bquC4GIPqMHHf1l1cRK8mOydwJRFmzbc4MEFgCmvhURLAE6ie +B4ghfCKpZqD2kO/GtDBK7isMxur14NbKKKFXnwPreSBknSTccJ+8iIvxK+wni+w0 +Ox/Avr4byocV0O6WJ6JEvvcyNbBk+IWsTfNbLZ32/A6WtraE5q2vIZpN2bNEtJe9 +JVu56wI95zcAZmnz3S1RtLVvT8XqmHnCUTpN+5oJWRBTr9pScQNjicpKo+GST03Y +j5KaI8B4cdNecldgqbmebcL0m3RGfBKgv8AEqsjdqg3hvD0rXUpeiGKoXMjyzmlv +OCsQtgP6azneIZRt6MQMsQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBWWBPrsmerwScxU3y1IVGIOoI3hEBCS9t+BzYHpxuvaSHjyYiZ +e1MLgxt4FAbHu6WMB5T1rkJgrGUluCdctxXMg7Ak6d+hVbuBAzAV44rEw/yVGLGV +7FvMOxYh9e+HFTq1iI8kSmgDCKsTww6kfE4fs+FI3fCXwhfy3zLlAlBYoqV67bVF ++Yd1E75kBNcAuyY6Zic1N1BI6f23npvY3plQp2qWjhdGEUb76CZSXrEZ3P9q817O +D27YiPP6uhy5ypVnna2jmTnJ5M2EZ01Sv0w94pz5jUXSi49FRATMc73wYl8bSvw+ +swyDhMJMHUeTPr1deiB8SVdzVsOZCd5LQeuz -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4sS5CFDshAiYA -gpQwA0ppH7j9Yt/QXTenlEOW/9MI10RdMvCzTfHtuml2/hjyvr6gKHL7yu3IDZyR -uVvTPQ54yWwljlrTh5SGtlh4bg5efVC69Sp9eX76gGUedZQPfEbe/2g8ozEZ/2ev -oklcEJ4bejAtH0tuvdBLo+/P1bA1kalRUOb9BBwq9sYeYH18Tv9EzoiDG1qBmW6D -lZQJNPTy2wt9vz1RByEICM715m8NwQ2h9dyewlYiYl1NgOWq4wwMHnhH1yqgu0NX -02I9ViyMJDUGBdgzF4bQr9PWtZBji4HXfZjegXLdHFoNPUGp0A47oE6jUBk3Ihgg -9u3T5GndAgMBAAECggEBALPMwaTQvzOGPOq4NH19Zp5qpJQBArR9W2YIH7jLum3b -65Dzu4JDOmfd1zhhbHY9HaUbW70mtE/SeH8hPXSq9wC9zkvNhzxwvDZdJEmxksmI -I+SuMjxO4EAMaOS7QHXG3dPQP3DwyiduDkncqqA3CUTksivrUGsx3KsWoKrrdAzg -5tMfgcw+nRuOonJbIiUvTZT9CkpfTmOaNaAAS+60EDjBWGEHS1X6OUIcInQqlQ+o -zbtSsIT5Xna7f/XYHzwtEc0oQdlc1+nnfMsBKZ2xFsTZqzWldnMPIBekhP3293hc -EW9fLIUDKYtwyQ9zVk3AxFGtjRz3LknDGQywh9NkdJ0CgYEA3KceK5ZeRTlLsmhz -fGZXfoSWIdYluhYi2+0rQ5I175BJoKxY5UQHt56bRtORkpVucbslj5w9uUffAzjV -L38W/CqFBE1AMTd6fwxdU0MkWWMBTTNVJCoMCV2q95SN8gvJk18JK6e4VuteVE9U -eF6uLWFGYzki/gM4Y31e9Mkzmk8CgYEA1kdUVTRGBjfkQ0ZK2JUk+zTaPDOwIaqA -M56Srhqe93drKWnBO6xnMpFmCMHARCnY45Jr7ONn3ZYg7E1RjQrAzqAcj4i4jbaZ -knY7/dEJLC+M8GNs88R1Et/1kwZXR+7NESUDTaWo29CCdpZ4Fr/9zUc+Cq+YSViz -pnnRQsmRKhMCgYEAn7PtaErbTGsd2LE09RL1vMKmDzN59ufSry9How6OLyhVwg88 -ACRvGX2YkXjL4jZ5y6NSmlDRc+sLBL/7vPbVYgo8YFKxZW3sIUyHt53fDztTU9cV -hqlQMO80sSE6Y7gYW6vrbUdlarPMgGUylDSjCIFheqt+Ii+efpPdSHrf+I0CgYB9 
-Crrnzc1fK5aEu6xJnx8piLohj3gk7T5K4SpvdxF1D7b1P8WHcR/3nmmQiqAMJu9u -Pa/FWR7vbABiZOxGBhRlf4GrMPmhmQEdOunfP/C+XDE+xoZ56hb3oMHJvlWIoI4C -hzraQOW9AHxhf4UsoXUWKZDG45lWk/CiIUxAUSfpCQKBgQCe4idGKMxYLTH760Ay -XTy7PkAGmOkz2uzkZ2mXFu7s4jVWkkIcemsyiTlESPMisjQ4cfi3rAucXzEoLxxc -ecuCCEd3gixzl/j/4U5iGl3twU6eBpWK1KF7RG/EQ9hhuRLZXUoZbACd1uG5k+oV -QcMXqarq2H87hd9vrxlB99Fx/A== +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDXZO/t1x8ObPb8 +DZuq4LgYg+owcd/WXVxEryY7J3AlEWbNtzgwQWAKa+FREsATqJ4HiCF8IqlmoPaQ +78a0MEruKwzG6vXg1soooVefA+t5IGSdJNxwn7yIi/Er7CeL7DQ7H8C+vhvKhxXQ +7pYnokS+9zI1sGT4haxN81stnfb8Dpa2toTmra8hmk3Zs0S0l70lW7nrAj3nNwBm +afPdLVG0tW9PxeqYecJROk37mglZEFOv2lJxA2OJykqj4ZJPTdiPkpojwHhx015y +V2CpuZ5twvSbdEZ8EqC/wASqyN2qDeG8PStdSl6IYqhcyPLOaW84KxC2A/prOd4h +lG3oxAyxAgMBAAECggEBAJhQgHooDYYd9+n1lYcYshZj8k+ftzHXg0/uq5JZKSyN +Sa1fSxSUpl24O/Ug1UMRke6xjTxDJpe0a6fCZzk0jUgumTJxJL6CJSLmNRf1paZa +Ccw2LMxXqPLGQx1UOgLwXV8R7TL1LKHjNMIydWxBi2ufUpo0yrITlOzqkieH9Qf3 +3Ovc54S1/4VBWCkul+CwYvIpcm21A0W+y6iwNaRPTAzqU0D7bNUc4SE50oaJLhhy +t4qBbZ6N037L0u4Gh37SzSM3+AS13n3L5u2z1cIGllHul1Is73NUzfH/dZruREaY +UJqgGVy1N7TJKN0s0Ug3h9HGSrxv4/Q1YPdDqMWdO6ECgYEA7S8njwKY2ga9BPbU +xvL2JcIf2fpmm3IbYajwT69nVRbF6jxo/jNa4wtLadD37/+T3zuCUn9+GUoeeBob +C6zx3f3m7MJ2lftXJyQVm5ENtshWLANGk+BA5I35V1YZwqk+ep3q7t8HEfiKsj3w +w+4YrdZqti7KQ12WMXRJr+nyhssCgYEA6HtDX/DpGpmOVEYKi0hpwflTzRMrKZRA +VdPQh9PFztsPpVx2bxDLUPGORLjz6mjQ54s6vpWsFCIRGWknfshlMWCLwNewTWKs +n6HlTXuA3FSSbXfDRrEvMU7S9uVjlkUawNrLCH9rcc3W006VdnuCvErqONbjRlfI +8YBS6tetDvMCgYBQXKK9nLanYJMYpH0Rb26g+nYKSAIN9wp5+2B4z0hmlxG1vIQi +ZMoNlV0W3Du1cFXs09/jDdluviM2tDmnqhBFE+rzGyxMwt0ToPFFRU9tN9GilfWK +0veJuOTbh4uw3eEScIESMDTYDOsJW11BNWHdPIHpek8DpijmVq1E0jJfIwKBgQC1 +jz5o+Q4zVeUP77tgV2cws/U8XVICIOO1o7xht4PuLqqja/iaeLUwO9Xt2bu0P7OW +gOeZ2+4NsyVDgRn1K6/LxiierFPlu8Aw0xDvWBqk9+97SmLZHJtMmNCtPSxvOPzR +zI1vz4Mr16OEVwTnCUJqpt8RENFiKECoSp13BoUC/QKBgGBKIi2f2EYY2i+F9aFR +wnyzKiTtHTiTVuLI0gKzh5c31wUDNJaJcODVerdAJPz0DZvRBWPvquUi2haJi1ZL +ayZWCrcwXZWXq+DJGg29VP+JK0tI8xuYR9riliWZe5hLnmLj9M2RU3m2Em2/vJVQ +SugaPOT2njhLHBmDgbUq6lOJ -----END PRIVATE KEY----- diff --git a/jstests/libs/intermediate-ca.pem.digest.sha1 b/jstests/libs/intermediate-ca.pem.digest.sha1 index b18b337262920..5acc6af1798f7 100644 --- a/jstests/libs/intermediate-ca.pem.digest.sha1 +++ b/jstests/libs/intermediate-ca.pem.digest.sha1 @@ -1 +1 @@ -70F69F1CF9ECA2515CF1F7A6A53C5CE749D4E59C \ No newline at end of file +0EB0876F8189F8EE0FFFC551AF474F0E66D8B52E \ No newline at end of file diff --git a/jstests/libs/intermediate-ca.pem.digest.sha256 b/jstests/libs/intermediate-ca.pem.digest.sha256 index 2abba6584e324..73e090b8578e4 100644 --- a/jstests/libs/intermediate-ca.pem.digest.sha256 +++ b/jstests/libs/intermediate-ca.pem.digest.sha256 @@ -1 +1 @@ -D56DD3A2C10AF3589B668FEB3F9A014C876BD912D701BF2B7332BA42ABC8BC43 \ No newline at end of file +EBB24526AF892C7F7FB25145356299CD18A2E4B0ACBB447E400842F4AB6633F5 \ No newline at end of file diff --git a/jstests/libs/kill_sessions.js b/jstests/libs/kill_sessions.js index 57e6c636ab280..957a412db2ab2 100644 --- a/jstests/libs/kill_sessions.js +++ b/jstests/libs/kill_sessions.js @@ -789,4 +789,4 @@ var _kill_sessions_api_module = (function() { })(); // Globals -KillSessionsTestHelper = _kill_sessions_api_module.KillSessionsTestHelper; +var KillSessionsTestHelper = _kill_sessions_api_module.KillSessionsTestHelper; diff --git a/jstests/libs/load_ce_test_data.js b/jstests/libs/load_ce_test_data.js index 7d0a7bdc7e0a4..22f979fcbd982 100644 --- a/jstests/libs/load_ce_test_data.js +++ b/jstests/libs/load_ce_test_data.js 
@@ -1,5 +1,3 @@ -load("jstests/libs/ce_stats_utils.js"); - /** * Analyze all fields and create statistics. * Create single-field indexes on the fields with indexed flag. @@ -49,19 +47,19 @@ function importDataset(dbName, dataDir, dbMetadata) { */ function loadJSONDataset(db, dataSet, dataDir, dbMetadata) { assert.commandWorked( - db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); + db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"})); for (const collMetadata of dbMetadata) { - coll = db[collMetadata.collectionName]; + let coll = db[collMetadata.collectionName]; coll.drop(); } for (const chunkName of dataSet) { - chunkFilePath = `${dataDir}${chunkName}`; + let chunkFilePath = `${dataDir}${chunkName}`; print(`Loading chunk file: ${chunkFilePath}\n`); load(chunkFilePath); // At this point there is a variable named as the value of chunkName. - coll = eval(`db[${chunkName}.collName]`); + let coll = eval(`db[${chunkName}.collName]`); eval(`assert.commandWorked(coll.insertMany(${chunkName}.collData, {ordered: false}));`); // Free the chunk memory after insertion into the DB eval(`${chunkName} = null`); diff --git a/jstests/libs/localhostnameCN.pem b/jstests/libs/localhostnameCN.pem index 7b8530d39ac6e..918ae0688649f 100644 --- a/jstests/libs/localhostnameCN.pem +++ b/jstests/libs/localhostnameCN.pem @@ -3,53 +3,53 @@ # # Server certificate with IP localhost in CN, includes a SAN. -----BEGIN CERTIFICATE----- -MIIDyzCCArOgAwIBAgIER67qozANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDyzCCArOgAwIBAgIEI1TzoDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBvMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM5WhcNMjUwOTEwMTQyODM5WjBvMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDESMBAGA1UEAwwJ -MTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuBhhUIRS -AzneprgtUZER24xU5jSbtGRXwRc+2MC3G7WgIKl3OvIFLn3BcYuUnWIIXD0Wa1Ry -vToIeqHj6Sb8IMlm9lNMtKOmjcFfLvaia8q+SnsXc/FZMQT3qvNorhoI574+mUBL -msFbNeYthLijvpvd2aL0uYqebtcqc73skL0USDbl0djGijo5StzCvQCK9J44Htzl -A3oitRYkWiBZZW7SFinPLKsUueFdhGFZK8SUq1ZNjowUbx8jzMGzZu9JquGdhIEq -3V8p/5WixFI3s5u21WFaAKljjh5HKj2HSI+MC/Hiuqy9sgoPZOjbGuDLaAgrC97Z -WjW5qbvpX3RUMQIDAQABo2owaDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV -HSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUSrTOui2RoGQl8kBB/Aj6r+waT3cw -GgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQCX -pXs6Lnste/dxYim7N+xet4UmMLrSCDLOHAECef57hbH9uVeH+Pjl/TMGbx098Yl2 -ibcVT4Z7ieQWGmEyGWJw44RsMcpQpiMGXnaWZe2k+Hn9xfSQrVoyOFhxgUTnFu7u -nzxqh2Mod/MayFajtLFdYPjHuykxeL5+kBEPJuwTPBN2uaKKyJrmKwnq2mxUMAFj -+i2B9bZ67hSCaLAY4D76QCvEiAeTwEzhijovW077jOrqRdrHViRa9MI0CZkTy+cI -h23LR3WPjlHlihag/EIdOjXlLYoONmEMKiDvTsI0f6hynXUNyYa6gT82NQwbxz1u -JZn5x2nCr6m0PdUCa40f +MTI3LjAuMC4xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv/TeqpZE +PNM4+QbmQEIpk25GVitmdIwpQ2TDJCkdumcQRB7aFEovGi3bGgG3B9wkg88ASFh0 +QdytV/TNFbX659hVVKdh860jQDe4joYZXkUIXeRhGiq4wXbKUJw493pyBTWitNTp +uYbVOB/7jvfvunTy9p8Z58QPXwP/IUsUqkOwrv1WE+rjBvia/zxDl4I5UoGSI208 +4h7GiHmSQuguaUWZkmlsVgmV8laYS1jp4qOr+hGoFYegW63tdZABD1GBOrxLP5qm +yrlc7+FHsNi1AmIdYHg7QV3BpxxMVb7J4cSl1wolS6KHuckm2o5K4FJLx1Vhjl8w +UKJPo6B2v0wdQQIDAQABo2owaDAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV +HSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUpe2ZqO42LqlVTOF/xOsmRS1vbKYw 
+GgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQBn +ww6JKaLBChLsM/qhp2rGIT3QPKry2p54yRWwwqE7dLN7GnhCIjfMbZX7wf4DqeWc +xbcuHQI6AQJWde8xBW44p+3uI1xk0NoH+2xsjtGxqMyMVcWeG8qnowqFvJos5qLc +xf37P74FKCVCAaU3F11ikCNZ+7rG0EdDfINUSRDayAe2+qBuv5c3LvE5L6rifZK7 +V2S/eY1d7TFANsjxQDHVaiyqJQa3eMGW9GhiYD6YQ7LjNTE2+ofjn/csjhhks5il +dB88SdZFitCPBG8ZBehL1kdJOrRyQFqwjTPxSoprShk9YwK8fQn1NZyl9248R7iu +64ugfNDTOT2tJOdQwLnY -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC4GGFQhFIDOd6m -uC1RkRHbjFTmNJu0ZFfBFz7YwLcbtaAgqXc68gUufcFxi5SdYghcPRZrVHK9Ogh6 -oePpJvwgyWb2U0y0o6aNwV8u9qJryr5Kexdz8VkxBPeq82iuGgjnvj6ZQEuawVs1 -5i2EuKO+m93ZovS5ip5u1ypzveyQvRRINuXR2MaKOjlK3MK9AIr0njge3OUDeiK1 -FiRaIFllbtIWKc8sqxS54V2EYVkrxJSrVk2OjBRvHyPMwbNm70mq4Z2EgSrdXyn/ -laLEUjezm7bVYVoAqWOOHkcqPYdIj4wL8eK6rL2yCg9k6Nsa4MtoCCsL3tlaNbmp -u+lfdFQxAgMBAAECggEAMn4FWucf82CQ79s+GswWQMhJlOZC+oQ3CW/NvfeFgWUc -aTjxycoNn8XAI9trxIrZziq3FnMQEVR0dOYoM4+MwZVb1x3bwQPNr72k1KWywKvX -62e9dABzPR2mrE2cnXvoi19DnhbjZau4z5y1SVy3FJV9kK7APo+FitMbAqnJ05yA -2zH0O6tk4vyoUo5Bj/8GFam194uCpAhm0NfBy9Cyk1ZenDMRmyUefahmeBGFqeaF -f6E/Bazb6l4drhu4yhuaEA4e/VXkopixbzQHMctGoU5EUQLUlJkbtAkYx61c7x61 -addr4OgPWv9Um3eIu/0FOziqVMTuobSrf63vStOSbQKBgQDwTU6/fAkcEHNpOb9y -oWnS2L6JhNMhzZyhyj+nUz2qp6trurnXwewE9ZtZuSniaQxT4I5Qpl7CaGBmhGPs -E8qEX/lKK9DY58vMUx6JDBtwdAQLBC42LQmneLj71gfwWZ8VVy7AZ96sNL7GC+YL -2WvhDG4f08ZfyRtMSYgSgeliUwKBgQDEHxk1FL6q04lExutvjO64J/QyH3qZaqjl -UenOQVA5/B2UuJWq9n4m14e9ZzWdw3GFiT4+hV4iMs8z1HWMyuYpEA6UNla1u1Xe -VmFGq306OuTh54PP/fvu4fwiSWXKQTEkrbb9WLdc4sIy2STuOX+41b4cZy0I3FqK -tZdkLY5m6wKBgQCQwoGF6Pqz5VUhNqCWNZbCZb+iqFloK60H6gaejg7AF3G03C/I -QhIkirCjRGBu/Elo3gXdn9vF0YsBNw/az7FYPVi3zd1qTXkABbKHbLu66qjk2gfc -qxT9xkPpse3mZJbpDDQlxGzn4H5sYA6dZMUQNaTBl5oRadz//+vw+kHV8QKBgQCY -R+ljvOrba4svWyFeKVkGpwdGkAi67QWdof/gRfiMPYWef8C+0cxcTog4edY43JPd -8xXgp9/SwA8BGJv5qWYTRkN7s8GaNI7VJ886d1eyCh7EheZkbrra3p/O45zk8b+9 -0iC/EM63kd7mapLxYrYYh+ao2TgvpCGtiJi3kWP75wKBgQDcJ0+POm5QQYlHDecF -MZOpfI1FDfummX5yY/+8RuG/LVyB5nfilbz/EdMlNc2rVenI2IOl2MHqEuRNv2sh -5/w0/Dgmk02TWz9HO5G0TstE1t5K3Evd8Ti6IF/N8SoJlJAqI+aqFXW5r1U9+KH4 -h9BY45YpwEgi05lHTdQzk413bg== +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQC/9N6qlkQ80zj5 +BuZAQimTbkZWK2Z0jClDZMMkKR26ZxBEHtoUSi8aLdsaAbcH3CSDzwBIWHRB3K1X +9M0Vtfrn2FVUp2HzrSNAN7iOhhleRQhd5GEaKrjBdspQnDj3enIFNaK01Om5htU4 +H/uO9++6dPL2nxnnxA9fA/8hSxSqQ7Cu/VYT6uMG+Jr/PEOXgjlSgZIjbTziHsaI +eZJC6C5pRZmSaWxWCZXyVphLWOnio6v6EagVh6Bbre11kAEPUYE6vEs/mqbKuVzv +4Uew2LUCYh1geDtBXcGnHExVvsnhxKXXCiVLooe5ySbajkrgUkvHVWGOXzBQok+j +oHa/TB1BAgMBAAECggEBAISoyinCZ65Rm5IevomyL+F/2IQ8wjXRl8LHpEUdVTo5 +d/hcCgLzsdG51y4F+WQpM+MT7/IPj2jlBbc/q2xRs/D+hRpYA03QXghHefilnGqV +8s5Qwvq+g6jQW8mR7Zy6ton7I1MtwclB8aE0GToZB0gpArCGC/UsfiMANBXiOYng +w5UMJNzTQzPmbLtxcMBysDTJFrzbaeNAvEm3zOe5+Y3+JzK7PuasoGajF3Trm4yD +Fhb9PtiBH/V63W5SidfpwENo3P1/fvab9LBRfA9z5g+ZiypnE2CDvy29fxiq/pY3 +fXn0kxl4ym+7wIvxnYx23AjMMERnJHmOn9skGm3RKXECgYEA31knsxHVaZJD7Djd +UzABp9xUK5rETgQ1Dc3wS5kv37ZmodQLOS1QJxF4iQneZ053m4AjjOEpqp9Kvf6H +8RFWi5f3/3wB0icBVCoY1NnI8eaOaBzckffClge/jju+DLAwUNL3Smh0ZIl7Egoq +CLel4LpRJt7LKFbseLs3ttn4XxMCgYEA3ATfr9gwYPmow6en38awSrLrhWJH0ANn +/p8Q5EWdQo7ViHsAnNJt1Zg0mCzGLeKnuFXT2NZzNaizqA3Jl+FKvsHwIdtEBbBX +OiYVnEbc0NuTKrHb1nZan7XmRHnxahlr4vf9KsxSw6nQsOhBHagMqUW334YMk5qO +BG6dk92TGNsCgYEAiYE+K8Ti+ugN4TTxLfH0UwAW4fGawd1dPG55blGVY8nMTf1a +G42GN0dOSjBFOJzajmXJfUZyfJUtUuONliDyg2bATA5woI1bCBISz5h1WlhzfC2o +rkU3C18h09N8Ihum41u+25SLdAogNu8DCfLmsQCETcYElYVOeNXqayyu+PkCgYBE +h6wYvHDNM9YyIv1yARQWIEalxCf0DauNroP3ZguGmLDAEvfs4MpfNkpjf8a2shtl 
+mi4jIyC9fO+Aj9LT4NOEOSoPkZlNYFC7BvPCOnw6/bmIGeAMm868Yk730zezwXlO +N8n+U6gP08vx3lWx5A8VhmawU7OFIiXMEZw5W1Ge8wKBgQC7IqlXmq/cZZyYItUM +JUNPgbNU8RF1V0DDyxcqZ6brNronoJe/FJQyE6y1dV3ndk0y3jS+hZHhuvrfkMhW +tspeTRZ7GqvmsDknnE/Oow8MuOaO8mFAebMee9/Wm7fp8UQzkqCD1ro1T0852Aax +qqG6MHvmqu7cHdsgTM7lzGBHaw== -----END PRIVATE KEY----- diff --git a/jstests/libs/localhostnameCN.pem.digest.sha1 b/jstests/libs/localhostnameCN.pem.digest.sha1 index 9df0f445e09f2..9295181016754 100644 --- a/jstests/libs/localhostnameCN.pem.digest.sha1 +++ b/jstests/libs/localhostnameCN.pem.digest.sha1 @@ -1 +1 @@ -C848512676CE8B7F1DC1E4EAF0733329F9129356 \ No newline at end of file +D9F106405A41A436C0B53C4670BED9A7008C1F94 \ No newline at end of file diff --git a/jstests/libs/localhostnameCN.pem.digest.sha256 b/jstests/libs/localhostnameCN.pem.digest.sha256 index 7fbb9c42d0b78..cf87b29408366 100644 --- a/jstests/libs/localhostnameCN.pem.digest.sha256 +++ b/jstests/libs/localhostnameCN.pem.digest.sha256 @@ -1 +1 @@ -D57B1241325C2C3DD08B7B7B2D095026C48DCF8390841E00C000EEA7F5DF3F87 \ No newline at end of file +64439F597E9E8F00EB8C01F0CB413EA1B12B95A18FE41195384B61AA08F290FC \ No newline at end of file diff --git a/jstests/libs/localhostnameSAN.pem b/jstests/libs/localhostnameSAN.pem index 0194976edc841..32cb68ba698ca 100644 --- a/jstests/libs/localhostnameSAN.pem +++ b/jstests/libs/localhostnameSAN.pem @@ -3,54 +3,54 @@ # # Server certificate with a selection of SANs -----BEGIN CERTIFICATE----- -MIID8DCCAtigAwIBAgIEA8/VOjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID8DCCAtigAwIBAgIEHBHc+TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjB5MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM5WhcNMjUwOTEwMTQyODM5WjB5MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEcMBoGA1UEAwwT c2FudGVzdGhvc3RuYW1lLmNvbTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC -ggEBANdWi2k6E12hbrZ7x9ypiFvPXvYBeQpQn/LndWDhbhr7gdzd1CYDov02sB2x -v+ZHAbw4M+mlCSubpi71/TriZ+F+QOsNqfRNPB1x+KqFQOQqlA/5bq4Jflz9GMkg -wvtb7tMVAbk3Hv4nHDJJ1TBEFkpR7zQl9hAFQxm1fokb5nEkDQk54BMcDf0bHT99 -nP2uSB8/5gAIGMRS4d0YSVyCEezkNjY5w1DEIpyMx3oQ4DfcE3eyh6RcXvz/5lEr -YTcnRrN4f3mZySPR3rRolEWwioBppz8bnDr6a64C1wOGOsybNY0J+7BQV3totXwE -P7UE1ikD42QD3bwLvWLqNGQ+wysCAwEAAaOBhDCBgTAJBgNVHRMEAjAAMAsGA1Ud -DwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQUvdKLieNLHvQI -2LepEk9sdqm08sAwMwYDVR0RBCwwKoINKi5leGFtcGxlLmNvbYIJbG9jYWxob3N0 -gghtb3JlZnVuIYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEARKd2X8OKkSJ06z0G -kZ9KjHSUL5MSvdZ65+gPxpH+ZPVa08jnopy81e8FtKNXPA6oE8YoXUdHbsp9C81H -qb496yK5mAIKsYFmxyp6hC63/si8hnfVCTuKCGu7clX6kKVUReQmj00KgpsIPCf9 -CGC5X42ahiqLvuzrs6lKEKN57xBeEtY0qguhunwOakgstk9i60ELQFoShxM4tS6m -5VIu5bp1Ryji+WXvIKFZOkzi3TwFiTllw0EdGdcVL8XymMeINzATocJs75ht6eq6 -Vh1R+O2Oq6BS+gnEJS4dugvwUSo5VinKn1QzBT559Ebj4BOSMH5G+l0MZzm02yTi -YyV/oA== +ggEBAJyiDUfLhicCkMLaulsWfI8QQJEkJFbJcJ+dxL/ckl0XpE1Bcox9KeNy1dJ/ +JEOQqWyLnKqb6MOpS8UDp1VhDY8JeQShecy6ddPlfsunM0Er/YLiPoeudc47g0Oq +BywBtZQ1XMwhnD0v6GbaL9vQBCenfA2laVtKMHfJRTrGCDxgiPfad6nCki92Chio +7W3IxGjQXdbqMKvZjI465dbyNuZsx+B4dSzYCUs4aPbvWpzqaaTLakVATatUGcHG +TQEzPgyytSk46pod3nILad2isNU5ZOnsNCaHjKNHoZRn+1Z7b5FIu+/pl75Moi3h +T0FjwLQRZF++3cnBzX0vpH9jZB0CAwEAAaOBhDCBgTAJBgNVHRMEAjAAMAsGA1Ud +DwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNVHQ4EFgQU1Fd1BIdSItER +RKQ8Jv0M+Lww/KkwMwYDVR0RBCwwKoINKi5leGFtcGxlLmNvbYIJbG9jYWxob3N0 
+gghtb3JlZnVuIYcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAJ6phZ78MyNxZuwD3 +7DY1s+X9b1fxQ8JTZF8AAsLmyVTXZD6DUVXPw9Cz4SqvFb4XgpWTIQKj9ZjiuVXt +lPCjQH2y76mdAdKhe0FOkndfBRcg5ezGtWvmiDMqoM9pAdzZeTcIjhfFeYxtEQB/ +xfXzFsVSFvYXgTdTsK2ii6PYpuboSkqKiZd5jhlW3PVGNMA4G9hysuHLK6Y2MFUc +ixE/rhz7pH9u3H/XBwtwxMwDj/NiMpSOlEfomBleX5mZdWCCklQM9ix8voq8gUWI +bE8nywECfSn/nt9dwykQIRP+ZE4+Jz9YkIduEkL7x78pIR27tc97SXwpXQEHhvj0 +obSYjg== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDXVotpOhNdoW62 -e8fcqYhbz172AXkKUJ/y53Vg4W4a+4Hc3dQmA6L9NrAdsb/mRwG8ODPppQkrm6Yu -9f064mfhfkDrDan0TTwdcfiqhUDkKpQP+W6uCX5c/RjJIML7W+7TFQG5Nx7+Jxwy -SdUwRBZKUe80JfYQBUMZtX6JG+ZxJA0JOeATHA39Gx0/fZz9rkgfP+YACBjEUuHd -GElcghHs5DY2OcNQxCKcjMd6EOA33BN3soekXF78/+ZRK2E3J0azeH95mckj0d60 -aJRFsIqAaac/G5w6+muuAtcDhjrMmzWNCfuwUFd7aLV8BD+1BNYpA+NkA928C71i -6jRkPsMrAgMBAAECggEACr6QujFhofxaD9ThRgtXeG1CabftGCyprm8KFklpOwNt -mV2gJj1sNoIyC2lBD/ZfCcssG+5WJMzhX9p6RiPh1wh7qL7Hyh7IbxeutqNrd3cA -aEOj2B81JOKkB/UQhYYyxml36ovrifbdm0y/zGa5TgAElW+HdYktrey2YZT4zJBO -fpkkfaj1EjcWvTnBfsfsyX0M81Nvnvc8ViejhwbUf1WGMK0VCgxPAMVuqYQAr3OO -HWN2fOvU9tyeKC305viIqE2x5zOYbhY5U/mPht5CXVhI4xHZEeXlGeK8ic7wiWqt -yY834uGhzEEvr4FQcUVgujhgkQfGrd3+qXjiZ+/+sQKBgQDwAeXjy+0Z7gUtEwQk -fX5FDv5CqwVgWWwMVAg7ogchZuFstuO2qlbeg0BrHXUdiZAnCLvay2vCYApKzcs8 -hZBoV10mZUGjZVkC06LWa6EvtSTtYPIe5NWoHcMQiYHY0zj2h5Hyh5JEii/a8PsN -7v1JmwrTg4xoAfm5FZH37wMEdwKBgQDlr9TC6KfFnvcsvl0S/4dCDXNk/Fvk2nj5 -ICjRPQkllutWjkSJCecn1K80NFEO+b8yWTuT7ARZY19fU3Rp08RN7qIkjoMT61ug -DQN4nLMJF8hjLQSu/ZpuFzq7UTEi2LiUzkaZ+6/FwnxXl6PlAYrCSo8n4RDtEJqC -OueDAkan7QKBgEhQKhXigYPIhWK5Ugw0i3D/PXiU0r/e8YEdaIWfIgrcVptM13tY -A2hzn1smuvy1T+uS5BbeJ4+fJDq5mryXeZUWjYheBdLXXGRPo8Z7E0uuhnvHM+w/ -Amju3rEC3U6ZBZax7jVmx5lDEXwkE2B1W3dr8W0dO5ay0a0EZMCRERWtAoGBAInW -5ZY6zUR5y9hcPbsDAocPT2sRT4yd1++Z7yTyviSC16TQKC0ddk2fA64On36fPrDW -gDybbVi/nCK3EKvZ3HrwdPn/VIaFvizk4mplj8QrM6ThBWiG5UCgTCzR4u0Ipm0X -BaOvOaMyvjBK3p62ODG1UQgbt26tgiwZcYpbuU9ZAoGAE66/9onziZ6mpyjlZdW1 -78bECsRPvC3dckAMcDME8Wr2QGgYfrMRzdoJn2jE1mAF5K9MsV4UJnpx1WwkJcSe -jcZUZkRzJgpag6BiSuFwOVcx2oY68dor4dBCBf8NDQYycie0H55UuN9UNOTLjYUy -dEIFFmIgZcexEsmgD4r0ync= +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCcog1Hy4YnApDC +2rpbFnyPEECRJCRWyXCfncS/3JJdF6RNQXKMfSnjctXSfyRDkKlsi5yqm+jDqUvF +A6dVYQ2PCXkEoXnMunXT5X7LpzNBK/2C4j6HrnXOO4NDqgcsAbWUNVzMIZw9L+hm +2i/b0AQnp3wNpWlbSjB3yUU6xgg8YIj32nepwpIvdgoYqO1tyMRo0F3W6jCr2YyO +OuXW8jbmbMfgeHUs2AlLOGj271qc6mmky2pFQE2rVBnBxk0BMz4MsrUpOOqaHd5y +C2ndorDVOWTp7DQmh4yjR6GUZ/tWe2+RSLvv6Ze+TKIt4U9BY8C0EWRfvt3Jwc19 +L6R/Y2QdAgMBAAECggEAGqEabOhXKG6xOc+B+1Z5WsFCmOpyGycoNJrYBl5zq8wH +LPNBjE0DQB4X6C+hdLM1erTJr9/N1OYFcbtLSaK0cWsE3hqt4Haa7amIwwrsFdpq +wUPBqcOpV6Sajnhru7mWq1nfKv1T2Ls6YbIajN5ytmMtHgzPOqwETXk0geeFIu5q +gP/aMJINGmEmOvw1ZlE6+x3s20l8wrVjqRPfYtKsuKh1Iuu+jWjZzR3AeASzu4If +kYJiaDwO5oeY+/bt/VFLClSwYMfcACNDMjptXSx92RrDRxnu4SPJ4SqQLms7Q+0C +X/TnkGAgvJJuRsrvSJftJiSVW4nOrKHvR4PoaUIVKQKBgQDMICBuFtYWNz8egm9L +VTs5BtM5Gn2gIgCu4cz7Ai9lmmAq/xyV5X2qImjeNIEcrNaxUXrGJTTR46PhN8Rt +SAqUMzcb1A038iShMr80N5ncDAOwNI3lYlHjHd2BqnHB+7wqIyEfCwgVmDQtePdt +X098GwitGx4DoiCss5JhSZGqmwKBgQDEcDAnP9YosbfNZKHvmvNvza4bMjEZ7mMg +tQh9cuGdtUC6Nk3qMFg652KNhZvg1W2T8JCQB6IuSoDDarc/nrTyjf1OTgrqfPqt +14vRHTzq5fY8ry7W5fgb1BMbKIQthKY4Z1MMe5C+pt90ofQU/iyVJob1TQLFgxs5 +qQ2GjKhbpwKBgFfFaKxtvViIIOfphhmKaJC40pI3RdVZSZnpFc7IvcCehMN4Nr5t +k39YlGcXhHzkBNzyACy7St8FVy48YIXs+D+JViTtJVHtGLsHqxe04L+xmtRlhK4h +fLx/1wMuWwPiTqJ/wHossCk47RawcRPia7cdmLl2c410ZUBdZo/WpoAJAoGAXvvE +POZAHnM4Zcc4CgyJk1EH07ykQ16ibek7TrVi5IgE4UVqzUdNEkZZwAaPxdpNXtBe 
+hlY6lFmQA22xZ8DMy8/eYKOZ4aJG8BIeWCHkF8zUEKnAY0bVfldAWcxwhTXzzagg +XxPMfHNh7xp0VEZGtmPns+rl4S0w1+OnV5zht1cCgYAQHOXPX1q02OoUy6czML+R +OxUYJ+a88JvZHeo7PEIgxgrmR7ilz778uZ6VYthijidj1JizkJsC7XwsTl1+gcpS +2BUCeKoriLgk7VTlbQKESg7t0TuyGZqvrKgXYFH7pfmQgWKPnV8fRd7neJTZTTU5 +519KLeSDjmOITErrQIVGiw== -----END PRIVATE KEY----- diff --git a/jstests/libs/localhostnameSAN.pem.digest.sha1 b/jstests/libs/localhostnameSAN.pem.digest.sha1 index 1852be474dab2..27fb851592b76 100644 --- a/jstests/libs/localhostnameSAN.pem.digest.sha1 +++ b/jstests/libs/localhostnameSAN.pem.digest.sha1 @@ -1 +1 @@ -378683F7C8F790D575291116028F47E9A6001AAC \ No newline at end of file +0CA15ACAD6F10DF5C671B6DC739B4A7B25F73782 \ No newline at end of file diff --git a/jstests/libs/localhostnameSAN.pem.digest.sha256 b/jstests/libs/localhostnameSAN.pem.digest.sha256 index 63817335da55e..8a6a740676997 100644 --- a/jstests/libs/localhostnameSAN.pem.digest.sha256 +++ b/jstests/libs/localhostnameSAN.pem.digest.sha256 @@ -1 +1 @@ -39A96BCD7FBB5D653D2AAC46F1FDDDE0C8FDD2B62E43F220F2A0627E3EEA8C30 \ No newline at end of file +E3F243A8F60DF7E8157AC3C06D3486824C9EB8C30F557921DEEF106A3A272734 \ No newline at end of file diff --git a/jstests/libs/mongodbauthorizationgrant.cnf b/jstests/libs/mongodbauthorizationgrant.cnf index 3c2a82b36cc20..08f6c4b7ff277 100644 --- a/jstests/libs/mongodbauthorizationgrant.cnf +++ b/jstests/libs/mongodbauthorizationgrant.cnf @@ -11,4 +11,4 @@ database = UTF8:admin [MongoDBRole2] role = UTF8:readAnyDatabase -database = UTF8:admin \ No newline at end of file +database = UTF8:admin diff --git a/jstests/libs/non-expiring-ca.pem b/jstests/libs/non-expiring-ca.pem new file mode 100644 index 0000000000000..542364b44870e --- /dev/null +++ b/jstests/libs/non-expiring-ca.pem @@ -0,0 +1,52 @@ +# Hand-generated file, do not regenerate. See jstests/ssl/x509/certs.yml for details. +# +# CA with an expiration date far into the future, used exclusively for testing client-custom-oids.pem. DO NOT regenerate this certificate or add any certificates to this certificate chain without consulting the Server Security team first. 
+-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIETEJLQTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO +BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs +IFRlc3QgQ0EwIBcNMjMwNTA5MTgxMjMxWhgPNTE5MjAzMzEwMzU5MTBaMHQxCzAJ +BgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsg +Q2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVsMRcwFQYDVQQD +DA5LZXJuZWwgVGVzdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALcpu7tjtN5syZJdK3hytuqbWOYHzTPTgx1L3EjONJu8FEmcOY94KBuG85+CfkoD +545dduCp2JHrEu29UCeV88baLa/XHQ6/yNptbhm3vS9Mo7wKPpdMvxHEUMV6AEVq +nXSmRlFumekF2ggxQtJsQ/i/gBCZRb/Z+En1mbSFjT9e7XZ5rzA1aB+aXyBaYY+Z +ajtdIJ+zOWKbo5wU/blDUK67vsAKOKrFFzo3RdSnGEpJ1WEe14nka40ieUrmiYVK +gG+sFd+92wrdZJOGnCvTL5PNnzx+4dmSqnC/tUKfSgbHn2g6pfljzhF8Md9WMKKi +aNbGQUPxDzACJ72WySQFYMUCAwEAAaMTMBEwDwYDVR0TAQH/BAUwAwEB/zANBgkq +hkiG9w0BAQsFAAOCAQEAd6MKAqauGVl3PE3xL0SYPiFgCAj5MXrHjJWhqmhjoKrM +8Pz+yCGEPTLRKIZIiwkC9Paq6ZOc+epsq13d+hVnGn5sOcJXK36w7wMV8liQeckH ++S1AYcUJSe+HOKDhWZ+RD4Qhx3gGZlTuKk7uy+ZJ1cxOPjH9RRgB9TYm211hoYd4 +MaQQ2Mm+6srYCQFPO05yfWy11kYSmuo6vYUEm/HU+CND3seVHs7+m8er3bKfarhr +62rNmzKWdFZRyk9Vufo19OCC0ryLTohYao68J1I/7ghCuv7daIfVutc2gwvTj+9k +PVukHno3wMF4J7ucic1eXm+Yq3t6FaW+YHjQfTQaxg== +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC3Kbu7Y7TebMmS +XSt4crbqm1jmB80z04MdS9xIzjSbvBRJnDmPeCgbhvOfgn5KA+eOXXbgqdiR6xLt +vVAnlfPG2i2v1x0Ov8jabW4Zt70vTKO8Cj6XTL8RxFDFegBFap10pkZRbpnpBdoI +MULSbEP4v4AQmUW/2fhJ9Zm0hY0/Xu12ea8wNWgfml8gWmGPmWo7XSCfszlim6Oc +FP25Q1Cuu77ACjiqxRc6N0XUpxhKSdVhHteJ5GuNInlK5omFSoBvrBXfvdsK3WST +hpwr0y+TzZ88fuHZkqpwv7VCn0oGx59oOqX5Y84RfDHfVjCiomjWxkFD8Q8wAie9 +lskkBWDFAgMBAAECggEAMUziYwXrYRMXDnZVhQ4bffpia0geS5za5b/Ngys9rc1q +CIYsWBmXchFY8c5F/9YdNYyiusX8rQoBBky2jnEmNJf1RCc6gwXMIj7iK7nxChIu +4CZBwqe9piKZOw4jlIul5gf7VV/XvYj9rsbTnZ/Wi9W/urgGtiUNV5rrzFNGOY/2 +6xMkT5Ng/vPJZgXR0nSxsKccFWglBJPFOcsTH4conAdDi9MEnenV2p40ryfECVKt +Bvpe9GFGtxsZxuIac8Juxt7XH75XOo1MYASytSt0S1Gpp6NJ8VKk/n35Ea29Cbtc +0Syw76/9uYy571w/SWQgcl21OusplMg9Dd88i2gGAQKBgQDfMUFCbaHguy8YEqGb +2OW+gQCoKM81+cLV4dgP61JYSmMZv+OzqqJ1PhQMxKPU7Fvl9LTr42YvEEjAXBKh +KP7iN+VdQRL520Y+cGcSucjPfRHru+4dGEJ+aPPuYG50HIbPDm0L1OaczZTY8hyB +cs4nqo1j8MJhF3S/AxFnLmdQMQKBgQDSFiuL85MsGqBUfrK8xR6nlj4Zi5x17nde +p8YWZs/DhTRPvRDNbuq77TfIXjMMyMKJsjLNmUEfdQuh6peEMotvpJionlsiTg8Y +efNDaQLseKKNK5/0Z7vSJxmR8nZIKjflLtMHNniWBHpHc8+0chKS0BDQ5KZQScCV +Gyt8gggo1QKBgB2QX62V6hBjmwxcQ23qYBxI6DZeGXxz1fwQy1boe+LYD6J0iYvd ++WEAVRWP4oesu0uNi32HhJyNUqWwTINuc7yxXL9qhEH2aqqQpwaS92eMkJgiL5tJ +AF3QNyeHPHpC4RSCO62KiPWSQbou79mxxF76t3nahVTpD3zRwjdhrSuxAoGAM50Y +w8Bqxuofu8KI3RG8r0WdKh9/qAWXNB0Z8IT/xDRknrZ/e0klyFfGXaau+hQUn0m1 +a4ecVUMnQXdmFCdq1Fnm235UO4Bb+xJy7nvNKRWWMgKmwJ//p+jNQmsEHkSpCNhR +JjN+urSM6iMMw3NHFuShTQDvz0ffVYKgU22K3aUCgYAA07eKDRBaKUzyfDTSG7UU +d1Gri/rw0PDyyxvl+Dn7mylH2rAjeDZxjcrYs0ZoXT2/5A6Mg+C4qNfFjv1PaLKa +W5+meJUkQG9Jv8Wxbbcz2ewhfky4SfPwTpgCycKiSiU4If1nWBLbEuokhHF4UXeq +I2mtmDuHNCXggaMWM9tnQA== +-----END PRIVATE KEY----- diff --git a/jstests/libs/non-expiring-ca.pem.digest.sha1 b/jstests/libs/non-expiring-ca.pem.digest.sha1 new file mode 100644 index 0000000000000..87ce842de1411 --- /dev/null +++ b/jstests/libs/non-expiring-ca.pem.digest.sha1 @@ -0,0 +1 @@ +1459D973E8240972BD740F43356804E8E7E39BA7 \ No newline at end of file diff --git a/jstests/libs/non-expiring-ca.pem.digest.sha256 b/jstests/libs/non-expiring-ca.pem.digest.sha256 new file mode 100644 index 0000000000000..7c1c23e4f9b37 --- /dev/null +++ 
b/jstests/libs/non-expiring-ca.pem.digest.sha256 @@ -0,0 +1 @@ +EC5C4791F4FE1CC67493714F5441A7B14E831071078B6D2725E3F1CE5080A325 \ No newline at end of file diff --git a/jstests/libs/not_yet_valid.pem b/jstests/libs/not_yet_valid.pem index eb22af451b6e9..db2412b1cdd46 100644 --- a/jstests/libs/not_yet_valid.pem +++ b/jstests/libs/not_yet_valid.pem @@ -3,53 +3,53 @@ # # A certificate which has yet to reach its validity date. -----BEGIN CERTIFICATE----- -MIID1jCCAr6gAwIBAgIEJLBcqzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID1jCCAr6gAwIBAgIES6i6UjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNNDIwMTI5MjE1OTQ0WhcNNDQwNTAyMjE1OTQ0WjBzMQswCQYD +IFRlc3QgQ0EwHhcNNDMwNjExMTQyODM5WhcNNDUwOTEyMTQyODM5WjBzMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEWMBQGA1UEAwwN -bm90X3lldF92YWxpZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL29 -rHDLieL9YgGh1QmOKxB0MWXKqDAZ3/ZBdVWyV3ALLmqRO3sJTNiVeTdLG5LN87bW -8XfZrim37qpS+mh/NXJHq6MhTq7KBQuHm8/NjpRwKTQAI7gXvd7ItFUCPUn72fs6 -S3Yj1KMyG6oUuG/H8UTsH1XNZqCDbUMM27puAG5L4gg0ggu0PSXS3zR8R0Ljafkx -N1OGqaQ2hY9p10e8ka/YP0kBR/B+VSSngk1xUx+z9P1+DOW9sTJ3egj08mH5hszc -SXJmQqmbj6ybRxQifMCvHKvOaB/C26ifV1cfn9CS8JP6dhJrX5Y42d+ufZJ2KXHZ -YJTkILQJlD7dHv184LMCAwEAAaNxMG8wEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYD +bm90X3lldF92YWxpZDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALtj +wVivlSxj0vQEqUivXQso7ZGc+V6kvZRGzJWdv9C5IGhisk0OUSA7dOVpqaKlo8VB +kXXkWpq+d+ZeU5PmlNOen/Txrd5z+rLbuFw7lHcPudO6k/2gXeeLdCPxnA3jZkXl +AHARqS+T+eTg1sCGN2dZ2rMeGv1zpzDB1rx4kxBXumw8xOnVQ2GhBm6nQypYW8kY +roaGwktF6hKekwIXMoPeirSkQ+v3KwtTeAQzxZfjsfYgC28u5qOFiu623S6HgSwv +4agZgsJjED8JZh/u9u0901NGTB41YqGpvexIc/+XgSt4BLqjsK+VSfIlulvBMb4a +I+h/8m7cm7nHhPBmjDUCAwEAAaNxMG8wEwYDVR0lBAwwCgYIKwYBBQUHAwEwGgYD VR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMDwGCysGAQQBgo4pAgEBBC0xKzAPDAZi YWNrdXAMBWFkbWluMBgMD3JlYWRBbnlEYXRhYmFzZQwFYWRtaW4wDQYJKoZIhvcN -AQELBQADggEBALBm9gBCGg6cM/aCjypxlIrRqUVuNAK1jZvZyRFIZhLtLw5FN/+o -JKzYjqs9gQhlNoFG+/41c59Afu06/Yy3DPi/VKPJ2TZDr4DPCBchSKdjDUEXm50B -0fHymT/hjhMPsF1tclgwINPCwBeReSKOfXxjSAQyjC7pSQEF3Qr5hYXQEP38+NSW -eDg+5S0Y1+QDpATYanJCUzBRD4ZRvoy7VsueszMNtygPEQYorE6GnM7CJ5z1b4j4 -az7/LqVAZEOW6sxFT3bPuPtsiMEjbWxXcqbfbeb6WRntGG+YfvSq9Al1SRBAUjcI -w5xxu9jynyHZyHc1BC1lKP8XY/GFWFG+/nM= +AQELBQADggEBAB7PsAcFFKW6UqJ/JyztVPV73Id/5d+epWQjZhljKKlTA9PFpmvo +RGWwszklbOe4K0dZJSBnxhl8QNMo6A90z+Aff73bWbMqjEfaF03AVMv4uM+DpgQj +LbMhLwcATgGC16tbDOWxbwJs87yonFRgrqOFVxuAnPfUu5hSGfi4aAEsyfMmoMue +jCnLIuKJm8Ra+KDq0RUxcWYu47C0+vBTjskR9TIMESYXF5ljTpdph6f3ZNIqzNg5 +S7S57Ym+J+RoBo04jlKlewvjHkdiupcvbbJUsnfus1fSQEKPpn/gPYeN6dl1KUWp +bsC9c+prApS2XSBC160IEuz4Fg3/Hx0UUmM= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC9vaxwy4ni/WIB -odUJjisQdDFlyqgwGd/2QXVVsldwCy5qkTt7CUzYlXk3SxuSzfO21vF32a4pt+6q -UvpofzVyR6ujIU6uygULh5vPzY6UcCk0ACO4F73eyLRVAj1J+9n7Okt2I9SjMhuq -FLhvx/FE7B9VzWagg21DDNu6bgBuS+IINIILtD0l0t80fEdC42n5MTdThqmkNoWP -addHvJGv2D9JAUfwflUkp4JNcVMfs/T9fgzlvbEyd3oI9PJh+YbM3ElyZkKpm4+s -m0cUInzArxyrzmgfwtuon1dXH5/QkvCT+nYSa1+WONnfrn2Sdilx2WCU5CC0CZQ+ -3R79fOCzAgMBAAECggEABF5qqwo0uZJGAG2efyLT4QQb4xEggEMIQh5NulRp6P2I -6xE/Al07dL00Vk6XavDtDKe0VaMgfHtstmVEuMU9EM2PqSjqK+Ig7ZwIvWcpQke2 -2yipqeJ6D7glVtO/X4W6BivYcvbuPXe45VimyoQbDRA2xH3/P/DmwiV4pblEr0Vr -2AdQ9ZsV0t6MekKnQidw27U/UnA3zE8i8PqAfSCIzBHKKntjdRVWJVAs1eVInEDk -erHzvu6zkMairlIY2YDUmRH+XKpqj1cOWDyrBLKLA8GkY8fbKn42f7MgbuiQBbOS 
-1xH6w1U943+LptUm2VP3iczCLucy6letfNtJcn5yUQKBgQDt+vs2ObQ9GP98msYx -4Ut9nTmv0RzFyGMfeua0GUYtryqmUDBHsy6YbhnXAtazIEPQ0hWF0f4l3EGeJ+kB -dXvrcSn5YIRKlo6xNOM8yDGOsRLwVjOcEeXHKmopWX0I/5FdIkiMo392Knrw09BM -WHDtd9bFokbNRZyecVdaSB50qQKBgQDMG53mYjGiaWQJZcbKbdvzSiLI4KWQ+w1g -SuTyC9ml5MGTf0D6uw5iLhnxg5X1aN/QxbcrfIPpwAmAyP6CjNzurC3p1FZ6vszL -lkDnSXiHM/kdrEDkbKILLVlK0zk/veeHHsRujfdpCnWdUaB+gU3d3HDhPV1mK6ct -sj9QBvzn+wKBgDQpqDipi009FnscfcbfKCnfdY6JGnJryvZSfREK3SwUUhfkScDd -kKAOuhE76Q2YS+UQt3D+p2NrFOLywor0UnY39shXlIe42owTGJ/xDZPGUm1lp6hU -7/Wo4V4w5Ew9oII2iopxJ/Yht5LkCqtBU3dppiTpvfUuhfbgxxHd7vcJAoGBAJdr -9ZyxRHllDp4aL3yGPCMl4OB+KNaCKc5CQc+AExwLtcCjK6XXmDgmU4DlSh8iisjr -pZRRb9u+aE5uzsIzlsRKYFWU/gNaAb0X31a3Hv/PAwWMgSf1n9IC9reYNGShkl6f -6MLL97yp0c/AevzfGNF7dRLk36GoyQIRtv678Gx5AoGAQsKJ8fvOSgXiRUI31Ers -99st2+viPOpAgsTLQICJMCkj+Sp//KmRo1YrieYpb3zmIWxhAqQ3HwNcTRq+OKSH -ix/ABTytekH/56+uvzUcNIzuwbk/GKwoXI6E6TA58mnsh4dd59ZlPTtDicK/Glsf -Dx0G3fNCJsrG6CrH1EM9n/I= +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC7Y8FYr5UsY9L0 +BKlIr10LKO2RnPlepL2URsyVnb/QuSBoYrJNDlEgO3TlaamipaPFQZF15Fqavnfm +XlOT5pTTnp/08a3ec/qy27hcO5R3D7nTupP9oF3ni3Qj8ZwN42ZF5QBwEakvk/nk +4NbAhjdnWdqzHhr9c6cwwda8eJMQV7psPMTp1UNhoQZup0MqWFvJGK6GhsJLReoS +npMCFzKD3oq0pEPr9ysLU3gEM8WX47H2IAtvLuajhYrutt0uh4EsL+GoGYLCYxA/ +CWYf7vbtPdNTRkweNWKhqb3sSHP/l4EreAS6o7CvlUnyJbpbwTG+GiPof/Ju3Ju5 +x4TwZow1AgMBAAECggEATeVS2zXp8dDxQUSqxi83o0r5Lp2tP72FnRytMMipku9R +3HKiocuAx8BPgIoi3Ryz7myqNfKeA1OH7fhqD8I7PZjj72ODnkRiA3W4toaB2dhs +q+oUWMsQHg9nUQW/lDpiKk3el7ePt9pwd8dLHi7s2waMFgc+uvsXlfoN9Ly+jXzk +RK49fi8njY1Bf0acSQLJvaDrSc4rnLipY8/WBRVnWlZY0FRABQ4dsI8bHzxsRgNN +3G2CT0zveBqY8egodwRkGqde4yYVnz2buerCc+YkA6tbBOB5kcX1IKa8zOH0HzHg +2ngWFPMcPGC08FROdXS0opAz6ORTFnSHksh6zyoBxQKBgQDc/midtYhuB6zDnAsw +bnfAPkLRo4f7fFwgsH0IdwzSGstCphV7HWTXi9dJsQo59LrKnEFvjy6OGDe7Vzt/ +d4+/Lg7rs0G5UHaMVEypAQe0bxRF+zvdnIWFC/aksiWxTtXnq0wWVv17VRW+mSSh +ZNvI/hX+aapHx2E/CZ4DML5GowKBgQDZEqe2qbVbkfiOEXIevyf9JOQ7Qy/j67/w +8JrWi8w2QiMpLBndYh7eZZIA6nAhWEU3wOc4C0WVn6qa3GXaYcILPb3KQ+sNfo+B +R+cuXbdTsOIjd3zQ6KAKxzaTay45hbpRCa/BWXrgpSqmUQO9cz5vRNygxcna2mEP +UghlXSuHRwKBgFoM7PI+s54qUHLQ1hphGTjj5fRoz5NhhGTppblaUYYX1vWMmbo2 +Kw5N9GcZ1hHxsF+5NkzQKmdKZQNYs2zoelGr4faXb9OkfvoFq8s+GTakAL3XdviB +vEKPsmxAD23lsvIY8gM0ZUvpStgErVF6uLh5GNs3kWR4UR8PuyyPoylDAoGAaMP2 +KdSycOV8fqSw3WII3MZHWMrOfEQ1uQWG1XFXOACnk6FzQkOu/ksrMkHzqEQ/8+6o +KYZXKSWMY31nswRncKF1zf8FnNeuTwjy7I/SgRPnsJJkrTb4tvr6vh+GicCe8amp +J7oV8gIFGYwUMAVE5tLnYLjU+UlYetpuru1OqtUCgYBe6k9r9O+VJAV3OJLQSKc0 +OfKTBcsGBAAzSiWQTvkvlyq6J0yK7qgonO+3jY7hlDm3TUqJo8ZGVck5XG7sJNMm +pjP5B9gFyFmiDcjE0rP/O7H5d89xm6TLihpNtNAr645Oo3zggUvLsks679ij4YRy +ruwgwxo3b3z8nK/splKurQ== -----END PRIVATE KEY----- diff --git a/jstests/libs/not_yet_valid.pem.digest.sha1 b/jstests/libs/not_yet_valid.pem.digest.sha1 index c2b282c0ebd2d..fe10b1ff56073 100644 --- a/jstests/libs/not_yet_valid.pem.digest.sha1 +++ b/jstests/libs/not_yet_valid.pem.digest.sha1 @@ -1 +1 @@ -385D8CBF6B41B08FAE5C0361A9F3E08FB278B991 \ No newline at end of file +CCE5D2D4BB9F8BB8780BA9A691A27E6C84F5536F \ No newline at end of file diff --git a/jstests/libs/not_yet_valid.pem.digest.sha256 b/jstests/libs/not_yet_valid.pem.digest.sha256 index 4c735b56a1078..9d2404eeeebd6 100644 --- a/jstests/libs/not_yet_valid.pem.digest.sha256 +++ b/jstests/libs/not_yet_valid.pem.digest.sha256 @@ -1 +1 @@ -6F8A24651A0D6EA91FD82AE20C96270138E88C0E2D9B7B1C831EF571EC8EFF52 \ No newline at end of file +0E0165718DF08EAFCE8FB65464E3E00F51EA21C585462747846F25EA185375B0 \ No newline at end of file diff --git a/jstests/libs/ocsp/ca_ocsp.crt 
b/jstests/libs/ocsp/ca_ocsp.crt index 67ba4dac386e4..7b60cf68f20bf 100644 --- a/jstests/libs/ocsp/ca_ocsp.crt +++ b/jstests/libs/ocsp/ca_ocsp.crt @@ -1,21 +1,21 @@ -----BEGIN CERTIFICATE----- -MIIDeTCCAmGgAwIBAgIEaP5C8TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDeTCCAmGgAwIBAgIEUlBUJTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjB0MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjB0MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO -S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS -ASa6t/0P3MHRRhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu8 -5+lX8w27vAz84qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2R -qn7674TGn5eNZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r -5FFGfxgceDzN6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gw -yRlU9JfR00iphro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF4 -1XzEO+BKjUiRfYbRTpWRAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBAKRiAji29p93B9cA/9gjE5Zlb39LRtYA5+RBC0WTn8u5ku// -XGpz3P8aDpO0BFAd/gNiyGRWQ7blHj0Mn02QHqW7T/yyyE0m/fASdXIoJApFDEtK -n1mUcmVdiiIlQvXo/oi1RTwxckQNqFYm2M/XSrg0HLFVHsTQrdDskbs0SRQZK3qv -5EluYHu7UvKAAmzepZhtyC1VNrZbP97cJ03ZupUyMo6NHLk0WxFHvYM3U7K/W7ui -YuFWenPWmESFGHR923lF6HjzvkhHYXKPaYhTnKZJ1kjwhYfcdfSmbizJBBlryzEL -+0KtahO1J5NBwYyVgo+Pkv5OEWwunzrpsY+TLR4= +S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDj +KEjkptHYsXzYYRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiN +shU62bcM+fWENDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5 +WnK4C369pesQsZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7UR +H4IZmWqR7A35mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9Gllbp +UwAqUZkf1o10Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9 +LWIzHFLF2xynv9NwOZ9fAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBADidce5kTcwIUD+CHiJ04YFZEpxSyMCk2R7baZuZS6igngHH +z05oz4rnvavuaudFwvgj5f630pbel140DdWH/p7nsEsW6QEr9tThWDeabRiRcpq0 +ELd9kvYOGLlc0TZBAuFFLu60fe/NiGlWvALszIY166Bq+Y3xoHiLy5SJ56+O2REX +AKnzx2IIiDkz+GPHcQnjaTIo8t26I4ArHjekh1DQ0soUtzav+IBZUgb6H0Q12NH0 +4GVTZv5fLAMufqzCzhzmbU28p45Hrj8ZFErim2ES2l/akOHJUbNrbMTz63G8ZbJk +Inl2kjX/FBwuQ2d1idIJPU4B8djmQrJrb7urcog= -----END CERTIFICATE----- diff --git a/jstests/libs/ocsp/ca_ocsp.key b/jstests/libs/ocsp/ca_ocsp.key index 8f2faea5c0988..6b2e18401cdb2 100644 --- a/jstests/libs/ocsp/ca_ocsp.key +++ b/jstests/libs/ocsp/ca_ocsp.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDSASa6t/0P3MHR -RhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu85+lX8w27vAz8 -4qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2Rqn7674TGn5eN -ZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r5FFGfxgceDzN -6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gwyRlU9JfR00ip -hro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF41XzEO+BKjUiR -fYbRTpWRAgMBAAECggEBAM2sFPs/oNe9NBE6mMf5wU36lPZlmW0WQqDFZSYVdECB -TgU+DhNaqA5gp7OG3e4NVG/xjYX2ZBfeN7YgZobLJgzzR0UE2J2L58mu0JQ546uX -UrRicezkwLoUjWoC0CnqoXTNT2mB1T8WJD52/oVEeUjxpQ/NdwEmQLmWM3G+mY6P -JQgxk8CIFVu1P0qajjmT/AjVEwaH/ftQN0rLFomr2UVSJ7XcPD4KzqXTOu5BahIp -uoIEtc84dzPQIvh43Prjwea313zFKiGVWWoS514PpySi9CPk+Z8N7kY3r6ENHnXP -MLptUEUAQ9IQsKBdQemF4e4U1YY2o43rxU02K4nwgcUCgYEA6HPo1H4fN6Ox22VZ -KwzdVLJNojVYvV51C6dLSh9r7TQiomKwbMrfoIN0TUVb38mlhSQrlBCgpgJW+HOf 
-U6cO3SdWvaprCcKLpSPWfy30nAW7LfzExNLF6+kLXIhUse1T9nNC3iHnjDP/ZCo+ -/QEJXu0+tkAwv46k/EYYfBzzkycCgYEA50cZ3o4QekPGHWT/9ioGwFjbl94c/9wW -t5YS3ELeuQS16ZVsQlwH530Jd4Md1JqtL2024a/aMu5BEkV94ZgIu/xq2t0xOgUG -CEwW8abfEIzXArfCMJBlSV63ir/Rm7ZNrwZNtf+fu5gTtSShNAIOEQ2gQvjLGAlz -MQ+xgXsRpIcCgYBub6MOUV1DceHt+hiC+3mr44o+plminl29S53ZeVQtPbe2rmxj -Q387Izj9/RXJHGQm/Sg3EC9Cr5niMAxiEdxd9XKgaXaxDFyL+JrPkWXFOnPTRWO2 -nuypR+6954dVgQuC6v+5ySHY2ltbmzaRvQxWrSA+NgDzATIhKKw5SwftwQKBgQDP -r8XHWYPqh5oNI3nHsQ3HBQSNaGRaT2YHDrl5GoOvWk+RpyFEpihi9KHftNrA5PXI -tSannKcxPUsqAUV9pG2TgqTiTee61nAIW2vvLY766b7bLfEwczrBnFDuRDsSva8d -EWgPm7HxYCuya2ZnscC4B6h2+7xFpZbP4+ve/ollWQKBgQCf+2qZHTIhaJGZwpyh -hROhQCJimw7ECTkuP1vEu2guhu1QKMCp8yD/Li7S+SdzZp1L4NPbMswJfy1wXkfj -uyWP2mRmQuWg+RUPFUmvSp3eP6LKBHlmrBEfVI2lBLTaB+5MG84a6r8OaIbJXd6s -bbDRynzmmyQ7u3wKCA8DfMpHBQ== +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDjKEjkptHYsXzY +YRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiNshU62bcM+fWE +NDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5WnK4C369pesQ +sZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7URH4IZmWqR7A35 +mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9GllbpUwAqUZkf1o10 +Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9LWIzHFLF2xyn +v9NwOZ9fAgMBAAECggEANiTTAxU0LYdYr6arcnIHpe4n2W2btf+9NevLNjSYBhGw +ZQOi/ezT5mG6Eu4PNZ4lyHhrEQT12bT2rVVZP3SPH2DuMG+r9TeSqXF3tAUuj0Fy +1ToqIfUtqT4R3Arxuz9GlUuWzoGG6yNSHT+IoR+3eHBhUMLTolaUVG7yXRDGcYv2 +9yPN+dGx5SBeM/SERLBVZsze855RQNvlmf9yd7HvddviS31JqWdRNNS+oXSecLfk +FuWsUOd+Bhg+0s1cYFOw1UcwA4tytXcpuwtsNbe5hVOR+aV1DphMOU5YjDzPzwcD +MH6b/3y6vEqk3tyY8fBd+6USrE/7UiWKEKiOu7n18QKBgQD4ndfS4W/pzGP0Wo0W +DjedsrdNgF1ZwVh1Uv/94D78uYcUEgKwBxUWXJGW8H6QeHytzUBEAPMZbpeyoY78 +X7vdIqgoWt+A865BeCL6LoAbz2AQbK+jh9mj2MDr3dF7rwzc7TXLa0YgetrIjDCv +Zpr18EWr9RfyiYTJfRqJv9e4CQKBgQDp50szNC4OEAJdh/H3rKTC70ICNpb0602g +pmFyllEqvWo7eFT1GDfawGfS7Wc85CWVbNMx7h65PLFxLQsCecXCJZZiwPvVQsqm +xXYA7ClCvGEvXglct0ZJvMk+UHgjusT8YDmPwGXNuu3y31pUktQvD0fOmu1sDKqO +hXmn8wFmJwKBgQDzb0LD10AI0rxQqFWbcgJdJIA9n+JIH3xpLQNB6FBkj5lHOrds ++xJX2f4oqIWVGpUibWZu7+sZTOO24mCql2bRCb1T/l742iIXMvY0uFFyaaTrkDCO +1Fd0Cqokigczr1zN+msBKqH47bLKShLlvHchN8pQrOVs8+CUli3lEXa9WQKBgQDW +mFSe+q2SS0/QPexaPjO/gME/xJPEyqkizeMzvQNkwflX7HONlhWQhdv7YcHI1MxC +hmBRO9VGP4/QdPHX6J7uG4wYuFOT+j5wuVMlT4YfazOCwLS4MpGzDxhXKn6+0Rjv +Lt1ArNT55hlvLUnzs+4l6tAHlo5jBk/oiD7wPnu3GwKBgQCsPSbbYcC2H+xHNkPW +cK4cisQQAeNJXLtZUElw/OePDTMgo2hMnvKkep3LI3WqwJ208Uo8DOW5E+t1GC8I +61ENw2rMf1z8uaXLj5IbJPAGsgYvEef7kNthvtVl3XkTRZ8K4Sw+MGWnSpMOcv/n +AKUlIVYnLie1Kwnu1xD6LRV40w== -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/ca_ocsp.pem b/jstests/libs/ocsp/ca_ocsp.pem index 9f57d6aa4cca3..322c9bb255d5d 100644 --- a/jstests/libs/ocsp/ca_ocsp.pem +++ b/jstests/libs/ocsp/ca_ocsp.pem @@ -1,49 +1,49 @@ -----BEGIN CERTIFICATE----- -MIIDeTCCAmGgAwIBAgIEaP5C8TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDeTCCAmGgAwIBAgIEUlBUJTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjB0MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjB0MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO -S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS -ASa6t/0P3MHRRhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu8 -5+lX8w27vAz84qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2R -qn7674TGn5eNZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r 
-5FFGfxgceDzN6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gw -yRlU9JfR00iphro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF4 -1XzEO+BKjUiRfYbRTpWRAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBAKRiAji29p93B9cA/9gjE5Zlb39LRtYA5+RBC0WTn8u5ku// -XGpz3P8aDpO0BFAd/gNiyGRWQ7blHj0Mn02QHqW7T/yyyE0m/fASdXIoJApFDEtK -n1mUcmVdiiIlQvXo/oi1RTwxckQNqFYm2M/XSrg0HLFVHsTQrdDskbs0SRQZK3qv -5EluYHu7UvKAAmzepZhtyC1VNrZbP97cJ03ZupUyMo6NHLk0WxFHvYM3U7K/W7ui -YuFWenPWmESFGHR923lF6HjzvkhHYXKPaYhTnKZJ1kjwhYfcdfSmbizJBBlryzEL -+0KtahO1J5NBwYyVgo+Pkv5OEWwunzrpsY+TLR4= +S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDj +KEjkptHYsXzYYRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiN +shU62bcM+fWENDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5 +WnK4C369pesQsZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7UR +H4IZmWqR7A35mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9Gllbp +UwAqUZkf1o10Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9 +LWIzHFLF2xynv9NwOZ9fAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBADidce5kTcwIUD+CHiJ04YFZEpxSyMCk2R7baZuZS6igngHH +z05oz4rnvavuaudFwvgj5f630pbel140DdWH/p7nsEsW6QEr9tThWDeabRiRcpq0 +ELd9kvYOGLlc0TZBAuFFLu60fe/NiGlWvALszIY166Bq+Y3xoHiLy5SJ56+O2REX +AKnzx2IIiDkz+GPHcQnjaTIo8t26I4ArHjekh1DQ0soUtzav+IBZUgb6H0Q12NH0 +4GVTZv5fLAMufqzCzhzmbU28p45Hrj8ZFErim2ES2l/akOHJUbNrbMTz63G8ZbJk +Inl2kjX/FBwuQ2d1idIJPU4B8djmQrJrb7urcog= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDSASa6t/0P3MHR -RhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu85+lX8w27vAz8 -4qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2Rqn7674TGn5eN -ZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r5FFGfxgceDzN -6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gwyRlU9JfR00ip -hro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF41XzEO+BKjUiR -fYbRTpWRAgMBAAECggEBAM2sFPs/oNe9NBE6mMf5wU36lPZlmW0WQqDFZSYVdECB -TgU+DhNaqA5gp7OG3e4NVG/xjYX2ZBfeN7YgZobLJgzzR0UE2J2L58mu0JQ546uX -UrRicezkwLoUjWoC0CnqoXTNT2mB1T8WJD52/oVEeUjxpQ/NdwEmQLmWM3G+mY6P -JQgxk8CIFVu1P0qajjmT/AjVEwaH/ftQN0rLFomr2UVSJ7XcPD4KzqXTOu5BahIp -uoIEtc84dzPQIvh43Prjwea313zFKiGVWWoS514PpySi9CPk+Z8N7kY3r6ENHnXP -MLptUEUAQ9IQsKBdQemF4e4U1YY2o43rxU02K4nwgcUCgYEA6HPo1H4fN6Ox22VZ -KwzdVLJNojVYvV51C6dLSh9r7TQiomKwbMrfoIN0TUVb38mlhSQrlBCgpgJW+HOf -U6cO3SdWvaprCcKLpSPWfy30nAW7LfzExNLF6+kLXIhUse1T9nNC3iHnjDP/ZCo+ -/QEJXu0+tkAwv46k/EYYfBzzkycCgYEA50cZ3o4QekPGHWT/9ioGwFjbl94c/9wW -t5YS3ELeuQS16ZVsQlwH530Jd4Md1JqtL2024a/aMu5BEkV94ZgIu/xq2t0xOgUG -CEwW8abfEIzXArfCMJBlSV63ir/Rm7ZNrwZNtf+fu5gTtSShNAIOEQ2gQvjLGAlz -MQ+xgXsRpIcCgYBub6MOUV1DceHt+hiC+3mr44o+plminl29S53ZeVQtPbe2rmxj -Q387Izj9/RXJHGQm/Sg3EC9Cr5niMAxiEdxd9XKgaXaxDFyL+JrPkWXFOnPTRWO2 -nuypR+6954dVgQuC6v+5ySHY2ltbmzaRvQxWrSA+NgDzATIhKKw5SwftwQKBgQDP -r8XHWYPqh5oNI3nHsQ3HBQSNaGRaT2YHDrl5GoOvWk+RpyFEpihi9KHftNrA5PXI -tSannKcxPUsqAUV9pG2TgqTiTee61nAIW2vvLY766b7bLfEwczrBnFDuRDsSva8d -EWgPm7HxYCuya2ZnscC4B6h2+7xFpZbP4+ve/ollWQKBgQCf+2qZHTIhaJGZwpyh -hROhQCJimw7ECTkuP1vEu2guhu1QKMCp8yD/Li7S+SdzZp1L4NPbMswJfy1wXkfj -uyWP2mRmQuWg+RUPFUmvSp3eP6LKBHlmrBEfVI2lBLTaB+5MG84a6r8OaIbJXd6s -bbDRynzmmyQ7u3wKCA8DfMpHBQ== +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDjKEjkptHYsXzY +YRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiNshU62bcM+fWE +NDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5WnK4C369pesQ +sZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7URH4IZmWqR7A35 +mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9GllbpUwAqUZkf1o10 +Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9LWIzHFLF2xyn 
+v9NwOZ9fAgMBAAECggEANiTTAxU0LYdYr6arcnIHpe4n2W2btf+9NevLNjSYBhGw +ZQOi/ezT5mG6Eu4PNZ4lyHhrEQT12bT2rVVZP3SPH2DuMG+r9TeSqXF3tAUuj0Fy +1ToqIfUtqT4R3Arxuz9GlUuWzoGG6yNSHT+IoR+3eHBhUMLTolaUVG7yXRDGcYv2 +9yPN+dGx5SBeM/SERLBVZsze855RQNvlmf9yd7HvddviS31JqWdRNNS+oXSecLfk +FuWsUOd+Bhg+0s1cYFOw1UcwA4tytXcpuwtsNbe5hVOR+aV1DphMOU5YjDzPzwcD +MH6b/3y6vEqk3tyY8fBd+6USrE/7UiWKEKiOu7n18QKBgQD4ndfS4W/pzGP0Wo0W +DjedsrdNgF1ZwVh1Uv/94D78uYcUEgKwBxUWXJGW8H6QeHytzUBEAPMZbpeyoY78 +X7vdIqgoWt+A865BeCL6LoAbz2AQbK+jh9mj2MDr3dF7rwzc7TXLa0YgetrIjDCv +Zpr18EWr9RfyiYTJfRqJv9e4CQKBgQDp50szNC4OEAJdh/H3rKTC70ICNpb0602g +pmFyllEqvWo7eFT1GDfawGfS7Wc85CWVbNMx7h65PLFxLQsCecXCJZZiwPvVQsqm +xXYA7ClCvGEvXglct0ZJvMk+UHgjusT8YDmPwGXNuu3y31pUktQvD0fOmu1sDKqO +hXmn8wFmJwKBgQDzb0LD10AI0rxQqFWbcgJdJIA9n+JIH3xpLQNB6FBkj5lHOrds ++xJX2f4oqIWVGpUibWZu7+sZTOO24mCql2bRCb1T/l742iIXMvY0uFFyaaTrkDCO +1Fd0Cqokigczr1zN+msBKqH47bLKShLlvHchN8pQrOVs8+CUli3lEXa9WQKBgQDW +mFSe+q2SS0/QPexaPjO/gME/xJPEyqkizeMzvQNkwflX7HONlhWQhdv7YcHI1MxC +hmBRO9VGP4/QdPHX6J7uG4wYuFOT+j5wuVMlT4YfazOCwLS4MpGzDxhXKn6+0Rjv +Lt1ArNT55hlvLUnzs+4l6tAHlo5jBk/oiD7wPnu3GwKBgQCsPSbbYcC2H+xHNkPW +cK4cisQQAeNJXLtZUElw/OePDTMgo2hMnvKkep3LI3WqwJ208Uo8DOW5E+t1GC8I +61ENw2rMf1z8uaXLj5IbJPAGsgYvEef7kNthvtVl3XkTRZ8K4Sw+MGWnSpMOcv/n +AKUlIVYnLie1Kwnu1xD6LRV40w== -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/ca_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/ca_ocsp.pem.digest.sha1 index 112d057d8a9e4..1382923a4b93e 100644 --- a/jstests/libs/ocsp/ca_ocsp.pem.digest.sha1 +++ b/jstests/libs/ocsp/ca_ocsp.pem.digest.sha1 @@ -1 +1 @@ -BF8C557A0367B29F817643DB7F1786C8F9E0EB29 \ No newline at end of file +D7AAF4260C327731CEC6EAB5F679CFEE8C560A82 \ No newline at end of file diff --git a/jstests/libs/ocsp/ca_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/ca_ocsp.pem.digest.sha256 index 508eb3bfa2d45..fdb0aedc04849 100644 --- a/jstests/libs/ocsp/ca_ocsp.pem.digest.sha256 +++ b/jstests/libs/ocsp/ca_ocsp.pem.digest.sha256 @@ -1 +1 @@ -D783E2DACE3BEA1AD49CDAE84E1F1CB54ED79C97A76DCA8C19FCC4B60A6C3CD4 \ No newline at end of file +C82EE66C7DE8A0B9E7140C5BD0084BE4F52147F121A5B958B327148849A6557E \ No newline at end of file diff --git a/jstests/libs/ocsp/client_ocsp.pem b/jstests/libs/ocsp/client_ocsp.pem index e7ffb1b5126e2..510f0aec28bc3 100644 --- a/jstests/libs/ocsp/client_ocsp.pem +++ b/jstests/libs/ocsp/client_ocsp.pem @@ -1,52 +1,52 @@ -----BEGIN CERTIFICATE----- -MIID+jCCAuKgAwIBAgIENMutCTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID+jCCAuKgAwIBAgIEYUoivDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBiMRAwDgYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjBiMRAwDgYD VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTIwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDLCW7KuORvLi/8ZPwbAE3/skF9 -JqAMfShWEPVpDmlyabiR9B+DkKps0e+tF/T82ZPzKyLzCQJoZCtxFLxlvEx2OnOW -5v89ZMDackJf5UmGP/oeZYWsCAHmu86x6sXjrmTfXtO6e7pf+GIyeujVrxb0fTZs -6TYKVLlpr5GFWslsaLaEUV8cDTQQj0WvlYX3rgKeQ1jd3B6qn9X1CU3W5Grjg/D8 -oRjygSiTEJE22tJ/IlVDulGTgAZBXewmO5TZsHC7+EJfG2HdmQJsgjNJm81W5GAq -PEmsHlcZ/jlKa4fMLw71a1UEFvFaTA1QWr+4StVTU1sk7qdzjoCTkkvVjXohAgMB +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDP+anHp2PcAacpG70/Xc49/EbZ +j1n1zOi+rAyh/Hrws52uhLClhkQ6qzaaZlB/wOG84YZvBTtP6L+ZHB9rysnGEIzx +Zstw4Wg52CRs+D3VPK+FYpAAMQsdiNQn9YuBt/DNYuDeYuStrY+gFFmFR674Cj+z 
+hexFHiBIi0wtQmv5Lx0B1SGnVRAkG38aEUYVtDDZgSKFFCHHKga7q6MemHiuvqvG +vxRyEtPaSTA5OnKBxRT+tCPhQWGR8p4X1PwQRSTw1LpP6ZYpKtIMdImDbEWIH6ys +PCOxp5ir07uUAvoclgmjzwCy4f1LI0KyL3IgtHtcKPjx1T6P8bhfSekAVRabAgMB AAGjgaUwgaIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0lBAwwCgYIKwYB -BQUHAwIwHQYDVR0OBBYEFAzQKqv6kSKcIz85diU9ncsO9yzRMDgGCCsGAQUFBwEB +BQUHAwIwHQYDVR0OBBYEFKZns1Tv+GB2VXC/nOoZWLULWSITMDgGCCsGAQUFBwEB BCwwKjAoBggrBgEFBQcwAYYcaHR0cDovL2xvY2FsaG9zdDo4MTAwL3N0YXR1czAa -BgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAAbl -SGWOBNAd7R3aGyPQiqqOnIUkYO/ugcleM9DZMoyuL9v/ourpSwY1CkRFHhXO2iDp -wjIKNJVVdWs88Iju/tQrn1Y8oTMUvzVCFLL91Iaa4odHhx34HlvFBjQ4EPmq0v41 -Cd+hYe42UDPm+fG3t9o7UZ51G8bkgjzKFSfofSR0TYpEhwIOGya/ccjTPpy4gGAn -A0uWQxdAniu/lJ16bVapQD2WZGVhfOQpMTg+40uM87IY8dz9+eXq6/s9lvw0imt3 -3G4bgFv7VlCTMX+Zwa6Y8a729/mfORpEfB9DETgvrypvFVGLJmddG67tuFCmEFzx -pOtpcwwSe3LjQyPx880= +BgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAHEV +fJbRizVql6i3JXqZmXvCZKFX1UmtekVj1SbKSkE6gDOYbMMGm7wZgei8/eUC7JZ7 +DCHkkLQGsbHEq1IqgB/2unXiX54GAkxMOBa2sdAYZ/qGBryV7GziZ/pffOMBdu80 +osD5RoCcrdupWvp/hpLYg2cqMQ57uo3F67jrk3JQZyol7zEmzhGN1vGTmntoZI3s +rPd/7cNtspz1pWnlAMUQWMJDsSyiYh02luSnWWkrj9iF3AgHuHcwIBf1obNyq2kb +gFr100o3hFHC0LO/BLk1Bno5ekHlGNEXpH+CwOK0qrfsmOhCaxgozICiMEvwr0n6 +nJh0bQjM5FSl7ihIJzQ= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDLCW7KuORvLi/8 -ZPwbAE3/skF9JqAMfShWEPVpDmlyabiR9B+DkKps0e+tF/T82ZPzKyLzCQJoZCtx -FLxlvEx2OnOW5v89ZMDackJf5UmGP/oeZYWsCAHmu86x6sXjrmTfXtO6e7pf+GIy -eujVrxb0fTZs6TYKVLlpr5GFWslsaLaEUV8cDTQQj0WvlYX3rgKeQ1jd3B6qn9X1 -CU3W5Grjg/D8oRjygSiTEJE22tJ/IlVDulGTgAZBXewmO5TZsHC7+EJfG2HdmQJs -gjNJm81W5GAqPEmsHlcZ/jlKa4fMLw71a1UEFvFaTA1QWr+4StVTU1sk7qdzjoCT -kkvVjXohAgMBAAECggEBAKWy/A1L2Xn2+siD7c9nb0CHTLUDicjHd9zNmEr1rQ7n -wu6yiemIbd3Dc1QTYifctcbVZH9t0d46Kc3uu1ZX1xsB1wXHXXTQPifI6exzJo7+ -tXSLLA1at5qDmUtQK9IFqGM3c5oUkeTyw8koz85cR2KesYUuyqChxnO/CgcIMaxb -UKD+4OUoqoaWHv4XIKg5mjuchHuQRXqWhNihSNBte8UV5AR+earQRUtRlo2VLYrW -5Ra0GPRZD/561gvNCEPKAZO0OFU7UDGbH5hFPaKe9l/5nlQiZ9yX/aovkNPcBkOI -+EuwvvmQQWxkPFj27SZqpJADqR1vL4/blCBJ0lQMwPECgYEA6fmLewEBkYV4B1Zn -ubdpliVHwfLM0zCuzBHNrnHpRaTHXHF+3BWfhT+etOQJu4tviOERIX8/rhW6lti+ -DGttAIV+PG2Z/h58qdRYyps96HTwwGFZD7eUA71zdOaTdcOug5+IZ3yyio5ep7SB -e6uwSaCIQWdf3vbrrvFDC1fdF00CgYEA3iZUBfvIaV+c6QZyEnX9DAG2QhuYS6FS -7/WEKvUVWtSsASZL6XzBs+OKINSMXvGl095SNhCTSATCTazIPkzLs0WAiY1z0+gH -LgDSW/vGMIM9FsfCYJ6jxNJuAXPl9b9tXbQlA97JJ7RALcKfNg6WFIEubo4IQcYT -40g1+hHHjCUCgYEAkMaAsBPnTIwxwRiOBjSfePCIflImu2ccQdmiU5bYqOdVFLaF -sNkQY/sB0RxpiUq2MRRS7U83sBhx5HUo47Z1NCVl/y4yYzOqH8vtRTDaFcSGVAPo -f/kv9UB8+JDUHcJ/caJ9XMukDmgZ2duvYz7RTj2vEglNIUHYg991hMdRXuECgYEA -2RbDjYVbd/4DrISr/PZobs9NeJTW4zGnhu96VTBjyfuCiy0NigJsmIqHdUXAVxf2 -YXElDchs5TghWR68IIWTmoEU91bQFgjs2zvaViFYsBfQHu7fOOROTg0Fi03jL5/+ -FE/yzDFuiepPvWgvhfgDGc6TvV33G6+hx73NYa72PjECgYAzNDqiUGcKkbPi9iCi -FcfoPlu9QrSYFo/+/1towWg+xD/AydzR9yFDtlh0D7XhU5hvM38BuNH3oWRXLCSj -Ul4gUBPQ9aLXUbP0qxPvaIGYofqLqFbzFgUNmYK5J8o3g394DDgwBwEr3HyUh5n5 -rriGBbcgH2n6JyCIogymNPowMA== +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDP+anHp2PcAacp +G70/Xc49/EbZj1n1zOi+rAyh/Hrws52uhLClhkQ6qzaaZlB/wOG84YZvBTtP6L+Z +HB9rysnGEIzxZstw4Wg52CRs+D3VPK+FYpAAMQsdiNQn9YuBt/DNYuDeYuStrY+g +FFmFR674Cj+zhexFHiBIi0wtQmv5Lx0B1SGnVRAkG38aEUYVtDDZgSKFFCHHKga7 +q6MemHiuvqvGvxRyEtPaSTA5OnKBxRT+tCPhQWGR8p4X1PwQRSTw1LpP6ZYpKtIM +dImDbEWIH6ysPCOxp5ir07uUAvoclgmjzwCy4f1LI0KyL3IgtHtcKPjx1T6P8bhf +SekAVRabAgMBAAECggEAZ3pxX0WKCg1IfsuufbjOxUigd987g3FQ3I3+OxrEkL7N 
+1edvfOwxRn+py2jc5LOznacYnGSd/kRwEF5Nw/RCMKpLdlEPKDr/AMjqVs2s9kwb +iOJhg7bjMO7DoJ4rq3858Pchq4TJAyE4NQ3q/MWN6RwwVcJsrHcw2Od2H47M6k4B +CoLtyuIzpN9vQtQydR3XwaMNAKBV/DjviSJrqZw3xt7qKHnRCVrn9d1liZm/L0mq +0B4hwhtfnUNn7AaV5AGj7Pd9oa6lUt8Jdc2jTE0vV6bTr/Yr1YwB9Frkh1pZhZ+a +1xWVQVofT6uoMokK1uGcU9zFYUQ6ipP+Dod9Ni6VAQKBgQDtjP+jU7J0BkDv2+Ve +fchvLBKvpOIYicnWxtyc1axlX28iZXxa77o9PTF9zLS6pkGf1xJtkg3gRusMTSTY +67GtHsvnnGug7xYaJvMVnEbleOGjEfdg4Cn4noYrzwtk6nfqJiUaPHjMn3hQtD2l +XqYqNAQmU4IGmwQm+IBUX1GSnwKBgQDgIKRQPmgV5/EDCmRDeCNthUaj1gqqzjdl +c4mF0EWAUWTB2RCHvcsDAwxtiGoDqDGaouns0t3Sv5M1+Q0Te9F1cdnCrR1ujrXI +kKkzYp7AfhxIu5q2qt2pF+EWaMaWM22Byynt0byXmL1ZQqUyQRQyFlxlmfqRh0dw +eJS7Fb3WhQKBgCnRZXtKE6MrKqlO65zrxaoUdyGmhArPztFZZqLRVqzt1PGYLgEQ +LJf1Os4riDFHbUwHH35cdMbjlSMwPWlWbIvOXRTjy2qkTB7Edox6W1ywIACF/Cd4 +rsrOlU4G/7l/MSbuGh/5P0a+V/7VC/kdpWDOkWBx2PTyuxNlMaoosYJ/AoGBALhZ +4Q2n51FtcI6Bs6rTXMCvCUWlvyxzZplqCUop36EGUgwokTI16JBXp5dIx3quk+6P +5j3HzHGW5m1Up27JP6aMuQMqBbV0aQ3J/4KmsdG/aWBh/4YU0TJO0/PsxruxHXfP +NnPJOoivN7/904echBIW1nUzzCh7z0UxyZSA859JAoGAXKXvh8s3n7o323yQN0R7 +ouyg1Qk8jYIKAwlUsaPXMBX711Cptx2/qtLRwQpos+JgqBD5xOg3LT/DOqYL+Ip/ +vMCn0KeX2crEstsJFqZ28YzAXoZBG3DIwH3a44wutL+xfYj4IdHWq1hLDqIg5tyo +6MClqNMZRz6UFl0iotzfHFg= -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/client_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/client_ocsp.pem.digest.sha1 index 0a34e970c45be..2771aa053d498 100644 --- a/jstests/libs/ocsp/client_ocsp.pem.digest.sha1 +++ b/jstests/libs/ocsp/client_ocsp.pem.digest.sha1 @@ -1 +1 @@ -22CB52597C604163DEC4C7CA6075857B65BC0A38 \ No newline at end of file +167A47262BB5612FA24D8199A6549BE003F39576 \ No newline at end of file diff --git a/jstests/libs/ocsp/client_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/client_ocsp.pem.digest.sha256 index 2647408b974ec..661252b28b4cb 100644 --- a/jstests/libs/ocsp/client_ocsp.pem.digest.sha256 +++ b/jstests/libs/ocsp/client_ocsp.pem.digest.sha256 @@ -1 +1 @@ -BF7E7B303E250F8043BCE5DF152A9CE69EA3C654DC2A3F6C6A08358EB62C37ED \ No newline at end of file +140FD8E16CDF21418853EB2BD04A052EEE2891E0CD147A0908D9974E1E70C459 \ No newline at end of file diff --git a/jstests/libs/ocsp/intermediate_ca_ocsp.crt b/jstests/libs/ocsp/intermediate_ca_ocsp.crt deleted file mode 100644 index a4193f87c9343..0000000000000 --- a/jstests/libs/ocsp/intermediate_ca_ocsp.crt +++ /dev/null @@ -1,22 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEYLGF9TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV -UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO -BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjEwMzA5MTY0MDMxWhcNNDEwMzExMTY0MDMxWjB+MQswCQYD -VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp -dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY -SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEApP3UQTlZVYFjzvRREJbdqYBw1zF+NWayd+AFUqWzrW35TECxnmR0 -PEr+ILEucOfiPB/AwRoTCMF0IJk1y6l2ljxGs9vuGD/MdBtnxzJ3cVbzPTtVm5Q4 -kAmVJz7O+2cw70XGD3hruDMKGkAixRwLXp16ENl0jyJ6V44JBRfOQcZLG3geJgve -cbp1KwkTASaRcYv+93tr9z5s92a/2UVXRuSK/Rf1+x+U4+GRVJh4/k8i9nP/ieYg -92OGqhWr1ETdSv66SZ+sHd+4OftMbETqBdiTGj7GM+EszAEUTPYDabTvQlOBtdZH -NYOLHGMxKxdEj5EyzE4y8WO7yk4W+TZItwIDAQABozIwMDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRRg4ZhgrLm0lO4jGm+fmVnaczaPzANBgkqhkiG9w0BAQsF -AAOCAQEAZK3kybfwQ05q6BQevqkun9CKC3Vwhv6BLez5wOXW3gQ8BQdypKbSMEB8 -4RPEcy1zBU+6LPa8S+WE88EN85/0n/oS7N7MAgEpSWT8vflBm76nEfG4jG+l8h+Q -yIp0e5/ITq/MtRx9EiRk+vU6l3Mkvqq+2a3T7pKhvE4jOIOyFtg5jr+p2n46WEw4 
-g3N/BzbZLpz147KO7tYrelhIAAcbmeVUKlQOjtcljeGbZimRgt8kzJWBVNAS6tEj -J8FTRLMX6HSTbVMx8tjq+MxF9hn1Ztc/3GIIuTvlGeTkLTS8atR4318YfMcZLlwm -pt3Zd7lPfbW/gmFewm7GB5TL9rDfbA== ------END CERTIFICATE----- diff --git a/jstests/libs/ocsp/intermediate_ca_ocsp.key b/jstests/libs/ocsp/intermediate_ca_ocsp.key deleted file mode 100644 index 8d2c2725ee846..0000000000000 --- a/jstests/libs/ocsp/intermediate_ca_ocsp.key +++ /dev/null @@ -1,28 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCk/dRBOVlVgWPO -9FEQlt2pgHDXMX41ZrJ34AVSpbOtbflMQLGeZHQ8Sv4gsS5w5+I8H8DBGhMIwXQg -mTXLqXaWPEaz2+4YP8x0G2fHMndxVvM9O1WblDiQCZUnPs77ZzDvRcYPeGu4Mwoa -QCLFHAtenXoQ2XSPInpXjgkFF85BxksbeB4mC95xunUrCRMBJpFxi/73e2v3Pmz3 -Zr/ZRVdG5Ir9F/X7H5Tj4ZFUmHj+TyL2c/+J5iD3Y4aqFavURN1K/rpJn6wd37g5 -+0xsROoF2JMaPsYz4SzMARRM9gNptO9CU4G11kc1g4scYzErF0SPkTLMTjLxY7vK -Thb5Nki3AgMBAAECggEAASkb7h2GKFjRp+oGC/TTuFaD9K+PcLa5OKilwPATdHva -jhPCbBfOzYHFidtVNUwcRkn+5BzX127s7zHEtBsMD4B7CtbYNOl1+bcbosYTGwP+ -kAaz0nVXdIPsvarub8xJBtXZz9AMCe6p+odK91H8Ln0zF50/+aXHcIg6PgPt2n6U -smChi15o1F6kdr+hwrqUpjW7NDN3Fs5lCH4dNw8I5PvpqPwl3IkwYG8e76A/9dJa -Fe1mzrUcmXi57JwSePE+Q7/ncIfXYB964AkTMLabylaPsB5EKP587jfpEfXXfyXn -Y+MLFCfP8dUXwu2nAr6vSWs3Ne4TGwWLLKGSP1UQuQKBgQDRBrQj75aN4hPulr9j -MTLIXxNRBOEkKXv11mvZFEV1ljyyw3qCivIndJBLNLRDsY+cr6yOYVwvfF5sx6Si -sF4N789yusRQr3iARJ67+fIJ04gOaIMW8iYzB9kr9eaLdpWSbbBkVG44aF28CiDb -dgeEFFjXYY5u4T+V+YJPLuDrLQKBgQDKEc6SXndtATpU8Gq5DWcUthPwEVQmVYsF -6EGWtU/fdVgTs1XmkFuRLy4VZcICK8+uGqN+bOMtr5gKJjEhAr2aDMqpm3keXdLz -Xlf/El2zzQ1Pj+Jm69odeCqGHwXGQTMOF5bqvIngWi1A5ijS/N3BiNLwtzlcKm+P -yJuJF+dh8wKBgQC7Nd7XpLlaIFcrxLZrl9/c2FKLqOwgoEsW9tGnHmHLnCCHF089 -ZkbWEa8+vFiLnJd8hVbuOsL/AMvtb63DzGSg5N0O67nybgZmE4972rPuGxfrl615 -Oq393JSkq9utoyr5d+aZJYmGWetCBGxDQuYeZL7hQM35/yIdJ9iPJPRrjQKBgCac -rndjm7h1kprmcc44lGjtvfOSrBzDHdScI+RTcxbFCnaBPznWfdjJRioKjr7xdjbT -mkgvMF3rfsb5s0uWhXppVVSBg+xci1G7xl7UOJmB5jg8y0tVaBFXg/Cq/uR6UvIv -acQjEMmREbKkCEsAzLMNnRkoOcq1xSmZcLcKnUknAoGBAJjGDcvr/RsrP/fehxxs -jqtxALg26wAUbfvgjrJxZ3sSmXvP++t8KcXHoAi9vbUXwHjdcq1GCiynmuJG/D4z -u7oBsQnducfSTULsmdMIjnBTy6cdcilfgfX+3h/eUEDzF2R0vx3ugmJMUW4+iMm8 -CVLNHOr0uNpdrz5tOf6SpRhd ------END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/intermediate_ca_ocsp.pem b/jstests/libs/ocsp/intermediate_ca_ocsp.pem deleted file mode 100644 index 5c77a6e454a79..0000000000000 --- a/jstests/libs/ocsp/intermediate_ca_ocsp.pem +++ /dev/null @@ -1,44 +0,0 @@ - ------BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEYLGF9TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV -UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO -BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjEwMzA5MTY0MDMxWhcNNDEwMzExMTY0MDMxWjB+MQswCQYD -VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp -dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY -SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEApP3UQTlZVYFjzvRREJbdqYBw1zF+NWayd+AFUqWzrW35TECxnmR0 -PEr+ILEucOfiPB/AwRoTCMF0IJk1y6l2ljxGs9vuGD/MdBtnxzJ3cVbzPTtVm5Q4 -kAmVJz7O+2cw70XGD3hruDMKGkAixRwLXp16ENl0jyJ6V44JBRfOQcZLG3geJgve -cbp1KwkTASaRcYv+93tr9z5s92a/2UVXRuSK/Rf1+x+U4+GRVJh4/k8i9nP/ieYg -92OGqhWr1ETdSv66SZ+sHd+4OftMbETqBdiTGj7GM+EszAEUTPYDabTvQlOBtdZH -NYOLHGMxKxdEj5EyzE4y8WO7yk4W+TZItwIDAQABozIwMDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRRg4ZhgrLm0lO4jGm+fmVnaczaPzANBgkqhkiG9w0BAQsF -AAOCAQEAZK3kybfwQ05q6BQevqkun9CKC3Vwhv6BLez5wOXW3gQ8BQdypKbSMEB8 -4RPEcy1zBU+6LPa8S+WE88EN85/0n/oS7N7MAgEpSWT8vflBm76nEfG4jG+l8h+Q 
-yIp0e5/ITq/MtRx9EiRk+vU6l3Mkvqq+2a3T7pKhvE4jOIOyFtg5jr+p2n46WEw4 -g3N/BzbZLpz147KO7tYrelhIAAcbmeVUKlQOjtcljeGbZimRgt8kzJWBVNAS6tEj -J8FTRLMX6HSTbVMx8tjq+MxF9hn1Ztc/3GIIuTvlGeTkLTS8atR4318YfMcZLlwm -pt3Zd7lPfbW/gmFewm7GB5TL9rDfbA== ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -MIIDeTCCAmGgAwIBAgIEBdhiWzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV -UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO -BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjAwMzIzMjIxMzA5WhcNNDAwMzI1MjIxMzA5WjB0MQswCQYD -VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp -dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO -S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCg -H42hLFFnWFETIDs4Q3rjzJLB4mxqn7BiFDbhzivKGN8SMrIaoyg8CkNJWpJVYEBN -BjaQHMzivBiQEjDbx2bWz7+rMjont9zJbNmMMuEZcqQw42SBlQ/xXBnIbvICGoXy -7EkEH/kYzX7NjUhAHOJUdfyTW0okChPxOQr8CI07HVYmeelBZh6FPnzdQ5mgsbmk -vsdesE1gvcfFtm/7Q6+GXp+1GDVGRUmPmHTYPIkjouJWQM++WU2KofSe5k9Rn1Oz -ZE3jJAaB9gGA83/xcLkVLBe4dyE5foVbbXL7t37yB8R06/7ffV62B7sn0M5X/rfA -UY5sJ6WOWdQz8k+WjXlXAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBAAsY/vktUSwXC1MCC8cYtcrlI0EgGcvkcRxEjRv7t5YVZii6 -eqKSfaX5HDxKl8dH7Z95Z3sDqr7iwPFtzmzQHEwvSSKbiqeS9Be0yf6mJv10LC5d -M9qoMvbp90ob3Jhib5IGzeijcQFfzbZa+MGnWiCGX04U/hUrayMdmna83exKbeNW -S0LT1F82rG2QklFOFSZSInXsBiR4olRWqXrYpNjP4B5gueQ2+XUlMZdphvkOksCo -/UBdqKotBFgyYXdMygl4hscxo+O4FRpX6RKVyobJXKax+mzbc9YUKTFtKu6KlZls -jvqjtuXgmZvXOgduG5D8Sqoqp/q1nYzYpcgEss4= ------END CERTIFICATE----- diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.crt b/jstests/libs/ocsp/intermediate_ca_only_ocsp.crt index dd618b0452bdf..8646678566997 100644 --- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.crt +++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.crt @@ -1,22 +1,22 @@ -----BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEJWcflTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDojCCAoqgAwIBAgIEfCXJSjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB+MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjB+MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEA1GZlPg548GQidASCrRZDXfJm3Li+vIazhm4MGhv6bJ2o+SYHfu5j -PO51T5l0TGvuoJPnzwq+RlW5RW3/+fCMl0gt+awDDzjKCekn00w8KSk3rqGDYQk7 -cSVuoPJVyJJ/vo86bwVMB9TYyxPi1lEBeeyVaa2FvbzS+SZq2c9c087WVYRmj/81 -yGztUAwj2Zoru1ECuhHE8YMeyDqmi52XByUmI7Ywrzs4xc3pQ+AI62IsiUFbCNqw -EPHhEcCgEwaUlWihvUfO5lJSKeUyn0WlQ8MmnUTnCCMYDDJKPublsuu7Ngl6xmEM -H3u9iSbV6qWQwsC4PJcw5PO30yT5QkFe1wIDAQABozIwMDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRuXVvr6fP/Pyp2uqsHQ7KTvYGuNTANBgkqhkiG9w0BAQsF -AAOCAQEAaO2/QF2KG9LM1yKWe9ZbrvZa2dWgFHebY6HckDgE5rc5+nYug9zSEUi+ -QYKZfEAYbMZ2a3ymybsNsGpVPBYv6p2kaa6V9hrJb/awMI9dFg+o/U4v3dLQZmci -6cSI/c2T9mqJ4tVUPZE5tyVXZBZrQEUOJwcrcH19qWtS2n5Zk4BjkFAysC/eICMw -yD4kGI/djEkaABkCC7Xb75ySh5BJgC8ZZ/5gmKYiFZnV8d6ktXh7nPcqfsyuTuei -NBTSZpdTJ5WWNpybnK7/QUeedk1imNkTqK1TOJ4yjxIlhpGOX0f9q9InCrdYZb6P -SaRjfoPnX4zSLP5nv3QSDMcJjD/IrA== +MIIBCgKCAQEAnreW/G0+IaH5ly9cI00zwvHEs5jUc+eF8B/YWi7E/arCIRgOhnwv +c9pBDoVqmwx9cTtEI/KJW431V8LTBu40ToDBtrJCRUydF+dAnoxv5n1TlCF0t/52 +pyGM5Rf3K5nfJGjSpNPnw3yojpXKuAKJZdQxlPBtnsmCNk8+CtkTE3m/psLIGa/9 +Rq3Ctx0xfdJQiiajV8Kv08a3CDaWW2Tj4dWukUI6LFFfOKXe6Fvc1sfTPG7oH+GM 
+x/e7uBbCcP6J5368posgLddZifQjv9bQoGx0AB15cl9PzyI6PlaV4nww1HNwd7yY +UzWNVdOQ2LPkTERu0WwG7/lEE6L56p092wIDAQABozIwMDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBT0V8yyi6DeSqcaTzB1DWftYWVlgTANBgkqhkiG9w0BAQsF +AAOCAQEAyuXdfv9tkL2YgwzVFOSPkBTowGECU+7aEGtI45rRgkqw3x6HUl+zYcqa +REj0dLsFKoixkYSKS33Q7mg09jUXOcqs76hk0nf21ITe0c4M9NZzZPrN7PLmKRw5 +gYwsXHGYGL+3FmqTNN+L6kk/Gd/KcuwY2NH1701Qj0c6uP+Z54lP45DKKaJujouU +iIOGaLg2sU0r79baqzt7DvZVmlbqQZ8XTYykZo4O4E0hJ7WTNNwu+h1HoOanSfYE +EWKzo3qS7JkvZG1hoQhNk6np8+KQOWGGlom4j5HhFaSvlMk+M4NyqvJsO4hA5B2D +tEOIfINSAtpVNmi5I16idsVixhy34A== -----END CERTIFICATE----- diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.key b/jstests/libs/ocsp/intermediate_ca_only_ocsp.key index 31d77afb2d249..558d745a58cee 100644 --- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.key +++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUZmU+DnjwZCJ0 -BIKtFkNd8mbcuL68hrOGbgwaG/psnaj5Jgd+7mM87nVPmXRMa+6gk+fPCr5GVblF -bf/58IyXSC35rAMPOMoJ6SfTTDwpKTeuoYNhCTtxJW6g8lXIkn++jzpvBUwH1NjL -E+LWUQF57JVprYW9vNL5JmrZz1zTztZVhGaP/zXIbO1QDCPZmiu7UQK6EcTxgx7I -OqaLnZcHJSYjtjCvOzjFzelD4AjrYiyJQVsI2rAQ8eERwKATBpSVaKG9R87mUlIp -5TKfRaVDwyadROcIIxgMMko+5uWy67s2CXrGYQwfe72JJtXqpZDCwLg8lzDk87fT -JPlCQV7XAgMBAAECggEAS9I9w/hgndfxIJ9XkrhG1iupIpPkquIfBhcUxOFF9S4W -2tIDjQFGdcLeJ8ss4/cHmVUTRCqen/cMC0foP+3qEjsXBnCsKuvn27/akqg9Nahc -Ez/e6W3lRU/KfTPlAZOifIEep/EpIgaOWXhA2qbSMxcMg0wJWSEl4wEe1aAbkBQ4 -lZ+i/sj/vhceaDDBHTrsC3TcCm41eREHBFnRbr/ga3REAmKah0s48aeVp6cgDbaB -4VLoT2KGhXlHFTQsUysM6W7xm35RRarbtiJfqH83ZQTepWNsrJ6du/eY/UfKnsfH -fP/ppGtrktENRsmAesEjz+EwQPpsZLayxFdGZ6O2AQKBgQDtrC9CsfznaWdPtLey -H02JXS+roPJWXRL6ND1u9qA2rg+/12XsqgIofz7sFU7Xm3334wVqwNIaLqm7tIxM -8R3LzwPz5SJnNTWbILmckolZ+98r1AHlfwM/6zhj9krWE2vYlu16vI4wpP6taxb8 -c2wscUcMcTjPhfQGbF78VlW7AQKBgQDkx1AIvHLKE0S6Apexjjj3sXgsroo+Kssp -aRinANuNHDPyLhVoKiVJsTs1soZ44Ls1YmSVRPMVoHQxxIKDnW9oui7aOOwoChhg -qqt5N7gVF0fxcD5gaut9zY7LdDT70mC6nj2TYCjMxObVDTDkl2kHg77Ct5aQ6i+/ -SX2jjhZR1wKBgQCCTiO/IkxxJ3XP2lnYW5csF54aL+yNcwwDh36jBq5CLF5QJK0p -+u+h+lmqH+iRBxwiOyJuK/dFCY5fbhZ53LwkUIRvRJH/GcohGekJscGCRzhNFDhR -9bf3ZGQPUiongpBfOChNYXFntB45P/xjPnjxSM3WQYEPLfbjSqktGbx5AQKBgQCN -wzvCzhNLSHlT0ftxmLGQOrY+6cDcGORfOrJ9+bgSTqGZthipvUPx0BLiiUdua8NV -pyywmlMO2ahmlmBRciLxAD28F32uqeLFM7yhlndJCm4YG/drA5X9FH0hcVbLnGc3 -/IonYnSlvnVTG6NqYrBAadCxE9YG4jbID5/80zTdbQKBgB/30lNgWCXNKs/RVsMU -npTVDESCwaU19wTlbOeVS3vCFDapJmuv7K7uhi1b67Pt5wnaw3drKNddIOS53XZp -yfsYjZWml8z46qyA5kK4scQ1LkYPnj4Zf+q9dF9i1mYGT2b65v/dzfrLr7//hcBZ -23zayhJpJWDJqbUWliaDmY98 +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCet5b8bT4hofmX +L1wjTTPC8cSzmNRz54XwH9haLsT9qsIhGA6GfC9z2kEOhWqbDH1xO0Qj8olbjfVX +wtMG7jROgMG2skJFTJ0X50CejG/mfVOUIXS3/nanIYzlF/crmd8kaNKk0+fDfKiO +lcq4Aoll1DGU8G2eyYI2Tz4K2RMTeb+mwsgZr/1GrcK3HTF90lCKJqNXwq/TxrcI +NpZbZOPh1a6RQjosUV84pd7oW9zWx9M8bugf4YzH97u4FsJw/onnfrymiyAt11mJ +9CO/1tCgbHQAHXlyX0/PIjo+VpXifDDUc3B3vJhTNY1V05DYs+RMRG7RbAbv+UQT +ovnqnT3bAgMBAAECggEBAIql0g2c2KPEvSXmx1RHQqpPTQeewCin3YcQKo1NQeRe +YMtMGpQ8s68+v4oR3jinxoU9wp6sZnVGI0aQ+IubVrIlJBOW0PF8pdaVuwtFnxL3 +9CT4vN9mOPxzAIO4VcEO37EMqMY4HsPUh+JW1Am+nYwIu29iQEBvw4Hx4z7StilR +13pCl+GrZ+c/+64SRKDt5vlnPRElm30410nCRR37TvdRqmWqfrcph7mZ7RRQTtE6 +UPAU100eR+RT/U7lLb3xzxFOtUZ1dZXCMobM/lzDHXpKb7zrBwcg4ugo8GhOkjlN +UpV+b/pvHNsViqpk3Rp0ZznIl2j/iLYtO+eZ1T45qnECgYEAy61VFS0xcGHLHgQE +YQYrA6res/RTHWnstnZDW9KJ6yPFmKS8/Dh4WS30ORP0bNuCsn0UUPOp4C/njrPF +JjcpohyqFZK/0OmW+oomEJWbH1Rdv9NaViVfjDcydeV1YI+bGKT2LCTjVOvk7oT+ 
+Qwvndzk5ToYBCBZNTs0MqjboOCUCgYEAx32AeFp48eMWlyZ93qVgvRYFhUQtHKd8 +ofKzMPJCJT27zAUg1arDN04S5LepyoioTr/rFIMlxjawl5t3VsJA8/oYvmn3xnKW +WZxnMhd9kTz/VtZU2acOzCXCUKAJYQDITemshC7vzayK7m2+i65nF3ygySmbun1Q +sStWmtxSvf8CgYA6gTKEX7PozR50I8FCQWxPpGBOHqtVMpz9GGFm2cYmFeNnYkKq +zGm9bBbP4mx+bFtRe94/Fo7AR+Jiuj6yKHw8BbHRepLApqholRA0CVVCnZBETqiP +QDL+6sgC14Ns1jM90/ymV345YX0kCm2iwqOdOoG8jX90XDuenAW8SwNPRQKBgCar +jVvHLeq1uotSStP9+uKsgiVzMct8LAy4n0O9slTFwsGpvNOuZmkyGTd84sz/8KBT +U56GTaw+rby1xkxrG2wv7tv6bq1pSPvV9asgIpck29pZcdk/KP8p0qEOZMMJA8sW +oPp+nr2f1M6jN2ycQh+raP+VsTpVUG8Hl6ItU1zBAoGActnprdCobGUplY/Q1YY0 +M14xXwF+lVoILrvU0Tu7ihxS4cNLRqxn+T0IR/zIiqnzsna1SqTfvanZg3sRQUCz +gKwWLF96uXCwdBMaXGXS9gDHWdbNjmaPCcpjeKvA/mleEMoSbpKMriPd1ekUfTu1 +KoMtXj7ywkdsXmOibpWE1dY= -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem index 98632f07b8e91..48e4edef18fc3 100644 --- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem +++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem @@ -1,50 +1,50 @@ -----BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEJWcflTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDojCCAoqgAwIBAgIEfCXJSjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB+MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjB+MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEA1GZlPg548GQidASCrRZDXfJm3Li+vIazhm4MGhv6bJ2o+SYHfu5j -PO51T5l0TGvuoJPnzwq+RlW5RW3/+fCMl0gt+awDDzjKCekn00w8KSk3rqGDYQk7 -cSVuoPJVyJJ/vo86bwVMB9TYyxPi1lEBeeyVaa2FvbzS+SZq2c9c087WVYRmj/81 -yGztUAwj2Zoru1ECuhHE8YMeyDqmi52XByUmI7Ywrzs4xc3pQ+AI62IsiUFbCNqw -EPHhEcCgEwaUlWihvUfO5lJSKeUyn0WlQ8MmnUTnCCMYDDJKPublsuu7Ngl6xmEM -H3u9iSbV6qWQwsC4PJcw5PO30yT5QkFe1wIDAQABozIwMDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRuXVvr6fP/Pyp2uqsHQ7KTvYGuNTANBgkqhkiG9w0BAQsF -AAOCAQEAaO2/QF2KG9LM1yKWe9ZbrvZa2dWgFHebY6HckDgE5rc5+nYug9zSEUi+ -QYKZfEAYbMZ2a3ymybsNsGpVPBYv6p2kaa6V9hrJb/awMI9dFg+o/U4v3dLQZmci -6cSI/c2T9mqJ4tVUPZE5tyVXZBZrQEUOJwcrcH19qWtS2n5Zk4BjkFAysC/eICMw -yD4kGI/djEkaABkCC7Xb75ySh5BJgC8ZZ/5gmKYiFZnV8d6ktXh7nPcqfsyuTuei -NBTSZpdTJ5WWNpybnK7/QUeedk1imNkTqK1TOJ4yjxIlhpGOX0f9q9InCrdYZb6P -SaRjfoPnX4zSLP5nv3QSDMcJjD/IrA== +MIIBCgKCAQEAnreW/G0+IaH5ly9cI00zwvHEs5jUc+eF8B/YWi7E/arCIRgOhnwv +c9pBDoVqmwx9cTtEI/KJW431V8LTBu40ToDBtrJCRUydF+dAnoxv5n1TlCF0t/52 +pyGM5Rf3K5nfJGjSpNPnw3yojpXKuAKJZdQxlPBtnsmCNk8+CtkTE3m/psLIGa/9 +Rq3Ctx0xfdJQiiajV8Kv08a3CDaWW2Tj4dWukUI6LFFfOKXe6Fvc1sfTPG7oH+GM +x/e7uBbCcP6J5368posgLddZifQjv9bQoGx0AB15cl9PzyI6PlaV4nww1HNwd7yY +UzWNVdOQ2LPkTERu0WwG7/lEE6L56p092wIDAQABozIwMDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBT0V8yyi6DeSqcaTzB1DWftYWVlgTANBgkqhkiG9w0BAQsF +AAOCAQEAyuXdfv9tkL2YgwzVFOSPkBTowGECU+7aEGtI45rRgkqw3x6HUl+zYcqa +REj0dLsFKoixkYSKS33Q7mg09jUXOcqs76hk0nf21ITe0c4M9NZzZPrN7PLmKRw5 +gYwsXHGYGL+3FmqTNN+L6kk/Gd/KcuwY2NH1701Qj0c6uP+Z54lP45DKKaJujouU +iIOGaLg2sU0r79baqzt7DvZVmlbqQZ8XTYykZo4O4E0hJ7WTNNwu+h1HoOanSfYE +EWKzo3qS7JkvZG1hoQhNk6np8+KQOWGGlom4j5HhFaSvlMk+M4NyqvJsO4hA5B2D +tEOIfINSAtpVNmi5I16idsVixhy34A== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUZmU+DnjwZCJ0 -BIKtFkNd8mbcuL68hrOGbgwaG/psnaj5Jgd+7mM87nVPmXRMa+6gk+fPCr5GVblF 
-bf/58IyXSC35rAMPOMoJ6SfTTDwpKTeuoYNhCTtxJW6g8lXIkn++jzpvBUwH1NjL -E+LWUQF57JVprYW9vNL5JmrZz1zTztZVhGaP/zXIbO1QDCPZmiu7UQK6EcTxgx7I -OqaLnZcHJSYjtjCvOzjFzelD4AjrYiyJQVsI2rAQ8eERwKATBpSVaKG9R87mUlIp -5TKfRaVDwyadROcIIxgMMko+5uWy67s2CXrGYQwfe72JJtXqpZDCwLg8lzDk87fT -JPlCQV7XAgMBAAECggEAS9I9w/hgndfxIJ9XkrhG1iupIpPkquIfBhcUxOFF9S4W -2tIDjQFGdcLeJ8ss4/cHmVUTRCqen/cMC0foP+3qEjsXBnCsKuvn27/akqg9Nahc -Ez/e6W3lRU/KfTPlAZOifIEep/EpIgaOWXhA2qbSMxcMg0wJWSEl4wEe1aAbkBQ4 -lZ+i/sj/vhceaDDBHTrsC3TcCm41eREHBFnRbr/ga3REAmKah0s48aeVp6cgDbaB -4VLoT2KGhXlHFTQsUysM6W7xm35RRarbtiJfqH83ZQTepWNsrJ6du/eY/UfKnsfH -fP/ppGtrktENRsmAesEjz+EwQPpsZLayxFdGZ6O2AQKBgQDtrC9CsfznaWdPtLey -H02JXS+roPJWXRL6ND1u9qA2rg+/12XsqgIofz7sFU7Xm3334wVqwNIaLqm7tIxM -8R3LzwPz5SJnNTWbILmckolZ+98r1AHlfwM/6zhj9krWE2vYlu16vI4wpP6taxb8 -c2wscUcMcTjPhfQGbF78VlW7AQKBgQDkx1AIvHLKE0S6Apexjjj3sXgsroo+Kssp -aRinANuNHDPyLhVoKiVJsTs1soZ44Ls1YmSVRPMVoHQxxIKDnW9oui7aOOwoChhg -qqt5N7gVF0fxcD5gaut9zY7LdDT70mC6nj2TYCjMxObVDTDkl2kHg77Ct5aQ6i+/ -SX2jjhZR1wKBgQCCTiO/IkxxJ3XP2lnYW5csF54aL+yNcwwDh36jBq5CLF5QJK0p -+u+h+lmqH+iRBxwiOyJuK/dFCY5fbhZ53LwkUIRvRJH/GcohGekJscGCRzhNFDhR -9bf3ZGQPUiongpBfOChNYXFntB45P/xjPnjxSM3WQYEPLfbjSqktGbx5AQKBgQCN -wzvCzhNLSHlT0ftxmLGQOrY+6cDcGORfOrJ9+bgSTqGZthipvUPx0BLiiUdua8NV -pyywmlMO2ahmlmBRciLxAD28F32uqeLFM7yhlndJCm4YG/drA5X9FH0hcVbLnGc3 -/IonYnSlvnVTG6NqYrBAadCxE9YG4jbID5/80zTdbQKBgB/30lNgWCXNKs/RVsMU -npTVDESCwaU19wTlbOeVS3vCFDapJmuv7K7uhi1b67Pt5wnaw3drKNddIOS53XZp -yfsYjZWml8z46qyA5kK4scQ1LkYPnj4Zf+q9dF9i1mYGT2b65v/dzfrLr7//hcBZ -23zayhJpJWDJqbUWliaDmY98 +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCet5b8bT4hofmX +L1wjTTPC8cSzmNRz54XwH9haLsT9qsIhGA6GfC9z2kEOhWqbDH1xO0Qj8olbjfVX +wtMG7jROgMG2skJFTJ0X50CejG/mfVOUIXS3/nanIYzlF/crmd8kaNKk0+fDfKiO +lcq4Aoll1DGU8G2eyYI2Tz4K2RMTeb+mwsgZr/1GrcK3HTF90lCKJqNXwq/TxrcI +NpZbZOPh1a6RQjosUV84pd7oW9zWx9M8bugf4YzH97u4FsJw/onnfrymiyAt11mJ +9CO/1tCgbHQAHXlyX0/PIjo+VpXifDDUc3B3vJhTNY1V05DYs+RMRG7RbAbv+UQT +ovnqnT3bAgMBAAECggEBAIql0g2c2KPEvSXmx1RHQqpPTQeewCin3YcQKo1NQeRe +YMtMGpQ8s68+v4oR3jinxoU9wp6sZnVGI0aQ+IubVrIlJBOW0PF8pdaVuwtFnxL3 +9CT4vN9mOPxzAIO4VcEO37EMqMY4HsPUh+JW1Am+nYwIu29iQEBvw4Hx4z7StilR +13pCl+GrZ+c/+64SRKDt5vlnPRElm30410nCRR37TvdRqmWqfrcph7mZ7RRQTtE6 +UPAU100eR+RT/U7lLb3xzxFOtUZ1dZXCMobM/lzDHXpKb7zrBwcg4ugo8GhOkjlN +UpV+b/pvHNsViqpk3Rp0ZznIl2j/iLYtO+eZ1T45qnECgYEAy61VFS0xcGHLHgQE +YQYrA6res/RTHWnstnZDW9KJ6yPFmKS8/Dh4WS30ORP0bNuCsn0UUPOp4C/njrPF +JjcpohyqFZK/0OmW+oomEJWbH1Rdv9NaViVfjDcydeV1YI+bGKT2LCTjVOvk7oT+ +Qwvndzk5ToYBCBZNTs0MqjboOCUCgYEAx32AeFp48eMWlyZ93qVgvRYFhUQtHKd8 +ofKzMPJCJT27zAUg1arDN04S5LepyoioTr/rFIMlxjawl5t3VsJA8/oYvmn3xnKW +WZxnMhd9kTz/VtZU2acOzCXCUKAJYQDITemshC7vzayK7m2+i65nF3ygySmbun1Q +sStWmtxSvf8CgYA6gTKEX7PozR50I8FCQWxPpGBOHqtVMpz9GGFm2cYmFeNnYkKq +zGm9bBbP4mx+bFtRe94/Fo7AR+Jiuj6yKHw8BbHRepLApqholRA0CVVCnZBETqiP +QDL+6sgC14Ns1jM90/ymV345YX0kCm2iwqOdOoG8jX90XDuenAW8SwNPRQKBgCar +jVvHLeq1uotSStP9+uKsgiVzMct8LAy4n0O9slTFwsGpvNOuZmkyGTd84sz/8KBT +U56GTaw+rby1xkxrG2wv7tv6bq1pSPvV9asgIpck29pZcdk/KP8p0qEOZMMJA8sW +oPp+nr2f1M6jN2ycQh+raP+VsTpVUG8Hl6ItU1zBAoGActnprdCobGUplY/Q1YY0 +M14xXwF+lVoILrvU0Tu7ihxS4cNLRqxn+T0IR/zIiqnzsna1SqTfvanZg3sRQUCz +gKwWLF96uXCwdBMaXGXS9gDHWdbNjmaPCcpjeKvA/mleEMoSbpKMriPd1ekUfTu1 +KoMtXj7ywkdsXmOibpWE1dY= -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha1 index 7f99d4fec9534..6c794f664f171 100644 --- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha1 +++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha1 
@@ -1 +1 @@ -D8A8CAD72FF7EC2620AD14DC5E405AAD8D590A33 \ No newline at end of file +C741148C2729C25172A40E45FF37C842DF99BA28 \ No newline at end of file diff --git a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha256 index 58154edba3fb6..880fda0c016d8 100644 --- a/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha256 +++ b/jstests/libs/ocsp/intermediate_ca_only_ocsp.pem.digest.sha256 @@ -1 +1 @@ -216F69DCDF851F2A2FB2FF02E6979B74BFA1A6BE1506B7F9510397CF69CE5CDB \ No newline at end of file +9617E9454EB8B95594D2697E84753192FE832D88663FDBD9E436CF583D38F52F \ No newline at end of file diff --git a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem index 676e23c72157a..53bffa3c40199 100644 --- a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem +++ b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem @@ -1,44 +1,44 @@ -----BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEJWcflTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDojCCAoqgAwIBAgIEfCXJSjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB+MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjB+MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEA1GZlPg548GQidASCrRZDXfJm3Li+vIazhm4MGhv6bJ2o+SYHfu5j -PO51T5l0TGvuoJPnzwq+RlW5RW3/+fCMl0gt+awDDzjKCekn00w8KSk3rqGDYQk7 -cSVuoPJVyJJ/vo86bwVMB9TYyxPi1lEBeeyVaa2FvbzS+SZq2c9c087WVYRmj/81 -yGztUAwj2Zoru1ECuhHE8YMeyDqmi52XByUmI7Ywrzs4xc3pQ+AI62IsiUFbCNqw -EPHhEcCgEwaUlWihvUfO5lJSKeUyn0WlQ8MmnUTnCCMYDDJKPublsuu7Ngl6xmEM -H3u9iSbV6qWQwsC4PJcw5PO30yT5QkFe1wIDAQABozIwMDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRuXVvr6fP/Pyp2uqsHQ7KTvYGuNTANBgkqhkiG9w0BAQsF -AAOCAQEAaO2/QF2KG9LM1yKWe9ZbrvZa2dWgFHebY6HckDgE5rc5+nYug9zSEUi+ -QYKZfEAYbMZ2a3ymybsNsGpVPBYv6p2kaa6V9hrJb/awMI9dFg+o/U4v3dLQZmci -6cSI/c2T9mqJ4tVUPZE5tyVXZBZrQEUOJwcrcH19qWtS2n5Zk4BjkFAysC/eICMw -yD4kGI/djEkaABkCC7Xb75ySh5BJgC8ZZ/5gmKYiFZnV8d6ktXh7nPcqfsyuTuei -NBTSZpdTJ5WWNpybnK7/QUeedk1imNkTqK1TOJ4yjxIlhpGOX0f9q9InCrdYZb6P -SaRjfoPnX4zSLP5nv3QSDMcJjD/IrA== +MIIBCgKCAQEAnreW/G0+IaH5ly9cI00zwvHEs5jUc+eF8B/YWi7E/arCIRgOhnwv +c9pBDoVqmwx9cTtEI/KJW431V8LTBu40ToDBtrJCRUydF+dAnoxv5n1TlCF0t/52 +pyGM5Rf3K5nfJGjSpNPnw3yojpXKuAKJZdQxlPBtnsmCNk8+CtkTE3m/psLIGa/9 +Rq3Ctx0xfdJQiiajV8Kv08a3CDaWW2Tj4dWukUI6LFFfOKXe6Fvc1sfTPG7oH+GM +x/e7uBbCcP6J5368posgLddZifQjv9bQoGx0AB15cl9PzyI6PlaV4nww1HNwd7yY +UzWNVdOQ2LPkTERu0WwG7/lEE6L56p092wIDAQABozIwMDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBT0V8yyi6DeSqcaTzB1DWftYWVlgTANBgkqhkiG9w0BAQsF +AAOCAQEAyuXdfv9tkL2YgwzVFOSPkBTowGECU+7aEGtI45rRgkqw3x6HUl+zYcqa +REj0dLsFKoixkYSKS33Q7mg09jUXOcqs76hk0nf21ITe0c4M9NZzZPrN7PLmKRw5 +gYwsXHGYGL+3FmqTNN+L6kk/Gd/KcuwY2NH1701Qj0c6uP+Z54lP45DKKaJujouU +iIOGaLg2sU0r79baqzt7DvZVmlbqQZ8XTYykZo4O4E0hJ7WTNNwu+h1HoOanSfYE +EWKzo3qS7JkvZG1hoQhNk6np8+KQOWGGlom4j5HhFaSvlMk+M4NyqvJsO4hA5B2D +tEOIfINSAtpVNmi5I16idsVixhy34A== -----END CERTIFICATE----- -----BEGIN CERTIFICATE----- -MIIDeTCCAmGgAwIBAgIEaP5C8TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDeTCCAmGgAwIBAgIEUlBUJTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs 
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjB0MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjB0MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO -S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDS -ASa6t/0P3MHRRhLP2W25y2NJ9eBl6YMJj3oY058ixhHOhPLvdnWPcLHOh36BiNu8 -5+lX8w27vAz84qzloYQ44YJO+uvU9WBEoMa707IWTre3PSoYfe4y49fm9AwYkG2R -qn7674TGn5eNZnSuBVzab7Fy5+zBDAhgCcB+z0MYP3COEyrmzkKY2rPTPu7K7o3r -5FFGfxgceDzN6lukG12h75F+R64o8lvVBkHV5+1mwlx8SlthPH1HJB6/ZjX1m4gw -yRlU9JfR00iphro4FYI1SgGKeSP7Fn1E0VRB/YwRAoZFguq62vlMImzh+2folFF4 -1XzEO+BKjUiRfYbRTpWRAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBAKRiAji29p93B9cA/9gjE5Zlb39LRtYA5+RBC0WTn8u5ku// -XGpz3P8aDpO0BFAd/gNiyGRWQ7blHj0Mn02QHqW7T/yyyE0m/fASdXIoJApFDEtK -n1mUcmVdiiIlQvXo/oi1RTwxckQNqFYm2M/XSrg0HLFVHsTQrdDskbs0SRQZK3qv -5EluYHu7UvKAAmzepZhtyC1VNrZbP97cJ03ZupUyMo6NHLk0WxFHvYM3U7K/W7ui -YuFWenPWmESFGHR923lF6HjzvkhHYXKPaYhTnKZJ1kjwhYfcdfSmbizJBBlryzEL -+0KtahO1J5NBwYyVgo+Pkv5OEWwunzrpsY+TLR4= +S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDj +KEjkptHYsXzYYRGitba+fkBEhsuRVoAVphzopMVKven8XMHde8b0xJmqnqPYCPiN +shU62bcM+fWENDjpuavcqvzC2nYK118mbrRkko2AurqCPKiaj+o16aogsPS+lBS5 +WnK4C369pesQsZz+SO/s/LwW8iAsOAaayoFvJaK5RCaU+v1C7M6gFDyeQFLCp7UR +H4IZmWqR7A35mC76bdswyleTpN5gqAVjR2E3k/U1Azp42vUqNiNTNBzLLe9Gllbp +UwAqUZkf1o10Ew+K4wsU6L/rGK/QbaNObJ1NEg0UnVQGjYCJiemKMIVt5OsB7aj9 +LWIzHFLF2xynv9NwOZ9fAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBADidce5kTcwIUD+CHiJ04YFZEpxSyMCk2R7baZuZS6igngHH +z05oz4rnvavuaudFwvgj5f630pbel140DdWH/p7nsEsW6QEr9tThWDeabRiRcpq0 +ELd9kvYOGLlc0TZBAuFFLu60fe/NiGlWvALszIY166Bq+Y3xoHiLy5SJ56+O2REX +AKnzx2IIiDkz+GPHcQnjaTIo8t26I4ArHjekh1DQ0soUtzav+IBZUgb6H0Q12NH0 +4GVTZv5fLAMufqzCzhzmbU28p45Hrj8ZFErim2ES2l/akOHJUbNrbMTz63G8ZbJk +Inl2kjX/FBwuQ2d1idIJPU4B8djmQrJrb7urcog= -----END CERTIFICATE----- diff --git a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha1 index 7f99d4fec9534..6c794f664f171 100644 --- a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha1 +++ b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha1 @@ -1 +1 @@ -D8A8CAD72FF7EC2620AD14DC5E405AAD8D590A33 \ No newline at end of file +C741148C2729C25172A40E45FF37C842DF99BA28 \ No newline at end of file diff --git a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha256 index 58154edba3fb6..880fda0c016d8 100644 --- a/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha256 +++ b/jstests/libs/ocsp/intermediate_ca_with_root_ocsp.pem.digest.sha256 @@ -1 +1 @@ -216F69DCDF851F2A2FB2FF02E6979B74BFA1A6BE1506B7F9510397CF69CE5CDB \ No newline at end of file +9617E9454EB8B95594D2697E84753192FE832D88663FDBD9E436CF583D38F52F \ No newline at end of file diff --git a/jstests/libs/ocsp/intermediate_only_ca_ocsp.pem b/jstests/libs/ocsp/intermediate_only_ca_ocsp.pem deleted file mode 100644 index 6baeb77224224..0000000000000 --- a/jstests/libs/ocsp/intermediate_only_ca_ocsp.pem +++ /dev/null @@ -1,50 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEYLGF9TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV -UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO -BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjEwMzA5MTY0MDMxWhcNNDEwMzExMTY0MDMxWjB+MQswCQYD 
-VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp -dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY -SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEApP3UQTlZVYFjzvRREJbdqYBw1zF+NWayd+AFUqWzrW35TECxnmR0 -PEr+ILEucOfiPB/AwRoTCMF0IJk1y6l2ljxGs9vuGD/MdBtnxzJ3cVbzPTtVm5Q4 -kAmVJz7O+2cw70XGD3hruDMKGkAixRwLXp16ENl0jyJ6V44JBRfOQcZLG3geJgve -cbp1KwkTASaRcYv+93tr9z5s92a/2UVXRuSK/Rf1+x+U4+GRVJh4/k8i9nP/ieYg -92OGqhWr1ETdSv66SZ+sHd+4OftMbETqBdiTGj7GM+EszAEUTPYDabTvQlOBtdZH -NYOLHGMxKxdEj5EyzE4y8WO7yk4W+TZItwIDAQABozIwMDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRRg4ZhgrLm0lO4jGm+fmVnaczaPzANBgkqhkiG9w0BAQsF -AAOCAQEAZK3kybfwQ05q6BQevqkun9CKC3Vwhv6BLez5wOXW3gQ8BQdypKbSMEB8 -4RPEcy1zBU+6LPa8S+WE88EN85/0n/oS7N7MAgEpSWT8vflBm76nEfG4jG+l8h+Q -yIp0e5/ITq/MtRx9EiRk+vU6l3Mkvqq+2a3T7pKhvE4jOIOyFtg5jr+p2n46WEw4 -g3N/BzbZLpz147KO7tYrelhIAAcbmeVUKlQOjtcljeGbZimRgt8kzJWBVNAS6tEj -J8FTRLMX6HSTbVMx8tjq+MxF9hn1Ztc/3GIIuTvlGeTkLTS8atR4318YfMcZLlwm -pt3Zd7lPfbW/gmFewm7GB5TL9rDfbA== ------END CERTIFICATE----- ------BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCk/dRBOVlVgWPO -9FEQlt2pgHDXMX41ZrJ34AVSpbOtbflMQLGeZHQ8Sv4gsS5w5+I8H8DBGhMIwXQg -mTXLqXaWPEaz2+4YP8x0G2fHMndxVvM9O1WblDiQCZUnPs77ZzDvRcYPeGu4Mwoa -QCLFHAtenXoQ2XSPInpXjgkFF85BxksbeB4mC95xunUrCRMBJpFxi/73e2v3Pmz3 -Zr/ZRVdG5Ir9F/X7H5Tj4ZFUmHj+TyL2c/+J5iD3Y4aqFavURN1K/rpJn6wd37g5 -+0xsROoF2JMaPsYz4SzMARRM9gNptO9CU4G11kc1g4scYzErF0SPkTLMTjLxY7vK -Thb5Nki3AgMBAAECggEAASkb7h2GKFjRp+oGC/TTuFaD9K+PcLa5OKilwPATdHva -jhPCbBfOzYHFidtVNUwcRkn+5BzX127s7zHEtBsMD4B7CtbYNOl1+bcbosYTGwP+ -kAaz0nVXdIPsvarub8xJBtXZz9AMCe6p+odK91H8Ln0zF50/+aXHcIg6PgPt2n6U -smChi15o1F6kdr+hwrqUpjW7NDN3Fs5lCH4dNw8I5PvpqPwl3IkwYG8e76A/9dJa -Fe1mzrUcmXi57JwSePE+Q7/ncIfXYB964AkTMLabylaPsB5EKP587jfpEfXXfyXn -Y+MLFCfP8dUXwu2nAr6vSWs3Ne4TGwWLLKGSP1UQuQKBgQDRBrQj75aN4hPulr9j -MTLIXxNRBOEkKXv11mvZFEV1ljyyw3qCivIndJBLNLRDsY+cr6yOYVwvfF5sx6Si -sF4N789yusRQr3iARJ67+fIJ04gOaIMW8iYzB9kr9eaLdpWSbbBkVG44aF28CiDb -dgeEFFjXYY5u4T+V+YJPLuDrLQKBgQDKEc6SXndtATpU8Gq5DWcUthPwEVQmVYsF -6EGWtU/fdVgTs1XmkFuRLy4VZcICK8+uGqN+bOMtr5gKJjEhAr2aDMqpm3keXdLz -Xlf/El2zzQ1Pj+Jm69odeCqGHwXGQTMOF5bqvIngWi1A5ijS/N3BiNLwtzlcKm+P -yJuJF+dh8wKBgQC7Nd7XpLlaIFcrxLZrl9/c2FKLqOwgoEsW9tGnHmHLnCCHF089 -ZkbWEa8+vFiLnJd8hVbuOsL/AMvtb63DzGSg5N0O67nybgZmE4972rPuGxfrl615 -Oq393JSkq9utoyr5d+aZJYmGWetCBGxDQuYeZL7hQM35/yIdJ9iPJPRrjQKBgCac -rndjm7h1kprmcc44lGjtvfOSrBzDHdScI+RTcxbFCnaBPznWfdjJRioKjr7xdjbT -mkgvMF3rfsb5s0uWhXppVVSBg+xci1G7xl7UOJmB5jg8y0tVaBFXg/Cq/uR6UvIv -acQjEMmREbKkCEsAzLMNnRkoOcq1xSmZcLcKnUknAoGBAJjGDcvr/RsrP/fehxxs -jqtxALg26wAUbfvgjrJxZ3sSmXvP++t8KcXHoAi9vbUXwHjdcq1GCiynmuJG/D4z -u7oBsQnducfSTULsmdMIjnBTy6cdcilfgfX+3h/eUEDzF2R0vx3ugmJMUW4+iMm8 -CVLNHOr0uNpdrz5tOf6SpRhd ------END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/ocsp_responder.crt b/jstests/libs/ocsp/ocsp_responder.crt index cdc1d6414ee74..dd723c0983b94 100644 --- a/jstests/libs/ocsp/ocsp_responder.crt +++ b/jstests/libs/ocsp/ocsp_responder.crt @@ -1,21 +1,21 @@ -----BEGIN CERTIFICATE----- -MIIDgzCCAmugAwIBAgIEG9rCUjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDgzCCAmugAwIBAgIEHKwtmTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjBiMRAwDgYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjBiMRAwDgYD VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z 
dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTMwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNTTbCpwZg6MARSlau3FXBzmM4 -+6SqXPf3xQ9L2hpulCFWWDIfvWpK3zVKbSVIU6L9lo8Cdrne4ZRE486BXo1iIvdY -kpekvrS/m6BG91wN0/AI+NNaK/7Azdz4aScAs8cxbqigqeYqFLH/KAvP0XebzfZ6 -kfsqjPYRvvaur9QWIvgKdjBz6VEY01uIoZVWR7rSPAj+K3kYhfEUK3vsdQ30LdDh -F7T81NO+w4ecuy1/aqMxc3oM1L3H7yh62a2tyBwv223KCoxH4WGvpmmTPZ/EGEgd -07pGMFcii99GJ/yFqoqJo3akjXqms4WqeL0W7vlFOFV2SQw8+dPFSOOqV6iNAgMB +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUE9xVxnPbyMQJIrAP924qbpY0 +BI9gIq1W4SRMrXqcKbWDz1I2nhhiIWW4Iw7j5+5udX+1RMGIn5azyrxfrC93WfHi +1MGoXobFz5B1amVJWSB6YUhvssEFxumcKhQCvypojYZw/3dZEWuwpu+7ffHc1gpu +eQ7h84UObFPXMeh2Wa6UhnTL/UR51dA7KEHyM+xjRfCrua8IiTgKkYkR6Wb7XBvq +6x3AhVpv5ix9Y5UL1vh49M7vyBsej2lz/IKf4pOEFXkrvWxX7LmiJDgEfiyXhcDM +LaWzp89+CJdDVpQsduKhQuh6X08ILk1qLtDutHmkc7mpAqsWSMO4a7vEjrjvAgMB AAGjLzAtMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBMGA1UdJQQMMAoGCCsGAQUF -BwMJMA0GCSqGSIb3DQEBCwUAA4IBAQCps3M1xiI8UZYXGd2ESdYBFB8Z8oK75SC0 -lgCB1u8ryjXPVcNPQSQfMDid+9uXHHQkdAZ0nbY/+gc9Iu3ekMS31bwROto4239s -BlVH65hgDfOOcUlSbG68xgXZQtGHhx+bxJ+NMuPjkvEayJVI/jFj4/u1JBuKxXwf -IHnUnjicgPK8d+txSEDlgaYEOt7udQc76Am2dVxejC3bCLK0WwvB/vVCWng1xiSu -j2XsSJ24Xg8ppbEXP6hJ73UXOmFyrT9n2rtmDLLU0gNhiWhE3VPdB9FYzcteBknk -9OR9cf45oRn//rGFwQIuK1KVQZWkn0j97fMHO4dDLLXwoJJZ0C5N +BwMJMA0GCSqGSIb3DQEBCwUAA4IBAQBOcnFH8KQjIqdROk4/zavs7UTmKfm24nAm +CQFEbPcdDV15ySg8ZHxmzHw+SEzvIuhTnIacTnOWoBgr4sx6y/p+hd712SME0okK +FJz/qWVj+u5N5zQ9IUqqJ1+dxq8x2rJbZLVnws1ToWP+Z4j1JuxZ9AWEfDmHke/J +M61hGksZSf8VulPjuB8HJuLe3jaxOptQ7fncjkpVXSH91gzakjsS3lO/U9RrwC3V +ZCX4JKWcjLadiKgf4L5II0GAWMZpVCXmYkUmkLLRkaFuN7t59FCOfomK21Uf9KoB +Nvyz2RKMD2fY86gYtCBza85hM2tf0ul6JPIt9KtxJTFAwbRJP5Wj -----END CERTIFICATE----- diff --git a/jstests/libs/ocsp/ocsp_responder.key b/jstests/libs/ocsp/ocsp_responder.key index 07a72b35e8214..61e29babd4d8a 100644 --- a/jstests/libs/ocsp/ocsp_responder.key +++ b/jstests/libs/ocsp/ocsp_responder.key @@ -1,28 +1,28 @@ -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDNTTbCpwZg6MAR -Slau3FXBzmM4+6SqXPf3xQ9L2hpulCFWWDIfvWpK3zVKbSVIU6L9lo8Cdrne4ZRE -486BXo1iIvdYkpekvrS/m6BG91wN0/AI+NNaK/7Azdz4aScAs8cxbqigqeYqFLH/ -KAvP0XebzfZ6kfsqjPYRvvaur9QWIvgKdjBz6VEY01uIoZVWR7rSPAj+K3kYhfEU -K3vsdQ30LdDhF7T81NO+w4ecuy1/aqMxc3oM1L3H7yh62a2tyBwv223KCoxH4WGv -pmmTPZ/EGEgd07pGMFcii99GJ/yFqoqJo3akjXqms4WqeL0W7vlFOFV2SQw8+dPF -SOOqV6iNAgMBAAECggEAKe1C6a3Cl7fwwZc07LECsnm7ub9qeYBTA76yyQLZswt9 -Y2zPj7vXIUNEVtbDbmnmN3Ov8QjeHK5k4UzEt5bfuNL7QJNvdK2drCPUL6oEmq5V -rv9h6mBhvm5MWk7fMD2GyImY1nbcEmv3jexXvTFCBAej9pQpfx8agbVGG5a88WgF -EXVt8emrAVBtIOS+lql7Swi+jjDvtKZS3HQDZoBqHhqguS/BT8EG7uwqM4unUeIU -IgmWzmFYWiylUtGZwqkD5qtymXokxJHdOyUewz0US4sHP1Pnk5He2Iz3EJlb6uG9 -ffCL1DobPmh5Ptk6RdMQ1HHgfcQzni3uIgHqYalB4QKBgQDwaZCe5+OUpIFI3yL3 -SQDHqTtumxdFrWKeK1+coXJKXl0jM6OlbqDizYbsHGOlNeA0QLpP+IbO70s7naSA -99k4djmBu5c/I8rkKBkxHLEXk05VvPLUYlyPbfMmIEZt3V6J9vMdvg2JcbHNY1mJ -3hzKJpGdLsl/PupZO6dYyFsjwwKBgQDanN7Y3b3gW1AdCsdPpCbue7VJnRUowN21 -tMqmoMf+ODrlTaeUibh690xYynGky7uXGv/+7b2pJ87u3RNFHLVh0YQPKBEY1U85 -zKvJq7nVmyZRDUUlZxbotg3KLfujFK3GIES9zSNgSeYu4VE3N/QuDtasWcVM/DHT -SiCd8RDNbwKBgDWAuM+0eaBl7+PAzO5Nj9/fWLunxkkaah0bN/8Kqv3D/GROJLg3 -Y5S1+rXnJ4Hn+IqIcS5E9/Nrq8mLrsWI4w2+udOWZPWHSKr4BYs5gGvnu4I3N69u -aHz/EiLFAgeHQXfgvoA7AJY6+D/mQVH+XVfbt4JMnEk2Pj83mGmYxTezAoGAOo74 -tpi3Kujqzcy8VN4Au7MRfZWBPHhlKy4xdm7tA3DmfdPORuCZNP8XSVV5GY6lQjWu -Swg93JjYOm2nosSs5XXs7O5rnf5NiYsEnTHNxqUXn3BxjDJQFafmi7jDhgyc+8xa -/Yh8qGdDLF2YlrT7SHcbE/G8AwIvCoz5rbUMLisCgYEAg/du2NWZj1d9VjkZOuP3 
-ZMnc04SJfB2oD1Xd/zoq/cEDFpHCAMzFm2BVPK3OAvPqNHppH1u0oeu17a+OCfeB -+t/3bsNDd5Ovm8HNfAvhevQhG69tL9bfQblqSoBg6Fd00iILiT9WKK86zcV4kVMa -2LXdfgZvnpV5oroqetzqASA= +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUE9xVxnPbyMQJ +IrAP924qbpY0BI9gIq1W4SRMrXqcKbWDz1I2nhhiIWW4Iw7j5+5udX+1RMGIn5az +yrxfrC93WfHi1MGoXobFz5B1amVJWSB6YUhvssEFxumcKhQCvypojYZw/3dZEWuw +pu+7ffHc1gpueQ7h84UObFPXMeh2Wa6UhnTL/UR51dA7KEHyM+xjRfCrua8IiTgK +kYkR6Wb7XBvq6x3AhVpv5ix9Y5UL1vh49M7vyBsej2lz/IKf4pOEFXkrvWxX7Lmi +JDgEfiyXhcDMLaWzp89+CJdDVpQsduKhQuh6X08ILk1qLtDutHmkc7mpAqsWSMO4 +a7vEjrjvAgMBAAECggEBAJlMhe+m1SbhF/t2jIfuFj8r0v1k8nLOfjgX5JJcsA1k +BOZznX0HiJ6ef5W8Gc5aX7qOyrKwMaldmqT5/nK111XRuYx6uEXJlmg0orqNHwwH +FfgAjEjZkLdpUoJk1DQsFS6gZrcVoyLJ+esmsJH59zPnsICslzpzOhHz8iLS27N3 +zPuEhYi0yiXr4Yi8p/TloMuscTSmiWQy8CfIsFVs/FC4n40xkedvB4vEHUEfxb/D +sbqPuKZ9nnu2EFCYZ+UpYOSho7q/60dBHpuszPpbLmk0ICm+2jda+32trI/ZxNxT +ZopDwUlwfh6DoXz1Dj9O1Y54YPWcKlCeraJS/uSxobkCgYEA9LsRYDHtBDlxnOsl +F2ALdIdvvuxAdJWg8gTaojbLnU18dujTtN3NvU4DYzl2KxKAuCHDNSpabWCd5CNp +ouEx8CuA1GDA0J1aUXEDh+hBeqLatFeAVh5FNQAEKIbUa8H02K3CQ0pyY/XGC24b +yVmW+P1mm0PeX9wn6QV5SSTyUFsCgYEA3dffC7NIJmJvmwxdc4a7JKL/OLfvvHEO +sYjsKugqRFFALyTrR99/MmwWxvemoPCUhgGvYDUdgDZOtLK7aV0OTG3lb5FNQZyv +odTMqgp1E1UTnjNDLvbe8BCwjpbzCr4a8Cu+tNHYKKTnTc+IQt3wJIvkODLpYXW/ +9enENerhHf0CgYBRjzpmNZ1IyCAd7JvK435N0okiNrfwYqFlpsp6PwqynM07D7Lm +Vvkm+qXUXqUMC7mMI+xbfobuZsGX81lUJ/qtkMH5hVkc7SPZenFzzKptxXJfob2q +daIY3K2GGBEY7GW+BIJ2TIw81f6vdn2aJ9pRlpmoawDkL4m5ddKK1t5OewKBgFHm +d3b3WkSphLCGMR4MeJy+/GBxL7WvO5B5j08L5UbRhFOjWqVvMqNn2kTcewzhpMOQ +bbGkCl8iLY5yidQNQdEfpVu6Ff6KHSEu0dxY/f4EI7j9DemRxJ52WuhZfCLUKlfO +nppc+mOO9cH5Q0IEc7raVogU66pKcXMVOZX6P9/1AoGBAK3OS5YXFwwLoZ0Tqs4o +p4wr1lcJnZT6PC8XV4s7G6xToQDNg3HzUIZ/zlAGQ8L5/69sJd4RePLuD8ULqQwW +nYpTnY97k4fMzfm6Et19QDZt5D1eSjdyedyLeGLGJcB3PSvX6okUVuCeLTQt2JZU +1h1KfS3ZnTJiOOI6rCnDU4Nu -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/ocsp_responder.pem b/jstests/libs/ocsp/ocsp_responder.pem index ec54da767d2ee..e0ad37f96c70e 100644 --- a/jstests/libs/ocsp/ocsp_responder.pem +++ b/jstests/libs/ocsp/ocsp_responder.pem @@ -1,49 +1,49 @@ -----BEGIN CERTIFICATE----- -MIIDgzCCAmugAwIBAgIEG9rCUjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDgzCCAmugAwIBAgIEHKwtmTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjBiMRAwDgYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjBiMRAwDgYD VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTMwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDNTTbCpwZg6MARSlau3FXBzmM4 -+6SqXPf3xQ9L2hpulCFWWDIfvWpK3zVKbSVIU6L9lo8Cdrne4ZRE486BXo1iIvdY -kpekvrS/m6BG91wN0/AI+NNaK/7Azdz4aScAs8cxbqigqeYqFLH/KAvP0XebzfZ6 -kfsqjPYRvvaur9QWIvgKdjBz6VEY01uIoZVWR7rSPAj+K3kYhfEUK3vsdQ30LdDh -F7T81NO+w4ecuy1/aqMxc3oM1L3H7yh62a2tyBwv223KCoxH4WGvpmmTPZ/EGEgd -07pGMFcii99GJ/yFqoqJo3akjXqms4WqeL0W7vlFOFV2SQw8+dPFSOOqV6iNAgMB +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDUE9xVxnPbyMQJIrAP924qbpY0 +BI9gIq1W4SRMrXqcKbWDz1I2nhhiIWW4Iw7j5+5udX+1RMGIn5azyrxfrC93WfHi +1MGoXobFz5B1amVJWSB6YUhvssEFxumcKhQCvypojYZw/3dZEWuwpu+7ffHc1gpu +eQ7h84UObFPXMeh2Wa6UhnTL/UR51dA7KEHyM+xjRfCrua8IiTgKkYkR6Wb7XBvq +6x3AhVpv5ix9Y5UL1vh49M7vyBsej2lz/IKf4pOEFXkrvWxX7LmiJDgEfiyXhcDM +LaWzp89+CJdDVpQsduKhQuh6X08ILk1qLtDutHmkc7mpAqsWSMO4a7vEjrjvAgMB AAGjLzAtMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgXgMBMGA1UdJQQMMAoGCCsGAQUF 
-BwMJMA0GCSqGSIb3DQEBCwUAA4IBAQCps3M1xiI8UZYXGd2ESdYBFB8Z8oK75SC0 -lgCB1u8ryjXPVcNPQSQfMDid+9uXHHQkdAZ0nbY/+gc9Iu3ekMS31bwROto4239s -BlVH65hgDfOOcUlSbG68xgXZQtGHhx+bxJ+NMuPjkvEayJVI/jFj4/u1JBuKxXwf -IHnUnjicgPK8d+txSEDlgaYEOt7udQc76Am2dVxejC3bCLK0WwvB/vVCWng1xiSu -j2XsSJ24Xg8ppbEXP6hJ73UXOmFyrT9n2rtmDLLU0gNhiWhE3VPdB9FYzcteBknk -9OR9cf45oRn//rGFwQIuK1KVQZWkn0j97fMHO4dDLLXwoJJZ0C5N +BwMJMA0GCSqGSIb3DQEBCwUAA4IBAQBOcnFH8KQjIqdROk4/zavs7UTmKfm24nAm +CQFEbPcdDV15ySg8ZHxmzHw+SEzvIuhTnIacTnOWoBgr4sx6y/p+hd712SME0okK +FJz/qWVj+u5N5zQ9IUqqJ1+dxq8x2rJbZLVnws1ToWP+Z4j1JuxZ9AWEfDmHke/J +M61hGksZSf8VulPjuB8HJuLe3jaxOptQ7fncjkpVXSH91gzakjsS3lO/U9RrwC3V +ZCX4JKWcjLadiKgf4L5II0GAWMZpVCXmYkUmkLLRkaFuN7t59FCOfomK21Uf9KoB +Nvyz2RKMD2fY86gYtCBza85hM2tf0ul6JPIt9KtxJTFAwbRJP5Wj -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDNTTbCpwZg6MAR -Slau3FXBzmM4+6SqXPf3xQ9L2hpulCFWWDIfvWpK3zVKbSVIU6L9lo8Cdrne4ZRE -486BXo1iIvdYkpekvrS/m6BG91wN0/AI+NNaK/7Azdz4aScAs8cxbqigqeYqFLH/ -KAvP0XebzfZ6kfsqjPYRvvaur9QWIvgKdjBz6VEY01uIoZVWR7rSPAj+K3kYhfEU -K3vsdQ30LdDhF7T81NO+w4ecuy1/aqMxc3oM1L3H7yh62a2tyBwv223KCoxH4WGv -pmmTPZ/EGEgd07pGMFcii99GJ/yFqoqJo3akjXqms4WqeL0W7vlFOFV2SQw8+dPF -SOOqV6iNAgMBAAECggEAKe1C6a3Cl7fwwZc07LECsnm7ub9qeYBTA76yyQLZswt9 -Y2zPj7vXIUNEVtbDbmnmN3Ov8QjeHK5k4UzEt5bfuNL7QJNvdK2drCPUL6oEmq5V -rv9h6mBhvm5MWk7fMD2GyImY1nbcEmv3jexXvTFCBAej9pQpfx8agbVGG5a88WgF -EXVt8emrAVBtIOS+lql7Swi+jjDvtKZS3HQDZoBqHhqguS/BT8EG7uwqM4unUeIU -IgmWzmFYWiylUtGZwqkD5qtymXokxJHdOyUewz0US4sHP1Pnk5He2Iz3EJlb6uG9 -ffCL1DobPmh5Ptk6RdMQ1HHgfcQzni3uIgHqYalB4QKBgQDwaZCe5+OUpIFI3yL3 -SQDHqTtumxdFrWKeK1+coXJKXl0jM6OlbqDizYbsHGOlNeA0QLpP+IbO70s7naSA -99k4djmBu5c/I8rkKBkxHLEXk05VvPLUYlyPbfMmIEZt3V6J9vMdvg2JcbHNY1mJ -3hzKJpGdLsl/PupZO6dYyFsjwwKBgQDanN7Y3b3gW1AdCsdPpCbue7VJnRUowN21 -tMqmoMf+ODrlTaeUibh690xYynGky7uXGv/+7b2pJ87u3RNFHLVh0YQPKBEY1U85 -zKvJq7nVmyZRDUUlZxbotg3KLfujFK3GIES9zSNgSeYu4VE3N/QuDtasWcVM/DHT -SiCd8RDNbwKBgDWAuM+0eaBl7+PAzO5Nj9/fWLunxkkaah0bN/8Kqv3D/GROJLg3 -Y5S1+rXnJ4Hn+IqIcS5E9/Nrq8mLrsWI4w2+udOWZPWHSKr4BYs5gGvnu4I3N69u -aHz/EiLFAgeHQXfgvoA7AJY6+D/mQVH+XVfbt4JMnEk2Pj83mGmYxTezAoGAOo74 -tpi3Kujqzcy8VN4Au7MRfZWBPHhlKy4xdm7tA3DmfdPORuCZNP8XSVV5GY6lQjWu -Swg93JjYOm2nosSs5XXs7O5rnf5NiYsEnTHNxqUXn3BxjDJQFafmi7jDhgyc+8xa -/Yh8qGdDLF2YlrT7SHcbE/G8AwIvCoz5rbUMLisCgYEAg/du2NWZj1d9VjkZOuP3 -ZMnc04SJfB2oD1Xd/zoq/cEDFpHCAMzFm2BVPK3OAvPqNHppH1u0oeu17a+OCfeB -+t/3bsNDd5Ovm8HNfAvhevQhG69tL9bfQblqSoBg6Fd00iILiT9WKK86zcV4kVMa -2LXdfgZvnpV5oroqetzqASA= +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDUE9xVxnPbyMQJ +IrAP924qbpY0BI9gIq1W4SRMrXqcKbWDz1I2nhhiIWW4Iw7j5+5udX+1RMGIn5az +yrxfrC93WfHi1MGoXobFz5B1amVJWSB6YUhvssEFxumcKhQCvypojYZw/3dZEWuw +pu+7ffHc1gpueQ7h84UObFPXMeh2Wa6UhnTL/UR51dA7KEHyM+xjRfCrua8IiTgK +kYkR6Wb7XBvq6x3AhVpv5ix9Y5UL1vh49M7vyBsej2lz/IKf4pOEFXkrvWxX7Lmi +JDgEfiyXhcDMLaWzp89+CJdDVpQsduKhQuh6X08ILk1qLtDutHmkc7mpAqsWSMO4 +a7vEjrjvAgMBAAECggEBAJlMhe+m1SbhF/t2jIfuFj8r0v1k8nLOfjgX5JJcsA1k +BOZznX0HiJ6ef5W8Gc5aX7qOyrKwMaldmqT5/nK111XRuYx6uEXJlmg0orqNHwwH +FfgAjEjZkLdpUoJk1DQsFS6gZrcVoyLJ+esmsJH59zPnsICslzpzOhHz8iLS27N3 +zPuEhYi0yiXr4Yi8p/TloMuscTSmiWQy8CfIsFVs/FC4n40xkedvB4vEHUEfxb/D +sbqPuKZ9nnu2EFCYZ+UpYOSho7q/60dBHpuszPpbLmk0ICm+2jda+32trI/ZxNxT +ZopDwUlwfh6DoXz1Dj9O1Y54YPWcKlCeraJS/uSxobkCgYEA9LsRYDHtBDlxnOsl +F2ALdIdvvuxAdJWg8gTaojbLnU18dujTtN3NvU4DYzl2KxKAuCHDNSpabWCd5CNp +ouEx8CuA1GDA0J1aUXEDh+hBeqLatFeAVh5FNQAEKIbUa8H02K3CQ0pyY/XGC24b +yVmW+P1mm0PeX9wn6QV5SSTyUFsCgYEA3dffC7NIJmJvmwxdc4a7JKL/OLfvvHEO +sYjsKugqRFFALyTrR99/MmwWxvemoPCUhgGvYDUdgDZOtLK7aV0OTG3lb5FNQZyv 
+odTMqgp1E1UTnjNDLvbe8BCwjpbzCr4a8Cu+tNHYKKTnTc+IQt3wJIvkODLpYXW/ +9enENerhHf0CgYBRjzpmNZ1IyCAd7JvK435N0okiNrfwYqFlpsp6PwqynM07D7Lm +Vvkm+qXUXqUMC7mMI+xbfobuZsGX81lUJ/qtkMH5hVkc7SPZenFzzKptxXJfob2q +daIY3K2GGBEY7GW+BIJ2TIw81f6vdn2aJ9pRlpmoawDkL4m5ddKK1t5OewKBgFHm +d3b3WkSphLCGMR4MeJy+/GBxL7WvO5B5j08L5UbRhFOjWqVvMqNn2kTcewzhpMOQ +bbGkCl8iLY5yidQNQdEfpVu6Ff6KHSEu0dxY/f4EI7j9DemRxJ52WuhZfCLUKlfO +nppc+mOO9cH5Q0IEc7raVogU66pKcXMVOZX6P9/1AoGBAK3OS5YXFwwLoZ0Tqs4o +p4wr1lcJnZT6PC8XV4s7G6xToQDNg3HzUIZ/zlAGQ8L5/69sJd4RePLuD8ULqQwW +nYpTnY97k4fMzfm6Et19QDZt5D1eSjdyedyLeGLGJcB3PSvX6okUVuCeLTQt2JZU +1h1KfS3ZnTJiOOI6rCnDU4Nu -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/ocsp_responder.pem.digest.sha1 b/jstests/libs/ocsp/ocsp_responder.pem.digest.sha1 index 9bff82f75c9d2..bd7ebfe3ea8ec 100644 --- a/jstests/libs/ocsp/ocsp_responder.pem.digest.sha1 +++ b/jstests/libs/ocsp/ocsp_responder.pem.digest.sha1 @@ -1 +1 @@ -8D7FAD707F23480F7BE38FC06F46794DEDB18CEA \ No newline at end of file +EE2B7FA024D58EA95D40EF923938FA34B8A6E9F8 \ No newline at end of file diff --git a/jstests/libs/ocsp/ocsp_responder.pem.digest.sha256 b/jstests/libs/ocsp/ocsp_responder.pem.digest.sha256 index 8251d950e2f35..350edac368b49 100644 --- a/jstests/libs/ocsp/ocsp_responder.pem.digest.sha256 +++ b/jstests/libs/ocsp/ocsp_responder.pem.digest.sha256 @@ -1 +1 @@ -CE89D643467D21E63407A1C64C793FA72D5C5FCF32A97C1E317EFC5A367B9F4D \ No newline at end of file +1A354365C8D9618F76783975F8FC0F30BD097AFDBD7A15316D8BC2F2CE545A67 \ No newline at end of file diff --git a/jstests/libs/ocsp/ocsp_server_intermediate_appended.pem b/jstests/libs/ocsp/ocsp_server_intermediate_appended.pem deleted file mode 100644 index e3065485238bf..0000000000000 --- a/jstests/libs/ocsp/ocsp_server_intermediate_appended.pem +++ /dev/null @@ -1,48 +0,0 @@ - ------BEGIN CERTIFICATE----- -MIIELzCCAxegAwIBAgIEc3NuKDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV -UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO -BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt -ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIwMDQyMTE5MTQ1MloXDTQwMDQyMzE5MTQ1 -MlowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN -TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs -MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq -hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8o7m7QpIMUZ2r6HOmhuqNF25x0odb9Bg -rSLm7Hvb3WBu6jwWPrrnPerR/nODVEY4Qo7mOclgCsooJx3HaPYPgRYffRQMJ+I5 -lpvsRsBjW7CnS0amz9QcbGnIhMeFU45gCn51CTLPoBJ7hB9F4Z02bOJEMkkXkhtm -kkiVysUs6po+t2+w8tojOScZdeDUtwfStKJ7Xb9B79Ko3BCcITXJUxDBcqUEJF+E -v3YQuQg/QKNTO+L39aFFo8WNfuP09txdjT/+T8PZq826ccohRdSrJ5lq1hXmmKXp -3p6Ut35aE4tjj6KSjDonMkYcvdNHQ0aL2p8x4JjwgwAuNwawTUbYIwIDAQABo4Gv -MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB -BggrBgEFBQcDAjAdBgNVHQ4EFgQUyC6Gv0rfoato44VsaVig1SmminYwOAYIKwYB -BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh -dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC -AQEAogdunlFL04lqVbZyqPvN/5TtrEtM87invrzTYZ8UmT5Q4Kr8mHRsumBuVwDu -bE+umrPtQVvu0XYqsjmjmOk7hTIK6PFuF6rLQCUBHVXBZggTNKFFBWphQ8odUbPG -FmOqSlkZAkcNo3dLpxRbfDru2ARxeE2+sRCPWwUZc7utqpLoZ0deuKdDSlA/VcGJ -5wf0sjmcjvJRRUSYeJcUox4ySL+4WtFu33LhYZKgnrMNegaJ6UyIlwB4ihMyi9sV -yDlsY+vGqivqqMUw8V6tdUekCYPUlHWXeICqsRIBII+xMzqTv1rXPzNyAvyVYrBi -hG10rdLfnQWn2vpYKU5b3Vo1yg== ------END CERTIFICATE----- ------BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEYLGF9TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV -UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO 
-BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjEwMzA5MTY0MDMxWhcNNDEwMzExMTY0MDMxWjB+MQswCQYD -VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp -dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY -SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEApP3UQTlZVYFjzvRREJbdqYBw1zF+NWayd+AFUqWzrW35TECxnmR0 -PEr+ILEucOfiPB/AwRoTCMF0IJk1y6l2ljxGs9vuGD/MdBtnxzJ3cVbzPTtVm5Q4 -kAmVJz7O+2cw70XGD3hruDMKGkAixRwLXp16ENl0jyJ6V44JBRfOQcZLG3geJgve -cbp1KwkTASaRcYv+93tr9z5s92a/2UVXRuSK/Rf1+x+U4+GRVJh4/k8i9nP/ieYg -92OGqhWr1ETdSv66SZ+sHd+4OftMbETqBdiTGj7GM+EszAEUTPYDabTvQlOBtdZH -NYOLHGMxKxdEj5EyzE4y8WO7yk4W+TZItwIDAQABozIwMDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRRg4ZhgrLm0lO4jGm+fmVnaczaPzANBgkqhkiG9w0BAQsF -AAOCAQEAZK3kybfwQ05q6BQevqkun9CKC3Vwhv6BLez5wOXW3gQ8BQdypKbSMEB8 -4RPEcy1zBU+6LPa8S+WE88EN85/0n/oS7N7MAgEpSWT8vflBm76nEfG4jG+l8h+Q -yIp0e5/ITq/MtRx9EiRk+vU6l3Mkvqq+2a3T7pKhvE4jOIOyFtg5jr+p2n46WEw4 -g3N/BzbZLpz147KO7tYrelhIAAcbmeVUKlQOjtcljeGbZimRgt8kzJWBVNAS6tEj -J8FTRLMX6HSTbVMx8tjq+MxF9hn1Ztc/3GIIuTvlGeTkLTS8atR4318YfMcZLlwm -pt3Zd7lPfbW/gmFewm7GB5TL9rDfbA== ------END CERTIFICATE----- diff --git a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem index 93ac0f42be110..e8fbc703889c2 100644 --- a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem +++ b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem @@ -1,75 +1,75 @@ -----BEGIN CERTIFICATE----- -MIIELzCCAxegAwIBAgIEUOothTANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV +MIIELzCCAxegAwIBAgIEfW+T1TANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt -ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0 -N1owgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN +ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIzMDYwOTE0Mjg0NloXDTI1MDkxMDE0Mjg0 +NlowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq -hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz46Uuj2QZUYHdpn1Hqcus0Bp+G+DXB4z -+Oy0gfsDcVjb3zJIuTr4Oa50kZs1ZxzAAi0EuZbhmaxb1UIoF1pWKfGrFfP6xRna -X39unrP5iZ8w7M6f6Op6LsygKMe4Tx2IdtC69rNfwxlaeFhCpIzG6gzNQWD/xFVp -pLpaqqPHaIIgp3KSv4qLDwBT7gFcCHUuj/O71V5wkipeEn+512ly98bHn8whq3fk -w7tKTmvy1kMxRxB/Bc3ZbhRJJsXZkcRv4M+qrdDkz3/+IIUNkMTs+6fPeVsDVVEF -wfNIbQMPznUyfppmdahghwSpuHiAzzFAR1QZI/6SGc+E6VZn8nEalQIDAQABo4Gv +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA+W6RJhUIsx3hsjCKQNfqacGiMPTz2NE0 +YKoqYrKM5w4xCgwR5lRkOW9kC9ASsm232iQrffGD8l5huymc1RXog2sEh4JV/5f3 +VaNq6Lk6x9spn7roaQ8LbyeFsXEH32r3PM0YRrNs7tSWN2uaUqXF07OvhN1o5/dx +28IVAyhLC06F3gqWEDCgn0bd6EpPMtSdKfnvJ4YrNRFrB3MtAJ6VDBHgjMVbXrrl +8GVmiGRA/y4dDbuT5yK3+gmu7XrJhUKIMGbUKYuRR1NiH+yNUaaAlkoyrVE3gV+I +Wwz1e+sl/3oc+fHsmXtX9kX+NxhSfR0wm4Tn//CjTQxGY/uu/FQuqwIDAQABo4Gv MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB -BggrBgEFBQcDAjAdBgNVHQ4EFgQUTwxQ4b2GGlnEqB9M2bAXp5OirKMwOAYIKwYB +BggrBgEFBQcDAjAdBgNVHQ4EFgQUbcLZ6+ThLq8kjSDkl4rjudqckFYwOAYIKwYB BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC -AQEAIQc2TM7zkixdANoFswyN2Q32xlQPeXGOWOJneTepeuqBtVwQ0+aZ6wCE91b1 -okTQDizDPFWUROWdffpGk/EI4+Q+J/HONOiq3J0Rz9Dak/VhApZ50UJ9jwZttra9 -SGFw4xQz7AG6ftYKT/78GA5KTubQU/gKZBopwNx1VJi6aFnwSosbofMviJQOcVlX 
-Ga+I2LgvB73PPtxUbk0LIHV8k+wY3DgseYyViqOxFlrbc4/stqYHE5y23I+539AX -8x1Sc2WtO198gOAXobLiYkW6mUqSPVENMTOVa2hC5rsaigYMi1HbRQa57s4+5UGr -3i8BejcVaI7mfbce3lCoUbAgGg== +AQEATpC8AxX4/QQ3JuqD7+qDMKVCY6hhOIETDp7MJvRAF9I/8P7p4AWXDQuROaXp +bGKm7Q/29UaHvF23YfMl10QCc2cb9ybYHqzbKtGKqIAjnCtsFjI51Hs3rb34RGIt +/DT6IqYqix+aR5MyVA1EmuA2eV8WM51vjcvlOPhrz4dYzecEgB5Q7yJ/pT8++8vK +huhAq8wwMo4pMsrtzq5AVGV6ND43gB7Hn2LHD4RNIDfUbvSG32vBJHm5MJ+WiR9l +Ljhdyc8UGZTzG9MQ9KVXhNuPF8m0aGA/5euuV03JJBODpwftS7QkdbUCvgl0EK3i +D8PVv2KR4n72yXTvgVXNIGFG7Q== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDPjpS6PZBlRgd2 -mfUepy6zQGn4b4NcHjP47LSB+wNxWNvfMki5Ovg5rnSRmzVnHMACLQS5luGZrFvV -QigXWlYp8asV8/rFGdpff26es/mJnzDszp/o6nouzKAox7hPHYh20Lr2s1/DGVp4 -WEKkjMbqDM1BYP/EVWmkulqqo8dogiCncpK/iosPAFPuAVwIdS6P87vVXnCSKl4S -f7nXaXL3xsefzCGrd+TDu0pOa/LWQzFHEH8FzdluFEkmxdmRxG/gz6qt0OTPf/4g -hQ2QxOz7p895WwNVUQXB80htAw/OdTJ+mmZ1qGCHBKm4eIDPMUBHVBkj/pIZz4Tp -VmfycRqVAgMBAAECggEAaaLMqVkqA5E3vTIwYjB+gTa4eHvw5FJu6zM1z628m9S4 -2wkRoEyWB2114KGY9WjZhIH7FKXpHjht6MUp+HC+x8w64gpxyB0XWmFWIKnCyDtg -QDYNhxLHqsf0f6zebk2+pZIoheTXSUm/FN7+1BbeKkLnuG2w3vEFupQqDw0aMWKV -z0bX5oXeFqm1piCqj0ng5eFRDFD9JSXvcrGxJ2sxT8CHgEQM+uDIYRsU8K21Wq2W -I+MuyMz9La1sfjHITGQ5MdunHSYvjNuHCXqM3YQWR0Bh3h1DNNCsMTh5HZeRWVsd -Haof4VjRScwoW1q27T/2mnohf4sNu0cGhRYlfwtPJQKBgQD7QeE4pw4R0HjYlqb+ -FBLlbTdKdkm9eUg7heOV/KOwiungE9j6pwxuYosC0McVYWWG9SMKY0q8/UsPVZIm -CzZ0Vs1gB9CG8UW999YkqvKM0YvetOpNmaW2gc0fY/TPgMqQZU8l7VdUudiXB8hv -aDYPdYd27/B/1Lz+he12XfDUOwKBgQDTeYh8Zm93qrPPbg2ieESY6V8VpnFdOG9I -EUtgiElXiSlyB0pqV8+DC1UWmFsRz/3i62rp8o58UcJPMe2SJz///W9C+ZZyNtW/ -Rbx/40I5pBkvK0bZEXgULUVnFlvgb3a3PsLyhxk8eL5dfp2NvtKTXvfGaKus9QYb -vEoEPobvbwKBgFMDiDskjrR6EYNV+ySVU0z0EcGLZX+xk5j++puylg6dRvpe9GCU -Uroh2tX6TtyUimvVkFc9SRM6CNOvLRNevwYfK8nfqxj6nFVQjjMdO/gkv7a8RXGQ -Iz0yk6gcaWUpo0OkBUt9qE34/UOhMasFXl8rMK+uROKnUi4x56wlC43DAoGAf4RD -NrAV+tSmBChadGONCZ9/RHDO3uVOxOgYyaakgvIkWavnxWQZru1Aa5WHJKCEeTZ3 -i4ZFQNWUE2kJ1h1wzA2n73zMqSZDkUidt4fzwQogXX79A5szCweZV+X1lMnhjfF8 -X/3yy7ILKBlXK8eq9k5Hng4zpuFAw6yv/QsvFmUCgYBPAceHxnjW9D/NWIstCQmf -fxWiJyYpSfo3T5snWSKbUpZAFBu0RXLRiwBUteY7/1aZCPAsr5SM1tCSRKgiZbGp -OJXlyTv5IJMDvCA233EzuUVxhc8xxXMYQCspOd9E7Wf1+PMAf211mijeOt4sXvHv -4wQJwTDVo0ahgYMHJlHi0A== +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD5bpEmFQizHeGy +MIpA1+ppwaIw9PPY0TRgqipisoznDjEKDBHmVGQ5b2QL0BKybbfaJCt98YPyXmG7 +KZzVFeiDawSHglX/l/dVo2rouTrH2ymfuuhpDwtvJ4WxcQffavc8zRhGs2zu1JY3 +a5pSpcXTs6+E3Wjn93HbwhUDKEsLToXeCpYQMKCfRt3oSk8y1J0p+e8nhis1EWsH +cy0AnpUMEeCMxVteuuXwZWaIZED/Lh0Nu5PnIrf6Ca7tesmFQogwZtQpi5FHU2If +7I1RpoCWSjKtUTeBX4hbDPV76yX/ehz58eyZe1f2Rf43GFJ9HTCbhOf/8KNNDEZj ++678VC6rAgMBAAECggEAao1M/BOoL7voGhKaPLD/tkW9X2SEdm6IDXMjwB2+C0YI +tN4LF3WdituGxXURR5+PFmS1H4v4baTb4vQXxv8g4GLrAGgxDIqCYdb5aIkYDyAU +W+OgPKDspYMgnXhHgK1VCGgkoq8rLasqsGoK9ptSMuljZUKf+de+j74M89hWlnEQ +4JSy3C3xvHZZn/LYT50N+moiw7dh9a6ceDK2AoU8ZJq0iSL8My2bs4uVhurKPKFP +vrbKerhRP/sh2h0LosnB+2Wp9zwSqV5L5hn+W2WVqfPxgWtFwrnzX+RYuvsAN3bn +rAHZ4HzOfEs+dDNYySJ1UZXFR3CuXD7qa+ApbNFAgQKBgQD+LttPGhSzAYh46lGG +iOAngpGQBCKaOrnhxsK1VwPZMWACV5Q4sfLdrR+mAroLjSqFrVko+qe44hqmngVL +OfSirk5B12kObtSrjvKn5gR2WyASl5P0QNQl8TolhWI3gEQJH0k2jN5X0l24cJ2N +HbDpzU0DObiNfB16tyYQHjo5mwKBgQD7NwQV1LUOn3ZIZ5nUFwOWd9IH4s++pwPd +m/39YCD65UYQ8OhfTJrVcn/cz0ETsL9DP/2sthoa7PPhYaXLi6qcfZmsuJE/dVOv ++pecSn672gCURsgPcPl/XX/siRmZgb45SDEyrl4GYoK/SbyZlgnvpJo42lraaD0e +UBbJzLP4MQKBgCUV/RWLfp2niZvqxD5W1i2tlUNn9wx9qQVSFLKUoZEc7R8qXAvx +mrfRPJ7iIvbwf9XFNw8Nk08cXjsVLzyMli7uM6jTnxZmAU2Oq8TngJssLH/J8eJf 
+WxS2H3+9+FiUtFiIYgw9fWte3CG+/J3MSTWzqJrh3xV1mG+BLWKIpoIlAoGAH9So +v9tj1aZ+5k26QBVqbvZftoAWsqGW682IMUKs6x2B80OTLgAW3lTTvrbEGCqdEXha +PDgWtrKvdC9bQp8/zvRbNHducAv9vp6R90u5IzRMPn15e/tkoa7HNsFobPrzj26G +TVWqtERnLLW7H/rS53qD0BBa0rHCjMS5HnjBlQECgYEA9XUgBaG+4sC4+PPLeFuF +0NvxdtVFBU4/x9B4eGWLNuvmekwF5rnU/FjMBBeUW+W05VGsaFxjGUnk+DowZjyL +MaLGMY7k2AxoeCB+mZ3HbtKDfi2W/AcLmH8Vz3DpqlmiFZTIYdbyeP+Pnbg88Ckc ++z1mB2pKj2JKsaptU5anL2Q= -----END PRIVATE KEY----- -----BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEJWcflTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDojCCAoqgAwIBAgIEfCXJSjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB+MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjB+MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwY SW50ZXJtZWRpYXRlIENBIGZvciBPQ1NQMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEA1GZlPg548GQidASCrRZDXfJm3Li+vIazhm4MGhv6bJ2o+SYHfu5j -PO51T5l0TGvuoJPnzwq+RlW5RW3/+fCMl0gt+awDDzjKCekn00w8KSk3rqGDYQk7 -cSVuoPJVyJJ/vo86bwVMB9TYyxPi1lEBeeyVaa2FvbzS+SZq2c9c087WVYRmj/81 -yGztUAwj2Zoru1ECuhHE8YMeyDqmi52XByUmI7Ywrzs4xc3pQ+AI62IsiUFbCNqw -EPHhEcCgEwaUlWihvUfO5lJSKeUyn0WlQ8MmnUTnCCMYDDJKPublsuu7Ngl6xmEM -H3u9iSbV6qWQwsC4PJcw5PO30yT5QkFe1wIDAQABozIwMDAPBgNVHRMBAf8EBTAD -AQH/MB0GA1UdDgQWBBRuXVvr6fP/Pyp2uqsHQ7KTvYGuNTANBgkqhkiG9w0BAQsF -AAOCAQEAaO2/QF2KG9LM1yKWe9ZbrvZa2dWgFHebY6HckDgE5rc5+nYug9zSEUi+ -QYKZfEAYbMZ2a3ymybsNsGpVPBYv6p2kaa6V9hrJb/awMI9dFg+o/U4v3dLQZmci -6cSI/c2T9mqJ4tVUPZE5tyVXZBZrQEUOJwcrcH19qWtS2n5Zk4BjkFAysC/eICMw -yD4kGI/djEkaABkCC7Xb75ySh5BJgC8ZZ/5gmKYiFZnV8d6ktXh7nPcqfsyuTuei -NBTSZpdTJ5WWNpybnK7/QUeedk1imNkTqK1TOJ4yjxIlhpGOX0f9q9InCrdYZb6P -SaRjfoPnX4zSLP5nv3QSDMcJjD/IrA== +MIIBCgKCAQEAnreW/G0+IaH5ly9cI00zwvHEs5jUc+eF8B/YWi7E/arCIRgOhnwv +c9pBDoVqmwx9cTtEI/KJW431V8LTBu40ToDBtrJCRUydF+dAnoxv5n1TlCF0t/52 +pyGM5Rf3K5nfJGjSpNPnw3yojpXKuAKJZdQxlPBtnsmCNk8+CtkTE3m/psLIGa/9 +Rq3Ctx0xfdJQiiajV8Kv08a3CDaWW2Tj4dWukUI6LFFfOKXe6Fvc1sfTPG7oH+GM +x/e7uBbCcP6J5368posgLddZifQjv9bQoGx0AB15cl9PzyI6PlaV4nww1HNwd7yY +UzWNVdOQ2LPkTERu0WwG7/lEE6L56p092wIDAQABozIwMDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBT0V8yyi6DeSqcaTzB1DWftYWVlgTANBgkqhkiG9w0BAQsF +AAOCAQEAyuXdfv9tkL2YgwzVFOSPkBTowGECU+7aEGtI45rRgkqw3x6HUl+zYcqa +REj0dLsFKoixkYSKS33Q7mg09jUXOcqs76hk0nf21ITe0c4M9NZzZPrN7PLmKRw5 +gYwsXHGYGL+3FmqTNN+L6kk/Gd/KcuwY2NH1701Qj0c6uP+Z54lP45DKKaJujouU +iIOGaLg2sU0r79baqzt7DvZVmlbqQZ8XTYykZo4O4E0hJ7WTNNwu+h1HoOanSfYE +EWKzo3qS7JkvZG1hoQhNk6np8+KQOWGGlom4j5HhFaSvlMk+M4NyqvJsO4hA5B2D +tEOIfINSAtpVNmi5I16idsVixhy34A== -----END CERTIFICATE----- diff --git a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha1 index 0ea8b53ffbad6..0e6a68a74ddc3 100644 --- a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha1 +++ b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha1 @@ -1 +1 @@ -A1F079E1453B9E8B79FBD4A7A7EE430851A0DF3A \ No newline at end of file +9B5ECB2AB3DA8633F3ADE0592FA7FC3C1D7B1A32 \ No newline at end of file diff --git a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha256 index 1d29ebcc2e439..7e7f3a8d3d4db 100644 --- 
a/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha256 +++ b/jstests/libs/ocsp/server_and_intermediate_ca_appended_ocsp.pem.digest.sha256 @@ -1 +1 @@ -C4A40BF1602DACA5BD94BEEC95996961422C70F749F709F3D5E776E2A6CA96E3 \ No newline at end of file +D640AD852DC92F4DC209C4DB27E8103FD771986C13C1250C19578BD34858E8D1 \ No newline at end of file diff --git a/jstests/libs/ocsp/server_intermediate_ca_ocsp.pem b/jstests/libs/ocsp/server_intermediate_ca_ocsp.pem deleted file mode 100644 index 3aa49df8d1b55..0000000000000 --- a/jstests/libs/ocsp/server_intermediate_ca_ocsp.pem +++ /dev/null @@ -1,53 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIELzCCAxegAwIBAgIEc3NuKDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV -UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO -BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt -ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIwMDQyMTE5MTQ1MloXDTQwMDQyMzE5MTQ1 -MlowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN -TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs -MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq -hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8o7m7QpIMUZ2r6HOmhuqNF25x0odb9Bg -rSLm7Hvb3WBu6jwWPrrnPerR/nODVEY4Qo7mOclgCsooJx3HaPYPgRYffRQMJ+I5 -lpvsRsBjW7CnS0amz9QcbGnIhMeFU45gCn51CTLPoBJ7hB9F4Z02bOJEMkkXkhtm -kkiVysUs6po+t2+w8tojOScZdeDUtwfStKJ7Xb9B79Ko3BCcITXJUxDBcqUEJF+E -v3YQuQg/QKNTO+L39aFFo8WNfuP09txdjT/+T8PZq826ccohRdSrJ5lq1hXmmKXp -3p6Ut35aE4tjj6KSjDonMkYcvdNHQ0aL2p8x4JjwgwAuNwawTUbYIwIDAQABo4Gv -MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB -BggrBgEFBQcDAjAdBgNVHQ4EFgQUyC6Gv0rfoato44VsaVig1SmminYwOAYIKwYB -BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh -dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC -AQEAogdunlFL04lqVbZyqPvN/5TtrEtM87invrzTYZ8UmT5Q4Kr8mHRsumBuVwDu -bE+umrPtQVvu0XYqsjmjmOk7hTIK6PFuF6rLQCUBHVXBZggTNKFFBWphQ8odUbPG -FmOqSlkZAkcNo3dLpxRbfDru2ARxeE2+sRCPWwUZc7utqpLoZ0deuKdDSlA/VcGJ -5wf0sjmcjvJRRUSYeJcUox4ySL+4WtFu33LhYZKgnrMNegaJ6UyIlwB4ihMyi9sV -yDlsY+vGqivqqMUw8V6tdUekCYPUlHWXeICqsRIBII+xMzqTv1rXPzNyAvyVYrBi -hG10rdLfnQWn2vpYKU5b3Vo1yg== ------END CERTIFICATE----- ------BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDyjubtCkgxRnav -oc6aG6o0XbnHSh1v0GCtIubse9vdYG7qPBY+uuc96tH+c4NURjhCjuY5yWAKyign -Hcdo9g+BFh99FAwn4jmWm+xGwGNbsKdLRqbP1BxsaciEx4VTjmAKfnUJMs+gEnuE -H0XhnTZs4kQySReSG2aSSJXKxSzqmj63b7Dy2iM5Jxl14NS3B9K0ontdv0Hv0qjc -EJwhNclTEMFypQQkX4S/dhC5CD9Ao1M74vf1oUWjxY1+4/T23F2NP/5Pw9mrzbpx -yiFF1KsnmWrWFeaYpenenpS3floTi2OPopKMOicyRhy900dDRovanzHgmPCDAC43 -BrBNRtgjAgMBAAECggEBAPIQ4y0U4c8rPy8wD/uEOGxiTREySgZYsuKWvlarlVRs -9MQWiyy3YidMvZ1uslXcbjEeY2ywJ4UdEs1WzrdVOUveRDaTVz5Gaqp/mWFShtXu -ikZ5j+hBCsy3FUJNzCUDJZ3TbgFsEADz8Qh+HUN3neU0OlLk1v0dE1RR1Au0k4rb -yvMFRDcHLQ2u6AoZm1vkaV+/E8REObb5lutgs2719JJapAlbPT49ttlkfXvgt5kv -Bnvt80S5+PEuyLNVRsdsRLaZZ4tZpmYenObb4kjiIbCHGRBkHXwXdsnLuqmxXSMb -52cUsBFGaPUtvIUQh41kGSUNdnKjf1SndJKqE4m6nUECgYEA+gxQx8SGMuy7jEqD -A/qU+aFF8brqeCb29YifY1eMjox+PvC4+2kG06Y3C/dvbA7eRxdU1PN5R/nPZMrX -+WxNbsnSJGtvvxZplygpj9DNzwKCH+4Z9dLk/+f7HqKv55c0eLt22PjnQ9GwVNEG -UnEWDo6Wl5F6qw2HAdRGuQbvBjsCgYEA+FTyQuxOgWpjCw9FrtV3+nRqISGaKZMM -pqvzPQQuA7Xer2UR4aW2lGtaA8y8Xgt2rBAPIlMggCIUXmWdkH6pwHSvIWhzCMhx -cyFTAFFsFcQkhCIArVbGvhbBgR0Srtb8ncFx+qbqg1N4Uwm60trBQWgAapZpFhDi -hXqRmSoDDzkCgYEAlGE+hmz+XbXRTVziBjhqsv+aq+mJPaeRoP5j5uWLCQQh3mOm -wbn/TRUzUSyRuAPSr0kPFBcu/yEkiuE77EzyXi3xP59pfnFkU0iH8Ums94y7fwsh -6JgvQBR/FhzgWYOGpaZIzlRVmA8UniAzqjRlLFo8ztCLhHnQhatcFGwi5wUCgYEA 
-047KtOjMGMShjBJ+sut5Qw1aPM97nl+AL53douWkrdSK2bGpAitC2D58eTA6aYQq -nXsw6XUYAxEFeUXobej6hNLjP/rTxW+99u803th+1Cw9T7QID6QVvGt2fqBeAkV1 -AJCEoZ0BvM+nelaXqnpimW4YrLVm4T2RPVWmJG3+HUECgYB+q+DztAUDCiVgVtxR -wkwnl8WPgZI01b+bCP3d9HgL6zLt/AOYBDfsKuhQ23CPhNJvVmq3gi9xvufBM6jA -lWhttgN+G72VmQmA84yXgi7b3T73E8ft0u0thJjPaddzAJOuLyYKzLI0KgOYe4Hk -Glm8Afrwqqz3QQPj1mqrK5Rvlg== ------END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/server_ocsp.pem b/jstests/libs/ocsp/server_ocsp.pem index 86d73b9cb8ee4..9cb0ac85d3e51 100644 --- a/jstests/libs/ocsp/server_ocsp.pem +++ b/jstests/libs/ocsp/server_ocsp.pem @@ -1,52 +1,52 @@ -----BEGIN CERTIFICATE----- -MIIEBDCCAuygAwIBAgIEKx0QGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEBDCCAuygAwIBAgIEGnfTOTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBiMRAwDgYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjBiMRAwDgYD VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTEwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC7kdmu54NATVQJ17vNVuL5M+t4 -iExQWHSVi8lp771E+4z1hJqnQHH1Voi2HuWS/nolxvap/O0V6GbDmWbdFRoBL6cP -ASgrZPn263SLfgms31sIYMUthPmPK3UpxyQByFPpV+4oBDCnRRos1+XOKvAhCd+3 -GctudX/OiPRTU0iShURYyKytF0LCxNAhXqxDB8UNpSakM8OOwz5QOVP+0Px+HUT5 -0ULHueof6GHJXoACeQlXsMWdLcXsyj7Gl+ogpBAAbOZyNzVHSJzjGW5krTvwEsZ6 -9lb22kfYi03BsTxueQmC6uGFJWBxwnkfstQ+2u36hP+IKwJMjMG5u+edAUQ1AgMB +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCul4pkcbY3hc98DR2yj8+RubSK +sajO47va1vOEy4v3HVo7/kFQg6GRWCTXWzb8uzsqq81a/5dGbqqrWjzwWKH4oyRr +G7J9WDCukiWDqC0v1TkGgvdFYtNSJrmW4sOtsFLBteMbuaCpYBQ3DNpRjUazJTx0 +tZu5bb+MKAVHKNIlptXfZ1CGg8iQTb2e3iBoYFSknF/XdP8GUXU3PtdUpmx5yuwT +BaFTExfdDj8uQuxz1xT1crYb/+akxMO6aqW6EFNFEvkFJWxu5GGnHqKBfgfsvKGr +D4pA/W4kWwVo0wfQ6JdQdMjdOITDLLWUU5w/ecF2oYbSyfEw29n2ZzyMhIIzAgMB AAGjga8wgawwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB -BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQqVWQYrG9+vpv1nnGGws3T6mqQqDA4 +BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRGjjyMczVRJuyPkj517L6To2Mk3TA4 BggrBgEFBQcBAQQsMCowKAYIKwYBBQUHMAGGHGh0dHA6Ly9sb2NhbGhvc3Q6ODEw MC9zdGF0dXMwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB -CwUAA4IBAQB5+mPWOi1CCc9TT+TDb5A07U+QzkPqVfQc2xME4qVozj07B7BeD5ep -i7g6uxAT4jLutve/k8M2cpq13oFyMC1C2e66Zw+8qKdPNXVbdyJhDnz6hGLZPE7W -QwTB9FMmZOKB8012ZnB2gTcWqcsG2rcglex4iZtYiVxA7KzrCaYqgfX34clFfc9/ -qOIGqBiMp9ic/Z5C2irdY+t2vYs4gW12nrsIYJFbg6pjy2e7oQoKQ7b2cxzAYZvJ -vISodMjrsxj68A9XlXQ2lLcXwiEd3czSAx3sDd2dieZ9JcNybPXpaC+RF2GpPd6F -3Eu4OzInD6UWtHSJgvmriF2rpqV1LTXM +CwUAA4IBAQCGaXzfjXEei5VeWf8DeTPnys+jda5sRTlIisoPpemrUH1tT0hNed0r +jhCWUmju1rpoxvfvbesdp7vAeQnUkZUJgnYnLhrzXG5u/CS9CGplbxtwnaXpe2Wb +zEPFBtIrKH/ssw99zP4MiGfQiqP6yGKF0q+1q4DgQs7d5ZEQzpQVLDEccko89JTe +T1Ts0gZHB1tBcR5IHKJ8it320loYAkahMLythT2dRZ0+cLWiRjNnm9HIVAuPpEOl +ifj7ndH696aMQp97URqRdwR6vN3rFS6mPJhEzKohNx4C4ET9Dm6juvuSAmRx1nkm +PF2TLpBz0dCO9BB8iCY6mxmWmSHazU3W -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC7kdmu54NATVQJ -17vNVuL5M+t4iExQWHSVi8lp771E+4z1hJqnQHH1Voi2HuWS/nolxvap/O0V6GbD -mWbdFRoBL6cPASgrZPn263SLfgms31sIYMUthPmPK3UpxyQByFPpV+4oBDCnRRos -1+XOKvAhCd+3GctudX/OiPRTU0iShURYyKytF0LCxNAhXqxDB8UNpSakM8OOwz5Q -OVP+0Px+HUT50ULHueof6GHJXoACeQlXsMWdLcXsyj7Gl+ogpBAAbOZyNzVHSJzj -GW5krTvwEsZ69lb22kfYi03BsTxueQmC6uGFJWBxwnkfstQ+2u36hP+IKwJMjMG5 -u+edAUQ1AgMBAAECggEBALf2q26s6ADoFtV0fmA9qG1L69WSsG5Y4zdNG6VIGfUP 
-VsfyX5BMV7iNP5aHpRhPeFOW2ZQNWiaTgj6zNLz/Fjs1ln7T3qb46WSwt+ScIDcp -9Wm5J4qmkfFGaSopg3owFYSV1iHvQhy8XJjAw0Y6vLtaqM03glt66HgTwLX8x//T -AKNCd0Jj47SKmlZlBD1te4SSrzylJiRL4hYn0Gs0VE8y5hF5c0GCS/+rbQwPTUlC -jVlxiBvPCVwzYyjeRNQQuOE9MFv4MvLJ/R4W7rUyf+G3wuOZySwZnHWJRwQOnwdc -dILgdrYMWo8WDuCSbTez0iBoHxgVSgb7D4x+gjROx2ECgYEA52+tBZhoqpiNQQNT -REH8ngSz5bVAwp9mvUQQb/rp4J+X3VptCrcg8krXOAFICHybKuZq0KFG/qZW7Aaz -eeHyXfaNnobm6RNIMOPF/u6vFbjF+LK4+bV37tniZdTlqap5xV0iRWlFvTouERRe -vhhx5qeUv1IVO11U4kqqSqzqk20CgYEAz3pIhDNI8y9r2GFaETjOrKkNd+HbOjeg -4vewUHMpiATey/uWHLUYaRUPdKrjhJytNYaMtmfNSLnZGOkwGaZ9nPQ3tz2hkwAx -smya0LUeZLZwbV65xlQ/VTwE7vYQhcUaN9LXu+a3JHS7iYZa0nyDcWZjqUuum3AI -SeML7kthrukCgYAEpM4A9bzr52G2Mz3heb164jdF3awt+4rRM2NtC8jTlAA1FdJo -S6JimjPVUNlEKlPmuXbdNs843teRptTFFk6Jqh/PVX+en59sM7XOVrW2pt16DwP9 -JrIAXejYCDyPefc1iZfdcq5OjLNN7m20PMNUUHqAmEIKzeWdIMhn9S5DxQKBgAyg -EFGbHVFPDWz5X5W0sz7mep9U6xY71Fp7YAtFNr3ELhqTkKrmijm6wloDHB0xu4iK -S32+C0vlo0RUzEyDSRmH8uv/oBll5aBJ45HXs6XFREojfYKIFeKF6HbyGkdJmzLS -bbZNb8+UGuVfe59bXFRGOsJOjK2Av8BfRJaXl7YhAoGAbGO2av1IdTHz9eQPVX18 -muU/i6UvsIl4Rb9poJTuwO/I74pJHctwggnLVaj8I/xLzbO3D87CHV49NOLLtn1D -VZUoodANrh7qfQAIbB5csSg2rQaID6kAQl1w4Os/3jwiRAmYkQ890wL+q0S68xG+ -V9n4HmUO0QV1gsutC1zq+D0= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCul4pkcbY3hc98 +DR2yj8+RubSKsajO47va1vOEy4v3HVo7/kFQg6GRWCTXWzb8uzsqq81a/5dGbqqr +WjzwWKH4oyRrG7J9WDCukiWDqC0v1TkGgvdFYtNSJrmW4sOtsFLBteMbuaCpYBQ3 +DNpRjUazJTx0tZu5bb+MKAVHKNIlptXfZ1CGg8iQTb2e3iBoYFSknF/XdP8GUXU3 +PtdUpmx5yuwTBaFTExfdDj8uQuxz1xT1crYb/+akxMO6aqW6EFNFEvkFJWxu5GGn +HqKBfgfsvKGrD4pA/W4kWwVo0wfQ6JdQdMjdOITDLLWUU5w/ecF2oYbSyfEw29n2 +ZzyMhIIzAgMBAAECggEBAJYVPOlFet9VztuTptDFoShjHBkRycw3plfj6ahhFWuT +NutVkK32yZ/Yxqq+BZHSHXV8BGbSSidIoLv2MWZmuq2zCG5ue8wWUJrwlQENQ/LX +d1LqqEdkQcZf9bAINTAM38cXZSiDNOIE7ru398CdEoaTXtnw9/G5ryx7hrX6GFQ2 +H0CWdsCXgiDm8if2PsBtWOZwpFjxEl0gCFR4l1pAqs/XzPP9HJ69nvKDXF+gjAVM +qe4qfMT0sDUl2tjzG8A4tyET56bFO8C1R0R2+tWqBc4iCOwQiHQHQhwGtYD7qEGK ++ZxTKyt6Y8a85eERQm4t95nhTYkA/zGkwaQXVwhlnakCgYEA4yOH7Sf8+y6v/uOC +ODirWJ6v5evCnhjYMvnaOaLzcvhOqsUvYWuyxtQcCMFvwN9wrdtnfsNe5rXPlShj +7RROfAM6NcCvTaakeInvtyEnE04VanwpLnGckEZgCr7TG7lWwOcLrJh5Tox26841 +u10Lcb68nt9gOKvQ97IXRWnZ0q0CgYEAxMa+S+48jsoMR5PNCtT5jZ3UeIg3QDIZ +5QGvI8bwJZOYZcRIXfDc+c00JKmOkd9K36cJJPaf5RCkiuqJPgmcfc7RUh8Jb+F4 +G32pgPlEUpqzV+1q45WwIg3oXBFgeUxs5w0DIBUneX9Tdw22cHGXJmr47OcJeeiN +uZIbbkArJF8CgYA/iefzOpV1Ook3FzONQKUGBFYWTk5B5ZdNI0Gdj+zkQ3vWH5Ty +fqsjHaC9/kahwJ+HsvGPr11z0nZANm1Fm8GcxVZaRQ2E/lHR+lwZcWe56cTp5dOr +T1LJtTYWq2zou37+NWO5o2mDxJ1bt3KmeA/EgOPI2ZnUIJQzRhlmbIbnfQKBgDz6 +bbwv/potKLMBrVe++fqVv2L+q0h8fiPGatTGcGLkoyReOCLMYl4S5ia6WJEBxj0a +kS3gM1qT1rmpxo/wAIvIDHvLXGxMTaEPRvjNxgtnH06PJ0GRgHx9HNVzGRddxJ2x +HZfSlmIDQAUzvaaIvNNN8QfQ7NHXbBvmmBOJVRU3AoGAMqedJ8pnseyzSC2/c0Ps +IPIISVimfFWA17rOGohVwCHkXIFAdPt3ntjSjKv0BmGzVB9arbdo1hVmsW+Rz7XC +Nu8u9gNC6SRhHbFZ/ys2AYd7YZD5QQ85vDKE53w8WRatLP1JAm3SiU+5Ks3ZoFDO +kRgRWrZACkM84BZZAUmfsXQ= -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/server_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/server_ocsp.pem.digest.sha1 index 1afca61e9e0f4..8a7ad665ed0e2 100644 --- a/jstests/libs/ocsp/server_ocsp.pem.digest.sha1 +++ b/jstests/libs/ocsp/server_ocsp.pem.digest.sha1 @@ -1 +1 @@ -08FBF18FA233B42A1591279E979CFDD9DFB4977F \ No newline at end of file +8A8E33FE14DF4D8DDEE9CB28A69DCC2F26D4133F \ No newline at end of file diff --git a/jstests/libs/ocsp/server_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/server_ocsp.pem.digest.sha256 index 219b235711695..17cbef1241961 100644 --- a/jstests/libs/ocsp/server_ocsp.pem.digest.sha256 +++ 
b/jstests/libs/ocsp/server_ocsp.pem.digest.sha256 @@ -1 +1 @@ -5A5D30BEEF694D55BD4BD0FA5E699382C2B6DFBEF1EAF186146698EF2E1359D6 \ No newline at end of file +E52DB1E98B2210DAFE8530E33C8DEA24ECA2D5D358D67BC0778F78B2F46A571D \ No newline at end of file diff --git a/jstests/libs/ocsp/server_ocsp_mustStaple.pem b/jstests/libs/ocsp/server_ocsp_mustStaple.pem index e277b30c9e06b..74476bf7bbb98 100644 --- a/jstests/libs/ocsp/server_ocsp_mustStaple.pem +++ b/jstests/libs/ocsp/server_ocsp_mustStaple.pem @@ -1,52 +1,52 @@ -----BEGIN CERTIFICATE----- -MIIEFzCCAv+gAwIBAgIEL05J0DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEFzCCAv+gAwIBAgIEPJLrJjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBiMRAwDgYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjBiMRAwDgYD VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTEwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDKUbaM1TrAvHpIVi/qLnAgMelQ -lWu1sLh2kPnaXyIyfLij3KcEH2xeAdDlUfR3fVX10AawYOQix5hkOgD4Th8bBy4z -m9q61e8aeBLHbMjXFcI4yXn7S+sBrUZzOfeKTHkAtjfmXn7zS9NMMYYQG9KEzwSl -XdvktQyI2EzitrM+2gRY2f9abMndScE/1Y4EuNSWvAz0ln8NSQoKsxB/qGkSkbtw -mgXrmxP79NGs93x9zDp4UIP9goYkRsmoKxdYrrwbibyYog7tMTpjpUaEce1kDiYl -iZbyyKGX1M0LfDnEwlcwaDD7zMHgMwmKt4dDyi4yUey3LTFDxC4+0FQDrGMBAgMB +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAMrvAIe4zQtig1aRLfcTYvf2d +faCWNTPoKwnL6FKUYqupD4FBQ56ryVV37k5uXAiJwh4HnI2BNKtvZTpskWVmrw6S +Y4dGmT6dbHU4cz830PK7tb6nhEkVeqobcwKvCpebudcUXJ4hoIM6uHN9htGmDYjy +e/sRuuwTq+B7H7mh8Wo/RQ5C3N9DTXHuKuP5gR3c9o7MHqQ5qkaY5+kuB3afYF4u +KtwU1ANUkE7iVR1TyZ0LvDBj7G7LkNkD25XIUg7Z9gQos4+eScN3E3wQOvfY7z+2 +EyLjX5AJ6ziwvglowqEhnqzCtfwneggUyTMKa6gMi2uHi9Q77R1G7Uq+dpqtAgMB AAGjgcIwgb8wCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB -BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSEaix3tjioPatVVkObBqSsU7G3XDA4 +BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBT6h8diKVNTYVy95EtxT+qshlpIMDA4 BggrBgEFBQcBAQQsMCowKAYIKwYBBQUHMAGGHGh0dHA6Ly9sb2NhbGhvc3Q6ODEw MC9zdGF0dXMwEQYIKwYBBQUHARgEBTADAgEFMBoGA1UdEQQTMBGCCWxvY2FsaG9z -dIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAAotxVRA/9sEuLGa9sJE0ERhuN7yr -GcWad5nkDdYSmg68GditxeUQDc86jACfU+/Dv7Ni+kvkPmlKrreThsyJbIix36LT -bC21fouGk9wAszhc2ENZzR9bE6PB+qVwL6axVkS4XFyXrPZBxlL0R+WiNUOH0RZI -leOlD1LuLEAejYg10q2sof3JpEGVjDzJMGqM1kaSTJ6R78CgkHdE0OlmLmJNknCl -hvdK68W7dwCUyHoUX1VucnK3XowvsvWa0r1dp0yyFWvRbcglxI1lt1f5hfUZaNVD -m0NfpxxjkpC1jzmS5K40Gt+XvXltBGCIUeFduMoqt0NHk698WoCd8LuZdw== +dIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEA4SbsqrppLAEH6HczQWF53KFIwTXK +EbQSkHySWVHkQZd64rI4FtiubecsvorbpeQjnWTvpJPJmwNSmd2rp86D2bnCB8El +2t2WUZH48+iCBcojeka7ppsU/B4y2I9qo+I18EZ3MvOxr3F78qSEiKkfE/hyJgNe +f5yyX/MrpcyY70b58M6IazMBxDii/pDJIC1bMmwSvbKYxX4vZLbEA/npJNMUvfgS +8oJLd6sEUPP/5Sy5XeO4rkq2UN0swtdCEOgBk93T3Pvofm91ZuA90/ep5/rDzPBe +UehkzSMSGAKrv/DVG1Wkbl8nOZRN0n3CVI0kZ+cHDyC3MWaaI8Q07Xyi9g== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDKUbaM1TrAvHpI -Vi/qLnAgMelQlWu1sLh2kPnaXyIyfLij3KcEH2xeAdDlUfR3fVX10AawYOQix5hk -OgD4Th8bBy4zm9q61e8aeBLHbMjXFcI4yXn7S+sBrUZzOfeKTHkAtjfmXn7zS9NM -MYYQG9KEzwSlXdvktQyI2EzitrM+2gRY2f9abMndScE/1Y4EuNSWvAz0ln8NSQoK -sxB/qGkSkbtwmgXrmxP79NGs93x9zDp4UIP9goYkRsmoKxdYrrwbibyYog7tMTpj -pUaEce1kDiYliZbyyKGX1M0LfDnEwlcwaDD7zMHgMwmKt4dDyi4yUey3LTFDxC4+ -0FQDrGMBAgMBAAECggEAMJNT6BZeB9544ZtH38sMgr5ZDU8C0FnAwD7orkR/Gm4V -iso0k2nUA/IdrKFzt6ixFda/dGOfAra3YQEIMJXZJA0iS6PU7VzmDo3bvGPjCLkh 
-q668CdjdcMagjpPRWjQoLUKSw03W3yVKcxXE4WM8tURbEjUp0YyimMyOETTabnFO -LAkWun3UtQBYlvs8TdQkowYZsOXjEqVac2aynaOSWBhx1g63q6pwvs4AtLDfBGcN -7QeL3xe0vvrEZh5hAV73NatZI5Xx8jzv8C47XgPlsLTzsFJYOgKAPyPNKzeV/lnL -6rtQer8rbQwABOI1lMsM1eRmYm/FFZZB5d2srtvtnQKBgQD59/y3ZG7mDh44PUyJ -OtFk7LV3m8vS07hv/6ofLycYiT2k8T6gJF9WdsccFUhUgmLP1MC6VFLWEGWBFY7J -6FjEkyX50QKNVnq/p8j/kHSycJW1C3lMtUsukJAczCxhAKKG/tJkWNV4PSOLBiCD -+qd3aja5ECiMvucTVTbRLdX+pwKBgQDPM2c71KmtCF9kgx2aYqaYRSB021QVyeq3 -eoLIzEvizehebtF4MGW5oLoS73b/rZwsBDHRImq7eK1gs8FMF68ZDcYhD/jVSsj9 -v/lPKkas6Kg4AgAkS0ZS/+nqhjOyeATmMps63HfMHVGQPL1/sr8mopnvtrtfbtyg -G8xDS1uuFwKBgBj/fdxxRM3o94i0SKUopqwrJ/KwN+/7kGRS8xZvRr+jafDG13Z4 -bLhe5Iagcj1RVMRoCGYZ7LbPqPcByufSPp5aAOA80L6FuXzVMLquHZ2CuNYEMbtE -HiKn/mGC4aVJxPcvIKc8YwzFQHq0wCeyt3CvxI05WnTI2p8KVJMSvGXdAoGAKWAr -akAyTdJRMkIYhD7U461SKOOva9ZxX4hAW5aoRLc3grwAak7H0YSuA5/8FFx3xjZk -OE32IE+d16StoeNM3OTqPqnw167iagGO6GcZy+d9yAlmO9koRuTBskpwQmBDIjDS -3LMv3Pux0OrAMKLiFvX7ZWoJCqqZtgM3C+0ddqsCgYEA6pSgc3sHam8lXftWq9zq -7YBhHmjfeGcDslz6NTekW3IrDrEcMpwZLqFxkXOMlM/bbLSGsz46xW8Nt8uJLN7h -+q6RtUmgtlUUznTAsBD2aZNm7e5TUgWDK7VyTeDbd8ld7GLBInVEbplTfVAJ3fHC -REyG1gCUF350GsgxXdsXmg0= +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDAMrvAIe4zQtig +1aRLfcTYvf2dfaCWNTPoKwnL6FKUYqupD4FBQ56ryVV37k5uXAiJwh4HnI2BNKtv +ZTpskWVmrw6SY4dGmT6dbHU4cz830PK7tb6nhEkVeqobcwKvCpebudcUXJ4hoIM6 +uHN9htGmDYjye/sRuuwTq+B7H7mh8Wo/RQ5C3N9DTXHuKuP5gR3c9o7MHqQ5qkaY +5+kuB3afYF4uKtwU1ANUkE7iVR1TyZ0LvDBj7G7LkNkD25XIUg7Z9gQos4+eScN3 +E3wQOvfY7z+2EyLjX5AJ6ziwvglowqEhnqzCtfwneggUyTMKa6gMi2uHi9Q77R1G +7Uq+dpqtAgMBAAECggEASmYZKYm45Fz28GNjAtn/jgsVlpZfyGV132RxmV4oQUpk +ur/GwBXF4SfBjvhPoga8Q19CMp4WZLKXvjpIQGiUAQi4slAWuGxvwY6PXbe2/AtF +OwZidAQQKmFeYdvGaS3HfD+XE0m66LdytRNbmXbLxuSsY3k8uiNKaV76OY8RU68s +5Kd9W0g+/TgasNmLR/VTvIjG9JMAZs7XP5ufPjzPdOktwyV4zndVfQOBLJoQbgVe +4gN0NrKxXUvp3E3aoFEY4Zc/RTraF+M1G4oUrvGSzT2fHA0adE1mSl9RJn4dFmNS +z+GJ2MRMFzBYXMJWPHyZAbnXhKJ6dONDnOwKHeix3QKBgQDsjQig8c9T6Y48KA+W +rwkxZ0ZQP5y2MiAyk6BZS+QSWmX/9P2Q37C5efe+xD/shR8rlrakMo2D5Gprabjp +jGpg+0wJHb+WC9RJlMuASNCIlhB22IkN2xTB3x/BVC6kOEWwIsMTjbfWXBgRFEs+ +I03mqyUE9OTUZLm+yUTOR+fCOwKBgQDQACfCq0fy2Y8tcv6NGgZ2gbtj14PJa8gO +kSH+mpT/95fnIBu+894TC71NdwsJ6l4lS8lFfk5RMjHKAPItBkDuvP0kdjWqFmV7 +WYPkUSf1EguixmNrEIxUpykyYYreXiqGlVJu+Qzpi8Ommv03CpaFKfOAXvIgWa5L +FXi0Y2WgNwKBgDo6LRmQ7Dv3ja95HP02cMjcZw5x2h1vEUXh2OKiL63k2p66q3/x +0AqX30fx/to1moqcOrEIUJVasGdoSsASdE0TDpBf5j0FBGhoW/9j7RDx/3OBsZrb +hqyCQ8rO8fwybdUBeYtioxH88V+i4zKUdiSFlEM4FOvDL4Wq/WjZgk7pAoGAAsr9 +va9vF+Oz+HhC0sI2tACp831BV8MKvKdWPYT12zoH4CKePMIpiTfvIssmasuq1/Vc +joJTquNxp7S7i785v/rpq0OrFM2YLz/UdYxhbPkBDv5690URnVFhTDvjEXlSONxo +bvUJJ1mpFuOd4s9RlhgqHN8pgSWzIW74X/O1uaECgYBDPHqAQkC0nr10CLca58e9 +qYVd4/vVrhbbnxJwyl9rjuBJVp/y9hssDNhDKiJuTMz3+4ds5XvZRQbWLIt/V9qr +gT4mtqHAfk76Cjg0/RQcPUyNd4e4Gd/HObnF4lEC9z5j8zBN/DCwP/PewLJQYcri +Q5gP/QsoVfc2J4pATQz/lw== -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha1 b/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha1 index b366ff279f9f3..5959c42e3ab8a 100644 --- a/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha1 +++ b/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha1 @@ -1 +1 @@ -4B0BF83D70980E620C8D195C6979D3D6BE8323E7 \ No newline at end of file +E36475B1125E0147754258152F35A57B85B6146D \ No newline at end of file diff --git a/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha256 b/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha256 index edb2eb6b5c0a2..3a6fb59e452e8 100644 --- a/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha256 +++ 
b/jstests/libs/ocsp/server_ocsp_mustStaple.pem.digest.sha256 @@ -1 +1 @@ -BC9FAEBBDE885879057893157EDDB79EA405369F3E1A98AD69EC20CAB996C764 \ No newline at end of file +5CA31EA44018281579AC45F1D58C0C2F17613635998216E87FAC70FDB40F1C74 \ No newline at end of file diff --git a/jstests/libs/ocsp/server_ocsp_revoked.pem b/jstests/libs/ocsp/server_ocsp_revoked.pem index 96c6e71db498b..cb824644d0308 100644 --- a/jstests/libs/ocsp/server_ocsp_revoked.pem +++ b/jstests/libs/ocsp/server_ocsp_revoked.pem @@ -1,52 +1,52 @@ -----BEGIN CERTIFICATE----- -MIIEBDCCAuygAwIBAgIETZmtxDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEBDCCAuygAwIBAgIEXojk4zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBiMRAwDgYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ1WhcNMjUwOTEwMTQyODQ1WjBiMRAwDgYD VQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMMCWxvY2FsaG9z dDELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMQ8wDQYDVQQHDAZPQ1NQLTEwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDxzlF6pDVDoDTxU9mhC1nYgC7y -NkQ2gozEValQXwF0ck4F4qMQVkrz4vFSEG5HPAykK5Yv6MQ6RcyEB1w5F+FrI6oj -PexL6Pr/iQ8kWkNAPqIPZRXBlddri1butlDCTLPHXsU7bqdjFCAnlBKCRJJ4WqYF -kfvtPAfZkN6s+0C4SXHi5+xY5d7NzDGzzWnhhcCsfOZJu6jpFRqoAV5CFvSsMvJF -r51W8e9kf6sG1zEU/iel5rz3CJNPhPcARcPQ2+pZnRAMOKwmloe90le6QxYA/3tP -DbxFn3lDZ4lBXB/t5M72LXQXiJIcorUV5cxoQ2W1jClzg6HUSfxSbCXoZz5HAgMB +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC4z1jlUx5rFqNRnLeroecRO20h +rWnFL+FQZUlDvRGpiUfVIK0vvTmQdEk690PHFEcnu7nKCVSxVrJLm9A6/SOidOSO +BoZNkF4D/Q6kmKEad7T4xz/Ma1x4xmk+rkbTEAe3IpJEQ9pyMJ2iTzOEOKQ+08j8 +xca5No+V88OKnTtp8bk4MRykNceB+8GIqYNFt7ir9MoKEjJPbySZsXRtHXPuEOQd +N+pWIdchDiidIpLihCviCCQO0n9q6mjZ7ev3nF7M2HzrMeEVkHdARVFWwqPh9tV8 +iB9299Sb/i/B2UKQe1CoRP1U6/Q7ElMYwFt5+CDpgMFBvx+7N2G/3OQFWqX1AgMB AAGjga8wgawwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB -BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQnIxUeRtcliLQ8WYOTMPNUOfKVhzA4 +BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQvmFQ9FdhyABOmhW2ubwTx0cOTPTA4 BggrBgEFBQcBAQQsMCowKAYIKwYBBQUHMAGGHGh0dHA6Ly9sb2NhbGhvc3Q6ODEw MC9zdGF0dXMwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB -CwUAA4IBAQC02XPR8svJ7M3F7pMrd2TWjdo5q6r6VuQ255k76hUWnyhFMO+rJAfE -0qOwvTxSC3BTn7uopYgmtZ16IA2kXW99kM0khSOWS2rvrKwaXNH5N4m5vpJvvnYn -4tRy+VN75HdUxIjGFPDv590yrc2rolGK/4BENErK+uQL1s64+6BGZHhxu67UQIni -P2XtpNl7FOqqKTN1C36LNPz2WT+rNY6BArKeKNPrChkjjf05XDBazXHK0mWBL5Fb -S4lw/5D+RuyC7IUxn3H8EKWXRhm8m2+PbtXjcTmXDf8MmGdmq5VcRPIKhMjrxX/T -xZ7XdGSu/Q8KvsO3taacxoh4uqvX8kNu +CwUAA4IBAQDYtwwX7/EO1/tSNSLF1cl+maF3NvNxxCdQGKooE1DCMwLHq4OnehQi +pCaRD/uks4Kb/Ji/yWafkOOPT40UUa9KCVEs9XkZygSXamTwkm2FIn+gsxNpWeP0 +sprkljUPN640RNzQyInrRwPdGvvO/Mdv4WrLKiAg7TpHlynveRiu4+dk58nVSyUO +Mi1TisTqsXDz6i6nXMgnt6b1JIy9xdAqONq98RkgcxbBQ3dYv8oU4YehKBO8e3RJ +4nxhCHOD6pqNTCPD4LyiTOG28J1l4oCslGEjTXYHmebSRW6pYmJIbV8C+Zl7YK5X +vIp7oO1z8YiA7Wf7fTobR0Xg5JmXAp/K -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDxzlF6pDVDoDTx -U9mhC1nYgC7yNkQ2gozEValQXwF0ck4F4qMQVkrz4vFSEG5HPAykK5Yv6MQ6RcyE -B1w5F+FrI6ojPexL6Pr/iQ8kWkNAPqIPZRXBlddri1butlDCTLPHXsU7bqdjFCAn -lBKCRJJ4WqYFkfvtPAfZkN6s+0C4SXHi5+xY5d7NzDGzzWnhhcCsfOZJu6jpFRqo -AV5CFvSsMvJFr51W8e9kf6sG1zEU/iel5rz3CJNPhPcARcPQ2+pZnRAMOKwmloe9 -0le6QxYA/3tPDbxFn3lDZ4lBXB/t5M72LXQXiJIcorUV5cxoQ2W1jClzg6HUSfxS -bCXoZz5HAgMBAAECggEBAK+gGq3o9obXpZxiOn8tr2QANhjWMVUmHPe8/+zQFE5v -dxvvMGB9TJM3Ee9435/9jXDv93V0qpFogNKkRedx4NfgX4KZRzcbULKo6caNiKrw 
-0uQ7l0Gzg3MpEUrwQFffPfRgcQBprknYJEKa2ZakF34y2MLS9RepsknjXthk5Ozz -gcg6jqa7JBl/Cu+d1CUetUDcz/Xizt5CCW0yZ3Qt+UgmyNRNgIuSAUFkoJRFwiBf -8/cZ5fkhatck/vsdxwGzvP/oTR9P0AuMXUEBSj+KNMbiuVfeAg84T5Q533scU300 -lZCfQGsNDmTTTiTNqgqdP0d/6XgbQFhh+gBdhm8bHoECgYEA+d8UnCaG967ixP1x -IIexVjABzMY5fdXjqJxmhJstU+jKZ7gATq6Pl7Qh8RgOfdXdJnD+17UtqAip8bvF -dKdVYpHZ10nwvChQ6BnALSDGAqsfU3p8U0UEh1XLtMmPPaNPsuRtENRlHlQcrp4N -pr7u/kWrDsNNJpExrq48rMF1m00CgYEA97yYavOWWNq+NBkvbxx14Ha0UZqkIfKp -RbvFw6x6CMPsn5OAjklI44CpnLa7J+XiKiD0IWPkMO57jsaKc6Z/vUi5QX6/dwgD -1W/CmGU7ZVXr23JYTitnECaF9AWMRIJ4yL6Z1OEkAPtexTLEtLgBT+tzupzpgchJ -2wJ0yxcaLeMCgYEAiGYFOyQw4v3dnkj3Oxm3bWPxZ5YBhjZ++ui9cb+/o6Fc8/dW -e50Al4BmUtSd/IUFHtnp01h8ntBz7JhitkYt7wvNDZ+4QQ4E9F4yMLBGRuigxhID -0fTH/xSPiZXJko0WkYHuI5S9yiuCKLPwochyb+0Z7oogEGCL7V4BkxBcIbUCgYEA -ggJWVqJYv26kLMEe1IeyEMca4ExwYGRxkuf05Dhqnpj7X89PwtwQxVurJ1P6KfkJ -VKSQmclLYqZ07zugQwsYtGQq3IVAw53QLT2GeOV/YzHRIgwC7Zr3blFZMPOMZhO2 -gVsNbdttQpqoZIK9Gj4Kaj+dL0zTekl7ANVsJ3yLP5MCgYAuES4K0f1Gob9yT2VN -OvvwoMxYlkPKrziLmJFtDGp5vl4VzTsnFfbw/ykccG6UXt5EWpWW0pI7olRp7cWD -2nfCek9OkkMRl5KoQMhG+UPk8i0JS7+wEW1NirLwXEEwL/VDdwoj/llA7MWkr5Ug -yxWrUGrJoSqX/nlkiSrarBkH/w== +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC4z1jlUx5rFqNR +nLeroecRO20hrWnFL+FQZUlDvRGpiUfVIK0vvTmQdEk690PHFEcnu7nKCVSxVrJL +m9A6/SOidOSOBoZNkF4D/Q6kmKEad7T4xz/Ma1x4xmk+rkbTEAe3IpJEQ9pyMJ2i +TzOEOKQ+08j8xca5No+V88OKnTtp8bk4MRykNceB+8GIqYNFt7ir9MoKEjJPbySZ +sXRtHXPuEOQdN+pWIdchDiidIpLihCviCCQO0n9q6mjZ7ev3nF7M2HzrMeEVkHdA +RVFWwqPh9tV8iB9299Sb/i/B2UKQe1CoRP1U6/Q7ElMYwFt5+CDpgMFBvx+7N2G/ +3OQFWqX1AgMBAAECggEAK7Hie73uODWPAbHC/1J1Nm7Ne6PrmRKb/A0miFyAe4wY +rrxeFiZwIMZNtiJWJRCXTj6lS65vFsq/tv6s+fV14mguzaGfSWFzyQ+g0avAk2hw +Ik3BuMw9fdSzd31vA2IL4PHMkVVVpZ/29TQyovVdZ/lc0N7WAF51Dmgm+HP6Yy67 +0tla/Bky4fvjy2MvSkxBT4TPqs+Co5ICj4xOABDOtMmsKMwLgqGHs7/K0+4GwFKr +vrKt5DpAQmrrWbp6E6DyJLCb+GVTm4KFdbYUnJ/aV6RruxmUvIEIKjV4QYCPxmxC +myVgoFeFgEditvmiTUYC+X5DRGngtsJc2OxLxG5j4QKBgQDd+12dbN1BkOoTFUWs +Glol1kcLhzkUdy5Mx/KdcmBr2hi+ZtilXp+ydH64YZQolJ82rAxZe1ve/uQnQVec +DbB6GSeWEV2T5GtYR1W1lOBvh8jGaGt5JcghjqZRHWKb6m0r86JkrLTZBzbVykwB +bQzMFt06vMJbJBVVMl5Xf9Bh3QKBgQDVIa2um9fBWY31oxq0kMPlvjuT7NBWFRuN +TF61d8Jr25eDoQ5b1n9x25J8P5LMeBERq9cz/AZ8ymiYhXEPOwImyzSEbrc9f9Fs +fLw4qqFXgHKRNaD5BauqV2HGX3TUO0sCCXCn3IlJXegHhN7T9r4GgLR2w2to9+Xe +LBkUSHDu+QKBgQDE3qjw9FrqSmZ+lDAdaaWnxvltTPkfoG+MowwV/RlsrZmiS746 +M8h4UX8NcWUf9Geb6kxtk80v4WlhMz9K5I0uTF7iZfZTMpLNUT6/cxD1eTxkUrl2 +UT7EHCp+6AvLccZu432TweQGUrKcOvTbkuZ1npWIAV5xzHeq/qfS/EVIdQKBgE3/ +dUBhaCQqUfmDJ1LNZ+O8KrGICPZ34w1i1YYzvcMbKefSGR8i8KEKaiFuYprRle2R +pcAwi28/4+dtbmLUhYUM+Ls+iXDIRiwdugRC7ajIKSVbKv33RUuhjva6GFdTG7Az +JDdKTjO7Wh7mUWXg9soKcADGo5hWoJkeKk9x82KBAoGAPh14iiYunVA+zgJU3opM +kL/bZmxWD/ATEaRiVHDCZ/jBVcU/ywwzt2niyQdx1Taybb8IRT6mA6nTkxnnYIQS +zdT1Zabo3HAYAFwXPIi2TgQ8eCjSiRE5VYbaxfLqcVIpj27bGtrzz9aHIw+ci+ga +pBVHu0GJc/4LiGs4NJpj3Es= -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha1 b/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha1 index a16c01face3ef..75795fd866804 100644 --- a/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha1 +++ b/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha1 @@ -1 +1 @@ -5DDD9151B27748CE669027BE752EBD7FEBF18392 \ No newline at end of file +A96EFE717775551966C031244B7C6412FA8D3E66 \ No newline at end of file diff --git a/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha256 b/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha256 index 7fcefa06e76f3..71085ab2d5c28 100644 --- a/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha256 +++ 
b/jstests/libs/ocsp/server_ocsp_revoked.pem.digest.sha256 @@ -1 +1 @@ -FC98F8A109CC247A33B8194C6482D93DF966983C5F6269783F2D3EEEBDC3EFD7 \ No newline at end of file +8A1618C8FBEBB727C367D5124AA77FAEA8AA09C9C8CBADC8E5DFD38C399F1B67 \ No newline at end of file diff --git a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem index dbcd621d38691..0d8c990481b93 100644 --- a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem +++ b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem @@ -1,53 +1,53 @@ -----BEGIN CERTIFICATE----- -MIIELzCCAxegAwIBAgIEUkVd6jANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV +MIIELzCCAxegAwIBAgIEJPVjeTANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt -ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0 -N1owgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN +ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIzMDYwOTE0Mjg0NVoXDTI1MDkxMDE0Mjg0 +NVowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq -hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAs91RJ/ZwqHuim2gGkgsDFa8WkaEGj070 -gxP0sW15N+IvsNXyHx43KTyN7B117hgRDS/nKQHUyX8g044dBvmAmmOqyXfsXA6j -TXUXovcx/IMSlxwgx9D1mrhNOxSrsUFE2sb+5Ibp9f/ORL5bU6dE6iMqJMldUQcr -VrlE4oiq6y8T9ykArP9zTpcddARPTbUoOGDuhoVS/cjvNgFdcsR0L4LX6r7wKJ03 -H5L9k43GxXjyjtnXtSFjG7YVAwWU/FoXpqngn3okqNIgSLZoCAM06E+TLCapJjOp -110HgGLoQmkrvfpjKTmfhsVVGR4WquQOqzr4BLZVB9qP9JTqwuK99QIDAQABo4Gv +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvPjzpFEZvcBHspirINBNt1aoaO1k/n+E +BGm4Vxwz2F7LfjiCOPXmb0Z6vPW8Xf5rnmanrxei6obTD4eCF5SodSrCSrV3ccG/ +KRgEOPjMJZc6/0IY2jrLqVDAnIzz6sQd2GayQNLU88CmuYnZ2Nzah38TTl5ppnrj +oy9qC6Xy7sFkHN7gQLLG9Am/gt15hm+9HDSvxir7mRlRKn9FskhJcYz8svKalxKW +DNekW9CJ/z0CTxrFM0/DAc34GB69Og1VuJIrSjexY3DbltivEZFCduciM09gOVDp +TlsFtmEpYu4i9LMWXWsa5ks+SPMqhnHoBCQHQ4Gb1vwQeiV5ok42OwIDAQABo4Gv MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB -BggrBgEFBQcDAjAdBgNVHQ4EFgQUl1R5DS4QffEZ0Tb/zR51ubuiP6kwOAYIKwYB +BggrBgEFBQcDAjAdBgNVHQ4EFgQUhV3aQ2Q65JKAn7LJq9aVEAntaMwwOAYIKwYB BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC -AQEA0PM+cliksO+QdC6fY3kCWIPX+rcKO0aqkw0ib2Tt55uHZR1xAiX5Go1cR0Xj -etgzLl9zKZhx1ybkLgnAo2psPx0PIZc0hoSRSJMm5v06jKl9zuQ1PU81HHAQmnbh -+Ut54tVXsl3e2Si/0YLVKQCVFXcn7cqZqUTu5mcaRSlLm5pqAj4n95C+STwYUEuc -FZaZ5mYNqu2KBqZ3v5QVE7/qAPf3CVCmhi0bKJ8i1vssg8oIqqVXK3nrnhEo0dFU -QaOYMPWfntVwX3EIvVY8gMyFvzkxSz8dfA3ep/OxQoRECSq41GDbfIQMty1pnA3B -jOO8x5hcv2ibfukDCLIQkKeP6g== +AQEAa+Shlr1ZXFLZ9xKv8irm8sPaWwlcos8WOBDbbQ0hzzAcTaHFGR6aAzYK07qV +1Bht3/yRFjEYuRrx3rZUtMhgS32T59h/zX9VWsSset9XR+qxdPqsO/WjePEyfHhg +Lt7FBAUQ/Cbf8FyiDY07f5R4adCUtHJk/1becD2+xdXo/zw2XezIIw4Quyfbk+Sy +G7O1oLzre7EQ/2RYgsDNuck7UvMOhyZ9j0V9PhLtIymjkccjNp6K0C6H9rxKYJck +r1QHPQO4ghjyJawTTwNxkbztK2ABSM0IV3pIuKrj6rtqgblBJ9wkhOOvsLft9WK0 +sZPO8N6G9bUmuDcHn5HGKeLZBA== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCz3VEn9nCoe6Kb -aAaSCwMVrxaRoQaPTvSDE/SxbXk34i+w1fIfHjcpPI3sHXXuGBENL+cpAdTJfyDT -jh0G+YCaY6rJd+xcDqNNdRei9zH8gxKXHCDH0PWauE07FKuxQUTaxv7khun1/85E -vltTp0TqIyokyV1RBytWuUTiiKrrLxP3KQCs/3NOlx10BE9NtSg4YO6GhVL9yO82 -AV1yxHQvgtfqvvAonTcfkv2TjcbFePKO2de1IWMbthUDBZT8WhemqeCfeiSo0iBI 
-tmgIAzToT5MsJqkmM6nXXQeAYuhCaSu9+mMpOZ+GxVUZHhaq5A6rOvgEtlUH2o/0 -lOrC4r31AgMBAAECggEAKmIFZKhSpbD0gTBDFBjaMjre05QhKpXNUjHaoBCO1tdK -q53OfvysvN/TBYdvqZosMnJOQ0B2NsDpKFC9kE4surSvoufKX2rnmjH0Tud29Oyh -7c+n/c3Egh/ZuwlE5/DcW702hP0xllEf9tzwQkcRGycMIDz+60AIO1hO3h6cP+yj -ko174JxUGb3DQpEjbkUQ8kWYSAA9adpepX9Qtk3u/e4OZdCj+YJ/rGmvCUqOHHFO -ZvdYADVNNHvaZ1jlXufr9C2C/8eM6j7vkRZI4e0UznIqQGn8O45H27wgnMy4P6HZ -zBzoTAla/f9QcUP1TuAy/yzmSFvhcn8Qic9hcfkmQQKBgQDoRSdQB/bf3p1cYNE6 -Yu0qSA+a8PFQKtmJX5lK5cn6N14a4zhSUBubiMkv632ygq2P7KtsALI/ehLr+lxE -0N7a7X+ESuhtMry3JKAoEomZGupfbdi0OU6PgapWC21w6Y6cSfsQOtqWvJgfa2gW -6KLnwFlWb3hhCmXm/namIJ0rBQKBgQDGPYj38O/DyqeQoZtRiGNQ0Y7mtnEgPyup -8Dv5U6CoJ8TI5pYnHjz9/fmJtRljiFhThvuHlA3XZZxZVg4KTPqVsN6EfedCbi3I -Og3URUcaRtsrcfrLmzYKvEb8b66McaKFzkwzom/1hXWgsM3gjAF/hA4uzIYdyo44 -p1hiFeMaMQKBgQDXozw4ROyiMt8NYKVe+3EMDBLQ2lhvARktPJ/otSWrM7Qeak1j -vhOjdn6yCoOMM15HfIY5ovvZis/+XVVEXlZIEq6Md68Jkk06CrrV+T/d8Ose5bCG -wZ16BfvKHpngdjV8TALWso013KmuodzlR93WIvHOGXc9QJYSurQUz7qasQKBgBrK -jC0+AZlLVRQF0zDUpt9wQsjJVUaPYv5HekN62tZ+8WhZSWel/YYyAbxrni/GaHF5 -Z5ruFmTK1bN5HtKjjqYWTixHu65Np6BMwDu01SQm+U3IzKUhp88RnOJW0ZcncFh3 -BfNge5MJ41jBLEGxii5KkYQfnex+yHO9ogM3hAaxAoGBAM8tFeQID9C71X1Kh37+ -SPypUqfUUIsD5zdimOVmfrDxUmzJ8u6BRLIv+nQ1CYxaGbIvv9nTjo8zPmf4QBqF -XA7L0jr/ahUGtDh9eH4HdzReAMcF3T+a0pi423OjW1l3jiDYZtcwznA2C/0DYKGv -s2/1z/Pi3w7kVEJsWfMJgMUy +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC8+POkURm9wEey +mKsg0E23Vqho7WT+f4QEabhXHDPYXst+OII49eZvRnq89bxd/mueZqevF6LqhtMP +h4IXlKh1KsJKtXdxwb8pGAQ4+Mwllzr/QhjaOsupUMCcjPPqxB3YZrJA0tTzwKa5 +idnY3NqHfxNOXmmmeuOjL2oLpfLuwWQc3uBAssb0Cb+C3XmGb70cNK/GKvuZGVEq +f0WySElxjPyy8pqXEpYM16Rb0In/PQJPGsUzT8MBzfgYHr06DVW4kitKN7FjcNuW +2K8RkUJ25yIzT2A5UOlOWwW2YSli7iL0sxZdaxrmSz5I8yqGcegEJAdDgZvW/BB6 +JXmiTjY7AgMBAAECggEAXpmiHpga8srXGakjwgGFZkqTdDOjY2QiirMB/Vm4+pA5 +/q37QtiWyw3VU9MQwV0kMt/hAd4rSIzGC/giP/vDeSQ2r3+4k9ISCFhlhL/IMulK +N463MkhMvC4YvkYxtU0IQ9TlV140DPJchHVmARJOs5YB7DGHYjgSzRZnV/1zZ+G8 +HxjeBRdKjQBthQSid0CPg+YlT/LgFLuoHyMBH/K3ZOHUpZiCPWPwr3MbA3KM6Zvo +2lCurzzsaURkqlyd6wrBoZyAKcyQsKRtRAuynAQ1NOEKJPuhYt46vROreHDhsI7G +qiKypV1kzyLp9oL02gddMHXa/4OF+F3jr0JLGty4gQKBgQDg5AJK9XDm71shqKfi +zCel2R5Oj9SsvGb4wximIKalCwYN3S2t8lJBu1dOXx1hthcFrexKYbNIdQCompYP +OYJrXvCWS5Vixn1Qip1f895BUIux0HvG0d5vBmAPeiIF64JXUg66wO8xDzgVjAbM +ZaNXyqpvW+BgPjhQ2Seo2gyB6QKBgQDXHPtRdIpaFfNDW5jYveDp+FJsBsTMRknf +V6VLdgKXa2L3BFwNvpvjjfXcVZw+udEiOPv4mnZ/Hv4EZ30BF0J3pL3YnOdkC9ao +QiiHrJi262Fge8BCtYrXdEBPeCO3xjI/LLZ8Yh4S0TWLyx4X3MtiyAb0z9ueYh0b +mfQPGnhcgwKBgGHSbDjk2E5rkS1r6lMZ0KkwbyFnKPBWJ0hPvLoOe85QpDqFmegO +/r6+2bQqZWJqTUtYIu8b3ltHIZk1XM6Uimlf67DEgd9sTXgqQ1hIgMXSXmFjOWP7 +SBDtKf2xd3kxvwlylRug7qpdX1zoAtEH6Ow4KLi1szzJ878fJkQpTMV5AoGAeBz0 +r8z1G8w54xiYRh3wInTIAQ3egXmV8iHMaFBirg4GWouSoxIVlYBFElyzO8sh3YwY +Ff6Zv+2cRPVMNXm5YXkZkIQ8J/78Q0N9whQ7yZew7eDrv9QCzL9a0YTx3MHMeL4M +v3NB+5vZ3E70ZLqizmuGhtgxd0StoCXcwzoyjuUCgYArVGS5ZYSlujndoIvmBmJs +4jSG9sRJmyY/8JGyXKQpEjboAo/0vbT2ld6rvPaDfajzSQbmZHVMtZHxfA2t+3Tg +IrehJ8U3SgB1GGLDIBs9l0ydIWw4SHbO4LziYXsv7C1apjz/9dIPO0cQLOcdibvP +xuHjgAc+f0lPvcM4ciTcEg== -----END PRIVATE KEY----- diff --git a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha1 b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha1 index cacb2b7e84946..df2230ab416b5 100644 --- a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha1 +++ b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha1 @@ -1 +1 @@ -0FD31772F5F6802195A98A4F06A4F347047BC7EA \ No newline at end of file +1025010C4C3F520A1B9AF30416135B30C7DF0E54 \ No newline at end of 
file diff --git a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha256 b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha256 index f7c96ead3942f..a96c4b6eb25c6 100644 --- a/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha256 +++ b/jstests/libs/ocsp/server_signed_by_intermediate_ca_ocsp.pem.digest.sha256 @@ -1 +1 @@ -342CA437B1471B4A8D4231604B4D42435E0C0E4E738B498F6FDDEE7F4151F7C4 \ No newline at end of file +77672A5A46B5D04B099CE011203DE3CB78853AF0F65688E3A575586FC72174CC \ No newline at end of file diff --git a/jstests/libs/ocsp_server_intermediate_appended.pem b/jstests/libs/ocsp_server_intermediate_appended.pem deleted file mode 100644 index 317bb9ecb14b9..0000000000000 --- a/jstests/libs/ocsp_server_intermediate_appended.pem +++ /dev/null @@ -1,26 +0,0 @@ - ------BEGIN CERTIFICATE----- -MIIELzCCAxegAwIBAgIEc3NuKDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV -UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO -BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEhMB8GA1UEAwwYSW50ZXJt -ZWRpYXRlIENBIGZvciBPQ1NQMB4XDTIwMDQyMTE5MTQ1MloXDTQwMDQyMzE5MTQ1 -MlowgYIxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwN -TmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVs -MSUwIwYDVQQDDBxTZXJ2ZXIgT0NTUCBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkq -hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA8o7m7QpIMUZ2r6HOmhuqNF25x0odb9Bg -rSLm7Hvb3WBu6jwWPrrnPerR/nODVEY4Qo7mOclgCsooJx3HaPYPgRYffRQMJ+I5 -lpvsRsBjW7CnS0amz9QcbGnIhMeFU45gCn51CTLPoBJ7hB9F4Z02bOJEMkkXkhtm -kkiVysUs6po+t2+w8tojOScZdeDUtwfStKJ7Xb9B79Ko3BCcITXJUxDBcqUEJF+E -v3YQuQg/QKNTO+L39aFFo8WNfuP09txdjT/+T8PZq826ccohRdSrJ5lq1hXmmKXp -3p6Ut35aE4tjj6KSjDonMkYcvdNHQ0aL2p8x4JjwgwAuNwawTUbYIwIDAQABo4Gv -MIGsMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMB -BggrBgEFBQcDAjAdBgNVHQ4EFgQUyC6Gv0rfoato44VsaVig1SmminYwOAYIKwYB -BQUHAQEELDAqMCgGCCsGAQUFBzABhhxodHRwOi8vbG9jYWxob3N0OjgxMDAvc3Rh -dHVzMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOC -AQEAogdunlFL04lqVbZyqPvN/5TtrEtM87invrzTYZ8UmT5Q4Kr8mHRsumBuVwDu -bE+umrPtQVvu0XYqsjmjmOk7hTIK6PFuF6rLQCUBHVXBZggTNKFFBWphQ8odUbPG -FmOqSlkZAkcNo3dLpxRbfDru2ARxeE2+sRCPWwUZc7utqpLoZ0deuKdDSlA/VcGJ -5wf0sjmcjvJRRUSYeJcUox4ySL+4WtFu33LhYZKgnrMNegaJ6UyIlwB4ihMyi9sV -yDlsY+vGqivqqMUw8V6tdUekCYPUlHWXeICqsRIBII+xMzqTv1rXPzNyAvyVYrBi -hG10rdLfnQWn2vpYKU5b3Vo1yg== ------END CERTIFICATE----- diff --git a/jstests/libs/optimizer_utils.js b/jstests/libs/optimizer_utils.js index e4c8b14212258..e0a61618a68e8 100644 --- a/jstests/libs/optimizer_utils.js +++ b/jstests/libs/optimizer_utils.js @@ -1,18 +1,27 @@ -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStage, isAggregationPlan} from "jstests/libs/analyze_plan.js"; /** - * Utility for checking if the query optimizer is enabled. + * Utility for checking if the Cascades optimizer code path is enabled (checks framework control). */ -function checkCascadesOptimizerEnabled(theDB) { - const param = theDB.adminCommand({getParameter: 1, featureFlagCommonQueryFramework: 1}); - return param.hasOwnProperty("featureFlagCommonQueryFramework") && - param.featureFlagCommonQueryFramework.value; +export function checkCascadesOptimizerEnabled(theDB) { + const val = theDB.adminCommand({getParameter: 1, internalQueryFrameworkControl: 1}) + .internalQueryFrameworkControl; + return val == "tryBonsai" || val == "tryBonsaiExperimental" || val == "forceBonsai"; +} + +/** + * Utility for checking if the Cascades optimizer feature flag is on. 
+ */ +export function checkCascadesFeatureFlagEnabled(theDB) { + const featureFlag = theDB.adminCommand({getParameter: 1, featureFlagCommonQueryFramework: 1}); + return featureFlag.hasOwnProperty("featureFlagCommonQueryFramework") && + featureFlag.featureFlagCommonQueryFramework.value; } /** * Given the result of an explain command, returns whether the bonsai optimizer was used. */ -function usedBonsaiOptimizer(explain) { +export function usedBonsaiOptimizer(explain) { if (!isAggregationPlan(explain)) { return explain.queryPlanner.winningPlan.hasOwnProperty("optimizerPlan"); } @@ -31,7 +40,7 @@ function usedBonsaiOptimizer(explain) { * * This is useful for finding the access path part of a plan, typically a PhysicalScan or IndexScan. */ -function leftmostLeafStage(node) { +export function leftmostLeafStage(node) { for (;;) { if (node.queryPlanner) { node = node.queryPlanner; @@ -55,7 +64,7 @@ function leftmostLeafStage(node) { /** * Retrieves the cardinality estimate from a node in explain. */ -function extractLogicalCEFromNode(node) { +export function extractLogicalCEFromNode(node) { const ce = node.properties.logicalProperties.cardinalityEstimate[0].ce; assert.neq(ce, null, tojson(node)); return ce; @@ -64,7 +73,7 @@ function extractLogicalCEFromNode(node) { /** * Get a very simplified version of a plan, which only includes nodeType and nesting structure. */ -function getPlanSkeleton(node, options = {}) { +export function getPlanSkeleton(node, options = {}) { const {extraKeepKeys = [], keepKeysDeep = [], printFilter = false, printLogicalCE = false} = options; @@ -111,7 +120,7 @@ function getPlanSkeleton(node, options = {}) { * This is completely ignorant of the structure of a query: for example if there * are literals match the predicate, it will also match those. */ -function findSubtrees(tree, predicate) { +export function findSubtrees(tree, predicate) { let result = []; const visit = subtree => { if (typeof subtree === 'object' && subtree != null) { @@ -133,7 +142,7 @@ function findSubtrees(tree, predicate) { return result; } -function printBound(bound) { +export function printBound(bound) { if (!Array.isArray(bound.bound)) { return [false, ""]; } @@ -156,7 +165,7 @@ function printBound(bound) { return [true, result]; } -function prettyInterval(compoundInterval) { +export function prettyInterval(compoundInterval) { // Takes an array of intervals, each one applying to one component of a compound index key. // Try to format it as a string. // If either bound is not Constant, return the original JSON unchanged. @@ -189,7 +198,7 @@ function prettyInterval(compoundInterval) { return result.trim(); } -function prettyExpression(expr) { +export function prettyExpression(expr) { switch (expr.nodeType) { case 'Variable': return expr.name; @@ -228,7 +237,7 @@ function prettyExpression(expr) { } } -function prettyOp(op) { +export function prettyOp(op) { // See src/mongo/db/query/optimizer/syntax/syntax.h, PATHSYNTAX_OPNAMES. switch (op) { /* comparison operations */ @@ -280,7 +289,7 @@ function prettyOp(op) { * Helper function to remove UUIDs of collections in the supplied database from a V1 or V2 optimizer * explain. 
*/ -function removeUUIDsFromExplain(db, explain) { +export function removeUUIDsFromExplain(db, explain) { const listCollsRes = db.runCommand({listCollections: 1}).cursor.firstBatch; let plan = explain.queryPlanner.winningPlan.optimizerPlan.plan.toString(); @@ -291,7 +300,7 @@ function removeUUIDsFromExplain(db, explain) { return plan; } -function navigateToPath(doc, path) { +export function navigateToPath(doc, path) { let result; let field; @@ -310,15 +319,15 @@ function navigateToPath(doc, path) { } } -function navigateToPlanPath(doc, path) { +export function navigateToPlanPath(doc, path) { return navigateToPath(doc, "queryPlanner.winningPlan.optimizerPlan." + path); } -function navigateToRootNode(doc) { +export function navigateToRootNode(doc) { return navigateToPath(doc, "queryPlanner.winningPlan.optimizerPlan"); } -function assertValueOnPathFn(value, doc, path, fn) { +export function assertValueOnPathFn(value, doc, path, fn) { try { assert.eq(value, fn(doc, path)); } catch (e) { @@ -328,15 +337,15 @@ function assertValueOnPathFn(value, doc, path, fn) { } } -function assertValueOnPath(value, doc, path) { +export function assertValueOnPath(value, doc, path) { assertValueOnPathFn(value, doc, path, navigateToPath); } -function assertValueOnPlanPath(value, doc, path) { +export function assertValueOnPlanPath(value, doc, path) { assertValueOnPathFn(value, doc, path, navigateToPlanPath); } -function runWithParams(keyValPairs, fn) { +export function runWithParams(keyValPairs, fn) { let prevVals = []; try { @@ -371,7 +380,7 @@ function runWithParams(keyValPairs, fn) { } } -function round2(n) { +export function round2(n) { return (Math.round(n * 100) / 100); } @@ -379,7 +388,7 @@ function round2(n) { * Force cardinality estimation mode: "histogram", "heuristic", or "sampling". We need to force the * use of the new optimizer. */ -function forceCE(mode) { +export function forceCE(mode) { assert.commandWorked( db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"})); assert.commandWorked( diff --git a/jstests/libs/os_helpers.js b/jstests/libs/os_helpers.js index 60a52a31eccf2..0182bd21ec70a 100644 --- a/jstests/libs/os_helpers.js +++ b/jstests/libs/os_helpers.js @@ -6,19 +6,93 @@ function isLinux() { return getBuildInfo().buildEnvironment.target_os == "linux"; } -function isRHEL8() { - if (_isWindows()) { - return false; +function isMacOS() { + return getBuildInfo().buildEnvironment.target_os == "macOS"; +} + +// See "man 5 os-release" for documentation +function readOsRelease() { + try { + const os_release = cat("/etc/os-release"); + + let lines = os_release.split("\n"); + + let tags = {}; + + for (let line of lines) { + let vp = line.replaceAll("\"", "").split("="); + tags[vp[0]] = vp[1]; + } + + return tags; + } catch { + // ignore } + assert(!isLinux(), "Linux hosts should always have /etc/os-release."); + + return {}; +} + +/** + * Check if Linux OS is given identifier. Identifiers are always lower case strings. + * + * @param {string} distro ID of the distro in os-release + * @returns + */ +function isDistro(distro) { + let tags = readOsRelease(); + return tags.hasOwnProperty("ID") && tags["ID"] === distro; +} + +/** + * Check if Linux OS is given identifier and specific version. Do not use for matching major + * versions like RHEL 8, isRHELMajorVerison. 
+ * + * @param {string} distro ID of the distro in os-release + * @returns + */ +function isDistroVersion(distro, version) { + let tags = readOsRelease(); + return tags.hasOwnProperty("ID") && tags["ID"] === distro && + tags.hasOwnProperty("VERSION_ID") && tags["VERSION_ID"] === version; +} + +/** + * Is it RHEL and is it 7, 8 or 9? + * @param {string} majorVersion + * @returns True if majorVersion = 8 and version is 8.1, 8.2 etc. + */ +function isRHELMajorVerison(majorVersion) { + let tags = readOsRelease(); + return tags.hasOwnProperty("ID") && tags["ID"] === "rhel" && + tags.hasOwnProperty("VERSION_ID") && tags["VERSION_ID"].startsWith(majorVersion); +} + +/** + * Example +NAME="Red Hat Enterprise Linux" +VERSION="8.7 (Ootpa)" +ID="rhel" +ID_LIKE="fedora" +VERSION_ID="8.7" +PLATFORM_ID="platform:el8" +PRETTY_NAME="Red Hat Enterprise Linux 8.7 (Ootpa)" +ANSI_COLOR="0;31" +CPE_NAME="cpe:/o:redhat:enterprise_linux:8::baseos" +HOME_URL="https://www.redhat.com/" +DOCUMENTATION_URL="https://access.redhat.com/documentation/red_hat_enterprise_linux/8/" +BUG_REPORT_URL="https://bugzilla.redhat.com/" + +REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 8" +REDHAT_BUGZILLA_PRODUCT_VERSION=8.7 +REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux" +REDHAT_SUPPORT_PRODUCT_VERSION="8.7" + */ +function isRHEL8() { // RHEL 8 disables TLS 1.0 and TLS 1.1 as part their default crypto policy // We skip tests on RHEL 8 that require these versions as a result. - const grep_result = runProgram('grep', 'Ootpa', '/etc/redhat-release'); - if (grep_result == 0) { - return true; - } - - return false; + return isRHELMajorVerison("8"); } function isSUSE15SP1() { @@ -38,69 +112,100 @@ function isSUSE15SP1() { } function isUbuntu() { - if (_isWindows()) { - return false; - } - // Ubuntu 18.04 and later compiles openldap against gnutls which does not // support SHA1 signed certificates. ldaptest.10gen.cc uses a SHA1 cert. - const grep_result = runProgram('grep', 'ID=ubuntu', '/etc/os-release'); - if (grep_result == 0) { - return true; - } - - return false; + return isDistro("ubuntu"); } +/** + * Example: +NAME="Ubuntu" +VERSION="18.04.6 LTS (Bionic Beaver)" +ID=ubuntu +ID_LIKE=debian +PRETTY_NAME="Ubuntu 18.04.6 LTS" +VERSION_ID="18.04" +HOME_URL="https://www.ubuntu.com/" +SUPPORT_URL="https://help.ubuntu.com/" +BUG_REPORT_URL="https://bugs.launchpad.net/ubuntu/" +PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy" +VERSION_CODENAME=bionic +UBUNTU_CODENAME=bionic + */ function isUbuntu1804() { - if (_isWindows()) { - return false; - } - // Ubuntu 18.04's TLS 1.3 implementation has an issue with OCSP stapling. We have disabled // stapling on this build variant, so we need to ensure that tests that require stapling // do not run on this machine. - const grep_result = runProgram('grep', 'bionic', '/etc/os-release'); - if (grep_result === 0) { - return true; - } - - return false; + return isDistroVersion("ubuntu", "18.04"); } function isUbuntu2004() { - if (_isWindows()) { - return false; - } - // Ubuntu 20.04 disables TLS 1.0 and TLS 1.1 as part their default crypto policy // We skip tests on Ubuntu 20.04 that require these versions as a result. 
- const grep_result = runProgram('grep', 'focal', '/etc/os-release'); - if (grep_result == 0) { - return true; - } + return isDistroVersion("ubuntu", "20.04"); +} - return false; +/** + * Example: +PRETTY_NAME="Debian GNU/Linux 12 (bookworm)" +NAME="Debian GNU/Linux" +VERSION_ID="12" +VERSION="12 (bookworm)" +VERSION_CODENAME=bookworm +ID=debian +HOME_URL="https://www.debian.org/" +SUPPORT_URL="https://www.debian.org/support" +BUG_REPORT_URL="https://bugs.debian.org/" + */ +function isDebian() { + return isDistro("debian"); } -function isDebian10() { - if (_isWindows()) { - return false; - } +/** + * Example: +NAME="Fedora Linux" +VERSION="38 (Workstation Edition)" +ID=fedora +VERSION_ID=38 +VERSION_CODENAME="" +PLATFORM_ID="platform:f38" +PRETTY_NAME="Fedora Linux 38 (Workstation Edition)" +ANSI_COLOR="0;38;2;60;110;180" +LOGO=fedora-logo-icon +CPE_NAME="cpe:/o:fedoraproject:fedora:38" +DEFAULT_HOSTNAME="fedora" +HOME_URL="https://fedoraproject.org/" +DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/f38/system-administrators-guide/" +SUPPORT_URL="https://ask.fedoraproject.org/" +BUG_REPORT_URL="https://bugzilla.redhat.com/" +REDHAT_BUGZILLA_PRODUCT="Fedora" +REDHAT_BUGZILLA_PRODUCT_VERSION=38 +REDHAT_SUPPORT_PRODUCT="Fedora" +REDHAT_SUPPORT_PRODUCT_VERSION=38 +SUPPORT_END=2024-05-14 +VARIANT="Workstation Edition" +VARIANT_ID=workstation + */ +function isFedora() { + return isDistro("fedora"); +} - // Debian 10 disables TLS 1.0 and TLS 1.1 as part their default crypto policy - // We skip tests on Debian 10 that require these versions as a result. - try { - // this file exists on systemd-based systems, necessary to avoid mischaracterizing debian - // derivatives as stock debian - const releaseFile = cat("/etc/os-release").toLowerCase(); - const prettyName = releaseFile.split('\n').find(function(line) { - return line.startsWith("pretty_name"); - }); - return prettyName.includes("debian") && - (prettyName.includes("10") || prettyName.includes("buster") || - prettyName.includes("bullseye")); - } catch (e) { - return false; - } +/** + * Note: Amazon 2022 was never released for production. It became Amazon 2023. 
+ * + * Example: +NAME="Amazon Linux" +VERSION="2022" +ID="amzn" +ID_LIKE="fedora" +VERSION_ID="2022" +PLATFORM_ID="platform:al2022" +PRETTY_NAME="Amazon Linux 2022" +ANSI_COLOR="0;33" +CPE_NAME="cpe:2.3:o:amazon:amazon_linux:2022" +HOME_URL="https://aws.amazon.com/linux/" +BUG_REPORT_URL="https://github.com/amazonlinux/amazon-linux-2022" +*/ +function isAmazon2023() { + return isDistroVersion("amzn", "2022") || isDistroVersion("amzn", "2023"); } diff --git a/jstests/libs/override_methods/check_metadata_consistency.js b/jstests/libs/override_methods/check_metadata_consistency.js index 9418d098442be..46e8651f797bc 100644 --- a/jstests/libs/override_methods/check_metadata_consistency.js +++ b/jstests/libs/override_methods/check_metadata_consistency.js @@ -1,6 +1,4 @@ -'use strict'; - -load('jstests/libs/check_metadata_consistency_helpers.js'); // For MetadataConsistencyChecker +import {MetadataConsistencyChecker} from "jstests/libs/check_metadata_consistency_helpers.js"; ShardingTest.prototype.checkMetadataConsistency = function() { if (jsTest.options().skipCheckMetadataConsistency) { diff --git a/jstests/libs/override_methods/check_routing_table_consistency.js b/jstests/libs/override_methods/check_routing_table_consistency.js index c9a47714132e5..0a51e0cc640e4 100644 --- a/jstests/libs/override_methods/check_routing_table_consistency.js +++ b/jstests/libs/override_methods/check_routing_table_consistency.js @@ -1,6 +1,6 @@ -'use strict'; - -load('jstests/libs/check_routing_table_consistency_helpers.js'); +import { + RoutingTableConsistencyChecker +} from "jstests/libs/check_routing_table_consistency_helpers.js"; ShardingTest.prototype.checkRoutingTableConsistency = function() { if (jsTest.options().skipCheckRoutingTableConsistency) { diff --git a/jstests/libs/override_methods/crud_ops_as_bulkWrite.js b/jstests/libs/override_methods/crud_ops_as_bulkWrite.js new file mode 100644 index 0000000000000..2b597788f3d82 --- /dev/null +++ b/jstests/libs/override_methods/crud_ops_as_bulkWrite.js @@ -0,0 +1,431 @@ +/** + * Overrides the runCommand method to convert specified CRUD ops into bulkWrite commands. + * Converts the bulkWrite responses into the original CRUD response. 
+ */ +(function() { +'use strict'; + +let originalRunCommand = Mongo.prototype.runCommand; + +const commandsToBulkWriteOverride = new Set(["insert", "update", "delete", "findandmodify"]); + +const commandsToAlwaysFlushBulkWrite = new Set([ + "aggregate", + "mapreduce", + "authenticate", + "logout", + "applyops", + "checkshardingindex", + "cleanuporphaned", + "cleanupreshardcollection", + "commitreshardcollection", + "movechunk", + "moveprimary", + "moverange", + "mergechunks", + "refinecollectionshardkey", + "split", + "splitvector", + "killallsessions", + "killallsessionsbypattern", + "dropconnections", + "filemd5", + "fsync", + "fsyncunlock", + "killop", + "setfeaturecompatibilityversion", + "shutdown", + "currentop", + "listdatabases", + "listcollections", + "committransaction", + "aborttransaction", + "preparetransaction", + "endsessions", + "killsessions" +]); + +let numOpsPerResponse = []; +let nsInfos = []; +let bufferedOps = []; +let letObj = null; +let ordered = true; +let bypassDocumentValidation = null; +const maxBatchSize = 5; + +function resetBulkWriteBatch() { + numOpsPerResponse = []; + nsInfos = []; + bufferedOps = []; + letObj = null; + bypassDocumentValidation = null; + ordered = true; +} + +function checkNamespaceStoredInBufferedOps(ns) { + return nsInfos.findIndex((element) => element.ns == ns) != -1; +} + +function getLetFromCommand(cmdObj) { + if (cmdObj.hasOwnProperty("updates")) { + if (cmdObj.updates[0].hasOwnProperty("let")) { + return cmdObj.updates[0].let; + } + } else if (cmdObj.hasOwnProperty("deletes")) { + if (cmdObj.deletes[0].hasOwnProperty("let")) { + return cmdObj.updates[0].let; + } + } else if (cmdObj.hasOwnProperty("let")) { + return cmdObj.let; + } + return null; +} + +function opCompatibleWithCurrentBatch(cmdObj) { + if (numOpsPerResponse.length >= maxBatchSize) { + return false; + } + + // If bypassDocumentValidation is not set we can continue. If the stored + // bypassDocumentValidation and the command bypassDocumentValidation are the same we can + // continue. + let cmdBypassDocumentValidation = cmdObj.hasOwnProperty("bypassDocumentValidation") && + (cmdObj.bypassDocumentValidation == true); + if (bypassDocumentValidation != null && + (cmdBypassDocumentValidation != bypassDocumentValidation)) { + return false; + } + + const currentCmdLet = getLetFromCommand(cmdObj); + + // If 'letObj' is null then we can always continue. If 'letObj' is not null and cmdObj.let is + // then we can always continue. If both objects are not null and they are the same we can + // continue. + if (letObj != null && currentCmdLet != null && 0 === bsonWoCompare(letObj, currentCmdLet)) { + return false; + } + + // If saved ordered is false or the incoming ordered is false we must flush the batch. + let newOrdered = cmdObj.hasOwnProperty("ordered") ? cmdObj.ordered : true; + if (!ordered || !newOrdered) { + return false; + } + + return true; +} + +function flushCurrentBulkWriteBatch(options) { + if (bufferedOps.length == 0) { + return {}; + } + + // Should not be possible to reach if bypassDocumentValidation is not set. + assert(bypassDocumentValidation != null); + + let bulkWriteCmd = { + "bulkWrite": 1, + "ops": bufferedOps, + "nsInfo": nsInfos, + "ordered": (ordered != null) ? 
ordered : true, + "bypassDocumentValidation": bypassDocumentValidation, + }; + + if (letObj != null) { + bulkWriteCmd["let"] = letObj; + } + + let resp = {}; + resp = originalRunCommand.apply(this, ["admin", bulkWriteCmd, options]); + + let response = convertBulkWriteResponse(bulkWriteCmd, resp); + let finalResponse = response; + + let expectedResponseLength = numOpsPerResponse.length; + + // Retry on ordered:true failures by re-running subset of original bulkWrite command. + while (finalResponse.length != expectedResponseLength) { + // Need to figure out how many ops we need to subset out. Every entry in numOpsPerResponse + // represents a number of bulkWrite ops that correspond to an initial CRUD op. We need to + // make sure we split at a CRUD op boundary in the bulkWrite. + for (let i = 0; i < response.length; i++) { + let target = numOpsPerResponse.shift(); + for (let j = 0; j < target; j++) { + bufferedOps.shift(); + } + } + bulkWriteCmd.ops = bufferedOps; + + resp = originalRunCommand.apply(this, ["admin", bulkWriteCmd, options]); + response = convertBulkWriteResponse(bulkWriteCmd, resp); + finalResponse = finalResponse.concat(response); + } + + resetBulkWriteBatch(); + return response; +} + +function processFindAndModifyResponse(current, isRemove, resp) { + // findAndModify will only ever be a single op so we can freely replace + // the existing response. + resp = {ok: 1, value: null}; + if (current.hasOwnProperty("value")) { + resp["value"] = current.value; + } + let lastErrorObject = {}; + lastErrorObject["n"] = current.n; + if (current.hasOwnProperty("upserted")) { + lastErrorObject["upserted"] = current.upserted._id; + } + if (!isRemove) { + lastErrorObject["updatedExisting"] = current.nModified != 0; + } + resp["lastErrorObject"] = lastErrorObject; + return resp; +} + +function initializeResponse(op) { + if (op.hasOwnProperty("update")) { + // Update always has nModified field set. + return {"n": 0, "nModified": 0, "ok": 1}; + } + return {"n": 0, "ok": 1}; +} + +/** + * The purpose of this function is to take a server response from a bulkWrite command and to + * transform it to an array of responses for the corresponding CRUD commands that make up the + * bulkWrite. + * + * 'cmd' is the bulkWrite that was executed to generate the response + * 'orig' is the bulkWrite command response + */ +function convertBulkWriteResponse(cmd, bulkWriteResponse) { + let responses = []; + if (bulkWriteResponse.ok == 1) { + let cursorIdx = 0; + for (let numOps of numOpsPerResponse) { + let num = 0; + let resp = initializeResponse(cmd.ops[cursorIdx]); + while (num < numOps) { + if (cursorIdx >= bulkWriteResponse.cursor.firstBatch.length) { + // this can happen if the bulkWrite encountered an error processing + // an op with ordered:true set. This means we have no more op responses + // left to process so push the current response we were building and + // return. + // If the last response has writeErrors set then it was in the middle of an op + // otherwise we are beginning a new op response and should not push it. + if (resp.writeErrors) { + responses.push(resp); + } + return responses; + } + + let current = bulkWriteResponse.cursor.firstBatch[cursorIdx]; + + // findAndModify returns have a different format. Detect findAndModify + // by the precense of 'return' field in the op. 
+ if (cmd.ops[cursorIdx].hasOwnProperty("return")) { + resp = processFindAndModifyResponse( + current, cmd.ops[cursorIdx].hasOwnProperty("delete"), resp); + } else { + if (current.ok == 0) { + // Normal write contains an error. + if (!resp.hasOwnProperty("writeErrors")) { + resp["writeErrors"] = []; + } + let writeError = {index: num, code: current.code, errmsg: current.errmsg}; + resp["writeErrors"].push(writeError); + } else { + resp.n += current.n; + if (current.hasOwnProperty("nModified")) { + resp.nModified += current.nModified; + } + if (current.hasOwnProperty("upserted")) { + if (!resp.hasOwnProperty("upserted")) { + resp["upserted"] = []; + } + resp["upserted"].push(current.upserted); + } + } + } + cursorIdx += 1; + num += 1; + } + responses.push(resp); + } + } + return responses; +} + +function getNsInfoIdx(nsInfoEntry) { + let idx = nsInfos.findIndex((element) => element.ns == nsInfoEntry); + if (idx == -1) { + idx = nsInfos.length; + nsInfos.push({ns: nsInfoEntry}); + } + return idx; +} + +function processInsertOp(nsInfoIdx, doc) { + return {insert: nsInfoIdx, document: doc}; +} + +function processUpdateOp(nsInfoIdx, cmdObj, update) { + let op = { + "update": nsInfoIdx, + "filter": update.q, + "updateMods": update.u, + "multi": update.multi ? update.multi : false, + "upsert": update.upsert ? update.upsert : false, + }; + + ["arrayFilters", "collation", "hint", "sampleId"].forEach(property => { + if (cmdObj.hasOwnProperty(property)) { + op[property] = cmdObj[property]; + } + }); + + if (update.hasOwnProperty("let")) { + letObj = update.let; + } + + return op; +} + +function processDeleteOp(nsInfoIdx, cmdObj, deleteCmd) { + let op = { + "delete": nsInfoIdx, + "filter": deleteCmd.q, + "multi": deleteCmd.limit ? deleteCmd.limit == 0 : false + }; + + ["collation", "hint", "sampleId"].forEach(property => { + if (cmdObj.hasOwnProperty(property)) { + op[property] = cmdObj[property]; + } + }); + + if (deleteCmd.hasOwnProperty("let")) { + letObj = deleteCmd.let; + } + + return op; +} + +function processFindAndModifyOp(nsInfoIdx, cmdObj) { + let op = {}; + + if (cmdObj.hasOwnProperty("remove") && (cmdObj.remove == true)) { + // is delete. + op["delete"] = nsInfoIdx; + op["return"] = true; + } else { + // is update. + op["update"] = nsInfoIdx; + op["updateMods"] = cmdObj.update; + op["return"] = cmdObj.new ? "post" : "pre"; + if (cmdObj.hasOwnProperty("upsert")) { + op["upsert"] = cmdObj.upsert; + } + if (cmdObj.hasOwnProperty("arrayFilters")) { + op["arrayFilters"] = cmdObj.arrayFilters; + } + } + + op["filter"] = cmdObj.query; + + ["sort", "collation", "hint", "sampleId"].forEach(property => { + if (cmdObj.hasOwnProperty(property)) { + op[property] = cmdObj[property]; + } + }); + + if (cmdObj.hasOwnProperty("fields")) { + op["returnFields"] = cmdObj.fields; + } + + if (cmdObj.hasOwnProperty("let")) { + letObj = cmdObj.let; + } + + return op; +} + +Mongo.prototype.runCommand = function(dbName, cmdObj, options) { + /** + * After SERVER-76660 this function will be used to direct a command to 2 different clusters. + * The main cluster will always execute originalRunCommand and the second will follow the + * current execution path below and their responses will be compared (if the bulkWrite path + * executed anything). 
+ */ + + let cmdName = Object.keys(cmdObj)[0].toLowerCase(); + if (commandsToBulkWriteOverride.has(cmdName)) { + let response = {}; + if (!opCompatibleWithCurrentBatch(cmdObj)) { + response = flushCurrentBulkWriteBatch.apply(this, [options]); + } + + // Set bypassDocumentValidation if necessary. + if (bypassDocumentValidation == null) { + bypassDocumentValidation = cmdObj.hasOwnProperty("bypassDocumentValidation") + ? cmdObj.bypassDocumentValidation + : false; + } + + ordered = cmdObj.hasOwnProperty("ordered") ? cmdObj.ordered : true; + + let nsInfoEntry = dbName + "." + cmdObj[cmdName]; + let nsInfoIdx = getNsInfoIdx(nsInfoEntry); + + let numOps = 0; + + // Is insert + if (cmdName === "insert") { + assert(cmdObj.documents); + for (let doc of cmdObj.documents) { + bufferedOps.push(processInsertOp(nsInfoIdx, doc)); + numOps += 1; + } + } else if (cmdName === "update") { + assert(cmdObj.updates); + for (let update of cmdObj.updates) { + bufferedOps.push(processUpdateOp(nsInfoIdx, cmdObj, update)); + numOps += 1; + } + } else if (cmdName === "delete") { + assert(cmdObj.deletes); + for (let deleteCmd of cmdObj.deletes) { + bufferedOps.push(processDeleteOp(nsInfoIdx, cmdObj, deleteCmd)); + numOps += 1; + } + } else if (cmdName === "findandmodify") { + bufferedOps.push(processFindAndModifyOp(nsInfoIdx, cmdObj)); + numOps += 1; + } else { + throw new Error("Unrecognized command in bulkWrite override"); + } + + numOpsPerResponse.push(numOps); + + return response; + } else if (commandsToAlwaysFlushBulkWrite.has(cmdName)) { + flushCurrentBulkWriteBatch.apply(this, [options]); + } else { + // Commands which are selectively allowed. If they are operating on a namespace which we + // have stored in our buffered ops then we will flush, if not then we allow the command to + // execute normally. + if (typeof cmdObj[cmdName] === 'string') { + // Should be the collection that the command is operating on, can make full namespace. + const ns = dbName + "." + cmdObj[cmdName]; + if (checkNamespaceStoredInBufferedOps(ns)) { + flushCurrentBulkWriteBatch.apply(this, [options]); + } + } + // Otherwise is an always allowed command (like `isMaster`). + } + + // Not a bulkWrite supported CRUD op, execute the command unmodified. + return originalRunCommand.apply(this, arguments); +}; +})(); diff --git a/jstests/libs/override_methods/golden_overrides.js b/jstests/libs/override_methods/golden_overrides.js new file mode 100644 index 0000000000000..51f562b74ba11 --- /dev/null +++ b/jstests/libs/override_methods/golden_overrides.js @@ -0,0 +1,24 @@ +// Override print to output to both stdout and the golden file. +// This affects everything that uses print: printjson, jsTestLog, etc. +globalThis.print = (() => { + const original = globalThis.print; + return function print(...args) { + // Imitate GlobalInfo::Functions::print::call. + let str = args.map(a => a == null ? '[unknown type]' : a).join(' '); + + // Make sure each print() call ends in a newline. + // + // From manual testing, it seems (print('a'), print('b')) behaves the same as + // (print('a\n'), print('b\n')); that behavior must be to ensure each print call appears on + // its own line for readability. In the context of golden testing, we want to match that + // behavior, and this also ensures the test output is a proper text file + // (newline-terminated). 
+ if (str.slice(-1) !== '\n') { + str += '\n'; + } + + _writeGoldenData(str); + + return original(...args); + }; +})(); diff --git a/jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js b/jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js deleted file mode 100644 index b0fc5dc0de82e..0000000000000 --- a/jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js +++ /dev/null @@ -1,25 +0,0 @@ -/** - * Loading this file overrides 'runCommand' with a function that modifies any $changeStream - * aggregation to use $_generateV2ResumeTokens:false. - */ -(function() { -"use strict"; - -load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelpers'. - -// Override runCommand to set $_generateV2ResumeTokens on all $changeStreams. -function runCommandV1Tokens(conn, dbName, cmdName, cmdObj, originalRunCommand, makeRunCommandArgs) { - if (OverrideHelpers.isAggregationWithChangeStreamStage(cmdName, cmdObj)) { - // Make a copy to avoid mutating the user's original command object. - cmdObj = Object.assign({}, cmdObj, {$_generateV2ResumeTokens: false}); - } - return originalRunCommand.apply(conn, makeRunCommandArgs(cmdObj)); -} - -// Always apply the override if a test spawns a parallel shell. -OverrideHelpers.prependOverrideInParallelShell( - "jstests/libs/override_methods/implicit_v1_resume_token_changestreams.js"); - -// Override the default runCommand with our custom version. -OverrideHelpers.overrideRunCommand(runCommandV1Tokens); -})(); \ No newline at end of file diff --git a/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js b/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js index 15d86e21abb2a..c9c67b25ab491 100644 --- a/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js +++ b/jstests/libs/override_methods/implicit_whole_cluster_changestreams.js @@ -65,4 +65,4 @@ DB.prototype.watch = function(pipeline, options) { pipeline = Object.assign([], pipeline); pipeline.unshift(ChangeStreamPassthroughHelpers.nsMatchFilter(this, 1)); return this.getMongo().watch(pipeline, options); -}; \ No newline at end of file +}; diff --git a/jstests/libs/override_methods/implicit_whole_db_changestreams.js b/jstests/libs/override_methods/implicit_whole_db_changestreams.js index a9cfd81505483..3b6204ef55568 100644 --- a/jstests/libs/override_methods/implicit_whole_db_changestreams.js +++ b/jstests/libs/override_methods/implicit_whole_db_changestreams.js @@ -166,4 +166,4 @@ DB.prototype.runCommand = function(cmdObj, extra, queryOptions, noPassthrough) { this._runCommandImpl = (noPassthrough ? originalRunCommandImpl : passthroughRunCommandImpl); return originalRunCommand.apply(this, [cmdObj, extra, queryOptions]); }; -}()); \ No newline at end of file +}()); diff --git a/jstests/libs/override_methods/implicitly_configure_query_analyzer.js b/jstests/libs/override_methods/implicitly_configure_query_analyzer.js index 8b81df69efa5c..f407b81b30dcc 100644 --- a/jstests/libs/override_methods/implicitly_configure_query_analyzer.js +++ b/jstests/libs/override_methods/implicitly_configure_query_analyzer.js @@ -12,7 +12,7 @@ load("jstests/libs/override_methods/override_helpers.js"); // For 'OverrideHelp load("jstests/libs/override_methods/shard_collection_util.js"); const kShardProbability = 0.5; -const kSampleRate = 1000; // per second. +const kSamplesPerSecond = 1000; // per second. // Save a reference to the original methods in the IIFE's scope. 
// This scoping allows the original methods to be called by the overrides below. @@ -37,8 +37,8 @@ function configureQueryAnalyzer({db, collName}) { let result; try { - result = - db.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: kSampleRate}); + result = db.adminCommand( + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: kSamplesPerSecond}); } catch (e) { print(`Failed to configure query analyzer: ${tojsononeline({ns, e})}`); if (!isNetworkError(e)) { @@ -47,8 +47,11 @@ function configureQueryAnalyzer({db, collName}) { } if (!result.ok) { if (result.code === ErrorCodes.CommandNotFound || - result.code === ErrorCodes.NamespaceNotFound) { + result.code === ErrorCodes.NamespaceNotFound || + result.code === ErrorCodes.CommandNotSupportedOnView || + result.code === ErrorCodes.IllegalOperation) { print(`Failed to configure query analyzer: ${tojsononeline({ns, result})}`); + return; } assert.commandWorked(result); } diff --git a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js index cd765a33754c9..3f3baff00460b 100644 --- a/jstests/libs/override_methods/implicitly_shard_accessed_collections.js +++ b/jstests/libs/override_methods/implicitly_shard_accessed_collections.js @@ -32,30 +32,6 @@ DB.prototype.createCollection = function() { return createCollResult; } - // We check feature flags on both primary and secondaries in case a step down happens after this - // check. - const featureResults = FixtureHelpers - .runCommandOnAllShards({ - db: this.getSiblingDB('admin'), - cmdObj: {getParameter: 1, featureFlagShardedTimeSeries: 1} - }) - .map(result => assert.commandWorked(result)); - - // The feature can only be used if the version associated with the feature is greater than or - // equal to the FCV version. The getParameter does not consider the FCV value when checking for - // whether the feature flag is enabled. So we run an additional getParameter command to fetch - // the FCV state. 
- const fcvResult = - assert.commandWorked(FixtureHelpers.getPrimaryForNodeHostingDatabase(this).adminCommand( - {getParameter: 1, featureCompatibilityVersion: 1})); - const isTimeseriesShardingEnabled = featureResults.every( - result => result.featureFlagShardedTimeSeries.value && - MongoRunner.compareBinVersions(fcvResult.featureCompatibilityVersion.version, - result.featureFlagShardedTimeSeries.version) >= 0); - if (!isTimeseriesShardingEnabled) { - return createCollResult; - } - const timeField = arguments[1]["timeseries"]["timeField"]; ShardingOverrideCommon.shardCollectionWithSpec({ db: this, @@ -85,6 +61,11 @@ DB.prototype.getCollection = function() { try { TestData.doNotOverrideReadPreference = true; collStats = this.runCommand({collStats: collection.getName()}); + if (!collStats.ok && collStats.codeName == "CommandNotSupportedOnView") { + // In case we catch CommandNotSupportedOnView it means the collection was actually a + // view and should be returned without attempting to shard it (which is not allowed) + return collection; + } } finally { TestData.doNotOverrideReadPreference = testDataDoNotOverrideReadPreferenceOriginal; } diff --git a/jstests/libs/override_methods/inject_dollar_tenant.js b/jstests/libs/override_methods/inject_dollar_tenant.js index a336d8d20238d..66d318c826b32 100644 --- a/jstests/libs/override_methods/inject_dollar_tenant.js +++ b/jstests/libs/override_methods/inject_dollar_tenant.js @@ -23,7 +23,7 @@ function runCommandWithDollarTenant( let res = originalRunCommand.apply(conn, makeRunCommandArgs(cmdToRun)); const prefixedDbName = kTenantId + "_" + dbName; - assertExpectedDbNameInResponse(res, dbName, prefixedDbName); + assertExpectedDbNameInResponse(res, dbName, prefixedDbName, tojsononeline(res)); updateDbNamesInResponse(res, dbName, prefixedDbName); return res; } diff --git a/jstests/libs/override_methods/inject_security_token.js b/jstests/libs/override_methods/inject_security_token.js index 813384be6138a..4e7f8bf823bff 100644 --- a/jstests/libs/override_methods/inject_security_token.js +++ b/jstests/libs/override_methods/inject_security_token.js @@ -60,7 +60,6 @@ function prepareSecurityToken(conn) { const kCmdsAllowedWithSecurityToken = new Set([ `abortTransaction`, `aggregate`, - `availableQueryOptions`, `buildinfo`, `buildinfo`, `collMod`, @@ -137,7 +136,7 @@ function runCommandWithResponseCheck( let res = originalRunCommand.apply(conn, makeRunCommandArgs(cmdObj)); const prefixedDbName = kTenantId + "_" + dbName; - assertExpectedDbNameInResponse(res, dbName, prefixedDbName); + assertExpectedDbNameInResponse(res, dbName, prefixedDbName, tojsononeline(res)); updateDbNamesInResponse(res, dbName, prefixedDbName); return res; } diff --git a/jstests/libs/override_methods/inject_tenant_prefix.js b/jstests/libs/override_methods/inject_tenant_prefix.js index 4e55802487726..9a91d6558474d 100644 --- a/jstests/libs/override_methods/inject_tenant_prefix.js +++ b/jstests/libs/override_methods/inject_tenant_prefix.js @@ -106,6 +106,11 @@ function prependTenantIdToDbNameIfApplicable(dbName) { return dbName; } + if (extractOriginalDbName(dbName) !== dbName) { + // dbName already has a tenantId prefix + return dbName; + } + let prefix; // If running shard split passthroughs, then assign a database to a randomly selected tenant if (usingMultipleTenants()) { @@ -320,6 +325,19 @@ function extractTenantMigrationError(resObj, errorCode) { } } } + + // BulkWrite command has errors contained in a cursor response. 
The error will always be + // in the first batch of the cursor response since getMore is not allowed to run with + // tenant migration / shard merge suites. + if (resObj.cursor) { + if (resObj.cursor.firstBatch) { + for (let opRes of resObj.cursor.firstBatch) { + if (opRes.code && opRes.code == errorCode) { + return {code: opRes.code, errmsg: opRes.errmsg}; + } + } + } + } return null; } @@ -391,6 +409,15 @@ function modifyCmdObjForRetry(cmdObj, resObj) { } cmdObj.deletes = retryOps; } + + if (cmdObj.bulkWrite) { + let retryOps = []; + // For bulkWrite tenant migration errors always act as if they are executed as + // `ordered:true` meaning we will have to retry every op from the one that errored. + retryOps = + cmdObj.ops.slice(resObj.cursor.firstBatch[resObj.cursor.firstBatch.length - 1].idx); + cmdObj.ops = retryOps; + } } /** @@ -533,6 +560,7 @@ function runCommandRetryOnTenantMigrationErrors( let nModified = 0; let upserted = []; let nonRetryableWriteErrors = []; + let bulkWriteResponse = {}; const isRetryableWrite = cmdObjWithTenantId.txnNumber && !cmdObjWithTenantId.hasOwnProperty("autocommit"); @@ -575,6 +603,31 @@ function runCommandRetryOnTenantMigrationErrors( // Add/modify the shells's n, nModified, upserted, and writeErrors, unless this command is // part of a retryable write. if (!isRetryableWrite) { + // bulkWrite case. + if (cmdObjWithTenantId.bulkWrite) { + // First attempt store the whole response. + if (numAttempts == 1) { + bulkWriteResponse = resObj; + } else { + // The last item from the previous response is guaranteed to be a + // tenant migration error. Remove it to append the retried response. + let newIdx = bulkWriteResponse.cursor.firstBatch.pop().idx; + // Iterate over new response and change the indexes to start with newIdx. + for (let opRes of resObj.cursor.firstBatch) { + opRes.idx = newIdx; + newIdx += 1; + } + + // Add the new responses (with modified indexes) onto the original responses. + bulkWriteResponse.cursor.firstBatch = + bulkWriteResponse.cursor.firstBatch.concat(resObj.cursor.firstBatch); + + // Add new numErrors onto old numErrors. Subtract one to account for the + // tenant migration error that was popped off. + bulkWriteResponse.numErrors += resObj.numErrors - 1; + } + } + if (resObj.n) { n += resObj.n; } @@ -651,8 +704,14 @@ function runCommandRetryOnTenantMigrationErrors( // Store the connection to the recipient so the next commands can be rerouted. 
const donorConnection = getRoutingConnection(conn); const migrationStateDoc = getOperationStateDocument(donorConnection); - setRoutingConnection( - conn, connect(migrationStateDoc.recipientConnectionString).getMongo()); + + const otherConn = connect(migrationStateDoc.recipientConnectionString).getMongo(); + if (conn.getAutoEncryptionOptions() !== undefined) { + otherConn.setAutoEncryption(conn.getAutoEncryptionOptions()); + otherConn.toggleAutoEncryption(conn.isAutoEncryptionEnabled()); + } + + setRoutingConnection(conn, otherConn); // After getting a TenantMigrationCommitted error, wait for the python test fixture // to do a dbhash check on the donor and recipient primaries before we retry the @@ -707,6 +766,9 @@ function runCommandRetryOnTenantMigrationErrors( if (nonRetryableWriteErrors.length > 0) { resObj.writeErrors = nonRetryableWriteErrors; } + if (cmdObjWithTenantId.bulkWrite) { + resObj = bulkWriteResponse; + } } return resObj; } @@ -733,12 +795,19 @@ Mongo.prototype.runCommand = function(dbName, cmdObj, options) { return resObj; }; -// Override all base methods on the Mongo prototype to try to proxy the call to the underlying +Mongo.prototype.getDbNameWithTenantPrefix = function(dbName) { + return prependTenantIdToDbNameIfApplicable(dbName); +}; + +// Override base methods on the Mongo prototype to try to proxy the call to the underlying // internal routing connection, if one exists. // NOTE: This list is derived from scripting/mozjs/mongo.cpp:62. ['auth', + 'cleanup', 'close', 'compact', + 'getAutoEncryptionOptions', + 'isAutoEncryptionEnabled', 'cursorHandleFromId', 'find', 'generateDataKey', @@ -766,6 +835,22 @@ Mongo.prototype.runCommand = function(dbName, cmdObj, options) { }; }); +// The following methods are overridden so that the method applies to both +// the proxy connection and the underlying internal routing connection, if one exists. 
+['toggleAutoEncryption', + 'unsetAutoEncryption', + 'setAutoEncryption', +].forEach(methodName => { + const $method = Mongo.prototype[methodName]; + Mongo.prototype[methodName] = function() { + let rc = getRoutingConnection(this); + if (rc !== this) { + $method.apply(rc, arguments); + } + return $method.apply(this, arguments); + }; +}); + OverrideHelpers.prependOverrideInParallelShell( "jstests/libs/override_methods/inject_tenant_prefix.js"); }()); diff --git a/jstests/libs/override_methods/network_error_and_txn_override.js b/jstests/libs/override_methods/network_error_and_txn_override.js index cbb6ac98ab1f0..902277b25603e 100644 --- a/jstests/libs/override_methods/network_error_and_txn_override.js +++ b/jstests/libs/override_methods/network_error_and_txn_override.js @@ -243,10 +243,22 @@ function isRetryableMoveChunkResponse(res) { res.code === ErrorCodes.CallbackCanceled; } -function isFailedToSatisfyPrimaryReadPreferenceError(msg) { - const kReplicaSetMonitorError = - /^Could not find host matching read preference.*mode: "primary"/; - return msg.match(kReplicaSetMonitorError); +function isFailedToSatisfyPrimaryReadPreferenceError(res) { + const kReplicaSetMonitorError = /Could not find host matching read preference.*mode:.*primary/; + if (res.hasOwnProperty("errmsg")) { + return res.errmsg.match(kReplicaSetMonitorError); + } + if (res.hasOwnProperty("message")) { + return res.message.match(kReplicaSetMonitorError); + } + if (res.hasOwnProperty("writeErrors")) { + for (let writeError of res.writeErrors) { + if (writeError.errmsg.match(kReplicaSetMonitorError)) { + return true; + } + } + } + return false; } function hasError(res) { @@ -797,6 +809,17 @@ function shouldRetryWithNetworkErrorOverride( res, cmdName, startTime, logError, shouldOverrideAcceptableError = true) { assert(configuredForNetworkRetry()); + if (isFailedToSatisfyPrimaryReadPreferenceError(res) && + Date.now() - startTime < 5 * 60 * 1000) { + // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the + // primary of the replica set. It is possible for the step up attempt of another + // node in the replica set to take longer than 15 seconds so we allow retrying + // for up to 5 minutes. + logError("Failed to find primary when attempting to run command," + + " will retry for another 15 seconds"); + return kContinue; + } + if (RetryableWritesUtil.isRetryableWriteCmdName(cmdName)) { if ((cmdName === "findandmodify" || cmdName === "findAndModify") && isRetryableExecutorCodeAndMessage(res.code, res.errmsg)) { @@ -850,18 +873,6 @@ function shouldRetryWithNetworkErrorOverride( return kContinue; } - if (res.hasOwnProperty("errmsg") && - isFailedToSatisfyPrimaryReadPreferenceError(res.errmsg) && - Date.now() - startTime < 5 * 60 * 1000) { - // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the - // primary of the replica set. It is possible for the step up attempt of another - // node in the replica set to take longer than 15 seconds so we allow retrying - // for up to 5 minutes. - logError("Failed to find primary when attempting to run command," + - " will retry for another 15 seconds"); - return kContinue; - } - // Some sharding commands return raw responses from all contacted shards and there won't // be a top level code if shards returned more than one error code, in which case retry // if any error is retryable. 
@@ -963,7 +974,7 @@ function shouldRetryWithNetworkExceptionOverride( if (numNetworkErrorRetries === 0) { logError("No retries, throwing"); throw e; - } else if (isFailedToSatisfyPrimaryReadPreferenceError(e.message) && + } else if (isFailedToSatisfyPrimaryReadPreferenceError(e) && Date.now() - startTime < 5 * 60 * 1000) { // ReplicaSetMonitor::getHostOrRefresh() waits up to 15 seconds to find the // primary of the replica set. It is possible for the step up attempt of another diff --git a/jstests/libs/override_methods/override_fixtures_changestream_multitenancy.js b/jstests/libs/override_methods/override_fixtures_changestream_multitenancy.js index c51aa8093784d..34721dd954576 100644 --- a/jstests/libs/override_methods/override_fixtures_changestream_multitenancy.js +++ b/jstests/libs/override_methods/override_fixtures_changestream_multitenancy.js @@ -94,4 +94,4 @@ ShardingTest = function(params) { // Extend the new 'ShardingTest' fixture with the properties of the original one. Object.extend(ShardingTest, originalShardingTest); -})(); \ No newline at end of file +})(); diff --git a/jstests/libs/override_methods/retry_aborted_db_and_index_creation.js b/jstests/libs/override_methods/retry_aborted_db_and_index_creation.js new file mode 100644 index 0000000000000..c0301a73d7992 --- /dev/null +++ b/jstests/libs/override_methods/retry_aborted_db_and_index_creation.js @@ -0,0 +1,79 @@ +/** + * Overrides Mongo.prototype.runCommand to retry interrupted create index and create database + * commands. Was modeled partly on retry_on_killed_session.js. + */ +(function() { +"use strict"; + +load("jstests/libs/override_methods/override_helpers.js"); + +const mongoRunCommandOriginal = Mongo.prototype.runCommand; + +Mongo.prototype.runCommand = function runCommand(dbName, cmdObj, options) { + return runWithRetries(this, cmdObj, mongoRunCommandOriginal, arguments); +}; + +const kCreateIndexCmdNames = new Set(["createIndexes", "createIndex"]); +const kMaxRetryCount = 100; + +// Returns if the command should retry on IndexBuildAborted errors. +function shouldRetryIndexCreateCmd(cmdObj) { + if (cmdObj.hasOwnProperty("autocommit")) { + // Transactions are retried at a higher level. + return false; + } + + const cmdName = Object.keys(cmdObj)[0]; + if (kCreateIndexCmdNames.has(cmdName)) { + return true; + } + + return false; +} + +// Returns if the code is one that could come from an index build being aborted. +function hasIndexBuildAbortedError(res) { + return res.code === ErrorCodes.IndexBuildAborted; +} + +function hasInterruptedDbCreationError(errOrRes) { + return errOrRes.code === ErrorCodes.Interrupted && + ((errOrRes.errmsg.indexOf("Database") === 0 && + errOrRes.errmsg.indexOf("could not be created") > 0) || + errOrRes.errmsg.indexOf("Failed to read local metadata.") === 0 || + errOrRes.errmsg.indexOf("split failed") === 0 || + errOrRes.errmsg.indexOf( + "Failed to read highest version persisted chunk for collection") === 0); +} + +/* Run client command with the ability to retry on a IndexBuildAborted Code + * and InterruptedDbCreation Error. 
+ */ +function runWithRetries(mongo, cmdObj, clientFunction, clientFunctionArguments) { + let retryCount = 0; + while (true) { + const res = clientFunction.apply(mongo, clientFunctionArguments); + + if (++retryCount >= kMaxRetryCount) { + return res; + } else if (hasIndexBuildAbortedError(res)) { + if (shouldRetryIndexCreateCmd(cmdObj)) { + print("-=-=-=- Retrying " + tojsononeline(cmdObj) + + " after IndexBuildAborted error response: " + tojsononeline(res)); + continue; + } else { + return res; + } + } else if (hasInterruptedDbCreationError(res)) { + print("-=-=-=- Retrying " + tojsononeline(cmdObj) + + " after interrupted db creation response: " + tojsononeline(res)); + continue; + } + + return res; + } +} + +OverrideHelpers.prependOverrideInParallelShell( + "jstests/libs/override_methods/retry_aborted_db_and_index_creation.js"); +})(); diff --git a/jstests/libs/override_methods/sharding_continuous_config_stepdown.js b/jstests/libs/override_methods/sharding_csrs_continuous_config_stepdown.js similarity index 100% rename from jstests/libs/override_methods/sharding_continuous_config_stepdown.js rename to jstests/libs/override_methods/sharding_csrs_continuous_config_stepdown.js diff --git a/jstests/libs/override_methods/tenant_aware_response_checker.js b/jstests/libs/override_methods/tenant_aware_response_checker.js index 9c9e9a0ef7745..7b53504ab68d5 100644 --- a/jstests/libs/override_methods/tenant_aware_response_checker.js +++ b/jstests/libs/override_methods/tenant_aware_response_checker.js @@ -12,23 +12,25 @@ function wordInString(str, word) { return regexp.test(str); } -function checkExpectedDbNameInString(str, dbName, prefixedDbName) { +function checkExpectedDbNameInString(str, dbName, prefixedDbName, originalRes) { // System db names (admin, local and config) should never be tenant prefixed. if (dbName == "admin" || dbName == "local" || dbName == "config") { assert.eq(false, wordInString(str, prefixedDbName), - `Response db name "${str}" does not match sent db name "${dbName}"`); + `Response db name "${str}" does not match sent db name "${ + dbName}". The response is "${originalRes}"`); return; } // Currently, we do not expect prefixed db name in db name field as we only test with // "featureFlagRequireTenantID: true". - // TODO SERVER-70740: expect prefixed db name if "expectPrefix" option in request is true. + // TODO SERVER-78300: expect prefixed db name if "expectPrefix" option in request is true. assert.eq(false, wordInString(str, prefixedDbName), - `Response db name "${str}" does not match sent db name "${dbName}"`); + `Response db name "${str}" does not match sent db name "${ + dbName}". The response is "${originalRes}"`); } -function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName) { +function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName, originalRes) { // The db name in error message should always include tenant prefixed db name regardless how the // tenantId was received in the request. @@ -38,17 +40,8 @@ function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName) { return; } - // TODO SERVER-74486: We will check collection ns string in future. - if (errMsg.includes(dbName + ".")) { - // Do not check ns until we change error mssage to include tenant in ns. - return; - } - - // System db names (admin, local and config) should never be tenant prefixed. + // Skip check system db names (admin, local and config) which could be tenant prefixed or not. 
if (dbName == "admin" || dbName == "local" || dbName == "config") { - assert.eq(false, - wordInString(errMsg, prefixedDbName), - `Response db name "${errMsg}" does not match sent db name "${dbName}"`); return; } @@ -61,7 +54,7 @@ function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName) { assert.eq(true, errMsg.includes(prefixedDbName), `The db name in the errmsg does not contain expected tenant prefixed db name "${ - prefixedDbName}", error msg: ${errMsg}`); + prefixedDbName}". The response is "${originalRes}"`); } /** @@ -70,8 +63,9 @@ function checkExpectedDbInErrorMsg(errMsg, dbName, prefixedDbName) { * @param {*} requestDbName the original db name requested by jstest. * @param {*} prefixedDbName the tenant prefixed db name expected by inject_dollar_tenant.js and * inject_security_toiken.js. + * @param {*} originalResForLogging the original response for logging. */ -function assertExpectedDbNameInResponse(res, requestDbName, prefixedDbName) { +function assertExpectedDbNameInResponse(res, requestDbName, prefixedDbName, originalResForLogging) { if (requestDbName.length === 0) { return; } @@ -80,21 +74,25 @@ function assertExpectedDbNameInResponse(res, requestDbName, prefixedDbName) { let v = res[k]; if (typeof v === "string") { if (k === "dbName" || k == "db" || k == "dropped") { - checkExpectedDbNameInString(v, requestDbName, prefixedDbName); + checkExpectedDbNameInString( + v, requestDbName, prefixedDbName, originalResForLogging); } else if (k === "namespace" || k === "ns") { - checkExpectedDbNameInString(getDbName(v), requestDbName, prefixedDbName); + checkExpectedDbNameInString( + getDbName(v), requestDbName, prefixedDbName, originalResForLogging); } else if (k == "name") { - checkExpectedDbNameInString(v, requestDbName, prefixedDbName); + checkExpectedDbNameInString( + v, requestDbName, prefixedDbName, originalResForLogging); } else if (k === "errmsg") { - checkExpectedDbInErrorMsg(v, requestDbName, prefixedDbName); + checkExpectedDbInErrorMsg(v, requestDbName, prefixedDbName, originalResForLogging); } } else if (Array.isArray(v)) { v.forEach((item) => { if (typeof item === "object" && item !== null) - assertExpectedDbNameInResponse(item, requestDbName, prefixedDbName); + assertExpectedDbNameInResponse( + item, requestDbName, prefixedDbName, originalResForLogging); }); } else if (typeof v === "object" && v !== null && Object.keys(v).length > 0) { - assertExpectedDbNameInResponse(v, requestDbName, prefixedDbName); + assertExpectedDbNameInResponse(v, requestDbName, prefixedDbName, originalResForLogging); } } } diff --git a/jstests/libs/password_protected.pem b/jstests/libs/password_protected.pem index 1a30869523723..1a8a08d2cd236 100644 --- a/jstests/libs/password_protected.pem +++ b/jstests/libs/password_protected.pem @@ -4,58 +4,58 @@ # Server cerificate using an encrypted private key. 
-----BEGIN CERTIFICATE----- -MIIEWDCCA0CgAwIBAgIEGQdcfTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEWDCCA0CgAwIBAgIEIO4OPzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBsMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM5WhcNMjUwOTEwMTQyODM5WjBsMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG -c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4Y2yTK4QaCf/ -zGj35W1sCoGoBYtyt7iJthVd86WQEQXK9Efra8phbxlpqEBaf/EvB/hwN8073eCL -NQlhm5kupVfY1hIezlJzrAJp1u1A/m6znkqTs4cHp5+Ln/MkTq3CYTUwy4Z1mM0T -sLmTKBlcdVqC3gvux/iL6RjRPmbPXuHgN3ORagk11oSKEsIy8ShGMFZMdtT1pnqr -xESx3JGIRr+CPoctsDsiOkJmHauqJDqVliF5pjdr2N0T//JOtKzVCY0hf+A0pJ3z -N0zIiP42XANZCo2aikwOmnnSRzIUJT52tp/FIRzFbuenRF5sJD++zUp15H/30FKA -INAZiwWO0QIDAQABo4H5MIH2MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1Ud -JQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBSeFd//+k2VDH02TPQzJmfOqMv4sjCB +c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4xt3hUpO97g5 +pCBkLjz0pu1XyO7dtaV8BqtjDDWFCYKByMJdYnFNPF/DaGmShpADfbsdQYV8IiWx +Ly2fVSZH/rcVNvvdMqiDGxhlci14jLTbt4hW3f02Lct6CLy68hx+9Eg3JoyCo2br +VYbBMhRjBDt7jrYa/WiBVDkeaPfwOxt/jNyZiITWQUMeWfpGjmP5aRp5vgJzDukq +5kuTaIy/z1K0J5IyHu3sSnC29kLWWdaavdpF29iwf78dkuRsh5h4M/UnUxufyqgf +u31WZmTCiZcitAi+vOcRtWQiBWVg1iVt9ohATjhvmc7647T1VoHGggrZdYKvfF8Y +5bPS2LSlPwIDAQABo4H5MIH2MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1Ud +JQQMMAoGCCsGAQUFBwMBMB0GA1UdDgQWBBRvMjhuWPxwzNAJYB7vdas/PF3HVjCB iwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlv cmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzAN -BgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0GCBHvUrJMwGgYD -VR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQBOLNRO -G1L6FxiqreB3MBafbW7Q2RsZdIQRqZilec5Kxf+znDH2C5NiRS6vR+5e7hDSPy6m -F2/CXbZep9Jgu/6SYFSB7x5HvHCSn41zc12UINc5N2ol8e5aHvimoWXvJiT71Wz2 -iRUNUl/DRuUnq/UglCz7iM0NDB3Ti5SgsTka6OmYyAT72sh70pqUq8X+htaSj0/B -+NIrNjRYMCBKhVo2SsRXh7XvsyUraGYoVH3y9mEVLJkyuBf6W1odXLlokzB84bJT -QBXv9IUdDQYBK3J7Q7iVOBW0Vkfp11WfVaAhrcAC/udL5LoxeJep/hJVNNuA0qqG -BkrzIl/Evn3SZ++/ +BgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0GCBEreWhowGgYD +VR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEBCwUAA4IBAQAq0bTR +PEZPpKq+VCBT+06+uoe3l93zs/h72n3Yn6BdO2cCcskqi0yB3NhazLIohgZ1rGyY +o9W5Z73Y6BCk8Rd07c6vTc0Jgra1cdr/1X6RZpENMVfcvi0w3Ej5dlK+znU68oDj +eDh/GorF/I9JvPvYpArIGdxSNR0isDsgI9I+4/YMg7WTCOTrMSLq9DmSK51uxTWe +GY8AUuOPxZPeWwWWZsO9sRIXCbzAPOSF8TiqjWiKkYF3UO0dLIId9YRty7EPp2ea +SDmfYT1CSihDaUzbDmV+39UMw8Pw1mn45kN4CxXPF+3NDx7HqZ4yzna2DiJw9J2w +YEbO5Gg3cM6vxgC4 -----END CERTIFICATE----- ------BEGIN RSA PRIVATE KEY----- -Proc-Type: 4,ENCRYPTED -DEK-Info: AES-256-CBC,53A39D8D01CE003985BD7AB8429591B1 - -7K2t5Np+XfDNlKHDkU29xpNC+3xmE2FJJAmD9pMwpeoiPB/6snYnNXnDJyNW60oA -hriOMMoNU/vGUvn1aAyIlxw4MsjO0PkHfKipI3m+NQC2kZNTTBOBA/dzLavo64zB -5IDDlNzrvHlxP3P/jAF2EdQ91206YtYw/x1Ix4y2F4hITktRsYEx09gH9hKoo8uJ -f2ekx28ANIOjuWBc3zI1SDpfGPqr5ruZZsB6Ucj6+rYpSboC1ZAfdCdjbAhuAkp0 -1QiGKhfyyZdo44ojjCzZpPwGsOaK+f1UYp8p9WqXJ+RfwP69h9yJNVPsWi1axcTd -nRM2ppxmeRuyrPo/HeD2T19gDkzGdlJA35dN2T4p43k4xfUGKWjij/5ndFuupfVs -o4o0rqhzvBSupsxnka1TfXNHrbdJpiWoH0M5G0YhaJfX2+tnWi301jQ2kE8fMXGB -aisE0BXNp0oFk1SYgiet5H3Rj0eWabvUHszjvzEcn8EK/G4llHvijGVUeYIajFfZ -4Yyx4chBAMB8fto/LcDbGzRe9cHyCz1bq7/IBqnYAUMNwsC5Z4OfMRZK274kQX1l -V8YZW4O5jpAyI9BhbwrQE/Lx5eX4JKd62j3OTwewL/aitfXjF5zgiv4LjTLB4XcP 
-z3MAdjCQbwnbd8M6izVy36OMO1wR1PO7Pad39S+BOtdtodB78/ZR3j2bgsRqiHAx -l4kgbKgnKsceHjbxjrb7qelqalwEhlTIwVciY7Dooh4qT73uPRJwWbCXARpsh4mg -1FHCRhyyPumIXa/EENssAghr/xI/gwx6ZJnun4WMK/Um0dHv8WUXQcN+Fx2GT9BO -UgObmASGvfG50q8ZHp+qwIOy/EHYA7fnWYNr1DZnCZPse3AvYwXLyG4AAdFjQdSe -2Q9v+Pw39EYNl7h97gnm10hL79bJ0eFggQGGXrqNd0wKBeewhxSigEEZ70K/H7ga -URoRxDl8pe+NytGRPrWIUZbYPxaFCxc+kpnB9V96u+RuTNQORrOdiKKjzFUve2qh -JiNASm6JHsCKsVfPEmIOSnzwJPpAcEx8SYEyhyrPMHv51UjPtUaStByp+DsWiKeb -yzI20MxV0WishdjgfWN6YU4eRY0CXUPxly+MmhNHdscOSzUtnpaAoCpcEs/zsope -xNUtBz8lsyb1x6ooBibnqrP6E5TA78hiAK+9/UsEpdPBODmudRA0Bc3X9y0dyEe/ -76wzVnZnE3Ho63XYEyRLnHGOQOxjzgQr9ssRh2hjvk9/wEuJ746WmHAxf66DIDRQ -yQP5O8zYYMREPj4s6WPK8zWuYCzMwMmtHVc+/kYUKopmKRr4jzKZucuDuTW5FnFx -Zi9RqgQHrkafTLOsg2mFW85IcewTlJ7lLzHP8wlnbxtX5NdzDArpTuX5gVDvXSQx -LNdUtMrBFvpfszxzHMMEg2qo5+T/2UGS8sd5Z0mpB5jLFzaxAFq98KAU51XVpvBn -Q3R5lXS6Q3myHCYg1NHDCtfss1fiwpCIVn9EI2u9/IhM9Tpnp+l1eFafv1Doi5BL -LRwBtXxqMqUnvdvuS0LBLAJaJlH5jrF28vNrKP4SRK3j9zg6p+Yvgm1EvJC01iiY ------END RSA PRIVATE KEY----- +-----BEGIN ENCRYPTED PRIVATE KEY----- +MIIFLTBXBgkqhkiG9w0BBQ0wSjApBgkqhkiG9w0BBQwwHAQIjrhb/JO1RvYCAggA +MAwGCCqGSIb3DQIJBQAwHQYJYIZIAWUDBAEqBBDtYUB4pr1Zg+bjhQkJuSySBIIE +0HWptM5jRrPfdiEjPEx1qPTIh3zalOcVjHIX0JI6WAuwJlNFJ+jewzEQ7lZWPVXv +XD/M/h9FD9whe0i9F35h+rrHChbhmfCosXItPbgj22wJHvOdwNw8eoardT2p3BXA +TZBNs9LdOPlIbnpSGe+mBU7z3bh3LBpJkdni2gXi6WN+2nyUv5V0oWui9a3sqRhS +fQyVeg7zT+w3/gFIZ2MaMaSAAZS3C+u9GQCqFQH7ut8U2EUnQhoWvbGc38amCAbx +ovC6uS2IwDXMFdWrPmusEWZBa/GmndBa7OXO+KlzUxIM8iw6ICRTmXwT9e5dS4ED +3ONDPzBZxXrsam/tHx0UaTLMo9sDLMoW3PPVp/pYzIyltQMi3QsJ9+oEB8Xel8KB +r+UQAkx+XCgZFFOwGKt76w8OanTbR6krWxbdmEeECo2dlDjQjygGE/jfnQ4Z+4cC +ZvSNj8rODbSbHevkw0Fx/bIrtjI6aIP63cPsV1Y+bk930IasiprOKFsyHarcWMXw +2YQ5IMfqcHjfS5ZRmjmruZhySLZ8TBEBlQSoPEUNpAlcXBkAdhls9fVSa7d1sPcP +2PKLUgA4I/IjBRM9z4Y77yYaeZ8cKDFh8IgGgXlFMwjNUujJRlyOyYGx4BClQXvu +Q7sH4V3PC/JXTeczRUe0+hRHLD2M9krs6ROdDBpvkbxhba3NifhH02eA3sfInazg +mXAbUvjnULorSLS1riScZj/ExLgbIOGiZQ/H6O0v3LZt9EsOlC1xgcOLLWJpCo6A +qypjyuCJGenG1gFIf7CLxU1nhlAC6uRcsvwvY4vRw5fO1u8fuomxWyOOulMFeOBW +my+rNEtTmhBlPiST11g0BHuW+zyGQxgzr88x9tWl+OdIoRGazhWRTguIVOV0ubt1 +dAhF4jY3uNl9kvHMB3viiRvuuH9lf17MTJtdjboIA81uvqYkx7sN3773QzCwcYGP +DdFK7aHB3MwKF9e4z7FFQTIqpRCjyZgih3HhUJub2cKer1qPYN5N37uSTl8Tk7sB +x6xH45lVnQ8bIzzwQd0SBEKi7zIZzOyqstSp7IsOOgsxVZPyDq8Wuqg6DNcOP+A6 +sdaNk5gHVuRsJXDLOoQUjmUp5IOW0ePPFpBwNIbHMAI9/Jy7W7l4A1/9S8/LhFFw +4JI3qaBlAocNR5CnmmS2kOFmQLOR5sdOq4C8+D/1ICXJqyj7MpTfBbUb6Jm1/XSm +9mibccGCgrj2VmpcoYw8O79GJsYVQJofofwhH8r4J3ZIYSA/LkeKtDg3y/gwWwkL +dJYl7ZpF3uoD6d+euwzLJntICLrXFkLvTx15Gy1+qFe4l749/XnEQcav0v1NnYnN +MhlcdOaACxoRA0aq2G7Ds7sFqduX7qqM7/NipP2ER6UhYwI1BFjtT3r+9SMKoJi9 +XHxoiOwdz+qd57NNqvMT9iHJ/YOBPilLFAMgzbJFrDbJguI7OjwTnDF0CCSSTi2r +g3JQszXsZK+uRHi1FfxdBiPKFx2r590262GJ6/goX0thwG0smKkMG4eZ2tUaSiL/ +RLClfI2NfvBat/ThKWcipAc9vZulJzzjGKmmrndU3DnQujh/RAnGCZVlK/ysgQNG +JOZg2p+qtfXODXQj84bcTtdXQ1sSCtorgHr/qKsuT97v +-----END ENCRYPTED PRIVATE KEY----- diff --git a/jstests/libs/password_protected.pem.digest.sha1 b/jstests/libs/password_protected.pem.digest.sha1 index 98c4138fb9974..cc9e4167ae047 100644 --- a/jstests/libs/password_protected.pem.digest.sha1 +++ b/jstests/libs/password_protected.pem.digest.sha1 @@ -1 +1 @@ -470CE33FFA8FCA3CC61F3FD2266B6E3DAB4B77D2 \ No newline at end of file +D42847522F2EB56669C8F4C1E88FFB177026143E \ No newline at end of file diff --git a/jstests/libs/password_protected.pem.digest.sha256 b/jstests/libs/password_protected.pem.digest.sha256 index 02d1a0b702be6..6bddec7b349ab 100644 --- a/jstests/libs/password_protected.pem.digest.sha256 +++ 
b/jstests/libs/password_protected.pem.digest.sha256 @@ -1 +1 @@ -7A24D74A8CC11A256EA0EE08EBBFA60D0274094F7A4B36EB7B8B0061AB58F375 \ No newline at end of file +CB2C415800C604B9C195D8D1F2A714732720867A2736A8DC6ECF8340EA026860 \ No newline at end of file diff --git a/jstests/libs/query_stats_utils.js b/jstests/libs/query_stats_utils.js new file mode 100644 index 0000000000000..1401830ed2d3f --- /dev/null +++ b/jstests/libs/query_stats_utils.js @@ -0,0 +1,208 @@ +const kShellApplicationName = "MongoDB Shell"; +const kDefaultQueryStatsHmacKey = BinData(0, "MjM0NTY3ODkxMDExMTIxMzE0MTUxNjE3MTgxOTIwMjE="); + +/** + * Utility for checking that the aggregated queryStats metrics are logical (follows sum >= max >= + * min, and sum = max = min if only one execution). + */ +function verifyMetrics(batch) { + batch.forEach(element => { + if (element.metrics.execCount === 1) { + for (const [metricName, summaryValues] of Object.entries(element.metrics)) { + // Skip over fields that aren't aggregated metrics with sum/min/max (execCount, + // lastExecutionMicros). + if (summaryValues.sum === undefined) { + continue; + } + const debugInfo = {[metricName]: summaryValues}; + // If there has only been one execution, all metrics should have min, max, and sum + // equal to each other. + assert.eq(summaryValues.sum, summaryValues.min, debugInfo); + assert.eq(summaryValues.sum, summaryValues.max, debugInfo); + assert.eq(summaryValues.min, summaryValues.max, debugInfo); + } + } else { + for (const [metricName, summaryValues] of Object.entries(element.metrics)) { + // Skip over fields that aren't aggregated metrics with sum/min/max (execCount, + // lastExecutionMicros). + if (summaryValues.sum === undefined) { + continue; + } + const debugInfo = {[metricName]: summaryValues}; + assert.gte(summaryValues.sum, summaryValues.min, debugInfo); + assert.gte(summaryValues.sum, summaryValues.max, debugInfo); + assert.lte(summaryValues.min, summaryValues.max, debugInfo); + } + } + }); +} + +/** + * + * Collect query stats from a given collection. Only include query shapes generated by the shell + * that is running tests. + * + * @param conn - connection to database + * @param {object} options { + * {String} collName - name of collection + * match - extraMatch - optional argument that can be used to filter the pipeline + * } + */ +function getQueryStats(conn, options = { + collName: "" +}) { + let match = {"key.client.application.name": kShellApplicationName, ...options.extraMatch}; + if (options.collName && options.collName) { + match["key.queryShape.cmdNs.coll"] = options.collName; + } + const result = conn.adminCommand({ + aggregate: 1, + pipeline: [ + {$queryStats: {}}, + // Sort on query stats key so entries are in a deterministic order. + {$sort: {key: 1}}, + {$match: match} + ], + cursor: {} + }); + assert.commandWorked(result); + return result.cursor.firstBatch; +} + +/** + * @param {object} conn - connection to database + * @param {object} options { + * {BinData} hmacKey + * {String} collName - name of collection + * {boolean} transformIdentifiers - whether to include transform identifiers + * } + */ +function getQueryStatsFindCmd(conn, options = { + collName: "", + transformIdentifiers: false, + hmacKey: kDefaultQueryStatsHmacKey +}) { + let matchExpr = { + "key.queryShape.command": "find", + "key.client.application.name": kShellApplicationName + }; + if (options.collName) { + matchExpr["key.queryShape.cmdNs.coll"] = options.collName; + } + // Filter out agg queries, including $queryStats. 
+ var pipeline; + if (options.transformIdentifiers) { + pipeline = [ + { + $queryStats: { + transformIdentifiers: { + algorithm: "hmac-sha-256", + hmacKey: options.hmacKey ? options.hmacKey : kDefaultQueryStatsHmacKey + } + } + }, + {$match: matchExpr}, + // Sort on queryStats key so entries are in a deterministic order. + {$sort: {key: 1}}, + ]; + } else { + pipeline = [ + {$queryStats: {}}, + {$match: matchExpr}, + // Sort on queryStats key so entries are in a deterministic order. + {$sort: {key: 1}}, + ]; + } + const result = conn.adminCommand({aggregate: 1, pipeline: pipeline, cursor: {}}); + assert.commandWorked(result); + return result.cursor.firstBatch; +} + +/** + * Collects query stats from any aggregate command query shapes (with $queryStats requests filtered + * out) that were generated by the shell that is running tests. + * + * /** + * @param {object} conn - connection to database + * @param {object} options { + * {BinData} hmacKey + * {boolean} transformIdentifiers - whether to include transform identifiers + * } + */ +function getQueryStatsAggCmd(conn, options = { + transformIdentifiers: false, + hmacKey: kDefaultQueryStatsHmacKey +}) { + var pipeline; + if (options.transformIdentifiers) { + pipeline = [ + { + $queryStats: { + transformIdentifiers: { + algorithm: "hmac-sha-256", + hmacKey: options.hmacKey ? options.hmacKey : kDefaultQueryStatsHmacKey + } + } + }, + // Filter out find queries and $queryStats aggregations. + { + $match: { + "key.queryShape.command": "aggregate", + "key.queryShape.pipeline.0.$queryStats": {$exists: false}, + "key.client.application.name": kShellApplicationName + } + }, + // Sort on key so entries are in a deterministic order. + {$sort: {key: 1}}, + ]; + } else { + pipeline = [ + {$queryStats: {}}, + // Filter out find queries and $queryStats aggregations. + { + $match: { + "key.queryShape.command": "aggregate", + "key.queryShape.pipeline.0.$queryStats": {$exists: false}, + "key.client.application.name": kShellApplicationName + } + }, + // Sort on key so entries are in a deterministic order. + {$sort: {key: 1}}, + ]; + } + + const result = conn.adminCommand({aggregate: 1, pipeline: pipeline, cursor: {}}); + assert.commandWorked(result); + + return result.cursor.firstBatch; +} + +function confirmAllExpectedFieldsPresent(expectedKey, resultingKey) { + let fieldsCounter = 0; + for (const field in resultingKey) { + fieldsCounter++; + if (field === "client") { + // client meta data is environment/machine dependent, so do not + // assert on fields or specific fields other than the application name. + assert.eq(resultingKey.client.application.name, kShellApplicationName); + continue; + } + if (!expectedKey.hasOwnProperty(field)) { + print("Field present in actual object but missing from expected: " + field); + print("Expected " + tojson(expectedKey)); + print("Actual " + tojson(resultingKey)); + } + assert(expectedKey.hasOwnProperty(field)); + assert.eq(expectedKey[field], resultingKey[field]); + } + // Make sure the resulting key isn't missing any fields. 
+ assert.eq(fieldsCounter, Object.keys(expectedKey).length, resultingKey); +} + +function asFieldPath(str) { + return "$" + str; +} + +function asVarRef(str) { + return "$$" + str; +} diff --git a/jstests/libs/retryable_writes_util.js b/jstests/libs/retryable_writes_util.js index eb3ae969e3d61..18a88414dff4d 100644 --- a/jstests/libs/retryable_writes_util.js +++ b/jstests/libs/retryable_writes_util.js @@ -33,7 +33,8 @@ var RetryableWritesUtil = (function() { "findAndModify", "insert", "update", - "testInternalTransactions" + "testInternalTransactions", + "bulkWrite" ]); /** diff --git a/jstests/libs/rollover_ca.pem b/jstests/libs/rollover_ca.pem index 7cb94c33ec2a0..b24b04ef9d4ca 100644 --- a/jstests/libs/rollover_ca.pem +++ b/jstests/libs/rollover_ca.pem @@ -3,53 +3,53 @@ # # Separate CA used during rollover tests. -----BEGIN CERTIFICATE----- -MIIDxzCCAq+gAwIBAgIEMEXJYDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV +MIIDxzCCAq+gAwIBAgIEa8Os7zANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCE5ldyBZb3JrMRYwFAYDVQQK DA1Nb25nb0RCLCBJbmMuMQ8wDQYDVQQLDAZLZXJuZWwxIDAeBgNVBAMMF0tlcm5l -bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0 -N1owfjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO +bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIzMDYwOTE0Mjg0NloXDTI1MDkxMDE0Mjg0 +NlowfjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO ZXcgWW9yazEWMBQGA1UECgwNTW9uZ29EQiwgSW5jLjEPMA0GA1UECwwGS2VybmVs MSAwHgYDVQQDDBdLZXJuZWwgUm9sbG92ZXIgVGVzdCBDQTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBALvUQ/c9NMdWMIPv0B5lP0oKUiwAL2WvTZyzmRX9 -imZm2fYpjbYJD6jrpvYk3BP1wV7iPaVqMyy5VmsBjcdJoZQ8kbUE+LnqFSpPwZvs -qlBfVPSPKwf4ZQYESmfrtpn1xOhOW3q8OsJHpdmFQFVOzPm87kSslVuwkyUhwLtd -ToSwe339K4OUJV1ZVrtTQ8bMT/gEV2Gh+fScUpK47W01+he8p3lr0WDwmioyCAYS -v1FHd6YSHB17/M1n5xsDWFillDTxMYK2Y/He7Tvfmy/d5+wOoUNWzQDIkdKlndQ2 -oy5YF0XENmPdGBY6mWaPd03/zKkhVfU0tY650OvLEhIdqmMCAwEAAaNNMEswDwYD -VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFKMXkxW7L94J -wfc3SWBZrL6W5EzOMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAA9eK1CR -Xijo6jowUggkJgwietTVAJfoKBJS+OKpBEbd6lQMg1BpbK3AZe/mY0L5c/MdwDut -7UL9TWtbPiMesPZ5PEO54fvddRIm+gUYRXyiuY2p7vVOt0a4136pMCn24YoFgzQS -vEP8sDHnL/kBVNB9Khwt1Jt8PaTlkuB2B9TqJNcJUwtaAFN8UekaC9MEk0KbRdhy -d37h1rvR+FO/BtaWvF8UxGCKK6TkvPHq3hk1gJ6wL7ME+U1op7EzKnKD4yro4oq1 -54KqKP3wjcfuZSLPlgIfJjkKrQmG3gZ6f6aNaMN/IoVdUubjRKEfDEkhEEUZnwt3 -Xb6r5w91Rbljk24= +AQEBBQADggEPADCCAQoCggEBAMUCe3YeyeHqlUUizlidQKOPGK4Oo3apEkhRLEMV +pivRALuNNpA51gpGfLmUlpXJuRxHwBnOBYdDMmGT2oS8SbRqW9G1LKebtB+9eVvk +1nN06bvOoWemnID+AgTfqrgMENMCvSueX2f6e10wHvZD8PJagl1DDwvTPqB6ZeMC +Uxn3h9JFPWa2ZX28m68VUi4t4ZAdoErD8VMquct2TAoTb3sLQfqXOcbZma2ljQA9 +ajpEbJUi36V8Un8XyGHv9NyuQRuEFS2a8IJ4zhggE4vZtxGwJN5fyhtcteaZJiAM +qgG9I1/DPcwYm27I8WpGJWUNOYOHc+hydr26+73XhYD3DG8CAwEAAaNNMEswDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFL1PcLhtM/ok +9HRGBQCBtytyKDALMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAKvJRLjf +JPCqiu75rcyihAdpHvx6QEpEcGvHhpVgo+ZMkz0OhtRcTCpQ3gg/VmzcK6kAAZdX +7qfzY4i1qdegqidcnp6ozVg+POumEnwGRc8WANeRV2SO+A9t0DNjYS1+2GT/dYcQ +1TDqTblOPXYw0n9dCZkybtzCDfFQXNVB0gBwjSzQiSiubTAIkkv9OwFHGYI3D2WT +QkucHZkoq2BguvdNmc917G7WKE6Hj0zxGg7k3VAnE0SBdJcG9bFLHlDE5D4YxncD +48clvTK33sUNgtL1EbVQphtqrsj+13IO3OdyHC6IvV8yVR/nDJHc0/Mky5oHPKPG +loO6XClH5ipv6YE= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC71EP3PTTHVjCD -79AeZT9KClIsAC9lr02cs5kV/YpmZtn2KY22CQ+o66b2JNwT9cFe4j2lajMsuVZr -AY3HSaGUPJG1BPi56hUqT8Gb7KpQX1T0jysH+GUGBEpn67aZ9cToTlt6vDrCR6XZ 
-hUBVTsz5vO5ErJVbsJMlIcC7XU6EsHt9/SuDlCVdWVa7U0PGzE/4BFdhofn0nFKS -uO1tNfoXvKd5a9Fg8JoqMggGEr9RR3emEhwde/zNZ+cbA1hYpZQ08TGCtmPx3u07 -35sv3efsDqFDVs0AyJHSpZ3UNqMuWBdFxDZj3RgWOplmj3dN/8ypIVX1NLWOudDr -yxISHapjAgMBAAECggEAIPhxtcSYryUB/ybpcWx0X5rQMckWiNLs3MTp6mJHGRKU -0BEbPwj4Jr624B44Q4iwGOe9ynuJ4B/oe4AaUxEpu8umCBCHWJsue+7kHWq9ur4B -O6yl8RA5K4U1Smil9QCfP+gBRtojtla9ViF92Xurn2r6qDNjRGUKb0oterY2QjQr -fMI6i9oGRPyDYf4v08NjKKOtpjLM7PhY+52GPgEzPLULojqwU2Hwv2hmBf8Yni/l -mMTrtiLuCtKx0CLDG6O0ZQ+1u7Gx4iuNANDYKHE4Bf0WiRcRHi9KRsN/ZVa8wZvl -w4NUPo1UdUCKWjM+jTlr3Iv0AM8+BarydYrB3V6CAQKBgQDc/tgcDciVrbViUD0f -w+jX32g8RcA8ffgfReH+Hw69OaFX6ePmOKggWkI95Wx+heBLiQqGDDadwCIHygD1 -sDKyeMKwRAU3BsYUI414m2zWnONXFQQdHbPzSQvQ3amtPbDwRDN8yzdBsQUFcVPT -ikdGMvmADEiTQ5Ck1q4kjr5zcQKBgQDZlI8AMVTzC8ulaI8oPZC2IyOe+9PI3LXe -0/5qPfcks+iQ6XiNM/t6I2eoVqe1SIsMlpCQ1jJoW/GQNdkC6oXAbWc7j7TKNNtF -DxyHIqctRt2/ijy9p/eXSCMZUTT4PyW9/AWxXYIqXZ76QufF71Zoxs4ZRn3I0NYI -HEvC9l4pEwKBgGC4nMbydWalByzHJ2leqerJGWq/sFoJW+37/OPmneHNdkLu19Kr -21GFj1ZdsfVSDI+io9t1PvYd4Ab+rxrYiee4mKTisFGcAldQFBvEEod/VLSJOyqv -FFIXFzfLu/ZZeLY/czVcD4wNuL/gEKsV6wnbR312YtEpEgZC+yZ+3vXRAoGAB/ii -WI52H0ViW5f+Dqpav+F/r5ZoRuaXHyfDHV9Ry51vusdi7EFoSCw94vPxxvl2Zqqp -dzTxudMMgY0He0zeQ5N+gbcdF39iPSB1mhnR6B29iAPnf8dEkd1Js+a+uw2NM22l -Q786QpUVevOyjBTdpI8MA+8KMq47+SYYPHdMMdMCgYEAjf8TnMe6kB4MC/2urZqy -pH3kh60EQ3m5d9Ief3wmUBlov9fvegqOrxBQunVeLERO6mXbFV7lVOm8oDvIC+/d -g18pWN3QoLhfqFTNETwY67EreoMYvXKLu9U7HKs/qYP3RZQb3RB3aSFHlDMJgwIG -mvnUi+VWUQ3c2GApHClq7gU= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFAnt2Hsnh6pVF +Is5YnUCjjxiuDqN2qRJIUSxDFaYr0QC7jTaQOdYKRny5lJaVybkcR8AZzgWHQzJh +k9qEvEm0alvRtSynm7QfvXlb5NZzdOm7zqFnppyA/gIE36q4DBDTAr0rnl9n+ntd +MB72Q/DyWoJdQw8L0z6gemXjAlMZ94fSRT1mtmV9vJuvFVIuLeGQHaBKw/FTKrnL +dkwKE297C0H6lznG2ZmtpY0APWo6RGyVIt+lfFJ/F8hh7/TcrkEbhBUtmvCCeM4Y +IBOL2bcRsCTeX8obXLXmmSYgDKoBvSNfwz3MGJtuyPFqRiVlDTmDh3Pocna9uvu9 +14WA9wxvAgMBAAECggEAU0+DaiYG9VRAH2ZioDKPnRrsMt7Z3VoN8yrwbsX+6VSO +3MEQq9jpXJsGL2xYaatOblkhMUhgKh0OdxkRNURyXqsDfSECazZ661kystuInHZ6 +SQNOWgio8ht4OxtilEX172WfHYzxh4TzGis5TKbag4Im0s2C1VtVhoN8Bo17GuVG +R6vIolyn4KKIdZIvChCH0bdlOQkEq/RZ+j5Zz9+Ml5lMJ7roYOLq4Vxmc6wx7ZeS +4Eofloy6PZHpyvhShjYQGvV4gs0utEh7jWcD1GUkjsSkcQSt2fIwS4kD+vjrMraW +qeEpYGixYyB662+STihXDxHeZoVZ5HdF6Mi6a2V7cQKBgQDhnRIrZFQD4gnfBhRV +drk9mLgHPOkLBs5SnFbG1+JAPjZUJpbnRtClKVk8+0AM0Ud/sYx19KYCaX0zvxIB +Eh+D2qQYWr3ZG+ag5j2GMT9bknvXqafxBLZNg6jwxnVaaOrGrsxboYNinfc51sIT +sB5ZqDFpUsn4iVUmpcm5GBnPCwKBgQDfiy3Iqh6+hcijM5MTxTkZOKMlXQQGv7Ss +qS45tSBQJf+WtjXvHBOPxfHAsbzuB6HlxxKBs4gQ/dNp7wR55GEffa2c3XPKsyes +TMdKgB2SiHLrLw38dhHtOFe5m8IT32CjqmhXe5xtyJSyFxFTsywM69y0zeWHVS3Z ++H5jkO+mrQKBgB4lc8kShdti97iyQkNNMuFVJ7nn3KfZh/Yn57x4GkZFSdMiuOU8 +ZCK9jKwGUn+j8y7P1ZnpT8lguRcR/+DewBFJRMXwUg/Rl5aGTVBCAlpFD4E4rTUa +URW4cvmBOysSe9SChNH1me2yd9dlp9cjoFqQi9Gr+0rXZuZcHsE5xDETAoGBAMNt +W3NINmR9dPAoUHZCPy6rcwVhEfoMcplXtg/BJySqc42Acho9w2Q3uqepOvAlQAYv +SSCWoWepX8AGszUU0UvEhZjTiT017oMVBE0/P5sxKrYsht+lIPrv/NFJOBxDdqre +eSWx7QmVB1nCDOXNh78sG+D896Kedt1N/sBwnvJpAoGAa782dZSkN6SvKnzCHtQR +ItzQA3eFmVKJTvzMj9AuEdtuuiX4SQyWwRhMOtlhmjv98QVUvLYlT1/tqYdYcIAM +zLigrlyCqfn0rgx80A9AWb8eAPiAUFuaaYZPifRHRUZOEohHU8ARK9ZmG3lDNK0W +aossgb2d+UwdcJV8Ghl7M5s= -----END PRIVATE KEY----- diff --git a/jstests/libs/rollover_ca.pem.digest.sha1 b/jstests/libs/rollover_ca.pem.digest.sha1 index c028ae06ec922..ddfe377bf1828 100644 --- a/jstests/libs/rollover_ca.pem.digest.sha1 +++ b/jstests/libs/rollover_ca.pem.digest.sha1 @@ -1 +1 @@ -BC0C54E29440F5B46456402B1D4E23F3C415897E \ No newline at end of file +A7BADDAD616FD9FF94CFEF687EE98597E90594D4 \ No newline at 
end of file diff --git a/jstests/libs/rollover_ca.pem.digest.sha256 b/jstests/libs/rollover_ca.pem.digest.sha256 index 14c053cf5ae68..06299594bd294 100644 --- a/jstests/libs/rollover_ca.pem.digest.sha256 +++ b/jstests/libs/rollover_ca.pem.digest.sha256 @@ -1 +1 @@ -2ED258565E79ED591A290D07D46A4AFA0C4071FE0744A7931D9B19177ACD27F6 \ No newline at end of file +5949BB54DB96B97538682C309D91EF372993B2D74203539DDDFF63D64FAF5668 \ No newline at end of file diff --git a/jstests/libs/rollover_ca_merged.pem b/jstests/libs/rollover_ca_merged.pem index d89b03d088c9a..95d80109cd4a3 100644 --- a/jstests/libs/rollover_ca_merged.pem +++ b/jstests/libs/rollover_ca_merged.pem @@ -5,47 +5,47 @@ # Certificate from rollover_ca.pem -----BEGIN CERTIFICATE----- -MIIDxzCCAq+gAwIBAgIEMEXJYDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV +MIIDxzCCAq+gAwIBAgIEa8Os7zANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCE5ldyBZb3JrMRYwFAYDVQQK DA1Nb25nb0RCLCBJbmMuMQ8wDQYDVQQLDAZLZXJuZWwxIDAeBgNVBAMMF0tlcm5l -bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0 -N1owfjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO +bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIzMDYwOTE0Mjg0NloXDTI1MDkxMDE0Mjg0 +NlowfjELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO ZXcgWW9yazEWMBQGA1UECgwNTW9uZ29EQiwgSW5jLjEPMA0GA1UECwwGS2VybmVs MSAwHgYDVQQDDBdLZXJuZWwgUm9sbG92ZXIgVGVzdCBDQTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBALvUQ/c9NMdWMIPv0B5lP0oKUiwAL2WvTZyzmRX9 -imZm2fYpjbYJD6jrpvYk3BP1wV7iPaVqMyy5VmsBjcdJoZQ8kbUE+LnqFSpPwZvs -qlBfVPSPKwf4ZQYESmfrtpn1xOhOW3q8OsJHpdmFQFVOzPm87kSslVuwkyUhwLtd -ToSwe339K4OUJV1ZVrtTQ8bMT/gEV2Gh+fScUpK47W01+he8p3lr0WDwmioyCAYS -v1FHd6YSHB17/M1n5xsDWFillDTxMYK2Y/He7Tvfmy/d5+wOoUNWzQDIkdKlndQ2 -oy5YF0XENmPdGBY6mWaPd03/zKkhVfU0tY650OvLEhIdqmMCAwEAAaNNMEswDwYD -VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFKMXkxW7L94J -wfc3SWBZrL6W5EzOMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAA9eK1CR -Xijo6jowUggkJgwietTVAJfoKBJS+OKpBEbd6lQMg1BpbK3AZe/mY0L5c/MdwDut -7UL9TWtbPiMesPZ5PEO54fvddRIm+gUYRXyiuY2p7vVOt0a4136pMCn24YoFgzQS -vEP8sDHnL/kBVNB9Khwt1Jt8PaTlkuB2B9TqJNcJUwtaAFN8UekaC9MEk0KbRdhy -d37h1rvR+FO/BtaWvF8UxGCKK6TkvPHq3hk1gJ6wL7ME+U1op7EzKnKD4yro4oq1 -54KqKP3wjcfuZSLPlgIfJjkKrQmG3gZ6f6aNaMN/IoVdUubjRKEfDEkhEEUZnwt3 -Xb6r5w91Rbljk24= +AQEBBQADggEPADCCAQoCggEBAMUCe3YeyeHqlUUizlidQKOPGK4Oo3apEkhRLEMV +pivRALuNNpA51gpGfLmUlpXJuRxHwBnOBYdDMmGT2oS8SbRqW9G1LKebtB+9eVvk +1nN06bvOoWemnID+AgTfqrgMENMCvSueX2f6e10wHvZD8PJagl1DDwvTPqB6ZeMC +Uxn3h9JFPWa2ZX28m68VUi4t4ZAdoErD8VMquct2TAoTb3sLQfqXOcbZma2ljQA9 +ajpEbJUi36V8Un8XyGHv9NyuQRuEFS2a8IJ4zhggE4vZtxGwJN5fyhtcteaZJiAM +qgG9I1/DPcwYm27I8WpGJWUNOYOHc+hydr26+73XhYD3DG8CAwEAAaNNMEswDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFL1PcLhtM/ok +9HRGBQCBtytyKDALMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAKvJRLjf +JPCqiu75rcyihAdpHvx6QEpEcGvHhpVgo+ZMkz0OhtRcTCpQ3gg/VmzcK6kAAZdX +7qfzY4i1qdegqidcnp6ozVg+POumEnwGRc8WANeRV2SO+A9t0DNjYS1+2GT/dYcQ +1TDqTblOPXYw0n9dCZkybtzCDfFQXNVB0gBwjSzQiSiubTAIkkv9OwFHGYI3D2WT +QkucHZkoq2BguvdNmc917G7WKE6Hj0zxGg7k3VAnE0SBdJcG9bFLHlDE5D4YxncD +48clvTK33sUNgtL1EbVQphtqrsj+13IO3OdyHC6IvV8yVR/nDJHc0/Mky5oHPKPG +loO6XClH5ipv6YE= -----END CERTIFICATE----- # Certificate from ca.pem -----BEGIN CERTIFICATE----- -MIIDeTCCAmGgAwIBAgIEe9SskzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDeTCCAmGgAwIBAgIESt5aGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs 
-IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQyWhcNMjQwNDMwMjE1OTQyWjB0MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM1WhcNMjUwOTEwMTQyODM1WjB0MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO -S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDf -vZIt82obTHnc3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24T -pItMW1N+zOaLHU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPu -Et2rFgGg3atR3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYq -BLGt00Wws4bpILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4 -vBdU0Kdt9FbTDEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRr -qmctCX4KQtOZ8dV3JQkNAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBAJnz4lK9GiCWhCXIPzghYRRheYWL8nhkZ+3+oC1B3/mGEf71 -2VOdND6fMPdHinD8jONH75mOpa7TanriVYX3KbrQ4WABFNJMX9uz09F+0A2D5tyc -iDkldnei+fiX4eSx80oCPgvaxdJWauiTsEi+fo2Do47PYkch9+BDXT9F/m3S3RRW -cia7URBAV8Itq6jj2BHcpS/dEqZcmN9kGWujVagcCorc0wBKSmkO/PZIjISid+TO -Db2g+AvqSBDU0lbdP7NXRSIxvZejDz4qMjcpSbhW9OS2BCYZcq5wgH2lwYkdPtmX -JkhxWKwsW11WJWDcmaXcffO3a6lDizxyjnTedoU= +S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCb +k/WPIqqv46Nv9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZSh +iO2o9iDC5O1Aedop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4n +NY00x9PkCcoq98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJX +qaeRNXS0/j8Wwp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX +5aJoBUDL22fLRhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHD +TFGBx0p17I1g0xjWNjMVAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI +hvcNAQELBQADggEBAIwWNyaQhZglJyKMIAUAwlvBL5REA99bua06xWfJwdmdlci9 +Bb6MgQzVk5K68rsNlcL0ma+Ri5FfU+j7gsYZh4pILYb9xqFxiKX7bxMZv99LR8Mi +0EImM7gz3S579qYBXWd4V6/1G864qln8neHv+X3MF/wk3O9IYqepWsC3xDRos1Zv +xQfb37Ol4pcHtue4wHXr5TV8+KPcUusfNcftnpsEHyEUHqPORdHB7xRpfhosRYvL +7WwMXNseuyHFcdA/rEhUVsca+SUeOMIW+8euuU/as3ZaEpv1ZmpHEYXHb2SlS6W+ +gTzUOtNXsKVDrm9uEcUHytp+xvp9l9NNM/IRGGA= -----END CERTIFICATE----- diff --git a/jstests/libs/rollover_ca_merged.pem.digest.sha1 b/jstests/libs/rollover_ca_merged.pem.digest.sha1 index c028ae06ec922..ddfe377bf1828 100644 --- a/jstests/libs/rollover_ca_merged.pem.digest.sha1 +++ b/jstests/libs/rollover_ca_merged.pem.digest.sha1 @@ -1 +1 @@ -BC0C54E29440F5B46456402B1D4E23F3C415897E \ No newline at end of file +A7BADDAD616FD9FF94CFEF687EE98597E90594D4 \ No newline at end of file diff --git a/jstests/libs/rollover_ca_merged.pem.digest.sha256 b/jstests/libs/rollover_ca_merged.pem.digest.sha256 index 14c053cf5ae68..06299594bd294 100644 --- a/jstests/libs/rollover_ca_merged.pem.digest.sha256 +++ b/jstests/libs/rollover_ca_merged.pem.digest.sha256 @@ -1 +1 @@ -2ED258565E79ED591A290D07D46A4AFA0C4071FE0744A7931D9B19177ACD27F6 \ No newline at end of file +5949BB54DB96B97538682C309D91EF372993B2D74203539DDDFF63D64FAF5668 \ No newline at end of file diff --git a/jstests/libs/rollover_server.pem b/jstests/libs/rollover_server.pem index 306ae1b885239..f90adf960f3bd 100644 --- a/jstests/libs/rollover_server.pem +++ b/jstests/libs/rollover_server.pem @@ -3,52 +3,52 @@ # # Server rollover certificate. 
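The query_stats_utils.js helpers introduced above (getQueryStats, getQueryStatsFindCmd, getQueryStatsAggCmd, verifyMetrics) are intended to be shared by $queryStats tests. A hedged usage sketch follows; the collection name is invented, and the internalQueryStatsRateLimit server parameter used to switch collection on is an assumption about how the feature is enabled, not something this patch establishes.

```javascript
// Sketch only: drive a query, then read back its aggregated query stats entry.
load("jstests/libs/query_stats_utils.js");

// Enabling query stats collection is assumed to require this server parameter.
const conn = MongoRunner.runMongod({setParameter: {internalQueryStatsRateLimit: -1}});
const coll = conn.getDB("test").query_stats_demo;  // hypothetical collection

assert.commandWorked(coll.insert({x: 1}));
assert.eq(1, coll.find({x: 1}).itcount());

// Entries are already filtered to this shell's application name; restrict further
// to the demo collection so unrelated query shapes do not interfere.
const stats = getQueryStats(conn, {collName: coll.getName()});
assert.gte(stats.length, 1, tojson(stats));
verifyMetrics(stats);  // checks sum >= max >= min for every aggregated metric

MongoRunner.stopMongod(conn);
```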
-----BEGIN CERTIFICATE----- -MIIDsTCCApmgAwIBAgIEBBkPnzANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV +MIIDsTCCApmgAwIBAgIEDQu6BDANBgkqhkiG9w0BAQsFADB+MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxETAPBgNVBAcMCE5ldyBZb3JrMRYwFAYDVQQK DA1Nb25nb0RCLCBJbmMuMQ8wDQYDVQQLDAZLZXJuZWwxIDAeBgNVBAMMF0tlcm5l -bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIyMDEyNzIxNTk0N1oXDTI0MDQzMDIxNTk0 -N1oweDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO +bCBSb2xsb3ZlciBUZXN0IENBMB4XDTIzMDYwOTE0Mjg0NloXDTI1MDkxMDE0Mjg0 +NloweDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMREwDwYDVQQHDAhO ZXcgWW9yazEhMB8GA1UECgwYTW9uZ29EQiwgSW5jLiAoUm9sbG92ZXIpMQ8wDQYD VQQLDAZLZXJuZWwxDzANBgNVBAMMBnNlcnZlcjCCASIwDQYJKoZIhvcNAQEBBQAD -ggEPADCCAQoCggEBAMXA2FrbYDKpEo2EKRNCsx1C4hvDloS5S+GEjOCSk2tqPeZA -cAMrSMhLYH48Qoda4ojVJ+kbLsRABQyLQfIwc4+zcDO2P7cCVpHK2fsnjaPAMAxQ -t8M7zHAeQxCRFMjNeU9+wymQ46ztCqvetEktmIjPZpfqIJo4DPeVlqwxj0kNm6w4 -hEtS+rFUDVgunnTMugbEBXcidkBpMGRiovC1U97YjKJlPtZCODZ3TrDSWpppMKXS -4rIugM6VouUitqlJ85kKUaDYWzuoUPVYLwdf0lWUrlgrYruvYWVs42Yh9yB6GN8B -3Re2xC1XEPLVR81VP21hIdcBdE+GtK02jxKgfvECAwEAAaM9MDswHQYDVR0lBBYw +ggEPADCCAQoCggEBAO+78PkGqGcpB2yLFwqJCcD/+nqNa/G/ZywnCEAVaJSKfQsD +inq4nHbh4FHReB7RxikuhXymWi8HpzATTcBw2t92ZnEjxzJ/CXqZUshcGkGicJXX +vLoEuWRDL7pIgd2uju8Z1OKo8Ueb6rkIh0dBKtP8WkdIzXl5Cl8Tdyqu59vKlRIf +pfi7lzQCZmFnhf+ClsRAgqlf7efIWzfFEKnjzZsWq5clo9QVt4s83tXKlL02XT5v +TPvc6QooYI7KbepamHhniO1W5vZ4RL72s+28TAAEzu6jY1ndqwhP3Fxcsg0riEqF +K2GkAPjReD7YHjNKYMdrcjtdA781i9RPuizcEkECAwEAAaM9MDswHQYDVR0lBBYw FAYIKwYBBQUHAwEGCCsGAQUFBwMCMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAA -ATANBgkqhkiG9w0BAQsFAAOCAQEAsm3OxjXa3lO7rE1sjE6OoCAk9zad5gb5FUyZ -yVEvg5GoOpna3ceOqWbHy/jYEI+ZRfzIEUENLXAfHe6IyPXCHuzhhxrxEikB5S5R -yT+j+EZvBmFOexZ8dS26U2jMdB6R/NEcTFCWheKV2gVIGp/EFWppA93AcQbJuqQ3 -+HppdEjNpouRTbkVw4/SIu8B4NWqW7+3IWbq6OetzZu8M0a1m+iYKSN/zAbkVyHU -Wsg0xbXeF75+91/cKieyDmfntaIS6ZjdYDymvQTZ/Lf4sOD3z/ubrXACNrqe5iYi -SaZ2wyryWQtd8R7l53HBNFVn72d1XxIv4ikMJaGnxo6hmHC/sQ== +ATANBgkqhkiG9w0BAQsFAAOCAQEAAP374HAhwhdwFwC2aes21gbx0zSD9UBLStYh +rzh6nwJ0z5lIQv/Y1OVZypjlU8ksuTFNJ9VjgN06ebTPOsX/mD0OszvRdSX7UvtA +YwR2s/0x/htd09/ET43XwxMVJ44HiBVrgzcm8focxNK6TYdmR0AzPczmLeIum5q/ +GJQZrSYpBQYa+6Hztjq4lhXhMOJKXq4FBPE43qOPDVhJf2DK5z7qscO8js6fMAre +gwX4QquHDjUeaMHedg/D9NKlVu3zC6TJoPZlZMu889w9z2iMa9DZKpaW8NmSF0yD +dFmkV0k9q7W79XmfctJCUtW+z3SmTO2i6YpFyeuBMPIg0pbhaw== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFwNha22AyqRKN -hCkTQrMdQuIbw5aEuUvhhIzgkpNraj3mQHADK0jIS2B+PEKHWuKI1SfpGy7EQAUM -i0HyMHOPs3Aztj+3AlaRytn7J42jwDAMULfDO8xwHkMQkRTIzXlPfsMpkOOs7Qqr -3rRJLZiIz2aX6iCaOAz3lZasMY9JDZusOIRLUvqxVA1YLp50zLoGxAV3InZAaTBk -YqLwtVPe2IyiZT7WQjg2d06w0lqaaTCl0uKyLoDOlaLlIrapSfOZClGg2Fs7qFD1 -WC8HX9JVlK5YK2K7r2FlbONmIfcgehjfAd0XtsQtVxDy1UfNVT9tYSHXAXRPhrSt -No8SoH7xAgMBAAECggEBAIzrhcIJPp4bWbs7CMJO77DUbqh2Upa2LNrCSFnoWeFb -zkx8ctXvTGhfPp32dzpZi8ESlORKmKMFNBPiUNYzrMYkirpvDyxtIB+4vGl7oDWm -yRadlrYrsN3c0tHFcVg/BEmf/ZdrRrN+H/KmOzdCpMYpdeSf1SfQ9XyhPsPqdT8K -kKM+Yn94qqjdlZzPc5ylBllg09oT+NDfBEUTa2gsSLb3VP17bh0/twUPMBA864ds -5aT0MkaZYxNu/LYsc3PNhfZnG1qlyJwT6oIUCju/X0qm6iAXy9d7WZ7HN4tZ2G7H -pjk0sVJzlquzhfxRYD8g9Hhu4fzMEnC0SvVIThkNGSECgYEA6FaXTBeKm2tzCqCr -2SaxBRcMgybWLTgUg/Fnm+aQVC6f7emMHb1K6nlZxabTpz0tcIJFoquThTKYvvUB -e712wtd8ujmCDfXK/7SHWn8EUW0vbwA4yLRcKX+J64f5VhSnvVTd/yvgEpj0hzCZ -8jdfg3pyK2LZ74WxiT9vxl1Sf4UCgYEA2eSSh8YApSpcM155MpXFTQlgBDuUIfot -o33j/ccsJyB47hGXphOYVCBA+RMLiix//Xp34x6xeFJFfMeSIWiaGIRZ3mQpm/eN -UPUL63Usk16cGkhbyEOa0s5o8mTjT3dTYIuJ0jIFBIfR8nfqjg3YmKxHlhm9uowe -ArExXCekv30CgYBZiFzfdsb0I5D+jHIMyWs4AezRcZrhbBYDznhVzKDbv+fjf7d+ 
-El8XQlJE54fyj5G+JIV/LU047AmOtM2wiI+GgBHRla23gXuL1F7AkefxFPlNdjFr -ro1BdKOKfyincmg9fsHZvmen4weAPUtl2s7U1M/ARmSjd8q1kBubvoS3HQKBgFKR -1qFS/D2avtPMkjsEHH9j9RnFhg4WIyNYeoRZ7LZrDMiSrBgIRupiSpzYhb/3uwzQ -UwwjPndtHd36NrsoS3TM+s1WwZnUBp5OLhUtExZJfPhMwVMzo0ENbSGl76nWeycT -2cdooxQjcADlRmJMhu7cIkiUOpytqlW01hKpBzp1AoGAGwf8hPmGVliXI3QxkE+0 -x4vMRU4TxdYxwI/NUq6lg2VKR8MtjwPRIZzSgE+WXuwEujyW5e7Xik+MWI+d25Wn -640nsi8SdrffjaRTMvJ/cngPt5O6h/2p954+5w5w1JLwxRM9xuMYx1aUiNnvlWqQ -aZP3iz7pFJ2fTDM8T3y9++Y= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDvu/D5BqhnKQds +ixcKiQnA//p6jWvxv2csJwhAFWiUin0LA4p6uJx24eBR0Xge0cYpLoV8plovB6cw +E03AcNrfdmZxI8cyfwl6mVLIXBpBonCV17y6BLlkQy+6SIHdro7vGdTiqPFHm+q5 +CIdHQSrT/FpHSM15eQpfE3cqrufbypUSH6X4u5c0AmZhZ4X/gpbEQIKpX+3nyFs3 +xRCp482bFquXJaPUFbeLPN7VypS9Nl0+b0z73OkKKGCOym3qWph4Z4jtVub2eES+ +9rPtvEwABM7uo2NZ3asIT9xcXLINK4hKhSthpAD40Xg+2B4zSmDHa3I7XQO/NYvU +T7os3BJBAgMBAAECggEAH7NAaA2DUzDPtICn6VK5oXW3z2DH902R/PqkUqUCETJG +2p/7PcMPnYC8wtTqB1lB1Mw8BoGQx0tBxk+nJl1F8+qaHTfX7UtzG20Oz0OujSNH +s+i1Ifvh7+oac42G7qNYbzGBPv+L+CYb4Z/JE5kAasuhvG9aDEf/IRdX2KMMMbnU +eC3At9WcrxqNvFreYaVJNgU/AUbY8n9lyYSWJw/GODkf1rAhPYU3DrXqSQtnJu8D +HEo9BUFxvuz6FMeADYecNSJolKU6d1KM3cRtDsftgK2G2Hn+v99xubIoFsV/7M6R +9H6pQaiKsSo/yatg4s0QQmQNZ5EHBySqTYvyCRG+aQKBgQD9J9kpVcsWYzNX0OFT +15Zr8GUK0V4UNMdVgpjaPLQx9x9+wBpCmVFfhGEmP7aM2ZtbH/1a3CsvqMzouweZ +NHfmVCybUmBbdcj36PXzVFS/iExDU3EAZizzAxQ1QscDHTnp1DaJXzp9o3VZS9vv +ptTddaddPxaVxWCxncik5YKwrwKBgQDybX0cWIvQJBO+bFk2kmWsLWlK3gmznGy4 +KnA5oJbJEv+9ehw4WwMSQPc5PMGbOnNN4cmkn7XFbAeaCuV8tS++8LFlGJNutrbb +Y+DiEBHuaCbscM+By90Z018qw96g3ejLGKUTjffyte4kYxZWqODzab5qi/LPz01G +24jtT0XIDwKBgEvMrapBxQBcDZiCs6UuDR0eqrJ1hAzazMCezPOzb2TykJycGoDM +dV/7PDd+pkNAONMtHeghulCX41rf/WNzIV923rBXFwDroJQSTepg2stKnUMfbdtn +vJe62UclSn91Ncz8vKOfPt01n0Jwg1cbhesVelpiNHn90nj8PIKbMemDAoGAfEhB +QYzrcHNuY8ssGVwIH7LQLf+Sva/N1MR88C4x3zeMQFkfqoyOEIeZtpA9ORVIE39T +XsA58dImO4Smjb5dkefXKdrlinbFW8fifEJ8zto6SjCzUPlwilLgkQ4FTjc1pxkl +V9cKbDV0ttbNlvPRDzkA06KXgo3mMhNOKUrgnWUCgYEAwI9FloXAj+S3efkeZ9jc +viQdRtWRQGnmidVu8Rfp+8mBEQ/qu2s1jI66L5ax3Xp32QL4yDe979OOwp7pV9Y4 +M3cJ3lM/82bkfIQBP2MfXr9LpBQ9KaPInVWHU8fTdnAybQttJ5hZnUHiQ9Bw89Sr +f7g5DtSK1lTNZXS/YD5Mbfg= -----END PRIVATE KEY----- diff --git a/jstests/libs/rollover_server.pem.digest.sha1 b/jstests/libs/rollover_server.pem.digest.sha1 index b3c5b556abac7..3baf539de1996 100644 --- a/jstests/libs/rollover_server.pem.digest.sha1 +++ b/jstests/libs/rollover_server.pem.digest.sha1 @@ -1 +1 @@ -5C5314F8D08C97FDA9DA5B112F5FA30B37319197 \ No newline at end of file +C7F5ED18F1B7BB7461F523786EA3DC86CE760CFC \ No newline at end of file diff --git a/jstests/libs/rollover_server.pem.digest.sha256 b/jstests/libs/rollover_server.pem.digest.sha256 index 12ff9605c6cd2..b658c87a79aee 100644 --- a/jstests/libs/rollover_server.pem.digest.sha256 +++ b/jstests/libs/rollover_server.pem.digest.sha256 @@ -1 +1 @@ -C6F4922E7BCEFAF6ECE24C01D812B7CC77E8C4599717DBE916A259FBBFD46779 \ No newline at end of file +77F7D478FF7EEEB6EE4F15A0410C24B83ED728D4C3F7654C9FBD865B1EAC6484 \ No newline at end of file diff --git a/jstests/libs/rs0.pem b/jstests/libs/rs0.pem index 14f0e2a9d1cd8..1a03d984df691 100644 --- a/jstests/libs/rs0.pem +++ b/jstests/libs/rs0.pem @@ -3,55 +3,55 @@ # # General purpose server certificate file. 
-----BEGIN CERTIFICATE----- -MIIERDCCAyygAwIBAgIELAAMRzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIERDCCAyygAwIBAgIEOEYgezANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBYMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjBYMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDDAKBgNVBAsMA3JzMDCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAMdo6tEwLb5g57mc+aj8i8sZEhePJ1LScFicbZ1v -yxsmcQ1T3m0Vcy/MUKCja8vQ5aDYavNZmJ6rsHmmMjF0IGG4UgDmJE40kMXfLRCJ -DJ9kZyToq+CVQKHHNFbCCF4rYj7crz0t9qn8G3+1L901O2JkXhUdfIQ1XJiB2kfD -mbUBjl2kxhobX0IH4QKEFoFvVcT0xGBA4OFMRk2fS9M+/6ya0z5wcjeqSw9juCWr -H6HtcxRCHVrAEBJk7lbOKHGpQDPUdm0zL5/ayj52Y+ky7H1+pergjgx/ilXOmQYp -iOHI6fUuRUR194J/t8TG/PE6ZQqN2YxeLY4H5ffFha3v6ncCAwEAAaOB+TCB9jAJ +AQEBBQADggEPADCCAQoCggEBAKzHxnz3XQ18kldMMXkmB1asTjrvUwfjRTjd2SPF +BF4kYv04iNW2RXZv12Z8HLFvkMBPICeli0k7MGPParqHq9xY8iEMWE2Kg2/UD6Rk +GRfm9DKLg0XsTY+tDGx2lV96bxHoAdJyAW4+Gw+aLkoaU/g9KWS1T4uXYcXav42r +rYP4X5IQoDidUJrYEanjOVEq9Yb2Yuhx2ms+/UgMJh2NeufQjzeSaLKuWMvg+ifW +CSQqXrVzBLpMxl5l7Xzv0RQSm4W110EagDT+qyyR7dQAPj927+0BxCVWVB/eWrgf ++T9/ktYQwkfElux1ceQvTyPo0drwbB3+s7vDIYkyW81V6J0CAwEAAaOB+TCB9jAJ BgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNV -HQ4EFgQULj+elw+YJr0JKdiRLAtdeH40E4AwgYsGA1UdIwSBgzCBgKF4pHYwdDEL +HQ4EFgQUL8FPqdWiheUf3BW+3276HVxXwgAwgYsGA1UdIwSBgzCBgKF4pHYwdDEL MAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9y ayBDaXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNV -BAMMDktlcm5lbCBUZXN0IENBggR71KyTMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE -fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAnRwckjgQFPpAehcBqZVJOBQspjfQKDWF -9iipxovXSQ5yPfqKu9Apywy+GryFhg881UFEcPgdVVxMEZ5SkUQNNhjv7+QAPz9D -Jc0QqIUdZkgqAvJShlJ8y+7C27p7pUqbsflyx0Zd1NnllNW4xclhZ8pG4oqmhX5H -fJxUtz2LEk3pttC8c56f/t3CaVSVFDAWDFfPXAj2ZOg0cKpjn6A6rxA7h5gAXFlB -fjEjBVpyeMzgfS3OvHsSCZCe2mZK6vdRHFnniZMDPog1h7Z5tDWPAZH1UF9pGhVg -Tbyg/Xbp159SjmleaLMsL39E0Nqj18kzMYHT5oxlq1LPyrzmUcVSgg== +BAMMDktlcm5lbCBUZXN0IENBggRK3loaMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE +fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAXnw4jioplYNcJh1AAza9BqCyAkPd4zC9 +K1yaJZPeh5VJ2ytvTYsC2JXohVWvfqO2Rd7I4x95KB5I1+bbTNqpvvN5/9/BDo1h +jL83THTcPCifyMm7i8O+I+jPcAGeKoRkprxZwQrhBbqQgKAcayCVe6m1WF0t5nt3 +e8I7pKLYQlW33N3FFDSpo5C0e1XEZt5pXBNlCafowqCHEKp/u6NH5sDFXaiSIWN/ +Y8xRiV9EVTfQcBhC+ssg3GbtvEWGLZY8uzpTh2KFv4smj3e7rel9blh7MRlie0/r +DkUxt1eYchLmQPBJa9xNLxNXCSccgI5Emx5ZGf93I22fEo0u6mhU+Q== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDHaOrRMC2+YOe5 -nPmo/IvLGRIXjydS0nBYnG2db8sbJnENU95tFXMvzFCgo2vL0OWg2GrzWZieq7B5 -pjIxdCBhuFIA5iRONJDF3y0QiQyfZGck6KvglUChxzRWwgheK2I+3K89Lfap/Bt/ -tS/dNTtiZF4VHXyENVyYgdpHw5m1AY5dpMYaG19CB+EChBaBb1XE9MRgQODhTEZN -n0vTPv+smtM+cHI3qksPY7glqx+h7XMUQh1awBASZO5WzihxqUAz1HZtMy+f2so+ -dmPpMux9fqXq4I4Mf4pVzpkGKYjhyOn1LkVEdfeCf7fExvzxOmUKjdmMXi2OB+X3 -xYWt7+p3AgMBAAECggEAevMdlU13ZuKo/bDj3mWYa+lRVmVVgaNu0bPhgijjuiHB -os4LXobTNq1rKSCZ3lk2vC20A8dCatLBRMBMQmGvdZEIPrWnvPB8MwSf7IoT1lM5 -pXAwSZC9AQKqKrQIJMfrE4dOJRKp+7UN33f/wwyHSOtJdwtvMrUUGpQ12fmoVAlC -93LlK0g7ZrnIXsuZoMnJqNKra3nizTAeKzG0AvrKU9tR21SPkQZJvGRKDqIbYfGW -5zJorVLm+yb8iS7XYS8uD/MKeUIo0uMFgm6YDGIBldyh/kTAEqsC/03RFAt3u5bZ -4Veo1HMU0rb7XEe3rgjhZX0SS7NfaTLH4yQjRXNPgQKBgQDirpy39v4SrPVEZkVM -Js78pSN7Mz0D8Z3NhBJDj+3LZIMeYA75LVrfrEdy/aNGrmwIBBOmKeIAZ3wt7K04 
-Rl6ic3Ng+NADEoiDZnI0QLvrOfDtqsfQ5UggU1AvXbJgubmaqhkm1Fk3CKaSXqCM -Wy3cGEr923VfMwyP4F0KEFlGWQKBgQDhM1PmAfv1QxXcd6CBMMqNNbxgp2GJQ7IM -jtxbM4/3hW8ZOskz34weycGOrc63BMHHwesoyTKssitz1NiCL8+28kuHNfech5Hh -pVumPM0T3Yo5OGaZOPyV/Onbu9knZvdnQ+AxKiUbQ2P6pw49KDccv5Z8jdn8ppZE -d4dB7aA9TwKBgGve8h8tP3z9p33UOTfi4+8gWsEfAfMlgJWzOsnB7UQz4Z2L0rRL -HAqCvkF/Z7qT+D7cSclx9uWAWXy1Oz0jZ5dg31APN1Wu9R2qm8A36fUnTnqbIZlL -8sXHP8o6iU80MpniRjPPK2FgEXxn4XjJTwqE2PLzA2i5L0osV7oZS5KJAoGBAKlB -UoVEaIHlccDJiDCQ4ytOf86E/qGT53XYyXUiQ8nc16s2q5H5Ke7Z5EfACeU0BhCE -hqGY1iVFo7Li/faayJrPcESnIvraSaI2DbPbbqbHkuN+qF3wnqZ796fWf1dF5BwQ -v6cz7b+X3sS8Wc9NMl7A//GNC5EAA2BiFZ5PYmexAoGASc45IMiE91ohTVKghuhP -aPDxqMe+pW8v+RjMt8S8nACjVa86ArwfP7ay1o0pbGOWztxrarOrDvFTdB5np68W -WcYZIusKepo/tacAYDv5/EoMobHAHp+/KlLeP6zCqBLK7LSp+g7w3LH7KRCPcfca -hVBUJrdPssNoOVsesKTB6RU= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCsx8Z8910NfJJX +TDF5JgdWrE4671MH40U43dkjxQReJGL9OIjVtkV2b9dmfByxb5DATyAnpYtJOzBj +z2q6h6vcWPIhDFhNioNv1A+kZBkX5vQyi4NF7E2PrQxsdpVfem8R6AHScgFuPhsP +mi5KGlP4PSlktU+Ll2HF2r+Nq62D+F+SEKA4nVCa2BGp4zlRKvWG9mLocdprPv1I +DCYdjXrn0I83kmiyrljL4Pon1gkkKl61cwS6TMZeZe1879EUEpuFtddBGoA0/qss +ke3UAD4/du/tAcQlVlQf3lq4H/k/f5LWEMJHxJbsdXHkL08j6NHa8Gwd/rO7wyGJ +MlvNVeidAgMBAAECggEAeMXEFsIJx02WNXNK+bX9VzZmEIgNb+wLDO71wy0KGoww +rXQBIfGgqGMRG9ARdm3XrrUgctYigHPhJUNtBSmklgeOaE9qwfjaWybOMEjPyYdk +lpgykIPWy2FY19AkJXM9hDS6YVHTci3zWHqbdKzmmRSXhI/AJIESlOyNDQg954pl +pLrI58NkJpRdmcmVIrHJeQWTmOq+VrIyhqBpgCTnONZDtkA1+i6acgPQh+O0kJ5O +A+KTNbQJPO1tmpiVuQJjNPUlfgBOt6vmgPLjkHQ19/E0IN8auAaGv+fMoQ4XqlYX +nuYvJoEOfhXJYHRWta9EFAJZyvs752LHlbFz5VKweQKBgQDVsGwPXJnBImtEPBJ4 +DUyxQX8fB4eUHaQU8d1FdKzaXmoVq6oxZSXQMUj1f+iRfTJzT1MtNe/q/+SS1d7I +eVuPK3yPGPflQFkuDuSTgWJB7UgX2+i8kKTlDadfJJNPTMMIZzFJrbGhhscVl7rx +IRvunR92gdlaUqeWxbUxzzvPawKBgQDO/b/lCdvRIhJcQ9oVDfVVkXX80KPdv8sd +2HmDojx9uRrZ0aCM5vk/AKVZTEoS+5fApbojzpRdpj6A/9dZcmw7G5lrotU/f2aE +z9XmDmV8TawZifRiQu3n90xnv/jwUEwSJR6hOZXXmBBK/Bit2GKiEEfHaNym1q3c +U+sWkxJSFwKBgCwFbS14+R/FdG2ZJoDe1IbLpGGDDpYfyRabgOb3E0jlHFucgrIs +US7jiFEy6XlXXlZM6CivLN6vmqn5Ly0Dey4yWjWsgh0TNYv8e3A7vj8wn6Jypi56 +ac3aEznRchtrB96qS4gPJUHOXyL+n/9ev79XVQz30Qv/bRDtZ9d8BqlDAoGAbhHd +k0wUyjcWEF48f+m2RlRdq5y/JtIwjqRoqakCBdEDCEVC3OqOLASJ6Nx6n3GOlvEJ +9LSLjOk0X6CswXHpP91DTkt+no9+0q06j2Wkbd9X3xTPEdmJbUrCJIGfPRtV+Ggo +y481sTm5oEZCUV+5w3ho0w9eFpIeTgWKA60dlu0CgYEAxkpYMpOtQ9GN6O73qcB2 +0oTiomEEq7e2nbFtdkVj/jvRJ1oDsbAn60noVCKzNLX0f5/erSIr9Vr/SGs1W9Hg +vUjCdkOD6RJSN3Y9irVuhdbOkKFQa4HYKrJBQqEelgCVefeCQ+uyxqbUZiVrA8Fv +vBuGtW9UCYEN7jt5kO8Qmgk= -----END PRIVATE KEY----- diff --git a/jstests/libs/rs0.pem.digest.sha1 b/jstests/libs/rs0.pem.digest.sha1 index aee2a51fa6c5e..d9a15ff150455 100644 --- a/jstests/libs/rs0.pem.digest.sha1 +++ b/jstests/libs/rs0.pem.digest.sha1 @@ -1 +1 @@ -C89AE2171C6A7E7985285ECF05755953288B45E3 \ No newline at end of file +317BCCD4BE507AC43E46AF6FA1A7B73EF3F382BA \ No newline at end of file diff --git a/jstests/libs/rs0.pem.digest.sha256 b/jstests/libs/rs0.pem.digest.sha256 index c7bcbd608099f..2f545c2faffd8 100644 --- a/jstests/libs/rs0.pem.digest.sha256 +++ b/jstests/libs/rs0.pem.digest.sha256 @@ -1 +1 @@ -BBAED3E7D623E89E015831F02FEE9B3D9BB55630B2D1F4B0463ABF96F1C36F34 \ No newline at end of file +D63654D8DFD75A5E8C237F55AE7B2B0F825778B7C719DD5C7BB850E8C1E8CF64 \ No newline at end of file diff --git a/jstests/libs/rs1.pem b/jstests/libs/rs1.pem index 1ce5ad8af7dce..540c5166a8d91 100644 --- a/jstests/libs/rs1.pem +++ b/jstests/libs/rs1.pem @@ -3,55 +3,55 @@ # # General purpose server certificate file. 
-----BEGIN CERTIFICATE----- -MIIERDCCAyygAwIBAgIEWAd1pjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIERDCCAyygAwIBAgIEeJ4FSTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBYMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBYMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDDAKBgNVBAsMA3JzMTCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBAMUg2WiApnL3z6XBZ5l5WJJkeNfOE3FMJDYTnQNw -OV6zWjsAG1Wuzi9QiH8HdyIDIk61l1qcAzfdecI02pTjm8VgLl3lFlpyXzcpazXO -bTxXqenCB4vSLIWNIMbD/qIzAxsMXjI+2xHg7TnGS5zNM2eRPt8RiNv+TfA99bKS -3TnQPCEO0lKafKR9EPmdUSMgEwHLEX64P+wabf8TYopS7eUy9nI1QvkZ9tphbGrS -AIrKQH8pU5OzObkxeCuMNjai/4JQ6XlEOvUMl0LOTLB4pfBc04PIokCdUmFRhIeM -fzJ4CKMLrtV083EOADcN1HlRf65XJED3WjjbjTYupSj35f0CAwEAAaOB+TCB9jAJ +AQEBBQADggEPADCCAQoCggEBAM3Zy8Ex0Rj99cwRtygAmNRlkbH5sKt1v0MFkkWb +0ni4FmoU+13DheUhhQGn7SnD00oh0sjACEBrMe/2lPhsFJoph5HuDAwLBcgoY0IV +46fOKN3sZcKpV6pkq8MqGEwD/pADsrfm/A/LyBQgIyuNOHK40ND0E6iQ8310TsES +Tx0H8MivJ17YRLONWKH7yAPnEMVN7zq0pfiAkrvlzvMIjJcYgh5q+DR5Drf+kz3h +4K1RG6aUEswXBEF5CywGGxjbbMfPcVE4ztYgke6V4uCdgjRp4/YsdS27h/t6vXYi +l5PyARJyevqv/g+5OErQi/J9szBq2ZKjA1j5KJfw6l3ex98CAwEAAaOB+TCB9jAJ BgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNV -HQ4EFgQUcWth71HW7KFa9lXxbGBmPDwt0BwwgYsGA1UdIwSBgzCBgKF4pHYwdDEL +HQ4EFgQUc0AkmxrNl4bhLNcLVlIeBl2aBwswgYsGA1UdIwSBgzCBgKF4pHYwdDEL MAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9y ayBDaXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNV -BAMMDktlcm5lbCBUZXN0IENBggR71KyTMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE -fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAQxxTMUk5Lj0scrdkG5kIJcOmR8EYJjF0 -OZ7/K0Mt5J1naYvB9WIaoDkR9QVhPqTaFMBOOImFHzU5ElVCSmH0QNtvPEGKxIpY -+M9pPfPGOt4Rk+Jq3ha4fXrJmMXAxJf++zR8n8Lfb/aa7gULk4lw6StlEfT5nLpx -VPS+F/VyPuSQfJcum38hwFl5Itm7jRTWqtwi4UXgcq+XnyIiJnr9k72wdzDD40Cm -qTHKbR9zuG7O7N+VqceJgZyYwgHiuxflPY+FPT5uD1hPvsUjNIYBtmFEwUifGPF3 -/Oub8jzFSIzG/W85UAKCSW7Nd+e3n1/RObB75kpUZgJMNzaGt0JXcw== +BAMMDktlcm5lbCBUZXN0IENBggRK3loaMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE +fwAAATANBgkqhkiG9w0BAQsFAAOCAQEARrzy+HQuvJMHO3klypBOKLVvgPzP70tv +uDOHZqKVJG9KUPhl1Bl+nxnqt8RLp0VXSilT505yfjpAQAfROVERY8M6xXsDPwnC +grj5Er1ehWaqHAn3nJLZd/k9QpeEq5A5fVJpmwKYK4Knhw8Sutpez+aMC5PWXytO +v3pfZ7sl7sGl1Nf568PKBdcGOeLm6TZ12s6MoCnUNErao0P72DDoEh1gTHuM2P1+ +Ctk85L38z2H+XWNYIUmoy3m7HEYy4VnKkLVqE5u6FHphF9rhYZV8AECPUnM7mSf2 +BncdCoa8jU8ZkZNkAtUngl7bBvMiVPuG7qbWrTvXns8HFTDIba1Eiw== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDFINlogKZy98+l -wWeZeViSZHjXzhNxTCQ2E50DcDles1o7ABtVrs4vUIh/B3ciAyJOtZdanAM33XnC -NNqU45vFYC5d5RZacl83KWs1zm08V6npwgeL0iyFjSDGw/6iMwMbDF4yPtsR4O05 -xkuczTNnkT7fEYjb/k3wPfWykt050DwhDtJSmnykfRD5nVEjIBMByxF+uD/sGm3/ -E2KKUu3lMvZyNUL5GfbaYWxq0gCKykB/KVOTszm5MXgrjDY2ov+CUOl5RDr1DJdC -zkyweKXwXNODyKJAnVJhUYSHjH8yeAijC67VdPNxDgA3DdR5UX+uVyRA91o42402 -LqUo9+X9AgMBAAECggEBAKJKeBLcD60JLg/12Vf2GiBzzTVGOKWHHSzUGulQxDqe -CAQZA2gYQTGc6LNELyV2VRFd2RzE4rVkhjCIGQiJFp55n9K3nx6ZmKAuXlBvAWmz -dnS3xEStpO/Sj5B0nbdlrgHfvdE6BFuExWlSQr/2BIMYhh1aL0WG6R2HtTbuSGml -/QDjPN1x8mDunzt01QZDhw8XyyZhMSPfVNrgeS20OAU8tk1somxJi0TlFfe8sje1 -v4npn2ACrr5efbjP+k12w1edny+gFHZT4zSt1ocSj2SAzwSfadPgHDdp5kn42xau -nc5/+ZcusBGVTsBKJGDxGrzE9to4ga2zaC1nVqiUrVUCgYEA+7XHlY7eAaabc23r -TYwpLafaCFSyxPTDERz6LbuPeEZGE1xDNj6uN2wKLMJm3Btb5akbTHuKixM8GH6+ 
-Jg2b04aKOLZOPmOWTmezxHmDmC32t9zHg9cGglKeLwjj/WUN39h0cepBEni03CCU -LdeFxR2qx0EmrYH9lz5h27Cm/kcCgYEAyHztaXdDHKry49tgoXgODGlVg1bWkmK8 -NTY5nyxjgQoDrZHVo6Np/VZ0T4GJj48e/ThebNnQN/MzdScerf/7OcA05+rfOwon -6y702BBF+Okvj38s5CamuKPOhvfibhN+xwf4HCax70TzE4VvZPqx4sz9QKwtv0A4 -hfwNsbHYB5sCgYEAt6zjUihpqky4XNfV/8WKeu1kNfYQaZauDXvWt66JN9wX4KLP -zPKaUIj/N8A4LN+uBH2NFReFdoz3qmw6NyxxDD69+DpYCEDFertDu9hsBY1s3qg1 -0ugCsPC2y12yeMLYCAM8na+yAlegqoz/dCA0Vi2a5EGPhsc3lLkJ0bUrdpsCgYBA -lC0NCcNuWoo+Zw2rNTahPNS0p6YaJP+mzD6nTO8IQ6eHozADs8GSPuTcL0eWHG3I -9v1DZq2xN/9dPyqctZXAtm7UPU2GgPD2yntHlYZUdRhCyp41J1YQp4MA6pvfBRyT -E7gRqU0rWzRJF3QYPMBL4+e3iz55GkCat+PZVwXU/QKBgAPJIHNaKW1njaPwtseG -2UTzpkctIMg3FWK1ArNiGZ4ZzJ/iFojZE5Ux6ll8DuSbbECxNPkVF+Sw2ljUXy1x -UElqmJCPvPf/sNiX86VrxxxRBvXleaeIOqnWSiNwCyLT2Q3cEsbxOYQXwnhhuEOp -1VKpb0ervdGaUFzMQ3SqyWWM +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDN2cvBMdEY/fXM +EbcoAJjUZZGx+bCrdb9DBZJFm9J4uBZqFPtdw4XlIYUBp+0pw9NKIdLIwAhAazHv +9pT4bBSaKYeR7gwMCwXIKGNCFeOnzijd7GXCqVeqZKvDKhhMA/6QA7K35vwPy8gU +ICMrjThyuNDQ9BOokPN9dE7BEk8dB/DIryde2ESzjVih+8gD5xDFTe86tKX4gJK7 +5c7zCIyXGIIeavg0eQ63/pM94eCtURumlBLMFwRBeQssBhsY22zHz3FROM7WIJHu +leLgnYI0aeP2LHUtu4f7er12IpeT8gEScnr6r/4PuThK0IvyfbMwatmSowNY+SiX +8Opd3sffAgMBAAECggEAZ1naOv1QBkQ7jGG4m5TxJnJ4W+sJEIzlojjB38EEQyfp +2Qj/y2vHZesWZGZzZGy1QJsKwU/o1K79O4WJ9dAN5/jB4DqsQb+m/3+Hlj53NmED +k6iEzt9G7H3u3uXbnQ7EWHoC2OJX23Zs1e3suokEtlSkQsTIufFMzsf+YmFW5zOW +Sp3aV/RmI6I0HaWKaXdeKyvOJleG7xujWRhb+v2w0B808vinziAQ7EWx9724Yyad +Q217Tk4McQt6SYDE89GZQAGPU5bxE+KxtRWGjkZjZjzpUT1Ca2FYm1bmn9w7fmmi +riJwYg3hgKWqrZQsAiyFaQSnfAHBL5SknKDYOudAwQKBgQD2zMTAnvrEza2czz8m +ftP1JYYT3LwyWdkvCgvTLVssO2afLCQVGN9lD9i+61BYcyuWLFxHjYFoyZHrtuzR +3N8Kn4K8G0tYbp/lcWEydDYGn2c5x5ZFkyHpVwaokGxhJM6Ryf7vBELvCFnjoWzs +vQqo2M/cUmeeAWSfwkQiKmvwKwKBgQDVhj8izHKlPiFGr37DI1djzgLKHAMtq7Rg ++0SQYRTqvQpqvaObCpv5Gw6AoJ85bCJn9X9RyBDTDqcczfTDIUujz1WdFUOhhArO +iXkwXN4aMznwV71OuEKTJ0oNn+qoAS4B3StLsbmGqzLtMNYCf0ONoQjXhv28eFYS +0IdWRQE5HQKBgQCxZlc3Lg+LT1ywDriR7zBkUmih9lTAzJy0l0AQt9WFYd1OJOh6 +0boiZtWhxnumWILIG3Lpa+nNbLaa/I4V+/HHLQlINUZfdoUw4+K95ZLAB+ynN7Po +pu4FUCzVFZsR6OcjHFNHc4S5VQNoACzPrsLuw4oGEKWyicXOoFFhTaTwqQKBgBAi +TLL8b808nJcSu6lTCk9oSZ2r8DGLP930HoIqU8JI9niKS8pzSsXP3ZCqkl1sZk0k +p7IuQ9cuzEHI7i2wXl7KU9XCvZF+wLJKgPW+jqm8JSyb9Jn2IlGsLlRJORnovOk3 +JQZbL4GVfgueHQ2jQB97g4eFk7aiAR760r8aB7WhAoGBAOyLXWJQ4nA2+8+Ha41B +wEqM1GZFYRXQugOdGiLk+WG6Uq+tlwWzHInCgKsKuBD6VaBamyVDM8jusFBst9a8 +moBWXcGlT4wDWrXAie9vDer/DVfpW7MKEOryeqF5MaIeC5we+uDjQyJ/8N2EJZjf +hDkZNGMnn1B5rqbbcW9wOilF -----END PRIVATE KEY----- diff --git a/jstests/libs/rs1.pem.digest.sha1 b/jstests/libs/rs1.pem.digest.sha1 index f92d4597b5593..53d6877006a4b 100644 --- a/jstests/libs/rs1.pem.digest.sha1 +++ b/jstests/libs/rs1.pem.digest.sha1 @@ -1 +1 @@ -30A30E41279453CAB7BB93E267138AD4BCAB4F03 \ No newline at end of file +26ED3E3468A6AFAEE9EAE9EC1A19FF67621CDA81 \ No newline at end of file diff --git a/jstests/libs/rs1.pem.digest.sha256 b/jstests/libs/rs1.pem.digest.sha256 index c2cd2b7a388ba..11ab08011442c 100644 --- a/jstests/libs/rs1.pem.digest.sha256 +++ b/jstests/libs/rs1.pem.digest.sha256 @@ -1 +1 @@ -2BDA71253E3ADEC8A74B7B1B8F4EFDBDFE4D89B7DBD23C64AFFAB318F28D99C0 \ No newline at end of file +51B1A69A8C044A301A521DF56BF8AFEB7A1804990BFF93D6BBAF498925CFC5C3 \ No newline at end of file diff --git a/jstests/libs/rs2.pem b/jstests/libs/rs2.pem index fd0ab632cd87c..237f0f26a83c1 100644 --- a/jstests/libs/rs2.pem +++ b/jstests/libs/rs2.pem @@ -3,55 +3,55 @@ # # General purpose server certificate file. 
-----BEGIN CERTIFICATE----- -MIIERDCCAyygAwIBAgIEV+C4qTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIERDCCAyygAwIBAgIEEPb6fjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBYMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBYMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDDAKBgNVBAsMA3JzMjCCASIwDQYJKoZIhvcN -AQEBBQADggEPADCCAQoCggEBALFJkRAZB3R8AWw34dm2bLSpnlybSF6ayRmL+8kn -I80BFFsH5sB8oPuhtqOslg0sJzemBtZVEv5o1zvi0pJ1KaifhPzRnwFLdpiWMmik -0Wjz3HSh4gepKF2it5iojkwkXrKKL291RXfybO3xKVMO+AiDg/hYtckAdJqZdjNJ -3tfgo3n4EKsgYc+EF5ycmERhGdvBhsuFMkakaT4KQtldmTxx/TD4NbH2pmp3G4yp -70KEEAWOt5TowE/VD1WEJJwA0BxYTckGgBLCXbKUSk8gnPXkM53WSMlTc5G/GMc2 -GMjFoxudfVHNiobLh9y/X/PvlpRfQlvCrHB0nnnaztAz2UkCAwEAAaOB+TCB9jAJ +AQEBBQADggEPADCCAQoCggEBAPghGUGLksW8CIMMEGg5NBJjQk7BhTBD4MG5wFMj +k7q+eS0F6ICSn18BoquSzDrHQ3Ts+Y0hQ1B2lexKI9TkpMZgi6Kxcn0BlSG+z4bI +9AN06fykYmkPwZz84tX6lnB+UDkRXYZAbdDBO3s6qjhw61Az+l6Cbe0y80DIrXWE +9XDqxsN/M02stwrgECr6oJaED+R3vpDlDPA93RpvyX3zSL6n/SHwRVnlqejS7W85 +jfF6fX7iM9qw29oS2czoO2gHhh4UPkQNNqYhgoKuYIAyrIqw4ARIfQGSKEYNSXzB +Exo21AMkwaMgJXfjAi8ErIW6Vpz/0Yb+s9wdMLU6nZGHQ20CAwEAAaOB+TCB9jAJ BgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNVHSUEDDAKBggrBgEFBQcDATAdBgNV -HQ4EFgQU/aaFMGpvVWcpjxvMoih9yt5vOJMwgYsGA1UdIwSBgzCBgKF4pHYwdDEL +HQ4EFgQUiOwt8xofVJDvI9pbVvESreNCVYgwgYsGA1UdIwSBgzCBgKF4pHYwdDEL MAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9y ayBDaXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxFzAVBgNV -BAMMDktlcm5lbCBUZXN0IENBggR71KyTMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE -fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAvZKEFmva83RGbjwJy+RwmOnW7UjxUPIV -ztp595BznO/m07XzqeB/eR11YGF/O6UjW1tH6pIs/tBB/Pm7GUa8xzdxf49b+I2d -moEOTNrplot4ssQdqwoX1TaGxcGvK2iIgK6fn6nZJh1nEeafISfMBc/AnR9wNZm2 -Aw4xiXuZp5kjBc4N+wPzgZKck6Gq5jSsFb6i4QYNzAfcFsED5BI3PIoqEUcIFOwI -y2Aiei/35hCT4ZeBa3q8EF0xEEaWHF++aj2+3LdfcGoYvCLBYKuVwW9H2iDNVqQE -F3kcB+40hMm/OYmVE1MPNG8BCjBBC8n9Q1SI1JDAIvVc79vcajVKtg== +BAMMDktlcm5lbCBUZXN0IENBggRK3loaMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcE +fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAaEOmcrRqKBSf8cn76sGYl8bhuhGa5AAM +84f7UkR4IQkZi8LylXpuiByd8Bb5uzfkfesE9kNYlx/aI8+wYg/di5wbkjSpsYyA +6hWBXGX759S9BcosnbAvuExfhmaZAd3j1V0j7K4Mjjs7vEdeZQaHpJ6WoXGnPmqB +cWqlLNYEyxwFG4+BbIlmSsZ+xNF5f0eA9P3rdj0CZNHVVRbYUzkqn4FcayzOhENa +UVvDpssD6NLCwy/cMAn1ePc5AsRpvInR9SMl81IX35QT8rb9YbOGLCCyCbUQL5W1 +xDMhFv2sNhGZQRrBVB+ftMQ51nN60p6JBu9OBFmIZPuI1FvkDYjvvQ== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCxSZEQGQd0fAFs -N+HZtmy0qZ5cm0hemskZi/vJJyPNARRbB+bAfKD7obajrJYNLCc3pgbWVRL+aNc7 -4tKSdSmon4T80Z8BS3aYljJopNFo89x0oeIHqShdoreYqI5MJF6yii9vdUV38mzt -8SlTDvgIg4P4WLXJAHSamXYzSd7X4KN5+BCrIGHPhBecnJhEYRnbwYbLhTJGpGk+ -CkLZXZk8cf0w+DWx9qZqdxuMqe9ChBAFjreU6MBP1Q9VhCScANAcWE3JBoASwl2y -lEpPIJz15DOd1kjJU3ORvxjHNhjIxaMbnX1RzYqGy4fcv1/z75aUX0JbwqxwdJ55 -2s7QM9lJAgMBAAECggEAKengl9OzBYEDvRgBFz2fuZ6YSACWPXeOr7F+l3Hfbuog -a90UWtGcJaF30n5NA1Q6+VcRKr72PJuAtzHK4sE8VhdQk4zjcKTspuprhH3i3FM6 -/YAANhtx5aFAkqDW831tvfwZdtpc0BzKT/7B7FcPoMOnAaHHaHvpFVvOrBV8z8Bx -Y06fs7QJH+R9DRLK48hNQ7ezuzSdOyJM96UGdAH7qt73tFBUkRA4WHjMBuJAnVSd -hc+Qh2a39i6SC1hAA6AtNwi8QD6C7NIvdZwisVv8cuSFyJc53kM4xbGzm1FopZ3m -QmDyJFhSatCH8IVdbwErDixziQkVc2I5DGCXShf6mQKBgQDZ9NKGceu/pxcC7Qqy -GFHWv22MVRiIaA0DEM4lFsbhpEtNA4cKVLpFjY/jzZvdIePPPj2OvHVxssrGJJhL 
-eK/WP2yUaWQ44wIqNnsns76UnCTreBLNqItHNlggV5tIYFIn1uHUeCg7HhRsCwRq -Fh3WMQ+z76C3GF5cu9VDwwd4mwKBgQDQO30xGHZYqrqnrWyw5NxjdiDmWzCMIhO+ -+DsOpDaPuleeT9/bEyLHOZvVcUub5i6Y1itZtAOgFnBbuxPozakPa9/fbN9TdQ+a -jj8hyggFZSG2PDexXQnAJwSbXT96aGi1/CVtnLP4N6kmtO0wjGLq3JggyZLl+G1K -UvYux0MZ6wKBgQCqxykaDTNSlULzmPaTe3jMkmvs1FSHtTfU2hscdo9ZCBm0e3oZ -PzBBBV3kehuRlldg8HzdVY+UiipWPSBTqnnknwed9kKATGdK2it/fVhsKzjGg+v/ -1vtizhMZLGvQtyBIdRe4GnozcCtCSROpaDDyvrh2HeHI9UAi206MbMly1wKBgDqK -zwqTfwo0jZ+AkVM2NIO9/UfmEUMEfZqt/SSDpFdKI0H94Midm6R5HMeoP4KH90e+ -xpdEldRXGqWfddx4nXQZdupAmJTFD2r7XOJqA1FI+m5ahanWp1wfXBs13xfR7MZl -Kjyj1rENLQAV061XeqPe+uIU6bi/3DIOGupR2RqbAoGBANc3mLU41tA3mFbWhifP -30XdZTMffoc9iaQ8GdgceYwZc9WvOF1URFweNdUuRRRup/X3yPanYxjBs7hhpY13 -+rIOClCvNJ53eZZwL/mXTMCtIpJ5B2p7dIHY/N1UOLiOMNC8wJMloqPL8OTYZUmv -Rqmx8CbeoJMcMc21Wbls5RX3 +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQD4IRlBi5LFvAiD +DBBoOTQSY0JOwYUwQ+DBucBTI5O6vnktBeiAkp9fAaKrksw6x0N07PmNIUNQdpXs +SiPU5KTGYIuisXJ9AZUhvs+GyPQDdOn8pGJpD8Gc/OLV+pZwflA5EV2GQG3QwTt7 +Oqo4cOtQM/pegm3tMvNAyK11hPVw6sbDfzNNrLcK4BAq+qCWhA/kd76Q5QzwPd0a +b8l980i+p/0h8EVZ5ano0u1vOY3xen1+4jPasNvaEtnM6DtoB4YeFD5EDTamIYKC +rmCAMqyKsOAESH0BkihGDUl8wRMaNtQDJMGjICV34wIvBKyFulac/9GG/rPcHTC1 +Op2Rh0NtAgMBAAECggEBAJujh0gJ2GKXc29f3drCJttxz2G/GWAQ86YQcYa0xjFY +0IFhN1mOntDlIbglOF2WJ8Xr7wyLVMHf4GJ6jsc3M/QLEwtV6s5mc7jbCyksW4Lf +jvWd1uy3qWZjaZV2vW6KjJ9/OP7A4yESU9EUFzmX+sIwGFe30GP8lYCJX+RbJelQ +3m8QwXVnsBuc1bBA5LPnmncNE4jl1O2sn+CZXLVoAxAsfr9D3STsUYREIBjxXMLa +ekqG7JdYvh4pBoTBmeeibsCM+icGOJsTbsXBr39aKXXMoxnXRrdPr+VwYL1nm06v +WCCULs/CQOt8e3QpwARqAuVU8KrP1CkHMiVKTTNcXgECgYEA/1gmdDegIU9Qodz0 +W2FJYMmjWhK0EIT/cv+G+4L+swnjqXjoVeSsao/NnVoxZCOJC9iQ5jyXyPyJOrB3 +6wvHDhiPA4BSgOz7Pk9ORcRSam4iAF5ayQqyBBecqNjVTG/mpK0k8r8VCxhTcVkw +eyWHvils9vHyLZmrQ6a86lnI080CgYEA+MQ0pgnC6qe53pE9kIfg8YXyC1PpG/uj +Km10EXgZRjeY0NWg57S8KMAwDlzpvNJoIgf1CsUZQ4vc8PRId1yZNVGlpUE5OuJM +8KuJ0iPD59aF5oIEt2LFSxV0GfDdpctzsGUkgR9Fz8gsjh22LNqEmn+IDiwf/E17 +8ek5DroJziECgYEA0fcoV7RN5lYUFaVdAa8z6XSk8DIlAth6PZPE6RhEW/Cr1fVo +BwkshcuN5e/+YTufO7BvJgN5RHn0VeA6G3rpybuEDfr83KsBxbDsyg2vP3kkWG09 +9cbnrtnDpkv2yQ3S+GKv2TQrF55/LtuKNBkeT848natB2yFbZRu8iHAf5ZkCgYAv +ywpOaAuxp323O/I3BVP7Mv+4m2tqu+KyMQsenBNHAUFzxcc/HOMVdS8GY2VoqIfn +Gd5DLnDXIFTXWidd/0VUEBah9DD5liawClTVMZfev8FD8vDTBr/e2AVRQoxi4P6e +AiCMSiTQcaXVu/GXFnhpcl0CVAnlIEHSzTrwEF/34QKBgCgXp2WGZ9Fa6akH3pwJ +VgmBGbiUH/LMHj7fmKreprL3p9T+7tkfj28+NBuRdDh544A+SYmwuF6gd4yu6WIw +EW7VuqiHeP785pxSHrUvwav5ja+ii5uKl2qgCtkNRSQUFckZr+1CHfdGbeU4CybU +rV8BllDApj8GdVXJNUspBPCf -----END PRIVATE KEY----- diff --git a/jstests/libs/rs2.pem.digest.sha1 b/jstests/libs/rs2.pem.digest.sha1 index 04f9431cec5a0..d417978e2cec3 100644 --- a/jstests/libs/rs2.pem.digest.sha1 +++ b/jstests/libs/rs2.pem.digest.sha1 @@ -1 +1 @@ -A78F67FACD85FC28B02AF28515C1A115058C2DC3 \ No newline at end of file +D5001B67C0300AE043ABAC72FF5B20562F1845A2 \ No newline at end of file diff --git a/jstests/libs/rs2.pem.digest.sha256 b/jstests/libs/rs2.pem.digest.sha256 index 6017cff83691a..45d2cd2ce7295 100644 --- a/jstests/libs/rs2.pem.digest.sha256 +++ b/jstests/libs/rs2.pem.digest.sha256 @@ -1 +1 @@ -F993BFD896A91AF8FFB082B7F13C20E1F7C693B436A9D257B9C236FC508734C6 \ No newline at end of file +2BCE399B3F536417B055D303378D8614EAAC1F2B285A3EAAE3DE3C9D4479A815 \ No newline at end of file diff --git a/jstests/libs/sbe_assert_error_override.js b/jstests/libs/sbe_assert_error_override.js index fb70a8e2337b3..8f752e3088add 100644 --- a/jstests/libs/sbe_assert_error_override.js +++ b/jstests/libs/sbe_assert_error_override.js @@ -158,7 +158,17 @@ const 
equivalentErrorCodesList = [ [40393, 5153212], [40394, 5153213], [4940401, 5153214], - [40390, 5153215] + [40390, 5153215], + [5787902, 7548606], + [5787903, 7548606], + [5787908, 7548606], + [ErrorCodes.BadValue, 4938500], + [50700, 5156303], + [50699, 5156302], + [50697, 5156304], + [50698, 5156305], + [5155800, 34473], + [5155801, 34470], ]; // This map is generated based on the contents of 'equivalentErrorCodesList'. This map should _not_ diff --git a/jstests/libs/sbe_explain_helpers.js b/jstests/libs/sbe_explain_helpers.js index a405efc1ae54d..92d27adf5b947 100644 --- a/jstests/libs/sbe_explain_helpers.js +++ b/jstests/libs/sbe_explain_helpers.js @@ -3,10 +3,9 @@ */ // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); +import {getPlanStage, getPlanStages} from "jstests/libs/analyze_plan.js"; -function isIdIndexScan(db, root, expectedParentStageForIxScan) { +export function isIdIndexScan(db, root, expectedParentStageForIxScan) { const parentStage = getPlanStage(root, expectedParentStageForIxScan); if (!parentStage) return false; @@ -27,7 +26,7 @@ function isIdIndexScan(db, root, expectedParentStageForIxScan) { * Returns an empty array if the plan does not have the requested stage. Asserts that agg explain * structure matches expected format. */ -function getSbePlanStages(queryLayerOutput, stage) { +export function getSbePlanStages(queryLayerOutput, stage) { assert(queryLayerOutput); const queryInfo = getQueryInfoAtTopLevelOrFirstStage(queryLayerOutput); // If execution stats are available, then use the execution stats tree. @@ -46,7 +45,7 @@ function getSbePlanStages(queryLayerOutput, stage) { * SBE, then plan information will be in the 'queryPlanner' object. Currently, this supports find * query or pushed-down prefix pipeline stages. */ -function getQueryInfoAtTopLevelOrFirstStage(explainOutputV2) { +export function getQueryInfoAtTopLevelOrFirstStage(explainOutputV2) { if (explainOutputV2.hasOwnProperty("queryPlanner")) { return explainOutputV2; } diff --git a/jstests/libs/sbe_util.js b/jstests/libs/sbe_util.js index 90451a646f764..02caf007d628e 100644 --- a/jstests/libs/sbe_util.js +++ b/jstests/libs/sbe_util.js @@ -12,7 +12,7 @@ load("jstests/libs/fixture_helpers.js"); // For 'isMongos' * If 'checkAllNodes` is true, explicitly checks if feature flags are enabled for all * nodes. */ -function checkSBEEnabled(theDB, featureFlags = [], checkAllNodes = false) { +export function checkSBEEnabled(theDB, featureFlags = [], checkAllNodes = false) { // By default, we find that SBE is enabled. If, for any node, we find that the classic engine is // on, `checkResult` will be set to false. This is done intentionally so that in the event that // we check all nodes, the effects from previously visited nodes will carry over into the rest. diff --git a/jstests/libs/server-intermediate-ca.pem b/jstests/libs/server-intermediate-ca.pem index b9665630db88f..b591a9a91bce1 100644 --- a/jstests/libs/server-intermediate-ca.pem +++ b/jstests/libs/server-intermediate-ca.pem @@ -3,74 +3,74 @@ # # Server certificate signed by intermediate CA, including intermediate CA in bundle. 
-----BEGIN CERTIFICATE----- -MIIDrTCCApWgAwIBAgIEUjJhqTANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV +MIIDrTCCApWgAwIBAgIEH05N8TANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwPSW50ZXJt -ZWRpYXRlIENBMB4XDTIyMDEyNzIxNTk0OFoXDTI0MDQzMDIxNTk0OFowfTELMAkG +ZWRpYXRlIENBMB4XDTIzMDYwOTE0Mjg0N1oXDTI1MDkxMDE0Mjg0N1owfTELMAkG A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD aXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxIDAeBgNVBAMM F1NlcnZlciBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A -MIIBCgKCAQEAszRjtWB25WU9jQus2tifRDECbm0S2dr/+JNDN+af1gyTlun3gHVw -tWTwtln1CnKoifoiAnN69RW43Z7Y3/s6OC8bH71QWUpwPAEoIkQqj4eTBTCu0vbR -lHD5asmr3zrtcyS4Lp26CD1zFl+oOOW3rRZpPh6qy4/OsYUUiNEnSm1rIRRLiMMz -LJLRDBQTU+TJW2HSM4UzwlqGIblWGmHwtu7l0bv+n3cyKr+xbm5z9jTsi7Siox3P -mwanUR+f9EjWcmRG7wCRqeZ9q5EX4nLr6AGo/1WWrn7kH9BXcY4GFjISbZsYJvGQ -4w37AHboxmIeQlkGsr+GKYAi3zhuNFB/7wIDAQABoz0wOzAdBgNVHSUEFjAUBggr +MIIBCgKCAQEAykbosPvg/KsB67glkMfKYwnpaE8wH66C9HHe98wA4hhYT08w57Dy +vSu8R+to2YwkFPXQPsBrDjOeQ6DrOUumelvy7THcNBNlB1Zzdix2XS+on3dgFUXw +lMqMHECwgOsMmFRq6fhkS5jINGW2mKBNqmfGmrqFd7YyN+dOzdDdJZufCTNvxQsv +GJbPbXkFIcq2oEgZgHpAAYQJSV8cKeCVMGvgexPKPEBasmHww0ouVEBFPKckE85W +i+fKy81KhYtcFJluSZbfcaaOs65Ka5Efbyh/BHMgBPuMdYBZKRvFCYJ9xlXA0tEo +u2XAXDBqKpvqnNkoI2uIbeuWUIf1r/UuQwIDAQABoz0wOzAdBgNVHSUEFjAUBggr BgEFBQcDAQYIKwYBBQUHAwIwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0G -CSqGSIb3DQEBCwUAA4IBAQA0u5Ryv/jMBbnsab4pCwVkY/r+NhY9Cev+IQbOP/op -ig72quZP16DLP0NwbSo/nUPlF9UFO1q1qVTLukOqANmbmYhfkbJJyQl8MBiaV3vg -kz89dwBYTgiQuiKm2oU9vGDV6uEOCSeG6e+vs/pdcqaKXif2Zt4Vxv2vbxKWOQRI -hMBvgCJz5I8JAcH4fTHdrj0cwvNPphnZ0gzDIBL5E2wiluna/fh4M6lnLiztDraf -s0ddchZ2lRHsoz1JbvCsAg9xUC0+RqQA+yMhfNqf8CxiMtBAIaEc8hNOvuSCbER4 -jZ0vAcXWXtANizGV5XKczpPZCQvNetnDGcC3+6dPauAD +CSqGSIb3DQEBCwUAA4IBAQBmbBYSYbX313xvzXXwZKW2ee+5/fs8LKNSkuxqoDv0 +gEoOPnx7UkC0skVAsYI+EvRuJ/yMxg1+kRfNvo3lLffXaIRXnLFSQ00EMBE2kxm0 +p2mRv+OZd7eaPuXjNaO9kSfWR1PdgZdOjYnGqhekZMSQbsC1KTlVOk3UozUvGV5H +OTJPhkdRAYNC7rkdu9xjdHD+kwsgdpwz8QVbTsPaUZcopNWmiJJC01XUsOmPDWdJ +kwlorBzehDSOqLKfqZYpjuyWeaPMyBYs+0pxNoNs+M4476zqGY4ny/nCmDmj9VE+ +AhHjVyna2Ur8ifUn0OWcWe/bvgGxLFTSbntQMkIhikwT -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCzNGO1YHblZT2N -C6za2J9EMQJubRLZ2v/4k0M35p/WDJOW6feAdXC1ZPC2WfUKcqiJ+iICc3r1Fbjd -ntjf+zo4LxsfvVBZSnA8ASgiRCqPh5MFMK7S9tGUcPlqyavfOu1zJLgunboIPXMW -X6g45betFmk+HqrLj86xhRSI0SdKbWshFEuIwzMsktEMFBNT5MlbYdIzhTPCWoYh -uVYaYfC27uXRu/6fdzIqv7FubnP2NOyLtKKjHc+bBqdRH5/0SNZyZEbvAJGp5n2r -kRficuvoAaj/VZaufuQf0FdxjgYWMhJtmxgm8ZDjDfsAdujGYh5CWQayv4YpgCLf -OG40UH/vAgMBAAECggEAYdyROsJkC1+fHkAq9BPp3aZZ6o28BqZWtJtO0N8rmAuk -KXy5QThhBV4LKbm0XDFDIp4rJmWD1wU2wCf3zVD3eSkYSdvrXeeNpOcI0LWi/GCQ -A/yS6/gHeWW8fvmE11Hpy8BYqHRP3CIakDRKvE+OX4JmlJrQsHtm54CCNzjomPZ+ -ijiL3RTXCyc4Jitt465MqI3AfKxPOIxIkblHuVU7C64qZVPHJE6qqeFuji7psiBt -7N1cbcI0nj4ntCXhMOaufSPq4BXeRINEX1lrwyVXJZJhz3MEXF5+c2pt4Y42SJFM -YEiBNMXzTuAfanTRdj3y1/Sut3YWRYLOA0l6qEZRoQKBgQDueaif5QiS8yWLWPiZ -Nla8xL62bzxpxc+74U/OJ/RVKzei81+DKWhHo7uGsiJugTkaoBWogknaFo9rl4va -KaOIltRsnNUrvjZi1CxhYovJEhkgVPBL97YO3p0XMl7m72qsDTfL8OJHZgF2sQpd -0/wnd9vgjdFQjaHr0fnjgx6K3wKBgQDAX7JMhz9rzL3ig0rUZni5Ol4AsIAgKtNp -+pXa7p+BJQEAVa4H2mquwLKa2t9yPXQpjNWGJebu8Mr+6FqaZyBejAY/793/7+Ww -ZlkqxnssV7QbRK5EuzaY/4du09Z5quYCFjgKL8kO14vIFMR2GGgWfnDw7LWEMBXl -zpDdz+y88QKBgQDIoq6OKAG4sLzrqiUtshvzoYvarWekjfqiVYPxLIhSh5O9kwjO -ry2+6DBDuOdjFXFXx0uxhDxiMgzkNpJBMDsYFd4OqzxmGxhiuaPFI0X8Gy/slcm+ 
-AGC5ze1YsNZLcS7MJirFeJrH+zhMBdN08X76Lctd97MdFKwgXX7iPVSa/QKBgBlC -98jw4bUoN68tCoCFzEiWj42Ln+eZeBWsoE0VlMCCuXTvy2Cgn+2+xDVtlHYN99kU -810gUQkDecSrgmpQLIH95Tw0JXTcbc8SQZdKZRZXlgEWrcO8ydihhYlaLAniFT4k -6Mr9p2tZhrOJdpsDbXe2tuIMU3G+VsHMtF1MVowhAoGBAN2iciuWzfnXI92yGfxW -9MV0Eh8A3OeFWPel2qWKaQ75ehBO8EbBGQM8iLY7IXHVQkv9Iav9e5JpHhbFf6eP -i9uPz5v91idOfmubGhVmlQPG+iA3yRGnys1opU+daEVyMaUTWK2z+jC6gPlMK/UP -HQYfaJnYlK3rMz5btAv8FXNz +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDKRuiw++D8qwHr +uCWQx8pjCeloTzAfroL0cd73zADiGFhPTzDnsPK9K7xH62jZjCQU9dA+wGsOM55D +oOs5S6Z6W/LtMdw0E2UHVnN2LHZdL6ifd2AVRfCUyowcQLCA6wyYVGrp+GRLmMg0 +ZbaYoE2qZ8aauoV3tjI3507N0N0lm58JM2/FCy8Yls9teQUhyragSBmAekABhAlJ +Xxwp4JUwa+B7E8o8QFqyYfDDSi5UQEU8pyQTzlaL58rLzUqFi1wUmW5Jlt9xpo6z +rkprkR9vKH8EcyAE+4x1gFkpG8UJgn3GVcDS0Si7ZcBcMGoqm+qc2Sgja4ht65ZQ +h/Wv9S5DAgMBAAECggEABXwQuSPLQO6eGbcfhaJ1MWRGaOakxmcj897Wjd+BMqA4 +XMYn7FBW9Jwn+cc/S47KN6tGnzz2aicqJmlJl38en3i4yIeukdboyV8luFGSUAqH +xvMkrs77q/0l+WojnwtAhyzazUdH6OjWnN0ZK2BFWtZ/gxiFn+5XxD5IW7pLFMbs +B9a35zozJlmflj/skcyOIAn0ghvJuabwI768snnFW/HCiN9AhmkProMdN2bVTXmQ +oDAh1ZTisWk4s5v9K9g67eLTJDrioS88c7X3Io6TWzVlqE9sczUQ4PYt+pMH6Asx +yY3712U6G4P/jYQGJufcNv1ys10L5Kqz4ry4y1ohaQKBgQD/Xcj/9whM3GPkouTC +Fn/mBVh0BjDyvIS0BokoMviGOtNutXD/3nDgzJypJMad2h0u7yYzI+K2WEhXZKDj +tm7dCEorgN6FcHvP/R8DaGhcNcCkGScR0TAoAKf91791eIodRLHv14sr+E52PRY8 +2PcoG9f6mNow1y0nmRbqkDSKLwKBgQDKx2ZwrUtLCKgxi2fqoRmOZQuEC79QOz/n +Phl0cCPeTpv6G9D6PnbdN+p8tEKicMrDEeV+csUfFBXauHg12gCUpAsddr6RBs4F +T8hZO6xy3j/TLpAWd5LoRSbhMBSkaJ43xdCQGTqIrzHubGGJPcLaAf1VL45shPAy +3faDZ/BcLQKBgQCKcoNF2t2CJj52N6ZEX8Rppd5F0RF6mKBtpdl4lOHOjFgS/oXM +AwI5rlUFEu4nqJgH2RphwYPpjkVyNBlSO5cxeIwVt+FsgQZvRfEpfNKPo4jUrtpS +u5IbLffmFLE1c+uVYKgDu101soJ/cjD7PjoJYccfkv1AW3icrlARTateewKBgE2y +P28XB011wLRTHPsKxTcJQnNS6Pf968tXwPsbaLLqXdtkQxtPSc4TebZUf1+sZhR+ +S/e6VAtHb+RBYFYJ9MT/Yf7lG7mH6PKiEjsWoUnWHJB3O+BP6qsyq+YGvTINn+nw +0qwT7pimwDQBtRGX54wOzRmRwjmUalCjOaw57B/1AoGBAJZf1KOwvz3JZAC6GU78 +9k+vXNaGBRF+mSFFZ6k/p50uLFgI6H7Q0ueFIezbatN+DOZCXpomCv3hadp6iAE5 +YUnfESTXw4mvB+aCQzX4gfkMw116+xpYirWJdZ59u2xNJPD4Q9yDOekHDqNl98M/ +tt3x6rtgvMgDCQ0lH9rQk9jO -----END PRIVATE KEY----- # Certificate from intermediate-ca.pem -----BEGIN CERTIFICATE----- -MIIDdzCCAl+gAwIBAgIERt21mjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDdzCCAl+gAwIBAgIEe6nR6DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ3WhcNMjQwNDMwMjE1OTQ3WjB1MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ2WhcNMjUwOTEwMTQyODQ2WjB1MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwP SW50ZXJtZWRpYXRlIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA -uLEuQhQ7IQImAIKUMANKaR+4/WLf0F03p5RDlv/TCNdEXTLws03x7bppdv4Y8r6+ -oChy+8rtyA2ckblb0z0OeMlsJY5a04eUhrZYeG4OXn1QuvUqfXl++oBlHnWUD3xG -3v9oPKMxGf9nr6JJXBCeG3owLR9Lbr3QS6Pvz9WwNZGpUVDm/QQcKvbGHmB9fE7/ -RM6IgxtagZlug5WUCTT08tsLfb89UQchCAjO9eZvDcENofXcnsJWImJdTYDlquMM -DB54R9cqoLtDV9NiPVYsjCQ1BgXYMxeG0K/T1rWQY4uB132Y3oFy3RxaDT1BqdAO -O6BOo1AZNyIYIPbt0+Rp3QIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3 -DQEBCwUAA4IBAQCSOAjf6Qh0JnAOK4TqPYt2VG8HEmumxKk/HvIKrk9gbBc6RoVi -Z1M/gxRz82lnmJvIPbsJk4AmTsw8Pnech8Ujeahw0ybRJbLs7FA0YrHg1B7517Tr -eZl6TZn7o5+3HKKcCPpoh+P1XrNWP21NsY75T8sTN0BQ3im3aZDApgO4v0YFqWl0 -20YOdrLk81cJ8Znjdh+/ieR4uPH06CbXjAGPAbB+mnEWMNLlV2WGsJtDCHYM+wU0 -zd0wy2KvqMBbr014v/c4jmyeCBcmgxQ9Q8ATWbys7S5p0CFB6+zeV/2Mb32lwSCM -+Xeg/ms5ZGQJY5fIznwIg+Osg1zGvMF2Rsq9 
+12Tv7dcfDmz2/A2bquC4GIPqMHHf1l1cRK8mOydwJRFmzbc4MEFgCmvhURLAE6ie +B4ghfCKpZqD2kO/GtDBK7isMxur14NbKKKFXnwPreSBknSTccJ+8iIvxK+wni+w0 +Ox/Avr4byocV0O6WJ6JEvvcyNbBk+IWsTfNbLZ32/A6WtraE5q2vIZpN2bNEtJe9 +JVu56wI95zcAZmnz3S1RtLVvT8XqmHnCUTpN+5oJWRBTr9pScQNjicpKo+GST03Y +j5KaI8B4cdNecldgqbmebcL0m3RGfBKgv8AEqsjdqg3hvD0rXUpeiGKoXMjyzmlv +OCsQtgP6azneIZRt6MQMsQIDAQABoxAwDjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3 +DQEBCwUAA4IBAQBWWBPrsmerwScxU3y1IVGIOoI3hEBCS9t+BzYHpxuvaSHjyYiZ +e1MLgxt4FAbHu6WMB5T1rkJgrGUluCdctxXMg7Ak6d+hVbuBAzAV44rEw/yVGLGV +7FvMOxYh9e+HFTq1iI8kSmgDCKsTww6kfE4fs+FI3fCXwhfy3zLlAlBYoqV67bVF ++Yd1E75kBNcAuyY6Zic1N1BI6f23npvY3plQp2qWjhdGEUb76CZSXrEZ3P9q817O +D27YiPP6uhy5ypVnna2jmTnJ5M2EZ01Sv0w94pz5jUXSi49FRATMc73wYl8bSvw+ +swyDhMJMHUeTPr1deiB8SVdzVsOZCd5LQeuz -----END CERTIFICATE----- diff --git a/jstests/libs/server-intermediate-ca.pem.digest.sha1 b/jstests/libs/server-intermediate-ca.pem.digest.sha1 index 2bdaf850a79e5..21672d91b5fa5 100644 --- a/jstests/libs/server-intermediate-ca.pem.digest.sha1 +++ b/jstests/libs/server-intermediate-ca.pem.digest.sha1 @@ -1 +1 @@ -263036BD7986055530468D3980687114823C2687 \ No newline at end of file +6FE8D7E03541D54ADB167B0F3F7C0C4062BBBBAA \ No newline at end of file diff --git a/jstests/libs/server-intermediate-ca.pem.digest.sha256 b/jstests/libs/server-intermediate-ca.pem.digest.sha256 index 077a772ea559d..61024d569dd3d 100644 --- a/jstests/libs/server-intermediate-ca.pem.digest.sha256 +++ b/jstests/libs/server-intermediate-ca.pem.digest.sha256 @@ -1 +1 @@ -57E7FB1DC241D6A3D88F71929D13EB828ED62F05307FC34A88CA712CD54D26EB \ No newline at end of file +EDBCB6ACD21542C3E8E1AD2A5B9F68049D8C6804038A8E4313587B56E4255973 \ No newline at end of file diff --git a/jstests/libs/server-intermediate-leaf.pem b/jstests/libs/server-intermediate-leaf.pem index 37913546525e1..58e088f7f3412 100644 --- a/jstests/libs/server-intermediate-leaf.pem +++ b/jstests/libs/server-intermediate-leaf.pem @@ -3,52 +3,52 @@ # # Server certificate signed by intermediate CA. 
-----BEGIN CERTIFICATE----- -MIIDqTCCApGgAwIBAgIEJCpSyjANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV +MIIDqTCCApGgAwIBAgIELD7RAjANBgkqhkiG9w0BAQsFADB1MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEYMBYGA1UEAwwPSW50ZXJt -ZWRpYXRlIENBMB4XDTIyMDEyNzIxNTk0OFoXDTI0MDQzMDIxNTk0OFowgYIxCzAJ +ZWRpYXRlIENBMB4XDTIzMDYwOTE0Mjg0N1oXDTI1MDkxMDE0Mjg0N1owgYIxCzAJ BgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsg Q2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVsMSUwIwYDVQQD DBxTZXJ2ZXIgTGVhZiBWaWEgSW50ZXJtZWRpYXRlMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEAq6glhf+rRBoVKBl15cZ2HeE6K5tY0R8Ore5T/rat1IYY -Lte50nw3LassVz/OfDWOd8AMEnReaz0iVy/YWWQQxZqgAnRYThfjEpN9IfyplzKf -Xajg887hr28Sh57JLGbpBpfC3sODvPbAk12zdJlbtHNp94hYdatVPQiPaL94F9a5 -auQaKJQS/6W9GtfnQq20XMPFj5DEoybe4aqDkEXZBfa/Sf07qw0WcVvf6tzDYcH7 -6DFnUaZG4+NNkEy70Ckpmk4XyX+2DSdHd974AZfnb7aDXx9IkXExTPwTm4/6LvnF -LOMTiCUJaU9kpibdLfmxdQqODt8cq7hCYhljd3Fc7wIDAQABozMwMTATBgNVHSUE +AAOCAQ8AMIIBCgKCAQEAvuD+b/PUsswTUiFXvf+FQfZR/hPu0ix8QFJZ49hkvXoe +JmjjO5/7XgP/tQXletqobTb6Q38PlIxvIpwnqGL1DSqEGQ/iwoSsroQbF/dtqKRj +hKlNuOjH2sM/l9/aWl0oJPKFVBUfRlFIgwihwzyPNFbcqW3hlUQYG0VgG1Gr4QWy +IQvI1BigbOTw64jgqjOotBxIkVb8aavfO+PpghiCwlihfuq5hOGTQcKvZIwS9JY7 +FKqK4j3rQ2rGNp1FmpJ9zjSb/GLxq2xDsGVpa21eIj3hHzbk4/TLTNdABXZI3Prx +VpHQWUB71QCJiu78Ohha52K1yzX+7wmWFGlFmZtOjwIDAQABozMwMTATBgNVHSUE DDAKBggrBgEFBQcDATAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZI -hvcNAQELBQADggEBAJQpLZw+YSScjyL9tNlaoRMhevxoUvTJUiC7UWoLyNdIJxsu -MgNYA+sHUgtCWu1m1cntLNkW03eZ5/HaiR9S9USyKmLkajG92cWYopvS924MsEq2 -D+bWlQckt2AyMaoCD4uT8Lo8RLlb95RmKeVsi/cbEFJMk7U2BxO0PaFOeEeNtjeY -LG6LU90paMrWqw8n2zgiWaHFH7cG6fFKHadygdvzxvMFJu8C7djyYP/MjbjaodOS -ayMZjZ7h1pnCHWZtO0Tb1onjbpykE6R6VFzCMNPWztRPntiRhhVIelWQfCj9PBTx -ohpjzEZ9UGtqpEz8hJbDU4RF6aD0XQ9zSB/EhuM= +hvcNAQELBQADggEBAEVPHstyEbJ306Rfe4EyxtWpM1GnhAaAtaCx19m6GwxGjxQh +808Kuj8DtAzUVkD3pdVFyjDoB24Xz6nb6VlfUSOlNiqRlU+zDtsOvcpE/ipPbucS +aMCpC2ObZ5iZvTHLSV1Qair5dURCJRxfpS8T1lvoRsIFs/oJ/uqUC5Ifk3LGy1GQ +gd9o8DgWx2jQPOxjL6BZZFdcIkZRpFQlCspbrYt1kPauT6Eh9szl7O0TdiHPhBDo ++N+21WlnkbANi7yOvJUyeNkwmcrRSJw+rA7wSak7G5G+z2Yw1NbPPGX+0nigAG56 +dqe7mQvKTkfin+K9A1VZPshG1Ecf+O9nRrFHbZ4= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCrqCWF/6tEGhUo -GXXlxnYd4Torm1jRHw6t7lP+tq3Uhhgu17nSfDctqyxXP858NY53wAwSdF5rPSJX -L9hZZBDFmqACdFhOF+MSk30h/KmXMp9dqODzzuGvbxKHnsksZukGl8Lew4O89sCT -XbN0mVu0c2n3iFh1q1U9CI9ov3gX1rlq5BoolBL/pb0a1+dCrbRcw8WPkMSjJt7h -qoOQRdkF9r9J/TurDRZxW9/q3MNhwfvoMWdRpkbj402QTLvQKSmaThfJf7YNJ0d3 -3vgBl+dvtoNfH0iRcTFM/BObj/ou+cUs4xOIJQlpT2SmJt0t+bF1Co4O3xyruEJi -GWN3cVzvAgMBAAECggEAZ3RduRbP16mImrRNlAA6a+O0NVfY/aAkLrt2sArVVULE -DGdDvRHUB5gkjykLf1yf0phSLkUoKqWbrsRNNgLTAOUiDpikJ9zJpAZz4inu5AtE -dSQ0/3vuFNdyaX5PbI1RYAHTFoLrQNXOVoKgh2NuG6F7eg4YkkKCitg/5jePX32z -SWqIJEixlX4+Gr1t84wA5b6uoz6a3MT71QQc9pJt4l++5rCCsrT31tK9lxqq0wAh -Ai38ZLEJlopz/4zexhv9JAI5YpQmCpLuRnXHny++AzaZP2IeUw7UeDkDSgaVCDbZ -gEyD9FDAmOhENZxDVYXyQduNEPgKUkMFoCVpkXO/UQKBgQDY78jBAiqCIXtWITzX -ctZW7ZUL8dyKoIEOEUwn6z3MEOgOM1thmbYFIwZER6LGVTeh8H7WUIC6HyRkevbu -LHSXk1i0xRgZKBNZTcYNWsAnHA+w+XZsfqMkpguy0j4I2Kqcmko1cHjOWiDA0uMX -oLQhCYTWpZNNQ/ZCofWGdoPehQKBgQDKkRRqg8bzLPQ1N8cavv11nIC2J8zk1ix7 -KBMO0sfbQMz9FfZEzrof99f4kW1ViM63cIDWfjbdv8fgjcqviKW1A3MkRcqhepvq -k+X8VutD6u9lnUldkCIxeOYdYv5W66ctS+yrbc4imdahCgDJklhFTXM9F5J8wNdz -92Sttczp4wKBgQCDwb7wWtxuhN9gBHfrcvfzfADWbGNCXxGC9caHSD0UQABw/4g0 -0yLWI9uehNO8Ge3cETE4AVTtthRoYravGZkGppttz3c/cDOwPahYXBHwrv5owlrG 
-snNrwt3AEilTPiUBUERaCKGW0u6prmv24cgFeL6Rj6bTer+K4Ms6i6iz8QKBgBn7 -XdkDqgMK4w/oZZtin/Pz7PZwSHGZnv709uzR0FypBSH5LStKMMlk6sixLoroD0us -HHpmmfb7YFHHHhZBuq9rD9u5L/JiXZlK+xbQt0Bw49/uurhLgndCAJIIXoMbmsfO -kz6xyNzbSpJBTDSOls/czUkqutlRitEPDFTTmsLtAoGAOAyfMbNEvQk0y5sVRMqL -d0YqYZN0kEHO20roTFDrMrpXsh3KfbLZP4izBz1B3NOEzdNuCCMVVfceTNmfms8A -2/dh7FvDF6HJOMFQl2uvSHlouR9eEgm6zSRBORWkzG0f45qpvJMZBBpJzmB2wgVx -rXc+yJjRTig/EqqVtQ2zBIQ= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC+4P5v89SyzBNS +IVe9/4VB9lH+E+7SLHxAUlnj2GS9eh4maOM7n/teA/+1BeV62qhtNvpDfw+UjG8i +nCeoYvUNKoQZD+LChKyuhBsX922opGOEqU246Mfawz+X39paXSgk8oVUFR9GUUiD +CKHDPI80VtypbeGVRBgbRWAbUavhBbIhC8jUGKBs5PDriOCqM6i0HEiRVvxpq987 +4+mCGILCWKF+6rmE4ZNBwq9kjBL0ljsUqoriPetDasY2nUWakn3ONJv8YvGrbEOw +ZWlrbV4iPeEfNuTj9MtM10AFdkjc+vFWkdBZQHvVAImK7vw6GFrnYrXLNf7vCZYU +aUWZm06PAgMBAAECggEAJD2dsW3PFX9vr6lK+nT4PRTibmYkct0lXiUEkiD0x5DX +Bp3lft2aITiLJTiQYGoBjnLgw03tjFu5gg257duUAULwP73nZN1B2ASXDE7bECje +CEMI0bHIuD1X5qMG1x9WzuUI8XTtMjGendpWtDXcTqirTrPhH7EFDrB2VdmE070o +J/ls6xYUkaYYNR0CthugxIEnVj/8n56ecGTOi0l+QCo3vpW64QmKzz1R2OtqoF0t +wWpvJT8mhJhJDREZnSYGvEojz5GGevpvmoPZYXd2WxvE6rFhA3nwmeuZn27Nk9Ao +zxUdAH+eTqBA3SMwzqX3tjcWO1OdVi0PAUzv2BnHSQKBgQDgyxib7h1SvFcYF8aX +sDpIceS7ySbmb9f65dKbHNqrqJXwGGpDxD0Uz3ehqjfSo6gZzteRQsVfI3VizAlM +LHmzdXXxmuuHIGFWAWAV6GyBD/Xea9pzL/LQ7n9ldq5HUusFMXx8Q6GuUeb08MJj +lJhNEHfG99B/6iRfamyxOY3eXQKBgQDZYJ2htLpb6WgOS1RXmh0dg7R/e6WGFzkC +04rRBEbCGg86vBRAsopzXJXL6EPXuS6GB/RqpDekTednypKFKY1idoSFmcpo8tbd +XqEszk6Iityiocj03T0VwVHVw2vAuBdbfEGUvEFm0rrJbYHgOeIZ+WyUdQHIJIzi +69L0rjMZ2wKBgQClnl0CPXxTQbo1YQcLKWa3i1pH1JsZelu5WexCJg8iG/JkU8iv +Jv0NwRFWBdBdHAC1CwUd3AEI6FoLMWLEQZxk0OzV1hsjkoLEV/0QBw8yQ60Vc4ce +CLywnJc2DSu4FupbCCu8biPICEXOPzgpIAjZ9oEZEeoG5F5qAkIUjN/ubQKBgBRG +p81qjyt9lQAoVKCrNmYHX/G4NdNRHTc/RiaC8JqwVZVT78utG67xBuPzMUjQ9B76 +ZPkBglMoFRqgUZRsMMzrYycXFkM9y37wkbYdNEQWN8hPsO/uJwF9e8WlqcpbIYqe +B3v5J1yQ8W1ScpfYrldf5ZnfZCAoEKAmARJjoU6bAoGACQujARGIN3bPWrCgj2V4 +h3GahvEmLL07kuvy2cSOYfkLpWp4qgR2NEwiRe5kUw0MtHuBe5qilTXn7OnJaYe0 +TkNfahlL73dCfDjNQ5CMXj64P/SP0YoH/snPx7mejJZO1U/4UKB9LbDpJCoMFdyy +9YewNWEeUhMiQzhMt2PEeY0= -----END PRIVATE KEY----- diff --git a/jstests/libs/server-intermediate-leaf.pem.digest.sha1 b/jstests/libs/server-intermediate-leaf.pem.digest.sha1 index eaa87a3e7ae40..bc662d3bb4f12 100644 --- a/jstests/libs/server-intermediate-leaf.pem.digest.sha1 +++ b/jstests/libs/server-intermediate-leaf.pem.digest.sha1 @@ -1 +1 @@ -AEC6D0D178627DADF56597264DA6C674498197EA \ No newline at end of file +DBCA9478EF39949988842F7C598633BC51780B0A \ No newline at end of file diff --git a/jstests/libs/server-intermediate-leaf.pem.digest.sha256 b/jstests/libs/server-intermediate-leaf.pem.digest.sha256 index 97242f0e438b3..38c57828077d9 100644 --- a/jstests/libs/server-intermediate-leaf.pem.digest.sha256 +++ b/jstests/libs/server-intermediate-leaf.pem.digest.sha256 @@ -1 +1 @@ -B87926861C8DD6C66214A91FF96A2403793604E78799FE55ADE7ECC90EAD3C33 \ No newline at end of file +32C062F812222E0949A3B03FD347D30D479575FA7691AF6982AB520805EE99D3 \ No newline at end of file diff --git a/jstests/libs/server.pem b/jstests/libs/server.pem index 00def9a75d539..4cfd885c56692 100644 --- a/jstests/libs/server.pem +++ b/jstests/libs/server.pem @@ -3,56 +3,56 @@ # # General purpose server certificate file. 
-----BEGIN CERTIFICATE----- -MIIEZDCCA0ygAwIBAgIEJXfWyjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEZDCCA0ygAwIBAgIEVx6CDzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBsMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjBsMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG -c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAmimB7418/JLt -T3GBjWsiyO5RRApaTYbUMDibwHCJu7uS8WXIsXmmM0kG/KeiDCGJsnqJLDmgmmDf -W1wZf9cqnUb5kg10J7UznPIyqG+LxJJYQTQmUNmHNcQienS4iI/lOrJ+9oeTHE8l -hwL1trua7XY0cTQHbED4HEMRJ2MRrRZkLyFF/oIOLrM04ya+y5d9XrLlBf+8O2It -cA93L0ZMdC1dJdWQVibLBEKT6AmeYa3BHr4o2jQbm/N7iW9wcKEZaf4oe1XHy5a7 -YkjzMaGIbtQA7y7W7hENOgVHR1jpHgbkIkiZWJiCfEJ5Voz7Fc0wfDTyh/cHkWSf -pd0vxKgJXwIDAQABo4IBBDCCAQAwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYD -VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBT4ez2+qTaostXv -MIza0HCaaXjqlzCBiwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8G +c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvqWriorxaktd +7eceGLGDiCJ9OM/5ogFclcMhx3+vj+Suafrxmg8viGHDh2Tmg4/2oOcjSO5xZck8 +vVEIlmThtNdULxpV+GVm4eUUn5OlqBVHsqIIwXkD/nQRikXIhrlmtn+F064DzT6J +MsWkDTZFup2tj48nDamWdBKWInhApuMlGMg/FMRZnC9g+PvDZYHEUku3glFvXi7/ +sGUr1rD2lXe00B/VE648pEENaP+XwcvnyvWOenWEgwWQxSgAggBirnDBg/px+HPV +isBILw+lDRGM6watcDLBYn2aokA1yC7saNVz1tQ+adUjFyF3kAYiVKjMUu8jNc3z +uOekpDAEnQIDAQABo4IBBDCCAQAwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYD +VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSNsQSAJ0uVz49t +fctZ1tsVa5gLXTCBiwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8G A1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoM B01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3Qg -Q0GCBHvUrJMwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB -CwUAA4IBAQDT97jbSaovQZeFMBS6yvXEshb5WuE+417V7SB8YEkfSSmYzqhAIyD9 -28iQAhUqSiU8PGTTEU2g1BXAQ8E41IWhLWAhD/COhZ0p+c0pfN6tv0mIJ1lafx1i -m5sVAanO+RKtXn8CsmI9OZA0Zc3d4+pMe7VJWNSeoKTFpsRuhKSHgT9UjsTdFtMV -PoOcjm3cArmzNqJ7IRtSFm/MkYl0tm3NGqXJin279h/r3OalmKthWYXc6d5Z/Hka -Nm3m/STSuaaL38Y9WudlpEXlvRGJ+VLLLJaDOQOZiOm+2uYbp9GDcbXxGHBhenqz -kwdit0IT5BIkXpnF2sNGJbEPxiTx9JwO +Q0GCBEreWhowGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMA0GCSqGSIb3DQEB +CwUAA4IBAQA0x3Fe/xsvpigB81Tfj/ysBpTagXKUrv4DV1WM4dGJ+ffRU0iwF/vK +tSowhGIWnpiQL8B+rhc11Rk22Vm2J0CrYIfQxzy7lTt8HjwupKlRcBId9Qo/ULKv +YS8I7kFu55xDzi44btW5c/LsNa6gP5lfwGh3gcufSkwvGCIa6uLk3wHMpFQ0AxPC +i6Cv+XdV/ul8CjfjCzPDo8hyyCkARLT0/M0OPgOERNbwgXUsk24ld1lq8pynaYOl +4h2lS67X9V2QhEfNhzN0LvYXUXSPcPv4vORR0QhUPMrB61WtzQWLrDIWSsNUMdhx +GrRUxoL1e3OBeA9JLp1V0zVrr/D0wy0B -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCaKYHvjXz8ku1P -cYGNayLI7lFEClpNhtQwOJvAcIm7u5LxZcixeaYzSQb8p6IMIYmyeoksOaCaYN9b -XBl/1yqdRvmSDXQntTOc8jKob4vEklhBNCZQ2Yc1xCJ6dLiIj+U6sn72h5McTyWH -AvW2u5rtdjRxNAdsQPgcQxEnYxGtFmQvIUX+gg4uszTjJr7Ll31esuUF/7w7Yi1w -D3cvRkx0LV0l1ZBWJssEQpPoCZ5hrcEevijaNBub83uJb3BwoRlp/ih7VcfLlrti -SPMxoYhu1ADvLtbuEQ06BUdHWOkeBuQiSJlYmIJ8QnlWjPsVzTB8NPKH9weRZJ+l -3S/EqAlfAgMBAAECggEAMVPaubYCKicxO9xL4d5IERcAdc8COhBMUOmsHJVl3OYW -DJvO7crI/Vv+mlVhKpSIpM1EmxwNYZhhn7AGkLtebVjKfotDgXkffLaasv6OMGXL -S4HOSMxx0ShjB9/VNA0nAfMfYO7ciZBhp6Owz7mTxyuteBN31JGtUg27rWirvUAP -yHE+Y7wTxs3GmdgHeLPcCegh09jZ/yboHAgEZqiuXrwfqPT8kmPVXZ7ZH/38Quar -jeuaii7SpZUwpJMMd6C5pPs3lcu5E/czQv+XJrXuNbO9yZ3ePk7c+MLGuWffAVd4 
-77Gd51epWhQc90RlppxScD7GlcWR8OZyVxgSAVRTkQKBgQDGyCjOCd7LDkMhI/ES -5gmUK+kbBiDDac/hGDhHZWAMLoVwJ8gMDJu2oU3D8S5Cq2F2nPxCMR75GP/JmU1S -WtLSa0nR3NMvm4oKMRG628YiSaqN/ZiEHuURw1OMTpgXLq4kB17CsWcKUUQsfB5G -t9C8d55dqjMp1/sCqiumNNV2IwKBgQDGiWimL/AnVKsx9DVviCXEsxdixQVCBGsr -LMs990y2NStTslTXVUhH6R5iJ/GMlO9C24UymKiSZ4KyIt+F2B3n/xvnRUdZ253B -jGX/vF2rr5mrnuUddWGbtZevWPf+rKMFHUynfpITCm3eKNCmUNOLOv8L9cOTndFj -yFFNF1ONlQKBgBDxapj1OmowUwR2Hcwwk0xv2bSV9yDw2ekjuVhMib1AEduXaHOu -d28/nHNBEWJZXTtQ8idqLGuq99JlILQOTb3lqysaGV4Lcd6ghBRbOy4c/U6Q+Pj5 -8Shsb6ib4lbHgcxMXIVXvaKhfqAUDDiW3lHSGEt/gFDmudxmSMYn5rHlAoGBAKrN -gGipCVTs9LNvQ6GGYefNo6rnkVRwdLqaOOhAn5CKQZIp8++lgR/MoMUVVCv73bOJ -JBiZWT1LLBeRBPgjK+dxhFH9OhtGY3wmC4JuTa/szKnnc3laiPTqz2qdgg1p/H3j -7RSUQZt793WPa6IKG1THFrD1aKLQvVm49qj6DnnZAoGBAIfF/1/x+Cy/i5mAj145 -+uNgGkY/aqwNglmJbQNN+0rB9Lmc79ZpMLBp081Jd4yhySlwKw44maZkUuyHQ6Jg -P0v0i/eyowYQviRJHEu4DRgCmHYLMqa5k9ms5orNAKLp+KmmzUEdRUIExuJ5HaF/ -cXPuG2QD/nsf3ha+/PnjVWwo +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC+pauKivFqS13t +5x4YsYOIIn04z/miAVyVwyHHf6+P5K5p+vGaDy+IYcOHZOaDj/ag5yNI7nFlyTy9 +UQiWZOG011QvGlX4ZWbh5RSfk6WoFUeyogjBeQP+dBGKRciGuWa2f4XTrgPNPoky +xaQNNkW6na2PjycNqZZ0EpYieECm4yUYyD8UxFmcL2D4+8NlgcRSS7eCUW9eLv+w +ZSvWsPaVd7TQH9UTrjykQQ1o/5fBy+fK9Y56dYSDBZDFKACCAGKucMGD+nH4c9WK +wEgvD6UNEYzrBq1wMsFifZqiQDXILuxo1XPW1D5p1SMXIXeQBiJUqMxS7yM1zfO4 +56SkMASdAgMBAAECggEBALaMafpJ2qn+Kp2fPPlGGH0lESRyOEUA6sdYRiMBx3iP +7tX5/JeYzNnZSuF9IyB8nBor2GSANMLS8z8PtZpUVK7VtH05yXIEnoPU12+JcAjG +/5UzhBDFsKZYO2dcNIuUQZ6j8t/i8y73H2YnS1N/UTtEuikJWMrDRRY1qd6tqLJi +i7qRNv2bjdqrEu7Rdhha75WxY7RQysLkGkexde7hFKeWOjFK1pjpn1nRHjTqylE1 +TX4cH37ihXzra0iLhiuLlhIF0dav8/bLNQgLdKlb1H3hEabk089p9+Dwm9YGejus +GvtePgKHdr9I/6WhltDqxgBn4PD+p+dt4L8KWTRi0B0CgYEA6OweDotakEf1PwdR +bs6K7tU78LZ7+FKGe25gFW97bQxvk84B954e6kJS74CVPDI8hQPZW1zk1PIN2e7h +ntNby2I0AWB+zz4Bih8hO8L/F8oqzaoerh9Yd2ZZ8wddMa+lKGu137LRj2vZMEla +4eeARmlDzpJMg/6u5gnYlzAAvTcCgYEA0YlHDDKK3GyXDoe5O69oiaHKEqSrM4PY +NY6piHsD1a4ksLjtLUZ/fP2w8NVfV5M7skf8nSMhKMc7vBdu6qBN9eo4TfGNByZk +nvJkrjDaXVYy+WT/5F+5K6iMcgS1Fo0Dpllv6AMCdDhRWk9fM0/irxwValQaJrs+ +mGcrcd0C1ssCgYBXFN/v5B3Jz31bQSsq2EGNJV+xkSgsIP5ya0O0/+cPUBTvF5gY +sZ8xSfaj4FjrFoUV3eiHheVvz8dp8SudK7wn/+EdmqwOY0pED3tnUnH4vPbfyXiK +9OEoUrXSbLBlARwoTozCw6IhktqyeNpnlxuYN4bIVl4RA0j0bD3z2FiOZQKBgFoI +on6bOQ99mWqk5vK+bCy3WByqpOV9wWlB45bBROSL7zgAPek2YZFTcLQK+uymVwBD +7keW0Ki08vXfG8m1F8qS2Z6kK/TmilXB0YEHfMyePUjsHQgEGYyo37AeVbFa5jaU +N4F5yZQmns4vTLi/mqejaZBGkvYRftP9gK1sScwBAoGAbD0cKaMEp9rTZF2x7Olz +2Thyx9K+8BDbHYfaqpj1PTRoyLYL5syC9FYE4FBkui7NsynyEpHjpvilJduC9oh+ +2/EwfNC75Hq5nJkGgk5KHeMMMz3nNCsGi78AW7ilsYi1hC277UX75UI92j70J+Jl +0M1McDYiCVYgnsKMydJD1Pw= -----END PRIVATE KEY----- diff --git a/jstests/libs/server.pem.digest.sha1 b/jstests/libs/server.pem.digest.sha1 index 459245fc9c876..af47ee559d1bf 100644 --- a/jstests/libs/server.pem.digest.sha1 +++ b/jstests/libs/server.pem.digest.sha1 @@ -1 +1 @@ -B83F19BCEFB506CC46478A2B9DBEE2762EBD038A \ No newline at end of file +CF9D05702DD8002E7AAC8E9564420E9D4673B249 \ No newline at end of file diff --git a/jstests/libs/server.pem.digest.sha256 b/jstests/libs/server.pem.digest.sha256 index 16c8057185c25..7dfe932dbef96 100644 --- a/jstests/libs/server.pem.digest.sha256 +++ b/jstests/libs/server.pem.digest.sha256 @@ -1 +1 @@ -FE8BF8840E44CBE2E04B408C3F49247E256A1006A4DD6EE9B74B5FB84EA2FAA4 \ No newline at end of file +D466E7EFF3BF6E5F080F8B6C80D839B6D2F1DED790F9AC99C00A7370228481AE \ No newline at end of file diff --git a/jstests/libs/server_SAN.pem b/jstests/libs/server_SAN.pem index 32422b3b27315..35b1c5c08f57d 100644 --- 
a/jstests/libs/server_SAN.pem +++ b/jstests/libs/server_SAN.pem @@ -3,53 +3,53 @@ # # General purpose server certificate with good SANs. -----BEGIN CERTIFICATE----- -MIIDvjCCAqagAwIBAgIEJXwypTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDvjCCAqagAwIBAgIEIQ0eljANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjB9MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjB9MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEgMB4GA1UEAwwX S2VybmVsIENsaWVudCBQZWVyIFJvbGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQDJBVF8+yRUl2SUbW2GDTTChhRTwrmjqZkrpe65Gt5QIMqq0LDQMr0G -4t1X0B6gIrLjpzB9a8Aty9PLi+Dcp+C9hC/Bm8W6GsnyqhzFAu5hXELMb+Zfj9tx -Vv8KcFPB1AxTzpRPUfdncfcpBBKi6c9jtp3IjM/jLE6nppyeP2BQ0LY9xJE2tjVZ -fSxZ6kJPEbY+QOF3DfSmSY06qhNt8T0EgLhDpp7UuoH3bq2pNrFzMnXIZW868PUn -FwfFxYWb6+QWAzAVwd5aj4326727pFtwR9+SxvHZl5NcfJIVQQl6WJg5qtCTgwbZ -uYfyLWVKCtNLxsutrf+ydfGQZOte+0XJAgMBAAGjTzBNMB0GA1UdJQQWMBQGCCsG +ggEKAoIBAQDb1cukIOQBUsYVprQC8sIUrjOB7uZEIkAgFVd207erlA2Qq5MHWnnU +OcicqLRLITGVdfS4fSlsu0tnCwSV9Tawo/jrskjITDuIewoOR+vDBlj6GDTygygQ +6urmNTalafIN/WGpliEsk6zUDqYrAz/bPiiR4f9AxKrczkxkHfD7gM6rzdj+ukxR +fMmpLUHJgh8pIqSSN6ZA7lnauB7NCI0RQtB9qspmJHUx2FZW5B6Lh+ZOis/soEmX +OPy0ApzxxyjIjTjoNqWQOG2v8NHAIH2HtQRSVYbkZ8vTolD7BdezEECHAJGoS94v +oGPMMJ0T/0Gnc+LlUAiyvl+1DnjDw3XFAgMBAAGjTzBNMB0GA1UdJQQWMBQGCCsG AQUFBwMBBggrBgEFBQcDAjAsBgNVHREEJTAjgglsb2NhbGhvc3SHBH8AAAGHEAAA -AAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBAGhG3IwxfD1f3wEBZ43G -GSIKbfbrXPuubELReKOwvN12m06EO5UQxgwZmwjBuH1HeZYblEDuYd7xaYxPW543 -2Vrsu1FtXD4U1OVazU3XjCalhcMj3loAWy76w5RexHbedIcd+NdgcqN98jlZolHk -pyKxQGxdWU8lQvBdHCriOO/hQdPYiJTrR5Y+2rX0Sh/yT0KPD6N+BP2pd7NROVmr -g9QCZC34fTDVie2useS/QEEj03Amw1HgqfczTEa1e8CaqelC0XQtb9OcKpvv53aO -JXUsTs5uroM3yXoF8tFTLBs3ulY6xFXRAeJ2x93GYHp/8N8S879JdIIBUYhiwmWy -PJk= +AAAAAAAAAAAAAAAAAAEwDQYJKoZIhvcNAQELBQADggEBAH8uSic6SrFCjMqqS1xa +6+XymvE/MjXyhARGI6DFrIZPBt/LZwzWr4RznSf3teOruhXvXZAHnaXAOnYwizpW +XWzDev2xegglXM6N10dFWhy5M3zo2h91RPhc7MCkWcaHwMoPdKADVPW7sD8Ppx6Z +cfMJbGHAM54XJU7c4RJ0Qs9Lv77+kLXTw0tjCvB7j2tCamTl4vw9j3UwxKdCC/P3 +WXV00yfTkayLEXlquPlNjSKJOXeDj0JoG+T3NgK973soKnztsH8aPq/s1kP0mUDf +zLrQZRLCxBWAj48wN4h+/mG+EbTt+H2VwPbA9tQPzg9SlIPgY7ZPGSVbA/qCPcp4 +zkE= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJBVF8+yRUl2SU -bW2GDTTChhRTwrmjqZkrpe65Gt5QIMqq0LDQMr0G4t1X0B6gIrLjpzB9a8Aty9PL -i+Dcp+C9hC/Bm8W6GsnyqhzFAu5hXELMb+Zfj9txVv8KcFPB1AxTzpRPUfdncfcp -BBKi6c9jtp3IjM/jLE6nppyeP2BQ0LY9xJE2tjVZfSxZ6kJPEbY+QOF3DfSmSY06 -qhNt8T0EgLhDpp7UuoH3bq2pNrFzMnXIZW868PUnFwfFxYWb6+QWAzAVwd5aj432 -6727pFtwR9+SxvHZl5NcfJIVQQl6WJg5qtCTgwbZuYfyLWVKCtNLxsutrf+ydfGQ -ZOte+0XJAgMBAAECggEASQBdb33k27N/G0gCFkSFfH8ksqZstDrLHUbNQvu28HJ4 -J0BSdcl3TCDnMRSriowPWw1EVsfiqr7y02Cg8IEm5Kw0i6L+U2+XF0Ef4YwG9eSD -farFhr6/epGVXT0dra5MK8NBqOyjZDXHBGYuPmuanSOceVBpzp4wkkG3buClIbOE -f/NhIoc8odkPUMXICXs7iJM2SVh+2isAH9X0iIx6de7zqnJw1xLwcWdGx6X0KgmY -SJ1JzUmhSDWdhAW0R3JoluzlEAUs71webtTMthp+AQuOC5wrRc0cARdC37foGC+V -p13udR0bZyP09Bzb/BW2tqR7QfP7BiXObsxVjC8V4QKBgQD5AF/7kLYeLmpRVvJm -cMs1KF0i3ZETRW5zQFMfmSYM0LmjyrmkgEtCEtHO5p2KMGtFZdvqkdS5/Zt5l+mJ -aCwv27OsjPmO5VsvoCGBYOlw+MbnEim+cqOrjjVTNX91fH1ZxAcsp7Hmc09tu/jr -8Qc8ZDzD4X6itsnI/tbYE8Kc+wKBgQDOq7X6AXfLsLwaq1BlYn/uljRMyrjE8/tu -iPIj8xKT6KP0RkMqPl1xPT9q3J7gsxkVg28/MLBW2bnLV+dBKL+eGTVPw+AByqQr 
-98FrpqKbARTmsu4Dn3vYgharpxrmg1L2nmN4WM3v6HGLdA5EPpQJvCKp2O29QCjh -3ySH4vblCwKBgQCXwH++rFohmL4Y5nmCrzlZI0lnx0r0SLtgqBJAzrBe3RJWXWW4 -eKvlD90oUGow3wNxXvuhQNE5rPMFLu0YXhGX9TjSb3RkfymMo/XniK2cuTFXgD1K -oUlYc6nSFWehrYYjoBGTSHxma148DXROLy3uw1Q5OQNZnTbrNUywkZJo4wKBgH8P -ip3d8SzFx6AN2yu51zV0G0trBxXvepGGmHgJpU5SJRq1Z+280e4g3bBxWyyCb9WO -LQMIiCGdUmD19jNVPhmRHfmgT0RKtYxikgQBOs9ZZuQ+9Z48mwONVyrJXfyCmKsO -zdDbqCDrI4O9IlhKsPEbPaR2vhMwMvJLIkZ4/5npAoGBAOnes6P9drxmzaihJdsF -1QBecczw5h5UaEp5KHfYwf+0G8s7tZeKelgHErPt6tivJ2sWbLNqxbvFQK7t1KNi -kIRf1MN1XTxdSHuLVqHwuxE50zH3LGFE7c/ujszAiMbHct79s3/kcoUd5lkDVYyC -x1hWrOgfx84p9Z897299r4fk +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDb1cukIOQBUsYV +prQC8sIUrjOB7uZEIkAgFVd207erlA2Qq5MHWnnUOcicqLRLITGVdfS4fSlsu0tn +CwSV9Tawo/jrskjITDuIewoOR+vDBlj6GDTygygQ6urmNTalafIN/WGpliEsk6zU +DqYrAz/bPiiR4f9AxKrczkxkHfD7gM6rzdj+ukxRfMmpLUHJgh8pIqSSN6ZA7lna +uB7NCI0RQtB9qspmJHUx2FZW5B6Lh+ZOis/soEmXOPy0ApzxxyjIjTjoNqWQOG2v +8NHAIH2HtQRSVYbkZ8vTolD7BdezEECHAJGoS94voGPMMJ0T/0Gnc+LlUAiyvl+1 +DnjDw3XFAgMBAAECggEASwfNWVdgepMlyH39MoRoeWZ7bf172gVWnZyrHYNlTMpb +VuU4aWoX6rdOISnzXzEVG04HGHCfktzZ/3FjP0tSPze33bob3UEnkI1uATHK5eVk +uPdKGvto0V1cjvXakNp4Iw44Jwl2iSBd/IKDdGrHgKzWa/QWiqSVLIe8yu7tMOlD +sBcEYemnehTWipZLHLTa9iYog4bmptvwbzM4Kl4HZ3nYNLZzjpHfoZNWGtJMcZW/ +g7697l7rdar/wx4JVa0crUVSLNupigjToA9SH6eTf89pqi67ZV+eLt6tMkub/vVQ +eHt1E/uXt5ydeCHxnjXArva4oDPir+3wW64qjkgIgQKBgQDumww2kZ2QdTc00ehy +moYh3UfUn+Jx68NUlxsXOGR/E0PoBiHS3OEFLOuMaIbbDuiUvgHF+XTu+ddWed8R +iN0jY2mjtrBMPUYPAqttLjVQEmpKPcWfVFARhJYMqWOksHII/l1yi6N9MEyzI6Er +3sNH2jggqZR+2UInx2EGISZKewKBgQDr3HHtODahFRSue+3DPxliQH0SH++j4KzE +rcfLi5jdODaUbpPcAsrIbRXnEW1Lf+S1QXKt38t4aR5h8gPn16CQQ2waqX4JDYSG +frml9II25VPGPU0ve0YzAFfBvn0Ls63J3u7HCcnwV7lDSD8f5sZtHM4qtwLr9SEF +pNArQnJsvwKBgQDSbf85aPay3g1QEgeUet7sosCkrlUA71IXGiSUN/G5eH4c9LuD +wbTZ4aHi9JRqQR2xgFkEBlqwH7tf0p9+UVvlx9j0vzuAIVHWDx6sbWIrOfJvg0b0 +m2D58hp7FDCCn/ISKHK1gJ0w3RXnrvaHQDCs/7EcbTI/JNAJUPcqdrXUXQKBgQDL +7oIO4vDRHGISb5Lno3I2Mp4xgq14G3YmZD+A7cWRWN5QPr/Xlg5xd2hdrwK6Ke29 +DsayMfNCvFkJxPC9kAIDWlhpQS15dFem1oF8TUodXvGtUSmgqUzMIjq+iQ6jhIr6 +Jah9LiiNh7vmwdvaoHXmt+ZGppB/JiaUM6nODZjUDwKBgHYT+uyrrfSvjQrEaeAI +DSYMPZdvbf4BkDUmqFLu1aUUUUV3j+UrdYQBJZ3tkrq1QEqsogwmMdMfHxb51UBd +btDEpoK0zsqzcBYnTX2fv9MfP8QzqrK+CMVExHwsTLRB3syG6vg73pmj4wIR3Ni5 +QRaH1E50tZVpdVVxhrMUOaBG -----END PRIVATE KEY----- diff --git a/jstests/libs/server_SAN.pem.digest.sha1 b/jstests/libs/server_SAN.pem.digest.sha1 index debb75113449e..72961501176d7 100644 --- a/jstests/libs/server_SAN.pem.digest.sha1 +++ b/jstests/libs/server_SAN.pem.digest.sha1 @@ -1 +1 @@ -6B67673A606FD2F43474BD04490F5F3C88EBDED4 \ No newline at end of file +E10148B4BA6A0E2F5E12889AEA51622D89EC8524 \ No newline at end of file diff --git a/jstests/libs/server_SAN.pem.digest.sha256 b/jstests/libs/server_SAN.pem.digest.sha256 index bb25a260fd3d1..6c5631e2e4cdc 100644 --- a/jstests/libs/server_SAN.pem.digest.sha256 +++ b/jstests/libs/server_SAN.pem.digest.sha256 @@ -1 +1 @@ -59BB5C306881B052F8E1CFCC9CE772E8273FBE4B381E4E268CD1ED9508DC88AC \ No newline at end of file +D73E8A7806D5DFF5CDAE263F2291EB2213E8D6E033306C11DC1EC43E5AC7F008 \ No newline at end of file diff --git a/jstests/libs/server_SAN2.pem b/jstests/libs/server_SAN2.pem index c63d5b76a5ce9..4146fe47ff2ec 100644 --- a/jstests/libs/server_SAN2.pem +++ b/jstests/libs/server_SAN2.pem @@ -3,52 +3,52 @@ # # General purpose server certificate with bad SANs. 
-----BEGIN CERTIFICATE----- -MIIDtDCCApygAwIBAgIEIuh3kzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDtDCCApygAwIBAgIEMI69UzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjB9MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjB9MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEgMB4GA1UEAwwX S2VybmVsIENsaWVudCBQZWVyIFJvbGUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw -ggEKAoIBAQDCauZxI+7aKcahn2sGQ0flmSrS9jHnB/r0EqQHdABWAzSeQ4yVuqmL -2OqknjTBuEEjDhuPPt8EuOTxO5iWwoCTsd7dK1VS+rbq2CdrzwMhxKRzBDyPqANz -pOrmb4UMW80sYkdFKpBlwxi+DxkDZ4jtbbuQISCY5xJBl26f2O+Wh32G1hDFrEhc -5jvpyCjUjBtdCo96zRvU2XsZRRWoD9EPHHGS5FPKCdBHlPZOkm/xSE4v/srTmxPg -6qYntQLRyURcSQT2FwEPbItx3mpvHq1MG1LWhMH7Yb4bhlrEdKWoR5AWLfhN7YDG -t8dRUZVxwQF9VczugDjihVrkYrmhHRkDAgMBAAGjRTBDMBMGA1UdJQQMMAoGCCsG +ggEKAoIBAQDYVMefpss9TFc0e+pubi0OLN9fHERskMvlLxUN8KCpKL41Go+jNtmj +6qdIalYbhH/mE0U1xL0m/9dVhIYj0VGULm4Mwz2u8j/9q6nPqz1M32xAGohVLe4m +Zq6DQJW7XS8ELMnkU5k6uriBB00WNwU/24RdXMLhsFWtYesSNBuAMh9RaFK6mNEK +p3VHY9ZwqPkSscXvxnIS+acGS7V8VIDN4xgxEqAcaMI5IlFhFUewqxi6xdeoYp+X +QLfHQxfrPolp/mAXiWS+xyEKlrsjM+H/USl2BflTnYldR0RWnxARZpEqCCZ3o78l +BdyOUZ4P1J5RmCx0tUk0+x1cVVT9HGzNAgMBAAGjRTBDMBMGA1UdJQQMMAoGCCsG AQUFBwMBMCwGA1UdEQQlMCOCCWxvY2FsaG9zdIcEfwAAAYcQAAAAAAAAAAAAAAAA -AAAAATANBgkqhkiG9w0BAQsFAAOCAQEAjVmcCjkgQFQbiZRlwYStgpWYHDGYonEf -YxOPouTpM+x0JKlejQ2uszX1GXtLXgymqLsjHkcfuoug8aPa4vmUujmuAZEnISae -qaN8Z+qxHEkBatjnOp8xucL9tUJ+rvCV/i4aDWg2QV7MJmNXXLmUqZMjtjEvbsaw -qYADAoiCcuREZaeGM08WU0Z56gelb5+uTRbYFEyXO36XR2tRyljJzqXGwvR0tgb2 -LoqXw7QYlIyszQOJOZx4tu9OY/U4RfPwSZEn/nbrN2gIPJ/mpOa1mY01d8fg+Wfu -A8a+sZc0YYh+qcy7L8am1xS37bhctc6QsaZbf6PfqxUretNzTpKfcA== +AAAAATANBgkqhkiG9w0BAQsFAAOCAQEALT7OkVF3XbytVG2wKQRjYQ1eIa/Cc+IH +noOD7c58j721FgbF1Ctm/2n9s8Q/fxDmz4l8njUyf1I9RyxjrovUFpuRtwYBdwTe +myECS5V/Quel52y0ZWS8FCL+MTHfhyPBVPi98zOK73iVM9RR1Ju/pZhlSWtBp57c +X5jWMBtIjPPJurpBy2tB7zDb8Mw53YO+wwOHVlAq4SSyU++zcBL+F7cQg0UeCeL9 +KmB3QRaDqZf+5h6zZm0a+b5AB4WOh91LCGeTltJsKE9Fsf8KU2o3GqvWgVS0/Qbs +dprPz4SFmhQnK8jUTpHAAWDTGSnInQRZlwSqHdwCJ1uuo5RH6kMsrQ== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDCauZxI+7aKcah -n2sGQ0flmSrS9jHnB/r0EqQHdABWAzSeQ4yVuqmL2OqknjTBuEEjDhuPPt8EuOTx -O5iWwoCTsd7dK1VS+rbq2CdrzwMhxKRzBDyPqANzpOrmb4UMW80sYkdFKpBlwxi+ -DxkDZ4jtbbuQISCY5xJBl26f2O+Wh32G1hDFrEhc5jvpyCjUjBtdCo96zRvU2XsZ -RRWoD9EPHHGS5FPKCdBHlPZOkm/xSE4v/srTmxPg6qYntQLRyURcSQT2FwEPbItx -3mpvHq1MG1LWhMH7Yb4bhlrEdKWoR5AWLfhN7YDGt8dRUZVxwQF9VczugDjihVrk -YrmhHRkDAgMBAAECggEBALWpV5v+XG/Du1NH96QJeAPEg+xNmUFsBoTLajOAo9O6 -+Q6sxY8etM/0pwOcxGUCuvJ7eyr1L0RlU59I9YJhSe03tavpkrePhx2XTfaBn0aN -TLAgFEqTTm5fehJJwANVQDipuDib1gMQMm1dBB1XP+3CrBC3s5LLzxY03mxEM1Lt -szKHRs+uEl4FrriEvYoXllkRB/Ws9zatFvVQ2idJk7Ritmai9lcxq9EpqK0SxeRS -1FM1T08IVeroHqsJGY/oyNLzkHJZTJCBActQvCauB2yP++a8+1JmR7GcIBUjbmfS -RdXPArC8hrXDJoOtissI/28rRDiJFaK+dXrlrqxcc5kCgYEA8KZq8T1NKwUnGiWb -Hhe7LUFOOljCGHOa4uTu/3swXDJU7opIj0cZ7s/QR+Yc+DHhoHTp7N+NbYsR1WCf -RqiWsQieYdhZd899vte0SLNSWExvRuFpgOcJ5mrUzcTRlZiI6mmrXCiednhvryOc -vd5uQwBlwI7itn3HvB9+kUVpSRUCgYEAztGM+auXsnwyLPLAvxNlwv4JNQ/DGM1Y -o/VP6g3N/7Rj1muRN1gqn/GW8pBCYvqH7Fu/SUgR5KT1SmILL2Zk5rY2G9x/J8dM -a3i7J6J6/fRZgE9IuIoL0wZ3F50iZdosWCi5tOH3++2LwRzHh/KOLzJ+MyCbFpAd -8RGW/fJzL7cCgYEA6gD49pGkaxO8nRk6R73NvcjF98h9HGe1kbIJkJZKRSyQF9CR 
-k/kQh36+SlvBibp/apalLTeIf59+fN1So9OljIoT8JL/FJjH7n4ziYoNpVzVy7e+ -7qA6qTryqRAcuHm1kTOldJzu0cZ023omnR4gW7iUOPT4EXoncY+ydDJpma0CgYBl -sBSvoZXptTT6crUgfcoYEM24IDLd3AFMRE5xly4FHs3D3Im++4OhtqhwRUvO3L2+ -EfJAdsYNdKoCU6iKOr87zLVYB5chmNVTNj5XI4VznhPviYwI6B8eN9yQaLtD9vy8 -r/F3JW/Hl6mSXrMgfbs5K4tvWgXHFz8Ri4OBAxdtiwKBgQDEY8zxo0ekWuvPdNVM -3l/tDDGjPdLBSf0wSEBLD1ac+qx1rklvgsxJs0I3MRxAQ9VeSXHA5PjNzj3vVAdo -yv2PpF3juB+w0EvX7FYWcVfiCcaMHU1e2EzO23IecRTZFWsUoyl2pB2dgmG+gO8E -OgPIeV6qCNVSCl9H/Xxjy+nBvg== +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDYVMefpss9TFc0 +e+pubi0OLN9fHERskMvlLxUN8KCpKL41Go+jNtmj6qdIalYbhH/mE0U1xL0m/9dV +hIYj0VGULm4Mwz2u8j/9q6nPqz1M32xAGohVLe4mZq6DQJW7XS8ELMnkU5k6uriB +B00WNwU/24RdXMLhsFWtYesSNBuAMh9RaFK6mNEKp3VHY9ZwqPkSscXvxnIS+acG +S7V8VIDN4xgxEqAcaMI5IlFhFUewqxi6xdeoYp+XQLfHQxfrPolp/mAXiWS+xyEK +lrsjM+H/USl2BflTnYldR0RWnxARZpEqCCZ3o78lBdyOUZ4P1J5RmCx0tUk0+x1c +VVT9HGzNAgMBAAECggEAZHP5vkjVV0F7vQNNeLnwcgSkNKYLn94cz+9s92wlLmec +60vKJx/xMAxmYVn/YgfE3DFWkgEpVFK4yx0erMMl/okQ0RxYKESVMlXkJGp/rJKS +0bGAXQ1W6L4Fiy0SWk6K5oVluoCXRo4t3rxlL0udRGnKrQ/F7sbTAg/V+D36Byxk +jbN9kkmh8n21/rocMzOzc4WRwMVRKWWfplfXLrR/stVIjsaAr5V+qqR6OQ3/6iye +AfFgf1KrVD1LdxcfKDbTI+LN51V28ojuiPWg3utGDs09rU0lBGmjrk2797otmFqi +vVC27jGq3MmpVOFOOQoYfX7LI7K1M0XLVuD/+4ioAQKBgQDttpBQI6uUY8QfxKn6 +l/UocomPgyp0J8ljtHROCbRnoTtTJmmIrgytwOgF18VJiIXwT6SFkkYkEi8NvTs7 +SCC45dn06Z3Zp0drog/pYaFMJZdPWF3ULNivtrAB+4l0t3cEc57GC8ujX8IHiUwO +4ABQ3y9sp/PH+TEJBy9ym+byPQKBgQDo+SCYv8h08f64VV3WnrFWFVBHD4trn+SL +XK49ITtMzBvUWfHpe6HzFsEeVUohN6cbpOBs3WuI69PAv7ZhFZtL2Y9oIgwhO5JG +FQrou9FDzBqn0rsagVzf6O8ysmKM7abo1+WSf/YMCTKNbWDJ6vswCpM1akAVgdIq +tBGAsVrd0QKBgGewidSbKp3IwuUEmQyZOjQRehWipmMSc9NNBKqBqzLE7qa/i6s2 +GIu2KL9btk/0AFCpvN8Sxgu37tR0ZaDfPn7IrEEbomItiIbXo08u6ffYMd9HcW46 +va4v5yiGOxgxavYDJQ/IV2SFQZKd/hJC/YBJSluAC2OAPDZu7vB0NtIVAoGBAKQG +CkIg/IQBgMHNMIUTM22VEfEF67X5W9IWjYHybKq0JX/LV7t31NyuD4dIqylZ4mhf +G7bp6XT7/Bj/oOsXaD/ty76YAgw/wctfs0+KNFpUUAwKHLsbiwUGHaqG10W7aEEV +B8euGc/9hb723CctLm5zc5Mu3DGINizknFIMDBqRAoGBAMPS/WxSB/GSKlaGq5mz +kC4f3/So9cz0rVv+9dN4Eg/aMiK3IPqfaVtjIGJ1LModYP8+0EnmwxtLutNgFDRJ +yjAdtScpl2JMgxtMzS9poFLTCVo5LCPFHuzhfrm6tVQjXcYqjTVUaKessAZQPPle +C8o0xbmvmzekDPmaEGWiQfq5 -----END PRIVATE KEY----- diff --git a/jstests/libs/server_SAN2.pem.digest.sha1 b/jstests/libs/server_SAN2.pem.digest.sha1 index 8aa9bde808ca0..2101d581f9efb 100644 --- a/jstests/libs/server_SAN2.pem.digest.sha1 +++ b/jstests/libs/server_SAN2.pem.digest.sha1 @@ -1 +1 @@ -CA28CC498B15E8D0AC2DD04C60AA8AA72B6D3FA0 \ No newline at end of file +3040F262AC6C02210A6C37B009647238DBC8BD3C \ No newline at end of file diff --git a/jstests/libs/server_SAN2.pem.digest.sha256 b/jstests/libs/server_SAN2.pem.digest.sha256 index f044b25a0f027..5705a80aad7fb 100644 --- a/jstests/libs/server_SAN2.pem.digest.sha256 +++ b/jstests/libs/server_SAN2.pem.digest.sha256 @@ -1 +1 @@ -EECBBD89E1F99E4DB0E6B9811A85A3F3C9C360A390D5811994E8D4C1700CDD2D \ No newline at end of file +3740B9EB44CAA386A942F009AD601AFF2ADF5DE5CF25B2E76FE9C76909E365CC \ No newline at end of file diff --git a/jstests/libs/server_no_SAN.pem b/jstests/libs/server_no_SAN.pem index a6ef685055fb3..26cb17412d378 100644 --- a/jstests/libs/server_no_SAN.pem +++ b/jstests/libs/server_no_SAN.pem @@ -3,52 +3,52 @@ # # General purpose server certificate with missing SAN. 
-----BEGIN CERTIFICATE----- -MIIDmzCCAoOgAwIBAgIERNWoyTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDmzCCAoOgAwIBAgIEB5Dc3DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjCBkTELMAkG +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjCBkTELMAkG A1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBD aXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYDVQQLDAZLZXJuZWwxEjAQBgNVBAMM CWxvY2FsaG9zdDEgMB4GA1UEDAwXU2VydmVyIG5vIFNBTiBhdHRyaWJ1dGUwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDGBqSXwzpbfK2E4BS7HftKkm3E -S2D68MDvYjdbMYQVfd+b7AsvtzgvnEhkxjgfoiID5fvy0Ag4RS7SMhpmJtA3e65+ -HLvbwRgnDiugbdNpqpW4cRWxxb/GIOMir2gbeJsUFRGBSFGQR3l172NJIKSERKU9 -m91/zoFbU2tvJPD92qf+tUu+GCL2+7lvxCL5kbG8UlYn07TEnqYEikQzpKC6KaVH -+fMFahcRIKYo2Co6+pzCNC4hGnaDLQ5R1bqUSow+0+QQ+Fhl/h/CACSdf23p8Cq0 -SWFZ73yXyMvQSx53i7n2EXrgwy6eVoA9das0wuixhAfQ5K2IJhLe+NwDUOhHAgMB -AAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IBAQC/ -mu3lLVBn1aQEJB9vFvVnpRIYriLbJ9Wz2oWM4iqJcQNti6HxYaYs4GNxzM2TrC5U -ehSd2lEIl8blx3s9gkfnS87/pRF5N55DlXX5DGmbdN3WIrnyRRwNtHaaA/MDd8Sm -8nfxtB4BAQBOclBfbm5WKVgQjFxNO3pCkOpoWMuhKPOqPr7IVL8OwYBkYFtMxdBA -gADickznCdzTIi5O14CYYMGzkbkf4dcP93WzA+tKwlDghmYOFu6Ujoz3xsi8hz16 -L2pOI+4wznbGPtXGaPEZOTV0ugD5OlbGvyCly8YqgGw7/q9/UdNM3jKPJ6X0UoFn -P5F2t6c+5jdsuao+bSGw +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDJsUV0l4vt4bLwfUO3hFXoaI74 +U31dq/qdfY4RiXk4DZeNECARkR5rr9xV2Ut8PL08HG/GwFToK83vQ8mD2LPrJNBc +QTmc2zPs6Nvsr4Vp4GwTvf4v7imAFV4sqdsCZ60S1zlA+nQ4lXlt5i6pEGRWscxQ +H6501Ycm6wua5CLGbw05j8erj1Zrr59mY9rGhkJ8e6D0dgOlQ52kQYXHFxh8MdT3 ++whhQFY8YYsh7TbCGR7xG5CALeFj0pw/unKjA9q+G+7f81VBOp9VCnZmeYM0zmVB +hWzPaaxlvn34funhrm+zG4WC+5qBo79N7gBoJk7MxuFPn/ZKImpURSD7x5djAgMB +AAGjFzAVMBMGA1UdJQQMMAoGCCsGAQUFBwMBMA0GCSqGSIb3DQEBCwUAA4IBAQBh +xjEZg2l5oNZpmEHTWNsWTpNP0/Ifl7Aw1XPz14CykQClOmrzZR+vJXsXG5B3nAs6 +BssYXMHdtonlJs0QMm/YKTrmn13c2IUl++vM0i86qgdI65KOgrXmkppVQVj6Jg0O +4D2Qivl8bd4HXTCybmFezSxaMAmXPBp6qhdp2lpH+s3vMf4x5kXenASMqX8lEJoc +nGeFRtd6EF7+PZWv3MQ0a/gVrtmUOk+oB6G4NEt/EBe0dy9yqTgdeuPGLU3lP4kl +xCJCvrgRAg5TBzgJAcaf97v9wb4BmGbTJPCjX8+M8ynLp1OdhqRd1M09ynyqJpDy +pSILRZEQq++SAeOUncUV -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDGBqSXwzpbfK2E -4BS7HftKkm3ES2D68MDvYjdbMYQVfd+b7AsvtzgvnEhkxjgfoiID5fvy0Ag4RS7S -MhpmJtA3e65+HLvbwRgnDiugbdNpqpW4cRWxxb/GIOMir2gbeJsUFRGBSFGQR3l1 -72NJIKSERKU9m91/zoFbU2tvJPD92qf+tUu+GCL2+7lvxCL5kbG8UlYn07TEnqYE -ikQzpKC6KaVH+fMFahcRIKYo2Co6+pzCNC4hGnaDLQ5R1bqUSow+0+QQ+Fhl/h/C -ACSdf23p8Cq0SWFZ73yXyMvQSx53i7n2EXrgwy6eVoA9das0wuixhAfQ5K2IJhLe -+NwDUOhHAgMBAAECggEBAKB+qc/GjvmvJwNWifbN6ekanJRHc6ZukjByLeNfUmoI -xr6cpMRbftI3nATxEnOw+FiogXbeXIsepz/9E6BPPNp7B4V2zFrjOSwONBWmbX5q -n0YJgUe/uhpedTibaYk6SpYQLUvWzSReiXaoOpP+PNhmunYgaXk4aWcXSS+fhSJ4 -NCYYuMinGdDn/uia7h3jzsPCFjMUEFmhg7vxfqTdYQw+wzqq4GkLXkKrYM+Bt6kY -YU1duZFrRUlO5oOk1lyqIIdRIuxqtNXzwxq/atYdpkEjfmZ3tT+YQQKdwN194eCR -PlwHGyKjyqJNIP/VkUqf55kzgR8ye0nr4hCOpi4TkoECgYEA5/8BTtxj4gNDXIMN -MCwCuK7P4DfshzWEyeaelyoOifVd3KUFafDRiuWjhIQO79lcjn0u6qP+pBQ2c053 -N5EuxXlNyCKCZb3j64SPsTu6SPeBf7t3uj1gdXjYJ2uX7SzSdeQgnC5AGf3HbfTb -MoKnTb4Rp7W3VBS4f8zFA/pFmxECgYEA2oPaeSUGgOE0OodgrtyCZmwRHNN0zQ/G -wz2R8OfoEknMux2JlRxT4XLK05cWCbB8XVKT/On9bQtyTDCtSyeohgIH/0ubXIz0 -jEXo4FFSuWrCuX+fryKCL3UBZdGcT9sa5c+ihp/bBfI0aGCEBpqyqU7jqhCiOS04 -MTYExZAe3dcCgYAepg+0LWV8rFWv2Rha7tWFNIL96iVzT1y1l6QH8GRvUV0PEzX9 -4vSr1t6dWRzoDt0tbdhO4092ubzfytPxIVr+d6IQ+I4lhsqfiKm7Dlrz3M7c4xVU 
-I1uk6UwVGR+/E5bSQEsWlv2c8WvxWNHEgII9wQhwEY+gqgQdzh2RAvVDIQKBgQCQ -fspUe6heRw1reqPzXXuZUPLL9gRxqXINH0THYgLzPPv16+VTxeSt22PGIU2AYMUM -TRPxFaXwmrYdLb8aSIJHA0bYid2ViNwZeg0xcHVciP10/rDQdehSi3O+KarMn52c -w/28Oi7yMqR3NQN4/okQLkub5N3xIzbaS9dB8TiIBwKBgHvKEfySnAqfuroSDRIb -ryM53B8P1sEd1Nm9QHMH9+vI4h62HSbcDLI2zRP6TMjeuVGJF1LRaHdlE+XLNyrb -qIOKQ/Jd8HD0MaQ2BckyJmstnU5UagpjkzCMmgVqObBDz2D+1PKMRultta8GNdg6 -+tqSUxiEDXjWu/BHMHtVswGU +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDJsUV0l4vt4bLw +fUO3hFXoaI74U31dq/qdfY4RiXk4DZeNECARkR5rr9xV2Ut8PL08HG/GwFToK83v +Q8mD2LPrJNBcQTmc2zPs6Nvsr4Vp4GwTvf4v7imAFV4sqdsCZ60S1zlA+nQ4lXlt +5i6pEGRWscxQH6501Ycm6wua5CLGbw05j8erj1Zrr59mY9rGhkJ8e6D0dgOlQ52k +QYXHFxh8MdT3+whhQFY8YYsh7TbCGR7xG5CALeFj0pw/unKjA9q+G+7f81VBOp9V +CnZmeYM0zmVBhWzPaaxlvn34funhrm+zG4WC+5qBo79N7gBoJk7MxuFPn/ZKImpU +RSD7x5djAgMBAAECggEBALri7c6pFdmoRpwcFgEYOFoPeFqVUhbX4nLIAjoxvga4 +YXMuO+jLJPr3ixxpKk7GITpvxwrM8F/pJvrLPxBSXfRGumKhdXbojma5Jf9cbCy8 +7KgmZj+XWRD7u7V69hp2YqKQltaku8gqiMfSf+3b6H2EZiLgGFDeAkuHg8tUFPz0 +BhPp2szdh3YVOpmYBHY11EUFyAvs7MCvfZ5/SvYv8S7BfCouH6IGnl1Tf2C+r3I/ +4RK9tqViK2uVfx6+phw/kMBk5vwHecB7uPrcEcsoQ5gOJ5UlPGcG9J8A+W3Q8kRP +85eJ1+klY/zyXGMK5vIoyPtgYxV4C8pjIUhTn6+6h1ECgYEA/Y6zJrFQZbX8m7WR +iHhXyKe0gqLWMhxHp6499SuwHIu8KOlVR+O8/5JqMTXhHcAWqgqxZ7vnsurYuqdS +GVlrs42+wz+BqrKdhNCSjkbHu/+QnT4Y4QtlwjJ90gldMpJGsz72/qT9YEhqljrb +5N0AEpfazAzig86nDmoZ6SEj3HUCgYEAy6KquIo+LhmMZWf7GFj7F+xThUZdSL0Y +3temH3YDlwwYPaaLeOhlUB8YONQAOcBKU/kg/rCXjqQj+/esi8uzByREN2JuBDLx +NU0hFjRGaR8ghrLtQqPI+z+dWMf5n/M7sP0/JZUdoRnbeXg3PTjgsoOl5hrfPA5v +kKV98BBsCXcCgYAPlIQnnX+eMFeMTdTaeOKT/tIVsOHOhHRWtlsyRHP23RM7xFKs +Ly8+2QVTbscdZ988pZmPETnEga+9kGh4DEmU5+HP54gVMBKDxbkrque9ApAlSVqI +AYZIL4gRrueeIuTbQMQ2k3bKQsjh4E4Aux+1BMW6AhStGrajKRvcel/UvQKBgCuk +u249wsHim+KH/JrlRzuDXQstX4Tdcl0gXuxTQMW4w+FCtpW9SSl44JodZcb63XNN +67JqarEZoNS56nMzvzUCotQz9lxUBrpjw3mczpJtJ213H76ul76xjamfgnKzkNGI +FWaapPUIM6+/AifO7umTqZS9oaafeW3I6krvWhhVAoGBAKlFTaJ1r+rF/JWdnKru +ZNYNU88ddKy5sSc69F0uH+/jZv/hBaqPo9aeAfiqQwVmm4qcjI3pWuUburkNFpmL +C5VQ2LVxgUHgic17BR8GQAlcmL7bJawOEWiijU6Jz74Z7HnBXKPoRsOnBtAuX721 +tTfECd3/iuqmfVKUptRqXtb9 -----END PRIVATE KEY----- diff --git a/jstests/libs/server_no_SAN.pem.digest.sha1 b/jstests/libs/server_no_SAN.pem.digest.sha1 index 1fb3e0b684d12..fdcaa7b41ff1b 100644 --- a/jstests/libs/server_no_SAN.pem.digest.sha1 +++ b/jstests/libs/server_no_SAN.pem.digest.sha1 @@ -1 +1 @@ -57BD30716FC867960EC0F13CD7449A7791D11798 \ No newline at end of file +F6F1B14582AE459BCD82A3EEA8B9DA6FEB53FE62 \ No newline at end of file diff --git a/jstests/libs/server_no_SAN.pem.digest.sha256 b/jstests/libs/server_no_SAN.pem.digest.sha256 index ba98a5f8bd965..27a7bf17d973c 100644 --- a/jstests/libs/server_no_SAN.pem.digest.sha256 +++ b/jstests/libs/server_no_SAN.pem.digest.sha256 @@ -1 +1 @@ -D5FB78ABB12B2B2413A17981FB384AE6E8E059E53EA8DCA471935799D410E041 \ No newline at end of file +59924A914651C13CED05B61D757DAD08BD2AA0FD0D23FED683DF39748CC59688 \ No newline at end of file diff --git a/jstests/libs/server_no_subject.pem b/jstests/libs/server_no_subject.pem index 7766b47095db3..959e3ae42e8d1 100644 --- a/jstests/libs/server_no_subject.pem +++ b/jstests/libs/server_no_subject.pem @@ -3,54 +3,54 @@ # # Server certificate with empty Subject, but critical SAN. 
-----BEGIN CERTIFICATE----- -MIIEDTCCAvWgAwIBAgIEb1+q+jANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEDTCCAvWgAwIBAgIEPPun/DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjAAMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA6ETKNscCEbNn3MC81M9hMzfKlEU3 -KTZRZ3EOfBXZxxqmEgMU6nBF6Ez5rrLk0EoA7cfnQYMFrP2sdjJAjq408dIVT7yX -jpwme7ePUTY4OecMO3pkuK5JwRZT0393nDUBgp46ke+CBTaVdoNLNVlPQqvBWC8+ -4IbNvJRuaLErZBh/dXNpgcBDokp6UvM020TD9oOwpoc6byW85WVcbZ4w0FxllsSM -xaIGe7YlsFCn94NO1r6gDUdB4JQemFIjdb83/oK7dudLRnMi/AtLbNet416P95+V -m97WKwvrfaUk33ZfdtmXMYQdaEEJJ49isxNE3inp+y3C/rzFSUoGYVo93QIDAQAB +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjAAMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAwNGP+tbyhjO31TKEofcmnOH2sNNU +0Jok5kjftKaq4yDshDhrtymdQgkb07n/Lm4XvETO7PwB5Ajtq6VCqMDwgDvCWOJM +61G0UVBYXgVXz6hHE70x/JYWgChHkmBjnZ51cphSsMzjiAtm2V/bn+uaxDpXJnuT +9XgGsbxao/748/JRIlKZxYRYjn+2fsqjRU/doYHzA2RFsNsaEWSL6tEH8Y7oUSkP +MZ/dSh2xxxyb4IwMs7yo4AN1Pkezt4dq1h8CYlc4Iob5AKKpGoFT0gMSkpkDHF02 +iKergDe8WjKMeNKU0VX/7YIMYOjB063bnpKO6FxCuq4ayCx1yfajY7OfTwIDAQAB o4IBGTCCARUwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYDVR0lBBYwFAYIKwYB -BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSgGgjwDilUeiEuYEvNTvgCZt4qKDCB +BQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBStKpasK6aPn4IXRTlOfgJ7c4ge+TCB iwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlv cmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzAN -BgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0GCBHvUrJMwLwYD +BgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3QgQ0GCBEreWhowLwYD VR0RAQH/BCUwI4IJbG9jYWxob3N0hwR/AAABhxAAAAAAAAAAAAAAAAAAAAABMA0G -CSqGSIb3DQEBCwUAA4IBAQAIZuBwcX/yY7ygxhsCI5o/cwKD+icct3b9djyay7Nv -ZEPPVAhAPXTh3i7ZkpU/RqDWQ6avEHXuQuZEXusB+2retKGd7qnBb5mkJzJvVXjB -MInX1Waj1MQiESC7RmNlJvR7JcPZxDjKxchi4/AJkUK0vK4H3IE34zkL4/EE3zMO -iII8CI67xMxL34sNEaG/DC061Ti1EJonkM5khfw0jaXXjbFkKCjmhdTv0WXvZmYK -AN0MOiyMNnE7j9sLcE3R9kvgopJAg08gf3JSkyDTMNQJGxqblmFoo+bKEXfRCqC9 -QGe6HIDiRwxjByZ9iqYxqfiH1G7TVbysruTeiP/Jdmab +CSqGSIb3DQEBCwUAA4IBAQCEAFXRg0IP+BYkDthBhkMNNRYxGfkiG9PsroHp5b+I +dI1J0UeeKdXHQKL45sjTog0VFwsGUc9EXauweY/airVE9dNct1P95rysvkwCSD4b +X1eQe1QzMxU+yUotPLK4L16sWjzbjVYGzk+1TN8ICHUtIEZBSgXNt87mmry9G6OD +4F/YyJMuQcmtxQER6rNZZhyXVC7rEIRIqo4GJ9btlxv6Y/nJd5r1QGQrdHtn/kX6 +d4veHRNk09zDySYvaOK4eiYgBmyB0ouV0DMdmKid5hKIro5qq/aD8IRmtv+AKVCX +vS+6RpYFOFSDk/xL+pPBDBuwxYrCNJ3dJfZwB3/r7v/t -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDoRMo2xwIRs2fc -wLzUz2EzN8qURTcpNlFncQ58FdnHGqYSAxTqcEXoTPmusuTQSgDtx+dBgwWs/ax2 -MkCOrjTx0hVPvJeOnCZ7t49RNjg55ww7emS4rknBFlPTf3ecNQGCnjqR74IFNpV2 -g0s1WU9Cq8FYLz7ghs28lG5osStkGH91c2mBwEOiSnpS8zTbRMP2g7CmhzpvJbzl -ZVxtnjDQXGWWxIzFogZ7tiWwUKf3g07WvqANR0HglB6YUiN1vzf+grt250tGcyL8 -C0ts163jXo/3n5Wb3tYrC+t9pSTfdl922ZcxhB1oQQknj2KzE0TeKen7LcL+vMVJ -SgZhWj3dAgMBAAECggEBANg4mYpOzpVNzwVJA0CU4WZ9rH9Ew+oAn91M+O/4o2hf -XXPtHH52EA4GAnVoIevoyF6Stqxc3V7CM9dARqrMb4siSCaXaPsgUA7hvXAbqlH0 -zYVdFgB6dzxhnZ2Izv9CtOo0hE+wh/h0bMULymqafleRboa6TK1VsJa6EuIw9DTT -d0FAOn8/bnKVT0x94RorbUdLhfMYRZ2CLcABsQa90Z6++uWQOsIWYGWapSW5EdQ0 -WZ43EkRHyIUkFEY0Z+dGRyVSNDiZMYWgv140mfdy8ftK4IeiAXj0rDcJtt8Ih0kZ -VDIY6hqMORQONbcH1YMiISt6xhDGeZVM19HcYT/nXh0CgYEA+nr616UuraQFbwz5 -aRRHRjcQi9KjLlfGQQDliR4kyHDcnTluwNoVye1uuPLSjd1CGwyIKRShUG9ariOU -3uqej7G9u20S+PZZ8196SHNaBK0xtT0cQDufUZo707m/tW+8P86EkZQGzyAYxhOK -+AbeOk1S1+iXwuXvotuRGYK8F1sCgYEA7WMS1hnOkbNYWc3h8EzLFOA2ujO/9TCd 
-CAMiGeU87G9B12A/KEEGqgg7/c5qIE60iYBVUiLseeu7M7ZIpjNgvzZYFonp5bsp -Ki2GbrnfxtF9hnTDXql/tYbqIDaJSIpF8NTaMEd7CHdIUSe1Dsc2/b5/hmhAVMzj -SQNUUJsGPScCgYEA5hgQ1AYGkjYpU7E8cB3Tt5mf1oIBqvGwykfzk3kgWwzqbHe0 -2O29tEgrPTS63N0S+9wQPISaB1SznWJMQFaQn/msDD+PfSp4yQu9Pk7Qs5kSH6Zq -jEr4+LJRIRbyF87zxD2HJGAxvRWEDHkpYNyWSkJ7xqEAwGYPM7C1YxToih8CgYAN -K5X78uqXAtBAC0AhyODrg5UFt6FKxSuxKhtWPHWo2HNas6hNX24zOMm7Rxx0Nmml -x0z0haOBECcOs2pMbkVL1oQEnBox+LRL9sKimgVkTnMcZpYARn8jXxzWNLBhKeq/ -dc+1JgIZciS8++r7qunOIieJGXeYPNXPUzP2VON/uQKBgQDvMfdU3B99x2p6bChD -mmKu0OF8ER8k4bhFQ6lpp1zJy77RYSfR4mdTe/T4iN1vgrSSHhdO6DCJBNFNJOzh -YdrXLEFjQqznR/GWLj8w8lR3al5tqWrLzSvSvt/DA7TYtyR5YfLVjyu8V94jXTsj -8MCNB+rBU7bCljz7RZ42P3gbXw== +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDA0Y/61vKGM7fV +MoSh9yac4faw01TQmiTmSN+0pqrjIOyEOGu3KZ1CCRvTuf8ubhe8RM7s/AHkCO2r +pUKowPCAO8JY4kzrUbRRUFheBVfPqEcTvTH8lhaAKEeSYGOdnnVymFKwzOOIC2bZ +X9uf65rEOlcme5P1eAaxvFqj/vjz8lEiUpnFhFiOf7Z+yqNFT92hgfMDZEWw2xoR +ZIvq0QfxjuhRKQ8xn91KHbHHHJvgjAyzvKjgA3U+R7O3h2rWHwJiVzgihvkAoqka +gVPSAxKSmQMcXTaIp6uAN7xaMox40pTRVf/tggxg6MHTrdueko7oXEK6rhrILHXJ +9qNjs59PAgMBAAECggEBAJaUEYimzAcZ5HpOVV3XEQR7QpecsiOl/SMJzgnaDqIJ +WCt5lEUZ7oFfc6fJHQs5VrfkVGSl+SQIBPOCv6fh5O9/u6qGL+OljtfhkuD58zVs +CPXVTnAfFbtHuX0KYUD6OmYfppQDrbzUiqE/RtiAugB8PwsCfu23qiKrIW3o9Kos +OlGc0n8WGOOhfA8uDr+jqaIS8rgODBRDd7siTm+YIPG4HZuW77rWBPHMmgxW5geV +RBCbNnlBokkZX4ZfBzSiPHkDVZutHsNQmlWpl3Equ9O0U7aAOi3vCEBFSPhu5Z9X +1YRi6gVIr+iUtKPmE9rXgNYuY+oz7YmZys4/4RHQ/GECgYEA7z62La3mv5+GE3MT +4Hx3/mR8uu+v+sTE6FjU70d4ZJSA6ack6taUWsm75aTGLo+fTozcig1Zs+lPDZiv +h0sxAk+Tu9JkR3V4mPO8JPQ/weRz+QvrWf4mhEzAmhGfwd8l9EZmDR5UlhtoR9T1 +VAdMQov3h3iD7ThXr3Rqh0LTzCUCgYEAzlJ/poP5l+ypkTIJzb2G2qC8htUtDyW2 +IwCPFXSs2Sze3BeThXwW0LaD85W3yw9xVhoS9wFH4Rc1eDQty2UbkpMDwfH3EHVh +BCzpu1jrkN+F8KkBMa75Ak9jeuO29IukPVKnNUEWqskhVVFRym6zs9938tEzkBzZ +fpse/Zum6WMCgYEAiugLWFxGxG13qCVBni5GcTKg5NyzfVLvzMN+5mrFsQg0DPVZ +zuvFeSz15nCUDIahBTdt+M2ljnrgxlEnYNM8Yk4XxY5zYLYIzi19yKrztbzRxQCx +pi+U5220yf9/lU3duIWDTQyWKg1Br6sqwZ33HhAsKMDFQF9dMebSzzPyyu0CgYBf +LDEUB7bfvNyvTGy3SiDlwtWUQMurPBMbbEuUcyC0gX0/+2QiZA5GMpsFht+kPLhk +JlzvMdkNXN5eV3t0YmxonYZTNaFpJywcd/dNY4QubN1lGSCi4Xqd9S8HZflkLvIR +E4psB11EZMkKiRt4jL46T/ANwzDM3nH5c+bEx8MjzwKBgD4YsDVmi9KZuq2kq8Bh +Vyxkwyl552a/0CRFFYooN/rUpidVBxegjqFaD8ynGQXFv1MR0Azz7slzI3AI10Wi +/YmD7V9Ko8iuO8Rnu5EMXMtqwswc3cSeR3Dxn7B/CQlqvD4DPYWlsL3+gCch1+8f +DZu2ZEwTqJFaQhUcrY23JM0y -----END PRIVATE KEY----- diff --git a/jstests/libs/server_no_subject.pem.digest.sha1 b/jstests/libs/server_no_subject.pem.digest.sha1 index 928599a89c9a3..d592305cf22c3 100644 --- a/jstests/libs/server_no_subject.pem.digest.sha1 +++ b/jstests/libs/server_no_subject.pem.digest.sha1 @@ -1 +1 @@ -B3431C4D34FFE25C36ED62A6958EB9364E5C2DC2 \ No newline at end of file +8F5D79EF1032B2D9A3636656CD0127A67F1D395B \ No newline at end of file diff --git a/jstests/libs/server_no_subject.pem.digest.sha256 b/jstests/libs/server_no_subject.pem.digest.sha256 index 0f686d50330c2..a9f8be39a8d79 100644 --- a/jstests/libs/server_no_subject.pem.digest.sha256 +++ b/jstests/libs/server_no_subject.pem.digest.sha256 @@ -1 +1 @@ -E1DEA621F4E8F1E4AF9471F0A5FE063F8341287D73304D6314055F0B61D39D02 \ No newline at end of file +B34A00B1A79DFA5ADF3F03F762ADBB7430F28A08CF4EC0B00E09C644CC164AE2 \ No newline at end of file diff --git a/jstests/libs/server_no_subject_no_SAN.pem b/jstests/libs/server_no_subject_no_SAN.pem index 0c0fa9291ccb3..886024b297ea6 100644 --- a/jstests/libs/server_no_subject_no_SAN.pem +++ b/jstests/libs/server_no_subject_no_SAN.pem @@ -3,53 +3,53 @@ # # Server certificate with empty 
Subject, and no SANs. -----BEGIN CERTIFICATE----- -MIID2jCCAsKgAwIBAgIEf7gcJDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID2jCCAsKgAwIBAgIEQYe5jDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjAAMIIBIjAN -BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu9BAloe22zkr+eJj4AtFHHSY4gib -e1CqSp6iabV1kFlxoMCu4usfw5i8z5b5HA9vUguQsrUvcs2p8H4tzkVGwAMPvFXs -7wB3c85GC2ds6rxMJLtLFqNv6PcmvFCVyip56AfyDgAT8WuPSNItIVYHEvKJ5Fkv -VmPY/q65dtbJkI0wTJilsJ37vgL6PeqgZDhTlcwYQR0eQ0L6SXDp77q7/2wAIsyL -yZkA4LAF3nqOogF3aA6ESPlpfAz3PztBcv8z3NINgiMzWhve877Daa6AuTU2Fq+0 -4V7+Gn5PODcQw3ze8qGSD429wa2DzsvqwIXEYxHg+M6QIMV54XicsZnSEQIDAQAB +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQwWhcNMjUwOTEwMTQyODQwWjAAMIIBIjAN +BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2DdmP/51O/Dn4LwkBFyAE2qAebqA +iu4x7cGVMe/WKC1TdYUmp36z7IxEpNI1fCXcWZJoytU/HviJfViFh7KyhKcIGx93 +X4Pd/AC0IGXJ+lR8EGLQHU+UvAwqHOp1ef8OCFS6D25FEzVANgqUd9M2F9FGg0pB +pStkp9M6YkQzgEC/SnRD5R521l7Ui+5I5g+CdMezX5+69Y7Bil5GriKKiM5yzdyV +J5NK/SynoTbupzm5BP4cpWK9NwebCV5femrzaKOt8nK2rNd0hxuM6gfCJV/t4fFR +lUCpmJWJt5v6mkkr5R8cgLykLS8TLNDNhEDxW9KPSUzeD9lEB0v1ABvX2QIDAQAB o4HnMIHkMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUF -BwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUVeu/OMtu+8A5fnY8RipJ62KvK84wgYsG +BwMBBggrBgEFBQcDAjAdBgNVHQ4EFgQUFvZLZ4jB9Kz6YNhf6Cxqh4VGwvAwgYsG A1UdIwSBgzCBgKF4pHYwdDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr MRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MRAwDgYDVQQKDAdNb25nb0RCMQ8wDQYD -VQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0IENBggR71KyTMA0GCSqG -SIb3DQEBCwUAA4IBAQC2cmJknkplrrHBKFfodtZk5988RCv+3kePMd70zKEtEy4F -IUo2qalZ+DkbX1WTVxnqkj5nUxintVCYGDdDubMdcRobrTXz9HP6k6tkt/SY0PXe -7v7/zxuDndI+HpsYcaw6caznD30N9L46BYR0iJ7gsHbYzSNi/3hzf0Wmj5zpbcQl -YPUso/mZFvsSrqo+fOjgTuTpn482oSwictno4rwcGmT2wUFxYQpJsn17n3uN43wx -uGpSuL9LxwYdn0FG0255zhJkp8e8ofHwS5jA6h9pDR1mdfrPiwSGNlFiUBTmbMXj -n2uIDbkoJn42cQUGKxqyrEe6qMHYqET+e5Y5boMm +VQQLDAZLZXJuZWwxFzAVBgNVBAMMDktlcm5lbCBUZXN0IENBggRK3loaMA0GCSqG +SIb3DQEBCwUAA4IBAQAvs0z/RFgx6BgfH4IsskwxCooqJ4zSZSYwUlz1lZA8jT8W +NS6KuXwdVw9UvrrzdOFVZbsyzlVjAvRuTMV2z5KpPeLnfraxWuDov7gsYqa6UCfX +jDX38E+lolZ17ZD5m8BAaNIw1/tvCEokw6+OTK1FEWpEhzucLkF2P4gD3FwAuz26 +ibuG38CSOYn9sn06YLKfNNQVvPb/Aju2/PGT9A2FuLb16qbaDscQbIDbakMt3Kp1 +hBCVQw4EguqSVlxFO+5Qgv7HYd1ZF1rrrua4NqyZ/p21NFFcwmNaZCVRe1zeZ6zf +Rldsmd2l8y4mAL/omMK8Nl0/k46izX/oA4G/NWjh -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC70ECWh7bbOSv5 -4mPgC0UcdJjiCJt7UKpKnqJptXWQWXGgwK7i6x/DmLzPlvkcD29SC5CytS9yzanw -fi3ORUbAAw+8VezvAHdzzkYLZ2zqvEwku0sWo2/o9ya8UJXKKnnoB/IOABPxa49I -0i0hVgcS8onkWS9WY9j+rrl21smQjTBMmKWwnfu+Avo96qBkOFOVzBhBHR5DQvpJ -cOnvurv/bAAizIvJmQDgsAXeeo6iAXdoDoRI+Wl8DPc/O0Fy/zPc0g2CIzNaG97z -vsNproC5NTYWr7ThXv4afk84NxDDfN7yoZIPjb3BrYPOy+rAhcRjEeD4zpAgxXnh -eJyxmdIRAgMBAAECggEACusmdhnZtZDzT4mryMCe+fKFWM6rS1X7MG9cfczA0u2A -P5o0EJketZ5Ri7f5L3puPFTTyq/h3Ei8knCjdRRt8oe7CHXuWk6qox7gz5TprhPY -UWJaESzbOq3zoIsGykQB5k1f8xRqFGTuZYtieEeqDZ3wCkhtChav7M53lcMS4MZa -/P5lv7lNWEnJh59LTXlJ6Pp4zh39OGV9F0MdJzZWhZWcfuEdTn/fYRYiPn/Pggm5 -tliBdNYPnn1Yu6gIQru9EtzTK7f3yeq0oaKBXIk7PiKWZGDQKu5bLGSbFkwvDEPe -FzoT0q5YZ8cXwRlBp5QtBV48jfl7iG1+zTA5fxVjSQKBgQDlcPVxqv1jOeD/qarU -ut24Jfv3sNd2oouRiW8+j/4mkBklCYEbCuIJsuAvC8yQaLZQZFeWrK/H4I2eoii6 -qQ3/dCH/D7LerNgZM1ResjQk5eNghmmhM1QnJa3HTz06h33FZL+m+QLXQ9xwmaHV -6om9wDIeWonmmP17dVqxpEPUFwKBgQDRjbyNPokBxAXnq7X9mbnKuMHlqgUyBD8r -sInn9XlGjqBBVNpG+k02z+kl7qCwSq41qFHAO7LSnsrJj/xb3oC9K68/ddsy0esK 
-76RhSYM+K1ohbNmXy7Sxab4nw+9mIOyH8ZViPCiX3njRFAnHYAHmAwIIe7qvC8+S -MqqSRXLcFwKBgQC/gUoMJwk5wfyPyGEDEDmx01p9AgjFR6SwxAKrOGqMa2fhZw5w -sOO77qZ5/iIvQocxkJ8e72IEtePezUCrlCFP3/HkP4kvnRr95b79D0bAqXTggzA1 -UW9jAWsL2ZKkgKS0loP07RC29jhPb5wJvapcS8hJV46Uf4vLeUdK0G2g3wKBgEsS -40Ua7UyjpJXuD4Iua8Bz1otoEPptSRBm69EdQXiEqmcddiHNlJIZhGahihH+f1Eb -30XmXVdVqkLyAbAME8Ux/FPY7lHToMhHOHXeN1WzhFPLQl0+jpqszoJCkObezr6Z -TzJlaQmXm2MUMbiq7aMw2q6dXTFPIus2maNTnCW7AoGAFBUq5JBvMntjcs3qNv1R -Cr1JcC0ZtoYoMJGLL8VgM09P01XnixkjELQzYhrEMPrZRkFUQTtakv1J3Z6E6ytC -0oWkr0OhCMdf7MVFh5rXc3MOFK+t17ZgQ8n4/YbmncUmnu0Qy1C0XnLFcdJtyE6t -snYKL3t0oFL9WZd7NiqA3Hk= +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDYN2Y//nU78Ofg +vCQEXIATaoB5uoCK7jHtwZUx79YoLVN1hSanfrPsjESk0jV8JdxZkmjK1T8e+Il9 +WIWHsrKEpwgbH3dfg938ALQgZcn6VHwQYtAdT5S8DCoc6nV5/w4IVLoPbkUTNUA2 +CpR30zYX0UaDSkGlK2Sn0zpiRDOAQL9KdEPlHnbWXtSL7kjmD4J0x7Nfn7r1jsGK +XkauIoqIznLN3JUnk0r9LKehNu6nObkE/hylYr03B5sJXl96avNoo63ycras13SH +G4zqB8IlX+3h8VGVQKmYlYm3m/qaSSvlHxyAvKQtLxMs0M2EQPFb0o9JTN4P2UQH +S/UAG9fZAgMBAAECggEAJaSvhqC0pHTyyeu3kZLRMZAvQgI4cve5dZ1obkNFU8Vg +bGt8KVkj1iEtqkx2x2CIFogo+ndZ//hhZ5NL3C4+al3ySVaXH+gDc+ZjdeiC27Rt +zSSG+zNpv7uWcWZYgZ7oQIcPxbP7DrcMsNvysVHybv8eeakp+KmvYpCh743UMZXC +Rjm7Xhv/ihDcNpO0nn1zmdmUnIh2/dBKlKvpceWEWR4hLjBqrxq/Us4Vs359m/zd +4gwgTv2TvjwnFoHs7BBQVftRFzzNl7ebqPTt0e5B0LA06p2/ch6uLjh255srN0sw +5KaMG5l5zvrBU+1EFN3+PotFlrKjRmE8q8ah6Cq0CQKBgQDw6rgnP2LowJt1+5MB +IQRDilvAQhs0e+usa3Ktff56G0LMt488+CNsfsVVJfo+9iJvEaV7IkRJmhhg1ppQ +QrdBo4stARQ1bvCZSAR6JG55RZo0l8w6lZedrsbs4IEq+I9Yik4wRibKnFBUZtTG +ft/IWfR9tokoo/oWHEG8Ncff4wKBgQDlwMt3srcTWywvry+jFOqIB4SHmOTgzE8d +zfVgyX6ztMsArariigxU1Wzrges3meGItQ/5QjC+SSi5iGfQQtj5j1iDZf8KoYmf +3e1S9zn69Blrr5Zv/BFTD8FOFjHEfi1p/yc9wnhKuaWddl3ZpskmknV3hnbXi/1D ++Q5P+MH+EwKBgEplUUTdcB/MCrXeYEEC/xwUR80RHPGSKu7tp1YoCEOQ04oATT1Y +qye/5hQrTCHRRETkQCPMKyZHbavJ2ZFbaNfeNwZIxQLTJX1QHgHR6kOM2NfQ4IJe +25kX9doEYh7w7uM6onaJ00TSDIRj1OUPHO/zx2piyexA5uOZCjuip/xdAoGAE12Q +yOITdFo2+z308fYNXrHjhdppUFloQcbea+4P8+3FPqVkxfuE1pG2wJMO1Hzv+anp +UsU1fpSEDj9lILTBvSdL+qdaO/cptoPqQRf9lx2EFwOR1paUjz/At2g/gaRxB4Iu +OJJvuTy0rPURLAtW4R1vMUfwNHLkQhp4Dl2VXGkCgYBKttzSEmn1cYblMQs8BOfp +Wp/BsYY0atATvZsBE66F3r9/uCd/FWRjkh5ct4awAZaqCsk8SLDhXmlbfS+mJHZR +QzTYI68Z0MGQvWr6pFAvCKagGDeQM9uTNJMgphRzjbHe869qCHVZMZ/EZGLpjjqj +9JPmrPE/sTYezAlSWKtKrQ== -----END PRIVATE KEY----- diff --git a/jstests/libs/server_no_subject_no_SAN.pem.digest.sha1 b/jstests/libs/server_no_subject_no_SAN.pem.digest.sha1 index bb098491fa902..fc9e8098cb9d7 100644 --- a/jstests/libs/server_no_subject_no_SAN.pem.digest.sha1 +++ b/jstests/libs/server_no_subject_no_SAN.pem.digest.sha1 @@ -1 +1 @@ -22BC5A014E9F2127C33A41C26E2044BBC2B53F99 \ No newline at end of file +ED3B838A6EDE1C3ED7AC7A07EC1376D4672A9437 \ No newline at end of file diff --git a/jstests/libs/server_no_subject_no_SAN.pem.digest.sha256 b/jstests/libs/server_no_subject_no_SAN.pem.digest.sha256 index 67573a0f95299..7e07216593ded 100644 --- a/jstests/libs/server_no_subject_no_SAN.pem.digest.sha256 +++ b/jstests/libs/server_no_subject_no_SAN.pem.digest.sha256 @@ -1 +1 @@ -D5CE39CBFEF9B028FEB2F7395E4BD62E58914EC4C8A2BDB134932F1B3E3B4633 \ No newline at end of file +3F0D4C05487B274071B96350851F7C5F05583EA620744EF245BCA33202FB0283 \ No newline at end of file diff --git a/jstests/libs/server_title_bar.pem b/jstests/libs/server_title_bar.pem index 13d303c4fda5c..c5c5030ec72be 100644 --- a/jstests/libs/server_title_bar.pem +++ b/jstests/libs/server_title_bar.pem @@ -3,51 +3,51 @@ # # Server certificate including the title attribute set to bar. 
-----BEGIN CERTIFICATE----- -MIIDijCCAnKgAwIBAgIEKf++izANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDijCCAnKgAwIBAgIEMqSE8DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjMwMzIyMDIzOTE2WhcNMjUwNjIzMDIzOTE2WjB6MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQxWhcNMjUwOTEwMTQyODQxWjB6MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG c2VydmVyMQwwCgYDVQQMDANiYXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQCmLpAVBx01DYjNf4ElBIZvtYm3JXsOAYa5sYhSXHzxMA7t6xBpzynrXqxt -WDBwVlL+MEEoBi4lP7TBiBD9aZ/6agukeKliv7DuBZUSORIfu8aOsIXEe+U+F35q -WvCNod8SpQrxvjvvLbQsJCD+zdrzzIVOCgYToAlDb0znu8fXxFQ2gOPbJEu60aX1 -ca6hPA8+rmbt5KfPJ+fIPV/onhaiMuUklTX7PlntMhYgGYANFAP6fVw2OIgeGXjn -67z+ZD14EQ0reSfzqrKEbvqzrr8MJJ2wJYoYrT/Atu1JsLeudrb8ilmx26jHKa80 -OG3rxObOsg5z/0K2GsWR8AGXE0KzAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2Fs -aG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAFWe3TFS1T/GwfM3jE2g7MJQZ -30+p9HSfiC9kA3KPgyQE6lna17gyAS+YNaAQjC3pT1o3Dbanjs2Y2Ho/6JWAoeoj -puzrkgCH9IHIKhR/+JX/XwX+yY6txNzwgRvkdVpQkHZ4dp4LBb9sNQ+RA5T3rAlR -5g9/LwJbBGP0KSG2nxrsDEa3uYtm6HaqyjyNtCe6Hy9ez4qFq0fmKxnu2DnGgRwZ -O8hxW4rc/c5JRp5q1EuocpEHZTqZ1SigtdA1nBe6cA60gEOqOFfA7DrN4cM5vyk3 -fkxPGQ+uP/6tTSF8DHIL4lE9X8clKMYc8UU47SYCzN6NIKLBQcaQHTCb5V5ZwA== +AoIBAQDw8WgpAqCTkA2o9SESxrrP9rn49fNarF9z8P6CAxw8nwwaItJKj1ovY/vE +Mes9pmnduVGq4/kG03DHtFCHBsqfFttcMAFD4NbhhepZQMAO5Jj82j7CqtFYqFKz +5eRs4BooTKX6kyOEmhZJN6G0LIXFG1UgsGfL9SqQpC7KU/Bi8aUgfEYWTsaBtHVV +4KU8vtz+0jNeyDF0Llr1OqiF5nwPFvfGNl5ZLy8oewfcPdaISSCLwesYaFrODrQQ +nQmUenGswNZQSscDBbU6MuLTAjfnlmijlnRRA5CIBN1tNHujVAWyHKOIo7JHfqtf +80ePNcgulwrYc1jy1ein1wLjDIANAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2Fs +aG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAS3MosJziaHA2BT30FzQvCQMj +BUajLeL4K/8gNIEesrP7qA1w9FXhZgw5zq6tmwaNsZ/M1efYh8KtmnddxzE6DbNL +lfWZrX51sVKR1YSXhvgN021O+pBFSfuJw1QY38XhOHxqydfJI/qfdrZpherqzQvT +3P9B3IfIAzp+3b58kqEOh3LCCabcsr59SgQQWoaKZo7QaVH/uP3p+TCpRS3Vq8QO +V5uVTStgl+mjrsrqA0nmgZuij8cveARo/7W7oHcuSoS9FZ+yXz/ZIpFV4OkJ1MuP +RAwJQRAO5jEEucpadJzD+bQUKBxxL3uBdE2DqHbbqn6d15vxbowu7zcuNzfCYg== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCmLpAVBx01DYjN -f4ElBIZvtYm3JXsOAYa5sYhSXHzxMA7t6xBpzynrXqxtWDBwVlL+MEEoBi4lP7TB -iBD9aZ/6agukeKliv7DuBZUSORIfu8aOsIXEe+U+F35qWvCNod8SpQrxvjvvLbQs -JCD+zdrzzIVOCgYToAlDb0znu8fXxFQ2gOPbJEu60aX1ca6hPA8+rmbt5KfPJ+fI -PV/onhaiMuUklTX7PlntMhYgGYANFAP6fVw2OIgeGXjn67z+ZD14EQ0reSfzqrKE -bvqzrr8MJJ2wJYoYrT/Atu1JsLeudrb8ilmx26jHKa80OG3rxObOsg5z/0K2GsWR -8AGXE0KzAgMBAAECggEAXNBmwofNpULg5D1RaNZlK2EOAI9bchAiKfZgt/dWBPMd -c341FZORyxZ+YTe/Hg7onXVf/rWs8jrpfqm7K33hzt+JjxuhJzj+3YGap6neWIDs -vecTXxD/kTVX8pjF/6SnzWcGfMwN92DkXz7yer2Ii1/wGAz7JdzdL5+rKUY0sGnd -EVs+f3y46hJ36ejD/DM0Lj9zVMzbOlA/Kiuq+uHGrH3DZBiL5qvzecn+3HM9o8kh -RzmtdllpsXq+P+MxFoea5OuIbq2vuNh4Cpg5PxEMbXhtPT0XwDf2NtN85CHL2glv -zI0CqaJ/kNLLLorNrbtekXuLllZPJezPxefyXcby0QKBgQDZi0sOKIAmVXc0whdj -MmsWpgtjs7S1NCgk09DiObSum/OWDMOYarFGdR/tDl30mzvpCbkk7QhNZcOZlNGX -szfi6jBm7ejbDaEexJ2U7gU3GaeZ13AqIDukAV2ArMwR40S25JyZ+jZvgsiUXhjv -nRFXFXaPMejYKPVX9CwLeXBvawKBgQDDju54KZYCKzPrZ0j+CbXTKVEC52Ch6G7S -g3AAOMHoVXGhn26jD3Uietnq3KI7oSHHeNkqQYYdbFCkjMkF25Rp9xlFILoLZ0VA -G6krXQ73z+BRPK9TPwzCaVxSXf+mxF4AIrGZbYsSZj+htm74opRk6+q3YrGzI9o1 -0ga84tez2QKBgQCCwT1wmhlEcTRAKrTh86j4KP9JgvcHvvyt/f5cKzEVjjjfpHZg -AyjgX3+7/VmtryxYSnbU4f+Ofa8Ofatokdjyc655/19pYozIMIdCv7m0v5/EUQBi 
-4ZLXZdasg6/4xHBFua0Cw6i6Z5Jl0xUL2I1WmVj0gpwgaKXmoqVilDBnVwKBgQCv -NfqXErtiSg8ElM+jPFP6U4RP07qSlcvlNPo+WJvza8qZgl0AH7NVJzjj4rZAMsgv -DimUYIynBArkw3bAltHMdyXe98l4uhgjriTNw1zLzyYt4u866Lyn2vpqmemaI0oN -WQhCbREzdQUCAJBAmHnYSj9L+1M3K6IwonKC/cNBUQKBgA+Jgzck+Q+mD/+ZvC1R -UWQmXG3IIMrpLRb+7eAanEDZX97sprY1E+Z05TbUuseR6IheED46JoviPtRYFRHV -ZBYcuhOd/BdDF3u38U08EAQkqaZnBzHM+780IphFRr3o/wH2JSwyeilSg8q1/XxO -VnZNKtdpmc5+EKlg3UhTb+T8 +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDw8WgpAqCTkA2o +9SESxrrP9rn49fNarF9z8P6CAxw8nwwaItJKj1ovY/vEMes9pmnduVGq4/kG03DH +tFCHBsqfFttcMAFD4NbhhepZQMAO5Jj82j7CqtFYqFKz5eRs4BooTKX6kyOEmhZJ +N6G0LIXFG1UgsGfL9SqQpC7KU/Bi8aUgfEYWTsaBtHVV4KU8vtz+0jNeyDF0Llr1 +OqiF5nwPFvfGNl5ZLy8oewfcPdaISSCLwesYaFrODrQQnQmUenGswNZQSscDBbU6 +MuLTAjfnlmijlnRRA5CIBN1tNHujVAWyHKOIo7JHfqtf80ePNcgulwrYc1jy1ein +1wLjDIANAgMBAAECggEAUo0DePmTdrtmUrsZx8sa+mG4OhpadHiWg6zQJk9Wf1am +2NWRY0ZWSeJhwkiwJoE7yGHUH07YZYRQIbwf0wN6rKMyKRWxeUYxmTc+obHTm8aq +vAcydZP9Afk5zLU5XCw0Chaoz39WLfp1JETRPF/8vRmuQvLtvS2UldYKyfuUYvFp +xh7dlAjCCnoNPTHmg+0EM8SDFinPdsPi0BJVHA6s94GYC1X/HpXwwk0bTXnV6u92 +KEaSdVeOV3K6DYd5ONzI2IWx6UPPe3GJEgvtv064TWcwBA151+sf8F0E1c3HEoN0 +gTp7k/GbuSBFV0syTskBTu9sluwroi0JNBOtqQKtwQKBgQD7EtUV3PITjO24s6Kb +32Ngj6DDbTeUjo0dYmx6CoFAQxbMclEnwd+roZBya7ZXcRsa4bA1JCtRFi+7e+uq +WVnvRD/KqIfPNGwOKYmsoiuryZh1u70bAWHwA4XS1oB3RNv9qxV7HZ1n2Ks2c9nc +3jpFRsnLIiX8DGJtJpfdh0Q6KQKBgQD1q7AJ/1OlTm7zpCUMd76sj5BmCi5W+p2B +epORB0/hCsqcsIRvhX1x4BjgbeTg1a0b5kIz3l74oMKcXa02o6buecJYlDNUHmVu +BNC1F/SguppmtCmLmMUKWSlEyxKi1z73/V8S6ScVhgQQzYnKWL7ynS9H5Oazf7g8 +8iFS3SKbRQKBgBRYt1n5OMjqz8z7Cb91//iwSwfrTujEEJ2D/2R4e3b4uN1Cml8N +cBHZmvoazoERkm/AQGZeEwCUOrQH29ZHPWmNb1n+BFt/VQ5kwVpzHfTi9m78Etwg +ZY5syqiGZ4zC0M0i0y/R+5KR1XYpN0uV2sUIcov6rXdajZNo4D+8mYPZAoGAbkK+ +Yd5MxnaraYKUdkIQPBa7MnEDV1pn8sL3FDJ58YEMasL2za7WHJP91ky8WwTMCyeP +MwRtD53nepK4uTs+Iu5XAbaSSwDReU2D9qNn0fOudMYLl3HbjcdytEOIe7mrs1Xu +o+gs+IYLsRbu7vhhyzYtEFF2Eb4TDJeHyU8ixxkCgYAROhTkyV74n2K98JZ8GDNx +BesAB2V8JblnB8d55sgl5yPDhJBYXegEACJ4WS+BohEnOoepBVfBgkqxPAS9xM6l +vkfsk2/Gq2V8Po0MdU1SHktXLfD+GR1MkD9oTc6rYOkwDH4QBztG+ADyfwPH/siq +q0BkYC1GSOAd/1+2sBi9UA== -----END PRIVATE KEY----- diff --git a/jstests/libs/server_title_bar.pem.digest.sha1 b/jstests/libs/server_title_bar.pem.digest.sha1 index 18a85e869e48b..85abc3bec81c2 100644 --- a/jstests/libs/server_title_bar.pem.digest.sha1 +++ b/jstests/libs/server_title_bar.pem.digest.sha1 @@ -1 +1 @@ -31E9FDDBBAC424AA6377FF410698241361CCDC3F \ No newline at end of file +3051C6BF8FE8AE6D1FA2FE178324F6F886ECB1D0 \ No newline at end of file diff --git a/jstests/libs/server_title_bar.pem.digest.sha256 b/jstests/libs/server_title_bar.pem.digest.sha256 index 8c5d7afe58cf2..9721586063045 100644 --- a/jstests/libs/server_title_bar.pem.digest.sha256 +++ b/jstests/libs/server_title_bar.pem.digest.sha256 @@ -1 +1 @@ -2F1C21FFC8FD92864E6E26AC4052087AE9D34133EA1507D22155170F72903237 \ No newline at end of file +D6BB414C9AF5821D6D5CC046470232C56F03880AAEFCC7E249A20CBBC8D0FF8F \ No newline at end of file diff --git a/jstests/libs/server_title_bar_no_o_ou_dc.pem b/jstests/libs/server_title_bar_no_o_ou_dc.pem new file mode 100644 index 0000000000000..41ce7b42c9c34 --- /dev/null +++ b/jstests/libs/server_title_bar_no_o_ou_dc.pem @@ -0,0 +1,53 @@ +# Autogenerated file, do not edit. +# Generate using jstests/ssl/x509/mkcert.py --config jstests/ssl/x509/certs.yml server_title_bar_no_o_ou_dc.pem +# +# Server certificate including the title attribute set to bar without O, OU, or DC. 
+-----BEGIN CERTIFICATE----- +MIIDZzCCAk+gAwIBAgIELGze1TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO +BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjBXMQ8wDQYD +VQQDDAZzZXJ2ZXIxDDAKBgNVBAwMA2JhcjELMAkGA1UEBhMCVVMxETAPBgNVBAgM +CE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAxNWomUaGmbq8tdEtG9QbpnXlo+RO12LclNrQIAbL ++jqg176WXpQN+OZ4ZzpEY5oYN5RuvjSVRExYOOUM8BTYDNAsY7Txb/X5IC9wWCZj +FcNxWHFZJcaHcU3Nr5vhqd7emkOdQBnlrfeb4CwKeJ4wUt+SjWJ6VSFjtZ3FVjOO +eVAl5EC5uaWMnraDGaxmO9DkTt4tDBAXDdmkFMYPnXK7xi2Z2WtVe+sKGxnlCUWk +Agtn2SN8hqgWDgm3bJtvQjya6ty6c511A6FgJc9CfqbBce7e5OTLOW9MGz46ysRa +h/EZZEG4u4ofQp2XRhUibuUCJuFTE/5+nioUqJM/EnWyMQIDAQABox4wHDAaBgNV +HREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBACwAwY9q +GFkZW43DhMigmv6nSDBM/fLvvHyvPIO6eZCkoFv8tpHnWEe3th+ExvDeg0eLA+HM +WF1jZKWx2cwJhGi+/ATyO6J10+D0wTWcKU3CN6z0WLpY3jM8ELalBjQLvUl09iAv +cLMLZVM/xebCN3QKbO5/aVxadeaDtj4FVLDstak7fjNMP2IVjXyobEHqnk7oQLYY +RSpEGyUCKQWOFFXOpb1BR3YY8Rc5xV/LzBe2vwoBnL/uxsqaE+ObzrzkyrjIWSzt +CJ9evJ6x6Kw7zlgDDwz9XcEcEPDjOYjTzPnLekUFAGE8LJYXYjLh6HKpOv1WIyBN +d+i2kGvKYxayuME= +-----END CERTIFICATE----- +-----BEGIN PRIVATE KEY----- +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDE1aiZRoaZury1 +0S0b1BumdeWj5E7XYtyU2tAgBsv6OqDXvpZelA345nhnOkRjmhg3lG6+NJVETFg4 +5QzwFNgM0CxjtPFv9fkgL3BYJmMVw3FYcVklxodxTc2vm+Gp3t6aQ51AGeWt95vg +LAp4njBS35KNYnpVIWO1ncVWM455UCXkQLm5pYyetoMZrGY70ORO3i0MEBcN2aQU +xg+dcrvGLZnZa1V76wobGeUJRaQCC2fZI3yGqBYOCbdsm29CPJrq3LpznXUDoWAl +z0J+psFx7t7k5Ms5b0wbPjrKxFqH8RlkQbi7ih9CnZdGFSJu5QIm4VMT/n6eKhSo +kz8SdbIxAgMBAAECggEBAJbjbsZo8P/hzY3XVywREk0t7acQenvUNmIJxyf17eHZ +lbktSdogxwE5s6z2Vry/wLbCm45FgvODTtH2jE9yuxg/cPfSGo8IUTyiAQ4iBy0E +0NvKsFsr8GIkEXwAFCKDTcOV93LPJ2mP+bcEK9bA9SxiZNbrWfnuiaAM3Nyy0xIc +zWI3nDwB1U5cfaedJz1gPiBqZvXH6la0jSFdXVeNa6HapV20o9ABgFuwN8JrMt9w +LyD+SxM5Visnt8LfXq71t+op2suriUz5/lnuz9HEs19f6JCIpjKzUMqxDsuaEpyt +aofOqy+fuN0fGtnJyHVliiFUvbr4+IpljpyZ/0mt7p0CgYEA5UoEIRQxIeZZATpl +f4GgYWU7OF/34JRHSDjRgJf9n0FqHMlxbmwnRzWW9P2cUif2+1oESj49kBjm4Yq/ +7qryU6Wk8qKb9dpazDxcGfSi6wfnm814h0NrQzfrtxUKqUeIzAX4MGyty/Z4qATt +EoNQeBLk09bBg/BguS52wU4RcW8CgYEA28PEJ0KQMlyDIipPr9mBPoKudbVRfgej +iC/GryEfd84/GPY5B6UZ4YPpZLuHDNAXj1G85/2pMpsKZGD/hpJTL6xNzdoisziG +hCgUXYkzvv6E4VMBUxfg+BZtYu0dnJxENW7U7xnYlnleRqfQr5n6hqY8rSRonZ2B +ZtbnqlHsBl8CgYAjkSWu0+kDLy4jSKGx87OjKAbVqd1TkuyzwUyOPAV1jLofGQhl +bpVxMFjoS+bRv61KNB5yPdFi5tn5poI7TX0liI5brOmwa1ymTb6jsBUNjD5bJrZJ +lSWwWZ5Q2gy7UYNg0pQ7PsB+4Q66UXlM4925ooSob6m5A7KdRx3YwnlmNQKBgQCf +J2mZHSWcs07soUPQkG5/PNjnugZohVOzPxZeolhNxXoAs+CdXr4dihCuuPiXGOTX +EFUElbnBQnMGOxES/klsTxaRrBZRQPB6KSSGkOR+v4iwPhLJgJOWV3ekmyBE2Q60 +u61N86PdZcwW01XYtmSuuxlaMvHoo7ahKaXgvS93dQKBgHKYStfXREIo54ONTgmL +EHZbFzPCKEzXot2lLaee4L71SwyvawxvRzsC8QgMuG33hbqzechierdfODlrPXMQ +nWbjByfapIYg7EJIk3xSo3xn9NEJvFgYVKO1BEL5m0VRCf7n3HiTZ3+PT7OZtpJZ +GmQ1aBdFgcyau+K6GqCmX4ke +-----END PRIVATE KEY----- diff --git a/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha1 b/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha1 new file mode 100644 index 0000000000000..b9d351aaee16d --- /dev/null +++ b/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha1 @@ -0,0 +1 @@ +AA09D3F3FACEDFFA86CB4216B4237F0A4B15D763 \ No newline at end of file diff --git a/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha256 b/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha256 new file mode 100644 index 0000000000000..93c14dab77628 --- /dev/null +++ 
b/jstests/libs/server_title_bar_no_o_ou_dc.pem.digest.sha256 @@ -0,0 +1 @@ +3646F2B1B24E8AEF7C0C305DD57A384A3B5A8E43DE705557FAE0682092E2F381 \ No newline at end of file diff --git a/jstests/libs/server_title_foo.pem b/jstests/libs/server_title_foo.pem index af938c1c7a8be..9a953d666c23c 100644 --- a/jstests/libs/server_title_foo.pem +++ b/jstests/libs/server_title_foo.pem @@ -3,51 +3,51 @@ # # Server certificate including the title attribute set to foo. -----BEGIN CERTIFICATE----- -MIIDijCCAnKgAwIBAgIELeq5MTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDijCCAnKgAwIBAgIEMDdisTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjMwMzIyMDIzODEyWhcNMjUwNjIzMDIzODEyWjB6MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQxWhcNMjUwOTEwMTQyODQxWjB6MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG c2VydmVyMQwwCgYDVQQMDANmb28wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK -AoIBAQC4Z+4f6WnJJzMqxxkShigpyObbCx0EElyzuSfECotm523C2jpVgplVh5Pn -eTL6eIUwwNN2d4XHR0VAvvU+tBS+MB42NrZt6MSh+tWCm/HN21/4zg48hdedGFwH -wDLTN94kRiaChkZ5aNzVqtLa+PtKX6UEYLvIHt+I7Y95hSvc1t1MSaobaEvLRjbU -fzihRGYYOXeLB0Yw3zurWi7wJ1Z9D8bIYikzgMkn1sPBPTmYHiqQIlxeDmQ5xmNJ -uRSjK6t16r8SVeNCTS85/pmWuy7hN7YnZXsdGXhP88sZxZOqdjEpsJsj5zGN0Ki0 -KC9NYasht7tZ8dMGmuPjsvo0dwyzAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2Fs -aG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEALbGw66c/ZoiuKT2u2i4dTjpV -L9xceahK9DWGV3syddTPkloER7vpyZzES6TrkC0Kw/3OMnSDaIy1hR3Gp9zCWhDX -UQLrqh+rnYMEPucG6oWxjPUovfmkWU0zdsTuiXmdJ0eWW/OLe1NPmt6WHlCG2cUl -BRJR23v2KfRfCL9YaOyLynsY49TXjEELyKD67csA3M6sYKbJ/pseM1TwDqB0Odyz -CSKDGQx98UsWGS2skuuhPgic8pgJITdp/WfUuI6JyvjpWRuxrHZykJSo38WhS6RG -rTyj35fDoapyFiJscx0dVrFkTrvptTlLRRxeIDzbZ40wR+EadnJ2/5DB0Nbu0A== +AoIBAQCue7oeuWmdipFRnsOjXFYpKXTFV+arFNHKynfGdqLyU94ZiG7K7J4SgN+L +S3CKvEp5lC8/aztLj+EtPXVGj5lC+yVmPPQ0PYxr0G9ufIdZeqo3sLUVOMBovuOe +fqdc4BOBoKs7zBh7TLr7DBPp17uROTkla8VB28p8jFeKcWI+ImjCLGy71gqEXzV1 ++3lMvmSlm8rQq1hGJhOeCxPmHo5o0GlnZUhh4b+ts0clTQU2HZ1EEktFgofz0lT/ +q+2hOdIpYrecUQ97iLFzF/hMrIdSuzia4mDuFAvFXj723N3J3xwoXaGVY46Qe5EE +oMR+Q+PzjyN4lq3eqsJUtKAgsylHAgMBAAGjHjAcMBoGA1UdEQQTMBGCCWxvY2Fs +aG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAUox64GeSoepqlF19g4qfkVu3 ++ld6tsInEMzPq+gqz4PsjL8e4C7z/JzQQnrGfk2sD3rUUWOiMK3V+Nx+ggCrHPth +ctRcZXCmD9TxW+zPkji3TLLq4f0cVV6OkDS0eDXIWSi0Fwb+PyhfMcsR7WMTs5G4 +uQeju/3EYx/IVU5WWrMJLcOS6dUV9A9HnVw7y3yYAbO354t9i8KLBFtgCb6O6r9A +lqGAAEujfpzIRNnvq9TbxCVVlYia01BkJNwj1EPP6bYEvQUnYaokpTqNWYNOkt3D +XQj6MEQtXhxAZkaHcEFK/LFniY56KB9RDoXfsMFZRXWc/n+SGki8eMLO5YWEMQ== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC4Z+4f6WnJJzMq -xxkShigpyObbCx0EElyzuSfECotm523C2jpVgplVh5PneTL6eIUwwNN2d4XHR0VA -vvU+tBS+MB42NrZt6MSh+tWCm/HN21/4zg48hdedGFwHwDLTN94kRiaChkZ5aNzV -qtLa+PtKX6UEYLvIHt+I7Y95hSvc1t1MSaobaEvLRjbUfzihRGYYOXeLB0Yw3zur -Wi7wJ1Z9D8bIYikzgMkn1sPBPTmYHiqQIlxeDmQ5xmNJuRSjK6t16r8SVeNCTS85 -/pmWuy7hN7YnZXsdGXhP88sZxZOqdjEpsJsj5zGN0Ki0KC9NYasht7tZ8dMGmuPj -svo0dwyzAgMBAAECggEAGxL3PSwx4dylgIRWxAd6Yhgi/Mn26qAfiCuJERlTOjqE -PPV5VxCjnpEXQAblWyzSsUO+SEhoFcf6/PSMYTZjTUEXTnJd+mkQZY/ERTbMG6M3 -xfnK0Uv9Sg1HhcPMMoKjVMQP5137ftvMgHpiFtAzZMoCGlBxgYI2442tYPQSaovJ -DqjPwz+Mn4PEskR0/xamhW+/dStbl2xaG9URPD5Mf2ZhWl8milMJC02Y/Ytm7igq -AENT06qMcaBtTQZrQubCmWHN+m/cHdGHLlsg5UN4SwsY5OaNrWqMFqrv8ouZEyC0 -4n5+X0kcU9FtXN6LSlLrlANdnAKKY9Sz5NNynlASYQKBgQD1jC33t4GYzqas+hpa 
-WGZfLu8aFdcyN5d2sXyMcPKuzULkytyn2GvfGkzcr4ngrewwM4EpotBxFRWN4CUl -cABzbehwFi0FM9PE/Ww25TALkBbtGmWxSmNuK9uIMwyCClas4T4fV8BS3pCsBC/p -Jp1QibOvRxtwTr8NRtWEPI9puwKBgQDAQXf0Q6xiBiM7Jwp7UtPEgEwY5aetTuYj -lLuasXMbAPpAhuZhBGlgpu4Xg6s/HnkQbuYoYcidoMGtTjwTIrnWtDC507kpdzQp -DkUJPBijiu6OMvQepJIilf45fyHnyDJ1q881PrrOzYikdHth9Ti61BD80YsHFAuF -51NJHhedaQKBgFTJsN3G6eNACGHWgt8Lg13+sOWLASH//DcqFl3QapxdmGm0evki -TC1fwYa6vptssw/52PHtnJhPtX+mFG2W/TDelNKPdcBEIy30bDeQcESt3pzE7rSH -gUn7rvSa3AjTVRahOHhOLsTuwXoEgB68DLpQslEl9p2TM3l8KiJdXxAdAoGAebT4 -SxnMNwHLq9a7O/bjOLI/ekNoMr6P0laFrRhI1f94bQD5NtGkJBuI/jnMXtjbqxuJ -eGbuqVrrQNsWDMce/lxzvC/cN/POgW8XJRF2R5HcEwkOoZdtK5foqF1jCWgjCXsK -YZqkh+Z1aiaTNSAYGa3GU0YTzRdTdCFNCCyUpoECgYAXQe6DhbcgRMv23eqml2Bk -dtcK8q8SJJ0t/onWItcI6CDo91nZSTkQ0A5aGjhUgTrBSnJFmbYjeyuOfqcVxDc6 -+I2Yn7ybBjpmZFQciOd0T2a79aZWKicjCM1PjJTQN4ghIt6/f/HtmcHoH4OBXfIp -zHWq9QZ48v0gu+1NZTx8xg== +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCue7oeuWmdipFR +nsOjXFYpKXTFV+arFNHKynfGdqLyU94ZiG7K7J4SgN+LS3CKvEp5lC8/aztLj+Et +PXVGj5lC+yVmPPQ0PYxr0G9ufIdZeqo3sLUVOMBovuOefqdc4BOBoKs7zBh7TLr7 +DBPp17uROTkla8VB28p8jFeKcWI+ImjCLGy71gqEXzV1+3lMvmSlm8rQq1hGJhOe +CxPmHo5o0GlnZUhh4b+ts0clTQU2HZ1EEktFgofz0lT/q+2hOdIpYrecUQ97iLFz +F/hMrIdSuzia4mDuFAvFXj723N3J3xwoXaGVY46Qe5EEoMR+Q+PzjyN4lq3eqsJU +tKAgsylHAgMBAAECggEAePY5r7cmqxs3Z3hDdOs8qEifNXic55oFXytCIgp5KLpP +zS+yb2fepkFFuJHSMbwbfXHn13WFo/rZRr3GoeZHgBIQeXJkchsBUHUt61DVnIjX +nMgaod24NmWv818ms9J8c29phokVp9UoA4b5/zGaTTF4lqLCG8g99l5tCgNU8nFP +Cfx/xWuRbxhcLYnrfrctwvG5yS1PcjIkaJb1fAlbKQVMBnJ162tu3RE8zGmR0HJH +08Trs9eS61ZrGOLJObqPLkwuy8VSsyRDPaSJj8EQ+A2uCCbjbITySfJtOvtW7rXz +xgxHBbeFtyFURa7lkcfn52MzpXjHCuqeHYH+ZJWzgQKBgQDdPKyW9B1lma8aGyhw +wce/Xr59cSYA05osBsYiMu93+zxPEspl+fAZg8yjbeMl4I62CANUtwIJCeHw9HeY +hb4Om9yVVFFp/FlWkPlqdXvAUKGMhO0N+vq6QDN07Q1BVUUUHJZ+gjhrnNvuY2SU +Fj7VB9L9k/YCSj0QgYWQ4UaIIQKBgQDJ5l7/kQCUzznulU91/BQt4UyL7x3jRvLd +8L/d82yfEy/Bx4ZFar4I9+B54FRepB1SDdb1bOmwPqnvGNc3Hg3UKJzhSuqpKBpv ++sQfwy8yFLDNMgB7mb6B7VtuZsFYT5yNj+4lA2Cfcuy1PZwX4YatW2Toe+bLft9k +2zfT1XXkZwKBgDwrZEUp31Til1ziRf1kto61ldlIDK3s3uFadkiW0cov6hcXZTSW +5VYLInzQRhbnG+kmBMHlhAgxR+HgmyZAOZB/k16JsBrDJwkEJNFvYljLlSRCgrZq +rAY7r8L9Nb2vEzqFC+kcQXwDDU2oepJL+oq1tgyBUUcOKc1zbIAaxLQBAoGBAKTZ +9AFKXSYkGdJDmbDlVXist/qeEFJN0OoEtDS+mJc+bEUV6/1sDaR2+JOq5lisOcCQ +yk50Uk70q34tUzSO1o2/Z3DQ4c+ijguWvmKM1VFX8ZBp3lkNjK67pmb7gazgvBwe +RD12h4NJrBrEJlqda4DK2ha1bBoGCtNn5yqQ5YTLAoGAK/tGX8AXlStM8lnezsTM +wRj3kK05ptTOnDWHR4nKJLYXczSo/PJug9/Qjbox+XWzfTlzSThqVkmuN50iNavf +8N2EzLTD8fhXNBTzzCg5L4oLwdhlGX5mrwXRwTYG7cCFawWeMU3h6HrjRpqD7VE6 +kbmNTsIi1wxt0ymamDF5yuc= -----END PRIVATE KEY----- diff --git a/jstests/libs/server_title_foo.pem.digest.sha1 b/jstests/libs/server_title_foo.pem.digest.sha1 index 1c08ffd82a130..77dc1d452d666 100644 --- a/jstests/libs/server_title_foo.pem.digest.sha1 +++ b/jstests/libs/server_title_foo.pem.digest.sha1 @@ -1 +1 @@ -AE9780F50789327BB1F6AD5343490CC2FDF559FD \ No newline at end of file +E5836708C648A0532CC95AC1C55A01E010243EAF \ No newline at end of file diff --git a/jstests/libs/server_title_foo.pem.digest.sha256 b/jstests/libs/server_title_foo.pem.digest.sha256 index 1b7bfd18f16f8..267623019f628 100644 --- a/jstests/libs/server_title_foo.pem.digest.sha256 +++ b/jstests/libs/server_title_foo.pem.digest.sha256 @@ -1 +1 @@ -C2D4EE231C2704118F01DCD559987464EFDE8939873595386A8772B6274C70A1 \ No newline at end of file +CAE1806485306B46661711B44D7AEFD232B474C1AB441B33F1AFB60B2912850E \ No newline at end of file diff --git a/jstests/libs/server_title_foo_no_o_ou_dc.pem 
b/jstests/libs/server_title_foo_no_o_ou_dc.pem index 87e5d8964edac..b3bcd67c4efb6 100644 --- a/jstests/libs/server_title_foo_no_o_ou_dc.pem +++ b/jstests/libs/server_title_foo_no_o_ou_dc.pem @@ -3,51 +3,51 @@ # # Server certificate including the title attribute set to foo without O, OU, or DC. -----BEGIN CERTIFICATE----- -MIIDZzCCAk+gAwIBAgIEPUtD4TANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDZzCCAk+gAwIBAgIEQFQqZDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjMwMzIyMDIzODQxWhcNMjUwNjIzMDIzODQxWjBXMQ8wDQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQyWhcNMjUwOTEwMTQyODQyWjBXMQ8wDQYD VQQDDAZzZXJ2ZXIxDDAKBgNVBAwMA2ZvbzELMAkGA1UEBhMCVVMxETAPBgNVBAgM CE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MIIBIjANBgkqhkiG9w0B -AQEFAAOCAQ8AMIIBCgKCAQEA/z49ZcRbY9ZWekDxYgPqwlNffxfXWVtKibaO/FtY -vI2Ey6ngyqTGCvZrJ1MWvKxaKoILrPIhjxGcREW/FQNb2TG/6kpnhbUeoYe0zy1w -/hxZv9mkSe3xmkxw0V4RmzmKfaxeGcsq5S8eNJ9SVX1CRLgyindO+bwkikzMdL7f -5VlVx2ry3t1Jnn1ncRAGBV+PgtoVqQgK5IYFONVcOsoaxikSzr5q6WW1NwrUNhOs -F/76LoTFvu14o/QmzxiXsSMLmdo9f/Ejimf1THOMEahmD2KFUnx0F3EzcY6dholF -mE1pEmytTN9LlnMK/xt2CsuOtjn7NHznX17GBSuF7LzX3QIDAQABox4wHDAaBgNV -HREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAHPVIMCg -kmfyKl7Ci5uJS1OCGAdjpaqoSlC2jz7xWOe8P2Hz9gluQNu38EyG9EHM4G1jktCV -T3KyfaEcQw/4bgz1QlMEio1xPSEsqwMswAzb4cDPbxI3MEyLkx4mIcYZXG614rlm -ZX6A4UzZ7dIXRPoETnEy6CUDiOBVmlrGfVqv6lqtx63yUSbDKwoF8HVpJxpSjgQt -qY6AWKHqohmUImwludPlmjxJLh49yJyOMvXHRPr+BMPM/UYKVJ9mx4YmLJxMZMz4 -GSzPKqiJRNczvT1T1qdInUfYa5DtTxYS7NK2ZfvDqtjllTszoUp18shg3P5+tGJa -2zKmcCXWNlcqkqY= +AQEFAAOCAQ8AMIIBCgKCAQEA3EfHW/98lze4NL9FsW/3S9ZDGKo/BV+PGJ8sFQ3b +dBmynH76ZT2YUY7NB1T/kbyBFoyq975VDS9q5PVLgbA7JefzWBKAU5ytxYJsq5B1 +p8UadrljjbjJ5m8DPQeVdz+i/uv22GJ//PGUTj2oih3NMW7bbRI+OQEhzTmuG18o +oZVJImHJOt49mNCvt/ShZj7EQMWqTaOijHevb7YkraH5EAX/sA3Af0QZPrrLCBu1 +wjTDiN0cvdCdwv6SnduEWqDz3pf9pjAckGIV186PtlvOAmYu+7v/WNNAWqZC4EtR +wopr6PCzSE17uv8blJX494rHmzDDGm/UJH+yyEPyf3HKeQIDAQABox4wHDAaBgNV +HREEEzARgglsb2NhbGhvc3SHBH8AAAEwDQYJKoZIhvcNAQELBQADggEBAGusSCgq +nogJ2c9qYqSk3rxSTxkPhMXMxSZVJxu6KT/tS4bmxQJ0F/RnHuGNf76DwFjs46NL +dBes3AHJ+n/LJtaAc6/gsPHJYyM+mUhfZB+Uwzf+YR/9Lg7WXju0wXAWK6qCOz8L +MoalrKw7hkLCaIryrdC+YLx0WE3/97ZW/Mi6QUNyv0iCcgc9rPvRlCzxh2riARWz +LnRO1UbN95vEvZuTrAflV0n5Dox2MqzUfY0mZhCAHIHnP8n1+LJzI4NE2xbZMz63 +R+2QHzdDDOj8+7Vj5TmBa+LJ6VXPemdvXpD8wcQSl4gwXBVFNZhCmxr7x7G4GwIj +mcwn0Qmq1NtYXik= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQD/Pj1lxFtj1lZ6 -QPFiA+rCU19/F9dZW0qJto78W1i8jYTLqeDKpMYK9msnUxa8rFoqggus8iGPEZxE -Rb8VA1vZMb/qSmeFtR6hh7TPLXD+HFm/2aRJ7fGaTHDRXhGbOYp9rF4ZyyrlLx40 -n1JVfUJEuDKKd075vCSKTMx0vt/lWVXHavLe3UmefWdxEAYFX4+C2hWpCArkhgU4 -1Vw6yhrGKRLOvmrpZbU3CtQ2E6wX/vouhMW+7Xij9CbPGJexIwuZ2j1/8SOKZ/VM -c4wRqGYPYoVSfHQXcTNxjp2GiUWYTWkSbK1M30uWcwr/G3YKy462Ofs0fOdfXsYF -K4XsvNfdAgMBAAECggEAKCBhyKDw+SYWHEwfZphVDM3Moo9d9JdMhY/ktLmrnqDk -8pu3UkRLOif5OopudaTm2+3r5fl+2x4aogURAD2x79hJYozl73hE44IRI8zyCZDt -byLJGDJHHEnOJqwSOoP2SMGTXZy6FqOsrPsrF3OEuob2sxwEl3BDklZ2ghgL3OM5 -IIVycNo7tEGjrH1p7Z0+5Uuwf3lNZxlItc17bOTRwAi9eVlIMyoLz/ocaJFt5C3Y -KgCzkQcvWjjJEVwlMe10u9yyjs51yKAqkBfREOVYrYcAQigH1QSKpeLqbVDULMcl -5CM0e9y1ZDZAeOsRFqCdFMYHVBB/PdlMxP2eM+12JQKBgQD/2SZUC75jF29ekgmx -FEePQ+LQAlnP2Hplo1TUp6mOIB4n75B/GXbhvh/Aw/bzkeOPSKpZMbOUDcHGDwNu -ul63BeZWC7hBV53/rJdLEbknafZo3Aw427foNhRscA6iyb8z+QzsgElRuQ2Po/qF -0vXYxBI48V9ANkEUxCnBjhs8xwKBgQD/ZP+L29//osaTdxyiVwMqmMxw1NOg72eJ 
-pE5h5anJ+Zdj+XlE1BOGnpz/J1OMCEmspa0py0zqlvBTuhB5l1AamgeSlqjt8u7a -T56ariCmwkDCHVRUDevXXAmzdHgp9c5SPNp1Ka7qj5vhTK3YpVGUtzcWRJxqWeCg -rHYL/Yl6OwKBgHF1U/j7iD+bWekfbCraKm3PFhtWn4t7nbPK/cicXaXIencNVw/2 -M/EiBiTPAom7TaXx/JE3aEKk4yS47bXB8lTJyf6ojdp0R33lhOZmgqyG4h5YTxc7 -4M+ag+4et27bdu5OaLvMnDcgkHH9rxB/oESzlr0n1Sy9opjZ8QaDxXJrAoGAAfDQ -iE2JbDXecGxtSUaD/aTfmNPlL8nh7YfUGKZYHfLJlbbllwJNi65U3xN7bQr7FFbF -9BVZZkbzWI+HZIUj1K/q8tA2RGieLAaC3AYKtXmwaEk0xNa+PgqzACwYZak6giF4 -P3+rlpi0xIeCoqzO6+RghMjMr3ozXMUyuHCaxNUCgYEAkas1e5PagZ1u5AjpXtN3 -SI5Wc7IwwtzJf3PCsT3ijYifo1NGG98xM5jJhr+6Sw9QYuocJ1+dY1iHKVdxsBAK -WN+jJqncuF1EMEDLJpCk//ecLygG4aXnVuT+HGe38+X1SWzpTshP0wmZQeixZOtv -gRcYsGOG1GGQc7R4PrXooY0= +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQDcR8db/3yXN7g0 +v0Wxb/dL1kMYqj8FX48YnywVDdt0GbKcfvplPZhRjs0HVP+RvIEWjKr3vlUNL2rk +9UuBsDsl5/NYEoBTnK3FgmyrkHWnxRp2uWONuMnmbwM9B5V3P6L+6/bYYn/88ZRO +PaiKHc0xbtttEj45ASHNOa4bXyihlUkiYck63j2Y0K+39KFmPsRAxapNo6KMd69v +tiStofkQBf+wDcB/RBk+ussIG7XCNMOI3Ry90J3C/pKd24RaoPPel/2mMByQYhXX +zo+2W84CZi77u/9Y00BapkLgS1HCimvo8LNITXu6/xuUlfj3isebMMMab9Qkf7LI +Q/J/ccp5AgMBAAECggEAPLnhAMCvBTUMKCZuvRrDzvlTZ/JPC1krprsCuQ8n55rJ +/ZCX0N8pJZq7dQD+r5uK7r96xm+HHOpsipAA79C4jZujHy2CiQpRcXiiIq3hcY3J +3N8AzeoaZtdFykZT8xrtAV/lngORixbCLsuasfDDR5RrE2eLFcX1HpF/KPaTye9N +1cswVQ7RAoUY90+n1PSoDOTbmG6Sw0EEM/gmvHIvN/KQnXhrbHLcgzSPvPauVWhB +Mddd1qifPvEItOrtEz9E7Oksu42v27Nq1baVC2HYYDYe3jp9Ef4tQN0chlKFbkVU +XDDFdHpbjSkC1w9WnmVLCJqj4NrTNbkEURmYBf44AQKBgQDxiq+slTBep8x/dIfi +/FVn7IA9ENYudDcjFWMr3kkKcldoJw92kh33RHIto4gqRLiz3E8bByrXd9S2dFco +q+e8bXlK7D13z1/Thg8i9m5ogBEzXRwMNMM0S03SoBOGoEghWIQfacxD5YVBNtDP +xPYO0TrbKfYo/WiWug/qONnveQKBgQDpd0ow7zPXo7EmA1dx7q/VHb+TKkQ4JWeQ +hNPY9nEuSq4lC5QpLrxF6RhTdpgPjGkxcEURFkVWpO/3717k5bXbIAptOpnLi2M6 +NmiuxA1dxZLCvQcHTBaE4e1XDSaGPqhxvEM8cJ+DFKZUu6BTKD0MlrM0FEHlhfAD +85gfsIjzAQKBgFPeeAlQ9C6pzRQkflqOi4k+UXjis7wFlm+UKY8969a4xSjhBzmA +mu8U0SCPu+QRYKDzSnR0FKgkb7O6ydjRd/GxgYBGb0F+vCNSVUcqkkOu1Eoldmu2 +lmE+FJOiCaWhWepaNeZCci7RL2fphK/gECAs/mbDNzocY4iSqpwFiYbhAoGAJmYL +Ws8M7MusiD8Gc+O5Ick5yB+shruINBnUqhumc+GukMM9xCQ+rRTwflHEItKKPqpj +gbLzBpQsL9A8AFPTvE2hyWNZBkRPtrRaNVxjgmfLgqIKdOXL0mFCYw3zpyLJG6PG +Pzua0Lllvgv6C5NTry1eHhOy3uhPmKbI/3VOoQECgYBm6ZixNIVsHCugR+fzVJWb +oOXdxExOYPGKP4dIvcmd8/CK9W9gFHPPmLOEslhOgCIjzZkJSPSB1lk2Ry3c2rqd +XXYmWCZYm6gAmO6J4EVW/X22SIqkNHDPymRjN3VHIksmQ35dvQW4rCg2IOx68sHD +rEP0oSLjSj+Q+e3h7I7nNw== -----END PRIVATE KEY----- diff --git a/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha1 b/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha1 index be0670f8b8f85..71f399192326c 100644 --- a/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha1 +++ b/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha1 @@ -1 +1 @@ -0F500F3768A87910EAD0571578AB10A9E39F2122 \ No newline at end of file +F8786CC78C8A2D4F2390806C822BAF7EB6BDF0DE \ No newline at end of file diff --git a/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha256 b/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha256 index 38a0951e2a4b4..632a3a72ad0f2 100644 --- a/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha256 +++ b/jstests/libs/server_title_foo_no_o_ou_dc.pem.digest.sha256 @@ -1 +1 @@ -602B89632680A18CC323E067301487BB97A7F49CA9180ED116CC75AE06B2DA94 \ No newline at end of file +79B89A23F11220BBD9119724BD171A7BE63DC9D9488032691858F9B0F1C0F2E2 \ No newline at end of file diff --git a/jstests/libs/set_try_bonsai_experimental.js b/jstests/libs/set_try_bonsai_experimental.js new file mode 100644 index 0000000000000..8962207469dd3 --- /dev/null +++ 
b/jstests/libs/set_try_bonsai_experimental.js @@ -0,0 +1,27 @@ +/** + * Set internalQueryFrameworkControl to tryBonsaiExperimental and + * internalQueryCardinalityEstimatorMode to sampling. This is intended to be used by tasks which + * should use experimental bonsai behavior, currently defined by both the control knob and the CE + * mode, regardless of the configuration of the variant running the task. This is needed because the + * suite definition cannot override a knob which is also defined by the variant. + */ +(function() { +'use strict'; + +if (typeof db !== "undefined") { + assert.commandWorked(db.adminCommand({ + setParameter: 1, + internalQueryFrameworkControl: "tryBonsaiExperimental", + internalQueryCardinalityEstimatorMode: "sampling" + })); +} + +if (typeof TestData !== "undefined" && TestData.hasOwnProperty("setParameters") && + TestData.hasOwnProperty("setParametersMongos")) { + TestData["setParameters"]["internalQueryFrameworkControl"] = "tryBonsaiExperimental"; + TestData["setParametersMongos"]["internalQueryFrameworkControl"] = "tryBonsaiExperimental"; + + TestData["setParameters"]["internalQueryCardinalityEstimatorMode"] = "sampling"; + TestData["setParametersMongos"]["internalQueryCardinalityEstimatorMode"] = "sampling"; +} +})(); diff --git a/jstests/libs/smoke.pem b/jstests/libs/smoke.pem index f8ecb28a0bf07..432d153517079 100644 --- a/jstests/libs/smoke.pem +++ b/jstests/libs/smoke.pem @@ -3,51 +3,51 @@ # # A self-signed certificate used for smoke testing. -----BEGIN CERTIFICATE----- -MIIDZDCCAkygAwIBAgIEXcrf1zANBgkqhkiG9w0BAQsFADBrMQswCQYDVQQGEwJV +MIIDZDCCAkygAwIBAgIEJATu0jANBgkqhkiG9w0BAQsFADBrMQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEOMAwGA1UEAwwFc21va2Uw -HhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBrMQswCQYDVQQGEwJVUzER +HhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjBrMQswCQYDVQQGEwJVUzER MA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNV BAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEOMAwGA1UEAwwFc21va2UwggEi -MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPxycX8hni7V8evBZrKWm7b1oT -OYRz9JBgwQMdZV2TlWpJJLehFESl2ENWRVs9ZiD6TylofnKHaxaIAkhqgNc1Vk2h -ABj2LcqG7gukcH6++PZLTWS58GmGVunrvCFzX6jkKEa8PFBHqDf/KcPqN6ALHUGY -jb9md25dMD3NudMLsfZDEA8AXAE/G8UDWbN1RxLUWAxJDotzAoF3dKr7s5ruo9NN -urSb8SErjmO1AuSArZwYwTcVO98JPUCOQfHyhu0kgi9EN/kKqBFaFYCZAUqd8lKD -VR0GY3uiYjYXYRcbuvQihQ3H6gnMbAKCS4BR+KW3Uzty27qS36MhGjpd60CnAgMB -AAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAMwm26DbGdss -sxgGTGXa4ogqOMe6zjEXib00auRGxJ/DvjDe7CJqEetMaBVnuUVr1neCh0/i35jG -Cbrzs0fMwYnElXbJk3cGuEB/IeFyVAeJ3y7LiI9+o3AoIK8/Kzipn0W3Z/7ohVlt -AnA4amQSZFIhpxk/oFks7hFnMjEiS4V+iBOUj6GhY/D3th9rpZWC8rOqTrEYvAqm -BpYDKbU0j4tS8Z6fDOS7042/e1ocYieA4hBDOuA5jifOlkcWfoUjzNBg5HSbpQoT -+RbhMZnzP7LdARXm3XvOAkgo5pBji1zTZmO4H1IGEJMcNNthTR20F9yeZXJblJdT -Gj/fOrDuX5o= +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDY/pPCLzyXxRpPF4LIPuUGWWkG +GTbXHCW6Si84T4qU6TfuMjghybQivuULF16fybTwx2Q2TByJIEEsi0G2c4STtFGr +3090YxeS+BaAlRK4qi1Y8nqARDKdfSzSliXeXsU9U0eoNBgAPQCGgk43uqKmCSwB +MesECWJqf01k+op4FXtyAKqtUpylSs2mhGPOvBz8UVKiz4Fo2Y69nIIjT2F5ceX5 +l5KE/5PQfNpFMAHdZm06JunkJ9yUYB2HLQtBcLWQDbqAQb1TptoWDWgvhxb0SGVy +1D1UuUHx4EgbYfB293diOznXf9ZEgaa49QUA3VmR161jGSPQGCt2LUjSBhfZAgMB +AAGjEDAOMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAG/Pc+ePHnVS +bGb5PWJQIai3Xys1ZjCSjQ8Z7aunJ5HB12x7SXDG2rAglJlX3+pvN/qlqBNoPOMM +VpxI1hvICHoS+YhgUxDiQy/+cJSsgo3eWWqSNES/J2xlNNkYqw1IcNqq26IMeZsA +kG2WUx3y9C/9FMZGE2xLuBRd5CZuDhwI4CIS+k/7flOxTw+J+VmpmXKEC44T8iEg 
+q+xadroj9uMT4IqKPJOkxh/9RnOaSv4FRYgpJtxJ6wtsIlAxXX5M/7/Eyl7Gi0fb +dRcdHjAQo1X4EoG/ooetSfFkBYa9qrobYoJ3igKTllgrWzClGMaUkLvEb3ddpdVj +1Ah+rEk1nLo= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDPxycX8hni7V8e -vBZrKWm7b1oTOYRz9JBgwQMdZV2TlWpJJLehFESl2ENWRVs9ZiD6TylofnKHaxaI -AkhqgNc1Vk2hABj2LcqG7gukcH6++PZLTWS58GmGVunrvCFzX6jkKEa8PFBHqDf/ -KcPqN6ALHUGYjb9md25dMD3NudMLsfZDEA8AXAE/G8UDWbN1RxLUWAxJDotzAoF3 -dKr7s5ruo9NNurSb8SErjmO1AuSArZwYwTcVO98JPUCOQfHyhu0kgi9EN/kKqBFa -FYCZAUqd8lKDVR0GY3uiYjYXYRcbuvQihQ3H6gnMbAKCS4BR+KW3Uzty27qS36Mh -Gjpd60CnAgMBAAECggEAf21zU2M+bwcHPDE8SpKGbtaW1o2C5x6JYV1eTKv7HOUY -3yzulZJ5m4Cro4A7uccl2H2uNpC+KsEJe3Zy9SZ5VhPM3j3SEUD6IZBigYIgqXzE -iGA5szN3dFVfdxzVIw365Zqx7wXKcnf/h7UUm1NsCwTxUt80EGKUxAAvFzms7lEm -GDSiNJh9Bh90fC946LbAIOw+JIrsIJzeUq7wQ3bHWxZfdMc4Hg8M+3znXV0VWlsv -g9nZ3EKWofyvk5I86bwuuMVcHoFPtMH8U5Hl2jiPxjR1lmWvhcdcGyxrsmA8536Y -1PA6FfbTuUtjreNKpcpv+pWLOrZIzEWqNYQxKbG2YQKBgQDziwznjWV0zqN7mUz3 -EIeo2UiVF34elfRZaJnRTdv//fUDOjRds96OkbLsp3frNYslGrI0UwPHHpEg+Lha -JQFto/BizjvC1VaSQqJ6MDt+qPCZ4U6vTGM71jfp3xMQ65M6JU60tqQ2tQofGzlN -QEW0AwNhmrJfnvRF6mswhioQkQKBgQDaZ8sJyTLUa8DpYxhDgO8Tg3s9VIZQHZa4 -afp0eAWJAVI04Bav1r8/58X0+//BkoSY0cIPIim0t9FuU2TQUUgr/RnXMAW9JApd -KlOO46IY1ElLRGHkToBPearHiWEuX59E7Md6WH6VRt1TKzTe0ODf2eTFohBnUyMS -sslQUCdZtwKBgQC5cB4STXJ7/z8xxGd7E8YHxfJjO3EXftyAG4pLeXpTMB6YJ4Bp -/KQwcDYJxfYkTS+2v94Cw1b0DY05ysgsM1MkFZja0udkEacKVNx8Jy+V4LLaqFiO -V56TmCgY1nchAg6nTnLgXNUqpqyfRM5byN2KFGVFs1GwR0r91WFqoC11UQKBgF4H -J4OsCmUniuMv1YdiYAtfpNQvqq+dPYFhpwEzlvZ4CiNXgozrgGUL7M7fGyoEYW/F -hq2rQJdcOB8uag3BoLfHfLOHKkFW3dtkWJsA542W/4MXCqoXHeiZcev7+knTwycS -ZYMHzF+KJckjpdxzwwy8q2BmuHczdLsdG8ym5XobAoGBAOfKIZgDttyDb7R4fEzo -v57t3LrlIF54jQYWxqh+81YzqbQFfouuYkfOFa5a87buGgu2BGjGpZcOAnv/FFw1 -EnHPueBsOWIH0C12Pft/4+idwfVZdOq+QbzMt8rudtcnlwhJZQUrvNQjCymiKi7/ -acmbpzsrdmOQ3PB4+Bfd4E1Z +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDY/pPCLzyXxRpP +F4LIPuUGWWkGGTbXHCW6Si84T4qU6TfuMjghybQivuULF16fybTwx2Q2TByJIEEs +i0G2c4STtFGr3090YxeS+BaAlRK4qi1Y8nqARDKdfSzSliXeXsU9U0eoNBgAPQCG +gk43uqKmCSwBMesECWJqf01k+op4FXtyAKqtUpylSs2mhGPOvBz8UVKiz4Fo2Y69 +nIIjT2F5ceX5l5KE/5PQfNpFMAHdZm06JunkJ9yUYB2HLQtBcLWQDbqAQb1TptoW +DWgvhxb0SGVy1D1UuUHx4EgbYfB293diOznXf9ZEgaa49QUA3VmR161jGSPQGCt2 +LUjSBhfZAgMBAAECggEBAL35IyM+d63nwcC8tKLbbWbBoFDWgkyRN/aAPBbwjEMS +awT+smXiUPKabavoKc96qA6kgfeHavQiaOy2fjFdsRQfKeFvAADFQD5LNGgeGuXR +FyJoJxdknWuDy1oSWdT043ltD68S7HBI98zLB++CViNGpF/nc/l0vdS+3uMo9VFq +4DSEKco/gSVYFmVmk2qblTsOm0breoeegVNQrfUe420ZN87EYIV6E0cRDF7y9DBg +ynCJKxVVMbhCGs4XHjgu1wm6Je298crfgZqwM9MbxCQq2z9ZIA3OZZSiVUtzZDN+ +/tIDYQ7evoAU3jlyitEZjzoutmP5oruLg0TmsuTM/IECgYEA9h4cXeZZCenkL1sk +zXMAhwBCgiBEmLDRhgqEKGNomNAn3YLYS56ZOPdZyOFPluj4pncCw8tuFY4FUjag +A8q1heUGvpivf+0fPtvKYm43VNBWKqY5OTAhRjGMwvPWz4NRQYmM/VaeTx8z+nlN +HvSzS/WtZ4RrKSAZZnnAf6oEK7MCgYEA4bUakBqZ3Ia/BD+t3Ic2hJuF7xxtNHLQ +vTvLrGRS5k42TtFI/S7P5lYw1KNqZfFWPcLpM4Y8Gg+whLXXyiTchzlMu3oLLKf8 +xV9ygKv32LMVyKys3bRy6uPXsBGo5j1O5D6dKSWlpP3PdxJSEgG41ESm1/2XP2lm +Sdav7YNiuEMCgYEA7nIBvZ+cCBTIdHDzWPDgQ+smhHfKvbwhYqHxpEebYOHanatU +7v57KEvMeuh/eY/Ax1ZqIVOIFe4AjcRKhyHveAeJ30KCVYhgS0AZ9f8eMFegA7YD +nrpYom8DFyWyql1pbftb014GBlYOv86hvyoIQ8GD/NS4FaH5ueSfcZBAdi0CgYAE +LzZZ25RkqT7bVk7j2sHg/X0jLLS9ly9VgslI3edHi8Wn+mtO/lIuZAStvIXZc/r0 +VCu8n813cjkEjNZ+Ueagvyg0BZQ1dLvT8OwPhKCorNFHkiP2TEUhI644/mrSMerH +gn2paKBEicwR2g4ZdVy22rr5ICNwsoSJ8+f5EEmBPwKBgGyEAufpgsQ/DSZMh5JH +YRVnis44JXmGh5OOVIZY5FLIy2enTxRDIJRInsLtCiZv8jfpKUPuh4dg/H6V4f7H +/l9NN3NX0FKCKZ16nfsmsTXn0bSVri6DhJ6StFu0i7UAIqJct5Se+GlA4W/3LUTe +FoqhkNkhzcva2MInJQzC5kem 
-----END PRIVATE KEY----- diff --git a/jstests/libs/smoke.pem.digest.sha1 b/jstests/libs/smoke.pem.digest.sha1 index 9b868f56bd0eb..5ebb3fc37c656 100644 --- a/jstests/libs/smoke.pem.digest.sha1 +++ b/jstests/libs/smoke.pem.digest.sha1 @@ -1 +1 @@ -5AA4C4852BE5F777296840988BC35FC266C80FEE \ No newline at end of file +CF3D4706D74DD3AC2CCDE7C69BDB515B470AD5FD \ No newline at end of file diff --git a/jstests/libs/smoke.pem.digest.sha256 b/jstests/libs/smoke.pem.digest.sha256 index 25031455d70b5..c0c6b63f6e873 100644 --- a/jstests/libs/smoke.pem.digest.sha256 +++ b/jstests/libs/smoke.pem.digest.sha256 @@ -1 +1 @@ -EFB8DE63DF6DCFB93649EFA5D5F9667444EAAF32FCB612ADCCB95A1F103F814F \ No newline at end of file +088E9A717A2F1FC90B9B5F1C6519E8D104EDFEA64968E919630AC4EE8207C4EE \ No newline at end of file diff --git a/jstests/libs/splithorizon-ca.pem b/jstests/libs/splithorizon-ca.pem index 7d401349feee4..0439865cf2bdf 100644 --- a/jstests/libs/splithorizon-ca.pem +++ b/jstests/libs/splithorizon-ca.pem @@ -3,53 +3,53 @@ # # CA for split horizon testing. -----BEGIN CERTIFICATE----- -MIIDyjCCArKgAwIBAgIEEz8mbzANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC +MIIDyjCCArKgAwIBAgIEGtmmGDANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ8w DQYDVQQLDAZLZXJuZWwxFjAUBgNVBAoMDU1vbmdvREIsIEluYy4xJTAjBgNVBAMM -HEtlcm5lbCBTcGxpdCBIb3Jpem9uIFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ4WhcN -MjQwNDMwMjE1OTQ4WjCBiDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr +HEtlcm5lbCBTcGxpdCBIb3Jpem9uIFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ3WhcN +MjUwOTEwMTQyODQ3WjCBiDELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr MRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ8wDQYDVQQLDAZLZXJuZWwxFjAUBgNV BAoMDU1vbmdvREIsIEluYy4xJTAjBgNVBAMMHEtlcm5lbCBTcGxpdCBIb3Jpem9u -IFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCxHkpTenKn -0n3MxdH+yL+N/879HmJPKlN0HEAQP350VdyvgD28qlZQgwujolp+s31BoSQoEifv -ECR374y83oJy3ZXB7315BZjA5APShllIdUJt7mpQDKiKbzqGajX3nPo0iSh1zUlv -/+swtu4IqlkvhVwHdOPONTpf00jxFYvnL51kLRPgGnqj7tXQl2vQvjHGBNZxN9md -TMXpHU6HdbhLXnCc6PViCdc8dpOT7d6tL5tZp34mrkfyTbhSHE+LzXj+dHR4LaoA -fwDJne7pX14NfQikcYo6UfVt4Z4ooxojTGJitB1XbwYq8rdCtdpopCP+0RiModJ0 -XmNR7zoO9rbFAgMBAAGjOjA4MAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFOGVM3o0 -/euAzEuIyPIdkRxctV0AMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAGki -+aOaPfFyOTTS8ojhVPQ7vAHvtKcY7mEO+tOaG8K4PCQElS05DzKmMaMPArWP/Cn9 -KxZtBkQbXriprc/NPzVY1I8IAlJJsiYZjdA+lhV6HXyi7hWsQHMmbHkf2jyJ8jBf -xR+l8MxL/SJCSP5hmOce7vAC+nbmntx/uXcrMz4CDYNbKMqdLETz4tGiX3Dlv/8W -0kEIPx1pAZLd/gf4BoOXiW8AaISyTkgloxq/jHCgEueFFGUYVpRovNf/N3jYMJDL -8tPBDUrGFPb7hCSHkHrGsriGVWpVuSsQxdzZO9/rpA2khgqsp8Z+p2X4QvRrwEUW -sKJmv+i5WkUKmMNowh8= +IFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCw8c5axdKz +ca7Ffj3FSFRw10+kCyzwnEiu/PAsC8YbTLGklnWC1wJtz60vHmX1+ns225omKgA1 +C1g4OvpLGt8T4uKy2igF/J01tthvXUE1wkCTU1ZHn+Ski6P6Nsf7AIEDKZKVefbo +BTp5vhCAM1sQv1sAuTKUZNn0wJPZdQygmmzmGqbF5+aPpbjHrhxQvDbFAMFANVRy +s9iTPFNjknher/OwnE/zakphlvIrrbnJraQTmbSMQq/mrPnL0e83donhtN1onVUt +TfgDmVqH7wmZUpjrbF1fB0OQcdzX7T2IwbJnAJC7n1W47jXPaxsQ3uKS89yFQSJN +froq0dTnWj2JAgMBAAGjOjA4MAwGA1UdEwQFMAMBAf8wHQYDVR0OBBYEFH+VIX2V +L+Jxin+FQ61yAH+U66phMAkGA1UdIwQCMAAwDQYJKoZIhvcNAQELBQADggEBAEPF +xhPOMsYu/yDEUkBIxXDyv0mSLf0WH8HBAqSyiRHvkW7h5lSx+/lWu0FBqSAFiTB9 +uaPoImah0cT1H576gIv/xURjw1K7ZvHGcEp34360AJeHWA8OJ4Py7sBadGDFP5Gt +zmhD+5mluIqgKzKSIr5yxGSg+PaB8R4VGjdxI4otuwkxCslB4C7u7ZhTIBosXdpS +sD0BaHa+cg1Quxv9L0hGIlvEC3v/u7V5RTAW80atgFLPAfdYdNzepEDLyuC6y/wd +jJh9RcljWvz82Vh/+I4CJEX+qd2E0gaFWLpbqMoquXr8YWOcplxXWzO0wHvHJslE +OdYjkLZIIJuUhqSfigk= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- 
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCxHkpTenKn0n3M -xdH+yL+N/879HmJPKlN0HEAQP350VdyvgD28qlZQgwujolp+s31BoSQoEifvECR3 -74y83oJy3ZXB7315BZjA5APShllIdUJt7mpQDKiKbzqGajX3nPo0iSh1zUlv/+sw -tu4IqlkvhVwHdOPONTpf00jxFYvnL51kLRPgGnqj7tXQl2vQvjHGBNZxN9mdTMXp -HU6HdbhLXnCc6PViCdc8dpOT7d6tL5tZp34mrkfyTbhSHE+LzXj+dHR4LaoAfwDJ -ne7pX14NfQikcYo6UfVt4Z4ooxojTGJitB1XbwYq8rdCtdpopCP+0RiModJ0XmNR -7zoO9rbFAgMBAAECggEAcMPFNILIDrr/5uinexxagNC1+wdmNdrPySPXUcDrBKxJ -1d7C76RPUfEs7uCF1xb1j1xFxdFRZW4UmduE8haEXRdgqlmMvAlpKJ3DJTzuCSjG -w3DAdPiqMYEBdOSYOW7TCbGtddref5UjKx+8Sv4RzCSO5BxykS3oXWwKi5tZODVU -TYH/O7mIA+fQnLv2QCqLQCe7oFzhD7cyLzbpbK0SS3UE38GwAuExCHX810BR6wHi -nCJkHxEFdFagJKeMBAxNLNdqUG3ISL7/r+FrouSQHZtqAj/Ch/XMcIwEJK85d5Ib -S0qwFaBXuWSk6/V3hb4gkkL1OqJQ1u2n+yh8hjvMnQKBgQDVxpZH5bgMSxwBvWIe -BxaRZjucUVcptkNrmW+B6yC/rI7Y8TIY3YIDhAO6Tg4oTo1mANxnmY0EugwptF4+ -blbLt/vOthd81PfYTDziN/ZN7ChabQAnsC8QUwyEQ+cofl3eIu1tu73zKhdeC5Hg -THYipR/UDjOs6fkwMD1CWpvCqwKBgQDUGiX32qXAF63GA/O6ixAGKAV6ZgO8chzI -w7ei9CnVUfHOv+1SGwGj14Y8GcWE70VgNv/dArbVpS8E4XFFnkfcwA2W+plrt6DZ -wTlLis+yM8COQM6VSYv8Zhrf+7JdmRrxpa3j9a7rJjtC7YmrIK3aYDeft7gpTNRe -J32DBFDsTwKBgQCOkk8PzEkfCci38FJLrHaEqiX/btAu5Xu5ey8++k1xB+iNDu9W -XgSjy5ug2QXgI+NxsAlOnr3J9Tq/ZaelA3mnjCDID/FCM9bHzrCcPq5p8aJIDIIZ -9gqtXHXwkEjOXNjFmY23rYpVbjD/a7/yU4xGNtIvXvlfzPuAA+wXIM7Y2QKBgGHy -5V/NmEfaZ4SPZKnb+H1vVABPRiBrbkGEqLRXH06E9i8tUPJeyGYabMIqgJ3ARYCG -RaiRzU9iZhFR7xZgXv1hr6Tue5VUCrMk2Um6g+nenmjTItOsUDoyCO0w9hDlWJ0J -jwE7/xhW4n0o5y+g+shjMKzQkfp4oYYNpJexkZFfAoGAaGyfshedvZhR1jJMwfxy -T2K3HoIx9f0HpXbSvcTU60ibivfN8j3CKyb21G1TDy0vVXzmmAn/xE1d9+vIoRhh -zCadDje2aMNoWgYEjxCBm1M6eH1+JKJFDuXV9vxuMjZtdOsw9NGISCa5lVsz2Z/i -/SF0aY1nqtIKkcfGe2NRaKY= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCw8c5axdKzca7F +fj3FSFRw10+kCyzwnEiu/PAsC8YbTLGklnWC1wJtz60vHmX1+ns225omKgA1C1g4 +OvpLGt8T4uKy2igF/J01tthvXUE1wkCTU1ZHn+Ski6P6Nsf7AIEDKZKVefboBTp5 +vhCAM1sQv1sAuTKUZNn0wJPZdQygmmzmGqbF5+aPpbjHrhxQvDbFAMFANVRys9iT +PFNjknher/OwnE/zakphlvIrrbnJraQTmbSMQq/mrPnL0e83donhtN1onVUtTfgD +mVqH7wmZUpjrbF1fB0OQcdzX7T2IwbJnAJC7n1W47jXPaxsQ3uKS89yFQSJNfroq +0dTnWj2JAgMBAAECggEBAJJwLFWXbp8vsHKl3b1N9QRGTRT4YsLmtIiNsL7jJ5sk +R8hs9OfJxarXuv5J7Bw8pohqChMXo4BC2UoAFXDe1kwA85kNTx5VSE5R+qF/zh4N +m5/R0pAIVhOPta+4cpzad+sEFto6TAsNeK2UjNdsqSsdEtVwb9m79V3HmxTW+BJZ +GiUL5y5agp/gGHvDYGwUvHSMV4ve+GSc1bMsjmkIYmW/QBe0SK8zhxViNmcQm3DK +4fBxrTGsoszdKZajxgVUjxi5Gp7dJCr7dxKkVhBX0Ph5UU7eouLGKT+4y45xsclz +OCo0pOut10VXDCLgV6vg731JCL8cc4AkB7tKYOFm61kCgYEA5JAhYYsD8rmw2VDT +UGRnVn3EBWz+i1QwFHWSnlQYU0MO34PRua65+ixsjoN9LVjdeWdYSIS2xsyvbdSw +uJIU09O/JHAv7YlIoWKa3LdAM5qkSkU2OFaoGc3yaidXoZT8oHAFoQgV1rZaZnE2 +Jfg5yjBzyVbws84yy/oeOslrEr8CgYEAxi9pXi2tEuj3DURonGfH4StDXCt3CDx2 +Lj0pnprc0ppQYzN7gd8FL5auza39iuU2+RHK7MhzgZh8JI1szgwHNYPfVZGUvtE/ +wIjLBZ0wYfCMS9+Am4jEmprGTp+ks9Rn4axOllKzAhsBm5Jejm1YlonJXbBtg1QJ +LoHf4jhA6bcCgYAlf3/6gNQk/lIFVT1V8yMBDaEbQsaAFU/OC3wCfcl/34CRKw89 +Z20bni4xM2FWDz5GrK3PN/bEptAhNMPJhC3mktbJI565WcyQrZy6mVXvIW/Sv4gl +thHeKeubftjhqY/Iwpgp0ynCECjEpc48bH0OruRQI/NZjHrI/D/5ZRIsAQKBgFRL +aDLBlMLdSe9fsGsODUstnpn+dsOPC+lA5Insbo45SoXXKOolVMxX+APNxHCma8pm +kfxCijaOCAd4C9Zb8VkGvSBlKnI7CDW5OTkrEaAms1W2O7pr4surlztr1Z8Tchff +tRFlHXSAzh9Ak21a/voQoxBxcTMRtFIOtY4/xQRjAoGAT3n5kKdolTEHLoYk5Nq4 +3qRODRIm5uFHM5OKVGtgWGWhSDiUzbBw+DRL8aceWBI28Vwi5yw7pGZXWWLxmcdh +JHsjqR5f3HqYjUaHGPqo600S13i+cXuiR63GnMLhUApTXqv1fGx9OShIPm9imX8o +9hIG/rq5Asx02sY8apm13uY= -----END PRIVATE KEY----- diff --git a/jstests/libs/splithorizon-ca.pem.digest.sha1 b/jstests/libs/splithorizon-ca.pem.digest.sha1 index 293283125621d..a28ace5dc86ff 100644 --- 
a/jstests/libs/splithorizon-ca.pem.digest.sha1 +++ b/jstests/libs/splithorizon-ca.pem.digest.sha1 @@ -1 +1 @@ -88F3B334A89B06D6D5C7F6336F512B3ADE14177B \ No newline at end of file +8D493ABC737EF81BFACC721ED7869C62E28BE08C \ No newline at end of file diff --git a/jstests/libs/splithorizon-ca.pem.digest.sha256 b/jstests/libs/splithorizon-ca.pem.digest.sha256 index 5bcd94c39bab1..7e7927cebd1f3 100644 --- a/jstests/libs/splithorizon-ca.pem.digest.sha256 +++ b/jstests/libs/splithorizon-ca.pem.digest.sha256 @@ -1 +1 @@ -30C84F4D878690807FEB5F01C6AA72F3CD17CCC416FC3BF71A4D6BE697276867 \ No newline at end of file +36BF3E83C9B009BD38DCA3389FF19C1AFB1D7090CA8158EECF9F8F8D8E113171 \ No newline at end of file diff --git a/jstests/libs/splithorizon-server.pem b/jstests/libs/splithorizon-server.pem index b647aabe66249..cc8a6c6abb267 100644 --- a/jstests/libs/splithorizon-server.pem +++ b/jstests/libs/splithorizon-server.pem @@ -3,53 +3,53 @@ # # Server certificate for split horizon testing. -----BEGIN CERTIFICATE----- -MIID5DCCAsygAwIBAgIESEoCtDANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC +MIID5DCCAsygAwIBAgIERACtBTANBgkqhkiG9w0BAQsFADCBiDELMAkGA1UEBhMC VVMxETAPBgNVBAgMCE5ldyBZb3JrMRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ8w DQYDVQQLDAZLZXJuZWwxFjAUBgNVBAoMDU1vbmdvREIsIEluYy4xJTAjBgNVBAMM -HEtlcm5lbCBTcGxpdCBIb3Jpem9uIFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ4WhcN -MjQwNDMwMjE1OTQ4WjCBgTELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr +HEtlcm5lbCBTcGxpdCBIb3Jpem9uIFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ3WhcN +MjUwOTEwMTQyODQ3WjCBgTELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3Jr MRYwFAYDVQQHDA1OZXcgWW9yayBDaXR5MQ8wDQYDVQQLDAZLZXJuZWwxJTAjBgNV BAoMHE1vbmdvREIsIEluYy4gKFNwbGl0aG9yaXpvbikxDzANBgNVBAMMBnNlcnZl -cjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL5lcosVHzMYAh6GO4SO -LEWuT81d9cCszfxSDH8yTLnrEXsiMdDzTM9R/McZTpVQGHE74R8h4wTqf7roCrSK -gl0MokDUYhr/X8Gbr22UxfmNYyVsN1m55NoUkW/igBkqzsBQEzcfkdpRr6Uv4eu2 -uWcTLb4fJAQBzLtl0SHBgPMx2PNQaudscWKOezAV9zSygPwhqKUWsYzuFPGZPyJp -QU1cSfFv7PgaH8UGzK3ioXOd37HY0QANBaowO2XocAyK91Htk86mlG1ICEqkPHRG -u4mbsFDK2bf30Up9BuICQYLIIVe+hpSefqA/IbXqv+1x49hEMGK1ebOt15knuCwc -3RkCAwEAAaNbMFkwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMDgGA1Ud +cjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKDnlnCmQwRB+dWZlS87 +pXX6ixb1ipKbyZXxwbUI8l8HWEQKojZdJIvjsmVVTyo5pyW7DeFcmBr7xurF0Scf +aRv2PnPLZrFlS57fI+ZFlaLK5vIq559MessXFmL+S86aHdNcGBj+O122oZXeuokp +v0renwoQ7hWC8DalSZ6yG+JvnMxR8lfm1EGpKTqMqVtzYfNJutvO7Xh0ZjnaY7oj +Nj6rAPYLd4NMbBN3bm0XonnUfnsxGhtswDM04BPi9vP7mdaJofq6cAx/GY1UsBKF +6wuYNv3HN7Me9JRsX1u+g/1rogG2ti4/tW5JUBjIfBz6+Q2r+P4bzqlt8XvT0GUK +XmECAwEAAaNbMFkwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMDgGA1Ud EQQxMC+CCWxvY2FsaG9zdIINc3BsaXRob3Jpem9uMYINc3BsaXRob3Jpem9uMocE -fwAAATANBgkqhkiG9w0BAQsFAAOCAQEARDK4wkeH/LQ8apO9Q3cAnEOwzBsbz270 -iT+4QQOrZ660F5Ex1SaiZd50brje7UyZmx6YTybMtCxwUAoyRvM3PXvdRUNeMTb7 -J6VQXMGDmP4ERYB6Hzbc/VrOOjxe5MgbGOgfoKlc6HDTcBY9unvh4M3GJrTudVnv -wfTzwQR4bcFa7ASEohQbSXmw3qPeyD6od3q/A+ZTWvzEdKGKs4tHDoO0+KZROoNC -Kd9oW+bobAeHmfePGxjv76epqFO/7KQeUd/niY+98hvBCaHQQJT4IjJFmBXIo4JB -pZrmKAKRS0iD5Wy2Q4r51YSq1dy7YYiJI/FoO6pbxbGZLaAWBKJHUg== +fwAAATANBgkqhkiG9w0BAQsFAAOCAQEAjRVgI+Y6kV6wLv3s7osLiT6VJ11YOlF1 +JWH9uxRXp9R9ke3aadCV6FQ5F+1C9I6HvT6lCT3Ltlsk5QzOfZ4/GnC09BDhPQKd +aiiAXx04eZWvXGjC+ge5NfNB7ZOIjuf6qH6859Hr/inGC9rXt4+GQkEps7JiIHax +iATIDi0lkW3VxWS/jNtIDZW7+//I7Cb4Tp6xB/rZFMiKfoKpJgZ7dot5R78/961u +Upmzqqn23ULzZ76hGLL5El762BdxhU8IKk+uiqGLmiMRzLO1T+mlQtiqLtscf3ej +5erWdW4WCiJ7csv0+3EYzxnHvB87N1h/EXnsm5Vp+KmFG/skQ7hMbg== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- 
-MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC+ZXKLFR8zGAIe -hjuEjixFrk/NXfXArM38Ugx/Mky56xF7IjHQ80zPUfzHGU6VUBhxO+EfIeME6n+6 -6Aq0ioJdDKJA1GIa/1/Bm69tlMX5jWMlbDdZueTaFJFv4oAZKs7AUBM3H5HaUa+l -L+HrtrlnEy2+HyQEAcy7ZdEhwYDzMdjzUGrnbHFijnswFfc0soD8IailFrGM7hTx -mT8iaUFNXEnxb+z4Gh/FBsyt4qFznd+x2NEADQWqMDtl6HAMivdR7ZPOppRtSAhK -pDx0RruJm7BQytm399FKfQbiAkGCyCFXvoaUnn6gPyG16r/tcePYRDBitXmzrdeZ -J7gsHN0ZAgMBAAECggEBAIW2pWz8Fn9Bmytoxh/f4UPGmZD3LgJ02e0cil5CKxRe -/FXnu+itFgJ75/TD1yaRq7jKft3oZJEtRysyj2If7FIA72psPcIMG2nTNq7Uzvzl -yP2dNHo1TX3C+CkFf0UthSoWkogiCcKzn5F5QUbTev3iwDtHj3mo8emiJe95AGDK -JsBCv//qGcsyC6/CkS02734jW5fnP+On1qRxwVNMDbJPXlauJ13HFCShGFqUdXsh -DxtmpJNcn3x33lkDvQ8HOco3i7M2vXZdBVZUIBpCUK4jz6WqchFoG3iEpAH6rBLO -kGRkjx9kZTWd3w1gpV/Z3sd+DxS3ic8bzuo3H/BuRQkCgYEA+ewTPGOdCMr0geGO -fS/6yVn6Orv3ILjnn/ZyFHzoIs5F6wkc4vf1/EAQP2GJBJqZlQXjv5RSEVElDn2w -KCM12IsGlZKDoCMbDUba7yF1Y32OwvCeWdoxE/LoymwPZtoj9TWAbqUxD9WKYvaE -c18hYR7rOT7pNXfS2JLDV1u57j8CgYEAwwbJIhXkVa9f6XppnT0a9ojUqswVp8AD -r3WJ5QlvoF5rxGdqPmgjaobE+i7mYJcrgMQQkSumBnaGtzVunF9ydg9kFah7uJ/z -iX8ZuivQbTEJGgrDGYOMIfqRxloq+aR0f66RgUrWV11CuuFw8M/zf4YYCqtl1/V5 -oFu8gLh1DqcCgYAoPkxmp/+sX8n5dBGWtPgkHhn+BTBpE30ws+VwySA0IpT9oM0G -+zif8szq99CxbheW+IjQ25hPc4qNB84q2GieQty/jwUk7yI3BBbS9MdLSveHCmnl -PA41ESNNHRNp02yA2qmgp3b7/r22680uKr5cPjdQU4IH+xB3kUJlOb75zwKBgBtD -tZaoTNWVwOjZDi7HPh+9HU4LXfOnqlw7KF32pb5btOEuO/IhiOH5mUwIhnh2acSE -pHRHD9zTcR3nwGDoDw5mNG26siEra1aLiEM4oNjDBCy52HCWrV6KsxpxvJRN42N+ -OiVDGxf3l76cibeAut/XFUFYmNJKJQjACmhMYUD9AoGAEgjKAngD8M+XRjNOR9Q8 -3jemZKTSxMiTZ23Jhm3kr1t0OUHObSXrjI/pRU/DyiAkzfoFqp+gBU7EpJJWYjCu -p0UrW/vR1dj/LHIRtBHNYGwu+CLuZYJmzy1qMSxKthL7yERWVG96lZM4tUTp8UHi -91CJDlpFxa3JCH7VnApiUIk= +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCg55ZwpkMEQfnV +mZUvO6V1+osW9YqSm8mV8cG1CPJfB1hECqI2XSSL47JlVU8qOacluw3hXJga+8bq +xdEnH2kb9j5zy2axZUue3yPmRZWiyubyKuefTHrLFxZi/kvOmh3TXBgY/jtdtqGV +3rqJKb9K3p8KEO4VgvA2pUmeshvib5zMUfJX5tRBqSk6jKlbc2HzSbrbzu14dGY5 +2mO6IzY+qwD2C3eDTGwTd25tF6J51H57MRobbMAzNOAT4vbz+5nWiaH6unAMfxmN +VLAShesLmDb9xzezHvSUbF9bvoP9a6IBtrYuP7VuSVAYyHwc+vkNq/j+G86pbfF7 +09BlCl5hAgMBAAECggEBAJlkTcG0bp0HfHHJyTPPC+zN3X0UtXcQdvuwDjdUgSJg +sR+kd4GWB/ooJnB+BEs/zP2mNhRKnmLS9Y3o6Xv/cMeMV3syRAMHizC4jtO+MGmS +PtpMVqVBVqp3pwrWXFRoDdjpqbth2hx7t81VGMUb/FYWps0E2MO/INsndWiJaRXS +SAgvlbJSyELkp8BtG2d/xKPRwmGFO7IHwqpn9dCTLgymlV1V6eTwntqg9pUjxbRg +JnqeTuhI1kMvJAMOoMhTOqIG2TVriRr/goGsI6dKfkdxV6xGG+D68GoMIcCdYAu/ +mxxw62r+Ujwej8dhjnBt+eyjl2eUrKDkC6dTlfeFg8ECgYEA0wpKw/X4cm/32nPX +B7uXm4IjereCXgRBQz0bsb7G1M3MekYY6weJd11JwwAVjH9AVO4M+OyjqtS4aKAR +X3o6Mz8Rq5fM/ddhNP80z0BWbzpkA4jJVxlTT8dajMPf0XB6qQnXsNQmSVnfxwdm +zJAglg44pf0999+NAmA+LSYal6kCgYEAwy8AumQCwKUcsD9VH5mPVvVsbHSHpDX3 +BfwEX1ludM5xAgOw8U+5OJgSFqQKUo5HMwzgsd0DN3ftDIbyhibLDf1+nJrWzReV +YM6oWkHSTMYjWgD62DVtgpUzs58IJjTQQ0B0TUi3/7gmRWoHoJpBGw4tzTgTeG7W +Y46x4GJi4/kCgYBCn0AlBrRs4/35n5IlZcohlH7A0ce9CFjV8ieZACHisik3/IMH +RzTVUsTjY1ZqKQ3VAeVke8fbORYSKV3ypzJSVTmt0tkq1WBhi6NPPf9LU8KY/wiY +j20mhUeHopo4kuqWDH5j20VO6KllOmfwchtnY4vskrqKUq9ALcPTGvFQKQKBgFId +kDzanp3V2T/9JR7qR/fZwQfP7tETMx39bQmAYowZ3klurB6Z398De3izAvTAjwvX +OyEZTqUje0Nt8tKlt3/nSkEwk3Ytmdbkmsd+Ma/DHFRdXSthLvVlOB81pQQN7CCf +GU76bQ7UtqU1eogC3ak4SeSw1uAwIVo5SnSQb/JhAoGBALyq7FOHaiNFngQZs28D +sr9J+H/e43DsH82XGg2XYU9SMfsNrrXoS1MwdUGIPVV9fKRdoFUcsFlcNP2zqZyg +RyBuOC7/uhi2TAsggXTyqcPVTu3td2BRPQ0LarjHD4eSmgKFcC2reqtabjJ5pRGD +k9i21Ce0oXr0kdaRGi63C7cP -----END PRIVATE KEY----- diff --git a/jstests/libs/splithorizon-server.pem.digest.sha1 b/jstests/libs/splithorizon-server.pem.digest.sha1 index 7758f1bd3f0a9..980ea0a2634fb 100644 --- 
a/jstests/libs/splithorizon-server.pem.digest.sha1 +++ b/jstests/libs/splithorizon-server.pem.digest.sha1 @@ -1 +1 @@ -E877FCA8158A566902AC60584781143547F1470B \ No newline at end of file +F5C2A872E64BE5CD7D88816BEB82BECC1E086DAC \ No newline at end of file diff --git a/jstests/libs/splithorizon-server.pem.digest.sha256 b/jstests/libs/splithorizon-server.pem.digest.sha256 index bab0025dbdc7a..b89ddf38ab782 100644 --- a/jstests/libs/splithorizon-server.pem.digest.sha256 +++ b/jstests/libs/splithorizon-server.pem.digest.sha256 @@ -1 +1 @@ -17EACF5F5659067ACC1CFB74075E00741D8638E6F623609F834E5B0E84FE3E1B \ No newline at end of file +E45A9387367EF18299199CC2AA9ACCD74AF3CA279CFEB60BD9F3D6C7BAC34A47 \ No newline at end of file diff --git a/jstests/libs/storage_engine_utils.js b/jstests/libs/storage_engine_utils.js index 7c6f2d1309ea3..082951572e60f 100644 --- a/jstests/libs/storage_engine_utils.js +++ b/jstests/libs/storage_engine_utils.js @@ -9,4 +9,4 @@ function storageEngineIsWiredTiger() { // We assume that WiredTiger is the default storage engine, if the storage engine is // unspecified in the test options. return !jsTest.options().storageEngine || jsTest.options().storageEngine === "wiredTiger"; -} \ No newline at end of file +} diff --git a/jstests/libs/telemetry_utils.js b/jstests/libs/telemetry_utils.js deleted file mode 100644 index bb42973301ac7..0000000000000 --- a/jstests/libs/telemetry_utils.js +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Utility for checking that the aggregated telemetry metrics are logical (follows sum >= max >= - * min, and sum = max = min if only one execution). - */ -function verifyMetrics(batch) { - batch.forEach(element => { - if (element.metrics.execCount === 1) { - for (const [metricName, summaryValues] of Object.entries(element.metrics)) { - // Skip over fields that aren't aggregated metrics with sum/min/max (execCount, - // lastExecutionMicros). - if (summaryValues.sum === undefined) { - continue; - } - const debugInfo = {[metricName]: summaryValues}; - // If there has only been one execution, all metrics should have min, max, and sum - // equal to each other. - assert.eq(summaryValues.sum, summaryValues.min, debugInfo); - assert.eq(summaryValues.sum, summaryValues.max, debugInfo); - assert.eq(summaryValues.min, summaryValues.max, debugInfo); - } - } else { - for (const [metricName, summaryValues] of Object.entries(element.metrics)) { - // Skip over fields that aren't aggregated metrics with sum/min/max (execCount, - // lastExecutionMicros). - if (summaryValues.sum === undefined) { - continue; - } - const debugInfo = {[metricName]: summaryValues}; - assert.gte(summaryValues.sum, summaryValues.min, debugInfo); - assert.gte(summaryValues.sum, summaryValues.max, debugInfo); - assert.lte(summaryValues.min, summaryValues.max, debugInfo); - } - } - }); -} - -/** - * - * Collect telemetry from a given collection. Only include query shapes generated by the shell that - * is running tests. - * - */ -function getTelemetry(conn) { - const kApplicationName = "MongoDB Shell"; - const result = conn.adminCommand({ - aggregate: 1, - pipeline: [ - {$telemetry: {}}, - // Sort on telemetry key so entries are in a deterministic order. 
- {$sort: {key: 1}}, - {$match: {"key.applicationName": kApplicationName}} - ], - cursor: {} - }); - return result.cursor.firstBatch; -} - -function getTelemetryRedacted(conn) { - const kApplicationName = "dXRuJCwctavU"; - const result = conn.adminCommand({ - aggregate: 1, - pipeline: [ - {$telemetry: {redactIdentifiers: true}}, - // Filter out agg queries, including $telemetry. - {$match: {"key.find": {$exists: true}, "key.applicationName": kApplicationName}}, - // Sort on telemetry key so entries are in a deterministic order. - {$sort: {key: 1}}, - ], - cursor: {} - }); - assert.commandWorked(result); - return result.cursor.firstBatch; -} diff --git a/jstests/libs/tenant_migration_donor.pem b/jstests/libs/tenant_migration_donor.pem index dd67976e7578c..3ebd08bc8c2c7 100644 --- a/jstests/libs/tenant_migration_donor.pem +++ b/jstests/libs/tenant_migration_donor.pem @@ -3,53 +3,53 @@ # # Client certificate file for tenant migration donor. -----BEGIN CERTIFICATE----- -MIID4zCCAsugAwIBAgIEcGOmLTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID4zCCAsugAwIBAgIEfyfW3DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBrMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBrMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxHzAdBgNVBAsMFnRlbmFudF9taWdyYXRpb25f -ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDQEvdnV/rKKmAX -YmOJz6seF7z0JL4fO2ImmMckKQUYGuWR/Oa/gXuQqb7QHaPPnlKHnKkxrRAotXG2 -2c2a2bWBEQOODl5qsLL8JN8vvJz9iWu6sTh6fnkO630TJKobtlaApgqJOa/H6TPs -8re6+n4db8yqYC2d+ue/rLI4ruJFRM5VbevgutqsSlVlFYf0gTnsTck6pexXw0HK -Zamw0yMAEg/F0VfHjcVgpkZIzZKpX5F+v1FmZqjTdAnjnwNdFdwHLlmrnvNfbHkN -dCbSk1ETole7NTu2LBH9+fxvqzjWmyoA5Srlvt3cHN97XsHwKLC+4ikvRSh6pUwg -ha4sXLPVAgMBAAGjgYUwgYIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l -BAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFJZwHiyyArO1ExT8WAuqUcSYxx7zMDQG +ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCUWLVdp1mrlWCE +ujgB0Rjddra6tc8PfjnZUW8qlmOmqQTuPGtkVJBrMsHnmT554XQ+6UsdWylit38S +7OJOPXR2n8jc67AnrlfnvHh+p/6Q97oiH0+Bcg+9Kl2V+zeNDaNd95JZF+nqlb6W +WGVPadIILJjD5wWSXqi0C2spN6wyuq0kYFgzCnBJp6p2haIjQzdlstmTOJP9/FPx +pWWTGBz540Bfyjv4LYPuXmIFf1eVh+xkh+VyONfG/ZJyc9MsINbFi3j/Low6sEgc +ZoqTTopBB0XKJzQ3NtM63zVrjmizfWFW3a+KlFb9SYfdZUkah+wCQEgwZ7gKo13e +XNf+3lFjAgMBAAGjgYUwgYIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l +BAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFJBZSJC8XIqS/3E+1ULuWywuw+/8MDQG CysGAQQBgo4pAgEBBCUxIzAhDBh0ZW5hbnRNaWdyYXRpb25Eb25vclJvbGUMBWFk -bWluMA0GCSqGSIb3DQEBCwUAA4IBAQAMuCkuvIDgMdA6J+DEGxo4Y8PFrcAuzApp -rtJ9O5iyYfvCFQe9fec0M8aIZMSwnIjpLpn7W+ClK+aXQEetii6uDArmPl8Ql7mu -y4Sv47iI+9SGTHpjU0sKDQXXAwSyLrOgroRGg0AugpZ3MjXPnEQZqbksgrZjWINi -uWT/h9PrUBVsXiFAY5zN0OLPPUc72CFRKSbfJdHbn4XwMs9NFQpUa1BPxJnXBdXq -LlHKrxn/FvJXiRaBcekKpaGWdGYXL5ar5qv3/ZxdXnO12JFGj1mB+biMNpL5I5cT -/vnXg3S54v4k+EG/0oqo9zbkOp1FEU/n0FKUCg67AR8k0htvDiwV +bWluMA0GCSqGSIb3DQEBCwUAA4IBAQAEMnjAAZApKG33zdFMX122KKEQXHJLqe5Q +EGhcUbP5gZWGDIM0Enli9tfJnS/oRVfe7y3q9L9jx4ym2M3cIB4tCa1KACZ65mgV +MsSS8MgiN5t8gNsxYN5WWeZNN/yoceFbyspJ9s29WNTweoA/Tv+JI0xSCj3HSuMb +J/nbx6Mhu5+ZD+y68SeqlL6IOuv/XNyPBmQD9zdiAh1mrn9M/zNZ4pnpu32gdcYE +szhLvI7ML8BszuDy0XPXkpWsaDS7kPH43qYqtYbXbczUgyRWcraXqhsHJWKhludH +gcxAUCcu1/e4oTzOoY4dAZF2Xx3cwB9aFeWqezSKc2SGHgQTQIB+ -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDQEvdnV/rKKmAX 
-YmOJz6seF7z0JL4fO2ImmMckKQUYGuWR/Oa/gXuQqb7QHaPPnlKHnKkxrRAotXG2 -2c2a2bWBEQOODl5qsLL8JN8vvJz9iWu6sTh6fnkO630TJKobtlaApgqJOa/H6TPs -8re6+n4db8yqYC2d+ue/rLI4ruJFRM5VbevgutqsSlVlFYf0gTnsTck6pexXw0HK -Zamw0yMAEg/F0VfHjcVgpkZIzZKpX5F+v1FmZqjTdAnjnwNdFdwHLlmrnvNfbHkN -dCbSk1ETole7NTu2LBH9+fxvqzjWmyoA5Srlvt3cHN97XsHwKLC+4ikvRSh6pUwg -ha4sXLPVAgMBAAECggEBAJZuL+nKJKmGi2Q8oMQgnJEsDlLgQYfo8eP/rnHJKkKs -J28tTVIE9eW3oxlpZGYVC4u5ymT8vuL/kw/kVY5veZavS0enwcEWsMA0lBs6HLH8 -aSNXPwLobvNHc2ykpEtWvyFRaeqeByTbx8u2CvkmYok7q9c9o51EqcncvBjY7wOu -Fch/Rq2P0FTidpyk4APe4FaENDn8MKYCIKLNyLthkHqXQ8mzleJAlof+4lXwEQ9k -HehpklLqgoQPyCyF0TDwWgqMs72tBo7gSZQ7G6tV1/iwrW/29YL5L90uZH8VqWyh -Eoo2NoNifhPgjH0Wt7QcNiK+ofuqypEaf0akPguAW0ECgYEA9Kr1tVg+VTKRj0LM -hXRGq96FmJ0mtwtCtoBG7O91llskN3k/wcHjNsh+OqNSrU0S5SoSJiKjDm9j9CL+ -Weo+oX432zB0Y37mHYGo2tn97UeZw7sCLxdpI7E8MNDRyUw0fECfwzb6qk2eeueS -AaE2D9mwgpPTL4Oh3obsQlVhHgUCgYEA2bYc3Qcb0wVmx9XV23XKa3IIKxeilWw2 -rnVFifHNQ0VCZmSdj7OUx1VnyHM8PLxMme+h1bziXZDsIzGIyZ0xJqsud7muKtHX -HSpfyGZi2pRMRvNM34Ql2ZwSiFQvLyKbSXKzkYaYUjCPG5nRUV/uDQJ+544fYuFz -GzVw35Q7V5ECgYAx4VzAgOIMWofkbhjmKEN0LKSN7ZQEA06xiEttaJBPe/tRN+3w -oYeiFpPtfniPjkQI5l/W2H7npXv6PNwgFqp4IBRhImrIEgn2AAYdwA2Nv2gwyEJ0 -uVAdD7gWt6vdXyTgJAiuMto9uB9ULTvOC0DLUHU02dBXQn3QsS8fguqOxQKBgFbF -4CE0eSGVcI7Lqu5Kt4A1tg2+4N1fgTfto+bESoS6cSmwhqBE+lKstq3NSEAI6Rsd -yMoQ+8RBQ+0PDC8XiQSZi+7KQiHs+ykzv3N3Y4UtQG2zAXvBBvkB3N0beKb3yx7u -KaYiGEoB8rgUibo+WvoyeXQ/UEEwDJnWpgNm6HQRAoGAVchu4R+WGg6ItFP6kgNZ -AWh8SunlDjBr7Bc7TEXyQm6JYKH2KFKY1YxSpKNOg7oCddIHqjm7PkS+v9VpWOIA -T/43Ak+4x+Heb/JiiQjyT7hd9z50KLqdneDjGN5+xrRRw+nhca0qMG0+ldVrgkwk -bw+41eqcCw800imGJNg/aH0= +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCUWLVdp1mrlWCE +ujgB0Rjddra6tc8PfjnZUW8qlmOmqQTuPGtkVJBrMsHnmT554XQ+6UsdWylit38S +7OJOPXR2n8jc67AnrlfnvHh+p/6Q97oiH0+Bcg+9Kl2V+zeNDaNd95JZF+nqlb6W +WGVPadIILJjD5wWSXqi0C2spN6wyuq0kYFgzCnBJp6p2haIjQzdlstmTOJP9/FPx +pWWTGBz540Bfyjv4LYPuXmIFf1eVh+xkh+VyONfG/ZJyc9MsINbFi3j/Low6sEgc +ZoqTTopBB0XKJzQ3NtM63zVrjmizfWFW3a+KlFb9SYfdZUkah+wCQEgwZ7gKo13e +XNf+3lFjAgMBAAECggEAFMsCUOg+F2DOsIvxgb+vPot/PVhyd3d34FAbxtphiWCz +gjkRzHRV8dChgTbnnOnPA2przueSO3YA58QwTEss4duvqdeXT1iOlbrDIO2nLuaq +PlIpMLmuHxOrHnSh6rF4rpke+EHz1Z+7oRMu0BzDBcZKe1sMWuHNv77B/HsDNfDh +5mAB3bG3NYu73lfLoeA1x+r4eKBGQkcz98Uil+Nhdn1Tvef5nOa0jLHolHl5mzyW +HXCKnJGvHbklcMll7R0jTJ/GPnhHVSBVpZVD9AsyGZc7a0fp6Cb5jmxUd3YkqWvj +juCf7MYJHUEIWAPgSJ9CXkh2lhKoqWWNCWFA1tILkQKBgQDDz6Cjmkbl1k9CRipQ +76z58SF7Aq6w7TQL/Wl1FSRb1tGgM7T+F/KXQRqEVF3kNPC9DuvcAEy4RwlNQ/Gb +e2ZHS+ewJD8prjtCcKwr/U7hmgeYTvmvPWZICAe8aA0k1tRHmhuXftvBcozWjvI4 +DSRVDrHZCe7EGmd5ONXcoLNnXQKBgQDB8hjSeuaEuvDrcoagIl5YbbD2H+3avs7o +K7eqWKozH2yGuMViTbBSad3GqW/7WxE2E+7cUqVKRECnOZOy91+kU9H3Y4gqYeIi +Nd/ktVi49huvauvUAlFcLGEeR5k6gqFFIpWmMR8ksb+2rwkaLeV8J5dweRiB5iO/ +OJvu0zDPvwKBgFoWhInAyS2lVAHh1kY2fxbfYc/+g/DSsUdy57n+aCP4yakzIOpm +ii4cvd50El6UHM5etxUY7jM6O4VY5SfrtjrHKWlYw9ahWpJO1GfIuyoAe84PR8Dg +NtadGzILjUCNtkzbEia5LtHpqfJtAfPX2AdqI1j/wOJoY4LaK0q+hMvhAoGBAKd0 +623A5AyyhJYmeosFYcSkYWkBxJNcsLXSGXEhwUMG3OOt4e+w+I8QDYccgS3cQY4Z +w+HEEid/qvs4QYkDAjeWj/I0pDtD7MZxEfIdio+ZH+pkPV7+6VTWlLxaQbk9VRWm +eOPYAfXi649GrmtGOaDrZlyckkaGeVMAA9M/0iNtAoGBAK1KI3i+kjxV07KJJjrN +TGERncEEtL4eMptiRB9QautH3rJQNR6LJLTda2uxcG/hmFHoyo8OVYN5ZSWqYwFH +foIXGsF892Xnm13vc4O0nMZEq6d74NqU6SGI3skcuta8alU+332SY6kVLQdtXm92 +wagjbm1BGKw0V1uz9cWLgB9D -----END PRIVATE KEY----- diff --git a/jstests/libs/tenant_migration_donor.pem.digest.sha1 b/jstests/libs/tenant_migration_donor.pem.digest.sha1 index 804d455c5f5f4..9454a23078a9c 100644 --- a/jstests/libs/tenant_migration_donor.pem.digest.sha1 +++ 
b/jstests/libs/tenant_migration_donor.pem.digest.sha1 @@ -1 +1 @@ -EFDD44ECA40D0E353233605FFF194228782FADC4 \ No newline at end of file +CFBF2C663DC9AAFC6835DDB66A9B9C244B307D5A \ No newline at end of file diff --git a/jstests/libs/tenant_migration_donor.pem.digest.sha256 b/jstests/libs/tenant_migration_donor.pem.digest.sha256 index 498f714d523d5..3fd0e3f5c0973 100644 --- a/jstests/libs/tenant_migration_donor.pem.digest.sha256 +++ b/jstests/libs/tenant_migration_donor.pem.digest.sha256 @@ -1 +1 @@ -5CCF1A18C3D88EEF873679E49946D55B6C8ED4B8F6CE72B70A388C1F7273FC5B \ No newline at end of file +71984A2DA870E8AB0F44DD6A11992780112B2F0D05E5BA4C39397DA9B11EB723 \ No newline at end of file diff --git a/jstests/libs/tenant_migration_donor_expired.pem b/jstests/libs/tenant_migration_donor_expired.pem index bc7c89bf47bbb..48f001a1c7c16 100644 --- a/jstests/libs/tenant_migration_donor_expired.pem +++ b/jstests/libs/tenant_migration_donor_expired.pem @@ -3,53 +3,53 @@ # # Client certificate file for tenant migration donor which has passed its expiration date. -----BEGIN CERTIFICATE----- -MIID4zCCAsugAwIBAgIEdsgsBzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID4zCCAsugAwIBAgIEbZ0UODANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjExMDExMDQxMzA1WhcNMjIwMTIzMDgxMzA1WjBrMQswCQYD +IFRlc3QgQ0EwHhcNMjMwMjIwMjA0MjAzWhcNMjMwNjA1MDA0MjAzWjBrMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxHzAdBgNVBAsMFnRlbmFudF9taWdyYXRpb25f -ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCoe0ZBYmC5i1k0 -OHApTg3CFKIs/oV5IdRoDG5lKc0uowK87JGHwuCCRNT/9TfGhNJ/X86I1lHTbo/A -2Q57C0+/R+Fj5fndFQLVGLKXwtf15/7/iDWS3/wz/WtlsnVePg54EiY1vM37LSXE -nd10AAR9aXIs6FpY67Ku9RxExhluxnSHpw45dOkrvaWhRlaESmSCCYzdcHAjBLrc -C3qiPi4OJ/3cuhCcEkgz0fvcTP5DqSYtz+n/yO0evu4zeNqypEjGIA1fneWVWsyI -IXcIs9VzrR2RUzx3ejgq3JM6TollZKE2nkF+UerxslbqqvtwFYi4xUcBzFNMS/0f -rn5bz145AgMBAAGjgYUwgYIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l -BAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFGT9EzQ+l9vWpncQ68RFggFKcC48MDQG +ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDAox42YtKA1C9/ +OrFpDLJe/q8hJsaKWkdp1lSXwR0Ptu4p7m83Lxeo/+ciriHEAxIKLoMqQ1XBTdiy +gmWAu9rj3772pqDfQVqNC/1fwltQyyvzoLLcN3TcmRtHUhbslpqwJUPxBy7/8sKk +X4oKr8pMeabE/KUvsspjQ1rcH+wM1j1IO8rsrjlki/7GuqzvAua+sGxoQLvd1ex5 +EV0hqA0MVslOqEKoKdjYqMe2/jzeTjRq0sa5WEYRHTrDchoJexAFH4YUe9GnF7EK +Bwlg7X2LxfOCHOx74WHi8znpMxYzPtVcWxFQn5Hy6ztanb+fwEh2xzQf06dKIdzk +wy1aMIxPAgMBAAGjgYUwgYIwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwEwYDVR0l +BAwwCgYIKwYBBQUHAwIwHQYDVR0OBBYEFMqUoRTLmcR4eWyS+JeN+nR7UtZXMDQG CysGAQQBgo4pAgEBBCUxIzAhDBh0ZW5hbnRNaWdyYXRpb25Eb25vclJvbGUMBWFk -bWluMA0GCSqGSIb3DQEBCwUAA4IBAQA8Llv++htxTPUiRJ/28GIg10BEKogNtoux -wq6k/CX8PfzNKZ4AQ2yLVQ6QVfM80tUEDcd9mRm8UY6axNtqKhEOSZMDvnMJluQG -+fELJLZV0uHCQghVxGGurJIDRhI65yV9KlKOwwa4LYsmk3BILfYrSrTcBobA4W7E -lJo799J1knsx2Hory0MbEgaSRG/04e+xvr5ny885e0NyirHo7hl0Wgqad4J/ki1p -CY5U39MHL7dobq9fRwhLf4fXPlx+xOAw8oWxvYZu8LGmCT9HGJ6Gkl3knGI6JyPB -qNlsraJwWy33dkOQ+HYWT9faOuLFQnQEc66sMkLTO0j7OrxuqgXP +bWluMA0GCSqGSIb3DQEBCwUAA4IBAQBZNPp72UtbhG43ZQqWvyTE1AWpPkhUR3n5 +xbih8X9b+2EikLRg6brxLBPM8leiH2AkpFpj3/givgpbbR4yFkKEHfQASeU1tbqp +qPOtB5+5l/WvqcGtz6mV4/T4Hw2GfqACkTrVuJnR3ZJF4+jWDjfEMLtyRYNTKvPC +wyNYek8+9LM9yd2LYk4zt5NNCCDJ8J92Cq6wRX7opWGxwcAhs3ESVcWMn6tR3gS2 +JrV7iXsi7qZSreW33E5n0fgP5+PMbVvYKQB14gaAqNWMfl7XI0ckJyqgEWvlhJY7 +ZWg6lZNCUHFYPHON+Vqixv9UgylvTkJdmxazyG/kpTKKNISDTpOE -----END CERTIFICATE----- -----BEGIN PRIVATE 
KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCoe0ZBYmC5i1k0 -OHApTg3CFKIs/oV5IdRoDG5lKc0uowK87JGHwuCCRNT/9TfGhNJ/X86I1lHTbo/A -2Q57C0+/R+Fj5fndFQLVGLKXwtf15/7/iDWS3/wz/WtlsnVePg54EiY1vM37LSXE -nd10AAR9aXIs6FpY67Ku9RxExhluxnSHpw45dOkrvaWhRlaESmSCCYzdcHAjBLrc -C3qiPi4OJ/3cuhCcEkgz0fvcTP5DqSYtz+n/yO0evu4zeNqypEjGIA1fneWVWsyI -IXcIs9VzrR2RUzx3ejgq3JM6TollZKE2nkF+UerxslbqqvtwFYi4xUcBzFNMS/0f -rn5bz145AgMBAAECggEASn+wEithH8pZWHj4R5tnBAHBsGmfNW/bD2Mn0X3JZ6zX -AhkNRN3MDm5HTSLuVc7XkVEY73LtXbOOxzRzbb8TtT4gkN7ZPIQU64tvNSIIAoli -PTXKkraOO6L8QUzHDzQbi8ZitPJWlpFhGO7BRJDZa2ccAHWBtb4LFcf2hwadqlhI -xt6psHS7zZ079U6NY/3jtPIr7OZYJpYPM9gyyzt4UsPbFzrEqFz98tHDQ+3hnVxd -pRccFdqbw3R6IG3MLrRWMJUuKbqGtCjnhpvbbxEddp75y9BSK+k+SdkoTSdTPVVS -QDn8/tXtQJ7QU4SjUp6FXWQey3kfCzSI0n4wLkmcgQKBgQDZNn0qv1Nviltm2fwP -0PICNsA6jni5fiPrSaNAdOczOi7ZfmBVjhLgz9n96AXgjCc+xFnmjswXv4qbc2t3 -u0pbzJv9fc4tNB0Eb/jhjJSC6AXJ8iCOyXDoyFddAkjzjH7+8jc6RVFswnka2Qbt -RO1YfxZ5LyOUaQbq/cAzGCOZMQKBgQDGkR4m8O9tpgDszZxa+oSnWc09kxEBNBfE -1ub9QRBP1dpvuL/56EVUt5W9XON2xSNxLsdtBoOrgdBVx/WZoGBJfmJxMG/FQPTi -6Ok3pqTwyn6keOtV12t/gLO/dfpTzrKYQyVau1tbxXHjL/EUi2DL85k60fZG5ter -qHjpMa/TiQKBgQDPBkcP+iDM26K4CaVbcbtnbsWSSf44VUho1dt58LH3Okoy02d/ -w5Ssno5XmNAZL5usEDrbK1jMfave84gHKwP5MK+wUDq1DMlnVE1ys6lMh1YVHuf3 -D3nE9EWICUh8kHjuBu+qYyzzKvuAIj+e3yYURbSmtc4EgoGscUPBrw0m0QKBgQC3 -KDZ6zvTa3CuwQjv+A2SHBSN2r4gY5xchnuS0J+bG7UiukuipuKDy8uAxKlQ6Qr9d -cDvNihu8AGLOLUKS8Ua/o89j/ryqYy8/en1cst8jqHTGey8AIFNs6adjbIx574f3 -QMBc/8LWVLqnR5qFS8b+eXUWt4MGhXncQes9lnJLmQKBgQC4OM/UziUXuDD7Fl6f -79rd6JxbnfKlMhMnY7wpeeqyt3ziT5OQRhlkbfH/r5H+Tce2q5+xvXn6s6LtFUlf -jPQjXyo/k0+k00n78hej0Q+CKYkPBvbB3VL1xoZa+o5QP/wQtm6ozTxpH7fuaQx9 -9W+75loYzXxN+xu1EFPfuLPpcA== +MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDAox42YtKA1C9/ +OrFpDLJe/q8hJsaKWkdp1lSXwR0Ptu4p7m83Lxeo/+ciriHEAxIKLoMqQ1XBTdiy +gmWAu9rj3772pqDfQVqNC/1fwltQyyvzoLLcN3TcmRtHUhbslpqwJUPxBy7/8sKk +X4oKr8pMeabE/KUvsspjQ1rcH+wM1j1IO8rsrjlki/7GuqzvAua+sGxoQLvd1ex5 +EV0hqA0MVslOqEKoKdjYqMe2/jzeTjRq0sa5WEYRHTrDchoJexAFH4YUe9GnF7EK +Bwlg7X2LxfOCHOx74WHi8znpMxYzPtVcWxFQn5Hy6ztanb+fwEh2xzQf06dKIdzk +wy1aMIxPAgMBAAECggEBAKHmdfXviEuOCX08ru5DJYaBNl7+X87XoT1qTR5dxzb1 +36SOKBqREamPqYqUHvzGN9smzEYw4VndO0qMHRLcz1LFMZHK6Vm5a4kvkntwtZ5p +oz7WyHwcf7MHWs3OSEX/LXLtXvSrvOyP8taDFVl19OfhNjBIxewYr9BNQ4fNrG38 +CJeFcXua5bI7EEKWeRCo9unQs1/Ui0QQmxevsBxVFn8LB2oMaZRxpP4wf8Iw/tO3 +UilNwhG9FRpklFvjvn19VIP9b2dv2vPsfZ86tdmvBFrByjKpZ4ATSJYWj15mIeRv +wuP/A63JqNfB4hK3PbIV76TWeZpagunqECddgUN4pcECgYEA/IXoSxPziogHLwkr +bX8B6NLju6oAB2F0juJ/ycnA84ASOpHgdmBBiTId52ZjV9W5WhT8TdpNhHFmlg0V +nwv2bTA9fj0cK8SDIw8moozqUpWDRPlOc7lSqp81tuwXDdfSQy3vu0XsloFeGu+F +4DuX1popaCxkLSehLPEiYJBHyxsCgYEAw0of/012E2DiWbwdhwR2mfZS7naaVkzh +bN5kU18Ef3ICkPHb5Y+UdVqqRIIz5R+IEnDCFKFSyKddfHEXFlUHBmZLNBpOH8BD +fXQp4y626oROL2y3mDWWJjXNB0wiKpcOikf96QBDZ5AjvljnrramCdmdBeoL4OLS +0Dgg9t/gAt0Cf1LbnV0FLuRFvhWw7I8BHZ9Hk3IAVgIV+CjqKQQNL0K2w0R0mTI+ +Y0KhLy6+adpoRmlIeYESIF1U2FRj9rQ5OKKbMZI6ewPhdyYJ0qODmV9/r5LyOsu+ +A2H426cp0Ga6akOgzwij6P15dWdhMIxjAW9pJi0IY9ZtKnWUqbMFpIkCgYBto94W +/aw+8PPjoWOKfA0CH3MsAlZgAUXGU41L6VqjGqUqLz9fwJ5+zGovkFIGXw+MGtY7 +st7BXIV16iAmH5KUGzY5iFM4LD833dHuhDmZrLIgmg7xW5Ry213CrRG5i5lUNVru +R4GQUTJgGZXpVw4dFZm8ykvk9DObRwfq0oH32QKBgDhdCAjpr+wCokHqGCNYWF3J +NJIVGKF/gUFjROZqkDtOT5jVLEwKrGy0GUlnY8XDzFZtPa1cKISGjJ7s5rWKhjW/ +w4QS8Pv8Ms64TEIb/J9xqUrkguijBW373d4nJESGqEg5XgZbcivuP710uYHTf28k +kAYbuWRygT+Bm9JyNwg1 -----END PRIVATE KEY----- diff --git a/jstests/libs/tenant_migration_donor_expired.pem.digest.sha1 b/jstests/libs/tenant_migration_donor_expired.pem.digest.sha1 index 
11bd90ecf3ac1..7ec9cd87b21a4 100644 --- a/jstests/libs/tenant_migration_donor_expired.pem.digest.sha1 +++ b/jstests/libs/tenant_migration_donor_expired.pem.digest.sha1 @@ -1 +1 @@ -1B3466B84D702B4AE46FA78B60B73763823312D3 \ No newline at end of file +5F48E18C9FA1815556F64F6CEA54D507D6758560 \ No newline at end of file diff --git a/jstests/libs/tenant_migration_donor_expired.pem.digest.sha256 b/jstests/libs/tenant_migration_donor_expired.pem.digest.sha256 index f81e94de59503..9009313f52656 100644 --- a/jstests/libs/tenant_migration_donor_expired.pem.digest.sha256 +++ b/jstests/libs/tenant_migration_donor_expired.pem.digest.sha256 @@ -1 +1 @@ -AFE0FFD75316145A8DB71A6FB4AB77EC379857F1D6374121F6139357EAA86110 \ No newline at end of file +470FE82061694EB9A894488792DDFD4C290C471AB11970E2DA42984E26BAAE68 \ No newline at end of file diff --git a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem index 20b487ac64bf6..86baaff3ac850 100644 --- a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem +++ b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem @@ -3,53 +3,53 @@ # # Client certificate file for tenant migration donor without the required privileges. -----BEGIN CERTIFICATE----- -MIID2DCCAsCgAwIBAgIETPHVrTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID2DCCAsCgAwIBAgIEX0I4eDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ1WhcNMjQwNDMwMjE1OTQ1WjBrMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBrMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxHzAdBgNVBAsMFnRlbmFudF9taWdyYXRpb25f -ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCukv88tmaxJrjz -RBGNRwU6d7/GTet471tkLQtcY2+39zeSQiqDT0Ru6wcqnXbtfSBl9/RZoVUiMYgm -WDH8PpdxFBPQ2UsuNqChEJGuoYHh4G0FqYbrdhFGEuNlT+NqgSvSDJ9cpo9tl0WK -Qge+Bap0DaL05XH23Q4XQh1pZAHH7r5a95tOXIPcshxJ1YneP0FD26kjeQxgHnTB -HR6RNPLElwuuE6EUPb8UwNRk3pRYbmv0OGgRGtbnH+Ols4ir0GJK/vtZZpusS1Om -qgY+gSHXJ+pF/xk3jv3RZJtlnoXEwWIucMeXaJSEVBMrq87kS0fwgY4FGIqhSwp1 -jrc5liBJAgMBAAGjezB5MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdJQQM -MAoGCCsGAQUFBwMCMB0GA1UdDgQWBBQwzCU8euanRmnO3RrjjwA1KLYh6TArBgsr +ZG9ub3IwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDV99dY9BjryoLU +sRX01PinnpONl8iK/B5KtkUKaBuPul1NBF6EBvo3GsluWxK7CVxaMrf/E4P5vHtB +3Qx9lakRV1hel51r0rmNki8/cC635GF+w+e0u0Y5BW1cw22K9ukwyAL0BPW5PWmg +438a0K3N00wlPLSWBPiQAn2lnXUuHOXuOF9yDA2pPr9SDcDsjVHic873kDXh/+aL +dRiXBcp07qIwt8IedV+sKmpS76ItD7k+XfIyy5dQAGwaS03O/3Z2z5SW1Nfo8sqt +u2fGH1RtmXieU46jz7Fu6zbmrwafXbLrUftkaIDx1LWloHbrHtI8uKUPq272USWX +XNt+VoIhAgMBAAGjezB5MAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMGA1UdJQQM +MAoGCCsGAQUFBwMCMB0GA1UdDgQWBBT4Sd58ZmKhJCMacxqVZgt+oqeOMTArBgsr BgEEAYKOKQIBAQQcMRowGAwPcmVhZEFueURhdGFiYXNlDAVhZG1pbjANBgkqhkiG -9w0BAQsFAAOCAQEAS9b6JY6ebxXHCzzKTx/GOi/OCgWrqlDthNtdVjluaRvSxBGk -v+goonF3Z/8dXVwTLysRwz2BiJqROHaN6x9JvODPSlYyLOyIE3CafDUu6oLm5huC -BS1pJURujRFdHeRCdLhtzgcfO6zIGRrkiMr7zSZYSqMBfkEaJLl2c9w/MMVQYDBf -KOzC5rV9OEkiJcI0nonJMM98L1bKNPRdBqNB+8QlNxOeJXDmNAyG+vI3Mt2c452U -AkXYYBT40e+DdiDzoxIA1DKo6Auc5IqFv9q+dTJVKsCuJCzV8+5bZS9DR286wAVf -hG9C1ov75NeIAE6w7iyt2lV1FZRNSP2zBv6Y1A== +9w0BAQsFAAOCAQEAcz9ghdMtXd5pqD/nJbms1k/mP2ufJ0vXlAZ4cYNlBVxQ5Jo2 +lRNQaRHey+pVn7ItkKuHpD+WUpEQ1f9yUKo4Xhc169Vn+k14jvDJRk4wBAGJXck+ +B9TNvdimyA8X0UxgqG/3Blh7c7n6jzXQWC9e9rm92i6RFPuLKg1i1f8TupS/evmd 
+pe64qgy/mQmlexZt7DKm8la2aoXVkH8zxnhUVHd/pbN1ExPOLO+e8ZFqUR1EVzFh +sU8/Vc/D0IYUYYHXPYD3fEV+4uxl8o6WzoyWLbkbRKWKDRnAVkvPacOwsL9I482x +RQjm0kCzFRZi9E0lj0rMkVKRsEhDHt86WxvnKQ== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCukv88tmaxJrjz -RBGNRwU6d7/GTet471tkLQtcY2+39zeSQiqDT0Ru6wcqnXbtfSBl9/RZoVUiMYgm -WDH8PpdxFBPQ2UsuNqChEJGuoYHh4G0FqYbrdhFGEuNlT+NqgSvSDJ9cpo9tl0WK -Qge+Bap0DaL05XH23Q4XQh1pZAHH7r5a95tOXIPcshxJ1YneP0FD26kjeQxgHnTB -HR6RNPLElwuuE6EUPb8UwNRk3pRYbmv0OGgRGtbnH+Ols4ir0GJK/vtZZpusS1Om -qgY+gSHXJ+pF/xk3jv3RZJtlnoXEwWIucMeXaJSEVBMrq87kS0fwgY4FGIqhSwp1 -jrc5liBJAgMBAAECggEAM9AHFwLf6bYfcHwEZZTtlVPTREz+FU+dJVdFJu+QSd6C -zNL8gSp2miEBaIGBuazIf9se17dNaC+hRBgrBb2h/vrBgtvrN1UZSZR8WIMw5FRS -pzqQg7PwHocenIQgcAzUb6w1ZC8/JHygNA+y8pAF7hnaAqtSrr0fIIIL7qqy6J6S -vVCxiOyvXR+VHmvrltLIpfBPsg3pWLoygE0Nfd+ab93/kWNS1/TO+76GacaFi9pf -f7jIRPny7wDBpij+ABtRrv0HPaH9Kh8GwmLfozCIUv3RbLX3+hVePjMlB7OC2JK6 -lnvHVb5LNdtNbvnaHleutchA2eov5IgXT7v6Wn+u8QKBgQDUkSKAUZpULQJIVBoz -tJcnnwFbhvHE9Ysb+YTiPMKWtFLWctMM7ZdhfRtBlkvPL+d+xSvnOhiEjBsO1epz -s5yfGoUM88kbKQLK+66IIQJBQvDUzB7J6WTrd9j0X1hg4CNWZPUP3E/vbnVF4Bjg -DV0Zh69fGdMJGhOnLO7AOU5/BQKBgQDSPo3aLE6t2lEhH9jsSJtoD//N8TaYVise -oGgAl4COemto3whZxutYOnwMLR/yaj4MQZ2eoIS87EwQkKEVKmiD/35qjT2mrdgq -GUP8EVvSb9XlQmVwsd6gf0zUpL3nj3TSo1LrjZbmz5UU8QilbGR5YhU7TTQ9rB7s -TtwIIXw3dQKBgQCOVGfPl9RITKLcn0O0H1Wet/8GtAKqqObuYlKx0ZFRq4hUAyqB -24yjQEvBzMsR/bd50Lgm6WWFSvLLKj0EH93dQrqYA5xCzWELXR5uE/wYiqQLOGnd -NKPYbrUcW8MniqzeqlbUueXkIgfwM680Rn5yG8I3YWlHDOjf5Vwv5a9DqQKBgFVQ -cPuRczP1HWrVo47uP7HQnDsToNXcUY8SBGIJGG+4mZFqv9a+c43P3bqLLSWPmzIa -Bj3yYSrQsGUga11NYi/+I2xVeCkE7mzW59GHsb0JCMEJWmSKqQ5z2deIKk+m1P8M -q39Oa2ep4JMo5BtMitD+ziLMR2CnUb0Omxbpj6BpAoGAU/TBvFbx9x24eUrGpNql -peRYIcUkq91PjW2LC8X8IK9VMGnznpynn6lkdwDoMydrXDZ7gG9pik9rEO0aVE8c -Yo63x5Hq4+tSEFHiy7FuK7WA2XbLo67cYI/LFs0bCey7kUWG7xkmE4eLhVVOfZX5 -WfSMNStPHfPy0LrvK2JGs3w= +MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDV99dY9BjryoLU +sRX01PinnpONl8iK/B5KtkUKaBuPul1NBF6EBvo3GsluWxK7CVxaMrf/E4P5vHtB +3Qx9lakRV1hel51r0rmNki8/cC635GF+w+e0u0Y5BW1cw22K9ukwyAL0BPW5PWmg +438a0K3N00wlPLSWBPiQAn2lnXUuHOXuOF9yDA2pPr9SDcDsjVHic873kDXh/+aL +dRiXBcp07qIwt8IedV+sKmpS76ItD7k+XfIyy5dQAGwaS03O/3Z2z5SW1Nfo8sqt +u2fGH1RtmXieU46jz7Fu6zbmrwafXbLrUftkaIDx1LWloHbrHtI8uKUPq272USWX +XNt+VoIhAgMBAAECggEBAKHF5w2cUJNF47EHyr/3naCt5oWxcrCSehymvoBlwiDj +GNO4L/XGiLvNBurm9LxxEWAo5LB4gtn4xYBArsbsto0j6u5p113ETwJYulxOZyQm +Cgib+b3NsgQ+tDbkkxf0Is79Ci6Q6XAKnKhGGL2fdSkfTn0A7tBWJdBL8c+bwxL+ +6G/0t/jzErXPNjHo9bdUagJO0Z09SzJbPvIUhEmfbPBPFb6NwTEbcTmWz3rUjpCZ +5dtxAe6yYOja0eSfJ1nehx/rnFyWJoHpxI9MEopZ+eR3b7qlmlBeKWAeyLEjIHaL +1n8TMgpoXTeOhOx1wPuFMNVM4e4mhVBko6UU3Ff7qoECgYEA7Wn3/UJhd+tQ07Mf +Xid9dpLDcmo0UMO28PP4fT9GvsQuqMegmt0hCmu8pKlRJoFsMU4Ab30CNl8JtzD1 ++A6WSM56QIDocZOx/RXtQfANbeRf3PHkZGI/2dWLxlYRvcM3dF28Ttk1RI5HDABz +8Cfkc7py5lvstuGIF/rXuEY+ISkCgYEA5rf+QLLb5/T+i9FcT2d8U1iSLeNYcQdl +Jr/Rt8H922fcXnq7QMNODyW1Ha5PYHUr0iTCamCe1vqjBBCqaBuphEwjdb/gv7FE +XZPqst59GAIVTqlL5MGq/QDLgiGjkbLt9IMuUU4xKTDJ6XJHyamEw9JFn/oPB6ag +ImHWHCL8IDkCgYAQ+qYor6mm2OZz7XiN1EctxrtBoITTTdv0iY447uCRXhh0K6q5 +yJzHUQMxx4YtOc5SDGENmCQjXVKljlPZBLoMxO+LU2zMSqLNFdddAt0DAfU1KPuL +jldArfwMGDW2m5KIuPdxsCkWGRLNse6Yu1GkUS5MeUCJp8GC7YamACgLWQKBgQDO +RAp0DzOY8agsmhT8DFnTLakqi0lDa8gb13wH29A3umgCs5j7MmB0HFMK4Q6n/rkZ +m7GQZyc6rULWXIvsnWZ1F5jQKaYl2rZzLs2x8kVGFH62H5bxQc+7d115ztcsaBpi +8nCTUeOTnsG1Cm1xtjMy2qdSeb28KlSZyHwiFYjwyQKBgQDrF5KHhheR/eX+h7m+ +Jn1m6cvODkFtB/I+3L50rBkR/iCcDQNgZ+HGLKr8LxQb0sj8eOlbpPs3KjrsYKPO +T0CpmDDHLdiihIcWrWX8SIyMmyjZfUTtI5rNqxRLcw+hbOwj0yBRHJ2Dcfu8aNCk 
+Au5feocGvD5BuFPV8b3HQWAESQ== -----END PRIVATE KEY----- diff --git a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha1 b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha1 index 916325df58371..50b1c4d4becdf 100644 --- a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha1 +++ b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha1 @@ -1 +1 @@ -F6F2FF0DD2BB3AAFDCAC5D9CA190FD4AA201F049 \ No newline at end of file +D1CD97F3C9B816D7E8FC865E9CC330585A557F42 \ No newline at end of file diff --git a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha256 b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha256 index eceeace3ad087..b2300a0290b05 100644 --- a/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha256 +++ b/jstests/libs/tenant_migration_donor_insufficient_privileges.pem.digest.sha256 @@ -1 +1 @@ -4085D0DD39563975418DE7088089C069DC1AE39B1EACB8DA2130C1CADCBE0B4F \ No newline at end of file +D405DB53817A6293A79BE7C00EF272471A42473AAEC6AC9FA10F8D2299C71CB0 \ No newline at end of file diff --git a/jstests/libs/tenant_migration_recipient.pem b/jstests/libs/tenant_migration_recipient.pem index 38a8f545f6e2f..b2783e16265e2 100644 --- a/jstests/libs/tenant_migration_recipient.pem +++ b/jstests/libs/tenant_migration_recipient.pem @@ -3,53 +3,53 @@ # # Client certificate file for tenant migration recipient. -----BEGIN CERTIFICATE----- -MIID6zCCAtOgAwIBAgIEJcofjTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID6zCCAtOgAwIBAgIEKzC/fzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBvMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQzWhcNMjUwOTEwMTQyODQzWjBvMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxIzAhBgNVBAsMGnRlbmFudF9taWdyYXRpb25f -cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAznSAJWeK -aX4D4hQE3Z81yntvNjbWntvmEgSQ6+Bh5+10hmpsdVE/oQD+IrfDRl4PZxNJSOvH -3ZI47begaYPl1qPQizf0TAHve2RwXo5S1hdKofJi5CZ6xbAlwvxyN0/CBk8CaJ1B -UyDWmRpsP1Qutwr0LVK1Sl1r//hy5eEutg8Qq8gjTcyYE2cWoNGgPxV2Eg4ds4sA -Y0NZtJt80bZQhIt/LT70Z69me7Y1gCKgk26UydXZU6xvctL7klRuDcAhiC+hsmEZ -WZmM8El9dtovv88WwtGG7qVakW1SJ+4hxDUCIZMdriq49FXAyBgnD+Jj/YaJgEhC -ZBNNnur6qqJK3wIDAQABo4GJMIGGMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG -A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBSZXizXFINpk1j//W0zmqBVh745 -FjA4BgsrBgEEAYKOKQIBAQQpMScwJQwcdGVuYW50TWlncmF0aW9uUmVjaXBpZW50 -Um9sZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBAKbaeByk1xCNdW/5anhcxvY6 -8Dyu3sqUhVyCP2VmS7JPjtt+HfzQGVyh7vKLYPoVt7vQp/2fKPvDnahBTbOeWlRQ -UIO86xdqaEX6GskRu4CMGB+1d4fnQxjQZkBwwQEMMXP+ooeBCDuwp2+zYoOgGdvn -ZXL7Ui8xEuJeqyczZAhcsk+Xnk/ZW5b7EeLIgjHhrkxxB6MdNejA+xlU1qKhA2xL -qQeAtYDRhl7zOzlBj2Wx2mX+6Ph1IW9IHtgMxPyjjgeEoblSxRf7nqOIGbXs8mPo -ap2I7iSNjo1vyykbWA6vJNIS40z8A8EhSmHSPe4aw326CYN0VeywCOy0TNcHu0o= +cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx1QP5Zls +94qZiqNtDGshF6xcws4kcJqdVCrPwsI7bdu/jx8KWQ9Lj2UPXXST1B5tHCLXjWkv +PYhNMxqAXXLLzKiwCSlT//pN2ja5sy4IVC8olXMCsXa2/S7Mlxzoi8XI6Z5U+rGp +7Zwi2y3kxAe/DY6mJvZgF/8N2DYnZUBsb+0JTOE9zmonKadKEttOF+qnNvo2HYEZ +urKeuHvWv6lOuKpPIodTan1k4BmbaoX1XQuE1H7zHlPtpP62D5qI+UctB2Uyd3+V +eAObDMHic5nlxVVG4SvdZQvM5TjrxeT8FREWXIvAtHI2KBLlsWqMO+wDiWJPXB10 +VjMSrwOB39U/wQIDAQABo4GJMIGGMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG +A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBRbjCQ7AhUFIJEF5kCQF+pawaus 
+ETA4BgsrBgEEAYKOKQIBAQQpMScwJQwcdGVuYW50TWlncmF0aW9uUmVjaXBpZW50 +Um9sZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBAAAlw+U1ZMmEEv/cHQnQlPdn +dVYc1V3hP4tDugVavCWuDzAps2+qu8QVLL7SekvW0OsCCS6jEcM4lS5dIVhx9fl7 +UOVMhrTd+XrKTsaeweOwq49aCDXkgaByGtmoXJWjL28wsrp3dSkObviaKKekFfuC +d8j5Ea9YCa5M8EwuE1M3P6OaA1E2OGQkDEPFrhwfrKBdO2RZwwbH5FEUHvJXLCjg +BIP99tweL1UGHH11dUonFuIjVCU/FcWTxksjGS0l/1biOsatSBt7Xal8Uv76NCnT +zNQRFsURJRm01mIZxf+djCrSg0JUCKnrJOMFcg12yeGzRw6WPtxQikW2pxnOK9s= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDOdIAlZ4ppfgPi -FATdnzXKe282Ntae2+YSBJDr4GHn7XSGamx1UT+hAP4it8NGXg9nE0lI68fdkjjt -t6Bpg+XWo9CLN/RMAe97ZHBejlLWF0qh8mLkJnrFsCXC/HI3T8IGTwJonUFTINaZ -Gmw/VC63CvQtUrVKXWv/+HLl4S62DxCryCNNzJgTZxag0aA/FXYSDh2ziwBjQ1m0 -m3zRtlCEi38tPvRnr2Z7tjWAIqCTbpTJ1dlTrG9y0vuSVG4NwCGIL6GyYRlZmYzw -SX122i+/zxbC0YbupVqRbVIn7iHENQIhkx2uKrj0VcDIGCcP4mP9homASEJkE02e -6vqqokrfAgMBAAECggEAH4CHTS3PYOlpjkWfuHFis9LB1XPoq7TCFl27/0HtroX9 -EcWLZxtOqjKMlJ+VnFcd4ox+0jGn/ZciOKOcRn1pxKaaW6PeEvr4n8tjmgn9ec10 -BeIGVOnKMf/+wxHiG65/3JaRt4THQvfvxszRV5hwyF1ThNbp8r7ui9BNN+Z0SjMc -GOTyf1j6Y9dcE6I8pJVtNp9o7fyYYrF9AE75QWv+x8DXSuIhcEFkFzmyJJHk1Psi -7T5JfmmI53KrvE/9n7MbkWQIC4YGrSYwUY0jKsu3xskXVOYAOW7GPLol+OTviJBq -4IVAckfA/Q6qOlnYQUkKqaRn5gjFmARhUEkLzmDaAQKBgQDsLbZvkOVsMDYrZtCv -z0smeEq3z/sHUK7DJsOEBFVlSAG3TcjZW6STNJaFetZF26H4pAqbFZqKIv5J5V3D -egvHDxSfWynwop/VydyEZlTr/Ymxdm7YThnzvqXbr8q0X0QVSnhCtyKe6WJeoWK1 -HXWcWgSDK0Rm5Ul5Y/H61Y31jwKBgQDfyC4VHN2AWLOVVrgF+IsvwML/9xTuCBZw -ak+9Zm8/7X6++J2Fe2GMmyYAWMDZhJmd+yoZDajZ0WCyHHvxCT2wyJQCWiHNs8dm -QsDR8w7TDgZdkUQ9l48YtHV5QjOYd1/WsqWvWsOdZrXBxEzuLjF9140LY75WWuXa -QULT0KjNsQKBgFzYy7OoXsjdWy5MyRWUhJKnD5ibZrBFg66seohXu8qJOEN7jM4G -PPix38qxs7La2R4KPzEgmRRdFWKvjODgBelHgG/1QtOa2wMMzGgwYoozrgHZ8VGP -wpwUAtgOj73BBd7o28Y+gJnAXi+delSBNE83BDcct1NMKEpUhDh748b5AoGAbxK8 -FMykAL2GPXyykHwiEcy6vTPVlqQ7BwctlxPN6kwmWgGqpgNsks67WPa0mgKgAPMW -nSLTiXHMbfuHZUcx8JpOJLC6EJDISzCc5lifJhDTBgRBe9TLOPbxTAOY+ndeAYHg -jR4e8/R0CL43dQ1GkXC42EtkwRYS+nbkBaCO+LECgYEA2R+GXgZ87UWwB+3X3uAW -HQ/mP5PfQzVph/K35DbJxfy5A6kwyB/BWEqxoaZH5Deoj4pZU7NmpB0smEx3O4fh -UgmyPlkayz1gbEHJ1PWe36j4bi7C36yyZbwc5iqJRvWv+CqtWAbHaammdQ17d6Dc -S1VNokxppp+iUUKFYjL/vs4= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDHVA/lmWz3ipmK +o20MayEXrFzCziRwmp1UKs/Cwjtt27+PHwpZD0uPZQ9ddJPUHm0cIteNaS89iE0z +GoBdcsvMqLAJKVP/+k3aNrmzLghULyiVcwKxdrb9LsyXHOiLxcjpnlT6santnCLb +LeTEB78NjqYm9mAX/w3YNidlQGxv7QlM4T3Oaicpp0oS204X6qc2+jYdgRm6sp64 +e9a/qU64qk8ih1NqfWTgGZtqhfVdC4TUfvMeU+2k/rYPmoj5Ry0HZTJ3f5V4A5sM +weJzmeXFVUbhK91lC8zlOOvF5PwVERZci8C0cjYoEuWxaow77AOJYk9cHXRWMxKv +A4Hf1T/BAgMBAAECggEBAIPbRtzSPnQOGn3MmAGECJMZcQ9owFBA51xbKa6jQB7v +I+vwU68QYCKObriauoOyUOkw3zhrYVWqCa+Jk2q92rUazcBxt2B42vFEDFnMVLQj +sgwS1bBBYNFhGPPJqsdzYfGwzHQ9/LPy+lfVJyTKHwc/2wehKXYWQblKMYtYL6Nl +lYoBJg+ukhI/k9CssCWUzu6GT2phCPmm2tH9IODyPZYQj9ZUG2PFcVDyR+9aQKb4 +RaaOzrNzt31srGIHNjW7QD0yEwNsTiZ05DTAS0ExGt6TnHQcbqSkxT1djZVurZhd +0zT3rGEhsAkqNQ/mMQ3SRyB2pwhXZ+/aqqGl4bsSrLUCgYEA8nAXqzr6nXHBQv9c ++kwiTuINVYO/Hss1owTDzPEv3E+z/mVstOo8KUNBT1DiHARwrYMJ6nra5lah41MH +F+X2/nUqYC/yIrtrlCOnQHr6d2znIpWq1Sj9/8MzfMgZuhdUmQTrqc2Tmno0bh5K +eBPxUrNsftGy4GlY7WVLkuCAzwMCgYEA0nqbSedoOqCwIdkVgi2libIX2TWZpBej +LI4cNLA0YPV6GqG3hFkxs8D2XEIovEG3q+3dzgqmCXFg24HgaYFr11b4m3XYj/3V +CiGC/G1c3QxvkJCieseRtsyhS/H5++X9YFyZG5Fk5JzkArO7Nae890o+Nom6j0C3 +q1j8XtUqaOsCgYA1k4X3hkIqFyCBgNN4UOjoC9ashj/vOzMwQnZOzSIpiseZOarL +VFRVPhKpx5MgY+7OuX2wftPvQUfnZ8rSgjSSSSxDM3VMLaT5iOOGQWcmiz+NCgxF +rRhstCOluMbOtCcy5b56uP6cjdMWXsVQQf/7qcEZSZhBNaa8V71ayRhOJwKBgGK2 
+Pb4bWTalxfUZ6oxy+//oFmc4TCY5L73lqDTNrsMKXYm/7mGTs0uqY+BA5vnjmFB5 +7lyDgftLwTExaB4TJJCJqW6/hiGB2jg2H0hjwfmpq2kRbCJJFn202rDTe3o4Euzu +gJ/9QGQ3cHgT8ujnEiioGSVa0rP8lic2RKX1Mz6hAoGAVJcNEifTD0GPB+VKl/vE +dDSUHz3MrlbI5o6hORCuG9Gyf/wzUaCuOJrgTwBsa3iMYh+9tK8TGCZYw+fQhaun +JWRmkU8tjqHMsc1PssIcDaZpbHWoYDt09jYssqPjrQir6m6JYeo8AZYZOaorKXrg +QrF3oemjAKBVppgXuFrLzo8= -----END PRIVATE KEY----- diff --git a/jstests/libs/tenant_migration_recipient.pem.digest.sha1 b/jstests/libs/tenant_migration_recipient.pem.digest.sha1 index 7dad2694bdea1..ffa61296b2f3c 100644 --- a/jstests/libs/tenant_migration_recipient.pem.digest.sha1 +++ b/jstests/libs/tenant_migration_recipient.pem.digest.sha1 @@ -1 +1 @@ -379766C97E0E06045E08F4397740FD9EC54CC14A \ No newline at end of file +11FA775E0D82623200531C010564BAB34F3EDB9E \ No newline at end of file diff --git a/jstests/libs/tenant_migration_recipient.pem.digest.sha256 b/jstests/libs/tenant_migration_recipient.pem.digest.sha256 index ad01d1b9815c2..659d908545581 100644 --- a/jstests/libs/tenant_migration_recipient.pem.digest.sha256 +++ b/jstests/libs/tenant_migration_recipient.pem.digest.sha256 @@ -1 +1 @@ -CA5F02E7CFEB7509E1D6BD9BEAC00A40A80527CF07FB2029442DE8296A4DF2C0 \ No newline at end of file +A4FAA454C6AE379571ECEAB1DEC16A9222B15D189BD0B731D72C2DBD82C2874D \ No newline at end of file diff --git a/jstests/libs/tenant_migration_recipient_expired.pem b/jstests/libs/tenant_migration_recipient_expired.pem index 6abbf4baf0f19..2ff44ea00665a 100644 --- a/jstests/libs/tenant_migration_recipient_expired.pem +++ b/jstests/libs/tenant_migration_recipient_expired.pem @@ -3,53 +3,53 @@ # # Client certificate file for tenant migration recipient which has passed its expiration date. -----BEGIN CERTIFICATE----- -MIID6zCCAtOgAwIBAgIEVJVyCTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID6zCCAtOgAwIBAgIEUUGPJjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjExMDExMDQxMzA2WhcNMjIwMTIzMDgxMzA2WjBvMQswCQYD +IFRlc3QgQ0EwHhcNMjMwMjIwMjA0MjA0WhcNMjMwNjA1MDA0MjA0WjBvMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxIzAhBgNVBAsMGnRlbmFudF9taWdyYXRpb25f -cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAqcz9uXUw -mmYzgMZHQVct/Aep+Pt9bXl4UndRN1Fy1d8QKIshEqMVUuBh9WsI5rSg55RfH0MZ -V4bpq8ah+HjfL9tPAw3seYPRg2rVfGre5QXYfardZnxcDvVrQ6Ln+/eTmtgQM7qX -YCSJu+UskcwfObsAXEC9S/QSTcLyWtypNBsggCzm6hF/qNLoDdq2T9D4rAhVZsQ3 -k8B01DgywjWGSwTeOdnNaB1VZW3QKE9mLgbiQhS++NR+KRtUd5/TnFLs0tU3sgUK -osSS/tBx60wR2zUnX190uY7ljg/oQkPpcdJ/Mc1BDUKu2cYGL8w1+pooT4CTSLVQ -GRw3kZvW0/1LNwIDAQABo4GJMIGGMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG -A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBQaGcRjaxKtM6ejvAJUx6b7VpyG -JDA4BgsrBgEEAYKOKQIBAQQpMScwJQwcdGVuYW50TWlncmF0aW9uUmVjaXBpZW50 -Um9sZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBADcRbGHg4C1yyFs9nVvODhED -U8UoXIPv6GJvQVS/hrYgQutjeESTUDEP74MB8GVSVtzFwiI0Ls+2JI8iVOGuAfbX -XJNvKTuxBa/UY3IxZC4bTvTWEXhfuLaOCnAE2srHK4wrZgOQQgegQc0JYrzwjO0W -pvP7FP6BhFfGo0DIRIfOvyMgD2Z4IqinAsdCOrbL4lLSgt8HXRh2R8IFfNSoE1F7 -fkocKa22emVFOEq6O+hNNaXiWSMYDaP0GSSKU4ywSMpDLbF67kbivGmrCFHO8yDJ -5ST2qCxpbGF10pp/0K7uThoA8SIKVVpm7BWoBMnOkoLmyPc0JhHZUChwbNFq6H8= +cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAude6yEIe +i5zDr7r6m3W4qKs5W2tQ57o5IZVP3uWEPrNW5Zn33jQNmgeUiD2MXj5JjgZBuJFo +KbBjL9n65sn0pVVZPr3LVH9G6grr2jMdTyzqGYeubvsE1NPgcvd9roW2uKDCL/WK +jRI2GVFuVVeW6u/XTNcy5atA/jm9j+LTvK/zNlal4U3rsta1QsRKcl7BSsyBws1n 
+7fdMA9OdYISBsrG4MtB8vHMItdIQAIfrq8KBtG+F+7rsToUNVhrn94vPUqqyUmG2 +3/5ulCRw3gI4wcEVPQVVFt0FLWNm/3GySdCayiu1E1BP+enkSwkoq7goF/rQbVD1 +D8QelyqOk0m2LQIDAQABo4GJMIGGMAkGA1UdEwQCMAAwCwYDVR0PBAQDAgWgMBMG +A1UdJQQMMAoGCCsGAQUFBwMCMB0GA1UdDgQWBBSnXaXIw+RLmwSDU5RyzZF/ONRv +fDA4BgsrBgEEAYKOKQIBAQQpMScwJQwcdGVuYW50TWlncmF0aW9uUmVjaXBpZW50 +Um9sZQwFYWRtaW4wDQYJKoZIhvcNAQELBQADggEBAAk4lcSNw6ybY/Pf//+pWI/E +176XpAM1gouNgFxpVFHt41YrLfMnyEGcqo7s3blkZNmfhzocpdZfNb8XOrU0v24S +J2ABfI70kVDesqofh2bOwi5vkg/FacpLCz/bxMVUr8tQ5vCcmjManJsgZpyGVK2Y +1ZEsIHkEVZ393LXtaTQIaqVlHz3M1d7crh7nEKfcXI5ntc4WOoSkzuW/yC6WuAbQ +lWyy8IWTQpH778x8pDYWBgrw8BK+yjqOLT66Ca9/ZlfXPoQXk839X048Dc1qQFoT +zY8sw9Vv/zCFzLsqF03yJCkznG5LvEsPAtQ0WZtpFLV1klI+RUPQ/bQjbmPXxvk= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCpzP25dTCaZjOA -xkdBVy38B6n4+31teXhSd1E3UXLV3xAoiyESoxVS4GH1awjmtKDnlF8fQxlXhumr -xqH4eN8v208DDex5g9GDatV8at7lBdh9qt1mfFwO9WtDouf795Oa2BAzupdgJIm7 -5SyRzB85uwBcQL1L9BJNwvJa3Kk0GyCALObqEX+o0ugN2rZP0PisCFVmxDeTwHTU -ODLCNYZLBN452c1oHVVlbdAoT2YuBuJCFL741H4pG1R3n9OcUuzS1TeyBQqixJL+ -0HHrTBHbNSdfX3S5juWOD+hCQ+lx0n8xzUENQq7ZxgYvzDX6mihPgJNItVAZHDeR -m9bT/Us3AgMBAAECggEACc0qbDUmjBMQMAPCAzSME/tBRX0G3XXgiyoWBxJthYjt -vtlhHFlLltAa93apOd/9VcDLyNsvsEy6Wk8J9HTNtU58fhmkp9MDGgnlrtvgZ+nx -eDePtiXBjp9+BJZ3u6Sr4YlNTh8cM9GIHc95xiS5PLTdrXWTN7osAE8bfEE2hypd -mSVSGL/PBEo6+jsaEWsZW366U7KC/2vl8SBsxlEAXneKv+fw3BhngwghRDsQOFBt -XX72GnRTUOt1nroMlA+s3Ao3XZrqD6Z145pTMqXen5IoCjBrUWk7n4uwxi10MY7k -LbxOxh2oghwDbbTYgi2bgkYHfIk5mEsn/mxhAXjjCQKBgQDbR2HUhAusirpvDFX/ -e1r8RmovGT0SLEjIfNfrHHJlpRRlvUeQP0pVIH0pEootROZeA/ujofc8NMfEdrTw -dcufxITlpPvKk8+o6zgaQ0dDWZ9zIPiWMrWwfyEZyNJoTsp1gyLQbp47bViBfJsA -td1nX4Ep1WRgIq3SQ2l3bh03YwKBgQDGPHP2chMao6ln/T/wp1Aa8bD5aHjSCa6q -TzsRQeUNVIl14oFE8G+vtgr04el2qztWB0MoXyioqaH09NOny6EFeqZwPo1vQTN5 -Q7HeMDx7TvtOCXOxRCe0F5LR3etsOcNekTInXu9AwOnt4UH52gMVziAuVdTAfXO4 -6GTk2wV3HQKBgCxDO3c3dFfO5RU3a0CX+OTFnfeF47MAZ2y47qjR5DGqYfSrgX+X -lvyaA0nAKU48AzhlG22LaymnCdAZmiqTzJeihqUIaZ8ZuShC2t9KR19L/wixVhyT -feNztg3LYNWXWfzgjK2ANsaOKvhwW6WIEHomaB82qP1S4r13yBlIi/M7AoGBAJ1i -4a6IYyKDTbyCFIG8VJ0PxrI9f69CgKo2vW62ImSy+W/epUNWoVWf8pL5yaGt0S48 -FdW3t1AxXaRdvK07vlvbRMlY4HG0Emn0lQMSyPIdgugyGOhkdCFHlgmJZ6BFPBeY -r3kSpmGCyDdU4Ey+CjUFsgcXnhI1h+sGlxQbz/sBAoGBAIB/YchF5tOGpls4k8be -+YD4eB2spHWtJRRbnTYPnGY4xqOzEII14agzvi4Kh7j5O7TE5SZW3m2GjPV7HmFI -mrU24lspIJjyrZAZXxDT+Y8Ki7yp/ITS38W79DKV/dFnjKG2oUy4ZIs86OkWbNc0 -N1CQppZPRt8/QREmB77yT9ic +MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC517rIQh6LnMOv +uvqbdbioqzlba1DnujkhlU/e5YQ+s1blmffeNA2aB5SIPYxePkmOBkG4kWgpsGMv +2frmyfSlVVk+vctUf0bqCuvaMx1PLOoZh65u+wTU0+By932uhba4oMIv9YqNEjYZ +UW5VV5bq79dM1zLlq0D+Ob2P4tO8r/M2VqXhTeuy1rVCxEpyXsFKzIHCzWft90wD +051ghIGysbgy0Hy8cwi10hAAh+urwoG0b4X7uuxOhQ1WGuf3i89SqrJSYbbf/m6U +JHDeAjjBwRU9BVUW3QUtY2b/cbJJ0JrKK7UTUE/56eRLCSiruCgX+tBtUPUPxB6X +Ko6TSbYtAgMBAAECggEBAIzmWt7qAm6ndFYP1WF4Z4C6EMqXGsgWEKq9ocjFCvbe +0ctSaPM3U/isNpj00S/C2dGsPLfKxsaUzR7Cjc/c3ndbtkYzb62osgNQNHFCv73/ +t52TmVTbTFZwLYP80HU5O8fPWmsyJtG+NPYNHzHlLq8BGKNRpW0r4iLddDNbLl2n +XezuETlqKO/gcOSaD0zGU2MH+IOLGBgN0GZnOcM6njEOSgvou2/iD6J8DHa+79Cp +fhheGsD6C7fMhU1nl/IfPCXsKw+s9f+b653k5xSRmxCtjz4zjzXe5SfLXi1V3Xn7 +sbFe0A8NUE2urk+zPoJvnT38/TPuJYV48Rr+vhkQ+qECgYEA5i9JMCEGVZM1xRcq +snl7MuGq+2EnVCRaQvGr5kc65VUrRTt6Nwb2YFn+/RyRGNoW02dehmeOz/uPxQBq +FzQUYSr0c61eO2Zuxtoz4z1KdgnMBRkdu8ueHs552N85xsbuHDGW45GCR32l6rfp +JjYNcN0tcRm0z3f0Y55JFoVUGQMCgYEAzq9c76yaAjnLY04f8QxwJbZFsgMpC/s1 +XzFw3/k9PQLTA8A3HY9qRvrx821n7lAeRWTJLpI12tyUxE5euXeMdcD0rwYplDkL 
+3gABl24O79bzCzdNtQaWbftpTDdHfE7c1C2UMVkYhBSRe97fzWzQmJOSlIiD/HCK +FKP5XorFFQ8CgYEAnK7fQLWaHDICTdBBLg9m/vGBc29kV/AOyLa8bhlaS7S3qX6c +6EwC9P8NhLknQyVgmDIqs45WNdhkupJXpMe8f4+/qeX+2KwXB6CL5UhZIRP2HhpE +lQo67Xlak3cPWvEaL3LJ5MmtRoCOqcDaITp117eWMQBwhTB/2DdsRLoozoUCgYBG +n9GH7FirzgfbpQRxH6jqmf/ytfW+rFYucvx46/eq814beADdTvYnzvQCFC5hVmsh +lzDPgxJ3+Mu00Hq5sUoDqq1xaQ+oIF4Xu7TPg8IC3lYqTPl2lAuvBSuT6Ye5yFDZ +B8INor4n08dqplufHEGjATojGwrOpb3pE6bQUWbKrQKBgAcDXQi+OPuT0ivy8AFT +CDjXfW51Ijm66/QTuajbqDI5sQMp4/LN+7H7uFuPdblxVyEG5kCFYDw6a92DL1Rl +MgJ5jptMMt42O335DwRcafxnHDrrXbsMlEjsjnpie0LbEztNXpgEduCGItvTCU9y +h5RULLCWiLZ4PsOvg6mIkTD2 -----END PRIVATE KEY----- diff --git a/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha1 b/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha1 index 3af12b7a100b1..f25685bacf51d 100644 --- a/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha1 +++ b/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha1 @@ -1 +1 @@ -EC814337A356D99D55DF854FF71CF72C1CCA102D \ No newline at end of file +6226C8F81AD07D4668FC4203A97AF390FF87D25F \ No newline at end of file diff --git a/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha256 b/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha256 index 10f7d3cef53d1..75d0129fe43b2 100644 --- a/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha256 +++ b/jstests/libs/tenant_migration_recipient_expired.pem.digest.sha256 @@ -1 +1 @@ -A74CF5D200100E22D6DA8AC683657C387F2604B152402552415FF156FF04C8AE \ No newline at end of file +220121B04479EC8277B1F1DC865584C44C8607C0D8CC37A100C9D9E7A3865767 \ No newline at end of file diff --git a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem index c2dbff6bbfba9..7d5104edd81be 100644 --- a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem +++ b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem @@ -3,53 +3,53 @@ # # Client certificate file for tenant migration recipient without the required privileges. 
-----BEGIN CERTIFICATE----- -MIID3DCCAsSgAwIBAgIEOPFUxzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIID3DCCAsSgAwIBAgIEFHYlLDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ2WhcNMjQwNDMwMjE1OTQ2WjBvMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQ0WhcNMjUwOTEwMTQyODQ0WjBvMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxIzAhBgNVBAsMGnRlbmFudF9taWdyYXRpb25f -cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxQcS9Du5 -cu+WrloqzHqOq8aO71x5fco09rDW12XEt7BUaqadNqBMbyO1JO3KsYHuG3Z1bGZB -6MPXKwoZb7QVGMjKkyb1inPYLC+1guE8skolKRE8mzDoYclzZ8nlNLxP85axOHTE -ON8rOZzViX1wevW/4Wk5YykRl1q4PcI9sS0ApKGyUsD1aYuV8/5HT1flF4quQ8GH -MgaI3CuaI/JpyPJhHXv07McQ40c/3B87bvteJ2zB+PUCnYClza4xr461zK5TmeER -nSu4AUgH2Pp/YYpWF6PqHiiZmvWot1DFAjobFoNDhSpG6DlAxuZYGvreRfhZvyNw -+rcvl8VET0eQXwIDAQABo3sweTAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV -HSUEDDAKBggrBgEFBQcDAjAdBgNVHQ4EFgQU60uDTycUKt17FwGh7+ngdgXw5DAw +cmVjaXBpZW50MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAtycgACfm +cEqAUBVftYHO+uSfZPlr3T03pSGVnekRBKvTsE0Uq1oprtc2Fw0OU/YLMzt8zw6v +syS4vgiXUcida7+yTp4zDegSzt3EGTcPompHjSzSK0Ex2s51GQMDOMoqG1ZE5Ze7 +RG9wrp2o+B/41svqddPipNr8J6vJLjDEDJVxSREAbbQhUDFWPO1QL8hdphhDvCwc +OOCXqVLAGZ+QTHahh8//ppMF8ih6J3zKQQ3t1Hl2LwqsdIIRJLuUstwG5PP9XyGu +hCiob0DBN06zFPU2MP2MTh4rFW+9kk2OyYc9KOZV08iuSPwa7QDL1Ob2vZ5BPhFD +1cUBhLPQ7wrEsQIDAQABo3sweTAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDATBgNV +HSUEDDAKBggrBgEFBQcDAjAdBgNVHQ4EFgQUmHFFVgfTM67SWC9P09ppm5aKWvEw KwYLKwYBBAGCjikCAQEEHDEaMBgMD3JlYWRBbnlEYXRhYmFzZQwFYWRtaW4wDQYJ -KoZIhvcNAQELBQADggEBAMD5QD/cX2nL+wmH1Q3NmGPynKuNODwOUMKxvf2ST+Vw -WR5A6UFyjIBvQeUQOkN3Tt2XSMLGJL0r/T+HSxwNR0yYy8pMUXuZei41F3Z6LZcN -ckr/ZEoighkmc7U4GvLZYdGNgeKA1v/FCsaiy4hWwOP+50XKVMKCXnW/yNGjK/u8 -0Q016rm92fYv19wZJlNgNElzHVcefAwHRxjZBSGVbmDvG4YGelZwBPfwIPKgMYKj -M5gwmhd18ileIoC2C/TjKw2kaKsQY79gn7JUVvIhMumeaZXsk7HDiqBeNrPzbxIG -/7KEFMPbsM1ydbHSwfdfeU2wDL3upUcZAHkPNwgA7go= +KoZIhvcNAQELBQADggEBAD7vEz/KGq6ky/gMQGHtACd8oNUrotpXbw1CfZj6Tn7x +Evy46a9MVhUlPL59RqsIKmG7ptZXonEqDIiJJrzZ53XD0nMrCmm99c5WV6rtDa19 +5CsHeZfxjmZJIPHPt0s2tNOusQlcxPViOkWoMf04K7u0OLli+5M6ftyFZuMgS+GQ +8MX1n678DgM1kgTj2uyKOm7LUKKqLtp+y5CkdwhqVlKptUicdRUEmW7dekAeUA56 +sDma2XSt4GImSB9BbGypCM5+md+E3e0bI7LNCNx3bIt1BQW2jMVyYuCps9b/SlKP +vt84e9JwCSXu5jZI83Lo4aTNxS97BeCkALQ5uDBncAI= -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDFBxL0O7ly75au -WirMeo6rxo7vXHl9yjT2sNbXZcS3sFRqpp02oExvI7Uk7cqxge4bdnVsZkHow9cr -ChlvtBUYyMqTJvWKc9gsL7WC4TyySiUpETybMOhhyXNnyeU0vE/zlrE4dMQ43ys5 -nNWJfXB69b/haTljKRGXWrg9wj2xLQCkobJSwPVpi5Xz/kdPV+UXiq5DwYcyBojc -K5oj8mnI8mEde/TsxxDjRz/cHztu+14nbMH49QKdgKXNrjGvjrXMrlOZ4RGdK7gB -SAfY+n9hilYXo+oeKJma9ai3UMUCOhsWg0OFKkboOUDG5lga+t5F+Fm/I3D6ty+X -xURPR5BfAgMBAAECggEBAJYibNQEyqyFWwmilahY32yPkg1dJwquUauFV0CtQLhE -Oh9GtNeSUIwD5p2sQV7/xgPRQWsHhMOsr8IRIQ7YZ5cKMirtbf9BhQEunZ6MqWam -Tyi7BLaxDvKswi7dTrXfpQDV3FdLytYXYGW3V5Q8LMDVkJBgUSV5fvkZXPlakQ33 -BqsYTTWTwR0GE9tsr0mkJX1sL61K/aargX4Qw3iL2OdLQorCsXyptguzZZ5gKtco -D009bWXorBiTy7AuzFsXMpPxIvV8j+BZxPgAYTFpXS3dE8iEEbnEp0WeyrpgGVvf -IQMZf+R5XKhRJnXYq2yFPnYo2812gBRP+lqJEVJahwkCgYEA/+sAvRy8HjrxJYtv -zoRLDdTrS2OjfiURIb8EbI0qpJZsyP/4CRAInGS80MG3dnZrWiTuhHUpAsxB7MwL -EU5A583deX/KtQd6DG5kaNP9tmATm4N+8MlzZ/Cc6+noPOj/7UwTrzh02U/4nC6v -9FFIvXWfn3zbSflmVXvCBR2gr/0CgYEAxRc9SqqR0bqVvSqXvIjx5DdsY8Z0/gaZ -6zg6hZWtNCjWZ8JdO3Nfb7Gq8WyHbuT0hMZKspoUkQcJDKgevPuLgYa9azkT+JEc 
-64kE8inL1OCAKwZwrV7ytscHFwIaGz9w5FiDMpAHzmvLIo1BygkAapqtnlhFnLHT -7Ui7YfPyqosCgYEAht1oaUDAXkn/lSKTTEjpaKOhT/x1R1/vVFJe2XnXVB81uwMx -ykzZzNlFq9m6fkJPtpSp4cIAV8oen9SzrG4JxF737TSMNbR3/B8c6SV2meqtypGU -jv4KxGbHu4dr7NV5Maua4AcnhPIg+OWdsmo3pChdc2YQBj7ZUAlFKP8BFF0CgYBL -XQuO1ZJUxCgl4XeMHEGCpr7HmEd8K0IH88RM+GL2ovb5ThUgeolvyFw5XOqcuOfX -LYu6p7hdjHclY9P9J6K8sK0Vpwm/7p1EY2yEvP64M/BOg3Pql3fv9EL9xUv+R0// -wOjKGNEjWYyfdaxGeRKzsNnaxp43wLfs7/bXCdUs6QKBgAoH7eGj/0t5tHE9xS3X -Nc+PAnEIUdSO7Y7Pz0weTiM4zcPXLnbuULPQuqzqSVYG4bgPh5pckRXkZkn5/8PS -KkwYxCalzZSskSI8R5MsVjvhGZ6Ntw+zuTF991BqQ0PgTWwM8e5XfzWyiQ3Ao42a -d1VZUQaPZimyoO+OdvKCDSzN +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC3JyAAJ+ZwSoBQ +FV+1gc765J9k+WvdPTelIZWd6REEq9OwTRSrWimu1zYXDQ5T9gszO3zPDq+zJLi+ +CJdRyJ1rv7JOnjMN6BLO3cQZNw+iakeNLNIrQTHaznUZAwM4yiobVkTll7tEb3Cu +naj4H/jWy+p10+Kk2vwnq8kuMMQMlXFJEQBttCFQMVY87VAvyF2mGEO8LBw44Jep +UsAZn5BMdqGHz/+mkwXyKHonfMpBDe3UeXYvCqx0ghEku5Sy3Abk8/1fIa6EKKhv +QME3TrMU9TYw/YxOHisVb72STY7Jhz0o5lXTyK5I/BrtAMvU5va9nkE+EUPVxQGE +s9DvCsSxAgMBAAECggEAK348CC1xeJsIi3v35Xd0+nmOLIFajwg5jZpDGGO/adeQ +Bp7XWLWpjl6G9sRf8apNEJaA1f7L1IPU5zPNE1vLnknAgxDoWNYSmVNqfP5pVQ1d +/nV1V1Y9C5PZlN31RugjRaif5dG5Y8/+90hzVrDo+8Ei5WYyvSlPlfAibzifZck0 +V9ULQQ20O3UTW/8Uss+W8gzxMkTm15q0BwT/41mRyIupY4o/GNjaoDUDDOHkkDox +dWJY9mXc/zxd9rtUfyR8nS5C/OZ3yE7vuubT3Vnx4Fdwh7zE5pbvid6k6UPlE8bD +JtvBnbF13HbUMhAmF9pnWBKlFMGI8UZHPUj4PzVc0QKBgQDhcK/ZdnVGuwJ7i48e +zg6scv7JsVFQf4atXVa1Fmq2CHk8ew8rBKPW1FmIiqti96TInuDkaxG5Ei7N+RHZ +9d2/pR7Bp4W5vik39ymINCO4N4nWvLsHrq432+yfjmKcD00HTOxAUxVC+EDTzAYy +qrhGOivjghAbHtAJ99bEUW2p9QKBgQDP+vdIhU1XSDID+rU/WVTctr6wa5MkFMik +LZowFVvicGELpQvtb7+WqeE/Yu8WSeAUKI4Nd91TbgPCgLNuRjae/sczA00euQYi +MI9jctQj94kht5TvUsUbGdPvpXPqZKDF1FzFlbSPwHGTAtaSYzyaM05bLZwIrLST +euW+AG5OTQKBgE6YOrZV1g0SpYrs3Lignf0BGlK2vuKRkyJdqBz587oCukGbpW4Z +8AS1g4FW1ulp7MkEmuMHcOZUsHyemNqkHOrzZgWdocFfyn74bEJP6yQOOL/kjE/h +VlujEJuPyFEgBHrHFpZWYNM4OWGdf4uqeRQCs8pdTcAmveC7xuQqf1EBAoGASOuT +PAgQ3+NeNer3FWj6yhAAt9Zf1qy61GVwxB8ZAkGopO6PSgZ+RBL3+MN1VHk+aZL2 +i15VaWUPSGjbgHR5vjFSIl6r8XOp5N7lx1aBbsLhf8LgIzEADsp1dnaqN6pkfDhC +9rvQfP5aU9MQ7G6C1wXaGTnalWgIQBU86+s2wZkCgYEAu3yXsCDpXQHoZsYFCR8g +UDvYLPuR9KSSZ5hKbyFjUNilnuI8tImxdRL4vcreM9gWqlWNacJGZmeXLwLIXpE6 +iZK+PwT+ncX2L34y8+OyvIYzsNGNaXLAdcCSoa4Kk3+tQikyx8189sdeF2mAHfng +OSRkOmDc2t+1VEKJWQco8Ow= -----END PRIVATE KEY----- diff --git a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha1 b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha1 index 3e44d403fc1ea..862df3e59bfce 100644 --- a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha1 +++ b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha1 @@ -1 +1 @@ -4DDA0775EAC5EE548618B1595E6D67EF7F13E2F1 \ No newline at end of file +CB1C9F72C1CA410D989BD8E3BCEC2CA9D3BCAAD5 \ No newline at end of file diff --git a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha256 b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha256 index b045f141e8474..5c3dd58cee87d 100644 --- a/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha256 +++ b/jstests/libs/tenant_migration_recipient_insufficient_privileges.pem.digest.sha256 @@ -1 +1 @@ -0909312EFC6E36BD1A03D9FA6CB6129E18B9FB80FA2EB8ADB57B5075162509AB \ No newline at end of file +F97E83D586E2BA3ABFC633E94F2F7083BC05752C01C56C7DDF036ED1F8116949 \ No newline at end of file diff --git a/jstests/libs/test_background_ops.js b/jstests/libs/test_background_ops.js index 
f08ab644b987c..6b6aaab7ff65f 100644 --- a/jstests/libs/test_background_ops.js +++ b/jstests/libs/test_background_ops.js @@ -132,8 +132,8 @@ var startParallelOps = function(mongo, proc, args, context) { var args = stored.args; eval("args = " + args); - result = undefined; - err = undefined; + let result = undefined; + let err = undefined; try { result = operation.apply(null, args); @@ -186,7 +186,7 @@ var startParallelOps = function(mongo, proc, args, context) { rawJoin(options); - result = getResult(mongo, procName); + let result = getResult(mongo, procName); assert.neq(result, null); @@ -232,7 +232,7 @@ var RandomFunctionContext = function(context) { Random.randShardKeyValue = function(shardKey) { var keyValue = {}; - for (field in shardKey) { + for (let field in shardKey) { keyValue[field] = Random.randInt(1, 100); } @@ -241,7 +241,7 @@ var RandomFunctionContext = function(context) { Random.randCluster = function() { var numShards = 2; // Random.randInt( 1, 10 ) - var rs = false; // Random.randBool() + const rs = false; // Random.randBool() var st = new ShardingTest({shards: numShards, mongos: 4, other: {rs: rs}}); return st; diff --git a/jstests/libs/trace_missing_docs.js b/jstests/libs/trace_missing_docs.js index a23cd859509c6..0ea5f07b867e6 100644 --- a/jstests/libs/trace_missing_docs.js +++ b/jstests/libs/trace_missing_docs.js @@ -80,4 +80,4 @@ function traceMissingDoc(coll, doc, mongos) { } return allOps; -} \ No newline at end of file +} diff --git a/jstests/libs/transactions_util.js b/jstests/libs/transactions_util.js index c1531ec514e78..868bd59025b3b 100644 --- a/jstests/libs/transactions_util.js +++ b/jstests/libs/transactions_util.js @@ -17,6 +17,7 @@ var TransactionsUtil = (function() { 'getMore', 'insert', 'update', + 'bulkWrite', ]); const kCmdsThatWrite = new Set([ @@ -25,6 +26,7 @@ var TransactionsUtil = (function() { 'findAndModify', 'findandmodify', 'delete', + 'bulkWrite', ]); // Indicates an aggregation command with a pipeline that cannot run in a transaction but can @@ -45,14 +47,34 @@ var TransactionsUtil = (function() { return false; } - if (dbName === 'local' || dbName === 'config' || dbName === 'admin') { - return false; - } + // bulkWrite always operates on the admin DB so cannot check the dbName directly. + // Operating namespaces are also contained within a 'nsInfo' array in the command. + if (cmdName === 'bulkWrite') { + // 'nsInfo' does not exist in command. + if (!cmdObj['nsInfo']) { + return false; + } - if (kCmdsThatWrite.has(cmdName)) { - if (cmdObj[cmdName].startsWith('system.')) { + // Loop through 'nsInfo'. + for (const ns of cmdObj['nsInfo']) { + if (!ns['ns']) { + return false; + } + var db = ns['ns'].split('.', 1)[0]; + if (db === 'local' || db === 'config' || db === 'system') { + return false; + } + } + } else { + if (dbName === 'local' || dbName === 'config' || dbName === 'admin') { return false; } + + if (kCmdsThatWrite.has(cmdName)) { + if (cmdObj[cmdName].startsWith('system.')) { + return false; + } + } } if (cmdObj.lsid === undefined) { diff --git a/jstests/libs/trusted-ca.pem b/jstests/libs/trusted-ca.pem index 4190341ac516a..e47011e5963db 100644 --- a/jstests/libs/trusted-ca.pem +++ b/jstests/libs/trusted-ca.pem @@ -3,52 +3,52 @@ # # CA for alternate client/server certificate chain. 
-----BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEclbQATANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV +MIIDojCCAoqgAwIBAgIEc+efUTANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UEAwwWVHJ1c3Rl -ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMjAxMjcyMTU5NDhaFw0yNDA0MzAyMTU5NDha +ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMzA2MDkxNDI4NDdaFw0yNTA5MTAxNDI4NDda MHwxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3 IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVsMR8w HQYDVQQDDBZUcnVzdGVkIEtlcm5lbCBUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEA8h+5axgTodw8KmHz/rcPcy2N/etFkipOVL0i3Ug6JcKk -DjuSIdyLULuIQlR8nXWQ3hW9CZ2gDCeSnmnUKY6GWDQPHoSUJPhmGkXPuPBXivcL -QpLVZeOHrqR4+SHzOA3317LF/QYm9kC3dEZIz+dWUlTHs4NFwR+Yo84XNosSGaUh -o0mK5YcBx0W7y82rNrijcygOkXF9QrANUZfUz5uQ/ZPDjgoISqFvgMzJtpL6LqSC -TbsUM4NbPSYECDFzIosO+rhYCUsgZ5pE6NWZjmKzq4+zeb/2iSIoEb7U/5f6i4H4 -880y+usrcsBuNCS1OVHaEB1ZrlinJbzplB3nV9Hj1wIDAQABoywwKjAMBgNVHRME +AAOCAQ8AMIIBCgKCAQEAn4/NB8z28VxeJ2Opsvm83sjk4dZGkok1Z9QlKS9VcTZU +sfYN2nrCUEq0mMGg7mFsbSBgZq0a1IoRYP0Ci1ycaqqg0iLGlvNAsBhazVgnlr6O +P1j+hkf5JGM7r+ZgVF/0u7i9EFAgVs8EwqCH/RE5p0oJ5ncGiNf92KB/uG0r/eWz +TF3/VGuudWcOaCzs8MMMWY4iYDpm5QWUnS7eu/VWW1efGH6ZEEo63bnAFsQZu6xZ +yKOKealhiDLRVatigFqZh6oLQoEckl4+QzWKWxscAHuMuTy+fWYLdhtrGZIBEutO +DmzUMupifSy70VMt9nPcD3/Z93agswMJuU5hktpvUQIDAQABoywwKjAMBgNVHRME BTADAQH/MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF -AAOCAQEArn+KmfD2JEXa0G81jY+v1+XBT4BCcFExbdpOYbIoo2m0Qvx+sla5+Qu7 -nG51R+3rnkVPr03ogKYtf3hYtQJk6DqfuF0V9ESYkz09XRwyW93mh3z4yumXnk3y -d6SG2quC6iJV0EqT/OnmmveGBpxaBjf80ezRq+8t0mVGeNwZSxv0OprAkmKIIDM8 -Qa1/LlGhStiU+hN62c3m4wHdY5jreRYH7NyIZCHJ/wKgo0cDWWdJ4MeAaQhuijUI -BaNg6mFHlxVMMRGIGSduUhu7vHzjbAES6kJxdIpDM8tZMlRZQ3ORml5s9onSMb2n -NmJkjwyB62odD+yrygWRLtFMJmKODQ== +AAOCAQEAfDOw6TjvfP6w137p3z+FncTYQM8a+Ytgtniy4VvJjLXyev4ibzGyBiBk +Pj6Y5AcCVRyxzUgPnL3kNOTOPI2HMRLu6WR3vzzvJPZQcetTt91A9rGr6C/I08gS +AlPaWFsiMmJML/QxH/C5Jh1wvoRha69U0IlXITGHiGBvmYtvjUXD12S6W95zlbSO +g9zKc/MBZxe+bjaR5e4l+ieMI5QvBf3ehTg8g0kV7CEA0ZCmbuHL/yLkIz+Yvf7l +QK4NXwZCOq+ERpugG0cGh1zwk5K7N3MsBvA5NhyPQnN/4WHZ3c0Lqznf6m4h7QyW +U0F1wL+qogbpLVQ/oZOdnjUm9JzlIA== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDyH7lrGBOh3Dwq -YfP+tw9zLY3960WSKk5UvSLdSDolwqQOO5Ih3ItQu4hCVHyddZDeFb0JnaAMJ5Ke -adQpjoZYNA8ehJQk+GYaRc+48FeK9wtCktVl44eupHj5IfM4DffXssX9Bib2QLd0 -RkjP51ZSVMezg0XBH5ijzhc2ixIZpSGjSYrlhwHHRbvLzas2uKNzKA6RcX1CsA1R -l9TPm5D9k8OOCghKoW+AzMm2kvoupIJNuxQzg1s9JgQIMXMiiw76uFgJSyBnmkTo -1ZmOYrOrj7N5v/aJIigRvtT/l/qLgfjzzTL66ytywG40JLU5UdoQHVmuWKclvOmU -HedX0ePXAgMBAAECggEAP28UUvyB2Dws/yWemTM4twJOWyISnhK7ZvQOeE79/Pqb -pKbyyDBRx5r6PfaIl3A/vg+P8T78uXJ8tUggr6qJg/5Pn704Wt9BUMzNeTRumxfS -OWTix8juuVCZ6Rt970epkTdjK8E63/VgmvP3C5EeSn+vulZAJjKy+Od7qWySF2lr -ikpuEvzy+l81NWu/YbBgD1JlNdn9IPr/hmBeOwUyOII9uiDzSvvUQb9Q2wFbW07O -DjPG1SxCYr54vGL3REmyOzhLPp+5OiHZtF7w76vyq/pTCz7eUfPwTwIo3DAybvPx -vfIeBLfTIpdTL9XyCPhoIuwj+6AA6MfRKATi/0OLsQKBgQD7uVgvlDE2+a8xjfFT -Z676Vm6hiMJ3lrUysRvJJkTAiL6fzqS42gOu3dTk6UakuAxiGpkBOtKG6/j4HYbL -AUGhpfyLwyKfdVn9OyAZFxW786MKtdYt2iKZ+c5EEb2MFndQGBa0ErtaG0nPHlTn -vm9Kf3bR0n2WXdb0pzLlL2NsLQKBgQD2PKHxK0zRKiy3js5nxUK1feYtTlqWsvxO -wM94qSOQ4r19QpdbIU8/ywWw8O3j5p+etWqy6KlSShDvP1QYlx5zEo5vhZl96Cfz -Ixk6eHrxsi8r1ARcCgDc+05ZqyGhjPOCZI5y4JXNbdnVhKnfzioFgs9mUL+IwzxQ -l8hnGDOekwKBgQC8nwGphU3rd+UDKO1wJf1DsIhtmmC947wbJANCEt7pto3AicN8 -kEC6Q5dHgFVjEFaXHH8SINWoLCH/KYDblTFsw0geIjZLbk/kJO3EXzv1/nZpjB/V 
-c+MBpeIdt31k/2CgL6yzemXQ+ymvfXb5wAT8uc57I1Lf3ak44iCNA5locQKBgBoz -7Y+6adxAPq3x1mkYhrbFhP8BPYaX2V2QCwPuC8jZOAziTzT1YHeLZTmlCcucuROs -foQ3Wf1VwGOVO5+6RRU9vkaD0weg8s9exsqAE6UwrvBdCXG55smdPIMyQMvuMeOI -S9hRqd9Id0vPaDxWtgYMG4HpydgF3p8856iA3M1dAoGBAN1s4DE+5KsnrM888dp5 -z38+QEYyuO3DMuSE/72lnb+7rfTg+75tfdLdKhJrACHlGC1LvnfJ5oLUVWwMgqSZ -BVBDkykYxd82ATRjfLj3S75nIj70vgb6vGUpm9CommhvRuUIwi9GAp+LzI5f9w1n -1i9azD5QQ+vNVmfXX61/0ehq +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCfj80HzPbxXF4n +Y6my+bzeyOTh1kaSiTVn1CUpL1VxNlSx9g3aesJQSrSYwaDuYWxtIGBmrRrUihFg +/QKLXJxqqqDSIsaW80CwGFrNWCeWvo4/WP6GR/kkYzuv5mBUX/S7uL0QUCBWzwTC +oIf9ETmnSgnmdwaI1/3YoH+4bSv95bNMXf9Ua651Zw5oLOzwwwxZjiJgOmblBZSd +Lt679VZbV58YfpkQSjrducAWxBm7rFnIo4p5qWGIMtFVq2KAWpmHqgtCgRySXj5D +NYpbGxwAe4y5PL59Zgt2G2sZkgES604ObNQy6mJ9LLvRUy32c9wPf9n3dqCzAwm5 +TmGS2m9RAgMBAAECggEAN0loTW+jkPXkWdmajz0hSEBrriTExzlrm9JSHfccr+GX +byJGopEuiwuXj+ZnkLGJFDbHsEwCo+pDI0wHeapZOExu9OC/1sXqgBmwPL5y2TSl +rSV066o8lXMc11IHxrSI5BKaz/AzhGCu0ZE2DWXOiDyLOaO8S+YrOshB+PE68Pn9 +/qWZWSWfOz1nsWYX8ywu+R3o3Kw2RPfn/9lruxjJCVntuCvZcWmaeiBvxMdl6jeC +LvlT6+4tkE/lEA/sgRMow5hD2aysSnsIkcorwwZZiCkMDVavNXlwZJxIaHH8nnad +2Pxaf0PP/BXfTIKHinuzprVwQ9WeKukbJv/pjNw4AQKBgQDSUBE1L7dAynXFLA2M +cYucpuWTMqjGt8uRQ9ceDXE7Z1SGiawIPCgmHofoB0mgddRoRxWm5iNupPRRXvpw +hBT4I0LXbMjCuJIO7TSeRJsWSSUqEaZL0dFx/5GmV3NACkTR/4NRikILt2o3L3pP +9Yv7efRcRHUgZLW3cb6P1e8CIQKBgQDCOVy1O9mJZBeg0kQonGQZu814GxFvR/zx +z2Islh0+axKS3/l2v/yb5bepjfomdisjQxqvhfim1KmJv+P8TP6WFmxwjg85tUdk +oMi3oTszxjCA+eYfj0yQGv0+UvseGm0TfTuiAO0SL3CWu7XrKN4ob+yLnnd9gQqP +BTGQ7cgnMQKBgQCQNYq3F5LmgG0k2EIqDSmYLvC4cEI+kISrhQMafkkoXIAfCIPH +2cgF62Vxep3Hw0P0hNmZ6bBeDAnjSeccA9WFGFia8uLucjTku04bQBu/ukQbhqKq +1qJxMrciglBqlx/9huD6pn0HH6tbT9jkvxBPTZ57Lg3KOoRH11y9sAoFQQKBgC9O +v4kZYdw4OBUhAh8OMMef2eVGWpHLbA4OIHCRw9+/Ps/tpBrLmqDybDDtdx/FKq61 +GpvkOvOP2xfFWKfMrTorjhBAWe8Je7FEBH/N0tjCjm/r7qSDR/fVyxdSKP5lG2pi +15KXPSdvzLG6WQ5Fbw9Ua756Q8qbEtJRRohxko4RAoGAfzCC1wa5zzmeY2/B0vqs +ul1AKBD7h8XbgKmRrEn9OAQShAxdwQKQa0N9DQh/N2K4s3pfrUMY9kLS6wxBfD0T +xt5K6vlqxnDw4swbB86SbP1gvmU/nL0dnLfqZJsAsNzMqRxhWjc5ysm0IXKV1O1q +Bm8Kwf1jENRP5Lie83AW64M= -----END PRIVATE KEY----- diff --git a/jstests/libs/trusted-ca.pem.digest.sha1 b/jstests/libs/trusted-ca.pem.digest.sha1 index 44694a1db8f31..8fa1c7ff42115 100644 --- a/jstests/libs/trusted-ca.pem.digest.sha1 +++ b/jstests/libs/trusted-ca.pem.digest.sha1 @@ -1 +1 @@ -199E4C0E9DBAC3EDDA339125CB8ABBFF8DDF8442 \ No newline at end of file +53A3A10554962C1188FED99A346C0BD4F43B3A27 \ No newline at end of file diff --git a/jstests/libs/trusted-ca.pem.digest.sha256 b/jstests/libs/trusted-ca.pem.digest.sha256 index d01ec86644853..04db01b02639e 100644 --- a/jstests/libs/trusted-ca.pem.digest.sha256 +++ b/jstests/libs/trusted-ca.pem.digest.sha256 @@ -1 +1 @@ -2A513BF013D0031D93537E82077983549D3A462EF43C9AA32D15BC6A3D13EEED \ No newline at end of file +415233491DF74176550B70F1D87272991A4F09E560A06315F6845E55631CB50F \ No newline at end of file diff --git a/jstests/libs/trusted-client.pem b/jstests/libs/trusted-client.pem index 4589a236f233f..2535fdf114cc8 100644 --- a/jstests/libs/trusted-client.pem +++ b/jstests/libs/trusted-client.pem @@ -3,52 +3,52 @@ # # Client certificate for trusted chain. 
-----BEGIN CERTIFICATE----- -MIIDrjCCApagAwIBAgIEL4rPNzANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV +MIIDrjCCApagAwIBAgIEEsAWmjANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UEAwwWVHJ1c3Rl -ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMjAxMjcyMTU5NDhaFw0yNDA0MzAyMTU5NDha +ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMzA2MDkxNDI4NDdaFw0yNTA5MTAxNDI4NDda MIGAMQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5l dyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEj MCEGA1UEAwwaVHJ1c3RlZCBLZXJuZWwgVGVzdCBDbGllbnQwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQC/O+/T5vzk4N45xzpTy7wJCA0Vqrg4rjMW38Ne -L03rZI937BN1LFhvSVDp3aKSdA8dgA1oMZxFbNhYw1C73PUKP3GhP+u+Fd4aUwUy -mkSGYPW4XIESDcc9hVRtWDM5PH2GtMxuT9xOLJqL4FIjzBjjuQpI5VltF9gdgheX -kpCUhk3A3XWqoyQ7exnq8rxfcvgmto+NUtuIjaHxbuDIzm+tCwS4LFgYVuCEc3G3 -MwIFW7VNJyMYdNhejBfAu0i0w91VhvWg2s2fEG0Xcel5TsP8a3dYGKaCVZ3C2MsV -fQf8/cI6X6nn+vcfFvtAQhGAGfD/ry9rOuXELElb3cezO2Q3AgMBAAGjMzAxMBMG +DQEBAQUAA4IBDwAwggEKAoIBAQC3N+lL+aVjQmmDjnVLaO5c6SFs/kT17q3cGx2r +VnhkF/nXUGTn9G9CwL2YR89IFFxM5pzPXGXX/MQzHZI1fief+2qGWK8z/fd4XKar +jLmyQ2qFUz2zCN4yMlzHJI8Jr+wzyylz6wfxjyPaC9Jd7t5gKAVMUvNNPU9UamDU +bhxFk11E9ot0/bToAf0sv4wfndt+xA2AWtt4Vm+F44zuptL1C+UXqEXtY+t+j3jZ +iw+/rbRaKksawgWb9zn4SvmOLVy7SR7IgAAcI/SEPG+dSXlyDD9BHRR6YbwcQPB+ +V+IHo3R41d8mQ2n6FMTdrv342G0CDuR4sH8c3fmbP1mqNqmdAgMBAAGjMzAxMBMG A1UdJQQMMAoGCCsGAQUFBwMCMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATAN -BgkqhkiG9w0BAQsFAAOCAQEAKtc2fA9kFJb8/bxJidpQjUAqiRgFOW+VYA2DTpQJ -5+2mgEFutClVGjEdcmHseO14a5l6W2KSbJAYpv2ZIhFuBXpPsHk16H7j9SR+RmNp -NkFhqWbEhdOVm+KNS/4M4wRl0s5RnbXpPvxdvwGIdcplUck+cFWtcSCU1OZYZL3z -F4nN+pdoydLUx5zzHMkQSoFj8Hwv9IfIXhDFLkOhvGE8BS0r7dR2q2TEzA2cBg0R -KEqDGzWRAuAZv78Cg7TfIVgTA0DYaZIL+6F3QK8UzFrQLScbfKCXUJudD/P/AHvj -s/RYLhJGDEOjHov5Egixkd9DPBtq91N3eCzdSyZrIGQ73g== +BgkqhkiG9w0BAQsFAAOCAQEAQ5OWvZJ/WIdj6/3DTPGa/l1bwPLHtPs0EZVwO82f +JwIZGk+6/WNVmnejgoqp5cSJUzeJ+9qodoqmIySiQYsPQ29t/6WBhfUkWQ45Tc9D +uMYfWiJNyIZGZSvimg4LU3XW0Wh+KNOE0WTiNn6+EpP5Bd3mpp7cqKrBsglUiuyh +ECC6pv9kknBlMWRbtvSxbwSNnuoAoD4ACWiUIFl9cE1ot7q1Yy9tP6PUcd4ma2bk +t/CmtA89+8+8b8euElCsREeESwHViuH61cHVA5L1MdRGJ5gkITzau1SJuB44s4Oo +u/VDOqjAgwYx5P0clbareAAFu6zlarFN3b2Udr6T9O/FEQ== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC/O+/T5vzk4N45 -xzpTy7wJCA0Vqrg4rjMW38NeL03rZI937BN1LFhvSVDp3aKSdA8dgA1oMZxFbNhY -w1C73PUKP3GhP+u+Fd4aUwUymkSGYPW4XIESDcc9hVRtWDM5PH2GtMxuT9xOLJqL -4FIjzBjjuQpI5VltF9gdgheXkpCUhk3A3XWqoyQ7exnq8rxfcvgmto+NUtuIjaHx -buDIzm+tCwS4LFgYVuCEc3G3MwIFW7VNJyMYdNhejBfAu0i0w91VhvWg2s2fEG0X -cel5TsP8a3dYGKaCVZ3C2MsVfQf8/cI6X6nn+vcfFvtAQhGAGfD/ry9rOuXELElb -3cezO2Q3AgMBAAECggEAcIotUV8WZPuQzB/ay4WSWx5J1P5q+7BIkKWOq9ba3DSJ -2eEsckBuqs9Sts6f5eA3JP0+5pqLhK/RgisvT99wtV8w19xuN6dW1dcVr/npacuV -z7Fgo+dH4YSdctv9CSn4FVZBGIoW9Ep7iOWycS6jVyCGVO/j+LeXj4YHrEOsojqC -+YRTv+LbY+aWWcL7SuiATIEqWqFvjqmW5sTtmblUG/TtW/D8XB8UQdwEbfQsZx0j -uVxDXx4qLGaEOMjE1DQpzTicNQ3z8r4Ij4egfUj71TuByrVCqn4PFcaq74U/1BPA -C9CMm8qdD79qpLmum/y4SyXgahajQHLIVgnAR9y+AQKBgQDiaGqpTL6Q02TEtV8X -B2yt4Ep5kiNfNKTqXjpIuOZGJOIJoSWt5UZ0fCcXfza8/cirDtbQ9ZXubRGP8kwD -uAATCWY+cpjKSbz+tIxa3C9S/mL1LZY9AZRJMpWnNF3hTCCVBhjQnqIDxWzW8Jbr -AubgCp/2EMaUi5nGXIpU4UwGVwKBgQDYOpw8OP819QET1vEfixUjGz3KExvgecMw -2vj8U2fHQomrRfpzGY2GhgollR7gONeFgDZSsqh6Cts5Kx2rR68KfMCX3dk88Xub -U3FszxTCugPlKH90tXrNPJcI+Bz6cuynmt0syR0ZntNVFWNV7Hoj3LLAY7NCdwg7 -QKBZlhwlIQKBgQDQsiMvb5hxcwp98Bl45tUc6ZR8v2Jvjd0+VCExi45ntfPNoIdU -5fStxwtZs1/Hkb95PjJxZw8POeZoY5YCD4eyBIYEpimEvbfCqLZ/wlq1C9w32A+W 
-qHABkOk1uSWYWU3nUDlrg+4fv8n1zsuuUXxzpBeTAB+sKYpTuFWirBIh/QKBgDGf -QbuQWQFI+LZU3YEfqfokhkmZmQwq5WCQ4BMIEQjpfC6SIKfJdXEp0apOToemg89f -XRgdaAyZ8TVtb6GfEcyWVJyFjRUvVe6Pd7hAzcLibYJpiNZ/z27KON8WEZBoT2cn -YxqkDMmUaWtdPS661kzkmSENwFXAe2Mdsa7dhBgBAoGARAs0NF71RQniqTvSy+SB -GJ3/sjCFrs66IbejqtbiF0aWjpPoIGRLh+lOYzSpeWGX3blhs6YkzYYcsamYVdaE -ltxC5Cw3eMke2Z8X3jrLyoGJUGwZzrURxcWvWPJsbUJAfu804htLAt35aHdmWGKT -mVeQMft12zXHLOIbQKCN4Po= +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC3N+lL+aVjQmmD +jnVLaO5c6SFs/kT17q3cGx2rVnhkF/nXUGTn9G9CwL2YR89IFFxM5pzPXGXX/MQz +HZI1fief+2qGWK8z/fd4XKarjLmyQ2qFUz2zCN4yMlzHJI8Jr+wzyylz6wfxjyPa +C9Jd7t5gKAVMUvNNPU9UamDUbhxFk11E9ot0/bToAf0sv4wfndt+xA2AWtt4Vm+F +44zuptL1C+UXqEXtY+t+j3jZiw+/rbRaKksawgWb9zn4SvmOLVy7SR7IgAAcI/SE +PG+dSXlyDD9BHRR6YbwcQPB+V+IHo3R41d8mQ2n6FMTdrv342G0CDuR4sH8c3fmb +P1mqNqmdAgMBAAECggEADopIYHgqcOsnVoJqupZUPFlgBrdTH7VWZN5eB2fwW/kv +IbBGocXbQo/rO/eO8qhy/sNadHZfurlblLbnEIm3eVHJjWniVZz1E78+luSvI/+H +p5cIkXnFsHQguski9ODaPO4FlyZy1e/HJ5nCkyPO0BC9AuaeWmXgMtHSZ2lpovfB +dPlL7KN55AZVSvB5enSb12m9G+RTygDWAKRiV3oule1yVnr7+wVrROoZpLGEzy+2 +5iLmrENWwDaGPg+UYK7dbKgYoU71Rfb1un7HTN4grcsJDaJSdho/ESxh3jXdD2Zi +YJKGyUDxgJwLvgJmYb1adXwKwFAv//43Re9O8Ed3HQKBgQDq8pMqmnxpsHHvM0kT +d52vIFNGP3GbMjZ7bQqA18nv08HXetcNnEu/hOkvyEvl/setLQw6ZXAYMlRFaKq6 +9gx0z9kP2MWoehno/1sroJuHcAm52cs7h10E80O/QblVw3/wDQ7N/xBwNRENc0t/ +tBiOE+lX2aCjjVgmPG9jFnEIewKBgQDHorrAS/1ay6iZs0jfMAtIX/AWkTgFVvoa +HyEPDKX0Ae8HqQ65U1bEWRJMeUrm+YbH1fTTw6SRvEP8KVpUI+e0mvwHlSG6Rrjk +6DuYw3aX/GmltYOOaEcn58xDqmVjf3pOLmdDw3Db3XNVQ/0xLl3pIIZOiSlWwK8o +kKUZANOWxwKBgEjjsNyKyQZRA0fedVFgzr7CHJOyoyAu19A66ANI7xFEdOLQ7V7/ +mlB0f5OfZEyaWoBTdXO4fKpFEvflnPIb24lx4jmUWt4F43vxnOtQDHBcRegutWpm +eGlMvzvavBDrcgmbQ3iNJSBqljvCBS49RVDuxHnIvpl/BFX4ceYf1Dl5AoGAMPw0 +GNVMkIfikfxVCDbQveyUi+UMOvsxmVFEGl4/JlWHInkQHNdfNgSpIcytXWmi8cwF +LCUYb44jnG9FM9ovk7hn2TB1uzB5U+nMHdrwcbDE951Hb50UwKz3CS95e2WfTCnE +uvLs8XYwQ57QNM4syvRpIEI1u4zCLhD4Ad+bC6cCgYAJ9mxLDp20ZLEI6nqxaPwU +ddlcFjowbkkjchz8w5aFjkySuDe5AzPPh7HeGvn924ahRPbSXzmSIovc8UKFKQKs +zcxWlbF5EADBxzFbojqHvm0INEIgDlcUuOXb9XwcGk2Si9fzTWiY9O+3Zh+iRE+M +VgMoS3t+wWZnahGy5FSDvQ== -----END PRIVATE KEY----- diff --git a/jstests/libs/trusted-client.pem.digest.sha1 b/jstests/libs/trusted-client.pem.digest.sha1 index 246c340c1c99b..e5cdff63e2b6b 100644 --- a/jstests/libs/trusted-client.pem.digest.sha1 +++ b/jstests/libs/trusted-client.pem.digest.sha1 @@ -1 +1 @@ -C142DB298EC99AB0277EA16903D001DF297A7F1A \ No newline at end of file +0A3CEFF09FFC8F5978B32666F039D8E2C061BB3A \ No newline at end of file diff --git a/jstests/libs/trusted-client.pem.digest.sha256 b/jstests/libs/trusted-client.pem.digest.sha256 index 7eb442a40ea56..f79bd8e829d67 100644 --- a/jstests/libs/trusted-client.pem.digest.sha256 +++ b/jstests/libs/trusted-client.pem.digest.sha256 @@ -1 +1 @@ -19998D06B253E27ADE91239F6EC9B94329EC16369B09D6FDFA35FD2685652027 \ No newline at end of file +330CD2E18004BE2479D58988108A9B7DBC1A4768DBA2A512C956C8FD3DB5F52D \ No newline at end of file diff --git a/jstests/libs/trusted-client.pfx b/jstests/libs/trusted-client.pfx index c1d9bea7f948d..df405f69c2937 100644 Binary files a/jstests/libs/trusted-client.pfx and b/jstests/libs/trusted-client.pfx differ diff --git a/jstests/libs/trusted-server.pem b/jstests/libs/trusted-server.pem index 0895bd191b516..d52b3a3e2b94a 100644 --- a/jstests/libs/trusted-server.pem +++ b/jstests/libs/trusted-server.pem @@ -3,52 +3,52 @@ # # Server certificate for trusted chain. 
-----BEGIN CERTIFICATE----- -MIIDrjCCApagAwIBAgIEOQrseTANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV +MIIDrjCCApagAwIBAgIEALTCNDANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UEAwwWVHJ1c3Rl -ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMjAxMjcyMTU5NDhaFw0yNDA0MzAyMTU5NDha +ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMzA2MDkxNDI4NDhaFw0yNTA5MTAxNDI4NDha MIGAMQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5l dyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEj MCEGA1UEAwwaVHJ1c3RlZCBLZXJuZWwgVGVzdCBTZXJ2ZXIwggEiMA0GCSqGSIb3 -DQEBAQUAA4IBDwAwggEKAoIBAQDD4DY+/aqU2WCuHbCJete03bAbdYzQHGDlbfql -0GcXON5aJEUwCyXbnKZjcxi55vxItF4KTau6ipiVXq+ukt8Maixi8gifAIYUVfft -Naiitn2JubzfatjKfCVpQI/JnFOhugSw/obJPhAqw02g4Ul6tDvoqa7rC9wRmKXH -TyRr5xkrRi1dNhWX7s/hvRYwoMX35ipKhW7RepHxG6byLvKlKWCMgDhzIphNRNYp -DopzzvvL9Of2siab51lzFdfYZ4cMGZt3o+uY6Kmb63glv60Omf21x33hGnU2ucrd -yYxgF43+8J8RJeVGGzD3sjE2ZTI21ZUQM3/BLT+PnHgB1u2XAgMBAAGjMzAxMBMG +DQEBAQUAA4IBDwAwggEKAoIBAQCn6mB0JG5+uY2Qv2HhlqvBAJr/caKuedeqWG9I +SDbtSR+slY09fpmjKxC1UdsgkXJwKfC1Mqcdtj5PgdgGuzcype+7+eEcbd4yUuuw +kPnMZNYTbGSEH160NWjfOCWMTxRLDrYVRCJTw5LkCD5QA2mNS3SldqxZQ/E/Cjw1 +5BgurjC3HaENcvwgK1vHvm15bt1TF5YYEC9vfo5pMeG+9MXeSLJ5DgBwcMPjKpQ6 +9HBAE5/AzpVqQBTUPECxdfPKxd1652jfkvja8vBKVxkWIK4n8jy/adrju1ci/7lU +S/ec9hIL4pEk0LuXFBmtkLrSUtYJyrV/4rhwXlmaUjv4FRhZAgMBAAGjMzAxMBMG A1UdJQQMMAoGCCsGAQUFBwMBMBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATAN -BgkqhkiG9w0BAQsFAAOCAQEAtGG7O9KZ6fO6PByz/C9i0O6ClhkKuq85kaH2hCpA -apJ2eM1LcfmJ9DgiMmvN6QevTnkkJG42MXqKVbd5mglnmGPRFgQtqIkFS4ERKvj1 -3/IjwvPP1k0rCbuJQ6J8tmEmZDIATYWd7qauH0K2KvtH9sjB+9ghTfBZzIx2Y5We -+k4ETF5p1Kqu7aBmEXOfcldh9iedqkDUNP7fWQKXPDJ8a5oFVTkAU2MZQCczOp2h -SNdj6jb8qaWoUp4cVs0sswC/nhI4DbNrfQXmRZisMuywRP+gzH5vr9VUsRvHAan6 -orla7WPITYTuWf8qV2grNJwQaN0AhNktXl7U7h3kIDd5fg== +BgkqhkiG9w0BAQsFAAOCAQEAbBag9QSwjtviJ6LefGx7CvyOtf4z1Lz/Niw6o3lS +sBE/UPStas65VRS97lCIrQ73Hm0Kmvf7lQ6pkyhC/fcxiWvs9oFLNCtNBREDtWzd +yywJhwA8tKorFnKGKuguL9zUbEnlh5H8a4dLkNSwPf2+BkJGeAKo0LZUOzBTJkrt +H1glT/ABIV/ApmHtnX0guhvtGU3XPUDdfNvDgdW1UgxWLzBzKUYDGoyHvUj9VW2i +U2QaM4NiYnC6lCMFf69w7sDh75NBKn4QoZB5oL/lyt2Y7gMGfQU4pBatQSA+u2Qd +gzIkpzECim6JnX+SnOYIuKAqYZazoY5sBFx0c+iUT4GdEg== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDD4DY+/aqU2WCu -HbCJete03bAbdYzQHGDlbfql0GcXON5aJEUwCyXbnKZjcxi55vxItF4KTau6ipiV -Xq+ukt8Maixi8gifAIYUVfftNaiitn2JubzfatjKfCVpQI/JnFOhugSw/obJPhAq -w02g4Ul6tDvoqa7rC9wRmKXHTyRr5xkrRi1dNhWX7s/hvRYwoMX35ipKhW7RepHx -G6byLvKlKWCMgDhzIphNRNYpDopzzvvL9Of2siab51lzFdfYZ4cMGZt3o+uY6Kmb -63glv60Omf21x33hGnU2ucrdyYxgF43+8J8RJeVGGzD3sjE2ZTI21ZUQM3/BLT+P -nHgB1u2XAgMBAAECggEBAKRUSG5UVnYGYOvykJbmbm2YxxRibD50cH5K5EuUKcj7 -tt+dKkaCM8KpC5bHiuj79o/wcizmuLoJaLV3+J/XLDW4JYDlWYI76EDLVvK1X8MS -owOWxfQSKcrGmIRS287ize8Sprju8JmI7ftSWqAsIX9GEjTBlrQvAPPqQrqRlRbI -E3pZoWO2EJ/WjhbLUCmet9AIm+hfG6/CnnzvH0CIYDr4/CFaI3dzfD4Oeeoe6gEs -KE/I/q1ya7JAA9o+wVNL3TpJ293yXSXxX+YcMSgrSpgwadkkDr+8AFplDwfJx1uA -UTJs5SMeKym+jrQgxIAJFjLEuh3fClAfe+TgC1Pwc3ECgYEA9to/lMS99qwLX6aU -hFeCeY199O2qtsGqG7Fb2R0LXgxbux9hAIFZkzAOma3ZUwvMJOmwO8K2CVGtGNfB -uC5V6OSes7u73FsHzg6nVg5f4J+6V69QT3UhDNxmEmykrZ/3y2yrGBJdARuezrA3 -B/BWjvm1x2ZQHNOzliyWzWDgYmUCgYEAyyJgSFGWd60GqEGFfbhh0GvIhjAAnBys -Rha2Y3rxfwhzP4We9WlBawTbdi+SIeKyoIUVvMILspadyhJCqlmKjVX2E34ejvJr -bqrZsucWtDu3pLbdpxIgkvadilyPeFuJLfSbevuHWePynjfgXNTLqeXKao+hxbIx -8uUo2MoDEksCgYEAnkXpJlXPBu3gjP+dkg0Z1x3leLk6D9u0WfUp0tdQhoid+Chy -ZYSKDlltwxM4mIqj5bcADBEX7nmz5o3P42uyIZUUPGFOXkbvhirXF9I3nypKrBX4 
-BDoxarGVsJKAM+KxWnjeapy7jQ5MkHFjl2990EZDSwpKq3EBYHRObHiE5hECgYEA -sh4wgvlXkRTdM4hQf07AJjt8l31b64eQ39xmjZBuVc+ZDugh3FsxC6A0t8s6vg/E -RdCsoNkd3LkJHvkoD6t4PDpWDf2W2g1jeqTBQ4gydlOu0fWvIiJZAcTRm1NSUl0h -SnSkKRCJPJ8OTQH98Ch/dTiTZyEO3wVDmxKp4H4yd0UCgYB9kUwKpSJZmOE36y6w -mXT1YUK0lR41v6jDEJVBegNChVuXuFk9hEooyCmOVRRGGm96Qu+tAzVZcoya8+LE -YJJQD3invZW+IJhrwKlgDCqSqaiAfOz+Yw/H/vUaohwjT6okgrC3uzEYnneL1DA/ -7XxoLcbhaN51O9KJ/U0jZ4iEQg== +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCn6mB0JG5+uY2Q +v2HhlqvBAJr/caKuedeqWG9ISDbtSR+slY09fpmjKxC1UdsgkXJwKfC1Mqcdtj5P +gdgGuzcype+7+eEcbd4yUuuwkPnMZNYTbGSEH160NWjfOCWMTxRLDrYVRCJTw5Lk +CD5QA2mNS3SldqxZQ/E/Cjw15BgurjC3HaENcvwgK1vHvm15bt1TF5YYEC9vfo5p +MeG+9MXeSLJ5DgBwcMPjKpQ69HBAE5/AzpVqQBTUPECxdfPKxd1652jfkvja8vBK +VxkWIK4n8jy/adrju1ci/7lUS/ec9hIL4pEk0LuXFBmtkLrSUtYJyrV/4rhwXlma +Ujv4FRhZAgMBAAECggEBAKLg24ztZe0w6hw1OKoNcC83+iG8xbP+5m8Ld26i9fy3 +yKytKKM08azv8jYf37G4xEv+ssnhB2/vvoN7DOSCp6lOgrxRR8Y+4KDqBEtyZYyZ +Z3rR0rLChyfBAyg7m1h9wM6Jx5/bRn0AIbVmNaOBm9p+DfkOo2G3YQToVb1ksvxb +UfnGBuSL6qXL8ABuL247he1AY3w0yDa0DQcYd0YjAebQjavbN3i1xAdKN+JFBHIS +4gviMOAk7WoPWf6k86CMXHUexyqvti146AdgvAqUjtlnuzjDqCyyuIvwxET4f5ix +pYhEK+SOht7sA+JYdtBZiDLdmMKXmNX5tfGL5i4oGHECgYEA30oKGXvZNmq4C/7R +ndWPxuQ0NkX3pUiGjN66flNf57leHffd5iQclGD3QIR0YTpDaAZ6jF3qV10kTZ9p ++UuRA6Kd7GEyV2GYxMmmVzvA9QALDN/D8dCCJc+PO8976SFj8wllMVG41aTTtzqS +tr59FKlUEgE1EZgEqS+9RDoayN0CgYEAwIOr9vYM/vITEDSImpxp46+Wr2+cUkJ2 +yffAwTHE1D8fRn8AbxMg5AriOw/LkOKD8zGBlORpf5D76dEE5NiEZGEq6o69WEM2 +hmYa7mcCnhEB3QIYcUIuvuRo8Fh12YG/SXd3kA6WInt2taEanGhFhsIfdKwW2/eH +dtuD+pz1l60CgYBVmWitxFDND7RmxNVPEKQBt1JczA3YCympr2kHLKdDikiObItg +ws9CArpGvYBwFYnpwzIPxaEkxMw6Bbb2nwWEwz6Pc+N8pCmQp01he8LJKa8SzGWt +uiVqoVtjrnLuOKK8dQmaEp4tKPcQ8x9zdys0VIWqMVRK3mBLC8Ye9bd28QKBgE1w +Kaori0q6IUTxfMmEhWua4+gp4x2LsrDHQff0hxJBWdlHmOsVLLPOVPYhAmeDVkRq +847q2i0AKvUAqRFMruSZ2WOEi1GHp9UkGU0wjnL6sF8wSpi0YI1U34lea/lUIZfx +wFxoIag5NaUV6thjcSQlzfVmi8NrrEf5QPt8S6X5AoGANaCh2qtSZisawT6sjNpI +CwD1UhR45iYUaTx5VRDa79PK7SY7Hw5nCfzdo77I7Plec/OgptbAxJOmVtx5Jgc1 +TcvX3eaJHD3Wz1nKoRfQnR4e3sp8/SJs9QiNieSq8jwyKmvhILl/UmfHrg0PdItZ +QX6Y2VLLEayhllPo5hiUWLo= -----END PRIVATE KEY----- diff --git a/jstests/libs/trusted-server.pem.digest.sha1 b/jstests/libs/trusted-server.pem.digest.sha1 index 4a0f953a8da10..aa8559b350ed1 100644 --- a/jstests/libs/trusted-server.pem.digest.sha1 +++ b/jstests/libs/trusted-server.pem.digest.sha1 @@ -1 +1 @@ -50922DBE6EFF8BDFB3EA8054EEF9B2A090B9E83A \ No newline at end of file +4B9818A479578E91263E92744C3E77E73311C3CF \ No newline at end of file diff --git a/jstests/libs/trusted-server.pem.digest.sha256 b/jstests/libs/trusted-server.pem.digest.sha256 index fb2bfdf3fcf51..aede4d3a54586 100644 --- a/jstests/libs/trusted-server.pem.digest.sha256 +++ b/jstests/libs/trusted-server.pem.digest.sha256 @@ -1 +1 @@ -3928416446FFAFFC95B07E9FE82F223F2CDD01689DBE513172B7A2D27904B616 \ No newline at end of file +2C0022083F31AC3DA66694713E26A7D2C1F012D880E85244D4B6ACC975652CA1 \ No newline at end of file diff --git a/jstests/libs/trusted-server.pfx b/jstests/libs/trusted-server.pfx index 57b9c3080196a..57cc9f6c0db7c 100644 Binary files a/jstests/libs/trusted-server.pfx and b/jstests/libs/trusted-server.pfx differ diff --git a/jstests/libs/ttl_util.js b/jstests/libs/ttl_util.js index f17b89e404533..723170cae2215 100644 --- a/jstests/libs/ttl_util.js +++ b/jstests/libs/ttl_util.js @@ -4,7 +4,7 @@ load("jstests/libs/fixture_helpers.js"); -const TTLUtil = class { +export const TTLUtil = class { /** * Wait until documents inserted before a call to this function have been 
visited by a TTL * monitor pass. On replica sets, by default the function waits for the TTL deletes to become diff --git a/jstests/libs/txns/txn_passthrough_runner_selftest.js b/jstests/libs/txns/txn_passthrough_runner_selftest.js index 70bc723dc953c..9c69e9c86a1b2 100644 --- a/jstests/libs/txns/txn_passthrough_runner_selftest.js +++ b/jstests/libs/txns/txn_passthrough_runner_selftest.js @@ -15,20 +15,12 @@ db.setProfilingLevel(2); const coll = db[testName]; assert.commandWorked(coll.insert({x: 1})); -/* TODO(SERVER-47835) undenylist -let commands = db.system.profile.find().toArray(); -// Check that the insert is not visible because the txn has not committed. -assert.eq(commands.length, 0); -*/ + // Use a dummy, unrelated operation to signal the txn runner to commit the transaction. assert.commandWorked(db.runCommand({ping: 1})); let commands = db.system.profile.find().toArray(); // Assert the insert is now visible. assert.eq(commands.length, 1); -/* TODO(SERVER-47835) replace above assertion with below assertion. -assert.eq(commands.length, 2);*/ -/* TODO(SERVER-47835) uncomment -assert.eq(commands[1].command.find, 'system.profile');*/ assert.eq(commands[0].command.insert, testName); })(); diff --git a/jstests/libs/uuid_util.js b/jstests/libs/uuid_util.js index 796d294942843..a9df57db5666a 100644 --- a/jstests/libs/uuid_util.js +++ b/jstests/libs/uuid_util.js @@ -32,4 +32,4 @@ function getUUIDFromListCollections(db, collName) { function extractUUIDFromObject(uuid) { const uuidString = uuid.toString(); return uuidString.substring(6, uuidString.length - 2); -} \ No newline at end of file +} diff --git a/jstests/libs/wildcard_index_helpers.js b/jstests/libs/wildcard_index_helpers.js index 0977a0ad7be40..df712192fe978 100644 --- a/jstests/libs/wildcard_index_helpers.js +++ b/jstests/libs/wildcard_index_helpers.js @@ -1,12 +1,9 @@ /** * Common utility functions for testing functionality of Wildcard Indexes. */ +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; -"use strict"; - -const WildcardIndexHelpers = (function() { - load("jstests/libs/analyze_plan.js"); - +export const WildcardIndexHelpers = (function() { /** * Asserts that the given explain contains the given expectedIndexName in the winningPlan. */ diff --git a/jstests/libs/write_concern_util.js b/jstests/libs/write_concern_util.js index fff2517c75498..f02f7a65d8b14 100644 --- a/jstests/libs/write_concern_util.js +++ b/jstests/libs/write_concern_util.js @@ -154,4 +154,4 @@ function runWriteConcernRetryabilityTest(priConn, secConn, cmd, kNodes, dbName, checkWriteConcernTimedOut(testDB2.runCommand(cmd)); restartServerReplication(secConn); -} \ No newline at end of file +} diff --git a/jstests/multiVersion/genericBinVersion/batched_multi_deletes_large_transaction.js b/jstests/multiVersion/genericBinVersion/batched_multi_deletes_large_transaction.js deleted file mode 100644 index 6eac1f493556d..0000000000000 --- a/jstests/multiVersion/genericBinVersion/batched_multi_deletes_large_transaction.js +++ /dev/null @@ -1,59 +0,0 @@ -/** - * Tests that multi-deletes in a mixed version cluster replicate as individual delete operations. - * - * Batched multi-deletes were introduced in 6.1 so a replica set running in 6.0 FCV will not be - * able to take advantage of this feature. 
- * - * @tags: [ - * requires_replication, - * ] - */ -(function() { -'use strict'; - -function runTest(primaryBinVersion, secondaryBinVersion) { - const testLogPrefix = - 'primary-' + primaryBinVersion + '-secondary-' + secondaryBinVersion + ': '; - jsTestLog(testLogPrefix + 'Starting test case.'); - const rst = new ReplSetTest({ - nodes: [ - { - binVersion: primaryBinVersion, - }, - { - binVersion: secondaryBinVersion, - rsConfig: {votes: 0, priority: 0}, - }, - ] - }); - rst.startSet(); - rst.initiate(); - - let primary = rst.getPrimary(); - const db = primary.getDB('test'); - const coll = db.t; - - const docIds = [0, 1, 2, 3]; - assert.commandWorked(coll.insert(docIds.map((x) => { - return {_id: x, x: x}; - }))); - - assert.commandWorked(coll.remove({})); - // Check oplog entries generated for the multi-delete operation. - // Oplog entries will be returned in reverse timestamp order (most recent first). - const ops = rst.findOplog(primary, {op: 'd', ns: coll.getFullName()}).toArray(); - jsTestLog(testLogPrefix + 'applyOps oplog entries: ' + tojson(ops)); - assert.eq(ops.length, - docIds.length, - 'number oplog entries should match documents inserted initially'); - const deletedDocIds = ops.map((entry) => entry.o._id).flat(); - jsTestLog(testLogPrefix + 'deleted doc _ids: ' + tojson(deletedDocIds)); - assert.sameMembers(deletedDocIds, docIds); - - rst.stopSet(); - jsTestLog(testLogPrefix + 'Test case finished successfully.'); -} - -runTest('latest', 'last-lts'); -runTest('last-lts', 'latest'); -})(); diff --git a/jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js b/jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js index cbcc526127417..d0b2236980597 100644 --- a/jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js +++ b/jstests/multiVersion/genericBinVersion/load_keys_on_upgrade.js @@ -3,7 +3,7 @@ // admin.system.keys on upgrade. 
// -load('./jstests/multiVersion/libs/multi_rs.js'); +load('jstests/multiVersion/libs/multi_rs.js'); var oldVersion = "last-lts"; @@ -53,4 +53,4 @@ assert.eq(1, rsConn.getDB("admin").auth("root", "root")); assert.commandWorked(rsConn.adminCommand({hello: 1})); print("clusterTime2: " + tojson(rsConn.getDB("admin").getSession().getClusterTime())); -rst.stopSet(); \ No newline at end of file +rst.stopSet(); diff --git a/jstests/multiVersion/genericBinVersion/migration_between_mixed_version_mongods.js b/jstests/multiVersion/genericBinVersion/migration_between_mixed_version_mongods.js index 9bde5a6798725..9c85cf7390a33 100644 --- a/jstests/multiVersion/genericBinVersion/migration_between_mixed_version_mongods.js +++ b/jstests/multiVersion/genericBinVersion/migration_between_mixed_version_mongods.js @@ -7,7 +7,7 @@ // Checking UUID consistency involves talking to a shard node, which in this test is shutdown TestData.skipCheckingUUIDsConsistentAcrossCluster = true; -load("./jstests/multiVersion/libs/verify_versions.js"); +load("jstests/multiVersion/libs/verify_versions.js"); (function() { "use strict"; diff --git a/jstests/multiVersion/genericBinVersion/minor_version_upgrade_replset.js b/jstests/multiVersion/genericBinVersion/minor_version_upgrade_replset.js index a0dffdf776e86..7389b496f8e36 100644 --- a/jstests/multiVersion/genericBinVersion/minor_version_upgrade_replset.js +++ b/jstests/multiVersion/genericBinVersion/minor_version_upgrade_replset.js @@ -2,8 +2,8 @@ // Tests upgrading a replica set // -load('./jstests/multiVersion/libs/multi_rs.js'); -load('./jstests/libs/test_background_ops.js'); +load('jstests/multiVersion/libs/multi_rs.js'); +load('jstests/libs/test_background_ops.js'); var oldVersion = "last-lts"; diff --git a/jstests/multiVersion/genericBinVersion/mixed_replica_set_with_latest_primary.js b/jstests/multiVersion/genericBinVersion/mixed_replica_set_with_latest_primary.js index 546ae94e95b7f..751b3a66dde53 100644 --- a/jstests/multiVersion/genericBinVersion/mixed_replica_set_with_latest_primary.js +++ b/jstests/multiVersion/genericBinVersion/mixed_replica_set_with_latest_primary.js @@ -4,7 +4,7 @@ (function() { "use strict"; -load('./jstests/multiVersion/libs/multi_rs.js'); +load('jstests/multiVersion/libs/multi_rs.js'); const lastLTSVersion = "last-lts"; const latestVersion = "latest"; diff --git a/jstests/multiVersion/genericBinVersion/rollback_capped_deletions.js b/jstests/multiVersion/genericBinVersion/rollback_capped_deletions.js index a01eef7abd31d..a477ba3fe1de5 100644 --- a/jstests/multiVersion/genericBinVersion/rollback_capped_deletions.js +++ b/jstests/multiVersion/genericBinVersion/rollback_capped_deletions.js @@ -47,4 +47,4 @@ try { // The fast count checks occur when tearing down the fixture as part of the consistency checks. rollbackTest.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/multiVersion/genericBinVersion/skip_level_upgrade.js b/jstests/multiVersion/genericBinVersion/skip_level_upgrade.js index 103d31a423913..221e353cb666c 100644 --- a/jstests/multiVersion/genericBinVersion/skip_level_upgrade.js +++ b/jstests/multiVersion/genericBinVersion/skip_level_upgrade.js @@ -31,7 +31,6 @@ const defaultOptions = { // This lists all binary versions older than the last-lts version. 
const versions = [ - {binVersion: '4.2', testCollection: 'four_two'}, {binVersion: '5.0', testCollection: 'five_zero'}, ]; diff --git a/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js b/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js index 3b9bbf90cabb6..492fb5d66af00 100644 --- a/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js +++ b/jstests/multiVersion/genericBinVersion/timeseries_collection_mixed_type.js @@ -11,7 +11,11 @@ load('jstests/multiVersion/libs/multi_rs.js'); const timeFieldName = "time"; // Note that this list will need to be kept up to date as versions are added/dropped. -const upgradeVersions = [{binVersion: "6.0", fcv: "6.0"}, {binVersion: "latest"}]; +const upgradeVersions = [ + {binVersion: "6.0", fcv: "6.0"}, + {binVersion: "last-lts", fcv: lastLTSFCV}, + {binVersion: "latest"} +]; /* * Creates a collection, populates it with `docs`, runs the `query` and ensures that the result set diff --git a/jstests/multiVersion/genericSetFCVUsage/1_test_launching_replset.js b/jstests/multiVersion/genericSetFCVUsage/1_test_launching_replset.js index 69c110df30faf..5dfb74acd2113 100644 --- a/jstests/multiVersion/genericSetFCVUsage/1_test_launching_replset.js +++ b/jstests/multiVersion/genericSetFCVUsage/1_test_launching_replset.js @@ -3,7 +3,7 @@ // // -load('./jstests/multiVersion/libs/verify_versions.js'); +load('jstests/multiVersion/libs/verify_versions.js'); (function() { "use strict"; diff --git a/jstests/multiVersion/genericSetFCVUsage/2_test_launching_cluster.js b/jstests/multiVersion/genericSetFCVUsage/2_test_launching_cluster.js index 00e025e910db2..1e0b1561a7938 100644 --- a/jstests/multiVersion/genericSetFCVUsage/2_test_launching_cluster.js +++ b/jstests/multiVersion/genericSetFCVUsage/2_test_launching_cluster.js @@ -3,7 +3,7 @@ // // -load('./jstests/multiVersion/libs/verify_versions.js'); +load('jstests/multiVersion/libs/verify_versions.js'); (function() { "use strict"; diff --git a/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js b/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js index 976a2f01c52d3..56f5a7399da6c 100644 --- a/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js +++ b/jstests/multiVersion/genericSetFCVUsage/3_upgrade_replset.js @@ -2,8 +2,8 @@ // Tests upgrading then downgrading a replica set // -load('./jstests/multiVersion/libs/multi_rs.js'); -load('./jstests/libs/test_background_ops.js'); +load('jstests/multiVersion/libs/multi_rs.js'); +load('jstests/libs/test_background_ops.js'); for (let oldVersion of ["last-lts", "last-continuous"]) { jsTest.log("Testing upgrade/downgrade with " + oldVersion); diff --git a/jstests/multiVersion/genericSetFCVUsage/abort_unprepared_transactions_on_FCV_downgrade.js b/jstests/multiVersion/genericSetFCVUsage/abort_unprepared_transactions_on_FCV_downgrade.js index 603caeac82632..91ceb52c68737 100644 --- a/jstests/multiVersion/genericSetFCVUsage/abort_unprepared_transactions_on_FCV_downgrade.js +++ b/jstests/multiVersion/genericSetFCVUsage/abort_unprepared_transactions_on_FCV_downgrade.js @@ -7,7 +7,7 @@ (function() { "use strict"; -function runTest(downgradeFCV) { +function runTest(downgradeFCV, succeedDowngrade) { const rst = new ReplSetTest({nodes: [{binVersion: "latest"}]}); rst.startSet(); rst.initiate(); @@ -34,9 +34,20 @@ function runTest(downgradeFCV) { assert.commandFailedWithCode(testDB.runCommand({drop: collName, maxTimeMS: 1000}), ErrorCodes.MaxTimeMSExpired); - jsTestLog("Downgrade the 
featureCompatibilityVersion."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV})); - checkFCV(adminDB, downgradeFCV); + if (succeedDowngrade) { + jsTestLog("Downgrade the featureCompatibilityVersion."); + assert.commandWorked( + testDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV})); + checkFCV(adminDB, downgradeFCV); + } else { + jsTestLog( + "Downgrade the featureCompatibilityVersion but fail after transitioning to the intermediary downgrading state."); + assert.commandWorked( + primary.adminCommand({configureFailPoint: 'failDowngrading', mode: "alwaysOn"})); + assert.commandFailedWithCode( + testDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}), 549181); + checkFCV(adminDB, downgradeFCV, downgradeFCV); + } jsTestLog("Drop the collection. This should succeed, since the transaction was aborted."); assert.commandWorked(testDB.runCommand({drop: collName})); @@ -45,17 +56,22 @@ function runTest(downgradeFCV) { assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction); } finally { - jsTestLog("Restore the original featureCompatibilityVersion."); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); + // We can't upgrade from "downgrading to lastContinuous" -> latest. + if (succeedDowngrade || downgradeFCV == lastLTSFCV) { + jsTestLog("Restore the original featureCompatibilityVersion."); + assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); + checkFCV(adminDB, latestFCV); + } } session.endSession(); rst.stopSet(); } -runTest(lastLTSFCV); +runTest(lastLTSFCV, true /* succeedDowngrade */); +runTest(lastLTSFCV, false /* succeedDowngrade */); if (lastLTSFCV !== lastContinuousFCV) { - runTest(lastContinuousFCV); + runTest(lastContinuousFCV, true /* succeedDowngrade */); + runTest(lastContinuousFCV, false /* succeedDowngrade */); } }()); diff --git a/jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js b/jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js deleted file mode 100644 index eabdfe3e40ee1..0000000000000 --- a/jstests/multiVersion/genericSetFCVUsage/balancer_multiVersion_detect.js +++ /dev/null @@ -1,34 +0,0 @@ -// -// Test checks whether the balancer correctly detects a mixed set of shards -// - -// Test mixed version between "latest" and "last-lts"/"last-continuous". 
-for (let versions of [["latest", "last-lts"], ["latest", "last-continuous"]]) { - jsTest.log("Starting cluster with shard binVersion: " + tojson(versions)); - - var options = { - mongosOptions: {verbose: 1, useLogFiles: true}, - configOptions: {}, - shardOptions: {binVersion: versions}, - enableBalancer: true - }; - - var st = new ShardingTest({shards: 3, mongos: 1, other: options}); - - var mongos = st.s0; - var admin = mongos.getDB("admin"); - var coll = mongos.getCollection("foo.bar"); - - printjson(admin.runCommand({enableSharding: coll.getDB() + ""})); - st.ensurePrimaryShard(coll.getDB().getName(), st.shard1.shardName); - printjson(admin.runCommand({shardCollection: coll + "", key: {_id: 1}})); - - assert.soon(function() { - var log = cat(mongos.fullOptions.logFile); - return /multiVersion cluster detected/.test(log); - }, "multiVersion warning not printed!", 30 * 16 * 60 * 1000, 5 * 1000); - - st.stop(); - - jsTest.log("DONE!"); -} diff --git a/jstests/multiVersion/genericSetFCVUsage/can_upgrade_FCV_after_failed_downgrade.js b/jstests/multiVersion/genericSetFCVUsage/can_upgrade_FCV_after_failed_downgrade.js index 66eaca15793eb..af1c209ae5614 100644 --- a/jstests/multiVersion/genericSetFCVUsage/can_upgrade_FCV_after_failed_downgrade.js +++ b/jstests/multiVersion/genericSetFCVUsage/can_upgrade_FCV_after_failed_downgrade.js @@ -5,11 +5,7 @@ * @tags: [requires_fcv_70] */ -(function() { -"use strict"; - load("jstests/libs/fail_point_util.js"); -load("jstests/libs/feature_flag_util.js"); const latest = "latest"; @@ -188,5 +184,4 @@ function runShardingTest() { runStandaloneTest(); runReplicaSetTest(); testConfigServerFCVTimestampIsAlwaysNewer(); -runShardingTest(); -})(); +runShardingTest(); \ No newline at end of file diff --git a/jstests/multiVersion/genericSetFCVUsage/cannot_downgrade_from_latest_to_last_continuous.js b/jstests/multiVersion/genericSetFCVUsage/cannot_downgrade_from_latest_to_last_continuous.js index 37a931971aeaf..87dbea4ebca69 100644 --- a/jstests/multiVersion/genericSetFCVUsage/cannot_downgrade_from_latest_to_last_continuous.js +++ b/jstests/multiVersion/genericSetFCVUsage/cannot_downgrade_from_latest_to_last_continuous.js @@ -66,9 +66,11 @@ function runShardingTest() { st.stop(); } -runStandaloneTest(); -runReplicaSetTest(); -runShardingTest(); +if (lastContinuousFCV != lastLTSFCV) { + runStandaloneTest(); + runReplicaSetTest(); + runShardingTest(); +} TestData.setParameters.disableTransitionFromLatestToLastContinuous = false; })(); diff --git a/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js index f72d4e764e84b..9d33dd7ae99a9 100644 --- a/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js +++ b/jstests/multiVersion/genericSetFCVUsage/collection_validator_feature_compatibility_version.js @@ -13,7 +13,7 @@ const testName = "collection_validator_feature_compatibility_version"; const dbpath = MongoRunner.dataPath + testName; // An array of feature flags that must be enabled to run feature flag tests. -const featureFlagsToEnable = ["featureFlagUserRoles"]; +const featureFlagsToEnable = []; // These arrays should be populated with // @@ -32,25 +32,12 @@ const testCasesLastContinuous = [ // ]; const testCasesLastContinuousWithFeatureFlags = [ - // TODO SERVER-70689: Remove this case when 7.0 becomes lastLTS. 
- { - validator: {$expr: {$eq: ["$$USER_ROLES", []]}}, - nonMatchingDocument: {a: 1}, - lastStableErrCode: 17276 - } + ]; const testCasesLastStable = testCasesLastContinuous.concat([]); const testCasesLastStableWithFeatureFlags = testCasesLastContinuousWithFeatureFlags.concat([]); -// The addition of the $$USER_ROLES system variable is slightly different than the usual use case of -// this test file. This means that some of the following commands won't work/fail as expected for -// the $$USER_ROLES test case. -// TODO SERVER-70689: Remove this function and references to it. -function testCaseDoesNotReferenceUserRoles(testCase) { - return testCase.validator.$expr.$eq[0] != "$$USER_ROLES"; -} - // Tests Feature Compatibility Version behavior of the validator of a collection by executing test // cases 'testCases' and using a previous stable version 'lastVersion' of mongod. 'lastVersion' can // have values "last-lts" and "last-continuous". @@ -244,29 +231,21 @@ function testCollectionValidatorFCVBehavior(lastVersion, testCases, featureFlags testDB = conn.getDB(testName); testCases.forEach(function(test, i) { - // In this case, using $$USER_ROLES on the last FCV will cause the collection - // creation to fail during parsing because the necessary feature flag will not have been - // enabled. - // TODO SERVER-70689: Remove the guard of this if-statement and keep the body. - if (testCaseDoesNotReferenceUserRoles(test)) { - const coll = testDB["coll3" + i]; - // Even though the feature compatibility version is the last version, we should still - // be able to add a validator using new query features, because - // internalValidateFeaturesAsPrimary is false. - assert.commandWorked( - testDB.createCollection(coll.getName(), {validator: test.validator}), - `Expected to be able to create collection with validator ${ - tojson(test.validator)}`); - - // We should also be able to modify a collection to have a validator using new query - // features. - coll.drop(); - assert.commandWorked(testDB.createCollection(coll.getName())); - assert.commandWorked( - testDB.runCommand({collMod: coll.getName(), validator: test.validator}), - `Expected to be able to modify collection validator to be ${ - tojson(test.validator)}`); - } + const coll = testDB["coll3" + i]; + // Even though the feature compatibility version is the last version, we should still + // be able to add a validator using new query features, because + // internalValidateFeaturesAsPrimary is false. + assert.commandWorked( + testDB.createCollection(coll.getName(), {validator: test.validator}), + `Expected to be able to create collection with validator ${tojson(test.validator)}`); + + // We should also be able to modify a collection to have a validator using new query + // features. + coll.drop(); + assert.commandWorked(testDB.createCollection(coll.getName())); + assert.commandWorked( + testDB.runCommand({collMod: coll.getName(), validator: test.validator}), + `Expected to be able to modify collection validator to be ${tojson(test.validator)}`); }); MongoRunner.stopMongod(conn); diff --git a/jstests/multiVersion/genericSetFCVUsage/default_startup_FCV_parameter.js b/jstests/multiVersion/genericSetFCVUsage/default_startup_FCV_parameter.js new file mode 100644 index 0000000000000..e4b552b4dcffe --- /dev/null +++ b/jstests/multiVersion/genericSetFCVUsage/default_startup_FCV_parameter.js @@ -0,0 +1,244 @@ +/** + * Tests the defaultStartupFCV startup parameter. 
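+ * Covers standalones, replica sets, and sharded clusters started with lastLTS, lastContinuous,
+ * latest, and invalid defaultStartupFCV values, and verifies that an existing on-disk FCV takes
+ * precedence over the parameter.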
+ */ + +(function() { +"use strict"; + +TestData.setParameters = TestData.setParameters || {}; +TestData.setParameters.disableTransitionFromLatestToLastContinuous = true; + +const latest = "latest"; +const testName = "default_startup_FCV_parameter"; +const dbpath = MongoRunner.dataPath + testName; +resetDbpath(dbpath); + +function runStandaloneTest() { + jsTestLog("Test starting with defaultStartupFCV = lastLTS"); + let conn = MongoRunner.runMongod( + {binVersion: latest, setParameter: "defaultStartupFCV=" + lastLTSFCV}); + assert.neq(null, conn); + let adminDB = conn.getDB("admin"); + checkFCV(adminDB, lastLTSFCV); + MongoRunner.stopMongod(conn); + + jsTestLog("Test starting with defaultStartupFCV = lastContinuous"); + conn = MongoRunner.runMongod({ + binVersion: latest, + dbpath: dbpath, + setParameter: "defaultStartupFCV=" + lastContinuousFCV + }); + assert.neq(null, conn); + adminDB = conn.getDB("admin"); + checkFCV(adminDB, lastContinuousFCV); + MongoRunner.stopMongod(conn); + + clearRawMongoProgramOutput(); + jsTestLog("Test starting with defaultStartupFCV when there is already an existing FCV."); + conn = MongoRunner.runMongod({ + binVersion: latest, + dbpath: dbpath, + noCleanData: true, + setParameter: "defaultStartupFCV=" + lastLTSFCV + }); + assert.neq(null, conn); + adminDB = conn.getDB("admin"); + // The FCV should still be the original FCV, not the provided defaultStartupFCV. + checkFCV(adminDB, lastContinuousFCV); + assert(rawMongoProgramOutput().includes( + "Ignoring the provided defaultStartupFCV parameter since the FCV already exists")); + MongoRunner.stopMongod(conn); + + jsTestLog("Test starting with defaultStartupFCV = latest"); + conn = + MongoRunner.runMongod({binVersion: latest, setParameter: "defaultStartupFCV=" + latestFCV}); + assert.neq(null, conn); + adminDB = conn.getDB("admin"); + checkFCV(adminDB, latestFCV); + MongoRunner.stopMongod(conn); + + clearRawMongoProgramOutput(); + jsTestLog("Test starting with invalid defaultStartupFCV, FCV should default to latest"); + conn = MongoRunner.runMongod({binVersion: latest, setParameter: "defaultStartupFCV=hello"}); + assert.neq(null, conn); + adminDB = conn.getDB("admin"); + checkFCV(adminDB, latestFCV); + assert(rawMongoProgramOutput().includes("The provided 'defaultStartupFCV' is not a valid FCV")); + MongoRunner.stopMongod(conn); + + clearRawMongoProgramOutput(); + jsTestLog("Test starting with invalid defaultStartupFCV, FCV should default to latest"); + conn = MongoRunner.runMongod({binVersion: latest, setParameter: "defaultStartupFCV=5.0"}); + assert.neq(null, conn); + adminDB = conn.getDB("admin"); + checkFCV(adminDB, latestFCV); + assert(rawMongoProgramOutput().includes("The provided 'defaultStartupFCV' is not a valid FCV")); + MongoRunner.stopMongod(conn); +} + +function runReplicaSetTest() { + jsTestLog("Test starting with defaultStartupFCV = lastLTS"); + let rst = new ReplSetTest({ + nodes: [ + { + binVersion: latest, + setParameter: {defaultStartupFCV: lastLTSFCV}, + + }, + { + binVersion: latest, + // The second node will initial sync from the primary and end up with lastLTSFCV. 
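+                // even though its own defaultStartupFCV below requests lastContinuousFCV.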
+ setParameter: {defaultStartupFCV: lastContinuousFCV}, + rsConfig: {priority: 0}, + } + ] + }); + rst.startSet(); + rst.initiate(); + assert.neq(null, rst); + let primaryAdminDB = rst.getPrimary().getDB("admin"); + let secondaryAdminDB = rst.getSecondary().getDB("admin"); + checkFCV(primaryAdminDB, lastLTSFCV); + checkFCV(secondaryAdminDB, lastLTSFCV); + rst.stopSet(); + + jsTestLog("Test starting with defaultStartupFCV = lastContinuous"); + rst = new ReplSetTest({ + nodes: [ + { + binVersion: latest, + dbpath: dbpath + "1", + setParameter: {defaultStartupFCV: lastContinuousFCV}, + + }, + { + binVersion: latest, + dbpath: dbpath + "2", + // The second node will initial sync from the primary and end up with + // lastContinuousFCV. + setParameter: {defaultStartupFCV: lastLTSFCV}, + rsConfig: {priority: 0}, + } + ] + }); + rst.startSet(); + rst.initiate(); + assert.neq(null, rst); + primaryAdminDB = rst.getPrimary().getDB("admin"); + secondaryAdminDB = rst.getSecondary().getDB("admin"); + checkFCV(primaryAdminDB, lastContinuousFCV); + checkFCV(secondaryAdminDB, lastContinuousFCV); + rst.stopSet(null /* signal */, true /* forRestart */); + + clearRawMongoProgramOutput(); + jsTestLog("Test starting with defaultStartupFCV when there is already an existing FCV."); + rst.startSet({restart: true, setParameter: {defaultStartupFCV: lastLTSFCV}}); + assert.neq(null, rst); + primaryAdminDB = rst.getPrimary().getDB("admin"); + secondaryAdminDB = rst.getSecondary().getDB("admin"); + // The FCV should still be the original FCV, not the provided defaultStartupFCV. + checkFCV(primaryAdminDB, lastContinuousFCV); + checkFCV(secondaryAdminDB, lastContinuousFCV); + rst.stopSet(); + + jsTestLog("Test starting with defaultStartupFCV = latest"); + rst = new ReplSetTest({ + nodes: 2, + nodeOptions: {binVersion: latest, setParameter: {defaultStartupFCV: latestFCV}} + }); + rst.startSet(); + rst.initiate(); + assert.neq(null, rst); + primaryAdminDB = rst.getPrimary().getDB("admin"); + secondaryAdminDB = rst.getSecondary().getDB("admin"); + checkFCV(primaryAdminDB, latestFCV); + checkFCV(secondaryAdminDB, latestFCV); + rst.stopSet(); + + clearRawMongoProgramOutput(); + jsTestLog("Test starting with invalid defaultStartupFCV, FCV should default to latest"); + rst = new ReplSetTest( + {nodes: 2, nodeOptions: {binVersion: latest, setParameter: {defaultStartupFCV: "hello"}}}); + rst.startSet(); + rst.initiate(); + assert.neq(null, rst); + primaryAdminDB = rst.getPrimary().getDB("admin"); + secondaryAdminDB = rst.getSecondary().getDB("admin"); + checkFCV(primaryAdminDB, latestFCV); + checkFCV(secondaryAdminDB, latestFCV); + assert(rawMongoProgramOutput().includes("The provided 'defaultStartupFCV' is not a valid FCV")); + rst.stopSet(); + + clearRawMongoProgramOutput(); + jsTestLog("Test starting with invalid defaultStartupFCV, FCV should default to latest"); + rst = new ReplSetTest( + {nodes: 2, nodeOptions: {binVersion: latest, setParameter: {defaultStartupFCV: "5.0"}}}); + rst.startSet(); + rst.initiate(); + assert.neq(null, rst); + primaryAdminDB = rst.getPrimary().getDB("admin"); + secondaryAdminDB = rst.getSecondary().getDB("admin"); + checkFCV(primaryAdminDB, latestFCV); + checkFCV(secondaryAdminDB, latestFCV); + assert(rawMongoProgramOutput().includes("The provided 'defaultStartupFCV' is not a valid FCV")); + rst.stopSet(); +} + +function runShardingTest() { + jsTestLog("Test starting sharded cluster with defaultStartupFCV = lastLTS"); + let st = new ShardingTest({ + shards: 2, + mongos: 1, + config: 1, + // Shards 
should ignore the defaultStartupFCV parameter. + shardOptions: {binVersion: latest, setParameter: {defaultStartupFCV: latestFCV}}, + configOptions: {binVersion: latest, setParameter: {defaultStartupFCV: lastLTSFCV}} + }); + let configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin"); + let shard0PrimaryAdminDB = st.rs0.getPrimary().getDB("admin"); + let shard1PrimaryAdminDB = st.rs1.getPrimary().getDB("admin"); + + checkFCV(configPrimaryAdminDB, lastLTSFCV); + checkFCV(shard0PrimaryAdminDB, lastLTSFCV); + checkFCV(shard1PrimaryAdminDB, lastLTSFCV); + st.stop(); + + st = new ShardingTest({ + shards: 2, + mongos: 1, + config: 1, + configOptions: {binVersion: latest, setParameter: {defaultStartupFCV: lastContinuousFCV}} + }); + configPrimaryAdminDB = st.configRS.getPrimary().getDB("admin"); + shard0PrimaryAdminDB = st.rs0.getPrimary().getDB("admin"); + shard1PrimaryAdminDB = st.rs1.getPrimary().getDB("admin"); + + checkFCV(configPrimaryAdminDB, lastContinuousFCV); + checkFCV(shard0PrimaryAdminDB, lastContinuousFCV); + checkFCV(shard1PrimaryAdminDB, lastContinuousFCV); + + jsTestLog("Test that a replica set started with shardsvr still defaults to lastLTS"); + const newShard = new ReplSetTest({ + nodes: 2, + nodeOptions: {binVersion: latest, setParameter: {defaultStartupFCV: latestFCV}} + }); + newShard.startSet({shardsvr: ''}); + newShard.initiate(); + + let primaryAdminDB = newShard.getPrimary().getDB("admin"); + let secondaryAdminDB = newShard.getSecondary().getDB("admin"); + checkFCV(primaryAdminDB, lastLTSFCV); + checkFCV(secondaryAdminDB, lastLTSFCV); + assert.commandWorked(st.s.adminCommand({addShard: newShard.getURL(), name: newShard.name})); + + jsTestLog("Test that the FCV should be set to the cluster's FCV after running addShard"); + checkFCV(primaryAdminDB, lastContinuousFCV); + checkFCV(secondaryAdminDB, lastContinuousFCV); + newShard.stopSet(); + st.stop(); +} + +runStandaloneTest(); +runReplicaSetTest(); +runShardingTest(); +})(); diff --git a/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js b/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js index e5c79d024bdce..b701d8a0859de 100644 --- a/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js +++ b/jstests/multiVersion/genericSetFCVUsage/downgrade_replset.js @@ -1,7 +1,7 @@ // Test the downgrade of a replica set succeeds, while reads and writes continue. 
-load('./jstests/multiVersion/libs/multi_rs.js'); -load('./jstests/libs/test_background_ops.js'); +load('jstests/multiVersion/libs/multi_rs.js'); +load('jstests/libs/test_background_ops.js'); let newVersion = "latest"; diff --git a/jstests/multiVersion/genericSetFCVUsage/fcv_upgrade_fails_during_is_cleaning_server_metadata.js b/jstests/multiVersion/genericSetFCVUsage/fcv_upgrade_fails_during_is_cleaning_server_metadata.js index 8c598bad5a1d7..4e79e2e31aa6a 100644 --- a/jstests/multiVersion/genericSetFCVUsage/fcv_upgrade_fails_during_is_cleaning_server_metadata.js +++ b/jstests/multiVersion/genericSetFCVUsage/fcv_upgrade_fails_during_is_cleaning_server_metadata.js @@ -4,11 +4,7 @@ * @tags: [requires_fcv_70] */ -(function() { -"use strict"; - load("jstests/libs/fail_point_util.js"); -load("jstests/libs/feature_flag_util.js"); const latest = "latest"; const testName = "fcv_upgrade_fails_during_is_cleaning_server_metadata"; @@ -147,5 +143,4 @@ function runShardedClusterTest() { runStandaloneTest(); runReplicaSetTest(); -runShardedClusterTest(); -})(); +runShardedClusterTest(); \ No newline at end of file diff --git a/jstests/multiVersion/genericSetFCVUsage/initial_sync_downgraded_from_latest.js b/jstests/multiVersion/genericSetFCVUsage/initial_sync_downgraded_from_latest.js index 12959cf2d111f..eb691cdf53035 100644 --- a/jstests/multiVersion/genericSetFCVUsage/initial_sync_downgraded_from_latest.js +++ b/jstests/multiVersion/genericSetFCVUsage/initial_sync_downgraded_from_latest.js @@ -5,7 +5,7 @@ 'use strict'; -load("./jstests/multiVersion/libs/initial_sync.js"); +load("jstests/multiVersion/libs/initial_sync.js"); let replSetVersion = "latest"; diff --git a/jstests/multiVersion/genericSetFCVUsage/initial_sync_latest_from_downgraded.js b/jstests/multiVersion/genericSetFCVUsage/initial_sync_latest_from_downgraded.js index d3f33a98f13b7..0e9dcb9154132 100644 --- a/jstests/multiVersion/genericSetFCVUsage/initial_sync_latest_from_downgraded.js +++ b/jstests/multiVersion/genericSetFCVUsage/initial_sync_latest_from_downgraded.js @@ -5,7 +5,7 @@ 'use strict'; -load("./jstests/multiVersion/libs/initial_sync.js"); +load("jstests/multiVersion/libs/initial_sync.js"); let newSecondaryVersion = "latest"; diff --git a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js index 85949d3d06476..fb6a5f25aa7f7 100644 --- a/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js +++ b/jstests/multiVersion/genericSetFCVUsage/major_version_upgrade.js @@ -31,12 +31,15 @@ const defaultOptions = { // This lists all supported releases and needs to be kept up to date as versions are added and // dropped. 
const versions = [ - {binVersion: '4.2', featureCompatibilityVersion: '4.2', testCollection: 'four_two'}, {binVersion: '4.4', featureCompatibilityVersion: '4.4', testCollection: 'four_four'}, {binVersion: '5.0', featureCompatibilityVersion: '5.0', testCollection: 'five_zero'}, {binVersion: '6.0', featureCompatibilityVersion: '6.0', testCollection: 'six_zero'}, - {binVersion: 'last-lts', testCollection: 'last_lts'}, - {binVersion: 'last-continuous', testCollection: 'last_continuous'}, + {binVersion: 'last-lts', featureCompatibilityVersion: lastLTSFCV, testCollection: 'last_lts'}, + { + binVersion: 'last-continuous', + featureCompatibilityVersion: lastContinuousFCV, + testCollection: 'last_continuous' + }, {binVersion: 'latest', featureCompatibilityVersion: latestFCV, testCollection: 'latest'}, ]; diff --git a/jstests/multiVersion/genericSetFCVUsage/restart_during_downgrading_fcv.js b/jstests/multiVersion/genericSetFCVUsage/restart_during_downgrading_fcv.js index dde2219aeba76..5aa55f5576f95 100644 --- a/jstests/multiVersion/genericSetFCVUsage/restart_during_downgrading_fcv.js +++ b/jstests/multiVersion/genericSetFCVUsage/restart_during_downgrading_fcv.js @@ -3,11 +3,6 @@ * FCV is still in downgrading state and we can change FCV to upgraded state. */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); - const latest = "latest"; const testName = "restart_during_downgrading_fcv"; const dbpath = MongoRunner.dataPath + testName; @@ -91,5 +86,4 @@ const runShardedClusterTest = function() { runStandaloneTest(); runReplicaSetTest(); -runShardedClusterTest(); -})(); +runShardedClusterTest(); \ No newline at end of file diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_downgraded_to_latest.js b/jstests/multiVersion/genericSetFCVUsage/rollback_downgraded_to_latest.js index 1978172b68fee..3b18543ea7af7 100644 --- a/jstests/multiVersion/genericSetFCVUsage/rollback_downgraded_to_latest.js +++ b/jstests/multiVersion/genericSetFCVUsage/rollback_downgraded_to_latest.js @@ -14,4 +14,4 @@ testMultiversionRollback(testName, "last-lts", "latest"); testName = "multiversion_rollback_last_continuous_to_latest"; jsTestLog("Testing multiversion rollback from last-continuous to latest"); testMultiversionRollback(testName, "last-continuous", "latest"); -})(); \ No newline at end of file +})(); diff --git a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_downgraded.js b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_downgraded.js index d8328df339679..84b818421e35d 100644 --- a/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_downgraded.js +++ b/jstests/multiVersion/genericSetFCVUsage/rollback_latest_to_downgraded.js @@ -14,4 +14,4 @@ testMultiversionRollback(testName, "latest", "last-lts"); var testName = "multiversion_rollback_latest_to_last_continuous"; jsTestLog("Testing multiversion rollback from latest to last-continuous"); testMultiversionRollback(testName, "latest", "last-continuous"); -})(); \ No newline at end of file +})(); diff --git a/jstests/multiVersion/genericSetFCVUsage/run_feature_flag_multiversion_test.js b/jstests/multiVersion/genericSetFCVUsage/run_feature_flag_multiversion_test.js index af1552dbbf9c8..0233592830aa2 100644 --- a/jstests/multiVersion/genericSetFCVUsage/run_feature_flag_multiversion_test.js +++ b/jstests/multiVersion/genericSetFCVUsage/run_feature_flag_multiversion_test.js @@ -22,8 +22,11 @@ function runTest(downgradeFCV) { let primary = rst.getPrimary(); let adminDB = primary.getDB("admin"); - 
assert.commandWorked(adminDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV})); - checkFCV(adminDB, downgradeFCV); + assert.commandWorked( + primary.adminCommand({configureFailPoint: 'failDowngrading', mode: "alwaysOn"})); + assert.commandFailedWithCode( + adminDB.adminCommand({setFeatureCompatibilityVersion: downgradeFCV}), 549181); + checkFCV(adminDB, downgradeFCV, downgradeFCV); if (downgradeFCV === lastLTSFCV) { numLastLTSRuns++; } @@ -73,4 +76,4 @@ if (lastLTSFCV === lastContinuousFCV) { assert.eq(numLastLTSRuns, 2); assert.eq(numLastContRuns, 1); } -})(); \ No newline at end of file +})(); diff --git a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js index 3e74869a014cf..e30a6fc042279 100644 --- a/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js +++ b/jstests/multiVersion/genericSetFCVUsage/set_feature_compatibility_version.js @@ -9,13 +9,9 @@ TestData.skipCheckingUUIDsConsistentAcrossCluster = true; // data. TestData.skipCheckDBHashes = true; -(function() { -"use strict"; - load("jstests/libs/index_catalog_helpers.js"); load("jstests/libs/write_concern_util.js"); load("jstests/replsets/rslib.js"); -load("jstests/libs/feature_flag_util.js"); let dbpath = MongoRunner.dataPath + "feature_compatibility_version"; resetDbpath(dbpath); @@ -282,16 +278,6 @@ function runReplicaSetTest(downgradeVersion) { assert.commandFailedWithCode(res, ErrorCodes.WriteConcernFailed); restartServerReplication(secondary); - // If downgrading->upgrading feature is not enabled, - // upgrading the FCV should fail if a previous downgrade has not yet completed. - if (!FeatureFlagUtil.isEnabled(primaryAdminDB, - "DowngradingToUpgrading", - null /* user not specified */, - true /* ignores FCV */)) { - assert.commandFailedWithCode( - primary.adminCommand({setFeatureCompatibilityVersion: latestFCV}), 5147403); - } - if (lastContinuousFCV !== lastLTSFCV) { // We will fail if we have not yet completed a downgrade and attempt to downgrade to a // different target version. @@ -533,4 +519,3 @@ if (lastLTSFCV != lastContinuousFCV) { runReplicaSetTest('last-continuous'); runShardingTest('last-continuous'); } -})(); diff --git a/jstests/multiVersion/genericSetFCVUsage/setfcv_aborts_reshard_collection.js b/jstests/multiVersion/genericSetFCVUsage/setfcv_aborts_reshard_collection.js index a15d2bbb5ef26..009415f8ebec8 100644 --- a/jstests/multiVersion/genericSetFCVUsage/setfcv_aborts_reshard_collection.js +++ b/jstests/multiVersion/genericSetFCVUsage/setfcv_aborts_reshard_collection.js @@ -1,16 +1,17 @@ /** * Tests that setFeatureCompatibilityVersion command aborts an ongoing reshardCollection command */ -(function() { -"use strict"; - +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/parallel_shell_helpers.js"); load("jstests/sharding/libs/resharding_test_fixture.js"); load('jstests/libs/discover_topology.js'); load('jstests/libs/fail_point_util.js'); load('jstests/sharding/libs/sharded_transactions_helpers.js'); -function runTest(forcePooledConnectionsDropped) { +// Global variable is used to avoid spinning up a set of servers just to see if the +// feature flag is enabled. 
+let reshardingImprovementsEnabled; +function runTest({forcePooledConnectionsDropped, withUUID}) { const reshardingTest = new ReshardingTest({numDonors: 2, numRecipients: 2, reshardInPlace: true}); reshardingTest.setup(); @@ -29,6 +30,16 @@ function runTest(forcePooledConnectionsDropped) { let mongos = inputCollection.getMongo(); + if (reshardingImprovementsEnabled === undefined) { + reshardingImprovementsEnabled = FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements"); + } + if (withUUID && !reshardingImprovementsEnabled) { + jsTestLog("Skipping test with UUID since featureFlagReshardingImprovements is not enabled"); + reshardingTest.tearDown(); + } + jsTestLog("Testing with forcePooledConnectionsDropped: " + forcePooledConnectionsDropped + + " withUUID: " + withUUID); + for (let x = 0; x < 1000; x++) { assert.commandWorked(inputCollection.insert({oldKey: x, newKey: -1 * x})); } @@ -48,12 +59,14 @@ function runTest(forcePooledConnectionsDropped) { const coordinatorDoc = mongos.getCollection("config.reshardingOperations").findOne({ns: sourceNamespace}); - return coordinatorDoc === null || coordinatorDoc.state === "aborting"; + return coordinatorDoc === null || coordinatorDoc.state === "aborting" || + coordinatorDoc.state === "quiesced"; }); } const recipientShardNames = reshardingTest.recipientShardNames; let awaitShell; + let reshardingUUID = withUUID ? UUID() : undefined; reshardingTest.withReshardingInBackground( { newShardKeyPattern: {newKey: 1}, @@ -61,6 +74,7 @@ function runTest(forcePooledConnectionsDropped) { {min: {newKey: MinKey}, max: {newKey: 0}, shard: recipientShardNames[0]}, {min: {newKey: 0}, max: {newKey: MaxKey}, shard: recipientShardNames[1]}, ], + reshardingUUID: reshardingUUID }, () => { // Wait for config server to have started resharding before sending setFCV, otherwise @@ -149,7 +163,11 @@ function runTest(forcePooledConnectionsDropped) { // This test case forces the setFCV command to call dropsConnections while the coordinator is in // the process of establishing connections to the participant shards in order to ensure that the // resharding operation does not stall. -runTest(true); +runTest({forcePooledConnectionsDropped: true}); + +assert(reshardingImprovementsEnabled !== undefined); -runTest(false); -})(); +// We test with a UUID because we need for setFCV to abort the quiesce period as well, in order +// to completely clear the config server's state collection. Because this test takes a while +// we don't try all combinations of forcePooledCollectionsDropped and withUUID. 
+runTest({forcePooledConnectionsDropped: false, withUUID: reshardingImprovementsEnabled}); diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_abort_on_fcv_change.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_abort_on_fcv_change.js index 73326dd39ef96..0c9ebc56dfd4a 100644 --- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_abort_on_fcv_change.js +++ b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_abort_on_fcv_change.js @@ -8,6 +8,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' load("jstests/libs/parallelTester.js"); // for 'Thread' @@ -16,7 +17,7 @@ load("jstests/replsets/rslib.js"); // for 'setLogVerbosity' const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const tenantId = ObjectId().str; -const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const donorRst = tenantMigrationTest.getDonorRst(); diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch.js index c11f570e73511..4f0844c4133ee 100644 --- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch.js +++ b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch.js @@ -8,7 +8,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; +import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' load("jstests/libs/parallelTester.js"); // for 'Thread' @@ -17,7 +17,7 @@ function runTest(downgradeFCV) { const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch_after_failover.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch_after_failover.js deleted file mode 100644 index 191c1e7a80201..0000000000000 --- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_donor_recipient_fcv_mismatch_after_failover.js +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Tests that restarting a migration attempt after a failover fails if the donor and recipient no - * longer share the same FCV. - * @tags: [ - * requires_majority_read_concern, - * incompatible_with_windows_tls, - * # Shard merge is not robust to failovers and restarts. 
- * incompatible_with_shard_merge, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' -load("jstests/libs/parallelTester.js"); // for 'Thread' - -function runTest(downgradeFCV) { - const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); - - const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); - const collName = "testColl"; - - const donorPrimary = tenantMigrationTest.getDonorPrimary(); - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - - tenantMigrationTest.insertDonorDB(dbName, collName); - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - recipientConnString: tenantMigrationTest.getRecipientConnString(), - tenantId: tenantId, - }; - - // Configure a failpoint to have the recipient primary hang after a successful initial - // comparison. - const recipientDB = recipientPrimary.getDB(dbName); - const hangAfterFirstFCVcheck = - configureFailPoint(recipientDB, "fpAfterComparingRecipientAndDonorFCV", {action: "hang"}); - - // Start a migration and wait for recipient to hang at the failpoint. - assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - hangAfterFirstFCVcheck.wait(); - - // Downgrade the FCV for the donor set. - assert.commandWorked(donorPrimary.adminCommand({setFeatureCompatibilityVersion: downgradeFCV})); - - // Step up a new node in the recipient set and trigger a failover. The new primary should - // attempt to resume cloning, but fail upon re-checking the FCVs. - const recipientRst = tenantMigrationTest.getRecipientRst(); - const newRecipientPrimary = recipientRst.getSecondaries()[0]; - recipientRst.awaitLastOpCommitted(); - assert.commandWorked(newRecipientPrimary.adminCommand({replSetStepUp: 1})); - hangAfterFirstFCVcheck.off(); - recipientRst.getPrimary(); - - // Make sure we see the FCV mismatch detection message on the recipient regardless. - checkLog.containsJson(newRecipientPrimary, 5382300); - - // Upgrade again to check on the status of the migration from the donor's point of view. 
- assert.commandWorked(donorPrimary.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - TenantMigrationTest.assertAborted( - tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); - - tenantMigrationTest.stop(); -} - -runTest(lastContinuousFCV); -if (lastContinuousFCV != lastLTSFCV) { - runTest(lastLTSFCV); -} diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_recipient_abort_on_fcv_change.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_recipient_abort_on_fcv_change.js index 717cf2e52878c..0ca01e7706343 100644 --- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_recipient_abort_on_fcv_change.js +++ b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_recipient_abort_on_fcv_change.js @@ -8,6 +8,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' load("jstests/libs/parallelTester.js"); // for 'Thread' @@ -16,7 +17,7 @@ load("jstests/replsets/rslib.js"); // for 'setLogVerbosity' const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const tenantId = ObjectId().str; -const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const donorRst = tenantMigrationTest.getDonorRst(); diff --git a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_save_fcv.js b/jstests/multiVersion/genericSetFCVUsage/tenant_migration_save_fcv.js deleted file mode 100644 index 16735b65a9305..0000000000000 --- a/jstests/multiVersion/genericSetFCVUsage/tenant_migration_save_fcv.js +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Tests that we note down the recipient FCV at the beginning of a migration and that a change - * in that FCV will abort the migration. - * @tags: [ - * requires_majority_read_concern, - * incompatible_with_windows_tls, - * # Shard merge is not robust to failovers and restarts. - * incompatible_with_shard_merge, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - isShardMergeEnabled, - makeX509OptionsForTest, - runMigrationAsync -} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' -load("jstests/libs/parallelTester.js"); // for 'Thread' -load("jstests/replsets/rslib.js"); // 'createRstArgs' - -function runTest(downgradeFCV) { - const recipientRst = new ReplSetTest({ - nodes: 2, - name: jsTestName() + "_recipient", - nodeOptions: makeX509OptionsForTest().recipient - }); - - recipientRst.startSet(); - recipientRst.initiate(); - - const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), recipientRst: recipientRst}); - const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); - const collName = "testColl"; - - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - - tenantMigrationTest.insertDonorDB(dbName, collName); - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - recipientConnString: tenantMigrationTest.getRecipientConnString(), - tenantId: tenantId, - }; - - // Configure a failpoint to have the recipient primary hang after taking note of its FCV. 
- const recipientDb = recipientPrimary.getDB(dbName); - const hangAfterSavingFCV = configureFailPoint( - recipientDb, "fpAfterRecordingRecipientPrimaryStartingFCV", {action: "hang"}); - - // Start a migration and wait for recipient to hang at the failpoint. - const donorRstArgs = createRstArgs(tenantMigrationTest.getDonorRst()); - const migrationThread = new Thread(runMigrationAsync, migrationOpts, donorRstArgs); - migrationThread.start(); - hangAfterSavingFCV.wait(); - - const isRunningMergeProtocol = isShardMergeEnabled(recipientDb); - - // Downgrade the FCV for the recipient set. - assert.commandWorked( - recipientPrimary.adminCommand({setFeatureCompatibilityVersion: downgradeFCV})); - - // Step up a new node in the recipient set and trigger a failover. The new primary should - // attempt to resume cloning, but fail upon re-checking the FCV. - const newRecipientPrimary = recipientRst.getSecondaries()[0]; - recipientRst.awaitLastOpCommitted(); - assert.commandWorked(newRecipientPrimary.adminCommand({replSetStepUp: 1})); - hangAfterSavingFCV.off(); - recipientRst.getPrimary(); - - // The migration will not be able to continue in the downgraded version. - TenantMigrationTest.assertAborted(migrationThread.returnData()); - // Change-of-FCV detection message. - if (isRunningMergeProtocol && MongoRunner.compareBinVersions(downgradeFCV, "5.2") < 0) { - // FCV is too old for shard merge. - checkLog.containsJson(newRecipientPrimary, 5949504); - } else { - // Can't change FCVs during a migration. - checkLog.containsJson(newRecipientPrimary, 5356200); - } - - tenantMigrationTest.stop(); - recipientRst.stopSet(); -} - -runTest(lastContinuousFCV); -if (lastContinuousFCV != lastLTSFCV) { - runTest(lastLTSFCV); -} diff --git a/jstests/multiVersion/genericSetFCVUsage/test_replica_set_startup_in_downgrading_state.js b/jstests/multiVersion/genericSetFCVUsage/test_replica_set_startup_in_downgrading_state.js index ef03a3656f7bc..3c014f018d381 100644 --- a/jstests/multiVersion/genericSetFCVUsage/test_replica_set_startup_in_downgrading_state.js +++ b/jstests/multiVersion/genericSetFCVUsage/test_replica_set_startup_in_downgrading_state.js @@ -5,12 +5,8 @@ * @tags: [requires_fcv_70] */ -(function() { -"use strict"; - load('jstests/multiVersion/libs/verify_versions.js'); load('jstests/libs/fail_point_util.js'); -load("jstests/libs/feature_flag_util.js"); function runReplicaSet() { let fcvDoc; @@ -81,5 +77,4 @@ function runReplicaSet() { rst.stopSet(); } -runReplicaSet(); -})(); +runReplicaSet(); \ No newline at end of file diff --git a/jstests/multiVersion/genericSetFCVUsage/test_sharding_startup_in_downgrading_state.js b/jstests/multiVersion/genericSetFCVUsage/test_sharding_startup_in_downgrading_state.js index 71992e8004937..2c2e6bd76dcd7 100644 --- a/jstests/multiVersion/genericSetFCVUsage/test_sharding_startup_in_downgrading_state.js +++ b/jstests/multiVersion/genericSetFCVUsage/test_sharding_startup_in_downgrading_state.js @@ -5,12 +5,8 @@ * @tags: [requires_fcv_70] */ -(function() { -"use strict"; - load('jstests/multiVersion/libs/verify_versions.js'); load('jstests/libs/fail_point_util.js'); -load("jstests/libs/feature_flag_util.js"); function runSharding() { let fcvDoc; @@ -114,5 +110,4 @@ function runSharding() { st.stop(); } -runSharding(); -})(); +runSharding(); \ No newline at end of file diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js index fbb6a9a268782..eea18471f3994 100644 --- 
a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js +++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster.js @@ -6,8 +6,9 @@ (function() { "use strict"; -load('./jstests/multiVersion/libs/multi_rs.js'); -load('./jstests/multiVersion/libs/multi_cluster.js'); +load('jstests/multiVersion/libs/multi_rs.js'); +load('jstests/multiVersion/libs/multi_cluster.js'); +load('jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js'); // When checking UUID consistency, the shell attempts to run a command on the node it believes is // primary in each shard. However, this test restarts shards, and the node that is elected primary @@ -16,21 +17,6 @@ load('./jstests/multiVersion/libs/multi_cluster.js'); // command is nondeterministic, skip the consistency check for this test. TestData.skipCheckingUUIDsConsistentAcrossCluster = true; -var testCRUDAndAgg = function(db) { - assert.commandWorked(db.foo.insert({x: 1})); - assert.commandWorked(db.foo.insert({x: -1})); - assert.commandWorked(db.foo.update({x: 1}, {$set: {y: 1}})); - assert.commandWorked(db.foo.update({x: -1}, {$set: {y: 1}})); - var doc1 = db.foo.findOne({x: 1}); - assert.eq(1, doc1.y); - var doc2 = db.foo.findOne({x: -1}); - assert.eq(1, doc2.y); - - assert.commandWorked(db.foo.remove({x: 1}, true)); - assert.commandWorked(db.foo.remove({x: -1}, true)); - assert.eq(null, db.foo.findOne()); -}; - // Test upgrade/downgrade between "latest" and "last-lts"/"last-continuous". for (let oldVersion of ["last-lts", "last-continuous"]) { var st = new ShardingTest({ @@ -63,6 +49,7 @@ for (let oldVersion of ["last-lts", "last-continuous"]) { testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // upgrade the config servers first jsTest.log('upgrading config servers'); @@ -70,12 +57,14 @@ for (let oldVersion of ["last-lts", "last-continuous"]) { testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Restart mongos to clear all cache and force it to do remote calls. st.restartMongoses(); testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Then upgrade the shards. jsTest.log('upgrading shard servers'); @@ -86,12 +75,14 @@ for (let oldVersion of ["last-lts", "last-continuous"]) { testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Restart mongos to clear all cache and force it to do remote calls. st.restartMongoses(); testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Finally, upgrade mongos jsTest.log('upgrading mongos servers'); @@ -99,12 +90,14 @@ for (let oldVersion of ["last-lts", "last-continuous"]) { testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Restart mongos to clear all cache and force it to do remote calls. st.restartMongoses(); testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Check that version document is unmodified. version = st.s.getCollection('config.version').findOne(); @@ -118,12 +111,14 @@ for (let oldVersion of ["last-lts", "last-continuous"]) { testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Restart mongos to clear all cache and force it to do remote calls. 
st.restartMongoses(); testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); jsTest.log('downgrading shard servers'); st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeConfigs: false}); @@ -133,24 +128,28 @@ for (let oldVersion of ["last-lts", "last-continuous"]) { testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Restart mongos to clear all cache and force it to do remote calls. st.restartMongoses(); testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); jsTest.log('downgrading config servers'); st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeShards: false}); testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Restart mongos to clear all cache and force it to do remote calls. st.restartMongoses(); testCRUDAndAgg(st.s.getDB('unsharded')); testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); // Check that version document is unmodified. version = st.s.getCollection('config.version').findOne(); diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster_config_shard.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster_config_shard.js new file mode 100644 index 0000000000000..728f540a9ac7f --- /dev/null +++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_cluster_config_shard.js @@ -0,0 +1,161 @@ +/** + * Tests that CRUD, aggregation and DDL commands continue to work as expected with a config shard on + * both sharded and unsharded collection at each step of cluster upgrade/downgrade between last-lts + * and latest and between last-continuous and latest. + */ +(function() { +"use strict"; + +load('jstests/multiVersion/libs/multi_rs.js'); +load('jstests/multiVersion/libs/multi_cluster.js'); +load('jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js'); + +// When checking UUID consistency, the shell attempts to run a command on the node it believes is +// primary in each shard. However, this test restarts shards, and the node that is elected primary +// after the restart may be different from the original primary. Since the shell does not retry on +// NotWritablePrimary errors, and whether or not it detects the new primary before issuing the +// command is nondeterministic, skip the consistency check for this test. +TestData.skipCheckingUUIDsConsistentAcrossCluster = true; + +// Test upgrade/downgrade between "latest" and "last-lts"/"last-continuous". 
+for (let oldVersion of ["last-lts", "last-continuous"]) { + var st = new ShardingTest({ + shards: 2, + mongos: 1, + other: { + mongosOptions: {binVersion: oldVersion}, + configOptions: {binVersion: oldVersion}, + shardOptions: {binVersion: oldVersion}, + + rsOptions: {binVersion: oldVersion}, + rs: true, + }, + configShard: true + }); + st.configRS.awaitReplication(); + + // check that config.version document gets initialized properly + var version = st.s.getCollection('config.version').findOne(); + var clusterID = version.clusterId; + assert.neq(null, clusterID); + + // Setup sharded collection + assert.commandWorked(st.s.adminCommand({enableSharding: 'sharded'})); + st.ensurePrimaryShard('sharded', st.shard0.shardName); + + assert.commandWorked(st.s.adminCommand({shardCollection: 'sharded.foo', key: {x: 1}})); + assert.commandWorked(st.s.adminCommand({split: 'sharded.foo', middle: {x: 0}})); + assert.commandWorked( + st.s.adminCommand({moveChunk: 'sharded.foo', find: {x: 1}, to: st.shard1.shardName})); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // upgrade the config servers first + jsTest.log('upgrading config servers'); + st.upgradeCluster("latest", {upgradeMongos: false, upgradeShards: false}); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Restart mongos to clear all cache and force it to do remote calls. + st.restartMongoses(); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Then upgrade the shards. + jsTest.log('upgrading shard servers'); + st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false}); + + awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true}); + awaitRSClientHosts(st.s, st.rs1.getPrimary(), {ok: true, ismaster: true}); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Restart mongos to clear all cache and force it to do remote calls. + st.restartMongoses(); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Finally, upgrade mongos + jsTest.log('upgrading mongos servers'); + st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false}); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Restart mongos to clear all cache and force it to do remote calls. + st.restartMongoses(); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Check that version document is unmodified. + version = st.s.getCollection('config.version').findOne(); + assert.eq(clusterID, version.clusterId); + + /////////////////////////////////////////////////////////////////////////////////////////// + // Downgrade back + + jsTest.log('downgrading mongos servers'); + st.downgradeCluster(oldVersion, {downgradeConfigs: false, downgradeShards: false}); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Restart mongos to clear all cache and force it to do remote calls. 
+ st.restartMongoses(); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + jsTest.log('downgrading shard servers'); + st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeConfigs: false}); + + awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true}); + awaitRSClientHosts(st.s, st.rs1.getPrimary(), {ok: true, ismaster: true}); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Restart mongos to clear all cache and force it to do remote calls. + st.restartMongoses(); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + jsTest.log('downgrading config servers'); + st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeShards: false}); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Restart mongos to clear all cache and force it to do remote calls. + st.restartMongoses(); + + testCRUDAndAgg(st.s.getDB('unsharded')); + testCRUDAndAgg(st.s.getDB('sharded')); + testDDLOps(st); + + // Check that version document is unmodified. + version = st.s.getCollection('config.version').findOne(); + assert.eq(clusterID, version.clusterId); + + st.stop(); +} +})(); diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_idempotency.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_idempotency.js index ff24256908a7c..d847a980d4049 100644 --- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_idempotency.js +++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_idempotency.js @@ -57,4 +57,4 @@ function runTest(downgradeVersion) { runTest('last-lts'); runTest('last-continuous'); -}()); \ No newline at end of file +}()); diff --git a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_sharded_cluster.js b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_sharded_cluster.js index 1d17a15ff935e..19e6fab60b9e9 100644 --- a/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_sharded_cluster.js +++ b/jstests/multiVersion/genericSetFCVUsage/upgrade_downgrade_sharded_cluster.js @@ -10,18 +10,15 @@ * 6. Verify the data consistency after the downgrade procedure */ -(function() { -'use strict'; - load('jstests/multiVersion/libs/multi_cluster.js'); // For upgradeCluster -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const dbName = jsTestName(); function setupClusterAndDatabase(binVersion) { const st = new ShardingTest({ mongos: 1, - config: 1, + config: 2, shards: 2, other: { mongosOptions: {binVersion: binVersion}, @@ -94,9 +91,42 @@ function checkConfigAndShardsFCV(expectedFCV) { } } +// TODO(SERVER-77873): Remove checkReshardingActiveIndex; once the feature flag is removed the +// check will be incorrect. 
+function checkReshardingActiveIndex() { + const getActiveIndex = (node) => { + const indexes = st.configRS.getPrimary().getDB("config").reshardingOperations.getIndexes(); + return indexes.find((index) => (index.name == "ReshardingCoordinatorActiveIndex")); + }; + let activeIndex = getActiveIndex(st.configRS.getPrimary()); + if (FeatureFlagUtil.isPresentAndEnabled(st.s, "ReshardingImprovements")) { + assert( + !activeIndex, + "With ReshardingImprovements enabled, the config.reshardingOperations ReshardingCoordinatorActiveIndex is present but should not be."); + } + // Since downgrading does not restore the index, we don't check for the index's presence + // until we force a step-up (re-initializing the coordinator) + + assert.commandWorked(st.configRS.getSecondary().adminCommand({replSetStepUp: 1})); + st.configRS.waitForPrimaryOnlyServices(st.configRS.getPrimary()); + activeIndex = getActiveIndex(st.configRS.getPrimary()); + if (FeatureFlagUtil.isPresentAndEnabled(st.s, "ReshardingImprovements")) { + assert( + !activeIndex, + "With ReshardingImprovements enabled, the config.reshardingOperations ReshardingCoordinatorActiveIndex is present but should not be, after step-up."); + } else { + assert( + activeIndex, + "With ReshardingImprovements disabled, the config.reshardingOperations ReshardingCoordinatorActiveIndex is not present but should be, after step-up."); + assert(activeIndex.unique, + "The config.reshardingOperations ReshardingCoordinatorActiveIndex is not unique"); + } +} + function checkClusterBeforeUpgrade(fcv) { checkConfigAndShardsFCV(fcv); checkConfigVersionDoc(); + checkReshardingActiveIndex(); } function checkClusterAfterBinaryUpgrade() { @@ -106,10 +136,12 @@ function checkClusterAfterBinaryUpgrade() { function checkClusterAfterFCVUpgrade(fcv) { checkConfigAndShardsFCV(fcv); checkConfigVersionDoc(); + checkReshardingActiveIndex(); } function checkClusterAfterFCVDowngrade() { checkConfigVersionDoc(); + checkReshardingActiveIndex(); } function checkClusterAfterBinaryDowngrade(fcv) { @@ -153,5 +185,4 @@ for (const oldVersion of [lastLTSFCV, lastContinuousFCV]) { checkClusterAfterBinaryDowngrade(oldVersion); st.stop(); -} -})(); +} \ No newline at end of file diff --git a/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js b/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js index 2b7ed55bd953d..00e056952ab6a 100644 --- a/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js +++ b/jstests/multiVersion/genericSetFCVUsage/view_definition_feature_compatibility_version.js @@ -13,7 +13,7 @@ const testName = "view_definition_feature_compatibility_version_multiversion"; const dbpath = MongoRunner.dataPath + testName; // An array of feature flags that must be enabled to run feature flag tests. -const featureFlagsToEnable = ["featureFlagUserRoles"]; +const featureFlagsToEnable = []; // These arrays should be populated with aggregation pipelines that use // aggregation features in new versions of mongod. This test ensures that a view @@ -21,10 +21,7 @@ const featureFlagsToEnable = ["featureFlagUserRoles"]; // latest version, and rejects it when the feature compatibility version is the last // version. const testCasesLastContinuous = []; -const testCasesLastContinuousWithFeatureFlags = [ - // TODO SERVER-70689: Remove this case when 7.0 becomes lastLTS. 
- [{$project: {z: "$$USER_ROLES"}}] -]; +const testCasesLastContinuousWithFeatureFlags = []; // Anything that's incompatible with the last continuous release is incompatible with the last // stable release. @@ -32,14 +29,6 @@ const testCasesLastStable = testCasesLastContinuous.concat([]); const testCasesLastStableWithFeatureFlags = testCasesLastContinuousWithFeatureFlags.concat([]); -// The addition of the $$USER_ROLES system variable is slightly different than the usual use case of -// this test file. This means that some of the following commands won't work/fail as expected for -// the $$USER_ROLES test case. -// TODO SERVER-70689: Remove this function and references to it. -function testCaseDoesNotReferenceUserRoles(testCase) { - return testCase[0].$project.z != "$$USER_ROLES"; -} - // Tests Feature Compatibility Version behavior of view creation while using aggregation pipelines // 'testCases' and using a previous stable version 'lastVersion' of mongod. // 'lastVersion' can have values "last-lts" and "last-continuous". @@ -94,27 +83,17 @@ function testViewDefinitionFCVBehavior(lastVersion, testCases, featureFlags = [] // Read against an existing view using new query features should not fail. testCases.forEach((pipe, i) => { - if (testCaseDoesNotReferenceUserRoles(pipe)) { - // The $$USER_ROLES value will be evaluated every time the view is queried, so the - // following query would fail since we are running an older FCV. - // TODO SERVER-70689: Remove the guard of this if-statement and keep the body. - assert.commandWorked(testDB.runCommand({find: "firstView" + i}), - `Failed to query view with pipeline ${tojson(pipe)}`); - } + assert.commandWorked(testDB.runCommand({find: "firstView" + i}), + `Failed to query view with pipeline ${tojson(pipe)}`); }); // Trying to create a new view in the same database as existing invalid view should fail, // even if the new view doesn't use any new query features. - if (testCaseDoesNotReferenceUserRoles(testCases[0])) { - // Since the $$USER_ROLES variable won't be evaluated during this view creation, the view - // creation will succeed even though we are on an older FCV. - // TODO SERVER-70689: Remove the guard of this if-statement and keep the body. - assert.commandFailedWithCode( - testDB.createView("newViewOldFeatures", "coll", [{$project: {_id: 1}}]), - ErrorCodes.QueryFeatureNotAllowed, - `Expected *not* to be able to create view on database ${testDB} while in FCV ${ - binVersionToFCV(lastVersion)}`); - } + assert.commandFailedWithCode( + testDB.createView("newViewOldFeatures", "coll", [{$project: {_id: 1}}]), + ErrorCodes.QueryFeatureNotAllowed, + `Expected *not* to be able to create view on database ${testDB} while in FCV ${ + binVersionToFCV(lastVersion)}`); // Trying to create a new view succeeds if it's on a separate database. const testDB2 = conn.getDB(testName + '2'); @@ -171,13 +150,8 @@ function testViewDefinitionFCVBehavior(lastVersion, testCases, featureFlags = [] // Read against an existing view using new query features should not fail. testCases.forEach((pipe, i) => { - if (testCaseDoesNotReferenceUserRoles(pipe)) { - // The view is evaluated on the fly, and the FCV is still set to the last version so the - // evaluation of $$USER_ROLES will cause this to fail. - // TODO SERVER-70689: Remove the guard of this if-statement and keep the body. 
- assert.commandWorked(testDB.runCommand({find: "firstView" + i}), - `Failed to query view with pipeline ${tojson(pipe)}`); - } + assert.commandWorked(testDB.runCommand({find: "firstView" + i}), + `Failed to query view with pipeline ${tojson(pipe)}`); }); // Set the feature compatibility version back to the latest version. @@ -217,32 +191,21 @@ function testViewDefinitionFCVBehavior(lastVersion, testCases, featureFlags = [] testDB = conn.getDB(testName); testCases.forEach(function(pipe, i) { - // In this case, using $$USER_ROLES on the last FCV version will cause the view - // creation to fail during parsing because the necessary feature flag will not have been - // enabled due to the older FCV. - // TODO SERVER-70689: Remove the guard of this if-statement and keep the body. - if (testCaseDoesNotReferenceUserRoles(pipe)) { - // Even though the feature compatibility version is the last version, we should still be - // able to create a view using new query features, because - // internalValidateFeaturesAsPrimary is false. - assert.commandWorked( - testDB.createView("thirdView" + i, "coll", pipe), - `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` + - ` ${ - binVersionToFCV( - lastVersion)} with internalValidateFeaturesAsPrimary=false`); - - // We should also be able to modify a view to use new query features. - assert(testDB["thirdView" + i].drop(), - `Drop of view with pipeline ${tojson(pipe)} failed`); - assert.commandWorked(testDB.createView("thirdView" + i, "coll", [])); - assert.commandWorked( - testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}), - `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` + - ` ${ - binVersionToFCV( - lastVersion)} with internalValidateFeaturesAsPrimary=false`); - } + // Even though the feature compatibility version is the last version, we should still be + // able to create a view using new query features, because + // internalValidateFeaturesAsPrimary is false. + assert.commandWorked( + testDB.createView("thirdView" + i, "coll", pipe), + `Expected to be able to create view with pipeline ${tojson(pipe)} while in FCV` + + ` ${binVersionToFCV(lastVersion)} with internalValidateFeaturesAsPrimary=false`); + + // We should also be able to modify a view to use new query features. 
+ assert(testDB["thirdView" + i].drop(), `Drop of view with pipeline ${tojson(pipe)} failed`); + assert.commandWorked(testDB.createView("thirdView" + i, "coll", [])); + assert.commandWorked( + testDB.runCommand({collMod: "thirdView" + i, viewOn: "coll", pipeline: pipe}), + `Expected to be able to modify view to use pipeline ${tojson(pipe)} while in FCV` + + ` ${binVersionToFCV(lastVersion)} with internalValidateFeaturesAsPrimary=false`); }); MongoRunner.stopMongod(conn); diff --git a/jstests/multiVersion/libs/data_generators.js b/jstests/multiVersion/libs/data_generators.js index a46fbe8439a4f..d1d4e8e2bb8a2 100644 --- a/jstests/multiVersion/libs/data_generators.js +++ b/jstests/multiVersion/libs/data_generators.js @@ -300,7 +300,7 @@ function DataGenerator() { } // Data we are using as a source for our testing - testData = [ + let testData = [ GenFlatObjectAllTypesHardCoded(), GenFlatObjectAllTypes(0), GenFlatObjectAllTypes(2), @@ -398,7 +398,7 @@ function IndexDataGenerator(options) { // Find the character (index into keyChars) that we currently have at this position, set // this position to the next character in the keyChars sequence - keyCharsIndex = keyChars.search(currentKey[currentKeyIndex]); + var keyCharsIndex = keyChars.search(currentKey[currentKeyIndex]); currentKey = setCharAt( currentKey, currentKeyIndex, keyChars[(keyCharsIndex + 1) % keyChars.length]); currentKeyIndex = currentKeyIndex + 1; @@ -519,7 +519,7 @@ function IndexDataGenerator(options) { return GenIndexOptions(seed); } - testIndexes = [ + let testIndexes = [ // Single Field Indexes {"spec": GenSingleFieldIndex(1), "options": GenIndexOptions(0)}, {"spec": GenSingleFieldIndex(0), "options": GenIndexOptions(1)}, diff --git a/jstests/multiVersion/libs/initial_sync.js b/jstests/multiVersion/libs/initial_sync.js index 329602f0c4b34..a1d3fc63c6f61 100644 --- a/jstests/multiVersion/libs/initial_sync.js +++ b/jstests/multiVersion/libs/initial_sync.js @@ -1,8 +1,8 @@ 'use strict'; -load("./jstests/multiVersion/libs/multi_rs.js"); -load("./jstests/replsets/rslib.js"); +load("jstests/multiVersion/libs/multi_rs.js"); +load("jstests/replsets/rslib.js"); /** * Test that starts up a replica set with 2 nodes of version 'replSetVersion', inserts some data, @@ -47,4 +47,4 @@ var multversionInitialSyncTest = function( rst.awaitSecondaryNodes(); rst.stopSet(); -}; \ No newline at end of file +}; diff --git a/jstests/multiVersion/libs/multiversion_rollback.js b/jstests/multiVersion/libs/multiversion_rollback.js index 37a7abadbfb38..d8e0419c40420 100644 --- a/jstests/multiVersion/libs/multiversion_rollback.js +++ b/jstests/multiVersion/libs/multiversion_rollback.js @@ -11,7 +11,6 @@ load("jstests/replsets/libs/rollback_test.js"); load("jstests/libs/collection_drop_recreate.js"); load('jstests/libs/parallel_shell_helpers.js'); load("jstests/libs/fail_point_util.js"); -load("jstests/libs/feature_flag_util.js"); function printFCVDoc(nodeAdminDB, logMessage) { const fcvDoc = nodeAdminDB.system.version.findOne({_id: 'featureCompatibilityVersion'}); @@ -164,11 +163,7 @@ function testMultiversionRollbackLatestFromDowngrading(testName, upgradeImmediat printFCVDoc(newPrimaryAdminDB, "New primary's FCV after rolling back: "); checkFCV(newPrimaryAdminDB, lastLTSFCV, lastLTSFCV); - if (upgradeImmediately && - FeatureFlagUtil.isEnabled(newPrimaryAdminDB, - "DowngradingToUpgrading", - null /* user not specified */, - true /* ignores FCV */)) { + if (upgradeImmediately) { // We can upgrade immediately. 
assert.commandWorked(newPrimary.adminCommand({setFeatureCompatibilityVersion: latestFCV})); diff --git a/jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js b/jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js new file mode 100644 index 0000000000000..f2b98b37c1bc6 --- /dev/null +++ b/jstests/multiVersion/libs/upgrade_downgrade_cluster_shared.js @@ -0,0 +1,66 @@ +var testCRUDAndAgg = function(db) { + assert.commandWorked(db.foo.insert({x: 1})); + assert.commandWorked(db.foo.insert({x: -1})); + assert.commandWorked(db.foo.update({x: 1}, {$set: {y: 1}})); + assert.commandWorked(db.foo.update({x: -1}, {$set: {y: 1}})); + var doc1 = db.foo.findOne({x: 1}); + assert.eq(1, doc1.y); + var doc2 = db.foo.findOne({x: -1}); + assert.eq(1, doc2.y); + + assert.commandWorked(db.foo.remove({x: 1}, true)); + assert.commandWorked(db.foo.remove({x: -1}, true)); + assert.eq(null, db.foo.findOne()); +}; + +var testDDLOps = function(st) { + var shard0Name = st.shard0.shardName; + var shard1Name = st.shard1.shardName; + var db = st.s.getDB("sharded"); + var configDB = st.s.getDB("config"); + assert.commandWorked(db.foo.insert({x: 1})); + + // moveChunk + var shard0NumChunks = configDB.chunks.find({shard: shard0Name}).toArray().length; + var shard1NumChunks = configDB.chunks.find({shard: shard1Name}).toArray().length; + + assert.commandWorked( + st.s.adminCommand({moveChunk: "sharded.foo", find: {x: 1}, to: shard0Name})); + + var newShard0NumChunks = configDB.chunks.find({shard: shard0Name}).toArray().length; + var newShard1NumChunks = configDB.chunks.find({shard: shard1Name}).toArray().length; + assert.eq(newShard0NumChunks, shard0NumChunks + 1); + assert.eq(newShard1NumChunks, shard1NumChunks - 1); + + assert.commandWorked( + st.s.adminCommand({moveChunk: "sharded.foo", find: {x: 1}, to: shard1Name})); + + // shardCollection + assert.eq(null, configDB.collections.findOne({_id: "sharded.apple"})); + assert.commandWorked(st.s.adminCommand({shardCollection: "sharded.apple", key: {_id: 1}})); + assert.eq(1, configDB.collections.find({_id: "sharded.apple"}).toArray().length); + + // renameCollection + assert.commandWorked(st.s.adminCommand( + {renameCollection: "sharded.apple", to: "sharded.pear", dropTarget: true})); + assert.eq(null, configDB.collections.findOne({_id: "sharded.apple"})); + assert.eq(1, configDB.collections.find({_id: "sharded.pear"}).toArray().length); + + // drop a collection + assert.commandWorked(db.runCommand({drop: "pear"})); + assert.eq(null, configDB.collections.findOne({_id: "sharded.pear"})); + + // movePrimary + assert(configDB.databases.findOne({_id: "sharded", primary: shard0Name})); + + assert.commandWorked(st.s.adminCommand({movePrimary: "sharded", to: shard1Name})); + assert.eq(null, configDB.databases.findOne({_id: "sharded", primary: shard0Name})); + assert(configDB.databases.findOne({_id: "sharded", primary: shard1Name})); + + assert.commandWorked(st.s.adminCommand({movePrimary: "sharded", to: shard0Name})); + assert.eq(null, configDB.databases.findOne({_id: "sharded", primary: shard1Name})); + assert(configDB.databases.findOne({_id: "sharded", primary: shard0Name})); + + assert.commandWorked(db.foo.remove({x: 1}, true)); + assert.eq(null, db.foo.findOne()); +}; diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/analyze_shard_key_ttl_indexes_setFCV.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/analyze_shard_key_ttl_indexes_setFCV.js deleted file mode 100644 index 99b465d3bbfe7..0000000000000 --- 
a/jstests/multiVersion/targetedTestsLastLtsFeatures/analyze_shard_key_ttl_indexes_setFCV.js +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Tests that version upgrade creates the TTL indexes for config.sampledQueries and - * config.sampledQueriesDiff. - * - * @tags: [requires_fcv_70] - */ - -(function() { -"use strict"; - -load('./jstests/multiVersion/libs/multi_rs.js'); -load('./jstests/multiVersion/libs/multi_cluster.js'); -load("./jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); - -/** - * Verifies that a proper TTL index exists for the query sample collection - */ -function assertTTLIndexExists(node, collName, indexName) { - const configDB = node.getDB("config"); - let foundIndexSpec = undefined; - assert.soon(() => { - const indexSpecs = - assert.commandWorked(configDB.runCommand({"listIndexes": collName})).cursor.firstBatch; - for (var i = 0; i < indexSpecs.length; ++i) { - if (indexSpecs[i].name == indexName) { - foundIndexSpec = indexSpecs[i]; - return true; - } - } - return false; - }); - assert.eq(foundIndexSpec.key, {"expireAt": 1}); - assert.eq(foundIndexSpec.expireAfterSeconds, 0); -} - -function assertTTLIndexesExist(node) { - assertTTLIndexExists(node, "sampledQueries", "SampledQueriesTTLIndex"); - assertTTLIndexExists(node, "sampledQueriesDiff", "SampledQueriesDiffTTLIndex"); - assertTTLIndexExists(node, "analyzeShardKeySplitPoints", "AnalyzeShardKeySplitPointsTTLIndex"); -} - -for (let oldVersion of ["last-lts", "last-continuous"]) { - jsTest.log("Start testing with version " + oldVersion); - var st = new ShardingTest({ - shards: 1, - rs: {nodes: 2}, - mongos: 1, - other: { - mongosOptions: {binVersion: oldVersion}, - configOptions: {binVersion: oldVersion}, - shardOptions: {binVersion: oldVersion}, - rsOptions: {binVersion: oldVersion} - } - }); - st.configRS.awaitReplication(); - - //////// Upgrade to latest - - // Upgrade the config servers - jsTest.log('upgrading config servers'); - st.upgradeCluster("latest", {upgradeMongos: false, upgradeShards: false}); - // Restart mongos to clear all cache and force it to do remote calls. - st.restartMongoses(); - - // Upgrade the shards - jsTest.log('upgrading shard servers'); - st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false}); - awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true}); - // Restart mongos to clear all cache and force it to do remote calls. - st.restartMongoses(); - - assertTTLIndexesExist(st.rs0.getPrimary()); - - // Upgrade mongos - jsTest.log('upgrading mongos servers'); - st.upgradeCluster("latest", {upgradeConfigs: false, upgradeShards: false}); - // Restart mongos to clear all cache and force it to do remote calls. - st.restartMongoses(); - - assertTTLIndexesExist(st.rs0.getPrimary()); - - // Check that version document is unmodified. - version = st.s.getCollection('config.version').findOne(); - var clusterID = version.clusterId; - assert.eq(clusterID, version.clusterId); - - //////// Downgrade back - - jsTest.log('downgrading mongos servers'); - st.downgradeCluster(oldVersion, {downgradeConfigs: false, downgradeShards: false}); - // Restart mongos to clear all cache and force it to do remote calls. - st.restartMongoses(); - - assertTTLIndexesExist(st.rs0.getPrimary()); - - jsTest.log('downgrading shard servers'); - st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeConfigs: false}); - awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true}); - // Restart mongos to clear all cache and force it to do remote calls. 
- st.restartMongoses(); - - for (let conn of [st.rs0.getPrimary(), st.rs0.getSecondary()]) { - assertTTLIndexesExist(conn); - } - - jsTest.log('downgrading config servers'); - st.downgradeCluster(oldVersion, {downgradeMongos: false, downgradeShards: false}); - // Restart mongos to clear all cache and force it to do remote calls. - st.restartMongoses(); - - // Check that version document is unmodified. - version = st.s.getCollection('config.version').findOne(); - assert.eq(clusterID, version.clusterId); - - jsTest.log("End testing with version " + oldVersion); - st.stop(); -} -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/cannot_downgrade_config_server_with_change_streams_images_collection_option.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/cannot_downgrade_config_server_with_change_streams_images_collection_option.js deleted file mode 100644 index d3bd581397430..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/cannot_downgrade_config_server_with_change_streams_images_collection_option.js +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Verifies a config server cannot downgrade with a collection with changeStreamPreAndPostImages - * enabled. - * - * @tags: [requires_fcv_70, featureFlagCatalogShard, featureFlagTransitionToCatalogShard] - */ -(function() { -"use strict"; - -const st = new ShardingTest({config: 1, shards: 1}); - -// A collection on a shard with changeStreamPreAndPostImages shouldn't impact downgrade. -const validShardNS = "foo.bar"; -assert.commandWorked(st.s.getCollection(validShardNS).insert({x: 1})); -assert.commandWorked( - st.s.getDB("foo").runCommand({collMod: "bar", changeStreamPreAndPostImages: {enabled: true}})); - -// A collection on the config server with changeStreamPreAndPostImages should prevent downgrade. The -// config server can only downgrade when in dedicated mode and in this mode the only user -// accessible collections on it are in the config and admin databases, which never allow this -// option, so we have to create a collection on a separate db via direct connection. -const directConfigNS = "directDB.onConfig"; -assert.commandWorked(st.configRS.getPrimary().getCollection(directConfigNS).insert({x: 1})); -assert.commandWorked(st.configRS.getPrimary().getDB("directDB").runCommand({ - collMod: "onConfig", - changeStreamPreAndPostImages: {enabled: true} -})); - -assert.commandFailedWithCode(st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}), - ErrorCodes.CannotDowngrade); - -// Unset the option on the config server collection and now the config server can downgrade. -assert.commandWorked(st.configRS.getPrimary().getDB("directDB").runCommand({ - collMod: "onConfig", - changeStreamPreAndPostImages: {enabled: false} -})); - -assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - -st.stop(); -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/compound_wildcard_indexes_downgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/compound_wildcard_indexes_downgrade.js deleted file mode 100644 index 807c0de042868..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/compound_wildcard_indexes_downgrade.js +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Tests that we will fail on startup if the compound wildcard indexes were not removed before we - * downgrade from 7.0 to 'last-lts'. Downgrading FCV will allow continued use of a CWI as long as - * the version of mongod is still 7.0, but will disallow any new creation of a CWI. 
- * - * @tags: [ - * featureFlagCompoundWildcardIndexes, - * requires_fcv_70, - * ] - */ - -(function() { -'use strict'; - -load("jstests/libs/analyze_plan.js"); // For getPlanStages. - -const dbpath = MongoRunner.dataPath + 'compound_wildcard_indexes_downgrade'; -resetDbpath(dbpath); - -// If we have a CWI on the admin database, we want to make sure we can startup properly despite FCV -// not being initialized yet. It's possible to hit an invariant if featureFlag.isEnabled is called -// without checking fcv.isVersionInitialized. -const dbName = 'admin'; -const dbNameTest = "compound_wildcard_indexes_downgrade"; -const collName = 'compound_wildcard_indexes_downgrade'; - -const latestVersion = "latest"; -const lastLTSVersion = "last-lts"; - -const keyPattern = { - "a.$**": 1, - b: 1 -}; - -// Startup with latest, create a compound wildcard index, stop mongod. -{ - const conn = - MongoRunner.runMongod({dbpath: dbpath, binVersion: latestVersion, noCleanData: true}); - const db = conn.getDB(dbName); - const coll = db[collName]; - - assert.commandWorked(coll.createIndex(keyPattern)); - - assert.commandWorked(coll.insert({a: {c: 1}, b: 1})); - assert.commandWorked(coll.insert({a: 30, b: 20})); - - MongoRunner.stopMongod(conn); -} - -// Test that we are able to restart a mongod if there exists any CWI on the 'admin' DB and the FCV -// may not be initialized. -{ - const conn = - MongoRunner.runMongod({dbpath: dbpath, binVersion: latestVersion, noCleanData: true}); - const db = conn.getDB(dbName); - const coll = db[collName]; - - // Drop the CWI for downgrading. - assert.commandWorked(coll.dropIndex(keyPattern)); - - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '6.0'})); - MongoRunner.stopMongod(conn); -} - -// A normal downgrade process should drop all CWI. Now there's no CWI, we should be able to start a -// last-lts mongod. -{ - const conn = - MongoRunner.runMongod({dbpath: dbpath, binVersion: lastLTSVersion, noCleanData: true}); - - MongoRunner.stopMongod(conn); -} - -// Tests on a regular database. Test that 1) FCV can be downgraded with the existence of CWI, 2) -// continued use of CWI after FCV downgraded, 3) cannot create more CWI, and 4) a downgraded mongod -// fails to start up if CWI is not removed. -{ - let conn = - MongoRunner.runMongod({dbpath: dbpath, binVersion: latestVersion, noCleanData: true}); - let db = conn.getDB(dbNameTest); - let coll = db[collName]; - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '7.0'})); - - assert.commandWorked(coll.createIndex(keyPattern)); - - // Test that it succeeds to downgrade the FCV with the existence of CWI, but it should fail to - // start a mongod with the existence of a CWI. - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '6.0'})); - - // Test that the CWI can still be used after FCV downgraded. - const exp = coll.find({"a.c": 1}).explain(); - const winningPlan = getWinningPlan(exp.queryPlanner); - const ixScans = getPlanStages(winningPlan, "IXSCAN"); - assert.gt(ixScans.length, 0, exp); - assert.docEq(ixScans[0].indexName, "a.$**_1_b_1", ixScans); - - // We cannot create more CWI if FCV is below 7.0. - assert.commandFailedWithCode(coll.createIndex({"b.$**": 1, c: 1}), - ErrorCodes.CannotCreateIndex); - - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '7.0'})); - - // We can create more CWI if FCV is 7.0. 
- assert.commandWorked(coll.createIndex({"b.$**": 1, c: 1})); - - MongoRunner.stopMongod(conn); - - // To successfully downgrade a mongod, user must drop all CWI first. - assert.throws(() => MongoRunner.runMongod( - {dbpath: dbpath, binVersion: lastLTSVersion, noCleanData: true}), - [], - "MongoD should fail because wildcard indexes do not allow compounding"); - - // Start a "latest" mongod and drop all indexes to successfully downgrade the mongod. - conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: latestVersion, noCleanData: true}); - db = conn.getDB(dbNameTest); - coll = db[collName]; - coll.dropIndexes(); - - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: '6.0'})); - - MongoRunner.stopMongod(conn); - - // We can downgrade now as all indexes have been removed. - conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: lastLTSVersion, noCleanData: true}); - - MongoRunner.stopMongod(conn); -} -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/dbCheck_snapshotRead.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/dbCheck_snapshotRead.js deleted file mode 100644 index 729289bd9a5db..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/dbCheck_snapshotRead.js +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Ensure that a 6.0 version replicating a dbCheck oplog entry with the removed snapshotRead:false - * option does not crash when a 'latest' version receives the entry. - * - * @tags: [ - * requires_replication, - * ] - */ -(function() { -"use strict"; - -load('jstests/multiVersion/libs/multi_rs.js'); - -const nodes = { - // We want the 6.0 node to be the primary. - n1: {binVersion: "6.0", rsConfig: {priority: 1}}, - n2: {binVersion: "latest", rsConfig: {priority: 0}}, -}; - -const rst = new ReplSetTest({nodes: nodes}); -rst.startSet(); -rst.initiate(); - -const dbName = "test"; -const collName = jsTestName(); - -const primary = rst.getPrimary(); -const primaryDB = primary.getDB(dbName); -const coll = primaryDB[collName]; - -assert.commandWorked(coll.insert({a: 1})); - -// The 6.0 node will replicate the dbCheck oplog entry with the 'snapshotRead:false' option. This is -// not supported in recent versions and should be ignored, but not cause the node to crash. -assert.commandWorked(primaryDB.runCommand({"dbCheck": 1, snapshotRead: false})); - -rst.awaitReplication(); - -function dbCheckCompleted(db) { - return db.currentOp().inprog.filter(x => x["desc"] == "dbCheck")[0] === undefined; -} - -function forEachNode(f) { - f(rst.getPrimary()); - f(rst.getSecondary()); -} - -function awaitDbCheckCompletion(db) { - assert.soon(() => dbCheckCompleted(db), "dbCheck timed out"); - rst.awaitSecondaryNodes(); - rst.awaitReplication(); - - forEachNode(function(node) { - const healthlog = node.getDB('local').system.healthlog; - assert.soon(function() { - return (healthlog.find({"operation": "dbCheckStop"}).itcount() == 1); - }, "dbCheck command didn't complete"); - }); -} - -awaitDbCheckCompletion(primaryDB); - -{ - // The 6.0 primary should not report any errors. - const healthlog = primary.getDB('local').system.healthlog; - assert.eq(0, healthlog.find({severity: "error"}).itcount()); - assert.eq(0, healthlog.find({severity: "warning"}).itcount()); -} - -{ - // The latest secondary should log an error in the health log. 
- const secondary = rst.getSecondary(); - const healthlog = secondary.getDB('local').system.healthlog; - assert.eq(1, healthlog.find({severity: "error"}).itcount()); - assert.eq(0, healthlog.find({severity: "warning"}).itcount()); - const errorEntry = healthlog.findOne({severity: "error"}); - assert(errorEntry.hasOwnProperty('data'), tojson(errorEntry)); - assert.eq(false, errorEntry.data.success, tojson(errorEntry)); - assert(errorEntry.data.error.startsWith("Location6769502"), tojson(errorEntry)); -} - -rst.stopSet(); -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/fle2_range_downgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/fle2_range_downgrade.js deleted file mode 100644 index c8928789d2d8b..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/fle2_range_downgrade.js +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Tests that the cluster cannot be downgraded when range encrypted fields present - * - * @tags: [ - * requires_fcv_61 - * ] - */ - -load("jstests/fle2/libs/encrypted_client_util.js"); - -(function() { -"use strict"; - -const rst = new ReplSetTest({nodes: 1}); -rst.startSet(); -rst.initiate(); -rst.awaitReplication(); - -let conn = rst.getPrimary(); -let db = conn.getDB("admin"); - -function runTest(targetFCV) { - assert.commandWorked(db.createCollection("basic", { - encryptedFields: { - "fields": [ - { - "path": "first", - "keyId": UUID("11d58b8a-0c6c-4d69-a0bd-70c6d9befae9"), - "bsonType": "int", - "queries": {"queryType": "rangePreview", "min": 1, "max": 2, "sparsity": 1} - }, - ] - } - })); - - let res = assert.commandFailedWithCode( - db.adminCommand({setFeatureCompatibilityVersion: targetFCV}), ErrorCodes.CannotDowngrade); - - assert(db.basic.drop()); - - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: targetFCV})); - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV})); -} - -runTest(lastLTSFCV); -runTest(lastContinuousFCV); - -rst.stopSet(); -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/invalid_index_options.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/invalid_index_options.js deleted file mode 100644 index 8abc86d65ac27..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/invalid_index_options.js +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Tests that in 6.1 version listIndexes can parse invalid index specs created before 5.0 version. - * - * @tags: [requires_replication] - */ -(function() { -"use strict"; - -load('jstests/multiVersion/libs/multi_rs.js'); - -var nodes = { - n1: {binVersion: "4.4"}, - n2: {binVersion: "4.4"}, -}; - -var rst = new ReplSetTest({nodes: nodes}); -rst.startSet(); -rst.initiate(); - -const dbName = "test"; -const collName = jsTestName(); - -let primaryDB = rst.getPrimary().getDB(dbName); -let primaryColl = primaryDB.getCollection(collName); - -// In earlier versions, users were able to add invalid index options when creating an index. The -// option could still be interpreted accordingly. -assert.commandWorked(primaryColl.createIndex({x: 1}, {sparse: "yes"})); - -// Upgrades from 4.4 to 5.0. -jsTestLog("Upgrading to version 5.0"); -rst.upgradeSet({binVersion: "5.0"}); -assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: "5.0"})); - -// Upgrades from 5.0 to 6.0. 
-jsTestLog("Upgrading to version last-lts"); -rst.upgradeSet({binVersion: "last-lts"}); -assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - -// Upgrades from 6.0 to latest. -jsTestLog("Upgrading to version latest"); -rst.upgradeSet({binVersion: "latest"}); -const primary = rst.getPrimary(); -assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - -primaryDB = primary.getDB(dbName); - -// Verify listIndexes command can correctly output the repaired index specs. -assert.commandWorked(primaryDB.runCommand({listIndexes: collName})); - -// Add a new node to make sure the initial sync works correctly with the invalid index specs. -jsTestLog("Bringing up a new node"); -rst.add(); -rst.reInitiate(); - -jsTestLog("Waiting for new node to be synced."); -rst.awaitReplication(); -rst.awaitSecondaryNodes(); - -const [secondary1, secondary2] = rst.getSecondaries(); -const secondaryDB1 = secondary1.getDB(dbName); -const secondaryDB2 = secondary2.getDB(dbName); - -// Verify that the existing nodes detect invalid index options, but the new node has the repaired -// index spec. -let validateRes = assert.commandWorked(primaryDB.runCommand({validate: collName})); -assert(!validateRes.valid, "validate should fail: " + tojson(validateRes)); - -validateRes = assert.commandWorked(secondaryDB1.runCommand({validate: collName})); -assert(!validateRes.valid, "validate should fail: " + tojson(validateRes)); - -validateRes = assert.commandWorked(secondaryDB2.runCommand({validate: collName})); -assert(validateRes.valid, "validate should succeed: " + tojson(validateRes)); - -// Use collMod to fix the invalid index options in the collection. -assert.commandWorked(primaryDB.runCommand({collMod: collName})); - -// Fix the invalid fields from index spec. -checkLog.containsJson(primary, 6444400, {fieldName: "sparse"}); -checkLog.containsJson(secondary1, 6444400, {fieldName: "sparse"}); - -// Verify that the index no longer has invalid index options. -assert.commandWorked(primaryDB.runCommand({listIndexes: collName})); - -validateRes = assert.commandWorked(primaryDB.runCommand({validate: collName})); -assert(validateRes.valid, "validate should succeed: " + tojson(validateRes)); - -validateRes = assert.commandWorked(secondaryDB1.runCommand({validate: collName})); -assert(validateRes.valid, "validate should succeed: " + tojson(validateRes)); - -validateRes = assert.commandWorked(secondaryDB2.runCommand({validate: collName})); -assert(validateRes.valid, "validate should succeed: " + tojson(validateRes)); - -rst.stopSet(); -})(); \ No newline at end of file diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/partial_indexes_downgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/partial_indexes_downgrade.js deleted file mode 100644 index 95eb03bfeb514..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/partial_indexes_downgrade.js +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Tests that we don't fail the FCV check for partial indexes on the admin database during startup. - */ - -(function() { -'use strict'; - -const dbpath = MongoRunner.dataPath + 'partial_indexes_downgrade'; -resetDbpath(dbpath); - -// If we have a partial index on the admin database, we want to make sure we can startup properly -// despite FCV not being initialized yet. It's possible to hit an invariant if featureFlag.isEnabled -// is called without checking fcv.isVersionInitialized (see SERVER-71068 for more details). 
-const dbName = 'admin'; -const collName = 'partial_indexes_downgrade'; - -// Startup with latest, create partial index, stop mongod. -{ - const conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: 'latest', noCleanData: true}); - const db = conn.getDB(dbName); - const coll = db[collName]; - - assert.commandWorked(coll.createIndex( - {a: 1, b: 1}, {partialFilterExpression: {$or: [{a: {$lt: 20}}, {b: {$lt: 10}}]}})); - - assert.commandWorked(coll.insert({a: 1, b: 1})); - assert.commandWorked(coll.insert({a: 30, b: 20})); - - MongoRunner.stopMongod(conn); -} - -// Startup with latest again, to make sure we're not checking FCV for this index at startup. -{ - const conn = MongoRunner.runMongod({dbpath: dbpath, binVersion: 'latest', noCleanData: true}); - const db = conn.getDB(dbName); - const coll = db[collName]; - - // Make sure we are on the same db path as before. - assert.eq(coll.aggregate().toArray().length, 2); - - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - MongoRunner.stopMongod(conn); -} -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/recordPreImages_option_upgrade.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/recordPreImages_option_upgrade.js deleted file mode 100644 index 6f86dcb202c5a..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/recordPreImages_option_upgrade.js +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Verifies that the server ignores collection option "recordPreImages" on binary upgrade from the - * last LTS version to the current, as well as removes the option from collection attributes on - * FCV upgrade. - */ -(function() { -"use strict"; -load('jstests/multiVersion/libs/multi_rs.js'); - -const lastLTSVersion = "last-lts"; -const latestVersion = "latest"; - -// Setup a two-node replica set with last LTS binaries, so it is possible to create a collection -// with "recordPreImages" option. -const rst = new ReplSetTest( - {name: jsTestName(), nodes: [{binVersion: lastLTSVersion}, {binVersion: lastLTSVersion}]}); -rst.startSet(); -rst.initiate(); -const testDB = rst.getPrimary().getDB("test"); -const primaryNode = rst.getPrimary(); -const secondaryNode = rst.getSecondary(); - -// Create the collection. -const collectionName = "coll"; -assert.commandWorked(testDB.createCollection(collectionName, {recordPreImages: true})); -let coll = testDB[collectionName]; - -// Insert a test document which will be updated to trigger recording of change stream pre-images. -assert.commandWorked(coll.insert({_id: 1, a: 1})); -assert.commandWorked(coll.updateOne({_id: 1}, {$inc: {a: 1}})); -rst.awaitReplication(); - -// Upgrade the binary of the secondary node to the current version to setup a mixed binary cluster. -rst.upgradeMembers([secondaryNode], {binVersion: latestVersion}); - -// Make sure the primary node did not change. -rst.stepUp(primaryNode); - -// Verify that recording of change stream pre-images succeeds. -assert.commandWorked(coll.updateOne({_id: 1}, {$inc: {a: 1}})); -rst.awaitReplication(); - -// Finally upgrade the binary of the primary node to the current version. -rst.upgradePrimary(rst.getPrimary(), {binVersion: latestVersion}); - -// Update a document on the collection with inactive "recordPreImages" collection option. 
-coll = rst.getPrimary().getDB("test")[collectionName]; -assert.commandWorked(coll.updateOne({_id: 1}, {$inc: {a: 1}})); -rst.awaitReplication(); - -// Upgrade the FCV to the latest to trigger removal of "recordPreImages" collection option from -// persistent catalog entries. -assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: latestFCV})); - -// To check the collection options, downgrade FCV to later replace the binary of the server with -// the last LTS binary version. -assert.commandWorked(rst.getPrimary().adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); -rst.upgradeSet({binVersion: lastLTSVersion}); - -// Verify that collection option "recordPreImages" was removed. -const result = - assert.commandWorked(rst.getPrimary().getDB("test").runCommand({listCollections: 1})); -assert.eq(result.cursor.firstBatch[0].name, collectionName); -assert.docEq( - {}, - result.cursor.firstBatch[0].options, - `Collection option "recordPreImages" was not removed. Got response: ${tojson(result)}`); -rst.stopSet(); -})(); \ No newline at end of file diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_capped_collection.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_capped_collection.js deleted file mode 100644 index 2a921cfaf1c1f..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_capped_collection.js +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Test to ensure that: - * 1. The FCV cannot be downgraded to 6.0 if there are capped collections with a size - * that is non multiple of 256 bytes. - * 2. The FCV can be set back to upgraded if feature flag DowngradingToUpgrading is true. - * - * @tags: [requires_fcv_70] - */ -(function() { -"use strict"; - -load('jstests/libs/collection_drop_recreate.js'); -load("jstests/libs/feature_flag_util.js"); - -const latest = "latest"; -const dbName = "test_set_fcv_capped_collection"; -const collName = "capped_collection"; -const cappedCollOptions = { - capped: true, - size: 5242881, - max: 5000, -}; - -function checkFCVDowngradeUpgrade(db, adminDB) { - let runDowngradingToUpgrading = false; - if (FeatureFlagUtil.isEnabled(adminDB, "DowngradingToUpgrading")) { - runDowngradingToUpgrading = true; - } - - jsTest.log("Create a relaxed size capped collection and attempt to setFCV to lastLTS"); - checkFCV(adminDB, latestFCV); - assertCreateCollection(db, collName, cappedCollOptions); - assert.commandFailedWithCode(adminDB.runCommand({setFeatureCompatibilityVersion: lastLTSFCV}), - ErrorCodes.CannotDowngrade); - - // Check FCV is in downgrading state. - checkFCV(adminDB, lastLTSFCV, lastLTSFCV); - - if (runDowngradingToUpgrading) { - jsTest.log("Set FCV back to latest"); - assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); - - // Confirm the capped collection is not affected. 
- const res = db[collName].stats(); - assert.eq(res.capped, cappedCollOptions.capped); - assert.eq(res.maxSize, cappedCollOptions.size); - } - - assertDropCollection(db, collName); -} - -function runStandaloneTest() { - jsTest.log("Start Standalone test"); - const conn = MongoRunner.runMongod({binVersion: latest}); - const db = conn.getDB(dbName); - const adminDB = conn.getDB("admin"); - - checkFCVDowngradeUpgrade(db, adminDB); - - MongoRunner.stopMongod(conn); -} - -function runReplicaSetTest() { - jsTest.log("Start Replica Set test"); - const rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}}); - rst.startSet(); - rst.initiate(); - const db = rst.getPrimary().getDB(dbName); - const adminDB = rst.getPrimary().getDB("admin"); - - checkFCVDowngradeUpgrade(db, adminDB); - - rst.stopSet(); -} - -function runShardingTest() { - jsTest.log("Start Sharding test"); - const st = new ShardingTest({shards: 2, mongos: 1, config: 1}); - const db = st.s.getDB(dbName); - const adminDB = st.s.getDB("admin"); - - checkFCVDowngradeUpgrade(db, adminDB); - - st.stop(); -} - -runStandaloneTest(); -runReplicaSetTest(); -runShardingTest(); -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_encrypted_field_collection.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_encrypted_field_collection.js deleted file mode 100644 index 125e6c7dbd7fe..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_encrypted_field_collection.js +++ /dev/null @@ -1,83 +0,0 @@ -/** - * Test to ensure that: - * 1. The FCV cannot be downgraded to 6.0 if there are queryable range encryption indexes. - * 2. The FCV can be set back to upgraded if feature flag DowngradingToUpgrading is true. - * - * @tags: [requires_fcv_70] - */ -(function() { -"use strict"; - -load('jstests/libs/collection_drop_recreate.js'); -load("jstests/libs/feature_flag_util.js"); - -const latest = "latest"; -const dbName = "test_set_fcv_encrypted_field"; -const collName = "encrypted"; -const encryptedFieldsOption = { - encryptedFields: { - fields: [{ - path: "firstName", - keyId: UUID("11d58b8a-0c6c-4d69-a0bd-70c6d9befae9"), - bsonType: "int", - queries: {queryType: "rangePreview", sparsity: 1, min: NumberInt(1), max: NumberInt(2)} - }] - } -}; - -function checkFCVDowngradeUpgrade(db, adminDB) { - let runDowngradingToUpgrading = false; - if (FeatureFlagUtil.isEnabled(adminDB, "DowngradingToUpgrading")) { - runDowngradingToUpgrading = true; - } - - jsTest.log("Create a encrypted field collection and attempt to setFCV to lastLTS"); - checkFCV(adminDB, latestFCV); - assertCreateCollection(db, collName, encryptedFieldsOption); - assert.commandFailedWithCode(adminDB.runCommand({setFeatureCompatibilityVersion: lastLTSFCV}), - ErrorCodes.CannotDowngrade); - - // Check FCV is in downgrading state. - checkFCV(adminDB, lastLTSFCV, lastLTSFCV); - - if (runDowngradingToUpgrading) { - jsTest.log("Set FCV back to latest"); - assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); - - // Check encryptedField is unaffected. 
- const res = db.getCollectionInfos({name: collName}); - assert.eq(res[0].options.encryptedFields.fields[0].queries.queryType, - encryptedFieldsOption.encryptedFields.fields[0].queries.queryType); - } - - assertDropCollection(db, collName); -} - -function runReplicaSetTest() { - jsTest.log("Start Replica Set test"); - const rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}}); - rst.startSet(); - rst.initiate(); - const db = rst.getPrimary().getDB(dbName); - const adminDB = rst.getPrimary().getDB("admin"); - - checkFCVDowngradeUpgrade(db, adminDB); - - rst.stopSet(); -} - -function runShardingTest() { - jsTest.log("Start Sharding test"); - const st = new ShardingTest({shards: 2, mongos: 1, config: 1}); - const db = st.s.getDB(dbName); - const adminDB = st.s.getDB("admin"); - - checkFCVDowngradeUpgrade(db, adminDB); - - st.stop(); -} - -runReplicaSetTest(); -runShardingTest(); -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_partial_ttl_index_on_timeseries.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_partial_ttl_index_on_timeseries.js deleted file mode 100644 index 1233db7478588..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_downgrading_to_upgraded_with_partial_ttl_index_on_timeseries.js +++ /dev/null @@ -1,106 +0,0 @@ -/** - * Test to ensure that: - * 1. The FCV cannot be downgraded to 6.0 if there are timeseries collections with - * partial TTL index. - * 2. The FCV can be set back to upgraded if feature flag DowngradingToUpgrading is true. - * - * @tags: [requires_fcv_70] - */ -(function() { -"use strict"; - -load('jstests/libs/collection_drop_recreate.js'); -load("jstests/libs/feature_flag_util.js"); - -const latest = "latest"; -const dbName = "test_set_fcv_partial_ttl_index"; -const collName = "timeseries"; -const timeFieldName = "tm"; -const metaFieldName = "mm"; -const timeseriesOptions = { - timeseries: { - timeField: timeFieldName, - metaField: metaFieldName, - } -}; -const ttlIndexSpec = { - [timeFieldName]: 1, -}; -const ttlIndexOptions = { - expireAfterSeconds: 3600, - partialFilterExpression: { - [metaFieldName]: { - $gt: 5, - } - } -}; - -function checkFCVDowngradeUpgrade(db, adminDB) { - let runDowngradingToUpgrading = false; - if (FeatureFlagUtil.isEnabled(adminDB, "DowngradingToUpgrading")) { - runDowngradingToUpgrading = true; - } - - jsTest.log( - "Create a partial TTL index on timeseries collection and attempt to setFCV to lastLTS"); - checkFCV(adminDB, latestFCV); - assertCreateCollection(db, collName, timeseriesOptions); - assert.commandWorked(db[collName].createIndex(ttlIndexSpec, ttlIndexOptions)); - assert.commandFailed(adminDB.runCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - - // Check FCV is in downgrading state. - checkFCV(adminDB, lastLTSFCV, lastLTSFCV); - - if (runDowngradingToUpgrading) { - jsTest.log("Set FCV back to latest"); - assert.commandWorked(adminDB.runCommand({setFeatureCompatibilityVersion: latestFCV})); - checkFCV(adminDB, latestFCV); - - // Confirm the partial TTL index is not affected. 
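// A sketch (not part of the original test) of how the index combination that blocks this
// downgrade (a TTL index with a partialFilterExpression on a time-series collection) could be
// listed, reusing the `db` and `collName` handles above.
const offendingIndexes = db[collName].getIndexes().filter(
    (ix) => ix.hasOwnProperty("expireAfterSeconds") && ix.hasOwnProperty("partialFilterExpression"));
offendingIndexes.forEach(
    (ix) => jsTestLog("Partial TTL index blocking downgrade: " + tojson(ix.key)));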
- const res = db[collName].getIndexes(); - assert.eq(res[1].expireAfterSeconds, ttlIndexOptions.expireAfterSeconds); - assert.eq(res[1].partialFilterExpression, ttlIndexOptions.partialFilterExpression); - } - - assertDropCollection(db, collName); -} - -function runStandaloneTest() { - jsTest.log("Start Standalone test"); - const conn = MongoRunner.runMongod({binVersion: latest}); - const db = conn.getDB(dbName); - const adminDB = conn.getDB("admin"); - - checkFCVDowngradeUpgrade(db, adminDB); - - MongoRunner.stopMongod(conn); -} - -function runReplicaSetTest() { - jsTest.log("Start Replica Set test"); - const rst = new ReplSetTest({nodes: 2, nodeOpts: {binVersion: latest}}); - rst.startSet(); - rst.initiate(); - const db = rst.getPrimary().getDB(dbName); - const adminDB = rst.getPrimary().getDB("admin"); - - checkFCVDowngradeUpgrade(db, adminDB); - - rst.stopSet(); -} - -function runShardingTest() { - jsTest.log("Start Sharding test"); - const st = new ShardingTest({shards: 2, mongos: 1, config: 1}); - const db = st.s.getDB(dbName); - const adminDB = st.s.getDB("admin"); - - checkFCVDowngradeUpgrade(db, adminDB); - - st.stop(); -} - -runStandaloneTest(); -runReplicaSetTest(); -runShardingTest(); -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_prevent_invalid_downgrade_with_catalog_shard.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_prevent_invalid_downgrade_with_catalog_shard.js deleted file mode 100644 index 10e08d3b98c27..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/set_fcv_prevent_invalid_downgrade_with_catalog_shard.js +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Test to ensure that: - * 1. The FCV cannot be downgraded to a version that does not have catalog shards if catalog - * shard is enabled. - * 2. If the FCV does get downgraded to a version that does not support catalog shards, a - * catalog shard cannot be created (this can occur if an FCV downgrade happens concurrently - * with the creation of a catalog shard). - * - * @tags: [requires_fcv_70, featureFlagCatalogShard, featureFlagTransitionToCatalogShard] - */ -(function() { -"use strict"; - -// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located -// configsvr. 
-TestData.skipCheckMetadataConsistency = true; - -load("jstests/libs/catalog_shard_util.js"); - -const shardedNs = "foo.bar"; -const unshardedNs = "unsharded_foo.unsharded_bar"; - -function basicCRUD(conn, ns) { - assert.commandWorked( - conn.getCollection(ns).insert([{_id: 1, x: 1, skey: -1000}, {_id: 2, skey: 1000}])); - assert.sameMembers(conn.getCollection(ns).find().toArray(), - [{_id: 1, x: 1, skey: -1000}, {_id: 2, skey: 1000}]); - assert.commandWorked(conn.getCollection(ns).remove({x: 1})); - assert.commandWorked(conn.getCollection(ns).remove({skey: 1000})); - assert.eq(conn.getCollection(ns).find().toArray().length, 0); -} - -let splitPoint = 0; -function basicShardedDDL(conn, ns) { - assert.commandWorked(conn.adminCommand({split: ns, middle: {skey: splitPoint}})); - splitPoint += 10; -} - -const st = new ShardingTest({shards: 2, catalogShard: true, other: {enableBalancer: true}}); -const mongosAdminDB = st.s.getDB("admin"); - -assert.commandWorked(st.s.adminCommand({shardCollection: shardedNs, key: {skey: 1}})); - -function runTest(targetFCV) { - jsTest.log("Downgrading FCV to an unsupported version when catalogShard is enabled."); - - const errRes = assert.commandFailedWithCode( - mongosAdminDB.runCommand({setFeatureCompatibilityVersion: targetFCV}), - ErrorCodes.CannotDowngrade); - assert.eq(errRes.errmsg, - `Cannot downgrade featureCompatibilityVersion to ${targetFCV} with a catalog shard as it is not supported in earlier versions. Please transition the config server to dedicated mode using the transitionToDedicatedConfigServer command.`); - - // The downgrade fails and should not start the downgrade process on any cluster node. - const configRes = - st.config0.getDB("admin").runCommand({getParameter: 1, featureCompatibilityVersion: 1}); - assert(configRes.featureCompatibilityVersion); - assert.eq(configRes.featureCompatibilityVersion.version, latestFCV); - - const shardRes = - st.shard1.getDB("admin").runCommand({getParameter: 1, featureCompatibilityVersion: 1}); - assert(shardRes.featureCompatibilityVersion); - assert.eq(shardRes.featureCompatibilityVersion.version, latestFCV); - - // The catalog shard's data can still be accessed. - basicCRUD(st.s, shardedNs); - basicShardedDDL(st.s, shardedNs); - basicCRUD(st.s, unshardedNs); - - // Remove the catalog shard and verify we can now downgrade. - CatalogShardUtil.transitionToDedicatedConfigServer(st); - assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: targetFCV})); - - jsTest.log("Attempting to create a catalogShard on an unsupported FCV."); - - assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: targetFCV})); - assert.commandFailedWithCode(mongosAdminDB.runCommand({transitionToCatalogShard: 1}), 7467202); - - // Upgrade and transition back to catalog shard mode for the next test. 
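// For reference, a hedged sketch of how the transition to a dedicated config server is normally
// driven to completion: the command is repeated until it reports "completed", and its
// "remaining" field shows what still has to be drained (chunks, databases). The field names
// match the responses asserted on elsewhere in this changeset; the polling loop itself is
// illustrative, and `mongosAdminDB` is the handle defined in this test.
assert.soon(() => {
    const res = assert.commandWorked(
        mongosAdminDB.runCommand({transitionToDedicatedConfigServer: 1}));
    if (res.state !== "completed") {
        jsTestLog("Config server still draining: " + tojson(res.remaining));
        return false;
    }
    return true;
});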
- assert.commandWorked(mongosAdminDB.runCommand({setFeatureCompatibilityVersion: latestFCV})); - assert.commandWorked(mongosAdminDB.runCommand({transitionToCatalogShard: 1})); - - basicCRUD(st.s, shardedNs); - basicShardedDDL(st.s, shardedNs); - basicCRUD(st.s, unshardedNs); -} - -runTest(lastLTSFCV); -runTest(lastContinuousFCV); - -st.stop(); -})(); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_index.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_index.js deleted file mode 100644 index a6bca60df1a38..0000000000000 --- a/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_index.js +++ /dev/null @@ -1,24 +0,0 @@ -/** - * Tests that time-series measurement indexes can be created in FCV 6.0. - */ -(function() { -"use strict"; - -const rst = new ReplSetTest({nodes: 1}); -rst.startSet(); -rst.initiate(); - -const primary = rst.getPrimary(); - -const dbName = "test"; -const collName = "coll"; - -const db = primary.getDB(dbName); - -assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); -assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "t", metaField: "m"}})); -assert.commandWorked(db.coll.insert({t: ISODate(), m: 1})); -assert.commandWorked(db.coll.createIndex({a: 1, t: 1})); - -rst.stopSet(); -}()); diff --git a/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_out_error.js b/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_out_error.js new file mode 100644 index 0000000000000..62a7a6a16ac17 --- /dev/null +++ b/jstests/multiVersion/targetedTestsLastLtsFeatures/timeseries_out_error.js @@ -0,0 +1,86 @@ +/** + * Tests that $out errors when trying to write to time-series collections on older server versions. + * $out with the 'timeseries' option should only succeed if the FCV >= 7.1. + */ + +(function() { +"use strict"; + +load('./jstests/multiVersion/libs/multi_cluster.js'); // for upgradeCluster. + +const st = new ShardingTest({ + shards: 2, + rs: {nodes: 2}, + mongos: 1, + other: { + mongosOptions: {binVersion: "last-lts"}, + configOptions: {binVersion: "last-lts"}, + shardOptions: {binVersion: "last-lts"}, + rsOptions: {binVersion: "last-lts"} + } +}); +st.configRS.awaitReplication(); + +const dbName = "test"; +const testDB = st.s.getDB(dbName); +let coll = testDB["coll"]; +let tColl = testDB["timeseries"]; +coll.drop(); +tColl.drop(); + +// set up a source collection and a time-series target collection. +assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); +assert.commandWorked(coll.insert({t: ISODate(), m: 1})); +assert.commandWorked(testDB.createCollection(tColl.getName(), {timeseries: {timeField: "t"}})); +assert.commandWorked(tColl.insert({t: ISODate(), m: 1})); + +// assert aggregate succeeds with no 'timeseries' option. +let pipeline = [{$out: "out"}]; +assert.doesNotThrow(() => coll.aggregate(pipeline)); +assert.eq(1, testDB["out"].find().itcount()); + +// assert aggregate fails with the original error with the 'timeseries' option. +pipeline = [{$out: {coll: "out_time", db: dbName, timeseries: {timeField: "t"}}}]; +assert.throwsWithCode(() => coll.aggregate(pipeline), 16994); + +// assert aggregate fails if trying to write to a time-series collection without the 'timeseries' +// option. +let replacePipeline = [{$out: tColl.getName()}]; +assert.throwsWithCode(() => coll.aggregate(replacePipeline), ErrorCodes.InvalidOptions); + +// upgrade the shards. 
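// For context, the $out shape gated behind FCV 7.1 that this test exercises (identifiers are the
// ones declared above). After the binary and FCV upgrades performed below, a pipeline of this
// form is expected to succeed and produce a time-series collection:
const timeseriesOutPipeline = [
    {$out: {db: dbName, coll: "out_time", timeseries: {timeField: "t"}}},
];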
+jsTestLog('upgrading the shards.'); +st.upgradeCluster("latest", {upgradeMongos: false, upgradeConfigs: false}); +awaitRSClientHosts(st.s, st.rs0.getPrimary(), {ok: true, ismaster: true}); +// assert aggregate fails with the original error with the 'timeseries' option. +assert.throwsWithCode(() => coll.aggregate(pipeline), 16994); +// assert aggregate fails if trying to write to a time-series collection without the 'timeseries' +// option. +assert.throwsWithCode(() => coll.aggregate(replacePipeline), 7406100); + +// upgrade the config server and mongos. +jsTestLog('upgrading the config server and mongos.'); +st.upgradeCluster("latest", {upgradeShards: false, upgradeMongos: true, upgradeConfigs: true}); +let mongosConn = st.s; +coll = mongosConn.getDB(dbName)["coll"]; +// assert aggregate fails with an updated error with the 'timeseries' option. +assert.throwsWithCode(() => coll.aggregate(pipeline), 7406100); // new error code. +// assert aggregate fails if trying to write to a time-series collection without the 'timeseries' +// option. +assert.throwsWithCode(() => coll.aggregate(replacePipeline), 7406100); + +// upgrade the FCV version +jsTestLog('upgrading the FCV version.'); +assert.commandWorked(mongosConn.adminCommand({setFeatureCompatibilityVersion: latestFCV})); +// assert aggregate with 'timeseries' succeeds. +assert.doesNotThrow(() => coll.aggregate(pipeline)); +let resultColl = mongosConn.getDB(dbName)["out_time"]; +assert.eq(1, resultColl.find().itcount()); + +// assert aggregate replacing a time-series collection without 'timeseries' succeeds. +assert.doesNotThrow(() => coll.aggregate(replacePipeline)); +resultColl = mongosConn.getDB(dbName)["timeseries"]; +assert.eq(1, resultColl.find().itcount()); + +st.stop(); +}()); diff --git a/jstests/noPassthrough/agg_collstats_expr.js b/jstests/noPassthrough/agg_collstats_expr.js index 45f98af720d9c..3d61cdde9d262 100644 --- a/jstests/noPassthrough/agg_collstats_expr.js +++ b/jstests/noPassthrough/agg_collstats_expr.js @@ -25,10 +25,7 @@ function getShardCount(counts, shardName) { * on the i-th shard or no chunks assigned to that shard if shardDistribution[i] is null. */ function runShardingTestExists(shardDistribution) { - const st = ShardingTest({ - shards: shardDistribution.length, - setParameter: {receiveChunkWaitForRangeDeleterTimeoutMS: 90000} - }); + const st = ShardingTest({shards: shardDistribution.length}); const mongos = st.s0; const admin = mongos.getDB("admin"); diff --git a/jstests/noPassthrough/agg_group.js b/jstests/noPassthrough/agg_group.js index 654de425e79f1..e19f0cd7308d9 100644 --- a/jstests/noPassthrough/agg_group.js +++ b/jstests/noPassthrough/agg_group.js @@ -11,11 +11,6 @@ // partial aggregation results in a special format to the mongos. // // @tags: [requires_sharding] -(function() { -'use strict'; - -load("jstests/libs/analyze_plan.js"); - const st = new ShardingTest({config: 1, shards: 1}); // This database name can provide multiple similar test cases with a good separate namespace and @@ -108,5 +103,4 @@ assertShardedGroupResultsMatch(coll, [{$group: {_id: "$item", a: {$avg: "$price" // Verifies that SBE group pushdown with sharded $avg works for missing data. 
assertShardedGroupResultsMatch(coll, [{$group: {_id: "$item", a: {$avg: "$missing"}}}]); -st.stop(); -}()); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/aggregation_out_on_secondary.js b/jstests/noPassthrough/aggregation_out_on_secondary.js index 2279b9f9f10ae..68862925078e1 100644 --- a/jstests/noPassthrough/aggregation_out_on_secondary.js +++ b/jstests/noPassthrough/aggregation_out_on_secondary.js @@ -68,4 +68,4 @@ const primaryProfile = assert.eq(1, primaryProfile); rs.stopSet(); -}()); \ No newline at end of file +}()); diff --git a/jstests/noPassthrough/analyze_command.js b/jstests/noPassthrough/analyze_command.js index 122bca9fc1338..f702bddf1bc47 100644 --- a/jstests/noPassthrough/analyze_command.js +++ b/jstests/noPassthrough/analyze_command.js @@ -1,7 +1,4 @@ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod({setParameter: {featureFlagCommonQueryFramework: true}}); assert.neq(null, conn, "mongod was unable to start up"); @@ -11,11 +8,11 @@ const db = conn.getDB(jsTestName()); if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } assert.commandWorked( - db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); + db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"})); const coll = db.cqf_analyze; const syscoll = db.system.statistics.cqf_analyze; @@ -179,4 +176,3 @@ assert.eq(100, syscoll.find({_id: "a"})[0].statistics.scalarHistogram.buckets.le cleanup(); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/and_hash.js b/jstests/noPassthrough/and_hash.js index ba96d11ff16a1..7c7b27b45ccaa 100644 --- a/jstests/noPassthrough/and_hash.js +++ b/jstests/noPassthrough/and_hash.js @@ -1,9 +1,6 @@ // Tests for whether the query solution correctly used an AND_HASH for index intersection. -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For assertArrayEq. -load("jstests/libs/analyze_plan.js"); // For planHasStage helper to analyze explain() output. +import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; const conn = MongoRunner.runMongod(); const db = conn.getDB("test"); @@ -136,5 +133,4 @@ assertAndHashUsed({ shouldUseAndHash: true }); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/and_sorted.js b/jstests/noPassthrough/and_sorted.js index d5b64cbda8ec2..137cd8393c118 100644 --- a/jstests/noPassthrough/and_sorted.js +++ b/jstests/noPassthrough/and_sorted.js @@ -1,9 +1,6 @@ // Tests for whether the query solution correctly used an AND_SORTED stage for index intersection. -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For assertArrayEq. -load("jstests/libs/analyze_plan.js"); // For planHasStage helper to analyze explain() output. 
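// A recurring pattern in this changeset, sketched here for reference: legacy shell tests are
// converted from load()-based IIFEs into ES modules. load() calls for helpers that are now
// modules become import statements, the wrapping (function() { "use strict"; ... })() is
// dropped, and early `return` statements (which only made sense inside the IIFE) become quit().
// The body below mirrors the analyze_command.js hunk above and is illustrative only.
import {checkSBEEnabled} from "jstests/libs/sbe_util.js";

const exampleConn = MongoRunner.runMongod();
const exampleDB = exampleConn.getDB(jsTestName());
if (!checkSBEEnabled(exampleDB)) {
    jsTestLog("Skipping test because SBE is not enabled");
    MongoRunner.stopMongod(exampleConn);
    quit();  // was `return` before the module conversion
}
// ... test body ...
MongoRunner.stopMongod(exampleConn);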
+import {getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; const conn = MongoRunner.runMongod(); const db = conn.getDB("test"); @@ -151,5 +148,4 @@ runAndSortedTests(); assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1})); runAndSortedTests(); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/arithmetic_expression_constant_folding.js b/jstests/noPassthrough/arithmetic_expression_constant_folding.js index da298ab843429..14319ba2a8c57 100644 --- a/jstests/noPassthrough/arithmetic_expression_constant_folding.js +++ b/jstests/noPassthrough/arithmetic_expression_constant_folding.js @@ -89,16 +89,9 @@ function runRandomizedPropertyTest({op, min, max}) { assertPipelineCorrect(pipeline, v); } -// TODO: SERVER-67282 Randomized property testing should work after SBE is updated to match classic -// engine, so remove this setParameter. When this knob is removed from this test, move this test -// into jstests/aggregation/expressions/arithmetic_constant_folding.js. -testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceClassicEngine"}); for (let i = 0; i < 5; i++) { runRandomizedPropertyTest({op: "$add", min: -314159255, max: 314159255}); runRandomizedPropertyTest({op: "$multiply", min: -31415, max: 31415}); } -// TODO: SERVER-67282 Randomized property testing should work after SBE is updated to match classic -// engine, so remove this setParameter. -testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "trySbeEngine"}); MongoRunner.stopMongod(conn); })(); diff --git a/jstests/noPassthrough/atomic_rename_collection.js b/jstests/noPassthrough/atomic_rename_collection.js index cdc7e336c9149..1e58be4eadd86 100644 --- a/jstests/noPassthrough/atomic_rename_collection.js +++ b/jstests/noPassthrough/atomic_rename_collection.js @@ -41,7 +41,7 @@ tests.forEach((test) => { dropTarget: true }; assert.commandWorked(local.adminCommand(cmd), tojson(cmd)); - ops = + let ops = local.oplog.rs.find({ts: {$gt: ts}, ns: {'$regex': dbregex}}).sort({$natural: 1}).toArray(); assert.eq(ops.length, test.expectedOplogEntries, diff --git a/jstests/noPassthrough/auto_safe_reconfig_helper_max_voting_nodes.js b/jstests/noPassthrough/auto_safe_reconfig_helper_max_voting_nodes.js index 534abcf611eea..b8227d0267713 100644 --- a/jstests/noPassthrough/auto_safe_reconfig_helper_max_voting_nodes.js +++ b/jstests/noPassthrough/auto_safe_reconfig_helper_max_voting_nodes.js @@ -11,15 +11,6 @@ load("jstests/replsets/rslib.js"); -function waitAllNodesHaveConfig(replTest, config) { - replTest.nodes.forEach(function(node) { - assert.soon(function() { - const nodeConfig = replTest.getReplSetConfigFromNode(node.nodeId); - return isSameConfigContent(config, nodeConfig); - }); - }); -} - // Make secondaries unelectable. Add 7 voting nodes, which is the maximum allowed. const replTest = new ReplSetTest({ nodes: [ diff --git a/jstests/noPassthrough/auto_safe_reconfig_helpers.js b/jstests/noPassthrough/auto_safe_reconfig_helpers.js index fe3e8b7b62531..812fc16cdc9b0 100644 --- a/jstests/noPassthrough/auto_safe_reconfig_helpers.js +++ b/jstests/noPassthrough/auto_safe_reconfig_helpers.js @@ -185,5 +185,8 @@ assertSameConfigContent(replTest.getReplSetConfigFromNode(), config); // Restore the original config before shutting down. 
reconfig(replTest, origConfig); +// There is a chance that some nodes haven't finished reconfig, if we directly call stopSet, those +// nodes may fail to answer certain commands and fail the test. +waitAllNodesHaveConfig(replTest, config); replTest.stopSet(); })(); diff --git a/jstests/noPassthrough/background_validation_checkpoint_existence.js b/jstests/noPassthrough/background_validation_checkpoint_existence.js index 438278b9aeaec..f3cf1afc450ef 100644 --- a/jstests/noPassthrough/background_validation_checkpoint_existence.js +++ b/jstests/noPassthrough/background_validation_checkpoint_existence.js @@ -58,4 +58,4 @@ assert.eq(true, res.valid, res); assert.eq(2, res.nIndexes, res); MongoRunner.stopMongod(conn); -}()); \ No newline at end of file +}()); diff --git a/jstests/noPassthrough/batched_multi_deletes.js b/jstests/noPassthrough/batched_multi_deletes.js index 09d1482f0b2c9..ed53fe438437f 100644 --- a/jstests/noPassthrough/batched_multi_deletes.js +++ b/jstests/noPassthrough/batched_multi_deletes.js @@ -7,9 +7,7 @@ * ] */ -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); +import {getPlanStage} from "jstests/libs/analyze_plan.js"; function validateBatchedDeletes(conn) { const db = conn.getDB("test"); @@ -91,5 +89,4 @@ function validateBatchedDeletes(conn) { rst.awaitNodesAgreeOnPrimary(); validateBatchedDeletes(rst.getPrimary()); rst.stopSet(); -} -})(); +} \ No newline at end of file diff --git a/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js b/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js new file mode 100644 index 0000000000000..3fc2bebc22802 --- /dev/null +++ b/jstests/noPassthrough/batched_multi_deletes_cursor_cache_disabled.js @@ -0,0 +1,58 @@ +/** + * Tests batched deletes with 'gWiredTigerCursorCacheSize=0', to see if there are any use-after-free + * bugs due to cursor lifetime. This test is only expected to catch regressions in ASAN variants. 
+ * + * @tags: [ + * does_not_support_transactions, + * exclude_from_large_txns, + * requires_sharding, + * ] + */ +(function() { +"use strict"; + +if (!_isAddressSanitizerActive()) { + jsTestLog("Skipping " + jsTestName() + " because address sanitizer is not active."); +} + +load("jstests/libs/fail_point_util.js"); // For 'configureFailPoint()' +load("jstests/libs/parallelTester.js"); // For 'startParallelShell()' + +var st = + new ShardingTest({shards: 1, rs: {nodes: 1, setParameter: {wiredTigerCursorCacheSize: 0}}}); + +const primary = st.s0; +const rsPrimary = st.rs0.getPrimary(); +const db = primary.getDB('test'); +const coll = db.test; + +assert.commandWorked(primary.adminCommand({shardCollection: 'test.test', key: {_id: 1}})); + +const docIds = Array.from(Array(10).keys()); +assert.commandWorked(coll.insert(docIds.map((x) => { + return {_id: x, x: x}; +}))); + +const throwWriteConflictExceptionInBatchedDeleteStage = + configureFailPoint(rsPrimary, "throwWriteConflictExceptionInBatchedDeleteStage"); + +function performBatchedDelete() { + const testDB = db.getMongo().getDB("test"); + const coll = testDB.test; + const result = assert.commandWorked(coll.remove({x: {$gte: 0}})); + jsTestLog('delete result: ' + tojson(result)); +} + +const awaitBatchedDelete = startParallelShell(performBatchedDelete, primary.port); + +throwWriteConflictExceptionInBatchedDeleteStage.wait(); + +jsTestLog("update documents"); +assert.commandWorked(coll.update({}, {$inc: {x: -docIds.length}})); + +throwWriteConflictExceptionInBatchedDeleteStage.off(); + +awaitBatchedDelete(); + +st.stop(); +})(); diff --git a/jstests/noPassthrough/batched_multi_deletes_large_transaction.js b/jstests/noPassthrough/batched_multi_deletes_large_transaction.js index 09728147bec8e..7c94520478fea 100644 --- a/jstests/noPassthrough/batched_multi_deletes_large_transaction.js +++ b/jstests/noPassthrough/batched_multi_deletes_large_transaction.js @@ -6,14 +6,11 @@ * handled by the primary. * * @tags: [ - * requires_fcv_62, + * requires_fcv_71, * requires_replication, * ] */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); // for FeatureFlagUtil.isEnabled +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const rst = new ReplSetTest({ nodes: [ @@ -47,7 +44,7 @@ assert.commandWorked(coll.insert(docIds.map((x) => { // Set up server to split deletes over multiple oplog entries // such that each oplog entry contains two delete operations. -if (!FeatureFlagUtil.isEnabled(db, "InternalWritesAreReplicatedTransactionally")) { +if (!FeatureFlagUtil.isEnabled(db, "LargeBatchedOperations")) { // Confirm legacy server behavior where mutiple oplog entries are not allowed // for batched writes. const result = @@ -61,7 +58,7 @@ if (!FeatureFlagUtil.isEnabled(db, "InternalWritesAreReplicatedTransactionally") // Stop test and return early. The rest of the test will test the new multiple oplog entry // behavior. 
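// Illustrative sketch, paralleling the assertions made later in this test: the applyOps oplog
// entries produced by a batched multi-delete can be inspected directly on the primary. The
// query shape mirrors the one used in change_streams_pre_image_removal_job.js further down in
// this changeset; `rst` and `coll` are the handles used by this test.
const oplog = rst.getPrimary().getDB("local").oplog.rs;
const batchedDeleteOps =
    oplog.find({ns: "admin.$cmd", "o.applyOps.op": "d", "o.applyOps.ns": coll.getFullName()})
        .sort({$natural: 1})
        .toArray();
jsTestLog("applyOps entries holding batched deletes: " + batchedDeleteOps.length);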
rst.stopSet(); - return; + quit(); } // This document removal request will be replicated over two applyOps oplog entries, @@ -96,5 +93,4 @@ assert(ops[1].hasOwnProperty('prevOpTime')); assert.eq(ops[0].prevOpTime.ts, ops[1].ts); assert.eq(ops[1].prevOpTime.ts, Timestamp()); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/bson_max_limit.js b/jstests/noPassthrough/bson_max_limit.js index 60ad231ee5184..1b60ce8300dd8 100644 --- a/jstests/noPassthrough/bson_max_limit.js +++ b/jstests/noPassthrough/bson_max_limit.js @@ -83,26 +83,4 @@ function executeTest(db) { executeTest(conn.getDB("test")); MongoRunner.stopMongod(conn); } - -{ - const rst = new ReplSetTest({ - nodes: [ - {}, - { - // Disallow elections on secondary. - rsConfig: { - priority: 0, - votes: 0, - }, - } - ] - }); - rst.startSet(); - rst.initiate(); - // Test the modern default behavior where storeFindAndModifyImagesInSideCollection is true. - rst.getPrimary().adminCommand( - {setParameter: 1, storeFindAndModifyImagesInSideCollection: true}); - executeTest(rst.getPrimary().getDB("test")); - rst.stopSet(); -} })(); diff --git a/jstests/noPassthrough/bucket_unpacking_with_sort_granularity_change.js b/jstests/noPassthrough/bucket_unpacking_with_sort_granularity_change.js index 267a12cc34a4a..5206d6f91c07b 100644 --- a/jstests/noPassthrough/bucket_unpacking_with_sort_granularity_change.js +++ b/jstests/noPassthrough/bucket_unpacking_with_sort_granularity_change.js @@ -3,15 +3,12 @@ // We check that the results are correct, the documents are sorted, and the documents we expect to // appear, appear. // Note: events in buckets that exceed bucketMaxSpan are not included. -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const dbName = jsTestName(); // Start a single mongoD using MongoRunner. -const conn = MongoRunner.runMongod({setParameter: "featureFlagBucketUnpackWithSort=true"}); +const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod was unable to start up"); // Create the test DB and collection. 
@@ -21,13 +18,6 @@ const collName = dbName; const coll = db[collName]; const minsToMillis = (mins) => mins * 60 * 1000; -if (!TimeseriesTest.bucketUnpackWithSortEnabled(db.getMongo())) { - jsTestLog("Skipping test because 'BucketUnpackWithSort' is disabled."); - return; -} - -printjson(conn.adminCommand({getParameter: 1, featureFlagBucketUnpackWithSort: 1})); - const on = "alwaysOn"; const off = "off"; @@ -103,4 +93,3 @@ let resOpt = mergeShellOptimized(); assert(resOpt == 0); MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthrough/can_load_ttl_index_capped_collection.js b/jstests/noPassthrough/can_load_ttl_index_capped_collection.js index 0e0c47c6ed873..266746c5f1718 100644 --- a/jstests/noPassthrough/can_load_ttl_index_capped_collection.js +++ b/jstests/noPassthrough/can_load_ttl_index_capped_collection.js @@ -47,4 +47,4 @@ assert.eq( assert.eq(indexes[2].expireAfterSeconds, 10, "Index is not TTL as expected: " + tojson(indexes)); MongoRunner.stopMongod(conn); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/capped_collections_downgrade.js b/jstests/noPassthrough/capped_collections_downgrade.js deleted file mode 100644 index 53a276f0c123a..0000000000000 --- a/jstests/noPassthrough/capped_collections_downgrade.js +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Tests that the cluster cannot be downgraded when there are capped collections with a size that - * is non multiple of 256 bytes. The user has to resize or drop the collection in order to - * downgrade. - */ -(function() { - -const conn = MongoRunner.runMongod(); -const testDB = conn.getDB(jsTestName()); -const cappedColl = testDB["capped_coll"]; -const options = Object.assign({}, {capped: true}, {size: 50 * 1023}); -testDB.createCollection(cappedColl.getName(), options); - -// We expect the server to be in a non-downgradable state initially and "command" is what we have to -// run to correct the state in order to successfully downgrade. -function checkCappedCollectionForDowngrade(command) { - assert.commandFailedWithCode(testDB.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}), - ErrorCodes.CannotDowngrade); - testDB.runCommand(command); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - assert.commandWorked(testDB.adminCommand({setFeatureCompatibilityVersion: latestFCV})); -} - -// We want to resize the collection to have a size multiple of 256 bytes in order to be able to -// downgrade. -const resizeCommand = Object.assign({}, {collMod: cappedColl.getName()}, {cappedSize: 50 * 1024}); -checkCappedCollectionForDowngrade(resizeCommand); - -// We reset the size of the collection to be a non multiple of 256 bytes. -const resetSizeCommand = - Object.assign({}, {collMod: cappedColl.getName()}, {cappedSize: 50 * 1023}); -testDB.runCommand(resetSizeCommand); - -// We want to drop the collection in order to be able to downgrade. -const dropCommand = Object.assign({}, {drop: cappedColl.getName()}); -checkCappedCollectionForDowngrade(dropCommand); - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/catalog_shard.js b/jstests/noPassthrough/catalog_shard.js index 6f63809c9d37e..846a7b7f604d6 100644 --- a/jstests/noPassthrough/catalog_shard.js +++ b/jstests/noPassthrough/catalog_shard.js @@ -1,21 +1,13 @@ /** - * Tests catalog shard topology. + * Tests config shard topology. 
* * @tags: [ * requires_persistence, * requires_fcv_70, - * featureFlagCatalogShard, * featureFlagTransitionToCatalogShard, * ] */ -(function() { -"use strict"; - -// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located -// configsvr. -TestData.skipCheckMetadataConsistency = true; - -load("jstests/libs/catalog_shard_util.js"); +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/write_concern_util.js"); @@ -26,6 +18,13 @@ const unshardedDbName = "unsharded_db"; const unshardedNs = unshardedDbName + ".unsharded_coll"; const indexedNs = "db_with_index.coll"; +const timeseriesDbName = "timeseriesDB"; +const timeseriesUnshardedCollName = "unsharded_timeseries_coll"; +const timeseriesShardedCollName = "sharded_timeseries_coll"; +const timeseriesShardedNs = timeseriesDbName + "." + timeseriesShardedCollName; +const timeseriesShardedBucketsNs = + `${timeseriesDbName}.system.buckets.${timeseriesShardedCollName}`; + function basicCRUD(conn) { assert.commandWorked(st.s.getCollection(unshardedNs).insert([{x: 1}, {x: -1}])); @@ -49,7 +48,7 @@ function getCatalogShardChunks(conn) { const st = new ShardingTest({ shards: 1, config: 3, - catalogShard: true, + configShard: true, }); const configShardName = st.shard0.shardName; @@ -134,29 +133,6 @@ const newShardName = assert.commandWorked(st.s.adminCommand({split: ns, middle: {skey: 40}})); } -{ - // - // ShardingStateRecovery doesn't block step up. - // - - assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {skey: 0}, to: configShardName})); - - const hangMigrationFp = configureFailPoint(st.configRS.getPrimary(), "moveChunkHangAtStep5"); - const moveChunkThread = new Thread(function(mongosHost, ns, newShardName) { - const mongos = new Mongo(mongosHost); - assert.commandWorked( - mongos.adminCommand({moveChunk: ns, find: {skey: 0}, to: newShardName})); - }, st.s.host, ns, newShardName); - moveChunkThread.start(); - hangMigrationFp.wait(); - - // Stepping up shouldn't hang because of ShardingStateRecovery. - st.configRS.stepUp(st.configRS.getSecondary()); - - hangMigrationFp.off(); - moveChunkThread.join(); -} - { // // Collections on the config server support changeStreamPreAndPostImages when the config server @@ -179,7 +155,7 @@ const newShardName = { // - // Can't remove catalogShard using the removeShard command. + // Can't remove configShard using the removeShard command. // assert.commandFailedWithCode(st.s.adminCommand({removeShard: "config"}), @@ -188,7 +164,7 @@ const newShardName = { // - // Remove the catalog shard. + // Remove the config shard. // let configPrimary = st.configRS.getPrimary(); @@ -198,13 +174,31 @@ const newShardName = st.s.adminCommand({moveChunk: indexedNs, find: {_id: 0}, to: configShardName})); assert.commandWorked(st.s.getCollection(indexedNs).createIndex({oldKey: 1})); + // Create a sharded and unsharded timeseries collection and verify they and their buckets + // collections are correctly dropped. This provides coverage for views and sharded views. 
+ const timeseriesDB = st.s.getDB(timeseriesDbName); + assert.commandWorked(timeseriesDB.createCollection(timeseriesUnshardedCollName, + {timeseries: {timeField: "time"}})); + assert.commandWorked(st.s.adminCommand({movePrimary: timeseriesDbName, to: configShardName})); + assert.commandWorked(timeseriesDB.createCollection(timeseriesShardedCollName, + {timeseries: {timeField: "time"}})); + assert.commandWorked(st.s.adminCommand({shardCollection: timeseriesShardedNs, key: {time: 1}})); + assert.commandWorked(timeseriesDB[timeseriesShardedCollName].insert({time: ISODate()})); + st.printShardingStatus(); + assert.commandWorked(st.s.adminCommand({ + moveChunk: timeseriesShardedBucketsNs, + find: {"control.min.time": 0}, + to: configShardName, + _waitForDelete: true + })); + // Use write concern to verify the commands support them. Any values weaker than the default // sharding metadata write concerns will be upgraded. let removeRes = assert.commandWorked( st.s0.adminCommand({transitionToDedicatedConfigServer: 1, writeConcern: {wtimeout: 100}})); assert.eq("started", removeRes.state); - // The removal won't complete until all chunks and dbs are moved off the catalog shard. + // The removal won't complete until all chunks and dbs are moved off the config shard. removeRes = assert.commandWorked(st.s0.adminCommand({transitionToDedicatedConfigServer: 1})); assert.eq("ongoing", removeRes.state); @@ -213,15 +207,22 @@ const newShardName = {moveChunk: ns, find: {skey: -1}, to: newShardName, _waitForDelete: true})); assert.commandWorked(st.s.adminCommand( {moveChunk: indexedNs, find: {_id: 0}, to: newShardName, _waitForDelete: true})); + assert.commandWorked(st.s.adminCommand({ + moveChunk: timeseriesShardedBucketsNs, + find: {"control.min.time": 0}, + to: newShardName, + _waitForDelete: true + })); // Blocked because of the sharded and unsharded databases and the remaining chunk. removeRes = assert.commandWorked(st.s0.adminCommand({transitionToDedicatedConfigServer: 1})); assert.eq("ongoing", removeRes.state); assert.eq(1, removeRes.remaining.chunks); - assert.eq(2, removeRes.remaining.dbs); + assert.eq(3, removeRes.remaining.dbs); assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: newShardName})); assert.commandWorked(st.s.adminCommand({movePrimary: unshardedDbName, to: newShardName})); + assert.commandWorked(st.s.adminCommand({movePrimary: timeseriesDbName, to: newShardName})); // The draining sharded collections should not have been locally dropped yet. assert(configPrimary.getCollection(ns).exists()); @@ -246,13 +247,14 @@ const newShardName = assert.eq(1, removeRes.pendingRangeDeletions); suspendRangeDeletionFp.off(); - CatalogShardUtil.waitForRangeDeletions(st.s); + ConfigShardUtil.waitForRangeDeletions(st.s); - // Start the final transition command. This will trigger locally dropping collections on the - // config server. Hang after removing one collection and trigger a failover to verify the final - // transition can be resumed on the new primary and the collection dropping is idempotent. + // Start the final transition command. This will trigger locally dropping all tracked user + // databases on the config server. Hang after removing one database and trigger a failover to + // verify the final transition can be resumed on the new primary and the database dropping is + // idempotent. 
const hangRemoveFp = configureFailPoint( - st.configRS.getPrimary(), "hangAfterDroppingCollectionInTransitionToDedicatedConfigServer"); + st.configRS.getPrimary(), "hangAfterDroppingDatabaseInTransitionToDedicatedConfigServer"); const finishRemoveThread = new Thread(function(mongosHost) { const mongos = new Mongo(mongosHost); return mongos.adminCommand({transitionToDedicatedConfigServer: 1}); @@ -297,7 +299,7 @@ const newShardName = { // - // Can't create catalogShard using the addShard command. + // Can't create configShard using the addShard command. // assert.commandFailed(st.s.adminCommand({addShard: st.configRS.getURL(), name: "config"})); @@ -314,7 +316,7 @@ const newShardName = { // - // Add back the catalog shard. + // Add back the config shard. // // Create an index while the collection is not on the config server to verify it clones the @@ -324,7 +326,7 @@ const newShardName = // Use write concern to verify the command support them. Any values weaker than the default // sharding metadata write concerns will be upgraded. assert.commandWorked( - st.s.adminCommand({transitionToCatalogShard: 1, writeConcern: {wtimeout: 100}})); + st.s.adminCommand({transitionFromDedicatedConfigServer: 1, writeConcern: {wtimeout: 100}})); // Basic CRUD and sharded DDL work. basicCRUD(st.s); @@ -340,38 +342,5 @@ const newShardName = [{_id: 1}, {oldKey: 1}, {newKey: 1}]); } -{ - // - // transitionToCatalogShard requires replication to all config server nodes. - // - // TODO SERVER-75391: Remove. - // - - // Transition to dedicated mode so the config server can transition back to catalog shard mode. - let removeRes = assert.commandWorked(st.s.adminCommand({transitionToDedicatedConfigServer: 1})); - assert.eq("started", removeRes.state); - assert.commandWorked(st.s.adminCommand( - {moveChunk: ns, find: {skey: 0}, to: newShardName, _waitForDelete: true})); - assert.commandWorked(st.s.adminCommand( - {moveChunk: ns, find: {skey: 5}, to: newShardName, _waitForDelete: true})); - assert.commandWorked(st.s.adminCommand( - {moveChunk: indexedNs, find: {_id: 0}, to: newShardName, _waitForDelete: true})); - assert.commandWorked(st.s.adminCommand({movePrimary: "directDB", to: newShardName})); - assert.commandWorked(st.s.adminCommand({transitionToDedicatedConfigServer: 1})); - - // transitionToCatalogShard times out with a lagged config secondary despite having a majority - // of its set still replicating. - const laggedSecondary = st.configRS.getSecondary(); - st.configRS.awaitReplication(); - stopServerReplication(laggedSecondary); - assert.commandFailedWithCode(st.s.adminCommand({transitionToCatalogShard: 1, maxTimeMS: 1000}), - ErrorCodes.MaxTimeMSExpired); - restartServerReplication(laggedSecondary); - - // Now it succeeds. - assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1})); -} - st.stop(); newShardRS.stopSet(); -}()); diff --git a/jstests/noPassthrough/catalog_shard_resharding_fixture.js b/jstests/noPassthrough/catalog_shard_resharding_fixture.js index b1f337f0fbc42..c6f8574769df4 100644 --- a/jstests/noPassthrough/catalog_shard_resharding_fixture.js +++ b/jstests/noPassthrough/catalog_shard_resharding_fixture.js @@ -1,24 +1,19 @@ /** - * Test the ReshardingTest fixture can work with a catalogShard. + * Test the ReshardingTest fixture can work with a configShard. * * @tags: [ * requires_fcv_70, - * featureFlagCatalogShard, * featureFlagTransitionToCatalogShard, * ] */ -// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located -// configsvr. 
-TestData.skipCheckMetadataConsistency = true; - (function() { "use strict"; load("jstests/sharding/libs/resharding_test_fixture.js"); const reshardingTest = - new ReshardingTest({numDonors: 2, numRecipients: 2, reshardInPlace: true, catalogShard: true}); + new ReshardingTest({numDonors: 2, numRecipients: 2, reshardInPlace: true, configShard: true}); reshardingTest.setup(); const ns = "reshardingDb.coll"; diff --git a/jstests/noPassthrough/catalog_shard_secondary_reads.js b/jstests/noPassthrough/catalog_shard_secondary_reads.js index 9ab579d282af3..914dec785d89b 100644 --- a/jstests/noPassthrough/catalog_shard_secondary_reads.js +++ b/jstests/noPassthrough/catalog_shard_secondary_reads.js @@ -1,18 +1,12 @@ /** - * Tests catalog shard topology. + * Tests config shard topology. * * @tags: [ * requires_fcv_70, - * featureFlagCatalogShard, * ] */ -(function() { -"use strict"; +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; -// TODO SERVER-74534: Enable metadata consistency check when it works with a catalog shard. -TestData.skipCheckMetadataConsistency = true; - -load("jstests/libs/catalog_shard_util.js"); load("jstests/libs/fail_point_util.js"); load('jstests/libs/chunk_manipulation_util.js'); @@ -22,7 +16,7 @@ const st = new ShardingTest({ shards: {rs0: {nodes: 2}, rs1: {nodes: 2}}, config: 2, mongos: 1, - catalogShard: true, + configShard: true, }); assert.commandWorked(st.s0.getDB('test').user.insert({_id: 1234})); @@ -74,8 +68,8 @@ joinMoveChunk(); assert.commandWorked(st.s0.adminCommand({movePrimary: 'test', to: st.shard1.shardName})); assert.commandWorked(st.s0.adminCommand({movePrimary: 'sharded', to: st.shard1.shardName})); -// A catalog shard can't be removed until all range deletions have finished. -CatalogShardUtil.waitForRangeDeletions(st.s0); +// A config shard can't be removed until all range deletions have finished. +ConfigShardUtil.waitForRangeDeletions(st.s0); removeRes = assert.commandWorked(st.s0.adminCommand({transitionToDedicatedConfigServer: 1})); assert.eq("completed", removeRes.state, tojson(removeRes)); @@ -122,7 +116,7 @@ assert.commandWorked(st.s0.adminCommand({setFeatureCompatibilityVersion: upgrade // Need to drop the database before it can become a shard again. assert.commandWorked(st.configRS.getPrimary().getDB('sharded').dropDatabase()); -assert.commandWorked(st.s0.adminCommand({transitionToCatalogShard: 1})); +assert.commandWorked(st.s0.adminCommand({transitionFromDedicatedConfigServer: 1})); assert.commandWorked(st.s0.adminCommand({movePrimary: 'test', to: st.shard0.shardName})); assert.commandWorked( st.s0.adminCommand({moveChunk: 'sharded.user', find: {_id: 0}, to: st.shard0.shardName})); @@ -135,5 +129,4 @@ assert.eq({_id: 5678}, doc); st.stop(); -MongoRunner.stopMongod(staticMongod); -})(); +MongoRunner.stopMongod(staticMongod); \ No newline at end of file diff --git a/jstests/noPassthrough/change_stream_generate_v2_tokens_flag_with_test_commands_disabled.js b/jstests/noPassthrough/change_stream_generate_v2_tokens_flag_with_test_commands_disabled.js deleted file mode 100644 index f23507031de3d..0000000000000 --- a/jstests/noPassthrough/change_stream_generate_v2_tokens_flag_with_test_commands_disabled.js +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Test that the $_generateV2ResumeTokens parameter cannot be used on mongoS when test commands are - * disabled. 
- * @tags: [ - * uses_change_streams, - * requires_sharding, - * requires_replication, - * ] - */ -(function() { -"use strict"; - -// Signal to the ShardingTest that we want to disable test commands. -TestData.enableTestCommands = false; - -// Create a sharding fixture with test commands disabled. -const st = new ShardingTest({shards: 1, rs: {nodes: 1}}); - -// Confirm that attempting to set any values for $_generateV2ResumeTokens field fails on mongos. -assert.throwsWithCode(() => st.s.watch([], {$_generateV2ResumeTokens: true}).hasNext(), 6528201); -assert.throwsWithCode(() => st.s.watch([], {$_generateV2ResumeTokens: false}).hasNext(), 6528201); - -// Confirm that attempting to run change streams with $_generateV2ResumeTokens:true fails on shards. -assert.throwsWithCode( - () => st.rs0.getPrimary().watch([], {$_generateV2ResumeTokens: true}).hasNext(), 6528200); - -// Explicity requesting v1 tokens is allowed on a shard. This is to allow a 6.0 mongoS to -// communicate with a 7.0 shard. -const stream = st.rs0.getPrimary().watch([], {$_generateV2ResumeTokens: false}); -assert.commandWorked(st.s.getDB("test")["coll"].insert({x: 1})); -assert.soon(() => stream.hasNext()); - -st.stop(); -})(); \ No newline at end of file diff --git a/jstests/noPassthrough/change_stream_mongos_with_generate_v2_resume_tokens_flag.js b/jstests/noPassthrough/change_stream_mongos_with_generate_v2_resume_tokens_flag.js deleted file mode 100644 index 2bdbb90739782..0000000000000 --- a/jstests/noPassthrough/change_stream_mongos_with_generate_v2_resume_tokens_flag.js +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Test that mongoS does not set the value of $_generateV2ResumeTokens on the commands it sends to - * the shards, if no value was specified by the client. If a value was specified, mongoS forwards it - * to the shards. On a replica set, no explicit value is set; the aggregation simply treats it as - * default-true. - * @tags: [ - * uses_change_streams, - * requires_sharding, - * requires_replication, - * ] - */ -(function() { -"use strict"; - -load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection. -load("jstests/libs/profiler.js"); // For profilerHasSingleMatchingEntryOrThrow. - -// Create a sharding fixture with a single one-node replset shard and a one-node replset config -// server. The latter is to ensure that there is only one node that the internal new-shard monitor -// $changeStream can be sent to, since it is dispatched with secondaryPreferred readPreference. -const st = new ShardingTest({shards: 1, rs: {nodes: 1}, config: {nodes: 1}}); - -const mongosDB = st.s.getDB("test"); -const shardDB = st.rs0.getPrimary().getDB(mongosDB.getName()); -const configDB = st.configRS.getPrimary().getDB("config"); - -const mongosColl = assertDropAndRecreateCollection(mongosDB, jsTestName()); -const shardColl = shardDB[mongosColl.getName()]; -const configColl = configDB.shards; - -// Enable profiling on the shard and config server. -assert.commandWorked(shardDB.setProfilingLevel(2)); -assert.commandWorked(configDB.setProfilingLevel(2)); - -// Create one stream on mongoS that returns v2 tokens, the default. -const v2MongosStream = mongosColl.watch([], {comment: "v2MongosStream"}); - -// Create a second stream on mongoS that explicitly requests v1 tokens. -const v1MongosStream = - mongosColl.watch([], {comment: "v1MongosStream", $_generateV2ResumeTokens: false}); - -// Create a stream directly on the shard which returns the default v2 tokens. 
-const v2ShardStream = shardColl.watch([], {comment: "v2ShardStream"}); - -// Insert a test document into the collection. -assert.commandWorked(mongosColl.insert({_id: 1})); - -// Wait until all streams have encountered the insert operation. -assert.soon(() => v1MongosStream.hasNext() && v2MongosStream.hasNext() && v2ShardStream.hasNext()); - -// Confirm that in a sharded cluster, when v1 token is explicitly requested, mongoS fowards -// $_generateV2ResumeTokens:false to the shard. -profilerHasAtLeastOneMatchingEntryOrThrow({ - profileDB: shardDB, - filter: { - "originatingCommand.aggregate": mongosColl.getName(), - "originatingCommand.comment": "v1MongosStream", - "originatingCommand.$_generateV2ResumeTokens": false - } -}); - -// Confirm that we also set $_generateV2ResumeTokens to false on the internal new-shard monitoring -// $changeStream that we dispatch to the config servers. -profilerHasAtLeastOneMatchingEntryOrThrow({ - profileDB: configDB, - filter: { - "originatingCommand.aggregate": configColl.getName(), - "originatingCommand.comment": "v1MongosStream", - "originatingCommand.$_generateV2ResumeTokens": false - } -}); - -// Confirm that mongoS never sets the $_generateV2ResumeTokens field when client didn't explicitly -// specify. -profilerHasAtLeastOneMatchingEntryOrThrow({ - profileDB: shardDB, - filter: { - "originatingCommand.aggregate": mongosColl.getName(), - "originatingCommand.comment": "v2MongosStream", - "originatingCommand.$_generateV2ResumeTokens": {$exists: false} - } -}); - -// Confirm that we also do not set the $_generateV2ResumeTokens field on the request sent to the -// config server. -profilerHasAtLeastOneMatchingEntryOrThrow({ - profileDB: configDB, - filter: { - "originatingCommand.aggregate": configColl.getName(), - "originatingCommand.comment": "v2MongosStream", - "originatingCommand.$_generateV2ResumeTokens": {$exists: false} - } -}); - -// Confirm that on a replica set - in this case, a direct connection to the shard - no value is set -// for $_generateV2ResumeTokens if the client did not specify one. The aggregation defaults to -// treating the value as true. -profilerHasAtLeastOneMatchingEntryOrThrow({ - profileDB: shardDB, - filter: { - "originatingCommand.aggregate": mongosColl.getName(), - "originatingCommand.comment": "v2ShardStream", - "originatingCommand.$_generateV2ResumeTokens": {$exists: false} - } -}); - -st.stop(); -})(); \ No newline at end of file diff --git a/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_replset.js b/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_replset.js index d7af9b045cd18..d49a9e333c4c6 100644 --- a/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_replset.js +++ b/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_replset.js @@ -9,7 +9,13 @@ load("jstests/noPassthrough/libs/change_stream_pre_image_time_based_expiration_u // Tests pre-image time based expiration on a replica-set. (function testChangeStreamPreImagesforTimeBasedExpirationOnReplicaSet() { - const replSetTest = new ReplSetTest({name: "replSet", nodes: 3}); + const replSetTest = new ReplSetTest({ + name: "replSet", + nodes: 3, + // Test expects an exact number of pre-images to be deleted. Thus, the pre-images truncate + // markers must only contain 1 document at most. 
+ nodeOptions: {setParameter: {preImagesCollectionTruncateMarkersMinBytes: 1}} + }); replSetTest.startSet(); replSetTest.initiate(); diff --git a/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_sharded.js b/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_sharded.js index 551ae495b5874..803bfa45b2d2b 100644 --- a/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_sharded.js +++ b/jstests/noPassthrough/change_stream_pre_image_time_based_expiration_sharded.js @@ -15,6 +15,9 @@ load("jstests/noPassthrough/libs/change_stream_pre_image_time_based_expiration_u shards: 1, rs: { nodes: 3, + // Test expects an exact number of pre-images to be deleted. Thus, the pre-images + // truncate markers must only contain 1 document at most. + setParameter: {preImagesCollectionTruncateMarkersMinBytes: 1}, }, }; const st = new ShardingTest(options); diff --git a/jstests/noPassthrough/change_stream_pre_images_server_stats.js b/jstests/noPassthrough/change_stream_pre_images_server_stats.js new file mode 100644 index 0000000000000..06dcfc5889328 --- /dev/null +++ b/jstests/noPassthrough/change_stream_pre_images_server_stats.js @@ -0,0 +1,126 @@ +/** + * Tests that FTDC collects information about the pre-image collection, including its purging job. + * @tags: [ requires_replication ] + */ +(function() { +'use strict'; + +// For verifyGetDiagnosticData. +load('jstests/libs/ftdc.js'); + +const kExpiredPreImageRemovalJobSleepSeconds = 1; +const kExpireAfterSeconds = 1; + +const replicaSet = new ReplSetTest({ + nodes: 1, + nodeOptions: { + setParameter: + {expiredChangeStreamPreImageRemovalJobSleepSecs: kExpiredPreImageRemovalJobSleepSeconds} + } +}); + +replicaSet.startSet(); +replicaSet.initiate(); + +const primary = replicaSet.getPrimary(); +const adminDb = primary.getDB('admin'); +const testDb = primary.getDB(jsTestName()); + +assert.soon(() => { + // Ensure that server status diagnostics is collecting pre-image collection statistics. + const serverStatusDiagnostics = verifyGetDiagnosticData(adminDb).serverStatus; + return serverStatusDiagnostics.hasOwnProperty('changeStreamPreImages') && + serverStatusDiagnostics.changeStreamPreImages.hasOwnProperty('purgingJob'); +}); + +const diagnosticsBeforeTestCollModifications = + verifyGetDiagnosticData(adminDb).serverStatus.changeStreamPreImages.purgingJob; + +// Create collection and insert sample data. +assert.commandWorked( + testDb.createCollection("testColl", {changeStreamPreAndPostImages: {enabled: true}})); +const numberOfDocuments = 100; +for (let i = 0; i < numberOfDocuments; i++) { + assert.commandWorked(testDb.testColl.insert({x: i})); +} + +for (let i = 0; i < numberOfDocuments; i++) { + assert.commandWorked(testDb.testColl.updateOne({x: i}, {$inc: {y: 1}})); +} + +const preImageCollection = primary.getDB('config')['system.preimages']; + +const estimatedToBeRemovedDocsSize = preImageCollection.find() + .toArray() + .map(doc => Object.bsonsize(doc)) + .reduce((acc, size) => acc + size, 0); +assert.gt(estimatedToBeRemovedDocsSize, 0); + +// Set the 'expireAfterSeconds' to 'kExpireAfterSeconds'. +assert.commandWorked(adminDb.runCommand({ + setClusterParameter: + {changeStreamOptions: {preAndPostImages: {expireAfterSeconds: kExpireAfterSeconds}}} +})); + +// Ensure purging job deletes the expired pre-image entries of the test collection. +assert.soon(() => { + // All entries are removed. 
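// For reference, a hedged sketch of reading back the expiration setting configured with
// setClusterParameter earlier in this test; getClusterParameter is the read counterpart of that
// command, and `adminDb` is the handle defined above.
const clusterParamDoc = assert.commandWorked(
    adminDb.runCommand({getClusterParameter: "changeStreamOptions"}));
jsTestLog("changeStreamOptions cluster parameter: " + tojson(clusterParamDoc.clusterParameters));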
+ return preImageCollection.count() === 0; +}); + +// Ensure that FTDC collected the purging job information of the pre-image collection. +assert.soon(() => { + const diagnosticsAfterTestCollModifications = + verifyGetDiagnosticData(adminDb).serverStatus.changeStreamPreImages.purgingJob; + + const totalPassBigger = diagnosticsAfterTestCollModifications.totalPass > + diagnosticsBeforeTestCollModifications.totalPass; + const scannedBigger = diagnosticsAfterTestCollModifications.scannedCollections > + diagnosticsBeforeTestCollModifications.scannedCollections; + const scannedInternalBigger = diagnosticsAfterTestCollModifications.scannedInternalCollections > + diagnosticsBeforeTestCollModifications.scannedInternalCollections; + const bytesEqual = diagnosticsAfterTestCollModifications.bytesDeleted >= + diagnosticsBeforeTestCollModifications.bytesDeleted + estimatedToBeRemovedDocsSize; + const docsDeletedEqual = diagnosticsAfterTestCollModifications.docsDeleted >= + diagnosticsBeforeTestCollModifications.docsDeleted + numberOfDocuments; + const wallTimeGTE = diagnosticsAfterTestCollModifications.maxStartWallTimeMillis.tojson() >= + ISODate("1970-01-01T00:00:00.000Z").tojson(); + const timeElapsedGTE = diagnosticsAfterTestCollModifications.timeElapsedMillis >= + diagnosticsBeforeTestCollModifications.timeElapsedMillis; + + // For debug purposes log which condition failed. + if (!totalPassBigger) { + jsTestLog("totalPassBigger failed, retrying"); + return false; + } + if (!scannedBigger) { + jsTestLog("scannedBigger failed, retrying"); + return false; + } + if (!scannedInternalBigger) { + jsTestLog("scannedInternalBigger failed, retrying"); + return false; + } + if (!bytesEqual) { + jsTestLog("bytesEqual) failed, retrying"); + return false; + } + if (!docsDeletedEqual) { + jsTestLog("docsDeletedEqual failed, retrying"); + return false; + } + if (!wallTimeGTE) { + jsTestLog("wallTimeGTE failed, retrying"); + return false; + } + if (!timeElapsedGTE) { + jsTestLog("timeElapsedGTE failed, retrying"); + return false; + } + + return totalPassBigger && scannedBigger && scannedInternalBigger && bytesEqual && + docsDeletedEqual && wallTimeGTE && timeElapsedGTE; +}); + +replicaSet.stopSet(); +}()); diff --git a/jstests/noPassthrough/change_streams_per_shard_cursor.js b/jstests/noPassthrough/change_streams_per_shard_cursor.js index d4fcefab48da2..d2c752a5fa47a 100644 --- a/jstests/noPassthrough/change_streams_per_shard_cursor.js +++ b/jstests/noPassthrough/change_streams_per_shard_cursor.js @@ -4,10 +4,7 @@ * uses_change_streams, * ] */ -(function() { -"use strict"; - -load("jstests/libs/catalog_shard_util.js"); +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; const dbName = jsTestName(); const setupShardedCluster = (shards = 1) => { @@ -69,9 +66,11 @@ assert.commandFailedWithCode(sdb.runCommand({ }), 6273801); -// $out can't passthrough so it's not allowed. +// $out can't passthrough so it's not allowed. This may be caught in parsing, or when preparing +// the aggregation. assert.commandFailedWithCode( - assert.throws(() => pscWatch(sdb, "coll", shardId, {pipeline: [{$out: "h"}]})), 6273802); + assert.throws(() => pscWatch(sdb, "coll", shardId, {pipeline: [{$out: "h"}]})), + [6273802, ErrorCodes.IllegalOperation]); // Shard option should be specified. 
assert.commandFailedWithCode( @@ -83,12 +82,6 @@ assert.commandFailedWithCode( assert.commandFailedWithCode(assert.throws(() => pscWatch(sdb, "coll", 42)), ErrorCodes.TypeMismatch); -const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st); -if (!isCatalogShardEnabled) { - // Can't open a per shard cursor on the config RS. - assert.commandFailedWithCode(assert.throws(() => pscWatch(sdb, "coll", "config")), 6273803); -} - // The shardId should be a valid shard. assert.commandFailedWithCode( assert.throws(() => pscWatch(sdb, "coll", "Dwane 'the Shard' Johnson")), @@ -109,18 +102,16 @@ for (let i = 1; i <= 4; i++) { } assert(!c.hasNext()); -if (isCatalogShardEnabled) { - // Can open a per shard cursor on the config server. - const configDB = st.s0.getDB("config"); - c = pscWatch(configDB, "coll", "config", undefined /* options */, {allowToRunOnConfigDB: true}); - for (let i = 1; i <= 4; i++) { - configDB.coll.insertOne({location: 2, i}); - assert(!c.isExhausted()); - assert.soon(() => c.hasNext()); - c.next(); - } - assert(!c.hasNext()); +// Can open a per shard cursor on the config server. +const configDB = st.s0.getDB("config"); +c = pscWatch(configDB, "coll", "config", undefined /* options */, {allowToRunOnConfigDB: true}); +for (let i = 1; i <= 4; i++) { + configDB.coll.insertOne({location: 2, i}); + assert(!c.isExhausted()); + assert.soon(() => c.hasNext()); + c.next(); } +assert(!c.hasNext()); // Simple database level watch c = pscWatch(sdb, 1, shardId); @@ -211,5 +202,4 @@ sdb.coll2.insertOne({location: 10, _id: 4}); assert(!c.isExhausted()); assert(!c.hasNext()); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/change_streams_pre_image_removal_job.js b/jstests/noPassthrough/change_streams_pre_image_removal_job.js index 274153370b3e8..e7293e4501320 100644 --- a/jstests/noPassthrough/change_streams_pre_image_removal_job.js +++ b/jstests/noPassthrough/change_streams_pre_image_removal_job.js @@ -8,11 +8,9 @@ // requires_replication, // requires_majority_read_concern, // ] -(function() { -"use strict"; - load('jstests/replsets/rslib.js'); // For getLatestOp, getFirstOplogEntry. load("jstests/libs/collection_drop_recreate.js"); // For assertDropAndRecreateCollection. +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const docA = { _id: 12345, @@ -33,7 +31,12 @@ const oplogSizeMB = 1; // Set up the replica set with two nodes and two collections with 'changeStreamPreAndPostImages' // enabled and run expired pre-image removal job every second. const rst = new ReplSetTest({nodes: 2, oplogSize: oplogSizeMB}); -rst.startSet({setParameter: {expiredChangeStreamPreImageRemovalJobSleepSecs: 1}}); +rst.startSet({ + setParameter: { + expiredChangeStreamPreImageRemovalJobSleepSecs: 1, + preImagesCollectionTruncateMarkersMinBytes: 1 + } +}); rst.initiate(); const largeStr = 'abcdefghi'.repeat(4 * 1024); const primaryNode = rst.getPrimary(); @@ -135,36 +138,43 @@ function retryOnCappedPositionLostError(func, message) { return onlyTwoPreImagesLeft && allPreImagesHaveBiggerTimestamp; }); - // Because the pre-images collection is implicitly replicated, validate that writes do not - // generate oplog entries, with the exception of deletions. - const preimagesNs = 'config.system.preimages'; - // Multi-deletes are batched base on time before performing the deletion, therefore the - // deleted pre-images can span through multiple applyOps oplog entries. 
- // - // As pre-images span two collections, the minimum number of batches is 2, as we perform - // the range-deletion per collection. The maximum number of batches is 4 (one per single - // pre-image removed). - const expectedNumberOfBatchesRange = [2, 3, 4]; - const serverStatusBatches = testDB.serverStatus()['batchedDeletes']['batches']; - const serverStatusDocs = testDB.serverStatus()['batchedDeletes']['docs']; - assert.contains(serverStatusBatches, expectedNumberOfBatchesRange); - assert.eq(serverStatusDocs, preImagesToExpire); - assert.contains( - retryOnCappedPositionLostError( - () => localDB.oplog.rs - .find({ns: 'admin.$cmd', 'o.applyOps.op': 'd', 'o.applyOps.ns': preimagesNs}) - .itcount(), - "Failed to fetch oplog entries for pre-image deletes"), - expectedNumberOfBatchesRange); - assert.eq(0, - retryOnCappedPositionLostError( - () => localDB.oplog.rs.find({op: {'$ne': 'd'}, ns: preimagesNs}).itcount(), - "Failed to fetch all oplog entries except pre-image deletes")); - - // Verify that pre-images collection content on the primary node is the same as on the - // secondary. - rst.awaitReplication(); - assert(bsonWoCompare(getPreImages(primaryNode), getPreImages(rst.getSecondary())) === 0); + // If the feature flag is on, then batched deletes will not be used for deletion. Additionally, + // since truncates are not replicated, the number of pre-images on the primary may differ from + // that of the secondary. + if (!FeatureFlagUtil.isPresentAndEnabled(testDB, "UseUnreplicatedTruncatesForDeletions")) { + // Because the pre-images collection is implicitly replicated, validate that writes do not + // generate oplog entries, with the exception of deletions. + const preimagesNs = 'config.system.preimages'; + // Multi-deletes are batched base on time before performing the deletion, therefore the + // deleted pre-images can span through multiple applyOps oplog entries. + // + // As pre-images span two collections, the minimum number of batches is 2, as we perform + // the range-deletion per collection. The maximum number of batches is 4 (one per single + // pre-image removed). + const expectedNumberOfBatchesRange = [2, 3, 4]; + const serverStatusBatches = testDB.serverStatus()['batchedDeletes']['batches']; + const serverStatusDocs = testDB.serverStatus()['batchedDeletes']['docs']; + assert.contains(serverStatusBatches, expectedNumberOfBatchesRange); + assert.eq(serverStatusDocs, preImagesToExpire); + assert.contains( + retryOnCappedPositionLostError( + () => + localDB.oplog.rs + .find( + {ns: 'admin.$cmd', 'o.applyOps.op': 'd', 'o.applyOps.ns': preimagesNs}) + .itcount(), + "Failed to fetch oplog entries for pre-image deletes"), + expectedNumberOfBatchesRange); + assert.eq(0, + retryOnCappedPositionLostError( + () => localDB.oplog.rs.find({op: {'$ne': 'd'}, ns: preimagesNs}).itcount(), + "Failed to fetch all oplog entries except pre-image deletes")); + + // Verify that pre-images collection content on the primary node is the same as on the + // secondary. 
+ rst.awaitReplication(); + assert(bsonWoCompare(getPreImages(primaryNode), getPreImages(rst.getSecondary())) === 0); + } } // Increase oplog size on each node to prevent oplog entries from being deleted which removes a @@ -174,5 +184,4 @@ rst.nodes.forEach((node) => { assert.commandWorked(node.adminCommand({replSetResizeOplog: 1, size: largeOplogSizeMB})); }); -rst.stopSet(); -}()); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/classic_cqf_simple_comparisons.js b/jstests/noPassthrough/classic_cqf_simple_comparisons.js new file mode 100644 index 0000000000000..9928e0c3cf076 --- /dev/null +++ b/jstests/noPassthrough/classic_cqf_simple_comparisons.js @@ -0,0 +1,50 @@ +/** + * Tests that comparisons against a variety of BSON types and shapes are the same in CQF and + * classic. + */ +import {leafs, smallDocs} from "jstests/query_golden/libs/example_data.js"; + +const cqfConn = MongoRunner.runMongod({setParameter: {featureFlagCommonQueryFramework: true}}); +assert.neq(null, cqfConn, "mongod was unable to start up"); +const cqfDb = cqfConn.getDB(jsTestName()); + +assert.commandWorked( + cqfDb.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"})); +const cqfColl = cqfDb.cqf_compare; +cqfColl.drop(); + +// Disable via TestData so there's no conflict in case a variant has this enabled. +TestData.setParameters.featureFlagCommonQueryFramework = false; +TestData.setParameters.internalQueryFrameworkControl = 'trySbeEngine'; +const classicConn = MongoRunner.runMongod(); +assert.neq(null, classicConn, "mongod was unable to start up"); + +const classicColl = classicConn.getDB(jsTestName()).classic_compare; +classicColl.drop(); + +// TODO SERVER-67818 Bonsai NaN $eq NaN should be true. +// The above ticket also fixes inequality comparisons to NaN. +const docs = smallDocs().filter(doc => !tojson(doc).match(/NaN/)); +cqfColl.insert(docs); +classicColl.insert(docs); + +for (const op of ['$eq', '$lt', '$lte', '$gt', '$gte']) { + for (const leaf of leafs()) { + // TODO SERVER-67550 Equality to null does not match undefined, in Bonsai. + if (tojson(leaf).match(/null|undefined/)) + continue; + // TODO SERVER-67818 Bonsai NaN $eq NaN should be true. + if (tojson(leaf).match(/NaN/)) + continue; + // Regex with non-equality predicate is not allowed. 
+ if (leaf instanceof RegExp && op !== '$eq') + continue; + + const cqfResult = cqfColl.find({a: {[op]: leaf}}, {_id: 0}).toArray(); + const classicResult = classicColl.find({a: {[op]: leaf}}, {_id: 0}).toArray(); + assert.eq(cqfResult, classicResult); + } +} + +MongoRunner.stopMongod(cqfConn); +MongoRunner.stopMongod(classicConn); diff --git a/jstests/noPassthrough/client_disconnect_during_sign_logical_time.js b/jstests/noPassthrough/client_disconnect_during_sign_logical_time.js index f205f24b5c8fe..d596517c7ada4 100644 --- a/jstests/noPassthrough/client_disconnect_during_sign_logical_time.js +++ b/jstests/noPassthrough/client_disconnect_during_sign_logical_time.js @@ -18,4 +18,4 @@ assert.commandFailedWithCode(st.s.adminCommand({ ErrorCodes.ClientDisconnect); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/client_metadata_log.js b/jstests/noPassthrough/client_metadata_log.js index 71fc22a890442..d8c43151c361f 100644 --- a/jstests/noPassthrough/client_metadata_log.js +++ b/jstests/noPassthrough/client_metadata_log.js @@ -8,7 +8,7 @@ (function() { 'use strict'; -let checkLog = function(conn) { +let checkLogForMetadata = function(conn) { let coll = conn.getCollection("test.foo"); assert.commandWorked(coll.insert({_id: 1})); @@ -30,7 +30,7 @@ let testMongoD = function() { let conn = MongoRunner.runMongod({useLogFiles: true}); assert.neq(null, conn, 'mongod was unable to start up'); - checkLog(conn); + checkLogForMetadata(conn); MongoRunner.stopMongod(conn); }; @@ -43,7 +43,7 @@ let testMongoS = function() { let st = new ShardingTest({shards: 1, mongos: 1, other: options}); - checkLog(st.s0); + checkLogForMetadata(st.s0); // Validate db.currentOp() contains mongos information let curOp = st.s0.adminCommand({currentOp: 1}); diff --git a/jstests/noPassthrough/cluster-server-parameter-op-observer.js b/jstests/noPassthrough/cluster-server-parameter-op-observer.js index ce8f7dbcb115a..ebcb40a58ba1c 100644 --- a/jstests/noPassthrough/cluster-server-parameter-op-observer.js +++ b/jstests/noPassthrough/cluster-server-parameter-op-observer.js @@ -1,22 +1,15 @@ // Test that ClusterServerParameterOpObserver fires appropriately. -// @tags: [requires_replication] +// @tags: [requires_replication, requires_fcv_71] (function() { 'use strict'; -const kUnknownCSPLogId = 6226300; -const kUnknownCSPLogComponent = 'control'; -const kUnknownCSPLogLevel = 3; - function runTest(conn) { const config = conn.getDB('config'); - const originalLogLevel = - assert.commandWorked(config.setLogLevel(kUnknownCSPLogLevel, kUnknownCSPLogComponent)) - .was.verbosity; - assert.writeOK( - config.clusterParameters.insert({_id: 'foo', clusterParameterTime: Date(), value: 123})); - assert.commandWorked(config.setLogLevel(originalLogLevel, kUnknownCSPLogComponent)); - assert(checkLog.checkContainsOnceJson(conn, kUnknownCSPLogId, {name: 'foo'})); + const res = + config.clusterParameters.insert({_id: 'foo', clusterParameterTime: Date(), value: 123}); + assert(res.hasWriteError()); + assert.neq(res.getWriteError().length, 0); } const rst = new ReplSetTest({nodes: 2}); diff --git a/jstests/noPassthrough/cluster_analyze_command.js b/jstests/noPassthrough/cluster_analyze_command.js index 8dea667053bf6..a193971549ac7 100644 --- a/jstests/noPassthrough/cluster_analyze_command.js +++ b/jstests/noPassthrough/cluster_analyze_command.js @@ -1,7 +1,4 @@ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const st = new ShardingTest({ shards: 2, @@ -17,7 +14,7 @@ const db = st.getDB("test"); if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); st.stop(); - return; + quit(); } const coll = db.analyze_coll; @@ -40,4 +37,3 @@ res = db.runCommand({analyze: coll.getName(), writeConcern: {w: 1}}); assert.commandWorked(res); st.stop(); -})(); diff --git a/jstests/noPassthrough/cluster_commands_require_cluster_node.js b/jstests/noPassthrough/cluster_commands_require_cluster_node.js index 6366db4538eaf..c7787ac31cb0f 100644 --- a/jstests/noPassthrough/cluster_commands_require_cluster_node.js +++ b/jstests/noPassthrough/cluster_commands_require_cluster_node.js @@ -6,10 +6,7 @@ * requires_sharding, * ] */ -(function() { -"use strict"; - -load("jstests/libs/catalog_shard_util.js"); +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; const kDBName = "foo"; const kCollName = "bar"; @@ -29,6 +26,7 @@ const clusterCommandsCases = [ }, {cmd: {clusterInsert: kCollName, documents: [{x: 1}]}}, {cmd: {clusterUpdate: kCollName, updates: [{q: {doesNotExist: 1}, u: {x: 1}}]}}, + // TODO SERVER-52419 add test for bulkWrite. ]; function runTestCaseExpectFail(conn, testCase, code) { @@ -95,20 +93,11 @@ function runTestCaseExpectSuccess(conn, testCase) { runTestCaseExpectFail(st.s, testCase, ErrorCodes.CommandNotFound); } - // - // Cluster commands are allowed on a catalog shard enabled config server. - // - - const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st); for (let testCase of clusterCommandsCases) { - if (isCatalogShardEnabled) { - if (testCase.expectedErr) { - runTestCaseExpectFail(st.rs0.getPrimary(), testCase, testCase.expectedErr); - } else { - runTestCaseExpectSuccess(st.rs0.getPrimary(), testCase); - } + if (testCase.expectedErr) { + runTestCaseExpectFail(st.rs0.getPrimary(), testCase, testCase.expectedErr); } else { - runTestCaseExpectFail(st.configRS.getPrimary(), testCase, ErrorCodes.NoShardingEnabled); + runTestCaseExpectSuccess(st.rs0.getPrimary(), testCase); } } @@ -125,5 +114,4 @@ function runTestCaseExpectSuccess(conn, testCase) { } st.stop(); -} -}()); +} \ No newline at end of file diff --git a/jstests/noPassthrough/cluster_server_parameter_refresher.js b/jstests/noPassthrough/cluster_server_parameter_refresher.js index a0be0a5dac95f..4a5e2f7b48fc2 100644 --- a/jstests/noPassthrough/cluster_server_parameter_refresher.js +++ b/jstests/noPassthrough/cluster_server_parameter_refresher.js @@ -9,10 +9,10 @@ * requires_sharding * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + kAllClusterParameterDefaults, + runGetClusterParameterSharded +} from "jstests/libs/cluster_server_parameter_utils.js"; function runTest(st, startupRefreshIntervalMS) { // This assert is necessary because we subtract 8000 MS from this value later on, and we don't @@ -153,5 +153,4 @@ let options = { }; let st = new ShardingTest(options); runTest(st, 10000); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/clustered_capped_collection.js b/jstests/noPassthrough/clustered_capped_collection.js index d30a445a925e8..2beac6ff802c1 100644 --- a/jstests/noPassthrough/clustered_capped_collection.js +++ b/jstests/noPassthrough/clustered_capped_collection.js @@ -5,13 +5,12 @@ * requires_fcv_53, * requires_replication, * does_not_support_stepdowns, + * # Tests running with experimental CQF behavior require test 
commands to be enabled. + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/clustered_collections/clustered_capped_utils.js"); +import {ClusteredCappedUtils} from "jstests/libs/clustered_collections/clustered_capped_utils.js"; { const replSet = new ReplSetTest({name: "clustered_capped_collections", nodes: 1}); @@ -58,4 +57,3 @@ load("jstests/libs/clustered_collections/clustered_capped_utils.js"); replSetNoTestCommands.stopSet(); } -})(); diff --git a/jstests/noPassthrough/clustered_capped_collection_arbitrary_key.js b/jstests/noPassthrough/clustered_capped_collection_arbitrary_key.js index 1b369ba4d0447..7c5aa457febf3 100644 --- a/jstests/noPassthrough/clustered_capped_collection_arbitrary_key.js +++ b/jstests/noPassthrough/clustered_capped_collection_arbitrary_key.js @@ -7,11 +7,8 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/clustered_collections/clustered_capped_utils.js"); +import {ClusteredCappedUtils} from "jstests/libs/clustered_collections/clustered_capped_utils.js"; const replSet = new ReplSetTest({name: "clustered_capped_collections", nodes: 1}); replSet.startSet({setParameter: {ttlMonitorSleepSecs: 1, supportArbitraryClusterKeyIndex: true}}); @@ -39,4 +36,3 @@ for (let awaitData of [false, true]) { } replSet.stopSet(); -})(); diff --git a/jstests/noPassthrough/clustered_coll_mod.js b/jstests/noPassthrough/clustered_coll_mod.js index 997b452436dab..ba19c97198666 100644 --- a/jstests/noPassthrough/clustered_coll_mod.js +++ b/jstests/noPassthrough/clustered_coll_mod.js @@ -6,11 +6,8 @@ * ] */ -(function() { -"use strict"; - load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; // Run TTL monitor constantly to speed up this test. const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'}); @@ -76,5 +73,4 @@ function testCollMod(coll, clusterKey, clusterKeyName) { testCollMod(conn.getDB(jsTestName())["coll"], {_id: 1}, "_id_"); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/clustered_coll_mod_arbitrary_key.js b/jstests/noPassthrough/clustered_coll_mod_arbitrary_key.js index 35ef0ee48d3fd..2291735e6f6ec 100644 --- a/jstests/noPassthrough/clustered_coll_mod_arbitrary_key.js +++ b/jstests/noPassthrough/clustered_coll_mod_arbitrary_key.js @@ -6,11 +6,8 @@ * ] */ -(function() { -"use strict"; - load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; // Run TTL monitor constantly to speed up this test. 
const conn = MongoRunner.runMongod( @@ -76,5 +73,4 @@ function testCollMod(coll, clusterKey, clusterKeyName) { testCollMod(conn.getDB("local")["coll"], {ts: 1}, "ts_1"); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/clustered_collection_bounded_scan_nonreplicated.js b/jstests/noPassthrough/clustered_collection_bounded_scan_nonreplicated.js index 1e17e794d86e2..a4c606ffe8b12 100644 --- a/jstests/noPassthrough/clustered_collection_bounded_scan_nonreplicated.js +++ b/jstests/noPassthrough/clustered_collection_bounded_scan_nonreplicated.js @@ -8,11 +8,10 @@ * assumes_unsharded_collection, * ] */ -(function() { -"use strict"; - load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js"); +import { + testClusteredCollectionBoundedScan +} from "jstests/libs/clustered_collections/clustered_collection_bounded_scan_common.js"; const conn = MongoRunner.runMongod({setParameter: {supportArbitraryClusterKeyIndex: true}}); @@ -23,4 +22,3 @@ const nonReplicatedColl = nonReplicatedDB[collName]; testClusteredCollectionBoundedScan(nonReplicatedColl, {ts: 1}); MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthrough/clustered_collection_hint_nonreplicated.js b/jstests/noPassthrough/clustered_collection_hint_nonreplicated.js index 42ab4ddeab25f..c5fffbb132b0e 100644 --- a/jstests/noPassthrough/clustered_collection_hint_nonreplicated.js +++ b/jstests/noPassthrough/clustered_collection_hint_nonreplicated.js @@ -7,10 +7,10 @@ * assumes_unsharded_collection, * ] */ -(function() { -"use strict"; load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/clustered_collections/clustered_collection_hint_common.js"); +import { + testClusteredCollectionHint +} from "jstests/libs/clustered_collections/clustered_collection_hint_common.js"; const conn = MongoRunner.runMongod({setParameter: {supportArbitraryClusterKeyIndex: true}}); @@ -21,4 +21,3 @@ const nonReplicatedColl = nonReplicatedDB[collName]; testClusteredCollectionHint(nonReplicatedColl, {ts: 1}, "ts_1"); MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthrough/clustered_collection_sorted_scan.js b/jstests/noPassthrough/clustered_collection_sorted_scan.js index 21900ec8773f9..8b1d7021883b5 100644 --- a/jstests/noPassthrough/clustered_collection_sorted_scan.js +++ b/jstests/noPassthrough/clustered_collection_sorted_scan.js @@ -2,10 +2,8 @@ * Tests that clustered collections can be used for sorted scanning without inserting * a blocking scan operator. 
 */
-(function() {
-"use strict";
+import {getPlanStage, planHasStage} from "jstests/libs/analyze_plan.js";

-load("jstests/libs/analyze_plan.js");
 load("jstests/libs/clustered_collections/clustered_collection_util.js");

 Random.setRandomSeed();
@@ -15,7 +13,7 @@ const testConnection =
 const testDb = testConnection.getDB('local');
 const collectionSize = 10;
 const clusteredCollName = "clustered_index_sorted_scan_coll";
-const clusterField = "clusterKey";
+const clusterField = "_id";

 let nonClusteredCollName = clusteredCollName + "_nc";

@@ -26,7 +24,6 @@ let clusteredColl = testDb[clusteredCollName];

 // Generate a non-clustered collection for comparison
 assert.commandWorked(testDb.createCollection(nonClusteredCollName));
-assert.commandWorked(testDb[nonClusteredCollName].createIndex({[clusterField]: 1}, {unique: true}));

 let nonClusteredColl = testDb[nonClusteredCollName];

 // Put something in the collections so the planner has something to chew on.
@@ -72,12 +69,16 @@ function runTest(isClustered, hasFilter, hasHint, direction) {
     assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + formatParamsAndPlan(plan));
 }

-function testCollations(direction) {
+function testCollations(collectionCollation, queryCollation, direction) {
+    const collationsMatch = collectionCollation == queryCollation;
+
     let strCollName = clusteredCollName + "_str";

     // Generate a clustered collection for the remainder of the testing
-    assert.commandWorked(testDb.createCollection(
-        strCollName, {clusteredIndex: {key: {[clusterField]: 1}, unique: true}}));
+    assert.commandWorked(testDb.createCollection(strCollName, {
+        clusteredIndex: {key: {[clusterField]: 1}, unique: true},
+        collation: collectionCollation
+    }));

     let tsColl = testDb[strCollName];

@@ -86,22 +87,120 @@ function testCollations(direction) {
         assert.commandWorked(tsColl.insert({[clusterField]: i.toString(), a: Math.random()}));
     }

-    // Run query with Faroese collation, just to choose something unlikely.
-    // Because the collations don't match, we can't use the clustered index
-    // to provide a sort
-    let plan = tsColl.find()
-                   .sort({[clusterField]: direction})
-                   .collation({locale: "fo", caseLevel: true})
-                   .explain();
-    assert(planHasStage(testDb, plan, "SORT"), "Expected sort in " + tojson(plan));
-
-    // However, if we can exclude strings, we don't need an explicit sort even
-    // if the collations don't match
-    plan = tsColl.find({[clusterField]: {$gt: -1}})
-               .sort({[clusterField]: direction})
-               .collation({locale: "fo", caseLevel: true})
-               .explain();
+    function runExplain(filter) {
+        return tsColl.find(filter)
+            .sort({[clusterField]: direction})
+            .collation(queryCollation)
+            .explain();
+    }
+
+    //
+    // Some queries need an explicit sort only when the query/collection collations do not match.
+    //
+    function assertPlanOnlyHasSortIfCollationsDontMatch(plan) {
+        if (collationsMatch) {
+            assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+        } else {
+            assert(planHasStage(testDb, plan, "SORT"), "Expected sort in " + tojson(plan));
+        }
+    }
+
+    // Empty match.
+    let plan = runExplain({});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // Comparison against a field other than the cluster field.
+    plan = runExplain({a: {$lt: 2}});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // Query which contains an unsupported match expression.
+    plan = runExplain({$or: [{[clusterField]: {$lt: 2}}, {[clusterField]: {$gt: 5}}]});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // Conjunction with one child which is an unsupported match expression and another which is a
+    // comparison against a field other than the cluster field.
+    plan = runExplain(
+        {$and: [{$or: [{[clusterField]: {$lt: 2}}, {[clusterField]: {$gt: 5}}]}, {a: {$gt: -1}}]});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // Match which compares the cluster field to a string.
+    plan = runExplain({[clusterField]: {$gt: "1"}});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // Match which compares the cluster field to an object containing a string.
+    plan = runExplain({[clusterField]: {$eq: {a: "str"}}});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // Match which compares the cluster field to an array containing a string.
+    plan = runExplain({[clusterField]: {$eq: [1, 2, "str"]}});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // $in query where one of the elements is a string.
+    plan = runExplain({[clusterField]: {$in: [1, "2", 3]}});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // Conjunction with one child which compares the cluster field to a string and another which
+    // is a comparison against a field other than the cluster field.
+    plan = runExplain({$and: [{[clusterField]: "str"}, {a: 5}]});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    // Conjunction with one $in child which compares the cluster field to a string and another
+    // which is a comparison against a field other than the cluster field.
+    plan = runExplain({$and: [{[clusterField]: {$in: [1, "2", 3]}}, {a: 5}]});
+    assertPlanOnlyHasSortIfCollationsDontMatch(plan);
+
+    //
+    // Some queries can omit the explicit sort regardless of collations. This is the case when
+    // we can exclude string values of the cluster key in the output.
+    //
+
+    // Simple comparison on cluster key which omits strings.
+    plan = runExplain({[clusterField]: {$gt: -1}});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+    plan = runExplain({[clusterField]: {$eq: {a: 5}}});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+    plan = runExplain({[clusterField]: {$eq: [1, 2, 3]}});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+    // Conjunction with multiple comparisons on cluster key which omits strings.
+    plan = runExplain({$and: [{[clusterField]: {$gt: -1}}, {[clusterField]: {$lt: 10}}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+    // $in query against cluster key which omits strings.
+    plan = runExplain({[clusterField]: {$in: [1, 2, 3]}});
     assert(!planHasStage(testDb, plan, "SORT"), "Unxpected sort in " + tojson(plan));
+
+    // Conjunction of $in query against cluster key and another comparison on a field other than
+    // the cluster key. The first conjunct omits strings.
+    plan = runExplain({$and: [{[clusterField]: {$in: [1, 2, 3]}}, {a: 5}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+    // Conjunction with one comparison against the cluster key and one against another field. The
+    // second conjunct omits strings.
+    plan = runExplain({$and: [{a: {$lt: 2}}, {[clusterField]: {$gt: -1}}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+    // Conjunction with one child which is an unsupported match expression and another which is
+    // a comparison against the cluster field. The second conjunct omits strings.
+    plan = runExplain({
+        $and: [
+            {$or: [{[clusterField]: {$lt: 2}}, {[clusterField]: {$gt: 5}}]},
+            {[clusterField]: {$gt: -1}}
+        ]
+    });
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+    // Conjunction which contains a comparison of the cluster field to a string and a comparison
+    // of the cluster field to a number. The second conjunct omits strings.
+    plan = runExplain({$and: [{[clusterField]: {$lt: "1"}}, {[clusterField]: {$gt: 2}}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
+    // Conjunction which contains a $in comparison of the cluster field to a string and a $in
+    // comparison of the cluster field to a number. The second conjunct omits strings.
+    plan = runExplain(
+        {$and: [{[clusterField]: {$in: [1, "2", 3]}}, {[clusterField]: {$in: [1, 3, 4]}}]});
+    assert(!planHasStage(testDb, plan, "SORT"), "Unexpected sort in " + tojson(plan));
+
     tsColl.drop();
 }

@@ -146,7 +245,7 @@ function testPlanCache(direction) {
     assert.commandWorked(clusteredColl.createIndex({a: 1}, {name: indexName}));

     const filter = {a: {$gt: -1}};
-    const projection = {_id: 0, [clusterField]: 1};
+    const projection = {[clusterField]: 1};
     const sort = {[clusterField]: direction};

     // Because of the _a index above, we should have two alternatves -- filter via the
@@ -190,8 +289,31 @@ for (let isClustered = 0; isClustered <= 1; isClustered++) {
     }
 }

-testCollations(/* direction = */ 1);
-testCollations(/* direction = */ -1);
+//
+// Show that the direction of the sort does not affect the plans we are able to provide. Also show
+// the collation conditions under which we can avoid explicit sorts in the final plan.
+// + +const defaultCollation = { + locale: "simple", +}; +const faroeseCollation = { + locale: "fo", + caseLevel: true +}; + +testCollations( + defaultCollation /* for collection */, faroeseCollation /* for query */, /* direction = */ 1); +testCollations( + defaultCollation /* for collection */, faroeseCollation /* for query */, /* direction = */ -1); +testCollations( + faroeseCollation /* for collection */, faroeseCollation /* for query */, /* direction = */ 1); +testCollations( + faroeseCollation /* for collection */, faroeseCollation /* for query */, /* direction = */ -1); +testCollations( + defaultCollation /* for collection */, defaultCollation /* for query */, /* direction = */ 1); +testCollations( + defaultCollation /* for collection */, defaultCollation /* for query */, /* direction = */ -1); testMinMax(); @@ -205,5 +327,4 @@ assert(planHasStage(testDb, plan, "SORT"), "Expected sort in " + tojson(plan)); clusteredColl.drop(); nonClusteredColl.drop(); -MongoRunner.stopMongod(testConnection); -})(); +MongoRunner.stopMongod(testConnection); \ No newline at end of file diff --git a/jstests/noPassthrough/clustered_collection_ttl.js b/jstests/noPassthrough/clustered_collection_ttl.js index de21cccb219bd..da1b0db8bb6fe 100644 --- a/jstests/noPassthrough/clustered_collection_ttl.js +++ b/jstests/noPassthrough/clustered_collection_ttl.js @@ -6,11 +6,9 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; load("jstests/libs/clustered_collections/clustered_collection_util.js"); load('jstests/libs/dateutil.js'); -load('jstests/libs/ttl_util.js'); +import {TTLUtil} from "jstests/libs/ttl_util.js"; // Run TTL monitor constantly to speed up this test. const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'}); @@ -102,5 +100,4 @@ assert.commandWorked(replicatedColl.createIndex({ttlField: 1}, {expireAfterSecon insertAndValidateTTL(replicatedColl, "ttlField"); replicatedColl.drop(); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/collection_catalog_two_phase_drops.js b/jstests/noPassthrough/collection_catalog_two_phase_drops.js index e53da1c1dc72c..a8868eb7f84df 100644 --- a/jstests/noPassthrough/collection_catalog_two_phase_drops.js +++ b/jstests/noPassthrough/collection_catalog_two_phase_drops.js @@ -8,11 +8,7 @@ * requires_wiredtiger * ] */ -(function() { -"use strict"; - -load("jstests/disk/libs/wt_file_helper.js"); -load("jstests/libs/feature_flag_util.js"); +import {getUriForColl, getUriForIndex} from "jstests/disk/libs/wt_file_helper.js"; const rst = new ReplSetTest({ nodes: 1, @@ -32,12 +28,6 @@ const primary = rst.getPrimary(); const dbName = "test"; const db = primary.getDB(dbName); -if (!FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - jsTestLog("Skipping as featureFlagPointInTimeCatalogLookups is not enabled"); - rst.stopSet(); - return; -} - // Pause the checkpoint thread to control the checkpoint timestamp. assert.commandWorked( primary.adminCommand({configureFailPoint: "pauseCheckpointThread", mode: "alwaysOn"})); @@ -83,8 +73,8 @@ checkLog.containsJson(primary, 6825301, { assert.commandWorked(db.adminCommand({appendOplogNote: 1, data: {msg: "advance timestamp"}})); assert.commandWorked(db.adminCommand({fsync: 1})); -// Completing drop for ident. -checkLog.containsJson(primary, 22237, { +// "The ident was successfully dropped". 
+checkLog.containsJson(primary, 6776600, { ident: function(ident) { return ident == xIndexUri; } @@ -133,13 +123,13 @@ checkLog.containsJson(primary, 6825300, { assert.commandWorked(db.adminCommand({appendOplogNote: 1, data: {msg: "advance timestamp"}})); assert.commandWorked(db.adminCommand({fsync: 1})); -// Completing drop for ident. -checkLog.containsJson(primary, 22237, { +// "The ident was successfully dropped". +checkLog.containsJson(primary, 6776600, { ident: function(ident) { return ident == collUri; } }); -checkLog.containsJson(primary, 22237, { +checkLog.containsJson(primary, 6776600, { ident: function(ident) { return ident == idIndexUri; } @@ -160,5 +150,4 @@ checkLog.containsJson(primary, 6825302, { assert.commandWorked( primary.adminCommand({configureFailPoint: "pauseCheckpointThread", mode: "off"})); -rst.stopSet(); -}()); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/collection_scan_low_priority.js b/jstests/noPassthrough/collection_scan_low_priority.js index 192cc0046006d..1270eb321bb37 100644 --- a/jstests/noPassthrough/collection_scan_low_priority.js +++ b/jstests/noPassthrough/collection_scan_low_priority.js @@ -2,6 +2,8 @@ * Tests that unbounded collections scans access the storage engine with low priority. * * @tags: [ + * cqf_incompatible, # TODO SERVER-64007: This test requires plans which yield in order to count + * # low-priority transactions, which CQF cannot generate until this ticket is complete. * requires_wiredtiger, * ] */ @@ -21,10 +23,10 @@ const coll = db.coll; assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1})); -const runTest = function(options) { +const runTest = function(options, deprioritize) { assert.commandWorked(db.createCollection(coll.getName(), options)); - assert.commandWorked(coll.insert({_id: 0})); - assert.commandWorked(coll.insert({_id: 1})); + assert.commandWorked(coll.insert({_id: 0, class: 0})); + assert.commandWorked(coll.insert({_id: 1, class: 0})); const numLowPriority = function() { return db.serverStatus() @@ -34,7 +36,11 @@ const runTest = function(options) { const testScanDeprioritized = function(direction) { const numLowPriorityBefore = numLowPriority(); coll.find().hint({$natural: direction}).itcount(); - assert.gt(numLowPriority(), numLowPriorityBefore); + if (deprioritize) { + assert.gt(numLowPriority(), numLowPriorityBefore); + } else { + assert.eq(numLowPriority(), numLowPriorityBefore); + } }; testScanDeprioritized(1); testScanDeprioritized(-1); @@ -42,7 +48,11 @@ const runTest = function(options) { const testScanSortLimitDeprioritized = function(direction) { const numLowPriorityBefore = numLowPriority(); coll.find().hint({$natural: direction}).sort({_id: 1}).limit(1).itcount(); - assert.gt(numLowPriority(), numLowPriorityBefore); + if (deprioritize) { + assert.gt(numLowPriority(), numLowPriorityBefore); + } else { + assert.eq(numLowPriority(), numLowPriorityBefore); + } }; testScanSortLimitDeprioritized(1); testScanSortLimitDeprioritized(-1); @@ -55,11 +65,45 @@ const runTest = function(options) { testScanLimitNotDeprioritized(1); testScanLimitNotDeprioritized(-1); + const testAggregationInducedScanDeprioritized = function() { + assert.commandWorked(coll.insert({_id: 3, class: 1})); + assert.commandWorked(coll.insert({_id: 4, class: 1})); + let numLowPriorityBefore = numLowPriority(); + coll.aggregate( + [{ + $group: {_id: "$class", idSum: {$count: {}}}, + }], + ); + if (deprioritize) { + assert.gt(numLowPriority(), numLowPriorityBefore); + } else { 
+ assert.eq(numLowPriority(), numLowPriorityBefore); + } + + numLowPriorityBefore = numLowPriority(); + coll.aggregate( + [{ + $match: {class: 0}, + + }], + ); + if (deprioritize) { + assert.gt(numLowPriority(), numLowPriorityBefore); + } else { + assert.eq(numLowPriority(), numLowPriorityBefore); + } + }; + testAggregationInducedScanDeprioritized(); assert(coll.drop()); }; -runTest({}); -runTest({clusteredIndex: {key: {_id: 1}, unique: true}}); +runTest({}, true); +runTest({clusteredIndex: {key: {_id: 1}, unique: true}}, true); + +assert.commandWorked( + db.adminCommand({setParameter: 1, deprioritizeUnboundedUserCollectionScans: false})); +runTest({}, false); +runTest({clusteredIndex: {key: {_id: 1}, unique: true}}, false); MongoRunner.stopMongod(conn); }()); diff --git a/jstests/noPassthrough/collmod_ttl.js b/jstests/noPassthrough/collmod_ttl.js index 9464ec3284c15..4ae97167282e8 100644 --- a/jstests/noPassthrough/collmod_ttl.js +++ b/jstests/noPassthrough/collmod_ttl.js @@ -8,10 +8,7 @@ * requires_ttl_index, * ] */ -(function() { -"use strict"; - -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; // Runs TTL monitor constantly to speed up this test. const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'}); @@ -42,5 +39,4 @@ assert.commandWorked(testDB.runCommand({ TTLUtil.waitForPass(testDB); assert.eq(0, coll.find().itcount()); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/column_scan_slow_logs.js b/jstests/noPassthrough/column_scan_slow_logs.js index b4fda3d42000f..806dcec090764 100644 --- a/jstests/noPassthrough/column_scan_slow_logs.js +++ b/jstests/noPassthrough/column_scan_slow_logs.js @@ -5,10 +5,7 @@ * featureFlagColumnstoreIndexes, * ] */ -(function() { -"use strict"; - -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const conn = MongoRunner.runMongod({}); assert.neq(null, conn, "mongod was unable to start up"); @@ -18,7 +15,7 @@ assert.commandWorked(db.dropDatabase()); if (!setUpServerForColumnStoreIndexTest(db)) { MongoRunner.stopMongod(conn); - return; + quit(); } const coll = db.collection; @@ -65,5 +62,4 @@ assert.eq(planSummary.match(/'_id'/g).length, 1, `'_id' should appear once in planSummary. Instead, got: ${planSummary}`); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/column_store_index_load.js b/jstests/noPassthrough/column_store_index_load.js index d73d2f97a0614..dd7ab8e0887b8 100644 --- a/jstests/noPassthrough/column_store_index_load.js +++ b/jstests/noPassthrough/column_store_index_load.js @@ -1,4 +1,3 @@ - /** * Test that different methods of loading a column store index all produce the same valid results. * Indexes are validated by comparing query results that use the index with results from a control @@ -10,18 +9,15 @@ * featureFlagColumnstoreIndexes, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. 
+import {planHasStage} from "jstests/libs/analyze_plan.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const mongod = MongoRunner.runMongod({}); const db = mongod.getDB("test"); if (!setUpServerForColumnStoreIndexTest(db)) { MongoRunner.stopMongod(mongod); - return; + quit(); } // @@ -242,4 +238,3 @@ for (let i = 0; i < noIndexResults.length; ++i) { } MongoRunner.stopMongod(mongod); -})(); diff --git a/jstests/noPassthrough/columnstore_index_persistence.js b/jstests/noPassthrough/columnstore_index_persistence.js index e8e587ee280fc..db0f04def75cb 100644 --- a/jstests/noPassthrough/columnstore_index_persistence.js +++ b/jstests/noPassthrough/columnstore_index_persistence.js @@ -11,11 +11,8 @@ * ] */ -(function() { -'use strict'; - load('jstests/libs/index_catalog_helpers.js'); -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const rst = new ReplSetTest({nodes: 1}); rst.startSet(); @@ -28,7 +25,7 @@ let db_primary = primary.getDB('test'); if (!setUpServerForColumnStoreIndexTest(db_primary)) { rst.stopSet(); - return; + quit(); } let coll_primary = db_primary.getCollection(collName); @@ -73,5 +70,4 @@ coll_primary = db_primary.getCollection(collName); assert.neq(null, IndexCatalogHelpers.findByKeyPattern(indexList, {"$**": "columnstore"})); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/columnstore_index_rowstore_settings.js b/jstests/noPassthrough/columnstore_index_rowstore_settings.js index 22a2475cdc05c..adf568e8f17a3 100644 --- a/jstests/noPassthrough/columnstore_index_rowstore_settings.js +++ b/jstests/noPassthrough/columnstore_index_rowstore_settings.js @@ -8,17 +8,14 @@ * ] */ -(function() { -'use strict'; - -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const mongod = MongoRunner.runMongod({}); const db = mongod.getDB("test"); if (!setUpServerForColumnStoreIndexTest(db)) { MongoRunner.stopMongod(mongod); - return; + quit(); } const coll = db.columnstore_index_rowstore_settings; @@ -245,5 +242,4 @@ function getRowstoreStats(explainExec) { assert.eq(stats.fetches, count / 100, "Expected number of fetches. " + tojson(explain)); })(); -MongoRunner.stopMongod(mongod); -}()); +MongoRunner.stopMongod(mongod); \ No newline at end of file diff --git a/jstests/noPassthrough/commands_handle_kill.js b/jstests/noPassthrough/commands_handle_kill.js index 6f9a023c014fd..70a8e14f7f18e 100644 --- a/jstests/noPassthrough/commands_handle_kill.js +++ b/jstests/noPassthrough/commands_handle_kill.js @@ -1,7 +1,7 @@ // Tests that commands properly handle their underlying plan executor failing or being killed. // @tags: [ -// # TODO SERVER-64007: Support yielding in CQF plans. -// cqf_incompatible, +// # TODO SERVER-70446: Enable yielding for index plans in CQF. 
+//   cqf_experimental_incompatible,
 // ]
 (function() {
 'use strict';
@@ -212,7 +212,7 @@ assertCommandPropogatesPlanExecutorKillReason({find: coll.getName(), filter: {a:
     {usesIndex: true});

 assertCommandPropogatesPlanExecutorKillReason(
-    {update: coll.getName(), updates: [{q: {a: {$gte: 0}}, u: {$set: {a: 1}}}]},
+    {update: coll.getName(), updates: [{q: {a: {$gte: 0}}, u: {$set: {a: 1}}, multi: true}]},
     {curOpFilter: {op: 'update'}, usesIndex: true});

 assertCommandPropogatesPlanExecutorKillReason(
diff --git a/jstests/noPassthrough/comment_field_passthrough.js b/jstests/noPassthrough/comment_field_passthrough.js
index 0f6e5a1b236b8..f5ad24dfe9d1d 100644
--- a/jstests/noPassthrough/comment_field_passthrough.js
+++ b/jstests/noPassthrough/comment_field_passthrough.js
@@ -8,10 +8,6 @@
  * ]
  */

-// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located
-// configsvr.
-TestData.skipCheckMetadataConsistency = true;
-
 import {authCommandsLib} from "jstests/auth/lib/commands_lib.js";
 load("jstests/libs/fail_point_util.js");  // Helper to enable/disable failpoints easily.

@@ -25,7 +21,8 @@ const denylistedTests = [
     "addShardToZone",
     "removeShardFromZone",
     "oidcListKeys",
-    "oidcRefreshKeys"
+    "oidcRefreshKeys",
+    "aggregate_$search"  // TODO SERVER-76087 reenable this test
 ];

 function runTests(tests, conn, impls, options) {
diff --git a/jstests/noPassthrough/cqf_fallback.js b/jstests/noPassthrough/cqf_fallback.js
index 0cc62da363e94..d4a79595d2c78 100644
--- a/jstests/noPassthrough/cqf_fallback.js
+++ b/jstests/noPassthrough/cqf_fallback.js
@@ -2,11 +2,7 @@
  * Verify that expressions and operators are correctly routed to CQF where eligible. This decision
  * is based on several factors including the query text, collection metadata, etc..
  */
-(function() {
-"use strict";
-
-load("jstests/libs/analyze_plan.js");
-load("jstests/libs/optimizer_utils.js");
+import {usedBonsaiOptimizer} from "jstests/libs/optimizer_utils.js";

 let conn = MongoRunner.runMongod({setParameter: {featureFlagCommonQueryFramework: true}});
 assert.neq(null, conn, "mongod was unable to start up");
@@ -20,7 +16,7 @@ if (assert.commandWorked(db.adminCommand({getParameter: 1, internalQueryFramewor
         .internalQueryFrameworkControl == "forceClassicEngine") {
     jsTestLog("Skipping test due to forceClassicEngine");
     MongoRunner.stopMongod(conn);
-    return;
+    quit();
 }

 assert.commandWorked(
@@ -36,6 +32,28 @@ function assertSupportedByBonsaiFully(cmd) {
     assert.commandWorked(db.runCommand(cmd));
 }

+function assertSupportedByBonsaiExperimentally(cmd) {
+    // Experimental features require the knob to be set to "tryBonsaiExperimental" or higher.
+    // With "tryBonsai", these features should not use the new optimizer.
+    assert.commandWorked(
+        db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"}));
+    const defaultExplain = assert.commandWorked(db.runCommand({explain: cmd}));
+    assert(!usedBonsaiOptimizer(defaultExplain), tojson(defaultExplain));
+
+    // Non-explain should also work and use the fallback mechanism, but we cannot verify exactly
+    // this without looking at the logs.
+    assert.commandWorked(db.runCommand(cmd));
+
+    // Enable "experimental" features in bonsai and expect the query to use Bonsai and pass.
+    assert.commandWorked(
+        db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"}));
+    const explain = assert.commandWorked(db.runCommand({explain: cmd}));
+    assert(usedBonsaiOptimizer(explain), tojson(explain));
+
+    // Non-explain should still work.
+ assert.commandWorked(db.runCommand(cmd)); +} + function assertNotSupportedByBonsai(cmd, testOnly, database = db) { // An unsupported stage should not use the new optimizer. assert.commandWorked( @@ -93,6 +111,64 @@ assertNotSupportedByBonsai({find: coll.getName(), filter: {$alwaysFalse: 1}}, tr assertNotSupportedByBonsai( {aggregate: coll.getName(), pipeline: [{$match: {$alwaysFalse: 1}}], cursor: {}}, true); +// Test $match on _id; these have only experimental support. +assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {_id: 1}}); +assertSupportedByBonsaiExperimentally( + {aggregate: coll.getName(), pipeline: [{$match: {_id: 1}}], cursor: {}}); +assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {_id: {$lt: 10}}}); +assertSupportedByBonsaiExperimentally( + {aggregate: coll.getName(), pipeline: [{$match: {_id: {$lt: 10}}}], cursor: {}}); +assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {'_id.a': 1}}); +assertSupportedByBonsaiExperimentally( + {aggregate: coll.getName(), pipeline: [{$match: {'_id.a': 1}}], cursor: {}}); +assertSupportedByBonsaiExperimentally( + {find: coll.getName(), filter: {$and: [{a: 10}, {_id: {$gte: 5}}]}}); +assertSupportedByBonsaiExperimentally({ + aggregate: coll.getName(), + pipeline: [{$match: {$and: [{a: 10}, {_id: {$gte: 5}}]}}], + cursor: {} +}); + +// Test $project on _id. These are fully supported in bonsai unless the _id index is specifically +// hinted, which is only experimentally supported. +assertSupportedByBonsaiFully({find: coll.getName(), filter: {}, projection: {_id: 1}}); +assertSupportedByBonsaiFully( + {aggregate: coll.getName(), pipeline: [{$project: {_id: 1}}], cursor: {}}); +assertSupportedByBonsaiFully({find: coll.getName(), filter: {}, projection: {_id: 1, a: 1}}); +assertSupportedByBonsaiFully( + {aggregate: coll.getName(), pipeline: [{$project: {_id: 1, a: 1}}], cursor: {}}); + +assertSupportedByBonsaiExperimentally( + {find: coll.getName(), filter: {}, projection: {_id: 1}, hint: {_id: 1}}); +assertSupportedByBonsaiExperimentally( + {aggregate: coll.getName(), pipeline: [{$project: {_id: 1}}], cursor: {}, hint: {_id: 1}}); +assertSupportedByBonsaiExperimentally( + {find: coll.getName(), filter: {}, projection: {_id: 1, a: 1}, hint: {_id: 1}}); +assertSupportedByBonsaiExperimentally({ + aggregate: coll.getName(), + pipeline: [{$project: {_id: 1, a: 1}}], + cursor: {}, + hint: {_id: 1} +}); + +// $natural hints are fully supported in Bonsai... +assertSupportedByBonsaiFully({find: coll.getName(), filter: {}, hint: {$natural: 1}}); +assertSupportedByBonsaiFully( + {aggregate: coll.getName(), pipeline: [], cursor: {}, hint: {$natural: 1}}); +assertSupportedByBonsaiFully({find: coll.getName(), filter: {}, hint: {$natural: -1}}); +assertSupportedByBonsaiFully( + {aggregate: coll.getName(), pipeline: [], cursor: {}, hint: {$natural: -1}}); + +// ... Except if the query relies on some experimental feature (e.g., predicate on _id). +assertSupportedByBonsaiExperimentally( + {find: coll.getName(), filter: {_id: 1}, hint: {$natural: 1}}); +assertSupportedByBonsaiExperimentally( + {aggregate: coll.getName(), pipeline: [{$match: {_id: 1}}], cursor: {}, hint: {$natural: 1}}); +assertSupportedByBonsaiExperimentally( + {find: coll.getName(), filter: {_id: 1}, hint: {$natural: -1}}); +assertSupportedByBonsaiExperimentally( + {aggregate: coll.getName(), pipeline: [{$match: {_id: 1}}], cursor: {}, hint: {$natural: -1}}); + // Unsupported projection expression. 
assertNotSupportedByBonsai( {find: coll.getName(), filter: {}, projection: {a: {$concatArrays: [["$b"], ["suppported"]]}}}, @@ -268,11 +344,11 @@ assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "fr_CA"}})); assertNotSupportedByBonsai({find: coll.getName(), filter: {}}); assertNotSupportedByBonsai({aggregate: coll.getName(), pipeline: [], cursor: {}}); -// A simple collation on an index should be eligible for CQF. +// A simple collation on an index should only have experimental support in CQF. coll.drop(); assert.commandWorked(coll.createIndex({a: 1}, {collation: {locale: "simple"}})); -assertSupportedByBonsaiFully({find: coll.getName(), filter: {}}); -assertSupportedByBonsaiFully({aggregate: coll.getName(), pipeline: [], cursor: {}}); +assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {}}); +assertSupportedByBonsaiExperimentally({aggregate: coll.getName(), pipeline: [], cursor: {}}); // A query against a collection with a hidden index should be eligible for CQF. coll.drop(); @@ -280,10 +356,10 @@ assert.commandWorked(coll.createIndex({a: 1}, {hidden: true})); assertSupportedByBonsaiFully({find: coll.getName(), filter: {}}); assertSupportedByBonsaiFully({aggregate: coll.getName(), pipeline: [], cursor: {}}); -// Unhiding the supported index means the query is still eligible for CQF. +// Unhiding the index means the query only has experimental support in CQF once again. coll.unhideIndex({a: 1}); -assertSupportedByBonsaiFully({find: coll.getName(), filter: {}}); -assertSupportedByBonsaiFully({aggregate: coll.getName(), pipeline: [], cursor: {}}); +assertSupportedByBonsaiExperimentally({find: coll.getName(), filter: {}}); +assertSupportedByBonsaiExperimentally({aggregate: coll.getName(), pipeline: [], cursor: {}}); // A query against a collection with a hidden index should be eligible for CQF even if the // underlying index is not supported. @@ -421,6 +497,9 @@ db = conn.getDB("test"); coll = db[jsTestName()]; coll.drop(); +assert.commandWorked( + db.adminCommand({configureFailPoint: 'enableExplainInBonsai', 'mode': 'alwaysOn'})); + const supportedExpression = { a: {$eq: 4} }; @@ -431,30 +510,46 @@ assert(!usedBonsaiOptimizer(explain), tojson(explain)); explain = coll.explain().aggregate([{$match: supportedExpression}]); assert(!usedBonsaiOptimizer(explain), tojson(explain)); -// Show that trying to set the framework to tryBonsai or forceBonsai is not permitted when the -// feature flag is off. +// Show that trying to set the framework to tryBonsai is not permitted when the feature flag is off, +// but tryBonsaiExperimental and forceBonsai are allowed (since test commands are enabled here by +// default). 
assert.commandFailed( db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); explain = coll.explain().find(supportedExpression).finish(); assert(!usedBonsaiOptimizer(explain), tojson(explain)); -assert.commandFailed( +assert.commandWorked( + db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"})); +explain = coll.explain().find(supportedExpression).finish(); +assert(usedBonsaiOptimizer(explain), tojson(explain)); + +assert.commandWorked( db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"})); explain = coll.explain().find(supportedExpression).finish(); -assert(!usedBonsaiOptimizer(explain), tojson(explain)); +assert(usedBonsaiOptimizer(explain), tojson(explain)); MongoRunner.stopMongod(conn); -// Show that we can't start a mongod with the framework control set to tryBonsai or forceBonsai +// Show that we can't start a mongod with the framework control set to tryBonsaiExperimental when +// test commands are off. +TestData.enableTestCommands = false; +try { + conn = MongoRunner.runMongod( + {setParameter: {internalQueryFrameworkControl: "tryBonsaiExperimental"}}); + MongoRunner.stopMongod(conn); + assert(false, "MongoD was able to start up when it should have failed"); +} catch (_) { + // This is expected. +} + +// Show that we can't start a mongod with the framework control set to tryBonsai // when the feature flag is off. TestData.setParameters.featureFlagCommonQueryFramework = false; -let mongodStarted = false; +TestData.enableTestCommands = true; try { conn = MongoRunner.runMongod({setParameter: {internalQueryFrameworkControl: "tryBonsai"}}); MongoRunner.stopMongod(conn); - mongodStarted = true; + assert(false, "MongoD was able to start up when it should have failed"); } catch (_) { // This is expected. } -assert(!mongodStarted, "MongoD was able to start up when it should have failed"); -}()); diff --git a/jstests/noPassthrough/create_indexes_fails_if_insufficient_disk_space.js b/jstests/noPassthrough/create_indexes_fails_if_insufficient_disk_space.js new file mode 100644 index 0000000000000..020b090156237 --- /dev/null +++ b/jstests/noPassthrough/create_indexes_fails_if_insufficient_disk_space.js @@ -0,0 +1,35 @@ +/** + * Ensures that a createIndexes command request fails when the available disk space is below the + * indexBuildMinAvailableDiskSpaceMB threshold. + * @tags: [ + * requires_fcv_71, + * requires_replication, + * ] + */ + +(function() { +"use strict"; + +load('jstests/libs/fail_point_util.js'); + +const rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); + +const primary = rst.getPrimary(); +const primaryDB = primary.getDB('test'); +const primaryColl = primaryDB.getCollection('test'); + +const simulateDiskSpaceFp = + configureFailPoint(primaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024}); + +// Empty collections do not start index builds, and should succeed. +assert.commandWorked(primaryColl.createIndex({b: 1})); + +// Populate collection. +assert.commandWorked(primaryColl.insert({a: 1})); + +// Index build should fail to start. 
+assert.commandFailedWithCode(primaryColl.createIndex({a: 1}), [ErrorCodes.OutOfDiskSpace]); +rst.stopSet(); +})(); diff --git a/jstests/noPassthrough/crud_timestamps.js b/jstests/noPassthrough/crud_timestamps.js index eb80ef4746308..1bf018b9101a5 100644 --- a/jstests/noPassthrough/crud_timestamps.js +++ b/jstests/noPassthrough/crud_timestamps.js @@ -16,18 +16,10 @@ rst.initiate(); const testDB = rst.getPrimary().getDB(dbName); const coll = testDB.getCollection(collName); -// Determine whether deletes are batched. -const ret = rst.getPrimary().adminCommand({getParameter: 1, featureFlagBatchMultiDeletes: 1}); -assert(ret.ok || (!ret.ok && ret.errmsg === "no option found to get")); -const batchedDeletesEnabled = ret.ok ? ret.featureFlagBatchMultiDeletes.value : false; -if (batchedDeletesEnabled) { - // For consistent results, generate a single delete (applyOps) batch. - assert.commandWorked( - testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchTimeMS: 0})); - assert.commandWorked( - testDB.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0})); - assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchDocs: 0})); -} +// For consistent results, generate a single delete (applyOps) batch. +assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchTimeMS: 0})); +assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetStagedDocBytes: 0})); +assert.commandWorked(testDB.adminCommand({setParameter: 1, batchedDeletesTargetBatchDocs: 0})); if (!testDB.serverStatus().storageEngine.supportsSnapshotReadConcern) { rst.stopSet(); @@ -112,21 +104,9 @@ request = { assert.commandWorked(coll.runCommand(request)); -if (batchedDeletesEnabled) { - const applyOps = oplog.findOne({op: 'c', ns: 'admin.$cmd', 'o.applyOps.op': 'd'}); - const ts = applyOps['ts']; - check(ts, []); -} else { - ts1 = oplog.findOne({op: 'd', o: {_id: 1}}).ts; - ts2 = oplog.findOne({op: 'd', o: {_id: 2}}).ts; - const ts3 = oplog.findOne({op: 'd', o: {_id: 3}}).ts; - const ts4 = oplog.findOne({op: 'd', o: {_id: 4}}).ts; - - check(ts1, [{_id: 2}, {_id: 3, a: 4}, {_id: 4, a: 5}]); - check(ts2, [{_id: 3, a: 4}, {_id: 4, a: 5}]); - check(ts3, [{_id: 4, a: 5}]); - check(ts4, []); -} +const applyOps = oplog.findOne({op: 'c', ns: 'admin.$cmd', 'o.applyOps.op': 'd'}); +const ts = applyOps['ts']; +check(ts, []); session.endSession(); rst.stopSet(); diff --git a/jstests/noPassthrough/currentop_query.js b/jstests/noPassthrough/currentop_query.js index a3244f663437f..88b6b58e87aed 100644 --- a/jstests/noPassthrough/currentop_query.js +++ b/jstests/noPassthrough/currentop_query.js @@ -6,10 +6,7 @@ * requires_sharding, * ] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // This test runs manual getMores using different connections, which will not inherit the // implicit session of the cursor establishing command. @@ -619,5 +616,4 @@ for (let connType of [rsConn, mongosConn]) { } } -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/currentop_target_all_nodes.js b/jstests/noPassthrough/currentop_target_all_nodes.js new file mode 100644 index 0000000000000..5a8168e34b074 --- /dev/null +++ b/jstests/noPassthrough/currentop_target_all_nodes.js @@ -0,0 +1,32 @@ +// Tests that the $currentOp works as expected when run with the targetAllNodes option turned on and +// off. 
+// +// @tags: [ +// requires_fcv_71, +// ] +(function() { +"use strict"; + +const shardCount = 2; +const rsNodesPerShardCount = 2; +const st = new ShardingTest({shards: shardCount, rs: {nodes: rsNodesPerShardCount}}); +const clusterAdminDB = st.s.getDB("admin"); + +function runCurrentOpAgg(shouldTargetAllNodes) { + return clusterAdminDB.aggregate( + [ + {$currentOp: {targetAllNodes: shouldTargetAllNodes}}, + {$match: {"command.comment": "issuing a currentOp with targetAllNodes"}} + ], + {comment: "issuing a currentOp with targetAllNodes"}); +} + +const targetAllNodesFalse = runCurrentOpAgg(false); +assert.eq(shardCount, targetAllNodesFalse.itcount(), tojson(targetAllNodesFalse)); + +const targetAllNodesTrue = runCurrentOpAgg(true); +assert.eq( + shardCount * rsNodesPerShardCount, targetAllNodesTrue.itcount(), tojson(targetAllNodesTrue)); + +st.stop(); +}()); diff --git a/jstests/noPassthrough/dbcheck_detects_data_corruption.js b/jstests/noPassthrough/dbcheck_detects_data_corruption.js new file mode 100644 index 0000000000000..616f07aae47ac --- /dev/null +++ b/jstests/noPassthrough/dbcheck_detects_data_corruption.js @@ -0,0 +1,63 @@ +/** + * This tests that errors are logged when dbCheck finds evidence of corruption, but does not cause + * the operation to fail. + */ +(function() { + +const replSet = new ReplSetTest({nodes: 2}); +replSet.startSet(); +replSet.initiate(); + +const primary = replSet.getPrimary(); +const secondary = replSet.getSecondary(); + +const db = primary.getDB('test'); +const collName = 'coll'; +const coll = db[collName]; + +assert.commandWorked(coll.insert({_id: 0, a: "first"})); + +// Create the same type of corruption on both nodes. +assert.commandWorked(db.adminCommand({ + configureFailPoint: "skipUnindexingDocumentWhenDeleted", + mode: "alwaysOn", + data: {indexName: "_id_"} +})); +assert.commandWorked(secondary.getDB('admin').runCommand({ + configureFailPoint: "skipUnindexingDocumentWhenDeleted", + mode: "alwaysOn", + data: {indexName: "_id_"} +})); + +const docId = 1; +assert.commandWorked(coll.insert({_id: docId, a: "second"})); +assert.commandWorked(coll.remove({_id: docId})); + +// Validate should detect this inconsistency. +let res = coll.validate(); +assert.commandWorked(res); +assert(!res.valid, res); + +assert.commandWorked(db.runCommand({"dbCheck": 1})); + +// Wait for both nodes to finish checking. +[primary, secondary].forEach((node) => { + print("waiting for node to finish: " + tojson(node)); + const healthlog = node.getDB('local').system.healthlog; + assert.soon(() => healthlog.find({operation: "dbCheckStop"}).itcount() == 1); +}); + +[primary, secondary].forEach((node) => { + print("checking " + tojson(node)); + let entry = node.getDB('local').system.healthlog.findOne({severity: 'error'}); + assert(entry, "No healthlog entry found on " + tojson(node)); + assert.eq("Erroneous index key found with reference to non-existent record id", + entry.msg, + tojson(entry)); + + // The erroneous index key should not affect the hashes. The documents should still be the same. 
+ assert.eq(1, node.getDB('local').system.healthlog.count({severity: 'error'})); +}); + +replSet.stopSet(undefined /* signal */, false /* forRestart */, {skipValidation: true}); +})(); diff --git a/jstests/noPassthrough/dbhash_before_ddl_op.js b/jstests/noPassthrough/dbhash_before_ddl_op.js index 6b2e2337b9266..8ba3739a2a31d 100644 --- a/jstests/noPassthrough/dbhash_before_ddl_op.js +++ b/jstests/noPassthrough/dbhash_before_ddl_op.js @@ -1,7 +1,6 @@ /** * Tests that dbHash does not throw SnapshotUnavailable when running earlier than the latest DDL - * operation for a collection in the database. When the point-in-time catalog lookups feature flag - * is disabled, SnapshotUnavailable is still thrown. + * operation for a collection in the database. * * @tags: [ * requires_replication, @@ -10,8 +9,6 @@ (function() { "use strict"; -load("jstests/libs/feature_flag_util.js"); - const replTest = new ReplSetTest({nodes: 1}); replTest.startSet(); replTest.initiate(); @@ -36,47 +33,24 @@ jsTestLog("Last insert timestamp: " + tojson(insertTS)); const renameTS = assert.commandWorked(db[jsTestName()].renameCollection("renamed")).operationTime; jsTestLog("Rename timestamp: " + tojson(renameTS)); -if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - // dbHash at all timestamps should work. - let res = assert.commandWorked(db.runCommand({ - dbHash: 1, - $_internalReadAtClusterTime: createTS, - })); - assert(res.collections.hasOwnProperty(jsTestName())); - - res = assert.commandWorked(db.runCommand({ - dbHash: 1, - $_internalReadAtClusterTime: insertTS, - })); - assert(res.collections.hasOwnProperty(jsTestName())); - - res = assert.commandWorked(db.runCommand({ - dbHash: 1, - $_internalReadAtClusterTime: renameTS, - })); - assert(res.collections.hasOwnProperty("renamed")); -} else { - // dbHash at the 'createTS' should throw SnapshotUnavailable due to the rename. - assert.commandFailedWithCode(db.runCommand({ - dbHash: 1, - $_internalReadAtClusterTime: createTS, - }), - ErrorCodes.SnapshotUnavailable); - - // dbHash at the 'insertTS' should throw SnapshotUnavailable due to the rename. - assert.commandFailedWithCode(db.runCommand({ - dbHash: 1, - $_internalReadAtClusterTime: insertTS, - }), - ErrorCodes.SnapshotUnavailable); - - // dbHash at 'renameTS' should work. - let res = assert.commandWorked(db.runCommand({ - dbHash: 1, - $_internalReadAtClusterTime: renameTS, - })); - assert(res.collections.hasOwnProperty("renamed")); -} +// dbHash at all timestamps should work. +let res = assert.commandWorked(db.runCommand({ + dbHash: 1, + $_internalReadAtClusterTime: createTS, +})); +assert(res.collections.hasOwnProperty(jsTestName())); + +res = assert.commandWorked(db.runCommand({ + dbHash: 1, + $_internalReadAtClusterTime: insertTS, +})); +assert(res.collections.hasOwnProperty(jsTestName())); + +res = assert.commandWorked(db.runCommand({ + dbHash: 1, + $_internalReadAtClusterTime: renameTS, +})); +assert(res.collections.hasOwnProperty("renamed")); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/dbstats_sharded_collection.js b/jstests/noPassthrough/dbstats_sharded_collection.js deleted file mode 100644 index a2ab3dde627cf..0000000000000 --- a/jstests/noPassthrough/dbstats_sharded_collection.js +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Tests that the dbStats command properly computes the stats by comparing the results from a - * sharded cluster to the summation of querying the mongod's directly. 
- * - * @tags: [requires_dbstats] - */ - -(function() { -"use strict"; - -// Set up cluster with 2 shards, insert a batch of documents, and configure the cluster so both -// shards have documents. -const st = new ShardingTest({shards: 2, mongos: 1}); -const dbName = "db"; -const db = st.getDB(dbName); -const collName = "foo"; -const ns = dbName + "." + collName; -const numDocs = 100; - -assert.commandWorked(st.s.adminCommand({enableSharding: dbName})); - -let primaryShard = st.getPrimaryShard(dbName); -let secondaryShard = st.getOther(primaryShard); - -let bulk = primaryShard.getCollection(ns).initializeUnorderedBulkOp(); -for (let i = 0; i < numDocs; i++) { - bulk.insert({_id: i, x: i, y: -i}); -} -assert.commandWorked(bulk.execute()); -assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}})); -assert.commandWorked(st.s.adminCommand({split: ns, middle: {_id: numDocs / 2}})); -assert.commandWorked(st.s.adminCommand( - {moveChunk: ns, find: {_id: 0}, to: secondaryShard.name, _waitForDelete: true})); - -const scale = 1024 * 1024; -let dbStats = db.runCommand({dbStats: 1, scale: scale}); -assert.commandWorked(dbStats); -jsTestLog('dbStats result on mongos: ' + tojson(dbStats)); -let shard0Stats = primaryShard.getDB(dbName).runCommand({dbStats: 1, scale: scale}); -assert.commandWorked(shard0Stats); -jsTestLog('dbStats result on primary shard ' + primaryShard.host + ': ' + tojson(shard0Stats)); -let shard1Stats = secondaryShard.getDB(dbName).runCommand({dbStats: 1, scale: scale}); -assert.commandWorked(shard1Stats); -jsTestLog('dbStats result on secondary shard ' + secondaryShard.host + ': ' + tojson(shard1Stats)); - -// Compare each of the relevant fields in dbStats to make sure the individual shards' responses sum -// to the overall cluster's value. 
-let total = shard0Stats.collections + shard1Stats.collections; -assert.eq(dbStats.collections, - total, - "Sharded collection dbStats returned " + dbStats.collections + - " collections total, but sum of individual shards' responses returned " + total + - " collections total"); - -total = shard0Stats.views + shard1Stats.views; -assert.eq(dbStats.views, - total, - "Sharded collection dbStats returned " + dbStats.views + - " views total, but sum of individual shards' responses returned " + total + - " views total"); - -total = shard0Stats.objects + shard1Stats.objects; -assert.eq(dbStats.objects, - total, - "Sharded collection dbStats returned " + dbStats.objects + - " objects total, but sum of individual shards' responses returned " + total + - " objects total"); - -total = shard0Stats.dataSize + shard1Stats.dataSize; -assert.eq(dbStats.dataSize, - total, - "Sharded collection dbStats returned " + dbStats.dataSize + - " dataSize total, but sum of individual shards' responses returned " + total + - " dataSize total"); - -total = shard0Stats.storageSize + shard1Stats.storageSize; -assert.eq(dbStats.storageSize, - total, - "Sharded collection dbStats returned " + dbStats.storageSize + - " storageSize total, but sum of individual shards' responses returned " + total + - " storageSize total"); - -total = shard0Stats.indexes + shard1Stats.indexes; -assert.eq(dbStats.indexes, - total, - "Sharded collection dbStats returned " + dbStats.indexes + - " indexes total, but sum of individual shards' responses returned " + total + - " indexes total"); - -total = shard0Stats.indexSize + shard1Stats.indexSize; -assert.eq(dbStats.indexSize, - total, - "Sharded collection dbStats returned " + dbStats.indexSize + - " indexSize total, but sum of individual shards' responses returned " + total + - " indexSize total"); - -total = shard0Stats.totalSize + shard1Stats.totalSize; -assert.eq(dbStats.totalSize, - total, - "Sharded collection dbStats returned " + dbStats.totalSize + - " totalSize total, but sum of individual shards' responses returned " + total + - " totalSize total"); - -st.stop(); -})(); \ No newline at end of file diff --git a/jstests/noPassthrough/dedicated_to_catalog_shard.js b/jstests/noPassthrough/dedicated_to_catalog_shard.js index 94107ac52ac22..4cd77cb921d49 100644 --- a/jstests/noPassthrough/dedicated_to_catalog_shard.js +++ b/jstests/noPassthrough/dedicated_to_catalog_shard.js @@ -1,17 +1,12 @@ /** - * Tests catalog shard topology. + * Tests config shard topology. * * @tags: [ * requires_fcv_70, - * featureFlagCatalogShard, * featureFlagTransitionToCatalogShard, * ] */ -// TODO (SERVER-74534): Enable the metadata consistency check when it will work with co-located -// configsvr. -TestData.skipCheckMetadataConsistency = true; - (function() { "use strict"; @@ -52,17 +47,17 @@ const configCS = st.configRS.getURL(); } // -// Catalog shard mode tests (post addShard). +// Config shard mode tests (post addShard). // { // // Adding the config server as a shard works. // - assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1})); + assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1})); // More than once works. 
- assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1})); - assert.commandWorked(st.s.adminCommand({transitionToCatalogShard: 1})); + assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1})); + assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1})); // Flushing routing / db cache updates works. flushRoutingAndDBCacheUpdates(st.configRS.getPrimary()); diff --git a/jstests/noPassthrough/devnull.js b/jstests/noPassthrough/devnull.js index 5d3fa5e1c5f75..103b49f4b5691 100644 --- a/jstests/noPassthrough/devnull.js +++ b/jstests/noPassthrough/devnull.js @@ -10,7 +10,7 @@ assert(logContents.indexOf("enableMajorityReadConcern:false is no longer support const emrcDefaultConn = MongoRunner.runMongod({storageEngine: "devnull"}); db = emrcDefaultConn.getDB("test"); -res = db.foo.insert({x: 1}); +let res = db.foo.insert({x: 1}); assert.eq(1, res.nInserted, tojson(res)); // Skip collection validation during stopMongod if invalid storage engine. diff --git a/jstests/noPassthrough/disabled_cluster_server_parameters.js b/jstests/noPassthrough/disabled_cluster_server_parameters.js index ca88787627e00..fa470b6ffa61c 100644 --- a/jstests/noPassthrough/disabled_cluster_server_parameters.js +++ b/jstests/noPassthrough/disabled_cluster_server_parameters.js @@ -5,13 +5,18 @@ * @tags: [ * does_not_support_stepdowns, * requires_replication, - * requires_sharding + * requires_sharding, + * # Tests running with experimental CQF behavior require test commands to be enabled. + * cqf_experimental_incompatible, * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + setupNode, + setupReplicaSet, + setupSharded, + testDisabledClusterParameters +} from "jstests/libs/cluster_server_parameter_utils.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; // Verifies that test-only parameters are disabled and excluded when enableTestCommands is false. TestData.enableTestCommands = false; @@ -54,4 +59,3 @@ setupSharded(st); // Check that the same behavior for disabled cluster server parameters holds on sharded clusters. testDisabledClusterParameters(st); st.stop(); -}()); diff --git a/jstests/noPassthrough/disabled_test_parameters.js b/jstests/noPassthrough/disabled_test_parameters.js index 0f71810db035f..3ac30bb5da2fc 100644 --- a/jstests/noPassthrough/disabled_test_parameters.js +++ b/jstests/noPassthrough/disabled_test_parameters.js @@ -1,4 +1,6 @@ // Test that test-only set parameters are disabled. +// Tests running with experimental CQF behavior require test commands to be enabled. +// @tags: [cqf_experimental_incompatible] (function() { 'use strict'; diff --git a/jstests/noPassthrough/drop_config_db.js b/jstests/noPassthrough/drop_config_db.js index c629d41d7a9ca..4f44b5a90990f 100644 --- a/jstests/noPassthrough/drop_config_db.js +++ b/jstests/noPassthrough/drop_config_db.js @@ -1,5 +1,7 @@ /* * Test that dropping the config DB does not crash the server. + * Tests running with experimental CQF behavior require test commands to be enabled. 
+ * @tags: [cqf_experimental_incompatible] */ (function() { "use strict"; diff --git a/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js b/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js index 20d4b93049b18..962ebd55da0f1 100644 --- a/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js +++ b/jstests/noPassthrough/drop_oplog_should_fail_if_storage_engine_supports_replSetResizeOplog_command.js @@ -37,4 +37,4 @@ if (storageEngineIsWiredTiger()) { } MongoRunner.stopMongod(primary); -}()); \ No newline at end of file +}()); diff --git a/jstests/noPassthrough/drop_pending_retry.js b/jstests/noPassthrough/drop_pending_retry.js index f726345d405fb..e6d8254d887c2 100644 --- a/jstests/noPassthrough/drop_pending_retry.js +++ b/jstests/noPassthrough/drop_pending_retry.js @@ -7,10 +7,7 @@ * requires_wiredtiger * ] */ -(function() { -"use strict"; - -load("jstests/disk/libs/wt_file_helper.js"); +import {getUriForColl, getUriForIndex} from "jstests/disk/libs/wt_file_helper.js"; const rst = new ReplSetTest({ nodes: 1, @@ -18,6 +15,7 @@ const rst = new ReplSetTest({ setParameter: { // Set the history window to zero to explicitly control the oldest timestamp. minSnapshotHistoryWindowInSeconds: 0, + logComponentVerbosity: tojson({storage: 1}), } } }); @@ -51,20 +49,25 @@ assert.commandWorked(db.getCollection("toWrite").insert({x: 1})); // Take a checkpoint to advance the checkpoint timestamp. assert.commandWorked(db.adminCommand({fsync: 1})); -// Tests that the table drops are retried each time the drop pending reaper runs until they succeed. -// We wait for 5 retries here. 5 for the collection table and 5 for the index table. -checkLog.containsWithAtLeastCount(primary, "Drop-pending ident is still in use", 2 * 5); +// Tests that the table drops are retried when the drop pending reaper runs. Once for the collection +// and once for the index. +checkLog.containsWithAtLeastCount(primary, "Drop-pending ident is still in use", 2); // Let the table drops succeed. assert.commandWorked(primary.adminCommand({configureFailPoint: "WTDropEBUSY", mode: "off"})); -// Completing drop for ident -checkLog.containsJson(primary, 22237, { +// Perform another write and another checkpoint to advance the checkpoint timestamp, triggering +// the reaper. +assert.commandWorked(db.getCollection("toWrite").insert({x: 1})); +assert.commandWorked(db.adminCommand({fsync: 1})); + +// "The ident was successfully dropped". 
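The assertions that follow match structured log lines by numeric id and by per-attribute predicates. As a rough, illustrative sketch of the checkLog helpers as they are used in this patch (the connection and the ident value are placeholders, not a verbatim excerpt):

```js
// Match a plain substring a minimum number of times, then match a structured
// log id where each attribute filter is a literal value or a predicate.
checkLog.containsWithAtLeastCount(primary, "Drop-pending ident is still in use", 2);
checkLog.containsJson(primary, 6776600 /* "The ident was successfully dropped" */, {
    // 'collUri' would come from getUriForColl(), as in the test above.
    ident: (ident) => ident == collUri,
});
```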
+checkLog.containsJson(primary, 6776600, { ident: function(ident) { return ident == collUri; } }); -checkLog.containsJson(primary, 22237, { +checkLog.containsJson(primary, 6776600, { ident: function(ident) { return ident == indexUri; } @@ -73,5 +76,4 @@ checkLog.containsJson(primary, 22237, { assert.commandWorked( primary.adminCommand({configureFailPoint: "pauseCheckpointThread", mode: "off"})); -rst.stopSet(); -}()); \ No newline at end of file +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/drop_unfinished_replicated_index_build_in_standalone.js b/jstests/noPassthrough/drop_unfinished_replicated_index_build_in_standalone.js index f4e9728865b88..c3cb0aba1b24c 100644 --- a/jstests/noPassthrough/drop_unfinished_replicated_index_build_in_standalone.js +++ b/jstests/noPassthrough/drop_unfinished_replicated_index_build_in_standalone.js @@ -7,10 +7,6 @@ * requires_replication, * ] */ -(function() { -'use strict'; - -load('jstests/disk/libs/wt_file_helper.js'); load('jstests/noPassthrough/libs/index_build.js'); const dbName = jsTestName(); @@ -70,5 +66,4 @@ jsTestLog("Dropping database from secondary"); assert.commandWorked(secondaryDB.dropDatabase()); MongoRunner.stopMongod(mongod); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/durable_history_index_usage.js b/jstests/noPassthrough/durable_history_index_usage.js index ae3be04fd14dd..d84ac707ba530 100644 --- a/jstests/noPassthrough/durable_history_index_usage.js +++ b/jstests/noPassthrough/durable_history_index_usage.js @@ -6,12 +6,8 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/libs/fail_point_util.js"); load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/feature_flag_util.js"); const replTest = new ReplSetTest({ nodes: 1, @@ -68,9 +64,6 @@ const findWithIndex = function(atClusterTime, expectedErrCode) { } }; -const pointInTimeCatalogLookupsAreEnabled = - FeatureFlagUtil.isEnabled(testDB(), "PointInTimeCatalogLookups"); - const oldestTS = insert({a: 0}); jsTestLog("Oldest timestamp: " + tojson(oldestTS)); @@ -130,8 +123,7 @@ checkLogs(); // The index is being re-created. -// When the PointInTimeCatalogLookups feature flag is enabled, it's possible to read prior to the -// most recent DDL operation for the collection. +// It's possible to read prior to the most recent DDL operation for the collection. // // At oldestTs, the index did not exist, so queries for the index at that timestamp will return // BadValue. @@ -144,16 +136,12 @@ checkLogs(); // // Etc. // -// Generally speaking when the PointInTimeCatalogLookups feature flag is enabled, find queries -// should all return the result one would expect based on the state of the catalog at that point in -// time. When the feature flag is disabled, these find queries will instead return +// Find queries should all return the result one would expect based on the state of the catalog at +// that point in time. When the feature flag is disabled, these find queries will instead return // SnapshotUnavailable. -findWithIndex( - oldestTS, - pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable); -findWithIndex(createIndexTS, - pointInTimeCatalogLookupsAreEnabled ? 
null : ErrorCodes.SnapshotUnavailable); +findWithIndex(oldestTS, ErrorCodes.BadValue); +findWithIndex(createIndexTS, null); findWithIndex(preIndexCommitTS, ErrorCodes.BadValue); findWithIndex(undefined, ErrorCodes.BadValue); @@ -183,14 +171,9 @@ checkLog.containsJson(primary(), 20663, { }); IndexBuildTest.assertIndexes(coll(), 2, ["_id_", "a_1"]); -findWithIndex( - oldestTS, - pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable); -findWithIndex(createIndexTS, - pointInTimeCatalogLookupsAreEnabled ? null : ErrorCodes.SnapshotUnavailable); -findWithIndex( - preIndexCommitTS, - pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable); +findWithIndex(oldestTS, ErrorCodes.BadValue); +findWithIndex(createIndexTS, null); +findWithIndex(preIndexCommitTS, ErrorCodes.BadValue); findWithIndex(restartInsertTS, ErrorCodes.BadValue); assert.eq(3, findWithIndex(undefined)["cursor"]["firstBatch"].length); @@ -208,44 +191,34 @@ const insertAfterRestartAfterIndexBuild = insert({a: 4}); assert.eq(5, findWithIndex(insertAfterRestartAfterIndexBuild)["cursor"]["firstBatch"].length); assert.eq(5, findWithIndex(undefined)["cursor"]["firstBatch"].length); -findWithIndex( - oldestTS, - pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable); -findWithIndex(createIndexTS, - pointInTimeCatalogLookupsAreEnabled ? null : ErrorCodes.SnapshotUnavailable); -findWithIndex( - preIndexCommitTS, - pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable); -findWithIndex( - restartInsertTS, - pointInTimeCatalogLookupsAreEnabled ? ErrorCodes.BadValue : ErrorCodes.SnapshotUnavailable); +findWithIndex(oldestTS, ErrorCodes.BadValue); +findWithIndex(createIndexTS, null); +findWithIndex(preIndexCommitTS, ErrorCodes.BadValue); +findWithIndex(restartInsertTS, ErrorCodes.BadValue); + +assert.eq(4, findWithIndex(insertAfterIndexBuildTS)["cursor"]["firstBatch"].length); + +// Drop the index and demonstrate the durable history can be used across a restart for reads with +// times prior to the drop. +const dropIndexTS = assert.commandWorked(coll().dropIndex(indexSpec)).operationTime; +jsTestLog("Index drop timestamp: " + tojson(dropIndexTS)); + +// Take a checkpoint to persist the new catalog entry of the index being rebuilt. +assert.commandWorked(testDB().adminCommand({fsync: 1})); + +replTest.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL}, {forRestart: true}); +replTest.start( + 0, + { + setParameter: { + // To control durable history more predictably, disable the checkpoint thread. + syncdelay: 0 + } + }, + true /* restart */); +// Test that we can read using the dropped index on timestamps before the drop assert.eq(4, findWithIndex(insertAfterIndexBuildTS)["cursor"]["firstBatch"].length); +assert.eq(5, findWithIndex(insertAfterRestartAfterIndexBuild)["cursor"]["firstBatch"].length); -if (pointInTimeCatalogLookupsAreEnabled) { - // Drop the index and demonstrate the durable history can be used across a restart for reads - // with times prior to the drop. - const dropIndexTS = assert.commandWorked(coll().dropIndex(indexSpec)).operationTime; - jsTestLog("Index drop timestamp: " + tojson(dropIndexTS)); - - // Take a checkpoint to persist the new catalog entry of the index being rebuilt. 
- assert.commandWorked(testDB().adminCommand({fsync: 1})); - - replTest.stop(0, 9, {allowedExitCode: MongoRunner.EXIT_SIGKILL}, {forRestart: true}); - replTest.start( - 0, - { - setParameter: { - // To control durable history more predictably, disable the checkpoint thread. - syncdelay: 0 - } - }, - true /* restart */); - - // Test that we can read using the dropped index on timestamps before the drop - assert.eq(4, findWithIndex(insertAfterIndexBuildTS)["cursor"]["firstBatch"].length); - assert.eq(5, findWithIndex(insertAfterRestartAfterIndexBuild)["cursor"]["firstBatch"].length); -} - -replTest.stopSet(); -})(); +replTest.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js b/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js index 65826603346b2..ca3c2eb7b35b0 100644 --- a/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js +++ b/jstests/noPassthrough/ensure_size_storer_flushes_periodically.js @@ -71,4 +71,4 @@ assert.gte(testColl.count(), "Fast count should still be 100 + 1 after crash. Fast count: " + testColl.count()); MongoRunner.stopMongod(conn); -}()); \ No newline at end of file +}()); diff --git a/jstests/noPassthrough/explain_execution_time_in_nanoseconds.js b/jstests/noPassthrough/explain_execution_time_in_nanoseconds.js index 3fd8028b2e8bf..2ee2978d38463 100644 --- a/jstests/noPassthrough/explain_execution_time_in_nanoseconds.js +++ b/jstests/noPassthrough/explain_execution_time_in_nanoseconds.js @@ -1,10 +1,7 @@ // When running explain commands with "executionStats" verbosity, checks that the explain output // includes "executionTimeMicros"/"executionTimeNanos" only if requested. // "executionTimeMillisEstimate" will always be present in the explain output. -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAllPlanStages(). +import {getAllPlanStages} from "jstests/libs/analyze_plan.js"; let conn = MongoRunner.runMongod({}); assert.neq(conn, null, "mongod failed to start up"); @@ -85,5 +82,4 @@ for (let executionStage of executionStages) { } } -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/explain_group_stage_exec_stats.js b/jstests/noPassthrough/explain_group_stage_exec_stats.js index 8a8774f694a5a..4526992c036c5 100644 --- a/jstests/noPassthrough/explain_group_stage_exec_stats.js +++ b/jstests/noPassthrough/explain_group_stage_exec_stats.js @@ -2,11 +2,8 @@ * Tests that $group stage reports memory footprint per accumulator when explain is run with * verbosities "executionStats" and "allPlansExecution". */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); const testDB = conn.getDB('test'); @@ -19,7 +16,7 @@ if (checkSBEEnabled(testDB)) { // spilling behavior of the classic DocumentSourceGroup stage. 
jsTest.log("Skipping test since SBE $group pushdown has different memory tracking behavior"); MongoRunner.stopMongod(conn); - return; + quit(); } const bigStr = Array(1025).toString(); // 1KB of ',' @@ -150,4 +147,3 @@ groupStages = getAggPlanStage( checkGroupStages(groupStages, {}, false, 0); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/explain_output_truncation.js b/jstests/noPassthrough/explain_output_truncation.js index 563bbaddbda2a..c7a89bc60f9e2 100644 --- a/jstests/noPassthrough/explain_output_truncation.js +++ b/jstests/noPassthrough/explain_output_truncation.js @@ -1,10 +1,7 @@ /** * Test that explain output is correctly truncated when it grows too large. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage, getWinningPlan, planHasStage} from "jstests/libs/analyze_plan.js"; const dbName = "test"; const collName = jsTestName(); @@ -49,5 +46,4 @@ assert.eq( fetchStage.inputStage.warning, "stats tree exceeded BSON size limit for explain", explain); assert(!planHasStage(testDb, explain, "IXSCAN"), explain); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/explain_sort_exec_stats.js b/jstests/noPassthrough/explain_sort_exec_stats.js index f21e82c9d28e6..de7fbb3f3c1f6 100644 --- a/jstests/noPassthrough/explain_sort_exec_stats.js +++ b/jstests/noPassthrough/explain_sort_exec_stats.js @@ -2,11 +2,8 @@ * Tests that $sort stage reports the correct stats when explain is run with * different verbosities. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); const db = conn.getDB("test"); @@ -90,4 +87,3 @@ pipelines.forEach(function(pipeline) { }); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/explain_unionwith_lookup_sharded.js b/jstests/noPassthrough/explain_unionwith_lookup_sharded.js index ef4b4a42bec66..4268143845ab5 100644 --- a/jstests/noPassthrough/explain_unionwith_lookup_sharded.js +++ b/jstests/noPassthrough/explain_unionwith_lookup_sharded.js @@ -4,10 +4,7 @@ * * This test was originally designed to reproduce SERVER-71636. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStage, getAggPlanStages} from "jstests/libs/analyze_plan.js"; const dbName = "test"; @@ -212,5 +209,4 @@ stageExplain = getStageFromMergerPart(explain); assert(stageExplain.hasOwnProperty("$unionWith"), explain); assertStageDoesNotHaveRuntimeStats(stageExplain); -st.stop(); -}()); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/external_data_source.js b/jstests/noPassthrough/external_data_source.js index b366fc7739063..5b24adbb939ed 100644 --- a/jstests/noPassthrough/external_data_source.js +++ b/jstests/noPassthrough/external_data_source.js @@ -7,10 +7,7 @@ * requires_external_data_source * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // for aggPlanHasStage() +import {aggPlanHasStage} from "jstests/libs/analyze_plan.js"; // Runs tests on a standalone mongod. 
let conn = MongoRunner.runMongod({setParameter: {enableComputeMode: true}}); @@ -160,6 +157,17 @@ assert.throwsWithCode(() => { }); })(); +(function testCollectionlessAgg() { + const docs = [{a: 1}, {a: 2}, {a: 3}]; + assert.sameMembers(docs, db.aggregate([{$documents: docs}]).toArray()); +})(); + +(function testCollectionlessAggWithExternalDataSources() { + assert.throwsWithCode(() => { + db.aggregate([{$documents: [{a: 1}]}], {$_externalDataSources: []}); + }, 7604400); +})(); + // // Named Pipes success test cases follow. // @@ -491,5 +499,4 @@ if (hostInfo.os.type != "Windows") { return !runningStatus.alive && runningStatus.exitCode != 0; }, "Expected mongod died due to an error", 120 * 1000); })(); -} -})(); +} \ No newline at end of file diff --git a/jstests/noPassthrough/external_sort_find.js b/jstests/noPassthrough/external_sort_find.js index a1505f129a392..0b5ad6f9d0682 100644 --- a/jstests/noPassthrough/external_sort_find.js +++ b/jstests/noPassthrough/external_sort_find.js @@ -1,11 +1,8 @@ /** * Test that the find command can spill to disk while executing a blocking sort. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // Only allow blocking sort execution to use 100 kB of memory. const kMaxMemoryUsageBytes = 100 * 1024; @@ -167,4 +164,3 @@ assert.eq(aggregationExternalSortStatsForPipeline.spills, aggregationExternalSortStatsForPipeline); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/fle2_shardsvr_cleanup.js b/jstests/noPassthrough/fle2_shardsvr_cleanup.js new file mode 100644 index 0000000000000..0d8b965dc2837 --- /dev/null +++ b/jstests/noPassthrough/fle2_shardsvr_cleanup.js @@ -0,0 +1,36 @@ +/** + * Cannot run cleanup against a shard server + * + * @tags: [ + * requires_fcv_70, + * requires_sharding, + * featureFlagFLE2CleanupCommand + * ] + */ +import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js"; + +function runTest(mongosConn, shardConn) { + let dbName = 'testdb'; + + let clientMongoS = new EncryptedClient(mongosConn, dbName); + + assert.commandWorked(clientMongoS.createEncryptionCollection("basic", { + encryptedFields: { + "fields": + [{"path": "first", "bsonType": "string", "queries": {"queryType": "equality"}}] + } + })); + + let clientShard = new EncryptedClient(shardConn, dbName); + + assert.commandFailedWithCode(clientShard.getDB().basic.cleanup(), 7618804); +} + +jsTestLog("Sharding: Testing fle2 cleanup not allowed against a shard server"); +{ + const st = new ShardingTest({shards: 1, mongos: 1, config: 1}); + + runTest(st.s, st.shard0); + + st.stop(); +} diff --git a/jstests/noPassthrough/fle2_shardsvr_compact.js b/jstests/noPassthrough/fle2_shardsvr_compact.js index 7f5aa2cc5bf8b..16bffafd7d2c6 100644 --- a/jstests/noPassthrough/fle2_shardsvr_compact.js +++ b/jstests/noPassthrough/fle2_shardsvr_compact.js @@ -6,10 +6,7 @@ * requires_sharding * ] */ -load("jstests/fle2/libs/encrypted_client_util.js"); - -(function() { -'use strict'; +import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js"; function runTest(mongosConn, shardConn) { let dbName = 'testdb'; @@ -36,4 +33,3 @@ jsTestLog("Sharding: Testing fle2 drop collection warning"); st.stop(); } -}()); diff --git a/jstests/noPassthrough/ftdc_connection_reuse.js b/jstests/noPassthrough/ftdc_connection_reuse.js index 
242afee00a077..4d490870d67d5 100644 --- a/jstests/noPassthrough/ftdc_connection_reuse.js +++ b/jstests/noPassthrough/ftdc_connection_reuse.js @@ -14,7 +14,7 @@ load("jstests/libs/parallelTester.js"); const ftdcPath = MongoRunner.toRealPath('ftdc'); const st = new ShardingTest({ - shards: 1, + shards: {rs0: {nodes: 1}}, mongos: { s0: {setParameter: {diagnosticDataCollectionDirectoryPath: ftdcPath}}, } @@ -27,11 +27,11 @@ const testDB = st.s.getDB(kDbName); const coll = testDB.getCollection(kCollName); function getDiagnosticData() { + let stats; assert.soon(() => { - let stats = verifyGetDiagnosticData(st.s.getDB("admin")).connPoolStats; + stats = verifyGetDiagnosticData(st.s.getDB("admin")).connPoolStats; return stats["pools"].hasOwnProperty('NetworkInterfaceTL-TaskExecutorPool-0'); }, "Failed to load NetworkInterfaceTL-TaskExecutorPool-0 in FTDC within time limit"); - const stats = verifyGetDiagnosticData(st.s.getDB("admin")).connPoolStats; assert(stats.hasOwnProperty('totalWasUsedOnce')); assert(stats.hasOwnProperty('totalConnUsageTimeMillis')); return stats["pools"]["NetworkInterfaceTL-TaskExecutorPool-0"]; @@ -74,8 +74,14 @@ function launchFinds({times, readPref, shouldFail}) { function resetPools() { const cfg = st.rs0.getPrimary().getDB('local').system.replset.findOne(); const allHosts = cfg.members.map(x => x.host); - assert.commandWorked(st.s.adminCommand({dropConnections: 1, hostAndPort: allHosts})); + // FTDC data is collected periodically. Check that the data returned reflects that the pools + // have been dropped before resuming testing. + assert.soon(() => { + const stats = getDiagnosticData(); + // The shard has a single node in its replica set. + return !stats.hasOwnProperty(allHosts[0]); + }, "Failed to wait for pool stats to reflect dropped pools"); } [1, 2, 3].forEach(v => assert.commandWorked(coll.insert({x: v}))); diff --git a/jstests/noPassthrough/ftdc_mirrored_reads.js b/jstests/noPassthrough/ftdc_mirrored_reads.js index c1bb78e7dac8b..2c7eda4cb928d 100644 --- a/jstests/noPassthrough/ftdc_mirrored_reads.js +++ b/jstests/noPassthrough/ftdc_mirrored_reads.js @@ -16,26 +16,30 @@ const kCollName = "test"; const kOperations = 100; const rst = new ReplSetTest({nodes: 3}); -rst.startSet(); +// Disable mirrored reads to make sure the initialization of oplog fetcher find commands from the +// secondaries do not get included in the metrics that we are testing. 
+rst.startSet({ + setParameter: { + mirrorReads: tojsononeline({samplingRate: 0.0}), + logComponentVerbosity: tojson({command: 1}) + } +}); rst.initiateWithHighElectionTimeout(); const primary = rst.getPrimary(); const secondaries = rst.getSecondaries(); -function getMirroredReadsStats(node) { - return node.getDB(kDbName).serverStatus({mirroredReads: 1}).mirroredReads; -} - function getDiagnosticData(node) { let db = node.getDB('admin'); - const stats = verifyGetDiagnosticData(db).serverStatus; + const stats = verifyGetDiagnosticData(db, false /* logData */).serverStatus; assert(stats.hasOwnProperty('mirroredReads')); + jsTestLog(`Got diagnostic data for host: ${node}, ${tojson(stats.mirroredReads)}`); return stats.mirroredReads; } function getMirroredReadsProcessedAsSecondary() { let readsProcessed = 0; for (let i = 0; i < secondaries.length; i++) { - const stats = getMirroredReadsStats(secondaries[i]); + const stats = getDiagnosticData(secondaries[i]); readsProcessed += stats.processedAsSecondary; } return readsProcessed; @@ -46,7 +50,7 @@ function waitForPrimaryToSendMirroredReads(expectedReadsSeen, expectedReadsSent) jsTestLog("Verifying reads were seen and sent by the maestro"); jsTestLog("ExpectedReadsSent :" + expectedReadsSent + ", ExpectedReadsSeen:" + expectedReadsSeen); - const afterPrimaryReadStats = getMirroredReadsStats(primary); + const afterPrimaryReadStats = getDiagnosticData(primary); const actualMirrorableReadsSeen = afterPrimaryReadStats.seen; const actualMirroredReadsSent = afterPrimaryReadStats.sent; jsTestLog("Primary metrics after reads: " + tojson(afterPrimaryReadStats)); @@ -58,13 +62,14 @@ function waitForPrimaryToSendMirroredReads(expectedReadsSeen, expectedReadsSent) function sendAndCheckReads(rst) { const primary = rst.getPrimary(); // Initial metrics before sending kOperations number of finds. - const initialPrimaryReadStats = getMirroredReadsStats(primary); + const initialPrimaryReadStats = getDiagnosticData(primary); const mirrorableReadsSeenBefore = initialPrimaryReadStats.seen; const mirroredReadsSentBefore = initialPrimaryReadStats.sent; + primary.getDB(kDbName).getCollection(kCollName).insert({x: i}); jsTestLog(`Sending ${kOperations} reads to primary`); for (var i = 0; i < kOperations; ++i) { - primary.getDB(kDbName).runCommand({find: kCollName, filter: {}}); + assert.commandWorked(primary.getDB(kDbName).runCommand({find: kCollName, filter: {}})); } const expectedReadsSeen = mirrorableReadsSeenBefore + kOperations; @@ -121,8 +126,8 @@ assert.commandWorked(primary.adminCommand({setParameter: 1, mirrorReads: {sampli let primaryResolvedAfterReads = getDiagnosticData(primary).resolved; jsTestLog(`Mirrored ${primaryResolvedAfterReads} reads so far`); for (let i = 0; i < secondaries.length; i++) { - jsTestLog("Secondary " + secondaries[i] + - " metrics: " + tojson(getMirroredReadsStats(secondaries[i]))); + // Print the secondary metrics for easier debugging. + getDiagnosticData(secondaries[i]); } // There are two secondaries, so `kOperations * 2` reads must be resolved. 
return primaryResolvedBeforeReads + kOperations * 2 <= primaryResolvedAfterReads; diff --git a/jstests/noPassthrough/geo_near_random1.js b/jstests/noPassthrough/geo_near_random1.js index 06dcf86c819f0..37051468a5eac 100644 --- a/jstests/noPassthrough/geo_near_random1.js +++ b/jstests/noPassthrough/geo_near_random1.js @@ -1,14 +1,12 @@ // this tests all points using $near -var db; (function() { "use strict"; load("jstests/libs/geo_near_random.js"); const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod failed to start."); -db = conn.getDB("test"); -var test = new GeoNearRandomTest("weekly.geo_near_random1"); +var test = new GeoNearRandomTest("weekly.geo_near_random1", conn.getDB("test")); test.insertPts(1000); diff --git a/jstests/noPassthrough/geo_near_random2.js b/jstests/noPassthrough/geo_near_random2.js index b5ec59af1124d..6ee97cae62424 100644 --- a/jstests/noPassthrough/geo_near_random2.js +++ b/jstests/noPassthrough/geo_near_random2.js @@ -1,14 +1,12 @@ // this tests 1% of all points using $near and $nearSphere -var db; (function() { "use strict"; load("jstests/libs/geo_near_random.js"); const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod failed to start."); -db = conn.getDB("test"); -var test = new GeoNearRandomTest("weekly.geo_near_random2"); +var test = new GeoNearRandomTest("weekly.geo_near_random2", conn.getDB("test")); test.insertPts(50000); diff --git a/jstests/noPassthrough/global_profiling_filter.js b/jstests/noPassthrough/global_profiling_filter.js index ac3b4496550de..a6356095ab8c0 100644 --- a/jstests/noPassthrough/global_profiling_filter.js +++ b/jstests/noPassthrough/global_profiling_filter.js @@ -238,7 +238,7 @@ function runCorrectnessTests(conn) { })(); (function testGlobalFilterUnsetOverridesDatabaseSpecificSettings() { - result = assert.commandWorked(db.getSiblingDB("db1").runCommand( + let result = assert.commandWorked(db.getSiblingDB("db1").runCommand( {profile: isMongos ? 0 : 1, filter: profileFilter1.filter})); assert.eq(result.filter, profileFilter2.filter); result = assert.commandWorked(db.getSiblingDB("db3").runCommand( diff --git a/jstests/noPassthrough/group_spill_long_keys.js b/jstests/noPassthrough/group_spill_long_keys.js new file mode 100644 index 0000000000000..08a6abb9547ee --- /dev/null +++ b/jstests/noPassthrough/group_spill_long_keys.js @@ -0,0 +1,110 @@ +/** + * Test a $group query which has a large number of group-by fields and needs to spill to disk. + */ +import {getPlanStage} from "jstests/libs/analyze_plan.js"; + +const MEM_LIMIT_KB = 2; + +// Make sure that we can handle more than 32 keys (the maximum allowed number of components in a +// compound index). +const NUM_GROUP_KEYS = 33; + +// Run a mongod that has a reduced memory limit for when its hash aggregation operators (in both +// SBE and the Classic execution engine) will spill data to disk. +const memLimit = MEM_LIMIT_KB * 1024; +const conn = MongoRunner.runMongod({ + setParameter: { + internalQuerySlotBasedExecutionHashAggApproxMemoryUseInBytesBeforeSpill: memLimit, + internalDocumentSourceGroupMaxMemoryBytes: memLimit + } +}); +assert.neq(conn, null, "mongod failed to start up"); + +const db = conn.getDB("test"); +const coll = db.group_spill_long_keys; + +function nextFieldName(name) { + function nextChar(char) { + return String.fromCharCode(char.charCodeAt(0) + 1); + } + + function lastChar(str) { + return str[str.length - 1]; + } + + // If the final character is a "z", start using a longer string. 
Otherwise we cycle through all + // possibilities for the last letter. This means we generate only 26 unique names for each + // string length, but that's ok since this function will not be used to generate more than ~40 + // unique names. + if (lastChar(name) === "z") { + return "a".repeat(name.length + 1); + } else { + return name.substr(0, name.length - 1) + nextChar(lastChar(name)); + } +} + +let counter = 0; + +/** + * Generates a document with 'NUM_GROUP_KEYS' uniquely named keys. Values are increasingly large + * 64-bit integers. + */ +function generateDoc() { + let doc = {}; + let str = "a"; + for (let i = 0; i < NUM_GROUP_KEYS; ++i) { + doc[str] = NumberLong(counter); + ++counter; + str = nextFieldName(str); + } + return doc; +} + +// Calculate how many documents we need. We use 100 times the approximate number of documents that +// would reach the spill limit, in order to cause the query to spill frequently. +let exampleDoc = generateDoc(); +let docSize = Object.bsonsize(exampleDoc); +let docsNeeded = Math.ceil(memLimit / docSize) * 100; + +coll.drop(); +for (let i = 0; i < docsNeeded; ++i) { + assert.commandWorked(coll.insert(generateDoc())); +} + +/** + * Generates the _id field for a $group query that aggregates on 'NUM_GROUP_KEYS' unique keys. The + * returned document should look like {a: "$a", b: "$b", ...}. + */ +const groupKey = (function() { + let doc = {}; + let str = "a"; + for (let i = 0; i < NUM_GROUP_KEYS; ++i) { + doc[str] = "$" + str; + str = nextFieldName(str); + } + return doc; +}()); + +const pipeline = [{$group: {_id: groupKey}}]; + +// Run the query twice and assert that there are as many groups as documents in the collection, +// since each document has a unique group key. We run the query twice because the second time it may +// use a cached plan. +for (let i = 0; i < 2; ++i) { + assert.eq(docsNeeded, coll.aggregate(pipeline).itcount()); +} + +// Run an explain. If SBE was used, make sure that we see a "group" stage that spilled in the exec +// stats. +let explain = coll.explain("executionStats").aggregate(pipeline); +assert(explain.hasOwnProperty("explainVersion"), explain); +if (explain.explainVersion !== "1") { + let hashAgg = getPlanStage(explain.executionStats.executionStages, "group"); + // There should be a group-by slot for each field we are grouping by. + assert.eq(hashAgg.groupBySlots.length, NUM_GROUP_KEYS, hashAgg); + assert.eq(hashAgg.usedDisk, true, hashAgg); + assert.gt(hashAgg.spills, 0, hashAgg); + assert.gt(hashAgg.spilledRecords, 0, hashAgg); +} + +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/group_spill_metrics.js b/jstests/noPassthrough/group_spill_metrics.js index 89c6d6072d93f..4101cdbba62e1 100644 --- a/jstests/noPassthrough/group_spill_metrics.js +++ b/jstests/noPassthrough/group_spill_metrics.js @@ -7,11 +7,8 @@ * requires_persistence, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled.
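For context on the group_spill_long_keys.js assertions above: when SBE is used, the spill counters live on the "group" stage of the executionStats tree. A minimal sketch of that lookup, assuming a collection `coll` and the reduced spill thresholds set at the top of the test (not a verbatim excerpt from the patch):

```js
import {getPlanStage} from "jstests/libs/analyze_plan.js";

const explain = coll.explain("executionStats").aggregate([{$group: {_id: "$a"}}]);
// explainVersion "2" corresponds to SBE plans; classic plans report "1".
if (explain.explainVersion !== "1") {
    const group = getPlanStage(explain.executionStats.executionStages, "group");
    jsTestLog(`usedDisk=${group.usedDisk} spills=${group.spills} spilledRecords=${group.spilledRecords}`);
}
```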
+import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); const db = conn.getDB('test'); @@ -74,4 +71,3 @@ assert.eq( metricsAfter.spilledRecords, expectedSpilledRecords + metricsBefore.spilledRecords, pipeline); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/hybrid_index_with_updates.js b/jstests/noPassthrough/hybrid_index_with_updates.js index 0e4641ed83a40..a7e1d1a5b440d 100644 --- a/jstests/noPassthrough/hybrid_index_with_updates.js +++ b/jstests/noPassthrough/hybrid_index_with_updates.js @@ -51,7 +51,7 @@ const collScanFailPoint = configureFailPoint( // Start the background build. let bgBuild = startParallelShell(function() { - assert.commandWorked(db.hybrid.createIndex({i: 1}, {background: true})); + assert.commandWorked(db.hybrid.createIndex({i: 1})); }, conn.port); checkLog.containsJson(conn, 20386, { diff --git a/jstests/noPassthrough/hybrid_multikey.js b/jstests/noPassthrough/hybrid_multikey.js index ba0154708f4b9..6908a63c36cc3 100644 --- a/jstests/noPassthrough/hybrid_multikey.js +++ b/jstests/noPassthrough/hybrid_multikey.js @@ -3,10 +3,7 @@ * various index types. */ load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/analyze_plan.js"); // For getWinningPlan to analyze explain() output. - -(function() { -"use strict"; +import {getWinningPlan} from "jstests/libs/analyze_plan.js"; const conn = MongoRunner.runMongod(); const dbName = 'test'; @@ -135,4 +132,3 @@ runTest({ }); MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthrough/hybrid_unique_index_with_updates.js b/jstests/noPassthrough/hybrid_unique_index_with_updates.js index 3dae16551c743..2140dffd9f699 100644 --- a/jstests/noPassthrough/hybrid_unique_index_with_updates.js +++ b/jstests/noPassthrough/hybrid_unique_index_with_updates.js @@ -51,7 +51,7 @@ let setUp = function(coll) { let buildIndexInBackground = function(coll, expectDuplicateKeyError) { const createIndexFunction = function(collFullName) { const coll = db.getMongo().getCollection(collFullName); - return coll.createIndex({i: 1}, {background: true, unique: true}); + return coll.createIndex({i: 1}, {unique: true}); }; const assertFunction = expectDuplicateKeyError ? 
function(collFullName) { assert.commandFailedWithCode(createIndexFunction(collFullName), ErrorCodes.DuplicateKey); diff --git a/jstests/noPassthrough/index_abort_before_commit_signal.js b/jstests/noPassthrough/index_abort_before_commit_signal.js index 61e2a7ee7a040..927bb3bce18a9 100644 --- a/jstests/noPassthrough/index_abort_before_commit_signal.js +++ b/jstests/noPassthrough/index_abort_before_commit_signal.js @@ -60,7 +60,7 @@ createIndex(); jsTestLog('Waiting for index build to complete'); IndexBuildTest.waitForIndexBuildToStop(testDB, coll.getName(), 'a_1'); -IndexBuildTest.assertIndexes(coll, 1, ['_id_']); +IndexBuildTest.assertIndexesSoon(coll, 1, ['_id_']); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/index_abort_stepdown_prepare.js b/jstests/noPassthrough/index_abort_stepdown_prepare.js index 061ac4055b355..c8a98d015ff9d 100644 --- a/jstests/noPassthrough/index_abort_stepdown_prepare.js +++ b/jstests/noPassthrough/index_abort_stepdown_prepare.js @@ -105,7 +105,7 @@ assert.commandWorked(session.abortTransaction_forTesting()); jsTestLog("Waiting for index build to complete"); IndexBuildTest.waitForIndexBuildToStop(primaryDB, primaryColl.getName(), indexName); -IndexBuildTest.assertIndexes(primaryColl, 2, ["_id_", indexName]); +IndexBuildTest.assertIndexesSoon(primaryColl, 2, ["_id_", indexName]); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/index_build_aborted_on_fcv_downgrade.js b/jstests/noPassthrough/index_build_aborted_on_fcv_downgrade.js new file mode 100644 index 0000000000000..a5612f9960443 --- /dev/null +++ b/jstests/noPassthrough/index_build_aborted_on_fcv_downgrade.js @@ -0,0 +1,133 @@ +/** + * Ensures that index builds are aborted when setFCV causes an FCV downgrade, and that during that + * period new index builds are blocked. + * + * TODO (SERVER-68290): remove test when removing index build abort on FCV downgrade and reintroduce + * "jstests/noPassthrough/index_downgrade_fcv.js". + * + * @tags: [ + * requires_fcv_71, + * requires_replication, + * ] + */ +(function() { +"use strict"; + +load('jstests/noPassthrough/libs/index_build.js'); +load("jstests/libs/fail_point_util.js"); + +const rst = new ReplSetTest({ + nodes: [ + {}, + { + // Disallow elections on secondary. + rsConfig: { + priority: 0, + }, + }, + ] +}); +rst.startSet(); +rst.initiate(); + +const dbName = 'test'; +const collName = 'coll'; +const primary = rst.getPrimary(); +const primaryDB = primary.getDB(dbName); +const primaryColl = primaryDB.getCollection(collName); + +assert.commandWorked(primaryColl.insert({a: 1})); + +rst.awaitReplication(); + +// Clear log to ensure checkLog does not see unrelated log entries. +assert.commandWorked(primaryDB.adminCommand({clearLog: 'global'})); + +// Hang an index build in the commit phase, to later check that FCV downgrade waits on a committing +// index build. +const hangIndexBuildBeforeCommit = configureFailPoint(primary, "hangIndexBuildBeforeCommit"); +const createIdxCommit = IndexBuildTest.startIndexBuild( + primary, primaryColl.getFullName(), {c: 1}, null, [ErrorCodes.IndexBuildAborted]); +const commitBuildUUID = + IndexBuildTest + .assertIndexesSoon(primaryColl, 2, ['_id_'], ['c_1'], {includeBuildUUIDs: true})['c_1'] + .buildUUID; +hangIndexBuildBeforeCommit.wait(); + +// Set up an index build to be aborted by the FCV downgrade. 
+const hangAfterInitializingIndexBuild = + configureFailPoint(primary, "hangAfterInitializingIndexBuild"); +const createIdxAborted = IndexBuildTest.startIndexBuild( + primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.IndexBuildAborted]); + +const abortedBuildUUID = + IndexBuildTest + .assertIndexesSoon( + primaryColl, 3, ['_id_'], ['a_1', 'c_1'], {includeBuildUUIDs: true})['a_1'] + .buildUUID; + +hangAfterInitializingIndexBuild.wait(); + +const hangAfterBlockingIndexBuildsForFcvDowngrade = + configureFailPoint(primary, "hangAfterBlockingIndexBuildsForFcvDowngrade"); + +// Ensure index build block and abort happens during the FCV transitioning state. +const failAfterReachingTransitioningState = + configureFailPoint(primary, "failAfterReachingTransitioningState"); + +const awaitSetFcv = startParallelShell( + funWithArgs(function(collName) { + // Should fail due to failAfterReachingTransitioningState. + assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}), + 7555200); + }, primaryColl.getName()), primary.port); + +hangAfterBlockingIndexBuildsForFcvDowngrade.wait(); + +// Start an index build while the block is active. +const createIdxBlocked = IndexBuildTest.startIndexBuild(primary, primaryColl.getFullName(), {b: 1}); +// "Index build: new index builds are blocked, waiting". +checkLog.containsJson(primary, 7738700); + +hangAfterBlockingIndexBuildsForFcvDowngrade.off(); + +// "About to abort all index builders running". +assert.soon(() => checkLog.checkContainsWithCountJson(primary, + 7738702, + { + reason: function(reason) { + return reason.startsWith( + "FCV downgrade in progress"); + } + }, + /*count=*/ 1)); + +// "Index build: joined after abort". +checkLog.containsJson(primary, 20655, { + buildUUID: function(uuid) { + return uuid && uuid["uuid"]["$uuid"] === extractUUIDFromObject(abortedBuildUUID); + } +}); + +checkLog.containsJson(primary, 4725201, { + indexBuilds: function(uuidArray) { + return uuidArray && uuidArray.length == 1 && + uuidArray[0]["uuid"]["$uuid"] === extractUUIDFromObject(commitBuildUUID); + } +}); +hangIndexBuildBeforeCommit.off(); +hangAfterInitializingIndexBuild.off(); + +jsTestLog("Waiting for threads to join"); +createIdxAborted(); +createIdxCommit(); +awaitSetFcv(); +createIdxBlocked(); + +// The index build started before the FCV downgrade should have been aborted, while the build +// started while the index build block was in place should have succeeded. The index build which was +// already in the commit phase when the FCV downgrade took place should also have completed. +IndexBuildTest.assertIndexesSoon(primaryColl, 3, ['_id_', 'b_1', 'c_1']); + +rst.stopSet(); +})(); diff --git a/jstests/noPassthrough/index_build_external_and_internal_abort.js b/jstests/noPassthrough/index_build_external_and_internal_abort.js index b3b3e671abd9c..447ad5dce26a7 100644 --- a/jstests/noPassthrough/index_build_external_and_internal_abort.js +++ b/jstests/noPassthrough/index_build_external_and_internal_abort.js @@ -3,14 +3,10 @@ * internal index build abort (e.g. build failed due to invalid keys). 
* * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); load('jstests/noPassthrough/libs/index_build.js'); const rst = new ReplSetTest({ @@ -29,7 +25,6 @@ assert.commandWorked(coll.insert({point: {x: -15.0, y: "abc"}})); let indexBuilderThreadFP = configureFailPoint(testDB, 'hangIndexBuildBeforeTransitioningReplStateTokAwaitPrimaryAbort'); -let connThreadFP = configureFailPoint(testDB, 'hangInRemoveIndexBuildEntryAfterCommitOrAbort'); // Will fail with error code 13026: "geo values must be 'legacy coordinate pairs' for 2d indexes" const waitForIndexBuild = @@ -45,20 +40,15 @@ const awaitDropCollection = assert.commandWorked(db.runCommand({drop: collName})); }, coll.getName()), primary.port); -// Wait for the 'drop' command to hang while tearing down the index build, just after setting the -// index build state to kAborted. -connThreadFP.wait(); +// Check external abort is reattempted multiple times, meaning it is blocked behind the internal +// abort. +assert.soon(() => checkLog.checkContainsWithAtLeastCountJson(primary, 4656010, {}, 3)); -// Resume the index builder thread, which would now try to abort an index that's already in kAbort -// state. +// Resume the index builder thread, which will transition to kAwaitPrimaryAbort and unblock external +// aborts. indexBuilderThreadFP.off(); -// Wait for the log to confirm the index builder won't attempt to abort the build, because it's -// already in aborted state. -checkLog.containsJson(primary, 7530800); - -// Resume the collection drop and wait for its completion. -connThreadFP.off(); +// Wait for completion. awaitDropCollection(); waitForIndexBuild(); @@ -66,5 +56,4 @@ waitForIndexBuild(); // The collection does not exist. assert.eq(testDB.getCollectionNames().indexOf(coll.getName()), -1, "collection still exists."); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_single_phase.js b/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_single_phase.js new file mode 100644 index 0000000000000..d9a37f17dd630 --- /dev/null +++ b/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_single_phase.js @@ -0,0 +1,47 @@ +/** + * Tests dropping a collection (causing an external index build abort) does not deadlock with an + * internal self abort for single-phase index builds. + */ +(function() { +"use strict"; + +load('jstests/noPassthrough/libs/index_build.js'); +load("jstests/libs/fail_point_util.js"); + +// A standalone configuration is key to running the index build single-phase. +const conn = MongoRunner.runMongod(); + +const dbName = 'test'; +const collName = 'coll'; +const db = conn.getDB(dbName); +const coll = db.getCollection(collName); + +coll.drop(); +assert.commandWorked(coll.insert({a: [0, "a"]})); + +// Hang after the index build has checked if the build is already aborted, but before taking +// collection locks for cleanup. 
+const hangBeforeCleanup = configureFailPoint(db, 'hangIndexBuildBeforeAbortCleanUp');
+
+const hangAfterCollDropHasLocks =
+    configureFailPoint(db, 'hangAbortIndexBuildByBuildUUIDAfterLocks');
+
+const createIdx =
+    IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: "2d"}, null, [13026]);
+
+hangBeforeCleanup.wait();
+
+const collDrop = startParallelShell(funWithArgs(function(dbName, collName) {
+                                        db.getSiblingDB(dbName).getCollection(collName).drop();
+                                    }, dbName, collName), conn.port);
+
+hangAfterCollDropHasLocks.wait();
+hangBeforeCleanup.off();
+hangAfterCollDropHasLocks.off();
+
+jsTestLog("Waiting for collection drop shell to return");
+collDrop();
+createIdx();
+
+MongoRunner.stopMongod(conn);
+})(); diff --git a/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_two_phase.js b/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_two_phase.js new file mode 100644 index 0000000000000..bf1e120ea0921 --- /dev/null +++ b/jstests/noPassthrough/index_build_external_and_internal_abort_do_not_deadlock_two_phase.js @@ -0,0 +1,90 @@
+/**
+ * Tests dropping a collection (causing an external index build abort) does not deadlock with an
+ * internal self abort for two-phase index builds.
+ *
+ * @tags: [
+ *   requires_replication,
+ * ]
+ */
+(function() {
+"use strict";
+
+load('jstests/noPassthrough/libs/index_build.js');
+load("jstests/libs/fail_point_util.js");
+
+const rst = new ReplSetTest({
+    nodes: [
+        {},
+        {
+            // Disallow elections on secondary.
+            rsConfig: {
+                priority: 0,
+            },
+        },
+    ]
+});
+rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const primaryDB = primary.getDB('test');
+const primaryColl = primaryDB.getCollection('test');
+
+primaryColl.drop();
+assert.commandWorked(primaryColl.insert({a: 1}));
+
+// Hang the primary's index build right after initialization, and later just before it starts abort clean-up.
+const failpointHangAfterInit = configureFailPoint(primaryDB, "hangAfterInitializingIndexBuild");
+const hangBeforeCleanup = configureFailPoint(primaryDB, 'hangIndexBuildBeforeAbortCleanUp');
+
+// Block the secondary (paused via the 'hangAfterStartingIndexBuild' failpoint) to avoid the commitQuorum being fulfilled.
+IndexBuildTest.pauseIndexBuilds(rst.getSecondary());
+
+jsTestLog("Waiting for index build to start");
+const createIdx = IndexBuildTest.startIndexBuild(
+    primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.OutOfDiskSpace]);
+
+const buildUUID =
+    IndexBuildTest
+        .assertIndexesSoon(primaryColl, 2, ['_id_'], ['a_1'], {includeBuildUUIDs: true})['a_1']
+        .buildUUID;
+
+const failAfterVoteForCommitReadiness =
+    configureFailPoint(primaryDB,
+                       "failIndexBuildWithErrorInSecondDrain",
+                       {buildUUID: buildUUID, error: ErrorCodes.OutOfDiskSpace});
+
+// Continue index build after preparing the artificial failure.
+failpointHangAfterInit.off();
+
+// Wait for the index build to be in the clean-up path.
+hangBeforeCleanup.wait();
+
+const hangAfterCollDropHasLocks =
+    configureFailPoint(primaryDB, 'hangAbortIndexBuildByBuildUUIDAfterLocks');
+
+const collDrop = startParallelShell(funWithArgs(function(dbName, collName) {
+                                        jsTestLog("Dropping collection");
+                                        db.getSiblingDB(dbName).getCollection(collName).drop();
+                                    }, primaryDB.getName(), primaryColl.getName()), primary.port);
+
+hangAfterCollDropHasLocks.wait();
+hangBeforeCleanup.off();
+hangAfterCollDropHasLocks.off();
+
+// The index build should not be externally abortable once the index builder thread is in the
+// process of aborting.
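Both deadlock tests above launch the collection drop from a second shell with startParallelShell() and funWithArgs(). For reference, a minimal sketch of that pattern, assuming an existing connection `conn`, placeholder database and collection names, and that funWithArgs() is provided by jstests/libs/parallel_shell_helpers.js (an assumption; these tests pick it up through their library loads):

load("jstests/libs/parallel_shell_helpers.js");  // assumed location of funWithArgs()

// funWithArgs() serializes the callback together with its arguments so it can run in a second
// shell; startParallelShell() launches that shell against conn.port and returns a join function.
const awaitDrop = startParallelShell(
    funWithArgs(function(dbName, collName) {
        db.getSiblingDB(dbName).getCollection(collName).drop();
    }, "test", "coll"),
    conn.port);

// ... coordinate failpoints from the main test thread here ...

awaitDrop();  // Joins the parallel shell and asserts that it exited cleanly.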
+jsTestLog("Waiting for the index build to abort"); +// Cleaned up index build after abort. +checkLog.containsJson(primary, 465611, { + buildUUID: function(uuid) { + return uuid && uuid["uuid"]["$uuid"] === extractUUIDFromObject(buildUUID); + } +}); + +jsTestLog("Waiting for collection drop shell to return"); +collDrop(); +createIdx(); + +rst.stopSet(); +})(); diff --git a/jstests/noPassthrough/index_build_killed_disk_space.js b/jstests/noPassthrough/index_build_killed_disk_space.js index c1c76ecbdcfae..69ddcb7315545 100644 --- a/jstests/noPassthrough/index_build_killed_disk_space.js +++ b/jstests/noPassthrough/index_build_killed_disk_space.js @@ -1,9 +1,9 @@ /** * Ensures that index builds are killed on primaries when the available disk space drops below a - * limit. + * limit,only if the primary has not yet voted for commit. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ @@ -13,6 +13,114 @@ load('jstests/noPassthrough/libs/index_build.js'); load("jstests/libs/fail_point_util.js"); +function killBeforeVoteCommitSucceeds(rst) { + const primary = rst.getPrimary(); + const primaryDB = primary.getDB('test'); + const primaryColl = primaryDB.getCollection('test'); + + primaryColl.drop(); + assert.commandWorked(primaryColl.insert({a: 1})); + + const hangAfterInitFailPoint = configureFailPoint(primaryDB, 'hangAfterInitializingIndexBuild'); + + let serverStatus = primaryDB.serverStatus(); + const tookActionCountBefore = serverStatus.metrics.diskSpaceMonitor.tookAction; + const killedDueToInsufficientDiskSpaceBefore = + serverStatus.indexBuilds.killedDueToInsufficientDiskSpace; + + jsTestLog("Waiting for index build to start"); + const createIdx = IndexBuildTest.startIndexBuild( + primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.OutOfDiskSpace]); + IndexBuildTest.waitForIndexBuildToStart(primaryDB, primaryColl.getName(), 'a_1'); + + // Ensure the index build is in an abortable state before the DiskSpaceMonitor runs. + hangAfterInitFailPoint.wait(); + + // Default indexBuildMinAvailableDiskSpaceMB is 500 MB. + // Simulate a remaining disk space of 450MB. + const simulateDiskSpaceFp = + configureFailPoint(primaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024}); + + jsTestLog("Waiting for the disk space monitor to take action"); + assert.soon(() => { + return primaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > tookActionCountBefore; + }); + + jsTestLog("Waiting for the index build to be killed"); + // "Index build: joined after abort". 
+ checkLog.containsJson(primary, 20655); + + jsTestLog("Waiting for threads to join"); + createIdx(); + simulateDiskSpaceFp.off(); + hangAfterInitFailPoint.off(); + + // "Index build: aborted due to insufficient disk space" + checkLog.containsJson(primary, 7333601); + + assert.eq(killedDueToInsufficientDiskSpaceBefore + 1, + primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); + + rst.awaitReplication(); + IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']); + + const secondaryColl = rst.getSecondary().getCollection(primaryColl.getFullName()); + IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']); +} + +function killAfterVoteCommitFails(rst) { + const primary = rst.getPrimary(); + const primaryDB = primary.getDB('test'); + const primaryColl = primaryDB.getCollection('test'); + + primaryColl.drop(); + assert.commandWorked(primaryColl.insert({a: 1})); + + const hangAfterVoteCommit = + configureFailPoint(primaryDB, 'hangIndexBuildAfterSignalPrimaryForCommitReadiness'); + + let serverStatus = primaryDB.serverStatus(); + const tookActionCountBefore = serverStatus.metrics.diskSpaceMonitor.tookAction; + const killedDueToInsufficientDiskSpaceBefore = + serverStatus.indexBuilds.killedDueToInsufficientDiskSpace; + + jsTestLog("Waiting for index build to start"); + const createIdx = IndexBuildTest.startIndexBuild(primary, primaryColl.getFullName(), {a: 1}); + IndexBuildTest.waitForIndexBuildToStart(primaryDB, primaryColl.getName(), 'a_1'); + + // Ensure the index build has voted commit before the DiskSpaceMonitor runs. + hangAfterVoteCommit.wait(); + + // Default indexBuildMinAvailableDiskSpaceMB is 500 MB. + // Simulate a remaining disk space of 450MB. + const simulateDiskSpaceFp = + configureFailPoint(primaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024}); + + jsTestLog("Waiting for the disk space monitor to take action"); + assert.soon(() => { + return primaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > tookActionCountBefore; + }); + + jsTestLog("Waiting for the index build kill attempt to fail"); + // "Index build: cannot force abort". + checkLog.containsJson(primary, 7617000); + + hangAfterVoteCommit.off(); + simulateDiskSpaceFp.off(); + + jsTestLog("Waiting for threads to join"); + createIdx(); + + assert.eq(killedDueToInsufficientDiskSpaceBefore, + primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); + + rst.awaitReplication(); + IndexBuildTest.assertIndexes(primaryColl, 2, ['_id_', 'a_1']); + + const secondaryColl = rst.getSecondary().getCollection(primaryColl.getFullName()); + IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']); +} + const rst = new ReplSetTest({ nodes: [ {}, @@ -27,50 +135,8 @@ const rst = new ReplSetTest({ rst.startSet(); rst.initiate(); -const primary = rst.getPrimary(); -const primaryDB = primary.getDB('test'); -const primaryColl = primaryDB.getCollection('test'); - -assert.commandWorked(primaryColl.insert({a: 1})); - -let hangAfterInitFailPoint = configureFailPoint(primaryDB, 'hangAfterInitializingIndexBuild'); - -const tookActionCountBefore = primaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction; - -jsTestLog("Waiting for index build to start"); -const createIdx = IndexBuildTest.startIndexBuild( - primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.Interrupted]); -IndexBuildTest.waitForIndexBuildToStart(primaryDB, primaryColl.getName(), 'a_1'); - -// Ensure the index build is in an abortable state before the DiskSpaceMonitor runs. 
-hangAfterInitFailPoint.wait(); - -// Default indexBuildMinAvailableDiskSpaceMB is 500 MB. -// Simulate a remaining disk space of 450MB. -const simulateDiskSpaceFp = - configureFailPoint(primaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024}); - -jsTestLog("Waiting for the disk space monitor to take action"); -assert.soon(() => { - return primaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > tookActionCountBefore; -}); -hangAfterInitFailPoint.off(); - -jsTestLog("Waiting for the index build to be killed"); -// "Index build: joined after abort". -checkLog.containsJson(primary, 20655); - -jsTestLog("Waiting for threads to join"); -createIdx(); -simulateDiskSpaceFp.off(); - -assert.eq(1, primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); - -rst.awaitReplication(); -IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']); - -const secondaryColl = rst.getSecondary().getCollection(primaryColl.getFullName()); -IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']); +killBeforeVoteCommitSucceeds(rst); +killAfterVoteCommitFails(rst); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/index_build_killed_disk_space_secondary.js b/jstests/noPassthrough/index_build_killed_disk_space_secondary.js index c083df03f27f0..6ece302e479dd 100644 --- a/jstests/noPassthrough/index_build_killed_disk_space_secondary.js +++ b/jstests/noPassthrough/index_build_killed_disk_space_secondary.js @@ -1,9 +1,9 @@ /** * Ensures that index builds are cancelled by secondaries when the available disk space drops below - * a limit. + * a limit, only if the secondary has not yet voted for commit. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ @@ -13,6 +13,167 @@ load('jstests/noPassthrough/libs/index_build.js'); load("jstests/libs/fail_point_util.js"); +function killBeforeVoteCommitSucceeds(rst) { + jsTestLog( + "Index build in a secondary can be killed by the DiskSpaceMonitor before it has voted for commit."); + + const dbName = 'test'; + const collName = 'coll'; + const primary = rst.getPrimary(); + const primaryDB = primary.getDB(dbName); + const primaryColl = primaryDB.getCollection(collName); + + primaryColl.drop(); + assert.commandWorked(primaryColl.insert({a: 1})); + + rst.awaitReplication(); + + const secondary = rst.getSecondary(); + const secondaryDB = secondary.getDB(dbName); + const secondaryColl = secondaryDB.getCollection(collName); + + const primaryKilledDueToDiskSpaceBefore = + primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace; + const secondaryKilledDueToDiskSpaceBefore = + secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace; + + // Pause the index build on the primary after it replicates the startIndexBuild oplog entry, + // effectively pausing the index build on the secondary too as it will wait for the primary to + // commit or abort. + IndexBuildTest.pauseIndexBuilds(primary); + + const tookActionCountBefore = secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction; + + jsTestLog("Waiting for index build to start on secondary"); + const hangAfterInitFailPoint = + configureFailPoint(secondaryDB, 'hangAfterInitializingIndexBuild'); + const createIdx = IndexBuildTest.startIndexBuild( + primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.IndexBuildAborted]); + IndexBuildTest.waitForIndexBuildToStart(secondaryDB, secondaryColl.getName(), 'a_1'); + + // Ensure the index build is in an abortable state before the DiskSpaceMonitor runs. 
+    hangAfterInitFailPoint.wait();
+
+    // Default indexBuildMinAvailableDiskSpaceMB is 500 MB.
+    // Simulate a remaining disk space of 450MB on the secondary node.
+    const simulateDiskSpaceFp =
+        configureFailPoint(secondaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024});
+
+    jsTestLog("Waiting for the disk space monitor to take action on secondary");
+    assert.soon(() => {
+        return secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction >
+            tookActionCountBefore;
+    });
+    IndexBuildTest.resumeIndexBuilds(primary);
+
+    jsTestLog("Waiting for the index build to be killed");
+    // "Index build: joined after abort".
+    checkLog.containsJson(secondary, 20655);
+
+    jsTestLog("Waiting for threads to join");
+    createIdx();
+
+    // Confirm that the error message returned by the createIndexes command describes the secondary
+    // running out of disk space, rather than a generic "operation was interrupted" message.
+    // We use the log message as a proxy for the error message that is returned by createIndexes.
+    checkLog.contains(
+        primary,
+        new RegExp(
+            "20655.*Index build: joined after abort.*IndexBuildAborted.*'voteAbortIndexBuild' received from.*: available disk space of.*bytes is less than required minimum of"));
+
+    simulateDiskSpaceFp.off();
+
+    // "Index build: aborted due to insufficient disk space"
+    checkLog.containsJson(secondaryDB, 7333601);
+
+    // Disable failpoint only after we know the build is aborted. We want the build to be aborted
+    // before it has voted for commit, and this ensures that is the case.
+    hangAfterInitFailPoint.off();
+
+    assert.eq(primaryKilledDueToDiskSpaceBefore,
+              primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+    assert.eq(secondaryKilledDueToDiskSpaceBefore + 1,
+              secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace);
+
+    rst.awaitReplication();
+    IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']);
+    IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']);
+}
+
+function killAfterVoteCommitFails(rst) {
+    jsTestLog(
+        "Index build in a secondary cannot be killed by the DiskSpaceMonitor after it has voted for commit");
+
+    const dbName = 'test';
+    const collName = 'coll';
+    const primary = rst.getPrimary();
+    const primaryDB = primary.getDB(dbName);
+    const primaryColl = primaryDB.getCollection(collName);
+
+    primaryColl.drop();
+    assert.commandWorked(primaryColl.insert({a: 1}));
+
+    rst.awaitReplication();
+
+    const secondary = rst.getSecondary();
+    const secondaryDB = secondary.getDB(dbName);
+    const secondaryColl = secondaryDB.getCollection(collName);
+
+    const primaryKilledDueToDiskSpaceBefore =
+        primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace;
+    const secondaryKilledDueToDiskSpaceBefore =
+        secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace;
+
+    // Pause the index build on the primary after it replicates the startIndexBuild oplog entry,
+    // effectively pausing the index build on the secondary too as it will wait for the primary to
+    // commit or abort.
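IndexBuildTest.pauseIndexBuilds() and resumeIndexBuilds(), used just below and throughout this patch, are thin wrappers around the 'hangAfterStartingIndexBuild' fail point mentioned elsewhere in this patch. A rough sketch of what they amount to, assuming only a connection with test commands enabled; the real helpers live in jstests/noPassthrough/libs/index_build.js:

// Approximation of the pause/resume helpers: toggle the fail point that makes every new index
// build hang right after it starts.
function pauseIndexBuildsSketch(conn) {
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'alwaysOn'}));
}

function resumeIndexBuildsSketch(conn) {
    assert.commandWorked(conn.adminCommand(
        {configureFailPoint: 'hangAfterStartingIndexBuild', mode: 'off'}));
}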
+ IndexBuildTest.pauseIndexBuilds(primary); + + const tookActionCountBefore = secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction; + + jsTestLog("Waiting for index build to start on secondary"); + const hangAfterVoteCommit = + configureFailPoint(secondaryDB, 'hangIndexBuildAfterSignalPrimaryForCommitReadiness'); + const createIdx = + IndexBuildTest.startIndexBuild(primary, primaryColl.getFullName(), {a: 1}, null); + IndexBuildTest.waitForIndexBuildToStart(secondaryDB, secondaryColl.getName(), 'a_1'); + + // Ensure the index build is in an abortable state before the DiskSpaceMonitor runs. + hangAfterVoteCommit.wait(); + + // Default indexBuildMinAvailableDiskSpaceMB is 500 MB. + // Simulate a remaining disk space of 450MB on the secondary node. + const simulateDiskSpaceFp = + configureFailPoint(secondaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024}); + + jsTestLog("Waiting for the disk space monitor to take action on secondary"); + assert.soon(() => { + return secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > + tookActionCountBefore; + }); + IndexBuildTest.resumeIndexBuilds(primary); + + jsTestLog("Waiting for the index build kill attempt to fail"); + // "Index build: cannot force abort". + checkLog.containsJson(secondary, 7617000); + + // Disable failpoint only after the abort attempt. + hangAfterVoteCommit.off(); + + jsTestLog("Waiting for threads to join"); + createIdx(); + simulateDiskSpaceFp.off(); + + assert.eq(primaryKilledDueToDiskSpaceBefore, + primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); + assert.eq(secondaryKilledDueToDiskSpaceBefore, + secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); + + rst.awaitReplication(); + IndexBuildTest.assertIndexes(primaryColl, 2, ['_id_', 'a_1']); + IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']); +} + const rst = new ReplSetTest({ nodes: [ {}, @@ -27,62 +188,8 @@ const rst = new ReplSetTest({ rst.startSet(); rst.initiate(); -const dbName = 'test'; -const collName = 'coll'; -const primary = rst.getPrimary(); -const primaryDB = primary.getDB(dbName); -const primaryColl = primaryDB.getCollection(collName); - -assert.commandWorked(primaryColl.insert({a: 1})); - -rst.awaitReplication(); - -const secondary = rst.getSecondary(); -const secondaryDB = secondary.getDB(dbName); -const secondaryColl = secondaryDB.getCollection(collName); - -// Pause the index build on the primary after it replicates the startIndexBuild oplog entry, -// effectively pausing the index build on the secondary too as it will wait for the primary to -// commit or abort. -IndexBuildTest.pauseIndexBuilds(primary); - -const tookActionCountBefore = secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction; - -jsTestLog("Waiting for index build to start on secondary"); -const hangAfterInitFailPoint = configureFailPoint(secondaryDB, 'hangAfterInitializingIndexBuild'); -const createIdx = IndexBuildTest.startIndexBuild( - primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.IndexBuildAborted]); -IndexBuildTest.waitForIndexBuildToStart(secondaryDB, secondaryColl.getName(), 'a_1'); - -// Ensure the index build is in an abortable state before the DiskSpaceMonitor runs. -hangAfterInitFailPoint.wait(); -hangAfterInitFailPoint.off(); - -// Default indexBuildMinAvailableDiskSpaceMB is 500 MB. -// Simulate a remaining disk space of 450MB on the secondary node. 
-const simulateDiskSpaceFp = - configureFailPoint(secondaryDB, 'simulateAvailableDiskSpace', {bytes: 450 * 1024 * 1024}); - -jsTestLog("Waiting for the disk space monitor to take action on secondary"); -assert.soon(() => { - return secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction > tookActionCountBefore; -}); -IndexBuildTest.resumeIndexBuilds(primary); - -jsTestLog("Waiting for the index build to be killed"); -// "Index build: joined after abort". -checkLog.containsJson(secondary, 20655); - -jsTestLog("Waiting for threads to join"); -createIdx(); -simulateDiskSpaceFp.off(); - -assert.eq(0, primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); -assert.eq(1, secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); - -rst.awaitReplication(); -IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']); -IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']); +killBeforeVoteCommitSucceeds(rst); +killAfterVoteCommitFails(rst); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/index_build_killop_primary.js b/jstests/noPassthrough/index_build_killop_primary.js new file mode 100644 index 0000000000000..d86fa92abaf43 --- /dev/null +++ b/jstests/noPassthrough/index_build_killop_primary.js @@ -0,0 +1,103 @@ +/** + * Confirms that background index builds on a primary can be aborted using killop. + * @tags: [ + * requires_replication, + * ] + */ +(function() { +"use strict"; + +load('jstests/noPassthrough/libs/index_build.js'); +load("jstests/libs/fail_point_util.js"); + +function killopOnFailpoint(rst, failpointName, collName) { + const primary = rst.getPrimary(); + const testDB = primary.getDB('test'); + const coll = testDB.getCollection(collName); + + assert.commandWorked(coll.insert({a: 1})); + + const fp = configureFailPoint(testDB, failpointName); + // Pausing is only required to obtain the opId, as the target failpoint will block the build at + // the location where we want the index build to be killed. + IndexBuildTest.pauseIndexBuilds(primary); + + const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}); + + // When the index build starts, find its op id. + const opId = IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'a_1'); + + IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId, (op) => { + jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op)); + assert.eq( + undefined, + op.connectionId, + 'Was expecting IndexBuildsCoordinator op; found db.currentOp() for connection thread instead: ' + + tojson(op)); + assert.eq( + coll.getFullName(), + op.ns, + 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op)); + }); + + // Once we have the opId, we can resume index builds (the target failpoint will block it at the + // desired location). + IndexBuildTest.resumeIndexBuilds(primary); + + // Index build should be present in the config.system.indexBuilds collection. + const indexMap = + IndexBuildTest.assertIndexes(coll, 2, ["_id_"], ["a_1"], {includeBuildUUIDs: true}); + const indexBuildUUID = indexMap['a_1'].buildUUID; + assert(primary.getCollection('config.system.indexBuilds').findOne({_id: indexBuildUUID})); + + // Kill the index builder thread. + fp.wait(); + assert.commandWorked(testDB.killOp(opId)); + fp.off(); + + const exitCode = createIdx({checkExitSuccess: false}); + assert.neq( + 0, exitCode, 'expected shell to exit abnormally due to index build being terminated'); + + // Check that no new index has been created. 
This verifies that the index build was aborted
+    // rather than successfully completed.
+    IndexBuildTest.assertIndexesSoon(coll, 1, ['_id_']);
+
+    const cmdNs = testDB.getCollection('$cmd').getFullName();
+    let ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.startIndexBuild': coll.getName()});
+    assert.eq(1, ops.length, 'incorrect number of startIndexBuild oplog entries: ' + tojson(ops));
+    ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.abortIndexBuild': coll.getName()});
+    assert.eq(1, ops.length, 'incorrect number of abortIndexBuild oplog entries: ' + tojson(ops));
+    ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.commitIndexBuild': coll.getName()});
+    assert.eq(0, ops.length, 'incorrect number of commitIndexBuild oplog entries: ' + tojson(ops));
+
+    // Index build should be removed from the config.system.indexBuilds collection.
+    assert.isnull(
+        primary.getCollection('config.system.indexBuilds').findOne({_id: indexBuildUUID}));
+}
+
+const rst = new ReplSetTest({
+    nodes: [
+        {},
+        {
+            // Disallow elections on secondary.
+            rsConfig: {
+                priority: 0,
+                votes: 0,
+            },
+        },
+    ]
+});
+rst.startSet();
+rst.initiate();
+
+// Kill the build before it has voted for commit.
+jsTestLog("killOp index build on primary before vote for commit readiness");
+killopOnFailpoint(rst, 'hangAfterIndexBuildFirstDrain', 'beforeVoteCommit');
+
+// Kill the build after it has voted for commit.
+jsTestLog("killOp index build on primary after vote for commit readiness");
+killopOnFailpoint(rst, 'hangIndexBuildAfterSignalPrimaryForCommitReadiness', 'afterVoteCommit');
+
+rst.stopSet();
+})(); diff --git a/jstests/noPassthrough/index_build_killop_secondary_after_commit.js b/jstests/noPassthrough/index_build_killop_secondary_after_commit.js new file mode 100644 index 0000000000000..fdc414861abdf --- /dev/null +++ b/jstests/noPassthrough/index_build_killop_secondary_after_commit.js @@ -0,0 +1,108 @@
+/**
+ * Confirms that aborting a background index build on a secondary does not leave the node in an
+ * inconsistent state.
+ * @tags: [
+ *   requires_replication,
+ * ]
+ */
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+load("jstests/libs/log.js");  // for checkLog
+load('jstests/noPassthrough/libs/index_build.js');
+
+// This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts.
+TestData.skipEnforceFastCountOnValidate = true;
+
+const rst = new ReplSetTest({
+    nodes: [
+        {},
+        {
+            // Disallow elections on secondary. This allows the primary to commit without waiting
+            // for the secondary.
+            rsConfig: {
+                priority: 0,
+                votes: 0,
+            },
+            slowms: 30000,  // Don't log slow operations on secondary. See SERVER-44821.
+        },
+        {
+            // The arbiter prevents the primary from stepping down due to lack of majority in the
+            // case where the secondary is restarting due to the (expected) unclean shutdown. Note
+            // that the arbiter doesn't participate in the commitQuorum.
+            rsConfig: {
+                arbiterOnly: true,
+            },
+        },
+    ]
+});
+const nodes = rst.startSet();
+rst.initiate();
+
+const primary = rst.getPrimary();
+const testDB = primary.getDB('test');
+const coll = testDB.getCollection('test');
+
+assert.commandWorked(coll.insert({a: 1}));
+
+let secondary = rst.getSecondary();
+IndexBuildTest.pauseIndexBuilds(secondary);
+
+const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1});
+
+// When the index build starts, find its op id.
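The waitForIndexBuildToStart() and waitForIndexBuildToScanCollection() helpers poll currentOp until the builder thread shows up and then return its opid, which is what killOp() is later handed. A simplified sketch of that lookup, based only on the fields these tests assert on (no connectionId, ns equal to the indexed collection); the real helper lives in jstests/noPassthrough/libs/index_build.js:

// Simplified sketch: find the opid of an index builder thread working on namespace `nss`.
function findIndexBuildOpIdSketch(db, nss) {
    const ops = db.currentOp({'$all': true}).inprog;
    // Index builder threads have no connectionId and report the collection namespace in 'ns'.
    const builders = ops.filter(op => op.connectionId === undefined && op.ns === nss);
    return builders.length > 0 ? builders[0].opid : -1;
}

// Hypothetical usage: poll until the build appears, then pass the opid to db.killOp().
assert.soon(() => findIndexBuildOpIdSketch(db, "test.coll") !== -1);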
+let secondaryDB = secondary.getDB(testDB.getName()); +const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB); + +IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => { + jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op)); + assert.eq(coll.getFullName(), + op.ns, + 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op)); +}); + +// Wait for the primary to complete the index build and replicate a commit oplog entry. +// "Index build: completed successfully" +checkLog.containsJson(primary, 20663); + +// Kill the index build. +assert.commandWorked(secondaryDB.killOp(opId)); + +const gracefulIndexBuildFlag = FeatureFlagUtil.isEnabled(testDB, "IndexBuildGracefulErrorHandling"); +if (!gracefulIndexBuildFlag) { + // We expect this to crash the secondary because this error is not recoverable + assert.soon(function() { + return rawMongoProgramOutput().search(/Fatal assertion.*(51101)/) >= 0; + }); +} else { + // Expect the secondary to crash. Depending on timing, this can be either because the secondary + // was waiting for a primary abort when a 'commitIndexBuild' is applied, or because the build + // fails and tries to request an abort while a 'commitIndexBuild' is being applied. + assert.soon(function() { + return rawMongoProgramOutput().search(/Fatal assertion.*(7329403|7329407)/) >= 0; + }); +} + +// After restarting the secondary, expect that the index build completes successfully. +rst.stop(secondary.nodeId, undefined, {forRestart: true, allowedExitCode: MongoRunner.EXIT_ABORT}); +rst.start(secondary.nodeId, undefined, true /* restart */); + +secondary = rst.getSecondary(); +secondaryDB = secondary.getDB(testDB.getName()); + +// Wait for the restarted secondary node to reach SECONDARY state again. +rst.waitForState(secondary, ReplSetTest.State.SECONDARY); + +// Wait for the index build to complete on all nodes. +rst.awaitReplication(); + +// Expect successful createIndex command invocation in parallel shell. A new index should be present +// on the primary and secondary. +createIdx(); + +IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']); + +// Check that index was created on the secondary despite the attempted killOp(). +const secondaryColl = secondaryDB.getCollection(coll.getName()); +IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']); + +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/index_build_killop_secondary_before_commit.js b/jstests/noPassthrough/index_build_killop_secondary_before_commit.js index acd23431959ce..d9b9231145c05 100644 --- a/jstests/noPassthrough/index_build_killop_secondary_before_commit.js +++ b/jstests/noPassthrough/index_build_killop_secondary_before_commit.js @@ -1,18 +1,100 @@ /** - * Sends a killop to an index build on a secondary node before it commits and confirms that the - * index build is canceled on all nodes. + * Sends a killop to an index build on a secondary node before it commits and confirms that: + * - the index build is canceled on all nodes if killop is before voting for commit. + * - the killop results in the secondary crashing if the killop is after voting for commit. 
* * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); load('jstests/noPassthrough/libs/index_build.js'); +TestData.skipEnforceFastCountOnValidate = true; + +function killopIndexBuildOnSecondaryOnFailpoint(rst, failpointName, shouldSucceed) { + const primary = rst.getPrimary(); + const testDB = primary.getDB('test'); + const coll = testDB.getCollection('test'); + let secondary = rst.getSecondary(); + let secondaryDB = secondary.getDB(testDB.getName()); + + coll.drop(); + assert.commandWorked(coll.insert({a: 1})); + + // Pause the index build on the primary so that it does not commit. + IndexBuildTest.pauseIndexBuilds(primary); + IndexBuildTest.pauseIndexBuilds(secondary); + + let expectedErrors = shouldSucceed ? ErrorCodes.IndexBuildAborted : []; + + const fp = configureFailPoint(secondary, failpointName); + const createIdx = + IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {}, expectedErrors); + + // When the index build starts, find its op id. + const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB); + + IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => { + jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op)); + assert.eq( + coll.getFullName(), + op.ns, + 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op)); + }); + + // Resume index build to the desired failpoint, and kill it. + IndexBuildTest.resumeIndexBuilds(secondary); + fp.wait(); + assert.commandWorked(secondaryDB.killOp(opId)); + fp.off(); + + if (shouldSucceed) { + // "attempting to abort index build". + checkLog.containsJson(primary, 4656010); + + IndexBuildTest.resumeIndexBuilds(primary); + // "Index build: joined after abort". + checkLog.containsJson(primary, 20655); + + // Wait for the index build abort to replicate. + rst.awaitReplication(); + + // Expect the index build to fail and for the index to not exist on either node. + createIdx(); + + IndexBuildTest.assertIndexes(coll, 1, ['_id_']); + + const secondaryColl = secondaryDB.getCollection(coll.getName()); + IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']); + } else { + // We expect this to crash the secondary because this error is not recoverable. + assert.soon(function() { + return rawMongoProgramOutput().search(/Fatal assertion.*(51101)/) >= 0; + }); + + // After restarting the secondary, expect that the index build completes successfully. + rst.stop(secondary.nodeId, + undefined, + {forRestart: true, allowedExitCode: MongoRunner.EXIT_ABORT}); + rst.start(secondary.nodeId, undefined, true /* restart */); + + secondary = rst.getSecondary(); + secondaryDB = secondary.getDB(testDB.getName()); + + IndexBuildTest.resumeIndexBuilds(primary); + // Expect the index build to succeed. + createIdx(); + + // Wait for the index build commit to replicate. + rst.awaitReplication(); + IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']); + + const secondaryColl = secondaryDB.getCollection(coll.getName()); + IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']); + } +} + const rst = new ReplSetTest({ nodes: [ {}, @@ -23,59 +105,26 @@ const rst = new ReplSetTest({ }, slowms: 30000, // Don't log slow operations on secondary. See SERVER-44821. }, + { + // The arbiter prevents the primary from stepping down due to lack of majority in the + // case where the secondary is restarting due to the (expected) unclean shutdown. 
Note + // that the arbiter doesn't participate in the commitQuorum. + rsConfig: { + arbiterOnly: true, + }, + }, ] }); rst.startSet(); rst.initiate(); -const primary = rst.getPrimary(); -const testDB = primary.getDB('test'); -const coll = testDB.getCollection('test'); - -assert.commandWorked(coll.insert({a: 1})); - -// Pause the index build on the primary so that it does not commit. -IndexBuildTest.pauseIndexBuilds(primary); - -const secondary = rst.getSecondary(); -IndexBuildTest.pauseIndexBuilds(secondary); - -const createIdx = IndexBuildTest.startIndexBuild( - primary, coll.getFullName(), {a: 1}, {}, ErrorCodes.IndexBuildAborted); - -// When the index build starts, find its op id. -const secondaryDB = secondary.getDB(testDB.getName()); -const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB); - -IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => { - jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op)); - assert.eq(coll.getFullName(), - op.ns, - 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op)); -}); - -// Kill the index build. -assert.commandWorked(secondaryDB.killOp(opId)); - -// Resume index build, allowing it to cancel. -IndexBuildTest.resumeIndexBuilds(secondary); -// "attempting to abort index build". -checkLog.containsJson(primary, 4656010); - -IndexBuildTest.resumeIndexBuilds(primary); -// "Index build: joined after abort". -checkLog.containsJson(primary, 20655); - -// Wait for the index build abort to replicate. -rst.awaitReplication(); - -// Expect the index build to fail and for the index to not exist on either node. -createIdx(); - -IndexBuildTest.assertIndexes(coll, 1, ['_id_']); +// Kill the build before it has voted for commit. +jsTestLog("killOp index build on secondary before vote for commit readiness"); +killopIndexBuildOnSecondaryOnFailpoint( + rst, 'hangAfterIndexBuildFirstDrain', /*shouldSucceed*/ true); -const secondaryColl = secondaryDB.getCollection(coll.getName()); -IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']); +jsTestLog("killOp index build on secondary after vote for commit readiness"); +killopIndexBuildOnSecondaryOnFailpoint( + rst, 'hangIndexBuildAfterSignalPrimaryForCommitReadiness', /*shouldSucceed*/ false); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/index_build_operation_metrics.js b/jstests/noPassthrough/index_build_operation_metrics.js index 75ee9e331fb45..49b1f580e1e3f 100644 --- a/jstests/noPassthrough/index_build_operation_metrics.js +++ b/jstests/noPassthrough/index_build_operation_metrics.js @@ -285,7 +285,7 @@ assert.commandWorked(primaryDB[collName].dropIndex({a: 1})); assert(!metrics[dbName]); }); - // Ensure the index was actually built. Do this after checking metrics because the helper calls + // Ensure the index was not built. Do this after checking metrics because the helper calls // listIndexes which contributes to metrics. 
IndexBuildTest.assertIndexes(primaryDB[collName], 1, ['_id_']); IndexBuildTest.assertIndexes(secondaryDB[collName], 1, ['_id_']); @@ -385,4 +385,4 @@ assert.commandWorked(primaryDB[collName].dropIndex({a: 1})); IndexBuildTest.assertIndexes(secondaryDB[collName], 2, ['_id_', 'a_1']); })(); rst.stopSet(); -}()); \ No newline at end of file +}()); diff --git a/jstests/noPassthrough/index_build_out_of_order_scan.js b/jstests/noPassthrough/index_build_out_of_order_scan.js new file mode 100644 index 0000000000000..ebc2d1dbf0017 --- /dev/null +++ b/jstests/noPassthrough/index_build_out_of_order_scan.js @@ -0,0 +1,76 @@ +/** + * Ensures that index builds encountering a DataCorruptionDetected error log and increment a metric. + * + * @tags: [ + * requires_fcv_71, + * requires_replication, + * ] + */ +(function() { +"use strict"; + +load('jstests/noPassthrough/libs/index_build.js'); +load("jstests/libs/fail_point_util.js"); + +const rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); + +const dbName = 'test'; +const collName = 'coll'; +const primary = rst.getPrimary(); +const primaryDB = primary.getDB(dbName); +const primaryColl = primaryDB.getCollection(collName); + +assert.commandWorked(primaryColl.insert({a: 1})); + +rst.awaitReplication(); + +const hangAfterInitializingIndexBuild = + configureFailPoint(primary, "hangAfterInitializingIndexBuild"); +const createIdx = IndexBuildTest.startIndexBuild( + primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.DataCorruptionDetected]); + +const buildUUID = + IndexBuildTest + .assertIndexesSoon(primaryColl, 2, ['_id_'], ['a_1'], {includeBuildUUIDs: true})['a_1'] + .buildUUID; + +hangAfterInitializingIndexBuild.wait(); +const WTRecordStoreUassertOutOfOrder = + configureFailPoint(primary, "WTRecordStoreUassertOutOfOrder"); +const hangBeforeAbort = + configureFailPoint(primary, "hangIndexBuildBeforeTransitioningReplStateTokAwaitPrimaryAbort"); +hangAfterInitializingIndexBuild.off(); + +hangBeforeAbort.wait(); + +// Get collection UUID. +const collInfos = primaryDB.getCollectionInfos({name: primaryColl.getName()}); +assert.eq(collInfos.length, 1, collInfos); +const collUUID = collInfos[0].info.uuid; + +// Index build: data corruption detected. +checkLog.containsJson(primary, 7333600, { + buildUUID: function(uuid) { + return uuid && uuid["uuid"]["$uuid"] === extractUUIDFromObject(buildUUID); + }, + db: primaryDB.getName(), + collectionUUID: function(uuid) { + jsTestLog(collUUID); + return uuid && uuid["uuid"]["$uuid"] === extractUUIDFromObject(collUUID); + } +}); +assert.eq(1, primaryDB.serverStatus().indexBuilds.failedDueToDataCorruption); + +// Disable out-of-order failpoint so clean-up can succeed. +WTRecordStoreUassertOutOfOrder.off(); +hangBeforeAbort.off(); + +jsTestLog("Waiting for threads to join"); +createIdx(); + +IndexBuildTest.assertIndexesSoon(primaryColl, 1, ['_id_']); + +rst.stopSet(); +})(); diff --git a/jstests/noPassthrough/index_build_stepdown_dropCollection_during_early_setup.js b/jstests/noPassthrough/index_build_stepdown_dropCollection_during_early_setup.js new file mode 100644 index 0000000000000..778bfa89e0533 --- /dev/null +++ b/jstests/noPassthrough/index_build_stepdown_dropCollection_during_early_setup.js @@ -0,0 +1,79 @@ +/** + * Starts an index build, steps down the primary before the index build has completed its setup (and + * made other replicas aware of the index build), and drop the collection the index is being built + * on. 
This exercises a path described in SERVER-77025 whereby applying a DDL operation (like + * dropCollection) on the secondary conflicts with the ongoing index build. This test confirms that + * replication waits until the index build is not present anymore, and then retries dropCollection + * and succeeds. + * + * @tags: [ + * requires_replication, + * ] + */ +(function() { +"use strict"; + +load("jstests/libs/fail_point_util.js"); // For "configureFailPoint()" +load("jstests/libs/parallelTester.js"); // For "startParallelShell()" +load("jstests/noPassthrough/libs/index_build.js"); // For "IndexBuildTest" + +const rst = new ReplSetTest({nodes: 2}); +rst.startSet(); +rst.initiate(); + +const primary = rst.getPrimary(); +const primaryDB = primary.getDB("test"); +const primaryColl = primaryDB.getCollection("coll"); +assert.commandWorked(primaryDB.setLogLevel(1, "replication")); + +assert.commandWorked(primaryColl.insert({_id: 1, a: 1})); +rst.awaitReplication(); + +// Enable fail point which makes index build hang during setup, simulating a condition where the +// index build is registered, but not yet replicated. +const fp = configureFailPoint(primary, "hangIndexBuildOnSetupBeforeTakingLocks"); + +const waitForIndexBuildToErrorOut = IndexBuildTest.startIndexBuild( + primary, primaryColl.getFullName(), {a: 1}, {}, [ErrorCodes.InterruptedDueToReplStateChange]); + +fp.wait(); + +// Step down the node, while the index build is set up in memory but the "startIndexBuild" entry +// hasn't replicated. +assert.commandWorked(primaryDB.adminCommand({"replSetStepDown": 5 * 60, "force": true})); + +rst.waitForPrimary(); + +// Drop the collection on the new primary. The new primary is not aware of the index build, because +// the old primary hadn't been able to replicate the "startIndexBuild" oplog entry. +const waitForDropCollection = startParallelShell(function() { + db.getCollection("coll").drop(); +}, rst.getPrimary().port); + +// Confirm that the old primary, now secondary waits until the index build is not in progress any +// longer before retrying the drop. +// "Waiting for index build(s) to complete on the namespace before retrying the conflicting +// operation" +assert.soon(() => checkLog.checkContainsOnceJson(rst.getSecondary(), 7702500)); + +// Resume the index build so it can fail due to InterruptedDueToReplStateChange. +fp.off(); + +// Confirm that the old primary, now secondary can retry the dropCollection. +// "Acceptable error during oplog application: background operation in progress for namespace" +assert.soon(() => checkLog.checkContainsOnceJson(rst.getSecondary(), 51775)); + +// dropCollection now succeeds, and the command completes on the primary. +waitForDropCollection(); + +rst.awaitReplication(); + +// The index build fails with InterruptedDueToReplStateChange. +waitForIndexBuildToErrorOut(); + +// Collection doesn't exist. +assert(!rst.getPrimary().getDB("test").getCollectionNames().includes("coll")); +assert(!rst.getSecondary().getDB("test").getCollectionNames().includes("coll")); + +rst.stopSet(); +})(); diff --git a/jstests/noPassthrough/index_build_stepdown_during_async_stepup.js b/jstests/noPassthrough/index_build_stepdown_during_async_stepup.js new file mode 100644 index 0000000000000..d5d76ca04bdbc --- /dev/null +++ b/jstests/noPassthrough/index_build_stepdown_during_async_stepup.js @@ -0,0 +1,80 @@ +/** + * Verifies that the index build step-up async task handles a stepdown gracefully. 
+ * + * @tags: [ + * requires_fcv_71, + * requires_replication, + * ] + */ +(function() { +"use strict"; + +load('jstests/noPassthrough/libs/index_build.js'); +load("jstests/libs/fail_point_util.js"); + +const rst = new ReplSetTest({nodes: 2}); +rst.startSet(); +rst.initiate(); + +const dbName = 'test'; +const collName = 'coll'; +const primary = rst.getPrimary(); +const primaryDB = primary.getDB(dbName); +const primaryColl = primaryDB.getCollection(collName); + +assert.commandWorked(primaryColl.insert({a: 1})); + +rst.awaitReplication(); + +const secondary = rst.getSecondary(); + +const hangAfterIndexBuildDumpsInsertsFromBulk = + configureFailPoint(primary, 'hangAfterIndexBuildDumpsInsertsFromBulk'); +const hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum = + configureFailPoint(secondary, 'hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum'); + +const waitForIndexBuildToComplete = IndexBuildTest.startIndexBuild( + primary, primaryColl.getFullName(), {a: 1}, null, [ErrorCodes.InterruptedDueToReplStateChange]); + +// Wait for the primary to start the index build. +hangAfterIndexBuildDumpsInsertsFromBulk.wait(); + +assert.commandWorked(primary.adminCommand({replSetStepDown: 60, force: true})); + +// The old secondary is now stepping up and checking the active index builds. +// "IndexBuildsCoordinator-StepUp [..] Active index builds" +hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum.wait(); +checkLog.containsJson(secondary, 20650); + +// Step down the new primary. +const waitForStepDown = startParallelShell(() => { + assert.commandWorked(db.adminCommand({replSetStepDown: 60 * 60, force: true})); +}, secondary.port); + +// Wait for the RstlKillOpThread to run again. It first ran when the secondary stepped up (earlier +// in this test case), and it's running now when it's stepping down again. +assert.soon(() => checkLog.checkContainsWithCountJson(secondary, 21343, {}, 2)); + +// Wait for the step-up task to be marked as killPending by the RstlKillOpThread. +assert.soon(() => { + return 1 === + secondary.getDB('test') + .currentOp({desc: 'IndexBuildsCoordinator-StepUp', killPending: true})['inprog'] + .length; +}); + +// Turn off the failpoints. Allow the createIndexes command to return +// InterruptedDueToReplStateChange due to stepdown, the stepped-up secondary to complete the new +// stepdown, and the index build to succeed. +hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum.off(); +hangAfterIndexBuildDumpsInsertsFromBulk.off(); +waitForIndexBuildToComplete(); +waitForStepDown(); + +IndexBuildTest.assertIndexesSoon( + rst.getPrimary().getDB(dbName).getCollection(collName), 2, ['_id_', 'a_1']); +IndexBuildTest.assertIndexesSoon( + rst.getSecondary().getDB(dbName).getCollection(collName), 2, ['_id_', 'a_1']); + +rst.stopSet(); +})(); diff --git a/jstests/noPassthrough/index_build_vote_abort_while_vote_commit.js b/jstests/noPassthrough/index_build_vote_abort_while_vote_commit.js index 34d02c9d61a51..a7689f083d6fc 100644 --- a/jstests/noPassthrough/index_build_vote_abort_while_vote_commit.js +++ b/jstests/noPassthrough/index_build_vote_abort_while_vote_commit.js @@ -1,9 +1,8 @@ /** - * Ensures that index builds can safely be aborted, for instance by the DiskSpaceMonitor, while a - * voteCommitIndexBuild is in progress. + * Ensures that index builds cannot be aborted after voting for commit. 
* * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ @@ -45,7 +44,7 @@ const secondaryColl = secondaryDB.getCollection(collName); // effectively pausing the index build on the secondary too as it will wait for the primary to // commit or abort. IndexBuildTest.pauseIndexBuilds(primary); -const hangVoteCommit = configureFailPoint(primary, 'hangBeforeVoteCommitIndexBuild'); +const hangBeforeVoteCommit = configureFailPoint(primary, 'hangBeforeVoteCommitIndexBuild'); const tookActionCountBefore = secondaryDB.serverStatus().metrics.diskSpaceMonitor.tookAction; @@ -55,7 +54,7 @@ const createIdx = IndexBuildTest.startIndexBuild( IndexBuildTest.waitForIndexBuildToStart(secondaryDB, secondaryColl.getName(), 'a_1'); // Wait until secondary is voting for commit. -hangVoteCommit.wait(); +hangBeforeVoteCommit.wait(); // Default indexBuildMinAvailableDiskSpaceMB is 500 MB. // Simulate a remaining disk space of 450MB on the secondary node. @@ -68,20 +67,20 @@ assert.soon(() => { }); IndexBuildTest.resumeIndexBuilds(primary); -jsTestLog("Waiting for the index build to be killed"); -// "Index build: joined after abort". -checkLog.containsJson(secondary, 20655); +jsTestLog("Waiting for the index build kill attempt to fail"); +// "Index build: cannot force abort". +checkLog.containsJson(secondary, 7617000); +hangBeforeVoteCommit.off(); jsTestLog("Waiting for threads to join"); createIdx(); simulateDiskSpaceFp.off(); assert.eq(0, primaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); -assert.eq(1, secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); +assert.eq(0, secondaryDB.serverStatus().indexBuilds.killedDueToInsufficientDiskSpace); -rst.awaitReplication(); -IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']); -IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']); +IndexBuildTest.assertIndexesSoon(primaryColl, 2, ['_id_', 'a_1']); +IndexBuildTest.assertIndexesSoon(secondaryColl, 2, ['_id_', 'a_1']); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/index_build_yield_bulk_load.js b/jstests/noPassthrough/index_build_yield_bulk_load.js index 4d2e1bd4150ba..12fc4997162c2 100644 --- a/jstests/noPassthrough/index_build_yield_bulk_load.js +++ b/jstests/noPassthrough/index_build_yield_bulk_load.js @@ -58,4 +58,4 @@ awaitIndex(); awaitDrop(); MongoRunner.stopMongod(conn); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/index_build_yield_prepare_conflicts.js b/jstests/noPassthrough/index_build_yield_prepare_conflicts.js index e28fae3d36a68..2881ae145315e 100644 --- a/jstests/noPassthrough/index_build_yield_prepare_conflicts.js +++ b/jstests/noPassthrough/index_build_yield_prepare_conflicts.js @@ -74,4 +74,4 @@ session.abortTransaction_forTesting(); awaitIndex(); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/index_commit_currentop_slow.js b/jstests/noPassthrough/index_commit_currentop_slow.js index 1daea5a189167..d3db6d352ecfe 100644 --- a/jstests/noPassthrough/index_commit_currentop_slow.js +++ b/jstests/noPassthrough/index_commit_currentop_slow.js @@ -35,15 +35,13 @@ assert.commandWorked(coll.insert({a: 1})); const secondary = rst.getSecondary(); IndexBuildTest.pauseIndexBuilds(secondary); -const createIdx = - IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true}); +const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}); // Wait for secondary to start processing 
commitIndexBuild oplog entry from the primary. const secondaryDB = secondary.getDB(testDB.getName()); assert.soon(function() { const filter = { 'command.commitIndexBuild': {$exists: true}, - 'waitingForLatch.captureName': 'AnonymousLockable', '$all': true, }; const result = assert.commandWorked(secondaryDB.currentOp(filter)); diff --git a/jstests/noPassthrough/index_downgrade_fcv.js b/jstests/noPassthrough/index_downgrade_fcv.js deleted file mode 100644 index aba8fddf284f0..0000000000000 --- a/jstests/noPassthrough/index_downgrade_fcv.js +++ /dev/null @@ -1,57 +0,0 @@ -/** - * If a user attempts to downgrade the server while there is an index build in progress, the - * downgrade should succeed without blocking. - * @tags: [ - * requires_replication, - * ] - */ -(function() { -"use strict"; - -load('jstests/noPassthrough/libs/index_build.js'); - -const rst = new ReplSetTest({ - nodes: [ - {}, - { - // Disallow elections on secondary. - rsConfig: { - priority: 0, - votes: 0, - }, - }, - ] -}); -const nodes = rst.startSet(); -rst.initiate(); - -const primary = rst.getPrimary(); -const testDB = primary.getDB('test'); -const coll = testDB.getCollection('test'); - -assert.commandWorked(coll.insert({a: 1})); - -IndexBuildTest.pauseIndexBuilds(primary); - -const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}); -IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'a_1'); - -// Downgrade the primary using the setFeatureCompatibilityVersion command. -try { - assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); -} finally { - IndexBuildTest.resumeIndexBuilds(primary); -} - -IndexBuildTest.waitForIndexBuildToStop(testDB); - -createIdx(); - -IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']); - -// This confirms that the downgrade command will complete successfully after the index build has -// completed. -assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - -rst.stopSet(); -})(); diff --git a/jstests/noPassthrough/index_drop_before_running.js b/jstests/noPassthrough/index_drop_before_running.js index 57ec6a906bde9..a2af906258edb 100644 --- a/jstests/noPassthrough/index_drop_before_running.js +++ b/jstests/noPassthrough/index_drop_before_running.js @@ -2,7 +2,7 @@ * Test aborting an index build after setup but before transitioning to in-progress. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ diff --git a/jstests/noPassthrough/index_killop_standalone.js b/jstests/noPassthrough/index_killop_standalone.js index be4a3aff1e15a..33e24e332c679 100644 --- a/jstests/noPassthrough/index_killop_standalone.js +++ b/jstests/noPassthrough/index_killop_standalone.js @@ -1,5 +1,5 @@ /** - * Confirms that both foreground and background index builds can be aborted using killop. + * Confirms that index builds can be aborted using killop. */ (function() { "use strict"; @@ -14,11 +14,11 @@ assert.commandWorked(testDB.dropDatabase()); assert.commandWorked(testDB.test.insert({a: 1})); const coll = testDB.test; -// Test that building an index with 'options' can be aborted using killop. -function testAbortIndexBuild(options) { +// Test that building an index can be aborted using killop. 
+function testAbortIndexBuild() { IndexBuildTest.pauseIndexBuilds(conn); - const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: 1}, options); + const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {a: 1}); // When the index build starts, find its op id. const opId = IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'a_1'); @@ -42,7 +42,6 @@ function testAbortIndexBuild(options) { IndexBuildTest.assertIndexes(coll, 1, ['_id_']); } -testAbortIndexBuild({background: true}); -testAbortIndexBuild({background: false}); +testAbortIndexBuild(); MongoRunner.stopMongod(conn); })(); diff --git a/jstests/noPassthrough/index_primary_aborts_immediately_on_key_generation_error.js b/jstests/noPassthrough/index_primary_aborts_immediately_on_key_generation_error.js index d473ae51e5e87..0ddd895139c7c 100644 --- a/jstests/noPassthrough/index_primary_aborts_immediately_on_key_generation_error.js +++ b/jstests/noPassthrough/index_primary_aborts_immediately_on_key_generation_error.js @@ -4,7 +4,7 @@ * proceed to the next phase. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ @@ -73,16 +73,11 @@ createIdx(); const reasonString = `'voteAbortIndexBuild' received from '${secondary.host}'`; checkLog.checkContainsOnceJsonStringMatch(testDB, 4656003, "error", reasonString); -// As aborting the build involves interrupting the building thread on which the user op is waiting, -// the user op will return before the primary has actually aborted the build. Waiting for the -// 'createIndexes' command to return does not guarantee that the primary has replicated the abort -// oplog entry, nor that the secondary has applied it. -IndexBuildTest.waitForIndexBuildToStop(testDB); -IndexBuildTest.waitForIndexBuildToStop(secondaryDB); - -// Assert index does not exist. -IndexBuildTest.assertIndexes(coll, 1, ['_id_'], []); -IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_'], []); +// Wait for the index build to eventually disappear. Due to an external abort thread doing the +// cleanup, we can't rely on waitForIndexBuildToStop as it checks for the opId of the builder +// thread. 
+IndexBuildTest.assertIndexesSoon(coll, 1, ['_id_'], []); +IndexBuildTest.assertIndexesSoon(secondaryColl, 1, ['_id_'], []); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/index_scan_low_priority.js b/jstests/noPassthrough/index_scan_low_priority.js index d04f9c75c133c..4094455397e12 100644 --- a/jstests/noPassthrough/index_scan_low_priority.js +++ b/jstests/noPassthrough/index_scan_low_priority.js @@ -21,45 +21,66 @@ const coll = db.coll; assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryExecYieldIterations: 1})); -assert.commandWorked(coll.insert({a: 0})); -assert.commandWorked(coll.insert({a: 1})); -assert.commandWorked(coll.createIndexes([{a: 1}, {a: -1}])); +const runTest = function(deprioritize) { + assert.commandWorked(coll.insert({a: 0})); + assert.commandWorked(coll.insert({a: 1})); + assert.commandWorked(coll.createIndexes([{a: 1}, {a: -1}])); -const numLowPriority = function() { - return db.serverStatus().wiredTiger.concurrentTransactions.read.lowPriority.finishedProcessing; -}; + const numLowPriority = function() { + return db.serverStatus() + .wiredTiger.concurrentTransactions.read.lowPriority.finishedProcessing; + }; -const testCoveredScanDeprioritized = function(direction) { - const numLowPriorityBefore = numLowPriority(); - coll.find().hint({a: direction}).itcount(); - assert.gt(numLowPriority(), numLowPriorityBefore); -}; -testCoveredScanDeprioritized(1); -testCoveredScanDeprioritized(-1); + const testCoveredScanDeprioritized = function(direction) { + const numLowPriorityBefore = numLowPriority(); + coll.find().hint({a: direction}).itcount(); + if (deprioritize) { + assert.gt(numLowPriority(), numLowPriorityBefore); + } else { + assert.eq(numLowPriority(), numLowPriorityBefore); + } + }; + testCoveredScanDeprioritized(1); + testCoveredScanDeprioritized(-1); -const testNonCoveredScanDeprioritized = function(direction) { - const numLowPriorityBefore = numLowPriority(); - coll.find({b: 1}).hint({a: direction}).itcount(); - assert.gt(numLowPriority(), numLowPriorityBefore); -}; -testNonCoveredScanDeprioritized(1); -testNonCoveredScanDeprioritized(-1); + const testNonCoveredScanDeprioritized = function(direction) { + const numLowPriorityBefore = numLowPriority(); + coll.find({b: 1}).hint({a: direction}).itcount(); + if (deprioritize) { + assert.gt(numLowPriority(), numLowPriorityBefore); + } else { + assert.eq(numLowPriority(), numLowPriorityBefore); + } + }; + testNonCoveredScanDeprioritized(1); + testNonCoveredScanDeprioritized(-1); -const testScanSortLimitDeprioritized = function(direction) { - const numLowPriorityBefore = numLowPriority(); - coll.find().hint({a: direction}).sort({a: 1}).limit(1).itcount(); - assert.gt(numLowPriority(), numLowPriorityBefore); -}; -testScanSortLimitDeprioritized(1); -testScanSortLimitDeprioritized(-1); + const testScanSortLimitDeprioritized = function(direction) { + const numLowPriorityBefore = numLowPriority(); + coll.find().hint({a: direction}).sort({a: 1}).limit(1).itcount(); + if (deprioritize) { + assert.gt(numLowPriority(), numLowPriorityBefore); + } else { + assert.eq(numLowPriority(), numLowPriorityBefore); + } + }; + testScanSortLimitDeprioritized(1); + testScanSortLimitDeprioritized(-1); -const testScanLimitNotDeprioritized = function(direction) { - const numLowPriorityBefore = numLowPriority(); - coll.find().hint({a: direction}).limit(1).itcount(); - assert.eq(numLowPriority(), numLowPriorityBefore); + const testScanLimitNotDeprioritized = function(direction) { + const numLowPriorityBefore = 
numLowPriority(); + coll.find().hint({a: direction}).limit(1).itcount(); + assert.eq(numLowPriority(), numLowPriorityBefore); + }; + testScanLimitNotDeprioritized(1); + testScanLimitNotDeprioritized(-1); }; -testScanLimitNotDeprioritized(1); -testScanLimitNotDeprioritized(-1); + +runTest(true); + +assert.commandWorked( + db.adminCommand({setParameter: 1, deprioritizeUnboundedUserIndexScans: false})); +runTest(false); MongoRunner.stopMongod(conn); }()); diff --git a/jstests/noPassthrough/index_secondary_awaiting_primary_abort_crash_on_commit.js b/jstests/noPassthrough/index_secondary_awaiting_primary_abort_crash_on_commit.js index 4dc9a27ca77e7..b1208086bf7b1 100644 --- a/jstests/noPassthrough/index_secondary_awaiting_primary_abort_crash_on_commit.js +++ b/jstests/noPassthrough/index_secondary_awaiting_primary_abort_crash_on_commit.js @@ -3,7 +3,7 @@ * oplog entry to be replicated. If a commit entry is received instead, the secondary should crash. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ diff --git a/jstests/noPassthrough/index_secondary_signal_primary_abort.js b/jstests/noPassthrough/index_secondary_signal_primary_abort.js index 5cf7bc2fa036f..3e9b0578c3b3c 100644 --- a/jstests/noPassthrough/index_secondary_signal_primary_abort.js +++ b/jstests/noPassthrough/index_secondary_signal_primary_abort.js @@ -2,7 +2,7 @@ * Tests that a failing index build on a secondary node causes the primary node to abort the build. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ @@ -74,13 +74,9 @@ createIdx(); failSecondaryBuild.off(); -// Wait for the builds to be unregistered before asserting indexes. -IndexBuildTest.waitForIndexBuildToStop(primaryDB, primaryColl.getName(), kIndexName); -IndexBuildTest.waitForIndexBuildToStop(secondaryDB, secondaryColl.getName(), kIndexName); - // Assert index does not exist. -IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_'], []); -IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_'], []); +IndexBuildTest.assertIndexesSoon(primaryColl, 1, ['_id_'], []); +IndexBuildTest.assertIndexesSoon(secondaryColl, 1, ['_id_'], []); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/index_signaling_primary_abort_shutdown.js b/jstests/noPassthrough/index_signaling_primary_abort_shutdown.js index 6871a3d903a3e..8ba5c637d3a07 100644 --- a/jstests/noPassthrough/index_signaling_primary_abort_shutdown.js +++ b/jstests/noPassthrough/index_signaling_primary_abort_shutdown.js @@ -3,7 +3,7 @@ * properly interrupted, without blocking shutdown, and restarted after shutdown. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ diff --git a/jstests/noPassthrough/index_stepdown_abort_prepare_conflict.js b/jstests/noPassthrough/index_stepdown_abort_prepare_conflict.js index adb535e2b75c0..089ff1a9b4b2b 100644 --- a/jstests/noPassthrough/index_stepdown_abort_prepare_conflict.js +++ b/jstests/noPassthrough/index_stepdown_abort_prepare_conflict.js @@ -45,7 +45,13 @@ assert.commandWorked( // Enable fail point which makes hybrid index build to hang before it aborts. 
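// A minimal consolidation sketch for the index_scan_low_priority.js changes above; it is not part
// of the patch. The repeated if/else on 'deprioritize' can be expressed as one helper, assuming
// only names already used in that test (numLowPriority, coll) and the server parameter
// 'deprioritizeUnboundedUserIndexScans' that the test toggles.
const assertLowPriorityCounter = function(deprioritize, runQuery) {
    const before = numLowPriority();
    runQuery();
    // An unbounded user index scan should take a low-priority ticket only when the parameter is on.
    if (deprioritize) {
        assert.gt(numLowPriority(), before);
    } else {
        assert.eq(numLowPriority(), before);
    }
};
// Example: assertLowPriorityCounter(deprioritize, () => coll.find().hint({a: 1}).itcount());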
var failPoint; -if (TestData.setParameters.featureFlagIndexBuildGracefulErrorHandling) { + +const gracefulIndexBuildFeatureFlag = + assert + .commandWorked( + primary.adminCommand({getParameter: 1, featureFlagIndexBuildGracefulErrorHandling: 1})) + .featureFlagIndexBuildGracefulErrorHandling.value; +if (gracefulIndexBuildFeatureFlag) { // If this feature flag is enabled, index builds fail immediately instead of suppressing errors // until the commit phase, and always signal the primary for abort (even if it is itself). Abort // is only ever performed in the command thread, which is interrupted by replication state diff --git a/jstests/noPassthrough/index_stepdown_commit_prepare_conflict.js b/jstests/noPassthrough/index_stepdown_commit_prepare_conflict.js index 12fabee9ae36b..e7e026644d52b 100644 --- a/jstests/noPassthrough/index_stepdown_commit_prepare_conflict.js +++ b/jstests/noPassthrough/index_stepdown_commit_prepare_conflict.js @@ -103,4 +103,4 @@ IndexBuildTest.assertIndexes(newPrimary.getDB(dbName).getCollection(collName), 2 IndexBuildTest.assertIndexes(primaryColl, 2, ["_id_", "x_1"]); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/index_stepup_abort_skipped_records.js b/jstests/noPassthrough/index_stepup_abort_skipped_records.js index b3a154ea36bb0..8e1563a958aa2 100644 --- a/jstests/noPassthrough/index_stepup_abort_skipped_records.js +++ b/jstests/noPassthrough/index_stepup_abort_skipped_records.js @@ -3,7 +3,7 @@ * skipped records that still cause key generation errors. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ @@ -53,11 +53,8 @@ rst.stepUp(secondary); createIdx(); // The new primary should eventually abort the build. -IndexBuildTest.waitForIndexBuildToStop(primaryDB, primaryColl.getName(), kIndexName); -IndexBuildTest.waitForIndexBuildToStop(secondaryDB, secondaryColl.getName(), kIndexName); - -IndexBuildTest.assertIndexes(primaryColl, 1, ['_id_']); -IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']); +IndexBuildTest.assertIndexesSoon(primaryColl, 1, ['_id_']); +IndexBuildTest.assertIndexesSoon(secondaryColl, 1, ['_id_']); // Verify failure reason is due to step-up check. checkLog.checkContainsOnceJsonStringMatch( diff --git a/jstests/noPassthrough/indexbg1.js b/jstests/noPassthrough/indexbg1.js index 8c2a840ee752f..92c19cc04d9bb 100644 --- a/jstests/noPassthrough/indexbg1.js +++ b/jstests/noPassthrough/indexbg1.js @@ -56,7 +56,7 @@ while (1) { // if indexing finishes before we can run checks, try indexing w/ m assert.commandWorked(bulk.execute()); assert.eq(size, t.count()); - bgIndexBuildPid = doParallel(fullName + ".createIndex( {i:1}, {background:true} )"); + bgIndexBuildPid = doParallel(fullName + ".createIndex( {i:1} )"); try { // wait for indexing to start print("wait for indexing to start"); diff --git a/jstests/noPassthrough/indexbg2.js b/jstests/noPassthrough/indexbg2.js index e374a406bfc1a..1507ee1810b35 100644 --- a/jstests/noPassthrough/indexbg2.js +++ b/jstests/noPassthrough/indexbg2.js @@ -30,7 +30,7 @@ let doParallel = function(work) { let indexBuild = function() { let fullName = "db." 
+ baseName; - return doParallel(fullName + ".createIndex( {i:1}, {background:true, unique:true} )"); + return doParallel(fullName + ".createIndex( {i:1}, {unique:true} )"); }; let doneParallel = function() { diff --git a/jstests/noPassthrough/indexbg_drop.js b/jstests/noPassthrough/indexbg_drop.js index 423af104d6094..2b8a6a3e16d69 100644 --- a/jstests/noPassthrough/indexbg_drop.js +++ b/jstests/noPassthrough/indexbg_drop.js @@ -55,7 +55,7 @@ jsTest.log("Starting background indexing for test of: " + tojson(dc)); // Add another index to be sure the drop command works. primaryDB.getCollection(collection).createIndex({b: 1}); -primaryDB.getCollection(collection).createIndex({i: 1}, {background: true}); +primaryDB.getCollection(collection).createIndex({i: 1}); // Make sure the index build has started on the secondary. IndexBuildTest.waitForIndexBuildToStart(secondDB); diff --git a/jstests/noPassthrough/indexbg_killop_primary.js b/jstests/noPassthrough/indexbg_killop_primary.js deleted file mode 100644 index de0f1c66e867f..0000000000000 --- a/jstests/noPassthrough/indexbg_killop_primary.js +++ /dev/null @@ -1,88 +0,0 @@ -/** - * Confirms that background index builds on a primary can be aborted using killop. - * @tags: [ - * requires_replication, - * ] - */ -(function() { -"use strict"; - -load('jstests/noPassthrough/libs/index_build.js'); - -const rst = new ReplSetTest({ - nodes: [ - {}, - { - // Disallow elections on secondary. - rsConfig: { - priority: 0, - votes: 0, - }, - }, - ] -}); -const nodes = rst.startSet(); -rst.initiate(); - -const primary = rst.getPrimary(); -const testDB = primary.getDB('test'); -const coll = testDB.getCollection('test'); - -assert.commandWorked(coll.insert({a: 1})); - -IndexBuildTest.pauseIndexBuilds(primary); - -const createIdx = - IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true}); - -// When the index build starts, find its op id. -const opId = IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'a_1'); - -IndexBuildTest.assertIndexBuildCurrentOpContents(testDB, opId, (op) => { - jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op)); - assert.eq( - undefined, - op.connectionId, - 'Was expecting IndexBuildsCoordinator op; found db.currentOp() for connection thread instead: ' + - tojson(op)); - assert.eq(coll.getFullName(), - op.ns, - 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op)); -}); - -// Index build should be present in the config.system.indexBuilds collection. -const indexMap = - IndexBuildTest.assertIndexes(coll, 2, ["_id_"], ["a_1"], {includeBuildUUIDs: true}); -const indexBuildUUID = indexMap['a_1'].buildUUID; -assert(primary.getCollection('config.system.indexBuilds').findOne({_id: indexBuildUUID})); - -// Kill the index builder thread. -assert.commandWorked(testDB.killOp(opId)); - -// Wait for the index build to stop from the killop signal. -try { - IndexBuildTest.waitForIndexBuildToStop(testDB); -} finally { - IndexBuildTest.resumeIndexBuilds(primary); -} - -const exitCode = createIdx({checkExitSuccess: false}); -assert.neq(0, exitCode, 'expected shell to exit abnormally due to index build being terminated'); - -// Check that no new index has been created. This verifies that the index build was aborted -// rather than successfully completed. 
-IndexBuildTest.assertIndexes(coll, 1, ['_id_']); - -const cmdNs = testDB.getCollection('$cmd').getFullName(); -let ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.startIndexBuild': coll.getName()}); -assert.eq(1, ops.length, 'incorrect number of startIndexBuild oplog entries: ' + tojson(ops)); -ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.abortIndexBuild': coll.getName()}); -assert.eq(1, ops.length, 'incorrect number of abortIndexBuild oplog entries: ' + tojson(ops)); -ops = rst.dumpOplog(primary, {op: 'c', ns: cmdNs, 'o.commitIndexBuild': coll.getName()}); -assert.eq(0, ops.length, 'incorrect number of commitIndexBuild oplog entries: ' + tojson(ops)); - -// Index build should be removed from the config.system.indexBuilds collection. -assert.isnull(primary.getCollection('config.system.indexBuilds').findOne({_id: indexBuildUUID})); - -rst.stopSet(); -})(); diff --git a/jstests/noPassthrough/indexbg_killop_secondary.js b/jstests/noPassthrough/indexbg_killop_secondary.js deleted file mode 100644 index a404e6acd92f3..0000000000000 --- a/jstests/noPassthrough/indexbg_killop_secondary.js +++ /dev/null @@ -1,103 +0,0 @@ -/** - * Confirms that aborting a background index builds on a secondary does not leave node in an - * inconsistent state. - * @tags: [ - * requires_replication, - * ] - */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); -load("jstests/libs/log.js"); // for checkLog -load('jstests/noPassthrough/libs/index_build.js'); - -// This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts. -TestData.skipEnforceFastCountOnValidate = true; - -const rst = new ReplSetTest({ - nodes: [ - {}, - { - // Disallow elections on secondary. This allows the primary to commit without waiting - // for the secondary. - rsConfig: { - priority: 0, - votes: 0, - }, - slowms: 30000, // Don't log slow operations on secondary. See SERVER-44821. - }, - ] -}); -const nodes = rst.startSet(); -rst.initiate(); - -const primary = rst.getPrimary(); -const testDB = primary.getDB('test'); -const coll = testDB.getCollection('test'); - -assert.commandWorked(coll.insert({a: 1})); - -let secondary = rst.getSecondary(); -IndexBuildTest.pauseIndexBuilds(secondary); - -const createIdx = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}); - -// When the index build starts, find its op id. -let secondaryDB = secondary.getDB(testDB.getName()); -const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB); - -IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => { - jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op)); - assert.eq(coll.getFullName(), - op.ns, - 'Unexpected ns field value in db.currentOp() result for index build: ' + tojson(op)); -}); - -// Wait for the primary to complete the index build and replicate a commit oplog entry. -// "Index build: completed successfully" -checkLog.containsJson(primary, 20663); - -// Kill the index build. -assert.commandWorked(secondaryDB.killOp(opId)); - -const gracefulIndexBuildFlag = FeatureFlagUtil.isEnabled(testDB, "IndexBuildGracefulErrorHandling"); -if (!gracefulIndexBuildFlag) { - // We expect this to crash the secondary because this error is not recoverable - assert.soon(function() { - return rawMongoProgramOutput().search(/Fatal assertion.*(51101)/) >= 0; - }); -} else { - // Expect the secondary to crash. 
Depending on timing, this can be either because the secondary - // was waiting for a primary abort when a 'commitIndexBuild' is applied, or because the build - // fails and tries to request an abort while a 'commitIndexBuild' is being applied. - assert.soon(function() { - return rawMongoProgramOutput().search(/Fatal assertion.*(7329403|7329407)/) >= 0; - }); -} - -// After restarting the secondary, expect that the index build completes successfully. -rst.stop(secondary.nodeId, undefined, {forRestart: true, allowedExitCode: MongoRunner.EXIT_ABORT}); -rst.start(secondary.nodeId, undefined, true /* restart */); - -secondary = rst.getSecondary(); -secondaryDB = secondary.getDB(testDB.getName()); - -// Wait for the restarted secondary node to reach SECONDARY state again. -rst.waitForState(secondary, ReplSetTest.State.SECONDARY); - -// Wait for the index build to complete on all nodes. -rst.awaitReplication(); - -// Expect successful createIndex command invocation in parallel shell. A new index should be present -// on the primary and secondary. -createIdx(); - -IndexBuildTest.assertIndexes(coll, 2, ['_id_', 'a_1']); - -// Check that index was created on the secondary despite the attempted killOp(). -const secondaryColl = secondaryDB.getCollection(coll.getName()); -IndexBuildTest.assertIndexes(secondaryColl, 2, ['_id_', 'a_1']); - -rst.stopSet(); -})(); diff --git a/jstests/noPassthrough/indexbg_killop_secondary_success.js b/jstests/noPassthrough/indexbg_killop_secondary_success.js index 22f3f5238062b..fbb122dd23a2a 100644 --- a/jstests/noPassthrough/indexbg_killop_secondary_success.js +++ b/jstests/noPassthrough/indexbg_killop_secondary_success.js @@ -6,10 +6,7 @@ * requires_replication, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load('jstests/noPassthrough/libs/index_build.js'); // This test triggers an unclean shutdown (an fassert), which may cause inaccurate fast counts. @@ -27,9 +24,9 @@ const rst = new ReplSetTest({ slowms: 30000, // Don't log slow operations on secondary. See SERVER-44821. }, { - // The arbiter prevents the primary from stepping down in the case where the secondary - // is restarting due to the (expected) unclean shutdown. Note that the arbiter doesn't - // participate in the commitQuorum. + // The arbiter prevents the primary from stepping down due to lack of majority in the + // case where the secondary is restarting due to the (expected) unclean shutdown. Note + // that the arbiter doesn't participate in the commitQuorum. rsConfig: { arbiterOnly: true, }, @@ -58,7 +55,8 @@ const createIdx = (gracefulIndexBuildFlag) // When the index build starts, find its op id. 
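// For reference, a sketch (not part of the patch) of the two ways these index build tests now read
// the featureFlagIndexBuildGracefulErrorHandling state at runtime instead of relying on
// TestData.setParameters. Via getParameter, as in index_stepdown_abort_prepare_conflict.js above
// ('primary' here stands for whatever connection the test holds):
const flagParam = assert.commandWorked(primary.adminCommand(
    {getParameter: 1, featureFlagIndexBuildGracefulErrorHandling: 1}));
const gracefulErrorHandlingEnabled = flagParam.featureFlagIndexBuildGracefulErrorHandling.value;
// Or via the helper from jstests/libs/feature_flag_util.js, as in the killop tests:
// FeatureFlagUtil.isEnabled(testDB, "IndexBuildGracefulErrorHandling");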
let secondaryDB = secondary.getDB(primaryDB.getName()); -const opId = IndexBuildTest.waitForIndexBuildToStart(secondaryDB); +const opId = + IndexBuildTest.waitForIndexBuildToScanCollection(secondaryDB, primaryColl.getName(), "a_1"); IndexBuildTest.assertIndexBuildCurrentOpContents(secondaryDB, opId, (op) => { jsTestLog('Inspecting db.currentOp() entry for index build: ' + tojson(op)); @@ -88,6 +86,7 @@ if (!gracefulIndexBuildFlag) { } primary = rst.getPrimary(); +rst.awaitSecondaryNodes(); primaryDB = primary.getDB('test'); primaryColl = primaryDB.getCollection('test'); @@ -113,4 +112,3 @@ if (!gracefulIndexBuildFlag) { } rst.stopSet(); -})(); diff --git a/jstests/noPassthrough/indexbg_killop_stepdown.js b/jstests/noPassthrough/indexbg_killop_stepdown.js index b54336ad90c6e..f24f7167ed744 100644 --- a/jstests/noPassthrough/indexbg_killop_stepdown.js +++ b/jstests/noPassthrough/indexbg_killop_stepdown.js @@ -6,14 +6,11 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/libs/fail_point_util.js"); -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load('jstests/noPassthrough/libs/index_build.js'); -const rst = new ReplSetTest({nodes: 2}); +const rst = new ReplSetTest({nodes: 3}); rst.startSet(); rst.initiate(); @@ -84,11 +81,11 @@ if (!gracefulIndexBuildFlag) { rst.stop( primary.nodeId, undefined, {forRestart: true, allowedExitCode: MongoRunner.EXIT_ABORT}); rst.start(primary.nodeId, undefined, true /* restart */); -} else { - primary = rst.waitForPrimary(); } -// Wait for the index build to complete. +// Wait for primary and secondaries to reach goal state, and for the index build to complete. +primary = rst.waitForPrimary(); +rst.awaitSecondaryNodes(); rst.awaitReplication(); if (gracefulIndexBuildFlag) { @@ -104,9 +101,6 @@ if (gracefulIndexBuildFlag) { rst.getSecondary().getDB('test').getCollection('test'), 1, ['_id_']); } else { - // Wait for the index build to complete. - rst.awaitReplication(); - // Verify that the stepped up node completed the index build. IndexBuildTest.assertIndexes( rst.getPrimary().getDB('test').getCollection('test'), 2, ['_id_', 'a_1']); @@ -117,5 +111,4 @@ if (gracefulIndexBuildFlag) { TestData.skipEnforceFastCountOnValidate = true; } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/indexbg_killop_stepup.js b/jstests/noPassthrough/indexbg_killop_stepup.js index bd5d73035c0e8..ae7bd543eb8d0 100644 --- a/jstests/noPassthrough/indexbg_killop_stepup.js +++ b/jstests/noPassthrough/indexbg_killop_stepup.js @@ -34,10 +34,8 @@ IndexBuildTest.pauseIndexBuilds(secondary); let waitForCommitReadinessFP = configureFailPoint(primary, "hangIndexBuildAfterSignalPrimaryForCommitReadiness"); -const awaitIndexBuild = - IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {a: 1}, {background: true}, [ - ErrorCodes.InterruptedDueToReplStateChange - ]); +const awaitIndexBuild = IndexBuildTest.startIndexBuild( + primary, coll.getFullName(), {a: 1}, {}, [ErrorCodes.InterruptedDueToReplStateChange]); // When the index build starts, find its op id. let secondaryDB = secondary.getDB(testDB.getName()); @@ -81,11 +79,14 @@ awaitStepUp(); // Wait for the index build to be aborted before asserting that it doesn't exist. 
IndexBuildTest.waitForIndexBuildToStop(secondaryDB, coll.getName(), "a_1"); -rst.awaitReplication(); -IndexBuildTest.assertIndexes(coll, 1, ['_id_']); const secondaryColl = secondaryDB.getCollection(coll.getName()); -IndexBuildTest.assertIndexes(secondaryColl, 1, ['_id_']); +// Although the index is aborted on the secondary that's stepping up, as of +// featureFlagIndexBuildGracefulErrorHandling we abort builds on secondaries (that is, we replicate +// 'abortIndexBuild') asynchronously wrt the index builder thread on the primary. Wait for the +// secondaries to complete the abort. +IndexBuildTest.assertIndexesSoon(coll, 1, ['_id_']); +IndexBuildTest.assertIndexesSoon(secondaryColl, 1, ['_id_']); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/interrupt_compact_commands.js b/jstests/noPassthrough/interrupt_compact_commands.js new file mode 100644 index 0000000000000..e579aa4062806 --- /dev/null +++ b/jstests/noPassthrough/interrupt_compact_commands.js @@ -0,0 +1,131 @@ +/** + * Tests that the compact command is interruptible in the storage engine (WT) layer. + * Loads data such that the storage engine compact command finds data to compress and actually runs. + * Pauses a compact command in the MDB layer, sets interrupt via killOp, and then releases the + * command to discover the interrupt in the storage engine layer. + * + * @tags: [requires_persistence] + */ + +(function() { +"use strict"; + +load("jstests/libs/fail_point_util.js"); +load("jstests/libs/parallelTester.js"); + +/** + * Loads 30000 * 20 documents into collection . via 20 threads. + * Tags each insert with a thread ID. Then deletes half the data, by thread ID, to create holes such + * that WT::compact finds compaction work to do. + */ +function loadData(conn, dbName, collName, coll) { + const kThreads = 20; + + coll.createIndex({t: 1}); + + jsTestLog("Loading data..."); + + const threads = []; + for (let t = 0; t < kThreads; t++) { + let thread = new Thread(function(t, port, dbName, collName) { + const mongo = new Mongo('localhost:' + port); + const testDB = mongo.getDB(dbName); + const testColl = testDB.getCollection(collName); + + // This is a sufficient amount of data for WT::compact to run. If the data size is too + // small, WT::compact skips. 
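// The index build tests above and below assert on the final index set with
// IndexBuildTest.assertIndexesSoon() because the abort is performed by an external thread and may
// not yet have replicated when the createIndexes shell returns. The real helper lives in
// jstests/noPassthrough/libs/index_build.js; a rough sketch of the assumed retry behavior is:
function assertIndexesSoonSketch(coll, numIndexes, readyIndexes, notReadyIndexes) {
    assert.soon(() => {
        try {
            IndexBuildTest.assertIndexes(coll, numIndexes, readyIndexes, notReadyIndexes);
            return true;
        } catch (e) {
            return false;  // Abort not applied on this node yet; retry until the timeout.
        }
    }, "indexes did not reach the expected state");
}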
+ const size = 500; + const count = 25000; + const doc = {a: -1, x: 'x'.repeat(size), b: -1, t: t}; + + let bulkInsert = testColl.initializeUnorderedBulkOp(); + for (var i = 0; i < count; ++i) { + bulkInsert.insert(doc); + } + jsTestLog("Committing inserts, t: " + t); + assert.commandWorked(bulkInsert.execute()); + }, t, conn.port, dbName, collName); + threads.push(thread); + thread.start(); + } + for (let t = 0; t < kThreads; ++t) { + threads[t].join(); + } + + jsTestLog("Pruning data..."); + + for (var t = 0; t < kThreads; t = t + 2) { + coll.deleteMany({t: t}); + } + + jsTestLog("Data setup complete."); +} + +const dbName = jsTestName(); +const collName = 'testColl'; + +const conn = MongoRunner.runMongod(); +assert.neq(conn, null); +const testDB = conn.getDB(dbName); +const testColl = testDB.getCollection(collName); + +loadData(conn, dbName, collName, testColl); + +let fp; +let fpOn = false; +try { + jsTestLog("Setting the failpoint..."); + fp = configureFailPoint(testDB, "pauseCompactCommandBeforeWTCompact"); + fpOn = true; + TestData.comment = "commentOpIdentifier"; + TestData.dbName = dbName; + + let compactJoin = startParallelShell(() => { + jsTestLog("Starting the compact command, which should stall on a failpoint..."); + assert.commandFailedWithCode( + db.getSiblingDB(TestData.dbName) + .runCommand({"compact": "testColl", "comment": TestData.comment}), + ErrorCodes.Interrupted); + }, conn.port); + + jsTestLog("Waiting for the compact command to hit the failpoint..."); + fp.wait(); + + jsTestLog("Finding the compact command opId in order to call killOp..."); + let opId = null; + assert.soon(function() { + const ops = testDB.getSiblingDB("admin") + .aggregate([ + {$currentOp: {allUsers: true}}, + {$match: {"command.comment": TestData.comment}} + ]) + .toArray(); + if (ops.length == 0) { + return false; + } + assert.eq(ops.length, 1); + opId = ops[0].opid; + return true; + }); + jsTestLog("Calling killOp to interrupt the compact command, opId: " + tojson(opId)); + assert.commandWorked(testDB.killOp(opId)); + + jsTestLog("Releasing the failpoint and waiting for the compact command to finish..."); + fp.off(); + fpOn = false; + + compactJoin(); + + // Make sure that WT::compact did not skip because of too little data. + assert( + !checkLog.checkContainsOnce(testDB, "there is no useful work to do - skipping compaction")); +} finally { + if (fpOn) { + jsTestLog("Release the failpoint"); + fp.off(); + } +} + +jsTestLog("Done"); +MongoRunner.stopMongod(conn); +})(); diff --git a/jstests/noPassthrough/interrupt_while_yielded.js b/jstests/noPassthrough/interrupt_while_yielded.js index b063fb98b8c96..69e1332e179f9 100644 --- a/jstests/noPassthrough/interrupt_while_yielded.js +++ b/jstests/noPassthrough/interrupt_while_yielded.js @@ -1,7 +1,7 @@ /** * @tags: [ - * # TODO SERVER-64007: Support yielding in CQF plans. - * cqf_incompatible, + * # TODO SERVER-70446: Enable yielding for index plans in CQF. + * cqf_experimental_incompatible, * ] */ (function() { diff --git a/jstests/noPassthrough/list_collections_large_number.js b/jstests/noPassthrough/list_collections_large_number.js index 379d4ea5dd7a6..2b0b5d53926cd 100644 --- a/jstests/noPassthrough/list_collections_large_number.js +++ b/jstests/noPassthrough/list_collections_large_number.js @@ -31,4 +31,4 @@ assert.commandWorked(db.runCommand({"listCollections": 1})); // Do not validate collections since that is an expensive action. 
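// The interrupt_compact_commands.js test above tags the compact command with a 'comment' and then
// looks it up via $currentOp before calling killOp. That tag-and-kill pattern, condensed into a
// standalone helper for illustration only (the helper name is not from the test):
function killOpByComment(db, comment) {
    let opId = null;
    assert.soon(() => {
        const ops = db.getSiblingDB("admin")
                        .aggregate([
                            {$currentOp: {allUsers: true}},
                            {$match: {"command.comment": comment}}
                        ])
                        .toArray();
        if (ops.length === 0) {
            return false;  // The tagged command has not shown up in currentOp yet.
        }
        assert.eq(ops.length, 1);
        opId = ops[0].opid;
        return true;
    });
    assert.commandWorked(db.killOp(opId));
}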
MongoRunner.stopMongod(conn, undefined, {skipValidation: true}); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/list_indexes_index_build_info.js b/jstests/noPassthrough/list_indexes_index_build_info.js index 69d2c1cc7f103..d0da35470bbd6 100644 --- a/jstests/noPassthrough/list_indexes_index_build_info.js +++ b/jstests/noPassthrough/list_indexes_index_build_info.js @@ -248,7 +248,7 @@ try { 'unique index info does not contain replicationState: ' + tojson(uniqueIndexBuildInfo)); const replicationState = uniqueIndexBuildInfo.replicationState; assert.eq(replicationState.state, - 'Aborted', + 'External abort', 'Unexpected replication state: ' + tojson(uniqueIndexBuildInfo)); assert(replicationState.hasOwnProperty('timestamp'), 'replication state should contain abort timestamp: ' + tojson(uniqueIndexBuildInfo)); diff --git a/jstests/noPassthrough/list_indexes_ready_and_in_progress.js b/jstests/noPassthrough/list_indexes_ready_and_in_progress.js index 17ca6f983d588..26e8e7372c2ac 100644 --- a/jstests/noPassthrough/list_indexes_ready_and_in_progress.js +++ b/jstests/noPassthrough/list_indexes_ready_and_in_progress.js @@ -27,8 +27,7 @@ IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]); assert.commandWorked(coll.insert({a: 1})); IndexBuildTest.pauseIndexBuilds(conn); -const createIdx = - IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {b: 1}, {background: true}); +const createIdx = IndexBuildTest.startIndexBuild(conn, coll.getFullName(), {b: 1}); IndexBuildTest.waitForIndexBuildToScanCollection(testDB, coll.getName(), 'b_1'); // The listIndexes command supports returning all indexes, including ones that are not ready. diff --git a/jstests/noPassthrough/list_indexes_with_build_uuids.js b/jstests/noPassthrough/list_indexes_with_build_uuids.js index 385acd89ce345..f74901a8d462b 100644 --- a/jstests/noPassthrough/list_indexes_with_build_uuids.js +++ b/jstests/noPassthrough/list_indexes_with_build_uuids.js @@ -53,7 +53,7 @@ replSet.awaitReplication(); // Build and finish the first index. assert.commandWorked(primaryDB.runCommand( - {createIndexes: collName, indexes: [{key: {i: 1}, name: firstIndexName, background: true}]})); + {createIndexes: collName, indexes: [{key: {i: 1}, name: firstIndexName}]})); replSet.awaitReplication(); // Start hanging index builds on the secondary. diff --git a/jstests/noPassthrough/log_and_profile_query_hash.js b/jstests/noPassthrough/log_and_profile_query_hash.js index 82dce784dfd50..0418f778b3482 100644 --- a/jstests/noPassthrough/log_and_profile_query_hash.js +++ b/jstests/noPassthrough/log_and_profile_query_hash.js @@ -5,15 +5,11 @@ * requires_profiling, * assumes_read_preference_unchanged, * # TODO SERVER-67607: support query hash in slow query log lines. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - // For getLatestProfilerEntry(). load("jstests/libs/profiler.js"); -load("jstests/libs/sbe_util.js"); // Prevent the mongo shell from gossiping its cluster time, since this will increase the amount // of data logged for each op. 
For some of the testcases below, including the cluster time would @@ -154,5 +150,4 @@ const creationLogList = log.filter( logLine.indexOf('"queryHash":"' + String(onCreationHashes.queryHash)) != -1)); assert.eq(1, creationLogList.length); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/loglong.js b/jstests/noPassthrough/loglong.js index 7e53701d19948..a807d9399384b 100644 --- a/jstests/noPassthrough/loglong.js +++ b/jstests/noPassthrough/loglong.js @@ -53,4 +53,4 @@ function assertLogTruncated(db, t) { assert(found, tojson(log)); } MongoRunner.stopMongod(conn); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/lookup_max_intermediate_size.js b/jstests/noPassthrough/lookup_max_intermediate_size.js index c99a1e836dbb7..08cea2270c369 100644 --- a/jstests/noPassthrough/lookup_max_intermediate_size.js +++ b/jstests/noPassthrough/lookup_max_intermediate_size.js @@ -5,10 +5,7 @@ // ] load("jstests/aggregation/extras/utils.js"); // For assertErrorCode. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. - -(function() { -"use strict"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // Used by testPipeline to sort result documents. All _ids must be primitives. function compareId(a, b) { @@ -119,4 +116,3 @@ assert(sharded.adminCommand({shardCollection: "test.lookUp", key: {_id: 'hashed' runTest(sharded.getDB('test').lookUp, sharded.getDB('test').from, 4568); sharded.stop(); -}()); diff --git a/jstests/noPassthrough/lookup_metrics.js b/jstests/noPassthrough/lookup_metrics.js index 9f2aec0bfdb7d..ca6bf81d04685 100644 --- a/jstests/noPassthrough/lookup_metrics.js +++ b/jstests/noPassthrough/lookup_metrics.js @@ -2,11 +2,7 @@ * Tests that the lookup metrics are recorded correctly in serverStatus. */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'. -load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages' and other explain helpers. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: true}}); assert.neq(null, conn, "mongod was unable to start up"); @@ -17,7 +13,7 @@ if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because either the sbe lookup pushdown feature flag is disabled or" + " sbe itself is disabled"); MongoRunner.stopMongod(conn); - return; + quit(); } assert.commandWorked(db.dropDatabase()); @@ -142,5 +138,4 @@ assert.eq( 4 /* Matching results */); compareLookupCounters(expectedCounters); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/lookup_pushdown.js b/jstests/noPassthrough/lookup_pushdown.js index 641c73d5ff2ad..42979d3051249 100644 --- a/jstests/noPassthrough/lookup_pushdown.js +++ b/jstests/noPassthrough/lookup_pushdown.js @@ -3,11 +3,14 @@ * * @tags: [requires_sharding, uses_transactions] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'. -load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages' and other explain helpers. 
+import { + aggPlanHasStage, + getAggPlanStage, + getAggPlanStages, + hasRejectedPlans, + planHasStage, +} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const JoinAlgorithm = { Classic: 0, @@ -116,7 +119,7 @@ const sbeEnabled = checkSBEEnabled(db); if (!sbeEnabled) { jsTestLog("Skipping test because SBE is disabled"); MongoRunner.stopMongod(conn); - return; + quit(); } let coll = db[name]; @@ -1024,5 +1027,4 @@ assert.commandWorked(db.createView(shardedViewName, name, [{$match: {b: {$gte: 0 [{$lookup: {from: shardedViewName, localField: "a", foreignField: "b", as: "out"}}], JoinAlgorithm.Classic /* expectedJoinAlgorithm */); }()); -st.stop(); -}()); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/lookup_with_limit_sharded.js b/jstests/noPassthrough/lookup_with_limit_sharded.js index 6846db7f0f872..4b9e348e44ad0 100644 --- a/jstests/noPassthrough/lookup_with_limit_sharded.js +++ b/jstests/noPassthrough/lookup_with_limit_sharded.js @@ -12,9 +12,13 @@ * requires_sharding, * ] */ -(function() { -load("jstests/libs/analyze_plan.js"); // For getAggPlanStages(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import { + flattenQueryPlanTree, + getAggPlanStages, + getPlanStage, + getWinningPlan +} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const st = new ShardingTest({shards: 2, config: 1}); const db = st.s.getDB("test"); @@ -22,7 +26,7 @@ const db = st.s.getDB("test"); if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE $lookup is not enabled."); st.stop(); - return; + quit(); } const coll = db.lookup_with_limit; @@ -127,4 +131,3 @@ checkShardedResults(sortPipeline, 0); checkShardedResults(topKSortPipeline, 2); st.stop(); -}()); diff --git a/jstests/noPassthrough/match_expression_optimization_failpoint.js b/jstests/noPassthrough/match_expression_optimization_failpoint.js index 590102ba8e823..622001b792d18 100644 --- a/jstests/noPassthrough/match_expression_optimization_failpoint.js +++ b/jstests/noPassthrough/match_expression_optimization_failpoint.js @@ -1,9 +1,5 @@ // Tests that match expression optimization works properly when the failpoint isn't triggered, and // is disabled properly when it is triggered. -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For aggPlan functions. Random.setRandomSeed(); const conn = MongoRunner.runMongod({}); @@ -38,5 +34,4 @@ const disabledResult = coll.aggregate(pipeline).toArray(); // Test that the result is the same with and without optimizations enabled (result is sorted). 
assert.eq(enabledResult, disabledResult); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/merge_on_secondary.js b/jstests/noPassthrough/merge_on_secondary.js index 14f3454a1387d..0cf46c4c1c2c2 100644 --- a/jstests/noPassthrough/merge_on_secondary.js +++ b/jstests/noPassthrough/merge_on_secondary.js @@ -74,4 +74,4 @@ assert(!res.hasOwnProperty("writeErrors")); assert(!res.hasOwnProperty("writeConcernError")); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js b/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js index f275244f78f75..ad2010bcf0ff7 100644 --- a/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js +++ b/jstests/noPassthrough/merge_out_on_secondary_fail_on_stepdown.js @@ -91,4 +91,4 @@ const outStage = `{$out: "${outputCollName}"}`; runTest(outStage, outFailPoint); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/metadata_size_estimate.js b/jstests/noPassthrough/metadata_size_estimate.js new file mode 100644 index 0000000000000..a052fe0c17337 --- /dev/null +++ b/jstests/noPassthrough/metadata_size_estimate.js @@ -0,0 +1,57 @@ +// Test the impact of having too many roles +// @tags: [requires_sharding] + +(function() { +'use strict'; + +// Use a relatively small record size to more reliably hit a tipping point where the write batching +// logic thinks we have more space available for metadata than we really do. Note also that by using +// small records, we are verifying that the batching logic is accounting for the overhead required +// to serialize each document into a BSONArray. +const kDataBlockSize = 4 * 1024; +const kDataBlock = 'x'.repeat(kDataBlockSize); +const kBSONMaxObjSize = 16 * 1024 * 1024; +const kNumRows = (kBSONMaxObjSize / kDataBlockSize) + 5; + +function runTest(conn) { + const admin = conn.getDB('admin'); + assert.commandWorked(admin.runCommand({createUser: 'admin', pwd: 'pwd', roles: ['root']})); + assert(admin.auth('admin', 'pwd')); + + // Create more than 16KB of role data. + // These roles are grouped into a meta-role to avoid calls to `usersInfo` unexpectedly + // overflowing from duplication of roles/inheritedRoles plus showPrivileges. + const userRoles = []; + for (let i = 0; i < 10000; ++i) { + userRoles.push({db: 'qwertyuiopasdfghjklzxcvbnm_' + i, role: 'read'}); + } + assert.commandWorked( + admin.runCommand({createRole: 'bigRole', roles: userRoles, privileges: []})); + assert.commandWorked(admin.runCommand({createUser: 'user', pwd: 'pwd', roles: ['bigRole']})); + admin.logout(); + + assert(admin.auth('user', 'pwd')); + const db = conn.getDB(userRoles[0].db); + + // Fill a collection with enough rows to necessitate paging. + for (let i = 1; i <= kNumRows; ++i) { + assert.commandWorked(db.myColl.insert({_id: i, data: kDataBlock})); + } + // Verify initial write. + assert.eq(kNumRows, db.myColl.count({})); + + // Create an aggregation which will batch up to kMaxWriteBatchSize or 16MB + // (not counting metadata) + assert.eq(0, db.myColl.aggregate([{"$out": 'yourColl'}]).itcount(), 'Aggregation failed'); + + // Verify the $out stage completed. 
+ assert.eq(db.myColl.count({}), db.yourColl.count({})); + assert.eq(kNumRows, db.yourColl.count({})); +} + +{ + const st = new ShardingTest({mongos: 1, config: 1, shards: 1}); + runTest(st.s0); + st.stop(); +} +})(); diff --git a/jstests/noPassthrough/mirror_reads.js b/jstests/noPassthrough/mirror_reads.js index 4a27724aeef41..9122e6678b423 100644 --- a/jstests/noPassthrough/mirror_reads.js +++ b/jstests/noPassthrough/mirror_reads.js @@ -77,18 +77,19 @@ function sendAndCheckReads({rst, cmd, minRate, maxRate, burstCount}) { return ((readsPending == 0) && (readsSent === readsResolved)); }, "Did not resolve all requests within time limit", 10000); - // The number of mirrored reads processed across all secondaries. - let readsProcessedAsSecondaryTotal = 0; - for (let i = 0; i < secondaries.length; i++) { - const currentSecondaryMirroredReadsStats = getMirroredReadsStats(secondaries[i]); - const processedAsSecondary = currentSecondaryMirroredReadsStats.processedAsSecondary - - initialProcessedAsSecondary[i]; - jsTestLog("Verifying number of reads processed by secondary " + secondaries[i] + ": " + - tojson({processedAsSecondary: processedAsSecondary})); - readsProcessedAsSecondaryTotal += processedAsSecondary; - } - assert.eq(readsProcessedAsSecondaryTotal, readsSucceeded); - assert.eq(readsProcessedAsSecondaryTotal, readsSent); + assert.soon(() => { + // The number of mirrored reads processed across all secondaries. + let readsProcessedAsSecondaryTotal = 0; + for (let i = 0; i < secondaries.length; i++) { + const currentSecondaryMirroredReadsStats = getMirroredReadsStats(secondaries[i]); + const processedAsSecondary = currentSecondaryMirroredReadsStats.processedAsSecondary - + initialProcessedAsSecondary[i]; + jsTestLog("Verifying number of reads processed by secondary " + secondaries[i] + ": " + + tojson({processedAsSecondary: processedAsSecondary})); + readsProcessedAsSecondaryTotal += processedAsSecondary; + } + return readsProcessedAsSecondaryTotal == readsSucceeded && readsSucceeded == readsSent; + }, "Read metrics across secondaries did not converge to expected results", 10000); jsTestLog("Verifying primary statistics: " + tojson({current: currentPrimaryMirroredReadsStats, start: initialPrimaryStats})); diff --git a/jstests/noPassthrough/mongobridge_testcommands.js b/jstests/noPassthrough/mongobridge_testcommands.js index a76a89e8486be..749a02bf5632f 100644 --- a/jstests/noPassthrough/mongobridge_testcommands.js +++ b/jstests/noPassthrough/mongobridge_testcommands.js @@ -5,6 +5,8 @@ * @tags: [ * requires_replication, * requires_sharding, + * # Tests running with experimental CQF behavior require test commands to be enabled. 
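// Back-of-the-envelope check for the constants in metadata_size_estimate.js above (not part of the
// patch): kBSONMaxObjSize / kDataBlockSize = (16 * 1024 * 1024) / (4 * 1024) = 4096, so
// kNumRows = 4096 + 5 = 4101 documents of roughly 4KB each. That is just over 16MB of user data,
// which is enough to force the $out write path to split its batches and so exercises the
// metadata-overhead accounting the test is about.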
+ * cqf_experimental_incompatible, * ] */ diff --git a/jstests/noPassthrough/mr_disk_use.js b/jstests/noPassthrough/mr_disk_use.js index f2d178aa9c2de..22a867b506b9e 100644 --- a/jstests/noPassthrough/mr_disk_use.js +++ b/jstests/noPassthrough/mr_disk_use.js @@ -40,4 +40,4 @@ const res = assert.commandWorked(db.runCommand(mapReduceCmd)); assert.eq(res.results[0], {_id: "a", value: 42}, res); MongoRunner.stopMongod(conn); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/mr_mutable_properties.js b/jstests/noPassthrough/mr_mutable_properties.js index ac33778de5bd0..3564f5994a0f5 100644 --- a/jstests/noPassthrough/mr_mutable_properties.js +++ b/jstests/noPassthrough/mr_mutable_properties.js @@ -17,15 +17,30 @@ const map = function() { }; const reduce = function(key, values) { - // set property on receiver + // Deal with the possibility that the input 'values' may have already been partially reduced. + values = values.reduce(function(acc, current) { + if (current.hasOwnProperty("food")) { + return acc.concat(current.food); + } else { + acc.push(current); + return acc; + } + }, []); + + // Set property on receiver. this.feed = {beat: 1}; - // set property on key arg + // Set property on key arg. key.fed = {mochi: 1}; - // push properties onto values array arg - values.push(this.feed); - values.push(key.fed); + // Push properties onto values array arg, if they are not present in the array already due to + // an earlier reduction. + if (!values.some(obj => obj.hasOwnProperty("beat"))) { + values.push(this.feed); + } + if (!values.some(obj => obj.hasOwnProperty("mochi"))) { + values.push(key.fed); + } // modify each value in the (modified) array arg values.forEach(function(val) { diff --git a/jstests/noPassthrough/nested_sort_merge.js b/jstests/noPassthrough/nested_sort_merge.js index 5d2568e690643..464ee2d0ace86 100644 --- a/jstests/noPassthrough/nested_sort_merge.js +++ b/jstests/noPassthrough/nested_sort_merge.js @@ -2,9 +2,7 @@ * Verifies that nested SORT_MERGE plans are handled correctly by the SBE stage builder. * Intended to reproduce SERVER-61496. */ -(function() { - -load("jstests/libs/analyze_plan.js"); // for 'getPlanStages'. 
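// Context for the mr_mutable_properties.js change above (a general mapReduce contract rather than
// something introduced by this patch): the server may call reduce with values that are themselves
// outputs of earlier reduce calls, so reduce must accept its own output. The simplest shape that
// satisfies this, shown purely for illustration:
const reduceSum = function(key, values) {
    return Array.sum(values);  // numbers in, a number out, so re-reducing the result is safe
};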
+import {getPlanStages} from "jstests/libs/analyze_plan.js"; const conn = MongoRunner.runMongod(); const db = conn.getDB("test"); @@ -97,5 +95,4 @@ for (const doc of queries) { } } } -MongoRunner.stopMongod(conn); -})(); \ No newline at end of file +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/noFetchBonus.js b/jstests/noPassthrough/noFetchBonus.js index 9dd1c197d725e..fd0fa80e60479 100644 --- a/jstests/noPassthrough/noFetchBonus.js +++ b/jstests/noPassthrough/noFetchBonus.js @@ -2,10 +2,7 @@ // requires_replication, // requires_sharding, // ] -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); +import {getRejectedPlans, isIndexOnly, planHasStage} from "jstests/libs/analyze_plan.js"; const st = new ShardingTest({shards: 1, rs: {nodes: 1}, config: 1}); const db = st.s.getDB("test"); @@ -27,5 +24,4 @@ assert.eq(rejected.length, 1, rejected); assert(planHasStage(db, rejected[0], 'SHARDING_FILTER'), explain); assert(planHasStage(db, rejected[0], 'FETCH'), rejected); -st.stop(); -}()); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/no_query_plan_unindexed_child_with_text.js b/jstests/noPassthrough/no_query_plan_unindexed_child_with_text.js new file mode 100644 index 0000000000000..4e298a3a9a0bb --- /dev/null +++ b/jstests/noPassthrough/no_query_plan_unindexed_child_with_text.js @@ -0,0 +1,54 @@ +/** + * Tests that query planning fails when an $or has a text child along with an unindexed child. + * + * @tags: [ + * requires_fcv_71, + * ] + */ +(function() { +"use strict"; + +const conn = MongoRunner.runMongod(); +assert.neq(null, conn, "mongod was unable to start up"); + +const db = conn.getDB("test"); +const coll = db.getCollection(jsTestName()); +coll.drop(); + +assert.commandWorked(coll.insert({x: 1})); + +assert.commandWorked(coll.createIndex({"$**": "text"})); + +assert.commandWorked(coll.createIndex({"indexed": 1})); + +const pipeline = [ + { + $match: { + $and: [{ + $and: [ + {"indexed": {$eq: 1}}, + { + $or: [ + {$text: {$search: "abcd"}}, + {"unindexed": {$eq: 1}}, + ] + }, + ] + }] + } + }, +]; + +assert.throwsWithCode(function() { + coll.aggregate(pipeline); +}, ErrorCodes.NoQueryExecutionPlans); + +assert.commandWorked( + db.adminCommand({configureFailPoint: "disableMatchExpressionOptimization", mode: "alwaysOn"})); + +assert.throwsWithCode(function() { + coll.aggregate(pipeline); +}, ErrorCodes.NoQueryExecutionPlans); + +MongoRunner.stopMongod(conn); +})(); diff --git a/jstests/noPassthrough/non_multikey_ixscan_on_path_with_positional_component.js b/jstests/noPassthrough/non_multikey_ixscan_on_path_with_positional_component.js index a17ee3ed3c1bd..09d3ec293f38a 100644 --- a/jstests/noPassthrough/non_multikey_ixscan_on_path_with_positional_component.js +++ b/jstests/noPassthrough/non_multikey_ixscan_on_path_with_positional_component.js @@ -2,10 +2,8 @@ * Tests that we can execute a query which survived a yield using an index scan on a path containing * a positional component. This test was designed to reproduce SERVER-52589. */ -(function() { -"use strict"; +import {getPlanStage} from "jstests/libs/analyze_plan.js"; -load("jstests/libs/analyze_plan.js"); // For explain helpers. load("jstests/aggregation/extras/utils.js"); // For assertArrayEq. // Configure 'internalQueryExecYieldIterations' such that operations will yield on each PlanExecutor @@ -59,5 +57,4 @@ assert.eq(ixscan.isMultiKey, false, explain); // Now execute the query and validate the result. 
assertArrayEq({actual: cursor.toArray(), expected: [doc]}); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js index 77e6709aa9f28..a20fa003ed825 100644 --- a/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js +++ b/jstests/noPassthrough/optimize_sharded_sample_with_orphaned_docs.js @@ -6,14 +6,11 @@ * requires_replication, * ] */ -(function() { -"use strict"; +import {aggPlanHasStage} from "jstests/libs/analyze_plan.js"; // Deliberately inserts orphans outside of migration. TestData.skipCheckOrphans = true; -load('jstests/libs/analyze_plan.js'); // For aggPlanHasStage(). - // Set up a 2-shard cluster. const st = new ShardingTest({name: jsTestName(), shards: 2, rs: {nodes: 1}}); @@ -150,4 +147,3 @@ runSampleAndConfirmResults({ }); st.stop(); -})(); diff --git a/jstests/noPassthrough/out_majority_read_replset.js b/jstests/noPassthrough/out_majority_read_replset.js index 496520f8a4391..989cd0aedc3bf 100644 --- a/jstests/noPassthrough/out_majority_read_replset.js +++ b/jstests/noPassthrough/out_majority_read_replset.js @@ -6,7 +6,6 @@ "use strict"; load("jstests/libs/write_concern_util.js"); // For stopReplicationOnSecondaries. -load("jstests/libs/feature_flag_util.js"); const rst = new ReplSetTest({nodes: 2, nodeOptions: {enableMajorityReadConcern: ""}}); @@ -57,14 +56,6 @@ const awaitShell = startParallelShell(`{ }`, db.getMongo().port); -// Wait for the $out before restarting the replication when not using point-in-time reads. -if (!FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - assert.soon(function() { - const filter = {"command.aggregate": "sourceColl"}; - return assert.commandWorked(db.currentOp(filter)).inprog.length === 1; - }); -} - // Restart data replication and wait until the new write becomes visible. 
restartReplicationOnSecondaries(rst); rst.awaitLastOpCommitted(); diff --git a/jstests/noPassthrough/out_merge_majority_read.js b/jstests/noPassthrough/out_merge_majority_read.js index 826604cc69434..0fe1c412a281e 100644 --- a/jstests/noPassthrough/out_merge_majority_read.js +++ b/jstests/noPassthrough/out_merge_majority_read.js @@ -13,8 +13,9 @@ (function() { 'use strict'; -// Skip metadata consistency check since the sharded clsuter is started with 0 shards +// Skip metadata consistency checks since the sharded cluster is started with 0 shards TestData.skipCheckMetadataConsistency = true; +TestData.skipCheckRoutingTableConsistency = true; const testServer = MongoRunner.runMongod(); const db = testServer.getDB("test"); diff --git a/jstests/noPassthrough/out_merge_on_secondary_metadata.js b/jstests/noPassthrough/out_merge_on_secondary_metadata.js index 3d9727bef7bcf..9668c16fd411a 100644 --- a/jstests/noPassthrough/out_merge_on_secondary_metadata.js +++ b/jstests/noPassthrough/out_merge_on_secondary_metadata.js @@ -115,4 +115,4 @@ const outPipeline = [{$group: {_id: "$_id", sum: {$sum: "$a"}}}, {$out: outCollN testMetadata(outPipeline, "out_on_secondary_metadata"); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/out_merge_on_secondary_write_concern.js b/jstests/noPassthrough/out_merge_on_secondary_write_concern.js index 1229d898a7fb3..13baa6f381996 100644 --- a/jstests/noPassthrough/out_merge_on_secondary_write_concern.js +++ b/jstests/noPassthrough/out_merge_on_secondary_write_concern.js @@ -75,4 +75,4 @@ const outPipeline = [{$group: {_id: "$_id", sum: {$sum: "$a"}}}, {$out: outColl. testWriteConcern(outPipeline, "out_on_secondary_write_concern"); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/parse_zone_info.js b/jstests/noPassthrough/parse_zone_info.js index bdfdfd5cd34e7..5e88bb6f24e63 100644 --- a/jstests/noPassthrough/parse_zone_info.js +++ b/jstests/noPassthrough/parse_zone_info.js @@ -43,7 +43,7 @@ function testWithGoodTimeZoneDir(tz_good_path) { // changes from the slim-format files. This was fixed in the timelib 2021 series. const corner_coll = testDB.parse_zone_info_corner_cases; - test_dates = [ + let test_dates = [ { test_date: "2020-10-20T19:49:47.634Z", test_date_parts: { diff --git a/jstests/noPassthrough/partial_unique_indexes.js b/jstests/noPassthrough/partial_unique_indexes.js index 3aa5d53f94cf5..04a0f866101e2 100644 --- a/jstests/noPassthrough/partial_unique_indexes.js +++ b/jstests/noPassthrough/partial_unique_indexes.js @@ -49,7 +49,7 @@ assert.commandWorked(testDB.adminCommand( {configureFailPoint: 'WTWriteConflictExceptionForReads', mode: {activationProbability: 0.01}})); assert.commandWorked(testDB.adminCommand( {configureFailPoint: 'WTWriteConflictException', mode: {activationProbability: 0.01}})); -res = benchRun(benchArgs); +let res = benchRun(benchArgs); printjson({res}); assert.commandWorked( diff --git a/jstests/noPassthrough/pin_code_segments_on_startup.js b/jstests/noPassthrough/pin_code_segments_on_startup.js index 41a6826ca74fc..9135fae85ba7d 100644 --- a/jstests/noPassthrough/pin_code_segments_on_startup.js +++ b/jstests/noPassthrough/pin_code_segments_on_startup.js @@ -2,7 +2,8 @@ * Tests that a standalone mongod is able to pin code segments on startup when * 'lockCodeSegmentsInMemory=true'. * TODO (SERVER-75632): Re-enable this test on amazon linux once ulimits are configured. 
- * @tags: [incompatible_with_macos, incompatible_with_windows_tls, incompatible_with_amazon_linux] + * @tags: [requires_increased_memlock_limits, incompatible_with_macos, + * incompatible_with_windows_tls, incompatible_with_amazon_linux] */ (function() { diff --git a/jstests/noPassthrough/pipeline_optimization_failpoint.js b/jstests/noPassthrough/pipeline_optimization_failpoint.js index 543bc9d6a397c..6c5bb123db824 100644 --- a/jstests/noPassthrough/pipeline_optimization_failpoint.js +++ b/jstests/noPassthrough/pipeline_optimization_failpoint.js @@ -1,9 +1,7 @@ // Tests that pipeline optimization works properly when the failpoint isn't triggered, and is // disabled properly when it is triggered. -(function() { -"use strict"; +import {aggPlanHasStage} from "jstests/libs/analyze_plan.js"; -load("jstests/libs/analyze_plan.js"); // For aggPlan functions. Random.setRandomSeed(); const conn = MongoRunner.runMongod({}); @@ -61,5 +59,4 @@ const disabledResult = coll.aggregate(pipeline).toArray(); // Test that the result is the same with and without optimizations enabled. assert.eq(enabledResult, disabledResult); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/plan_cache_group_lookup.js b/jstests/noPassthrough/plan_cache_group_lookup.js index 8975bdf96a0d8..26f7400b708ec 100644 --- a/jstests/noPassthrough/plan_cache_group_lookup.js +++ b/jstests/noPassthrough/plan_cache_group_lookup.js @@ -2,14 +2,11 @@ * Test that plans with $group and $lookup lowered to SBE are cached and invalidated correctly. * @tags: [ * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); const db = conn.getDB("test"); @@ -19,7 +16,7 @@ const foreignColl = db.plan_cache_pipeline_foreign; if (!checkSBEEnabled(db)) { jsTest.log("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } assert.commandWorked(coll.insert({a: 1})); @@ -225,5 +222,4 @@ const groupStage = { db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "trySbeEngine"})); })(); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/plan_cache_hits_and_misses_metrics.js b/jstests/noPassthrough/plan_cache_hits_and_misses_metrics.js index 78b08e13533b9..0516603fd993c 100644 --- a/jstests/noPassthrough/plan_cache_hits_and_misses_metrics.js +++ b/jstests/noPassthrough/plan_cache_hits_and_misses_metrics.js @@ -3,14 +3,11 @@ * is recovered from the plan cache. * * @tags: [ - * # Bonsai optimizer cannot use the plan cache yet. + * # TODO SERVER-67607: Test plan cache with CQF enabled. 
* cqf_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod({}); const db = conn.getDB("plan_cache_hits_and_misses_metrics"); @@ -169,5 +166,4 @@ function runCommandAndCheckPlanCacheMetric( }, ].forEach(testCase => runCommandAndCheckPlanCacheMetric(testCase)); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/plan_cache_index_create.js b/jstests/noPassthrough/plan_cache_index_create.js index 7a37e49bfc465..6ecd6e366d971 100644 --- a/jstests/noPassthrough/plan_cache_index_create.js +++ b/jstests/noPassthrough/plan_cache_index_create.js @@ -4,14 +4,11 @@ * @tags: [ * requires_replication, * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); // For getCachedPlan(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getCachedPlan} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const dbName = "test"; const collName = "coll"; @@ -63,10 +60,7 @@ function runTest({rst, readDB, writeDB}) { // single plan exists. assert.commandWorked(writeDB.runCommand({ createIndexes: collName, - indexes: [ - {key: {y: 1}, name: "less_selective", background: false}, - {key: {z: 1}, name: "least_selective", background: false} - ], + indexes: [{key: {y: 1}, name: "less_selective"}, {key: {z: 1}, name: "least_selective"}], writeConcern: {w: "majority"} })); @@ -102,7 +96,7 @@ function runTest({rst, readDB, writeDB}) { const testDB = db.getSiblingDB(TestData.dbName); assert.commandWorked(testDB.runCommand({ createIndexes: TestData.collName, - indexes: [{key: {x: 1}, name: "most_selective", background: true}], + indexes: [{key: {x: 1}, name: "most_selective"}], writeConcern: {w: "majority"} })); }, writeDB.getMongo().port); @@ -155,7 +149,7 @@ function runTest({rst, readDB, writeDB}) { // Build a "most selective" index in the foreground. assert.commandWorked(writeDB.runCommand({ createIndexes: collName, - indexes: [{key: {x: 1}, name: "most_selective", background: false}], + indexes: [{key: {x: 1}, name: "most_selective"}], writeConcern: {w: "majority"} })); @@ -183,11 +177,10 @@ const secondaryDB = rst.getSecondary().getDB(dbName); if (checkSBEEnabled(primaryDB)) { jsTest.log("Skipping test because SBE is enabled"); rst.stopSet(); - return; + quit(); } runTest({rst: rst, readDB: primaryDB, writeDB: primaryDB}); runTest({rst: rst, readDB: secondaryDB, writeDB: primaryDB}); rst.stopSet(); -})(); diff --git a/jstests/noPassthrough/plan_cache_invalidation.js b/jstests/noPassthrough/plan_cache_invalidation.js index 327fc55d5cf09..d6dfc27be411f 100644 --- a/jstests/noPassthrough/plan_cache_invalidation.js +++ b/jstests/noPassthrough/plan_cache_invalidation.js @@ -3,7 +3,7 @@ * and clearing. * @tags: [ * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ diff --git a/jstests/noPassthrough/plan_cache_list_failed_plans.js b/jstests/noPassthrough/plan_cache_list_failed_plans.js index 3e778a53e3a23..d5b3054c7a4e2 100644 --- a/jstests/noPassthrough/plan_cache_list_failed_plans.js +++ b/jstests/noPassthrough/plan_cache_list_failed_plans.js @@ -1,8 +1,5 @@ // Confirms the $planCacheStats output format includes information about failed plans. 
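// The lookup_* and plan_cache_* edits above and below all apply the same mechanical migration:
// legacy tests wrapped in (function() { ... })() with load() calls become ES modules that import
// their helpers, and early exits switch from 'return' to quit(), since 'return' is not valid at
// module top level. A condensed sketch of the resulting preamble, drawn from those files:
import {checkSBEEnabled} from "jstests/libs/sbe_util.js";

const conn = MongoRunner.runMongod();
const db = conn.getDB("test");
if (!checkSBEEnabled(db)) {
    jsTest.log("Skipping test because SBE is not enabled");
    MongoRunner.stopMongod(conn);
    quit();  // 'return' would be a syntax error at the top level of a module.
}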
-(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod was unable to start up"); @@ -12,7 +9,7 @@ const coll = testDB.test; if (checkSBEEnabled(testDB)) { jsTest.log("Skipping test because SBE is enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } coll.drop(); @@ -50,5 +47,4 @@ const candidatePlanScores = planCacheEntry.candidatePlanScores; assert.eq(candidatePlanScores.length, 2, planCacheEntry); assert.eq(candidatePlanScores[1], 0, planCacheEntry); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/plan_cache_memory_debug_info.js b/jstests/noPassthrough/plan_cache_memory_debug_info.js index a52d1f5aebee3..087c92c27343d 100644 --- a/jstests/noPassthrough/plan_cache_memory_debug_info.js +++ b/jstests/noPassthrough/plan_cache_memory_debug_info.js @@ -3,12 +3,10 @@ * cumulative size of the system's plan caches exceeds a pre-configured threshold. * @tags: [ * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; /** * Creates two indexes for the given collection. In order for plans to be cached, there need to be @@ -86,7 +84,7 @@ const coll = db.plan_cache_memory_debug_info; if (checkSBEEnabled(db)) { jsTest.log("Skipping test because SBE is enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } coll.drop(); @@ -223,5 +221,4 @@ largeQueryCacheEntry = getPlanCacheEntryForFilter(coll, largeQuery); assertCacheEntryHasDebugInfo(largeQueryCacheEntry); assert.gt(largeQueryCacheEntry.estimatedSizeBytes, 10 * 1024, largeQueryCacheEntry); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/plan_cache_metrics.js b/jstests/noPassthrough/plan_cache_metrics.js index 93565a6d13fca..bc9e93c5fb9d5 100644 --- a/jstests/noPassthrough/plan_cache_metrics.js +++ b/jstests/noPassthrough/plan_cache_metrics.js @@ -3,7 +3,7 @@ * and cleared from the cache. * @tags: [ * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ (function() { diff --git a/jstests/noPassthrough/plan_cache_replan_group_lookup.js b/jstests/noPassthrough/plan_cache_replan_group_lookup.js index 941b9ba7d8d86..aede304556640 100644 --- a/jstests/noPassthrough/plan_cache_replan_group_lookup.js +++ b/jstests/noPassthrough/plan_cache_replan_group_lookup.js @@ -4,14 +4,10 @@ * requires_profiling, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getAggPlanStages, getCachedPlan, getPlanStage} from "jstests/libs/analyze_plan.js"; load("jstests/libs/log.js"); // For findMatchingLogLine. load("jstests/libs/profiler.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
-load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStages()' +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); const db = conn.getDB("test"); @@ -644,4 +640,3 @@ if (sbeEnabled) { } MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/plan_cache_replan_sort.js b/jstests/noPassthrough/plan_cache_replan_sort.js index 16c80ea346b2b..23df2cd776561 100644 --- a/jstests/noPassthrough/plan_cache_replan_sort.js +++ b/jstests/noPassthrough/plan_cache_replan_sort.js @@ -5,12 +5,9 @@ * requires_profiling, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getCachedPlan} from "jstests/libs/analyze_plan.js"; load("jstests/libs/profiler.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod({setParameter: {allowDiskUseByDefault: false}}); const db = conn.getDB("test"); @@ -68,4 +65,3 @@ assert.eq( profileObj); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/plan_cache_stats_agg_source.js b/jstests/noPassthrough/plan_cache_stats_agg_source.js index 9c2777e1f04f4..1b2e4be51663c 100644 --- a/jstests/noPassthrough/plan_cache_stats_agg_source.js +++ b/jstests/noPassthrough/plan_cache_stats_agg_source.js @@ -2,14 +2,17 @@ * Tests for the $planCacheStats aggregation metadata source. * @tags: [ * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import { + getAggPlanStage, + getCachedPlan, + getPlanCacheKeyFromShape, + getPlanStage, + getPlanStages, +} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod failed to start up"); @@ -182,4 +185,3 @@ assert.commandWorked(testDb.runCommand({planCacheClear: coll.getName()})); assert.eq(0, coll.aggregate([{$planCacheStats: {}}]).itcount()); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/plan_cache_stats_all_hosts_sharded.js b/jstests/noPassthrough/plan_cache_stats_all_hosts_sharded.js new file mode 100644 index 0000000000000..6b928a1b3f88e --- /dev/null +++ b/jstests/noPassthrough/plan_cache_stats_all_hosts_sharded.js @@ -0,0 +1,56 @@ +// Tests that the $planCacheStats will collect data from all nodes in a shard. +// +// @tags: [ +// assumes_read_concern_unchanged, +// assumes_read_preference_unchanged, +// # TODO SERVER-67607: Test plan cache with CQF enabled. 
+// cqf_experimental_incompatible, +// ] +(function() { +"use strict"; + +load("jstests/sharding/libs/create_sharded_collection_util.js"); + +for (let shardCount = 1; shardCount <= 2; shardCount++) { + const st = new ShardingTest({name: jsTestName(), shards: shardCount, rs: {nodes: 2}}); + + const db = st.s.getDB("test"); + const coll = db.plan_cache_stats_all_servers; + coll.drop(); + const planCache = coll.getPlanCache(); + + CreateShardedCollectionUtil.shardCollectionWithChunks(coll, {a: 1}, [ + {min: {a: MinKey}, max: {a: 5}, shard: st.shard0.shardName}, + {min: {a: 5}, max: {a: MaxKey}, shard: st["shard" + (1 % shardCount).toString()].shardName}, + ]); + + assert.commandWorked(coll.createIndex({b: 1})); + assert.commandWorked(coll.createIndex({c: 1})); + assert.commandWorked(coll.insertOne({a: 1, b: 2, c: 3})); + assert.commandWorked(coll.insertOne({a: 11, b: 12, c: 13})); + + planCache.clear(); + + // Send single shard request to primary node. + assert.eq(1, coll.find({a: 1, b: 2}).readPref("primary").itcount()); + // Send multi shard request to secondary nodes. + assert.eq(1, coll.find({b: 12, c: 13}).readPref("secondary").itcount()); + + // On primary there is only one plan in the plan cache, because the query was sent to a single + // shard + db.getMongo().setReadPref("primary"); + assert.eq(1, coll.aggregate({$planCacheStats: {}}).itcount()); + // On secondaries there is a plan for each shard + db.getMongo().setReadPref("secondary"); + assert.eq(shardCount, coll.aggregate({$planCacheStats: {}}).itcount()); + + // If we set allHosts: true, we return all plans despite any read preference setting. + const totalPlans = 1 + shardCount; + db.getMongo().setReadPref("primary"); + assert.eq(totalPlans, coll.aggregate({$planCacheStats: {allHosts: true}}).itcount()); + db.getMongo().setReadPref("secondary"); + assert.eq(totalPlans, coll.aggregate({$planCacheStats: {allHosts: true}}).itcount()); + + st.stop(); +} +}()); diff --git a/jstests/noPassthrough/point_in_time_lookups.js b/jstests/noPassthrough/point_in_time_lookups.js index 5942f332f5f19..edbb7da6f1c04 100644 --- a/jstests/noPassthrough/point_in_time_lookups.js +++ b/jstests/noPassthrough/point_in_time_lookups.js @@ -4,7 +4,6 @@ * @tags: [ * requires_persistence, * requires_replication, - * featureFlagPointInTimeCatalogLookups, * requires_fcv_70, * ] */ diff --git a/jstests/noPassthrough/point_in_time_lookups_drop_pending.js b/jstests/noPassthrough/point_in_time_lookups_drop_pending.js index d7ba210d818b7..11e413bac1b05 100644 --- a/jstests/noPassthrough/point_in_time_lookups_drop_pending.js +++ b/jstests/noPassthrough/point_in_time_lookups_drop_pending.js @@ -5,7 +5,6 @@ * @tags: [ * requires_persistence, * requires_replication, - * featureFlagPointInTimeCatalogLookups, * requires_fcv_70, * ] */ diff --git a/jstests/noPassthrough/preimages_can_be_inconsistent.js b/jstests/noPassthrough/preimages_can_be_inconsistent.js new file mode 100644 index 0000000000000..f658c6940d9ed --- /dev/null +++ b/jstests/noPassthrough/preimages_can_be_inconsistent.js @@ -0,0 +1,101 @@ +/** + * Test that consistency checks for preimage work as expected. Consistency is defined by performing + * these steps: + * * Fix a nsUUID to scan the preimage collection + * * Obtain all preimage entries of the namespace by sorting in descending order of '_id.ts' and + * '_id.applyIndexOps'. + * * For each entry position and node: + * * The entry exists in the node at that position and is equal across all nodes in that + * position. 
+ * * The entry doesn't exist in the node at that position. + * @tags: [ + * requires_replication, + * ] + */ + +(function() { +"use strict"; + +function getPreImage(collectionIndex, ts) { + const farOffDate = ISODate("2100-01-01"); + const epochSeconds = farOffDate.valueOf() / 1000; + // Return a document inserted with a date really far off into the future. + return { + _id: { + nsUUID: UUID(`3b241101-e2bb-4255-8caf-4136c566a12${collectionIndex}`), + ts: new Timestamp(epochSeconds, ts), + applyOpsIndex: 0, + }, + operationTime: farOffDate, + }; +} + +assert.doesNotThrow(() => { + const replSetTest = new ReplSetTest({name: "replSet", nodes: 2}); + replSetTest.startSet(); + replSetTest.initiate(); + + const primary = replSetTest.getPrimary(); + const secondary = replSetTest.getSecondary(); + + const coll = primary.getDB("config")["system.preimages"]; + const secondaryColl = secondary.getDB("config")["system.preimages"]; + + // Insert documents to the preimages collection. Ensure they are not replicated to secondaries. + coll.insert(getPreImage(1, 0)); + coll.insert(getPreImage(1, 1)); + coll.insert(getPreImage(2, 1)); + coll.insert(getPreImage(3, 1)); + + assert.eq(coll.find({}).itcount(), 4); + assert.eq(secondaryColl.find({}).itcount(), 0); + + // Now insert preimages in the old secondary. + replSetTest.stepUp(secondary); + + const newPrimary = replSetTest.getPrimary(); + const newColl = newPrimary.getDB("config")["system.preimages"]; + newColl.insert(getPreImage(1, 1)); + newColl.insert(getPreImage(2, 1)); + + // Verify that even if the data isn't consistent the test passes as consistency is defined as + // two nodes having entries equal or non-existent starting from the end. + replSetTest.stopSet(); +}); + +const replSetTest = new ReplSetTest({name: "replSet", nodes: 2}); +replSetTest.startSet(); +replSetTest.initiate(); + +const primary = replSetTest.getPrimary(); +const secondary = replSetTest.getSecondary(); + +const coll = primary.getDB("config")["system.preimages"]; +const secondaryColl = secondary.getDB("config")["system.preimages"]; + +// Insert a document to the preimage collection. Ensure it is not replicated to secondaries. +coll.insert(getPreImage(1, 0)); +assert.eq(coll.find({}).itcount(), 1); +assert.eq(secondaryColl.find({}).itcount(), 0); + +// Now insert another document to the secondary, this will cause an inconsistency error when we stop +// the replica set. +replSetTest.stepUp(secondary); + +const newPrimary = replSetTest.getPrimary(); + +const newColl = newPrimary.getDB("config")["system.preimages"]; +newColl.insert(getPreImage(1, 1)); + +// Verify that the two nodes are inconsistent. +assert.throws(() => replSetTest.stopSet()); + +try { + replSetTest.stopSet(); +} catch (e) { + // Verify that the inconsistency is the one we're looking for in preimages. + assert.eq(e.message.includes("Detected preimage entries that have different content"), true); +} +// Tear down the nodes now without checking for consistency. +replSetTest.stopSet(undefined, undefined, {skipCheckDBHashes: true}); +})(); diff --git a/jstests/noPassthrough/profile_operation_metrics.js b/jstests/noPassthrough/profile_operation_metrics.js index 63f3ded21426a..727ea0a091139 100644 --- a/jstests/noPassthrough/profile_operation_metrics.js +++ b/jstests/noPassthrough/profile_operation_metrics.js @@ -8,16 +8,13 @@ * requires_wiredtiger, * # TODO SERVER-71170: docBytesRead for read operations using cqf are reported are higher than * # tests expect. 
- * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. -load("jstests/libs/fixture_helpers.js"); // For isReplSet(). -load("jstests/libs/os_helpers.js"); // For isLinux(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled(). +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +load("jstests/libs/fixture_helpers.js"); // For isReplSet(). +load("jstests/libs/os_helpers.js"); // For isLinux(). +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const dbName = jsTestName(); const collName = 'coll'; @@ -1193,20 +1190,24 @@ const operations = [ }, profileFilter: {op: 'command', 'command.aggregate': collName}, profileAssert: (db, profileDoc) => { - // TODO SERVER-71684: We currently erroneously account for reads from and writes to - // temporary record stores used as spill tables. This test accommodates the erroneous - // behavior. Such accommodation is only necessary for debug builds, since we spill - // artificially in debug builds in order to exercise the query execution engine's - // spilling logic. - // - // The classic engine spills to files outside the storage engine rather than to a - // temporary record store, so it is not subject to SERVER-71684. - if (isDebugBuild(db) && checkSBEEnabled(db)) { - // For $group, we incorporate the number of items spilled into "keysSorted" and the - // number of individual spill events into "sorterSpills". + // In debug builds we spill artificially in order to exercise the query execution + // engine's spilling logic. For $group, we incorporate the number of items spilled into + // "keysSorted" and the number of individual spill events into "sorterSpills". + if (isDebugBuild(db)) { assert.gt(profileDoc.keysSorted, 0); assert.gt(profileDoc.sorterSpills, 0); + } else { + assert.eq(profileDoc.keysSorted, 0); + assert.eq(profileDoc.sorterSpills, 0); + } + // TODO SERVER-71684: We currently erroneously account for reads from and writes to + // temporary record stores used as spill tables. This test accommodates the erroneous + // behavior. Such accommodation is only necessary for debug builds (where we spill + // artificially for test purposes), and when SBE is used. The classic engine spills to + // files outside the storage engine rather than to a temporary record store, so it is + // not subject to SERVER-71684. + if (isDebugBuild(db) && checkSBEEnabled(db)) { assert.gt(profileDoc.docBytesWritten, 0); assert.gt(profileDoc.docUnitsWritten, 0); assert.gt(profileDoc.totalUnitsWritten, 0); @@ -1214,9 +1215,6 @@ const operations = [ assert.eq(profileDoc.docBytesRead, 29 * 100 + profileDoc.docBytesWritten); assert.eq(profileDoc.docUnitsRead, 100 + profileDoc.docUnitsWritten); } else { - assert.eq(profileDoc.keysSorted, 0); - assert.eq(profileDoc.sorterSpills, 0); - assert.eq(profileDoc.docBytesRead, 29 * 100); assert.eq(profileDoc.docUnitsRead, 100); assert.eq(profileDoc.docBytesWritten, 0); @@ -1243,20 +1241,8 @@ const operations = [ }, profileFilter: {op: 'command', 'command.aggregate': collName}, profileAssert: (db, profileDoc) => { - if (isDebugBuild(db) && !checkSBEEnabled(db)) { - // In debug builds, the classic engine does some special spilling for test purposes - // when disk use is disabled. We spill for each of the first 20 documents, spilling - // less often after we reach that limit. 
This 26 is the sum of 20 spills of - // documents in groups 0 through 3 plus 6 additional items spilled for groups 4 - // through 10. - assert.eq(profileDoc.keysSorted, 26); - // This 21 is the sum of 20 debug spills plus 1 final debug spill. - assert.eq(profileDoc.sorterSpills, 21); - } else { - assert.eq(profileDoc.keysSorted, 0); - assert.eq(profileDoc.sorterSpills, 0); - } - + assert.eq(profileDoc.keysSorted, 0); + assert.eq(profileDoc.sorterSpills, 0); assert.eq(profileDoc.docBytesRead, 29 * 100); assert.eq(profileDoc.docUnitsRead, 100); assert.eq(profileDoc.docBytesWritten, 0); @@ -1550,16 +1536,9 @@ const operations = [ }, profileFilter: {op: 'insert', 'command.insert': 'ts', 'command.ordered': true}, profileAssert: (db, profileDoc) => { - // Debug builds may perform extra reads of the _mdb_catalog when updating index entries. - if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db) && isDebugBuild(db)) { - assert.gte(profileDoc.docBytesRead, 216); - assert.gte(profileDoc.docUnitsRead, 2); - assert.gte(profileDoc.cursorSeeks, 2); - } else { - assert.eq(profileDoc.docBytesRead, 207); - assert.eq(profileDoc.docUnitsRead, 2); - assert.eq(profileDoc.cursorSeeks, 2); - } + assert.eq(profileDoc.docBytesRead, 207); + assert.eq(profileDoc.docUnitsRead, 2); + assert.eq(profileDoc.cursorSeeks, 2); assert.eq(profileDoc.docBytesWritten, 233); assert.eq(profileDoc.idxEntryBytesRead, 0); assert.eq(profileDoc.idxEntryUnitsRead, 0); @@ -1585,16 +1564,9 @@ const operations = [ }, profileFilter: {op: 'insert', 'command.insert': 'ts', 'command.ordered': false}, profileAssert: (db, profileDoc) => { - // Debug builds may perform extra reads of the _mdb_catalog when updating index entries. - if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db) && isDebugBuild(db)) { - assert.gte(profileDoc.docBytesRead, 216); - assert.gte(profileDoc.docUnitsRead, 2); - assert.gte(profileDoc.cursorSeeks, 2); - } else { - assert.eq(profileDoc.docBytesRead, 207); - assert.eq(profileDoc.docUnitsRead, 2); - assert.eq(profileDoc.cursorSeeks, 2); - } + assert.eq(profileDoc.docBytesRead, 207); + assert.eq(profileDoc.docUnitsRead, 2); + assert.eq(profileDoc.cursorSeeks, 2); assert.eq(profileDoc.docBytesWritten, 233); assert.eq(profileDoc.idxEntryBytesRead, 0); assert.eq(profileDoc.idxEntryUnitsRead, 0); @@ -1699,4 +1671,3 @@ jsTestLog("Testing replica set"); runTest(db); rst.stopSet(); })(); -})(); diff --git a/jstests/noPassthrough/profile_query_planning_time_metric.js b/jstests/noPassthrough/profile_query_planning_time_metric.js index d8748514015e3..09456028b4173 100644 --- a/jstests/noPassthrough/profile_query_planning_time_metric.js +++ b/jstests/noPassthrough/profile_query_planning_time_metric.js @@ -1,10 +1,6 @@ /** * Tests that the query planning time is captured in the profiler. */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); load("jstests/libs/profiler.js"); // For getLatestProfilerEntry. 
const conn = MongoRunner.runMongod(); @@ -66,5 +62,4 @@ verifyProfilerLog(commandProfilerFilter); coll.findOne({}); verifyProfilerLog(findProfilerFilter); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/queryStats/agg_cmd_one_way_tokenization.js b/jstests/noPassthrough/queryStats/agg_cmd_one_way_tokenization.js new file mode 100644 index 0000000000000..f9f95c4c27f41 --- /dev/null +++ b/jstests/noPassthrough/queryStats/agg_cmd_one_way_tokenization.js @@ -0,0 +1,195 @@ +/** + * Test that $queryStats properly tokenizes aggregation commands, on mongod and mongos. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/query_stats_utils.js"); +(function() { +"use strict"; + +const kHashedDbName = "iDlS7h5jf5HHxWPJpeHRbA+jLTNNZaqxVVkplrEkfko="; +const kHashedCollName = "w6Ax20mVkbJu4wQWAMjL8Sl+DfXAr2Zqdc3kJRB7Oo0="; +const kHashedFieldA = "GDiF6ZEXkeo4kbKyKEAAViZ+2RHIVxBQV9S6b6Lu7gU="; +const kHashedFieldB = "m1xtUkfSpZNxXjNZYKwo86vGD37Zxmd2gtt+TXDO558="; + +function verifyConsistentFields(key) { + assert.eq({"db": `${kHashedDbName}`, "coll": `${kHashedCollName}`}, key.queryShape.cmdNs); + assert.eq("aggregate", key.queryShape.command); + assert.eq({batchSize: "?number"}, key.cursor); + assert.eq(kShellApplicationName, key.client.application.name); +} + +function runTest(conn) { + const db = conn.getDB("testDB"); + const admin = conn.getDB("admin"); + + db.test.drop(); + db.otherColl.drop(); + assert.commandWorked(db.test.insert({a: "foobar", b: 15})); + assert.commandWorked(db.test.insert({a: "foobar", b: 20})); + assert.commandWorked(db.otherColl.insert({a: "foobar", price: 2.50})); + + // First checks proper tokenization on a basic pipeline. + { + db.test + .aggregate([ + {$sort: {a: -1}}, + {$match: {a: {$regex: "foo(.*)"}, b: {$gt: 10}}}, + {$skip: 5}, + ]) + .toArray(); + + const stats = getQueryStatsAggCmd(admin, {transformIdentifiers: true}); + + assert.eq(1, stats.length); + const key = stats[0].key; + verifyConsistentFields(key); + // Make sure there is no otherNss field when there are no secondary namespaces. + assert(!key.hasOwnProperty('otherNss'), key); + // Ensure the query stats key pipeline holds the raw input without optimization (e.g., the + // $sort stays before the $match, as in the raw query). + assert.eq( + [ + {"$sort": {[kHashedFieldA]: -1}}, + { + "$match": { + "$and": [ + {[kHashedFieldA]: {"$regex": "?string"}}, + {[kHashedFieldB]: {"$gt": "?number"}} + ] + } + }, + {"$skip": "?number"} + ], + key.queryShape.pipeline, + key.queryShape.pipeline); + } + + // Checks proper tokenization on another basic pipeline that is a subset of the original + // pipeline to make sure there are separate query stats entries per separate query shape. + { + db.test.aggregate([{$match: {a: {$regex: "foo(.*)"}, b: {$gt: 0}}}]).toArray(); + const stats = getQueryStatsAggCmd(admin, {transformIdentifiers: true}); + + assert.eq(2, stats.length); + const key = stats[0].key; + verifyConsistentFields(key); + // Make sure there is no otherNss field when there are no secondary namespaces. + assert(!key.hasOwnProperty('otherNss'), key); + assert.eq([{ + "$match": { + "$and": [ + {[kHashedFieldA]: {"$regex": "?string"}}, + {[kHashedFieldB]: {"$gt": "?number"}} + ] + } + }], + key.queryShape.pipeline, + key.queryShape.pipeline); + } + // Checks proper tokenization on a pipeline that involves a let variable and a $lookup stage + // that has its own subpipeline and references another namespace. 
+ { + const kHashedOtherCollName = "8Rfz9QKu4P3BbyJ3Zpf5kxlUGx7gMvVk2PXZlJVfikE="; + const kHashedAsOutputName = "OsoJyz+7myXF2CkbE5dKd9DJ1gDAUw5uyt12k1ENQpY="; + const kHashedFieldOrderName = "KcpgS5iaiD5/3BKdQRG5rodz+aEE9FkcTPTYZ+G7cpA="; + const kHashedFieldPrice = "LiAftyHzrbrVhwtTPaiHd8Lu9gUILkWgcP682amX7lI="; + const kHashedFieldMaxPrice = "lFzklZZ6KbbYMBTi8KtTTp1GZCcPaUKUmOe3iko+IF8="; + const kHashedFieldRole = "SGZr91N1v3SFufKI5ww9WSZ4krOXKRpxpS+QshHwyUk="; + + db.test.aggregate([{ + $lookup: { + from: "otherColl", + let: { order_name: "$a", price: "$price"}, + pipeline: [{ + $match: { + $expr: { + $and: [ + { $eq: ["$a", "$$order_name"] }, + { $lte: ["$$price", "$$max_price"] } + ] + } + } + }], + as: "my_output" + }}, + { + $match: {$expr: {$eq: ["$role", "$$USER_ROLES.role"]}} + }], {let: {max_price: 3.00}}).toArray(); + const stats = getQueryStatsAggCmd(admin, {transformIdentifiers: true}); + + assert.eq(3, stats.length); + const key = stats[0].key; + verifyConsistentFields(key); + assert.eq( + [ + { + "$lookup": { + "from": `${kHashedOtherCollName}`, + "as": `${kHashedAsOutputName}`, + "let": { + [kHashedFieldOrderName]: asFieldPath(kHashedFieldA), + [kHashedFieldPrice]: asFieldPath(kHashedFieldPrice) + }, + "pipeline": [{ + "$match": { + "$expr": { + "$and": [ + { + "$eq": [ + asFieldPath(kHashedFieldA), + asVarRef(kHashedFieldOrderName) + ], + }, + { + "$lte": [ + asVarRef(kHashedFieldPrice), + asVarRef(kHashedFieldMaxPrice) + ] + } + ] + } + } + }] + } + }, + { + "$match": { + "$expr": { + "$eq": [ + asFieldPath(kHashedFieldRole), + asVarRef("USER_ROLES." + kHashedFieldRole) + ] + } + } + } + ], + key.queryShape.pipeline, + key.queryShape.pipeline); + assert.eq({[kHashedFieldMaxPrice]: "?number"}, key.queryShape.let); + assert.eq([{"db": `${kHashedDbName}`, "coll": `${kHashedOtherCollName}`}], key.otherNss); + } +} + +const conn = MongoRunner.runMongod({ + setParameter: { + internalQueryStatsRateLimit: -1, + } +}); +runTest(conn); +MongoRunner.stopMongod(conn); + +const st = new ShardingTest({ + mongos: 1, + shards: 1, + config: 1, + rs: {nodes: 1}, + mongosOptions: { + setParameter: { + internalQueryStatsRateLimit: -1, + 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" + } + }, +}); +runTest(st.s); +st.stop(); +}()); diff --git a/jstests/noPassthrough/queryStats/application_name_find.js b/jstests/noPassthrough/queryStats/application_name_find.js new file mode 100644 index 0000000000000..29e887391a490 --- /dev/null +++ b/jstests/noPassthrough/queryStats/application_name_find.js @@ -0,0 +1,39 @@ +/** + * Test that applicationName and namespace appear in queryStats for the find command. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/query_stats_utils.js"); +(function() { +"use strict"; + +const kApplicationName = "MongoDB Shell"; +const kHashedCollName = "w6Ax20mVkbJu4wQWAMjL8Sl+DfXAr2Zqdc3kJRB7Oo0="; +const kHashedFieldName = "lU7Z0mLRPRUL+RfAD5jhYPRRpXBsZBxS/20EzDwfOG4="; + +// Turn on the collecting of queryStats metrics. 
+let options = {
+    setParameter: {internalQueryStatsRateLimit: -1},
+};
+
+const conn = MongoRunner.runMongod(options);
+conn.setLogLevel(3, "query");
+const testDB = conn.getDB('test');
+var coll = testDB[jsTestName()];
+coll.drop();
+
+coll.insert({v: 1});
+coll.insert({v: 2});
+coll.insert({v: 3});
+
+coll.find({v: 1}).toArray();
+
+let queryStats = getQueryStats(conn);
+assert.eq(1, queryStats.length, queryStats);
+assert.eq(kApplicationName, queryStats[0].key.client.application.name, queryStats);
+
+queryStats = getQueryStatsFindCmd(conn, {transformIdentifiers: true});
+assert.eq(1, queryStats.length, queryStats);
+assert.eq(kApplicationName, queryStats[0].key.client.application.name, queryStats);
+
+MongoRunner.stopMongod(conn);
+}());
diff --git a/jstests/noPassthrough/queryStats/clear_query_stats_store.js b/jstests/noPassthrough/queryStats/clear_query_stats_store.js
new file mode 100644
index 0000000000000..4cdf67ebd99e3
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/clear_query_stats_store.js
@@ -0,0 +1,43 @@
+/**
+ * Test that the telemetry store can be cleared when the cache size is reset to 0.
+ * @tags: [featureFlagQueryStats]
+ */
+load("jstests/libs/query_stats_utils.js");  // For verifyMetrics.
+
+(function() {
+"use strict";
+
+// Turn on the collection of telemetry metrics.
+let options = {
+    setParameter: {internalQueryStatsRateLimit: -1, internalQueryStatsCacheSize: "10MB"},
+};
+
+const conn = MongoRunner.runMongod(options);
+const testDB = conn.getDB('test');
+var coll = testDB[jsTestName()];
+coll.drop();
+
+let query = {};
+for (var j = 0; j < 10; ++j) {
+    query["foo.field.xyz." + j] = 1;
+    query["bar.field.xyz." + j] = 2;
+    query["baz.field.xyz." + j] = 3;
+    coll.aggregate([{$match: query}]).itcount();
+}
+
+// Confirm the number of entries in the store and that none have been evicted.
+let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]).toArray();
+assert.eq(telemetryResults.length, 10, telemetryResults);
+assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0);
+
+// Command to clear the cache.
+assert.commandWorked(testDB.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "0MB"}));
+
+// 10 regular queries plus the $queryStats query means 11 entries are evicted when the cache is
+// cleared.
+assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 11);
+
+// Calling $queryStats should fail when the telemetry store size is 0 bytes.
+assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$queryStats: {}}]), 6579000);
+MongoRunner.stopMongod(conn);
+}());
diff --git a/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js b/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js
new file mode 100644
index 0000000000000..811ae2039ed19
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/documentSourceQueryStats_redaction_parameters.js
@@ -0,0 +1,120 @@
+/**
+ * Test the $queryStats hmac properties.
+ * @tags: [featureFlagQueryStats]
+ */
+
+load("jstests/aggregation/extras/utils.js");  // For assertAdminDBErrCodeAndErrMsgContains.
+load("jstests/libs/query_stats_utils.js");
+
+(function() {
+"use strict";
+
+// Assert the expected queryStats key with no hmac.
+function assertQueryStatsKeyWithoutHmac(queryStatsKey) { + assert.eq(queryStatsKey.filter, {"foo": {"$lte": "?number"}}); + assert.eq(queryStatsKey.sort, {"bar": -1}); + assert.eq(queryStatsKey.limit, "?number"); +} + +function runTest(conn) { + const testDB = conn.getDB('test'); + var coll = testDB[jsTestName()]; + coll.drop(); + + coll.insert({foo: 1}); + coll.find({foo: {$lte: 2}}).sort({bar: -1}).limit(2).toArray(); + // Default is no hmac. + assertQueryStatsKeyWithoutHmac(getQueryStatsFindCmd(conn)[0].key.queryShape); + + // Turning on hmac should apply hmac to all field names on all entries, even previously cached + // ones. + const queryStatsKey = getQueryStatsFindCmd(conn, {transformIdentifiers: true})[0]["key"]; + assert.eq(queryStatsKey.queryShape.filter, + {"fNWkKfogMv6MJ77LpBcuPrO7Nq+R+7TqtD+Lgu3Umc4=": {"$lte": "?number"}}); + assert.eq(queryStatsKey.queryShape.sort, {"CDDQIXZmDehLKmQcRxtdOQjMqoNqfI2nGt2r4CgJ52o=": -1}); + assert.eq(queryStatsKey.queryShape.limit, "?number"); + + // Turning hmac back off should preserve field names on all entries, even previously cached + // ones. + const queryStats = getQueryStats(conn)[1]["key"]; + assertQueryStatsKeyWithoutHmac(queryStats.queryShape); + + // Explicitly set transformIdentifiers to false. + assertQueryStatsKeyWithoutHmac( + getQueryStatsFindCmd(conn, {transformIdentifiers: false})[0]["key"].queryShape); + + // Wrong parameter name throws error. + let pipeline = [{$queryStats: {redactFields: true}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, pipeline, 40415, "BSON field '$queryStats.redactFields' is an unknown field."); + + // Wrong parameter name throws error. + pipeline = [{$queryStats: {algorithm: "hmac-sha-256"}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, pipeline, 40415, "BSON field '$queryStats.algorithm' is an unknown field."); + + // Wrong parameter type throws error. + pipeline = [{$queryStats: {transformIdentifiers: {algorithm: 1}}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, + pipeline, + ErrorCodes.TypeMismatch, + "BSON field '$queryStats.transformIdentifiers.algorithm' is the wrong type 'double', expected type 'string'"); + + pipeline = [{$queryStats: {transformIdentifiers: {algorithm: "hmac-sha-256", hmacKey: 1}}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, + pipeline, + ErrorCodes.TypeMismatch, + "BSON field '$queryStats.transformIdentifiers.hmacKey' is the wrong type 'double', expected type 'binData'"); + + // Unsupported algorithm throws error. + pipeline = [{$queryStats: {transformIdentifiers: {algorithm: "hmac-sha-1"}}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, + pipeline, + ErrorCodes.BadValue, + "Enumeration value 'hmac-sha-1' for field '$queryStats.transformIdentifiers.algorithm' is not a valid value."); + + // TransformIdentifiers with missing algorithm throws error. + pipeline = [{$queryStats: {transformIdentifiers: {}}}]; + assertAdminDBErrCodeAndErrMsgContains( + coll, + pipeline, + 40414, + "BSON field '$queryStats.transformIdentifiers.algorithm' is missing but a required field"); + + // Parameter object with unrecognized key throws error. 
+    pipeline =
+        [{$queryStats: {transformIdentifiers: {algorithm: "hmac-sha-256", hmacStrategy: "on"}}}];
+    assertAdminDBErrCodeAndErrMsgContains(
+        coll,
+        pipeline,
+        40415,
+        "BSON field '$queryStats.transformIdentifiers.hmacStrategy' is an unknown field.");
+}
+
+const conn = MongoRunner.runMongod({
+    setParameter: {
+        internalQueryStatsRateLimit: -1,
+        featureFlagQueryStats: true,
+    }
+});
+runTest(conn);
+MongoRunner.stopMongod(conn);
+
+const st = new ShardingTest({
+    mongos: 1,
+    shards: 1,
+    config: 1,
+    rs: {nodes: 1},
+    mongosOptions: {
+        setParameter: {
+            internalQueryStatsRateLimit: -1,
+            'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"
+        }
+    },
+});
+runTest(st.s);
+st.stop();
+}());
diff --git a/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js b/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js
new file mode 100644
index 0000000000000..8eed4aa836cb8
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/feature_flag_off_sampling_rate_on.js
@@ -0,0 +1,49 @@
+/**
+ * Test that calls to read from the telemetry store fail when the feature flag is turned off and
+ * the sampling rate is > 0.
+ */
+import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";
+
+// Set sampling rate to -1.
+let options = {
+    setParameter: {internalQueryStatsRateLimit: -1},
+};
+const conn = MongoRunner.runMongod(options);
+const testdb = conn.getDB('test');
+
+// This test specifically tests error handling when the feature flag is not on.
+// TODO SERVER-65800 This test can be deleted when the feature is on by default.
+if (!conn || FeatureFlagUtil.isEnabled(testdb, "QueryStats")) {
+    jsTestLog(`Skipping test since feature flag is disabled. conn: ${conn}`);
+    if (conn) {
+        MongoRunner.stopMongod(conn);
+    }
+    quit();
+}
+
+var coll = testdb[jsTestName()];
+coll.drop();
+
+// Bulk insert documents to reduce roundtrips and make a timeout on a slow machine less likely.
+const bulk = coll.initializeUnorderedBulkOp();
+for (let i = 1; i <= 20; i++) {
+    bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)});
+}
+assert.commandWorked(bulk.execute());
+
+// A pipeline reading the telemetry store should fail without the feature flag turned on, even
+// though the sampling rate is > 0.
+assert.commandFailedWithCode(
+    testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}),
+    ErrorCodes.QueryFeatureNotAllowed);
+
+// A pipeline with a filter reading the telemetry store also fails without the feature flag turned
+// on, even though the sampling rate is > 0.
+assert.commandFailedWithCode(testdb.adminCommand({
+    aggregate: 1,
+    pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}],
+    cursor: {}
+}),
+                             ErrorCodes.QueryFeatureNotAllowed);
+
+MongoRunner.stopMongod(conn);
\ No newline at end of file
diff --git a/jstests/noPassthrough/queryStats/find_cmd_one_way_tokenization.js b/jstests/noPassthrough/queryStats/find_cmd_one_way_tokenization.js
new file mode 100644
index 0000000000000..f304eca8c228d
--- /dev/null
+++ b/jstests/noPassthrough/queryStats/find_cmd_one_way_tokenization.js
@@ -0,0 +1,69 @@
+/**
+ * Test that $queryStats properly tokenizes find commands, on mongod and mongos.
+ */ +load("jstests/libs/query_stats_utils.js"); +(function() { +"use strict"; + +const kHashedCollName = "w6Ax20mVkbJu4wQWAMjL8Sl+DfXAr2Zqdc3kJRB7Oo0="; +const kHashedFieldName = "lU7Z0mLRPRUL+RfAD5jhYPRRpXBsZBxS/20EzDwfOG4="; + +function runTest(conn) { + const db = conn.getDB("test"); + const admin = conn.getDB("admin"); + + db.test.drop(); + db.test.insert({v: 1}); + + db.test.find({v: 1}).toArray(); + + let queryStats = getQueryStatsFindCmd(admin, {transformIdentifiers: true}); + + assert.eq(1, queryStats.length); + assert.eq("find", queryStats[0].key.queryShape.command); + assert.eq({[kHashedFieldName]: {$eq: "?number"}}, queryStats[0].key.queryShape.filter); + + db.test.insert({v: 2}); + + const cursor = db.test.find({v: {$gt: 0, $lt: 3}}).batchSize(1); + queryStats = getQueryStatsFindCmd(admin, {transformIdentifiers: true}); + // Cursor isn't exhausted, so there shouldn't be another entry yet. + assert.eq(1, queryStats.length); + + assert.commandWorked( + db.runCommand({getMore: cursor.getId(), collection: db.test.getName(), batchSize: 2})); + + queryStats = getQueryStatsFindCmd(admin, {transformIdentifiers: true}); + assert.eq(2, queryStats.length); + assert.eq("find", queryStats[1].key.queryShape.command); + assert.eq({ + "$and": [{[kHashedFieldName]: {"$gt": "?number"}}, {[kHashedFieldName]: {"$lt": "?number"}}] + }, + queryStats[1].key.queryShape.filter); +} + +const conn = MongoRunner.runMongod({ + setParameter: { + internalQueryStatsRateLimit: -1, + featureFlagQueryStats: true, + } +}); +runTest(conn); +MongoRunner.stopMongod(conn); + +const st = new ShardingTest({ + mongos: 1, + shards: 1, + config: 1, + rs: {nodes: 1}, + mongosOptions: { + setParameter: { + internalQueryStatsRateLimit: -1, + featureFlagQueryStats: true, + 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" + } + }, +}); +runTest(st.s); +st.stop(); +}()); diff --git a/jstests/noPassthrough/queryStats/geometry_without_coordinates.js b/jstests/noPassthrough/queryStats/geometry_without_coordinates.js new file mode 100644 index 0000000000000..98ba6ec40aa07 --- /dev/null +++ b/jstests/noPassthrough/queryStats/geometry_without_coordinates.js @@ -0,0 +1,22 @@ +// This test was designed to reproduce SERVER-77430. There was a mistaken assertion in a parser that +// we are interested in proving will not fail here. +// @tags: [featureFlagQueryStats] +(function() { +"use strict"; + +const st = new ShardingTest({ + mongos: 1, + shards: 1, + config: 1, + rs: {nodes: 1}, + mongosOptions: { + setParameter: { + internalQueryStatsRateLimit: -1, + } + }, +}); +const coll = st.s.getDB("test").geometry_without_coordinates; +// This is a query that once mistakenly threw an error. +assert.doesNotThrow(() => coll.find({geo: {$geoIntersects: {$geometry: {x: 40, y: 5}}}}).itcount()); +st.stop(); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js b/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js new file mode 100644 index 0000000000000..78297e416daf6 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_collect_on_mongos.js @@ -0,0 +1,308 @@ +/** + * Test that mongos is collecting query stats metrics. 
+ * @tags: [featureFlagQueryStats] + */ + +load('jstests/libs/query_stats_utils.js'); + +(function() { +"use strict"; + +const setup = () => { + const st = new ShardingTest({ + mongos: 1, + shards: 1, + config: 1, + rs: {nodes: 1}, + mongosOptions: { + setParameter: { + internalQueryStatsRateLimit: -1, + 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" + } + }, + }); + const mongos = st.s; + const db = mongos.getDB("test"); + const coll = db.coll; + coll.insert({v: 1}); + coll.insert({v: 4}); + return st; +}; + +const assertExpectedResults = (results, + expectedQueryStatsKey, + expectedExecCount, + expectedDocsReturnedSum, + expectedDocsReturnedMax, + expectedDocsReturnedMin, + expectedDocsReturnedSumOfSq, + getMores) => { + const {key, metrics} = results; + confirmAllExpectedFieldsPresent(expectedQueryStatsKey, key); + assert.eq(expectedExecCount, metrics.execCount); + assert.docEq({ + sum: NumberLong(expectedDocsReturnedSum), + max: NumberLong(expectedDocsReturnedMax), + min: NumberLong(expectedDocsReturnedMin), + sumOfSquares: NumberLong(expectedDocsReturnedSumOfSq) + }, + metrics.docsReturned); + + const { + firstSeenTimestamp, + latestSeenTimestamp, + lastExecutionMicros, + totalExecMicros, + firstResponseExecMicros + } = metrics; + + // This test can't predict exact timings, so just assert these three fields have been set (are + // non-zero). + assert.neq(lastExecutionMicros, NumberLong(0)); + assert.neq(firstSeenTimestamp.getTime(), 0); + assert.neq(latestSeenTimestamp.getTime(), 0); + + const distributionFields = ['sum', 'max', 'min', 'sumOfSquares']; + for (const field of distributionFields) { + assert.neq(totalExecMicros[field], NumberLong(0)); + assert.neq(firstResponseExecMicros[field], NumberLong(0)); + if (getMores) { + // If there are getMore calls, totalExecMicros fields should be greater than or equal to + // firstResponseExecMicros. + if (field == 'min' || field == 'max') { + // In the case that we've executed multiple queries with the same shape, it is + // possible for the min or max to be equal. + assert.gte(totalExecMicros[field], firstResponseExecMicros[field]); + } else { + assert.gt(totalExecMicros[field], firstResponseExecMicros[field]); + } + } else { + // If there are no getMore calls, totalExecMicros fields should be equal to + // firstResponseExecMicros. + assert.eq(totalExecMicros[field], firstResponseExecMicros[field]); + } + } +}; + +// Assert that, for find queries, no query stats results are written until a cursor has reached +// exhaustion; ensure accurate results once they're written. +{ + const st = setup(); + const db = st.s.getDB("test"); + const collName = "coll"; + const coll = db[collName]; + + const queryStatsKey = { + queryShape: { + cmdNs: {db: "test", coll: "coll"}, + command: "find", + filter: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]}, + }, + readConcern: {level: "local", provenance: "implicitDefault"}, + batchSize: "?number", + client: {application: {name: "MongoDB Shell"}} + }; + + const cursor = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc + + // Since the cursor hasn't been exhausted yet, ensure no query stats results have been written + // yet. + let queryStats = getQueryStats(db); + assert.eq(0, queryStats.length, queryStats); + + // Run a getMore to exhaust the cursor, then ensure query stats results have been written + // accurately. batchSize must be 2 so the cursor recognizes exhaustion. 
+ assert.commandWorked(db.runCommand({ + getMore: cursor.getId(), + collection: coll.getName(), + batchSize: 2 + })); // returns 1 doc, exhausts the cursor + queryStats = getQueryStatsFindCmd(db); + assert.eq(1, queryStats.length, queryStats); + assertExpectedResults(queryStats[0], + queryStatsKey, + /* expectedExecCount */ 1, + /* expectedDocsReturnedSum */ 2, + /* expectedDocsReturnedMax */ 2, + /* expectedDocsReturnedMin */ 2, + /* expectedDocsReturnedSumOfSq */ 4, + /* getMores */ true); + + // Run more queries (to exhaustion) with the same query shape, and ensure query stats results + // are accurate. + coll.find({v: {$gt: 2, $lt: 3}}).batchSize(10).toArray(); // returns 0 docs + coll.find({v: {$gt: 0, $lt: 1}}).batchSize(10).toArray(); // returns 0 docs + coll.find({v: {$gt: 0, $lt: 2}}).batchSize(10).toArray(); // return 1 doc + queryStats = getQueryStatsFindCmd(db); + assert.eq(1, queryStats.length, queryStats); + assertExpectedResults(queryStats[0], + queryStatsKey, + /* expectedExecCount */ 4, + /* expectedDocsReturnedSum */ 3, + /* expectedDocsReturnedMax */ 2, + /* expectedDocsReturnedMin */ 0, + /* expectedDocsReturnedSumOfSq */ 5, + /* getMores */ true); + + st.stop(); +} + +// Assert that, for agg queries, no query stats results are written until a cursor has reached +// exhaustion; ensure accurate results once they're written. +{ + const st = setup(); + const db = st.s.getDB("test"); + const coll = db.coll; + + const queryStatsKey = { + queryShape: { + cmdNs: {db: "test", coll: "coll"}, + command: "aggregate", + pipeline: [ + {$match: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]}}, + {$project: {_id: true, hello: true}} + ] + + }, + cursor: {batchSize: "?number"}, + applicationName: "MongoDB Shell", + }; + + const cursor = coll.aggregate( + [ + {$match: {v: {$gt: 0, $lt: 5}}}, + {$project: {hello: true}}, + ], + {cursor: {batchSize: 1}}); // returns 1 doc + + // Since the cursor hasn't been exhausted yet, ensure no query stats results have been written + // yet. + let queryStats = getQueryStats(db); + assert.eq(0, queryStats.length, queryStats); + + // Run a getMore to exhaust the cursor, then ensure query stats results have been written + // accurately. batchSize must be 2 so the cursor recognizes exhaustion. + assert.commandWorked(db.runCommand({ + getMore: cursor.getId(), + collection: coll.getName(), + batchSize: 2 + })); // returns 1 doc, exhausts the cursor + queryStats = getQueryStatsAggCmd(db); + assert.eq(1, queryStats.length, queryStats); + assertExpectedResults(queryStats[0], + queryStatsKey, + /* expectedExecCount */ 1, + /* expectedDocsReturnedSum */ 2, + /* expectedDocsReturnedMax */ 2, + /* expectedDocsReturnedMin */ 2, + /* expectedDocsReturnedSumOfSq */ 4, + /* getMores */ true); + + // Run more queries (to exhaustion) with the same query shape, and ensure query stats results + // are accurate. 
+ coll.aggregate([ + {$match: {v: {$gt: 0, $lt: 5}}}, + {$project: {hello: true}}, + ]); // returns 2 docs + coll.aggregate([ + {$match: {v: {$gt: 2, $lt: 3}}}, + {$project: {hello: true}}, + ]); // returns 0 docs + coll.aggregate([ + {$match: {v: {$gt: 0, $lt: 2}}}, + {$project: {hello: true}}, + ]); // returns 1 doc + queryStats = getQueryStatsAggCmd(db); + assert.eq(1, queryStats.length, queryStats); + assertExpectedResults(queryStats[0], + queryStatsKey, + /* expectedExecCount */ 4, + /* expectedDocsReturnedSum */ 5, + /* expectedDocsReturnedMax */ 2, + /* expectedDocsReturnedMin */ 0, + /* expectedDocsReturnedSumOfSq */ 9, + /* getMores */ true); + + st.stop(); +} + +// Assert on batchSize-limited find queries that killCursors will write metrics with partial results +// to the query stats store. +{ + const st = setup(); + const db = st.s.getDB("test"); + const collName = "coll"; + const coll = db[collName]; + + const queryStatsKey = { + queryShape: { + cmdNs: {db: "test", coll: "coll"}, + command: "find", + filter: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]}, + }, + readConcern: {level: "local", provenance: "implicitDefault"}, + batchSize: "?number", + client: {application: {name: "MongoDB Shell"}} + }; + + const cursor1 = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc + const cursor2 = coll.find({v: {$gt: 0, $lt: 2}}).batchSize(1); // returns 1 doc + + assert.commandWorked( + db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]})); + const queryStats = getQueryStats(db); + assert.eq(1, queryStats.length); + assertExpectedResults(queryStats[0], + queryStatsKey, + /* expectedExecCount */ 2, + /* expectedDocsReturnedSum */ 2, + /* expectedDocsReturnedMax */ 1, + /* expectedDocsReturnedMin */ 1, + /* expectedDocsReturnedSumOfSq */ 2, + /* getMores */ false); + st.stop(); +} + +// Assert on batchSize-limited agg queries that killCursors will write metrics with partial results +// to the query stats store. +{ + const st = setup(); + const db = st.s.getDB("test"); + const coll = db.coll; + + const queryStatsKey = { + queryShape: { + cmdNs: {db: "test", coll: "coll"}, + command: "aggregate", + pipeline: [{$match: {$and: [{v: {$gt: "?number"}}, {v: {$lt: "?number"}}]}}] + }, + cursor: {batchSize: "?number"}, + applicationName: "MongoDB Shell", + }; + + const cursor1 = coll.aggregate( + [ + {$match: {v: {$gt: 0, $lt: 5}}}, + ], + {cursor: {batchSize: 1}}); // returns 1 doc + const cursor2 = coll.aggregate( + [ + {$match: {v: {$gt: 0, $lt: 2}}}, + ], + {cursor: {batchSize: 1}}); // returns 1 doc + + assert.commandWorked( + db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]})); + const queryStats = getQueryStats(db); + assert.eq(1, queryStats.length); + assertExpectedResults(queryStats[0], + queryStatsKey, + /* expectedExecCount */ 2, + /* expectedDocsReturnedSum */ 2, + /* expectedDocsReturnedMax */ 1, + /* expectedDocsReturnedMin */ 1, + /* expectedDocsReturnedSumOfSq */ 2, + /* getMores */ false); + st.stop(); +} +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_collectionType.js b/jstests/noPassthrough/queryStats/query_stats_collectionType.js new file mode 100644 index 0000000000000..f86e6d11022b6 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_collectionType.js @@ -0,0 +1,111 @@ +/** + * Test that collectionType is returned properly in $queryStats. 
+ * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/query_stats_utils.js"); +(function() { +"use strict"; + +function runTest(conn) { + const testDB = conn.getDB('test'); + + // We create one collection for each corresponding type reported by query stats. + assert.commandWorked(testDB.createCollection(jsTestName() + "_collection")); + assert.commandWorked(testDB.createView( + jsTestName() + "_view", jsTestName() + "_collection", [{$match: {v: {$gt: 42}}}])); + assert.commandWorked( + testDB.createCollection(jsTestName() + "_timeseries", {timeseries: {timeField: "time"}})); + + // Next we run queries over each of the collection types to generate query stats. + + // Base _collection has a few simple documents. + var coll = testDB[jsTestName() + "_collection"]; + coll.insert({v: 1}); + coll.insert({v: 2}); + coll.insert({v: 3}); + coll.find({v: 3}).toArray(); + coll.aggregate([]).toArray(); + + // View _view is over _collection. + coll = testDB[jsTestName() + "_view"]; + coll.find({v: 5}).toArray(); + coll.aggregate([{$match: {v: {$lt: 99}}}]).toArray(); + + // Timeseries collection _timeseries. + coll = testDB[jsTestName() + "_timeseries"]; + coll.insert({v: 1, time: ISODate("2021-05-18T00:00:00.000Z")}); + coll.insert({v: 2, time: ISODate("2021-05-18T01:00:00.000Z")}); + coll.insert({v: 3, time: ISODate("2021-05-18T02:00:00.000Z")}); + coll.find({v: 6}).toArray(); + coll.aggregate().toArray(); + // QueryStats should still be collected for queries run on nonexistent collections. + assert.commandWorked(testDB.runCommand({find: jsTestName() + "_nonExistent", filter: {v: 6}})); + assert.commandWorked( + testDB.runCommand({aggregate: jsTestName() + "_nonExistent", pipeline: [], cursor: {}})); + + // Verify that we have two telemetry entries for the collection type. This assumes we have + // executed one find and one agg query for the given collection type. + function verifyTelemetryForCollectionType(collectionType) { + const telemetry = getQueryStats(conn, { + extraMatch: { + "key.collectionType": collectionType, + "key.queryShape.cmdNs.coll": jsTestName() + "_" + collectionType + } + }); + // We should see one entry for find() and one for aggregate() + // for each collection type. The queries account for the fact + // that find() queries over views are rewritten to + // aggregate(). Ie, the query shapes are different because the + // queries are different. + assert.eq(2, telemetry.length, "Expected result for collection type " + collectionType); + } + + verifyTelemetryForCollectionType("collection"); + verifyTelemetryForCollectionType("view"); + verifyTelemetryForCollectionType("timeseries"); + verifyTelemetryForCollectionType("nonExistent"); + + // Verify that, for views, we capture the original query before it's rewritten. The view would + // include a $gt predicate on 'v'. + const findOnViewShape = + getQueryStats( + conn, {extraMatch: {"key.collectionType": "view", "key.queryShape.command": "find"}})[0] + .key.queryShape; + assert.eq(findOnViewShape.filter, {"v": {"$eq": "?number"}}); + + const aggOnViewShape = + getQueryStats( + conn, + {extraMatch: {"key.collectionType": "view", "key.queryShape.command": "aggregate"}})[0] + .key.queryShape; + assert.eq(aggOnViewShape.pipeline, [{"$match": {"v": {"$lt": "?number"}}}]); +} + +const conn = MongoRunner.runMongod({ + setParameter: { + internalQueryStatsRateLimit: -1, + featureFlagQueryStats: true, + } +}); +runTest(conn); +MongoRunner.stopMongod(conn); + +// TODO Implement this in SERVER-76263. 
+if (false) { + const st = new ShardingTest({ + mongos: 1, + shards: 1, + config: 1, + rs: {nodes: 1}, + mongosOptions: { + setParameter: { + internalQueryStatsSamplingRate: -1, + featureFlagQueryStats: true, + 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" + } + }, + }); + runTest(st.s); + st.stop(); +} +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_feature_flag.js b/jstests/noPassthrough/queryStats/query_stats_feature_flag.js new file mode 100644 index 0000000000000..59043687ea49e --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_feature_flag.js @@ -0,0 +1,29 @@ +/** + * Test that calls to read from telemetry store fail when feature flag is turned off. + */ +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +// This test specifically tests error handling when the feature flag is not on. +// TODO SERVER-65800 this test can be removed when the feature flag is removed. +const conn = MongoRunner.runMongod(); +const testDB = conn.getDB('test'); +if (FeatureFlagUtil.isEnabled(testDB, "QueryStats")) { + jsTestLog("Skipping test since query stats are enabled."); + MongoRunner.stopMongod(conn); + quit(); +} + +// Pipeline to read telemetry store should fail without feature flag turned on. +assert.commandFailedWithCode( + testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), + ErrorCodes.QueryFeatureNotAllowed); + +// Pipeline, with a filter, to read telemetry store fails without feature flag turned on. +assert.commandFailedWithCode(testDB.adminCommand({ + aggregate: 1, + pipeline: [{$queryStats: {}}, {$match: {"key.queryShape.find": {$eq: "###"}}}], + cursor: {} +}), + ErrorCodes.QueryFeatureNotAllowed); + +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/queryStats/query_stats_key.js b/jstests/noPassthrough/queryStats/query_stats_key.js new file mode 100644 index 0000000000000..f4905e00e0b67 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_key.js @@ -0,0 +1,141 @@ +/** + * This test confirms that telemetry store key fields are properly nested and none are missing. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/query_stats_utils.js"); +(function() { +"use strict"; + +function confirmAllMetaFieldsPresent(clientSubObj) { + const kApplicationName = "MongoDB Shell"; + assert.eq(clientSubObj.application.name, kApplicationName); + + { + assert(clientSubObj.hasOwnProperty('driver'), clientSubObj); + assert(clientSubObj.driver.hasOwnProperty("name"), clientSubObj); + assert(clientSubObj.driver.hasOwnProperty("version"), clientSubObj); + } + + { + assert(clientSubObj.hasOwnProperty('os'), clientSubObj); + assert(clientSubObj.os.hasOwnProperty("type"), clientSubObj); + assert(clientSubObj.os.hasOwnProperty("name"), clientSubObj); + assert(clientSubObj.os.hasOwnProperty("architecture"), clientSubObj); + assert(clientSubObj.os.hasOwnProperty("version"), clientSubObj); + } +} + +function confirmAllFieldsPresent(queryStatsEntries) { + const queryShapeFindFields = [ + "cmdNs", + "command", + "filter", + "sort", + "projection", + "hint", + "skip", + "limit", + "singleBatch", + "max", + "min", + "returnKey", + "showRecordId", + "tailable", + "oplogReplay", + "awaitData", + "collation", + "allowDiskUse", + "let" + ]; + + // The outer fields not nested inside queryShape. 
+ const queryStatsKeyFields = [ + "queryShape", + "batchSize", + "comment", + "maxTimeMS", + "noCursorTimeout", + "readConcern", + "allowPartialResults", + "apiDeprecationErrors", + "apiVersion", + "apiStrict", + "collectionType", + "client" + ]; + + for (const entry of queryStatsEntries) { + let fieldCounter = 0; + assert.eq(entry.key.queryShape.command, "find"); + confirmAllMetaFieldsPresent(entry.key.client); + + for (const field in entry.key.queryShape) { + assert(queryShapeFindFields.includes(field)); + fieldCounter++; + } + assert.eq(fieldCounter, queryShapeFindFields.length); + + fieldCounter = 0; + for (const field in entry.key) { + assert(queryStatsKeyFields.includes(field)); + fieldCounter++; + } + assert.eq(fieldCounter, queryStatsKeyFields.length, entry.key); + } +} + +// Turn on the collecting of telemetry metrics. +let options = { + setParameter: {internalQueryStatsRateLimit: -1}, +}; + +const conn = MongoRunner.runMongod(options); +const testDB = conn.getDB('test'); +var coll = testDB[jsTestName()]; +coll.drop(); + +// Have to create an index for hint not to fail. +assert.commandWorked(coll.createIndex({v: 1})); + +let commandObj = { + find: coll.getName(), + filter: {v: {$eq: 2}}, + oplogReplay: true, + comment: "this is a test!!", + min: {"v": 0}, + max: {"v": 4}, + hint: {"v": 1}, + sort: {a: -1}, + returnKey: false, + noCursorTimeout: true, + showRecordId: false, + tailable: false, + awaitData: false, + allowPartialResults: true, + skip: 1, + limit: 2, + maxTimeMS: 500, + collation: {locale: "en_US", strength: 2}, + allowDiskUse: true, + readConcern: {level: "local"}, + batchSize: 2, + singleBatch: true, + let : {}, + projection: {_id: 0}, + apiDeprecationErrors: false, + apiVersion: "1", + apiStrict: false, +}; + +assert.commandWorked(testDB.runCommand(commandObj)); +let telemetry = getQueryStats(conn); +assert.eq(1, telemetry.length); +confirmAllFieldsPresent(telemetry); + +// $hint can only be string(index name) or object (index spec). +assert.throwsWithCode(() => { + coll.find({v: {$eq: 2}}).hint({'v': 60, $hint: -128}).itcount(); +}, ErrorCodes.FailedToParse); + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js b/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js new file mode 100644 index 0000000000000..aa6a679d5d94a --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_metrics_across_getMore_calls.js @@ -0,0 +1,174 @@ +/** + * Test that the queryStats metrics are aggregated properly by distinct query shape over getMore + * calls. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/query_stats_utils.js"); // For verifyMetrics and getQueryStatsAggCmd. + +(function() { +"use strict"; + +// Turn on the collecting of queryStats metrics. +let options = { + setParameter: {internalQueryStatsRateLimit: -1}, +}; + +const conn = MongoRunner.runMongod(options); +const testDB = conn.getDB('test'); +var coll = testDB[jsTestName()]; +coll.drop(); + +// Bulk insert documents to reduces roundtrips and make timeout on a slow machine less likely. +const bulk = coll.initializeUnorderedBulkOp(); +const numDocs = 100; +for (let i = 0; i < numDocs / 2; ++i) { + bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)}); + bulk.insert({foo: 1, bar: Math.floor(Math.random() * -2)}); +} +assert.commandWorked(bulk.execute()); +// Assert that two queries with identical structures are represented by the same key. 
+{ + // Note: toArray() is necessary for the batchSize-limited query to run to cursor exhaustion + // (when it writes to the queryStats store). + coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}).toArray(); + coll.aggregate([{$match: {foo: 0}}], {cursor: {batchSize: 2}}).toArray(); + + // This command will return all queryStats store entries. + const queryStatsResults = getQueryStatsAggCmd(testDB); + // Assert there is only one entry. + assert.eq(queryStatsResults.length, 1, queryStatsResults); + const queryStatsEntry = queryStatsResults[0]; + jsTestLog(queryStatsEntry); + assert.eq(queryStatsEntry.key.queryShape.cmdNs.db, "test"); + assert.eq(queryStatsEntry.key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(queryStatsEntry.key.client.application.name, "MongoDB Shell"); + + // Assert we update the execution count for identically shaped queries. + assert.eq(queryStatsEntry.metrics.execCount, 2); + + // Assert queryStats values are accurate for the two above queries. + assert.eq(queryStatsEntry.metrics.docsReturned.sum, numDocs); + assert.eq(queryStatsEntry.metrics.docsReturned.min, numDocs / 2); + assert.eq(queryStatsEntry.metrics.docsReturned.max, numDocs / 2); + + verifyMetrics(queryStatsResults); +} + +const fooEqBatchSize = 5; +const fooNeBatchSize = 3; +// Assert on batchSize-limited queries that killCursors will write metrics with partial results to +// the queryStats store. +{ + let cursor1 = coll.find({foo: {$eq: 0}}).batchSize(fooEqBatchSize); + let cursor2 = coll.find({foo: {$ne: 0}}).batchSize(fooNeBatchSize); + // Issue one getMore for the first query, so 2 * fooEqBatchSize documents are returned total. + assert.commandWorked(testDB.runCommand( + {getMore: cursor1.getId(), collection: coll.getName(), batchSize: fooEqBatchSize})); + + // Kill both cursors so the queryStats metrics are stored. + assert.commandWorked(testDB.runCommand( + {killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]})); + + // This filters queryStats entries to just the ones recorded when running the above find queries. + const queryStatsResults = testDB.getSiblingDB("admin") + .aggregate([ + {$queryStats: {}}, + {$match: {"key.queryShape.filter.foo": {$exists: true}}}, + {$sort: {key: 1}}, + ]) + .toArray(); + assert.eq(queryStatsResults.length, 2, queryStatsResults); + assert.eq(queryStatsResults[0].key.queryShape.cmdNs.db, "test"); + assert.eq(queryStatsResults[0].key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(queryStatsResults[0].key.client.application.name, "MongoDB Shell"); + assert.eq(queryStatsResults[1].key.queryShape.cmdNs.db, "test"); + assert.eq(queryStatsResults[1].key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(queryStatsResults[1].key.client.application.name, "MongoDB Shell"); + + assert.eq(queryStatsResults[0].metrics.execCount, 1); + assert.eq(queryStatsResults[1].metrics.execCount, 1); + assert.eq(queryStatsResults[0].metrics.docsReturned.sum, fooEqBatchSize * 2); + assert.eq(queryStatsResults[1].metrics.docsReturned.sum, fooNeBatchSize); + + verifyMetrics(queryStatsResults); + + const distributionFields = ['sum', 'max', 'min', 'sumOfSquares']; + for (const field of distributionFields) { + // If there are getMore calls, queryExecMicros should be greater than or equal to + // firstResponseExecMicros. + assert.gt(queryStatsResults[0].metrics.totalExecMicros[field], + queryStatsResults[0].metrics.firstResponseExecMicros[field]); + + // If there are no getMore calls, firstResponseExecMicros and queryExecMicros should be + // equal.
+ assert.eq(queryStatsResults[1].metrics.totalExecMicros[field], + queryStatsResults[1].metrics.firstResponseExecMicros[field]); + } +} + +// Assert that options such as limit/sort create different keys, and that repeating a query shape +// ({foo: {$eq}}) aggregates metrics across executions. +{ + const query2Limit = 50; + coll.find({foo: {$eq: 0}}).batchSize(2).toArray(); + coll.find({foo: {$eq: 1}}).limit(query2Limit).batchSize(2).toArray(); + coll.find().sort({"foo": 1}).batchSize(2).toArray(); + // This filters queryStats entries to just the ones recorded when running the above find queries. + let queryStatsResults = + testDB.getSiblingDB("admin") + .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.command": "find"}}]) + .toArray(); + assert.eq(queryStatsResults.length, 4, queryStatsResults); + + verifyMetrics(queryStatsResults); + + // This filters to just the queryStats for query coll.find().sort({"foo": 1}).batchSize(2). + queryStatsResults = + testDB.getSiblingDB("admin") + .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.sort.foo": 1}}]) + .toArray(); + assert.eq(queryStatsResults.length, 1, queryStatsResults); + assert.eq(queryStatsResults[0].key.queryShape.cmdNs.db, "test"); + assert.eq(queryStatsResults[0].key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(queryStatsResults[0].key.client.application.name, "MongoDB Shell"); + assert.eq(queryStatsResults[0].metrics.execCount, 1); + assert.eq(queryStatsResults[0].metrics.docsReturned.sum, numDocs); + + // This filters to just the queryStats for query coll.find({foo: {$eq: + // 1}}).limit(query2Limit).batchSize(2). + queryStatsResults = + testDB.getSiblingDB("admin") + .aggregate([{$queryStats: {}}, {$match: {"key.queryShape.limit": '?number'}}]) + .toArray(); + assert.eq(queryStatsResults.length, 1, queryStatsResults); + assert.eq(queryStatsResults[0].key.queryShape.cmdNs.db, "test"); + assert.eq(queryStatsResults[0].key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(queryStatsResults[0].key.client.application.name, "MongoDB Shell"); + assert.eq(queryStatsResults[0].metrics.execCount, 1); + assert.eq(queryStatsResults[0].metrics.docsReturned.sum, query2Limit); + + // This filters to just the queryStats for query coll.find({foo: {$eq: 0}}).batchSize(2). + queryStatsResults = testDB.getSiblingDB("admin") + .aggregate([ + {$queryStats: {}}, + { + $match: { + "key.queryShape.filter.foo": {$eq: {$eq: "?number"}}, + "key.queryShape.limit": {$exists: false}, + "key.queryShape.sort": {$exists: false} + } + } + ]) + .toArray(); + assert.eq(queryStatsResults.length, 1, queryStatsResults); + assert.eq(queryStatsResults[0].key.queryShape.cmdNs.db, "test"); + assert.eq(queryStatsResults[0].key.queryShape.cmdNs.coll, jsTestName()); + assert.eq(queryStatsResults[0].key.client.application.name, "MongoDB Shell"); + assert.eq(queryStatsResults[0].metrics.execCount, 2); + assert.eq(queryStatsResults[0].metrics.docsReturned.sum, numDocs / 2 + 2 * fooEqBatchSize); + assert.eq(queryStatsResults[0].metrics.docsReturned.max, numDocs / 2); + assert.eq(queryStatsResults[0].metrics.docsReturned.min, 2 * fooEqBatchSize); +} + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_regex.js b/jstests/noPassthrough/queryStats/query_stats_regex.js new file mode 100644 index 0000000000000..b910df94f4edb --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_regex.js @@ -0,0 +1,36 @@ +/** + * Test that telemetry works properly for a find command that uses regex.
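+ * The regex literal itself is expected to be abstracted in the recorded query shape, e.g. a filter + * of {foo: {$regex: "/^ABC/i"}} should be recorded as {foo: {$regex: "?string"}}.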
+ * @tags: [featureFlagQueryStats] + */ +(function() { +"use strict"; + +load("jstests/libs/query_stats_utils.js"); // For getQueryStats. + +// Turn on the collecting of telemetry metrics. +let options = { + setParameter: {internalQueryStatsRateLimit: -1}, +}; + +const conn = MongoRunner.runMongod(options); +const testDB = conn.getDB('test'); +var coll = testDB[jsTestName()]; +coll.drop(); + +const bulk = coll.initializeUnorderedBulkOp(); +const numDocs = 100; +for (let i = 0; i < numDocs / 2; ++i) { + bulk.insert({foo: "ABCDE"}); + bulk.insert({foo: "CDEFG"}); +} +assert.commandWorked(bulk.execute()); + +{ + coll.find({foo: {$regex: "/^ABC/i"}}).itcount(); + let queryStats = getQueryStats(testDB); + assert.eq(1, queryStats.length, queryStats); + assert.eq({"foo": {"$regex": "?string"}}, queryStats[0].key.queryShape.filter); +} + +MongoRunner.stopMongod(conn); +}()); diff --git a/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js b/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js new file mode 100644 index 0000000000000..481dc2ba49e04 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_sampling_rate.js @@ -0,0 +1,32 @@ +/** + * Test that calls to read from telemetry store fail when sampling rate is not greater than 0 even + * if feature flag is on. + * @tags: [featureFlagQueryStats] + */ +let options = { + setParameter: {internalQueryStatsRateLimit: 0}, +}; + +const conn = MongoRunner.runMongod(options); +const testdb = conn.getDB('test'); +var coll = testdb[jsTestName()]; +coll.drop(); +for (var i = 0; i < 20; i++) { + coll.insert({foo: 0, bar: Math.floor(Math.random() * 3)}); +} + +coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}); + +// Reading telemetry store with a sampling rate of 0 should return 0 documents. +let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}); +assert.eq(telStore.cursor.firstBatch.length, 0); + +// Reading telemetry store should work now with a sampling rate of greater than 0. +assert.commandWorked( + testdb.adminCommand({setParameter: 1, internalQueryStatsRateLimit: 2147483647})); +coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}); +telStore = assert.commandWorked( + testdb.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}})); +assert.eq(telStore.cursor.firstBatch.length, 1); + +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js new file mode 100644 index 0000000000000..0d9720a6040d5 --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_server_status_metrics.js @@ -0,0 +1,214 @@ +/** + * Test the telemetry related serverStatus metrics. + * @tags: [featureFlagQueryStats] + */ +function runTestWithMongodOptions(mongodOptions, test, testOptions) { + const conn = MongoRunner.runMongod(mongodOptions); + const testDB = conn.getDB('test'); + const coll = testDB[jsTestName()]; + + test(conn, testDB, coll, testOptions); + + MongoRunner.stopMongod(conn); +} + +// Helper to round up to the next highest power of 2 for our estimation. +function align(number) { + return Math.pow(2, Math.ceil(Math.log2(number))); +} + +function addApprox2MBOfStatsData(testDB, coll) { + const k2MB = 2 * 1024 * 1024; + + const cmdObjTemplate = { + find: coll.getName(), + filter: {foo123: {$eq: "?"}}, + }; + + const kEstimatedEntrySizeBytes = (() => { + // Metrics stored per shape. 
+ const kNumCountersAndDates = + 4 /* top-level */ + (4 * 3) /* those with sum, min, max, sumOfSquares */; + + // Just a sample, will change based on where the test is run - shouldn't be off by too much + // though. + const kClientMetadataEst = { + client: {application: {name: "MongoDB Shell"}}, + driver: {name: "MongoDB Internal Client", version: "7.1.0-alpha"}, + os: {type: "Linux", name: "Ubuntu", architecture: "aarch64", version: "22.04"} + }; + + const kCmdNsObj = {cmdNs: {db: testDB.getName(), coll: coll.getName()}}; + + // This is likely not to be exact - we are probably forgetting something. But we don't need + // to be exact, just "good enough." + return align(kNumCountersAndDates * 4 + Object.bsonsize(cmdObjTemplate) + + Object.bsonsize(kClientMetadataEst) + Object.bsonsize(kCmdNsObj)); + })(); + const nIterations = k2MB / kEstimatedEntrySizeBytes; + for (let i = 0; i <= nIterations; i++) { + let newQuery = {["foo" + i]: "bar"}; + const cmdObj = cmdObjTemplate; + cmdObj.filter = newQuery; + const cmdRes = assert.commandWorked(testDB.runCommand(cmdObj)); + new DBCommandCursor(testDB, cmdRes).itcount(); + } +} +/** + * Test serverStatus metric which counts the number of evicted entries. + * + * testOptions must include `resetCacheSize` bool field; e.g., { resetCacheSize : true } + */ +function evictionTest(conn, testDB, coll, testOptions) { + const evictedBefore = testDB.serverStatus().metrics.queryStats.numEvicted; + assert.eq(evictedBefore, 0); + addApprox2MBOfStatsData(testDB, coll); + if (!testOptions.resetCacheSize) { + const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted; + assert.gt(evictedAfter, 0); + return; + } + // Make sure number of evicted entries increases when the cache size is reset, which forces out + // least recently used entries to meet the new, smaller size requirement. + assert.eq(testDB.serverStatus().metrics.queryStats.numEvicted, 0); + assert.commandWorked( + testDB.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "1MB"})); + const evictedAfter = testDB.serverStatus().metrics.queryStats.numEvicted; + assert.gt(evictedAfter, 0); +} + +/** + * Test serverStatus metric which counts the number of requests for which telemetry is not collected + * due to rate-limiting. + * + * testOptions must include `samplingRate` and `numRequests` number fields; + * e.g., { samplingRate: -1, numRequests: 20 } + */ +function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) { + const numRateLimitedRequestsBefore = + testDB.serverStatus().metrics.queryStats.numRateLimitedRequests; + assert.eq(numRateLimitedRequestsBefore, 0); + + coll.insert({a: 0}); + + // Running numRequests / 2 times since we dispatch two requests per iteration + for (var i = 0; i < testOptions.numRequests / 2; i++) { + coll.find({a: 0}).toArray(); + coll.aggregate([{$match: {a: 1}}]); + } + + const numRateLimitedRequestsAfter = + testDB.serverStatus().metrics.queryStats.numRateLimitedRequests; + + if (testOptions.samplingRate === 0) { + // Telemetry should not be collected for any requests. + assert.eq(numRateLimitedRequestsAfter, testOptions.numRequests); + } else if (testOptions.samplingRate >= testOptions.numRequests) { + // Telemetry should be collected for all requests. + assert.eq(numRateLimitedRequestsAfter, 0); + } else { + // Telemetry should be collected for some but not all requests. 
+ assert.gt(numRateLimitedRequestsAfter, 0); + assert.lt(numRateLimitedRequestsAfter, testOptions.numRequests); + } +} + +function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) { + assert.eq(testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes, 0); + let halfWayPointSize; + // Only using three digit numbers (eg 100, 101) means the string length will be the same for all + // entries and therefore the key size will be the same for all entries, which makes predicting + // the total size of the store clean and easy. + for (var i = 100; i < 200; i++) { + coll.aggregate([{$match: {["foo" + i]: "bar"}}]).itcount(); + if (i == 150) { + halfWayPointSize = + testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes; + } + } + // Confirm that telemetry store has grown and size is non-zero. + assert.gt(halfWayPointSize, 0); + const fullSize = testDB.serverStatus().metrics.queryStats.queryStatsStoreSizeEstimateBytes; + assert.gt(fullSize, 0); + // Make sure the final telemetry store size is twice as much as the halfway point size (+/- 5%) + assert(fullSize >= halfWayPointSize * 1.95 && fullSize <= halfWayPointSize * 2.05, + tojson({fullSize, halfWayPointSize})); +} + +function telemetryStoreWriteErrorsTest(conn, testDB, coll, testOptions) { + const debugBuild = testDB.adminCommand('buildInfo').debug; + if (debugBuild) { + jsTestLog("Skipping telemetry store write errors test because debug build will tassert."); + return; + } + + const errorsBefore = testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors; + assert.eq(errorsBefore, 0); + for (let i = 0; i < 5; i++) { + // Command should succeed and record the error. + let query = {}; + query["foo" + i] = "bar"; + coll.aggregate([{$match: query}]).itcount(); + } + + // Make sure that we recorded a write error for each run. + assert.eq(testDB.serverStatus().metrics.queryStats.numQueryStatsStoreWriteErrors, 5); +} + +/** + * In this configuration, we insert enough entries into the telemetry store to trigger LRU + * eviction. + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsCacheSize: "1MB", internalQueryStatsRateLimit: -1}, +}, + evictionTest, + {resetCacheSize: false}); +/** + * In this configuration, eviction is triggered only when the telemetry store size is reset. + * + * Use an 8MB upper limit since our estimated size of the query stats entry is pretty rough and + * meant to give us some wiggle room so we don't have to keep adjusting this test as we tweak it. + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsCacheSize: "8MB", internalQueryStatsRateLimit: -1}, +}, + evictionTest, + {resetCacheSize: true}); + +/** + * In this configuration, every query is sampled, so no requests should be rate-limited. + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsRateLimit: -1}, +}, + countRateLimitedRequestsTest, + {samplingRate: 2147483647, numRequests: 20}); + +/** + * In this configuration, the sampling rate is set so that some but not all requests are + * rate-limited. 
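+ * For instance, with internalQueryStatsRateLimit set to 10 and 20 requests issued, the count of + * rate-limited requests is only expected to land strictly between 0 and 20 (see + * countRateLimitedRequestsTest above).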
+ */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsRateLimit: 10}, +}, + countRateLimitedRequestsTest, + {samplingRate: 10, numRequests: 20}); + +/** + * Sample all queries and assert that the size of telemetry store is equal to num entries * entry + * size + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsRateLimit: -1}, +}, + telemetryStoreSizeEstimateTest); + +/** + * Use a very small telemetry store size and assert that errors in writing to the telemetry store + * are tracked. + */ +runTestWithMongodOptions({ + setParameter: {internalQueryStatsCacheSize: "0.00001MB", internalQueryStatsRateLimit: -1}, +}, + telemetryStoreWriteErrorsTest); \ No newline at end of file diff --git a/jstests/noPassthrough/queryStats/query_stats_upgrade.js b/jstests/noPassthrough/queryStats/query_stats_upgrade.js new file mode 100644 index 0000000000000..1e48c768bf6de --- /dev/null +++ b/jstests/noPassthrough/queryStats/query_stats_upgrade.js @@ -0,0 +1,38 @@ +/** + * Test that telemetry doesn't work on a lower FCV version but works after an FCV upgrade. + * @tags: [featureFlagQueryStats] + */ +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +const dbpath = MongoRunner.dataPath + jsTestName(); +let conn = MongoRunner.runMongod({dbpath: dbpath}); +let testDB = conn.getDB(jsTestName()); +// This test should only be run with the flag enabled. +assert(FeatureFlagUtil.isEnabled(testDB, "QueryStats")); + +function testLower(restart = false) { + let adminDB = conn.getDB("admin"); + assert.commandWorked(adminDB.runCommand( + {setFeatureCompatibilityVersion: binVersionToFCV("last-lts"), confirm: true})); + if (restart) { + MongoRunner.stopMongod(conn); + conn = MongoRunner.runMongod({dbpath: dbpath, noCleanData: true}); + testDB = conn.getDB(jsTestName()); + adminDB = conn.getDB("admin"); + } + + assert.commandFailedWithCode( + testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), 6579000); + + // Upgrade FCV. + assert.commandWorked(adminDB.runCommand( + {setFeatureCompatibilityVersion: binVersionToFCV("latest"), confirm: true})); + + // We should be able to run a telemetry pipeline now that the FCV is correct. + assert.commandWorked( + testDB.adminCommand({aggregate: 1, pipeline: [{$queryStats: {}}], cursor: {}}), + ); +} +testLower(true); +testLower(false); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js b/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js similarity index 91% rename from jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js rename to jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js index 25cac47555efb..fb16331a3b5e6 100644 --- a/jstests/noPassthrough/telemetry/redact_queries_with_nonobject_fields.js +++ b/jstests/noPassthrough/queryStats/redact_queries_with_nonobject_fields.js @@ -1,15 +1,10 @@ /** * Test that telemetry key generation works for queries with non-object fields. - * @tags: [featureFlagTelemetry] + * @tags: [featureFlagQueryStats] */ -load('jstests/libs/analyze_plan.js'); - -(function() { -"use strict"; - // Turn on the collecting of telemetry metrics. 
let options = { - setParameter: {internalQueryConfigureTelemetrySamplingRate: -1}, + setParameter: {internalQueryStatsRateLimit: -1}, }; const conn = MongoRunner.runMongod(options); @@ -72,5 +67,4 @@ confirmAggSuccess( } }]); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/queryStats/repl_set_query_stats_key.js b/jstests/noPassthrough/queryStats/repl_set_query_stats_key.js new file mode 100644 index 0000000000000..98c86f65cb155 --- /dev/null +++ b/jstests/noPassthrough/queryStats/repl_set_query_stats_key.js @@ -0,0 +1,89 @@ +/** + * This test confirms that queryStats store key fields specific to replica sets (readConcern and + * readPreference) are included and correctly shapified. General command fields related to api + * versioning are included for good measure. + * @tags: [featureFlagQueryStats] + */ +load("jstests/libs/query_stats_utils.js"); +(function() { +"use strict"; + +const replTest = new ReplSetTest({name: 'reindexTest', nodes: 2}); + +// Turn on the collecting of telemetry metrics. +replTest.startSet({setParameter: {internalQueryStatsRateLimit: -1}}); +replTest.initiate(); + +const primary = replTest.getPrimary(); +const secondary = replTest.getSecondary(); + +const dbName = jsTestName(); +const collName = "foobar"; +const primaryDB = primary.getDB(dbName); +const primaryColl = primaryDB.getCollection(collName); +const secondaryDB = secondary.getDB(dbName); +const secondaryColl = secondaryDB.getCollection(collName); + +primaryColl.drop(); + +assert.commandWorked(primaryColl.insert({a: 1000})); + +replTest.awaitReplication(); + +function confirmCommandFieldsPresent(queryStatsKey, commandObj) { + for (const field in queryStatsKey) { + if (field == "queryShape" || field == "client" || field == "command") { + continue; + } + assert(commandObj.hasOwnProperty(field), + `${field} is present in the query stats key but not present in command obj: ${ + tojson(queryStatsKey)}, ${tojson(commandObj)}`); + } + assert.eq(Object.keys(queryStatsKey).length, Object.keys(commandObj).length, queryStatsKey); +} + +let commandObj = { + find: collName, + filter: {v: {$eq: 2}}, + readConcern: {level: "local", afterClusterTime: new Timestamp(0, 1)}, + $readPreference: {mode: "primary"}, + apiDeprecationErrors: false, + apiVersion: "1", + apiStrict: false, +}; +const replSetConn = new Mongo(replTest.getURL()); +assert.commandWorked(replSetConn.getDB(dbName).runCommand(commandObj)); +let telemetry = getQueryStats(replSetConn, {collName: collName}); +delete telemetry[0].key["collectionType"]; +confirmCommandFieldsPresent(telemetry[0].key, commandObj); +// check that readConcern afterClusterTime is normalized. +assert.eq(telemetry[0].key.readConcern.afterClusterTime, "?timestamp"); + +// check that readPreference not populated and readConcern just has an afterClusterTime field. +commandObj["readConcern"] = { + afterClusterTime: new Timestamp(1, 0) +}; +delete commandObj["$readPreference"]; +assert.commandWorked(replSetConn.getDB(dbName).runCommand(commandObj)); +telemetry = getQueryStats(replSetConn, {collName}); +// We're not concerned with this field here. +delete telemetry[0].key["collectionType"]; +confirmCommandFieldsPresent(telemetry[0].key, commandObj); +assert.eq(telemetry[0].key["readConcern"], {"afterClusterTime": "?timestamp"}); + +// check that readConcern has no afterClusterTime and fields related to api usage are not present. 
+commandObj["readConcern"] = { + level: "local" +}; +delete commandObj["apiDeprecationErrors"]; +delete commandObj["apiVersion"]; +delete commandObj["apiStrict"]; +assert.commandWorked(replSetConn.getDB(dbName).runCommand(commandObj)); +telemetry = getQueryStats(replSetConn, {collName: collName}); +assert.eq(telemetry[1].key["readConcern"], {level: "local"}); +// We're not concerned with this field here. +delete telemetry[1].key["collectionType"]; +confirmCommandFieldsPresent(telemetry[1].key, commandObj); + +replTest.stopSet(); +})(); diff --git a/jstests/noPassthrough/query_engine_stats.js b/jstests/noPassthrough/query_engine_stats.js index 15965be071a87..8ec6cb3183104 100644 --- a/jstests/noPassthrough/query_engine_stats.js +++ b/jstests/noPassthrough/query_engine_stats.js @@ -3,11 +3,8 @@ * serverStatus. */ -(function() { -"use strict"; - load("jstests/libs/profiler.js"); // For 'getLatestProfilerEntry()'. -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; let conn = MongoRunner.runMongod({}); assert.neq(null, conn, "mongod was unable to start up"); @@ -18,7 +15,7 @@ let db = conn.getDB(jsTestName()); if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } function initializeTestCollection() { @@ -229,16 +226,15 @@ verifyProfiler(queryComment, framework.find.sbe); MongoRunner.stopMongod(conn); -conn = MongoRunner.runMongod({ - restart: conn, - setParameter: - {featureFlagCommonQueryFramework: true, internalQueryFrameworkControl: "tryBonsai"} -}); +conn = + MongoRunner.runMongod({restart: conn, setParameter: {featureFlagCommonQueryFramework: true}}); assert.neq(null, conn, "mongod was unable to start up"); db = conn.getDB(jsTestName()); coll = initializeTestCollection(); +assert.commandWorked( + db.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); // Run find using CQF expectedCounters = generateExpectedCounters(framework.find.cqf); @@ -281,5 +277,4 @@ verifyProfiler(queryComment, "cqf"); cursor.next(); // getMore performed verifyProfiler(queryComment, "cqf"); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/query_knobs_validation.js b/jstests/noPassthrough/query_knobs_validation.js index ac1fc5eefaf48..76a947612ff78 100644 --- a/jstests/noPassthrough/query_knobs_validation.js +++ b/jstests/noPassthrough/query_knobs_validation.js @@ -1,14 +1,11 @@ /** * Tests to validate the input values accepted by internal query server parameters. The test - * verfies that the system responds with the expected error code for input values that fall outside + * verifies that the system responds with the expected error code for input values that fall outside * each parameter's valid bounds, and correctly applies input values which fall within that * parameter's valid bounds. 
*/ -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled - -(function() { -"use strict"; +import {checkCascadesFeatureFlagEnabled} from "jstests/libs/optimizer_utils.js"; const conn = MongoRunner.runMongod(); const testDB = conn.getDB("admin"); @@ -65,6 +62,11 @@ const expectedParamDefaults = { internalQueryColumnScanMinAvgDocSizeBytes: 1024, internalQueryColumnScanMinCollectionSizeBytes: -1, internalQueryColumnScanMinNumColumnFilters: 3, + internalQueryMaxSpoolMemoryUsageBytes: 100 * 1024 * 1024, + internalQueryMaxSpoolDiskUsageBytes: 10 * 100 * 1024 * 1024, + deprioritizeUnboundedUserCollectionScans: true, + deprioritizeUnboundedUserIndexScans: true, + internalQueryDocumentSourceWriterBatchExtraReservedBytes: 0, }; function assertDefaultParameterValues() { @@ -259,14 +261,15 @@ assertSetParameterFails("internalQueryFLERewriteMemoryLimit", 0); // Need to have the CQF feature flag enabled in order to set tryBonsai or forceBonsai. assertSetParameterSucceeds("internalQueryFrameworkControl", "forceClassicEngine"); assertSetParameterSucceeds("internalQueryFrameworkControl", "trySbeEngine"); -if (checkCascadesOptimizerEnabled(testDB)) { +if (checkCascadesFeatureFlagEnabled(testDB)) { assertSetParameterSucceeds("internalQueryFrameworkControl", "tryBonsai"); + assertSetParameterSucceeds("internalQueryFrameworkControl", "tryBonsaiExperimental"); assertSetParameterSucceeds("internalQueryFrameworkControl", "forceBonsai"); } else { assert.commandFailed( testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); - assert.commandFailed( - testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "forceBonsai"})); + assertSetParameterSucceeds("internalQueryFrameworkControl", "tryBonsaiExperimental"); + assertSetParameterSucceeds("internalQueryFrameworkControl", "forceBonsai"); } assertSetParameterFails("internalQueryFrameworkControl", "tryCascades"); assertSetParameterFails("internalQueryFrameworkControl", 1); @@ -283,5 +286,24 @@ assertSetParameterSucceeds("internalQueryColumnScanMinNumColumnFilters", 100); assertSetParameterSucceeds("internalQueryColumnScanMinNumColumnFilters", 0); assertSetParameterFails("internalQueryColumnScanMinNumColumnFilters", -1); +assertSetParameterSucceeds("internalQueryMaxSpoolMemoryUsageBytes", 100); +assertSetParameterSucceeds("internalQueryMaxSpoolMemoryUsageBytes", 1); +assertSetParameterFails("internalQueryMaxSpoolMemoryUsageBytes", 0); + +assertSetParameterSucceeds("internalQueryMaxSpoolDiskUsageBytes", 100); +assertSetParameterSucceeds("internalQueryMaxSpoolDiskUsageBytes", 1); +assertSetParameterFails("internalQueryMaxSpoolDiskUsageBytes", 0); + +assertSetParameterSucceeds("deprioritizeUnboundedUserCollectionScans", true); +assertSetParameterSucceeds("deprioritizeUnboundedUserCollectionScans", false); +assertSetParameterSucceeds("deprioritizeUnboundedUserIndexScans", true); +assertSetParameterSucceeds("deprioritizeUnboundedUserIndexScans", false); + +assertSetParameterSucceeds("internalQueryDocumentSourceWriterBatchExtraReservedBytes", 10); +assertSetParameterSucceeds("internalQueryDocumentSourceWriterBatchExtraReservedBytes", + 4 * 1024 * 1024); +assertSetParameterFails("internalQueryDocumentSourceWriterBatchExtraReservedBytes", -1); +assertSetParameterFails("internalQueryDocumentSourceWriterBatchExtraReservedBytes", + 9 * 1024 * 1024); + MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthrough/query_oplogreplay.js b/jstests/noPassthrough/query_oplogreplay.js index 
4fba7c108b74e..9300574e8fbc2 100644 --- a/jstests/noPassthrough/query_oplogreplay.js +++ b/jstests/noPassthrough/query_oplogreplay.js @@ -1,10 +1,8 @@ // Test oplog queries that can be optimized with oplogReplay. // @tags: [requires_replication, requires_capped] -(function() { -"use strict"; +import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js"; -load("jstests/libs/analyze_plan.js"); load("jstests/libs/storage_engine_utils.js"); let replSet = new ReplSetTest({nodes: 1}); @@ -37,24 +35,26 @@ for (let i = 1; i <= 100; i++) { assert.commandWorked(res); } +const collNs = `test.${jsTestName()}`; + // A $gt query on just the 'ts' field should return the next document after the timestamp. -var cursor = oplog.find({ts: {$gt: timestamps[20]}}); +var cursor = oplog.find({ns: collNs, ts: {$gt: timestamps[20]}}); assert.eq(21, cursor.next().o["_id"]); assert.eq(22, cursor.next().o["_id"]); // A $gte query on the 'ts' field should include the timestamp. -cursor = oplog.find({ts: {$gte: timestamps[20]}}); +cursor = oplog.find({ns: collNs, ts: {$gte: timestamps[20]}}); assert.eq(20, cursor.next().o["_id"]); assert.eq(21, cursor.next().o["_id"]); // An $eq query on the 'ts' field should return the single record with the timestamp. -cursor = oplog.find({ts: {$eq: timestamps[20]}}); +cursor = oplog.find({ns: collNs, ts: {$eq: timestamps[20]}}); assert.eq(20, cursor.next().o["_id"]); assert(!cursor.hasNext()); // An AND with both a $gt and $lt query on the 'ts' field will correctly return results in // the proper bounds. -cursor = oplog.find({$and: [{ts: {$lt: timestamps[5]}}, {ts: {$gt: timestamps[1]}}]}); +cursor = oplog.find({$and: [{ns: collNs}, {ts: {$lt: timestamps[5]}}, {ts: {$gt: timestamps[1]}}]}); assert.eq(2, cursor.next().o["_id"]); assert.eq(3, cursor.next().o["_id"]); assert.eq(4, cursor.next().o["_id"]); @@ -64,6 +64,7 @@ assert(!cursor.hasNext()); // tightest range. cursor = oplog.find({ $and: [ + {ns: collNs}, {ts: {$gte: timestamps[2]}}, {ts: {$gt: timestamps[3]}}, {ts: {$lte: timestamps[7]}}, @@ -79,6 +80,7 @@ assert(!cursor.hasNext()); // result. cursor = oplog.find({ $and: [ + {ns: collNs}, {ts: {$gte: timestamps[1]}}, {ts: {$gt: timestamps[2]}}, {ts: {$eq: timestamps[5]}}, @@ -90,46 +92,49 @@ assert.eq(5, cursor.next().o["_id"]); assert(!cursor.hasNext()); // An $eq query stops scanning after passing the max timestamp. -let res = oplog.find({ts: {$eq: timestamps[10]}}).explain("executionStats"); +let res = oplog.find({ns: collNs, ts: {$eq: timestamps[10]}}).explain("executionStats"); assert.commandWorked(res); // We expect to be able to seek directly to the entry with a 'ts' of 10. -assert.lte(res.executionStats.totalDocsExamined, 2, tojson(res)); +assert.lte(res.executionStats.totalDocsExamined, 2, res); let collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), tojson(res)); +assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), res); // An AND with an $lt predicate stops scanning after passing the max timestamp. 
-res = oplog.find({$and: [{ts: {$gte: timestamps[1]}}, {ts: {$lt: timestamps[10]}}]}) +res = oplog.find({$and: [{ts: {$gte: timestamps[51]}}, {ts: {$lt: timestamps[60]}}]}) .explain("executionStats"); assert.commandWorked(res); -assert.lte(res.executionStats.totalDocsExamined, 11, tojson(res)); +assert.lte(res.executionStats.totalDocsExamined, res.executionStats.nReturned + 2, res); collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), tojson(res)); +assert.eq(timestamps[60], longToTs(collScanStage.maxRecord), res); +assert.eq(timestamps[51], longToTs(collScanStage.minRecord), res); // An AND with an $lte predicate stops scanning after passing the max timestamp. -res = oplog.find({$and: [{ts: {$gte: timestamps[1]}}, {ts: {$lte: timestamps[10]}}]}) +res = oplog.find({$and: [{ts: {$gte: timestamps[51]}}, {ts: {$lte: timestamps[60]}}]}) .explain("executionStats"); assert.commandWorked(res); -assert.lte(res.executionStats.totalDocsExamined, 12, tojson(res)); +assert.lte(res.executionStats.totalDocsExamined, res.executionStats.nReturned + 2, res); collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), tojson(res)); +assert.eq(timestamps[60], longToTs(collScanStage.maxRecord), res); +assert.eq(timestamps[51], longToTs(collScanStage.minRecord), res); // The max timestamp is respected even when the min timestamp is smaller than the lowest // timestamp in the collection. -res = oplog.find({$and: [{ts: {$gte: timestamps[0]}}, {ts: {$lte: timestamps[10]}}]}) +res = oplog.find({$and: [{ns: collNs}, {ts: {$gte: timestamps[0]}}, {ts: {$lte: timestamps[10]}}]}) .explain("executionStats"); assert.commandWorked(res); collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), tojson(res)); +assert.eq(timestamps[10], longToTs(collScanStage.maxRecord), res); // An AND with redundant $eq/$lt/$lte predicates stops scanning after passing the max // timestamp. res = oplog .find({ $and: [ + {ns: collNs}, {ts: {$gte: timestamps[0]}}, {ts: {$lte: timestamps[10]}}, {ts: {$eq: timestamps[5]}}, @@ -141,37 +146,37 @@ assert.commandWorked(res); // We expect to be able to seek directly to the entry with a 'ts' of 5. collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -assert.eq(timestamps[5], longToTs(collScanStage.maxRecord), tojson(res)); -assert.eq(timestamps[5], longToTs(collScanStage.minRecord), tojson(res)); +assert.eq(timestamps[5], longToTs(collScanStage.maxRecord), res); +assert.eq(timestamps[5], longToTs(collScanStage.minRecord), res); // An $eq query for a non-existent timestamp scans a single oplog document. -res = oplog.find({ts: {$eq: makeTS(200)}}).explain("executionStats"); +res = oplog.find({ns: collNs, ts: {$eq: makeTS(200)}}).explain("executionStats"); assert.commandWorked(res); // We expect to be able to seek directly to the end of the oplog. 
collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -assert.eq(makeTS(200), longToTs(collScanStage.maxRecord), tojson(res)); +assert.eq(makeTS(200), longToTs(collScanStage.maxRecord), res); // When the filter matches the last document within the timestamp range, the collection scan // examines at most one more document. -res = oplog.find({$and: [{ts: {$gte: timestamps[4]}}, {ts: {$lte: timestamps[8]}}]}) +res = oplog.find({$and: [{ns: collNs}, {ts: {$gte: timestamps[4]}}, {ts: {$lte: timestamps[8]}}]}) .explain("executionStats"); assert.commandWorked(res); // We expect to be able to seek directly to the start of the 'ts' range. collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -assert.eq(timestamps[8], longToTs(collScanStage.maxRecord), tojson(res)); +assert.eq(timestamps[8], longToTs(collScanStage.maxRecord), res); // A filter with only an upper bound predicate on 'ts' stops scanning after // passing the max timestamp. -res = oplog.find({ts: {$lt: timestamps[4]}}).explain("executionStats"); +res = oplog.find({ns: collNs, ts: {$lt: timestamps[4]}}).explain("executionStats"); assert.commandWorked(res); collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -assert.eq(timestamps[4], longToTs(collScanStage.maxRecord), tojson(res)); +assert.eq(timestamps[4], longToTs(collScanStage.maxRecord), res); // Oplog replay optimization should work with projection. -res = oplog.find({ts: {$lte: timestamps[4]}}).projection({op: 0}); +res = oplog.find({ns: collNs, ts: {$lte: timestamps[4]}}).projection({op: 0}); while (res.hasNext()) { const next = res.next(); assert(!next.hasOwnProperty('op')); @@ -180,7 +185,7 @@ while (res.hasNext()) { res = res.explain("executionStats"); assert.commandWorked(res); -res = oplog.find({ts: {$gte: timestamps[90]}}).projection({'op': 0}); +res = oplog.find({ns: collNs, ts: {$gte: timestamps[90]}}).projection({'op': 0}); while (res.hasNext()) { const next = res.next(); assert(!next.hasOwnProperty('op')); @@ -190,7 +195,7 @@ res = res.explain("executionStats"); assert.commandWorked(res); // Oplog replay optimization should work with limit. -res = oplog.find({$and: [{ts: {$gte: timestamps[4]}}, {ts: {$lte: timestamps[8]}}]}) +res = oplog.find({$and: [{ns: collNs}, {ts: {$gte: timestamps[4]}}, {ts: {$lte: timestamps[8]}}]}) .limit(2) .explain("executionStats"); assert.commandWorked(res); @@ -200,7 +205,7 @@ assert.eq(2, collScanStage.nReturned, res); // A query over both 'ts' and '_id' should only pay attention to the 'ts' field for finding // the oplog start (SERVER-13566). -cursor = oplog.find({ts: {$gte: timestamps[20]}, "o._id": 25}); +cursor = oplog.find({ns: collNs, ts: {$gte: timestamps[20]}, "o._id": 25}); assert.eq(25, cursor.next().o["_id"]); assert(!cursor.hasNext()); @@ -221,11 +226,12 @@ assert.commandWorked(res); assert.eq(res.executionStats.totalDocsExamined, 100); // Ensure oplog replay hack does not work for backward scans. 
-res = oplog.find({ts: {$lt: timestamps[4]}}).sort({$natural: -1}).explain("executionStats"); +res = oplog.find({ns: collNs, ts: {$lt: timestamps[4]}}) + .sort({$natural: -1}) + .explain("executionStats"); assert.commandWorked(res); -assert.gte(res.executionStats.totalDocsExamined, 100, tojson(res)); +assert.gte(res.executionStats.totalDocsExamined, 100, res); collScanStage = getPlanStage(getWinningPlan(res.queryPlanner), "COLLSCAN"); assert.neq(null, collScanStage, "no collection scan found in explain output: " + tojson(res)); -replSet.stopSet(); -}()); +replSet.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/query_yields_catch_index_corruption.js b/jstests/noPassthrough/query_yields_catch_index_corruption.js index 1360641154116..b27b1e4bdb58c 100644 --- a/jstests/noPassthrough/query_yields_catch_index_corruption.js +++ b/jstests/noPassthrough/query_yields_catch_index_corruption.js @@ -1,7 +1,7 @@ // @tags: [ // requires_persistence, -// # TODO: SERVER-64007 Plans produced by Cascades don't yield -// cqf_incompatible, +// # TODO: SERVER-70446 Enable yielding for index plans in CQF. +// cqf_experimental_incompatible, // ] (function() { "use strict"; diff --git a/jstests/noPassthrough/quiet_shell.js b/jstests/noPassthrough/quiet_shell.js index 1f55d0e06f9ea..2ab7880587b93 100644 --- a/jstests/noPassthrough/quiet_shell.js +++ b/jstests/noPassthrough/quiet_shell.js @@ -23,4 +23,4 @@ clearRawMongoProgramOutput(); } MongoRunner.stopMongod(mongo); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/readConcern_atClusterTime.js b/jstests/noPassthrough/readConcern_atClusterTime.js index 37c644b8d353b..8b1d4c2061e9f 100644 --- a/jstests/noPassthrough/readConcern_atClusterTime.js +++ b/jstests/noPassthrough/readConcern_atClusterTime.js @@ -4,6 +4,8 @@ // requires_persistence, // uses_atclustertime, // uses_transactions, +// # Tests running with experimental CQF behavior require test commands to be enabled. +// cqf_experimental_incompatible, // ] function _getClusterTime(rst) { diff --git a/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js b/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js index 31b2e513b3154..ee9a1d9432c95 100644 --- a/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js +++ b/jstests/noPassthrough/readConcern_snapshot_mongos_enable_test_commands.js @@ -1,6 +1,7 @@ // Verifies that snapshot readConcern on mongos is not gated by the enableTestCommands flag. // -// @tags: [requires_sharding] +// Tests running with experimental CQF behavior require test commands to be enabled. +// @tags: [requires_sharding, cqf_experimental_incompatible] (function() { "use strict"; diff --git a/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js b/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js index 43d1bd4a8140a..94c79ec95b2af 100644 --- a/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js +++ b/jstests/noPassthrough/read_concern_snapshot_catalog_invalidation.js @@ -1,11 +1,7 @@ // Tests that snapshot reads return an error when accessing a collection whose metadata is invalid // for the snapshot's point in time. // @tags: [uses_transactions] -(function() { -"use strict"; - -load("jstests/libs/curop_helpers.js"); // For waitForCurOpByFailPoint(). -load("jstests/libs/feature_flag_util.js"); // For FeatureFlagUtil.isEnabled(). +load("jstests/libs/curop_helpers.js"); // For waitForCurOpByFailPoint(). 
const kDbName = "test"; const kCollName = "coll"; @@ -52,7 +48,6 @@ function testCommand(cmd, curOpFilter, expectSucceed) { // Execute command in parallel shell. Read commands should work even if catalog changes has // occured since opening the snapshot. - expectSucceed = expectSucceed && FeatureFlagUtil.isEnabled(testDB, "PointInTimeCatalogLookups"); const awaitCommand = execCommand(cmd, expectSucceed); waitForCurOpByFailPointNoNS(testDB, "hangAfterPreallocateSnapshot", curOpFilter); @@ -113,5 +108,4 @@ testCommand({update: kCollName, updates: [{q: {x: 1}, u: {$set: {x: 2}}}]}, {"command.update": kCollName, "command.readConcern.level": "snapshot"}, false /*write is expected to fail*/); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/read_majority.js b/jstests/noPassthrough/read_majority.js index 7b1e3867d5107..b9b8f9d69c92c 100644 --- a/jstests/noPassthrough/read_majority.js +++ b/jstests/noPassthrough/read_majority.js @@ -16,11 +16,7 @@ * ] */ -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/feature_flag_util.js"); - -(function() { -"use strict"; +import {getWinningPlan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js"; // Tests the functionality for committed reads for the given read concern level. function testReadConcernLevel(level) { @@ -51,7 +47,6 @@ function testReadConcernLevel(level) { // Point-in-time reads on a collection before it was created behaves like reading from a // non-existent collection. - assert(FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")); assert.commandWorked(res); assert(res.cursor.firstBatch.length == 0); } @@ -71,7 +66,6 @@ function testReadConcernLevel(level) { // Point-in-time reads on a collection before it was created behaves like reading from a // non-existent collection. - assert(FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")); assert.commandWorked(res); assert(res.cursor.firstBatch.length == 0); } @@ -217,42 +211,18 @@ function testReadConcernLevel(level) { assert.eq(cursor.next().version, 4); assert(!cursor.objsLeftInBatch()); - // Even though renaming advances the minimum visible snapshot, we're querying by a namespace - // that no longer exists. Because of this, the query surprisingly returns no results instead of - // timing out. This violates read-committed semantics but is allowed by the current - // specification. This is not the case for point-in-time reads as the collection instance is - // recreated internally to support reads at this time. + // Even though the collection is renamed, point-in-time reads reconstruct the prior collection + // internally. const tempNs = db.getName() + '.temp'; assert.commandWorked(db.adminCommand({renameCollection: t.getFullName(), to: tempNs})); - if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - assert.eq(getCursorForReadConcernLevel().itcount(), 10); - - // Snapshot is available. - assertSnapshotAvailableForReadConcernLevel(); - assertSnapshotAvailableForReadConcernLevelByUUID(collUuid); - } else { - assert.eq(getCursorForReadConcernLevel().itcount(), 0); - - // Trigger a getMore that should fail due to the rename. - let error = assert.throws(() => { - cursor.next(); - }); - assert.eq(error.code, ErrorCodes.QueryPlanKilled); - - // Starting a new query by UUID will block because the minimum visible timestamp is ahead of - // the majority-committed snapshot. 
- assertNoSnapshotAvailableForReadConcernLevelByUUID(collUuid); - } + assert.eq(getCursorForReadConcernLevel().itcount(), 10); + + // Snapshot is available. + assertSnapshotAvailableForReadConcernLevel(); + assertSnapshotAvailableForReadConcernLevelByUUID(collUuid); - // Renaming back will cause queries to block again because the original namespace exists, and - // its minimum visible timestamp is ahead of the current majority-committed snapshot when not - // using point-in-time reads. assert.commandWorked(db.adminCommand({renameCollection: tempNs, to: t.getFullName()})); - if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - assertSnapshotAvailableForReadConcernLevel(); - } else { - assertNoSnapshotAvailableForReadConcernLevel(); - } + assertSnapshotAvailableForReadConcernLevel(); newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name; assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot})); @@ -264,25 +234,11 @@ function testReadConcernLevel(level) { // violates strict read-committed semantics since we don't guarantee them on metadata // operations. t.drop(); - if (!FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - assert.eq(getCursorForReadConcernLevel().itcount(), 0); - assert.eq(getAggCursorForReadConcernLevel().itcount(), 0); - } - // Creating a new collection with the same name hides the collection until that operation is - // in the committed view when not using point-in-time reads. t.insert({_id: 0, version: 8}); - if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - assertSnapshotAvailableForReadConcernLevel(); - } else { - assertNoSnapshotAvailableForReadConcernLevel(); - } + assertSnapshotAvailableForReadConcernLevel(); newSnapshot = assert.commandWorked(db.adminCommand("makeSnapshot")).name; - if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - assertSnapshotAvailableForReadConcernLevel(); - } else { - assertNoSnapshotAvailableForReadConcernLevel(); - } + assertSnapshotAvailableForReadConcernLevel(); assert.commandWorked(db.adminCommand({"setCommittedSnapshot": newSnapshot})); assert.eq(getCursorForReadConcernLevel().itcount(), 1); assert.eq(getAggCursorForReadConcernLevel().itcount(), 1); @@ -312,4 +268,3 @@ MongoRunner.stopMongod(conn); if (supportsCommittedReads) { testReadConcernLevel("majority"); } -}()); diff --git a/jstests/noPassthrough/read_majority_reads.js b/jstests/noPassthrough/read_majority_reads.js index b5f9328b8ac45..b5b4ddf3603ae 100644 --- a/jstests/noPassthrough/read_majority_reads.js +++ b/jstests/noPassthrough/read_majority_reads.js @@ -19,8 +19,9 @@ (function() { 'use strict'; -// Skip metadata consistency check since the sharded clsuter is started with 0 shards +// Skip metadata consistency checks since the sharded cluster is started with 0 shards TestData.skipCheckMetadataConsistency = true; +TestData.skipCheckRoutingTableConsistency = true; var testServer = MongoRunner.runMongod(); var db = testServer.getDB("test"); diff --git a/jstests/noPassthrough/read_only_allow_disk_use.js b/jstests/noPassthrough/read_only_allow_disk_use.js index a6fc5ea4013ef..f7fb3c62da37e 100644 --- a/jstests/noPassthrough/read_only_allow_disk_use.js +++ b/jstests/noPassthrough/read_only_allow_disk_use.js @@ -9,11 +9,7 @@ * requires_replication * ] */ - -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const memoryLimitMb = 1; const memoryLimitBytes = 1 * 1024 * 1024; @@ -159,4 +155,3 @@ runTest(connRecoverStandalone, true); runTest(connRecoverStandalone, false); MongoRunner.stopMongod(connRecoverStandalone); -})(); \ No newline at end of file diff --git a/jstests/noPassthrough/read_ticket_exhaustion_with_stepdown.js b/jstests/noPassthrough/read_ticket_exhaustion_with_stepdown.js index 702ddd95c1186..535fa60f924cf 100644 --- a/jstests/noPassthrough/read_ticket_exhaustion_with_stepdown.js +++ b/jstests/noPassthrough/read_ticket_exhaustion_with_stepdown.js @@ -21,13 +21,13 @@ * arriving reads are serviced without deadlocking. * queuedLongReadsFunc - Issues long read commands until told to stop. * newLongReadsFunc - When told to begin, issues long read commands until told - to stop. + * to stop. * * Test Steps: * 0) Start ReplSet with special params: * - lower read ticket concurrency * - increase yielding - * 1) Insert 100 documents. + * 1) Insert 1000 documents. * 2) Kick off parallel readers that perform long collection scans, subject to yields. * 3) Sleep with global X Lock (including RSTL), thus queuing up reads. * 4) Signal new readers that will be received after the global lock is released. @@ -36,21 +36,26 @@ * <> * 6) Stop Readers. * - * @tags: [multiversion_incompatible] + * @tags: [ + * multiversion_incompatible, + * requires_replication, + * requires_wiredtiger, + * ] */ (function() { "use strict"; load("jstests/libs/parallel_shell_helpers.js"); +const kNumReadTickets = 5; const replTest = new ReplSetTest({ name: jsTestName(), nodes: 1, nodeOptions: { setParameter: { // This test seeks the minimum amount of concurrency to force ticket exhaustion. - storageEngineConcurrencyAdjustmentAlgorithm: "", - storageEngineConcurrentReadTransactions: 5, + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions", + storageEngineConcurrentReadTransactions: kNumReadTickets, // Make yielding more common. internalQueryExecYieldPeriodMS: 1, internalQueryExecYieldIterations: 1 @@ -126,17 +131,11 @@ function runStepDown() { let stats = db.runCommand({serverStatus: 1}); jsTestLog(stats.locks); jsTestLog(stats.wiredTiger.concurrentTransactions); - const stepDownSecs = 5; - assert.commandWorked(primaryAdmin.runCommand({"replSetStepDown": stepDownSecs, "force": true})); - - // Wait until the primary transitioned to SECONDARY state. - replTest.waitForState(primary, ReplSetTest.State.SECONDARY); - - // Enforce the replSetStepDown timer. - sleep(stepDownSecs * 1000); - - replTest.waitForState(primary, ReplSetTest.State.PRIMARY); - replTest.getPrimary(); + // Force primary to step down, then unfreeze and allow it to step up. + assert.commandWorked( + primaryAdmin.runCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true})); + assert.commandWorked(primaryAdmin.runCommand({replSetFreeze: 0})); + return replTest.getPrimary(); } /****************************************************/ @@ -153,12 +152,12 @@ let primaryColl = db[collName]; let queuedReaders = []; let newReaders = []; -// 1) Insert 100 documents. -jsTestLog("Fill collection [" + dbName + "." + collName + "] with 100 docs"); -for (let i = 0; i < 100; i++) { +// 1) Insert 1000 documents. +jsTestLog("Fill collection [" + dbName + "." 
+ collName + "] with 1000 docs"); +for (let i = 0; i < 1000; i++) { assert.commandWorked(primaryColl.insert({"x": i})); } -jsTestLog("100 inserts done"); +jsTestLog("1000 inserts done"); // 2) Kick off parallel readers that perform long collection scans, subject to yields. for (let i = 0; i < nQueuedReaders; i++) { @@ -192,9 +191,10 @@ assert.soon( () => db.getSiblingDB("admin") .aggregate([{$currentOp: {}}, {$match: {"command.aggregate": TestData.collName}}]) .toArray() - .length > 5, + .length >= kNumReadTickets, "Expected more readers than read tickets."); -runStepDown(); + +primary = runStepDown(); // 6) Stop Readers. jsTestLog("Stopping Readers"); diff --git a/jstests/noPassthrough/reconfig_restarts_collection_scan.js b/jstests/noPassthrough/reconfig_restarts_collection_scan.js index 4d38c2750cda6..9e6c0c638c3b1 100644 --- a/jstests/noPassthrough/reconfig_restarts_collection_scan.js +++ b/jstests/noPassthrough/reconfig_restarts_collection_scan.js @@ -74,4 +74,4 @@ awaitCreateIndex(); IndexBuildTest.assertIndexes(coll, 2, ["_id_", "a_1"]); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/resize_tickets.js b/jstests/noPassthrough/resize_tickets.js index b5d6f4af66f6f..5603ad01095b3 100644 --- a/jstests/noPassthrough/resize_tickets.js +++ b/jstests/noPassthrough/resize_tickets.js @@ -7,11 +7,9 @@ * requires_wiredtiger, * ] */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +jsTestLog("Start a replica set with execution control enabled by default"); let replTest = new ReplSetTest({ name: jsTestName(), nodes: 1, @@ -21,6 +19,7 @@ replTest.initiate(); let mongod = replTest.getPrimary(); // TODO (SERVER-67104): Remove the feature flag check. if (FeatureFlagUtil.isPresentAndEnabled(mongod, 'ExecutionControl')) { + // Users cannot manually adjust read/write tickets once execution control is enabled at startup. assert.commandFailedWithCode( mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 10}), ErrorCodes.IllegalOperation); @@ -30,14 +29,48 @@ if (FeatureFlagUtil.isPresentAndEnabled(mongod, 'ExecutionControl')) { } replTest.stopSet(); +const gfixedConcurrentTransactions = "fixedConcurrentTransactions"; +jsTestLog("Start a replica set with execution control explicitly disabled on startup"); +replTest = new ReplSetTest({ + name: jsTestName(), + nodes: 1, + nodeOptions: { + // Users can opt out of execution control by specifying the 'fixedConcurrentTransactions' + // option on startup. + setParameter: {storageEngineConcurrencyAdjustmentAlgorithm: gfixedConcurrentTransactions} + }, +}); +replTest.startSet(); +replTest.initiate(); +mongod = replTest.getPrimary(); + +assert.commandWorked( + mongod.adminCommand({setParameter: 1, wiredTigerConcurrentWriteTransactions: 20})); +assert.commandWorked( + mongod.adminCommand({setParameter: 1, wiredTigerConcurrentReadTransactions: 20})); +replTest.stopSet(); + +jsTestLog("Start a replica set with execution control implicitly disabled on startup"); replTest = new ReplSetTest({ name: jsTestName(), nodes: 1, - nodeOptions: {setParameter: {storageEngineConcurrencyAdjustmentAlgorithm: ""}}, + nodeOptions: { + // If a user manually sets read/write tickets on startup, implicitly set the + // 'storageEngineConcurrencyAdjustmentAlgorithm' parameter to 'fixedConcurrentTransactions' + // and disable execution control. 
+ setParameter: {wiredTigerConcurrentReadTransactions: 20} + }, }); replTest.startSet(); replTest.initiate(); mongod = replTest.getPrimary(); + +const getParameterResult = + mongod.adminCommand({getParameter: 1, storageEngineConcurrencyAdjustmentAlgorithm: 1}); +assert.commandWorked(getParameterResult); +assert.eq(getParameterResult.storageEngineConcurrencyAdjustmentAlgorithm, + gfixedConcurrentTransactions); + // The 20, 10, 30 sequence of ticket resizes are just arbitrary numbers in order to test a decrease // (20 -> 10) and an increase (10 -> 30) of tickets. assert.commandWorked( @@ -52,5 +85,4 @@ assert.commandWorked( mongod.adminCommand({setParameter: 1, wiredTigerConcurrentReadTransactions: 10})); assert.commandWorked( mongod.adminCommand({setParameter: 1, wiredTigerConcurrentReadTransactions: 30})); -replTest.stopSet(); -}()); +replTest.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/restart_index_build_if_resume_fails.js b/jstests/noPassthrough/restart_index_build_if_resume_fails.js index bcd1e3a50ce6d..5cce87d4f7d1a 100644 --- a/jstests/noPassthrough/restart_index_build_if_resume_fails.js +++ b/jstests/noPassthrough/restart_index_build_if_resume_fails.js @@ -8,12 +8,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; const collName = jsTestName(); @@ -107,5 +104,4 @@ if (columnstoreEnabled) { [{a: 24}, {a: 25}]); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js b/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js index 46f5fd8d80a2b..593c556eaafab 100644 --- a/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js +++ b/jstests/noPassthrough/restart_index_build_if_resume_interrupted_by_shutdown.js @@ -9,12 +9,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; const collName = jsTestName(); @@ -91,5 +88,4 @@ if (columnstoreEnabled) { }), [{a: 99}, {a: 100}]); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js b/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js index 147c4e4281ea8..cc34310beb07d 100644 --- a/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js +++ b/jstests/noPassthrough/resumable_index_build_bulk_load_phase.js @@ -8,12 +8,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; @@ -68,5 +65,4 @@ if (columnstoreEnabled) { runTests(testDocs, [{"$**": "columnstore"}, {b: 1}], "_columnstore"); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js b/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js index 8cbdcb18268b1..86b171fdecb17 100644 --- a/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js +++ b/jstests/noPassthrough/resumable_index_build_bulk_load_phase_large.js @@ -9,12 +9,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; @@ -57,5 +54,4 @@ if (columnstoreEnabled) { ["bulk load"], [{skippedPhaseLogID: 20391}]); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js b/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js index c94db0b2ee7ea..58f6d49b5da81 100644 --- a/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js +++ b/jstests/noPassthrough/resumable_index_build_clearing_tmp_directory_on_restart.js @@ -8,12 +8,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; @@ -63,5 +60,4 @@ ResumableIndexBuildTest.run( const files = listFiles(tmpDir); assert.eq(files.length, 0, files); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js b/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js index 49ec48f5ced6a..54d1549a93833 100644 --- a/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js +++ b/jstests/noPassthrough/resumable_index_build_collection_scan_phase.js @@ -9,12 +9,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; @@ -64,5 +61,4 @@ if (columnstoreEnabled) { runTests([{a: 1, b: 1}, {a: 2, b: 2}], [{"$**": "columnstore"}, {b: 1}], "_columnstore"); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js b/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js index d1dd867cb3ea0..c59ac0784620f 100644 --- a/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js +++ b/jstests/noPassthrough/resumable_index_build_collection_scan_phase_large.js @@ -9,12 +9,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; @@ -70,5 +67,4 @@ if (columnstoreEnabled) { ["collection scan"], [{numScannedAfterResume: numDocuments - maxIndexBuildMemoryUsageMB}]); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js index 485abf0a758b2..070b6218c7a26 100644 --- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js +++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase.js @@ -8,11 +8,8 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const dbName = "test"; @@ -85,5 +82,4 @@ if (columnstoreEnabled) { }), "_subdocument_columnstore"); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js index 52def8c8bde8e..a365ba911a836 100644 --- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js +++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_primary.js @@ -9,11 +9,8 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const dbName = "test"; const collName = jsTestName(); @@ -158,5 +155,4 @@ if (columnstoreEnabled) { [{a: 32}, {a: 33}]); } -rst.stopSet(); -})(); \ No newline at end of file +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js index cb859855d342f..a576fcecfa5a3 100644 --- a/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js +++ b/jstests/noPassthrough/resumable_index_build_drain_writes_phase_secondary.js @@ -12,11 +12,8 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const dbName = "test"; const collName = jsTestName(); @@ -163,5 +160,4 @@ if (columnstoreEnabled) { [{a: 28}, {a: 29}]); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_initialized.js b/jstests/noPassthrough/resumable_index_build_initialized.js index aa672b3260d27..dc771c7751d0a 100644 --- a/jstests/noPassthrough/resumable_index_build_initialized.js +++ b/jstests/noPassthrough/resumable_index_build_initialized.js @@ -9,12 +9,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; @@ -54,5 +51,4 @@ if (columnstoreEnabled) { runTests({foo: 1, b: 10}, [{"$**": "columnstore"}, {b: 1}], "_columnstore"); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_index_build_mixed_phases.js b/jstests/noPassthrough/resumable_index_build_mixed_phases.js index 463d481d5e21e..81212f59e42cc 100644 --- a/jstests/noPassthrough/resumable_index_build_mixed_phases.js +++ b/jstests/noPassthrough/resumable_index_build_mixed_phases.js @@ -8,12 +8,9 @@ * requires_replication, * ] */ -(function() { -"use strict"; - load("jstests/noPassthrough/libs/index_build.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const dbName = "test"; @@ -115,5 +112,4 @@ runTests( ], ["bulk load", "drain writes"], [{skippedPhaseLogID: 20391}, {skippedPhaseLogID: 20392}]); -rst.stopSet(); -})(); \ No newline at end of file +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/resumable_timeseries_index_build_collection_scan_phase.js b/jstests/noPassthrough/resumable_timeseries_index_build_collection_scan_phase.js index 749735e62f0cd..66f46afea909d 100644 --- a/jstests/noPassthrough/resumable_timeseries_index_build_collection_scan_phase.js +++ b/jstests/noPassthrough/resumable_timeseries_index_build_collection_scan_phase.js @@ -9,10 +9,7 @@ * requires_replication, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load("jstests/noPassthrough/libs/index_build.js"); const rst = new ReplSetTest({nodes: 1}); @@ -59,4 +56,3 @@ ResumableIndexBuildTest.run( [{numScannedAfterResume: 2}]); rst.stopSet(); -})(); diff --git a/jstests/noPassthrough/retry_network_error_test.js b/jstests/noPassthrough/retry_network_error_test.js index e8fe4a78047f2..25651027a836e 100644 --- a/jstests/noPassthrough/retry_network_error_test.js +++ b/jstests/noPassthrough/retry_network_error_test.js @@ -43,4 +43,4 @@ try { jsTestLog("Caught exception after exhausting retries: " + e); } assert.eq(attempts, numRetries + 1); -}()); \ No newline at end of file +}()); diff --git a/jstests/noPassthrough/rolling_index_builds_interrupted.js b/jstests/noPassthrough/rolling_index_builds_interrupted.js index d98bc5cdeb94e..86a966907ce47 
100644 --- a/jstests/noPassthrough/rolling_index_builds_interrupted.js +++ b/jstests/noPassthrough/rolling_index_builds_interrupted.js @@ -50,7 +50,6 @@ IndexBuildTest.buildIndexOnNodeAsStandalone( replTest.awaitNodesAgreeOnPrimary( replTest.kDefaultTimeoutMS, replTest.nodes, replTest.getNodeId(primary)); -// TODO(SERVER-71768): fix the index build stall. jsTestLog('Build index on the primary as part of the replica set: ' + primary.host); let createIdx = IndexBuildTest.startIndexBuild( primary, primaryColl.getFullName(), {x: 1}, {name: 'x_1'}, [ErrorCodes.Interrupted]); @@ -81,7 +80,6 @@ assert.commandWorked(primaryDB.killOp(opId)); createIdx(); -// TODO(SERVER-71768): Check dbHash. TestData.skipCheckDBHashes = true; replTest.stopSet(); }()); diff --git a/jstests/noPassthrough/router_transactions_metrics.js b/jstests/noPassthrough/router_transactions_metrics.js index 21677c30dfa08..77bf9005b6b6a 100644 --- a/jstests/noPassthrough/router_transactions_metrics.js +++ b/jstests/noPassthrough/router_transactions_metrics.js @@ -1,7 +1,6 @@ // Tests multi-statement transactions metrics in the serverStatus output from mongos in various // basic cases. // @tags: [ -// requires_fcv_70, // uses_multi_shard_transaction, // uses_transactions, // ] diff --git a/jstests/noPassthrough/sample_pushdown_transaction.js b/jstests/noPassthrough/sample_pushdown_transaction.js index 21a85f5da1b6e..8ae60aa0dddc9 100644 --- a/jstests/noPassthrough/sample_pushdown_transaction.js +++ b/jstests/noPassthrough/sample_pushdown_transaction.js @@ -5,10 +5,7 @@ * Requires random cursor support. * @tags: [requires_replication] */ -(function() { -'use strict'; - -load('jstests/libs/analyze_plan.js'); // For planHasStage. +import {aggPlanHasStage} from "jstests/libs/analyze_plan.js"; // Set up. const rst = new ReplSetTest({nodes: 1}); @@ -48,5 +45,4 @@ assert.gt(randDocs.length, 0, tojson(randDocs)); // Clean up. assert.commandWorked(session.abortTransaction_forTesting()); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/sample_pushdown_with_set_cluster_param.js b/jstests/noPassthrough/sample_pushdown_with_set_cluster_param.js index 5af0867d12446..65359760c5dbb 100644 --- a/jstests/noPassthrough/sample_pushdown_with_set_cluster_param.js +++ b/jstests/noPassthrough/sample_pushdown_with_set_cluster_param.js @@ -5,10 +5,7 @@ * Requires random cursor support. * @tags: [requires_replication] */ -(function() { -'use strict'; - -load('jstests/libs/analyze_plan.js'); // For planHasStage. +import {aggPlanHasStage} from "jstests/libs/analyze_plan.js"; const numDocs = 1000; const sampleSize = numDocs * .06; @@ -99,5 +96,4 @@ const pipeline = [{$sample: {size: sampleSize}}, {$match: {a: {$gte: 0}}}]; })(); // // Clean up. -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/sbe_multiplanner_trial_termination.js b/jstests/noPassthrough/sbe_multiplanner_trial_termination.js index 0be140981edcd..3c147be29c441 100644 --- a/jstests/noPassthrough/sbe_multiplanner_trial_termination.js +++ b/jstests/noPassthrough/sbe_multiplanner_trial_termination.js @@ -3,10 +3,7 @@ * demonstrates that unlike the classic multiplanner, the SBE multiplanner's end condition is by * default not proportional to the size of the collection. */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'. 
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const numDocs = 1000; const dbName = "sbe_multiplanner_db"; @@ -28,7 +25,7 @@ const db = conn.getDB(dbName); if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } const coll = db[collName]; @@ -119,5 +116,4 @@ assert.commandWorked(db.adminCommand({setParameter: 1, [collFracKnobSbe]: defaul allPlans = getAllPlansExecution("2"); verifySbeNumReads(allPlans, trialLengthFromCollFrac); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/sbe_plan_cache_api_version.js b/jstests/noPassthrough/sbe_plan_cache_api_version.js index 2a3a0d184e3fa..431cb01c345e7 100644 --- a/jstests/noPassthrough/sbe_plan_cache_api_version.js +++ b/jstests/noPassthrough/sbe_plan_cache_api_version.js @@ -7,11 +7,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod({}); assert.neq(conn, null, "mongod failed to start"); @@ -145,5 +141,4 @@ for (const testcase of testcases) { }); } -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js index 09b94049c97dc..622fc4d1c99a0 100644 --- a/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js +++ b/jstests/noPassthrough/sbe_plan_cache_clear_on_param_change.js @@ -3,14 +3,11 @@ * cache is cleared. * @tags: [ * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); +import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // Lists the names of the setParameters which should result in the SBE plan cache being cleared when // the parameter is modified. 
Along with each parameter, includes a valid new value of the parameter @@ -48,6 +45,15 @@ const paramList = [ {name: "internalQueryColumnScanMinAvgDocSizeBytes", value: 2048}, {name: "internalQueryColumnScanMinCollectionSizeBytes", value: 2048}, {name: "internalQueryColumnScanMinNumColumnFilters", value: 5}, + {name: "internalQueryCardinalityEstimatorMode", value: "sampling"}, + {name: "internalCascadesOptimizerDisableScan", value: true}, + {name: "internalCascadesOptimizerDisableIndexes", value: true}, + {name: "internalCascadesOptimizerDisableMergeJoinRIDIntersect", value: true}, + {name: "internalCascadesOptimizerDisableHashJoinRIDIntersect", value: true}, + {name: "internalCascadesOptimizerDisableGroupByAndUnionRIDIntersect", value: true}, + {name: "internalCascadesOptimizerFastIndexNullHandling", value: true}, + {name: "internalCascadesOptimizerMinIndexEqPrefixes", value: 2}, + {name: "internalCascadesOptimizerMaxIndexEqPrefixes", value: 2}, ]; const conn = MongoRunner.runMongod(); @@ -60,7 +66,7 @@ const db = conn.getDB(dbName); if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } assert.commandWorked(db.dropDatabase()); @@ -106,4 +112,3 @@ for (let param of paramList) { } MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/sbe_plan_cache_key_reporting.js b/jstests/noPassthrough/sbe_plan_cache_key_reporting.js index 0cf0546a6bd18..06e68b4286103 100644 --- a/jstests/noPassthrough/sbe_plan_cache_key_reporting.js +++ b/jstests/noPassthrough/sbe_plan_cache_key_reporting.js @@ -7,13 +7,10 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; load("jstests/libs/log.js"); load("jstests/libs/profiler.js"); -load("jstests/libs/sbe_util.js"); +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod({}); assert.neq(conn, null, "mongod failed to start"); @@ -23,7 +20,7 @@ const coll = db.coll; if (!checkSBEEnabled(db)) { jsTest.log("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } assert.commandWorked(db.createCollection(coll.getName())); @@ -224,4 +221,3 @@ function assertQueryHashAndPlanCacheKey(sbe, classic) { })(); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js b/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js index d07a44560026d..3194fa69e88b7 100644 --- a/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js +++ b/jstests/noPassthrough/sbe_plan_cache_memory_debug_info.js @@ -6,14 +6,11 @@ * stripping debug info even though the size of the classic cache may be below the threshold. * @tags: [ * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod({}); assert.neq(conn, null, "mongod failed to start"); @@ -22,7 +19,7 @@ const db = conn.getDB("sbe_plan_cache_memory_debug_info"); if (!checkSBEEnabled(db)) { jsTest.log("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } function createTestCollection(collectionName) { @@ -103,4 +100,3 @@ assert.eq(0, classicColl.find({a: 2, b: 4}).itcount()); assertCacheEntryIsMissingDebugInfo(classicColl, {a: 2, b: 4}); MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/sbe_plan_cache_size_metric.js b/jstests/noPassthrough/sbe_plan_cache_size_metric.js index 102a971727097..9692e6864f45e 100644 --- a/jstests/noPassthrough/sbe_plan_cache_size_metric.js +++ b/jstests/noPassthrough/sbe_plan_cache_size_metric.js @@ -9,15 +9,12 @@ * assumes_balancer_off, * does_not_support_stepdowns, * # TODO SERVER-67607: Test plan cache with CQF enabled. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'. -load("jstests/libs/analyze_plan.js"); // For 'getQueryHashFromExplain()'. +import {getQueryHashFromExplain} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); assert.neq(conn, null, "mongod failed to start"); @@ -26,7 +23,7 @@ const db = conn.getDB("sbe_plan_cache_size_metric"); if (!checkSBEEnabled(db)) { jsTest.log("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } function getCacheEntriesByQueryHashKey(coll, queryHash) { @@ -108,5 +105,4 @@ assert.commandWorked(db.runCommand({planCacheClear: collectionName, query: sbeQu // Assert metric is decremented back to initial value. assert.eq(initialPlanCacheSize, getPlanCacheSize()); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js b/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js index c1fef519d4acd..8869b0dbb4fc3 100644 --- a/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js +++ b/jstests/noPassthrough/server_parameter_fcv_upgrade_downgrade.js @@ -1,9 +1,6 @@ // Test server parameter behavior upon FCV downgrade/upgrade. -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; function assertParamExistenceInGetParamStar(output, param, expected) { if (output.hasOwnProperty('clusterParameters')) { @@ -193,5 +190,4 @@ function runDowngradeUpgradeTestForCWSP(conn, isMongod, isStandalone, verifyStat s.s0, false /* isMongod */, false /* isStandalone */, verifyParameterState); s.stop(); jsTest.log('END sharding'); -} -})(); +} \ No newline at end of file diff --git a/jstests/noPassthrough/server_status_change_stream_metrics.js b/jstests/noPassthrough/server_status_change_stream_metrics.js new file mode 100644 index 0000000000000..fd9feb89a1d3e --- /dev/null +++ b/jstests/noPassthrough/server_status_change_stream_metrics.js @@ -0,0 +1,42 @@ +/** + * Tests for serverStatus metrics about change streams. 
+ */ +(function() { +"use strict"; + +function getChangeStreamMetrics(db) { + const metrics = db.serverStatus().metrics; + return { + total: metrics.aggStageCounters["$changeStream"], + withExpandedEvents: metrics.changeStreams.showExpandedEvents, + }; +} + +function checkChangeStreamMetrics(db, expectedTotal, expectedWithExpandedEvents) { + const metrics = getChangeStreamMetrics(db); + assert.eq(expectedTotal, metrics.total); + assert.eq(expectedWithExpandedEvents, metrics.withExpandedEvents); +} + +const rst = new ReplSetTest({name: jsTest.name(), nodes: 1}); +rst.startSet(); +rst.initiate(); +const db = rst.getPrimary().getDB(jsTest.name()); +const coll = db.getCollection(jsTest.name()); + +checkChangeStreamMetrics(db, 0, 0); + +db.coll.aggregate([{$changeStream: {}}]); +checkChangeStreamMetrics(db, 1, 0); + +db.coll.aggregate([{$changeStream: {showExpandedEvents: true}}]); +checkChangeStreamMetrics(db, 2, 1); + +db.coll.explain().aggregate([{$changeStream: {}}]); +checkChangeStreamMetrics(db, 3, 1); + +db.coll.explain().aggregate([{$changeStream: {showExpandedEvents: true}}]); +checkChangeStreamMetrics(db, 4, 2); + +rst.stopSet(); +}()); diff --git a/jstests/noPassthrough/server_status_metrics_hello_command.js b/jstests/noPassthrough/server_status_metrics_hello_command.js index 8aacfedae2f06..1e06f8f1060f1 100644 --- a/jstests/noPassthrough/server_status_metrics_hello_command.js +++ b/jstests/noPassthrough/server_status_metrics_hello_command.js @@ -8,37 +8,40 @@ const mongod = MongoRunner.runMongod(); const dbName = "server_status_metrics_hello_command"; const db = mongod.getDB(dbName); let serverStatusMetrics = db.serverStatus().metrics.commands; -const initialIsMasterTotal = serverStatusMetrics.isMaster.total; -const initialHelloTotal = 0; + +function getCommandCount(cmdName) { + return serverStatusMetrics.hasOwnProperty(cmdName) ? serverStatusMetrics[cmdName].total : 0; +} + +let currentIsMasterTotal = getCommandCount("isMaster"); +let currentHelloTotal = getCommandCount("hello"); // Running hello command. jsTestLog("Running hello command"); assert.commandWorked(db.runCommand({hello: 1})); serverStatusMetrics = db.serverStatus().metrics.commands; +assert.eq(getCommandCount("hello"), currentHelloTotal + 1, "commands.hello should increment"); +++currentHelloTotal; assert.eq( - serverStatusMetrics.hello.total, initialHelloTotal + 1, "commands.hello should increment"); -assert.eq(serverStatusMetrics.isMaster.total, - initialIsMasterTotal, - "commands.isMaster should not increment"); + getCommandCount("isMaster"), currentIsMasterTotal, "commands.isMaster should not increment"); // Running isMaster command. jsTestLog("Running isMaster command"); assert.commandWorked(db.runCommand({isMaster: 1})); serverStatusMetrics = db.serverStatus().metrics.commands; +assert.eq(getCommandCount("hello"), currentHelloTotal, "commands.hello should not increment"); assert.eq( - serverStatusMetrics.hello.total, initialHelloTotal + 1, "commands.hello should not increment"); -assert.eq(serverStatusMetrics.isMaster.total, - initialIsMasterTotal + 1, - "commands.isMaster should increment"); + getCommandCount("isMaster"), currentIsMasterTotal + 1, "commands.isMaster should increment"); +++currentIsMasterTotal; // Running ismaster command. 
jsTestLog("Running ismaster command"); assert.commandWorked(db.runCommand({ismaster: 1})); serverStatusMetrics = db.serverStatus().metrics.commands; +assert.eq(getCommandCount("hello"), currentHelloTotal, "commands.hello should not increment"); assert.eq( - serverStatusMetrics.hello.total, initialHelloTotal + 1, "commands.hello should not increment"); -assert.eq(serverStatusMetrics.isMaster.total, - initialIsMasterTotal + 2, - "commands.isMaster should increment"); + getCommandCount("isMaster"), currentIsMasterTotal + 1, "commands.isMaster should increment"); +++currentIsMasterTotal; + MongoRunner.stopMongod(mongod); })(); diff --git a/jstests/noPassthrough/server_status_multiplanner.js b/jstests/noPassthrough/server_status_multiplanner.js index db4c528f031ff..0f06a32ba37b6 100644 --- a/jstests/noPassthrough/server_status_multiplanner.js +++ b/jstests/noPassthrough/server_status_multiplanner.js @@ -1,9 +1,6 @@ /** * Tests the serverStatus and FTDC metrics for multi planner execution (both classic and SBE). */ -(function() { -"use strict"; - function sumHistogramBucketCounts(histogram) { let sum = 0; for (const bucket of histogram) { @@ -13,7 +10,7 @@ function sumHistogramBucketCounts(histogram) { } load("jstests/libs/ftdc.js"); -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled()'. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const collName = jsTestName(); const dbName = jsTestName(); @@ -27,7 +24,7 @@ const db = conn.getDB(dbName); if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); MongoRunner.stopMongod(conn); - return; + quit(); } let coll = db.getCollection(collName); @@ -109,5 +106,4 @@ assert.soon(() => { return true; }, "FTDC output should eventually reflect observed serverStatus metrics."); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/server_transaction_metrics_secondary.js b/jstests/noPassthrough/server_transaction_metrics_secondary.js index a6a03f760f5c0..de936fb4ee449 100644 --- a/jstests/noPassthrough/server_transaction_metrics_secondary.js +++ b/jstests/noPassthrough/server_transaction_metrics_secondary.js @@ -1,6 +1,8 @@ // Test that transactions run on secondaries do not change the serverStatus transaction metrics. // @tags: [ // uses_transactions, +// # Tests running with experimental CQF behavior require test commands to be enabled. +// cqf_experimental_incompatible, // ] (function() { "use strict"; diff --git a/jstests/noPassthrough/setparameter_config_alias_not_overwritten_by_default.js b/jstests/noPassthrough/setparameter_config_alias_not_overwritten_by_default.js new file mode 100644 index 0000000000000..9275e18f12d32 --- /dev/null +++ b/jstests/noPassthrough/setparameter_config_alias_not_overwritten_by_default.js @@ -0,0 +1,34 @@ +// Verify setParameters paramaters which are an alias to a config parameter do not have the value +// passed with setParameter as a startup argument overwritten by the config default. 
+ +(function() { +'use strict'; + +const defaultsConn = MongoRunner.runMongod(); +function getDefaultValue(parameterName) { + const res = + assert.commandWorked(defaultsConn.adminCommand({getParameter: 1, [parameterName]: 1})); + return res[parameterName]; +} + +let paramsDict = {}; +const parameters = ['journalCommitInterval', 'syncdelay']; +parameters.forEach(param => { + const defaultValue = getDefaultValue(param); + const setValue = defaultValue + 1; + paramsDict[param] = setValue; +}); +MongoRunner.stopMongod(defaultsConn); + +function runTestOnConn(conn, setParams) { + Object.keys(setParams).forEach(param => { + const res = assert.commandWorked(conn.adminCommand({getParameter: 1, [param]: 1})); + assert.eq(res[param], setParams[param]); + }); +} + +// Run the test on a standalone mongod. +const standaloneConn = MongoRunner.runMongod({setParameter: paramsDict}); +runTestOnConn(standaloneConn, paramsDict); +MongoRunner.stopMongod(standaloneConn); +}()); diff --git a/jstests/noPassthrough/shard_filtering.js b/jstests/noPassthrough/shard_filtering.js index 0883b2365d7ce..eb0beca6abc1c 100644 --- a/jstests/noPassthrough/shard_filtering.js +++ b/jstests/noPassthrough/shard_filtering.js @@ -5,13 +5,10 @@ * @tags: [ * requires_sharding, * # TODO SERVER-71169: Implement shard filtering for CQF. - * cqf_incompatible, + * cqf_experimental_incompatible, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {isIndexOnly, isIxscan, planHasStage} from "jstests/libs/analyze_plan.js"; // Deliberately inserts orphans outside of migration. TestData.skipCheckOrphans = true; @@ -149,5 +146,4 @@ assert.sameMembers( mongosColl.find({$or: [{a: 0, b: 0}, {a: 25, b: 0}]}, {_id: 0, a: 1, b: 1}).toArray(), [{a: 0, b: 0}]); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/shard_router_handle_staleconfig.js b/jstests/noPassthrough/shard_router_handle_staleconfig.js new file mode 100644 index 0000000000000..3804afa4b6cd7 --- /dev/null +++ b/jstests/noPassthrough/shard_router_handle_staleconfig.js @@ -0,0 +1,68 @@ +/** + * Tests that mongos can detect stale routing information before checking for UUID mismatches and + * redirect the request to the appropriate shard. + * + * @tags: [requires_sharding] + */ +(function() { +"use strict"; +const st = new ShardingTest({shards: 2, mongos: 2}); +const dbName = "db"; + +function checkCommand(cmd, collName, withinTransaction) { + const db = st.getDB(dbName); + const coll = db[collName]; + coll.drop(); + + // Create a sharded collection and move it to the secondary shard. + assert.commandWorked(st.s0.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}})); + const nonPrimaryShard = st.getOther(st.getPrimaryShard(dbName)).name; + assert.commandWorked(st.s0.adminCommand( + {moveChunk: `${dbName}.${collName}`, find: {a: 0}, to: nonPrimaryShard})); + // We now proceed to insert one document on each mongos connection. This will register cache + // information about where to route the requests to that particular shard key. + let i = 0; + st.forEachMongos(mongos => { + mongos.getDB(dbName)[collName].insert({a: 0, x: i++}); + }); + + let session; + if (withinTransaction) { + session = st.s1.getDB(dbName).getMongo().startSession(); + session.startTransaction({readConcern: {level: "snapshot"}}); + } + // Drop and recreate the collection on the primary shard. Now the collection resides on the + // primary shard rather than the secondary. 
Note that we are only doing this in one mongos so + // that the other one has stale information. + const sDb = st.s0.getDB(dbName); + assert.commandWorked(sDb.runCommand({drop: coll.getName()})); + assert.commandWorked(sDb.adminCommand({shardCollection: coll.getFullName(), key: {a: 1}})); + const newUuid = sDb.getCollectionInfos({name: coll.getName()})[0].info.uuid; + + // Proceed to make a request on the other mongos for the new collection. We expect this request + // to get sent to the wrong shard as the router is stale. mongos should detect this and retry + // the request with the correct shard. No exception should be passed to the user in this case. + if (withinTransaction) { + const sessionColl = session.getDatabase(dbName).getCollection(collName); + assert.commandWorked(sessionColl.runCommand(Object.extend(cmd, {collectionUUID: newUuid}))); + session.commitTransaction(); + } else { + assert.commandWorked(st.s1.getDB(dbName)[collName].runCommand( + Object.extend(cmd, {collectionUUID: newUuid}))); + } +} + +let collName = jsTestName() + "_find"; +checkCommand({find: collName, filter: {}}, collName, false); +checkCommand({find: collName, filter: {}}, collName, true); +collName = jsTestName() + "_insert"; +checkCommand({insert: collName, documents: [{x: 1}]}, collName, false); +checkCommand({insert: collName, documents: [{x: 1}]}, collName, true); +collName = jsTestName() + "_agg"; +checkCommand( + {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}}, collName, false); +checkCommand( + {aggregate: collName, pipeline: [{$match: {x: 1}}], cursor: {batchSize: 10}}, collName, true); + +st.stop(); +})(); diff --git a/jstests/noPassthrough/sharded_distinct.js b/jstests/noPassthrough/sharded_distinct.js index 4b50586e6fbf6..c57804cc20969 100644 --- a/jstests/noPassthrough/sharded_distinct.js +++ b/jstests/noPassthrough/sharded_distinct.js @@ -20,4 +20,4 @@ assert.commandFailed(coll.runCommand("distinct", {help: helpFn, foo: 1})); assert.commandFailed(coll.runCommand( {explain: {distinct: coll.getName(), help: helpFn, foo: 1}, verbosity: 'queryPlanner'})); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/sharded_index_projection_verbatim_persistence.js b/jstests/noPassthrough/sharded_index_projection_verbatim_persistence.js index 042916e6f8061..de765e9df26ca 100644 --- a/jstests/noPassthrough/sharded_index_projection_verbatim_persistence.js +++ b/jstests/noPassthrough/sharded_index_projection_verbatim_persistence.js @@ -9,10 +9,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; const st = new ShardingTest({shards: 3, rs: {nodes: 1}}); const dbName = "test"; @@ -75,5 +72,4 @@ if (setUpServerForColumnStoreIndexTest(st.s.getDB(dbName))) { assert.eq(catEntry.columnstoreProjection, kProjectionDoc, shardCatalogs); } } -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/sharded_timeseries_bucketing_parameters_downgrade.js b/jstests/noPassthrough/sharded_timeseries_bucketing_parameters_downgrade.js deleted file mode 100644 index 0e1b77aff6259..0000000000000 --- a/jstests/noPassthrough/sharded_timeseries_bucketing_parameters_downgrade.js +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Tests that bucketing parameters are disallowed after downgrading to versions where the parameters - * are not supported. 
- */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/catalog_shard_util.js"); -load("jstests/libs/fail_point_util.js"); - -const dbName = 'testDB'; -const collName = 'testColl'; -const timeField = 'tm'; -const metaField = 'mt'; - -const st = new ShardingTest({shards: 2}); -const mongos = st.s0; - -function useBucketingParametersOnLowerFCV() { - const db = mongos.getDB(dbName); - if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) { - jsTestLog( - "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled."); - return; - } - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - - const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st); - - let coll = db.getCollection(collName); - coll.drop(); - assert.commandWorked(db.createCollection(collName, { - timeseries: { - timeField: timeField, - metaField: metaField, - bucketMaxSpanSeconds: 60, - bucketRoundingSeconds: 60 - } - })); - - const configDirectDb = st.configRS.getPrimary().getDB(dbName); - const configDirectColl = configDirectDb.getCollection(collName); - if (isCatalogShardEnabled) { - // Verify we cannot downgrade if the config server has a timeseries collection with - // bucketing. - assert.commandWorked(configDirectDb.createCollection(collName, { - timeseries: { - timeField: timeField, - metaField: metaField, - bucketMaxSpanSeconds: 60, - bucketRoundingSeconds: 60 - } - })); - } - - // On the latestFCV, we should not be able to use collMod with incomplete bucketing parameters. - assert.commandFailedWithCode( - db.runCommand({collMod: collName, timeseries: {bucketMaxSpanSeconds: 3600}}), - ErrorCodes.InvalidOptions); - - // We should fail to downgrade if we have a collection with custom bucketing parameters set. - assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}), - ErrorCodes.CannotDowngrade); - - coll = db.getCollection(collName); - coll.drop(); - - if (isCatalogShardEnabled) { - // We should still fail to downgrade if we have a collection on the config server with - // custom bucketing parameters set. - assert.commandFailedWithCode( - mongos.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}), - ErrorCodes.CannotDowngrade); - - configDirectColl.drop(); - } - - // Successfully downgrade to latest FCV. - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - - // On the latest FCV, we should not be able to create a collection with bucketing parameters. - assert.commandFailedWithCode(db.createCollection(collName, { - timeseries: { - timeField: timeField, - metaField: metaField, - bucketMaxSpanSeconds: 60, - bucketRoundingSeconds: 60 - } - }), - ErrorCodes.InvalidOptions); - - assert.commandWorked( - db.createCollection(collName, {timeseries: {timeField: timeField, metaField: metaField}})); - - // On the latest FCV we should not be able to use collMod with the bucketing parameters. - assert.commandFailedWithCode(db.runCommand({ - collMod: collName, - timeseries: {bucketMaxSpanSeconds: 3600, bucketRoundingSeconds: 3600} - }), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode( - db.runCommand({collMod: collName, timeseries: {bucketMaxSpanSeconds: 3600}}), - ErrorCodes.InvalidOptions); - assert.commandFailedWithCode( - db.runCommand({collMod: collName, timeseries: {bucketRoundingSeconds: 3600}}), - ErrorCodes.InvalidOptions); - - // Verify the time-series options are valid. 
- let collections = assert.commandWorked(db.runCommand({listCollections: 1})).cursor.firstBatch; - let collectionEntry = collections.find(entry => entry.name === 'system.buckets.' + collName); - assert(collectionEntry); - - assert.eq(collectionEntry.options.timeseries.granularity, "seconds"); - // Downgrading does not remove the 'bucketMaxSpanSeconds' parameter. It should correspond with - // the "seconds" granularity. - assert.eq(collectionEntry.options.timeseries.bucketMaxSpanSeconds, 3600); - assert.isnull(collectionEntry.options.timeseries.bucketRoundingSeconds); -} - -useBucketingParametersOnLowerFCV(); - -st.stop(); -})(); diff --git a/jstests/noPassthrough/shell_bson_obj_to_array.js b/jstests/noPassthrough/shell_bson_obj_to_array.js index bc49396c27bb2..af3a7b1f77f01 100644 --- a/jstests/noPassthrough/shell_bson_obj_to_array.js +++ b/jstests/noPassthrough/shell_bson_obj_to_array.js @@ -30,4 +30,4 @@ tests.forEach((test) => { }); MongoRunner.stopMongod(conn); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/shell_can_use_read_concern.js b/jstests/noPassthrough/shell_can_use_read_concern.js index 8f092efcf25ec..3d1ae1214f60d 100644 --- a/jstests/noPassthrough/shell_can_use_read_concern.js +++ b/jstests/noPassthrough/shell_can_use_read_concern.js @@ -202,4 +202,4 @@ runTests({withSession: false}); runTests({withSession: true}); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/shutdown_with_fsync.js b/jstests/noPassthrough/shutdown_with_fsync.js new file mode 100644 index 0000000000000..42bd97311b2ec --- /dev/null +++ b/jstests/noPassthrough/shutdown_with_fsync.js @@ -0,0 +1,29 @@ +/** + * Tests that shutdown can succeed even if the server is fsync locked. + */ + +(function() { +"use strict"; + +const conn = MongoRunner.runMongod(); +assert.neq(conn, null); + +const dbName = jsTestName(); +const collName = "testColl"; +const testDB = conn.getDB(dbName); +const testColl = testDB.getCollection(collName); + +jsTestLog("Insert some data to create a collection."); +assert.commandWorked(testColl.insert({x: 1})); + +jsTestLog("Set fsync lock to block server writes. Create some nesting for extra test coverage"); +testDB.fsyncLock(); +testDB.fsyncLock(); + +jsTestLog("Check that the fsync lock is working: no writes should be possible."); +assert.commandFailed(testDB.runCommand({insert: collName, documents: [{z: 1}], maxTimeMS: 30})); + +jsTestLog("Check that shutdown can succeed with an fsync lock: the fsync lock should be cleared."); +// Skipping validation because the fsync lock causes the validate command to hang. +MongoRunner.stopMongod(conn, null, {skipValidation: true}); +}()); diff --git a/jstests/noPassthrough/slow_query_log_stats_not_block_on_RSTL.js b/jstests/noPassthrough/slow_query_log_stats_not_block_on_RSTL.js new file mode 100644 index 0000000000000..1065962b072a7 --- /dev/null +++ b/jstests/noPassthrough/slow_query_log_stats_not_block_on_RSTL.js @@ -0,0 +1,61 @@ +/** + * Tests that storage stats reporting on slow query logging does not acquire the RSTL. 
+ * + * @tags: [ + * requires_replication, + * ] + */ +(function() { +"use strict"; + +load("jstests/libs/parallel_shell_helpers.js"); // startParallelShell +load("jstests/libs/wait_for_command.js"); // waitForCommand + +const rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); +const testDB = rst.getPrimary().getDB("test"); +const testCollection = testDB.getCollection("c"); + +const fieldValue = "slow query logging reporting storage statistics"; + +assert.commandWorked(testCollection.insertOne({a: fieldValue})); + +jsTestLog("Starting the sleep command in a parallel thread to take the RSTL MODE_X lock"); +let rstlXLockSleepJoin = startParallelShell(() => { + jsTestLog("Parallel Shell: about to start sleep command"); + assert.commandFailedWithCode(db.adminCommand({ + sleep: 1, + secs: 60 * 60, + // RSTL MODE_X lock. + lockTarget: "RSTL", + $comment: "RSTL lock sleep" + }), + ErrorCodes.Interrupted); +}, testDB.getMongo().port); + +jsTestLog("Waiting for the sleep command to start and fetch the opID"); +const sleepCmdOpID = + waitForCommand("RSTL lock", op => (op["command"]["$comment"] == "RSTL lock sleep"), testDB); + +jsTestLog("Wait for the sleep command to log that the RSTL MODE_X lock was acquired"); +checkLog.containsJson(testDB, 6001600); + +try { + jsTestLog("Running the query while the RSTL is being held"); + + // Log any query regardless of its completion time. + assert.commandWorked(testDB.setProfilingLevel(0, -1)); + + const loggedQuery = RegExp("Slow query.*\"find\":\"c\".*" + fieldValue + ".*\"storage\":{"); + assert.eq(false, checkLog.checkContainsOnce(rst.getPrimary(), loggedQuery)); + assert.eq(1, testCollection.find({a: fieldValue}).itcount()); + assert.eq(true, checkLog.checkContainsOnce(rst.getPrimary(), loggedQuery)); +} finally { + jsTestLog("Ensure the sleep cmd releases the lock so that the server can shutdown"); + assert.commandWorked(testDB.killOp(sleepCmdOpID)); // kill the sleep cmd + rstlXLockSleepJoin(); // wait for the thread running the sleep cmd to finish +} + +rst.stopSet(); +})(); diff --git a/jstests/noPassthrough/sort_spill_estimate_data_size.js b/jstests/noPassthrough/sort_spill_estimate_data_size.js index 9f7fc62bac608..a0cd67dd039ef 100644 --- a/jstests/noPassthrough/sort_spill_estimate_data_size.js +++ b/jstests/noPassthrough/sort_spill_estimate_data_size.js @@ -3,9 +3,7 @@ * * This test was originally designed to reproduce SERVER-53760. */ -(function() { -"use strict"; -load('jstests/libs/analyze_plan.js'); // For 'getAggPlanStage()'. +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod was unable to start up"); @@ -69,5 +67,4 @@ assert.lt(dataBytesSorted, 3 * totalSize, explain); assert.eq(createPipeline(coll).toArray(), [{_id: 0, sumTop900UniqueValues: 94550}], explain); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/spill_to_disk_secondary_read.js b/jstests/noPassthrough/spill_to_disk_secondary_read.js index f143aa8852422..771fd5a630123 100644 --- a/jstests/noPassthrough/spill_to_disk_secondary_read.js +++ b/jstests/noPassthrough/spill_to_disk_secondary_read.js @@ -3,11 +3,8 @@ * writeConcern greater than w:1. * @tags: [requires_replication, requires_majority_read_concern, requires_persistence] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
+import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const kNumNodes = 3; const replTest = new ReplSetTest({ @@ -214,4 +211,3 @@ const readColl = secondary.getDB("test").foo; })(); replTest.stopSet(); -})(); diff --git a/jstests/noPassthrough/ssl_cipher_default.js b/jstests/noPassthrough/ssl_cipher_default.js index d1a6f6fd5a415..4b5f1a32e1e1f 100644 --- a/jstests/noPassthrough/ssl_cipher_default.js +++ b/jstests/noPassthrough/ssl_cipher_default.js @@ -46,4 +46,4 @@ assertCorrectConfig({ tlsCipherConfig: "HIGH" }, "HIGH"); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/standalone_cluster_parameters.js b/jstests/noPassthrough/standalone_cluster_parameters.js index 7ab53f704c539..6a962923cff6b 100644 --- a/jstests/noPassthrough/standalone_cluster_parameters.js +++ b/jstests/noPassthrough/standalone_cluster_parameters.js @@ -1,14 +1,15 @@ /** * Checks that set/getClusterParameter run as expected on standalone. * @tags: [ - * # Standalone cluster parameters enabled only under this flag. - * featureFlagAuditConfigClusterParameter, + * # Standalone cluster parameters enabled in 7.1+. + * requires_fcv_71, * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + setupNode, + testInvalidClusterParameterCommands, + testValidClusterParameterCommands, +} from "jstests/libs/cluster_server_parameter_utils.js"; const conn = MongoRunner.runMongod({}); @@ -21,5 +22,4 @@ testInvalidClusterParameterCommands(conn); // Then, ensure that set/getClusterParameter set and retrieve expected values. testValidClusterParameterCommands(conn); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/start_up_with_custom_cost_model.js b/jstests/noPassthrough/start_up_with_custom_cost_model.js index 4158eee4c94b1..6fda80e2193d5 100644 --- a/jstests/noPassthrough/start_up_with_custom_cost_model.js +++ b/jstests/noPassthrough/start_up_with_custom_cost_model.js @@ -1,10 +1,11 @@ /** * Tests that 'internalCostModelCoefficients' can be set on startup. */ -(function() { -'use strict'; - -load("jstests/libs/optimizer_utils.js"); // For checkCascadesOptimizerEnabled. +import { + assertValueOnPath, + checkCascadesOptimizerEnabled, + navigateToPlanPath +} from "jstests/libs/optimizer_utils.js"; function getScanCostWith(customScanCost) { const costStr = `{"scanIncrementalCost": ${customScanCost}}`; @@ -51,8 +52,7 @@ function getScanCostWith(customScanCost) { const scanCost1 = getScanCostWith(0.2); const scanCost2 = getScanCostWith(0.4); if (scanCost1 === undefined) { - return; + quit(); } -assert.lt(scanCost1, scanCost2); -}()); +assert.lt(scanCost1, scanCost2); \ No newline at end of file diff --git a/jstests/noPassthrough/store_retryable_find_and_modify_images_in_side_collection.js b/jstests/noPassthrough/store_retryable_find_and_modify_images_in_side_collection.js index fde47c8956d9e..cb05e40fdc8e5 100644 --- a/jstests/noPassthrough/store_retryable_find_and_modify_images_in_side_collection.js +++ b/jstests/noPassthrough/store_retryable_find_and_modify_images_in_side_collection.js @@ -1,6 +1,6 @@ /** * Test that retryable findAndModify commands will store pre- and post- images in the appropriate - * collections for `storeFindAndModifyImagesInSideCollection=true`. + * collections. 
* * @tags: [requires_replication] */ @@ -56,9 +56,6 @@ function assertRetryCommand(cmdResponse, retryResponse) { } function checkProfilingLogs(primary) { - assert.commandWorked( - primary.adminCommand({setParameter: 1, storeFindAndModifyImagesInSideCollection: true})); - let db = primary.getDB('for_profiling'); let configDB = primary.getDB('config'); assert.commandWorked(db.user.insert({_id: 1})); @@ -98,9 +95,6 @@ function checkProfilingLogs(primary) { } function runTests(lsid, mainConn, primary, secondary, docId) { - const setParam = {setParameter: 1, storeFindAndModifyImagesInSideCollection: true}; - primary.adminCommand(setParam); - let txnNumber = NumberLong(docId); let incrementTxnNumber = function() { txnNumber = NumberLong(txnNumber + 1); diff --git a/jstests/noPassthrough/supports_read_concern_majority.js b/jstests/noPassthrough/supports_read_concern_majority.js index 5bf5bba6fa869..dc8af14014d52 100644 --- a/jstests/noPassthrough/supports_read_concern_majority.js +++ b/jstests/noPassthrough/supports_read_concern_majority.js @@ -8,4 +8,4 @@ const conn = MongoRunner.runMongod({enableMajorityReadConcern: false}); assert(!conn); const logContents = rawMongoProgramOutput(); assert(logContents.indexOf("enableMajorityReadConcern:false is no longer supported") > 0); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/telemetry/application_name_find.js b/jstests/noPassthrough/telemetry/application_name_find.js deleted file mode 100644 index 13c70e6d70f2d..0000000000000 --- a/jstests/noPassthrough/telemetry/application_name_find.js +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Test that applicationName and namespace appear in telemetry for the find command. - * @tags: [featureFlagTelemetry] - */ -(function() { -"use strict"; - -const kApplicationName = "MongoDB Shell"; -const kHashedApplicationName = "dXRuJCwctavU"; - -const getTelemetry = (conn, redactIdentifiers = false) => { - const result = assert.commandWorked(conn.adminCommand({ - aggregate: 1, - pipeline: [ - {$telemetry: {redactIdentifiers}}, - // Sort on telemetry key so entries are in a deterministic order. - {$sort: {key: 1}}, - {$match: {"key.applicationName": {$in: [kApplicationName, kHashedApplicationName]}}}, - {$match: {"key.find": {$exists: true}}} - ], - cursor: {batchSize: 10} - })); - return result.cursor.firstBatch; -}; - -// Turn on the collecting of telemetry metrics. 
-let options = { - setParameter: {internalQueryConfigureTelemetrySamplingRate: -1}, -}; - -const conn = MongoRunner.runMongod(options); -conn.setLogLevel(3, "query"); -const testDB = conn.getDB('test'); -var coll = testDB[jsTestName()]; -coll.drop(); - -coll.insert({v: 1}); -coll.insert({v: 2}); -coll.insert({v: 3}); - -coll.find({v: 1}).toArray(); - -let telemetry = getTelemetry(conn); -assert.eq(1, telemetry.length, telemetry); -assert.eq({ - cmdNs: {db: testDB.getName(), coll: coll.getName()}, - find: coll.getName(), - filter: {v: {"$eq": "?"}}, - applicationName: kApplicationName -}, - telemetry[0].key, - telemetry); - -telemetry = getTelemetry(conn, true); -assert.eq(1, telemetry.length, telemetry); -const hashedColl = "zF15YAUWbyIP"; -assert.eq({ - cmdNs: {db: "n4bQgYhMfWWa", coll: hashedColl}, - find: hashedColl, - filter: {"TJRIXgwhrmxB": {"$eq": "?"}}, - applicationName: kHashedApplicationName -}, - telemetry[0].key, - telemetry); - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/telemetry/clear_telemetry_store.js b/jstests/noPassthrough/telemetry/clear_telemetry_store.js deleted file mode 100644 index b2409cc0bbb60..0000000000000 --- a/jstests/noPassthrough/telemetry/clear_telemetry_store.js +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Test that the telemetry store can be cleared when the cache size is reset to 0. - * @tags: [featureFlagTelemetry] - */ -load("jstests/libs/telemetry_utils.js"); // For verifyMetrics. - -(function() { -"use strict"; - -// Turn on the collecting of telemetry metrics. -let options = { - setParameter: { - internalQueryConfigureTelemetrySamplingRate: -1, - internalQueryConfigureTelemetryCacheSize: "10MB" - }, -}; - -const conn = MongoRunner.runMongod(options); -const testDB = conn.getDB('test'); -var coll = testDB[jsTestName()]; -coll.drop(); - -let query = {}; -for (var j = 0; j < 10; ++j) { - query["foo.field.xyz." + j] = 1; - query["bar.field.xyz." + j] = 2; - query["baz.field.xyz." + j] = 3; - coll.aggregate([{$match: query}]).itcount(); -} - -// Confirm number of entries in the store and that none have been evicted. -let telemetryResults = testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]).toArray(); -assert.eq(telemetryResults.length, 10, telemetryResults); -assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 0); - -// Command to clear the cache. -assert.commandWorked( - testDB.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "0MB"})); - -// 10 regular queries plus the $telemetry query, means 11 entries evicted when the cache is cleared. -assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 11); - -// Calling $telemetry should fail when the telemetry store size is 0 bytes. -assert.throwsWithCode(() => testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]), 6579000); -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js b/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js deleted file mode 100644 index 8dee55a109bac..0000000000000 --- a/jstests/noPassthrough/telemetry/feature_flag_off_sampling_rate_on.js +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Test that calls to read from telemetry store fail when feature flag is turned off and sampling - * rate > 0. - */ -load('jstests/libs/analyze_plan.js'); -load("jstests/libs/feature_flag_util.js"); - -(function() { -"use strict"; - -// Set sampling rate to -1. 
-let options = { - setParameter: {internalQueryConfigureTelemetrySamplingRate: -1}, -}; -const conn = MongoRunner.runMongod(options); -const testdb = conn.getDB('test'); - -// This test specifically tests error handling when the feature flag is not on. -// TODO SERVER-65800 This test can be deleted when the feature is on by default. -if (!conn || FeatureFlagUtil.isEnabled(testdb, "Telemetry")) { - jsTestLog(`Skipping test since feature flag is disabled. conn: ${conn}`); - if (conn) { - MongoRunner.stopMongod(conn); - } - return; -} - -var coll = testdb[jsTestName()]; -coll.drop(); - -// Bulk insert documents to reduces roundtrips and make timeout on a slow machine less likely. -const bulk = coll.initializeUnorderedBulkOp(); -for (let i = 1; i <= 20; i++) { - bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)}); -} -assert.commandWorked(bulk.execute()); - -// Pipeline to read telemetry store should fail without feature flag turned on even though sampling -// rate is > 0. -assert.commandFailedWithCode( - testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}), - ErrorCodes.QueryFeatureNotAllowed); - -// Pipeline, with a filter, to read telemetry store fails without feature flag turned on even though -// sampling rate is > 0. -assert.commandFailedWithCode(testdb.adminCommand({ - aggregate: 1, - pipeline: [{$telemetry: {}}, {$match: {"key.find.find": {$eq: "###"}}}], - cursor: {} -}), - ErrorCodes.QueryFeatureNotAllowed); - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/telemetry/redactFieldnames_parameter.js b/jstests/noPassthrough/telemetry/redactFieldnames_parameter.js deleted file mode 100644 index ca2a9aa348d51..0000000000000 --- a/jstests/noPassthrough/telemetry/redactFieldnames_parameter.js +++ /dev/null @@ -1,95 +0,0 @@ -/** - * Test that the $telemetry.redactionFieldNames parameter correctly sets the redaction stratgey for - * telemetry store keys. - * @tags: [featureFlagTelemetry] - */ -(function() { -"use strict"; - -load("jstests/aggregation/extras/utils.js"); // For assertAdminDBErrCodeAndErrMsgContains. - -// Turn on the collecting of telemetry metrics. -let options = { - setParameter: {internalQueryConfigureTelemetrySamplingRate: -1}, -}; - -const conn = MongoRunner.runMongod(options); -const testDB = conn.getDB('test'); -var coll = testDB[jsTestName()]; -coll.drop(); - -coll.aggregate([{$sort: {bar: -1}}, {$limit: 2}, {$match: {foo: {$lte: 2}}}]); -// Default is no redaction. -let telStore = assert.commandWorked( - testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}})); -assert.eq(telStore.cursor.firstBatch[0]["key"]["pipeline"], - [{"$sort": {"bar": "###"}}, {"$limit": "###"}, {"$match": {"foo": {"$lte": "###"}}}]); - -// Turning on redaction should redact field names on all entries, even previously cached ones. -telStore = assert.commandWorked(testDB.adminCommand( - {aggregate: 1, pipeline: [{$telemetry: {redactIdentifiers: true}}], cursor: {}})); -telStore.cursor.firstBatch.forEach(element => { - // Find the non-telemetry query and check its key to assert it matches requested redaction - // strategy. - if (!telStore.cursor.firstBatch[0]["key"]["pipeline"][0]["$telemetry"]) { - assert.eq(telStore.cursor.firstBatch[0]["key"]["pipeline"], [ - {"$sort": {"/N4rLtula/QI": "###"}}, - {"$limit": "###"}, - {"$match": {"LCa0a2j/xo/5": {"TmACc7vp8cv6": "###"}}} - ]); - } -}); - -// Turning redaction back off should preserve field names on all entries, even previously cached -// ones. 
-telStore = assert.commandWorked( - testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}})); -telStore.cursor.firstBatch.forEach(element => { - // Find the non-telemetry query and check its key to assert it matches requested redaction - // strategy. - if (!telStore.cursor.firstBatch[0]["key"]["pipeline"][0]["$telemetry"]) { - assert.eq( - telStore.cursor.firstBatch[0]["key"]["pipeline"], - [{"$sort": {"bar": "###"}}, {"$limit": "###"}, {"$match": {"foo": {"$lte": "###"}}}]); - } -}); - -// Explicitly set redactIdentifiers to false. -telStore = assert.commandWorked(testDB.adminCommand( - {aggregate: 1, pipeline: [{$telemetry: {redactIdentifiers: false}}], cursor: {}})); -telStore.cursor.firstBatch.forEach(element => { - // Find the non-telemetry query and check its key to assert it matches requested redaction - // strategy. - if (!telStore.cursor.firstBatch[0]["key"]["pipeline"][0]["$telemetry"]) { - assert.eq( - telStore.cursor.firstBatch[0]["key"]["pipeline"], - [{"$sort": {"bar": "###"}}, {"$limit": "###"}, {"$match": {"foo": {"$lte": "###"}}}]); - } -}); - -// Wrong parameter name throws error. -let pipeline = [{$telemetry: {redactFields: true}}]; -assertAdminDBErrCodeAndErrMsgContains( - coll, - pipeline, - ErrorCodes.FailedToParse, - "$telemetry parameters object may only contain 'redactIdentifiers' option. Found: redactFields"); - -// Wrong parameter type throws error. -pipeline = [{$telemetry: {redactIdentifiers: 1}}]; -assertAdminDBErrCodeAndErrMsgContains( - coll, - pipeline, - ErrorCodes.FailedToParse, - "$telemetry redactIdentifiers parameter must be boolean. Found type: double"); - -// Parameter object with unrecognized key throws error. -pipeline = [{$telemetry: {redactIdentifiers: true, redactionStrategy: "on"}}]; -assertAdminDBErrCodeAndErrMsgContains( - coll, - pipeline, - ErrorCodes.FailedToParse, - "$telemetry parameters object may only contain one field, 'redactIdentifiers'. Found: { redactIdentifiers: true, redactionStrategy: \"on\" }"); - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js b/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js deleted file mode 100644 index 5882ab529f00a..0000000000000 --- a/jstests/noPassthrough/telemetry/telemetry_collect_on_mongos.js +++ /dev/null @@ -1,273 +0,0 @@ -/** - * Test that mongos is collecting telemetry metrics. - * @tags: [requires_fcv_70, featureFlagTelemetry] - */ - -load('jstests/libs/telemetry_utils.js'); - -(function() { -"use strict"; - -// Redacted literal replacement string. This may change in the future, so it's factored out. 
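The redaction switch exercised by the deleted redactFieldnames_parameter.js test is a per-read option on the $telemetry stage rather than a server parameter, so the same cached entries can be read either way. A short sketch of both read modes, assuming `testDB` belongs to a telemetry-enabled mongod (the redactIdentifiers option name is taken from the deleted test):

// Plain read: telemetry keys keep their real field names; literals are already normalized to "###"/"?".
let telStore = assert.commandWorked(
    testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}));
printjson(telStore.cursor.firstBatch);

// Redacted read: the same entries come back with field names hashed on the way out,
// so toggling the option does not rewrite what is stored.
telStore = assert.commandWorked(testDB.adminCommand(
    {aggregate: 1, pipeline: [{$telemetry: {redactIdentifiers: true}}], cursor: {}}));
printjson(telStore.cursor.firstBatch);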
-const aggRedactString = "###"; -const findRedactString = "?"; - -const setup = () => { - const st = new ShardingTest({ - mongos: 1, - shards: 1, - config: 1, - rs: {nodes: 1}, - mongosOptions: { - setParameter: { - internalQueryConfigureTelemetrySamplingRate: -1, - 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" - } - }, - }); - const mongos = st.s; - const db = mongos.getDB("test"); - const coll = db.coll; - coll.insert({v: 1}); - coll.insert({v: 4}); - return st; -}; - -const assertExpectedResults = (results, - expectedTelemetryKey, - expectedExecCount, - expectedDocsReturnedSum, - expectedDocsReturnedMax, - expectedDocsReturnedMin, - expectedDocsReturnedSumOfSq) => { - const {key, metrics} = results; - assert.eq(expectedTelemetryKey, key); - assert.eq(expectedExecCount, metrics.execCount); - assert.docEq({ - sum: NumberLong(expectedDocsReturnedSum), - max: NumberLong(expectedDocsReturnedMax), - min: NumberLong(expectedDocsReturnedMin), - sumOfSquares: NumberLong(expectedDocsReturnedSumOfSq) - }, - metrics.docsReturned); - - // This test can't predict exact timings, so just assert these three fields have been set (are - // non-zero). - const {firstSeenTimestamp, lastExecutionMicros, queryExecMicros} = metrics; - - assert.neq(timestampCmp(firstSeenTimestamp, Timestamp(0, 0)), 0); - assert.neq(lastExecutionMicros, NumberLong(0)); - - const distributionFields = ['sum', 'max', 'min', 'sumOfSquares']; - for (const field of distributionFields) { - assert.neq(queryExecMicros[field], NumberLong(0)); - } -}; - -// Assert that, for find queries, no telemetry results are written until a cursor has reached -// exhaustion; ensure accurate results once they're written. -{ - const st = setup(); - const db = st.s.getDB("test"); - const collName = "coll"; - const coll = db[collName]; - - const telemetryKey = { - cmdNs: {db: "test", coll: "coll"}, - find: collName, - filter: {$and: [{v: {$gt: findRedactString}}, {v: {$lt: findRedactString}}]}, - batchSize: findRedactString, - readConcern: {level: "local", provenance: "implicitDefault"}, - applicationName: "MongoDB Shell", - }; - - const cursor = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc - - // Since the cursor hasn't been exhausted yet, ensure no telemetry results have been written - // yet. - let telemetry = getTelemetry(db); - assert.eq(0, telemetry.length); - - // Run a getMore to exhaust the cursor, then ensure telemetry results have been written - // accurately. batchSize must be 2 so the cursor recognizes exhaustion. - assert.commandWorked(db.runCommand({ - getMore: cursor.getId(), - collection: coll.getName(), - batchSize: 2 - })); // returns 1 doc, exhausts the cursor - // The $telemetry query for the previous `getTelemetry` is included in this call to $telemetry. - telemetry = getTelemetry(db); - assert.eq(2, telemetry.length); - assertExpectedResults(telemetry[0], - telemetryKey, - /* expectedExecCount */ 1, - /* expectedDocsReturnedSum */ 2, - /* expectedDocsReturnedMax */ 2, - /* expectedDocsReturnedMin */ 2, - /* expectedDocsReturnedSumOfSq */ 4); - - // Run more queries (to exhaustion) with the same query shape, and ensure telemetry results are - // accurate. 
- coll.find({v: {$gt: 2, $lt: 3}}).batchSize(10).toArray(); // returns 0 docs - coll.find({v: {$gt: 0, $lt: 1}}).batchSize(10).toArray(); // returns 0 docs - coll.find({v: {$gt: 0, $lt: 2}}).batchSize(10).toArray(); // return 1 doc - telemetry = getTelemetry(db); - assert.eq(2, telemetry.length); - assertExpectedResults(telemetry[0], - telemetryKey, - /* expectedExecCount */ 4, - /* expectedDocsReturnedSum */ 3, - /* expectedDocsReturnedMax */ 2, - /* expectedDocsReturnedMin */ 0, - /* expectedDocsReturnedSumOfSq */ 5); - - st.stop(); -} - -// Assert that, for agg queries, no telemetry results are written until a cursor has reached -// exhaustion; ensure accurate results once they're written. -{ - const st = setup(); - const db = st.s.getDB("test"); - const coll = db.coll; - - const telemetryKey = { - pipeline: [ - {$match: {v: {$gt: aggRedactString, $lt: aggRedactString}}}, - {$project: {hello: aggRedactString}}, - ], - namespace: "test.coll", - applicationName: "MongoDB Shell" - }; - - const cursor = coll.aggregate( - [ - {$match: {v: {$gt: 0, $lt: 5}}}, - {$project: {hello: "$world"}}, - ], - {cursor: {batchSize: 1}}); // returns 1 doc - - // Since the cursor hasn't been exhausted yet, ensure no telemetry results have been written - // yet. - let telemetry = getTelemetry(db); - assert.eq(0, telemetry.length); - - // Run a getMore to exhaust the cursor, then ensure telemetry results have been written - // accurately. batchSize must be 2 so the cursor recognizes exhaustion. - assert.commandWorked(db.runCommand({ - getMore: cursor.getId(), - collection: coll.getName(), - batchSize: 2 - })); // returns 1 doc, exhausts the cursor - // The $telemetry query for the previous `getTelemetry` is included in this call to $telemetry. - telemetry = getTelemetry(db); - assert.eq(2, telemetry.length); - assertExpectedResults(telemetry[0], - telemetryKey, - /* expectedExecCount */ 1, - /* expectedDocsReturnedSum */ 2, - /* expectedDocsReturnedMax */ 2, - /* expectedDocsReturnedMin */ 2, - /* expectedDocsReturnedSumOfSq */ 4); - - // Run more queries (to exhaustion) with the same query shape, and ensure telemetry results are - // accurate. - coll.aggregate([ - {$match: {v: {$gt: 0, $lt: 5}}}, - {$project: {hello: "$world"}}, - ]); // returns 2 docs - coll.aggregate([ - {$match: {v: {$gt: 2, $lt: 3}}}, - {$project: {hello: "$universe"}}, - ]); // returns 0 docs - coll.aggregate([ - {$match: {v: {$gt: 0, $lt: 2}}}, - {$project: {hello: "$galaxy"}}, - ]); // returns 1 doc - telemetry = getTelemetry(db); - assert.eq(2, telemetry.length); - assertExpectedResults(telemetry[0], - telemetryKey, - /* expectedExecCount */ 4, - /* expectedDocsReturnedSum */ 5, - /* expectedDocsReturnedMax */ 2, - /* expectedDocsReturnedMin */ 0, - /* expectedDocsReturnedSumOfSq */ 9); - - st.stop(); -} - -// Assert on batchSize-limited find queries that killCursors will write metrics with partial results -// to the telemetry store. 
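The find-command block above depends on the rule that a query's telemetry entry is written only once its cursor is exhausted (or killed). A condensed sketch of that lifecycle, assuming `db` and `coll` live on a telemetry-enabled node and that the collection holds more matching documents than the first batch; it reuses the getTelemetry helper the deleted test loads from jstests/libs/telemetry_utils.js:

load("jstests/libs/telemetry_utils.js");  // for getTelemetry()

const cursor = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1);  // first batch only; cursor stays open
assert.eq(0, getTelemetry(db).length);                         // nothing has been recorded yet

// Exhausting the cursor with a getMore is what persists the entry.
assert.commandWorked(
    db.runCommand({getMore: cursor.getId(), collection: coll.getName(), batchSize: 2}));
assert.lte(1, getTelemetry(db).length);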
-{ - const st = setup(); - const db = st.s.getDB("test"); - const collName = "coll"; - const coll = db[collName]; - - const telemetryKey = { - cmdNs: {db: "test", coll: "coll"}, - find: collName, - filter: {$and: [{v: {$gt: findRedactString}}, {v: {$lt: findRedactString}}]}, - batchSize: findRedactString, - readConcern: {level: "local", provenance: "implicitDefault"}, - applicationName: "MongoDB Shell" - }; - - const cursor1 = coll.find({v: {$gt: 0, $lt: 5}}).batchSize(1); // returns 1 doc - const cursor2 = coll.find({v: {$gt: 0, $lt: 2}}).batchSize(1); // returns 1 doc - - assert.commandWorked( - db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]})); - - const telemetry = getTelemetry(db); - assert.eq(1, telemetry.length); - assertExpectedResults(telemetry[0], - telemetryKey, - /* expectedExecCount */ 2, - /* expectedDocsReturnedSum */ 2, - /* expectedDocsReturnedMax */ 1, - /* expectedDocsReturnedMin */ 1, - /* expectedDocsReturnedSumOfSq */ 2); - st.stop(); -} - -// Assert on batchSize-limited agg queries that killCursors will write metrics with partial results -// to the telemetry store. -{ - const st = setup(); - const db = st.s.getDB("test"); - const coll = db.coll; - - const telemetryKey = { - pipeline: [{$match: {v: {$gt: aggRedactString, $lt: aggRedactString}}}], - namespace: `test.${coll.getName()}`, - applicationName: "MongoDB Shell" - }; - - const cursor1 = coll.aggregate( - [ - {$match: {v: {$gt: 0, $lt: 5}}}, - ], - {cursor: {batchSize: 1}}); // returns 1 doc - const cursor2 = coll.aggregate( - [ - {$match: {v: {$gt: 0, $lt: 2}}}, - ], - {cursor: {batchSize: 1}}); // returns 1 doc - - assert.commandWorked( - db.runCommand({killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]})); - - const telemetry = getTelemetry(db); - assert.eq(1, telemetry.length); - assertExpectedResults(telemetry[0], - telemetryKey, - /* expectedExecCount */ 2, - /* expectedDocsReturnedSum */ 2, - /* expectedDocsReturnedMax */ 1, - /* expectedDocsReturnedMin */ 1, - /* expectedDocsReturnedSumOfSq */ 2); - st.stop(); -} -}()); diff --git a/jstests/noPassthrough/telemetry/telemetry_feature_flag.js b/jstests/noPassthrough/telemetry/telemetry_feature_flag.js deleted file mode 100644 index 04377ca661010..0000000000000 --- a/jstests/noPassthrough/telemetry/telemetry_feature_flag.js +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Test that calls to read from telemetry store fail when feature flag is turned off. - */ -load('jstests/libs/analyze_plan.js'); -load("jstests/libs/feature_flag_util.js"); - -(function() { -"use strict"; - -// This test specifically tests error handling when the feature flag is not on. -// TODO SERVER-65800 this test can be removed when the feature flag is removed. -const conn = MongoRunner.runMongod(); -const testDB = conn.getDB('test'); -if (FeatureFlagUtil.isEnabled(testDB, "Telemetry")) { - jsTestLog("Skipping test since telemetry is enabled."); - MongoRunner.stopMongod(conn); - return; -} - -// Pipeline to read telemetry store should fail without feature flag turned on. -assert.commandFailedWithCode( - testDB.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}), - ErrorCodes.QueryFeatureNotAllowed); - -// Pipeline, with a filter, to read telemetry store fails without feature flag turned on. 
-assert.commandFailedWithCode(testDB.adminCommand({ - aggregate: 1, - pipeline: [{$telemetry: {}}, {$match: {"key.find.find": {$eq: "###"}}}], - cursor: {} -}), - ErrorCodes.QueryFeatureNotAllowed); - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js b/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js deleted file mode 100644 index 87fc54a3360e7..0000000000000 --- a/jstests/noPassthrough/telemetry/telemetry_metrics_across_getMore_calls.js +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Test that the telemetry metrics are aggregated properly by distinct query shape over getMore - * calls. - * @tags: [featureFlagTelemetry] - */ -load("jstests/libs/telemetry_utils.js"); // For verifyMetrics. - -(function() { -"use strict"; - -// Turn on the collecting of telemetry metrics. -let options = { - setParameter: {internalQueryConfigureTelemetrySamplingRate: -1}, -}; - -const conn = MongoRunner.runMongod(options); -const testDB = conn.getDB('test'); -var coll = testDB[jsTestName()]; -coll.drop(); - -// Bulk insert documents to reduces roundtrips and make timeout on a slow machine less likely. -const bulk = coll.initializeUnorderedBulkOp(); -const numDocs = 100; -for (let i = 0; i < numDocs / 2; ++i) { - bulk.insert({foo: 0, bar: Math.floor(Math.random() * 3)}); - bulk.insert({foo: 1, bar: Math.floor(Math.random() * -2)}); -} -assert.commandWorked(bulk.execute()); - -// Assert that two queries with identical structures are represented by the same key. -{ - // Note: toArray() is necessary for the batchSize-limited query to run to cursor exhaustion - // (when it writes to the telemetry store). - coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}).toArray(); - coll.aggregate([{$match: {foo: 0}}], {cursor: {batchSize: 2}}).toArray(); - - // This command will return all telemetry store entires. - const telemetryResults = testDB.getSiblingDB("admin").aggregate([{$telemetry: {}}]).toArray(); - // Assert there is only one entry. - assert.eq(telemetryResults.length, 1, telemetryResults); - const telemetryEntry = telemetryResults[0]; - assert.eq(telemetryEntry.key.namespace, `test.${jsTestName()}`); - assert.eq(telemetryEntry.key.applicationName, "MongoDB Shell"); - - // Assert we update execution count for identically shaped queries. - assert.eq(telemetryEntry.metrics.execCount, 2); - - // Assert telemetry values are accurate for the two above queries. - assert.eq(telemetryEntry.metrics.docsReturned.sum, numDocs); - assert.eq(telemetryEntry.metrics.docsReturned.min, numDocs / 2); - assert.eq(telemetryEntry.metrics.docsReturned.max, numDocs / 2); - - verifyMetrics(telemetryResults); -} - -const fooEqBatchSize = 5; -const fooNeBatchSize = 3; -// Assert on batchSize-limited queries that killCursors will write metrics with partial results to -// the telemetry store. -{ - let cursor1 = coll.find({foo: {$eq: 0}}).batchSize(fooEqBatchSize); - let cursor2 = coll.find({foo: {$ne: 0}}).batchSize(fooNeBatchSize); - // Issue one getMore for the first query, so 2 * fooEqBatchSize documents are returned total. - assert.commandWorked(testDB.runCommand( - {getMore: cursor1.getId(), collection: coll.getName(), batchSize: fooEqBatchSize})); - - // Kill both cursors so the telemetry metrics are stored. - assert.commandWorked(testDB.runCommand( - {killCursors: coll.getName(), cursors: [cursor1.getId(), cursor2.getId()]})); - - // This filters telemetry entires to just the ones entered when running above find queries. 
- const telemetryResults = testDB.getSiblingDB("admin") - .aggregate([ - {$telemetry: {}}, - {$match: {"key.filter.foo": {$exists: true}}}, - {$sort: {key: 1}}, - ]) - .toArray(); - assert.eq(telemetryResults.length, 2, telemetryResults); - assert.eq(telemetryResults[0].key.cmdNs.db, "test"); - assert.eq(telemetryResults[0].key.cmdNs.coll, jsTestName()); - assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell"); - assert.eq(telemetryResults[1].key.cmdNs.db, "test"); - assert.eq(telemetryResults[1].key.cmdNs.coll, jsTestName()); - assert.eq(telemetryResults[1].key.applicationName, "MongoDB Shell"); - - assert.eq(telemetryResults[0].metrics.execCount, 1); - assert.eq(telemetryResults[1].metrics.execCount, 1); - assert.eq(telemetryResults[0].metrics.docsReturned.sum, fooEqBatchSize * 2); - assert.eq(telemetryResults[1].metrics.docsReturned.sum, fooNeBatchSize); - - verifyMetrics(telemetryResults); -} - -// Assert that options such as limit/sort create different keys, and that repeating a query shape -// ({foo: {$eq}}) aggregates metrics across executions. -{ - const query2Limit = 50; - coll.find({foo: {$eq: 0}}).batchSize(2).toArray(); - coll.find({foo: {$eq: 1}}).limit(query2Limit).batchSize(2).toArray(); - coll.find().sort({"foo": 1}).batchSize(2).toArray(); - // This filters telemetry entires to just the ones entered when running above find queries. - let telemetryResults = - testDB.getSiblingDB("admin") - .aggregate([{$telemetry: {}}, {$match: {"key.find": {$exists: true}}}]) - .toArray(); - assert.eq(telemetryResults.length, 4, telemetryResults); - - verifyMetrics(telemetryResults); - - // This filters to just the telemetry for query coll.find().sort({"foo": 1}).batchSize(2). - telemetryResults = testDB.getSiblingDB("admin") - .aggregate([{$telemetry: {}}, {$match: {"key.sort.foo": 1}}]) - .toArray(); - assert.eq(telemetryResults.length, 1, telemetryResults); - assert.eq(telemetryResults[0].key.cmdNs.db, "test"); - assert.eq(telemetryResults[0].key.cmdNs.coll, jsTestName()); - assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell"); - assert.eq(telemetryResults[0].metrics.execCount, 1); - assert.eq(telemetryResults[0].metrics.docsReturned.sum, numDocs); - - // This filters to just the telemetry for query coll.find({foo: {$eq: - // 1}}).limit(query2Limit).batchSize(2). - telemetryResults = testDB.getSiblingDB("admin") - .aggregate([{$telemetry: {}}, {$match: {"key.limit": '?'}}]) - .toArray(); - assert.eq(telemetryResults.length, 1, telemetryResults); - assert.eq(telemetryResults[0].key.cmdNs.db, "test"); - assert.eq(telemetryResults[0].key.cmdNs.coll, jsTestName()); - assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell"); - assert.eq(telemetryResults[0].metrics.execCount, 1); - assert.eq(telemetryResults[0].metrics.docsReturned.sum, query2Limit); - - // This filters to just the telemetry for query coll.find({foo: {$eq: 0}}).batchSize(2). 
- telemetryResults = testDB.getSiblingDB("admin") - .aggregate([ - {$telemetry: {}}, - { - $match: { - "key.filter.foo": {$eq: {$eq: "?"}}, - "key.limit": {$exists: false}, - "key.sort": {$exists: false} - } - } - ]) - .toArray(); - assert.eq(telemetryResults.length, 1, telemetryResults); - assert.eq(telemetryResults[0].key.cmdNs.db, "test"); - assert.eq(telemetryResults[0].key.cmdNs.coll, jsTestName()); - assert.eq(telemetryResults[0].key.applicationName, "MongoDB Shell"); - assert.eq(telemetryResults[0].metrics.execCount, 2); - assert.eq(telemetryResults[0].metrics.docsReturned.sum, numDocs / 2 + 2 * fooEqBatchSize); - assert.eq(telemetryResults[0].metrics.docsReturned.max, numDocs / 2); - assert.eq(telemetryResults[0].metrics.docsReturned.min, 2 * fooEqBatchSize); -} - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js b/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js deleted file mode 100644 index 6bbf55f08ea7a..0000000000000 --- a/jstests/noPassthrough/telemetry/telemetry_redact_find_cmd.js +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Test that $telemetry properly redacts find commands, on mongod and mongos. - * @tags: [requires_fcv_70] - */ -load("jstests/libs/telemetry_utils.js"); -(function() { -"use strict"; - -function runTest(conn) { - const db = conn.getDB("test"); - const admin = conn.getDB("admin"); - - db.test.drop(); - db.test.insert({v: 1}); - - db.test.find({v: 1}).toArray(); - - let telemetry = getTelemetryRedacted(admin); - - assert.eq(1, telemetry.length); - assert.eq("n4bQgYhMfWWa", telemetry[0].key.find); - assert.eq({"TJRIXgwhrmxB": {$eq: "?"}}, telemetry[0].key.filter); - - db.test.insert({v: 2}); - - const cursor = db.test.find({v: {$gt: 0, $lt: 3}}).batchSize(1); - telemetry = getTelemetryRedacted(admin); - // Cursor isn't exhausted, so there shouldn't be another entry yet. - assert.eq(1, telemetry.length); - - assert.commandWorked( - db.runCommand({getMore: cursor.getId(), collection: db.test.getName(), batchSize: 2})); - - telemetry = getTelemetryRedacted(admin); - assert.eq(2, telemetry.length); - assert.eq("n4bQgYhMfWWa", telemetry[1].key.find); - assert.eq({"$and": [{"TJRIXgwhrmxB": {"$gt": "?"}}, {"TJRIXgwhrmxB": {"$lt": "?"}}]}, - telemetry[1].key.filter); -} - -const conn = MongoRunner.runMongod({ - setParameter: { - internalQueryConfigureTelemetrySamplingRate: -1, - featureFlagTelemetry: true, - } -}); -runTest(conn); -MongoRunner.stopMongod(conn); - -const st = new ShardingTest({ - mongos: 1, - shards: 1, - config: 1, - rs: {nodes: 1}, - mongosOptions: { - setParameter: { - internalQueryConfigureTelemetrySamplingRate: -1, - featureFlagTelemetry: true, - 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}" - } - }, -}); -runTest(st.s); -st.stop(); -}()); diff --git a/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js b/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js deleted file mode 100644 index 1bada398a0378..0000000000000 --- a/jstests/noPassthrough/telemetry/telemetry_sampling_rate.js +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Test that calls to read from telemetry store fail when sampling rate is not greater than 0 even - * if feature flag is on. 
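The sampling-rate knob that the next deleted test exercises is an ordinary runtime server parameter, so collection can be switched off and back on without restarting the process. A sketch of the runtime toggle, assuming `testdb` is connected to a telemetry-enabled mongod (both values below appear in the deleted tests: 0 disables collection, 2147483647 samples every query):

// Stop collecting telemetry for new queries.
assert.commandWorked(
    testdb.adminCommand({setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 0}));

// Resume collection; the deleted tests use INT_MAX as the "sample everything" setting.
assert.commandWorked(testdb.adminCommand(
    {setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}));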
- * @tags: [featureFlagTelemetry] - */ -load('jstests/libs/analyze_plan.js'); - -(function() { -"use strict"; - -let options = { - setParameter: {internalQueryConfigureTelemetrySamplingRate: 0}, -}; - -const conn = MongoRunner.runMongod(options); -const testdb = conn.getDB('test'); -var coll = testdb[jsTestName()]; -coll.drop(); -for (var i = 0; i < 20; i++) { - coll.insert({foo: 0, bar: Math.floor(Math.random() * 3)}); -} - -coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}); - -// Reading telemetry store with a sampling rate of 0 should return 0 documents. -let telStore = testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}}); -assert.eq(telStore.cursor.firstBatch.length, 0); - -// Reading telemetry store should work now with a sampling rate of greater than 0. -assert.commandWorked(testdb.adminCommand( - {setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647})); -coll.aggregate([{$match: {foo: 1}}], {cursor: {batchSize: 2}}); -telStore = assert.commandWorked( - testdb.adminCommand({aggregate: 1, pipeline: [{$telemetry: {}}], cursor: {}})); -assert.eq(telStore.cursor.firstBatch.length, 1); - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js b/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js deleted file mode 100644 index 84ac1717d6994..0000000000000 --- a/jstests/noPassthrough/telemetry/telemetry_server_status_metrics.js +++ /dev/null @@ -1,155 +0,0 @@ -/** - * Test the telemetry related serverStatus metrics. - * @tags: [featureFlagTelemetry] - */ -load('jstests/libs/analyze_plan.js'); - -(function() { -"use strict"; - -function runTestWithMongodOptions(mongodOptions, test, testOptions) { - const conn = MongoRunner.runMongod(mongodOptions); - const testDB = conn.getDB('test'); - const coll = testDB[jsTestName()]; - - test(conn, testDB, coll, testOptions); - - MongoRunner.stopMongod(conn); -} - -/** - * Test serverStatus metric which counts the number of evicted entries. - * - * testOptions must include `resetCacheSize` bool field; e.g., { resetCacheSize : true } - */ -function evictionTest(conn, testDB, coll, testOptions) { - const evictedBefore = testDB.serverStatus().metrics.telemetry.numEvicted; - assert.eq(evictedBefore, 0); - for (var i = 0; i < 4000; i++) { - let query = {}; - query["foo" + i] = "bar"; - coll.aggregate([{$match: query}]).itcount(); - } - if (!testOptions.resetCacheSize) { - const evictedAfter = testDB.serverStatus().metrics.telemetry.numEvicted; - assert.gt(evictedAfter, 0); - return; - } - // Make sure number of evicted entries increases when the cache size is reset, which forces out - // least recently used entries to meet the new, smaller size requirement. - assert.eq(testDB.serverStatus().metrics.telemetry.numEvicted, 0); - assert.commandWorked( - testDB.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: "1MB"})); - const evictedAfter = testDB.serverStatus().metrics.telemetry.numEvicted; - assert.gt(evictedAfter, 0); -} - -/** - * Test serverStatus metric which counts the number of requests for which telemetry is not collected - * due to rate-limiting. 
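The serverStatus test above reads all of its expectations from a single metrics subdocument. A compact sketch of pulling those counters, assuming `testDB` is on a telemetry-enabled mongod (the three field names are the ones asserted on by the deleted test):

const t = testDB.serverStatus().metrics.telemetry;
jsTestLog("telemetry store size estimate (bytes): " + t.telemetryStoreSizeEstimateBytes);
jsTestLog("entries evicted (LRU or cache resize): " + t.numEvicted);
jsTestLog("requests skipped by the rate limiter: " + t.numRateLimitedRequests);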
- * - * testOptions must include `samplingRate` and `numRequests` number fields; - * e.g., { samplingRate: 2147483647, numRequests: 20 } - */ -function countRateLimitedRequestsTest(conn, testDB, coll, testOptions) { - const numRateLimitedRequestsBefore = - testDB.serverStatus().metrics.telemetry.numRateLimitedRequests; - assert.eq(numRateLimitedRequestsBefore, 0); - - coll.insert({a: 0}); - - // Running numRequests / 2 times since we dispatch two requests per iteration - for (var i = 0; i < testOptions.numRequests / 2; i++) { - coll.find({a: 0}).toArray(); - coll.aggregate([{$match: {a: 1}}]); - } - - const numRateLimitedRequestsAfter = - testDB.serverStatus().metrics.telemetry.numRateLimitedRequests; - - if (testOptions.samplingRate === 0) { - // Telemetry should not be collected for any requests. - assert.eq(numRateLimitedRequestsAfter, testOptions.numRequests); - } else if (testOptions.samplingRate >= testOptions.numRequests) { - // Telemetry should be collected for all requests. - assert.eq(numRateLimitedRequestsAfter, 0); - } else { - // Telemetry should be collected for some but not all requests. - assert.gt(numRateLimitedRequestsAfter, 0); - assert.lt(numRateLimitedRequestsAfter, testOptions.numRequests); - } -} - -function telemetryStoreSizeEstimateTest(conn, testDB, coll, testOptions) { - assert.eq(testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes, 0); - let halfWayPointSize; - // Only using three digit numbers (eg 100, 101) means the string length will be the same for all - // entries and therefore the key size will be the same for all entries, which makes predicting - // the total size of the store clean and easy. - for (var i = 100; i < 200; i++) { - coll.aggregate([{$match: {["foo" + i]: "bar"}}]).itcount(); - if (i == 150) { - halfWayPointSize = - testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes; - } - } - // Confirm that telemetry store has grown and size is non-zero. - assert.gt(halfWayPointSize, 0); - const fullSize = testDB.serverStatus().metrics.telemetry.telemetryStoreSizeEstimateBytes; - assert.gt(fullSize, 0); - // Make sure the final telemetry store size is twice as much as the halfway point size (+/- 5%) - assert(fullSize >= halfWayPointSize * 1.95 && fullSize <= halfWayPointSize * 2.05, - tojson({fullSize, halfWayPointSize})); -} -/** - * In this configuration, we insert enough entries into the telemetry store to trigger LRU - * eviction. - */ -runTestWithMongodOptions({ - setParameter: { - internalQueryConfigureTelemetryCacheSize: "1MB", - internalQueryConfigureTelemetrySamplingRate: -1 - }, -}, - evictionTest, - {resetCacheSize: false}); -/** - * In this configuration, eviction is triggered only when the telemetry store size is reset. - * */ -runTestWithMongodOptions({ - setParameter: { - internalQueryConfigureTelemetryCacheSize: "4MB", - internalQueryConfigureTelemetrySamplingRate: -1 - }, -}, - evictionTest, - {resetCacheSize: true}); - -/** - * In this configuration, every query is sampled, so no requests should be rate-limited. - */ -runTestWithMongodOptions({ - setParameter: {internalQueryConfigureTelemetrySamplingRate: -1}, -}, - countRateLimitedRequestsTest, - {samplingRate: 2147483647, numRequests: 20}); - -/** - * In this configuration, the sampling rate is set so that some but not all requests are - * rate-limited. 
- */ -runTestWithMongodOptions({ - setParameter: {internalQueryConfigureTelemetrySamplingRate: 10}, -}, - countRateLimitedRequestsTest, - {samplingRate: 10, numRequests: 20}); - -/** - * Sample all queries and assert that the size of telemetry store is equal to num entries * entry - * size - */ -runTestWithMongodOptions({ - setParameter: {internalQueryConfigureTelemetrySamplingRate: -1}, -}, - telemetryStoreSizeEstimateTest); -}()); diff --git a/jstests/noPassthrough/timeseries_bucket_limit_size.js b/jstests/noPassthrough/timeseries_bucket_limit_size.js index de1719ec8da96..6449e9bf931ee 100644 --- a/jstests/noPassthrough/timeseries_bucket_limit_size.js +++ b/jstests/noPassthrough/timeseries_bucket_limit_size.js @@ -7,10 +7,8 @@ * requires_fcv_61, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const conn = MongoRunner.runMongod({setParameter: {timeseriesBucketMinCount: 1}}); @@ -18,6 +16,8 @@ const dbName = jsTestName(); const db = conn.getDB(dbName); TimeseriesTest.run((insert) => { + const alwaysUseCompressedBuckets = + FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets"); const areTimeseriesScalabilityImprovementsEnabled = TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db); @@ -81,7 +81,7 @@ TimeseriesTest.run((insert) => { assert.eq(largeValue, bucketDocs[0].control.max.x, 'invalid control.max for x in first bucket: ' + tojson(bucketDocs[0].control)); - assert.eq(2, + assert.eq(alwaysUseCompressedBuckets ? 1 : 2, bucketDocs[0].control.version, 'unexpected control.version in first bucket: ' + tojson(bucketDocs)); @@ -114,4 +114,3 @@ TimeseriesTest.run((insert) => { }); MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthrough/timeseries_bucketing_parameters_downgrade.js b/jstests/noPassthrough/timeseries_bucketing_parameters_downgrade.js deleted file mode 100644 index 5a50f30c6d51a..0000000000000 --- a/jstests/noPassthrough/timeseries_bucketing_parameters_downgrade.js +++ /dev/null @@ -1,212 +0,0 @@ -/** - * Tests behavior with the bucketing parameters on time-series collections when downgrading. If we - * are using custom bucketing parameters we expect to fail the downgrade but if we use default - * granularity values the downgrade should succeed. - */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/feature_flag_util.js"); // For isEnabled. - -const conn = MongoRunner.runMongod(); -const db = conn.getDB("test"); - -if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(conn)) { - jsTestLog( - "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled."); - MongoRunner.stopMongod(conn); - return; -} - -const collName = "timeseries_bucketing_parameters"; -const coll = db.getCollection(collName); -const bucketsColl = db.getCollection("system.buckets." 
+ collName); - -const timeFieldName = "tm"; -const metaFieldName = "mm"; - -const resetCollection = function(extraOptions = {}) { - coll.drop(); - - const tsOpts = {timeField: timeFieldName, metaField: metaFieldName}; - assert.commandWorked( - db.createCollection(coll.getName(), {timeseries: Object.merge(tsOpts, extraOptions)})); - assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 1})); -}; - -const secondsMaxSpan = TimeseriesTest.getBucketMaxSpanSecondsFromGranularity('seconds'); -const secondsRoundingSeconds = TimeseriesTest.getBucketRoundingSecondsFromGranularity('seconds'); -const minutesMaxSpan = TimeseriesTest.getBucketMaxSpanSecondsFromGranularity('minutes'); -const minutesRoundingSeconds = TimeseriesTest.getBucketRoundingSecondsFromGranularity('minutes'); -const hoursMaxSpan = TimeseriesTest.getBucketMaxSpanSecondsFromGranularity('hours'); -const hoursRoundingSeconds = TimeseriesTest.getBucketRoundingSecondsFromGranularity('hours'); - -const getNearestGranularity = function(bucketingParams) { - assert(bucketingParams.hasOwnProperty('bucketMaxSpanSeconds') && - bucketingParams.hasOwnProperty('bucketRoundingSeconds')); - - if (bucketingParams.bucketMaxSpanSeconds <= secondsMaxSpan && - bucketingParams.bucketRoundingSeconds <= secondsRoundingSeconds) { - return 'seconds'; - } - - if (bucketingParams.bucketMaxSpanSeconds <= minutesMaxSpan && - bucketingParams.bucketRoundingSeconds <= minutesRoundingSeconds) { - return 'minutes'; - } - - if (bucketingParams.bucketMaxSpanSeconds <= hoursMaxSpan && - bucketingParams.bucketRoundingSeconds <= hoursRoundingSeconds) { - return 'hours'; - } - - return null; -}; - -// Checks if the downgrade command succeeds and reset the version to latestFCV. -function checkDowngradeSucceeds() { - // Verify that downgrade succeeds. - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - - // Check that the 'granularity' and 'bucketMaxSpanSeconds' are correctly set and that - // 'bucketRoundingSeconds' is not set to any value. - let collections = assert.commandWorked(db.runCommand({listCollections: 1})).cursor.firstBatch; - let collectionEntry = - collections.find(entry => entry.name === 'system.buckets.' + coll.getName()); - assert(collectionEntry); - - let granularity = collectionEntry.options.timeseries.granularity; - assert(granularity); - assert.isnull(collectionEntry.options.timeseries.bucketRoundingSeconds); - assert.eq(collectionEntry.options.timeseries.bucketMaxSpanSeconds, - TimeseriesTest.getBucketMaxSpanSecondsFromGranularity(granularity)); - - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV})); -} - -// Checks that downgrade fails but tries again by using the collMod command to modify the collection -// into a downgradable state. Will drop the collection if there is no possible granularity to -// update. -function checkDowngradeFailsAndTryAgain(bucketingParams) { - assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}), - ErrorCodes.CannotDowngrade); - - let nextGranularity = getNearestGranularity(bucketingParams); - - if (nextGranularity) { - assert.commandWorked( - db.runCommand({collMod: collName, timeseries: {granularity: nextGranularity}})); - } else { - // If the bucketMaxSpanSeconds and bucketRoundingSeconds are both greater than the values - // corresponding to the 'hours' granularity, the only way to successfully downgrade is to - // drop the collection. 
- resetCollection(); - } - - checkDowngradeSucceeds(); -} - -const checkBucketCount = function(count = 1) { - let stats = assert.commandWorked(coll.stats()); - assert(stats.timeseries); - assert.eq(stats.timeseries['bucketCount'], count); -}; - -// 1. We expect downgrade to work seamlessly when a standard granularity is used. -{ - resetCollection(); - - // When we create collections with no granularity specified, we should default to 'seconds' - // meaning we should be able to downgrade successfully. - checkDowngradeSucceeds(); - - // If we explicitly set the granularity of a collection we expect to succesfully downgrade. - resetCollection({granularity: 'seconds'}); - checkDowngradeSucceeds(); - - // We expect to successfully downgrade with different granularity values. - assert.commandWorked(db.runCommand({collMod: collName, timeseries: {granularity: "seconds"}})); - checkDowngradeSucceeds(); - assert.commandWorked(db.runCommand({collMod: collName, timeseries: {granularity: "minutes"}})); - checkDowngradeSucceeds(); - assert.commandWorked(db.runCommand({collMod: collName, timeseries: {granularity: "hours"}})); - checkDowngradeSucceeds(); -} - -// 2. We expect to successfully downgrade if 'bucketMaxSpanSeconds' corresponds to a granularity. -{ - resetCollection({granularity: 'seconds', bucketMaxSpanSeconds: secondsMaxSpan}); - checkDowngradeSucceeds(); - - resetCollection({granularity: 'seconds', bucketMaxSpanSeconds: secondsMaxSpan}); - checkDowngradeSucceeds(); - - resetCollection({granularity: 'minutes', bucketMaxSpanSeconds: minutesMaxSpan}); - checkDowngradeSucceeds(); - - resetCollection({granularity: 'hours', bucketMaxSpanSeconds: hoursMaxSpan}); - checkDowngradeSucceeds(); -} - -// 3. When we set values for 'bucketMaxSpanSeconds' and 'bucketRoundingSeconds' we expect downgrade -// to fail. Changing the collection's granularity to the next possible granularity should allow -// downgrade to succeed. -{ - // Use custom bucketing parameters (less than the 'seconds' granularity). - let bucketingParams = { - bucketMaxSpanSeconds: secondsRoundingSeconds, - bucketRoundingSeconds: secondsRoundingSeconds - }; - resetCollection(bucketingParams); - - // Insert a few measurements to create a total of 3 buckets. - assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 2})); - assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 3})); - checkBucketCount(3); - - // Expect downgrade to fail but when the granularity is changed to 'seconds' we should - // successfully downgrade. - checkDowngradeFailsAndTryAgain(bucketingParams); - - // Use custom bucketing parameters (less than the 'minutes' granularity). - bucketingParams = {bucketMaxSpanSeconds: secondsMaxSpan, bucketRoundingSeconds: secondsMaxSpan}; - assert.commandWorked(db.runCommand({collMod: collName, timeseries: bucketingParams})); - - // Expect downgrade to fail but when the granularity is changed to 'minutes' we should - // successfully downgrade. - checkDowngradeFailsAndTryAgain(bucketingParams); - - // Use custom bucketing parameters (less than the 'hours' granularity). - bucketingParams = {bucketMaxSpanSeconds: minutesMaxSpan, bucketRoundingSeconds: minutesMaxSpan}; - assert.commandWorked(db.runCommand({collMod: collName, timeseries: bucketingParams})); - - // Expect downgrade to fail but when the granularity is changed to 'hours' we should - // successfully downgrade. 
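The recovery path used throughout this deleted downgrade test is a collMod that replaces the custom bucketing parameters with the nearest coarser granularity preset, after which the FCV downgrade can proceed. A sketch of the two steps, assuming `db` and `collName` refer to the time-series collection above (the granularity value and the CannotDowngrade expectation come from the deleted test):

// Custom bucketMaxSpanSeconds/bucketRoundingSeconds block the downgrade...
assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}),
                             ErrorCodes.CannotDowngrade);
// ...so fall back to a preset granularity (here 'hours'), which clears the custom parameters.
assert.commandWorked(db.runCommand({collMod: collName, timeseries: {granularity: "hours"}}));
assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}));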
- checkDowngradeFailsAndTryAgain(bucketingParams); - - // Make sure the collection did not get dropped in the process to successfully downgrade by - // checking the number of buckets in the collection. - checkBucketCount(3); -} - -// 4. In cases where the bucketing parameters are higher than the possible granularities, the only -// way to downgrade is to drop the collection. -{ - let bucketingParams = {bucketMaxSpanSeconds: hoursMaxSpan, bucketRoundingSeconds: hoursMaxSpan}; - resetCollection(bucketingParams); - - // Insert a few measurements to create a total of 3 buckets. - assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 2})); - assert.commandWorked(coll.insert({[timeFieldName]: ISODate(), [metaFieldName]: 3})); - checkBucketCount(3); - - // Expect the downgrade to fail and drops the collection for the downgrade to succeed. - checkDowngradeFailsAndTryAgain(bucketingParams); - - // Verify the original collection had to be dropped in order to downgrade. - checkBucketCount(1); -} - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/timeseries_collStats.js b/jstests/noPassthrough/timeseries_collStats.js index 362fc891c4a04..e8430a57a76c8 100644 --- a/jstests/noPassthrough/timeseries_collStats.js +++ b/jstests/noPassthrough/timeseries_collStats.js @@ -7,10 +7,9 @@ * requires_getmore, * ] */ -(function() { -"use strict"; +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; -load("jstests/core/timeseries/libs/timeseries.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const kIdleBucketExpiryMemoryUsageThreshold = 1024 * 1024 * 10; const conn = MongoRunner.runMongod({ @@ -22,6 +21,8 @@ const conn = MongoRunner.runMongod({ const dbName = jsTestName(); const testDB = conn.getDB(dbName); +const alwaysUseCompressedBuckets = + FeatureFlagUtil.isEnabled(testDB, "TimeseriesAlwaysUseCompressedBuckets"); const isTimeseriesScalabilityImprovementsEnabled = TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB); @@ -182,7 +183,7 @@ if (isTimeseriesScalabilityImprovementsEnabled) { expectedStats.numBucketsClosedDueToTimeBackward++; } expectedStats.numMeasurementsCommitted++; -if (!isTimeseriesScalabilityImprovementsEnabled) { +if (!isTimeseriesScalabilityImprovementsEnabled && !alwaysUseCompressedBuckets) { expectedStats.numCompressedBuckets++; } if (isTimeseriesScalabilityImprovementsEnabled) { @@ -203,7 +204,9 @@ expectedStats.numCommits += 2; expectedStats.numMeasurementsCommitted += numDocs; expectedStats.avgNumMeasurementsPerCommit = Math.floor(expectedStats.numMeasurementsCommitted / expectedStats.numCommits); -expectedStats.numCompressedBuckets++; +if (!alwaysUseCompressedBuckets) { + expectedStats.numCompressedBuckets++; +} if (isTimeseriesScalabilityImprovementsEnabled) { expectedStats.numBucketQueriesFailed++; } @@ -226,8 +229,10 @@ expectedStats.numCommits += 2; expectedStats.numMeasurementsCommitted += 1001; expectedStats.avgNumMeasurementsPerCommit = Math.floor(expectedStats.numMeasurementsCommitted / expectedStats.numCommits); -expectedStats.numCompressedBuckets++; -expectedStats.numSubObjCompressionRestart += 2; +if (!alwaysUseCompressedBuckets) { + expectedStats.numCompressedBuckets++; + expectedStats.numSubObjCompressionRestart += 2; +} if (isTimeseriesScalabilityImprovementsEnabled) { expectedStats.numBucketQueriesFailed++; } @@ -327,5 +332,4 @@ testIdleBucketExpiry(i => { return {[timeFieldName]: ISODate(), [metaFieldName]: i, a: largeValue}; }); -MongoRunner.stopMongod(conn); 
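Several of the timeseries test updates in this patch share one pattern: probe the TimeseriesAlwaysUseCompressedBuckets feature flag once, then branch the bucket-format expectations on it. A sketch of that probe plus the assertion shape used in the timeseries_bucket_limit_size.js and timeseries_collStats.js hunks above (assumes `db` and a fetched system.buckets document `bucketDoc`):

import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";

const alwaysUseCompressedBuckets =
    FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets");

// Mirrors the updated expectation above: control.version 1 when the flag is enabled, 2 otherwise.
assert.eq(alwaysUseCompressedBuckets ? 1 : 2,
          bucketDoc.control.version,
          "unexpected control.version: " + tojson(bucketDoc.control));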
-})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_compression_fail.js b/jstests/noPassthrough/timeseries_compression_fail.js index 4b7ffa85a21a4..aaee83849ff60 100644 --- a/jstests/noPassthrough/timeseries_compression_fail.js +++ b/jstests/noPassthrough/timeseries_compression_fail.js @@ -2,10 +2,7 @@ * Tests that the server can detect when timeseries bucket compression is not decompressible without * data loss. Bucket should remain uncompressed and we log that this happened. */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; let conn = MongoRunner.runMongod(); @@ -13,6 +10,13 @@ const dbName = jsTestName(); const db = conn.getDB(dbName); const coll = db.getCollection('t'); +// TODO SERVER-77454: Investigate re-enabling this. +if (FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) { + jsTestLog("Skipping test as the always use compressed buckets feature is enabled"); + MongoRunner.stopMongod(conn); + quit(); +} + // Assumes each bucket has a limit of 1000 measurements. const bucketMaxCount = 1000; const numDocs = bucketMaxCount + 100; @@ -47,4 +51,3 @@ assert.eq(1, bucketDocs[0].control.version); assert.eq(1, bucketDocs[1].control.version); MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthrough/timeseries_create.js b/jstests/noPassthrough/timeseries_create.js index b13d166d35eaa..24c238a0a0f54 100644 --- a/jstests/noPassthrough/timeseries_create.js +++ b/jstests/noPassthrough/timeseries_create.js @@ -5,10 +5,7 @@ * @tags: [ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const conn = MongoRunner.runMongod(); @@ -231,5 +228,4 @@ testTimeseriesNamespaceExists((testDB, collName) => { assert.commandWorked(testDB.runCommand({drop: coll.getName(), writeConcern: {w: "majority"}})); } -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_delete_one_transaction.js b/jstests/noPassthrough/timeseries_delete_one_transaction.js new file mode 100644 index 0000000000000..3bb68d8a94f3d --- /dev/null +++ b/jstests/noPassthrough/timeseries_delete_one_transaction.js @@ -0,0 +1,263 @@ +/** + * Tests the deleteOne command on time-series collections in multi-document transactions. 
+ */ +load("jstests/libs/fail_point_util.js"); +load('jstests/libs/parallel_shell_helpers.js'); + +const rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); + +const metaFieldName = "mm"; +const timeFieldName = "tt"; +const collectionNamePrefix = "test_coll_"; +let collectionCounter = 0; + +const testDB = rst.getPrimary().getDB(jsTestName()); +let testColl = testDB[collectionNamePrefix + collectionCounter]; +assert.commandWorked(testDB.dropDatabase()); + +const docsPerMetaField = 3; +const initializeData = function() { + testColl = testDB[collectionNamePrefix + ++collectionCounter]; + assert.commandWorked(testDB.createCollection( + testColl.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); + + let docs = []; + for (let i = 0; i < docsPerMetaField; ++i) { + docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 0}); + docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 1}); + docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 2}); + } + + // Insert test documents. + assert.commandWorked(testColl.insertMany(docs)); + printjson("Printing docs: " + tojson(testColl.find({}).toArray())); +}; + +// 1. Delete one document from the collection in a transaction. +(function basicDeleteOne() { + jsTestLog("Running 'basicDeleteOne'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + + session.startTransaction(); + assert.commandWorked(sessionColl.deleteOne({_id: 0, [metaFieldName]: 0})); + assert.commandWorked(session.commitTransaction_forTesting()); + session.endSession(); + + // Expect one deleted document with meta: 0. + assert.eq(testColl.find({[metaFieldName]: 0}).toArray().length, docsPerMetaField - 1); +})(); + +// 2. deleteOne should not have visible changes when the transaction is aborted. +(function deleteOneTransactionAborts() { + jsTestLog("Running 'deleteOneTransactionAborts'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + + session.startTransaction(); + assert.commandWorked(sessionColl.deleteOne({_id: 0, [metaFieldName]: 1})); + assert.commandWorked(session.abortTransaction_forTesting()); + session.endSession(); + + // The transaction was aborted so no documents should have been deleted. + assert.eq(testColl.find({[metaFieldName]: 1}).toArray().length, docsPerMetaField); +})(); + +// 3. Run a few deleteOnes in a single transaction. +(function multipleDeleteOne() { + jsTestLog("Running 'multipleDeleteOne'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + session.startTransaction(); + + for (let i = 0; i < docsPerMetaField; ++i) { + assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 0})); + } + + assert.commandWorked(session.commitTransaction_forTesting()); + session.endSession(); + + // Expect all documents with {meta: 0} to be deleted. + assert.eq(testColl.find({[metaFieldName]: 0}).toArray().length, 0); +})(); + +// 4. Tests performing deleteOnes in and out of a transaction on abort. 
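The new cases in this file all reduce to the same core flow; a minimal sketch of it outside the test harness, assuming a time-series collection `coll` whose documents carry the "mm" meta field used above (session.commitTransaction() is the non-test variant of the commitTransaction_forTesting helper the test uses):

const session = db.getMongo().startSession();
const sessionColl = session.getDatabase(db.getName()).getCollection(coll.getName());

session.startTransaction();
// deleteOne has limit-1 semantics, so exactly one matching measurement is removed.
assert.commandWorked(sessionColl.deleteOne({_id: 0, mm: 0}));
session.commitTransaction();  // or session.abortTransaction() to leave the data untouched
session.endSession();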
+(function mixedDeleteOneAbortTxn() { + jsTestLog("Running 'mixedDeleteOneAbortTxn'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + session.startTransaction(); + + // Delete all documents for meta values 0, 1. + for (let i = 0; i < docsPerMetaField; ++i) { + assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 0})); + assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 1})); + } + + // Outside of the session and transaction, perform a deleteOne. + const docFilterNoTxn = {_id: 0, [metaFieldName]: 2}; + assert.commandWorked(testColl.deleteOne(docFilterNoTxn)); + + assert.commandWorked(session.abortTransaction_forTesting()); + session.endSession(); + + // The aborted transaction should not have deleted any documents. + assert.eq(testColl.find({[metaFieldName]: 0}).toArray().length, docsPerMetaField); + assert.eq(testColl.find({[metaFieldName]: 1}).toArray().length, docsPerMetaField); + + // The delete outside of the transaction should have succeeded. + assert.eq(testColl.find(docFilterNoTxn).toArray().length, 0); +})(); + +// 5. Tests performing deleteOnes in and out of a transaction on commit. +(function mixedDeleteOneCommitTxn() { + jsTestLog("Running 'mixedDeleteOneCommitTxn'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + session.startTransaction(); + + for (let i = 0; i < docsPerMetaField; ++i) { + // Within the transaction. + assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 0})); + assert.commandWorked(sessionColl.deleteOne({_id: i, [metaFieldName]: 1})); + + // Outside of the session and transaction, perform deleteOne. + assert.commandWorked(testColl.deleteOne({_id: i, [metaFieldName]: 2})); + } + + assert.commandWorked(session.commitTransaction_forTesting()); + session.endSession(); + + // Expect all documents to have been deleted. + assert.eq(testColl.find({}).toArray().length, 0); +})(); + +// 6. Tests a race to delete the same document in and out of a transaction. +(function raceToDeleteOne() { + jsTestLog("Running 'raceToDeleteOne'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + session.startTransaction(); + + // Within the transaction, perform deleteOne. + const deleteFilter = {_id: 1, [metaFieldName]: 0}; + assert.commandWorked(sessionColl.deleteOne(deleteFilter)); + + // Note: there is a change the parallel shell runs after the transaction is committed and that + // is fine as both interleavings should succeed. + const awaitTestDelete = startParallelShell( + funWithArgs(function(dbName, collName, filter) { + const testDB = db.getSiblingDB(dbName); + const coll = testDB.getCollection(collName); + + // Outside of the session and transaction, perform deleteOne. + assert.commandWorked(coll.deleteOne(filter)); + }, testDB.getName(), testColl.getName(), deleteFilter), testDB.getMongo().port); + + assert.commandWorked(session.commitTransaction_forTesting()); + assert.eq(testColl.find(deleteFilter).toArray().length, 0); + session.endSession(); + + // Allow non-transactional deleteOne to finish. + awaitTestDelete(); + assert.eq(testColl.find(deleteFilter).toArray().length, 0); +})(); + +// 7. 
Tests a transactional deleteOne on a document which gets inserted after the transaction +// starts. +(function deleteOneAndInsertBeforeCommit() { + jsTestLog("Running 'deleteOneAndInsertBeforeCommit'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + const newDoc = {_id: 101, [timeFieldName]: new Date(), [metaFieldName]: 101}; + + session.startTransaction(); + // Ensure the document does not exist within the snapshot of the newly started transaction. + assert.eq(sessionColl.find(newDoc).toArray().length, 0); + + // Outside of the session and transaction, insert document. + assert.commandWorked(testColl.insert(newDoc)); + + // Double check the document is still not visible from within the transaction. + assert.eq(sessionColl.find(newDoc).toArray().length, 0); + + // Within the transaction, perform deleteOne. + assert.commandWorked(sessionColl.deleteOne(newDoc)); + assert.eq(sessionColl.find(newDoc).toArray().length, 0); + + assert.commandWorked(session.commitTransaction_forTesting()); + session.endSession(); + + // The newly inserted document should be present even though the transaction commits after the + // insert. + assert.eq(testColl.find(newDoc).toArray().length, 1); +})(); + +// 8. Tests two side-by-side transactional deleteOnes on the same document. +(function deleteOneInTwoTransactions() { + jsTestLog("Running 'deleteOneInTwoTransactions'"); + initializeData(); + + const sessionA = testDB.getMongo().startSession(); + const sessionB = testDB.getMongo().startSession(); + const collA = sessionA.getDatabase(jsTestName()).getCollection(testColl.getName()); + const collB = sessionB.getDatabase(jsTestName()).getCollection(testColl.getName()); + + const docToDelete = {_id: 1, [metaFieldName]: 1}; + + // Start transactions on different sessions. + sessionA.startTransaction({readConcern: {level: "snapshot"}}); + sessionB.startTransaction({readConcern: {level: "snapshot"}}); + + // Ensure the document exists in the snapshot of both transactions. + assert.eq(collA.find(docToDelete).toArray().length, 1); + assert.eq(collB.find(docToDelete).toArray().length, 1); + + // Perform deleteOne on transaction A. + assert.commandWorked(collA.deleteOne(docToDelete)); + + const deleteCommand = { + delete: collB.getName(), + deletes: [{ + q: docToDelete, + limit: 1, + }] + }; + + // We expect the deleteOne on transaction B to fail, causing the transaction to abort. + // Sidenote: avoiding the deleteOne method from 'crud_api.js' because it throws. + assert.commandFailedWithCode(collB.runCommand(deleteCommand), ErrorCodes.WriteConflict); + assert.commandFailedWithCode(sessionB.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); + sessionB.endSession(); + + // Ensure the document does not exist in the snapshot of transaction A. + assert.eq(collA.find(docToDelete).toArray().length, 0); + // Since transaction A has not committed yet, the document should still be present outside of + // the transaction. + assert.eq(testColl.find(docToDelete).toArray().length, 1); + + // Ensure the document has been successfully deleted after transaction A commits. 
+ assert.commandWorked(sessionA.commitTransaction_forTesting()); + assert.eq(testColl.find(docToDelete).toArray().length, 0); + + sessionA.endSession(); +})(); + +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_direct_remove_reopen.js b/jstests/noPassthrough/timeseries_direct_remove_reopen.js index dbbd7898181b0..ea4a60612f6d9 100644 --- a/jstests/noPassthrough/timeseries_direct_remove_reopen.js +++ b/jstests/noPassthrough/timeseries_direct_remove_reopen.js @@ -1,11 +1,8 @@ /** * Tests that direct removal in a timeseries bucket collection synchronizes with bucket reopening. */ -(function() { -'use strict'; - load("jstests/libs/fail_point_util.js"); -load("jstests/libs/feature_flag_util.js"); // For isEnabled. +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/parallel_shell_helpers.js"); const conn = MongoRunner.runMongod(); @@ -18,7 +15,7 @@ if (!FeatureFlagUtil.isEnabled(testDB, "TimeseriesScalabilityImprovements")) { jsTestLog( "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled."); MongoRunner.stopMongod(conn); - return; + quit(); } const collName = 'test'; @@ -88,5 +85,4 @@ buckets = bucketsColl.find().sort({_id: 1}).toArray(); assert.eq(buckets.length, 1); assert.neq(buckets[0]._id, oldId); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_direct_update.js b/jstests/noPassthrough/timeseries_direct_update.js index 93f6282b8f124..3f110f9fcbe06 100644 --- a/jstests/noPassthrough/timeseries_direct_update.js +++ b/jstests/noPassthrough/timeseries_direct_update.js @@ -2,12 +2,9 @@ * Tests that direct updates to a timeseries bucket collection close the bucket, preventing further * inserts to land in that bucket or deletes and updates to be applied to it. */ -(function() { -'use strict'; - load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallel_shell_helpers.js"); -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const conn = MongoRunner.runMongod(); @@ -96,15 +93,14 @@ assert(!buckets[2].control.hasOwnProperty("closed")); // Make sure that closed buckets are skipped by updates and deletes. if (FeatureFlagUtil.isPresentAndEnabled(testDB, "TimeseriesUpdatesSupport")) { - // TODO SERVER-73454 Enable this test. // The first two buckets containing documents 0 and 1 are closed, so we can only update the // third document in the last bucket. - // const result = assert.commandWorked(coll.updateMany({}, {$set: {newField: 123}})); - // assert.eq(result.matchedCount, 1, result); - // assert.eq(result.modifiedCount, 1, result); - // assert.docEq(docs.slice(2, 3), - // coll.find({newField: 123}, {newField: 0}).toArray(), - // `Expected exactly one document to be updated. ${coll.find().toArray()}`); + const result = assert.commandWorked(coll.updateMany({}, {$set: {newField: 123}})); + assert.eq(result.matchedCount, 1, result); + assert.eq(result.modifiedCount, 1, result); + assert.docEq(docs.slice(2, 3), + coll.find({newField: 123}, {newField: 0}).toArray(), + `Expected exactly one document to be updated. 
${coll.find().toArray()}`); } if (FeatureFlagUtil.isPresentAndEnabled(testDB, "TimeseriesDeletesSupport")) { // The first two buckets containing documents 0 and 1 are closed, so we can only delete the @@ -124,5 +120,4 @@ if (FeatureFlagUtil.isPresentAndEnabled(testDB, "TimeseriesDeletesSupport")) { `Expected exactly one document to be deleted. ${coll.find().toArray()}`); } -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js index 2baf8294674e2..d87e1edf101ea 100644 --- a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js +++ b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing.js @@ -11,10 +11,8 @@ * requires_wiredtiger, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const minWiredTigerCacheSizeGB = 0.256; const cacheSize = minWiredTigerCacheSizeGB * 1000 * 1000 * 1000; // 256 MB @@ -36,6 +34,10 @@ replSet.startSet({setParameter: {timeseriesBucketMaxSize: defaultBucketMaxSize}} replSet.initiate(); const db = replSet.getPrimary().getDB(jsTestName()); + +const alwaysUseCompressedBuckets = + FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets"); + const coll = db.getCollection('t'); coll.drop(); assert.commandWorked(db.createCollection( @@ -45,7 +47,7 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) { replSet.stopSet(); jsTestLog( 'Skipping test because the TimeseriesScalabilityImprovements feature flag is disabled.'); - return; + quit(); } // Helper to log timeseries stats. @@ -115,7 +117,11 @@ while (bucketsClosedDueToSize == 0) { // buckets should be closed due to cache pressure. assert.eq(bucketsClosedDueToSize, cardinalityForCachePressure, formatStatsLog(timeseriesStats)); assert.eq(bucketsClosedDueToCachePressure, 0, formatStatsLog(timeseriesStats)); -assert.eq(compressedBuckets, cardinalityForCachePressure, formatStatsLog(timeseriesStats)); +if (alwaysUseCompressedBuckets) { + assert.eq(compressedBuckets, 0, formatStatsLog(timeseriesStats)); +} else { + assert.eq(compressedBuckets, cardinalityForCachePressure, formatStatsLog(timeseriesStats)); +} // If we pass the cardinality point to simulate cache pressure, we will begin to see buckets closed // due to 'CachePressure' and not 'DueToSize'. @@ -145,9 +151,12 @@ assert.eq(bucketsClosedDueToSize, cardinalityForCachePressure, formatStatsLog(ti assert.eq( bucketsClosedDueToCachePressure, cardinalityForCachePressure, formatStatsLog(timeseriesStats)); -// We expect the number of compressed buckets to double (independent to whether the buckets were -// closed due to size or cache pressure). -assert.eq(compressedBuckets, 2 * cardinalityForCachePressure, formatStatsLog(timeseriesStats)); +if (alwaysUseCompressedBuckets) { + assert.eq(compressedBuckets, 0, formatStatsLog(timeseriesStats)); +} else { + // We expect the number of compressed buckets to double (independent to whether the buckets were + // closed due to size or cache pressure). 
+ assert.eq(compressedBuckets, 2 * cardinalityForCachePressure, formatStatsLog(timeseriesStats)); +} replSet.stopSet(); -})(); diff --git a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js index f8e4d77fd2909..5c5dee6b6bb90 100644 --- a/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js +++ b/jstests/noPassthrough/timeseries_dynamic_bucket_sizing_large.js @@ -11,10 +11,8 @@ * requires_wiredtiger, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const defaultBucketMaxSize = 128000; // 125 KB const minWiredTigerCacheSizeGB = 0.256; // 256 MB @@ -32,6 +30,10 @@ replSet.startSet({setParameter: {timeseriesBucketMaxSize: defaultBucketMaxSize}} replSet.initiate(); const db = replSet.getPrimary().getDB(jsTestName()); + +const alwaysUseCompressedBuckets = + FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets"); + let coll = db.getCollection('t'); coll.drop(); @@ -39,7 +41,7 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) { replSet.stopSet(); jsTestLog( 'Skipping test because the TimeseriesScalabilityImprovements feature flag is disabled.'); - return; + quit(); } // Helper to log timeseries stats. @@ -110,7 +112,9 @@ const initializeBuckets = function(numOfBuckets = 1) { expectedBucketCount++; numBucketsClosedDueToSize++; - numCompressedBuckets++; + if (!alwaysUseCompressedBuckets) { + numCompressedBuckets++; + } timeseriesStats = assert.commandWorked(coll.stats()).timeseries; assert.eq(timeseriesStats.bucketCount, expectedBucketCount, formatStatsLog(timeseriesStats)); @@ -140,7 +144,9 @@ const initializeBuckets = function(numOfBuckets = 1) { // We create one bucket for 'meta2', fill it up and create another one for future insertions. expectedBucketCount += 2; numBucketsClosedDueToSize++; - numCompressedBuckets++; + if (!alwaysUseCompressedBuckets) { + numCompressedBuckets++; + } timeseriesStats = assert.commandWorked(coll.stats()).timeseries; assert.eq(timeseriesStats.bucketCount, expectedBucketCount, formatStatsLog(timeseriesStats)); @@ -201,8 +207,9 @@ const initializeBuckets = function(numOfBuckets = 1) { assert.eq(timeseriesStats.numBucketsClosedDueToSize, 0, formatStatsLog(timeseriesStats)); assert.eq( timeseriesStats.numBucketsClosedDueToCachePressure, 1, formatStatsLog(timeseriesStats)); - assert.eq(timeseriesStats.numCompressedBuckets, 1, formatStatsLog(timeseriesStats)); + assert.eq(timeseriesStats.numCompressedBuckets, + alwaysUseCompressedBuckets ? 
0 : 1, + formatStatsLog(timeseriesStats)); })(); replSet.stopSet(); -})(); diff --git a/jstests/noPassthrough/timeseries_expire.js b/jstests/noPassthrough/timeseries_expire.js index 8f4ce8564e6e3..d9654460db4fe 100644 --- a/jstests/noPassthrough/timeseries_expire.js +++ b/jstests/noPassthrough/timeseries_expire.js @@ -7,11 +7,8 @@ * requires_getmore, * ] */ -(function() { -"use strict"; - load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers' -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'}); const testDB = conn.getDB(jsTestName()); @@ -52,5 +49,4 @@ TimeseriesTest.run((insert) => { assert.eq(0, bucketDocs.length, bucketDocs); }); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_expires_with_partial_index.js b/jstests/noPassthrough/timeseries_expires_with_partial_index.js index 9a0c6534ed365..54fb1a1b7458c 100644 --- a/jstests/noPassthrough/timeseries_expires_with_partial_index.js +++ b/jstests/noPassthrough/timeseries_expires_with_partial_index.js @@ -10,13 +10,10 @@ * requires_fcv_63, * ] */ -(function() { -"use strict"; - load('jstests/libs/fixture_helpers.js'); // For 'FixtureHelpers' load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/ttl_util.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {TTLUtil} from "jstests/libs/ttl_util.js"; const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'}); const testDB = conn.getDB(jsTestName()); @@ -117,5 +114,4 @@ TimeseriesTest.run((insert) => { checkInsertion(coll, collectionTTLExpiredDocLowMeta, true); }); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_extended_range_startup.js b/jstests/noPassthrough/timeseries_extended_range_startup.js index a77a02be71c16..9d2885791ccc9 100644 --- a/jstests/noPassthrough/timeseries_extended_range_startup.js +++ b/jstests/noPassthrough/timeseries_extended_range_startup.js @@ -57,4 +57,4 @@ assert.eq(1, primaryDB.extended.count()); assert.eq(1, getExtendedRangeCount(primary)); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/timeseries_idle_buckets.js b/jstests/noPassthrough/timeseries_idle_buckets.js index 099d22d591ee4..426efb9e91eb5 100644 --- a/jstests/noPassthrough/timeseries_idle_buckets.js +++ b/jstests/noPassthrough/timeseries_idle_buckets.js @@ -5,10 +5,8 @@ * requires_replication, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const rst = new ReplSetTest({nodes: 1}); rst.startSet({setParameter: {timeseriesIdleBucketExpiryMemoryUsageThreshold: 10485760}}); @@ -16,6 +14,8 @@ rst.initiate(); const db = rst.getPrimary().getDB(jsTestName()); +const alwaysUseCompressedBuckets = + FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets"); const isBucketReopeningEnabled = TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db); assert.commandWorked(db.dropDatabase()); @@ -70,7 +70,9 @@ for (let i = 0; i < numDocs; i++) { // Check buckets. 
if (isBucketReopeningEnabled) { - let bucketDocs = bucketsColl.find({"control.version": 2}).limit(1).toArray(); + let bucketDocs = bucketsColl.find({"control.version": alwaysUseCompressedBuckets ? 1 : 2}) + .limit(1) + .toArray(); if (bucketDocs.length > 0) { foundExpiredBucket = true; } @@ -80,7 +82,7 @@ for (let i = 0; i < numDocs; i++) { .toArray(); if (bucketDocs.length > 1) { // If bucket compression is enabled the expired bucket should have been compressed - assert.eq(2, + assert.eq(alwaysUseCompressedBuckets ? 1 : 2, bucketDocs[0].control.version, 'unexpected control.version in first bucket: ' + tojson(bucketDocs)); assert.eq(1, @@ -105,4 +107,3 @@ for (let i = 0; i < numDocs; i++) { assert(foundExpiredBucket, "Did not find an expired bucket"); rst.stopSet(); -})(); diff --git a/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js b/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js index 44c922a6b794f..9a1a67d8da104 100644 --- a/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js +++ b/jstests/noPassthrough/timeseries_insert_after_cycle_primary.js @@ -6,10 +6,7 @@ * requires_replication, * ] */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const replTest = new ReplSetTest({nodes: 2}); replTest.startSet(); @@ -90,5 +87,4 @@ if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(testDB())) { checkColl(2, 2); } -replTest.stopSet(); -})(); +replTest.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_insert_ordered_false.js b/jstests/noPassthrough/timeseries_insert_ordered_false.js index 5434415c2a0e9..4fc57ece56d0b 100644 --- a/jstests/noPassthrough/timeseries_insert_ordered_false.js +++ b/jstests/noPassthrough/timeseries_insert_ordered_false.js @@ -5,10 +5,7 @@ * requires_sharding, * ] */ -(function() { -'use strict'; - -load('jstests/core/timeseries/libs/timeseries.js'); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load('jstests/libs/fail_point_util.js'); const conn = MongoRunner.runMongod(); @@ -113,13 +110,6 @@ assert.commandWorked(mongos.adminCommand({enableSharding: jsTestName()})); // Run test on sharded cluster before sharding the collection. runTest(mongos, st.getPrimaryShard(jsTestName()), false); -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - // Run test on sharded cluster after sharding the collection. runTest(mongos, st.getPrimaryShard(jsTestName()), true); st.stop(); -})(); diff --git a/jstests/noPassthrough/timeseries_insert_ordered_true.js b/jstests/noPassthrough/timeseries_insert_ordered_true.js index cbb2f04a78c62..3e57dd42feecd 100644 --- a/jstests/noPassthrough/timeseries_insert_ordered_true.js +++ b/jstests/noPassthrough/timeseries_insert_ordered_true.js @@ -1,10 +1,7 @@ /** * Tests that time-series inserts respect {ordered: true}. 
*/ -(function() { -'use strict'; - -load('jstests/core/timeseries/libs/timeseries.js'); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load('jstests/libs/fail_point_util.js'); const conn = MongoRunner.runMongod(); @@ -67,4 +64,3 @@ assert.eq(bucketsColl.count(), ' buckets but found: ' + tojson(bucketsColl.find().toArray())); MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthrough/timeseries_insert_rollback.js b/jstests/noPassthrough/timeseries_insert_rollback.js index 460bc95cfe857..8b90c8ecdf12c 100644 --- a/jstests/noPassthrough/timeseries_insert_rollback.js +++ b/jstests/noPassthrough/timeseries_insert_rollback.js @@ -53,7 +53,7 @@ rollbackTest.transitionToSteadyStateOperations(); assert.commandWorked(coll.insert(docs[2], {ordered: true})); assert.commandWorked(coll.insert(docs[3], {ordered: false})); -assert.docEq(docs.slice(2), coll.find().toArray()); +assert.sameMembers(docs.slice(2), coll.find().toArray()); const buckets = bucketsColl.find().toArray(); assert.eq(buckets.length, 2, 'Expected two bucket but found: ' + tojson(buckets)); diff --git a/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js b/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js index b7f002039fca4..75d3760e55331 100644 --- a/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js +++ b/jstests/noPassthrough/timeseries_internal_bounded_sort_spilling.js @@ -7,19 +7,11 @@ * does_not_support_stepdowns, * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); -load("jstests/core/timeseries/libs/timeseries.js"); +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; const kSmallMemoryLimit = 1024; -const conn = MongoRunner.runMongod({ - setParameter: { - internalQueryMaxBlockingSortMemoryUsageBytes: kSmallMemoryLimit, - featureFlagBucketUnpackWithSort: true - } -}); +const conn = MongoRunner.runMongod( + {setParameter: {internalQueryMaxBlockingSortMemoryUsageBytes: kSmallMemoryLimit}}); const dbName = jsTestName(); const testDB = conn.getDB(dbName); @@ -172,5 +164,4 @@ function assertSorted(result) { assert.eq(naive, opt); } -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_large_measurements_max_size.js b/jstests/noPassthrough/timeseries_large_measurements_max_size.js index 7c2b68f9cce2e..a23cab48234fa 100644 --- a/jstests/noPassthrough/timeseries_large_measurements_max_size.js +++ b/jstests/noPassthrough/timeseries_large_measurements_max_size.js @@ -7,10 +7,7 @@ * requires_fcv_61, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. 
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const conn = MongoRunner.runMongod(); @@ -86,5 +83,4 @@ for (let i = 0; i < numMeasurements; i++) { assert.commandWorked(coll.insertMany(batch, {ordered: false})); checkBucketSize(); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_latency_stats.js b/jstests/noPassthrough/timeseries_latency_stats.js index 923dd642b7207..bd294da9d39f9 100644 --- a/jstests/noPassthrough/timeseries_latency_stats.js +++ b/jstests/noPassthrough/timeseries_latency_stats.js @@ -25,20 +25,20 @@ assert.contains(bucketsColl.getName(), testDB.getCollectionNames()); const getLatencyStats = () => { const stats = coll.aggregate([{$collStats: {latencyStats: {}}}]).next(); - assert(stats.hasOwnProperty("latencyStats")); - assert(stats.latencyStats.hasOwnProperty("writes")); + assert(stats.hasOwnProperty("latencyStats"), tojson(stats)); + assert(stats.latencyStats.hasOwnProperty("writes"), tojson(stats)); return stats.latencyStats.writes; }; const stats1 = getLatencyStats(); -assert.eq(stats1.ops, 0); -assert.eq(stats1.latency, 0); +assert.eq(stats1.ops, 0, tojson(stats1)); +assert.eq(stats1.latency, 0, tojson(stats1)); assert.commandWorked(coll.insert({[timeFieldName]: new Date(), x: 1})); const stats2 = getLatencyStats(); -assert.eq(stats2.ops, 1); -assert.gt(stats2.latency, stats1.latency); +assert.eq(stats2.ops, 1, tojson(stats2)); +assert.gt(stats2.latency, stats1.latency, tojson(stats2)); const reps = 10; for (let i = 0; i < reps; ++i) { @@ -46,8 +46,8 @@ for (let i = 0; i < reps; ++i) { } const stats3 = getLatencyStats(); -assert.eq(stats3.ops, 1 + reps); -assert.gt(stats3.latency, stats2.latency); +assert.eq(stats3.ops, 1 + reps, tojson(stats3)); +assert.gt(stats3.latency, stats2.latency, tojson(stats3)); MongoRunner.stopMongod(conn); })(); diff --git a/jstests/noPassthrough/timeseries_multi_update_spill_to_disk.js b/jstests/noPassthrough/timeseries_multi_update_spill_to_disk.js new file mode 100644 index 0000000000000..08424e70c346b --- /dev/null +++ b/jstests/noPassthrough/timeseries_multi_update_spill_to_disk.js @@ -0,0 +1,129 @@ +/** + * Tests running time-series multi-update commands that spill to disk. + * + * @tags: [ + * featureFlagTimeseriesUpdatesSupport + * ] + */ + +import {getExecutionStages} from "jstests/libs/analyze_plan.js"; + +const dateTime = ISODate("2021-07-12T16:00:00Z"); +const buckets = ["A", "B", "C", "D", "E", "F", "G"]; +const numDocsPerBucket = 4; + +const conn = MongoRunner.runMongod({setParameter: 'allowDiskUseByDefault=true'}); +const db = conn.getDB(jsTestName()); +const coll = db.getCollection(jsTestName()); + +function setUpCollectionForTest() { + coll.drop(); + assert.commandWorked( + db.createCollection(coll.getName(), {timeseries: {timeField: "time", metaField: "meta"}})); + + let docs = []; + for (const bucket of buckets) { + for (let i = 0; i < numDocsPerBucket; ++i) { + docs.push({"time": dateTime, "meta": bucket, str: i % 2 == 0 ? 
"even" : "odd"}); + } + } + assert.commandWorked(coll.insert(docs)); +} + +function verifySpillingStats( + explain, expectedSpills, expectedMemoryLimitBytes, expectedDiskLimitBytes) { + const execStages = getExecutionStages(explain); + assert.gt(execStages.length, 0, `No execution stages found: ${tojson(explain)}`); + assert.eq("TS_MODIFY", + execStages[0].stage, + `TS_MODIFY stage not found in executionStages: ${tojson(explain)}`); + assert.eq("SPOOL", + execStages[0].inputStage.stage, + `SPOOL stage not found in executionStages: ${tojson(explain)}`); + const spoolStage = execStages[0].inputStage; + assert.eq(spoolStage.memLimit, expectedMemoryLimitBytes, tojson(explain)); + assert.eq(spoolStage.diskLimit, expectedDiskLimitBytes, tojson(explain)); + assert.eq(spoolStage.spills, expectedSpills, tojson(explain)); + if (expectedSpills > 0) { + assert(spoolStage.usedDisk, tojson(explain)); + assert.gt(spoolStage.spilledDataStorageSize, 0, tojson(explain)); + assert.gte( + spoolStage.totalDataSizeSpooled, spoolStage.spilledDataStorageSize, tojson(explain)); + } else { + assert(!spoolStage.usedDisk, tojson(explain)); + assert.eq(spoolStage.spilledDataStorageSize, 0, tojson(explain)); + assert.gt(spoolStage.totalDataSizeSpooled, 0, tojson(explain)); + } +} + +function runTest({memoryLimitBytes, expectedSpills}) { + assert.commandWorked(db.adminCommand( + {setParameter: 1, internalQueryMaxSpoolMemoryUsageBytes: memoryLimitBytes})); + + const diskLimitBytes = 10 * memoryLimitBytes; + assert.commandWorked( + db.adminCommand({setParameter: 1, internalQueryMaxSpoolDiskUsageBytes: diskLimitBytes})); + assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: true})); + + setUpCollectionForTest(); + + const updateCommand = { + update: coll.getName(), + updates: [{q: {str: "even"}, u: {$set: {str: "not even"}}, multi: true}] + }; + + // First run an explain and verify the spilling stats. + const explain = + assert.commandWorked(db.runCommand({explain: updateCommand, verbosity: "executionStats"})); + verifySpillingStats(explain, expectedSpills, memoryLimitBytes, diskLimitBytes); + + // Now run the actual command and verify the results. + const res = assert.commandWorked(db.runCommand(updateCommand)); + // We'll update exactly half the records. + const expectedNUpdated = buckets.length * numDocsPerBucket / 2; + assert.eq( + expectedNUpdated, res.n, "Update did not report the correct number of records update"); + assert.eq(coll.find({str: "even"}).toArray().length, + 0, + "Collection has an unexpected number of records matching filter post-update"); +} + +(function noSpilling() { + runTest({memoryLimitBytes: 100 * 1024 * 1024, expectedSpills: 0}); +})(); + +(function spillEveryRecord() { + // Spool stage just spills 32-byte record ids in this instance. Set a limit just under that size + // so that we will need to spill on every record. + runTest({memoryLimitBytes: 30, expectedSpills: buckets.length}); +})(); + +(function spillEveryOtherRecord() { + // Spool stage just spills 32-byte record ids in this instance. Set a limit just over that size + // so that we will need to spill on every other record. 
+ runTest({memoryLimitBytes: 50, expectedSpills: Math.floor(buckets.length / 2)}); +})(); + +(function maxDiskUseExceeded() { + assert.commandWorked( + db.adminCommand({setParameter: 1, internalQueryMaxSpoolDiskUsageBytes: 1})); + setUpCollectionForTest(); + assert.commandFailedWithCode(db.runCommand({ + update: coll.getName(), + updates: [{q: {str: "even"}, u: {$set: {str: "not even"}}, multi: true}] + }), + 7443700); +})(); + +(function maxMemoryUseExceeded_spillingDisabled() { + assert.commandWorked(db.adminCommand({setParameter: 1, allowDiskUseByDefault: false})); + + setUpCollectionForTest(); + assert.commandFailedWithCode(db.runCommand({ + update: coll.getName(), + updates: [{q: {str: "even"}, u: {$set: {str: "not even"}}, multi: true}] + }), + ErrorCodes.QueryExceededMemoryLimitNoDiskUseAllowed); +})(); + +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_out_concurrent_sharding.js b/jstests/noPassthrough/timeseries_out_concurrent_sharding.js new file mode 100644 index 0000000000000..cdab4605b6789 --- /dev/null +++ b/jstests/noPassthrough/timeseries_out_concurrent_sharding.js @@ -0,0 +1,178 @@ +/* + * Ensures that when $out is doing a rename collection operation and a concurrent 'shardCollection' + * command is invoked, the operations are serialized. This is a targeted test to reproduce the + * scenario described in SERVER-76626. We block the rename operation behind a DDL lock and validate + * that a concurrent 'shardCollection' command cannot make progress. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * requires_fcv_71, + * featureFlagAggOutTimeseries + * ] + */ +(function() { +"use strict"; + +load("jstests/libs/parallel_shell_helpers.js"); +load("jstests/libs/fail_point_util.js"); // for configureFailPoint. + +const dbName = "test"; +const timeFieldName = 'time'; +const metaFieldName = 'tag'; +const numDocs = 40; + +/* Create new sharded collection on testDB */ +let _collCounter = 0; +function setUpCollection(testDB) { + const collName = 'coll_' + _collCounter++; + + // Create a time-series collection to be the source for $out. + testDB.createCollection(collName, + {timeseries: {timeField: timeFieldName, metaField: metaFieldName}}); + const docs = []; + for (let i = 0; i < numDocs; ++i) { + docs.push({ + [timeFieldName]: ISODate(), + [metaFieldName]: (1 * numDocs) + i, + }); + } + assert.commandWorked(testDB[collName].insertMany(docs)); + return testDB[collName]; +} + +function runOut(dbName, sourceCollName, targetCollName, expectCommandWorked) { + const testDB = db.getSiblingDB(dbName); + const cmdRes = testDB.runCommand({ + aggregate: sourceCollName, + pipeline: [{ + $out: { + db: testDB.getName(), + coll: targetCollName, + timeseries: {timeField: "time", metaField: "tag"} + } + }], + cursor: {} + }); + if (expectCommandWorked) { + assert.commandWorked(cmdRes); + } else { + assert.commandFailed(cmdRes); + } +} + +function checkMetadata(testDB) { + const checkOptions = {'checkIndexes': 1}; + let inconsistencies = testDB.checkMetadataConsistency(checkOptions).toArray(); + assert.eq(0, inconsistencies, inconsistencies); +} + +function runOutAndShardCollectionConcurrently_shardCollectionMustFail(st, testDB, primaryShard) { + // The target collection should exist to produce the metadata inconsistency scenario. + const sourceColl = setUpCollection(testDB); + const targetColl = setUpCollection(testDB); + + // Set a failpoint in the internalRenameCollection command after the sharding check. 
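+    // The 'blockBeforeInternalRenameAndAfterTakingDDLLocks' failpoint used here pauses the
+    // internal rename while it is already holding the DDL locks, which is what lets this test
+    // assert below that a concurrent 'shardCollection' fails with LockBusy instead of
+    // interleaving with the rename (the SERVER-76626 scenario described in the file header).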
+    const fp = configureFailPoint(primaryShard, 'blockBeforeInternalRenameAndAfterTakingDDLLocks');
+
+    // Run an $out aggregation pipeline in a parallel shell.
+    let outShell = startParallelShell(funWithArgs(runOut,
+                                                  testDB.getName(),
+                                                  sourceColl.getName(),
+                                                  targetColl.getName(),
+                                                  true /*expectCommandWorked*/),
+                                      st.s.port);
+
+    // Wait for the aggregation pipeline to hit the failpoint.
+    fp.wait();
+
+    // Validate the temporary collection exists, meaning we are in the middle of the $out stage.
+    const collNames = testDB.getCollectionNames();
+    assert.eq(collNames.filter(col => col.includes('tmp.agg_out')).length, 1, collNames);
+
+    // Assert sharding the target collection fails, since the rename command has a lock on the
+    // view namespace.
+    jsTestLog("attempting to shard the target collection.");
+    assert.commandFailedWithCode(
+        testDB.adminCommand({shardCollection: targetColl.getFullName(), key: {[metaFieldName]: 1}}),
+        ErrorCodes.LockBusy);
+
+    // Turn off the failpoint and resume the $out aggregation pipeline.
+    jsTestLog("turning the failpoint off.");
+    fp.off();
+    outShell();
+    // Assert the metadata is consistent.
+    checkMetadata(testDB);
+
+    // Assert sharding the target collection succeeds, since there is no lock on the view
+    // namespace.
+    assert.commandWorked(testDB.adminCommand(
+        {shardCollection: targetColl.getFullName(), key: {[metaFieldName]: 1}}));
+
+    // Assert the metadata is consistent.
+    checkMetadata(testDB);
+
+    sourceColl.drop();
+    targetColl.drop();
+}
+
+function runOutAndShardCollectionConcurrently_OutMustFail(st, testDB, primaryShard) {
+    // The target collection should exist to produce the metadata inconsistency scenario.
+    const sourceColl = setUpCollection(testDB);
+    const targetColl = setUpCollection(testDB);
+
+    // Set a failpoint in the internalRenameCollection command after the sharding check.
+    const fp = configureFailPoint(primaryShard, 'blockBeforeInternalRenameAndBeforeTakingDDLLocks');
+
+    // Run an $out aggregation pipeline in a parallel shell.
+    let outShell = startParallelShell(funWithArgs(runOut,
+                                                  testDB.getName(),
+                                                  sourceColl.getName(),
+                                                  targetColl.getName(),
+                                                  false /*expectCommandWorked*/),
+                                      st.s.port);
+
+    // Wait for the aggregation pipeline to hit the failpoint.
+    fp.wait();
+
+    // Validate the temporary collection exists, meaning we are in the middle of the $out stage.
+    const collNames = testDB.getCollectionNames();
+    assert.eq(collNames.filter(col => col.includes('tmp.agg_out')).length, 1, collNames);
+
+    // Assert sharding the target collection succeeds, since the rename command has not yet taken
+    // the DDL locks on the view namespace.
+    jsTestLog("attempting to shard the target collection.");
+    assert.commandWorked(testDB.adminCommand(
+        {shardCollection: targetColl.getFullName(), key: {[metaFieldName]: 1}}));
+
+    // Turn off the failpoint and resume the $out aggregation pipeline.
+    jsTestLog("turning the failpoint off.");
+    fp.off();
+    outShell();
+
+    // Assert the metadata is consistent.
+ checkMetadata(testDB); + + sourceColl.drop(); + targetColl.drop(); +} + +const st = new ShardingTest({shards: 2}); +const testDB = st.s.getDB(dbName); +const primaryShard = st.shard0; + +// Reduce DDL lock timeout to half a second to speedup testing command that are expected to fail +// with LockBusy error +const fp = configureFailPoint(primaryShard, "overrideDDLLockTimeout", {'timeoutMillisecs': 500}); + +assert.commandWorked( + st.s.adminCommand({enableSharding: dbName, primaryShard: primaryShard.shardName})); + +// Running tests +runOutAndShardCollectionConcurrently_shardCollectionMustFail(st, testDB, primaryShard); +runOutAndShardCollectionConcurrently_OutMustFail(st, testDB, primaryShard); + +fp.off(); + +st.stop(); +}()); diff --git a/jstests/noPassthrough/timeseries_retry_delete_and_update.js b/jstests/noPassthrough/timeseries_retry_delete_and_update.js index 276dcb0449f9a..1127dedbb203c 100644 --- a/jstests/noPassthrough/timeseries_retry_delete_and_update.js +++ b/jstests/noPassthrough/timeseries_retry_delete_and_update.js @@ -5,7 +5,7 @@ * @tags: [ * requires_replication, * requires_timeseries, - * requires_fcv_70, + * featureFlagTimeseriesUpdatesSupport, * ] */ (function() { @@ -42,14 +42,15 @@ let retriedStatementsCount = 0; * returns the command object given the collection to run it on, and a validate function that * validates the result after the command has been applied to the collection. */ -const runTest = function(initialDocs, cmdBuilderFn, validateFn) { +const runTest = function( + initialDocs, cmdBuilderFn, validateFn, expectError = false, statementRetried = 1) { const session = primary.startSession({retryWrites: true}); const testDB = session.getDatabase(jsTestName()); - const coll = testDB.getCollection('timeseres_retry_delete_and_update_' + collCount++); + const coll = testDB.getCollection('timeseries_retry_delete_and_update_' + collCount++); coll.drop(); - assert.commandWorked( - testDB.createCollection(coll.getName(), {timeseries: {timeField: timeFieldName}})); + assert.commandWorked(testDB.createCollection( + coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); assert.commandWorked(testDB.runCommand({ insert: coll.getName(), @@ -59,13 +60,18 @@ const runTest = function(initialDocs, cmdBuilderFn, validateFn) { })); // For retryable writes, the server uses 'txnNumber' as the key to look up previously executed - // operations in the sesssion. + // operations in the session. 
let cmdObj = cmdBuilderFn(coll); cmdObj["lsid"] = session.getSessionId(); cmdObj["txnNumber"] = NumberLong(1); - assert.commandWorked(testDB.runCommand(cmdObj), 'Failed to write bucket on first write'); - assert.commandWorked(testDB.runCommand(cmdObj), 'Failed to write bucket on retry write'); + if (expectError) { + assert.commandFailedWithCode(testDB.runCommand(cmdObj), ErrorCodes.InvalidOptions); + assert.commandFailedWithCode(testDB.runCommand(cmdObj), ErrorCodes.InvalidOptions); + } else { + assert.commandWorked(testDB.runCommand(cmdObj), 'Failed to write bucket on first write'); + assert.commandWorked(testDB.runCommand(cmdObj), 'Failed to write bucket on retry write'); + } validateFn(coll); @@ -73,7 +79,8 @@ const runTest = function(initialDocs, cmdBuilderFn, validateFn) { assert.eq(++retriedCommandsCount, transactionsServerStatus.retriedCommandsCount, 'Incorrect statistic in db.serverStatus(): ' + tojson(transactionsServerStatus)); - assert.eq(++retriedStatementsCount, + retriedStatementsCount += statementRetried; + assert.eq(retriedStatementsCount, transactionsServerStatus.retriedStatementsCount, 'Incorrect statistic in db.serverStatus(): ' + tojson(transactionsServerStatus)); @@ -106,7 +113,19 @@ function deleteValidateFn(coll) { })(); function updateCmdBuilderFn(coll) { - return {update: coll.getName(), updates: [{q: {}, u: {$inc: {updated: 1}}, multi: false}]}; + return { + update: coll.getName(), + updates: [ + {q: {}, u: {$inc: {updated: 1}}, multi: false}, + {q: {}, u: {$inc: {updated: 1}}, multi: true}, + {q: {}, u: {$inc: {anotherUpdated: 1}}, multi: false}, + ], + }; +} +function updateCmdUnorderedBuilderFn(coll) { + let updateCmd = updateCmdBuilderFn(coll); + updateCmd["ordered"] = false; + return updateCmd; } function updateValidateFn(coll) { assert.eq(coll.countDocuments({updated: {$exists: true}}), @@ -114,14 +133,61 @@ function updateValidateFn(coll) { "Expected exactly one document to be updated."); assert.eq(coll.countDocuments({updated: 1}), 1, "Expected document to be updated only once."); } +function updateUnorderedValidateFn(coll) { + updateValidateFn(coll); + assert.eq(coll.countDocuments({anotherUpdated: {$exists: true}}), + 1, + "Expected exactly one document to be updated."); + assert.eq( + coll.countDocuments({anotherUpdated: 1}), 1, "Expected document to be updated only once."); +} -// TODO SERVER-73726 Enable update tests. 
-// (function testPartialBucketUpdate() { -// runTest(allDocumentsSameBucket, updateCmdBuilderFn, updateValidateFn); -// })(); -// (function testFullBucketUpdate() { -// runTest(allDocumentsDifferentBuckets, updateCmdBuilderFn, updateValidateFn); -// })(); +(function testPartialBucketUpdate() { + runTest(allDocumentsSameBucket, + updateCmdBuilderFn, + updateValidateFn, + /*expectError=*/ true); +})(); +(function testFullBucketUpdate() { + runTest(allDocumentsDifferentBuckets, + updateCmdBuilderFn, + updateValidateFn, + /*expectError=*/ true); +})(); +(function testPartialBucketUpdateUnordered() { + runTest(allDocumentsSameBucket, + updateCmdUnorderedBuilderFn, + updateUnorderedValidateFn, + /*expectError=*/ true, + /*statementRetried=*/ 2); +})(); +(function testFullBucketUpdateUnordered() { + runTest(allDocumentsDifferentBuckets, + updateCmdUnorderedBuilderFn, + updateUnorderedValidateFn, + /*expectError=*/ true, + /*statementRetried=*/ 2); +})(); + +function upsertCmdBuilderFn(coll) { + return { + update: coll.getName(), + updates: [{ + q: {[timeFieldName]: dateTime, [metaFieldName]: "B"}, + u: {$inc: {updated: 1}}, + multi: false, + upsert: true, + }], + }; +} +function upsertValidateFn(coll) { + assert.eq(coll.countDocuments({[metaFieldName]: "B", updated: 1}), + 1, + "Expected exactly one document to be upserted once."); +} +(function testUpsert() { + runTest(allDocumentsSameBucket, upsertCmdBuilderFn, upsertValidateFn); +})(); rst.stopSet(); })(); diff --git a/jstests/noPassthrough/timeseries_sample.js b/jstests/noPassthrough/timeseries_sample.js index 70bf25398ba3b..3a5f69824f2ee 100644 --- a/jstests/noPassthrough/timeseries_sample.js +++ b/jstests/noPassthrough/timeseries_sample.js @@ -2,10 +2,7 @@ * Tests inserting sample data into the time-series buckets collection. This test is for the * exercising the optimized $sample implementation for $_internalUnpackBucket. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {aggPlanHasStage, getAggPlanStage, getPlanStage} from "jstests/libs/analyze_plan.js"; let conn = MongoRunner.runMongod({setParameter: {timeseriesBucketMaxCount: 100}}); @@ -197,5 +194,4 @@ assert.gte(sampleFromBucketStage.dupsTested, 150, sampleFromBucketStage); const multiIteratorStage = getPlanStage(sampleFromBucketStage, "MULTI_ITERATOR"); assert.neq(multiIteratorStage, null, explainRes); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_serverStatus.js b/jstests/noPassthrough/timeseries_serverStatus.js index 58b3fe74f7da7..f3ac9b61a73c1 100644 --- a/jstests/noPassthrough/timeseries_serverStatus.js +++ b/jstests/noPassthrough/timeseries_serverStatus.js @@ -1,12 +1,9 @@ /** * Tests that serverStatus contains a bucketCatalog section. 
*/ -(function() { -"use strict"; - load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallel_shell_helpers.js"); -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const conn = MongoRunner.runMongod(); @@ -112,5 +109,4 @@ if (!FeatureFlagUtil.isEnabled(conn, "TimeseriesScalabilityImprovements")) { checkNoServerStatus(); } -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_server_parameters.js b/jstests/noPassthrough/timeseries_server_parameters.js index f74e1923ef381..d1be2c8d99c41 100644 --- a/jstests/noPassthrough/timeseries_server_parameters.js +++ b/jstests/noPassthrough/timeseries_server_parameters.js @@ -6,10 +6,6 @@ * ] */ -(function() { -'use strict'; - -load("jstests/core/timeseries/libs/timeseries.js"); load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // Valid parameter values are in the range [0, infinity). @@ -32,5 +28,4 @@ testNumericServerParameter('timeseriesBucketMaxSize', true /*hasLowerBound*/, 0 /*lowerOutOfBounds*/, false /*hasUpperBound*/, - "unused" /*upperOutOfBounds*/); -})(); + "unused" /*upperOutOfBounds*/); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_server_status_measurements.js b/jstests/noPassthrough/timeseries_server_status_measurements.js index e5177cdadb4c8..67a765b6f2781 100644 --- a/jstests/noPassthrough/timeseries_server_status_measurements.js +++ b/jstests/noPassthrough/timeseries_server_status_measurements.js @@ -7,10 +7,7 @@ * requires_fcv_61, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const conn = MongoRunner.runMongod(); @@ -75,5 +72,4 @@ for (let i = 0; i < numMeasurements; i++) { assert.commandWorked(coll.insertMany(batch, {ordered: false})); checkBucketSize(); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_sort.js b/jstests/noPassthrough/timeseries_sort.js index b8cdd15df0971..3df1f0685abd9 100644 --- a/jstests/noPassthrough/timeseries_sort.js +++ b/jstests/noPassthrough/timeseries_sort.js @@ -6,11 +6,8 @@ * requires_sharding, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/analyze_plan.js"); // For getAggPlanStage +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; Random.setRandomSeed(); @@ -26,12 +23,6 @@ const st = new ShardingTest({shards: 2}); const sDB = st.s.getDB(dbName); assert.commandWorked(sDB.adminCommand({enableSharding: dbName})); -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - st.ensurePrimaryShard(dbName, st.shard0.shardName); // Shard time-series collection. 
@@ -130,4 +121,3 @@ assertAccessPath([forwardSort], {$natural: -1}, "COLLSCAN", 1); assertAccessPath([backwardSort], {$natural: 1}, "COLLSCAN", -1); st.stop(); -})(); diff --git a/jstests/noPassthrough/timeseries_ttl.js b/jstests/noPassthrough/timeseries_ttl.js index af3101ec6c0a8..5232f5f735b04 100644 --- a/jstests/noPassthrough/timeseries_ttl.js +++ b/jstests/noPassthrough/timeseries_ttl.js @@ -8,10 +8,8 @@ * requires_getmore, * ] */ -(function() { -"use strict"; load("jstests/libs/clustered_collections/clustered_collection_util.js"); -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; // Run TTL monitor constantly to speed up this test. const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'}); @@ -155,5 +153,4 @@ testCase((coll, bucketsColl) => { assert.eq(0, bucketsColl.find().itcount()); })(); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_ttl_index_downgrade.js b/jstests/noPassthrough/timeseries_ttl_index_downgrade.js deleted file mode 100644 index 3f90074172775..0000000000000 --- a/jstests/noPassthrough/timeseries_ttl_index_downgrade.js +++ /dev/null @@ -1,69 +0,0 @@ -/** - * Tests that the cluster cannot be downgraded when there are secondary TTL indexes with partial - * filters on time-series present. - */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/feature_flag_util.js"); // For isEnabled. - -const conn = MongoRunner.runMongod(); -const db = conn.getDB("test"); - -if (!FeatureFlagUtil.isEnabled(db, "TimeseriesScalabilityImprovements")) { - jsTestLog( - "Skipped test as the featureFlagTimeseriesScalabilityImprovements feature flag is not enabled."); - MongoRunner.stopMongod(conn); - return; -} - -const collName = "timeseries_ttl_index_downgrade"; -const coll = db.getCollection(collName); -const bucketsColl = db.getCollection("system.buckets." + collName); - -const timeFieldName = "tm"; -const metaFieldName = "mm"; -const timeSpec = { - [timeFieldName]: 1 -}; - -assert.commandWorked(db.createCollection( - coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); - -function checkIndexForDowngrade(isCompatible) { - if (!isCompatible) { - assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}), - ErrorCodes.CannotDowngrade); - assert.commandWorked(coll.dropIndexes("*")); - } - - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - assert.commandWorked(db.adminCommand({setFeatureCompatibilityVersion: latestFCV})); -} - -// Verify that downgrading succeeds on a time-series collection without any indexes. -checkIndexForDowngrade(true); - -// Verify that downgrading succeeds on a time-series collection with a partial index. -const options = { - name: "partialIndexOnMeta", - partialFilterExpression: {[metaFieldName]: {$gt: 5}} -}; -assert.commandWorked(coll.createIndex(timeSpec, options)); -checkIndexForDowngrade(true); - -// Verify that downgrading succeeds on a time-series collection created with expireAfterSeconds -// value. -coll.drop(); -assert.commandWorked(db.createCollection( - coll.getName(), - {timeseries: {timeField: timeFieldName, metaField: metaFieldName}, expireAfterSeconds: 3600})); -checkIndexForDowngrade(true); - -// Verify that downgrading fails on a time-series collection with a partial, TTL index. 
-assert.commandWorked(coll.createIndex(timeSpec, Object.merge(options, {expireAfterSeconds: 400}))); -checkIndexForDowngrade(false); - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/timeseries_update_delete_transaction.js b/jstests/noPassthrough/timeseries_update_delete_transaction.js index 3bf3a1dd3c492..7b3c50d2d459a 100644 --- a/jstests/noPassthrough/timeseries_update_delete_transaction.js +++ b/jstests/noPassthrough/timeseries_update_delete_transaction.js @@ -5,11 +5,6 @@ * requires_replication, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); - const rst = new ReplSetTest({nodes: 1}); rst.startSet(); rst.initiate(); @@ -34,9 +29,11 @@ assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes. session.startTransaction(); // Time-series update in a multi-document transaction should fail. -assert.commandFailedWithCode(sessionColl.update({[metaFieldName]: "a"}, {"$set": {"b": "a"}}), +assert.commandFailedWithCode(session.getDatabase(jsTestName()).runCommand({ + update: collectionName, + updates: [{q: {[metaFieldName]: "a"}, u: {"$set": {"b": "a"}}, multi: true}], +}), ErrorCodes.OperationNotSupportedInTransaction); assert.commandFailedWithCode(session.commitTransaction_forTesting(), ErrorCodes.NoSuchTransaction); session.endSession(); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_update_one_transaction.js b/jstests/noPassthrough/timeseries_update_one_transaction.js new file mode 100644 index 0000000000000..5c10cf9be7953 --- /dev/null +++ b/jstests/noPassthrough/timeseries_update_one_transaction.js @@ -0,0 +1,273 @@ +/** + * Tests the updateOne command on time-series collections in multi-document transactions. + * + * @tags: [ + * requires_replication, + * requires_timeseries, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ +load("jstests/libs/fail_point_util.js"); +load('jstests/libs/parallel_shell_helpers.js'); + +const rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); + +const metaFieldName = "mm"; +const timeFieldName = "tt"; +const collectionNamePrefix = "test_coll_"; +let collectionCounter = 0; + +const testDB = rst.getPrimary().getDB(jsTestName()); +let testColl = testDB[collectionNamePrefix + collectionCounter]; +assert.commandWorked(testDB.dropDatabase()); + +const docsPerMetaField = 3; +const initializeData = function() { + testColl = testDB[collectionNamePrefix + ++collectionCounter]; + assert.commandWorked(testDB.createCollection( + testColl.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); + + let docs = []; + for (let i = 0; i < docsPerMetaField; ++i) { + docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 0}); + docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 1}); + docs.push({_id: i, [timeFieldName]: new Date(), [metaFieldName]: 2}); + } + + // Insert test documents. + assert.commandWorked(testColl.insertMany(docs)); +}; + +// 1. Update one document from the collection in a transaction. 
+(function basicUpdateOne() { + jsTestLog("Running 'basicUpdateOne'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + + session.startTransaction(); + assert.commandWorked(sessionColl.updateOne({_id: 0, [metaFieldName]: 0}, {$inc: {updated: 1}})); + assert.commandWorked(session.commitTransaction_forTesting()); + session.endSession(); + + // Expect one updated document with updated: 1. + assert.eq(testColl.find({updated: 1}).toArray().length, 1); +})(); + +// 2. updateOne should not have visible changes when the transaction is aborted. +(function updateOneTransactionAborts() { + jsTestLog("Running 'updateOneTransactionAborts'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + + session.startTransaction(); + assert.commandWorked(sessionColl.updateOne({_id: 0, [metaFieldName]: 1}, {$inc: {updated: 1}})); + assert.commandWorked(session.abortTransaction_forTesting()); + session.endSession(); + + // The transaction was aborted so no documents should have been updated. + assert.eq(testColl.find({updated: 1}).toArray().length, 0); +})(); + +// 3. Run a few updateOnes in a single transaction. +(function multipleUpdateOne() { + jsTestLog("Running 'multipleUpdateOne'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + session.startTransaction(); + + for (let i = 0; i < docsPerMetaField; ++i) { + assert.commandWorked( + sessionColl.updateOne({_id: i, [metaFieldName]: 0}, {$inc: {updated: 1}})); + } + + assert.commandWorked(session.commitTransaction_forTesting()); + session.endSession(); + + // Expect all documents with {meta: 0} to be updated. + assert.eq(testColl.find({[metaFieldName]: 0, updated: 1}).toArray().length, docsPerMetaField); +})(); + +// 4. Tests performing updateOnes in and out of a transaction on abort. +(function mixedUpdateOneAbortTxn() { + jsTestLog("Running 'mixedUpdateOneAbortTxn'"); + initializeData(); + + const session = testDB.getMongo().startSession(); + const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName()); + session.startTransaction(); + + // Update all documents for meta values 0, 1. + for (let i = 0; i < docsPerMetaField; ++i) { + assert.commandWorked( + sessionColl.updateOne({_id: i, [metaFieldName]: 0}, {$inc: {updated: 1}})); + assert.commandWorked( + sessionColl.updateOne({_id: i, [metaFieldName]: 1}, {$inc: {updated: 1}})); + } + + // Outside of the session and transaction, perform an updateOne. + assert.commandWorked(testColl.updateOne({_id: 0, [metaFieldName]: 2}, {$inc: {updated: 1}})); + + assert.commandWorked(session.abortTransaction_forTesting()); + session.endSession(); + + // The aborted transaction should not have updated any documents. + assert.eq(testColl.find({[metaFieldName]: 0, updated: 1}).toArray().length, 0); + assert.eq(testColl.find({[metaFieldName]: 1, updated: 1}).toArray().length, 0); + + // The update outside of the transaction should have succeeded. + assert.eq(testColl.find({[metaFieldName]: 2, updated: 1}).toArray().length, 1); +})(); + +// 5. Tests performing updateOnes in and out of a transaction on commit. 
+(function mixedUpdateOneCommitTxn() {
+    jsTestLog("Running 'mixedUpdateOneCommitTxn'");
+    initializeData();
+
+    const session = testDB.getMongo().startSession();
+    const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+    session.startTransaction();
+
+    for (let i = 0; i < docsPerMetaField; ++i) {
+        // Within the transaction.
+        assert.commandWorked(
+            sessionColl.updateOne({_id: i, [metaFieldName]: 0}, {$inc: {updated: 1}}));
+        assert.commandWorked(
+            sessionColl.updateOne({_id: i, [metaFieldName]: 1}, {$inc: {updated: 1}}));
+
+        // Outside of the session and transaction, perform updateOne.
+        assert.commandWorked(
+            testColl.updateOne({_id: i, [metaFieldName]: 2}, {$inc: {updated: 1}}));
+    }
+
+    assert.commandWorked(session.commitTransaction_forTesting());
+    session.endSession();
+
+    // Expect all documents to have been updated.
+    assert.eq(testColl.find({updated: 1}).toArray().length, 9);
+})();
+
+// 6. Tests a race to update the same document in and out of a transaction.
+(function raceToUpdateOne() {
+    jsTestLog("Running 'raceToUpdateOne'");
+    initializeData();
+
+    const session = testDB.getMongo().startSession();
+    const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+    session.startTransaction();
+
+    // Within the transaction, perform an updateOne.
+    const updateFilter = {_id: 1, [metaFieldName]: 0};
+    assert.commandWorked(sessionColl.updateOne(updateFilter, {$set: {_id: 10}}));
+
+    // Note: there is a chance the parallel shell runs after the transaction is committed, and that
+    // is fine as both interleavings should succeed.
+    const awaitTestUpdate = startParallelShell(
+        funWithArgs(function(dbName, collName, filter) {
+            const testDB = db.getSiblingDB(dbName);
+            const coll = testDB.getCollection(collName);
+
+            // Outside of the session and transaction, perform updateOne.
+            assert.commandWorked(coll.updateOne(filter, {$set: {_id: 10}}));
+        }, testDB.getName(), testColl.getName(), updateFilter), testDB.getMongo().port);
+
+    assert.commandWorked(session.commitTransaction_forTesting());
+    session.endSession();
+
+    // Allow the non-transactional updateOne to finish.
+    awaitTestUpdate();
+    assert.eq(testColl.find({_id: 10}).toArray().length, 1);
+})();
+
+// 7. Tests a transactional updateOne on a document which becomes visible after the transaction
+// starts.
+(function updateOneAndInsertBeforeCommit() {
+    jsTestLog("Running 'updateOneAndInsertBeforeCommit'");
+    initializeData();
+
+    const session = testDB.getMongo().startSession();
+    const sessionColl = session.getDatabase(jsTestName()).getCollection(testColl.getName());
+
+    session.startTransaction();
+    // Ensure the document does not exist within the snapshot of the newly started transaction.
+    assert.eq(sessionColl.find({[metaFieldName]: 101}).toArray().length, 0);
+
+    // Outside of the session and transaction, update the document.
+    assert.commandWorked(
+        testColl.updateOne({[metaFieldName]: 0, _id: 0}, {$set: {[metaFieldName]: 101}}));
+
+    // Double check the document is still not visible from within the transaction.
+    assert.eq(sessionColl.find({[metaFieldName]: 101}).toArray().length, 0);
+
+    // Within the transaction, perform updateOne.
+ assert.commandWorked(sessionColl.updateOne({[metaFieldName]: 101}, {$inc: {updated: 1}})); + assert.eq(sessionColl.find({updated: 1}).toArray().length, 0); + + assert.commandWorked(session.commitTransaction_forTesting()); + session.endSession(); + + // The newly updated document should not be updated even though the transaction commits after + // the write performed outside. + assert.eq(testColl.find({updated: 1}).toArray().length, 0); +})(); + +// 8. Tests two side-by-side transactional updateOnes on the same document. +(function updateOneInTwoTransactions() { + jsTestLog("Running 'updateOneInTwoTransactions'"); + initializeData(); + + const sessionA = testDB.getMongo().startSession(); + const sessionB = testDB.getMongo().startSession(); + const collA = sessionA.getDatabase(jsTestName()).getCollection(testColl.getName()); + const collB = sessionB.getDatabase(jsTestName()).getCollection(testColl.getName()); + + const docToUpdate = {_id: 1, [metaFieldName]: 1}; + + // Start transactions on different sessions. + sessionA.startTransaction({readConcern: {level: "snapshot"}}); + sessionB.startTransaction({readConcern: {level: "snapshot"}}); + + // Ensure the document exists in the snapshot of both transactions. + assert.eq(collA.find(docToUpdate).toArray().length, 1); + assert.eq(collB.find(docToUpdate).toArray().length, 1); + + // Perform updateOne on transaction A. + assert.commandWorked(collA.updateOne(docToUpdate, {$inc: {updated: 1}})); + + const updateCommand = { + update: collB.getName(), + updates: [{ + q: docToUpdate, + u: {$inc: {updated: 1}}, + multi: false, + }] + }; + + // We expect the updateOne on transaction B to fail, causing the transaction to abort. + // Sidenote: avoiding the updateOne method from 'crud_api.js' because it throws. + assert.commandFailedWithCode(collB.runCommand(updateCommand), ErrorCodes.WriteConflict); + assert.commandFailedWithCode(sessionB.abortTransaction_forTesting(), + ErrorCodes.NoSuchTransaction); + sessionB.endSession(); + + // Ensure the document is updated in the snapshot of transaction A. + assert.eq(collA.find({updated: 1}).toArray().length, 1); + // Since transaction A has not committed yet, the document should still not be updated outside + // of the transaction. + assert.eq(testColl.find({updated: 1}).toArray().length, 0); + + // Ensure the document has been successfully updated after transaction A commits. + assert.commandWorked(sessionA.commitTransaction_forTesting()); + assert.eq(testColl.find({updated: 1}).toArray().length, 1); + + sessionA.endSession(); +})(); + +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/timeseries_update_oplog.js b/jstests/noPassthrough/timeseries_update_oplog.js new file mode 100644 index 0000000000000..9b6e8c8c37f67 --- /dev/null +++ b/jstests/noPassthrough/timeseries_update_oplog.js @@ -0,0 +1,164 @@ +/** + * Tests time-series updates are replicated atomically as applyOps oplog entries that group the + * writes together. 
+ * + * @tags: [ + * requires_replication, + * requires_timeseries, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ +(function() { +'use strict'; + +const rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); + +const primary = rst.getPrimary(); +const timeFieldName = 'time'; +const metaFieldName = 'tag'; +const dateTime = ISODate("2023-06-29T16:00:00Z"); +const testDB = primary.getDB("test"); +let collCount = 0; + +const initialMeasurement = [ + {_id: 0, [timeFieldName]: dateTime, [metaFieldName]: 0}, + {_id: 1, [timeFieldName]: dateTime, [metaFieldName]: 0, a: 1}, + {_id: 2, [timeFieldName]: dateTime, [metaFieldName]: 0, a: 1}, + {_id: 3, [timeFieldName]: dateTime, [metaFieldName]: 1}, +]; + +const runTest = function({cmdBuilderFn, validateFn, retryableWrite = false}) { + const coll = testDB.getCollection('timeseries_update_oplog' + collCount++); + coll.drop(); + assert.commandWorked(testDB.createCollection( + coll.getName(), {timeseries: {timeField: timeFieldName, metaField: metaFieldName}})); + assert.commandWorked(coll.insertMany(initialMeasurement)); + + let cmdObj = cmdBuilderFn(coll); + if (retryableWrite) { + const session = primary.startSession({retryWrites: true}); + cmdObj["lsid"] = session.getSessionId(); + cmdObj["txnNumber"] = NumberLong(0); + assert.commandWorked(session.getDatabase("test").runCommand(cmdObj)); + } else { + assert.commandWorked(testDB.runCommand(cmdObj)); + } + + validateFn(testDB, coll, retryableWrite); +}; + +function partialBucketMultiUpdateBuilderFn(coll) { + return {update: coll.getName(), updates: [{q: {a: 1}, u: {$inc: {updated: 1}}, multi: true}]}; +} +function fullBucketMultiUpdateBuilderFn(coll) { + return { + update: coll.getName(), + updates: [{q: {[metaFieldName]: 0}, u: {$inc: {updated: 1}}, multi: true}] + }; +} +function partialBucketSingletonUpdateBuilderFn(coll) { + return { + update: coll.getName(), + updates: [{q: {[metaFieldName]: 0}, u: {$inc: {updated: 1}}, multi: false}] + }; +} +function fullBucketSingletonUpdateBuilderFn(coll) { + return { + update: coll.getName(), + updates: [{q: {[metaFieldName]: 1}, u: {$inc: {updated: 1}}, multi: false}] + }; +} +function upsertBuilderFn(coll) { + return { + update: coll.getName(), + updates: [{ + q: {[timeFieldName]: dateTime, [metaFieldName]: 2}, + u: {$inc: {updated: 1}}, + multi: false, + upsert: true + }] + }; +} + +// Full bucket update's oplog entry is an ApplyOps[delete, insert]. +function fullBucketValidateFn(testDB, coll, retryableWrite) { + const opEntries = + testDB.getSiblingDB("local") + .oplog.rs + .find({"o.applyOps.ns": testDB.getName() + '.system.buckets.' + coll.getName()}) + .toArray(); + assert.eq(opEntries.length, 1); + const opEntry = opEntries[0]; + assert.eq(opEntry["o"]["applyOps"].length, 2); + assert(opEntry["o"]["applyOps"][0]["op"] == "d"); + assert(opEntry["o"]["applyOps"][1]["op"] == "i"); +} +// Partial bucket update's oplog entry is an ApplyOps[update, insert]. +function partialBucketValidateFn(testDB, coll, retryableWrite) { + const opEntries = + testDB.getSiblingDB("local") + .oplog.rs + .find({"o.applyOps.ns": testDB.getName() + '.system.buckets.' + coll.getName()}) + .toArray(); + assert.eq(opEntries.length, 1); + const opEntry = opEntries[0]; + assert.eq(opEntry["o"]["applyOps"].length, 2); + assert(opEntry["o"]["applyOps"][0]["op"] == "u"); + assert(opEntry["o"]["applyOps"][1]["op"] == "i"); +} +// When inserting a new measurement, an Upsert's oplog entry is an ApplyOps[insert] if it's a +// retryable write. 
Otherwise, it generates a regular insert oplog entry.
+function upsertValidateFn(testDB, coll, retryableWrite) {
+    const opEntries =
+        testDB.getSiblingDB("local")
+            .oplog.rs
+            .find({"o.applyOps.ns": testDB.getName() + '.system.buckets.' + coll.getName()})
+            .toArray();
+    if (retryableWrite) {
+        assert.eq(opEntries.length, 1);
+        const opEntry = opEntries[0];
+        assert.eq(opEntry["o"]["applyOps"].length, 1);
+        assert(opEntry["o"]["applyOps"][0]["op"] == "i");
+    } else {
+        assert.eq(opEntries.length, 0);
+    }
+}
+
+(function testPartialBucketMultiUpdate() {
+    runTest({cmdBuilderFn: partialBucketMultiUpdateBuilderFn, validateFn: partialBucketValidateFn});
+})();
+(function testFullBucketMultiUpdate() {
+    runTest({cmdBuilderFn: fullBucketMultiUpdateBuilderFn, validateFn: fullBucketValidateFn});
+})();
+(function testPartialBucketSingletonUpdate() {
+    runTest(
+        {cmdBuilderFn: partialBucketSingletonUpdateBuilderFn, validateFn: partialBucketValidateFn});
+})();
+(function testFullBucketSingletonUpdate() {
+    runTest({cmdBuilderFn: fullBucketSingletonUpdateBuilderFn, validateFn: fullBucketValidateFn});
+})();
+(function testPartialBucketRetryableSingletonUpdate() {
+    runTest({
+        cmdBuilderFn: partialBucketSingletonUpdateBuilderFn,
+        validateFn: partialBucketValidateFn,
+        retryableWrite: true
+    });
+})();
+(function testFullBucketRetryableSingletonUpdate() {
+    runTest({
+        cmdBuilderFn: fullBucketSingletonUpdateBuilderFn,
+        validateFn: fullBucketValidateFn,
+        retryableWrite: true
+    });
+})();
+(function testUpsert() {
+    runTest({cmdBuilderFn: upsertBuilderFn, validateFn: upsertValidateFn});
+})();
+(function testRetryableUpsert() {
+    runTest({cmdBuilderFn: upsertBuilderFn, validateFn: upsertValidateFn, retryableWrite: true});
+})();
+
+rst.stopSet();
+})();
\ No newline at end of file
diff --git a/jstests/noPassthrough/timestamp_index_builds.js b/jstests/noPassthrough/timestamp_index_builds.js
index 61299bbaf4ea8..bc7c303dcfb0d 100644
--- a/jstests/noPassthrough/timestamp_index_builds.js
+++ b/jstests/noPassthrough/timestamp_index_builds.js
@@ -65,7 +65,7 @@ nodes.forEach(node => assert.commandWorked(node.adminCommand(
 // This test creates indexes with a majority of nodes not available for replication. So, disabling
 // index build commit quorum.
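+// Note: the third argument to createIndexes() below is the commitQuorum; passing 0 disables
+// waiting for any voting members, matching the comment above.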
-assert.commandWorked(coll.createIndexes([{foo: 1}], {background: true}, 0)); +assert.commandWorked(coll.createIndexes([{foo: 1}], {}, 0)); rst.awaitReplication(); rst.stopSet(undefined, true); diff --git a/jstests/noPassthrough/traffic_reading.js b/jstests/noPassthrough/traffic_reading.js index aa29d360387e8..9b041085fe4f8 100644 --- a/jstests/noPassthrough/traffic_reading.js +++ b/jstests/noPassthrough/traffic_reading.js @@ -15,13 +15,13 @@ mkdir(recordingDir); // Create the options and run mongod var opts = {auth: "", setParameter: "trafficRecordingDirectory=" + recordingDir}; -m = MongoRunner.runMongod(opts); +let m = MongoRunner.runMongod(opts); // Get the port of the host var serverPort = m.port; // Create necessary users -adminDB = m.getDB("admin"); +let adminDB = m.getDB("admin"); const testDB = m.getDB("test"); const coll = testDB.getCollection("foo"); adminDB.createUser({user: "admin", pwd: "pass", roles: jsTest.adminUserRoles}); diff --git a/jstests/noPassthrough/transaction_api_commit_errors.js b/jstests/noPassthrough/transaction_api_commit_errors.js new file mode 100644 index 0000000000000..1613599a78b26 --- /dev/null +++ b/jstests/noPassthrough/transaction_api_commit_errors.js @@ -0,0 +1,115 @@ +/** + * Tests that the transaction API handles commit errors correctly. + */ +(function() { +"use strict"; + +load("jstests/libs/fail_point_util.js"); + +const kDbName = "testDb"; +const kCollName = "testColl"; + +function makeSingleInsertTxn(doc) { + return [{ + dbName: kDbName, + command: { + insert: kCollName, + documents: [doc], + } + }]; +} + +function runTxn(conn, commandInfos) { + return conn.adminCommand({testInternalTransactions: 1, commandInfos: commandInfos}); +} + +const st = new ShardingTest({config: 1, shards: 1}); +const shardPrimary = st.rs0.getPrimary(); + +// Set up the test collection. +assert.commandWorked(st.s.getDB(kDbName)[kCollName].insert([{_id: 0}])); + +// +// Error codes where the API should retry and eventually commit the transaction, either by retrying +// commit until it succeeds or retrying the entire transaction until it succeeds. Fail commands 10 +// times to exhaust internal retries at layers below the transaction API. +// + +// Retryable error. Note this error is not a NotPrimary error so it won't be rewritten by mongos. +let commitFailPoint = + configureFailPoint(shardPrimary, + "failCommand", + { + errorCode: ErrorCodes.ReadConcernMajorityNotAvailableYet, + failCommands: ["commitTransaction"], + failInternalCommands: true, + }, + {times: 10}); +let res = assert.commandWorked(runTxn(st.s, makeSingleInsertTxn({_id: 1}))); +commitFailPoint.off(); + +// No command error with a retryable write concern error. +commitFailPoint = configureFailPoint( + shardPrimary, + "failCommand", + { + writeConcernError: + {code: NumberInt(ErrorCodes.ReadConcernMajorityNotAvailableYet), errmsg: "foo"}, + failCommands: ["commitTransaction"], + failInternalCommands: true, + }, + {times: 10}); +res = assert.commandWorked(runTxn(st.s, makeSingleInsertTxn({_id: 2}))); +commitFailPoint.off(); + +// +// Error codes where the API should not retry. +// + +// Non-transient commit error with a non-retryable write concern error. 
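+// InternalError carries neither a retryable nor a transient transaction label, so the API is
+// expected to surface it to the caller rather than retry the commit.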
+commitFailPoint = configureFailPoint(shardPrimary, + "failCommand", + { + errorCode: ErrorCodes.InternalError, + failCommands: ["commitTransaction"], + failInternalCommands: true, + }, + {times: 10}); +res = assert.commandFailedWithCode(runTxn(st.s, makeSingleInsertTxn({_id: 3})), + ErrorCodes.InternalError); +commitFailPoint.off(); + +// No commit error with a non-retryable write concern error. +commitFailPoint = configureFailPoint( + shardPrimary, + "failCommand", + { + writeConcernError: {code: NumberInt(ErrorCodes.InternalError), errmsg: "foo"}, + failCommands: ["commitTransaction"], + failInternalCommands: true, + }, + {times: 10}); +// The internal transaction test command will rethrow a write concern error as a top-level error. +res = assert.commandFailedWithCode(runTxn(st.s, makeSingleInsertTxn({_id: 4})), + ErrorCodes.InternalError); +commitFailPoint.off(); + +// Non-transient commit error that is normally transient. Note NoSuchTransaction is not transient +// with a write concern error, which is what this is meant to simulate. Also note the fail command +// fail point can't take both a write concern error and write concern error so we "cheat" and +// override the error labels. +commitFailPoint = configureFailPoint(shardPrimary, + "failCommand", + { + errorCode: ErrorCodes.NoSuchTransaction, + errorLabels: [], + failCommands: ["commitTransaction"], + failInternalCommands: true, + }, + {times: 10}); +res = assert.commandFailedWithCode(runTxn(st.s, makeSingleInsertTxn({_id: 5})), + ErrorCodes.NoSuchTransaction); +commitFailPoint.off(); + +st.stop(); +}()); diff --git a/jstests/noPassthrough/transaction_reaper.js b/jstests/noPassthrough/transaction_reaper.js index 1240cfd1ba349..b2412b935d2d9 100644 --- a/jstests/noPassthrough/transaction_reaper.js +++ b/jstests/noPassthrough/transaction_reaper.js @@ -1,5 +1,4 @@ // @tags: [ -// requires_fcv_70, // requires_replication, // requires_sharding, // ] diff --git a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js b/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js deleted file mode 100644 index 20bf20a55ca21..0000000000000 --- a/jstests/noPassthrough/transaction_write_with_snapshot_unavailable.js +++ /dev/null @@ -1,74 +0,0 @@ -/** - * Test that write errors in a transaction due to SnapshotUnavailable are labelled - * TransientTransactionError and the error is reported at the top level, not in a writeErrors array. - * - * Other transient transaction errors are tested elsewhere: WriteConflict is tested in - * transactions_write_conflicts.js, NotWritablePrimary is tested in transient_txn_error_labels.js, - * and NoSuchTransaction is tested in transient_txn_error_labels_with_write_concern.js. 
- * - * @tags: [uses_transactions] - */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); - -const name = "transaction_write_with_snapshot_unavailable"; -const replTest = new ReplSetTest({name: name, nodes: 1}); -replTest.startSet(); -replTest.initiate(); - -const dbName = name; -const dbNameB = dbName + "B"; -const collName = "collection"; -const collNameB = collName + "B"; - -const primary = replTest.getPrimary(); -const primaryDB = primary.getDB(dbName); - -if (FeatureFlagUtil.isEnabled(primaryDB, "PointInTimeCatalogLookups")) { - // With the PointInTimeCatalogLookups feature this test doesn't make sense as the - // SnapshotUnavailable error will be removed - replTest.stopSet(); - return; -} - -assert.commandWorked(primaryDB[collName].insertOne({}, {writeConcern: {w: "majority"}})); - -function testOp(cmd) { - let op = Object.getOwnPropertyNames(cmd)[0]; - let session = primary.startSession(); - let sessionDB = session.getDatabase(name); - - jsTestLog(`Testing that WriteConflict during ${op} is labelled TransientTransactionError`); - - session.startTransaction({readConcern: {level: "snapshot"}}); - assert.commandWorked(sessionDB.runCommand({insert: collName, documents: [{}]})); - // Create collection outside transaction, cannot write to it in the transaction - assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand({create: collNameB})); - - let res; - try { - res = sessionDB.getSiblingDB(dbNameB).runCommand(cmd); - assert.commandFailedWithCode(res, ErrorCodes.SnapshotUnavailable); - assert.eq(res.ok, 0); - assert(!res.hasOwnProperty("writeErrors")); - assert.eq(res.errorLabels, ["TransientTransactionError"]); - } catch (ex) { - printjson(cmd); - printjson(res); - throw ex; - } - - assert.commandFailedWithCode(session.abortTransaction_forTesting(), - ErrorCodes.NoSuchTransaction); - assert.commandWorked(primaryDB.getSiblingDB(dbNameB).runCommand( - {dropDatabase: 1, writeConcern: {w: "majority"}})); -} - -testOp({insert: collNameB, documents: [{_id: 0}]}); -testOp({update: collNameB, updates: [{q: {}, u: {$set: {x: 1}}}]}); -testOp({delete: collNameB, deletes: [{q: {_id: 0}, limit: 1}]}); - -replTest.stopSet(); -})(); diff --git a/jstests/noPassthrough/transition_to_catalog_shard_feature_flag.js b/jstests/noPassthrough/transition_to_catalog_shard_feature_flag.js index 2fff0b6b7e0f9..de8fb8513b68a 100644 --- a/jstests/noPassthrough/transition_to_catalog_shard_feature_flag.js +++ b/jstests/noPassthrough/transition_to_catalog_shard_feature_flag.js @@ -1,6 +1,6 @@ /** - * Verifies the transitionToCatalogShard feature flag guards running the catalog shard transition - * commands. + * Verifies the transitionFromDedicatedConfigServer feature flag guards running the config shard + * transition commands. * * @tags: [requires_fcv_70] */ @@ -21,13 +21,14 @@ const st = new ShardingTest({ }); // None of the transition commands can be run on mongos or the config server. 
-assert.commandFailedWithCode(st.s.adminCommand({transitionToCatalogShard: 1}), +assert.commandFailedWithCode(st.s.adminCommand({transitionFromDedicatedConfigServer: 1}), ErrorCodes.CommandNotFound); assert.commandFailedWithCode(st.s.adminCommand({transitionToDedicatedConfigServer: 1}), 7368401); const configPrimary = st.configRS.getPrimary(); -assert.commandFailedWithCode(configPrimary.adminCommand({_configsvrTransitionToCatalogShard: 1}), - ErrorCodes.CommandNotFound); +assert.commandFailedWithCode( + configPrimary.adminCommand({_configsvrTransitionFromDedicatedConfigServer: 1}), + ErrorCodes.CommandNotFound); assert.commandFailedWithCode( configPrimary.adminCommand({_configsvrTransitionToDedicatedConfigServer: 1}), 7368402); diff --git a/jstests/noPassthrough/ttl_changes_are_immediate.js b/jstests/noPassthrough/ttl_changes_are_immediate.js index d9c509740861a..ac19e7c3f44c5 100644 --- a/jstests/noPassthrough/ttl_changes_are_immediate.js +++ b/jstests/noPassthrough/ttl_changes_are_immediate.js @@ -1,7 +1,5 @@ // Ensure that changes to the TTL sleep time are reflected immediately. -(function() { -"use strict"; -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; let runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1000"}); let db = runner.getDB("test"); @@ -24,5 +22,4 @@ TTLUtil.waitForPass(coll.getDB(), true, 20 * 1000); assert.eq(coll.count(), 0, "We should get 0 documents after TTL monitor run"); -MongoRunner.stopMongod(runner); -})(); +MongoRunner.stopMongod(runner); \ No newline at end of file diff --git a/jstests/noPassthrough/ttl_hidden_index.js b/jstests/noPassthrough/ttl_hidden_index.js index 7ef7ecef991e0..e359562a4931a 100644 --- a/jstests/noPassthrough/ttl_hidden_index.js +++ b/jstests/noPassthrough/ttl_hidden_index.js @@ -1,7 +1,5 @@ // Make sure the TTL index still work after we hide it -(function() { -"use strict"; -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; let runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"}); let coll = runner.getDB("test").ttl_hiddenl_index; @@ -24,5 +22,4 @@ TTLUtil.waitForPass(coll.getDB()); assert.eq(coll.count(), 0, "We should get 0 documents after TTL monitor run"); -MongoRunner.stopMongod(runner); -})(); +MongoRunner.stopMongod(runner); \ No newline at end of file diff --git a/jstests/noPassthrough/ttl_monitor_does_not_unregister_index_during_collection_creation.js b/jstests/noPassthrough/ttl_monitor_does_not_unregister_index_during_collection_creation.js deleted file mode 100644 index 45478301ac682..0000000000000 --- a/jstests/noPassthrough/ttl_monitor_does_not_unregister_index_during_collection_creation.js +++ /dev/null @@ -1,58 +0,0 @@ -/** - * Ensures that the TTLMonitor does not remove the cached index information from the - * TTLCollectionCache object for a newly created index before the implicitly created collection is - * registered and visible in the CollectionCatalog. - * Removing this cached index information prevents the TTLMonitor from removing expired documents - * for that collection. 
- */ -(function() { -'use strict'; -load("jstests/libs/ttl_util.js"); - -const conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'}); - -const dbName = "test"; -const collName = "ttlMonitor"; - -const db = conn.getDB(dbName); -const coll = db.getCollection(collName); - -TestData.dbName = dbName; -TestData.collName = collName; - -coll.drop(); - -const failPoint = "hangTTLCollectionCacheAfterRegisteringInfo"; -assert.commandWorked(db.adminCommand({configureFailPoint: failPoint, mode: "alwaysOn"})); - -// Create an index on a non-existent collection. This will implicitly create the collection. -let awaitcreateIndex = startParallelShell(() => { - const testDB = db.getSiblingDB(TestData.dbName); - assert.commandWorked( - testDB.getCollection(TestData.collName).createIndex({x: 1}, {expireAfterSeconds: 0})); -}, db.getMongo().port); - -// Wait for the TTL monitor to run and register the index in the TTL collection cache. -checkLog.containsJson(db.getMongo(), 4664000); - -// Let the TTL monitor run once. It should not remove the index from the cached TTL information -// until the collection is committed. -TTLUtil.waitForPass(coll.getDB()); - -// Finish the index build. -assert.commandWorked(db.adminCommand({configureFailPoint: failPoint, mode: "off"})); -awaitcreateIndex(); - -// Insert documents, which should expire immediately and be removed on the next TTL pass. -const now = new Date(); -for (let i = 0; i < 10; i++) { - assert.commandWorked(coll.insert({x: now})); -} - -// Let the TTL monitor run once to remove the expired documents. -TTLUtil.waitForPass(coll.getDB()); - -assert.eq(0, coll.find({}).count()); - -MongoRunner.stopMongod(conn); -}()); diff --git a/jstests/noPassthrough/ttl_operation_metrics.js b/jstests/noPassthrough/ttl_operation_metrics.js index 674304619716e..72ea675601efc 100644 --- a/jstests/noPassthrough/ttl_operation_metrics.js +++ b/jstests/noPassthrough/ttl_operation_metrics.js @@ -5,12 +5,9 @@ * requires_replication, * ] */ -(function() { -'use strict'; - load('jstests/noPassthrough/libs/index_build.js'); // For IndexBuildTest load("jstests/libs/fail_point_util.js"); -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; var rst = new ReplSetTest({ nodes: 2, @@ -115,5 +112,4 @@ assertMetrics(secondary, (metrics) => { assert.eq(primaryDB[collName].count({}), 1); assert.eq(secondaryDB[collName].count({}), 1); -rst.stopSet(); -}()); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/ttl_operation_metrics_multi_dbs.js b/jstests/noPassthrough/ttl_operation_metrics_multi_dbs.js index e6e1bfefdf399..12ec14b34db5a 100644 --- a/jstests/noPassthrough/ttl_operation_metrics_multi_dbs.js +++ b/jstests/noPassthrough/ttl_operation_metrics_multi_dbs.js @@ -5,12 +5,9 @@ * requires_replication, * ] */ -(function() { -'use strict'; - load('jstests/noPassthrough/libs/index_build.js'); // For IndexBuildTest load("jstests/libs/fail_point_util.js"); -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; var rst = new ReplSetTest({ nodes: 2, @@ -91,10 +88,12 @@ assertMetrics(primary, (metrics) => { assert.gte(metrics[dbName2].totalUnitsWritten, 2); }); -// Clear metrics and wait for a TTL pass to delete the documents. +// Clear metrics and wait for two TTL passes to make sure we both observe the inserts and delete the +// documents. 
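+// (Presumably a pass already in flight when the failpoint is released may not observe the inserts,
+// so only the second full pass is guaranteed to account for them.)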
clearMetrics(primary); pauseTtl.off(); TTLUtil.waitForPass(primaryDB1); +TTLUtil.waitForPass(primaryDB1); // Ensure that the TTL monitor deleted 2 documents on the primary and recorded read and write // metrics. @@ -144,5 +143,4 @@ const secondaryDB2 = secondary.getDB(dbName2); assert.eq(secondaryDB1[collName].count({}), 0); assert.eq(secondaryDB2[collName].count({}), 2); -rst.stopSet(); -}()); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/noPassthrough/ttl_partial_index.js b/jstests/noPassthrough/ttl_partial_index.js index 00125a9bb914a..427e1a4bdcb91 100644 --- a/jstests/noPassthrough/ttl_partial_index.js +++ b/jstests/noPassthrough/ttl_partial_index.js @@ -1,8 +1,6 @@ // Test that the TTL monitor will correctly use TTL indexes that are also partial indexes. // SERVER-17984. -(function() { -"use strict"; -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; // Launch mongod with shorter TTL monitor sleep interval. var runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"}); @@ -26,5 +24,4 @@ assert.eq(0, "Wrong number of documents in partial index, after TTL monitor run"); assert.eq( 1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run"); -MongoRunner.stopMongod(runner); -})(); +MongoRunner.stopMongod(runner); \ No newline at end of file diff --git a/jstests/noPassthrough/ttl_resharding_collection.js b/jstests/noPassthrough/ttl_resharding_collection.js index cafa433fa5f43..12b8019b9aa0b 100644 --- a/jstests/noPassthrough/ttl_resharding_collection.js +++ b/jstests/noPassthrough/ttl_resharding_collection.js @@ -1,7 +1,5 @@ // Tests that the TTL Monitor is disabled for .system.resharding.* namespaces. -(function() { -"use strict"; -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; // Launch mongod with shorter TTL monitor sleep interval. const runner = MongoRunner.runMongod({setParameter: "ttlMonitorSleepSecs=1"}); @@ -22,5 +20,4 @@ TTLUtil.waitForPass(coll.getDB()); // namespace. assert.eq( 1, coll.find().itcount(), "Wrong number of documents in collection, after TTL monitor run"); -MongoRunner.stopMongod(runner); -})(); +MongoRunner.stopMongod(runner); \ No newline at end of file diff --git a/jstests/noPassthrough/ttl_with_dropIndex.js b/jstests/noPassthrough/ttl_with_dropIndex.js index 6bed0fa6b9151..fdf8f861ae015 100644 --- a/jstests/noPassthrough/ttl_with_dropIndex.js +++ b/jstests/noPassthrough/ttl_with_dropIndex.js @@ -1,9 +1,7 @@ /** * Verify the behavior of dropping TTL index. */ -(function() { -'use strict'; -load("jstests/libs/ttl_util.js"); +import {TTLUtil} from "jstests/libs/ttl_util.js"; let conn = MongoRunner.runMongod({setParameter: 'ttlMonitorSleepSecs=1'}); let db = conn.getDB('test'); @@ -38,5 +36,4 @@ TTLUtil.waitForPass(db); assert.eq(coll.find().itcount(), 50); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/unindex_detects_data_corruption.js b/jstests/noPassthrough/unindex_detects_data_corruption.js new file mode 100644 index 0000000000000..a475b92846348 --- /dev/null +++ b/jstests/noPassthrough/unindex_detects_data_corruption.js @@ -0,0 +1,38 @@ +/** + * This tests that errors are logged when unindexing _id finds evidence of corruption, the server + * does not crash, and the appropriate error is returned. 
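+ *
+ * The corruption is simulated with the 'WTIndexUassertDuplicateRecordForKeyOnIdUnindex' failpoint:
+ * the remove() is expected to fail with DataCorruptionDetected, and a matching health log entry
+ * should appear eventually, since health log writes happen on a background worker.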
+ */ +(function() { + +const replSet = new ReplSetTest({nodes: 1}); +replSet.startSet(); +replSet.initiate(); + +const primary = replSet.getPrimary(); + +const db = primary.getDB('test'); +const collName = 'coll'; +const coll = db[collName]; + +assert.commandWorked(coll.insert({a: "first"})); + +assert.commandWorked(primary.adminCommand( + {configureFailPoint: "WTIndexUassertDuplicateRecordForKeyOnIdUnindex", mode: "alwaysOn"})); + +assert.commandFailedWithCode(coll.remove({a: "first"}), ErrorCodes.DataCorruptionDetected); + +assert.commandWorked(primary.adminCommand( + {configureFailPoint: "WTIndexUassertDuplicateRecordForKeyOnIdUnindex", mode: "off"})); + +assert.soonNoExcept(() => { + // The health log entry is written asynchronously by a background worker, expect it to be + // eventually found. + let entry = primary.getDB('local').system.healthlog.findOne({severity: 'error'}); + assert(entry, "No healthlog entry found on " + tojson(primary)); + assert.eq("Un-index seeing multiple records for key", entry.msg, tojson(entry)); + assert.eq(1, primary.getDB('local').system.healthlog.count({severity: 'error'})); + return true; +}); + +replSet.stopSet(); +})(); diff --git a/jstests/noPassthrough/upsert_invalid_multiple_id_fields.js b/jstests/noPassthrough/upsert_invalid_multiple_id_fields.js new file mode 100644 index 0000000000000..44c9f83101a08 --- /dev/null +++ b/jstests/noPassthrough/upsert_invalid_multiple_id_fields.js @@ -0,0 +1,35 @@ +/** + * SERVER-75879: Tests that an invalid document with multiple _id fields cannot be inserted by an + * update sent with upsert=true. + */ +(function() { +"use strict"; + +// Run tests on a standalone mongod. +let conn = MongoRunner.runMongod({setParameter: {enableComputeMode: true}}); +let db = conn.getDB(jsTestName()); + +// _buildBsonObj is a lightweight BSON builder allowing us to construct an invalid BSON in shell. +let invalidBson = _buildBsonObj("a", 1, "_id", 1, "_id", 2, "_id", 3); + +// Assert the BSON is indeed invalid. First, we build a valid one from its JSON string. +let validBson = JSON.parse(JSON.stringify(invalidBson)); +assert.eq(JSON.stringify(invalidBson), JSON.stringify(validBson)); +assert.gt(Object.bsonsize(invalidBson), Object.bsonsize(validBson)); +assert.neq(bsonWoCompare(invalidBson, validBson), 0); + +// Test that a replacement is not permitted +assert.throwsWithCode(() => { + db.coll.replaceOne({}, invalidBson, {upsert: true}); +}, 2); + +// Test that an upsert is not permitted +assert.writeErrorWithCode(db.coll.update({}, invalidBson, {upsert: true}), ErrorCodes.BadValue); + +// Assert that a valid one is actually insertable +assert.writeOK(db.coll.update({}, validBson, {upsert: true})); + +let inserted = db.coll.findOne(); +assert.docEq(inserted, validBson); +MongoRunner.stopMongod(conn); +})(); diff --git a/jstests/noPassthrough/validate_adjust_multikey.js b/jstests/noPassthrough/validate_adjust_multikey.js index ad36ac48f352c..15db61de99afa 100644 --- a/jstests/noPassthrough/validate_adjust_multikey.js +++ b/jstests/noPassthrough/validate_adjust_multikey.js @@ -1,8 +1,7 @@ /** * Tests foreground validation's ability to fix up allowable multikey metadata problems. */ -(function() { -load("jstests/libs/analyze_plan.js"); // For getWinningPlan to analyze explain() output. 
+import {getWinningPlan} from "jstests/libs/analyze_plan.js"; const conn = MongoRunner.runMongod(); const dbName = jsTestName(); @@ -109,5 +108,4 @@ runTest((coll) => { assertIndexMultikey(coll, 'a_text', false); }); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/validate_db_metadata_command_whole_db.js b/jstests/noPassthrough/validate_db_metadata_command_whole_db.js index 01e4e3dfa0a91..17cd38786edbe 100644 --- a/jstests/noPassthrough/validate_db_metadata_command_whole_db.js +++ b/jstests/noPassthrough/validate_db_metadata_command_whole_db.js @@ -4,11 +4,7 @@ * requires_sharding, * ] */ -(function() { -"use strict"; - -load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. -load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest. +load("jstests/libs/fixture_helpers.js"); // For FixtureHelpers. const dbName = jsTestName(); const collName = "coll1"; @@ -95,5 +91,4 @@ MongoRunner.stopMongod(conn); const st = new ShardingTest({shards: 2}); st.shardColl(dbName + "." + collName, {_id: 1}, {_id: 1}); runTest(st.s); -st.stop(); -}()); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthrough/validate_db_metadata_limits.js b/jstests/noPassthrough/validate_db_metadata_limits.js index dce8af08b6f08..13cefab9c18b3 100644 --- a/jstests/noPassthrough/validate_db_metadata_limits.js +++ b/jstests/noPassthrough/validate_db_metadata_limits.js @@ -25,4 +25,4 @@ assert(res.apiVersionErrors, res); assert(res.apiVersionErrors.length < 100, res); MongoRunner.stopMongod(conn); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/validate_duplicate_record.js b/jstests/noPassthrough/validate_duplicate_record.js new file mode 100644 index 0000000000000..1d1430e5c9e22 --- /dev/null +++ b/jstests/noPassthrough/validate_duplicate_record.js @@ -0,0 +1,33 @@ +/** + * Tests that duplicate records for _id index keys are detected by validate. + */ +(function() { +"use strict"; + +// Disable testing diagnostics (TestingProctor) so we do not hit test only fasserts. +TestData.testingDiagnosticsEnabled = false; + +const rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); + +let primary = rst.getPrimary(); +let coll = primary.getCollection('test.duplicate_record'); +assert.commandWorked(coll.createIndex({x: 1})); + +for (let i = 0; i < 5; i++) { + assert.commandWorked(coll.insert({x: i})); +} + +function testValidateWithFailpoint(fpName) { + assert.commandWorked(primary.adminCommand({configureFailPoint: fpName, mode: "alwaysOn"})); + let res = assert.commandWorked(coll.validate()); + assert(!res.valid); + assert.commandWorked(primary.adminCommand({configureFailPoint: fpName, mode: "off"})); +} + +// Test duplicate record for index key on _id index. +testValidateWithFailpoint("WTIndexUassertDuplicateRecordForIdIndex"); + +rst.stopSet(); +})(); diff --git a/jstests/noPassthrough/validate_memory_limit.js b/jstests/noPassthrough/validate_memory_limit.js index 63f55a0ed43d8..ff60122fede0f 100644 --- a/jstests/noPassthrough/validate_memory_limit.js +++ b/jstests/noPassthrough/validate_memory_limit.js @@ -7,10 +7,7 @@ * requires_wiredtiger, * ] */ -(function() { -"use strict"; - -load("jstests/disk/libs/wt_file_helper.js"); +import {getUriForIndex, truncateUriAndRestartMongod} from "jstests/disk/libs/wt_file_helper.js"; const kIndexKeyLength = 1024 * 1024; @@ -80,5 +77,4 @@ checkValidateLogs(); // Repair, but incompletely if only some inconsistencies are reported. 
checkValidateRepair(); -MongoRunner.stopMongod(conn, null, {skipValidation: true}); -})(); \ No newline at end of file +MongoRunner.stopMongod(conn, null, {skipValidation: true}); \ No newline at end of file diff --git a/jstests/noPassthrough/validate_multikey_failures.js b/jstests/noPassthrough/validate_multikey_failures.js index 6d21116c1d0c5..be3d3010b30ca 100644 --- a/jstests/noPassthrough/validate_multikey_failures.js +++ b/jstests/noPassthrough/validate_multikey_failures.js @@ -75,4 +75,4 @@ assert(checkLog.checkContainsWithAtLeastCountJson(conn, 7556101, {"indexKey": {" assert(checkLog.checkContainsWithAtLeastCountJson(conn, 5367500, {"index": "a.b_1"}, 1)); MongoRunner.stopMongod(conn, null, {skipValidation: true}); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthrough/validate_out_of_order.js b/jstests/noPassthrough/validate_out_of_order.js index 27142a71c5f79..c8c9eaeecbd1f 100644 --- a/jstests/noPassthrough/validate_out_of_order.js +++ b/jstests/noPassthrough/validate_out_of_order.js @@ -19,11 +19,11 @@ for (let i = 0; i < 5; i++) { // Test record store out-of-order detection. assert.commandWorked( - primary.adminCommand({configureFailPoint: "WTRecordStoreUassertOutOfOrder", mode: "alwaysOn"})); + primary.adminCommand({configureFailPoint: "failRecordStoreTraversal", mode: "alwaysOn"})); let res = assert.commandWorked(coll.validate()); assert(!res.valid); assert.commandWorked( - primary.adminCommand({configureFailPoint: "WTRecordStoreUassertOutOfOrder", mode: "off"})); + primary.adminCommand({configureFailPoint: "failRecordStoreTraversal", mode: "off"})); // Test index entry out-of-order detection. assert.commandWorked( diff --git a/jstests/noPassthrough/validate_timeseries_bucket_reopening.js b/jstests/noPassthrough/validate_timeseries_bucket_reopening.js index 9936c12aee79c..79f47ebd72bc3 100644 --- a/jstests/noPassthrough/validate_timeseries_bucket_reopening.js +++ b/jstests/noPassthrough/validate_timeseries_bucket_reopening.js @@ -7,10 +7,7 @@ * * @tags: [requires_replication] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest'. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load("jstests/libs/fail_point_util.js"); const rst = new ReplSetTest({nodes: 1}); @@ -24,7 +21,7 @@ if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(db)) { rst.stopSet(); jsTestLog( 'Skipping test because the TimeseriesScalabilityImprovements feature flag is disabled.'); - return; + quit(); } const collNamePrefix = db.validate_timeseries_bucket_reopening_; @@ -110,4 +107,3 @@ validateBucketReopening(metaFieldName2); fpSameStripe.off(); rst.stopSet(); -})(); diff --git a/jstests/noPassthrough/validate_with_long_index_name.js b/jstests/noPassthrough/validate_with_long_index_name.js index 5c97e37133663..8bebd3e07ed6c 100644 --- a/jstests/noPassthrough/validate_with_long_index_name.js +++ b/jstests/noPassthrough/validate_with_long_index_name.js @@ -7,10 +7,11 @@ * requires_wiredtiger, * ] */ -(function() { -"use strict"; - -load("jstests/disk/libs/wt_file_helper.js"); +import { + getUriForColl, + getUriForIndex, + truncateUriAndRestartMongod +} from "jstests/disk/libs/wt_file_helper.js"; // 64 * 1024 * 1024 = 64MB worth of index names ensures that we test against the maximum BSONObj // size lmit. 
@@ -63,5 +64,4 @@ assert(!res.valid); assert.contains(extraIndexEntries, res.warnings); assert.contains(extraSizeLimitations, res.errors); -MongoRunner.stopMongod(conn, null, {skipValidation: true}); -})(); \ No newline at end of file +MongoRunner.stopMongod(conn, null, {skipValidation: true}); \ No newline at end of file diff --git a/jstests/noPassthrough/views_count_distinct_disk_use.js b/jstests/noPassthrough/views_count_distinct_disk_use.js index cd8d3046637c2..d90bfefdaff40 100644 --- a/jstests/noPassthrough/views_count_distinct_disk_use.js +++ b/jstests/noPassthrough/views_count_distinct_disk_use.js @@ -1,10 +1,7 @@ // Test count and distinct on views use with different values of the allowDiskUseByDefault // parameter. -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); assert.neq(null, conn, "mongod was unable to start up"); @@ -45,5 +42,4 @@ if (!checkSBEEnabled(viewsDB)) { // stage needs to spill to disk if the memory limit is reached. testDiskUse({distinct: "largeView", key: "largeStr"}); -MongoRunner.stopMongod(conn); -})(); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/noPassthrough/vote_abort_index_build.js b/jstests/noPassthrough/vote_abort_index_build.js index c1e8e65f3606d..91418d37da6c6 100644 --- a/jstests/noPassthrough/vote_abort_index_build.js +++ b/jstests/noPassthrough/vote_abort_index_build.js @@ -2,7 +2,7 @@ * Tests the 'voteAbortIndexBuild' internal command. * * @tags: [ - * featureFlagIndexBuildGracefulErrorHandling, + * requires_fcv_71, * requires_replication, * ] */ diff --git a/jstests/noPassthrough/write_conflict_wildcard.js b/jstests/noPassthrough/write_conflict_wildcard.js index 6d221a770e629..3ade58ecaaed3 100644 --- a/jstests/noPassthrough/write_conflict_wildcard.js +++ b/jstests/noPassthrough/write_conflict_wildcard.js @@ -5,10 +5,7 @@ * TODO SERVER-56443: This test is specific to the classic engine. If/when the classic engine is * deleted, this test should be removed as well. 
*/ -(function() { -"strict"; - -load("jstests/libs/sbe_util.js"); +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const conn = MongoRunner.runMongod(); const testDB = conn.getDB("test"); @@ -16,7 +13,7 @@ const testDB = conn.getDB("test"); if (checkSBEEnabled(testDB)) { jsTestLog("Skipping test as SBE is not resilient to WCEs"); MongoRunner.stopMongod(conn); - return; + quit(); } const coll = testDB.write_conflict_wildcard; @@ -45,4 +42,3 @@ for (let i = 0; i < 1000; ++i) { assert.commandWorked( testDB.adminCommand({configureFailPoint: 'WTWriteConflictExceptionForReads', mode: "off"})); MongoRunner.stopMongod(conn); -})(); diff --git a/jstests/noPassthroughWithMongod/background.js b/jstests/noPassthroughWithMongod/background.js index a6fe673a37622..7194e9aed97eb 100644 --- a/jstests/noPassthroughWithMongod/background.js +++ b/jstests/noPassthroughWithMongod/background.js @@ -2,7 +2,7 @@ assert(db.getName() == "test"); -t = db.bg1; +let t = db.bg1; t.drop(); var a = new Mongo(db.getMongo().host).getDB(db.getName()); @@ -18,7 +18,7 @@ for (var i = 0; i < 100000; i++) { } // start bg indexing -a.bg1.createIndex({i: 1}, {name: "i_1", background: true}); +a.bg1.createIndex({i: 1}, {name: "i_1"}); // add more data bulk = t.initializeUnorderedBulkOp(); diff --git a/jstests/noPassthroughWithMongod/btreedel.js b/jstests/noPassthroughWithMongod/btreedel.js index 221c04a2fc527..12216ca03e36d 100644 --- a/jstests/noPassthroughWithMongod/btreedel.js +++ b/jstests/noPassthroughWithMongod/btreedel.js @@ -1,7 +1,7 @@ // btreedel.js // @tags: [SERVER-32869] -t = db.foo; +let t = db.foo; t.remove({}); var bulk = t.initializeUnorderedBulkOp(); @@ -30,8 +30,8 @@ t.remove({_id: {$gt: 200000, $lt: 600000}}); print("3"); print(d.hasNext()); -n = 0; -last = {}; +let n = 0; +let last = {}; printjson(c.next()); while (c.hasNext()) { n++; diff --git a/jstests/noPassthroughWithMongod/capped4.js b/jstests/noPassthroughWithMongod/capped4.js index f948614f24e87..d03a5d8fb4cc6 100644 --- a/jstests/noPassthroughWithMongod/capped4.js +++ b/jstests/noPassthroughWithMongod/capped4.js @@ -1,20 +1,20 @@ // @tags: [requires_capped] -t = db.jstests_capped4; +let t = db.jstests_capped4; t.drop(); db.createCollection("jstests_capped4", {size: 1000, capped: true}); t.createIndex({i: 1}); -for (i = 0; i < 20; ++i) { +for (let i = 0; i < 20; ++i) { t.save({i: i}); } -c = t.find().sort({$natural: -1}).limit(2); +let c = t.find().sort({$natural: -1}).limit(2); c.next(); c.next(); -d = t.find().sort({i: -1}).limit(2); +let d = t.find().sort({i: -1}).limit(2); d.next(); d.next(); -for (i = 20; t.findOne({i: 19}); ++i) { +for (var i = 20; t.findOne({i: 19}); ++i) { t.save({i: i}); } // assert( !t.findOne( { i : 19 } ), "A" ); diff --git a/jstests/noPassthroughWithMongod/capped6.js b/jstests/noPassthroughWithMongod/capped6.js index ad94043158bbb..70fb9dfce3048 100644 --- a/jstests/noPassthroughWithMongod/capped6.js +++ b/jstests/noPassthroughWithMongod/capped6.js @@ -18,7 +18,7 @@ var maxDocuments = Random.randInt(400) + 100; * check is performed in both forward and reverse directions. 
*/ function checkOrder(i, valueArray) { - res = coll.find().sort({$natural: -1}); + let res = coll.find().sort({$natural: -1}); assert(res.hasNext(), "A"); var j = i; while (res.hasNext()) { @@ -41,7 +41,7 @@ function prepareCollection(shouldReverse) { assert.commandWorked(db.createCollection("capped6", {capped: true, size: 1000})); var valueArray = new Array(maxDocuments); var c = ""; - for (i = 0; i < maxDocuments; ++i, c += "-") { + for (let i = 0; i < maxDocuments; ++i, c += "-") { // The a values are strings of increasing length. valueArray[i] = {a: c}; } @@ -67,7 +67,7 @@ function runCapTrunc(valueArray, valueArrayCurIndex, n, inc) { for (var i = valueArrayCurIndex; i < maxDocuments; ++i) { assert.commandWorked(coll.insert(valueArray[i])); } - count = coll.count(); + let count = coll.count(); // The index corresponding to the last document in the collection. valueArrayCurIndex = maxDocuments - 1; diff --git a/jstests/noPassthroughWithMongod/clone_collection_as_capped_no_conflicts.js b/jstests/noPassthroughWithMongod/clone_collection_as_capped_no_conflicts.js index b1deedfa1d01f..d9dc81356a336 100644 --- a/jstests/noPassthroughWithMongod/clone_collection_as_capped_no_conflicts.js +++ b/jstests/noPassthroughWithMongod/clone_collection_as_capped_no_conflicts.js @@ -41,4 +41,4 @@ assert.eq(toColl.count(), 1); // Interrupt the sleep command. assert.commandWorked(testDB.getSiblingDB("admin").killOp(sleepID)); sleepCommand(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthroughWithMongod/collMod_no_conflicts.js b/jstests/noPassthroughWithMongod/collMod_no_conflicts.js index 130582d2f61c5..fe8b4edcad80d 100644 --- a/jstests/noPassthroughWithMongod/collMod_no_conflicts.js +++ b/jstests/noPassthroughWithMongod/collMod_no_conflicts.js @@ -39,4 +39,4 @@ assert.eq(res[0].options.pipeline, collModPipeline); // Interrupt the sleep command. assert.commandWorked(testDB.getSiblingDB("admin").killOp(sleepID)); sleepCommand(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js b/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js index 1474626e757d2..30bf982dd20e2 100644 --- a/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js +++ b/jstests/noPassthroughWithMongod/collstats_shows_ready_and_in_progress_indexes.js @@ -26,10 +26,7 @@ try { awaitParallelShell = startParallelShell(() => { db.getSiblingDB("test").runCommand({ createIndexes: "collstats_show_ready_and_in_progress_indexes", - indexes: [ - {key: {a: 1}, name: 'a_1', background: true}, - {key: {b: 1}, name: 'b_1', background: true} - ] + indexes: [{key: {a: 1}, name: 'a_1'}, {key: {b: 1}, name: 'b_1'}] }); }, db.getMongo().port); diff --git a/jstests/noPassthroughWithMongod/column_scan_explain.js b/jstests/noPassthroughWithMongod/column_scan_explain.js index 43b7ee11a0068..5819fd39004f6 100644 --- a/jstests/noPassthroughWithMongod/column_scan_explain.js +++ b/jstests/noPassthroughWithMongod/column_scan_explain.js @@ -5,15 +5,13 @@ * featureFlagColumnstoreIndexes, * ] */ -(function() { -"use strict"; - +import {getPlanStages} from "jstests/libs/analyze_plan.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; +import {getSbePlanStages} from "jstests/libs/sbe_explain_helpers.js"; load("jstests/aggregation/extras/utils.js"); // For assertArrayEq -load("jstests/libs/sbe_explain_helpers.js"); // For getSbePlanStages. 
-load("jstests/libs/columnstore_util.js"); // For setUpServerForColumnStoreIndexTest. if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll = db.column_scan_explain; @@ -268,4 +266,3 @@ assert.commandWorked(coll.insertMany(docs, {ordered: false})); ["stage", "planNodeId"]), `Mismatching column scan plan stage ${tojson(columnScanPlanStages[0])}`); }()); -}()); diff --git a/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js b/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js index dd671fa72d0d1..3b4b24f1b54cd 100644 --- a/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js +++ b/jstests/noPassthroughWithMongod/columnstore_planning_heuristics.js @@ -6,15 +6,12 @@ * featureFlagColumnstoreIndexes, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For "planHasStage." -load("jstests/libs/columnstore_util.js"); // For "setUpServerForColumnStoreIndexTest." +import {planHasStage} from "jstests/libs/analyze_plan.js"; +import {setUpServerForColumnStoreIndexTest} from "jstests/libs/columnstore_util.js"; load("jstests/noPassthrough/libs/server_parameter_helpers.js"); // For "setParameter." if (!setUpServerForColumnStoreIndexTest(db)) { - return; + quit(); } const coll = db.columnstore_planning_heuristics; @@ -76,4 +73,3 @@ assertColumnScanUsed({}, false, "none"); const explain = coll.find({}, {_id: 0, a: 1}).hint({"$**": "columnstore"}).explain(); assert(planHasStage(db, explain, "COLUMN_SCAN"), `Hint should have overridden heuristics to use column scan: ${tojson(explain)}`); -})(); diff --git a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js index 3a4e48d9dbc59..c89449da23b4b 100644 --- a/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js +++ b/jstests/noPassthroughWithMongod/commands_that_write_accept_wc_standalone.js @@ -23,7 +23,7 @@ commands.push({ commands.push({ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]}, setupFunc: function() { - coll.insert({type: 'oak'}); + assert.commandWorked(coll.insert({type: 'oak'})); assert.eq(coll.getIndexes().length, 1); }, confirmFunc: function() { @@ -41,7 +41,7 @@ commands.push({ writeConcern: {w: 'majority'} }, setupFunc: function() { - coll.insert({type: 'oak'}); + assert.commandWorked(coll.insert({type: 'oak'})); assert.eq(coll.count({type: 'ginkgo'}), 0); assert.eq(coll.count({type: 'oak'}), 1); }, @@ -59,7 +59,7 @@ commands.push({ writeConcern: {w: 'majority'} }, setupFunc: function() { - coll.insert({type: 'oak'}); + assert.commandWorked(coll.insert({type: 'oak'})); assert.eq(coll.count({type: 'ginkgo'}), 0); assert.eq(coll.count({type: 'oak'}), 1); }, @@ -77,7 +77,7 @@ commands.push({ writeConcern: {w: 'majority'} }, setupFunc: function() { - coll.insert({type: 'oak'}); + assert.commandWorked(coll.insert({type: 'oak'})); assert.eq(coll.count({type: 'ginkgo'}), 0); assert.eq(coll.count({type: 'oak'}), 1); }, @@ -90,7 +90,7 @@ commands.push({ commands.push({ req: {applyOps: [{op: "i", ns: coll.getFullName(), o: {_id: 1, type: "willow"}}]}, setupFunc: function() { - coll.insert({_id: 1, type: 'oak'}); + assert.commandWorked(coll.insert({_id: 1, type: 'oak'})); assert.eq(coll.count({type: 'willow'}), 0); }, confirmFunc: function() { @@ -101,8 +101,8 @@ commands.push({ commands.push({ req: {aggregate: collName, pipeline: [{$sort: {type: 1}}, {$out: "foo"}], cursor: {}}, setupFunc: function() { 
- coll.insert({_id: 1, type: 'oak'}); - coll.insert({_id: 2, type: 'maple'}); + assert.commandWorked(coll.insert({_id: 1, type: 'oak'})); + assert.commandWorked(coll.insert({_id: 2, type: 'maple'})); }, confirmFunc: function() { assert.eq(db.foo.count({type: 'oak'}), 1); @@ -120,15 +120,24 @@ commands.push({ }); }, reduce: function(key, values) { - return {count: values.length}; + // We may be re-reducing values that have already been partially reduced. In that case, + // we expect to see an object like {count: } in the array of input values. + const numValues = values.reduce(function(acc, currentValue) { + if (typeof currentValue === "object") { + return acc + currentValue.count; + } else { + return acc + 1; + } + }, 0); + return {count: numValues}; }, out: "foo" }, setupFunc: function() { - coll.insert({x: 1, tags: ["a", "b"]}); - coll.insert({x: 2, tags: ["b", "c"]}); - coll.insert({x: 3, tags: ["c", "a"]}); - coll.insert({x: 4, tags: ["b", "c"]}); + assert.commandWorked(coll.insert({x: 1, tags: ["a", "b"]})); + assert.commandWorked(coll.insert({x: 2, tags: ["b", "c"]})); + assert.commandWorked(coll.insert({x: 3, tags: ["c", "a"]})); + assert.commandWorked(coll.insert({x: 4, tags: ["b", "c"]})); }, confirmFunc: function() { assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2); diff --git a/jstests/noPassthroughWithMongod/convert_to_capped_no_conflicts.js b/jstests/noPassthroughWithMongod/convert_to_capped_no_conflicts.js index f037b0075ee0f..1d97d5d595467 100644 --- a/jstests/noPassthroughWithMongod/convert_to_capped_no_conflicts.js +++ b/jstests/noPassthroughWithMongod/convert_to_capped_no_conflicts.js @@ -35,4 +35,4 @@ assert(testColl.isCapped()); // Interrupt the sleep command. assert.commandWorked(testDB.getSiblingDB("admin").killOp(sleepID)); sleepCommand(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthroughWithMongod/cursor_server_status_metrics_lifespan_histogram.js b/jstests/noPassthroughWithMongod/cursor_server_status_metrics_lifespan_histogram.js index b80ce4dcc0f74..4467e6afe10be 100644 --- a/jstests/noPassthroughWithMongod/cursor_server_status_metrics_lifespan_histogram.js +++ b/jstests/noPassthroughWithMongod/cursor_server_status_metrics_lifespan_histogram.js @@ -82,4 +82,4 @@ while (cursorLt10Minutes.hasNext()) { cursorLt10Minutes.next(); } assert.eq(getNumCursorsLessThan10Minutes() - initialNumCursorsLt10m, 1); -}()); \ No newline at end of file +}()); diff --git a/jstests/noPassthroughWithMongod/dup_bgindex.js b/jstests/noPassthroughWithMongod/dup_bgindex.js index dd62117cf9f22..7dd8fa072b7dd 100644 --- a/jstests/noPassthroughWithMongod/dup_bgindex.js +++ b/jstests/noPassthroughWithMongod/dup_bgindex.js @@ -2,15 +2,15 @@ (function() { var t = db.duplIndexTest; t.drop(); -docs = []; +let docs = []; for (var i = 0; i < 10000; i++) { docs.push({name: "foo", z: {a: 17, b: 4}, i: i}); } assert.commandWorked(t.insert(docs)); -var cmd = "assert.commandWorked(db.duplIndexTest.createIndex( { i : 1 }, {background:true} ));"; +var cmd = "assert.commandWorked(db.duplIndexTest.createIndex( { i : 1 } ));"; var join1 = startParallelShell(cmd); var join2 = startParallelShell(cmd); -assert.commandWorked(t.createIndex({i: 1}, {background: true})); +assert.commandWorked(t.createIndex({i: 1})); assert.eq(1, t.find({i: 1}).count(), "Should find only one doc"); assert.commandWorked(t.dropIndex({i: 1})); assert.eq(1, t.find({i: 1}).count(), "Should find only one doc"); diff --git a/jstests/noPassthroughWithMongod/explain1.js 
b/jstests/noPassthroughWithMongod/explain1.js index 1156a5b86edb6..2fedbc6bd05ca 100644 --- a/jstests/noPassthroughWithMongod/explain1.js +++ b/jstests/noPassthroughWithMongod/explain1.js @@ -1,10 +1,10 @@ // SERVER-2662 - drop client cursor in a context where query will yield frequently -t = db.jstests_slowNightly_explain1; +let t = db.jstests_slowNightly_explain1; t.drop(); // Periodically drops the collection, invalidating client cursors for s2's operations. -s1 = startParallelShell(function() { +let s1 = startParallelShell(function() { t = db.jstests_slowNightly_explain1; for (var i = 0; i < 80; ++i) { t.drop(); @@ -17,11 +17,11 @@ s1 = startParallelShell(function() { }); // Query repeatedly. -s2 = startParallelShell(function() { +let s2 = startParallelShell(function() { t = db.jstests_slowNightly_explain1; for (var i = 0; i < 500; ++i) { try { - z = t.find({x: {$gt: 0}, y: 1}).explain(); + let z = t.find({x: {$gt: 0}, y: 1}).explain(); t.count({x: {$gt: 0}, y: 1}); } catch (e) { } @@ -29,7 +29,7 @@ s2 = startParallelShell(function() { }); // Put pressure on s2 to yield more often. -s3 = startParallelShell(function() { +let s3 = startParallelShell(function() { t = db.jstests_slowNightly_explain1; for (var i = 0; i < 200; ++i) { t.validate({scandata: true}); diff --git a/jstests/noPassthroughWithMongod/explain2.js b/jstests/noPassthroughWithMongod/explain2.js index 0720091e4a442..d3ab85f262cc4 100644 --- a/jstests/noPassthroughWithMongod/explain2.js +++ b/jstests/noPassthroughWithMongod/explain2.js @@ -1,18 +1,19 @@ // Test for race condition SERVER-2807. One cursor is dropped and another is not. // @tags: [requires_capped] -collName = 'jstests_slowNightly_explain2'; +let collName = 'jstests_slowNightly_explain2'; -t = db[collName]; +let t = db[collName]; t.drop(); db.createCollection(collName, {capped: true, size: 100000}); t = db[collName]; t.createIndex({x: 1}); -a = startParallelShell('for( i = 0; i < 50000; ++i ) { db.' + collName + '.insert( {x:i,y:1} ); }'); +let a = + startParallelShell('for( i = 0; i < 50000; ++i ) { db.' + collName + '.insert( {x:i,y:1} ); }'); -for (i = 0; i < 800; ++i) { +for (let i = 0; i < 800; ++i) { t.find({x: {$gt: -1}, y: 1}).sort({x: -1}).explain(); } diff --git a/jstests/noPassthroughWithMongod/explain3.js b/jstests/noPassthroughWithMongod/explain3.js index 6d35949d273d8..81e87c9e184da 100644 --- a/jstests/noPassthroughWithMongod/explain3.js +++ b/jstests/noPassthroughWithMongod/explain3.js @@ -1,10 +1,10 @@ // SERVER-2810 - similar to explain1 test, but with a scan and order find -t = db.jstests_slowNightly_explain3; +let t = db.jstests_slowNightly_explain3; t.drop(); // Periodically drops the collection, invalidating client cursors for s2's operations. -s1 = startParallelShell(function() { +let s1 = startParallelShell(function() { t = db.jstests_slowNightly_explain3; for (var i = 0; i < 80; ++i) { t.drop(); @@ -17,18 +17,18 @@ s1 = startParallelShell(function() { }); // Query repeatedly. -s2 = startParallelShell(function() { +let s2 = startParallelShell(function() { t = db.jstests_slowNightly_explain3; for (var i = 0; i < 500; ++i) { try { - z = t.find({x: {$gt: 0}, y: 1}).sort({x: 1}).explain(); + let z = t.find({x: {$gt: 0}, y: 1}).sort({x: 1}).explain(); } catch (e) { } } }); // Put pressure on s2 to yield more often. 
-s3 = startParallelShell(function() { +let s3 = startParallelShell(function() { t = db.jstests_slowNightly_explain3; for (var i = 0; i < 200; ++i) { t.validate({scandata: true}); diff --git a/jstests/noPassthroughWithMongod/external_sort_text_agg.js b/jstests/noPassthroughWithMongod/external_sort_text_agg.js index 583aff8601bd0..1d11dc7219704 100644 --- a/jstests/noPassthroughWithMongod/external_sort_text_agg.js +++ b/jstests/noPassthroughWithMongod/external_sort_text_agg.js @@ -2,7 +2,7 @@ var t = db.external_sort_text_agg; t.drop(); t.createIndex({text: "text"}); -for (i = 0; i < 100; i++) { +for (let i = 0; i < 100; i++) { t.insert({_id: i, text: Array(210000).join("asdf ")}); // string over 1MB to hit the 100MB threshold for external sort } diff --git a/jstests/noPassthroughWithMongod/findAndModify_upsert_no_conflicts.js b/jstests/noPassthroughWithMongod/findAndModify_upsert_no_conflicts.js index 4c9e83705583c..fdd202f506580 100644 --- a/jstests/noPassthroughWithMongod/findAndModify_upsert_no_conflicts.js +++ b/jstests/noPassthroughWithMongod/findAndModify_upsert_no_conflicts.js @@ -47,4 +47,4 @@ assert.eq(testDB[collName].find(updateDoc).toArray().length, 1); // Interrupt the sleep command. assert.commandWorked(testDB.getSiblingDB("admin").killOp(sleepID)); sleepCommand(); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthroughWithMongod/geo_axis_aligned.js b/jstests/noPassthroughWithMongod/geo_axis_aligned.js index 877954b0f2aa5..f7d92cd5ea71e 100644 --- a/jstests/noPassthroughWithMongod/geo_axis_aligned.js +++ b/jstests/noPassthroughWithMongod/geo_axis_aligned.js @@ -1,21 +1,21 @@ // Axis aligned circles - hard-to-find precision errors possible with exact distances here -t = db.axisaligned; +let t = db.axisaligned; t.drop(); -scale = [1, 10, 1000, 10000]; -bits = [2, 3, 4, 5, 6, 7, 8, 9]; -radius = [0.0001, 0.001, 0.01, 0.1]; -center = [[5, 52], [6, 53], [7, 54], [8, 55], [9, 56]]; +let scale = [1, 10, 1000, 10000]; +let bits = [2, 3, 4, 5, 6, 7, 8, 9]; +let radius = [0.0001, 0.001, 0.01, 0.1]; +let center = [[5, 52], [6, 53], [7, 54], [8, 55], [9, 56]]; -bound = []; +let bound = []; for (var j = 0; j < center.length; j++) bound.push([-180, 180]); // Scale all our values to test different sizes -radii = []; -centers = []; -bounds = []; +let radii = []; +let centers = []; +let bounds = []; for (var s = 0; s < scale.length; s++) { for (var i = 0; i < radius.length; i++) { @@ -70,13 +70,13 @@ for (var b = 0; b < bits.length; b++) { continue; print("DOING WITHIN QUERY "); - r = t.find({"loc": {"$within": {"$center": [center[j], radius[i]]}}}); + let r = t.find({"loc": {"$within": {"$center": [center[j], radius[i]]}}}); assert.eq(5, r.count()); // FIXME: surely code like this belongs in utils.js. 
- a = r.toArray(); - x = []; + let a = r.toArray(); + let x = []; for (k in a) x.push(a[k]["_id"]); x.sort(); diff --git a/jstests/noPassthroughWithMongod/geo_near_random2.js b/jstests/noPassthroughWithMongod/geo_near_random2.js index 2fafb7d4c80e4..81be89c1e103c 100644 --- a/jstests/noPassthroughWithMongod/geo_near_random2.js +++ b/jstests/noPassthroughWithMongod/geo_near_random2.js @@ -5,10 +5,7 @@ var test = new GeoNearRandomTest("nightly.geo_near_random2"); test.insertPts(10000); -opts = { - sphere: 0, - nToTest: test.nPts * 0.01 -}; +let opts = {sphere: 0, nToTest: test.nPts * 0.01}; test.testPt([0, 0], opts); test.testPt(test.mkPt(), opts); test.testPt(test.mkPt(), opts); diff --git a/jstests/noPassthroughWithMongod/geo_polygon.js b/jstests/noPassthroughWithMongod/geo_polygon.js index 28110591d64fa..2b67642daabbd 100644 --- a/jstests/noPassthroughWithMongod/geo_polygon.js +++ b/jstests/noPassthroughWithMongod/geo_polygon.js @@ -2,14 +2,14 @@ // @tags: [SERVER-40561] // -t = db.geo_polygon4; +let t = db.geo_polygon4; t.drop(); -num = 0; +let num = 0; var bulk = t.initializeUnorderedBulkOp(); -for (x = -180; x < 180; x += .5) { - for (y = -180; y < 180; y += .5) { - o = {_id: num++, loc: [x, y]}; +for (let x = -180; x < 180; x += .5) { + for (let y = -180; y < 180; y += .5) { + let o = {_id: num++, loc: [x, y]}; bulk.insert(o); } } diff --git a/jstests/noPassthroughWithMongod/group_pushdown.js b/jstests/noPassthroughWithMongod/group_pushdown.js index 060e2f47fb129..e8584aa2f7f9b 100644 --- a/jstests/noPassthroughWithMongod/group_pushdown.js +++ b/jstests/noPassthroughWithMongod/group_pushdown.js @@ -1,15 +1,12 @@ /** * Tests basic functionality of pushing $group into the find layer. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getAggPlanStage, getAggPlanStages} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); - return; + quit(); } // Ensure group pushdown is enabled and capture the original value of @@ -748,4 +745,3 @@ assert.commandWorked(db.adminCommand( coll.aggregate([{$group: {_id: "$$REMOVE", o: {$first: "$non_existent_field"}}}]).toArray(), [{_id: null, o: null}]); })(); -})(); diff --git a/jstests/noPassthroughWithMongod/huge_multikey_index.js b/jstests/noPassthroughWithMongod/huge_multikey_index.js index f7b703f0d9edb..d4d3e03acd4d4 100644 --- a/jstests/noPassthroughWithMongod/huge_multikey_index.js +++ b/jstests/noPassthroughWithMongod/huge_multikey_index.js @@ -1,11 +1,11 @@ // https://jira.mongodb.org/browse/SERVER-4534 // Building an index in the foreground on a field with a large array and few documents in // the collection used to open too many files and crash the server. -t = db.huge_multikey_index; +let t = db.huge_multikey_index; t.drop(); function doit() { - arr = []; + let arr = []; for (var i = 0; i < 1000 * 1000; i++) arr.push(i); diff --git a/jstests/noPassthroughWithMongod/index_bounds_static_limit.js b/jstests/noPassthroughWithMongod/index_bounds_static_limit.js index 616ddcf2a9377..2b2a55ec06120 100644 --- a/jstests/noPassthroughWithMongod/index_bounds_static_limit.js +++ b/jstests/noPassthroughWithMongod/index_bounds_static_limit.js @@ -3,15 +3,12 @@ // // We issue 'setParameter' command which is not compatible with stepdowns. 
// @tags: [does_not_support_stepdowns] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For explain helpers. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {getPlanStage, getPlanStages} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (!checkSBEEnabled(db)) { jsTest.log("Skipping test because SBE is not enabled"); - return; + quit(); } const coll = db.index_bounds_static_limit; @@ -69,4 +66,3 @@ try { } finally { setStaticLimit(staticLimit); } -})(); diff --git a/jstests/noPassthroughWithMongod/index_check10.js b/jstests/noPassthroughWithMongod/index_check10.js index f355ace6bb86f..4644517980bf4 100644 --- a/jstests/noPassthroughWithMongod/index_check10.js +++ b/jstests/noPassthroughWithMongod/index_check10.js @@ -3,7 +3,7 @@ Random.setRandomSeed(); -t = db.test_index_check10; +let t = db.test_index_check10; function doIt() { t.drop(); @@ -17,7 +17,7 @@ function doIt() { } var fields = ['a', 'b', 'c', 'd', 'e']; - n = Random.randInt(5) + 1; + let n = Random.randInt(5) + 1; var idx = sort(); var chars = "abcdefghijklmnopqrstuvwxyz"; @@ -32,7 +32,7 @@ function doIt() { function r() { var len = Random.randInt(700 / n); - buf = ""; + let buf = ""; for (var i = 0; i < len; ++i) { buf += chars.charAt(Random.randInt(chars.length)); } @@ -73,9 +73,9 @@ function doIt() { } } s = sort(); - c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray(); + let c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray(); try { - c3 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray(); + var c3 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray(); } catch (e) { // may assert if too much data for in memory sort print("retrying check..."); diff --git a/jstests/noPassthroughWithMongod/index_check9.js b/jstests/noPassthroughWithMongod/index_check9.js index 3e082cb1a3f98..6adad9b2b0909 100644 --- a/jstests/noPassthroughWithMongod/index_check9.js +++ b/jstests/noPassthroughWithMongod/index_check9.js @@ -2,7 +2,7 @@ Random.setRandomSeed(); -t = db.test_index_check9; +let t = db.test_index_check9; function doIt() { t.drop(); @@ -16,7 +16,7 @@ function doIt() { } var fields = ['a', 'b', 'c', 'd', 'e']; - n = Random.randInt(5) + 1; + let n = Random.randInt(5) + 1; var idx = sort(); var chars = "abcdefghijklmnopqrstuvwxyz"; @@ -40,7 +40,7 @@ function doIt() { return Random.randInt(10); } else { var len = Random.randInt(10); - buf = ""; + let buf = ""; for (var i = 0; i < len; ++i) { buf += chars.charAt(Random.randInt(chars.length)); } @@ -95,9 +95,9 @@ function doIt() { } } s = sort(); - c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray(); - c2 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray(); - count = t.count(spec); + let c1 = t.find(spec, {_id: null}).sort(s).hint(idx).toArray(); + let c2 = t.find(spec, {_id: null}).sort(s).hint({$natural: 1}).toArray(); + let count = t.count(spec); assert.eq(c1, c2); assert.eq(c2.length, count); } diff --git a/jstests/noPassthroughWithMongod/indexbg_interrupts.js b/jstests/noPassthroughWithMongod/indexbg_interrupts.js index 9d9c9361f192a..d58a2b5bcbf56 100644 --- a/jstests/noPassthroughWithMongod/indexbg_interrupts.js +++ b/jstests/noPassthroughWithMongod/indexbg_interrupts.js @@ -75,7 +75,7 @@ for (var idx = 0; idx < dropAction.length; idx++) { assert.commandWorked(bulk.execute()); jsTest.log("Starting background indexing for test of: " + JSON.stringify(dc)); - primaryDB.getCollection(collection).createIndex({i: 1}, {background: true}); + 
primaryDB.getCollection(collection).createIndex({i: 1}); assert.eq(2, primaryDB.getCollection(collection).getIndexes().length); // Wait for the secondary to get the index entry diff --git a/jstests/noPassthroughWithMongod/logpath.js b/jstests/noPassthroughWithMongod/logpath.js index bb39282871f13..6c333724b957f 100644 --- a/jstests/noPassthroughWithMongod/logpath.js +++ b/jstests/noPassthroughWithMongod/logpath.js @@ -23,7 +23,7 @@ assert(mkdir(testdir)); var cleanupFiles = function() { var files = listFiles(logdir); - for (f in files) { + for (let f in files) { var name = files[f].name; // mostly here for safety @@ -38,7 +38,7 @@ var logCount = function(fpattern, prefix) { var pat = RegExp(fpattern + (prefix ? "" : "$")); var cnt = 0; - for (f in files) { + for (let f in files) { if (pat.test(files[f].name)) { cnt++; } diff --git a/jstests/noPassthroughWithMongod/lookup_match_pushdown.js b/jstests/noPassthroughWithMongod/lookup_match_pushdown.js index 9f9c163e98146..731c5e3bdbeea 100644 --- a/jstests/noPassthroughWithMongod/lookup_match_pushdown.js +++ b/jstests/noPassthroughWithMongod/lookup_match_pushdown.js @@ -1,11 +1,8 @@ /** * Tests that the $match stage is pushed before $lookup stage. */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For assertArrayEq. -load('jstests/libs/analyze_plan.js'); // For getWinningPlan(). +import {getWinningPlan} from "jstests/libs/analyze_plan.js"; const coll = db.lookup_match_pushdown; coll.drop(); @@ -76,5 +73,4 @@ const pipelineExprGt = [ {$unwind: "$a"}, {$match: {"a.z": 10, $expr: {$gt: ["$x", 5]}}} ]; -checkPipelineAndResults(pipelineExprGt, expectedPipeline, expectedResultsGt); -}()); +checkPipelineAndResults(pipelineExprGt, expectedPipeline, expectedResultsGt); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/lookup_with_limit.js b/jstests/noPassthroughWithMongod/lookup_with_limit.js index ba5c3ae9529e9..08815adff0e16 100644 --- a/jstests/noPassthroughWithMongod/lookup_with_limit.js +++ b/jstests/noPassthroughWithMongod/lookup_with_limit.js @@ -1,15 +1,12 @@ /** * Tests that the $limit stage is pushed before $lookup stages, except when there is an $unwind. */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); // For getWinningPlan(). -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {flattenQueryPlanTree, getWinningPlan} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE $lookup is not enabled."); - return; + quit(); } const coll = db.lookup_with_limit; @@ -88,4 +85,3 @@ pipeline = [ ]; checkResults(pipeline, false, ["COLLSCAN", "EQ_LOOKUP", "$unwind", "$sort", "$limit"]); checkResults(pipeline, true, ["COLLSCAN", "$lookup", "$sort"]); -}()); diff --git a/jstests/noPassthroughWithMongod/ne_array_indexability.js b/jstests/noPassthroughWithMongod/ne_array_indexability.js index e632e5fc1b6c1..dbfe6a2d27563 100644 --- a/jstests/noPassthroughWithMongod/ne_array_indexability.js +++ b/jstests/noPassthroughWithMongod/ne_array_indexability.js @@ -1,9 +1,8 @@ /** * Test that $ne: [] queries are cached correctly. See SERVER-39764. */ -(function() { -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. 
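The ne_array_indexability.js hunk at this point shows the patch's most common transformation: dropping the `(function() { "use strict"; ... })()` wrapper and `load()` calls in favour of ES module imports. A sketch of what a converted test looks like; the collection name is illustrative, the imports mirror the ones used in these hunks:

```js
// ES-module form of a typical converted test. Imports name exactly what the
// test uses instead of load()ing whole helper files into the global scope.
import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js";
import {checkSBEEnabled} from "jstests/libs/sbe_util.js";

// Top-level module code cannot `return`, so the early-exit guard that used to
// `return` from the IIFE wrapper now calls quit() to end the test instead.
if (!checkSBEEnabled(db)) {
    jsTestLog("Skipping test because SBE is not enabled");
    quit();
}

const coll = db.example_module_test;  // illustrative collection name
coll.drop();
assert.commandWorked(coll.insert({a: 1}));
// ... test body; no trailing `})();` wrapper is needed any more ...
```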
+import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const coll = db.ne_array_indexability; coll.drop(); @@ -48,4 +47,3 @@ runTest({'obj': {$ne: 'def'}}, {'obj': {$ne: [[1]]}}); assert.commandWorked(coll.runCommand('planCacheClear')); runTest({'obj': {$nin: ['abc', 'def']}}, {'obj': {$nin: [[1], 'abc']}}); -})(); diff --git a/jstests/noPassthroughWithMongod/no_balance_collection.js b/jstests/noPassthroughWithMongod/no_balance_collection.js index 7f47f8db95d76..d793bab6c1cab 100644 --- a/jstests/noPassthroughWithMongod/no_balance_collection.js +++ b/jstests/noPassthroughWithMongod/no_balance_collection.js @@ -1,11 +1,7 @@ // Tests whether the noBalance flag disables balancing for collections // @tags: [requires_sharding] -(function() { -"use strict"; - load("jstests/sharding/libs/find_chunks_util.js"); -load("jstests/libs/feature_flag_util.js"); const st = new ShardingTest({shards: 2, mongos: 1, other: {chunkSize: 1}}); @@ -96,5 +92,4 @@ if (lastMigration == null) { assert.eq(lastMigration.time, sh._lastMigration(collB).time); } -st.stop(); -}()); +st.stop(); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/now_variable.js b/jstests/noPassthroughWithMongod/now_variable.js index c20558102fb9b..a220aa3218122 100644 --- a/jstests/noPassthroughWithMongod/now_variable.js +++ b/jstests/noPassthroughWithMongod/now_variable.js @@ -1,10 +1,7 @@ /** * Tests for the $$NOW and $$CLUSTER_TIME system variable. */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; load("jstests/libs/sbe_assert_error_override.js"); // Override error-code-checking APIs. const coll = db[jsTest.name()]; @@ -194,4 +191,3 @@ assert.eq(0, futureColl.find({$expr: {$lt: ["$timeField", "$$NOW"]}}).itcount()) assert.soon(() => { return futureColl.find({$expr: {$lt: ["$timeField", "$$NOW"]}}).itcount() == 1; }, "$$NOW should catch up after 3 seconds"); -}()); diff --git a/jstests/noPassthroughWithMongod/or_clustered_collection_sbe_cache.js b/jstests/noPassthroughWithMongod/or_clustered_collection_sbe_cache.js new file mode 100644 index 0000000000000..32985d6006256 --- /dev/null +++ b/jstests/noPassthroughWithMongod/or_clustered_collection_sbe_cache.js @@ -0,0 +1,123 @@ +/** + * Verifies that $or queries on clustered collections that have plans with IXSCAN and + * CLUSTERED_IXSCAN stages does not use the SBE plan cache. + */ +load("jstests/libs/collection_drop_recreate.js"); // For assertDropCollection. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; + +const mongod = MongoRunner.runMongod(); +const dbName = "test"; +const db = mongod.getDB(dbName); +const coll = db.or_use_clustered_collection; +assertDropCollection(db, coll.getName()); + +// Create a clustered collection and create indexes. +assert.commandWorked( + db.createCollection(coll.getName(), {clusteredIndex: {key: {_id: 1}, unique: true}})); +assert.commandWorked(coll.createIndex({a: 1})); + +// Insert documents, and store them to be used later in the test. 
+const docs = []; +const numDocs = 10; +for (let i = 0; i < numDocs; i++) { + docs.push({a: i, _id: i, noIndex: i}); +} +assert.commandWorked(coll.insertMany(docs)); + +function assertCorrectResults({query, expectedDocIds}) { + let results = query.toArray(); + let expectedResults = []; + expectedDocIds.forEach(id => expectedResults.push(docs[id])); + assert.sameMembers(results, expectedResults); +} + +function validatePlanCacheEntries({increment, query, expectedDocIds}) { + const oldSize = coll.getPlanCache().list().length; + assertCorrectResults({query: query, expectedDocIds: expectedDocIds}); + const newSize = coll.getPlanCache().list().length; + assert.eq(oldSize + increment, + newSize, + "Expected " + tojson(increment) + + " new entries in the cache, but got: " + tojson(coll.getPlanCache().list())); +} + +coll.getPlanCache().clear(); +// Validate queries with a single equality clustered collection scan. +validatePlanCacheEntries( + {increment: 0, query: coll.find({$or: [{_id: 123}, {a: 12}]}), expectedDocIds: []}); +validatePlanCacheEntries( + {increment: 0, query: coll.find({$or: [{_id: 6}, {a: 5}]}), expectedDocIds: [5, 6]}); + +// Validate queries with multiple equality clustered collection scans. +validatePlanCacheEntries( + {increment: 0, query: coll.find({$or: [{_id: 100}, {_id: 123}, {a: 11}]}), expectedDocIds: []}); +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: 9}, {_id: 5}, {a: 4}]}), + expectedDocIds: [4, 5, 9] +}); + +// Validate queries with multiple range clustered collection scans. +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: {$lt: -1}}, {_id: {$gt: 10}}, {a: 12}]}), + expectedDocIds: [] +}); +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: {$lt: 1}}, {_id: {$gt: 8}}, {a: 4}]}), + expectedDocIds: [0, 4, 9] +}); + +// Validate queries with both range and equality clustered collection scans. +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: {$lt: -1}}, {_id: 11}, {a: 12}]}), + expectedDocIds: [] +}); +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: {$lt: 2}}, {_id: 8}, {a: 4}]}), + expectedDocIds: [0, 1, 4, 8] +}); + +// Validate queries with 'max' and 'min' set have the correct results. These plans fall back to +// collection scans by the query planner for clustered collections. +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: 123}, {a: 12}]}).max({_id: 4}).hint({_id: 1}), + expectedDocIds: [] +}); +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: 6}, {a: 5}]}).max({_id: 6}).hint({_id: 1}), + expectedDocIds: [5] +}); + +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: 8}, {a: 5}]}).min({_id: 6}).hint({_id: 1}), + expectedDocIds: [8] +}); +validatePlanCacheEntries({ + increment: 0, + query: coll.find({$or: [{_id: 123}, {a: 12}]}).min({_id: 4}).hint({_id: 1}), + expectedDocIds: [] +}); + +// Validate queries that just use a collection scan still get cached. We are checking the SBE cache, +// and don't expect it to increment for classic. +const incrementCache = checkSBEEnabled(db) ? 
1 : 0; +validatePlanCacheEntries({ + increment: incrementCache, + query: coll.find({_id: {$gte: 4}}), + expectedDocIds: [4, 5, 6, 7, 8, 9] +}); + +validatePlanCacheEntries({ + increment: incrementCache, + query: coll.find({$and: [{_id: {$gte: 4}}, {noIndex: 6}]}), + expectedDocIds: [6] +}); + +MongoRunner.stopMongod(mongod); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js b/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js index 4406843eb8396..ebcbecca06015 100644 --- a/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js +++ b/jstests/noPassthroughWithMongod/plan_cache_not_in_regex.js @@ -2,10 +2,7 @@ * Tests that a $not-$in-$regex query, which cannot be supported by an index, cannot incorrectly * hijack the cached plan for an earlier $not-$in query. */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); // For isCollScan and getPlanCacheKeyFromShape. +import {getPlanCacheKeyFromShape, getWinningPlan, isCollscan} from "jstests/libs/analyze_plan.js"; const coll = db.plan_cache_not_in_regex; coll.drop(); @@ -53,5 +50,4 @@ for (let [proj, sort] of [[{}, {}], [{_id: 0, a: 1}, {}], [{_id: 0, a: 1}, {a: 1 // Flush the plan cache before the next iteration. coll.getPlanCache().clear(); -} -})(); +} \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/plan_cache_replanning.js b/jstests/noPassthroughWithMongod/plan_cache_replanning.js index d293413f0fc8b..430cdb89c7968 100644 --- a/jstests/noPassthroughWithMongod/plan_cache_replanning.js +++ b/jstests/noPassthroughWithMongod/plan_cache_replanning.js @@ -3,12 +3,9 @@ * oscillates. It achieves this by creating two indexes, A and B, on a collection, and interleaving * queries which are "ideal" for index A with queries that are "ideal" for index B. */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); // For getPlanStage(). +import {getCachedPlan, getPlanCacheKeyFromShape, getPlanStage} from "jstests/libs/analyze_plan.js"; load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const isSbeEnabled = checkSBEEnabled(db); @@ -245,4 +242,3 @@ coll = assertDropAndRecreateCollection(db, "plan_cache_replanning"); entryAfterRunningSpecialQuery); } } -})(); diff --git a/jstests/noPassthroughWithMongod/plan_selection_no_results.js b/jstests/noPassthroughWithMongod/plan_selection_no_results.js index 6e9b0f56de61e..f21354285db92 100644 --- a/jstests/noPassthroughWithMongod/plan_selection_no_results.js +++ b/jstests/noPassthroughWithMongod/plan_selection_no_results.js @@ -4,9 +4,7 @@ * The plan which is able to most cheaply determine that there are no results should be selected as * the winner. */ -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); +import {getPlanStage} from "jstests/libs/analyze_plan.js"; const coll = db.plan_selection_no_results; coll.drop(); @@ -39,5 +37,4 @@ assert.eq(ixScan.keyPattern, {y: 1}, explain); // Check that there's two rejected plans (one IX intersect plan and one plan which scans the // {x: 1} index). 
-assert.eq(explain.queryPlanner.rejectedPlans.length, 2, explain); -})(); +assert.eq(explain.queryPlanner.rejectedPlans.length, 2, explain); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/projection_optimizations.js b/jstests/noPassthroughWithMongod/projection_optimizations.js index 8658d6f226043..831f5a4be64d4 100644 --- a/jstests/noPassthroughWithMongod/projection_optimizations.js +++ b/jstests/noPassthroughWithMongod/projection_optimizations.js @@ -1,11 +1,8 @@ /** * Test projections with $and in cases where optimizations could be performed. */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); -load('jstests/libs/analyze_plan.js'); +import {getWinningPlan, isIndexOnly, isCollscan} from "jstests/libs/analyze_plan.js"; const coll = db.projection_and; coll.drop(); @@ -25,9 +22,9 @@ let result = runFindWithProjection({ expected: [{a: 1, b: false}] }); // Query should be optimized and covered. -assert(isIndexOnly(db, getWinningPlan(result.explain().queryPlanner))); +const winningPlan = getWinningPlan(result.explain().queryPlanner); +assert(isIndexOnly(db, winningPlan), winningPlan); result = runFindWithProjection( {projection: {a: {$and: ['$a', true, 1]}}, expected: [{_id: 0, a: true}]}); -assert(isCollscan(db, getWinningPlan(result.explain().queryPlanner))); -})(); +assert(isCollscan(db, getWinningPlan(result.explain().queryPlanner))); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/query_stats_configuration.js b/jstests/noPassthroughWithMongod/query_stats_configuration.js new file mode 100644 index 0000000000000..bc5ee4647ce76 --- /dev/null +++ b/jstests/noPassthroughWithMongod/query_stats_configuration.js @@ -0,0 +1,29 @@ +/** + * Tests that the telemetry store can be resized if it is configured, and cannot be resized if it is + * disabled. + */ +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +if (FeatureFlagUtil.isEnabled(db, "QueryStats")) { + function testTelemetrySetting(paramName, paramValue) { + // The feature flag is enabled - make sure the telemetry store can be configured. + const original = assert.commandWorked(db.adminCommand({getParameter: 1, [paramName]: 1})); + assert(original.hasOwnProperty(paramName), original); + const originalValue = original[paramName]; + try { + assert.doesNotThrow(() => db.adminCommand({setParameter: 1, [paramName]: paramValue})); + // Other tests verify that changing the parameter actually affects the behavior. + } finally { + assert.doesNotThrow(() => + db.adminCommand({setParameter: 1, [paramName]: originalValue})); + } + } + testTelemetrySetting("internalQueryStatsCacheSize", "2MB"); + testTelemetrySetting("internalQueryStatsRateLimit", 2147483647); +} else { + // The feature flag is disabled - make sure the telemetry store *cannot* be configured. 
+ assert.commandFailedWithCode( + db.adminCommand({setParameter: 1, internalQueryStatsCacheSize: '2MB'}), 7373500); + assert.commandFailedWithCode( + db.adminCommand({setParameter: 1, internalQueryStatsRateLimit: 2147483647}), 7506200); +} \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js b/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js index 9804c41cea4e6..19aceb64ddd86 100644 --- a/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js +++ b/jstests/noPassthroughWithMongod/randomized_mixed_type_bug.js @@ -2,12 +2,17 @@ * Tests that randomly generated documents can be queried from timeseries collections in the same * manner as a tradional collection. */ -(function() { -"use strict"; +import {checkCascadesOptimizerEnabled} from "jstests/libs/optimizer_utils.js"; load('jstests/third_party/fast_check/fc-3.1.0.js'); // For fast-check (fc). -const scalars = [fc.string(), fc.double(), fc.boolean(), fc.date(), fc.constant(null)]; +// TODO SERVER-67506: Re-enable this test when a decision is made about how Bonsai will handle +// comparison to null. Other semantic difference tickets are also relevant here. +let scalars = [fc.string(), fc.double(), fc.boolean(), fc.date()]; +if (!checkCascadesOptimizerEnabled(db)) { + scalars.push(fc.constant(null)); +} + const pathComponents = fc.constant("a", "b"); // Define our grammar for documents. let documentModel = fc.letrec( @@ -61,7 +66,7 @@ let testMixedTypeQuerying = () => { // Query on pathArray w/ {[compare]: val} on test and control. // Compare the results. try { - assert.docEq( + assert.sameMembers( // Isn't timeseries. db.control.find({[path]: {[compare]: val}}, {_id: 0}).toArray(), // Is timeseries. @@ -75,5 +80,4 @@ let testMixedTypeQuerying = () => { })); }; // testMixedTypeQuerying -testMixedTypeQuerying(); -})(); +testMixedTypeQuerying(); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/reconfigwt.js b/jstests/noPassthroughWithMongod/reconfigwt.js index ed1070b7e84c9..29a668a56f55b 100644 --- a/jstests/noPassthroughWithMongod/reconfigwt.js +++ b/jstests/noPassthroughWithMongod/reconfigwt.js @@ -14,7 +14,7 @@ if (ss.storageEngine.name !== "wiredTiger") { var admin = conn.getDB("admin"); function reconfigure(str) { - ret = admin.runCommand({setParameter: 1, "wiredTigerEngineRuntimeConfig": str}); + let ret = admin.runCommand({setParameter: 1, "wiredTigerEngineRuntimeConfig": str}); print("ret: " + tojson(ret)); return ret; } diff --git a/jstests/noPassthroughWithMongod/recstore.js b/jstests/noPassthroughWithMongod/recstore.js index cae767b063790..9b4e0ee1e6a61 100644 --- a/jstests/noPassthroughWithMongod/recstore.js +++ b/jstests/noPassthroughWithMongod/recstore.js @@ -3,7 +3,7 @@ // it is probably redundant with other tests but is a convenient starting point // for testing such things. 
-t = db.storetest; +let t = db.storetest; t.drop(); diff --git a/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js b/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js index aa4a363a72a91..58370576a14c2 100644 --- a/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js +++ b/jstests/noPassthroughWithMongod/reindex_duplicate_keys.js @@ -43,4 +43,4 @@ let runTest = function(doc) { runTest(); runTest({a: 1}); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthroughWithMongod/remove9.js b/jstests/noPassthroughWithMongod/remove9.js index ba5fd2207951e..87492ce930341 100644 --- a/jstests/noPassthroughWithMongod/remove9.js +++ b/jstests/noPassthroughWithMongod/remove9.js @@ -1,9 +1,9 @@ -t = db.jstests_remove9; +let t = db.jstests_remove9; t.drop(); -js = +let js = "while( 1 ) { for( i = 0; i < 10000; ++i ) { db.jstests_remove9.save( {i:i} ); } db.jstests_remove9.remove( {i: {$gte:0} } ); }"; -pid = startMongoProgramNoConnect("mongo", "--eval", js, db ? db.getMongo().host : null); +let pid = startMongoProgramNoConnect("mongo", "--eval", js, db ? db.getMongo().host : null); Random.setRandomSeed(); for (var i = 0; i < 10000; ++i) { diff --git a/jstests/noPassthroughWithMongod/replReads.js b/jstests/noPassthroughWithMongod/replReads.js index 878e25d094059..d093d6551e58b 100644 --- a/jstests/noPassthroughWithMongod/replReads.js +++ b/jstests/noPassthroughWithMongod/replReads.js @@ -12,8 +12,8 @@ function testReadLoadBalancing(numReplicas) { s.getDB("test").foo.insert({a: 123}); - primary = s.rs0.getPrimary(); - secondaries = s.rs0.getSecondaries(); + let primary = s.rs0.getPrimary(); + let secondaries = s.rs0.getSecondaries(); function rsStats() { return s.getDB("admin").runCommand("connPoolStats")["replicaSets"][s.rs0.name]; @@ -51,7 +51,7 @@ function testReadLoadBalancing(numReplicas) { var connections = []; for (var i = 0; i < secondaries.length * 10; i++) { - conn = new Mongo(s._mongos[0].host); + let conn = new Mongo(s._mongos[0].host); conn.setSecondaryOk(); conn.getDB('test').foo.findOne(); connections.push(conn); @@ -70,7 +70,7 @@ function testReadLoadBalancing(numReplicas) { db = primary.getDB("test"); printjson(rs.status()); - c = rs.conf(); + let c = rs.conf(); print("config before: " + tojson(c)); for (i = 0; i < c.members.length; i++) { if (c.members[i].host == db.runCommand("hello").primary) @@ -102,7 +102,7 @@ function testReadLoadBalancing(numReplicas) { secondaries = s.rs0.getSecondaries(); for (var i = 0; i < secondaries.length * 10; i++) { - conn = new Mongo(s._mongos[0].host); + let conn = new Mongo(s._mongos[0].host); conn.setSecondaryOk(); conn.getDB('test').foo.findOne(); connections.push(conn); diff --git a/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js b/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js index 78f998ad4f786..78c238630c8a4 100644 --- a/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js +++ b/jstests/noPassthroughWithMongod/sbe_agg_pushdown.js @@ -2,10 +2,7 @@ // the pushed down query with SBE. // TODO: Remove this file when all agg expressions are supported by SBE. -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // Storing the expression we assume is unsupported as a constant, so we can easily change it when we // implement $toBool in SBE. 
@@ -15,7 +12,7 @@ const kUnsupportedExpression = { if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is not enabled"); - return; + quit(); } const coll = db.jstests_sbe_pushdown; @@ -58,5 +55,4 @@ assertPushdownQueryExecMode([{$match: {a: 2}}, {$project: {_id: 0, c: {kUnsuppor // Test query with fully supported expressions are executed with SBE when pushed down. assertPushdownQueryExecMode( - [{$match: {$expr: {$eq: ["$b", {$dateFromParts: {year: 2021, month: 4, day: 28}}]}}}], "2"); -}()); + [{$match: {$expr: {$eq: ["$b", {$dateFromParts: {year: 2021, month: 4, day: 28}}]}}}], "2"); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/sbe_index_count_scan_cache.js b/jstests/noPassthroughWithMongod/sbe_index_count_scan_cache.js new file mode 100644 index 0000000000000..2e56872988521 --- /dev/null +++ b/jstests/noPassthroughWithMongod/sbe_index_count_scan_cache.js @@ -0,0 +1,69 @@ +/** + * Tests the SBE plan cache for COUNT SCAN queries. + */ +import {getPlanStages, getWinningPlan} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; + +const testDb = db.getSiblingDB(jsTestName()); +// This test is specifically verifying the behavior of the SBE plan cache. +if (!checkSBEEnabled(testDb)) { + jsTestLog("Skipping test because SBE is not enabled"); + quit(); +} + +assert.commandWorked(testDb.dropDatabase()); + +const coll = testDb.coll; + +assert.commandWorked(coll.insert([ + {a: 1}, + {a: 1, b: 1}, + {a: null, b: 2}, + {b: 4}, + {a: {b: 4}}, + {a: [], b: 2}, + {a: [[], 3]}, + {a: {}}, +])); + +function assertCountScan(pipeline) { + const explain = coll.explain().aggregate(pipeline); + const queryPlan = getWinningPlan(explain.stages[0].$cursor.queryPlanner); + const countScan = getPlanStages(queryPlan, "COUNT_SCAN"); + assert.neq([], countScan, explain); +} + +function runTest({index, query, expectedCount, updatedQuery, updatedCount}) { + assert.commandWorked(coll.createIndex(index)); + coll.getPlanCache().clear(); + assert.eq(0, coll.getPlanCache().list().length); + const oldHits = testDb.serverStatus().metrics.query.planCache.sbe.hits; + + const pipeline = [{$match: query}, {$count: "count"}]; + assertCountScan(pipeline); + + assert.eq(expectedCount, coll.aggregate(pipeline).toArray()[0].count); + assert.eq(expectedCount, coll.aggregate(pipeline).toArray()[0].count); + // Verify that the cache has 1 entry, and has been hit for one time. + assert.eq(1, coll.getPlanCache().list().length); + assert.eq(testDb.serverStatus().metrics.query.planCache.sbe.hits, oldHits + 1); + // Run again with a different value to test the parameterization. + pipeline[0].$match = updatedQuery; + assert.eq(updatedCount, coll.aggregate(pipeline).toArray()[0].count); + // Cache not get updated. + assert.eq(1, coll.getPlanCache().list().length); + // Hits stats is incremented. + assert.eq(testDb.serverStatus().metrics.query.planCache.sbe.hits, oldHits + 2); + + assert.commandWorked(coll.dropIndex(index)); +} + +runTest({index: {a: 1}, query: {a: 1}, expectedCount: 2, updatedQuery: {a: 3}, updatedCount: 1}); +// Test for multiKey and null case. 
+runTest({ + index: {a: 1, b: 1, _id: 1}, + query: {a: {$in: [null, []]}, b: 2}, + expectedCount: 2, + updatedQuery: {a: {$in: [null, []]}, b: 4}, + updatedCount: 1 +}); diff --git a/jstests/noPassthroughWithMongod/sbe_query_eligibility.js b/jstests/noPassthroughWithMongod/sbe_query_eligibility.js index e229c8269a2e2..4e2d627bf3935 100644 --- a/jstests/noPassthroughWithMongod/sbe_query_eligibility.js +++ b/jstests/noPassthroughWithMongod/sbe_query_eligibility.js @@ -1,11 +1,7 @@ /** * Test that verifies which query shapes which are eligible for SBE. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; /** * Utility which asserts that when running the given 'query' over 'collection', explain's reported @@ -28,7 +24,7 @@ function assertEngineUsed(collection, query, isSBE) { if (!checkSBEEnabled(db)) { jsTestLog("Skipping test because SBE is disabled"); - return; + quit(); } const collName = "sbe_eligiblity"; @@ -216,5 +212,4 @@ const fallbackToClassicCases = [ for (const query of fallbackToClassicCases) { assertEngineUsed(coll, query, false /* isSBE */); -} -})(); +} \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/tcmalloc.js b/jstests/noPassthroughWithMongod/tcmalloc.js index e69850fc8e0d6..81b8aaec18676 100644 --- a/jstests/noPassthroughWithMongod/tcmalloc.js +++ b/jstests/noPassthroughWithMongod/tcmalloc.js @@ -20,4 +20,4 @@ if (hasTcSetParameter()) { assert.commandFailed(db.adminCommand({setParameter: 1, tcmallocReleaseRate: -1.0})); assert.commandFailed(db.adminCommand({setParameter: 1, tcmallocReleaseRate: "foo"})); } -}()); \ No newline at end of file +}()); diff --git a/jstests/noPassthroughWithMongod/telemetry_configuration.js b/jstests/noPassthroughWithMongod/telemetry_configuration.js deleted file mode 100644 index 0ae4e8408c34c..0000000000000 --- a/jstests/noPassthroughWithMongod/telemetry_configuration.js +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Tests that the telemetry store can be resized if it is configured, and cannot be resized if it is - * disabled. - */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); - -if (FeatureFlagUtil.isEnabled(db, "Telemetry")) { - function testTelemetrySetting(paramName, paramValue) { - // The feature flag is enabled - make sure the telemetry store can be configured. - const original = assert.commandWorked(db.adminCommand({getParameter: 1, [paramName]: 1})); - assert(original.hasOwnProperty(paramName), original); - const originalValue = original[paramName]; - try { - assert.doesNotThrow(() => db.adminCommand({setParameter: 1, [paramName]: paramValue})); - // Other tests verify that changing the parameter actually affects the behavior. - } finally { - assert.doesNotThrow(() => - db.adminCommand({setParameter: 1, [paramName]: originalValue})); - } - } - testTelemetrySetting("internalQueryConfigureTelemetryCacheSize", "2MB"); - testTelemetrySetting("internalQueryConfigureTelemetrySamplingRate", 2147483647); -} else { - // The feature flag is disabled - make sure the telemetry store *cannot* be configured. 
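For cross-reference while reading this deletion: the telemetry_configuration.js test being removed here is replaced by the new query_stats_configuration.js earlier in this patch, and the server parameters it exercised were renamed. A short sketch of the new calls, using only names and values that appear in the two tests:

```js
// Parameter rename applied by this patch (old test deleted, new test added):
//   internalQueryConfigureTelemetryCacheSize    -> internalQueryStatsCacheSize
//   internalQueryConfigureTelemetrySamplingRate -> internalQueryStatsRateLimit
// With the "QueryStats" feature flag enabled the new names are settable; with it
// disabled the new test expects the same error codes as before (7373500, 7506200).
assert.commandWorked(db.adminCommand({setParameter: 1, internalQueryStatsCacheSize: "2MB"}));
assert.commandWorked(
    db.adminCommand({setParameter: 1, internalQueryStatsRateLimit: 2147483647}));
```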
- assert.commandFailedWithCode( - db.adminCommand({setParameter: 1, internalQueryConfigureTelemetryCacheSize: '2MB'}), - 7373500); - assert.commandFailedWithCode( - db.adminCommand({setParameter: 1, internalQueryConfigureTelemetrySamplingRate: 2147483647}), - 7506200); -} -}()); diff --git a/jstests/noPassthroughWithMongod/temp_namespace.js b/jstests/noPassthroughWithMongod/temp_namespace.js index 1f4438d5b967f..249cf4082b231 100644 --- a/jstests/noPassthroughWithMongod/temp_namespace.js +++ b/jstests/noPassthroughWithMongod/temp_namespace.js @@ -3,10 +3,10 @@ // This test requires persistence beacuase it assumes data will survive a restart. // @tags: [requires_persistence, requires_replication] -testname = 'temp_namespace_sw'; +let testname = 'temp_namespace_sw'; var conn = MongoRunner.runMongod(); -d = conn.getDB('test'); +let d = conn.getDB('test'); assert.commandWorked(d.runCommand({ applyOps: [{op: "c", ns: d.getName() + ".$cmd", o: {create: testname + 'temp1', temp: true}}] })); diff --git a/jstests/noPassthroughWithMongod/testing_only_commands.js b/jstests/noPassthroughWithMongod/testing_only_commands.js index 07121f76770e4..fcadd4640eb15 100644 --- a/jstests/noPassthroughWithMongod/testing_only_commands.js +++ b/jstests/noPassthroughWithMongod/testing_only_commands.js @@ -49,7 +49,7 @@ const isBoundedSortEnabled = function(conn) { TestData.enableTestCommands = false; var conn = MongoRunner.runMongod({}); -for (i in testOnlyCommands) { +for (let i in testOnlyCommands) { assertCmdNotFound(conn.getDB('test'), testOnlyCommands[i]); } assert.eq(isBoundedSortEnabled(conn), false); @@ -59,7 +59,7 @@ MongoRunner.stopMongod(conn); TestData.enableTestCommands = true; var conn = MongoRunner.runMongod({}); -for (i in testOnlyCommands) { +for (let i in testOnlyCommands) { assertCmdFound(conn.getDB('test'), testOnlyCommands[i]); } assert.eq(isBoundedSortEnabled(conn), true); diff --git a/jstests/noPassthroughWithMongod/timeseries_system_views_drop.js b/jstests/noPassthroughWithMongod/timeseries_system_views_drop.js index 8adf9ad391cc4..b67e2eb9ae541 100644 --- a/jstests/noPassthroughWithMongod/timeseries_system_views_drop.js +++ b/jstests/noPassthroughWithMongod/timeseries_system_views_drop.js @@ -8,10 +8,7 @@ * requires_getmore, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const testDB = db.getSiblingDB("timeseries_system_views_drop"); @@ -38,5 +35,4 @@ TimeseriesTest.run((insert) => { assert.commandWorked(testDB.createView("myView", coll.getName(), [])); assert(testDB.system.views.drop()); -}); -})(); +}); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/ttl1.js b/jstests/noPassthroughWithMongod/ttl1.js index 94738104d73f3..a049ba3af8fec 100644 --- a/jstests/noPassthroughWithMongod/ttl1.js +++ b/jstests/noPassthroughWithMongod/ttl1.js @@ -15,7 +15,7 @@ t.drop(); t.runCommand("create", {flags: 0}); var now = (new Date()).getTime(); -for (i = 0; i < 24; i++) { +for (let i = 0; i < 24; i++) { var past = new Date(now - (3600 * 1000 * i)); t.insert({x: past, y: past, z: past}); } diff --git a/jstests/noPassthroughWithMongod/ttl_index_capped_collection_fails.js b/jstests/noPassthroughWithMongod/ttl_index_capped_collection_fails.js index 18ce357341fd1..7f25bad57a482 100644 --- a/jstests/noPassthroughWithMongod/ttl_index_capped_collection_fails.js +++ b/jstests/noPassthroughWithMongod/ttl_index_capped_collection_fails.js @@ -19,4 +19,4 @@ 
assert.commandWorked(db.createCollection(cappedColl.getName(), {capped: true, si assert.commandWorked(cappedColl.createIndex({foo: 1})); assert.commandFailedWithCode(cappedColl.createIndex({bar: 1}, {expireAfterSeconds: 10}), ErrorCodes.CannotCreateIndex); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthroughWithMongod/ttl_repl.js b/jstests/noPassthroughWithMongod/ttl_repl.js index 8d9121fd8a241..331fe09de5a5c 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl.js +++ b/jstests/noPassthroughWithMongod/ttl_repl.js @@ -30,9 +30,9 @@ primarycol.drop(); primarydb.createCollection(primarycol.getName()); // create new collection. insert 24 docs, aged at one-hour intervalss -now = (new Date()).getTime(); +let now = (new Date()).getTime(); var bulk = primarycol.initializeUnorderedBulkOp(); -for (i = 0; i < 24; i++) { +for (let i = 0; i < 24; i++) { bulk.insert({x: new Date(now - (3600 * 1000 * i))}); } assert.commandWorked(bulk.execute()); diff --git a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js index 23bb9367452b3..79fcaf4befaed 100644 --- a/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js +++ b/jstests/noPassthroughWithMongod/ttl_repl_maintenance.js @@ -22,7 +22,7 @@ var primeSystemReplset = function() { var restartWithConfig = function() { MongoRunner.stopMongod(conn); conn = MongoRunner.runMongod({restart: true, cleanData: false, dbpath: conn.dbpath}); - testDB = conn.getDB("test"); + let testDB = conn.getDB("test"); var n = 100; for (var i = 0; i < n; i++) { testDB.foo.insert({x: new Date()}); diff --git a/jstests/noPassthroughWithMongod/ttl_sharded.js b/jstests/noPassthroughWithMongod/ttl_sharded.js index 61a97a8ce00d3..93c7df8ad2e41 100644 --- a/jstests/noPassthroughWithMongod/ttl_sharded.js +++ b/jstests/noPassthroughWithMongod/ttl_sharded.js @@ -14,7 +14,7 @@ var s = new ShardingTest({shards: 2, mongos: 1}); var dbname = "testDB"; var coll = "ttl_sharded"; var ns = dbname + "." + coll; -t = s.getDB(dbname).getCollection(coll); +let t = s.getDB(dbname).getCollection(coll); // enable sharding of the collection. Only 1 chunk initially s.adminCommand({enablesharding: dbname}); diff --git a/jstests/noPassthroughWithMongod/validate_bson_types.js b/jstests/noPassthroughWithMongod/validate_bson_types.js index ffcdd80306d59..82894b66ea04d 100644 --- a/jstests/noPassthroughWithMongod/validate_bson_types.js +++ b/jstests/noPassthroughWithMongod/validate_bson_types.js @@ -30,4 +30,4 @@ assert.commandWorked(coll.insert({s: MinKey()})); assert.commandWorked(coll.insert({t: MaxKey()})); // MaxKey assert.commandWorked(coll.validate({checkBSONConformance: true})); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthroughWithMongod/validate_command.js b/jstests/noPassthroughWithMongod/validate_command.js index 3ef6a1d79e454..e01b087357be9 100644 --- a/jstests/noPassthroughWithMongod/validate_command.js +++ b/jstests/noPassthroughWithMongod/validate_command.js @@ -20,7 +20,7 @@ function testValidate(output) { // Test to confirm that validate is working as expected. 
// SETUP DATA -t = db.jstests_validate; +let t = db.jstests_validate; t.drop(); for (var i = 0; i < count; i++) { diff --git a/jstests/noPassthroughWithMongod/validate_timeseries_count.js b/jstests/noPassthroughWithMongod/validate_timeseries_count.js index ed965ac9e9d56..9882ec5879912 100644 --- a/jstests/noPassthroughWithMongod/validate_timeseries_count.js +++ b/jstests/noPassthroughWithMongod/validate_timeseries_count.js @@ -7,8 +7,8 @@ * ] */ -(function() { -"use strict"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + let testCount = 0; const collNamePrefix = "validate_timeseries_count"; const bucketNamePrefix = "system.buckets.validate_timeseries_count"; @@ -42,24 +42,28 @@ assert.eq(res.nNonCompliantDocuments, 0); assert.eq(res.warnings.length, 0); // Manually changes the control.count of a version-2 (closed) bucket, expects warnings. -jsTestLog("Manually changing the 'control.count' of a version-2 bucket."); -testCount += 1; -collName = collNamePrefix + testCount; -bucketName = bucketNamePrefix + testCount; -db.getCollection(collName).drop(); -assert.commandWorked(db.createCollection( - collName, {timeseries: {timeField: "timestamp", metaField: "metadata", granularity: "hours"}})); -coll = db.getCollection(collName); -bucket = db.getCollection(bucketName); -coll.insertMany([...Array(1002).keys()].map(i => ({ - "metadata": {"sensorId": 2, "type": "temperature"}, - "timestamp": ISODate(), - "temp": i - })), - {ordered: false}); -bucket.updateOne({"meta.sensorId": 2, 'control.version': 2}, {"$set": {"control.count": 10}}); -res = bucket.validate(); -assert(res.valid, tojson(res)); -assert.eq(res.nNonCompliantDocuments, 1); -assert.eq(res.warnings.length, 1); -})(); +if (!FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) { + // TODO SERVER-77454: Investigate re-enabling this. 
+ jsTestLog("Manually changing the 'control.count' of a version-2 bucket."); + testCount += 1; + collName = collNamePrefix + testCount; + bucketName = bucketNamePrefix + testCount; + db.getCollection(collName).drop(); + assert.commandWorked(db.createCollection( + collName, + {timeseries: {timeField: "timestamp", metaField: "metadata", granularity: "hours"}})); + coll = db.getCollection(collName); + bucket = db.getCollection(bucketName); + coll.insertMany( + [...Array(1002).keys()].map(i => ({ + "metadata": {"sensorId": 2, "type": "temperature"}, + "timestamp": ISODate(), + "temp": i + })), + {ordered: false}); + bucket.updateOne({"meta.sensorId": 2, 'control.version': 2}, {"$set": {"control.count": 10}}); + res = bucket.validate(); + assert(res.valid, tojson(res)); + assert.eq(res.nNonCompliantDocuments, 1); + assert.eq(res.warnings.length, 1); +} \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/validate_timeseries_data_indexes.js b/jstests/noPassthroughWithMongod/validate_timeseries_data_indexes.js index 1136b8dcc5f9c..d53f28218e668 100644 --- a/jstests/noPassthroughWithMongod/validate_timeseries_data_indexes.js +++ b/jstests/noPassthroughWithMongod/validate_timeseries_data_indexes.js @@ -114,4 +114,4 @@ res = assert.commandWorked(coll.validate()); assert(res.valid, tojson(res)); assert(res.warnings.length == 1, tojson(res)); assert(res.nNonCompliantDocuments == 1, tojson(res)); -})(); \ No newline at end of file +})(); diff --git a/jstests/noPassthroughWithMongod/validate_timeseries_minmax.js b/jstests/noPassthroughWithMongod/validate_timeseries_minmax.js index d2587e5ea49cb..c7cf0dd85b62b 100644 --- a/jstests/noPassthroughWithMongod/validate_timeseries_minmax.js +++ b/jstests/noPassthroughWithMongod/validate_timeseries_minmax.js @@ -5,8 +5,7 @@ * @tags: [requires_fcv_62] */ -(function() { -"use strict"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const collPrefix = "validate_timeseries_minmax"; const bucketPrefix = "system.buckets.validate_timeseries_minmax"; @@ -257,35 +256,38 @@ assert(res.warnings.length == 1, tojson(res)); assert(res.nNonCompliantDocuments == 1, tojson(res)); // Tests collections with 'control.version' : 2. -jsTestLog("Running validate on a version 2 bucket with incorrect 'max' object field."); -setUpCollection(lotsOfData); -coll = db.getCollection(collName); -bucket = db.getCollection(bucketName); -bucket.updateOne({"meta.sensorId": 2, "control.version": 2}, {"$set": {"control.max.temp": 800}}); -res = bucket.validate(); -assert(res.valid, tojson(res)); -assert.eq(res.nNonCompliantDocuments, 1); -assert.eq(res.warnings.length, 1); +if (!FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) { + // TODO SERVER-77454: Investigate re-enabling this. + jsTestLog("Running validate on a version 2 bucket with incorrect 'max' object field."); + setUpCollection(lotsOfData); + coll = db.getCollection(collName); + bucket = db.getCollection(bucketName); + bucket.updateOne({"meta.sensorId": 2, "control.version": 2}, + {"$set": {"control.max.temp": 800}}); + res = bucket.validate(); + assert(res.valid, tojson(res)); + assert.eq(res.nNonCompliantDocuments, 1); + assert.eq(res.warnings.length, 1); -// "Checks no errors are thrown with a valid closed bucket." 
-jsTestLog( - "Running validate on a version 2 bucket with everything correct, checking that no warnings are found."); -setUpCollection(lotsOfData); -coll = db.getCollection(collName); -bucket = db.getCollection(bucketName); -res = bucket.validate(); -assert(res.valid, tojson(res)); -assert.eq(res.nNonCompliantDocuments, 0); -assert.eq(res.warnings.length, 0); + // "Checks no errors are thrown with a valid closed bucket." + jsTestLog( + "Running validate on a version 2 bucket with everything correct, checking that no warnings are found."); + setUpCollection(lotsOfData); + coll = db.getCollection(collName); + bucket = db.getCollection(bucketName); + res = bucket.validate(); + assert(res.valid, tojson(res)); + assert.eq(res.nNonCompliantDocuments, 0); + assert.eq(res.warnings.length, 0); -// "Checks no errors are thrown with a valid closed bucket with skipped data fields." -jsTestLog( - "Running validate on a correct version 2 bucket with skipped data fields, checking that no warnings are found."); -setUpCollection(skipFieldData); -coll = db.getCollection(collName); -bucket = db.getCollection(bucketName); -res = bucket.validate(); -assert(res.valid, tojson(res)); -assert.eq(res.nNonCompliantDocuments, 0); -assert.eq(res.warnings.length, 0); -})(); \ No newline at end of file + // "Checks no errors are thrown with a valid closed bucket with skipped data fields." + jsTestLog( + "Running validate on a correct version 2 bucket with skipped data fields, checking that no warnings are found."); + setUpCollection(skipFieldData); + coll = db.getCollection(collName); + bucket = db.getCollection(bucketName); + res = bucket.validate(); + assert(res.valid, tojson(res)); + assert.eq(res.nNonCompliantDocuments, 0); + assert.eq(res.warnings.length, 0); +} \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/validate_timeseries_version.js b/jstests/noPassthroughWithMongod/validate_timeseries_version.js index 2b4d9af7d03cf..56e30f052a47e 100644 --- a/jstests/noPassthroughWithMongod/validate_timeseries_version.js +++ b/jstests/noPassthroughWithMongod/validate_timeseries_version.js @@ -7,8 +7,8 @@ * ] */ -(function() { -"use strict"; +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + let testCount = 0; const collNamePrefix = "validate_timeseries_version"; const bucketNamePrefix = "system.buckets.validate_timeseries_version"; @@ -66,27 +66,32 @@ assert.eq(res.warnings.length, 1); // Inserts enough documents to close a bucket and then manually changes the version to 1. // Expects warnings from validation. 
-jsTestLog( - "Changing the 'control.version' of a closed bucket from 2 to 1, and checking for warnings from validation."); -testCount += 1; -collName = collNamePrefix + testCount; -bucketName = bucketNamePrefix + testCount; -db.getCollection(collName).drop(); -assert.commandWorked(db.createCollection( - collName, {timeseries: {timeField: "timestamp", metaField: "metadata", granularity: "hours"}})); -coll = db.getCollection(collName); -bucket = db.getCollection(bucketName); -coll.insertMany([...Array(1200).keys()].map(i => ({ - "metadata": {"sensorId": 3, "type": "temperature"}, - "timestamp": ISODate(), - "temp": i - })), - {ordered: false}); -bucket.updateOne({"meta.sensorId": 3, "control.version": 2}, {"$set": {"control.version": 1}}); -res = bucket.validate(); -assert(res.valid, tojson(res)); -assert.eq(res.nNonCompliantDocuments, 1); -assert.eq(res.warnings.length, 1); +if (!FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) { + // TODO SERVER-77454: Investigate re-enabling this. + jsTestLog( + "Changing the 'control.version' of a closed bucket from 2 to 1, and checking for warnings from validation."); + testCount += 1; + collName = collNamePrefix + testCount; + bucketName = bucketNamePrefix + testCount; + db.getCollection(collName).drop(); + assert.commandWorked(db.createCollection( + collName, + {timeseries: {timeField: "timestamp", metaField: "metadata", granularity: "hours"}})); + coll = db.getCollection(collName); + bucket = db.getCollection(bucketName); + coll.insertMany( + [...Array(1200).keys()].map(i => ({ + "metadata": {"sensorId": 3, "type": "temperature"}, + "timestamp": ISODate(), + "temp": i + })), + {ordered: false}); + bucket.updateOne({"meta.sensorId": 3, "control.version": 2}, {"$set": {"control.version": 1}}); + res = bucket.validate(); + assert(res.valid, tojson(res)); + assert.eq(res.nNonCompliantDocuments, 1); + assert.eq(res.warnings.length, 1); +} // Returns warnings on a bucket with an unsupported version. jsTestLog("Changing 'control.version' to an unsupported version and checking for warnings."); @@ -104,7 +109,13 @@ coll.insertMany([...Array(1100).keys()].map(i => ({ "temp": i })), {ordered: false}); -bucket.updateOne({"meta.sensorId": 4, "control.version": 2}, {"$set": {"control.version": 500}}); +if (FeatureFlagUtil.isEnabled(db, "TimeseriesAlwaysUseCompressedBuckets")) { + bucket.updateOne({"meta.sensorId": 4, "control.version": 1}, + {"$set": {"control.version": 500}}); +} else { + bucket.updateOne({"meta.sensorId": 4, "control.version": 2}, + {"$set": {"control.version": 500}}); +} res = bucket.validate(); assert(res.valid, tojson(res)); assert.eq(res.nNonCompliantDocuments, 1); @@ -118,5 +129,4 @@ bucket.updateOne({"meta.sensorId": 4, "control.version": 1}, {"$set": {"control. res = bucket.validate(); assert(res.valid, tojson(res)); assert.eq(res.nNonCompliantDocuments, 2); -assert.eq(res.warnings.length, 1); -})(); \ No newline at end of file +assert.eq(res.warnings.length, 1); \ No newline at end of file diff --git a/jstests/noPassthroughWithMongod/views_invalid.js b/jstests/noPassthroughWithMongod/views_invalid.js index 465c28822c490..4d56c92ad7816 100644 --- a/jstests/noPassthroughWithMongod/views_invalid.js +++ b/jstests/noPassthroughWithMongod/views_invalid.js @@ -23,7 +23,7 @@ assert.commandWorked(invalidDB.adminCommand( {applyOps: [{op: "i", ns: dbname + ".system.views", o: {_id: "invalid", pipeline: 3.0}}]})); // Make sure we logged an error message about the invalid view. 
-assert(checkLog.checkContainsOnceJson(invalidDB, 7267300)); +assert(checkLog.checkContainsOnceJson(invalidDB, 20326)); // Check that view-related commands fail with an invalid view catalog, but other commands on // existing collections still succeed. diff --git a/jstests/ocsp/lib/mock_ocsp.js b/jstests/ocsp/lib/mock_ocsp.js index faae43c3bdfc5..3246d1732bb37 100644 --- a/jstests/ocsp/lib/mock_ocsp.js +++ b/jstests/ocsp/lib/mock_ocsp.js @@ -145,4 +145,4 @@ class MockOCSPServer { print("Mock OCSP Server stop complete"); } -} \ No newline at end of file +} diff --git a/jstests/ocsp/lib/ocsp_helpers.js b/jstests/ocsp/lib/ocsp_helpers.js index 90ec04d35bae2..130892f04f6d1 100644 --- a/jstests/ocsp/lib/ocsp_helpers.js +++ b/jstests/ocsp/lib/ocsp_helpers.js @@ -103,4 +103,4 @@ var supportsStapling = function() { return false; } return true; -}; \ No newline at end of file +}; diff --git a/jstests/ocsp/ocsp_basic_ca_responder.js b/jstests/ocsp/ocsp_basic_ca_responder.js index e63ca98d499e7..49962ebfde643 100644 --- a/jstests/ocsp/ocsp_basic_ca_responder.js +++ b/jstests/ocsp/ocsp_basic_ca_responder.js @@ -56,4 +56,4 @@ test(OCSP_SERVER_SIGNED_BY_INTERMEDIATE_CA_PEM, OCSP_INTERMEDIATE_CA_WITH_ROOT_PEM, OCSP_INTERMEDIATE_RESPONDER); test(OCSP_SERVER_AND_INTERMEDIATE_APPENDED_PEM, OCSP_CA_PEM, OCSP_INTERMEDIATE_RESPONDER); -}()); \ No newline at end of file +}()); diff --git a/jstests/ocsp/ocsp_client_verification_logging.js b/jstests/ocsp/ocsp_client_verification_logging.js index cd883380e7e56..6e6423300b4a8 100644 --- a/jstests/ocsp/ocsp_client_verification_logging.js +++ b/jstests/ocsp/ocsp_client_verification_logging.js @@ -123,4 +123,4 @@ let runTest = (options) => { runTest({connectionHealthLoggingOn: true}); runTest({connectionHealthLoggingOn: false}); runTest({ocspFaultType: FAULT_REVOKED, connectionHealthLoggingOn: true}); -}()); \ No newline at end of file +}()); diff --git a/jstests/ocsp/ocsp_sharding_basic.js b/jstests/ocsp/ocsp_sharding_basic.js index f8b6ed2523e6c..a196b200a5782 100644 --- a/jstests/ocsp/ocsp_sharding_basic.js +++ b/jstests/ocsp/ocsp_sharding_basic.js @@ -86,4 +86,4 @@ st.restartMongos(0); mock_ocsp.stop(); st.stop(); -}()); \ No newline at end of file +}()); diff --git a/jstests/parallel/allops.js b/jstests/parallel/allops.js index b0d6e7188a375..d15fd09b0c89f 100644 --- a/jstests/parallel/allops.js +++ b/jstests/parallel/allops.js @@ -1,14 +1,14 @@ // test all operations in parallel load('jstests/libs/parallelTester.js'); -f = db.jstests_parallel_allops; +let f = db.jstests_parallel_allops; f.drop(); Random.setRandomSeed(); -t = new ParallelTester(); +let t = new ParallelTester(); -for (id = 0; id < 10; ++id) { +for (var id = 0; id < 10; ++id) { var g = new EventGenerator(id, "jstests_parallel_allops", Random.randInt(20)); for (var j = 0; j < 1000; ++j) { var op = Random.randInt(3); diff --git a/jstests/parallel/checkMultiThread.js b/jstests/parallel/checkMultiThread.js index 3f0bbe8d6e3b5..5dea82ac040ac 100644 --- a/jstests/parallel/checkMultiThread.js +++ b/jstests/parallel/checkMultiThread.js @@ -6,8 +6,8 @@ var func = function() { db.runCommand({sleep: 1, seconds: 10000}); return new Date(); }; -a = new Thread(func); -b = new Thread(func); +let a = new Thread(func); +let b = new Thread(func); a.start(); b.start(); a.join(); diff --git a/jstests/parallel/del.js b/jstests/parallel/del.js index 1a2c74db4a143..1b8e23990cb45 100644 --- a/jstests/parallel/del.js +++ b/jstests/parallel/del.js @@ -1,10 +1,10 @@ load('jstests/libs/parallelTester.js'); -N = 1000; -HOST = 
db.getMongo().host; +const N = 1000; +const HOST = db.getMongo().host; -a = db.getSiblingDB("fooa"); -b = db.getSiblingDB("foob"); +const a = db.getSiblingDB("fooa"); +const b = db.getSiblingDB("foob"); a.dropDatabase(); b.dropDatabase(); @@ -80,17 +80,17 @@ function del2(dbname, host, max, kCursorKilledErrorCodes) { } } -all = []; +const all = []; all.push(fork(del1, "a", HOST, N, kCursorKilledErrorCodes)); all.push(fork(del2, "a", HOST, N, kCursorKilledErrorCodes)); all.push(fork(del1, "b", HOST, N, kCursorKilledErrorCodes)); all.push(fork(del2, "b", HOST, N, kCursorKilledErrorCodes)); -for (i = 0; i < all.length; i++) +for (let i = 0; i < all.length; i++) all[i].start(); -for (i = 0; i < 10; i++) { +for (let i = 0; i < 10; i++) { sleep(2000); print("dropping"); a.dropDatabase(); @@ -100,6 +100,6 @@ for (i = 0; i < 10; i++) { a.del_parallel.save({done: 1}); b.del_parallel.save({done: 1}); -for (i = 0; i < all.length; i++) { +for (let i = 0; i < all.length; i++) { assert.commandWorked(all[i].returnData()); } diff --git a/jstests/parallel/insert.js b/jstests/parallel/insert.js index d28eb89e2205b..d7fd72d4d6265 100644 --- a/jstests/parallel/insert.js +++ b/jstests/parallel/insert.js @@ -1,17 +1,17 @@ // perform inserts in parallel from several clients load('jstests/libs/parallelTester.js'); -f = db.jstests_parallel_insert; +let f = db.jstests_parallel_insert; f.drop(); f.createIndex({who: 1}); Random.setRandomSeed(); -t = new ParallelTester(); +let t = new ParallelTester(); -for (id = 0; id < 10; ++id) { +for (let id = 0; id < 10; ++id) { var g = new EventGenerator(id, "jstests_parallel_insert", Random.randInt(20)); - for (j = 0; j < 1000; ++j) { + for (let j = 0; j < 1000; ++j) { if (j % 50 == 0) { g.addCheckCount(j, {who: id}); } diff --git a/jstests/parallel/shellfork.js b/jstests/parallel/shellfork.js index b7621279d08db..4646b8f7414f7 100644 --- a/jstests/parallel/shellfork.js +++ b/jstests/parallel/shellfork.js @@ -1,16 +1,16 @@ load('jstests/libs/parallelTester.js'); -a = fork(function(a, b) { +let a = fork(function(a, b) { return a / b; }, 10, 2); a.start(); -b = fork(function(a, b, c) { +let b = fork(function(a, b, c) { return a + b + c; }, 18, " is a ", "multiple of 3"); -makeFunny = function(text) { +let makeFunny = function(text) { return text + " ha ha!"; }; -c = fork(makeFunny, "paisley"); +let c = fork(makeFunny, "paisley"); c.start(); b.start(); b.join(); @@ -18,7 +18,7 @@ assert.eq(5, a.returnData()); assert.eq("18 is a multiple of 3", b.returnData()); assert.eq("paisley ha ha!", c.returnData()); -z = fork(function(a) { +let z = fork(function(a) { load('jstests/libs/parallelTester.js'); var y = fork(function(a) { return a + 1; @@ -29,7 +29,7 @@ z = fork(function(a) { z.start(); assert.eq(7, z.returnData()); -t = 1; +let t = 1; z = new Thread(function() { assert(typeof (t) == "undefined", "t not undefined"); t = 5; diff --git a/jstests/parallel/update_serializability1.js b/jstests/parallel/update_serializability1.js index e57b51ae72154..5d91a28d084c0 100644 --- a/jstests/parallel/update_serializability1.js +++ b/jstests/parallel/update_serializability1.js @@ -1,10 +1,10 @@ -t = db.update_serializability1; +let t = db.update_serializability1; t.drop(); -N = 100000; +let N = 100000; -bulk = t.initializeUnorderedBulkOp(); +let bulk = t.initializeUnorderedBulkOp(); for (var i = 0; i < N; i++) { bulk.insert({_id: i, a: i, b: N - i, x: 1, y: 1}); } @@ -13,10 +13,11 @@ bulk.execute(); t.createIndex({a: 1}); t.createIndex({b: 1}); -s1 = 
startParallelShell("db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { b : " + - (N + 1) + ", x : 2 } }, false, true );"); -s2 = startParallelShell("db.update_serializability1.update( { b : { $lte : " + N + - " } }, { $set : { a : -1, y : 2 } }, false, true );"); +let s1 = startParallelShell( + "db.update_serializability1.update( { a : { $gte : 0 } }, { $set : { b : " + (N + 1) + + ", x : 2 } }, false, true );"); +let s2 = startParallelShell("db.update_serializability1.update( { b : { $lte : " + N + + " } }, { $set : { a : -1, y : 2 } }, false, true );"); s1(); s2(); diff --git a/jstests/query_golden/array_index.js b/jstests/query_golden/array_index.js index 0b3098b39254a..6177427c104c1 100644 --- a/jstests/query_golden/array_index.js +++ b/jstests/query_golden/array_index.js @@ -7,10 +7,8 @@ * requires_cqf, * ] */ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton. +import {show} from "jstests/libs/golden_test.js"; +import {getPlanSkeleton} from "jstests/libs/optimizer_utils.js"; db.setLogLevel(4, "query"); @@ -38,4 +36,3 @@ function run(pipeline) { run([{$match: {a: 2}}, {$unset: '_id'}]); run([{$match: {a: {$lt: 2}}}, {$unset: '_id'}]); -})(); diff --git a/jstests/query_golden/ce_accuracy.js b/jstests/query_golden/ce_accuracy.js index 4d2d5261940bd..fd354aa70df97 100644 --- a/jstests/query_golden/ce_accuracy.js +++ b/jstests/query_golden/ce_accuracy.js @@ -5,10 +5,9 @@ * ] */ -(function() { - -load("jstests/query_golden/libs/ce_data.js"); -load("jstests/query_golden/libs/run_queries_ce.js"); +import {runHistogramsTest} from "jstests/libs/ce_stats_utils.js"; +import {getCEDocs, getCEDocs1} from "jstests/query_golden/libs/ce_data.js"; +import {runCETestForCollection} from "jstests/query_golden/libs/run_queries_ce.js"; runHistogramsTest(function() { const coll = db.ce_data_20; @@ -38,4 +37,3 @@ runHistogramsTest(function() { const ceDebugFlag = false; runCETestForCollection(db, collMeta, 4, ceDebugFlag); }); -})(); diff --git a/jstests/query_golden/ce_mixed.js b/jstests/query_golden/ce_mixed.js index d4807b9d9cbec..7f852cadb0e53 100644 --- a/jstests/query_golden/ce_mixed.js +++ b/jstests/query_golden/ce_mixed.js @@ -1,12 +1,17 @@ /** - * A test for conjunctive predicates using a semi-realistic collection/queries. + * A test for conjunctive and disjunctive predicates using a semi-realistic collection/queries. * @tags: [ * requires_cqf, * ] */ -(function() { -load("jstests/libs/ce_stats_utils.js"); // For 'getRootCE', 'createHistogram'. +import { + createHistogram, + getRootCE, + runHistogramsTest, + summarizeExplainForCE +} from "jstests/libs/ce_stats_utils.js"; +import {forceCE} from "jstests/libs/optimizer_utils.js"; const collCard = 300; const numberBuckets = 5; @@ -122,6 +127,24 @@ runHistogramsTest(function() { testPredicate({likesPizza: false, name: {$lte: "Bob Bennet"}}); testPredicate({favPizzaToppings: "mushrooms", name: {$lte: "Bob Bennet"}}); + // Test disjunctions of predicates all using histograms. 
+ testPredicate({$or: [{likesPizza: true}, {date: {$lt: new ISODate("1955-01-01T00:00:00")}}]}); + testPredicate({ + $or: [{favPizzaToppings: "mushrooms"}, {name: {$lte: "Bob Bennet", $gte: "Alice Smith"}}] + }); + testPredicate({ + $or: [ + {$and: [{likesPizza: false}, {name: {$lte: "Bob Bennet"}}]}, + {$and: [{likesPizza: true}, {name: {$gte: "Tom Watson"}}]} + ] + }); + testPredicate({ + $or: [ + {$and: [{likesPizza: false}, {name: {$lte: "Bob Bennet"}}]}, + {date: {$lte: "1960-01-01T00:00:00"}} + ] + }); + // Test conjunctions of predicates such that some use histograms and others use heuristics. testPredicate({lastPizzaShopVisited: "Zizzi", likesPizza: true}); testPredicate({lastPizzaShopVisited: "Zizzi", likesPizza: false}); @@ -139,5 +162,42 @@ runHistogramsTest(function() { favPizzaToppings: "mushrooms", likesPizza: true }); + + // Test disjunctions of predicates such that some use histograms and others use heuristics. + testPredicate({$or: [{lastPizzaShopVisited: "Zizzi"}, {likesPizza: true}]}); + testPredicate({ + $or: [ + {lastPizzaShopVisited: "Zizzi"}, + { + date: { + $gt: new ISODate("1950-01-01T00:00:00"), + $lt: new ISODate("1960-01-01T00:00:00") + } + } + ] + }); + testPredicate({ + $or: [ + {$and: [{lastPizzaShopVisited: "Zizzi"}, {name: {$lte: "John Watson"}}]}, + {$and: [{favPizzaToppings: "mushrooms"}, {likesPizza: true}]} + ] + }); + testPredicate({ + $or: [ + {$and: [{lastPizzaShopVisited: "Zizzi"}, {name: {$lte: "John Watson"}}]}, + {$and: [{lastPizzaShopVisited: "Zizzi"}, {name: {$gte: "Kate Knight"}}]} + ] + }); + testPredicate({ + $or: [ + {$and: [{lastPizzaShopVisited: "Zizzi"}, {name: {$lte: "John Watson"}}]}, + {favPizzaToppings: "mushrooms"} + ] + }); + testPredicate({ + $or: [ + {$and: [{favPizzaToppings: "mushrooms"}, {name: {$lte: "John Watson"}}]}, + {lastPizzaShopVisited: "Zizzi"} + ] + }); }); -})(); diff --git a/jstests/query_golden/ce_sampled_histogram.js b/jstests/query_golden/ce_sampled_histogram.js index 56e878018e2eb..1a0d1d467a938 100644 --- a/jstests/query_golden/ce_sampled_histogram.js +++ b/jstests/query_golden/ce_sampled_histogram.js @@ -5,6 +5,16 @@ * requires_cqf, * ] */ +import { + createHistogram, + getRootCE, + runHistogramsTest, + summarizeExplainForCE +} from "jstests/libs/ce_stats_utils.js"; +import {forceCE, round2} from "jstests/libs/optimizer_utils.js"; +import {computeStrategyErrors} from "jstests/query_golden/libs/compute_errors.js"; + +load("jstests/libs/load_ce_test_data.js"); // For 'loadJSONDataset'. /** * Returns a 2-element array containing the number of documents returned by the 'predicate' and @@ -47,11 +57,6 @@ function testMatchPredicate(baseColl, sampleColl, predicate, collSize, totSample print(`Sample error: ${tojson(sampleErr)}`); } -(function() { -load("jstests/libs/load_ce_test_data.js"); // For 'loadJSONDataset'. -load("jstests/libs/ce_stats_utils.js"); // For 'getRootCE', 'createHistogram', runHistogramsTest -load("jstests/query_golden/libs/compute_errors.js"); // For 'computeStrategyErrors'. 
- Random.setRandomSeed(6345); const collData = 'ce_accuracy_test'; @@ -162,4 +167,3 @@ runHistogramsTest(function testSampleHistogram() { print(`Average base error: ${tojson(avgBaseErr)}\n`); print(`Average sample error: ${tojson(avgSampleErr)}`); }); -})(); diff --git a/jstests/query_golden/elemMatch.js b/jstests/query_golden/elemMatch.js index bb6fb52625718..47f2b38bc86f7 100644 --- a/jstests/query_golden/elemMatch.js +++ b/jstests/query_golden/elemMatch.js @@ -1,5 +1,4 @@ -(function() { -"use strict"; +import {show} from "jstests/libs/golden_test.js"; const coll = db.cqf_elemMatch; coll.drop(); @@ -61,4 +60,3 @@ runPipeline(pipeline); pipeline = [{$match: {a: {$elemMatch: {$elemMatch: {b: {$elemMatch: {$gt: 5}}}}}}}]; runPipeline(pipeline); -}()); diff --git a/jstests/query_golden/eq.js b/jstests/query_golden/eq.js index 5847088a1b227..c595b58b74773 100644 --- a/jstests/query_golden/eq.js +++ b/jstests/query_golden/eq.js @@ -2,7 +2,8 @@ * Tests $eq against a variety of BSON types and shapes. */ -load('jstests/query_golden/libs/example_data.js'); +import {show} from "jstests/libs/golden_test.js"; +import {leafs, smallDocs} from "jstests/query_golden/libs/example_data.js"; const docs = smallDocs(); diff --git a/jstests/query_golden/example.js b/jstests/query_golden/example.js index 29ae77f34e9c8..c7ae0b5651bd1 100644 --- a/jstests/query_golden/example.js +++ b/jstests/query_golden/example.js @@ -1,6 +1,7 @@ /** * Example query-correctness test using the golden-data framework. */ +import {show} from "jstests/libs/golden_test.js"; const coll = db.query_golden_example; coll.drop(); diff --git a/jstests/query_golden/exclusion_projection.js b/jstests/query_golden/exclusion_projection.js index 78e3155650f23..63a723ff9d990 100644 --- a/jstests/query_golden/exclusion_projection.js +++ b/jstests/query_golden/exclusion_projection.js @@ -3,10 +3,11 @@ * jstests/cqf/projection.js; both tests will exist pending a decision about the future of golden * jstesting for CQF. 
*/ - -(function() { -"use strict"; -load("jstests/query_golden/libs/projection_helpers.js"); +import { + getIdProjectionDocs, + getProjectionDocs, + runProjectionsAgainstColl +} from "jstests/query_golden/libs/projection_helpers.js"; const coll = db.cqf_exclusion_project; const exclusionProjSpecs = [ @@ -34,4 +35,3 @@ const idExclusionProjectSpecs = [ {"_id.a.b": 0}, ]; runProjectionsAgainstColl(coll, getIdProjectionDocs(), [] /*no indexes*/, idExclusionProjectSpecs); -}()); diff --git a/jstests/query_golden/expected_output/array_index b/jstests/query_golden/expected_output/array_index index d11fcf001b018..5be96c8b6e23e 100644 --- a/jstests/query_golden/expected_output/array_index +++ b/jstests/query_golden/expected_output/array_index @@ -9,7 +9,8 @@ { "a" : [ 2 ] } { "a" : [ 2, 3, 4 ] } nReturned: 4 -Plan skeleton: { +Plan skeleton: +{ "queryPlanner" : { "winningPlan" : { "optimizerPlan" : { @@ -34,6 +35,7 @@ Plan skeleton: { } } + [jsTest] ---- [jsTest] Query: [ { "$match" : { "a" : { "$lt" : 2 } } }, { "$unset" : "_id" } ] [jsTest] ---- @@ -41,7 +43,8 @@ Plan skeleton: { { "a" : [ 1, 2, 3, 4 ] } { "a" : [ 1, 3 ] } nReturned: 2 -Plan skeleton: { +Plan skeleton: +{ "queryPlanner" : { "winningPlan" : { "optimizerPlan" : { @@ -67,4 +70,4 @@ Plan skeleton: { } } } -} \ No newline at end of file +} diff --git a/jstests/query_golden/expected_output/ce_accuracy b/jstests/query_golden/expected_output/ce_accuracy index 4e0e7bab41b4b..8f4ae7bf079b5 100644 --- a/jstests/query_golden/expected_output/ce_accuracy +++ b/jstests/query_golden/expected_output/ce_accuracy @@ -14,6 +14,7 @@ Collection count: 20 Running CE accuracy test for collection ce_data_20 of 20 documents. Begin query generation + [jsTest] ---- [jsTest] Sample positions: [ 2, 7, 12, 17 ] [jsTest] @@ -100,10 +101,14 @@ Running query batch [0 - 37) with fields [ "a", "b", "c_int", "mixed" ] Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -112,10 +117,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -124,10 +133,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 7 Cardinality estimates: -heuristic: 6.6 QError: 1.06, RelError: -0.06, SelError: -2% -heuristicIdx: 6.6 QError: 1.06, RelError: -0.06, SelError: -2% -histogram: 7 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 7 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.06, RelError: -0.06, SelError: -2% +heuristicIdx: 6.6 +QError: 1.06, RelError: -0.06, SelError: -2% +histogram: 7 +QError: 1, RelError: 0, SelError: 0% 
+histogramIdx: 7 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -136,10 +149,14 @@ histogramIdx: 7 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 8 Cardinality estimates: -heuristic: 6.6 QError: 1.21, RelError: -0.18, SelError: -7% -heuristicIdx: 6.6 QError: 1.21, RelError: -0.18, SelError: -7% -histogram: 8 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 8 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.21, RelError: -0.18, SelError: -7% +heuristicIdx: 6.6 +QError: 1.21, RelError: -0.18, SelError: -7% +histogram: 8 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 8 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -148,10 +165,14 @@ histogramIdx: 8 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 12 Cardinality estimates: -heuristic: 9 QError: 1.33, RelError: -0.25, SelError: -15% -heuristicIdx: 9 QError: 1.33, RelError: -0.25, SelError: -15% -histogram: 12 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 12 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1.33, RelError: -0.25, SelError: -15% +heuristicIdx: 9 +QError: 1.33, RelError: -0.25, SelError: -15% +histogram: 12 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 12 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -160,10 +181,14 @@ histogramIdx: 12 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 13 Cardinality estimates: -heuristic: 9 QError: 1.44, RelError: -0.31, SelError: -20% -heuristicIdx: 9 QError: 1.44, RelError: -0.31, SelError: -20% -histogram: 13 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 13 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1.44, RelError: -0.31, SelError: -20% +heuristicIdx: 9 +QError: 1.44, RelError: -0.31, SelError: -20% +histogram: 13 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 13 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -172,10 +197,14 @@ histogramIdx: 13 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 10 Cardinality estimates: -heuristic: 6.6 QError: 1.52, RelError: -0.34, SelError: -17% -heuristicIdx: 6.6 QError: 1.52, RelError: -0.34, SelError: -17% -histogram: 10 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 10 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.52, RelError: -0.34, SelError: -17% +heuristicIdx: 6.6 +QError: 1.52, RelError: -0.34, SelError: -17% +histogram: 10 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 10 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -184,10 +213,14 @@ histogramIdx: 10 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 13 Cardinality estimates: -heuristic: 6.6 QError: 1.97, RelError: -0.49, SelError: -32% -heuristicIdx: 6.6 QError: 1.97, RelError: -0.49, SelError: -32% -histogram: 13 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 13 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.97, RelError: -0.49, SelError: -32% +heuristicIdx: 6.6 +QError: 1.97, RelError: -0.49, SelError: -32% +histogram: 13 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 13 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -196,10 +229,14 @@ histogramIdx: 13 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 9 QError: 4.5, RelError: 3.5, SelError: 35% -heuristicIdx: 9 QError: 4.5, RelError: 3.5, SelError: 35% -histogram: 2 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 4.5, RelError: 3.5, SelError: 35% +heuristicIdx: 9 +QError: 4.5, RelError: 3.5, SelError: 35% +histogram: 2 
+QError: 1, RelError: 0, SelError: 0% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -208,10 +245,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -220,10 +261,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 18 Cardinality estimates: -heuristic: 6.6 QError: 2.73, RelError: -0.63, SelError: -57% -heuristicIdx: 6.6 QError: 2.73, RelError: -0.63, SelError: -57% -histogram: 18 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 18 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.73, RelError: -0.63, SelError: -57% +heuristicIdx: 6.6 +QError: 2.73, RelError: -0.63, SelError: -57% +histogram: 18 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 18 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -232,10 +277,14 @@ histogramIdx: 18 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 19 Cardinality estimates: -heuristic: 6.6 QError: 2.88, RelError: -0.65, SelError: -62% -heuristicIdx: 6.6 QError: 2.88, RelError: -0.65, SelError: -62% -histogram: 19 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 19 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.88, RelError: -0.65, SelError: -62% +heuristicIdx: 6.6 +QError: 2.88, RelError: -0.65, SelError: -62% +histogram: 19 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 19 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -244,10 +293,14 @@ histogramIdx: 19 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 9 QError: 9, RelError: 8, SelError: 40% -heuristicIdx: 9 QError: 9, RelError: 8, SelError: 40% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 9, RelError: 8, SelError: 40% +heuristicIdx: 9 +QError: 9, RelError: 8, SelError: 40% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -256,10 +309,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 9 QError: 4.5, RelError: 3.5, SelError: 35% -heuristicIdx: 9 QError: 4.5, RelError: 3.5, SelError: 35% -histogram: 2 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 4.5, RelError: 3.5, SelError: 35% +heuristicIdx: 9 +QError: 4.5, RelError: 3.5, SelError: 35% +histogram: 2 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -268,10 +325,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 3.69 QError: 3.69, RelError: 2.69, SelError: 13.45% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, 
SelError: 28% +histogram: 3.69 +QError: 3.69, RelError: 2.69, SelError: 13.45% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -280,10 +341,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -histogram: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +histogram: 7.75 +QError: 3.88, RelError: 2.88, SelError: 28.75% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -292,10 +357,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -histogram: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +histogram: 7.75 +QError: 3.88, RelError: 2.88, SelError: 28.75% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -304,10 +373,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -histogram: 6.93 QError: 2.31, RelError: 1.31, SelError: 19.65% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +heuristicIdx: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +histogram: 6.93 +QError: 2.31, RelError: 1.31, SelError: 19.65% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -316,10 +389,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 1.95 QError: 1.95, RelError: 0.95, SelError: 4.75% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 1.95 +QError: 1.95, RelError: 0.95, SelError: 4.75% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -328,10 +405,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -340,10 +421,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 
1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -352,10 +437,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -364,10 +453,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 10 Cardinality estimates: -heuristic: 6.6 QError: 1.52, RelError: -0.34, SelError: -17% -heuristicIdx: 6.6 QError: 1.52, RelError: -0.34, SelError: -17% -histogram: 10 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 10 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.52, RelError: -0.34, SelError: -17% +heuristicIdx: 6.6 +QError: 1.52, RelError: -0.34, SelError: -17% +histogram: 10 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 10 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -376,10 +469,14 @@ histogramIdx: 10 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 11 Cardinality estimates: -heuristic: 6.6 QError: 1.67, RelError: -0.4, SelError: -22% -heuristicIdx: 6.6 QError: 1.67, RelError: -0.4, SelError: -22% -histogram: 11 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 11 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.67, RelError: -0.4, SelError: -22% +heuristicIdx: 6.6 +QError: 1.67, RelError: -0.4, SelError: -22% +histogram: 11 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 11 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -388,10 +485,14 @@ histogramIdx: 11 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 9 Cardinality estimates: -heuristic: 9 QError: 1, RelError: 0, SelError: 0% -heuristicIdx: 9 QError: 1, RelError: 0, SelError: 0% -histogram: 9 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1, RelError: 0, SelError: 0% +heuristicIdx: 9 +QError: 1, RelError: 0, SelError: 0% +histogram: 9 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 9 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -400,10 +501,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 10 Cardinality estimates: -heuristic: 9 QError: 1.11, RelError: -0.1, SelError: -5% -heuristicIdx: 9 QError: 1.11, RelError: -0.1, SelError: -5% -histogram: 10 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 10 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1.11, RelError: -0.1, SelError: -5% +heuristicIdx: 9 +QError: 1.11, RelError: -0.1, SelError: -5% +histogram: 10 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 10 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -412,10 +517,14 @@ histogramIdx: 10 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 13 Cardinality estimates: -heuristic: 6.6 QError: 1.97, RelError: -0.49, SelError: -32% -heuristicIdx: 6.6 QError: 1.97, RelError: -0.49, SelError: -32% -histogram: 13 QError: 1, RelError: 0, 
SelError: 0% -histogramIdx: 13 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.97, RelError: -0.49, SelError: -32% +heuristicIdx: 6.6 +QError: 1.97, RelError: -0.49, SelError: -32% +histogram: 13 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 13 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -424,10 +533,14 @@ histogramIdx: 13 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 18 Cardinality estimates: -heuristic: 6.6 QError: 2.73, RelError: -0.63, SelError: -57% -heuristicIdx: 6.6 QError: 2.73, RelError: -0.63, SelError: -57% -histogram: 18 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 18 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.73, RelError: -0.63, SelError: -57% +heuristicIdx: 6.6 +QError: 2.73, RelError: -0.63, SelError: -57% +histogram: 18 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 18 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -436,10 +549,14 @@ histogramIdx: 18 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 9 QError: 9, RelError: 8, SelError: 40% -heuristicIdx: 9 QError: 9, RelError: 8, SelError: 40% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 9, RelError: 8, SelError: 40% +heuristicIdx: 9 +QError: 9, RelError: 8, SelError: 40% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -448,10 +565,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -460,10 +581,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 19 Cardinality estimates: -heuristic: 6.6 QError: 2.88, RelError: -0.65, SelError: -62% -heuristicIdx: 6.6 QError: 2.88, RelError: -0.65, SelError: -62% -histogram: 19 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 19 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.88, RelError: -0.65, SelError: -62% +heuristicIdx: 6.6 +QError: 2.88, RelError: -0.65, SelError: -62% +histogram: 19 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 19 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -472,10 +597,14 @@ histogramIdx: 19 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 20 Cardinality estimates: -heuristic: 6.6 QError: 3.03, RelError: -0.67, SelError: -67% -heuristicIdx: 6.6 QError: 3.03, RelError: -0.67, SelError: -67% -histogram: 20 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 20 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 3.03, RelError: -0.67, SelError: -67% +heuristicIdx: 6.6 +QError: 3.03, RelError: -0.67, SelError: -67% +histogram: 20 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 20 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -484,10 +613,14 @@ histogramIdx: 20 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 9 QError: 9, RelError: 0.9, SelError: 45% -heuristicIdx: 9 QError: 9, RelError: 0.9, SelError: 45% 
-histogram: 0 QError: 0, RelError: 0, SelError: 0% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 9 +QError: 9, RelError: 0.9, SelError: 45% +heuristicIdx: 9 +QError: 9, RelError: 0.9, SelError: 45% +histogram: 0 +QError: 0, RelError: 0, SelError: 0% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -496,10 +629,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 9 QError: 9, RelError: 8, SelError: 40% -heuristicIdx: 9 QError: 9, RelError: 8, SelError: 40% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 9, RelError: 8, SelError: 40% +heuristicIdx: 9 +QError: 9, RelError: 8, SelError: 40% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -508,10 +645,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 3.69 QError: 3.69, RelError: 2.69, SelError: 13.45% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 3.69 +QError: 3.69, RelError: 2.69, SelError: 13.45% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -520,10 +661,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33% -histogram: 6.67 QError: 6.67, RelError: 0.67, SelError: 33.35% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 0.66, SelError: 33% +histogram: 6.67 +QError: 6.67, RelError: 0.67, SelError: 33.35% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -532,10 +677,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 5.86 QError: 5.86, RelError: 4.86, SelError: 24.3% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 5.86 +QError: 5.86, RelError: 4.86, SelError: 24.3% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -544,10 +693,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 2.85 QError: 2.85, RelError: 1.85, SelError: 9.25% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 2.85 +QError: 2.85, RelError: 1.85, SelError: 9.25% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -556,10 +709,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, 
RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -568,10 +725,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33% -histogram: 0 QError: 0, RelError: 0, SelError: 0% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 0.66, SelError: 33% +histogram: 0 +QError: 0, RelError: 0, SelError: 0% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -580,10 +741,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -histogram: 3.79 QError: 1.9, RelError: 0.9, SelError: 8.95% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +histogram: 3.79 +QError: 1.9, RelError: 0.9, SelError: 8.95% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -592,10 +757,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 7.42 QError: 7.42, RelError: 6.42, SelError: 32.1% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 7.42 +QError: 7.42, RelError: 6.42, SelError: 32.1% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -604,10 +773,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33% -histogram: 5.02 QError: 5.02, RelError: 0.5, SelError: 25.1% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 0.66, SelError: 33% +histogram: 5.02 +QError: 5.02, RelError: 0.5, SelError: 25.1% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -616,10 +789,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33% -histogram: 1.9 QError: 1.9, RelError: 0.19, SelError: 9.5% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 0.66, SelError: 33% +histogram: 1.9 +QError: 1.9, RelError: 0.19, SelError: 9.5% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -628,10 +805,14 @@ 
histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -histogram: 2 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +histogram: 2 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -640,10 +821,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -652,10 +837,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -histogram: 4.61 QError: 2.31, RelError: 1.31, SelError: 13.05% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +histogram: 4.61 +QError: 2.31, RelError: 1.31, SelError: 13.05% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -664,10 +853,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 6.97 QError: 6.97, RelError: 5.97, SelError: 29.85% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 6.97 +QError: 6.97, RelError: 5.97, SelError: 29.85% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -676,10 +869,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -histogram: 6.26 QError: 2.09, RelError: 1.09, SelError: 16.3% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +heuristicIdx: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +histogram: 6.26 +QError: 2.09, RelError: 1.09, SelError: 16.3% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -688,10 +885,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -histogram: 3 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +heuristicIdx: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +histogram: 3 +QError: 1, RelError: 
0, SelError: 0% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -700,10 +901,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 3.43, SelError: 17.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 5.6, SelError: 28% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 3.43, SelError: 17.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 5.6, SelError: 28% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -712,10 +917,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33% -histogram: 0 QError: 0, RelError: 0, SelError: 0% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 6.6 +QError: 6.6, RelError: 0.66, SelError: 33% +histogram: 0 +QError: 0, RelError: 0, SelError: 0% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -724,10 +933,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.47 QError: 2.24, RelError: 1.23, SelError: 12.35% -heuristicIdx: 4.47 QError: 2.24, RelError: 1.23, SelError: 12.35% -histogram: 2 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 2.24, RelError: 1.23, SelError: 12.35% +heuristicIdx: 4.47 +QError: 2.24, RelError: 1.23, SelError: 12.35% +histogram: 2 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -736,10 +949,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.47 QError: 2.24, RelError: 1.23, SelError: 12.35% -heuristicIdx: 4.47 QError: 2.24, RelError: 1.23, SelError: 12.35% -histogram: 2 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 2.24, RelError: 1.23, SelError: 12.35% +heuristicIdx: 4.47 +QError: 2.24, RelError: 1.23, SelError: 12.35% +histogram: 2 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -748,10 +965,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -histogram: 2 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +heuristicIdx: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +histogram: 2 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -760,10 +981,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -histogram: 3 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +heuristicIdx: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% 
+histogram: 3 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -772,10 +997,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 18 Cardinality estimates: -heuristic: 9 QError: 2, RelError: -0.5, SelError: -45% -heuristicIdx: 9 QError: 2, RelError: -0.5, SelError: -45% -histogram: 18 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 18 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 2, RelError: -0.5, SelError: -45% +heuristicIdx: 9 +QError: 2, RelError: -0.5, SelError: -45% +histogram: 18 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 18 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -784,10 +1013,14 @@ histogramIdx: 18 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 19 Cardinality estimates: -heuristic: 9 QError: 2.11, RelError: -0.53, SelError: -50% -heuristicIdx: 9 QError: 2.11, RelError: -0.53, SelError: -50% -histogram: 19 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 19 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 2.11, RelError: -0.53, SelError: -50% +heuristicIdx: 9 +QError: 2.11, RelError: -0.53, SelError: -50% +histogram: 19 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 19 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -796,10 +1029,14 @@ histogramIdx: 19 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -histogram: 3 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +heuristicIdx: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +histogram: 3 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -808,10 +1045,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 6 Cardinality estimates: -heuristic: 6.6 QError: 1.1, RelError: 0.1, SelError: 3% -heuristicIdx: 6.6 QError: 1.1, RelError: 0.1, SelError: 3% -histogram: 6 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.1, RelError: 0.1, SelError: 3% +heuristicIdx: 6.6 +QError: 1.1, RelError: 0.1, SelError: 3% +histogram: 6 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 6 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -820,10 +1061,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 10 Cardinality estimates: -heuristic: 9 QError: 1.11, RelError: -0.1, SelError: -5% -heuristicIdx: 9 QError: 1.11, RelError: -0.1, SelError: -5% -histogram: 10 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 10 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1.11, RelError: -0.1, SelError: -5% +heuristicIdx: 9 +QError: 1.11, RelError: -0.1, SelError: -5% +histogram: 10 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 10 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -832,10 +1077,14 @@ histogramIdx: 10 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 4.47 QError: 1.49, RelError: 0.49, SelError: 7.35% -heuristicIdx: 4.47 QError: 1.49, RelError: 0.49, SelError: 7.35% -histogram: 3 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 1.49, RelError: 0.49, SelError: 7.35% +heuristicIdx: 4.47 +QError: 1.49, RelError: 0.49, SelError: 7.35% 
+histogram: 3 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -844,10 +1093,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 12 Cardinality estimates: -heuristic: 6.6 QError: 1.82, RelError: -0.45, SelError: -27% -heuristicIdx: 6.6 QError: 1.82, RelError: -0.45, SelError: -27% -histogram: 12 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 12 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.82, RelError: -0.45, SelError: -27% +heuristicIdx: 6.6 +QError: 1.82, RelError: -0.45, SelError: -27% +histogram: 12 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 12 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -856,10 +1109,14 @@ histogramIdx: 12 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 14 Cardinality estimates: -heuristic: 6.6 QError: 2.12, RelError: -0.53, SelError: -37% -heuristicIdx: 6.6 QError: 2.12, RelError: -0.53, SelError: -37% -histogram: 14 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 14 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.12, RelError: -0.53, SelError: -37% +heuristicIdx: 6.6 +QError: 2.12, RelError: -0.53, SelError: -37% +histogram: 14 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 14 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -868,10 +1125,14 @@ histogramIdx: 14 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 9 Cardinality estimates: -heuristic: 9 QError: 1, RelError: 0, SelError: 0% -heuristicIdx: 9 QError: 1, RelError: 0, SelError: 0% -histogram: 9 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1, RelError: 0, SelError: 0% +heuristicIdx: 9 +QError: 1, RelError: 0, SelError: 0% +histogram: 9 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 9 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -880,10 +1141,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 9 Cardinality estimates: -heuristic: 9 QError: 1, RelError: 0, SelError: 0% -heuristicIdx: 9 QError: 1, RelError: 0, SelError: 0% -histogram: 9 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1, RelError: 0, SelError: 0% +heuristicIdx: 9 +QError: 1, RelError: 0, SelError: 0% +histogram: 9 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 9 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -892,10 +1157,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 9 Cardinality estimates: -heuristic: 9 QError: 1, RelError: 0, SelError: 0% -heuristicIdx: 9 QError: 1, RelError: 0, SelError: 0% -histogram: 9 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1, RelError: 0, SelError: 0% +heuristicIdx: 9 +QError: 1, RelError: 0, SelError: 0% +histogram: 9 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 9 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -904,10 +1173,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.47 QError: 1.12, RelError: -0.11, SelError: -2.65% -heuristicIdx: 4.47 QError: 1.12, RelError: -0.11, SelError: -2.65% -histogram: 5 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 5 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 1.12, RelError: -0.11, SelError: -2.65% +heuristicIdx: 4.47 +QError: 1.12, RelError: -0.11, SelError: -2.65% +histogram: 5 +QError: 1, RelError: 0, 
SelError: 0% +histogramIdx: 5 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -916,10 +1189,14 @@ histogramIdx: 5 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 16 Cardinality estimates: -heuristic: 6.6 QError: 2.42, RelError: -0.59, SelError: -47% -heuristicIdx: 6.6 QError: 2.42, RelError: -0.59, SelError: -47% -histogram: 16 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 16 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.42, RelError: -0.59, SelError: -47% +heuristicIdx: 6.6 +QError: 2.42, RelError: -0.59, SelError: -47% +histogram: 16 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 16 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -928,10 +1205,14 @@ histogramIdx: 16 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.47 QError: 1.12, RelError: -0.11, SelError: -2.65% -heuristicIdx: 4.47 QError: 1.12, RelError: -0.11, SelError: -2.65% -histogram: 5 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 5 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 1.12, RelError: -0.11, SelError: -2.65% +heuristicIdx: 4.47 +QError: 1.12, RelError: -0.11, SelError: -2.65% +histogram: 5 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 5 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -940,10 +1221,14 @@ histogramIdx: 5 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 18 Cardinality estimates: -heuristic: 6.6 QError: 2.73, RelError: -0.63, SelError: -57% -heuristicIdx: 6.6 QError: 2.73, RelError: -0.63, SelError: -57% -histogram: 18 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 18 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.73, RelError: -0.63, SelError: -57% +heuristicIdx: 6.6 +QError: 2.73, RelError: -0.63, SelError: -57% +histogram: 18 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 18 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -952,10 +1237,14 @@ histogramIdx: 18 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 19 Cardinality estimates: -heuristic: 6.6 QError: 2.88, RelError: -0.65, SelError: -62% -heuristicIdx: 6.6 QError: 2.88, RelError: -0.65, SelError: -62% -histogram: 19 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 19 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.88, RelError: -0.65, SelError: -62% +heuristicIdx: 6.6 +QError: 2.88, RelError: -0.65, SelError: -62% +histogram: 19 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 19 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -964,10 +1253,14 @@ histogramIdx: 19 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 4 Cardinality estimates: -heuristic: 9 QError: 2.25, RelError: 1.25, SelError: 25% -heuristicIdx: 9 QError: 2.25, RelError: 1.25, SelError: 25% -histogram: 4 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 2.25, RelError: 1.25, SelError: 25% +heuristicIdx: 9 +QError: 2.25, RelError: 1.25, SelError: 25% +histogram: 4 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 4 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -976,10 +1269,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 6 Cardinality estimates: -heuristic: 9 QError: 1.5, RelError: 0.5, SelError: 15% -heuristicIdx: 9 QError: 1.5, RelError: 0.5, SelError: 15% -histogram: 6 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 1.5, RelError: 0.5, SelError: 15% +heuristicIdx: 9 +QError: 1.5, RelError: 0.5, 
SelError: 15% +histogram: 6 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 6 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -988,10 +1285,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 20 Cardinality estimates: -heuristic: 6.6 QError: 3.03, RelError: -0.67, SelError: -67% -heuristicIdx: 6.6 QError: 3.03, RelError: -0.67, SelError: -67% -histogram: 20 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 20 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 3.03, RelError: -0.67, SelError: -67% +heuristicIdx: 6.6 +QError: 3.03, RelError: -0.67, SelError: -67% +histogram: 20 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 20 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1000,10 +1301,14 @@ histogramIdx: 20 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -histogram: 2.92 QError: 1.46, RelError: 0.46, SelError: 4.6% -histogramIdx: 2.92 QError: 1.46, RelError: 0.46, SelError: 4.6% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +histogram: 2.92 +QError: 1.46, RelError: 0.46, SelError: 4.6% +histogramIdx: 2.92 +QError: 1.46, RelError: 0.46, SelError: 4.6% [jsTest] ---- @@ -1012,10 +1317,14 @@ histogramIdx: 2.92 QError: 1.46, RelError: 0.46, SelError: 4.6% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -histogram: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85% -histogramIdx: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +histogram: 1.63 +QError: 1.23, RelError: -0.19, SelError: -1.85% +histogramIdx: 1.63 +QError: 1.23, RelError: -0.19, SelError: -1.85% [jsTest] ---- @@ -1024,10 +1333,14 @@ histogramIdx: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85% Actual cardinality: 3 Cardinality estimates: -heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -heuristicIdx: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -histogram: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7% -histogramIdx: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7% +heuristic: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +heuristicIdx: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +histogram: 4.74 +QError: 1.58, RelError: 0.58, SelError: 8.7% +histogramIdx: 4.74 +QError: 1.58, RelError: 0.58, SelError: 8.7% [jsTest] ---- @@ -1036,10 +1349,14 @@ histogramIdx: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7% Actual cardinality: 3 Cardinality estimates: -heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -heuristicIdx: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -histogram: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4% -histogramIdx: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4% +heuristic: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +heuristicIdx: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +histogram: 4.08 +QError: 1.36, RelError: 0.36, SelError: 5.4% +histogramIdx: 4.08 +QError: 1.36, RelError: 0.36, SelError: 5.4% [jsTest] ---- @@ -1048,10 +1365,14 @@ histogramIdx: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4% Actual cardinality: 3 Cardinality estimates: -heuristic: 
4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -heuristicIdx: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -histogram: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7% -histogramIdx: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7% +heuristic: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +heuristicIdx: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +histogram: 4.74 +QError: 1.58, RelError: 0.58, SelError: 8.7% +histogramIdx: 4.74 +QError: 1.58, RelError: 0.58, SelError: 8.7% [jsTest] ---- @@ -1060,10 +1381,14 @@ histogramIdx: 4.74 QError: 1.58, RelError: 0.58, SelError: 8.7% Actual cardinality: 3 Cardinality estimates: -heuristic: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -heuristicIdx: 4.43 QError: 1.48, RelError: 0.48, SelError: 7.15% -histogram: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4% -histogramIdx: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4% +heuristic: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +heuristicIdx: 4.43 +QError: 1.48, RelError: 0.48, SelError: 7.15% +histogram: 4.08 +QError: 1.36, RelError: 0.36, SelError: 5.4% +histogramIdx: 4.08 +QError: 1.36, RelError: 0.36, SelError: 5.4% [jsTest] ---- @@ -1072,10 +1397,14 @@ histogramIdx: 4.08 QError: 1.36, RelError: 0.36, SelError: 5.4% Actual cardinality: 7 Cardinality estimates: -heuristic: 4.43 QError: 1.58, RelError: -0.37, SelError: -12.85% -heuristicIdx: 4.43 QError: 1.58, RelError: -0.37, SelError: -12.85% -histogram: 8.54 QError: 1.22, RelError: 0.22, SelError: 7.7% -histogramIdx: 8.54 QError: 1.22, RelError: 0.22, SelError: 7.7% +heuristic: 4.43 +QError: 1.58, RelError: -0.37, SelError: -12.85% +heuristicIdx: 4.43 +QError: 1.58, RelError: -0.37, SelError: -12.85% +histogram: 8.54 +QError: 1.22, RelError: 0.22, SelError: 7.7% +histogramIdx: 8.54 +QError: 1.22, RelError: 0.22, SelError: 7.7% [jsTest] ---- @@ -1084,10 +1413,14 @@ histogramIdx: 8.54 QError: 1.22, RelError: 0.22, SelError: 7.7% Actual cardinality: 7 Cardinality estimates: -heuristic: 4.43 QError: 1.58, RelError: -0.37, SelError: -12.85% -heuristicIdx: 4.43 QError: 1.58, RelError: -0.37, SelError: -12.85% -histogram: 8.99 QError: 1.28, RelError: 0.28, SelError: 9.95% -histogramIdx: 8.99 QError: 1.28, RelError: 0.28, SelError: 9.95% +heuristic: 4.43 +QError: 1.58, RelError: -0.37, SelError: -12.85% +heuristicIdx: 4.43 +QError: 1.58, RelError: -0.37, SelError: -12.85% +histogram: 8.99 +QError: 1.28, RelError: 0.28, SelError: 9.95% +histogramIdx: 8.99 +QError: 1.28, RelError: 0.28, SelError: 9.95% [jsTest] ---- @@ -1096,10 +1429,14 @@ histogramIdx: 8.99 QError: 1.28, RelError: 0.28, SelError: 9.95% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -histogram: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75% -histogramIdx: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +histogram: 7.75 +QError: 3.88, RelError: 2.88, SelError: 28.75% +histogramIdx: 7.75 +QError: 3.88, RelError: 2.88, SelError: 28.75% [jsTest] ---- @@ -1108,10 +1445,14 @@ histogramIdx: 7.75 QError: 3.88, RelError: 2.88, SelError: 28.75% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -histogram: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85% 
-histogramIdx: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +histogram: 1.63 +QError: 1.23, RelError: -0.19, SelError: -1.85% +histogramIdx: 1.63 +QError: 1.23, RelError: -0.19, SelError: -1.85% [jsTest] ---- @@ -1120,10 +1461,14 @@ histogramIdx: 1.63 QError: 1.23, RelError: -0.19, SelError: -1.85% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -histogram: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25% -histogramIdx: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25% +heuristic: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +heuristicIdx: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +histogram: 8.05 +QError: 1.61, RelError: 0.61, SelError: 15.25% +histogramIdx: 8.05 +QError: 1.61, RelError: 0.61, SelError: 15.25% [jsTest] ---- @@ -1132,10 +1477,14 @@ histogramIdx: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -histogram: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75% -histogramIdx: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75% +heuristic: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +heuristicIdx: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +histogram: 7.35 +QError: 1.47, RelError: 0.47, SelError: 11.75% +histogramIdx: 7.35 +QError: 1.47, RelError: 0.47, SelError: 11.75% [jsTest] ---- @@ -1144,10 +1493,14 @@ histogramIdx: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -histogram: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25% -histogramIdx: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25% +heuristic: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +heuristicIdx: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +histogram: 8.05 +QError: 1.61, RelError: 0.61, SelError: 15.25% +histogramIdx: 8.05 +QError: 1.61, RelError: 0.61, SelError: 15.25% [jsTest] ---- @@ -1156,10 +1509,14 @@ histogramIdx: 8.05 QError: 1.61, RelError: 0.61, SelError: 15.25% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -histogram: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75% -histogramIdx: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75% +heuristic: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +heuristicIdx: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +histogram: 7.35 +QError: 1.47, RelError: 0.47, SelError: 11.75% +histogramIdx: 7.35 +QError: 1.47, RelError: 0.47, SelError: 11.75% [jsTest] ---- @@ -1168,10 +1525,14 @@ histogramIdx: 7.35 QError: 1.47, RelError: 0.47, SelError: 11.75% Actual cardinality: 8 Cardinality estimates: -heuristic: 4.43 QError: 1.81, RelError: -0.45, SelError: -17.85% -heuristicIdx: 4.43 QError: 1.81, RelError: -0.45, SelError: -17.85% -histogram: 8.77 QError: 1.1, RelError: 0.1, SelError: 3.85% -histogramIdx: 8.77 QError: 1.1, RelError: 0.1, SelError: 3.85% +heuristic: 4.43 +QError: 1.81, RelError: -0.45, SelError: -17.85% +heuristicIdx: 
4.43 +QError: 1.81, RelError: -0.45, SelError: -17.85% +histogram: 8.77 +QError: 1.1, RelError: 0.1, SelError: 3.85% +histogramIdx: 8.77 +QError: 1.1, RelError: 0.1, SelError: 3.85% [jsTest] ---- @@ -1180,10 +1541,14 @@ histogramIdx: 8.77 QError: 1.1, RelError: 0.1, SelError: 3.85% Actual cardinality: 8 Cardinality estimates: -heuristic: 4.43 QError: 1.81, RelError: -0.45, SelError: -17.85% -heuristicIdx: 4.43 QError: 1.81, RelError: -0.45, SelError: -17.85% -histogram: 13.07 QError: 1.63, RelError: 0.63, SelError: 25.35% -histogramIdx: 13.07 QError: 1.63, RelError: 0.63, SelError: 25.35% +heuristic: 4.43 +QError: 1.81, RelError: -0.45, SelError: -17.85% +heuristicIdx: 4.43 +QError: 1.81, RelError: -0.45, SelError: -17.85% +histogram: 13.07 +QError: 1.63, RelError: 0.63, SelError: 25.35% +histogramIdx: 13.07 +QError: 1.63, RelError: 0.63, SelError: 25.35% [jsTest] ---- @@ -1192,10 +1557,14 @@ histogramIdx: 13.07 QError: 1.63, RelError: 0.63, SelError: 25.35% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -histogram: 5.85 QError: 1.17, RelError: 0.17, SelError: 4.25% -histogramIdx: 5.85 QError: 1.17, RelError: 0.17, SelError: 4.25% +heuristic: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +heuristicIdx: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +histogram: 5.85 +QError: 1.17, RelError: 0.17, SelError: 4.25% +histogramIdx: 5.85 +QError: 1.17, RelError: 0.17, SelError: 4.25% [jsTest] ---- @@ -1204,10 +1573,14 @@ histogramIdx: 5.85 QError: 1.17, RelError: 0.17, SelError: 4.25% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -heuristicIdx: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -histogram: 4.08 QError: 1.23, RelError: -0.18, SelError: -4.6% -histogramIdx: 4.08 QError: 1.23, RelError: -0.18, SelError: -4.6% +heuristic: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +heuristicIdx: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +histogram: 4.08 +QError: 1.23, RelError: -0.18, SelError: -4.6% +histogramIdx: 4.08 +QError: 1.23, RelError: -0.18, SelError: -4.6% [jsTest] ---- @@ -1216,10 +1589,14 @@ histogramIdx: 4.08 QError: 1.23, RelError: -0.18, SelError: -4.6% Actual cardinality: 4 Cardinality estimates: -heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -histogram: 4 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +heuristicIdx: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +histogram: 4 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 4 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1228,10 +1605,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 4 Cardinality estimates: -heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -histogram: 4 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +heuristicIdx: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +histogram: 4 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 4 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1240,10 +1621,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% Actual 
cardinality: 4 Cardinality estimates: -heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -histogram: 4 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +heuristicIdx: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +histogram: 4 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 4 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1252,10 +1637,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 4 Cardinality estimates: -heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -histogram: 4 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +heuristicIdx: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +histogram: 4 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 4 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1264,10 +1653,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1276,10 +1669,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1288,10 +1685,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 7 Cardinality estimates: -heuristic: 6.6 QError: 1.06, RelError: -0.06, SelError: -2% -heuristicIdx: 6.6 QError: 1.06, RelError: -0.06, SelError: -2% -histogram: 7 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 7 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.06, RelError: -0.06, SelError: -2% +heuristicIdx: 6.6 +QError: 1.06, RelError: -0.06, SelError: -2% +histogram: 7 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 7 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1300,10 +1701,14 @@ histogramIdx: 7 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 8 Cardinality estimates: -heuristic: 6.6 QError: 1.21, RelError: -0.18, SelError: -7% -heuristicIdx: 6.6 QError: 1.21, RelError: -0.18, SelError: -7% -histogram: 8 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 8 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.21, RelError: -0.18, SelError: -7% +heuristicIdx: 6.6 +QError: 1.21, RelError: -0.18, SelError: -7% +histogram: 8 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 8 +QError: 1, RelError: 0, SelError: 0% [jsTest] 
---- @@ -1312,10 +1717,14 @@ histogramIdx: 8 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 9 QError: 4.5, RelError: 3.5, SelError: 35% -heuristicIdx: 9 QError: 4.5, RelError: 3.5, SelError: 35% -histogram: 2 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 4.5, RelError: 3.5, SelError: 35% +heuristicIdx: 9 +QError: 4.5, RelError: 3.5, SelError: 35% +histogram: 2 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1324,10 +1733,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 9 QError: 3, RelError: 2, SelError: 30% -heuristicIdx: 9 QError: 3, RelError: 2, SelError: 30% -histogram: 3 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 3, RelError: 2, SelError: 30% +heuristicIdx: 9 +QError: 3, RelError: 2, SelError: 30% +histogram: 3 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1336,10 +1749,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 6.6 QError: 6.6, RelError: 0.66, SelError: 33% -heuristicIdx: 6.6 QError: 6.6, RelError: 0.66, SelError: 33% -histogram: 0 QError: 0, RelError: 0, SelError: 0% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 6.6, RelError: 0.66, SelError: 33% +heuristicIdx: 6.6 +QError: 6.6, RelError: 0.66, SelError: 33% +histogram: 0 +QError: 0, RelError: 0, SelError: 0% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -1348,10 +1765,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -histogram: 3 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +heuristicIdx: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +histogram: 3 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1360,10 +1781,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 4 Cardinality estimates: -heuristic: 9 QError: 2.25, RelError: 1.25, SelError: 25% -heuristicIdx: 9 QError: 2.25, RelError: 1.25, SelError: 25% -histogram: 4 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 2.25, RelError: 1.25, SelError: 25% +heuristicIdx: 9 +QError: 2.25, RelError: 1.25, SelError: 25% +histogram: 4 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 4 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1372,10 +1797,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -heuristicIdx: 4.47 QError: 4.47, RelError: 3.47, SelError: 17.35% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +heuristicIdx: 4.47 +QError: 4.47, RelError: 3.47, SelError: 17.35% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1384,10 +1813,14 @@ 
histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 8 Cardinality estimates: -heuristic: 6.6 QError: 1.21, RelError: -0.18, SelError: -7% -heuristicIdx: 6.6 QError: 1.21, RelError: -0.18, SelError: -7% -histogram: 8 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 8 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.21, RelError: -0.18, SelError: -7% +heuristicIdx: 6.6 +QError: 1.21, RelError: -0.18, SelError: -7% +histogram: 8 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 8 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1396,10 +1829,14 @@ histogramIdx: 8 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 9 Cardinality estimates: -heuristic: 6.6 QError: 1.36, RelError: -0.27, SelError: -12% -heuristicIdx: 6.6 QError: 1.36, RelError: -0.27, SelError: -12% -histogram: 9 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% +heuristic: 6.6 +QError: 1.36, RelError: -0.27, SelError: -12% +heuristicIdx: 6.6 +QError: 1.36, RelError: -0.27, SelError: -12% +histogram: 9 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 9 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1408,10 +1845,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 1 Cardinality estimates: -heuristic: 9 QError: 9, RelError: 8, SelError: 40% -heuristicIdx: 9 QError: 9, RelError: 8, SelError: 40% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 9, RelError: 8, SelError: 40% +heuristicIdx: 9 +QError: 9, RelError: 8, SelError: 40% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1420,10 +1861,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 9 QError: 4.5, RelError: 3.5, SelError: 35% -heuristicIdx: 9 QError: 4.5, RelError: 3.5, SelError: 35% -histogram: 2 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 9 +QError: 4.5, RelError: 3.5, SelError: 35% +heuristicIdx: 9 +QError: 4.5, RelError: 3.5, SelError: 35% +histogram: 2 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1432,10 +1877,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 4 Cardinality estimates: -heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -heuristicIdx: 6.6 QError: 1.65, RelError: 0.65, SelError: 13% -histogram: 4.14 QError: 1.03, RelError: 0.03, SelError: 0.7% -histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +heuristicIdx: 6.6 +QError: 1.65, RelError: 0.65, SelError: 13% +histogram: 4.14 +QError: 1.03, RelError: 0.03, SelError: 0.7% +histogramIdx: 4 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1444,10 +1893,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0% -histogram: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 0 +QError: 0, RelError: 0, SelError: 0% +histogram: 0.32 +QError: 0.32, RelError: 0.03, SelError: 1.6% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] 
---- @@ -1456,10 +1909,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -histogram: 1.41 QError: 1.42, RelError: -0.3, SelError: -2.95% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +histogram: 1.41 +QError: 1.42, RelError: -0.3, SelError: -2.95% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1468,10 +1925,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 4 Cardinality estimates: -heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -heuristicIdx: 6.6 QError: 1.65, RelError: 0.65, SelError: 13% -histogram: 3.79 QError: 1.06, RelError: -0.05, SelError: -1.05% -histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +heuristicIdx: 6.6 +QError: 1.65, RelError: 0.65, SelError: 13% +histogram: 3.79 +QError: 1.06, RelError: -0.05, SelError: -1.05% +histogramIdx: 4 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1480,10 +1941,14 @@ histogramIdx: 4 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 6.6 QError: 3.3, RelError: 2.3, SelError: 23% -histogram: 2.53 QError: 1.26, RelError: 0.26, SelError: 2.65% -histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 6.6 +QError: 3.3, RelError: 2.3, SelError: 23% +histogram: 2.53 +QError: 1.26, RelError: 0.26, SelError: 2.65% +histogramIdx: 2 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1492,10 +1957,14 @@ histogramIdx: 2 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0% -histogram: 0.59 QError: 0.59, RelError: 0.06, SelError: 2.95% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 0 +QError: 0, RelError: 0, SelError: 0% +histogram: 0.59 +QError: 0.59, RelError: 0.06, SelError: 2.95% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -1504,10 +1973,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0% -histogram: 0.77 QError: 0.77, RelError: 0.08, SelError: 3.85% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 0 +QError: 0, RelError: 0, SelError: 0% +histogram: 0.77 +QError: 0.77, RelError: 0.08, SelError: 3.85% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -1516,10 +1989,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 6 Cardinality estimates: -heuristic: 4.43 QError: 1.35, RelError: -0.26, SelError: -7.85% -heuristicIdx: 6.6 QError: 1.1, RelError: 0.1, SelError: 3% -histogram: 4.24 QError: 1.42, RelError: -0.29, SelError: -8.8% -histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.35, RelError: -0.26, SelError: -7.85% +heuristicIdx: 6.6 
+QError: 1.1, RelError: 0.1, SelError: 3% +histogram: 4.24 +QError: 1.42, RelError: -0.29, SelError: -8.8% +histogramIdx: 6 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1528,10 +2005,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 5 Cardinality estimates: -heuristic: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -heuristicIdx: 6.6 QError: 1.32, RelError: 0.32, SelError: 8% -histogram: 4.43 QError: 1.13, RelError: -0.11, SelError: -2.85% -histogramIdx: 5 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +heuristicIdx: 6.6 +QError: 1.32, RelError: 0.32, SelError: 8% +histogram: 4.43 +QError: 1.13, RelError: -0.11, SelError: -2.85% +histogramIdx: 5 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1540,10 +2021,14 @@ histogramIdx: 5 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0% -histogram: 1.77 QError: 1.77, RelError: 0.18, SelError: 8.85% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 0 +QError: 0, RelError: 0, SelError: 0% +histogram: 1.77 +QError: 1.77, RelError: 0.18, SelError: 8.85% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -1552,10 +2037,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 4.43 QError: 4.43, RelError: 0.44, SelError: 22.15% -heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0% -histogram: 1.5 QError: 1.5, RelError: 0.15, SelError: 7.5% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 4.43, RelError: 0.44, SelError: 22.15% +heuristicIdx: 0 +QError: 0, RelError: 0, SelError: 0% +histogram: 1.5 +QError: 1.5, RelError: 0.15, SelError: 7.5% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -1564,10 +2053,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 9 Cardinality estimates: -heuristic: 4.43 QError: 2.03, RelError: -0.51, SelError: -22.85% -heuristicIdx: 6.6 QError: 1.36, RelError: -0.27, SelError: -12% -histogram: 6.36 QError: 1.42, RelError: -0.29, SelError: -13.2% -histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.03, RelError: -0.51, SelError: -22.85% +heuristicIdx: 6.6 +QError: 1.36, RelError: -0.27, SelError: -12% +histogram: 6.36 +QError: 1.42, RelError: -0.29, SelError: -13.2% +histogramIdx: 9 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1576,10 +2069,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 2.11 QError: 2.11, RelError: 0.21, SelError: 10.55% -heuristicIdx: 2.11 QError: 2.11, RelError: 0.21, SelError: 10.55% -histogram: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1% -histogramIdx: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1% +heuristic: 2.11 +QError: 2.11, RelError: 0.21, SelError: 10.55% +heuristicIdx: 2.11 +QError: 2.11, RelError: 0.21, SelError: 10.55% +histogram: 0.22 +QError: 0.22, RelError: 0.02, SelError: 1.1% +histogramIdx: 0.22 +QError: 0.22, RelError: 0.02, SelError: 1.1% [jsTest] ---- @@ -1588,10 +2085,14 @@ histogramIdx: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1% Actual cardinality: 0 Cardinality estimates: -heuristic: 2.1 QError: 2.1, RelError: 0.21, SelError: 10.5% -heuristicIdx: 2.57 QError: 2.57, 
RelError: 0.26, SelError: 12.85% -histogram: 0.59 QError: 0.59, RelError: 0.06, SelError: 2.95% -histogramIdx: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1% +heuristic: 2.1 +QError: 2.1, RelError: 0.21, SelError: 10.5% +heuristicIdx: 2.57 +QError: 2.57, RelError: 0.26, SelError: 12.85% +histogram: 0.59 +QError: 0.59, RelError: 0.06, SelError: 2.95% +histogramIdx: 0.22 +QError: 0.22, RelError: 0.02, SelError: 1.1% [jsTest] ---- @@ -1600,10 +2101,14 @@ histogramIdx: 0.22 QError: 0.22, RelError: 0.02, SelError: 1.1% Actual cardinality: 1 Cardinality estimates: -heuristic: 2.57 QError: 2.57, RelError: 1.57, SelError: 7.85% -heuristicIdx: 2.57 QError: 2.57, RelError: 1.57, SelError: 7.85% -histogram: 1 QError: 1, RelError: 0, SelError: 0% -histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% +heuristic: 2.57 +QError: 2.57, RelError: 1.57, SelError: 7.85% +heuristicIdx: 2.57 +QError: 2.57, RelError: 1.57, SelError: 7.85% +histogram: 1 +QError: 1, RelError: 0, SelError: 0% +histogramIdx: 1 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1612,10 +2117,14 @@ histogramIdx: 1 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 3 QError: 3, RelError: 0.3, SelError: 15% -heuristicIdx: 3 QError: 3, RelError: 0.3, SelError: 15% -histogram: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6% -histogramIdx: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6% +heuristic: 3 +QError: 3, RelError: 0.3, SelError: 15% +heuristicIdx: 3 +QError: 3, RelError: 0.3, SelError: 15% +histogram: 0.32 +QError: 0.32, RelError: 0.03, SelError: 1.6% +histogramIdx: 0.32 +QError: 0.32, RelError: 0.03, SelError: 1.6% [jsTest] ---- @@ -1624,10 +2133,14 @@ histogramIdx: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6% Actual cardinality: 2 Cardinality estimates: -heuristic: 2.57 QError: 1.29, RelError: 0.28, SelError: 2.85% -heuristicIdx: 2.57 QError: 1.29, RelError: 0.28, SelError: 2.85% -histogram: 1.9 QError: 1.05, RelError: -0.05, SelError: -0.5% -histogramIdx: 1.9 QError: 1.05, RelError: -0.05, SelError: -0.5% +heuristic: 2.57 +QError: 1.29, RelError: 0.28, SelError: 2.85% +heuristicIdx: 2.57 +QError: 1.29, RelError: 0.28, SelError: 2.85% +histogram: 1.9 +QError: 1.05, RelError: -0.05, SelError: -0.5% +histogramIdx: 1.9 +QError: 1.05, RelError: -0.05, SelError: -0.5% [jsTest] ---- @@ -1636,10 +2149,14 @@ histogramIdx: 1.9 QError: 1.05, RelError: -0.05, SelError: -0.5% Actual cardinality: 3 Cardinality estimates: -heuristic: 3.11 QError: 1.04, RelError: 0.04, SelError: 0.55% -heuristicIdx: 3.11 QError: 1.04, RelError: 0.04, SelError: 0.55% -histogram: 4.62 QError: 1.54, RelError: 0.54, SelError: 8.1% -histogramIdx: 4.62 QError: 1.54, RelError: 0.54, SelError: 8.1% +heuristic: 3.11 +QError: 1.04, RelError: 0.04, SelError: 0.55% +heuristicIdx: 3.11 +QError: 1.04, RelError: 0.04, SelError: 0.55% +histogram: 4.62 +QError: 1.54, RelError: 0.54, SelError: 8.1% +histogramIdx: 4.62 +QError: 1.54, RelError: 0.54, SelError: 8.1% [jsTest] ---- @@ -1648,10 +2165,14 @@ histogramIdx: 4.62 QError: 1.54, RelError: 0.54, SelError: 8.1% Actual cardinality: 1 Cardinality estimates: -heuristic: 2.57 QError: 2.57, RelError: 1.57, SelError: 7.85% -heuristicIdx: 2.57 QError: 2.57, RelError: 1.57, SelError: 7.85% -histogram: 0.95 QError: 1.05, RelError: -0.05, SelError: -0.25% -histogramIdx: 0.95 QError: 1.05, RelError: -0.05, SelError: -0.25% +heuristic: 2.57 +QError: 2.57, RelError: 1.57, SelError: 7.85% +heuristicIdx: 2.57 +QError: 2.57, RelError: 1.57, SelError: 7.85% +histogram: 0.95 +QError: 
1.05, RelError: -0.05, SelError: -0.25% +histogramIdx: 0.95 +QError: 1.05, RelError: -0.05, SelError: -0.25% [jsTest] ---- @@ -1660,10 +2181,14 @@ histogramIdx: 0.95 QError: 1.05, RelError: -0.05, SelError: -0.25% Actual cardinality: 0 Cardinality estimates: -heuristic: 2.57 QError: 2.57, RelError: 0.26, SelError: 12.85% -heuristicIdx: 2.57 QError: 2.57, RelError: 0.26, SelError: 12.85% -histogram: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6% -histogramIdx: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6% +heuristic: 2.57 +QError: 2.57, RelError: 0.26, SelError: 12.85% +heuristicIdx: 2.57 +QError: 2.57, RelError: 0.26, SelError: 12.85% +histogram: 0.32 +QError: 0.32, RelError: 0.03, SelError: 1.6% +histogramIdx: 0.32 +QError: 0.32, RelError: 0.03, SelError: 1.6% [jsTest] ---- @@ -1672,10 +2197,14 @@ histogramIdx: 0.32 QError: 0.32, RelError: 0.03, SelError: 1.6% Actual cardinality: 0 Cardinality estimates: -heuristic: 2.1 QError: 2.1, RelError: 0.21, SelError: 10.5% -heuristicIdx: 2.1 QError: 2.1, RelError: 0.21, SelError: 10.5% -histogram: 0.65 QError: 0.65, RelError: 0.07, SelError: 3.25% -histogramIdx: 0.65 QError: 0.65, RelError: 0.07, SelError: 3.25% +heuristic: 2.1 +QError: 2.1, RelError: 0.21, SelError: 10.5% +heuristicIdx: 2.1 +QError: 2.1, RelError: 0.21, SelError: 10.5% +histogram: 0.65 +QError: 0.65, RelError: 0.07, SelError: 3.25% +histogramIdx: 0.65 +QError: 0.65, RelError: 0.07, SelError: 3.25% [jsTest] ---- @@ -1684,10 +2213,14 @@ histogramIdx: 0.65 QError: 0.65, RelError: 0.07, SelError: 3.25% Actual cardinality: 0 Cardinality estimates: -heuristic: 1.21 QError: 1.21, RelError: 0.12, SelError: 6.05% -heuristicIdx: 1.21 QError: 1.21, RelError: 0.12, SelError: 6.05% -histogram: 0.08 QError: 0.08, RelError: 0.01, SelError: 0.4% -histogramIdx: 0.08 QError: 0.08, RelError: 0.01, SelError: 0.4% +heuristic: 1.21 +QError: 1.21, RelError: 0.12, SelError: 6.05% +heuristicIdx: 1.21 +QError: 1.21, RelError: 0.12, SelError: 6.05% +histogram: 0.08 +QError: 0.08, RelError: 0.01, SelError: 0.4% +histogramIdx: 0.08 +QError: 0.08, RelError: 0.01, SelError: 0.4% [jsTest] ---- @@ -1696,10 +2229,14 @@ histogramIdx: 0.08 QError: 0.08, RelError: 0.01, SelError: 0.4% Actual cardinality: 6 Cardinality estimates: -heuristic: 4.43 QError: 1.35, RelError: -0.26, SelError: -7.85% -heuristicIdx: 6.6 QError: 1.1, RelError: 0.1, SelError: 3% -histogram: 7.59 QError: 1.26, RelError: 0.26, SelError: 7.95% -histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.35, RelError: -0.26, SelError: -7.85% +heuristicIdx: 6.6 +QError: 1.1, RelError: 0.1, SelError: 3% +histogram: 7.59 +QError: 1.26, RelError: 0.26, SelError: 7.95% +histogramIdx: 6 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1708,10 +2245,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 2.81 QError: 1.07, RelError: -0.06, SelError: -0.95% -heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -histogram: 7.09 QError: 2.36, RelError: 1.36, SelError: 20.45% -histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% +heuristic: 2.81 +QError: 1.07, RelError: -0.06, SelError: -0.95% +heuristicIdx: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +histogram: 7.09 +QError: 2.36, RelError: 1.36, SelError: 20.45% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1720,10 +2261,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 16 Cardinality estimates: -heuristic: 11 QError: 1.45, RelError: -0.31, 
SelError: -25% -heuristicIdx: 11 QError: 1.45, RelError: -0.31, SelError: -25% -histogram: 13.55 QError: 1.18, RelError: -0.15, SelError: -12.25% -histogramIdx: 13.55 QError: 1.18, RelError: -0.15, SelError: -12.25% +heuristic: 11 +QError: 1.45, RelError: -0.31, SelError: -25% +heuristicIdx: 11 +QError: 1.45, RelError: -0.31, SelError: -25% +histogram: 13.55 +QError: 1.18, RelError: -0.15, SelError: -12.25% +histogramIdx: 13.55 +QError: 1.18, RelError: -0.15, SelError: -12.25% [jsTest] ---- @@ -1732,10 +2277,14 @@ histogramIdx: 13.55 QError: 1.18, RelError: -0.15, SelError: -12.25% Actual cardinality: 14 Cardinality estimates: -heuristic: 10.31 QError: 1.36, RelError: -0.26, SelError: -18.45% -heuristicIdx: 10.31 QError: 1.36, RelError: -0.26, SelError: -18.45% -histogram: 13.18 QError: 1.06, RelError: -0.06, SelError: -4.1% -histogramIdx: 13.18 QError: 1.06, RelError: -0.06, SelError: -4.1% +heuristic: 10.31 +QError: 1.36, RelError: -0.26, SelError: -18.45% +heuristicIdx: 10.31 +QError: 1.36, RelError: -0.26, SelError: -18.45% +histogram: 13.18 +QError: 1.06, RelError: -0.06, SelError: -4.1% +histogramIdx: 13.18 +QError: 1.06, RelError: -0.06, SelError: -4.1% [jsTest] ---- @@ -1744,10 +2293,14 @@ histogramIdx: 13.18 QError: 1.06, RelError: -0.06, SelError: -4.1% Actual cardinality: 12 Cardinality estimates: -heuristic: 11.81 QError: 1.02, RelError: -0.02, SelError: -0.95% -heuristicIdx: 11.81 QError: 1.02, RelError: -0.02, SelError: -0.95% -histogram: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85% -histogramIdx: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85% +heuristic: 11.81 +QError: 1.02, RelError: -0.02, SelError: -0.95% +heuristicIdx: 11.81 +QError: 1.02, RelError: -0.02, SelError: -0.95% +histogram: 8.83 +QError: 1.36, RelError: -0.26, SelError: -15.85% +histogramIdx: 8.83 +QError: 1.36, RelError: -0.26, SelError: -15.85% [jsTest] ---- @@ -1756,10 +2309,14 @@ histogramIdx: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85% Actual cardinality: 6 Cardinality estimates: -heuristic: 3.36 QError: 1.79, RelError: -0.44, SelError: -13.2% -heuristicIdx: 9.03 QError: 1.51, RelError: 0.5, SelError: 15.15% -histogram: 8.88 QError: 1.48, RelError: 0.48, SelError: 14.4% -histogramIdx: 4.82 QError: 1.24, RelError: -0.2, SelError: -5.9% +heuristic: 3.36 +QError: 1.79, RelError: -0.44, SelError: -13.2% +heuristicIdx: 9.03 +QError: 1.51, RelError: 0.5, SelError: 15.15% +histogram: 8.88 +QError: 1.48, RelError: 0.48, SelError: 14.4% +histogramIdx: 4.82 +QError: 1.24, RelError: -0.2, SelError: -5.9% [jsTest] ---- @@ -1768,10 +2325,14 @@ histogramIdx: 4.82 QError: 1.24, RelError: -0.2, SelError: -5.9% Actual cardinality: 9 Cardinality estimates: -heuristic: 4.43 QError: 2.03, RelError: -0.51, SelError: -22.85% -heuristicIdx: 6.6 QError: 1.36, RelError: -0.27, SelError: -12% -histogram: 10.44 QError: 1.16, RelError: 0.16, SelError: 7.2% -histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 2.03, RelError: -0.51, SelError: -22.85% +heuristicIdx: 6.6 +QError: 1.36, RelError: -0.27, SelError: -12% +histogram: 10.44 +QError: 1.16, RelError: 0.16, SelError: 7.2% +histogramIdx: 9 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1780,10 +2341,14 @@ histogramIdx: 9 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 3 Cardinality estimates: -heuristic: 2.81 QError: 1.07, RelError: -0.06, SelError: -0.95% -heuristicIdx: 6.6 QError: 2.2, RelError: 1.2, SelError: 18% -histogram: 7.64 QError: 2.55, RelError: 1.55, SelError: 23.2% -histogramIdx: 3 QError: 1, 
RelError: 0, SelError: 0% +heuristic: 2.81 +QError: 1.07, RelError: -0.06, SelError: -0.95% +heuristicIdx: 6.6 +QError: 2.2, RelError: 1.2, SelError: 18% +histogram: 7.64 +QError: 2.55, RelError: 1.55, SelError: 23.2% +histogramIdx: 3 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1792,10 +2357,14 @@ histogramIdx: 3 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 13 Cardinality estimates: -heuristic: 11 QError: 1.18, RelError: -0.15, SelError: -10% -heuristicIdx: 11 QError: 1.18, RelError: -0.15, SelError: -10% -histogram: 10.78 QError: 1.21, RelError: -0.17, SelError: -11.1% -histogramIdx: 10.78 QError: 1.21, RelError: -0.17, SelError: -11.1% +heuristic: 11 +QError: 1.18, RelError: -0.15, SelError: -10% +heuristicIdx: 11 +QError: 1.18, RelError: -0.15, SelError: -10% +histogram: 10.78 +QError: 1.21, RelError: -0.17, SelError: -11.1% +histogramIdx: 10.78 +QError: 1.21, RelError: -0.17, SelError: -11.1% [jsTest] ---- @@ -1804,10 +2373,14 @@ histogramIdx: 10.78 QError: 1.21, RelError: -0.17, SelError: -11.1% Actual cardinality: 11 Cardinality estimates: -heuristic: 10.31 QError: 1.07, RelError: -0.06, SelError: -3.45% -heuristicIdx: 10.31 QError: 1.07, RelError: -0.06, SelError: -3.45% -histogram: 10.25 QError: 1.07, RelError: -0.07, SelError: -3.75% -histogramIdx: 10.25 QError: 1.07, RelError: -0.07, SelError: -3.75% +heuristic: 10.31 +QError: 1.07, RelError: -0.06, SelError: -3.45% +heuristicIdx: 10.31 +QError: 1.07, RelError: -0.06, SelError: -3.45% +histogram: 10.25 +QError: 1.07, RelError: -0.07, SelError: -3.75% +histogramIdx: 10.25 +QError: 1.07, RelError: -0.07, SelError: -3.75% [jsTest] ---- @@ -1816,10 +2389,14 @@ histogramIdx: 10.25 QError: 1.07, RelError: -0.07, SelError: -3.75% Actual cardinality: 7 Cardinality estimates: -heuristic: 11.81 QError: 1.69, RelError: 0.69, SelError: 24.05% -heuristicIdx: 11.81 QError: 1.69, RelError: 0.69, SelError: 24.05% -histogram: 4.18 QError: 1.67, RelError: -0.4, SelError: -14.1% -histogramIdx: 4.18 QError: 1.67, RelError: -0.4, SelError: -14.1% +heuristic: 11.81 +QError: 1.69, RelError: 0.69, SelError: 24.05% +heuristicIdx: 11.81 +QError: 1.69, RelError: 0.69, SelError: 24.05% +histogram: 4.18 +QError: 1.67, RelError: -0.4, SelError: -14.1% +histogramIdx: 4.18 +QError: 1.67, RelError: -0.4, SelError: -14.1% [jsTest] ---- @@ -1828,10 +2405,14 @@ histogramIdx: 4.18 QError: 1.67, RelError: -0.4, SelError: -14.1% Actual cardinality: 11 Cardinality estimates: -heuristic: 3.36 QError: 3.27, RelError: -0.69, SelError: -38.2% -heuristicIdx: 9.03 QError: 1.22, RelError: -0.18, SelError: -9.85% -histogram: 10.57 QError: 1.04, RelError: -0.04, SelError: -2.15% -histogramIdx: 8.37 QError: 1.31, RelError: -0.24, SelError: -13.15% +heuristic: 3.36 +QError: 3.27, RelError: -0.69, SelError: -38.2% +heuristicIdx: 9.03 +QError: 1.22, RelError: -0.18, SelError: -9.85% +histogram: 10.57 +QError: 1.04, RelError: -0.04, SelError: -2.15% +histogramIdx: 8.37 +QError: 1.31, RelError: -0.24, SelError: -13.15% [jsTest] ---- @@ -1840,10 +2421,14 @@ histogramIdx: 8.37 QError: 1.31, RelError: -0.24, SelError: -13.15% Actual cardinality: 4 Cardinality estimates: -heuristic: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -heuristicIdx: 4.43 QError: 1.11, RelError: 0.11, SelError: 2.15% -histogram: 5.69 QError: 1.42, RelError: 0.42, SelError: 8.45% -histogramIdx: 5.69 QError: 1.42, RelError: 0.42, SelError: 8.45% +heuristic: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +heuristicIdx: 4.43 +QError: 1.11, RelError: 0.11, SelError: 2.15% +histogram: 
5.69 +QError: 1.42, RelError: 0.42, SelError: 8.45% +histogramIdx: 5.69 +QError: 1.42, RelError: 0.42, SelError: 8.45% [jsTest] ---- @@ -1852,10 +2437,14 @@ histogramIdx: 5.69 QError: 1.42, RelError: 0.42, SelError: 8.45% Actual cardinality: 8 Cardinality estimates: -heuristic: 2.81 QError: 2.85, RelError: -0.65, SelError: -25.95% -heuristicIdx: 2.81 QError: 2.85, RelError: -0.65, SelError: -25.95% -histogram: 8.04 QError: 1, RelError: 0, SelError: 0.2% -histogramIdx: 8.04 QError: 1, RelError: 0, SelError: 0.2% +heuristic: 2.81 +QError: 2.85, RelError: -0.65, SelError: -25.95% +heuristicIdx: 2.81 +QError: 2.85, RelError: -0.65, SelError: -25.95% +histogram: 8.04 +QError: 1, RelError: 0, SelError: 0.2% +histogramIdx: 8.04 +QError: 1, RelError: 0, SelError: 0.2% [jsTest] ---- @@ -1864,10 +2453,14 @@ histogramIdx: 8.04 QError: 1, RelError: 0, SelError: 0.2% Actual cardinality: 2 Cardinality estimates: -heuristic: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -heuristicIdx: 4.43 QError: 2.22, RelError: 1.21, SelError: 12.15% -histogram: 3.79 QError: 1.9, RelError: 0.9, SelError: 8.95% -histogramIdx: 3.79 QError: 1.9, RelError: 0.9, SelError: 8.95% +heuristic: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +heuristicIdx: 4.43 +QError: 2.22, RelError: 1.21, SelError: 12.15% +histogram: 3.79 +QError: 1.9, RelError: 0.9, SelError: 8.95% +histogramIdx: 3.79 +QError: 1.9, RelError: 0.9, SelError: 8.95% [jsTest] ---- @@ -1876,10 +2469,14 @@ histogramIdx: 3.79 QError: 1.9, RelError: 0.9, SelError: 8.95% Actual cardinality: 2 Cardinality estimates: -heuristic: 2.57 QError: 1.29, RelError: 0.28, SelError: 2.85% -heuristicIdx: 2.57 QError: 1.29, RelError: 0.28, SelError: 2.85% -histogram: 1.79 QError: 1.12, RelError: -0.1, SelError: -1.05% -histogramIdx: 1.79 QError: 1.12, RelError: -0.1, SelError: -1.05% +heuristic: 2.57 +QError: 1.29, RelError: 0.28, SelError: 2.85% +heuristicIdx: 2.57 +QError: 1.29, RelError: 0.28, SelError: 2.85% +histogram: 1.79 +QError: 1.12, RelError: -0.1, SelError: -1.05% +histogramIdx: 1.79 +QError: 1.12, RelError: -0.1, SelError: -1.05% [jsTest] ---- @@ -1888,10 +2485,14 @@ histogramIdx: 1.79 QError: 1.12, RelError: -0.1, SelError: -1.05% Actual cardinality: 0 Cardinality estimates: -heuristic: 2.81 QError: 2.81, RelError: 0.28, SelError: 14.05% -heuristicIdx: 2.81 QError: 2.81, RelError: 0.28, SelError: 14.05% -histogram: 4.04 QError: 4.04, RelError: 0.4, SelError: 20.2% -histogramIdx: 4.04 QError: 4.04, RelError: 0.4, SelError: 20.2% +heuristic: 2.81 +QError: 2.81, RelError: 0.28, SelError: 14.05% +heuristicIdx: 2.81 +QError: 2.81, RelError: 0.28, SelError: 14.05% +histogram: 4.04 +QError: 4.04, RelError: 0.4, SelError: 20.2% +histogramIdx: 4.04 +QError: 4.04, RelError: 0.4, SelError: 20.2% [jsTest] ---- @@ -1900,10 +2501,14 @@ histogramIdx: 4.04 QError: 4.04, RelError: 0.4, SelError: 20.2% Actual cardinality: 20 Cardinality estimates: -heuristic: 11 QError: 1.82, RelError: -0.45, SelError: -45% -heuristicIdx: 11 QError: 1.82, RelError: -0.45, SelError: -45% -histogram: 18.21 QError: 1.1, RelError: -0.09, SelError: -8.95% -histogramIdx: 18.21 QError: 1.1, RelError: -0.09, SelError: -8.95% +heuristic: 11 +QError: 1.82, RelError: -0.45, SelError: -45% +heuristicIdx: 11 +QError: 1.82, RelError: -0.45, SelError: -45% +histogram: 18.21 +QError: 1.1, RelError: -0.09, SelError: -8.95% +histogramIdx: 18.21 +QError: 1.1, RelError: -0.09, SelError: -8.95% [jsTest] ---- @@ -1912,10 +2517,14 @@ histogramIdx: 18.21 QError: 1.1, RelError: -0.09, SelError: -8.95% Actual 
cardinality: 18 Cardinality estimates: -heuristic: 10.31 QError: 1.75, RelError: -0.43, SelError: -38.45% -heuristicIdx: 10.31 QError: 1.75, RelError: -0.43, SelError: -38.45% -histogram: 18.1 QError: 1.01, RelError: 0.01, SelError: 0.5% -histogramIdx: 18.1 QError: 1.01, RelError: 0.01, SelError: 0.5% +heuristic: 10.31 +QError: 1.75, RelError: -0.43, SelError: -38.45% +heuristicIdx: 10.31 +QError: 1.75, RelError: -0.43, SelError: -38.45% +histogram: 18.1 +QError: 1.01, RelError: 0.01, SelError: 0.5% +histogramIdx: 18.1 +QError: 1.01, RelError: 0.01, SelError: 0.5% [jsTest] ---- @@ -1924,10 +2533,14 @@ histogramIdx: 18.1 QError: 1.01, RelError: 0.01, SelError: 0.5% Actual cardinality: 18 Cardinality estimates: -heuristic: 11.81 QError: 1.52, RelError: -0.34, SelError: -30.95% -heuristicIdx: 11.81 QError: 1.52, RelError: -0.34, SelError: -30.95% -histogram: 10.74 QError: 1.68, RelError: -0.4, SelError: -36.3% -histogramIdx: 10.74 QError: 1.68, RelError: -0.4, SelError: -36.3% +heuristic: 11.81 +QError: 1.52, RelError: -0.34, SelError: -30.95% +heuristicIdx: 11.81 +QError: 1.52, RelError: -0.34, SelError: -30.95% +histogram: 10.74 +QError: 1.68, RelError: -0.4, SelError: -36.3% +histogramIdx: 10.74 +QError: 1.68, RelError: -0.4, SelError: -36.3% [jsTest] ---- @@ -1936,10 +2549,14 @@ histogramIdx: 10.74 QError: 1.68, RelError: -0.4, SelError: -36.3% Actual cardinality: 7 Cardinality estimates: -heuristic: 3.36 QError: 2.08, RelError: -0.52, SelError: -18.2% -heuristicIdx: 3.36 QError: 2.08, RelError: -0.52, SelError: -18.2% -histogram: 9.45 QError: 1.35, RelError: 0.35, SelError: 12.25% -histogramIdx: 9.45 QError: 1.35, RelError: 0.35, SelError: 12.25% +heuristic: 3.36 +QError: 2.08, RelError: -0.52, SelError: -18.2% +heuristicIdx: 3.36 +QError: 2.08, RelError: -0.52, SelError: -18.2% +histogram: 9.45 +QError: 1.35, RelError: 0.35, SelError: 12.25% +histogramIdx: 9.45 +QError: 1.35, RelError: 0.35, SelError: 12.25% [jsTest] ---- @@ -1948,10 +2565,14 @@ histogramIdx: 9.45 QError: 1.35, RelError: 0.35, SelError: 12.25% Actual cardinality: 6 Cardinality estimates: -heuristic: 4.43 QError: 1.35, RelError: -0.26, SelError: -7.85% -heuristicIdx: 6.6 QError: 1.1, RelError: 0.1, SelError: 3% -histogram: 5.06 QError: 1.19, RelError: -0.16, SelError: -4.7% -histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% +heuristic: 4.43 +QError: 1.35, RelError: -0.26, SelError: -7.85% +heuristicIdx: 6.6 +QError: 1.1, RelError: 0.1, SelError: 3% +histogram: 5.06 +QError: 1.19, RelError: -0.16, SelError: -4.7% +histogramIdx: 6 +QError: 1, RelError: 0, SelError: 0% [jsTest] ---- @@ -1960,10 +2581,14 @@ histogramIdx: 6 QError: 1, RelError: 0, SelError: 0% Actual cardinality: 0 Cardinality estimates: -heuristic: 2.81 QError: 2.81, RelError: 0.28, SelError: 14.05% -heuristicIdx: 0 QError: 0, RelError: 0, SelError: 0% -histogram: 0 QError: 0, RelError: 0, SelError: 0% -histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% +heuristic: 2.81 +QError: 2.81, RelError: 0.28, SelError: 14.05% +heuristicIdx: 0 +QError: 0, RelError: 0, SelError: 0% +histogram: 0 +QError: 0, RelError: 0, SelError: 0% +histogramIdx: 0 +QError: 0, RelError: 0, SelError: 0% [jsTest] ---- @@ -1972,10 +2597,14 @@ histogramIdx: 0 QError: 0, RelError: 0, SelError: 0% Actual cardinality: 6 Cardinality estimates: -heuristic: 11 QError: 1.83, RelError: 0.83, SelError: 25% -heuristicIdx: 11 QError: 1.83, RelError: 0.83, SelError: 25% -histogram: 4.33 QError: 1.39, RelError: -0.28, SelError: -8.35% -histogramIdx: 4.33 QError: 1.39, RelError: -0.28, SelError: 
-8.35% +heuristic: 11 +QError: 1.83, RelError: 0.83, SelError: 25% +heuristicIdx: 11 +QError: 1.83, RelError: 0.83, SelError: 25% +histogram: 4.33 +QError: 1.39, RelError: -0.28, SelError: -8.35% +histogramIdx: 4.33 +QError: 1.39, RelError: -0.28, SelError: -8.35% [jsTest] ---- @@ -1984,10 +2613,14 @@ histogramIdx: 4.33 QError: 1.39, RelError: -0.28, SelError: -8.35% Actual cardinality: 4 Cardinality estimates: -heuristic: 10.31 QError: 2.58, RelError: 1.58, SelError: 31.55% -heuristicIdx: 10.31 QError: 2.58, RelError: 1.58, SelError: 31.55% -histogram: 3.43 QError: 1.17, RelError: -0.14, SelError: -2.85% -histogramIdx: 3.43 QError: 1.17, RelError: -0.14, SelError: -2.85% +heuristic: 10.31 +QError: 2.58, RelError: 1.58, SelError: 31.55% +heuristicIdx: 10.31 +QError: 2.58, RelError: 1.58, SelError: 31.55% +histogram: 3.43 +QError: 1.17, RelError: -0.14, SelError: -2.85% +histogramIdx: 3.43 +QError: 1.17, RelError: -0.14, SelError: -2.85% [jsTest] ---- @@ -1996,10 +2629,14 @@ histogramIdx: 3.43 QError: 1.17, RelError: -0.14, SelError: -2.85% Actual cardinality: 12 Cardinality estimates: -heuristic: 11.81 QError: 1.02, RelError: -0.02, SelError: -0.95% -heuristicIdx: 11.81 QError: 1.02, RelError: -0.02, SelError: -0.95% -histogram: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85% -histogramIdx: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85% +heuristic: 11.81 +QError: 1.02, RelError: -0.02, SelError: -0.95% +heuristicIdx: 11.81 +QError: 1.02, RelError: -0.02, SelError: -0.95% +histogram: 8.83 +QError: 1.36, RelError: -0.26, SelError: -15.85% +histogramIdx: 8.83 +QError: 1.36, RelError: -0.26, SelError: -15.85% [jsTest] ---- @@ -2008,10 +2645,14 @@ histogramIdx: 8.83 QError: 1.36, RelError: -0.26, SelError: -15.85% Actual cardinality: 6 Cardinality estimates: -heuristic: 4.88 QError: 1.23, RelError: -0.19, SelError: -5.6% -heuristicIdx: 9.03 QError: 1.51, RelError: 0.5, SelError: 15.15% -histogram: 4.24 QError: 1.42, RelError: -0.29, SelError: -8.8% -histogramIdx: 4.82 QError: 1.24, RelError: -0.2, SelError: -5.9% +heuristic: 4.88 +QError: 1.23, RelError: -0.19, SelError: -5.6% +heuristicIdx: 9.03 +QError: 1.51, RelError: 0.5, SelError: 15.15% +histogram: 4.24 +QError: 1.42, RelError: -0.29, SelError: -8.8% +histogramIdx: 4.82 +QError: 1.24, RelError: -0.2, SelError: -5.9% [jsTest] ---- @@ -2240,50 +2881,70 @@ histogramIdx: [jsTest] Mean errors per strategy for predicate { "dtype" : { "$ne" : "array" } }:: [jsTest] ---- -heuristic: { "RMSQError" : 4.022, "RMSRelError" : 2.698, "meanSelError" : 21.329 } -heuristicIdx: { "RMSQError" : 4.524, "RMSRelError" : 3.231, "meanSelError" : 23.669 } -histogram: { "RMSQError" : 2.156, "RMSRelError" : 1.351, "meanSelError" : 4.712 } -histogramIdx: { "RMSQError" : 0.92, "RMSRelError" : 0, "meanSelError" : 0 } +heuristic: +{ "RMSQError" : 4.022, "RMSRelError" : 2.698, "meanSelError" : 21.329 } +heuristicIdx: +{ "RMSQError" : 4.524, "RMSRelError" : 3.231, "meanSelError" : 23.669 } +histogram: +{ "RMSQError" : 2.156, "RMSRelError" : 1.351, "meanSelError" : 4.712 } +histogramIdx: +{ "RMSQError" : 0.92, "RMSRelError" : 0, "meanSelError" : 0 } [jsTest] ---- [jsTest] Mean errors per strategy for predicate { "dtype" : "array" }:: [jsTest] ---- -heuristic: { "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 } -heuristicIdx: { "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 } -histogram: { "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 } -histogramIdx: { "RMSQError" : 1.293, "RMSRelError" : 0.501, 
"meanSelError" : 3.888 } +heuristic: +{ "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 } +heuristicIdx: +{ "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 } +histogram: +{ "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 } +histogramIdx: +{ "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 } [jsTest] ---- [jsTest] Mean errors per strategy for predicate { "$and" : [ { "elemMatch" : true }, { "dtype" : "array" } ] }:: [jsTest] ---- -heuristic: { "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 } -heuristicIdx: { "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 } -histogram: { "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 } -histogramIdx: { "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 } +heuristic: +{ "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 } +heuristicIdx: +{ "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 } +histogram: +{ "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 } +histogramIdx: +{ "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 } [jsTest] ---- [jsTest] Mean errors per strategy for predicate { "$and" : [ { "elemMatch" : false }, { "dtype" : "array" } ] }:: [jsTest] ---- -heuristic: { "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 } -heuristicIdx: { "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 } -histogram: { "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 } -histogramIdx: { "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 } +heuristic: +{ "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 } +heuristicIdx: +{ "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 } +histogram: +{ "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 } +histogramIdx: +{ "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 } [jsTest] ---- [jsTest] Mean errors per strategy for all queries: [jsTest] ---- -heuristic: { "RMSQError" : 3.385, "RMSRelError" : 2.192, "meanSelError" : 19.222 } -heuristicIdx: { "RMSQError" : 3.766, "RMSRelError" : 2.61, "meanSelError" : 20.706 } -histogram: { "RMSQError" : 1.887, "RMSRelError" : 1.118, "meanSelError" : 4.41 } -histogramIdx: { "RMSQError" : 1.072, "RMSRelError" : 0.303, "meanSelError" : 1.422 } +heuristic: +{ "RMSQError" : 3.385, "RMSRelError" : 2.192, "meanSelError" : 19.222 } +heuristicIdx: +{ "RMSQError" : 3.766, "RMSRelError" : 2.61, "meanSelError" : 20.706 } +histogram: +{ "RMSQError" : 1.887, "RMSRelError" : 1.118, "meanSelError" : 4.41 } +histogramIdx: +{ "RMSQError" : 1.072, "RMSRelError" : 0.303, "meanSelError" : 1.422 } [jsTest] ---- @@ -2494,10 +3155,14 @@ histogramIdx: [jsTest] Mean errors per strategy for all queries: [jsTest] ---- -heuristic: { "RMSQError" : 1.931, "RMSRelError" : 0.597, "meanSelError" : 14.195 } -heuristicIdx: { "RMSQError" : 1.851, "RMSRelError" : 0.648, "meanSelError" : 13.789 } -histogram: { "RMSQError" : 1.417, "RMSRelError" : 0.432, "meanSelError" : 7.936 } -histogramIdx: { "RMSQError" : 1.295, "RMSRelError" : 0.252, "meanSelError" : 6.159 } +heuristic: +{ "RMSQError" : 1.931, "RMSRelError" : 0.597, "meanSelError" : 14.195 } +heuristicIdx: +{ "RMSQError" : 1.851, "RMSRelError" : 0.648, "meanSelError" : 13.789 } +histogram: +{ "RMSQError" : 1.417, "RMSRelError" : 0.432, "meanSelError" : 7.936 } +histogramIdx: +{ "RMSQError" : 1.295, "RMSRelError" : 0.252, 
"meanSelError" : 6.159 } [jsTest] ---- @@ -2562,11 +3227,18 @@ cardinality: 7, histogramIdx estimation: 9.45, errors: { "qError" : 1.35, "rel [jsTest] Mean errors per strategy for all queries: [jsTest] ---- -heuristic: { "RMSQError" : 3.11, "RMSRelError" : 1.943, "meanSelError" : 18.059 } -heuristicIdx: { "RMSQError" : 3.42, "RMSRelError" : 2.31, "meanSelError" : 19.106 } -histogram: { "RMSQError" : 1.789, "RMSRelError" : 1.002, "meanSelError" : 5.226 } -histogramIdx: { "RMSQError" : 1.127, "RMSRelError" : 0.292, "meanSelError" : 2.518 } -===============================================================================Errors excluding empty queries.Non-empty simple error entries: 111; complex error entries: 29 +heuristic: +{ "RMSQError" : 3.11, "RMSRelError" : 1.943, "meanSelError" : 18.059 } +heuristicIdx: +{ "RMSQError" : 3.42, "RMSRelError" : 2.31, "meanSelError" : 19.106 } +histogram: +{ "RMSQError" : 1.789, "RMSRelError" : 1.002, "meanSelError" : 5.226 } +histogramIdx: +{ "RMSQError" : 1.127, "RMSRelError" : 0.292, "meanSelError" : 2.518 } +=============================================================================== +Errors excluding empty queries. +Non-empty simple error entries: 111; complex error entries: 29 + [jsTest] ---- [jsTest] Aggregate errors for all simple predicate queries @@ -2794,50 +3466,70 @@ histogramIdx: [jsTest] Mean errors per strategy for predicate { "dtype" : { "$ne" : "array" } }:: [jsTest] ---- -heuristic: { "RMSQError" : 3.775, "RMSRelError" : 2.924, "meanSelError" : 20.669 } -heuristicIdx: { "RMSQError" : 4.359, "RMSRelError" : 3.505, "meanSelError" : 24.29 } -histogram: { "RMSQError" : 2.071, "RMSRelError" : 1.465, "meanSelError" : 4.164 } -histogramIdx: { "RMSQError" : 1, "RMSRelError" : 0, "meanSelError" : 0 } +heuristic: +{ "RMSQError" : 3.775, "RMSRelError" : 2.924, "meanSelError" : 20.669 } +heuristicIdx: +{ "RMSQError" : 4.359, "RMSRelError" : 3.505, "meanSelError" : 24.29 } +histogram: +{ "RMSQError" : 2.071, "RMSRelError" : 1.465, "meanSelError" : 4.164 } +histogramIdx: +{ "RMSQError" : 1, "RMSRelError" : 0, "meanSelError" : 0 } [jsTest] ---- [jsTest] Mean errors per strategy for predicate { "dtype" : "array" }:: [jsTest] ---- -heuristic: { "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 } -heuristicIdx: { "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 } -histogram: { "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 } -histogramIdx: { "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 } +heuristic: +{ "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 } +heuristicIdx: +{ "RMSQError" : 1.812, "RMSRelError" : 0.723, "meanSelError" : 15.57 } +histogram: +{ "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 } +histogramIdx: +{ "RMSQError" : 1.293, "RMSRelError" : 0.501, "meanSelError" : 3.888 } [jsTest] ---- [jsTest] Mean errors per strategy for predicate { "$and" : [ { "elemMatch" : true }, { "dtype" : "array" } ] }:: [jsTest] ---- -heuristic: { "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 } -heuristicIdx: { "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 } -histogram: { "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 } -histogramIdx: { "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 } +heuristic: +{ "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 } +heuristicIdx: +{ "RMSQError" : 1.546, "RMSRelError" : 0.587, "meanSelError" : 7.468 } +histogram: +{ 
"RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 } +histogramIdx: +{ "RMSQError" : 1.309, "RMSRelError" : 0.341, "meanSelError" : 7.082 } [jsTest] ---- [jsTest] Mean errors per strategy for predicate { "$and" : [ { "elemMatch" : false }, { "dtype" : "array" } ] }:: [jsTest] ---- -heuristic: { "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 } -heuristicIdx: { "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 } -histogram: { "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 } -histogramIdx: { "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 } +heuristic: +{ "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 } +heuristicIdx: +{ "RMSQError" : 1.89, "RMSRelError" : 0.761, "meanSelError" : 18.191 } +histogram: +{ "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 } +histogramIdx: +{ "RMSQError" : 1.288, "RMSRelError" : 0.543, "meanSelError" : 2.854 } [jsTest] ---- [jsTest] Mean errors per strategy for all queries: [jsTest] ---- -heuristic: { "RMSQError" : 3.132, "RMSRelError" : 2.301, "meanSelError" : 18.602 } -heuristicIdx: { "RMSQError" : 3.554, "RMSRelError" : 2.742, "meanSelError" : 20.755 } -histogram: { "RMSQError" : 1.797, "RMSRelError" : 1.174, "meanSelError" : 4.052 } -histogramIdx: { "RMSQError" : 1.128, "RMSRelError" : 0.319, "meanSelError" : 1.576 } +heuristic: +{ "RMSQError" : 3.132, "RMSRelError" : 2.301, "meanSelError" : 18.602 } +heuristicIdx: +{ "RMSQError" : 3.554, "RMSRelError" : 2.742, "meanSelError" : 20.755 } +histogram: +{ "RMSQError" : 1.797, "RMSRelError" : 1.174, "meanSelError" : 4.052 } +histogramIdx: +{ "RMSQError" : 1.128, "RMSRelError" : 0.319, "meanSelError" : 1.576 } [jsTest] ---- @@ -3040,10 +3732,14 @@ histogramIdx: [jsTest] Mean errors per strategy for all queries: [jsTest] ---- -heuristic: { "RMSQError" : 1.779, "RMSRelError" : 0.662, "meanSelError" : 14.884 } -heuristicIdx: { "RMSQError" : 1.726, "RMSRelError" : 0.722, "meanSelError" : 14.771 } -histogram: { "RMSQError" : 1.401, "RMSRelError" : 0.482, "meanSelError" : 9.053 } -histogramIdx: { "RMSQError" : 1.246, "RMSRelError" : 0.274, "meanSelError" : 6.85 } +heuristic: +{ "RMSQError" : 1.779, "RMSRelError" : 0.662, "meanSelError" : 14.884 } +heuristicIdx: +{ "RMSQError" : 1.726, "RMSRelError" : 0.722, "meanSelError" : 14.771 } +histogram: +{ "RMSQError" : 1.401, "RMSRelError" : 0.482, "meanSelError" : 9.053 } +histogramIdx: +{ "RMSQError" : 1.246, "RMSRelError" : 0.274, "meanSelError" : 6.85 } [jsTest] ---- @@ -3098,10 +3794,14 @@ cardinality: 11, histogramIdx estimation: 8.37, errors: { "qError" : 1.31, "re [jsTest] Mean errors per strategy for all queries: [jsTest] ---- -heuristic: { "RMSQError" : 2.904, "RMSRelError" : 2.071, "meanSelError" : 17.832 } -heuristicIdx: { "RMSQError" : 3.261, "RMSRelError" : 2.463, "meanSelError" : 19.515 } -histogram: { "RMSQError" : 1.722, "RMSRelError" : 1.068, "meanSelError" : 5.088 } -histogramIdx: { "RMSQError" : 1.154, "RMSRelError" : 0.31, "meanSelError" : 2.669 } +heuristic: +{ "RMSQError" : 2.904, "RMSRelError" : 2.071, "meanSelError" : 17.832 } +heuristicIdx: +{ "RMSQError" : 3.261, "RMSRelError" : 2.463, "meanSelError" : 19.515 } +histogram: +{ "RMSQError" : 1.722, "RMSRelError" : 1.068, "meanSelError" : 5.088 } +histogramIdx: +{ "RMSQError" : 1.154, "RMSRelError" : 0.31, "meanSelError" : 2.669 } [jsTest] ---- diff --git a/jstests/query_golden/expected_output/ce_mixed b/jstests/query_golden/expected_output/ce_mixed index 
f7855be70c2b8..fe37f9023ff45 100644 --- a/jstests/query_golden/expected_output/ce_mixed +++ b/jstests/query_golden/expected_output/ce_mixed @@ -2821,6 +2821,7 @@ } ] + [jsTest] ---- [jsTest] Settings before: internalQueryCardinalityEstimatorMode: sampling, internalQueryFrameworkControl: forceBonsai [jsTest] ---- @@ -2832,7 +2833,7 @@ [jsTest] ---- Histogram estimate: 150. -Heuristic estimate: 17.320508075688778. +Heuristic estimate: 17.32050807568877. Histogram explain: { "nodeType" : "Root", "logicalCE" : 150, @@ -2845,15 +2846,16 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "interval" : "[ true, true ]" }, "rightChild" : { @@ -2867,12 +2869,13 @@ Heuristic explain: { } } + [jsTest] ---- [jsTest] Query: { "likesPizza" : false } returned 150 documents. [jsTest] ---- Histogram estimate: 150. -Heuristic estimate: 17.320508075688778. +Heuristic estimate: 17.32050807568877. Histogram explain: { "nodeType" : "Root", "logicalCE" : 150, @@ -2885,15 +2888,16 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "interval" : "[ false, false ]" }, "rightChild" : { @@ -2907,12 +2911,13 @@ Heuristic explain: { } } + [jsTest] ---- [jsTest] Query: { "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z") } } returned 299 documents. [jsTest] ---- Histogram estimate: 299. -Heuristic estimate: 59.999999999999986. +Heuristic estimate: 60. Histogram explain: { "nodeType" : "Root", "logicalCE" : 299, @@ -2925,15 +2930,16 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "interval" : "( ISODate(\"1950-01-01T00:00:00Z\"), ISODate(\"0NaN-NaN-NaNTNaN:NaN:NaNZ\") ]" }, "rightChild" : { @@ -2947,12 +2953,13 @@ Heuristic explain: { } } + [jsTest] ---- [jsTest] Query: { "date" : { "$lt" : ISODate("1979-12-06T00:00:00Z") } } returned 179 documents. [jsTest] ---- Histogram estimate: 179.33675564681724. -Heuristic estimate: 59.999999999999986. +Heuristic estimate: 60. Histogram explain: { "nodeType" : "Root", "logicalCE" : 179.33675564681724, @@ -2965,15 +2972,16 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "interval" : "[ ISODate(\"0NaN-NaN-NaNTNaN:NaN:NaNZ\"), ISODate(\"1979-12-06T00:00:00Z\") )" }, "rightChild" : { @@ -2987,12 +2995,13 @@ Heuristic explain: { } } + [jsTest] ---- [jsTest] Query: { "name" : { "$lte" : "Bob Bennet" } } returned 37 documents. [jsTest] ---- Histogram estimate: 61.99999971987415. 
-Heuristic estimate: 99.00000000000003. +Heuristic estimate: 99. Histogram explain: { "nodeType" : "Root", "logicalCE" : 61.99999971987415, @@ -3014,25 +3023,27 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 99.00000000000003, + "logicalCE" : 99, "child" : { "nodeType" : "Filter", - "logicalCE" : 99.00000000000003, + "logicalCE" : 99, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 99.00000000000003 + "logicalCE" : 99 } } } + [jsTest] ---- [jsTest] Query: { "favPizzaToppings" : "mushrooms" } returned 120 documents. [jsTest] ---- Histogram estimate: 120. -Heuristic estimate: 17.320508075688778. +Heuristic estimate: 17.32050807568877. Histogram explain: { "nodeType" : "Root", "logicalCE" : 120, @@ -3045,87 +3056,93 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Zizzi" } returned 62 documents. [jsTest] ---- -Histogram and heuristic estimates were equal: 17.320508075688778. +Histogram and heuristic estimates were equal: 17.32050807568877. Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Pacinos" } returned 113 documents. [jsTest] ---- -Histogram and heuristic estimates were equal: 17.320508075688778. +Histogram and heuristic estimates were equal: 17.32050807568877. Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } + [jsTest] ---- [jsTest] Query: { "likesPizza" : true, "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z"), "$lt" : ISODate("1979-12-06T00:00:00Z") } } returned 89 documents. [jsTest] ---- Histogram estimate: 115.65144475323812. -Heuristic estimate: 7.745966692414835. +Heuristic estimate: 7.745966692414833. 
Histogram explain: { "nodeType" : "Root", "logicalCE" : 115.65144475323812, @@ -3142,38 +3159,40 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 7.745966692414835, + "logicalCE" : 7.745966692414833, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 7.745966692414835, + "logicalCE" : 7.745966692414833, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "interval" : "[ true, true ]" }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "child" : { "nodeType" : "Seek", - "logicalCE" : 59.999999999999986 + "logicalCE" : 60 } } } } } + [jsTest] ---- [jsTest] Query: { "likesPizza" : false, "name" : { "$lte" : "Bob Bennet" } } returned 17 documents. [jsTest] ---- Histogram estimate: 43.84062023548705. -Heuristic estimate: 9.949874371066203. +Heuristic estimate: 9.949874371066198. Histogram explain: { "nodeType" : "Root", "logicalCE" : 43.84062023548705, @@ -3199,38 +3218,40 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 9.949874371066203, + "logicalCE" : 9.949874371066198, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 9.949874371066203, + "logicalCE" : 9.949874371066198, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "interval" : "[ false, false ]" }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 99.00000000000003, + "logicalCE" : 99, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 99.00000000000003, + "logicalCE" : 99, "child" : { "nodeType" : "Seek", - "logicalCE" : 99.00000000000003 + "logicalCE" : 99 } } } } } + [jsTest] ---- [jsTest] Query: { "favPizzaToppings" : "mushrooms", "name" : { "$lte" : "Bob Bennet" } } returned 16 documents. [jsTest] ---- Histogram estimate: 39.21224280892076. -Heuristic estimate: 9.949874371066203. +Heuristic estimate: 9.949874371066198. Histogram explain: { "nodeType" : "Root", "logicalCE" : 39.21224280892076, @@ -3256,275 +3277,447 @@ Histogram explain: { } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 9.949874371066203, + "logicalCE" : 9.949874371066198, "child" : { "nodeType" : "Filter", - "logicalCE" : 9.949874371066203, + "logicalCE" : 9.949874371066198, "child" : { "nodeType" : "Filter", - "logicalCE" : 9.949874371066203, + "logicalCE" : 9.949874371066198, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 9.949874371066203 + "logicalCE" : 9.949874371066198 } } } } + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "likesPizza" : true }, { "date" : { "$lt" : ISODate("1955-01-01T00:00:00Z") } } ] } returned 168 documents. +[jsTest] ---- + +Histogram estimate: 157.54345865842552. +Heuristic estimate: 67.03119854910238. 
+Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 157.54345865842552, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 157.54345865842552, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 157.54345865842552 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 67.03119854910238, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 67.03119854910238, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 67.03119854910238 + } + } +} + + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "favPizzaToppings" : "mushrooms" }, { "name" : { "$lte" : "Bob Bennet", "$gte" : "Alice Smith" } } ] } returned 130 documents. +[jsTest] ---- + +Histogram estimate: 138.3336768827409. +Heuristic estimate: 67.03119854910238. +Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 138.3336768827409, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 138.3336768827409, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 138.3336768827409 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 67.03119854910238, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 67.03119854910238, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 67.03119854910238 + } + } +} + + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "$and" : [ { "likesPizza" : false }, { "name" : { "$lte" : "Bob Bennet" } } ] }, { "$and" : [ { "likesPizza" : true }, { "name" : { "$gte" : "Tom Watson" } } ] } ] } returned 34 documents. +[jsTest] ---- + +Histogram estimate: 53.66673009508964. +Heuristic estimate: 14.800368851048507. +Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 53.66673009508964, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 53.66673009508964, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 53.66673009508964 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 14.800368851048507, + "child" : { + "nodeType" : "NestedLoopJoin", + "logicalCE" : 14.800368851048507, + "leftChild" : { + "nodeType" : "Unique", + "logicalCE" : 25.602073215185506, + "child" : { + "nodeType" : "Union", + "logicalCE" : 25.602073215185506, + "children" : [ + { + "nodeType" : "IndexScan", + "logicalCE" : 17.32050807568877, + "interval" : "[ false, false ]" + }, + { + "nodeType" : "IndexScan", + "logicalCE" : 17.32050807568877, + "interval" : "[ true, true ]" + } + ] + } + }, + "rightChild" : { + "nodeType" : "Filter", + "logicalCE" : 14.800368851048507, + "child" : { + "nodeType" : "LimitSkip", + "logicalCE" : 14.800368851048507, + "child" : { + "nodeType" : "Seek", + "logicalCE" : 14.800368851048507 + } + } + } + } +} + + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "$and" : [ { "likesPizza" : false }, { "name" : { "$lte" : "Bob Bennet" } } ] }, { "date" : { "$lte" : "1960-01-01T00:00:00" } } ] } returned 17 documents. +[jsTest] ---- + +Histogram estimate: 43.84062023548707. +Heuristic estimate: 102.36131345698406. 
+Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 43.84062023548707, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 43.84062023548707, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 43.84062023548707 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 102.36131345698406, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 102.36131345698406, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 102.36131345698406 + } + } +} + + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "likesPizza" : true } returned 62 documents. [jsTest] ---- -Histogram estimate: 12.247448713915896. -Heuristic estimate: 4.1617914502878195. +Histogram estimate: 12.247448713915889. +Heuristic estimate: 4.161791450287816. Histogram explain: { "nodeType" : "Root", - "logicalCE" : 12.247448713915896, + "logicalCE" : 12.247448713915889, "child" : { "nodeType" : "Filter", - "logicalCE" : 12.247448713915896, + "logicalCE" : 12.247448713915889, "child" : { "nodeType" : "Filter", - "logicalCE" : 12.247448713915896, + "logicalCE" : 12.247448713915889, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 12.247448713915896 + "logicalCE" : 12.247448713915889 } } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 4.1617914502878195, + "logicalCE" : 4.161791450287816, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 4.1617914502878195, + "logicalCE" : 4.161791450287816, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "interval" : "[ true, true ]" }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Seek", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "likesPizza" : false } returned 0 documents. [jsTest] ---- -Histogram estimate: 12.247448713915896. -Heuristic estimate: 4.1617914502878195. +Histogram estimate: 12.247448713915889. +Heuristic estimate: 4.161791450287816. Histogram explain: { "nodeType" : "Root", - "logicalCE" : 12.247448713915896, + "logicalCE" : 12.247448713915889, "child" : { "nodeType" : "Filter", - "logicalCE" : 12.247448713915896, + "logicalCE" : 12.247448713915889, "child" : { "nodeType" : "Filter", - "logicalCE" : 12.247448713915896, + "logicalCE" : 12.247448713915889, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 12.247448713915896 + "logicalCE" : 12.247448713915889 } } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 4.1617914502878195, + "logicalCE" : 4.161791450287816, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 4.1617914502878195, + "logicalCE" : 4.161791450287816, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "interval" : "[ false, false ]" }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Seek", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z") } } returned 62 documents. 
[jsTest] ---- -Histogram estimate: 17.29161646579059. -Heuristic estimate: 7.745966692414835. +Histogram estimate: 17.29161646579058. +Heuristic estimate: 7.745966692414833. Histogram explain: { "nodeType" : "Root", - "logicalCE" : 17.29161646579059, + "logicalCE" : 17.29161646579058, "child" : { "nodeType" : "Filter", - "logicalCE" : 17.29161646579059, + "logicalCE" : 17.29161646579058, "child" : { "nodeType" : "Filter", - "logicalCE" : 17.29161646579059, + "logicalCE" : 17.29161646579058, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 17.29161646579059 + "logicalCE" : 17.29161646579058 } } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 7.745966692414835, + "logicalCE" : 7.745966692414833, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 7.745966692414835, + "logicalCE" : 7.745966692414833, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "interval" : "( ISODate(\"1950-01-01T00:00:00Z\"), ISODate(\"0NaN-NaN-NaNTNaN:NaN:NaNZ\") ]" }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Seek", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "date" : { "$lt" : ISODate("1979-12-06T00:00:00Z") } } returned 37 documents. [jsTest] ---- -Histogram estimate: 13.391667396064515. -Heuristic estimate: 7.745966692414835. +Histogram estimate: 13.39166739606451. +Heuristic estimate: 7.745966692414833. Histogram explain: { "nodeType" : "Root", - "logicalCE" : 13.391667396064515, + "logicalCE" : 13.39166739606451, "child" : { "nodeType" : "Filter", - "logicalCE" : 13.391667396064515, + "logicalCE" : 13.39166739606451, "child" : { "nodeType" : "Filter", - "logicalCE" : 13.391667396064515, + "logicalCE" : 13.39166739606451, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 13.391667396064515 + "logicalCE" : 13.39166739606451 } } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 7.745966692414835, + "logicalCE" : 7.745966692414833, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 7.745966692414835, + "logicalCE" : 7.745966692414833, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "interval" : "[ ISODate(\"0NaN-NaN-NaNTNaN:NaN:NaNZ\"), ISODate(\"1979-12-06T00:00:00Z\") )" }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Seek", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Zizzi", "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z"), "$lt" : ISODate("1979-12-06T00:00:00Z") } } returned 37 documents. [jsTest] ---- -Histogram estimate: 13.354278552090237. -Heuristic estimate: 7.745966692414835. +Histogram estimate: 13.354278552090232. +Heuristic estimate: 7.745966692414833. 
Histogram explain: { "nodeType" : "Root", - "logicalCE" : 13.354278552090237, + "logicalCE" : 13.354278552090232, "child" : { "nodeType" : "Filter", - "logicalCE" : 13.354278552090237, + "logicalCE" : 13.354278552090232, "child" : { "nodeType" : "Filter", - "logicalCE" : 13.354278552090237, + "logicalCE" : 13.354278552090232, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 13.354278552090237 + "logicalCE" : 13.354278552090232 } } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 7.745966692414835, + "logicalCE" : 7.745966692414833, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 7.745966692414835, + "logicalCE" : 7.745966692414833, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 59.999999999999986, + "logicalCE" : 60, "interval" : "( ISODate(\"1950-01-01T00:00:00Z\"), ISODate(\"1979-12-06T00:00:00Z\") )" }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Seek", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Pacinos", "name" : { "$lte" : "Bob Bennet" } } returned 13 documents. [jsTest] ---- -Histogram estimate: 7.8740078562238045. -Heuristic estimate: 9.949874371066203. +Histogram estimate: 7.8740078562238. +Heuristic estimate: 9.949874371066198. Histogram explain: { "nodeType" : "Root", - "logicalCE" : 7.8740078562238045, + "logicalCE" : 7.8740078562238, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 7.8740078562238045, + "logicalCE" : 7.8740078562238, "leftChild" : { "nodeType" : "IndexScan", "logicalCE" : 61.99999971987415, @@ -3532,130 +3725,135 @@ Histogram explain: { }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "child" : { "nodeType" : "Seek", - "logicalCE" : 17.320508075688778 + "logicalCE" : 17.32050807568877 } } } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 9.949874371066203, + "logicalCE" : 9.949874371066198, "child" : { "nodeType" : "Filter", - "logicalCE" : 9.949874371066203, + "logicalCE" : 9.949874371066198, "child" : { "nodeType" : "Filter", - "logicalCE" : 9.949874371066203, + "logicalCE" : 9.949874371066198, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 9.949874371066203 + "logicalCE" : 9.949874371066198 } } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Pacinos", "favPizzaToppings" : "mushrooms" } returned 20 documents. [jsTest] ---- -Histogram estimate: 10.954451150103326. -Heuristic estimate: 4.1617914502878195. +Histogram estimate: 10.95445115010332. +Heuristic estimate: 4.161791450287816. 
Histogram explain: { "nodeType" : "Root", - "logicalCE" : 10.954451150103326, + "logicalCE" : 10.95445115010332, "child" : { "nodeType" : "Filter", - "logicalCE" : 10.954451150103326, + "logicalCE" : 10.95445115010332, "child" : { "nodeType" : "Filter", - "logicalCE" : 10.954451150103326, + "logicalCE" : 10.95445115010332, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 10.954451150103326 + "logicalCE" : 10.95445115010332 } } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 4.1617914502878195, + "logicalCE" : 4.161791450287816, "child" : { "nodeType" : "Filter", - "logicalCE" : 4.1617914502878195, + "logicalCE" : 4.161791450287816, "child" : { "nodeType" : "Filter", - "logicalCE" : 4.1617914502878195, + "logicalCE" : 4.161791450287816, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 4.1617914502878195 + "logicalCE" : 4.161791450287816 } } } } + [jsTest] ---- [jsTest] Query: { "lastPizzaShopVisited" : "Pacinos", "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z") }, "favPizzaToppings" : "mushrooms", "likesPizza" : true } returned 20 documents. [jsTest] ---- -Histogram estimate: 9.207714944743804. -Heuristic estimate: 1.6682798577318325. +Histogram estimate: 9.207714944743799. +Heuristic estimate: 1.6682798577318312. Histogram explain: { "nodeType" : "Root", - "logicalCE" : 9.207714944743804, + "logicalCE" : 9.207714944743799, "child" : { "nodeType" : "Filter", - "logicalCE" : 9.207714944743804, + "logicalCE" : 9.207714944743799, "child" : { "nodeType" : "Filter", - "logicalCE" : 9.207714944743804, + "logicalCE" : 9.207714944743799, "child" : { "nodeType" : "Filter", - "logicalCE" : 9.207714944743804, + "logicalCE" : 9.207714944743799, "child" : { "nodeType" : "Filter", - "logicalCE" : 9.207714944743804, + "logicalCE" : 9.207714944743799, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 9.207714944743804 + "logicalCE" : 9.207714944743799 } } } } } } + Heuristic explain: { "nodeType" : "Root", - "logicalCE" : 1.6682798577318325, + "logicalCE" : 1.6682798577318312, "child" : { "nodeType" : "NestedLoopJoin", - "logicalCE" : 1.6682798577318325, + "logicalCE" : 1.6682798577318312, "leftChild" : { "nodeType" : "IndexScan", - "logicalCE" : 17.320508075688778, + "logicalCE" : 17.32050807568877, "interval" : "[ true, true ]" }, "rightChild" : { "nodeType" : "Filter", - "logicalCE" : 2.783157683713742, + "logicalCE" : 2.7831576837137395, "child" : { "nodeType" : "Filter", - "logicalCE" : 2.783157683713742, + "logicalCE" : 2.7831576837137395, "child" : { "nodeType" : "Filter", - "logicalCE" : 2.783157683713742, + "logicalCE" : 2.7831576837137395, "child" : { "nodeType" : "LimitSkip", - "logicalCE" : 2.783157683713742, + "logicalCE" : 2.7831576837137395, "child" : { "nodeType" : "Seek", - "logicalCE" : 2.783157683713742 + "logicalCE" : 2.7831576837137395 } } } @@ -3664,6 +3862,205 @@ Heuristic explain: { } } + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "likesPizza" : true } ] } returned 150 documents. +[jsTest] ---- + +Histogram estimate: 154.39449909318898. +Heuristic estimate: 25.602073215185506. 
+Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 154.39449909318898, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 154.39449909318898, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 154.39449909318898 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 25.602073215185506, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 25.602073215185506, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 25.602073215185506 + } + } +} + + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "date" : { "$gt" : ISODate("1950-01-01T00:00:00Z"), "$lt" : ISODate("1960-01-01T00:00:00Z") } } ] } returned 109 documents. +[jsTest] ---- + +Histogram estimate: 65.96349869353018. +Heuristic estimate: 67.03119854910238. +Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 65.96349869353018, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 65.96349869353018, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 65.96349869353018 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 67.03119854910238, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 67.03119854910238, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 67.03119854910238 + } + } +} + + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "$and" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "name" : { "$lte" : "John Watson" } } ] }, { "$and" : [ { "favPizzaToppings" : "mushrooms" }, { "likesPizza" : true } ] } ] } returned 126 documents. +[jsTest] ---- + +Histogram estimate: 89.72910201968833. +Heuristic estimate: 11.968780936383162. +Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 89.72910201968833, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 89.72910201968833, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 89.72910201968833 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 11.968780936383162, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 11.968780936383162, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 11.968780936383162 + } + } +} + + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "$and" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "name" : { "$lte" : "John Watson" } } ] }, { "$and" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "name" : { "$gte" : "Kate Knight" } } ] } ] } returned 56 documents. +[jsTest] ---- + +Histogram estimate: 18.45591859401975. +Heuristic estimate: 14.800368851048507. +Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 18.45591859401975, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 18.45591859401975, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 18.45591859401975 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 14.800368851048507, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 14.800368851048507, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 14.800368851048507 + } + } +} + + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "$and" : [ { "lastPizzaShopVisited" : "Zizzi" }, { "name" : { "$lte" : "John Watson" } } ] }, { "favPizzaToppings" : "mushrooms" } ] } returned 126 documents. +[jsTest] ---- + +Histogram estimate: 124.07968100899764. +Heuristic estimate: 22.04774379816937. 
+Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 124.07968100899764, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 124.07968100899764, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 124.07968100899764 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 22.04774379816937, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 22.04774379816937, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 22.04774379816937 + } + } +} + + +[jsTest] ---- +[jsTest] Query: { "$or" : [ { "$and" : [ { "favPizzaToppings" : "mushrooms" }, { "name" : { "$lte" : "John Watson" } } ] }, { "lastPizzaShopVisited" : "Zizzi" } ] } returned 101 documents. +[jsTest] ---- + +Histogram estimate: 99.20855534651972. +Heuristic estimate: 22.04774379816937. +Histogram explain: { + "nodeType" : "Root", + "logicalCE" : 99.20855534651972, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 99.20855534651972, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 99.20855534651972 + } + } +} + +Heuristic explain: { + "nodeType" : "Root", + "logicalCE" : 22.04774379816937, + "child" : { + "nodeType" : "Filter", + "logicalCE" : 22.04774379816937, + "child" : { + "nodeType" : "PhysicalScan", + "logicalCE" : 22.04774379816937 + } + } +} + + [jsTest] ---- [jsTest] Settings after: { "internalQueryFrameworkControl" : "forceBonsai", "ok" : 1 } [jsTest] ---- diff --git a/jstests/query_golden/expected_output/ce_sampled_histogram b/jstests/query_golden/expected_output/ce_sampled_histogram index 23007b8cd7ef0..0e07244e855cb 100644 --- a/jstests/query_golden/expected_output/ce_sampled_histogram +++ b/jstests/query_golden/expected_output/ce_sampled_histogram @@ -1,5 +1,6 @@ setting random seed: 6345 + [jsTest] ---- [jsTest] Settings before: internalQueryCardinalityEstimatorMode: sampling, internalQueryFrameworkControl: forceBonsai [jsTest] ---- @@ -32,27 +33,29 @@ Loading chunk file: jstests/query_golden/libs/data/ce_data_500_5 } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$gte" : 122, "$lte" : 381 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 159.69375639952514, + "logicalCE" : 159.6937563995251, "child" : { "nodeType" : "Filter", - "logicalCE" : 159.69375639952514, + "logicalCE" : 159.6937563995251, "child" : { "nodeType" : "Filter", - "logicalCE" : 159.69375639952514, + "logicalCE" : 159.6937563995251, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 159.69375639952514 + "logicalCE" : 159.6937563995251 } } } } + [jsTest] ---- [jsTest] CE: { "uniform_int_0-1000-1" : { "$gte" : 122, "$lte" : 381 } }, base = 183.77, sample = 159.69, actual = 134 [jsTest] ---- @@ -60,40 +63,43 @@ Loading chunk file: jstests/query_golden/libs/data/ce_data_500_5 Base error: { "qError" : 1.37, "relError" : 0.37, "selError" : 9.95 } Sample error: { "qError" : 1.19, "relError" : 0.19, "selError" : 5.14 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$lt" : 122 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 60.45571245186137, + "logicalCE" : 60.45571245186136, "child" : { "nodeType" : "Filter", - "logicalCE" : 60.45571245186137, + "logicalCE" : 60.45571245186136, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 60.45571245186137 + "logicalCE" : 60.45571245186136 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$lt" : 122 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 56.52173913043479, + "logicalCE" : 56.52173913043478, "child" : { "nodeType" : "Filter", - 
"logicalCE" : 56.52173913043479, + "logicalCE" : 56.52173913043478, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 56.52173913043479 + "logicalCE" : 56.52173913043478 } } } + [jsTest] ---- [jsTest] CE: { "uniform_int_0-1000-1" : { "$lt" : 122 } }, base = 60.46, sample = 56.52, actual = 60 [jsTest] ---- @@ -101,23 +107,25 @@ Sample error: { "qError" : 1.19, "relError" : 0.19, "selError" : 5.14 } Base error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.09 } Sample error: { "qError" : 1.06, "relError" : -0.06, "selError" : -0.7 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$eq" : 122 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 1.1052631578947203, + "logicalCE" : 1.105263157894737, "child" : { "nodeType" : "Filter", - "logicalCE" : 1.1052631578947203, + "logicalCE" : 1.105263157894737, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 1.1052631578947203 + "logicalCE" : 1.105263157894737 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$eq" : 122 } } [jsTest] ---- @@ -135,6 +143,7 @@ Sample error: { "qError" : 1.06, "relError" : -0.06, "selError" : -0.7 } } } + [jsTest] ---- [jsTest] CE: { "uniform_int_0-1000-1" : { "$eq" : 122 } }, base = 1.11, sample = 0, actual = 1 [jsTest] ---- @@ -142,6 +151,7 @@ Sample error: { "qError" : 1.06, "relError" : -0.06, "selError" : -0.7 } Base error: { "qError" : 1.11, "relError" : 0.11, "selError" : 0.02 } Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$gte" : 381, "$lte" : 948 } } [jsTest] ---- @@ -163,6 +173,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$gte" : 381, "$lte" : 948 } } [jsTest] ---- @@ -184,6 +195,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } } } + [jsTest] ---- [jsTest] CE: { "uniform_int_0-1000-1" : { "$gte" : 381, "$lte" : 948 } }, base = 295.64, sample = 315.74, actual = 277 [jsTest] ---- @@ -191,40 +203,43 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } Base error: { "qError" : 1.07, "relError" : 0.07, "selError" : 3.73 } Sample error: { "qError" : 1.14, "relError" : 0.14, "selError" : 7.75 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$lt" : 381 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 194.74074074074076, + "logicalCE" : 194.74074074074073, "child" : { "nodeType" : "Filter", - "logicalCE" : 194.74074074074076, + "logicalCE" : 194.74074074074073, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 194.74074074074076 + "logicalCE" : 194.74074074074073 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$lt" : 381 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 169.56521739130437, + "logicalCE" : 169.56521739130434, "child" : { "nodeType" : "Filter", - "logicalCE" : 169.56521739130437, + "logicalCE" : 169.56521739130434, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 169.56521739130437 + "logicalCE" : 169.56521739130434 } } } + [jsTest] ---- [jsTest] CE: { "uniform_int_0-1000-1" : { "$lt" : 381 } }, base = 194.74, sample = 169.57, actual = 193 [jsTest] ---- @@ -232,23 +247,25 @@ Sample error: { "qError" : 1.14, "relError" : 0.14, "selError" : 7.75 } Base error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.35 } Sample error: { "qError" : 1.14, "relError" : -0.12, "selError" : -4.69 } + [jsTest] ---- 
[jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$eq" : 381 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 1.2592592592592378, + "logicalCE" : 1.2592592592592593, "child" : { "nodeType" : "Filter", - "logicalCE" : 1.2592592592592378, + "logicalCE" : 1.2592592592592593, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 1.2592592592592378 + "logicalCE" : 1.2592592592592593 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "uniform_int_0-1000-1" : { "$eq" : 381 } } [jsTest] ---- @@ -266,6 +283,7 @@ Sample error: { "qError" : 1.14, "relError" : -0.12, "selError" : -4.69 } } } + [jsTest] ---- [jsTest] CE: { "uniform_int_0-1000-1" : { "$eq" : 381 } }, base = 1.26, sample = 0, actual = 1 [jsTest] ---- @@ -273,6 +291,7 @@ Sample error: { "qError" : 1.14, "relError" : -0.12, "selError" : -4.69 } Base error: { "qError" : 1.26, "relError" : 0.26, "selError" : 0.05 } Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$gte" : 469, "$lte" : 613 } } [jsTest] ---- @@ -294,6 +313,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$gte" : 469, "$lte" : 613 } } [jsTest] ---- @@ -315,6 +335,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } } } + [jsTest] ---- [jsTest] CE: { "normal_int_0-1000-1" : { "$gte" : 469, "$lte" : 613 } }, base = 259.28, sample = 240.97, actual = 173 [jsTest] ---- @@ -322,6 +343,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } Base error: { "qError" : 1.5, "relError" : 0.5, "selError" : 17.26 } Sample error: { "qError" : 1.39, "relError" : 0.39, "selError" : 13.59 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$lt" : 469 } } [jsTest] ---- @@ -339,23 +361,25 @@ Sample error: { "qError" : 1.39, "relError" : 0.39, "selError" : 13.59 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$lt" : 469 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 226.08695652173915, + "logicalCE" : 226.08695652173913, "child" : { "nodeType" : "Filter", - "logicalCE" : 226.08695652173915, + "logicalCE" : 226.08695652173913, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 226.08695652173915 + "logicalCE" : 226.08695652173913 } } } + [jsTest] ---- [jsTest] CE: { "normal_int_0-1000-1" : { "$lt" : 469 } }, base = 199, sample = 226.09, actual = 199 [jsTest] ---- @@ -363,40 +387,43 @@ Sample error: { "qError" : 1.39, "relError" : 0.39, "selError" : 13.59 } Base error: { "qError" : 1, "relError" : 0, "selError" : 0 } Sample error: { "qError" : 1.14, "relError" : 0.14, "selError" : 5.42 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$eq" : 469 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 3.0000000000000027, + "logicalCE" : 3, "child" : { "nodeType" : "Filter", - "logicalCE" : 3.0000000000000027, + "logicalCE" : 3, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 3.0000000000000027 + "logicalCE" : 3 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$eq" : 469 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 4.347826086956497, + "logicalCE" : 4.3478260869565215, "child" : { "nodeType" : "Filter", - "logicalCE" : 4.347826086956497, + "logicalCE" : 4.3478260869565215, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 4.347826086956497 + "logicalCE" : 4.3478260869565215 } } } + [jsTest] ---- [jsTest] CE: { 
"normal_int_0-1000-1" : { "$eq" : 469 } }, base = 3, sample = 4.35, actual = 3 [jsTest] ---- @@ -404,6 +431,7 @@ Sample error: { "qError" : 1.14, "relError" : 0.14, "selError" : 5.42 } Base error: { "qError" : 1, "relError" : 0, "selError" : 0 } Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$gte" : 449, "$lte" : 469 } } [jsTest] ---- @@ -425,6 +453,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$gte" : 449, "$lte" : 469 } } [jsTest] ---- @@ -446,6 +475,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 } } } + [jsTest] ---- [jsTest] CE: { "normal_int_0-1000-1" : { "$gte" : 449, "$lte" : 469 } }, base = 162.36, sample = 181.06, actual = 25 [jsTest] ---- @@ -453,6 +483,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 } Base error: { "qError" : 6.49, "relError" : 5.49, "selError" : 27.47 } Sample error: { "qError" : 7.24, "relError" : 6.24, "selError" : 31.21 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$lt" : 449 } } [jsTest] ---- @@ -470,6 +501,7 @@ Sample error: { "qError" : 7.24, "relError" : 6.24, "selError" : 31.21 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$lt" : 449 } } [jsTest] ---- @@ -487,6 +519,7 @@ Sample error: { "qError" : 7.24, "relError" : 6.24, "selError" : 31.21 } } } + [jsTest] ---- [jsTest] CE: { "normal_int_0-1000-1" : { "$lt" : 449 } }, base = 177, sample = 191.3, actual = 177 [jsTest] ---- @@ -494,40 +527,43 @@ Sample error: { "qError" : 7.24, "relError" : 6.24, "selError" : 31.21 } Base error: { "qError" : 1, "relError" : 0, "selError" : 0 } Sample error: { "qError" : 1.08, "relError" : 0.08, "selError" : 2.86 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$eq" : 449 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 3.0000000000000027, + "logicalCE" : 3, "child" : { "nodeType" : "Filter", - "logicalCE" : 3.0000000000000027, + "logicalCE" : 3, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 3.0000000000000027 + "logicalCE" : 3 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "normal_int_0-1000-1" : { "$eq" : 449 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 4.347826086956497, + "logicalCE" : 4.3478260869565215, "child" : { "nodeType" : "Filter", - "logicalCE" : 4.347826086956497, + "logicalCE" : 4.3478260869565215, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 4.347826086956497 + "logicalCE" : 4.3478260869565215 } } } + [jsTest] ---- [jsTest] CE: { "normal_int_0-1000-1" : { "$eq" : 449 } }, base = 3, sample = 4.35, actual = 3 [jsTest] ---- @@ -535,27 +571,29 @@ Sample error: { "qError" : 1.08, "relError" : 0.08, "selError" : 2.86 } Base error: { "qError" : 1, "relError" : 0, "selError" : 0 } Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 438 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 137.8193600333422, + "logicalCE" : 137.81936003334218, "child" : { "nodeType" : "Filter", - "logicalCE" : 137.8193600333422, + "logicalCE" : 137.81936003334218, "child" : { "nodeType" : "Filter", - "logicalCE" : 137.8193600333422, + "logicalCE" : 137.81936003334218, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 137.8193600333422 + "logicalCE" : 137.81936003334218 } 
} } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 438 } } [jsTest] ---- @@ -577,6 +615,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 } } } + [jsTest] ---- [jsTest] CE: { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 438 } }, base = 137.82, sample = 142.59, actual = 102 [jsTest] ---- @@ -584,6 +623,7 @@ Sample error: { "qError" : 1.45, "relError" : 0.45, "selError" : 0.27 } Base error: { "qError" : 1.35, "relError" : 0.35, "selError" : 7.16 } Sample error: { "qError" : 1.4, "relError" : 0.4, "selError" : 8.12 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$lt" : 408 } } [jsTest] ---- @@ -601,6 +641,7 @@ Sample error: { "qError" : 1.4, "relError" : 0.4, "selError" : 8.12 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$lt" : 408 } } [jsTest] ---- @@ -618,6 +659,7 @@ Sample error: { "qError" : 1.4, "relError" : 0.4, "selError" : 8.12 } } } + [jsTest] ---- [jsTest] CE: { "chi2_int_0-1000-1" : { "$lt" : 408 } }, base = 356, sample = 352.17, actual = 356 [jsTest] ---- @@ -625,23 +667,25 @@ Sample error: { "qError" : 1.4, "relError" : 0.4, "selError" : 8.12 } Base error: { "qError" : 1, "relError" : 0, "selError" : 0 } Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$eq" : 408 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 4.0000000000000036, + "logicalCE" : 4, "child" : { "nodeType" : "Filter", - "logicalCE" : 4.0000000000000036, + "logicalCE" : 4, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 4.0000000000000036 + "logicalCE" : 4 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$eq" : 408 } } [jsTest] ---- @@ -659,6 +703,7 @@ Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 } } } + [jsTest] ---- [jsTest] CE: { "chi2_int_0-1000-1" : { "$eq" : 408 } }, base = 4, sample = 0, actual = 4 [jsTest] ---- @@ -666,48 +711,51 @@ Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 } Base error: { "qError" : 1, "relError" : 0, "selError" : 0 } Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 437 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 137.21620895506481, + "logicalCE" : 137.2162089550648, "child" : { "nodeType" : "Filter", - "logicalCE" : 137.21620895506481, + "logicalCE" : 137.2162089550648, "child" : { "nodeType" : "Filter", - "logicalCE" : 137.21620895506481, + "logicalCE" : 137.2162089550648, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 137.21620895506481 + "logicalCE" : 137.2162089550648 } } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 437 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 141.25271704696053, + "logicalCE" : 141.2527170469605, "child" : { "nodeType" : "Filter", - "logicalCE" : 141.25271704696053, + "logicalCE" : 141.2527170469605, "child" : { "nodeType" : "Filter", - "logicalCE" : 141.25271704696053, + "logicalCE" : 141.2527170469605, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 141.25271704696053 + "logicalCE" : 141.2527170469605 } } } } + [jsTest] ---- [jsTest] CE: { "chi2_int_0-1000-1" : { "$gte" : 408, "$lte" : 437 } }, base = 137.22, sample = 141.25, actual = 98 [jsTest] ---- @@ -715,6 +763,7 @@ Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 
} Base error: { "qError" : 1.4, "relError" : 0.4, "selError" : 7.84 } Sample error: { "qError" : 1.44, "relError" : 0.44, "selError" : 8.65 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$lt" : 408 } } [jsTest] ---- @@ -732,6 +781,7 @@ Sample error: { "qError" : 1.44, "relError" : 0.44, "selError" : 8.65 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$lt" : 408 } } [jsTest] ---- @@ -749,6 +799,7 @@ Sample error: { "qError" : 1.44, "relError" : 0.44, "selError" : 8.65 } } } + [jsTest] ---- [jsTest] CE: { "chi2_int_0-1000-1" : { "$lt" : 408 } }, base = 356, sample = 352.17, actual = 356 [jsTest] ---- @@ -756,23 +807,25 @@ Sample error: { "qError" : 1.44, "relError" : 0.44, "selError" : 8.65 } Base error: { "qError" : 1, "relError" : 0, "selError" : 0 } Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$eq" : 408 } } [jsTest] ---- { "nodeType" : "Root", - "logicalCE" : 4.0000000000000036, + "logicalCE" : 4, "child" : { "nodeType" : "Filter", - "logicalCE" : 4.0000000000000036, + "logicalCE" : 4, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 4.0000000000000036 + "logicalCE" : 4 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { "chi2_int_0-1000-1" : { "$eq" : 408 } } [jsTest] ---- @@ -790,6 +843,7 @@ Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 } } } + [jsTest] ---- [jsTest] CE: { "chi2_int_0-1000-1" : { "$eq" : 408 } }, base = 4, sample = 0, actual = 4 [jsTest] ---- @@ -797,6 +851,7 @@ Sample error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.77 } Base error: { "qError" : 1, "relError" : 0, "selError" : 0 } Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -808,21 +863,22 @@ Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 } { "nodeType" : "Root", - "logicalCE" : 196.05850926700424, + "logicalCE" : 196.05850926700427, "child" : { "nodeType" : "Filter", - "logicalCE" : 196.05850926700424, + "logicalCE" : 196.05850926700427, "child" : { "nodeType" : "Filter", - "logicalCE" : 196.05850926700424, + "logicalCE" : 196.05850926700427, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 196.05850926700424 + "logicalCE" : 196.05850926700427 } } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -849,6 +905,7 @@ Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -861,6 +918,7 @@ Sample error: { "qError" : 4, "relError" : -1, "selError" : -0.8 } Base error: { "qError" : 1.57, "relError" : 0.57, "selError" : 14.21 } Sample error: { "qError" : 1.35, "relError" : 0.35, "selError" : 8.63 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -871,17 +929,18 @@ Sample error: { "qError" : 1.35, "relError" : 0.35, "selError" : 8.63 } { "nodeType" : "Root", - "logicalCE" : 89.84799999999998, + "logicalCE" : 89.848, "child" : { "nodeType" : "Filter", - "logicalCE" : 89.84799999999998, + "logicalCE" : 89.848, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 89.84799999999998 + "logicalCE" : 89.848 } } } + [jsTest] 
---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -903,6 +962,7 @@ Sample error: { "qError" : 1.35, "relError" : 0.35, "selError" : 8.63 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -914,6 +974,7 @@ Sample error: { "qError" : 1.35, "relError" : 0.35, "selError" : 8.63 } Base error: { "qError" : 1.02, "relError" : -0.02, "selError" : -0.43 } Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -924,17 +985,18 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 } { "nodeType" : "Root", - "logicalCE" : 1.1999999999999789, + "logicalCE" : 1.2, "child" : { "nodeType" : "Filter", - "logicalCE" : 1.1999999999999789, + "logicalCE" : 1.2, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 1.1999999999999789 + "logicalCE" : 1.2 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -956,6 +1018,7 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -967,6 +1030,7 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 } Base error: { "qError" : 1.2, "relError" : 0.2, "selError" : 0.04 } Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -978,21 +1042,22 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } { "nodeType" : "Root", - "logicalCE" : 141.03563731142566, + "logicalCE" : 141.03563731142563, "child" : { "nodeType" : "Filter", - "logicalCE" : 141.03563731142566, + "logicalCE" : 141.03563731142563, "child" : { "nodeType" : "Filter", - "logicalCE" : 141.03563731142566, + "logicalCE" : 141.03563731142563, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 141.03563731142566 + "logicalCE" : 141.03563731142563 } } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -1019,6 +1084,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -1031,6 +1097,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } Base error: { "qError" : 2.17, "relError" : 1.17, "selError" : 15.21 } Sample error: { "qError" : 2.29, "relError" : 1.29, "selError" : 16.72 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -1041,17 +1108,18 @@ Sample error: { "qError" : 2.29, "relError" : 1.29, "selError" : 16.72 } { "nodeType" : "Root", - "logicalCE" : 89.84799999999998, + "logicalCE" : 89.848, "child" : { "nodeType" : "Filter", - "logicalCE" : 89.84799999999998, + "logicalCE" : 89.848, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 89.84799999999998 + "logicalCE" : 89.848 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] 
"mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -1073,6 +1141,7 @@ Sample error: { "qError" : 2.29, "relError" : 1.29, "selError" : 16.72 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -1084,6 +1153,7 @@ Sample error: { "qError" : 2.29, "relError" : 1.29, "selError" : 16.72 } Base error: { "qError" : 1.02, "relError" : -0.02, "selError" : -0.43 } Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -1094,17 +1164,18 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 } { "nodeType" : "Root", - "logicalCE" : 1.1999999999999789, + "logicalCE" : 1.2, "child" : { "nodeType" : "Filter", - "logicalCE" : 1.1999999999999789, + "logicalCE" : 1.2, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 1.1999999999999789 + "logicalCE" : 1.2 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -1126,6 +1197,7 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_uniform_int_0-1000-1_uniform_int_7000-8000-1_normal_int_0-10000-10_" : { @@ -1137,6 +1209,7 @@ Sample error: { "qError" : 1.04, "relError" : 0.04, "selError" : 0.73 } Base error: { "qError" : 1.2, "relError" : 0.2, "selError" : 0.04 } Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1163,6 +1236,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1189,6 +1263,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1201,6 +1276,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } Base error: { "qError" : 1.48, "relError" : 0.48, "selError" : 21.38 } Sample error: { "qError" : 1.33, "relError" : 0.33, "selError" : 14.59 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1211,17 +1287,18 @@ Sample error: { "qError" : 1.33, "relError" : 0.33, "selError" : 14.59 } { "nodeType" : "Root", - "logicalCE" : 122.37898193760266, + "logicalCE" : 122.37898193760263, "child" : { "nodeType" : "Filter", - "logicalCE" : 122.37898193760266, + "logicalCE" : 122.37898193760263, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 122.37898193760266 + "logicalCE" : 122.37898193760263 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1232,17 +1309,18 @@ Sample error: { "qError" : 1.33, "relError" : 0.33, "selError" : 14.59 } { "nodeType" : "Root", - "logicalCE" : 169.56521739130437, + "logicalCE" : 169.56521739130434, "child" : { "nodeType" : "Filter", - "logicalCE" : 169.56521739130437, + "logicalCE" : 169.56521739130434, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 169.56521739130437 + "logicalCE" : 169.56521739130434 } } } + 
[jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1254,6 +1332,7 @@ Sample error: { "qError" : 1.33, "relError" : 0.33, "selError" : 14.59 } Base error: { "qError" : 1.31, "relError" : -0.24, "selError" : -7.52 } Sample error: { "qError" : 1.06, "relError" : 0.06, "selError" : 1.91 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1264,17 +1343,18 @@ Sample error: { "qError" : 1.06, "relError" : 0.06, "selError" : 1.91 } { "nodeType" : "Root", - "logicalCE" : 1.1448275862068757, + "logicalCE" : 1.1448275862068966, "child" : { "nodeType" : "Filter", - "logicalCE" : 1.1448275862068757, + "logicalCE" : 1.1448275862068966, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 1.1448275862068757 + "logicalCE" : 1.1448275862068966 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1296,6 +1376,7 @@ Sample error: { "qError" : 1.06, "relError" : 0.06, "selError" : 1.91 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1307,6 +1388,7 @@ Sample error: { "qError" : 1.06, "relError" : 0.06, "selError" : 1.91 } Base error: { "qError" : 1.14, "relError" : 0.14, "selError" : 0.03 } Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1333,6 +1415,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1344,21 +1427,22 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } { "nodeType" : "Root", - "logicalCE" : 156.5311906409129, + "logicalCE" : 156.53119064091288, "child" : { "nodeType" : "Filter", - "logicalCE" : 156.5311906409129, + "logicalCE" : 156.53119064091288, "child" : { "nodeType" : "Filter", - "logicalCE" : 156.5311906409129, + "logicalCE" : 156.53119064091288, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 156.5311906409129 + "logicalCE" : 156.53119064091288 } } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1371,6 +1455,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } Base error: { "qError" : 1.3, "relError" : 0.29, "selError" : 5.19 } Sample error: { "qError" : 1.78, "relError" : 0.78, "selError" : 13.71 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1381,17 +1466,18 @@ Sample error: { "qError" : 1.78, "relError" : 0.78, "selError" : 13.71 } { "nodeType" : "Root", - "logicalCE" : 72.42088669950742, + "logicalCE" : 72.42088669950739, "child" : { "nodeType" : "Filter", - "logicalCE" : 72.42088669950742, + "logicalCE" : 72.42088669950739, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 72.42088669950742 + "logicalCE" : 72.42088669950739 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1402,17 +1488,18 @@ Sample error: { "qError" : 1.78, "relError" : 0.78, "selError" : 13.71 } { "nodeType" : "Root", - "logicalCE" : 
73.91304347826089, + "logicalCE" : 73.91304347826087, "child" : { "nodeType" : "Filter", - "logicalCE" : 73.91304347826089, + "logicalCE" : 73.91304347826087, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 73.91304347826089 + "logicalCE" : 73.91304347826087 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1424,6 +1511,7 @@ Sample error: { "qError" : 1.78, "relError" : 0.78, "selError" : 13.71 } Base error: { "qError" : 1.01, "relError" : -0.01, "selError" : -0.12 } Sample error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.18 } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1434,17 +1522,18 @@ Sample error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.18 } { "nodeType" : "Root", - "logicalCE" : 1.1448275862068757, + "logicalCE" : 1.1448275862068966, "child" : { "nodeType" : "Filter", - "logicalCE" : 1.1448275862068757, + "logicalCE" : 1.1448275862068966, "child" : { "nodeType" : "PhysicalScan", - "logicalCE" : 1.1448275862068757 + "logicalCE" : 1.1448275862068966 } } } + [jsTest] ---- [jsTest] Query: ce_data_500 { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1466,6 +1555,7 @@ Sample error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.18 } } } + [jsTest] ---- [jsTest] CE: { [jsTest] "mixdist_normal_int_0-1000-1_normal_int_0-10000-10_normal_int_0-100000-100_" : { @@ -1477,6 +1567,7 @@ Sample error: { "qError" : 1.01, "relError" : 0.01, "selError" : 0.18 } Base error: { "qError" : 1.14, "relError" : 0.14, "selError" : 0.03 } Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } + [jsTest] ---- [jsTest] Average errors (30 queries): [jsTest] ---- @@ -1484,6 +1575,7 @@ Sample error: { "qError" : 1, "relError" : -1, "selError" : -0.2 } Average base error: { "absError" : NaN, "relError" : 0.35, "selError" : 4.05 } Average sample error: { "absError" : NaN, "relError" : 0.12, "selError" : 4.36 } + [jsTest] ---- [jsTest] Settings after: { "internalQueryFrameworkControl" : "forceBonsai", "ok" : 1 } [jsTest] ---- diff --git a/jstests/query_golden/expected_output/eq b/jstests/query_golden/expected_output/eq index 60f017eb31c0b..b8be01b6ff0c1 100644 --- a/jstests/query_golden/expected_output/eq +++ b/jstests/query_golden/expected_output/eq @@ -225,6 +225,7 @@ { "a" : { "$minKey" : 1 } } Collection count: 213 + [jsTest] ---- [jsTest] Query: { "find" : "query_golden_eq", "filter" : { "a" : { "$eq" : { "$minKey" : 1 } } }, "projection" : { "_id" : 0 } } [jsTest] ---- diff --git a/jstests/query_golden/expected_output/exclusion_projection b/jstests/query_golden/expected_output/exclusion_projection index 25fff0934575f..ecaaf5c13aee5 100644 --- a/jstests/query_golden/expected_output/exclusion_projection +++ b/jstests/query_golden/expected_output/exclusion_projection @@ -59,6 +59,7 @@ { "_id" : 9, "a" : { "d" : 1 } } Collection count: 53 + [jsTest] ---- [jsTest] Query: [ { "$project" : { "a" : 0 } } ] [jsTest] ---- @@ -603,6 +604,7 @@ Collection count: 53 { "_id" : { "x" : 1 }, "y" : 2 } Collection count: 7 + [jsTest] ---- [jsTest] Query: [ { "$project" : { "_id" : 0 } } ] [jsTest] ---- diff --git a/jstests/query_golden/expected_output/extraneous_project b/jstests/query_golden/expected_output/extraneous_project index bf06bad74e956..971bea9604f32 100644 --- a/jstests/query_golden/expected_output/extraneous_project +++ 
b/jstests/query_golden/expected_output/extraneous_project @@ -6,7 +6,8 @@ nReturned: 0 -Plan skeleton: { +Plan skeleton: +{ "queryPlanner" : { "winningPlan" : { "optimizerPlan" : { @@ -31,13 +32,15 @@ Plan skeleton: { } } + [jsTest] ---- [jsTest] Query: [ { "$match" : { "username" : "/^user8/" } }, { "$group" : { "_id" : 1, "count" : { "$sum" : 1 } } } ] [jsTest] ---- nReturned: 0 -Plan skeleton: { +Plan skeleton: +{ "queryPlanner" : { "winningPlan" : { "optimizerPlan" : { @@ -60,4 +63,4 @@ Plan skeleton: { } } } -} \ No newline at end of file +} diff --git a/jstests/query_golden/expected_output/inclusion_projection b/jstests/query_golden/expected_output/inclusion_projection index 81e63e80ac1f3..18e049074041d 100644 --- a/jstests/query_golden/expected_output/inclusion_projection +++ b/jstests/query_golden/expected_output/inclusion_projection @@ -59,6 +59,7 @@ { "_id" : 9, "a" : { "d" : 1 } } Collection count: 53 + [jsTest] ---- [jsTest] Creating indexes: [jsTest] ---- @@ -791,6 +792,7 @@ Collection count: 53 { "_id" : { "x" : 1 }, "y" : 2 } Collection count: 7 + [jsTest] ---- [jsTest] Creating indexes: [jsTest] ---- diff --git a/jstests/query_golden/expected_output/match_with_and_or b/jstests/query_golden/expected_output/match_with_and_or index 4cf3b70cc0856..a9e8088fa6083 100644 --- a/jstests/query_golden/expected_output/match_with_and_or +++ b/jstests/query_golden/expected_output/match_with_and_or @@ -23,6 +23,7 @@ { "_id" : 9, "a" : [ 1, 2, { "b" : 1 }, { "b" : 2 } ], "x" : 1 } Collection count: 17 + [jsTest] ---- [jsTest] Creating indexes: [jsTest] ---- diff --git a/jstests/query_golden/expected_output/match_with_exists b/jstests/query_golden/expected_output/match_with_exists index d2ed49307fb03..1658eba131092 100644 --- a/jstests/query_golden/expected_output/match_with_exists +++ b/jstests/query_golden/expected_output/match_with_exists @@ -13,6 +13,7 @@ { "_id" : 6, "a" : [ { "b" : 4 } ] } Collection count: 7 + [jsTest] ---- [jsTest] Query: [ { "$match" : { "a" : { "$exists" : true } } } ] [jsTest] ---- @@ -76,6 +77,7 @@ Collection count: 7 { "_id" : 1, "a" : [ ] } Collection count: 1 + [jsTest] ---- [jsTest] Query: [ { "$match" : { "a" : { "$exists" : true } } } ] [jsTest] ---- @@ -97,6 +99,7 @@ Collection count: 1 { "_id" : 1, "a" : false } Collection count: 1 + [jsTest] ---- [jsTest] Query: [ { "$match" : { "a" : { "$exists" : true } } } ] [jsTest] ---- @@ -118,6 +121,7 @@ Collection count: 1 { "_id" : 1, "a" : [ { "b" : 2 }, { "a" : 1 } ] } Collection count: 1 + [jsTest] ---- [jsTest] Query: [ { "$match" : { "a.a" : { "$exists" : true } } } ] [jsTest] ---- @@ -146,6 +150,7 @@ Collection count: 1 { "_id" : 1, "a" : [ [ { "b" : 1 } ] ] } Collection count: 1 + [jsTest] ---- [jsTest] Query: [ { "$match" : { "a.b" : { "$exists" : false } } } ] [jsTest] ---- @@ -168,6 +173,7 @@ Collection count: 1 { "_id" : 2, "a" : [ 2 ] } Collection count: 2 + [jsTest] ---- [jsTest] Query: [ { "$match" : { "a" : { "$elemMatch" : { "$exists" : true } } } } ] [jsTest] ---- diff --git a/jstests/query_golden/expected_output/match_with_in b/jstests/query_golden/expected_output/match_with_in index 82233b6f8e81d..8e10f48d3ad1c 100644 --- a/jstests/query_golden/expected_output/match_with_in +++ b/jstests/query_golden/expected_output/match_with_in @@ -20,6 +20,7 @@ { "_id" : 9, "a" : { "c" : 1 } } Collection count: 14 + [jsTest] ---- [jsTest] Query: [ { "$match" : { "a" : { "$in" : [ null ] } } } ] [jsTest] ---- diff --git a/jstests/query_golden/expected_output/non_multikey_paths 
b/jstests/query_golden/expected_output/non_multikey_paths index 2d6fa83adb830..a4c99aea847bb 100644 --- a/jstests/query_golden/expected_output/non_multikey_paths +++ b/jstests/query_golden/expected_output/non_multikey_paths @@ -4,48 +4,57 @@ [jsTest] Query: [ { "$match" : { "one.one.one.one" : 2 } } ] [jsTest] ---- -Leaf stage: { +Leaf stage: +{ "nodeType" : "IndexScan", "indexDefName" : "one.one.one.one_1", "interval" : "[ 2, 2 ]" } + [jsTest] ---- [jsTest] Query: [ { "$match" : { "one.one.one.many" : 2 } } ] [jsTest] ---- -Leaf stage: { +Leaf stage: +{ "nodeType" : "IndexScan", "indexDefName" : "one.one.one.many_1", "interval" : "[ 2, 2 ]" } + [jsTest] ---- [jsTest] Query: [ { "$match" : { "many.one.one.one" : 2 } } ] [jsTest] ---- -Leaf stage: { +Leaf stage: +{ "nodeType" : "IndexScan", "indexDefName" : "many.one.one.one_1", "interval" : "[ 2, 2 ]" } + [jsTest] ---- [jsTest] Query: [ { "$match" : { "many.one.one.many" : 2 } } ] [jsTest] ---- -Leaf stage: { +Leaf stage: +{ "nodeType" : "IndexScan", "indexDefName" : "many.one.one.many_1", "interval" : "[ 2, 2 ]" } + [jsTest] ---- [jsTest] Query: [ { "$match" : { "many.many.many.many" : 2 } } ] [jsTest] ---- -Leaf stage: { +Leaf stage: +{ "nodeType" : "IndexScan", "indexDefName" : "many.many.many.many_1", "interval" : "[ 2, 2 ]" -} \ No newline at end of file +} diff --git a/jstests/query_golden/expected_output/not_pushdown b/jstests/query_golden/expected_output/not_pushdown index 10b9235860c40..bf222aeec88f4 100644 --- a/jstests/query_golden/expected_output/not_pushdown +++ b/jstests/query_golden/expected_output/not_pushdown @@ -5,46 +5,59 @@ [jsTest] note: Should be optimized to Neq [jsTest] ---- -Operators used: [ "Neq" ] +Operators used: +[ "Neq" ] + [jsTest] ---- [jsTest] Query: [ { "$match" : { "one.one.one.many" : { "$ne" : 7 } } } ] [jsTest] note: Should stay as Not Traverse Eq [jsTest] ---- -Operators used: [ "Not", "Eq" ] +Operators used: +[ "Not", "Eq" ] + [jsTest] ---- [jsTest] Query: [ { "$match" : { "many.one.one.one" : { "$ne" : 7 } } } ] [jsTest] note: Should stay as Not Traverse Eq [jsTest] ---- -Operators used: [ "Not", "Eq" ] +Operators used: +[ "Not", "Eq" ] + [jsTest] ---- [jsTest] Query: [ { "$match" : { "many.one.one.many" : { "$ne" : 7 } } } ] [jsTest] note: Should stay as Not Traverse Eq [jsTest] ---- -Operators used: [ "Not", "Eq" ] +Operators used: +[ "Not", "Eq" ] + [jsTest] ---- [jsTest] Query: [ { "$match" : { "many.many.many.many" : { "$ne" : 7 } } } ] [jsTest] note: Should stay as Not Traverse Eq [jsTest] ---- -Operators used: [ "Not", "Eq" ] +Operators used: +[ "Not", "Eq" ] + [jsTest] ---- [jsTest] Query: [ { "$match" : { "many" : { "$elemMatch" : { "one.one.one" : { "$ne" : 7 } } } } } ] [jsTest] note: Should be optimized to Neq [jsTest] ---- -Operators used: [ "Neq" ] +Operators used: +[ "Neq" ] + [jsTest] ---- [jsTest] Query: [ { "$match" : { "many.one" : { "$elemMatch" : { "one.one" : { "$ne" : 7 } } } } } ] [jsTest] note: Should be optimized to Neq [jsTest] ---- -Operators used: [ "Neq" ] \ No newline at end of file +Operators used: +[ "Neq" ] diff --git a/jstests/query_golden/expected_output/null_missing b/jstests/query_golden/expected_output/null_missing index 6f0598ae154e5..c6538de172adf 100644 --- a/jstests/query_golden/expected_output/null_missing +++ b/jstests/query_golden/expected_output/null_missing @@ -5,7 +5,8 @@ [jsTest] ---- nReturned: 3 -Plan skeleton: { +Plan skeleton: +{ "queryPlanner" : { "winningPlan" : { "optimizerPlan" : { @@ -21,12 +22,14 @@ Plan skeleton: { } } + [jsTest] ---- 
[jsTest] Index on { "a.b" : 1 }. Query: [ { "$match" : { "a.b" : null } } ] [jsTest] ---- nReturned: 3 -Plan skeleton: { +Plan skeleton: +{ "queryPlanner" : { "winningPlan" : { "optimizerPlan" : { @@ -46,4 +49,4 @@ Plan skeleton: { } } } -} \ No newline at end of file +} diff --git a/jstests/query_golden/extraneous_project.js b/jstests/query_golden/extraneous_project.js index 69aa4c6048815..1ae3a65ab3e24 100644 --- a/jstests/query_golden/extraneous_project.js +++ b/jstests/query_golden/extraneous_project.js @@ -6,10 +6,8 @@ * requires_cqf, * ] */ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton. +import {show} from "jstests/libs/golden_test.js"; +import {getPlanSkeleton} from "jstests/libs/optimizer_utils.js"; db.setLogLevel(4, "query"); @@ -39,4 +37,3 @@ run([ ]); run([{$match: {username: "/^user8/"}}, {$group: {_id: 1, count: {$sum: 1}}}]); -})(); diff --git a/jstests/query_golden/inclusion_projection.js b/jstests/query_golden/inclusion_projection.js index 295785e82fb6d..abee73f3568de 100644 --- a/jstests/query_golden/inclusion_projection.js +++ b/jstests/query_golden/inclusion_projection.js @@ -3,10 +3,11 @@ * jstests/cqf/projection.js; both tests will exist pending a decision about the future of golden * jstesting for CQF. */ - -(function() { -"use strict"; -load("jstests/query_golden/libs/projection_helpers.js"); +import { + getIdProjectionDocs, + getProjectionDocs, + runProjectionsAgainstColl +} from "jstests/query_golden/libs/projection_helpers.js"; const coll = db.cqf_inclusion_project; @@ -42,4 +43,3 @@ const idInclusionProjSpecs = [ ]; const idIndexes = [{"_id.a": 1}, {"_id.a": 1, "_id.b": 1}, {"_id.a.b": 1}]; runProjectionsAgainstColl(coll, getIdProjectionDocs(), idIndexes, idInclusionProjSpecs); -}()); diff --git a/jstests/query_golden/libs/ce_data.js b/jstests/query_golden/libs/ce_data.js index eb899d047e9b4..c9b262933b697 100644 --- a/jstests/query_golden/libs/ce_data.js +++ b/jstests/query_golden/libs/ce_data.js @@ -1,10 +1,11 @@ // Small data generator for the purpose of developing the test framework. -const alphabet = "abcdefghijklmnopqrstuvwxyz"; -const len = alphabet.length; +export const alphabet = "abcdefghijklmnopqrstuvwxyz"; + +export const len = alphabet.length; // Returns pseudo-random string where the symbols and the length are functions of the parameter n. -function genRandomString(n) { +export function genRandomString(n) { let strLen = n % 4 + 1; let str = ""; let i = 0; @@ -15,11 +16,11 @@ function genRandomString(n) { return str; } -const seedArray = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 19, 20]; -const arrLen = seedArray.length; +export const seedArray = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 18, 19, 20]; +export const arrLen = seedArray.length; // Returns pseudo-random array where the elements and the length are functions of the parameter n. -function genRandomArray(n) { +export function genRandomArray(n) { let aLen = (7 * n) % 5 + 1; let start = (13 * n) % arrLen; return seedArray.slice(start, start + aLen); @@ -29,14 +30,14 @@ function genRandomArray(n) { * Returns documents for cardinality estimation tests. 
*/ -function getCEDocs() { +export function getCEDocs() { return Array.from( {length: 10}, (_, i) => ({_id: i, a: i + 10, b: genRandomString(i), c_int: genRandomArray(i), mixed: i * 11})); } -function getCEDocs1() { +export function getCEDocs1() { return Array.from({length: 10}, (_, i) => ({ _id: i + 10, a: i + 25, diff --git a/jstests/query_golden/libs/compute_errors.js b/jstests/query_golden/libs/compute_errors.js index 7e4bcf966d43b..1c06853eb571a 100644 --- a/jstests/query_golden/libs/compute_errors.js +++ b/jstests/query_golden/libs/compute_errors.js @@ -1,10 +1,12 @@ +import {round2} from "jstests/libs/optimizer_utils.js"; + /** * Compute cardinality estimation errors for a testcase and CE strategy. * Example testcase: * { _id: 2, pipeline: [...], nReturned: 2, "heuristic": 4.47, "histogram": 2, ...} * Returns : {"qError": 2.23, "relError": 1.23, "selError": 12.35} */ -function computeStrategyErrors(testcase, strategy, collSize) { +export function computeStrategyErrors(testcase, strategy, collSize) { const absError = testcase[strategy] - testcase.nReturned; let relError = 0.0; if (testcase.nReturned > 0) { @@ -26,7 +28,7 @@ function computeStrategyErrors(testcase, strategy, collSize) { /** * Compute cardinality estimation errors for a testcase for all CE strategies. */ -function computeAndPrintErrors(testcase, ceStrategies, collSize, isComplex) { +export function computeAndPrintErrors(testcase, ceStrategies, collSize, isComplex) { let errorDoc = {_id: testcase._id, qtype: testcase.qtype}; if (isComplex == true) { errorDoc["numberOfTerms"] = testcase.numberOfTerms; @@ -46,7 +48,7 @@ function computeAndPrintErrors(testcase, ceStrategies, collSize, isComplex) { print(`${strategy}: ${testcase[strategy]} `); print(`QError: ${errors["qError"]}, RelError: ${errors["relError"]}, SelError: ${ errors["selError"]}%\n`); - duration = 'duration_' + strategy; + const duration = 'duration_' + strategy; errorDoc[duration] = testcase[duration]; }); return errorDoc; @@ -55,7 +57,7 @@ function computeAndPrintErrors(testcase, ceStrategies, collSize, isComplex) { /** * Compute CE errors for each query and populate the error collection 'errorColl'. */ -function populateErrorCollection(errorColl, testCases, ceStrategies, collSize, isComplex) { +export function populateErrorCollection(errorColl, testCases, ceStrategies, collSize, isComplex) { for (const testcase of testCases) { jsTestLog(`Query ${testcase._id}: ${tojsononeline(testcase.pipeline)}`); print(`Actual cardinality: ${testcase.nReturned}\n`); @@ -69,7 +71,7 @@ function populateErrorCollection(errorColl, testCases, ceStrategies, collSize, i * Given an array of fields on which we want to perform $group, return an expression computing the * group key. */ -function makeGroupKey(groupFields) { +export function makeGroupKey(groupFields) { let args = []; for (let i = 0; i < groupFields.length; i++) { args.push({$toString: "$" + groupFields[i]}); @@ -83,7 +85,7 @@ function makeGroupKey(groupFields) { /** * Aggregate errors in the 'errorColl' on the 'groupFields' for each CE strategy. */ -function aggregateErrorsPerCategory(errorColl, groupFields, ceStrategies) { +export function aggregateErrorsPerCategory(errorColl, groupFields, ceStrategies) { const groupKey = makeGroupKey(groupFields); jsTestLog(`Mean errors per ${tojsononeline(groupFields)}:`); for (const strategy of ceStrategies) { @@ -136,7 +138,7 @@ function aggregateErrorsPerCategory(errorColl, groupFields, ceStrategies) { * Aggregate errors in the 'errorColl' per CE strategy. 
If a predicate is provided * aggregate only the error documents which satisfy the predicate. */ -function aggregateErrorsPerStrategy(errorColl, ceStrategies, predicate = {}) { +export function aggregateErrorsPerStrategy(errorColl, ceStrategies, predicate = {}) { const msg = (Object.keys(predicate).length == 0) ? "all queries" : `predicate ${tojsononeline(predicate)}:`; jsTestLog(`Mean errors per strategy for ${msg}:`); @@ -184,7 +186,7 @@ function aggregateErrorsPerStrategy(errorColl, ceStrategies, predicate = {}) { } } -function aggegateOptimizationTimesPerStrategy(errorColl, ceStrategies) { +export function aggegateOptimizationTimesPerStrategy(errorColl, ceStrategies) { print("Average optimization time per strategy:"); for (const strategy of ceStrategies) { const strategyDuration = "$" + @@ -214,7 +216,8 @@ function aggegateOptimizationTimesPerStrategy(errorColl, ceStrategies) { /** * Find top 10 inacurate estimates for a strategy and an error field. */ -function printQueriesWithBadAccuracy(errorColl, testCases, strategy, errorField, count = 10) { +export function printQueriesWithBadAccuracy( + errorColl, testCases, strategy, errorField, count = 10) { const errorFieldName = strategy + "." + errorField; const res = errorColl .aggregate([ @@ -230,7 +233,8 @@ function printQueriesWithBadAccuracy(errorColl, testCases, strategy, errorField, for (const doc of res) { const i = doc["_id"]; const test = testCases[i]; - print(`Id: ${test._id}: ${tojsononeline(test.pipeline)}, qtype: ${test.qtype}, data type: ${test.dtype}, -cardinality: ${test.nReturned}, ${strategy} estimation: ${test[strategy]}, errors: ${tojsononeline(doc[strategy])}\n`); + print(`Id: ${test._id}: ${tojsononeline(test.pipeline)}, qtype: ${test.qtype}, data type: ${ + test.dtype}, \ncardinality: ${test.nReturned}, ${strategy} estimation: ${ + test[strategy]}, errors: ${tojsononeline(doc[strategy])}\n`); } } diff --git a/jstests/query_golden/libs/data/ce_accuracy_test.data b/jstests/query_golden/libs/data/ce_accuracy_test.data index 5698824c8ee4c..3f2c1bb3a69e0 100644 --- a/jstests/query_golden/libs/data/ce_accuracy_test.data +++ b/jstests/query_golden/libs/data/ce_accuracy_test.data @@ -1,2 +1,2 @@ // This is a generated file. -const chunkNames = ['ce_data_500_1','ce_data_500_2','ce_data_500_3','ce_data_500_4','ce_data_500_5']; \ No newline at end of file +const chunkNames = ['ce_data_500_1','ce_data_500_2','ce_data_500_3','ce_data_500_4','ce_data_500_5']; diff --git a/jstests/query_golden/libs/data/ce_accuracy_test.schema b/jstests/query_golden/libs/data/ce_accuracy_test.schema index 40cef37674879..60513d5c65875 100644 --- a/jstests/query_golden/libs/data/ce_accuracy_test.schema +++ b/jstests/query_golden/libs/data/ce_accuracy_test.schema @@ -457,4 +457,4 @@ const dbMetadata = [ "compound_indexes": [], "cardinality": 500 } -]; \ No newline at end of file +]; diff --git a/jstests/query_golden/libs/example_data.js b/jstests/query_golden/libs/example_data.js index 06629907cbb65..027f492fbd82e 100644 --- a/jstests/query_golden/libs/example_data.js +++ b/jstests/query_golden/libs/example_data.js @@ -3,7 +3,7 @@ // Generates interesting "leaf" values: values that don't contain other values. // This includes [] and {}. -function leafs() { +export function leafs() { // See bsontypes.h or https://bsonspec.org/ for a complete list of BSON types. // Not every type is represented here. return [ @@ -120,16 +120,16 @@ function leafs() { // Documents with (at most) a single field with the given name. 
// Includes the "missing value" by including one empty doc. -function unaryDocs(fieldname, values) { +export function unaryDocs(fieldname, values) { return values.map(v => ({[fieldname]: v})); } // Arrays with exactly one element. -function unaryArrays(values) { +export function unaryArrays(values) { return values.map(v => [v]); } -function smallDocs() { +export function smallDocs() { let values = leafs(); values = values.concat(unaryDocs('x', values)).concat(unaryArrays(values)); return unaryDocs('a', values); @@ -137,7 +137,7 @@ function smallDocs() { // Prepend an '_id' field to each document, numbered sequentially from 0. // Preserves any existing '_id' value, but always moves that field to the beginning. -function sequentialIds(docs) { +export function sequentialIds(docs) { let i = 0; return docs.map(d => Object.merge({_id: i++}, d)); } diff --git a/jstests/query_golden/libs/generate_queries.js b/jstests/query_golden/libs/generate_queries.js index 334bb23f0671e..66358b3da687c 100644 --- a/jstests/query_golden/libs/generate_queries.js +++ b/jstests/query_golden/libs/generate_queries.js @@ -1,11 +1,11 @@ /** * Helper functions for generating of queries over a collection. */ -function makeMatchPredicate(field, boundary, compOp) { +export function makeMatchPredicate(field, boundary, compOp) { return {"$match": {[field]: {[compOp]: boundary}}}; } -function makeRangePredicate(field, op1, bound1, op2, bound2, isElemMatch = false) { +export function makeRangePredicate(field, op1, bound1, op2, bound2, isElemMatch = false) { if (isElemMatch) { return {"$match": {[field]: {"$elemMatch": {[op1]: bound1, [op2]: bound2}}}}; } @@ -18,7 +18,7 @@ function makeRangePredicate(field, op1, bound1, op2, bound2, isElemMatch = false * explosion in the number of predicates we create all comparison predicates only for 25% of the * query values, while for the other 75% we pick one comparison operator in a round-robin fashion. */ -function generateComparisons(field, boundaries, fieldType) { +export function generateComparisons(field, boundaries, fieldType) { let predicates = []; const compOps = ["$eq", "$lt", "$lte", "$gt", "$gte"]; // Index over boundaries. @@ -56,10 +56,10 @@ function generateComparisons(field, boundaries, fieldType) { return docs; } -const min_char_code = '0'.codePointAt(0); -const max_char_code = '~'.codePointAt(0); +export const min_char_code = '0'.codePointAt(0); +export const max_char_code = '~'.codePointAt(0); -function nextChar(thisChar, distance) { +export function nextChar(thisChar, distance) { const number_of_chars = max_char_code - min_char_code + 1; const char_code = thisChar.codePointAt(0); assert(min_char_code <= char_code <= max_char_code, "char is out of range"); @@ -73,7 +73,7 @@ function nextChar(thisChar, distance) { * Produces a string value at some distance from the argument string. * distance: "small", "middle", "large". 
*/ -function nextStr(str, distance) { +export function nextStr(str, distance) { var res = 'nextStrUndefined'; const spec = {"small": 3, "medium": 2, "large": 1}; if (str.length == 0) { @@ -90,8 +90,8 @@ function nextStr(str, distance) { let newStr0 = str.slice(0, pos); let nextCh = nextChar(str[pos], 4 - spec[distance] /*char distance*/); - newStr1 = newStr0 + nextCh; - newStr = newStr1 + str.slice(pos + 1, str.length); + const newStr1 = newStr0 + nextCh; + const newStr = newStr1 + str.slice(pos + 1, str.length); assert(newStr.indexOf("NaN") == -1, `Found NaN with inputs: newStr=${newStr}, str=${str}, distance=${distance}; pos=${ pos}, nextCh=${nextCh}, newStr0=${newStr0}, newStr1=${newStr1}`); @@ -110,7 +110,7 @@ function nextStr(str, distance) { * types both low and upper bounds are taken from the 'values' array and rangeSize is the distance * they are apart from each other. */ -function generateRanges(values, fieldType, rangeSize) { +export function generateRanges(values, fieldType, rangeSize) { let ranges = []; if (fieldType == 'integer' || fieldType == 'double') { for (const val of values) { @@ -118,7 +118,7 @@ function generateRanges(values, fieldType, rangeSize) { } } else if (fieldType == 'string') { for (const val of values) { - nanPos = val.indexOf("NaN"); + const nanPos = val.indexOf("NaN"); assert(nanPos == -1, `Found NaN in values: ${values}, ${val}, ${nanPos}`); var nextVar = nextStr(val, rangeSize); assert(nextVar != 'nextStrUndefined', @@ -155,7 +155,7 @@ function generateRanges(values, fieldType, rangeSize) { * Split an ordered array of values into sub-arrays of the same type. * Example: [0, 25, 'an', 'mac', 'zen'] -> [[0, 25], ['an', 'mac', 'zen']]. */ -function splitValuesPerType(values) { +export function splitValuesPerType(values) { let tp = typeof values[0]; let changePos = [0]; let i = 1; @@ -176,10 +176,10 @@ function splitValuesPerType(values) { return typedValues; } -function getTypeFromFieldName(fieldName) { +export function getTypeFromFieldName(fieldName) { const fieldMeta = fieldName.split("_"); let elemType = undefined; - for (fieldPart of fieldMeta) { + for (let fieldPart of fieldMeta) { if (fieldPart == "int") { elemType = "integer"; } else if (fieldPart == "dbl") { @@ -202,7 +202,7 @@ function getTypeFromFieldName(fieldName) { * in the 'queryValues' document: {values: [1, 15, 37, 72, 100], min: 1, max: 100}. The 'values' * array is sorted. 
*/ -function generateRangePredicates(field, queryValues, fieldType) { +export function generateRangePredicates(field, queryValues, fieldType) { const querySpecs = {"small": 0.001, "medium": 0.01, "large": 0.1}; const opOptions = [["$gt", "$lt"], ["$gt", "$lte"], ["$gte", "$lt"], ["$gte", "$lte"]]; @@ -239,7 +239,7 @@ function generateRangePredicates(field, queryValues, fieldType) { ranges.forEach(function(range) { assert(range.length == 2); let [op1, op2] = opOptions[j]; - pred = makeRangePredicate(field, op1, range[0], op2, range[1]); + let pred = makeRangePredicate(field, op1, range[0], op2, range[1]); const doc = { "pipeline": [pred], "qtype": qSize + " range", @@ -249,7 +249,7 @@ function generateRangePredicates(field, queryValues, fieldType) { }; docs.push(doc); if (fieldType == 'array' && range[0] <= range[1]) { - pred = makeRangePredicate(field, op1, range[0], op2, range[1], true); + let pred = makeRangePredicate(field, op1, range[0], op2, range[1], true); const doc = { "pipeline": [pred], "qtype": qSize + " range", @@ -269,7 +269,7 @@ function generateRangePredicates(field, queryValues, fieldType) { /** * Helper function to extract positions for a sample of size n from a collection. */ -function selectSamplePos(collSize, n) { +export function selectSamplePos(collSize, n) { let samplePos = []; let step = Math.round(collSize / n); let offset = n * step - collSize; @@ -284,11 +284,11 @@ function selectSamplePos(collSize, n) { return samplePos; } -function selectSample(coll, samplePos) { +export function selectSample(coll, samplePos) { return coll.aggregate([{$match: {"_id": {$in: samplePos}}}]).toArray(); } -function selectFieldValues(sample, field) { +export function selectFieldValues(sample, field) { let values = []; for (const doc of sample) { values.push(doc[field]); @@ -299,7 +299,7 @@ function selectFieldValues(sample, field) { /** * Selects few values from histogram bucket boundaries. */ -function selectHistogramBounds(statsColl, field, fieldType) { +export function selectHistogramBounds(statsColl, field, fieldType) { let values = []; let stats = statsColl.find({"_id": field})[0]; // Specify which bucket bound to choose from each histogram type. The number is ratio of the @@ -332,7 +332,7 @@ function selectHistogramBounds(statsColl, field, fieldType) { * Extract min/max values from a field. The initial unwind phase extracts the values in case the * field contains arrays. */ -function getMinMax(coll, field) { +export function getMinMax(coll, field) { const res = coll.aggregate([ {$unwind: field}, {$group: {_id: null, min: {$min: field}, max: {$max: field}}}, @@ -346,7 +346,7 @@ function getMinMax(coll, field) { * Extract query values from an array of sample arrays. Select up to three values per array element. 
* {[1, 3, 5], [ 2, 4, 6, 8, 10], [100]] -> [1, 3, 5, 2, 6, 10, 100] */ -function selectArrayValues(nestedArray) { +export function selectArrayValues(nestedArray) { let values = []; nestedArray.forEach(function(array) { if (typeof array != "object") { @@ -366,7 +366,7 @@ function selectArrayValues(nestedArray) { return values; } -function selectOutOfRangeValues(minMaxDoc, fieldType) { +export function selectOutOfRangeValues(minMaxDoc, fieldType) { let values = []; const validTypes = new Set(["integer", "double", "string", "date"]); if (!validTypes.has(fieldType)) { @@ -397,7 +397,7 @@ function selectOutOfRangeValues(minMaxDoc, fieldType) { return values; } -function sortValues(values) { +export function sortValues(values) { let sortColl = db["sortColl"]; sortColl.drop(); for (const x of values) { @@ -411,7 +411,7 @@ function sortValues(values) { return sorted; } -function deduplicate(boundaries) { +export function deduplicate(boundaries) { let values = [boundaries[0]]; let i = 0; while (i + 1 < boundaries.length) { @@ -435,7 +435,7 @@ function deduplicate(boundaries) { * values, min, and max for the respective field. Example: * {"a": {values: [1, 15, 37, 72, 100], min: 1, max: 100}, "b": {...} } */ -function selectQueryValues(coll, fields, fieldTypes, samplePos, statsColl) { +export function selectQueryValues(coll, fields, fieldTypes, samplePos, statsColl) { const sample = selectSample(coll, samplePos); let queryValues = {}; @@ -478,7 +478,7 @@ function selectQueryValues(coll, fields, fieldTypes, samplePos, statsColl) { * Query generation for a collection 'coll' with given fields and field types. * The generation uses values from a collection sample with 'sampleSize'. */ -function generateQueries(fields, fieldTypes, queryValues) { +export function generateQueries(fields, fieldTypes, queryValues) { let testCases = []; let i = 0; while (i < fields.length) { @@ -508,7 +508,7 @@ function generateQueries(fields, fieldTypes, queryValues) { * - step: step to navigate through the testCases array * - predicates: array of result predicate documents */ -function pickNextTerm(testCases, cnt, curPos, chosenIds, chosenFields, step, predicates) { +export function pickNextTerm(testCases, cnt, curPos, chosenIds, chosenFields, step, predicates) { assert.eq(curPos, chosenIds.length); let i = (curPos == 0) ? 0 : chosenIds.at(-1) + 1; @@ -548,7 +548,8 @@ function pickNextTerm(testCases, cnt, curPos, chosenIds, chosenFields, step, pre * op: $and or $or * comp: array of comparisons for predicate terms */ -function makeSingleFieldComplexPredicate(field, values, op, comp, predicates, isArray = false) { +export function makeSingleFieldComplexPredicate( + field, values, op, comp, predicates, isArray = false) { let terms = []; for (let i = 0; i < comp.length; i++) { terms.push({[field]: {[comp[i]]: values[i]}}); @@ -570,7 +571,7 @@ function makeSingleFieldComplexPredicate(field, values, op, comp, predicates, is /** * Make a single field DNF predicate. */ -function makeSingleFieldDNF(field, values, predicates) { +export function makeSingleFieldDNF(field, values, predicates) { let term1 = {"$and": [{[field]: {"$gt": values[0]}}, {[field]: {"$lt": values[1]}}]}; let term2 = {"$and": [{[field]: {"$gte": values[2]}}, {[field]: {"$lt": values[3]}}]}; @@ -586,7 +587,7 @@ function makeSingleFieldDNF(field, values, predicates) { /** * Generate single-field conjunctions and disjunctions using values from the 'queryValues' document. 
*/ -function generateSingleFieldPredicates(fields, fieldTypes, queryValues, predicates) { +export function generateSingleFieldPredicates(fields, fieldTypes, queryValues, predicates) { let i = 0; while (i < fields.length) { const field = fields[i]; @@ -628,7 +629,7 @@ function generateSingleFieldPredicates(fields, fieldTypes, queryValues, predicat * - single-field conjunctions and disjunctions with 2 and 4 terms. * - single-field DNFs. */ -function generateComplexPredicates(testCases, fields, fieldTypes, queryValues) { +export function generateComplexPredicates(testCases, fields, fieldTypes, queryValues) { let predicates = []; // Generate multi-field conjunctions. let chosenFields = new Set(); @@ -643,7 +644,7 @@ function generateComplexPredicates(testCases, fields, fieldTypes, queryValues) { // Generate single-field disjunctions and conjunctions. generateSingleFieldPredicates(fields, fieldTypes, queryValues, predicates); - i = 0; + let i = 0; for (let query of predicates) { query["_id"] = i++; } diff --git a/jstests/query_golden/libs/projection_helpers.js b/jstests/query_golden/libs/projection_helpers.js index caf02d103535b..bb2f770de4bc8 100644 --- a/jstests/query_golden/libs/projection_helpers.js +++ b/jstests/query_golden/libs/projection_helpers.js @@ -1,10 +1,11 @@ -load("jstests/query_golden/libs/utils.js"); +import {show} from "jstests/libs/golden_test.js"; +import {resetCollection} from "jstests/query_golden/libs/utils.js"; /** * Drops 'coll' and re-populates it according to 'docs' and 'indexes'. Then, runs the specified * projections against the collection and prints the results. */ -function runProjectionsAgainstColl(coll, docs, indexes, projSpecs) { +export function runProjectionsAgainstColl(coll, docs, indexes, projSpecs) { resetCollection(coll, docs, indexes); for (const projectionSpec of projSpecs) { @@ -17,7 +18,7 @@ function runProjectionsAgainstColl(coll, docs, indexes, projSpecs) { /** * Returns some example docs with interesting values as paths "a", "a.b", and "a.b.c". */ -function getProjectionDocs() { +export function getProjectionDocs() { return [ // // Simple documents without any arrays along "a.b.c". @@ -135,7 +136,7 @@ function getProjectionDocs() { * Similar to getProjectionDocs(), but a smaller list where the interesting values are just under * the _id field. 
*/ -function getIdProjectionDocs() { +export function getIdProjectionDocs() { return [ {_id: 1, x: 2}, {_id: {}, x: 1}, diff --git a/jstests/query_golden/libs/run_queries_ce.js b/jstests/query_golden/libs/run_queries_ce.js index 8dbe838d6007e..b0244386646ab 100644 --- a/jstests/query_golden/libs/run_queries_ce.js +++ b/jstests/query_golden/libs/run_queries_ce.js @@ -1,21 +1,37 @@ -load("jstests/libs/ce_stats_utils.js"); -load("jstests/libs/optimizer_utils.js"); -load("jstests/query_golden/libs/compute_errors.js"); -load("jstests/query_golden/libs/generate_queries.js"); - -function indexedStrategy(strategyName) { +import {analyzeFields, getRootCE} from "jstests/libs/ce_stats_utils.js"; +import { + forceCE, + getPlanSkeleton, + navigateToRootNode, + round2 +} from "jstests/libs/optimizer_utils.js"; +import { + aggegateOptimizationTimesPerStrategy, + aggregateErrorsPerCategory, + aggregateErrorsPerStrategy, + populateErrorCollection, + printQueriesWithBadAccuracy, +} from "jstests/query_golden/libs/compute_errors.js"; +import { + generateComplexPredicates, + generateQueries, + selectQueryValues, + selectSamplePos, +} from "jstests/query_golden/libs/generate_queries.js"; + +export function indexedStrategy(strategyName) { return strategyName + "Idx"; } -function timedExplain(coll, pipeline) { +export function timedExplain(coll, pipeline) { const t0 = Date.now(); - explain = coll.explain().aggregate(pipeline); + const explain = coll.explain().aggregate(pipeline); const t1 = Date.now(); const duration = t1 - t0; return {explain, duration}; } -function getCE(pipeline, explain) { +export function getCE(pipeline, explain) { try { return round2(getRootCE(explain)); } catch (e) { @@ -28,7 +44,7 @@ function getCE(pipeline, explain) { /** * Run the query specified in the 'testcase' document with the CE 'strategy'. */ -function runAggregationWithCE(coll, testcase, strategy) { +export function runAggregationWithCE(coll, testcase, strategy) { let explain = {}; let duration = -1; if (testcase["nReturned"] == null) { @@ -40,10 +56,10 @@ function runAggregationWithCE(coll, testcase, strategy) { } testcase["nReturned"] = explain.executionStats.nReturned; // Run explain without execution to measure optimization time. Ignore the explain. - timedRes = timedExplain(coll, testcase.pipeline); + const timedRes = timedExplain(coll, testcase.pipeline); duration = timedRes.duration; } else { - timedRes = timedExplain(coll, testcase.pipeline); + const timedRes = timedExplain(coll, testcase.pipeline); explain = timedRes.explain; duration = timedRes.duration; } @@ -57,7 +73,7 @@ function runAggregationWithCE(coll, testcase, strategy) { /** * Run queries with complex predicates in batches with a limited number of index fields. */ -function runComplexPredicates(coll, testCases, ceStrategies, ceDebugFlag) { +export function runComplexPredicates(coll, testCases, ceStrategies, ceDebugFlag) { const maxIndexCnt = 50; let start = 0; @@ -103,7 +119,7 @@ function runComplexPredicates(coll, testCases, ceStrategies, ceDebugFlag) { * If 'fields' is not empty, create index for each field and execute all queries on this field. * If 'fields' is empty, execute queries with complex predicates in batches. 
*/ -function runQueries(coll, testCases, ceStrategies, fields, ceDebugFlag) { +export function runQueries(coll, testCases, ceStrategies, fields, ceDebugFlag) { print("Run queries without indexing.\n"); ceStrategies.forEach(function(strategy) { forceCE(strategy); @@ -131,7 +147,7 @@ function runQueries(coll, testCases, ceStrategies, fields, ceDebugFlag) { } } -function printSimpleQueryStats(errorColl, strategies, queries, debugFlag) { +export function printSimpleQueryStats(errorColl, strategies, queries, debugFlag) { jsTestLog("Aggregate errors for all simple predicate queries"); // Aggregate errors for all CE strategies per query category. @@ -165,7 +181,7 @@ function printSimpleQueryStats(errorColl, strategies, queries, debugFlag) { } } -function printComplexQueryStats(errorColl, strategies, queries, debugFlag) { +export function printComplexQueryStats(errorColl, strategies, queries, debugFlag) { jsTestLog("Aggregate errors for all complex predicate queries"); // Aggregate errors for all CE strategies per query category. aggregateErrorsPerCategory(errorColl, ["qtype"], strategies); @@ -182,7 +198,7 @@ function printComplexQueryStats(errorColl, strategies, queries, debugFlag) { } } -function printAllQueryStats(testDB, errorColl1, errorColl2, strategies) { +export function printAllQueryStats(testDB, errorColl1, errorColl2, strategies) { jsTestLog("Aggregate errors for all queries (simple and complex predicates)"); let allErrorsColl = testDB.ce_all_errors; allErrorsColl.drop(); @@ -196,7 +212,7 @@ function printAllQueryStats(testDB, errorColl1, errorColl2, strategies) { * collection metadata. The function assumes that the collection exists and is populated with data. * 'sampleSize' is the number of documents used to extract sample values for query generation. */ -function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag = false) { +export function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag = false) { let ceStrategies = ["heuristic", "histogram"]; if (ceDebugFlag) { ceStrategies.push("sampling"); @@ -216,8 +232,8 @@ function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag = } // Switch to 'tryBonsai' to create statistics and generate queries. - assert.commandWorked( - testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); + assert.commandWorked(testDB.adminCommand( + {setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"})); analyzeFields(testDB, coll, fields); const statsColl = testDB.system.statistics[collName]; @@ -253,8 +269,8 @@ function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag = runQueries(coll, complexPred, ceStrategies, [], ceDebugFlag); // Switch to 'tryBonsai' for accuracy analysis. 
- assert.commandWorked( - testDB.adminCommand({setParameter: 1, internalQueryFrameworkControl: "tryBonsai"})); + assert.commandWorked(testDB.adminCommand( + {setParameter: 1, internalQueryFrameworkControl: "tryBonsaiExperimental"})); let allStrategies = []; for (let strategy of ceStrategies) { @@ -289,8 +305,8 @@ function runCETestForCollection(testDB, collMeta, sampleSize = 6, ceDebugFlag = testDB.createView('ce_errors_complex_pred_not_empty', 'ce_errors_complex_pred', [{$match: {$expr: {$gt: ["$nReturned", 0]}}}]); - errorCollNonEmpty = testDB.ce_errors_not_empty; - errorCollComplexPredNonEmpty = testDB.ce_errors_complex_pred_not_empty; + const errorCollNonEmpty = testDB.ce_errors_not_empty; + const errorCollComplexPredNonEmpty = testDB.ce_errors_complex_pred_not_empty; print(`Non-empty simple error entries: ${ errorCollNonEmpty.find().itcount()}; complex error entries: ${ errorCollComplexPredNonEmpty.find().itcount()}`); diff --git a/jstests/query_golden/libs/utils.js b/jstests/query_golden/libs/utils.js index fdf690e482135..9dcc8db45973e 100644 --- a/jstests/query_golden/libs/utils.js +++ b/jstests/query_golden/libs/utils.js @@ -1,10 +1,11 @@ -load("jstests/query_golden/libs/example_data.js"); +import {show} from "jstests/libs/golden_test.js"; +import {sequentialIds} from "jstests/query_golden/libs/example_data.js"; /** * Drops 'coll' and repopulates it with 'docs' and 'indexes'. Sequential _ids are added to * documents which do not have _id set. */ -function resetCollection(coll, docs, indexes = []) { +export function resetCollection(coll, docs, indexes = []) { coll.drop(); const docsWithIds = sequentialIds(docs); diff --git a/jstests/query_golden/load_data.js b/jstests/query_golden/load_data.js index b17101730085e..a6846066a444f 100644 --- a/jstests/query_golden/load_data.js +++ b/jstests/query_golden/load_data.js @@ -5,8 +5,8 @@ * ] */ -(function() { load("jstests/libs/load_ce_test_data.js"); +import {runHistogramsTest} from "jstests/libs/ce_stats_utils.js"; const dbName = 'ce_accuracy_test'; const dataDir = 'jstests/query_golden/libs/data/'; @@ -35,10 +35,9 @@ for (const collMetadata of dbMetadata) { print(`Actual cardinality: ${actualCard}\n`); assert.eq(expectedCard, actualCard); collMetadata.fields.forEach(function(fieldMetadata) { - fieldName = fieldMetadata.fieldName; + const fieldName = fieldMetadata.fieldName; const fieldCard = coll.find({}, {fieldName: 1}).itcount(); print(`card(${fieldName}) = ${fieldCard}\n`); assert.eq(fieldCard, actualCard); }); } -})(); diff --git a/jstests/query_golden/match_with_and_or.js b/jstests/query_golden/match_with_and_or.js index 85acbf16eecd4..a243d9512501f 100644 --- a/jstests/query_golden/match_with_and_or.js +++ b/jstests/query_golden/match_with_and_or.js @@ -1,10 +1,8 @@ /** * Test $match with $and/$or is supported and returns correct results. */ - -(function() { -"use strict"; -load("jstests/query_golden/libs/utils.js"); +import {show} from "jstests/libs/golden_test.js"; +import {resetCollection} from "jstests/query_golden/libs/utils.js"; const coll = db.and_or_coll; @@ -105,4 +103,3 @@ for (const op of operators) { show(coll.aggregate(pipeline)); } } -}()); diff --git a/jstests/query_golden/match_with_exists.js b/jstests/query_golden/match_with_exists.js index c2f64a1532eb9..f1c34bb2be4d7 100644 --- a/jstests/query_golden/match_with_exists.js +++ b/jstests/query_golden/match_with_exists.js @@ -1,9 +1,7 @@ /** * Test $match with $exists is supported and returns correct results. 
*/ - -(function() { -"use strict"; +import {show} from "jstests/libs/golden_test.js"; const coll = db.cqf_golden_match_with_exists; @@ -66,4 +64,3 @@ runWithData( {_id: 2, a: [2]}, ], [{'a': {$elemMatch: {$exists: true}}}, {'a': {$elemMatch: {$exists: false}}}]); -})(); diff --git a/jstests/query_golden/match_with_in.js b/jstests/query_golden/match_with_in.js index 849468aec2178..84c2346e6c6dd 100644 --- a/jstests/query_golden/match_with_in.js +++ b/jstests/query_golden/match_with_in.js @@ -1,9 +1,7 @@ /** * Test $match with $in is supported and returns correct results. */ - -(function() { -"use strict"; +import {show} from "jstests/libs/golden_test.js"; const coll = db.cqf_golden_match_with_in; coll.drop(); @@ -77,4 +75,3 @@ const testFilters = [ for (const filter of testFilters) { runTest(filter); } -}()); diff --git a/jstests/query_golden/multiple_traverse_single_scan.js b/jstests/query_golden/multiple_traverse_single_scan.js index 2eaff689c3bc4..a0f23bde3619c 100644 --- a/jstests/query_golden/multiple_traverse_single_scan.js +++ b/jstests/query_golden/multiple_traverse_single_scan.js @@ -5,8 +5,7 @@ * * Reproduces SERVER-71524. */ -(function() { -"use strict"; +import {show} from "jstests/libs/golden_test.js"; const coll = db.query_golden_multiple_traverse_single_scan; coll.drop(); @@ -21,4 +20,3 @@ assert.commandWorked(coll.createIndex({a: 1})); // An incorrect plan would force each index entry to match both predicates, // returning an empty result-set. show(coll.find({'a.x': 1, 'a.y': 1}, {_id: 0})); -})(); diff --git a/jstests/query_golden/non_multikey_paths.js b/jstests/query_golden/non_multikey_paths.js index 6a062aad53189..988bac0a216a4 100644 --- a/jstests/query_golden/non_multikey_paths.js +++ b/jstests/query_golden/non_multikey_paths.js @@ -7,10 +7,7 @@ * requires_cqf, * ] */ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For leftmostLeafStage +import {leftmostLeafStage, prettyInterval} from "jstests/libs/optimizer_utils.js"; db.setLogLevel(4, "query"); @@ -61,5 +58,4 @@ run([{$match: {'one.one.one.one': 2}}]); run([{$match: {'one.one.one.many': 2}}]); run([{$match: {'many.one.one.one': 2}}]); run([{$match: {'many.one.one.many': 2}}]); -run([{$match: {'many.many.many.many': 2}}]); -})(); \ No newline at end of file +run([{$match: {'many.many.many.many': 2}}]); \ No newline at end of file diff --git a/jstests/query_golden/not_pushdown.js b/jstests/query_golden/not_pushdown.js index 9b43725d83f6f..b626356892ca3 100644 --- a/jstests/query_golden/not_pushdown.js +++ b/jstests/query_golden/not_pushdown.js @@ -7,10 +7,7 @@ * requires_cqf, * ] */ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For leftmostLeafStage +import {findSubtrees} from "jstests/libs/optimizer_utils.js"; const coll = db.cqf_not_pushdown; coll.drop(); @@ -65,5 +62,4 @@ run('Should stay as Not Traverse Eq', [{$match: {'many.many.many.many': {$ne: 7} // We have an $elemMatch (multikey), but no Traverse underneath the Not. 
run('Should be optimized to Neq', [{$match: {'many': {$elemMatch: {'one.one.one': {$ne: 7}}}}}]); -run('Should be optimized to Neq', [{$match: {'many.one': {$elemMatch: {'one.one': {$ne: 7}}}}}]); -})(); \ No newline at end of file +run('Should be optimized to Neq', [{$match: {'many.one': {$elemMatch: {'one.one': {$ne: 7}}}}}]); \ No newline at end of file diff --git a/jstests/query_golden/null_missing.js b/jstests/query_golden/null_missing.js index 80d41924db6dd..01bad7370edcb 100644 --- a/jstests/query_golden/null_missing.js +++ b/jstests/query_golden/null_missing.js @@ -7,10 +7,7 @@ * requires_cqf, * ] */ -(function() { -"use strict"; - -load("jstests/libs/optimizer_utils.js"); // For getPlanSkeleton. +import {getPlanSkeleton} from "jstests/libs/optimizer_utils.js"; db.setLogLevel(4, "query"); @@ -42,5 +39,4 @@ const pipeline = [{$match: {'a.b': null}}]; print(`nReturned: ${explain.executionStats.nReturned}\n`); print(`Plan skeleton: `); printjson(getPlanSkeleton(explain)); -} -})(); +} \ No newline at end of file diff --git a/jstests/replsets/all_commands_downgrading_to_upgraded.js b/jstests/replsets/all_commands_downgrading_to_upgraded.js index 9460d090927fb..dcf9db222036a 100644 --- a/jstests/replsets/all_commands_downgrading_to_upgraded.js +++ b/jstests/replsets/all_commands_downgrading_to_upgraded.js @@ -10,13 +10,10 @@ * ] */ -(function() { -"use strict"; - // This will verify the completeness of our map and run all tests. load("jstests/libs/all_commands_test.js"); -load("jstests/libs/fixture_helpers.js"); // For isSharded and isReplSet -load("jstests/libs/feature_flag_util.js"); // For isPresentAndEnabled +load("jstests/libs/fixture_helpers.js"); // For isSharded and isReplSet +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load('jstests/replsets/rslib.js'); const name = jsTestName(); @@ -29,6 +26,7 @@ const isAnInternalCommand = "internal command"; const isDeprecated = "deprecated command"; const commandIsDisabledOnLastLTS = "skip command on downgrading fcv"; const requiresParallelShell = "requires parallel shell"; +const cannotRunWhileDowngrading = "cannot run command while downgrading"; const allCommands = { _addShard: {skip: isAnInternalCommand}, @@ -65,15 +63,16 @@ const allCommands = { _configsvrRemoveShardFromZone: {skip: isAnInternalCommand}, _configsvrRemoveTags: {skip: isAnInternalCommand}, _configsvrRepairShardedCollectionChunksHistory: {skip: isAnInternalCommand}, - _configsvrRenameCollectionMetadata: {skip: isAnInternalCommand}, + _configsvrResetPlacementHistory: {skip: isAnInternalCommand}, _configsvrReshardCollection: {skip: isAnInternalCommand}, _configsvrRunRestore: {skip: isAnInternalCommand}, _configsvrSetAllowMigrations: {skip: isAnInternalCommand}, _configsvrSetClusterParameter: {skip: isAnInternalCommand}, _configsvrSetUserWriteBlockMode: {skip: isAnInternalCommand}, - _configsvrTransitionToCatalogShard: {skip: isAnInternalCommand}, + _configsvrTransitionFromDedicatedConfigServer: {skip: isAnInternalCommand}, _configsvrTransitionToDedicatedConfigServer: {skip: isAnInternalCommand}, _configsvrUpdateZoneKeyRange: {skip: isAnInternalCommand}, + _dropConnectionsToMongot: {skip: isAnInternalCommand}, _flushDatabaseCacheUpdates: {skip: isAnInternalCommand}, _flushDatabaseCacheUpdatesWithWriteConcern: {skip: isAnInternalCommand}, _flushReshardingStateChange: {skip: isAnInternalCommand}, @@ -87,6 +86,7 @@ const allCommands = { _killOperations: {skip: isAnInternalCommand}, _mergeAuthzCollections: {skip: isAnInternalCommand}, _migrateClone: {skip: 
isAnInternalCommand}, + _mongotConnPoolStats: {skip: isAnInternalCommand}, _movePrimaryRecipientAbortMigration: {skip: isAnInternalCommand}, _movePrimaryRecipientForgetMigration: {skip: isAnInternalCommand}, _movePrimaryRecipientSyncData: {skip: isAnInternalCommand}, @@ -97,6 +97,7 @@ const allCommands = { _recvChunkStatus: {skip: isAnInternalCommand}, _refreshQueryAnalyzerConfiguration: {skip: isAnInternalCommand}, _shardsvrAbortReshardCollection: {skip: isAnInternalCommand}, + _shardsvrCleanupStructuredEncryptionData: {skip: isAnInternalCommand}, _shardsvrCleanupReshardCollection: {skip: isAnInternalCommand}, _shardsvrCloneCatalogData: {skip: isAnInternalCommand}, _shardsvrCompactStructuredEncryptionData: {skip: isAnInternalCommand}, @@ -106,8 +107,6 @@ const allCommands = { _shardsvrDropCollection: {skip: isAnInternalCommand}, _shardsvrCreateCollection: {skip: isAnInternalCommand}, _shardsvrCreateGlobalIndex: {skip: isAnInternalCommand}, - // TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS. - _shardsvrDropCollectionIfUUIDNotMatching: {skip: isAnInternalCommand}, _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern: {skip: isAnInternalCommand}, _shardsvrDropCollectionParticipant: {skip: isAnInternalCommand}, _shardsvrDropGlobalIndex: {skip: isAnInternalCommand}, @@ -144,7 +143,14 @@ const allCommands = { _shardsvrParticipantBlock: {skip: isAnInternalCommand}, _shardsvrCheckMetadataConsistency: {skip: isAnInternalCommand}, _shardsvrCheckMetadataConsistencyParticipant: {skip: isAnInternalCommand}, - _startStreamProcessor: {skip: isAnInternalCommand}, + streams_startStreamProcessor: {skip: isAnInternalCommand}, + streams_startStreamSample: {skip: isAnInternalCommand}, + streams_stopStreamProcessor: {skip: isAnInternalCommand}, + streams_listStreamProcessors: {skip: isAnInternalCommand}, + streams_getMoreStreamSample: {skip: isAnInternalCommand}, + streams_getStats: {skip: isAnInternalCommand}, + streams_testOnlyInsert: {skip: isAnInternalCommand}, + streams_getMetrics: {skip: isAnInternalCommand}, _transferMods: {skip: isAnInternalCommand}, _vectorClockPersist: {skip: isAnInternalCommand}, abortReshardCollection: { @@ -219,11 +225,7 @@ const allCommands = { assert.commandWorked(conn.getDB(dbName).runCommand({create: collName})); }, command: {analyze: collName}, - expectFailure: true, - expectedErrorCode: [ - 6660400, - 6765500 - ], // Analyze command requires common query framework feature flag to be enabled. + checkFeatureFlag: "CommonQueryFramework", teardown: function(conn) { assert.commandWorked(conn.getDB(dbName).runCommand({drop: collName})); }, @@ -231,8 +233,6 @@ const allCommands = { analyzeShardKey: { // TODO SERVER-74867: Remove the skip once 7.0 is lastLTS. skip: commandIsDisabledOnLastLTS, - // TODO SERVER-67966: Remove check when this feature flag is removed. - checkFeatureFlag: "AnalyzeShardKey", setUp: function(conn) { assert.commandWorked(conn.getDB(dbName).runCommand({create: collName})); assert.commandWorked( @@ -365,6 +365,7 @@ const allCommands = { // operation. 
skip: "requires additional setup through a failed resharding operation", }, + cleanupStructuredEncryptionData: {skip: "requires additional encrypted collection setup"}, clearJumboFlag: { isShardedOnly: true, fullScenario: function(conn, fixture) { @@ -412,6 +413,7 @@ const allCommands = { }, clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"}, clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"}, + clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"}, clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"}, clusterCount: {skip: "already tested by 'count' tests on mongos"}, clusterDelete: {skip: "already tested by 'delete' tests on mongos"}, @@ -497,15 +499,13 @@ const allCommands = { configureQueryAnalyzer: { // TODO SERVER-74867: Remove the skip once 7.0 is lastLTS. skip: commandIsDisabledOnLastLTS, - // TODO SERVER-67966: Remove check when this feature flag is removed. - checkFeatureFlag: "AnalyzeShardKey", setUp: function(conn) { assert.commandWorked(conn.getDB(dbName).runCommand({create: collName})); for (let i = 0; i < 10; i++) { assert.commandWorked(conn.getCollection(fullNs).insert({a: i})); } }, - command: {configureQueryAnalyzer: fullNs, mode: "full", sampleRate: 1}, + command: {configureQueryAnalyzer: fullNs, mode: "full", samplesPerSecond: 1}, teardown: function(conn) { assert.commandWorked(conn.getDB(dbName).runCommand({drop: collName})); }, @@ -1091,13 +1091,7 @@ const allCommands = { }, }, movePrimary: { - isShardedOnly: true, - fullScenario: function(conn, fixture) { - assert.commandWorked(conn.getDB(dbName).runCommand({create: collName})); - assert.commandWorked(conn.getDB('admin').runCommand( - {movePrimary: dbName, to: fixture.shard0.shardName})); - assert.commandWorked(conn.getDB(dbName).runCommand({drop: collName})); - } + skip: cannotRunWhileDowngrading, }, moveRange: { isShardedOnly: true, @@ -1315,6 +1309,11 @@ const allCommands = { isAdminCommand: true, command: {replSetResizeOplog: 1, minRetentionHours: 1}, }, + resetPlacementHistory: { + command: {resetPlacementHistory: 1}, + isShardedOnly: true, + isAdminCommand: true, + }, reshardCollection: { // TODO SERVER-74867: Remove the skip once 7.0 is lastLTS. skip: commandIsDisabledOnLastLTS, @@ -1541,20 +1540,16 @@ const allCommands = { isAdminCommand: true, doesNotRunOnMongos: true, }, - transitionToCatalogShard: { + transitionFromDedicatedConfigServer: { // TODO SERVER-74867: Remove the skip once 7.0 is lastLTS. skip: commandIsDisabledOnLastLTS, - // TODO SERVER-66060: Remove check when this feature flag is removed. - checkFeatureFlag: "CatalogShard", - command: {transitionToCatalogShard: 1}, + command: {transitionFromDedicatedConfigServer: 1}, isShardedOnly: true, isAdminCommand: true, }, transitionToDedicatedConfigServer: { // TODO SERVER-74867: Remove the skip once 7.0 is lastLTS. skip: commandIsDisabledOnLastLTS, - // TODO SERVER-66060: Remove check when this feature flag is removed. 
- checkFeatureFlag: "CatalogShard", command: {transitionToDedicatedConfigServer: 1}, isShardedOnly: true, isAdminCommand: true, @@ -1767,11 +1762,6 @@ let runAllCommands = function(command, test, conn, fixture) { }; let runTest = function(conn, adminDB, fixture) { - let runDowngradingToUpgrading = false; - if (FeatureFlagUtil.isEnabled(adminDB, "DowngradingToUpgrading")) { - runDowngradingToUpgrading = true; - } - assert.commandFailed(conn.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); jsTestLog("Running all commands in the downgradingToLastLTS FCV"); @@ -1798,30 +1788,28 @@ let runTest = function(conn, adminDB, fixture) { runAllCommands(command, test, conn, fixture); } - if (runDowngradingToUpgrading) { - assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - - jsTestLog("Running all commands after upgrading back to the latest FCV"); - commandsList = AllCommandsTest.checkCommandCoverage(conn, allCommands); - if (isMongos(adminDB)) { - let shardCommandsList = - AllCommandsTest.checkCommandCoverage(fixture.shard0.rs.getPrimary(), allCommands); - commandsList = new Set(commandsList.concat(shardCommandsList)); - } + assert.commandWorked(conn.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - for (const command of commandsList) { - const test = allCommands[command]; + jsTestLog("Running all commands after upgrading back to the latest FCV"); + commandsList = AllCommandsTest.checkCommandCoverage(conn, allCommands); + if (isMongos(adminDB)) { + let shardCommandsList = + AllCommandsTest.checkCommandCoverage(fixture.shard0.rs.getPrimary(), allCommands); + commandsList = new Set(commandsList.concat(shardCommandsList)); + } - // Coverage already guaranteed above, but check again just in case. - assert(test, "Coverage failure: must explicitly define a test for " + command); + for (const command of commandsList) { + const test = allCommands[command]; - if (test.skip !== undefined) { - jsTestLog("Skipping " + command + ": " + test.skip); - continue; - } + // Coverage already guaranteed above, but check again just in case. 
+ assert(test, "Coverage failure: must explicitly define a test for " + command); - runAllCommands(command, test, conn, fixture); + if (test.skip !== undefined) { + jsTestLog("Skipping " + command + ": " + test.skip); + continue; } + + runAllCommands(command, test, conn, fixture); } }; @@ -1868,5 +1856,4 @@ let runShardedClusterTest = function() { runStandaloneTest(); runReplicaSetTest(); -runShardedClusterTest(); -})(); +runShardedClusterTest(); \ No newline at end of file diff --git a/jstests/replsets/apply_batches_totalMillis.js b/jstests/replsets/apply_batches_totalMillis.js index 7e59f76fe5d7a..648948647dc47 100644 --- a/jstests/replsets/apply_batches_totalMillis.js +++ b/jstests/replsets/apply_batches_totalMillis.js @@ -60,4 +60,4 @@ jsTestLog(`Time recorded after larger batch: ${timeAfterLarge}ms`); assert.gte(timeAfterLarge, timeAfterSmall); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/apply_transaction_with_yield.js b/jstests/replsets/apply_transaction_with_yield.js index b1705a3e3b9a3..5b9940cde13b4 100644 --- a/jstests/replsets/apply_transaction_with_yield.js +++ b/jstests/replsets/apply_transaction_with_yield.js @@ -41,4 +41,4 @@ session.commitTransaction(); replTest.awaitReplication(); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/arbiters_not_included_in_w3_wc.js b/jstests/replsets/arbiters_not_included_in_w3_wc.js index aaf35cb450127..b4613094e8dd5 100644 --- a/jstests/replsets/arbiters_not_included_in_w3_wc.js +++ b/jstests/replsets/arbiters_not_included_in_w3_wc.js @@ -47,4 +47,4 @@ assert.commandFailedWithCode(testColl.insert({"b": 2}, {writeConcern: {w: 3, wti ErrorCodes.WriteConcernFailed); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/auth1.js b/jstests/replsets/auth1.js index 8e45f6b403e5f..9f95d74d1fd12 100644 --- a/jstests/replsets/auth1.js +++ b/jstests/replsets/auth1.js @@ -57,7 +57,7 @@ print("make sure user is written before shutting down"); MongoRunner.stopMongod(m); print("start up rs"); -var rs = new ReplSetTest({"name": name, "nodes": 3}); +const rs = new ReplSetTest({"name": name, "nodes": 3}); // The first node is started with the pre-populated data directory. print("start 0 with keyFile"); diff --git a/jstests/replsets/auth_no_pri.js b/jstests/replsets/auth_no_pri.js index 35629b3a383e7..99425014e0213 100644 --- a/jstests/replsets/auth_no_pri.js +++ b/jstests/replsets/auth_no_pri.js @@ -3,7 +3,7 @@ 'use strict'; var NODE_COUNT = 3; -var rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"}); +const rs = new ReplSetTest({"nodes": NODE_COUNT, keyFile: "jstests/libs/key1"}); var nodes = rs.startSet(); rs.initiate(); diff --git a/jstests/replsets/awaitable_hello_errors_on_horizon_change.js b/jstests/replsets/awaitable_hello_errors_on_horizon_change.js index 15b8ec66b0f58..e8923a2f04db7 100644 --- a/jstests/replsets/awaitable_hello_errors_on_horizon_change.js +++ b/jstests/replsets/awaitable_hello_errors_on_horizon_change.js @@ -133,4 +133,4 @@ runTest("hello"); runTest("isMaster"); runTest("ismaster"); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/background_index.js b/jstests/replsets/background_index.js index a09138a9e194b..f56dbf3b8c66e 100644 --- a/jstests/replsets/background_index.js +++ b/jstests/replsets/background_index.js @@ -23,7 +23,7 @@ for (var i = 0; i < 100; i++) { } // Add a background index. 
-coll.createIndex({x: 1}, {background: true}); +coll.createIndex({x: 1}); // Rename the collection. assert.commandWorked( diff --git a/jstests/replsets/bulk_write_command_wc.js b/jstests/replsets/bulk_write_command_wc.js new file mode 100644 index 0000000000000..c305d326085e7 --- /dev/null +++ b/jstests/replsets/bulk_write_command_wc.js @@ -0,0 +1,194 @@ +/** + * Tests write-concern-related bulkWrite protocol functionality + * + * The test runs commands that are not allowed with security token: bulkWrite. + * @tags: [ + * assumes_against_mongod_not_mongos, + * not_allowed_with_security_token, + * command_not_supported_in_serverless, + * # TODO SERVER-52419 Remove this tag. + * featureFlagBulkWriteCommand, + * ] + */ +(function() { + +// Skip this test when running with storage engines other than inMemory, as the test relies on +// journaling not being active. +if (jsTest.options().storageEngine !== "inMemory") { + jsTest.log("Skipping test because it is only applicable for the inMemory storage engine"); + return; +} + +var request; +var result; + +// NOTE: ALL TESTS BELOW SHOULD BE SELF-CONTAINED, FOR EASIER DEBUGGING + +jsTest.log("Starting no journal/repl set tests..."); + +// Start a single-node replica set with no journal +// Allows testing immediate write concern failures and wc application failures +var rst = new ReplSetTest({nodes: 2}); +rst.startSet(); +rst.initiate(); +var mongod = rst.getPrimary(); +var coll = mongod.getCollection("test.bulk_write_command_wc"); + +// +// Basic bulkWrite, default WC +coll.remove({}); +printjson(request = { + bulkWrite: 1, + ops: [{insert: 0, document: {a: 1}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}] +}); +printjson(result = mongod.adminCommand(request)); +assert(result.ok); +assert.eq(1, result.cursor.firstBatch[0].n); +assert.eq(1, coll.find().itcount()); + +// +// Basic bulkWrite, majority WC +coll.remove({}); +printjson(request = { + bulkWrite: 1, + ops: [{insert: 0, document: {a: 1}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}], + writeConcern: {w: 'majority'} +}); +printjson(result = mongod.adminCommand(request)); +assert(result.ok); +assert.eq(1, result.cursor.firstBatch[0].n); +assert.eq(1, coll.find().itcount()); + +// +// Basic bulkWrite, w:2 WC +coll.remove({}); +printjson(request = { + bulkWrite: 1, + ops: [{insert: 0, document: {a: 1}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}], + writeConcern: {w: 2} +}); +printjson(result = mongod.adminCommand(request)); +assert(result.ok); +assert.eq(1, result.cursor.firstBatch[0].n); +assert.eq(1, coll.find().itcount()); + +// +// Basic bulkWrite, immediate nojournal error +coll.remove({}); +printjson(request = { + bulkWrite: 1, + ops: [{insert: 0, document: {a: 1}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}], + writeConcern: {j: true} +}); +printjson(result = mongod.adminCommand(request)); +assert(!result.ok); +assert.eq(0, coll.find().itcount()); + +// +// Basic bulkWrite, timeout wc error +coll.remove({}); +printjson(request = { + bulkWrite: 1, + ops: [{insert: 0, document: {a: 1}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}], + writeConcern: {w: 3, wtimeout: 1} +}); +printjson(result = mongod.adminCommand(request)); +assert(result.ok); +assert.eq(1, result.cursor.firstBatch[0].n); +assert(result.writeConcernError); +assert.eq(100, result.writeConcernError.code); +assert.eq(1, coll.find().itcount()); + +// +// Basic bulkWrite, wmode wc error +coll.remove({}); +printjson(request = { + bulkWrite: 1, + ops: [{insert: 0, document: {a: 1}}], + nsInfo: [{ns: 
"test.bulk_write_command_wc"}], + writeConcern: {w: 'invalid'} +}); +printjson(result = mongod.adminCommand(request)); +assert(result.ok); +assert.eq(1, result.cursor.firstBatch[0].n); +assert(result.writeConcernError); +assert.eq(1, coll.find().itcount()); + +// +// Two ordered inserts, write error and wc error both reported +coll.remove({}); +printjson(request = { + bulkWrite: 1, + ops: [{insert: 0, document: {a: 1}}, {insert: 0, document: {_id: /a/}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}], + writeConcern: {w: 'invalid'} +}); +printjson(result = mongod.adminCommand(request)); +assert(result.ok); +assert.eq(1, result.cursor.firstBatch[0].n); +assert.eq(0, result.cursor.firstBatch[1].ok); +assert.eq(1, result.cursor.firstBatch[1].idx); +assert(result.writeConcernError); +assert.eq(1, coll.find().itcount()); + +// +// Two unordered inserts, write error and wc error reported +coll.remove({}); +printjson(request = { + bulkWrite: 1, + ops: [{insert: 0, document: {a: 1}}, {insert: 0, document: {_id: /a/}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}], + ordered: false, + writeConcern: {w: 'invalid'} +}); +printjson(result = mongod.adminCommand(request)); +assert(result.ok); +assert.eq(1, result.cursor.firstBatch[0].n); +assert.eq(0, result.cursor.firstBatch[1].ok); +assert.eq(1, result.cursor.firstBatch[1].idx); +assert(result.writeConcernError); +assert.eq(1, coll.find().itcount()); + +// +// Write error with empty writeConcern object. +coll.remove({}); +request = { + bulkWrite: 1, + ops: [{insert: 0, document: {_id: 1}}, {insert: 0, document: {_id: 1}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}], + ordered: false, + writeConcern: {} +}; +result = mongod.adminCommand(request); +assert(result.ok); +assert.eq(1, result.cursor.firstBatch[0].n); +assert.eq(0, result.cursor.firstBatch[1].ok); +assert.eq(1, result.cursor.firstBatch[1].idx); +assert.eq(null, result.writeConcernError); +assert.eq(1, coll.find().itcount()); + +// +// Write error with unspecified w. +coll.remove({}); +request = { + bulkWrite: 1, + ops: [{insert: 0, document: {_id: 1}}, {insert: 0, document: {_id: 1}}], + nsInfo: [{ns: "test.bulk_write_command_wc"}], + ordered: false, + writeConcern: {wtimeout: 1} +}; +result = assert.commandWorkedIgnoringWriteErrors(mongod.adminCommand(request)); +assert.eq(1, result.cursor.firstBatch[0].n); +assert.eq(0, result.cursor.firstBatch[1].ok); +assert.eq(1, result.cursor.firstBatch[1].idx); +assert.eq(null, result.writeConcernError); +assert.eq(1, coll.find().itcount()); + +jsTest.log("DONE no journal/repl tests"); +rst.stopSet(); +})(); diff --git a/jstests/replsets/cluster_server_parameter_commands_replset.js b/jstests/replsets/cluster_server_parameter_commands_replset.js index 79e1356f47c10..bf1d3bca12bcc 100644 --- a/jstests/replsets/cluster_server_parameter_commands_replset.js +++ b/jstests/replsets/cluster_server_parameter_commands_replset.js @@ -7,10 +7,11 @@ * multiversion_incompatible * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + setupReplicaSet, + testInvalidClusterParameterCommands, + testValidClusterParameterCommands, +} from "jstests/libs/cluster_server_parameter_utils.js"; // Tests that set/getClusterParameter works on a non-sharded replica set. const rst = new ReplSetTest({ @@ -29,5 +30,4 @@ testInvalidClusterParameterCommands(rst); // majority of the nodes in the replica set. 
testValidClusterParameterCommands(rst); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/replsets/collection_clone_resume_after_network_error.js b/jstests/replsets/collection_clone_resume_after_network_error.js index 0a800befc9083..ebac64c764799 100644 --- a/jstests/replsets/collection_clone_resume_after_network_error.js +++ b/jstests/replsets/collection_clone_resume_after_network_error.js @@ -26,7 +26,7 @@ function checkNoResumeAfter() { // Verify the 'find' command received by the primary has resumeAfter set with the given recordId. function checkHasResumeAfter(recordId) { - checkLog.contains(primary, `"$_resumeAfter":{"$recordId":${recordId}}`); + checkLog.contains(primary, new RegExp(`"\\$_resumeAfter":\\{.*"\\$recordId":${recordId}.*\\}`)); } const beforeRetryFailPointName = "hangBeforeRetryingClonerStage"; diff --git a/jstests/replsets/commands_that_write_accept_wc.js b/jstests/replsets/commands_that_write_accept_wc.js index 8a8adffad5e35..75d06682da8a0 100644 --- a/jstests/replsets/commands_that_write_accept_wc.js +++ b/jstests/replsets/commands_that_write_accept_wc.js @@ -44,7 +44,7 @@ commands.push({ commands.push({ req: {createIndexes: collName, indexes: [{key: {'type': 1}, name: 'type_index'}]}, setupFunc: function() { - coll.insert({type: 'oak'}); + assert.commandWorked(coll.insert({type: 'oak'})); assert.eq(coll.getIndexes().length, 1); }, confirmFunc: function() { @@ -62,7 +62,7 @@ commands.push({ writeConcern: {w: 'majority'} }, setupFunc: function() { - coll.insert({type: 'oak'}); + assert.commandWorked(coll.insert({type: 'oak'})); assert.eq(coll.count({type: 'ginkgo'}), 0); assert.eq(coll.count({type: 'oak'}), 1); }, @@ -80,7 +80,7 @@ commands.push({ writeConcern: {w: 'majority'} }, setupFunc: function() { - coll.insert({type: 'oak'}); + assert.commandWorked(coll.insert({type: 'oak'})); assert.eq(coll.count({type: 'ginkgo'}), 0); assert.eq(coll.count({type: 'oak'}), 1); }, @@ -98,7 +98,7 @@ commands.push({ writeConcern: {w: 'majority'} }, setupFunc: function() { - coll.insert({type: 'oak'}); + assert.commandWorked(coll.insert({type: 'oak'})); assert.eq(coll.count({type: 'ginkgo'}), 0); assert.eq(coll.count({type: 'oak'}), 1); }, @@ -111,7 +111,7 @@ commands.push({ commands.push({ req: {applyOps: [{op: "u", ns: coll.getFullName(), o: {_id: 1, type: "willow"}, o2: {_id: 1}}]}, setupFunc: function() { - coll.insert({_id: 1, type: 'oak'}); + assert.commandWorked(coll.insert({_id: 1, type: 'oak'})); assert.eq(coll.count({type: 'willow'}), 0); }, confirmFunc: function() { @@ -141,15 +141,24 @@ commands.push({ }); }, reduce: function(key, values) { - return {count: values.length}; + // We may be re-reducing values that have already been partially reduced. In that case, + // we expect to see an object like {count: } in the array of input values. 
+ const numValues = values.reduce(function(acc, currentValue) { + if (typeof currentValue === "object") { + return acc + currentValue.count; + } else { + return acc + 1; + } + }, 0); + return {count: numValues}; }, out: "foo" }, setupFunc: function() { - coll.insert({x: 1, tags: ["a", "b"]}); - coll.insert({x: 2, tags: ["b", "c"]}); - coll.insert({x: 3, tags: ["c", "a"]}); - coll.insert({x: 4, tags: ["b", "c"]}); + assert.commandWorked(coll.insert({x: 1, tags: ["a", "b"]})); + assert.commandWorked(coll.insert({x: 2, tags: ["b", "c"]})); + assert.commandWorked(coll.insert({x: 3, tags: ["c", "a"]})); + assert.commandWorked(coll.insert({x: 4, tags: ["b", "c"]})); }, confirmFunc: function() { assert.eq(db.foo.findOne({_id: 'a'}).value.count, 2); diff --git a/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js b/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js index 3502865b351fa..a841d83eb3a72 100644 --- a/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js +++ b/jstests/replsets/commit_transaction_initial_sync_data_already_applied.js @@ -103,4 +103,4 @@ let res = secondary.getDB(dbName).getCollection(collName).find(); assert.eq(res.toArray(), [{_id: 1, a: 0}, {_id: 2}, {_id: 3, a: 1}], res); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/config_txns_reaping_interrupt.js b/jstests/replsets/config_txns_reaping_interrupt.js new file mode 100644 index 0000000000000..c4da081caf586 --- /dev/null +++ b/jstests/replsets/config_txns_reaping_interrupt.js @@ -0,0 +1,327 @@ +/* + * Tests that deleting a config.transactions document interrupts all transaction sessions + * it is associated with. + * + * @tags: [uses_transactions] + */ +(function() { +"use strict"; + +// This test implicitly writes to the config.transactions collection, which is not allowed under a +// session. +TestData.disableImplicitSessions = true; + +load("jstests/libs/fail_point_util.js"); +load("jstests/libs/parallelTester.js"); +load("jstests/libs/uuid_util.js"); +load("jstests/sharding/libs/sharded_transactions_helpers.js"); + +const rst = new ReplSetTest({nodes: 1}); +rst.startSet(); +rst.initiate(); + +const primary = rst.getPrimary(); + +const dbName = "testDb"; +const collName = "testColl"; +const ns = dbName + "." + collName; +const sessionColl = primary.getCollection("config.transactions"); + +function runInsert(host, + lsidUUIDString, + lsidTxnNumber, + lsidTxnUUIDString, + txnNumber, + dbName, + collName, + isRetryableWrite) { + const conn = new Mongo(host); + const lsid = {id: UUID(lsidUUIDString)}; + if (lsidTxnNumber) { + lsid.txnNumber = NumberLong(lsidTxnNumber); + } + if (lsidTxnUUIDString) { + lsid.txnUUID = UUID(lsidTxnUUIDString); + } + const cmdObj = { + insert: collName, + documents: [{x: 2}], + lsid, + txnNumber: NumberLong(txnNumber), + + }; + if (isRetryableWrite || lsid.txnNumber) { + cmdObj.stmtId = NumberInt(2); + } + if (!isRetryableWrite) { + cmdObj.autocommit = false; + } + return conn.getDB(dbName).runCommand(cmdObj); +} + +function runTest({committedTxnOpts, inProgressTxnOpts, expectInterrupt}) { + jsTest.log("Testing " + tojson({committedTxnOpts, inProgressTxnOpts, expectInterrupt})); + // Start and commit a transaction.
+ const cmdObj0 = { + insert: collName, + documents: [{x: 0}], + lsid: committedTxnOpts.lsid, + txnNumber: NumberLong(committedTxnOpts.txnNumber), + startTransaction: true, + autocommit: false, + }; + if (committedTxnOpts.lsid.txnNumber) { + cmdObj0.stmtId = NumberInt(0); + } + assert.commandWorked(primary.getDB(dbName).runCommand(cmdObj0)); + assert.commandWorked(primary.adminCommand({ + commitTransaction: 1, + lsid: committedTxnOpts.lsid, + txnNumber: NumberLong(committedTxnOpts.txnNumber), + autocommit: false + })); + + // Start another transaction. Pause it after it has checked out the session. + const cmdObj1 = { + insert: collName, + documents: [{x: 1}], + lsid: inProgressTxnOpts.lsid, + txnNumber: NumberLong(inProgressTxnOpts.txnNumber), + }; + if (inProgressTxnOpts.lsid.txnNumber || inProgressTxnOpts.isRetryableWrite) { + cmdObj1.stmtId = NumberInt(1); + } + if (!inProgressTxnOpts.isRetryableWrite) { + cmdObj1.startTransaction = true; + cmdObj1.autocommit = false; + } + assert.commandWorked(primary.getDB(dbName).runCommand(cmdObj1)); + const inProgressTxnThread = new Thread( + runInsert, + primary.host, + extractUUIDFromObject(inProgressTxnOpts.lsid.id), + inProgressTxnOpts.lsid.txnNumber ? inProgressTxnOpts.lsid.txnNumber.toNumber() : null, + inProgressTxnOpts.lsid.txnUUID ? extractUUIDFromObject(inProgressTxnOpts.lsid.txnUUID) + : null, + inProgressTxnOpts.txnNumber, + dbName, + collName, + inProgressTxnOpts.isRetryableWrite); + let fp = configureFailPoint(primary, "hangDuringBatchInsert", {shouldCheckForInterrupt: true}); + inProgressTxnThread.start(); + + fp.wait(); + // Delete the config.transactions document for the committed transaction. + assert.commandWorked(sessionColl.remove(makeLsidFilter(committedTxnOpts.lsid, "_id"))); + + fp.off(); + const insertRes = inProgressTxnThread.returnData(); + if (expectInterrupt) { + assert.commandFailedWithCode(insertRes, ErrorCodes.Interrupted); + } else { + assert.commandWorked(insertRes); + if (!inProgressTxnOpts.isRetryableWrite) { + assert.commandWorked(primary.adminCommand({ + commitTransaction: 1, + lsid: inProgressTxnOpts.lsid, + txnNumber: NumberLong(inProgressTxnOpts.txnNumber), + autocommit: false + })); + } + } +} + +jsTest.log("Test deleting config.transactions document for an external/client session"); + +{ + const parentLsid = {id: UUID()}; + const parentTxnNumber = 1234; + runTest({ + committedTxnOpts: {lsid: parentLsid, txnNumber: parentTxnNumber}, + inProgressTxnOpts: { + lsid: { + id: parentLsid.id, + txnUUID: UUID(), + }, + txnNumber: 1, + }, + expectInterrupt: true + }); +} + +{ + const parentLsid = {id: UUID()}; + const parentTxnNumber = 1234; + runTest({ + committedTxnOpts: {lsid: parentLsid, txnNumber: parentTxnNumber - 1}, + inProgressTxnOpts: { + lsid: { + id: parentLsid.id, + txnNumber: NumberLong(parentTxnNumber), + txnUUID: UUID(), + }, + txnNumber: 1, + }, + expectInterrupt: true + }); +} + +jsTest.log("Test deleting config.transactions document for an internal session for a " + + "non-retryable write"); + +{ + const parentLsid = {id: UUID()}; + const parentTxnNumber = 1234; + runTest({ + committedTxnOpts: { + lsid: { + id: parentLsid.id, + txnUUID: UUID(), + }, + txnNumber: 1, + }, + inProgressTxnOpts: { + lsid: { + id: parentLsid.id, + }, + txnNumber: parentTxnNumber, + }, + expectInterrupt: true + }); +} + +{ + const parentLsid = {id: UUID()}; + runTest({ + committedTxnOpts: { + lsid: { + id: parentLsid.id, + txnUUID: UUID(), + }, + txnNumber: 1, + }, + inProgressTxnOpts: { + lsid: { + id: parentLsid.id, + 
txnUUID: UUID(), + }, + txnNumber: 1, + }, + expectInterrupt: true + }); +} + +jsTest.log("Test deleting config.transactions document for an internal session for the current " + + "retryable write"); + +{ + const parentLsid = {id: UUID()}; + const parentTxnNumber = 1234; + runTest({ + committedTxnOpts: { + lsid: { + id: parentLsid.id, + txnNumber: NumberLong(parentTxnNumber), + txnUUID: UUID(), + }, + txnNumber: 1, + }, + inProgressTxnOpts: {lsid: parentLsid, txnNumber: parentTxnNumber, isRetryableWrite: true}, + expectInterrupt: true + }); +} + +{ + const parentLsid = {id: UUID()}; + const parentTxnNumber = 1234; + runTest({ + committedTxnOpts: { + lsid: { + id: parentLsid.id, + txnNumber: NumberLong(parentTxnNumber), + txnUUID: UUID(), + }, + txnNumber: 1, + }, + inProgressTxnOpts: { + lsid: { + id: parentLsid.id, + txnNumber: NumberLong(parentTxnNumber), + txnUUID: UUID(), + }, + txnNumber: 1, + }, + expectInterrupt: true + }); +} + +jsTest.log("Test deleting config.transactions document for an internal transaction for the " + + "previous retryable write (i.e. no interrupt is expected)"); + +{ + const parentLsid = {id: UUID()}; + const parentTxnNumber = 1234; + runTest({ + committedTxnOpts: { + lsid: { + id: parentLsid.id, + txnNumber: NumberLong(parentTxnNumber - 1), + txnUUID: UUID(), + }, + txnNumber: 1, + }, + inProgressTxnOpts: { + lsid: parentLsid, + txnNumber: parentTxnNumber, + }, + expectInterrupt: false + }); +} + +{ + const parentLsid = {id: UUID()}; + const parentTxnNumber = 1234; + runTest({ + committedTxnOpts: { + lsid: { + id: parentLsid.id, + txnNumber: NumberLong(parentTxnNumber - 1), + txnUUID: UUID(), + }, + txnNumber: 1, + }, + inProgressTxnOpts: { + lsid: parentLsid, + txnNumber: parentTxnNumber, + isRetryableWrite: true, + }, + expectInterrupt: false + }); +} + +{ + const parentLsid = {id: UUID()}; + const parentTxnNumber = 1234; + runTest({ + committedTxnOpts: { + lsid: { + id: parentLsid.id, + txnNumber: NumberLong(parentTxnNumber - 1), + txnUUID: UUID(), + }, + txnNumber: 1, + }, + inProgressTxnOpts: { + lsid: { + id: parentLsid.id, + txnNumber: NumberLong(parentTxnNumber), + txnUUID: UUID(), + }, + txnNumber: 1, + }, + expectInterrupt: false + }); +} + +rst.stopSet(); +})(); diff --git a/jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js b/jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js index ea696d2b2b936..1e6dceeb71ee3 100644 --- a/jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js +++ b/jstests/replsets/crud_ops_do_not_throw_locktimeout_on_ticket_exhaustion.js @@ -24,7 +24,7 @@ const rst = new ReplSetTest({ nodeOptions: { setParameter: { // This test requires a fixed ticket pool size. 
- storageEngineConcurrencyAdjustmentAlgorithm: "", + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions", wiredTigerConcurrentWriteTransactions: kNumWriteTickets, wiredTigerConcurrentReadTransactions: kNumReadTickets, logComponentVerbosity: tojson({storage: 1, command: 2}) diff --git a/jstests/replsets/db_reads_while_recovering_all_commands.js b/jstests/replsets/db_reads_while_recovering_all_commands.js index b055ad8c83493..098778c1d9a15 100644 --- a/jstests/replsets/db_reads_while_recovering_all_commands.js +++ b/jstests/replsets/db_reads_while_recovering_all_commands.js @@ -34,8 +34,8 @@ const allCommands = { _configsvrBalancerStart: {skip: isPrimaryOnly}, _configsvrBalancerStatus: {skip: isPrimaryOnly}, _configsvrBalancerStop: {skip: isPrimaryOnly}, - _configsvrCheckClusterMetadataConsistency: {skip: isPrimaryOnly}, - _configsvrCheckMetadataConsistency: {skip: isPrimaryOnly}, + _configsvrCheckClusterMetadataConsistency: {skip: isAnInternalCommand}, + _configsvrCheckMetadataConsistency: {skip: isAnInternalCommand}, _configsvrCleanupReshardCollection: {skip: isPrimaryOnly}, _configsvrCollMod: {skip: isAnInternalCommand}, _configsvrClearJumboFlag: {skip: isPrimaryOnly}, @@ -58,15 +58,16 @@ const allCommands = { _configsvrRemoveShardFromZone: {skip: isPrimaryOnly}, _configsvrRemoveTags: {skip: isPrimaryOnly}, _configsvrRepairShardedCollectionChunksHistory: {skip: isPrimaryOnly}, - _configsvrRenameCollectionMetadata: {skip: isPrimaryOnly}, + _configsvrResetPlacementHistory: {skip: isPrimaryOnly}, _configsvrReshardCollection: {skip: isPrimaryOnly}, _configsvrRunRestore: {skip: isPrimaryOnly}, _configsvrSetAllowMigrations: {skip: isPrimaryOnly}, _configsvrSetClusterParameter: {skip: isPrimaryOnly}, _configsvrSetUserWriteBlockMode: {skip: isPrimaryOnly}, - _configsvrTransitionToCatalogShard: {skip: isPrimaryOnly}, + _configsvrTransitionFromDedicatedConfigServer: {skip: isPrimaryOnly}, _configsvrTransitionToDedicatedConfigServer: {skip: isPrimaryOnly}, _configsvrUpdateZoneKeyRange: {skip: isPrimaryOnly}, + _dropConnectionsToMongot: {skip: isAnInternalCommand}, _flushDatabaseCacheUpdates: {skip: isPrimaryOnly}, _flushDatabaseCacheUpdatesWithWriteConcern: {skip: isPrimaryOnly}, _flushReshardingStateChange: {skip: isPrimaryOnly}, @@ -80,6 +81,7 @@ const allCommands = { _killOperations: {skip: isNotAUserDataRead}, _mergeAuthzCollections: {skip: isPrimaryOnly}, _migrateClone: {skip: isPrimaryOnly}, + _mongotConnPoolStats: {skip: isAnInternalCommand}, _movePrimaryRecipientAbortMigration: {skip: isAnInternalCommand}, _movePrimaryRecipientForgetMigration: {skip: isAnInternalCommand}, _movePrimaryRecipientSyncData: {skip: isAnInternalCommand}, @@ -90,6 +92,7 @@ const allCommands = { _recvChunkStatus: {skip: isPrimaryOnly}, _refreshQueryAnalyzerConfiguration: {skip: isPrimaryOnly}, _shardsvrAbortReshardCollection: {skip: isPrimaryOnly}, + _shardsvrCleanupStructuredEncryptionData: {skip: isPrimaryOnly}, _shardsvrCleanupReshardCollection: {skip: isPrimaryOnly}, _shardsvrCloneCatalogData: {skip: isPrimaryOnly}, _shardsvrCompactStructuredEncryptionData: {skip: isPrimaryOnly}, @@ -100,8 +103,6 @@ const allCommands = { _shardsvrDropGlobalIndex: {skip: isAnInternalCommand}, _shardsvrDropCollection: {skip: isPrimaryOnly}, _shardsvrCreateCollection: {skip: isPrimaryOnly}, - // TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS. 
- _shardsvrDropCollectionIfUUIDNotMatching: {skip: isNotAUserDataRead}, _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern: {skip: isNotAUserDataRead}, _shardsvrDropCollectionParticipant: {skip: isPrimaryOnly}, _shardsvrDropIndexCatalogEntryParticipant: {skip: isPrimaryOnly}, @@ -137,7 +138,14 @@ const allCommands = { _shardsvrParticipantBlock: {skip: isAnInternalCommand}, _shardsvrCheckMetadataConsistency: {skip: isAnInternalCommand}, _shardsvrCheckMetadataConsistencyParticipant: {skip: isAnInternalCommand}, - _startStreamProcessor: {skip: isAnInternalCommand}, + streams_startStreamProcessor: {skip: isAnInternalCommand}, + streams_startStreamSample: {skip: isAnInternalCommand}, + streams_stopStreamProcessor: {skip: isAnInternalCommand}, + streams_listStreamProcessors: {skip: isAnInternalCommand}, + streams_getMoreStreamSample: {skip: isAnInternalCommand}, + streams_getStats: {skip: isAnInternalCommand}, + streams_testOnlyInsert: {skip: isAnInternalCommand}, + streams_getMetrics: {skip: isAnInternalCommand}, _transferMods: {skip: isPrimaryOnly}, _vectorClockPersist: {skip: isPrimaryOnly}, abortReshardCollection: {skip: isPrimaryOnly}, @@ -165,10 +173,12 @@ const allCommands = { checkShardingIndex: {skip: isPrimaryOnly}, cleanupOrphaned: {skip: isPrimaryOnly}, cleanupReshardCollection: {skip: isPrimaryOnly}, + cleanupStructuredEncryptionData: {skip: isPrimaryOnly}, clearLog: {skip: isNotAUserDataRead}, cloneCollectionAsCapped: {skip: isPrimaryOnly}, clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"}, clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"}, + clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"}, clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"}, clusterCount: {skip: "already tested by 'count' tests on mongos"}, clusterDelete: {skip: "already tested by 'delete' tests on mongos"}, @@ -378,6 +388,7 @@ const allCommands = { replSetTestEgress: {skip: isNotAUserDataRead}, replSetUpdatePosition: {skip: isNotAUserDataRead}, replSetResizeOplog: {skip: isNotAUserDataRead}, + resetPlacementHistory: {skip: isPrimaryOnly}, revokePrivilegesFromRole: {skip: isPrimaryOnly}, revokeRolesFromRole: {skip: isPrimaryOnly}, revokeRolesFromUser: {skip: isPrimaryOnly}, diff --git a/jstests/replsets/dbcheck_validation_mode_parameters.js b/jstests/replsets/dbcheck_validation_mode_parameters.js new file mode 100644 index 0000000000000..c76ab0872ba3b --- /dev/null +++ b/jstests/replsets/dbcheck_validation_mode_parameters.js @@ -0,0 +1,105 @@ +/** + * Test the validity of parameters in the dbCheck command. + * + * @tags: [ + * requires_fcv_71 + * ] + */ + +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +const dbName = "dbCheckValidationModeParameters"; +const colName = "dbCheckValidationModeParameters-collection"; + +const replSet = new ReplSetTest({ + name: jsTestName(), + nodes: 2, +}); +replSet.startSet(); +replSet.initiateWithHighElectionTimeout(); +const primary = replSet.getPrimary(); +const db = primary.getDB(dbName); +const col = db[colName]; +const nDocs = 1000; + +assert.commandWorked(col.insertMany([...Array(nDocs).keys()].map(x => ({a: x})), {ordered: false})); +replSet.awaitReplication(); + +function testFeatureFlagDisabled() { + jsTestLog("Testing dbCheck with feature flag disabled."); + // validateMode field is not allowed if feature flag is disabled. 
+ assert.commandFailedWithCode(db.runCommand({ + dbCheck: colName, + validateMode: "dataConsistency", + }), + ErrorCodes.InvalidOptions); + assert.commandWorked(db.runCommand({ + dbCheck: colName, + })); +} + +function testInvalidParameter() { + jsTestLog("Testing dbCheck with invalid parameters."); + // Unsupported enum passed in to validateMode field. + assert.commandFailedWithCode(db.runCommand({ + dbCheck: colName, + validateMode: "invalidParam", + }), + ErrorCodes.BadValue); + + // secondaryIndex field must be specified when validateMode is extraIndexKeysCheck. + assert.commandFailedWithCode(db.runCommand({ + dbCheck: colName, + validateMode: "extraIndexKeysCheck", + }), + ErrorCodes.InvalidOptions); + + // secondaryIndex field cannot be specified when validateMode is dataConsistency or + // dataConsistencyAndMissingIndexKeysCheck. + assert.commandFailedWithCode(db.runCommand({ + dbCheck: colName, + validateMode: "dataConsistency", + secondaryIndex: "secondaryIndex", + }), + ErrorCodes.InvalidOptions); + assert.commandFailedWithCode(db.runCommand({ + dbCheck: colName, + validateMode: "dataConsistencyAndMissingIndexKeysCheck", + secondaryIndex: "secondaryIndex", + }), + ErrorCodes.InvalidOptions); +} + +function testValidParameter() { + jsTestLog("Testing dbCheck with valid parameters."); + // dataConsistency is a supported enum for the validateMode field. + assert.commandWorked(db.runCommand({ + dbCheck: colName, + validateMode: "dataConsistency", + })); + + // dataConsistencyAndMissingIndexKeysCheck is a supported enum for the validateMode. + // field + assert.commandWorked(db.runCommand({ + dbCheck: colName, + validateMode: "dataConsistencyAndMissingIndexKeysCheck", + })); + + // extraIndexKeysCheck is a supported enum for the validateMode field. + assert.commandWorked(db.runCommand({ + dbCheck: colName, + validateMode: "extraIndexKeysCheck", + secondaryIndex: "secondaryIndex", + })); +} + +const secondaryIndexChecks = + FeatureFlagUtil.isPresentAndEnabled(primary, "SecondaryIndexChecksInDbCheck"); +if (secondaryIndexChecks) { + testInvalidParameter(); + testValidParameter(); +} else { + testFeatureFlagDisabled(); +} + +replSet.stopSet(); \ No newline at end of file diff --git a/jstests/replsets/dbhash_lock_acquisition.js b/jstests/replsets/dbhash_lock_acquisition.js index 1e7f3eafe349f..61a49b757d163 100644 --- a/jstests/replsets/dbhash_lock_acquisition.js +++ b/jstests/replsets/dbhash_lock_acquisition.js @@ -3,16 +3,9 @@ * resources when reading a timestamp using the $_internalReadAtClusterTime option. * * @tags: [ - * # Incompatible with all feature flags running on last continuous as dbHash runs lock-free in - * # v7.0. 
- * requires_fcv_70, * uses_transactions, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); load("jstests/libs/parallelTester.js"); // for Thread const rst = new ReplSetTest({nodes: 1}); @@ -22,13 +15,6 @@ rst.initiate(); const primary = rst.getPrimary(); const db = primary.getDB("test"); -if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - jsTestLog("Skipping test as dbHash is run lock-free with " + - "the point-in-time catalog lookups feature flag enabled"); - rst.stopSet(); - return; -} - const session = primary.startSession({causalConsistency: false}); const sessionDB = session.getDatabase(db.getName()); @@ -114,5 +100,4 @@ assert.commandWorked(threadCaptruncCmd.returnData()); assert.commandWorked(threadDBHash.returnData()); session.endSession(); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/replsets/dbhash_read_at_cluster_time.js b/jstests/replsets/dbhash_read_at_cluster_time.js index 69635bb4b4062..7f2c21c6f125f 100644 --- a/jstests/replsets/dbhash_read_at_cluster_time.js +++ b/jstests/replsets/dbhash_read_at_cluster_time.js @@ -3,18 +3,10 @@ * read concern are supported by the "dbHash" command. * * @tags: [ - * # Incompatible with all feature flags running on last continuous as dbHash can have different - * # behaviour in v7.0 when using point-in-time catalog lookups. - * requires_fcv_70, * requires_majority_read_concern, * uses_transactions, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); - const rst = new ReplSetTest({nodes: 2}); rst.startSet(); @@ -177,21 +169,12 @@ assert.eq(atClusterTimeHashBefore, const otherDB = otherSession.getDatabase("test"); // We create another collection inside a separate session to modify the collection catalog - // at an opTime later than 'clusterTime'. This prevents further usage of the snapshot - // associated with 'clusterTime' for snapshot reads if the point-in-time catalog lookups feature - // flag is disabled. + // at an opTime later than 'clusterTime'. 
assert.commandWorked(otherDB.runCommand({create: "mycoll2"})); - if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - assert.commandWorked(db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime})); - } else { - assert.commandFailedWithCode( - db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime}), - ErrorCodes.SnapshotUnavailable); - } + assert.commandWorked(db.runCommand({dbHash: 1, $_internalReadAtClusterTime: clusterTime})); otherSession.endSession(); } session.endSession(); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/replsets/disabling_chaining_changes_sync_source.js b/jstests/replsets/disabling_chaining_changes_sync_source.js index bca0f92f1cfec..b27e9eddef7ce 100644 --- a/jstests/replsets/disabling_chaining_changes_sync_source.js +++ b/jstests/replsets/disabling_chaining_changes_sync_source.js @@ -40,4 +40,4 @@ assert.commandWorked(primary.getDB("test").foo.insert({x: 1})); replSet.awaitSyncSource(secondary, primary); replSet.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/dont_set_invalid_rwconcern.js b/jstests/replsets/dont_set_invalid_rwconcern.js index 9565ca70a60d1..913b17f3123b1 100644 --- a/jstests/replsets/dont_set_invalid_rwconcern.js +++ b/jstests/replsets/dont_set_invalid_rwconcern.js @@ -56,4 +56,4 @@ assert.commandFailedWithCode(coll.insert({a: 1}, {writeConcern: {w: "bajority"}} assert.commandWorked(coll.insert({a: 1}, {writeConcern: {w: "multiRegion"}})); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/drop_collections_two_phase_dbhash.js b/jstests/replsets/drop_collections_two_phase_dbhash.js index 058a6f09aeb33..a2fa498c7c714 100644 --- a/jstests/replsets/drop_collections_two_phase_dbhash.js +++ b/jstests/replsets/drop_collections_two_phase_dbhash.js @@ -49,4 +49,4 @@ let failMsg = "dbHash during drop pending phase did not match dbHash after drop assert.eq(dropPendingDbHash, dropCommittedDbHash, failMsg); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/election_candidate_and_participant_metrics.js b/jstests/replsets/election_candidate_and_participant_metrics.js index 20c75437c610e..4d4e71efb0629 100644 --- a/jstests/replsets/election_candidate_and_participant_metrics.js +++ b/jstests/replsets/election_candidate_and_participant_metrics.js @@ -244,4 +244,4 @@ assert.eq( assert.eq(originalPrimaryElectionParticipantMetrics.priorityAtElection, 1); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/election_handoff_basic.js b/jstests/replsets/election_handoff_basic.js index 2a4376689d896..883c7738cc4af 100644 --- a/jstests/replsets/election_handoff_basic.js +++ b/jstests/replsets/election_handoff_basic.js @@ -21,4 +21,4 @@ rst.initiateWithHighElectionTimeout(); ElectionHandoffTest.testElectionHandoff(rst, 0, 1); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/election_handoff_flip.js b/jstests/replsets/election_handoff_flip.js index 8ce2b804ca8bc..17b21161e26ac 100644 --- a/jstests/replsets/election_handoff_flip.js +++ b/jstests/replsets/election_handoff_flip.js @@ -22,4 +22,4 @@ sleep(ElectionHandoffTest.stepDownPeriodSecs * 1000); ElectionHandoffTest.testElectionHandoff(rst, 1, 0); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/election_handoff_higher_priority.js b/jstests/replsets/election_handoff_higher_priority.js index 12ac4914a40c2..a3fabee15d318 100644 
--- a/jstests/replsets/election_handoff_higher_priority.js +++ b/jstests/replsets/election_handoff_higher_priority.js @@ -30,4 +30,4 @@ rst.initiate(config); ElectionHandoffTest.testElectionHandoff(rst, 0, 2); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/election_handoff_one_unelectable.js b/jstests/replsets/election_handoff_one_unelectable.js index 970b605197c46..79e4571ac213f 100644 --- a/jstests/replsets/election_handoff_one_unelectable.js +++ b/jstests/replsets/election_handoff_one_unelectable.js @@ -28,4 +28,4 @@ rst.initiate(config); ElectionHandoffTest.testElectionHandoff(rst, 0, 2); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/election_participant_new_term_metrics.js b/jstests/replsets/election_participant_new_term_metrics.js index aa76bdf4546e0..802df586a210c 100644 --- a/jstests/replsets/election_participant_new_term_metrics.js +++ b/jstests/replsets/election_participant_new_term_metrics.js @@ -106,4 +106,4 @@ assert(!testNodeElectionParticipantMetrics.newTermAppliedDate, tojson(testNodeElectionParticipantMetrics)); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/force_reconfig_skips_config_replication.js b/jstests/replsets/force_reconfig_skips_config_replication.js index fe3c10308f097..65d2f6871f918 100644 --- a/jstests/replsets/force_reconfig_skips_config_replication.js +++ b/jstests/replsets/force_reconfig_skips_config_replication.js @@ -48,4 +48,4 @@ secondary.reconnect(primary); replTest.awaitNodesAgreeOnConfigVersion(); replTest.stopSet(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/groupAndMapReduce.js b/jstests/replsets/groupAndMapReduce.js index 22935531bcf27..88b0b5ffc3b60 100644 --- a/jstests/replsets/groupAndMapReduce.js +++ b/jstests/replsets/groupAndMapReduce.js @@ -1,6 +1,6 @@ load("jstests/replsets/rslib.js"); -doTest = function(signal) { +let doTest = function(signal) { // Test basic replica set functionality. // -- Replication // -- Failover @@ -32,7 +32,7 @@ doTest = function(signal) { // and secondaries in the set and wait until the change has replicated. 
replTest.awaitReplication(); - secondaries = replTest.getSecondaries(); + let secondaries = replTest.getSecondaries(); assert(secondaries.length == 2, "Expected 2 secondaries but length was " + secondaries.length); secondaries.forEach(function(secondary) { // try to read from secondary @@ -47,10 +47,10 @@ doTest = function(signal) { print("Calling inline mr() with secondaryOk=true, must succeed"); secondary.setSecondaryOk(); - map = function() { + let map = function() { emit(this.a, 1); }; - reduce = function(key, vals) { + let reduce = function(key, vals) { var sum = 0; for (var i = 0; i < vals.length; ++i) { sum += vals[i]; diff --git a/jstests/replsets/heartbeat_reconfig_propagates_default_write_concern.js b/jstests/replsets/heartbeat_reconfig_propagates_default_write_concern.js index 309fb7c134487..47ba466472bd6 100644 --- a/jstests/replsets/heartbeat_reconfig_propagates_default_write_concern.js +++ b/jstests/replsets/heartbeat_reconfig_propagates_default_write_concern.js @@ -56,4 +56,4 @@ function runTest(hasArbiter) { runTest(false /* hasArbiter */); runTest(true /* hasArbiter */); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/initial_sync_capped_index.js b/jstests/replsets/initial_sync_capped_index.js index 0400621b6b21f..ac6bcab2a37b4 100644 --- a/jstests/replsets/initial_sync_capped_index.js +++ b/jstests/replsets/initial_sync_capped_index.js @@ -22,6 +22,10 @@ * 8. Once initial sync completes, ensure that capped collection indexes on the SECONDARY are valid. * * This is a regression test for SERVER-29197. + * + * @tags: [ + * uses_full_validation, + * ] */ (function() { "use strict"; diff --git a/jstests/replsets/initial_sync_chooses_correct_sync_source.js b/jstests/replsets/initial_sync_chooses_correct_sync_source.js index bc44457e84e63..5fd6327e57554 100644 --- a/jstests/replsets/initial_sync_chooses_correct_sync_source.js +++ b/jstests/replsets/initial_sync_chooses_correct_sync_source.js @@ -48,6 +48,11 @@ const restartAndWaitForHeartbeats = (rst, initialSyncNode, setParameterOpts = {} setParameter: setParameterOpts, }); + // Wait for the restarted node to hit initial sync, then wait for heartbeats. This is to + // prevent a potential race where we wait for heartbeats in startup recovery, which satisfies + // the JS test, but then restart heartbeats and treat the other nodes as DOWN when entering + // initial sync. + rst.waitForState(initialSyncNode, ReplSetTest.State.STARTUP_2); waitForHeartbeats(initialSyncNode); }; diff --git a/jstests/replsets/initial_sync_clone_multikey.js b/jstests/replsets/initial_sync_clone_multikey.js index 8c4416beeaae4..312a4f0151cd4 100644 --- a/jstests/replsets/initial_sync_clone_multikey.js +++ b/jstests/replsets/initial_sync_clone_multikey.js @@ -16,7 +16,7 @@ const primaryDB = primary.getDB(dbName); jsTestLog("Creating the collection and an index."); assert.commandWorked(primaryDB.createCollection(collName)); -assert.commandWorked(primaryDB[collName].createIndex({"x": 1}, {background: true})); +assert.commandWorked(primaryDB[collName].createIndex({"x": 1})); // Make the index multikey. 
primaryDB[collName].insert({x: [1, 2]}); diff --git a/jstests/replsets/initial_sync_nodes_maintain_and_gossip_commit_point.js b/jstests/replsets/initial_sync_nodes_maintain_and_gossip_commit_point.js index 9b79b67261137..c5f485d5f5fa1 100644 --- a/jstests/replsets/initial_sync_nodes_maintain_and_gossip_commit_point.js +++ b/jstests/replsets/initial_sync_nodes_maintain_and_gossip_commit_point.js @@ -118,15 +118,21 @@ assert.eq(1, rs.compareOpTimes(thirdCommitPointSecondary, secondCommitPointSecon hangBeforeCompletingOplogFetching.off(); hangBeforeFinish.wait(); -// Verify that the initial sync node receives the commit point from the primary via oplog fetching. +// Verify that the initial sync node receives the commit point from the primary, either via oplog +// fetching or by a heartbeat. This will usually happen via oplog fetching but in some cases it is +// possible that the OplogFetcher shuts down before this ever happens. See SERVER-76695 for details. // We only assert that it is greater than or equal to the second commit point because it is possible // for the commit point to not yet be advanced by the primary when we fetch the oplog entry. -const commitPointInitialSyncNode = getLastCommittedOpTime(initialSyncNode); -assert.gte( - rs.compareOpTimes(commitPointInitialSyncNode, secondCommitPointPrimary), - 0, - `commit point on initial sync node should be at least as up-to-date as the second commit point: ${ - tojson(commitPointInitialSyncNode)}`); +assert.soon(() => { + const commitPointInitialSyncNode = getLastCommittedOpTime(initialSyncNode); + // compareOpTimes will throw an error if given an invalid opTime, and if the + // node has not yet advanced its opTime it will still have the default one, + // which is invalid. + if (!globalThis.rs.isValidOpTime(commitPointInitialSyncNode)) { + return false; + } + return rs.compareOpTimes(commitPointInitialSyncNode, secondCommitPointPrimary) >= 0; +}, `commit point on initial sync node should be at least as up-to-date as the second commit point`); // Verify that the non-voting secondary has received the updated commit point via heartbeats from // the initial sync node. 
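
The hunk above in initial_sync_nodes_maintain_and_gossip_commit_point.js swaps a one-shot opTime comparison for a polling loop because the initial-sync node can still be reporting the default, invalid opTime when the check first runs, and the commit point may arrive via either oplog fetching or a heartbeat. Below is only a minimal sketch of that retry pattern under assumed names: getLastCommittedOpTime is the test's own helper, while isValidOpTime and compareOpTimes stand in for the rs utilities the test relies on, so treat this as an illustration rather than part of the patch.

```js
// Sketch (assumed helper names, not part of the patch): keep retrying until the node
// reports a valid commit point that is at least as recent as the reference opTime.
function awaitCommitPointAtLeast(node, referenceOpTime) {
    assert.soon(() => {
        const commitPoint = getLastCommittedOpTime(node);
        // A node that has not yet advanced its commit point still holds the default,
        // invalid opTime; comparing against it would throw, so return false and retry.
        if (!rs.isValidOpTime(commitPoint)) {
            return false;
        }
        return rs.compareOpTimes(commitPoint, referenceOpTime) >= 0;
    }, "commit point never caught up to " + tojson(referenceOpTime));
}
```
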
diff --git a/jstests/replsets/initial_sync_read_concern_no_oplog.js b/jstests/replsets/initial_sync_read_concern_no_oplog.js index 29e908f5b804a..85bc46c810a5f 100644 --- a/jstests/replsets/initial_sync_read_concern_no_oplog.js +++ b/jstests/replsets/initial_sync_read_concern_no_oplog.js @@ -28,4 +28,4 @@ replSet.awaitReplication(); replSet.awaitSecondaryNodes(); replSet.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/initial_sync_replicates_prepare_received_during_another_initial_sync.js b/jstests/replsets/initial_sync_replicates_prepare_received_during_another_initial_sync.js index 5e31305b07391..7a4136dc2620c 100644 --- a/jstests/replsets/initial_sync_replicates_prepare_received_during_another_initial_sync.js +++ b/jstests/replsets/initial_sync_replicates_prepare_received_during_another_initial_sync.js @@ -114,4 +114,4 @@ jsTestLog("secondary2 successfully replicated prepared transaction after initial assert.commandWorked(PrepareHelpers.commitTransaction(session, prepareTimestamp)); replSet.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/initial_sync_with_write_load.js b/jstests/replsets/initial_sync_with_write_load.js index c696b347b0b86..7e885dc79025f 100644 --- a/jstests/replsets/initial_sync_with_write_load.js +++ b/jstests/replsets/initial_sync_with_write_load.js @@ -47,7 +47,7 @@ var work = function() { assert.commandWorked(db.timeToStartTrigger.insert({_id: 1})); while (true) { - for (x = 0; x < 100; x++) { + for (let x = 0; x < 100; x++) { db["a" + x].insert({a: x}); } diff --git a/jstests/replsets/initial_sync_write_conflict.js b/jstests/replsets/initial_sync_write_conflict.js index 1571c318d45f8..1aecfaf28b097 100644 --- a/jstests/replsets/initial_sync_write_conflict.js +++ b/jstests/replsets/initial_sync_write_conflict.js @@ -31,4 +31,4 @@ replSet.awaitSecondaryNodes(); // If the index table contains any entries pointing to invalid document(RecordID), then // validateCollections called during replica stopSet will capture the index corruption and throw // error. -replSet.stopSet(); \ No newline at end of file +replSet.stopSet(); diff --git a/jstests/replsets/internal_sessions_reaping_basic.js b/jstests/replsets/internal_sessions_reaping_basic.js index adf6ec1021e9e..890b0f9bbf77b 100644 --- a/jstests/replsets/internal_sessions_reaping_basic.js +++ b/jstests/replsets/internal_sessions_reaping_basic.js @@ -3,7 +3,7 @@ * config.image_collection entries for a transaction session if the logical session that it * corresponds to has expired and been removed from the config.system.sessions collection. * - * @tags: [requires_fcv_70, uses_transactions] + * @tags: [requires_fcv_60, uses_transactions] */ (function() { @@ -24,7 +24,6 @@ const rst = new ReplSetTest({ replBatchLimitOperations: 1, // Make transaction records expire immediately. 
TransactionRecordMinimumLifetimeMinutes: 0, - storeFindAndModifyImagesInSideCollection: true, internalSessionsReapThreshold: 0 } } diff --git a/jstests/replsets/internal_sessions_reaping_interrupt.js b/jstests/replsets/internal_sessions_reaping_interrupt.js index 827d21c8b9ee6..bc15bba12e496 100644 --- a/jstests/replsets/internal_sessions_reaping_interrupt.js +++ b/jstests/replsets/internal_sessions_reaping_interrupt.js @@ -17,7 +17,9 @@ const rst = new ReplSetTest({ ttlMonitorEnabled: false, disableLogicalSessionCacheRefresh: false, TransactionRecordMinimumLifetimeMinutes: 0, - logicalSessionRefreshMillis + logicalSessionRefreshMillis, + // Make the eager reaping occur more frequently. + internalSessionsReapThreshold: 5, } } }); diff --git a/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js b/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js index 56f46b581550f..b4f29d454b9b8 100644 --- a/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js +++ b/jstests/replsets/kill_reads_with_prepare_conflicts_during_step_down.js @@ -109,4 +109,4 @@ assert.commandWorked(sessionDB.adminCommand({ })); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/libs/basic_replset_test.js b/jstests/replsets/libs/basic_replset_test.js new file mode 100644 index 0000000000000..38c1398755392 --- /dev/null +++ b/jstests/replsets/libs/basic_replset_test.js @@ -0,0 +1,132 @@ +load("jstests/replsets/rslib.js"); +load('jstests/replsets/libs/election_metrics.js'); + +function basicReplsetTest(signal, ssl_options1, ssl_options2, ssl_name) { + // Test basic replica set functionality. + // -- Replication + // -- Failover + + // Choose a name that is unique to the options specified. + // This is important because we are depending on a fresh replicaSetMonitor for each run; + // each differently-named replica set gets its own monitor. + // n0 and n1 get the same SSL config since there are 3 nodes but only 2 different configs + let replTest = new ReplSetTest({ + name: 'testSet' + ssl_name, + nodes: {n0: ssl_options1, n1: ssl_options1, n2: ssl_options2} + }); + + // call startSet() to start each mongod in the replica set + replTest.startSet(); + + // Call initiate() to send the replSetInitiate command + // This will wait for initiation + replTest.initiate(); + + // Call getPrimary to return a reference to the node that's been + // elected primary. + let primary = replTest.getPrimary(); + + // Check that both the 'called' and 'successful' fields of the 'electionTimeout' election reason + // counter have been incremented in serverStatus. + const primaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1})); + verifyServerStatusElectionReasonCounterValue( + primaryStatus.electionMetrics, "electionTimeout", 1); + + // Ensure the primary logs an n-op to the oplog upon transitioning to primary. + assert.gt(primary.getDB("local").oplog.rs.count({op: 'n', o: {msg: 'new primary'}}), 0); + + // Here's how you save something to primary + primary.getDB("foo").foo.save({a: 1000}); + + // This method will check the oplogs of the primary + // and secondaries in the set and wait until the change has replicated. 
+ replTest.awaitReplication(); + + let cppconn = new Mongo(replTest.getURL()).getDB("foo"); + assert.eq(1000, cppconn.foo.findOne().a, "cppconn 1"); + + { + // check c++ finding other servers + let temp = replTest.getURL(); + temp = temp.substring(0, temp.lastIndexOf(",")); + temp = new Mongo(temp).getDB("foo"); + assert.eq(1000, temp.foo.findOne().a, "cppconn 1"); + } + + // Here's how to stop the primary node + let primaryId = replTest.getNodeId(primary); + replTest.stop(primaryId); + + // Now let's see who the new primary is: + let newPrimary = replTest.getPrimary(); + + // Is the new primary the same as the old primary? + let newPrimaryId = replTest.getNodeId(newPrimary); + + assert(primaryId != newPrimaryId, "Old primary shouldn't be equal to new primary."); + + reconnect(cppconn); + assert.eq(1000, cppconn.foo.findOne().a, "cppconn 2"); + + // Now let's write some documents to the new primary + let bulk = newPrimary.getDB("bar").bar.initializeUnorderedBulkOp(); + for (let i = 0; i < 1000; i++) { + bulk.insert({a: i}); + } + bulk.execute(); + + // Here's how to restart the old primary node: + let secondary = replTest.restart(primaryId); + + // Now, let's make sure that the old primary comes up as a secondary + assert.soon(function() { + let res = secondary.getDB("admin").runCommand({hello: 1}); + printjson(res); + return res['ok'] == 1 && res['isWritablePrimary'] == false; + }); + + // And we need to make sure that the replset comes back up + assert.soon(function() { + let res = newPrimary.getDB("admin").runCommand({replSetGetStatus: 1}); + printjson(res); + return res.myState == 1; + }); + + // And that both secondary nodes have all the updates + newPrimary = replTest.getPrimary(); + assert.eq(1000, newPrimary.getDB("bar").runCommand({count: "bar"}).n, "assumption 2"); + replTest.awaitSecondaryNodes(); + replTest.awaitReplication(); + + let secondaries = replTest.getSecondaries(); + assert(secondaries.length == 2, "Expected 2 secondaries but length was " + secondaries.length); + secondaries.forEach(function(secondary) { + secondary.setSecondaryOk(); + let count = secondary.getDB("bar").runCommand({count: "bar"}); + printjson(count); + assert.eq(1000, count.n, "secondary count wrong: " + secondary); + }); + + // last error + primary = replTest.getPrimary(); + secondaries = replTest.getSecondaries(); + + let db = primary.getDB("foo"); + let t = db.foo; + + let ts = secondaries.map(function(z) { + z.setSecondaryOk(); + return z.getDB("foo").foo; + }); + + t.save({a: 1000}); + t.createIndex({a: 1}); + replTest.awaitReplication(); + + ts.forEach(function(z) { + assert.eq(2, z.getIndexKeys().length, "A " + z.getMongo()); + }); + + // Shut down the set and finish the test. 
+ replTest.stopSet(signal); +} diff --git a/jstests/replsets/libs/oplog_rollover_test.js b/jstests/replsets/libs/oplog_rollover_test.js index 6de111fd88389..e03f593b09fae 100644 --- a/jstests/replsets/libs/oplog_rollover_test.js +++ b/jstests/replsets/libs/oplog_rollover_test.js @@ -7,7 +7,7 @@ load("jstests/libs/fail_point_util.js"); -function oplogRolloverTest(storageEngine, initialSyncMethod) { +function oplogRolloverTest(storageEngine, initialSyncMethod, serverless = false) { jsTestLog("Testing with storageEngine: " + storageEngine); if (initialSyncMethod) { jsTestLog(" and initial sync method: " + initialSyncMethod); @@ -25,14 +25,20 @@ function oplogRolloverTest(storageEngine, initialSyncMethod) { if (initialSyncMethod) { parameters = Object.merge(parameters, {initialSyncMethod: initialSyncMethod}); } - const replSet = new ReplSetTest({ + + let replSetOptions = { // Set the syncdelay to 1s to speed up checkpointing. nodeOptions: { syncdelay: 1, setParameter: parameters, }, nodes: [{}, {rsConfig: {priority: 0, votes: 0}}] - }); + }; + + if (serverless) + replSetOptions = Object.merge(replSetOptions, {serverless: true}); + + const replSet = new ReplSetTest(replSetOptions); // Set max oplog size to 1MB. replSet.startSet({storageEngine: storageEngine, oplogSize: 1}); replSet.initiate(); diff --git a/jstests/replsets/libs/prepare_failover_due_to_reconfig.js b/jstests/replsets/libs/prepare_failover_due_to_reconfig.js index cf5b4e58fd869..c2fd990dafc00 100644 --- a/jstests/replsets/libs/prepare_failover_due_to_reconfig.js +++ b/jstests/replsets/libs/prepare_failover_due_to_reconfig.js @@ -69,4 +69,4 @@ var testPrepareFailoverDueToReconfig = function(name, reconfigOnPrimary) { assert.docEq(newDoc, doc); rst.stopSet(); -}; \ No newline at end of file +}; diff --git a/jstests/replsets/libs/rename_across_dbs.js b/jstests/replsets/libs/rename_across_dbs.js index 59b0fba4e1418..ed7b9c77ba25c 100644 --- a/jstests/replsets/libs/rename_across_dbs.js +++ b/jstests/replsets/libs/rename_across_dbs.js @@ -165,6 +165,7 @@ var RenameAcrossDatabasesTest = function(options) { _testLog('Checking oplogs and dbhashes after renaming collection.'); replTest.awaitReplication(); replTest.checkOplogs(testName); + replTest.checkPreImageCollection(testName); replTest.checkReplicatedDataHashes(testName); _testLog('Test completed. 
Stopping replica set.'); diff --git a/jstests/replsets/libs/rollback_resumable_index_build.js b/jstests/replsets/libs/rollback_resumable_index_build.js index 94b64e67a4da0..9925b0b6941f4 100644 --- a/jstests/replsets/libs/rollback_resumable_index_build.js +++ b/jstests/replsets/libs/rollback_resumable_index_build.js @@ -79,8 +79,10 @@ const RollbackResumableIndexBuildTest = class { rollbackTest.awaitLastOpCommitted(); - assert.commandWorked(originalPrimary.adminCommand( - {setParameter: 1, logComponentVerbosity: {index: 1, replication: {heartbeats: 0}}})); + assert.commandWorked(originalPrimary.adminCommand({ + setParameter: 1, + logComponentVerbosity: {index: 1, replication: {election: 0, heartbeats: 0}}, + })); // Set internalQueryExecYieldIterations to 0, internalIndexBuildBulkLoadYieldIterations to // 1, and maxIndexBuildDrainBatchSize to 1 so that the index builds are guaranteed to yield @@ -311,4 +313,4 @@ const RollbackResumableIndexBuildTest = class { testInfo.buildUUIDs, testInfo.indexNames); } -}; \ No newline at end of file +}; diff --git a/jstests/replsets/libs/rollback_test.js b/jstests/replsets/libs/rollback_test.js index 8a19b15b51db0..db58d73b3fb69 100644 --- a/jstests/replsets/libs/rollback_test.js +++ b/jstests/replsets/libs/rollback_test.js @@ -107,6 +107,34 @@ function RollbackTest(name = "RollbackTest", replSet, nodeOptions) { // Make sure we have a replica set up and running. replSet = (replSet === undefined) ? performStandardSetup(nodeOptions) : replSet; + + // Return an helper function to set a tenantId on commands if it is required. + let addTenantIdIfNeeded = (function() { + const adminDB = replSet.getPrimary().getDB("admin"); + const flagDoc = assert.commandWorked( + adminDB.adminCommand({getParameter: 1, featureFlagRequireTenantID: 1})); + const multitenancyDoc = + assert.commandWorked(adminDB.adminCommand({getParameter: 1, multitenancySupport: 1})); + const fcvDoc = assert.commandWorked( + adminDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1})); + if (multitenancyDoc.hasOwnProperty("multitenancySupport") && + multitenancyDoc.multitenancySupport && + flagDoc.hasOwnProperty("featureFlagRequireTenantID") && + flagDoc.featureFlagRequireTenantID.value && + MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version, + flagDoc.featureFlagRequireTenantID.version) >= 0) { + const tenantId = ObjectId(); + + return function(cmdObj) { + return Object.assign(cmdObj, {'$tenant': tenantId}); + }; + } else { + return function(cmdObj) { + return cmdObj; + }; + } + })(); + validateAndUseSetup(replSet); // Majority writes in the initial phase, before transitionToRollbackOperations(), should be @@ -181,9 +209,12 @@ function RollbackTest(name = "RollbackTest", replSet, nodeOptions) { // ensure the insert was replicated and written to the on-disk journal of all 3 // nodes, with the exception of ephemeral and in-memory storage engines where // journaling isn't supported. 
- assert.commandWorked(curPrimary.getDB(dbName).ensureSyncSource.insert( - {thisDocument: 'is inserted to ensure any node can sync from any other'}, - {writeConcern: {w: 3, j: config.writeConcernMajorityJournalDefault}})); + + assert.commandWorked(curPrimary.getDB(dbName).runCommand(addTenantIdIfNeeded({ + insert: "ensureSyncSource", + documents: [{thisDocument: 'is inserted to ensure any node can sync from any other'}], + writeConcern: {w: 3, j: config.writeConcernMajorityJournalDefault} + }))); } /** @@ -296,6 +327,10 @@ function RollbackTest(name = "RollbackTest", replSet, nodeOptions) { return rst.getPrimary(ReplSetTest.kDefaultTimeoutMS, kRetryIntervalMS); } + this.stepUpNode = function(conn) { + stepUp(conn); + }; + function oplogTop(conn) { return conn.getDB("local").oplog.rs.find().limit(1).sort({$natural: -1}).next(); } @@ -450,10 +485,12 @@ function RollbackTest(name = "RollbackTest", replSet, nodeOptions) { // ensure that this document is not lost due to unclean shutdowns. Ephemeral and in-memory // storage engines are an exception because journaling isn't supported. let writeConcern = TestData.rollbackShutdowns ? {w: 1, j: true} : {w: 1}; - let dbName = "EnsureThereIsAtLeastOneOperationToRollback"; - assert.commandWorked(curPrimary.getDB(dbName).ensureRollback.insert( - {thisDocument: 'is inserted to ensure rollback is not skipped'}, - {writeConcern: writeConcern})); + let dbName = "EnsureThereIsAtLeastOneOpToRollback"; + assert.commandWorked(curPrimary.getDB(dbName).runCommand(addTenantIdIfNeeded({ + insert: "ensureRollback", + documents: [{thisDocument: 'is inserted to ensure rollback is not skipped'}], + writeConcern + }))); log(`Isolating the primary ${curPrimary.host} so it will step down`); // We should have already disconnected the primary from the secondary during the first stage diff --git a/jstests/replsets/libs/tenant_migration_recipient_sync_source.js b/jstests/replsets/libs/tenant_migration_recipient_sync_source.js index 521fd89ecf58e..d273651afd474 100644 --- a/jstests/replsets/libs/tenant_migration_recipient_sync_source.js +++ b/jstests/replsets/libs/tenant_migration_recipient_sync_source.js @@ -3,7 +3,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -40,7 +40,7 @@ export function setUpMigrationSyncSourceTest() { const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRst}); const tenantId = ObjectId().str; - const tenantDB = tenantMigrationTest.tenantDB(tenantId, "DB"); + const tenantDB = makeTenantDB(tenantId, "DB"); const collName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/replsets/libs/tenant_migration_test.js b/jstests/replsets/libs/tenant_migration_test.js index 270c443c8a10c..8e36b7234f307 100644 --- a/jstests/replsets/libs/tenant_migration_test.js +++ b/jstests/replsets/libs/tenant_migration_test.js @@ -10,12 +10,15 @@ import { createTenantMigrationRecipientRoleIfNotExist, createTenantMigrationDonorRoleIfNotExist, runTenantMigrationCommand, + runDonorStartMigrationCommand, isMigrationCompleted, checkTenantDBHashes, getExternalKeys, isShardMergeEnabled, isNamespaceForTenant, getTenantMigrationAccessBlocker, + kProtocolShardMerge, + kProtocolMultitenantMigrations, } from 
"jstests/replsets/libs/tenant_migration_util.js"; load("jstests/aggregation/extras/utils.js"); @@ -67,6 +70,9 @@ export class TenantMigrationTest { * Make a new TenantMigrationTest * * @param {string} [name] the name of the replica sets + * @param {string} [protocol] the migration protocol to use, either "multitenant migrations" or + * "shard merge". If no value is provided, will default to "shard merge" if the shard merge + * feature flag is enabled, otherwise will be set to "multitenant migrations" * @param {boolean} [enableRecipientTesting] whether recipient would actually migrate tenant * data * @param {Object} [donorRst] the ReplSetTest instance to adopt for the donor @@ -83,6 +89,7 @@ export class TenantMigrationTest { */ constructor({ name = "TenantMigrationTest", + protocol = "", enableRecipientTesting = true, donorRst, recipientRst, @@ -132,7 +139,8 @@ export class TenantMigrationTest { tojson({mode: 'alwaysOn'}); } - let nodeOptions = isDonor ? migrationX509Options.donor : migrationX509Options.recipient; + const nodeOptions = + isDonor ? migrationX509Options.donor : migrationX509Options.recipient; nodeOptions["setParameter"] = setParameterOpts; const rstName = `${name}_${(isDonor ? "donor" : "recipient")}`; @@ -151,6 +159,19 @@ export class TenantMigrationTest { this._recipientRst = this._recipientPassedIn ? recipientRst : performSetUp(false /* isDonor */); + // If we don't pass "protocol" and shard merge is enabled, we set the protocol to + // "shard merge". Otherwise, the provided protocol is used, which defaults to + // "multitenant migrations" if not provided. + if (protocol === "" && isShardMergeEnabled(this.getDonorPrimary().getDB("admin"))) { + this.protocol = kProtocolShardMerge; + } else if (protocol === "") { + this.protocol = kProtocolMultitenantMigrations; + } + + this.configRecipientsNs = this.protocol === kProtocolShardMerge + ? TenantMigrationTest.kConfigShardMergeRecipientsNS + : TenantMigrationTest.kConfigRecipientsNS; + this._donorRst.asCluster(this._donorRst.nodes, () => { this._donorRst.getPrimary(); this._donorRst.awaitReplication(); @@ -228,12 +249,8 @@ export class TenantMigrationTest { * * Returns the result of the 'donorStartMigration' command. 
*/ - startMigration(migrationOpts, - {retryOnRetryableErrors = false, enableDonorStartMigrationFsync = false} = {}) { - return this.runDonorStartMigration(migrationOpts, { - retryOnRetryableErrors, - enableDonorStartMigrationFsync, - }); + startMigration(migrationOpts, {retryOnRetryableErrors = false} = {}) { + return this.runDonorStartMigration(migrationOpts, {retryOnRetryableErrors}); } /** @@ -291,11 +308,9 @@ export class TenantMigrationTest { const { waitForMigrationToComplete = false, retryOnRetryableErrors = false, - enableDonorStartMigrationFsync = false, } = opts; - const cmdObj = { - donorStartMigration: 1, + const migrationOpts = { migrationId: UUID(migrationIdString), tenantId, tenantIds, @@ -306,8 +321,7 @@ export class TenantMigrationTest { protocol }; - const stateRes = runTenantMigrationCommand(cmdObj, this.getDonorRst(), { - enableDonorStartMigrationFsync, + const stateRes = runDonorStartMigrationCommand(migrationOpts, this.getDonorRst(), { retryOnRetryableErrors, shouldStopFunc: stateRes => (!waitForMigrationToComplete || isMigrationCompleted(stateRes)) @@ -344,27 +358,21 @@ export class TenantMigrationTest { donorPrimary.getCollection(TenantMigrationTest.kConfigDonorsNS).findOne({ _id: UUID(migrationIdString) }); + const recipientStateDoc = - recipientPrimary.getCollection(TenantMigrationTest.kConfigRecipientsNS).findOne({ + recipientPrimary.getCollection(this.configRecipientsNs).findOne({ _id: UUID(migrationIdString) }); - const shardMergeRecipientStateDoc = - recipientPrimary.getCollection(TenantMigrationTest.kConfigShardMergeRecipientsNS) - .findOne({_id: UUID(migrationIdString)}); - if (donorStateDoc) { assert(donorStateDoc.expireAt); } if (recipientStateDoc) { assert(recipientStateDoc.expireAt); } - if (shardMergeRecipientStateDoc) { - assert(shardMergeRecipientStateDoc.expireAt); - } const configDBCollections = recipientPrimary.getDB('config').getCollectionNames(); - assert(!configDBCollections.includes('repl.migration.oplog_' + migrationIdString), + assert(!configDBCollections.includes(`repl.migration.oplog_${migrationIdString}`), configDBCollections); this.getDonorRst().asCluster(donorPrimary, () => { @@ -425,12 +433,7 @@ export class TenantMigrationTest { }); recipientNodes.forEach(node => { - const configRecipientsColl = - node.getCollection(TenantMigrationTest.kConfigRecipientsNS); - assert.soon(() => 0 === configRecipientsColl.count({_id: migrationId}), tojson(node)); - - const configShardMergeRecipientsColl = - node.getCollection(TenantMigrationTest.kConfigShardMergeRecipientsNS); + const configRecipientsColl = node.getCollection(this.configRecipientsNs); assert.soon(() => 0 === configRecipientsColl.count({_id: migrationId}), tojson(node)); let mtab; @@ -544,13 +547,8 @@ export class TenantMigrationTest { expectedAccessState, }) { const configRecipientsColl = - this.getRecipientPrimary().getCollection("config.tenantMigrationRecipients"); - let configDoc = configRecipientsColl.findOne({_id: migrationId}); - if (!configDoc) { - configDoc = this.getRecipientPrimary() - .getCollection(TenantMigrationTest.kConfigShardMergeRecipientsNS) - .findOne({_id: migrationId}); - } + this.getRecipientPrimary().getCollection(this.configRecipientsNs); + const configDoc = configRecipientsColl.findOne({_id: migrationId}); const mtab = this.getTenantMigrationAccessBlocker({recipientNode: node, tenantId}); @@ -602,13 +600,6 @@ export class TenantMigrationTest { () => (`${tojson(docsReturned)} is not equal to ${tojson(data)}`)); } - /** - * Crafts a tenant database name. 
- */ - tenantDB(tenantId, dbName) { - return `${tenantId}_${dbName}`; - } - /** * Returns the TenantMigrationAccessBlocker serverStatus output for the migration or shard merge * for the given node. @@ -715,8 +706,8 @@ TenantMigrationTest.DonorAccessState = { }; TenantMigrationTest.RecipientAccessState = { - kReject: "reject", - kRejectBefore: "rejectBefore" + kRejectReadsAndWrites: "rejectReadsAndWrites", + kRejectReadsBefore: "rejectReadsBefore" }; TenantMigrationTest.kConfigDonorsNS = "config.tenantMigrationDonors"; diff --git a/jstests/replsets/libs/tenant_migration_util.js b/jstests/replsets/libs/tenant_migration_util.js index 611ca24fb66c6..ed4e701665e99 100644 --- a/jstests/replsets/libs/tenant_migration_util.js +++ b/jstests/replsets/libs/tenant_migration_util.js @@ -2,6 +2,15 @@ * Utilities for testing tenant migrations. */ export const kExternalKeysNs = "config.external_validation_keys"; +export const kProtocolShardMerge = "shard merge"; +export const kProtocolMultitenantMigrations = "multitenant migrations"; + +/** + * Crafts a tenant database name. + */ +export function makeTenantDB(tenantId, dbName) { + return `${tenantId}_${dbName}`; +} /** * Returns true if feature flag 'featureFlagShardMerge' is enabled, false otherwise. @@ -28,35 +37,6 @@ function shouldUseMergeTenantIds(db) { return MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version, "6.3") >= 0; } -/** - * Construct a donorStartMigration command object with protocol: "shard merge" if the feature - * flag is enabled. - */ -export function donorStartMigrationWithProtocol(cmd, db) { - // If we don't pass "protocol" and shard merge is enabled, we set the protocol to - // "shard merge". Otherwise, the provided protocol is used, which defaults to - // "multitenant migrations" if not provided. - if (cmd["protocol"] === undefined && isShardMergeEnabled(db)) { - const cmdCopy = Object.assign({}, cmd); - - if (shouldUseMergeTenantIds(db)) { - cmdCopy.tenantIds = cmdCopy.tenantIds || [ObjectId(cmdCopy.tenantId)]; - } - - delete cmdCopy.tenantId; - cmdCopy.protocol = "shard merge"; - return cmdCopy; - } else if (cmd["protocol"] == "shard merge") { - const cmdCopy = Object.assign({}, cmd); - delete cmdCopy.tenantId; - return cmdCopy; - } else { - const cmdCopy = Object.assign({}, cmd); - delete cmdCopy.tenantIds; - return cmdCopy; - } -} - /** * Returns the external keys for the given migration id. */ @@ -138,35 +118,31 @@ export function isMigrationCompleted(res) { * fixture. 
*/ export async function runMigrationAsync(migrationOpts, donorRstArgs, opts = {}) { - const {isMigrationCompleted, makeMigrationCertificatesForTest, runTenantMigrationCommand} = + const {isMigrationCompleted, makeMigrationCertificatesForTest, runDonorStartMigrationCommand} = await import("jstests/replsets/libs/tenant_migration_util.js"); load("jstests/replsets/rslib.js"); // createRst const { retryOnRetryableErrors = false, - enableDonorStartMigrationFsync = false, } = opts; const donorRst = createRst(donorRstArgs, retryOnRetryableErrors); const migrationCertificates = makeMigrationCertificatesForTest(); - const cmdObj = { - donorStartMigration: 1, - migrationId: UUID(migrationOpts.migrationIdString), - tenantId: migrationOpts.tenantId, - tenantIds: eval(migrationOpts.tenantIds), - recipientConnectionString: migrationOpts.recipientConnString, - readPreference: migrationOpts.readPreference || {mode: "primary"}, - donorCertificateForRecipient: migrationOpts.donorCertificateForRecipient || - migrationCertificates.donorCertificateForRecipient, - recipientCertificateForDonor: migrationOpts.recipientCertificateForDonor || - migrationCertificates.recipientCertificateForDonor, - }; - return runTenantMigrationCommand(cmdObj, donorRst, { - retryOnRetryableErrors, - enableDonorStartMigrationFsync, - shouldStopFunc: isMigrationCompleted - }); + return runDonorStartMigrationCommand( + { + migrationId: UUID(migrationOpts.migrationIdString), + tenantId: migrationOpts.tenantId, + tenantIds: eval(migrationOpts.tenantIds), + recipientConnectionString: migrationOpts.recipientConnString, + readPreference: migrationOpts.readPreference || {mode: "primary"}, + donorCertificateForRecipient: migrationOpts.donorCertificateForRecipient || + migrationCertificates.donorCertificateForRecipient, + recipientCertificateForDonor: migrationOpts.recipientCertificateForDonor || + migrationCertificates.recipientCertificateForDonor, + }, + donorRst, + {retryOnRetryableErrors, shouldStopFunc: isMigrationCompleted}); } /** @@ -215,6 +191,36 @@ export async function tryAbortMigrationAsync( return runTenantMigrationCommand(cmdObj, donorRst, {retryOnRetryableErrors}); } +/** + * Runs the donorStartMigration command against the primary of the provided replica set. Will + * automatically assign the correct 'protocol' and 'tenantId'/'tenantIds' based on the provided + * 'protocol' and/or currently enabled feature flags. + */ +export function runDonorStartMigrationCommand(migrationOpts, rst, { + retryOnRetryableErrors = false, + shouldStopFunc = () => true, +} = {}) { + // If we don't pass "protocol" and shard merge is enabled, we set the protocol to + // "shard merge". Otherwise, the provided protocol is used, which defaults to + // "multitenant migrations" if not provided. 
+ const db = rst.getPrimary().getDB("admin"); + const cmd = Object.assign({donorStartMigration: 1}, migrationOpts); + if (cmd["protocol"] === undefined && isShardMergeEnabled(db)) { + if (shouldUseMergeTenantIds(db)) { + cmd.tenantIds = cmd.tenantIds || [ObjectId(cmd.tenantId)]; + } + + delete cmd.tenantId; + cmd.protocol = kProtocolShardMerge; + } else if (cmd["protocol"] == kProtocolShardMerge) { + delete cmd.tenantId; + } else { + delete cmd.tenantIds; + } + + return runTenantMigrationCommand(cmd, rst, {retryOnRetryableErrors, shouldStopFunc}); +} + /** * Runs the given tenant migration command against the primary of the given replica set until * the command succeeds or fails with a non-retryable error (if 'retryOnRetryableErrors' is @@ -223,52 +229,37 @@ export async function tryAbortMigrationAsync( export function runTenantMigrationCommand(cmdObj, rst, { retryOnRetryableErrors = false, shouldStopFunc = () => true, - enableDonorStartMigrationFsync = false } = {}) { let primary = rst.getPrimary(); - let localCmdObj = cmdObj; - let run = () => primary.adminCommand(localCmdObj); - if (Object.keys(cmdObj)[0] === "donorStartMigration") { - run = () => { - const adminDB = primary.getDB("admin"); - localCmdObj = donorStartMigrationWithProtocol(cmdObj, adminDB); - if (enableDonorStartMigrationFsync) { - rst.awaitLastOpCommitted(); - assert.commandWorked(primary.adminCommand({fsync: 1})); - } - return primary.adminCommand(localCmdObj); - }; - } - let res; assert.soon(() => { try { // Note: assert.commandWorked() considers command responses with embedded // writeErrors and WriteConcernErrors as a failure even if the command returned - // "ok: 1". And, admin commands(like, donorStartMigration) - // doesn't generate writeConcernErros or WriteErrors. So, it's safe to wrap up - // run() with assert.commandWorked() here. However, in few scenarios, like - // Mongo.prototype.recordRerouteDueToTenantMigration(), it's not safe to wrap up - // run() with commandWorked() as retrying on retryable writeConcernErrors can - // cause the retry attempt to fail with writeErrors. + // "ok: 1". And, admin commands(like, donorStartMigration) doesn't generate + // writeConcernErros or WriteErrors. So, it's safe to wrap the command invocation with + // assert.commandWorked() here. However, in few scenarios, like + // Mongo.prototype.recordRerouteDueToTenantMigration(), it's not safe to wrap the + // command invocation with commandWorked() as retrying on retryable writeConcernErrors + // can cause the retry attempt to fail with writeErrors. res = undefined; // In some tests we expects the command to fail due to a network error. We want to // catch the error OR the unhandled exception here and return the error to the // caller to assert on the result. Otherwise if this is not a network exception // it will be caught in the outter catch and either be retried or thrown. - res = executeNoThrowNetworkError(() => run()); + res = executeNoThrowNetworkError(() => primary.adminCommand(cmdObj)); assert.commandWorked(res); return shouldStopFunc(res); } catch (e) { if (retryOnRetryableErrors && isRetryableError(e)) { jsTestLog(`Retryable error running runTenantMigrationCommand. Command: ${ - tojson(localCmdObj)}, Error: ${tojson(e)}`); + tojson(cmdObj)}, Error: ${tojson(e)}`); primary = rst.getPrimary(); return false; } jsTestLog(`Error running runTenantMigrationCommand. 
Command: ${ - tojson(localCmdObj)}, Error: ${tojson(e)}`); + tojson(cmdObj)}, Error: ${tojson(e)}`); // If res is defined, return true to exit assert.soon and return res to the caller. // Otherwise rethrow e to propagate it to the caller. diff --git a/jstests/replsets/localhostAuthBypass.js b/jstests/replsets/localhostAuthBypass.js index cff02a251931e..3c2ce3cbeb00e 100644 --- a/jstests/replsets/localhostAuthBypass.js +++ b/jstests/replsets/localhostAuthBypass.js @@ -107,7 +107,7 @@ var authenticate = function(mongo) { }; var start = function(useHostName) { - var rs = new ReplSetTest( + const rs = new ReplSetTest( {name: replSetName, nodes: 3, keyFile: keyfile, auth: "", useHostName: useHostName}); rs.startSet(); @@ -126,7 +126,7 @@ var runTest = function(useHostName) { print("====================="); print("starting replica set: useHostName=" + useHostName); print("====================="); - var rs = start(useHostName); + const rs = start(useHostName); var port = rs.getPort(rs.getPrimary()); var host = "localhost:" + port; var secHosts = []; @@ -190,7 +190,7 @@ var runNonlocalTest = function(ipAddr) { print("starting mongod: non-local host access " + ipAddr); print("=========================="); - var rs = start(false); + const rs = start(false); var port = rs.getPort(rs.getPrimary()); var host = ipAddr + ":" + port; var secHosts = []; diff --git a/jstests/replsets/maintenance.js b/jstests/replsets/maintenance.js index c034ac404d87e..274b198e48cd8 100644 --- a/jstests/replsets/maintenance.js +++ b/jstests/replsets/maintenance.js @@ -23,7 +23,7 @@ assert.soon(function() { return conns[1].getDB("admin").hello().secondary; }); -join = +let join = startParallelShell("db.getSiblingDB('bar').runCommand({compact : 'foo'});", replTest.ports[1]); print("joining"); @@ -81,7 +81,7 @@ assert.eq(recv.errmsg, "node is recovering"); print("now getmore shouldn't work"); var ex = assert.throws(function() { - lastDoc = null; + let lastDoc = null; while (cursor.hasNext()) { lastDoc = cursor.next(); } diff --git a/jstests/replsets/no_chaining.js b/jstests/replsets/no_chaining.js index fe5998395a0d9..baad3fc58afb6 100644 --- a/jstests/replsets/no_chaining.js +++ b/jstests/replsets/no_chaining.js @@ -66,4 +66,4 @@ if (!_isWindows()) { assert.eq(false, config.settings.chainingAllowed, tojson(config)); } -replTest.stopSet(); \ No newline at end of file +replTest.stopSet(); diff --git a/jstests/replsets/noop_writes_in_bulk_writes_wait_for_write_concern.js b/jstests/replsets/noop_writes_in_bulk_writes_wait_for_write_concern.js new file mode 100644 index 0000000000000..87881d2471a2e --- /dev/null +++ b/jstests/replsets/noop_writes_in_bulk_writes_wait_for_write_concern.js @@ -0,0 +1,335 @@ +/** + * This file tests that if a user initiates a bulk write where the last write is a noop, either + * due to being a duplicate operation or due to an error based on data we read, that we + * still wait for write concern. + * The intended behavior for a no-op write is that we advance the repl client's last optime to the + * optime of the newest entry in the oplog (also referred as the "system optime"), and wait for + * write concern for that optime. This ensures that any writes we may have possibly read that caused + * the operation to be a noop have also been replicated. For all of these tests, the optime fixing + * behavior should be handled by LastOpFixer. + * + * @tags: [featureFlagBulkWriteCommand] // TODO SERVER-52419: Remove this tag. 
+ */ + +load("jstests/libs/parallel_shell_helpers.js"); +load("jstests/libs/fail_point_util.js"); + +const name = jsTestName(); +const replTest = new ReplSetTest({ + name: name, + nodes: [{}, {rsConfig: {priority: 0}}, {rsConfig: {priority: 0}}], + nodeOptions: { + setParameter: { + // Prevent inserts from being batched together. This allows + // us to hang between consecutive insert operations without + // blocking the ones we already processed from executing. + internalInsertMaxBatchSize: 1, + } + } +}); +replTest.startSet(); +replTest.initiate(); + +const primary = replTest.getPrimary(); +const dbName = 'testDB'; +const testDB = primary.getDB(dbName); +const collName = 'testColl'; +const coll = testDB[collName]; + +function dropTestCollection() { + coll.drop(); + assert.eq(0, coll.find().itcount(), "test collection not empty"); +} + +// Each entry in this array contains a bulkWrite command noop write case we want to test. +// Entries have the following structure: +// { +// +// bulkReq: , // Bulk write request object containing multiple writes +// // where the last write will result in a noop +// // write if it is run after noopMakerReq. +// +// bulkConfirmFunc: , // Function to run after bulkReq and to ensure +// // it executed as expected. Accepts the result +// // of the bulkWrite request. +// +// noopMakerReq: // Command request object containing a single non-bulk +// // write that, if run before the final write in bulkReq, +// // will make that write a noop. +// +// noopMakerConfirmFunc: , // Function to run after noopMakerReq to +// // ensure it executed as expected. Accepts +// // the result of the request. +// +// confirmFunc: // Function to run at the end of the test to make any general +// // assertions that are not on either of the command responses. +// } +let commands = []; + +// 'bulkWrite' where the last op is an insert where the document with the same _id has +// already been inserted. +commands.push({ + bulkReq: { + bulkWrite: 1, + ops: [{insert: 0, document: {_id: 0}}, {insert: 0, document: {_id: 1}}], + nsInfo: [{ns: `${dbName}.${collName}`}] + }, + bulkConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res); + assert.eq(res.cursor.firstBatch.length, 2); + + // The first insert succeeded + assert.eq(res.cursor.firstBatch[0].ok, 1); + assert.eq(res.cursor.firstBatch[0].n, 1); + + // The second insert errored + assert.eq(res.cursor.firstBatch[1].ok, 0); + assert.eq(res.cursor.firstBatch[1].code, ErrorCodes.DuplicateKey); + }, + noopMakerReq: {insert: collName, documents: [{_id: 1}]}, + noopMakerConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.n, 1); + }, + confirmFunc: function() { + assert.eq(coll.count({_id: 0}), 1); + assert.eq(coll.count({_id: 1}), 1); + } +}); + +// 'bulkWrite' where we are doing a mix of local and non-local writes and the last op is an insert +// of a non-local doc with the _id of an existing doc. 
+const localDBName = "local"; +const localDB = primary.getDB("local"); +const localColl = localDB[collName]; +localColl.drop(); + +commands.push({ + bulkReq: { + bulkWrite: 1, + ops: [{insert: 0, document: {_id: 1}}, {insert: 1, document: {_id: 1}}], + nsInfo: [{ns: `${localDBName}.${collName}`}, {ns: `${dbName}.${collName}`}] + }, + bulkConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res); + assert.eq(res.cursor.firstBatch.length, 2); + + // the local insert succeeded + assert.eq(res.cursor.firstBatch[0].ok, 1); + assert.eq(res.cursor.firstBatch[0].n, 1); + + // the non-local insert failed + assert.eq(res.cursor.firstBatch[1].ok, 0); + assert.eq(res.cursor.firstBatch[1].code, ErrorCodes.DuplicateKey); + }, + noopMakerReq: {insert: collName, documents: [{_id: 1}]}, + noopMakerConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.n, 1); + }, + confirmFunc: function(res) { + assert.eq(coll.count({_id: 1}), 1); + assert.eq(localColl.count({_id: 1}), 1); + } +}); + +// 'bulkWrite' where the last op is an update that has already been performed. +commands.push({ + bulkReq: { + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0}}, + {update: 0, filter: {_id: 0}, updateMods: {$set: {x: 1}}} + ], + nsInfo: [{ns: `${dbName}.${collName}`}] + }, + noopMakerReq: {update: collName, updates: [{q: {_id: 0}, u: {$set: {x: 1}}}]}, + bulkConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.cursor.firstBatch.length, 2); + + // The insert succeeded. + assert.eq(res.cursor.firstBatch[0].ok, 1); + assert.eq(res.cursor.firstBatch[0].n, 1); + + // The update was a noop. + assert.eq(res.cursor.firstBatch[1].ok, 1); + assert.eq(res.cursor.firstBatch[1].n, 1); + assert.eq(res.cursor.firstBatch[1].nModified, 0); + }, + noopMakerConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.n, 1); + assert.eq(res.nModified, 1); + }, + confirmFunc: function() { + assert.eq(coll.count({_id: 0, x: 1}), 1); + } +}); + +// 'bulkWrite' where the last op is an update where the document to update does not exist. +commands.push({ + bulkReq: { + bulkWrite: 1, + ops: [ + {insert: 0, document: {a: 1}}, + {update: 0, filter: {a: 1}, updateMods: {$set: {x: 1}}} + ], + nsInfo: [{ns: `${dbName}.${collName}`}], + }, + bulkConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.cursor.firstBatch.length, 2); + + // the insert succeeded + assert.eq(res.cursor.firstBatch[0].ok, 1); + assert.eq(res.cursor.firstBatch[0].n, 1); + + // the update was a no-op + assert.eq(res.cursor.firstBatch[1].ok, 1); + assert.eq(res.cursor.firstBatch[1].n, 0); + assert.eq(res.cursor.firstBatch[1].nModified, 0); + }, + noopMakerReq: {update: collName, updates: [{q: {a: 1}, u: {b: 2}}]}, + noopMakerConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.n, 1); + assert.eq(res.nModified, 1); + }, + confirmFunc: function() { + assert.eq(coll.find().itcount(), 1); + assert.eq(coll.count({b: 2}), 1); + } +}); + +// 'bulkWrite' where the last op is an update that generates an immutable field error. 
+commands.push({ + bulkReq: { + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 0}}, + {update: 0, filter: {_id: 1}, updateMods: {$set: {_id: 2}}} + ], + nsInfo: [{ns: `${dbName}.${collName}`}], + }, + bulkConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res); + assert.eq(res.cursor.firstBatch.length, 2); + + // the insert succeeded + assert.eq(res.cursor.firstBatch[0].ok, 1); + assert.eq(res.cursor.firstBatch[0].n, 1); + + // the update failed + assert.eq(res.cursor.firstBatch[1].ok, 0); + assert.eq(res.cursor.firstBatch[1].code, ErrorCodes.ImmutableField); + assert.eq(res.cursor.firstBatch[1].n, 0); + assert.eq(res.cursor.firstBatch[1].nModified, 0); + }, + noopMakerReq: {insert: collName, documents: [{_id: 1}]}, + noopMakerConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.n, 1); + }, + confirmFunc: function() { + assert.eq(coll.count({_id: 0}), 1); + assert.eq(coll.count({_id: 1}), 1); + } +}); + +// 'bulkWrite' where the last op is a delete where the document to delete does not exist. +commands.push({ + bulkReq: { + bulkWrite: 1, + ops: [{insert: 0, document: {x: 1}}, {delete: 0, filter: {x: 1}, multi: false}], + nsInfo: [{ns: `${dbName}.${collName}`}], + }, + bulkConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.cursor.firstBatch.length, 2); + + // the insert op succeeded + var res1 = res.cursor.firstBatch[0]; + assert.eq(res1.ok, 1); + assert.eq(res1.n, 1); + + // the delete was a no-op + var res2 = res.cursor.firstBatch[1]; + assert.eq(res2.ok, 1); + assert.eq(res2.n, 0); + }, + noopMakerReq: {delete: collName, deletes: [{q: {x: 1}, limit: 1}]}, + noopMakerConfirmFunc: function(res) { + assert.commandWorkedIgnoringWriteConcernErrors(res); + assert.eq(res.n, 1); + }, + confirmFunc: function(res) { + assert.eq(coll.count({x: 1}), 0); + } +}); + +function testCommandWithWriteConcern(cmd) { + // Provide a small wtimeout that we expect to time out. + cmd.bulkReq.writeConcern = {w: 3, wtimeout: 1000}; + jsTest.log("Testing " + tojson(cmd.bulkReq)); + + dropTestCollection(); + + let failpoint = configureFailPoint(testDB, 'hangBetweenProcessingBulkWriteOps', {}, {skip: 1}); + + function runBulkReq(host, cmd) { + load('jstests/libs/write_concern_util.js'); + + // Tests that the command receives a write concern error. If we don't properly advance + // the client's last optime to the latest oplog entry and wait for that optime to + // satisfy our write concern, then we won't see an error, since all writes up to but not + // not including the latest one in `noopMakerReq` have been replicated. + + // Since we run this on a separate connection from the noopMakerReq, there is no way + // that the client's last op time would get advanced by that operation, so if we pass + // this test it means we are correctly advancing this client's optime after the last + // operation in the batch no-ops. + const res = new Mongo(host).getDB('admin').runCommand(cmd.bulkReq); + try { + assertWriteConcernError(res); + cmd.bulkConfirmFunc(res); + } catch (e) { + // Make sure that we print out the response. + printjson(res); + throw e; + } + } + + // Run in a parallel shell as we expect this to hang. + const awaitBulkWrite = + startParallelShell(funWithArgs(runBulkReq, primary.host, cmd), replTest.ports[0]); + + // Wait to see that the bulkWrite has hit the failpoint. + failpoint.wait(); + + // Wait until all of the nodes have seen the first write from the bulkWrite. 
+ replTest.awaitReplication(); + + // Stop a node so that all w:3 write concerns time out. + replTest.stop(1); + + // Run the function that makes the final bulk write op a no-op. + // Provide a small wtimeout that we expect to time out. + cmd.noopMakerReq.writeConcern = {w: 3, wtimeout: 1000}; + var noopMakerRes = testDB.runCommand(cmd.noopMakerReq); + cmd.noopMakerConfirmFunc(noopMakerRes); + + // Disable the failpoint, allowing the bulkWrite to proceed. + failpoint.off(); + + awaitBulkWrite(); + cmd.confirmFunc(); + + replTest.start(1); +} + +commands.forEach(function(cmd) { + testCommandWithWriteConcern(cmd); +}); + +replTest.stopSet(); \ No newline at end of file diff --git a/jstests/replsets/noop_writes_wait_for_write_concern.js b/jstests/replsets/noop_writes_wait_for_write_concern.js index b49467ef1cd51..b6714c9f9b294 100644 --- a/jstests/replsets/noop_writes_wait_for_write_concern.js +++ b/jstests/replsets/noop_writes_wait_for_write_concern.js @@ -233,14 +233,7 @@ commands.push({ assert.commandWorkedIgnoringWriteConcernErrors(db.runCommand({create: collName})); }, confirmFunc: function(res) { - // Branching is needed for multiversion tests as 'create' is only idempotent as of 7.0. - // TODO SERVER-74062: update this to stop branching on the server version and always - // assert the command worked ignoring write concern errors. - if (db.version().split('.')[0] >= 7) { - assert.commandWorkedIgnoringWriteConcernErrors(res); - } else { - assert.commandFailedWithCode(res, ErrorCodes.NamespaceExists); - } + assert.commandWorkedIgnoringWriteConcernErrors(res); } }); @@ -258,56 +251,6 @@ commands.push({ } }); -// Skip these tests if the BulkWriteCommand feature flag is not enabled -// TODO SERVER-67711: Remove feature flag check. -if (FeatureFlagUtil.isPresentAndEnabled(db, "BulkWriteCommand")) { - // 'bulkWrite' where the document with the same _id has already been inserted. - commands.push({ - req: { - bulkWrite: 1, - ops: [{insert: 0, document: {_id: 1}}], - nsInfo: [{ns: `${dbName}.${collName}`}] - }, - setupFunc: function() { - assert.commandWorked(coll.insert({_id: 1})); - }, - confirmFunc: function(res) { - assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res); - assert.eq(res.cursor.firstBatch[0].code, ErrorCodes.DuplicateKey); - assert.eq(coll.count({_id: 1}), 1); - } - }); - - // 'bulkWrite' where we are doing a mix of local and non-local writes - // and the last op is an insert of a non-local doc with the _id of an - // existing doc. - var localDBName = "local"; - var localDB = primary.getDB("local"); - var localColl = localDB[collName]; - localColl.drop(); - - commands.push({ - req: { - bulkWrite: 1, - ops: [{insert: 0, document: {_id: 1}}, {insert: 1, document: {_id: 1}}], - nsInfo: [{ns: `${localDBName}.${collName}`}, {ns: `${dbName}.${collName}`}] - }, - setupFunc: function() { - assert.commandWorked(coll.insert({_id: 1})); - }, - confirmFunc: function(res) { - assert.commandWorkedIgnoringWriteErrorsAndWriteConcernErrors(res); - // the local insert happened - assert.eq(res.cursor.firstBatch[0].ok, 1); - assert.eq(res.cursor.firstBatch[0].n, 1); - assert.eq(localColl.count({_id: 1}), 1); - // the non-local insert failed - assert.eq(res.cursor.firstBatch[1].code, ErrorCodes.DuplicateKey); - assert.eq(coll.count({_id: 1}), 1); - } - }); -} - function testCommandWithWriteConcern(cmd) { // Provide a small wtimeout that we expect to time out. 
cmd.req.writeConcern = {w: 3, wtimeout: 1000}; diff --git a/jstests/replsets/not_primary_errors_returned_if_client_sends_helloOk.js b/jstests/replsets/not_primary_errors_returned_if_client_sends_helloOk.js index e8cca15064aba..9f354c60758cf 100644 --- a/jstests/replsets/not_primary_errors_returned_if_client_sends_helloOk.js +++ b/jstests/replsets/not_primary_errors_returned_if_client_sends_helloOk.js @@ -74,4 +74,4 @@ assert(res.errmsg.includes("not primary"), res); assert(!res.errmsg.includes("not master"), res); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/oplog_note_cmd.js b/jstests/replsets/oplog_note_cmd.js index 1d94b9a1a4193..58536d040abd7 100644 --- a/jstests/replsets/oplog_note_cmd.js +++ b/jstests/replsets/oplog_note_cmd.js @@ -1,6 +1,6 @@ // Test that the "appendOplogNote" command works properly -var rs = new ReplSetTest({name: "oplogNoteTest", nodes: 1}); +const rs = new ReplSetTest({name: "oplogNoteTest", nodes: 1}); rs.startSet(); rs.initiate(); diff --git a/jstests/replsets/pipelineout.js b/jstests/replsets/pipelineout.js index 90c6805b7a086..b296081e61ecf 100644 --- a/jstests/replsets/pipelineout.js +++ b/jstests/replsets/pipelineout.js @@ -11,7 +11,7 @@ var primary = replTest.getPrimary().getDB(name); var secondary = replTest.getSecondary().getDB(name); // populate the collection -for (i = 0; i < 5; i++) { +for (let i = 0; i < 5; i++) { primary.coll.insert({x: i}); } replTest.awaitReplication(); @@ -21,4 +21,4 @@ primary.coll.aggregate({$out: "out"}).itcount(); replTest.awaitReplication(); assert.eq(primary.out.find().sort({x: 1}).toArray(), secondary.out.find().sort({x: 1}).toArray()); -replTest.stopSet(); \ No newline at end of file +replTest.stopSet(); diff --git a/jstests/replsets/primary_commit_split_prepare_transactions.js b/jstests/replsets/primary_commit_split_prepare_transactions.js index b3c5391320c77..3611795ab94f6 100644 --- a/jstests/replsets/primary_commit_split_prepare_transactions.js +++ b/jstests/replsets/primary_commit_split_prepare_transactions.js @@ -34,27 +34,6 @@ const checkDocuments = function(docCount, testColl, expectOld, readConcern = nul } }; -// Verify that we can't insert in the transaction if it is in prepared/committed state. -// Also checks the config.transactions entry. -const checkTransaction = function( - sessionDB, collName, lsid, txnNumber, transactionsColl, expectedState) { - const expectedError = expectedState == "prepared" ? ErrorCodes.PreparedTransactionInProgress - : ErrorCodes.TransactionCommitted; - assert.commandFailedWithCode(sessionDB.runCommand({ - insert: collName, - documents: [{x: 2}], - txnNumber: NumberLong(txnNumber), - autocommit: false - }), - expectedError); - - const res = transactionsColl.find({"_id.id": lsid["id"], "txnNum": txnNumber}) - .readConcern("majority") - .toArray(); - assert.eq(1, res.length); - assert.eq(expectedState, res[0]["state"]); -}; - const replTest = new ReplSetTest({ nodes: 2, nodeOptions: { @@ -79,8 +58,28 @@ const collName = jsTestName(); let testDB = primary.getDB(dbName); let testColl = testDB.getCollection(collName); -const config = primary.getDB("config"); -const transactionsColl = config.getCollection("transactions"); +// Verify that we can't insert in the transaction if it is in prepared/committed state. +// Also checks the config.transactions entry. +const checkTransaction = function(sessionDB, lsid, txnNumber, expectedState) { + const expectedError = expectedState == "prepared" ? 
ErrorCodes.PreparedTransactionInProgress + : ErrorCodes.TransactionCommitted; + assert.commandFailedWithCode(sessionDB.runCommand({ + insert: collName, + documents: [{x: 2}], + txnNumber: NumberLong(txnNumber), + autocommit: false + }), + expectedError); + + const res = replTest.getPrimary() + .getDB("config") + .getCollection("transactions") + .find({"_id.id": lsid["id"], "txnNum": txnNumber}) + .readConcern("majority") + .toArray(); + assert.eq(1, res.length); + assert.eq(expectedState, res[0]["state"]); +}; testColl.drop({writeConcern: {w: "majority"}}); assert.commandWorked(testDB.runCommand({create: collName, writeConcern: {w: "majority"}})); @@ -130,7 +129,7 @@ assert.soon(() => { timestampCmp(secondaryLastStableRecoveryTimestamp, prepareTimestamp) >= 0; }); -checkTransaction(sessionDB, collName, lsid, txnNumber, transactionsColl, "prepared"); +checkTransaction(sessionDB, lsid, txnNumber, "prepared"); // 2) Step up the secondary as the new primary after it applies the prepared transaction. jsTestLog("Forcing secondary to become primary."); @@ -149,7 +148,7 @@ session = PrepareHelpers.createSessionWithGivenId(newPrimary, lsid); session.setTxnNumber_forTesting(txnNumber); sessionDB = session.getDatabase(dbName); -checkTransaction(sessionDB, collName, lsid, txnNumber, transactionsColl, "prepared"); +checkTransaction(sessionDB, lsid, txnNumber, "prepared"); // Inserts are not seen outside the transaction. checkDocuments(docCount, testColl, true /* expectOld */); @@ -249,7 +248,7 @@ session = PrepareHelpers.createSessionWithGivenId(newPrimary2, lsid); session.setTxnNumber_forTesting(txnNumber); sessionDB = session.getDatabase(dbName); -checkTransaction(sessionDB, collName, lsid, txnNumber, transactionsColl, "prepared"); +checkTransaction(sessionDB, lsid, txnNumber, "prepared"); testDB = newPrimary2.getDB(dbName); testColl = testDB.getCollection(collName); @@ -272,7 +271,7 @@ assert.commandWorked(sessionDB.adminCommand({ autocommit: false, })); -checkTransaction(sessionDB, collName, lsid, txnNumber, transactionsColl, "committed"); +checkTransaction(sessionDB, lsid, txnNumber, "committed"); // After commit the updates become visible. checkDocuments(docCount, testColl, false /* expectOld */); diff --git a/jstests/replsets/read_committed_with_catalog_changes.js b/jstests/replsets/read_committed_with_catalog_changes.js index 45a9c7b93d15a..74835518c687a 100644 --- a/jstests/replsets/read_committed_with_catalog_changes.js +++ b/jstests/replsets/read_committed_with_catalog_changes.js @@ -23,17 +23,11 @@ * * @tags: [ * requires_majority_read_concern, - * # This test is incompatible with earlier implementations of point-in-time catalog lookups. - * requires_fcv_70, * ] */ load("jstests/libs/parallelTester.js"); // For Thread. load("jstests/libs/write_concern_util.js"); -load("jstests/libs/feature_flag_util.js"); - -(function() { -"use strict"; // Each test case includes a 'prepare' method that sets up the initial state starting with a // database that has been dropped, a 'performOp' method that does some operation, and two @@ -208,17 +202,9 @@ const testCases = { // Assertion helpers. These must get all state as arguments rather than through closure since // they may be passed in to a Thread. 
function assertReadsBlock(db, coll) { - var res = coll.runCommand('find', {"readConcern": {"level": "majority"}, "maxTimeMS": 5000}); - - // When point-in-time catalog reads are enabled, reads no longer block waiting for the majority - // commit point to advance and allow reading earlier than the minimum visible snapshot. - if (FeatureFlagUtil.isEnabled(db, "PointInTimeCatalogLookups")) { - assert.commandWorked(res); - } else { - assert.commandFailedWithCode(res, - ErrorCodes.MaxTimeMSExpired, - "Expected read of " + coll.getFullName() + " to block"); - } + // With point-in-time catalog lookups, reads no longer block waiting for the majority commit + // point to advance. + assert.commandWorked(coll.runCommand('find', {"readConcern": {"level": "majority"}})); } function assertReadsSucceed(coll, timeoutMs = 20000) { @@ -351,4 +337,3 @@ for (var testName in testCases) { } replTest.stopSet(); -}()); diff --git a/jstests/replsets/reconfig_ignores_term_field.js b/jstests/replsets/reconfig_ignores_term_field.js index 7303e849b17cc..af9b388e72866 100644 --- a/jstests/replsets/reconfig_ignores_term_field.js +++ b/jstests/replsets/reconfig_ignores_term_field.js @@ -44,4 +44,4 @@ config = primary.getDB("local").system.replset.findOne(); assert(!config.hasOwnProperty("term")); replTest.stopSet(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/reconfig_only_counts_voters_for_config_commitment.js b/jstests/replsets/reconfig_only_counts_voters_for_config_commitment.js index e6fff47893c04..bb6a189458f3c 100644 --- a/jstests/replsets/reconfig_only_counts_voters_for_config_commitment.js +++ b/jstests/replsets/reconfig_only_counts_voters_for_config_commitment.js @@ -49,4 +49,4 @@ assert.commandWorked(primary.getDB("admin").runCommand({replSetReconfig: config} assert.soon(() => isConfigCommitted(primary)); replTest.stopSet(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/reconfig_only_counts_voting_nodes_for_oplog_commitment.js b/jstests/replsets/reconfig_only_counts_voting_nodes_for_oplog_commitment.js index 51ab53feeab1a..b68a4c9db0000 100644 --- a/jstests/replsets/reconfig_only_counts_voting_nodes_for_oplog_commitment.js +++ b/jstests/replsets/reconfig_only_counts_voting_nodes_for_oplog_commitment.js @@ -59,4 +59,4 @@ restartServerReplication(nodes[1]); replTest.awaitReplication(); replTest.stopSet(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/reconfig_uses_default_protocolVersion.js b/jstests/replsets/reconfig_uses_default_protocolVersion.js index 706872b952284..ed363f3cecb8f 100644 --- a/jstests/replsets/reconfig_uses_default_protocolVersion.js +++ b/jstests/replsets/reconfig_uses_default_protocolVersion.js @@ -20,4 +20,4 @@ config = primary.getDB("local").system.replset.findOne(); assert.eq(config.protocolVersion, 1); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js index e41ea9ca1f45a..d0659752c8e9c 100644 --- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js +++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync.js @@ -271,4 +271,4 @@ assert.commandWorked(sessionDB3.adminCommand( assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js 
b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js index fba44289095b6..86a9a437dc0d5 100644 --- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js +++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_change_oldest_active_txn_timestamp.js @@ -111,4 +111,4 @@ replTest.awaitReplication(); assert.docEq({_id: 1, a: 1}, secondaryColl.findOne({_id: 1})); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js index 08ae0fa82527f..fb6cf4abde6d7 100644 --- a/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js +++ b/jstests/replsets/reconstruct_prepared_transactions_initial_sync_no_oplog_application.js @@ -202,4 +202,4 @@ assert.commandWorked(sessionDB3.adminCommand( assert.eq(testColl.find({_id: 3}).toArray(), [{_id: 3}]); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/rename_replica_set.js b/jstests/replsets/rename_replica_set.js index 19e788171b4ee..71b47b1481471 100644 --- a/jstests/replsets/rename_replica_set.js +++ b/jstests/replsets/rename_replica_set.js @@ -60,4 +60,4 @@ assert.eq(secondaryReplSetName, newReplSetName); assert.commandWorked(coll.insert({b: 2}, {"writeConcern": {"w": 2}})); replTest.stopSet(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/replset1.js b/jstests/replsets/replset1.js index 7ce4610495e04..a94943c6ced43 100644 --- a/jstests/replsets/replset1.js +++ b/jstests/replsets/replset1.js @@ -1,138 +1,4 @@ -var ssl_options1; -var ssl_options2; -var ssl_name; -load("jstests/replsets/rslib.js"); -load('jstests/replsets/libs/election_metrics.js'); -var doTest = function(signal) { - // Test basic replica set functionality. - // -- Replication - // -- Failover +load('jstests/replsets/libs/basic_replset_test.js'); - // Choose a name that is unique to the options specified. - // This is important because we are depending on a fresh replicaSetMonitor for each run; - // each differently-named replica set gets its own monitor. - // n0 and n1 get the same SSL config since there are 3 nodes but only 2 different configs - var replTest = new ReplSetTest({ - name: 'testSet' + ssl_name, - nodes: {n0: ssl_options1, n1: ssl_options1, n2: ssl_options2} - }); - - // call startSet() to start each mongod in the replica set - // this returns a list of nodes - var nodes = replTest.startSet(); - - // Call initiate() to send the replSetInitiate command - // This will wait for initiation - replTest.initiate(); - - // Call getPrimary to return a reference to the node that's been - // elected primary. - var primary = replTest.getPrimary(); - - // Check that both the 'called' and 'successful' fields of the 'electionTimeout' election reason - // counter have been incremented in serverStatus. - const primaryStatus = assert.commandWorked(primary.adminCommand({serverStatus: 1})); - verifyServerStatusElectionReasonCounterValue( - primaryStatus.electionMetrics, "electionTimeout", 1); - - // Ensure the primary logs an n-op to the oplog upon transitioning to primary. 
- assert.gt(primary.getDB("local").oplog.rs.count({op: 'n', o: {msg: 'new primary'}}), 0); - - // Here's how you save something to primary - primary.getDB("foo").foo.save({a: 1000}); - - // This method will check the oplogs of the primary - // and secondaries in the set and wait until the change has replicated. - replTest.awaitReplication(); - - var cppconn = new Mongo(replTest.getURL()).getDB("foo"); - assert.eq(1000, cppconn.foo.findOne().a, "cppconn 1"); - - { - // check c++ finding other servers - var temp = replTest.getURL(); - temp = temp.substring(0, temp.lastIndexOf(",")); - temp = new Mongo(temp).getDB("foo"); - assert.eq(1000, temp.foo.findOne().a, "cppconn 1"); - } - - // Here's how to stop the primary node - var primaryId = replTest.getNodeId(primary); - replTest.stop(primaryId); - - // Now let's see who the new primary is: - var newPrimary = replTest.getPrimary(); - - // Is the new primary the same as the old primary? - var newPrimaryId = replTest.getNodeId(newPrimary); - - assert(primaryId != newPrimaryId, "Old primary shouldn't be equal to new primary."); - - reconnect(cppconn); - assert.eq(1000, cppconn.foo.findOne().a, "cppconn 2"); - - // Now let's write some documents to the new primary - var bulk = newPrimary.getDB("bar").bar.initializeUnorderedBulkOp(); - for (var i = 0; i < 1000; i++) { - bulk.insert({a: i}); - } - bulk.execute(); - - // Here's how to restart the old primary node: - var secondary = replTest.restart(primaryId); - - // Now, let's make sure that the old primary comes up as a secondary - assert.soon(function() { - var res = secondary.getDB("admin").runCommand({hello: 1}); - printjson(res); - return res['ok'] == 1 && res['isWritablePrimary'] == false; - }); - - // And we need to make sure that the replset comes back up - assert.soon(function() { - var res = newPrimary.getDB("admin").runCommand({replSetGetStatus: 1}); - printjson(res); - return res.myState == 1; - }); - - // And that both secondary nodes have all the updates - newPrimary = replTest.getPrimary(); - assert.eq(1000, newPrimary.getDB("bar").runCommand({count: "bar"}).n, "assumption 2"); - replTest.awaitSecondaryNodes(); - replTest.awaitReplication(); - - var secondaries = replTest.getSecondaries(); - assert(secondaries.length == 2, "Expected 2 secondaries but length was " + secondaries.length); - secondaries.forEach(function(secondary) { - secondary.setSecondaryOk(); - var count = secondary.getDB("bar").runCommand({count: "bar"}); - printjson(count); - assert.eq(1000, count.n, "secondary count wrong: " + secondary); - }); - - // last error - primary = replTest.getPrimary(); - secondaries = replTest.getSecondaries(); - - var db = primary.getDB("foo"); - var t = db.foo; - - var ts = secondaries.map(function(z) { - z.setSecondaryOk(); - return z.getDB("foo").foo; - }); - - t.save({a: 1000}); - t.createIndex({a: 1}); - replTest.awaitReplication(); - - ts.forEach(function(z) { - assert.eq(2, z.getIndexKeys().length, "A " + z.getMongo()); - }); - - // Shut down the set and finish the test. - replTest.stopSet(signal); -}; - -doTest(15); +basicReplsetTest(15); print("replset1.js SUCCESS"); diff --git a/jstests/replsets/replset2.js b/jstests/replsets/replset2.js index 38134794dfb1a..96305aa7a494c 100644 --- a/jstests/replsets/replset2.js +++ b/jstests/replsets/replset2.js @@ -1,6 +1,6 @@ load("jstests/replsets/rslib.js"); -doTest = function(signal) { +let doTest = function(signal) { // Test replication with write concern. 
// Replica set testing API @@ -50,7 +50,7 @@ doTest = function(signal) { print("replset2.js **** TEMP 1a ****"); - m1 = primary.getDB(testDB).foo.findOne({n: 1}); + let m1 = primary.getDB(testDB).foo.findOne({n: 1}); printjson(m1); assert(m1['n'] == 1, "replset2.js Failed to save to primary on multiple inserts"); @@ -90,7 +90,7 @@ doTest = function(signal) { print("replset2.js **** V1 "); var verifyReplication = function(nodeName, collection) { - data = collection.findOne({n: 1}); + let data = collection.findOne({n: 1}); assert(data['n'] == 1, "replset2.js Failed to save to " + nodeName); data = collection.findOne({n: 999}); assert(data['n'] == 999, "replset2.js Failed to save to " + nodeName); diff --git a/jstests/replsets/replset4.js b/jstests/replsets/replset4.js index 0ac4141a6df9a..4b3c8e427835f 100644 --- a/jstests/replsets/replset4.js +++ b/jstests/replsets/replset4.js @@ -1,4 +1,4 @@ -doTest = function(signal) { +let doTest = function(signal) { // Test orphaned primary steps down var replTest = new ReplSetTest({name: 'testSet', nodes: 3}); diff --git a/jstests/replsets/replset6.js b/jstests/replsets/replset6.js index fd33175d8234e..8c1af72aa2377 100644 --- a/jstests/replsets/replset6.js +++ b/jstests/replsets/replset6.js @@ -1,7 +1,7 @@ // Test replication of collection renaming -baseName = "jstests_replsets_replset6"; +let baseName = "jstests_replsets_replset6"; var rt = new ReplSetTest({name: "replset6tests", nodes: 2}); var nodes = rt.startSet(); @@ -9,17 +9,17 @@ rt.initiate(); var p = rt.getPrimary(); rt.awaitSecondaryNodes(); var secondaries = rt.getSecondaries(); -s = secondaries[0]; +let s = secondaries[0]; s.setSecondaryOk(); -admin = p.getDB("admin"); +let admin = p.getDB("admin"); -debug = function(foo) {}; // print( foo ); } +let debug = function(foo) {}; // print( foo ); } // rename within db p.getDB(baseName).one.save({a: 1}); assert.soon(function() { - v = s.getDB(baseName).one.findOne(); + let v = s.getDB(baseName).one.findOne(); return v && 1 == v.a; }); @@ -41,8 +41,8 @@ assert.eq(-1, s.getDB(baseName).getCollectionNames().indexOf("one")); // rename to new db -first = baseName + "_first"; -second = baseName + "_second"; +let first = baseName + "_first"; +let second = baseName + "_second"; p.getDB(first).one.save({a: 1}); assert.soon(function() { diff --git a/jstests/replsets/replsethostnametrim.js b/jstests/replsets/replsethostnametrim.js index e66bac8acafd0..aa485c93a89ed 100644 --- a/jstests/replsets/replsethostnametrim.js +++ b/jstests/replsets/replsethostnametrim.js @@ -18,4 +18,4 @@ config = primary.getDB("local").system.replset.findOne(); assert.eq(origHost, config.members[0].host); // print("current (good) config:"); printjson(config); -replTest.stopSet(); \ No newline at end of file +replTest.stopSet(); diff --git a/jstests/replsets/replsets_killop.js b/jstests/replsets/replsets_killop.js index 3fb42d6a24468..8168cd6d1cd5e 100644 --- a/jstests/replsets/replsets_killop.js +++ b/jstests/replsets/replsets_killop.js @@ -1,14 +1,14 @@ // Test correctness of replication while a secondary's get more requests are killed on the primary // using killop. SERVER-7952 -numDocs = 1e5; +let numDocs = 1e5; // Set up a replica set. 
-replTest = new ReplSetTest({name: 'test', nodes: 3}); -nodes = replTest.startSet(); +let replTest = new ReplSetTest({name: 'test', nodes: 3}); +let nodes = replTest.startSet(); replTest.initiate(); -primary = replTest.getPrimary(); -secondary = replTest.getSecondary(); +let primary = replTest.getPrimary(); +let secondary = replTest.getSecondary(); db = primary.getDB('test'); db.test.save({a: 0}); replTest.awaitReplication(); @@ -17,7 +17,7 @@ assert.soon(function() { }); // Start a parallel shell to insert new documents on the primary. -inserter = startParallelShell( +let inserter = startParallelShell( 'var bulk = db.test.initializeUnorderedBulkOp(); \ for( i = 1; i < ' + numDocs + @@ -27,10 +27,10 @@ inserter = startParallelShell( bulk.execute();'); // Periodically kill replication get mores. -for (i = 0; i < 1e3; ++i) { - allOps = db.currentOp(); - for (j in allOps.inprog) { - op = allOps.inprog[j]; +for (let i = 0; i < 1e3; ++i) { + let allOps = db.currentOp(); + for (let j in allOps.inprog) { + let op = allOps.inprog[j]; if (op.ns == 'local.oplog.rs' && op.op == 'getmore') { db.killOp(op.opid); } @@ -46,20 +46,20 @@ assert.eq(numDocs, db.test.find().itcount()); // Return true when the correct number of documents are present on the secondary. Otherwise print // which documents are missing and return false. function allReplicated() { - count = secondary.getDB('test').test.find().itcount(); + let count = secondary.getDB('test').test.find().itcount(); if (count == numDocs) { // Return true if the count is as expected. return true; } // Identify and print the missing a-values. - foundSet = {}; - c = secondary.getDB('test').test.find(); + let foundSet = {}; + let c = secondary.getDB('test').test.find(); while (c.hasNext()) { foundSet['' + c.next().a] = true; } - missing = []; - for (i = 0; i < numDocs; ++i) { + let missing = []; + for (let i = 0; i < numDocs; ++i) { if (!(('' + i) in foundSet)) { missing.push(i); } diff --git a/jstests/replsets/replsettest_checks_wait_for_secondaries.js b/jstests/replsets/replsettest_checks_wait_for_secondaries.js index 321a6670f9ad1..f5eebbc6abbe0 100644 --- a/jstests/replsets/replsettest_checks_wait_for_secondaries.js +++ b/jstests/replsets/replsettest_checks_wait_for_secondaries.js @@ -41,4 +41,4 @@ assert.commandWorked(secondary.adminCommand( // stopSet() will call checkReplicatedDBHashes rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/replsettest_control_1_node.js b/jstests/replsets/replsettest_control_1_node.js index a9930deb6a2a3..72e5ee04fbc13 100644 --- a/jstests/replsets/replsettest_control_1_node.js +++ b/jstests/replsets/replsettest_control_1_node.js @@ -10,4 +10,4 @@ const replTest = new ReplSetTest({name: 'replsettest_control_1_node', nodes: 1}) replTest.startSet(); replTest.initiate(); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/replsettest_remove_then_stopset.js b/jstests/replsets/replsettest_remove_then_stopset.js index bca9aeccb9dfd..5351f98efe147 100644 --- a/jstests/replsets/replsettest_remove_then_stopset.js +++ b/jstests/replsets/replsettest_remove_then_stopset.js @@ -9,4 +9,4 @@ const replTest = new ReplSetTest({nodes: 1}); replTest.startSet(); replTest.remove(0); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/restart_index_build_if_resume_interrupted_by_rollback.js b/jstests/replsets/restart_index_build_if_resume_interrupted_by_rollback.js index 52296b9fffde5..83fcd52641360 100644 --- 
a/jstests/replsets/restart_index_build_if_resume_interrupted_by_rollback.js +++ b/jstests/replsets/restart_index_build_if_resume_interrupted_by_rollback.js @@ -20,4 +20,4 @@ RollbackResumableIndexBuildTest.runResumeInterruptedByRollback( rollbackTest, dbName, [{a: 1}, {a: 2}], {a: 1}, [{a: 3}], [{a: 4}]); rollbackTest.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/resync_majority_member.js b/jstests/replsets/resync_majority_member.js index 90058b892302b..93fdeedf9b94c 100644 --- a/jstests/replsets/resync_majority_member.js +++ b/jstests/replsets/resync_majority_member.js @@ -127,4 +127,4 @@ assert.eq(0, resyncNode.getDB(dbName)[collName].find(disappearingDoc).itcount()) // We expect node 1 to have crashed. rst.stop(0, undefined, {allowedExitCode: MongoRunner.EXIT_ABORT}); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/rollback_capped_deletions.js b/jstests/replsets/rollback_capped_deletions.js index 86928d2601f91..233384ff8e542 100644 --- a/jstests/replsets/rollback_capped_deletions.js +++ b/jstests/replsets/rollback_capped_deletions.js @@ -45,4 +45,4 @@ try { } rollbackTest.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/rollback_creates_rollback_directory.js b/jstests/replsets/rollback_creates_rollback_directory.js index 961a7300fe313..b0cccd81ffd8e 100644 --- a/jstests/replsets/rollback_creates_rollback_directory.js +++ b/jstests/replsets/rollback_creates_rollback_directory.js @@ -42,7 +42,7 @@ function runRollbackDirectoryTest(shouldCreateRollbackFiles) { // Make sure we have an arbiter assert.soon(function() { - res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1}); + let res = conns[2].getDB("admin").runCommand({replSetGetStatus: 1}); return res.myState == 7; }, "Arbiter failed to initialize."); diff --git a/jstests/replsets/rollback_drop_database.js b/jstests/replsets/rollback_drop_database.js index 01d3e3c06598a..40792d9712521 100644 --- a/jstests/replsets/rollback_drop_database.js +++ b/jstests/replsets/rollback_drop_database.js @@ -3,9 +3,20 @@ * a collection, then executes a 'dropDatabase' command, partitioning the primary such that the * final 'dropDatabase' oplog entry is not replicated. The test then forces rollback of that entry. * - * The 'dropDatabase' command drops each collection, ensures that the last drop is committed, - * and only then logs a 'dropDatabase' oplog entry. This is therefore the only entry that could - * get rolled back. + * The 'dropDatabase' command drops each collection, ensures that the last drop is majority + * committed, and only then logs a 'dropDatabase' oplog entry. This is therefore the only entry that + * could get rolled back. + * + * Additionally test handling of an incompletely dropped database across a replica set. If a primary + * writes a dropDatabase oplog entry and clears in-memory database state, but subsequently rolls + * back the dropDatabase oplog entry, then the replica set secondaries will still have the in-memory + * state. If the original primary is re-elected, it will allow a subsequent createCollection with a + * database name conflicting with the original database. The secondaries should close the original + * empty database and open the new database on receipt of the createCollection. 
+ * + * @tags: [ + * multiversion_incompatible, + * ] */ (function() { @@ -13,15 +24,19 @@ load("jstests/replsets/libs/rollback_test.js"); const testName = "rollback_drop_database"; -const oldDbName = "oldDatabase"; -const newDbName = "newDatabase"; + +// MongoDB does not allow multiple databases to exist that differ only in letter case. These +// database names will differ only in letter case, to test that secondaries will safely close +// conflicting empty databases. +const dbName = "olddatabase"; +const conflictingDbName = "OLDDATABASE"; let rollbackTest = new RollbackTest(testName); let rollbackNode = rollbackTest.getPrimary(); let syncSourceNode = rollbackTest.getSecondary(); // Perform initial insert (common operation). -assert.commandWorked(rollbackNode.getDB(oldDbName)["beforeRollback"].insert({"num": 1})); +assert.commandWorked(rollbackNode.getDB(dbName)["beforeRollback"].insert({"num": 1})); // Set a failpoint on the original primary, so that it blocks after it commits the last // 'dropCollection' entry but before the 'dropDatabase' entry is logged. @@ -30,7 +45,7 @@ assert.commandWorked(rollbackNode.adminCommand( // Issue a 'dropDatabase' command. let dropDatabaseFn = function() { - const rollbackDb = "oldDatabase"; + const rollbackDb = "olddatabase"; var primary = db.getMongo(); jsTestLog("Dropping database " + rollbackDb + " on primary node " + primary.host); var dbToDrop = db.getSiblingDB(rollbackDb); @@ -45,28 +60,48 @@ checkLog.contains(rollbackNode, // Wait for the secondary to finish dropping the collection (the last replicated entry). // We use the default 10-minute timeout for this. assert.soon(function() { - let res = syncSourceNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback"); + let res = syncSourceNode.getDB(dbName).getCollectionNames().includes("beforeRollback"); return !res; }, "Sync source did not finish dropping collection beforeRollback", 10 * 60 * 1000); rollbackTest.transitionToRollbackOperations(); +// Check that the dropDatabase oplog entry has not been written. +assert(!checkLog.checkContainsOnceJson(rollbackNode, 7360105)); + // Allow the final 'dropDatabase' entry to be logged on the now isolated primary. // This is the rollback node's divergent oplog entry. assert.commandWorked(rollbackNode.adminCommand( {configureFailPoint: "dropDatabaseHangBeforeInMemoryDrop", mode: "off"})); waitForDropDatabaseToFinish(); -assert.eq(false, rollbackNode.getDB(oldDbName).getCollectionNames().includes("beforeRollback")); -jsTestLog("Database " + oldDbName + " successfully dropped on primary node " + rollbackNode.host); + +// Check that the dropDatabase oplog entry has now been written. +assert(checkLog.checkContainsOnceJson(rollbackNode, 7360105)); + +assert.eq(false, rollbackNode.getDB(dbName).getCollectionNames().includes("beforeRollback")); +jsTestLog("Database " + dbName + " successfully dropped on primary node " + rollbackNode.host); rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); // Perform an insert on another database while interfacing with the new primary. // This is the sync source's divergent oplog entry. 
-assert.commandWorked(syncSourceNode.getDB(newDbName)["afterRollback"].insert({"num": 2})); +assert.commandWorked(syncSourceNode.getDB("someDB")["afterRollback"].insert({"num": 2})); rollbackTest.transitionToSyncSourceOperationsDuringRollback(); rollbackTest.transitionToSteadyStateOperations(); +jsTestLog("Transitioned to steady state, going to run test operations"); + +// Check that replication rollback occurred on the old primary. +assert(checkLog.checkContainsOnceJson(rollbackNode, 21612)); + +// The syncSourceNode never received the dropDatabase oplog entry from the rollbackNode. Therefore, +// syncSourceNode never cleared the in-memory database state for that database. Check that +// syncSourceNode will safely clear the original empty database when applying a createCollection +// with a new database name that conflicts with the original. +rollbackTest.stepUpNode(rollbackNode); +// Using only w:2 because the third node is frozen / not replicating. +assert.commandWorked(rollbackNode.getDB(conflictingDbName)["afterRollback"].insert( + {"num": 2}, {writeConcern: {w: 2}})); rollbackTest.stop(); })(); diff --git a/jstests/replsets/rollback_dup_ids_clean_shutdown_during_rollback.js b/jstests/replsets/rollback_dup_ids_clean_shutdown_during_rollback.js index db623b1d67dff..361ff74cfa301 100644 --- a/jstests/replsets/rollback_dup_ids_clean_shutdown_during_rollback.js +++ b/jstests/replsets/rollback_dup_ids_clean_shutdown_during_rollback.js @@ -52,4 +52,4 @@ rollbackTest.transitionToSteadyStateOperations(); // Check the replica set. rollbackTest.stop(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/rollback_large_batched_multi_deletes.js b/jstests/replsets/rollback_large_batched_multi_deletes.js index d5f0f840a3c5a..36f5736773b7a 100644 --- a/jstests/replsets/rollback_large_batched_multi_deletes.js +++ b/jstests/replsets/rollback_large_batched_multi_deletes.js @@ -1,15 +1,12 @@ /** * Tests that a multi-oplog batched multi delete operation can be rolled back. * @tags: [ - * requires_fcv_70, + * requires_fcv_71, * requires_replication, * ] */ -(function() { -'use strict'; - load('jstests/replsets/libs/rollback_test.js'); -load("jstests/libs/feature_flag_util.js"); // for FeatureFlagUtil.isEnabled +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; // Operations that will be present on both nodes, before the common point. 
const dbName = 'test'; @@ -65,11 +62,10 @@ const nodeOptions = { }; const rollbackTest = new RollbackTest(jsTestName(), /*replSet=*/ undefined, nodeOptions); -if (!FeatureFlagUtil.isEnabled(rollbackTest.getPrimary(), - "InternalWritesAreReplicatedTransactionally")) { +if (!FeatureFlagUtil.isEnabled(rollbackTest.getPrimary(), "LargeBatchedOperations")) { jsTestLog('Skipping test because required feature flag is not enabled.'); rollbackTest.stop(); - return; + quit(); } CommonOps(rollbackTest.getPrimary()); @@ -88,5 +84,4 @@ const primary = rollbackTest.getPrimary(); const coll = primary.getCollection(collName); assert.eq(docIds.length, coll.countDocuments({})); -rollbackTest.stop(); -})(); +rollbackTest.stop(); \ No newline at end of file diff --git a/jstests/replsets/rollback_resumable_index_build_mixed_phases.js b/jstests/replsets/rollback_resumable_index_build_mixed_phases.js index c5b9517620ba5..bfe18551cfe20 100644 --- a/jstests/replsets/rollback_resumable_index_build_mixed_phases.js +++ b/jstests/replsets/rollback_resumable_index_build_mixed_phases.js @@ -137,4 +137,4 @@ runRollbackTo( [{skippedPhaseLogID: 20391}, {skippedPhaseLogID: 20392}]); rollbackTest.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/rollback_set_fcv.js b/jstests/replsets/rollback_set_fcv.js index 6db3b57c30d16..78eac5039f1c1 100644 --- a/jstests/replsets/rollback_set_fcv.js +++ b/jstests/replsets/rollback_set_fcv.js @@ -9,14 +9,10 @@ * @tags: [multiversion_incompatible] */ -(function() { -"use strict"; - load("jstests/replsets/libs/rollback_test.js"); load('jstests/libs/parallel_shell_helpers.js'); load("jstests/libs/fail_point_util.js"); load("jstests/replsets/rslib.js"); -load("jstests/libs/feature_flag_util.js"); function setFCV(fcv) { assert.commandFailedWithCode(db.adminCommand({setFeatureCompatibilityVersion: fcv}), @@ -107,11 +103,6 @@ function rollbackFCVFromDowngradedOrUpgraded(fromFCV, toFCV, failPoint) { let primaryAdminDB = primary.getDB('admin'); let secondaryAdminDB = secondary.getDB('admin'); - const isDowngradingToUpgradingFlagOn = FeatureFlagUtil.isEnabled(primaryAdminDB, - "DowngradingToUpgrading", - null /* user not specified */, - true /* ignores FCV */); - // Complete the upgrade/downgrade to ensure we are not in the upgrading/downgrading state. assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: toFCV})); // Wait for the majority commit point to be updated on the secondary, because checkFCV calls @@ -137,7 +128,7 @@ function rollbackFCVFromDowngradedOrUpgraded(fromFCV, toFCV, failPoint) { }, "Failed waiting for server to unset the targetVersion or to set the FCV to " + fromFCV); rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); // The secondary should never have received the update to unset the targetVersion. - if (fromFCV == lastLTSFCV && isDowngradingToUpgradingFlagOn) { + if (fromFCV == lastLTSFCV) { // When downgrading, the secondary should still be in isCleaningServerMetadata. checkFCV(secondaryAdminDB, lastLTSFCV, fromFCV, true /* isCleaningServerMetadata */); } else { @@ -159,7 +150,7 @@ function rollbackFCVFromDowngradedOrUpgraded(fromFCV, toFCV, failPoint) { assert.eq(topologyVersionBeforeRollback.counter + topologyVersionDiff, topologyVersionAfterRollback.counter); // The primary should have rolled back their FCV to contain the targetVersion. - if (fromFCV == lastLTSFCV && isDowngradingToUpgradingFlagOn) { + if (fromFCV == lastLTSFCV) { // Rolling back from downgraded to isCleaningServerMetadata state. 
checkFCV(primaryAdminDB, lastLTSFCV, fromFCV, true /* isCleaningServerMetadata */); checkFCV(secondaryAdminDB, lastLTSFCV, fromFCV, true /* isCleaningServerMetadata */); @@ -175,7 +166,7 @@ function rollbackFCVFromDowngradedOrUpgraded(fromFCV, toFCV, failPoint) { // server metadata. // Ensure that the in-memory and on-disk FCV are consistent by checking that this rule is // upheld after rollback. - if (fromFCV === lastLTSFCV && toFCV === latestFCV && isDowngradingToUpgradingFlagOn) { + if (fromFCV === lastLTSFCV && toFCV === latestFCV) { assert.commandFailedWithCode( newPrimary.adminCommand({setFeatureCompatibilityVersion: toFCV}), 7428200); } else { @@ -198,12 +189,6 @@ function rollbackFCVFromUpgradingToDowngrading() { // Ensure the cluster starts at the correct FCV. assert.commandWorked(rollbackNode.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - if (!FeatureFlagUtil.isEnabled(rollbackNodeAdminDB, "DowngradingToUpgrading")) { - jsTestLog( - "Skipping rollbackFCVFromUpgradingToDowngrading as featureFlagDowngradingToUpgrading is not enabled"); - return; - } - fcvDoc = rollbackNodeAdminDB.system.version.findOne({_id: 'featureCompatibilityVersion'}); jsTestLog(`rollbackNode's version at start: ${tojson(fcvDoc)}`); checkFCV(rollbackNodeAdminDB, latestFCV); @@ -305,15 +290,6 @@ function rollbackFCVFromIsCleaningServerMetadataToDowngrading() { let primaryAdminDB = primary.getDB('admin'); let secondaryAdminDB = secondary.getDB('admin'); - if (!FeatureFlagUtil.isEnabled(primaryAdminDB, - "DowngradingToUpgrading", - null /* user not specified */, - true /* ignores FCV */)) { - jsTestLog( - "Skipping rollbackFCVFromIsCleaningServerMetadataToDowngrading test because isDowngradingToUpgrading is not enabled"); - return; - } - // Complete the upgrade/downgrade to ensure we are not in the upgrading/downgrading state. assert.commandWorked(primary.adminCommand({setFeatureCompatibilityVersion: latestFCV})); // Wait for the majority commit point to be updated on the secondary, because checkFCV calls @@ -386,5 +362,4 @@ rollbackFCVFromUpgradingToDowngrading(); // Tests roll back from isCleaningServerMetadata to downgrading. 
rollbackFCVFromIsCleaningServerMetadataToDowngrading(); -rollbackTest.stop(); -}()); +rollbackTest.stop(); \ No newline at end of file diff --git a/jstests/replsets/rollback_test_control.js b/jstests/replsets/rollback_test_control.js index bf70a7a84a0e5..082b05da52894 100644 --- a/jstests/replsets/rollback_test_control.js +++ b/jstests/replsets/rollback_test_control.js @@ -13,4 +13,4 @@ rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); rollbackTest.transitionToSyncSourceOperationsDuringRollback(); rollbackTest.transitionToSteadyStateOperations(); rollbackTest.stop(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/rollback_time_limit_param.js b/jstests/replsets/rollback_time_limit_param.js index 345e38f5e898f..de1ffbf494d64 100644 --- a/jstests/replsets/rollback_time_limit_param.js +++ b/jstests/replsets/rollback_time_limit_param.js @@ -50,4 +50,4 @@ assert.commandFailedWithCode(primary.adminCommand({setParameter: 1, rollbackTime ErrorCodes.BadValue); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/rollback_unclean_shutdowns_parameter_obeyed.js b/jstests/replsets/rollback_unclean_shutdowns_parameter_obeyed.js index 389a7c4933f26..b60c691613187 100644 --- a/jstests/replsets/rollback_unclean_shutdowns_parameter_obeyed.js +++ b/jstests/replsets/rollback_unclean_shutdowns_parameter_obeyed.js @@ -36,4 +36,4 @@ rollbackTest.transitionToSteadyStateOperations(); assert.eq(rawMongoProgramOutput().search(/Detected unclean shutdown/), -1); rollbackTest.stop(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/rollback_with_coalesced_txn_table_updates_from_vectored_inserts.js b/jstests/replsets/rollback_with_coalesced_txn_table_updates_from_vectored_inserts.js index b56a40c51ab5c..08a62e50f6367 100644 --- a/jstests/replsets/rollback_with_coalesced_txn_table_updates_from_vectored_inserts.js +++ b/jstests/replsets/rollback_with_coalesced_txn_table_updates_from_vectored_inserts.js @@ -118,4 +118,4 @@ assert.commandWorked(primary.getCollection(ns).runCommand("insert", { })); rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/rslib.js b/jstests/replsets/rslib.js index 1c4bafa3060b1..f05588e311b6c 100644 --- a/jstests/replsets/rslib.js +++ b/jstests/replsets/rslib.js @@ -26,6 +26,7 @@ var disconnectSecondaries; var reconnectSecondaries; var createRstArgs; var createRst; +var waitAllNodesHaveConfig; (function() { "use strict"; @@ -129,13 +130,13 @@ reconnect = function(conn) { try { // Make this work with either dbs or connections. if (typeof (conn.getDB) == "function") { - db = conn.getDB('foo'); + db = conn.getDB('config'); } else { db = conn; } // Run a simple command to re-establish connection. - db.bar.stats(); + db.settings.stats(); // SERVER-4241: Shell connections don't re-authenticate on reconnect. 
if (jsTest.options().keyFile) { @@ -532,8 +533,8 @@ reInitiateWithoutThrowingOnAbortedMember = function(replSetTest) { try { replSetTest.reInitiate(); } catch (e) { - // reInitiate can throw because it tries to run an ismaster command on - // all secondaries, including the new one that may have already aborted + // reInitiate can throw because it tries to run a "hello" command on all secondaries, + // including the new one that may have already aborted const errMsg = tojson(e); if (isNetworkError(e)) { // Ignore these exceptions, which are indicative of an aborted node @@ -587,7 +588,7 @@ awaitRSClientHosts = function(conn, host, hostOk, rs, timeout) { // Check that *all* host properties are set correctly var propOk = true; for (var prop in hostOk) { - // Use special comparator for tags because isMaster can return the fields in + // Use special comparator for tags because hello can return the fields in // different order. The fields of the tags should be treated like a set of // strings and 2 tags should be considered the same if the set is equal. if (prop == 'tags') { @@ -883,4 +884,16 @@ createRst = function(rstArgs, retryOnRetryableErrors) { } } }; + +/** + * Wait until all the nodes in a replica set have the same config as the input config. + */ +waitAllNodesHaveConfig = function(replSet, config) { + replSet.nodes.forEach(function(node) { + assert.soon(function() { + const nodeConfig = replSet.getReplSetConfigFromNode(node.nodeId); + return isSameConfigContent(config, nodeConfig); + }); + }); +}; }()); diff --git a/jstests/replsets/secondary_as_sync_source.js b/jstests/replsets/secondary_as_sync_source.js index 60e15e55a71ac..328b48b5e6cd3 100644 --- a/jstests/replsets/secondary_as_sync_source.js +++ b/jstests/replsets/secondary_as_sync_source.js @@ -4,22 +4,22 @@ * sync operation. * * @tags: [ + * requires_fcv_71, * requires_replication, * ] */ -(function() { -'use strict'; - +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load('jstests/noPassthrough/libs/index_build.js'); load("jstests/replsets/rslib.js"); const dbName = "test"; const collName = "coll"; +const timeseriesCollName = "tscoll"; -function addTestDocuments(db) { +function addTestDocuments(coll) { let size = 100; jsTest.log("Creating " + size + " test documents."); - var bulk = db.getCollection(collName).initializeUnorderedBulkOp(); + var bulk = coll.initializeUnorderedBulkOp(); for (var i = 0; i < size; ++i) { bulk.insert({i: i}); } @@ -49,20 +49,32 @@ let primaryDB = primary.getDB(dbName); let secondary = replSet.getSecondary(); let secondaryDB = secondary.getDB(dbName); -addTestDocuments(primaryDB); +const coll = primaryDB.getCollection(collName); +addTestDocuments(coll); + +// Create time-series collection with a single measurement. +// We need a non-empty collection to use two-phase index builds. +assert.commandWorked( + primaryDB.createCollection(timeseriesCollName, {timeseries: {timeField: 'time'}})); +const timeseriesColl = primaryDB.getCollection(timeseriesCollName); +assert.commandWorked(timeseriesColl.insert({time: ISODate(), x: 1})); // Used to wait for two-phase builds to complete. 
let awaitIndex; +let awaitIndexTimeseries; jsTest.log("Hanging index build on the primary node"); IndexBuildTest.pauseIndexBuilds(primary); jsTest.log("Beginning index build"); -const coll = primaryDB.getCollection(collName); awaitIndex = IndexBuildTest.startIndexBuild(primary, coll.getFullName(), {i: 1}); +awaitIndexTimeseries = + IndexBuildTest.startIndexBuild(primary, timeseriesColl.getFullName(), {x: 1}); jsTest.log("Waiting for index build to start on secondary"); -IndexBuildTest.waitForIndexBuildToStart(secondaryDB); +IndexBuildTest.waitForIndexBuildToStart(secondaryDB, collName, 'i_1'); +IndexBuildTest.waitForIndexBuildToStart( + secondaryDB, TimeseriesTest.getBucketsCollName(timeseriesCollName), 'x_1'); jsTest.log("Adding a new node to the replica set"); let newNode = replSet.add({ @@ -85,6 +97,7 @@ waitForState(newNode, ReplSetTest.State.SECONDARY); jsTest.log("Removing index build hang to allow it to finish"); IndexBuildTest.resumeIndexBuilds(primary); awaitIndex(); +awaitIndexTimeseries(); // Wait for the index builds to finish. replSet.awaitReplication(); @@ -99,5 +112,12 @@ printjson(secondaryDB.getCollection(collName).getIndexes()); assert.eq(newNodeDB.getCollection(collName).getIndexes().length, secondaryDB.getCollection(collName).getIndexes().length); +jsTest.log("New nodes indexes for time-series collection:"); +printjson(newNodeDB.getCollection(timeseriesCollName).getIndexes()); +jsTest.log("Secondary nodes indexes for time-series collection:"); +printjson(secondaryDB.getCollection(timeseriesCollName).getIndexes()); + +assert.eq(newNodeDB.getCollection(timeseriesCollName).getIndexes().length, + secondaryDB.getCollection(timeseriesCollName).getIndexes().length); + replSet.stopSet(); -})(); diff --git a/jstests/replsets/secondarydelaysecs_waits_for_writes.js b/jstests/replsets/secondarydelaysecs_waits_for_writes.js index 2b6a9cc8f3138..2265c23f609b7 100644 --- a/jstests/replsets/secondarydelaysecs_waits_for_writes.js +++ b/jstests/replsets/secondarydelaysecs_waits_for_writes.js @@ -12,7 +12,7 @@ // ] load("jstests/replsets/rslib.js"); -doTest = function(signal) { +let doTest = function(signal) { var name = "secondaryDelaySecs"; var host = getHostName(); @@ -67,7 +67,7 @@ doTest = function(signal) { /************* Part 2 *******************/ // how about if we add a new server? will it sync correctly? 
- conn = replTest.add(); + let conn = replTest.add(); config = primary.getSiblingDB("local").system.replset.findOne(); printjson(config); diff --git a/jstests/replsets/server_status_repl.js b/jstests/replsets/server_status_repl.js index 058fc14c5fa6a..eb04673d13851 100644 --- a/jstests/replsets/server_status_repl.js +++ b/jstests/replsets/server_status_repl.js @@ -14,4 +14,4 @@ assert.commandWorked(testDB.b.insert({}, {writeConcern: {w: 2}})); var ss = primary.getDB("test").serverStatus({repl: 1}); assert.neq(ss.repl.replicationProgress, null, tojson(ss.repl)); -rt.stopSet(); \ No newline at end of file +rt.stopSet(); diff --git a/jstests/replsets/server_status_repl_is_writable_primary.js b/jstests/replsets/server_status_repl_is_writable_primary.js index d33eaee96b441..092f7933721c1 100644 --- a/jstests/replsets/server_status_repl_is_writable_primary.js +++ b/jstests/replsets/server_status_repl_is_writable_primary.js @@ -13,4 +13,4 @@ assert.eq(serverStatusMetricsRepl.isWritablePrimary, true, "repl.isWritablePrima assert.eq( serverStatusMetricsRepl.hasOwnProperty('ismaster'), false, "repl.ismaster should be undefined"); replTest.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/sessions_collection_reaping.js b/jstests/replsets/sessions_collection_reaping.js index bd7d5c4c5547e..58ae758c06be6 100644 --- a/jstests/replsets/sessions_collection_reaping.js +++ b/jstests/replsets/sessions_collection_reaping.js @@ -17,12 +17,7 @@ let replTest = new ReplSetTest({ {/* secondary */ rsConfig: {priority: 0}}, {/* arbiter */ rsConfig: {arbiterOnly: true}} ], - nodeOptions: { - setParameter: { - TransactionRecordMinimumLifetimeMinutes: 0, - storeFindAndModifyImagesInSideCollection: true - } - } + nodeOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: 0}} }); let nodes = replTest.startSet(); diff --git a/jstests/replsets/set_cluster_parameter_replset.js b/jstests/replsets/set_cluster_parameter_replset.js index 4339b708588fd..e2cd241b93a26 100644 --- a/jstests/replsets/set_cluster_parameter_replset.js +++ b/jstests/replsets/set_cluster_parameter_replset.js @@ -7,10 +7,11 @@ * requires_persistence, * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + runGetClusterParameterNode, + runGetClusterParameterReplicaSet, + runSetClusterParameter, +} from "jstests/libs/cluster_server_parameter_utils.js"; // Checks that up-to-date cluster parameters are transferred over to newly-added replica set nodes // as part of initial sync. 
@@ -96,5 +97,4 @@ rst.initiate(); checkClusterParameterInitialSync(rst); checkClusterParameterRestart(rst); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/replsets/shard_merge_enabled.js b/jstests/replsets/shard_merge_enabled.js index df21224cca865..c37d0dc39de11 100644 --- a/jstests/replsets/shard_merge_enabled.js +++ b/jstests/replsets/shard_merge_enabled.js @@ -10,7 +10,7 @@ import { load("jstests/libs/fail_point_util.js"); function runTest(downgradeFCV) { - const rst = new ReplSetTest({nodes: 1}); + const rst = new ReplSetTest({nodes: 1, serverless: true}); rst.startSet(); rst.initiate(); diff --git a/jstests/replsets/shard_merge_invalid_options.js b/jstests/replsets/shard_merge_invalid_options.js index d461cbc16d141..aa3206fa804aa 100644 --- a/jstests/replsets/shard_merge_invalid_options.js +++ b/jstests/replsets/shard_merge_invalid_options.js @@ -8,12 +8,14 @@ import { isShardMergeEnabled, - makeMigrationCertificatesForTest + kProtocolShardMerge, + makeMigrationCertificatesForTest, } from "jstests/replsets/libs/tenant_migration_util.js"; + load("jstests/libs/fail_point_util.js"); function runTest(nodeOptions) { - const rst = new ReplSetTest({nodes: 1, nodeOptions: nodeOptions}); + const rst = new ReplSetTest({nodes: 1, serverless: true, nodeOptions: nodeOptions}); rst.startSet(); rst.initiate(); @@ -47,7 +49,7 @@ function runTest(nodeOptions) { assert.commandFailedWithCode( adminDB.runCommand({ donorStartMigration: 1, - protocol: "shard merge", + protocol: kProtocolShardMerge, migrationId: UUID(), recipientConnectionString: kDummyConnStr, readPreference: readPreference, @@ -63,7 +65,7 @@ function runTest(nodeOptions) { assert.commandFailedWithCode( adminDB.runCommand({ donorStartMigration: 1, - protocol: "shard merge", + protocol: kProtocolShardMerge, migrationId: UUID(), recipientConnectionString: kDummyConnStr, readPreference: readPreference, @@ -78,7 +80,7 @@ function runTest(nodeOptions) { assert.commandFailedWithCode( adminDB.runCommand({ donorStartMigration: 1, - protocol: "shard merge", + protocol: kProtocolShardMerge, migrationId: UUID(), recipientConnectionString: kDummyConnStr, readPreference: readPreference, @@ -96,7 +98,7 @@ function runTest(nodeOptions) { assert.commandFailedWithCode( adminDB.runCommand({ donorStartMigration: 1, - protocol: "shard merge", + protocol: kProtocolShardMerge, migrationId: UUID(), recipientConnectionString: kDummyConnStr, readPreference: readPreference, @@ -113,7 +115,7 @@ function runTest(nodeOptions) { assert.commandFailedWithCode( adminDB.runCommand({ donorStartMigration: 1, - protocol: "shard merge", + protocol: kProtocolShardMerge, migrationId: UUID(), recipientConnectionString: kDummyConnStr, readPreference: readPreference, @@ -129,7 +131,7 @@ function runTest(nodeOptions) { assert.commandFailedWithCode( adminDB.runCommand({ donorStartMigration: 1, - protocol: "shard merge", + protocol: kProtocolShardMerge, migrationId: UUID(), recipientConnectionString: kDummyConnStr, readPreference: readPreference, @@ -145,7 +147,7 @@ function runTest(nodeOptions) { assert.commandFailedWithCode( adminDB.runCommand({ recipientSyncData: 1, - protocol: "shard merge", + protocol: kProtocolShardMerge, migrationId: UUID(), tenantIds: [ObjectId()], donorConnectionString: kDummyConnStr, diff --git a/jstests/replsets/single_server_majority.js b/jstests/replsets/single_server_majority.js index 039d8ccebeb9b..fab4969d8e42b 100644 --- a/jstests/replsets/single_server_majority.js +++ 
b/jstests/replsets/single_server_majority.js @@ -5,9 +5,9 @@ var mongod = MongoRunner.runMongod({}); // get db and collection, then perform a trivial insert db = mongod.getDB("test"); -col = db.getCollection("single_server_majority"); +let col = db.getCollection("single_server_majority"); col.drop(); // see if we can get a majority write on this single server assert.commandWorked(col.save({a: "test"}, {writeConcern: {w: 'majority'}})); -MongoRunner.stopMongod(mongod); \ No newline at end of file +MongoRunner.stopMongod(mongod); diff --git a/jstests/replsets/split_horizon_hostname_startup.js b/jstests/replsets/split_horizon_hostname_startup.js index fa6b05fc58702..0c310cc92784c 100644 --- a/jstests/replsets/split_horizon_hostname_startup.js +++ b/jstests/replsets/split_horizon_hostname_startup.js @@ -70,4 +70,4 @@ assert.commandFailed(output); assert(output.errmsg.includes("Found split horizon configuration using IP")); MongoRunner.stopMongod(mongod); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/split_horizon_hostname_validation.js b/jstests/replsets/split_horizon_hostname_validation.js index c9640d5d33b98..40f9144f538f8 100644 --- a/jstests/replsets/split_horizon_hostname_validation.js +++ b/jstests/replsets/split_horizon_hostname_validation.js @@ -51,4 +51,4 @@ testConfig("a", "12.34.56.78/20", true); // Make sure setting this parameter disables the check testConfig("a", "12.34.56.78", false, {setParameter: {disableSplitHorizonIPCheck: true}}); testConfig("a", "12.34.56.78/20", false, {setParameter: {disableSplitHorizonIPCheck: true}}); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/split_horizon_startup_warning.js b/jstests/replsets/split_horizon_startup_warning.js index a051bbb0e926c..22a7f64948eb4 100644 --- a/jstests/replsets/split_horizon_startup_warning.js +++ b/jstests/replsets/split_horizon_startup_warning.js @@ -53,4 +53,4 @@ function testStartupWarnings(horizonName, options = {}) { // Check for startup warnings about IP addresses in SplitHorizon mappings testStartupWarnings("12.34.56.78"); testStartupWarnings("12.34.56.78/20"); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/stable_timestamp_can_advance_after_oplog_hole_abort.js b/jstests/replsets/stable_timestamp_can_advance_after_oplog_hole_abort.js index ad47828779394..af5567263f1c6 100644 --- a/jstests/replsets/stable_timestamp_can_advance_after_oplog_hole_abort.js +++ b/jstests/replsets/stable_timestamp_can_advance_after_oplog_hole_abort.js @@ -194,4 +194,4 @@ testInsert(); testUnpreparedTransactionCommit(); replTest.stopSet(); -}()); \ No newline at end of file +}()); diff --git a/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js b/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js index ec537f873fda9..32a295f4d68d5 100644 --- a/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js +++ b/jstests/replsets/startup_without_fcv_document_succeeds_if_initial_sync_flag_set.js @@ -10,7 +10,7 @@ load("jstests/libs/fail_point_util.js"); -rst = new ReplSetTest({nodes: 1}); +let rst = new ReplSetTest({nodes: 1}); rst.startSet(); rst.initiate(); diff --git a/jstests/replsets/step_down_chaining_disabled.js b/jstests/replsets/step_down_chaining_disabled.js index b99f18ce4abe6..4591668434066 100644 --- a/jstests/replsets/step_down_chaining_disabled.js +++ b/jstests/replsets/step_down_chaining_disabled.js @@ -30,4 +30,4 @@ 
assert.commandWorked(newPrimary.adminCommand({setParameter: 1, writePeriodicNoop replSet.awaitSyncSource(secondary, newPrimary); replSet.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/step_down_on_secondary.js b/jstests/replsets/step_down_on_secondary.js index c74263f7e34c7..10bc7f4cc23cf 100644 --- a/jstests/replsets/step_down_on_secondary.js +++ b/jstests/replsets/step_down_on_secondary.js @@ -95,7 +95,7 @@ const wTPrintPrepareConflictLogFailPoint = configureFailPoint(primary, "WTPrintP const joinReadThread = startParallelShell(() => { db.getMongo().setSecondaryOk(); - oldPrimaryDB = db.getSiblingDB(TestData.dbName); + let oldPrimaryDB = db.getSiblingDB(TestData.dbName); assert.commandFailedWithCode(oldPrimaryDB.runCommand({ find: TestData.collName, diff --git a/jstests/replsets/stepdown_during_set_fcv.js b/jstests/replsets/stepdown_during_set_fcv.js index b75543dac0c5f..c2338f380a6e4 100644 --- a/jstests/replsets/stepdown_during_set_fcv.js +++ b/jstests/replsets/stepdown_during_set_fcv.js @@ -66,4 +66,4 @@ if (lastLTSFCV !== lastContinuousFCV) { jsTestLog("Running test against lastContinuousFCV"); runTest(lastContinuousFCV); } -})(); \ No newline at end of file +})(); diff --git a/jstests/replsets/sync_passive.js b/jstests/replsets/sync_passive.js index 1c9f385e9188a..a3bd34787c5bb 100644 --- a/jstests/replsets/sync_passive.js +++ b/jstests/replsets/sync_passive.js @@ -81,4 +81,4 @@ replTest.awaitReplication(null, null, liveSecondaries); print("bring #1 back up, make sure everything's okay"); replTest.restart(1); -replTest.stopSet(); \ No newline at end of file +replTest.stopSet(); diff --git a/jstests/replsets/sync_source_changes.js b/jstests/replsets/sync_source_changes.js index 38e55dac5e7ed..02bce0ba3b961 100644 --- a/jstests/replsets/sync_source_changes.js +++ b/jstests/replsets/sync_source_changes.js @@ -56,6 +56,9 @@ rst.waitForState(newNode, ReplSetTest.State.SECONDARY); rst.awaitReplication(); rst.awaitSecondaryNodes(); +// Wait for the new node to no longer be newlyAdded, so that it becomes a voting node. +rst.waitForAllNewlyAddedRemovals(); + // Assure that node 2 will set node 0 as its sync source, since it is the best option. 
assertSyncSourceChangesTo(rst, newNode, rst.nodes[0]); diff --git a/jstests/replsets/temp_namespace.js b/jstests/replsets/temp_namespace.js index 7cb78c2486055..6c89f250ee6c8 100644 --- a/jstests/replsets/temp_namespace.js +++ b/jstests/replsets/temp_namespace.js @@ -70,7 +70,7 @@ replTest.restart(replTest.getNodeId(secondary), {}, /*wait=*/ true); // wait for the secondary to achieve secondary status assert.soon(function() { try { - res = secondary.getDB("admin").runCommand({replSetGetStatus: 1}); + let res = secondary.getDB("admin").runCommand({replSetGetStatus: 1}); return res.myState == 2; } catch (e) { return false; diff --git a/jstests/replsets/tenant_migration_abort_forget_retry.js b/jstests/replsets/tenant_migration_abort_forget_retry.js index 53f8d16d09765..4f3eb5e6381ff 100644 --- a/jstests/replsets/tenant_migration_abort_forget_retry.js +++ b/jstests/replsets/tenant_migration_abort_forget_retry.js @@ -14,10 +14,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - runTenantMigrationCommand, - tryAbortMigrationAsync -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {tryAbortMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallelTester.js"); @@ -81,7 +78,7 @@ const tenantMigrationTest = const tryAbortThread = new Thread(tryAbortMigrationAsync, {migrationIdString: migrationId1, tenantId: tenantId}, donorRstArgs, - runTenantMigrationCommand); + true /* retryOnRetryableErrors */); tryAbortThread.start(); // Wait for donorAbortMigration command to start. diff --git a/jstests/replsets/tenant_migration_aborted_buildindex.js b/jstests/replsets/tenant_migration_aborted_buildindex.js index fd6c9baf57ceb..f75056b267dc6 100644 --- a/jstests/replsets/tenant_migration_aborted_buildindex.js +++ b/jstests/replsets/tenant_migration_aborted_buildindex.js @@ -11,9 +11,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - runMigrationAsync, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallelTester.js"); @@ -23,7 +21,7 @@ load("jstests/replsets/rslib.js"); // 'createRstArgs' const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kEmptyCollName = "testEmptyColl"; const kNonEmptyCollName = "testNonEmptyColl"; const kNewCollName1 = "testNewColl1"; diff --git a/jstests/replsets/tenant_migration_advance_stable_ts_after_clone.js b/jstests/replsets/tenant_migration_advance_stable_ts_after_clone.js index 91cfc8d80e41b..1380063064b6a 100644 --- a/jstests/replsets/tenant_migration_advance_stable_ts_after_clone.js +++ b/jstests/replsets/tenant_migration_advance_stable_ts_after_clone.js @@ -13,7 +13,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; +import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). 
@@ -34,7 +34,7 @@ tmt.insertDonorDB(`${tenantId}_db`, collName); const donorPrimary = tmt.getDonorPrimary(); const recipientPrimary = tmt.getRecipientPrimary(); -const kRelatedDbNameDonor = tmt.tenantDB(tenantId, "donorDb"); +const kRelatedDbNameDonor = makeTenantDB(tenantId, "donorDb"); // Note: including this explicit early return here due to the fact that multiversion // suites will execute this test without featureFlagShardMerge enabled (despite the @@ -83,7 +83,7 @@ const hangBeforeAdvanceStableTsFp = configureFailPoint(recipientPrimary, "fpBeforeAdvancingStableTimestamp", {action: "hang"}); // Start the migration. -assert.commandWorked(tmt.startMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); +assert.commandWorked(tmt.startMigration(migrationOpts)); // The recipient's stable timestamp should be less than the timestamp it receives from the donor to // use as the startApplyingDonorOpTime, so the recipient should advance its stable timestamp. Wait diff --git a/jstests/replsets/tenant_migration_buildindex.js b/jstests/replsets/tenant_migration_buildindex.js index 519debee1e1ee..e5ad0c8b6d912 100644 --- a/jstests/replsets/tenant_migration_buildindex.js +++ b/jstests/replsets/tenant_migration_buildindex.js @@ -15,6 +15,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { isShardMergeEnabled, + makeTenantDB, runMigrationAsync } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -27,8 +28,8 @@ const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const kTenantId = ObjectId().str; const kUnrelatedTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); -const kUnrelatedDbName = tenantMigrationTest.tenantDB(kUnrelatedTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); +const kUnrelatedDbName = makeTenantDB(kUnrelatedTenantId, "testDB"); const kEmptyCollName = "testEmptyColl"; const kNonEmptyCollName = "testNonEmptyColl"; const kNewCollName1 = "testNewColl1"; diff --git a/jstests/replsets/tenant_migration_buildindex_shard_merge.js b/jstests/replsets/tenant_migration_buildindex_shard_merge.js index b7ec757f8b360..db46fcdad00c6 100644 --- a/jstests/replsets/tenant_migration_buildindex_shard_merge.js +++ b/jstests/replsets/tenant_migration_buildindex_shard_merge.js @@ -16,6 +16,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { isShardMergeEnabled, + makeTenantDB, runMigrationAsync } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -42,8 +43,8 @@ if (!isShardMergeEnabled(donorPrimary.getDB("admin"))) { const kTenant1Id = ObjectId().str; const kTenant2Id = ObjectId().str; -const kTenant1DbName = tenantMigrationTest.tenantDB(kTenant1Id, "testDB"); -const kTenant2DbName = tenantMigrationTest.tenantDB(kTenant2Id, "testDB"); +const kTenant1DbName = makeTenantDB(kTenant1Id, "testDB"); +const kTenant2DbName = makeTenantDB(kTenant2Id, "testDB"); const kEmptyCollName = "testEmptyColl"; const kNonEmptyCollName = "testNonEmptyColl"; const kNewCollName1 = "testNewColl1"; diff --git a/jstests/replsets/tenant_migration_causal_consistency_commit_optime_before_last_cloning_optime.js b/jstests/replsets/tenant_migration_causal_consistency_commit_optime_before_last_cloning_optime.js index 4a3796b85f04c..e16e76e2a002b 100644 --- a/jstests/replsets/tenant_migration_causal_consistency_commit_optime_before_last_cloning_optime.js +++ 
b/jstests/replsets/tenant_migration_causal_consistency_commit_optime_before_last_cloning_optime.js @@ -2,9 +2,6 @@ * Verify that causal consistency is respected if a tenant migration commits with an earlier optime * timestamp than the latest optime associated with cloning on the recipient. * - * TODO (SERVER-61231): This test currently relies on a TenantCollectionCloner failpoint, which is - * not used by shard merge, but the behavior we are testing here is likely still relevant. Adapt - * for shard merge. * * @tags: [ * incompatible_with_macos, @@ -69,11 +66,11 @@ function assertCanFindWithReadConcern(conn, dbName, collName, expectedDoc, readC "insert", {insert: collName, documents: [{_id: 0, x: 0}]})); assert(insertRes.operationTime, tojson(insertRes)); - // Start a migration and pause the recipient before it copies documents from the donor. - const hangAfterCreateCollectionFp = configureFailPoint( - tmt.getRecipientRst().getPrimary(), "tenantCollectionClonerHangAfterCreateCollection"); + let hangFp = configureFailPoint(tmt.getRecipientPrimary(), + "fpAfterPersistingTenantMigrationRecipientInstanceStateDoc", + {action: "hang"}); assert.commandWorked(tmt.startMigration(migrationOpts)); - hangAfterCreateCollectionFp.wait(); + hangFp.wait(); // Do writes on the recipient to advance its cluster time past the donor's. let bulk = tmt.getRecipientPrimary().getDB("unrelatedDB").bar.initializeUnorderedBulkOp(); @@ -84,7 +81,7 @@ function assertCanFindWithReadConcern(conn, dbName, collName, expectedDoc, readC // Allow the migration to complete. The cloned op should be written with a later opTime on the // recipient than the migration commits with on the donor. - hangAfterCreateCollectionFp.off(); + hangFp.off(); TenantMigrationTest.assertCommitted(tmt.waitForMigrationToComplete(migrationOpts)); // Local reads should always see all the tenant's data, with or without afterClusterTime. @@ -132,13 +129,12 @@ function assertCanFindWithReadConcern(conn, dbName, collName, expectedDoc, readC const laggedSecondary = tmt.getRecipientRst().getSecondaries()[0]; const normalSecondary = tmt.getRecipientRst().getSecondaries()[1]; - // Start a migration and pause the recipient before it copies documents from the donor. Disable - // snapshotting after waiting for the last op to become committed, so a last committed snapshot - // exists but does not contain any documents from the donor. - const hangAfterCreateCollectionFp = configureFailPoint( - tmt.getRecipientRst().getPrimary(), "tenantCollectionClonerHangAfterCreateCollection"); + let hangFp = configureFailPoint(tmt.getRecipientPrimary(), + "fpAfterPersistingTenantMigrationRecipientInstanceStateDoc", + {action: "hang"}); + assert.commandWorked(tmt.startMigration(migrationOpts)); - hangAfterCreateCollectionFp.wait(); + hangFp.wait(); tmt.getRecipientRst().awaitLastOpCommitted(); const snapshotFp = configureFailPoint(laggedSecondary, "disableSnapshotting"); @@ -153,7 +149,7 @@ function assertCanFindWithReadConcern(conn, dbName, collName, expectedDoc, readC // Allow the migration to complete. The cloned op should commit with a later opTime on the // recipient than the migration commits with on the donor. 
- hangAfterCreateCollectionFp.off(); + hangFp.off(); TenantMigrationTest.assertCommitted(tmt.waitForMigrationToComplete(migrationOpts)); // Verify majority reads cannot be served on the lagged recipient secondary with or without diff --git a/jstests/replsets/tenant_migration_cloner_stats.js b/jstests/replsets/tenant_migration_cloner_stats.js index 19425a9df2038..0fdddd56797a3 100644 --- a/jstests/replsets/tenant_migration_cloner_stats.js +++ b/jstests/replsets/tenant_migration_cloner_stats.js @@ -16,6 +16,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). @@ -34,7 +35,7 @@ const migrationOpts = { readPreference: kReadPreference }; -const dbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const dbName = makeTenantDB(kTenantId, "testDB"); const collName = "testColl"; const dbName1 = dbName + '_db_1'; diff --git a/jstests/replsets/tenant_migration_cloner_stats_with_failover.js b/jstests/replsets/tenant_migration_cloner_stats_with_failover.js deleted file mode 100644 index 77c98dea61322..0000000000000 --- a/jstests/replsets/tenant_migration_cloner_stats_with_failover.js +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Tests tenant migration cloner stats such as 'approxTotalDataSize', 'approxTotalBytesCopied', - * 'databasesClonedBeforeFailover' across multiple databases and collections with failovers. - * - * This test does the following: - * 1. Insert two databases on the donor. The first database consists of one collection, the second - * consists of two collections. - * 2. Wait for the primary (referred to as the original primary) to clone one batch from the second - * database's second collection. - * 3. Step up the new primary. Ensure that the stats such as 'databasesClonedBeforeFailover' tally. - * 4. Allow the tenant migration to complete and commit. Ensure that stats are sensible. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * incompatible_with_shard_merge, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). -load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). - -// Limit the batch size to test the stat in between batches. -const tenantMigrationTest = new TenantMigrationTest( - {name: jsTestName(), sharedOptions: {setParameter: {collectionClonerBatchSize: 10}}}); - -const kMigrationId = UUID(); -const kTenantId = ObjectId().str; -const kReadPreference = { - mode: "primary" -}; -const migrationOpts = { - migrationIdString: extractUUIDFromObject(kMigrationId), - tenantId: kTenantId, - readPreference: kReadPreference -}; - -const dbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); -const collName = "coll"; -const dbName1 = dbName + '_db_1'; -const dbName2 = dbName + '_db_2'; -const db2Coll1 = collName + "_db_2_1"; -const db2Coll2 = collName + "_db_2_2"; - -// Add a large amount of data to the donor. 
-jsTestLog("Adding data to donor."); -const dataForEachCollection = [...Array(100).keys()].map((i) => ({a: i, b: 'metanoia'})); -tenantMigrationTest.insertDonorDB(dbName1, collName + "_1", dataForEachCollection); -tenantMigrationTest.insertDonorDB(dbName2, db2Coll1, dataForEachCollection); -tenantMigrationTest.insertDonorDB(dbName2, db2Coll2, dataForEachCollection); - -const originalRecipientPrimary = tenantMigrationTest.getRecipientPrimary(); -const newRecipientPrimary = tenantMigrationTest.getRecipientRst().getSecondaries()[0]; - -jsTestLog("Collecting the stats of the databases and collections from the donor."); -const donorPrimary = tenantMigrationTest.getDonorPrimary(); -const donorDB2 = donorPrimary.getDB(dbName2); - -const db1Size = assert.commandWorked(donorPrimary.getDB(dbName1).runCommand({dbStats: 1})).dataSize; -const db2Size = assert.commandWorked(donorDB2.runCommand({dbStats: 1})).dataSize; -const db2Collection1Size = assert.commandWorked(donorDB2.runCommand({collStats: db2Coll1})).size; -const db2Collection2Size = assert.commandWorked(donorDB2.runCommand({collStats: db2Coll2})).size; - -const donorStats = { - db1Size, - db2Size, - db2Collection1Size, - db2Collection2Size -}; -jsTestLog("Collected the following stats on the donor: " + tojson(donorStats)); - -// The last collection to be cloned is the one with a greater UUID. -const collInfo = donorDB2.getCollectionInfos(); -const uuid1 = collInfo[0].info.uuid; -const uuid2 = collInfo[1].info.uuid; -const lastCollection = (uuid1 > uuid2) ? db2Coll1 : db2Coll2; - -// Create a failpoint to pause after one batch of the second database's second collection has been -// cloned. -const fpAfterBatchOfSecondDB = configureFailPoint( - originalRecipientPrimary, - "tenantMigrationHangCollectionClonerAfterHandlingBatchResponse", - {nss: originalRecipientPrimary.getDB(dbName2).getCollection(lastCollection).getFullName()}); - -jsTestLog("Starting tenant migration with migrationId: " + kMigrationId + - ", tenantId: " + kTenantId); -assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - -let res = 0; -let currOp = 0; -jsTestLog("Waiting until one batch of second database has been cloned by original primary."); -fpAfterBatchOfSecondDB.wait(); -// Since documents are inserted on a separate thread, wait until the expected stats are seen. The -// failpoint needs to be maintained so that the next batch isn't processed. -assert.soon(() => { - res = originalRecipientPrimary.adminCommand( - {currentOp: true, desc: "tenant recipient migration"}); - currOp = res.inprog[0]; - - // Wait until one batch of documents of the second database's second collection has been copied. - return currOp.approxTotalBytesCopied > db1Size + db2Collection1Size; -}, res); - -assert.eq(currOp.approxTotalDataSize, db1Size + db2Size, res); -// Since the two collections on the second database are the same size, -// 'db1Size + db2Collection1Size' and 'db1Size + db2Collection2Size' evaluate to the same value. 
-assert.gt(currOp.approxTotalBytesCopied, db1Size + db2Collection1Size, res); -assert.lt(currOp.approxTotalBytesCopied, db1Size + db2Size, res); -assert.eq(currOp.databases.databasesClonedBeforeFailover, 0, res); -assert.eq(currOp.databases[dbName2].clonedCollectionsBeforeFailover, 0, res); -const bytesCopiedIncludingSecondDB = currOp.approxTotalBytesCopied; -jsTestLog("Bytes copied after first batch of second database: " + bytesCopiedIncludingSecondDB); - -// Wait until the batch of the second collection of the second database has been replicated from the -// original primary to the new primary. Then, step up the new primary. -const fpAfterCreatingCollectionOfSecondDB = - configureFailPoint(newRecipientPrimary, "tenantCollectionClonerHangAfterCreateCollection"); -tenantMigrationTest.getRecipientRst().stepUp(newRecipientPrimary); -fpAfterBatchOfSecondDB.off(); - -jsTestLog("Wait until the new primary creates collection of second database."); -fpAfterCreatingCollectionOfSecondDB.wait(); -res = newRecipientPrimary.adminCommand({currentOp: true, desc: "tenant recipient migration"}); -currOp = res.inprog[0]; -assert.eq(currOp.approxTotalDataSize, db1Size + db2Size, res); -assert.eq(currOp.approxTotalBytesCopied, bytesCopiedIncludingSecondDB, res); -assert.eq(currOp.databases.databasesClonedBeforeFailover, 1, res); -assert.eq(currOp.databases[dbName2].clonedCollectionsBeforeFailover, 1, res); -fpAfterCreatingCollectionOfSecondDB.off(); - -// After the migration completes, the total bytes copied should be equal to the total data size. -jsTestLog("Waiting for migration to complete."); -TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); -res = newRecipientPrimary.adminCommand({currentOp: true, desc: "tenant recipient migration"}); -currOp = res.inprog[0]; -assert.eq(currOp.approxTotalDataSize, db1Size + db2Size, res); -assert.eq(currOp.approxTotalBytesCopied, db1Size + db2Size, res); -assert.eq(currOp.databases.databasesClonedBeforeFailover, 1, res); -assert.eq(currOp.databases[dbName2].clonedCollectionsBeforeFailover, 1, res); - -tenantMigrationTest.stop(); diff --git a/jstests/replsets/tenant_migration_clones_system_views.js b/jstests/replsets/tenant_migration_clones_system_views.js index b1a93f254f39f..19ab1007e994b 100644 --- a/jstests/replsets/tenant_migration_clones_system_views.js +++ b/jstests/replsets/tenant_migration_clones_system_views.js @@ -12,6 +12,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); @@ -21,7 +22,7 @@ const donorPrimary = tenantMigrationTest.getDonorPrimary(); const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); const tenantId = ObjectId().str; -const tenantDBName = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const tenantDBName = makeTenantDB(tenantId, "testDB"); const donorTenantDB = donorPrimary.getDB(tenantDBName); const collName = "testColl"; const donorTenantColl = donorTenantDB.getCollection(collName); diff --git a/jstests/replsets/tenant_migration_cloning_uses_read_concern_majority.js b/jstests/replsets/tenant_migration_cloning_uses_read_concern_majority.js index f87de35de4bdc..68b127e69c16e 100644 --- a/jstests/replsets/tenant_migration_cloning_uses_read_concern_majority.js +++ b/jstests/replsets/tenant_migration_cloning_uses_read_concern_majority.js @@ -16,7 +16,7 
@@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' @@ -27,7 +27,7 @@ load("jstests/replsets/rslib.js"); // 'createRstArgs' const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const tenantId = ObjectId().str; -const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/replsets/tenant_migration_cluster_time_keys_cloning.js b/jstests/replsets/tenant_migration_cluster_time_keys_cloning.js index f3737dd4706f2..298d25b199562 100644 --- a/jstests/replsets/tenant_migration_cluster_time_keys_cloning.js +++ b/jstests/replsets/tenant_migration_cluster_time_keys_cloning.js @@ -2,11 +2,8 @@ * Test that tenant migration donor and recipient correctly copy each other cluster time keys into * their config.external_validation_keys collection. * - * TODO (SERVER-61231): Adapt for shard merge. - * * @tags: [ * incompatible_with_macos, - * incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_majority_read_concern, * requires_persistence, @@ -79,6 +76,12 @@ const migrationX509Options = makeX509OptionsForTest(); "when there is no failover."); const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); + if (isShardMergeEnabled(tenantMigrationTest.getDonorPrimary().getDB("adminDB"))) { + jsTestLog("Skip: shard merge does not support concurrent migrations."); + tenantMigrationTest.stop(); + return; + } + const migrationId = UUID(); const migrationOpts = { migrationIdString: extractUUIDFromObject(migrationId), @@ -115,6 +118,13 @@ const migrationX509Options = makeX509OptionsForTest(); const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), recipientRst}); + if (isShardMergeEnabled(tenantMigrationTest.getDonorPrimary().getDB("adminDB"))) { + jsTestLog("Skip: shard merge does not accept secondary readPreference"); + recipientRst.stopSet(); + tenantMigrationTest.stop(); + return; + } + const migrationId = UUID(); const migrationOpts = { migrationIdString: extractUUIDFromObject(migrationId), @@ -140,7 +150,7 @@ const migrationX509Options = makeX509OptionsForTest(); donorRst.startSet(); donorRst.initiate(); if (isShardMergeEnabled(donorRst.getPrimary().getDB("adminDB"))) { - jsTestLog("Skip: featureFlagShardMerge enabled, but shard merge does not survive failover"); + jsTestLog("Skip: shard merge does not survive failover"); donorRst.stopSet(); return; } @@ -177,55 +187,57 @@ const migrationX509Options = makeX509OptionsForTest(); tenantMigrationTest.stop(); })(); -(() => { - jsTest.log("Test that the donor and recipient correctly copy each other's cluster time keys " + - "when there is recipient failover."); - const recipientRst = new ReplSetTest({ - nodes: 3, - name: "recipientRst", - serverless: true, - nodeOptions: migrationX509Options.recipient - }); - recipientRst.startSet(); - recipientRst.initiate(); - if (isShardMergeEnabled(recipientRst.getPrimary().getDB("adminDB"))) { - jsTestLog("Skip: featureFlagShardMerge enabled, but shard merge does not survive failover"); - recipientRst.stopSet(); - return; - } - - const tenantMigrationTest = new 
TenantMigrationTest({name: jsTestName(), recipientRst}); - - const recipientPrimary = recipientRst.getPrimary(); - const fp = configureFailPoint(recipientPrimary, - "fpAfterPersistingTenantMigrationRecipientInstanceStateDoc", - {action: "hang"}); - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - tenantId: kTenantId1, - }; - assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - fp.wait(); - - assert.commandWorked( - recipientPrimary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); - - fp.off(); - TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete( - migrationOpts, true /* retryOnRetryableErrors */)); - - assertCopiedExternalKeys(tenantMigrationTest, migrationId); - - // After another migration, the first's keys should still exist. - runMigrationAndAssertExternalKeysCopied(tenantMigrationTest, kTenantId2); - assertCopiedExternalKeys(tenantMigrationTest, migrationId); - - recipientRst.stopSet(); - tenantMigrationTest.stop(); -})(); +// TODO SERVER-76128: Tenant Migrations are not robust to recipient failover. +// (() => { +// jsTest.log("Test that the donor and recipient correctly copy each other's cluster time keys " +// + +// "when there is recipient failover."); +// const recipientRst = new ReplSetTest({ +// nodes: 3, +// name: "recipientRst", +// serverless: true, +// nodeOptions: migrationX509Options.recipient +// }); +// recipientRst.startSet(); +// recipientRst.initiate(); +// if (isShardMergeEnabled(recipientRst.getPrimary().getDB("adminDB"))) { +// jsTestLog("Skip: shard merge does not survive failover"); +// recipientRst.stopSet(); +// return; +// } + +// const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), recipientRst}); + +// const recipientPrimary = recipientRst.getPrimary(); +// const fp = configureFailPoint(recipientPrimary, +// "fpAfterPersistingTenantMigrationRecipientInstanceStateDoc", +// {action: "hang"}); + +// const migrationId = UUID(); +// const migrationOpts = { +// migrationIdString: extractUUIDFromObject(migrationId), +// tenantId: kTenantId1, +// }; +// assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); +// fp.wait(); + +// assert.commandWorked( +// recipientPrimary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true})); +// assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); + +// fp.off(); +// TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete( +// migrationOpts, true /* retryOnRetryableErrors */)); + +// assertCopiedExternalKeys(tenantMigrationTest, migrationId); + +// // After another migration, the first's keys should still exist. 
+// runMigrationAndAssertExternalKeysCopied(tenantMigrationTest, kTenantId2); +// assertCopiedExternalKeys(tenantMigrationTest, migrationId); + +// recipientRst.stopSet(); +// tenantMigrationTest.stop(); +// })(); (() => { jsTest.log("Test that the donor waits for copied external keys to replicate to every node"); @@ -243,8 +255,7 @@ const migrationX509Options = makeX509OptionsForTest(); function runTest(tenantId, withFailover) { if (withFailover && isShardMergeEnabled(donorRst.getPrimary().getDB("adminDB"))) { - jsTestLog( - "Skip: featureFlagShardMerge enabled, but shard merge does not survive failover"); + jsTestLog("Skip: shard merge does not survive failover"); tenantMigrationTest.stop(); return; } diff --git a/jstests/replsets/tenant_migration_collection_rename.js b/jstests/replsets/tenant_migration_collection_rename.js index 6b9171b7dc8bf..cd8fe6d8717f0 100644 --- a/jstests/replsets/tenant_migration_collection_rename.js +++ b/jstests/replsets/tenant_migration_collection_rename.js @@ -1,12 +1,11 @@ /** * Tests that tenant migrations aborts without crashing when a donor collection is renamed. * - * TODO SERVER-61231: shard merge does not use collection cloner, so we need another way - * to pause the migration at the correct time. What should shard merge behavior be for - * renaming a collection while a migration is underway? adapt this test - * * @tags: [ * incompatible_with_macos, + * # Shard merge uses a backup cursor for copying files. Renaming a collection during file copy + * # shouldn't affect the backup cursor. We expect this test coverage to already be provided by the + * # backup cursor. * incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_fcv_52, @@ -17,7 +16,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallelTester.js"); @@ -34,7 +33,7 @@ function insertData(collection) { const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kCollectionName = "toBeRenamed"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); diff --git a/jstests/replsets/tenant_migration_collection_ttl.js b/jstests/replsets/tenant_migration_collection_ttl.js index 105fa1d310887..eb6ef85b4c6f3 100644 --- a/jstests/replsets/tenant_migration_collection_ttl.js +++ b/jstests/replsets/tenant_migration_collection_ttl.js @@ -13,7 +13,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; +import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -115,7 +115,7 @@ function assertTTLDeleteExpiredDocs(dbName, node) { jsTest.log("Test that the TTL does not delete documents on recipient during cloning"); const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const migrationId = UUID(); const migrationOpts = { @@ -172,7 +172,7 @@ 
function assertTTLDeleteExpiredDocs(dbName, node) { "Test that the TTL does not delete documents on recipient before migration is forgotten"); const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const migrationId = UUID(); const migrationOpts = { @@ -191,8 +191,7 @@ function assertTTLDeleteExpiredDocs(dbName, node) { let blockFp = configureFailPoint(donorPrimary, "pauseTenantMigrationBeforeLeavingBlockingState"); - assert.commandWorked( - tenantMigrationTest.startMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); + assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); blockFp.wait(); // At a very slow machine, there is a chance that a TTL cycle happened at the donor diff --git a/jstests/replsets/tenant_migration_commit_transaction_retry.js b/jstests/replsets/tenant_migration_commit_transaction_retry.js index f2f0d946584d7..885288ff7f9cb 100644 --- a/jstests/replsets/tenant_migration_commit_transaction_retry.js +++ b/jstests/replsets/tenant_migration_commit_transaction_retry.js @@ -11,6 +11,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/replsets/rslib.js"); load("jstests/libs/uuid_util.js"); @@ -18,7 +19,7 @@ const tenantMigrationTest = new TenantMigrationTest( {name: jsTestName(), sharedOptions: {nodes: 1}, quickGarbageCollection: true}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kCollName = "testColl"; const kNs = `${kDbName}.${kCollName}`; @@ -113,8 +114,7 @@ const migrationOpts2 = { migrationIdString: extractUUIDFromObject(migrationId2), tenantId: kTenantId, }; -TenantMigrationTest.assertCommitted( - tenantMigrationTest2.runMigration(migrationOpts2, {enableDonorStartMigrationFsync: true})); +TenantMigrationTest.assertCommitted(tenantMigrationTest2.runMigration(migrationOpts2)); const recipientPrimary2 = tenantMigrationTest2.getRecipientPrimary(); const recipientTxnEntries2 = recipientPrimary2.getDB("config")["transactions"].find().toArray(); jsTestLog(`Recipient2 config.transactions: ${tojson(recipientTxnEntries2)}`); diff --git a/jstests/replsets/tenant_migration_concurrent_bulk_writes.js b/jstests/replsets/tenant_migration_concurrent_bulk_writes.js index 6e7d1ca5fd283..b980ad5939a7e 100644 --- a/jstests/replsets/tenant_migration_concurrent_bulk_writes.js +++ b/jstests/replsets/tenant_migration_concurrent_bulk_writes.js @@ -12,8 +12,9 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; import { + isShardMergeEnabled, + makeTenantDB, makeX509OptionsForTest, runMigrationAsync } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -178,7 +179,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { tenantId, }; - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -231,7 +232,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { }; const donorRstArgs = createRstArgs(donorRst); - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + 
const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -295,7 +296,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { }; const donorRstArgs = createRstArgs(donorRst); - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -363,7 +364,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { tenantId, }; - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -410,7 +411,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { }; const donorRstArgs = createRstArgs(donorRst); - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -466,7 +467,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { }; const donorRstArgs = createRstArgs(donorRst); - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -530,7 +531,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { }; const donorRstArgs = createRstArgs(donorRst); - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -582,7 +583,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { }; const donorRstArgs = createRstArgs(donorRst); - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -632,7 +633,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { tenantId, }; - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -671,7 +672,7 @@ function bulkMultiUpdateDocsUnordered(primaryHost, dbName, collName, numDocs) { tenantId, }; - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); diff --git a/jstests/replsets/tenant_migration_concurrent_migrations_recipient.js b/jstests/replsets/tenant_migration_concurrent_migrations_recipient.js index 7f39f94516513..5405e337c3232 100644 --- a/jstests/replsets/tenant_migration_concurrent_migrations_recipient.js +++ b/jstests/replsets/tenant_migration_concurrent_migrations_recipient.js @@ -15,6 +15,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; 
load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' @@ -30,7 +31,7 @@ const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); const tenantIds = [...Array(50).keys()].map(() => ObjectId().str); let migrationOptsArray = []; tenantIds.forEach((tenantId) => { - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; tenantMigrationTest.insertDonorDB(dbName, collName, [{_id: 1}]); const migrationId = UUID(); diff --git a/jstests/replsets/tenant_migration_concurrent_migrations_stress_test.js b/jstests/replsets/tenant_migration_concurrent_migrations_stress_test.js index 25285401aab0a..48d66a1f6083b 100644 --- a/jstests/replsets/tenant_migration_concurrent_migrations_stress_test.js +++ b/jstests/replsets/tenant_migration_concurrent_migrations_stress_test.js @@ -1,12 +1,11 @@ /** * Stress test runs many concurrent migrations against the same recipient. * - * TODO SERVER-61231: shard merge can't handle concurrent migrations. - * * @tags: [ * incompatible_with_amazon_linux, * incompatible_with_macos, * incompatible_with_windows_tls, + * # Shard merge does not allow concurrent migrations. * incompatible_with_shard_merge, * requires_majority_read_concern, * requires_persistence, @@ -17,6 +16,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' load("jstests/replsets/rslib.js"); // for 'setLogVerbosity' @@ -49,7 +49,7 @@ let migrationOptsArray = []; let tenantToIndexMap = {}; let idx = 0; tenantIds.forEach((tenantId) => { - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; tenantMigrationTest.insertDonorDB(dbName, collName, [{_id: 1}]); const migrationId = UUID(); @@ -88,7 +88,7 @@ function retryAbortedMigration(idx) { assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpt.migrationIdString)); // Drop recipient DB. - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); let db = recipientPrimary.getDB(dbName); try { db.dropDatabase(); diff --git a/jstests/replsets/tenant_migration_concurrent_reads_on_recipient.js b/jstests/replsets/tenant_migration_concurrent_reads_on_recipient.js index 4230e8c1c0f05..7486b4b4fca66 100644 --- a/jstests/replsets/tenant_migration_concurrent_reads_on_recipient.js +++ b/jstests/replsets/tenant_migration_concurrent_reads_on_recipient.js @@ -14,6 +14,8 @@ * requires_majority_read_concern, * requires_persistence, * serverless, + * # The error code for a rejected recipient command invoked during the reject phase was changed. + * requires_fcv_71, * ] */ @@ -39,7 +41,8 @@ function runCommand(db, cmd, expectedError) { if (expectedError == ErrorCodes.SnapshotTooOld) { // Verify that SnapshotTooOld error is due to migration conflict not due to the read // timestamp being older than the oldest available timestamp. - assert.eq(res.errmsg, "Tenant read is not allowed before migration completes"); + assert.eq(res.errmsg, + "Tenant command 'find' is not allowed before migration completes"); } } else { assert.commandWorked(res); @@ -84,7 +87,7 @@ function testRejectAllReadsAfterCloningDone({testCase, dbName, collName, tenantM ? 
testCase.command(collName, getLastOpTime(node).ts) : testCase.command(collName); const db = node.getDB(dbName); - runCommand(db, command, ErrorCodes.SnapshotTooOld); + runCommand(db, command, ErrorCodes.IllegalOperation); }); beforeFetchingTransactionsFp.off(); @@ -128,12 +131,10 @@ function testRejectOnlyReadsWithAtClusterTimeLessThanRejectReadsBeforeTimestamp( // unspecified atClusterTime have read timestamp >= rejectReadsBeforeTimestamp. recipientRst.awaitLastOpCommitted(); - const recipientStateDocNss = isShardMergeEnabled(recipientPrimary.getDB("admin")) - ? TenantMigrationTest.kConfigShardMergeRecipientsNS - : TenantMigrationTest.kConfigRecipientsNS; - const recipientDoc = recipientPrimary.getCollection(recipientStateDocNss).findOne({ - _id: UUID(migrationOpts.migrationIdString), - }); + const recipientDoc = + recipientPrimary.getCollection(tenantMigrationTest.configRecipientsNs).findOne({ + _id: UUID(migrationOpts.migrationIdString), + }); assert.lt(preMigrationTimestamp, recipientDoc.rejectReadsBeforeTimestamp); const nodes = testCase.isSupportedOnSecondaries ? recipientRst.nodes : [recipientPrimary]; @@ -203,10 +204,11 @@ function testDoNotRejectReadsAfterMigrationAbortedBeforeReachingRejectReadsBefor nodes.forEach(node => { const db = node.getDB(dbName); if (testCase.requiresReadTimestamp) { - runCommand( - db, testCase.command(collName, getLastOpTime(node).ts), ErrorCodes.SnapshotTooOld); + runCommand(db, + testCase.command(collName, getLastOpTime(node).ts), + ErrorCodes.IllegalOperation); } else { - runCommand(db, testCase.command(collName), ErrorCodes.SnapshotTooOld); + runCommand(db, testCase.command(collName), ErrorCodes.IllegalOperation); } }); @@ -275,12 +277,10 @@ function testDoNotRejectReadsAfterMigrationAbortedAfterReachingRejectReadsBefore // unspecified atClusterTime have read timestamp >= rejectReadsBeforeTimestamp. recipientRst.awaitLastOpCommitted(); - const recipientStateDocNss = isShardMergeEnabled(recipientPrimary.getDB("admin")) - ? TenantMigrationTest.kConfigShardMergeRecipientsNS - : TenantMigrationTest.kConfigRecipientsNS; - const recipientDoc = recipientPrimary.getCollection(recipientStateDocNss).findOne({ - _id: UUID(migrationOpts.migrationIdString), - }); + const recipientDoc = + recipientPrimary.getCollection(tenantMigrationTest.configRecipientsNs).findOne({ + _id: UUID(migrationOpts.migrationIdString), + }); const nodes = testCase.isSupportedOnSecondaries ? 
recipientRst.nodes : [recipientPrimary]; nodes.forEach(node => { diff --git a/jstests/replsets/tenant_migration_concurrent_reconfig.js b/jstests/replsets/tenant_migration_concurrent_reconfig.js index e766b7c534211..79a754316885b 100644 --- a/jstests/replsets/tenant_migration_concurrent_reconfig.js +++ b/jstests/replsets/tenant_migration_concurrent_reconfig.js @@ -11,6 +11,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' @@ -18,7 +19,7 @@ function runTest({failPoint, shouldFail = false}) { const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const donorRst = tenantMigrationTest.getDonorRst(); const donorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_aborted.js b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_aborted.js index 7cbe5af8db914..912cc114cc644 100644 --- a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_aborted.js +++ b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_aborted.js @@ -134,7 +134,6 @@ let abortFp = configureFailPoint(donorPrimary, "abortTenantMigrationBeforeLeavin TenantMigrationTest.assertAborted(tenantMigrationTest.runMigration(migrationOpts, { retryOnRetryableErrors: false, automaticForgetMigration: false, - enableDonorStartMigrationFsync: true })); // Allow the migration to complete and abort. diff --git a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking.js b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking.js index dd2b09321bc31..4238481c3ce9b 100644 --- a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking.js +++ b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking.js @@ -155,8 +155,7 @@ function runTestsAfterMigrationCommitted() { setupTestsBeforeMigration(); -assert.commandWorked( - tenantMigrationTest.startMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); +assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); // Run the command after the migration enters the blocking state. let blockFp = configureFailPoint(donorPrimary, "pauseTenantMigrationBeforeLeavingBlockingState"); diff --git a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking_then_aborted.js b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking_then_aborted.js index 2dc3e2546b9fb..0092c26e74747 100644 --- a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking_then_aborted.js +++ b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking_then_aborted.js @@ -67,8 +67,7 @@ function testRejectBlockedWritesAfterMigrationAborted(testCase, testOpts) { new Thread(resumeMigrationAfterBlockingWrite, testOpts.primaryHost, tenantId, 1); // Run the command after the migration enters the blocking state. 
- assert.commandWorked( - tenantMigrationTest.startMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); + assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); resumeMigrationThread.start(); blockingFp.wait(); diff --git a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking_then_committed.js b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking_then_committed.js index 17da4d0a93e14..965fc49bc910a 100644 --- a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking_then_committed.js +++ b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_blocking_then_committed.js @@ -66,8 +66,7 @@ function testRejectBlockedWritesAfterMigrationCommitted(testCase, testOpts) { // Run the command after the migration enters the blocking state. resumeMigrationThread.start(); - assert.commandWorked( - tenantMigrationTest.startMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); + assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); blockingFp.wait(); // The migration should unpause and commit after the write is blocked. Verify that the write is diff --git a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_committed.js b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_committed.js index f77889c42c916..bcad804c7896e 100644 --- a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_committed.js +++ b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_committed.js @@ -122,7 +122,6 @@ setupTestsBeforeMigration(); TenantMigrationTest.assertCommitted(tenantMigrationTest.runMigration(migrationOpts, { retryOnRetryableErrors: false, automaticForgetMigration: false, - enableDonorStartMigrationFsync: true })); // run the tests after the migration has committed. diff --git a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_util.js b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_util.js index 5473c68cf4ce3..db3d2c606abe8 100644 --- a/jstests/replsets/tenant_migration_concurrent_writes_on_donor_util.js +++ b/jstests/replsets/tenant_migration_concurrent_writes_on_donor_util.js @@ -292,6 +292,7 @@ export const TenantMigrationConcurrentWriteUtil = { _recvChunkReleaseCritSec: {skip: isNotRunOnUserDatabase}, _recvChunkStart: {skip: isNotRunOnUserDatabase}, _recvChunkStatus: {skip: isNotRunOnUserDatabase}, + _shardsvrCleanupStructuredEncryptionData: {skip: isOnlySupportedOnShardedCluster}, _shardsvrCloneCatalogData: {skip: isNotRunOnUserDatabase}, _shardsvrCommitIndexParticipant: {skip: isOnlySupportedOnShardedCluster}, _shardsvrCompactStructuredEncryptionData: {skip: isOnlySupportedOnShardedCluster}, diff --git a/jstests/replsets/tenant_migration_concurrent_writes_on_recipient.js b/jstests/replsets/tenant_migration_concurrent_writes_on_recipient.js index 5ce9f27a1285b..f31cc91247039 100644 --- a/jstests/replsets/tenant_migration_concurrent_writes_on_recipient.js +++ b/jstests/replsets/tenant_migration_concurrent_writes_on_recipient.js @@ -8,6 +8,8 @@ * requires_majority_read_concern, * requires_persistence, * serverless, + * # The error code for a rejected recipient command invoked during the reject phase was changed. + * requires_fcv_71, * ] */ @@ -67,14 +69,15 @@ function cleanup(dbName) { if (!isShardMergeEnabled(donorPrimary.getDB("adminDB"))) { // Write before cloning is done. 
assert.commandFailedWithCode(tenantCollOnRecipient.remove({_id: 1}), - ErrorCodes.SnapshotTooOld); + ErrorCodes.IllegalOperation); } startOplogFetcherFp.off(); beforeFetchingTransactionsFp.wait(); - // Write after cloning is done should fail with SnapshotTooOld since no read is allowed. - assert.commandFailedWithCode(tenantCollOnRecipient.remove({_id: 1}), ErrorCodes.SnapshotTooOld); + // Write after cloning is done should fail with IllegalOperation since no read is allowed. + assert.commandFailedWithCode(tenantCollOnRecipient.remove({_id: 1}), + ErrorCodes.IllegalOperation); beforeFetchingTransactionsFp.off(); waitForRejectReadsBeforeTsFp.wait(); @@ -127,7 +130,8 @@ function cleanup(dbName) { abortFp.off(); // Write after the migration aborted. - assert.commandFailedWithCode(tenantCollOnRecipient.remove({_id: 1}), ErrorCodes.SnapshotTooOld); + assert.commandFailedWithCode(tenantCollOnRecipient.remove({_id: 1}), + ErrorCodes.IllegalOperation); assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); diff --git a/jstests/replsets/tenant_migration_donor_initial_sync_cloning.js b/jstests/replsets/tenant_migration_donor_initial_sync_cloning.js index 620031703ebf2..0c83ba98c404d 100644 --- a/jstests/replsets/tenant_migration_donor_initial_sync_cloning.js +++ b/jstests/replsets/tenant_migration_donor_initial_sync_cloning.js @@ -25,6 +25,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); load('jstests/replsets/rslib.js'); // for waitForNewlyAddedRemovalForNodeToBeCommitted @@ -49,7 +50,7 @@ const migrationOpts = { migrationIdString: extractUUIDFromObject(UUID()), tenantId: tenantId }; -const dbName = tenantMigrationTest.tenantDB(tenantId, testDBName); +const dbName = makeTenantDB(tenantId, testDBName); const donorRst = tenantMigrationTest.getDonorRst(); const originalDonorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/replsets/tenant_migration_donor_initial_sync_recovery.js b/jstests/replsets/tenant_migration_donor_initial_sync_recovery.js index 4d08a30524a36..e54da59798f56 100644 --- a/jstests/replsets/tenant_migration_donor_initial_sync_recovery.js +++ b/jstests/replsets/tenant_migration_donor_initial_sync_recovery.js @@ -149,7 +149,7 @@ if (donorDoc) { donorDoc.blockTimestamp) == 0); break; default: - throw new Error(`Invalid state "${state}" from donor doc.`); + throw new Error(`Invalid state "${donorDoc.state}" from donor doc.`); } } diff --git a/jstests/replsets/tenant_migration_donor_kill_op_retry.js b/jstests/replsets/tenant_migration_donor_kill_op_retry.js index 69dc32f9853f2..d1969579ebbb4 100644 --- a/jstests/replsets/tenant_migration_donor_kill_op_retry.js +++ b/jstests/replsets/tenant_migration_donor_kill_op_retry.js @@ -44,7 +44,7 @@ function makeTenantId() { let fpNames = [ "pauseTenantMigrationBeforeInsertingDonorStateDoc", "pauseTenantMigrationDonorWhileUpdatingStateDoc", - "pauseTenantMigrationBeforeStoringExternalClusterTimeKeyDocs" + "pauseTenantMigrationDonorBeforeStoringExternalClusterTimeKeyDocs" ]; for (let fpName of fpNames) { jsTestLog("Setting failpoint \"" + fpName + diff --git a/jstests/replsets/tenant_migration_donor_resume_on_stepup_and_restart.js b/jstests/replsets/tenant_migration_donor_resume_on_stepup_and_restart.js deleted file mode 100644 index b3b158c7134c4..0000000000000 --- 
a/jstests/replsets/tenant_migration_donor_resume_on_stepup_and_restart.js +++ /dev/null @@ -1,488 +0,0 @@ -/** - * Tests that tenant migrations resume successfully on donor stepup and restart. - * - * Incompatible with shard merge, which can't handle restart. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * # Some tenant migration statistics field names were changed in 6.1. - * requires_fcv_61, - * requires_majority_read_concern, - * requires_persistence, - * # Tenant migrations are only used in serverless. - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - forgetMigrationAsync, - isShardMergeEnabled, - makeX509OptionsForTest, - runMigrationAsync, - tryAbortMigrationAsync -} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/parallelTester.js"); -load("jstests/libs/uuid_util.js"); -load("jstests/replsets/rslib.js"); // 'createRstArgs' - -const kMaxSleepTimeMS = 100; -const kTenantId = ObjectId().str; -const kMigrationFpNames = [ - "pauseTenantMigrationBeforeLeavingDataSyncState", - "pauseTenantMigrationBeforeLeavingBlockingState", - "abortTenantMigrationBeforeLeavingBlockingState", - "" -]; - -// Set the delay before a state doc is garbage collected to be short to speed up the test but long -// enough for the state doc to still be around after stepup or restart. -const kGarbageCollectionDelayMS = 30 * 1000; - -// Set the TTL monitor to run at a smaller interval to speed up the test. -const kTTLMonitorSleepSecs = 1; - -const migrationX509Options = makeX509OptionsForTest(); - -/** - * Runs the donorStartMigration command to start a migration, and interrupts the migration on the - * donor using the 'interruptFunc', and asserts that migration eventually commits. - */ -function testDonorStartMigrationInterrupt(interruptFunc, - {donorRestarted = false, disableForShardMerge = true}) { - const donorRst = new ReplSetTest( - {nodes: 3, name: "donorRst", serverless: true, nodeOptions: migrationX509Options.donor}); - - donorRst.startSet(); - donorRst.initiate(); - - const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRst}); - - let donorPrimary = tenantMigrationTest.getDonorPrimary(); - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - - if (disableForShardMerge && isShardMergeEnabled(recipientPrimary.getDB("admin"))) { - jsTest.log("Skipping test for shard merge"); - tenantMigrationTest.stop(); - donorRst.stopSet(); - return; - } - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - tenantId: kTenantId, - recipientConnString: tenantMigrationTest.getRecipientConnString(), - }; - const donorRstArgs = createRstArgs(donorRst); - - const runMigrationThread = - new Thread(runMigrationAsync, migrationOpts, donorRstArgs, {retryOnRetryableErrors: true}); - runMigrationThread.start(); - - // Wait for donorStartMigration command to start. 
- assert.soon(() => donorPrimary.adminCommand({currentOp: true, desc: "tenant donor migration"}) - .inprog.length > 0); - - sleep(Math.random() * kMaxSleepTimeMS); - interruptFunc(donorRst); - - TenantMigrationTest.assertCommitted(runMigrationThread.returnData()); - tenantMigrationTest.waitForDonorNodesToReachState(donorRst.nodes, - migrationId, - migrationOpts.tenantId, - TenantMigrationTest.DonorState.kCommitted); - assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); - - donorPrimary = tenantMigrationTest.getDonorPrimary(); // Could change after interrupt. - const donorStats = tenantMigrationTest.getTenantMigrationStats(donorPrimary); - jsTestLog(`Stats at the donor primary: ${tojson(donorStats)}`); - if (donorRestarted) { - // If full restart happened the count could be lost completely. - assert.gte(1, donorStats.totalMigrationDonationsCommitted); - } else { - // The double counting happens when the failover happens after migration completes - // but before the state doc GC mark is persisted. While this test is targeting this - // scenario it is low probability in production. - assert(1 == donorStats.totalMigrationDonationsCommitted || - 2 == donorStats.totalMigrationDonationsCommitted); - } - // Skip checking the stats on the recipient since enableRecipientTesting is false - // so the recipient is forced to respond to recipientSyncData without starting the - // migration. - - tenantMigrationTest.stop(); - donorRst.stopSet(); -} - -/** - * Starts a migration and waits for it to commit, then runs the donorForgetMigration, and interrupts - * the donor using the 'interruptFunc', and asserts that the migration state is eventually garbage - * collected. - */ -function testDonorForgetMigrationInterrupt(interruptFunc) { - const donorRst = new ReplSetTest({ - nodes: 3, - name: "donorRst", - serverless: true, - nodeOptions: Object.assign({}, migrationX509Options.donor, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: kGarbageCollectionDelayMS, - ttlMonitorSleepSecs: kTTLMonitorSleepSecs, - } - }) - }); - const recipientRst = new ReplSetTest({ - nodes: 1, - name: "recipientRst", - serverless: true, - nodeOptions: Object.assign({}, migrationX509Options.recipient, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: kGarbageCollectionDelayMS, - ttlMonitorSleepSecs: kTTLMonitorSleepSecs, - } - }) - }); - - donorRst.startSet(); - donorRst.initiate(); - - recipientRst.startSet(); - recipientRst.initiate(); - - const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), donorRst, recipientRst}); - - const donorPrimary = tenantMigrationTest.getDonorPrimary(); - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - tenantId: kTenantId, - recipientConnString: recipientRst.getURL(), - }; - const donorRstArgs = createRstArgs(donorRst); - - TenantMigrationTest.assertCommitted( - tenantMigrationTest.runMigration(migrationOpts, {automaticForgetMigration: false})); - const forgetMigrationThread = new Thread(forgetMigrationAsync, - migrationOpts.migrationIdString, - donorRstArgs, - true /* retryOnRetryableErrors */); - forgetMigrationThread.start(); - - // Wait for donorForgetMigration command to start. 
- assert.soon(() => { - const res = assert.commandWorked( - donorPrimary.adminCommand({currentOp: true, desc: "tenant donor migration"})); - return res.inprog[0].expireAt != null; - }); - sleep(Math.random() * kMaxSleepTimeMS); - interruptFunc(donorRst); - - assert.commandWorkedOrFailedWithCode( - tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString), - ErrorCodes.NoSuchTenantMigration); - - assert.commandWorked(forgetMigrationThread.returnData()); - tenantMigrationTest.waitForMigrationGarbageCollection(migrationId, migrationOpts.tenantId); - - tenantMigrationTest.stop(); - donorRst.stopSet(); - recipientRst.stopSet(); -} - -/** - * Starts a migration and sets the passed in failpoint, then runs the donorAbortMigration, and - * interrupts the donor using the 'interruptFunc', and asserts that the migration state is - * eventually garbage collected. - */ -function testDonorAbortMigrationInterrupt( - interruptFunc, fpName, {fpWaitBeforeAbort = false, isShutdown = false} = {}) { - const donorRst = new ReplSetTest({ - nodes: 3, - name: "donorRst", - serverless: true, - nodeOptions: Object.assign({}, migrationX509Options.donor, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: kGarbageCollectionDelayMS, - ttlMonitorSleepSecs: kTTLMonitorSleepSecs, - } - }) - }); - const recipientRst = new ReplSetTest({ - nodes: 1, - name: "recipientRst", - serverless: true, - nodeOptions: Object.assign({}, migrationX509Options.recipient, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: kGarbageCollectionDelayMS, - ttlMonitorSleepSecs: kTTLMonitorSleepSecs, - } - }) - }); - - donorRst.startSet(); - donorRst.initiate(); - - recipientRst.startSet(); - recipientRst.initiate(); - - const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), donorRst, recipientRst}); - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - tenantId: kTenantId, - recipientConnString: recipientRst.getURL(), - }; - const donorRstArgs = createRstArgs(donorRst); - let donorPrimary = tenantMigrationTest.getDonorPrimary(); - - // If we passed in a valid failpoint we set it, otherwise we let the migration run normally. - let fp; - if (fpName) { - fp = configureFailPoint(donorPrimary, fpName); - } - - assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - - if (fp && !isShutdown && fpWaitBeforeAbort) { - fp.wait(); - } - - const tryAbortThread = new Thread(tryAbortMigrationAsync, - {migrationIdString: migrationOpts.migrationIdString}, - donorRstArgs, - true /* retryOnRetryableErrors */); - tryAbortThread.start(); - - // Wait for donorAbortMigration command to start. - assert.soon(() => { - const res = assert.commandWorked( - donorPrimary.adminCommand({currentOp: true, desc: "tenant donor migration"})); - return res.inprog[0].receivedCancellation; - }); - - interruptFunc(donorRst); - - if (fp && !isShutdown) { - // Turn off failpoint in order to allow the migration to resume after stepup. 
- fp.off(); - } - - tryAbortThread.join(); - - let res = tryAbortThread.returnData(); - assert.commandWorkedOrFailedWithCode(res, ErrorCodes.TenantMigrationCommitted); - - donorPrimary = tenantMigrationTest.getDonorPrimary(); - let configDonorsColl = donorPrimary.getCollection(TenantMigrationTest.kConfigDonorsNS); - let donorDoc = configDonorsColl.findOne({tenantId: kTenantId}); - - if (!res.ok) { - assert.eq(donorDoc.state, TenantMigrationTest.DonorState.kCommitted); - } else { - assert.eq(donorDoc.state, TenantMigrationTest.DonorState.kAborted); - } - - tenantMigrationTest.stop(); - donorRst.stopSet(); - recipientRst.stopSet(); -} - -/** - * Starts a migration and sets the passed in failpoint, then either waits for the failpoint or lets - * the migration run successfully and interrupts the donor using the 'interruptFunc'. After - * restarting, check the to see if the donorDoc data has persisted. - */ -function testStateDocPersistenceOnFailover(interruptFunc, fpName, isShutdown = false) { - const donorRst = new ReplSetTest( - {nodes: 3, name: "donorRst", serverless: true, nodeOptions: migrationX509Options.donor}); - - donorRst.startSet(); - donorRst.initiate(); - - const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRst}); - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - tenantId: kTenantId, - recipientConnString: tenantMigrationTest.getRecipientConnString(), - }; - let donorPrimary = tenantMigrationTest.getDonorPrimary(); - - // If we passed in a valid failpoint we set it, otherwise we let the migration run normally. - let fp; - if (fpName) { - fp = configureFailPoint(donorPrimary, fpName); - assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - fp.wait(); - } else { - TenantMigrationTest.assertCommitted(tenantMigrationTest.runMigration(migrationOpts)); - } - - let configDonorsColl = donorPrimary.getCollection(TenantMigrationTest.kConfigDonorsNS); - let donorDocBeforeFailover = configDonorsColl.findOne({tenantId: kTenantId}); - - interruptFunc(tenantMigrationTest.getDonorRst()); - - if (fp && !isShutdown) { - // Turn off failpoint in order to allow the migration to resume after stepup. - fp.off(); - } - - donorPrimary = tenantMigrationTest.getDonorPrimary(); - configDonorsColl = donorPrimary.getCollection(TenantMigrationTest.kConfigDonorsNS); - let donorDocAfterFailover = configDonorsColl.findOne({tenantId: kTenantId}); - - // Check persisted fields in the donor doc. 
- assert.eq(donorDocBeforeFailover._id, donorDocAfterFailover._id); - assert.eq(donorDocBeforeFailover.recipientConnString, - donorDocAfterFailover.recipientConnString); - assert.eq(donorDocBeforeFailover.readPreference, donorDocAfterFailover.readPreference); - assert.eq(donorDocBeforeFailover.startMigrationDonorTimestamp, - donorDocAfterFailover.startMigrationDonorTimestamp); - assert.eq(donorDocBeforeFailover.migration, donorDocAfterFailover.migration); - assert.eq(donorDocBeforeFailover.tenantId, donorDocAfterFailover.tenantId); - assert.eq(donorDocBeforeFailover.donorCertificateForRecipient, - donorDocAfterFailover.donorCertificateForRecipient); - assert.eq(donorDocBeforeFailover.recipientCertificateForDonor, - donorDocAfterFailover.recipientCertificateForDonor); - assert.eq(donorDocBeforeFailover.migrationStart, donorDocAfterFailover.migrationStart); - - tenantMigrationTest.stop(); - donorRst.stopSet(); -} - -(() => { - jsTest.log("Test that the migration resumes on stepup"); - testDonorStartMigrationInterrupt((donorRst) => { - // Force the primary to step down but make it likely to step back up. - const donorPrimary = donorRst.getPrimary(); - assert.commandWorked( - donorPrimary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(donorPrimary.adminCommand({replSetFreeze: 0})); - }, {donorRestarted: false}); -})(); - -(() => { - jsTest.log("Test that the migration resumes after restart"); - testDonorStartMigrationInterrupt((donorRst) => { - // Skip validation on shutdown because the full validation can conflict with the tenant - // migration and cause it to fail. - donorRst.stopSet(null /* signal */, true /*forRestart */, {skipValidation: true}); - donorRst.startSet({restart: true}); - }, {donorRestarted: true, disableForShardMerge: true}); -})(); - -(() => { - jsTest.log("Test that the donorForgetMigration command can be retried on stepup"); - testDonorForgetMigrationInterrupt((donorRst) => { - // Force the primary to step down but make it likely to step back up. - const donorPrimary = donorRst.getPrimary(); - assert.commandWorked( - donorPrimary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(donorPrimary.adminCommand({replSetFreeze: 0})); - }); -})(); - -(() => { - jsTest.log("Test that the donorForgetMigration command can be retried after restart"); - testDonorForgetMigrationInterrupt((donorRst) => { - // Skip validation on shutdown because the full validation can conflict with the tenant - // migration and cause it to fail. - donorRst.stopSet(null /* signal */, true /*forRestart */, {skipValidation: true}); - donorRst.startSet({restart: true}); - }); -})(); - -(() => { - jsTest.log("Test that the donorAbortMigration command can be retried after restart"); - - kMigrationFpNames.forEach(fpName => { - if (!fpName) { - jsTest.log("Testing without setting a failpoint."); - } else { - jsTest.log("Testing with failpoint: " + fpName); - } - - testDonorAbortMigrationInterrupt((donorRst) => { - // Skip validation on shutdown because the full validation can conflict with the tenant - // migration and cause it to fail. 
- donorRst.stopSet(null /* signal */, true /*forRestart */, {skipValidation: true}); - donorRst.startSet({restart: true}); - }, fpName, {isShutdown: true}); - }); -})(); - -(() => { - jsTest.log( - "Test that the donorAbortMigration command fails if issued after state == kCommitted"); - - testDonorAbortMigrationInterrupt((donorRst) => {}, - "pauseTenantMigrationAfterUpdatingToCommittedState", - {fpWaitBeforeAbort: true}); -})(); - -(() => { - jsTest.log("Test that the donorAbortMigration command can be retried on stepup"); - kMigrationFpNames.forEach(fpName => { - if (!fpName) { - jsTest.log("Testing without setting a failpoint."); - } else { - jsTest.log("Testing with failpoint: " + fpName); - } - - testDonorAbortMigrationInterrupt((donorRst) => { - // Force the primary to step down but make it likely to step back up. - const donorPrimary = donorRst.getPrimary(); - assert.commandWorked(donorPrimary.adminCommand( - {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(donorPrimary.adminCommand({replSetFreeze: 0})); - }, fpName); - }); -})(); - -(() => { - jsTest.log("Test stateDoc data persistence on restart."); - kMigrationFpNames.forEach(fpName => { - if (!fpName) { - jsTest.log("Testing without setting a failpoint."); - } else { - jsTest.log("Testing with failpoint: " + fpName); - } - - testStateDocPersistenceOnFailover((donorRst) => { - // Skip validation on shutdown because the full validation can conflict with the tenant - // migration and cause it to fail. - donorRst.stopSet(null /* signal */, true /*forRestart */, {skipValidation: true}); - donorRst.startSet({restart: true}); - }, fpName, true); - }); -})(); - -(() => { - jsTest.log("Test stateDoc data persistence on stepup."); - kMigrationFpNames.forEach(fpName => { - if (!fpName) { - jsTest.log("Testing without setting a failpoint."); - } else { - jsTest.log("Testing with failpoint: " + fpName); - } - - testStateDocPersistenceOnFailover((donorRst) => { - // Force the primary to step down but make it likely to step back up. - const donorPrimary = donorRst.getPrimary(); - assert.commandWorked(donorPrimary.adminCommand( - {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(donorPrimary.adminCommand({replSetFreeze: 0})); - }, fpName); - }); -})(); diff --git a/jstests/replsets/tenant_migration_donor_rollback_during_cloning.js b/jstests/replsets/tenant_migration_donor_rollback_during_cloning.js index b66ed2f4f7b93..2d4714f3900e7 100644 --- a/jstests/replsets/tenant_migration_donor_rollback_during_cloning.js +++ b/jstests/replsets/tenant_migration_donor_rollback_during_cloning.js @@ -16,7 +16,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' @@ -88,7 +88,7 @@ function runTest(tenantId, readPreference: {mode: 'secondary'} }; - firstFailpointData.database = tenantMigrationTest.tenantDB(tenantId, "testDB"); + firstFailpointData.database = makeTenantDB(tenantId, "testDB"); // The failpoints correspond to the instants right before and after the 'list*' call that the // recipient cloners make. 
const fpBeforeListCall = @@ -154,7 +154,7 @@ function runTest(tenantId, // into a non-tenant DB, so this data will not be migrated but will still advance the cluster // time. tenantMigrationTest.insertDonorDB( - tenantMigrationTest.tenantDB(ObjectId().str, 'alternateDB'), + makeTenantDB(ObjectId().str, 'alternateDB'), 'alternateColl', [{x: "Tom Petty", y: "Free Fallin"}, {x: "Sushin Shyam", y: "Cherathukal"}]); @@ -186,14 +186,14 @@ function runTest(tenantId, // Creates a collection on the donor. function listCollectionsSetupFunction(tenantId, tenantMigrationTest) { - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); tenantMigrationTest.insertDonorDB(dbName, 'testColl'); } // Creates another collection on the donor, that isn't majority committed due to replication being // halted. function listCollectionsWhilePausedFunction(tenantId, syncSourceNode, tenantMigrationTest) { - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const donorPrimary = tenantMigrationTest.getDonorPrimary(); const donorTemporaryColl = donorPrimary.getDB(dbName).getCollection('tempColl'); @@ -215,7 +215,7 @@ function listCollectionsWhilePausedFunction(tenantId, syncSourceNode, tenantMigr // Makes sure that the collection that the donor RST failed to replicate does not exist on the // recipient. function listCollectionsPostMigrationFunction(tenantId, tenantMigrationTest) { - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); const collNames = recipientPrimary.getDB(dbName).getCollectionNames(); @@ -225,14 +225,14 @@ function listCollectionsPostMigrationFunction(tenantId, tenantMigrationTest) { // Create a database on the donor RST. function listDatabasesSetupFunction(tenantId, tenantMigrationTest) { - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); tenantMigrationTest.insertDonorDB(dbName, 'testColl'); } // Create another database on the donor RST. This database doesn't exist on a majority of donor RST // nodes, as replication has been paused. 
function listDatabasesWhilePausedFunction(tenantId, syncSourceNode, tenantMigrationTest) { - const dbTemp = tenantMigrationTest.tenantDB(tenantId, "tempDB"); + const dbTemp = makeTenantDB(tenantId, "tempDB"); const donorPrimary = tenantMigrationTest.getDonorPrimary(); const donorTemporaryColl = donorPrimary.getDB(dbTemp).getCollection('tempColl'); @@ -259,11 +259,11 @@ function listDatabasesPostMigrationFunction(tenantId, tenantMigrationTest) { const dbNames = recipientPrimary.adminCommand( {listDatabases: 1, nameOnly: true, filter: {"name": new RegExp("^" + tenantId)}}); assert.eq(1, dbNames.databases.length, dbNames); - assert.eq(dbNames.databases[0].name, tenantMigrationTest.tenantDB(tenantId, "testDB"), dbNames); + assert.eq(dbNames.databases[0].name, makeTenantDB(tenantId, "testDB"), dbNames); } function listIndexesSetupFunction(tenantId, tenantMigrationTest) { - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const donorPrimary = tenantMigrationTest.getDonorPrimary(); const donorColl = donorPrimary.getDB(dbName)['testColl']; @@ -289,7 +289,7 @@ function listIndexesSetupFunction(tenantId, tenantMigrationTest) { } function listIndexesWhilePausedFunction(tenantId, syncSourceNode, tenantMigrationTest) { - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const donorPrimary = tenantMigrationTest.getDonorPrimary(); const donorDB = donorPrimary.getDB(dbName); const donorColl = donorDB['testColl']; @@ -320,7 +320,7 @@ function listIndexesWhilePausedFunction(tenantId, syncSourceNode, tenantMigratio } function listIndexesPostMigrationFunction(tenantId, tenantMigrationTest) { - const dbTest = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbTest = makeTenantDB(tenantId, "testDB"); const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); const testColl = recipientPrimary.getDB(dbTest)['testColl']; diff --git a/jstests/replsets/tenant_migration_donor_startup_recovery.js b/jstests/replsets/tenant_migration_donor_startup_recovery.js index d825ff01c7e04..f4f2828262394 100644 --- a/jstests/replsets/tenant_migration_donor_startup_recovery.js +++ b/jstests/replsets/tenant_migration_donor_startup_recovery.js @@ -135,7 +135,7 @@ if (donorDoc) { // being rebuilt on step up. break; default: - throw new Error(`Invalid state "${state}" from donor doc.`); + throw new Error(`Invalid state "${donorDoc.state}" from donor doc.`); } } diff --git a/jstests/replsets/tenant_migration_drop_collection.js b/jstests/replsets/tenant_migration_drop_collection.js index c6940030dfc3c..f856ae201dfb8 100644 --- a/jstests/replsets/tenant_migration_drop_collection.js +++ b/jstests/replsets/tenant_migration_drop_collection.js @@ -2,8 +2,10 @@ * Tests that TenantCollectionCloner completes without error when a collection is dropped during * cloning as part of a tenant migration. * - * TODO SERVER-61231: relies on various failpoints and such in TenantCollectionCloner, which is - * not used for by Shard Merge, but we should likely test similar behavior, adapt for Shard Merge + * Shard merge uses backup cursor for copying files. One of the guarantee that backup cursor + * provides is that at physical file associated with the table is not deleted when backup cursor is + * active on a checkpoint that contains the table. We expect the test coverage should be already + * provided by the backup cursor. 
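// In plainer terms, the rationale added in the header above: shard merge copies donor files via a
// backup cursor, and while a backup cursor is open on a checkpoint WiredTiger does not delete the
// physical file backing any table in that checkpoint, so a concurrent drop cannot remove files out
// from under the recipient. A rough, assumption-laden sketch of that guarantee ($backupCursor is
// the internal aggregation stage jstests use to open one; donorPrimary, dbName and collName are
// stand-ins for this test's fixtures):
//
//     const backupCursor = donorPrimary.getDB("admin").aggregate([{$backupCursor: {}}]);
//     // Drop the collection while the checkpoint is pinned by the open backup cursor.
//     assert(donorPrimary.getDB(dbName).getCollection(collName).drop());
//     while (backupCursor.hasNext()) {
//         // The file metadata returned here still points at on-disk files; they are retained
//         // until the cursor is closed, even though the collection was just dropped.
//         jsTestLog(tojson(backupCursor.next()));
//     }
//     backupCursor.close();  // only now may the dropped table's files actually be removed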
* * @tags: [ * incompatible_with_macos, @@ -17,6 +19,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { + makeTenantDB, makeX509OptionsForTest, runMigrationAsync } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -44,7 +47,7 @@ function runDropTest({failPointName, failPointData, expectedLog, createNew}) { new TenantMigrationTest({name: jsTestName(), recipientRst: recipientRst}); const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/replsets/tenant_migration_ensure_migration_outcome_visibility_for_blocked_writes.js b/jstests/replsets/tenant_migration_ensure_migration_outcome_visibility_for_blocked_writes.js index a5c39831204a5..b3aba7286264c 100644 --- a/jstests/replsets/tenant_migration_ensure_migration_outcome_visibility_for_blocked_writes.js +++ b/jstests/replsets/tenant_migration_ensure_migration_outcome_visibility_for_blocked_writes.js @@ -13,6 +13,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { + makeTenantDB, makeX509OptionsForTest, runMigrationAsync } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -70,7 +71,7 @@ function insertDocument(primaryHost, dbName, collName) { }; const donorRstArgs = createRstArgs(donorRst); - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); @@ -126,7 +127,7 @@ function insertDocument(primaryHost, dbName, collName) { }; const donorRstArgs = createRstArgs(donorRst); - const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); + const dbName = makeTenantDB(tenantId, kTenantDefinedDbName); const primary = donorRst.getPrimary(); const primaryDB = primary.getDB(dbName); diff --git a/jstests/replsets/tenant_migration_external_cluster_validation.js b/jstests/replsets/tenant_migration_external_cluster_validation.js index 378d6cc8c9ce3..69d30ac40f12d 100644 --- a/jstests/replsets/tenant_migration_external_cluster_validation.js +++ b/jstests/replsets/tenant_migration_external_cluster_validation.js @@ -93,8 +93,6 @@ const recipientPrimaryTestDB = recipientPrimary.getDB(kDbName); const donorSecondaryTestDB = donorRst.getSecondary().getDB(kDbName); const recipientSecondaryTestDB = recipientRst.getSecondary().getDB(kDbName); -const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRst, recipientRst}); - createUsers(donorRst); createUsers(recipientRst); @@ -137,6 +135,7 @@ recipientSecondaryTestDB.logout(); assert.eq(1, donorAdminDB.auth(kAdminUser.name, kAdminUser.pwd)); assert.eq(1, recipientAdminDB.auth(kAdminUser.name, kAdminUser.pwd)); +const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRst, recipientRst}); const migrationId = UUID(); const migrationOpts = { migrationIdString: extractUUIDFromObject(migrationId), diff --git a/jstests/replsets/tenant_migration_external_keys_ttl.js b/jstests/replsets/tenant_migration_external_keys_ttl.js index 28611f9abaf3c..91af232c2ea76 100644 --- a/jstests/replsets/tenant_migration_external_keys_ttl.js +++ b/jstests/replsets/tenant_migration_external_keys_ttl.js @@ -2,11 +2,8 @@ * Tests that tenant migrations correctly set the TTL values for keys in the * 
config.external_validation_keys collection. * - * TODO SERVER-61231: shard merge can't handle concurrent migrations, adapt this test. - * * @tags: [ * incompatible_with_macos, - * incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_majority_read_concern, * requires_persistence, @@ -286,38 +283,48 @@ function makeTestParams() { teardown(); } - jsTestLog("Recipient failover before receiving forgetMigration"); - { - const {tmt, teardown} = setup(); - const [tenantId, migrationId, migrationOpts] = makeTestParams(); - const recipientPrimary = tmt.getRecipientPrimary(); - const fp = configureFailPoint(recipientPrimary, - "fpAfterConnectingTenantMigrationRecipientInstance", - {action: "hang"}); - - assert.commandWorked(tmt.startMigration(migrationOpts)); - fp.wait(); - - assert.commandWorked(recipientPrimary.adminCommand( - {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); - fp.off(); - - TenantMigrationTest.assertCommitted( - tmt.waitForMigrationToComplete(migrationOpts, true /* retryOnRetryableErrors */)); - - // The keys should have been created without a TTL deadline. - verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: false}); - verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: false}); - - assert.commandWorked(tmt.forgetMigration(migrationOpts.migrationIdString)); - - // After running donorForgetMigration, the TTL value should be updated. The default TTL - // buffer is 1 day so the keys will not have been deleted. - verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: true}); - verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: true}); - teardown(); - } + // TODO SERVER-76128: Tenant Migrations are not robust to recipient failover. + // jsTestLog("Recipient failover before receiving forgetMigration"); + // (() => { + // const {tmt, teardown} = setup(); + // const [tenantId, migrationId, migrationOpts] = makeTestParams(); + // const recipientPrimary = tmt.getRecipientPrimary(); + // const fp = configureFailPoint(recipientPrimary, + // "fpAfterConnectingTenantMigrationRecipientInstance", + // {action: "hang"}); + + // if (isShardMergeEnabled(tmt.getDonorPrimary().getDB("admin"))) { + // jsTestLog( + // "Skip: featureFlagShardMerge is enabled and shard merge is not resilient to + // recipient failovers."); + // teardown(); + // return; + // } + + // assert.commandWorked(tmt.startMigration(migrationOpts)); + // fp.wait(); + + // assert.commandWorked(recipientPrimary.adminCommand( + // {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); + // assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); + + // fp.off(); + + // TenantMigrationTest.assertCommitted( + // tmt.waitForMigrationToComplete(migrationOpts, true /* retryOnRetryableErrors */)); + + // // The keys should have been created without a TTL deadline. + // verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: false}); + // verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: false}); + + // assert.commandWorked(tmt.forgetMigration(migrationOpts.migrationIdString)); + + // // After running donorForgetMigration, the TTL value should be updated. The default TTL + // // buffer is 1 day so the keys will not have been deleted. 
+ // verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: true}); + // verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: true}); + // teardown(); + // })(); jsTestLog( "Donor failover after receiving forgetMigration before marking keys garbage collectable"); @@ -355,39 +362,42 @@ function makeTestParams() { teardown(); } - jsTestLog( - "Recipient failover after receiving forgetMigration before marking keys garbage collectable"); - { - const {tmt, donorRst, teardown} = setup(); - const [tenantId, migrationId, migrationOpts] = makeTestParams(); - const recipientPrimary = tmt.getRecipientPrimary(); - - assert.commandWorked(tmt.startMigration(migrationOpts)); - TenantMigrationTest.assertCommitted( - tmt.waitForMigrationToComplete(migrationOpts, true /* retryOnRetryableErrors */)); - - // The keys should have been created without a TTL deadline. - verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: false}); - verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: false}); - - const fp = configureFailPoint( - recipientPrimary, "pauseTenantMigrationBeforeMarkingExternalKeysGarbageCollectable"); - const forgetMigrationThread = new Thread( - forgetMigrationAsync, migrationOpts.migrationIdString, createRstArgs(donorRst), true); - forgetMigrationThread.start(); - fp.wait(); - - assert.commandWorked(recipientPrimary.adminCommand( - {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); - fp.off(); - - assert.commandWorked(forgetMigrationThread.returnData()); - - verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: true}); - verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: true}); - teardown(); - } + // TODO SERVER-76128: Tenant Migrations are not robust to recipient failover. + // jsTestLog( + // "Recipient failover after receiving forgetMigration before marking keys garbage + // collectable"); + // { + // const {tmt, donorRst, teardown} = setup(); + // const [tenantId, migrationId, migrationOpts] = makeTestParams(); + // const recipientPrimary = tmt.getRecipientPrimary(); + + // assert.commandWorked(tmt.startMigration(migrationOpts)); + // TenantMigrationTest.assertCommitted( + // tmt.waitForMigrationToComplete(migrationOpts, true /* retryOnRetryableErrors */)); + + // // The keys should have been created without a TTL deadline. 
+ // verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: false}); + // verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: false}); + + // const fp = configureFailPoint( + // recipientPrimary, "pauseTenantMigrationBeforeMarkingExternalKeysGarbageCollectable"); + // const forgetMigrationThread = new Thread( + // forgetMigrationAsync, migrationOpts.migrationIdString, createRstArgs(donorRst), + // true); + // forgetMigrationThread.start(); + // fp.wait(); + + // assert.commandWorked(recipientPrimary.adminCommand( + // {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); + // assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); + // fp.off(); + + // assert.commandWorked(forgetMigrationThread.returnData()); + + // verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: true}); + // verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: true}); + // teardown(); + // } jsTestLog("Donor failover after receiving forgetMigration after updating keys."); { @@ -433,49 +443,67 @@ function makeTestParams() { teardown(); } - jsTestLog("Recipient failover after receiving forgetMigration after updating keys."); - { - const {tmt, donorRst, recipientRst, teardown} = setup(); - // this test expects the external keys to expire, so lower the expiration timeouts. - const lowerExternalKeysBufferSecs = 5; - const lowerStateDocExpirationMS = 500; - for (let conn of [...donorRst.nodes, ...recipientRst.nodes]) { - setTenantMigrationExpirationParams( - conn, lowerStateDocExpirationMS, lowerExternalKeysBufferSecs); - } - const [tenantId, migrationId, migrationOpts] = makeTestParams(); - const recipientPrimary = tmt.getRecipientPrimary(); - - assert.commandWorked(tmt.startMigration(migrationOpts)); - TenantMigrationTest.assertCommitted( - tmt.waitForMigrationToComplete(migrationOpts, true /* retryOnRetryableErrors */)); - - // The keys should have been created without a TTL deadline. - verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: false}); - verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: false}); - - const fp = configureFailPoint( - recipientPrimary, "fpAfterReceivingRecipientForgetMigration", {action: "hang"}); - const forgetMigrationThread = new Thread( - forgetMigrationAsync, migrationOpts.migrationIdString, createRstArgs(donorRst), true); - forgetMigrationThread.start(); - fp.wait(); - - // Let the keys expire on the donor before the state document is deleted to verify retrying - // recipientForgetMigration can handle this case. The keys won't be deleted until the buffer - // expires, so sleep to avoid wasted work. - sleep((lowerExternalKeysBufferSecs * 1000) + lowerStateDocExpirationMS + 500); - waitForExternalKeysToBeDeleted(tmt.getRecipientPrimary(), migrationId); - - assert.commandWorked(recipientPrimary.adminCommand( - {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); - fp.off(); - - assert.commandWorked(forgetMigrationThread.returnData()); - - // Eventually the donor's keys should be deleted too. - waitForExternalKeysToBeDeleted(tmt.getDonorPrimary(), migrationId); - teardown(); - } + // TODO SERVER-76128: Tenant Migrations are not robust to recipient failover. 
+ // jsTestLog("Recipient failover after receiving forgetMigration after updating keys."); + // (() => { + // const {tmt, donorRst, recipientRst, teardown} = setup(); + // // this test expects the external keys to expire, so lower the expiration timeouts. + // const lowerExternalKeysBufferSecs = 5; + // const lowerStateDocExpirationMS = 500; + // for (let conn of [...donorRst.nodes, ...recipientRst.nodes]) { + // setTenantMigrationExpirationParams( + // conn, lowerStateDocExpirationMS, lowerExternalKeysBufferSecs); + // } + // const [tenantId, migrationId, migrationOpts] = makeTestParams(); + // const recipientPrimary = tmt.getRecipientPrimary(); + + // if (isShardMergeEnabled(tmt.getDonorPrimary().getDB("admin"))) { + // jsTestLog( + // "Skip: featureFlagShardMerge is enabled. Shard merge deletes keys after marking + // the recipient state document as 'aborted'."); + // teardown(); + // return; + // } + + // assert.commandWorked(tmt.startMigration(migrationOpts)); + // TenantMigrationTest.assertCommitted( + // tmt.waitForMigrationToComplete(migrationOpts, true /* retryOnRetryableErrors */)); + + // // The keys should have been created without a TTL deadline. + // verifyExternalKeys(tmt.getDonorPrimary(), {migrationId, expectTTLValue: false}); + // verifyExternalKeys(tmt.getRecipientPrimary(), {migrationId, expectTTLValue: false}); + + // let fp; + // if (isShardMergeEnabled(tmt.getDonorPrimary().getDB("admin"))) { + // fp = configureFailPoint( + // recipientPrimary, "fpBeforeMarkingStateDocAsGarbageCollectable", {action: + // "hang"}); + // } else { + // fp = configureFailPoint( + // recipientPrimary, "fpAfterReceivingRecipientForgetMigration", {action: "hang"}); + // } + // const forgetMigrationThread = new Thread( + // forgetMigrationAsync, migrationOpts.migrationIdString, createRstArgs(donorRst), + // true); + // forgetMigrationThread.start(); + // fp.wait(); + + // // Let the keys expire on the recipient before the state document is deleted to verify + // // retrying recipientForgetMigration can handle this case. The keys won't be deleted + // until + // // the buffer expires, so sleep to avoid wasted work. + // sleep((lowerExternalKeysBufferSecs * 1000) + lowerStateDocExpirationMS + 500); + // waitForExternalKeysToBeDeleted(tmt.getRecipientPrimary(), migrationId); + + // assert.commandWorked(recipientPrimary.adminCommand( + // {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); + // assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); + // fp.off(); + + // assert.commandWorked(forgetMigrationThread.returnData()); + + // // Eventually the donor's keys should be deleted too. 
+ // waitForExternalKeysToBeDeleted(tmt.getDonorPrimary(), migrationId); + // teardown(); + // })(); })(); diff --git a/jstests/replsets/tenant_migration_fetch_committed_transactions.js b/jstests/replsets/tenant_migration_fetch_committed_transactions.js index a1f0b2621d395..7bf939e87e17b 100644 --- a/jstests/replsets/tenant_migration_fetch_committed_transactions.js +++ b/jstests/replsets/tenant_migration_fetch_committed_transactions.js @@ -15,6 +15,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/core/txns/libs/prepare_helpers.js"); load("jstests/replsets/rslib.js"); load("jstests/libs/uuid_util.js"); @@ -24,9 +25,9 @@ const transactionsNS = "config.transactions"; const collName = "testColl"; const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); -const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); -const nonTenantDB = tenantMigrationTest.tenantDB(ObjectId().str, "testDB"); -const tenantNS = `${tenantDB}.${collName}`; +const kTenantDB = makeTenantDB(tenantId, "testDB"); +const kNonTenantDB = makeTenantDB(ObjectId().str, "testDB"); +const kTenantNS = `${kTenantDB}.${collName}`; const donorPrimary = tenantMigrationTest.getDonorPrimary(); const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); @@ -53,15 +54,15 @@ function validateTransactionEntryonRecipient(sessionId) { })); } -assert.commandWorked(donorPrimary.getCollection(tenantNS).insert([{_id: 0, x: 0}, {_id: 1, x: 1}], - {writeConcern: {w: "majority"}})); +assert.commandWorked(donorPrimary.getCollection(kTenantNS).insert([{_id: 0, x: 0}, {_id: 1, x: 1}], + {writeConcern: {w: "majority"}})); let sessionIdBeforeMigration; { jsTestLog("Run and commit a transaction prior to the migration"); const session = donorPrimary.startSession({causalConsistency: false}); sessionIdBeforeMigration = session.getSessionId(); - const sessionDb = session.getDatabase(tenantDB); + const sessionDb = session.getDatabase(kTenantDB); const sessionColl = sessionDb.getCollection(collName); session.startTransaction({writeConcern: {w: "majority"}}); @@ -77,7 +78,7 @@ assert.eq(1, donorPrimary.getCollection(transactionsNS).find().itcount()); { jsTestLog("Run and abort a transaction prior to the migration"); const session = donorPrimary.startSession({causalConsistency: false}); - const sessionDb = session.getDatabase(tenantDB); + const sessionDb = session.getDatabase(kTenantDB); const sessionColl = sessionDb.getCollection(collName); session.startTransaction({writeConcern: {w: "majority"}}); @@ -99,7 +100,7 @@ assert.eq(2, donorPrimary.getCollection(transactionsNS).find().itcount()); { jsTestLog("Run and commit a transaction that does not belong to the tenant"); const session = donorPrimary.startSession({causalConsistency: false}); - const sessionDb = session.getDatabase(nonTenantDB); + const sessionDb = session.getDatabase(kNonTenantDB); const sessionColl = sessionDb.getCollection(collName); session.startTransaction({writeConcern: {w: "majority"}}); diff --git a/jstests/replsets/tenant_migration_fetch_committed_transactions_retry.js b/jstests/replsets/tenant_migration_fetch_committed_transactions_retry.js index b7c7734b7eefa..6b04bf078ad4e 100644 --- a/jstests/replsets/tenant_migration_fetch_committed_transactions_retry.js +++ b/jstests/replsets/tenant_migration_fetch_committed_transactions_retry.js @@ -5,11 +5,10 @@ * 2) Retrying while the migration is actively updating its transactions 
entries. * 3) Retrying while the migration is updating, and the donor starts a new transaction on an * existing session. - * - * TODO SERVER-61231: shard merge can't handle restart, adapt this test. - * + * * * @tags: [ * incompatible_with_macos, + * # Shard merge is not resilient to donor restarts. * incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_majority_read_concern, @@ -19,6 +18,8 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; + load("jstests/aggregation/extras/utils.js"); load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -62,7 +63,7 @@ const assertTransactionEntries = (donorTxnEntries, recipientTxnEntries) => { (() => { jsTestLog("Test retrying after successfully updating entries"); - const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const tenantDB = makeTenantDB(tenantId, "testDB"); const donorRst = tenantMigrationTest.getDonorRst(); const donorPrimary = tenantMigrationTest.getDonorPrimary(); const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); @@ -128,7 +129,7 @@ const assertTransactionEntries = (donorTxnEntries, recipientTxnEntries) => { jsTestLog("Test retrying in the middle of updating entries"); tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), sharedOptions: {nodes: 1}}); - const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const tenantDB = makeTenantDB(tenantId, "testDB"); const collName1 = `${collName}1`; const collName2 = `${collName}2`; @@ -197,7 +198,7 @@ const assertTransactionEntries = (donorTxnEntries, recipientTxnEntries) => { jsTestLog("Test retrying with a new transaction in the middle of updating entries"); tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), sharedOptions: {nodes: 1}}); - const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const tenantDB = makeTenantDB(tenantId, "testDB"); const collName1 = `${collName}1`; const collName2 = `${collName}2`; diff --git a/jstests/replsets/tenant_migration_fetch_committed_transactions_shard_merge.js b/jstests/replsets/tenant_migration_fetch_committed_transactions_shard_merge.js index 9a0a9fa8dd45d..d413d8909ffba 100644 --- a/jstests/replsets/tenant_migration_fetch_committed_transactions_shard_merge.js +++ b/jstests/replsets/tenant_migration_fetch_committed_transactions_shard_merge.js @@ -14,9 +14,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - isShardMergeEnabled, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/core/txns/libs/prepare_helpers.js"); load("jstests/replsets/rslib.js"); @@ -28,8 +26,8 @@ const transactionsNS = "config.transactions"; const collName = "testColl"; const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); -const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); -const otherTenantDB = tenantMigrationTest.tenantDB(otherTenantId, "testDB"); +const tenantDB = makeTenantDB(tenantId, "testDB"); +const otherTenantDB = makeTenantDB(otherTenantId, "testDB"); const tenantNS = `${tenantDB}.${collName}`; const donorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/replsets/tenant_migration_find_and_modify_retry.js b/jstests/replsets/tenant_migration_find_and_modify_retry.js index 0c6dcdd81a284..e587e97bb0ba7 
100644 --- a/jstests/replsets/tenant_migration_find_and_modify_retry.js +++ b/jstests/replsets/tenant_migration_find_and_modify_retry.js @@ -15,13 +15,14 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kCollName = "testColl"; tenantMigrationTest.insertDonorDB(kDbName, kCollName, [{x: 1}]); diff --git a/jstests/replsets/tenant_migration_ignore_create_index_on_nonempty_collection.js b/jstests/replsets/tenant_migration_ignore_create_index_on_nonempty_collection.js index 92f8fa5409151..263024f9e9844 100644 --- a/jstests/replsets/tenant_migration_ignore_create_index_on_nonempty_collection.js +++ b/jstests/replsets/tenant_migration_ignore_create_index_on_nonempty_collection.js @@ -14,13 +14,14 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kCollName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/replsets/tenant_migration_invalid_inputs.js b/jstests/replsets/tenant_migration_invalid_inputs.js index 7f97f8212f534..5ffe59fa7c94f 100644 --- a/jstests/replsets/tenant_migration_invalid_inputs.js +++ b/jstests/replsets/tenant_migration_invalid_inputs.js @@ -15,10 +15,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - donorStartMigrationWithProtocol, - makeMigrationCertificatesForTest -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeMigrationCertificatesForTest} from "jstests/replsets/libs/tenant_migration_util.js"; const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), enableRecipientTesting: false}); @@ -35,93 +32,81 @@ const migrationCertificates = makeMigrationCertificatesForTest(); jsTestLog("Testing 'donorStartMigration' command provided with invalid options."); // Test missing tenantId field for protocol 'multitenant migrations'. 
-assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ - donorStartMigration: 1, - migrationId: UUID(), - recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), - readPreference, - donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, - recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, - }, - donorPrimary.getDB("admin"))), - ErrorCodes.InvalidOptions); +assert.commandFailedWithCode(donorPrimary.adminCommand({ + donorStartMigration: 1, + migrationId: UUID(), + recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), + readPreference, + donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, + recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, +}), + ErrorCodes.InvalidOptions); // Test empty tenantId and unsupported database prefixes. const unsupportedtenantIds = ['', 'admin', 'local', 'config']; unsupportedtenantIds.forEach((invalidTenantId) => { - assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ - donorStartMigration: 1, - migrationId: UUID(), - recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), - tenantId: invalidTenantId, - readPreference, - donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, - recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, - }, - donorPrimary.getDB("admin"))), - [ErrorCodes.InvalidOptions, ErrorCodes.BadValue]); -}); - -// Test migrating a tenant to the donor itself. -assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ + assert.commandFailedWithCode(donorPrimary.adminCommand({ donorStartMigration: 1, migrationId: UUID(), - recipientConnectionString: tenantMigrationTest.getDonorRst().getURL(), - tenantId, + recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), + tenantId: invalidTenantId, readPreference, donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, - }, - donorPrimary.getDB("admin"))), - ErrorCodes.BadValue); + }), + [ErrorCodes.InvalidOptions, ErrorCodes.BadValue]); +}); + +// Test migrating a tenant to the donor itself. +assert.commandFailedWithCode(donorPrimary.adminCommand({ + donorStartMigration: 1, + migrationId: UUID(), + recipientConnectionString: tenantMigrationTest.getDonorRst().getURL(), + tenantId, + readPreference, + donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, + recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, +}), + ErrorCodes.BadValue); // Test migrating a tenant to a recipient that shares one or more hosts with the donor. 
-assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ - donorStartMigration: 1, - migrationId: UUID(), - recipientConnectionString: - tenantMigrationTest.getRecipientRst().getURL() + "," + donorPrimary.host, - tenantId, - readPreference, - donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, - recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, - }, - donorPrimary.getDB("admin"))), - ErrorCodes.BadValue); +assert.commandFailedWithCode(donorPrimary.adminCommand({ + donorStartMigration: 1, + migrationId: UUID(), + recipientConnectionString: + tenantMigrationTest.getRecipientRst().getURL() + "," + donorPrimary.host, + tenantId, + readPreference, + donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, + recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, +}), + ErrorCodes.BadValue); // Test setting tenantIds field for protocol 'multitenant migrations'. -assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ - donorStartMigration: 1, - migrationId: UUID(), - recipientConnectionString: - tenantMigrationTest.getRecipientRst().getURL() + "," + donorPrimary.host, - tenantId, - tenantIds: [ObjectId(), ObjectId()], - readPreference, - donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, - recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, - }, - donorPrimary.getDB("admin"))), - ErrorCodes.BadValue); +assert.commandFailedWithCode(donorPrimary.adminCommand({ + donorStartMigration: 1, + migrationId: UUID(), + recipientConnectionString: + tenantMigrationTest.getRecipientRst().getURL() + "," + donorPrimary.host, + tenantId, + tenantIds: [ObjectId(), ObjectId()], + readPreference, + donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, + recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, +}), + ErrorCodes.BadValue); // Test migrating a tenant to a standalone recipient. 
-assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ - donorStartMigration: 1, - migrationId: UUID(), - recipientConnectionString: recipientPrimary.host, - tenantId, - readPreference, - donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, - recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, - }, - donorPrimary.getDB("admin"))), - ErrorCodes.BadValue); +assert.commandFailedWithCode(donorPrimary.adminCommand({ + donorStartMigration: 1, + migrationId: UUID(), + recipientConnectionString: recipientPrimary.host, + tenantId, + readPreference, + donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, + recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, +}), + ErrorCodes.BadValue); jsTestLog("Testing 'recipientSyncData' command provided with invalid options."); diff --git a/jstests/replsets/tenant_migration_large_txn.js b/jstests/replsets/tenant_migration_large_txn.js index 689ed9f5fc88b..b1c65d3f77f02 100644 --- a/jstests/replsets/tenant_migration_large_txn.js +++ b/jstests/replsets/tenant_migration_large_txn.js @@ -15,9 +15,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - runMigrationAsync, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallelTester.js"); @@ -27,7 +25,7 @@ load('jstests/replsets/rslib.js'); // 'createRstArgs' const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kCollName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); @@ -51,7 +49,7 @@ function runTransaction(primaryHost, dbName, collName) { session.getDatabase(dbName)[collName].insert({doc: makeLargeDoc(10)}); session.getDatabase(dbName)[collName].insert({doc: makeLargeDoc(5)}); session.getDatabase(dbName)[collName].insert({doc: makeLargeDoc(5)}); - commitRes = session.commitTransaction_forTesting(); + let commitRes = session.commitTransaction_forTesting(); assert.eq(1, commitRes.ok); session.endSession(); } diff --git a/jstests/replsets/tenant_migration_multi_writes.js b/jstests/replsets/tenant_migration_multi_writes.js index 8a8fe468d8254..1e2fb29b62ab7 100644 --- a/jstests/replsets/tenant_migration_multi_writes.js +++ b/jstests/replsets/tenant_migration_multi_writes.js @@ -3,12 +3,8 @@ * were not retried on migration abort, which would create duplicate updates. Partially * updated collection where each update is applied no more than once is still an expected result. * - * TODO SERVER-61231: aborts migration after sending recipientSyncData and starting - * cloning on recipient, adapt this test to handle file cleanup on recipient. 
- * * @tags: [ * incompatible_with_macos, - * incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_majority_read_concern, * requires_persistence, @@ -17,9 +13,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - makeX509OptionsForTest, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallelTester.js"); @@ -52,7 +46,7 @@ const donorPrimary = donorRst.getPrimary(); const kCollName = "testColl"; const kTenantDefinedDbName = "0"; const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, kTenantDefinedDbName); +const kDbName = makeTenantDB(kTenantId, kTenantDefinedDbName); const kRecords = 500; const kUpdateCycles = 600; diff --git a/jstests/replsets/tenant_migration_multikey_index.js b/jstests/replsets/tenant_migration_multikey_index.js index 925547bc1f1a8..bc7bd660c662f 100644 --- a/jstests/replsets/tenant_migration_multikey_index.js +++ b/jstests/replsets/tenant_migration_multikey_index.js @@ -11,12 +11,10 @@ * ] */ +import {getPlanStage, getWinningPlan} from "jstests/libs/analyze_plan.js"; import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - makeX509OptionsForTest, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; -load("jstests/libs/analyze_plan.js"); load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -53,7 +51,7 @@ const tenantMigrationTest = const donorPrimary = tenantMigrationTest.getDonorPrimary(); const tenantId = ObjectId().str; -const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const dbName = makeTenantDB(tenantId, "testDB"); // The first collection on donor side already has the multi-key index. const collName1 = "multiKeyColl_1"; @@ -91,8 +89,7 @@ const fpBeforeFulfillingDataConsistentPromise = configureFailPoint( recipientPrimary, "fpBeforeFulfillingDataConsistentPromise", {action: "hang"}); jsTestLog("Starting the tenant migration"); -assert.commandWorked( - tenantMigrationTest.startMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); +assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); fpBeforeFulfillingDataConsistentPromise.wait(); diff --git a/jstests/replsets/tenant_migration_network_error_via_rollback.js b/jstests/replsets/tenant_migration_network_error_via_rollback.js index b8d4d4540f66c..8acfb84a0b36b 100644 --- a/jstests/replsets/tenant_migration_network_error_via_rollback.js +++ b/jstests/replsets/tenant_migration_network_error_via_rollback.js @@ -3,10 +3,9 @@ * connection errors between the recipient primary and the sync source at various stages in the * process. (Replica set members close connections as part of rollback.) * - * TODO SERVER-61231: shard merge can't handle concurrent rollback, adapt this test. - * * @tags: [ * incompatible_with_macos, + * # Shard merge aborts if the donor has a rollback. 
* incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_majority_read_concern, @@ -18,9 +17,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - makeX509OptionsForTest, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' @@ -66,7 +63,7 @@ function runTest({failPointName, failPointData = {}, batchSize = 10 * 1000}) { }); const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); + const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); diff --git a/jstests/replsets/tenant_migration_no_failover.js b/jstests/replsets/tenant_migration_no_failover.js index 26f4a67819480..05761c0580d82 100644 --- a/jstests/replsets/tenant_migration_no_failover.js +++ b/jstests/replsets/tenant_migration_no_failover.js @@ -12,6 +12,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -19,8 +20,8 @@ const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const tenantId = ObjectId().str; const dbNames = ["db0", "db1", "db2"]; -const tenantDBs = dbNames.map(dbName => tenantMigrationTest.tenantDB(tenantId, dbName)); -const nonTenantDBs = dbNames.map(dbName => tenantMigrationTest.tenantDB(ObjectId().str, dbName)); +const tenantDBs = dbNames.map(dbName => makeTenantDB(tenantId, dbName)); +const nonTenantDBs = dbNames.map(dbName => makeTenantDB(ObjectId().str, dbName)); const collNames = ["coll0", "coll1"]; for (const db of [...tenantDBs, ...nonTenantDBs]) { @@ -35,8 +36,7 @@ const migrationOpts = { tenantId, }; -TenantMigrationTest.assertCommitted( - tenantMigrationTest.runMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); +TenantMigrationTest.assertCommitted(tenantMigrationTest.runMigration(migrationOpts)); for (const db of [...tenantDBs, ...nonTenantDBs]) { for (const coll of collNames) { diff --git a/jstests/replsets/tenant_migration_on_clustered_collection.js b/jstests/replsets/tenant_migration_on_clustered_collection.js index b9d827de0fb39..050a6b813074f 100644 --- a/jstests/replsets/tenant_migration_on_clustered_collection.js +++ b/jstests/replsets/tenant_migration_on_clustered_collection.js @@ -16,9 +16,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - runMigrationAsync, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/clustered_collections/clustered_collection_util.js"); // ClusteredCollectionUtil load("jstests/libs/parallelTester.js"); // Thread() @@ -28,7 +26,7 @@ load('jstests/replsets/rslib.js'); // 'cr const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kEmptyCollName = "testEmptyColl"; const kNonEmptyCollName = "testNonEmptyColl"; diff --git a/jstests/replsets/tenant_migration_read_your_own_writes.js 
b/jstests/replsets/tenant_migration_read_your_own_writes.js index 2c6b4837740a4..417c500ef4871 100644 --- a/jstests/replsets/tenant_migration_read_your_own_writes.js +++ b/jstests/replsets/tenant_migration_read_your_own_writes.js @@ -13,6 +13,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). @@ -20,7 +21,7 @@ const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), allowStaleReadsOnDonor: false}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kCollName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); diff --git a/jstests/replsets/tenant_migration_recipient_aborts_merge_on_donor_failure.js b/jstests/replsets/tenant_migration_recipient_aborts_merge_on_donor_failure.js index f84056679a6cd..b0df31f858579 100644 --- a/jstests/replsets/tenant_migration_recipient_aborts_merge_on_donor_failure.js +++ b/jstests/replsets/tenant_migration_recipient_aborts_merge_on_donor_failure.js @@ -12,7 +12,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; +import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -31,7 +31,7 @@ load("jstests/libs/uuid_util.js"); jsTestLog("Test that a shard merge is aborted in the event of a donor failure"); const tenantId = ObjectId().str; - const tenantDB = tenantMigrationTest.tenantDB(tenantId, "DB"); + const tenantDB = makeTenantDB(tenantId, "DB"); const collName = "testColl"; const donorRst = tenantMigrationTest.getDonorRst(); diff --git a/jstests/replsets/tenant_migration_recipient_access_blocker_rollback.js b/jstests/replsets/tenant_migration_recipient_access_blocker_rollback.js index 35720c08fbcb4..7784ff2956ca1 100644 --- a/jstests/replsets/tenant_migration_recipient_access_blocker_rollback.js +++ b/jstests/replsets/tenant_migration_recipient_access_blocker_rollback.js @@ -17,6 +17,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { getCertificateAndPrivateKey, + makeTenantDB, makeX509OptionsForTest } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -59,7 +60,7 @@ function runRollbackAfterMigrationCommitted(tenantId) { }; // Populate the donor side with data. 
- const dbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); + const dbName = makeTenantDB(kTenantId, "testDB"); const collName = "testColl"; const numDocs = 20; tenantMigrationTest.insertDonorDB( @@ -128,7 +129,7 @@ function runRollbackAfterLoneRecipientForgetMigrationCommand(tenantId) { const recipientCertificateForDonor = getCertificateAndPrivateKey("jstests/libs/tenant_migration_recipient.pem"); - const dbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); + const dbName = makeTenantDB(kTenantId, "testDB"); const collName = "testColl"; const originalPrimary = recipientRst.getPrimary(); diff --git a/jstests/replsets/tenant_migration_recipient_current_op.js b/jstests/replsets/tenant_migration_recipient_current_op.js index 967b9ec95814d..2a8ac435c34b0 100644 --- a/jstests/replsets/tenant_migration_recipient_current_op.js +++ b/jstests/replsets/tenant_migration_recipient_current_op.js @@ -18,7 +18,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {forgetMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; +import {forgetMigrationAsync, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). @@ -50,7 +50,7 @@ const dbsToClone = ["db0", "db1", "db2"]; const collsToClone = ["coll0", "coll1"]; const docs = [...Array(10).keys()].map((i) => ({x: i})); for (const db of dbsToClone) { - const tenantDB = tenantMigrationTest.tenantDB(kTenantId, db); + const tenantDB = makeTenantDB(kTenantId, db); for (const coll of collsToClone) { tenantMigrationTest.insertDonorDB(tenantDB, coll, docs); } @@ -133,8 +133,7 @@ const fpAfterForgetMigration = configureFailPoint( jsTestLog("Starting tenant migration with migrationId: " + kMigrationId + ", tenantId: " + kTenantId); -assert.commandWorked( - tenantMigrationTest.startMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); +assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); { // Wait until a current operation corresponding to "tenant recipient migration" with state @@ -314,7 +313,7 @@ forgetMigrationThread.start(); assert.eq(dbsToClone.length, currOp.databases.databasesToClone, tojson(res)); assert.eq(dbsToClone.length, currOp.databases.databasesCloned, tojson(res)); for (const db of dbsToClone) { - const tenantDB = tenantMigrationTest.tenantDB(kTenantId, db); + const tenantDB = makeTenantDB(kTenantId, db); assert(currOp.databases.hasOwnProperty(tenantDB), tojson(res)); const dbStats = currOp.databases[tenantDB]; assert.eq(0, dbStats.clonedCollectionsBeforeFailover, tojson(res)); diff --git a/jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js b/jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js index a6f37c9bcbf1f..00d67b8c669ba 100644 --- a/jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js +++ b/jstests/replsets/tenant_migration_recipient_does_not_change_sync_source_after_step_down.js @@ -16,7 +16,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -55,7 +55,7 @@ const donorRst 
= tenantMigrationTest.getDonorRst(); const donorPrimary = donorRst.getPrimary(); const tenantId = ObjectId().str; -const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const dbName = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); diff --git a/jstests/replsets/tenant_migration_recipient_failover_before_creating_oplog_buffer.js b/jstests/replsets/tenant_migration_recipient_failover_before_creating_oplog_buffer.js deleted file mode 100644 index 4cd9ae5a4157d..0000000000000 --- a/jstests/replsets/tenant_migration_recipient_failover_before_creating_oplog_buffer.js +++ /dev/null @@ -1,54 +0,0 @@ -/** - * Tests whether the recipient returns an appropriate error code to the donor when the recipient - * primary is made to step down before creating the oplog buffer collection. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_persistence, - * requires_replication, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). -load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). - -const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), sharedOptions: {nodes: 2}}); - -const kMigrationId = UUID(); -const kTenantId = ObjectId().str; -const kReadPreference = { - mode: "primary" -}; -const migrationOpts = { - migrationIdString: extractUUIDFromObject(kMigrationId), - tenantId: kTenantId, - readPreference: kReadPreference -}; - -const fpBeforeCreatingOplogBuffer = - configureFailPoint(tenantMigrationTest.getRecipientPrimary(), - "fpAfterRetrievingStartOpTimesMigrationRecipientInstance", - {action: "hang"}); - -jsTestLog("Starting tenant migration with migrationId: " + kMigrationId + - ", tenantId: " + kTenantId); -assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - -jsTestLog("Waiting until the recipient primary is about to create an oplog buffer collection."); -fpBeforeCreatingOplogBuffer.wait(); - -jsTestLog("Stepping a new primary up."); -tenantMigrationTest.getRecipientRst().stepUp( - tenantMigrationTest.getRecipientRst().getSecondaries()[0]); - -fpBeforeCreatingOplogBuffer.off(); - -jsTestLog("Waiting for migration to complete."); -TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); - -tenantMigrationTest.stop(); diff --git a/jstests/replsets/tenant_migration_recipient_fetches_retryable_writes_entry_after_committed_snapshot.js b/jstests/replsets/tenant_migration_recipient_fetches_retryable_writes_entry_after_committed_snapshot.js index bddaa6630793c..5297664d1fbed 100644 --- a/jstests/replsets/tenant_migration_recipient_fetches_retryable_writes_entry_after_committed_snapshot.js +++ b/jstests/replsets/tenant_migration_recipient_fetches_retryable_writes_entry_after_committed_snapshot.js @@ -6,11 +6,11 @@ * recipient's majority read on 'config.transactions' can miss committed retryable writes at that * majority commit point. * - * TODO SERVER-61231: Adapt for shard merge. - * * @tags: [ * incompatible_with_macos, - * # Shard merge only supports 'primary' read preference. + * # Shard merge recipient only reads from the donor primary and the primary checkpoints + * # startMigrationDonorTimestamp. It ensures the donor primary can't have a snapshot in the + * # middle of a batch and makes this test inapplicable. 
* incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_majority_read_concern, @@ -22,6 +22,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { isShardMergeEnabled, + makeTenantDB, makeX509OptionsForTest } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -85,7 +86,7 @@ const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRs const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); const kTenantId = ObjectId().str; const migrationId = UUID(); -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kCollName = "retryable_write_secondary_oplog_application"; const kNs = `${kDbName}.${kCollName}`; diff --git a/jstests/replsets/tenant_migration_recipient_fetches_retryable_writes_oplog_entries.js b/jstests/replsets/tenant_migration_recipient_fetches_retryable_writes_oplog_entries.js index 1e11553310aaf..dc8a0a0de97e9 100644 --- a/jstests/replsets/tenant_migration_recipient_fetches_retryable_writes_oplog_entries.js +++ b/jstests/replsets/tenant_migration_recipient_fetches_retryable_writes_oplog_entries.js @@ -20,7 +20,7 @@ load("jstests/libs/parallelTester.js"); // For Thread. const kMaxBatchSize = 1; -function runTest({storeFindAndModifyImagesInSideCollection = false}) { +function runTest() { const tenantMigrationTest = new TenantMigrationTest({ name: jsTestName(), quickGarbageCollection: true, @@ -43,12 +43,6 @@ function runTest({storeFindAndModifyImagesInSideCollection = false}) { const donorRst = tenantMigrationTest.getDonorRst(); const donorPrimary = tenantMigrationTest.getDonorPrimary(); const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - const setParam = { - setParameter: 1, - storeFindAndModifyImagesInSideCollection, - }; - donorPrimary.adminCommand(setParam); - recipientPrimary.adminCommand(setParam); const rsConn = new Mongo(donorRst.getURL()); const tenantSession = rsConn.startSession({retryWrites: true}); @@ -104,7 +98,7 @@ function runTest({storeFindAndModifyImagesInSideCollection = false}) { const docsToInsert = [...Array(numToInsert).keys()].map(i => ({_id: `bulkRetryableWrite${i}`})); - donorConn = new Mongo(host); + let donorConn = new Mongo(host); const tenantSession4 = donorConn.startSession({retryWrites: true}); const tenantCollection4 = tenantSession4.getDatabase(dbName)[collName]; @@ -197,5 +191,4 @@ function runTest({storeFindAndModifyImagesInSideCollection = false}) { tenantMigrationTest.stop(); } -runTest({storeFindAndModifyImagesInSideCollection: false}); -runTest({storeFindAndModifyImagesInSideCollection: true}); +runTest(); diff --git a/jstests/replsets/tenant_migration_recipient_has_tenant_data.js b/jstests/replsets/tenant_migration_recipient_has_tenant_data.js index 5a7f5778404ca..bc3a9eea3cbb5 100644 --- a/jstests/replsets/tenant_migration_recipient_has_tenant_data.js +++ b/jstests/replsets/tenant_migration_recipient_has_tenant_data.js @@ -58,8 +58,7 @@ const migrationOpts = { tenantId: kTenantId, }; -TenantMigrationTest.assertCommitted( - tenantMigrationTest.runMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); +TenantMigrationTest.assertCommitted(tenantMigrationTest.runMigration(migrationOpts)); tenantMigrationTest.waitForMigrationGarbageCollection(migrationId, kTenantId); jsTest.log( diff --git a/jstests/replsets/tenant_migration_recipient_initial_sync_cloning.js b/jstests/replsets/tenant_migration_recipient_initial_sync_cloning.js deleted file mode 
100644 index b0c58169b01ed..0000000000000 --- a/jstests/replsets/tenant_migration_recipient_initial_sync_cloning.js +++ /dev/null @@ -1,199 +0,0 @@ -/** - * Tests that during tenant migration, a new recipient node's state document and in-memory state is - * initialized after initial sync, when 1) the node hasn't begun cloning data yet, 2) is cloning - * data, and 3) is in the tenant oplog application phase. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * incompatible_with_shard_merge, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); -load('jstests/replsets/rslib.js'); // for waitForNewlyAddedRemovalForNodeToBeCommitted - -const migrationX509Options = makeX509OptionsForTest(); - -const testDBName = 'testDB'; -const testCollName = 'testColl'; - -// Restarts a node, allows the node to go through initial sync, and then makes sure its state -// matches up with the primary's. Returns the initial sync node. -function restartNodeAndCheckState(tenantId, tenantMigrationTest, checkMtab) { - // Restart a node and allow it to complete initial sync. - const recipientRst = tenantMigrationTest.getRecipientRst(); - const originalRecipientPrimary = recipientRst.getPrimary(); - - jsTestLog("Restarting a node from the recipient replica set."); - let initialSyncNode = recipientRst.getSecondaries()[0]; - initialSyncNode = - recipientRst.restart(initialSyncNode, {startClean: true, skipValidation: true}); - - // Allow the new node to finish initial sync. - waitForNewlyAddedRemovalForNodeToBeCommitted(originalRecipientPrimary, - recipientRst.getNodeId(initialSyncNode)); - recipientRst.awaitSecondaryNodes(); - recipientRst.awaitReplication(); - - jsTestLog("Ensure that the new node's state matches up with the primary's."); - // Make sure the new node's state makes sense. - let recipientDocOnPrimary = undefined; - let recipientDocOnNewNode = undefined; - assert.soon( - () => { - recipientDocOnPrimary = - originalRecipientPrimary.getCollection(TenantMigrationTest.kConfigRecipientsNS) - .findOne({tenantId}); - recipientDocOnNewNode = - initialSyncNode.getCollection(TenantMigrationTest.kConfigRecipientsNS) - .findOne({tenantId}); - - return recipientDocOnPrimary.state == recipientDocOnNewNode.state; - }, - `States never matched, primary: ${recipientDocOnPrimary}, on new node: ${ - recipientDocOnNewNode}`); - - if (checkMtab) { - jsTestLog("Ensuring TenantMigrationAccessBlocker states match."); - const primaryMtab = tenantMigrationTest.getTenantMigrationAccessBlocker( - {recipientNode: originalRecipientPrimary, tenantId}); - const newNodeMtab = tenantMigrationTest.getTenantMigrationAccessBlocker( - {recipientNode: initialSyncNode, tenantId}); - - assert.eq(primaryMtab.recipient.state, - newNodeMtab.recipient.state, - `Mtab didn't match, primary: ${primaryMtab}, on new node: ${newNodeMtab}`); - } - - return initialSyncNode; -} - -// Restarts a node without tenant oplog application. Ensures its state matches up with the -// primary's, and then steps it up. 
-function restartNodeAndCheckStateWithoutOplogApplication( - tenantId, tenantMigrationTest, checkMtab, fpOnRecipient) { - fpOnRecipient.wait(); - - const initialSyncNode = restartNodeAndCheckState(tenantId, tenantMigrationTest, checkMtab); - - jsTestLog("Stepping up the new node."); - // Now step up the new node - tenantMigrationTest.getRecipientRst().stepUp(initialSyncNode); - fpOnRecipient.off(); -} - -// Pauses the recipient before the tenant oplog application phase, and inserts documents on the -// donor that the recipient tenant oplog applier must apply. Then restarts node, allows initial -// sync, and steps the restarted node up. -function restartNodeAndCheckStateDuringOplogApplication( - tenantId, tenantMigrationTest, checkMtab, fpOnRecipient) { - fpOnRecipient.wait(); - - // Pause the tenant oplog applier before applying a batch. - const originalRecipientPrimary = tenantMigrationTest.getRecipientPrimary(); - const fpPauseOplogApplierOnBatch = - configureFailPoint(originalRecipientPrimary, "fpBeforeTenantOplogApplyingBatch"); - - // Insert documents into the donor after data cloning but before tenant oplog application, so - // that the recipient has entries to apply during tenant oplog application. - tenantMigrationTest.insertDonorDB( - tenantMigrationTest.tenantDB(tenantId, testDBName), - testCollName, - [...Array(30).keys()].map((i) => ({a: i, b: "George Harrison - All Things Must Pass"}))); - - // Wait until the oplog applier has started and is trying to apply a batch. Then restart a node. - fpPauseOplogApplierOnBatch.wait(); - const initialSyncNode = restartNodeAndCheckState(tenantId, tenantMigrationTest, checkMtab); - - jsTestLog("Stepping up the new node."); - // Now step up the new node - tenantMigrationTest.getRecipientRst().stepUp(initialSyncNode); - fpPauseOplogApplierOnBatch.off(); - fpOnRecipient.off(); -} - -// This function does the following: -// 1. Configures a failpoint on the recipient primary, depending on the 'recipientFailpoint' that is -// passed into the function. -// 2. Starts a tenant migration. -// 3. Waits for the recipient failpoint to be hit. Restarts a node, to make it go through initial -// sync. -// 4. Makes sure the restarted node's state is as expected. -// 5. Steps up the restarted node as the recipient primary, lifts the recipient failpoint, and -// allows the migration to complete. -function runTestCase(recipientFailpoint, checkMtab, restartNodeAndCheckStateFunction) { - const tenantId = ObjectId().str; - const donorRst = new ReplSetTest({ - name: "donorRst", - nodes: 1, - serverless: true, - nodeOptions: Object.assign(migrationX509Options.donor, { - setParameter: { - // Allow non-timestamped reads on donor after migration completes for testing. 
- 'failpoint.tenantMigrationDonorAllowsNonTimestampedReads': - tojson({mode: 'alwaysOn'}), - } - }) - }); - donorRst.startSet(); - donorRst.initiate(); - - const tenantMigrationTest = new TenantMigrationTest({ - name: jsTestName(), - donorRst, - sharedOptions: {setParameter: {tenantApplierBatchSizeOps: 2}} - }); - - const migrationOpts = {migrationIdString: extractUUIDFromObject(UUID()), tenantId}; - const dbName = tenantMigrationTest.tenantDB(tenantId, testDBName); - const originalRecipientPrimary = tenantMigrationTest.getRecipientPrimary(); - - const fpOnRecipient = - configureFailPoint(originalRecipientPrimary, recipientFailpoint, {action: "hang"}); - tenantMigrationTest.insertDonorDB(dbName, testCollName); - - jsTestLog(`Starting a tenant migration with migrationID ${ - migrationOpts.migrationIdString}, and tenantId ${tenantId}`); - assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - - restartNodeAndCheckStateFunction(tenantId, tenantMigrationTest, checkMtab, fpOnRecipient); - - // Allow the migration to run to completion. - jsTestLog("Allowing migration to run to completion."); - TenantMigrationTest.assertCommitted( - tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); - - assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); - - tenantMigrationTest.stop(); - donorRst.stopSet(); -} - -// These two test cases are for before the mtab is created, and before the oplog applier has been -// started. -runTestCase("fpAfterStartingOplogFetcherMigrationRecipientInstance", - false /* checkMtab */, - restartNodeAndCheckStateWithoutOplogApplication); -runTestCase("tenantCollectionClonerHangAfterCreateCollection", - false /* checkMtab */, - restartNodeAndCheckStateWithoutOplogApplication); - -// Test case to initial sync a node while the recipient is in the oplog application phase. -runTestCase("fpBeforeFulfillingDataConsistentPromise", - true /* checkMtab */, - restartNodeAndCheckStateDuringOplogApplication); - -// A case after data consistency so that the mtab exists. We do not care about the oplog applier in -// this case. -runTestCase("fpAfterWaitForRejectReadsBeforeTimestamp", - true /* checkMtab */, - restartNodeAndCheckStateWithoutOplogApplication); diff --git a/jstests/replsets/tenant_migration_recipient_initial_sync_recovery.js b/jstests/replsets/tenant_migration_recipient_initial_sync_recovery.js index caa8562c13759..e0d07f3aaef13 100644 --- a/jstests/replsets/tenant_migration_recipient_initial_sync_recovery.js +++ b/jstests/replsets/tenant_migration_recipient_initial_sync_recovery.js @@ -6,7 +6,8 @@ * incompatible_with_macos, * incompatible_with_shard_merge, * incompatible_with_windows_tls, - * requires_fcv_62, + * # The error code for a rejected recipient command invoked during the reject phase was changed. 
+ * requires_fcv_71, * requires_majority_read_concern, * requires_persistence, * serverless, @@ -72,7 +73,7 @@ if (recipientDoc) { .getTenantMigrationAccessBlocker( {recipientNode: initialSyncNode, tenantId: kTenantId}) .recipient.state == - TenantMigrationTest.RecipientAccessState.kReject); + TenantMigrationTest.RecipientAccessState.kRejectReadsAndWrites); } break; case TenantMigrationTest.RecipientState.kConsistent: @@ -81,7 +82,7 @@ if (recipientDoc) { .getTenantMigrationAccessBlocker( {recipientNode: initialSyncNode, tenantId: kTenantId}) .recipient.state == - TenantMigrationTest.RecipientAccessState.kRejectBefore); + TenantMigrationTest.RecipientAccessState.kRejectReadsBefore); assert.soon(() => bsonWoCompare( tenantMigrationTest .getTenantMigrationAccessBlocker( @@ -93,11 +94,11 @@ if (recipientDoc) { .getTenantMigrationAccessBlocker( {recipientNode: initialSyncNode, tenantId: kTenantId}) .recipient.state == - TenantMigrationTest.RecipientAccessState.kReject); + TenantMigrationTest.RecipientAccessState.kRejectReadsAndWrites); } break; default: - throw new Error(`Invalid state "${state}" from recipient doc.`); + throw new Error(`Invalid state "${recipientDoc.state}" from recipient doc.`); } } diff --git a/jstests/replsets/tenant_migration_recipient_invalidates_in_memory_txns.js b/jstests/replsets/tenant_migration_recipient_invalidates_in_memory_txns.js index 7d01160f4a938..9ec68dc2478b5 100644 --- a/jstests/replsets/tenant_migration_recipient_invalidates_in_memory_txns.js +++ b/jstests/replsets/tenant_migration_recipient_invalidates_in_memory_txns.js @@ -24,6 +24,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/replsets/rslib.js"); load("jstests/libs/uuid_util.js"); @@ -34,7 +35,7 @@ const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), sharedOptions: {setParameter: setParameterOpts}}); const tenantId = ObjectId().str; -const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const tenantDB = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const transactionsNS = "config.transactions"; diff --git a/jstests/replsets/tenant_migration_recipient_resume_on_stepup_and_restart.js b/jstests/replsets/tenant_migration_recipient_resume_on_stepup_and_restart.js deleted file mode 100644 index f00a4c4f08352..0000000000000 --- a/jstests/replsets/tenant_migration_recipient_resume_on_stepup_and_restart.js +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Tests that tenant migrations resume successfully on recipient stepup and restart. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_windows_tls, - * incompatible_with_shard_merge, - * # Some tenant migration statistics field names were changed in 6.1. 
- * requires_fcv_61, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - forgetMigrationAsync, - makeX509OptionsForTest, - runMigrationAsync, -} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/parallelTester.js"); -load("jstests/libs/uuid_util.js"); -load('jstests/replsets/rslib.js'); // 'createRstArgs' - -const kMaxSleepTimeMS = 100; -const kTenantId = ObjectId().str; - -// Set the delay before a state doc is garbage collected to be short to speed up the test but long -// enough for the state doc to still be around after stepup or restart. -const kGarbageCollectionDelayMS = 30 * 1000; - -// Set the TTL monitor to run at a smaller interval to speed up the test. -const kTTLMonitorSleepSecs = 1; - -const migrationX509Options = makeX509OptionsForTest(); - -/** - * Runs the donorStartMigration command to start a migration, and interrupts the migration on the - * recipient using the 'interruptFunc' after the migration starts on the recipient side, and - * asserts that migration eventually commits. - * @param {recipientRestarted} bool is needed to properly assert the tenant migrations stat count. - */ -function testRecipientSyncDataInterrupt(interruptFunc, recipientRestarted) { - const recipientRst = new ReplSetTest({ - nodes: 3, - name: "recipientRst", - serverless: true, - nodeOptions: migrationX509Options.recipient - }); - recipientRst.startSet(); - recipientRst.initiate(); - - const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), recipientRst}); - - const donorRst = tenantMigrationTest.getDonorRst(); - const donorPrimary = tenantMigrationTest.getDonorPrimary(); - let recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - tenantId: kTenantId, - recipientConnString: tenantMigrationTest.getRecipientConnString(), - }; - const donorRstArgs = createRstArgs(donorRst); - - const runMigrationThread = new Thread(runMigrationAsync, migrationOpts, donorRstArgs); - runMigrationThread.start(); - - // Wait for recipientSyncData command to start. - assert.soon( - () => recipientPrimary.adminCommand({currentOp: true, desc: "tenant recipient migration"}) - .inprog.length > 0); - - sleep(Math.random() * kMaxSleepTimeMS); - interruptFunc(recipientRst); - - TenantMigrationTest.assertCommitted(runMigrationThread.returnData()); - tenantMigrationTest.waitForDonorNodesToReachState(donorRst.nodes, - migrationId, - migrationOpts.tenantId, - TenantMigrationTest.DonorState.kCommitted); - assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); - - const donorStats = tenantMigrationTest.getTenantMigrationStats(donorPrimary); - assert.eq(1, donorStats.totalMigrationDonationsCommitted); - - tenantMigrationTest.stop(); - recipientRst.stopSet(); -} - -/** - * Starts a migration and waits for it to commit, then runs the donorForgetMigration, and interrupts - * the recipient using the 'interruptFunc', and asserts that the migration state is eventually - * garbage collected. 
- */ -function testRecipientForgetMigrationInterrupt(interruptFunc) { - const donorRst = new ReplSetTest({ - nodes: 1, - name: "donorRst", - serverless: true, - nodeOptions: Object.assign({}, migrationX509Options.donor, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: kGarbageCollectionDelayMS, - ttlMonitorSleepSecs: kTTLMonitorSleepSecs, - } - }) - }); - const recipientRst = new ReplSetTest({ - nodes: 3, - name: "recipientRst", - serverless: true, - nodeOptions: Object.assign({}, migrationX509Options.recipient, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: kGarbageCollectionDelayMS, - ttlMonitorSleepSecs: kTTLMonitorSleepSecs, - } - }) - }); - - donorRst.startSet(); - donorRst.initiate(); - - recipientRst.startSet(); - recipientRst.initiate(); - - const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), donorRst, recipientRst}); - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - - const migrationId = UUID(); - const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - tenantId: kTenantId, - recipientConnString: recipientRst.getURL(), - }; - const donorRstArgs = createRstArgs(donorRst); - - TenantMigrationTest.assertCommitted( - tenantMigrationTest.runMigration(migrationOpts, {automaticForgetMigration: false})); - const forgetMigrationThread = new Thread(forgetMigrationAsync, - migrationOpts.migrationIdString, - donorRstArgs, - false /* retryOnRetryableErrors */); - forgetMigrationThread.start(); - - // Wait for recipientForgetMigration command to start. - assert.soon(() => { - const res = assert.commandWorked( - recipientPrimary.adminCommand({currentOp: true, desc: "tenant recipient migration"})); - return res.inprog[0].expireAt != null; - }); - sleep(Math.random() * kMaxSleepTimeMS); - interruptFunc(recipientRst); - - assert.commandWorkedOrFailedWithCode( - tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString), - ErrorCodes.NoSuchTenantMigration); - - assert.commandWorked(forgetMigrationThread.returnData()); - tenantMigrationTest.waitForMigrationGarbageCollection(migrationId, migrationOpts.tenantId); - - tenantMigrationTest.stop(); - donorRst.stopSet(); - recipientRst.stopSet(); -} - -(() => { - jsTest.log("Test that the migration resumes on stepup"); - testRecipientSyncDataInterrupt((recipientRst) => { - // Force the primary to step down but make it likely to step back up. - const recipientPrimary = recipientRst.getPrimary(); - assert.commandWorked(recipientPrimary.adminCommand( - {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); - }, false); -})(); - -(() => { - jsTest.log("Test that the migration resumes after restart"); - testRecipientSyncDataInterrupt((recipientRst) => { - recipientRst.stopSet(null /* signal */, true /*forRestart */); - recipientRst.startSet({restart: true}); - recipientRst.awaitSecondaryNodes(); - recipientRst.getPrimary(); - }, true); -})(); - -(() => { - jsTest.log("Test that the recipientForgetMigration command can be retried on stepup"); - testRecipientForgetMigrationInterrupt((recipientRst) => { - // Force the primary to step down but make it likely to step back up. 
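[Editor's note] For context on the harness above: both interrupt tests launch the migration from a parallel shell thread and only interrupt once `currentOp` shows the recipient working on it. A condensed, illustrative sketch of that flow, reusing names defined in the removed test (`runMigrationAsync`, `migrationOpts`, `donorRstArgs`, `recipientPrimary`, `recipientRst`, `interruptFunc`):

```js
load("jstests/libs/parallelTester.js");  // for Thread

// Illustrative sketch only. Run the migration on a background thread ...
const runMigrationThread = new Thread(runMigrationAsync, migrationOpts, donorRstArgs);
runMigrationThread.start();

// ... wait until recipientSyncData is actually in flight on the recipient ...
assert.soon(
    () => recipientPrimary.adminCommand({currentOp: true, desc: "tenant recipient migration"})
              .inprog.length > 0);

// ... interrupt (step-down or restart), then confirm the migration still commits.
interruptFunc(recipientRst);
TenantMigrationTest.assertCommitted(runMigrationThread.returnData());
```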
- const recipientPrimary = recipientRst.getPrimary(); - assert.commandWorked(recipientPrimary.adminCommand( - {replSetStepDown: ReplSetTest.kForeverSecs, force: true})); - assert.commandWorked(recipientPrimary.adminCommand({replSetFreeze: 0})); - }); -})(); - -(() => { - jsTest.log("Test that the recipientForgetMigration command can be retried after restart"); - testRecipientForgetMigrationInterrupt((recipientRst) => { - recipientRst.stopSet(null /* signal */, true /*forRestart */); - recipientRst.startSet({restart: true}); - recipientRst.awaitSecondaryNodes(); - recipientRst.getPrimary(); - }); -})(); diff --git a/jstests/replsets/tenant_migration_recipient_resumes_on_donor_failover.js b/jstests/replsets/tenant_migration_recipient_resumes_on_donor_failover.js index d64fc1eb096b1..198ceb0cdc46d 100644 --- a/jstests/replsets/tenant_migration_recipient_resumes_on_donor_failover.js +++ b/jstests/replsets/tenant_migration_recipient_resumes_on_donor_failover.js @@ -19,7 +19,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -49,7 +49,7 @@ function runTest(failPoint) { jsTestLog("Running test with failpoint: " + failPoint); const tenantId = ObjectId().str; - const tenantDB = tenantMigrationTest.tenantDB(tenantId, "DB"); + const tenantDB = makeTenantDB(tenantId, "DB"); const collName = "testColl"; const donorRst = tenantMigrationTest.getDonorRst(); diff --git a/jstests/replsets/tenant_migration_recipient_retry_forget_migration.js b/jstests/replsets/tenant_migration_recipient_retry_forget_migration.js index 43798c7952b61..7f53167b22117 100644 --- a/jstests/replsets/tenant_migration_recipient_retry_forget_migration.js +++ b/jstests/replsets/tenant_migration_recipient_retry_forget_migration.js @@ -16,7 +16,10 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {getCertificateAndPrivateKey} from "jstests/replsets/libs/tenant_migration_util.js"; +import { + getCertificateAndPrivateKey, + makeTenantDB +} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). load("jstests/libs/parallelTester.js"); // For Thread() @@ -29,7 +32,7 @@ const tenantId = ObjectId().str; const recipientCertificateForDonor = getCertificateAndPrivateKey("jstests/libs/tenant_migration_recipient.pem"); -const dbName = tenantMigrationTest.tenantDB(tenantId, "test"); +const dbName = makeTenantDB(tenantId, "test"); const collName = "coll"; const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); diff --git a/jstests/replsets/tenant_migration_recipient_retryable_writes_failover.js b/jstests/replsets/tenant_migration_recipient_retryable_writes_failover.js deleted file mode 100644 index 03b5f7eb9724a..0000000000000 --- a/jstests/replsets/tenant_migration_recipient_retryable_writes_failover.js +++ /dev/null @@ -1,110 +0,0 @@ -/** - * Tests whether the recipient correctly clears its oplog buffer if the recipient primary - * fails over while fetching retryable writes oplog entries from the donor. 
- * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). -load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). - -const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), sharedOptions: {nodes: 2}}); - -const kMigrationId = UUID(); -const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDb"); -const kCollName = "testColl"; -const migrationOpts = { - migrationIdString: extractUUIDFromObject(kMigrationId), - tenantId: kTenantId, -}; - -const donorRst = tenantMigrationTest.getDonorRst(); -const donorPrimary = tenantMigrationTest.getDonorPrimary(); -const rsConn = new Mongo(donorRst.getURL()); -const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - -const session = rsConn.startSession({retryWrites: true}); -const sessionColl = session.getDatabase(kDbName)[kCollName]; - -const session2 = rsConn.startSession({retryWrites: true}); -const sessionColl2 = session2.getDatabase(kDbName)[kCollName]; - -jsTestLog("Run retryable writes prior to the migration."); -assert.commandWorked(sessionColl.insert({_id: "retryableWrite1"})); -assert.commandWorked(sessionColl2.insert({_id: "retryableWrite2"})); - -jsTestLog("Setting up failpoints."); -// Use `pauseAfterRetrievingRetryableWritesBatch` to hang after inserting the first batch of results -// from the aggregation request into the oplog buffer. -const fpPauseAfterRetrievingRetryableWritesBatch = - configureFailPoint(recipientPrimary, "pauseAfterRetrievingRetryableWritesBatch"); - -// Set aggregation request batch size to 1 so that we can failover in between batches. -const fpSetSmallAggregationBatchSize = - configureFailPoint(recipientPrimary, "fpSetSmallAggregationBatchSize"); - -jsTestLog("Starting tenant migration with migrationId: " + kMigrationId + - ", tenantId: " + kTenantId); -assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - -jsTestLog("Waiting until the recipient primary fetches a batch of retryable writes oplog entries."); -fpSetSmallAggregationBatchSize.wait(); -fpPauseAfterRetrievingRetryableWritesBatch.wait(); - -// Check that the oplog buffer is correctly populated. -const kOplogBufferNS = "repl.migration.oplog_" + migrationOpts.migrationIdString; -let recipientOplogBuffer = recipientPrimary.getDB("config")[kOplogBufferNS]; -// We expect to have only retryableWrite1 since the cursor batch size is 1 and we paused after -// inserting the first branch of results from the aggregation request. -let cursor = recipientOplogBuffer.find(); -assert.eq(cursor.itcount(), 1, "Incorrect number of oplog entries in buffer: " + cursor.toArray()); - -// Check that we haven't completed the retryable writes fetching stage yet. 
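[Editor's note] The removed test above, like the rest of this section, steers the recipient through failpoints via the fail_point_util.js helpers. The lifecycle is always the same; sketched here with the failpoint name from that test, as an illustration rather than part of the patch:

```js
load("jstests/libs/fail_point_util.js");  // for configureFailPoint()

// Arm the failpoint before kicking off the migration.
const fp = configureFailPoint(recipientPrimary, "pauseAfterRetrievingRetryableWritesBatch");
assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts));

// Block until a server thread actually reaches the failpoint ...
fp.wait();
// ... make assertions against the paused node here ...

// ... then release it so the migration can proceed.
fp.off();
```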
-let recipientConfigColl = recipientPrimary.getCollection(TenantMigrationTest.kConfigRecipientsNS); -let recipientDoc = recipientConfigColl.find({"_id": kMigrationId}).toArray(); -assert.eq(recipientDoc.length, 1); -assert.eq(recipientDoc[0].completedFetchingRetryableWritesBeforeStartOpTime, false); - -jsTestLog("Stepping a new primary up."); -const recipientRst = tenantMigrationTest.getRecipientRst(); -const recipientSecondary = recipientRst.getSecondary(); -// Use `fpAfterFetchingRetryableWritesEntriesBeforeStartOpTime` to hang after populating the oplog -// buffer with retryable writes entries. Set this before stepping up instead of after so that the -// new primary will not be able to pass this stage without the failpoint being set. -const fpAfterFetchingRetryableWritesEntries = configureFailPoint( - recipientSecondary, "fpAfterFetchingRetryableWritesEntriesBeforeStartOpTime", {action: "hang"}); - -recipientRst.stepUp(recipientSecondary); - -fpPauseAfterRetrievingRetryableWritesBatch.off(); -const newRecipientPrimary = recipientRst.getPrimary(); - -fpAfterFetchingRetryableWritesEntries.wait(); -// The new primary should have cleared its oplog buffer and refetched both retryableWrite1 and -// retryableWrite2. Otherwise, we will invariant when trying to add those entries. -recipientOplogBuffer = newRecipientPrimary.getDB("config")[kOplogBufferNS]; -cursor = recipientOplogBuffer.find(); -assert.eq(cursor.itcount(), 2, "Incorrect number of oplog entries in buffer: " + cursor.toArray()); - -recipientConfigColl = newRecipientPrimary.getCollection(TenantMigrationTest.kConfigRecipientsNS); -recipientDoc = recipientConfigColl.find({"_id": kMigrationId}).toArray(); -assert.eq(recipientDoc.length, 1); -assert.eq(recipientDoc[0].completedFetchingRetryableWritesBeforeStartOpTime, true); - -fpAfterFetchingRetryableWritesEntries.off(); -fpSetSmallAggregationBatchSize.off(); - -jsTestLog("Waiting for migration to complete."); -TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); - -tenantMigrationTest.stop(); diff --git a/jstests/replsets/tenant_migration_recipient_rollback_recovery.js b/jstests/replsets/tenant_migration_recipient_rollback_recovery.js deleted file mode 100644 index ffb2cc4919ec6..0000000000000 --- a/jstests/replsets/tenant_migration_recipient_rollback_recovery.js +++ /dev/null @@ -1,335 +0,0 @@ -/** - * Tests that tenant migrations that go through recipient rollback are recovered correctly. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - forgetMigrationAsync, - makeX509OptionsForTest, - runMigrationAsync, -} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); -load("jstests/libs/parallelTester.js"); -load("jstests/replsets/libs/rollback_test.js"); -load("jstests/replsets/rslib.js"); // 'createRstArgs' - -const kTenantId = ObjectId().str; - -const kMaxSleepTimeMS = 250; - -// Set the delay before a state doc is garbage collected to be short to speed up the test but long -// enough for the state doc to still be around after the recipient is back in the replication steady -// state. 
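[Editor's note] That garbage-collection delay, together with a short TTL monitor interval, is applied through node-level set parameters. A sketch of how the removed rollback test wires it in; values mirror the deleted code, and `makeX509OptionsForTest()` is the helper imported there:

```js
// Illustrative sketch only; values mirror the deleted test.
const gcParams = {
    setParameter: {
        tenantMigrationGarbageCollectionDelayMS: 30 * 1000,  // keep state docs around briefly
        ttlMonitorSleepSecs: 1,                              // but reap them quickly once expired
    }
};
const donorRst = new ReplSetTest({
    name: "donorRst",
    nodes: 1,
    serverless: true,
    nodeOptions: Object.assign({}, makeX509OptionsForTest().donor, gcParams),
});
```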
-const kGarbageCollectionDelayMS = 30 * 1000; - -const migrationX509Options = makeX509OptionsForTest(); - -function makeMigrationOpts(tenantMigrationTest, migrationId, tenantId) { - return { - migrationIdString: extractUUIDFromObject(migrationId), - tenantId: tenantId, - recipientConnString: tenantMigrationTest.getRecipientConnString(), - readPreference: {mode: "primary"}, - }; -} - -/** - * Starts a recipient ReplSetTest and creates a TenantMigrationTest for it. Runs 'setUpFunc' after - * initiating the recipient. Then, runs 'rollbackOpsFunc' while replication is disabled on the - * secondaries, shuts down the primary and restarts it after re-election to force the operations in - * 'rollbackOpsFunc' to be rolled back. Finally, runs 'steadyStateFunc' after it is back in the - * replication steady state. - */ -function testRollBack(setUpFunc, rollbackOpsFunc, steadyStateFunc) { - const donorRst = new ReplSetTest({ - name: "donorRst", - nodes: 1, - serverless: true, - nodeOptions: Object.assign({}, migrationX509Options.donor, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: kGarbageCollectionDelayMS, - ttlMonitorSleepSecs: 1, - } - }) - }); - donorRst.startSet(); - donorRst.initiate(); - - const donorRstArgs = createRstArgs(donorRst); - - const recipientRst = new ReplSetTest({ - name: "recipientRst", - nodes: 3, - serverless: true, - nodeOptions: Object.assign({}, migrationX509Options.recipient, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: kGarbageCollectionDelayMS, - ttlMonitorSleepSecs: 1, - } - }) - }); - recipientRst.startSet(); - recipientRst.initiate(); - - const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), donorRst, recipientRst}); - setUpFunc(tenantMigrationTest, donorRstArgs); - - let originalRecipientPrimary = recipientRst.getPrimary(); - const originalRecipientSecondaries = recipientRst.getSecondaries(); - // The default WC is majority and stopServerReplication will prevent satisfying any majority - // writes. - assert.commandWorked(originalRecipientPrimary.adminCommand( - {setDefaultRWConcern: 1, defaultWriteConcern: {w: 1}, writeConcern: {w: "majority"}})); - recipientRst.awaitLastOpCommitted(); - - // Disable replication on the secondaries so that writes during this step will be rolled back. - stopServerReplication(originalRecipientSecondaries); - rollbackOpsFunc(tenantMigrationTest, donorRstArgs); - - // Shut down the primary and re-enable replication to allow one of the secondaries to get - // elected, and make the writes above get rolled back on the original primary when it comes - // back up. - recipientRst.stop(originalRecipientPrimary); - restartServerReplication(originalRecipientSecondaries); - const newRecipientPrimary = recipientRst.getPrimary(); - assert.neq(originalRecipientPrimary, newRecipientPrimary); - - // Restart the original primary. - originalRecipientPrimary = - recipientRst.start(originalRecipientPrimary, {waitForConnect: true}, true /* restart */); - originalRecipientPrimary.setSecondaryOk(); - recipientRst.awaitReplication(); - - steadyStateFunc(tenantMigrationTest); - - donorRst.stopSet(); - recipientRst.stopSet(); -} - -/** - * Starts a migration and waits for the recipient's primary to insert the recipient's state doc. - * Forces the write to be rolled back. After the replication steady state is reached, asserts that - * recipientSyncData can restart the migration on the new primary. 
- */ -function testRollbackInitialState() { - const migrationId = UUID(); - let migrationOpts; - let migrationThread; - - let setUpFunc = (tenantMigrationTest, donorRstArgs) => {}; - - let rollbackOpsFunc = (tenantMigrationTest, donorRstArgs) => { - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - - // Start the migration asynchronously and wait for the primary to insert the state doc. - migrationOpts = makeMigrationOpts(tenantMigrationTest, migrationId, ObjectId().str); - migrationThread = new Thread(runMigrationAsync, migrationOpts, donorRstArgs); - migrationThread.start(); - assert.soon(() => { - return 1 === - recipientPrimary.getCollection(TenantMigrationTest.kConfigRecipientsNS).count({ - _id: migrationId - }); - }); - }; - - let steadyStateFunc = (tenantMigrationTest) => { - // Verify that the migration restarted successfully on the new primary despite rollback. - TenantMigrationTest.assertCommitted(migrationThread.returnData()); - tenantMigrationTest.assertRecipientNodesInExpectedState({ - nodes: tenantMigrationTest.getRecipientRst().nodes, - migrationId: migrationId, - tenantId: migrationOpts.tenantId, - expectedState: TenantMigrationTest.RecipientState.kConsistent, - expectedAccessState: TenantMigrationTest.RecipientAccessState.kRejectBefore - }); - assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); - }; - - testRollBack(setUpFunc, rollbackOpsFunc, steadyStateFunc); -} - -/** - * Starts a migration after enabling 'pauseFailPoint' (must pause the migration) and - * 'setUpFailPoints' on the recipient's primary. Waits for the primary to do the write to transition - * to 'nextState' after reaching 'pauseFailPoint' (i.e. the state doc matches 'query'), then forces - * the write to be rolled back. After the replication steady state is reached, asserts that the - * migration is resumed successfully by new primary regardless of what the rolled back state - * transition is. - */ -function testRollBackStateTransition(pauseFailPoint, setUpFailPoints, nextState, query) { - jsTest.log(`Test roll back the write to transition to state "${ - nextState}" after reaching failpoint "${pauseFailPoint}"`); - - const migrationId = UUID(); - let migrationOpts; - let migrationThread, pauseFp; - - let setUpFunc = (tenantMigrationTest, donorRstArgs) => { - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - setUpFailPoints.forEach(failPoint => configureFailPoint(recipientPrimary, failPoint)); - pauseFp = configureFailPoint(recipientPrimary, pauseFailPoint, {action: "hang"}); - - migrationOpts = makeMigrationOpts(tenantMigrationTest, migrationId, ObjectId().str); - migrationThread = new Thread(runMigrationAsync, migrationOpts, donorRstArgs); - migrationThread.start(); - pauseFp.wait(); - }; - - let rollbackOpsFunc = (tenantMigrationTest, donorRstArgs) => { - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - // Resume the migration and wait for the primary to do the write for the state transition. - pauseFp.off(); - assert.soon(() => { - return 1 === - recipientPrimary.getCollection(TenantMigrationTest.kConfigRecipientsNS) - .count(Object.assign({_id: migrationId}, query)); - }); - }; - - let steadyStateFunc = (tenantMigrationTest) => { - // Verify that the migration resumed successfully on the new primary despite the rollback. 
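[Editor's note] To make the rollback machinery in the removed `testRollBack()` harness easier to follow, here is a condensed outline of how these tests force a rollback. It is illustrative only; `performWritesThatShouldRollBack()` is a hypothetical stand-in, and the replication helpers come from the libs the removed test loads:

```js
// Illustrative sketch only, not part of this patch.
stopServerReplication(recipientRst.getSecondaries());     // nothing below reaches a majority
performWritesThatShouldRollBack();                         // hypothetical stand-in
recipientRst.stop(originalPrimary);                        // take the old primary down
restartServerReplication(recipientRst.getSecondaries());   // let a secondary win the election
const newPrimary = recipientRst.getPrimary();
assert.neq(originalPrimary, newPrimary);

// Bring the old primary back; its unreplicated writes are rolled back on recovery.
recipientRst.start(originalPrimary, {waitForConnect: true}, true /* restart */);
recipientRst.awaitReplication();
```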
- TenantMigrationTest.assertCommitted(migrationThread.returnData()); - tenantMigrationTest.waitForRecipientNodesToReachState( - tenantMigrationTest.getRecipientRst().nodes, - migrationId, - migrationOpts.tenantId, - TenantMigrationTest.RecipientState.kConsistent, - TenantMigrationTest.RecipientAccessState.kRejectBefore); - assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); - }; - - testRollBack(setUpFunc, rollbackOpsFunc, steadyStateFunc); -} - -/** - * Runs donorForgetMigration after completing a migration. Waits for the recipient's primary to - * mark the recipient's state doc as garbage collectable, then forces the write to be rolled back. - * After the replication steady state is reached, asserts that recipientForgetMigration can be - * retried on the new primary and that the state doc is eventually garbage collected. - */ -function testRollBackMarkingStateGarbageCollectable() { - const migrationId = UUID(); - let migrationOpts; - let forgetMigrationThread; - - let setUpFunc = (tenantMigrationTest, donorRstArgs) => { - migrationOpts = makeMigrationOpts(tenantMigrationTest, migrationId, ObjectId().str); - TenantMigrationTest.assertCommitted( - tenantMigrationTest.runMigration(migrationOpts, {automaticForgetMigration: false})); - }; - - let rollbackOpsFunc = (tenantMigrationTest, donorRstArgs) => { - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - // Run donorForgetMigration and wait for the primary to do the write to mark the state doc - // as garbage collectable. - forgetMigrationThread = new Thread(forgetMigrationAsync, - migrationOpts.migrationIdString, - donorRstArgs, - false /* retryOnRetryableErrors */); - forgetMigrationThread.start(); - assert.soon(() => { - return 1 === - recipientPrimary.getCollection(TenantMigrationTest.kConfigRecipientsNS) - .count({_id: migrationId, expireAt: {$exists: 1}}); - }); - }; - - let steadyStateFunc = (tenantMigrationTest) => { - // Verify that the migration state got garbage collected successfully despite the rollback. - assert.commandWorked(forgetMigrationThread.returnData()); - tenantMigrationTest.waitForMigrationGarbageCollection( - migrationId, - migrationOpts.tenantId, - tenantMigrationTest.getDonorRst().nodes, - tenantMigrationTest.getRecipientRst().nodes); - }; - - testRollBack(setUpFunc, rollbackOpsFunc, steadyStateFunc); -} - -/** - * Starts a migration and forces the recipient's primary to go through rollback after a random - * amount of time. After the replication steady state is reached, asserts that the migration is - * resumed successfully. - */ -function testRollBackRandom() { - const migrationId = UUID(); - let migrationOpts; - let migrationThread; - - let setUpFunc = (tenantMigrationTest, donorRstArgs) => { - migrationOpts = makeMigrationOpts(tenantMigrationTest, migrationId, ObjectId().str); - migrationThread = new Thread(async (donorRstArgs, migrationOpts) => { - const {runMigrationAsync, forgetMigrationAsync} = - await import("jstests/replsets/libs/tenant_migration_util.js"); - assert.commandWorked(await runMigrationAsync(migrationOpts, donorRstArgs)); - assert.commandWorked(await forgetMigrationAsync( - migrationOpts.migrationIdString, donorRstArgs, false /* retryOnRetryableErrors */)); - }, donorRstArgs, migrationOpts); - - // Start the migration and wait for a random amount of time before transitioning to the - // rollback operations state. 
- migrationThread.start(); - sleep(Math.random() * kMaxSleepTimeMS); - }; - - let rollbackOpsFunc = (tenantMigrationTest, donorRstArgs) => { - // Let the migration run in the rollback operations state for a random amount of time. - sleep(Math.random() * kMaxSleepTimeMS); - }; - - let steadyStateFunc = (tenantMigrationTest) => { - // Verify that the migration completed and was garbage collected successfully despite the - // rollback. - migrationThread.join(); - tenantMigrationTest.waitForRecipientNodesToReachState( - tenantMigrationTest.getRecipientRst().nodes, - migrationId, - migrationOpts.tenantId, - TenantMigrationTest.RecipientState.kDone, - TenantMigrationTest.RecipientAccessState.kRejectBefore); - tenantMigrationTest.waitForMigrationGarbageCollection( - migrationId, - migrationOpts.tenantId, - tenantMigrationTest.getDonorRst().nodes, - tenantMigrationTest.getRecipientRst().nodes); - }; - - testRollBack(setUpFunc, rollbackOpsFunc, steadyStateFunc); -} - -jsTest.log("Test roll back recipient's state doc insert"); -testRollbackInitialState(); - -jsTest.log("Test roll back recipient's state doc update"); -[{ - pauseFailPoint: "fpBeforeMarkingCloneSuccess", - nextState: "reject", - query: {dataConsistentStopDonorOpTime: {$exists: 1}} -}, - { - pauseFailPoint: "fpBeforePersistingRejectReadsBeforeTimestamp", - nextState: "rejectBefore", - query: {rejectReadsBeforeTimestamp: {$exists: 1}} - }].forEach(({pauseFailPoint, setUpFailPoints = [], nextState, query}) => { - testRollBackStateTransition(pauseFailPoint, setUpFailPoints, nextState, query); -}); - -jsTest.log("Test roll back marking the donor's state doc as garbage collectable"); -testRollBackMarkingStateGarbageCollectable(); - -jsTest.log("Test roll back random"); -testRollBackRandom(); diff --git a/jstests/replsets/tenant_migration_recipient_shard_merge_copies_change_collections.js b/jstests/replsets/tenant_migration_recipient_shard_merge_copies_change_collections.js index 2a816b1ee5099..c6752b007e650 100644 --- a/jstests/replsets/tenant_migration_recipient_shard_merge_copies_change_collections.js +++ b/jstests/replsets/tenant_migration_recipient_shard_merge_copies_change_collections.js @@ -5,7 +5,7 @@ * @tags: [ * incompatible_with_macos, * incompatible_with_windows_tls, - * requires_fcv_70, + * requires_fcv_71, * requires_majority_read_concern, * requires_persistence, * serverless, @@ -19,38 +19,65 @@ import { makeX509OptionsForTest } from "jstests/replsets/libs/tenant_migration_util.js"; +// For assertDropAndRecreateCollection. 
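[Editor's note] Several hunks above swap the `tenantMigrationTest.tenantDB()` method for the standalone `makeTenantDB()` helper imported from tenant_migration_util.js. The new call shape, sketched for reference from the hunks above:

```js
import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js";

const tenantId = ObjectId().str;
const kDbName = makeTenantDB(tenantId, "testDB");  // tenant-prefixed database name
const kCollName = "testColl";
const kNs = `${kDbName}.${kCollName}`;
```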
+load("jstests/libs/collection_drop_recreate.js"); load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); load("jstests/replsets/libs/tenant_migration_util.js"); load("jstests/serverless/libs/change_collection_util.js"); -const donorRst = new ChangeStreamMultitenantReplicaSetTest({ - name: "donorReplSet", - nodes: 2, - nodeOptions: Object.assign(makeX509OptionsForTest().donor, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: 0, - ttlMonitorSleepSecs: 1, - } - }), -}); -const recipientRst = new ChangeStreamMultitenantReplicaSetTest({ - name: "recipientReplSet", - nodes: 2, - nodeOptions: Object.assign(makeX509OptionsForTest().recipient, { - setParameter: { - tenantMigrationGarbageCollectionDelayMS: 0, - ttlMonitorSleepSecs: 1, - } - }), -}); - -const tenantMigrationTest = new TenantMigrationTest({ - name: jsTestName(), - donorRst, - recipientRst, - quickGarbageCollection: true, -}); +function setup() { + const donorRst = new ChangeStreamMultitenantReplicaSetTest({ + name: "donorReplSet", + nodes: 2, + nodeOptions: Object.assign(makeX509OptionsForTest().donor, { + setParameter: { + tenantMigrationGarbageCollectionDelayMS: 0, + ttlMonitorSleepSecs: 1, + } + }), + }); + const recipientRst = new ChangeStreamMultitenantReplicaSetTest({ + name: "recipientReplSet", + nodes: 2, + nodeOptions: Object.assign(makeX509OptionsForTest().recipient, { + setParameter: { + tenantMigrationGarbageCollectionDelayMS: 0, + ttlMonitorSleepSecs: 1, + } + }), + }); + + const tenantMigrationTest = new TenantMigrationTest({ + name: jsTestName(), + donorRst, + recipientRst, + quickGarbageCollection: true, + }); + + const teardown = () => { + donorRst.stopSet(); + recipientRst.stopSet(); + tenantMigrationTest.stop(); + }; + + // Note: including this explicit early return here due to the fact that multiversion + // suites will execute this test without featureFlagShardMerge enabled (despite the + // presence of the featureFlagShardMerge tag above), which means the test will attempt + // to run a multi-tenant migration and fail. + if (!isShardMergeEnabled(recipientRst.getPrimary().getDB("admin"))) { + teardown(); + jsTestLog("Skipping Shard Merge-specific test"); + quit(); + } + + return { + donorRst, + recipientRst, + tenantMigrationTest, + teardown, + }; +} function assertChangeCollectionEntries(donorEntries, recipientEntries) { assert.eq(donorEntries.length, recipientEntries.length); @@ -59,226 +86,446 @@ function assertChangeCollectionEntries(donorEntries, recipientEntries) { }); } -function getChangeCollectionDocuments(conn) { - // Filter out change collection entries for admin.system.users because 'getTenantConnection' - // will create a user on the donor before we have enabled change streams. Also filter out - // 'create' entries for system.change_collection, since the recipient will have an extra - // entry for the case where changestreams are enabled for a tenant during oplog catchup. - return conn.getDB("config")["system.change_collection"] - .find({ns: {$ne: "admin.system.users"}}) - .toArray(); -} - -// Note: including this explicit early return here due to the fact that multiversion -// suites will execute this test without featureFlagShardMerge enabled (despite the -// presence of the featureFlagShardMerge tag above), which means the test will attempt -// to run a multi-tenant migration and fail. 
-if (!isShardMergeEnabled(tenantMigrationTest.getRecipientPrimary().getDB("admin"))) { - tenantMigrationTest.stop(); - jsTestLog("Skipping Shard Merge-specific test"); - quit(); -} - -const donorPrimary = tenantMigrationTest.getDonorPrimary(); -const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - -const tenantId1 = ObjectId(); -const tenantId2 = ObjectId(); - -const donorTenantConn1 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( - donorPrimary.host, tenantId1, tenantId1.str); - -const donorTenantConn2 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( - donorPrimary.host, tenantId2, tenantId2.str); - -donorRst.setChangeStreamState(donorTenantConn1, true); - -// Open a change stream and insert documents into database.collection before the migration -// starts. -const donorCursor1 = donorTenantConn1.getDB("database").collection.watch([]); -donorTenantConn1.getDB("database") - .collection.insertMany([{_id: "tenant1_1"}, {_id: "tenant1_2"}, {_id: "tenant1_3"}]); -donorTenantConn1.getDB("database").collection.updateOne({_id: "tenant1_3"}, { - $set: {updated: true} -}); - -// Get the first entry from the tenant1 change stream cursor and grab the resume token. -assert.soon(() => donorCursor1.hasNext()); -const {_id: resumeToken1} = donorCursor1.next(); - -const donorTenant1Session = donorTenantConn1.startSession({retryWrites: true}); -const donorTenant1SessionCollection = donorTenant1Session.getDatabase("database").collection; -assert.commandWorked(donorTenant1SessionCollection.insert({_id: "tenant1_4", w: "RETRYABLE"})); -assert.commandWorked(donorTenant1Session.getDatabase("database").runCommand({ - findAndModify: "collection", - query: {_id: "tenant1_4"}, - update: {$set: {updated: true}} -})); - -// Start a transaction and perform some writes. -const donorTxnSession1 = donorTenantConn1.getDB("database").getMongo().startSession(); -donorTxnSession1.startTransaction(); -donorTxnSession1.getDatabase("database").collection.insertOne({_id: "tenant1_in_transaction_1"}); -donorTxnSession1.getDatabase("database").collection.updateOne({_id: "tenant1_in_transaction_1"}, { - $set: {updated: true} -}); -donorTxnSession1.commitTransaction(); -donorTxnSession1.endSession(); - -const fpBeforeMarkingCloneSuccess = - configureFailPoint(recipientPrimary, "fpBeforeMarkingCloneSuccess", {action: "hang"}); - -const migrationUuid = UUID(); -const tenantIds = [tenantId1, tenantId2]; -const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationUuid), - readPreference: {mode: "primary"}, - tenantIds, -}; - -assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - -fpBeforeMarkingCloneSuccess.wait(); - -// Insert more documents after cloning has completed so that oplog entries are applied during oplog -// catchup. -assert.commandWorked(donorTenantConn1.getDB("database").collection.updateOne({_id: "tenant1_2"}, { - $set: {updated: true} -})); - -assert.commandWorked(donorTenant1SessionCollection.insert({_id: "tenant1_5", w: "RETRYABLE"})); -assert.commandWorked( - donorTenant1SessionCollection.updateOne({_id: "tenant1_5"}, {$set: {updated: true}})); - -// Enable change streams for the second tenant during oplog catchup. -donorRst.setChangeStreamState(donorTenantConn2, true); -const donorCursor2 = donorTenantConn2.getDB("database").collection.watch([]); -donorTenantConn2.getDB("database").collection.insertOne({_id: "tenant2_1"}); - -// Get the first entry from the tenant2 change stream cursor and grab the resume token. 
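[Editor's note] The removed test above repeats the same capture-and-resume pattern for each tenant. In sketch form, with `donorTenantConn` and `recipientTenantConn` assumed to be tenant connections as in that test:

```js
// Illustrative sketch only. Capture a resume token on the donor ...
const donorCursor = donorTenantConn.getDB("database").collection.watch([]);
donorTenantConn.getDB("database").collection.insertOne({_id: "seed"});
assert.soon(() => donorCursor.hasNext());
const {_id: resumeToken} = donorCursor.next();

// ... then, after the migration commits, resume from that token on the recipient.
const recipientCursor = recipientTenantConn.getDB("database")
                            .collection.watch([], {resumeAfter: resumeToken});
```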
-assert.soon(() => donorCursor2.hasNext()); -const {_id: resumeToken2} = donorCursor2.next(); - -// Insert another entry so that we can consume it on the Recipient after the migration has -// completed. -donorTenantConn2.getDB("database").collection.insertOne({_id: "tenant2_2"}); - -// Start a transaction and perform some writes. -const donorSession2 = donorTenantConn2.getDB("database").getMongo().startSession(); -donorSession2.startTransaction(); -donorSession2.getDatabase("database").collection.insertOne({_id: "tenant2_in_transaction_1"}); -donorSession2.getDatabase("database").collection.updateOne({_id: "tenant2_in_transaction_1"}, { - $set: {updated: true} -}); -donorSession2.commitTransaction(); - -// Start a transaction and perform some large writes. -const largePad = "a".repeat(10 * 1024 * 1024); -donorSession2.startTransaction(); -donorSession2.getDatabase("database") - .collection.insertOne({_id: "tenant2_in_transaction_2", largePad}); -donorSession2.getDatabase("database").collection.updateOne({_id: "tenant2_in_transaction_2"}, { - $set: {updated: true, largePad: "b" + largePad} -}); -donorSession2.commitTransaction_forTesting(); - -fpBeforeMarkingCloneSuccess.off(); - -TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); -assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); -tenantMigrationTest.waitForMigrationGarbageCollection(migrationUuid, tenantIds[0]); - -const recipientPrimaryTenantConn1 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( - recipientPrimary.host, tenantId1, tenantId1.str); - -// Running ChangeStreamMultitenantReplicaSetTest.getTenantConnection will create a user on the -// primary. Await replication so that we can use the same user on secondaries. -recipientRst.awaitReplication(); - -tenantIds.forEach(tenantId => { - const donorTenantConn = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( - donorPrimary.host, tenantId, tenantId.str); +function validateChangeCollections(tenantId, donorTenantConn, recipientConns) { const donorChangeCollectionDocuments = getChangeCollectionDocuments(donorTenantConn); - recipientRst.nodes.forEach(recipientNode => { + recipientConns.forEach(recipientConn => { jsTestLog( - `Performing change collection validation for tenant ${tenantId} on ${recipientNode}`); - const recipientTenantConn = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( - recipientNode.host, tenantId, tenantId.str); - + `Performing change collection validation for tenant ${tenantId} on ${recipientConn}`); assertChangeCollectionEntries(donorChangeCollectionDocuments, - getChangeCollectionDocuments(recipientTenantConn)); + getChangeCollectionDocuments(recipientConn)); }); -}); - -const recipientSecondaryTenantConn1 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( - recipientRst.getSecondary().host, tenantId1, tenantId1.str); - -// Resume the first change stream on the Recipient primary. -const recipientPrimaryCursor1 = - recipientPrimaryTenantConn1.getDB("database").collection.watch([], {resumeAfter: resumeToken1}); +} -// Resume the first change stream on the Recipient secondary. 
-const recipientSecondaryCursor1 = - recipientSecondaryTenantConn1.getDB("database").collection.watch([], { - resumeAfter: resumeToken1, +function assertCursorChangeEvents(expectedEvents, cursors) { + expectedEvents.forEach(expectedEvent => { + cursors.forEach(cursor => { + assert.soon(() => cursor.hasNext()); + const changeEvent = cursor.next(); + assert.eq(changeEvent.documentKey._id, expectedEvent._id); + assert.eq(changeEvent.operationType, expectedEvent.operationType); + if (expectedEvent.fullDocument) { + assert.eq(changeEvent.fullDocument, expectedEvent.fullDocument); + } + if (expectedEvent.fullDocumentBeforeChange) { + assert.eq(changeEvent.fullDocumentBeforeChange, + expectedEvent.fullDocumentBeforeChange); + } + }); }); +} -[{_id: "tenant1_2", operationType: "insert"}, - {_id: "tenant1_3", operationType: "insert"}, - {_id: "tenant1_3", operationType: "update"}, - {_id: "tenant1_4", operationType: "insert"}, - {_id: "tenant1_4", operationType: "update"}, - {_id: "tenant1_in_transaction_1", operationType: "insert"}, - {_id: "tenant1_in_transaction_1", operationType: "update"}, - {_id: "tenant1_2", operationType: "update"}, - {_id: "tenant1_5", operationType: "insert"}, - {_id: "tenant1_5", operationType: "update"}, -].forEach(expectedEvent => { - [recipientPrimaryCursor1, recipientSecondaryCursor1].forEach(cursor => { - assert.soon(() => cursor.hasNext()); - const changeEvent = cursor.next(); - assert.eq(changeEvent.documentKey._id, expectedEvent._id); - assert.eq(changeEvent.operationType, expectedEvent.operationType); +function assertChangeStreamGetMoreFailure(donorConnections) { + // Test that running a getMore on a change stream cursor after the migration commits throws + // a resumable change stream exception. + donorConnections.forEach(({conn, cursor}) => { + const failedGetMore = conn.getDB("database").runCommand("getMore", { + getMore: cursor._cursorid, + collection: "collection" + }); + assert.commandFailedWithCode( + failedGetMore, + ErrorCodes.ResumeTenantChangeStream, + "Tailing a change stream on the donor after completion of a shard merge should fail."); + assert(failedGetMore.hasOwnProperty("errorLabels")); + assert.contains("ResumableChangeStreamError", failedGetMore.errorLabels); + + // The cursor should have been deleted after the error so a getMore should fail. + assert.commandFailedWithCode( + conn.getDB("database") + .runCommand("getMore", {getMore: cursor._cursorid, collection: "collection"}), + ErrorCodes.CursorNotFound); }); -}); +} -const recipientPrimaryTenantConn2 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( - recipientPrimary.host, tenantId2, tenantId2.str); +function getTenantConnections(rst, tenantId) { + const primaryConn = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( + rst.getPrimary().host, tenantId, tenantId.str); -// Running ChangeStreamMultitenantReplicaSetTest.getTenantConnection will create a user on the -// primary. Await replication so that we can use the same user on secondaries. -recipientRst.awaitReplication(); + // Running ChangeStreamMultitenantReplicaSetTest.getTenantConnection will create a user on the + // primary. Await replication so that we can use the same user on secondaries. 
+ rst.awaitReplication(); -const recipientSecondaryTenantConn2 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( - recipientRst.getSecondary().host, tenantId2, tenantId2.str); + const secondaryConns = rst.getSecondaries().map( + recipientSecondary => ChangeStreamMultitenantReplicaSetTest.getTenantConnection( + recipientSecondary.host, tenantId, tenantId.str)); -// Resume the second change stream on the Recipient primary. -const recipientPrimaryCursor2 = - recipientPrimaryTenantConn2.getDB("database").collection.watch([{$unset: "largePad"}], { - resumeAfter: resumeToken2 - }); + return [primaryConn, ...secondaryConns]; +} -// Resume the second change stream on the Recipient secondary. -const recipientSecondaryCursor2 = - recipientSecondaryTenantConn2.getDB("database").collection.watch([{$unset: "largePad"}], { - resumeAfter: resumeToken2, - }); +function performWrites(conn, _id) { + const collection = conn.getDB("database").collection; + assert.commandWorked(collection.insertOne({_id})); + collection.updateOne({_id}, {$set: {updated: true}}); + collection.deleteOne({_id}); +} -[{_id: "tenant2_2", operationType: "insert"}, - {_id: "tenant2_in_transaction_1", operationType: "insert"}, - {_id: "tenant2_in_transaction_1", operationType: "update"}, - {_id: "tenant2_in_transaction_2", operationType: "insert"}, - {_id: "tenant2_in_transaction_2", operationType: "update"}, -].forEach(expectedEvent => { - [recipientPrimaryCursor2, recipientSecondaryCursor2].forEach(cursor => { - assert.soon(() => cursor.hasNext()); - const changeEvent = cursor.next(); - assert.eq(changeEvent.documentKey._id, expectedEvent._id); - assert.eq(changeEvent.operationType, expectedEvent.operationType); +function performRetryableWrites(conn, _id) { + const session = conn.startSession({retryWrites: true}); + const collection = session.getDatabase("database").collection; + assert.commandWorked(collection.insert({_id, w: "RETRYABLE"})); + assert.commandWorked(session.getDatabase("database").runCommand({ + findAndModify: "collection", + query: {_id}, + update: {$set: {updated: true}} + })); + assert.commandWorked(collection.deleteOne({_id})); +} + +function performTxnWrites(conn, _id) { + const session = conn.getDB("database").getMongo().startSession(); + session.startTransaction(); + session.getDatabase("database").collection.insertOne({_id}); + session.getDatabase("database").collection.updateOne({_id}, {$set: {updated: true}}); + session.getDatabase("database").collection.deleteOne({_id}); + session.commitTransaction(); + session.endSession(); +} + +function performLargeTxnWrites(conn, _id) { + const largePad = "a".repeat(10 * 1024 * 1024); + const session = conn.getDB("database").getMongo().startSession(); + session.startTransaction(); + session.getDatabase("database").collection.insertOne({_id, largePad}); + session.getDatabase("database").collection.updateOne({_id}, { + $set: {updated: true, largePad: `b${largePad}`} }); -}); + session.commitTransaction(); +} + +function getChangeCollectionDocuments(conn) { + // Filter out change collection entries for admin.system.users because 'getTenantConnection' + // will create a user on the donor before we have enabled change streams. Also filter out + // 'create' entries for system.change_collection, since the recipient will have an extra + // entry for the case where changestreams are enabled for a tenant during oplog catchup. 
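[Editor's note] The refactor above folds the donor workload into small helpers (`performWrites`, `performRetryableWrites`, `performTxnWrites`, `performLargeTxnWrites`). The retryable flavor is the one these suites care most about, since those writes go through a logical session with `retryWrites` enabled, which is what the recipient later has to account for. A sketch of that shape, with `donorTenantConn` as an assumed tenant connection:

```js
// Illustrative sketch only; `donorTenantConn` is an assumed tenant connection.
const session = donorTenantConn.startSession({retryWrites: true});
const coll = session.getDatabase("database").collection;
assert.commandWorked(coll.insert({_id: "retryableWrite1", w: "RETRYABLE"}));
assert.commandWorked(session.getDatabase("database").runCommand({
    findAndModify: "collection",
    query: {_id: "retryableWrite1"},
    update: {$set: {updated: true}},
}));
```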
+ return conn.getDB("config")["system.change_collection"] + .find({ns: {$ne: "admin.system.users"}}) + .toArray(); +} -donorRst.stopSet(); -recipientRst.stopSet(); -tenantMigrationTest.stop(); +(() => { + jsTestLog("Test writes before and during the migration with pre and post images enabled"); + const {tenantMigrationTest, donorRst, recipientRst, teardown} = setup(); + + const donorPrimary = tenantMigrationTest.getDonorPrimary(); + const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); + + const tenantId1 = ObjectId(); + const tenantId2 = ObjectId(); + + const donorTenantConn1 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( + donorPrimary.host, tenantId1, tenantId1.str); + + donorRst.setChangeStreamState(donorTenantConn1, true); + + assertDropAndRecreateCollection(donorTenantConn1.getDB("database"), + "collection", + {changeStreamPreAndPostImages: {enabled: true}}); + + // Open a change stream and perform writes before the migration starts. + const donorCursor1 = donorTenantConn1.getDB("database").collection.watch([]); + donorTenantConn1.getDB("database").collection.insertOne({_id: "tenant1_0"}); + performWrites(donorTenantConn1, "tenant1_1"); + performRetryableWrites(donorTenantConn1, "tenant1_2"); + performTxnWrites(donorTenantConn1, "tenant1_in_transaction_1"); + + // Get the first entry from the tenant1 change stream cursor and grab the resume token. + assert.soon(() => donorCursor1.hasNext()); + const {_id: resumeToken1} = donorCursor1.next(); + + const fpBeforeMarkingCloneSuccess = + configureFailPoint(recipientPrimary, "fpBeforeMarkingCloneSuccess", {action: "hang"}); + + const migrationUuid = UUID(); + const tenantIds = [tenantId1, tenantId2]; + const migrationOpts = { + migrationIdString: extractUUIDFromObject(migrationUuid), + readPreference: {mode: "primary"}, + tenantIds, + }; + + assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); + + fpBeforeMarkingCloneSuccess.wait(); + + // Perform more writes after cloning has completed so that oplog entries are applied during + // oplog catchup. + performWrites(donorTenantConn1, "tenant1_3"); + performRetryableWrites(donorTenantConn1, "tenant1_4"); + + // Enable change streams for the second tenant during oplog catchup. + const donorTenantConn2 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( + donorPrimary.host, tenantId2, tenantId2.str); + + donorRst.setChangeStreamState(donorTenantConn2, true); + assertDropAndRecreateCollection(donorTenantConn2.getDB("database"), + "collection", + {changeStreamPreAndPostImages: {enabled: true}}); + + const donorCursor2 = donorTenantConn2.getDB("database").collection.watch([]); + donorTenantConn2.getDB("database").collection.insertOne({_id: "tenant2_0"}); + donorTenantConn2.getDB("database").collection.insertOne({_id: "tenant2_1"}); + + // Get the first entry from the tenant2 change stream cursor and grab the resume token. 
+ assert.soon(() => donorCursor2.hasNext()); + const {_id: resumeToken2} = donorCursor2.next(); + + performTxnWrites(donorTenantConn2, "tenant2_in_transaction_1"); + + fpBeforeMarkingCloneSuccess.off(); + + TenantMigrationTest.assertCommitted( + tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); + + assertChangeStreamGetMoreFailure([ + {conn: donorTenantConn1, cursor: donorCursor1}, + {conn: donorTenantConn2, cursor: donorCursor2}, + ]); + + assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); + tenantMigrationTest.waitForMigrationGarbageCollection(migrationUuid, tenantIds[0]); + + const recipientTenant1Conns = getTenantConnections(recipientRst, tenantId1); + validateChangeCollections(tenantId1, donorTenantConn1, recipientTenant1Conns); + + const tenantConn1Cursors = + recipientTenant1Conns.map(conn => conn.getDB("database").collection.watch([], { + resumeAfter: resumeToken1, + fullDocumentBeforeChange: "required", + fullDocument: "required", + })); + + assertCursorChangeEvents( + [ + {_id: "tenant1_1", operationType: "insert", fullDocument: {_id: "tenant1_1"}}, + { + _id: "tenant1_1", + operationType: "update", + fullDocumentBeforeChange: {_id: "tenant1_1"}, + fullDocument: {_id: "tenant1_1", updated: true} + }, + { + _id: "tenant1_1", + operationType: "delete", + fullDocumentBeforeChange: {_id: "tenant1_1", updated: true}, + }, + { + _id: "tenant1_2", + operationType: "insert", + fullDocument: {_id: "tenant1_2", w: "RETRYABLE"} + }, + { + _id: "tenant1_2", + operationType: "update", + fullDocumentBeforeChange: {_id: "tenant1_2", "w": "RETRYABLE"}, + fullDocument: {_id: "tenant1_2", "w": "RETRYABLE", updated: true} + }, + { + _id: "tenant1_2", + operationType: "delete", + fullDocumentBeforeChange: {_id: "tenant1_2", "w": "RETRYABLE", updated: true} + }, + { + _id: "tenant1_in_transaction_1", + operationType: "insert", + fullDocument: {_id: "tenant1_in_transaction_1"} + }, + { + _id: "tenant1_in_transaction_1", + operationType: "update", + fullDocumentBeforeChange: {_id: "tenant1_in_transaction_1"}, + fullDocument: {_id: "tenant1_in_transaction_1", updated: true} + }, + { + _id: "tenant1_in_transaction_1", + operationType: "delete", + fullDocumentBeforeChange: {_id: "tenant1_in_transaction_1", updated: true} + }, + {_id: "tenant1_3", operationType: "insert", fullDocument: {_id: "tenant1_3"}}, + { + _id: "tenant1_3", + operationType: "update", + fullDocumentBeforeChange: {_id: "tenant1_3"}, + fullDocument: {_id: "tenant1_3", updated: true} + }, + { + _id: "tenant1_3", + operationType: "delete", + fullDocumentBeforeChange: {_id: "tenant1_3", updated: true}, + }, + { + _id: "tenant1_4", + operationType: "insert", + fullDocument: {_id: "tenant1_4", w: "RETRYABLE"} + }, + { + _id: "tenant1_4", + operationType: "update", + fullDocumentBeforeChange: {_id: "tenant1_4", w: "RETRYABLE"}, + fullDocument: {_id: "tenant1_4", w: "RETRYABLE", updated: true} + }, + { + _id: "tenant1_4", + operationType: "delete", + fullDocumentBeforeChange: {_id: "tenant1_4", w: "RETRYABLE", updated: true}, + }, + ], + tenantConn1Cursors); + + const recipientTenant2Conns = getTenantConnections(recipientRst, tenantId2); + validateChangeCollections(tenantId2, donorTenantConn2, recipientTenant2Conns); + + const tenantConn2Cursors = + recipientTenant2Conns.map(conn => conn.getDB("database").collection.watch([], { + resumeAfter: resumeToken2, + fullDocumentBeforeChange: "required", + fullDocument: "required", + })); + + assertCursorChangeEvents( + [ + {_id: "tenant2_1", 
operationType: "insert", fullDocument: {_id: "tenant2_1"}}, + { + _id: "tenant2_in_transaction_1", + operationType: "insert", + fullDocument: {_id: "tenant2_in_transaction_1"} + }, + { + _id: "tenant2_in_transaction_1", + operationType: "update", + fullDocumentBeforeChange: {_id: "tenant2_in_transaction_1"}, + fullDocument: {_id: "tenant2_in_transaction_1", updated: true} + }, + { + _id: "tenant2_in_transaction_1", + operationType: "delete", + fullDocumentBeforeChange: {_id: "tenant2_in_transaction_1", updated: true}, + }, + ], + tenantConn2Cursors); + + teardown(); +})(); + +(() => { + jsTestLog("Test large txns before and during the migration"); + const {tenantMigrationTest, donorRst, recipientRst, teardown} = setup(); + + const donorPrimary = tenantMigrationTest.getDonorPrimary(); + const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); + + const tenantId1 = ObjectId(); + const tenantId2 = ObjectId(); + + const donorTenantConn1 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( + donorPrimary.host, tenantId1, tenantId1.str); + + donorRst.setChangeStreamState(donorTenantConn1, true); + + // Open a change stream and perform writes before the migration starts. + const donorCursor1 = donorTenantConn1.getDB("database").collection.watch([]); + donorTenantConn1.getDB("database").collection.insertOne({_id: "tenant1_0"}); + performLargeTxnWrites(donorTenantConn1, "tenant1_in_transaction_1"); + + // Get the first entry from the tenant1 change stream cursor and grab the resume token. + assert.soon(() => donorCursor1.hasNext()); + const {_id: resumeToken1} = donorCursor1.next(); + + const fpBeforeMarkingCloneSuccess = + configureFailPoint(recipientPrimary, "fpBeforeMarkingCloneSuccess", {action: "hang"}); + + const migrationUuid = UUID(); + const tenantIds = [tenantId1, tenantId2]; + const migrationOpts = { + migrationIdString: extractUUIDFromObject(migrationUuid), + readPreference: {mode: "primary"}, + tenantIds, + }; + + assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); + + fpBeforeMarkingCloneSuccess.wait(); + + // Perform more writes after cloning has completed so that oplog entries are applied during + // oplog catchup. + performLargeTxnWrites(donorTenantConn1, "tenant1_in_transaction_2"); + + // Enable change streams for the second tenant during oplog catchup. + const donorTenantConn2 = ChangeStreamMultitenantReplicaSetTest.getTenantConnection( + donorPrimary.host, tenantId2, tenantId2.str); + + donorRst.setChangeStreamState(donorTenantConn2, true); + + const donorCursor2 = donorTenantConn2.getDB("database").collection.watch([]); + donorTenantConn2.getDB("database").collection.insertOne({_id: "tenant2_0"}); + performLargeTxnWrites(donorTenantConn2, "tenant2_in_transaction_1"); + + // Get the first entry from the tenant2 change stream cursor and grab the resume token. 
+ assert.soon(() => donorCursor2.hasNext()); + const {_id: resumeToken2} = donorCursor2.next(); + + fpBeforeMarkingCloneSuccess.off(); + + TenantMigrationTest.assertCommitted( + tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); + + assertChangeStreamGetMoreFailure([ + {conn: donorTenantConn1, cursor: donorCursor1}, + {conn: donorTenantConn2, cursor: donorCursor2}, + ]); + + assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); + tenantMigrationTest.waitForMigrationGarbageCollection(migrationUuid, tenantIds[0]); + + const recipientTenant1Conns = getTenantConnections(recipientRst, tenantId1); + validateChangeCollections(tenantId1, donorTenantConn1, recipientTenant1Conns); + + const tenantConn1Cursors = recipientTenant1Conns.map( + conn => conn.getDB("database").collection.watch([{$unset: "largePad"}], { + resumeAfter: resumeToken1, + })); + + assertCursorChangeEvents( + [ + { + _id: "tenant1_in_transaction_1", + operationType: "insert", + }, + { + _id: "tenant1_in_transaction_1", + operationType: "update", + }, + { + _id: "tenant1_in_transaction_2", + operationType: "insert", + }, + { + _id: "tenant1_in_transaction_2", + operationType: "update", + }, + ], + tenantConn1Cursors); + + const recipientTenant2Conns = getTenantConnections(recipientRst, tenantId2); + validateChangeCollections(tenantId2, donorTenantConn2, recipientTenant2Conns); + + const tenantConn2Cursors = recipientTenant2Conns.map( + conn => conn.getDB("database").collection.watch([{$unset: "largePad"}], { + resumeAfter: resumeToken2, + })); + + assertCursorChangeEvents( + [ + { + _id: "tenant2_in_transaction_1", + operationType: "insert", + }, + { + _id: "tenant2_in_transaction_1", + operationType: "update", + }, + ], + tenantConn2Cursors); + + teardown(); +})(); diff --git a/jstests/replsets/tenant_migration_recipient_shard_merge_copies_cluster_parameters.js b/jstests/replsets/tenant_migration_recipient_shard_merge_copies_cluster_parameters.js index bc5d7cf2d6e5b..b41a64e0bfd3c 100644 --- a/jstests/replsets/tenant_migration_recipient_shard_merge_copies_cluster_parameters.js +++ b/jstests/replsets/tenant_migration_recipient_shard_merge_copies_cluster_parameters.js @@ -12,10 +12,10 @@ * ] */ +import {tenantCommand} from "jstests/libs/cluster_server_parameter_utils.js"; import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; -load("jstests/libs/cluster_server_parameter_utils.js"); load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); diff --git a/jstests/replsets/tenant_migration_recipient_shard_merge_import_survives_unclean_restart.js b/jstests/replsets/tenant_migration_recipient_shard_merge_import_survives_unclean_restart.js index 3bb2281bb903f..a9bccbb554bd5 100644 --- a/jstests/replsets/tenant_migration_recipient_shard_merge_import_survives_unclean_restart.js +++ b/jstests/replsets/tenant_migration_recipient_shard_merge_import_survives_unclean_restart.js @@ -15,6 +15,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { isShardMergeEnabled, + makeTenantDB, makeX509OptionsForTest } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -52,7 +53,7 @@ const tenantMigrationTest = new TenantMigrationTest( const donorPrimary = tenantMigrationTest.getDonorPrimary(); const tenantId = ObjectId(); -const tenantDB = tenantMigrationTest.tenantDB(tenantId.str, "DB"); +const tenantDB = 
makeTenantDB(tenantId.str, "DB"); const collName = "testColl"; // Do a majority write. diff --git a/jstests/replsets/tenant_migration_recipient_shard_merge_learn_files.js b/jstests/replsets/tenant_migration_recipient_shard_merge_learn_files.js index b1e8ab7d6c668..bec4150c0b01e 100644 --- a/jstests/replsets/tenant_migration_recipient_shard_merge_learn_files.js +++ b/jstests/replsets/tenant_migration_recipient_shard_merge_learn_files.js @@ -8,11 +8,13 @@ * requires_persistence, * serverless, * featureFlagShardMerge, + * # The error code for a rejected recipient command invoked during the reject phase was changed. + * requires_fcv_71, * ] */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; +import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -35,7 +37,7 @@ if (!isShardMergeEnabled(recipientPrimary.getDB("admin"))) { jsTestLog( "Test that recipient state is correctly set to 'learned filenames' after creating the backup cursor"); const tenantId = ObjectId(); -const tenantDB = tenantMigrationTest.tenantDB(tenantId.str, "DB"); +const tenantDB = makeTenantDB(tenantId.str, "DB"); const collName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); @@ -54,8 +56,7 @@ const migrationOpts = { }; jsTestLog(`Starting the tenant migration to wait in failpoint: ${failpoint}`); -assert.commandWorked( - tenantMigrationTest.startMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); +assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); waitInFailPoint.wait(); @@ -72,7 +73,7 @@ tenantMigrationTest.assertRecipientNodesInExpectedState({ migrationId: migrationUuid, tenantId: tenantId.str, expectedState: TenantMigrationTest.ShardMergeRecipientState.kLearnedFilenames, - expectedAccessState: TenantMigrationTest.RecipientAccessState.kReject + expectedAccessState: TenantMigrationTest.RecipientAccessState.kRejectReadsAndWrites }); waitInFailPoint.off(); diff --git a/jstests/replsets/tenant_migration_recipient_shard_merge_oplog_catchup.js b/jstests/replsets/tenant_migration_recipient_shard_merge_oplog_catchup.js index 3b42100f6d2ca..a6bc9bbf159e2 100644 --- a/jstests/replsets/tenant_migration_recipient_shard_merge_oplog_catchup.js +++ b/jstests/replsets/tenant_migration_recipient_shard_merge_oplog_catchup.js @@ -12,7 +12,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; +import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -37,7 +37,7 @@ const kTenant2 = ObjectId().str; // Insert some documents before migration start so that this collection gets cloned by file cloner. const collName = "testColl"; -const tenantDB0 = tenantMigrationTest.tenantDB(kTenant0, "DB"); +const tenantDB0 = makeTenantDB(kTenant0, "DB"); assert.commandWorked(donorPrimary.getDB(tenantDB0)[collName].insert({_id: 0})); const failpoint = "pauseTenantMigrationBeforeLeavingDataSyncState"; @@ -65,10 +65,10 @@ assert.commandWorked(donorPrimary.getDB(tenantDB0)[collName].update({_id: 0}, {' assert.commandWorked(donorPrimary.getDB(tenantDB0)[collName].insert({_id: 1})); // Add new tenant collections. 
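Several of these tests swap `tenantMigrationTest.tenantDB(...)` for the standalone `makeTenantDB(...)` import; as far as the tests are concerned, both simply derive the tenant-prefixed database name, so the substitution is mechanical. A hypothetical sketch of the assumed naming scheme (the real helper lives in jstests/replsets/libs/tenant_migration_util.js and may differ in detail):

```js
// Hypothetical illustration of tenant database naming; not the real helper.
function makeTenantDBSketch(tenantId, dbName) {
    // Tenant data is namespaced by prefixing the tenantId string.
    return `${tenantId}_${dbName}`;
}

const kTenant = ObjectId().str;
assert.eq(`${kTenant}_DB`, makeTenantDBSketch(kTenant, "DB"));
```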
-const tenantDB1 = tenantMigrationTest.tenantDB(kTenant1, "DB"); +const tenantDB1 = makeTenantDB(kTenant1, "DB"); tenantMigrationTest.insertDonorDB(tenantDB1, collName); -const tenantDB2 = tenantMigrationTest.tenantDB(kTenant2, "DB"); +const tenantDB2 = makeTenantDB(kTenant2, "DB"); tenantMigrationTest.insertDonorDB(tenantDB2, collName); // Resume migration. diff --git a/jstests/replsets/tenant_migration_recipient_shard_merge_ttl.js b/jstests/replsets/tenant_migration_recipient_shard_merge_ttl.js index 579c16fd1cbf6..1a0460235b561 100644 --- a/jstests/replsets/tenant_migration_recipient_shard_merge_ttl.js +++ b/jstests/replsets/tenant_migration_recipient_shard_merge_ttl.js @@ -13,9 +13,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - isShardMergeEnabled, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {isShardMergeEnabled, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -36,7 +34,7 @@ if (!isShardMergeEnabled(recipientPrimary.getDB("admin"))) { } const tenantId = ObjectId().str; -const tenantDB = tenantMigrationTest.tenantDB(tenantId, "DB"); +const tenantDB = makeTenantDB(tenantId, "DB"); const collName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); @@ -45,8 +43,6 @@ const expireAfterSeconds = 1; donorPrimary.getDB(tenantDB)[collName].insertOne({name: "deleteMe", lastModifiedDate: new Date()}); donorPrimary.getDB(tenantDB)[collName].createIndex({"lastModifiedDate": 1}, {expireAfterSeconds}); -const hangTTLCollectionCacheAfterRegisteringInfo = - configureFailPoint(recipientPrimary, "hangTTLCollectionCacheAfterRegisteringInfo"); let hangTTLMonitorBetweenPasses = configureFailPoint(recipientPrimary, "hangTTLMonitorBetweenPasses"); @@ -65,29 +61,12 @@ assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); // Wait for a TTL pass to start on the Recipient and then block before continuing. hangTTLMonitorBetweenPasses.wait(); -// Wait until we've registered our TTL index in the cache, but block before committing -// the collection in the catalog. -hangTTLCollectionCacheAfterRegisteringInfo.wait(); - // Wait for TTL expiry. sleep(expireAfterSeconds * 1000); // Unblock the TTL pass on the recipient to let it clean up. hangTTLMonitorBetweenPasses.off(); -// Wait for a full TTL cycle to complete in order to ensure that the TTL cache entry for the -// collection (which does not yet have an entry in the collection catalog) is not deregistered. We -// skip the first pass because it's possible that we can turn off the failpoint and then re-enable -// before the TTL machinery is actually unblocked. -hangTTLMonitorBetweenPasses = - configureFailPoint(recipientPrimary, "hangTTLMonitorBetweenPasses", {}, {skip: 1}); -hangTTLMonitorBetweenPasses.wait(); - -// Unblock TTL registration, thus allowing the collection to be registered in the catalog. 
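The TTL changes above lean on the standard pattern for making expiry deterministic in tests: a TTL index with a short `expireAfterSeconds`, plus the `hangTTLMonitorBetweenPasses` failpoint to line the test up with TTL passes rather than guessing at timing. A sketch of that pattern, assuming a connection `node` to the node under test:

```js
// Illustration only: coordinate with the TTL monitor via a failpoint.
const coll = node.getDB("ttlDemo").testColl;
coll.insertOne({lastModifiedDate: new Date()});
coll.createIndex({lastModifiedDate: 1}, {expireAfterSeconds: 1});

const hangBetweenPasses = configureFailPoint(node, "hangTTLMonitorBetweenPasses");
hangBetweenPasses.wait();  // a pass boundary was reached; the monitor is parked
sleep(2 * 1000);           // let the document pass its expiry
hangBetweenPasses.off();   // the next pass is free to delete it
assert.soon(() => coll.countDocuments({}) === 0);
```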
-hangTTLCollectionCacheAfterRegisteringInfo.off(); - -hangTTLMonitorBetweenPasses.off(); - TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); tenantMigrationTest.waitForMigrationGarbageCollection(migrationOpts); diff --git a/jstests/replsets/tenant_migration_recipient_startup_recovery.js b/jstests/replsets/tenant_migration_recipient_startup_recovery.js index a59601be1be6a..8ceea6af23911 100644 --- a/jstests/replsets/tenant_migration_recipient_startup_recovery.js +++ b/jstests/replsets/tenant_migration_recipient_startup_recovery.js @@ -10,6 +10,8 @@ * requires_majority_read_concern, * requires_persistence, * serverless, + * # The error code for a rejected recipient command invoked during the reject phase was changed. + * requires_fcv_71, * ] */ @@ -76,7 +78,7 @@ if (recipientDoc) { .getTenantMigrationAccessBlocker( {recipientNode: recipientPrimary, tenantId: kTenantId}) .recipient.state == - TenantMigrationTest.RecipientAccessState.kReject); + TenantMigrationTest.RecipientAccessState.kRejectReadsAndWrites); } break; case TenantMigrationTest.RecipientState.kConsistent: @@ -85,7 +87,7 @@ if (recipientDoc) { .getTenantMigrationAccessBlocker( {recipientNode: recipientPrimary, tenantId: kTenantId}) .recipient.state == - TenantMigrationTest.RecipientAccessState.kRejectBefore); + TenantMigrationTest.RecipientAccessState.kRejectReadsBefore); assert.soon(() => bsonWoCompare( tenantMigrationTest @@ -98,11 +100,11 @@ if (recipientDoc) { .getTenantMigrationAccessBlocker( {recipientNode: recipientPrimary, tenantId: kTenantId}) .recipient.state == - TenantMigrationTest.RecipientAccessState.kReject); + TenantMigrationTest.RecipientAccessState.kRejectReadsAndWrites); } break; default: - throw new Error(`Invalid state "${state}" from recipient doc.`); + throw new Error(`Invalid state "${recipientDoc.state}" from recipient doc.`); } } diff --git a/jstests/replsets/tenant_migration_recipient_sync_donor_timestamp.js b/jstests/replsets/tenant_migration_recipient_sync_donor_timestamp.js deleted file mode 100644 index 81422e2845455..0000000000000 --- a/jstests/replsets/tenant_migration_recipient_sync_donor_timestamp.js +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Exercises the code path for the recipientSyncData command that waits until a timestamp provided - * by the donor is majority committed: make sure that in this code path, when the recipient is - * interrupted by a primary step down, the recipient properly swaps the error code to the true code - * (like primary step down) that the donor can retry on. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_persistence, - * requires_replication, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject() - -// Make the batch size small so that we can pause before all the batches are applied. 
-const tenantMigrationTest = new TenantMigrationTest( - {name: jsTestName(), sharedOptions: {setParameter: {tenantApplierBatchSizeOps: 2}}}); - -const kMigrationId = UUID(); -const kTenantId = ObjectId().str; -const kReadPreference = { - mode: "primary" -}; -const migrationOpts = { - migrationIdString: extractUUIDFromObject(kMigrationId), - tenantId: kTenantId, - readPreference: kReadPreference -}; - -const dbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); -const collName = jsTestName() + "_collection"; - -const recipientRst = tenantMigrationTest.getRecipientRst(); -const recipientPrimary = recipientRst.getPrimary(); - -// FailPoint to pause right before the data consistent promise is fulfilled. -const fpBeforeDataConsistent = configureFailPoint( - recipientPrimary, "fpBeforeFulfillingDataConsistentPromise", {action: "hang"}); -const fpBeforeApplierFutureCalled = - configureFailPoint(recipientPrimary, "fpWaitUntilTimestampMajorityCommitted"); - -tenantMigrationTest.insertDonorDB(dbName, collName); - -jsTestLog("Starting migration."); -// Start the migration, and allow it to progress to the point where the _dataConsistentPromise has -// been fulfilled. -tenantMigrationTest.startMigration(migrationOpts); - -jsTestLog("Waiting for data consistent promise."); -// Pause right before the _dataConsistentPromise is fulfilled. Therefore, the applier has -// finished applying entries at least until dataConsistentStopDonorOpTime. -fpBeforeDataConsistent.wait(); - -jsTestLog("Pausing the tenant oplog applier."); -// Pause the applier now. All the entries that the applier cannot process now are past the -// dataConsistentStopDonorOpTime. -const fpPauseOplogApplier = - configureFailPoint(recipientPrimary, "fpBeforeTenantOplogApplyingBatch"); - -jsTestLog("Writing to donor db."); -// Send writes to the donor. The applier will not be able to process these as it is paused. -const docsToApply = [...Array(10).keys()].map((i) => ({a: i})); -tenantMigrationTest.insertDonorDB(dbName, collName, docsToApply); - -jsTestLog("Waiting to hit failpoint in tenant oplog applier."); -fpPauseOplogApplier.wait(); - -jsTestLog("Allowing recipient to respond."); -// Allow the recipient to respond to the donor for the recipientSyncData command that waits on the -// fulfillment of the _dataConsistentPromise. The donor will then send another recipientSyncData -// command that waits on the provided donor timestamp to be majority committed. -fpBeforeDataConsistent.off(); - -jsTestLog("Reach the point where we are waiting for the tenant oplog applier to catch up."); -fpBeforeApplierFutureCalled.wait(); -fpBeforeApplierFutureCalled.off(); - -jsTestLog("Stepping another node up."); -// Make a new recipient primary step up. This will ask the applier to shutdown. 
-recipientRst.stepUp(recipientRst.getSecondaries()[0]); - -jsTestLog("Release the tenant oplog applier failpoint."); -fpPauseOplogApplier.off(); - -jsTestLog("Waiting for migration to complete."); -TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); - -tenantMigrationTest.stop(); diff --git a/jstests/replsets/tenant_migration_recipient_sync_source_reconnect_delayed_secondary.js b/jstests/replsets/tenant_migration_recipient_sync_source_reconnect_delayed_secondary.js deleted file mode 100644 index c95fb70d47494..0000000000000 --- a/jstests/replsets/tenant_migration_recipient_sync_source_reconnect_delayed_secondary.js +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Tests that a migration will continuously retry sync source selection when there are no available - * donor hosts. Also checks that a donor host is considered an uneligible sync source when it has a - * majority OpTime earlier than the recipient's stored 'startApplyingDonorOpTime'. - * - * Tests that if the stale donor host advances its majority OpTime to 'startApplyingDonorOpTime' - * or later, the recipient will successfully choose that donor as sync source and resume the - * migration. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * # The currentOp output field 'dataSyncCompleted' was renamed to 'migrationCompleted'. - * requires_fcv_70, - * serverless, - * ] - */ - -import { - setUpMigrationSyncSourceTest -} from "jstests/replsets/libs/tenant_migration_recipient_sync_source.js"; -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; - -// After this setUp() call, we should have a migration with 'secondary' read preference. The -// recipient should be continuously retrying sync source selection, unable to choose -// 'delayedSecondary' because it is too stale and 'donorSecondary' because it is down. -const { - tenantMigrationTest, - migrationOpts, - donorSecondary, - delayedSecondary, - hangAfterCreatingConnections -} = setUpMigrationSyncSourceTest(); - -if (!tenantMigrationTest) { - // Feature flag was not enabled. - quit(); -} - -jsTestLog("Restarting replication on 'delayedSecondary'"); -restartServerReplication(delayedSecondary); - -// The recipient should eventually be able to connect to the lagged secondary, after the secondary -// has caught up and the exclude timeout has expired. -hangAfterCreatingConnections.wait(); - -const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); -const res = recipientPrimary.adminCommand({currentOp: true, desc: "tenant recipient migration"}); -const currOp = res.inprog[0]; -assert.eq(delayedSecondary.host, - currOp.donorSyncSource, - `the recipient should only be able to choose 'delayedSecondary' as sync source`); - -hangAfterCreatingConnections.off(); - -TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); -assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); - -// Remove 'donorSecondary' so that the test can complete properly. 
-const donorRst = tenantMigrationTest.getDonorRst(); -donorRst.remove(donorSecondary); -donorRst.stopSet(); -tenantMigrationTest.stop(); diff --git a/jstests/replsets/tenant_migration_recipient_sync_source_restart_donor_secondary.js b/jstests/replsets/tenant_migration_recipient_sync_source_restart_donor_secondary.js deleted file mode 100644 index 10ded49a34ba1..0000000000000 --- a/jstests/replsets/tenant_migration_recipient_sync_source_restart_donor_secondary.js +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Tests that a migration will continuously retry sync source selection when there are no available - * donor hosts. Also checks that a donor host is considered an uneligible sync source when it has a - * majority OpTime earlier than the recipient's stored 'startApplyingDonorOpTime'. - * - * Tests that if a donor host becomes available, the recipient will successfully choose it as a - * sync source and resume the migration. - * - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * # The currentOp output field 'dataSyncCompleted' was renamed to 'migrationCompleted'. - * requires_fcv_70, - * serverless, - * ] - */ - -import { - setUpMigrationSyncSourceTest -} from "jstests/replsets/libs/tenant_migration_recipient_sync_source.js"; -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; - -// After this setUp() call, we should have a migration with 'secondary' read preference. The -// recipient should be continuously retrying sync source selection, unable to choose -// 'delayedSecondary' because it is too stale and 'donorSecondary' because it is down. -const { - tenantMigrationTest, - migrationOpts, - donorSecondary, - delayedSecondary, - hangAfterCreatingConnections -} = setUpMigrationSyncSourceTest(); - -if (!tenantMigrationTest) { - // Feature flag was not enabled. - quit(); -} - -const donorRst = tenantMigrationTest.getDonorRst(); - -jsTestLog("Restarting 'donorSecondary'"); -donorRst.start(donorSecondary, null /* options */, true /* restart */); - -// The recipient should eventually be able to connect to the donor secondary, after the node reaches -// 'secondary' state. -hangAfterCreatingConnections.wait(); - -const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); -const res = recipientPrimary.adminCommand({currentOp: true, desc: "tenant recipient migration"}); -const currOp = res.inprog[0]; -// 'donorSecondary' should always be the chosen sync source, since read preference is 'secondary' -// and 'delayedSecondary' cannot be chosen because it is too stale. 
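Both of the removed sync-source tests introspect the recipient through `currentOp` filtered by the migration's description string, which exposes (among other fields) the donor host chosen as sync source. A sketch of that check, assuming a connection `recipientPrimary`:

```js
// Illustration only: inspect which donor host the recipient is syncing from.
const res = recipientPrimary.adminCommand({currentOp: true, desc: "tenant recipient migration"});
const currOp = res.inprog[0];
jsTestLog(`recipient chose ${currOp.donorSyncSource} as its sync source`);
```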
-assert.eq(donorSecondary.host, - currOp.donorSyncSource, - `the recipient should only be able to choose 'donorSecondary' as sync source`); - -hangAfterCreatingConnections.off(); -restartServerReplication(delayedSecondary); - -TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); -assert.commandWorked(tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString)); - -donorRst.stopSet(); -tenantMigrationTest.stop(); diff --git a/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover.js b/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover.js deleted file mode 100644 index ea9654d97acd0..0000000000000 --- a/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover.js +++ /dev/null @@ -1,132 +0,0 @@ -/** - * Tests that in tenant migration, the recipient set can resume collection cloning from the last - * document cloned after a failover. - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - checkTenantDBHashes, - makeX509OptionsForTest, -} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' - -const tenantMigrationFailoverTest = function(isTimeSeries, createCollFn, docs) { - const batchSize = 2; - const recipientRst = new ReplSetTest({ - nodes: 2, - name: jsTestName() + "_recipient", - serverless: true, - nodeOptions: Object.assign(makeX509OptionsForTest().recipient, { - setParameter: { - // Use a batch size of 2 so that collection cloner requires more than a single - // batch to complete. - collectionClonerBatchSize: batchSize, - // Allow reads on recipient before migration completes for testing. - 'failpoint.tenantMigrationRecipientNotRejectReads': tojson({mode: 'alwaysOn'}), - } - }) - }); - - recipientRst.startSet(); - recipientRst.initiate(); - - const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), recipientRst: recipientRst}); - const donorPrimary = tenantMigrationTest.getDonorPrimary(); - - const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); - const donorDB = donorPrimary.getDB(dbName); - const collName = "testColl"; - - const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); - - // Create collection and insert documents. - assert.commandWorked(createCollFn(donorDB, collName)); - tenantMigrationTest.insertDonorDB(dbName, collName, docs); - - const migrationId = UUID(); - const migrationIdString = extractUUIDFromObject(migrationId); - const migrationOpts = { - migrationIdString: migrationIdString, - recipientConnString: tenantMigrationTest.getRecipientConnString(), - tenantId, - }; - - // Configure a fail point to have the recipient primary hang after cloning 2 documents. - const recipientDb = recipientPrimary.getDB(dbName); - let recipientColl = isTimeSeries ? recipientDb.getCollection("system.buckets." + collName) - : recipientDb.getCollection(collName); - - const hangDuringCollectionClone = - configureFailPoint(recipientDb, - "tenantMigrationHangCollectionClonerAfterHandlingBatchResponse", - {nss: recipientColl.getFullName()}); - - // Start a migration and wait for recipient to hang after cloning 2 documents. 
- assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - hangDuringCollectionClone.wait(); - assert.soon(() => recipientColl.find().itcount() === batchSize); - - // Insert some documents that will be fetched by the recipient. This is to test that on - // failover, the fetcher will resume fetching from where it left off. The system is expected - // to crash if the recipient fetches a duplicate oplog entry upon resuming the migration. - tenantMigrationTest.insertDonorDB(dbName, "aNewColl", [{_id: "docToBeFetched"}]); - assert.soon(() => { - const configDb = recipientPrimary.getDB("config"); - const oplogBuffer = configDb.getCollection("repl.migration.oplog_" + migrationIdString); - return oplogBuffer.find({"entry.o._id": "docToBeFetched"}).count() === 1; - }); - - // Step up a new node in the recipient set and trigger a failover. The new primary should resume - // cloning starting from the third document. - const newRecipientPrimary = recipientRst.getSecondaries()[0]; - recipientRst.stepUp(newRecipientPrimary); - hangDuringCollectionClone.off(); - recipientRst.getPrimary(); - - // The migration should go through after recipient failover. - TenantMigrationTest.assertCommitted( - tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); - - // Check that recipient has cloned all documents in the collection. - recipientColl = newRecipientPrimary.getDB(dbName).getCollection(collName); - assert.eq(docs.length, recipientColl.find().itcount()); - assert.docEq(docs, recipientColl.find().sort({_id: 1}).toArray()); - checkTenantDBHashes({ - donorRst: tenantMigrationTest.getDonorRst(), - recipientRst: tenantMigrationTest.getRecipientRst(), - tenantId - }); - - tenantMigrationTest.stop(); - recipientRst.stopSet(); -}; - -jsTestLog("Running tenant migration test for time-series collection"); -tenantMigrationFailoverTest(true, - (db, collName) => db.createCollection( - collName, {timeseries: {timeField: "time", metaField: "bucket"}}), - [ - // Group each document in its own bucket in order to work with the - // collectionClonerBatchSize we set at the recipient replSet. - {_id: 1, time: ISODate(), bucket: "a"}, - {_id: 2, time: ISODate(), bucket: "b"}, - {_id: 3, time: ISODate(), bucket: "c"}, - {_id: 4, time: ISODate(), bucket: "d"} - ]); - -jsTestLog("Running tenant migration test for regular collection"); -tenantMigrationFailoverTest(false, - (db, collName) => db.createCollection(collName), - [{_id: 0}, {_id: "string"}, {_id: UUID()}, {_id: new Date()}]); diff --git a/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover_with_dropped_views.js b/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover_with_dropped_views.js deleted file mode 100644 index 262e40edf0040..0000000000000 --- a/jstests/replsets/tenant_migration_resume_collection_cloner_after_recipient_failover_with_dropped_views.js +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Tests that in tenant migration, the collection recreated on a dropped view namespace is handled - * correctly on resuming the logical tenant collection cloning phase due to recipient failover. 
- * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' - -const tenantMigrationFailoverTest = function(isTimeSeries, createCollFn) { - const recipientRst = new ReplSetTest({ - nodes: 2, - name: jsTestName() + "_recipient", - serverless: true, - nodeOptions: Object.assign(makeX509OptionsForTest().recipient, { - setParameter: { - // Allow reads on recipient before migration completes for testing. - 'failpoint.tenantMigrationRecipientNotRejectReads': tojson({mode: 'alwaysOn'}), - } - }) - }); - - recipientRst.startSet(); - recipientRst.initiate(); - - const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), recipientRst: recipientRst}); - - const donorRst = tenantMigrationTest.getDonorRst(); - const donorPrimary = donorRst.getPrimary(); - - const tenantId = ObjectId().str; - const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); - const donorDB = donorPrimary.getDB(dbName); - const collName = "testColl"; - const donorColl = donorDB[collName]; - - let getCollectionInfo = function(conn) { - return conn.getDB(dbName).getCollectionInfos().filter(coll => { - return coll.name === collName; - }); - }; - - // Create a timeseries collection or a regular view. - assert.commandWorked(createCollFn(donorDB, collName)); - donorRst.awaitReplication(); - - const migrationId = UUID(); - const migrationIdString = extractUUIDFromObject(migrationId); - const migrationOpts = { - migrationIdString: migrationIdString, - recipientConnString: tenantMigrationTest.getRecipientConnString(), - tenantId, - }; - - const recipientPrimary = recipientRst.getPrimary(); - const recipientDb = recipientPrimary.getDB(dbName); - const recipientSystemViewsColl = recipientDb.getCollection("system.views"); - - // Configure a fail point to have the recipient primary hang after cloning - // "_testDB.system.views" collection. - const hangDuringCollectionClone = - configureFailPoint(recipientPrimary, - "tenantMigrationHangCollectionClonerAfterHandlingBatchResponse", - {nss: recipientSystemViewsColl.getFullName()}); - - // Start the migration and wait for the migration to hang after cloning - // "_testDB.system.views" collection. - assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); - hangDuringCollectionClone.wait(); - - assert.soon(() => recipientSystemViewsColl.find().itcount() >= 1); - recipientRst.awaitLastOpCommitted(); - const newRecipientPrimary = recipientRst.getSecondaries()[0]; - - // Verify that a view has been registered for "_testDB.testColl" on the new - // recipient primary. - let collectionInfo = getCollectionInfo(newRecipientPrimary); - assert.eq(1, collectionInfo.length); - assert(collectionInfo[0].type === (isTimeSeries ? "timeseries" : "view"), - "data store type mismatch: " + tojson(collectionInfo[0])); - - // Drop the view and create a regular collection with the same namespace as the - // dropped view on donor. 
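The removed dropped-view test relies on the fact that creating a time-series collection registers a view of type 'timeseries' on the collection namespace, which is what the cloner sees before the donor drops and recreates the namespace as a regular collection. A minimal sketch, assuming a database handle `db`:

```js
// Illustration only: a time-series collection is listed as a 'timeseries' view.
assert.commandWorked(db.createCollection("tsColl", {timeseries: {timeField: "time"}}));
const info = db.getCollectionInfos({name: "tsColl"})[0];
assert.eq("timeseries", info.type);
```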
- assert(donorColl.drop()); - assert.commandWorked(donorDB.createCollection(collName)); - - // We need to skip TenantDatabaseCloner::listExistingCollectionsStage() to make sure - // the recipient always clone the above newly created regular collection after the failover. - // Currently, we restart cloning after a failover, only from the collection whose UUID is - // greater than or equal to the last collection we have on disk. - const skiplistExistingCollectionsStage = - configureFailPoint(newRecipientPrimary, "skiplistExistingCollectionsStage"); - - // Step up a new node in the recipient set and trigger a failover. - recipientRst.stepUp(newRecipientPrimary); - hangDuringCollectionClone.off(); - - // The migration should go through after recipient failover. - TenantMigrationTest.assertCommitted( - tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); - - // Check that recipient has dropped the view and and re-created the regular collection as part - // of migration oplog catchup phase. - collectionInfo = getCollectionInfo(newRecipientPrimary); - assert.eq(1, collectionInfo.length); - assert(collectionInfo[0].type === "collection", - "data store type mismatch: " + tojson(collectionInfo[0])); - - tenantMigrationTest.stop(); - recipientRst.stopSet(); -}; - -jsTestLog("Running tenant migration test for time-series collection"); -// Creating a timeseries collection, implicity creates a view on the 'collName' collection -// namespace. -tenantMigrationFailoverTest(true, - (db, collName) => db.createCollection( - collName, {timeseries: {timeField: "time", metaField: "bucket"}})); - -jsTestLog("Running tenant migration test for regular view"); -tenantMigrationFailoverTest(false, - (db, collName) => db.createView(collName, "sourceCollection", [])); diff --git a/jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js b/jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js deleted file mode 100644 index 0919240cf249a..0000000000000 --- a/jstests/replsets/tenant_migration_resume_collection_cloner_after_rename.js +++ /dev/null @@ -1,124 +0,0 @@ -/** - * Tests that in tenant migration, the recipient set can resume collection cloning from the last - * document cloned after a failover even if the collection has been renamed on the donor. - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - checkTenantDBHashes, - makeX509OptionsForTest, - runMigrationAsync -} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' -load("jstests/libs/parallelTester.js"); // for 'Thread' -load('jstests/replsets/rslib.js'); // 'createRstArgs' - -const recipientRst = new ReplSetTest({ - nodes: 2, - name: jsTestName() + "_recipient", - serverless: true, - nodeOptions: Object.assign(makeX509OptionsForTest().recipient, { - setParameter: { - // Use a batch size of 2 so that collection cloner requires more than a single batch to - // complete. - collectionClonerBatchSize: 2, - // Allow reads on recipient before migration completes for testing. 
- 'failpoint.tenantMigrationRecipientNotRejectReads': tojson({mode: 'alwaysOn'}), - } - }) -}); - -recipientRst.startSet(); -recipientRst.initiate(); - -const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), recipientRst: recipientRst}); -const tenantId = ObjectId().str; -const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); -const collName = "testColl"; - -const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); -const donorPrimary = tenantMigrationTest.getDonorPrimary(); - -// Test _id with mixed bson types. -const docs = [{_id: 0}, {_id: "string"}, {_id: UUID()}, {_id: new Date()}]; -tenantMigrationTest.insertDonorDB(dbName, collName, docs); - -const migrationId = UUID(); -const migrationIdString = extractUUIDFromObject(migrationId); -const migrationOpts = { - migrationIdString: migrationIdString, - recipientConnString: tenantMigrationTest.getRecipientConnString(), - tenantId, -}; - -// Configure a fail point to have the recipient primary hang after cloning 2 documents. -const recipientDb = recipientPrimary.getDB(dbName); -let recipientColl = recipientDb.getCollection(collName); -const hangDuringCollectionClone = - configureFailPoint(recipientDb, - "tenantMigrationHangCollectionClonerAfterHandlingBatchResponse", - {nss: recipientColl.getFullName()}); - -// Start a migration and wait for recipient to hang after cloning 2 documents. -const donorRstArgs = createRstArgs(tenantMigrationTest.getDonorRst()); -const migrationThread = new Thread(runMigrationAsync, migrationOpts, donorRstArgs); -migrationThread.start(); -hangDuringCollectionClone.wait(); -assert.soon(() => recipientColl.find().itcount() === 2); - -// Insert some documents that will be fetched by the recipient. This is to test that on failover, -// the fetcher will resume fetching from where it left off. The system is expected to crash if -// the recipient fetches a duplicate oplog entry upon resuming the migration. -tenantMigrationTest.insertDonorDB(dbName, "aNewColl", [{_id: "docToBeFetched"}]); -assert.soon(() => { - const configDb = recipientPrimary.getDB("config"); - const oplogBuffer = configDb.getCollection("repl.migration.oplog_" + migrationIdString); - return oplogBuffer.find({"entry.o._id": "docToBeFetched"}).count() === 1; -}); - -recipientRst.awaitLastOpCommitted(); - -// Set a failpoint to prevent the new recipient primary from completing the migration before the -// donor renames the collection. -const newRecipientPrimary = recipientRst.getSecondaries()[0]; -const fpPauseAtStartOfMigration = - configureFailPoint(newRecipientPrimary, "pauseAfterRunTenantMigrationRecipientInstance"); - -// Step up a new node in the recipient set and trigger a failover. The new primary should resume -// cloning starting from the third document. -recipientRst.stepUp(newRecipientPrimary); -hangDuringCollectionClone.off(); -recipientRst.getPrimary(); - -// Rename the collection on the donor. -const donorColl = donorPrimary.getDB(dbName).getCollection(collName); -const collNameRenamed = collName + "_renamed"; -assert.commandWorked(donorColl.renameCollection(collNameRenamed)); - -// The migration should go through after recipient failover. -fpPauseAtStartOfMigration.off(); -TenantMigrationTest.assertCommitted(migrationThread.returnData()); - -// Check that recipient has cloned all documents in the renamed collection. 
-recipientColl = newRecipientPrimary.getDB(dbName).getCollection(collNameRenamed); -assert.eq(4, recipientColl.find().itcount()); -assert.eq(recipientColl.find().sort({_id: 1}).toArray(), docs); -checkTenantDBHashes({ - donorRst: tenantMigrationTest.getDonorRst(), - recipientRst: tenantMigrationTest.getRecipientRst(), - tenantId -}); - -tenantMigrationTest.stop(); -recipientRst.stopSet(); diff --git a/jstests/replsets/tenant_migration_resume_oplog_application.js b/jstests/replsets/tenant_migration_resume_oplog_application.js deleted file mode 100644 index 70849750914b0..0000000000000 --- a/jstests/replsets/tenant_migration_resume_oplog_application.js +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Tests that in a tenant migration, the recipient primary will resume oplog application on - * failover. - * @tags: [ - * incompatible_with_macos, - * incompatible_with_shard_merge, - * incompatible_with_windows_tls, - * requires_majority_read_concern, - * requires_persistence, - * serverless, - * ] - */ - -import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - checkTenantDBHashes, - makeX509OptionsForTest, - runMigrationAsync, -} from "jstests/replsets/libs/tenant_migration_util.js"; - -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' -load("jstests/libs/parallelTester.js"); // for 'Thread' -load("jstests/libs/write_concern_util.js"); // for 'stopReplicationOnSecondaries' -load("jstests/aggregation/extras/utils.js"); // For assertArrayEq. -load('jstests/replsets/rslib.js'); // For 'createRstArgs' - -const recipientRst = new ReplSetTest({ - nodes: 3, - name: jsTestName() + "_recipient", - serverless: true, - // Use a batch size of 2 so that we can hang in the middle of tenant oplog application. - nodeOptions: Object.assign(makeX509OptionsForTest().recipient, - {setParameter: {tenantApplierBatchSizeOps: 2}}) -}); - -recipientRst.startSet(); -recipientRst.initiate(); - -const tenantMigrationTest = - new TenantMigrationTest({name: jsTestName(), recipientRst: recipientRst}); - -const tenantId = ObjectId().str; -const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); -const collName = "testColl"; - -const donorPrimary = tenantMigrationTest.getDonorPrimary(); -const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); -const donorRst = tenantMigrationTest.getDonorRst(); -const donorTestColl = donorPrimary.getDB(dbName).getCollection(collName); - -// Populate the donor replica set with some initial data and make sure it is majority committed. -const majorityCommittedDocs = [{_id: 0, x: 0}, {_id: 1, x: 1}]; -assert.commandWorked(donorTestColl.insert(majorityCommittedDocs, {writeConcern: {w: "majority"}})); -assert.eq(2, donorTestColl.find().readConcern("majority").itcount()); - -const migrationId = UUID(); -const migrationOpts = { - migrationIdString: extractUUIDFromObject(migrationId), - recipientConnString: tenantMigrationTest.getRecipientConnString(), - tenantId, -}; - -// Configure fail point to have the recipient primary hang after the cloner completes and the oplog -// applier has started. -let waitAfterDatabaseClone = configureFailPoint( - recipientPrimary, "fpAfterStartingOplogApplierMigrationRecipientInstance", {action: "hang"}); -// Configure fail point to hang the tenant oplog applier after it applies the first batch. 
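The removed oplog-application test verifies recipient progress by reading the recipient's own oplog: each donor operation the applier processes is recorded as a no-op entry tagged with the migration id, with the original donor entry carried under `o2`. A sketch of that query, assuming `recipientPrimary` and `migrationId` are in scope:

```js
// Illustration only: list donor ops the recipient has applied for this migration.
const oplog = recipientPrimary.getDB("local").oplog.rs;
const applied = oplog.find({fromTenantMigration: migrationId, op: "n"}).toArray();
applied.forEach(entry => jsTestLog(`applied donor op: ${tojson(entry.o2.o)}`));
```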
-let waitInOplogApplier = configureFailPoint(recipientPrimary, "hangInTenantOplogApplication"); - -// Start a migration and wait for recipient to hang in the tenant database cloner. -const donorRstArgs = createRstArgs(donorRst); -const migrationThread = new Thread(runMigrationAsync, migrationOpts, donorRstArgs); -migrationThread.start(); -waitAfterDatabaseClone.wait(); - -// Insert some writes that will eventually be picked up by the tenant oplog applier on the -// recipient. -const docsToApply = [{_id: 2, x: 2}, {_id: 3, x: 3}, {_id: 4, x: 4}]; -tenantMigrationTest.insertDonorDB(dbName, collName, docsToApply); - -// Wait for the applied oplog batch to be replicated. -waitInOplogApplier.wait(); -recipientRst.awaitReplication(); -let local = recipientPrimary.getDB("local"); -let appliedNoOps = local.oplog.rs.find({fromTenantMigration: migrationId, op: "n"}); -let resultsArr = appliedNoOps.toArray(); -// It is possible that the first batch applied includes a resume no-op token. We do not write no-op -// entries for resume token entries in tenant migrations. -assert.gt(appliedNoOps.count(), 0, resultsArr); -assert.lte(appliedNoOps.count(), 2, resultsArr); -assert.eq(docsToApply[0], resultsArr[0].o2.o, resultsArr); -if (appliedNoOps.count() === 2) { - assert.eq(docsToApply[1], resultsArr[1].o2.o, resultsArr); -} -// Step up a new node in the recipient set and trigger a failover. The new primary should resume -// fetching starting from the unapplied documents. -const newRecipientPrimary = recipientRst.getSecondaries()[0]; -recipientRst.stepUp(newRecipientPrimary); -waitAfterDatabaseClone.off(); -waitInOplogApplier.off(); -recipientRst.getPrimary(); - -// The migration should go through after recipient failover. -TenantMigrationTest.assertCommitted(migrationThread.returnData()); -// Validate that the last no-op entry is applied. -local = newRecipientPrimary.getDB("local"); -appliedNoOps = local.oplog.rs.find({fromTenantMigration: migrationId, op: "n"}); -resultsArr = appliedNoOps.toArray(); -assert.eq(3, appliedNoOps.count(), appliedNoOps); -assert.eq(docsToApply[2], resultsArr[2].o2.o, resultsArr); - -checkTenantDBHashes({ - donorRst: tenantMigrationTest.getDonorRst(), - recipientRst: tenantMigrationTest.getRecipientRst(), - tenantId -}); -tenantMigrationTest.stop(); -recipientRst.stopSet(); diff --git a/jstests/replsets/tenant_migration_retry_session_migration.js b/jstests/replsets/tenant_migration_retry_session_migration.js index e53c3c6b54b42..491b57350b944 100644 --- a/jstests/replsets/tenant_migration_retry_session_migration.js +++ b/jstests/replsets/tenant_migration_retry_session_migration.js @@ -2,12 +2,8 @@ * Tests that retrying a failed tenant migration works even if the config.transactions on the * recipient is not cleaned up after the failed migration. * - * TODO SERVER-61231: aborts migration after sending recipientSyncData and starting - * cloning on recipient, adapt this test to handle file cleanup on recipient. 
- * * @tags: [ * incompatible_with_macos, - * incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_majority_read_concern, * requires_persistence, @@ -16,7 +12,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {checkTenantDBHashes} from "jstests/replsets/libs/tenant_migration_util.js"; +import {checkTenantDBHashes, makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/replsets/rslib.js"); load("jstests/libs/uuid_util.js"); @@ -25,7 +21,7 @@ const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), quickGarbageCollection: true}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDB"); +const kDbName = makeTenantDB(kTenantId, "testDB"); const kCollName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); @@ -35,6 +31,9 @@ tenantMigrationTest.insertDonorDB(kDbName, kCollName, [{_id: 1}, {_id: 2}]); let waitBeforeFetchingTransactions = configureFailPoint(recipientPrimary, "fpBeforeFetchingCommittedTransactions", {action: "hang"}); +// Prevent donor from blocking writes before writing the transactions (necessary for shard merge). +let pauseDonorBeforeBlocking = + configureFailPoint(donorPrimary, "pauseTenantMigrationBeforeLeavingDataSyncState"); const migrationId = UUID(); const migrationOpts = { @@ -75,6 +74,7 @@ for (const lsid of [lsid1, lsid2]) { lsid: lsid })); } +pauseDonorBeforeBlocking.off(); // Abort the first migration. const abortFp = configureFailPoint(donorPrimary, "abortTenantMigrationBeforeLeavingBlockingState"); @@ -91,6 +91,8 @@ assert.commandWorked(recipientPrimary.getDB(kDbName).dropDatabase()); waitBeforeFetchingTransactions = configureFailPoint(recipientPrimary, "fpBeforeFetchingCommittedTransactions", {action: "hang"}); +pauseDonorBeforeBlocking = + configureFailPoint(donorPrimary, "pauseTenantMigrationBeforeLeavingDataSyncState"); // Retry the migration. tenantMigrationTest.startMigration(migrationOpts); @@ -115,6 +117,7 @@ assert.commandWorked(donorPrimary.getDB(kDbName).runCommand({ })); waitBeforeFetchingTransactions.off(); +pauseDonorBeforeBlocking.off(); TenantMigrationTest.assertCommitted(tenantMigrationTest.waitForMigrationToComplete(migrationOpts)); diff --git a/jstests/replsets/tenant_migration_retryable_internal_transaction.js b/jstests/replsets/tenant_migration_retryable_internal_transaction.js new file mode 100644 index 0000000000000..54187ad8ca8bb --- /dev/null +++ b/jstests/replsets/tenant_migration_retryable_internal_transaction.js @@ -0,0 +1,82 @@ +/** + * Tests that tenant migration and shard merge fails upon observing retryable internal transaction + * writes. + * + * @tags: [ + * incompatible_with_macos, + * incompatible_with_windows_tls, + * requires_majority_read_concern, + * requires_persistence, + * serverless, + * ] + */ + +import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; +load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). +load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). 
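Nearly every scenario in this patch choreographs donor and recipient through `configureFailPoint` from fail_point_util.js: the returned handle blocks the test with `wait()` until some node actually reaches the failpoint, and releases it with `off()`. A compact sketch of the pattern as these tests use it (the names are taken from the surrounding tests and stand in for whatever failpoint a given scenario needs):

```js
// Illustration only: park the recipient at a failpoint, act, then release it.
const fp = configureFailPoint(recipientPrimary, "fpBeforeMarkingCloneSuccess", {action: "hang"});

assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts));

fp.wait();   // the recipient has reached the failpoint and is parked
// ...perform donor writes that must be picked up during oplog catch-up...
fp.off();    // release the recipient and let the migration proceed
```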
+ +const tenantMigrationTest = new TenantMigrationTest( + {name: jsTestName(), quickGarbageCollection: true, sharedOptions: {nodes: 1}}); + +const kMigrationId = UUID(); +const kTenantId = ObjectId().str; +const kDbName = makeTenantDB(kTenantId, "testDb"); +const kCollName = "testColl"; +const migrationOpts = { + migrationIdString: extractUUIDFromObject(kMigrationId), + tenantId: kTenantId, +}; + +const donorPrimary = tenantMigrationTest.getDonorPrimary(); +const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); + +const donorPrimaryColl = donorPrimary.getDB(kDbName)[kCollName]; +assert.commandWorked( + donorPrimaryColl.insert({_id: 0, count: 1}, {"writeConcern": {"w": "majority"}})); + +jsTestLog("Testing retryable internal transactions started after migration start."); + +const fpBeforeMarkingCloneSuccess = + configureFailPoint(recipientPrimary, "fpBeforeMarkingCloneSuccess", {action: "hang"}); + +assert.commandWorked(tenantMigrationTest.startMigration(migrationOpts)); + +fpBeforeMarkingCloneSuccess.wait(); + +// Start a retryable internal transaction write. +assert.commandWorked(donorPrimary.getDB("admin").runCommand({ + testInternalTransactions: 1, + commandInfos: [ + { + dbName: kDbName, + command: { + findAndModify: kCollName, + query: {_id: 0}, + update: {$inc: {count: 1}}, + stmtId: NumberInt(0), + }, + }, + + ], + txnNumber: NumberLong(0), + lsid: {id: UUID()}, +})); + +fpBeforeMarkingCloneSuccess.off(); + +TenantMigrationTest.assertAborted( + tenantMigrationTest.waitForMigrationToComplete( + migrationOpts, false /* retryOnRetryableErrors */, true /* forgetMigration */), + ErrorCodes.RetryableInternalTransactionNotSupported); +tenantMigrationTest.waitForMigrationGarbageCollection(migrationOpts.migrationIdString); + +// Drop the tenant database on recipient before retrying migration. +assert.commandWorked(recipientPrimary.getDB(kDbName).dropDatabase()); + +jsTestLog("Testing retryable internal transactions completed before migration start."); + +TenantMigrationTest.assertAborted(tenantMigrationTest.runMigration( + migrationOpts, ErrorCodes.RetryableInternalTransactionNotSupported)); + +tenantMigrationTest.stop(); diff --git a/jstests/replsets/tenant_migration_retryable_write_retry.js b/jstests/replsets/tenant_migration_retryable_write_retry.js index f127d36bd81ab..c757004614448 100644 --- a/jstests/replsets/tenant_migration_retryable_write_retry.js +++ b/jstests/replsets/tenant_migration_retryable_write_retry.js @@ -16,9 +16,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import { - makeX509OptionsForTest, -} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). 
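The retryable internal transaction test above drives the server with raw `lsid`/`txnNumber`/`stmtId` fields, which is also what makes an ordinary write retryable: repeating a command with the same session id and transaction number is treated as a retry rather than a new write. A sketch of that identity, assuming a connection `donorPrimary` and hypothetical names:

```js
// Illustration only: the same lsid + txnNumber makes a repeat a retry, not a new write.
const lsid = {id: UUID()};
const writeCmd = {
    insert: "testColl",
    documents: [{_id: 1}],
    lsid,
    txnNumber: NumberLong(5),
};
assert.commandWorked(donorPrimary.getDB("testDb").runCommand(writeCmd));
assert.commandWorked(donorPrimary.getDB("testDb").runCommand(writeCmd));  // deduplicated retry
assert.eq(1, donorPrimary.getDB("testDb").testColl.countDocuments({_id: 1}));
```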
@@ -48,7 +46,7 @@ recipientRst.initiate(); const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRst, recipientRst}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDb"); +const kDbName = makeTenantDB(kTenantId, "testDb"); const kCollName = "testColl"; const kNs = `${kDbName}.${kCollName}`; diff --git a/jstests/replsets/tenant_migration_retryable_write_retry_on_recipient.js b/jstests/replsets/tenant_migration_retryable_write_retry_on_recipient.js index 245673daa6204..f861cab55e1c2 100644 --- a/jstests/replsets/tenant_migration_retryable_write_retry_on_recipient.js +++ b/jstests/replsets/tenant_migration_retryable_write_retry_on_recipient.js @@ -14,6 +14,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { isShardMergeEnabled, + makeTenantDB, runMigrationAsync } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -25,7 +26,7 @@ load("jstests/replsets/rslib.js"); // 'createRstArgs' const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const kTenantId = ObjectId().str; -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDb"); +const kDbName = makeTenantDB(kTenantId, "testDb"); const kCollNameBefore = "testCollBefore"; const kCollNameDuring = "testCollDuring"; @@ -186,6 +187,7 @@ assert.commandWorked( jsTest.log("Waiting for migration to complete"); waitBeforeFetchingTransactions.off(); TenantMigrationTest.assertCommitted(migrationThread.returnData()); +tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString); // Print the no-op oplog entries for debugging purposes. jsTestLog("Recipient oplog migration entries."); @@ -262,6 +264,7 @@ function testRecipientRetryableWrites(db, writes) { jsTestLog("Run retryable write on primary after the migration"); testRecipientRetryableWrites(recipientDb, beforeWrites); testRecipientRetryableWrites(recipientDb, duringWrites); + jsTestLog("Step up secondary"); const recipientRst = tenantMigrationTest.getRecipientRst(); recipientRst.stepUp(recipientRst.getSecondary()); @@ -269,8 +272,6 @@ jsTestLog("Run retryable write on secondary after the migration"); testRecipientRetryableWrites(recipientRst.getPrimary().getDB(kDbName), beforeWrites); testRecipientRetryableWrites(recipientRst.getPrimary().getDB(kDbName), duringWrites); -tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString); - jsTestLog("Trying a back-to-back migration"); const tenantMigrationTest2 = new TenantMigrationTest( {name: jsTestName() + "2", donorRst: tenantMigrationTest.getRecipientRst()}); diff --git a/jstests/replsets/tenant_migration_shard_merge_conflicting_recipient_sync_data_cmds.js b/jstests/replsets/tenant_migration_shard_merge_conflicting_recipient_sync_data_cmds.js index 2a5a88e10264d..6a866d608b57c 100644 --- a/jstests/replsets/tenant_migration_shard_merge_conflicting_recipient_sync_data_cmds.js +++ b/jstests/replsets/tenant_migration_shard_merge_conflicting_recipient_sync_data_cmds.js @@ -15,6 +15,7 @@ import { getCertificateAndPrivateKey, isShardMergeEnabled, + kProtocolShardMerge, makeX509OptionsForTest, } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -51,13 +52,15 @@ TestData.stopFailPointErrorCode = 4880402; /** * Runs recipientSyncData on the given host and returns the response. 
*/ -function runRecipientSyncDataCmd(primaryHost, { - migrationIdString, - tenantIds, - donorConnectionString, - readPreference, - recipientCertificateForDonor -}) { +function runRecipientSyncDataCmd(primaryHost, + { + migrationIdString, + tenantIds, + donorConnectionString, + readPreference, + recipientCertificateForDonor + }, + protocol) { jsTestLog("Starting a recipientSyncDataCmd for migrationId: " + migrationIdString + " tenantIds: '" + tenantIds + "'"); const primary = new Mongo(primaryHost); @@ -66,7 +69,7 @@ function runRecipientSyncDataCmd(primaryHost, { migrationId: UUID(migrationIdString), donorConnectionString: donorConnectionString, tenantIds: eval(tenantIds), - protocol: "shard merge", + protocol, readPreference: readPreference, startMigrationDonorTimestamp: Timestamp(1, 1), recipientCertificateForDonor: recipientCertificateForDonor @@ -113,9 +116,9 @@ function testConcurrentConflictingMigration(migrationOpts0, migrationOpts1) { // Start the conflicting recipientSyncData cmds. const recipientSyncDataThread0 = - new Thread(runRecipientSyncDataCmd, primary.host, migrationOpts0); + new Thread(runRecipientSyncDataCmd, primary.host, migrationOpts0, kProtocolShardMerge); const recipientSyncDataThread1 = - new Thread(runRecipientSyncDataCmd, primary.host, migrationOpts1); + new Thread(runRecipientSyncDataCmd, primary.host, migrationOpts1, kProtocolShardMerge); recipientSyncDataThread0.start(); recipientSyncDataThread1.start(); diff --git a/jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js b/jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js index 9de3bfebe903f..eb7048ef3c2bb 100644 --- a/jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js +++ b/jstests/replsets/tenant_migration_shard_merge_import_write_conflict_retry.js @@ -66,8 +66,7 @@ const migrationOpts = { migrationIdString: extractUUIDFromObject(migrationId), tenantIds: [tenantId] }; -TenantMigrationTest.assertCommitted( - tenantMigrationTest.runMigration(migrationOpts, {enableDonorStartMigrationFsync: true})); +TenantMigrationTest.assertCommitted(tenantMigrationTest.runMigration(migrationOpts)); tenantMigrationTest.getRecipientRst().nodes.forEach(node => { for (let collectionName of ["myCollection", "myCappedCollection"]) { diff --git a/jstests/replsets/tenant_migration_shard_merge_invalid_inputs.js b/jstests/replsets/tenant_migration_shard_merge_invalid_inputs.js index 81bae99e49488..dbf69f06cc3a0 100644 --- a/jstests/replsets/tenant_migration_shard_merge_invalid_inputs.js +++ b/jstests/replsets/tenant_migration_shard_merge_invalid_inputs.js @@ -17,6 +17,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { isShardMergeEnabled, + kProtocolShardMerge, makeMigrationCertificatesForTest, } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -51,7 +52,7 @@ unsupportedtenantIds.forEach((invalidTenantId) => { const cmd = { donorStartMigration: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, tenantId: invalidTenantId, recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), readPreference, @@ -66,7 +67,7 @@ unsupportedtenantIds.forEach((invalidTenantId) => { assert.commandFailedWithCode(donorPrimary.adminCommand({ donorStartMigration: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, recipientConnectionString: tenantMigrationTest.getDonorRst().getURL(), readPreference, donorCertificateForRecipient: 
migrationCertificates.donorCertificateForRecipient, @@ -78,7 +79,7 @@ assert.commandFailedWithCode(donorPrimary.adminCommand({ assert.commandFailedWithCode(donorPrimary.adminCommand({ donorStartMigration: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL() + "," + donorPrimary.host, readPreference, @@ -91,7 +92,7 @@ assert.commandFailedWithCode(donorPrimary.adminCommand({ assert.commandFailedWithCode(donorPrimary.adminCommand({ donorStartMigration: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, recipientConnectionString: recipientPrimary.host, readPreference, donorCertificateForRecipient: migrationCertificates.donorCertificateForRecipient, @@ -109,7 +110,7 @@ unsupportedtenantIds.forEach((invalidTenantId) => { donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), tenantId: invalidTenantId, tenantIds: [ObjectId()], - protocol: 'shard merge', + protocol: kProtocolShardMerge, startMigrationDonorTimestamp: Timestamp(1, 1), readPreference, recipientCertificateForDonor: migrationCertificates.recipientCertificateForDonor, @@ -121,7 +122,7 @@ unsupportedtenantIds.forEach((invalidTenantId) => { assert.commandFailedWithCode(recipientPrimary.adminCommand({ recipientSyncData: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, tenantIds: [ObjectId()], donorConnectionString: tenantMigrationTest.getRecipientRst().getURL(), startMigrationDonorTimestamp: Timestamp(1, 1), @@ -134,7 +135,7 @@ assert.commandFailedWithCode(recipientPrimary.adminCommand({ assert.commandFailedWithCode(recipientPrimary.adminCommand({ recipientSyncData: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, tenantIds: [ObjectId()], donorConnectionString: `${tenantMigrationTest.getDonorRst().getURL()},${recipientPrimary.host}`, startMigrationDonorTimestamp: Timestamp(1, 1), @@ -147,7 +148,7 @@ assert.commandFailedWithCode(recipientPrimary.adminCommand({ assert.commandFailedWithCode(recipientPrimary.adminCommand({ recipientSyncData: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, tenantIds: [ObjectId()], donorConnectionString: recipientPrimary.host, startMigrationDonorTimestamp: Timestamp(1, 1), @@ -162,7 +163,7 @@ nullTimestamps.forEach((nullTs) => { assert.commandFailedWithCode(donorPrimary.adminCommand({ recipientSyncData: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, tenantIds: [ObjectId()], donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), startMigrationDonorTimestamp: Timestamp(1, 1), @@ -177,7 +178,7 @@ nullTimestamps.forEach((nullTs) => { assert.commandFailedWithCode(recipientPrimary.adminCommand({ recipientSyncData: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), startMigrationDonorTimestamp: Timestamp(1, 1), readPreference, @@ -189,7 +190,7 @@ assert.commandFailedWithCode(recipientPrimary.adminCommand({ assert.commandFailedWithCode(recipientPrimary.adminCommand({ recipientSyncData: 1, migrationId: UUID(), - protocol: 'shard merge', + protocol: kProtocolShardMerge, tenantIds: [], donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), startMigrationDonorTimestamp: Timestamp(1, 1), @@ -201,7 +202,7 @@ assert.commandFailedWithCode(recipientPrimary.adminCommand({ // The decision field must be set for 
recipientForgetMigration with shard merge assert.commandFailedWithCode(recipientPrimary.adminCommand({ recipientForgetMigration: 1, - protocol: "shard merge", + protocol: kProtocolShardMerge, migrationId: UUID(), tenantIds: [ObjectId()], donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), diff --git a/jstests/replsets/tenant_migration_shard_merge_recipient_access_blocker_rollback.js b/jstests/replsets/tenant_migration_shard_merge_recipient_access_blocker_rollback.js index 6da482857ebbf..2b74b7ad9eb7e 100644 --- a/jstests/replsets/tenant_migration_shard_merge_recipient_access_blocker_rollback.js +++ b/jstests/replsets/tenant_migration_shard_merge_recipient_access_blocker_rollback.js @@ -16,6 +16,8 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.j import { getCertificateAndPrivateKey, isShardMergeEnabled, + kProtocolShardMerge, + makeTenantDB, makeX509OptionsForTest } from "jstests/replsets/libs/tenant_migration_util.js"; @@ -64,12 +66,12 @@ function runRollbackAfterMigrationCommitted() { const migrationOpts = { migrationIdString: extractUUIDFromObject(kMigrationId), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, readPreference: kReadPreference }; // Populate the donor side with data. - const dbName = tenantMigrationTest.tenantDB(kTenantId.str, "testDB"); + const dbName = makeTenantDB(kTenantId.str, "testDB"); const collName = "testColl"; const numDocs = 20; tenantMigrationTest.insertDonorDB( @@ -138,7 +140,7 @@ function runRollbackAfterLoneRecipientForgetMigrationCommand() { const recipientCertificateForDonor = getCertificateAndPrivateKey("jstests/libs/tenant_migration_recipient.pem"); - const dbName = tenantMigrationTest.tenantDB(kTenantId.str, "testDB"); + const dbName = makeTenantDB(kTenantId.str, "testDB"); const collName = "testColl"; const originalPrimary = recipientRst.getPrimary(); @@ -156,20 +158,22 @@ function runRollbackAfterLoneRecipientForgetMigrationCommand() { const fpNewPrimary = configureFailPoint(newPrimary, "pauseBeforeRunTenantMigrationRecipientInstance"); - function runRecipientForgetMigration(host, { - migrationIdString, - donorConnectionString, - tenantIds, - readPreference, - recipientCertificateForDonor - }) { + function runRecipientForgetMigration(host, + { + migrationIdString, + donorConnectionString, + tenantIds, + readPreference, + recipientCertificateForDonor + }, + protocol) { const db = new Mongo(host); return db.adminCommand({ recipientForgetMigration: 1, migrationId: UUID(migrationIdString), donorConnectionString, tenantIds: eval(tenantIds), - protocol: "shard merge", + protocol, decision: "committed", readPreference, recipientCertificateForDonor @@ -177,13 +181,16 @@ function runRollbackAfterLoneRecipientForgetMigrationCommand() { } const recipientForgetMigrationThread = - new Thread(runRecipientForgetMigration, originalPrimary.host, { - migrationIdString: extractUUIDFromObject(kMigrationId), - donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), - tenantIds: tojson([kTenantId]), - readPreference: kReadPreference, - recipientCertificateForDonor - }); + new Thread(runRecipientForgetMigration, + originalPrimary.host, + { + migrationIdString: extractUUIDFromObject(kMigrationId), + donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), + tenantIds: tojson([kTenantId]), + readPreference: kReadPreference, + recipientCertificateForDonor + }, + kProtocolShardMerge); // Run a delayed/retried recipientForgetMigration command after the state doc has been 
deleted. recipientForgetMigrationThread.start(); diff --git a/jstests/replsets/tenant_migration_shard_merge_recipient_current_op.js b/jstests/replsets/tenant_migration_shard_merge_recipient_current_op.js index 5d7bcc78929a6..0bb94bc0fe284 100644 --- a/jstests/replsets/tenant_migration_shard_merge_recipient_current_op.js +++ b/jstests/replsets/tenant_migration_shard_merge_recipient_current_op.js @@ -18,6 +18,7 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.j import { forgetMigrationAsync, isShardMergeEnabled, + makeTenantDB } from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). @@ -55,7 +56,7 @@ const dbsToClone = ["db0", "db1", "db2"]; const collsToClone = ["coll0", "coll1"]; const docs = [...Array(10).keys()].map((i) => ({x: i})); for (const db of dbsToClone) { - const tenantDB = tenantMigrationTest.tenantDB(kTenantId, db); + const tenantDB = makeTenantDB(kTenantId, db); for (const coll of collsToClone) { tenantMigrationTest.insertDonorDB(tenantDB, coll, docs); } diff --git a/jstests/replsets/tenant_migration_shard_merge_recipient_retry_forget_migration.js b/jstests/replsets/tenant_migration_shard_merge_recipient_retry_forget_migration.js index 1dcbacef5b8d4..6ddb4baa56e54 100644 --- a/jstests/replsets/tenant_migration_shard_merge_recipient_retry_forget_migration.js +++ b/jstests/replsets/tenant_migration_shard_merge_recipient_retry_forget_migration.js @@ -15,7 +15,9 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { getCertificateAndPrivateKey, - isShardMergeEnabled + isShardMergeEnabled, + kProtocolShardMerge, + makeTenantDB } from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). @@ -41,27 +43,24 @@ const tenantId = ObjectId(); const recipientCertificateForDonor = getCertificateAndPrivateKey("jstests/libs/tenant_migration_recipient.pem"); -const dbName = tenantMigrationTest.tenantDB(tenantId.str, "test"); +const dbName = makeTenantDB(tenantId.str, "test"); const collName = "coll"; // Not doing a migration before writing to the recipient to mimic that a migration has completed and // the state doc has been garbage collected. 
assert.commandWorked(recipientPrimary.getDB(dbName)[collName].insert({_id: 1})); -function runRecipientForgetMigration(host, { - migrationIdString, - donorConnectionString, - tenantIds, - readPreference, - recipientCertificateForDonor -}) { +function runRecipientForgetMigration( + host, + {migrationIdString, donorConnectionString, tenantIds, recipientCertificateForDonor}, + protocol) { const db = new Mongo(host); return db.adminCommand({ recipientForgetMigration: 1, migrationId: UUID(migrationIdString), donorConnectionString, tenantIds: eval(tenantIds), - protocol: "shard merge", + protocol, decision: "committed", readPreference: {mode: "primary"}, recipientCertificateForDonor @@ -72,12 +71,15 @@ const fp = configureFailPoint( recipientPrimary, "fpBeforeMarkingStateDocAsGarbageCollectable", {action: "hang"}); const recipientForgetMigrationThread = - new Thread(runRecipientForgetMigration, recipientPrimary.host, { - migrationIdString: extractUUIDFromObject(migrationId), - donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), - tenantIds: tojson([tenantId]), - recipientCertificateForDonor - }); + new Thread(runRecipientForgetMigration, + recipientPrimary.host, + { + migrationIdString: extractUUIDFromObject(migrationId), + donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), + tenantIds: tojson([tenantId]), + recipientCertificateForDonor + }, + kProtocolShardMerge); // Run a delayed/retried recipientForgetMigration command after the state doc has been deleted. recipientForgetMigrationThread.start(); @@ -114,12 +116,15 @@ assert.eq(1, newRecipientPrimary.getDB(dbName)[collName].find().itcount()); // Test that we can retry the recipientForgetMigration on the new primary. newPrimaryFp.off(); -assert.commandWorked(runRecipientForgetMigration(newRecipientPrimary.host, { - migrationIdString: extractUUIDFromObject(migrationId), - donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), - tenantIds: tojson([tenantId]), - recipientCertificateForDonor -})); +assert.commandWorked(runRecipientForgetMigration( + newRecipientPrimary.host, + { + migrationIdString: extractUUIDFromObject(migrationId), + donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), + tenantIds: tojson([tenantId]), + recipientCertificateForDonor + }, + kProtocolShardMerge)); currOp = assert .commandWorked( diff --git a/jstests/replsets/tenant_migration_shard_merge_ssl_configuration.js b/jstests/replsets/tenant_migration_shard_merge_ssl_configuration.js index e52d7ad6af547..6b50b1361cade 100644 --- a/jstests/replsets/tenant_migration_shard_merge_ssl_configuration.js +++ b/jstests/replsets/tenant_migration_shard_merge_ssl_configuration.js @@ -15,15 +15,15 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { - donorStartMigrationWithProtocol, getCertificateAndPrivateKey, - isMigrationCompleted, isShardMergeEnabled, + kProtocolShardMerge, makeMigrationCertificatesForTest, makeX509OptionsForTest, - runTenantMigrationCommand, } from "jstests/replsets/libs/tenant_migration_util.js"; +load("jstests/libs/uuid_util.js"); + const standalone = MongoRunner.runMongod({}); const shardMergeFeatureFlagEnabled = isShardMergeEnabled(standalone.getDB("admin")); MongoRunner.stopMongod(standalone); @@ -64,7 +64,7 @@ const kExpiredMigrationCertificates = { migrationId: UUID(), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, readPreference: kReadPreference, 
recipientCertificateForDonor: kValidMigrationCertificates.recipientCertificateForDonor, }), @@ -77,7 +77,7 @@ const kExpiredMigrationCertificates = { migrationId: UUID(), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, readPreference: kReadPreference, donorCertificateForRecipient: kValidMigrationCertificates.donorCertificateForRecipient, }), @@ -90,7 +90,7 @@ const kExpiredMigrationCertificates = { migrationId: UUID(), donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, startMigrationDonorTimestamp: Timestamp(1, 1), readPreference: kReadPreference }), @@ -103,7 +103,7 @@ const kExpiredMigrationCertificates = { migrationId: UUID(), donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, decision: "aborted", readPreference: kReadPreference }), @@ -128,7 +128,7 @@ const kExpiredMigrationCertificates = { migrationId: UUID(), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, readPreference: kReadPreference, donorCertificateForRecipient: kValidMigrationCertificates.donorCertificateForRecipient, recipientCertificateForDonor: kValidMigrationCertificates.recipientCertificateForDonor, @@ -155,7 +155,7 @@ const kExpiredMigrationCertificates = { migrationId: UUID(), donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, readPreference: kReadPreference, startMigrationDonorTimestamp: Timestamp(1, 1), recipientCertificateForDonor: kValidMigrationCertificates.recipientCertificateForDonor, @@ -193,7 +193,7 @@ const kExpiredMigrationCertificates = { migrationId: UUID(), donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, startMigrationDonorTimestamp: Timestamp(1, 1), readPreference: kReadPreference })); @@ -227,7 +227,7 @@ const kExpiredMigrationCertificates = { migrationId: UUID(), donorConnectionString: tenantMigrationTest.getDonorRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, decision: "aborted", readPreference: kReadPreference })); @@ -267,16 +267,14 @@ const kExpiredMigrationCertificates = { const migrationId = UUID(); const donorStartMigrationCmdObj = { donorStartMigration: 1, - migrationId: migrationId, + migrationIdString: extractUUIDFromObject(migrationId), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, readPreference: kReadPreference }; - const stateRes = assert.commandWorked(runTenantMigrationCommand( - donorStartMigrationCmdObj, - donorRst, - {retryOnRetryableErrors: false, shouldStopFunc: isMigrationCompleted})); + const stateRes = + assert.commandWorked(tenantMigrationTest.runMigration(donorStartMigrationCmdObj)); assert.eq(stateRes.state, TenantMigrationTest.DonorState.kCommitted); assert.commandWorked( donorRst.getPrimary().adminCommand({donorForgetMigration: 1, migrationId: migrationId})); @@ -314,17 +312,15 @@ const kExpiredMigrationCertificates = { const donorStartMigrationCmdObj = { donorStartMigration: 1, - migrationId: UUID(), 
+ migrationIdString: extractUUIDFromObject(UUID()), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, readPreference: kReadPreference }; - const stateRes = assert.commandWorked(runTenantMigrationCommand( - donorStartMigrationCmdObj, - donorRst, - {retryOnRetryableErrors: false, shouldStopFunc: isMigrationCompleted})); + const stateRes = + assert.commandWorked(tenantMigrationTest.runMigration(donorStartMigrationCmdObj)); assert.eq(stateRes.state, TenantMigrationTest.DonorState.kCommitted); donorRst.stopSet(); @@ -362,18 +358,16 @@ const kExpiredMigrationCertificates = { const donorStartMigrationCmdObj = { donorStartMigration: 1, - migrationId: UUID(), + migrationIdString: extractUUIDFromObject(UUID()), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantIds: [kTenantId], - protocol: "shard merge", + protocol: kProtocolShardMerge, readPreference: kReadPreference, donorCertificateForRecipient: kExpiredMigrationCertificates.donorCertificateForRecipient, recipientCertificateForDonor: kExpiredMigrationCertificates.recipientCertificateForDonor, }; - const stateRes = assert.commandWorked(runTenantMigrationCommand( - donorStartMigrationCmdObj, - donorRst, - {retryOnRetryableErrors: false, shouldStopFunc: isMigrationCompleted})); + const stateRes = + assert.commandWorked(tenantMigrationTest.runMigration(donorStartMigrationCmdObj)); assert.eq(stateRes.state, TenantMigrationTest.DonorState.kCommitted); donorRst.stopSet(); diff --git a/jstests/replsets/tenant_migration_ssl_configuration.js b/jstests/replsets/tenant_migration_ssl_configuration.js index 9e9d7b5e285d9..2ad965fff6106 100644 --- a/jstests/replsets/tenant_migration_ssl_configuration.js +++ b/jstests/replsets/tenant_migration_ssl_configuration.js @@ -16,14 +16,13 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import { - donorStartMigrationWithProtocol, getCertificateAndPrivateKey, - isMigrationCompleted, makeMigrationCertificatesForTest, makeX509OptionsForTest, - runTenantMigrationCommand } from "jstests/replsets/libs/tenant_migration_util.js"; +load("jstests/libs/uuid_util.js"); + const kTenantId = ObjectId().str; const kReadPreference = { mode: "primary" @@ -46,31 +45,27 @@ const kExpiredMigrationCertificates = { jsTest.log("Test that donorStartMigration requires 'donorCertificateForRecipient' when " + "tenantMigrationDisableX509Auth=false"); - assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ - donorStartMigration: 1, - migrationId: UUID(), - recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), - tenantId: kTenantId, - readPreference: kReadPreference, - recipientCertificateForDonor: kValidMigrationCertificates.recipientCertificateForDonor, - }, - donorPrimary.getDB("admin"))), - ErrorCodes.InvalidOptions); + assert.commandFailedWithCode(donorPrimary.adminCommand({ + donorStartMigration: 1, + migrationId: UUID(), + recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), + tenantId: kTenantId, + readPreference: kReadPreference, + recipientCertificateForDonor: kValidMigrationCertificates.recipientCertificateForDonor, + }), + ErrorCodes.InvalidOptions); jsTest.log("Test that donorStartMigration requires 'recipientCertificateForDonor' when " + "tenantMigrationDisableX509Auth=false"); - assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ - 
donorStartMigration: 1, - migrationId: UUID(), - recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), - tenantId: kTenantId, - readPreference: kReadPreference, - donorCertificateForRecipient: kValidMigrationCertificates.donorCertificateForRecipient, - }, - donorPrimary.getDB("admin"))), - ErrorCodes.InvalidOptions); + assert.commandFailedWithCode(donorPrimary.adminCommand({ + donorStartMigration: 1, + migrationId: UUID(), + recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), + tenantId: kTenantId, + readPreference: kReadPreference, + donorCertificateForRecipient: kValidMigrationCertificates.donorCertificateForRecipient, + }), + ErrorCodes.InvalidOptions); jsTest.log("Test that recipientSyncData requires 'recipientCertificateForDonor' when " + "tenantMigrationDisableX509Auth=false"); @@ -109,18 +104,16 @@ const kExpiredMigrationCertificates = { const donorPrimary = tenantMigrationTest.getDonorPrimary(); - assert.commandFailedWithCode( - donorPrimary.adminCommand(donorStartMigrationWithProtocol({ - donorStartMigration: 1, - migrationId: UUID(), - recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), - tenantId: kTenantId, - readPreference: kReadPreference, - donorCertificateForRecipient: kValidMigrationCertificates.donorCertificateForRecipient, - recipientCertificateForDonor: kValidMigrationCertificates.recipientCertificateForDonor, - }, - donorPrimary.getDB("admin"))), - ErrorCodes.IllegalOperation); + assert.commandFailedWithCode(donorPrimary.adminCommand({ + donorStartMigration: 1, + migrationId: UUID(), + recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), + tenantId: kTenantId, + readPreference: kReadPreference, + donorCertificateForRecipient: kValidMigrationCertificates.donorCertificateForRecipient, + recipientCertificateForDonor: kValidMigrationCertificates.recipientCertificateForDonor, + }), + ErrorCodes.IllegalOperation); donorRst.stopSet(); tenantMigrationTest.stop(); @@ -246,15 +239,13 @@ const kExpiredMigrationCertificates = { const migrationId = UUID(); const donorStartMigrationCmdObj = { donorStartMigration: 1, - migrationId: migrationId, + migrationIdString: extractUUIDFromObject(migrationId), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantId: kTenantId, readPreference: kReadPreference }; - const stateRes = assert.commandWorked(runTenantMigrationCommand( - donorStartMigrationCmdObj, - donorRst, - {retryOnRetryableErrors: false, shouldStopFunc: isMigrationCompleted})); + const stateRes = + assert.commandWorked(tenantMigrationTest.runMigration(donorStartMigrationCmdObj)); assert.eq(stateRes.state, TenantMigrationTest.DonorState.kCommitted); assert.commandWorked( donorRst.getPrimary().adminCommand({donorForgetMigration: 1, migrationId: migrationId})); @@ -292,16 +283,14 @@ const kExpiredMigrationCertificates = { const donorStartMigrationCmdObj = { donorStartMigration: 1, - migrationId: UUID(), + migrationIdString: extractUUIDFromObject(UUID()), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantId: kTenantId, readPreference: kReadPreference }; - const stateRes = assert.commandWorked(runTenantMigrationCommand( - donorStartMigrationCmdObj, - donorRst, - {retryOnRetryableErrors: false, shouldStopFunc: isMigrationCompleted})); + const stateRes = + assert.commandWorked(tenantMigrationTest.runMigration(donorStartMigrationCmdObj)); assert.eq(stateRes.state, TenantMigrationTest.DonorState.kCommitted); donorRst.stopSet(); @@ -339,17 
+328,15 @@ const kExpiredMigrationCertificates = { const donorStartMigrationCmdObj = { donorStartMigration: 1, - migrationId: UUID(), + migrationIdString: extractUUIDFromObject(UUID()), recipientConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantId: kTenantId, readPreference: kReadPreference, donorCertificateForRecipient: kExpiredMigrationCertificates.donorCertificateForRecipient, recipientCertificateForDonor: kExpiredMigrationCertificates.recipientCertificateForDonor, }; - const stateRes = assert.commandWorked(runTenantMigrationCommand( - donorStartMigrationCmdObj, - donorRst, - {retryOnRetryableErrors: false, shouldStopFunc: isMigrationCompleted})); + const stateRes = + assert.commandWorked(tenantMigrationTest.runMigration(donorStartMigrationCmdObj)); assert.eq(stateRes.state, TenantMigrationTest.DonorState.kCommitted); donorRst.stopSet(); diff --git a/jstests/replsets/tenant_migration_sync_source_too_stale.js b/jstests/replsets/tenant_migration_sync_source_too_stale.js index a60a06db48f6c..46776f700008f 100644 --- a/jstests/replsets/tenant_migration_sync_source_too_stale.js +++ b/jstests/replsets/tenant_migration_sync_source_too_stale.js @@ -9,10 +9,9 @@ * 'delayedSecondary', it should see that it is too stale. As a result, it should retry sync source * selection until it finds a sync source that is no longer too stale. * - * TODO SERVER-61231: shard merge can't handle restart, adapt this test. - * * @tags: [ * incompatible_with_macos, + * # Shard merge can only sync from primary therefore this test is not applicable. * incompatible_with_shard_merge, * incompatible_with_windows_tls, * requires_majority_read_concern, @@ -24,7 +23,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, makeX509OptionsForTest} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -55,7 +54,7 @@ const tenantMigrationTest = new TenantMigrationTest({ }); const tenantId = ObjectId().str; -const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const tenantDB = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const delayedSecondary = donorRst.getSecondaries()[0]; diff --git a/jstests/replsets/tenant_migration_test_max_bson_limit.js b/jstests/replsets/tenant_migration_test_max_bson_limit.js index 97115f3c7645c..146b518aae9e5 100644 --- a/jstests/replsets/tenant_migration_test_max_bson_limit.js +++ b/jstests/replsets/tenant_migration_test_max_bson_limit.js @@ -11,6 +11,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallelTester.js"); load("jstests/libs/uuid_util.js"); @@ -28,8 +29,8 @@ function bulkWriteDocsUnordered(primaryHost, dbName, collName, numDocs) { } let request = {insert: collName, documents: batch, writeConcern: {w: 1}, ordered: false}; - res = assert.commandFailedWithCode(primaryDB[collName].runCommand(request), - ErrorCodes.TenantMigrationCommitted); + let res = assert.commandFailedWithCode(primaryDB[collName].runCommand(request), + ErrorCodes.TenantMigrationCommitted); return res; } @@ -44,7 +45,7 @@ const migrationOpts = { tenantId, }; -const dbName = tenantMigrationTest.tenantDB(tenantId, kTenantDefinedDbName); +const dbName = 
makeTenantDB(tenantId, kTenantDefinedDbName); const primary = tenantMigrationTest.getDonorPrimary(); const primaryDB = primary.getDB(dbName); const numWriteOps = diff --git a/jstests/replsets/tenant_migration_timeseries_collections.js b/jstests/replsets/tenant_migration_timeseries_collections.js index 3474a6405a6bf..305e46ddf67ee 100644 --- a/jstests/replsets/tenant_migration_timeseries_collections.js +++ b/jstests/replsets/tenant_migration_timeseries_collections.js @@ -11,6 +11,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); @@ -18,7 +19,7 @@ const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const donorPrimary = tenantMigrationTest.getDonorPrimary(); const tenantId = ObjectId().str; -const tsDB = tenantMigrationTest.tenantDB(tenantId, "tsDB"); +const tsDB = makeTenantDB(tenantId, "tsDB"); const collName = "tsColl"; const donorTSDB = donorPrimary.getDB(tsDB); assert.commandWorked(donorTSDB.createCollection(collName, {timeseries: {timeField: "time"}})); diff --git a/jstests/replsets/tenant_migration_timeseries_retryable_write_retry_on_recipient.js b/jstests/replsets/tenant_migration_timeseries_retryable_write_retry_on_recipient.js index 4a3475178f54d..1a46c1f6ceadb 100644 --- a/jstests/replsets/tenant_migration_timeseries_retryable_write_retry_on_recipient.js +++ b/jstests/replsets/tenant_migration_timeseries_retryable_write_retry_on_recipient.js @@ -17,7 +17,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallelTester.js"); // for 'Thread' @@ -30,7 +30,7 @@ function testRetryOnRecipient(ordered) { const donorPrimary = tenantMigrationTest.getDonorPrimary(); const kTenantId = ObjectId().str; - const kDbName = tenantMigrationTest.tenantDB(kTenantId, "tsDb"); + const kDbName = makeTenantDB(kTenantId, "tsDb"); const kCollNameBefore = "tsCollBefore"; const kCollNameDuring = "tsCollDuring"; @@ -105,6 +105,7 @@ function testRetryOnRecipient(ordered) { jsTest.log("Waiting for migration to complete"); pauseTenantMigrationBeforeLeavingDataSyncState.off(); TenantMigrationTest.assertCommitted(migrationThread.returnData()); + tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString); // Print the no-op oplog entries for debugging purposes. 
jsTestLog("Recipient oplog migration entries."); @@ -132,8 +133,6 @@ function testRetryOnRecipient(ordered) { testRecipientRetryableWrites(recipientRst.getPrimary().getDB(kDbName), beforeWrites); testRecipientRetryableWrites(recipientRst.getPrimary().getDB(kDbName), duringWrites); - tenantMigrationTest.forgetMigration(migrationOpts.migrationIdString); - jsTestLog("Trying a back-to-back migration"); const tenantMigrationTest2 = new TenantMigrationTest( {name: jsTestName() + "2", donorRst: tenantMigrationTest.getRecipientRst()}); diff --git a/jstests/replsets/tenant_migration_transaction_boundary.js b/jstests/replsets/tenant_migration_transaction_boundary.js index c8bdbfdc0a348..e68e8e8a969de 100644 --- a/jstests/replsets/tenant_migration_transaction_boundary.js +++ b/jstests/replsets/tenant_migration_transaction_boundary.js @@ -22,13 +22,14 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/replsets/rslib.js"); load("jstests/libs/uuid_util.js"); const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const tenantId = ObjectId().str; -const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const tenantDB = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const tenantNS = `${tenantDB}.${collName}`; const transactionsNS = "config.transactions"; diff --git a/jstests/replsets/tenant_migration_v1_id_index.js b/jstests/replsets/tenant_migration_v1_id_index.js index 6203be453fa37..91c67697e3f84 100644 --- a/jstests/replsets/tenant_migration_v1_id_index.js +++ b/jstests/replsets/tenant_migration_v1_id_index.js @@ -12,6 +12,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/uuid_util.js"); const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); @@ -21,7 +22,7 @@ const migrationOpts = { migrationIdString: extractUUIDFromObject(UUID()), tenantId: tenantId }; -const dbName = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const dbName = makeTenantDB(tenantId, "testDB"); // Collection names for the collections with "v: 1" and "v: 2" '_id' indexes. const collWithV1Index = "testCollV1"; diff --git a/jstests/replsets/tenant_migrations_back_to_back.js b/jstests/replsets/tenant_migrations_back_to_back.js index f8c7ca991b962..e820e1cb76051 100644 --- a/jstests/replsets/tenant_migrations_back_to_back.js +++ b/jstests/replsets/tenant_migrations_back_to_back.js @@ -10,13 +10,13 @@ * requires_majority_read_concern, * requires_persistence, * serverless, - * # The currentOp output field 'lastDurableState' was changed from an enum value to a string. - * requires_fcv_70, + * # The error code for a rejected recipient command invoked during the reject phase was changed. 
+ * requires_fcv_71, * ] */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import {runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; +import {makeTenantDB, runMigrationAsync} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallelTester.js"); // for 'Thread' @@ -27,7 +27,7 @@ const kTenantId = ObjectId().str; const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), insertDataForTenant: kTenantId}); -const kDbName = tenantMigrationTest.tenantDB(kTenantId, "testDb"); +const kDbName = makeTenantDB(kTenantId, "testDb"); const kCollName = "testColl"; const donorPrimary = tenantMigrationTest.getDonorPrimary(); @@ -72,7 +72,7 @@ recipientRst.nodes.forEach(node => { }; const res = db.runCommand(cmd); assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld, tojson(cmd)); - assert.eq(res.errmsg, "Tenant read is not allowed before migration completes"); + assert.eq(res.errmsg, "Tenant command 'find' is not allowed before migration completes"); }); jsTestLog("Running a back-to-back migration"); @@ -100,8 +100,9 @@ waitAfterCreatingMtab.wait(); // Check that the current serverStatus reflects the recipient access blocker. const mtabStatus = tenantMigrationTest.getTenantMigrationAccessBlocker( {donorNode: donor2Primary, tenantId: kTenantId}); -assert.eq( - mtabStatus.recipient.state, TenantMigrationTest.RecipientAccessState.kRejectBefore, mtabStatus); +assert.eq(mtabStatus.recipient.state, + TenantMigrationTest.RecipientAccessState.kRejectReadsBefore, + mtabStatus); assert(mtabStatus.recipient.hasOwnProperty("rejectBeforeTimestamp"), mtabStatus); const res = assert.commandWorked( @@ -147,7 +148,7 @@ newDonorRst.nodes.forEach(node => { }; const res = db.runCommand(cmd); assert.commandFailedWithCode(res, ErrorCodes.SnapshotTooOld, tojson(cmd)); - assert.eq(res.errmsg, "Tenant read is not allowed before migration completes"); + assert.eq(res.errmsg, "Tenant command 'find' is not allowed before migration completes"); }); waitAfterCreatingMtab.off(); diff --git a/jstests/replsets/tenant_migrations_back_to_back_2.js b/jstests/replsets/tenant_migrations_back_to_back_2.js new file mode 100644 index 0000000000000..82e00a0ca4d1f --- /dev/null +++ b/jstests/replsets/tenant_migrations_back_to_back_2.js @@ -0,0 +1,127 @@ +/** + * This test simulates and verifies the handling of below edge case involving back-to-back tenant + * migration (rs0 -> rs1 -> rs0) by both shard merge and tenant migration protocols. + * 1) rs0: Retryable insert at txnNum: 55 succeeds. + * 2) rs0: No-op session write (E.g. no-op retryable update) at txnNum: 56 succeeds, causing no + * writes to 'config.transactions' table but updates in-memory transaction participant. + * 3) Start migration from rs0 -> rs1, copying the oplog chain for txnNum:55 from rs0 to rs1. + * 4) rs0 -> rs1 migration succeeds. + * 5) Starting a migration again from rs1 -> rs0 should succeed and not fail + * with ErrorCodes.TransactionTooOld. + * + * @tags: [ + * incompatible_with_macos, + * incompatible_with_windows_tls, + * requires_majority_read_concern, + * requires_persistence, + * serverless, + * ] + */ + +import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; +load("jstests/libs/uuid_util.js"); // For extractUUIDFromObject(). +load("jstests/libs/fail_point_util.js"); // For configureFailPoint(). 
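The file header above spells out the session-state rule the new test depends on: once a logical session has observed txnNumber 56, re-sending the earlier retryable write at txnNumber 55 must be rejected with TransactionTooOld. A condensed, self-contained sketch of that rule (collection name and values are illustrative; the full version appears in the test body that follows):

```js
const lsid = {id: UUID()};

// A retryable write: a write command carrying an lsid and txnNumber, without startTransaction.
assert.commandWorked(db.runCommand({
    insert: "testColl",
    documents: [{_id: "retryableWrite"}],
    lsid: lsid,
    txnNumber: NumberLong(55),
    stmtIds: [NumberInt(0)],
}));

// Any session operation at a higher txnNumber advances the transaction participant, even a
// retryable update that matches no documents (it updates only the in-memory participant,
// not config.transactions)...
assert.commandWorked(db.runCommand({
    update: "testColl",
    updates: [{q: {_id: "missing"}, u: {$inc: {x: 1}}}],
    lsid: lsid,
    txnNumber: NumberLong(56),
    stmtIds: [NumberInt(0)],
}));

// ...so re-issuing the older txnNumber is rejected.
assert.commandFailedWithCode(db.runCommand({
    insert: "testColl",
    documents: [{_id: "again"}],
    lsid: lsid,
    txnNumber: NumberLong(55),
    stmtIds: [NumberInt(0)],
}), ErrorCodes.TransactionTooOld);
```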
+ +const kMigrationId = UUID(); +const kTenantId = ObjectId().str; +const kDbName = makeTenantDB(kTenantId, "testDb"); +const kCollName = "testColl"; +const migrationOpts = { + migrationIdString: extractUUIDFromObject(kMigrationId), + tenantId: kTenantId, +}; + +const kSessionId = { + id: UUID() +}; +const kRetryableWriteTxnId = NumberLong(55); +const kNoopSessionWriteTxnId = NumberLong(kRetryableWriteTxnId + 1); + +function runRetryableWriteWithTxnIdLessThanNoopWriteTxnId(conn) { + return conn.getDB(kDbName).runCommand({ + insert: kCollName, + documents: [{_id: "retryableWrite"}], + txnNumber: kRetryableWriteTxnId, + lsid: kSessionId, + stmtIds: [NumberInt(0)] + }); +} + +let noOpSessionWrites = [ + { + testDesc: "no-op retryable write", + testOp: (conn) => { + assert.commandWorked(conn.getDB(kDbName).runCommand({ + update: kCollName, + updates: [{q: {_id: "noOpRetryableWrite"}, u: {$inc: {x: 1}}}], + txnNumber: kNoopSessionWriteTxnId, + lsid: kSessionId, + stmtIds: [NumberInt(1)] + })); + } + }, + { + testDesc: "read transaction", + testOp: (conn) => { + assert.commandWorked(conn.getDB(kDbName).runCommand({ + find: kCollName, + txnNumber: kNoopSessionWriteTxnId, + lsid: kSessionId, + startTransaction: true, + autocommit: false, + })); + assert.commandWorked(conn.getDB("admin").runCommand({ + commitTransaction: 1, + txnNumber: kNoopSessionWriteTxnId, + lsid: kSessionId, + autocommit: false, + })); + } + }, + { + testDesc: "abort transaction", + testOp: (conn) => { + assert.commandWorked(conn.getDB(kDbName).runCommand({ + insert: kCollName, + documents: [{_id: "noOpRetryableWrite"}], + txnNumber: kNoopSessionWriteTxnId, + lsid: kSessionId, + startTransaction: true, + autocommit: false, + })); + + assert.commandWorked(conn.getDB("admin").runCommand({ + abortTransaction: 1, + txnNumber: kNoopSessionWriteTxnId, + lsid: kSessionId, + autocommit: false, + })); + } + } +]; + +noOpSessionWrites.forEach(({testDesc, testOp}) => { + jsTest.log(`Testing no-op session write == ${testDesc} ==.`); + const tenantMigrationTest = + new TenantMigrationTest({name: jsTestName(), sharedOptions: {nodes: 1}}); + + const donorPrimary = tenantMigrationTest.getDonorPrimary(); + const recipientPrimary = tenantMigrationTest.getRecipientPrimary(); + + jsTestLog(`Run no-op session write on recipient prior to migration.`); + testOp(recipientPrimary); + + // Ensure the in-memory transaction participant on recipient is updated to + // kNoopSessionWriteTxnId. + assert.commandFailedWithCode(runRetryableWriteWithTxnIdLessThanNoopWriteTxnId(recipientPrimary), + ErrorCodes.TransactionTooOld); + + jsTestLog("Run retryable write on donor prior to migration."); + assert.commandWorked(runRetryableWriteWithTxnIdLessThanNoopWriteTxnId(donorPrimary)); + + // Migration should succeed. 
+ TenantMigrationTest.assertCommitted(tenantMigrationTest.runMigration(migrationOpts)); + + tenantMigrationTest.stop(); +}); diff --git a/jstests/replsets/tenant_migrations_transaction_with_create_collection.js b/jstests/replsets/tenant_migrations_transaction_with_create_collection.js index be97a06568906..e0315a515c910 100644 --- a/jstests/replsets/tenant_migrations_transaction_with_create_collection.js +++ b/jstests/replsets/tenant_migrations_transaction_with_create_collection.js @@ -12,6 +12,7 @@ */ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import {makeTenantDB} from "jstests/replsets/libs/tenant_migration_util.js"; load("jstests/aggregation/extras/utils.js"); load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); @@ -19,7 +20,7 @@ load("jstests/libs/uuid_util.js"); const tenantMigrationTest = new TenantMigrationTest({name: jsTestName()}); const tenantId = ObjectId().str; -const tenantDB = tenantMigrationTest.tenantDB(tenantId, "testDB"); +const tenantDB = makeTenantDB(tenantId, "testDB"); const collName = "testColl"; const tenantNS = `${tenantDB}.${collName}`; const transactionsNS = "config.transactions"; diff --git a/jstests/replsets/transactions_committed_with_tickets_exhausted.js b/jstests/replsets/transactions_committed_with_tickets_exhausted.js index cff1ff346c169..2839fa8fd6c01 100644 --- a/jstests/replsets/transactions_committed_with_tickets_exhausted.js +++ b/jstests/replsets/transactions_committed_with_tickets_exhausted.js @@ -23,7 +23,7 @@ const rst = new ReplSetTest({ nodeOptions: { setParameter: { // This test requires a fixed ticket pool size. - storageEngineConcurrencyAdjustmentAlgorithm: "", + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions", wiredTigerConcurrentWriteTransactions: kNumWriteTickets, // Setting a transaction lifetime of 20 seconds works fine locally because the diff --git a/jstests/replsets/transactions_reaped_with_tickets_exhausted.js b/jstests/replsets/transactions_reaped_with_tickets_exhausted.js index 9368194a8524b..1e7c4af8e53fa 100644 --- a/jstests/replsets/transactions_reaped_with_tickets_exhausted.js +++ b/jstests/replsets/transactions_reaped_with_tickets_exhausted.js @@ -21,7 +21,7 @@ const rst = new ReplSetTest({ nodeOptions: { setParameter: { // This test requires a fixed ticket pool size. 
- storageEngineConcurrencyAdjustmentAlgorithm: "", + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions", wiredTigerConcurrentWriteTransactions: kNumWriteTickets, // Setting a transaction lifetime of 1 hour to make sure the transaction reaper diff --git a/jstests/replsets/unconditional_step_down.js b/jstests/replsets/unconditional_step_down.js index 4908d1a8680e8..e2a32291062ae 100644 --- a/jstests/replsets/unconditional_step_down.js +++ b/jstests/replsets/unconditional_step_down.js @@ -158,7 +158,7 @@ function runStepsDowntoRemoved(params) { runStepDownTest({ testMsg: "reconfig command", stepDownFn: () => { - load("./jstests/replsets/rslib.js"); + load("jstests/replsets/rslib.js"); var newConfig = rst.getReplSetConfigFromNode(); var oldMasterId = rst.getNodeId(primary); @@ -176,7 +176,7 @@ runStepDownTest({ runStepDownTest({ testMsg: "reconfig via heartbeat", stepDownFn: () => { - load("./jstests/replsets/rslib.js"); + load("jstests/replsets/rslib.js"); var newConfig = rst.getReplSetConfigFromNode(); var oldMasterId = rst.getNodeId(primary); @@ -194,7 +194,7 @@ runStepDownTest({ runStepsDowntoRemoved({ testMsg: "reconfig via heartbeat - primary to removed", stepDownFn: () => { - load("./jstests/replsets/rslib.js"); + load("jstests/replsets/rslib.js"); var newConfig = rst.getReplSetConfigFromNode(); var oldMasterId = rst.getNodeId(primary); @@ -213,7 +213,7 @@ runStepsDowntoRemoved({ runStepDownTest({ testMsg: "stepdown via heartbeat", stepDownFn: () => { - load("./jstests/replsets/rslib.js"); + load("jstests/replsets/rslib.js"); var newConfig = rst.getReplSetConfigFromNode(); var newMasterId = rst.getNodeId(secondary); diff --git a/jstests/replsets/use_history_after_restart.js b/jstests/replsets/use_history_after_restart.js index 742d8f7a8858c..b0282ab8b64c9 100644 --- a/jstests/replsets/use_history_after_restart.js +++ b/jstests/replsets/use_history_after_restart.js @@ -1,6 +1,5 @@ /** - * Demonstrate that durable history can be used across a restart. Also assert that any - * collection/index minimum visible timestamps are set to legal values. The rough test outline: + * Demonstrate that durable history can be used across a restart. The rough test outline: * * 1) Create a collection `existsAtOldestTs`. This collection and its `_id` index should be readable * across a restart. @@ -12,16 +11,9 @@ * @tags: [ * requires_majority_read_concern, * requires_persistence, - * # This test is incompatible with earlier implementations of point-in-time catalog lookups. - * requires_fcv_70, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); - let replTest = new ReplSetTest({ name: "use_history_after_restart", nodes: 1, @@ -125,13 +117,9 @@ result = primary.getDB("test").runCommand( {find: "dneAtOldestTs", readConcern: {level: "snapshot", atClusterTime: oldestTimestamp}}); jsTestLog({"SnapshotUnavailable on dneAtOldestTs": result}); -if (FeatureFlagUtil.isEnabled(primary.getDB("test"), "PointInTimeCatalogLookups")) { - // The collection does not exist at this time so find will return an empty result set. - assert.commandWorked(result); - assert.eq(0, result["cursor"]["firstBatch"].length); -} else { - assert.commandFailedWithCode(result, ErrorCodes.SnapshotUnavailable); -} +// The collection does not exist at this time so find will return an empty result set. +assert.commandWorked(result); +assert.eq(0, result["cursor"]["firstBatch"].length); // Querying `dneAtOldestTs` at the stable timestamp should succeed with a correct result. 
result = primary.getDB("test").runCommand( @@ -139,5 +127,4 @@ result = primary.getDB("test").runCommand( jsTestLog({"Available dneAtOldestTs result": result}); assert.eq(1, result["cursor"]["firstBatch"].length); -replTest.stopSet(); -})(); +replTest.stopSet(); \ No newline at end of file diff --git a/jstests/resmoke_selftest/end2end/timeout/nested/top_level_timeout.js b/jstests/resmoke_selftest/end2end/timeout/nested/top_level_timeout.js index 065b9dcd55e97..45c2a2a414400 100644 --- a/jstests/resmoke_selftest/end2end/timeout/nested/top_level_timeout.js +++ b/jstests/resmoke_selftest/end2end/timeout/nested/top_level_timeout.js @@ -6,9 +6,14 @@ rst.startSet(); rst.initiate(); function start() { + // The --originSuite argument is to trick the resmoke local invocation into passing + // because when we pass --taskId into resmoke it thinks that it is being run in evergreen + // and cannot normally find an evergreen task associated with + // buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_nested_timeout.yml const resmokeCmd = 'python3 buildscripts/resmoke.py run ' + '--storageEngineCacheSizeGB=1 --dbpathPrefix=/data/db/selftest_inner ' + '--internalParam=test_archival --taskId=123 ' + + '--originSuite=resmoke_end2end_tests ' + '--internalParam=is_inner_level ' + '--basePort=20020 ' + '--suites=buildscripts/tests/resmoke_end2end/suites/resmoke_selftest_nested_timeout.yml ' + diff --git a/jstests/selinux/core.js b/jstests/selinux/core.js index 1ab400011249f..c16be9b681844 100644 --- a/jstests/selinux/core.js +++ b/jstests/selinux/core.js @@ -4,21 +4,7 @@ load('jstests/selinux/lib/selinux_base_test.js'); class TestDefinition extends SelinuxBaseTest { - get config() { - return { - "systemLog": - {"destination": "file", "logAppend": true, "path": "/var/log/mongodb/mongod.log"}, - "storage": {"dbPath": "/var/lib/mongo"}, - "processManagement": { - "fork": true, - "pidFilePath": "/var/run/mongodb/mongod.pid", - "timeZoneInfo": "/usr/share/zoneinfo" - }, - "net": {"port": 27017, "bindIp": "127.0.0.1"} - }; - } - - run() { + async run() { // On RHEL7 there is no python3, but check_has_tag.py will also work with python2 const python = (0 == runNonMongoProgram("which", "python3")) ? "python3" : "python2"; @@ -56,9 +42,16 @@ class TestDefinition extends SelinuxBaseTest { } jsTest.log("Running test: " + t); - if (!load(t)) { + try { + let evalString = "import(" + tojson(t) + ")"; + let handle = startParallelShell(evalString, db.getMongo().port); + let rc = handle(); + assert.eq(rc, 0); + } catch (e) { + print(tojson(e)); throw ("failed to load test " + t); } + jsTest.log("Successful test: " + t); } } diff --git a/jstests/selinux/default.js b/jstests/selinux/default.js index 9252e84d9b7a8..e231247f6c4d9 100644 --- a/jstests/selinux/default.js +++ b/jstests/selinux/default.js @@ -6,11 +6,7 @@ load('jstests/selinux/lib/selinux_base_test.js'); class TestDefinition extends SelinuxBaseTest { - get config() { - return cat("rpm/mongod.conf"); - } - - run() { + async run() { // The only things we are verifying here: // - that we are connected // - that process is running in correct SELinux context diff --git a/jstests/selinux/lib/selinux_base_test.js b/jstests/selinux/lib/selinux_base_test.js index a5cccd451fca7..395cd19a1791d 100644 --- a/jstests/selinux/lib/selinux_base_test.js +++ b/jstests/selinux/lib/selinux_base_test.js @@ -1,8 +1,36 @@ 'use strict'; +/** + * An "abstract" base selinux test class, containing common functions that should be + * assumed to be called by a test executor.
+ * + * Implementations for the test can extend this base class in order to integrate + * into evergreen/selinux_test_executor.sh + * + * NOTE: Implementations for this exist in both community and enterprise, + * so be cautious about modifying the base class. + */ class SelinuxBaseTest { + /** + * Returns the "base" configuration per the rpm mongod.conf + * Inheriting classes should use this base configuration and + * extend the returned object as necessary + */ get config() { - return {}; + return { + "systemLog": { + "destination": "file", + "logAppend": true, + "path": "/var/log/mongodb/mongod.log", + "verbosity": 0 + }, + "processManagement": { + "pidFilePath": "/var/run/mongodb/mongod.pid", + "timeZoneInfo": "/usr/share/zoneinfo" + }, + "net": {"port": 27017, "bindIp": "127.0.0.1"}, + "storage": {"dbPath": "/var/lib/mongo"} + }; } // Notice: private definitions, e.g.: #sudo() are not @@ -11,13 +39,25 @@ class SelinuxBaseTest { return run("sudo", "--non-interactive", "bash", "-c", script); } + /** + * Called by test executors (e.g. evergreen/selinux_test_executor.sh) + * to set up the test environment + */ setup() { } + /** + * Called by test executors (e.g. evergreen/selinux_test_executor.sh) + * to tear down test configurations at the end of the test run + */ teardown() { } - run() { + /** + * Called by test executors (e.g. evergreen/selinux_test_executor.sh) + * to run the test. Inheriting classes must override this to run their tests + */ + async run() { assert("override this function"); } } diff --git a/jstests/serial_run/README b/jstests/serial_run/README index 73bc3344f1de9..5b5a2f1074011 100644 --- a/jstests/serial_run/README +++ b/jstests/serial_run/README @@ -1,2 +1,2 @@ This suite contains tests that are unusually susceptible to availability of machine resources; -therefore, this suite is always run with --jobs=1 via Evergreen config. \ No newline at end of file +therefore, this suite is always run with --jobs=1 via Evergreen config.
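To make the selinux_base_test.js changes above concrete, here is a hypothetical test definition that inherits from SelinuxBaseTest, extends the shared rpm-style config as the new doc comment suggests, and overrides the now-async run() hook. The real implementations are jstests/selinux/core.js and default.js shown earlier; this is only an illustrative sketch.

```js
load('jstests/selinux/lib/selinux_base_test.js');

class TestDefinition extends SelinuxBaseTest {
    get config() {
        // Start from the shared base configuration and extend the returned object as needed.
        const conf = super.config;
        conf.systemLog.verbosity = 1;  // illustrative override
        return conf;
    }

    async run() {
        // The executor has already started mongod with `this.config`; just verify connectivity.
        const conn = new Mongo("localhost:27017");
        assert.commandWorked(conn.getDB("admin").runCommand({ping: 1}));
        jsTest.log("selinux smoke test OK");
    }
}
```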
diff --git a/jstests/serial_run/index_multi.js b/jstests/serial_run/index_multi.js index 5ba9b277357ef..99d9655e34cea 100644 --- a/jstests/serial_run/index_multi.js +++ b/jstests/serial_run/index_multi.js @@ -51,8 +51,7 @@ for (var i = 90; i < 93; i++) { spec["field" + (i + 1)] = 1; spec["field" + (i + 2)] = 1; indexJobs.push(startParallelShell( - setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + "," + - "{ background: true }));" + + setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + "));" + "db.results.insert(Object.extend(" + "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );", null, // port @@ -67,8 +66,7 @@ for (var i = 30; i < 90; i += 2) { spec["field" + i] = 1; spec["field" + (i + 1)] = 1; indexJobs.push(startParallelShell( - setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " + - "{ background: true }));" + + setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + "));" + "db.results.insert(Object.extend(" + "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );", null, // port @@ -82,8 +80,7 @@ for (var i = 0; i < 30; i++) { var spec = {}; spec["field" + i] = 1; indexJobs.push(startParallelShell( - setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + ", " + - "{ background: true }));" + + setupDBStr + "printjson(db.index_multi.createIndex(" + tojson(spec) + "));" + "db.results.insert(Object.extend(" + "db.runCommand({ getlasterror: 1 }), " + tojson(spec) + ") );", null, // port diff --git a/jstests/serverless/change_collection_expired_document_remover.js b/jstests/serverless/change_collection_expired_document_remover.js index 709a3ec6d3cf0..93cba546ee3e2 100644 --- a/jstests/serverless/change_collection_expired_document_remover.js +++ b/jstests/serverless/change_collection_expired_document_remover.js @@ -4,15 +4,13 @@ * @tags: [requires_fcv_62] */ -(function() { -"use strict"; - // For configureFailPoint. load("jstests/libs/fail_point_util.js"); // For assertDropAndRecreateCollection. load("jstests/libs/collection_drop_recreate.js"); // For ChangeStreamMultitenantReplicaSetTest. load("jstests/serverless/libs/change_collection_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const getTenantConnection = ChangeStreamMultitenantReplicaSetTest.getTenantConnection; @@ -24,11 +22,17 @@ const kExpireAfterSeconds = 1; const kSleepBetweenWritesSeconds = 5; // Millisecond(s) that can be added to the wall time to advance it marginally. const kSafetyMarginMillis = 1; +// To imitate 1-by-1 deletion we specify a low amount of bytes per marker. +const kMinBytesPerMarker = 1; const replSet = new ChangeStreamMultitenantReplicaSetTest({ nodes: 2, - setParameter: - {changeCollectionExpiredDocumentsRemoverJobSleepSeconds: kExpiredRemovalJobSleepSeconds} + nodeOptions: { + setParameter: { + changeCollectionTruncateMarkersMinBytes: kMinBytesPerMarker, + changeCollectionExpiredDocumentsRemoverJobSleepSeconds: kExpiredRemovalJobSleepSeconds + } + } }); const primary = replSet.getPrimary(); @@ -46,14 +50,20 @@ function assertChangeCollectionDocuments( // Assert that querying for 'expectedRetainedDocs' yields documents that are exactly the same as // 'expectedRetainedDocs'. 
if (expectedRetainedDocs.length > 0) { - const retainedDocs = changeColl.aggregate(pipeline(expectedRetainedDocs)).toArray(); - assert.eq(retainedDocs, expectedRetainedDocs); + assert.soonNoExcept(() => { + const retainedDocs = changeColl.aggregate(pipeline(expectedRetainedDocs)).toArray(); + assert.eq(retainedDocs, expectedRetainedDocs); + return true; + }); } // Assert that the query for any `expectedDeletedDocs` yields no results. if (expectedDeletedDocs.length > 0) { - const deletedDocs = changeColl.aggregate(pipeline(expectedDeletedDocs)).toArray(); - assert.eq(deletedDocs.length, 0); + assert.soonNoExcept(() => { + const deletedDocs = changeColl.aggregate(pipeline(expectedDeletedDocs)).toArray(); + assert.eq(deletedDocs.length, 0); + return true; + }); } } @@ -124,17 +134,22 @@ assert.commandWorked(citiesTenantConnPrimary.getDB("admin").runCommand( {setClusterParameter: {changeStreams: {expireAfterSeconds: kExpireAfterSeconds}}})); // Get tenants respective collections for testing. -const stocksTestDb = stocksTenantConnPrimary.getDB(jsTestName()); -const citiesTestDb = citiesTenantConnPrimary.getDB(jsTestName()); -const notUsedTestDb = notUsedTenantConnPrimary.getDB(jsTestName()); +const dbName = "change_coll_expired_doc_remover"; +const stocksTestDb = stocksTenantConnPrimary.getDB(dbName); +const citiesTestDb = citiesTenantConnPrimary.getDB(dbName); +const notUsedTestDb = notUsedTenantConnPrimary.getDB(dbName); const stocksColl = assertDropAndRecreateCollection(stocksTestDb, "stocks"); const citiesColl = assertDropAndRecreateCollection(citiesTestDb, "cities"); const notUsedColl = assertDropAndRecreateCollection(notUsedTestDb, "notUsed"); // Wait until the remover job hangs. -let fpHangBeforeRemovingDocs = configureFailPoint(primary, "hangBeforeRemovingExpiredChanges"); -fpHangBeforeRemovingDocs.wait(); +let fpHangBeforeRemovingDocsPrimary = + configureFailPoint(primary, "hangBeforeRemovingExpiredChanges"); +let fpHangBeforeRemovingDocsSecondary = + configureFailPoint(secondary, "hangBeforeRemovingExpiredChanges"); +fpHangBeforeRemovingDocsPrimary.wait(); +fpHangBeforeRemovingDocsSecondary.wait(); // Insert 5 documents to the 'stocks' collection owned by the 'stocksTenantId' that should be // deleted. @@ -250,20 +265,29 @@ assertChangeCollectionDocuments(citiesChangeCollectionSecondary, // 'currentWallTime' < first-non-expired-document. const currentWallTime = new Date(lastExpiredDocumentTime + kExpireAfterSeconds * 1000 + kSafetyMarginMillis); -const fpInjectWallTime = configureFailPoint( - primary, "injectCurrentWallTimeForRemovingExpiredDocuments", {currentWallTime}); +const failpointName = + FeatureFlagUtil.isPresentAndEnabled(stocksTestDb, "UseUnreplicatedTruncatesForDeletions") + ? "injectCurrentWallTimeForCheckingMarkers" + : "injectCurrentWallTimeForRemovingExpiredDocuments"; +const fpInjectWallTimePrimary = configureFailPoint(primary, failpointName, {currentWallTime}); +const fpInjectWallTimeSecondary = configureFailPoint(secondary, failpointName, {currentWallTime}); // Unblock the change collection remover job such that it picks up on the injected // 'currentWallTime'. -fpHangBeforeRemovingDocs.off(); +fpHangBeforeRemovingDocsPrimary.off(); +fpHangBeforeRemovingDocsSecondary.off(); // Wait until the remover job has retrieved the injected 'currentWallTime' and reset the first // failpoint. -fpInjectWallTime.wait(); +fpInjectWallTimePrimary.wait(); +fpInjectWallTimeSecondary.wait(); // Wait for a complete cycle of the TTL job. 
-fpHangBeforeRemovingDocs = configureFailPoint(primary, "hangBeforeRemovingExpiredChanges"); -fpHangBeforeRemovingDocs.wait(); +fpHangBeforeRemovingDocsPrimary = configureFailPoint(primary, "hangBeforeRemovingExpiredChanges"); +fpHangBeforeRemovingDocsSecondary = + configureFailPoint(secondary, "hangBeforeRemovingExpiredChanges"); +fpHangBeforeRemovingDocsPrimary.wait(); +fpHangBeforeRemovingDocsSecondary.wait(); // Assert that only required documents are retained in change collections on the primary. assertChangeCollectionDocuments( @@ -279,7 +303,6 @@ assertChangeCollectionDocuments( assertChangeCollectionDocuments( citiesChangeCollectionSecondary, citiesColl, citiesExpiredDocuments, citiesNonExpiredDocuments); -fpHangBeforeRemovingDocs.off(); - +fpHangBeforeRemovingDocsPrimary.off(); +fpHangBeforeRemovingDocsSecondary.off(); replSet.stopSet(); -})(); diff --git a/jstests/serverless/change_collection_server_stats.js b/jstests/serverless/change_collection_server_stats.js index 09d3d2ad3d611..96a980ffa789c 100644 --- a/jstests/serverless/change_collection_server_stats.js +++ b/jstests/serverless/change_collection_server_stats.js @@ -87,7 +87,7 @@ assert.soon(() => { diagnosticsBeforeTestCollInsertions.docsDeleted + numberOfDocuments - 1 && diagnosticsAfterTestCollInsertions.maxStartWallTimeMillis.tojson() >= wallTimeOfTheFirstOplogEntry.tojson() && - diagnosticsAfterTestCollInsertions.timeElapsedMillis > + diagnosticsAfterTestCollInsertions.timeElapsedMillis >= diagnosticsBeforeTestCollInsertions.timeElapsedMillis; }); diff --git a/jstests/serverless/change_stream_state_commands.js b/jstests/serverless/change_stream_state_commands.js index c7196e7a104e5..f6fdcb6eb61c9 100644 --- a/jstests/serverless/change_stream_state_commands.js +++ b/jstests/serverless/change_stream_state_commands.js @@ -9,6 +9,10 @@ load("jstests/libs/fail_point_util.js"); // For configureFailPoint. load('jstests/libs/parallel_shell_helpers.js'); // For funWithArgs. +// Disable implicit sessions since dropping "config" database for a tenant must be done not in a +// session. +TestData.disableImplicitSessions = true; + const replSetTest = new ReplSetTest({nodes: 2, name: "change-stream-state-commands", serverless: true}); @@ -104,6 +108,12 @@ const secondOrgTenantId = ObjectId(); setChangeStreamState(firstOrgTenantId, false); setChangeStreamState(firstOrgTenantId, false); assertChangeStreamState(firstOrgTenantId, false); + + // Verify that dropping "config" database works and effectively disables change streams. + setChangeStreamState(firstOrgTenantId, true); + assert.commandWorked(replSetTest.getPrimary().getDB("config").runCommand( + {dropDatabase: 1, $tenant: firstOrgTenantId})); + assertChangeStreamState(firstOrgTenantId, false); })(); // Tests that the 'setChangeStreamState' command tolerates the primary step-down and can @@ -302,4 +312,5 @@ const secondOrgTenantId = ObjectId(); })(); replSetTest.stopSet(); +TestData.disableImplicitSessions = false; }()); diff --git a/jstests/serverless/change_streams/isolate_high_water_mark.js b/jstests/serverless/change_streams/isolate_high_water_mark.js new file mode 100644 index 0000000000000..6a4c8790c7293 --- /dev/null +++ b/jstests/serverless/change_streams/isolate_high_water_mark.js @@ -0,0 +1,75 @@ +/** + * Test to make sure that a write by one tenant can't advance the resume token of another tenant. 
If + * it can happen then during a split a migrating tenant can wind up with a resume token greater than + * the split operation's blockTS and we could skip events when resuming on the recipient. + * @tags: [ + * serverless, + * requires_fcv_71 + * ] + */ +// TODO SERVER-76309: re-purpose this test to show that the resume token does advance with the +// global oplog, or remove the test in favour of existing coverage elsewhere. +load("jstests/serverless/libs/change_collection_util.js"); + +(function() { +const tenantIds = [ObjectId(), ObjectId()]; +const rst = new ChangeStreamMultitenantReplicaSetTest({ + nodes: 3, + nodeOptions: {setParameter: {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}} +}); + +const primary = rst.getPrimary(); +const tenant1Conn = + ChangeStreamMultitenantReplicaSetTest.getTenantConnection(primary.host, tenantIds[0]); +const tenant2Conn = + ChangeStreamMultitenantReplicaSetTest.getTenantConnection(primary.host, tenantIds[1]); +const tenant1DB = tenant1Conn.getDB("test"); +const tenant2DB = tenant2Conn.getDB("test"); +rst.setChangeStreamState(tenant1Conn, true); +rst.setChangeStreamState(tenant2Conn, true); + +// Open a stream on the test collection, and write a document to it. +const csCursor = tenant1DB.coll.watch(); +assert.commandWorked(tenant1DB.coll.insert({})); +assert.soon(() => csCursor.hasNext()); +const monitoredEvent = csCursor.next(); + +// Write an event to an un-monitored collection for the same tenant. Since this event is written +// into that tenant's change collection, it will cause the PBRT to advance even though that event is +// not relevant to the stream we have opened. When we see a PBRT that is greater than the timestamp +// of the last event (stored in 'monitoredEvent'), we know it must be a synthetic high-water-mark +// token. +// +// Note that the first insert into the un-monitored collection may not be enough to advance the +// PBRT; some passthroughs will group the un-monitored write into a transaction with the monitored +// write, giving them the same timestamp. We put the un-monitored insert into the assert.soon loop, +// so that it will eventually get its own transaction with a new timestamp. +let hwmToken = null; +assert.soon(() => { + assert.commandWorked(tenant1DB.coll2.insert({})); + assert.eq(csCursor.hasNext(), false); + hwmToken = csCursor.getResumeToken(); + assert.neq(undefined, hwmToken); + return bsonWoCompare(hwmToken, monitoredEvent._id) > 0; +}); + +// Open a change stream on tenant 2 so we can observe a write that happens and verify that write +// advanced the global oplog timestamp. +const csCursor2 = tenant2DB.coll.watch(); +let tenant2Event = null; +assert.soon(() => { + assert.commandWorked(tenant2DB.coll.insert({})); + assert.soon(() => csCursor2.hasNext()); + tenant2Event = csCursor2.next(); + return bsonWoCompare(tenant2Event._id, hwmToken) > 0; +}); + +// Try to get a new resume token for tenant 1. We shouldn't be able to get a new resume token +// greater than the last resume token we got. 
+assert.eq(csCursor.hasNext(), false); +hwmToken2 = csCursor.getResumeToken(); +assert.neq(undefined, hwmToken2); +assert.eq(bsonWoCompare(hwmToken, hwmToken2), 0); + +rst.stopSet(); +})(); diff --git a/jstests/serverless/change_streams/multitenant_pre_image_expired_document_remover.js b/jstests/serverless/change_streams/multitenant_pre_image_expired_document_remover.js index 0ae16779fb612..b6d5cddbb2598 100644 --- a/jstests/serverless/change_streams/multitenant_pre_image_expired_document_remover.js +++ b/jstests/serverless/change_streams/multitenant_pre_image_expired_document_remover.js @@ -4,13 +4,11 @@ * @tags: [requires_fcv_62] */ -(function() { -"use strict"; - // For assertDropAndRecreateCollection. load("jstests/libs/collection_drop_recreate.js"); // For ChangeStreamMultitenantReplicaSetTest. load("jstests/serverless/libs/change_collection_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const getTenantConnection = ChangeStreamMultitenantReplicaSetTest.getTenantConnection; @@ -21,7 +19,12 @@ const kVeryShortPreImageExpirationIntervalSecs = 1; // enabled and run expired pre-image removal job every 'kPreImageRemovalJobSleepSecs' seconds. const rst = new ChangeStreamMultitenantReplicaSetTest({ nodes: 2, - setParameter: {expiredChangeStreamPreImageRemovalJobSleepSecs: kPreImageRemovalJobSleepSecs} + setParameter: { + expiredChangeStreamPreImageRemovalJobSleepSecs: kPreImageRemovalJobSleepSecs, + // If 'UseUnreplicatedTruncatesForDeletions' feature flag is enabled, the test expects + // documents to be removed 1 by 1. + preImagesCollectionTruncateMarkersMinBytes: 1, + } }); // Hard code a tenant ids such that tenants can be identified deterministically. @@ -81,12 +84,13 @@ const stocks = [ // Create the 'stocks' collection on all three tenants. // Enable pre-images collection for 'tenant1' and 'tenant2' but not for 'notUsedTenant'. +const dbName = "mt_pre_image_expired_doc_remover"; const stocksCollTenant1 = assertDropAndRecreateCollection( - connTenant1.getDB(jsTestName()), "stocks", {changeStreamPreAndPostImages: {enabled: true}}); + connTenant1.getDB(dbName), "stocks", {changeStreamPreAndPostImages: {enabled: true}}); const stocksCollTenant2 = assertDropAndRecreateCollection( - connTenant2.getDB(jsTestName()), "stocks", {changeStreamPreAndPostImages: {enabled: true}}); + connTenant2.getDB(dbName), "stocks", {changeStreamPreAndPostImages: {enabled: true}}); const stocksCollNotUsedTenant = - assertDropAndRecreateCollection(connNotUsedTenant.getDB(jsTestName()), "stocks"); + assertDropAndRecreateCollection(connNotUsedTenant.getDB(dbName), "stocks"); // Insert some documents. They should not create pre-images documents. assert.commandWorked(stocksCollTenant1.insertMany(stocks)); @@ -119,10 +123,21 @@ assert.soon(() => (getPreImageCount(connTenant1) === 0), "Expecting 0 pre-images on tenant1, found " + getPreImageCount(connTenant1)); assert.eq(stocks.length, getPreImageCount(connTenant2)); -// Verify that the changes to pre-image collections are replicated correctly. -rst.awaitReplication(); -assert.eq(0, getPreImageCount(connTenant1Secondary)); -assert.eq(stocks.length, getPreImageCount(connTenant2Secondary)); +// Verify the pre-images collections are eventually in sync between the secondary and primary. 
+if (FeatureFlagUtil.isPresentAndEnabled(connTenant1Secondary.getDB(jsTestName()), + "UseUnreplicatedTruncatesForDeletions")) { + assert.soonNoExcept(() => { + assert.eq(0, getPreImageCount(connTenant1Secondary)); + assert.eq(stocks.length, getPreImageCount(connTenant2Secondary)); + return true; + }); +} else { + // Replicated deletes ensure the secondary and primary will instantly be in sync after awaiting + // replication. + rst.awaitReplication(); + assert.eq(0, getPreImageCount(connTenant1Secondary)); + assert.eq(stocks.length, getPreImageCount(connTenant2Secondary)); +} // Wait long enough for the purging job to finish. The pre-images of 'tenant2' should still not // expire. @@ -134,9 +149,16 @@ setExpireAfterSeconds(connTenant2, kVeryShortPreImageExpirationIntervalSecs); assert.soon(() => (getPreImageCount(connTenant2) === 0), "Expecting 0 pre-images on tenant2, found " + getPreImageCount(connTenant2)); -// Verify that the changes to pre-image collections are replicated correctly. -rst.awaitReplication(); -assert.eq(0, getPreImageCount(connTenant2Secondary)); +// Ensure pre-images are expired on the secondary. +if (FeatureFlagUtil.isPresentAndEnabled(connTenant2Secondary.getDB(jsTestName()), + "UseUnreplicatedTruncatesForDeletions")) { + assert.soonNoExcept(() => { + assert.eq(0, getPreImageCount(connTenant2Secondary)); + return true; + }); +} else { + rst.awaitReplication(); + assert.eq(0, getPreImageCount(connTenant2Secondary)); +} rst.stopSet(); -}()); diff --git a/jstests/serverless/change_streams/multitenant_read_from_change_collection.js b/jstests/serverless/change_streams/multitenant_read_from_change_collection.js index 47872e89d713d..1f8eabbd640f3 100644 --- a/jstests/serverless/change_streams/multitenant_read_from_change_collection.js +++ b/jstests/serverless/change_streams/multitenant_read_from_change_collection.js @@ -67,6 +67,15 @@ assertDropAndRecreateCollection( assert(secondTenantTestDb.getCollectionInfos({name: "stockPrice"})[0] .options.changeStreamPreAndPostImages.enabled); +// Verify that while the change streams are disabled for the tenant, performing update and delete +// operations on a collection with change stream pre- and post-images enabled succeeds. The +// pre-images collection shouldn't be affected either. +replSetTest.setChangeStreamState(firstTenantConn, false); +assert.commandWorked(firstTenantTestDb.stockPrice.insert({_id: "mdb", price: 350})); +assert.commandWorked(firstTenantTestDb.stockPrice.updateOne({_id: "mdb"}, {$set: {price: 450}})); +assert.commandWorked(firstTenantTestDb.stockPrice.deleteOne({_id: "mdb"})); +assert(!firstTenantConn.getDB("config").getCollectionNames().includes("system.preimages")); + // Create a new incarnation of the change collection for the first tenant. replSetTest.setChangeStreamState(firstTenantConn, false); replSetTest.setChangeStreamState(firstTenantConn, true); diff --git a/jstests/serverless/change_streams_cluster_parameter.js b/jstests/serverless/change_streams_cluster_parameter.js index 05e071d7da59e..474f481d1727f 100644 --- a/jstests/serverless/change_streams_cluster_parameter.js +++ b/jstests/serverless/change_streams_cluster_parameter.js @@ -5,6 +5,8 @@ // requires_sharding, // featureFlagServerlessChangeStreams, // requires_fcv_63, +// # TODO SERVER-74811: Re-enable this test. 
+// __TEMPORARILY_DISABLED__, // ] (function() { "use strict"; diff --git a/jstests/serverless/cluster_parameter_op_observer_serverless.js b/jstests/serverless/cluster_parameter_op_observer_serverless.js index 74d76c1baba98..b227ddba1513d 100644 --- a/jstests/serverless/cluster_parameter_op_observer_serverless.js +++ b/jstests/serverless/cluster_parameter_op_observer_serverless.js @@ -3,7 +3,7 @@ * @tags: [ * does_not_support_stepdowns, * requires_replication, - * requires_fcv_62, + * requires_fcv_71, * serverless * ] */ @@ -15,33 +15,20 @@ load("jstests/serverless/libs/change_collection_util.js"); const getTenantConnection = ChangeStreamMultitenantReplicaSetTest.getTenantConnection; -const kUnknownCSPLogId = 6226300; -const kUnknownCSPLogComponent = 'control'; -const kUnknownCSPLogLevel = 3; const tenantId = ObjectId(); function runTest(conn) { const tenantConn = getTenantConnection(conn.host, tenantId); let i = 0; - const connConfig = conn.getDB('config'); for (let myConn of [conn, tenantConn]) { const myConnConfig = myConn.getDB('config'); - // Using non-tenant connection, check that there's no log message yet and set the log level - // to debug - assert(!checkLog.checkContainsOnceJson(conn, kUnknownCSPLogId, {name: 'foo_' + i})); - const originalLogLevel = - assert - .commandWorked(connConfig.setLogLevel(kUnknownCSPLogLevel, kUnknownCSPLogComponent)) - .was.verbosity; - // With given connection, insert into this tenant's cluster parameter collection - assert.writeOK(myConnConfig.clusterParameters.insert( - {_id: 'foo_' + i, clusterParameterTime: Date(), value: 123})); - - // With non-tenant connection, reset log level and check that the op observer triggered and - // caused a log message about unknown cluster parameter - assert.commandWorked(connConfig.setLogLevel(originalLogLevel, kUnknownCSPLogComponent)); - assert(checkLog.checkContainsOnceJson(conn, kUnknownCSPLogId, {name: 'foo_' + i})); + // With given connection, insert into this tenant's cluster parameter collection. Should + // fail since this is an invalid parameter. 
+ const res = myConnConfig.clusterParameters.insert( + {_id: 'foo_' + i, clusterParameterTime: Date(), value: 123}); + assert(res.hasWriteError()); + assert.neq(res.getWriteError().length, 0); i += 1; } } diff --git a/jstests/serverless/cluster_parameters_initial_sync_restart.js b/jstests/serverless/cluster_parameters_initial_sync_restart.js index 0ec9ff16fe0dc..16cfeb2a75711 100644 --- a/jstests/serverless/cluster_parameters_initial_sync_restart.js +++ b/jstests/serverless/cluster_parameters_initial_sync_restart.js @@ -10,10 +10,11 @@ * serverless * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + runGetClusterParameterNode, + runGetClusterParameterReplicaSet, + runSetClusterParameter, +} from "jstests/libs/cluster_server_parameter_utils.js"; const tenantId = ObjectId(); @@ -125,5 +126,4 @@ for (let syncMethod of ["logical", "fileCopyBased"]) { jsTest.log("Testing cluster restart"); checkClusterParameterRestart(rst); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/serverless/cluster_server_parameter_commands_serverless_replset.js b/jstests/serverless/cluster_server_parameter_commands_serverless_replset.js index b749dd4abe5d1..4dcd4b3c7e82e 100644 --- a/jstests/serverless/cluster_server_parameter_commands_serverless_replset.js +++ b/jstests/serverless/cluster_server_parameter_commands_serverless_replset.js @@ -8,10 +8,11 @@ * serverless * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + setupReplicaSet, + testInvalidClusterParameterCommands, + testValidServerlessClusterParameterCommands, +} from "jstests/libs/cluster_server_parameter_utils.js"; // Tests that set/getClusterParameter works on a non-sharded replica set. const rst = new ReplSetTest({ @@ -38,5 +39,4 @@ for (const tenantId of [undefined, ObjectId()]) { // majority of the nodes in the replica set. testValidServerlessClusterParameterCommands(rst); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/serverless/create_indexes_with_tenant_migration.js b/jstests/serverless/create_indexes_with_tenant_migration.js index a2952dc1659aa..77871aa7fde3c 100644 --- a/jstests/serverless/create_indexes_with_tenant_migration.js +++ b/jstests/serverless/create_indexes_with_tenant_migration.js @@ -5,11 +5,12 @@ * @tags: [requires_fcv_52, serverless] */ +import {ShardedServerlessTest} from "jstests/serverless/libs/sharded_serverless_test.js"; + (function() { "use strict"; load("jstests/libs/fail_point_util.js"); -load("jstests/serverless/serverlesstest.js"); load('jstests/concurrency/fsm_libs/worker_thread.js'); // A function, not a constant, to ensure unique UUIDs. 
@@ -25,7 +26,7 @@ function donorStartMigrationCmd(tenantID, realConnUrl) { let createIndexesCmd = {createIndexes: "foo", indexes: [{key: {x: 1}, name: "x_1"}]}; -let st = new ServerlessTest(); +let st = new ShardedServerlessTest(); let donor = st.rs0; let recipient = st.rs1; let mongoq = st.q0; diff --git a/jstests/serverless/disabled_cluster_parameters_serverless.js b/jstests/serverless/disabled_cluster_parameters_serverless.js index f59349f7bb6a7..c7a5ac7a6ea09 100644 --- a/jstests/serverless/disabled_cluster_parameters_serverless.js +++ b/jstests/serverless/disabled_cluster_parameters_serverless.js @@ -9,10 +9,10 @@ * serverless * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + setupReplicaSet, + testDisabledClusterParameters +} from "jstests/libs/cluster_server_parameter_utils.js"; // Verifies that test-only parameters are disabled and excluded when enableTestCommands is false. TestData.enableTestCommands = false; @@ -30,5 +30,4 @@ setupReplicaSet(rst); // getClusterParameter: '*' with and without a tenantId. testDisabledClusterParameters(rst); testDisabledClusterParameters(rst, ObjectId()); -rst.stopSet(); -}()); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/serverless/findAndModify_with_tenant_migration.js b/jstests/serverless/findAndModify_with_tenant_migration.js index 2f2c9437f32e1..89fda5624b517 100644 --- a/jstests/serverless/findAndModify_with_tenant_migration.js +++ b/jstests/serverless/findAndModify_with_tenant_migration.js @@ -4,11 +4,12 @@ * @tags: [requires_fcv_52, serverless] */ +import {ShardedServerlessTest} from "jstests/serverless/libs/sharded_serverless_test.js"; + (function() { "use strict"; load("jstests/libs/fail_point_util.js"); -load("jstests/serverless/serverlesstest.js"); load('jstests/concurrency/fsm_libs/worker_thread.js'); function donorStartMigrationCmd(tenantID, realConnUrl) { @@ -25,7 +26,7 @@ const kCollName = 'foo'; let findAndModifyCmd = {findAndModify: kCollName, update: {$set: {y: 1}}, upsert: true}; -let st = new ServerlessTest(); +let st = new ShardedServerlessTest(); let adminDB = st.rs0.getPrimary().getDB('admin'); diff --git a/jstests/serverless/libs/shard_split_test.js b/jstests/serverless/libs/shard_split_test.js index ec1496c721d33..6f4c207a1a8e2 100644 --- a/jstests/serverless/libs/shard_split_test.js +++ b/jstests/serverless/libs/shard_split_test.js @@ -98,8 +98,7 @@ async function runCommitSplitThreadWrapper(rstArgs, tenantIds, recipientTagName, recipientSetName, - retryOnRetryableErrors, - enableDonorStartMigrationFsync) { + retryOnRetryableErrors) { load("jstests/replsets/rslib.js"); const {runShardSplitCommand} = await import("jstests/serverless/libs/shard_split_test.js"); @@ -114,8 +113,7 @@ async function runCommitSplitThreadWrapper(rstArgs, jsTestLog(`Running async split command ${tojson(commitShardSplitCmdObj)}`); - return runShardSplitCommand( - donorRst, commitShardSplitCmdObj, retryOnRetryableErrors, enableDonorStartMigrationFsync); + return runShardSplitCommand(donorRst, commitShardSplitCmdObj, retryOnRetryableErrors); } /* @@ -163,10 +161,8 @@ export function commitSplitAsync({ recipientSetName, migrationId, retryOnRetryableErrors, - enableDonorStartMigrationFsync } = { - retryOnRetryableErrors: false, - enableDonorStartMigrationFsync: false + retryOnRetryableErrors: false }) { jsTestLog("Running commitAsync command"); @@ -179,20 +175,14 @@ export function commitSplitAsync({ tojson(tenantIds), recipientTagName, recipientSetName, - 
retryOnRetryableErrors, - enableDonorStartMigrationFsync); + retryOnRetryableErrors); thread.start(); return thread; } -export function runShardSplitCommand( - replicaSet, cmdObj, retryOnRetryableErrors, enableDonorStartMigrationFsync) { +export function runShardSplitCommand(replicaSet, cmdObj, retryOnRetryableErrors) { let res; - if (enableDonorStartMigrationFsync) { - replicaSet.awaitLastOpCommitted(); - assert.commandWorked(replicaSet.getPrimary().adminCommand({fsync: 1})); - } assert.soon(() => { try { @@ -248,8 +238,7 @@ class ShardSplitOperation { * Starts a shard split synchronously. */ - commit({retryOnRetryableErrors} = {retryOnRetryableErrors: false}, - {enableDonorStartMigrationFsync} = {enableDonorStartMigrationFsync: false}) { + commit({retryOnRetryableErrors} = {retryOnRetryableErrors: false}) { jsTestLog("Running commit command"); const localCmdObj = { commitShardSplit: 1, @@ -259,18 +248,14 @@ class ShardSplitOperation { recipientSetName: this.recipientSetName }; - return runShardSplitCommand( - this.donorSet, localCmdObj, retryOnRetryableErrors, enableDonorStartMigrationFsync); + return runShardSplitCommand(this.donorSet, localCmdObj, retryOnRetryableErrors); } /** * Starts a shard split asynchronously and returns the Thread that runs it. * @returns the Thread running the commitShardSplit command. */ - commitAsync({retryOnRetryableErrors, enableDonorStartMigrationFsync} = { - retryOnRetryableErrors: false, - enableDonorStartMigrationFsync: false - }) { + commitAsync({retryOnRetryableErrors} = {retryOnRetryableErrors: false}) { return commitSplitAsync({ rst: this.donorSet, tenantIds: this.tenantIds, @@ -278,7 +263,6 @@ class ShardSplitOperation { recipientSetName: this.recipientSetName, migrationId: this.migrationId, retryOnRetryableErrors, - enableDonorStartMigrationFsync }); } @@ -294,10 +278,7 @@ class ShardSplitOperation { const donorSet = createRst(donorRstArgs, true); const cmdObj = {forgetShardSplit: 1, migrationId: this.migrationId}; - assert.commandWorked(runShardSplitCommand(donorSet, - cmdObj, - true /* retryableOnErrors */, - false /*enableDonorStartMigrationFsync*/)); + assert.commandWorked(runShardSplitCommand(donorSet, cmdObj, true /* retryableOnErrors */)); } forgetAsync() { @@ -355,7 +336,6 @@ export class ShardSplitTest { donorRst, nodeOptions, allowStaleReadsOnDonor = false, - initiateWithShortElectionTimeout = false } = {}) { nodeOptions = nodeOptions || {}; if (quickGarbageCollection) { @@ -372,11 +352,7 @@ export class ShardSplitTest { } else { this.donor = new ReplSetTest({name: "donor", nodes: 3, serverless: true, nodeOptions}); this.donor.startSet(); - if (initiateWithShortElectionTimeout) { - this.initiateWithShortElectionTimeout(); - } else { - this.donor.initiate(); - } + this.donor.initiate(); } this.recipientTagName = recipientTagName; @@ -384,13 +360,6 @@ export class ShardSplitTest { this.recipientNodes = []; } - initiateWithShortElectionTimeout() { - let config = this.donor.getReplSetConfig(); - config.settings = config.settings || {}; - config.settings["electionTimeoutMillis"] = 500; - this.donor.initiate(config); - } - /* * Removes and stops the recipient nodes and then stops the donor nodes. 
* @param {shouldRestart} indicates whether stop() is being called with the intent to call diff --git a/jstests/serverless/libs/sharded_serverless_test.js b/jstests/serverless/libs/sharded_serverless_test.js new file mode 100644 index 0000000000000..028597c37a2a6 --- /dev/null +++ b/jstests/serverless/libs/sharded_serverless_test.js @@ -0,0 +1,91 @@ +/** + * Starts up a cluster with all default configurations required by a serverless test. + * The cluster has a mongoq, a config server with 3 nodes and 2 shards. Each shard has 3 nodes. + * The X509 authentication is disabled in the cluster. + */ +export class ShardedServerlessTest { + constructor() { + let numShards = 2; + + this.stop = () => { + jsTest.log("Going to stop mongoq."); + MongoRunner.stopMongoq(this.q); + + jsTest.log("Going to stop all replica sets."); + for (var i = 0; i < numShards; i++) { + let rs = this["rs" + i]; + rs.stopSet(15); + } + + jsTest.log("Going to stop config server."); + this.configRS.stopSet(); + }; + + jsTest.log("Going to create and start config server."); + this.configRS = + new ReplSetTest({name: "configRS", serverless: true, nodes: 3, useHostName: true}); + this.configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'}); + + jsTest.log("Initiate config server before starting mongoq."); + let replConfig = this.configRS.getReplSetConfig(); + replConfig.configsvr = true; + this.configRS.initiate(replConfig); + + jsTest.log("Going to start mongoq."); + this.q = MongoRunner.runMongoq({configdb: this.configRS.getURL()}); + assert.neq(this.q, null, "Failed to start mongoq"); + + jsTest.log("Going to add replica sets."); + let adminDB = this.q.getDB('admin'); + for (let i = 0; i < numShards; i++) { + let rs = new ReplSetTest({ + name: "testShard-rs-" + i, + nodes: 3, + serverless: true, + nodeOptions: {shardsvr: ""} + }); + rs.startSet({setParameter: {tenantMigrationDisableX509Auth: true}}); + rs.initiate(); + this["rs" + i] = rs; + } + + jsTest.log("Going to create connection with each shard."); + for (let i = 0; i < numShards; i++) { + let rs = this["rs" + i]; + var result = assert.commandWorked(adminDB.runCommand({addShard: rs.getURL()})); + + let rsConn = new Mongo(rs.getURL()); + rsConn.name = rs.getURL(); + rsConn.rs = rs; + rsConn.shardName = result.shardAdded; + this["shard" + i] = rsConn; + } + + this.q0 = this.q; + jsTest.log("ShardedServerlessTest is created."); + } + + /** + * Helper method for setting primary shard of a database and making sure that it was + * successful. Note: first mongoq needs to be up. 
+ */ + ensurePrimaryShard(dbName, shardName) { + var db = this.q.getDB('admin'); + var res = db.adminCommand({movePrimary: dbName, to: shardName}); + assert(res.ok || res.errmsg == "it is already the primary", tojson(res)); + } + + addTenant(tenantId, shardId) { + return assert.commandWorked( + this.configRS.getPrimary() + .getCollection('config.tenants') + .insert({_id: tenantId, shardId: shardId}, {writeConcern: {w: "majority"}})); + } + + removeTenant(tenantId) { + return assert.commandWorked( + this.configRS.getPrimary().getCollection('config.tenants').remove({_id: tenantId}, { + writeConcern: {w: "majority"} + })); + } +} diff --git a/jstests/serverless/multitenancy_rollback_crud_op.js b/jstests/serverless/multitenancy_rollback_crud_op.js new file mode 100644 index 0000000000000..b44f13e19c7b8 --- /dev/null +++ b/jstests/serverless/multitenancy_rollback_crud_op.js @@ -0,0 +1,169 @@ +/* + * Test of a successfull replica set rollback for basic CRUD operations in multitenancy environment + * with featureFlagRequireTenantId. This test is modeled from rollback_crud_ops_sequence.js. + */ +load('jstests/replsets/libs/rollback_test.js'); + +(function() { +"use strict"; + +const kColl = "bar"; +const tenantA = ObjectId(); +const tenantB = ObjectId(); + +const insertDocs = function(db, coll, tenant, documents) { + assert.commandWorked(db.runCommand({insert: coll, documents, '$tenant': tenant})); +}; + +const updateDocs = function(db, coll, tenant, updates) { + assert.commandWorked(db.runCommand({update: coll, updates, '$tenant': tenant})); +}; + +const deleteMany = function(db, coll, tenant, query) { + assert.commandWorked(db.runCommand({ + delete: coll, + deletes: [ + {q: query, limit: 0}, + ], + '$tenant': tenant + + })); +}; + +const validateCounts = function(db, coll, tenant, expect) { + for (let expected of expect) { + let res = db.runCommand({count: coll, query: expected.q, '$tenant': tenant}); + assert.eq(res.n, expected.n); + } +}; + +// Helper function for verifying contents at the end of the test. +const checkFinalResults = function(db) { + validateCounts(db, kColl, tenantA, [ + {q: {q: 70}, n: 0}, + {q: {q: 40}, n: 2}, + {q: {a: 'foo'}, n: 3}, + {q: {q: {$gt: -1}}, n: 6}, + {q: {txt: 'foo'}, n: 1}, + {q: {q: 4}, n: 0} + ]); + + validateCounts(db, kColl, tenantB, [{q: {q: 1}, n: 1}, {q: {q: 40}, n: 0}]); + + let res = db.runCommand({find: kColl, filter: {q: 0}, '$tenant': tenantA}); + assert.eq(res.cursor.firstBatch.length, 1); + assert.eq(res.cursor.firstBatch[0].y, 33); + + res = db.runCommand({find: 'kap', '$tenant': tenantA}); + assert.eq(res.cursor.firstBatch.length, 1); + + res = db.runCommand({find: 'kap2', '$tenant': tenantA}); + assert.eq(res.cursor.firstBatch.length, 0); +}; + +function setFastGetMoreEnabled(node) { + assert.commandWorked( + node.adminCommand({configureFailPoint: 'setSmallOplogGetMoreMaxTimeMS', mode: 'alwaysOn'}), + `Failed to enable setSmallOplogGetMoreMaxTimeMS failpoint.`); +} + +function setUpRst() { + const replSet = new ReplSetTest({ + nodes: 3, + useBridge: true, + nodeOptions: {setParameter: {multitenancySupport: true, featureFlagRequireTenantID: true}} + }); + replSet.startSet(); + replSet.nodes.forEach(setFastGetMoreEnabled); + + let config = replSet.getReplSetConfig(); + config.members[2].priority = 0; + config.settings = {chainingAllowed: false}; + replSet.initiateWithHighElectionTimeout(config); + // Tiebreaker's replication is paused for most of the test, avoid falling off the oplog. 
+ replSet.nodes.forEach((node) => { + assert.commandWorked(node.adminCommand({replSetResizeOplog: 1, minRetentionHours: 2})); + }); + + assert.eq(replSet.nodes.length, + 3, + "Mismatch between number of data bearing nodes and test configuration."); + + return replSet; +} + +const replSet = setUpRst(); +const rollbackTest = new RollbackTest("MultitenancyRollbackTest", replSet); + +const rollbackNode = rollbackTest.getPrimary(); +rollbackNode.setSecondaryOk(); +const syncSource = rollbackTest.getSecondary(); +syncSource.setSecondaryOk(); + +const rollbackNodeDB = rollbackNode.getDB("foo"); +const syncSourceDB = syncSource.getDB("foo"); + +// Insert initial data for both nodes. +insertDocs(rollbackNodeDB, kColl, tenantA, [{q: -2}, {q: 0}, {q: 1, a: "foo"}]); +insertDocs(rollbackNodeDB, kColl, tenantB, [{q: 1}, {q: 40, a: "foo"}]); +insertDocs(rollbackNodeDB, kColl, tenantA, [ + {q: 2, a: "foo", x: 1}, + {q: 3, bb: 9, a: "foo"}, + {q: 40, a: 1}, + {q: 40, a: 2}, + {q: 70, txt: 'willremove'} +]); + +// Testing capped collection. +rollbackNodeDB.createCollection("kap", {'$tenant': tenantA, capped: true, size: 5000}); +insertDocs(rollbackNodeDB, 'kap', tenantA, [{foo: 1}]); +// Going back to empty on capped is a special case and must be tested. +rollbackNodeDB.createCollection("kap2", {'$tenant': tenantA, capped: true, size: 5000}); + +rollbackTest.awaitReplication(); +rollbackTest.transitionToRollbackOperations(); + +// These operations are only done on 'rollbackNode' and should eventually be rolled back. +insertDocs(rollbackNodeDB, kColl, tenantA, [{q: 4}]); +updateDocs(rollbackNodeDB, kColl, tenantA, [ + {q: {q: 3}, u: {q: 3, rb: true}}, +]); +insertDocs(rollbackNodeDB, kColl, tenantB, [{q: 1, foo: 2}]); +deleteMany(rollbackNodeDB, kColl, tenantA, {q: 40}); +updateDocs(rollbackNodeDB, kColl, tenantA, [ + {q: {q: 2}, u: {q: 39, rb: true}}, +]); + +// Rolling back a delete will involve reinserting the item(s). +deleteMany(rollbackNodeDB, kColl, tenantA, {q: 1}); +updateDocs(rollbackNodeDB, kColl, tenantA, [ + {q: {q: 0}, u: {$inc: {y: 1}}}, +]); +insertDocs(rollbackNodeDB, 'kap', tenantA, [{foo: 2}]); +insertDocs(rollbackNodeDB, 'kap2', tenantA, [{foo: 2}]); + +// Create a collection (need to roll back the whole thing). +insertDocs(rollbackNodeDB, 'newcoll', tenantA, [{a: true}]); +// Create a new empty collection (need to roll back the whole thing). +assert.commandWorked(rollbackNodeDB.createCollection("abc", {'$tenant': tenantA})); + +rollbackTest.transitionToSyncSourceOperationsBeforeRollback(); + +// Insert new data into syncSource so that rollbackNode enters rollback when it is reconnected. +// These operations should not be rolled back. 
+insertDocs(syncSourceDB, kColl, tenantA, [{txt: 'foo'}]); +deleteMany(syncSourceDB, kColl, tenantA, {q: 70}); +updateDocs(syncSourceDB, kColl, tenantA, [ + {q: {q: 0}, u: {$inc: {y: 33}}}, +]); +deleteMany(syncSourceDB, kColl, tenantB, {q: 40}); + +rollbackTest.transitionToSyncSourceOperationsDuringRollback(); +rollbackTest.transitionToSteadyStateOperations(); + +rollbackTest.awaitReplication(); +checkFinalResults(rollbackNodeDB); +checkFinalResults(syncSourceDB); + +rollbackTest.stop(); +}()); diff --git a/jstests/serverless/native_tenant_data_isolation_basic_dollar_tenant.js b/jstests/serverless/native_tenant_data_isolation_basic_dollar_tenant.js index aff88c405b136..a61b2052b6c35 100644 --- a/jstests/serverless/native_tenant_data_isolation_basic_dollar_tenant.js +++ b/jstests/serverless/native_tenant_data_isolation_basic_dollar_tenant.js @@ -1,10 +1,7 @@ // Test basic db operations in multitenancy using $tenant. -(function() { -"use strict"; - load('jstests/aggregation/extras/utils.js'); // For arrayEq() -load("jstests/libs/feature_flag_util.js"); // for isEnabled +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const rst = new ReplSetTest({ nodes: 3, @@ -70,11 +67,14 @@ const testColl = testDb.getCollection(kCollName); // Check that the resulting array of catalog entries contains our target databases and // namespaces. - assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === kCollName))); + assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === kCollName)), + tojson(resultArray)); // Also check that the resulting array contains views specific to our target database. - assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === targetViews))); - assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === viewName))); + assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === targetViews)), + tojson(resultArray)); + assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === viewName)), + tojson(resultArray)); // Get catalog when specifying our target collection, which should only return one result. result = testDb.runCommand({ @@ -87,13 +87,16 @@ const testColl = testDb.getCollection(kCollName); // Check that the resulting array of catalog entries contains our target database and // namespace. - assert(resultArray.length == 1); - assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === kCollName))); + assert.eq(resultArray.length, 1, tojson(resultArray)); + assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === kCollName)), + tojson(resultArray)); // These collections should not be accessed with a different tenant. const collsWithDiffTenant = assert.commandWorked( testDb.runCommand({listCollections: 1, nameOnly: true, '$tenant': kOtherTenant})); - assert.eq(0, collsWithDiffTenant.cursor.firstBatch.length); + assert.eq(0, + collsWithDiffTenant.cursor.firstBatch.length, + tojson(collsWithDiffTenant.cursor.firstBatch)); } // Test listDatabases command. @@ -106,25 +109,25 @@ const testColl = testDb.getCollection(kCollName); const dbs = assert.commandWorked( adminDb.runCommand({listDatabases: 1, nameOnly: true, '$tenant': kTenant})); - assert.eq(2, dbs.databases.length); + assert.eq(2, dbs.databases.length, tojson(dbs)); // The 'admin' database is not expected because we do not create a tenant user in this test. const expectedDbs = featureFlagRequireTenantId ? 
[kDbName, kOtherDbName] : [kTenant + "_" + kDbName, kTenant + "_" + kOtherDbName]; - assert(arrayEq(expectedDbs, dbs.databases.map(db => db.name))); + assert(arrayEq(expectedDbs, dbs.databases.map(db => db.name)), tojson(dbs)); // These databases should not be accessed with a different tenant. const dbsWithDiffTenant = assert.commandWorked( adminDb.runCommand({listDatabases: 1, nameOnly: true, '$tenant': kOtherTenant})); - assert.eq(0, dbsWithDiffTenant.databases.length); + assert.eq(0, dbsWithDiffTenant.databases.length, tojson(dbsWithDiffTenant)); const allDbs = assert.commandWorked(adminDb.runCommand({listDatabases: 1, nameOnly: true})); expectedDbs.push("admin"); expectedDbs.push("config"); expectedDbs.push("local"); - assert.eq(5, allDbs.databases.length); - assert(arrayEq(expectedDbs, allDbs.databases.map(db => db.name))); + assert.eq(5, allDbs.databases.length, tojson(allDbs)); + assert(arrayEq(expectedDbs, allDbs.databases.map(db => db.name)), tojson(allDbs)); } // Test insert, agg, find, getMore, and explain commands. @@ -174,7 +177,7 @@ const testColl = testDb.getCollection(kCollName); '$tenant': kTenant })); assert.eq(1, aggRes.cursor.firstBatch.length, tojson(aggRes.cursor.firstBatch)); - assert.eq(kTenantDocs[0], aggRes.cursor.firstBatch[0]); + assert.eq(kTenantDocs[0], aggRes.cursor.firstBatch[0], tojson(aggRes.cursor.firstBatch)); const aggRes2 = assert.commandWorked(testDb.runCommand({ aggregate: kCollName, @@ -183,7 +186,7 @@ const testColl = testDb.getCollection(kCollName); '$tenant': kOtherTenant })); assert.eq(1, aggRes2.cursor.firstBatch.length, tojson(aggRes2.cursor.firstBatch)); - assert.eq(kOtherTenantDocs[0], aggRes2.cursor.firstBatch[0]); + assert.eq(kOtherTenantDocs[0], aggRes2.cursor.firstBatch[0], tojson(aggRes2.cursor.firstBatch)); // Test that explain works correctly. const kTenantExplainRes = assert.commandWorked(testDb.runCommand( @@ -204,14 +207,14 @@ const testColl = testDb.getCollection(kCollName); const fad1 = assert.commandWorked(testDb.runCommand( {findAndModify: kCollName, query: {a: 1}, update: {$inc: {a: 10}}, '$tenant': kTenant})); - assert.eq({_id: 0, a: 1, b: 1}, fad1.value); + assert.eq({_id: 0, a: 1, b: 1}, fad1.value, tojson(fad1)); const fad2 = assert.commandWorked(testDb.runCommand({ findAndModify: kCollName, query: {a: 11}, update: {$set: {a: 1, b: 1}}, '$tenant': kTenant })); - assert.eq({_id: 0, a: 11, b: 1}, fad2.value); + assert.eq({_id: 0, a: 11, b: 1}, fad2.value, tojson(fad2)); // This document should not be accessed with a different tenant. const fadOtherUser = assert.commandWorked(testDb.runCommand({ findAndModify: kCollName, @@ -219,7 +222,7 @@ const testColl = testDb.getCollection(kCollName); update: {$inc: {b: 10}}, '$tenant': kOtherTenant })); - assert.eq(null, fadOtherUser.value); + assert.eq(null, fadOtherUser.value, tojson(fadOtherUser)); } // Test count and distinct command. @@ -230,18 +233,18 @@ const testColl = testDb.getCollection(kCollName); // Test count command. const resCount = assert.commandWorked( testDb.runCommand({count: kCollName, query: {c: 1}, '$tenant': kTenant})); - assert.eq(2, resCount.n); + assert.eq(2, resCount.n, tojson(resCount)); const resCountOtherUser = assert.commandWorked( testDb.runCommand({count: kCollName, query: {c: 1}, '$tenant': kOtherTenant})); - assert.eq(0, resCountOtherUser.n); + assert.eq(0, resCountOtherUser.n, tojson(resCountOtherUser)); // Test Distict command. 
const resDistinct = assert.commandWorked( testDb.runCommand({distinct: kCollName, key: 'd', query: {}, '$tenant': kTenant})); - assert.eq([1, 2], resDistinct.values.sort()); + assert.eq([1, 2], resDistinct.values.sort(), tojson(resDistinct)); const resDistinctOtherUser = assert.commandWorked( testDb.runCommand({distinct: kCollName, key: 'd', query: {}, '$tenant': kOtherTenant})); - assert.eq([], resDistinctOtherUser.values); + assert.eq([], resDistinctOtherUser.values, tojson(resDistinctOtherUser)); } // Test renameCollection command. @@ -258,7 +261,7 @@ const testColl = testDb.getCollection(kCollName); update: {$inc: {a: 10}}, '$tenant': kTenant })); - assert.eq({_id: 0, a: 1, b: 1}, fad1.value); + assert.eq({_id: 0, a: 1, b: 1}, fad1.value, tojson(fad1)); // This collection should not be accessed with a different tenant. assert.commandFailedWithCode( @@ -357,17 +360,18 @@ const testColl = testDb.getCollection(kCollName); indexes: [{key: {a: 1}, name: "indexA"}, {key: {b: 1}, name: "indexB"}], '$tenant': kTenant })); - assert.eq(3, res.numIndexesAfter); + assert.eq(3, res.numIndexesAfter, tojson(res)); res = assert.commandWorked(testDb.runCommand({listIndexes: kCollName, '$tenant': kTenant})); - assert.eq(3, res.cursor.firstBatch.length); + assert.eq(3, res.cursor.firstBatch.length, tojson(res.cursor.firstBatch)); assert(arrayEq( - [ - {key: {"_id": 1}, name: "_id_"}, - {key: {a: 1}, name: "indexA"}, - {key: {b: 1}, name: "indexB"} - ], - getIndexesKeyAndName(res.cursor.firstBatch))); + [ + {key: {"_id": 1}, name: "_id_"}, + {key: {a: 1}, name: "indexA"}, + {key: {b: 1}, name: "indexB"} + ], + getIndexesKeyAndName(res.cursor.firstBatch)), + tojson(res.cursor.firstBatch)); // These indexes should not be accessed with a different tenant. assert.commandFailedWithCode( @@ -383,8 +387,9 @@ const testColl = testDb.getCollection(kCollName); {dropIndexes: kCollName, index: ["indexA", "indexB"], '$tenant': kTenant})); res = assert.commandWorked(testDb.runCommand({listIndexes: kCollName, '$tenant': kTenant})); - assert.eq(1, res.cursor.firstBatch.length); - assert(arrayEq([{key: {"_id": 1}, name: "_id_"}], getIndexesKeyAndName(res.cursor.firstBatch))); + assert.eq(1, res.cursor.firstBatch.length, tojson(res.cursor.firstBatch)); + assert(arrayEq([{key: {"_id": 1}, name: "_id_"}], getIndexesKeyAndName(res.cursor.firstBatch)), + tojson(res.cursor.firstBatch)); } // Test collMod @@ -395,7 +400,7 @@ const testColl = testDb.getCollection(kCollName); indexes: [{key: {c: 1}, name: "indexC", expireAfterSeconds: 50}], '$tenant': kTenant })); - assert.eq(2, res.numIndexesAfter); + assert.eq(2, res.numIndexesAfter, tojson(res)); // Modifying the index without the tenantId should not work. res = testDb.runCommand({ @@ -416,8 +421,8 @@ const testColl = testDb.getCollection(kCollName); "index": {"keyPattern": {c: 1}, expireAfterSeconds: 100}, '$tenant': kTenant })); - assert.eq(50, res.expireAfterSeconds_old); - assert.eq(100, res.expireAfterSeconds_new); + assert.eq(50, res.expireAfterSeconds_old, tojson(res)); + assert.eq(100, res.expireAfterSeconds_new, tojson(res)); // Drop the index created assert.commandWorked( @@ -441,15 +446,15 @@ const testColl = testDb.getCollection(kCollName); // Check applyOp inserted the document. 
const findRes = assert.commandWorked( testDb.runCommand({find: kCollName, filter: {_id: 5}, '$tenant': kTenant})); - assert.eq(1, findRes.cursor.firstBatch.length); - assert.eq(17, findRes.cursor.firstBatch[0].x); + assert.eq(1, findRes.cursor.firstBatch.length, tojson(findRes.cursor.firstBatch)); + assert.eq(17, findRes.cursor.firstBatch[0].x, tojson(findRes.cursor.firstBatch)); } // Test the validate command. { const validateRes = assert.commandWorked(testDb.runCommand({validate: kCollName, '$tenant': kTenant})); - assert(validateRes.valid); + assert(validateRes.valid, tojson(validateRes)); } // Test dbCheck command. @@ -542,5 +547,68 @@ const testColl = testDb.getCollection(kCollName); 31264); } -rst.stopSet(); -})(); +// Test the fail command failpoint with $tenant. +{ + // We should not pass $tenant in the data field. Here it is passed twice. + assert.commandFailedWithCode(adminDb.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 1}, + '$tenant': kTenant, + data: { + failCommands: ["find"], + namespace: testDb.getName() + "." + kCollName, + '$tenant': kTenant, + } + }), + 7302300); + + // We should not pass $tenant in the data field. + assert.commandFailedWithCode(adminDb.runCommand({ + configureFailPoint: "failCommand", + mode: {times: 1}, + data: { + failCommands: ["find"], + namespace: testDb.getName() + "." + kCollName, + '$tenant': kTenant, + } + }), + 7302300); + + // enable the failCommand failpoint for kTenant on myDb.myColl for the find command. + assert.commandWorked(adminDb.runCommand({ + configureFailPoint: "failCommand", + mode: "alwaysOn", + '$tenant': kTenant, + data: { + errorCode: ErrorCodes.InternalError, + failCommands: ["find"], + namespace: testDb.getName() + "." + kCollName, + } + })); + + // same tenant and same namespace should fail. + assert.commandFailedWithCode(testDb.runCommand({find: kCollName, '$tenant': kTenant}), + ErrorCodes.InternalError); + + // same tenant different namespace. + assert.commandWorked(testDb.runCommand({find: "foo", '$tenant': kTenant})); + + // different tenant passed and same namespace. + assert.commandWorked(testDb.runCommand({find: kCollName, '$tenant': kOtherTenant})); + + // different tenant passed and different namespace. + assert.commandWorked(testDb.runCommand({find: "foo", '$tenant': kOtherTenant})); + + // disable the failCommand failpoint. + assert.commandWorked(adminDb.runCommand({configureFailPoint: "failCommand", mode: "off"})); + assert.commandWorked(testDb.runCommand({find: kCollName, '$tenant': kTenant})); +} + +// Test invalid db name length which is more than 38 chars. +{ + const longDb = primary.getDB("ThisIsADbExceedsTheMaxLengthOfTenantDB38"); + assert.commandFailedWithCode(longDb.createCollection("testColl", {'$tenant': kTenant}), + ErrorCodes.InvalidNamespace); +} + +rst.stopSet(); \ No newline at end of file diff --git a/jstests/serverless/native_tenant_data_isolation_basic_security_token.js b/jstests/serverless/native_tenant_data_isolation_basic_security_token.js index 018ce6062b948..27eba35cfa394 100644 --- a/jstests/serverless/native_tenant_data_isolation_basic_security_token.js +++ b/jstests/serverless/native_tenant_data_isolation_basic_security_token.js @@ -1,24 +1,11 @@ // Test basic db operations in multitenancy using a securityToken. 
-(function() { -"use strict"; - load('jstests/aggregation/extras/utils.js'); // For arrayEq() -load("jstests/libs/feature_flag_util.js"); // for isEnabled - -function checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollectionName, nsField) { - if (featureFlagRequireTenantId) { - // This case represents the upgraded state where we will not include the tenantId as the - // db prefix. - const nss = kDbName + (kCollectionName == "" ? "" : "." + kCollectionName); - assert.eq(nsField, nss); - } else { - // This case represents the downgraded state where we will continue to prefix namespaces. - const prefixedNss = - kTenant + "_" + kDbName + (kCollectionName == "" ? "" : "." + kCollectionName); - assert.eq(nsField, prefixedNss); - } +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +function checkNsSerializedCorrectly(kDbName, kCollectionName, nsField) { + const nss = kDbName + (kCollectionName == "" ? "" : "." + kCollectionName); + assert.eq(nsField, nss); } const rst = new ReplSetTest({ @@ -88,13 +75,11 @@ const tokenDB = tokenConn.getDB(kDbName); const findRes = assert.commandWorked( tokenDB.runCommand({find: kCollName, filter: {a: 1}, batchSize: 1})); assert(arrayEq([{_id: 0, a: 1, b: 1}], findRes.cursor.firstBatch), tojson(findRes)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollName, findRes.cursor.ns); + checkNsSerializedCorrectly(kDbName, kCollName, findRes.cursor.ns); const getMoreRes = assert.commandWorked( tokenDB.runCommand({getMore: findRes.cursor.id, collection: kCollName})); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollName, getMoreRes.cursor.ns); + checkNsSerializedCorrectly(kDbName, kCollName, getMoreRes.cursor.ns); } // Test the aggregate command. @@ -102,18 +87,17 @@ const tokenDB = tokenConn.getDB(kDbName); const aggRes = assert.commandWorked( tokenDB.runCommand({aggregate: kCollName, pipeline: [{$match: {a: 1}}], cursor: {}})); assert(arrayEq([{_id: 0, a: 1, b: 1}], aggRes.cursor.firstBatch), tojson(aggRes)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollName, aggRes.cursor.ns); + checkNsSerializedCorrectly(kDbName, kCollName, aggRes.cursor.ns); } // Find and modify the document. { const fad1 = assert.commandWorked( tokenDB.runCommand({findAndModify: kCollName, query: {a: 1}, update: {$inc: {a: 10}}})); - assert.eq({_id: 0, a: 1, b: 1}, fad1.value); + assert.eq({_id: 0, a: 1, b: 1}, fad1.value, tojson(fad1)); const fad2 = assert.commandWorked(tokenDB.runCommand( {findAndModify: kCollName, query: {a: 11}, update: {$set: {a: 1, b: 1}}})); - assert.eq({_id: 0, a: 11, b: 1}, fad2.value); + assert.eq({_id: 0, a: 11, b: 1}, fad2.value, tojson(fad2)); } // Create a view on the collection, and check that listCollections sees the original @@ -135,8 +119,7 @@ const tokenDB = tokenConn.getDB(kDbName); {"name": viewName, "type": "view"} ]; assert(arrayEq(expectedColls, colls.cursor.firstBatch), tojson(colls.cursor.firstBatch)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, "$cmd.listCollections", colls.cursor.ns); + checkNsSerializedCorrectly(kDbName, "$cmd.listCollections", colls.cursor.ns); const prefixedDbName = kTenant + '_' + tokenDB.getName(); const targetDb = featureFlagRequireTenantId ? tokenDB.getName() : prefixedDbName; @@ -150,12 +133,14 @@ const tokenDB = tokenConn.getDB(kDbName); // Check that the resulting array of catalog entries contains our target databases and // namespaces. 
- assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === kCollName))); + assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === kCollName)), + tojson(resultArray)); // Also check that the resulting array contains views specific to our target database. - assert( - resultArray.some((entry) => (entry.db === targetDb) && (entry.name === targetViews))); - assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === viewName))); + assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === targetViews)), + tojson(resultArray)); + assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === viewName)), + tojson(resultArray)); // Get catalog when specifying our target collection, which should only return one // result. @@ -165,16 +150,16 @@ const tokenDB = tokenConn.getDB(kDbName); // Check that the resulting array of catalog entries contains our target databases and // namespaces. - assert(resultArray.length == 1); - assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === kCollName))); + assert.eq(resultArray.length, 1, tojson(resultArray)); + assert(resultArray.some((entry) => (entry.db === targetDb) && (entry.name === kCollName)), + tojson(resultArray)); } // Test explain command with find { const cmdRes = tokenDB.runCommand({explain: {find: kCollName, filter: {a: 1}}}); assert.eq(1, cmdRes.executionStats.nReturned, tojson(cmdRes)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollName, cmdRes.queryPlanner.namespace); + checkNsSerializedCorrectly(kDbName, kCollName, cmdRes.queryPlanner.namespace); } // Test count and distinct command. @@ -184,11 +169,11 @@ const tokenDB = tokenConn.getDB(kDbName); const resCount = assert.commandWorked(tokenDB.runCommand({count: kCollName, query: {c: 1}})); - assert.eq(2, resCount.n); + assert.eq(2, resCount.n, tojson(resCount)); - const resDitinct = + const resDistinct = assert.commandWorked(tokenDB.runCommand({distinct: kCollName, key: 'd', query: {}})); - assert.eq([1, 2], resDitinct.values.sort()); + assert.eq([1, 2], resDistinct.values.sort(), tojson(resDistinct)); } // Rename the collection. @@ -202,7 +187,7 @@ const tokenDB = tokenConn.getDB(kDbName); // Verify the the renamed collection by findAndModify existing documents. const fad1 = assert.commandWorked(tokenDB.runCommand( {findAndModify: kCollName + "_renamed", query: {a: 1}, update: {$set: {a: 11, b: 1}}})); - assert.eq({_id: 0, a: 1, b: 1}, fad1.value); + assert.eq({_id: 0, a: 1, b: 1}, fad1.value, tojson(fad1)); // Reset the collection name and document data. assert.commandWorked( @@ -220,17 +205,17 @@ const tokenDB = tokenConn.getDB(kDbName); const tokenAdminDB = tokenConn.getDB('admin'); const dbs = assert.commandWorked(tokenAdminDB.runCommand({listDatabases: 1, nameOnly: true})); - assert.eq(3, dbs.databases.length); + assert.eq(3, dbs.databases.length, tojson(dbs)); const expectedDbs = featureFlagRequireTenantId ? ["admin", kDbName, kOtherDbName] : [kTenant + "_admin", kTenant + "_" + kDbName, kTenant + "_" + kOtherDbName]; - assert(arrayEq(expectedDbs, dbs.databases.map(db => db.name))); + assert(arrayEq(expectedDbs, dbs.databases.map(db => db.name)), tojson(dbs)); } { // Test the collStats command. 
let res = assert.commandWorked(tokenDB.runCommand({collStats: kCollName})); - checkNsSerializedCorrectly(featureFlagRequireTenantId, kTenant, kDbName, kCollName, res.ns); + checkNsSerializedCorrectly(kDbName, kCollName, res.ns); // perform the same test on a timeseries collection const timeFieldName = "time"; @@ -238,12 +223,8 @@ const tokenDB = tokenConn.getDB(kDbName); assert.commandWorked( tokenDB.createCollection(tsColl, {timeseries: {timeField: timeFieldName}})); res = assert.commandWorked(tokenDB.runCommand({collStats: tsColl})); - checkNsSerializedCorrectly(featureFlagRequireTenantId, kTenant, kDbName, tsColl, res.ns); - checkNsSerializedCorrectly(featureFlagRequireTenantId, - kTenant, - kDbName, - 'system.buckets.' + tsColl, - res.timeseries.bucketsNs); + checkNsSerializedCorrectly(kDbName, tsColl, res.ns); + checkNsSerializedCorrectly(kDbName, 'system.buckets.' + tsColl, res.timeseries.bucketsNs); } // Drop the collection, and then the database. Check that listCollections no longer returns @@ -251,8 +232,7 @@ const tokenDB = tokenConn.getDB(kDbName); { // Drop the collection, and check that the "ns" returned is serialized correctly. const dropRes = assert.commandWorked(tokenDB.runCommand({drop: kCollName})); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollName, dropRes.ns); + checkNsSerializedCorrectly(kDbName, kCollName, dropRes.ns); const collsAfterDropColl = assert.commandWorked( tokenDB.runCommand({listCollections: 1, nameOnly: true, filter: {name: kCollName}})); @@ -307,28 +287,29 @@ const tokenDB = tokenConn.getDB(kDbName); createIndexes: kCollName, indexes: [{key: {a: 1}, name: "indexA"}, {key: {b: 1}, name: "indexB"}] })); - assert.eq(3, res.numIndexesAfter); + assert.eq(3, res.numIndexesAfter, tojson(res)); res = assert.commandWorked(tokenDB.runCommand({listIndexes: kCollName})); - assert.eq(3, res.cursor.firstBatch.length); + assert.eq(3, res.cursor.firstBatch.length, tojson(res.cursor.firstBatch)); assert(arrayEq( - [ - {key: {"_id": 1}, name: "_id_"}, - {key: {a: 1}, name: "indexA"}, - {key: {b: 1}, name: "indexB"} - ], - getIndexesKeyAndName(res.cursor.firstBatch))); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollName, res.cursor.ns); + [ + {key: {"_id": 1}, name: "_id_"}, + {key: {a: 1}, name: "indexA"}, + {key: {b: 1}, name: "indexB"} + ], + getIndexesKeyAndName(res.cursor.firstBatch)), + tojson(res.cursor.firstBatch)); + checkNsSerializedCorrectly(kDbName, kCollName, res.cursor.ns); // Drop those new created indexes. 
res = assert.commandWorked( tokenDB.runCommand({dropIndexes: kCollName, index: ["indexA", "indexB"]})); res = assert.commandWorked(tokenDB.runCommand({listIndexes: kCollName})); - assert.eq(1, res.cursor.firstBatch.length); - assert(arrayEq([{key: {"_id": 1}, name: "_id_"}], - getIndexesKeyAndName(res.cursor.firstBatch))); + assert.eq(1, res.cursor.firstBatch.length, tojson(res.cursor.firstBatch)); + assert( + arrayEq([{key: {"_id": 1}, name: "_id_"}], getIndexesKeyAndName(res.cursor.firstBatch)), + tojson(res.cursor.firstBatch)); } // Test aggregation stage commands @@ -384,9 +365,9 @@ const tokenDB = tokenConn.getDB(kDbName); cursor: {} })); - assert(arrayEq(graphLookupTarget, graphLookupRes.cursor.firstBatch)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollA, graphLookupRes.cursor.ns); + assert(arrayEq(graphLookupTarget, graphLookupRes.cursor.firstBatch), + tojson(graphLookupRes.cursor.firstBatch)); + checkNsSerializedCorrectly(kDbName, kCollA, graphLookupRes.cursor.ns); } // $out agg stage using string input for collection name @@ -394,16 +375,15 @@ const tokenDB = tokenConn.getDB(kDbName); { const outStrRes = assert.commandWorked(tokenDB.runCommand( {aggregate: kCollA, pipeline: [graphLookupStage, outStageStr], cursor: {}})); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollA, outStrRes.cursor.ns); + checkNsSerializedCorrectly(kDbName, kCollA, outStrRes.cursor.ns); // Because we're using the same graphLookup stage from the first test, we should see the // exact same results but stored in kCollC let projectRes = assert.commandWorked(tokenDB.runCommand( {aggregate: kCollC, pipeline: [{$project: {_id: 1, connections: 1}}], cursor: {}})); - assert(arrayEq(graphLookupTarget, projectRes.cursor.firstBatch)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollC, projectRes.cursor.ns); + assert(arrayEq(graphLookupTarget, projectRes.cursor.firstBatch), + tojson(projectRes.cursor.firstBatch)); + checkNsSerializedCorrectly(kDbName, kCollC, projectRes.cursor.ns); assert.commandWorked(tokenDB.runCommand({drop: kCollC})); } @@ -412,16 +392,15 @@ const tokenDB = tokenConn.getDB(kDbName); { const outObjRes = assert.commandWorked(tokenDB.runCommand( {aggregate: kCollA, pipeline: [graphLookupStage, outStageObj], cursor: {}})); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollA, outObjRes.cursor.ns); + checkNsSerializedCorrectly(kDbName, kCollA, outObjRes.cursor.ns); // Because we're using the same graphLookup stage from the first test, we should see the // exact same results but stored in kCollD let projectRes = assert.commandWorked(tokenDB.runCommand( {aggregate: kCollD, pipeline: [{$project: {_id: 1, connections: 1}}], cursor: {}})); - assert(arrayEq(graphLookupTarget, projectRes.cursor.firstBatch)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollD, projectRes.cursor.ns); + assert(arrayEq(graphLookupTarget, projectRes.cursor.firstBatch), + tojson(projectRes.cursor.firstBatch)); + checkNsSerializedCorrectly(kDbName, kCollD, projectRes.cursor.ns); assert.commandWorked(tokenDB.runCommand({drop: kCollD})); } @@ -437,9 +416,9 @@ const tokenDB = tokenConn.getDB(kDbName); cursor: {} })); - assert(arrayEq(lookupTarget, lookupPipelineRes.cursor.firstBatch)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollA, lookupPipelineRes.cursor.ns); + assert(arrayEq(lookupTarget, lookupPipelineRes.cursor.firstBatch), 
+ tojson(lookupPipelineRes.cursor.firstBatch)); + checkNsSerializedCorrectly(kDbName, kCollA, lookupPipelineRes.cursor.ns); } // $merge agg stage @@ -447,14 +426,13 @@ const tokenDB = tokenConn.getDB(kDbName); { const mergeRes = assert.commandWorked( tokenDB.runCommand({aggregate: kCollA, pipeline: [mergeStage], cursor: {}})); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollA, mergeRes.cursor.ns); + checkNsSerializedCorrectly(kDbName, kCollA, mergeRes.cursor.ns); // Merging kCollA into a new collection kCollD should give us matching contents let findRes = assert.commandWorked(tokenDB.runCommand({find: kCollD})); - assert(arrayEq(collADocs, findRes.cursor.firstBatch)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollD, findRes.cursor.ns); + assert(arrayEq(collADocs, findRes.cursor.firstBatch), + tojson(findRes.cursor.firstBatch)); + checkNsSerializedCorrectly(kDbName, kCollD, findRes.cursor.ns); assert.commandWorked(tokenDB.runCommand({drop: kCollD})); } @@ -464,9 +442,9 @@ const tokenDB = tokenConn.getDB(kDbName); const unionWithRes = assert.commandWorked( tokenDB.runCommand({aggregate: kCollA, pipeline: [unionWithStage], cursor: {}})); - assert(arrayEq(collADocs.concat(collBDocs), unionWithRes.cursor.firstBatch)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollA, unionWithRes.cursor.ns); + assert(arrayEq(collADocs.concat(collBDocs), unionWithRes.cursor.firstBatch), + tojson(unionWithRes.cursor.firstBatch)); + checkNsSerializedCorrectly(kDbName, kCollA, unionWithRes.cursor.ns); } // $collStats agg stage @@ -477,33 +455,28 @@ const tokenDB = tokenConn.getDB(kDbName); const collStatsRes = assert.commandWorked( tokenDB.runCommand({aggregate: kCollD, pipeline: [collStatsStage], cursor: {}})); - assert.eq(1, collStatsRes.cursor.firstBatch.length); + assert.eq( + 1, collStatsRes.cursor.firstBatch.length, tojson(collStatsRes.cursor.firstBatch)); - checkNsSerializedCorrectly(featureFlagRequireTenantId, - kTenant, - kDbName, - kCollD, - collStatsRes.cursor.firstBatch[0].ns); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollD, collStatsRes.cursor.ns); + checkNsSerializedCorrectly(kDbName, kCollD, collStatsRes.cursor.firstBatch[0].ns); + checkNsSerializedCorrectly(kDbName, kCollD, collStatsRes.cursor.ns); let stats = collStatsRes.cursor.firstBatch[0]; - assert('latencyStats' in collStatsRes.cursor.firstBatch[0]); - assert(stats.latencyStats.writes.ops == 1); - assert(stats.latencyStats.reads.ops == 1); - assert(stats.latencyStats.commands.ops == 0); - assert(stats.latencyStats.transactions.ops == 0); + assert('latencyStats' in stats, tojson(stats)); + assert.eq(stats.latencyStats.writes.ops, 1, tojson(stats)); + assert.eq(stats.latencyStats.reads.ops, 1, tojson(stats)); + assert.eq(stats.latencyStats.commands.ops, 0, tojson(stats)); + assert.eq(stats.latencyStats.transactions.ops, 0, tojson(stats)); // Also check the next() cursor results. 
const collStatsResNext = tokenDB.collD.aggregate(collStatsStage).next(); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollD, collStatsResNext.ns); + checkNsSerializedCorrectly(kDbName, kCollD, collStatsResNext.ns); - assert('latencyStats' in collStatsResNext); - assert(collStatsResNext.latencyStats.writes.ops == 1); - assert(collStatsResNext.latencyStats.reads.ops == 2); - assert(collStatsResNext.latencyStats.commands.ops == 0); - assert(collStatsResNext.latencyStats.transactions.ops == 0); + assert('latencyStats' in collStatsResNext, tojson(collStatsResNext)); + assert.eq(collStatsResNext.latencyStats.writes.ops, 1, tojson(collStatsResNext)); + assert.eq(collStatsResNext.latencyStats.reads.ops, 2, tojson(collStatsResNext)); + assert.eq(collStatsResNext.latencyStats.commands.ops, 0, tojson(collStatsResNext)); + assert.eq(collStatsResNext.latencyStats.transactions.ops, 0, tojson(collStatsResNext)); assert.commandWorked(tokenDB.runCommand({drop: kCollD})); } @@ -516,9 +489,8 @@ const tokenDB = tokenConn.getDB(kDbName); // Test the validate command. { const validateRes = assert.commandWorked(tokenDB.runCommand({validate: kCollName})); - assert(validateRes.valid); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollName, validateRes.ns); + assert(validateRes.valid, tojson(validateRes)); + checkNsSerializedCorrectly(kDbName, kCollName, validateRes.ns); } } @@ -549,15 +521,15 @@ const tokenDB = tokenConn.getDB(kDbName); const fadOtherUser = assert.commandWorked( tokenDB2.runCommand({findAndModify: kCollName, query: {b: 1}, update: {$inc: {b: 10}}})); - assert.eq(null, fadOtherUser.value); + assert.eq(null, fadOtherUser.value, tojson(fadOtherUser)); const countOtherUser = assert.commandWorked(tokenDB2.runCommand({count: kCollName, query: {c: 1}})); - assert.eq(0, countOtherUser.n); + assert.eq(0, countOtherUser.n, tojson(countOtherUser)); const distinctOtherUer = assert.commandWorked(tokenDB2.runCommand({distinct: kCollName, key: 'd', query: {}})); - assert.eq([], distinctOtherUer.values); + assert.eq([], distinctOtherUer.values, tojson(distinctOtherUer)); const fromName = kDbName + '.' + kCollName; const toName = fromName + "_renamed"; @@ -579,9 +551,10 @@ const tokenDB = tokenConn.getDB(kDbName); const dbsWithDiffToken = assert.commandWorked( tokenConn.getDB('admin').runCommand({listDatabases: 1, nameOnly: true})); // Only the 'admin' db exists - assert.eq(1, dbsWithDiffToken.databases.length); + assert.eq(1, dbsWithDiffToken.databases.length, tojson(dbsWithDiffToken)); const expectedAdminDb = featureFlagRequireTenantId ? "admin" : kOtherTenant + "_admin"; - assert(arrayEq([expectedAdminDb], dbsWithDiffToken.databases.map(db => db.name))); + assert(arrayEq([expectedAdminDb], dbsWithDiffToken.databases.map(db => db.name)), + tojson(dbsWithDiffToken)); // Attempt to drop the database, then check it was not dropped. 
assert.commandWorked(tokenDB2.runCommand({dropDatabase: 1})); @@ -639,9 +612,9 @@ const tokenDB = tokenConn.getDB(kDbName); pipeline: [lookupPlannerStage, {$project: {_id: 1, refs: 1}}], cursor: {} })); - assert(arrayEq(lookupTarget, lookupPlannerRes.cursor.firstBatch)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenant, kDbName, kCollA, lookupPlannerRes.cursor.ns); + assert(arrayEq(lookupTarget, lookupPlannerRes.cursor.firstBatch), + tojson(lookupPlannerRes.cursor.firstBatch)); + checkNsSerializedCorrectly(kDbName, kCollA, lookupPlannerRes.cursor.ns); } } @@ -661,7 +634,8 @@ const tokenDB = tokenConn.getDB(kDbName); update: {$inc: {b: 10}}, '$tenant': kTenant })); - assert.eq({_id: 0, a: 1, b: 1}, fadCorrectDollarTenant.value); + assert.eq( + {_id: 0, a: 1, b: 1}, fadCorrectDollarTenant.value, tojson(fadCorrectDollarTenant)); const fadOtherDollarTenant = assert.commandWorked(privelegedDB.runCommand({ findAndModify: kCollName, @@ -669,7 +643,7 @@ const tokenDB = tokenConn.getDB(kDbName); update: {$inc: {b: 10}}, '$tenant': kOtherTenant })); - assert.eq(null, fadOtherDollarTenant.value); + assert.eq(null, fadOtherDollarTenant.value, tojson(fadOtherDollarTenant)); // Reset document data. assert.commandWorked(privelegedDB.runCommand({ @@ -695,7 +669,7 @@ const tokenDB = tokenConn.getDB(kDbName); update: {$set: {a: 11, b: 1}}, '$tenant': kTenant })); - assert.eq({_id: 0, a: 1, b: 1}, fad1.value); + assert.eq({_id: 0, a: 1, b: 1}, fad1.value, tojson(fad1)); // Reset the collection name and document data. assert.commandWorked(privelegedAdminDB.runCommand( @@ -716,14 +690,14 @@ const tokenDB = tokenConn.getDB(kDbName); createIndexes: kCollName, indexes: [{key: {c: 1}, name: "indexC", expireAfterSeconds: 50}] })); - assert.eq(2, res.numIndexesAfter); + assert.eq(2, res.numIndexesAfter, tojson(res)); jsTestLog(`Created index`); // Modify the index with the tenantId res = assert.commandWorked(tokenDB.runCommand( {"collMod": kCollName, "index": {"keyPattern": {c: 1}, expireAfterSeconds: 100}})); - assert.eq(50, res.expireAfterSeconds_old); - assert.eq(100, res.expireAfterSeconds_new); + assert.eq(50, res.expireAfterSeconds_old, tojson(res)); + assert.eq(100, res.expireAfterSeconds_new, tojson(res)); // Drop the index created assert.commandWorked(tokenDB.runCommand({dropIndexes: kCollName, index: ["indexC"]})); @@ -733,5 +707,4 @@ const tokenDB = tokenConn.getDB(kDbName); // This should fail since dbCheck is not supporting using a security token. 
{ assert.commandFailedWithCode(tokenDB.runCommand({dbCheck: kCollName}), ErrorCodes.Unauthorized); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/serverless/native_tenant_data_isolation_concurrent_lock_ops.js b/jstests/serverless/native_tenant_data_isolation_concurrent_lock_ops.js index e17bcfb06151a..778046101886c 100644 --- a/jstests/serverless/native_tenant_data_isolation_concurrent_lock_ops.js +++ b/jstests/serverless/native_tenant_data_isolation_concurrent_lock_ops.js @@ -32,7 +32,6 @@ const kOtherTenant = ObjectId(); const kDbName = 'myDb'; const kCollName = 'myColl'; const testDb = primary.getDB(kDbName); -const testColl = testDb.getCollection(kCollName); /** * Configure a failpoint which will block two threads that will be holding locks and check @@ -87,8 +86,8 @@ function waitForLock(nss, resource, expectedLockMode) { "index": {"keyPattern": {c: 1}, expireAfterSeconds: 100}, '$tenant': eval(tenantId) })); - assert.eq(50, res.expireAfterSeconds_old); - assert.eq(100, res.expireAfterSeconds_new); + assert.eq(50, res.expireAfterSeconds_old, tojson(res)); + assert.eq(100, res.expireAfterSeconds_new, tojson(res)); } assert.commandWorked(testDb.createCollection(kCollName, {'$tenant': kTenant})); @@ -100,14 +99,14 @@ function waitForLock(nss, resource, expectedLockMode) { indexes: [{key: {c: 1}, name: "indexA", expireAfterSeconds: 50}], '$tenant': kTenant })); - assert.eq(2, res.numIndexesAfter); + assert.eq(2, res.numIndexesAfter, tojson(res)); res = assert.commandWorked(testDb.runCommand({ createIndexes: kCollName, indexes: [{key: {c: 1}, name: "indexA", expireAfterSeconds: 50}], '$tenant': kOtherTenant })); - assert.eq(2, res.numIndexesAfter); + assert.eq(2, res.numIndexesAfter, tojson(res)); checkConcurrentLockDifferentTenant(primary, kTenant, @@ -208,7 +207,7 @@ function waitForLock(nss, resource, expectedLockMode) { indexes: [{key: {a: 1}, name: "indexA"}, {key: {b: 1}, name: "indexB"}], '$tenant': eval(tenantId) })); - assert.eq(3, res.numIndexesAfter); + assert.eq(3, res.numIndexesAfter, tojson(res)); } assert.commandWorked(testDb.createCollection(kCollName, {'$tenant': kTenant})); @@ -240,7 +239,7 @@ function waitForLock(nss, resource, expectedLockMode) { const validateRes = assert.commandWorked( db.getDB(dbName).runCommand({validate: collName, '$tenant': eval(tenantId)})); - assert(validateRes.valid); + assert(validateRes.valid, tojson(validateRes)); } assert.commandWorked(testDb.createCollection(kCollName, {'$tenant': kTenant})); diff --git a/jstests/serverless/native_tenant_data_isolation_curr_op.js b/jstests/serverless/native_tenant_data_isolation_curr_op.js index 1d3270a7d44d0..cab8298f35d38 100644 --- a/jstests/serverless/native_tenant_data_isolation_curr_op.js +++ b/jstests/serverless/native_tenant_data_isolation_curr_op.js @@ -1,12 +1,9 @@ // Test that currentOp works as expected in a multitenant environment. 
// @tags: [requires_fcv_62] -(function() { -"use strict"; - load('jstests/aggregation/extras/utils.js'); // For arrayEq() load("jstests/libs/fail_point_util.js"); // For configureFailPoint() load("jstests/libs/parallel_shell_helpers.js"); // For funWithArgs() -load("jstests/libs/feature_flag_util.js"); // for isEnabled +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const kTenant = ObjectId(); const kOtherTenant = ObjectId(); @@ -15,8 +12,7 @@ const kNewCollectionName = "currOpColl"; // Check for the 'insert' op(s) in the currOp output for 'tenantId' when issuing '$currentOp' in // aggregation pipeline with a security token. -function assertCurrentOpAggOutputToken( - tokenConn, tenantId, dbName, expectedBatchSize, featureFlagRequireTenantId) { +function assertCurrentOpAggOutputToken(tokenConn, dbName, expectedBatchSize) { // Security token users are not allowed to pass "allUsers: true" because it requires having the // "inprog" action type, which is only available to the "clusterMonitor" role. Security token // users should not be allowed this role. @@ -26,14 +22,12 @@ function assertCurrentOpAggOutputToken( cursor: {} }); assert.eq(res.cursor.firstBatch.length, expectedBatchSize, tojson(res)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, tenantId, dbName, kNewCollectionName, res.cursor.firstBatch); + checkNsSerializedCorrectly(dbName, kNewCollectionName, res.cursor.firstBatch); } // Check for the 'insert' op(s) in the currOp output for 'tenantId' when issuing '$currentOp' in // aggregation pipeline and passing '$tenant' to it. -function assertCurrentOpAggOutputDollarTenant( - rootConn, tenantId, dbName, expectedBatchSize, featureFlagRequireTenantId) { +function assertCurrentOpAggOutputDollarTenant(rootConn, tenantId, dbName, expectedBatchSize) { // We pass "allUsers: true" in order to see ops run by other users, including the security token // user. Passing $tenant will filter for only ops which belong to this tenant. const res = rootConn.runCommand({ @@ -43,19 +37,16 @@ function assertCurrentOpAggOutputDollarTenant( '$tenant': tenantId }); assert.eq(res.cursor.firstBatch.length, expectedBatchSize, tojson(res)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, tenantId, dbName, kNewCollectionName, res.cursor.firstBatch); + checkNsSerializedCorrectly(dbName, kNewCollectionName, res.cursor.firstBatch); } // Check for the 'insert' op(s) in the currOp output for 'tenantId' when issuing the currentOp // command with a security token. -function assertCurrentOpCommandOutputToken( - tokenConn, tenantId, dbName, expectedBatchSize, featureFlagRequireTenantId) { +function assertCurrentOpCommandOutputToken(tokenConn, dbName, expectedBatchSize) { const res = tokenConn.getDB("admin").runCommand( {currentOp: 1, $ownOps: true, $all: true, op: "insert"}); assert.eq(res.inprog.length, expectedBatchSize, tojson(res)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, tenantId, dbName, kNewCollectionName, res.inprog); + checkNsSerializedCorrectly(dbName, kNewCollectionName, res.inprog); res.inprog.forEach(op => { assert.eq(op.command.insert, kNewCollectionName); }); @@ -63,33 +54,20 @@ function assertCurrentOpCommandOutputToken( // Check for the 'insert' op in the currOp output for 'tenantId' when issuing the currentOp // command with $tenant. 
-function assertCurrentOpCommandOutputDollarTenant( - rootConn, tenantId, dbName, expectedBatchSize, featureFlagRequireTenantId) { +function assertCurrentOpCommandOutputDollarTenant(rootConn, tenantId, dbName, expectedBatchSize) { const res = rootConn.runCommand( {currentOp: 1, $ownOps: false, $all: true, op: "insert", '$tenant': tenantId}); assert.eq(res.inprog.length, expectedBatchSize, tojson(res)); - checkNsSerializedCorrectly( - featureFlagRequireTenantId, tenantId, dbName, kNewCollectionName, res.inprog); + checkNsSerializedCorrectly(dbName, kNewCollectionName, res.inprog); res.inprog.forEach(op => { assert.eq(op.command.insert, kNewCollectionName); }); } -function checkNsSerializedCorrectly( - featureFlagRequireTenantId, kTenantId, dbName, collectionName, cursorRes) { +function checkNsSerializedCorrectly(dbName, collectionName, cursorRes) { cursorRes.forEach(op => { - if (featureFlagRequireTenantId) { - // This case represents the upgraded state where we will not include the tenantId as the - // db prefix. - assert.eq(op.ns, dbName + "." + collectionName); - assert.eq(op.command.$db, dbName); - } else { - // This case represents the downgraded state where we will continue to prefix - // namespaces. - const prefixedDb = kTenant + "_" + kDbName; - assert.eq(op.ns, prefixedDb + "." + collectionName); - assert.eq(op.command.$db, kTenantId + "_" + dbName); - } + assert.eq(op.ns, dbName + "." + collectionName); + assert.eq(op.command.$db, dbName); }); } @@ -153,29 +131,22 @@ tokenConn._setSecurityToken(securityToken); // Check that the 'insert' op shows up in the currOp output for 'kTenant' when issuing // '$currentOp' in aggregation pipeline using both a security token and $tenant. - assertCurrentOpAggOutputToken( - tokenConn, kTenant, kDbName, 1 /* expectedBatchSize */, featureFlagRequireTenantId); - assertCurrentOpAggOutputDollarTenant( - adminDb, kTenant, kDbName, 1 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpAggOutputToken(tokenConn, kDbName, 1 /* expectedBatchSize */); + assertCurrentOpAggOutputDollarTenant(adminDb, kTenant, kDbName, 1 /* expectedBatchSize */); // Check that the 'insert' op shows up in the currOp output for 'kTenant' when issuing // the currentOp command using both a security token and $tenant. - assertCurrentOpCommandOutputToken( - tokenConn, kTenant, kDbName, 1 /* expectedBatchSize */, featureFlagRequireTenantId); - assertCurrentOpCommandOutputDollarTenant( - adminDb, kTenant, kDbName, 1 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpCommandOutputToken(tokenConn, kDbName, 1 /* expectedBatchSize */); + assertCurrentOpCommandOutputDollarTenant(adminDb, kTenant, kDbName, 1 /* expectedBatchSize */); // Check that the other tenant does not see the op in any currentOp output. 
tokenConn._setSecurityToken(securityTokenOtherTenant); - assertCurrentOpAggOutputToken( - tokenConn, kOtherTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); - assertCurrentOpAggOutputDollarTenant( - adminDb, kOtherTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpAggOutputToken(tokenConn, kDbName, 0 /* expectedBatchSize */); + assertCurrentOpAggOutputDollarTenant(adminDb, kOtherTenant, kDbName, 0 /* expectedBatchSize */); - assertCurrentOpCommandOutputToken( - tokenConn, kOtherTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpCommandOutputToken(tokenConn, kDbName, 0 /* expectedBatchSize */); assertCurrentOpCommandOutputDollarTenant( - adminDb, kOtherTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); + adminDb, kOtherTenant, kDbName, 0 /* expectedBatchSize */); createCollFP.off(); createShell(); @@ -205,35 +176,28 @@ tokenConn._setSecurityToken(securityToken); // '$currentOp' in aggregation pipeline using a security token. A security token user is not // authorized to pass ""allUsers: true", so it can only see ops that it has actually run itself. // In this case, the insert was issued by the "admin" user. - assertCurrentOpAggOutputToken( - tokenConn, kTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpAggOutputToken(tokenConn, kDbName, 0 /* expectedBatchSize */); // Check that the 'insert' op shows up in the currOp output for 'kTenant' when issuing // '$currentOp' in aggregation pipeline using $tenant. - assertCurrentOpAggOutputDollarTenant( - adminDb, kTenant, kDbName, 1 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpAggOutputDollarTenant(adminDb, kTenant, kDbName, 1 /* expectedBatchSize */); // Check that the 'insert' op also does NOT show up in the currOp output for 'kTenant' when // issuing the currentOp command, for the same reason as above. - assertCurrentOpCommandOutputToken( - tokenConn, kTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpCommandOutputToken(tokenConn, kDbName, 0 /* expectedBatchSize */); // Check that the 'insert' op shows up in the currOp output for 'kTenant' when issuing // the currentOp command using $tenant. - assertCurrentOpCommandOutputDollarTenant( - adminDb, kTenant, kDbName, 1 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpCommandOutputDollarTenant(adminDb, kTenant, kDbName, 1 /* expectedBatchSize */); // Now, check that the other tenant does not see the op in any currentOp output. 
tokenConn._setSecurityToken(securityTokenOtherTenant); - assertCurrentOpAggOutputToken( - tokenConn, kOtherTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); - assertCurrentOpAggOutputDollarTenant( - adminDb, kOtherTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpAggOutputToken(tokenConn, kDbName, 0 /* expectedBatchSize */); + assertCurrentOpAggOutputDollarTenant(adminDb, kOtherTenant, kDbName, 0 /* expectedBatchSize */); - assertCurrentOpCommandOutputToken( - tokenConn, kOtherTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); + assertCurrentOpCommandOutputToken(tokenConn, kDbName, 0 /* expectedBatchSize */); assertCurrentOpCommandOutputDollarTenant( - adminDb, kOtherTenant, kDbName, 0 /* expectedBatchSize */, featureFlagRequireTenantId); + adminDb, kOtherTenant, kDbName, 0 /* expectedBatchSize */); // Now check that a privileged user can see this op using both $currentOp and the currentOp // command when no tenantId is provided. The user currently authenticated on the adminDb @@ -273,5 +237,4 @@ tokenConn._setSecurityToken(securityToken); createShell(); } -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/serverless/native_tenant_data_isolation_initial_sync.js b/jstests/serverless/native_tenant_data_isolation_initial_sync.js index 678618facb4e3..85f29a6a78d06 100644 --- a/jstests/serverless/native_tenant_data_isolation_initial_sync.js +++ b/jstests/serverless/native_tenant_data_isolation_initial_sync.js @@ -2,11 +2,8 @@ * Tests that initial sync works correctly when multitenancySupport is enabled. */ -(function() { -"use strict"; - load('jstests/aggregation/extras/utils.js'); // For arrayEq() -load("jstests/libs/feature_flag_util.js"); // for isEnabled +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const rst = new ReplSetTest({ nodes: 1, @@ -66,7 +63,7 @@ assert.commandWorked(primaryDB.runCommand({insert: kCollName, documents: tenant1 const tenant1Idxs = [{key: {a: 1}, name: "indexA"}, {key: {b: 1}, name: "indexB"}]; let res = assert.commandWorked(primaryDB.runCommand({createIndexes: kCollName, indexes: tenant1Idxs})); -assert.eq(3, res.numIndexesAfter); +assert.eq(3, res.numIndexesAfter, tojson(res)); // Create a collections, insert some data, and create indexes on the collection for tenant2. primaryConn._setSecurityToken(securityToken2); @@ -76,7 +73,7 @@ assert.commandWorked(primaryDB.runCommand({insert: kCollName, documents: tenant2 const tenant2Idxs = [{key: {a: -1}, name: "indexA"}, {key: {b: -1}, name: "indexB"}]; res = assert.commandWorked(primaryDB.runCommand({createIndexes: kCollName, indexes: tenant2Idxs})); -assert.eq(3, res.numIndexesAfter); +assert.eq(3, res.numIndexesAfter, tojson(res)); // Add a new secondary to the replica set and wait for initial sync to finish. 
const secondary = rst.add({ @@ -101,13 +98,14 @@ const findTenant1Res = assert.commandWorked(secondaryDB.runCommand({find: kCollN assert(arrayEq(tenant1Docs, findTenant1Res.cursor.firstBatch), tojson(findTenant1Res)); res = assert.commandWorked(secondaryDB.runCommand({listIndexes: kCollName})); -assert.eq(3, res.cursor.firstBatch.length); +assert.eq(3, res.cursor.firstBatch.length, tojson(res.cursor.firstBatch)); assert(arrayEq(tenant1Idxs.concat([ - {key: {"_id": 1}, name: "_id_"}, -]), + {key: {"_id": 1}, name: "_id_"}, + ]), res.cursor.firstBatch.map(function(index) { return {key: index.key, name: index.name}; - }))); + })), + tojson(res.cursor.firstBatch)); // Look for tenant2's data and indexes. secondaryConn._setSecurityToken(securityToken2); @@ -115,13 +113,13 @@ const findTenant2Res = assert.commandWorked(secondaryDB.runCommand({find: kCollN assert(arrayEq(tenant2Docs, findTenant2Res.cursor.firstBatch), tojson(findTenant2Res)); res = assert.commandWorked(secondaryDB.runCommand({listIndexes: kCollName})); -assert.eq(3, res.cursor.firstBatch.length); +assert.eq(3, res.cursor.firstBatch.length, tojson(res.cursor.firstBatch)); assert(arrayEq(tenant2Idxs.concat([ - {key: {"_id": 1}, name: "_id_"}, -]), + {key: {"_id": 1}, name: "_id_"}, + ]), res.cursor.firstBatch.map(function(index) { return {key: index.key, name: index.name}; - }))); + })), + tojson(res.cursor.firstBatch)); -rst.stopSet(); -})(); +rst.stopSet(); \ No newline at end of file diff --git a/jstests/serverless/native_tenant_data_isolation_mirrored_reads.js b/jstests/serverless/native_tenant_data_isolation_mirrored_reads.js new file mode 100644 index 0000000000000..7527620dcba0d --- /dev/null +++ b/jstests/serverless/native_tenant_data_isolation_mirrored_reads.js @@ -0,0 +1,107 @@ +/** + * Test mirrored reads in a multi-tenant environment. + */ + +load('jstests/aggregation/extras/utils.js'); // For arrayEq() + +const rst = new ReplSetTest({ + nodes: 2, + nodeOptions: { + auth: '', + setParameter: { + multitenancySupport: true, + logComponentVerbosity: tojson({command: 1}), + mirrorReads: tojsononeline({samplingRate: 1.0}), + "failpoint.mirrorMaestroExpectsResponse": tojson({mode: "alwaysOn"}), + } + } +}); +rst.startSet({keyFile: 'jstests/libs/key1'}); +rst.initiate(); + +const primary = rst.getPrimary(); +const adminDb = primary.getDB('admin'); +const secondary = rst.getSecondary(); + +// Prepare a user for testing of passing the tenant using $tenant. +// Must be authenticated as a user with ActionType::useTenant in order to use $tenant. 
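+// The same 'admin' credentials are created once and used to authenticate on both nodes so that
+// serverStatus({mirroredReads: 1}) can be read from the secondary as well as the primary.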
+assert.commandWorked(adminDb.runCommand({createUser: 'admin', pwd: 'pwd', roles: ['root']})); +assert(adminDb.auth('admin', 'pwd')); +assert(secondary.getDB('admin').auth('admin', 'pwd')); + +const kTenant = ObjectId(); +const kOtherTenant = ObjectId(); +const kDbName = 'myDb'; +const kCollName = 'myColl'; +const testDb = primary.getDB(kDbName); + +function getMirroredReadsStats(node) { + return node.getDB(kDbName).serverStatus({mirroredReads: 1}).mirroredReads; +} + +function assertSecondaryStats(initialSecondaryStats, numSentSince) { + const currentSecondaryStats = getMirroredReadsStats(secondary); + jsTestLog("Current secondary stats: " + tojson(currentSecondaryStats)); + const numProcessed = currentSecondaryStats.processedAsSecondary; + return initialSecondaryStats.processedAsSecondary + numSentSince == numProcessed; +} + +const kTenantDocs = [{w: 0}, {x: 1}, {y: 2}, {z: 3}]; +const kOtherTenantDocs = [{i: 1}, {j: 2}, {k: 3}]; + +assert.commandWorked( + testDb.runCommand({insert: kCollName, documents: kTenantDocs, '$tenant': kTenant})); +assert.commandWorked( + testDb.runCommand({insert: kCollName, documents: kOtherTenantDocs, '$tenant': kOtherTenant})); + +function verifyMirroredReadStats(cmd) { + const initialPrimaryStats = getMirroredReadsStats(primary); + const initialSecondaryStats = getMirroredReadsStats(secondary); + jsTestLog("Verifying mirrored reads for cmd: " + tojson(cmd)); + jsTestLog("Initial primary stats: " + tojson(initialPrimaryStats)); + jsTestLog("Initial secondary stats: " + tojson(initialSecondaryStats)); + + // Check that the mirrored operation is observable through the metrics. + assert.commandWorked(testDb.runCommand(cmd)); + let currentPrimaryStats; + assert.soon(() => { + currentPrimaryStats = getMirroredReadsStats(primary); + jsTestLog("Current primary stats: " + tojson(currentPrimaryStats)); + let resolved = currentPrimaryStats.resolved; + let succeeded = currentPrimaryStats.succeeded; + return (initialPrimaryStats.resolved + 1 == resolved) && + (initialPrimaryStats.succeeded + 1 == succeeded); + }); + assert.eq(initialPrimaryStats.seen + 1, currentPrimaryStats.seen, currentPrimaryStats); + assertSecondaryStats(initialSecondaryStats, 1); +} + +// Verify that mirrored reads are successful for mirrored operations with '$tenant'. 
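+// Each call below runs the command once with '$tenant' and waits for the primary's mirroredReads
+// counters (seen/resolved/succeeded) to each advance by one, then compares the secondary's
+// processedAsSecondary count against the number of reads sent.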
+verifyMirroredReadStats({find: kCollName, projection: {_id: 0}, '$tenant': kTenant}); +verifyMirroredReadStats({find: kCollName, projection: {_id: 0}, '$tenant': kOtherTenant}); + +verifyMirroredReadStats({count: kCollName, query: {x: 1}, '$tenant': kTenant}); +verifyMirroredReadStats({count: kCollName, query: {i: 1}, '$tenant': kOtherTenant}); + +verifyMirroredReadStats({distinct: kCollName, key: 'x', '$tenant': kTenant}); +verifyMirroredReadStats({distinct: kCollName, key: 'i', '$tenant': kOtherTenant}); + +verifyMirroredReadStats( + {findAndModify: kCollName, query: {x: 1}, update: {$inc: {x: 10}}, '$tenant': kTenant}); +verifyMirroredReadStats( + {findAndModify: kCollName, query: {i: 1}, update: {$inc: {i: 10}}, '$tenant': kOtherTenant}); + +verifyMirroredReadStats({ + update: kCollName, + updates: [{q: {x: 1}, u: {'inc': {x: 1}}}], + ordered: false, + '$tenant': kTenant +}); +verifyMirroredReadStats({ + update: kCollName, + updates: [{q: {i: 1}, u: {'inc': {i: 1}}}], + ordered: false, + '$tenant': kOtherTenant +}); + +rst.stopSet(); \ No newline at end of file diff --git a/jstests/serverless/native_tenant_data_isolation_stop_restart.js b/jstests/serverless/native_tenant_data_isolation_stop_restart.js index 2889dff9026c9..449bf8484b015 100644 --- a/jstests/serverless/native_tenant_data_isolation_stop_restart.js +++ b/jstests/serverless/native_tenant_data_isolation_stop_restart.js @@ -1,11 +1,7 @@ // Test that the collection catalog is restored correctly after a restart in a multitenant // environment. -(function() { -"use strict"; - load('jstests/aggregation/extras/utils.js'); // For arrayEq() -load("jstests/libs/feature_flag_util.js"); // for isEnabled const rst = new ReplSetTest({nodes: 3, nodeOptions: {auth: '', setParameter: {multitenancySupport: true}}}); @@ -19,8 +15,6 @@ let adminDb = primary.getDB('admin'); assert.commandWorked(adminDb.runCommand({createUser: 'admin', pwd: 'pwd', roles: ['root']})); assert(adminDb.auth('admin', 'pwd')); -const featureFlagRequireTenantId = FeatureFlagUtil.isEnabled(adminDb, "RequireTenantID"); - { const kTenant = ObjectId(); let testDb = primary.getDB('myDb0'); @@ -32,7 +26,7 @@ const featureFlagRequireTenantId = FeatureFlagUtil.isEnabled(adminDb, "RequireTe // Run findAndModify on the document. let fad = assert.commandWorked(testDb.runCommand( {findAndModify: "myColl0", query: {a: 1}, update: {$inc: {a: 10}}, '$tenant': kTenant})); - assert.eq({_id: 0, a: 1, b: 1}, fad.value); + assert.eq({_id: 0, a: 1, b: 1}, fad.value, tojson(fad)); // Create a view on the collection. assert.commandWorked(testDb.runCommand( @@ -62,23 +56,15 @@ const featureFlagRequireTenantId = FeatureFlagUtil.isEnabled(adminDb, "RequireTe // Assert we can still run findAndModify on the doc. fad = assert.commandWorked(testDb.runCommand( {findAndModify: "myColl0", query: {a: 11}, update: {$inc: {a: 10}}, '$tenant': kTenant})); - assert.eq({_id: 0, a: 11, b: 1}, fad.value); + assert.eq({_id: 0, a: 11, b: 1}, fad.value, tojson(fad)); + // Check that we do find the doc when the tenantId was passed as a prefix. Without $tenant or + // a security token, the tenantId MUST be prefixed in a multitenant environment. const findAndModPrefixed = primary.getDB(kTenant + '_myDb0') .runCommand({findAndModify: "myColl0", query: {b: 1}, update: {$inc: {b: 10}}}); - if (!featureFlagRequireTenantId) { - // Check that we do find the doc when the tenantId was passed as a prefix, only if the - // feature flag is not enabled. 
In this case, the server still accepts prefixed names, - // and will parse the tenant from the db name. - assert.commandWorked(findAndModPrefixed); - assert.eq({_id: 0, a: 21, b: 1}, findAndModPrefixed.value); - } else { - // assert.commandFailed(findAndModPrefixed); - // TODO SERVER-73025 Uncomment out the check above, and remove the check below. - assert.eq(null, findAndModPrefixed.value); - } + assert.commandWorked(findAndModPrefixed); + assert.eq({_id: 0, a: 21, b: 1}, findAndModPrefixed.value, tojson(findAndModPrefixed)); } rst.stopSet(); -})(); diff --git a/jstests/serverless/oplog_rollover.js b/jstests/serverless/oplog_rollover.js new file mode 100644 index 0000000000000..a5981a0bda826 --- /dev/null +++ b/jstests/serverless/oplog_rollover.js @@ -0,0 +1,11 @@ +/** + * Test that oplog (on both primary and secondary) rolls over when its size exceeds the configured + * maximum. This test runs on wiredTiger storage engine for the serverless environment. + */ +(function() { +"use strict"; + +load("jstests/replsets/libs/oplog_rollover_test.js"); + +oplogRolloverTest("wiredTiger", false /* initialSyncMethod */, true /* serverless */); +})(); diff --git a/jstests/serverless/serverlesstest.js b/jstests/serverless/serverlesstest.js deleted file mode 100644 index 3d53e860ef2fd..0000000000000 --- a/jstests/serverless/serverlesstest.js +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Starts up a cluster with all default configurations required by a serverless test. - * The cluster has a mongoq, a config server with 3 nodes and 2 shards. Each shard has 3 nodes. - * The X509 authentication is disabled in the cluster. - */ -class ServerlessTest { - constructor() { - let numShards = 2; - - this.stop = () => { - jsTest.log("Going to stop mongoq."); - MongoRunner.stopMongoq(this.q); - - jsTest.log("Going to stop all replica sets."); - for (var i = 0; i < numShards; i++) { - let rs = this["rs" + i]; - rs.stopSet(15); - } - - jsTest.log("Going to stop config server."); - this.configRS.stopSet(); - }; - - jsTest.log("Going to create and start config server."); - this.configRS = new ReplSetTest({name: "configRS", nodes: 3, useHostName: true}); - this.configRS.startSet({configsvr: '', storageEngine: 'wiredTiger'}); - - jsTest.log("Initiate config server before starting mongoq."); - let replConfig = this.configRS.getReplSetConfig(); - replConfig.configsvr = true; - this.configRS.initiate(replConfig); - - jsTest.log("Going to start mongoq."); - this.q = MongoRunner.runMongoq({configdb: this.configRS.getURL()}); - assert.neq(this.q, null, "Failed to start mongoq"); - - jsTest.log("Going to add replica sets."); - let adminDB = this.q.getDB('admin'); - for (let i = 0; i < numShards; i++) { - let rs = - new ReplSetTest({name: "testShard-rs-" + i, nodes: 3, nodeOptions: {shardsvr: ""}}); - rs.startSet({setParameter: {tenantMigrationDisableX509Auth: true}}); - rs.initiate(); - this["rs" + i] = rs; - } - - jsTest.log("Going to create connection with each shard."); - for (let i = 0; i < numShards; i++) { - let rs = this["rs" + i]; - var result = assert.commandWorked(adminDB.runCommand({addShard: rs.getURL()})); - - let rsConn = new Mongo(rs.getURL()); - rsConn.name = rs.getURL(); - rsConn.rs = rs; - rsConn.shardName = result.shardAdded; - this["shard" + i] = rsConn; - } - - this.q0 = this.q; - jsTest.log("ServerlessTest is created."); - } - - /** - * Helper method for setting primary shard of a database and making sure that it was - * successful. Note: first mongoq needs to be up. 
- */ - ensurePrimaryShard(dbName, shardName) { - var db = this.q.getDB('admin'); - var res = db.adminCommand({movePrimary: dbName, to: shardName}); - assert(res.ok || res.errmsg == "it is already the primary", tojson(res)); - } - - addTenant(tenantId, shardId) { - return assert.commandWorked( - this.configRS.getPrimary() - .getCollection('config.tenants') - .insert({_id: tenantId, shardId: shardId}, {writeConcern: {w: "majority"}})); - } - - removeTenant(tenantId) { - return assert.commandWorked( - this.configRS.getPrimary().getCollection('config.tenants').remove({_id: tenantId}, { - writeConcern: {w: "majority"} - })); - } -} diff --git a/jstests/serverless/serverlesstest_guide.js b/jstests/serverless/serverlesstest_guide.js index 09c06e525633c..800f54f471937 100644 --- a/jstests/serverless/serverlesstest_guide.js +++ b/jstests/serverless/serverlesstest_guide.js @@ -1,12 +1,13 @@ /* * @tags: [serverless] */ + +import {ShardedServerlessTest} from "jstests/serverless/libs/sharded_serverless_test.js"; + (function() { "use strict"; -load("jstests/serverless/serverlesstest.js"); - -let st = new ServerlessTest(); +let st = new ShardedServerlessTest(); (() => { jsTest.log("Test adding and removing tenants to/from config.tenants"); diff --git a/jstests/serverless/shard_split_change_collections_test.js b/jstests/serverless/shard_split_change_collections_test.js index 8279b71a68ef1..51652db6704a9 100644 --- a/jstests/serverless/shard_split_change_collections_test.js +++ b/jstests/serverless/shard_split_change_collections_test.js @@ -4,12 +4,19 @@ */ import {assertMigrationState, ShardSplitTest} from "jstests/serverless/libs/shard_split_test.js"; +load("jstests/libs/fail_point_util.js"); load("jstests/serverless/libs/change_collection_util.js"); const tenantIds = [ObjectId(), ObjectId()]; const donorRst = new ChangeStreamMultitenantReplicaSetTest({ nodes: 3, - nodeOptions: {setParameter: {shardSplitGarbageCollectionDelayMS: 0, ttlMonitorSleepSecs: 1}} + nodeOptions: { + setParameter: { + shardSplitGarbageCollectionDelayMS: 0, + ttlMonitorSleepSecs: 1, + shardSplitTimeoutMS: 100000 + } + } }); const test = new ShardSplitTest({quickGarbageCollection: true, donorRst}); @@ -21,12 +28,21 @@ const donorTenantConn = ChangeStreamMultitenantReplicaSetTest.getTenantConnection(donorPrimary.host, tenantIds[0]); test.donor.setChangeStreamState(donorTenantConn, true); +const donorNonMovingTenantConn = + ChangeStreamMultitenantReplicaSetTest.getTenantConnection(donorPrimary.host, ObjectId()); +test.donor.setChangeStreamState(donorNonMovingTenantConn, true); +const donorNonMovingCursor = donorNonMovingTenantConn.getDB("database").collection.watch(); + // Open a change stream and insert documents into database.collection before the split // starts. const donorCursor = donorTenantConn.getDB("database").collection.watch([]); const insertedDocs = [{_id: "tenant1_1"}, {_id: "tenant1_2"}, {_id: "tenant1_3"}]; donorTenantConn.getDB("database").collection.insertMany(insertedDocs); +// Start up a cursor to check if we can getMore after the tenant has been migrated and change +// collection is dropped. 
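+// (donorCursor2 is only consumed again at the very end of the test, after
+// config.system.change_collection has been dropped, where its getMore is expected to fail with
+// QueryPlanKilled.)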
+const donorCursor2 = donorTenantConn.getDB("database").collection.watch([]); + const donorTenantSession = donorTenantConn.startSession({retryWrites: true}); const donorTenantSessionCollection = donorTenantSession.getDatabase("database").collection; assert.commandWorked(donorTenantSessionCollection.insert({_id: "tenant1_4", w: "RETRYABLE"})); @@ -50,19 +66,65 @@ donorTxnSession.endSession(); assert.eq(donorCursor.hasNext(), true); const {_id: resumeToken} = donorCursor.next(); +// Set this break point so that we can run commands against the primary when the split operation +// enters a blocking state. +const blockingFp = configureFailPoint(donorPrimary, "pauseShardSplitAfterBlocking"); const operation = test.createSplitOperation(tenantIds); -assert.commandWorked(operation.commit()); +const splitThread = operation.commitAsync(); + +// Wait for the split to enter the blocking state. +blockingFp.wait(); + +assert.commandFailedWithCode( + donorTenantConn.getDB("database").runCommand({ + aggregate: "collection", + cursor: {}, + pipeline: [{$changeStream: {}}], + // Timeout set higher than 1000ms to make sure its actually blocked and not just waiting for + // inserts, since change streams are awaitdata cursors. + maxTimeMS: 2 * 1000 + }), + ErrorCodes.MaxTimeMSExpired, + "Opening new change streams should block while a split operation is in a blocking state"); + +blockingFp.off(); +splitThread.join(); +assert.commandWorked(splitThread.returnData()); assertMigrationState(donorPrimary, operation.migrationId, "committed"); -let errCode; -try { - donorTenantConn.getDB("database").collection.watch([]); -} catch (err) { - errCode = err.code; -} -assert.eq(errCode, - ErrorCodes.TenantMigrationCommitted, - "Opening a change stream on the donor after completion of a shard split should fail."); +// Test that we cannot open a new change stream after the tenant has been migrated. +assert.commandFailedWithCode( + donorTenantConn.getDB("database") + .runCommand({aggregate: "collection", cursor: {}, pipeline: [{$changeStream: {}}]}), + ErrorCodes.TenantMigrationCommitted, + "Opening a change stream on the donor after completion of a shard split should fail."); + +// Test change stream cursor behavior on the donor for a tenant which was migrated, and for one +// which remains on the donor. +assert.commandWorked( + donorNonMovingTenantConn.getDB("database") + .runCommand("getMore", {getMore: donorNonMovingCursor._cursorid, collection: "collection"}), + "Tailing a change stream for a tenant that wasn't moved by a split" + + "should not be blocked after the split was committed"); + +// Test that running a getMore on a change stream cursor after the migration commits throws a +// resumable change stream exception. +const failedGetMore = donorTenantConn.getDB("database").runCommand("getMore", { + getMore: donorCursor._cursorid, + collection: "collection" +}); +assert.commandFailedWithCode( + failedGetMore, + ErrorCodes.ResumeTenantChangeStream, + "Tailing a change stream on the donor after completion of a shard split should fail."); +assert(failedGetMore.hasOwnProperty("errorLabels")); +assert.contains("ResumableChangeStreamError", failedGetMore.errorLabels); + +// The cursor should have been deleted after the error so a getMore should fail. 
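+// (The ResumeTenantChangeStream error above closed the original cursor, so retrying the getMore
+// with the same cursor id is expected to return CursorNotFound.)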
+assert.commandFailedWithCode( + donorTenantConn.getDB("database") + .runCommand("getMore", {getMore: donorCursor._cursorid, collection: "collection"}), + ErrorCodes.CursorNotFound); operation.forget(); @@ -100,4 +162,12 @@ const cursors = [recipientPrimaryTenantConn, ...recipientSecondaryConns].map( }); test.cleanupSuccesfulCommitted(operation.migrationId, tenantIds); + +// getMore cursor to check if we can getMore after the database is dropped. +donorTenantSession.getDatabase("config")["system.change_collection"].drop(); +assert.commandFailedWithCode( + donorTenantConn.getDB("database") + .runCommand("getMore", {getMore: donorCursor2._cursorid, collection: "collection"}), + ErrorCodes.QueryPlanKilled); + test.stop(); diff --git a/jstests/serverless/shard_split_cluster_parameters_test.js b/jstests/serverless/shard_split_cluster_parameters_test.js index 57563a8b11264..1ec6262434912 100644 --- a/jstests/serverless/shard_split_cluster_parameters_test.js +++ b/jstests/serverless/shard_split_cluster_parameters_test.js @@ -3,8 +3,8 @@ * @tags: [requires_fcv_63, serverless] */ +import {tenantCommand} from "jstests/libs/cluster_server_parameter_utils.js"; import {assertMigrationState, ShardSplitTest} from "jstests/serverless/libs/shard_split_test.js"; -load("jstests/libs/cluster_server_parameter_utils.js"); const tenantIds = [ObjectId(), ObjectId()]; diff --git a/jstests/serverless/shard_split_concurrent_writes_on_donor_aborted.js b/jstests/serverless/shard_split_concurrent_writes_on_donor_aborted.js index 5555eeccb1baa..fcc4bd901e054 100644 --- a/jstests/serverless/shard_split_concurrent_writes_on_donor_aborted.js +++ b/jstests/serverless/shard_split_concurrent_writes_on_donor_aborted.js @@ -26,7 +26,6 @@ TestData.skipCheckDBHashes = true; const tenantMigrationTest = new ShardSplitTest({ quickGarbageCollection: true, allowStaleReadsOnDonor: true, - initiateWithShortElectionTimeout: true }); const donorPrimary = tenantMigrationTest.getDonorPrimary(); @@ -138,7 +137,7 @@ const operation = tenantMigrationTest.createSplitOperation(tenantIds); setupTestsBeforeMigration(); -operation.commit({retryOnRetryableErrors: false}, {enableDonorStartMigrationFsync: true}); +operation.commit({retryOnRetryableErrors: false}); assertMigrationState(tenantMigrationTest.getDonorPrimary(), operation.migrationId, "aborted"); abortFp.off(); diff --git a/jstests/serverless/shard_split_concurrent_writes_on_donor_blocking.js b/jstests/serverless/shard_split_concurrent_writes_on_donor_blocking.js index d50d55b2aba56..eb30f0cdb9e70 100644 --- a/jstests/serverless/shard_split_concurrent_writes_on_donor_blocking.js +++ b/jstests/serverless/shard_split_concurrent_writes_on_donor_blocking.js @@ -26,7 +26,6 @@ TestData.skipCheckDBHashes = true; const shardSplitTest = new ShardSplitTest({ quickGarbageCollection: true, allowStaleReadsOnDonor: true, - initiateWithShortElectionTimeout: true, // Increase timeout because blocking in the critical section contributes to operation latency. 
nodeOptions: {setParameter: {shardSplitTimeoutMS: 100000}} }); diff --git a/jstests/serverless/shard_split_concurrent_writes_on_donor_committed.js b/jstests/serverless/shard_split_concurrent_writes_on_donor_committed.js index f25a9295c5f95..6fb3b16e5b042 100644 --- a/jstests/serverless/shard_split_concurrent_writes_on_donor_committed.js +++ b/jstests/serverless/shard_split_concurrent_writes_on_donor_committed.js @@ -25,7 +25,6 @@ TestData.skipCheckDBHashes = true; const test = new ShardSplitTest({ quickGarbageCollection: true, allowStaleReadsOnDonor: true, - initiateWithShortElectionTimeout: true }); const donorPrimary = test.getDonorPrimary(); @@ -129,8 +128,7 @@ const operation = test.createSplitOperation(tenantIds); setupTestsBeforeMigration(); -assert.commandWorked( - operation.commit({retryOnRetryableErrors: false}, {enableDonorStartMigrationFsync: true})); +assert.commandWorked(operation.commit({retryOnRetryableErrors: false})); runTestsAfterMigration(); ShardSplitTest.checkShardSplitAccessBlocker(donorPrimary, kTenantID, { diff --git a/jstests/serverless/shard_split_drop_state_doc_collection_aborted.js b/jstests/serverless/shard_split_drop_state_doc_collection_aborted.js index ed53375dd4ba1..8b809987faec0 100644 --- a/jstests/serverless/shard_split_drop_state_doc_collection_aborted.js +++ b/jstests/serverless/shard_split_drop_state_doc_collection_aborted.js @@ -78,8 +78,7 @@ function testDroppingStateDocCollections( } jsTest.log("Test dropping donor and recipient state doc collections during a shard split."); -const test = - new ShardSplitTest({quickGarbageCollection: true, initiateWithShortElectionTimeout: true}); +const test = new ShardSplitTest({quickGarbageCollection: true}); const fpName = "abortShardSplitBeforeLeavingBlockingState"; testDroppingStateDocCollections(test, fpName, {dropDonorsCollection: true}); diff --git a/jstests/serverless/shard_split_drop_state_doc_collection_blocking.js b/jstests/serverless/shard_split_drop_state_doc_collection_blocking.js index 5adf3fd00bcd0..27455aa98922c 100644 --- a/jstests/serverless/shard_split_drop_state_doc_collection_blocking.js +++ b/jstests/serverless/shard_split_drop_state_doc_collection_blocking.js @@ -78,8 +78,7 @@ function testDroppingStateDocCollections( } jsTest.log("Test dropping donor and recipient state doc collections during a shard split."); -const test = - new ShardSplitTest({quickGarbageCollection: true, initiateWithShortElectionTimeout: true}); +const test = new ShardSplitTest({quickGarbageCollection: true}); const fpName = "pauseShardSplitAfterBlocking"; testDroppingStateDocCollections(test, fpName, {dropDonorsCollection: true}); diff --git a/jstests/serverless/shard_split_drop_state_doc_collection_committed.js b/jstests/serverless/shard_split_drop_state_doc_collection_committed.js index 41d6d539a75ba..4940a45bb0eed 100644 --- a/jstests/serverless/shard_split_drop_state_doc_collection_committed.js +++ b/jstests/serverless/shard_split_drop_state_doc_collection_committed.js @@ -64,8 +64,7 @@ function testDroppingStateDocCollections( } jsTest.log("Test dropping donor and recipient state doc collections during a shard split."); -const test = - new ShardSplitTest({quickGarbageCollection: true, initiateWithShortElectionTimeout: true}); +const test = new ShardSplitTest({quickGarbageCollection: true}); const fpName = undefined; testDroppingStateDocCollections(test, fpName, {dropDonorsCollection: true}); diff --git a/jstests/serverless/shard_split_drop_state_doc_collection_decision_fullfilled.js 
b/jstests/serverless/shard_split_drop_state_doc_collection_decision_fullfilled.js index 85afaf7dba123..dcbdf83201ead 100644 --- a/jstests/serverless/shard_split_drop_state_doc_collection_decision_fullfilled.js +++ b/jstests/serverless/shard_split_drop_state_doc_collection_decision_fullfilled.js @@ -81,8 +81,7 @@ function testDroppingStateDocCollections( } jsTest.log("Test dropping donor and recipient state doc collections during a shard split."); -const test = - new ShardSplitTest({quickGarbageCollection: true, initiateWithShortElectionTimeout: true}); +const test = new ShardSplitTest({quickGarbageCollection: true}); const fpName = "pauseShardSplitAfterDecision"; testDroppingStateDocCollections(test, fpName, {dropDonorsCollection: true}); diff --git a/jstests/serverless/shard_split_startup_recovery_recipient_caught_up.js b/jstests/serverless/shard_split_startup_recovery_recipient_caught_up.js new file mode 100644 index 0000000000000..c79030fffc23c --- /dev/null +++ b/jstests/serverless/shard_split_startup_recovery_recipient_caught_up.js @@ -0,0 +1,50 @@ +/** + * Commits a shard split and shuts down while being in a "recipient caught up" state. Tests that we + * recover the tenant access blockers in blocking state with `blockOpTime` set. + * @tags: [requires_fcv_71, serverless] + */ + +import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; +import { + assertMigrationState, + findSplitOperation, + ShardSplitTest +} from "jstests/serverless/libs/shard_split_test.js"; + +load("jstests/libs/fail_point_util.js"); // for "configureFailPoint" + +// Skip db hash check because secondary is left with a different config. +TestData.skipCheckDBHashes = true; + +const test = new ShardSplitTest({ + quickGarbageCollection: true, + nodeOptions: { + setParameter: + {"failpoint.PrimaryOnlyServiceSkipRebuildingInstances": tojson({mode: "alwaysOn"})} + } +}); +test.addRecipientNodes(); + +let donorPrimary = test.donor.getPrimary(); +const fp = configureFailPoint(donorPrimary.getDB("admin"), "pauseShardSplitAfterRecipientCaughtUp"); + +jsTestLog("Running Shard Split restart after recipient caught up"); +const tenantIds = [ObjectId(), ObjectId()]; +const operation = test.createSplitOperation(tenantIds); +const splitThread = operation.commitAsync(); + +fp.wait(); +assertMigrationState(donorPrimary, operation.migrationId, "recipient caught up"); + +test.stop({shouldRestart: true}); +splitThread.join(); + +test.donor.startSet({restart: true}); + +donorPrimary = test.donor.getPrimary(); +assert(findSplitOperation(donorPrimary, operation.migrationId), "There must be a config document"); + +test.validateTenantAccessBlockers( + operation.migrationId, tenantIds, TenantMigrationTest.DonorAccessState.kBlockWritesAndReads); + +test.stop(); diff --git a/jstests/serverless/shard_split_test_max_bson_limit.js b/jstests/serverless/shard_split_test_max_bson_limit.js index 2aa7815fb975c..39794d58b035e 100644 --- a/jstests/serverless/shard_split_test_max_bson_limit.js +++ b/jstests/serverless/shard_split_test_max_bson_limit.js @@ -30,8 +30,8 @@ function bulkWriteDocsUnordered(primaryHost, dbName, collName, numDocs) { } let request = {insert: collName, documents: batch, writeConcern: {w: 1}, ordered: false}; - res = assert.commandFailedWithCode(primaryDB[collName].runCommand(request), - ErrorCodes.TenantMigrationCommitted); + let res = assert.commandFailedWithCode(primaryDB[collName].runCommand(request), + ErrorCodes.TenantMigrationCommitted); return res; } diff --git 
a/jstests/serverless/shard_split_write_during_aborted_split.js b/jstests/serverless/shard_split_write_during_aborted_split.js index aa3dd57daf418..4f143f0ac1d15 100644 --- a/jstests/serverless/shard_split_write_during_aborted_split.js +++ b/jstests/serverless/shard_split_write_during_aborted_split.js @@ -45,10 +45,11 @@ const writes = tenantIds.map(tenantId => { // Verify that we have blocked the expected number of writes to tenant data tenantIds.forEach(tenantId => { assert.soon(() => { - // We expect the numBlockedWrites to be a function of tenantIds size because shard split - // donor access blockers are shared for all tenants being split. I don't understand why - // there are two writes for each insert though. - const kExpectedBlockedWrites = tenantIds.length * 2; + // There are two writes for each insert. The function insertBatchAndHandleErrors first tries + // to acquire the collection lock and create the collection as it doesn't exist. This results + // in an error that is recorded. However, insertBatchAndHandleErrors still tries to process the + // insert, which leads to a second write error. + const kExpectedBlockedWrites = 2; return ShardSplitTest.getNumBlockedWrites(donorPrimary, tenantId) == kExpectedBlockedWrites; }); diff --git a/jstests/serverless/tenant_migration_concurrent_bulk_writes_against_mongoq.js b/jstests/serverless/tenant_migration_concurrent_bulk_writes_against_mongoq.js index e9436e10f8375..2166714f732f4 100644 --- a/jstests/serverless/tenant_migration_concurrent_bulk_writes_against_mongoq.js +++ b/jstests/serverless/tenant_migration_concurrent_bulk_writes_against_mongoq.js @@ -4,11 +4,12 @@ * @tags: [requires_fcv_52, serverless] */ +import {ShardedServerlessTest} from "jstests/serverless/libs/sharded_serverless_test.js"; + (function() { "use strict"; load("jstests/libs/fail_point_util.js"); -load("jstests/serverless/serverlesstest.js"); load('jstests/concurrency/fsm_libs/worker_thread.js'); function donorStartMigrationCmd(tenantID, realConnUrl) { @@ -134,7 +135,7 @@ function orderedBulkInsertAfterTenantMigrationAborted(st, isBulkWriteOrdered) { assert.eq(bulkRes.res.writeErrors.length, 0); } -let st = new ServerlessTest(); +let st = new ShardedServerlessTest(); orderedBulkInsertDuringBlockingState(st, true); orderedBulkInsertDuringBlockingState(st, false); diff --git a/jstests/serverless/tenant_migration_recipient_bulkclone.js b/jstests/serverless/tenant_migration_recipient_bulkclone.js index d29eb9d6e090c..49ed361780240 100644 --- a/jstests/serverless/tenant_migration_recipient_bulkclone.js +++ b/jstests/serverless/tenant_migration_recipient_bulkclone.js @@ -4,12 +4,12 @@ * @tags: [requires_fcv_52, serverless] */ +import {ShardedServerlessTest} from "jstests/serverless/libs/sharded_serverless_test.js"; + (function() { "use strict"; -load("jstests/serverless/serverlesstest.js"); - -let st = new ServerlessTest(); +let st = new ShardedServerlessTest(); let donor = st.rs0; let recipient = st.rs1; diff --git a/jstests/serverless/upgrade_to_use_multitenancy_support.js b/jstests/serverless/upgrade_to_use_multitenancy_support.js new file mode 100644 index 0000000000000..a2daec3216e8e --- /dev/null +++ b/jstests/serverless/upgrade_to_use_multitenancy_support.js @@ -0,0 +1,280 @@ +/** + * This test checks that tenants can access their data before, during and after enabling + * multitenancySupport in a rolling fashion in a replica set.
+ */ + +load("jstests/aggregation/extras/utils.js"); +load("jstests/replsets/rslib.js"); + +// In production, we will upgrade to start using multitenancySupport before enabling this feature +// flag, and this test is meant to exercise that upgrade behavior, so don't run if the feature flag +// is enabled. +const featureFlagRequireTenantId = TestData.setParameters.featureFlagRequireTenantID; +if (featureFlagRequireTenantId) { + quit(); +} + +/* + * Runs a find using a prefixed db, and asserts the find returns 'expectedDocsReturned'. Also + * checks that the "ns" returned in the cursor result is serialized as expected, including the + * tenantId. + */ +function runFindOnPrefixedDb(conn, prefixedDb, collName, expectedDocsReturned) { + const res = + assert.commandWorked(conn.getDB(prefixedDb).runCommand({find: collName, filter: {}})); + assert(arrayEq(expectedDocsReturned, res.cursor.firstBatch), tojson(res)); + const prefixedNamespace = prefixedDb + "." + collName; + assert.eq(res.cursor.ns, prefixedNamespace); +} + +/* + * Runs a findAndModify using a prefixed db. + */ +function runFindAndModOnPrefixedDb(conn, prefixedDb, collName, query, update, expectedDocReturned) { + const res = assert.commandWorked( + conn.getDB(prefixedDb).runCommand({findAndModify: collName, query: query, update: update})); + assert.eq(res.value, expectedDocReturned); +} + +/* + * Runs a find using $tenant, and asserts the find returns 'expectedDocsReturned'. Also + * checks that the "ns" returned in the cursor result is serialized as expected, without the + * tenantId. + */ +function runFindUsingDollarTenant(conn, db, collName, tenantId, expectedDocsReturned) { + const res = assert.commandWorked( + conn.getDB(db).runCommand({find: collName, filter: {}, $tenant: tenantId})); + assert(arrayEq(expectedDocsReturned, res.cursor.firstBatch), tojson(res)); + const namespace = db + "." + collName; + assert.eq(res.cursor.ns, namespace); +} + +/* + * Runs a find using $tenant and prefixed db, and asserts the find returns + * 'expectedDocsReturned'. Also checks that the "ns" returned in the cursor result is serialized + * as expected, including the tenantId. + */ +function runFindUsingDollarTenantAndPrefix( + conn, prefixedDb, collName, tenantId, expectedDocsReturned) { + const res = assert.commandWorked( + conn.getDB(prefixedDb) + .runCommand({find: collName, filter: {}, $tenant: tenantId, expectPrefix: true})); + assert(arrayEq(expectedDocsReturned, res.cursor.firstBatch), tojson(res)); + const prefixedNamespace = prefixedDb + "." + collName; + assert.eq(res.cursor.ns, prefixedNamespace); +} + +/* + * Runs a find for both tenants using a prefixed db, and asserts the find returns + * 'expectedDocsReturned'. + */ +function assertFindBothTenantsPrefixedDb( + conn, tenant1DbPrefixed, tenant2DbPrefixed, kCollName, tenant1Docs, tenant2Docs) { + runFindOnPrefixedDb(conn, tenant1DbPrefixed, kCollName, tenant1Docs); + runFindOnPrefixedDb(conn, tenant2DbPrefixed, kCollName, tenant2Docs); +} + +/* + * Runs a find for both tenants using a prefixed db, and asserts the find returns + * 'expectedDocsReturned'. 
+ */ +function assertFindBothTenantsUsingDollarTenant(conn, + db, + collName, + tenantId1, + tenantId2, + expectedDocsReturnedTenant1, + expectedDocsReturnedTenant2) { + runFindUsingDollarTenant(conn, db, collName, tenantId1, expectedDocsReturnedTenant1); + runFindUsingDollarTenant(conn, db, collName, tenantId2, expectedDocsReturnedTenant2); +} + +const rst = new ReplSetTest({ + nodes: 2, + nodeOptions: { + auth: '', + } +}); +rst.startSet({keyFile: 'jstests/libs/key1'}); +rst.initiate(); + +let originalPrimary = rst.getPrimary(); +let originalSecondary = rst.getSecondary(); + +const kTenant1 = ObjectId(); +const kTenant2 = ObjectId(); +const kDbName = "test"; +const kCollName = "foo"; + +// Create a root user and login on both the primary and secondary. +const primaryAdminDb = originalPrimary.getDB('admin'); +let secondaryAdminDb = originalSecondary.getDB('admin'); +assert.commandWorked(primaryAdminDb.runCommand({createUser: 'admin', pwd: 'pwd', roles: ['root']})); +assert(primaryAdminDb.auth('admin', 'pwd')); +assert(secondaryAdminDb.auth('admin', 'pwd')); + +// Insert data for two different tenants - multitenancySupport is not yet enabled, so we use a +// prefixed db. Then, check that we find the correct docs for both tenants, reading from both +// the primary and secondary. +const tenant1DbPrefixed = kTenant1 + "_" + kDbName; +const tenant1Docs = [{_id: 0, x: 1, y: 1}, {_id: 1, x: 2, y: 3}]; +assert.commandWorked(originalPrimary.getDB(tenant1DbPrefixed) + .runCommand({insert: kCollName, documents: tenant1Docs})); + +const tenant2DbPrefixed = kTenant2 + "_" + kDbName; +const tenant2Docs = [{_id: 10, a: 10, b: 10}, {_id: 11, a: 20, b: 30}]; +assert.commandWorked(originalPrimary.getDB(tenant2DbPrefixed) + .runCommand({insert: kCollName, documents: tenant2Docs})); + +assertFindBothTenantsPrefixedDb( + originalPrimary, tenant1DbPrefixed, tenant2DbPrefixed, kCollName, tenant1Docs, tenant2Docs); +assertFindBothTenantsPrefixedDb( + originalSecondary, tenant1DbPrefixed, tenant2DbPrefixed, kCollName, tenant1Docs, tenant2Docs); + +// Now, restart the secondary and enable multitenancySupport. The primary still does not have +// multitenancySupport enabled. +originalSecondary = rst.restart(originalSecondary, + {startClean: false, setParameter: {'multitenancySupport': true}}); + +originalSecondary.setSecondaryOk(); +assert(originalSecondary.getDB("admin").auth('admin', 'pwd')); + +// Check that we can still find the docs when using a prefixed db on both the primary and +// secondary. +assertFindBothTenantsPrefixedDb( + originalPrimary, tenant1DbPrefixed, tenant2DbPrefixed, kCollName, tenant1Docs, tenant2Docs); +assertFindBothTenantsPrefixedDb( + originalSecondary, tenant1DbPrefixed, tenant2DbPrefixed, kCollName, tenant1Docs, tenant2Docs); + +// Now check that we find the docs for both tenants when reading from the secondary using +// $tenant and a security token. The primary does not yet support $tenant or a security token +// since it does not have multitenancySupport enabled. +assertFindBothTenantsUsingDollarTenant( + originalSecondary, kDbName, kCollName, kTenant1, kTenant2, tenant1Docs, tenant2Docs); + +// Also assert both tenants find the new doc on the secondary using $tenant and a prefixed db. 
+runFindUsingDollarTenantAndPrefix( + originalSecondary, tenant1DbPrefixed, kCollName, kTenant1, tenant1Docs); +runFindUsingDollarTenantAndPrefix( + originalSecondary, tenant2DbPrefixed, kCollName, kTenant2, tenant2Docs); + +// Now insert a new doc for both tenants using the prefixed db, and assert that we can find it +// on both the primary and secondary. +const newTenant1Doc = [{_id: 2, x: 3}]; +const newTenant2Doc = [{_id: 12, a: 30}]; +assert.commandWorked(originalPrimary.getDB(tenant1DbPrefixed) + .runCommand({insert: kCollName, documents: newTenant1Doc})); +assert.commandWorked(originalPrimary.getDB(tenant2DbPrefixed) + .runCommand({insert: kCollName, documents: newTenant2Doc})); + +const allTenant1Docs = tenant1Docs.concat(newTenant1Doc); +const allTenant2Docs = tenant2Docs.concat(newTenant2Doc); + +// Assert both tenants find the new doc on both the primary and secondary when using the +// prefixed db. +assertFindBothTenantsPrefixedDb(originalPrimary, + tenant1DbPrefixed, + tenant2DbPrefixed, + kCollName, + allTenant1Docs, + allTenant2Docs); +assertFindBothTenantsPrefixedDb(originalSecondary, + tenant1DbPrefixed, + tenant2DbPrefixed, + kCollName, + allTenant1Docs, + allTenant2Docs); + +// Assert both tenants find the new doc on the secondary using $tenant. +assertFindBothTenantsUsingDollarTenant( + originalSecondary, kDbName, kCollName, kTenant1, kTenant2, allTenant1Docs, allTenant2Docs); + +// Assert both tenants find the new doc on the secondary using $tenant and a prefixed db. +runFindUsingDollarTenantAndPrefix( + originalSecondary, tenant1DbPrefixed, kCollName, kTenant1, allTenant1Docs); +runFindUsingDollarTenantAndPrefix( + originalSecondary, tenant2DbPrefixed, kCollName, kTenant2, allTenant2Docs); + +// Now run findAndModify on one doc using a prefixed db and check that we can read from the +// secondary using just $tenant and $tenant and a prefix. +runFindAndModOnPrefixedDb(originalPrimary, + tenant1DbPrefixed, + kCollName, + newTenant1Doc[0], + {$set: {x: 4}}, + newTenant1Doc[0]); +runFindAndModOnPrefixedDb(originalPrimary, + tenant2DbPrefixed, + kCollName, + newTenant2Doc[0], + {$set: {a: 40}}, + newTenant2Doc[0]); + +const modifiedTenant1Docs = tenant1Docs.concat([{_id: 2, x: 4}]); +const modifiedTenant2Docs = tenant2Docs.concat([{_id: 12, a: 40}]); +assertFindBothTenantsUsingDollarTenant(originalSecondary, + kDbName, + kCollName, + kTenant1, + kTenant2, + modifiedTenant1Docs, + modifiedTenant2Docs); + +runFindUsingDollarTenantAndPrefix( + originalSecondary, tenant1DbPrefixed, kCollName, kTenant1, modifiedTenant1Docs); +runFindUsingDollarTenantAndPrefix( + originalSecondary, tenant2DbPrefixed, kCollName, kTenant2, modifiedTenant2Docs); + +// Now, restart the primary and enable multitenancySupport. The secondary will step up to +// become primary. +originalPrimary = + rst.restart(originalPrimary, {startClean: false, setParameter: {'multitenancySupport': true}}); +assert(originalPrimary.getDB("admin").auth('admin', 'pwd')); +waitForState(originalSecondary, ReplSetTest.State.PRIMARY); +waitForState(originalPrimary, ReplSetTest.State.SECONDARY); +originalPrimary.setSecondaryOk(); + +// Check that we can still find the docs when using a prefixed db on both the primary and +// secondary. 
+assertFindBothTenantsPrefixedDb(originalPrimary, + tenant1DbPrefixed, + tenant2DbPrefixed, + kCollName, + modifiedTenant1Docs, + modifiedTenant2Docs); +assertFindBothTenantsPrefixedDb(originalSecondary, + tenant1DbPrefixed, + tenant2DbPrefixed, + kCollName, + modifiedTenant1Docs, + modifiedTenant2Docs); + +// Now check that we find the docs for both tenants when reading from both the primary and +// secondary using $tenant. +assertFindBothTenantsUsingDollarTenant(originalPrimary, + kDbName, + kCollName, + kTenant1, + kTenant2, + modifiedTenant1Docs, + modifiedTenant2Docs); +assertFindBothTenantsUsingDollarTenant(originalSecondary, + kDbName, + kCollName, + kTenant1, + kTenant2, + modifiedTenant1Docs, + modifiedTenant2Docs); + +// Also check that both tenants find the new doc on the primary and secondary using $tenant and +// a prefixed db. +runFindUsingDollarTenantAndPrefix( + originalPrimary, tenant1DbPrefixed, kCollName, kTenant1, modifiedTenant1Docs); +runFindUsingDollarTenantAndPrefix( + originalSecondary, tenant2DbPrefixed, kCollName, kTenant2, modifiedTenant2Docs); +runFindUsingDollarTenantAndPrefix( + originalPrimary, tenant1DbPrefixed, kCollName, kTenant1, modifiedTenant1Docs); +runFindUsingDollarTenantAndPrefix( + originalSecondary, tenant2DbPrefixed, kCollName, kTenant2, modifiedTenant2Docs); + +rst.stopSet(); \ No newline at end of file diff --git a/jstests/sharding/addshard2.js b/jstests/sharding/addshard2.js index 6f8e6d8ae498c..6503b91692a78 100644 --- a/jstests/sharding/addshard2.js +++ b/jstests/sharding/addshard2.js @@ -39,8 +39,8 @@ const assertAddShardFailed = function(res, shardName) { // If a shard name was specified in the addShard, make sure no shard with its name shows up // in config.shards. if (shardName) { - if (TestData.catalogShard && shardName === "config") { - // In catalog shard mode there's always an entry for config for the config server. + if (TestData.configShard && shardName === "config") { + // In config shard mode there's always an entry for config for the config server. assert.neq(null, st.s.getDB('config').shards.findOne({_id: shardName})); } else { assert.eq(null, @@ -52,7 +52,7 @@ const assertAddShardFailed = function(res, shardName) { }; const st = new ShardingTest({ - shards: TestData.catalogShard ? 1 : 0, + shards: TestData.configShard ? 1 : 0, mongos: 1, }); diff --git a/jstests/sharding/addshard6.js b/jstests/sharding/addshard6.js index f04a91a4661ce..7e40058ec375d 100644 --- a/jstests/sharding/addshard6.js +++ b/jstests/sharding/addshard6.js @@ -21,7 +21,7 @@ var assertAddShardFailed = function(res, shardName) { }; var st = new ShardingTest({ - shards: TestData.catalogShard ? 1 : 0, + shards: TestData.configShard ? 1 : 0, mongos: 1, }); diff --git a/jstests/sharding/after_cluster_time.js b/jstests/sharding/after_cluster_time.js index 690c1f8b50b85..b4b4becce39b5 100644 --- a/jstests/sharding/after_cluster_time.js +++ b/jstests/sharding/after_cluster_time.js @@ -31,6 +31,9 @@ rst.initiate(); // Start the sharding test and add the majority read concern enabled replica set. 
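The new upgrade_to_use_multitenancy_support.js test above exercises two read paths during the rolling upgrade. A minimal sketch of those two paths, not part of the patch, assuming a connection `conn`, a tenant ObjectId `tenantId`, and that the node handling the `$tenant` form has multitenancySupport enabled:

```js
// Illustrative sketch only, not part of the patch.
const kDbName = "test";
const kCollName = "foo";

// 1. Prefixed-database read: works whether or not multitenancySupport is enabled.
const prefixedDb = tenantId + "_" + kDbName;
assert.commandWorked(conn.getDB(prefixedDb).runCommand({find: kCollName, filter: {}}));

// 2. $tenant read: only accepted once multitenancySupport is enabled on the node.
assert.commandWorked(
    conn.getDB(kDbName).runCommand({find: kCollName, filter: {}, $tenant: tenantId}));
```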
const st = new ShardingTest({manualAddShard: true}); +if (TestData.configShard) { + assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1})); +} assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()})); const testDB = st.s.getDB("test"); diff --git a/jstests/sharding/agg_out_drop_database.js b/jstests/sharding/agg_out_drop_database.js index 7cceb60e848f2..2e612a37dab92 100644 --- a/jstests/sharding/agg_out_drop_database.js +++ b/jstests/sharding/agg_out_drop_database.js @@ -2,7 +2,7 @@ * Test that aggregation's $out stage serializes behind a drop database and fails. * * @tags: [ - * requires_fcv_51, + * requires_fcv_71, * does_not_support_stepdowns, # DropDatabaseCoordinator drops the input collection on step-up * ] */ @@ -26,7 +26,7 @@ assert.commandWorked(inputColl.insert({_id: 0})); const outputCollName = "output_coll"; let failpoint = - configureFailPoint(st.rs0.getPrimary(), 'blockBeforeInternalRenameIfOptionsAndIndexesMatch'); + configureFailPoint(st.rs0.getPrimary(), 'blockBeforeInternalRenameAndBeforeTakingDDLLocks'); function aggOut(inputCollName, outputCollName) { // Make sure the aggregation fails because the database has been dropped diff --git a/jstests/sharding/agg_project_limit_pipe_split.js b/jstests/sharding/agg_project_limit_pipe_split.js index 7f5c7a51951a9..c864776d8f9ee 100644 --- a/jstests/sharding/agg_project_limit_pipe_split.js +++ b/jstests/sharding/agg_project_limit_pipe_split.js @@ -1,8 +1,4 @@ // Tests that the correct number of results are returned when $limit is coalesced with $sort. -(function() { -"use strict"; -load("jstests/libs/analyze_plan.js"); - const shardingTest = new ShardingTest({shards: 2}); const db = shardingTest.getDB("project_limit"); const coll = db.project_limit_pipe_split; @@ -74,5 +70,4 @@ assert.eq( ], agg.toArray()); -shardingTest.stop(); -})(); \ No newline at end of file +shardingTest.stop(); \ No newline at end of file diff --git a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js index 7ba54f04eba59..b3a1e881d6125 100644 --- a/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js +++ b/jstests/sharding/all_shard_and_config_hosts_brought_down_one_by_one.js @@ -1,7 +1,9 @@ /** * Shuts down config server and shard replica set nodes one by one and ensures correct behaviour. * - * @tags: [temporary_catalog_shard_incompatible] + * Restarts the config server, which requires persistence so restarted nodes can rejoin their + * original replica set and run shutdown hooks. + * @tags: [requires_persistence] */ // Checking UUID and index consistency involves talking to the config servers, which are shut down @@ -14,52 +16,78 @@ TestData.skipCheckShardFilteringMetadata = true; (function() { 'use strict'; -var st = new ShardingTest({shards: {rs0: {nodes: 2}}}); +var st = new ShardingTest({ + shards: { + rs0: {nodes: 2}, + }, + config: 3 +}); // The default read concern is local, which is incompatible with secondary reads when the primary is // down. 
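The after_cluster_time.js change above illustrates the pattern that tests written for manualAddShard follow now that catalog shard mode has been renamed to config shard mode. A minimal sketch, not part of the patch, assuming a ShardingTest `st` started with {manualAddShard: true} and a replica set `rst` to add:

```js
// Illustrative sketch only, not part of the patch.
if (TestData.configShard) {
    // In config shard mode the config server also acts as a shard, so promote it first.
    assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1}));
}
assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()}));
```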
st.s.adminCommand({setDefaultRWConcern: 1, defaultReadConcern: {level: "available"}}); +let count = 0; jsTest.log('Config nodes up: 3 of 3, shard nodes up: 2 of 2: ' + 'Insert test data to work with'); assert.commandWorked(st.s0.getDB('TestDB').TestColl.update( {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}})); -assert.eq([{_id: 0, count: 1}], st.s0.getDB('TestDB').TestColl.find().toArray()); +count += 1; +assert.eq([{_id: 0, count}], st.s0.getDB('TestDB').TestColl.find().toArray()); jsTest.log('Config nodes up: 2 of 3, shard nodes up: 2 of 2: ' + 'Inserts and queries must work'); -st.configRS.stop(0); +st.configRS.stop(0, undefined, undefined, {forRestart: true}); st.restartMongos(0); assert.commandWorked(st.s0.getDB('TestDB').TestColl.update( {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}})); -assert.eq([{_id: 0, count: 2}], st.s0.getDB('TestDB').TestColl.find().toArray()); +count += 1; +assert.eq([{_id: 0, count}], st.s0.getDB('TestDB').TestColl.find().toArray()); -jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' + - 'Inserts and queries must work'); -st.configRS.stop(1); -st.restartMongos(0); -assert.commandWorked(st.s0.getDB('TestDB').TestColl.update( - {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}})); -assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray()); +if (!TestData.configShard) { + // For a config shard, the config server is the shard, so we can't have a different number up. + jsTest.log('Config nodes up: 1 of 3, shard nodes up: 2 of 2: ' + + 'Inserts and queries must work'); + st.configRS.stop(1, undefined, undefined, {forRestart: true}); + st.restartMongos(0); + assert.commandWorked(st.s0.getDB('TestDB').TestColl.update( + {_id: 0}, {$inc: {count: 1}}, {upsert: true, writeConcern: {w: 2, wtimeout: 30000}})); + count += 1; + assert.eq([{_id: 0, count}], st.s0.getDB('TestDB').TestColl.find().toArray()); +} jsTest.log('Config nodes up: 1 of 3, shard nodes up: 1 of 2: ' + 'Only queries will work (no shard primary)'); st.rs0.stop(0); st.restartMongos(0); st.s0.setSecondaryOk(); -assert.eq([{_id: 0, count: 3}], st.s0.getDB('TestDB').TestColl.find().toArray()); +assert.eq([{_id: 0, count}], st.s0.getDB('TestDB').TestColl.find().toArray()); -jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' + - 'MongoS must start, but no operations will work (no shard nodes available)'); -st.rs0.stop(1); -st.restartMongos(0); -assert.throws(function() { - st.s0.getDB('TestDB').TestColl.find().toArray(); -}); +if (!TestData.configShard) { + // For a config shard, the config server is the shard, so we can't have a different number up. + jsTest.log('Config nodes up: 1 of 3, shard nodes up: 0 of 2: ' + + 'MongoS must start, but no operations will work (no shard nodes available)'); + st.rs0.stop(1); + st.restartMongos(0); + assert.throws(function() { + st.s0.getDB('TestDB').TestColl.find().toArray(); + }); +} jsTest.log('Config nodes up: 0 of 3, shard nodes up: 0 of 2: ' + 'Metadata cannot be loaded at all, no operations will work'); -st.configRS.stop(1); +if (!TestData.configShard) { + st.configRS.stop(2); +} else if (TestData.configShard) { + st.configRS.stop(1, undefined, undefined, {forRestart: true}); + // Restart mongos while a config server is still up. + st.restartMongos(0); + // After taking down the last config/shard node, no user data operations will work. 
+ st.configRS.stop(2, undefined, undefined, {forRestart: true}); + assert.throws(function() { + st.s0.getDB('TestDB').TestColl.find().toArray(); + }); +} // Instead of restarting mongos, ensure it has no metadata assert.commandWorked(st.s0.adminCommand({flushRouterConfig: 1})); @@ -83,7 +111,7 @@ for (var i = 0; i < 2; i++) { } } -// Restart one config server node to ensure that teardown checks may be executed -st.restartConfigServer(0); +// Restart two config server nodes to ensure that teardown checks may be executed +st.restartAllConfigServers(); st.stop(); }()); diff --git a/jstests/sharding/allow_partial_results.js b/jstests/sharding/allow_partial_results.js index f3d79b4f92ead..810ff19e6c432 100644 --- a/jstests/sharding/allow_partial_results.js +++ b/jstests/sharding/allow_partial_results.js @@ -1,6 +1,8 @@ /** * Tests that the 'allowPartialResults' option to find is respected, and that aggregation does not * accept the 'allowPartialResults' option. + * TODO SERVER-71169: Re-enable this test after shard filtering in CQF is implemented. + * @tags: [cqf_incompatible] */ // This test shuts down a shard. diff --git a/jstests/sharding/analyze_shard_key/analyze_shard_key_agg_stage_auth.js b/jstests/sharding/analyze_shard_key/analyze_shard_key_agg_stage_auth.js index 94d02661b7ecc..a2362508ca14f 100644 --- a/jstests/sharding/analyze_shard_key/analyze_shard_key_agg_stage_auth.js +++ b/jstests/sharding/analyze_shard_key/analyze_shard_key_agg_stage_auth.js @@ -33,7 +33,7 @@ function runTest(primary) { // $_analyzeShardKeyReadWriteDistribution spec const stageSpec = { key: {x: 1}, - splitPointsFilter: {"_id.commandId": UUID()}, + splitPointsFilter: {"_id.analyzeShardKeyId": UUID()}, splitPointsAfterClusterTime: new Timestamp(100, 1), // The use of "dummyShard" for splitPointsShardId will cause the aggregation to fail on // a sharded cluster with error code ShardNotFound. 
diff --git a/jstests/sharding/analyze_shard_key/analyze_shard_key_agg_validation.js b/jstests/sharding/analyze_shard_key/analyze_shard_key_agg_validation.js index b5479d7c26f27..5bcdf43fa0616 100644 --- a/jstests/sharding/analyze_shard_key/analyze_shard_key_agg_validation.js +++ b/jstests/sharding/analyze_shard_key/analyze_shard_key_agg_validation.js @@ -3,17 +3,15 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; -load("jstests/libs/catalog_shard_util.js"); load("jstests/sharding/analyze_shard_key/libs/validation_common.js"); function makeAnalyzeShardKeyAggregateCmdObj(collName, key, splitPointsShardId) { - const commandId = UUID(); + const analyzeShardKeyId = UUID(); const spec = { key, - splitPointsFilter: {"_id.commandId": commandId}, + splitPointsFilter: {"_id.analyzeShardKeyId": analyzeShardKeyId}, splitPointsAfterClusterTime: new Timestamp(100, 1), }; if (splitPointsShardId) { @@ -26,7 +24,7 @@ function makeAnalyzeShardKeyAggregateCmdObj(collName, key, splitPointsShardId) { cursor: {} }, makeSplitPointIdFunc: () => { - return {commandId, splitPointId: UUID()}; + return {analyzeShardKeyId, splitPointId: UUID()}; } }; } @@ -97,11 +95,6 @@ function runTest(rst, validationTest, shardName) { const {aggCmdObj} = makeAnalyzeShardKeyAggregateCmdObj(validationTest.collName, {id: 1}, st.shard0.name); assert.commandWorked(shard0Primary.getDB(validationTest.dbName).runCommand(aggCmdObj)); - if (!CatalogShardUtil.isEnabledIgnoringFCV(st)) { - assert.commandFailedWithCode( - configPrimary.getDB(validationTest.dbName).runCommand(aggCmdObj), - ErrorCodes.IllegalOperation); - } } runTest(st.rs0, validationTest, st.shard0.name); @@ -121,5 +114,4 @@ function runTest(rst, validationTest, shardName) { runTest(rst, validationTest, null /* shardName */); rst.stopSet(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/analyze_shard_key/analyze_shard_key_auth.js b/jstests/sharding/analyze_shard_key/analyze_shard_key_auth.js index c25b6508d970d..acc0460d3eaee 100644 --- a/jstests/sharding/analyze_shard_key/analyze_shard_key_auth.js +++ b/jstests/sharding/analyze_shard_key/analyze_shard_key_auth.js @@ -1,6 +1,5 @@ /** - * Test to validate the privileges required by the analyzeShardKey and configureQueryAnalyzer - * commands and _refreshQueryAnalyzerConfiguration internal command. + * Test to validate the privileges required by the analyzeShardKey command. * * @tags: [requires_fcv_70] */ diff --git a/jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js b/jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js index 3a034cc214222..87a2dbffb9dd3 100644 --- a/jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js +++ b/jstests/sharding/analyze_shard_key/analyze_shard_key_basic.js @@ -3,10 +3,7 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; - -load("jstests/libs/catalog_shard_util.js"); +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; const setParameterOpts = { analyzeShardKeyNumRanges: 100 @@ -222,17 +219,15 @@ function testNotSupportReadWriteConcern(writeConn, testCases) { testCases.push({conn: node, isSupported: true, isPrimaryShardMongod: false}); }); - // The analyzeShardKey command is not supported on dedicated configsvr mongods. 
- const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st); st.configRS.nodes.forEach(node => { - // If catalog shard mode isn't enabled, don't expect a sharded collection since the config + // If config shard mode isn't enabled, don't expect a sharded collection since the config // server isn't enabled as a shard and won't have chunks. testCases.push({ conn: node, - isSupported: isCatalogShardEnabled, - // The config server is shard0 in catalog shard mode. - isPrimaryShardMongod: TestData.catalogShard, - doNotExpectColl: !TestData.catalogShard + isSupported: true, + // The config server is shard0 in config shard mode. + isPrimaryShardMongod: TestData.configShard, + doNotExpectColl: !TestData.configShard }); }); @@ -302,5 +297,4 @@ if (!TestData.auth) { testExistingUnshardedCollection(mongod, testCases); MongoRunner.stopMongod(mongod); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/analyze_shard_key/analyze_shard_key_cmd_validation.js b/jstests/sharding/analyze_shard_key/analyze_shard_key_cmd_validation.js index ecf401b7327dc..ae1d466dc30ba 100644 --- a/jstests/sharding/analyze_shard_key/analyze_shard_key_cmd_validation.js +++ b/jstests/sharding/analyze_shard_key/analyze_shard_key_cmd_validation.js @@ -12,9 +12,15 @@ load("jstests/sharding/analyze_shard_key/libs/validation_common.js"); const analyzeShardKeyNumRanges = 10; -function testValidationBeforeMetricsCalculation(conn, validationTest) { +function testValidationBeforeMetricsCalculation(conn, mongodConn, validationTest) { jsTest.log(`Testing validation before calculating any metrics`); + // Set the fail point that would make the analyzeShardKey command fail with an InternalError + // before metrics calculation. That way if there is no expected validation before the metrics + // calculation, the command would fail with an InternalError error instead of the expected + // error. + let fp = configureFailPoint(mongodConn, "analyzeShardKeyFailBeforeMetricsCalculation"); + for (let {dbName, collName, isView} of validationTest.invalidNamespaceTestCases) { jsTest.log(`Testing that the analyzeShardKey command fails if the namespace is invalid ${ tojson({dbName, collName})}`); @@ -30,9 +36,11 @@ function testValidationBeforeMetricsCalculation(conn, validationTest) { assert.commandFailedWithCode(conn.adminCommand({analyzeShardKey: ns, key: shardKey}), ErrorCodes.BadValue); } + + fp.off(); } -function testValidationDuringKeyCharactericsMetricsCalculation(conn, validationTest) { +function testValidationDuringKeyCharacteristicsMetricsCalculation(conn, validationTest) { const dbName = validationTest.dbName; const collName = validationTest.collName; const ns = dbName + "." 
+ collName; @@ -58,8 +66,13 @@ function testValidationDuringKeyCharactericsMetricsCalculation(conn, validationT for (let {indexOptions, shardKey} of validationTest.noCompatibleIndexTestCases) { jsTest.log(`Testing incompatible index ${tojson({indexOptions, shardKey})}`); assert.commandWorked(testDB.runCommand({createIndexes: collName, indexes: [indexOptions]})); - const res = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: shardKey})); - AnalyzeShardKeyUtil.assertNotContainKeyCharacteristicsMetrics(res); + assert.commandFailedWithCode(conn.adminCommand({ + analyzeShardKey: ns, + key: shardKey, + keyCharacteristics: true, + readWriteDistribution: false + }), + ErrorCodes.IllegalOperation); assert.commandWorked(testDB.runCommand({dropIndexes: collName, index: indexOptions.name})); } @@ -85,7 +98,12 @@ function testValidationDuringReadWriteDistributionMetricsCalculation( aggConn, "analyzeShardKeyPauseBeforeCalculatingReadWriteDistributionMetrics"); let analyzeShardKeyFunc = (cmdHost, ns, arrayFieldName) => { const cmdConn = new Mongo(cmdHost); - return cmdConn.adminCommand({analyzeShardKey: ns, key: {[arrayFieldName]: 1}}); + return cmdConn.adminCommand({ + analyzeShardKey: ns, + key: {[arrayFieldName]: 1}, + keyCharacteristics: false, + readWriteDistribution: true + }); }; let analyzeShardKeyThread = new Thread(analyzeShardKeyFunc, cmdConn.host, ns, arrayFieldName); @@ -114,20 +132,8 @@ const setParameterOpts = {analyzeShardKeyNumRanges}; const validationTest = ValidationTest(st.s); // Disable the calculation of all metrics to test validation at the start of the command. - let fp0 = - configureFailPoint(shard0Primary, "analyzeShardKeySkipCalcalutingKeyCharactericsMetrics"); - let fp1 = configureFailPoint(shard0Primary, - "analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics"); - testValidationBeforeMetricsCalculation(st.s, validationTest); - - // Enable the calculation of the metrics about the characteristics of the shard key to test - // validation during that step. - fp0.off(); - testValidationDuringKeyCharactericsMetricsCalculation(st.s, validationTest); - - // Enable the calculation of the metrics about the read and write distribution to test - // validation during that step. - fp1.off(); + testValidationBeforeMetricsCalculation(st.s, shard0Primary, validationTest); + testValidationDuringKeyCharacteristicsMetricsCalculation(st.s, validationTest); testValidationDuringReadWriteDistributionMetricsCalculation( st.s, validationTest, shard0Primary); @@ -142,20 +148,8 @@ const setParameterOpts = {analyzeShardKeyNumRanges}; const validationTest = ValidationTest(primary); - // Disable the calculation of all metrics to test validation at the start of the command. - let fp0 = configureFailPoint(primary, "analyzeShardKeySkipCalcalutingKeyCharactericsMetrics"); - let fp1 = - configureFailPoint(primary, "analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics"); - testValidationBeforeMetricsCalculation(primary, validationTest); - - // Enable the calculation of the metrics about the characteristics of the shard key to test - // validation during that step. - fp0.off(); - testValidationDuringKeyCharactericsMetricsCalculation(primary, validationTest); - - // Enable the calculation of the metrics about the read and write distribution to test - // validation during that step. 
- fp1.off(); + testValidationBeforeMetricsCalculation(primary, primary, validationTest); + testValidationDuringKeyCharacteristicsMetricsCalculation(primary, validationTest); testValidationDuringReadWriteDistributionMetricsCalculation(primary, validationTest, primary); rst.stopSet(); diff --git a/jstests/sharding/analyze_shard_key/analyze_shard_key_database_and_shard_versioning.js b/jstests/sharding/analyze_shard_key/analyze_shard_key_database_and_shard_versioning.js index 59d8bbfb4d647..a1aaa2678777d 100644 --- a/jstests/sharding/analyze_shard_key/analyze_shard_key_database_and_shard_versioning.js +++ b/jstests/sharding/analyze_shard_key/analyze_shard_key_database_and_shard_versioning.js @@ -23,14 +23,7 @@ const st = new ShardingTest({ shards: 2, rs: { nodes: numNodesPerRS, - setParameter: { - // The calculation of the read and write distribution metrics involves generating split - // points which requires the shard key to have sufficient cardinality. To avoid needing - // to insert a lot of documents, just skip the calculation. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}), - analyzeShardKeyNumMostCommonValues: numMostCommonValues - } + setParameter: {analyzeShardKeyNumMostCommonValues: numMostCommonValues} } }); @@ -51,7 +44,11 @@ function runTest(readPreference) { const analyzeShardKeyCmdObj = { analyzeShardKey: ns, key: {x: 1}, - $readPreference: readPreference + $readPreference: readPreference, + // The calculation of the read and write distribution metrics involves generating split + // points which requires the shard key to have sufficient cardinality. To avoid needing + // to insert a lot of documents, just skip the calculation. + readWriteDistribution: false, }; const expectedMetrics = { numDocs: 2, @@ -63,7 +60,7 @@ function runTest(readPreference) { // Run the analyzeShardKey command and verify that the metrics are as expected. const res0 = assert.commandWorked(st.s1.adminCommand(analyzeShardKeyCmdObj)); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res0, expectedMetrics); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res0.keyCharacteristics, expectedMetrics); // Make shard1 the primary shard instead by running the movePrimary command against mongos0. assert.commandWorked(st.s0.adminCommand({movePrimary: dbName, to: st.shard1.name})); @@ -74,7 +71,7 @@ function runTest(readPreference) { // run on shard0 instead of on shard1. As a result, the command would fail with a // NamespaceNotFound error. const res1 = assert.commandWorked(st.s1.adminCommand(analyzeShardKeyCmdObj)); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res1, expectedMetrics); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res1.keyCharacteristics, expectedMetrics); // Shard the collection and make it have two chunks: // shard0: [MinKey, 0] @@ -91,7 +88,7 @@ function runTest(readPreference) { // only on shard1 instead of on both shard0 and shard1. As a result, the metrics would be // incorrect. 
const res2 = assert.commandWorked(st.s1.adminCommand(analyzeShardKeyCmdObj)); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res2, expectedMetrics); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res2.keyCharacteristics, expectedMetrics); } runTest({mode: "primary"}); diff --git a/jstests/sharding/analyze_shard_key/analyze_shard_key_options.js b/jstests/sharding/analyze_shard_key/analyze_shard_key_options.js new file mode 100644 index 0000000000000..09424c63d9c52 --- /dev/null +++ b/jstests/sharding/analyze_shard_key/analyze_shard_key_options.js @@ -0,0 +1,91 @@ +/** + * Tests that the analyzeShardKey command supports analyzing the characteristics of the shard + * key and/or the read and write distribution. + */ +(function() { +"use strict"; + +load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); + +const numNodesPerRS = 2; +// The write concern to use when inserting documents into test collections. Waiting for the +// documents to get replicated to all nodes is necessary since mongos runs the analyzeShardKey +// command with readPreference "secondaryPreferred". +const writeConcern = { + w: numNodesPerRS +}; + +function runTest(conn) { + const dbName = "testDb"; + const collName = "testColl"; + const numDocs = 10000; + const ns = dbName + "." + collName; + const db = conn.getDB(dbName); + const coll = db.getCollection(collName); + + const docs = []; + for (let i = 0; i < numDocs; i++) { + docs.push({x: i}); + } + assert.commandWorked(coll.insert(docs, {writeConcern})); + assert.commandWorked(coll.createIndex({x: 1})); + + const res0 = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: {x: 1}})); + AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(res0); + AnalyzeShardKeyUtil.assertContainReadWriteDistributionMetrics(res0); + + const res1 = assert.commandWorked( + conn.adminCommand({analyzeShardKey: ns, key: {x: 1}, keyCharacteristics: false})); + AnalyzeShardKeyUtil.assertNotContainKeyCharacteristicsMetrics(res1); + AnalyzeShardKeyUtil.assertContainReadWriteDistributionMetrics(res1); + + const res2 = assert.commandWorked( + conn.adminCommand({analyzeShardKey: ns, key: {x: 1}, readWriteDistribution: false})); + AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(res2); + AnalyzeShardKeyUtil.assertNotContainReadWriteDistributionMetrics(res2); + + const res3 = assert.commandWorked(conn.adminCommand( + {analyzeShardKey: ns, key: {x: 1}, keyCharacteristics: true, readWriteDistribution: true})); + AnalyzeShardKeyUtil.assertContainKeyCharacteristicsMetrics(res3); + AnalyzeShardKeyUtil.assertContainReadWriteDistributionMetrics(res3); + + // Verify that when both 'keyCharacteristics' and 'readWriteDistribution' are false, + // the command fails because there are no metrics to return. + assert.commandFailedWithCode(conn.adminCommand({ + analyzeShardKey: ns, + key: {x: 1}, + keyCharacteristics: false, + readWriteDistribution: false + }), + ErrorCodes.InvalidOptions); + + // Verify that when 'readWriteDistribution' is false and the shard key does not have a + // supporting index, the command fails because there are no metrics to return.
+ const res4 = assert.commandFailedWithCode(conn.adminCommand({ + analyzeShardKey: ns, + key: {y: 1}, + keyCharacteristics: true, + readWriteDistribution: false + }), + ErrorCodes.IllegalOperation); + assert.eq( + res4.errmsg, + "Cannot analyze the characteristics of a shard key that does not have a supporting index"); + + assert(coll.drop()); +} + +{ + const st = new ShardingTest({shards: 2, rs: {nodes: numNodesPerRS}}); + runTest(st.s); + st.stop(); +} + +{ + const rst = new ReplSetTest({nodes: numNodesPerRS}); + rst.startSet(); + rst.initiate(); + runTest(rst.getPrimary()); + rst.stopSet(); +} +})(); diff --git a/jstests/sharding/analyze_shard_key/avg_doc_size.js b/jstests/sharding/analyze_shard_key/avg_doc_size.js index c35d0b6f0a589..0137d31ece7f4 100644 --- a/jstests/sharding/analyze_shard_key/avg_doc_size.js +++ b/jstests/sharding/analyze_shard_key/avg_doc_size.js @@ -17,9 +17,15 @@ function testUnshardedCollection(conn) { assert.commandWorked( coll.insert([{candidateKey: "a"}, {candidateKey: new Array(1000).join("a")}])); - const res = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: candidateKey})); - assert.lt(res.avgDocSizeBytes, 1000, res); - assert.gt(res.avgDocSizeBytes, 1000 / 2, res); + const res = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: candidateKey, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + assert.lt(res.keyCharacteristics.avgDocSizeBytes, 1000, res); + assert.gt(res.keyCharacteristics.avgDocSizeBytes, 1000 / 2, res); assert(coll.drop()); } @@ -54,22 +60,21 @@ function testShardedCollection(st) { {currentKey: 10, candidateKey: new Array(1000).join("a")} ])); - const res = st.s.adminCommand({analyzeShardKey: ns, key: candidateKey}); - assert.lt(res.avgDocSizeBytes, 1000, res); - assert.gt(res.avgDocSizeBytes, 3000 / 5, res); + const res = st.s.adminCommand({ + analyzeShardKey: ns, + key: candidateKey, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + }); + assert.lt(res.keyCharacteristics.avgDocSizeBytes, 1000, res); + assert.gt(res.keyCharacteristics.avgDocSizeBytes, 3000 / 5, res); assert(coll.drop()); } -const setParameterOpts = { - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) -}; - { - const st = new ShardingTest({shards: 2, rs: {nodes: 2, setParameter: setParameterOpts}}); + const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); testUnshardedCollection(st.s); testShardedCollection(st); @@ -78,7 +83,7 @@ const setParameterOpts = { } { - const rst = new ReplSetTest({nodes: 2, nodeOptions: {setParameter: setParameterOpts}}); + const rst = new ReplSetTest({nodes: 2}); rst.startSet(); rst.initiate(); const primary = rst.getPrimary(); diff --git a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js index a51041f6466a8..3c544737e66b6 100644 --- a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js +++ b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_basic.js @@ -1,5 +1,6 @@ /** - * Tests that the analyzeShardKey command returns correct cardinality and frequency metrics. 
+ * Tests that the analyzeShardKey command returns correct cardinality and frequency metrics when + * no document sampling is involved. * * @tags: [requires_fcv_70] */ @@ -7,6 +8,7 @@ "use strict"; load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); +load("jstests/sharding/analyze_shard_key/libs/cardinality_and_frequency_common.js"); // Define base test cases. For each test case: // - 'shardKey' is the shard key being analyzed. @@ -67,16 +69,19 @@ const noIndexTestCases = [ shardKey: {a: 1}, indexKey: {a: 1}, indexOptions: {collation: {locale: "fr"}}, // non-simple collation. + expectMetrics: false }, { shardKey: {a: 1}, indexKey: {a: 1}, indexOptions: {sparse: true}, + expectMetrics: false, }, { shardKey: {a: 1}, indexKey: {a: 1}, indexOptions: {partialFilterExpression: {a: {$gte: 1}}}, + expectMetrics: false }, ]; @@ -129,7 +134,6 @@ for (let testCaseBase of noIndexTestCases) { } const numNodesPerRS = 2; -const numMostCommonValues = 5; // The write concern to use when inserting documents into test collections. Waiting for the // documents to get replicated to all nodes is necessary since mongos runs the analyzeShardKey @@ -138,66 +142,6 @@ const writeConcern = { w: numNodesPerRS }; -/** - * Finds the profiler entries for all aggregate and count commands with the given comment on the - * given mongods and verifies that: - * - The aggregate commands used index scan and did not fetch any documents. - * - The count commands used fast count, i.e. did not scan the index or fetch any documents. - */ -function assertReadQueryPlans(mongodConns, dbName, collName, comment) { - mongodConns.forEach(conn => { - const profilerColl = conn.getDB(dbName).system.profile; - - profilerColl.find({"command.aggregate": collName, "command.comment": comment}) - .forEach(doc => { - if (doc.hasOwnProperty("ok") && (doc.ok === 0)) { - return; - } - - const firstStage = doc.command.pipeline[0]; - - if (firstStage.hasOwnProperty("$collStats")) { - return; - } - - assert(!doc.usedDisk, doc); - if (firstStage.hasOwnProperty("$match") || firstStage.hasOwnProperty("$limit")) { - // This corresponds to the aggregation that the analyzeShardKey command runs - // when analyzing a shard key with a unique supporting index, which should - // fetch at most 'numMostCommonValues' documents. - assert(doc.hasOwnProperty("planSummary"), doc); - assert(doc.planSummary.includes("COLLSCAN"), doc); - assert.lte(doc.docsExamined, numMostCommonValues, doc); - } else { - // This corresponds to the aggregation that the analyzeShardKey command runs - // when analyzing a shard key with a non-unique supporting index. - if (!firstStage.hasOwnProperty("$mergeCursors")) { - assert(doc.hasOwnProperty("planSummary"), doc); - assert(doc.planSummary.includes("IXSCAN"), doc); - } - - // Verify that it did not fetch any documents. - assert.eq(doc.docsExamined, 0, doc); - // Verify that it opted out of shard filtering. - assert.eq(doc.readConcern.level, "available", doc); - } - }); - - profilerColl.find({"command.count": collName, "command.comment": comment}).forEach(doc => { - if (doc.hasOwnProperty("ok") && (doc.ok === 0)) { - return; - } - - assert(doc.hasOwnProperty("planSummary"), doc); - assert(doc.planSummary.includes("RECORD_STORE_FAST_COUNT"), doc); - assert(!doc.usedDisk, doc); - // Verify that it did not scan the index or fetch any documents. - assert.eq(doc.keysExamined, 0, doc); - assert.eq(doc.docsExamined, 0, doc); - }); - }); -} - /** * Returns an object where each field name is set to the given value. 
*/ @@ -232,7 +176,9 @@ function testAnalyzeShardKeyNoUniqueIndex(conn, dbName, collName, currentShardKe const maxFrequency = shardKeyContainsId ? 1 : numDistinctValues; let sign = 1; for (let i = 1; i <= numDistinctValues; i++) { - const doc = makeDocument(fieldNames, sign * i); + // Test with integer field half of time and object field half of the time. + const val = sign * i; + const doc = makeDocument(fieldNames, Math.random() > 0.5 ? val : {foo: val}); const frequency = shardKeyContainsId ? 1 : i; for (let j = 1; j <= frequency; j++) { @@ -242,8 +188,8 @@ function testAnalyzeShardKeyNoUniqueIndex(conn, dbName, collName, currentShardKe const isMostCommon = (maxFrequency - frequency) < numMostCommonValues; if (testCase.expectMetrics && isMostCommon) { mostCommonValues.push({ - value: AnalyzeShardKeyUtil.extractShardKeyValueFromDocument( - doc, testCase.shardKey, testCase.indexKey), + value: AnalyzeShardKeyUtil.extractShardKeyValueFromDocument(doc, + testCase.shardKey), frequency }); } @@ -266,12 +212,18 @@ function testAnalyzeShardKeyNoUniqueIndex(conn, dbName, collName, currentShardKe // key values. const [docs0, metrics0] = makeSubTestCase(numMostCommonValues - 1); assert.commandWorked(coll.insert(docs0, {writeConcern})); - const res0 = assert.commandWorked(conn.adminCommand( - {analyzeShardKey: ns, key: testCase.shardKey, comment: testCase.comment})); + const res0 = conn.adminCommand({ + analyzeShardKey: ns, + key: testCase.shardKey, + comment: testCase.comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + }); if (testCase.expectMetrics) { - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res0, metrics0); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res0.keyCharacteristics, metrics0); } else { - AnalyzeShardKeyUtil.assertNotContainKeyCharacteristicsMetrics(res0); + assert.commandFailedWithCode(res0, ErrorCodes.IllegalOperation); } assert.commandWorked(coll.remove({})); @@ -279,12 +231,18 @@ function testAnalyzeShardKeyNoUniqueIndex(conn, dbName, collName, currentShardKe // key values. const [docs1, metrics1] = makeSubTestCase(numMostCommonValues); assert.commandWorked(coll.insert(docs1, {writeConcern})); - const res1 = assert.commandWorked(conn.adminCommand( - {analyzeShardKey: ns, key: testCase.shardKey, comment: testCase.comment})); + const res1 = conn.adminCommand({ + analyzeShardKey: ns, + key: testCase.shardKey, + comment: testCase.comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + }); if (testCase.expectMetrics) { - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res1, metrics1); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res1.keyCharacteristics, metrics1); } else { - AnalyzeShardKeyUtil.assertNotContainKeyCharacteristicsMetrics(res1); + assert.commandFailedWithCode(res1, ErrorCodes.IllegalOperation); } assert.commandWorked(coll.remove({})); @@ -292,12 +250,18 @@ function testAnalyzeShardKeyNoUniqueIndex(conn, dbName, collName, currentShardKe // key values. 
const [docs2, metrics2] = makeSubTestCase(numMostCommonValues * 25); assert.commandWorked(coll.insert(docs2, {writeConcern})); - const res2 = assert.commandWorked(conn.adminCommand( - {analyzeShardKey: ns, key: testCase.shardKey, comment: testCase.comment})); + const res2 = conn.adminCommand({ + analyzeShardKey: ns, + key: testCase.shardKey, + comment: testCase.comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + }); if (testCase.expectMetrics) { - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res2, metrics2); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res2.keyCharacteristics, metrics2); } else { - AnalyzeShardKeyUtil.assertNotContainKeyCharacteristicsMetrics(res2); + assert.commandFailedWithCode(res2, ErrorCodes.IllegalOperation); } assert.commandWorked(coll.remove({})); } @@ -324,11 +288,12 @@ function testAnalyzeShardKeyUniqueIndex(conn, dbName, collName, currentShardKey, let sign = 1; for (let i = 1; i <= numDistinctValues; i++) { - const doc = makeDocument(fieldNames, sign * i); + // Test with integer field half of time and object field half of the time. + const val = sign * i; + const doc = makeDocument(fieldNames, Math.random() > 0.5 ? val : {foo: val}); docs.push(doc); mostCommonValues.push({ - value: AnalyzeShardKeyUtil.extractShardKeyValueFromDocument( - doc, testCase.shardKey, testCase.indexKey), + value: AnalyzeShardKeyUtil.extractShardKeyValueFromDocument(doc, testCase.shardKey), frequency: 1 }); @@ -350,35 +315,54 @@ function testAnalyzeShardKeyUniqueIndex(conn, dbName, collName, currentShardKey, // key values. const [docs0, metrics0] = makeSubTestCase(numMostCommonValues - 1); assert.commandWorked(coll.insert(docs0, {writeConcern})); - const res0 = assert.commandWorked(conn.adminCommand( - {analyzeShardKey: ns, key: testCase.shardKey, comment: testCase.comment})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res0, metrics0); + const res0 = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: testCase.shardKey, + comment: testCase.comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res0.keyCharacteristics, metrics0); assert.commandWorked(coll.remove({})); // Analyze the shard key while the collection has exactly 'numMostCommonValues' distinct shard // key values. const [docs1, metrics1] = makeSubTestCase(numMostCommonValues); assert.commandWorked(coll.insert(docs1, {writeConcern})); - const res1 = assert.commandWorked(conn.adminCommand( - {analyzeShardKey: ns, key: testCase.shardKey, comment: testCase.comment})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res1, metrics1); + const res1 = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: testCase.shardKey, + comment: testCase.comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res1.keyCharacteristics, metrics1); assert.commandWorked(coll.remove({})); // Analyze the shard key while the collection has more than 'numMostCommonValues' distinct shard // key values. 
const [docs2, metrics2] = makeSubTestCase(numMostCommonValues * 25); assert.commandWorked(coll.insert(docs2, {writeConcern})); - const res2 = assert.commandWorked(conn.adminCommand( - {analyzeShardKey: ns, key: testCase.shardKey, comment: testCase.comment})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res2, metrics2); + const res2 = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: testCase.shardKey, + comment: testCase.comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res2.keyCharacteristics, metrics2); assert.commandWorked(coll.remove({})); } -function testAnalyzeCandidateShardKeysUnshardedCollection(conn, mongodConns) { +function testAnalyzeCandidateShardKeysUnshardedCollection(conn, {rst, st}) { const dbName = "testDb"; const collName = "testCollUnshardedCandidate"; const db = conn.getDB(dbName); const coll = db.getCollection(collName); + const mongodConns = getMongodConns({rst, st}); jsTest.log( `Testing candidate shard keys for an unsharded collection: ${tojson({dbName, collName})}`); @@ -404,7 +388,14 @@ function testAnalyzeCandidateShardKeysUnshardedCollection(conn, mongodConns) { } AnalyzeShardKeyUtil.disableProfiler(mongodConns, dbName); - assertReadQueryPlans(mongodConns, dbName, collName, testCase.comment); + assertAggregateQueryPlans( + mongodConns, + dbName, + collName, + testCase.comment, + // On a replica set, the analyzeShardKey command runs the aggregate commands locally, + // i.e. the commands do not go through the service entry point so do not get profiled. + testCase.expectMetrics && !rst /* expectEntries */); if (testCase.indexKey && !AnalyzeShardKeyUtil.isIdKeyPattern(testCase.indexKey)) { assert.commandWorked(coll.dropIndex(testCase.indexKey)); } @@ -413,7 +404,7 @@ function testAnalyzeCandidateShardKeysUnshardedCollection(conn, mongodConns) { assert.commandWorked(db.dropDatabase()); } -function testAnalyzeCandidateShardKeysShardedCollection(st, mongodConns) { +function testAnalyzeCandidateShardKeysShardedCollection(st) { const dbName = "testDb"; const collName = "testCollShardedCandidate"; const ns = dbName + "." 
+ collName; @@ -421,6 +412,7 @@ function testAnalyzeCandidateShardKeysShardedCollection(st, mongodConns) { const currentShardKeySplitPoint = {skey: 0}; const db = st.s.getDB(dbName); const coll = db.getCollection(collName); + const mongodConns = getMongodConns({st}); jsTest.log( `Testing candidate shard keys for a sharded collection: ${tojson({dbName, collName})}`); @@ -461,7 +453,11 @@ function testAnalyzeCandidateShardKeysShardedCollection(st, mongodConns) { } AnalyzeShardKeyUtil.disableProfiler(mongodConns, dbName); - assertReadQueryPlans(mongodConns, dbName, collName, testCase.comment); + assertAggregateQueryPlans(mongodConns, + dbName, + collName, + testCase.comment, + testCase.expectMetrics /* expectEntries */); if (testCase.indexKey && !AnalyzeShardKeyUtil.isIdKeyPattern(testCase.indexKey)) { assert.commandWorked(coll.dropIndex(testCase.indexKey)); } @@ -470,9 +466,10 @@ function testAnalyzeCandidateShardKeysShardedCollection(st, mongodConns) { assert.commandWorked(db.dropDatabase()); } -function testAnalyzeCurrentShardKeys(st, mongodConns) { +function testAnalyzeCurrentShardKeys(st) { const dbName = "testDb"; const db = st.s.getDB(dbName); + const mongodConns = getMongodConns({st}); jsTest.log(`Testing current shard key for sharded collections: ${tojson({dbName})}`); @@ -517,30 +514,27 @@ function testAnalyzeCurrentShardKeys(st, mongodConns) { } AnalyzeShardKeyUtil.disableProfiler(mongodConns, dbName); - assertReadQueryPlans(mongodConns, dbName, collName, testCase.comment); + assertAggregateQueryPlans(mongodConns, + dbName, + collName, + testCase.comment, + testCase.expectMetrics /* expectEntries */); }); assert.commandWorked(db.dropDatabase()); } const setParameterOpts = { - analyzeShardKeyNumMostCommonValues: numMostCommonValues, - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. 
- "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) + analyzeShardKeyNumMostCommonValues: numMostCommonValues }; { const st = new ShardingTest({shards: numNodesPerRS, rs: {nodes: 2, setParameter: setParameterOpts}}); - const mongodConns = []; - st.rs0.nodes.forEach(node => mongodConns.push(node)); - st.rs1.nodes.forEach(node => mongodConns.push(node)); - testAnalyzeCandidateShardKeysUnshardedCollection(st.s, mongodConns); - testAnalyzeCandidateShardKeysShardedCollection(st, mongodConns); - testAnalyzeCurrentShardKeys(st, mongodConns); + testAnalyzeCandidateShardKeysUnshardedCollection(st.s, {st}); + testAnalyzeCandidateShardKeysShardedCollection(st); + testAnalyzeCurrentShardKeys(st); st.stop(); } @@ -550,9 +544,8 @@ const setParameterOpts = { new ReplSetTest({nodes: numNodesPerRS, nodeOptions: {setParameter: setParameterOpts}}); rst.startSet(); rst.initiate(); - const mongodConns = rst.nodes; - testAnalyzeCandidateShardKeysUnshardedCollection(rst.getPrimary(), mongodConns); + testAnalyzeCandidateShardKeysUnshardedCollection(rst.getPrimary(), {rst}); rst.stopSet(); } diff --git a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_index_selection.js b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_index_selection.js index df2749c10fabe..915dfd6d6c348 100644 --- a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_index_selection.js +++ b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_index_selection.js @@ -34,8 +34,14 @@ function testAnalyzeShardKey(conn, {docs, indexSpecs, shardKeys, metrics}) { assert.commandWorked(coll.insert(docs, {writeConcern})); for (let shardKey of shardKeys) { - const res = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: shardKey})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res, metrics); + const res = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: shardKey, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, metrics); } } @@ -136,12 +142,7 @@ function runTest(conn) { } const setParameterOpts = { - analyzeShardKeyNumMostCommonValues: numMostCommonValues, - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) - + analyzeShardKeyNumMostCommonValues: numMostCommonValues }; { diff --git a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_sample.js b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_sample.js new file mode 100644 index 0000000000000..863e4de9abea6 --- /dev/null +++ b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_sample.js @@ -0,0 +1,340 @@ +/** + * Tests that the analyzeShardKey command returns correct cardinality and frequency metrics when + * document sampling is involved. + * + * @tags: [requires_fcv_70] + */ +(function() { +"use strict"; + +load("jstests/libs/fixture_helpers.js"); +load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); +load("jstests/sharding/analyze_shard_key/libs/cardinality_and_frequency_common.js"); + +const numNodesPerRS = 2; + +const batchSize = 1000; +// The write concern to use when inserting documents into test collections. 
Waiting for the +// documents to get replicated to all nodes is necessary since mongos runs the analyzeShardKey +// command with readPreference "secondaryPreferred". +const writeConcern = { + w: numNodesPerRS +}; + +const defaultSampleSize = 10000; +const numDocsTotal = 50000; + +const sampleSizeTestCases = [ + {sampleSize: Math.floor(0.8 * numDocsTotal)}, + {sampleSize: numDocsTotal}, + {sampleSize: Math.floor(1.5 * numDocsTotal)}, + { + sampleSize: Math.floor(0.5 * numMostCommonValues), + expectedErrCodes: [ + // The number of sampled documents for the monotonicity step is 0 because the sample + // rate is too low. + 7826505, + // The number of sampled documents for the monotonicity step is greater than 0 so + // the command fails the validation in the cardinality and frequency step because + // the requested sample size is less than 'numMostCommonValues'. + ErrorCodes.InvalidOptions, + ] + } +]; + +function makeSampleRateTestCases(isUnique) { + return [ + {sampleRate: 0.35}, + {sampleRate: 1}, + { + // The expected sample size is 1, which is less than 'numMostCommonValues' (defaults to + // 5). + sampleRate: 1 / numDocsTotal, + expectedErrCodes: [ + // The number of sampled documents for the monotonicity step is 0 because the sample + // rate is too low. + 7826505, + // The number of sampled documents for the monotonicity step is greater than 0 but + // the number of sampled documents for the cardinality and frequency step is 0. + isUnique ? 7826506 : 7826507, + ] + }, + { + // The expected sample size is less than 1, which is less than 'numMostCommonValues'. + sampleRate: 0.1 / numDocsTotal, + expectedErrCodes: [ + // The number of sampled documents for the monotonicity step is 0 because the sample + // rate is too low. + 7826505, + // The number of sampled documents for the monotonicity step is greater than 0 but + // the number of sampled documents for the cardinality and frequency step is 0. + isUnique ? 7826506 : 7826507, + ] + }, + ]; +} +const sampleSizeTestCasesUnique = makeSampleRateTestCases(true /* isUnique */); +const sampleSizeTestCasesNotUnique = makeSampleRateTestCases(false /* isUnique */); + +function runTest(conn, {isUnique, isShardedColl, st, rst}) { + const dbName = "testDb"; + const collName = "testColl"; + const ns = dbName + "." + collName; + const db = conn.getDB(dbName); + const coll = db.getCollection(collName); + const mongodConns = getMongodConns({st, rst}); + + jsTest.log("Testing the test cases for " + tojsononeline({isUnique, isShardedColl})); + + const indexOptions = isUnique ? {unique: true} : {}; + assert.commandWorked(coll.createIndex({a: 1}, indexOptions)); + const isClusteredColl = AnalyzeShardKeyUtil.isClusterCollection(conn, dbName, collName); + + if (isShardedColl) { + assert(st); + assert.commandWorked(st.s.adminCommand({enableSharding: dbName})); + st.ensurePrimaryShard(dbName, st.shard0.name); + assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {a: "hashed"}})); + assert.commandWorked( + st.s.adminCommand({moveChunk: ns, find: {a: 1}, to: st.shard1.shardName})); + } + + // Insert documents for this collection. + let docsBuffer = []; + let numDocsInserted = 0; + const bufferOrInsertDoc = (doc) => { + docsBuffer.push(doc); + if (docsBuffer.length == batchSize) { + assert.commandWorked(coll.insert(docsBuffer, {writeConcern})); + numDocsInserted += docsBuffer.length; + docsBuffer = []; + } + }; + + // Only set if the shard key is unique. 
+ let mostCommonValue0, mostCommonRatio0, mostCommonValue1, mostCommonRatio1; + if (!isUnique) { + // The documents for the most common value. + mostCommonValue0 = -87654321; + mostCommonRatio0 = 0.5; + for (let i = 0; i < (numDocsTotal * mostCommonRatio0); i++) { + bufferOrInsertDoc({a: mostCommonValue0}); + } + // The documents for the second most common value. + mostCommonValue1 = -12345678; + mostCommonRatio1 = 0.25; + for (let i = 0; i < (numDocsTotal * mostCommonRatio1); i++) { + bufferOrInsertDoc({a: mostCommonValue1}); + } + } + // The other documents. + let aValue = 1; + while ((numDocsInserted + docsBuffer.length) < numDocsTotal) { + bufferOrInsertDoc({a: aValue++}); + } + + // Insert any remaining docs in the buffer. + assert.commandWorked(coll.insert(docsBuffer, {writeConcern})); + assert.eq(coll.find().itcount(), numDocsTotal); + + const ratioMaxDiff = 0.1; + const checkMostCommonValuesFn = (metrics) => { + let startIndex = 0; + if (!isUnique) { + const mostCommon0 = metrics.mostCommonValues[0]; + assert.eq(mostCommon0.value, {a: mostCommonValue0}, metrics); + AnalyzeShardKeyUtil.assertApprox(mostCommon0.frequency / metrics.numDocsSampled, + mostCommonRatio0, + {metrics}, + ratioMaxDiff); + const mostCommon1 = metrics.mostCommonValues[1]; + assert.eq(mostCommon1.value, {a: mostCommonValue1}, metrics); + AnalyzeShardKeyUtil.assertApprox(mostCommon1.frequency / metrics.numDocsSampled, + mostCommonRatio1, + {metrics}, + ratioMaxDiff); + + startIndex += 2; + } + for (let i = startIndex; i < metrics.mostCommonValues.length; i++) { + const mostCommon = metrics.mostCommonValues[i]; + assert.lte(Math.abs(mostCommon.value.a), aValue, metrics); + assert.eq(mostCommon.frequency, 1, metrics); + } + }; + + AnalyzeShardKeyUtil.enableProfiler(mongodConns, dbName); + + for (let isHashed of [false, true]) { + if (isHashed && isUnique) { + // Hashed indexes cannot have a uniqueness constraint. + continue; + } + + const shardKey = {a: isHashed ? "hashed" : 1}; + const monotonicityType = + isClusteredColl ? "unknown" : (isHashed ? "not monotonic" : "monotonic"); + + const comment = UUID(); + + // Cannot specify both sampleRate and sampleSize. + assert.commandFailedWithCode(conn.adminCommand({ + analyzeShardKey: ns, + key: shardKey, + comment, + sampleRate: 0.5, + sampleSize: 10000, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + }), + ErrorCodes.InvalidOptions); + + // sampleSize < numTotalDocs (default). + jsTest.log("Testing default 'sampleSize': " + + tojsononeline({defaultSampleSize, isHashed, isUnique, isShardedColl})); + + const res = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: shardKey, + comment: comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + jsTest.log("Response for default 'sampleSize': " + tojsononeline({defaultSampleSize, res})); + const metrics = res.keyCharacteristics; + + AnalyzeShardKeyUtil.validateKeyCharacteristicsMetrics(metrics); + assert.eq(metrics.numDocsTotal, numDocsTotal, res); + assert.lte(metrics.numDocsSampled, defaultSampleSize, metrics); + checkMostCommonValuesFn(metrics); + assert.eq(metrics.monotonicity.type, monotonicityType, metrics); + assertAggregateQueryPlans(mongodConns, + dbName, + collName, + comment, + // On a replica set, the analyzeShardKey command runs the + // aggregate commands locally, i.e. 
the commands do not go + // through the service entry point so do not get profiled. + !rst /* expectEntries */); + + for (let {sampleSize, expectedErrCodes} of sampleSizeTestCases) { + jsTest.log("Testing custom 'sampleSize': " + + tojsononeline({sampleSize, isHashed, isUnique, isShardedColl})); + const comment = UUID(); + const res = conn.adminCommand({ + analyzeShardKey: ns, + key: shardKey, + sampleSize, + comment: comment, + // Skip calculating the read and write distribution metrics since they are not + // needed by this test. + readWriteDistribution: false + }); + jsTest.log("Response custom 'sampleSize': " + tojsononeline({sampleSize, res})); + + if (expectedErrCodes) { + assert.commandFailedWithCode(res, expectedErrCodes); + continue; + } + assert.commandWorked(res); + const metrics = res.keyCharacteristics; + + AnalyzeShardKeyUtil.validateKeyCharacteristicsMetrics(metrics); + assert.eq(metrics.numDocsTotal, numDocsTotal, res); + assert.lte(res.keyCharacteristics.numDocsSampled, sampleSize, res); + if (expectedErrCodes) { + continue; + } + checkMostCommonValuesFn(metrics); + assert.eq(metrics.monotonicity.type, monotonicityType, metrics); + assertAggregateQueryPlans(mongodConns, + dbName, + collName, + comment, + // On a replica set, the analyzeShardKey command runs the + // aggregate commands locally, i.e. the commands do not go + // through the service entry point so do not get profiled. + !rst /* expectEntries */); + } + + const sampleRateTestCases = + isUnique ? sampleSizeTestCasesUnique : sampleSizeTestCasesNotUnique; + for (let {sampleRate, expectedErrCodes} of sampleRateTestCases) { + jsTest.log("Testing custom 'sampleRate': " + + tojsononeline({sampleRate, isHashed, isUnique, isShardedColl})); + const comment = UUID(); + const res = conn.adminCommand({ + analyzeShardKey: ns, + key: shardKey, + sampleRate, + comment: comment, + // Skip calculating the read and write distribution metrics since they are not + // needed by this test. + readWriteDistribution: false + }); + jsTest.log("Response for custom 'sampleRate': " + tojsononeline({sampleRate, res})); + + const sampleSize = Math.ceil(sampleRate * numDocsTotal); + if (!res.ok) { + assert.lte(sampleSize, 1); + assert.commandFailedWithCode(res, expectedErrCodes); + continue; + } + assert.commandWorked(res); + const metrics = res.keyCharacteristics; + + AnalyzeShardKeyUtil.validateKeyCharacteristicsMetrics(metrics); + assert.eq(metrics.numDocsTotal, numDocsTotal, res); + assert.lte(metrics.numDocsSampled, sampleSize, metrics); + if (expectedErrCodes) { + continue; + } + checkMostCommonValuesFn(metrics); + assert.eq(metrics.monotonicity.type, monotonicityType, metrics); + assertAggregateQueryPlans(mongodConns, + dbName, + collName, + comment, + // On a replica set, the analyzeShardKey command runs the + // aggregate commands locally, i.e. the commands do not go + // through the service entry point so do not get profiled. 
+ !rst /* expectEntries */); + } + } + + AnalyzeShardKeyUtil.disableProfiler(mongodConns, dbName); + assert(coll.drop()); +} + +const setParameterOpts = { + analyzeShardKeyNumMostCommonValues: numMostCommonValues, + analyzeShardKeyCharacteristicsDefaultSampleSize: defaultSampleSize +}; + +{ + const st = + new ShardingTest({shards: 2, rs: {nodes: numNodesPerRS, setParameter: setParameterOpts}}); + + for (let isShardedColl of [false, true]) { + runTest(st.s, {isUnique: true, isShardedColl, st}); + runTest(st.s, {isUnique: false, isShardedColl, st}); + } + + st.stop(); +} + +{ + const rst = + new ReplSetTest({nodes: numNodesPerRS, nodeOptions: {setParameter: setParameterOpts}}); + rst.startSet(); + rst.initiate(); + const primary = rst.getPrimary(); + + runTest(primary, {isUnique: true, isShardedColl: false, rst}); + runTest(primary, {isUnique: false, isShardedColl: false, rst}); + + rst.stopSet(); +} +})(); diff --git a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_stress.js b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_stress.js index 8a43f12f98ca0..326e7d463c937 100644 --- a/jstests/sharding/analyze_shard_key/cardinality_and_frequency_stress.js +++ b/jstests/sharding/analyze_shard_key/cardinality_and_frequency_stress.js @@ -91,8 +91,7 @@ function testAnalyzeShardKeysUnshardedCollection(conn, mongodConns) { docs.push(doc); mostCommonValues.push({ - value: AnalyzeShardKeyUtil.extractShardKeyValueFromDocument( - doc, candidateShardKey, candidateShardKey), + value: AnalyzeShardKeyUtil.extractShardKeyValueFromDocument(doc, candidateShardKey), frequency: 1 }); } @@ -100,9 +99,15 @@ function testAnalyzeShardKeysUnshardedCollection(conn, mongodConns) { AnalyzeShardKeyUtil.enableProfiler(mongodConns, dbName); - const res = assert.commandWorked( - conn.adminCommand({analyzeShardKey: ns, key: candidateShardKey, comment})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res, { + const res = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: candidateShardKey, + comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, { numDocs, isUnique: false, numDistinctValues: numDocs, @@ -150,8 +155,7 @@ function testAnalyzeShardKeysShardedCollection(st, mongodConns) { docs.push(doc); mostCommonValues.push({ - value: AnalyzeShardKeyUtil.extractShardKeyValueFromDocument( - doc, candidateShardKey, candidateShardKey), + value: AnalyzeShardKeyUtil.extractShardKeyValueFromDocument(doc, candidateShardKey), frequency: 1 }); @@ -161,9 +165,15 @@ function testAnalyzeShardKeysShardedCollection(st, mongodConns) { AnalyzeShardKeyUtil.enableProfiler(mongodConns, dbName); - const res = assert.commandWorked( - st.s.adminCommand({analyzeShardKey: ns, key: candidateShardKey, comment})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res, { + const res = assert.commandWorked(st.s.adminCommand({ + analyzeShardKey: ns, + key: candidateShardKey, + comment, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. 
+ readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, { numDocs, isUnique: false, numDistinctValues: numDocs, @@ -179,11 +189,7 @@ function testAnalyzeShardKeysShardedCollection(st, mongodConns) { const setParameterOpts = { internalDocumentSourceGroupMaxMemoryBytes, - analyzeShardKeyNumMostCommonValues: numMostCommonValues, - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}), + analyzeShardKeyNumMostCommonValues: numMostCommonValues }; { diff --git a/jstests/sharding/analyze_shard_key/configure_query_analyzer_auth.js b/jstests/sharding/analyze_shard_key/configure_query_analyzer_auth.js index 34a16d035f6a2..df2da3bfd29fb 100644 --- a/jstests/sharding/analyze_shard_key/configure_query_analyzer_auth.js +++ b/jstests/sharding/analyze_shard_key/configure_query_analyzer_auth.js @@ -27,7 +27,7 @@ function testConfigureQueryAnalyzer(conn) { assert(adminDb.logout()); const mode = "full"; - const sampleRate = 100; + const samplesPerSecond = 1; // Set up a user without any role or privilege. assert(adminDb.auth("super", "super")); @@ -36,10 +36,10 @@ function testConfigureQueryAnalyzer(conn) { // Verify that the user is not authorized to run the configureQueryAnalyzer command. assert(adminDb.auth("user_no_priv", "pwd")); assert.commandFailedWithCode( - adminDb.runCommand({"configureQueryAnalyzer": ns0, mode, sampleRate}), + adminDb.runCommand({"configureQueryAnalyzer": ns0, mode, samplesPerSecond}), ErrorCodes.Unauthorized); assert.commandFailedWithCode( - adminDb.runCommand({"configureQueryAnalyzer": ns1, mode, sampleRate}), + adminDb.runCommand({"configureQueryAnalyzer": ns1, mode, samplesPerSecond}), ErrorCodes.Unauthorized); assert(adminDb.logout()); @@ -60,9 +60,10 @@ function testConfigureQueryAnalyzer(conn) { // Verify that the user is authorized to run the configureQueryAnalyzer command against ns0 // but not ns1. assert(adminDb.auth("user_with_explicit_ns0_priv", "pwd")); - assert.commandWorked(adminDb.runCommand({"configureQueryAnalyzer": ns0, mode, sampleRate})); + assert.commandWorked( + adminDb.runCommand({"configureQueryAnalyzer": ns0, mode, samplesPerSecond})); assert.commandFailedWithCode( - adminDb.runCommand({"configureQueryAnalyzer": ns1, mode, sampleRate}), + adminDb.runCommand({"configureQueryAnalyzer": ns1, mode, samplesPerSecond}), ErrorCodes.Unauthorized); assert(adminDb.logout()); @@ -77,8 +78,10 @@ function testConfigureQueryAnalyzer(conn) { // Verify that the user is authorized to run the configureQueryAnalyzer command against both // ns0 and ns1. assert(adminDb.auth("user_cluster_mgr", "pwd")); - assert.commandWorked(adminDb.runCommand({"configureQueryAnalyzer": ns0, mode, sampleRate})); - assert.commandWorked(adminDb.runCommand({"configureQueryAnalyzer": ns1, mode, sampleRate})); + assert.commandWorked( + adminDb.runCommand({"configureQueryAnalyzer": ns0, mode, samplesPerSecond})); + assert.commandWorked( + adminDb.runCommand({"configureQueryAnalyzer": ns1, mode, samplesPerSecond})); assert(adminDb.logout()); // Set up a user with the 'dbAdmin' role. @@ -89,10 +92,13 @@ function testConfigureQueryAnalyzer(conn) { // Verify that the user is authorized to run the configureQueryAnalyzer command against both // ns0 and ns1 but not against a ns in some other database. 
assert(adminDb.auth("user_db_admin", "pwd")); - assert.commandWorked(adminDb.runCommand({"configureQueryAnalyzer": ns0, mode, sampleRate})); - assert.commandWorked(adminDb.runCommand({"configureQueryAnalyzer": ns1, mode, sampleRate})); + assert.commandWorked( + adminDb.runCommand({"configureQueryAnalyzer": ns0, mode, samplesPerSecond})); + assert.commandWorked( + adminDb.runCommand({"configureQueryAnalyzer": ns1, mode, samplesPerSecond})); assert.commandFailedWithCode( - adminDb.runCommand({"configureQueryAnalyzer": otherDbName + collName0, mode, sampleRate}), + adminDb.runCommand( + {"configureQueryAnalyzer": otherDbName + collName0, mode, samplesPerSecond}), ErrorCodes.Unauthorized); assert(adminDb.logout()); } diff --git a/jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js b/jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js index 99a40ab38c360..f72cca74b9257 100644 --- a/jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js +++ b/jstests/sharding/analyze_shard_key/configure_query_analyzer_basic.js @@ -6,6 +6,9 @@ (function() { "use strict"; +// Set this to opt into the 'samplesPerSecond' check. +TestData.testingDiagnosticsEnabled = false; + const dbNameBase = "testDb"; function testNonExistingCollection(testCases, tenantId) { @@ -15,17 +18,14 @@ function testNonExistingCollection(testCases, tenantId) { testCases.forEach(testCase => { jsTest.log(`Running configureQueryAnalyzer command against an non-existing collection: ${ - tojson(testCase)}`); - const cmdObj = {configureQueryAnalyzer: ns, mode: "full", sampleRate: 1}; + tojson({testCase, ns})}`); + const cmdObj = {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1}; if (tenantId) { cmdObj.$tenant = tenantId; } const res = testCase.conn.adminCommand(cmdObj); - // If the command is not supported, it should fail even before the collection validation - // step. That is, it should fail with an IllegalOperation error instead of a - // NamespaceNotFound error. const expectedErrorCode = - testCase.isSupported ? ErrorCodes.NamespaceNotFound : testCase.expectedErrorCode; + testCase.expectedErrorCode ? testCase.expectedErrorCode : ErrorCodes.NamespaceNotFound; assert.commandFailedWithCode(res, expectedErrorCode); }); } @@ -38,38 +38,45 @@ function testExistingCollection(writeConn, testCases) { assert.commandWorked(db.createCollection(collName)); testCases.forEach(testCase => { + if (testCase.conn.isConfigsvr) { + // The collection created below will not exist on the config server. + return; + } + jsTest.log( `Running configureQueryAnalyzer command against an existing collection: ${tojson(testCase)}`); - // Can set 'sampleRate' to > 0. - const basicRes = - testCase.conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 0.1}); - if (!testCase.isSupported) { + // Can set 'samplesPerSecond' to > 0. + const basicRes = testCase.conn.adminCommand( + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 0.1}); + if (testCase.expectedErrorCode) { assert.commandFailedWithCode(basicRes, testCase.expectedErrorCode); // There is no need to test the remaining cases. 
return; } assert.commandWorked(basicRes); - assert.commandWorked( - testCase.conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1})); assert.commandWorked(testCase.conn.adminCommand( - {configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000})); + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1})); + assert.commandWorked(testCase.conn.adminCommand( + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 50})); - // Cannot set 'sampleRate' to 0. + // Cannot set 'samplesPerSecond' to 0. assert.commandFailedWithCode( - testCase.conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 0}), + testCase.conn.adminCommand( + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 0}), ErrorCodes.InvalidOptions); - // Cannot set 'sampleRate' to larger than 1'000'000. + // Cannot set 'samplesPerSecond' to larger than 50. assert.commandFailedWithCode( testCase.conn.adminCommand( - {configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000001}), + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 51}), ErrorCodes.InvalidOptions); - // Cannot specify 'sampleRate' when 'mode' is "off". + // Cannot specify 'samplesPerSecond' when 'mode' is "off". assert.commandFailedWithCode( - testCase.conn.adminCommand({configureQueryAnalyzer: ns, mode: "off", sampleRate: 1}), + testCase.conn.adminCommand( + {configureQueryAnalyzer: ns, mode: "off", samplesPerSecond: 1}), ErrorCodes.InvalidOptions); assert.commandWorked(testCase.conn.adminCommand({configureQueryAnalyzer: ns, mode: "off"})); @@ -77,14 +84,14 @@ function testExistingCollection(writeConn, testCases) { assert.commandFailedWithCode(testCase.conn.adminCommand({ configureQueryAnalyzer: ns, mode: "full", - sampleRate: 1, + samplesPerSecond: 1, readConcern: {level: "available"} }), ErrorCodes.InvalidOptions); assert.commandFailedWithCode(testCase.conn.adminCommand({ configureQueryAnalyzer: ns, mode: "full", - sampleRate: 1, + samplesPerSecond: 1, writeConcern: {w: "majority"} }), ErrorCodes.InvalidOptions); @@ -98,25 +105,34 @@ function testExistingCollection(writeConn, testCases) { const shard0Secondaries = st.rs0.getSecondaries(); const configPrimary = st.configRS.getPrimary(); const configSecondaries = st.configRS.getSecondaries(); + st.configRS.nodes.forEach(node => { + node.isConfigsvr = true; + }); const testCases = []; - // The configureQueryAnalyzer command is only supported on mongos and configsvr primary mongod. - testCases.push({conn: st.s, isSupported: true}); - testCases.push({conn: configPrimary, isSupported: true}); - configSecondaries.forEach(node => { - testCases.push( - {conn: node, isSupported: false, expectedErrorCode: ErrorCodes.NotWritablePrimary}); + // The configureQueryAnalyzer command is only supported on mongos and shardsvr primary mongod. + testCases.push({conn: st.s}); + testCases.push({ + conn: shard0Primary, + // It is illegal to send a configureQueryAnalyzer command to a shardsvr mongod without + // attaching the database version. + expectedErrorCode: ErrorCodes.IllegalOperation }); - // If there's a catalog shard, shard0 will be the config server and can accept - // configureQueryAnalyzer. - testCases.push( - Object.assign({conn: shard0Primary}, - TestData.catalogShard - ? 
{isSupported: true} - : {isSupported: false, expectedErrorCode: ErrorCodes.IllegalOperation})); shard0Secondaries.forEach(node => { - testCases.push( - {conn: node, isSupported: false, expectedErrorCode: ErrorCodes.NotWritablePrimary}); + testCases.push({ + conn: node, + // configureQueryAnalyzer is a primary-only command. + expectedErrorCode: ErrorCodes.NotWritablePrimary + }); + }); + // The analyzeShardKey command is not supported on dedicated configsvr mongods. + testCases.push({conn: configPrimary, expectedErrorCode: ErrorCodes.IllegalOperation}); + configSecondaries.forEach(node => { + testCases.push({ + conn: node, + // configureQueryAnalyzer is a primary-only command. + expectedErrorCode: ErrorCodes.NotWritablePrimary + }); }); testNonExistingCollection(testCases); @@ -134,10 +150,13 @@ function testExistingCollection(writeConn, testCases) { const testCases = []; // The configureQueryAnalyzer command is only supported on primary mongod. - testCases.push(Object.assign({conn: primary, isSupported: true})); + testCases.push(Object.assign({conn: primary})); secondaries.forEach(node => { - testCases.push( - {conn: node, isSupported: false, expectedErrorCode: ErrorCodes.NotWritablePrimary}); + testCases.push({ + conn: node, + // configureQueryAnalyzer is a primary-only command. + expectedErrorCode: ErrorCodes.NotWritablePrimary + }); }); testNonExistingCollection(testCases); @@ -183,4 +202,31 @@ if (!TestData.auth) { MongoRunner.stopMongod(mongod); } + +{ + // Verify that an external client cannot run the configureQueryAnalyzer command against a + // shardsvr mongod. + + // Start a sharded cluster with testing diagnostics (TestingProctor) disabled so the command + // below not bypass the internal client check. + TestData.testingDiagnosticsEnabled = false; + + const st = new ShardingTest({shards: 1, rs: {nodes: 1}}); + const shard0Primary = st.rs0.getPrimary(); + + const dbName = "testDb"; + const collName = "testColl"; + const ns = dbName + "." + collName; + + assert.commandWorked(st.s.getCollection(ns).insert({x: 1})); + + const configureRes = assert.commandFailedWithCode( + shard0Primary.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1}), + ErrorCodes.IllegalOperation); + // Verify that the error message is as expected. + assert.eq(configureRes.errmsg, + "Cannot run configureQueryAnalyzer command directly against a shardsvr mongod"); + + st.stop(); +} })(); diff --git a/jstests/sharding/analyze_shard_key/configure_query_analyzer_cmd_validation.js b/jstests/sharding/analyze_shard_key/configure_query_analyzer_cmd_validation.js index c200332a2e639..401932cf66191 100644 --- a/jstests/sharding/analyze_shard_key/configure_query_analyzer_cmd_validation.js +++ b/jstests/sharding/analyze_shard_key/configure_query_analyzer_cmd_validation.js @@ -17,7 +17,7 @@ function runTest(conn) { const aggCmdObj = { configureQueryAnalyzer: dbName + "." 
+ collName, mode: "full", - sampleRate: 1 + samplesPerSecond: 1 }; assert.commandFailedWithCode( conn.adminCommand(aggCmdObj), diff --git a/jstests/sharding/analyze_shard_key/configure_query_analyzer_database_versioning.js b/jstests/sharding/analyze_shard_key/configure_query_analyzer_database_versioning.js index 2d4e766a9b30d..ef5f2cb63b608 100644 --- a/jstests/sharding/analyze_shard_key/configure_query_analyzer_database_versioning.js +++ b/jstests/sharding/analyze_shard_key/configure_query_analyzer_database_versioning.js @@ -26,7 +26,7 @@ assert.commandWorked(mongos0Coll.insert([{x: -1}, {x: 1}])); const configureCmdObj = { configureQueryAnalyzer: ns, mode: "full", - sampleRate: 100 + samplesPerSecond: 1 }; // Run the configureQueryAnalyzer command. diff --git a/jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js b/jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js index 47db8fb259614..9ab004f967dc9 100644 --- a/jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js +++ b/jstests/sharding/analyze_shard_key/configure_query_analyzer_persistence.js @@ -1,7 +1,7 @@ /** * Tests that the configureQueryAnalyzer command persists the configuration in a document * in config.queryAnalyzers and that the document is deleted when the associated collection - * is dropped. + * is dropped or renamed. * * @tags: [requires_fcv_70] */ @@ -15,17 +15,18 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); function assertConfigQueryAnalyzerResponse(res, newConfig, oldConfig) { assert.eq(res.newConfiguration, newConfig, res); if (oldConfig) { - assert.eq(res.oldConfiguration, oldConfig); + assert.eq(res.oldConfiguration, oldConfig, res); } else { - assert(!res.hasOwnProperty("oldConfiguration"), oldConfig); + assert(!res.hasOwnProperty("oldConfiguration"), res); } } -function assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode, sampleRate, startTime, stopTime) { - const doc = conn.getCollection("config.queryAnalyzers").findOne({_id: collUuid}); - assert.eq(doc.ns, ns, doc); +function assertQueryAnalyzerConfigDoc( + conn, ns, collUuid, mode, samplesPerSecond, startTime, stopTime) { + const doc = conn.getCollection("config.queryAnalyzers").findOne({_id: ns}); + assert.eq(doc.collUuid, collUuid, doc); assert.eq(doc.mode, mode, doc); - assert.eq(doc.sampleRate, sampleRate, doc); + assert.eq(doc.samplesPerSecond, samplesPerSecond, doc); assert(doc.hasOwnProperty("startTime"), doc); if (startTime) { assert.eq(doc.startTime, startTime, doc); @@ -37,147 +38,243 @@ function assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode, sampleRate, star return doc; } -function assertNoQueryAnalyzerConfigDoc(conn, collUuid) { - const doc = conn.getCollection("config.queryAnalyzers").findOne({_id: collUuid}); +function assertNoQueryAnalyzerConfigDoc(conn, ns) { + const doc = conn.getCollection("config.queryAnalyzers").findOne({_id: ns}); assert.eq(doc, null, doc); } -function testPersistingConfiguration(conn) { +function setUpCollection(conn, {isShardedColl, st}) { const dbName = "testDb-" + extractUUIDFromObject(UUID()); - const collName = "testColl"; + const collName = isShardedColl ? "testCollSharded" : "testCollUnsharded"; const ns = dbName + "." 
+ collName; const db = conn.getDB(dbName); assert.commandWorked(db.createCollection(collName)); - const collUuid = QuerySamplingUtil.getCollectionUuid(db, collName); + if (isShardedColl) { + assert(st); + assert.commandWorked(st.s0.adminCommand({enableSharding: dbName})); + st.ensurePrimaryShard(dbName, st.shard0.name); + assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: {x: 1}})); + assert.commandWorked(st.s0.adminCommand({split: ns, middle: {x: 0}})); + assert.commandWorked( + st.s0.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName})); + } + + return {dbName, collName}; +} + +function testPersistingConfiguration(conn) { + const {dbName, collName} = setUpCollection(conn, {isShardedColl: false}); + const ns = dbName + "." + collName; + const db = conn.getDB(dbName); + let collUuid = QuerySamplingUtil.getCollectionUuid(db, collName); jsTest.log( `Testing that the configureQueryAnalyzer command persists the configuration correctly ${ tojson({dbName, collName, collUuid})}`); // Run a configureQueryAnalyzer command to disable query sampling. Verify that the command - // fails since query sampling is not even active. + // does not fail although query sampling is not even active. const mode0 = "off"; - assert.commandFailedWithCode(conn.adminCommand({configureQueryAnalyzer: ns, mode: mode0}), - ErrorCodes.IllegalOperation); - assertNoQueryAnalyzerConfigDoc(conn, collUuid); + const res0 = assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode: mode0})); + assertConfigQueryAnalyzerResponse(res0, {mode: mode0} /* newConfig */); + assertNoQueryAnalyzerConfigDoc(conn, ns); // Run a configureQueryAnalyzer command to enable query sampling. const mode1 = "full"; - const sampleRate1 = 100; - const res1 = assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns, mode: mode1, sampleRate: sampleRate1})); - const doc1 = assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode1, sampleRate1); - assertConfigQueryAnalyzerResponse(res1, {mode: mode1, sampleRate: sampleRate1} /* newConfig */); + const samplesPerSecond1 = 50; + const res1 = assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns, mode: mode1, samplesPerSecond: samplesPerSecond1})); + const doc1 = assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode1, samplesPerSecond1); + assertConfigQueryAnalyzerResponse( + res1, {mode: mode1, samplesPerSecond: samplesPerSecond1} /* newConfig */); // Run a configureQueryAnalyzer command to modify the sample rate. Verify that the 'startTime' // remains the same. const mode2 = "full"; - const sampleRate2 = 0.2; - const res2 = assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns, mode: mode2, sampleRate: sampleRate2})); - assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode2, sampleRate2, doc1.startTime); - assertConfigQueryAnalyzerResponse(res2, - {mode: mode2, sampleRate: sampleRate2} /* newConfig */, - {mode: mode1, sampleRate: sampleRate1} /* oldConfig */); + const samplesPerSecond2 = 0.2; + const res2 = assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns, mode: mode2, samplesPerSecond: samplesPerSecond2})); + assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode2, samplesPerSecond2, doc1.startTime); + assertConfigQueryAnalyzerResponse( + res2, + {mode: mode2, samplesPerSecond: samplesPerSecond2} /* newConfig */, + {mode: mode1, samplesPerSecond: samplesPerSecond1} /* oldConfig */); // Run a configureQueryAnalyzer command to disable query sampling. 
const mode3 = "off"; const res3 = assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode: mode3})); - assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode3, sampleRate2, doc1.startTime); - assertConfigQueryAnalyzerResponse(res3, - {mode: mode3} /* newConfig */, - {mode: mode2, sampleRate: sampleRate2} /* oldConfig */); + assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode3, samplesPerSecond2, doc1.startTime); + assertConfigQueryAnalyzerResponse( + res3, + {mode: mode3} /* newConfig */, + {mode: mode2, samplesPerSecond: samplesPerSecond2} /* oldConfig */); // Run a configureQueryAnalyzer command to re-enable query sampling. Verify that the 'startTime' // is new. const mode4 = "full"; - const sampleRate4 = 1; - const res4 = assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns, mode: mode4, sampleRate: sampleRate4})); - const doc4 = assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode4, sampleRate4); + const samplesPerSecond4 = 1; + const res4 = assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns, mode: mode4, samplesPerSecond: samplesPerSecond4})); + const doc4 = assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode4, samplesPerSecond4); assert.gt(doc4.startTime, doc1.startTime, doc4); - assertConfigQueryAnalyzerResponse(res4, - {mode: mode4, sampleRate: sampleRate4} /* newConfig */, - {mode: mode3, sampleRate: sampleRate2} /* oldConfig */); + assertConfigQueryAnalyzerResponse( + res4, + {mode: mode4, samplesPerSecond: samplesPerSecond4} /* newConfig */, + {mode: mode3, samplesPerSecond: samplesPerSecond2} /* oldConfig */); // Retry the previous configureQueryAnalyzer command. Verify that the 'startTime' remains the // same. - const res4Retry = assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns, mode: mode4, sampleRate: sampleRate4})); - assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode4, sampleRate4, doc4.startTime); - assertConfigQueryAnalyzerResponse(res4Retry, - {mode: mode4, sampleRate: sampleRate4} /* newConfig */, - {mode: mode4, sampleRate: sampleRate4} /* oldConfig */); - - // Run a configureQueryAnalyzer command to disable query sampling. Verify that the 'sampleRate' - // doesn't get unset. - const mode5 = "off"; - const res5 = assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode: mode5})); - const doc5 = - assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode5, sampleRate4, doc4.startTime); - assertConfigQueryAnalyzerResponse(res5, - {mode: mode5} /* newConfig */, - {mode: mode4, sampleRate: sampleRate4} /* oldConfig */); - - // Retry the previous configureQueryAnalyzer command. Verify that the 'stopTime' remains the - // same. - assert.commandFailedWithCode(conn.adminCommand({configureQueryAnalyzer: ns, mode: mode5}), - ErrorCodes.IllegalOperation); + const res4Retry = assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns, mode: mode4, samplesPerSecond: samplesPerSecond4})); + assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode4, samplesPerSecond4, doc4.startTime); + assertConfigQueryAnalyzerResponse( + res4Retry, + {mode: mode4, samplesPerSecond: samplesPerSecond4} /* newConfig */, + {mode: mode4, samplesPerSecond: samplesPerSecond4} /* oldConfig */); + + assert(db.getCollection(collName).drop()); + assert.commandWorked(db.createCollection(collName)); + collUuid = QuerySamplingUtil.getCollectionUuid(db, collName); + + // Run a configureQueryAnalyzer command to re-enable query sampling after dropping the + // collection. 
Verify that the 'startTime' is new, and "oldConfiguration" is not returned. + const mode5 = "full"; + const samplesPerSecond5 = 0.1; + const res5 = assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns, mode: mode5, samplesPerSecond: samplesPerSecond5})); + const doc5 = assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode5, samplesPerSecond5); + assert.gt(doc5.startTime, doc4.startTime, doc5); + assertConfigQueryAnalyzerResponse( + res5, {mode: mode5, samplesPerSecond: samplesPerSecond5} /* newConfig */); + + // Run a configureQueryAnalyzer command to disable query sampling. Verify that the + // 'samplesPerSecond' doesn't get unset. + const mode6 = "off"; + const res6 = assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode: mode6})); + const doc6 = + assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode6, samplesPerSecond5, doc5.startTime); + assertConfigQueryAnalyzerResponse( + res6, + {mode: mode6} /* newConfig */, + {mode: mode5, samplesPerSecond: samplesPerSecond5} /* oldConfig */); + + // Retry the previous configureQueryAnalyzer command. Verify that the retry does not fail and + // that the 'stopTime' remains the same. + assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode: mode6})); assertQueryAnalyzerConfigDoc( - conn, ns, collUuid, mode5, sampleRate4, doc4.startTime, doc5.stopTime); + conn, ns, collUuid, mode6, samplesPerSecond5, doc5.startTime, doc6.stopTime); } -function testDeletingConfigurations(conn, {dropDatabase, dropCollection, isShardedColl, st}) { - assert(dropDatabase || dropCollection, "Expected the test to drop the database or collection"); - assert(!dropDatabase || !dropCollection); - assert(!isShardedColl || st); - - const dbName = "testDb-" + extractUUIDFromObject(UUID()); - const collName = isShardedColl ? "testCollSharded" : "testCollUnsharded"; +function testConfigurationDeletionDropCollection(conn, {isShardedColl, rst, st}) { + const {dbName, collName} = setUpCollection(conn, {isShardedColl, rst, st}); const ns = dbName + "." + collName; - const db = conn.getDB(dbName); - const coll = db.getCollection(collName); - jsTest.log(`Testing configuration deletion ${ - tojson({dbName, collName, isShardedColl, dropDatabase, dropCollection})}`); + const collUuid = QuerySamplingUtil.getCollectionUuid(conn.getDB(dbName), collName); + jsTest.log(`Testing configuration deletion upon dropCollection ${ + tojson({dbName, collName, isShardedColl})}`); - assert.commandWorked(db.createCollection(collName)); - if (isShardedColl) { - assert.commandWorked(st.s0.adminCommand({enableSharding: dbName})); - st.ensurePrimaryShard(dbName, st.shard0.name); - assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: {x: 1}})); - assert.commandWorked(st.s0.adminCommand({split: ns, middle: {x: 0}})); - assert.commandWorked( - st.s0.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName})); + const mode = "full"; + const samplesPerSecond = 0.5; + const res = assert.commandWorked( + conn.adminCommand({configureQueryAnalyzer: ns, mode, samplesPerSecond})); + assertConfigQueryAnalyzerResponse(res, {mode, samplesPerSecond}); + assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode, samplesPerSecond); + + assert(conn.getDB(dbName).getCollection(collName).drop()); + if (st) { + assertNoQueryAnalyzerConfigDoc(conn, ns); + } else { + // TODO (SERVER-76443): Make sure that dropCollection on replica set delete the + // config.queryAnalyzers doc for the collection being dropped. 
+ assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode, samplesPerSecond); } - const collUuid = QuerySamplingUtil.getCollectionUuid(db, collName); +} + +function testConfigurationDeletionDropDatabase(conn, {isShardedColl, rst, st}) { + const {dbName, collName} = setUpCollection(conn, {isShardedColl, rst, st}); + const ns = dbName + "." + collName; + const collUuid = QuerySamplingUtil.getCollectionUuid(conn.getDB(dbName), collName); + jsTest.log(`Testing configuration deletion upon dropDatabase ${ + tojson({dbName, collName, isShardedColl})}`); const mode = "full"; - const sampleRate = 0.5; - const res = - assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode, sampleRate})); - assertConfigQueryAnalyzerResponse(res, {mode, sampleRate}); - assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode, sampleRate); - - if (dropDatabase) { - assert.commandWorked(db.dropDatabase()); - } else if (dropCollection) { - assert(coll.drop()); + const samplesPerSecond = 0.5; + const res = assert.commandWorked( + conn.adminCommand({configureQueryAnalyzer: ns, mode, samplesPerSecond})); + assertConfigQueryAnalyzerResponse(res, {mode, samplesPerSecond}); + assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode, samplesPerSecond); + + assert.commandWorked(conn.getDB(dbName).dropDatabase()); + if (st) { + assertNoQueryAnalyzerConfigDoc(conn, ns); + } else { + // TODO (SERVER-76443): Make sure that dropDatabase on replica set delete the + // config.queryAnalyzers docs for all collections in the database being dropped. + assertQueryAnalyzerConfigDoc(conn, ns, collUuid, mode, samplesPerSecond); } +} + +function testConfigurationDeletionRenameCollection(conn, {sameDatabase, isShardedColl, rst, st}) { + const {dbName, collName} = setUpCollection(conn, {isShardedColl, rst, st}); - assertNoQueryAnalyzerConfigDoc(conn, collUuid); + const srcDbName = dbName; + const srcCollName = collName; + const srcNs = srcDbName + "." + srcCollName; + const srcDb = conn.getDB(srcDbName); + const srcCollUuid = QuerySamplingUtil.getCollectionUuid(srcDb, srcCollName); + + const dstDbName = sameDatabase ? srcDbName : (srcDbName + "New"); + const dstCollName = sameDatabase ? (srcCollName + "New") : srcCollName; + const dstNs = dstDbName + "." + dstCollName; + const dstDb = conn.getDB(dstDbName); + assert.commandWorked(dstDb.createCollection(dstCollName)); + if (!sameDatabase && st) { + // On a sharded cluster, the src and dst collections must be on same shard. 
+ st.ensurePrimaryShard(dstDbName, st.getPrimaryShardIdForDatabase(srcDbName)); + } + const dstCollUuid = QuerySamplingUtil.getCollectionUuid(dstDb, dstCollName); + + jsTest.log(`Testing configuration deletion upon renameCollection ${ + tojson({sameDatabase, srcDbName, srcCollName, dstDbName, dstCollName, isShardedColl})}`); + + const mode = "full"; + const samplesPerSecond = 0.5; + + const srcRes = assert.commandWorked( + conn.adminCommand({configureQueryAnalyzer: srcNs, mode, samplesPerSecond})); + assertConfigQueryAnalyzerResponse(srcRes, {mode, samplesPerSecond}); + assertQueryAnalyzerConfigDoc(conn, srcNs, srcCollUuid, mode, samplesPerSecond); + + const dstRes = assert.commandWorked( + conn.adminCommand({configureQueryAnalyzer: dstNs, mode, samplesPerSecond})); + assertConfigQueryAnalyzerResponse(dstRes, {mode, samplesPerSecond}); + assertQueryAnalyzerConfigDoc(conn, dstNs, dstCollUuid, mode, samplesPerSecond); + + assert.commandWorked(conn.adminCommand({renameCollection: srcNs, to: dstNs, dropTarget: true})); + if (st) { + assertNoQueryAnalyzerConfigDoc(conn, srcNs); + assertNoQueryAnalyzerConfigDoc(conn, dstNs); + } else { + // TODO (SERVER-76443): Make sure that renameCollection on replica set delete the + // config.queryAnalyzers doc for the collection being renamed. + assertQueryAnalyzerConfigDoc(conn, srcNs, srcCollUuid, mode, samplesPerSecond); + assertQueryAnalyzerConfigDoc(conn, dstNs, dstCollUuid, mode, samplesPerSecond); + } } { const st = new ShardingTest({shards: 2, rs: {nodes: 1}}); testPersistingConfiguration(st.s); - // TODO (SERVER-70479): Make sure that dropDatabase and dropCollection delete the - // config.queryAnalyzers doc for the collection being dropped. - // testDeletingConfigurations(st.s, {dropDatabase: true, isShardedColl: false, st}); - testDeletingConfigurations(st.s, {dropDatabase: true, isShardedColl: true, st}); - testDeletingConfigurations(st.s, {dropCollection: true, isShardedColl: false, st}); - testDeletingConfigurations(st.s, {dropCollection: true, isShardedColl: true, st}); + for (let isShardedColl of [true, false]) { + testConfigurationDeletionDropCollection(st.s, {st, isShardedColl}); + testConfigurationDeletionDropDatabase(st.s, {st, isShardedColl}); + testConfigurationDeletionRenameCollection(st.s, {st, sameDatabase: true, isShardedColl}); + } + // During renameCollection, the source database is only allowed to be different from the + // destination database when the collection being renamed is unsharded. + testConfigurationDeletionRenameCollection(st.s, + {st, sameDatabase: false, isShardedColl: false}); st.stop(); } @@ -189,10 +286,10 @@ function testDeletingConfigurations(conn, {dropDatabase, dropCollection, isShard const primary = rst.getPrimary(); testPersistingConfiguration(primary); - // TODO (SERVER-70479): Make sure that dropDatabase and dropCollection delete the - // config.queryAnalyzers doc for the collection being dropped. 
- // testDeletingConfigurations(primary, {dropDatabase: true, isShardedColl: false}); - // testDeletingConfigurations(primary, {dropCollection: true, isShardedColl: false}); + testConfigurationDeletionDropCollection(primary, {rst}); + testConfigurationDeletionDropDatabase(primary, {rst}); + testConfigurationDeletionRenameCollection(primary, {rst, sameDatabase: false}); + testConfigurationDeletionRenameCollection(primary, {rst, sameDatabase: true}); rst.stopSet(); } diff --git a/jstests/sharding/analyze_shard_key/ddl.js b/jstests/sharding/analyze_shard_key/ddl.js index 41e3d4b9a8112..69b4c06e02ac5 100644 --- a/jstests/sharding/analyze_shard_key/ddl.js +++ b/jstests/sharding/analyze_shard_key/ddl.js @@ -4,12 +4,10 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; - -load("jstests/libs/catalog_shard_util.js"); load("jstests/libs/fail_point_util.js"); +load("jstests/libs/parallelTester.js"); // For Thread. load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); +load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); load("jstests/sharding/analyze_shard_key/libs/validation_common.js"); const queryAnalysisSamplerConfigurationRefreshSecs = 1; @@ -23,26 +21,26 @@ const numDocs = 10 * analyzeShardKeyNumRanges; // Given the number of documents defined above, the error code 4952606 is only expected because of // the deletes that will occur as part of renaming, dropping, recreating and emptying the // collection. +const expectedAnalyzeShardKeyErrCodes = [ + ErrorCodes.NamespaceNotFound, + ErrorCodes.QueryPlanKilled, + ErrorCodes.IllegalOperation, + // The shard key does not have enough cardinality for generating split points because + // documents are being deleted. + 4952606, + // The collection becomes empty during the $collStats step. + 7826501, + // The collection becomes empty during the step for calculating the monotonicity metrics. + 7826505, + // The collection becomes empty during the step for calculating the cardinality and frequency + // metrics. + 7826506 +]; const analyzeShardKeyTestCases = [ - { - operationType: "rename", - expectedErrCodes: [ErrorCodes.NamespaceNotFound, ErrorCodes.QueryPlanKilled, 4952606] - }, - { - operationType: "drop", - expectedErrCodes: [ErrorCodes.NamespaceNotFound, ErrorCodes.QueryPlanKilled, 4952606] - }, - { - operationType: "recreate", - expectedErrCodes: [ - ErrorCodes.NamespaceNotFound, - ErrorCodes.CollectionUUIDMismatch, - ErrorCodes.QueryPlanKilled, - ErrorCodes.IllegalOperation, - 4952606 - ] - }, - {operationType: "makeEmpty", expectedErrCodes: [ErrorCodes.IllegalOperation, 4952606]} + {operationType: "rename", expectedErrCodes: expectedAnalyzeShardKeyErrCodes}, + {operationType: "drop", expectedErrCodes: expectedAnalyzeShardKeyErrCodes}, + {operationType: "recreate", expectedErrCodes: expectedAnalyzeShardKeyErrCodes}, + {operationType: "makeEmpty", expectedErrCodes: expectedAnalyzeShardKeyErrCodes} ]; // Test DDL operations after each step below. 
const analyzeShardKeyFpNames = [ @@ -57,7 +55,7 @@ const configureQueryAnalyzerTestCases = [ {operationType: "makeEmpty", expectedErrCodes: [ErrorCodes.IllegalOperation]} ]; -function setUpTestMode(conn, dbName, collName, operationType) { +function setUpTestCase(conn, dbName, collName, operationType) { const testDB = conn.getDB(dbName); const testColl = testDB.getCollection(collName); switch (operationType) { @@ -90,7 +88,9 @@ function runAnalyzeShardKeyTest(conn, testCase, fpConn, fpName) { assert.commandWorked(conn.getCollection(ns).insert(docs)); const runCmdFunc = (host, ns) => { + load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); const conn = new Mongo(host); + sleep(AnalyzeShardKeyUtil.getRandInteger(10, 100)); return conn.adminCommand({analyzeShardKey: ns, key: {_id: 1}}); }; @@ -100,38 +100,59 @@ function runAnalyzeShardKeyTest(conn, testCase, fpConn, fpName) { fp = configureFailPoint(fpConn, fpName); } runCmdThread.start(); - sleep(AnalyzeShardKeyUtil.getRandInteger(10, 100)); - setUpTestMode(conn, dbName, collName, testCase.operationType); + setUpTestCase(conn, dbName, collName, testCase.operationType); if (fp) { fp.off(); } assert.commandWorkedOrFailedWithCode(runCmdThread.returnData(), testCase.expectedErrCodes); } -function runConfigureQueryAnalyzerTest(conn, testCase) { +function runConfigureQueryAnalyzerTest(conn, testCase, {rst} = {}) { const validationTest = ValidationTest(conn); const dbName = validationTest.dbName; const collName = validationTest.collName; const ns = dbName + "." + collName; + jsTest.log(`Testing configureQueryAnalyzer command ${tojson({testCase, dbName, collName})}`); - const runCmdFunc = (host, ns, mode, sampleRate) => { + const runCmdFunc = (host, ns, mode, samplesPerSecond) => { + load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); const conn = new Mongo(host); - return conn.adminCommand({configureQueryAnalyzer: ns, mode, sampleRate}); + sleep(AnalyzeShardKeyUtil.getRandInteger(10, 100)); + return conn.adminCommand({configureQueryAnalyzer: ns, mode, samplesPerSecond}); }; - let runCmdThread = new Thread(runCmdFunc, conn.host, ns, "full" /* mode */, 1 /* sampleRate */); + let runCmdThread = + new Thread(runCmdFunc, conn.host, ns, "full" /* mode */, 1 /* samplesPerSecond */); runCmdThread.start(); - sleep(AnalyzeShardKeyUtil.getRandInteger(10, 100)); - setUpTestMode(conn, dbName, collName, testCase.operationType); - assert.commandWorkedOrFailedWithCode(runCmdThread.returnData(), testCase.expectedErrCodes); + setUpTestCase(conn, dbName, collName, testCase.operationType); + const res = + assert.commandWorkedOrFailedWithCode(runCmdThread.returnData(), testCase.expectedErrCodes); + + if (testCase.operationType == "recreate") { + const configDoc = conn.getCollection("config.queryAnalyzers").findOne({_id: ns}); + if (configDoc) { + // The configureQueryAnalyzer command is serialized with DDL commands. In addition, on a + // sharded cluster, dropping a collection causes its config.queryAnalyzers document to + // get deleted. So if there is config.queryAnalyzers document for this collection after + // it is dropped and recreated, the configureQueryAnalyzer command must have run after + // the collection has been recreated so it must have the new collection uuid. 
+ assert(res.ok, res); + const collUuid = QuerySamplingUtil.getCollectionUuid(conn.getDB(dbName), collName); + if (bsonWoCompare(configDoc.collUuid, collUuid) != 0) { + // (SERVER-76443): Make sure that dropCollection on replica set delete the + // config.queryAnalyzers doc for the collection being dropped. + assert(rst); + } + } + } // Verify that running the configureQueryAnalyzer command after the DDL operation does not // lead to a crash. sleep(queryAnalysisSamplerConfigurationRefreshSecs); assert.commandWorkedOrFailedWithCode( - runCmdFunc(conn.host, ns, "full" /* mode */, 10 /* sampleRate */), + runCmdFunc(conn.host, ns, "full" /* mode */, 10 /* samplesPerSecond */), testCase.expectedErrCodes); sleep(queryAnalysisSamplerConfigurationRefreshSecs); assert.commandWorkedOrFailedWithCode(runCmdFunc(conn.host, ns, "off" /* mode */), @@ -178,9 +199,8 @@ function runConfigureQueryAnalyzerTest(conn, testCase) { } } for (let testCase of configureQueryAnalyzerTestCases) { - runConfigureQueryAnalyzerTest(primary, testCase); + runConfigureQueryAnalyzerTest(primary, testCase, {rst}); } rst.stopSet(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/analyze_shard_key/deprioritize_query_sampling_inserts.js b/jstests/sharding/analyze_shard_key/deprioritize_query_sampling_inserts.js new file mode 100644 index 0000000000000..c26f45a6c8700 --- /dev/null +++ b/jstests/sharding/analyze_shard_key/deprioritize_query_sampling_inserts.js @@ -0,0 +1,100 @@ +/** + * Tests that inserts related to query sampling are deprioritized. + * + * @tags: [requires_fcv_70] + */ + +(function() { + +"use strict"; + +load("jstests/libs/fail_point_util.js"); +load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); + +const samplesPerSecond = 1000; +const queryAnalysisWriterIntervalSecs = 1; +const queryAnalysisSamplerConfigurationRefreshSecs = 1; +const mongodSetParameterOpts = { + queryAnalysisWriterIntervalSecs, +}; +const mongosSetParameterOpts = { + queryAnalysisSamplerConfigurationRefreshSecs, +}; + +function runTest(conn, primary, {st, rst}) { + const dbName = "testDb"; + const collName = "testColl"; + const ns = dbName + "." + collName; + const testDb = conn.getDB(dbName); + const testColl = testDb.getCollection(collName); + const sampleCollName = "sampledQueries"; + const sampleDiffCollName = "sampledQueriesDiff"; + const sampleNs = "config." + sampleCollName; + const sampleDiffNs = "config." + sampleDiffCollName; + + assert.commandWorked(testColl.insert([{x: 1}])); + const collUuid = QuerySamplingUtil.getCollectionUuid(testDb, collName); + + assert.commandWorked( + conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond})); + QuerySamplingUtil.waitForActiveSampling(ns, collUuid, {st, rst}); + + // Test insert to config.sampledQueries. + const fp1 = configureFailPoint(primary, "hangInsertBeforeWrite", {ns: sampleNs}); + assert.commandWorked(testDb.runCommand({find: collName, filter: {x: 1}})); + // Wait for the sampling buffer to be flushed. + fp1.wait(); + + let currentOpDocs = + conn.getDB("admin") + .aggregate( + [{$currentOp: {allUsers: true}}, {$match: {"command.insert": sampleCollName}}]) + .toArray(); + assert.eq(currentOpDocs.length, 1, tojson(currentOpDocs)); + assert.eq(currentOpDocs[0]["admissionPriority"], "low", tojson(currentOpDocs[0])); + + fp1.off(); + + // Test insert to config.sampledQueriesDiff. 
+ const fp2 = configureFailPoint(primary, "hangInsertBeforeWrite", {ns: sampleDiffNs}); + assert.commandWorked( + testDb.runCommand({update: collName, updates: [{q: {x: 1}, u: {$set: {y: 1}}}]})); + // Wait for the sampling buffer to be flushed. + fp2.wait(); + + currentOpDocs = + conn.getDB("admin") + .aggregate( + [{$currentOp: {allUsers: true}}, {$match: {"command.insert": sampleDiffCollName}}]) + .toArray(); + assert.eq(currentOpDocs.length, 1, tojson(currentOpDocs)); + assert.eq(currentOpDocs[0]["admissionPriority"], "low", tojson(currentOpDocs[0])); + + fp2.off(); +} + +{ + const st = new ShardingTest({ + shards: 1, + mongos: 1, + rs: {nodes: 2, setParameter: mongodSetParameterOpts}, + mongosOptions: {setParameter: mongosSetParameterOpts} + }); + + jsTest.log("Testing deprioritized insert in sharded cluster."); + runTest(st.s, st.rs0.getPrimary(), {st}); + + st.stop(); +} + +{ + const rst = new ReplSetTest({nodes: 2, nodeOptions: {setParameter: mongodSetParameterOpts}}); + rst.startSet(); + rst.initiate(); + + jsTest.log("Testing deprioritized insert in replica set."); + runTest(rst.getPrimary(), rst.getPrimary(), {rst}); + + rst.stopSet(); +} +})(); diff --git a/jstests/sharding/analyze_shard_key/inaccurate_coll_stats.js b/jstests/sharding/analyze_shard_key/inaccurate_coll_stats.js index 73fe42a413b67..363f9fee8b46e 100644 --- a/jstests/sharding/analyze_shard_key/inaccurate_coll_stats.js +++ b/jstests/sharding/analyze_shard_key/inaccurate_coll_stats.js @@ -7,18 +7,17 @@ * shard, therefore the node would need to be restarted as a configsvr and then transitioned to be * a config shard node. The ShardingTest and ReplSetTest API currently doesn't support doing that. * - * @tags: [requires_fcv_70, requires_persistence, catalog_shard_incompatible] + * @tags: [requires_fcv_70, requires_persistence, config_shard_incompatible] */ (function() { "use strict"; +load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); const numMostCommonValues = 5; const setParameterOpts = { - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}), analyzeShardKeyNumMostCommonValues: numMostCommonValues }; @@ -72,25 +71,70 @@ function runTest(conn, {rst, st}) { jsTest.log("Verify that the analyzeShardKey metrics prior to the unclean shutdown"); - const resXBefore = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: {x: 1}})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(resXBefore, { + const expectedMetricsX = { numDocs: docs.length, isUnique: false, numDistinctValues: numDistinctXValues, mostCommonValues: mostCommonXValues, numMostCommonValues - }); - assert.eq(resXBefore.avgDocSizeBytes, Object.bsonsize(docs[0])); - - const resYBefore = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: {y: 1}})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(resYBefore, { + }; + const expectedMetricsY = { numDocs: docs.length, isUnique: true, numDistinctValues: numDistinctYValues, mostCommonValues: mostCommonYValues, numMostCommonValues - }); - assert.eq(resYBefore.avgDocSizeBytes, Object.bsonsize(docs[0])); + }; + const expectedAvgDocSize = Object.bsonsize(docs[0]); + + const resXBefore = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: {x: 1}, + // Skip calculating the read and write distribution metrics since there are not needed by + // this test. 
+ readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(resXBefore.keyCharacteristics, + expectedMetricsX); + assert.eq(resXBefore.keyCharacteristics.avgDocSizeBytes, expectedAvgDocSize); + + const resYBefore = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: {y: 1}, + // Skip calculating the read and write distribution metrics since there are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(resYBefore.keyCharacteristics, + expectedMetricsY); + assert.eq(resYBefore.keyCharacteristics.avgDocSizeBytes, expectedAvgDocSize); + + let runAnalyzeShardKeyCmd = (host, ns, key) => { + const conn = new Mongo(host); + return conn.adminCommand({ + analyzeShardKey: ns, + key: key, + // Skip calculating the read and write distribution metrics since there are not needed + // by this test. + readWriteDistribution: false + }); + }; + for (let shardKey of [{x: 1}, {y: 1}]) { + jsTest.log( + "Verify that the analyzeShardKey command fails when the collection becomes empty " + + "right before the $collStats step " + tojson({shardKey})); + const analyzeShardKeyThread = new Thread(runAnalyzeShardKeyCmd, conn.host, ns, shardKey); + const fp = configureFailPoint(st ? st.rs0.nodes[0] : rst.nodes[0], + "analyzeShardKeyPauseBeforeCalculatingCollStatsMetrics"); + analyzeShardKeyThread.start(); + fp.wait(); + // Delete all documents in the collection. + assert.commandWorked(coll.remove({})); + fp.off(); + assert.commandFailedWithCode(analyzeShardKeyThread.returnData(), 7826501); + // Reinsert the documents. + assert.commandWorked(coll.insert(docs)); + } assert(rst || st); const rstToKill = rst ? rst : st.rs0; @@ -115,48 +159,55 @@ function runTest(conn, {rst, st}) { rstToKill.getPrimary(); } - jsTest.log("Verify that the fast data statistics are inaccurate after the unclean shutdown"); + jsTest.log("Verify that the analyzeShardKey command fails if the fast data statistics " + + "checked in the $collStats step or monotonicity step indicate that the " + + "collection is empty (although it is not) after the unclean shutdown"); const collStatsAfter = coll.aggregate([ {$collStats: {storageStats: {}}}, {$project: {count: "$storageStats.count", size: "$storageStats.size"}} ]) .toArray()[0]; - assert.eq(collStatsAfter.count, 0); - assert.eq(collStatsAfter.size, 0); jsTest.log("Verify the analyzeShardKey metrics after the unclean shutdown"); - // The cardinality and frequency metrics for {x: 1} should be accurate since the metrics - // calculation for a shard key that is not unique does not depend on fast count. However, the - // average document size should be set to the size of an empty document since that information - // is not available from $collStats. - const resXAfter = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: {x: 1}})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(resXBefore, { - numDocs: docs.length, - isUnique: false, - numDistinctValues: numDistinctXValues, - mostCommonValues: mostCommonXValues, - numMostCommonValues + const resXAfter = conn.adminCommand({ + analyzeShardKey: ns, + key: {x: 1}, + // Skip calculating the read and write distribution metrics since there are not needed by + // this test. 
+ readWriteDistribution: false }); - assert.eq(resXAfter.avgDocSizeBytes, Object.bsonsize({})); - - // The cardinality and frequency metrics for {y: 1} should be inaccurate since the metrics - // calculation for a shard key that is unique depends on fast count (this is optimization that - // unfortunately does not work out correctly in this rare case). However, the metrics should - // still make sense. That is, the number of documents and the number distinct values should not - // be zero and instead should be equal to the number of most common values returned. Similar to - // previous case, the average document size should be set to the size of an empty document since - // that information is not available from $collStats. - const resYAfter = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: {y: 1}})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(resYAfter, { - numDocs: numMostCommonValues, - isUnique: true, - numDistinctValues: numMostCommonValues, - mostCommonValues: mostCommonYValues, - numMostCommonValues + const resYAfter = conn.adminCommand({ + analyzeShardKey: ns, + key: {y: 1}, + // Skip calculating the read and write distribution metrics since there are not needed by + // this test. + readWriteDistribution: false }); - assert.eq(resYAfter.avgDocSizeBytes, Object.bsonsize({})); + + if (collStatsAfter.count == 0) { + assert.eq(collStatsAfter.size, 0); + // IllegalOperation is the error thrown by the monotonicity step, whereas 7826501 is the + // error thrown in the $collStats step. Currently, the monotonicity step comes before the + // $collStats step and there is no monotonicity check for clustered collections. + const expectedErrCode = AnalyzeShardKeyUtil.isClusterCollection(conn, dbName, collName) + ? 7826501 + : ErrorCodes.IllegalOperation; + assert.commandFailedWithCode(resXAfter, expectedErrCode); + assert.commandFailedWithCode(resYAfter, expectedErrCode); + } else { + assert.gt(collStatsAfter.size, 0); + assert.commandWorked(resXAfter); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(resXAfter.keyCharacteristics, + expectedMetricsX); + assert.eq(resXAfter.keyCharacteristics.avgDocSizeBytes, expectedAvgDocSize); + + assert.commandWorked(resYAfter); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(resYAfter.keyCharacteristics, + expectedMetricsY); + assert.eq(resYAfter.keyCharacteristics.avgDocSizeBytes, expectedAvgDocSize); + } } { diff --git a/jstests/sharding/analyze_shard_key/invalid_config_docs.js b/jstests/sharding/analyze_shard_key/invalid_config_docs.js index 836d1c634a06f..20c4016fcf7d7 100644 --- a/jstests/sharding/analyze_shard_key/invalid_config_docs.js +++ b/jstests/sharding/analyze_shard_key/invalid_config_docs.js @@ -10,15 +10,16 @@ function runAnalyzerDocTest(conn) { const configColl = conn.getCollection("config.queryAnalyzers"); - assert.commandFailedWithCode(configColl.insert({_id: UUID(), unknownField: 0}), - 40414 /* IDL required field error */); - const dbName = "testDb"; const collName = "testColl"; const ns = dbName + "." 
+ collName; + + assert.commandFailedWithCode(configColl.insert({_id: ns, unknownField: 0}), + 40414 /* IDL required field error */); + assert.commandWorked(conn.getDB(dbName).createCollection(collName)); assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1})); + conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1})); assert.commandFailedWithCode(configColl.update({}, {unknownField: 0}), 40414 /* IDL required field error */); } @@ -26,6 +27,13 @@ function runAnalyzerDocTest(conn) { function runMongosDocTest(conn) { const configColl = conn.getCollection("config.mongos"); assert.commandFailedWithCode(configColl.insert({_id: "mongos0"}), ErrorCodes.NoSuchKey); + + jsTest.log("Wait for the mongos to report its uptime, i.e. for its config.mongos document " + + "to exist. Otherwise, the update below would be a no-op and not fail"); + assert.soon(() => { + return configColl.find().itcount() == 1; + }); + assert.commandFailedWithCode(configColl.update({}, {unknownField: 0}), ErrorCodes.NoSuchKey); } diff --git a/jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js b/jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js index d5f14db5a8f62..f5302646c6f86 100644 --- a/jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js +++ b/jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js @@ -73,14 +73,10 @@ var AnalyzeShardKeyUtil = (function() { /** * Extracts the shard key value from the given document. */ - function extractShardKeyValueFromDocument(doc, shardKey, indexKey) { + function extractShardKeyValueFromDocument(doc, shardKey) { const shardKeyValue = {}; for (let fieldName in shardKey) { - const isHashed = indexKey[fieldName] == "hashed"; - const value = AnalyzeShardKeyUtil.getDottedField(doc, fieldName); - // TODO (SERVER-75886): Make analyzeShardKey command return shard key values correctly - // when the supporting index is hashed. - shardKeyValue[fieldName] = isHashed ? convertShardKeyToHashed(value) : value; + shardKeyValue[fieldName] = AnalyzeShardKeyUtil.getDottedField(doc, fieldName); } return shardKeyValue; } @@ -113,6 +109,16 @@ var AnalyzeShardKeyUtil = (function() { return prefix + ".x.y"; } + /** + * Returns true if the collection is a clustered collection. Assumes that the collection + * exists. + */ + function isClusterCollection(conn, dbName, collName) { + const listCollectionRes = assert.commandWorked( + conn.getDB(dbName).runCommand({listCollections: 1, filter: {name: collName}})); + return listCollectionRes.cursor.firstBatch[0].options.hasOwnProperty("clusteredIndex"); + } + /** * Enables profiling of the given database on all the given mongods. 
*/ @@ -168,7 +174,22 @@ var AnalyzeShardKeyUtil = (function() { } function validateKeyCharacteristicsMetrics(metrics) { - assert.gte(metrics.numDocs, metrics.numDistinctValues, metrics); + assert.gt(metrics.numDocsTotal, 0, metrics); + assert.gt(metrics.numDocsSampled, 0, metrics); + assert.gt(metrics.numDistinctValues, 0, metrics); + assert.gt(metrics.mostCommonValues.length, 0, metrics); + assert.gt(metrics.avgDocSizeBytes, 0, metrics); + + assert.gte(metrics.numDocsTotal, metrics.numDocsSampled, metrics); + if (metrics.hasOwnProperty("numOrphanDocs")) { + assert.gte(metrics.numOrphanDocs, 0, metrics); + assert.gte(metrics.numDocsTotal, metrics.numOrphanDocs); + } + if (metrics.isUnique) { + assert.eq(metrics.numDocsSampled, metrics.numDistinctValues, metrics); + } else { + assert.gte(metrics.numDocsSampled, metrics.numDistinctValues, metrics); + } assert.gte(metrics.numDistinctValues, metrics.mostCommonValues.length, metrics); let totalFrequency = 0; @@ -181,7 +202,7 @@ var AnalyzeShardKeyUtil = (function() { totalFrequency += frequency; prevFrequency = frequency; } - assert.gte(metrics.numDocs, totalFrequency, metrics); + assert.gte(metrics.numDocsTotal, totalFrequency, metrics); if (metrics.monotonicity.type == "unknown") { assert(!metrics.monotonicity.hasOwnProperty("recordIdCorrelationCoefficient"), metrics); @@ -191,21 +212,16 @@ var AnalyzeShardKeyUtil = (function() { assert.gte(Math.abs(coefficient), 0, metrics); assert.lte(Math.abs(coefficient), 1, metrics); } - - assert.gt(metrics.avgDocSizeBytes, 0); } - function assertNotContainKeyCharacteristicsMetrics(metrics) { - assert(!metrics.hasOwnProperty("numDocs"), metrics); - assert(!metrics.hasOwnProperty("isUnique"), metrics); - assert(!metrics.hasOwnProperty("numDistinctValues"), metrics); - assert(!metrics.hasOwnProperty("mostCommonValues"), metrics); - assert(!metrics.hasOwnProperty("monotonicity"), metrics); - assert(!metrics.hasOwnProperty("avgDocSizeBytes"), metrics); + function assertNotContainKeyCharacteristicsMetrics(res) { + assert(!res.hasOwnProperty("keyCharacteristics"), res); } - function assertContainKeyCharacteristicsMetrics(metrics) { - assert(metrics.hasOwnProperty("numDocs"), metrics); + function assertContainKeyCharacteristicsMetrics(res) { + assert(res.hasOwnProperty("keyCharacteristics"), res); + const metrics = res.keyCharacteristics; + assert(metrics.hasOwnProperty("numDocsTotal"), metrics); assert(metrics.hasOwnProperty("isUnique"), metrics); assert(metrics.hasOwnProperty("numDistinctValues"), metrics); assert(metrics.hasOwnProperty("mostCommonValues"), metrics); @@ -215,9 +231,8 @@ var AnalyzeShardKeyUtil = (function() { } function assertKeyCharacteristicsMetrics(actual, expected) { - assertContainKeyCharacteristicsMetrics(actual); - - assert.eq(actual.numDocs, expected.numDocs, {actual, expected}); + assert.eq(actual.numDocsTotal, expected.numDocs, {actual, expected}); + assert.eq(actual.numDocsSampled, expected.numDocs, {actual, expected}); assert.eq(actual.isUnique, expected.isUnique, {actual, expected}); assert.eq(actual.numDistinctValues, expected.numDistinctValues, {actual, expected}); @@ -317,16 +332,16 @@ var AnalyzeShardKeyUtil = (function() { } } - function assertNotContainReadWriteDistributionMetrics(metrics) { - assert(!metrics.hasOwnProperty("readDistribution")); - assert(!metrics.hasOwnProperty("writeDistribution")); + function assertNotContainReadWriteDistributionMetrics(res) { + assert(!res.hasOwnProperty("readDistribution")); + assert(!res.hasOwnProperty("writeDistribution")); } - 
function assertContainReadWriteDistributionMetrics(metrics) {
-        assert(metrics.hasOwnProperty("readDistribution"));
-        assert(metrics.hasOwnProperty("writeDistribution"));
-        validateReadDistributionMetrics(metrics.readDistribution);
-        validateWriteDistributionMetrics(metrics.writeDistribution);
+    function assertContainReadWriteDistributionMetrics(res) {
+        assert(res.hasOwnProperty("readDistribution"));
+        assert(res.hasOwnProperty("writeDistribution"));
+        validateReadDistributionMetrics(res.readDistribution);
+        validateWriteDistributionMetrics(res.writeDistribution);
     }
 
     function validateSampledQueryDocument(doc) {
@@ -352,6 +367,7 @@ var AnalyzeShardKeyUtil = (function() {
         getRandInteger,
         getRandomElement,
         getRandomFieldName,
+        isClusterCollection,
         enableProfiler,
         disableProfiler,
         calculatePercentage,
@@ -360,6 +376,7 @@ var AnalyzeShardKeyUtil = (function() {
         assertNotContainKeyCharacteristicsMetrics,
         assertContainKeyCharacteristicsMetrics,
         assertKeyCharacteristicsMetrics,
+        validateKeyCharacteristicsMetrics,
         assertNotContainReadWriteDistributionMetrics,
         assertContainReadWriteDistributionMetrics,
         validateSampledQueryDocument
diff --git a/jstests/sharding/analyze_shard_key/libs/cardinality_and_frequency_common.js b/jstests/sharding/analyze_shard_key/libs/cardinality_and_frequency_common.js
new file mode 100644
index 0000000000000..f643336a99e6e
--- /dev/null
+++ b/jstests/sharding/analyze_shard_key/libs/cardinality_and_frequency_common.js
@@ -0,0 +1,73 @@
+const numMostCommonValues = 5;
+
+/**
+ * If 'expectEntries' is true, asserts that there are profiler entries for the aggregate commands
+ * run by the analyzeShardKey command with the given 'comment', and verifies that those aggregate
+ * commands used an index scan and fetched no more than 'numMostCommonValues' documents. If
+ * 'expectEntries' is false, asserts that there are no such profiler entries.
+ */
+function assertAggregateQueryPlans(mongodConns, dbName, collName, comment, expectEntries) {
+    let numEntries = 0;
+
+    mongodConns.forEach(conn => {
+        const profilerColl = conn.getDB(dbName).system.profile;
+
+        profilerColl.find({"command.aggregate": collName, "command.comment": comment})
+            .forEach(doc => {
+                if (doc.hasOwnProperty("ok") && (doc.ok === 0)) {
+                    return;
+                }
+
+                const firstStage = doc.command.pipeline[0];
+
+                if (firstStage.hasOwnProperty("$collStats")) {
+                    return;
+                }
+
+                numEntries++;
+                if (firstStage.hasOwnProperty("$match") || firstStage.hasOwnProperty("$limit")) {
+                    // This corresponds to the aggregation that the analyzeShardKey command runs
+                    // to look up documents for a shard key with a unique or hashed supporting
+                    // index. For both cases, it should fetch at most 'numMostCommonValues'
+                    // documents.
+                    assert(doc.hasOwnProperty("planSummary"), doc);
+                    assert.lte(doc.docsExamined, numMostCommonValues, doc);
+                } else {
+                    // This corresponds to the aggregation that the analyzeShardKey command runs
+                    // when analyzing a shard key with a non-unique supporting index.
+                    if (!firstStage.hasOwnProperty("$mergeCursors")) {
+                        assert(doc.hasOwnProperty("planSummary"), doc);
+                        assert(doc.planSummary.includes("IXSCAN"), doc);
+                    }
+
+                    // Verify that it fetched at most 'numMostCommonValues' documents.
+                    assert.lte(doc.docsExamined, numMostCommonValues, doc);
+                    // Verify that it opted out of shard filtering.
+ assert.eq(doc.readConcern.level, "available", doc); + } + }); + }); + + if (expectEntries) { + assert.gt(numEntries, 0); + } else { + assert.eq(numEntries, 0); + } +} + +/** + * Returns the connections to all data-bearing mongods in the sharded cluster or replica set. + */ +function getMongodConns({st, rst}) { + assert(st || rst); + assert(!st || !rst); + const conns = []; + if (st) { + st._rs.forEach((rst) => { + rst.nodes.forEach(node => conns.push(node)); + }); + } else { + rst.nodes.forEach(node => conns.push(node)); + } + return conns; +} diff --git a/jstests/sharding/analyze_shard_key/libs/monotonicity_common.js b/jstests/sharding/analyze_shard_key/libs/monotonicity_common.js index 9e0db01703240..a105b1415973e 100644 --- a/jstests/sharding/analyze_shard_key/libs/monotonicity_common.js +++ b/jstests/sharding/analyze_shard_key/libs/monotonicity_common.js @@ -29,6 +29,9 @@ const kOrderTypes = [ } ]; +const numNodesPerRS = 2; +const insertBatchSize = 1000; + /** * Appends the field of the specified name and type to the given documents such that the field value * is identical across the documents. @@ -234,30 +237,44 @@ function testMonotonicity(conn, dbName, collName, currentShardKey, testCases, nu tojson({dbName, collName, currentShardKey, numDocs, testCase})}`); assert.commandWorked(coll.createIndex(testCase.indexKey)); + // To reduce the insertion order noise caused by parallel oplog application on + // secondaries, insert the documents in multiple batches. const docs = makeDocuments(numDocs, fieldOpts); - assert.commandWorked(db.runCommand({insert: collName, documents: docs, ordered: true})); - - const res = - assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: testCase.shardKey})); + let currIndex = 0; + while (currIndex < docs.length) { + const endIndex = currIndex + insertBatchSize; + assert.commandWorked(db.runCommand({ + insert: collName, + documents: docs.slice(currIndex, endIndex), + // Wait for secondaries to have replicated the writes. + writeConcern: {w: numNodesPerRS} + })); + currIndex = endIndex; + } - const listCollectionRes = - assert.commandWorked(db.runCommand({listCollections: 1, filter: {name: collName}})); - const isClusteredColl = - listCollectionRes.cursor.firstBatch[0].options.hasOwnProperty("clusteredIndex"); + const res = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: testCase.shardKey, + // Skip calculating the read and write distribution metrics since there are not needed + // by this test. + readWriteDistribution: false + })); + const metrics = res.keyCharacteristics; + const isClusteredColl = AnalyzeShardKeyUtil.isClusterCollection(conn, dbName, collName); const expectedType = isClusteredColl ? 
"unknown" : testCase.expected; - assert.eq(res.monotonicity.type, expectedType, res); + assert.eq(metrics.monotonicity.type, expectedType, res); if (expectedType == "unknown") { - assert(!res.monotonicity.hasOwnProperty("recordIdCorrelationCoefficient")); + assert(!metrics.monotonicity.hasOwnProperty("recordIdCorrelationCoefficient")); } else { - assert(res.monotonicity.hasOwnProperty("recordIdCorrelationCoefficient")); + assert(metrics.monotonicity.hasOwnProperty("recordIdCorrelationCoefficient")); if (expectedType == "monotonic") { - assert.gte(Math.abs(res.monotonicity.recordIdCorrelationCoefficient), + assert.gte(Math.abs(metrics.monotonicity.recordIdCorrelationCoefficient), correlationCoefficientThreshold); } else if (expectedType == "not monotonic") { - assert.lt(Math.abs(res.monotonicity.recordIdCorrelationCoefficient), + assert.lt(Math.abs(metrics.monotonicity.recordIdCorrelationCoefficient), correlationCoefficientThreshold); } else { throw new Error("Unknown expected monotonicity '" + expectedType + "'"); diff --git a/jstests/sharding/analyze_shard_key/libs/query_sampling_util.js b/jstests/sharding/analyze_shard_key/libs/query_sampling_util.js index 51c4011d28fc6..21888a4f2da1d 100644 --- a/jstests/sharding/analyze_shard_key/libs/query_sampling_util.js +++ b/jstests/sharding/analyze_shard_key/libs/query_sampling_util.js @@ -2,6 +2,7 @@ * Utilities for testing query sampling. */ var QuerySamplingUtil = (function() { + load("jstests/libs/fail_point_util.js"); load("jstests/libs/uuid_util.js"); load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); @@ -27,15 +28,76 @@ var QuerySamplingUtil = (function() { } /** - * Waits for the given node to have at least one active collection for query sampling. If - * 'waitForTokens' is true, additionally waits for the sampling bucket to contain at least one - * second of tokens. + * Returns the query sampling current op documents that match the given filter. */ - function waitForActiveSampling(node, waitForTokens = true) { + function getQuerySamplingCurrentOp(conn, filter) { + return conn.getDB("admin") + .aggregate([ + {$currentOp: {allUsers: true, localOps: true}}, + {$match: Object.assign({desc: "query analyzer"}, filter)}, + ]) + .toArray(); + } + + /** + * Waits for the query sampling for the collection with the namespace and collection uuid + * to be active on the given node. + */ + function waitForActiveSamplingOnNode(node, ns, collUuid) { + jsTest.log("Start waiting for active sampling " + tojsononeline({node, ns, collUuid})); + let numTries = 0; assert.soon(() => { - const res = assert.commandWorked(node.adminCommand({serverStatus: 1})); - assert(res.hasOwnProperty("queryAnalyzers")); - return res.queryAnalyzers.activeCollections >= 1; + numTries++; + + const docs = getQuerySamplingCurrentOp(node, {ns, collUuid}); + if (docs.length == 1) { + return true; + } + assert.eq(docs.length, 0, docs); + + if (numTries % 100 == 0) { + jsTest.log("Still waiting for active sampling " + + tojson({node, ns, collUuid, docs})); + } + return false; + }); + jsTest.log("Finished waiting for active sampling " + tojsononeline({node, ns, collUuid})); + } + + /** + * Waits for the query sampling for the collection with the namespace and collection uuid + * to be inactive on the given node. 
+ */ + function waitForInactiveSamplingOnNode(node, ns, collUuid) { + jsTest.log("Start waiting for inactive sampling " + tojsononeline({node, ns, collUuid})); + let numTries = 0; + assert.soon(() => { + numTries++; + + const docs = getQuerySamplingCurrentOp(node, {ns, collUuid}); + if (docs.length == 0) { + return true; + } + assert.eq(docs.length, 1, docs); + + if (numTries % 100 == 0) { + jsTest.log("Still waiting for inactive sampling " + + tojson({node, ns, collUuid, docs})); + } + return false; + }); + jsTest.log("Finished waiting for inactive sampling " + tojsononeline({node, ns, collUuid})); + } + + /** + * Waits for the query sampling for the collection with the namespace and collection uuid + * to be active on all nodes in the given replica set. If 'waitForTokens' is true, additionally + * waits for the sampling bucket to contain at least one second of tokens. + */ + function waitForActiveSamplingReplicaSet(rst, ns, collUuid, waitForTokens = true) { + rst.nodes.forEach(node => { + // Skip waiting for tokens now and just wait once at the end if needed. + waitForActiveSamplingOnNode(node, ns, collUuid, false /* waitForTokens */); }); if (waitForTokens) { // Wait for the bucket to contain at least one second of tokens. @@ -44,38 +106,86 @@ var QuerySamplingUtil = (function() { } /** - * Waits for the given node to have no active collections for query sampling. + * Waits for the query sampling for the collection with the namespace and collection uuid + * to be inactive on all nodes in the given replica set. */ - function waitForInactiveSampling(node) { - assert.soon(() => { - const res = assert.commandWorked(node.adminCommand({serverStatus: 1})); - return res.queryAnalyzers.activeCollections == 0; + function waitForInactiveSamplingReplicaSet(rst, ns, collUuid) { + rst.nodes.forEach(node => { + waitForInactiveSamplingOnNode(node, ns, collUuid); }); } /** - * Waits for all shard nodes to have one active collection for query sampling. + * Waits for the query sampling for the collection with the namespace and collection uuid + * to be active on all mongos and shardsvr mongod nodes in the given sharded cluster. */ - function waitForActiveSamplingOnAllShards(st) { - st._rs.forEach(rs => { - rs.nodes.forEach(node => { - // Skip waiting for tokens now and just wait once at the end. - waitForActiveSampling(node, false /* waitForTokens */); + function waitForActiveSamplingShardedCluster(st, ns, collUuid, {skipMongoses} = {}) { + if (!skipMongoses) { + st.forEachMongos(mongos => { + waitForActiveSamplingOnNode(mongos, ns, collUuid); }); + } + st._rs.forEach(rst => { + // Skip waiting for tokens now and just wait once at the end if needed. + waitForActiveSamplingReplicaSet(rst, ns, collUuid, false /* waitForTokens */); }); // Wait for the bucket to contain at least one second of tokens. sleep(1000); } /** - * Waits for all shard nodes to have no active collection for query sampling. + * Waits for the query sampling for the collection with the namespace and collection uuid + * to be inactive on all mongos and shardsvr mongod nodes in the given sharded cluster. 
*/ - function waitForInactiveSamplingOnAllShards(st) { - st._rs.forEach(rs => { - rs.nodes.forEach(node => { - waitForInactiveSampling(node); + function waitForInactiveSamplingShardedCluster(st, ns, collUuid) { + st.forEachMongos(mongos => { + waitForInactiveSamplingOnNode(mongos, ns, collUuid); + }); + st._rs.forEach(rst => { + waitForInactiveSamplingReplicaSet(rst, ns, collUuid); + }); + } + + /** + * Waits for the query sampling for the collection with the namespace and collection uuid + * to be active on all nodes in the given replica set or sharded cluster. + */ + function waitForActiveSampling(ns, collUuid, {rst, st}) { + assert(rst || st); + assert(!rst || !st); + if (st) { + waitForActiveSamplingShardedCluster(st, ns, collUuid); + } else { + waitForActiveSamplingReplicaSet(rst, ns, collUuid); + } + } + + /** + * Waits for the query sampling for the collection with the namespace and collection uuid + * to be inactive on all nodes in the given replica set or sharded cluster. + */ + function waitForInactiveSampling(ns, collUuid, {rst, st}) { + assert(rst || st); + assert(!rst || !st); + if (st) { + waitForInactiveSamplingShardedCluster(st, ns, collUuid); + } else { + waitForInactiveSamplingReplicaSet(rst, ns, collUuid); + } + } + + /** + * Forces samples to get persisted whether or not query sampling is active. + */ + function skipActiveSamplingCheckWhenPersistingSamples(st) { + st._rs.forEach(rst => { + rst.nodes.forEach(node => { + configureFailPoint(node, "queryAnalysisWriterSkipActiveSamplingCheck"); }); }); + st.configRS.nodes.forEach(node => { + configureFailPoint(node, "queryAnalysisWriterSkipActiveSamplingCheck"); + }); } /** @@ -327,10 +437,13 @@ var QuerySamplingUtil = (function() { generateRandomString, generateRandomCollation, makeCmdObjIgnoreSessionInfo, + waitForActiveSamplingReplicaSet, + waitForInactiveSamplingReplicaSet, + waitForActiveSamplingShardedCluster, + waitForInactiveSamplingShardedCluster, waitForActiveSampling, waitForInactiveSampling, - waitForActiveSamplingOnAllShards, - waitForInactiveSamplingOnAllShards, + skipActiveSamplingCheckWhenPersistingSamples, assertSubObject, assertSoonSampledQueryDocuments, assertSoonSampledQueryDocumentsAcrossShards, diff --git a/jstests/sharding/analyze_shard_key/libs/sample_nested_agg_queries_common.js b/jstests/sharding/analyze_shard_key/libs/sample_nested_agg_queries_common.js index 38a39b3ed2ef1..3f2d28c39fe2f 100644 --- a/jstests/sharding/analyze_shard_key/libs/sample_nested_agg_queries_common.js +++ b/jstests/sharding/analyze_shard_key/libs/sample_nested_agg_queries_common.js @@ -2,15 +2,15 @@ * Utilities for testing basic support for sampling nested aggregate queries (i.e. ones inside * $lookup, $graphLookup, $unionWith) on a sharded cluster. */ - -load("jstests/libs/sbe_util.js"); // For checkSBEEnabled. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; // Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a // period of 1 second to speed up the test. -const queryAnalysisSamplerConfigurationRefreshSecs = 1; -const queryAnalysisWriterIntervalSecs = 1; +export const queryAnalysisSamplerConfigurationRefreshSecs = 1; + +export const queryAnalysisWriterIntervalSecs = 1; -const outerAggTestCases = [ +export const outerAggTestCases = [ // The test cases for singly-nested aggregate queries. 
{ name: "lookup_custom_pipeline", @@ -29,15 +29,15 @@ const outerAggTestCases = [ ]; }, requireShardToRouteFunc: (db, collName, isShardedColl) => { - // When SBE is enabled, if the collection is not sharded and not clustered, the shard - // will not create a separate pipeline to execute the inner side of a $lookup stage so - // there is no nested aggregate query to route. const listCollectionRes = assert.commandWorked(db.runCommand({listCollections: 1, filter: {name: collName}})); const isClusteredColl = listCollectionRes.cursor.firstBatch[0].options.hasOwnProperty("clusteredIndex"); - const isEligibleForSBELookupPushdown = - checkSBEEnabled(db) && !isShardedColl && !isClusteredColl; + + // When SBE is used, the shard will not create a separate pipeline to execute the inner + // side of a $lookup stage so there is no nested aggregate query to route, because SBE + // does $lookup pushdown whereas Classic does not. + const isEligibleForSBELookupPushdown = !isShardedColl && checkSBEEnabled(db); return !isEligibleForSBELookupPushdown; } }, @@ -187,7 +187,7 @@ const outerAggTestCases = [ } ]; -const innerAggTestCases = [ +export const innerAggTestCases = [ { // The filter is in the first stage. containInitialFilter: true, @@ -215,17 +215,17 @@ const innerAggTestCases = [ * Tests that a nested aggregate query run internally by an aggregation stage that takes in a * "pipeline" is sampled correctly. */ -function testCustomInnerPipeline(makeOuterPipelineFunc, - makeInnerPipelineFunc, - containInitialFilter, - st, - dbName, - localCollName, - foreignCollName, - filter, - shardNames, - explain, - requireShardToRoute) { +export function testCustomInnerPipeline(makeOuterPipelineFunc, + makeInnerPipelineFunc, + containInitialFilter, + st, + dbName, + localCollName, + foreignCollName, + filter, + shardNames, + explain, + requireShardToRoute) { const mongosDB = st.s.getDB(dbName); const foreignNs = dbName + "." + foreignCollName; const foreignCollUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, foreignCollName); @@ -266,13 +266,13 @@ function testCustomInnerPipeline(makeOuterPipelineFunc, * Tests that a nested aggregate query run internally by an aggregation stage that does not take in * a "pipeline" is sampled correctly. */ -function testNoCustomInnerPipeline(makeOuterPipelineFunc, - st, - dbName, - localCollName, - foreignCollName, - explain, - requireShardToRoute) { +export function testNoCustomInnerPipeline(makeOuterPipelineFunc, + st, + dbName, + localCollName, + foreignCollName, + explain, + requireShardToRoute) { const mongosDB = st.s.getDB(dbName); const foreignNs = dbName + "." 
+ foreignCollName; const foreignCollUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, foreignCollName); diff --git a/jstests/sharding/analyze_shard_key/libs/sampling_current_op_and_server_status_common.js b/jstests/sharding/analyze_shard_key/libs/sampling_current_op_and_server_status_common.js index 399ecc2b77c9d..038a943f493ed 100644 --- a/jstests/sharding/analyze_shard_key/libs/sampling_current_op_and_server_status_common.js +++ b/jstests/sharding/analyze_shard_key/libs/sampling_current_op_and_server_status_common.js @@ -24,7 +24,7 @@ function validateCurrentOpMongos(currentOp) { assert(currentOp.hasOwnProperty("desc"), currentOp); assert(currentOp.hasOwnProperty("ns"), currentOp); assert(currentOp.hasOwnProperty("collUuid"), currentOp); - assert(currentOp.hasOwnProperty("sampleRate"), currentOp); + assert(currentOp.hasOwnProperty("samplesPerSecond"), currentOp); assert(currentOp.hasOwnProperty("startTime"), currentOp); assert(currentOp.hasOwnProperty("sampledReadsCount"), currentOp); assert(currentOp.hasOwnProperty("sampledWritesCount"), currentOp); @@ -36,7 +36,7 @@ function validateCurrentOpMongod(currentOp, isShardSvr) { assert(currentOp.hasOwnProperty("desc"), currentOp); assert(currentOp.hasOwnProperty("ns"), currentOp); assert(currentOp.hasOwnProperty("collUuid"), currentOp); - assert.eq(currentOp.hasOwnProperty("sampleRate"), !isShardSvr, currentOp); + assert.eq(currentOp.hasOwnProperty("samplesPerSecond"), !isShardSvr, currentOp); assert(currentOp.hasOwnProperty("startTime"), currentOp); assert(currentOp.hasOwnProperty("sampledReadsCount"), currentOp); assert(currentOp.hasOwnProperty("sampledWritesCount"), currentOp); @@ -103,14 +103,18 @@ const opKindNoop = 2; * attached in 'oldState'. */ function assertCurrentOpAndServerStatusMongos( - ns, opKind, oldState, newState, {expectedSampleRate} = {}) { + ns, opKind, oldState, newState, {expectedSamplesPerSecond} = {}) { const errMsg = {opKind, oldState, newState}; validateCurrentOpMongos(newState.currentOp[0]); assert.eq(newState.currentOp.length, 1, errMsg); assert.eq(newState.currentOp[0].ns, ns, errMsg); - if (expectedSampleRate !== undefined) { - assert.eq(newState.currentOp[0].sampleRate, expectedSampleRate, errMsg); + if (expectedSamplesPerSecond !== undefined) { + if (newState.currentOp[0].samplesPerSecond != expectedSamplesPerSecond) { + jsTest.log("The actual sample rate doesn't match the expected sample rate " + + tojson({errMsg, expectedSamplesPerSecond})); + return false; + } } validateServerStatusMongos(newState.serverStatus); diff --git a/jstests/sharding/analyze_shard_key/list_sampled_queries.js b/jstests/sharding/analyze_shard_key/list_sampled_queries.js index 0d4bc7e0cdd11..fe9fc996e45d6 100644 --- a/jstests/sharding/analyze_shard_key/list_sampled_queries.js +++ b/jstests/sharding/analyze_shard_key/list_sampled_queries.js @@ -12,14 +12,17 @@ load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); load("jstests/sharding/analyze_shard_key/libs/sampling_current_op_and_server_status_common.js"); -const sampleRate = 10000; +const samplesPerSecond = 10000; + +const queryAnalysisSamplerConfigurationRefreshSecs = 1; const queryAnalysisWriterIntervalSecs = 1; const mongodSetParameterOpts = { + queryAnalysisSamplerConfigurationRefreshSecs, queryAnalysisWriterIntervalSecs, }; const mongosSetParameterOpts = { - queryAnalysisSamplerConfigurationRefreshSecs: 1, + queryAnalysisSamplerConfigurationRefreshSecs, }; function 
insertDocuments(collection, numDocs) { @@ -30,23 +33,21 @@ function insertDocuments(collection, numDocs) { assert.commandWorked(bulk.execute()); } -function runTest(conn, st) { - const dbName = "test"; - const collName0 = "coll0"; - const collName1 = "coll1"; +function runTest(conn, {rst, st}) { + assert(rst || st); + assert(!rst || !st); + + const dbName = "testDb"; + const collName0 = "testColl0"; + const collName1 = "testColl1"; const ns0 = dbName + "." + collName0; const ns1 = dbName + "." + collName1; const numDocs = 100; const adminDb = conn.getDB("admin"); - const configDb = conn.getDB("config"); const testDb = conn.getDB(dbName); const collection0 = testDb.getCollection(collName0); const collection1 = testDb.getCollection(collName1); - insertDocuments(collection0, numDocs); - insertDocuments(collection1, numDocs); - const collUuid0 = QuerySamplingUtil.getCollectionUuid(testDb, collName0); - const collUuid1 = QuerySamplingUtil.getCollectionUuid(testDb, collName1); if (st) { // Shard collection1 and move one chunk to shard1. @@ -60,102 +61,186 @@ function runTest(conn, st) { conn.adminCommand({moveChunk: ns1, find: {x: 0}, to: st.shard1.shardName})); } - conn.adminCommand({configureQueryAnalyzer: ns0, mode: "full", sampleRate}); - conn.adminCommand({configureQueryAnalyzer: ns1, mode: "full", sampleRate}); - QuerySamplingUtil.waitForActiveSampling(conn, true); + insertDocuments(collection0, numDocs); + insertDocuments(collection1, numDocs); + const collUuid0 = QuerySamplingUtil.getCollectionUuid(testDb, collName0); + const collUuid1 = QuerySamplingUtil.getCollectionUuid(testDb, collName1); + + jsTest.log( + "Test running a $listSampledQueries aggregate command while there are no sampled queries"); + let actualSamples = adminDb.aggregate([{$listSampledQueries: {}}]).toArray(); + assert.eq(actualSamples.length, 0); + + conn.adminCommand({configureQueryAnalyzer: ns0, mode: "full", samplesPerSecond}); + conn.adminCommand({configureQueryAnalyzer: ns1, mode: "full", samplesPerSecond}); + QuerySamplingUtil.waitForActiveSampling(ns0, collUuid0, {rst, st}); + QuerySamplingUtil.waitForActiveSampling(ns1, collUuid1, {rst, st}); - // Create read samples on collection0. let expectedSamples = []; - assert.commandWorked( - testDb.runCommand({aggregate: collName0, pipeline: [{$match: {x: 1}}], cursor: {}})); - expectedSamples["aggregate"] = { + // Use this to identify expected samples later. + let sampleNum = -1; + const getSampleNum = (sample) => { + switch (sample.cmdName) { + case "aggregate": + case "count": + case "distinct": + case "find": + return sample.cmd.filter.sampleNum; + case "update": + case "delete": + case "findAndModify": + return sample.cmd.let.sampleNum; + default: + throw Error("Unexpected command name"); + } + }; + + let numSamplesColl0 = 0; + let numSamplesColl1 = 0; + + // Create read samples on collection0. 
+ const aggregateFilter = {x: 1, sampleNum: ++sampleNum}; + assert.commandWorked(testDb.runCommand( + {aggregate: collName0, pipeline: [{$match: aggregateFilter}], cursor: {}})); + expectedSamples[sampleNum] = { ns: ns0, collectionUuid: collUuid0, cmdName: "aggregate", - cmd: {filter: {x: 1}, collation: {locale: "simple"}} + cmd: {filter: aggregateFilter, collation: {locale: "simple"}} }; - assert.commandWorked(testDb.runCommand({count: collName0, query: {x: -1}})); - expectedSamples["count"] = - {ns: ns0, collectionUuid: collUuid0, cmdName: "count", cmd: {filter: {x: -1}}}; - assert.commandWorked(testDb.runCommand({distinct: collName0, key: "x", query: {x: 2}})); - expectedSamples["distinct"] = - {ns: ns0, collectionUuid: collUuid0, cmdName: "distinct", cmd: {filter: {x: 2}}}; - assert.commandWorked(testDb.runCommand({find: collName0, filter: {x: -3}, collation: {}})); - expectedSamples["find"] = { + numSamplesColl0++; + + const countFilter = {x: -1, sampleNum: ++sampleNum}; + assert.commandWorked(testDb.runCommand({count: collName0, query: countFilter})); + expectedSamples[sampleNum] = + {ns: ns0, collectionUuid: collUuid0, cmdName: "count", cmd: {filter: countFilter}}; + numSamplesColl0++; + + const distinctFilter = {x: 2, sampleNum: ++sampleNum}; + assert.commandWorked(testDb.runCommand({distinct: collName0, key: "x", query: distinctFilter})); + expectedSamples[sampleNum] = + {ns: ns0, collectionUuid: collUuid0, cmdName: "distinct", cmd: {filter: distinctFilter}}; + numSamplesColl0++; + + const findFilter = {x: -3, sampleNum: ++sampleNum}; + assert.commandWorked(testDb.runCommand({find: collName0, filter: findFilter, collation: {}})); + expectedSamples[sampleNum] = { ns: ns0, collectionUuid: collUuid0, cmdName: "find", - cmd: {filter: {x: -3}, collation: {}} + cmd: {filter: findFilter, collation: {}} }; + numSamplesColl0++; // Create write samples on collection1. const updateCmdObj = { update: collName1, - updates: [{q: {x: 4}, u: [{$set: {y: 1}}], multi: false}] + updates: [{q: {x: 4}, u: [{$set: {y: 1}}], multi: false}], + let : {sampleNum: ++sampleNum} }; assert.commandWorked(testDb.runCommand(updateCmdObj)); - expectedSamples["update"] = { + expectedSamples[sampleNum] = { ns: ns1, collectionUuid: collUuid1, cmdName: "update", cmd: Object.assign({}, updateCmdObj, {$db: dbName}) }; - const findAndModifyCmdObj = - {findAndModify: collName1, query: {x: 5}, sort: {x: 1}, update: {$set: {z: 1}}}; + numSamplesColl1++; + + const findAndModifyCmdObj = { + findAndModify: collName1, + query: {x: 5}, + sort: {x: 1}, + update: {$set: {z: 1}}, + let : {sampleNum: ++sampleNum} + }; assert.commandWorked(testDb.runCommand(findAndModifyCmdObj)); - expectedSamples["findAndModify"] = { + expectedSamples[sampleNum] = { ns: ns1, collectionUuid: collUuid1, cmdName: "findAndModify", cmd: Object.assign({}, findAndModifyCmdObj, {$db: dbName}) }; - const deleteCmdObj = {delete: collName1, deletes: [{q: {x: -6}, limit: 1}]}; + numSamplesColl1++; + + const deleteCmdObj = { + delete: collName1, + deletes: [{q: {x: -6}, limit: 1}], + let : {sampleNum: ++sampleNum} + }; assert.commandWorked(testDb.runCommand(deleteCmdObj)); - expectedSamples["delete"] = { + expectedSamples[sampleNum] = { ns: ns1, collectionUuid: collUuid1, cmdName: "delete", cmd: Object.assign({}, deleteCmdObj, {$db: dbName}) }; + numSamplesColl1++; + jsTest.log("Test running a $listSampledQueries aggregate command that doesn't involve " + + "getMore commands"); // Verify samples on both collections. 
- let response; assert.soon(() => { - response = assert.commandWorked(adminDb.runCommand( - {aggregate: 1, pipeline: [{$listSampledQueries: {}}, {$sort: {ns: 1}}], cursor: {}})); - return response.cursor.firstBatch.length == 7; + actualSamples = adminDb.aggregate([{$listSampledQueries: {}}, {$sort: {ns: 1}}]).toArray(); + return actualSamples.length >= (numSamplesColl0 + numSamplesColl1); }); - let samples = response.cursor.firstBatch; - samples.forEach((sample) => { + assert.eq(actualSamples.length, numSamplesColl0 + numSamplesColl1); + actualSamples.forEach((sample) => { AnalyzeShardKeyUtil.validateSampledQueryDocument(sample); - QuerySamplingUtil.assertSubObject(sample, expectedSamples[sample.cmdName]); + QuerySamplingUtil.assertSubObject(sample, expectedSamples[getSampleNum(sample)]); }); // Verify that listing for collection0 returns only collection0 samples. assert.soon(() => { - response = assert.commandWorked(adminDb.runCommand( - {aggregate: 1, pipeline: [{$listSampledQueries: {namespace: ns0}}], cursor: {}})); - return response.cursor.firstBatch.length == 4; + actualSamples = adminDb.aggregate([{$listSampledQueries: {namespace: ns0}}]).toArray(); + return actualSamples.length >= numSamplesColl0; }); - samples = response.cursor.firstBatch; - samples.forEach((sample) => { + assert.eq(actualSamples.length, numSamplesColl0); + actualSamples.forEach((sample) => { AnalyzeShardKeyUtil.validateSampledQueryDocument(sample); - QuerySamplingUtil.assertSubObject(sample, expectedSamples[sample.cmdName]); + QuerySamplingUtil.assertSubObject(sample, expectedSamples[getSampleNum(sample)]); }); // Verify that listing for collection1 returns only collection1 samples. assert.soon(() => { - response = assert.commandWorked(adminDb.runCommand( - {aggregate: 1, pipeline: [{$listSampledQueries: {namespace: ns1}}], cursor: {}})); - return response.cursor.firstBatch.length == 3; + actualSamples = adminDb.aggregate([{$listSampledQueries: {namespace: ns1}}]).toArray(); + return actualSamples.length >= numSamplesColl1; }); - samples = response.cursor.firstBatch; - samples.forEach((sample) => { + assert.eq(actualSamples.length, numSamplesColl1); + actualSamples.forEach((sample) => { AnalyzeShardKeyUtil.validateSampledQueryDocument(sample); - QuerySamplingUtil.assertSubObject(sample, expectedSamples[sample.cmdName]); + QuerySamplingUtil.assertSubObject(sample, expectedSamples[getSampleNum(sample)]); }); - // Verify that running on a database other than "admin" results in error. + jsTest.log("Test running a $listSampledQueries aggregate command that involves getMore " + + "commands"); + // Make the number of sampled queries larger than the batch size so that getMore commands are + // required when $listSampledQueries is run. + const batchSize = 101; + for (let i = 0; i < 250; i++) { + const sign = (i % 2 == 0) ? 
1 : -1; + const findFilter = {x: sign * 7, sampleNum: ++sampleNum}; + assert.commandWorked( + testDb.runCommand({find: collName1, filter: findFilter, collation: {}})); + expectedSamples[sampleNum] = { + ns: ns1, + collectionUuid: collUuid1, + cmdName: "find", + cmd: {filter: findFilter, collation: {}} + }; + numSamplesColl1++; + } + assert.soon(() => { + actualSamples = adminDb.aggregate([{$listSampledQueries: {}}], {batchSize}).toArray(); + return actualSamples.length >= expectedSamples.length; + }); + assert.eq(actualSamples.length, expectedSamples.length); + actualSamples.forEach((sample) => { + AnalyzeShardKeyUtil.validateSampledQueryDocument(sample); + QuerySamplingUtil.assertSubObject(sample, expectedSamples[getSampleNum(sample)]); + }); + + jsTest.log("Test that running on a database other than \"admin\" results in error"); assert.commandFailedWithCode( testDb.runCommand({aggregate: 1, pipeline: [{$listSampledQueries: {}}], cursor: {}}), ErrorCodes.InvalidNamespace); @@ -169,7 +254,7 @@ function runTest(conn, st) { mongosOptions: {setParameter: mongosSetParameterOpts} }); - runTest(st.s, st); + runTest(st.s, {st}); st.stop(); } @@ -178,8 +263,9 @@ function runTest(conn, st) { const rst = new ReplSetTest({nodes: 2, nodeOptions: {setParameter: mongodSetParameterOpts}}); rst.startSet(); rst.initiate(); + const primary = rst.getPrimary(); - runTest(rst.getPrimary()); + runTest(primary, {rst}); rst.stopSet(); } @@ -188,7 +274,7 @@ function runTest(conn, st) { // replica sets. if (!TestData.auth) { const rst = new ReplSetTest({ - name: jsTest.name() + "_umltitenant", + name: jsTest.name() + "_multitenant", nodes: 2, nodeOptions: {setParameter: {multitenancySupport: true}} }); diff --git a/jstests/sharding/analyze_shard_key/list_sampled_queries_auth.js b/jstests/sharding/analyze_shard_key/list_sampled_queries_auth.js index 0d9a3256e0b08..df771e8fcdbab 100644 --- a/jstests/sharding/analyze_shard_key/list_sampled_queries_auth.js +++ b/jstests/sharding/analyze_shard_key/list_sampled_queries_auth.js @@ -1,7 +1,7 @@ /** * Test to validate the privileges required by the listSampledQuery aggregation stage. * - * @tags: [requires_fcv_70, featureFlagAnalyzeShardKey] + * @tags: [requires_fcv_70] */ (function() { @@ -14,10 +14,6 @@ function runTest(conn) { const dbName = "testDb"; const collName0 = "testColl0"; const collName1 = "testColl1"; - const ns0 = dbName + "." + collName0; - const ns1 = dbName + "." + collName1; - - const sampleRate = 10000; const adminDb = conn.getDB("admin"); assert.commandWorked( diff --git a/jstests/sharding/analyze_shard_key/monotonicity_after_chunk_migration.js b/jstests/sharding/analyze_shard_key/monotonicity_after_chunk_migration.js index 9ff1c53049d90..7b52b92df7be1 100644 --- a/jstests/sharding/analyze_shard_key/monotonicity_after_chunk_migration.js +++ b/jstests/sharding/analyze_shard_key/monotonicity_after_chunk_migration.js @@ -9,16 +9,7 @@ load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); -const st = new ShardingTest({ - shards: 2, - rs: { - nodes: 2, - setParameter: { - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) - } - } -}); +const st = new ShardingTest({shards: 2, rs: {nodes: 1}}); const dbName = "testDb"; const collName = "testColl"; @@ -51,8 +42,14 @@ const isClusteredColl = listCollectionRes.cursor.firstBatch[0].options.hasOwnProperty("clusteredIndex"); const expectedType = isClusteredColl ? 
"unknown" : "monotonic"; -const res0 = assert.commandWorked(st.s.adminCommand({analyzeShardKey: ns, key: {x: 1}})); -assert.eq(res0.monotonicity.type, expectedType, res0); +const res0 = assert.commandWorked(st.s.adminCommand({ + analyzeShardKey: ns, + key: {x: 1}, + // Skip calculating the read and write distribution metrics since there are not needed by + // this test. + readWriteDistribution: false +})); +assert.eq(res0.keyCharacteristics.monotonicity.type, expectedType, res0); // Make the collection have the following chunks: // shard0: [MinKey, -1000] (10000 documents) @@ -63,8 +60,14 @@ assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: -1000}, to: st. // If mongos forwards the command to shard1 instead of shard0 (primary shard), the monotonicity // check will find that the documents [0, 2000] were inserted before the documents [-1000, 0] and // return that the shard key is not monotonically changing. -const res1 = assert.commandWorked(st.s.adminCommand({analyzeShardKey: ns, key: {x: 1}})); -assert.eq(res1.monotonicity.type, expectedType, res1); +const res1 = assert.commandWorked(st.s.adminCommand({ + analyzeShardKey: ns, + key: {x: 1}, + // Skip calculating the read and write distribution metrics since there are not needed by + // this test. + readWriteDistribution: false +})); +assert.eq(res1.keyCharacteristics.monotonicity.type, expectedType, res1); st.stop(); })(); diff --git a/jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_compound.js b/jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_compound.js index e071aadf4e7b1..acfe7c40e2467 100644 --- a/jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_compound.js +++ b/jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_compound.js @@ -17,8 +17,11 @@ load("jstests/sharding/analyze_shard_key/libs/monotonicity_common.js"); // collection. The order refers to whether the value is constant, fluctuating, increasing or // decreasing. // - 'expected' is the expected monotonicity. Since the shard key is compound, its monotonicity is -// determined by the monotonicity of the first non-constant shard key field. If the field is -// hashed, then it is not monotonic since hashing guarantees randomness. +// determined by the monotonicity of the first non-constant shard key field. However, the +// monotonicity of a hashed shard key cannot inferred from the recordIds in the index since +// hashing introduces randomness. So the analyzeShardKey command handles hashed shard keys as +// follows. If the first field is hashed, it returns "not monotonic". Otherwise, it returns +// "unknown". const testCases = []; for (let orderType0 of kOrderTypes) { @@ -37,7 +40,7 @@ for (let orderType0 of kOrderTypes) { {name: fieldName0, type: fieldType0, order: orderType0.name}, {name: fieldName1, type: fieldType1, order: orderType1.name} ], - expected: orderType0.name == "constant" ? orderType1.monotonicity : "not monotonic" + expected: "not monotonic" }); // Test compound shard key without a hashed prefix. @@ -48,27 +51,20 @@ for (let orderType0 of kOrderTypes) { {name: fieldName0, type: fieldType0, order: orderType0.name}, {name: fieldName1, type: fieldType1, order: orderType1.name} ], - expected: orderType0.monotonicity + expected: "unknown" }); } } // This test requires the collection to contain at least a few thousands of documents to smooth out -// the noise in the insertion order caused by the oplog application batching on secondaries. 
+// the insertion order noise caused by parallel oplog application on secondaries. const numDocsRange = { - min: 2500, - max: 5000 -}; - -const setParameterOpts = { - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) + min: 7500, + max: 10000 }; { - const st = new ShardingTest({shards: 2, rs: {nodes: 2, setParameter: setParameterOpts}}); + const st = new ShardingTest({shards: 2, rs: {nodes: numNodesPerRS}}); testAnalyzeShardKeysUnshardedCollection(st.s, testCases, numDocsRange); testAnalyzeShardKeysShardedCollection(st, testCases, numDocsRange); @@ -77,7 +73,7 @@ const setParameterOpts = { } { - const rst = new ReplSetTest({nodes: 2, nodeOptions: {setParameter: setParameterOpts}}); + const rst = new ReplSetTest({nodes: numNodesPerRS}); rst.startSet(); rst.initiate(); const primary = rst.getPrimary(); diff --git a/jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_non_compound.js b/jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_non_compound.js index 6439daab767bb..52f31bc14337a 100644 --- a/jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_non_compound.js +++ b/jstests/sharding/analyze_shard_key/monotonicity_hashed_sharding_non_compound.js @@ -49,21 +49,14 @@ for (let orderType0 of kOrderTypes) { } // This test requires the collection to contain at least a few thousands of documents to smooth out -// the noise in the insertion order caused by the oplog application batching on secondaries. +// the insertion order noise caused by parallel oplog application on secondaries. const numDocsRange = { - min: 2500, - max: 5000 -}; - -const setParameterOpts = { - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) + min: 7500, + max: 10000 }; { - const st = new ShardingTest({shards: 2, rs: {nodes: 2, setParameter: setParameterOpts}}); + const st = new ShardingTest({shards: 2, rs: {nodes: numNodesPerRS}}); testAnalyzeShardKeysUnshardedCollection(st.s, testCases, numDocsRange); testAnalyzeShardKeysShardedCollection(st, testCases, numDocsRange); @@ -72,7 +65,7 @@ const setParameterOpts = { } { - const rst = new ReplSetTest({nodes: 2, nodeOptions: {setParameter: setParameterOpts}}); + const rst = new ReplSetTest({nodes: numNodesPerRS}); rst.startSet(); rst.initiate(); const primary = rst.getPrimary(); diff --git a/jstests/sharding/analyze_shard_key/monotonicity_range_sharding_compound.js b/jstests/sharding/analyze_shard_key/monotonicity_range_sharding_compound.js index d5b95f6a3bedf..0b9a0624f7ba5 100644 --- a/jstests/sharding/analyze_shard_key/monotonicity_range_sharding_compound.js +++ b/jstests/sharding/analyze_shard_key/monotonicity_range_sharding_compound.js @@ -61,21 +61,14 @@ for (let orderType0 of kOrderTypes) { } // This test requires the collection to contain at least a few thousands of documents to smooth out -// the noise in the insertion order caused by the oplog application batching on secondaries. +// the insertion order noise caused by parallel oplog application on secondaries. const numDocsRange = { - min: 2500, - max: 5000 -}; - -const setParameterOpts = { - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. 
- "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) + min: 7500, + max: 10000 }; { - const st = new ShardingTest({shards: 2, rs: {nodes: 2, setParameter: setParameterOpts}}); + const st = new ShardingTest({shards: 2, rs: {nodes: numNodesPerRS, oplogSize: 500}}); testAnalyzeShardKeysUnshardedCollection(st.s, testCases, numDocsRange); testAnalyzeShardKeysShardedCollection(st, testCases, numDocsRange); @@ -84,7 +77,7 @@ const setParameterOpts = { } { - const rst = new ReplSetTest({nodes: 2, nodeOptions: {setParameter: setParameterOpts}}); + const rst = new ReplSetTest({nodes: numNodesPerRS, oplogSize: 250}); rst.startSet(); rst.initiate(); const primary = rst.getPrimary(); diff --git a/jstests/sharding/analyze_shard_key/monotonicity_range_sharding_non_compound.js b/jstests/sharding/analyze_shard_key/monotonicity_range_sharding_non_compound.js index 1910b76613760..6d84d2ae80a18 100644 --- a/jstests/sharding/analyze_shard_key/monotonicity_range_sharding_non_compound.js +++ b/jstests/sharding/analyze_shard_key/monotonicity_range_sharding_non_compound.js @@ -49,21 +49,14 @@ for (let orderType0 of kOrderTypes) { } // This test requires the collection to contain at least a few thousands of documents to smooth out -// the noise in the insertion order caused by the oplog application batching on secondaries. +// the insertion order noise caused by parallel oplog application on secondaries. const numDocsRange = { min: 7500, max: 10000 }; -const setParameterOpts = { - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) -}; - { - const st = new ShardingTest({shards: 2, rs: {nodes: 2, setParameter: setParameterOpts}}); + const st = new ShardingTest({shards: 2, rs: {nodes: numNodesPerRS}}); testAnalyzeShardKeysUnshardedCollection(st.s, testCases, numDocsRange); testAnalyzeShardKeysShardedCollection(st, testCases, numDocsRange); @@ -72,7 +65,7 @@ const setParameterOpts = { } { - const rst = new ReplSetTest({nodes: 2, nodeOptions: {setParameter: setParameterOpts}}); + const rst = new ReplSetTest({nodes: numNodesPerRS}); rst.startSet(); rst.initiate(); const primary = rst.getPrimary(); diff --git a/jstests/sharding/analyze_shard_key/most_common_values.js b/jstests/sharding/analyze_shard_key/most_common_values.js index 9cda04693057f..7d907459c1043 100644 --- a/jstests/sharding/analyze_shard_key/most_common_values.js +++ b/jstests/sharding/analyze_shard_key/most_common_values.js @@ -31,7 +31,30 @@ const caseInsensitiveCollation = { caseLevel: false }; -function runTest(conn, {isUnique, isShardedColl, st}) { +function setMongodServerParametersReplicaSet(rst, params) { + rst.nodes.forEach(node => { + assert.commandWorked(node.adminCommand(Object.assign({setParameter: 1}, params))); + }); +} + +function setMongodServerParametersShardedCluster(st, params) { + st._rs.forEach(rst => { + setMongodServerParametersReplicaSet(rst, params); + }); +} + +function setMongodServerParameters({st, rst, params}) { + if (st) { + setMongodServerParametersShardedCluster(st, params); + } else if (rst) { + setMongodServerParametersReplicaSet(rst, params); + } +} + +function runTest(conn, {isHashed, isUnique, isShardedColl, st, rst}) { + assert(!isHashed || !isUnique); + jsTest.log("Testing the test cases for " + tojson({isHashed, isUnique, isShardedColl})); + const dbName = "testDb"; const collName = "testColl"; const ns = 
dbName + "." + collName; @@ -45,9 +68,9 @@ function runTest(conn, {isUnique, isShardedColl, st}) { const indexOptions = Object.assign({collation: simpleCollation}, isUnique ? {unique: true} : {}); - assert.commandWorked(coll.createIndex({a: 1}, indexOptions)); - assert.commandWorked(coll.createIndex({"a.y": 1}, indexOptions)); - assert.commandWorked(coll.createIndex({"a.y.ii": 1}, indexOptions)); + assert.commandWorked(coll.createIndex({a: isHashed ? "hashed" : 1}, indexOptions)); + assert.commandWorked(coll.createIndex({"a.y": isHashed ? "hashed" : 1}, indexOptions)); + assert.commandWorked(coll.createIndex({"a.y.ii": isHashed ? "hashed" : 1}, indexOptions)); if (isShardedColl) { assert(!isUnique, @@ -82,9 +105,17 @@ function runTest(conn, {isUnique, isShardedColl, st}) { coll.insert({a: {x: 2, y: {i: 2, ii: new Array(kSize10MB).join("D"), iii: 2}, z: 2}, b: 2}, {writeConcern})); + const testCases = []; + // Verify the analyzeShardKey command truncates large primitive type fields. - const res0 = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: {"a.y.ii": 1}})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res0, { + const cmdObj0 = { + analyzeShardKey: ns, + key: {"a.y.ii": 1}, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + }; + const expectedMetrics0 = { numDocs: 5, isUnique, numDistinctValues: 5, @@ -105,11 +136,18 @@ function runTest(conn, {isUnique, isShardedColl, st}) { }, ], numMostCommonValues - }); + }; + testCases.push({cmdObj: cmdObj0, expectedMetrics: expectedMetrics0}); // Verify the analyzeShardKey command truncates large primitive type subfields. - const res1 = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: {"a.y": 1}})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res1, { + const cmdObj1 = { + analyzeShardKey: ns, + key: {"a.y": 1}, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + }; + const expectedMetrics1 = { numDocs: 5, isUnique, numDistinctValues: 5, @@ -148,11 +186,18 @@ function runTest(conn, {isUnique, isShardedColl, st}) { }, ], numMostCommonValues - }); + }; + testCases.push({cmdObj: cmdObj1, expectedMetrics: expectedMetrics1}); // Verify the analyzeShardKey command truncates large object type subfields. - const res2 = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: {a: 1}})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res2, { + const cmdObj2 = { + analyzeShardKey: ns, + key: {a: 1}, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. 
+ readWriteDistribution: false + }; + const expectedMetrics2 = { numDocs: 5, isUnique, numDistinctValues: 5, @@ -176,26 +221,62 @@ function runTest(conn, {isUnique, isShardedColl, st}) { }, ], numMostCommonValues - }); + }; + testCases.push({cmdObj: cmdObj2, expectedMetrics: expectedMetrics2}); + + const sufficientAccumulatorBytesLimitParams = { + internalQueryTopNAccumulatorBytes: kSize10MB * 15, + }; + const insufficientAccumulatorBytesLimitParams = { + internalQueryTopNAccumulatorBytes: kSize10MB, + }; + + for (let {cmdObj, expectedMetrics} of testCases) { + jsTest.log("Testing " + tojson({isHashed, isUnique, isShardedColl, cmdObj})); + + setMongodServerParameters({st, rst, params: sufficientAccumulatorBytesLimitParams}); + let res = conn.adminCommand(cmdObj); + assert.commandWorked(res); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, + expectedMetrics); + + setMongodServerParameters({st, rst, params: insufficientAccumulatorBytesLimitParams}); + res = conn.adminCommand(cmdObj); + if (isUnique || isHashed) { + assert.commandWorked(res); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, + expectedMetrics); + } else { + // The aggregation pipeline that the analyzeShardKey command uses to calculate the + // cardinality and frequency metrics when the supporting index is not unique contains + // a $group stage with $topN. The small size limit for $topN would therefore cause + // the analyzeShardKey command to fail with an ExceededMemoryLimit error when the + // index (i.e. values to group and sort) is not hashed. + assert.commandFailedWithCode(res, ErrorCodes.ExceededMemoryLimit); + } + } assert(coll.drop()); } const setParameterOpts = { analyzeShardKeyNumMostCommonValues: numMostCommonValues, - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) }; { const st = new ShardingTest({shards: 2, rs: {nodes: numNodesPerRS, setParameter: setParameterOpts}}); - runTest(st.s, {isUnique: true, isShardedColl: false}); - runTest(st.s, {isUnique: false, isShardedColl: false}); - runTest(st.s, {isUnique: false, isShardedColl: true, st}); + runTest(st.s, {isHashed: false, isUnique: true, isShardedColl: false, st}); + runTest(st.s, {isHashed: false, isUnique: false, isShardedColl: false, st}); + // Not testing unique hashed index since hashed indexes cannot have a uniqueness constraint. + runTest(st.s, {isHashed: true, isUnique: false, isShardedColl: false, st}); + + // Not testing unique b-tree index since uniqueness can't be maintained unless the shard key + // is prefix of the candidate shard keys. + runTest(st.s, {isHashed: false, isUnique: false, isShardedColl: true, st}); + // Not testing unique hashed index since hashed indexes cannot have a uniqueness constraint. + runTest(st.s, {isHashed: true, isUnique: false, isShardedColl: true, st}); st.stop(); } @@ -207,8 +288,10 @@ const setParameterOpts = { rst.initiate(); const primary = rst.getPrimary(); - runTest(primary, {isUnique: true, isShardedColl: false}); - runTest(primary, {isUnique: false, isShardedColl: false}); + runTest(primary, {isHashed: false, isUnique: true, isShardedColl: false, rst}); + runTest(primary, {isHashed: false, isUnique: false, isShardedColl: false, rst}); + // Not testing unique hashed index since hashed indexes cannot have a uniqueness constraint. 
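The memory-limit branch exercised above can be summarized with a small standalone sketch (illustration only, not part of the patch; `primary` is assumed to be a direct connection to a mongod whose collection `ns` has a non-unique, non-hashed index on `{a: 1}`, and the byte values are arbitrary):

```js
const cmdObj = {analyzeShardKey: ns, key: {a: 1}, readWriteDistribution: false};

// With a generous $topN budget, the cardinality/frequency aggregation has room to group
// the key values, so the command succeeds.
assert.commandWorked(primary.adminCommand(
    {setParameter: 1, internalQueryTopNAccumulatorBytes: 100 * 1024 * 1024}));
assert.commandWorked(primary.adminCommand(cmdObj));

// With a budget smaller than the values being accumulated, the $group + $topN stage runs
// out of memory and the command fails instead.
assert.commandWorked(
    primary.adminCommand({setParameter: 1, internalQueryTopNAccumulatorBytes: 1024}));
assert.commandFailedWithCode(primary.adminCommand(cmdObj), ErrorCodes.ExceededMemoryLimit);
```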
+ runTest(primary, {isHashed: true, isUnique: false, isShardedColl: false, rst}); rst.stopSet(); } diff --git a/jstests/sharding/analyze_shard_key/num_orphan_docs.js b/jstests/sharding/analyze_shard_key/num_orphan_docs.js index 76e0d49e03a75..41c1a8c1d6087 100644 --- a/jstests/sharding/analyze_shard_key/num_orphan_docs.js +++ b/jstests/sharding/analyze_shard_key/num_orphan_docs.js @@ -31,8 +31,14 @@ function testAnalyzeShardKeyUnshardedCollection(conn) { const docs = [{candidateKey: 1}]; assert.commandWorked(coll.insert(docs, {writeConcern})); - const res = assert.commandWorked(conn.adminCommand({analyzeShardKey: ns, key: candidateKey})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res, { + const res = assert.commandWorked(conn.adminCommand({ + analyzeShardKey: ns, + key: candidateKey, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, { numDocs: 1, isUnique: false, numDistinctValues: 1, @@ -74,8 +80,14 @@ function testAnalyzeShardKeyShardedCollection(st) { assert.commandWorked(st.s.adminCommand({split: ns, middle: {currentKey: 0}})); assert.commandWorked(st.s.adminCommand( {moveChunk: ns, find: {currentKey: 0}, to: st.shard1.shardName, _waitForDelete: true})); - let res = assert.commandWorked(st.s.adminCommand({analyzeShardKey: ns, key: candidateKey})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res, { + let res = assert.commandWorked(st.s.adminCommand({ + analyzeShardKey: ns, + key: candidateKey, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, { numDocs: 5, isUnique: false, numDistinctValues: 5, @@ -88,8 +100,8 @@ function testAnalyzeShardKeyShardedCollection(st) { ], numMostCommonValues }); - assert(res.hasOwnProperty("numOrphanDocs"), res); - assert.eq(res.numOrphanDocs, 0, res); + assert(res.keyCharacteristics.hasOwnProperty("numOrphanDocs"), res); + assert.eq(res.keyCharacteristics.numOrphanDocs, 0, res); // Pause range deletion on both shards. let suspendRangeDeletionFp0 = configureFailPoint(st.shard0, "suspendRangeDeletion"); @@ -101,8 +113,14 @@ function testAnalyzeShardKeyShardedCollection(st) { assert.commandWorked(st.s.adminCommand({split: ns, middle: {currentKey: -5}})); assert.commandWorked( st.s.adminCommand({moveChunk: ns, find: {currentKey: -5}, to: st.shard1.shardName})); - res = assert.commandWorked(st.s.adminCommand({analyzeShardKey: ns, key: candidateKey})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res, { + res = assert.commandWorked(st.s.adminCommand({ + analyzeShardKey: ns, + key: candidateKey, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, { numDocs: 6, isUnique: false, numDistinctValues: 5, @@ -115,8 +133,8 @@ function testAnalyzeShardKeyShardedCollection(st) { ], numMostCommonValues }); - assert(res.hasOwnProperty("numOrphanDocs"), res); - assert.eq(res.numOrphanDocs, 1, res); + assert(res.keyCharacteristics.hasOwnProperty("numOrphanDocs"), res); + assert.eq(res.keyCharacteristics.numOrphanDocs, 1, res); // Analyze a shard key while two shards have orphan documents. 
Chunk distribution: // shard0: [MinKey, -5], [5, MaxKey] @@ -124,8 +142,14 @@ function testAnalyzeShardKeyShardedCollection(st) { assert.commandWorked(st.s.adminCommand({split: ns, middle: {currentKey: 5}})); assert.commandWorked( st.s.adminCommand({moveChunk: ns, find: {currentKey: 5}, to: st.shard0.shardName})); - res = assert.commandWorked(st.s.adminCommand({analyzeShardKey: ns, key: candidateKey})); - AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res, { + res = assert.commandWorked(st.s.adminCommand({ + analyzeShardKey: ns, + key: candidateKey, + // Skip calculating the read and write distribution metrics since they are not needed by + // this test. + readWriteDistribution: false + })); + AnalyzeShardKeyUtil.assertKeyCharacteristicsMetrics(res.keyCharacteristics, { numDocs: 8, isUnique: false, numDistinctValues: 5, @@ -138,9 +162,9 @@ function testAnalyzeShardKeyShardedCollection(st) { ], numMostCommonValues }); - assert(res.hasOwnProperty("numOrphanDocs"), res); - assert.eq(res.numOrphanDocs, 3, res); - assert(res.hasOwnProperty("note"), res); + assert(res.keyCharacteristics.hasOwnProperty("numOrphanDocs"), res); + assert.eq(res.keyCharacteristics.numOrphanDocs, 3, res); + assert(res.keyCharacteristics.hasOwnProperty("note"), res); suspendRangeDeletionFp0.off(); suspendRangeDeletionFp1.off(); @@ -148,11 +172,7 @@ function testAnalyzeShardKeyShardedCollection(st) { } const setParameterOpts = { - analyzeShardKeyNumMostCommonValues: numMostCommonValues, - // Skip calculating the read and write distribution metrics since there are no sampled queries - // anyway. - "failpoint.analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics": - tojson({mode: "alwaysOn"}) + analyzeShardKeyNumMostCommonValues: numMostCommonValues }; { diff --git a/jstests/sharding/analyze_shard_key/persist_sampled_diffs.js b/jstests/sharding/analyze_shard_key/persist_sampled_diffs.js index 7c1ebd85da147..2143bdf210ca9 100644 --- a/jstests/sharding/analyze_shard_key/persist_sampled_diffs.js +++ b/jstests/sharding/analyze_shard_key/persist_sampled_diffs.js @@ -5,15 +5,10 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; -load("jstests/libs/catalog_shard_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); -// Set this to allow sample ids to be set by an external client. -TestData.enableTestCommands = true; - const testCases = []; // multi=false update. @@ -177,12 +172,13 @@ function testDiffs(rst, testCase, expectSampling) { // allow the test helper to know if it should use "config" as the name for the test database. st.configRS.isConfigRS = true; - const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st); + // Force samples to get persisted even though query sampling is not enabled. 
+ QuerySamplingUtil.skipActiveSamplingCheckWhenPersistingSamples(st); + for (const testCase of testCases) { testDiffs(st.rs0, testCase, true /* expectSampling */); - testDiffs(st.configRS, testCase, isCatalogShardEnabled /* expectSampling */); + testDiffs(st.configRS, testCase, true /* expectSampling */); } st.stop(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/analyze_shard_key/persist_sampled_queries_failover.js b/jstests/sharding/analyze_shard_key/persist_sampled_queries_failover.js index 20ef6b313ab3e..2dfe013b47e7e 100644 --- a/jstests/sharding/analyze_shard_key/persist_sampled_queries_failover.js +++ b/jstests/sharding/analyze_shard_key/persist_sampled_queries_failover.js @@ -10,9 +10,6 @@ load("jstests/libs/fail_point_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); -// Set this to allow sample ids to be set by an external client. -TestData.enableTestCommands = true; - function testStepDown(rst) { const dbName = "testDb"; const collName = "testCollStepDown"; @@ -101,6 +98,9 @@ const st = new ShardingTest({ } }); +// Force samples to get persisted even though query sampling is not enabled. +QuerySamplingUtil.skipActiveSamplingCheckWhenPersistingSamples(st); + testStepDown(st.rs0); testStepUp(st.rs0); diff --git a/jstests/sharding/analyze_shard_key/persist_sampled_read_queries.js b/jstests/sharding/analyze_shard_key/persist_sampled_read_queries.js index c3f0988c2f47f..7359a347bfb5d 100644 --- a/jstests/sharding/analyze_shard_key/persist_sampled_read_queries.js +++ b/jstests/sharding/analyze_shard_key/persist_sampled_read_queries.js @@ -4,25 +4,16 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; -load("jstests/libs/catalog_shard_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); -// Set this to allow sample ids to be set by an external client. -TestData.enableTestCommands = true; - const supportedTestCases = [ {collectionExists: true, markForSampling: true, expectSampling: true}, {collectionExists: true, markForSampling: false, expectSampling: false}, {collectionExists: false, markForSampling: true, expectSampling: false}, ]; -const unsupportedTestCases = [ - {collectionExists: true, markForSampling: true, expectSampling: false}, -]; - // Test with empty, non-empty and missing filter and/or collation to verify that query sampling // doesn't require filter or collation to be non-empty. const filterCollationTestCases = [ @@ -180,18 +171,18 @@ function testAggregateCmd(rst, testCases) { // allow the test helper to know if it should use "config" as the name for the test database. st.configRS.isConfigRS = true; + // Force samples to get persisted even though query sampling is not enabled. + QuerySamplingUtil.skipActiveSamplingCheckWhenPersistingSamples(st); + testFindCmd(st.rs0, supportedTestCases); testCountCmd(st.rs0, supportedTestCases); testDistinctCmd(st.rs0, supportedTestCases); testAggregateCmd(st.rs0, supportedTestCases); - const configTests = - CatalogShardUtil.isEnabledIgnoringFCV(st) ? 
supportedTestCases : unsupportedTestCases; - testFindCmd(st.configRS, configTests); - testCountCmd(st.configRS, configTests); - testDistinctCmd(st.configRS, configTests); - testAggregateCmd(st.configRS, configTests); + testFindCmd(st.configRS, supportedTestCases); + testCountCmd(st.configRS, supportedTestCases); + testDistinctCmd(st.configRS, supportedTestCases); + testAggregateCmd(st.configRS, supportedTestCases); st.stop(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/analyze_shard_key/persist_sampled_retryable_delete_queries.js b/jstests/sharding/analyze_shard_key/persist_sampled_retryable_delete_queries.js index eb31988f5a65c..e09b2be204fc7 100644 --- a/jstests/sharding/analyze_shard_key/persist_sampled_retryable_delete_queries.js +++ b/jstests/sharding/analyze_shard_key/persist_sampled_retryable_delete_queries.js @@ -9,9 +9,6 @@ load("jstests/libs/fail_point_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); -// Set this to allow sample ids to be set by an external client. -TestData.enableTestCommands = true; - // Make the periodic job for writing sampled queries have a period of 1 second to speed up the test. const queryAnalysisWriterIntervalSecs = 1; @@ -132,6 +129,9 @@ const st = new ShardingTest({ } }); +// Force samples to get persisted even though query sampling is not enabled. +QuerySamplingUtil.skipActiveSamplingCheckWhenPersistingSamples(st); + testRetryExecutedWrite(st.rs0); testRetryUnExecutedWrite(st.rs0); diff --git a/jstests/sharding/analyze_shard_key/persist_sampled_retryable_findAndModify_queries.js b/jstests/sharding/analyze_shard_key/persist_sampled_retryable_findAndModify_queries.js index 88dc86a188822..6e5dee10fe63f 100644 --- a/jstests/sharding/analyze_shard_key/persist_sampled_retryable_findAndModify_queries.js +++ b/jstests/sharding/analyze_shard_key/persist_sampled_retryable_findAndModify_queries.js @@ -10,9 +10,6 @@ load("jstests/libs/fail_point_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); -// Set this to allow sample ids to be set by an external client. -TestData.enableTestCommands = true; - // Make the periodic job for writing sampled queries have a period of 1 second to speed up the test. const queryAnalysisWriterIntervalSecs = 1; @@ -134,6 +131,9 @@ const st = new ShardingTest({ } }); +// Force samples to get persisted even though query sampling is not enabled. +QuerySamplingUtil.skipActiveSamplingCheckWhenPersistingSamples(st); + testRetryExecutedWrite(st.rs0); testRetryUnExecutedWrite(st.rs0); diff --git a/jstests/sharding/analyze_shard_key/persist_sampled_retryable_update_queries.js b/jstests/sharding/analyze_shard_key/persist_sampled_retryable_update_queries.js index b02b1d0e8cda1..c2dba6cadff99 100644 --- a/jstests/sharding/analyze_shard_key/persist_sampled_retryable_update_queries.js +++ b/jstests/sharding/analyze_shard_key/persist_sampled_retryable_update_queries.js @@ -9,9 +9,6 @@ load("jstests/libs/fail_point_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); -// Set this to allow sample ids to be set by an external client. -TestData.enableTestCommands = true; - // Make the periodic job for writing sampled queries have a period of 1 second to speed up the test. const queryAnalysisWriterIntervalSecs = 1; @@ -133,6 +130,9 @@ const st = new ShardingTest({ } }); +// Force samples to get persisted even though query sampling is not enabled. 
+QuerySamplingUtil.skipActiveSamplingCheckWhenPersistingSamples(st); + testRetryExecutedWrite(st.rs0); testRetryUnExecutedWrite(st.rs0); diff --git a/jstests/sharding/analyze_shard_key/persist_sampled_write_queries.js b/jstests/sharding/analyze_shard_key/persist_sampled_write_queries.js index 5a17c33e437dc..d8a72e68c4a48 100644 --- a/jstests/sharding/analyze_shard_key/persist_sampled_write_queries.js +++ b/jstests/sharding/analyze_shard_key/persist_sampled_write_queries.js @@ -4,25 +4,16 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; -load("jstests/libs/catalog_shard_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); -// Set this to allow sample ids to be set by an external client. -TestData.enableTestCommands = true; - const supportedTestCases = [ {collectionExists: true, markForSampling: true, expectSampling: true}, {collectionExists: true, markForSampling: false, expectSampling: false}, {collectionExists: false, markForSampling: true, expectSampling: false}, ]; -const unsupportedTestCases = [ - {collectionExists: true, markForSampling: true, expectSampling: false}, -]; - // Make the periodic job for writing sampled queries have a period of 1 second to speed up the test. const queryAnalysisWriterIntervalSecs = 1; @@ -223,18 +214,18 @@ function testInsertCmd(rst) { // allow the test helper to know if it should use "config" as the name for the test database. st.configRS.isConfigRS = true; + // Force samples to get persisted even though query sampling is not enabled. + QuerySamplingUtil.skipActiveSamplingCheckWhenPersistingSamples(st); + testUpdateCmd(st.rs0, supportedTestCases); testDeleteCmd(st.rs0, supportedTestCases); testFindAndModifyCmd(st.rs0, supportedTestCases); testInsertCmd(st.rs0); - const configTests = - CatalogShardUtil.isEnabledIgnoringFCV(st) ? supportedTestCases : unsupportedTestCases; - testUpdateCmd(st.configRS, configTests); - testDeleteCmd(st.configRS, configTests); - testFindAndModifyCmd(st.configRS, configTests); + testUpdateCmd(st.configRS, supportedTestCases); + testDeleteCmd(st.configRS, supportedTestCases); + testFindAndModifyCmd(st.configRS, supportedTestCases); testInsertCmd(st.configRS); st.stop(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/analyze_shard_key/query_sampling_after_ddl.js b/jstests/sharding/analyze_shard_key/query_sampling_after_ddl.js new file mode 100644 index 0000000000000..07062363a487b --- /dev/null +++ b/jstests/sharding/analyze_shard_key/query_sampling_after_ddl.js @@ -0,0 +1,205 @@ +/** + * Tests that query sampling stops when the collection is dropped or renamed. + * + * @tags: [requires_fcv_71] + */ + +(function() { +"use strict"; + +load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' +load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); + +function setUpCollection(conn, {isShardedColl, st}) { + const dbName = "testDb-" + extractUUIDFromObject(UUID()); + const collName = isShardedColl ? "testCollSharded" : "testCollUnsharded"; + const ns = dbName + "." 
+ collName; + const db = conn.getDB(dbName); + + assert.commandWorked(db.createCollection(collName)); + if (isShardedColl) { + assert(st); + assert.commandWorked(st.s0.adminCommand({enableSharding: dbName})); + st.ensurePrimaryShard(dbName, st.shard0.name); + assert.commandWorked(st.s0.adminCommand({shardCollection: ns, key: {x: 1}})); + assert.commandWorked(st.s0.adminCommand({split: ns, middle: {x: 0}})); + assert.commandWorked( + st.s0.adminCommand({moveChunk: ns, find: {x: 0}, to: st.shard1.shardName})); + } + + return {dbName, collName}; +} + +function enableQuerySampling(conn, dbName, collName, {rst, st}) { + const ns = dbName + "." + collName; + const collUuid = QuerySamplingUtil.getCollectionUuid(conn.getDB(dbName), collName); + assert.commandWorked( + conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1000})); + QuerySamplingUtil.waitForActiveSampling(ns, collUuid, {rst, st}); +} + +function assertNumSampledQueries(conn, ns, expectedNum) { + // Wait for one refresh interval so that if 'expectedNum' is 0 the check doesn't pass just + // because the sampled queries have been flushed. + sleep(queryAnalysisWriterIntervalSecs * 1000); + + let sampledQueryDocs; + assert.soon(() => { + const aggRes = assert.commandWorked(conn.adminCommand( + {aggregate: 1, pipeline: [{$listSampledQueries: {namespace: ns}}], cursor: {}})); + sampledQueryDocs = aggRes.cursor.firstBatch; + if (sampledQueryDocs.length >= expectedNum) { + return true; + } + return false; + }); + assert.eq(sampledQueryDocs.length, expectedNum, sampledQueryDocs); +} + +function testDropCollection(conn, {recreateCollection, isShardedColl, rst, st}) { + assert(rst || st); + assert(!rst || !st); + + const {dbName, collName} = setUpCollection(conn, {isShardedColl, st}); + const ns = dbName + "." + collName; + const db = conn.getDB(dbName); + jsTest.log( + `Testing dropCollection ${tojson({dbName, collName, isShardedColl, recreateCollection})}`); + + enableQuerySampling(conn, dbName, collName, {rst, st}); + + assert(db.getCollection(collName).drop()); + if (recreateCollection) { + assert.commandWorked(db.createCollection(collName)); + } + // Verify that no queries get sampled. + assert.commandWorked(db.runCommand({find: collName, filter: {x: 0}})); + assertNumSampledQueries(conn, ns, 0); + + if (recreateCollection) { + // Re-enable query sampling and verify that queries get sampled. + enableQuerySampling(conn, dbName, collName, {rst, st}); + assert.commandWorked(db.runCommand({find: collName, filter: {x: 0}})); + assertNumSampledQueries(conn, ns, 1); + } +} + +function testDropDatabase(conn, {recreateCollection, isShardedColl, rst, st}) { + assert(rst || st); + assert(!rst || !st); + + const {dbName, collName} = setUpCollection(conn, {isShardedColl, st}); + const ns = dbName + "." + collName; + const db = conn.getDB(dbName); + jsTest.log(`Testing testDropDatabase ${ + tojson({dbName, collName, isShardedColl, recreateCollection})}`); + + enableQuerySampling(conn, dbName, collName, {rst, st}); + + assert.commandWorked(db.dropDatabase()); + if (recreateCollection) { + assert.commandWorked(db.createCollection(collName)); + } + // Verify that no queries get sampled. + assert.commandWorked(db.runCommand({find: collName, filter: {x: 0}})); + assertNumSampledQueries(conn, ns, 0); + + if (recreateCollection) { + // Re-enable query sampling and verify that queries get sampled. 
+ enableQuerySampling(conn, dbName, collName, {rst, st}); + assert.commandWorked(db.runCommand({find: collName, filter: {x: 0}})); + assertNumSampledQueries(conn, ns, 1); + } +} + +function testRenameCollection(conn, {sameDatabase, isShardedColl, rst, st}) { + assert(rst || st); + assert(!rst || !st); + + const {dbName, collName} = setUpCollection(conn, {isShardedColl, st}); + + const srcDbName = dbName; + const srcCollName = collName; + const srcNs = srcDbName + "." + srcCollName; + const srcDb = conn.getDB(srcDbName); + + const dstDbName = sameDatabase ? srcDbName : (srcDbName + "New"); + const dstCollName = sameDatabase ? (srcCollName + "New") : srcCollName; + const dstNs = dstDbName + "." + dstCollName; + const dstDb = conn.getDB(dstDbName); + assert.commandWorked(dstDb.createCollection(dstCollName)); + if (!sameDatabase && st) { + // On a sharded cluster, the src and dst collections must be on same shard. + st.ensurePrimaryShard(dstDbName, st.getPrimaryShardIdForDatabase(srcDbName)); + } + + jsTest.log(`Testing configuration deletion upon renameCollection ${ + tojson({sameDatabase, srcDbName, srcCollName, dstDbName, dstCollName, isShardedColl})}`); + + enableQuerySampling(conn, srcDbName, srcCollName, {rst, st}); + enableQuerySampling(conn, dstDbName, dstCollName, {rst, st}); + + assert.commandWorked(conn.adminCommand({renameCollection: srcNs, to: dstNs, dropTarget: true})); + // Verify that no queries get sampled for the src and dst collections. + assert.commandWorked(srcDb.runCommand({find: srcCollName, filter: {x: 0}})); + assertNumSampledQueries(conn, srcNs, 0); + assert.commandWorked(dstDb.runCommand({find: dstCollName, filter: {x: 0}})); + assertNumSampledQueries(conn, dstNs, 0); + + // Enable query sampling for the new collection and verify that queries get sampled. + enableQuerySampling(conn, dstDbName, dstCollName, {rst, st}); + assert.commandWorked(conn.getDB(dstDbName).runCommand({find: dstCollName, filter: {x: 0}})); + assertNumSampledQueries(conn, dstNs, 1); +} + +const queryAnalysisSamplerConfigurationRefreshSecs = 1; +const queryAnalysisWriterIntervalSecs = 1; + +const mongodSetParameterOpts = { + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + logComponentVerbosity: tojson({sharding: 2}), +}; +const mongosSetParametersOpts = { + queryAnalysisSamplerConfigurationRefreshSecs, + logComponentVerbosity: tojson({sharding: 3}) +}; + +{ + const st = new ShardingTest({ + shards: 2, + rs: {nodes: 1, setParameter: mongodSetParameterOpts}, + mongosOptions: {setParameter: mongosSetParametersOpts} + }); + + for (let isShardedColl of [true, false]) { + for (let recreateCollection of [true, false]) { + testDropCollection(st.s, {st, recreateCollection, isShardedColl}); + testDropDatabase(st.s, {st, recreateCollection, isShardedColl}); + } + testRenameCollection(st.s, {st, sameDatabase: true, isShardedColl}); + } + // The source database is only allowed to be different from the destination database when the + // collection being renamed is unsharded. 
+ testRenameCollection(st.s, {st, sameDatabase: false, isShardedColl: false}); + + st.stop(); +} + +{ + const rst = new ReplSetTest({nodes: 1, nodeOptions: {setParameter: mongodSetParameterOpts}}); + rst.startSet(); + rst.initiate(); + const primary = rst.getPrimary(); + + for (let recreateCollection of [true, false]) { + testDropCollection(primary, {rst, recreateCollection}); + testDropDatabase(primary, {rst, recreateCollection}); + } + for (let sameDatabase of [true, false]) { + testRenameCollection(primary, {rst, sameDatabase}); + } + + rst.stopSet(); +} +})(); diff --git a/jstests/sharding/analyze_shard_key/read_and_write_distribution.js b/jstests/sharding/analyze_shard_key/read_and_write_distribution.js index 4e700bd666b86..55335ac35c133 100644 --- a/jstests/sharding/analyze_shard_key/read_and_write_distribution.js +++ b/jstests/sharding/analyze_shard_key/read_and_write_distribution.js @@ -3,7 +3,7 @@ * distribution metrics, but on replica sets it does not since query sampling is only supported on * sharded clusters at this point. * - * @tags: [requires_fcv_70, featureFlagUpdateOneWithoutShardKey] + * @tags: [requires_fcv_71] */ (function() { "use strict"; @@ -444,6 +444,8 @@ function waitForSampledQueries(conn, ns, shardKey, testCase) { (numShardKeyUpdates >= testCase.metrics.writeDistribution.numShardKeyUpdates); }); + jsTest.log("??? res " + tojson(res)); + return res; } @@ -481,6 +483,8 @@ function runTest(fixture, {isShardedColl, shardKeyField, isHashed}) { docs.push({_id: i, x: i, y: i, ts: new Date()}); } assert.commandWorked(sampledColl.insert(docs)); + const sampledCollUuid = + QuerySamplingUtil.getCollectionUuid(fixture.conn.getDB(dbName), sampledCollName); // Verify that the analyzeShardKey command returns zeros for the read and write sample size // when there are no sampled queries. @@ -489,9 +493,9 @@ function runTest(fixture, {isShardedColl, shardKeyField, isHashed}) { assertMetricsEmptySampleSize(res); // Turn on query sampling and wait for sampling to become active. - assert.commandWorked( - fixture.conn.adminCommand({configureQueryAnalyzer: sampledNs, mode: "full", sampleRate})); - fixture.waitForActiveSamplingFn(); + assert.commandWorked(fixture.conn.adminCommand( + {configureQueryAnalyzer: sampledNs, mode: "full", samplesPerSecond})); + fixture.waitForActiveSamplingFn(sampledNs, sampledCollUuid); // Create and run test queries. const testCase = makeTestCase( @@ -504,7 +508,7 @@ function runTest(fixture, {isShardedColl, shardKeyField, isHashed}) { // getting sampled. assert.commandWorked( fixture.conn.adminCommand({configureQueryAnalyzer: sampledNs, mode: "off"})); - fixture.waitForInactiveSamplingFn(); + fixture.waitForInactiveSamplingFn(sampledNs, sampledCollUuid); res = waitForSampledQueries(fixture.conn, sampledNs, shardKey, testCase); // Verify that the metrics are as expected. @@ -524,7 +528,7 @@ function runTest(fixture, {isShardedColl, shardKeyField, isHashed}) { const queryAnalysisSamplerConfigurationRefreshSecs = 1; const queryAnalysisWriterIntervalSecs = 1; -const sampleRate = 10000; +const samplesPerSecond = 10000; const analyzeShardKeyNumRanges = 10; const mongodSetParameterOpts = { @@ -560,7 +564,7 @@ const mongosSetParametersOpts = { // This test expects every query to get sampled regardless of which mongos or mongod routes it. 
st.configRS.nodes.forEach(node => { - configureFailPoint(node, "queryAnalysisCoordinatorDistributeSampleRateEqually"); + configureFailPoint(node, "queryAnalysisCoordinatorDistributeSamplesPerSecondEqually"); }); const fixture = { @@ -585,10 +589,8 @@ const mongosSetParametersOpts = { st.s0.adminCommand({moveChunk: ns, find: {x: 1000}, to: st.shard2.shardName})); } }, - waitForActiveSamplingFn: () => { - for (let i = 0; i < numMongoses; i++) { - QuerySamplingUtil.waitForActiveSampling(st["s" + String(i)]); - } + waitForActiveSamplingFn: (ns, collUuid) => { + QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collUuid); }, runCmdsFn: (dbName, cmdObjs) => { for (let i = 0; i < cmdObjs.length; i++) { @@ -596,11 +598,8 @@ const mongosSetParametersOpts = { assert.commandWorked(db.runCommand(cmdObjs[i])); } }, - waitForInactiveSamplingFn: () => { - for (let i = 0; i < numMongoses; i++) { - QuerySamplingUtil.waitForInactiveSampling(st["s" + String(i)]); - } - QuerySamplingUtil.waitForInactiveSamplingOnAllShards(st); + waitForInactiveSamplingFn: (ns, collUuid) => { + QuerySamplingUtil.waitForInactiveSamplingShardedCluster(st, ns, collUuid); } }; @@ -634,7 +633,7 @@ const mongosSetParametersOpts = { // This test expects every query to get sampled regardless of which mongod it runs against. rst.nodes.forEach(node => { - configureFailPoint(node, "queryAnalysisCoordinatorDistributeSampleRateEqually"); + configureFailPoint(node, "queryAnalysisCoordinatorDistributeSamplesPerSecondEqually"); }); const fixture = { @@ -642,10 +641,8 @@ const mongosSetParametersOpts = { setUpCollectionFn: (dbName, collName, isShardedColl) => { // No setup is needed. }, - waitForActiveSamplingFn: () => { - rst.nodes.forEach(node => { - QuerySamplingUtil.waitForActiveSampling(node); - }); + waitForActiveSamplingFn: (ns, collUuid) => { + QuerySamplingUtil.waitForActiveSamplingReplicaSet(rst, ns, collUuid); }, runCmdsFn: (dbName, cmdObjs) => { for (let i = 0; i < cmdObjs.length; i++) { @@ -653,10 +650,8 @@ const mongosSetParametersOpts = { assert.commandWorked(node.getDB(dbName).runCommand(cmdObjs[i])); } }, - waitForInactiveSamplingFn: () => { - rst.nodes.forEach(node => { - QuerySamplingUtil.waitForInactiveSampling(node); - }); + waitForInactiveSamplingFn: (ns, collUuid) => { + QuerySamplingUtil.waitForInactiveSamplingReplicaSet(rst, ns, collUuid); } }; diff --git a/jstests/sharding/analyze_shard_key/refresh_sample_rates.js b/jstests/sharding/analyze_shard_key/refresh_sample_rates.js index 2dab18df68d3b..691129cc5bef2 100644 --- a/jstests/sharding/analyze_shard_key/refresh_sample_rates.js +++ b/jstests/sharding/analyze_shard_key/refresh_sample_rates.js @@ -26,11 +26,11 @@ function testBasic(createConnFn, rst, samplerNames) { const collName0 = "testColl0"; const ns0 = dbName + "." + collName0; - const sampleRate0 = 5; + const samplesPerSecond0 = 5; const collName1 = "testColl1"; const ns1 = dbName + "." 
+ collName1; - const sampleRate1 = 50; + const samplesPerSecond1 = 50; const db = conn.getDB(dbName); assert.commandWorked(db.createCollection(collName0)); @@ -39,16 +39,16 @@ function testBasic(createConnFn, rst, samplerNames) { const collUuid1 = QuerySamplingUtil.getCollectionUuid(db, collName1); jsTest.log("Verifying that refreshing returns the correct configurations"); - assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns0, mode: "full", sampleRate: sampleRate0})); - assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns1, mode: "full", sampleRate: sampleRate1})); + assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns0, mode: "full", samplesPerSecond: samplesPerSecond0})); + assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns1, mode: "full", samplesPerSecond: samplesPerSecond1})); const configColl = conn.getCollection("config.queryAnalyzers"); - const startTime0 = configColl.findOne({ns: ns0}).startTime; - const startTime1 = configColl.findOne({ns: ns1}).startTime; + const startTime0 = configColl.findOne({_id: ns0}).startTime; + const startTime1 = configColl.findOne({_id: ns1}).startTime; // Query distribution after: [1, unknown, unknown]. Verify that refreshing returns - // sampleRate / numSamplers. + // samplesPerSecond / numSamplers. let res0 = assert.commandWorked(primary.adminCommand({ _refreshQueryAnalyzerConfiguration: 1, name: samplerNames[0], @@ -59,19 +59,19 @@ function testBasic(createConnFn, rst, samplerNames) { { ns: ns0, collectionUuid: collUuid0, - sampleRate: expectedRatio0 * sampleRate0, + samplesPerSecond: expectedRatio0 * samplesPerSecond0, startTime: startTime0 }, { ns: ns1, collectionUuid: collUuid1, - sampleRate: expectedRatio0 * sampleRate1, + samplesPerSecond: expectedRatio0 * samplesPerSecond1, startTime: startTime1 }, ]); // Query distribution after: [1, 0, unknown]. Verify that refreshing returns - // sampleRate / numSamplers. + // samplesPerSecond / numSamplers. 
let res1 = assert.commandWorked(primary.adminCommand({ _refreshQueryAnalyzerConfiguration: 1, name: samplerNames[1], @@ -82,13 +82,13 @@ function testBasic(createConnFn, rst, samplerNames) { { ns: ns0, collectionUuid: collUuid0, - sampleRate: expectedRatio1 * sampleRate0, + samplesPerSecond: expectedRatio1 * samplesPerSecond0, startTime: startTime0 }, { ns: ns1, collectionUuid: collUuid1, - sampleRate: expectedRatio1 * sampleRate1, + samplesPerSecond: expectedRatio1 * samplesPerSecond1, startTime: startTime1 }, ]); @@ -105,13 +105,13 @@ function testBasic(createConnFn, rst, samplerNames) { { ns: ns0, collectionUuid: collUuid0, - sampleRate: expectedRatio2 * sampleRate0, + samplesPerSecond: expectedRatio2 * samplesPerSecond0, startTime: startTime0 }, { ns: ns1, collectionUuid: collUuid1, - sampleRate: expectedRatio2 * sampleRate1, + samplesPerSecond: expectedRatio2 * samplesPerSecond1, startTime: startTime1 }, ]); @@ -128,13 +128,13 @@ function testBasic(createConnFn, rst, samplerNames) { { ns: ns0, collectionUuid: collUuid0, - sampleRate: expectedRatio0 * sampleRate0, + samplesPerSecond: expectedRatio0 * samplesPerSecond0, startTime: startTime0 }, { ns: ns1, collectionUuid: collUuid1, - sampleRate: expectedRatio0 * sampleRate1, + samplesPerSecond: expectedRatio0 * samplesPerSecond1, startTime: startTime1 }, ]); @@ -148,8 +148,8 @@ function testBasic(createConnFn, rst, samplerNames) { })); assert.eq(res1.configurations.length, 2); assert.sameMembers(res1.configurations, [ - {ns: ns0, collectionUuid: collUuid0, sampleRate: 0, startTime: startTime0}, - {ns: ns1, collectionUuid: collUuid1, sampleRate: 0, startTime: startTime1}, + {ns: ns0, collectionUuid: collUuid0, samplesPerSecond: 0, startTime: startTime0}, + {ns: ns1, collectionUuid: collUuid1, samplesPerSecond: 0, startTime: startTime1}, ]); assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns1, mode: "off"})); @@ -166,7 +166,7 @@ function testBasic(createConnFn, rst, samplerNames) { { ns: ns0, collectionUuid: collUuid0, - sampleRate: expectedRatio1 * sampleRate0, + samplesPerSecond: expectedRatio1 * samplesPerSecond0, startTime: startTime0 }, ]); @@ -190,17 +190,17 @@ function testFailover(createConnFn, rst, samplerNames) { const dbName = "testDbFailover-" + extractUUIDFromObject(UUID()); const collName = "testColl"; const ns = dbName + "." + collName; - const sampleRate = 5; + const samplesPerSecond = 5; let db = conn.getDB(dbName); assert.commandWorked(db.createCollection(collName)); const collUuid = QuerySamplingUtil.getCollectionUuid(db, collName); jsTest.log("Verify that configurations are persisted and available after failover"); - assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: sampleRate})); + assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: samplesPerSecond})); const configColl = conn.getCollection("config.queryAnalyzers"); - const startTime = configColl.findOne({ns: ns}).startTime; + const startTime = configColl.findOne({_id: ns}).startTime; assert.commandWorked( primary.adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: true})); @@ -210,16 +210,19 @@ function testFailover(createConnFn, rst, samplerNames) { db = conn.getDB(dbName); // Query distribution after: [1, unknown, unknown]. Verify that refreshing returns - // sampleRate / numSamplers. + // samplesPerSecond / numSamplers. 
let res = assert.commandWorked(primary.adminCommand({ _refreshQueryAnalyzerConfiguration: 1, name: samplerNames[0], numQueriesExecutedPerSecond: 1 })); const expectedRatio = 1.0 / 3; - assert.sameMembers( - res.configurations, - [{ns: ns, collectionUuid: collUuid, sampleRate: expectedRatio * sampleRate, startTime}]); + assert.sameMembers(res.configurations, [{ + ns: ns, + collectionUuid: collUuid, + samplesPerSecond: expectedRatio * samplesPerSecond, + startTime + }]); assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode: "off"})); } @@ -232,17 +235,17 @@ function testRestart(createConnFn, rst, samplerNames) { const dbName = "testDbRestart-" + extractUUIDFromObject(UUID()); const collName = "testColl"; const ns = dbName + "." + collName; - const sampleRate = 5; + const samplesPerSecond = 5; let db = conn.getDB(dbName); assert.commandWorked(db.createCollection(collName)); const collUuid = QuerySamplingUtil.getCollectionUuid(db, collName); jsTest.log("Verify that configurations are persisted and available after restart"); - assert.commandWorked( - conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: sampleRate})); + assert.commandWorked(conn.adminCommand( + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: samplesPerSecond})); const configColl = conn.getCollection("config.queryAnalyzers"); - const startTime = configColl.findOne({ns: ns}).startTime; + const startTime = configColl.findOne({_id: ns}).startTime; rst.stopSet(null /* signal */, true /*forRestart */); rst.startSet({restart: true}); @@ -251,16 +254,19 @@ function testRestart(createConnFn, rst, samplerNames) { db = conn.getDB(dbName); // Query distribution after: [1, unknown, unknown]. Verify that refreshing returns - // sampleRate / numSamplers. + // samplesPerSecond / numSamplers. let res = assert.commandWorked(primary.adminCommand({ _refreshQueryAnalyzerConfiguration: 1, name: samplerNames[0], numQueriesExecutedPerSecond: 1 })); const expectedRatio = 1.0 / 3; - assert.sameMembers( - res.configurations, - [{ns: ns, collectionUuid: collUuid, sampleRate: expectedRatio * sampleRate, startTime}]); + assert.sameMembers(res.configurations, [{ + ns: ns, + collectionUuid: collUuid, + samplesPerSecond: expectedRatio * samplesPerSecond, + startTime + }]); assert.commandWorked(conn.adminCommand({configureQueryAnalyzer: ns, mode: "off"})); } @@ -293,6 +299,11 @@ function runTest(createConnFn, rst, samplerNames) { st.configRS.isConfigSvr = true; const samplerNames = [st.s0.host, st.s1.host, st.s2.host]; + jsTest.log("Wait for the config server to be aware that there are 3 mongoses in the cluster"); + assert.soon(() => { + return st.s.getCollection("config.mongos").find().itcount() == 3; + }); + jsTest.log("Test that the _refreshQueryAnalyzerConfiguration command is not supported on " + "mongos or shardsvr mongod or configsvr secondary mongod"); const cmdObj = { @@ -301,8 +312,8 @@ function runTest(createConnFn, rst, samplerNames) { numQueriesExecutedPerSecond: 1 }; assert.commandFailedWithCode(st.s.adminCommand(cmdObj), ErrorCodes.CommandNotFound); - if (!TestData.catalogShard) { - // Shard0 is the config server in catalog shard mode. + if (!TestData.configShard) { + // Shard0 is the config server in config shard mode. 
st.rs0.nodes.forEach(node => { assert.commandFailedWithCode(node.adminCommand(cmdObj), ErrorCodes.IllegalOperation); }); diff --git a/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js b/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js index 2ac5c33c2ca8e..2d0d7764b1892 100644 --- a/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js +++ b/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_sharded.js @@ -4,10 +4,15 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; +import { + innerAggTestCases, + outerAggTestCases, + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + testCustomInnerPipeline, + testNoCustomInnerPipeline +} from "jstests/sharding/analyze_shard_key/libs/sample_nested_agg_queries_common.js"; -load("jstests/sharding/analyze_shard_key/libs/sample_nested_agg_queries_common.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); const st = new ShardingTest({ @@ -51,8 +56,10 @@ assert.commandWorked( st.s.adminCommand({moveChunk: foreignNs, find: {x: 1000}, to: st.shard2.name})); assert.commandWorked( - st.s.adminCommand({configureQueryAnalyzer: foreignNs, mode: "full", sampleRate: 1000})); -QuerySamplingUtil.waitForActiveSamplingOnAllShards(st); + st.s.adminCommand({configureQueryAnalyzer: foreignNs, mode: "full", samplesPerSecond: 1000})); +const foreignCollUUid = QuerySamplingUtil.getCollectionUuid(mongosDB, foreignCollName); +QuerySamplingUtil.waitForActiveSamplingShardedCluster( + st, foreignNs, foreignCollUUid, {skipMongoses: true}); for (let {name, makeOuterPipelineFunc, @@ -143,4 +150,3 @@ for (let {name, assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: foreignNs, mode: "off"})); st.stop(); -})(); diff --git a/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js b/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js index 38c794969085e..a10d19ea32702 100644 --- a/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js +++ b/jstests/sharding/analyze_shard_key/sample_nested_agg_queries_unsharded.js @@ -4,10 +4,15 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; +import { + innerAggTestCases, + outerAggTestCases, + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + testCustomInnerPipeline, + testNoCustomInnerPipeline +} from "jstests/sharding/analyze_shard_key/libs/sample_nested_agg_queries_common.js"; -load("jstests/sharding/analyze_shard_key/libs/sample_nested_agg_queries_common.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); const st = new ShardingTest({ @@ -42,8 +47,10 @@ st.ensurePrimaryShard(dbName, st.shard0.name); assert.commandWorked(mongosDB.createCollection(foreignCollName)); assert.commandWorked( - st.s.adminCommand({configureQueryAnalyzer: foreignNs, mode: "full", sampleRate: 1000})); -QuerySamplingUtil.waitForActiveSamplingOnAllShards(st); + st.s.adminCommand({configureQueryAnalyzer: foreignNs, mode: "full", samplesPerSecond: 1000})); +const foreignCollUUid = QuerySamplingUtil.getCollectionUuid(mongosDB, foreignCollName); +QuerySamplingUtil.waitForActiveSamplingShardedCluster( + st, foreignNs, foreignCollUUid, {skipMongoses: true}); // The foreign collection is unsharded so all documents are on the primary shard. 
const shardNames = [st.rs0.name]; @@ -103,4 +110,3 @@ for (let {name, assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: foreignNs, mode: "off"})); st.stop(); -})(); diff --git a/jstests/sharding/analyze_shard_key/sample_rates_rs.js b/jstests/sharding/analyze_shard_key/sample_rates_rs.js index b6b709524b019..6ce713b5a9b37 100644 --- a/jstests/sharding/analyze_shard_key/sample_rates_rs.js +++ b/jstests/sharding/analyze_shard_key/sample_rates_rs.js @@ -30,7 +30,7 @@ const rst = new ReplSetTest({ setParameter: { queryAnalysisSamplerConfigurationRefreshSecs, queryAnalysisWriterIntervalSecs, - logComponentVerbosity: tojson({sharding: 2}) + logComponentVerbosity: tojson({sharding: 3}) }, } }); @@ -73,11 +73,11 @@ function getSampleSize() { */ function testQuerySampling(dbName, collNameNotSampled, collNameSampled) { const sampledNs = dbName + "." + collNameSampled; - const sampleRate = 5; + const samplesPerSecond = 5; const durationSecs = 90; assert.commandWorked( - primary.adminCommand({configureQueryAnalyzer: sampledNs, mode: "full", sampleRate})); + primary.adminCommand({configureQueryAnalyzer: sampledNs, mode: "full", samplesPerSecond})); sleep(queryAnalysisSamplerConfigurationRefreshSecs * 1000); // Define a thread for executing find commands via one of the secondaries. @@ -139,7 +139,7 @@ function testQuerySampling(dbName, collNameNotSampled, collNameSampled) { // Verify that the difference between the actual and expected number of samples is within the // expected threshold. - const expectedTotalCount = durationSecs * sampleRate; + const expectedTotalCount = durationSecs * samplesPerSecond; const expectedFindPercentage = AnalyzeShardKeyUtil.calculatePercentage(actualNumFindPerSec, actualTotalQueriesPerSec); const expectedDeletePercentage = diff --git a/jstests/sharding/analyze_shard_key/sample_rates_sharded.js b/jstests/sharding/analyze_shard_key/sample_rates_sharded.js index 2b3df2de49a45..3d5ff629dae65 100644 --- a/jstests/sharding/analyze_shard_key/sample_rates_sharded.js +++ b/jstests/sharding/analyze_shard_key/sample_rates_sharded.js @@ -46,11 +46,16 @@ const st = new ShardingTest({ setParameter: { queryAnalysisSamplerConfigurationRefreshSecs, queryAnalysisWriterIntervalSecs, - logComponentVerbosity: tojson({sharding: 2}) + logComponentVerbosity: tojson({sharding: 3}) } }, configOptions: { - setParameter: {queryAnalysisSamplerInActiveThresholdSecs: 3}, + setParameter: { + queryAnalysisSamplerInActiveThresholdSecs: 3, + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + logComponentVerbosity: tojson({sharding: 3}) + }, } }); @@ -104,11 +109,11 @@ function getSampleSize() { */ function testQuerySampling(dbName, collNameNotSampled, collNameSampled) { const sampledNs = dbName + "." + collNameSampled; - const sampleRate = 5; + const samplesPerSecond = 5; const durationSecs = 90; assert.commandWorked( - st.s.adminCommand({configureQueryAnalyzer: sampledNs, mode: "full", sampleRate})); + st.s.adminCommand({configureQueryAnalyzer: sampledNs, mode: "full", samplesPerSecond})); sleep(queryAnalysisSamplerConfigurationRefreshSecs * 1000); // Define a thread for executing find commands via mongos0. @@ -169,7 +174,7 @@ function testQuerySampling(dbName, collNameNotSampled, collNameSampled) { // Verify that the difference between the actual and expected number of samples is within the // expected threshold. 
- const expectedTotalCount = durationSecs * sampleRate; + const expectedTotalCount = durationSecs * samplesPerSecond; const expectedFindPercentage = AnalyzeShardKeyUtil.calculatePercentage(actualNumFindPerSec, actualTotalQueriesPerSec); const expectedDeletePercentage = diff --git a/jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js b/jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js index 1cbd72406a9e4..fb6e0157f3d6b 100644 --- a/jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js +++ b/jstests/sharding/analyze_shard_key/sample_read_queries_sharded.js @@ -10,14 +10,19 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); // Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a // period of 1 second to speed up the test. +const queryAnalysisWriterIntervalSecs = 1; +const queryAnalysisSamplerConfigurationRefreshSecs = 1; const st = new ShardingTest({ shards: 3, rs: { nodes: 2, - setParameter: - {queryAnalysisWriterIntervalSecs: 1, logComponentVerbosity: tojson({sharding: 2})} + setParameter: { + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + logComponentVerbosity: tojson({sharding: 2}) + } }, - mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs: 1}} + mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs}} }); const dbName = "testDb"; @@ -40,8 +45,8 @@ assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 1000}, to: st.s const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName); assert.commandWorked( - st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000})); -QuerySamplingUtil.waitForActiveSampling(st.s); + st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1000})); +QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid); const expectedSampledQueryDocs = []; diff --git a/jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js b/jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js index e21fcb96f04bd..0494cd1305a30 100644 --- a/jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js +++ b/jstests/sharding/analyze_shard_key/sample_read_queries_unsharded.js @@ -11,14 +11,19 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); // Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a // period of 1 second to speed up the test. 
+const queryAnalysisWriterIntervalSecs = 1; +const queryAnalysisSamplerConfigurationRefreshSecs = 1; const st = new ShardingTest({ shards: 2, rs: { nodes: 2, - setParameter: - {queryAnalysisWriterIntervalSecs: 1, logComponentVerbosity: tojson({sharding: 2})} + setParameter: { + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + logComponentVerbosity: tojson({sharding: 2}) + } }, - mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs: 1}} + mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs}} }); const dbName = "testDb"; @@ -33,8 +38,8 @@ assert.commandWorked(mongosDB.createCollection(collName)); const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName); assert.commandWorked( - st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000})); -QuerySamplingUtil.waitForActiveSampling(st.s); + st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1000})); +QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid); const expectedSampledQueryDocs = []; // This is an unsharded collection so all documents are on the primary shard. diff --git a/jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js b/jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js index e3f10c1190a53..9e653c5bb3f6b 100644 --- a/jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js +++ b/jstests/sharding/analyze_shard_key/sample_write_queries_sharded.js @@ -10,14 +10,19 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); // Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a // period of 1 second to speed up the test. +const queryAnalysisWriterIntervalSecs = 1; +const queryAnalysisSamplerConfigurationRefreshSecs = 1; const st = new ShardingTest({ shards: 3, rs: { nodes: 2, - setParameter: - {queryAnalysisWriterIntervalSecs: 1, logComponentVerbosity: tojson({sharding: 2})} + setParameter: { + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + logComponentVerbosity: tojson({sharding: 2}) + } }, - mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs: 1}} + mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs}} }); const dbName = "testDb"; @@ -42,8 +47,8 @@ assert.commandWorked(st.s.adminCommand({moveChunk: ns, find: {x: 1000}, to: st.s const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName); assert.commandWorked( - st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000})); -QuerySamplingUtil.waitForActiveSampling(st.s); + st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1000})); +QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid); const expectedSampledQueryDocs = []; diff --git a/jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js b/jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js index 9f28c130f305a..68c4cdc367396 100644 --- a/jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js +++ b/jstests/sharding/analyze_shard_key/sample_write_queries_unsharded.js @@ -11,14 +11,19 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); // Make the periodic jobs for refreshing sample rates and writing sampled queries and diffs have a // period of 1 second to speed up the test. 
+const queryAnalysisWriterIntervalSecs = 1; +const queryAnalysisSamplerConfigurationRefreshSecs = 1; const st = new ShardingTest({ shards: 2, rs: { nodes: 2, - setParameter: - {queryAnalysisWriterIntervalSecs: 1, logComponentVerbosity: tojson({sharding: 2})} + setParameter: { + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + logComponentVerbosity: tojson({sharding: 2}) + } }, - mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs: 1}} + mongosOptions: {setParameter: {queryAnalysisSamplerConfigurationRefreshSecs}} }); const dbName = "testDb"; @@ -33,8 +38,8 @@ assert.commandWorked(mongosDB.createCollection(collName)); const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName); assert.commandWorked( - st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000})); -QuerySamplingUtil.waitForActiveSampling(st.s); + st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1000})); +QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid); const expectedSampledQueryDocs = []; // This is an unsharded collection so all documents are on the primary shard. diff --git a/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js b/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js index ae28250cbd4a4..6cae2dbff4706 100644 --- a/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js +++ b/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_rs.js @@ -32,7 +32,7 @@ const collName = "testColl"; const ns = dbName + "." + collName; const numDocs = 10; -const sampleRate = 1000; +const samplesPerSecond = 1000; const db = primary.getDB(dbName); const coll = db.getCollection(collName); @@ -42,6 +42,7 @@ for (let i = 0; i < numDocs; i++) { bulk.insert({x: i, y: i}); } assert.commandWorked(bulk.execute()); +const collUuid = QuerySamplingUtil.getCollectionUuid(db, collName); function runCommandAndAssertCurrentOpAndServerStatus(opKind, cmdObj, oldState) { assert.commandWorked(primary.getDB(dbName).runCommand(cmdObj)); @@ -60,9 +61,9 @@ assert.eq( bsonWoCompare(currentState, makeInitialCurrentOpAndServerStatusMongod(0)), 0, {currentState}); // Start query sampling. -assert.commandWorked( - primary.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: sampleRate})); -QuerySamplingUtil.waitForActiveSampling(primary); +assert.commandWorked(primary.adminCommand( + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: samplesPerSecond})); +QuerySamplingUtil.waitForActiveSamplingReplicaSet(rst, ns, collUuid); // Execute different kinds of queries and check counters. const cmdObj0 = { @@ -98,7 +99,7 @@ const state4 = runCommandAndAssertCurrentOpAndServerStatus(opKindWrite, cmdObj4, // Stop query sampling. 
assert.commandWorked(primary.adminCommand({configureQueryAnalyzer: ns, mode: "off"})); -QuerySamplingUtil.waitForInactiveSampling(primary); +QuerySamplingUtil.waitForInactiveSamplingReplicaSet(rst, ns, collUuid); const expectedFinalState = Object.assign({}, state4, true /* deep */); expectedFinalState.currentOp = []; diff --git a/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js b/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js index d1c0553c96daa..32a4389001a1d 100644 --- a/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js +++ b/jstests/sharding/analyze_shard_key/sampling_current_op_and_server_status_sharded.js @@ -42,7 +42,7 @@ const collName = "testColl"; const ns = dbName + "." + collName; const numDocs = 10; -const sampleRate = 1000; +const samplesPerSecond = 1000; const db = st.s0.getDB(dbName); const coll = db.getCollection(collName); @@ -52,6 +52,7 @@ for (let i = 0; i < numDocs; i++) { bulk.insert({x: i, y: i}); } assert.commandWorked(bulk.execute()); +const collUuid = QuerySamplingUtil.getCollectionUuid(db, collName); function makeInitialCurrentOpAndServerStatus(numColls) { return { @@ -78,8 +79,11 @@ function runCommandAndAssertCurrentOpAndServerStatus(opKind, cmdObj, oldState) { newState = getCurrentOpAndServerStatus(); return assertCurrentOpAndServerStatusMongos( ns, opKind, oldState.mongos0, newState.mongos0) && - assertCurrentOpAndServerStatusMongos( - ns, opKindNoop, oldState.mongos1, newState.mongos1, {expectedSampleRate: 0}) && + assertCurrentOpAndServerStatusMongos(ns, + opKindNoop, + oldState.mongos1, + newState.mongos1, + {expectedSamplesPerSecond: 0}) && assertCurrentOpAndServerStatusMongod( ns, opKind, oldState.mongod, newState.mongod, true /* isShardSvr */); }); @@ -90,14 +94,9 @@ let currentState = getCurrentOpAndServerStatus(); assert.eq(bsonWoCompare(currentState, makeInitialCurrentOpAndServerStatus(0)), 0, {currentState}); // Start query sampling. -assert.commandWorked( - st.s0.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: sampleRate})); -QuerySamplingUtil.waitForActiveSampling(st.s0); -QuerySamplingUtil.waitForActiveSampling(st.s1); -QuerySamplingUtil.waitForActiveSampling(st.rs0.getPrimary()); -// Wait for at least one refresh interval to make the inactive mongos find out that its sample rate -// is 0. -sleep(2 * queryAnalysisSamplerConfigurationRefreshSecs); +assert.commandWorked(st.s0.adminCommand( + {configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: samplesPerSecond})); +QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collUuid); // Execute different kinds of queries and check counters. const cmdObj0 = { @@ -133,9 +132,7 @@ const state4 = runCommandAndAssertCurrentOpAndServerStatus(opKindWrite, cmdObj4, // Stop query sampling. 
assert.commandWorked(st.s0.adminCommand({configureQueryAnalyzer: ns, mode: "off"})); -QuerySamplingUtil.waitForInactiveSampling(st.s0); -QuerySamplingUtil.waitForInactiveSampling(st.s1); -QuerySamplingUtil.waitForInactiveSampling(st.rs0.getPrimary()); +QuerySamplingUtil.waitForInactiveSamplingShardedCluster(st, ns, collUuid); const expectedFinalState = Object.assign({}, state4, true /* deep */); expectedFinalState.mongos0.currentOp = []; diff --git a/jstests/sharding/analyze_shard_key/shard_key_updates.js b/jstests/sharding/analyze_shard_key/shard_key_updates.js index d5992154c988c..36f0efdfcf5b5 100644 --- a/jstests/sharding/analyze_shard_key/shard_key_updates.js +++ b/jstests/sharding/analyze_shard_key/shard_key_updates.js @@ -3,12 +3,9 @@ * * @tags: [requires_fcv_70] */ -(function() { -"use strict"; - load("jstests/sharding/analyze_shard_key/libs/analyze_shard_key_util.js"); load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/uuid_util.js"); // for 'extractUUIDFromObject' // This command involves running commands outside a session. @@ -22,7 +19,7 @@ const assertApprox = AnalyzeShardKeyUtil.assertApprox; const queryAnalysisSamplerConfigurationRefreshSecs = 1; const queryAnalysisWriterIntervalSecs = 1; -const sampleRate = 10000; +const samplesPerSecond = 10000; const analyzeShardKeyNumRanges = 10; const st = new ShardingTest({ @@ -112,8 +109,9 @@ function runTest({isShardedColl, execCtxType}) { assert.commandWorked(mongosDB.getCollection(collName).insert(docs)); const collectionUuid = QuerySamplingUtil.getCollectionUuid(mongosDB, collName); - assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate})); - QuerySamplingUtil.waitForActiveSampling(st.s); + assert.commandWorked( + st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond})); + QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid); // Test with a mix of modifier, replacement and pipeline updates and findAndModify updates. let numUpdates = 0; @@ -222,8 +220,7 @@ function runTest({isShardedColl, execCtxType}) { // preventing the internal aggregate commands run by the analyzeShardKey commands below from // getting sampled. assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: ns, mode: "off"})); - QuerySamplingUtil.waitForInactiveSampling(st.s); - QuerySamplingUtil.waitForInactiveSamplingOnAllShards(st); + QuerySamplingUtil.waitForInactiveSamplingShardedCluster(st, ns, collectionUuid); let numTotal = numUpdates + numFindAndModifys; assert.soon(() => { @@ -239,8 +236,9 @@ function runTest({isShardedColl, execCtxType}) { assert.eq(res0.writeDistribution.sampleSize.total, numTotal, res0); assert.eq(res0.writeDistribution.percentageOfShardKeyUpdates, 100, res0); - assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate})); - QuerySamplingUtil.waitForActiveSampling(st.s); + assert.commandWorked( + st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond})); + QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collectionUuid); // Below are not shard key updates. @@ -293,8 +291,7 @@ function runTest({isShardedColl, execCtxType}) { // preventing the internal aggregate commands run by the analyzeShardKey commands below from // getting sampled. 
assert.commandWorked(st.s.adminCommand({configureQueryAnalyzer: ns, mode: "off"})); - QuerySamplingUtil.waitForInactiveSampling(st.s); - QuerySamplingUtil.waitForInactiveSamplingOnAllShards(st); + QuerySamplingUtil.waitForInactiveSamplingShardedCluster(st, ns, collectionUuid); numTotal = numUpdates + numFindAndModifys; assert.soon(() => { @@ -327,5 +324,4 @@ runTest({isShardedColl: true, execCtxType: execCtxTypes.kClientSessionTransactio // basic one. runTest({isShardedColl: false, execCtxType: execCtxTypes.kNoClientSession}); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/analyze_shard_key/timeseries.js b/jstests/sharding/analyze_shard_key/timeseries.js new file mode 100644 index 0000000000000..2760fc273c471 --- /dev/null +++ b/jstests/sharding/analyze_shard_key/timeseries.js @@ -0,0 +1,57 @@ +/** + * Tests that timeseries collections do not support the analyzeShardKey and configureQueryAnalyzer + * commands since a timeseries collection is a view (of a bucket collection) and the analyzeShardKey + * and configureQueryAnalyzer commands cannot be run against a view. + */ +(function() { +"use strict"; + +const numNodesPerRS = 2; + +function runTest(conn, {isShardedColl, st}) { + const dbName = "testDb"; + const collName = "testColl"; + const numDocs = 10; + const ns = dbName + "." + collName; + const db = conn.getDB(dbName); + const coll = db.getCollection(collName); + + assert.commandWorked(db.createCollection(collName, {timeseries: {timeField: "ts"}})); + + if (isShardedColl) { + assert(st); + assert.commandWorked(st.s.adminCommand({enableSharding: dbName})); + st.ensurePrimaryShard(dbName, st.shard0.name); + assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {ts: 1}})); + } + + const docs = []; + for (let i = 0; i < numDocs; i++) { + docs.push({ts: new Date()}); + } + assert.commandWorked(coll.insert(docs)); + + assert.commandFailedWithCode(conn.adminCommand({analyzeShardKey: ns, key: {ts: 1}}), + ErrorCodes.IllegalOperation); + assert.commandFailedWithCode( + conn.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1}), + ErrorCodes.IllegalOperation); + + assert(coll.drop()); +} + +{ + const st = new ShardingTest({shards: 2, rs: {nodes: numNodesPerRS}}); + runTest(st.s, {isShardedColl: false, st}); + runTest(st.s, {isShardedColl: true, st}); + st.stop(); +} + +{ + const rst = new ReplSetTest({nodes: numNodesPerRS}); + rst.startSet(); + rst.initiate(); + runTest(rst.getPrimary(), {isShardedColl: false}); + rst.stopSet(); +} +})(); diff --git a/jstests/sharding/analyze_shard_key/ttl_delete_samples.js b/jstests/sharding/analyze_shard_key/ttl_delete_samples.js index dd153ca0ff826..087812bdfc5f5 100644 --- a/jstests/sharding/analyze_shard_key/ttl_delete_samples.js +++ b/jstests/sharding/analyze_shard_key/ttl_delete_samples.js @@ -9,19 +9,30 @@ load("jstests/sharding/analyze_shard_key/libs/query_sampling_util.js"); +const queryAnalysisSamplerConfigurationRefreshSecs = 1; +const queryAnalysisWriterIntervalSecs = 1; +// To speed up the test, make the sampled query documents expire right away. To prevent the +// documents from being deleted before the count is verified, make the TTL monitor have a large +// sleep interval at first and then lower it at the end of the test when verifying that the +// documents do get deleted by the TTL monitor. 
+const queryAnalysisSampleExpirationSecs = 1; +const ttlMonitorSleepSecs = 3600; + const st = new ShardingTest({ shards: 1, rs: { nodes: 2, setParameter: { - queryAnalysisWriterIntervalSecs: 1, - queryAnalysisSampleExpirationSecs: 2, + queryAnalysisSamplerConfigurationRefreshSecs, + queryAnalysisWriterIntervalSecs, + queryAnalysisSampleExpirationSecs, + ttlMonitorSleepSecs, logComponentVerbosity: tojson({sharding: 2}) } }, mongosOptions: { setParameter: { - queryAnalysisSamplerConfigurationRefreshSecs: 1, + queryAnalysisSamplerConfigurationRefreshSecs, } }, }); @@ -41,12 +52,12 @@ for (let i = 0; i < kNumDocs; i++) { bulk.insert({x: i, y: i}); } assert.commandWorked(bulk.execute()); +const collUuid = QuerySamplingUtil.getCollectionUuid(testDB, collName); // Enable query sampling assert.commandWorked( - st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", sampleRate: 1000})); - -QuerySamplingUtil.waitForActiveSampling(st.s); + st.s.adminCommand({configureQueryAnalyzer: ns, mode: "full", samplesPerSecond: 1000})); +QuerySamplingUtil.waitForActiveSamplingShardedCluster(st, ns, collUuid); // Find each document for (let i = 0; i < kNumDocs; i++) { @@ -71,9 +82,10 @@ assert.soon(() => { }); printjson({"numQueryDocs": numQueryDocs, "numDiffDocs": numDiffDocs}); +// Lower the TTL monitor sleep interval. assert.commandWorked(shard0Primary.adminCommand({setParameter: 1, ttlMonitorSleepSecs: 1})); -// Assert that query sample documents have been deleted +// Assert that query sample documents are eventually deleted. assert.soon(() => { return (QuerySamplingUtil.getNumSampledQueryDocuments(st) == 0 && QuerySamplingUtil.getNumSampledQueryDiffDocuments(st) == 0); diff --git a/jstests/sharding/api_params_nontransaction_sharded.js b/jstests/sharding/api_params_nontransaction_sharded.js index a4f878322acda..696f3f6ab20e1 100644 --- a/jstests/sharding/api_params_nontransaction_sharded.js +++ b/jstests/sharding/api_params_nontransaction_sharded.js @@ -5,10 +5,6 @@ * multiversion_incompatible, * ] */ +import {MongosAPIParametersUtil} from "jstests/sharding/libs/mongos_api_params_util.js"; -(function() { -'use strict'; - -load('jstests/sharding/libs/mongos_api_params_util.js'); MongosAPIParametersUtil.runTests({inTransaction: false, shardedCollection: true}); -})(); diff --git a/jstests/sharding/api_params_nontransaction_unsharded.js b/jstests/sharding/api_params_nontransaction_unsharded.js index 3afe1275d962e..94257dbbc8060 100644 --- a/jstests/sharding/api_params_nontransaction_unsharded.js +++ b/jstests/sharding/api_params_nontransaction_unsharded.js @@ -5,10 +5,6 @@ * multiversion_incompatible, * ] */ +import {MongosAPIParametersUtil} from "jstests/sharding/libs/mongos_api_params_util.js"; -(function() { -'use strict'; - -load('jstests/sharding/libs/mongos_api_params_util.js'); MongosAPIParametersUtil.runTests({inTransaction: false, shardedCollection: false}); -})(); diff --git a/jstests/sharding/api_params_transaction_sharded.js b/jstests/sharding/api_params_transaction_sharded.js index 558192f58a70f..1d0850ca96b4c 100644 --- a/jstests/sharding/api_params_transaction_sharded.js +++ b/jstests/sharding/api_params_transaction_sharded.js @@ -5,10 +5,6 @@ * multiversion_incompatible, * ] */ +import {MongosAPIParametersUtil} from "jstests/sharding/libs/mongos_api_params_util.js"; -(function() { -'use strict'; - -load('jstests/sharding/libs/mongos_api_params_util.js'); MongosAPIParametersUtil.runTests({inTransaction: true, shardedCollection: true}); -})(); diff --git 
a/jstests/sharding/api_params_transaction_unsharded.js b/jstests/sharding/api_params_transaction_unsharded.js index 4a49190a3133c..0a130ff0fd0b9 100644 --- a/jstests/sharding/api_params_transaction_unsharded.js +++ b/jstests/sharding/api_params_transaction_unsharded.js @@ -5,10 +5,6 @@ * multiversion_incompatible, * ] */ +import {MongosAPIParametersUtil} from "jstests/sharding/libs/mongos_api_params_util.js"; -(function() { -'use strict'; - -load('jstests/sharding/libs/mongos_api_params_util.js'); MongosAPIParametersUtil.runTests({inTransaction: true, shardedCollection: false}); -})(); diff --git a/jstests/sharding/api_version_stage_allowance_checks.js b/jstests/sharding/api_version_stage_allowance_checks.js index ac4e49181d701..92a28bb43b240 100644 --- a/jstests/sharding/api_version_stage_allowance_checks.js +++ b/jstests/sharding/api_version_stage_allowance_checks.js @@ -9,11 +9,6 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. - const st = new ShardingTest({shards: 2}); const mongos = st.s0; const dbName = jsTestName(); @@ -33,12 +28,6 @@ assert.commandWorked(result); // Tests that sharded time-series collection can be queried (invoking $_internalUnpackBucket stage) // from an external client with 'apiStrict'. (function testInternalUnpackBucketAllowance() { - if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog( - "Skipping test because the sharded time-series collection feature flag is disabled"); - return; - } - const collName = 'timeseriesColl'; const timeField = 'tm'; const coll = db[collName]; @@ -92,5 +81,4 @@ assert.commandWorked(result); .includes("$_internalUnpackBucket"))); })(); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/append_oplog_note_mongos.js b/jstests/sharding/append_oplog_note_mongos.js index 1369ecb06bea0..363eb00df5618 100644 --- a/jstests/sharding/append_oplog_note_mongos.js +++ b/jstests/sharding/append_oplog_note_mongos.js @@ -1,6 +1,10 @@ /** * Tests that the 'appendOplogNote' command on mongos correctly performs a no-op write on each * shard and advances the $clusterTime. + * + * Expects a particular oplog entry to be the latest in a shard's oplog, but if the shard is the + * config server, background writes, like to config.mongos, can break its assumption. + * @tags: [config_shard_incompatible] */ (function() { diff --git a/jstests/sharding/arbiters_do_not_use_cluster_time.js b/jstests/sharding/arbiters_do_not_use_cluster_time.js index 2874649f3f608..0fa2bda027cbb 100644 --- a/jstests/sharding/arbiters_do_not_use_cluster_time.js +++ b/jstests/sharding/arbiters_do_not_use_cluster_time.js @@ -2,7 +2,7 @@ * Tests that arbiters do not gossip clusterTime or operationTime. * * A config server can't have arbiter nodes. - * @tags: [catalog_shard_incompatible] + * @tags: [config_shard_incompatible] */ (function() { diff --git a/jstests/sharding/auth.js b/jstests/sharding/auth.js index 84a6a8ff5bc59..b36d14452ea58 100644 --- a/jstests/sharding/auth.js +++ b/jstests/sharding/auth.js @@ -6,11 +6,8 @@ * of 5MB across all sharding tests in wiredTiger. 
* @tags: [resource_intensive] */ -(function() { -'use strict'; load("jstests/replsets/rslib.js"); load("jstests/sharding/libs/find_chunks_util.js"); -load("jstests/libs/feature_flag_util.js"); // Replica set nodes started with --shardsvr do not enable key generation until they are added // to a sharded cluster and reject commands with gossiped clusterTime from users without the @@ -52,13 +49,13 @@ function getShardName(rsTest) { var s = new ShardingTest({ name: "auth", mongos: 1, - shards: TestData.catalogShard ? 1 : 0, + shards: TestData.configShard ? 1 : 0, other: {keyFile: "jstests/libs/key1", chunkSize: 1}, }); if (s.getDB('admin').runCommand('buildInfo').bits < 64) { print('Skipping test on 32-bit platforms'); - return; + quit(); } print("Configuration: Add user " + tojson(adminUser)); @@ -325,5 +322,4 @@ assert.commandFailed(readOnlyDB.killOp(2000000000)); s.stop(); d1.stopSet(); -d2.stopSet(); -})(); +d2.stopSet(); \ No newline at end of file diff --git a/jstests/sharding/authCommands.js b/jstests/sharding/authCommands.js index 145686a796fc9..8d361300562df 100644 --- a/jstests/sharding/authCommands.js +++ b/jstests/sharding/authCommands.js @@ -56,8 +56,8 @@ var authenticatedConn = new Mongo(mongos.host); authenticatedConn.getDB('admin').auth(rwUser, password); // Add user to shards to prevent localhost connections from having automatic full access -if (!TestData.catalogShard) { - // In catalog shard mode, the first shard is the config server, so the user we made via mongos +if (!TestData.configShard) { + // In config shard mode, the first shard is the config server, so the user we made via mongos // already used up this shard's localhost bypass. st.rs0.getPrimary().getDB('admin').createUser( {user: 'user', pwd: 'password', roles: jsTest.basicUserRoles}, {w: 3, wtimeout: 30000}); diff --git a/jstests/sharding/auth_catalog_shard_localhost_exception.js b/jstests/sharding/auth_catalog_shard_localhost_exception.js index 7e85e0c490377..b29a6f91aff4b 100644 --- a/jstests/sharding/auth_catalog_shard_localhost_exception.js +++ b/jstests/sharding/auth_catalog_shard_localhost_exception.js @@ -1,21 +1,21 @@ /** * Using the localhost exception, a user can create a cluster wide user on the config server (via - * the mongos) and a shard specific user on a shard server. On a catalog shard, since the config + * the mongos) and a shard specific user on a shard server. On a config shard, since the config * server is also a shard server, we want to make sure that we can't use the localhost exception to * create two users. * - * @tags: [requires_fcv_70, featureFlagCatalogShard, featureFlagTransitionToCatalogShard] + * @tags: [requires_fcv_70, featureFlagTransitionToCatalogShard] */ (function() { "use strict"; -// Test that we can't create a shard specific user on the catalog shard if we already created a +// Test that we can't create a shard specific user on the config shard if we already created a // cluster wide user using the localhost exception. var st = new ShardingTest({ mongos: 1, shards: 1, config: 1, - catalogShard: true, + configShard: true, keyFile: 'jstests/libs/key1', useHostname: false // This is required to use the localhost auth exception }); @@ -28,12 +28,12 @@ assert(adminDB.auth('admin', 'admin')); st.stop(); // Test that we can't create another cluster wide user if we already created a shard specific user -// on a catalog shard using the localhost exception. +// on a config shard using the localhost exception. 
var st = new ShardingTest({ mongos: 1, shards: 1, config: 1, - catalogShard: true, + configShard: true, keyFile: 'jstests/libs/key1', useHostname: false // This is required to use the localhost auth exception }); @@ -44,7 +44,7 @@ adminDB = st.s0.getDB('admin'); assert.commandFailedWithCode(adminDB.runCommand({createUser: "joe", pwd: "joe", roles: ["root"]}), ErrorCodes.Unauthorized); -// Test that the shard specific user created on the catalog shard is also a cluster wide user by +// Test that the shard specific user created on the config shard is also a cluster wide user by // using it to auth into the mongos assert(adminDB.auth('admin', 'admin')); diff --git a/jstests/sharding/auth_repl.js b/jstests/sharding/auth_repl.js index b806090fc3adb..42342a3b9c952 100644 --- a/jstests/sharding/auth_repl.js +++ b/jstests/sharding/auth_repl.js @@ -39,7 +39,7 @@ jsTest.log('Sending an authorized query that should be ok'); assert.commandWorked(testColl.insert({x: 1}, {writeConcern: {w: nodeCount}})); conn.setSecondaryOk(); -doc = testColl.findOne(); +let doc = testColl.findOne(); assert(doc != null); doc = testColl.find().readPref('secondary').next(); diff --git a/jstests/sharding/auth_secondaryok_routing.js b/jstests/sharding/auth_secondaryok_routing.js index cdf3a5ce6430e..8c1c136df3b27 100644 --- a/jstests/sharding/auth_secondaryok_routing.js +++ b/jstests/sharding/auth_secondaryok_routing.js @@ -55,8 +55,8 @@ var nodeCount = replTest.nodes.length; var adminDB = mongos.getDB('admin'); adminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles}); adminDB.auth('user', 'password'); -if (!TestData.catalogShard) { - // In catalog shard mode, creating this user above also created it on the first shard. +if (!TestData.configShard) { + // In config shard mode, creating this user above also created it on the first shard. var priAdminDB = replTest.getPrimary().getDB('admin'); replTest.getPrimary().waitForClusterTime(60); priAdminDB.createUser({user: 'user', pwd: 'password', roles: jsTest.adminUserRoles}, diff --git a/jstests/sharding/auth_sharding_cmd_metadata.js b/jstests/sharding/auth_sharding_cmd_metadata.js index 2219815bf5dfd..4186003b9469d 100644 --- a/jstests/sharding/auth_sharding_cmd_metadata.js +++ b/jstests/sharding/auth_sharding_cmd_metadata.js @@ -29,13 +29,13 @@ const shardAdminDB = st.rs0.getPrimary().getDB('admin'); const shardTestDB = st.rs0.getPrimary().getDB('test'); // ConfigOpTime can't be advanced from external clients -if (TestData.catalogShard) { - // We've already used up the localhost bypass in catalog shard mode, so we have to log in to +if (TestData.configShard) { + // We've already used up the localhost bypass in config shard mode, so we have to log in to // create the user below. shardAdminDB.auth('foo', 'bar'); } shardAdminDB.createUser({user: 'user', pwd: 'pwd', roles: jsTest.adminUserRoles}); -if (TestData.catalogShard) { +if (TestData.configShard) { shardAdminDB.logout(); } shardAdminDB.auth('user', 'pwd'); diff --git a/jstests/sharding/awaitable_hello_primary_failures.js b/jstests/sharding/awaitable_hello_primary_failures.js index fdc43ad07fb67..f97895a0d87db 100644 --- a/jstests/sharding/awaitable_hello_primary_failures.js +++ b/jstests/sharding/awaitable_hello_primary_failures.js @@ -2,7 +2,9 @@ * Test to assert that the RSM behaves correctly when contacting the primary node fails in various * ways. 
* - * @tags: [temporary_catalog_shard_incompatible] + * Restarts the config server in config shard suites, which requires persistence so restarted nodes + * can rejoin their original replica set and run shutdown hooks. + * @tags: [requires_persistence] */ // Checking UUID consistency and orphans involves talking to a shard node, which in this test is @@ -25,9 +27,9 @@ let rsPrimary = st.rs0.getPrimary(); // Make sure mongos knows who the primary is awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: true, ismaster: true}); -// Turn on the waitInHello failpoint. This will cause the primary node to cease sending isMaster +// Turn on the waitInHello failpoint. This will cause the primary node to cease sending "hello" // responses and the RSM should mark the node as down -jsTestLog("Turning on waitInHello failpoint. Node should stop sending isMaster responses."); +jsTestLog("Turning on waitInHello failpoint. Node should stop sending hello responses."); const helloFailpoint = configureFailPoint(rsPrimary, "waitInHello"); awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: false, ismaster: false}); helloFailpoint.off(); @@ -35,25 +37,26 @@ helloFailpoint.off(); // Wait for mongos to find out the node is still primary awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: true, ismaster: true}); -// Force the primary node to fail all isMaster requests. The RSM should mark the node as down. -jsTestLog("Turning on failCommand failpoint. Node should fail all isMaster responses."); -const failCmdFailpoint = configureFailPoint( - rsPrimary, - "failCommand", - {errorCode: ErrorCodes.CommandFailed, failCommands: ["isMaster"], failInternalCommands: true}); +// Force the primary node to fail all "hello" requests. The RSM should mark the node as down. +jsTestLog("Turning on failCommand failpoint. Node should fail all hello/isMaster responses."); +const failCmdFailpoint = configureFailPoint(rsPrimary, "failCommand", { + errorCode: ErrorCodes.CommandFailed, + failCommands: ["hello", "isMaster"], + failInternalCommands: true +}); awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: false, ismaster: false}); failCmdFailpoint.off(); // Wait for mongos to find out the node is still primary awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: true, ismaster: true}); -// Force the primary node to end the isMaster stream by not setting the 'moreToCome' bit on the +// Force the primary node to end the "hello" stream by not setting the 'moreToCome' bit on the // resposne. The RSM should not mark the server as down or unknown and should continue monitoring // the node. jsTestLog( - "Turning on doNotSetMoreToCome failpoint. Node should return successful isMaster responses."); + "Turning on doNotSetMoreToCome failpoint. Node should return successful hello responses."); const moreToComeFailpoint = configureFailPoint(rsPrimary, "doNotSetMoreToCome"); -// Wait for maxAwaitTimeMS to guarantee that mongos has received at least one isMaster response from +// Wait for maxAwaitTimeMS to guarantee that mongos has received at least one "hello" response from // the primary without the moreToCome bit set. sleep(10000); awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: true, ismaster: true}); @@ -64,8 +67,18 @@ awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: true, ismaster: true}); // Shutdown the primary node. The RSM should mark the node as down. 
jsTestLog("Shutting down primary node."); -st.rs0.stop(0); +if (TestData.configShard) { + st.rs0.stop(0, undefined, undefined, {forRestart: true}); +} else { + st.rs0.stop(0); +} awaitRSClientHosts(mongos, {host: rsPrimary.name}, {ok: false}); +if (TestData.configShard) { + // Shard0 is the config server in config shard mode, so restart it for the ShardingTest + // shutdown hooks. + st.rs0.start(0, undefined, true /* restart */); +} + st.stop(); }()); diff --git a/jstests/sharding/balancer_defragmentation_merge_chunks.js b/jstests/sharding/balancer_defragmentation_merge_chunks.js index 0118618d59a90..ce782b0f9f271 100644 --- a/jstests/sharding/balancer_defragmentation_merge_chunks.js +++ b/jstests/sharding/balancer_defragmentation_merge_chunks.js @@ -54,13 +54,8 @@ function getNewColl() { } // Shorten time between balancer rounds for faster initial balancing -st.forEachConfigServer((conn) => { - conn.adminCommand({ - configureFailPoint: 'overrideBalanceRoundInterval', - mode: 'alwaysOn', - data: {intervalMs: 200} - }); -}); +configureFailPointForRS( + st.configRS.nodes, 'overrideBalanceRoundInterval', {intervalMs: 200}, 'alwaysOn'); const targetChunkSizeMB = 2; @@ -73,7 +68,7 @@ function setupCollection() { 10 /* numChunks */, targetChunkSizeMB / 2 /* maxChunkFillMB */, 0 /* numZones */, - 32 * 1024 /* docSizeBytes */, + [32 * 1024, 32 * 1024] /* docSizeBytesRange */, 1000 /* chunkSpacing */, false /* disableCollectionBalancing */); jsTest.log("Collection " + coll.getFullName() + ", number of chunks before defragmentation: " + @@ -81,24 +76,6 @@ function setupCollection() { return coll; } -function setFailPointOnConfigNodes(failpoint, mode) { - // Use clearFailPointOnConfigNodes() instead - assert(mode !== "off"); - let timesEnteredByNode = {}; - st.forEachConfigServer((config) => { - const fp = - assert.commandWorked(config.adminCommand({configureFailPoint: failpoint, mode: mode})); - timesEnteredByNode[config.host] = fp.count; - }); - return timesEnteredByNode; -} - -function clearFailPointOnConfigNodes(failpoint) { - st.forEachConfigServer((config) => { - assert.commandWorked(config.adminCommand({configureFailPoint: failpoint, mode: "off"})); - }); -} - // Setup collection for first tests const coll1 = setupCollection(); const coll1Name = coll1.getFullName(); @@ -196,7 +173,8 @@ jsTest.log("Begin and end defragmentation with balancer on"); { st.startBalancer(); // Allow the first phase transition to build the initial defragmentation state - setFailPointOnConfigNodes("skipDefragmentationPhaseTransition", {skip: 1}); + let configRSFailPoints = configureFailPointForRS( + st.configRS.nodes, "skipDefragmentationPhaseTransition", {}, {skip: 1}); assert.commandWorked(st.s.adminCommand({ configureCollectionBalancing: coll1Name, defragmentCollection: true, @@ -212,7 +190,7 @@ jsTest.log("Begin and end defragmentation with balancer on"); chunkSize: targetChunkSizeMB, })); // Ensure that the policy completes the phase transition... 
- clearFailPointOnConfigNodes("skipDefragmentationPhaseTransition"); + configRSFailPoints.off(); defragmentationUtil.waitForEndOfDefragmentation(st.s, coll1Name); st.stopBalancer(); } @@ -223,7 +201,8 @@ jsTest.log("Begin defragmentation with balancer off, end with it on"); const nss = coll.getFullName(); st.stopBalancer(); // Allow the first phase transition to build the initial defragmentation state - setFailPointOnConfigNodes("skipDefragmentationPhaseTransition", {skip: 1}); + let configRSFailPoints = configureFailPointForRS( + st.configRS.nodes, "skipDefragmentationPhaseTransition", {}, {skip: 1}); assert.commandWorked(st.s.adminCommand({ configureCollectionBalancing: nss, defragmentCollection: true, @@ -239,7 +218,7 @@ jsTest.log("Begin defragmentation with balancer off, end with it on"); chunkSize: targetChunkSizeMB, })); // Ensure that the policy completes the phase transition... - clearFailPointOnConfigNodes("skipDefragmentationPhaseTransition"); + configRSFailPoints.off(); defragmentationUtil.waitForEndOfDefragmentation(st.s, nss); st.stopBalancer(); } @@ -254,7 +233,8 @@ jsTest.log("Changed uuid causes defragmentation to restart"); coll.insertOne({key: 1, key2: 1}); assert.commandWorked(db.adminCommand({split: nss, middle: {key: 1}})); // Pause defragmentation after initialization but before phase 1 runs - setFailPointOnConfigNodes("afterBuildingNextDefragmentationPhase", "alwaysOn"); + let configRSFailPoints = configureFailPointForRS( + st.configRS.nodes, "afterBuildingNextDefragmentationPhase", {}, "alwaysOn"); assert.commandWorked(st.s.adminCommand({ configureCollectionBalancing: nss, defragmentCollection: true, @@ -264,7 +244,7 @@ jsTest.log("Changed uuid causes defragmentation to restart"); // Reshard collection assert.commandWorked(db.adminCommand({reshardCollection: nss, key: {key2: 1}})); // Let defragementation run - clearFailPointOnConfigNodes("afterBuildingNextDefragmentationPhase"); + configRSFailPoints.off(); defragmentationUtil.waitForEndOfDefragmentation(st.s, nss); st.stopBalancer(); // Ensure the defragmentation succeeded @@ -282,7 +262,8 @@ jsTest.log("Refined shard key causes defragmentation to restart"); coll.insertOne({key: 1, key2: 1}); assert.commandWorked(db.adminCommand({split: nss, middle: {key: 1}})); // Pause defragmentation after initialization but before phase 1 runs - setFailPointOnConfigNodes("afterBuildingNextDefragmentationPhase", "alwaysOn"); + let configRSFailPoints = configureFailPointForRS( + st.configRS.nodes, "afterBuildingNextDefragmentationPhase", {}, "alwaysOn"); assert.commandWorked(st.s.adminCommand({ configureCollectionBalancing: nss, defragmentCollection: true, @@ -293,7 +274,7 @@ jsTest.log("Refined shard key causes defragmentation to restart"); assert.commandWorked(coll.createIndex({key: 1, key2: 1})); assert.commandWorked(db.adminCommand({refineCollectionShardKey: nss, key: {key: 1, key2: 1}})); // Let defragementation run - clearFailPointOnConfigNodes("afterBuildingNextDefragmentationPhase"); + configRSFailPoints.off(); defragmentationUtil.waitForEndOfDefragmentation(st.s, nss); st.stopBalancer(); // Ensure the defragmentation succeeded diff --git a/jstests/sharding/balancing_based_on_size.js b/jstests/sharding/balancing_based_on_size.js index e47b53d86455e..1e9cd9112febe 100644 --- a/jstests/sharding/balancing_based_on_size.js +++ b/jstests/sharding/balancing_based_on_size.js @@ -7,6 +7,7 @@ 'use strict'; load("jstests/sharding/libs/find_chunks_util.js"); +load("jstests/libs/fail_point_util.js"); const maxChunkSizeMB = 1; const 
st = new ShardingTest({ @@ -75,13 +76,8 @@ jsTestLog("Printing sharding status after waiting for collection balance"); st.printShardingStatus(); // Wait for some more rounds and then check the balancer is not wrongly moving around data -st.forEachConfigServer((conn) => { - conn.adminCommand({ - configureFailPoint: 'overrideBalanceRoundInterval', - mode: 'alwaysOn', - data: {intervalMs: 100} - }); -}); +configureFailPointForRS( + st.configRS.nodes, 'overrideBalanceRoundInterval', {intervalMs: 100}, 'alwaysOn'); st.awaitBalancerRound(); st.awaitBalancerRound(); diff --git a/jstests/sharding/basic_sharding_params.js b/jstests/sharding/basic_sharding_params.js index 0790e39c45d04..adbf1a8c52c96 100644 --- a/jstests/sharding/basic_sharding_params.js +++ b/jstests/sharding/basic_sharding_params.js @@ -45,7 +45,7 @@ function shardingTestUsingObjects() { assert(s0.commandLine.hasOwnProperty("vvvvvv")); assert(s1.commandLine.hasOwnProperty("vvvvv")); - if (!TestData.catalogShard) { + if (!TestData.configShard) { assert(c0.commandLine.hasOwnProperty("vvvv")); } else { // Same as shard 1. @@ -86,7 +86,7 @@ function shardingTestUsingArrays() { assert(s0.commandLine.hasOwnProperty("vvvvv")); assert(s1.commandLine.hasOwnProperty("vvvv")); - if (!TestData.catalogShard) { + if (!TestData.configShard) { assert(c0.commandLine.hasOwnProperty("vvv")); } else { // Same as shard 1. diff --git a/jstests/sharding/bulk_shard_insert.js b/jstests/sharding/bulk_shard_insert.js index fddb4fcf0315e..41afeb67e33d3 100644 --- a/jstests/sharding/bulk_shard_insert.js +++ b/jstests/sharding/bulk_shard_insert.js @@ -8,18 +8,14 @@ */ (function() { 'use strict'; +load("jstests/libs/fail_point_util.js"); var st = new ShardingTest({shards: 4, chunkSize: 1}); // Double the balancer interval to produce fewer migrations per unit time so that the test does not // run out of stale shard version retries. -st.forEachConfigServer((conn) => { - conn.adminCommand({ - configureFailPoint: 'overrideBalanceRoundInterval', - mode: 'alwaysOn', - data: {intervalMs: 2000} - }); -}); +configureFailPointForRS( + st.configRS.nodes, 'overrideBalanceRoundInterval', {intervalMs: 2000}, 'alwaysOn'); assert.commandWorked(st.s0.adminCommand({enableSharding: 'TestDB'})); st.ensurePrimaryShard('TestDB', st.shard0.shardName); diff --git a/jstests/sharding/bulk_write_basic.js b/jstests/sharding/bulk_write_basic.js new file mode 100644 index 0000000000000..80631b1adefb5 --- /dev/null +++ b/jstests/sharding/bulk_write_basic.js @@ -0,0 +1,181 @@ +/* + * Tests that bulk write operations succeed on a two shard cluster with both + * sharded and unsharded data. 
+ * @tags: [multiversion_incompatible, featureFlagBulkWriteCommand] + */ + +(function() { +'use strict'; + +load("jstests/libs/namespace_utils.js"); // getDBNameAndCollNameFromFullNamespace() + +function bulkWriteBasicTest(ordered) { + jsTestLog(`Running bulkWrite command sharding test with ordered: ${ordered}`); + const st = new ShardingTest({ + shards: 2, + mongos: 2, + config: 1, + rs: {nodes: 1}, + mongosOptions: {setParameter: {logComponentVerbosity: tojson({sharding: 4})}} + }); + + function getCollection(ns) { + const [dbName, collName] = getDBNameAndCollNameFromFullNamespace(ns); + return st.s0.getDB(dbName)[collName]; + } + + const banana = "test.banana"; + const orange = "test2.orange"; + + const staleConfigBananaLog = /7279201.*Noting stale config response.*banana/; + const staleConfigOrangeLog = /7279201.*Noting stale config response.*orange/; + const staleDbTest2Log = /7279202.*Noting stale database response.*test2/; + + jsTestLog("Case 1: Collection doesn't exist yet."); + // Case 1: The collection doesn't exist yet. This results in a StaleConfig error on the + // shards and consequently mongos and the shards must all refresh. Then mongos needs to + // retry the bulk operation. + + // Connect via the first mongos. We do this so that the second mongos remains unused until + // a later test case. + const db_s0 = st.s0.getDB("test"); + assert.commandWorked(db_s0.adminCommand({ + bulkWrite: 1, + ops: [{insert: 0, document: {a: 0}}, {insert: 0, document: {a: 1}}], + ordered, + nsInfo: [{ns: banana}] + })); + + let insertedDocs = getCollection(banana).find({}).toArray(); + assert.eq(2, insertedDocs.length, `Inserted docs: '${tojson(insertedDocs)}'`); + assert(checkLog.checkContainsOnce(st.s0, staleConfigBananaLog)); + if (!ordered) { + // Check that the error for the 0th op was duplicated and used for the 1st op as well. + assert( + checkLog.checkContainsOnce(st.s0, /7695304.*Duplicating the error.*opIdx":1.*banana/)); + } + + jsTestLog("Case 2: The collection exists for some of the writes, but not for others."); + assert.commandWorked(db_s0.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {a: 2}}, + {insert: 1, document: {a: 0}}, + {insert: 0, document: {a: 3}} + ], + ordered, + nsInfo: [{ns: banana}, {ns: orange}] + })); + + insertedDocs = getCollection(banana).find({}).toArray(); + assert.eq(4, insertedDocs.length, `Inserted docs: '${tojson(insertedDocs)}'`); + insertedDocs = getCollection(orange).find({}).toArray(); + assert.eq(1, insertedDocs.length, `Inserted docs: '${tojson(insertedDocs)}'`); + assert(checkLog.checkContainsOnce(st.s0, staleConfigOrangeLog)); + + jsTestLog("Case 3: StaleDbVersion when unsharded collection moves between shards."); + const db_s1 = st.s1.getDB("test"); + // Case 3: Move the 'test2' DB back and forth across shards. This will result in bulkWrite + // getting a StaleDbVersion error. We run this on s1 so s0 doesn't know about the change. + assert.commandWorked(db_s1.adminCommand({movePrimary: 'test2', to: st.shard0.shardName})); + assert.commandWorked(db_s1.adminCommand({movePrimary: 'test2', to: st.shard1.shardName})); + + // Now run the bulk write command on s0. 
+ assert.commandWorked(db_s0.adminCommand( + {bulkWrite: 1, ops: [{insert: 0, document: {a: 3}}], nsInfo: [{ns: orange}]})); + insertedDocs = getCollection(orange).find({}).toArray(); + assert.eq(2, insertedDocs.length, `Inserted docs: '${tojson(insertedDocs)}'`); + assert(checkLog.checkContainsOnce(st.s0, staleDbTest2Log)); + + jsTestLog("Case 4: The collection is sharded and lives on both shards."); + // Case 4: Shard the collection and manually move chunks so that they live on + // both shards. We stop the balancer as well. We do all of this on s0, but then + // we run a bulk write command through s1, which has a stale view of the cluster. + assert.commandWorked(st.stopBalancer()); + + jsTestLog("Shard the collection."); + assert.commandWorked(getCollection(banana).createIndex({a: 1})); + assert.commandWorked(db_s0.adminCommand({enableSharding: "test"})); + assert.commandWorked(db_s0.adminCommand({shardCollection: banana, key: {a: 1}})); + + jsTestLog("Create chunks, then move them."); + assert.commandWorked(db_s0.adminCommand({split: banana, middle: {a: 2}})); + assert.commandWorked( + db_s0.adminCommand({moveChunk: banana, find: {a: 0}, to: st.shard0.shardName})); + assert.commandWorked( + db_s0.adminCommand({moveChunk: banana, find: {a: 3}, to: st.shard1.shardName})); + + jsTestLog("Running bulk write command."); + assert.commandWorked(db_s1.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {a: -1}}, + {insert: 1, document: {a: 1}}, + {insert: 0, document: {a: 4}} + ], + ordered, + nsInfo: [{ns: banana}, {ns: orange}] + })); + + insertedDocs = getCollection(banana).find({}).toArray(); + assert.eq(6, insertedDocs.length, `Inserted docs: '${tojson(insertedDocs)}'`); + insertedDocs = getCollection(orange).find({}).toArray(); + assert.eq(3, insertedDocs.length, `Inserted docs: '${tojson(insertedDocs)}'`); + + // checkLog doesn't work in this case because mongos may refresh its routing info before + // running the bulkWrite command, which means that the logs we're looking for won't get printed. + // However, since the document counts in the asserts above match up, mongos must have correctly + // routed the bulkWrite command. + + if (!ordered) { + jsTestLog("Case 5: Remaining operations executed on non-staleness error."); + // On errors like a DuplicateKeyError, execution of the bulkWrite command extends beyond + // the erroring operation. + // So overall, we expect: + // 1) bulkWrite command sent + // 2) Collection mango doesn't exist yet. StaleConfig error returned. + // 3) StaleConfig error duplicated for all operations. + // 4) Retry operation after refreshing + // 5) Operations 0, 1 (DuplicateKeyError), and 2 go through. Operation 3 hits + // a StaleConfig error, but no error duplication occurs. And finally the operation is + // retried and succeeds. + const mango = 'test3.mango'; + const strawberry = 'test3.strawberry'; + assert.commandWorked(db_s0.adminCommand({ + bulkWrite: 1, + ops: [ + {insert: 0, document: {_id: 1}}, + {insert: 0, document: {_id: 1}}, // DuplicateKeyError + {insert: 0, document: {a: 1}}, + {insert: 1, document: {a: 1}}, + {insert: 1, document: {a: 2}} + ], + ordered, + nsInfo: [{ns: mango}, {ns: strawberry}] + })); + // The fact that more than one document was inserted proves that the bulkWrite advanced + // past op 1's DuplicateKeyError. 
+ insertedDocs = getCollection(mango).find({}).toArray(); + assert.eq(2, insertedDocs.length, `Inserted docs: '${tojson(insertedDocs)}'`); + insertedDocs = getCollection(strawberry).find({}).toArray(); + assert.eq(2, insertedDocs.length, `Inserted docs: '${tojson(insertedDocs)}'`); + + // The StaleConfig error on op 0 should have been duplicated to all operations. + for (let i = 1; i < 5; i++) { + assert(checkLog.checkContainsOnce( + st.s0, new RegExp(`7695304.*Duplicating the error.*opIdx":${i}.*mango`))); + } + + // The StaleConfig error on op 3 should have been duplicated to op 4. + assert( + checkLog.checkContainsOnce(st.s0, /7279201.*Noting stale config response.*strawberry/)); + assert(checkLog.checkContainsOnce(st.s0, + /7695304.*Duplicating the error.*opIdx":4.*strawberry/)); + } + + st.stop(); +} + +bulkWriteBasicTest(true); +bulkWriteBasicTest(false); +})(); diff --git a/jstests/sharding/cancel_coordinate_txn_commit_with_tickets_exhausted.js b/jstests/sharding/cancel_coordinate_txn_commit_with_tickets_exhausted.js index b84a8b05a2464..c1e0f1e91cac9 100644 --- a/jstests/sharding/cancel_coordinate_txn_commit_with_tickets_exhausted.js +++ b/jstests/sharding/cancel_coordinate_txn_commit_with_tickets_exhausted.js @@ -27,6 +27,7 @@ load("jstests/libs/fail_point_util.js"); load('jstests/libs/parallelTester.js'); load("jstests/sharding/libs/create_sharded_collection_util.js"); +load("jstests/libs/auto_retry_transaction_in_sharding.js"); // For withTxnAndAutoRetryOnMongos. const kNumWriteTickets = 10; const st = new ShardingTest({ @@ -37,7 +38,7 @@ const st = new ShardingTest({ rsOptions: { setParameter: { // This test requires a fixed ticket pool size. - storageEngineConcurrencyAdjustmentAlgorithm: "", + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions", wiredTigerConcurrentWriteTransactions: kNumWriteTickets, // Lower transactionLifetimeLimitSeconds to cause TransactionCoordinators which haven't // yet made their commit or abort decision to time out and abort the transaction. @@ -85,10 +86,10 @@ const sessionCollection = session.getDatabase(dbName).getCollection(collName); // transactionThread won't need to persist a topology time. The scenario reported in SERVER-60685 // depended on the TransactionCoordinator being interrupted while persisting the participant list // which happens after waiting for the topology time to become durable. -session.startTransaction(); -assert.commandWorked(sessionCollection.insert({key: 400})); -assert.commandWorked(sessionCollection.insert({key: -400})); -assert.commandWorked(session.commitTransaction_forTesting()); +withTxnAndAutoRetryOnMongos(session, () => { + assert.commandWorked(sessionCollection.insert({key: 400})); + assert.commandWorked(sessionCollection.insert({key: -400})); +}); const hangWithLockDuringBatchRemoveFp = configureFailPoint(txnCoordinator, failpointName); diff --git a/jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js b/jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js index 582a87323045a..0925bc5cc410e 100644 --- a/jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js +++ b/jstests/sharding/catalog_shard_mongos_ops_on_config_and_admin.js @@ -1,13 +1,13 @@ /** * Tests to make sure that the mongos does not allow certain commands on the config and admin - * databases when catalogShard is enabled. + * databases when configShard is enabled. 
* - * @tags: [requires_fcv_70, featureFlagCatalogShard, featureFlagTransitionToCatalogShard] + * @tags: [requires_fcv_70, featureFlagTransitionToCatalogShard] */ (function() { "use strict"; -var st = new ShardingTest({mongos: 1, shards: 1, config: 1, catalogShard: true}); +var st = new ShardingTest({mongos: 1, shards: 1, config: 1, configShard: true}); let mongosAdminDB = st.s0.getDB("admin"); let mongosConfigDB = st.s0.getDB("config"); @@ -20,7 +20,7 @@ var res = mongosAdminDB.runCommand(cmdObj); assert.commandWorked(res); // Commands that should fail when run on collections in the config database when -// catalogShard is enabled +// configShard is enabled { assert.commandFailedWithCode( mongosAdminDB.runCommand({renameCollection: "config.shards", to: "config.joe"}), @@ -41,7 +41,7 @@ assert.commandWorked(res); } // Commands that should fail when run on collections in the admin database when -// catalogShard is enabled +// configShard is enabled { assert.commandFailedWithCode( mongosAdminDB.runCommand({renameCollection: "admin.system.roles", to: "admin.joe"}), diff --git a/jstests/sharding/change_stream_against_shard_mongod.js b/jstests/sharding/change_stream_against_shard_mongod.js index e66571caeb5ff..e42261faed096 100644 --- a/jstests/sharding/change_stream_against_shard_mongod.js +++ b/jstests/sharding/change_stream_against_shard_mongod.js @@ -46,4 +46,4 @@ for (let event of expectedEvents) { } st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/change_stream_error_label.js b/jstests/sharding/change_stream_error_label.js index 2e4476a46df65..37467f8ff85de 100644 --- a/jstests/sharding/change_stream_error_label.js +++ b/jstests/sharding/change_stream_error_label.js @@ -105,10 +105,10 @@ assert.eq(findCursor.objsLeftInBatch(), 0); const aggCursor = coll.aggregate([{$match: {}}, {$sort: {_id: 1}}], {cursor: {batchSize: 0}}); // Now stop shard0... -if (!TestData.catalogShard) { +if (!TestData.configShard) { st.rs0.stopSet(); } else { - // The config server is shard0 in catalog shard mode and we'll restart it later. + // The config server is shard0 in config shard mode and we'll restart it later. st.rs0.stopSet(undefined, true /* forRestart */); } @@ -156,7 +156,7 @@ assert.commandFailedWithCode(err, ErrorCodes.FailedToSatisfyReadPreference); // ... but does NOT include the "ResumableChangeStreamError" error label. assert(!("errorLabels" in err), err); -if (TestData.catalogShard) { +if (TestData.configShard) { // shard0 is the config server and it needs to be up for ShardingTest shutdown. 
st.rs0.startSet(undefined, true /* forRestart */); } diff --git a/jstests/sharding/change_stream_no_drop.js b/jstests/sharding/change_stream_no_drop.js index d378421cd18c5..60a50176c10bd 100644 --- a/jstests/sharding/change_stream_no_drop.js +++ b/jstests/sharding/change_stream_no_drop.js @@ -46,12 +46,9 @@ emptyChangeStream(changeStream); jsTest.log( 'The shard_collection_coordinator at second attempt (after failure) should not report drop events for orphaned'); { - configureFailPoint(st.shard0, - 'failAtCommitCreateCollectionCoordinator', - data = {}, - failPointMode = {times: 1}); + configureFailPoint(st.shard0, 'failAtCommitCreateCollectionCoordinator', {}, {times: 1}); - collectionName = dbName + '.coll'; + let collectionName = dbName + '.coll'; assert.commandWorked(st.s.adminCommand( {shardCollection: collectionName, key: {_id: "hashed"}, numInitialChunks: 10})); diff --git a/jstests/sharding/change_stream_no_shards.js b/jstests/sharding/change_stream_no_shards.js index e3ad7921b8281..0d8afc428229f 100644 --- a/jstests/sharding/change_stream_no_shards.js +++ b/jstests/sharding/change_stream_no_shards.js @@ -2,8 +2,8 @@ * Test that running a $changeStream aggregation on a cluster with no shards returns an empty result * set with a cursorID of zero. * - * Requires no shards so there can't be a catalog shard. - * @tags: [catalog_shard_incompatible] + * Requires no shards so there can't be a config shard. + * @tags: [config_shard_incompatible] */ (function() { const st = new ShardingTest({shards: 0}); diff --git a/jstests/sharding/change_streams_primary_shard_unaware.js b/jstests/sharding/change_streams_primary_shard_unaware.js index a377577012903..22e7eb7cfb449 100644 --- a/jstests/sharding/change_streams_primary_shard_unaware.js +++ b/jstests/sharding/change_streams_primary_shard_unaware.js @@ -9,7 +9,6 @@ // requires_majority_read_concern, // requires_persistence, // uses_change_streams, -// temporary_catalog_shard_incompatible, // ] (function() { "use strict"; @@ -36,7 +35,11 @@ const st = new ShardingTest({ rs: { nodes: 1, // Use a higher frequency for periodic noops to speed up the test. 
- setParameter: {periodicNoopIntervalSecs: 1, writePeriodicNoops: true}, + setParameter: { + periodicNoopIntervalSecs: 1, + writePeriodicNoops: true, + enableShardedIndexConsistencyCheck: false + }, }, other: {configOptions: nodeOptions} }); diff --git a/jstests/sharding/change_streams_unsharded_update_resume.js b/jstests/sharding/change_streams_unsharded_update_resume.js index be692f6e75820..a13b3077b1b30 100644 --- a/jstests/sharding/change_streams_unsharded_update_resume.js +++ b/jstests/sharding/change_streams_unsharded_update_resume.js @@ -62,4 +62,4 @@ assert.soon(() => { }); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js b/jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js index cb830f804e19e..769646ab01210 100644 --- a/jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js +++ b/jstests/sharding/change_streams_update_lookup_shard_metadata_missing.js @@ -75,4 +75,4 @@ assert.soon(() => csCursor.hasNext()); assert.docEq({_id: 0, a: -100, updated: true}, csCursor.next().fullDocument); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/check_metadata_consistency.js b/jstests/sharding/check_metadata_consistency.js index edada9a15a41e..7c8d83f158535 100644 --- a/jstests/sharding/check_metadata_consistency.js +++ b/jstests/sharding/check_metadata_consistency.js @@ -1,11 +1,9 @@ /* * Tests to validate the correct behaviour of checkMetadataConsistency command. * - * TODO SERVER-74445: Fix cluster level checkMetadataConsistency command with a catalog shard. * @tags: [ * featureFlagCheckMetadataConsistency, * requires_fcv_70, - * catalog_shard_incompatible, * ] */ @@ -23,6 +21,36 @@ function getNewDb() { return mongos.getDB(dbName + dbCounter++); } +function assertNoInconsistencies() { + const checkOptions = {'checkIndexes': 1}; + + let res = mongos.getDB("admin").checkMetadataConsistency(checkOptions).toArray(); + assert.eq(0, + res.length, + "Found unexpected metadata inconsistencies at cluster level: " + tojson(res)); + + mongos.getDBNames().forEach(dbName => { + if (dbName == 'admin') { + return; + } + + let db = mongos.getDB(dbName); + res = db.checkMetadataConsistency(checkOptions).toArray(); + assert.eq(0, + res.length, + "Found unexpected metadata inconsistencies at database level: " + tojson(res)); + + db.getCollectionNames().forEach(collName => { + let coll = db.getCollection(collName); + res = coll.checkMetadataConsistency(checkOptions).toArray(); + assert.eq( + 0, + res.length, + "Found unexpected metadata inconsistencies at collection level: " + tojson(res)); + }); + }); +} + (function testCursor() { const db = getNewDb(); @@ -68,8 +96,7 @@ function getNewDb() { // Clean up the database to pass the hooks that detect inconsistencies db.dropDatabase(); - res = mongos.getDB("admin").checkMetadataConsistency().toArray(); - assert.eq(0, res.length, tojson(res)); + assertNoInconsistencies(); })(); (function testCollectionUUIDMismatchInconsistency() { @@ -96,8 +123,7 @@ function getNewDb() { // Clean up the database to pass the hooks that detect inconsistencies db.dropDatabase(); - inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); - assert.eq(0, inconsistencies.length, tojson(inconsistencies)); + assertNoInconsistencies(); })(); (function testMisplacedCollection() { @@ -120,8 +146,7 @@ function getNewDb() { // Clean up the database to pass the hooks that detect inconsistencies db.dropDatabase(); - 
inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); - assert.eq(0, inconsistencies.length, tojson(inconsistencies)); + assertNoInconsistencies(); })(); (function testMissingShardKeyInconsistency() { @@ -146,8 +171,7 @@ function getNewDb() { // Clean up the database to pass the hooks that detect inconsistencies db.dropDatabase(); - inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); - assert.eq(0, inconsistencies.length, tojson(inconsistencies)); + assertNoInconsistencies(); })(); (function testMissingIndex() { @@ -171,8 +195,7 @@ function getNewDb() { // Fix inconsistencies and assert none are left assert.commandWorked(shard0Coll.dropIndex('index1')); - inconsistencies = db.checkMetadataConsistency({'checkIndexes': 1}).toArray(); - assert.eq(0, inconsistencies.length, tojson(inconsistencies)); + assertNoInconsistencies(); // Check inconsistent index property across shards assert.commandWorked(shard0Coll.createIndex( @@ -188,8 +211,7 @@ function getNewDb() { // Fix inconsistencies and assert none are left assert.commandWorked(shard0Coll.dropIndex('index1')); assert.commandWorked(shard1Coll.dropIndex('index1')); - inconsistencies = db.checkMetadataConsistency({'checkIndexes': 1}).toArray(); - assert.eq(0, inconsistencies.length, tojson(inconsistencies)); + assertNoInconsistencies(); })(); (function testHiddenShardedCollections() { @@ -209,16 +231,16 @@ function getNewDb() { const db2ConfigEntry = configDatabasesColl.findOne({_id: db2.getName()}); // Check that there are no inconsistencies so far - let inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); - assert.eq(0, inconsistencies.length, tojson(inconsistencies)); + assertNoInconsistencies(); // Remove db1 so that coll1 became hidden assert.commandWorked(configDatabasesColl.deleteOne({_id: db1.getName()})); - inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); + let inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); assert.eq(1, inconsistencies.length, tojson(inconsistencies)); assert.eq("HiddenShardedCollection", inconsistencies[0].type, tojson(inconsistencies[0])); - assert.eq(coll1.getFullName(), inconsistencies[0].details.ns, tojson(inconsistencies[0])); + assert.eq( + coll1.getFullName(), inconsistencies[0].details.namespace, tojson(inconsistencies[0])); // Remove db2 so that coll2 also became hidden assert.commandWorked(configDatabasesColl.deleteOne({_id: db2.getName()})); @@ -226,9 +248,11 @@ function getNewDb() { inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); assert.eq(2, inconsistencies.length, tojson(inconsistencies)); assert.eq("HiddenShardedCollection", inconsistencies[0].type, tojson(inconsistencies[0])); - assert.eq(coll1.getFullName(), inconsistencies[0].details.ns, tojson(inconsistencies[0])); + assert.eq( + coll1.getFullName(), inconsistencies[0].details.namespace, tojson(inconsistencies[0])); assert.eq("HiddenShardedCollection", inconsistencies[1].type, tojson(inconsistencies[1])); - assert.eq(coll2.getFullName(), inconsistencies[1].details.ns, tojson(inconsistencies[1])); + assert.eq( + coll2.getFullName(), inconsistencies[1].details.namespace, tojson(inconsistencies[1])); // Restore db1 and db2 configuration to ensure the correct behavior of dropDatabase operations assert.commandWorked(configDatabasesColl.insertMany([db1ConfigEntry, db2ConfigEntry])); @@ -236,6 +260,48 @@ function getNewDb() { // Clean up the database to pass the hooks that detect 
inconsistencies db1.dropDatabase(); db2.dropDatabase(); + assertNoInconsistencies(); +})(); + +(function testRoutingTableInconsistency() { + const db = getNewDb(); + const kSourceCollName = "coll"; + const ns = db[kSourceCollName].getFullName(); + + st.shardColl(db[kSourceCollName], {skey: 1}); + + // Insert a RoutingTableRangeOverlap inconsistency + const collUuid = st.config.collections.findOne({_id: ns}).uuid; + assert.commandWorked(st.config.chunks.updateOne({uuid: collUuid}, {$set: {max: {skey: 10}}})); + + // Insert a ZonesRangeOverlap inconsistency + let entry = { + _id: {ns: ns, min: {"skey": -100}}, + ns: ns, + min: {"skey": -100}, + max: {"skey": 100}, + tag: "a", + }; + assert.commandWorked(st.config.tags.insert(entry)); + entry = { + _id: {ns: ns, min: {"skey": 50}}, + ns: ns, + min: {"skey": 50}, + max: {"skey": 150}, + tag: "a", + }; + assert.commandWorked(st.config.tags.insert(entry)); + + // Database level mode command + let inconsistencies = db.checkMetadataConsistency().toArray(); + assert.eq(2, inconsistencies.length, tojson(inconsistencies)); + assert(inconsistencies.some(object => object.type === "RoutingTableRangeOverlap"), + tojson(inconsistencies)); + assert(inconsistencies.some(object => object.type === "ZonesRangeOverlap"), + tojson(inconsistencies)); + + // Clean up the database to pass the hooks that detect inconsistencies + db.dropDatabase(); inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); assert.eq(0, inconsistencies.length, tojson(inconsistencies)); })(); @@ -283,8 +349,7 @@ function getNewDb() { db_MisplacedCollection1.dropDatabase(); db_MisplacedCollection2.dropDatabase(); db_CollectionUUIDMismatch.dropDatabase(); - inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); - assert.eq(0, inconsistencies.length, tojson(inconsistencies)); + assertNoInconsistencies(); })(); st.stop(); diff --git a/jstests/sharding/check_metadata_consistency_large.js b/jstests/sharding/check_metadata_consistency_large.js new file mode 100644 index 0000000000000..a0fcdf08ce67b --- /dev/null +++ b/jstests/sharding/check_metadata_consistency_large.js @@ -0,0 +1,91 @@ +/* + * Tests to validate the correct behaviour of checkMetadataConsistency command with a lot of + * inconsistencies. + * + * @tags: [ + * featureFlagCheckMetadataConsistency, + * requires_fcv_71, + * resource_intensive, + * ] + */ + +(function() { +'use strict'; + +load("jstests/libs/fail_point_util.js"); +load("jstests/sharding/libs/create_sharded_collection_util.js"); + +// Configure initial sharding cluster +const st = new ShardingTest({}); +const mongos = st.s; + +const dbName = "testCheckMetadataConsistencyDB"; +var dbCounter = 0; + +function getNewDb() { + return mongos.getDB(dbName + dbCounter++); +} + +(function testManyInconsistencies() { + // Introduce a misplaced inconsistency + const db = getNewDb(); + assert.commandWorked( + mongos.adminCommand({enableSharding: db.getName(), primaryShard: st.shard0.shardName})); + assert.commandWorked(st.shard1.getDB(db.getName()).coll.insert({_id: 'foo'})); + + const kFakeInconsistenciesPerShard = 1000; + const data = {numInconsistencies: NumberInt(kFakeInconsistenciesPerShard)}; + const fp1 = configureFailPoint(st.shard0, 'insertFakeInconsistencies', data); + const fp2 = configureFailPoint(st.shard1, 'insertFakeInconsistencies', data); + + // If catalog shard is enabled, there will be introduced inconsistencies in shard0, shard1 and + // config. Otherwise, only shard0 and shard1. 
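    // [Editor's note] Worked example of the count computed just below, using the values set up
    // above in this test (annotation only, not part of the original patch): shard0 and shard1
    // each fabricate kFakeInconsistenciesPerShard = 1000 entries via the
    // 'insertFakeInconsistencies' fail point, and the document inserted directly on shard1 adds
    // one real misplaced-collection inconsistency. With a dedicated config server that gives
    // 2 * 1000 + 1 = 2001 expected entries; when the config server also acts as a shard it
    // contributes another batch, giving 3 * 1000 + 1 = 3001.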
+ const kExpectedInconsistencies = TestData.configShard ? 3 * kFakeInconsistenciesPerShard + 1 + : 2 * kFakeInconsistenciesPerShard + 1; + + let inconsistencies = db.checkMetadataConsistency().toArray(); + assert.eq(kExpectedInconsistencies, inconsistencies.length, tojson(inconsistencies)); + + // Clean up the database to pass the hooks that detect inconsistencies + fp1.off(); + fp2.off(); + db.dropDatabase(); + inconsistencies = mongos.getDB("admin").checkMetadataConsistency().toArray(); + assert.eq(0, inconsistencies.length, tojson(inconsistencies)); +})(); + +(function testMissingManyIndexes() { + const db = getNewDb(); + const checkOptions = {'checkIndexes': 1}; + const kIndexes = 60; + + assert.commandWorked(st.s.adminCommand({enableSharding: db.getName()})); + st.ensurePrimaryShard(db.getName(), st.shard0.shardName); + CreateShardedCollectionUtil.shardCollectionWithChunks(db.coll, {x: 1}, [ + {min: {x: MinKey}, max: {x: 1}, shard: st.shard0.shardName}, + {min: {x: 1}, max: {x: MaxKey}, shard: st.shard1.shardName}, + ]); + + const shard0Coll = st.shard0.getDB(db.getName()).coll; + const shard1Coll = st.shard1.getDB(db.getName()).coll; + + const shard0Indexes = Array.from({length: kIndexes}, (_, i) => ({['index0' + i]: 1})); + const shard1Indexes = Array.from({length: kIndexes}, (_, i) => ({['index1' + i]: 1})); + assert.commandWorked(shard0Coll.createIndexes(shard0Indexes)); + assert.commandWorked(shard1Coll.createIndexes(shard1Indexes)); + + // Check that the number of inconsistencies is correct + let inconsistencies = db.checkMetadataConsistency(checkOptions).toArray(); + assert.eq(kIndexes * 2, inconsistencies.length, tojson(inconsistencies)); + inconsistencies.forEach(inconsistency => { + assert.eq("InconsistentIndex", inconsistency.type, tojson(inconsistency)); + }); + + // Clean up the database to pass the hooks that detect inconsistencies + assert.commandWorked(db.coll.dropIndexes()); + inconsistencies = db.checkMetadataConsistency({'checkIndexes': 1}).toArray(); + assert.eq(0, inconsistencies.length, tojson(inconsistencies)); +})(); + +st.stop(); +})(); diff --git a/jstests/sharding/chunks_onCurrentShardSince.js b/jstests/sharding/chunks_onCurrentShardSince.js index 97ef8e010d39f..5237d8d1f61cb 100644 --- a/jstests/sharding/chunks_onCurrentShardSince.js +++ b/jstests/sharding/chunks_onCurrentShardSince.js @@ -1,15 +1,7 @@ /** * Tests that `onCurrentShardSince` is always consistent with `history[0].validAfter` on * config.chunks entries - * - * TODO (SERVER-72791) remove multiversion_incompatible, featureFlagAutoMerger and - * does_not_support_stepdowns flags since they are required only for upgradeFCVTest - * @tags: [multiversion_incompatible, featureFlagAutoMerger, does_not_support_stepdowns] */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); load("jstests/sharding/libs/find_chunks_util.js"); Random.setRandomSeed(); @@ -74,25 +66,6 @@ function assertChunksConsistency(chunksColl) { assert.eq(numTotalChunks, numConsistenChunks); } -/* Upgrade FCV test - * The upgrade procedure must add the new field `onCurrentShardSince` to all chunks - * TODO (SERVER-72791) remove this test after v7.0 becomes lastLTS - */ -function upgradeFCVTest(st, chunksColl, testDB) { - // Downgrade to the lastLTSFCV to force an upgrade afterwards - assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); - - // Create several chunks on different collections and perform some random moves to have - // different values on `onCurrentShardSince` fields 
- for (let i = 0; i < 10; i++) { - const coll = newShardedColl(st, testDB); - performRandomMoveChunks(coll); - } - - assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - assertChunksConsistency(chunksColl); -} - function moveAndMergeChunksTest(st, chunksColl, testDB) { const coll = newShardedColl(st, testDB); const collUuid = st.s.getDB("config").collections.findOne({_id: coll.getFullName()}).uuid; @@ -154,11 +127,9 @@ const chunksColl = st.config.chunks; const testDB = st.s.getDB(jsTestName()); /* Perform tests */ -if (!TestData.catalogShard) { - upgradeFCVTest(st, chunksColl, testDB); +if (!TestData.configShard) { moveAndMergeChunksTest(st, chunksColl, testDB); splitChunksTest(st, chunksColl, testDB); } -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/cleanup_orphaned_auth.js b/jstests/sharding/cleanup_orphaned_auth.js index dd6b384a27654..9ad965fa2c530 100644 --- a/jstests/sharding/cleanup_orphaned_auth.js +++ b/jstests/sharding/cleanup_orphaned_auth.js @@ -26,11 +26,14 @@ function assertUnauthorized(res, msg) { var st = new ShardingTest({auth: true, other: {keyFile: 'jstests/libs/key1', useHostname: false}}); var shardAdmin = st.shard0.getDB('admin'); -if (!TestData.catalogShard) { - // In catalog shard mode, this will create a user on the config server, which we already do +if (!TestData.configShard) { + // In config shard mode, this will create a user on the config server, which we already do // below. - shardAdmin.createUser( - {user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']}); + shardAdmin.createUser({ + user: 'admin', + pwd: 'x', + roles: ['clusterAdmin', 'userAdminAnyDatabase', 'directShardOperations'] + }); shardAdmin.auth('admin', 'x'); } @@ -38,7 +41,11 @@ var mongos = st.s0; var mongosAdmin = mongos.getDB('admin'); var coll = mongos.getCollection('foo.bar'); -mongosAdmin.createUser({user: 'admin', pwd: 'x', roles: ['clusterAdmin', 'userAdminAnyDatabase']}); +mongosAdmin.createUser({ + user: 'admin', + pwd: 'x', + roles: ['clusterAdmin', 'userAdminAnyDatabase', 'directShardOperations'] +}); mongosAdmin.auth('admin', 'x'); assert.commandWorked(mongosAdmin.runCommand({enableSharding: coll.getDB().getName()})); @@ -47,7 +54,7 @@ assert.commandWorked( mongosAdmin.runCommand({shardCollection: coll.getFullName(), key: {_id: 'hashed'}})); // cleanupOrphaned requires auth as admin user. 
-if (!TestData.catalogShard) { +if (!TestData.configShard) { assert.commandWorked(shardAdmin.logout()); } assertUnauthorized(shardAdmin.runCommand({cleanupOrphaned: 'foo.bar'})); diff --git a/jstests/sharding/clear_jumbo.js b/jstests/sharding/clear_jumbo.js index 5b2eb77b2d380..1ed2eb1fcbfd9 100644 --- a/jstests/sharding/clear_jumbo.js +++ b/jstests/sharding/clear_jumbo.js @@ -9,6 +9,7 @@ TestData.skipCheckShardFilteringMetadata = true; "use strict"; load("jstests/sharding/libs/find_chunks_util.js"); +load("jstests/libs/fail_point_util.js"); let st = new ShardingTest({shards: 2, other: {chunkSize: 1}}); @@ -137,13 +138,8 @@ let chunk = findChunksUtil.findOneChunkByNs(configDB, testNs, {min: {x: 0}}); assert(chunk.jumbo, tojson(chunk)); assert.eq(st.shard0.shardName, chunk.shard); -st.forEachConfigServer((conn) => { - conn.adminCommand({ - configureFailPoint: 'overrideBalanceRoundInterval', - mode: 'alwaysOn', - data: {intervalMs: 200} - }); -}); +configureFailPointForRS( + st.configRS.nodes, 'overrideBalanceRoundInterval', {intervalMs: 200}, 'alwaysOn'); runBalancer(testColl); diff --git a/jstests/sharding/clone_catalog_data.js b/jstests/sharding/clone_catalog_data.js index 0ec44aa13ead2..671673990afc4 100644 --- a/jstests/sharding/clone_catalog_data.js +++ b/jstests/sharding/clone_catalog_data.js @@ -1,15 +1,12 @@ -'use strict'; - // Test that the 'cloneCatalogData' command works correctly. // Eventually, _shardsvrMovePrimary will use this command. +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; // Do not check metadata consistency as unsharded collections are cloned to non-primary shards for // testing purposes. TestData.skipCheckMetadataConsistency = true; (() => { - load("jstests/libs/catalog_shard_util.js"); - function sortByName(a, b) { if (a.name < b.name) return -1; @@ -118,10 +115,12 @@ TestData.skipCheckMetadataConsistency = true; var indexes = res.cursor.firstBatch; indexes.sort(sortByName); - // TODO SERVER-74252: once 7.0 becomes LastLTS we can assume that the movePrimary will never - // copy indexes of sharded collections. + // For each unsharded collection, there should be a total of 3 indexes - one for the _id + // field and the other two that we have created. However, in the case of sharded + // collections, only the _id index is present. When cloning sharded collections, indexes are + // not copied. if (shardedColl) - assert(indexes.length === 1 || indexes.length === 3); + assert(indexes.length === 1); else assert(indexes.length === 3); @@ -166,9 +165,7 @@ TestData.skipCheckMetadataConsistency = true; }), ErrorCodes.InvalidOptions); - const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st); - - if (TestData.catalogShard) { + if (TestData.configShard) { // The config server is a shard and already has collections for the database. assert.commandFailedWithCode(st.configRS.getPrimary().adminCommand({ _shardsvrCloneCatalogData: 'test', @@ -176,22 +173,14 @@ TestData.skipCheckMetadataConsistency = true; writeConcern: {w: "majority"} }), ErrorCodes.NamespaceExists); - } else if (isCatalogShardEnabled) { - // The config server is dedicated but supports catalog shard mode, so it can accept shaded + } else { + // The config server is dedicated but supports config shard mode, so it can accept sharded // commands. 
assert.commandWorked(st.configRS.getPrimary().adminCommand({ _shardsvrCloneCatalogData: 'test', from: fromShard.host, writeConcern: {w: "majority"} })); - } else { - // A dedicated non-catalog shard supporting config server cannot run the command. - assert.commandFailedWithCode(st.configRS.getPrimary().adminCommand({ - _shardsvrCloneCatalogData: 'test', - from: fromShard.host, - writeConcern: {w: "majority"} - }), - ErrorCodes.NoShardingEnabled); } // Check that the command fails when failing to specify a source. diff --git a/jstests/sharding/cluster_cardinality_parameter.js b/jstests/sharding/cluster_cardinality_parameter.js index 9bec9f40167e8..ac8d2c0263d81 100644 --- a/jstests/sharding/cluster_cardinality_parameter.js +++ b/jstests/sharding/cluster_cardinality_parameter.js @@ -1,10 +1,6 @@ /** * Tests that the cluster parameter "shardedClusterCardinalityForDirectConns" has the correct value * after upgrade, downgrade, and addShard. - * - * TODO SERVER-75391: Enable when catalog shards can downgrade FCV - * @tags: [multiversion_incompatible, featureFlagClusterCardinalityParameter, - * catalog_shard_incompatible] */ (function() { @@ -28,23 +24,6 @@ let checkClusterParameter = function(conn, expectedValue) { checkClusterParameter(st.configRS.getPrimary(), false); checkClusterParameter(st.shard0, false); -assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); -assert.commandWorked(st.s.adminCommand({addShard: additionalShard.getURL(), name: "shard02"})); -assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - -// There are two shards in the cluster while upgrading, so the cluster parameter should be true -checkClusterParameter(st.configRS.getPrimary(), true); -checkClusterParameter(st.shard0, true); -checkClusterParameter(additionalShard.getPrimary(), true); - -assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV})); -removeShard(st, "shard02"); -assert.commandWorked(st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV})); - -// There is one shard in the cluster while upgrading, so the cluster parameter should be false -checkClusterParameter(st.configRS.getPrimary(), false); -checkClusterParameter(st.shard0, false); - assert.commandWorked(st.s.adminCommand({addShard: additionalShard.getURL(), name: "shard02"})); // Since the feature flag is enabled, addShard should update the cluster parameter diff --git a/jstests/sharding/cluster_server_parameter_commands_sharded.js b/jstests/sharding/cluster_server_parameter_commands_sharded.js index c1547d0c3ace7..14a3145a7d9c6 100644 --- a/jstests/sharding/cluster_server_parameter_commands_sharded.js +++ b/jstests/sharding/cluster_server_parameter_commands_sharded.js @@ -8,10 +8,11 @@ * multiversion_incompatible * ] */ -(function() { -'use strict'; - -load('jstests/libs/cluster_server_parameter_utils.js'); +import { + setupSharded, + testInvalidClusterParameterCommands, + testValidClusterParameterCommands, +} from "jstests/libs/cluster_server_parameter_utils.js"; // Tests that set/getClusterParameter works on all nodes of a sharded cluster. const options = { @@ -35,5 +36,4 @@ testInvalidClusterParameterCommands(st); // and the majority of nodes on all replica sets in the cluster. 
testValidClusterParameterCommands(st); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/cluster_time_across_add_shard.js b/jstests/sharding/cluster_time_across_add_shard.js new file mode 100644 index 0000000000000..47731eb5f7c69 --- /dev/null +++ b/jstests/sharding/cluster_time_across_add_shard.js @@ -0,0 +1,234 @@ +/** + * Test that a shardsvr replica set that has not initialized its shard identity via an + * addShard command can validate and sign cluster times, and that after its shard identity has + * been initialized, it is still able to validate cluster times that were signed when it was not a + * shard. If running in a config shard suite, instead test that a configsvr and shardsvr replica + * set that has not initialized its shard identity via a transitionFromDedicatedConfigServer + * command can validate and sign cluster times, and that after its shard identity has been + * initialized, it is still able to validate cluster times that were signed when it was not a + * shard. + */ + +(function() { +"use strict"; + +load("jstests/libs/fail_point_util.js"); +load("jstests/multiVersion/libs/multi_rs.js"); +load('jstests/replsets/rslib.js'); + +function createUser(rst) { + rst.getPrimary().getDB("admin").createUser({user: "root", pwd: "root", roles: ["root"]}, + {w: rst.nodes.length}); +} + +function authUser(node) { + assert(node.getDB("admin").auth("root", "root")); +} + +function createSession(node) { + const conn = new Mongo(node.host); + authUser(conn); + return conn.startSession({causalConsistency: false, retryWrites: false}); +} + +function withTemporaryTestData(callback, mods = {}) { + const originalTestData = TestData; + try { + TestData = Object.assign({}, TestData, mods); + callback(); + } finally { + TestData = originalTestData; + } +} + +// Start a replica set with keyfile authentication enabled so it will return signed cluster times +// its responses. +const numNodes = 3; +const keyFile = "jstests/libs/key1"; +const rstOpts = { + nodes: numNodes, + keyFile +}; +// TODO (SERVER-75472): Support transition from standalone replica set directly to config shard. +if (TestData.configShard) { + // Used to allow restarting config server as an independent replica set. + rstOpts["nodeOptions"] = {setParameter: {skipShardingConfigurationChecks: true}}; +} +const rst = new ReplSetTest(rstOpts); + +// If testing config shard, specify "wiredTiger" as the storage engine so that when this test runs +// in the inMemory variant this replica set doesn't get started with the "inMemory" storage engine +// and fail since the config server cannot use the "inMemory" storage engine. +rst.startSet(TestData.configShard ? {storageEngine: "wiredTiger"} : {}); +rst.initiate(); +const primary = rst.getPrimary(); + +// Create a user for running commands later on in the test. Make the user not have the +// advanceClusterTime privilege. This ensures that the server will not return cluster times +// signed with a dummy key. 
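// [Editor's note] For orientation only (hypothetical values, annotation not part of the original
// patch): the signed cluster time that this test later reads back via session.getClusterTime()
// has the shape sketched below; the assertions further down compare signature.keyId before and
// after the shard identity is initialized:
//
//   {
//       clusterTime: Timestamp(1680000000, 1),
//       signature: {hash: BinData(0, "..."), keyId: NumberLong("7212345678901234567")}
//   }
//
// and clients gossip it back verbatim, e.g.:
//   db.adminCommand({hello: 1, $clusterTime: session.getClusterTime()});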
+createUser(rst); + +let sessions = []; +let sessionOnPrimary; +rst.nodes.forEach(node => { + const session = createSession(node); + if (node == primary) { + sessionOnPrimary = session; + } + sessions.push(session); +}); + +const dbName = "testDb"; +const collName = "testColl"; +assert.commandWorked(sessionOnPrimary.getDatabase(dbName).getCollection(collName).insert({})); +const lastClusterTime = sessionOnPrimary.getClusterTime(); + +for (let session of sessions) { + session.advanceClusterTime(lastClusterTime); + assert.commandWorked(session.getDatabase("admin").runCommand("hello")); +} + +// Restart the replica set as a shardsvr (or configsvr if testing config shard). Use TestData with +// authentication settings so Mongo.prototype.getDB() takes care of re-authenticating after the +// network connection is re-established during ReplSetTest.prototype.upgradeSet(). +const isShardSvrRst = !TestData.configShard; +const tmpTestData = { + auth: true, + keyFile, + authUser: "__system", + keyFileData: "foopdedoop", + authenticationDatabase: "local" +}; +const upgradeOpts = { + appendOptions: true +}; +if (isShardSvrRst) { + // Restart the replica set as a shardsvr. + withTemporaryTestData(() => { + rst.upgradeSet(Object.assign({shardsvr: ""}, upgradeOpts)); + }, tmpTestData); +} else { + // Restart the replica set as a configsvr. + withTemporaryTestData(() => { + const cfg = rst.getReplSetConfigFromNode(); + cfg["configsvr"] = true; + reconfig(rst, cfg); + rst.upgradeSet(Object.assign( + {configsvr: "", setParameter: {skipShardingConfigurationChecks: false}}, upgradeOpts)); + }, tmpTestData); +} + +for (let session of sessions) { + // Reconnect and re-authenticate after the network connection was closed due to restart. + const error = assert.throws(() => session.getDatabase("admin").runCommand("hello")); + assert(isNetworkError(error), error); + authUser(session.getClient()); + + // Verify that the application is able to use a signed cluster time although the addShard + // or transitionFromDedicatedConfigServer command has not been run. + assert.commandWorked(session.getDatabase("admin").runCommand("hello")); + assert.eq(session.getClusterTime().signature.keyId, lastClusterTime.signature.keyId); +} + +let st, mongos, configRstPrimary; +if (isShardSvrRst) { + // Start a sharded cluster and add the shardsvr replica set to it. + st = new ShardingTest({ + mongos: 1, + config: 1, + shards: 1, + other: {keyFile}, + configOptions: { + // Additionally test TTL deletion of key documents. To speed up the test, make the + // documents expire right away. To prevent the documents from being deleted before all + // cluster time validation testing is completed, make the TTL monitor have a large + // sleep interval at first and then lower it at the end of the test when verifying that + // the documents do get deleted by the TTL monitor. + setParameter: + {newShardExistingClusterTimeKeysExpirationSecs: 1, ttlMonitorSleepSecs: 3600} + } + }); + assert.commandWorked(st.s.adminCommand({addShard: rst.getURL()})); + createUser(st.configRS); + configRstPrimary = st.configRS.getPrimary(); +} else { + // Start a sharded cluster and add the configsvr replica set to it. + if (TestData.mongosBinVersion) { + // Make sure the configsvr is in the same FCV as the mongos. + const fcv = binVersionToFCV(TestData.mongosBinVersion); + authutil.asCluster(rst.nodes, keyFile, () => { + // Transitioning from last-lts to last-continuous is only allowed when + // setFeatureCompatibilityVersion is called with fromConfigServer: true. 
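            // [Editor's note] Sketch of the intent (hypothetical FCV value, annotation not part
            // of the original patch): if TestData.mongosBinVersion is "last-continuous",
            // binVersionToFCV() yields the matching FCV string (e.g. "7.0"), and the request
            // below must carry fromConfigServer: true, since a plain
            //   adminCommand({setFeatureCompatibilityVersion: "7.0"})
            // issued by a client is not allowed to move a last-lts node to the last-continuous
            // FCV.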
+ assert.commandWorked(rst.getPrimary().adminCommand( + {setFeatureCompatibilityVersion: fcv, fromConfigServer: true})); + + // Wait for the new FCV to propagate to all configsvr nodes. + rst.awaitReplication(); + }); + } + + mongos = MongoRunner.runMongos({configdb: rst.getURL(), keyFile}); + authutil.asCluster(mongos, keyFile, () => { + assert.commandWorked(mongos.adminCommand({transitionFromDedicatedConfigServer: 1})); + }); + // Each client connection may only be authenticated once. + configRstPrimary = new Mongo(rst.getPrimary().host); +} + +rst.awaitReplication(); + +for (let session of sessions) { + // As a performance optimization, LogicalTimeValidator::validate() skips validating $clusterTime + // values which have a $clusterTime.clusterTime value smaller than the currently known signed + // $clusterTime value. It is possible (but not strictly guaranteed) for internal communication + // to have already happened between cluster members such that they all know about a signed + // $clusterTime value. This signed $clusterTime value would come from the new signing key + // generated by the config server primary. Here we use the alwaysValidateClientsClusterTime + // fail point to simulate the behavior of case when the internal communication with a signed + // $clusterTime value has not happened yet. + const fp = (() => { + const fpConn = new Mongo(session.getClient().host); + authUser(fpConn); + return configureFailPoint(fpConn, "alwaysValidateClientsClusterTime"); + })(); + + // Verify that after the addShard or transitionFromDedicatedConfigServer command has been run, + // the application is still able to use the cluster time signed when the replica set was not a + // shard. + assert.commandWorked(session.getDatabase("admin").runCommand("hello")); + + if (isShardSvrRst) { + // Verify that the new cluster time was signed with the sharded cluster's key (generated by + // the config server) instead of the shardsvr replica set's key. + assert.neq(session.getClusterTime().signature.keyId, lastClusterTime.signature.keyId); + + // Verify that the old cluster time can also be used against the mongos, config server, and + // other shard. + assert.commandWorked( + st.s.getDB("admin").runCommand({hello: 1, $clusterTime: lastClusterTime})); + assert.commandWorked(st.configRS.getPrimary().getDB("admin").runCommand( + {hello: 1, $clusterTime: lastClusterTime})); + assert.commandWorked(st.rs0.getPrimary().getDB("admin").runCommand( + {hello: 1, $clusterTime: lastClusterTime})); + } else { + // Verify that the new cluster time was signed with the existing key. + assert.eq(session.getClusterTime().signature.keyId, lastClusterTime.signature.keyId); + + // Verify that the old cluster time can also be used against the mongos. + assert.commandWorked( + mongos.getDB("admin").runCommand({hello: 1, $clusterTime: lastClusterTime})); + } + + fp.off(); +} + +authUser(configRstPrimary); + +if (mongos) { + MongoRunner.stopMongos(mongos); +} +if (st) { + st.stop(); +} +rst.stopSet(); +})(); diff --git a/jstests/sharding/clustered_top_chunk_split.js b/jstests/sharding/clustered_top_chunk_split.js index 301fd1fa51608..116cde729ec2a 100644 --- a/jstests/sharding/clustered_top_chunk_split.js +++ b/jstests/sharding/clustered_top_chunk_split.js @@ -4,13 +4,7 @@ * where extreme chunk is defined as the chunk containing either the upper or lower bound of the * entire shard key space. * - * This test mimics the existing clustered_top_chunk_split.js but on a clustered collection. 
- * - * TODO SERVER-61557: evaluate usefulness of this test. - * - * @tags: [ - * requires_fcv_53, - * ] + * This test mimics the existing top_chunk_split.js but on a clustered collection. */ (function() { 'use strict'; diff --git a/jstests/sharding/commands_that_write_accept_wc_configRS.js b/jstests/sharding/commands_that_write_accept_wc_configRS.js index b722d96472ace..5c7031c03ff80 100644 --- a/jstests/sharding/commands_that_write_accept_wc_configRS.js +++ b/jstests/sharding/commands_that_write_accept_wc_configRS.js @@ -10,9 +10,9 @@ * This test is labeled resource intensive because its total io_write is 70MB compared to a median * of 5MB across all sharding tests in wiredTiger. * - * Incompatible with catalog shard because it disables replication on shards but expects the - * config server to still satisfy majority write concern, which can't be true for a catalog shard. - * @tags: [resource_intensive, catalog_shard_incompatible] + * Incompatible with config shard because it disables replication on shards but expects the + * config server to still satisfy majority write concern, which can't be true for a config shard. + * @tags: [resource_intensive, config_shard_incompatible] */ load('jstests/libs/write_concern_util.js'); load('jstests/multiVersion/libs/auth_helpers.js'); diff --git a/jstests/sharding/compact_structured_encryption_data_coordinator.js b/jstests/sharding/compact_structured_encryption_data_coordinator.js index b909dbd10c5fe..ab48af9393b15 100644 --- a/jstests/sharding/compact_structured_encryption_data_coordinator.js +++ b/jstests/sharding/compact_structured_encryption_data_coordinator.js @@ -4,10 +4,7 @@ // requires_fcv_70, // ] -(function() { -'use strict'; - -load('jstests/fle2/libs/encrypted_client_util.js'); +import {EncryptedClient} from "jstests/fle2/libs/encrypted_client_util.js"; const options = { mongos: 1, @@ -15,9 +12,9 @@ const options = { rs: {nodes: [{}]}, }; -if (!TestData.catalogShard) { - // Setting config options will override shard options in catalog shard mode, which doesn't set - // the right audit node options on the catalog shard. +if (!TestData.configShard) { + // Setting config options will override shard options in config shard mode, which doesn't set + // the right audit node options on the config shard. options.config = 1; } @@ -87,5 +84,4 @@ if (kHaveAuditing) { st.stop(); -jsTest.log(reply); -})(); +jsTest.log(reply); \ No newline at end of file diff --git a/jstests/sharding/compound_hashed_shard_key_covered_query.js b/jstests/sharding/compound_hashed_shard_key_covered_query.js index 33996d25c0ecc..f2f9df698c437 100644 --- a/jstests/sharding/compound_hashed_shard_key_covered_query.js +++ b/jstests/sharding/compound_hashed_shard_key_covered_query.js @@ -2,10 +2,7 @@ * Test to verify the covering behaviour of compound hashed index on a cluster sharded with compound * hashed shard key. */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand(). 
+import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js"; const st = new ShardingTest({shards: 2}); const kDbName = jsTestName(); @@ -199,5 +196,4 @@ validateFindCmdOutputAndPlan({ stagesNotExpected: ["FETCH"] }); -st.stop(); -})(); \ No newline at end of file +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/compound_hashed_shard_key_targeting.js b/jstests/sharding/compound_hashed_shard_key_targeting.js index f1dab8f5fa7a7..10db1f6c74dba 100644 --- a/jstests/sharding/compound_hashed_shard_key_targeting.js +++ b/jstests/sharding/compound_hashed_shard_key_targeting.js @@ -7,12 +7,9 @@ * requires_majority_read_concern, * ] */ -(function() { -"use strict"; - load("jstests/aggregation/extras/utils.js"); // For arrayEq(). -load("jstests/libs/analyze_plan.js"); // For assertStagesForExplainOfCommand(). -load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions. +import {assertStagesForExplainOfCommand} from "jstests/libs/analyze_plan.js"; +load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions. load("jstests/sharding/libs/find_chunks_util.js"); load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); @@ -300,5 +297,4 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(st.s)) { ErrorCodes.ShardKeyNotFound); } -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/concurrent_create_indexes_with_drop_and_create_sharded_collection.js b/jstests/sharding/concurrent_create_indexes_with_drop_and_create_sharded_collection.js index e8b6965f96f9a..b3161df8bf90e 100644 --- a/jstests/sharding/concurrent_create_indexes_with_drop_and_create_sharded_collection.js +++ b/jstests/sharding/concurrent_create_indexes_with_drop_and_create_sharded_collection.js @@ -79,4 +79,4 @@ let rs0Collections = assert.commandWorked( assert.eq(1, rs0Collections.cursor.firstBatch.length); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/config_rs_no_primary.js b/jstests/sharding/config_rs_no_primary.js index c14e60f7269c6..7af8e11f745de 100644 --- a/jstests/sharding/config_rs_no_primary.js +++ b/jstests/sharding/config_rs_no_primary.js @@ -38,15 +38,15 @@ assert.neq(null, mongos2); var testOps = function(mongos) { jsTestLog("Doing ops that don't require metadata writes and thus should succeed against: " + mongos); - if (TestData.catalogShard) { - // In catalog shard mode there's also only one shard node up with no primary, so just verify + if (TestData.configShard) { + // In config shard mode there's also only one shard node up with no primary, so just verify // we can still do ops on a secondary that don't require metadata. mongos.setSecondaryOk(true); assert.eq(1, mongos.getDB('test').foo.count()); mongos.setSecondaryOk(false); } else { var initialCount = mongos.getDB('test').foo.count(); - // In catalog shard mode there's no primary. + // In config shard mode there's no primary. assert.commandWorked(mongos.getDB('test').foo.insert({a: 1})); assert.eq(initialCount + 1, mongos.getDB('test').foo.count()); } diff --git a/jstests/sharding/config_settings_schema.js b/jstests/sharding/config_settings_schema.js index 2f8d778bee435..d1d60bb896cea 100644 --- a/jstests/sharding/config_settings_schema.js +++ b/jstests/sharding/config_settings_schema.js @@ -1,13 +1,11 @@ /** * Tests that the schema on config.settings works as intended. 
* - * @tags: [featureFlagConfigSettingsSchema, requires_fcv_62, does_not_support_stepdowns] + * @tags: [does_not_support_stepdowns] */ (function() { 'use strict'; -load("jstests/libs/feature_flag_util.js"); - var st = new ShardingTest({shards: 1, config: 2}); let coll = st.config.settings; @@ -28,9 +26,7 @@ assert.commandFailed(coll.update({_id: "notARealSetting"}, {$set: {value: 10}}, // Updates that match the schema are accepted // No schema is enforced for balancer, automerge, and ReadWriteConcernDefaults assert.commandWorked(coll.update({_id: "balancer"}, {$set: {anything: true}}, {upsert: true})); -if (FeatureFlagUtil.isEnabled(st.config, "AutoMerger")) { - assert.commandWorked(coll.update({_id: "automerge"}, {$set: {anything: true}}, {upsert: true})); -} +assert.commandWorked(coll.update({_id: "automerge"}, {$set: {anything: true}}, {upsert: true})); assert.commandWorked( coll.update({_id: "ReadWriteConcernDefaults"}, {$set: {anything: true}}, {upsert: true})); // Schema enforces chunksize to be a number (not an int), so doubles will be accepted and the diff --git a/jstests/sharding/config_settings_schema_upgrade_downgrade.js b/jstests/sharding/config_settings_schema_upgrade_downgrade.js deleted file mode 100644 index 83a033d482ee0..0000000000000 --- a/jstests/sharding/config_settings_schema_upgrade_downgrade.js +++ /dev/null @@ -1,38 +0,0 @@ -/** - * TODO (SERVER-70763) remove this test after 7.0 becomes lastLTS - * - * Tests that a schema is added to the config.settings collection on upgrade and removed on - * downgrade. - * - * Incompatible with a catalog shard because catalog shards can't downgrade FCV below 7.0, and this - * test is only for that particular transition. - * @tags: [ - * multiversion_incompatible, - * featureFlagConfigSettingsSchema, - * does_not_support_stepdowns, - * catalog_shard_incompatible, - * ] - */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); - -var st = new ShardingTest({shards: 1, config: 2}); - -// Validator should be created for new clusters in 6.2 -let validatorDoc = st.config.getCollectionInfos({name: "settings"})[0].options.validator; -assert(validatorDoc); - -// Validator should be removed on downgrade -st.s.adminCommand({setFeatureCompatibilityVersion: lastLTSFCV}); -validatorDoc = st.config.getCollectionInfos({name: "settings"})[0].options.validator; -assert(!validatorDoc); - -// Validator should be added in on upgrade -st.s.adminCommand({setFeatureCompatibilityVersion: latestFCV}); -validatorDoc = st.config.getCollectionInfos({name: "settings"})[0].options.validator; -assert(validatorDoc); - -st.stop(); -})(); diff --git a/jstests/sharding/configsvr_retries_createindex_on_stale_config.js b/jstests/sharding/configsvr_retries_createindex_on_stale_config.js index 934636f45d5f9..b3fd3d6f49ec8 100644 --- a/jstests/sharding/configsvr_retries_createindex_on_stale_config.js +++ b/jstests/sharding/configsvr_retries_createindex_on_stale_config.js @@ -14,7 +14,7 @@ let st = new ShardingTest({shards: 2}); // Validate the initial state. validateSessionsCollection(st.shard0, true, true); validateSessionsCollection(st.shard1, false, false); -validateSessionsCollection(st.configRS.getPrimary(), TestData.catalogShard, TestData.catalogShard); +validateSessionsCollection(st.configRS.getPrimary(), TestData.configShard, TestData.configShard); // Drop the TTL index on shard0. 
assert.commandWorked(st.shard0.getDB("config").system.sessions.dropIndex({lastUse: 1})); diff --git a/jstests/sharding/configsvr_set_allow_migrations.js b/jstests/sharding/configsvr_set_allow_migrations.js new file mode 100644 index 0000000000000..581f631401c67 --- /dev/null +++ b/jstests/sharding/configsvr_set_allow_migrations.js @@ -0,0 +1,65 @@ + +(function() { +'use strict'; + +load("jstests/libs/retryable_writes_util.js"); + +function runConfigsvrSetAllowMigrationsWithRetries(st, ns, lsid, txnNumber, allowMigrations) { + var res; + assert.soon(() => { + res = st.configRS.getPrimary().adminCommand({ + _configsvrSetAllowMigrations: ns, + allowMigrations: allowMigrations, + collectionUUID: st.s.getCollection('config.collections').findOne({_id: ns}).uuid, + lsid: lsid, + txnNumber: txnNumber, + writeConcern: {w: "majority"} + }); + + if (RetryableWritesUtil.isRetryableCode(res.code) || + RetryableWritesUtil.errmsgContainsRetryableCodeName(res.errmsg) || + (res.writeConcernError && + RetryableWritesUtil.isRetryableCode(res.writeConcernError.code))) { + return false; // Retry + } + + return true; + }); + + return res; +} + +const st = new ShardingTest({shards: 1}); + +const dbName = "test"; +const collName = "foo"; +const ns = dbName + "." + collName; + +st.s.adminCommand({shardCollection: ns, key: {x: 1}}); + +let lsid = assert.commandWorked(st.s.getDB("admin").runCommand({startSession: 1})).id; + +assert.eq( + false, + st.s.getCollection('config.collections').findOne({_id: ns}).hasOwnProperty('allowMigrations')); + +assert.commandWorked(runConfigsvrSetAllowMigrationsWithRetries(st, ns, lsid, NumberLong(1), false)); + +let collectionMetadata = st.s.getCollection('config.collections').findOne({_id: ns}); +assert.eq(true, collectionMetadata.hasOwnProperty('allowMigrations')); +assert.eq(false, collectionMetadata.allowMigrations); + +// We should get a TransactionTooOld error if we try to re-execute the TXN with an older txnNumber +assert.commandFailedWithCode( + runConfigsvrSetAllowMigrationsWithRetries(st, ns, lsid, NumberLong(0), true), + ErrorCodes.TransactionTooOld); + +// The command should be idempotent +assert.commandWorked(runConfigsvrSetAllowMigrationsWithRetries(st, ns, lsid, NumberLong(2), false)); + +collectionMetadata = st.s.getCollection('config.collections').findOne({_id: ns}); +assert.eq(true, collectionMetadata.hasOwnProperty('allowMigrations')); +assert.eq(false, collectionMetadata.allowMigrations); + +st.stop(); +})(); diff --git a/jstests/sharding/conn_pool_stats.js b/jstests/sharding/conn_pool_stats.js index a34710be3e504..9c72eac2c5855 100644 --- a/jstests/sharding/conn_pool_stats.js +++ b/jstests/sharding/conn_pool_stats.js @@ -1,7 +1,9 @@ /** * Tests for the connPoolStats command. * - * @tags: [requires_fcv_63, temporary_catalog_shard_incompatible] + * Incompatible because it makes assertions about the specific number of connections used, which + * don't account for background activity on a config server. 
+ * @tags: [requires_fcv_63, config_shard_incompatible] */ load("jstests/libs/fail_point_util.js"); load("jstests/libs/conn_pool_helpers.js"); diff --git a/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js b/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js index c49b295d7bac7..d33c98c4f2967 100644 --- a/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js +++ b/jstests/sharding/conversion_of_replica_set_to_sharded_cluster.js @@ -307,7 +307,7 @@ checkCRUDCommands(rst0.getPrimary().getDB(dbName)); checkDDLCommands(rst0.getPrimary().getDB(DDLDbName)); let st = new ShardingTest({ - shards: TestData.catalogShard ? 1 : 0, + shards: TestData.configShard ? 1 : 0, mongos: 1, }); diff --git a/jstests/sharding/convert_to_and_from_sharded.js b/jstests/sharding/convert_to_and_from_sharded.js index 12e5eccfe0065..6ef54a2e37a30 100644 --- a/jstests/sharding/convert_to_and_from_sharded.js +++ b/jstests/sharding/convert_to_and_from_sharded.js @@ -32,7 +32,7 @@ var checkBasicCRUD = function(coll) { assert.eq('marker', coll.findOne({_id: 'marker'})._id); }; -const numShards = TestData.catalogShard ? 1 : 0; +const numShards = TestData.configShard ? 1 : 0; var st = new ShardingTest({shards: numShards}); var replShard = new ReplSetTest({nodes: NUM_NODES}); diff --git a/jstests/sharding/coordinate_txn_commit_with_tickets_exhausted.js b/jstests/sharding/coordinate_txn_commit_with_tickets_exhausted.js index a2d16451495d5..a5681b54e1e09 100644 --- a/jstests/sharding/coordinate_txn_commit_with_tickets_exhausted.js +++ b/jstests/sharding/coordinate_txn_commit_with_tickets_exhausted.js @@ -26,7 +26,7 @@ const st = new ShardingTest({ rsOptions: { setParameter: { // This test requires a fixed ticket pool size. - storageEngineConcurrencyAdjustmentAlgorithm: "", + storageEngineConcurrencyAdjustmentAlgorithm: "fixedConcurrentTransactions", wiredTigerConcurrentWriteTransactions: kNumWriteTickets, // Raise maxTransactionLockRequestTimeoutMillis to prevent the transactions in prepare // conflict state from aborting early due to being unable to acquire a write ticket. diff --git a/jstests/sharding/count_config_servers.js b/jstests/sharding/count_config_servers.js index b127273b05e4c..540118d2dd65c 100644 --- a/jstests/sharding/count_config_servers.js +++ b/jstests/sharding/count_config_servers.js @@ -13,8 +13,7 @@ TestData.skipCheckShardFilteringMetadata = true; (function() { "use strict"; -var st = - new ShardingTest({name: 'sync_conn_cmd', shards: TestData.catalogShard ? 1 : 0, config: 3}); +var st = new ShardingTest({name: 'sync_conn_cmd', shards: TestData.configShard ? 
1 : 0, config: 3}); st.s.setSecondaryOk(); var configDB = st.config; diff --git a/jstests/sharding/covered_shard_key_indexes.js b/jstests/sharding/covered_shard_key_indexes.js index a5bc09b45a92b..13bec77584c76 100644 --- a/jstests/sharding/covered_shard_key_indexes.js +++ b/jstests/sharding/covered_shard_key_indexes.js @@ -3,10 +3,7 @@ // particular queries // -load("jstests/libs/analyze_plan.js"); - -(function() { -'use strict'; +import {getChunkSkipsFromShard} from "jstests/libs/analyze_plan.js"; const st = new ShardingTest({shards: 1}); const coll = st.s0.getCollection("foo.bar"); @@ -148,4 +145,3 @@ assert.eq(0, explain.executionStats.executionStages.shards[0])); st.stop(); -})(); diff --git a/jstests/sharding/cursor1.js b/jstests/sharding/cursor1.js index 45af3d6f6dc2a..1be914ff117d4 100644 --- a/jstests/sharding/cursor1.js +++ b/jstests/sharding/cursor1.js @@ -14,12 +14,12 @@ s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); db = s.getDB("test"); -primary = s.getPrimaryShard("test").getDB("test"); -secondary = s.getOther(primary).getDB("test"); +let primary = s.getPrimaryShard("test").getDB("test"); +let secondary = s.getOther(primary).getDB("test"); var numObjs = 30; var bulk = db.foo.initializeUnorderedBulkOp(); -for (i = 0; i < numObjs; i++) { +for (let i = 0; i < numObjs; i++) { bulk.insert({_id: i}); } assert.commandWorked(bulk.execute()); diff --git a/jstests/sharding/data_size_aware_balancing_sessions_collection.js b/jstests/sharding/data_size_aware_balancing_sessions_collection.js index 8f10308779f65..3958b36abfcf1 100644 --- a/jstests/sharding/data_size_aware_balancing_sessions_collection.js +++ b/jstests/sharding/data_size_aware_balancing_sessions_collection.js @@ -5,10 +5,6 @@ * resource_intensive, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); load("jstests/sharding/libs/find_chunks_util.js"); load('jstests/sharding/libs/remove_shard_util.js'); @@ -178,5 +174,4 @@ st.stopBalancer(); st.stop(); addedReplicaSets.forEach(rs => { rs.stopSet(); -}); -}()); +}); \ No newline at end of file diff --git a/jstests/sharding/database_versioning_all_commands.js b/jstests/sharding/database_versioning_all_commands.js index 7e031d1e99070..9297401b948f2 100644 --- a/jstests/sharding/database_versioning_all_commands.js +++ b/jstests/sharding/database_versioning_all_commands.js @@ -5,7 +5,6 @@ (function() { 'use strict'; -load("jstests/libs/feature_flag_util.js"); load('jstests/sharding/libs/last_lts_mongos_commands.js'); function getNewDbName(dbName) { @@ -118,13 +117,7 @@ function testCommandAfterMovePrimary(testCase, st, dbName, collName) { // After the movePrimary, both old and new primary shards should have cleared the dbVersion. assertMongosDatabaseVersion(st.s0, dbName, dbVersionBefore); assertShardDatabaseVersion(primaryShardBefore, dbName, {}); - // TODO (SERVER-71309): Remove once 7.0 becomes last LTS. - if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'), - "ResilientMovePrimary")) { - assertShardDatabaseVersion(primaryShardAfter, dbName, {}); - } else { - assertShardDatabaseVersion(primaryShardAfter, dbName, dbVersionBefore); - } + assertShardDatabaseVersion(primaryShardAfter, dbName, {}); // Run the test case's command. const res = st.s0.getDB(testCase.runsAgainstAdminDb ? "admin" : dbName).runCommand(command); @@ -152,13 +145,7 @@ function testCommandAfterMovePrimary(testCase, st, dbName, collName) { // 3. 
Both old and new primary shards should have cleared the dbVersion assertMongosDatabaseVersion(st.s0, dbName, dbVersionBefore); assertShardDatabaseVersion(primaryShardBefore, dbName, {}); - // TODO (SERVER-71309): Remove once 7.0 becomes last LTS. - if (FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'), - "ResilientMovePrimary")) { - assertShardDatabaseVersion(primaryShardAfter, dbName, {}); - } else { - assertShardDatabaseVersion(primaryShardAfter, dbName, dbVersionBefore); - } + assertShardDatabaseVersion(primaryShardAfter, dbName, {}); } if (testCase.cleanUp) { @@ -270,6 +257,8 @@ let testCases = { _isSelf: {skip: "executes locally on mongos (not sent to any remote node)"}, _killOperations: {skip: "executes locally on mongos (not sent to any remote node)"}, _mergeAuthzCollections: {skip: "always targets the config server"}, + _mongotConnPoolStats: {skip: "not on a user database", conditional: true}, + _dropConnectionsToMongot: {skip: "not on a user database", conditional: true}, abortReshardCollection: {skip: "always targets the config server"}, abortTransaction: {skip: "unversioned and uses special targetting rules"}, addShard: {skip: "not on a user database"}, @@ -316,9 +305,32 @@ let testCases = { balancerStatus: {skip: "not on a user database"}, balancerStop: {skip: "not on a user database"}, buildInfo: {skip: "executes locally on mongos (not sent to any remote node)"}, - bulkWrite: {skip: "not yet implemented"}, - checkMetadataConsistency: {skip: "not yet implemented"}, + bulkWrite: { + // TODO SERVER-52419: Run this test and remove the skip. + // run: { + // sendsDbVersion: true, + // runsAgainstAdminDb: true, + // command: function(dbName, collName) { + // return { + // bulkWrite: 1, + // ops: [{insert: 0, document: {_id: 1}}], + // nsInfo: [{ns: dbName + "." 
+ collName}] + // }; + // }, + // } + skip: "requires feature flag" + }, + checkMetadataConsistency: { + run: { + sendsDbVersion: true, + runsAgainstAdminDb: false, + command: function(dbName, collName) { + return {checkMetadataConsistency: 1}; + } + } + }, cleanupReshardCollection: {skip: "always targets the config server"}, + cleanupStructuredEncryptionData: {skip: "requires encrypted collections"}, clearJumboFlag: {skip: "does not forward command to primary shard"}, clearLog: {skip: "executes locally on mongos (not sent to any remote node)"}, collMod: { @@ -373,6 +385,7 @@ let testCases = { } } }, + cpuload: {skip: "executes locally on mongos (not sent to any remote node)"}, create: { run: { sendsDbVersion: true, @@ -460,7 +473,7 @@ let testCases = { echo: {skip: "does not forward command to primary shard"}, enableSharding: {skip: "does not forward command to primary shard"}, endSessions: {skip: "goes through the cluster write path"}, - explain: {skip: "TODO SERVER-31226"}, + explain: {skip: "already tested by each CRUD command through the 'explain' field"}, features: {skip: "executes locally on mongos (not sent to any remote node)"}, filemd5: { run: { @@ -678,6 +691,7 @@ let testCases = { }, repairShardedCollectionChunksHistory: {skip: "always targets the config server"}, replSetGetStatus: {skip: "not supported in mongos"}, + resetPlacementHistory: {skip: "always targets the config server"}, reshardCollection: {skip: "does not forward command to primary shard"}, revokePrivilegesFromRole: {skip: "always targets the config server"}, revokeRolesFromRole: {skip: "always targets the config server"}, @@ -725,7 +739,7 @@ let testCases = { testRemoval: {skip: "executes locally on mongos (not sent to any remote node)"}, testVersion2: {skip: "executes locally on mongos (not sent to any remote node)"}, testVersions1And2: {skip: "executes locally on mongos (not sent to any remote node)"}, - transitionToCatalogShard: {skip: "not on a user database"}, + transitionFromDedicatedConfigServer: {skip: "not on a user database"}, transitionToDedicatedConfigServer: {skip: "not on a user database"}, update: { run: { diff --git a/jstests/sharding/ddl_commits_trigger_placement_op_entries.js b/jstests/sharding/ddl_commits_trigger_placement_op_entries.js new file mode 100644 index 0000000000000..943cd7adca01e --- /dev/null +++ b/jstests/sharding/ddl_commits_trigger_placement_op_entries.js @@ -0,0 +1,204 @@ +/** + * Verifies that successful commits of Sharding DDL operations generate the expected op entry types + * (following the format and rules defined in the design doc of PM-1939). 
+ * + * @tags: [ + * does_not_support_stepdowns, + * requires_fcv_70, + * ] + */ +(function() { +load('jstests/libs/fail_point_util.js'); +load('jstests/libs/parallel_shell_helpers.js'); +load('jstests/libs/discover_topology.js'); + +const st = new ShardingTest({shards: 3, chunkSize: 1}); +const configDB = st.s.getDB('config'); + +function getExpectedOpEntriesOnNewDb(dbName, primaryShard, isImported = false) { + // The creation of a database is matched by the generation of two op entries: + return [ + // - One emitted before the metadata is committed on the sharding catalog + { + op: 'n', + ns: dbName, + o: {msg: {createDatabasePrepare: dbName}}, + o2: {createDatabasePrepare: dbName, primaryShard: primaryShard, isImported: isImported} + }, + // - The second one emitted once the metadata is committed on the sharding catalog + { + op: 'n', + ns: dbName, + o: {msg: {createDatabase: dbName}}, + o2: {createDatabase: dbName, isImported: isImported} + }, + ]; +} + +function verifyOpEntriesOnNodes(expectedOpEntryTemplates, nodes) { + const namespaces = [...new Set(expectedOpEntryTemplates.map(t => t.ns))]; + for (const node of nodes) { + const foundOpEntries = node.getCollection('local.oplog.rs') + .find({ns: {$in: namespaces}, op: {$in: ['c', 'n']}}) + .sort({ts: -1}) + .limit(expectedOpEntryTemplates.length) + .toArray() + .reverse(); + + assert.eq(expectedOpEntryTemplates.length, foundOpEntries.length); + for (let i = 0; i < foundOpEntries.length; ++i) { + assert.eq(expectedOpEntryTemplates[i].op, foundOpEntries[i].op); + assert.eq(expectedOpEntryTemplates[i].ns, foundOpEntries[i].ns); + assert.docEq(expectedOpEntryTemplates[i].o, foundOpEntries[i].o); + assert.docEq(expectedOpEntryTemplates[i].o2, foundOpEntries[i].o2); + } + } +} + +function testCreateDatabase(dbName = 'createDatabaseTestDB', primaryShardId = st.shard0.shardName) { + jsTest.log('test createDatabase'); + + assert.commandWorked(st.s.adminCommand({enableSharding: dbName, primaryShard: primaryShardId})); + + // Each shard of the cluster should have received the notifications about the database creation + // (and generated the related op entries). + const shardPrimaryNodes = Object.values(DiscoverTopology.findConnectedNodes(st.s).shards) + .map(s => new Mongo(s.primary)); + const expectedOpEntries = getExpectedOpEntriesOnNewDb(dbName, primaryShardId); + verifyOpEntriesOnNodes(expectedOpEntries, shardPrimaryNodes); +} + +function testShardCollection() { + jsTest.log('Testing placement entries added by shardCollection() (with implicit DB creation)'); + + const dbName = 'shardCollectionTestDB'; + const collName = 'coll'; + const nss = dbName + '.' + collName; + + // Run shardCollection, ensuring that each shard will host at least one of the chunks. + const topology = DiscoverTopology.findConnectedNodes(st.s); + const numInitialChunks = Object.values(topology.shards).length + 1; + assert.commandWorked(st.s.adminCommand( + {shardCollection: nss, key: {_id: "hashed"}, numInitialChunks: numInitialChunks})); + + // Verify that the op entries for the creation of the parent DB have been generated on each + // shard of the cluster. 
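// [Editor's note] A hedged sketch, not part of this patch, of how one of the no-op
// placement entries checked below can be inspected by hand from a shard primary. The
// `shardPrimaryConn` variable is an assumed direct connection; the query fields come
// from the templates in getExpectedOpEntriesOnNewDb() above.
const lastCreateDbEntry = shardPrimaryConn.getCollection('local.oplog.rs')
                              .find({op: 'n', 'o2.createDatabase': dbName})
                              .sort({ts: -1})
                              .limit(1)
                              .next();
printjson(lastCreateDbEntry.o2);  // e.g. {createDatabase: <dbName>, isImported: false}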
+ const primaryShard = configDB.databases.findOne({_id: dbName}).primary; + const expectedEntriesForDbCreation = getExpectedOpEntriesOnNewDb(dbName, primaryShard); + const shardPrimaryNodes = Object.values(topology.shards).map(s => new Mongo(s.primary)); + verifyOpEntriesOnNodes(expectedEntriesForDbCreation, shardPrimaryNodes); + + // Verify that the op entries for the sharded collection have been generated by the primary + // shard. + const allShardNames = Object.keys(topology.shards); + const primaryShardPrimaryNode = new Mongo(topology.shards[primaryShard].primary); + + const expectedEntriesForCollSharded = [ + // One entry emitted before the metadata is committed on the sharding catalog + { + op: 'n', + ns: nss, + o: {msg: {shardCollectionPrepare: nss}}, + o2: { + shardCollectionPrepare: nss, + shards: allShardNames, + shardKey: {_id: 'hashed'}, + unique: false, + numInitialChunks: numInitialChunks, + presplitHashedZones: false + } + }, + // One entry emitted once the metadata is committed on the sharding catalog + { + op: 'n', + ns: nss, + o: {msg: {shardCollection: nss}}, + o2: { + shardCollection: nss, + shardKey: {_id: 'hashed'}, + unique: false, + numInitialChunks: numInitialChunks, + presplitHashedZones: false + } + } + ]; + + verifyOpEntriesOnNodes(expectedEntriesForCollSharded, [primaryShardPrimaryNode]); +} + +function testAddShard() { + jsTest.log('Test addShard'); + const shardPrimaryNodes = Object.values(DiscoverTopology.findConnectedNodes(st.s).shards) + .map(s => new Mongo(s.primary)); + + // Create a new replica set and populate it with two DBs + const newReplicaSet = new ReplSetTest({name: 'addedShard', nodes: 1}); + const newShardName = 'addedShard'; + const preExistingCollName = 'preExistingColl'; + newReplicaSet.startSet({shardsvr: ""}); + newReplicaSet.initiate(); + const dbsOnNewReplicaSet = ['addShardTestDB1', 'addShardTestDB2']; + for (const dbName of dbsOnNewReplicaSet) { + const db = newReplicaSet.getPrimary().getDB(dbName); + assert.commandWorked(db[preExistingCollName].save({value: 1})); + } + + // Add the new replica set as a shard + assert.commandWorked(st.s.adminCommand({addShard: newReplicaSet.getURL(), name: newShardName})); + + // Each already existing shard should contain the op entries for each database hosted by + // newReplicaSet (that have been added to the catalog as part of addShard). + for (let dbName of dbsOnNewReplicaSet) { + const expectedOpEntries = + getExpectedOpEntriesOnNewDb(dbName, newShardName, true /*isImported*/); + verifyOpEntriesOnNodes(expectedOpEntries, shardPrimaryNodes); + } + + // Execute the test case teardown + for (const dbName of dbsOnNewReplicaSet) { + assert.commandWorked(st.getDB(dbName).dropDatabase()); + } + let res = assert.commandWorked(st.s.adminCommand({removeShard: newShardName})); + assert.eq('started', res.state); + res = assert.commandWorked(st.s.adminCommand({removeShard: newShardName})); + assert.eq('completed', res.state); + newReplicaSet.stopSet(); +} + +function testMovePrimary() { + jsTest.log( + 'Testing placement entries added by movePrimary() over a new sharding-enabled DB with no data'); + + // Set the initial state + const dbName = 'movePrimaryTestDB'; + const fromPrimaryShard = st.shard0; + const fromReplicaSet = st.rs0; + testCreateDatabase(dbName, fromPrimaryShard.shardName); + + // Move the primary shard. 
+ const toPrimaryShard = st.shard1; + assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: toPrimaryShard.shardName})); + + // Verify that the old shard generated the expected event. + const expectedEntriesForPrimaryMoved = [{ + op: 'n', + ns: dbName, + o: {msg: {movePrimary: dbName}}, + o2: {movePrimary: dbName, from: fromPrimaryShard.shardName, to: toPrimaryShard.shardName}, + }]; + + verifyOpEntriesOnNodes(expectedEntriesForPrimaryMoved, [fromReplicaSet.getPrimary()]); +} + +jsTest.log(`TROLL! ${tojson(DiscoverTopology.findConnectedNodes(st.s))}`); + +testCreateDatabase(); + +testShardCollection(); + +testAddShard(); + +testMovePrimary(); + +st.stop(); +}()); diff --git a/jstests/sharding/ddl_commits_with_two_phase_oplog_notification.js b/jstests/sharding/ddl_commits_with_two_phase_oplog_notification.js deleted file mode 100644 index 9502c94b312da..0000000000000 --- a/jstests/sharding/ddl_commits_with_two_phase_oplog_notification.js +++ /dev/null @@ -1,123 +0,0 @@ -/** - * Verifies that the successful commit of Sharding DDL operations implementing the "2-phase oplog" - * notification generate the expected op entries. - * @tags: [ - * does_not_support_stepdowns, - * requires_fcv_70, - * ] - */ -(function() { -load('jstests/libs/fail_point_util.js'); -load('jstests/libs/parallel_shell_helpers.js'); - -const kPrepareCommit = 0; -const kCommitSuccessful = 1; - -const st = new ShardingTest({shards: 2, chunkSize: 1}); - -function verifyOpEntriesForDatabaseOnRS(dbName, isImported, dbPrimaryShard, replicaSet) { - const primaryNodeOplog = replicaSet.getPrimary().getDB('local').oplog.rs; - - const generatedOpEntries = primaryNodeOplog.find({'o.msg.createDatabase': dbName}).toArray(); - assert.eq(2, generatedOpEntries.length); - - const prepareCommitEntry = generatedOpEntries[0]; - assert.eq(dbName, prepareCommitEntry.o2.createDatabase); - assert.eq(kPrepareCommit, prepareCommitEntry.o2.phase); - assert.eq(isImported, prepareCommitEntry.o2.isImported); - assert.eq(dbPrimaryShard, prepareCommitEntry.o2.primaryShard); - - const commitSuccessfulEntry = generatedOpEntries[1]; - assert.eq(dbName, commitSuccessfulEntry.o2.createDatabase); - assert.eq(kCommitSuccessful, commitSuccessfulEntry.o2.phase); - assert.eq(isImported, commitSuccessfulEntry.o2.isImported); - assert.eq(undefined, commitSuccessfulEntry.o2.primaryShard); -} - -function testCreateDatabase() { - jsTest.log('test createDatabase'); - const dbName = 'createDatabaseTestDB'; - const primaryShard = st.rs0; - const primaryShardId = st.shard0.shardName; - - // Execute enableSharding, injecting a stepdown of the config server between the write into the - // sharding catalog and the remote notification of the "commitSuccessful" event. The command is - // expected to eventually succeed. - let failpointHandle = - configureFailPoint(st.configRS.getPrimary(), 'hangBeforeNotifyingCreateDatabaseCommitted'); - - const joinDatabaseCreation = startParallelShell( - funWithArgs(function(dbName, primaryShardName) { - assert.commandWorked( - db.adminCommand({enableSharding: dbName, primaryShard: primaryShardName})); - }, dbName, primaryShardId), st.s.port); - - failpointHandle.wait(); - assert.commandWorked(st.configRS.getPrimary().adminCommand( - {replSetStepDown: 10 /* stepDownSecs */, force: true})); - failpointHandle.off(); - - // Allow enableSharding to finish. - joinDatabaseCreation(); - - // Despite the CSRS stepdown, the remote notification of each phase should have reached the - // primary shard of the newly created database. 
As a consequence of this, a single op entry for - // each phase should have been generated. - verifyOpEntriesForDatabaseOnRS(dbName, false /*isImported*/, primaryShardId, primaryShard); -} - -function testAddShard() { - jsTest.log('Test addShard'); - - // Create a new replica set and populate it with two DBs - const newReplicaSet = new ReplSetTest({name: 'addedShard', nodes: 1}); - const newShardName = 'addedShard'; - const preExistingCollName = 'preExistingColl'; - newReplicaSet.startSet({shardsvr: ""}); - newReplicaSet.initiate(); - const dbsOnNewReplicaSet = ['addShardTestDB1', 'addShardTestDB2']; - for (const dbName of dbsOnNewReplicaSet) { - const db = newReplicaSet.getPrimary().getDB(dbName); - assert.commandWorked(db[preExistingCollName].save({value: 1})); - } - - // Execute addShard, injecting a stepdown of the config server between the write into the - // sharding catalog and the remote notification of the "commitSuccessful" event. The command is - // expected to eventually succeed. - let failpointHandle = - configureFailPoint(st.configRS.getPrimary(), 'hangBeforeNotifyingaddShardCommitted'); - - const joinAddShard = startParallelShell( - funWithArgs(function(newShardUrl, newShardName) { - assert.commandWorked(db.adminCommand({addShard: newShardUrl, name: newShardName})); - }, newReplicaSet.getURL(), newShardName), st.s.port); - - failpointHandle.wait(); - assert.commandWorked(st.configRS.getPrimary().adminCommand( - {replSetStepDown: 10 /* stepDownSecs */, force: true})); - failpointHandle.off(); - - // Allow addShard to finish. - joinAddShard(); - - // Despite the CSRS stepdown, the remote notification of each phase should have reached each - // pre-existing shard of the cluster. As a consequence of this, each shard should contain 2 op - // entries for each database imported from the new RS as part of addShard. - for (let existingShard of [st.rs0, st.rs1]) { - for (let importedDB of dbsOnNewReplicaSet) { - verifyOpEntriesForDatabaseOnRS( - importedDB, true /*isImported*/, newShardName, existingShard); - } - } - - // Execute the test case teardown - st.s.adminCommand({removeShard: newShardName}); - newReplicaSet.stopSet(); -} - -testCreateDatabase(); - -testAddShard(); - -st.stop(); -}()); diff --git a/jstests/sharding/defragment_large_collection.js b/jstests/sharding/defragment_large_collection.js index beea51aafe508..ebca6e2403339 100644 --- a/jstests/sharding/defragment_large_collection.js +++ b/jstests/sharding/defragment_large_collection.js @@ -40,7 +40,7 @@ let runTest = function(numCollections, dbName) { for (let i = 0; i < numCollections; ++i) { const numChunks = Random.randInt(28) + 2; const numZones = Random.randInt(numChunks / 2); - const docSizeBytes = Random.randInt(1024 * 1024) + 50; + const docSizeBytesRange = [50, 1024 * 1024]; const coll = db[coll_prefix + i]; @@ -49,7 +49,7 @@ let runTest = function(numCollections, dbName) { numChunks, maxChunkFillMB, numZones, - docSizeBytes, + docSizeBytesRange, chunkSpacing, true); diff --git a/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js b/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js index 22dcf02724a94..0e077cbe50845 100644 --- a/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js +++ b/jstests/sharding/delete_range_deletion_tasks_on_stepup_after_drop_collection.js @@ -41,7 +41,8 @@ const dbName = "test"; const collName = "foo"; const ns = dbName + "." 
+ collName; -let st = new ShardingTest({shards: 2}); +let st = new ShardingTest( + {shards: 2, configOptions: {setParameter: {enableShardedIndexConsistencyCheck: false}}}); let testColl = st.s.getDB(dbName).getCollection(collName); assert.commandWorked(st.s.adminCommand({enableSharding: dbName})); @@ -67,9 +68,7 @@ moveChunkHangAtStep5FailPoint.wait(); donorReplSetTest.freeze(donorPrimary); moveChunkHangAtStep5FailPoint.off(); -if (!TestData.catalogShard) { - moveChunkThread.join(); -} +moveChunkThread.join(); metadataRefreshFailPoint.wait(); donorReplSetTest.unfreeze(donorPrimary); @@ -85,15 +84,6 @@ assert.eq(1, getNumRangeDeletionDocs(recipientShard, ns)); testColl.drop(); metadataRefreshFailPoint.off(); -if (TestData.catalogShard) { - // In catalog shard mode, the migration won't finish until after we finish migration recovery, - // which is blocked by the fail point until we disable it above. - // - // SERVER-74446: Investigate why this only happens in catalog shard mode and if its safe to - // ignore by changing the test. - moveChunkThread.join(); -} - jsTest.log("Wait for the recipient to delete the range deletion task doc"); assert.soon(() => { return 0 == getNumRangeDeletionDocs(recipientShard, ns); diff --git a/jstests/sharding/direct_shard_connection_auth.js b/jstests/sharding/direct_shard_connection_auth.js new file mode 100644 index 0000000000000..5af15205a530c --- /dev/null +++ b/jstests/sharding/direct_shard_connection_auth.js @@ -0,0 +1,94 @@ +/** + * Tests that direct shard connections are correctly allowed and disallowed using authentication. + * + * @tags: [featureFlagCheckForDirectShardOperations, requires_fcv_71] + */ +(function() { +'use strict'; + +// Create a new sharded cluster for testing and enable auth. +const st = new ShardingTest({name: jsTestName(), keyFile: "jstests/libs/key1", shards: 1}); + +const shardConn = st.rs0.getPrimary(); +const shardAdminDB = shardConn.getDB("admin"); +const shardAdminTestDB = shardConn.getDB("test"); +const userConn = new Mongo(st.shard0.host); +const userTestDB = userConn.getDB("test"); + +function getUnauthorizedDirectWritesCount() { + return assert.commandWorked(shardAdminDB.runCommand({serverStatus: 1})) + .shardingStatistics.unauthorizedDirectShardOps; +} + +// With only one shard, direct shard operations should be allowed. +jsTest.log("Running tests with only one shard."); +{ + // Direct writes to collections with root privilege should be authorized. + shardAdminDB.createUser({user: "admin", pwd: 'x', roles: ["root"]}); + assert(shardAdminDB.auth("admin", 'x'), "Authentication failed"); + assert.commandWorked(shardAdminTestDB.getCollection("coll").insert({value: 1})); + assert.eq(getUnauthorizedDirectWritesCount(), 0); + + // Direct writes to collections with read/write privilege should be authorized. + shardAdminTestDB.createUser({user: "user", pwd: "y", roles: ["readWrite"]}); + assert(userTestDB.auth("user", "y"), "Authentication failed"); + assert.commandWorked(userTestDB.getCollection("coll").insert({value: 2})); + assert.eq(getUnauthorizedDirectWritesCount(), 0); + + // Logging out and dropping users should be authorized. + userTestDB.logout(); + shardAdminTestDB.dropUser("user"); + assert.eq(getUnauthorizedDirectWritesCount(), 0); +} + +// Adding the second shard will trigger the check for direct shard ops.
+var newShard = new ReplSetTest({name: "additionalShard", nodes: 1}); +newShard.startSet({keyFile: "jstests/libs/key1", shardsvr: ""}); +newShard.initiate(); +let mongosAdminUser = st.s.getDB('admin'); +if (!TestData.configShard) { + mongosAdminUser.createUser({user: "globalAdmin", pwd: 'a', roles: ["root"]}); + assert(mongosAdminUser.auth("globalAdmin", "a"), "Authentication failed"); +} else { + assert(mongosAdminUser.auth("admin", "x"), "Authentication failed"); +} +assert.commandWorked(mongosAdminUser.runCommand({addShard: newShard.getURL()})); + +jsTest.log("Running tests with two shards."); +{ + // Direct writes to collections with root privilege (which includes directShardOperations) + // should be authorized. + assert.commandWorked(shardAdminTestDB.getCollection("coll").insert({value: 3})); + assert.eq(getUnauthorizedDirectWritesCount(), 0); + + // Direct writes to collections with read/write privilege should not be authorized. + shardAdminTestDB.createUser({user: "user", pwd: "y", roles: ["readWrite"]}); + assert(userTestDB.auth("user", "y"), "Authentication failed"); + assert.commandWorked(userTestDB.getCollection("coll").insert({value: 4})); + assert.eq(getUnauthorizedDirectWritesCount(), 1); + userTestDB.logout(); + assert.eq(getUnauthorizedDirectWritesCount(), 1); + + // Direct writes with just read/write and direct operations should be authorized. + shardAdminDB.createUser( + {user: "user2", pwd: "z", roles: ["readWriteAnyDatabase", "directShardOperations"]}); + let shardUserWithDirectWritesAdminDB = userConn.getDB("admin"); + let shardUserWithDirectWritesTestDB = userConn.getDB("test"); + assert(shardUserWithDirectWritesAdminDB.auth("user2", "z"), "Authentication failed"); + assert.commandWorked(shardUserWithDirectWritesTestDB.getCollection("coll").insert({value: 5})); + assert.eq(getUnauthorizedDirectWritesCount(), 1); + + // Logout should always be authorized and drop user from admin should be authorized. + shardUserWithDirectWritesAdminDB.logout(); + shardAdminTestDB.dropUser("user"); + shardAdminTestDB.dropUser("user2"); + mongosAdminUser.logout(); + assert.eq(getUnauthorizedDirectWritesCount(), 1); + // shardAdminDB is used to check the direct writes count, so log it out last. + shardAdminDB.logout(); +} + +// Stop the sharding test before the additional shard to ensure the test hooks run successfully.
+st.stop(); +newShard.stopSet(); +})(); diff --git a/jstests/sharding/documents_sharded.js b/jstests/sharding/documents_sharded.js index 85b20cd29c3e1..c5eabc7790921 100644 --- a/jstests/sharding/documents_sharded.js +++ b/jstests/sharding/documents_sharded.js @@ -157,4 +157,4 @@ assert.throwsWithCode(() => { }, ErrorCodes.InvalidNamespace); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/drop_collection.js b/jstests/sharding/drop_collection.js index 874b3d58e27ad..2eb78a1bf8067 100644 --- a/jstests/sharding/drop_collection.js +++ b/jstests/sharding/drop_collection.js @@ -107,10 +107,10 @@ jsTest.log("Drop sharded collection repeated."); } } -jsTest.log("Drop unexistent collections also remove tags."); +jsTest.log("Drop inexistent collections also remove tags."); { const db = getNewDb(); - const coll = db['unexistent']; + const coll = db['inexistent']; // Create the database assert.commandWorked(st.s.adminCommand({enableSharding: db.getName()})); // Add a zone diff --git a/jstests/sharding/drop_collection_if_uuid_not_matching.js b/jstests/sharding/drop_collection_if_uuid_not_matching.js index bf67017320131..0813d839c8ef8 100644 --- a/jstests/sharding/drop_collection_if_uuid_not_matching.js +++ b/jstests/sharding/drop_collection_if_uuid_not_matching.js @@ -1,6 +1,6 @@ /** - * Tests that the _shardsvrDropCollectionIfUUIDNotMatching and - * _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern command works as expected: + * Tests that the _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern command works as + * expected: * - Noop in case the collection doesn't exist. * - Drop collection if uuid different from the expected. * - Keep the collection if the uuid is exactly the expected one. @@ -50,12 +50,8 @@ function runTests(collName, commandName, writeConcern) { assert.neq(null, db.getCollection(collName).findOne({_id: 0})); } -// TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS. -runTests("coll", "_shardsvrDropCollectionIfUUIDNotMatching", null); -if (!jsTestOptions().shardMixedBinVersions && jsTestOptions().mongosBinVersion == "latest") { - runTests("coll2", - "_shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern", - {writeConcern: {w: 'majority'}}); -} +runTests("coll2", + "_shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern", + {writeConcern: {w: 'majority'}}); st.stop(); diff --git a/jstests/sharding/enable_sharding.js b/jstests/sharding/enable_sharding.js index 91285a383eb65..ac66e7dbc7430 100644 --- a/jstests/sharding/enable_sharding.js +++ b/jstests/sharding/enable_sharding.js @@ -2,11 +2,6 @@ // Basic tests for enableSharding command. 
// -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); // for FeatureFlagUtil.isEnabled - var st = new ShardingTest({shards: 2}); jsTest.log('enableSharding can run only against the admin database'); @@ -89,5 +84,4 @@ jsTest.log( ErrorCodes.NamespaceExists); } -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/enforce_zone_policy.js b/jstests/sharding/enforce_zone_policy.js index 8f881721ebd34..8b0416b667a76 100644 --- a/jstests/sharding/enforce_zone_policy.js +++ b/jstests/sharding/enforce_zone_policy.js @@ -85,9 +85,9 @@ assertBalanceCompleteAndStable(function() { }, 'chunks to zones a and b'); // Tag the entire collection to shard0 and wait for everything to move to that shard -st.removeTagRange(ns, {_id: -100}, {_id: 100}, 'a'); -st.removeTagRange(ns, {_id: MinKey}, {_id: -100}, 'b'); -st.removeTagRange(ns, {_id: 100}, {_id: MaxKey}, 'b'); +st.removeTagRange(ns, {_id: -100}, {_id: 100}); +st.removeTagRange(ns, {_id: MinKey}, {_id: -100}); +st.removeTagRange(ns, {_id: 100}, {_id: MaxKey}); st.removeShardTag(st.shard1.shardName, 'a'); st.removeShardTag(st.shard2.shardName, 'b'); @@ -96,13 +96,13 @@ st.addTagRange(ns, {_id: MinKey}, {_id: MaxKey}, 'a'); assertBalanceCompleteAndStable(function() { var counts = st.chunkCounts(collName); printjson(counts); - return counts[st.shard0.shardName] == 11 && counts[st.shard1.shardName] == 0 && - counts[st.shard2.shardName] == 0; + // All chunks must have been moved to shard 0, none left on shard 1 and 2 + return counts[st.shard1.shardName] == 0 && counts[st.shard2.shardName] == 0; }, 'all chunks to zone a'); // Remove all zones and ensure collection is correctly redistributed st.removeShardTag(st.shard0.shardName, 'a'); -st.removeTagRange(ns, {_id: MinKey}, {_id: MaxKey}, 'a'); +st.removeTagRange(ns, {_id: MinKey}, {_id: MaxKey}); assertBalanceCompleteAndStable(checkClusterEvenlyBalanced, 'final'); diff --git a/jstests/sharding/exhaust_hello_topology_changes.js b/jstests/sharding/exhaust_hello_topology_changes.js index 6e5647bc5d628..f1e4de8c2c927 100644 --- a/jstests/sharding/exhaust_hello_topology_changes.js +++ b/jstests/sharding/exhaust_hello_topology_changes.js @@ -1,12 +1,12 @@ /** - * Test to check that the RSM receives an isMaster reply "immediately" (or "quickly") after a RS + * Test to check that the RSM receives a hello reply "immediately" (or "quickly") after a RS * topology change when using the exhaust protocol. In order to test this, we'll set the * maxAwaitTimeMS to much higher than the default (5 mins). This will allow us to assert that the - * RSM receives the isMaster replies because of a topology change rather than maxAwaitTimeMS being + * RSM receives the hello replies because of a topology change rather than maxAwaitTimeMS being * hit. A replica set node should send a response to the mongos as soon as it processes a topology * change, so "immediately"/"quickly" can vary - we specify 5 seconds in this test ('timeoutMS'). 
* - * @tags: [requires_streamable_rsm, temporary_catalog_shard_incompatible] + * @tags: [requires_streamable_rsm] */ // This test shuts down a shard's node and because of this consistency checking @@ -29,7 +29,7 @@ let st = new ShardingTest({ shards: {rs0: {nodes: [{}, {}, {rsConfig: {priority: 0}}]}} }); -let timeoutMS = 5000; +let timeoutMS = 10000; let mongos = st.s; let rsPrimary = st.rs0.getPrimary(); let electableRsSecondary; diff --git a/jstests/sharding/hash_shard1.js b/jstests/sharding/hash_shard1.js index e69ff07f226c0..5bdfef5340956 100644 --- a/jstests/sharding/hash_shard1.js +++ b/jstests/sharding/hash_shard1.js @@ -24,7 +24,7 @@ s.printShardingStatus(); // insert stuff var numitems = 1000; -for (i = 0; i < numitems; i++) { +for (let i = 0; i < numitems; i++) { t.insert({a: i}); } // check they all got inserted diff --git a/jstests/sharding/implicit_create_collection_triggered_by_DDLs.js b/jstests/sharding/implicit_create_collection_triggered_by_DDLs.js new file mode 100644 index 0000000000000..46547779d2c28 --- /dev/null +++ b/jstests/sharding/implicit_create_collection_triggered_by_DDLs.js @@ -0,0 +1,54 @@ +(function() { +"use strict"; + +function shardKnowledgeIsShardedOrUnknown(shard, nss) { + let res = assert.commandWorked(shard.adminCommand({getShardVersion: nss, fullMetadata: true})); + return (typeof res.global == 'string' && res.global == 'UNKNOWN') || + (typeof res.metadata == 'object' && typeof res.metadata.collVersion != 'undefined'); +} + +const st = new ShardingTest({shards: 2, mongos: 1}); + +void function testOptimizedShardCollection() { + const dbName = 'testDB1'; + const collName = 'testColl1'; + + jsTest.log("Testing that implicit collection creation triggered by optimized " + + "shardCollection leaves all shards with the expected knowledge"); + + assert.commandWorked(st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.name})); + + assert.commandWorked( + st.s.adminCommand({shardCollection: `${dbName}.${collName}`, key: {_id: 'hashed'}})); + + assert(shardKnowledgeIsShardedOrUnknown(st.shard0, `${dbName}.${collName}`), + "Unexpected sharding state in Shard 0"); + assert(shardKnowledgeIsShardedOrUnknown(st.shard1, `${dbName}.${collName}`), + "Unexpected sharding state in Shard 1"); +}(); + +void function testmovePrimary() { + const dbName = 'testDB2'; + const collName = 'testColl2'; + + jsTest.log("Testing that implicit collection creation triggered by movePrimary " + + "leaves all shards with the expected knowledge"); + + assert.commandWorked(st.s.adminCommand({enableSharding: dbName, primaryShard: st.shard0.name})); + + assert.commandWorked( + st.s.adminCommand({shardCollection: `${dbName}.${collName}`, key: {_id: 1}})); + + assert.commandWorked(st.s.adminCommand({ + movePrimary: dbName, + to: st.shard1.name, + })); + + assert(shardKnowledgeIsShardedOrUnknown(st.shard0, `${dbName}.${collName}`), + "Unexpected sharding state in Shard 0"); + assert(shardKnowledgeIsShardedOrUnknown(st.shard1, `${dbName}.${collName}`), + "Unexpected sharding state in Shard 1"); +}(); + +st.stop(); +})(); \ No newline at end of file diff --git a/jstests/sharding/implicit_default_write_concern_add_shard.js b/jstests/sharding/implicit_default_write_concern_add_shard.js index 155cc9f7fa690..6cab41aca96a7 100644 --- a/jstests/sharding/implicit_default_write_concern_add_shard.js +++ b/jstests/sharding/implicit_default_write_concern_add_shard.js @@ -6,8 +6,10 @@ (function() { "use strict"; -// TODO SERVER-75820: Investigate why a shard node doesn't have metadata at test 
shutdown. -TestData.skipCheckShardFilteringMetadata = true; +// Adds a shard near the end of the test that won't have metadata for the sessions collection during +// test shutdown. This is only a problem with a config shard because otherwise there are no shards +// so the sessions collection can't be created. +TestData.skipCheckShardFilteringMetadata = TestData.configShard; load("jstests/replsets/rslib.js"); // For reconfig and isConfigCommitted. @@ -41,7 +43,7 @@ function testAddShard(CWWCSet, isPSASet, fixAddShard) { shardServer.initiate(); const st = new ShardingTest({ - shards: TestData.catalogShard ? 1 : 0, + shards: TestData.configShard ? 1 : 0, mongos: 1, }); var admin = st.getDB('admin'); diff --git a/jstests/sharding/index1.js b/jstests/sharding/index1.js index 019a110091998..5a080de83b5d1 100644 --- a/jstests/sharding/index1.js +++ b/jstests/sharding/index1.js @@ -26,7 +26,7 @@ for (var i = 0; i < 22; i++) { coll.createIndex({num: 1}, {unique: true}); coll.createIndex({x: 1}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {x: 1}}); passed = true; @@ -52,7 +52,7 @@ for (var i = 0; i < 22; i++) { coll.createIndex({x: 1}); coll.createIndex({x: 1, num: 1}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {x: 1}}); passed = true; @@ -141,7 +141,7 @@ for (var i = 0; i < 22; i++) { // No index exists - passed = false; + let passed = false; try { assert.eq(coll.find().itcount(), 0); s.adminCommand({shardcollection: "" + coll, key: {num: 1}, unique: true}); @@ -168,7 +168,7 @@ for (var i = 0; i < 22; i++) { coll.createIndex({num: 1}, {unique: true}); coll.createIndex({x: 1}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {x: 1}}); passed = true; @@ -179,7 +179,7 @@ for (var i = 0; i < 22; i++) { } if (i == 10) { // try sharding non-empty collection without any index - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {num: 1}}); passed = true; @@ -210,6 +210,7 @@ for (var i = 0; i < 22; i++) { // empty collection with useful index. 
check new index not created coll.createIndex({num: 1, x: 1}); + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {num: 1}}); passed = true; @@ -228,7 +229,7 @@ for (var i = 0; i < 22; i++) { coll.save({num: 100, x: [2, 3]}); coll.createIndex({num: 1, x: 1}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {num: 1}}); passed = true; @@ -241,7 +242,7 @@ for (var i = 0; i < 22; i++) { coll.save({num: [100, 200], x: 10}); coll.createIndex({num: 1, x: 1}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {num: 1}}); passed = true; @@ -254,7 +255,7 @@ for (var i = 0; i < 22; i++) { coll.save({num: 100, x: 10, y: [1, 2]}); coll.createIndex({num: 1, x: 1, y: 1}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {num: 1}}); passed = true; @@ -278,7 +279,7 @@ for (var i = 0; i < 22; i++) { // create hashed index, but try to declare it unique when sharding coll.createIndex({num: "hashed"}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {num: "hashed"}, unique: true}); passed = true; @@ -292,7 +293,7 @@ for (var i = 0; i < 22; i++) { coll.createIndex({x: "hashed"}); coll.createIndex({num: 1}, {unique: true}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {x: "hashed"}}); passed = true; @@ -317,7 +318,7 @@ for (var i = 0; i < 22; i++) { // Create sparse index. coll.createIndex({x: 1}, {sparse: true}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {x: 1}}); passed = true; @@ -330,7 +331,7 @@ for (var i = 0; i < 22; i++) { // Create partial index. coll.createIndex({x: 1}, {filter: {num: {$gt: 1}}}); - passed = false; + let passed = false; try { s.adminCommand({shardcollection: "" + coll, key: {x: 1}}); passed = true; diff --git a/jstests/sharding/index_and_collection_option_propagation.js b/jstests/sharding/index_and_collection_option_propagation.js index 00b72b7a45cb0..6e525bfc2d978 100644 --- a/jstests/sharding/index_and_collection_option_propagation.js +++ b/jstests/sharding/index_and_collection_option_propagation.js @@ -7,8 +7,8 @@ * This test verifies this behavior. * * Shuts down shard0, which also shuts down the config server. Tests mongos targeting, which won't - * be affected by a catalog shard. - * @tags: [catalog_shard_incompatible] + * be affected by a config shard. 
+ * @tags: [config_shard_incompatible] */ // This test shuts down a shard's node and because of this consistency checking @@ -26,7 +26,7 @@ function checkShardIndexes(indexKey, shardsWithIndex, shardsWithoutIndex) { return [res, false]; } assert.commandWorked(res); - for (index of res.cursor.firstBatch) { + for (let index of res.cursor.firstBatch) { if (index.key.hasOwnProperty(indexKey)) { return [res, true]; } @@ -34,15 +34,15 @@ function checkShardIndexes(indexKey, shardsWithIndex, shardsWithoutIndex) { return [res, false]; } - for (shard of shardsWithIndex) { - [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard); + for (let shard of shardsWithIndex) { + let [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard); assert(foundIndex, "expected to see index with key " + indexKey + " in listIndexes response from " + shard + ": " + tojson(listIndexesRes)); } - for (shard of shardsWithoutIndex) { - [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard); + for (let shard of shardsWithoutIndex) { + let [listIndexesRes, foundIndex] = shardHasIndex(indexKey, shard); assert(!foundIndex, "expected not to see index with key " + indexKey + " in listIndexes response from " + shard + ": " + tojson(listIndexesRes)); @@ -65,18 +65,18 @@ function checkShardCollOption(optionKey, optionValue, shardsWithOption, shardsWi return [res, false]; } - for (shard of shardsWithOption) { - [listCollsRes, foundOption] = shardHasOption(optionKey, optionValue, shard); + for (let shard of shardsWithOption) { + let [listCollsRes, foundOption] = shardHasOption(optionKey, optionValue, shard); assert(foundOption, "expected to see option " + optionKey + " in listCollections response from " + shard + ": " + tojson(listCollsRes)); } - for (shard of shardsWithoutOption) { - [listOptionsRes, foundOption] = shardHasOption(optionKey, optionValue, shard); + for (let shard of shardsWithoutOption) { + let [listOptionsRes, foundOption] = shardHasOption(optionKey, optionValue, shard); assert(!foundOption, "expected not to see option " + optionKey + " in listCollections response from " + - shard + ": " + tojson(listCollsRes)); + shard + ": " + tojson(listOptionsRes)); } } diff --git a/jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js b/jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js index 2a06943ee998e..dda4ee68d3f16 100644 --- a/jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js +++ b/jstests/sharding/ingress_handshake_and_auth_metrics_mongos.js @@ -9,7 +9,7 @@ load('jstests/libs/ingress_handshake_metrics_helpers.js'); let runTest = (connectionHealthLoggingOn) => { - let st = new ShardingTest({shards: TestData.catalogShard ? 1 : 0, other: {auth: ''}}); + let st = new ShardingTest({shards: TestData.configShard ? 1 : 0, other: {auth: ''}}); let conn = st.s; jsTestLog("Setting up users and test data."); diff --git a/jstests/sharding/internal_txns/end_sessions.js b/jstests/sharding/internal_txns/end_sessions.js index ff08aa34c43f7..abbd7eff27a65 100644 --- a/jstests/sharding/internal_txns/end_sessions.js +++ b/jstests/sharding/internal_txns/end_sessions.js @@ -12,15 +12,8 @@ // implicit sessions. 
TestData.disableImplicitSessions = true; -const st = new ShardingTest({ - shards: 1, - shardOptions: { - setParameter: { - TransactionRecordMinimumLifetimeMinutes: 0, - storeFindAndModifyImagesInSideCollection: true, - } - } -}); +const st = new ShardingTest( + {shards: 1, shardOptions: {setParameter: {TransactionRecordMinimumLifetimeMinutes: 0}}}); const shard0Rst = st.rs0; const shard0Primary = shard0Rst.getPrimary(); diff --git a/jstests/sharding/internal_txns/internal_client_restrictions.js b/jstests/sharding/internal_txns/internal_client_restrictions.js index fcb93eaf1e70d..4201e726c75c9 100644 --- a/jstests/sharding/internal_txns/internal_client_restrictions.js +++ b/jstests/sharding/internal_txns/internal_client_restrictions.js @@ -90,8 +90,8 @@ jsTestLog("Verify internal session and txnRetryCounter require internal privileg // Auth as a user with enough privileges to read from any collection, but not to identify as an // internal client. const mongosDB = st.s.getDB("admin"); -if (!TestData.catalogShard) { - // In catalog shard mode, the user made on the shard above is also a cluster global user. +if (!TestData.configShard) { + // In config shard mode, the user made on the shard above is also a cluster global user. mongosDB.createUser({user: "admin", pwd: "password", roles: jsTest.adminUserRoles}); } assert(mongosDB.auth("admin", "password")); diff --git a/jstests/sharding/internal_txns/kill_sessions.js b/jstests/sharding/internal_txns/kill_sessions.js index 1240a5735108b..6b36ef37c3109 100644 --- a/jstests/sharding/internal_txns/kill_sessions.js +++ b/jstests/sharding/internal_txns/kill_sessions.js @@ -1,7 +1,7 @@ /* * Tests running killSessions to kill internal sessions on both mongos and mongod. * - * @tags: [requires_fcv_70, uses_transactions] + * @tags: [requires_fcv_60, uses_transactions] */ (function() { 'use strict'; @@ -15,8 +15,8 @@ const st = new ShardingTest({ {maxSessions: 1, 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"} }, // The config server uses a session for internal operations, so raise the limit by 1 for a - // catalog shard. - shardOptions: {setParameter: {maxSessions: TestData.catalogShard ? 2 : 1}} + // config shard. + shardOptions: {setParameter: {maxSessions: TestData.configShard ? 
2 : 1}} }); const shard0Primary = st.rs0.getPrimary(); diff --git a/jstests/sharding/internal_txns/libs/chunk_migration_test.js b/jstests/sharding/internal_txns/libs/chunk_migration_test.js index 0210c547a2681..5042da354b4f8 100644 --- a/jstests/sharding/internal_txns/libs/chunk_migration_test.js +++ b/jstests/sharding/internal_txns/libs/chunk_migration_test.js @@ -17,21 +17,15 @@ load('jstests/libs/chunk_manipulation_util.js'); load('jstests/sharding/internal_txns/libs/fixture_helpers.js'); load('jstests/sharding/libs/sharded_transactions_helpers.js'); -function InternalTransactionChunkMigrationTest(storeFindAndModifyImagesInSideCollection = true) { - jsTest.log(`Running chunk migration test with options ${ - tojson({storeFindAndModifyImagesInSideCollection})}`); +function InternalTransactionChunkMigrationTest() { + jsTest.log(`Running chunk migration test`); let st = new ShardingTest({ mongos: 1, shards: 3, rs: {nodes: 2}, - rsOptions: { - oplogSize: 256, - setParameter: { - storeFindAndModifyImagesInSideCollection: storeFindAndModifyImagesInSideCollection, - maxNumberOfTransactionOperationsInSingleOplogEntry: 1 - } - } + rsOptions: + {oplogSize: 256, setParameter: {maxNumberOfTransactionOperationsInSingleOplogEntry: 1}} }); let staticMongod = MongoRunner.runMongod({}); diff --git a/jstests/sharding/internal_txns/libs/resharding_test.js b/jstests/sharding/internal_txns/libs/resharding_test.js index 42b32ea2f9d9e..577c04954fcf3 100644 --- a/jstests/sharding/internal_txns/libs/resharding_test.js +++ b/jstests/sharding/internal_txns/libs/resharding_test.js @@ -18,16 +18,13 @@ load('jstests/sharding/internal_txns/libs/fixture_helpers.js'); load("jstests/sharding/libs/resharding_test_fixture.js"); load('jstests/sharding/libs/sharded_transactions_helpers.js'); -function InternalTransactionReshardingTest( - {reshardInPlace, storeFindAndModifyImagesInSideCollection}) { - jsTest.log(`Running resharding test with options ${ - tojson({reshardInPlace, storeFindAndModifyImagesInSideCollection})}`); +function InternalTransactionReshardingTest({reshardInPlace}) { + jsTest.log(`Running resharding test with options ${tojson({reshardInPlace})}`); const reshardingTest = new ReshardingTest({ numDonors: 2, numRecipients: 1, reshardInPlace, - storeFindAndModifyImagesInSideCollection, oplogSize: 256, maxNumberOfTransactionOperationsInSingleOplogEntry: 1 }); diff --git a/jstests/sharding/internal_txns/libs/retryable_internal_transaction_test.js b/jstests/sharding/internal_txns/libs/retryable_internal_transaction_test.js index 06749b81111e2..fbf10dd330bf9 100644 --- a/jstests/sharding/internal_txns/libs/retryable_internal_transaction_test.js +++ b/jstests/sharding/internal_txns/libs/retryable_internal_transaction_test.js @@ -125,15 +125,10 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { assert.commandWorked(mongosTestDB.adminCommand(commitCmdObj)); } - function testNonRetryableBasic(cmdObj, { - txnOptions, - testMode, - expectFindAndModifyImageInOplog, - expectFindAndModifyImageInSideCollection - }) { + function testNonRetryableBasic( + cmdObj, {txnOptions, testMode, expectFindAndModifyImageInSideCollection}) { // A findAndModify write statement in a non-retryable transaction will not generate a // pre/post image. 
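// [Editor's note] A hedged illustration, not part of this patch, of the bookkeeping the
// assertions below rely on: an unprepared transaction commits as a single applyOps oplog
// entry, a prepared one adds a separate commit entry (hence the `isPreparedTxn ? 2 : 1`
// arithmetic), and any findAndModify pre/post image is now expected only in the side
// collection (assumed to be config.image_collection). Assuming a direct shard connection
// `shardPrimary` and the session's `lsid` and `txnNumber`, the raw counts could be read with:
const txnOplogEntryCount = shardPrimary.getDB('local')
                               .oplog.rs.find({'lsid.id': lsid.id, txnNumber: txnNumber})
                               .itcount();
const sideCollImageCount =
    shardPrimary.getDB('config').image_collection.find({'_id.id': lsid.id}).itcount();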
- assert(!expectFindAndModifyImageInOplog); assert(!expectFindAndModifyImageInSideCollection); jsTest.log("Testing retrying a non-retryable internal transaction"); cmdObj.startTransaction = true; @@ -150,7 +145,7 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { const initialTxnStateBefore = getTransactionState(initialLsid, initialTxnNumber); assert.eq(initialTxnStateBefore.oplogEntries.length, - (txnOptions.isPreparedTxn ? 2 : 1) + (expectFindAndModifyImageInOplog ? 1 : 0), + (txnOptions.isPreparedTxn ? 2 : 1), initialTxnStateBefore.oplogEntries); assert.eq(initialTxnStateBefore.imageEntries.length, expectFindAndModifyImageInSideCollection ? 1 : 0, @@ -171,14 +166,9 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { assert.commandWorked(mongosTestColl.remove({})); } - function testRetryableBasic(cmdObj, { - txnOptions, - testMode, - expectFindAndModifyImageInOplog, - expectFindAndModifyImageInSideCollection, - checkRetryResponseFunc - }) { - assert(!expectFindAndModifyImageInOplog || !expectFindAndModifyImageInSideCollection); + function testRetryableBasic( + cmdObj, + {txnOptions, testMode, expectFindAndModifyImageInSideCollection, checkRetryResponseFunc}) { jsTest.log( "Testing retrying a retryable internal transaction with one applyOps oplog entry"); cmdObj.startTransaction = true; @@ -196,7 +186,7 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { const initialTxnStateBefore = getTransactionState(initialLsid, initialTxnNumber); assert.eq(initialTxnStateBefore.oplogEntries.length, - (txnOptions.isPreparedTxn ? 2 : 1) + (expectFindAndModifyImageInOplog ? 1 : 0), + (txnOptions.isPreparedTxn ? 2 : 1), initialTxnStateBefore.oplogEntries); assert.eq(initialTxnStateBefore.imageEntries.length, expectFindAndModifyImageInSideCollection ? 1 : 0, @@ -236,15 +226,9 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { assert.commandWorked(mongosTestColl.remove({})); } - function testRetryableLargeTxn(cmdObj, { - txnOptions, - testMode, - expectFindAndModifyImageInOplog, - expectFindAndModifyImageInSideCollection, - checkRetryResponseFunc - }) { - assert(!expectFindAndModifyImageInOplog || !expectFindAndModifyImageInSideCollection); - + function testRetryableLargeTxn( + cmdObj, + {txnOptions, testMode, expectFindAndModifyImageInSideCollection, checkRetryResponseFunc}) { jsTest.log( "Testing retrying a retryable internal transaction with more than one applyOps oplog entry"); @@ -315,8 +299,7 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { const expectedOplogLength = Math.floor(stmtId / maxNumberOfTransactionOperationsInSingleOplogEntry); assert.eq(initialTxnStateBefore.oplogEntries.length, - (txnOptions.isPreparedTxn ? expectedOplogLength + 1 : expectedOplogLength) + - (expectFindAndModifyImageInOplog ? 1 : 0)); + (txnOptions.isPreparedTxn ? expectedOplogLength + 1 : expectedOplogLength)); assert.eq(initialTxnStateBefore.imageEntries.length, expectFindAndModifyImageInSideCollection ? 
1 : 0, initialTxnStateBefore.imageEntries); @@ -365,7 +348,6 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { txnOptions, testMode, expectRetryToSucceed, - expectFindAndModifyImageInOplog, expectFindAndModifyImageInSideCollection, checkRetryResponseFunc }) { @@ -383,7 +365,6 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { txnOptions, testMode, expectRetryToSucceed, - expectFindAndModifyImageInOplog, expectFindAndModifyImageInSideCollection, checkRetryResponseFunc }); @@ -452,15 +433,9 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { function testRetryFindAndModify(findAndModifyCmdObj, { txnOptions, testMode, - enableFindAndModifyImageCollection, expectRetryToSucceed, expectFindAndModifyImage, }) { - const shard0Primary = st.rs0.getPrimary(); - assert.commandWorked(shard0Primary.adminCommand({ - setParameter: 1, - storeFindAndModifyImagesInSideCollection: enableFindAndModifyImageCollection - })); const checkRetryResponseFunc = (initialRes, retryRes) => { assert.eq(initialRes.lastErrorObject, retryRes.lastErrorObject); assert.eq(initialRes.value, retryRes.value); @@ -470,10 +445,8 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { txnOptions, testMode, expectRetryToSucceed, - expectFindAndModifyImageInOplog: expectRetryToSucceed && expectFindAndModifyImage && - !enableFindAndModifyImageCollection, - expectFindAndModifyImageInSideCollection: expectRetryToSucceed && - expectFindAndModifyImage && enableFindAndModifyImageCollection, + expectFindAndModifyImageInSideCollection: + expectRetryToSucceed && expectFindAndModifyImage, checkRetryResponseFunc }); } @@ -481,12 +454,9 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { function testRetryFindAndModifyUpsert({ txnOptions, testMode, - enableFindAndModifyImageCollection, expectRetryToSucceed, }) { - jsTest.log( - "Testing findAndModify upsert (i.e. no preImage or postImage) with enableFindAndModifyImageCollection: " + - enableFindAndModifyImageCollection); + jsTest.log("Testing findAndModify upsert (i.e. 
no preImage or postImage)"); const findAndModifyCmdObj = { findAndModify: kCollName, @@ -498,7 +468,6 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { testRetryFindAndModify(findAndModifyCmdObj, { txnOptions, testMode, - enableFindAndModifyImageCollection, expectFindAndModifyImage, expectRetryToSucceed, }); @@ -507,12 +476,9 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { function testRetryFindAndModifyUpdateWithPreImage({ txnOptions, testMode, - enableFindAndModifyImageCollection, expectRetryToSucceed, }) { - jsTest.log( - "Testing findAndModify update with preImage with enableFindAndModifyImageCollection: " + - enableFindAndModifyImageCollection); + jsTest.log("Testing findAndModify update with preImage"); assert.commandWorked(mongosTestColl.insert([{_id: -1, x: -1}])); const findAndModifyCmdObj = { @@ -524,7 +490,6 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { testRetryFindAndModify(findAndModifyCmdObj, { txnOptions, testMode, - enableFindAndModifyImageCollection, expectFindAndModifyImage, expectRetryToSucceed, }); @@ -533,12 +498,9 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { function testRetryFindAndModifyUpdateWithPostImage({ txnOptions, testMode, - enableFindAndModifyImageCollection, expectRetryToSucceed, }) { - jsTest.log( - "Testing findAndModify update with postImage with enableFindAndModifyImageCollection: " + - enableFindAndModifyImageCollection); + jsTest.log("Testing findAndModify update with postImage"); assert.commandWorked(mongosTestColl.insert([{_id: -1, x: -1}])); const findAndModifyCmdObj = { @@ -551,7 +513,6 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { testRetryFindAndModify(findAndModifyCmdObj, { txnOptions, testMode, - enableFindAndModifyImageCollection, expectFindAndModifyImage, expectRetryToSucceed, }); @@ -560,12 +521,9 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { function testRetryFindAndModifyRemove({ txnOptions, testMode, - enableFindAndModifyImageCollection, expectRetryToSucceed, }) { - jsTest.log( - "Testing findAndModify remove (i.e. with preImage) with enableFindAndModifyImageCollection: " + - enableFindAndModifyImageCollection); + jsTest.log("Testing findAndModify remove (i.e. 
with preImage)"); assert.commandWorked(mongosTestColl.insert([{_id: -1, x: -1}])); const findAndModifyCmdObj = { @@ -577,7 +535,6 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { testRetryFindAndModify(findAndModifyCmdObj, { txnOptions, testMode, - enableFindAndModifyImageCollection, expectFindAndModifyImage, expectRetryToSucceed, }); @@ -609,7 +566,6 @@ function RetryableInternalTransactionTest(collectionOptions = {}) { } this.runFindAndModifyTestsEnableImageCollection = function(testOptions) { - testOptions.enableFindAndModifyImageCollection = true; runFindAndModifyTests(testOptions); }; diff --git a/jstests/sharding/internal_txns/partial_index.js b/jstests/sharding/internal_txns/partial_index.js index d77c20c73c5aa..802c11b72ee36 100644 --- a/jstests/sharding/internal_txns/partial_index.js +++ b/jstests/sharding/internal_txns/partial_index.js @@ -4,10 +4,7 @@ * * @tags: [requires_fcv_60, uses_transactions] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getWinningPlan} from "jstests/libs/analyze_plan.js"; const kDbName = "testDb"; const kCollName = "testColl"; @@ -93,7 +90,7 @@ function runTest(st, alwaysCreateFeatureFlagEnabled) { }); } - if (TestData.catalogShard) { + if (TestData.configShard) { // A config server does internal txns, clear the transaction table to make sure it's // empty before dropping the index, otherwise it can't be recreated automatically. @@ -345,5 +342,4 @@ function runTest(st, alwaysCreateFeatureFlagEnabled) { runTest(featureFlagSt, true /* alwaysCreateFeatureFlagEnabled */); featureFlagSt.stop(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/internal_txns/retryable_findAndModify_before_migration_oplog.js b/jstests/sharding/internal_txns/retryable_findAndModify_before_migration_oplog.js deleted file mode 100644 index 30fd67520dd7b..0000000000000 --- a/jstests/sharding/internal_txns/retryable_findAndModify_before_migration_oplog.js +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Tests that retryable findAndModify statements that are executed with the image collection - * disabled inside internal transactions that start and commit on the donor before a chunk migration - * are retryable on the recipient after the migration. 
- * - * @tags: [requires_fcv_60, uses_transactions, requires_persistence, exclude_from_large_txns] - */ -(function() { -"use strict"; - -load("jstests/sharding/internal_txns/libs/chunk_migration_test.js"); - -const transactionTest = - new InternalTransactionChunkMigrationTest(false /* storeFindAndModifyImagesInSideCollection */); -transactionTest.runTestForFindAndModifyBeforeChunkMigration( - transactionTest.InternalTxnType.kRetryable, false /* abortOnInitialTry */); -transactionTest.stop(); -})(); diff --git a/jstests/sharding/internal_txns/retryable_findAndModify_before_migration_side_coll.js b/jstests/sharding/internal_txns/retryable_findAndModify_before_migration_side_coll.js index 83093f2ff8867..5859687d67d2c 100644 --- a/jstests/sharding/internal_txns/retryable_findAndModify_before_migration_side_coll.js +++ b/jstests/sharding/internal_txns/retryable_findAndModify_before_migration_side_coll.js @@ -10,8 +10,7 @@ load("jstests/sharding/internal_txns/libs/chunk_migration_test.js"); -const transactionTest = - new InternalTransactionChunkMigrationTest(true /* storeFindAndModifyImagesInSideCollection */); +const transactionTest = new InternalTransactionChunkMigrationTest(); transactionTest.runTestForFindAndModifyBeforeChunkMigration( transactionTest.InternalTxnType.kRetryable, false /* abortOnInitialTry */); transactionTest.stop(); diff --git a/jstests/sharding/internal_txns/retryable_findAndModify_commit_and_abort_prepared_txns_after_failover_and_restart.js b/jstests/sharding/internal_txns/retryable_findAndModify_commit_and_abort_prepared_txns_after_failover_and_restart.js index bfade15b54aed..dfd540f929243 100644 --- a/jstests/sharding/internal_txns/retryable_findAndModify_commit_and_abort_prepared_txns_after_failover_and_restart.js +++ b/jstests/sharding/internal_txns/retryable_findAndModify_commit_and_abort_prepared_txns_after_failover_and_restart.js @@ -19,11 +19,7 @@ TestData.skipCheckDBHashes = true; load("jstests/replsets/rslib.js"); load("jstests/sharding/libs/sharded_transactions_helpers.js"); -function runTest(st, stepDownShard0PrimaryFunc, testOpts = { - runFindAndModifyWithPreOrPostImage, - abortTxnAfterFailover, - enableFindAndModifyImageCollection -}) { +function runTest(st, stepDownShard0PrimaryFunc, testOpts) { jsTest.log("Testing with options " + tojson(testOpts)); const sessionUUID = UUID(); @@ -38,11 +34,6 @@ function runTest(st, stepDownShard0PrimaryFunc, testOpts = { let testDB = st.rs0.getPrimary().getDB(kDbName); let testColl = testDB.getCollection(kCollName); - assert.commandWorked(testDB.adminCommand({ - setParameter: 1, - storeFindAndModifyImagesInSideCollection: testOpts.enableFindAndModifyImageCollection - })); - assert.commandWorked(testDB.createCollection(kCollName)); if (testOpts.runFindAndModifyWithPreOrPostImage) { assert.commandWorked(testColl.insert({_id: 0, x: 0})); diff --git a/jstests/sharding/internal_txns/retryable_findAndModify_committed_during_resharding_oplog.js b/jstests/sharding/internal_txns/retryable_findAndModify_committed_during_resharding_oplog.js deleted file mode 100644 index 27cd291ac6dfa..0000000000000 --- a/jstests/sharding/internal_txns/retryable_findAndModify_committed_during_resharding_oplog.js +++ /dev/null @@ -1,31 +0,0 @@ -/** - * Tests that retryable findAndModify statements that are executed with image collection disabled - * inside internal transactions that start and commit the donor(s) during resharding are retryable - * on the recipient after resharding. 
- * - * @tags: [requires_fcv_60, uses_transactions, requires_persistence, exclude_from_large_txns] - */ -(function() { -"use strict"; - -load("jstests/sharding/internal_txns/libs/resharding_test.js"); - -const storeFindAndModifyImagesInSideCollection = false; -const abortOnInitialTry = false; - -{ - const transactionTest = new InternalTransactionReshardingTest( - {reshardInPlace: false, storeFindAndModifyImagesInSideCollection}); - transactionTest.runTestForFindAndModifyDuringResharding( - transactionTest.InternalTxnType.kRetryable, abortOnInitialTry); - transactionTest.stop(); -} - -{ - const transactionTest = new InternalTransactionReshardingTest( - {reshardInPlace: true, storeFindAndModifyImagesInSideCollection}); - transactionTest.runTestForFindAndModifyDuringResharding( - transactionTest.InternalTxnType.kRetryable, abortOnInitialTry); - transactionTest.stop(); -} -})(); diff --git a/jstests/sharding/internal_txns/retryable_findAndModify_committed_during_resharding_side_coll.js b/jstests/sharding/internal_txns/retryable_findAndModify_committed_during_resharding_side_coll.js index 135c25b8163ff..d5aeceb153f47 100644 --- a/jstests/sharding/internal_txns/retryable_findAndModify_committed_during_resharding_side_coll.js +++ b/jstests/sharding/internal_txns/retryable_findAndModify_committed_during_resharding_side_coll.js @@ -10,20 +10,17 @@ load("jstests/sharding/internal_txns/libs/resharding_test.js"); -const storeFindAndModifyImagesInSideCollection = true; const abortOnInitialTry = false; { - const transactionTest = new InternalTransactionReshardingTest( - {reshardInPlace: false, storeFindAndModifyImagesInSideCollection}); + const transactionTest = new InternalTransactionReshardingTest({reshardInPlace: false}); transactionTest.runTestForFindAndModifyDuringResharding( transactionTest.InternalTxnType.kRetryable, abortOnInitialTry); transactionTest.stop(); } { - const transactionTest = new InternalTransactionReshardingTest( - {reshardInPlace: true, storeFindAndModifyImagesInSideCollection}); + const transactionTest = new InternalTransactionReshardingTest({reshardInPlace: true}); transactionTest.runTestForFindAndModifyDuringResharding( transactionTest.InternalTxnType.kRetryable, abortOnInitialTry); transactionTest.stop(); diff --git a/jstests/sharding/internal_txns/retryable_findAndModify_during_migration_oplog.js b/jstests/sharding/internal_txns/retryable_findAndModify_during_migration_oplog.js deleted file mode 100644 index a233335c1482f..0000000000000 --- a/jstests/sharding/internal_txns/retryable_findAndModify_during_migration_oplog.js +++ /dev/null @@ -1,18 +0,0 @@ -/** - * Tests that retryable findAndModify statements that are executed with the image collection - * disabled inside internal transactions that start and commit on the donor during a chunk migration - * are retryable on the recipient after the migration. 
- * - * @tags: [requires_fcv_60, uses_transactions, requires_persistence, exclude_from_large_txns] - */ -(function() { -"use strict"; - -load("jstests/sharding/internal_txns/libs/chunk_migration_test.js"); - -const transactionTest = - new InternalTransactionChunkMigrationTest(false /* storeFindAndModifyImagesInSideCollection */); -transactionTest.runTestForFindAndModifyDuringChunkMigration( - transactionTest.InternalTxnType.kRetryable, false /* abortOnInitialTry */); -transactionTest.stop(); -})(); diff --git a/jstests/sharding/internal_txns/retryable_findAndModify_during_migration_side_coll.js b/jstests/sharding/internal_txns/retryable_findAndModify_during_migration_side_coll.js index 5f27e4c5cbd0d..7e516712a873a 100644 --- a/jstests/sharding/internal_txns/retryable_findAndModify_during_migration_side_coll.js +++ b/jstests/sharding/internal_txns/retryable_findAndModify_during_migration_side_coll.js @@ -10,8 +10,7 @@ load("jstests/sharding/internal_txns/libs/chunk_migration_test.js"); -const transactionTest = - new InternalTransactionChunkMigrationTest(true /* storeFindAndModifyImagesInSideCollection */); +const transactionTest = new InternalTransactionChunkMigrationTest(); transactionTest.runTestForFindAndModifyDuringChunkMigration( transactionTest.InternalTxnType.kRetryable, false /* abortOnInitialTry */); transactionTest.stop(); diff --git a/jstests/sharding/internal_txns/retryable_writes_retry_conflict.js b/jstests/sharding/internal_txns/retryable_writes_retry_conflict.js index 922e729b71826..9af7ec50c4c3e 100644 --- a/jstests/sharding/internal_txns/retryable_writes_retry_conflict.js +++ b/jstests/sharding/internal_txns/retryable_writes_retry_conflict.js @@ -50,11 +50,7 @@ let currentParentTxnNumber = 35; * blocked until the transaction commits or aborts and does not cause the write statement to execute * more than once. */ -function testBlockingRetry(retryFunc, testOpts = { - prepareBeforeRetry, - abortAfterBlockingRetry, - stepDownPrimaryAfterBlockingRetry -}) { +function testBlockingRetry(retryFunc, testOpts) { jsTest.log("Test blocking retry with test options " + tojson(testOpts)); const parentTxnNumber = currentParentTxnNumber++; const docToInsert = {x: 1}; diff --git a/jstests/sharding/internal_txns/sessions.js b/jstests/sharding/internal_txns/sessions.js index 538d9ffa58b96..5e4d185be1057 100644 --- a/jstests/sharding/internal_txns/sessions.js +++ b/jstests/sharding/internal_txns/sessions.js @@ -1,7 +1,7 @@ /* * Tests basic support for internal sessions. * - * @tags: [requires_fcv_70, uses_transactions] + * @tags: [requires_fcv_60, uses_transactions] */ (function() { 'use strict'; @@ -15,8 +15,8 @@ const st = new ShardingTest({ {maxSessions: 1, 'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"} }, // The config server uses a session for internal operations, so raise the limit by 1 for a - // catalog shard. - shardOptions: {setParameter: {maxSessions: TestData.catalogShard ? 2 : 1}} + // config shard. + shardOptions: {setParameter: {maxSessions: TestData.configShard ? 2 : 1}} }); const shard0Primary = st.rs0.getPrimary(); @@ -97,7 +97,7 @@ const kConfigSessionNs = "config.system.sessions"; // document). const sessionUUID = UUID(); - if (TestData.catalogShard) { + if (TestData.configShard) { // Create the collection first separately, otherwise the session will be used for the // transaction that creates the collection, leading to one extra transaction document. 
    assert.commandWorked(testDB.createCollection(kCollName));
diff --git a/jstests/sharding/invalid_shard_identity_doc.js b/jstests/sharding/invalid_shard_identity_doc.js
index 0d0a14cec27a3..0b4fd39501511 100644
--- a/jstests/sharding/invalid_shard_identity_doc.js
+++ b/jstests/sharding/invalid_shard_identity_doc.js
@@ -4,14 +4,13 @@
  *
  * @tags: [
  *   requires_fcv_70,
- *   featureFlagCatalogShard,
  *   featureFlagTransitionToCatalogShard,
  * ]
  */
 (function() {
 "use strict";
 
-const st = new ShardingTest({shards: 2, catalogShard: true});
+const st = new ShardingTest({shards: 2, configShard: true});
 const rs = new ReplSetTest({name: "new-shard-rs", nodes: 1, nodeOptions: {shardsvr: ""}});
 rs.startSet();
diff --git a/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js b/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
index feebe8ccc67b4..f560b28cc2d98 100644
--- a/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
+++ b/jstests/sharding/invalidate_plan_cache_entries_when_collection_generation_changes.js
@@ -5,17 +5,13 @@
  * @tags: [
  *   # The SBE plan cache was enabled by default in 6.3.
  *   requires_fcv_63,
- *   temporary_catalog_shard_incompatible,
  * ]
  */
+import {checkSBEEnabled} from "jstests/libs/sbe_util.js";
 
 // Cannot run the filtering metadata check on tests that run refineCollectionShardKey.
 TestData.skipCheckShardFilteringMetadata = true;
 
-(function() {
-'use strict';
-
-load("jstests/libs/sbe_util.js");
-
 const criticalSectionTimeoutMS = 24 * 60 * 60 * 1000;  // 1 day
 const st = new ShardingTest({
     mongos: 1,
@@ -37,7 +33,7 @@ const collB = db["collB"];
 if (!checkSBEEnabled(db)) {
     jsTestLog("********** Skip the test because SBE is disabled **********");
     st.stop();
-    return;
+    quit();
 }
 
 function assertPlanCacheSizeForColl(nss, expectedEntriesCount) {
@@ -97,7 +93,14 @@ assert.commandWorked(mongos.adminCommand({enableSharding: dbName}));
     st.shard0.adminCommand(
         {_flushRoutingTableCacheUpdates: collA.getFullName(), syncFromConfig: true});
 
-    assertPlanCacheSizeForColl(collA.getFullName(), 0);
+    if (TestData.configShard) {
+        // Refining a shard key runs a "noop" find on the refined namespace, which runs locally on
+        // the config server without a shard version, so it generates a plan cache entry on collA
+        // that is not cleared.
+ assertPlanCacheSizeForColl(collA.getFullName(), 1); + } else { + assertPlanCacheSizeForColl(collA.getFullName(), 0); + } assertPlanCacheSizeForColl(collB.getFullName(), 1); })(); @@ -142,4 +145,3 @@ assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); })(); st.stop(); -})(); diff --git a/jstests/sharding/key_string.js b/jstests/sharding/key_string.js index b3416158d4137..3a1a5e670ca0e 100644 --- a/jstests/sharding/key_string.js +++ b/jstests/sharding/key_string.js @@ -8,8 +8,8 @@ s.adminCommand({enablesharding: "test"}); s.ensurePrimaryShard('test', s.shard1.shardName); s.adminCommand({shardcollection: "test.foo", key: {name: 1}}); -primary = s.getPrimaryShard("test").getDB("test"); -seconday = s.getOther(primary).getDB("test"); +let primary = s.getPrimaryShard("test").getDB("test"); +let seconday = s.getOther(primary).getDB("test"); assert.eq(1, findChunksUtil.countChunksForNs(s.config, "test.foo"), "sanity check A"); diff --git a/jstests/sharding/libs/defragmentation_util.js b/jstests/sharding/libs/defragmentation_util.js index 8b33b97d6b5d0..53dcd6fd4e6a6 100644 --- a/jstests/sharding/libs/defragmentation_util.js +++ b/jstests/sharding/libs/defragmentation_util.js @@ -1,5 +1,4 @@ var defragmentationUtil = (function() { - load("jstests/libs/feature_flag_util.js"); load("jstests/sharding/libs/find_chunks_util.js"); let createFragmentedCollection = function(mongos, @@ -7,11 +6,12 @@ var defragmentationUtil = (function() { numChunks, maxChunkFillMB, numZones, - docSizeBytes, + docSizeBytesRange, chunkSpacing, disableCollectionBalancing) { - jsTest.log("Creating fragmented collection " + ns + " with parameters: numChunks = " + - numChunks + ", numZones = " + numZones + ", docSizeBytes = " + docSizeBytes + + jsTest.log("Creating fragmented collection " + ns + + " with parameters: numChunks = " + numChunks + ", numZones = " + numZones + + ", docSizeBytesRange = " + docSizeBytesRange + ", maxChunkFillMB = " + maxChunkFillMB + ", chunkSpacing = " + chunkSpacing); assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {key: 1}})); // Turn off balancer for this collection @@ -28,7 +28,7 @@ var defragmentationUtil = (function() { // Created zones will line up exactly with existing chunks so as not to trigger zone // violations in the balancer. createRandomZones(mongos, ns, numZones); - fillChunksToRandomSize(mongos, ns, docSizeBytes, maxChunkFillMB); + fillChunksToRandomSize(mongos, ns, docSizeBytesRange, maxChunkFillMB); const beginningNumberChunks = findChunksUtil.countChunksForNs(mongos.getDB('config'), ns); const beginningNumberZones = mongos.getDB('config').tags.countDocuments({ns: ns}); @@ -72,13 +72,17 @@ var defragmentationUtil = (function() { } }; - let fillChunksToRandomSize = function(mongos, ns, docSizeBytes, maxChunkFillMB) { + let fillChunksToRandomSize = function(mongos, ns, docSizeBytesRange, maxChunkFillMB) { const chunks = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns).toArray(); - const bigString = "X".repeat(docSizeBytes); const coll = mongos.getCollection(ns); let bulk = coll.initializeUnorderedBulkOp(); + assert.gte(docSizeBytesRange[1], docSizeBytesRange[0]); chunks.forEach((chunk) => { let chunkSize = Random.randInt(maxChunkFillMB); + let docSizeBytes = Random.randInt(docSizeBytesRange[1] - docSizeBytesRange[0] + 1) + + docSizeBytesRange[0]; + docSizeBytes = docSizeBytes === 0 ? 
1 : docSizeBytes; + let bigString = "X".repeat(docSizeBytes); let docsPerChunk = (chunkSize * 1024 * 1024) / docSizeBytes; if (docsPerChunk === 0) { return; diff --git a/jstests/sharding/libs/find_chunks_util.js b/jstests/sharding/libs/find_chunks_util.js index a28ea7554189f..06a19155b4f19 100644 --- a/jstests/sharding/libs/find_chunks_util.js +++ b/jstests/sharding/libs/find_chunks_util.js @@ -67,4 +67,4 @@ var findChunksUtil = (function() { countChunksForNs, getChunksJoinClause, }; -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/libs/last_lts_mongod_commands.js b/jstests/sharding/libs/last_lts_mongod_commands.js index b709acb908a1c..8340760f4a425 100644 --- a/jstests/sharding/libs/last_lts_mongod_commands.js +++ b/jstests/sharding/libs/last_lts_mongod_commands.js @@ -7,8 +7,10 @@ const commandsRemovedFromMongodSinceLastLTS = [ "_configsvrCreateCollection", "_configsvrMoveChunk", "_configsvrRepairShardedCollectionChunksHistory", + "_configsvrTransitionToCatalogShard", + "_configsvrRenameCollectionMetadata", + "_shardsvrDropCollectionIfUUIDNotMatching", "mapreduce.shardedfinish", - "availableQueryOptions", // TODO SERVER-67689: remove this once 7.0 becomes last-lts "getLastError", "driverOIDTest", ]; @@ -16,11 +18,9 @@ const commandsRemovedFromMongodSinceLastLTS = [ // listCommands output of a last LTS version mongod. We will allow these commands to have a // test defined without always existing on the mongod being used. const commandsAddedToMongodSinceLastLTS = [ - "_refreshQueryAnalyzerConfiguration", // TODO (SERVER-68977): Remove upgrade/downgrade for - // PM-1858. - "analyzeShardKey", // TODO (SERVER-68977): Remove upgrade/downgrade for PM-1858. "clusterAbortTransaction", "clusterAggregate", + "clusterBulkWrite", "clusterCommitTransaction", "clusterCount", "clusterDelete", @@ -28,9 +28,8 @@ const commandsAddedToMongodSinceLastLTS = [ "clusterGetMore", "clusterInsert", "clusterUpdate", - "configureQueryAnalyzer", // TODO (SERVER-68977): Remove upgrade/downgrade for PM-1858. - "createSearchIndexes", // TODO (SERVER-73309): Remove once 7.0 becomes last LTS. - "dropSearchIndex", // TODO (SERVER-73309): Remove once 7.0 becomes last LTS. + "createSearchIndexes", // TODO (SERVER-73309): Remove once 7.0 becomes last LTS. + "dropSearchIndex", // TODO (SERVER-73309): Remove once 7.0 becomes last LTS. "getChangeStreamState", "getClusterParameter", "listDatabasesForAllTenants", diff --git a/jstests/sharding/libs/last_lts_mongos_commands.js b/jstests/sharding/libs/last_lts_mongos_commands.js index 5963e99ee0673..5f5c46c046f38 100644 --- a/jstests/sharding/libs/last_lts_mongos_commands.js +++ b/jstests/sharding/libs/last_lts_mongos_commands.js @@ -6,10 +6,10 @@ const commandsRemovedFromMongosSinceLastLTS = [ "repairShardedCollectionChunksHistory", // last-continuos - "availableQueryOptions", // TODO SERVER-67689: remove this once 7.0 becomes last-lts "getLastError", "getnonce", "driverOIDTest", + "transitionToCatalogShard", ]; // These commands were added in mongos since the last LTS version, so will not appear in the // listCommands output of a last LTS version mongos. We will allow these commands to have a test @@ -19,16 +19,16 @@ const commandsAddedToMongosSinceLastLTS = [ "_clusterWriteWithoutShardKey", "abortReshardCollection", "analyze", - "analyzeShardKey", // TODO (SERVER-68977): Remove upgrade/downgrade for PM-1858. 
"appendOplogNote", "bulkWrite", "checkMetadataConsistency", "cleanupReshardCollection", + "cleanupStructuredEncryptionData", "commitReshardCollection", "compactStructuredEncryptionData", "configureCollectionBalancing", - "configureQueryAnalyzer", // TODO (SERVER-68977): Remove upgrade/downgrade for PM-1858. "coordinateCommitTransaction", + "cpuload", "createSearchIndexes", "dropSearchIndex", "getClusterParameter", @@ -49,7 +49,7 @@ const commandsAddedToMongosSinceLastLTS = [ "testRemoval", "testVersions1And2", "testVersion2", - "transitionToCatalogShard", + "transitionFromDedicatedConfigServer", "transitionToDedicatedConfigServer", "updateSearchIndex", ]; diff --git a/jstests/sharding/libs/mongos_api_params_util.js b/jstests/sharding/libs/mongos_api_params_util.js index 5c8f04539b0db..d7fa8cc9f148c 100644 --- a/jstests/sharding/libs/mongos_api_params_util.js +++ b/jstests/sharding/libs/mongos_api_params_util.js @@ -3,24 +3,22 @@ * servers and shards. */ -let MongosAPIParametersUtil = (function() { - 'use strict'; - - load('jstests/replsets/rslib.js'); - load('jstests/sharding/libs/last_lts_mongos_commands.js'); - load('jstests/sharding/libs/remove_shard_util.js'); - load('jstests/sharding/libs/sharded_transactions_helpers.js'); - load('jstests/libs/auto_retry_transaction_in_sharding.js'); - load('jstests/libs/catalog_shard_util.js'); - - // TODO SERVER-50144 Remove this and allow orphan checking. - // This test calls removeShard which can leave docs in config.rangeDeletions in state "pending", - // therefore preventing orphans from being cleaned up. - TestData.skipCheckOrphans = true; - - // Cannot run the filtering metadata check on tests that run refineCollectionShardKey. - TestData.skipCheckShardFilteringMetadata = true; - +load('jstests/replsets/rslib.js'); +load('jstests/sharding/libs/last_lts_mongos_commands.js'); +load('jstests/sharding/libs/remove_shard_util.js'); +load('jstests/sharding/libs/sharded_transactions_helpers.js'); +load('jstests/libs/auto_retry_transaction_in_sharding.js'); +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; + +// TODO SERVER-50144 Remove this and allow orphan checking. +// This test calls removeShard which can leave docs in config.rangeDeletions in state "pending", +// therefore preventing orphans from being cleaned up. +TestData.skipCheckOrphans = true; + +// Cannot run the filtering metadata check on tests that run refineCollectionShardKey. 
+TestData.skipCheckShardFilteringMetadata = true; + +export let MongosAPIParametersUtil = (function() { function validateTestCase(testCase) { assert(testCase.skip || testCase.run, "must specify exactly one of 'skip' or 'run' for test case " + tojson(testCase)); @@ -90,7 +88,7 @@ let MongosAPIParametersUtil = (function() { function awaitTransitionToDedicatedConfigServer() { assert.commandWorked(st.startBalancer()); st.awaitBalancerRound(); - CatalogShardUtil.transitionToDedicatedConfigServer(st); + ConfigShardUtil.transitionToDedicatedConfigServer(st); assert.commandWorked(st.stopBalancer()); } @@ -163,11 +161,11 @@ let MongosAPIParametersUtil = (function() { } }, { - commandName: "transitionToCatalogShard", + commandName: "transitionFromDedicatedConfigServer", run: { inAPIVersion1: false, runsAgainstAdminDb: true, - configServerCommandName: "_configsvrTransitionToCatalogShard", + configServerCommandName: "_configsvrTransitionFromDedicatedConfigServer", permittedInTxn: false, requiresCatalogShardEnabled: true, setUp: () => { @@ -175,7 +173,7 @@ let MongosAPIParametersUtil = (function() { assert.commandWorked(st.s0.getDB("db").dropDatabase()); awaitTransitionToDedicatedConfigServer(); }, - command: () => ({transitionToCatalogShard: 1}) + command: () => ({transitionFromDedicatedConfigServer: 1}) } }, { @@ -1068,7 +1066,7 @@ let MongosAPIParametersUtil = (function() { // Wait for the shard to be removed completely before re-adding it. awaitTransitionToDedicatedConfigServer(st.shard0.shardName); assert.commandWorked( - st.s0.getDB("admin").runCommand({transitionToCatalogShard: 1})); + st.s0.getDB("admin").runCommand({transitionFromDedicatedConfigServer: 1})); } } }, @@ -1458,8 +1456,7 @@ let MongosAPIParametersUtil = (function() { assert.commandWorked(st.rs0.getPrimary().adminCommand({serverStatus: 1})) .storageEngine.supportsCommittedReads; - const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st) && - CatalogShardUtil.isTransitionEnabledIgnoringFCV(st); + const isConfigShardEnabled = ConfigShardUtil.isTransitionEnabledIgnoringFCV(st); (() => { // Validate test cases for all commands. 
Ensure there is at least one test case for every @@ -1575,7 +1572,7 @@ let MongosAPIParametersUtil = (function() { if (!supportsCommittedReads && runOrExplain.requiresCommittedReads) continue; - if (!isCatalogShardEnabled && runOrExplain.requiresCatalogShardEnabled) + if (!isConfigShardEnabled && runOrExplain.requiresCatalogShardEnabled) continue; if (apiParameters.apiStrict && !runOrExplain.inAPIVersion1) diff --git a/jstests/sharding/libs/proxy_protocol.js b/jstests/sharding/libs/proxy_protocol.js index a8c67e1d201f2..b98c53f4ca49a 100644 --- a/jstests/sharding/libs/proxy_protocol.js +++ b/jstests/sharding/libs/proxy_protocol.js @@ -29,7 +29,7 @@ class ProxyProtocolServer { * @return {number} ingress port number */ getIngressPort() { - return ingress_port; + return this.ingress_port; } /** @@ -38,7 +38,7 @@ class ProxyProtocolServer { * @return {number} egress port number */ getEgressPort() { - return egress_port; + return this.egress_port; } /** diff --git a/jstests/sharding/libs/remove_shard_util.js b/jstests/sharding/libs/remove_shard_util.js index 42879e0c1f5dc..812e41a9b0905 100644 --- a/jstests/sharding/libs/remove_shard_util.js +++ b/jstests/sharding/libs/remove_shard_util.js @@ -5,7 +5,7 @@ function removeShard(st, shardName, timeout) { assert.soon(function() { let res; - if (TestData.catalogShard && shardName == "config") { + if (TestData.configShard && shardName == "config") { // Need to use transitionToDedicatedConfigServer if trying // to remove config server as a shard res = st.s.adminCommand({transitionToDedicatedConfigServer: shardName}); diff --git a/jstests/sharding/libs/reshard_collection_util.js b/jstests/sharding/libs/reshard_collection_util.js new file mode 100644 index 0000000000000..07b8253ee69ed --- /dev/null +++ b/jstests/sharding/libs/reshard_collection_util.js @@ -0,0 +1,255 @@ +/** + * Util class for testing reshardCollection cmd. + */ + +"use strict"; + +load("jstests/libs/uuid_util.js"); + +class ReshardCollectionCmdTest { + constructor(testConfig) { + assert(testConfig.st && testConfig.dbName && testConfig.collName && + testConfig.numInitialDocs); + this._st = testConfig.st; + this._mongos = this._st.s0; + this._mongosConfig = this._mongos.getDB('config'); + this._dbName = testConfig.dbName; + this._collName = testConfig.collName; + this._ns = this._dbName + "." + this._collName; + this._numInitialDocs = testConfig.numInitialDocs; + + this._shardToRSMap = {}; + this._shardToRSMap[this._st.shard0.shardName] = this._st.rs0; + this._shardToRSMap[this._st.shard1.shardName] = this._st.rs1; + this._shardIdToShardMap = {}; + this._shardIdToShardMap[this._st.shard0.shardName] = this._st.shard0; + this._shardIdToShardMap[this._st.shard1.shardName] = this._st.shard1; + } + + _getUUIDFromCollectionInfo(dbName, collName, collInfo) { + if (collInfo) { + return extractUUIDFromObject(collInfo.info.uuid); + } + + const uuidObject = getUUIDFromListCollections(this._mongos.getDB(dbName), collName); + return extractUUIDFromObject(uuidObject); + } + + _constructTemporaryReshardingCollName(dbName, collName, collInfo) { + const existingUUID = this._getUUIDFromCollectionInfo(dbName, collName, collInfo); + return 'system.resharding.' 
+ existingUUID; + } + + _getAllShardIdsFromExpectedChunks(expectedChunks) { + let shardIds = new Set(); + expectedChunks.forEach(chunk => { + shardIds.add(chunk.recipientShardId); + }); + return shardIds; + } + + _verifyChunksMatchExpected(numExpectedChunks, presetExpectedChunks) { + let collEntry = + this._mongos.getDB('config').getCollection('collections').findOne({_id: this._ns}); + let chunkQuery = {uuid: collEntry.uuid}; + + const reshardedChunks = this._mongosConfig.chunks.find(chunkQuery).toArray(); + + if (presetExpectedChunks) { + presetExpectedChunks.sort(); + } + + reshardedChunks.sort(); + assert.eq(numExpectedChunks, reshardedChunks.length, tojson(reshardedChunks)); + + let shardChunkCounts = {}; + let incChunkCount = key => { + if (shardChunkCounts.hasOwnProperty(key)) { + shardChunkCounts[key]++; + } else { + shardChunkCounts[key] = 1; + } + }; + + for (let i = 0; i < numExpectedChunks; i++) { + incChunkCount(reshardedChunks[i].shard); + + // match exact chunk boundaries for presetExpectedChunks + if (presetExpectedChunks) { + assert.eq(presetExpectedChunks[i].recipientShardId, reshardedChunks[i].shard); + assert.eq(presetExpectedChunks[i].min, reshardedChunks[i].min); + assert.eq(presetExpectedChunks[i].max, reshardedChunks[i].max); + } + } + + // if presetChunks not specified, we only assert that chunks counts are balanced across + // shards + if (!presetExpectedChunks) { + let maxDiff = 0; + let shards = Object.keys(shardChunkCounts); + + shards.forEach(shard1 => { + shards.forEach(shard2 => { + let diff = Math.abs(shardChunkCounts[shard1] - shardChunkCounts[shard2]); + maxDiff = (diff > maxDiff) ? diff : maxDiff; + }); + }); + + assert.lte(maxDiff, 1, tojson(reshardedChunks)); + } + } + + _verifyCollectionExistenceForConn(collName, expectedToExist, conn) { + const doesExist = Boolean(conn.getDB(this._dbName)[collName].exists()); + assert.eq(doesExist, expectedToExist); + } + + _verifyTemporaryReshardingCollectionExistsWithCorrectOptions(shardKey, + expectedRecipientShards) { + const originalCollInfo = + this._mongos.getDB(this._dbName).getCollectionInfos({name: this._collName})[0]; + assert.neq(originalCollInfo, undefined); + + const tempReshardingCollName = this._constructTemporaryReshardingCollName( + this._dbName, this._collName, originalCollInfo); + this._verifyCollectionExistenceForConn(tempReshardingCollName, false, this._mongos); + + expectedRecipientShards.forEach(shardId => { + const rsPrimary = this._shardToRSMap[shardId].getPrimary(); + this._verifyCollectionExistenceForConn(this._collName, true, rsPrimary); + this._verifyCollectionExistenceForConn(tempReshardingCollName, false, rsPrimary); + ShardedIndexUtil.assertIndexExistsOnShard( + this._shardIdToShardMap[shardId], this._dbName, this._collName, shardKey); + }); + } + + _verifyAllShardingCollectionsRemoved(tempReshardingCollName) { + assert.eq(0, this._mongos.getDB(this._dbName)[tempReshardingCollName].find().itcount()); + assert.eq(0, this._mongosConfig.reshardingOperations.find({ns: this._ns}).itcount()); + assert.eq( + 0, this._mongosConfig.collections.find({reshardingFields: {$exists: true}}).itcount()); + assert.eq(0, + this._st.rs0.getPrimary() + .getDB('config') + .localReshardingOperations.donor.find({ns: this._ns}) + .itcount()); + assert.eq(0, + this._st.rs0.getPrimary() + .getDB('config') + .localReshardingOperations.recipient.find({ns: this._ns}) + .itcount()); + assert.eq(0, + this._st.rs1.getPrimary() + .getDB('config') + .localReshardingOperations.donor.find({ns: this._ns}) + .itcount()); + 
assert.eq(0, + this._st.rs1.getPrimary() + .getDB('config') + .localReshardingOperations.recipient.find({ns: this._ns}) + .itcount()); + } + + _verifyTagsDocumentsAfterOperationCompletes(ns, shardKeyPattern, expectedZones) { + const tagsArr = this._mongos.getCollection('config.tags').find({ns: ns}).toArray(); + if (expectedZones !== undefined) { + assert.eq(tagsArr.length, expectedZones.length); + tagsArr.sort((a, b) => a["tag"].localeCompare(b["tag"])); + expectedZones.sort((a, b) => a["zone"].localeCompare(b["zone"])); + } + for (let i = 0; i < tagsArr.length; ++i) { + assert.eq(Object.keys(tagsArr[i]["min"]), shardKeyPattern); + assert.eq(Object.keys(tagsArr[i]["max"]), shardKeyPattern); + if (expectedZones !== undefined) { + assert.eq(tagsArr[i]["min"], expectedZones[i]["min"]); + assert.eq(tagsArr[i]["max"], expectedZones[i]["max"]); + assert.eq(tagsArr[i]["tag"], expectedZones[i]["zone"]); + } + } + } + + _verifyIndexesCreated(oldIndexes, shardKey) { + const indexes = this._mongos.getDB(this._dbName).getCollection(this._collName).getIndexes(); + const indexKeySet = new Set(); + indexes.forEach(index => indexKeySet.add(tojson(index.key))); + assert.eq(indexKeySet.has(tojson(shardKey)), true); + oldIndexes.forEach(index => { + assert.eq(indexKeySet.has(tojson(index.key)), true); + }); + } + + assertReshardCollOkWithPreset(commandObj, presetReshardedChunks) { + assert.commandWorked( + this._mongos.adminCommand({shardCollection: this._ns, key: {oldKey: 1}})); + + let bulk = this._mongos.getDB(this._dbName) + .getCollection(this._collName) + .initializeOrderedBulkOp(); + for (let x = 0; x < this._numInitialDocs; x++) { + bulk.insert({oldKey: x, newKey: this._numInitialDocs - x}); + } + assert.commandWorked(bulk.execute()); + + commandObj._presetReshardedChunks = presetReshardedChunks; + const tempReshardingCollName = + this._constructTemporaryReshardingCollName(this._dbName, this._collName); + + assert.commandWorked(this._mongos.adminCommand(commandObj)); + + this._verifyTemporaryReshardingCollectionExistsWithCorrectOptions( + commandObj.key, this._getAllShardIdsFromExpectedChunks(presetReshardedChunks)); + + this._verifyTagsDocumentsAfterOperationCompletes(this._ns, Object.keys(commandObj.key)); + + this._verifyChunksMatchExpected(presetReshardedChunks.length, presetReshardedChunks); + + this._mongos.getDB(this._dbName)[this._collName].drop(); + this._verifyAllShardingCollectionsRemoved(tempReshardingCollName); + } + + /** + * Run reshardCollection and check the number of chunks is as expected. + * @param {Object} commandObj The reshardCollection cmd to execute. + * @param {Number} expectedChunkNum Number of chunks to have after reshardCollection. + * @param {Object[]} expectedChunks Expected chunk distribution after reshardCollection. + * @param {Object[]} expectedZones Expected zones for the collection after reshardCollection. + * @param {Function} additionalSetup Additional setup needed, taking the class object as input. 
+ */ + assertReshardCollOk( + commandObj, expectedChunkNum, expectedChunks, expectedZones, additionalSetup) { + assert.commandWorked( + this._mongos.adminCommand({shardCollection: this._ns, key: {oldKey: 1}})); + + let bulk = this._mongos.getDB(this._dbName) + .getCollection(this._collName) + .initializeOrderedBulkOp(); + for (let x = 0; x < this._numInitialDocs; x++) { + bulk.insert({oldKey: x, newKey: this._numInitialDocs - x}); + } + assert.commandWorked(bulk.execute()); + if (additionalSetup) { + additionalSetup(this); + } + + const indexes = this._mongos.getDB(this._dbName).getCollection(this._collName).getIndexes(); + const tempReshardingCollName = + this._constructTemporaryReshardingCollName(this._dbName, this._collName); + + assert.commandWorked(this._mongos.adminCommand(commandObj)); + + if (expectedChunks) { + this._verifyTemporaryReshardingCollectionExistsWithCorrectOptions( + commandObj.key, this._getAllShardIdsFromExpectedChunks(expectedChunks)); + } + + this._verifyTagsDocumentsAfterOperationCompletes( + this._ns, Object.keys(commandObj.key), expectedZones); + + this._verifyChunksMatchExpected(expectedChunkNum, expectedChunks); + + this._verifyIndexesCreated(indexes, commandObj.key); + + this._mongos.getDB(this._dbName)[this._collName].drop(); + this._verifyAllShardingCollectionsRemoved(tempReshardingCollName); + } +} \ No newline at end of file diff --git a/jstests/sharding/libs/resharding_test_fixture.js b/jstests/sharding/libs/resharding_test_fixture.js index 9657a63db98fd..f9359414a6a46 100644 --- a/jstests/sharding/libs/resharding_test_fixture.js +++ b/jstests/sharding/libs/resharding_test_fixture.js @@ -33,11 +33,12 @@ var ReshardingTest = class { writePeriodicNoops: writePeriodicNoops = undefined, enableElections: enableElections = false, logComponentVerbosity: logComponentVerbosity = undefined, - storeFindAndModifyImagesInSideCollection: storeFindAndModifyImagesInSideCollection = true, oplogSize: oplogSize = undefined, maxNumberOfTransactionOperationsInSingleOplogEntry: maxNumberOfTransactionOperationsInSingleOplogEntry = undefined, - catalogShard: catalogShard = false, + configShard: configShard = false, + wiredTigerConcurrentWriteTransactions: wiredTigerConcurrentWriteTransactions = undefined, + reshardingOplogBatchTaskCount: reshardingOplogBatchTaskCount = undefined, } = {}) { // The @private JSDoc comments cause VS Code to not display the corresponding properties and // methods in its autocomplete list. This makes it simpler for test authors to know what the @@ -64,12 +65,12 @@ var ReshardingTest = class { this._enableElections = enableElections; /** @private */ this._logComponentVerbosity = logComponentVerbosity; - /** @private */ - this._storeFindAndModifyImagesInSideCollection = storeFindAndModifyImagesInSideCollection; this._oplogSize = oplogSize; this._maxNumberOfTransactionOperationsInSingleOplogEntry = maxNumberOfTransactionOperationsInSingleOplogEntry; - this._catalogShard = catalogShard || jsTestOptions().catalogShard; + this._configShard = configShard || jsTestOptions().configShard; + this._wiredTigerConcurrentWriteTransactions = wiredTigerConcurrentWriteTransactions; + this._reshardingOplogBatchTaskCount = reshardingOplogBatchTaskCount; // Properties set by setup(). 
/** @private */ @@ -107,20 +108,15 @@ var ReshardingTest = class { setup() { const mongosOptions = {setParameter: {}}; let configOptions = {setParameter: {}}; - let rsOptions = { - setParameter: { - storeFindAndModifyImagesInSideCollection: - this._storeFindAndModifyImagesInSideCollection - } - }; + let rsOptions = {setParameter: {}}; if (this._oplogSize) { rsOptions.oplogSize = this._oplogSize; } const configReplSetTestOptions = {}; let nodesPerShard = 2; - // Use the shard default in catalog shard mode since the config server will be a shard. - let nodesPerConfigRs = this._catalogShard ? 2 : 1; + // Use the shard default in config shard mode since the config server will be a shard. + let nodesPerConfigRs = this._configShard ? 2 : 1; if (this._enableElections) { nodesPerShard = 3; @@ -177,7 +173,7 @@ var ReshardingTest = class { this._maxNumberOfTransactionOperationsInSingleOplogEntry; } - if (this._catalogShard) { + if (this._configShard) { // ShardingTest does not currently support deep merging of options, so merge the set // parameters for config and replica sets here. rsOptions.setParameter = @@ -186,6 +182,18 @@ var ReshardingTest = class { Object.merge(configOptions.setParameter, rsOptions.setParameter); } + if (this._wiredTigerConcurrentWriteTransactions !== undefined) { + rsOptions.setParameter.storageEngineConcurrencyAdjustmentAlgorithm = + "fixedConcurrentTransactions"; + rsOptions.setParameter.wiredTigerConcurrentWriteTransactions = + this._wiredTigerConcurrentWriteTransactions; + } + + if (this._reshardingOplogBatchTaskCount !== undefined) { + rsOptions.setParameter.reshardingOplogBatchTaskCount = + this._reshardingOplogBatchTaskCount; + } + this._st = new ShardingTest({ mongos: 1, mongosOptions, @@ -196,7 +204,7 @@ var ReshardingTest = class { rsOptions, configReplSetTestOptions, manualAddShard: true, - catalogShard: this._catalogShard, + configShard: this._configShard, }); for (let i = 0; i < this._numShards; ++i) { @@ -219,8 +227,9 @@ var ReshardingTest = class { } const shard = this._st[`shard${i}`]; - if (this._catalogShard && i == 0) { - assert.commandWorked(this._st.s.adminCommand({transitionToCatalogShard: 1})); + if (this._configShard && i == 0) { + assert.commandWorked( + this._st.s.adminCommand({transitionFromDedicatedConfigServer: 1})); shard.shardName = "config"; } else { const res = assert.commandWorked( @@ -315,6 +324,17 @@ var ReshardingTest = class { return this._tempNs; } + get presetReshardedChunks() { + assert.neq( + undefined, this._presetReshardedChunks, "createShardedCollection must be called first"); + return this._presetReshardedChunks; + } + + get sourceCollectionUUID() { + assert.neq( + undefined, this._sourceCollectionUUID, "createShardedCollection must be called first"); + return this._sourceCollectionUUID; + } /** * Reshards an existing collection using the specified new shard key and new chunk ranges. 
* @@ -330,8 +350,8 @@ var ReshardingTest = class { } /** @private */ - _startReshardingInBackgroundAndAllowCommandFailure({newShardKeyPattern, newChunks}, - expectedErrorCode) { + _startReshardingInBackgroundAndAllowCommandFailure( + {newShardKeyPattern, newChunks, forceRedistribution, reshardingUUID}, expectedErrorCode) { for (let disallowedErrorCode of [ErrorCodes.FailedToSatisfyReadPreference, ErrorCodes.HostUnreachable, ]) { @@ -345,6 +365,7 @@ var ReshardingTest = class { newChunks = newChunks.map( chunk => ({min: chunk.min, max: chunk.max, recipientShardId: chunk.shard})); + this._presetReshardedChunks = newChunks; this._newShardKey = Object.assign({}, newShardKeyPattern); @@ -364,8 +385,14 @@ var ReshardingTest = class { this._commandDoneSignal = new CountDownLatch(1); - this._reshardingThread = - new Thread(function(host, ns, newShardKeyPattern, newChunks, commandDoneSignal) { + this._reshardingThread = new Thread( + function(host, + ns, + newShardKeyPattern, + newChunks, + forceRedistribution, + reshardingUUID, + commandDoneSignal) { const conn = new Mongo(host); // We allow the client to retry the reshardCollection a large but still finite @@ -377,11 +404,21 @@ var ReshardingTest = class { let res; for (let i = 1; i <= kMaxNumAttempts; ++i) { - res = conn.adminCommand({ + let command = { reshardCollection: ns, key: newShardKeyPattern, _presetReshardedChunks: newChunks, - }); + }; + if (forceRedistribution !== undefined) { + command = Object.merge(command, {forceRedistribution: forceRedistribution}); + } + if (reshardingUUID !== undefined) { + // UUIDs are passed in as strings because the UUID type cannot pass + // through the thread constructor. + reshardingUUID = eval(reshardingUUID); + command = Object.merge(command, {reshardingUUID: reshardingUUID}); + } + res = conn.adminCommand(command); if (res.ok === 1 || (res.code !== ErrorCodes.FailedToSatisfyReadPreference && @@ -397,7 +434,14 @@ var ReshardingTest = class { } return res; - }, this._st.s.host, this._ns, newShardKeyPattern, newChunks, this._commandDoneSignal); + }, + this._st.s.host, + this._ns, + newShardKeyPattern, + newChunks, + forceRedistribution, + reshardingUUID ? reshardingUUID.toString() : undefined, + this._commandDoneSignal); this._reshardingThread.start(); this._isReshardingActive = true; @@ -430,7 +474,7 @@ var ReshardingTest = class { * finishes but before checking the the state post resharding. By the time afterReshardingFn * is called the temporary resharding collection will either have been dropped or renamed. 
*/ - withReshardingInBackground({newShardKeyPattern, newChunks}, + withReshardingInBackground({newShardKeyPattern, newChunks, forceRedistribution, reshardingUUID}, duringReshardingFn = (tempNs) => {}, { expectedErrorCode = ErrorCodes.OK, @@ -438,8 +482,9 @@ var ReshardingTest = class { postDecisionPersistedFn = () => {}, afterReshardingFn = () => {} } = {}) { - this._startReshardingInBackgroundAndAllowCommandFailure({newShardKeyPattern, newChunks}, - expectedErrorCode); + this._startReshardingInBackgroundAndAllowCommandFailure( + {newShardKeyPattern, newChunks, forceRedistribution, reshardingUUID}, + expectedErrorCode); assert.soon(() => { const op = this._findReshardingCommandOp(); @@ -735,9 +780,11 @@ var ReshardingTest = class { /** @private */ _checkCoordinatorPostState(expectedErrorCode) { - assert.eq([], - this._st.config.reshardingOperations.find({ns: this._ns}).toArray(), - "expected config.reshardingOperations to be empty, but found it wasn't"); + assert.eq( + [], + this._st.config.reshardingOperations.find({ns: this._ns, state: {$ne: "quiesced"}}) + .toArray(), + "expected config.reshardingOperations to be empty (except quiesced operations), but found it wasn't"); assert.eq([], this._st.config.collections.find({reshardingFields: {$exists: true}}).toArray(), diff --git a/jstests/sharding/libs/timeseries_update_multi_util.js b/jstests/sharding/libs/timeseries_update_multi_util.js new file mode 100644 index 0000000000000..2e59c53fd1e57 --- /dev/null +++ b/jstests/sharding/libs/timeseries_update_multi_util.js @@ -0,0 +1,140 @@ +/** + * Helpers for testing timeseries multi updates. + */ + +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; + +export const TimeseriesMultiUpdateUtil = (function() { + const timeField = 'time'; + const metaField = 'hostid'; + + // The split point between two shards. This value guarantees that generated time values do not + // fall on this boundary. + const splitTimePointBetweenTwoShards = ISODate("2001-06-30"); + const numOfDocs = 4; + + function generateTimeValue(index) { + return ISODate(`${2000 + index}-01-01`); + } + + const collectionConfigurations = { + // Shard key only on meta field/subfields. + metaShardKey: { + metaGenerator: (id => id), + shardKey: {[metaField]: 1}, + splitPoint: {meta: 2}, + }, + metaShardKeyString: { + metaGenerator: (id => `string:${id}`), + shardKey: {[metaField]: 1}, + splitPoint: {meta: `string:2`}, + }, + metaObjectShardKey: { + metaGenerator: (index => ({a: index})), + shardKey: {[metaField]: 1}, + splitPoint: {meta: {a: 2}}, + }, + metaSubFieldShardKey: { + metaGenerator: (index => ({a: index})), + shardKey: {[metaField + '.a']: 1}, + splitPoint: {'meta.a': 2}, + }, + + // Shard key on time field. + timeShardKey: { + shardKey: {[timeField]: 1}, + splitPoint: {[`control.min.${timeField}`]: splitTimePointBetweenTwoShards}, + }, + + // Shard key on both meta and time field. 
+ metaTimeShardKey: { + metaGenerator: (id => id), + shardKey: {[metaField]: 1, [timeField]: 1}, + splitPoint: {meta: 2, [`control.min.${timeField}`]: splitTimePointBetweenTwoShards}, + }, + metaObjectTimeShardKey: { + metaGenerator: (index => ({a: index})), + shardKey: {[metaField]: 1, [timeField]: 1}, + splitPoint: + {meta: {a: 2}, [`control.min.${timeField}`]: splitTimePointBetweenTwoShards}, + }, + metaSubFieldTimeShardKey: { + metaGenerator: (index => ({a: index})), + shardKey: {[metaField + '.a']: 1, [timeField]: 1}, + splitPoint: {'meta.a': 2, [`control.min.${timeField}`]: splitTimePointBetweenTwoShards}, + }, + }; + + function generateDocsForTestCase(collConfig) { + const documents = TimeseriesTest.generateHosts(numOfDocs); + for (let i = 0; i < numOfDocs; i++) { + documents[i]._id = i; + if (collConfig.metaGenerator) { + documents[i][metaField] = collConfig.metaGenerator(i); + } + documents[i][timeField] = generateTimeValue(i); + documents[i].f = i; + documents[i].stringField = "testString"; + } + return documents; + } + + function prepareShardedTimeseriesCollection( + mongos, shardingTest, db, collName, collConfig, insertFn) { + // Ensures that the collection does not exist. + const coll = db.getCollection(collName); + coll.drop(); + + // Creates timeseries collection. + const tsOptions = {timeField: timeField}; + const hasMetaField = !!collConfig.metaGenerator; + if (hasMetaField) { + tsOptions.metaField = metaField; + } + assert.commandWorked(db.createCollection(collName, {timeseries: tsOptions})); + + // Shards timeseries collection. + assert.commandWorked(coll.createIndex(collConfig.shardKey)); + assert.commandWorked(mongos.adminCommand({ + shardCollection: `${db.getName()}.${collName}`, + key: collConfig.shardKey, + })); + + // Inserts initial set of documents. + const documents = generateDocsForTestCase(collConfig); + assert.commandWorked(insertFn(coll, documents)); + + // Manually splits the data into two chunks. + assert.commandWorked(mongos.adminCommand( + {split: `${db.getName()}.system.buckets.${collName}`, middle: collConfig.splitPoint})); + + // Ensures that currently both chunks reside on the primary shard. + let counts = shardingTest.chunkCounts(`system.buckets.${collName}`, db.getName()); + const primaryShard = shardingTest.getPrimaryShard(db.getName()); + assert.eq(2, counts[primaryShard.shardName], counts); + + // Moves one of the chunks into the second shard. + const otherShard = shardingTest.getOther(primaryShard); + assert.commandWorked(mongos.adminCommand({ + movechunk: `${db.getName()}.system.buckets.${collName}`, + find: collConfig.splitPoint, + to: otherShard.name, + _waitForDelete: true + })); + + // Ensures that each shard owns one chunk. 
+ counts = shardingTest.chunkCounts(`system.buckets.${collName}`, db.getName()); + assert.eq(1, counts[primaryShard.shardName], counts); + assert.eq(1, counts[otherShard.shardName], counts); + + return [coll, documents]; + } + + return { + timeField, + metaField, + collectionConfigurations, + generateTimeValue, + prepareShardedTimeseriesCollection, + }; +})(); diff --git a/jstests/sharding/limit_push.js b/jstests/sharding/limit_push.js index f3cd1245a33ec..0ee8c3cd840e7 100644 --- a/jstests/sharding/limit_push.js +++ b/jstests/sharding/limit_push.js @@ -8,7 +8,7 @@ var s = new ShardingTest({name: "limit_push", shards: 2, mongos: 1}); var db = s.getDB("test"); // Create some data -for (i = 0; i < 100; i++) { +for (let i = 0; i < 100; i++) { db.limit_push.insert({_id: i, x: i}); } db.limit_push.createIndex({x: 1}); @@ -34,9 +34,7 @@ assert.eq( // The query is asking for the maximum value below a given value // db.limit_push.find( { x : { $lt : 60} } ).sort( { x:-1} ).limit(1) -q = { - x: {$lt: 60} -}; +let q = {x: {$lt: 60}}; // Make sure the basic queries are correct assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents"); @@ -46,7 +44,7 @@ assert.eq(60, db.limit_push.find(q).count(), "Did not find 60 documents"); // Now make sure that the explain shos that each shard is returning a single document as // indicated // by the "n" element for each shard -exp = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats"); +let exp = db.limit_push.find(q).sort({x: -1}).limit(1).explain("executionStats"); printjson(exp); var execStages = exp.executionStats.executionStages; diff --git a/jstests/sharding/linearizable_read_concern.js b/jstests/sharding/linearizable_read_concern.js index 98724828edd9f..ca46d7c08146a 100644 --- a/jstests/sharding/linearizable_read_concern.js +++ b/jstests/sharding/linearizable_read_concern.js @@ -19,8 +19,6 @@ * document. This test is mainly trying to ensure that system behavior is * reasonable when executing linearizable reads in a sharded cluster, so as to * exercise possible (invalid) user behavior. - * - * @tags: [temporary_catalog_shard_incompatible] */ load("jstests/replsets/rslib.js"); @@ -38,10 +36,9 @@ var testName = "linearizable_read_concern"; var st = new ShardingTest({ name: testName, - shards: 2, other: {rs0: {nodes: 3}, rs1: {nodes: 3}, useBridge: true}, mongos: 1, - config: 1, + config: TestData.configShard ? undefined : 1, enableBalancer: false }); @@ -126,5 +123,11 @@ var result = testDB.runReadCommand({ }); assert.commandFailedWithCode(result, ErrorCodes.MaxTimeMSExpired); +if (TestData.configShard) { + // Reconnect so the config server is available for shutdown hooks. 
+ secondaries[0].reconnect(primary); + secondaries[1].reconnect(primary); +} + st.stop(); })(); diff --git a/jstests/sharding/listDatabases.js b/jstests/sharding/listDatabases.js index 88ec308ddab08..ed6bd773006f2 100644 --- a/jstests/sharding/listDatabases.js +++ b/jstests/sharding/listDatabases.js @@ -38,9 +38,9 @@ var dbEntryCheck = function(dbEntry, onConfig) { res = mongos.adminCommand("listDatabases"); dbArray = res.databases; - dbEntryCheck(getDBSection(dbArray, "blah"), TestData.catalogShard); - dbEntryCheck(getDBSection(dbArray, "foo"), TestData.catalogShard); - dbEntryCheck(getDBSection(dbArray, "raw"), TestData.catalogShard); + dbEntryCheck(getDBSection(dbArray, "blah"), TestData.configShard); + dbEntryCheck(getDBSection(dbArray, "foo"), TestData.configShard); + dbEntryCheck(getDBSection(dbArray, "raw"), TestData.configShard); } // Local db is never returned. @@ -73,8 +73,8 @@ var dbEntryCheck = function(dbEntry, onConfig) { var entry = getDBSection(dbArray, "config"); dbEntryCheck(entry, true); assert(entry["shards"]); - // There's only the "config" shard in catalog shard mode. - assert.eq(Object.keys(entry["shards"]).length, TestData.catalogShard ? 1 : 2); + // There's only the "config" shard in config shard mode. + assert.eq(Object.keys(entry["shards"]).length, TestData.configShard ? 1 : 2); } // Admin db is only reported on the config shard, never on other shards. diff --git a/jstests/sharding/listener_processing_server_status_metrics.js b/jstests/sharding/listener_processing_server_status_metrics.js index 64b10e78ea4b4..209a0da476866 100644 --- a/jstests/sharding/listener_processing_server_status_metrics.js +++ b/jstests/sharding/listener_processing_server_status_metrics.js @@ -9,18 +9,10 @@ (function() { "use strict"; -load("jstests/libs/feature_flag_util.js"); - const numConnections = 10; const st = new ShardingTest({shards: 1, mongos: 1}); const admin = st.s.getDB("admin"); -if (!FeatureFlagUtil.isEnabled(st.s.getDB("test"), "ConnHealthMetrics")) { - jsTestLog('Skipping test because the connection health metrics feature flag is disabled.'); - st.stop(); - return; -} - let previous = 0; for (var i = 0; i < numConnections; i++) { const conn = new Mongo(admin.getMongo().host); diff --git a/jstests/sharding/live_shard_logical_initial_sync.js b/jstests/sharding/live_shard_logical_initial_sync.js index 9fd41c33e3d31..5716d6cd90d9d 100644 --- a/jstests/sharding/live_shard_logical_initial_sync.js +++ b/jstests/sharding/live_shard_logical_initial_sync.js @@ -3,8 +3,7 @@ * shards using logical initial sync. * * We control our own failovers, and we also need the RSM to react reasonably quickly to those. - * @tags: [does_not_support_stepdowns, requires_streamable_rsm, - * temporary_catalog_shard_incompatible] + * @tags: [does_not_support_stepdowns, requires_streamable_rsm] */ (function() { @@ -15,7 +14,8 @@ load("jstests/sharding/libs/sharding_state_test.js"); const st = new ShardingTest({config: 1, shards: {rs0: {nodes: 1}}}); const rs = st.rs0; -const newNode = ShardingStateTest.addReplSetNode({replSet: rs, serverTypeFlag: "shardsvr"}); +const serverTypeFlag = TestData.configShard ? 
"configsvr" : "shardsvr"; +const newNode = ShardingStateTest.addReplSetNode({replSet: rs, serverTypeFlag}); jsTestLog("Checking sharding state before failover."); ShardingStateTest.checkShardingState(st); diff --git a/jstests/sharding/localhostAuthBypass.js b/jstests/sharding/localhostAuthBypass.js index aa9668e252e5c..25d1adc23e197 100644 --- a/jstests/sharding/localhostAuthBypass.js +++ b/jstests/sharding/localhostAuthBypass.js @@ -27,9 +27,9 @@ var createUser = function(mongo) { }; var addUsersToEachShard = function(st) { - // In catalog shard mode skip the first shard because it is also the config server and will + // In config shard mode skip the first shard because it is also the config server and will // already have a user made on it through mongos. - for (var i = TestData.catalogShard ? 1 : 0; i < numShards; i++) { + for (var i = TestData.configShard ? 1 : 0; i < numShards; i++) { print("============ adding a user to shard " + i); var d = st["shard" + i]; d.getDB("admin").createUser({user: username, pwd: password, roles: jsTest.adminUserRoles}); @@ -38,7 +38,7 @@ var addUsersToEachShard = function(st) { var addShard = function(st, shouldPass) { adhocShard++; - var rs = + const rs = new ReplSetTest({nodes: 1, host: 'localhost', name: 'localhostAuthShard-' + adhocShard}); rs.startSet({shardsvr: "", keyFile: keyfile, auth: ""}); rs.initiate(); diff --git a/jstests/sharding/max_time_ms_connection_pool.js b/jstests/sharding/max_time_ms_connection_pool.js index f4b5422c76089..54ae36b8e0db0 100644 --- a/jstests/sharding/max_time_ms_connection_pool.js +++ b/jstests/sharding/max_time_ms_connection_pool.js @@ -1,10 +1,12 @@ /** - * * Tests the rewrite of NetworkInterfaceExceededTimeLimit exception coming from * `executor/connection_pool.cpp` into MaxTimeMSError when MaxTimeMS option is set for a given * sharding command. * - * @tags: [requires_fcv_61] + * @tags: [ + * requires_fcv_61, + * does_not_support_stepdowns, + * ] */ (function() { diff --git a/jstests/sharding/merge_all_chunks_on_shard.js b/jstests/sharding/merge_all_chunks_on_shard.js index 3abf22de35d97..f42aa761c5ce2 100644 --- a/jstests/sharding/merge_all_chunks_on_shard.js +++ b/jstests/sharding/merge_all_chunks_on_shard.js @@ -8,6 +8,7 @@ (function() { 'use strict'; load("jstests/sharding/libs/find_chunks_util.js"); +load("jstests/libs/fail_point_util.js"); /* Create new sharded collection on testDB */ let _collCounter = 0; @@ -35,17 +36,23 @@ function moveRange(st, coll, minKeyValue, maxKeyValue, toShard) { } /* Set `onCurrentShardSince` field to (refTimestamp + offsetInSeconds) */ -function setOnCurrentShardSince(configDB, coll, extraQuery, refTimestamp, offsetInSeconds) { - const collUuid = configDB.collections.findOne({_id: coll.getFullName()}).uuid; +function setOnCurrentShardSince(mongoS, coll, extraQuery, refTimestamp, offsetInSeconds) { + // Use 'retryWrites' when writing to the configsvr because they are not automatically retried. 
+ const mongosSession = mongoS.startSession({retryWrites: true}); + const sessionConfigDB = mongosSession.getDatabase('config'); + const collUuid = sessionConfigDB.collections.findOne({_id: coll.getFullName()}).uuid; const query = Object.assign({uuid: collUuid}, extraQuery); const newValue = new Timestamp(refTimestamp.getTime() + offsetInSeconds, 0); - assert.commandWorked( - configDB.chunks.updateMany(query, [{ - $set: { - "onCurrentShardSince": newValue, - "history": [{validAfter: newValue, shard: "$shard"}] - } - }])); + const chunks = sessionConfigDB.chunks.find(query); + chunks.forEach((chunk) => { + assert.commandWorked(sessionConfigDB.chunks.updateOne( + {_id: chunk._id}, [{ + $set: { + "onCurrentShardSince": newValue, + "history": [{validAfter: newValue, shard: "$shard"}] + } + }])); + }); } /* Set jumbo flag to true */ @@ -56,20 +63,12 @@ function setJumboFlag(configDB, coll, chunkQuery) { } function setHistoryWindowInSecs(st, valueInSeconds) { - st.forEachConfigServer((conn) => { - assert.commandWorked(conn.adminCommand({ - configureFailPoint: 'overrideHistoryWindowInSecs', - mode: 'alwaysOn', - data: {seconds: valueInSeconds} - })); - }); + configureFailPointForRS( + st.configRS.nodes, "overrideHistoryWindowInSecs", {seconds: valueInSeconds}, "alwaysOn"); } function resetHistoryWindowInSecs(st) { - st.forEachConfigServer((conn) => { - assert.commandWorked( - conn.adminCommand({configureFailPoint: 'overrideHistoryWindowInSecs', mode: 'off'})); - }); + configureFailPointForRS(st.configRS.nodes, "overrideHistoryWindowInSecs", {}, "off"); } let defaultAutoMergerThrottlingMS = null; @@ -94,20 +93,12 @@ function resetBalancerMergeThrottling(st) { } function setBalanceRoundInterval(st, valueInMs) { - st.forEachConfigServer((conn) => { - assert.commandWorked(conn.adminCommand({ - configureFailPoint: 'overrideBalanceRoundInterval', - mode: 'alwaysOn', - data: {intervalMs: valueInMs} - })); - }); + configureFailPointForRS( + st.configRS.nodes, "overrideBalanceRoundInterval", {intervalMs: valueInMs}, "alwaysOn"); } function resetBalanceRoundInterval(st) { - st.forEachConfigServer((conn) => { - assert.commandWorked( - conn.adminCommand({configureFailPoint: 'overrideBalanceRoundInterval', mode: 'off'})); - }); + configureFailPointForRS(st.configRS.nodes, "overrideBalanceRoundInterval", {}, "off"); } function assertExpectedChunksOnShard(configDB, coll, shardName, expectedChunks) { @@ -200,7 +191,7 @@ function mergeAllChunksOnShardTest(st, testDB) { const now = buildInitialScenario(st, coll, shard0, shard1, historyWindowInSeconds); // Make sure that all chunks are out of the history window - setOnCurrentShardSince(configDB, coll, {}, now, -historyWindowInSeconds - 1000); + setOnCurrentShardSince(st.s, coll, {}, now, -historyWindowInSeconds - 1000); // Merge all mergeable chunks on shard0 assert.commandWorked( @@ -215,6 +206,11 @@ function mergeAllChunksOnShardTest(st, testDB) { } function mergeAllChunksWithMaxNumberOfChunksTest(st, testDB) { + // Skip this test if running in a suite with stepdowns + if (typeof ContinuousStepdown !== 'undefined') { + return; + } + // Consider all chunks mergeable setHistoryWindowInSecs(st, -10 /* seconds */); @@ -254,7 +250,7 @@ function mergeAllChunksOnShardConsideringHistoryWindowTest(st, testDB) { const now = buildInitialScenario(st, coll, shard0, shard1); // Initially, make all chunks older than history window - setOnCurrentShardSince(configDB, coll, {}, now, -historyWindowInSeconds - 1000); + setOnCurrentShardSince(st.s, coll, {}, now, 
-historyWindowInSeconds - 1000); // Perform some move so that those chunks will fall inside the history window and won't be able // to be merged @@ -299,7 +295,7 @@ function mergeAllChunksOnShardConsideringJumboFlagTest(st, testDB) { const now = buildInitialScenario(st, coll, shard0, shard1, historyWindowInSeconds); // Make sure that all chunks are out of the history window - setOnCurrentShardSince(configDB, coll, {}, now, -historyWindowInSeconds - 1000); + setOnCurrentShardSince(st.s, coll, {}, now, -historyWindowInSeconds - 1000); // Set jumbo flag to a couple of chunks // Setting a chunks as jumbo must prevent it from being merged @@ -353,7 +349,7 @@ function balancerTriggersAutomergerWhenIsEnabledTest(st, testDB) { const now = buildInitialScenario(st, coll, shard0, shard1, historyWindowInSeconds); // Make sure that all chunks are out of the history window - setOnCurrentShardSince(configDB, coll, {}, now, -historyWindowInSeconds - 1000); + setOnCurrentShardSince(st.s, coll, {}, now, -historyWindowInSeconds - 1000); }); // Override balancer round interval and merge throttling to speed up the test diff --git a/jstests/sharding/merge_chunks_test.js b/jstests/sharding/merge_chunks_test.js index 3f9daee196006..0bbb71cc58cd7 100644 --- a/jstests/sharding/merge_chunks_test.js +++ b/jstests/sharding/merge_chunks_test.js @@ -1,5 +1,7 @@ // // Tests that merging chunks via mongos works/doesn't work with different chunk configurations +// TODO SERVER-71169: Re-enable this test after shard filtering in CQF is implemented. +// @tags: [cqf_incompatible] // (function() { 'use strict'; diff --git a/jstests/sharding/merge_let_params_size_estimation.js b/jstests/sharding/merge_let_params_size_estimation.js new file mode 100644 index 0000000000000..66f30d3833575 --- /dev/null +++ b/jstests/sharding/merge_let_params_size_estimation.js @@ -0,0 +1,155 @@ +/** + * Test which verifies that $merge accounts for the size of let parameters and runtime constants + * when it serializes writes to send to other nodes. + * + * @tags: [ + * # The $merge in this test targets the '_id' field, and requires a unique index. + * expects_explicit_underscore_id_index, + * ] + */ +(function() { +"use strict"; + +load('jstests/libs/fixture_helpers.js'); // For isReplSet(). + +// Function to run the test against a test fixture. Accepts an object that contains the following +// fields: +// - testFixture: The fixture to run the test against. +// - conn: Connection to the test fixture specified above. +// - shardLocal and shardOutput: Indicates whether the local/output collection should be sharded in +// this test run (ignored when not running against a sharded cluster). +function runTest({testFixture, conn, shardLocal, shardOutput}) { + const dbName = "db"; + const collName = "merge_let_params"; + const dbCollName = dbName + "." + collName; + const outCollName = "outcoll"; + const dbOutCollName = dbName + "." + outCollName; + const admin = conn.getDB("admin"); + const isReplSet = FixtureHelpers.isReplSet(admin); + + function shardColls() { + // When running against a sharded cluster, configure the collections according to + // 'shardLocal' and 'shardOutput'. 
+ if (!isReplSet) { + assert.commandWorked(admin.runCommand({enableSharding: dbName})); + testFixture.ensurePrimaryShard(dbName, testFixture.shard0.shardName); + if (shardLocal) { + testFixture.shardColl(collName, {_id: 1}, {_id: 0}, {_id: 0}, dbName); + } + if (shardOutput) { + testFixture.shardColl(outCollName, {_id: 1}, {_id: 0}, {_id: 0}, dbName); + } + } + } + const coll = conn.getCollection(dbCollName); + const outColl = conn.getCollection(dbOutCollName); + coll.drop(); + outColl.drop(); + shardColls(); + + // Insert two large documents in both collections. By inserting the documents with the same _id + // values in both collections and splitting these documents between chunks, this will guarantee + // that we need to serialize and send update command(s) across the wire when targeting the + // output collection. + const kOneMB = 1024 * 1024; + const kDataString = "a".repeat(4 * kOneMB); + const kDocs = [{_id: 2, data: kDataString}, {_id: -2, data: kDataString}]; + assert.commandWorked(coll.insertMany(kDocs)); + assert.commandWorked(outColl.insertMany(kDocs)); + + // The sizes of the different update command components are deliberately chosen to test the + // batching logic when the update is targeted to another node in the cluster. In particular, the + // update command will contain the 10MB 'outFieldValue' and we will be updating two 4MB + // documents. The 18MB total exceeds the 16MB size limit, so we expect the batching logic to + // split the two documents into separate batches of 14MB each. + const outFieldValue = "a".repeat(10 * kOneMB); + let aggCommand = { + pipeline: [{ + $merge: { + into: {db: "db", coll: outCollName}, + on: "_id", + whenMatched: [{$addFields: {out: "$$outField"}}], + whenNotMatched: "insert" + } + }], + cursor: {}, + let : {"outField": outFieldValue} + }; + + // If this is a replica set, we need to target a secondary node to force writes to go over + // the wire. + const aggColl = isReplSet ? testFixture.getSecondary().getCollection(dbCollName) : coll; + + if (isReplSet) { + aggCommand["$readPreference"] = {mode: "secondary"}; + } + + // The aggregate should not fail. + assert.commandWorked(aggColl.runCommand("aggregate", aggCommand)); + + // Verify that each document in the output collection contains the value of 'outField'. + let outContents = outColl.find().toArray(); + for (const res of outContents) { + const out = res["out"]; + assert.eq(out, outFieldValue, outContents); + } + + assert(coll.drop()); + assert(outColl.drop()); + shardColls(); + + // Insert four large documents in both collections. As before, this will force updates to be + // sent across the wire, but this will generate double the batches. + const kMoreDocs = [ + {_id: -2, data: kDataString}, + {_id: -1, data: kDataString}, + {_id: 1, data: kDataString}, + {_id: 2, data: kDataString}, + ]; + + assert.commandWorked(coll.insertMany(kMoreDocs)); + assert.commandWorked(outColl.insertMany(kMoreDocs)); + + // The aggregate should not fail. + assert.commandWorked(aggColl.runCommand("aggregate", aggCommand)); + + // Verify that each document in the output collection contains the value of 'outField'. + outContents = outColl.find().toArray(); + for (const res of outContents) { + const out = res["out"]; + assert.eq(out, outFieldValue, outContents); + } + + assert(coll.drop()); + assert(outColl.drop()); + shardColls(); + + // If the documents and the let parameters are large enough, the $merge is expected to fail. 
+ const kVeryLargeDataString = "a".repeat(10 * kOneMB); + const kLargeDocs = + [{_id: 2, data: kVeryLargeDataString}, {_id: -2, data: kVeryLargeDataString}]; + assert.commandWorked(coll.insertMany(kLargeDocs)); + assert.commandWorked(outColl.insertMany(kLargeDocs)); + assert.commandFailedWithCode(aggColl.runCommand("aggregate", aggCommand), + ErrorCodes.BSONObjectTooLarge); +} + +// Test against a replica set. +const rst = new ReplSetTest({nodes: 2}); +rst.startSet(); +rst.initiate(); +rst.awaitSecondaryNodes(); + +runTest({testFixture: rst, conn: rst.getPrimary()}); + +rst.stopSet(); + +// Test against a sharded cluster. +const st = new ShardingTest({shards: 2, mongos: 1}); +runTest({testFixture: st, conn: st.s0, shardLocal: false, shardOutput: false}); +runTest({testFixture: st, conn: st.s0, shardLocal: true, shardOutput: false}); +runTest({testFixture: st, conn: st.s0, shardLocal: false, shardOutput: true}); +runTest({testFixture: st, conn: st.s0, shardLocal: true, shardOutput: true}); + +st.stop(); +})(); diff --git a/jstests/sharding/merge_split_chunks_test.js b/jstests/sharding/merge_split_chunks_test.js index c33dbcad94198..1ae7a1e767d80 100644 --- a/jstests/sharding/merge_split_chunks_test.js +++ b/jstests/sharding/merge_split_chunks_test.js @@ -1,6 +1,8 @@ // // Tests that merge, split and move chunks via mongos works/doesn't work with different chunk // configurations +// TODO SERVER-71169: Re-enable this test once shard filtering is implemented for CQF. +// @tags: [cqf_incompatible] // (function() { 'use strict'; diff --git a/jstests/sharding/merge_with_chunk_migrations.js b/jstests/sharding/merge_with_chunk_migrations.js index 097f469d29816..8fea5c8f05f02 100644 --- a/jstests/sharding/merge_with_chunk_migrations.js +++ b/jstests/sharding/merge_with_chunk_migrations.js @@ -13,7 +13,7 @@ const targetColl = mongosDB["target"]; function setAggHang(mode) { // Match on the output namespace to avoid hanging the sharding metadata refresh aggregation when - // shard0 is a catalog shard. + // shard0 is a config shard. assert.commandWorked(st.shard0.adminCommand({ configureFailPoint: "hangBeforeDocumentSourceCursorLoadBatch", mode: mode, diff --git a/jstests/sharding/merge_with_drop_shard.js b/jstests/sharding/merge_with_drop_shard.js index ae6c83297f668..d50cc3e015cc0 100644 --- a/jstests/sharding/merge_with_drop_shard.js +++ b/jstests/sharding/merge_with_drop_shard.js @@ -22,7 +22,7 @@ st.ensurePrimaryShard(mongosDB.getName(), st.shard0.name); function setAggHang(mode) { // Match on the output namespace to avoid hanging the sharding metadata refresh aggregation when - // shard0 is a catalog shard. + // shard0 is a config shard. 
assert.commandWorked(st.shard0.adminCommand({ configureFailPoint: "hangWhileBuildingDocumentSourceMergeBatch", mode: mode, diff --git a/jstests/sharding/migrateBig.js b/jstests/sharding/migrateBig.js index ba2f985f8939f..923b59b31296c 100644 --- a/jstests/sharding/migrateBig.js +++ b/jstests/sharding/migrateBig.js @@ -1,8 +1,3 @@ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); - var s = new ShardingTest({name: "migrateBig", shards: 2, other: {chunkSize: 1}}); assert.commandWorked( @@ -63,4 +58,3 @@ s.startBalancer(); s.awaitBalance('foo', 'test', 60 * 1000); s.stop(); -})(); diff --git a/jstests/sharding/migration_coordinator_abort_failover.js b/jstests/sharding/migration_coordinator_abort_failover.js index aca052bf42d4f..961b14d8f572e 100644 --- a/jstests/sharding/migration_coordinator_abort_failover.js +++ b/jstests/sharding/migration_coordinator_abort_failover.js @@ -2,10 +2,7 @@ * Tests that a donor resumes coordinating a migration if it fails over after creating the * migration coordinator document but before deleting it. * - * Assumes a donor stepdown will trigger a failover migration response, but if donor is catalog - * shard, it will trigger a full retry from mongos, which leads to a successful retry despite the - * original interrupted attempt correctly failing. See if the test can be reworked. - * @tags: [temporary_catalog_shard_incompatible] + * @tags: [requires_fcv_71] */ // This test induces failovers on shards. @@ -24,82 +21,73 @@ var st = new ShardingTest({shards: 2, rs: {nodes: 2}}); assert.commandWorked(st.s.adminCommand({enableSharding: dbName})); assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shardName})); -runMoveChunkMakeDonorStepDownAfterFailpoint(st, - dbName, - "moveChunkHangAtStep3", - false /* shouldMakeMigrationFailToCommitOnConfig */, - ErrorCodes.OperationFailed); +runMoveChunkMakeDonorStepDownAfterFailpoint( + st, dbName, "moveChunkHangAtStep3", false /* shouldMakeMigrationFailToCommitOnConfig */); -runMoveChunkMakeDonorStepDownAfterFailpoint(st, - dbName, - "moveChunkHangAtStep4", - false /* shouldMakeMigrationFailToCommitOnConfig */, - ErrorCodes.OperationFailed); +runMoveChunkMakeDonorStepDownAfterFailpoint( + st, dbName, "moveChunkHangAtStep4", false /* shouldMakeMigrationFailToCommitOnConfig */); -runMoveChunkMakeDonorStepDownAfterFailpoint(st, - dbName, - "moveChunkHangAtStep5", - false /* shouldMakeMigrationFailToCommitOnConfig */, - ErrorCodes.OperationFailed); +runMoveChunkMakeDonorStepDownAfterFailpoint( + st, dbName, "moveChunkHangAtStep5", false /* shouldMakeMigrationFailToCommitOnConfig */); runMoveChunkMakeDonorStepDownAfterFailpoint( st, dbName, "hangInEnsureChunkVersionIsGreaterThanThenSimulateErrorUninterruptible", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); runMoveChunkMakeDonorStepDownAfterFailpoint( st, dbName, "hangInRefreshFilteringMetadataUntilSuccessThenSimulateErrorUninterruptible", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); runMoveChunkMakeDonorStepDownAfterFailpoint( st, dbName, "hangInPersistMigrateAbortDecisionThenSimulateErrorUninterruptible", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); runMoveChunkMakeDonorStepDownAfterFailpoint( st, dbName,
"hangInDeleteRangeDeletionLocallyThenSimulateErrorUninterruptible", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); runMoveChunkMakeDonorStepDownAfterFailpoint( st, dbName, "hangInReadyRangeDeletionOnRecipientThenSimulateErrorUninterruptible", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); runMoveChunkMakeDonorStepDownAfterFailpoint(st, dbName, "hangInAdvanceTxnNumThenSimulateErrorUninterruptible", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); runMoveChunkMakeDonorStepDownAfterFailpoint(st, dbName, "hangBeforeMakingAbortDecisionDurable", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); runMoveChunkMakeDonorStepDownAfterFailpoint(st, dbName, "hangBeforeSendingAbortDecision", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); runMoveChunkMakeDonorStepDownAfterFailpoint(st, dbName, "hangBeforeForgettingMigrationAfterAbortDecision", true /* shouldMakeMigrationFailToCommitOnConfig */, - [ErrorCodes.OperationFailed, ErrorCodes.StaleEpoch]); + [ErrorCodes.StaleEpoch]); st.stop(); })(); diff --git a/jstests/sharding/migration_coordinator_failover_include.js b/jstests/sharding/migration_coordinator_failover_include.js index faeb33b0ad9aa..de7844c49844e 100644 --- a/jstests/sharding/migration_coordinator_failover_include.js +++ b/jstests/sharding/migration_coordinator_failover_include.js @@ -58,10 +58,12 @@ function runMoveChunkMakeDonorStepDownAfterFailpoint(st, }, ns, st.shard1.shardName, expectAbortDecisionWithCode), st.s.port); failpointHandle.wait(); - jsTest.log("Make the donor primary step down."); + jsTest.log("Make the donor primary step down and the donor secondary step up."); + const donorSecondary = st.rs0.getSecondary(); assert.commandWorked( st.rs0.getPrimary().adminCommand({replSetStepDown: 10 /* stepDownSecs */, force: true})); failpointHandle.off(); + st.rs0.stepUp(donorSecondary); jsTest.log("Allow the moveChunk to finish."); awaitResult(); diff --git a/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js b/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js index 1a39a9b43c3b1..a6e80016ed358 100644 --- a/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js +++ b/jstests/sharding/migration_coordinator_shutdown_in_critical_section.js @@ -3,11 +3,13 @@ * _configsvrEnsureChunkVersionIsGreaterThan and while the node is forcing a filtering metadata * refresh. * + * Shuts down a donor shard which leads mongos to retry if the donor is also the config server, and + * this can fail waiting for read preference if the shard is slow to recover. 
* @tags: [ * does_not_support_stepdowns, * # Require persistence to restart nodes * requires_persistence, - * temporary_catalog_shard_incompatible, + * config_shard_incompatible, * ] */ diff --git a/jstests/sharding/migration_critical_section_concurrency.js b/jstests/sharding/migration_critical_section_concurrency.js index db9f6c7b74928..0d0dd8bda6f62 100644 --- a/jstests/sharding/migration_critical_section_concurrency.js +++ b/jstests/sharding/migration_critical_section_concurrency.js @@ -1,7 +1,7 @@ // This test ensures that if one collection is its migration critical section, this won't stall // operations for other sharded or unsharded collections -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); (function() { 'use strict'; diff --git a/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js b/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js index f2af78f5d8fb0..28cec7c386d44 100644 --- a/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js +++ b/jstests/sharding/migration_fails_if_exists_in_rangedeletions.js @@ -17,12 +17,6 @@ const ns = dbName + "." + collName; let st = new ShardingTest({shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}, other: {enableBalancer: false}}); -// Increase timeout for range deletion of overlapping range on recipient. -st.shard0.rs.getPrimary().adminCommand( - {setParameter: 1, receiveChunkWaitForRangeDeleterTimeoutMS: 90000}); -st.shard1.rs.getPrimary().adminCommand( - {setParameter: 1, receiveChunkWaitForRangeDeleterTimeoutMS: 90000}); - (() => { jsTestLog("Test simple shard key"); diff --git a/jstests/sharding/migration_ignore_interrupts_1.js b/jstests/sharding/migration_ignore_interrupts_1.js index 0642593c6e34c..d69febc37d680 100644 --- a/jstests/sharding/migration_ignore_interrupts_1.js +++ b/jstests/sharding/migration_ignore_interrupts_1.js @@ -3,7 +3,7 @@ // 1. coll2 shard0 to shard2 -- shard0 can't send two chunks simultaneously. // 2. coll2 shard2 to shard1 -- shard1 can't receive two chunks simultaneously. -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); (function() { "use strict"; diff --git a/jstests/sharding/migration_ignore_interrupts_2.js b/jstests/sharding/migration_ignore_interrupts_2.js index fa496d84318ae..96e6eb8981419 100644 --- a/jstests/sharding/migration_ignore_interrupts_2.js +++ b/jstests/sharding/migration_ignore_interrupts_2.js @@ -1,7 +1,7 @@ // When a migration between shard0 and shard1 is about to enter the commit phase, a commit command // with different migration session ID is rejected. -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); (function() { "use strict"; diff --git a/jstests/sharding/migration_recovers_unfinished_migrations.js b/jstests/sharding/migration_recovers_unfinished_migrations.js index 7203e3540a697..65e1cb940505d 100644 --- a/jstests/sharding/migration_recovers_unfinished_migrations.js +++ b/jstests/sharding/migration_recovers_unfinished_migrations.js @@ -7,7 +7,9 @@ * # that migration by sending a new `moveChunk` command to the donor shard causing the test to * # hang. * does_not_support_stepdowns, - * temporary_catalog_shard_incompatible, + * # Flaky with a config shard because the failovers it triggers trigger a retry from mongos, + * # which can prevent the fail point from being unset and time out. 
+ * config_shard_incompatible, * ] */ (function() { @@ -15,6 +17,7 @@ load("jstests/libs/fail_point_util.js"); load('jstests/libs/chunk_manipulation_util.js'); +load('jstests/replsets/rslib.js'); // Disable checking for index consistency to ensure that the config server doesn't trigger a // StaleShardVersion exception on the shards and cause them to refresh their sharding metadata. That @@ -27,6 +30,7 @@ const nodeOptions = { // the shards that would interfere with the migration recovery interleaving this test requires. var st = new ShardingTest({ shards: {rs0: {nodes: 2}, rs1: {nodes: 1}}, + config: 3, other: {configOptions: nodeOptions, enableBalancer: false} }); let staticMongod = MongoRunner.runMongod({}); @@ -62,7 +66,8 @@ const rs0Secondary = st.rs0.getSecondary(); let hangInEnsureChunkVersionIsGreaterThanInterruptibleFailpoint = configureFailPoint(rs0Secondary, "hangInEnsureChunkVersionIsGreaterThanInterruptible"); -assert.commandWorked(st.rs0.getPrimary().adminCommand({replSetStepDown: 60, force: true})); +st.rs0.stepUp(rs0Secondary); + joinMoveChunk1(); migrationCommitNetworkErrorFailpoint.off(); skipShardFilteringMetadataRefreshFailpoint.off(); diff --git a/jstests/sharding/migration_server_status.js b/jstests/sharding/migration_server_status.js index ab73e053b82da..d1661e140247f 100644 --- a/jstests/sharding/migration_server_status.js +++ b/jstests/sharding/migration_server_status.js @@ -2,14 +2,10 @@ * Tests that serverStatus includes a migration status when called on the source shard of an active * migration. * - * @tags: [requires_fcv_63, temporary_catalog_shard_incompatible] + * @tags: [requires_fcv_63] */ -load('./jstests/libs/chunk_manipulation_util.js'); -load("jstests/libs/feature_flag_util.js"); - -(function() { -'use strict'; +load('jstests/libs/chunk_manipulation_util.js'); var staticMongod = MongoRunner.runMongod({}); // For startParallelOps. @@ -18,8 +14,6 @@ var st = new ShardingTest({shards: 2, mongos: 1}); var mongos = st.s0; var admin = mongos.getDB("admin"); var coll = mongos.getCollection("migration_server_status.coll"); -const usingSetClusterParameter = - FeatureFlagUtil.isPresentAndEnabled(st.config, "ClusterCardinalityParameter"); assert.commandWorked( admin.runCommand({enableSharding: coll.getDB() + "", primaryShard: st.shard0.shardName})); @@ -74,10 +68,8 @@ var assertSessionMigrationStatusSource = function( if (expectedEntriesSkippedLowerBound == null) { assert(migrationResult.sessionOplogEntriesSkippedSoFarLowerBound); } else { - // Running DDL operations increases this number by 1 - let actualEntriesSkippedLowerBound = usingSetClusterParameter - ? expectedEntriesSkippedLowerBound + 1 - : expectedEntriesSkippedLowerBound; + // Running DDL operations increases this number by 1, and addShard runs setClusterParameter + let actualEntriesSkippedLowerBound = expectedEntriesSkippedLowerBound + 1; assert.eq(migrationResult.sessionOplogEntriesSkippedSoFarLowerBound, actualEntriesSkippedLowerBound); } @@ -141,7 +133,12 @@ assertMigrationStatusOnServerStatus(shard0ServerStatus, {"_id": 0}, {"_id": {"$maxKey": 1}}, coll + ""); -assertSessionMigrationStatusSource(shard0ServerStatus, 2400, 2600); +// Background metadata operations on the config server can throw off the count, so just assert the +// fields are present for a config shard. +const expectedEntriesMigrated = TestData.configShard ? undefined : 2400; +const expectedEntriesSkipped = TestData.configShard ? 
undefined : 2600; +assertSessionMigrationStatusSource( + shard0ServerStatus, expectedEntriesMigrated, expectedEntriesSkipped); // Destination shard should have the correct server status shard1ServerStatus = st.shard1.getDB('admin').runCommand({serverStatus: 1}); @@ -153,7 +150,8 @@ assertMigrationStatusOnServerStatus(shard1ServerStatus, {"_id": 0}, {"_id": {"$maxKey": 1}}, coll + ""); -assertSessionMigrationStatusDestination(shard1ServerStatus, 2400); +assertSessionMigrationStatusDestination( + shard1ServerStatus, expectedEntriesMigrated, expectedEntriesSkipped); unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.chunkDataCommitted); @@ -167,4 +165,3 @@ assert(!shard1ServerStatus.sharding.migrations); st.stop(); MongoRunner.stopMongod(staticMongod); -})(); diff --git a/jstests/sharding/migration_sets_fromMigrate_flag.js b/jstests/sharding/migration_sets_fromMigrate_flag.js index 34edecdadac3d..e628240c69b81 100644 --- a/jstests/sharding/migration_sets_fromMigrate_flag.js +++ b/jstests/sharding/migration_sets_fromMigrate_flag.js @@ -14,7 +14,7 @@ // delete op is done during chunk migration within the chunk range. // -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); (function() { "use strict"; diff --git a/jstests/sharding/migration_waits_for_majority_commit.js b/jstests/sharding/migration_waits_for_majority_commit.js index 72ddd53c329fa..4fbfaa727910b 100644 --- a/jstests/sharding/migration_waits_for_majority_commit.js +++ b/jstests/sharding/migration_waits_for_majority_commit.js @@ -9,7 +9,7 @@ (function() { "use strict"; -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); load("jstests/libs/write_concern_util.js"); // Set up a sharded cluster with two shards, two chunks, and one document in one of the chunks. diff --git a/jstests/sharding/migration_with_source_ops.js b/jstests/sharding/migration_with_source_ops.js index 76dfbb9b23920..4ddc514e6f4dd 100644 --- a/jstests/sharding/migration_with_source_ops.js +++ b/jstests/sharding/migration_with_source_ops.js @@ -15,7 +15,7 @@ // chunk is empty. // -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); (function() { "use strict"; diff --git a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js deleted file mode 100644 index 133a1d3f84b77..0000000000000 --- a/jstests/sharding/min_optime_recovery_on_failed_move_chunk_commit.js +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Tests that the shard will update the min optime recovery document after startup. - * @tags: [requires_persistence] - */ -(function() { -"use strict"; - -var st = new ShardingTest({shards: 1}); - -// Insert a recovery doc with non-zero minOpTimeUpdaters to simulate a migration -// process that crashed in the middle of the critical section. - -var recoveryDoc = { - _id: 'minOpTimeRecovery', - minOpTime: {ts: Timestamp(0, 0), t: 0}, - minOpTimeUpdaters: 2 -}; - -assert.commandWorked(st.shard0.getDB('admin').system.version.insert(recoveryDoc)); - -// Make sure test is setup correctly. 
-var minOpTimeRecoveryDoc = - st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'}); - -assert.neq(null, minOpTimeRecoveryDoc); -assert.eq(0, minOpTimeRecoveryDoc.minOpTime.ts.getTime()); -assert.eq(2, minOpTimeRecoveryDoc.minOpTimeUpdaters); - -st.restartShardRS(0); - -// After the restart, the shard should have updated the opTime and reset minOpTimeUpdaters. -minOpTimeRecoveryDoc = st.shard0.getDB('admin').system.version.findOne({_id: 'minOpTimeRecovery'}); - -assert.neq(null, minOpTimeRecoveryDoc); -assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0); -assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters); - -st.stop(); -})(); diff --git a/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js b/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js deleted file mode 100644 index ffbe5ab8ef0e4..0000000000000 --- a/jstests/sharding/min_optime_recovery_on_successful_move_chunk_commit.js +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Tests that the minOpTimeRecovery document will be created after a migration. - */ -(function() { -"use strict"; - -var st = new ShardingTest({shards: 2}); - -var testDB = st.s.getDB('test'); -testDB.adminCommand({enableSharding: 'test'}); -st.ensurePrimaryShard('test', st.shard0.shardName); -testDB.adminCommand({shardCollection: 'test.user', key: {x: 1}}); - -var priConn = st.configRS.getPrimary(); -var replStatus = priConn.getDB('admin').runCommand({replSetGetStatus: 1}); -replStatus.members.forEach(function(memberState) { - if (memberState.state == 1) { // if primary - assert.neq(null, memberState.optime); - assert.neq(null, memberState.optime.ts); - assert.neq(null, memberState.optime.t); - } -}); - -testDB.adminCommand({moveChunk: 'test.user', find: {x: 0}, to: st.shard1.shardName}); - -var shardAdmin = st.rs0.getPrimary().getDB('admin'); -var minOpTimeRecoveryDoc = shardAdmin.system.version.findOne({_id: 'minOpTimeRecovery'}); - -assert.neq(null, minOpTimeRecoveryDoc); -assert.eq('minOpTimeRecovery', minOpTimeRecoveryDoc._id); -assert.gt(minOpTimeRecoveryDoc.minOpTime.ts.getTime(), 0); -assert.eq(0, minOpTimeRecoveryDoc.minOpTimeUpdaters); - -st.stop(); -})(); diff --git a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js index b1a5c5c22d820..ac8a5e2274be8 100644 --- a/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js +++ b/jstests/sharding/mongod_returns_no_cluster_time_without_keys.js @@ -56,13 +56,13 @@ adminDB.auth(adminUser.username, adminUser.password); assert(st.s.getDB("admin").system.keys.count() >= 2); let priRSConn = st.rs0.getPrimary().getDB("admin"); -if (TestData.catalogShard) { - // In catalog shard mode we've already used up the localhost exception on the first shard, so we +if (TestData.configShard) { + // In config shard mode we've already used up the localhost exception on the first shard, so we // have to auth to create the user below. 
priRSConn.auth(adminUser.username, adminUser.password); } priRSConn.createUser({user: rUser.username, pwd: rUser.password, roles: ["root"]}); -if (TestData.catalogShard) { +if (TestData.configShard) { priRSConn.logout(); } priRSConn.auth(rUser.username, rUser.password); diff --git a/jstests/sharding/mongos_helloOk_protocol.js b/jstests/sharding/mongos_helloOk_protocol.js index 2579d0404a669..18f102cba30c0 100644 --- a/jstests/sharding/mongos_helloOk_protocol.js +++ b/jstests/sharding/mongos_helloOk_protocol.js @@ -23,4 +23,4 @@ assert.eq("boolean", typeof res.helloOk, "helloOk field is not a boolean" + tojs assert.eq(res.helloOk, true); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/mongos_precache_routing_info.js b/jstests/sharding/mongos_precache_routing_info.js index 573391a3b59c2..7a0f4031d88a8 100644 --- a/jstests/sharding/mongos_precache_routing_info.js +++ b/jstests/sharding/mongos_precache_routing_info.js @@ -1,5 +1,3 @@ -// @tags: [requires_fcv_70] - (function() { 'use strict'; diff --git a/jstests/sharding/mongos_wait_csrs_initiate.js b/jstests/sharding/mongos_wait_csrs_initiate.js index d85266037deea..883b8a8e484dd 100644 --- a/jstests/sharding/mongos_wait_csrs_initiate.js +++ b/jstests/sharding/mongos_wait_csrs_initiate.js @@ -21,6 +21,7 @@ assert.commandWorked( jsTestLog("getting mongos"); var e; +let mongos2; assert.soon( function() { try { diff --git a/jstests/sharding/move_chunk_allowMigrations.js b/jstests/sharding/move_chunk_allowMigrations.js index f13b5f9384ca2..a1004d767a88e 100644 --- a/jstests/sharding/move_chunk_allowMigrations.js +++ b/jstests/sharding/move_chunk_allowMigrations.js @@ -8,10 +8,6 @@ * does_not_support_stepdowns, * ] */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); load('jstests/libs/fail_point_util.js'); load('jstests/libs/parallel_shell_helpers.js'); load("jstests/sharding/libs/find_chunks_util.js"); @@ -204,5 +200,4 @@ testAllowMigrationsFalseDisablesBalancer(false /* allowMigrations */, {}); testAllowMigrationsFalseDisablesBalancer(false /* allowMigrations */, {noBalance: false}); testAllowMigrationsFalseDisablesBalancer(false /* allowMigrations */, {noBalance: true}); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/move_chunk_concurrent_cloning.js b/jstests/sharding/move_chunk_concurrent_cloning.js index 0bda3e375b699..0e4a4aa6622f7 100644 --- a/jstests/sharding/move_chunk_concurrent_cloning.js +++ b/jstests/sharding/move_chunk_concurrent_cloning.js @@ -7,7 +7,7 @@ (function() { "use strict"; -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); const runParallelMoveChunk = (numThreads) => { // For startParallelOps to write its state diff --git a/jstests/sharding/move_chunk_deferred_lookup.js b/jstests/sharding/move_chunk_deferred_lookup.js new file mode 100644 index 0000000000000..7e3a9149c9438 --- /dev/null +++ b/jstests/sharding/move_chunk_deferred_lookup.js @@ -0,0 +1,101 @@ +/** + * Ensure that updates are not lost if they are made between processing deferred updates and reading + * from the updates list in _transferMods. 
+ * + * @tags: [uses_transactions, uses_prepare_transaction, requires_persistence] + */ + +(function() { +"use strict"; +load('jstests/libs/chunk_manipulation_util.js'); +load("jstests/libs/fail_point_util.js"); +load('jstests/replsets/rslib.js'); +load('jstests/sharding/libs/create_sharded_collection_util.js'); + +const dbName = "test"; +const collName = "user"; +const staticMongod = MongoRunner.runMongod({}); +const st = new ShardingTest({shards: {rs0: {nodes: 2}, rs1: {nodes: 1}}}); +const collection = st.s.getDB(dbName).getCollection(collName); +const lsid = { + id: UUID() +}; +const txnNumber = 0; + +function setup() { + CreateShardedCollectionUtil.shardCollectionWithChunks(collection, {_id: 1}, [ + {min: {_id: MinKey}, max: {_id: 10}, shard: st.shard0.shardName}, + {min: {_id: 10}, max: {_id: MaxKey}, shard: st.shard1.shardName}, + ]); + + for (let i = 0; i < 20; i++) { + assert.commandWorked(collection.insertOne({_id: i, x: i})); + } +} + +function prepareTransactionAndTriggerFailover() { + assert.commandWorked(st.s.getDB(dbName).runCommand({ + update: collName, + updates: [ + {q: {_id: 1}, u: {$set: {x: 5}}}, + {q: {_id: 2}, u: {$set: {x: -10}}}, + ], + lsid: lsid, + txnNumber: NumberLong(txnNumber), + stmtId: NumberInt(0), + startTransaction: true, + autocommit: false, + })); + + const result = assert.commandWorked(st.shard0.getDB(dbName).adminCommand({ + prepareTransaction: 1, + lsid: lsid, + txnNumber: NumberLong(txnNumber), + autocommit: false, + writeConcern: {w: "majority"}, + })); + + let oldSecondary = st.rs0.getSecondary(); + + st.rs0.stepUp(oldSecondary); + + awaitRSClientHosts(st.s, oldSecondary, {ok: true, ismaster: true}); + + return result.prepareTimestamp; +} + +function commitPreparedTransaction(prepareTimestamp) { + assert.commandWorked( + st.shard0.getDB(dbName).adminCommand(Object.assign({ + commitTransaction: 1, + lsid: lsid, + txnNumber: NumberLong(txnNumber), + autocommit: false, + }, + {commitTimestamp: prepareTimestamp}))); +} + +function runMoveChunkAndCommitTransaction() { + const joinMoveChunk = moveChunkParallel( + staticMongod, st.s.host, {_id: 1}, null, 'test.user', st.shard1.shardName); + pauseMigrateAtStep(st.shard1, migrateStepNames.catchup); + waitForMoveChunkStep(st.shard0, moveChunkStepNames.startedMoveChunk); + commitPreparedTransaction(prepareTimestamp); + unpauseMigrateAtStep(st.shard1, migrateStepNames.catchup); + return joinMoveChunk; +} + +setup(); +const prepareTimestamp = prepareTransactionAndTriggerFailover(); +const fp = configureFailPoint(st.rs0.getPrimary(), "hangAfterProcessingDeferredXferMods"); +const joinMoveChunk = runMoveChunkAndCommitTransaction(); +fp.wait(); +assert.commandWorked(st.s.getDB(dbName).getCollection(collName).update({_id: 4}, {$set: {x: 501}})); +fp.off(); +joinMoveChunk(); +assert.eq(collection.findOne({_id: 4}).x, 501); + +st.stop(); + +MongoRunner.stopMongod(staticMongod); +})(); diff --git a/jstests/sharding/move_chunk_open_cursors.js b/jstests/sharding/move_chunk_open_cursors.js index 312f8143048dc..a6a6b5f41dbb8 100644 --- a/jstests/sharding/move_chunk_open_cursors.js +++ b/jstests/sharding/move_chunk_open_cursors.js @@ -1,6 +1,8 @@ /** * Tests that cursors opened before a chunk is moved will not see the effects of the chunk * migration. + * TODO SERVER-71169: Re-enable this test once shard filtering is implemented for CQF. 
+ * @tags: [cqf_incompatible] */ (function() { "use strict"; diff --git a/jstests/sharding/move_chunk_remove_shard.js b/jstests/sharding/move_chunk_remove_shard.js index 6cafa8a4a3cb1..c99a5874c4fce 100644 --- a/jstests/sharding/move_chunk_remove_shard.js +++ b/jstests/sharding/move_chunk_remove_shard.js @@ -12,6 +12,7 @@ load('jstests/libs/chunk_manipulation_util.js'); load('jstests/sharding/libs/remove_shard_util.js'); +load('jstests/libs/fail_point_util.js'); // TODO SERVER-50144 Remove this and allow orphan checking. // This test calls removeShard which can leave docs in config.rangeDeletions in state "pending", @@ -30,13 +31,8 @@ assert.commandWorked(st.s.adminCommand({split: 'test.user', middle: {x: 0}})); pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState); -st.forEachConfigServer((conn) => { - conn.adminCommand({ - configureFailPoint: 'overrideBalanceRoundInterval', - mode: 'alwaysOn', - data: {intervalMs: 200} - }); -}); +configureFailPointForRS( + st.configRS.nodes, 'overrideBalanceRoundInterval', {intervalMs: 200}, 'alwaysOn'); let joinMoveChunk = moveChunkParallel(staticMongod, st.s.host, diff --git a/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js b/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js index a22537e87ec10..de6a067a24698 100644 --- a/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js +++ b/jstests/sharding/move_chunk_update_shard_key_in_retryable_write.js @@ -13,7 +13,7 @@ "use strict"; load('jstests/sharding/libs/sharded_transactions_helpers.js'); -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); // For startParallelOps to write its state let staticMongod = MongoRunner.runMongod({}); diff --git a/jstests/sharding/move_chunk_with_in_progress_index_builds.js b/jstests/sharding/move_chunk_with_in_progress_index_builds.js new file mode 100644 index 0000000000000..0230126409a4d --- /dev/null +++ b/jstests/sharding/move_chunk_with_in_progress_index_builds.js @@ -0,0 +1,135 @@ +/* + * Tests to validate the different behaviours of the moveChunk with concurrent index builds + * creation. + * + * @tags: [ + * requires_fcv_70, + * ] + */ + +(function() { +'use strict'; + +load("jstests/sharding/libs/create_sharded_collection_util.js"); +load("jstests/libs/fail_point_util.js"); +load('jstests/noPassthrough/libs/index_build.js'); + +// Configure initial sharding cluster +const st = new ShardingTest({}); +let dbCounter = 0; + +function setupCollection() { + const db = st.s.getDB("test" + dbCounter++); + const coll = db.coll; + + assert.commandWorked( + st.s.adminCommand({enableSharding: db.getName(), primaryShard: st.shard0.shardName})); + CreateShardedCollectionUtil.shardCollectionWithChunks(coll, {x: 1}, [ + {min: {x: MinKey}, max: {x: 1}, shard: st.shard0.shardName}, + {min: {x: 1}, max: {x: MaxKey}, shard: st.shard1.shardName}, + ]); + + return coll; +} + +// Test the correct behaviour of the moveChunk when it is not the first migration to a shard, the +// collection is empty and there is an index build in progress. This moveChunk must succeed because +// it will wait for the index build to be finished before completing the migration. 
+(function testSucceedFirstMigrationWithInProgressIndexBuild() { + const coll = setupCollection(); + const db = coll.getDB(); + const ns = coll.getFullName(); + + // Insert documents to force a two-phase index build + coll.insert({x: 10}); + + // Create new index and pause its build on shard1 + const hangIndexBuildBeforeCommit = configureFailPoint(st.shard1, "hangIndexBuildBeforeCommit"); + const awaitIndexBuild = IndexBuildTest.startIndexBuild(db.getMongo(), ns, {y: 1}); + hangIndexBuildBeforeCommit.wait(); + + // Migrate all chunks from shard1 to shard0 + assert.commandWorked(st.s.adminCommand( + {moveChunk: ns, find: {x: 10}, to: st.shard0.shardName, _waitForDelete: true})); + + // Migrate one chunk from shard0 to shard1 in a parallel shell + const hangMigrationRecipientBeforeWaitingNoIndexBuildInProgress = + configureFailPoint(st.shard1, "hangMigrationRecipientBeforeWaitingNoIndexBuildInProgress"); + const shardName = st.shard1.shardName; + const awaitMoveChunkShell = startParallelShell( + funWithArgs(function(shardName, ns) { + const mongos = db.getMongo(); + assert.commandWorked( + mongos.adminCommand({moveChunk: ns, find: {x: 10}, to: shardName})); + }, shardName, ns), st.s.port); + + // Wait until the chunk migration is about to wait for the in-progress index builds + hangMigrationRecipientBeforeWaitingNoIndexBuildInProgress.wait(); + hangMigrationRecipientBeforeWaitingNoIndexBuildInProgress.off(); + + // Finish the index build on shard1 + hangIndexBuildBeforeCommit.off(); + + // Finally, check that the index build and the last moveChunk succeed + awaitIndexBuild(); + awaitMoveChunkShell(); +})(); + +// Test the correct behaviour of the moveChunk when it is the first migration to a shard, the +// collection is not empty and there is an index build in progress. This moveChunk must fail until +// the index build is finished or there is no range deletion. +(function testFailedFirstMigrationWithInProgressIndexBuild() { + const coll = setupCollection(); + const ns = coll.getFullName(); + + // Pause range deletion on shard1 + const suspendRangeDeletion = configureFailPoint(st.shard1, "suspendRangeDeletion"); + + // Insert documents to force a two-phase index build + coll.insert({x: 10}); + + // Create new index and pause its build on shard1 + const hangIndexBuildBeforeCommit = configureFailPoint(st.shard1, "hangIndexBuildBeforeCommit"); + const awaitIndexBuild = IndexBuildTest.startIndexBuild(coll.getMongo(), ns, {y: 1}); + hangIndexBuildBeforeCommit.wait(); + + // Migrate all chunks from shard1 to shard0 + assert.commandWorked( + st.s.adminCommand({moveChunk: ns, find: {x: 10}, to: st.shard0.shardName})); + + // Migrate one chunk from shard0 to shard1 and expect the migration to fail + const res = st.s.adminCommand({moveChunk: ns, find: {x: -10}, to: st.shard1.shardName}); + assert.commandFailedWithCode(res, ErrorCodes.OperationFailed); + assert.includes( + res.errmsg, "Non-trivial index creation should be scheduled manually", tojson(res)); + + suspendRangeDeletion.off(); + hangIndexBuildBeforeCommit.off(); + awaitIndexBuild(); +})(); + +// Test the correct behaviour of the moveChunk when it is not the first migration to a shard, the +// collection is not empty and there is an index build in progress. This moveChunk must succeed +// before finishing the index build.
+(function testSucceedNotFirstMigrationWithInProgressIndexBuild() { + const coll = setupCollection(); + const ns = coll.getFullName(); + + // Insert documents to force a two-phase index build + coll.insert({x: 10}); + + // Create new index and pause its build on shard1 + const hangIndexBuildBeforeCommit = configureFailPoint(st.shard1, "hangIndexBuildBeforeCommit"); + const awaitIndexBuild = IndexBuildTest.startIndexBuild(coll.getMongo(), ns, {y: 1}); + hangIndexBuildBeforeCommit.wait(); + + // Migrate one chunk from shard0 to shard1 + assert.commandWorked( + st.s.adminCommand({moveChunk: ns, find: {x: -10}, to: st.shard1.shardName})); + + hangIndexBuildBeforeCommit.off(); + awaitIndexBuild(); +})(); + +st.stop(); +})(); diff --git a/jstests/sharding/move_primary/move_primary_recipient_cmds_basic.js b/jstests/sharding/move_primary/move_primary_recipient_cmds_basic.js deleted file mode 100644 index bd4001df35520..0000000000000 --- a/jstests/sharding/move_primary/move_primary_recipient_cmds_basic.js +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Tests that MovePrimaryRecipient commands work as intended. - * - * @tags: [ - * requires_fcv_70, - * featureFlagOnlineMovePrimaryLifecycle - * ] - */ -(function() { -'use strict'; - -load("jstests/libs/collection_drop_recreate.js"); - -const st = new ShardingTest({mongos: 1, shards: 2}); - -const mongos = st.s0; -const donor = st.shard0; -const recipient = st.shard1; - -const dbName = jsTestName(); -const testDB = donor.getDB(dbName); -const collName = 'testcoll0'; - -assert.commandWorked(mongos.adminCommand({enableSharding: dbName, primaryShard: donor.shardName})); - -const donorColl0 = assertDropAndRecreateCollection(testDB, collName); -const recipientColl0 = recipient.getDB(dbName).getCollection(collName); - -assert.commandWorked(donorColl0.insert([{a: 1}, {b: 1}])); - -assert.eq(2, donorColl0.find().itcount(), "Donor does not have data before move"); -assert.eq(0, recipientColl0.find().itcount(), "Recipient has data before move"); - -function runRecipientSyncDataCmds(uuid) { - assert.commandWorked(recipient.adminCommand({ - _movePrimaryRecipientSyncData: 1, - migrationId: uuid, - databaseName: dbName, - fromShardName: donor.shardName, - toShardName: recipient.shardName - })); - - assert.commandWorked(recipient.adminCommand({ - _movePrimaryRecipientSyncData: 1, - returnAfterReachingDonorTimestamp: new Timestamp(1, 1), - migrationId: uuid, - databaseName: dbName, - fromShardName: donor.shardName, - toShardName: recipient.shardName - })); -} - -// Test that _movePrimaryRecipientSyncData commands work followed by -// _movePrimaryRecipientForgetMigration. - -let uuid = UUID(); -runRecipientSyncDataCmds(uuid); - -assert.commandWorked(recipient.adminCommand({ - _movePrimaryRecipientForgetMigration: 1, - migrationId: uuid, - databaseName: dbName, - fromShardName: donor.shardName, - toShardName: recipient.shardName -})); - -assert.eq(2, recipientColl0.count(), "Data has not been cloned to the Recipient correctly"); - -// Test that _movePrimaryRecipientForgetMigration called on an already forgotten migration succeeds -assert.commandWorked(recipient.adminCommand({ - _movePrimaryRecipientForgetMigration: 1, - migrationId: uuid, - databaseName: dbName, - fromShardName: donor.shardName, - toShardName: recipient.shardName -})); - -// Test that _movePrimaryRecipientAbortMigration command aborts an ongoing movePrimary op. 
-assertDropCollection(recipient.getDB(dbName), collName); -uuid = UUID(); - -runRecipientSyncDataCmds(uuid); - -assert.commandWorked(recipient.adminCommand({ - _movePrimaryRecipientAbortMigration: 1, - migrationId: uuid, - databaseName: dbName, - fromShardName: donor.shardName, - toShardName: recipient.shardName -})); - -// Test that _movePrimaryRecipientAbortMigration called on an already aborted migration succeeds -assert.commandWorked(recipient.adminCommand({ - _movePrimaryRecipientAbortMigration: 1, - migrationId: uuid, - databaseName: dbName, - fromShardName: donor.shardName, - toShardName: recipient.shardName -})); - -assert.eq(0, recipientColl0.count(), "Recipient has orphaned collections"); - -// Cleanup to prevent metadata inconsistencies as we are not committing config changes. -assert.commandWorked(recipient.getDB(dbName).dropDatabase()); -assert.commandWorked(donor.getDB(dbName).dropDatabase()); - -st.stop(); -})(); diff --git a/jstests/sharding/move_primary_basic.js b/jstests/sharding/move_primary_basic.js index faff3a487031c..0a9e9b17e7ea9 100644 --- a/jstests/sharding/move_primary_basic.js +++ b/jstests/sharding/move_primary_basic.js @@ -1,7 +1,4 @@ -(function() { -'use strict'; - -load('jstests/libs/feature_flag_util.js'); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; function collectionExists(shard, dbName, collName) { return Array.contains(shard.getDB(dbName).getCollectionNames(), collName); @@ -110,10 +107,8 @@ jsTest.log('Test that only unsharded collections are moved'); } } -// TODO (SERVER-71309): Remove once 7.0 becomes last LTS. -if (FeatureFlagUtil.isPresentAndEnabled(config.admin, 'ResilientMovePrimary')) { - jsTest.log('Test that orphaned documents on recipient causes the operation to fail'); - +jsTest.log('Test that orphaned documents on recipient causes the operation to fail'); +{ // Insert an orphaned document on shard1. assert.commandWorked(shard1.getCollection(coll1NS).insertOne({name: 'Emma'})); @@ -124,36 +119,19 @@ if (FeatureFlagUtil.isPresentAndEnabled(config.admin, 'ResilientMovePrimary')) { assert.commandFailedWithCode(mongos.adminCommand({movePrimary: dbName, to: shard1.shardName}), ErrorCodes.NamespaceExists); - const expectDropOnFailure = - FeatureFlagUtil.isPresentAndEnabled(config.admin, 'OnlineMovePrimaryLifecycle'); - - if (expectDropOnFailure) { - // The orphaned collection on shard1 should have been dropped due to the previous failure. - assert.eq(2, shard0.getCollection(coll1NS).find().itcount()); - assert(!collectionExists(shard1, dbName, coll1Name)); - - // Create another empty collection. - shard1.getDB(dbName).createCollection(coll1Name); - } else { - // The documents are on both the shards. - assert.eq(2, shard0.getCollection(coll1NS).find().itcount()); - assert.eq(1, shard1.getCollection(coll1NS).find().itcount()); + // The documents are on both the shards. + assert.eq(2, shard0.getCollection(coll1NS).find().itcount()); + assert.eq(1, shard1.getCollection(coll1NS).find().itcount()); - // Remove the orphaned document on shard1 leaving an empty collection. - assert.commandWorked(shard1.getCollection(coll1NS).remove({name: 'Emma'})); - assert.eq(0, shard1.getCollection(coll1NS).find().itcount()); - } + // Remove the orphaned document on shard1 leaving an empty collection. 
+ assert.commandWorked(shard1.getCollection(coll1NS).remove({name: 'Emma'})); + assert.eq(0, shard1.getCollection(coll1NS).find().itcount()); assert.commandFailedWithCode(mongos.adminCommand({movePrimary: dbName, to: shard1.shardName}), ErrorCodes.NamespaceExists); - if (expectDropOnFailure) { - // The orphaned collection on shard1 should have been dropped due to the previous failure. - assert(!collectionExists(shard1, dbName, coll1Name)); - } else { - // Drop the orphaned collection on shard1. - shard1.getCollection(coll1NS).drop(); - } + // Drop the orphaned collection on shard1. + shard1.getCollection(coll1NS).drop(); } jsTest.log('Test that metadata has changed'); @@ -175,4 +153,3 @@ jsTest.log('Test that metadata has changed'); } st.stop(); -})(); diff --git a/jstests/sharding/move_primary_clone.js b/jstests/sharding/move_primary_clone.js index dcc93a7d5913a..34e0872be80bd 100644 --- a/jstests/sharding/move_primary_clone.js +++ b/jstests/sharding/move_primary_clone.js @@ -1,8 +1,6 @@ (function() { 'use strict'; -load('jstests/libs/feature_flag_util.js'); - function sortByName(a, b) { if (a.name < b.name) return -1; @@ -60,10 +58,12 @@ function checkCollectionsCopiedCorrectly(fromShard, toShard, sharded, barUUID, f var indexes = res.cursor.firstBatch; indexes.sort(sortByName); - // TODO SERVER-74252: once 7.0 becomes LastLTS we can assume that the movePrimary will never - // copy indexes of sharded collections. + // For each unsharded collection, there should be a total of 2 indexes - one for the _id + // field and the other we have created. However, in the case of sharded collections, only + // the _id index is present. When running movePrimary, indexes of sharded collections are + // not copied. if (sharded) - assert(indexes.length == 1 || indexes.length == 2); + assert(indexes.length == 1); else assert(indexes.length == 2); @@ -197,16 +197,9 @@ function movePrimaryWithFailpoint(sharded) { assert.commandFailedWithCode(st.s0.adminCommand({movePrimary: "test1", to: toShard.name}), ErrorCodes.InvalidOptions); } else { - // TODO (SERVER-71309): Remove once 7.0 becomes last LTS. - if (!FeatureFlagUtil.isPresentAndEnabled(db, 'ResilientMovePrimary')) { - // If the collections are unsharded, we should fail when any collections being copied - // exist on the target shard. - assert.commandFailed(st.s0.adminCommand({movePrimary: "test1", to: toShard.name})); - } else { - // The failure of the previous attempt caused the dirty data on the recipient to be - // dropped, so the data cloning shouldn't find any impediments. - assert.commandWorked(st.s0.adminCommand({movePrimary: "test1", to: toShard.name})); - } + // The failure of the previous attempt caused the dirty data on the recipient to be dropped, + // so the data cloning shouldn't find any impediments. + assert.commandWorked(st.s0.adminCommand({movePrimary: "test1", to: toShard.name})); } } diff --git a/jstests/sharding/move_primary_failover_before_persist_block_timestamp.js b/jstests/sharding/move_primary_failover_before_persist_block_timestamp.js deleted file mode 100644 index b302f5179a56d..0000000000000 --- a/jstests/sharding/move_primary_failover_before_persist_block_timestamp.js +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Test that movePrimary sets valid block timestamp if a failover occurs before persisting it. 
- * - * @tags: [ - * requires_fcv_70, - * featureFlagOnlineMovePrimaryLifecycle - * ] - */ -(function() { -'use strict'; -load("jstests/libs/fail_point_util.js"); -load("jstests/libs/parallel_shell_helpers.js"); - -const st = new ShardingTest({mongos: 1, shards: 2, rs: {nodes: 3}}); - -const mongos = st.s0; -const shard0 = st.shard0; -const oldDonorPrimary = st.rs0.getPrimary(); -const shard1 = st.shard1; -const config = st.config; - -const dbName = 'test_db'; -const collName = 'test_coll'; -const collNS = dbName + '.' + collName; - -assert.commandWorked(mongos.adminCommand({enableSharding: dbName, primaryShard: shard0.shardName})); -assert.commandWorked(mongos.getCollection(collNS).insert({value: 1})); -assert.commandWorked(mongos.getCollection(collNS).insert({value: 2})); - -const fp = configureFailPoint(oldDonorPrimary, "pauseBeforeMovePrimaryDonorPersistsBlockTimestamp"); - -const joinMovePrimary = startParallelShell( - funWithArgs(function(dbName, toShard) { - assert.commandWorked(db.adminCommand({movePrimary: dbName, to: toShard})); - }, dbName, shard1.shardName), mongos.port); - -fp.wait(); -st.rs0.getPrimary().adminCommand({replSetStepDown: ReplSetTest.kForeverSecs, force: 1}); -fp.off(); -st.rs0.awaitNodesAgreeOnPrimary(); -joinMovePrimary(); - -st.stop(); -})(); diff --git a/jstests/sharding/move_primary_fails_without_database_version.js b/jstests/sharding/move_primary_fails_without_database_version.js index 0c9566716d826..01b2fe05eae42 100644 --- a/jstests/sharding/move_primary_fails_without_database_version.js +++ b/jstests/sharding/move_primary_fails_without_database_version.js @@ -2,6 +2,7 @@ // Do not check metadata consistency as the database version is missing for testing purposes. TestData.skipCheckMetadataConsistency = true; +TestData.skipCheckRoutingTableConsistency = true; (function() { "use strict"; @@ -10,9 +11,10 @@ const dbName = "test"; const st = new ShardingTest({shards: 2}); -assert.commandWorked(st.s.getDB("config") - .getCollection("databases") - .insert({_id: dbName, partitioned: false, primary: st.shard0.shardName})); +assert.commandWorked(st.s.getDB("config").getCollection("databases").insert({ + _id: dbName, + primary: st.shard0.shardName +})); assert.commandFailed(st.s.adminCommand({movePrimary: dbName, to: st.shard1.shardName})); diff --git a/jstests/sharding/move_primary_with_writes.js b/jstests/sharding/move_primary_with_writes.js index 86aef4084b7f4..5a80236f672f2 100644 --- a/jstests/sharding/move_primary_with_writes.js +++ b/jstests/sharding/move_primary_with_writes.js @@ -5,7 +5,6 @@ 'use strict'; load('jstests/libs/fail_point_util.js'); -load("jstests/libs/feature_flag_util.js"); let st = new ShardingTest({ mongos: 2, @@ -288,12 +287,6 @@ st.forEachConnection(shard => { let cloningDataFPName = "hangBeforeCloningData"; -// TODO (SERVER-71309): Remove once 7.0 becomes last LTS. -if (!FeatureFlagUtil.isPresentAndEnabled(st.configRS.getPrimary().getDB('admin'), - "ResilientMovePrimary")) { - cloningDataFPName = "hangInCloneStage"; -} - createCollections(); let fromShard = st.getPrimaryShard(dbName); let toShard = st.getOther(fromShard); @@ -315,16 +308,6 @@ fromShard = st.getPrimaryShard(dbName); toShard = st.getOther(fromShard); testMovePrimaryDDL(cloningDataFPName, fromShard, toShard, st.s.getDB("admin"), false, true); -// TODO (SERVER-71309): Remove once 7.0 becomes last LTS. With the new DDL coordinator, the -// recipient blocks any CRUD operations until movePrimary is complete. 
-if (!FeatureFlagUtil.isEnabled(st.configRS.getPrimary().getDB('admin'), "ResilientMovePrimary")) { - createCollections(); - fromShard = st.getPrimaryShard(dbName); - toShard = st.getOther(fromShard); - testMovePrimary( - "hangInCleanStaleDataStage", fromShard, toShard, st.s.getDB(dbName), false, false); -} - overrideDDLLockTimeoutFPs.forEach(fp => fp.off()); st.stop(); diff --git a/jstests/sharding/move_range_basic.js b/jstests/sharding/move_range_basic.js index ca731a34cf4e0..75bae381d7b18 100644 --- a/jstests/sharding/move_range_basic.js +++ b/jstests/sharding/move_range_basic.js @@ -2,32 +2,33 @@ * Basic tests for moveRange. * * @tags: [ - * requires_fcv_60, + * assumes_balancer_off * ] */ +(function() { 'use strict'; load('jstests/sharding/libs/find_chunks_util.js'); load('jstests/sharding/libs/chunk_bounds_util.js'); -var st = new ShardingTest({mongos: 1, shards: 2, chunkSize: 1}); -var kDbName = 'db'; +const st = new ShardingTest({mongos: 1, shards: 2, chunkSize: 1}); +const kDbName = 'db'; -var mongos = st.s0; -var shard0 = st.shard0.shardName; -var shard1 = st.shard1.shardName; +const mongos = st.s0; +const shard0 = st.shard0.shardName; +const shard1 = st.shard1.shardName; assert.commandWorked(mongos.adminCommand({enableSharding: kDbName, primaryShard: shard0})); -function getRandomShardKeyValue(ns, skPattern) { +function getRandomShardKeyValue(ns, skPattern, filter) { const isHashedShardKey = Object.values(skPattern).includes('hashed'); const coll = mongos.getCollection(ns); // Get a random document from the collection - var doc = coll.aggregate([{$sample: {size: 1}}]).next(); + let doc = coll.aggregate([{$match: filter}, {$sample: {size: 1}}]).next(); // Delete fields not making part of the shard key - for (var key in doc) { + for (let key in doc) { if (!(key in skPattern)) { delete doc[key]; } @@ -40,13 +41,56 @@ function getRandomShardKeyValue(ns, skPattern) { return doc; } +// Tests for moveRange that will call move&split. We call moveRange with minBound on the positive +// values and moveRange with maxBound on negative values to ensure that the chunk chosen is big +// enough to be split by moveRange. +function testMoveRangeWithBigChunk(mongos, ns, skPattern, minBound) { + // Get a random existing shard key value, `moveRange` will be called on the owning chunk + let filter = minBound ? {a: {$gte: 0}} : {a: {$lt: 0}}; + let randomSK = getRandomShardKeyValue(ns, skPattern, filter); + + // Get bounds and shard of the chunk owning `randomSK` + const chunksBefore = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns).toArray(); + const shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunksBefore); + const {shard, bounds} = + chunkBoundsUtil.findShardAndChunkBoundsForShardKey(st, shardChunkBounds, randomSK); + + const donor = shard.shardName; + const recipient = donor == shard0 ? 
shard1 : shard0; + + // Count chunks belonging to donor and recipient shards BEFORE moveRange + const nChunksOnDonorBefore = chunksBefore.filter(chunk => chunk.shard == donor).length; + const nChunksOnRecipientBefore = chunksBefore.filter(chunk => chunk.shard == recipient).length; + + if (minBound) { + assert.commandWorked( + mongos.adminCommand({moveRange: ns, min: randomSK, toShard: recipient})); + } else { + assert.commandWorked( + mongos.adminCommand({moveRange: ns, max: randomSK, toShard: recipient})); + } + + // Count chunks belonging to donor and recipient shards AFTER moveRange + const chunksAfter = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns).toArray(); + const nChunksOnDonorAfter = chunksAfter.filter(chunk => chunk.shard == donor).length; + const nChunksOnRecipientAfter = chunksAfter.filter(chunk => chunk.shard == recipient).length; + + assert.eq(nChunksOnRecipientAfter, + nChunksOnRecipientBefore + 1, + "The number of chunks on the recipient shard did not increase following a moveRange"); + assert(nChunksOnDonorAfter == nChunksOnDonorBefore || + nChunksOnDonorAfter == nChunksOnDonorBefore + 1, + "Unexpected number of chunks on the donor shard after triggering a split + move"); +} + function test(collName, skPattern) { const ns = kDbName + '.' + collName; assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: skPattern})); - var aChunk = findChunksUtil.findOneChunkByNs(mongos.getDB('config'), ns, {shard: shard0}); + let aChunk = findChunksUtil.findOneChunkByNs(mongos.getDB('config'), ns, {shard: shard0}); assert(aChunk); + jsTest.log("Testing invalid commands"); // Fail if one of the bounds is not a valid shard key assert.commandFailed(mongos.adminCommand( {moveRange: ns, min: aChunk.min, max: {invalidShardKey: 10}, toShard: shard1})); @@ -56,6 +100,7 @@ function test(collName, skPattern) { {moveRange: ns, min: aChunk.min, max: aChunk.max, toShard: 'WrongShard'})); // Test that `moveRange` with min & max bounds works + jsTest.log("Testing moveRange with both bounds"); assert.commandWorked( mongos.adminCommand({moveRange: ns, min: aChunk.min, max: aChunk.max, toShard: shard1})); @@ -64,56 +109,37 @@ function test(collName, skPattern) { // Test that `moveRange` only with min bound works (translates to `moveChunk` because chunk too // small to be split) + jsTest.log("Testing moveRange with only min bound"); assert.commandWorked(mongos.adminCommand({moveRange: ns, min: aChunk.min, toShard: shard0})); assert.eq(1, mongos.getDB('config').chunks.countDocuments({_id: aChunk._id, shard: shard0})); assert.eq(0, mongos.getDB('config').chunks.countDocuments({_id: aChunk._id, shard: shard1})); - // Test that `moveRange` only with min bound works (split+move) - { - // Insert 10MB in order to create big chunk (chunkSize is set to 1MB) - const bigString = "X".repeat(1024 * 1024 / 4); // 1 MB - const coll = mongos.getCollection(ns); - let bulk = coll.initializeUnorderedBulkOp(); - for (var i = 0; i < 10; i++) { - bulk.insert({a: i, b: i, str: bigString}); - } - assert.commandWorked(bulk.execute()); - - // Get a random existing shard key value, `moveRange` will be called on the owning chunk - var randomSK = getRandomShardKeyValue(ns, skPattern); - - // Get bounds and shard of the chunk owning `randomSK` - const chunksBefore = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns).toArray(); - const shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunksBefore); - const {shard, bounds} = - chunkBoundsUtil.findShardAndChunkBoundsForShardKey(st, shardChunkBounds, 
randomSK); + // Test that `moveRange` only with max bound works (translates to `moveChunk` because chunk too + // small to be split) + jsTest.log("Testing moveRange with only max bound"); + assert.commandWorked(mongos.adminCommand({moveRange: ns, max: aChunk.max, toShard: shard1})); - const donor = shard.shardName; - const recipient = donor == shard0 ? shard1 : shard0; + assert.eq(0, mongos.getDB('config').chunks.countDocuments({_id: aChunk._id, shard: shard0})); + assert.eq(1, mongos.getDB('config').chunks.countDocuments({_id: aChunk._id, shard: shard1})); - // Count chunks belonging to donor and recipient shards BEFORE moveRange - const nChunksOnDonorBefore = chunksBefore.filter(chunk => chunk.shard == donor).length; - const nChunksOnRecipientBefore = - chunksBefore.filter(chunk => chunk.shard == recipient).length; + // Insert 10MB >0 and <0 in order to create multiple big chunks (chunkSize is set to 1MB) + jsTest.log("Inserting data to create large chunks"); + const bigString = "X".repeat(1024 * 1024 / 4); // 1 MB + const coll = mongos.getCollection(ns); + let bulk = coll.initializeUnorderedBulkOp(); + for (let i = -10; i < 10; i++) { + bulk.insert({a: i, b: i, str: bigString}); + } + assert.commandWorked(bulk.execute()); - assert.commandWorked( - mongos.adminCommand({moveRange: ns, min: randomSK, toShard: recipient})); + // Test moving large chunk with only min bound + jsTest.log("Testing moveChunk with only min bound and large chunk"); + testMoveRangeWithBigChunk(mongos, ns, skPattern, true /* minBound */); - // Count chunks belonging to donor and recipient shards AFTER moveRange - const chunksAfter = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns).toArray(); - const nChunksOnDonorAfter = chunksAfter.filter(chunk => chunk.shard == donor).length; - const nChunksOnRecipientAfter = - chunksAfter.filter(chunk => chunk.shard == recipient).length; - - assert.eq( - nChunksOnRecipientAfter, - nChunksOnRecipientBefore + 1, - "The number of chunks on the recipient shard did not increase following a moveRange"); - assert(nChunksOnDonorAfter == nChunksOnDonorBefore || - nChunksOnDonorAfter == nChunksOnDonorBefore + 1, - "Unexpected number of chunks on the donor shard after triggering a split + move"); - } + // Test moving large chunk with only max bound + jsTest.log("Testing moveChunk with only max bound and large chunk"); + testMoveRangeWithBigChunk(mongos, ns, skPattern, false /* maxBound */); } test('nonHashedShardKey', {a: 1}); @@ -123,3 +149,4 @@ test('nonHashedCompundShardKey', {a: 1, b: 1}); test('hashedShardKey', {a: 'hashed'}); st.stop(); +})(); diff --git a/jstests/sharding/move_range_max_bound.js b/jstests/sharding/move_range_max_bound.js deleted file mode 100644 index e2fe174bae089..0000000000000 --- a/jstests/sharding/move_range_max_bound.js +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Basic tests for moveRange when called only with max bound. - * - * TODO (SERVER-74536) remove this file and move these test cases to move_range_basic.js after 7.0 - * branches out. 
- * - * @tags: [ - * requires_fcv_70, - * ] - */ -'use strict'; - -load('jstests/sharding/libs/find_chunks_util.js'); -load('jstests/sharding/libs/chunk_bounds_util.js'); - -var st = new ShardingTest({mongos: 1, shards: 2, chunkSize: 1}); -var kDbName = 'db'; - -var mongos = st.s0; -var shard0 = st.shard0.shardName; -var shard1 = st.shard1.shardName; - -assert.commandWorked(mongos.adminCommand({enableSharding: kDbName, primaryShard: shard0})); - -function getRandomShardKeyValue(ns, skPattern) { - const isHashedShardKey = Object.values(skPattern).includes('hashed'); - const coll = mongos.getCollection(ns); - - // Get a random document from the collection - var doc = coll.aggregate([{$sample: {size: 1}}]).next(); - - // Delete fields not making part of the shard key - for (var key in doc) { - if (!(key in skPattern)) { - delete doc[key]; - } - } - - if (isHashedShardKey) { - doc.a = convertShardKeyToHashed(doc.a); - } - - return doc; -} - -function test(collName, skPattern) { - const ns = kDbName + '.' + collName; - assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: skPattern})); - - var aChunk = findChunksUtil.findOneChunkByNs(mongos.getDB('config'), ns, {shard: shard0}); - assert(aChunk); - - // Test that `moveRange` only with max bound works (translates to `moveChunk` because chunk too - // small to be split) - assert.commandWorked(mongos.adminCommand({moveRange: ns, max: aChunk.max, toShard: shard1})); - - assert.eq(0, mongos.getDB('config').chunks.countDocuments({_id: aChunk._id, shard: shard0})); - assert.eq(1, mongos.getDB('config').chunks.countDocuments({_id: aChunk._id, shard: shard1})); - - // Test that `moveRange` only with max bound works (split+move) - { - // Insert 10MB in order to create big chunk (chunkSize is set to 1MB) - const bigString = "X".repeat(1024 * 1024 / 4); // 1 MB - const coll = mongos.getCollection(ns); - let bulk = coll.initializeUnorderedBulkOp(); - for (var i = 0; i < 10; i++) { - bulk.insert({a: i, b: i, str: bigString}); - } - assert.commandWorked(bulk.execute()); - - // Get a random existing shard key value, `moveRange` will be called on the owning chunk - var randomSK = getRandomShardKeyValue(ns, skPattern); - - // Get bounds and shard of the chunk owning `randomSK` - const chunksBefore = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns).toArray(); - const shardChunkBounds = chunkBoundsUtil.findShardChunkBounds(chunksBefore); - const {shard, bounds} = - chunkBoundsUtil.findShardAndChunkBoundsForShardKey(st, shardChunkBounds, randomSK); - - const donor = shard.shardName; - const recipient = donor == shard0 ? 
shard1 : shard0; - - // Count chunks belonging to donor and recipient shards BEFORE moveRange - const nChunksOnDonorBefore = chunksBefore.filter(chunk => chunk.shard == donor).length; - const nChunksOnRecipientBefore = - chunksBefore.filter(chunk => chunk.shard == recipient).length; - - assert.commandWorked( - mongos.adminCommand({moveRange: ns, max: randomSK, toShard: recipient})); - - // Count chunks belonging to donor and recipient shards AFTER moveRange - const chunksAfter = findChunksUtil.findChunksByNs(mongos.getDB('config'), ns).toArray(); - const nChunksOnDonorAfter = chunksAfter.filter(chunk => chunk.shard == donor).length; - const nChunksOnRecipientAfter = - chunksAfter.filter(chunk => chunk.shard == recipient).length; - - assert.eq( - nChunksOnRecipientAfter, - nChunksOnRecipientBefore + 1, - "The number of chunks on the recipient shard did not increase following a moveRange"); - assert(nChunksOnDonorAfter == nChunksOnDonorBefore || - nChunksOnDonorAfter == nChunksOnDonorBefore + 1, - "Unexpected number of chunks on the donor shard after triggering a split + move"); - } -} - -test('nonHashedShardKey', {a: 1}); - -test('nonHashedCompundShardKey', {a: 1, b: 1}); - -test('hashedShardKey', {a: 'hashed'}); - -st.stop(); diff --git a/jstests/sharding/movechunk_commit_changelog_stats.js b/jstests/sharding/movechunk_commit_changelog_stats.js index 2bfb9160fba06..7e66dc974df75 100644 --- a/jstests/sharding/movechunk_commit_changelog_stats.js +++ b/jstests/sharding/movechunk_commit_changelog_stats.js @@ -39,4 +39,4 @@ assertCountsInChangelog(); mongos.getDB(kDbName).fooHashed.drop(); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/movechunk_include.js b/jstests/sharding/movechunk_include.js index 653326e2ab604..f21e02f66ac56 100644 --- a/jstests/sharding/movechunk_include.js +++ b/jstests/sharding/movechunk_include.js @@ -41,7 +41,7 @@ function setupMoveChunkTest(shardOptions) { var stats = st.chunkCounts("foo"); var to = ""; - for (shard in stats) { + for (let shard in stats) { if (stats[shard] == 0) { to = shard; break; diff --git a/jstests/sharding/movechunk_parallel.js b/jstests/sharding/movechunk_parallel.js index 061e3db75a711..209165db509fe 100644 --- a/jstests/sharding/movechunk_parallel.js +++ b/jstests/sharding/movechunk_parallel.js @@ -9,7 +9,7 @@ * ] */ -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); load("jstests/sharding/libs/find_chunks_util.js"); (function() { diff --git a/jstests/sharding/movechunk_with_default_paranoia.js b/jstests/sharding/movechunk_with_default_paranoia.js deleted file mode 100644 index 82daf3be66d51..0000000000000 --- a/jstests/sharding/movechunk_with_default_paranoia.js +++ /dev/null @@ -1,22 +0,0 @@ -/** - * This test checks that moveParanoia defaults to off (ie the moveChunk directory will not - * be created). 
- */ - -load("jstests/sharding/movechunk_include.js"); - -// Passing no shardOptions to test default moveParanoia -var st = setupMoveChunkTest({}); - -var shards = [st.shard0, st.shard1]; -for (i in shards) { - var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath; - var hasMoveChunkDir = 0 != - ls(dbpath) - .filter(function(a) { - return null != a.match("moveChunk"); - }) - .length; - assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath)); -} -st.stop(); diff --git a/jstests/sharding/movechunk_with_moveParanoia.js b/jstests/sharding/movechunk_with_moveParanoia.js deleted file mode 100644 index 3b63b9b8eea3d..0000000000000 --- a/jstests/sharding/movechunk_with_moveParanoia.js +++ /dev/null @@ -1,24 +0,0 @@ -/** - * This test sets moveParanoia flag and then check that the directory is created with the moved data - */ - -load("jstests/sharding/movechunk_include.js"); - -var st = setupMoveChunkTest({moveParanoia: ""}); - -var shards = [st.shard0, st.shard1]; -var foundMoveChunk = false; -for (i in shards) { - var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath; - var hasMoveChunkDir = 0 != - ls(dbpath) - .filter(function(a) { - return null != a.match("moveChunk"); - }) - .length; - foundMoveChunk = foundMoveChunk || hasMoveChunkDir; -} - -assert(foundMoveChunk, "did not find moveChunk directory!"); - -st.stop(); diff --git a/jstests/sharding/movechunk_with_noMoveParanoia.js b/jstests/sharding/movechunk_with_noMoveParanoia.js deleted file mode 100644 index 49df2f896144c..0000000000000 --- a/jstests/sharding/movechunk_with_noMoveParanoia.js +++ /dev/null @@ -1,20 +0,0 @@ -/** - * This test sets moveParanoia flag and then check that the directory is created with the moved data - */ - -load("jstests/sharding/movechunk_include.js"); - -var st = setupMoveChunkTest({noMoveParanoia: ""}); - -var shards = [st.shard0, st.shard1]; -for (i in shards) { - var dbpath = shards[i].adminCommand("getCmdLineOpts").parsed.storage.dbPath; - var hasMoveChunkDir = 0 != - ls(dbpath) - .filter(function(a) { - return null != a.match("moveChunk"); - }) - .length; - assert(!hasMoveChunkDir, dbpath + ": has MoveChunk directory + " + ls(dbpath)); -} -st.stop(); diff --git a/jstests/sharding/names.js b/jstests/sharding/names.js index da5de45c1a84a..2aaa39d093429 100644 --- a/jstests/sharding/names.js +++ b/jstests/sharding/names.js @@ -2,7 +2,7 @@ (function() { 'use strict'; -var st = new ShardingTest({shards: TestData.catalogShard ? 1 : 0, mongos: 1}); +var st = new ShardingTest({shards: TestData.configShard ? 1 : 0, mongos: 1}); var rsA = new ReplSetTest({nodes: 2, name: "rsA", nodeOptions: {shardsvr: ""}}); var rsB = new ReplSetTest({nodes: 2, name: "rsB", nodeOptions: {shardsvr: ""}}); @@ -24,7 +24,7 @@ printjson(config.shards.find().toArray()); assert.commandWorked(mongos.adminCommand({addShard: rsB.getURL(), name: rsA.name})); printjson(config.shards.find().toArray()); -assert.eq(TestData.catalogShard ? 3 : 2, config.shards.count(), "Error adding a shard"); +assert.eq(TestData.configShard ? 
3 : 2, config.shards.count(), "Error adding a shard"); assert.eq(rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA"); assert.eq(rsA.getURL(), config.shards.findOne({_id: rsB.name})["host"], "Wrong host for shard rsB"); @@ -34,7 +34,7 @@ assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}), var res = assert.commandWorked(mongos.adminCommand({removeshard: rsA.name}), "failed to remove shard"); -assert.eq(TestData.catalogShard ? 2 : 1, +assert.eq(TestData.configShard ? 2 : 1, config.shards.count(), "Shard was not removed: " + res + "; Shards: " + tojson(config.shards.find().toArray())); assert.eq( @@ -50,7 +50,7 @@ assert.soon(() => { printjson(config.shards.find().toArray()); -assert.eq(TestData.catalogShard ? 3 : 2, config.shards.count(), "Error re-adding a shard"); +assert.eq(TestData.configShard ? 3 : 2, config.shards.count(), "Error re-adding a shard"); assert.eq( rsB.getURL(), config.shards.findOne({_id: rsA.name})["host"], "Wrong host for shard rsA 3"); assert.eq( diff --git a/jstests/sharding/noUpdateButN1inAnotherCollection.js b/jstests/sharding/noUpdateButN1inAnotherCollection.js index 910717b811f0f..bd49d3184d3ac 100644 --- a/jstests/sharding/noUpdateButN1inAnotherCollection.js +++ b/jstests/sharding/noUpdateButN1inAnotherCollection.js @@ -11,10 +11,10 @@ var s = new ShardingTest({name: name, shards: 2, mongos: 2}); var mongosA = s.s0; var mongosB = s.s1; -ns = "test.coll"; -ns2 = "test.coll2"; +let ns = "test.coll"; +let ns2 = "test.coll2"; -adminSA = mongosA.getDB("admin"); +let adminSA = mongosA.getDB("admin"); adminSA.runCommand({enableSharding: "test"}); adminSA.runCommand({moveprimary: "test", to: "s.shard0.shardName"}); @@ -34,7 +34,7 @@ var db = mongosA.getDB("test"); var coll = db.coll; var coll2 = db.coll2; -numDocs = 10; +let numDocs = 10; for (var i = 1; i < numDocs; i++) { coll.insert({_id: i, control: 0}); coll2.insert({_id: i, control: 0}); diff --git a/jstests/sharding/nonreplicated_uuids_on_shardservers.js b/jstests/sharding/nonreplicated_uuids_on_shardservers.js index ac1a875f24af9..75389fd1e54f4 100644 --- a/jstests/sharding/nonreplicated_uuids_on_shardservers.js +++ b/jstests/sharding/nonreplicated_uuids_on_shardservers.js @@ -12,7 +12,7 @@ let rs = st.rs0; mongos.getDB("test").coll.insert({_id: 1, x: 1}); // Add a node with --shardsvr to the replica set. -const clusterRoleOption = TestData.catalogShard ? "configsvr" : "shardsvr"; +const clusterRoleOption = TestData.configShard ? 
"configsvr" : "shardsvr"; let newNode = rs.add({[clusterRoleOption]: '', rsConfig: {priority: 0, votes: 0}}); rs.reInitiate(); rs.awaitSecondaryNodes(); diff --git a/jstests/sharding/prepare_transaction_then_migrate.js b/jstests/sharding/prepare_transaction_then_migrate.js index 5a12a83ed4c42..27524b41b8335 100644 --- a/jstests/sharding/prepare_transaction_then_migrate.js +++ b/jstests/sharding/prepare_transaction_then_migrate.js @@ -9,6 +9,7 @@ (function() { "use strict"; load('jstests/libs/chunk_manipulation_util.js'); +load('jstests/replsets/rslib.js'); load('jstests/sharding/libs/create_sharded_collection_util.js'); load('jstests/sharding/libs/sharded_transactions_helpers.js'); diff --git a/jstests/sharding/primary_config_server_blackholed_from_mongos.js b/jstests/sharding/primary_config_server_blackholed_from_mongos.js index 0511a22d41444..1fbe07a665177 100644 --- a/jstests/sharding/primary_config_server_blackholed_from_mongos.js +++ b/jstests/sharding/primary_config_server_blackholed_from_mongos.js @@ -2,7 +2,7 @@ * Ensures that if the primary config server is blackholed from the point of view of mongos, CRUD * and read-only config operations continue to work. * - * @tags: [catalog_shard_incompatible] + * @tags: [config_shard_incompatible] */ (function() { diff --git a/jstests/sharding/queries_elide_shard_filter.js b/jstests/sharding/queries_elide_shard_filter.js index c1e7625cc30ab..f578ffc4d33c9 100644 --- a/jstests/sharding/queries_elide_shard_filter.js +++ b/jstests/sharding/queries_elide_shard_filter.js @@ -5,10 +5,7 @@ // @tags: [ // ] -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); +import {getPlanStage} from "jstests/libs/analyze_plan.js"; function assertShardFilter(explain) { const filterStage = getPlanStage(explain.queryPlanner.winningPlan, "SHARDING_FILTER"); @@ -105,5 +102,4 @@ assertShardFilter(explain); explain = assert.commandWorked(coll.find({b: true}).explain()); assertShardFilter(explain); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/query/agg_explain_fmt.js b/jstests/sharding/query/agg_explain_fmt.js index a1049e6f93771..dcf05311aceed 100644 --- a/jstests/sharding/query/agg_explain_fmt.js +++ b/jstests/sharding/query/agg_explain_fmt.js @@ -6,10 +6,7 @@ * ] */ -(function() { -"use strict"; - -load('jstests/libs/analyze_plan.js'); // For planHasStage. +import {planHasStage} from "jstests/libs/analyze_plan.js"; const st = new ShardingTest({shards: 2}); const mongosDB = st.s.getDB("test"); @@ -65,5 +62,4 @@ assert.eq(mergeCursors.allowPartialResults, false, mergeCursors); const shardDB = st.shard0.getDB(mongosDB.getName()); explain = shardDB[coll.getName()].explain().aggregate([{$match: {}}]); assert(!planHasStage(shardDB, explain.queryPlanner.winningPlan, "SHARDING_FILTER"), explain); -st.stop(); -}()); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/query/agg_percentile.js b/jstests/sharding/query/agg_percentile.js index 0965073498ebe..79966ddda6e05 100644 --- a/jstests/sharding/query/agg_percentile.js +++ b/jstests/sharding/query/agg_percentile.js @@ -2,7 +2,6 @@ * Tests that $percentile is computed correctly for sharded collections. 
* @tags: [ * requires_fcv_70, - * featureFlagApproxPercentiles * ] */ diff --git a/jstests/sharding/query/agg_shard_targeting.js b/jstests/sharding/query/agg_shard_targeting.js index d99032277aca1..ee36ae742d361 100644 --- a/jstests/sharding/query/agg_shard_targeting.js +++ b/jstests/sharding/query/agg_shard_targeting.js @@ -35,7 +35,8 @@ const mongosForMove = st.s1; const mongosDB = mongosForAgg.getDB(jsTestName()); const mongosColl = mongosDB.test; -const shard0DB = primaryShardDB = st.shard0.getDB(jsTestName()); +const shard0DB = st.shard0.getDB(jsTestName()); +const primaryShardDB = shard0DB; const shard1DB = st.shard1.getDB(jsTestName()); // Turn off best-effort recipient metadata refresh post-migration commit on both shards because diff --git a/jstests/sharding/query/aggregation_currentop.js b/jstests/sharding/query/aggregation_currentop.js index 1c566fbc9fbd3..292199b3015d7 100644 --- a/jstests/sharding/query/aggregation_currentop.js +++ b/jstests/sharding/query/aggregation_currentop.js @@ -13,7 +13,9 @@ * applicable. * * This test requires replica set configuration and user credentials to persist across a restart. - * @tags: [requires_persistence, uses_transactions, uses_prepare_transaction, requires_fcv_70] + * TODO SERVER-78101: Investigate the test failure and re-enable the test with CQF enabled. + * @tags: [requires_persistence, uses_transactions, uses_prepare_transaction, requires_fcv_70, + * cqf_incompatible] */ // Restarts cause issues with authentication for awaiting replication. @@ -67,7 +69,7 @@ shardConn.waitForClusterTime(60); let shardTestDB = shardConn.getDB(jsTestName()); let shardAdminDB = shardConn.getDB("admin"); -function createUsers(conn) { +function createUsers(conn, grantDirectShardOperationsRole) { let adminDB = conn.getDB("admin"); // Create an admin user, one user with the inprog privilege, and one without. @@ -80,20 +82,31 @@ function createUsers(conn) { privileges: [{resource: {cluster: true}, actions: ["inprog"]}] })); - assert.commandWorked(adminDB.runCommand( - {createUser: "user_inprog", pwd: "pwd", roles: ["readWriteAnyDatabase", "role_inprog"]})); + let rolesUserInprog = ["readWriteAnyDatabase", "role_inprog"]; + if (grantDirectShardOperationsRole) + rolesUserInprog.push("directShardOperations"); + assert.commandWorked( + adminDB.runCommand({createUser: "user_inprog", pwd: "pwd", roles: rolesUserInprog})); - assert.commandWorked(adminDB.runCommand( - {createUser: "user_no_inprog", pwd: "pwd", roles: ["readWriteAnyDatabase"]})); + let rolesUserNoInprog = ["readWriteAnyDatabase"]; + if (grantDirectShardOperationsRole) + rolesUserNoInprog.push("directShardOperations"); + + assert.commandWorked( + adminDB.runCommand({createUser: "user_no_inprog", pwd: "pwd", roles: rolesUserNoInprog})); } // Create necessary users at both cluster and shard-local level. -if (!TestData.catalogShard) { - // In catalog shard mode, the first shard is the config server, so creating the users via mongos +if (!TestData.configShard) { + // In config shard mode, the first shard is the config server, so creating the users via mongos // below will also create them on the shard. - createUsers(shardConn); + createUsers(shardConn, /* grantDirectShardOperationsRole */ true); + createUsers(mongosConn, /* grantDirectShardOperationsRole */ false); +} else { + // Since this is making the user on both the config server and the shard, grant it direct shard + // operations role. 
+ createUsers(mongosConn, /* grantDirectShardOperationsRole */ true); } -createUsers(mongosConn); // Create a test database and some dummy data on rs0. assert(clusterAdminDB.auth("admin", "pwd")); @@ -483,7 +496,7 @@ assert.commandFailedWithCode(clusterAdminDB.currentOp({$ownOps: true}), ErrorCod assert(clusterAdminDB.logout()); assert(clusterAdminDB.auth("user_inprog", "pwd")); -const expectedOutput = TestData.catalogShard ? +const expectedOutput = TestData.configShard ? [ {_id: {shard: "aggregation_currentop-rs1", host: st.rs1.getPrimary().host}}, {_id: {shard: "aggregation_currentop-rs2", host: st.rs2.getPrimary().host}}, diff --git a/jstests/sharding/query/collation_targeting.js b/jstests/sharding/query/collation_targeting.js index 602c63c990acb..329e65567c5bb 100644 --- a/jstests/sharding/query/collation_targeting.js +++ b/jstests/sharding/query/collation_targeting.js @@ -190,11 +190,10 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { coll.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive}); assert(res.a === "foo" || res.a === "FOO"); - // TODO: SERVER-69925 Implement explain for findAndModify. - // assert.throws(function() { - // coll.explain().findAndModify( - // {query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive}); - // }); + explain = coll.explain().findAndModify( + {query: {a: "foo"}, update: {$set: {b: 1}}, collation: caseInsensitive}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); } else { // Sharded findAndModify on strings with non-simple collation should fail, because findAndModify // must target a single shard. @@ -318,9 +317,13 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { assert.eq(1, writeRes.nRemoved); let afterNumDocsMatch = coll.find({a: "foo"}).collation(caseInsensitive).count(); assert.eq(beforeNumDocsMatch - 1, afterNumDocsMatch); + + explain = coll.explain().remove({a: "foo"}, {justOne: true, collation: caseInsensitive}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); + coll.insert(a_foo); coll.insert(a_FOO); - // TODO: SERVER-69924 Implement explain for deleteOne } else { // A single remove (justOne: true) must be single-shard or an exact-ID query. A query is // exact-ID if it contains an equality on _id and either has the collection default collation or @@ -421,7 +424,9 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { writeRes = assert.commandWorked(coll.update({a: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive})); assert.eq(1, writeRes.nMatched); - // TODO: SERVER-69922 Implement explain for updateOne + explain = coll.explain().update({a: "foo"}, {$set: {b: 1}}, {collation: caseInsensitive}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); } else { // A single (non-multi) update must be single-shard or an exact-ID query. 
A query is exact-ID if // it contains an equality on _id and either has the collection default collation or _id is not diff --git a/jstests/sharding/query/collation_targeting_inherited.js b/jstests/sharding/query/collation_targeting_inherited.js index 7662baeee0ff3..4cc9e23765dbb 100644 --- a/jstests/sharding/query/collation_targeting_inherited.js +++ b/jstests/sharding/query/collation_targeting_inherited.js @@ -208,11 +208,10 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length); if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { let res = collCaseInsensitive.findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}); assert(res.a === "foo" || res.a === "FOO"); - - // TODO: SERVER-69925 Implement explain for findAndModify. - // assert.throws(function() { - // collCaseInsensitive.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}); - // }); + explain = + collCaseInsensitive.explain().findAndModify({query: {a: "foo"}, update: {$set: {b: 1}}}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); } else { // Sharded findAndModify on strings with non-simple collation inherited from the collection // default should fail, because findAndModify must target a single shard. @@ -340,7 +339,9 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { assert.eq(1, writeRes.nRemoved); let afterNumDocsMatch = collCaseInsensitive.find({a: "foo"}).collation(caseInsensitive).count(); assert.eq(beforeNumDocsMatch - 1, afterNumDocsMatch); - // TODO: SERVER-69924 Implement explain for deleteOne + explain = collCaseInsensitive.explain().remove({a: "foo"}, {justOne: true}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); // Re-insert documents for later test cases. collCaseInsensitive.insert(a_foo); @@ -451,7 +452,9 @@ assert.eq(1, explain.queryPlanner.winningPlan.shards.length); if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { writeRes = assert.commandWorked(collCaseInsensitive.update({a: "foo"}, {$set: {b: 1}})); assert.eq(1, writeRes.nMatched); - // TODO: SERVER-69922 Implement explain for updateOne + explain = collCaseInsensitive.explain().update({a: "foo"}, {$set: {b: 1}}); + assert.commandWorked(explain); + assert.eq(1, explain.queryPlanner.winningPlan.shards.length); } else { // A single (non-multi) update must be single-shard or an exact-ID query. A query is exact-ID if // it diff --git a/jstests/sharding/query/current_op_no_shards.js b/jstests/sharding/query/current_op_no_shards.js index d9af6d3cf19ab..2e3142fe61c0f 100644 --- a/jstests/sharding/query/current_op_no_shards.js +++ b/jstests/sharding/query/current_op_no_shards.js @@ -2,8 +2,8 @@ * Test that running a $currentOp aggregation on a cluster with no shards returns an empty result * set, and does not cause the mongoS floating point failure described in SERVER-30084. * - * Requires no shards so there can't be a catalog shard. - * @tags: [catalog_shard_incompatible] + * Requires no shards so there can't be a config shard. 
+ * @tags: [config_shard_incompatible] */ (function() { const st = new ShardingTest({shards: 0}); diff --git a/jstests/sharding/query/explain_agg_read_pref.js b/jstests/sharding/query/explain_agg_read_pref.js index 7ecdc1230188a..da3ad99ad10f5 100644 --- a/jstests/sharding/query/explain_agg_read_pref.js +++ b/jstests/sharding/query/explain_agg_read_pref.js @@ -1,7 +1,5 @@ /** * Tests that readPref applies on an explain for an aggregation command. - * - * @tags: [temporary_catalog_shard_incompatible] */ (function() { "use strict"; @@ -36,7 +34,7 @@ assert.commandWorked(mongosDB.dropDatabase()); const coll = mongosDB.getCollection("coll"); assert.commandWorked(config.adminCommand({enableSharding: mongosDB.getName()})); -st.ensurePrimaryShard(mongosDB.getName(), "agg_explain_readPref-rs0"); +st.ensurePrimaryShard(mongosDB.getName(), st.shard0.shardName); const rs0Primary = st.rs0.getPrimary(); const rs0Secondary = st.rs0.getSecondary(); const rs1Primary = st.rs1.getPrimary(); diff --git a/jstests/sharding/query/explain_cmd.js b/jstests/sharding/query/explain_cmd.js index 544c426f546e0..249c7e30f4265 100644 --- a/jstests/sharding/query/explain_cmd.js +++ b/jstests/sharding/query/explain_cmd.js @@ -135,15 +135,21 @@ assert.eq(explain.queryPlanner.winningPlan.shards.length, 1); // Check that the upsert didn't actually happen. assert.eq(0, collSharded.count({a: 10})); +// Sharded updateOne that does not target a single shard can now be executed with a two phase +// write protocol that will target at most 1 matching document. if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(collSharded.getDB())) { // Explain an upsert operation which cannot be targeted and verify that it is successful. - // TODO SERVER-69922: Verify expected response. explain = db.runCommand({ explain: {update: collSharded.getName(), updates: [{q: {b: 10}, u: {b: 10}, upsert: true}]}, verbosity: "allPlansExecution" }); - assert.commandWorked(explain, tojson(explain)); - assert.eq(explain.queryPlanner.winningPlan.shards.length, 2); + assert(explain.queryPlanner); + assert(explain.executionStats); + assert.eq(explain.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + assert.eq(explain.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE"); + assert.eq(explain.executionStats.executionStages.stage, "SHARD_WRITE"); + assert.eq(explain.executionStats.inputStage.executionStages.stage, "SHARD_MERGE"); + // Check that the upsert didn't actually happen. assert.eq(0, collSharded.count({b: 10})); } else { diff --git a/jstests/sharding/query/explain_find_and_modify_sharded.js b/jstests/sharding/query/explain_find_and_modify_sharded.js index 65f5dc17d6c3f..29dab239f5ee8 100644 --- a/jstests/sharding/query/explain_find_and_modify_sharded.js +++ b/jstests/sharding/query/explain_find_and_modify_sharded.js @@ -5,6 +5,8 @@ (function() { 'use strict'; +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + var collName = 'explain_find_and_modify'; // Create a cluster with 2 shards. @@ -37,21 +39,53 @@ assert.commandWorked(testDB.adminCommand( var res; -// Queries that do not involve the shard key are invalid. -res = testDB.runCommand( - {explain: {findAndModify: collName, query: {b: 1}, remove: true}, verbosity: 'queryPlanner'}); -assert.commandFailed(res); +// Sharded updateOne that does not target a single shard can now be executed with a two phase +// write protocol that will target at most 1 matching document. 
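Editor's note: both the explain_cmd.js hunk above and the explain_find_and_modify_sharded.js hunk below assert the same explain shape for the two-phase write-without-shard-key protocol. A helper along the following lines could factor out those checks; the SHARD_WRITE and SHARD_MERGE stage names are taken from the assertions in this diff, while the helper itself is only an illustrative sketch and not part of the change.

// Hypothetical helper (not in the diff): verifies the explain output shape produced when a
// write without a shard key goes through the two-phase protocol.
function assertTwoPhaseWriteExplain(explainRes, expectExecutionStats) {
    assert(explainRes.queryPlanner, tojson(explainRes));
    assert.eq(explainRes.queryPlanner.winningPlan.stage, "SHARD_WRITE", tojson(explainRes));
    assert.eq(explainRes.queryPlanner.winningPlan.inputStage.winningPlan.stage,
              "SHARD_MERGE",
              tojson(explainRes));
    if (expectExecutionStats) {
        assert(explainRes.executionStats, tojson(explainRes));
        assert.eq(explainRes.executionStats.executionStages.stage, "SHARD_WRITE", tojson(explainRes));
        assert.eq(explainRes.executionStats.inputStage.executionStages.stage,
                  "SHARD_MERGE",
                  tojson(explainRes));
    } else {
        assert(!explainRes.executionStats, tojson(explainRes));
    }
}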
+if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(testDB)) { + res = assert.commandWorked(testDB.runCommand({ + explain: {findAndModify: collName, query: {b: 1}, remove: true}, + verbosity: 'queryPlanner' + })); -// Queries that have non-equality queries on the shard key are invalid. -res = testDB.runCommand({ - explain: { - findAndModify: collName, - query: {a: {$gt: 5}}, - update: {$inc: {b: 7}}, - }, - verbosity: 'allPlansExecution' -}); -assert.commandFailed(res); + assert(res.queryPlanner); + assert(!res.executionStats); + assert.eq(res.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE"); + + res = assert.commandWorked(testDB.runCommand({ + explain: { + findAndModify: collName, + query: {a: {$gt: 5}}, + update: {$inc: {b: 7}}, + }, + verbosity: 'allPlansExecution' + })); + + assert(res.queryPlanner); + assert(res.executionStats); + assert.eq(res.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE"); + assert.eq(res.executionStats.executionStages.stage, "SHARD_WRITE"); + assert.eq(res.executionStats.inputStage.executionStages.stage, "SHARD_MERGE"); +} else { + // Queries that do not involve the shard key are invalid. + res = testDB.runCommand({ + explain: {findAndModify: collName, query: {b: 1}, remove: true}, + verbosity: 'queryPlanner' + }); + assert.commandFailed(res); + + // Queries that have non-equality queries on the shard key are invalid. + res = testDB.runCommand({ + explain: { + findAndModify: collName, + query: {a: {$gt: 5}}, + update: {$inc: {b: 7}}, + }, + verbosity: 'allPlansExecution' + }); + assert.commandFailed(res); +} // Asserts that the explain command ran on the specified shard and used the given stage // for performing the findAndModify command. diff --git a/jstests/sharding/query/group_plan_cache_sharded.js b/jstests/sharding/query/group_plan_cache_sharded.js index 788d4bd16492e..ea24513ebfa3b 100644 --- a/jstests/sharding/query/group_plan_cache_sharded.js +++ b/jstests/sharding/query/group_plan_cache_sharded.js @@ -12,10 +12,7 @@ * expects_explicit_underscore_id_index, * ] */ -(function() { -"use strict"; - -load("jstests/libs/sbe_util.js"); // For 'checkSBEEnabled'. +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const st = new ShardingTest({shards: 2, rs: {nodes: 1}}); const mongosDB = st.s.getDB(jsTestName()); @@ -25,7 +22,7 @@ const mongosDB = st.s.getDB(jsTestName()); if (!checkSBEEnabled(mongosDB)) { jsTestLog("Skipping test because SBE is not enabled"); st.stop(); - return; + quit(); } const collName = jsTestName(); @@ -96,5 +93,4 @@ for (const cacheEntry of cacheEntries) { assert.eq(nonMergingKeyCount, 1, tojson(cacheEntries)); assert.eq(mergingKeyCount, 2, tojson(cacheEntries)); -st.stop(); -}()); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/query/lookup_mongod_unaware.js b/jstests/sharding/query/lookup_mongod_unaware.js index ab7292b790fa0..0e98c29dec612 100644 --- a/jstests/sharding/query/lookup_mongod_unaware.js +++ b/jstests/sharding/query/lookup_mongod_unaware.js @@ -6,10 +6,8 @@ * We restart a mongod to cause it to forget that a collection was sharded. When restarted, we * expect it to still have all the previous data. 
* - * // TODO (SERVER-74380): Remove requires_fcv_70 once SERVER-74380 has been backported to v6.0 * @tags: [ * requires_persistence, - * requires_fcv_70 * ] * */ diff --git a/jstests/sharding/query/lookup_on_shard.js b/jstests/sharding/query/lookup_on_shard.js index 7e9583d212db3..421b68d0ba5ea 100644 --- a/jstests/sharding/query/lookup_on_shard.js +++ b/jstests/sharding/query/lookup_on_shard.js @@ -29,7 +29,7 @@ const runTest = function() { (function testSingleLookupFromShard() { // Run a pipeline which must be merged on a shard. This should force the $lookup (on // the sharded collection) to be run on a mongod. - pipeline = [ + let pipeline = [ {$_internalSplitPipeline: {mergeType: "anyShard"}}, { $lookup: { @@ -50,7 +50,7 @@ const runTest = function() { (function testMultipleLookupsFromShard() { // Run two lookups in a row (both on mongod). - pipeline = [ + let pipeline = [ {$_internalSplitPipeline: {mergeType: "anyShard"}}, { $lookup: { @@ -79,7 +79,7 @@ const runTest = function() { (function testUnshardedLookupWithinShardedLookup() { // Pipeline with unsharded $lookup inside a sharded $lookup. - pipeline = [ + let pipeline = [ {$_internalSplitPipeline: {mergeType: "anyShard"}}, { $lookup: { diff --git a/jstests/sharding/query/map_reduce_scope.js b/jstests/sharding/query/map_reduce_scope.js index 8657a99afdb9b..e66abcc831e98 100644 --- a/jstests/sharding/query/map_reduce_scope.js +++ b/jstests/sharding/query/map_reduce_scope.js @@ -16,20 +16,19 @@ function runTest(coll) { emit(xx.val, this.a); }; const reduce = function(key, values) { - return {reduce: Array.sum(values) + xx.val}; + return {reduce: xx.val + 1}; }; const finalize = function(key, values) { - values.finalize = xx.val + 1; + values.finalize = xx.val + 2; return values; }; const res = assert.commandWorked( coll.mapReduce(map, reduce, {finalize: finalize, out: {inline: 1}, scope: {xx: {val: 9}}})); - assert.eq(9, res.results[0].value.reduce); - assert.eq(10, res.results[0].value.finalize); + assert.eq(res.results.length, 1, res); + assert.eq(res.results[0], {_id: 9, value: {reduce: 10, finalize: 11}}, res); } -assert.commandWorked(coll.insert({a: -4})); -assert.commandWorked(coll.insert({a: 4})); +assert.commandWorked(coll.insert({})); // Run test when a single shard is targetted. runTest(coll); diff --git a/jstests/sharding/query/merge_nondefault_read_concern.js b/jstests/sharding/query/merge_nondefault_read_concern.js new file mode 100644 index 0000000000000..70df9207bd8f1 --- /dev/null +++ b/jstests/sharding/query/merge_nondefault_read_concern.js @@ -0,0 +1,39 @@ +/** + * Tests that $merge doesn't fail when a non-default readConcern is + * set on the session. + * @tags: [requires_fcv_71] + */ +(function() { +"use strict"; + +load("jstests/aggregation/extras/merge_helpers.js"); + +const st = new ShardingTest({shards: 2, rs: {nodes: 1}}); + +const mongosDB = st.s0.getDB("merge_nondefault_read_concern"); +const source = mongosDB["source"]; +const target = mongosDB["target"]; + +assert.commandWorked(mongosDB.adminCommand({enableSharding: mongosDB.getName()})); + +const baseMergeCommand = { + aggregate: "source", + pipeline: + [{$merge: {into: "target", on: "_id", whenMatched: "replace", whenNotMatched: "insert"}}], + cursor: {}, +}; + +// Test with command level override. +var withReadConcern = baseMergeCommand; +withReadConcern.readConcern = { + level: "majority" +}; +assert.commandWorked(mongosDB.runCommand(withReadConcern)); + +// Test with global override. 
+assert.commandWorked( + mongosDB.adminCommand({"setDefaultRWConcern": 1, "defaultReadConcern": {level: "majority"}})); +assert.commandWorked(mongosDB.runCommand(baseMergeCommand)); + +st.stop(); +}()); diff --git a/jstests/sharding/query/merge_on_fields.js b/jstests/sharding/query/merge_on_fields.js index 4d0889caba67d..1bff38be06baf 100644 --- a/jstests/sharding/query/merge_on_fields.js +++ b/jstests/sharding/query/merge_on_fields.js @@ -1,9 +1,7 @@ // Tests that the "on" fields are correctly automatically generated when the user does not specify // it in the $merge stage. -(function() { -"use strict"; +import {getAggPlanStage} from "jstests/libs/analyze_plan.js"; -load("jstests/libs/analyze_plan.js"); // For 'getAggPlanStage'. load("jstests/aggregation/extras/merge_helpers.js"); // For withEachMergeMode. const st = new ShardingTest({shards: 2, rs: {nodes: 1}}); @@ -83,5 +81,4 @@ withEachMergeMode(({whenMatchedMode, whenNotMatchedMode}) => { assert.eq(["_id"], getAggPlanStage(explainResult, "$merge").$merge.on); }); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/query/metadata_removal.js b/jstests/sharding/query/metadata_removal.js index 298d1f74099a3..495817bf6a1af 100644 --- a/jstests/sharding/query/metadata_removal.js +++ b/jstests/sharding/query/metadata_removal.js @@ -43,4 +43,4 @@ try { } finally { st.stop(); } -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/query/mongos_query_comment.js b/jstests/sharding/query/mongos_query_comment.js index 873f8017960f1..3d225d39039f5 100644 --- a/jstests/sharding/query/mongos_query_comment.js +++ b/jstests/sharding/query/mongos_query_comment.js @@ -53,4 +53,4 @@ profilerHasSingleMatchingEntryOrThrow({ }); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/query/owning_shard_expression.js b/jstests/sharding/query/owning_shard_expression.js index a735654f86d90..23c357af929ec 100644 --- a/jstests/sharding/query/owning_shard_expression.js +++ b/jstests/sharding/query/owning_shard_expression.js @@ -14,7 +14,6 @@ const st = new ShardingTest({ config: 1, shards: 3, }); -const mongos = st.s; const dbName = jsTestName(); const db = st.getDB(dbName); const sourceColl = db["source"]; @@ -61,14 +60,23 @@ function assertOwningShardExpressionResults(shardVersion, expectedResult) { // Asserts that $_internalOwningShard expression fails when routing information is stale. function assertOwningShardExpressionFailure(shardVersion) { + let expectedErrorCodes = [ErrorCodes.ShardCannotRefreshDueToLocksHeld]; + + // TODO SERVER-78379: Remove once 8.0 becomes last-lts. If fcv is lower than 7.1, + // $_internalOwningShard can throw StaleConfig when routing information is stale. 
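Editor's note on the new merge_nondefault_read_concern.js test above: `var withReadConcern = baseMergeCommand;` copies a reference rather than the object, so the readConcern added for the command-level case is still attached when baseMergeCommand is rerun for the global-override case. If the two cases are meant to be independent, a shallow copy keeps them separate; the following is a suggested sketch, not part of the change.

// Suggested sketch (not in the diff): build an independent command object so the
// global-override case runs without a command-level readConcern.
const withReadConcern = Object.assign({}, baseMergeCommand, {readConcern: {level: "majority"}});
assert.commandWorked(mongosDB.runCommand(withReadConcern));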
+ const fcvResult = assert.commandWorked( + st.shard0.getDB(db).adminCommand({getParameter: 1, featureCompatibilityVersion: 1})); + if (MongoRunner.compareBinVersions(fcvResult.featureCompatibilityVersion.version, "7.1") < 0) { + expectedErrorCodes.push(ErrorCodes.StaleConfig); + } + const projectionStage = buildProjectionStageWithOwningShardExpression(shardVersion); - assert.commandFailedWithCode( - db.runCommand({ - aggregate: sourceColl.getName(), - pipeline: [projectionStage, {$sort: {"indexData._id": 1}}], - cursor: {} - }), - [ErrorCodes.StaleConfig, ErrorCodes.ShardCannotRefreshDueToLocksHeld]); + assert.commandFailedWithCode(db.runCommand({ + aggregate: sourceColl.getName(), + pipeline: [projectionStage, {$sort: {"indexData._id": 1}}], + cursor: {} + }), + expectedErrorCodes); // Assert the expression fails while executing on the mongos. assert.commandFailedWithCode(db.runCommand({ diff --git a/jstests/sharding/query/pipeline_length_limit.js b/jstests/sharding/query/pipeline_length_limit.js index aad69742163f6..8deb227882727 100644 --- a/jstests/sharding/query/pipeline_length_limit.js +++ b/jstests/sharding/query/pipeline_length_limit.js @@ -1,5 +1,8 @@ /** * Confirms that the limit on number of aggregragation pipeline stages is respected. + * @tags: [ + * requires_fcv_71, + * ] */ (function() { "use strict"; @@ -10,6 +13,29 @@ function testLimits(testDB, lengthLimit) { let maxLength = lengthLimit; let tooLarge = lengthLimit + 1; + // Test that the enforced pre-parse length limit is the same as the post-parse limit. + // We use $count because it is desugared into two separate stages, so it will pass the pre-parse + // limit but fail after. + let kPreParseErrCode = 7749501; + let kPostParseErrCode = 5054701; + + // 1. This test case will pass the pre-parse enforcer but fail after. + assert.commandFailedWithCode(testDB.runCommand({ + aggregate: "test", + cursor: {}, + pipeline: new Array(maxLength).fill({$count: "thecount"}) + }), + kPostParseErrCode); + + // 2. This test case should be caught by the pre-parse enforcer, and the error code reflects + // that. 
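Editor's note on the arithmetic behind the $count-based cases in the pipeline_length_limit.js hunk above: per the comment in the hunk, each $count desugars into two stages, so a pipeline that is exactly maxLength stages long before parsing roughly doubles afterwards. A small illustration, reusing the maxLength, tooLarge, and error-code names defined above:

// Illustration only: stage counts before and after $count desugaring.
const pipelineAtLimit = new Array(maxLength).fill({$count: "thecount"});
// pipelineAtLimit.length == maxLength        -> passes the pre-parse check,
// but after desugaring holds ~2 * maxLength stages -> fails with kPostParseErrCode.
const pipelineOverLimit = new Array(tooLarge).fill({$count: "thecount"});
// pipelineOverLimit.length == maxLength + 1  -> rejected up front with kPreParseErrCode.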
+ assert.commandFailedWithCode(testDB.runCommand({ + aggregate: "test", + cursor: {}, + pipeline: new Array(tooLarge).fill({$count: "thecount"}) + }), + kPreParseErrCode); + assert.commandWorked(testDB.runCommand({ aggregate: "test", cursor: {}, @@ -20,7 +46,7 @@ function testLimits(testDB, lengthLimit) { cursor: {}, pipeline: new Array(tooLarge).fill({$project: {_id: 1}}) }), - ErrorCodes.FailedToParse); + kPreParseErrCode); testDB.setLogLevel(1); assert.commandWorked(testDB.runCommand({ @@ -36,7 +62,7 @@ function testLimits(testDB, lengthLimit) { pipeline: [{$unionWith: {coll: "test", pipeline: new Array(tooLarge).fill({$project: {_id: 1}})}}] }), - ErrorCodes.FailedToParse); + kPreParseErrCode); assert.commandWorked(testDB.runCommand({ aggregate: "test", @@ -48,7 +74,7 @@ function testLimits(testDB, lengthLimit) { cursor: {}, pipeline: [{$facet: {foo: new Array(tooLarge).fill({$project: {_id: 1}}), bar: []}}] }), - ErrorCodes.FailedToParse); + kPreParseErrCode); assert.commandWorked(testDB.runCommand( {update: "test", updates: [{q: {}, u: new Array(maxLength).fill({$project: {_id: 1}})}]})); @@ -56,7 +82,7 @@ function testLimits(testDB, lengthLimit) { update: "test", updates: [{q: {}, u: new Array(tooLarge).fill({$project: {_id: 1}})}] }), - ErrorCodes.FailedToParse); + kPreParseErrCode); const collname = "test"; @@ -141,7 +167,7 @@ function testLimits(testDB, lengthLimit) { {from: "test", as: "as", pipeline: new Array(tooLarge).fill({$project: {_id: 1}})} }] }), - ErrorCodes.FailedToParse); + [kPostParseErrCode, kPreParseErrCode]); } function runTest(lengthLimit, mongosConfig = {}, mongodConfig = {}) { diff --git a/jstests/sharding/query/shard_key_prefix_with_in_operator.js b/jstests/sharding/query/shard_key_prefix_with_in_operator.js index 8b9548e457142..c395f29c8da01 100644 --- a/jstests/sharding/query/shard_key_prefix_with_in_operator.js +++ b/jstests/sharding/query/shard_key_prefix_with_in_operator.js @@ -19,4 +19,4 @@ assert.commandWorked(db.adminCommand({shardCollection: coll.getFullName(), key: assert.doesNotThrow(() => coll.find({a: {$in: [/myRegex/, 1]}}).toArray()); shardingTest.stop(); -}()); \ No newline at end of file +}()); diff --git a/jstests/sharding/query/views.js b/jstests/sharding/query/views.js index 0dde8f067361e..bf23ef44abdb4 100644 --- a/jstests/sharding/query/views.js +++ b/jstests/sharding/query/views.js @@ -4,33 +4,54 @@ * requires_fcv_63, * ] */ -(function() { -"use strict"; - // For profilerHasSingleMatchingEntryOrThrow. load("jstests/libs/profiler.js"); -// For checkSBEEnabled. -load("jstests/libs/sbe_util.js"); // For areAllCollectionsClustered. load("jstests/libs/clustered_collections/clustered_collection_util.js"); +// Legal values for the verifyExplainResult() 'optimizedAwayPipeline' argument. +const kOptFalse = 0; +const kOptTrue = 1; +const kOptEither = 2; + // Given sharded explain output in 'shardedExplain', verifies that the explain mode 'verbosity' // affected the output verbosity appropriately, and that the response has the expected format. -// Set 'optimizedAwayPipeline' to true if the pipeline is expected to be optimized away. 
+// Set 'optimizedAwayPipeline' to: +// kOptTrue if the pipeline is expected to be optimized away +// kOptFalse if the pipeline is expected to be present +// kOptEither if the call does not know so must accept either of the prior two cases function verifyExplainResult( - {shardedExplain = null, verbosity = "", optimizedAwayPipeline = false} = {}) { + {shardedExplain = null, verbosity = "", optimizedAwayPipeline = kOptFalse} = {}) { assert.commandWorked(shardedExplain); assert(shardedExplain.hasOwnProperty("shards"), tojson(shardedExplain)); + + // Verifies the explain for each shard. for (let elem in shardedExplain.shards) { let shard = shardedExplain.shards[elem]; let root; - if (optimizedAwayPipeline) { + + // Resolve 'kOptEither' to 'kOptTrue' or 'kOptFalse' for the current shard. If 'shard' has a + // "queryPlanner" property, this means the pipeline has been optimized away. (When the + // pipeline is present, "queryPlanner" is instead a property of shard.stages[0].$cursor.) + let optedAwayOnThisShard = optimizedAwayPipeline; + if (optedAwayOnThisShard == kOptEither) { + if (shard.hasOwnProperty("queryPlanner")) { + optedAwayOnThisShard = kOptTrue; + } else { + optedAwayOnThisShard = kOptFalse; + } + } + + // Verify the explain output. + if (optedAwayOnThisShard == kOptTrue) { assert(shard.hasOwnProperty("queryPlanner"), tojson(shardedExplain)); root = shard; - } else { + } else if (optedAwayOnThisShard == kOptFalse) { assert(shard.stages[0].hasOwnProperty("$cursor"), tojson(shardedExplain)); assert(shard.stages[0].$cursor.hasOwnProperty("queryPlanner"), tojson(shardedExplain)); root = shard.stages[0].$cursor; + } else { + assert(false, `Unsupported 'optimizedAwayPipeline' value ${optimizedAwayPipeline}`); } if (verbosity === "queryPlanner") { assert(!root.hasOwnProperty("executionStats"), tojson(shardedExplain)); @@ -81,11 +102,11 @@ assert.eq(5, view.find({a: {$lte: 8}}).itcount()); let result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}}); verifyExplainResult( - {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true}); + {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: kOptTrue}); for (let verbosity of explainVerbosities) { result = db.runCommand({explain: {find: "view", filter: {a: {$lte: 7}}}, verbosity: verbosity}); verifyExplainResult( - {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true}); + {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: kOptTrue}); } // @@ -96,19 +117,19 @@ assert.eq(5, view.aggregate([{$match: {a: {$lte: 8}}}]).itcount()); // Test that the explain:true flag for the aggregate command results in queryPlanner verbosity. 
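Editor's note: the kOptEither value introduced in the views.js hunk above is resolved per shard inside verifyExplainResult by checking where "queryPlanner" lives. Pulled out on its own, that decision is roughly the following; this restates the hunk's logic for readability and adds no new behavior.

// Restated from the hunk above: a top-level "queryPlanner" on a shard's explain means the
// pipeline was optimized away on that shard; otherwise it sits under the $cursor stage.
function resolveOptimizedAway(shardExplain, optimizedAwayPipeline) {
    if (optimizedAwayPipeline !== kOptEither) {
        return optimizedAwayPipeline;
    }
    return shardExplain.hasOwnProperty("queryPlanner") ? kOptTrue : kOptFalse;
}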
result = db.runCommand({aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], explain: true}); verifyExplainResult( - {shardedExplain: result, verbosity: "queryPlanner", optimizedAwayPipeline: true}); + {shardedExplain: result, verbosity: "queryPlanner", optimizedAwayPipeline: kOptTrue}); result = db.runCommand({explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}}}); verifyExplainResult( - {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: true}); + {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: kOptTrue}); for (let verbosity of explainVerbosities) { result = db.runCommand({ explain: {aggregate: "view", pipeline: [{$match: {a: {$lte: 8}}}], cursor: {}}, verbosity: verbosity }); verifyExplainResult( - {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: true}); + {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: kOptTrue}); } // @@ -116,25 +137,18 @@ for (let verbosity of explainVerbosities) { // assert.eq(5, view.count({a: {$lte: 8}})); -// If SBE is enabled on all nodes, then validate the explain results for a count command. We could -// validate the explain results when the classic engine is enabled, but doing so is complicated for -// multiversion scenarios (e.g. in the multiversion passthrough on the classic engine build variant -// only some shards have SBE enabled, so the expected results differ across shards). -if (checkSBEEnabled(db, [], true /*checkAllNodes*/)) { - result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}}); - // Allow success whether or not the pipeline is optimized away, as it differs based on test - // environment and execution engine used. - verifyExplainResult({ - shardedExplain: result, - verbosity: "allPlansExecution", - optimizedAwayPipeline: !isClustered - }); - for (let verbosity of explainVerbosities) { - result = - db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}, verbosity: verbosity}); - verifyExplainResult( - {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: !isClustered}); - } +// "count" on a view that is a $match will produce different explain output on Classic vs SBE, as +// the query will be rewriten as a $group, but only SBE has a $group pushdown feature, which +// optimizes away the pipeline. Depending on build variant and engine selection flags, as well as +// specific configurations of individual nodes in multiversion clusters, we may get either the +// Classic or SBE explain variant, so here we accept either one ('kOptEither'). +result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}}); +verifyExplainResult( + {shardedExplain: result, verbosity: "allPlansExecution", optimizedAwayPipeline: kOptEither}); +for (let verbosity of explainVerbosities) { + result = db.runCommand({explain: {count: "view", query: {a: {$lte: 8}}}, verbosity: verbosity}); + verifyExplainResult( + {shardedExplain: result, verbosity: verbosity, optimizedAwayPipeline: kOptEither}); } // @@ -186,4 +200,3 @@ profilerHasSingleMatchingEntryOrThrow({ }); st.stop(); -})(); diff --git a/jstests/sharding/query_config.js b/jstests/sharding/query_config.js index 66bbdb3d7e363..7fca4af4c611f 100644 --- a/jstests/sharding/query_config.js +++ b/jstests/sharding/query_config.js @@ -234,7 +234,16 @@ var queryConfigChunks = function(st) { } }; var reduceFunction = function(key, values) { - return {chunks: values.length}; + // We may be re-reducing values that have already been partially reduced. 
In that case, we + // expect to see an object like {chunks: } in the array of input values. + const numValues = values.reduce(function(acc, currentValue) { + if (typeof currentValue === "object") { + return acc + currentValue.chunks; + } else { + return acc + 1; + } + }, 0); + return {chunks: numValues}; }; result = configDB.chunks.mapReduce( mapFunction, @@ -330,7 +339,16 @@ var queryUserCreated = function(database) { emit(this.g, 1); }; var reduceFunction = function(key, values) { - return {count: values.length}; + // We may be re-reducing values that have already been partially reduced. In that case, we + // expect to see an object like {count: } in the array of input values. + const numValues = values.reduce(function(acc, currentValue) { + if (typeof currentValue === "object") { + return acc + currentValue.count; + } else { + return acc + 1; + } + }, 0); + return {count: numValues}; }; result = userColl.mapReduce(mapFunction, reduceFunction, {out: {inline: 1}}); assert.eq(result.ok, 1); diff --git a/jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js b/jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js index 518ffb6ee39c7..a3e796dc42dcb 100644 --- a/jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js +++ b/jstests/sharding/range_deleter_interacts_correctly_with_refine_shard_key.js @@ -5,14 +5,9 @@ // Cannot run the filtering metadata check on tests that run refineCollectionShardKey. TestData.skipCheckShardFilteringMetadata = true; -(function() { - -"use strict"; - load("jstests/libs/fail_point_util.js"); load('jstests/libs/parallel_shell_helpers.js'); load('jstests/replsets/rslib.js'); -load('jstests/libs/feature_flag_util.js'); TestData.skipCheckingUUIDsConsistentAcrossCluster = true; @@ -151,10 +146,6 @@ function test(st, description, testBody) { let hangDonorAtEndOfMigration = configureFailPoint(st.rs1.getPrimary(), "moveChunkHangAtStep6"); - // Increase timeout for range deletion of overlapping range on recipient. - st.shard0.rs.getPrimary().adminCommand( - {setParameter: 1, receiveChunkWaitForRangeDeleterTimeoutMS: 90000}); - // Attempt to move the chunk back to shard 0. Synchronize with the parallel shell to // make sure that the moveChunk started. let hangOnStep1 = configureFailPoint(st.rs1.getPrimary(), "moveChunkHangAtStep1"); @@ -256,4 +247,3 @@ function test(st, description, testBody) { st.stop(); })(); -})(); diff --git a/jstests/sharding/read_after_optime.js b/jstests/sharding/read_after_optime.js index 0936a3f560b2e..d51682e49036f 100644 --- a/jstests/sharding/read_after_optime.js +++ b/jstests/sharding/read_after_optime.js @@ -3,7 +3,7 @@ (function() { 'use strict'; -var shardingTest = new ShardingTest({shards: TestData.catalogShard ? 1 : 0}); +var shardingTest = new ShardingTest({shards: TestData.configShard ? 1 : 0}); assert(shardingTest.configRS, 'this test requires config servers to run in CSRS mode'); diff --git a/jstests/sharding/read_committed_lookup.js b/jstests/sharding/read_committed_lookup.js index 0bbf1cfbf79de..274a47cd4974d 100644 --- a/jstests/sharding/read_committed_lookup.js +++ b/jstests/sharding/read_committed_lookup.js @@ -39,6 +39,9 @@ let shardSecondary = rst.getSecondary(); let st = new ShardingTest({ manualAddShard: true, }); +if (TestData.configShard) { + assert.commandWorked(st.s.adminCommand({transitionFromDedicatedConfigServer: 1})); +} // The default WC is majority and this test can't satisfy majority writes. 
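Editor's note, stepping back to the query_config.js reducers above: both were changed because mapReduce may re-invoke reduce on values that are themselves earlier reduce outputs, so the function must be associative over its own output rather than just counting array length. A minimal illustration of that property, reusing the {chunks: N} shape from the hunk; the sample inputs are invented for the example.

// Re-reduce property: reducing everything at once and reducing partial results must agree.
const reduceChunks = function(key, values) {
    return {
        chunks: values.reduce((acc, v) => acc + (typeof v === "object" ? v.chunks : 1), 0)
    };
};
const onePass = reduceChunks("shard0", [1, 1, 1]);                              // {chunks: 3}
const rereduced = reduceChunks("shard0", [reduceChunks("shard0", [1, 1]), 1]);  // {chunks: 3}
assert.eq(onePass.chunks, rereduced.chunks);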
assert.commandWorked(st.s.adminCommand( {setDefaultRWConcern: 1, defaultWriteConcern: {w: 1}, writeConcern: {w: "majority"}})); diff --git a/jstests/sharding/read_pref_with_hedging_mode.js b/jstests/sharding/read_pref_with_hedging_mode.js index 408d93e4ca720..b1e6d85aa7a1f 100644 --- a/jstests/sharding/read_pref_with_hedging_mode.js +++ b/jstests/sharding/read_pref_with_hedging_mode.js @@ -46,4 +46,4 @@ assert.commandWorked(st.s.getDB(dbName).runCommand( {distinct: collName, key: "x", $readPreference: {mode: "primaryPreferred", hedge: {}}})); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/read_write_concern_defaults_application.js b/jstests/sharding/read_write_concern_defaults_application.js index d2cf98abf4c98..0b4fb1e03238e 100644 --- a/jstests/sharding/read_write_concern_defaults_application.js +++ b/jstests/sharding/read_write_concern_defaults_application.js @@ -33,6 +33,7 @@ load('jstests/libs/profiler.js'); load('jstests/sharding/libs/last_lts_mongod_commands.js'); +load('jstests/sharding/libs/last_lts_mongos_commands.js'); // TODO SERVER-50144 Remove this and allow orphan checking. // This test calls removeShard which can leave docs in config.rangeDeletions in state "pending", @@ -112,16 +113,17 @@ let testCases = { _configsvrRemoveShardFromZone: {skip: "internal command"}, _configsvrRemoveTags: {skip: "internal command"}, _configsvrRenameCollection: {skip: "internal command"}, - _configsvrRenameCollectionMetadata: {skip: "internal command"}, _configsvrRepairShardedCollectionChunksHistory: {skip: "internal command"}, + _configsvrResetPlacementHistory: {skip: "internal command"}, _configsvrReshardCollection: {skip: "internal command"}, _configsvrRunRestore: {skip: "internal command"}, _configsvrSetAllowMigrations: {skip: "internal command"}, _configsvrSetClusterParameter: {skip: "internal command"}, _configsvrSetUserWriteBlockMode: {skip: "internal command"}, - _configsvrTransitionToCatalogShard: {skip: "internal command"}, + _configsvrTransitionFromDedicatedConfigServer: {skip: "internal command"}, _configsvrTransitionToDedicatedConfigServer: {skip: "internal command"}, _configsvrUpdateZoneKeyRange: {skip: "internal command"}, + _dropConnectionsToMongot: {skip: "internal command"}, _flushDatabaseCacheUpdates: {skip: "internal command"}, _flushDatabaseCacheUpdatesWithWriteConcern: {skip: "internal command"}, _flushReshardingStateChange: {skip: "internal command"}, @@ -135,6 +137,7 @@ let testCases = { _killOperations: {skip: "internal command"}, _mergeAuthzCollections: {skip: "internal command"}, _migrateClone: {skip: "internal command"}, + _mongotConnPoolStats: {skip: "internal command"}, _movePrimaryRecipientSyncData: {skip: "internal command"}, _movePrimaryRecipientAbortMigration: {skip: "internal command"}, _movePrimaryRecipientForgetMigration: {skip: "internal command"}, @@ -150,6 +153,7 @@ let testCases = { _shardsvrRegisterIndex: {skip: "internal command"}, _shardsvrCheckMetadataConsistency: {skip: "internal command"}, _shardsvrCheckMetadataConsistencyParticipant: {skip: "internal command"}, + _shardsvrCleanupStructuredEncryptionData: {skip: "internal command"}, _shardsvrCommitIndexParticipant: {skip: "internal command"}, _shardsvrCommitReshardCollection: {skip: "internal command"}, _shardsvrCompactStructuredEncryptionData: {skip: "internal command"}, @@ -158,8 +162,6 @@ let testCases = { _shardsvrCreateGlobalIndex: {skip: "internal command"}, _shardsvrDropGlobalIndex: {skip: "internal command"}, _shardsvrDropCollection: {skip: "internal 
command"}, - // TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS. - _shardsvrDropCollectionIfUUIDNotMatching: {skip: "internal command"}, _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern: {skip: "internal command"}, _shardsvrDropCollectionParticipant: {skip: "internal command"}, _shardsvrUnregisterIndex: {skip: "internal command"}, @@ -195,7 +197,14 @@ let testCases = { _shardsvrCollMod: {skip: "internal command"}, _shardsvrCollModParticipant: {skip: "internal command"}, _shardsvrParticipantBlock: {skip: "internal command"}, - _startStreamProcessor: {skip: "internal command"}, + streams_startStreamProcessor: {skip: "internal command"}, + streams_startStreamSample: {skip: "internal command"}, + streams_stopStreamProcessor: {skip: "internal command"}, + streams_listStreamProcessors: {skip: "internal command"}, + streams_getMoreStreamSample: {skip: "internal command"}, + streams_getStats: {skip: "internal command"}, + streams_testOnlyInsert: {skip: "internal command"}, + streams_getMetrics: {skip: "internal command"}, _transferMods: {skip: "internal command"}, _vectorClockPersist: {skip: "internal command"}, abortReshardCollection: {skip: "does not accept read or write concern"}, @@ -253,12 +262,31 @@ let testCases = { balancerStatus: {skip: "does not accept read or write concern"}, balancerStop: {skip: "does not accept read or write concern"}, buildInfo: {skip: "does not accept read or write concern"}, - bulkWrite: {skip: "not yet implemented"}, + bulkWrite: { + // TODO SERVER-52419: Run this test and remove the skip. + // setUp: function(conn) { + // assert.commandWorked(conn.getDB(db).runCommand({create: coll, writeConcern: {w: + // 1}})); + // }, + // db: "admin", + // command: { + // bulkWrite: 1, + // ops: [{insert: 0, document: {_id: ObjectId()}}], + // nsInfo: [{ns: db + "." + coll}] + // }, + // checkReadConcern: false, + // checkWriteConcern: true, + // // TODO SERVER-78258: Depending on what profiling behavior we implement we may be able to + // // use profiler output here instead rather than logs. 
+ // useLogs: true, + skip: "requires feature flag" + }, captrunc: {skip: "test command"}, checkMetadataConsistency: {skip: "does not accept read or write concern"}, checkShardingIndex: {skip: "does not accept read or write concern"}, cleanupOrphaned: {skip: "only on shard server"}, cleanupReshardCollection: {skip: "does not accept read or write concern"}, + cleanupStructuredEncryptionData: {skip: "does not accept read or write concern"}, clearJumboFlag: {skip: "does not accept read or write concern"}, clearLog: {skip: "does not accept read or write concern"}, clone: {skip: "deprecated"}, @@ -272,6 +300,7 @@ let testCases = { }, clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"}, clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"}, + clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"}, clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"}, clusterCount: {skip: "already tested by 'count' tests on mongos"}, clusterDelete: {skip: "already tested by 'delete' tests on mongos"}, @@ -507,23 +536,7 @@ let testCases = { getLog: {skip: "does not accept read or write concern"}, getMore: {skip: "does not accept read or write concern"}, getParameter: {skip: "does not accept read or write concern"}, - getQueryableEncryptionCountInfo: { - // TODO SERVER-75631 - Enable this test once the feature flag is gone - skip: "requires feature flag" - // setUp: function(conn) { - // assert.commandWorked(conn.getCollection(nss).insert({x: 1}, {writeConcern: {w: 1}})); - // }, - // command: { - // getQueryableEncryptionCountInfo: coll, - // tokens: [ - // {tokens: [{"s": BinData(0, "lUBO7Mov5Sb+c/D4cJ9whhhw/+PZFLCk/AQU2+BpumQ=")}]}, - // ], - // "forInsert": true, - // }, - // checkReadConcern: true, - // checkWriteConcern: false, - // useLogs: true, - }, + getQueryableEncryptionCountInfo: {skip: "not profiled or logged"}, getShardMap: {skip: "internal command"}, getShardVersion: {skip: "internal command"}, getnonce: {skip: "removed in v6.3"}, @@ -668,6 +681,7 @@ let testCases = { replSetTest: {skip: "does not accept read or write concern"}, replSetTestEgress: {skip: "does not accept read or write concern"}, replSetUpdatePosition: {skip: "does not accept read or write concern"}, + resetPlacementHistory: {skip: "does not accept read or write concern"}, reshardCollection: {skip: "does not accept read or write concern"}, resync: {skip: "does not accept read or write concern"}, revokePrivilegesFromRole: { @@ -760,7 +774,7 @@ let testCases = { testVersions1And2: {skip: "does not accept read or write concern"}, testVersion2: {skip: "does not accept read or write concern"}, top: {skip: "does not accept read or write concern"}, - transitionToCatalogShard: {skip: "does not accept read or write concern"}, + transitionFromDedicatedConfigServer: {skip: "does not accept read or write concern"}, transitionToDedicatedConfigServer: {skip: "does not accept read or write concern"}, update: { setUp: function(conn) { @@ -813,6 +827,10 @@ commandsRemovedFromMongodSinceLastLTS.forEach(function(cmd) { testCases[cmd] = {skip: "must define test coverage for backwards compatibility"}; }); +commandsRemovedFromMongosSinceLastLTS.forEach(function(cmd) { + testCases[cmd] = {skip: "must define test coverage for backwards compatibility"}; +}); + // Running setDefaultRWConcern in the middle of a scenario would define defaults when there // shouldn't be for subsequently-tested commands. 
Thus it is special-cased to be run at the end of // the scenario. diff --git a/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js b/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js index 55445e5e5727b..8f96e8bd4fc69 100644 --- a/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js +++ b/jstests/sharding/reconfig_fails_no_cwwc_set_sharding.js @@ -2,8 +2,6 @@ * Test that a reconfig for a shard that would change the implicit default write concern to w:1 * fails if CWWC is not set. * - * Temporary catalog shard incompatible because it hits a sharding metadata hook failure on cluster - * shutdown. * @tags: [ * requires_majority_read_concern, * requires_persistence, @@ -14,8 +12,10 @@ (function() { 'use strict'; -// TODO SERVER-75820: Investigate why a shard node doesn't have metadata at test shutdown. -TestData.skipCheckShardFilteringMetadata = true; +// Adds a shard near the end of the test that won't have metadata for the sessions collection during +// test shutdown. This is only a problem with a config shard because otherwise there are no shards +// so the sessions collection can't be created. +TestData.skipCheckShardFilteringMetadata = TestData.configShard; load("jstests/replsets/rslib.js"); // For reconfig, isConfigCommitted and // safeReconfigShouldFail. @@ -89,7 +89,7 @@ shardServer = new ReplSetTest( shardServer.startSet(); shardServer.initiateWithHighElectionTimeout(); -const st = new ShardingTest({shards: TestData.catalogShard ? 1 : 0, mongos: 1}); +const st = new ShardingTest({shards: TestData.configShard ? 1 : 0, mongos: 1}); var admin = st.getDB('admin'); jsTestLog("Adding the shard to the cluster should succeed."); diff --git a/jstests/sharding/reconfig_race_with_failover.js b/jstests/sharding/reconfig_race_with_failover.js index a63c40596cf37..32d7183a459ce 100644 --- a/jstests/sharding/reconfig_race_with_failover.js +++ b/jstests/sharding/reconfig_race_with_failover.js @@ -17,6 +17,7 @@ TestData.skipCheckDBHashes = true; const st = new ShardingTest({shards: {rs0: {nodes: [{}, {}, {rsConfig: {priority: 0}}]}}}); const rst = st.rs0; const primary = rst.getPrimary(); +const nodes = rst.nodes; if (primary !== nodes[0]) { st.stop(); return; // For simplicity. diff --git a/jstests/sharding/refine_collection_shard_key_basic.js b/jstests/sharding/refine_collection_shard_key_basic.js index acdc9bda09b63..fed32c198e5e8 100644 --- a/jstests/sharding/refine_collection_shard_key_basic.js +++ b/jstests/sharding/refine_collection_shard_key_basic.js @@ -1,13 +1,10 @@ // // Basic tests for refineCollectionShardKey. // +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; // Cannot run the filtering metadata check on tests that run refineCollectionShardKey. TestData.skipCheckShardFilteringMetadata = true; - -(function() { -'use strict'; -load("jstests/libs/catalog_shard_util.js"); load('jstests/libs/fail_point_util.js'); load('jstests/libs/profiler.js'); load('jstests/sharding/libs/shard_versioning_util.js'); @@ -306,11 +303,9 @@ assert.commandFailedWithCode( mongos.adminCommand({refineCollectionShardKey: kNsName, key: {_id: 1, aKey: 1}}), ErrorCodes.NamespaceNotSharded); -// Should fail because operation can't run on config server -const isCatalogShardEnabled = CatalogShardUtil.isEnabledIgnoringFCV(st); assert.commandFailedWithCode( mongos.adminCommand({refineCollectionShardKey: "config.collections", key: {_id: 1, aKey: 1}}), - isCatalogShardEnabled ? 
ErrorCodes.NamespaceNotSharded : ErrorCodes.NoShardingEnabled); + ErrorCodes.NamespaceNotSharded); enableShardingAndShardColl({_id: 1}); @@ -737,85 +732,9 @@ if (!isStepdownSuite) { assert.soon(() => oldPrimaryEpoch !== st.shard0.adminCommand({getShardVersion: kNsName, fullMetadata: true}) .metadata.shardVersionEpoch.toString()); - // TODO (SERVER-74477): Always assume that all shards will refresh during rename. - if (FeatureFlagUtil.isPresentAndEnabled(st.shard0.getDB(kDbName), - "AllowMigrationsRefreshToAll")) { - assert.soon(() => oldSecondaryEpoch !== - st.shard1.adminCommand({getShardVersion: kNsName, fullMetadata: true}) - .metadata.shardVersionEpoch.toString()); - } else { - assert.soon(() => oldSecondaryEpoch === - st.shard1.adminCommand({getShardVersion: kNsName, fullMetadata: true}) - .metadata.shardVersionEpoch.toString()); - } -} - -// TODO SERVER-72515: remove once 7.0 becomes last-lts. -const fcvDoc = assert.commandWorked( - st.configRS.getPrimary().adminCommand({getParameter: 1, featureCompatibilityVersion: 1})); -if (fcvDoc.featureCompatibilityVersion.version == lastLTSFCV && - !jsTestOptions().shardMixedBinVersions) { - (() => { - // - // Verify listIndexes and checkShardingIndexes are retried on shard version errors and are - // sent with shard versions. - // - - // Create a sharded collection with one chunk on shard0. - const dbName = "testShardVersions"; - const collName = "fooShardVersions"; - const ns = dbName + "." + collName; - assert.commandWorked(st.s.adminCommand({enableSharding: dbName})); - st.ensurePrimaryShard(dbName, st.shard0.shardName); - assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {x: 1}})); - - const minKeyShardDB = st.rs0.getPrimary().getDB(dbName); - assert.commandWorked(minKeyShardDB.setProfilingLevel(2)); - - // Refining the shard key should internally retry on a stale epoch error for listIndexes and - // succeed. - assert.commandWorked(minKeyShardDB.adminCommand({ - configureFailPoint: "failCommand", - mode: {times: 5}, - data: { - errorCode: ErrorCodes.StaleEpoch, - failCommands: ["listIndexes"], - failInternalCommands: true - } - })); - assert.commandWorked(st.s.getCollection(ns).createIndex({x: 1, y: 1})); - assert.commandWorked(st.s.adminCommand({refineCollectionShardKey: ns, key: {x: 1, y: 1}})); - - // Refining the shard key should internally retry on a stale epoch error for - // checkShardingIndex and succeed. - assert.commandWorked(minKeyShardDB.adminCommand({ - configureFailPoint: "failCommand", - mode: {times: 5}, - data: { - errorCode: ErrorCodes.StaleEpoch, - failCommands: ["checkShardingIndex"], - failInternalCommands: true - } - })); - assert.commandWorked(st.s.getCollection(ns).createIndex({x: 1, y: 1, z: 1})); - assert.commandWorked( - st.s.adminCommand({refineCollectionShardKey: ns, key: {x: 1, y: 1, z: 1}})); - - // Verify both commands were sent with shard versions through the profiler. - profilerHasAtLeastOneMatchingEntryOrThrow({ - profileDB: minKeyShardDB, - filter: {"command.listIndexes": collName, "command.shardVersion": {"$exists": true}} - }); - - profilerHasAtLeastOneMatchingEntryOrThrow({ - profileDB: minKeyShardDB, - filter: {"command.checkShardingIndex": ns, "command.shardVersion": {"$exists": true}} - }); - - // Clean up. 
- assert.commandWorked(minKeyShardDB.setProfilingLevel(0)); - assert(minKeyShardDB.system.profile.drop()); - })(); + assert.soon(() => oldSecondaryEpoch !== + st.shard1.adminCommand({getShardVersion: kNsName, fullMetadata: true}) + .metadata.shardVersionEpoch.toString()); } // Assumes the given arrays are sorted by the max field. @@ -934,4 +853,3 @@ function compareBoundaries(conn, shardedNs, refinedNs) { })(); st.stop(); -})(); diff --git a/jstests/sharding/remove1.js b/jstests/sharding/remove1.js index c9410a3d04d9a..aef5de61b6585 100644 --- a/jstests/sharding/remove1.js +++ b/jstests/sharding/remove1.js @@ -1,7 +1,4 @@ -(function() { -'use strict'; - -load("jstests/libs/catalog_shard_util.js"); +import {ConfigShardUtil} from "jstests/libs/config_shard_util.js"; var s = new ShardingTest({shards: 2, other: {enableBalancer: true}}); var config = s.s0.getDB('config'); @@ -16,10 +13,10 @@ var topologyTime0 = config.shards.findOne({_id: s.shard0.shardName}).topologyTim var topologyTime1 = config.shards.findOne({_id: s.shard1.shardName}).topologyTime; assert.gt(topologyTime1, topologyTime0); -// removeShard is not permited on shard0 (the catalogShard) if catalogShard is enabled, so we want +// removeShard is not permited on shard0 (the configShard) if configShard is enabled, so we want // to use transitionToDedicatedConfigServer instead var removeShardOrTransitionToDedicated = - TestData.catalogShard ? "transitionToDedicatedConfigServer" : "removeShard"; + TestData.configShard ? "transitionToDedicatedConfigServer" : "removeShard"; // First remove puts in draining mode, the second tells me a db needs to move, the third // actually removes @@ -39,9 +36,9 @@ s.s0.getDB('needToMove').dropDatabase(); // removed s.awaitBalancerRound(); -if (TestData.catalogShard) { - // A catalog shard can't be removed until all range deletions have finished. - CatalogShardUtil.waitForRangeDeletions(s.s); +if (TestData.configShard) { + // A config shard can't be removed until all range deletions have finished. + ConfigShardUtil.waitForRangeDeletions(s.s); } removeResult = assert.commandWorked( @@ -58,7 +55,7 @@ assert.gt(topologyTime2, topologyTime1); assert.commandFailed(s.s0.adminCommand({removeshard: s.shard1.shardName})); // Should create a shard0002 shard -var rs = new ReplSetTest({nodes: 1}); +const rs = new ReplSetTest({nodes: 1}); rs.startSet({shardsvr: ""}); rs.initiate(); assert.commandWorked(s.s0.adminCommand({addshard: rs.getURL()})); @@ -66,4 +63,3 @@ assert.eq(2, s.config.shards.count(), "new server does not appear in count"); rs.stopSet(); s.stop(); -})(); diff --git a/jstests/sharding/rename.js b/jstests/sharding/rename.js index 5e44f76079609..0ab80aec657d7 100644 --- a/jstests/sharding/rename.js +++ b/jstests/sharding/rename.js @@ -1,13 +1,7 @@ -// The following checks involve talking to a shard node, which in this test is shutdown. -TestData.skipCheckingUUIDsConsistentAcrossCluster = true; -TestData.skipCheckShardFilteringMetadata = true; - (function() { 'use strict'; -load("jstests/replsets/rslib.js"); - -var s = new ShardingTest({shards: 2, mongos: 1, rs: {oplogSize: 10}}); +var s = new ShardingTest({}); var db = s.getDB("test"); assert.commandWorked(db.foo.insert({_id: 1})); @@ -38,33 +32,34 @@ assert.commandWorked( assert.commandFailed(db.bar.renameCollection('shardedColl')); // Renaming unsharded collection to a different db with different primary shard. 
-db.unSharded.insert({x: 1}); +let unshardedColl = db['unSharded']; + +unshardedColl.insert({x: 1}); assert.commandFailedWithCode( - db.adminCommand({renameCollection: 'test.unSharded', to: 'otherDBDifferentPrimary.foo'}), + db.adminCommand( + {renameCollection: unshardedColl.getFullName(), to: 'otherDBDifferentPrimary.foo'}), [ErrorCodes.CommandFailed], "Source and destination collections must be on the same database."); // Renaming unsharded collection to a different db with same primary shard. assert.commandWorked( - db.adminCommand({renameCollection: 'test.unSharded', to: 'otherDBSamePrimary.foo'})); -assert.eq(0, db.unsharded.countDocuments({})); + db.adminCommand({renameCollection: unshardedColl.getFullName(), to: 'otherDBSamePrimary.foo'})); +assert.eq(0, unshardedColl.countDocuments({})); assert.eq(1, s.getDB('otherDBSamePrimary').foo.countDocuments({})); -const testDB = s.rs0.getPrimary().getDB('test'); -const fcvDoc = testDB.adminCommand({getParameter: 1, featureCompatibilityVersion: 1}); jsTest.log("Testing that rename operations involving views are not allowed"); { assert.commandWorked(db.collForView.insert({_id: 1})); assert.commandWorked(db.createView('view', 'collForView', [])); - let toAView = db.unsharded.renameCollection('view', true /* dropTarget */); + unshardedColl.insert({x: 1}); + let toAView = unshardedColl.renameCollection('view', true /* dropTarget */); assert.commandFailedWithCode( toAView, [ ErrorCodes.NamespaceExists, - ErrorCodes.CommandNotSupportedOnView, // TODO SERVER-68084 remove this error code - ErrorCodes.NamespaceNotFound // TODO SERVER-68084 remove this error code + ErrorCodes.CommandNotSupportedOnView, // TODO SERVER-78217 remove this error code ], "renameCollection should fail with NamespaceExists when the target is view"); @@ -73,7 +68,6 @@ jsTest.log("Testing that rename operations involving views are not allowed"); fromAView, [ ErrorCodes.CommandNotSupportedOnView, - ErrorCodes.NamespaceNotFound // TODO SERVER-68084 remove this error code ], "renameCollection should fail with CommandNotSupportedOnView when renaming a view"); } @@ -90,7 +84,7 @@ jsTest.log("Testing that rename operations involving views are not allowed"); assert.eq(1, sameColl.countDocuments({}), "Rename a collection to itself must not loose data"); } -if (MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version, '6.1') >= 0) { +{ // Create collection on non-primary shard (shard1 for test db) to simulate wrong creation via // direct connection: collection rename should fail since `badcollection` uuids are inconsistent // across shards @@ -102,6 +96,7 @@ if (MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version, ' s.s0.getDB('test').badcollection.renameCollection('goodcollection'), [ErrorCodes.InvalidUUID], "collection rename should fail since test.badcollection uuids are inconsistent across shards"); + s.shard1.getDB('test').badcollection.drop(); // Target collection existing on non-primary shard: rename with `dropTarget=false` must fail jsTest.log( @@ -117,24 +112,6 @@ if (MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version, ' assert.commandWorked( s.s0.getDB('test').goodcollection.renameCollection('superbadcollection', true)); } -// Ensure write concern works by shutting down 1 node in a replica set shard -jsTest.log("Testing write concern (2)"); - -var replTest = s.rs0; - -// Kill any node. Don't care if it's a primary or secondary. -replTest.stop(0); - -// Call getPrimary() to populate replTest._secondaries. 
-replTest.getPrimary(); -let liveSecondaries = replTest.getSecondaries().filter(function(node) { - return node.host !== replTest.nodes[0].host; -}); -replTest.awaitSecondaryNodes(null, liveSecondaries); -awaitRSClientHosts(s.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name); - -assert.commandWorked(db.foo.insert({_id: 4})); -assert.commandWorked(db.foo.renameCollection('bar', true)); s.stop(); })(); diff --git a/jstests/sharding/rename_sharded.js b/jstests/sharding/rename_sharded.js index a566369af9df8..3f60c11a17877 100644 --- a/jstests/sharding/rename_sharded.js +++ b/jstests/sharding/rename_sharded.js @@ -202,7 +202,7 @@ const mongos = st.s0; toTags.forEach(deleteDifferentTagFields); // Compare field by field because keys can potentially be in different order - for (field in Object.keys(fromTags[0])) { + for (let field in Object.keys(fromTags[0])) { assert.eq(fromTags[0][field], toTags[0][field], "Expected source tags to be passed to target collection"); diff --git a/jstests/sharding/rename_write_concern.js b/jstests/sharding/rename_write_concern.js new file mode 100644 index 0000000000000..d09f5cba5e4ec --- /dev/null +++ b/jstests/sharding/rename_write_concern.js @@ -0,0 +1,34 @@ +(function() { +'use strict'; + +// The following checks involve talking to a shard node, which in this test is shutdown. +TestData.skipCheckingUUIDsConsistentAcrossCluster = true; +TestData.skipCheckShardFilteringMetadata = true; +TestData.skipCheckRoutingTableConsistency = true; +TestData.skipCheckMetadataConsistency = true; + +load("jstests/replsets/rslib.js"); + +jsTest.log("Testing write concern (2)"); + +let st = new ShardingTest({rs: {nodes: 3}}); +let db = st.getDB("test"); + +var replTest = st.rs0; + +// Kill any node. Don't care if it's a primary or secondary. +replTest.remove(0); + +// Call getPrimary() to populate replTest._secondaries. +replTest.getPrimary(); +let liveSecondaries = replTest.getSecondaries().filter(function(node) { + return node.host !== replTest.nodes[0].host; +}); +replTest.awaitSecondaryNodes(null, liveSecondaries); +awaitRSClientHosts(st.s, replTest.getPrimary(), {ok: true, ismaster: true}, replTest.name); + +assert.commandWorked(db.foo.insert({_id: 4})); +assert.commandWorked(db.foo.renameCollection('bar', true)); + +st.stop(); +})(); diff --git a/jstests/sharding/replication_with_undefined_shard_key.js b/jstests/sharding/replication_with_undefined_shard_key.js index a363ff06b9e04..600887b33f88d 100644 --- a/jstests/sharding/replication_with_undefined_shard_key.js +++ b/jstests/sharding/replication_with_undefined_shard_key.js @@ -27,4 +27,4 @@ assert.commandWorked( mongosColl.remove({}, {writeConcern: {w: 2, wtimeout: ReplSetTest.kDefaultTimeoutMS}})); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/replmonitor_bad_seed.js b/jstests/sharding/replmonitor_bad_seed.js index e99dcab248f9b..113a2fa8744b0 100644 --- a/jstests/sharding/replmonitor_bad_seed.js +++ b/jstests/sharding/replmonitor_bad_seed.js @@ -23,7 +23,7 @@ load("jstests/replsets/rslib.js"); var st, replTest; -if (TestData.catalogShard) { +if (TestData.configShard) { // Use a second shard so we don't shut down the config server. 
st = new ShardingTest({shards: 2, rs: {oplogSize: 10}}); replTest = st.rs1; diff --git a/jstests/sharding/reshard_collection_basic.js b/jstests/sharding/reshard_collection_basic.js index c235243264ae2..d1f2b874d455d 100644 --- a/jstests/sharding/reshard_collection_basic.js +++ b/jstests/sharding/reshard_collection_basic.js @@ -6,9 +6,9 @@ // load("jstests/libs/fail_point_util.js"); -load("jstests/libs/uuid_util.js"); load("jstests/sharding/libs/find_chunks_util.js"); load("jstests/libs/discover_topology.js"); +load("jstests/sharding/libs/reshard_collection_util.js"); (function() { 'use strict'; @@ -18,8 +18,9 @@ const kDbName = 'db'; const collName = 'foo'; const ns = kDbName + '.' + collName; const mongos = st.s0; -const mongosConfig = mongos.getDB('config'); const kNumInitialDocs = 500; +const reshardCmdTest = + new ReshardCollectionCmdTest({st, dbName: kDbName, collName, numInitialDocs: kNumInitialDocs}); const criticalSectionTimeoutMS = 24 * 60 * 60 * 1000; /* 1 day */ const topology = DiscoverTopology.findConnectedNodes(mongos); @@ -27,170 +28,6 @@ const coordinator = new Mongo(topology.configsvr.nodes[0]); assert.commandWorked(coordinator.getDB("admin").adminCommand( {setParameter: 1, reshardingCriticalSectionTimeoutMillis: criticalSectionTimeoutMS})); -let shardToRSMap = {}; -shardToRSMap[st.shard0.shardName] = st.rs0; -shardToRSMap[st.shard1.shardName] = st.rs1; - -let shardIdToShardMap = {}; -shardIdToShardMap[st.shard0.shardName] = st.shard0; -shardIdToShardMap[st.shard1.shardName] = st.shard1; - -let getUUIDFromCollectionInfo = (dbName, collName, collInfo) => { - if (collInfo) { - return extractUUIDFromObject(collInfo.info.uuid); - } - - const uuidObject = getUUIDFromListCollections(mongos.getDB(dbName), collName); - return extractUUIDFromObject(uuidObject); -}; - -let constructTemporaryReshardingCollName = (dbName, collName, collInfo) => { - const existingUUID = getUUIDFromCollectionInfo(dbName, collName, collInfo); - return 'system.resharding.' 
+ existingUUID; -}; - -let getAllShardIdsFromExpectedChunks = (expectedChunks) => { - let shardIds = new Set(); - expectedChunks.forEach(chunk => { - shardIds.add(chunk.recipientShardId); - }); - return shardIds; -}; - -let verifyChunksMatchExpected = (numExpectedChunks, presetExpectedChunks) => { - let collEntry = mongos.getDB('config').getCollection('collections').findOne({_id: ns}); - let chunkQuery = {uuid: collEntry.uuid}; - - const reshardedChunks = mongosConfig.chunks.find(chunkQuery).toArray(); - - if (presetExpectedChunks) { - presetExpectedChunks.sort(); - } - - reshardedChunks.sort(); - assert.eq(numExpectedChunks, reshardedChunks.length, tojson(reshardedChunks)); - - let shardChunkCounts = {}; - let incChunkCount = key => { - if (shardChunkCounts.hasOwnProperty(key)) { - shardChunkCounts[key]++; - } else { - shardChunkCounts[key] = 1; - } - }; - - for (let i = 0; i < numExpectedChunks; i++) { - incChunkCount(reshardedChunks[i].shard); - - // match exact chunk boundaries for presetExpectedChunks - if (presetExpectedChunks) { - assert.eq(presetExpectedChunks[i].recipientShardId, reshardedChunks[i].shard); - assert.eq(presetExpectedChunks[i].min, reshardedChunks[i].min); - assert.eq(presetExpectedChunks[i].max, reshardedChunks[i].max); - } - } - - // if presetChunks not specified, we only assert that chunks counts are balanced across shards - if (!presetExpectedChunks) { - let maxDiff = 0; - let shards = Object.keys(shardChunkCounts); - - shards.forEach(shard1 => { - shards.forEach(shard2 => { - let diff = Math.abs(shardChunkCounts[shard1] - shardChunkCounts[shard2]); - maxDiff = (diff > maxDiff) ? diff : maxDiff; - }); - }); - - assert.lte(maxDiff, 1, tojson(reshardedChunks)); - } -}; - -let verifyCollectionExistenceForConn = (collName, expectedToExist, conn) => { - const doesExist = Boolean(conn.getDB(kDbName)[collName].exists()); - assert.eq(doesExist, expectedToExist); -}; - -let verifyTemporaryReshardingCollectionExistsWithCorrectOptions = (expectedRecipientShards) => { - const originalCollInfo = mongos.getDB(kDbName).getCollectionInfos({name: collName})[0]; - assert.neq(originalCollInfo, undefined); - - const tempReshardingCollName = - constructTemporaryReshardingCollName(kDbName, collName, originalCollInfo); - verifyCollectionExistenceForConn(tempReshardingCollName, false, mongos); - - expectedRecipientShards.forEach(shardId => { - const rsPrimary = shardToRSMap[shardId].getPrimary(); - verifyCollectionExistenceForConn(collName, true, rsPrimary); - verifyCollectionExistenceForConn(tempReshardingCollName, false, rsPrimary); - ShardedIndexUtil.assertIndexExistsOnShard( - shardIdToShardMap[shardId], kDbName, collName, {newKey: 1}); - }); -}; - -let verifyAllShardingCollectionsRemoved = (tempReshardingCollName) => { - assert.eq(0, mongos.getDB(kDbName)[tempReshardingCollName].find().itcount()); - assert.eq(0, mongosConfig.reshardingOperations.find({ns}).itcount()); - assert.eq(0, mongosConfig.collections.find({reshardingFields: {$exists: true}}).itcount()); - assert.eq( - 0, - st.rs0.getPrimary().getDB('config').localReshardingOperations.donor.find({ns}).itcount()); - assert.eq(0, - st.rs0.getPrimary() - .getDB('config') - .localReshardingOperations.recipient.find({ns}) - .itcount()); - assert.eq( - 0, - st.rs1.getPrimary().getDB('config').localReshardingOperations.donor.find({ns}).itcount()); - assert.eq(0, - st.rs1.getPrimary() - .getDB('config') - .localReshardingOperations.recipient.find({ns}) - .itcount()); -}; - -let assertReshardCollOkWithPreset = (commandObj, 
presetReshardedChunks) => { - assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {oldKey: 1}})); - - let bulk = mongos.getDB(kDbName).getCollection(collName).initializeOrderedBulkOp(); - for (let x = 0; x < kNumInitialDocs; x++) { - bulk.insert({oldKey: x, newKey: kNumInitialDocs - x}); - } - assert.commandWorked(bulk.execute()); - - commandObj._presetReshardedChunks = presetReshardedChunks; - const tempReshardingCollName = constructTemporaryReshardingCollName(kDbName, collName); - - assert.commandWorked(mongos.adminCommand(commandObj)); - - verifyTemporaryReshardingCollectionExistsWithCorrectOptions( - getAllShardIdsFromExpectedChunks(presetReshardedChunks)); - verifyChunksMatchExpected(presetReshardedChunks.length, presetReshardedChunks); - - mongos.getDB(kDbName)[collName].drop(); - verifyAllShardingCollectionsRemoved(tempReshardingCollName); -}; - -let assertReshardCollOk = (commandObj, expectedChunks) => { - assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {oldKey: 1}})); - - let bulk = mongos.getDB(kDbName).getCollection(collName).initializeOrderedBulkOp(); - for (let x = 0; x < kNumInitialDocs; x++) { - bulk.insert({oldKey: x, newKey: kNumInitialDocs - x}); - } - assert.commandWorked(bulk.execute()); - - const tempReshardingCollName = constructTemporaryReshardingCollName(kDbName, collName); - - assert.commandWorked(mongos.adminCommand(commandObj)); - - verifyChunksMatchExpected(expectedChunks); - - mongos.getDB(kDbName)[collName].drop(); - verifyAllShardingCollectionsRemoved(tempReshardingCollName); -}; - let presetReshardedChunks = [{recipientShardId: st.shard1.shardName, min: {newKey: MinKey}, max: {newKey: MaxKey}}]; @@ -280,17 +117,19 @@ assert.commandFailedWithCode(mongos.getDB('test').system.resharding.mycoll.inser mongos.getDB(kDbName)[collName].drop(); jsTest.log("Succeed when correct locale is provided."); -assertReshardCollOk({reshardCollection: ns, key: {newKey: 1}, collation: {locale: 'simple'}}, 1); +reshardCmdTest.assertReshardCollOk( + {reshardCollection: ns, key: {newKey: 1}, collation: {locale: 'simple'}}, 1); jsTest.log("Succeed base case."); -assertReshardCollOk({reshardCollection: ns, key: {newKey: 1}}, 1); +reshardCmdTest.assertReshardCollOk({reshardCollection: ns, key: {newKey: 1}}, 1); jsTest.log("Succeed if unique is specified and is false."); -assertReshardCollOk({reshardCollection: ns, key: {newKey: 1}, unique: false}, 1); +reshardCmdTest.assertReshardCollOk({reshardCollection: ns, key: {newKey: 1}, unique: false}, 1); jsTest.log( "Succeed if _presetReshardedChunks is provided and test commands are enabled (default)."); -assertReshardCollOkWithPreset({reshardCollection: ns, key: {newKey: 1}}, presetReshardedChunks); +reshardCmdTest.assertReshardCollOkWithPreset({reshardCollection: ns, key: {newKey: 1}}, + presetReshardedChunks); presetReshardedChunks = [ {recipientShardId: st.shard0.shardName, min: {newKey: MinKey}, max: {newKey: 0}}, @@ -298,19 +137,19 @@ presetReshardedChunks = [ ]; jsTest.log("Succeed if all optional fields and numInitialChunks are provided with correct values."); -assertReshardCollOk({ +reshardCmdTest.assertReshardCollOk({ reshardCollection: ns, key: {newKey: 1}, unique: false, collation: {locale: 'simple'}, numInitialChunks: 2, }, - 2); + 2); jsTest.log( "Succeed if all optional fields and _presetReshardedChunks are provided with correct values" + " and test commands are enabled (default)."); -assertReshardCollOkWithPreset( +reshardCmdTest.assertReshardCollOkWithPreset( {reshardCollection: ns, key: 
{newKey: 1}, unique: false, collation: {locale: 'simple'}}, presetReshardedChunks); @@ -318,17 +157,17 @@ jsTest.log("Succeed if the zone provided is assigned to a shard but not a range " collection."); const newZoneName = 'x2'; assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: newZoneName})); -assertReshardCollOk({ +reshardCmdTest.assertReshardCollOk({ reshardCollection: ns, key: {newKey: 1}, unique: false, collation: {locale: 'simple'}, zones: [{zone: newZoneName, min: {newKey: 5}, max: {newKey: 10}}] }, - 3); + 3); jsTest.log("Succeed if resulting chunks all end up in one shard."); -assertReshardCollOk({ +reshardCmdTest.assertReshardCollOk({ reshardCollection: ns, key: {newKey: 1}, unique: false, @@ -336,10 +175,10 @@ assertReshardCollOk({ collation: {locale: 'simple'}, zones: [{zone: newZoneName, min: {newKey: MinKey}, max: {newKey: MaxKey}}] }, - 1); + 1); jsTest.log("Succeed if zones are empty"); -assertReshardCollOk({ +reshardCmdTest.assertReshardCollOk({ reshardCollection: ns, key: {newKey: 1}, unique: false, @@ -347,7 +186,21 @@ assertReshardCollOk({ collation: {locale: 'simple'}, zones: [] }, - 1); + 1); + +jsTest.log("Succeed if zones are not empty."); +assert.commandWorked( + mongos.adminCommand({addShardToZone: st.shard1.shardName, zone: existingZoneName})); +assert.commandWorked(st.s.adminCommand( + {updateZoneKeyRange: ns, min: {oldKey: 0}, max: {oldKey: 5}, zone: existingZoneName})); +reshardCmdTest.assertReshardCollOk({ + reshardCollection: ns, + key: {oldKey: 1, newKey: 1}, + unique: false, + collation: {locale: 'simple'}, + zones: [{zone: existingZoneName, min: {oldKey: 0}, max: {oldKey: 5}}] +}, + 3); jsTest.log("Succeed with hashed shard key that provides enough cardinality."); assert.commandWorked( diff --git a/jstests/sharding/reshard_collection_resharding_improvements_basic.js b/jstests/sharding/reshard_collection_resharding_improvements_basic.js new file mode 100644 index 0000000000000..133e2bdfdc5af --- /dev/null +++ b/jstests/sharding/reshard_collection_resharding_improvements_basic.js @@ -0,0 +1,255 @@ +/** + * Tests for basic functionality of the resharding improvements feature. + * + * @tags: [ + * requires_fcv_71, + * featureFlagReshardingImprovements + * ] + */ + +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/sharding/libs/reshard_collection_util.js"); + +const st = new ShardingTest({mongos: 1, shards: 2}); +const kDbName = 'db'; +const collName = 'foo'; +const ns = kDbName + '.' + collName; +const mongos = st.s0; +const kNumInitialDocs = 500; +const reshardCmdTest = + new ReshardCollectionCmdTest({st, dbName: kDbName, collName, numInitialDocs: kNumInitialDocs}); + +const testShardDistribution = (mongos) => { + if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + return; + } + + /** + * Failure cases. 
+ */ + assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {oldKey: 1}})); + + jsTest.log("reshardCollection cmd should fail when shardDistribution has duplicate shardId."); + assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: ns, + key: {newKey: 1}, + shardDistribution: [ + {shard: st.shard0.shardName, min: {newKey: MinKey}}, + {shard: st.shard0.shardName, max: {newKey: MaxKey}} + ] + }), + ErrorCodes.InvalidOptions); + + jsTest.log("reshardCollection cmd should fail when shardDistribution is missing min or max."); + assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: ns, + key: {newKey: 1}, + shardDistribution: [ + {shard: st.shard0.shardName, min: {newKey: MinKey}}, + {shard: st.shard1.shardName, max: {newKey: MaxKey}} + ] + }), + ErrorCodes.InvalidOptions); + + jsTest.log( + "reshardCollection cmd should fail when shardDistribution is not specified using the shard key."); + assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: ns, + key: {newKey: 1}, + shardDistribution: [ + {shard: st.shard0.shardName, min: {oldKey: MinKey}, max: {oldKey: 0}}, + {shard: st.shard1.shardName, min: {oldKey: 0}, max: {oldKey: MaxKey}} + ] + }), + ErrorCodes.InvalidOptions); + + jsTest.log( + "reshardCollection cmd should fail when one shard specifies min/max and the other does not."); + assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: ns, + key: {newKey: 1}, + shardDistribution: [ + {shard: st.shard0.shardName}, + {shard: st.shard1.shardName, min: {newKey: MinKey}, max: {newKey: MaxKey}} + ] + }), + ErrorCodes.InvalidOptions); + + jsTest.log( + "reshardCollection cmd should fail when shardDistribution is not starting with globalMin."); + assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: ns, + key: {newKey: 1}, + shardDistribution: [ + {shard: st.shard0.shardName, min: {newKey: -1}, max: {newKey: 0}}, + {shard: st.shard1.shardName, min: {newKey: 0}, max: {newKey: MaxKey}} + ] + }), + ErrorCodes.InvalidOptions); + + jsTest.log("reshardCollection cmd should fail when shardDistribution is not continuous."); + assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: ns, + key: {newKey: 1}, + shardDistribution: [ + {shard: st.shard0.shardName, min: {newKey: MinKey}, max: {newKey: -1}}, + {shard: st.shard1.shardName, min: {newKey: 0}, max: {newKey: MaxKey}} + ] + }), + ErrorCodes.InvalidOptions); + + jsTest.log( + "reshardCollection cmd should fail when the shardId in shardDistribution is not recognized."); + assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: ns, + key: {newKey: 1}, + shardDistribution: [ + {shard: "s1", min: {newKey: MinKey}, max: {newKey: 0}}, + {shard: "s2", min: {newKey: 0}, max: {newKey: MaxKey}} + ] + }), + ErrorCodes.ShardNotFound); + mongos.getDB(kDbName)[collName].drop(); + + /** + * Success cases go below. 
+ */ + jsTest.log("reshardCollection cmd should succeed with shardDistribution parameter."); + reshardCmdTest.assertReshardCollOk({ + reshardCollection: ns, + key: {newKey: 1}, + numInitialChunks: 2, + shardDistribution: [{shard: st.shard0.shardName}, {shard: st.shard1.shardName}] + }, + 2); + reshardCmdTest.assertReshardCollOk( + { + reshardCollection: ns, + key: {newKey: 1}, + shardDistribution: [ + {shard: st.shard0.shardName, min: {newKey: MinKey}, max: {newKey: 0}}, + {shard: st.shard1.shardName, min: {newKey: 0}, max: {newKey: MaxKey}} + ] + }, + 2, + [ + {recipientShardId: st.shard0.shardName, min: {newKey: MinKey}, max: {newKey: 0}}, + {recipientShardId: st.shard1.shardName, min: {newKey: 0}, max: {newKey: MaxKey}} + ]); +}; + +const testForceRedistribution = (mongos) => { + if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + return; + } + + jsTest.log( + "When forceRedistribution is not set to true, same-key resharding should have no effect"); + reshardCmdTest.assertReshardCollOk( + {reshardCollection: ns, key: {oldKey: 1}, numInitialChunks: 2}, 1); + reshardCmdTest.assertReshardCollOk( + {reshardCollection: ns, key: {oldKey: 1}, numInitialChunks: 2, forceRedistribution: false}, + 1); + + jsTest.log("When forceRedistribution is true, same-key resharding should take effect"); + reshardCmdTest.assertReshardCollOk( + {reshardCollection: ns, key: {oldKey: 1}, numInitialChunks: 2, forceRedistribution: true}, + 2); + + // Create a sharded collection with 2 zones, then force same-key resharding without specifying + // zones and the resharding should use existing 2 zones + jsTest.log("When zones is not provided, use existing zones on the collection"); + const additionalSetup = function(test) { + const st = test._st; + const ns = test._ns; + const zoneName1 = 'z1'; + const zoneName2 = 'z2'; + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName1})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName2})); + assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: zoneName2})); + assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {oldKey: 1}})); + assert.commandWorked(st.s.adminCommand( + {updateZoneKeyRange: ns, min: {oldKey: MinKey}, max: {oldKey: 0}, zone: zoneName1})); + assert.commandWorked(st.s.adminCommand( + {updateZoneKeyRange: ns, min: {oldKey: 0}, max: {oldKey: MaxKey}, zone: zoneName2})); + }; + + reshardCmdTest.assertReshardCollOk( + { + reshardCollection: ns, + key: {oldKey: 1}, + forceRedistribution: true, + shardDistribution: [ + {shard: st.shard0.shardName, min: {oldKey: MinKey}, max: {oldKey: -1}}, + {shard: st.shard0.shardName, min: {oldKey: -1}, max: {oldKey: 1}}, + {shard: st.shard1.shardName, min: {oldKey: 1}, max: {oldKey: MaxKey}} + ] + }, + 4, + [ + {recipientShardId: st.shard0.shardName, min: {oldKey: MinKey}, max: {oldKey: -1}}, + {recipientShardId: st.shard0.shardName, min: {oldKey: -1}, max: {oldKey: 0}}, + {recipientShardId: st.shard0.shardName, min: {oldKey: 0}, max: {oldKey: 1}}, + {recipientShardId: st.shard1.shardName, min: {oldKey: 1}, max: {oldKey: MaxKey}} + ], + [ + {zone: "z1", min: {oldKey: MinKey}, max: {oldKey: 0}}, + {zone: "z2", min: {oldKey: 0}, max: {oldKey: MaxKey}} + ], + additionalSetup); + jsTest.log("When empty zones is provided, should discard the existing zones."); + reshardCmdTest.assertReshardCollOk( + { + 
reshardCollection: ns, + key: {oldKey: 1}, + forceRedistribution: true, + zones: [], + shardDistribution: [ + {shard: st.shard0.shardName, min: {oldKey: MinKey}, max: {oldKey: -1}}, + {shard: st.shard0.shardName, min: {oldKey: -1}, max: {oldKey: 1}}, + {shard: st.shard1.shardName, min: {oldKey: 1}, max: {oldKey: MaxKey}} + ] + }, + 3, + [ + {recipientShardId: st.shard0.shardName, min: {oldKey: MinKey}, max: {oldKey: -1}}, + {recipientShardId: st.shard0.shardName, min: {oldKey: -1}, max: {oldKey: 1}}, + {recipientShardId: st.shard1.shardName, min: {oldKey: 1}, max: {oldKey: MaxKey}} + ], + [], + additionalSetup); +}; + +const testReshardingWithIndex = (mongos) => { + if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + return; + } + + jsTest.log( + "When there is no index on the new shard-key, we should create one during resharding."); + + const additionalSetup = function(test) { + assert.commandWorked( + test._mongos.getDB(test._dbName).getCollection(test._collName).createIndex({ + oldKey: 1 + })); + }; + + reshardCmdTest.assertReshardCollOk( + {reshardCollection: ns, key: {newKey: 1}, numInitialChunks: 2}, + 2, + undefined, + undefined, + additionalSetup); +}; + +testShardDistribution(mongos); +testForceRedistribution(mongos); +testReshardingWithIndex(mongos); +st.stop(); diff --git a/jstests/sharding/reshard_collection_resharding_improvements_recovery.js b/jstests/sharding/reshard_collection_resharding_improvements_recovery.js new file mode 100644 index 0000000000000..25f412b097094 --- /dev/null +++ b/jstests/sharding/reshard_collection_resharding_improvements_recovery.js @@ -0,0 +1,68 @@ +/** + * Tests that when resharding on a shard key not covered by a secondary index, the new shard-key + * index can be successfully created if the recipient shard is restarted during building-index + * stage. + * + * @tags: [ + * requires_fcv_71, + * featureFlagReshardingImprovements + * ] + */ + +load("jstests/libs/discover_topology.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/sharding/libs/resharding_test_fixture.js"); + +const reshardingTest = new ReshardingTest({numDonors: 2, enableElections: true}); +reshardingTest.setup(); + +const donorShardNames = reshardingTest.donorShardNames; +const sourceCollection = reshardingTest.createShardedCollection({ + ns: "reshardingDb.coll", + shardKeyPattern: {oldKey: 1}, + chunks: [ + {min: {oldKey: MinKey}, max: {oldKey: 0}, shard: donorShardNames[0]}, + {min: {oldKey: 0}, max: {oldKey: MaxKey}, shard: donorShardNames[1]}, + ], +}); +const mongos = sourceCollection.getMongo(); +const topology = DiscoverTopology.findConnectedNodes(mongos); + +const recipientShardNames = reshardingTest.recipientShardNames; +const recipient = new Mongo(topology.shards[recipientShardNames[0]].primary); + +if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + reshardingTest.teardown(); + quit(); +} + +const reshardingPauseRecipientBeforeBuildingIndexFailpoint = + configureFailPoint(recipient, "reshardingPauseRecipientBeforeBuildingIndex"); + +reshardingTest.withReshardingInBackground( + { + newShardKeyPattern: {newKey: 1}, + newChunks: [{min: {newKey: MinKey}, max: {newKey: MaxKey}, shard: recipientShardNames[0]}], + }, + () => { + // Wait until participants are aware of the resharding operation. 
+ reshardingTest.awaitCloneTimestampChosen(); + reshardingPauseRecipientBeforeBuildingIndexFailpoint.wait(); + + reshardingTest.killAndRestartPrimaryOnShard(recipientShardNames[0]); + }, + { + afterReshardingFn: () => { + const indexes = mongos.getDB("reshardingDb").getCollection("coll").getIndexes(); + let haveNewShardKeyIndex = false; + indexes.forEach(index => { + if ("newKey" in index["key"]) { + haveNewShardKeyIndex = true; + } + }); + assert.eq(haveNewShardKeyIndex, true); + } + }); + +reshardingTest.teardown(); \ No newline at end of file diff --git a/jstests/sharding/reshard_collection_retry_after_failover.js b/jstests/sharding/reshard_collection_retry_after_failover.js new file mode 100644 index 0000000000000..dd99904450bc3 --- /dev/null +++ b/jstests/sharding/reshard_collection_retry_after_failover.js @@ -0,0 +1,173 @@ +/** + * Tests that if a reshardCollection command with a user-provided reshardingUUID is completed, + * then after failover the result is available to retries. + * + * @tags: [ + * uses_atclustertime, + * featureFlagReshardingImprovements, + * ] + */ +load("jstests/libs/discover_topology.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/libs/parallelTester.js"); +load("jstests/sharding/libs/resharding_test_fixture.js"); + +const enterAbortFailpointName = "reshardingPauseCoordinatorBeforeStartingErrorFlow"; +const originalReshardingUUID = UUID(); +const newReshardingUUID = UUID(); + +const getTempUUID = (tempNs) => { + const tempCollection = mongos.getCollection(tempNs); + return getUUIDFromConfigCollections(mongos, tempCollection.getFullName()); +}; + +const reshardingTest = new ReshardingTest({numDonors: 1, minimumOperationDurationMS: 0}); +reshardingTest.setup(); +const donorShardNames = reshardingTest.donorShardNames; +const recipientShardNames = reshardingTest.recipientShardNames; +const sourceCollection = reshardingTest.createShardedCollection({ + ns: "reshardingDb.coll", + shardKeyPattern: {oldKey: 1}, + chunks: [{min: {oldKey: MinKey}, max: {oldKey: MaxKey}, shard: donorShardNames[0]}], +}); + +const mongos = sourceCollection.getMongo(); +let topology = DiscoverTopology.findConnectedNodes(mongos); +let configsvr = new Mongo(topology.configsvr.primary); + +if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + reshardingTest.teardown(); + quit(); +} + +let pauseBeforeCloningFP = configureFailPoint(configsvr, "reshardingPauseCoordinatorBeforeCloning"); + +// Fulfilled once the first reshardCollection command creates the temporary collection. 
+let expectedUUIDAfterReshardingCompletes = undefined; + +const generateAbortThread = (mongosConnString, ns) => { + return new Thread((mongosConnString, ns) => { + const mongos = new Mongo(mongosConnString); + assert.commandWorked(mongos.adminCommand({abortReshardCollection: ns})); + }, mongosConnString, ns); +}; + +let abortThread = generateAbortThread(mongos.host, sourceCollection.getFullName()); + +jsTestLog("Attempting a resharding that will abort, with UUID: " + originalReshardingUUID); +reshardingTest.withReshardingInBackground( + { + newShardKeyPattern: {newKey: 1}, + reshardingUUID: originalReshardingUUID, + newChunks: [{min: {newKey: MinKey}, max: {newKey: MaxKey}, shard: recipientShardNames[0]}], + }, + (tempNs) => { + pauseBeforeCloningFP.wait(); + + const enterAbortFailpoint = configureFailPoint(configsvr, enterAbortFailpointName); + abortThread.start(); + enterAbortFailpoint.wait(); + enterAbortFailpoint.off(); + + pauseBeforeCloningFP.off(); + }, + { + expectedErrorCode: ErrorCodes.ReshardCollectionAborted, + }); +abortThread.join(); + +// Confirm the collection UUID did not change. +let finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(reshardingTest.sourceCollectionUUID, finalSourceCollectionUUID); + +jsTestLog("Retrying aborted resharding with UUID: " + originalReshardingUUID); +// A retry after the fact with the same UUID should not attempt to reshard the collection again, +// and also should return same error code. +assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + reshardingUUID: originalReshardingUUID +}), + ErrorCodes.ReshardCollectionAborted); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(reshardingTest.sourceCollectionUUID, finalSourceCollectionUUID); + +// Makes sure the same thing happens after failover +reshardingTest.shutdownAndRestartPrimaryOnShard(reshardingTest.configShardName); +topology = DiscoverTopology.findConnectedNodes(mongos); +configsvr = new Mongo(topology.configsvr.primary); + +jsTestLog("After failover, retrying aborted resharding with UUID: " + originalReshardingUUID); +assert.commandFailedWithCode(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + reshardingUUID: originalReshardingUUID +}), + ErrorCodes.ReshardCollectionAborted); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(reshardingTest.sourceCollectionUUID, finalSourceCollectionUUID); + +// Try it again but let it succeed this time. +jsTestLog("Trying resharding with new UUID: " + newReshardingUUID); +reshardingTest.retryOnceOnNetworkError(() => { + pauseBeforeCloningFP = configureFailPoint(configsvr, "reshardingPauseCoordinatorBeforeCloning"); +}); +reshardingTest.withReshardingInBackground({ + newShardKeyPattern: {newKey: 1}, + reshardingUUID: newReshardingUUID, + newChunks: [{min: {newKey: MinKey}, max: {newKey: MaxKey}, shard: recipientShardNames[0]}], +}, + (tempNs) => { + pauseBeforeCloningFP.wait(); + + // The UUID of the temporary resharding collection + // should become the UUID of the original collection + // once resharding has completed. 
+ expectedUUIDAfterReshardingCompletes = + getTempUUID(tempNs); + + pauseBeforeCloningFP.off(); + }); + +// Resharding should have succeeded. +assert.neq(expectedUUIDAfterReshardingCompletes, undefined); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +jsTestLog("After completion, retrying resharding with UUID: " + newReshardingUUID); +// A retry after the fact with the same UUID should not attempt to reshard the collection again, +// and should succeed. +assert.commandWorked(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + reshardingUUID: newReshardingUUID +})); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +// Makes sure the same thing happens after failover +reshardingTest.shutdownAndRestartPrimaryOnShard(reshardingTest.configShardName); +topology = DiscoverTopology.findConnectedNodes(mongos); +configsvr = new Mongo(topology.configsvr.primary); + +jsTestLog("After completion and failover, retrying resharding with UUID: " + newReshardingUUID); +assert.commandWorked(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + reshardingUUID: newReshardingUUID +})); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +reshardingTest.teardown(); diff --git a/jstests/sharding/reshard_collection_retryability_with_uuid.js b/jstests/sharding/reshard_collection_retryability_with_uuid.js new file mode 100644 index 0000000000000..f3de7fe4c844e --- /dev/null +++ b/jstests/sharding/reshard_collection_retryability_with_uuid.js @@ -0,0 +1,207 @@ +/** + * Tests that if a reshardCollection command is issued while there is an ongoing + * resharding operation for the same resharding UUID, the command joins with the ongoing + * resharding instance. But a reshardCollection command for the same collection with the + * same resharding key but a different UUID or no UUID should fail. Further, after the + * resharding operation completes, reshardCollection with the same UUID should receive the results + * even if forceRedistribution is true. + * + * @tags: [ + * uses_atclustertime, + * featureFlagReshardingImprovements, + * ] + */ +load("jstests/libs/discover_topology.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/libs/parallelTester.js"); +load("jstests/sharding/libs/resharding_test_fixture.js"); + +const originalReshardingUUID = UUID(); + +// Generates a new thread to run subsequent reshardCollections. This command must be exactly the +// same as the original resharding command we're trying to retry. 
+const makeReshardCollectionThread = + (routerConnString, ns, presetReshardedChunks, reshardingUUID, forceRedistribution) => { + if (reshardingUUID) + reshardingUUID = reshardingUUID.toString(); + return new Thread( + (routerConnString, ns, presetReshardedChunks, reshardingUUID, forceRedistribution) => { + const s = new Mongo(routerConnString); + let command = { + reshardCollection: ns, + key: {newKey: 1}, + _presetReshardedChunks: presetReshardedChunks + }; + if (reshardingUUID !== undefined) { + reshardingUUID = eval(reshardingUUID); + command = Object.merge(command, {reshardingUUID: reshardingUUID}); + } + if (forceRedistribution !== undefined) { + command = Object.merge(command, {forceRedistribution: forceRedistribution}); + } + assert.commandWorked(s.adminCommand(command)); + }, + routerConnString, + ns, + presetReshardedChunks, + reshardingUUID, + forceRedistribution); + }; + +const getTempUUID = (tempNs) => { + const tempCollection = mongos.getCollection(tempNs); + return getUUIDFromConfigCollections(mongos, tempCollection.getFullName()); +}; + +const reshardingTest = new ReshardingTest({numDonors: 1, minimumOperationDurationMS: 0}); +reshardingTest.setup(); +const donorShardNames = reshardingTest.donorShardNames; +const recipientShardNames = reshardingTest.recipientShardNames; +const sourceCollection = reshardingTest.createShardedCollection({ + ns: "reshardingDb.coll", + shardKeyPattern: {oldKey: 1}, + chunks: [{min: {oldKey: MinKey}, max: {oldKey: MaxKey}, shard: donorShardNames[0]}], +}); + +const mongos = sourceCollection.getMongo(); +const topology = DiscoverTopology.findConnectedNodes(mongos); +const configsvr = new Mongo(topology.configsvr.nodes[0]); + +if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + reshardingTest.teardown(); + quit(); +} + +const pauseBeforeCloningFP = + configureFailPoint(configsvr, "reshardingPauseCoordinatorBeforeCloning"); + +// Fulfilled once the first reshardCollection command creates the temporary collection. +let expectedUUIDAfterReshardingCompletes = undefined; + +let reshardCollectionThread; +reshardingTest.withReshardingInBackground( + { + newShardKeyPattern: {newKey: 1}, + reshardingUUID: originalReshardingUUID, + newChunks: [{min: {newKey: MinKey}, max: {newKey: MaxKey}, shard: recipientShardNames[0]}], + }, + (tempNs) => { + pauseBeforeCloningFP.wait(); + + // The UUID of the temporary resharding collection should become the UUID of the original + // collection once resharding has completed. + expectedUUIDAfterReshardingCompletes = getTempUUID(tempNs); + + reshardCollectionThread = makeReshardCollectionThread(mongos.host, + sourceCollection.getFullName(), + reshardingTest.presetReshardedChunks, + originalReshardingUUID); + + // Trying to reconnect using a different resharding UUID should not work. This + // tests the config server command directly because otherwise the + // ReshardCollectionCoordinator on the primary shard would reject the command. + assert.commandFailedWithCode(configsvr.adminCommand({ + _configsvrReshardCollection: sourceCollection.getFullName(), + reshardingUUID: UUID(), + key: {newKey: 1}, + writeConcern: {w: "majority"} + }), + ErrorCodes.ReshardCollectionInProgress); + + // Trying to reconnect using no resharding UUID should not work either. 
+ assert.commandFailedWithCode(configsvr.adminCommand({ + _configsvrReshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + writeConcern: {w: "majority"} + }), + ErrorCodes.ReshardCollectionInProgress); + + reshardCollectionThread.start(); + + pauseBeforeCloningFP.off(); + }); + +reshardCollectionThread.join(); + +// Confirm the UUID for the namespace that was resharded is the same as the temporary collection's +// UUID before the second reshardCollection command was issued. +assert.neq(expectedUUIDAfterReshardingCompletes, undefined); +let finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +// A retry after the fact with the same UUID should not reshard the collection again. +assert.commandWorked(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + reshardingUUID: originalReshardingUUID +})); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +// A retry after the fact with the same UUID and forceRedistribution should not reshard the +// collection again. +assert.commandWorked(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + reshardingUUID: originalReshardingUUID, + forceRedistribution: true +})); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +// A retry after the fact with no UUID should not reshard the collection again (because the key +// is already the same). +assert.commandWorked(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks +})); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +const newReshardingUUID = UUID(); +// A retry after the fact with a new UUID should not reshard the collection again (because +// forceRedistribution was not specified and the key has not changed) +assert.commandWorked(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + reshardingUUID: newReshardingUUID +})); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.eq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +// A retry after the fact with a new UUID and forceRedistribution SHOULD reshard the collection +// again. 
+assert.commandWorked(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + reshardingUUID: newReshardingUUID, + forceRedistribution: true +})); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.neq(expectedUUIDAfterReshardingCompletes, finalSourceCollectionUUID); + +// A retry after the fact with no UUID and forceRedistribution SHOULD reshard the collection again. +let newSourceCollectionUUID = finalSourceCollectionUUID; +assert.commandWorked(mongos.adminCommand({ + reshardCollection: sourceCollection.getFullName(), + key: {newKey: 1}, + _presetReshardedChunks: reshardingTest.presetReshardedChunks, + forceRedistribution: true +})); +finalSourceCollectionUUID = + getUUIDFromListCollections(sourceCollection.getDB(), sourceCollection.getName()); +assert.neq(newSourceCollectionUUID, finalSourceCollectionUUID); + +reshardingTest.teardown(); diff --git a/jstests/sharding/resharding_build_index_metrics.js b/jstests/sharding/resharding_build_index_metrics.js new file mode 100644 index 0000000000000..82bb4a0acdf9e --- /dev/null +++ b/jstests/sharding/resharding_build_index_metrics.js @@ -0,0 +1,69 @@ +/** + * Tests that during resharding building index phase, we can see how many indexes to build and how + * many indexes are built. + * + * @tags: [ + * requires_fcv_71, + * featureFlagReshardingImprovements + * ] + */ +load("jstests/libs/discover_topology.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/sharding/libs/resharding_test_fixture.js"); + +const reshardingTest = new ReshardingTest({numDonors: 2, enableElections: true}); +reshardingTest.setup(); + +const kDbName = 'reshardingDb'; +const kCollName = 'resharding_build_index_metrics'; +const ns = kDbName + '.' + kCollName; + +const donorShardNames = reshardingTest.donorShardNames; +const sourceCollection = reshardingTest.createShardedCollection({ + ns, + shardKeyPattern: {oldKey: 1}, + chunks: [ + {min: {oldKey: MinKey}, max: {oldKey: 0}, shard: donorShardNames[0]}, + {min: {oldKey: 0}, max: {oldKey: MaxKey}, shard: donorShardNames[1]}, + ], +}); + +const mongos = sourceCollection.getMongo(); +const topology = DiscoverTopology.findConnectedNodes(mongos); + +const recipientShardNames = reshardingTest.recipientShardNames; +const recipient = new Mongo(topology.shards[recipientShardNames[0]].primary); + +if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + reshardingTest.teardown(); + quit(); +} + +// Create an index on oldKey. 
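+// (Together with the new shard key index the recipient builds, this gives the recipient two
+// index builds to report while the fail point holds it in the building-index phase.)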
+assert.commandWorked( + mongos.getCollection(ns).insert([{oldKey: 1, newKey: -1}, {oldKey: 2, newKey: -2}])); +assert.commandWorked(mongos.getCollection(ns).createIndex({oldKey: 1})); +const hangAfterInitializingIndexBuildFailPoint = + configureFailPoint(recipient, "hangAfterInitializingIndexBuild"); + +reshardingTest.withReshardingInBackground( + { + newShardKeyPattern: {newKey: 1}, + newChunks: [{min: {newKey: MinKey}, max: {newKey: MaxKey}, shard: recipientShardNames[0]}], + }, + () => { + hangAfterInitializingIndexBuildFailPoint.wait(); + + jsTestLog("Entered building index phase, check currentOp"); + const report = recipient.getDB("admin").currentOp( + {ns, desc: {$regex: 'ReshardingMetricsRecipientService'}}); + assert.eq(report.inprog.length, 1); + const curOp = report.inprog[0]; + jsTestLog("Fetched currentOp: " + tojson(curOp)); + // There should be 2 indexes in progress: oldKey and newKey. + assert.eq(curOp["indexesToBuild"] - curOp["indexesBuilt"], 2); + hangAfterInitializingIndexBuildFailPoint.off(); + }); + +reshardingTest.teardown(); \ No newline at end of file diff --git a/jstests/sharding/resharding_building_index_failover.js b/jstests/sharding/resharding_building_index_failover.js new file mode 100644 index 0000000000000..30f5e767a7557 --- /dev/null +++ b/jstests/sharding/resharding_building_index_failover.js @@ -0,0 +1,77 @@ +/** + * Tests that when resharding is in building-index phase, failover happens and resharding should + * still work correctly. + * + * @tags: [ + * requires_fcv_71, + * featureFlagReshardingImprovements + * ] + */ + +load("jstests/libs/discover_topology.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; +load("jstests/sharding/libs/resharding_test_fixture.js"); + +const ns = "reshardingDb.coll"; +const reshardingTest = new ReshardingTest({numDonors: 2, enableElections: true}); +reshardingTest.setup(); + +const donorShardNames = reshardingTest.donorShardNames; +const sourceCollection = reshardingTest.createShardedCollection({ + ns, + shardKeyPattern: {oldKey: 1}, + chunks: [ + {min: {oldKey: MinKey}, max: {oldKey: 0}, shard: donorShardNames[0]}, + {min: {oldKey: 0}, max: {oldKey: MaxKey}, shard: donorShardNames[1]}, + ], +}); +const mongos = sourceCollection.getMongo(); +const topology = DiscoverTopology.findConnectedNodes(mongos); + +const recipientShardNames = reshardingTest.recipientShardNames; +const recipient = new Mongo(topology.shards[recipientShardNames[0]].primary); + +if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled"); + reshardingTest.teardown(); + quit(); +} + +// Create an index on oldKey. +assert.commandWorked( + mongos.getCollection(ns).insert([{oldKey: 1, newKey: -1}, {oldKey: 2, newKey: -2}])); +assert.commandWorked(mongos.getCollection(ns).createIndex({oldKey: 1})); +const hangAfterInitializingIndexBuildFailPoint = + configureFailPoint(recipient, "hangAfterInitializingIndexBuild"); + +reshardingTest.withReshardingInBackground( + { + newShardKeyPattern: {newKey: 1}, + newChunks: [{min: {newKey: MinKey}, max: {newKey: MaxKey}, shard: recipientShardNames[0]}], + }, + () => { + // Wait until participants are aware of the resharding operation. 
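+ // (awaitCloneTimestampChosen() returns once the coordinator has picked a cloneTimestamp,
+ // guaranteeing the operation has started on the participants before the fail point is waited on.)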
+ reshardingTest.awaitCloneTimestampChosen(); + hangAfterInitializingIndexBuildFailPoint.wait(); + jsTestLog("Hang primary during building index, then step up a new primary"); + + reshardingTest.stepUpNewPrimaryOnShard(recipientShardNames[0]); + const recipientRS = reshardingTest.getReplSetForShard(recipientShardNames[0]); + recipientRS.awaitSecondaryNodes(); + recipientRS.awaitReplication(); + hangAfterInitializingIndexBuildFailPoint.off(); + }, + { + afterReshardingFn: () => { + const indexes = mongos.getDB("reshardingDb").getCollection("coll").getIndexes(); + let haveNewShardKeyIndex = false; + indexes.forEach(index => { + if ("newKey" in index["key"]) { + haveNewShardKeyIndex = true; + } + }); + assert.eq(haveNewShardKeyIndex, true); + } + }); + +reshardingTest.teardown(); \ No newline at end of file diff --git a/jstests/sharding/resharding_change_stream_internal_ops.js b/jstests/sharding/resharding_change_stream_internal_ops.js index 46d14a5e8908b..c108958982171 100644 --- a/jstests/sharding/resharding_change_stream_internal_ops.js +++ b/jstests/sharding/resharding_change_stream_internal_ops.js @@ -89,23 +89,17 @@ reshardingTest.withReshardingInBackground( // Check for reshardBegin event on both donors. const expectedReshardBeginEvent = { reshardingUUID: reshardingUUID, - operationType: "reshardBegin" + operationType: "reshardBegin", + ns: {db: kDbName, coll: collName}, }; const reshardBeginDonor0Event = cstDonor0.getNextChanges(changeStreamsCursorDonor0, 1, false /* skipFirstBatch */); - // The 'ns' field was added after 6.0, so the field will be absent when running on a 6.0 - // mongod. Delete the field so that the test can run on a mixed version suite. - // - // TODO SERVER-66645: Remove this line after branching for 7.0. - delete reshardBeginDonor0Event[0].ns; - assertChangeStreamEventEq(reshardBeginDonor0Event[0], expectedReshardBeginEvent); const reshardBeginDonor1Event = cstDonor1.getNextChanges(changeStreamsCursorDonor1, 1, false /* skipFirstBatch */); - delete reshardBeginDonor1Event[0].ns; assertChangeStreamEventEq(reshardBeginDonor1Event[0], expectedReshardBeginEvent); }, { @@ -113,19 +107,21 @@ reshardingTest.withReshardingInBackground( // Check for reshardDoneCatchUp event on the recipient. const expectedReshardDoneCatchUpEvent = { reshardingUUID: reshardingUUID, - operationType: "reshardDoneCatchUp" + operationType: "reshardDoneCatchUp", }; const reshardDoneCatchUpEvent = cstRecipient0.getNextChanges( - changeStreamsCursorRecipient0, 1, false /* skipFirstBatch */); + changeStreamsCursorRecipient0, 1, false /* skipFirstBatch */)[0]; - // The 'ns' field was added after 6.0, so the field will be absent when running on a 6.0 - // mongod. Delete the field so that the test can run on a mixed version suite. - // - // TODO SERVER-66645: Remove this line after branching for 7.0. - delete reshardDoneCatchUpEvent[0].ns; + // Ensure that the 'reshardingDoneCatchUp' event has an 'ns' field of the format + // '{ns: kDbName, coll: "system.resharding.<>"}. 
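+ // (Concretely: 'ns.db' should equal kDbName and 'ns.coll' should name the temporary
+ // resharding collection; the assertions below check exactly that before deleting the field.)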
+ assert(reshardDoneCatchUpEvent.ns, reshardDoneCatchUpEvent); + assert.eq(reshardDoneCatchUpEvent.ns.db, kDbName, reshardDoneCatchUpEvent); + assert(reshardDoneCatchUpEvent.ns.coll.startsWith("system.resharding."), + reshardDoneCatchUpEvent); + delete reshardDoneCatchUpEvent.ns; - assertChangeStreamEventEq(reshardDoneCatchUpEvent[0], expectedReshardDoneCatchUpEvent); + assertChangeStreamEventEq(reshardDoneCatchUpEvent, expectedReshardDoneCatchUpEvent); } }); diff --git a/jstests/sharding/resharding_disallow_drop.js b/jstests/sharding/resharding_disallow_drop.js index c9f7b373a0b22..ad214d8aeb0b9 100644 --- a/jstests/sharding/resharding_disallow_drop.js +++ b/jstests/sharding/resharding_disallow_drop.js @@ -12,7 +12,7 @@ load("jstests/libs/fail_point_util.js"); var st = new ShardingTest({ shards: {rs0: {nodes: 2}}, - config: TestData.catalogShard ? 2 : 1, + config: TestData.configShard ? 2 : 1, mongos: 1, other: { configOptions: {setParameter: {reshardingCriticalSectionTimeoutMillis: 24 * 60 * 60 * 1000}} @@ -27,15 +27,15 @@ const db = st.s.getDB(dbName); assert.commandWorked(st.s.adminCommand({enableSharding: dbName})); assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {_id: 1}})); -const reshardingPauseBeforeInsertCoordinatorDocFailpoint = - configureFailPoint(st.configRS.getPrimary(), "pauseBeforeInsertCoordinatorDoc"); +const reshardingPauseCoordinatorBeforeInitializingFailpoint = + configureFailPoint(st.configRS.getPrimary(), "reshardingPauseCoordinatorBeforeInitializing"); assert.commandFailedWithCode( db.adminCommand({reshardCollection: ns, key: {newKey: 1}, maxTimeMS: 1000}), ErrorCodes.MaxTimeMSExpired); // Wait for resharding to start running on the configsvr -reshardingPauseBeforeInsertCoordinatorDocFailpoint.wait(); +reshardingPauseCoordinatorBeforeInitializingFailpoint.wait(); // Drop cannot progress while resharding is in progress assert.commandFailedWithCode(db.runCommand({drop: collName, maxTimeMS: 5000}), @@ -52,7 +52,7 @@ assert.commandFailedWithCode(db.runCommand({drop: collName, maxTimeMS: 5000}), ErrorCodes.MaxTimeMSExpired); // Finish resharding -reshardingPauseBeforeInsertCoordinatorDocFailpoint.off(); +reshardingPauseCoordinatorBeforeInitializingFailpoint.off(); assert.commandWorked(db.adminCommand({reshardCollection: ns, key: {newKey: 1}})); // Now the drop can complete diff --git a/jstests/sharding/resharding_feature_flagging.js b/jstests/sharding/resharding_feature_flagging.js deleted file mode 100644 index 8b02316886551..0000000000000 --- a/jstests/sharding/resharding_feature_flagging.js +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Tests the resharding feature cannot be used when the feature flag is off. 
- * - * @tags: [ - * ] - */ -(function() { -"use strict"; - -load("jstests/sharding/libs/create_sharded_collection_util.js"); - -const st = new ShardingTest({ - mongos: 1, - mongosOptions: {setParameter: {featureFlagResharding: false}}, - config: 1, - configOptions: {setParameter: {featureFlagResharding: false}}, - shards: 1, - rs: {nodes: 1}, - rsOptions: {setParameter: {featureFlagResharding: false}}, -}); - -const sourceCollection = st.s.getCollection("reshardingDb.coll"); - -CreateShardedCollectionUtil.shardCollectionWithChunks( - sourceCollection, {x: 1}, [{min: {x: MinKey}, max: {x: MaxKey}, shard: st.shard0.shardName}]); - -assert.commandFailedWithCode( - st.s.adminCommand({reshardCollection: sourceCollection.getFullName(), key: {y: 1}}), - ErrorCodes.CommandNotFound); - -assert.commandFailedWithCode( - st.s.adminCommand({abortReshardCollection: sourceCollection.getFullName()}), - ErrorCodes.CommandNotFound); - -const configPrimary = st.configRS.getPrimary(); -assert.commandFailedWithCode(configPrimary.adminCommand({ - _configsvrReshardCollection: sourceCollection.getFullName(), - key: {y: 1}, - writeConcern: {w: 'majority'} -}), - ErrorCodes.CommandNotSupported); - -assert.commandFailedWithCode( - configPrimary.adminCommand({_configsvrAbortReshardCollection: sourceCollection.getFullName()}), - ErrorCodes.CommandNotSupported); - -const serverStatusCmd = ({serverStatus: 1, shardingStatistics: 1}); -let res = assert.commandWorked(configPrimary.adminCommand(serverStatusCmd)); -assert(!res.shardingStatistics.hasOwnProperty("resharding"), res.shardingStatistics); - -const shardPrimary = st.shard0.rs.getPrimary(); -res = assert.commandWorked(shardPrimary.adminCommand(serverStatusCmd)); -assert(!res.shardingStatistics.hasOwnProperty("resharding"), res.shardingStatistics); - -st.stop(); -})(); diff --git a/jstests/sharding/resharding_improvements_aggregate_resume_token.js b/jstests/sharding/resharding_improvements_aggregate_resume_token.js new file mode 100644 index 0000000000000..5a01f292cfb2d --- /dev/null +++ b/jstests/sharding/resharding_improvements_aggregate_resume_token.js @@ -0,0 +1,90 @@ +/** + * Tests $_requestResumeToken in aggregate command. 
+ * + * @tags: [ + * require_fcv_71, + * featureFlagReshardingImprovements + * ] + */ + +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; + +const st = new ShardingTest({mongos: 1, shards: 2}); +const kDbName = 'db'; +const collName = 'foo'; +const timeFieldName = "time"; +const timeseriesCollName = "ts"; +const mongos = st.s0; +const numInitialDocs = 10; +const db = st.rs0.getPrimary().getDB(kDbName); + +let bulk = db.getCollection(collName).initializeOrderedBulkOp(); +for (let x = 0; x < numInitialDocs; x++) { + bulk.insert({oldKey: x, newKey: numInitialDocs - x}); +} +assert.commandWorked(bulk.execute()); + +if (!FeatureFlagUtil.isEnabled(mongos, "ReshardingImprovements")) { + jsTestLog("Skipping test since featureFlagReshardingImprovements is not enabled."); + quit(); +} + +jsTest.log("aggregate with $requestResumeToken should fail without hint: {$natural: 1}."); +assert.commandFailedWithCode( + db.runCommand({aggregate: collName, pipeline: [], $_requestResumeToken: true, cursor: {}}), + ErrorCodes.BadValue); + +jsTest.log("aggregate with $requestResumeToken should fail if the hint is not {$natural: 1}."); +assert.commandFailedWithCode(db.runCommand({ + aggregate: collName, + pipeline: [], + $_requestResumeToken: true, + cursor: {}, + hint: {oldKey: 1} +}), + ErrorCodes.BadValue); + +jsTest.log( + "aggregate with $requestResumeToken should return PBRT with recordId and initialSyncId."); +let res = db.runCommand({ + aggregate: collName, + pipeline: [], + $_requestResumeToken: true, + hint: {$natural: 1}, + cursor: {batchSize: 1} +}); +assert.hasFields(res.cursor, ["postBatchResumeToken"]); +assert.hasFields(res.cursor.postBatchResumeToken, ["$recordId"]); +assert.hasFields(res.cursor.postBatchResumeToken, ["$initialSyncId"]); +const resumeToken = res.cursor.postBatchResumeToken; + +jsTest.log("aggregate with wrong $recordId type in $resumeAfter should fail"); +assert.commandFailedWithCode(db.runCommand({ + aggregate: collName, + pipeline: [], + hint: {$natural: 1}, + $_requestResumeToken: true, + $_resumeAfter: {$recordId: 1, $initialSyncId: UUID("81fd5473-1747-4c9d-8743-f10642b3bb99")}, + cursor: {batchSize: 1} +}), + ErrorCodes.BadValue); + +jsTest.log("aggregate with $resumeAfter should fail without {$_requestResumeToken: true}."); +assert.commandFailedWithCode(db.runCommand({ + aggregate: collName, + pipeline: [], + hint: {$natural: 1}, + $_resumeAfter: resumeToken, + cursor: {batchSize: 1} +}), + ErrorCodes.BadValue); + +res = db.runCommand({ + aggregate: collName, + pipeline: [], + $_requestResumeToken: true, + hint: {$natural: 1}, + $_resumeAfter: resumeToken, + cursor: {batchSize: 1} +}); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/resharding_large_number_of_initial_chunks.js b/jstests/sharding/resharding_large_number_of_initial_chunks.js index 2b0d7b4b1a713..0bda116b9c710 100644 --- a/jstests/sharding/resharding_large_number_of_initial_chunks.js +++ b/jstests/sharding/resharding_large_number_of_initial_chunks.js @@ -1,6 +1,6 @@ /** - * Tests that resharding can complete successfully when the original collection has a large number - * of chunks. + * Tests that resharding can complete successfully when it has a large number + * of chunks being created during the process. * * @tags: [ * uses_atclustertime, @@ -29,41 +29,33 @@ const kDbName = 'db'; const collName = 'foo'; const ns = kDbName + '.' 
+ collName; const mongos = st.s; +const shard0 = st.shard0.shardName; +const shard1 = st.shard1.shardName; assert.commandWorked(mongos.adminCommand({enableSharding: kDbName})); assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {oldKey: 1}})); -let nZones = 175000; -let zones = []; -let shard0Zones = []; -let shard1Zones = []; -for (let i = 0; i < nZones; i++) { - let zoneName = "zone" + i; - zones.push({zone: zoneName, min: {"newKey": i}, max: {"newKey": i + 1}}); +let nChunks = 100000; +let newChunks = []; +newChunks.push({min: {newKey: MinKey}, max: {newKey: 0}, recipientShardId: shard0}); +for (let i = 0; i < nChunks; i++) { if (i % 2 == 0) { - shard0Zones.push(zoneName); + newChunks.push({min: {newKey: i}, max: {newKey: i + 1}, recipientShardId: shard0}); } else { - shard1Zones.push(zoneName); + newChunks.push({min: {newKey: i}, max: {newKey: i + 1}, recipientShardId: shard1}); } } - -jsTestLog("Updating First Zone"); -assert.commandWorked( - mongos.getDB("config").shards.update({_id: st.shard0.shardName}, {$set: {tags: shard0Zones}})); -jsTestLog("Updating First Zone"); -assert.commandWorked( - mongos.getDB("config").shards.update({_id: st.shard1.shardName}, {$set: {tags: shard1Zones}})); +newChunks.push({min: {newKey: nChunks}, max: {newKey: MaxKey}, recipientShardId: shard1}); jsTestLog("Resharding Collection"); -assert.commandWorked(mongos.adminCommand({reshardCollection: ns, key: {newKey: 1}, zones: zones})); +assert.commandWorked(mongos.adminCommand( + {reshardCollection: ns, key: {newKey: 1}, _presetReshardedChunks: newChunks})); -// Assert that the correct number of zones and chunks documents exist after resharding 'db.foo'. -// There should be two more chunks docs than zones docs created to cover the ranges -// {newKey: minKey -> newKey : 0} and {newKey: nZones -> newKey : maxKey} which are not associated -// with a zone. -assert.eq(mongos.getDB("config").tags.find({ns: ns}).itcount(), nZones); -assert.eq(findChunksUtil.countChunksForNs(mongos.getDB("config"), ns), nZones + 2); +// Assert that the correct number of chunks documents exist after resharding 'db.foo'. +// There should be two more chunks docs to cover the ranges +// {newKey: minKey -> newKey : 0} and {newKey: nChunks -> newKey : maxKey} +assert.eq(findChunksUtil.countChunksForNs(mongos.getDB("config"), ns), nChunks + 2); // check_orphans_are_deleted.js is skipped because it takes 1 minute to run on an optimized build // and this test doesn't insert any data for there to be unowned documents anyway. diff --git a/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js b/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js index c005f1e0e778d..e5557178d9815 100644 --- a/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js +++ b/jstests/sharding/resharding_nonblocking_coordinator_rebuild.js @@ -2,8 +2,11 @@ * Tests that resharding participants do not block replication while waiting for the * ReshardingCoordinatorService to be rebuilt. * - * Looks like a test incompatibility, but should be verified and maybe rework the test. - * @tags: [temporary_catalog_shard_incompatible] + * Incompatible because it uses a fail point to block all primary only services from being rebuilt + * on the config server, and if the config server is the first shard, this prevents the test from + * making progress. This tests logic that shouldn't be different on a config server, so there's no + * need to run it with a config shard. 
+ * @tags: [config_shard_incompatible] */ (function() { "use strict"; diff --git a/jstests/sharding/resharding_update_tag_zones.js b/jstests/sharding/resharding_update_tag_zones.js new file mode 100644 index 0000000000000..f61fe0bb0d615 --- /dev/null +++ b/jstests/sharding/resharding_update_tag_zones.js @@ -0,0 +1,53 @@ +/** + * Testing that config.tags are correctly updated after resharding hashed shard key with zones. + */ + +(function() { +"use strict"; + +const st = new ShardingTest({shard: 2}); +const dbName = "testDb"; +const collName = "testColl"; +const ns = dbName + "." + collName; + +// Enable sharding on the test DB and ensure its primary is st.shard0.shardName. +assert.commandWorked(st.s.adminCommand({enablesharding: dbName})); +st.ensurePrimaryShard(dbName, st.shard0.shardName); +assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {oldKey: "hashed"}})); + +const existingZoneName = 'x1'; +assert.commandWorked( + st.s.adminCommand({addShardToZone: st.shard1.shardName, zone: existingZoneName})); + +assert.commandWorked(st.s.adminCommand({ + updateZoneKeyRange: ns, + min: {oldKey: NumberLong("4470791281878691347")}, + max: {oldKey: NumberLong("7766103514953448109")}, + zone: existingZoneName +})); + +assert.commandWorked(st.s.adminCommand({ + reshardCollection: ns, + key: {oldKey: 1}, + unique: false, + collation: {locale: 'simple'}, + zones: [{ + zone: existingZoneName, + min: {oldKey: NumberLong("4470791281878691346")}, + max: {oldKey: NumberLong("7766103514953448108")} + }], + numInitialChunks: 2, +})); + +// Find the tags docs. +var configDB = st.s.getDB("config"); +let tags = configDB.tags.find({}).toArray(); + +// Assert only one tag doc is present and zone ranges are correct. +assert.eq(1, configDB.tags.countDocuments({})); +assert.eq({oldKey: NumberLong("4470791281878691346")}, tags[0].min); +assert.eq({oldKey: NumberLong("7766103514953448108")}, tags[0].max); +assert.eq(existingZoneName, tags[0].tag); + +st.stop(); +})(); diff --git a/jstests/sharding/resharding_update_tag_zones_large.js b/jstests/sharding/resharding_update_tag_zones_large.js new file mode 100644 index 0000000000000..67441f8bc98a1 --- /dev/null +++ b/jstests/sharding/resharding_update_tag_zones_large.js @@ -0,0 +1,137 @@ +/** + * Testing that the reshardCollection command aborts correctly when the transaction for updating + * the persistent state (e.g. config.collections and config.tags) in the resharding commit phase + * fails with a TransactionTooLargeForCache error. + */ + +(function() { +"use strict"; + +load("jstests/libs/fail_point_util.js"); + +function assertEqualObj(lhs, rhs, keysToIgnore) { + assert.eq(Object.keys(lhs).length, Object.keys(lhs).length, {lhs, rhs}); + for (let key in rhs) { + if (keysToIgnore && keysToIgnore.has(key)) { + continue; + } + + const value = rhs[key]; + if (typeof value === 'object') { + assertEqualObj(lhs[key], rhs[key], keysToIgnore); + } else { + assert.eq(lhs[key], rhs[key], {key, actual: lhs, expected: rhs}); + } + } +} + +const st = new ShardingTest({ + shard: 2, + // This test uses a fail point to force the commitTransaction command in the resharding commit + // phase to fail with a TransactionTooLargeForCache error. To make the test setup work reliably, + // disable the cluster parameter refresher since it periodically runs internal transactions + // against the the config server. 
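+ // (The fail point is enabled at startup through the 'failpoint.' setParameter prefix, so the
+ // refresher stays disabled for the lifetime of this mongos.)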
+ mongosOptions: {setParameter: {'failpoint.skipClusterParameterRefresh': "{'mode':'alwaysOn'}"}}, + configOptions: + {setParameter: + {'reshardingCriticalSectionTimeoutMillis': 24 * 60 * 60 * 1000 /* 1 day */}} +}); +const configRSPrimary = st.configRS.getPrimary(); + +const dbName = "testDb"; +const collName = "testColl"; +const ns = dbName + "." + collName; + +const configDB = st.s.getDB("config"); +const collectionsColl = configDB.getCollection("collections"); +const chunksColl = configDB.getCollection("chunks"); +const tagsColl = configDB.getCollection("tags"); + +assert.commandWorked(st.s.adminCommand({enablesharding: dbName})); +st.ensurePrimaryShard(dbName, st.shard0.shardName); +assert.commandWorked(st.s.adminCommand({shardCollection: ns, key: {skey: "hashed"}})); + +const zoneName = "testZone"; +assert.commandWorked(st.s.adminCommand({addShardToZone: st.shard0.shardName, zone: zoneName})); + +const oldZone = { + tag: zoneName, + min: {skey: NumberLong("4470791281878691347")}, + max: {skey: NumberLong("7766103514953448109")} +}; +assert.commandWorked(st.s.adminCommand( + {updateZoneKeyRange: ns, min: oldZone.min, max: oldZone.max, zone: oldZone.tag})); + +const collBefore = collectionsColl.findOne({_id: ns}); +assert.neq(collBefore, null); +const chunksBefore = chunksColl.find({uuid: collBefore.uuid}).sort({lastmod: -1}).toArray(); +assert.gte(chunksBefore.length, 1, chunksBefore); +const tagsBefore = tagsColl.find({ns}).toArray(); +assert.gte(tagsBefore.length, 1, tagsBefore); + +const reshardingFunc = (mongosHost, ns, zoneName) => { + const mongos = new Mongo(mongosHost); + const newZone = { + tag: zoneName, + min: {skey: NumberLong("4470791281878691346")}, + max: {skey: NumberLong("7766103514953448108")} + }; + jsTest.log("Start resharding"); + const reshardingRes = mongos.adminCommand({ + reshardCollection: ns, + key: {skey: 1}, + unique: false, + collation: {locale: 'simple'}, + zones: [{zone: newZone.tag, min: newZone.min, max: newZone.max}], + numInitialChunks: 2, + }); + jsTest.log("Finished resharding"); + return reshardingRes; +}; +let reshardingThread = new Thread(reshardingFunc, st.s.host, ns, zoneName); + +const persistFp = + configureFailPoint(configRSPrimary, "reshardingPauseCoordinatorBeforeDecisionPersisted"); +reshardingThread.start(); +persistFp.wait(); + +const commitFp = configureFailPoint(configRSPrimary, + "failCommand", + { + failCommands: ["commitTransaction"], + failInternalCommands: true, + failLocalClients: true, + errorCode: ErrorCodes.TransactionTooLargeForCache, + }, + {times: 1}); +persistFp.off(); +commitFp.wait(); +commitFp.off(); +const reshardingRes = reshardingThread.returnData(); + +assert.commandFailedWithCode(reshardingRes, ErrorCodes.TransactionTooLargeForCache); + +const collAfter = collectionsColl.findOne({_id: ns}); +assert.neq(collAfter, null); +const chunksAfter = chunksColl.find({uuid: collAfter.uuid}).sort({lastmod: -1}).toArray(); +const tagsAfter = tagsColl.find({ns}).toArray(); + +jsTest.log( + "Verify that the collection metadata remains the same since the resharding operation failed."); + +assertEqualObj(collBefore, collAfter); + +assert.eq(chunksBefore.length, chunksAfter.length, {chunksBefore, chunksAfter}); +for (let i = 0; i < chunksAfter.length; i++) { + // Ignore "lastmod" when verifying the newest chunk because resharding bumps the minor version + // of the newest chunk whenever it goes through a state transition. + assertEqualObj(chunksBefore[i], chunksAfter[i], new Set(i == 0 ? 
["lastmod"] : [])); +} + +assert.eq(tagsBefore.length, tagsAfter.length, {tagsBefore, tagsAfter}); +for (let i = 0; i < tagsAfter.length; i++) { + assertEqualObj(tagsBefore[i], tagsAfter[i]); +} + +st.stop(); +})(); diff --git a/jstests/sharding/resharding_with_multi_deletes_reduced_ticket_pool_size.js b/jstests/sharding/resharding_with_multi_deletes_reduced_ticket_pool_size.js new file mode 100644 index 0000000000000..7b4d77c52919a --- /dev/null +++ b/jstests/sharding/resharding_with_multi_deletes_reduced_ticket_pool_size.js @@ -0,0 +1,50 @@ +/** + * Test the correctness of multiple deletes during resharding with a reduced ticket pool size. + * + * @tags: [ + * requires_sharding, + * ] + */ + +(function() { +"use strict"; + +load("jstests/libs/discover_topology.js"); +load("jstests/sharding/libs/resharding_test_fixture.js"); + +const kNumWriteTickets = 5; +const kReshardingOplogBatchTaskCount = 20; +const reshardingTest = new ReshardingTest({ + wiredTigerConcurrentWriteTransactions: kNumWriteTickets, + reshardingOplogBatchTaskCount: kReshardingOplogBatchTaskCount +}); + +reshardingTest.setup(); + +const donorShardNames = reshardingTest.donorShardNames; +const sourceCollection = reshardingTest.createShardedCollection({ + ns: "reshardingDb.coll", + shardKeyPattern: {oldKey: 1}, + chunks: [{min: {oldKey: MinKey}, max: {oldKey: MaxKey}, shard: donorShardNames[0]}], +}); +for (let i = 0; i < 100; i++) { + assert.commandWorked(sourceCollection.insert([{x: 1}])); +} +assert.commandWorked(sourceCollection.insert([{x: 3}, {x: 3}])); +const mongos = sourceCollection.getMongo(); +const topology = DiscoverTopology.findConnectedNodes(mongos); +const coordinator = new Mongo(topology.configsvr.nodes[0]); +const recipientShardNames = reshardingTest.recipientShardNames; +reshardingTest.withReshardingInBackground( + { + newShardKeyPattern: {newKey: 1}, + newChunks: [{min: {newKey: MinKey}, max: {newKey: MaxKey}, shard: recipientShardNames[0]}], + }, + () => { + // We wait until cloneTimestamp has been chosen to guarantee that any subsequent writes will + // be applied by the ReshardingOplogApplier. + reshardingTest.awaitCloneTimestampChosen(); + assert.commandWorked(sourceCollection.remove({x: 1}, {justOne: false})); + }); +reshardingTest.teardown(); +})(); diff --git a/jstests/sharding/retryable_write_error_labels.js b/jstests/sharding/retryable_write_error_labels.js index 23b0ff3c8313b..c75ff11103bb3 100644 --- a/jstests/sharding/retryable_write_error_labels.js +++ b/jstests/sharding/retryable_write_error_labels.js @@ -145,10 +145,11 @@ function testMongosError() { const retryableInsertThread = new Thread((mongosHost, dbName, collName) => { const mongos = new Mongo(mongosHost); const session = mongos.startSession(); + session.startTransaction(); return session.getDatabase(dbName).runCommand({ insert: collName, documents: [{a: 0, b: "retryable"}], - txnNumber: NumberLong(0), + txnNumber: NumberLong(session.getTxnNumber_forTesting()), }); }, st.s.host, dbName, collName); retryableInsertThread.start(); diff --git a/jstests/sharding/retryable_writes.js b/jstests/sharding/retryable_writes.js index 653526d91b08e..2f8d78e7bc383 100644 --- a/jstests/sharding/retryable_writes.js +++ b/jstests/sharding/retryable_writes.js @@ -36,7 +36,7 @@ function verifyServerStatusChanges( function runTests(mainConn, priConn) { var lsid = UUID(); - if (TestData.catalogShard) { + if (TestData.configShard) { // Creating a collection updates counters on the config server, so do that before getting // the initial stats. 
assert.commandWorked(mainConn.getDB("test").createCollection("user")); @@ -329,12 +329,6 @@ function runFailpointTests(mainConn, priConn) { var lsid = UUID(); var testDb = mainConn.getDB('TestDB'); - if (TestData.catalogShard) { - // TODO SERVER-75821: Workaround for crash when executing the fail point while implicitly - // creating a collection in a transaction on the config server. - assert.commandWorked(testDb.createCollection("user")); - } - // Test connection close (default behaviour). The connection will get closed, but the // inserts must succeed assert.commandWorked(priConn.adminCommand( diff --git a/jstests/sharding/return_partial_shards_down.js b/jstests/sharding/return_partial_shards_down.js index ec76e5bfa524a..032a7ca9b13f8 100644 --- a/jstests/sharding/return_partial_shards_down.js +++ b/jstests/sharding/return_partial_shards_down.js @@ -1,6 +1,10 @@ // // Tests that zero results are correctly returned with returnPartial and shards down // +// Shuts down all shards, which includes the config server. Can be made to pass by restarting the +// config server, but this makes the test flaky. +// @tags: [config_shard_incompatible] +// // Checking UUID and index consistency involves talking to shards, but this test shuts down shards. TestData.skipCheckingUUIDsConsistentAcrossCluster = true; @@ -85,12 +89,4 @@ checkDocCount(collAllShards, returnPartialFlag, true, 0); jsTest.log("DONE!"); -if (TestData.catalogShard) { - // Sharding test stop requires the config server to be up, so restart the first shard if it's - // the config server. - st.rs0.startSet({restart: true}); - st.rs0.initiate(); - st.rs0.awaitReplication(); -} - st.stop(); diff --git a/jstests/sharding/run_restore.js b/jstests/sharding/run_restore.js index d16560f872528..b63ff3f1639ea 100644 --- a/jstests/sharding/run_restore.js +++ b/jstests/sharding/run_restore.js @@ -6,10 +6,7 @@ * requires_persistence, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const s = new ShardingTest({name: "runRestore", shards: 2, mongos: 1, config: 1, other: {chunkSize: 1}}); @@ -19,7 +16,7 @@ let db = s.getDB("test"); if (!FeatureFlagUtil.isEnabled(s.configRS.getPrimary().getDB("test"), "SelectiveBackup")) { jsTestLog("Skipping as featureFlagSelectiveBackup is not enabled"); s.stop(); - return; + quit(); } s.adminCommand({enablesharding: "test"}); @@ -166,4 +163,21 @@ assert.eq(1, conn.getDB("config").getCollection("databases").find({_id: "test"}) assert.eq(0, conn.getDB("config").getCollection("databases").find({_id: "unusedDB"}).count()); MongoRunner.stopMongod(conn); -}()); + +// Start the config server in standalone restore mode. +conn = MongoRunner.runMongod({noCleanData: true, dbpath: configDbPath, restore: ""}); +assert(conn); + +// '_configsvrRunRestore' command ignores cache collections. +assert.commandWorked(conn.getDB("config").createCollection("cache.test")); +assert.commandWorked(conn.getDB("admin").runCommand({_configsvrRunRestore: 1})); + +// Can't run during testing if the config server has unrecognized collections. +assert.commandWorked(conn.getDB("config").createCollection("unknown")); +let error = assert.throws(function() { + conn.getDB("admin").runCommand({_configsvrRunRestore: 1}); +}); +assert(isNetworkError(error)); + +// The server should have crashed from fatally asserting on unknown config collection. 
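+// (Hence the allowedExitCode of MongoRunner.EXIT_ABORT below; the isNetworkError check above
+// confirms the connection dropped when the fassert fired.)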
+MongoRunner.stopMongod(conn, null, {allowedExitCode: MongoRunner.EXIT_ABORT}); \ No newline at end of file diff --git a/jstests/sharding/run_restore_unsharded.js b/jstests/sharding/run_restore_unsharded.js index d9ad5d0edc9ea..6a10904a5fe1a 100644 --- a/jstests/sharding/run_restore_unsharded.js +++ b/jstests/sharding/run_restore_unsharded.js @@ -6,10 +6,7 @@ * requires_persistence, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; const s = new ShardingTest( {name: "runRestoreUnsharded", shards: 2, mongos: 1, config: 1, other: {chunkSize: 1}}); @@ -19,7 +16,7 @@ let db = s.getDB("test"); if (!FeatureFlagUtil.isEnabled(s.configRS.getPrimary().getDB("test"), "SelectiveBackup")) { jsTestLog("Skipping as featureFlagSelectiveBackup is not enabled"); s.stop(); - return; + quit(); } s.adminCommand({enablesharding: "test"}); @@ -58,5 +55,4 @@ assert.eq(0, conn.getDB("config").getCollection("collections").find({_id: "test. assert.eq(1, conn.getDB("config").getCollection("databases").find({_id: "test"}).count()); -MongoRunner.stopMongod(conn); -}()); +MongoRunner.stopMongod(conn); \ No newline at end of file diff --git a/jstests/sharding/safe_secondary_reads_causal_consistency.js b/jstests/sharding/safe_secondary_reads_causal_consistency.js index 60726008fe5a6..75bfd0528a16c 100644 --- a/jstests/sharding/safe_secondary_reads_causal_consistency.js +++ b/jstests/sharding/safe_secondary_reads_causal_consistency.js @@ -10,7 +10,7 @@ "use strict"; load("jstests/libs/fail_point_util.js"); -load("./jstests/libs/chunk_manipulation_util.js"); +load("jstests/libs/chunk_manipulation_util.js"); /** * @summary This function executes a count command with read preference "secondary" and returns the diff --git a/jstests/sharding/safe_secondary_reads_drop_recreate.js b/jstests/sharding/safe_secondary_reads_drop_recreate.js index 41fa3fe9b2a14..85134e61a2f27 100644 --- a/jstests/sharding/safe_secondary_reads_drop_recreate.js +++ b/jstests/sharding/safe_secondary_reads_drop_recreate.js @@ -52,6 +52,8 @@ let testCases = { _configsvrBalancerStart: {skip: "primary only"}, _configsvrBalancerStatus: {skip: "primary only"}, _configsvrBalancerStop: {skip: "primary only"}, + _configsvrCheckClusterMetadataConsistency: {skip: "internal command"}, + _configsvrCheckMetadataConsistency: {skip: "internal command"}, _configsvrClearJumboFlag: {skip: "primary only"}, _configsvrCommitChunksMerge: {skip: "primary only"}, _configsvrCommitChunkMigration: {skip: "primary only"}, @@ -62,10 +64,12 @@ let testCases = { _configsvrDropIndexCatalogEntry: {skip: "primary only"}, _configsvrMoveRange: {skip: "primary only"}, _configsvrRemoveShardFromZone: {skip: "primary only"}, + _configsvrResetPlacementHistory: {skip: "primary only"}, _configsvrReshardCollection: {skip: "primary only"}, - _configsvrTransitionToCatalogShard: {skip: "primary only"}, + _configsvrTransitionFromDedicatedConfigServer: {skip: "primary only"}, _configsvrTransitionToDedicatedConfigServer: {skip: "primary only"}, _configsvrUpdateZoneKeyRange: {skip: "primary only"}, + _dropConnectionsToMongot: {skip: "internal command"}, _flushReshardingStateChange: {skip: "does not return user data"}, _flushRoutingTableCacheUpdates: {skip: "does not return user data"}, _flushRoutingTableCacheUpdatesWithWriteConcern: {skip: "does not return user data"}, @@ -76,8 +80,10 @@ let testCases = { _killOperations: {skip: "does not return user data"}, _mergeAuthzCollections: {skip: "primary only"}, 
_migrateClone: {skip: "primary only"}, + _mongotConnPoolStats: {skip: "internal command"}, _shardsvrCheckMetadataConsistency: {skip: "internal command"}, _shardsvrCheckMetadataConsistencyParticipant: {skip: "internal command"}, + _shardsvrCleanupStructuredEncryptionData: {skip: "primary only"}, _shardsvrCompactStructuredEncryptionData: {skip: "primary only"}, _shardsvrMergeAllChunksOnShard: {skip: "primary only"}, _shardsvrMovePrimary: {skip: "primary only"}, @@ -128,18 +134,20 @@ let testCases = { balancerStatus: {skip: "primary only"}, balancerStop: {skip: "primary only"}, buildInfo: {skip: "does not return user data"}, - bulkWrite: {skip: "not yet implemented"}, + bulkWrite: {skip: "primary only"}, captrunc: {skip: "primary only"}, checkMetadataConsistency: {skip: "primary only"}, checkShardingIndex: {skip: "primary only"}, cleanupOrphaned: {skip: "primary only"}, cleanupReshardCollection: {skip: "primary only"}, + cleanupStructuredEncryptionData: {skip: "does not return user data"}, clearJumboFlag: {skip: "primary only"}, clearLog: {skip: "does not return user data"}, clone: {skip: "primary only"}, cloneCollectionAsCapped: {skip: "primary only"}, clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"}, clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"}, + clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"}, clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"}, clusterCount: {skip: "already tested by 'count' tests on mongos"}, clusterDelete: {skip: "already tested by 'delete' tests on mongos"}, @@ -328,6 +336,7 @@ let testCases = { replSetTest: {skip: "does not return user data"}, replSetUpdatePosition: {skip: "does not return user data"}, replSetResizeOplog: {skip: "does not return user data"}, + resetPlacementHistory: {skip: "primary only"}, reshardCollection: {skip: "primary only"}, resync: {skip: "primary only"}, revokePrivilegesFromRole: {skip: "primary only"}, @@ -370,7 +379,7 @@ let testCases = { testVersions1And2: {skip: "does not return user data"}, testVersion2: {skip: "does not return user data"}, top: {skip: "does not return user data"}, - transitionToCatalogShard: {skip: "primary only"}, + transitionFromDedicatedConfigServer: {skip: "primary only"}, transitionToDedicatedConfigServer: {skip: "primary only"}, update: {skip: "primary only"}, updateRole: {skip: "primary only"}, diff --git a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js index b25f985b0b6bb..c6d20d7b79f5e 100644 --- a/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_suspend_range_deletion.js @@ -56,6 +56,8 @@ let testCases = { _configsvrBalancerStart: {skip: "primary only"}, _configsvrBalancerStatus: {skip: "primary only"}, _configsvrBalancerStop: {skip: "primary only"}, + _configsvrCheckClusterMetadataConsistency: {skip: "internal command"}, + _configsvrCheckMetadataConsistency: {skip: "internal command"}, _configsvrClearJumboFlag: {skip: "primary only"}, _configsvrCommitChunksMerge: {skip: "primary only"}, _configsvrCommitChunkMigration: {skip: "primary only"}, @@ -68,13 +70,15 @@ let testCases = { _configsvrRemoveChunks: {skip: "primary only"}, _configsvrRemoveShardFromZone: {skip: "primary only"}, _configsvrRemoveTags: {skip: "primary only"}, + _configsvrResetPlacementHistory: {skip: 
"primary only"}, _configsvrReshardCollection: {skip: "primary only"}, _configsvrSetAllowMigrations: {skip: "primary only"}, _configsvrSetClusterParameter: {skip: "primary only"}, _configsvrSetUserWriteBlockMode: {skip: "primary only"}, - _configsvrTransitionToCatalogShard: {skip: "primary only"}, + _configsvrTransitionFromDedicatedConfigServer: {skip: "primary only"}, _configsvrTransitionToDedicatedConfigServer: {skip: "primary only"}, _configsvrUpdateZoneKeyRange: {skip: "primary only"}, + _dropConnectionsToMongot: {skip: "internal command"}, _flushReshardingStateChange: {skip: "does not return user data"}, _flushRoutingTableCacheUpdates: {skip: "does not return user data"}, _flushRoutingTableCacheUpdatesWithWriteConcern: {skip: "does not return user data"}, @@ -85,8 +89,10 @@ let testCases = { _killOperations: {skip: "does not return user data"}, _mergeAuthzCollections: {skip: "primary only"}, _migrateClone: {skip: "primary only"}, + _mongotConnPoolStats: {skip: "internal command"}, _shardsvrCheckMetadataConsistency: {skip: "internal command"}, _shardsvrCheckMetadataConsistencyParticipant: {skip: "internal command"}, + _shardsvrCleanupStructuredEncryptionData: {skip: "primary only"}, _shardsvrCompactStructuredEncryptionData: {skip: "primary only"}, _shardsvrMergeAllChunksOnShard: {skip: "primary only"}, _shardsvrMovePrimary: {skip: "primary only"}, @@ -131,18 +137,20 @@ let testCases = { balancerStatus: {skip: "primary only"}, balancerStop: {skip: "primary only"}, buildInfo: {skip: "does not return user data"}, - bulkWrite: {skip: "not yet implemented"}, + bulkWrite: {skip: "primary only"}, captrunc: {skip: "primary only"}, checkMetadataConsistency: {skip: "primary only"}, checkShardingIndex: {skip: "primary only"}, cleanupOrphaned: {skip: "primary only"}, cleanupReshardCollection: {skip: "primary only"}, + cleanupStructuredEncryptionData: {skip: "does not return user data"}, clearJumboFlag: {skip: "primary only"}, clearLog: {skip: "does not return user data"}, clone: {skip: "primary only"}, cloneCollectionAsCapped: {skip: "primary only"}, clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"}, clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"}, + clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"}, clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"}, clusterCount: {skip: "already tested by 'count' tests on mongos"}, clusterDelete: {skip: "already tested by 'delete' tests on mongos"}, @@ -386,6 +394,7 @@ let testCases = { replSetTest: {skip: "does not return user data"}, replSetUpdatePosition: {skip: "does not return user data"}, replSetResizeOplog: {skip: "does not return user data"}, + resetPlacementHistory: {skip: "primary only"}, reshardCollection: {skip: "primary only"}, resync: {skip: "primary only"}, revokePrivilegesFromRole: {skip: "primary only"}, @@ -428,7 +437,7 @@ let testCases = { testVersions1And2: {skip: "does not return user data"}, testVersion2: {skip: "does not return user data"}, top: {skip: "does not return user data"}, - transitionToCatalogShard: {skip: "primary only"}, + transitionFromDedicatedConfigServer: {skip: "primary only"}, transitionToDedicatedConfigServer: {skip: "primary only"}, update: {skip: "primary only"}, updateRole: {skip: "primary only"}, diff --git a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js index b153d91fde95c..c65c48bb42618 100644 
--- a/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js +++ b/jstests/sharding/safe_secondary_reads_single_migration_waitForDelete.js @@ -49,6 +49,8 @@ let testCases = { _configsvrBalancerStart: {skip: "primary only"}, _configsvrBalancerStatus: {skip: "primary only"}, _configsvrBalancerStop: {skip: "primary only"}, + _configsvrCheckClusterMetadataConsistency: {skip: "internal command"}, + _configsvrCheckMetadataConsistency: {skip: "internal command"}, _configsvrClearJumboFlag: {skip: "primary only"}, _configsvrCommitChunksMerge: {skip: "primary only"}, _configsvrCommitChunkMigration: {skip: "primary only"}, @@ -61,13 +63,15 @@ let testCases = { _configsvrRemoveChunks: {skip: "primary only"}, _configsvrRemoveShardFromZone: {skip: "primary only"}, _configsvrRemoveTags: {skip: "primary only"}, + _configsvrResetPlacementHistory: {skip: "primary only"}, _configsvrReshardCollection: {skip: "primary only"}, _configsvrSetAllowMigrations: {skip: "primary only"}, _configsvrSetClusterParameter: {skip: "primary only"}, _configsvrSetUserWriteBlockMode: {skip: "primary only"}, - _configsvrTransitionToCatalogShard: {skip: "primary only"}, + _configsvrTransitionFromDedicatedConfigServer: {skip: "primary only"}, _configsvrTransitionToDedicatedConfigServer: {skip: "primary only"}, _configsvrUpdateZoneKeyRange: {skip: "primary only"}, + _dropConnectionsToMongot: {skip: "does not return user data"}, _flushReshardingStateChange: {skip: "does not return user data"}, _flushRoutingTableCacheUpdates: {skip: "does not return user data"}, _flushRoutingTableCacheUpdatesWithWriteConcern: {skip: "does not return user data"}, @@ -78,8 +82,10 @@ let testCases = { _killOperations: {skip: "does not return user data"}, _mergeAuthzCollections: {skip: "primary only"}, _migrateClone: {skip: "primary only"}, + _mongotConnPoolStats: {skip: "internal command"}, _shardsvrCheckMetadataConsistency: {skip: "internal command"}, _shardsvrCheckMetadataConsistencyParticipant: {skip: "internal command"}, + _shardsvrCleanupStructuredEncryptionData: {skip: "primary only"}, _shardsvrCompactStructuredEncryptionData: {skip: "primary only"}, _shardsvrMergeAllChunksOnShard: {skip: "primary only"}, _shardsvrMovePrimary: {skip: "primary only"}, @@ -122,7 +128,7 @@ let testCases = { checkResults: function(res) { // The command should work and return correct results. 
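// (The document count moved under 'keyCharacteristics' in the newer response format,
// hence the numDocsTotal field checked below.)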
assert.commandWorked(res); - assert.eq(res.numDocs, 1000, res); + assert.eq(res.keyCharacteristics.numDocsTotal, 1000, res); }, behavior: "versioned" }, @@ -135,18 +141,20 @@ let testCases = { balancerStatus: {skip: "primary only"}, balancerStop: {skip: "primary only"}, buildInfo: {skip: "does not return user data"}, - bulkWrite: {skip: "not yet implemented"}, + bulkWrite: {skip: "primary only"}, captrunc: {skip: "primary only"}, checkMetadataConsistency: {skip: "primary only"}, checkShardingIndex: {skip: "primary only"}, cleanupOrphaned: {skip: "primary only"}, cleanupReshardCollection: {skip: "primary only"}, + cleanupStructuredEncryptionData: {skip: "does not return user data"}, clearJumboFlag: {skip: "primary only"}, clearLog: {skip: "does not return user data"}, clone: {skip: "primary only"}, cloneCollectionAsCapped: {skip: "primary only"}, clusterAbortTransaction: {skip: "already tested by 'abortTransaction' tests on mongos"}, clusterAggregate: {skip: "already tested by 'aggregate' tests on mongos"}, + clusterBulkWrite: {skip: "already tested by 'bulkWrite' tests on mongos"}, clusterCommitTransaction: {skip: "already tested by 'commitTransaction' tests on mongos"}, clusterCount: {skip: "already tested by 'count' tests on mongos"}, clusterDelete: {skip: "already tested by 'delete' tests on mongos"}, @@ -339,6 +347,7 @@ let testCases = { replSetTest: {skip: "does not return user data"}, replSetUpdatePosition: {skip: "does not return user data"}, replSetResizeOplog: {skip: "does not return user data"}, + resetPlacementHistory: {skip: "primary only"}, reshardCollection: {skip: "primary only"}, resync: {skip: "primary only"}, revokePrivilegesFromRole: {skip: "primary only"}, @@ -380,7 +389,7 @@ let testCases = { testRemoval: {skip: "does not return user data"}, testVersions1And2: {skip: "does not return user data"}, testVersion2: {skip: "does not return user data"}, - transitionToCatalogShard: {skip: "primary only"}, + transitionFromDedicatedConfigServer: {skip: "primary only"}, transitionToDedicatedConfigServer: {skip: "primary only"}, top: {skip: "does not return user data"}, update: {skip: "primary only"}, diff --git a/jstests/sharding/sample_timeseries.js b/jstests/sharding/sample_timeseries.js index a02856ac1f701..b38a078a5c1a0 100644 --- a/jstests/sharding/sample_timeseries.js +++ b/jstests/sharding/sample_timeseries.js @@ -3,15 +3,12 @@ * * @tags: [requires_fcv_51] */ +load("jstests/aggregation/extras/utils.js"); // For arrayEq, documentEq. +import {planHasStage} from "jstests/libs/analyze_plan.js"; // Test deliberately inserts orphans. TestData.skipCheckOrphans = true; -(function() { -load("jstests/aggregation/extras/utils.js"); // For arrayEq, documentEq. -load("jstests/core/timeseries/libs/timeseries.js"); // For TimeseriesTest. -load("jstests/libs/analyze_plan.js"); // For planHasStage. 
- const dbName = 'test'; const collName = 'weather'; const bucketCollName = `system.buckets.${collName}`; @@ -25,12 +22,6 @@ const primaryDB = primary.getDB(dbName); const otherShard = st.shard1; const otherShardDB = otherShard.getDB(dbName); -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(primary)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - let currentId = 0; function generateId() { return currentId++; @@ -422,4 +413,3 @@ testPipeline({ }); st.stop(); -})(); diff --git a/jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js b/jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js index b117f7b3e69e2..e7cd6ddaeec81 100644 --- a/jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js +++ b/jstests/sharding/sbe_plan_cache_does_not_block_range_deletion.js @@ -9,11 +9,8 @@ * expects_explicit_underscore_id_index, * ] */ -(function() { -"use strict"; - -load("jstests/libs/analyze_plan.js"); // For getPlanCacheKeyFromShape. -load("jstests/libs/sbe_util.js"); +import {getPlanCacheKeyFromShape} from "jstests/libs/analyze_plan.js"; +import {checkSBEEnabled} from "jstests/libs/sbe_util.js"; const dbName = "test"; const collName = "sbe_plan_cache_does_not_block_range_deletion"; @@ -93,4 +90,3 @@ if (isSBEEnabled) { } st.stop(); -})(); diff --git a/jstests/sharding/server_status_crud_metrics.js b/jstests/sharding/server_status_crud_metrics.js index af763cabfbc51..36d3e3f189495 100644 --- a/jstests/sharding/server_status_crud_metrics.js +++ b/jstests/sharding/server_status_crud_metrics.js @@ -88,7 +88,7 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(st.s)) { "retryWrites: true.")); } - // Shouldn't increment the metrics for unsharded collection. + // Should increment the metrics for unsharded collection. assert.commandWorked(unshardedColl.update({_id: "missing"}, {$set: {a: 1}}, {multi: false})); assert.commandWorked(unshardedColl.update({_id: 1}, {$set: {a: 2}}, {multi: false})); @@ -99,9 +99,10 @@ if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(st.s)) { mongosServerStatus = testDB.adminCommand({serverStatus: 1}); - // TODO: SERVER-69810 ServerStatus metrics for tracking number of - // updateOnes/deleteOnes/findAndModifies - // assert.eq(5, mongosServerStatus.metrics.query.updateOneOpStyleBroadcastWithExactIDCount); + // Verifying metrics for updateOnes commands. + assert.eq(1, mongosServerStatus.metrics.query.updateOneNonTargetedShardedCount); + assert.eq(2, mongosServerStatus.metrics.query.updateOneUnshardedCount); + } else { // Shouldn't increment the metric when routing fails. assert.commandFailedWithCode(testColl.update({}, {$set: {x: 2}}, {multi: false}), diff --git a/jstests/sharding/session_info_in_oplog.js b/jstests/sharding/session_info_in_oplog.js index 63832abb72d17..01a76f8063f2e 100644 --- a/jstests/sharding/session_info_in_oplog.js +++ b/jstests/sharding/session_info_in_oplog.js @@ -238,10 +238,7 @@ var runTests = function(mainConn, priConn, secConn) { // This test specifically looks for side-effects of writing retryable findAndModify images into the // oplog as noops. Ensure images are not stored in a side collection. 
-var replTest = new ReplSetTest({ - nodes: kNodes, - nodeOptions: {setParameter: {storeFindAndModifyImagesInSideCollection: false}} -}); +var replTest = new ReplSetTest({nodes: kNodes}); replTest.startSet(); replTest.initiate(); @@ -253,9 +250,7 @@ runTests(priConn, priConn, secConn); replTest.stopSet(); -var st = new ShardingTest({ - shards: {rs0: {nodes: kNodes, setParameter: {storeFindAndModifyImagesInSideCollection: false}}} -}); +var st = new ShardingTest({shards: {rs0: {nodes: kNodes}}}); secConn = st.rs0.getSecondary(); secConn.setSecondaryOk(); diff --git a/jstests/sharding/sessions_collection_auto_healing.js b/jstests/sharding/sessions_collection_auto_healing.js index f314d8ac58cb6..084b26acb65f2 100644 --- a/jstests/sharding/sessions_collection_auto_healing.js +++ b/jstests/sharding/sessions_collection_auto_healing.js @@ -1,16 +1,11 @@ /** * Requires no shards. * @tags: [ - * requires_fcv_70, - * catalog_shard_incompatible, + * config_shard_incompatible, * requires_fcv_70, * ] */ load('jstests/libs/sessions_collection.js'); -load("jstests/libs/feature_flag_util.js"); - -(function() { -"use strict"; load("jstests/libs/collection_drop_recreate.js"); // For assert[Drop|Create]Collection. @@ -64,7 +59,7 @@ var mongosConfig = mongos.getDB("config"); } // Test-wide: add a shard -var rs = new ReplSetTest({nodes: 1}); +const rs = new ReplSetTest({nodes: 1}); rs.startSet({shardsvr: ""}); rs.initiate(); @@ -173,4 +168,3 @@ var shardConfig = shard.getDB("config"); st.stop(); rs.stopSet(); -})(); diff --git a/jstests/sharding/set_cluster_parameter.js b/jstests/sharding/set_cluster_parameter.js index 0869796f2b6d1..1b0ca3ab5d407 100644 --- a/jstests/sharding/set_cluster_parameter.js +++ b/jstests/sharding/set_cluster_parameter.js @@ -9,12 +9,8 @@ * requires_persistence, * ] */ -(function() { -'use strict'; - load('jstests/libs/fail_point_util.js'); load('jstests/sharding/libs/remove_shard_util.js'); -load("jstests/libs/catalog_shard_util.js"); load('jstests/replsets/rslib.js'); const clusterParameter1Value = { @@ -210,7 +206,7 @@ const checkClusterParameters = st.stop(); } -if (!TestData.catalogShard) { +if (!TestData.configShard) { { const st2 = new ShardingTest({mongos: 1, shards: 0, name: 'second_cluster'}); @@ -339,49 +335,55 @@ if (!TestData.catalogShard) { st3.stop(); } } else { - // In catalog shard mode + // In config shard mode { - jsTestLog('Check that RS which transitions to a catalog shard keeps cluster params.'); + jsTestLog('Check that RS which transitions to a config shard keeps cluster params.'); - const catalogShardName = 'catalogShard'; - const catalogShard = new ReplSetTest({ - name: catalogShardName, + const configShardName = 'configShard'; + const configShard = new ReplSetTest({ + name: configShardName, nodes: 1, nodeOptions: {setParameter: {skipShardingConfigurationChecks: true}} }); - catalogShard.startSet(); - catalogShard.initiate(); + configShard.startSet(); + configShard.initiate(); - catalogShard.getPrimary().adminCommand({setClusterParameter: clusterParameter2}); + configShard.getPrimary().adminCommand({setClusterParameter: clusterParameter2}); - var cfg = catalogShard.getReplSetConfigFromNode(); + var cfg = configShard.getReplSetConfigFromNode(); cfg.configsvr = true; - reconfig(catalogShard, cfg); + reconfig(configShard, cfg); - catalogShard.restart( + configShard.restart( 0, {configsvr: '', setParameter: {skipShardingConfigurationChecks: false}}); - catalogShard.awaitNodesAgreeOnPrimary(); + configShard.awaitNodesAgreeOnPrimary(); // Cluster params should still 
exist. checkClusterParameters(clusterParameter2Name, clusterParameter2Value, - catalogShard.getPrimary(), - catalogShard.getPrimary()); + configShard.getPrimary(), + configShard.getPrimary()); - var mongos = MongoRunner.runMongos({configdb: catalogShard.getURL()}); - assert.commandWorked(mongos.adminCommand({transitionToCatalogShard: 1})); + if (TestData.mongosBinVersion) { + // Lower the config shard's FCV so an earlier binary mongos can connect. + const targetFCV = binVersionToFCV(TestData.mongosBinVersion); + assert.commandWorked( + configShard.getPrimary().adminCommand({setFeatureCompatibilityVersion: targetFCV})); + } + var mongos = MongoRunner.runMongos({configdb: configShard.getURL()}); + assert.commandWorked(mongos.adminCommand({transitionFromDedicatedConfigServer: 1})); checkClusterParameters(clusterParameter2Name, clusterParameter2Value, - catalogShard.getPrimary(), - catalogShard.getPrimary()); + configShard.getPrimary(), + configShard.getPrimary()); - // Catalog shard should not accept cluster parameters set directly on it. + // Config shard should not accept cluster parameters set directly on it. assert.commandFailedWithCode( - catalogShard.getPrimary().adminCommand({setClusterParameter: clusterParameter3}), + configShard.getPrimary().adminCommand({setClusterParameter: clusterParameter3}), ErrorCodes.NotImplemented); jsTestLog( - 'Check that parameters added in a catalog shard cluster overwrite custom RS parameters.'); + 'Check that parameters added in a config shard cluster overwrite custom RS parameters.'); const newShard5Name = 'newShard5'; const newShard5 = new ReplSetTest({name: newShard5Name, nodes: 1}); @@ -403,7 +405,7 @@ if (!TestData.catalogShard) { checkClusterParameters(clusterParameter2Name, clusterParameter2Value, - catalogShard.getPrimary(), + configShard.getPrimary(), newShard5.getPrimary()); assert.eq(0, @@ -413,7 +415,6 @@ if (!TestData.catalogShard) { MongoRunner.stopMongos(mongos); newShard5.stopSet(); - catalogShard.stopSet(); + configShard.stopSet(); } -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/set_fcv_logging.js b/jstests/sharding/set_fcv_logging.js index a3536a4650350..7f53beddc7a74 100644 --- a/jstests/sharding/set_fcv_logging.js +++ b/jstests/sharding/set_fcv_logging.js @@ -5,11 +5,9 @@ * FCV is upgrading or downgrading (6744301) * FCV upgrade or downgrade success (6744302). * - * TODO SERVER-75391: Enable with catalog shards when catalog shards can downgrade FCV. * @tags: [ * multiversion_incompatible, * does_not_support_stepdowns, - * catalog_shard_incompatible, * ] */ @@ -201,8 +199,10 @@ function runShardingTest() { checkFCV(shardPrimaryAdminDB, latestFCV); jsTest.log("Checking for correct FCV logging on a sharded cluster."); + // One of the shards is the config server in config shard mode. + const numShardServers = TestData.configShard ? 1 : 2; assertLogsWithFailpoints( - st.configRS.getPrimary(), mongosAdminDB, "shardedCluster", 2 /*numShardServers*/); + st.configRS.getPrimary(), mongosAdminDB, "shardedCluster", numShardServers); st.stop(); } diff --git a/jstests/sharding/set_fcv_to_downgrading_fast.js b/jstests/sharding/set_fcv_to_downgrading_fast.js index 008f9c6e41e09..066e9cbd40a4b 100644 --- a/jstests/sharding/set_fcv_to_downgrading_fast.js +++ b/jstests/sharding/set_fcv_to_downgrading_fast.js @@ -2,20 +2,15 @@ * Tests that FCV downgrade will reach the transitional kDowngrading state quickly (within a few * seconds). 
* - * Catalog shard incompatible because we do not currently allow downgrading FCV with a catalog - * shard. TODO SERVER-73279: Enable in catalog shard mode when it supports FCV downgrade. + * Config shard incompatible because we do not currently allow downgrading FCV with a config + * shard. * @tags: [ * requires_fcv_70, * multiversion_incompatible, * does_not_support_stepdowns, - * catalog_shard_incompatible, * ] */ -(function() { -"use strict"; - load("jstests/libs/fail_point_util.js"); -load("jstests/libs/feature_flag_util.js"); const latest = "latest"; // The FCV downgrade should be < 1 second but we added a buffer for slow machines. @@ -147,5 +142,4 @@ function runShardingTest() { runStandaloneTest(); runReplicaSetTest(); -runShardingTest(); -})(); +runShardingTest(); \ No newline at end of file
diff --git a/jstests/sharding/shard3.js b/jstests/sharding/shard3.js index 1a3e6939a052b..a95e4c6988350 100644 --- a/jstests/sharding/shard3.js +++ b/jstests/sharding/shard3.js @@ -1,8 +1,5 @@ -(function() { -'use strict'; - // Include helpers for analyzing explain output. -load("jstests/libs/analyze_plan.js"); +import {getChunkSkipsFromAllShards} from "jstests/libs/analyze_plan.js"; const s = new ShardingTest({name: "shard3", shards: 2, mongos: 2, other: {enableBalancer: true}}); const s2 = s.s1; @@ -158,4 +155,3 @@ printjson(dba.foo.stats()); printjson(dbb.foo.stats()); s.stop(); -})();
diff --git a/jstests/sharding/shard7.js b/jstests/sharding/shard7.js index 9f56553aa7f5c..56057d4aeca03 100644 --- a/jstests/sharding/shard7.js +++ b/jstests/sharding/shard7.js @@ -1,10 +1,10 @@ // Check that shard selection does not assert for certain unsatisfiable queries. // SERVER-4554, SERVER-4914 -s = new ShardingTest({name: 'shard7', shards: 2}); +let s = new ShardingTest({name: 'shard7', shards: 2}); db = s.admin._mongo.getDB('test'); -c = db['foo']; +let c = db['foo']; c.drop(); s.adminCommand({enablesharding: '' + db}); @@ -28,15 +28,12 @@ assert.eq(0, c.find({$or: [{a: 1, b: {$gt: 0, $lt: 10}}, {a: 1, b: 5}]}).itcount // Check other operations that use getShardsForQuery. 
-unsatisfiable = { - a: 1, - b: {$gt: 4, $lt: 2} -}; +let unsatisfiable = {a: 1, b: {$gt: 4, $lt: 2}}; assert.eq(0, c.count(unsatisfiable)); assert.eq([], c.distinct('a', unsatisfiable)); -aggregate = c.aggregate({$match: unsatisfiable}); +let aggregate = c.aggregate({$match: unsatisfiable}); assert.eq(0, aggregate.toArray().length); c.save({a: null, b: null}); @@ -49,7 +46,7 @@ assert.eq(0, c.count({c: 1})); c.createIndex({loc: '2d'}); c.save({a: 2, b: 2, loc: [0, 0]}); -near = +let near = c.aggregate({$geoNear: {near: [0, 0], query: unsatisfiable, distanceField: "dist"}}).toArray(); assert.eq(0, near.length, tojson(near));
diff --git a/jstests/sharding/shard_collection_basic.js b/jstests/sharding/shard_collection_basic.js index 7fc136dbc8b17..9dbc1e8666186 100644 --- a/jstests/sharding/shard_collection_basic.js +++ b/jstests/sharding/shard_collection_basic.js @@ -74,6 +74,27 @@ if (MongoRunner.compareBinVersions(fcvDoc.featureCompatibilityVersion.version, ' ErrorCodes.InvalidNamespace); } +{ + jsTestLog("Special collections can't be sharded"); + + let specialColls = [ + 'config.foo', // all collections in config db except config.system.sessions + 'admin.foo', // all collections in admin db can't be sharded + `${kDbName}.system.foo`, // any custom system collection in any db + ]; + + specialColls.forEach(collName => { + assert.commandFailedWithCode( + mongos.adminCommand({shardCollection: collName, key: {_id: 1}}), + ErrorCodes.IllegalOperation); + }); + + // For collections in the local database the router will attempt to create the db and it will + // fail with InvalidOptions + assert.commandFailedWithCode(mongos.adminCommand({shardCollection: 'local.foo', key: {_id: 1}}), + ErrorCodes.InvalidOptions); +} + jsTestLog('shardCollection may only be run against admin database.'); assert.commandFailed( mongos.getDB('test').runCommand({shardCollection: kDbName + '.foo', key: {_id: 1}}));
diff --git a/jstests/sharding/shard_collection_config_db.js b/jstests/sharding/shard_collection_config_db.js index 12a456ce89416..7e6ad5a59fc0f 100644 --- a/jstests/sharding/shard_collection_config_db.js +++ b/jstests/sharding/shard_collection_config_db.js @@ -1,5 +1,5 @@ // Requires no shards. -// @tags: [catalog_shard_incompatible] +// @tags: [config_shard_incompatible] (function() { 'use strict';
diff --git a/jstests/sharding/shard_drain_works_with_chunks_of_any_size.js b/jstests/sharding/shard_drain_works_with_chunks_of_any_size.js new file mode 100644 index 0000000000000..1f65d93c7ca09 --- /dev/null +++ b/jstests/sharding/shard_drain_works_with_chunks_of_any_size.js @@ -0,0 +1,68 @@ +/* + * Shard a collection with documents spread on 2 shards and then call `removeShard` checking that: + * - Huge non-jumbo chunks are split during draining (moveRange moves off pieces of `chunkSize` MB) + * - Jumbo chunks are moved off (without splitting, since it's not possible) + * + * Regression test for SERVER-76550. 
+ */ + +(function() { +"use strict"; +load("jstests/sharding/libs/find_chunks_util.js"); +load("jstests/libs/fail_point_util.js"); +load('jstests/sharding/libs/remove_shard_util.js'); + +const st = new ShardingTest({other: {enableBalancer: false, chunkSize: 1}}); +const mongos = st.s0; +const configDB = st.getDB('config'); + +// Stop auto-merger because the test expects a specific number of chunks +sh.stopAutoMerger(configDB); +configureFailPointForRS( + st.configRS.nodes, "overrideBalanceRoundInterval", {intervalMs: 100}, "alwaysOn"); + +const dbName = 'test'; +const collName = 'collToDrain'; +const ns = dbName + '.' + collName; +const db = st.getDB(dbName); +const coll = db.getCollection(collName); + +// Shard collection with shard0 as db primary +assert.commandWorked( + mongos.adminCommand({enablesharding: dbName, primaryShard: st.shard0.shardName})); +assert.commandWorked(mongos.adminCommand({shardCollection: ns, key: {x: 1}})); + +// shard0 owns docs with shard key [MinKey, 0), shard1 owns docs with shard key [0, MaxKey) +assert.commandWorked(st.s.adminCommand( + {moveRange: ns, min: {x: 0}, max: {x: MaxKey}, toShard: st.shard1.shardName})); + +// Insert ~20MB of docs with different shard keys (10MB on shard0 and 10MB on shard1) +// and ~10MB of docs with the same shard key (jumbo chunk) +const big = 'X'.repeat(1024 * 1024); // 1MB +const jumboKey = 100; +var bulk = coll.initializeUnorderedBulkOp(); +for (var i = -10; i < 10; i++) { + bulk.insert({x: i, big: big}); + bulk.insert({x: jumboKey, big: big}); +} +assert.commandWorked(bulk.execute()); + +// Check that there are only 2 big chunks before starting draining +const chunksBeforeDrain = findChunksUtil.findChunksByNs(configDB, ns).toArray(); +assert.eq(2, chunksBeforeDrain.length); + +st.startBalancer(); + +// Remove shard 1 and wait for all chunks to be moved off from it +removeShard(st, st.shard1.shardName); + +// Check that after draining there are 12 chunks on shard0: +// - [MinKey, 0) original chunk on shard 1 +// - [0, 1), [1, 2), ... [8, 9) 1 MB chunks +// - [9, MaxKey) 10MB jumbo chunk +const chunksAfterDrain = + findChunksUtil.findChunksByNs(configDB, ns, {shard: st.shard0.shardName}).toArray(); +assert.eq(12, chunksAfterDrain.length); + +st.stop(); +})(); diff --git a/jstests/sharding/shard_identity_config_update.js b/jstests/sharding/shard_identity_config_update.js index b0b018b5b2909..ecbedbeae8702 100644 --- a/jstests/sharding/shard_identity_config_update.js +++ b/jstests/sharding/shard_identity_config_update.js @@ -1,10 +1,7 @@ /** * Tests that the config server connection string in the shard identity document of both the * primary and secondary will get updated whenever the config server membership changes. - * - * Shuts down the first shard but expects the config server to still be up. See if we can rework to - * get coverage in catalog shard mode. - * @tags: [requires_persistence, temporary_catalog_shard_incompatible] + * @tags: [requires_persistence] */ // Checking UUID consistency involves talking to a shard node, which in this test is shutdown @@ -17,8 +14,6 @@ load('jstests/replsets/rslib.js'); var st = new ShardingTest({shards: {rs0: {nodes: 2}}}); -var shardPri = st.rs0.getPrimary(); - // Note: Adding new replica set member by hand because of SERVER-24011. 
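For context on what the assertions in this test poll (an illustrative sketch only, not part of the patch: the actual checkConfigStrUpdated helper is defined earlier in this test and is not shown in these hunks):

```js
// Illustrative sketch, not the patch's code: a check along these lines is what the
// assert.soon() calls below are assumed to poll on every node. The shard identity
// document lives in admin.system.version under _id "shardIdentity" and records the
// config server replica set connection string.
function checkConfigStrUpdatedSketch(conn, expectedConfigStr) {
    const doc = conn.getDB("admin").system.version.findOne({_id: "shardIdentity"});
    return doc !== null && doc.configsvrConnectionString === expectedConfigStr;
}
```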
var newNode = @@ -65,10 +60,27 @@ assert.soon(function() { return checkConfigStrUpdated(st.rs0.getPrimary(), expectedConfigStr); }); -var secConn = st.rs0.getSecondary(); -secConn.setSecondaryOk(); +st.rs0.getSecondaries().forEach(secConn => { + secConn.setSecondaryOk(); + assert.soon(function() { + return checkConfigStrUpdated(secConn, expectedConfigStr); + }); +}); + +assert.soon(function() { + return checkConfigStrUpdated(st.configRS.getPrimary(), expectedConfigStr); +}); + +st.configRS.getSecondaries().forEach(secConn => { + secConn.setSecondaryOk(); + assert.soon(function() { + return checkConfigStrUpdated(secConn, expectedConfigStr); + }); +}); + +newNode.setSecondaryOk(); assert.soon(function() { - return checkConfigStrUpdated(secConn, expectedConfigStr); + return checkConfigStrUpdated(newNode, expectedConfigStr); }); // @@ -77,8 +89,12 @@ assert.soon(function() { // string when they come back up. // -st.rs0.stop(0); -st.rs0.stop(1); +// We can't reconfigure the config server if some nodes are down, so skip in config shard mode and +// just verify all nodes update the config string eventually. +if (!TestData.configShard) { + st.rs0.stop(0); + st.rs0.stop(1); +} MongoRunner.stopMongod(newNode); @@ -88,8 +104,10 @@ replConfig.members.pop(); reconfig(st.configRS, replConfig); -st.rs0.restart(0, {shardsvr: ''}); -st.rs0.restart(1, {shardsvr: ''}); +if (!TestData.configShard) { + st.rs0.restart(0, {shardsvr: ''}); + st.rs0.restart(1, {shardsvr: ''}); +} st.rs0.waitForPrimary(); st.rs0.awaitSecondaryNodes(); @@ -98,10 +116,23 @@ assert.soon(function() { return checkConfigStrUpdated(st.rs0.getPrimary(), origConfigConnStr); }); -secConn = st.rs0.getSecondary(); -secConn.setSecondaryOk(); +st.rs0.getSecondaries().forEach(secConn => { + secConn.setSecondaryOk(); + assert.soon(function() { + return checkConfigStrUpdated(secConn, origConfigConnStr); + }); +}); + +// Config servers in 7.0 also maintain the connection string in their shard identity document. assert.soon(function() { - return checkConfigStrUpdated(secConn, origConfigConnStr); + return checkConfigStrUpdated(st.configRS.getPrimary(), origConfigConnStr); +}); + +st.configRS.getSecondaries().forEach(secConn => { + secConn.setSecondaryOk(); + assert.soon(function() { + return checkConfigStrUpdated(secConn, origConfigConnStr); + }); }); st.stop(); diff --git a/jstests/sharding/shard_insert_getlasterror_w2.js b/jstests/sharding/shard_insert_getlasterror_w2.js index 12e2c24c8ec19..4840386752a5c 100644 --- a/jstests/sharding/shard_insert_getlasterror_w2.js +++ b/jstests/sharding/shard_insert_getlasterror_w2.js @@ -1,6 +1,5 @@ // replica set as solo shard // TODO: Add assertion code that catches hang -// @tags: [temporary_catalog_shard_incompatible] // The UUID and index check must be able to contact the shard primaries, but this test manually // stops 2/3 nodes of a replica set. 
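The hunk that follows, like several other tests touched by this patch, gates shard setup on TestData.configShard. A rough sketch of that recurring pattern, with mongosConn and replSet as placeholder names standing in for each test's own variables:

```js
// Sketch of the config shard gating pattern used throughout this patch: in config
// shard mode the config server replica set doubles as the first shard, so the test
// promotes it with transitionFromDedicatedConfigServer instead of running addshard.
// 'mongosConn' and 'replSet' are placeholders for the surrounding test's variables.
if (!TestData.configShard) {
    assert.commandWorked(mongosConn.adminCommand({addshard: replSet.getURL()}));
} else {
    assert.commandWorked(mongosConn.adminCommand({transitionFromDedicatedConfigServer: 1}));
}
```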
@@ -45,7 +44,11 @@ var mongosConn = shardingTest.s; var testDB = mongosConn.getDB(testDBName); // Add replSet1 as only shard -assert.commandWorked(mongosConn.adminCommand({addshard: replSet1.getURL()})); +if (!TestData.configShard) { + assert.commandWorked(mongosConn.adminCommand({addshard: replSet1.getURL()})); +} else { + assert.commandWorked(mongosConn.adminCommand({transitionFromDedicatedConfigServer: 1})); +} // Enable sharding on test db and its collection foo assert.commandWorked(mongosConn.getDB('admin').runCommand({enablesharding: testDBName})); diff --git a/jstests/sharding/shard_keys_with_dollar_sign.js b/jstests/sharding/shard_keys_with_dollar_sign.js index 5d18d6ab7ab35..c7fbbc73be5ed 100644 --- a/jstests/sharding/shard_keys_with_dollar_sign.js +++ b/jstests/sharding/shard_keys_with_dollar_sign.js @@ -1,11 +1,20 @@ /** * Tests that the shardCollection command and reshardCollection command correctly reject a shard key - * that has a field name with parts that start with '$'. + * that has a field name that starts with '$' or contains parts that start with '$' unless the part + * is a DBRef (i.e. is equal to '$id', '$db' or '$ref'). */ (function() { "use strict"; -const st = new ShardingTest({shards: 1}); +const criticalSectionTimeoutMS = 24 * 60 * 60 * 1000; // 1 day +const st = new ShardingTest({ + shards: 1, + other: { + // Avoid spurious failures with small 'ReshardingCriticalSectionTimeout' values being set. + configOptions: + {setParameter: {reshardingCriticalSectionTimeoutMillis: criticalSectionTimeoutMS}} + } +}); const dbName = "testDb"; const ns0 = dbName + ".testColl0"; @@ -53,5 +62,14 @@ testValidation({"x$": 1}, {isValidIndexKey: true, isValidShardKey: true}); testValidation({"x$.y": 1}, {isValidIndexKey: true, isValidShardKey: true}); testValidation({"x.y$": 1}, {isValidIndexKey: true, isValidShardKey: true}); +// Verify that a shard key can have a field that contains a DBRef as long as the field itself +// does not start with '$'. +testValidation({"$id": 1}, {isValidIndexKey: false, isValidShardKey: false}); +testValidation({"$db": 1}, {isValidIndexKey: false, isValidShardKey: false}); +testValidation({"$ref": 1}, {isValidIndexKey: false, isValidShardKey: false}); +testValidation({"x.$id": 1}, {isValidIndexKey: true, isValidShardKey: true}); +testValidation({"x.$db": 1}, {isValidIndexKey: true, isValidShardKey: true}); +testValidation({"x.$ref": 1}, {isValidIndexKey: true, isValidShardKey: true}); + st.stop(); })(); diff --git a/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js b/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js index d2e40b75b1af1..7b57a35759856 100644 --- a/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js +++ b/jstests/sharding/shard_removal_triggers_catalog_cache_invalidation.js @@ -1,8 +1,6 @@ /** * Tests that shard removal triggers an update of the catalog cache so that routers don't continue * to target shards that have been removed. - * - * @tags: [temporary_catalog_shard_incompatible] */ (function() { 'use strict'; @@ -69,8 +67,13 @@ const dbName = 'TestDB'; // Remove shard0. removeShard(st, st.shard0.shardName); - // Stop the replica set so that future requests to this shard will be unsuccessful. - st.rs0.stopSet(); + // Stop the replica set so that future requests to this shard will be unsuccessful. Skip this + // step for a config shard, since the config server must be up for the second router to + // refresh. 
The default read concern is local, so the router should eventually target a shard + // with chunks. + if (!TestData.configShard) { + st.rs0.stopSet(); + } // Ensure that s1, the router which did not run removeShard, eventually stops targeting chunks // for the sharded collection which previously resided on a shard that no longer exists. @@ -128,21 +131,15 @@ const dbName = 'TestDB'; // Remove shard0. We need assert.soon since chunks in the sessions collection may need to be // migrated off by the balancer. - assert.soon(() => { - const removeRes = st.s0.adminCommand({removeShard: st.shard0.shardName}); - if (!removeRes.ok && removeRes.code === ErrorCodes.ShardNotFound) { - // If the config server primary steps down after removing the config.shards doc for the - // shard being removed but before completing the _configsvrRemoveShard command, the - // mongos would retry the command on the new config server primary which would not find - // the removed shard in its ShardRegistry causing the command to fail with - // ShardNotFound. - return true; - } - return removeRes.state === 'completed'; - }); + removeShard(st, st.shard0.shardName); - // Stop the replica set so that future requests to this shard will be unsuccessful. - st.rs0.stopSet(); + // Stop the replica set so that future requests to this shard will be unsuccessful. Skip this + // step for a config shard, since the config server must be up for the second router to + // refresh. The default read concern is local, so the router should eventually target a shard + // with chunks. + if (!TestData.configShard) { + st.rs0.stopSet(); + } // Ensure that s1, the router which did not run removeShard, eventually stops targeting data for // the unsharded collection which previously had as primary a shard that no longer exists. diff --git a/jstests/sharding/sharded_profile.js b/jstests/sharding/sharded_profile.js index b22e94581ea99..d5dd1f70719bc 100644 --- a/jstests/sharding/sharded_profile.js +++ b/jstests/sharding/sharded_profile.js @@ -25,7 +25,7 @@ var inserts = [{_id: 0}, {_id: 1}, {_id: 2}]; assert.commandWorked(st.s1.getCollection(coll.toString()).insert(inserts)); -profileEntry = profileColl.findOne({"op": "insert", "ns": coll.getFullName()}); +let profileEntry = profileColl.findOne({"op": "insert", "ns": coll.getFullName()}); assert.neq(null, profileEntry); printjson(profileEntry); assert.eq(profileEntry.command.documents, inserts); diff --git a/jstests/sharding/sharding_index_catalog_API.js b/jstests/sharding/sharding_index_catalog_API.js index 40cb0de806b72..6c8bbd92fd32a 100644 --- a/jstests/sharding/sharding_index_catalog_API.js +++ b/jstests/sharding/sharding_index_catalog_API.js @@ -4,7 +4,6 @@ * @tags: [ * multiversion_incompatible, * featureFlagGlobalIndexesShardingCatalog, - * temporary_catalog_shard_incompatible, * ] */ @@ -34,6 +33,16 @@ function unregisterIndex(rs, nss, name, uuid) { }); } +// Assert a secondary contains the number of documents specified by count. 
+function assertSecondaryCount(rst, collection, query, count) { + let matchCount = false; + rst.getSecondaries().forEach(function(secondary) { + matchCount = + matchCount || secondary.getCollection(collection).countDocuments(query) == count; + }); + assert(matchCount); +} + const st = new ShardingTest({mongos: 1, shards: {rs0: {nodes: 3}, rs1: {nodes: 3}}}); const shard0 = st.shard0.shardName; @@ -96,28 +105,37 @@ assert.eq(1, st.rs0.getPrimary().getCollection(shardCollectionCatalog).countDocu uuid: collectionUUID, indexVersion: {$exists: true} })); -assert.eq(1, st.rs0.getSecondary().getCollection(shardIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index1Name -})); -assert.eq(1, st.rs0.getSecondary().getCollection(shardCollectionCatalog).countDocuments({ - uuid: collectionUUID, - indexVersion: {$exists: true} -})); + +assertSecondaryCount( + st.rs0, shardIndexCatalog, {collectionUUID: collectionUUID, name: index1Name}, 1); +assertSecondaryCount( + st.rs0, shardCollectionCatalog, {uuid: collectionUUID, indexVersion: {$exists: true}}, 1); assert.eq(0, st.rs1.getPrimary().getCollection(shardIndexCatalog).countDocuments({ collectionUUID: collectionUUID, name: index1Name })); jsTestLog("Ensure we committed in the right collection."); -assert.eq(0, st.configRS.getPrimary().getCollection(shardIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index1Name -})); -assert.eq(0, st.rs0.getPrimary().getCollection(configsvrIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index1Name -})); +if (TestData.configShard) { + // The config server is shard0 in config shard mode, so it should have both collections. + assert.eq(1, st.configRS.getPrimary().getCollection(shardIndexCatalog).countDocuments({ + collectionUUID: collectionUUID, + name: index1Name + })); + assert.eq(1, st.rs0.getPrimary().getCollection(configsvrIndexCatalog).countDocuments({ + collectionUUID: collectionUUID, + name: index1Name + })); +} else { + assert.eq(0, st.configRS.getPrimary().getCollection(shardIndexCatalog).countDocuments({ + collectionUUID: collectionUUID, + name: index1Name + })); + assert.eq(0, st.rs0.getPrimary().getCollection(configsvrIndexCatalog).countDocuments({ + collectionUUID: collectionUUID, + name: index1Name + })); +} assert.eq(0, st.rs1.getPrimary().getCollection(configsvrIndexCatalog).countDocuments({ collectionUUID: collectionUUID, name: index1Name @@ -131,10 +149,9 @@ assert.eq(1, st.rs1.getPrimary().getCollection(shardIndexCatalog).countDocuments collectionUUID: collectionUUID, name: index1Name })); -assert.eq(1, st.rs1.getSecondary().getCollection(shardIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index1Name -})); + +assertSecondaryCount( + st.rs1, shardIndexCatalog, {collectionUUID: collectionUUID, name: index1Name}, 1); jsTestLog("AND the index version."); const indexVersionRS0 = st.rs0.getPrimary() @@ -159,32 +176,38 @@ assert.eq(1, st.rs0.getPrimary().getCollection(shardIndexCatalog).countDocuments collectionUUID: collectionUUID, name: index2Name })); -assert.eq(1, st.rs0.getSecondary().getCollection(shardIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index2Name -})); -assert.eq(1, st.rs0.getSecondary().getCollection(shardCollectionCatalog).countDocuments({ - uuid: collectionUUID, - indexVersion: {$exists: true} -})); +assertSecondaryCount( + st.rs0, shardIndexCatalog, {collectionUUID: collectionUUID, name: index2Name}, 1); +assertSecondaryCount( + st.rs0, shardCollectionCatalog, 
{uuid: collectionUUID, indexVersion: {$exists: true}}, 1); assert.eq(1, st.rs1.getPrimary().getCollection(shardIndexCatalog).countDocuments({ collectionUUID: collectionUUID, name: index2Name })); -assert.eq(1, st.rs1.getSecondary().getCollection(shardIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index2Name -})); +assertSecondaryCount( + st.rs1, shardIndexCatalog, {collectionUUID: collectionUUID, name: index2Name}, 1); jsTestLog("Check we didn't commit in a wrong collection."); -assert.eq(0, st.configRS.getPrimary().getCollection(shardIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index2Name -})); -assert.eq(0, st.rs0.getPrimary().getCollection(configsvrIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index2Name -})); +if (TestData.configShard) { + // The config server is shard0 in config shard mode, so it should have both collections. + assert.eq(1, st.configRS.getPrimary().getCollection(shardIndexCatalog).countDocuments({ + collectionUUID: collectionUUID, + name: index2Name + })); + assert.eq(1, st.rs0.getPrimary().getCollection(configsvrIndexCatalog).countDocuments({ + collectionUUID: collectionUUID, + name: index2Name + })); +} else { + assert.eq(0, st.configRS.getPrimary().getCollection(shardIndexCatalog).countDocuments({ + collectionUUID: collectionUUID, + name: index2Name + })); + assert.eq(0, st.rs0.getPrimary().getCollection(configsvrIndexCatalog).countDocuments({ + collectionUUID: collectionUUID, + name: index2Name + })); +} assert.eq(0, st.rs1.getPrimary().getCollection(configsvrIndexCatalog).countDocuments({ collectionUUID: collectionUUID, name: index2Name @@ -201,18 +224,14 @@ assert.eq(0, st.rs0.getPrimary().getCollection(shardIndexCatalog).countDocuments collectionUUID: collectionUUID, name: index2Name })); -assert.eq(0, st.rs0.getSecondary().getCollection(shardIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index2Name -})); +assertSecondaryCount( + st.rs0, shardIndexCatalog, {collectionUUID: collectionUUID, name: index2Name}, 0); assert.eq(0, st.rs1.getPrimary().getCollection(shardIndexCatalog).countDocuments({ collectionUUID: collectionUUID, name: index2Name })); -assert.eq(0, st.rs1.getSecondary().getCollection(shardIndexCatalog).countDocuments({ - collectionUUID: collectionUUID, - name: index2Name -})); +assertSecondaryCount( + st.rs1, shardIndexCatalog, {collectionUUID: collectionUUID, name: index2Name}, 0); jsTestLog( "Check global index consolidation. Case 1: 1 leftover index dropped. Initial state: there must be only one index in the shards."); diff --git a/jstests/sharding/sharding_index_catalog_upgrade_downgrade.js b/jstests/sharding/sharding_index_catalog_upgrade_downgrade.js index 5c1a06ddd4da5..c8923dec1a96f 100644 --- a/jstests/sharding/sharding_index_catalog_upgrade_downgrade.js +++ b/jstests/sharding/sharding_index_catalog_upgrade_downgrade.js @@ -2,8 +2,7 @@ * Tests that the global indexes collections are dropped on FCV downgrade and recreated after * upgrading. 
* - * @tags: [multiversion_incompatible, featureFlagGlobalIndexesShardingCatalog, - * requires_fcv_70, temporary_catalog_shard_incompatible] + * @tags: [multiversion_incompatible, featureFlagGlobalIndexesShardingCatalog, requires_fcv_70] */ (function() { diff --git a/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js b/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js index ce97d9164946a..50e5ff55e434b 100644 --- a/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js +++ b/jstests/sharding/sharding_non_transaction_snapshot_aggregate.js @@ -5,7 +5,6 @@ * @tags: [ * requires_majority_read_concern, * requires_persistence, - * temporary_catalog_shard_incompatible, * ] */ @@ -42,7 +41,7 @@ const st = new ShardingTest({ rs2: {nodes: 2}, }, mongos: 1, - config: 1, + config: TestData.configShard ? undefined : 1, other: {configOptions: nodeOptions, rsOptions: nodeOptions} }); // Config sharded collections. diff --git a/jstests/sharding/sharding_non_transaction_snapshot_read.js b/jstests/sharding/sharding_non_transaction_snapshot_read.js index 9c93e9d608248..554ea2c8eb550 100644 --- a/jstests/sharding/sharding_non_transaction_snapshot_read.js +++ b/jstests/sharding/sharding_non_transaction_snapshot_read.js @@ -4,7 +4,6 @@ * @tags: [ * requires_majority_read_concern, * requires_persistence, - * temporary_catalog_shard_incompatible, * ] */ @@ -36,7 +35,7 @@ let shardingScenarios = { setUp: function() { const st = new ShardingTest({ mongos: 1, - config: 1, + config: TestData.configShard ? undefined : 1, shards: {rs0: {nodes: 2}}, other: {configOptions: nodeOptions, rsOptions: nodeOptions} }); @@ -54,7 +53,7 @@ let shardingScenarios = { rs2: {nodes: 2}, }, mongos: 1, - config: 1, + config: TestData.configShard ? undefined : 1, other: {configOptions: nodeOptions, rsOptions: nodeOptions} }); setUpAllScenarios(st); @@ -98,7 +97,7 @@ let shardingScenarios = { rs2: {nodes: 2}, }, mongos: 1, - config: 1, + config: TestData.configShard ? undefined : 1, other: {configOptions: nodeOptions, rsOptions: nodeOptions} }); setUpAllScenarios(st); diff --git a/jstests/sharding/sharding_options.js b/jstests/sharding/sharding_options.js index addc5d2901de8..4001c26d23fe8 100644 --- a/jstests/sharding/sharding_options.js +++ b/jstests/sharding/sharding_options.js @@ -5,27 +5,6 @@ var baseName = "jstests_sharding_sharding_options"; load('jstests/libs/command_line/test_parsed_options.js'); -// Move Paranoia -jsTest.log("Testing \"moveParanoia\" command line option"); -var expectedResult = {"parsed": {"sharding": {"archiveMovedChunks": true}}}; -testGetCmdLineOptsMongod({moveParanoia: ""}, expectedResult); - -jsTest.log("Testing \"noMoveParanoia\" command line option"); -expectedResult = { - "parsed": {"sharding": {"archiveMovedChunks": false}} -}; -testGetCmdLineOptsMongod({noMoveParanoia: ""}, expectedResult); - -jsTest.log("Testing \"sharding.archiveMovedChunks\" config file option"); -expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/enable_paranoia.json", - "sharding": {"archiveMovedChunks": true} - } -}; -testGetCmdLineOptsMongod({config: "jstests/libs/config_files/enable_paranoia.json"}, - expectedResult); - // Sharding Role jsTest.log("Testing \"configsvr\" command line option"); var expectedResult = { @@ -61,27 +40,6 @@ expectedResult = { testGetCmdLineOptsMongod({config: "jstests/libs/config_files/set_shardingrole_configsvr.json"}, expectedResult); -// Test that we preserve switches explicitly set to false in config files. See SERVER-13439. 
-jsTest.log("Testing explicitly disabled \"moveParanoia\" config file option"); -expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/disable_moveparanoia.ini", - "sharding": {"archiveMovedChunks": false} - } -}; -testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_moveparanoia.ini"}, - expectedResult); - -jsTest.log("Testing explicitly disabled \"noMoveParanoia\" config file option"); -expectedResult = { - "parsed": { - "config": "jstests/libs/config_files/disable_nomoveparanoia.ini", - "sharding": {"archiveMovedChunks": true} - } -}; -testGetCmdLineOptsMongod({config: "jstests/libs/config_files/disable_nomoveparanoia.ini"}, - expectedResult); - jsTest.log("Ensure starting a standalone with --shardsvr fails"); testGetCmdLineOptsMongodFailed({shardsvr: ""}); diff --git a/jstests/sharding/sharding_rs2.js b/jstests/sharding/sharding_rs2.js index dbeca63a78e87..09c7b2bf9d97d 100644 --- a/jstests/sharding/sharding_rs2.js +++ b/jstests/sharding/sharding_rs2.js @@ -65,12 +65,13 @@ function countNodes() { assert.eq(2, countNodes(), "A1"); -var rs = s.rs0; -if (!TestData.catalogShard) { +const rs = s.rs0; +if (!TestData.configShard) { rs.add({'shardsvr': ""}); } else { rs.add({'configsvr': ""}); } + try { rs.reInitiate(); } catch (e) { diff --git a/jstests/sharding/sharding_statistics_server_status.js b/jstests/sharding/sharding_statistics_server_status.js index abe0019e45952..c2dc4b1515a01 100644 --- a/jstests/sharding/sharding_statistics_server_status.js +++ b/jstests/sharding/sharding_statistics_server_status.js @@ -18,7 +18,7 @@ function ShardStat() { this.countRecipientMoveChunkStarted = 0; this.countDocsClonedOnRecipient = 0; this.countDocsClonedOnDonor = 0; - this.countDocsDeletedOnDonor = 0; + this.countDocsDeletedByRangeDeleter = 0; } function incrementStatsAndCheckServerShardStats(donor, recipient, numDocs) { @@ -26,11 +26,14 @@ function incrementStatsAndCheckServerShardStats(donor, recipient, numDocs) { donor.countDocsClonedOnDonor += numDocs; ++recipient.countRecipientMoveChunkStarted; recipient.countDocsClonedOnRecipient += numDocs; - donor.countDocsDeletedOnDonor += numDocs; + donor.countDocsDeletedByRangeDeleter += numDocs; const statsFromServerStatus = shardArr.map(function(shardVal) { return shardVal.getDB('admin').runCommand({serverStatus: 1}).shardingStatistics; }); for (let i = 0; i < shardArr.length; ++i) { + let countDocsDeleted = statsFromServerStatus[i].hasOwnProperty('countDocsDeletedOnDonor') + ? 
statsFromServerStatus[i].countDocsDeletedOnDonor + : statsFromServerStatus[i].countDocsDeletedByRangeDeleter; assert(statsFromServerStatus[i]); assert(statsFromServerStatus[i].countStaleConfigErrors); assert(statsFromServerStatus[i].totalCriticalSectionCommitTimeMillis); @@ -43,8 +46,7 @@ function incrementStatsAndCheckServerShardStats(donor, recipient, numDocs) { assert.eq(stats[i].countDocsClonedOnRecipient, statsFromServerStatus[i].countDocsClonedOnRecipient); assert.eq(stats[i].countDocsClonedOnDonor, statsFromServerStatus[i].countDocsClonedOnDonor); - assert.eq(stats[i].countDocsDeletedOnDonor, - statsFromServerStatus[i].countDocsDeletedOnDonor); + assert.eq(stats[i].countDocsDeletedByRangeDeleter, countDocsDeleted); assert.eq(stats[i].countRecipientMoveChunkStarted, statsFromServerStatus[i].countRecipientMoveChunkStarted); } diff --git a/jstests/sharding/sharding_system_namespaces.js b/jstests/sharding/sharding_system_namespaces.js index d44f1150d6446..778a745442551 100644 --- a/jstests/sharding/sharding_system_namespaces.js +++ b/jstests/sharding/sharding_system_namespaces.js @@ -41,7 +41,7 @@ if (Array.contains(storageEngines, "wiredTiger")) { st.printShardingStatus(); var primaryShard = st.getPrimaryShard("test"); - anotherShard = st.getOther(primaryShard); + let anotherShard = st.getOther(primaryShard); assert.commandWorked( db.adminCommand({movechunk: coll + '', find: {x: 5}, to: anotherShard.name})); diff --git a/jstests/sharding/sharding_with_keyfile.key b/jstests/sharding/sharding_with_keyfile.key index fe3344b944470..edac9fdf43466 100644 --- a/jstests/sharding/sharding_with_keyfile.key +++ b/jstests/sharding/sharding_with_keyfile.key @@ -1,3 +1,3 @@ aBcDeFg 1010101 -JJJJJJJ \ No newline at end of file +JJJJJJJ diff --git a/jstests/sharding/shardingtest_control_12_nodes.js b/jstests/sharding/shardingtest_control_12_nodes.js index b598f26465921..53afb03fbc7e0 100644 --- a/jstests/sharding/shardingtest_control_12_nodes.js +++ b/jstests/sharding/shardingtest_control_12_nodes.js @@ -13,4 +13,4 @@ (function() { const st = new ShardingTest({shards: 4, rs: {nodes: 3}, mongos: 1}); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/shardingtest_control_1_node.js b/jstests/sharding/shardingtest_control_1_node.js index 516e01cb48784..7944097bf0100 100644 --- a/jstests/sharding/shardingtest_control_1_node.js +++ b/jstests/sharding/shardingtest_control_1_node.js @@ -8,4 +8,4 @@ (function() { const st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1}); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/single_shard_find_forwarding.js b/jstests/sharding/single_shard_find_forwarding.js index 2957637e54f47..82d59b51d8955 100644 --- a/jstests/sharding/single_shard_find_forwarding.js +++ b/jstests/sharding/single_shard_find_forwarding.js @@ -8,11 +8,8 @@ * meets expectation. It then runs the same test against a sharded collection with a single shard. */ -load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions. -load("jstests/libs/analyze_plan.js"); // For getPlanStages helper function. - -(function() { -"use strict"; +load("jstests/libs/profiler.js"); // For profilerHas*OrThrow helper functions. 
+import {getPlanStages} from "jstests/libs/analyze_plan.js"; function testArraySorted(arr, key) { for (let i = 0; i < arr.length - 1; i++) { @@ -101,4 +98,3 @@ assert.eq(singleShardColl2.find().skip(nDocs - 1).limit(nDocs).itcount(), 1); assert.eq(singleShardColl2.find().skip(nDocs + 1000).limit(nDocs).itcount(), 0); st.stop(); -})(); diff --git a/jstests/sharding/single_shard_transaction_with_arbiter.js b/jstests/sharding/single_shard_transaction_with_arbiter.js index 846767f4458fb..2a845ef2acaa4 100644 --- a/jstests/sharding/single_shard_transaction_with_arbiter.js +++ b/jstests/sharding/single_shard_transaction_with_arbiter.js @@ -4,7 +4,7 @@ * A config server can't have arbiter nodes. * @tags: [ * uses_transactions, - * catalog_shard_incompatible, + * config_shard_incompatible, * ] */ diff --git a/jstests/sharding/split_stale_mongos.js b/jstests/sharding/split_stale_mongos.js index ddfe820c05cae..58683cb1fed20 100644 --- a/jstests/sharding/split_stale_mongos.js +++ b/jstests/sharding/split_stale_mongos.js @@ -17,4 +17,4 @@ for (var i = 0; i < 100; i += 10) { st.configRS.awaitLastOpCommitted(); // Ensure that other mongos sees the previous split } -st.stop(); \ No newline at end of file +st.stop(); diff --git a/jstests/sharding/startup_with_all_configs_down.js b/jstests/sharding/startup_with_all_configs_down.js index 02607ecf8b661..c7959ac8eb2d1 100644 --- a/jstests/sharding/startup_with_all_configs_down.js +++ b/jstests/sharding/startup_with_all_configs_down.js @@ -4,7 +4,8 @@ // // This test involves restarting a standalone shard, so cannot be run on ephemeral storage engines. // A restarted standalone will lose all data when using an ephemeral storage engine. -// @tags: [requires_persistence] +// TODO SERVER-71169: Re-enable this test once shard filtering is implemented for CQF. +// @tags: [requires_persistence, cqf_incompatible] // The following checks use connections to shards cached on the ShardingTest object, but this test // restarts a shard, so the cached connection is not usable. diff --git a/jstests/sharding/stats.js b/jstests/sharding/stats.js index 84f6f63354670..b47bd4e84ed4e 100644 --- a/jstests/sharding/stats.js +++ b/jstests/sharding/stats.js @@ -30,7 +30,7 @@ s.adminCommand({shardcollection: "test.aaa", key: {_id: 1}}); s.adminCommand({shardcollection: "test.foo", key: {_id: 1}}); // this collection is actually used s.adminCommand({shardcollection: "test.zzz", key: {_id: 1}}); -N = 10000; +let N = 10000; s.adminCommand({split: "test.foo", middle: {_id: N / 2}}); s.adminCommand({ moveChunk: "test.foo", @@ -40,7 +40,7 @@ s.adminCommand({ }); var bulk = db.foo.initializeUnorderedBulkOp(); -for (i = 0; i < N; i++) +for (let i = 0; i < N; i++) bulk.insert({_id: i}); assert.commandWorked(bulk.execute()); @@ -48,10 +48,10 @@ assert.commandWorked(bulk.execute()); // totalIndexSize). 
assert.commandWorked(db.adminCommand({fsync: 1})); -a = s.shard0.getDB("test"); -b = s.shard1.getDB("test"); +let a = s.shard0.getDB("test"); +let b = s.shard1.getDB("test"); -x = assert.commandWorked(db.foo.stats()); +let x = assert.commandWorked(db.foo.stats()); assert.eq(N, x.count, "coll total count expected"); assert.eq(db.foo.count(), x.count, "coll total count match"); assert.eq(2, x.nchunks, "coll chunk num"); @@ -69,8 +69,8 @@ assert(!x.shards[s.shard1.shardName].indexDetails, 'indexDetails should not be present in s.shard1.shardName: ' + tojson(x.shards[s.shard1.shardName])); -a_extras = a.stats().objects - a.foo.count(); -b_extras = b.stats().objects - b.foo.count(); +let a_extras = a.stats().objects - a.foo.count(); +let b_extras = b.stats().objects - b.foo.count(); print("a_extras: " + a_extras); print("b_extras: " + b_extras); @@ -120,9 +120,9 @@ function collStatComp(stat_obj, stat_obj_scaled, scale, mongos) { } /* db.stats() tests */ -db_not_scaled = assert.commandWorked(db.stats()); -db_scaled_512 = assert.commandWorked(db.stats(512)); -db_scaled_1024 = assert.commandWorked(db.stats(1024)); +let db_not_scaled = assert.commandWorked(db.stats()); +let db_scaled_512 = assert.commandWorked(db.stats(512)); +let db_scaled_1024 = assert.commandWorked(db.stats(1024)); for (var shard in db_not_scaled.raw) { dbStatComp(db_not_scaled.raw[shard], db_scaled_512.raw[shard], 512); @@ -133,9 +133,9 @@ dbStatComp(db_not_scaled, db_scaled_512, 512); dbStatComp(db_not_scaled, db_scaled_1024, 1024); /* db.collection.stats() tests */ -coll_not_scaled = assert.commandWorked(db.foo.stats()); -coll_scaled_512 = assert.commandWorked(db.foo.stats(512)); -coll_scaled_1024 = assert.commandWorked(db.foo.stats(1024)); +let coll_not_scaled = assert.commandWorked(db.foo.stats()); +let coll_scaled_512 = assert.commandWorked(db.foo.stats(512)); +let coll_scaled_1024 = assert.commandWorked(db.foo.stats(1024)); for (var shard in coll_not_scaled.shards) { collStatComp(coll_not_scaled.shards[shard], coll_scaled_512.shards[shard], 512, false); diff --git a/jstests/sharding/store_historical_placement_data.js b/jstests/sharding/store_historical_placement_data.js index f8cf5dc14ae71..73c73f5f1ef58 100644 --- a/jstests/sharding/store_historical_placement_data.js +++ b/jstests/sharding/store_historical_placement_data.js @@ -1,8 +1,4 @@ -(function() { -"use strict"; -load("jstests/libs/feature_flag_util.js"); - const st = new ShardingTest({shards: 3, chunkSize: 1}); const configDB = st.s.getDB('config'); const shard0 = st.shard0.shardName; @@ -397,17 +393,15 @@ function testAddShard() { } // Execute the test case teardown - st.s.adminCommand({removeShard: newShardName}); - newReplicaSet.stopSet(); -} + for (const dbName of dbsOnNewReplicaSet) { + assert.commandWorked(st.getDB(dbName).dropDatabase()); + } -// TODO SERVER-69106 remove the logic to skip the test execution -const historicalPlacementDataFeatureFlag = FeatureFlagUtil.isEnabled( - st.configRS.getPrimary().getDB('admin'), "HistoricalPlacementShardingCatalog"); -if (!historicalPlacementDataFeatureFlag) { - jsTestLog("Skipping as featureFlagHistoricalPlacementShardingCatalog is disabled"); - st.stop(); - return; + let res = assert.commandWorked(st.s.adminCommand({removeShard: newShardName})); + assert.eq('started', res.state); + res = assert.commandWorked(st.s.adminCommand({removeShard: newShardName})); + assert.eq('completed', res.state); + newReplicaSet.stopSet(); } jsTest.log('Testing placement entries added by explicit DB creation'); @@ -447,4 +441,3 @@ 
jsTest.log( testAddShard(); st.stop(); -}()); diff --git a/jstests/sharding/tag_range.js b/jstests/sharding/tag_range.js index 1e497a11b8c53..6880d27f4b58c 100644 --- a/jstests/sharding/tag_range.js +++ b/jstests/sharding/tag_range.js @@ -29,7 +29,7 @@ countTags(2, 'tag ranges were not successfully added'); // remove the second range, should be left with one -assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 10}, {_id: 15}, 'b')); +assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 10}, {_id: 15})); countTags(1, 'tag range not removed successfully'); @@ -47,7 +47,7 @@ assert.commandWorked( assert.commandWorked(st.addTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c')); countTags(2, 'Dotted path tag range not successfully added.'); -assert.commandWorked(st.removeTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10}, 'c')); +assert.commandWorked(st.removeTagRange('test.tag_range_dotted', {"_id.a": 5}, {"_id.a": 10})); assert.commandFailed(st.addTagRange('test.tag_range_dotted', {"_id.b": 5}, {"_id.b": 10}, 'c')); countTags(1, 'Incorrectly added tag range.'); @@ -58,35 +58,35 @@ countTags(1, 'Incorrectly added embedded field tag range'); // removeTagRange tests for tag ranges that do not exist // Bad namespace -assert.commandFailed(st.removeTagRange('badns', {_id: 5}, {_id: 11}, 'a')); +assert.commandFailed(st.removeTagRange('badns', {_id: 5}, {_id: 11})); countTags(1, 'Bad namespace: tag range does not exist'); // Bad tag -assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 'badtag')); +assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11})); countTags(1, 'Bad tag: tag range does not exist'); // Bad min -assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 0}, {_id: 11}, 'a')); +assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 0}, {_id: 11})); countTags(1, 'Bad min: tag range does not exist'); // Bad max -assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 12}, 'a')); +assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 12})); countTags(1, 'Bad max: tag range does not exist'); // Invalid namesapce -assert.commandFailed(st.removeTagRange(35, {_id: 5}, {_id: 11}, 'a')); +assert.commandFailed(st.removeTagRange(35, {_id: 5}, {_id: 11})); countTags(1, 'Invalid namespace: tag range does not exist'); // Invalid tag -assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11}, 35)); +assert.commandWorked(st.removeTagRange('test.tag_range', {_id: 5}, {_id: 11})); countTags(1, 'Invalid tag: tag range does not exist'); // Invalid min -assert.commandFailed(st.removeTagRange('test.tag_range', 35, {_id: 11}, 'a')); +assert.commandFailed(st.removeTagRange('test.tag_range', 35, {_id: 11})); countTags(1, 'Invalid min: tag range does not exist'); // Invalid max -assert.commandFailed(st.removeTagRange('test.tag_range', {_id: 5}, 35, 'a')); +assert.commandFailed(st.removeTagRange('test.tag_range', {_id: 5}, 35)); countTags(1, 'Invalid max: tag range does not exist'); st.stop(); diff --git a/jstests/sharding/tenant_migration_disallowed_on_config_server.js b/jstests/sharding/tenant_migration_disallowed_on_config_server.js index 32c64148768d7..8a13e8797ab1e 100644 --- a/jstests/sharding/tenant_migration_disallowed_on_config_server.js +++ b/jstests/sharding/tenant_migration_disallowed_on_config_server.js @@ -12,12 +12,7 @@ * serverless, * ] */ - import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; -import 
{donorStartMigrationWithProtocol} from "jstests/replsets/libs/tenant_migration_util.js"; - -(function() { -load("jstests/libs/catalog_shard_util.js"); const st = new ShardingTest({shards: 1}); const donorRstShard = st.rs0; @@ -31,50 +26,44 @@ const tenantMigrationTest = new TenantMigrationTest({name: jsTestName(), donorRst: donorRstShard, recipientRst}); // Run tenant migration commands on config servers. -let donorPrimary = donorRstConfig.getPrimary(); +const donorPrimary = donorRstConfig.getPrimary(); -let cmdObj = donorStartMigrationWithProtocol({ +assert.commandFailedWithCode(donorPrimary.adminCommand({ donorStartMigration: 1, tenantId: ObjectId().str, migrationId: UUID(), recipientConnectionString: tenantMigrationTest.getRecipientConnString(), readPreference: {mode: "primary"} -}, - donorPrimary.getDB("admin")); -assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation); +}), + ErrorCodes.IllegalOperation); -cmdObj = { - donorForgetMigration: 1, - migrationId: UUID() -}; -assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation); +assert.commandFailedWithCode( + donorPrimary.adminCommand({donorForgetMigration: 1, migrationId: UUID()}), + ErrorCodes.IllegalOperation); -cmdObj = { - donorAbortMigration: 1, - migrationId: UUID() -}; -assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation); +assert.commandFailedWithCode( + donorPrimary.adminCommand({donorAbortMigration: 1, migrationId: UUID()}), + ErrorCodes.IllegalOperation); -cmdObj = { +assert.commandFailedWithCode(donorPrimary.adminCommand({ recipientSyncData: 1, migrationId: UUID(), donorConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantId: ObjectId().str, readPreference: {mode: "primary"}, startMigrationDonorTimestamp: Timestamp(1, 1) -}; -assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation); +}), + ErrorCodes.IllegalOperation); -cmdObj = { +assert.commandFailedWithCode(donorPrimary.adminCommand({ recipientForgetMigration: 1, migrationId: UUID(), donorConnectionString: tenantMigrationTest.getRecipientRst().getURL(), tenantId: ObjectId().str, readPreference: {mode: "primary"}, -}; -assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.IllegalOperation); +}), + ErrorCodes.IllegalOperation); tenantMigrationTest.stop(); recipientRst.stopSet(); st.stop(); -})(); diff --git a/jstests/sharding/tenant_migration_shard_merge_disallowed_on_config_server.js b/jstests/sharding/tenant_migration_shard_merge_disallowed_on_config_server.js index c7bef956e0703..3c9a478164b00 100644 --- a/jstests/sharding/tenant_migration_shard_merge_disallowed_on_config_server.js +++ b/jstests/sharding/tenant_migration_shard_merge_disallowed_on_config_server.js @@ -15,9 +15,6 @@ import {TenantMigrationTest} from "jstests/replsets/libs/tenant_migration_test.js"; import {isShardMergeEnabled} from "jstests/replsets/libs/tenant_migration_util.js"; -(function() { -load("jstests/libs/catalog_shard_util.js"); - const st = new ShardingTest({shards: 1}); const donorRstShard = st.rs0; const donorRstConfig = st.configRS; @@ -88,4 +85,3 @@ assert.commandFailedWithCode(donorPrimary.adminCommand(cmdObj), ErrorCodes.Illeg tenantMigrationTest.stop(); recipientRst.stopSet(); st.stop(); -})(); diff --git a/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js b/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js index c727b22714518..c868131dd6a16 100644 --- 
a/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js +++ b/jstests/sharding/test_resharding_test_fixture_shutdown_retry_needed.js @@ -4,10 +4,13 @@ * Verifies that the background thread running the reshardCollection command will retry when mongos * reports an error caused by a network error from the primary shard. * + * Incompatible with a config shard because it uses a sequence of fail points to test the + * resharding test fixture, which doesn't work when the first shard is the config server. This only + * tests the testing fixture, so it wouldn't add meaningful coverage for a config shard. * @tags: [ * requires_persistence, * uses_atclustertime, - * temporary_catalog_shard_incompatible, + * config_shard_incompatible, * ] */ (function() { diff --git a/jstests/sharding/test_stacked_migration_cleanup.js b/jstests/sharding/test_stacked_migration_cleanup.js index bb54106efd67d..a5fb4e71575c0 100644 --- a/jstests/sharding/test_stacked_migration_cleanup.js +++ b/jstests/sharding/test_stacked_migration_cleanup.js @@ -1,6 +1,7 @@ // Tests "stacking" multiple migration cleanup threads and their behavior when the collection // changes -// @tags: [assumes_balancer_off] +// TODO SERVER-71169: Re-enable this test once shard filtering is implemented for CQF. +// @tags: [assumes_balancer_off, cqf_incompatible] (function() { 'use strict'; diff --git a/jstests/sharding/time_zone_info_mongos.js b/jstests/sharding/time_zone_info_mongos.js index 343dffc400bfe..ba3ce673f7d5b 100644 --- a/jstests/sharding/time_zone_info_mongos.js +++ b/jstests/sharding/time_zone_info_mongos.js @@ -114,7 +114,7 @@ function testWithGoodTimeZoneDir(tzGoodInfoDir) { testForTimezone("America/New_York"); // Confirm that aggregating with a timezone which is not present in 'tzGoodInfoDir' fails. - timeZonePipeline = buildTimeZonePipeline("Europe/Dublin"); + let timeZonePipeline = buildTimeZonePipeline("Europe/Dublin"); assert.eq(assert.throws(() => mongosColl.aggregate(timeZonePipeline)).code, 40485); st.stop(); diff --git a/jstests/sharding/timeseries_balancer.js b/jstests/sharding/timeseries_balancer.js index d09ecf41c81e5..8a6a9eaf7b46a 100644 --- a/jstests/sharding/timeseries_balancer.js +++ b/jstests/sharding/timeseries_balancer.js @@ -8,10 +8,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; Random.setRandomSeed(); @@ -24,13 +21,6 @@ const metaField = 'hostid'; const st = new ShardingTest({shards: 2, rs: {nodes: 2}, other: {chunkSize: 1}}); const mongos = st.s0; -// Sanity checks. -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - // Databases and collections. assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); const mainDB = mongos.getDB(dbName); @@ -96,5 +86,4 @@ function runTest(shardKey) { } runTest({time: 1}); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_cluster_collstats.js b/jstests/sharding/timeseries_cluster_collstats.js index b672c67a3fec6..34ce5bd8829a7 100644 --- a/jstests/sharding/timeseries_cluster_collstats.js +++ b/jstests/sharding/timeseries_cluster_collstats.js @@ -1,6 +1,7 @@ /** * Tests that the cluster collStats command returns timeseries statistics in the expected format. 
* + * For legacy collStats command: * { * ...., * "ns" : ..., @@ -20,21 +21,41 @@ * .... * } * + * For aggregate $collStats stage: + * [ + * { + * ...., + * "ns" : ..., + * "shard" : ..., + * "latencyStats" : { + * .... + * }, + * "storageStats" : { + * ..., + * "timeseries" : { + * ..., + * }, + * }, + * "count" : { + * .... + * }, + * "queryExecStats" : { + * .... + * }, + * }, + * { + * .... (Other shard's result) + * }, + * ... + * ] + * * @tags: [ - * requires_fcv_51 + * requires_fcv_71 * ] */ -(function() { -load("jstests/core/timeseries/libs/timeseries.js"); - -const st = new ShardingTest({shards: 2}); - -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} +const numShards = 2; +const st = new ShardingTest({shards: numShards}); const dbName = 'testDB'; const collName = 'testColl'; @@ -83,14 +104,14 @@ assert.commandWorked(st.s.adminCommand({ key: {[metaField]: 1}, })); -// Force splitting two chunks. +// Force splitting numShards chunks. const splitPoint = { - meta: numberDoc / 2 + meta: numberDoc / numShards }; assert.commandWorked(st.s.adminCommand({split: bucketNs, middle: splitPoint})); // Ensure that currently both chunks reside on the primary shard. let counts = st.chunkCounts(`system.buckets.${collName}`, dbName); -assert.eq(2, counts[primaryShard.shardName]); +assert.eq(numShards, counts[primaryShard.shardName]); // Move one of the chunks into the second shard. assert.commandWorked(st.s.adminCommand( {movechunk: bucketNs, find: splitPoint, to: otherShard.name, _waitForDelete: true})); @@ -105,20 +126,13 @@ for (let i = 0; i < numberDoc; i++) { } assert.eq(mongosColl.find().itcount(), numberDoc * 2); -clusterCollStatsResult = assert.commandWorked(mongosDB.runCommand({collStats: collName})); -jsTestLog("Sharded cluster collStats command result: " + tojson(clusterCollStatsResult)); +function checkAllFieldsAreInResult(result) { + assert(result.hasOwnProperty("latencyStats"), result); + assert(result.hasOwnProperty("storageStats"), result); + assert(result.hasOwnProperty("count"), result); + assert(result.hasOwnProperty("queryExecStats"), result); +} -// Check that the top-level 'timeseries' fields match the sum of two shard's, that the stats were -// correctly aggregated. 
-assert(clusterCollStatsResult.shards[primaryShard.shardName].timeseries, - "Expected a shard 'timeseries' field on shard " + primaryShard.shardName + - " but didn't find one: " + tojson(clusterCollStatsResult)); -assert(clusterCollStatsResult.shards[otherShard.shardName].timeseries, - "Expected a shard 'timeseries' field on shard " + otherShard.shardName + - " but didn't find one: " + tojson(clusterCollStatsResult)); -assert(clusterCollStatsResult.timeseries, - "Expected an aggregated 'timeseries' field but didn't find one: " + - tojson(clusterCollStatsResult)); function assertTimeseriesAggregationCorrectness(total, shards) { assert(shards.every(x => x.bucketNs === total.bucketNs)); assert.eq(total.bucketCount, @@ -166,10 +180,68 @@ function assertTimeseriesAggregationCorrectness(total, shards) { assert(total.numCommits > 0); assert(total.numMeasurementsCommitted > 0); } -assertTimeseriesAggregationCorrectness(clusterCollStatsResult.timeseries, [ - clusterCollStatsResult.shards[primaryShard.shardName].timeseries, - clusterCollStatsResult.shards[otherShard.shardName].timeseries -]); -st.stop(); -})(); +function verifyClusterCollStatsResult( + clusterCollStatsResult, sumTimeseriesStatsAcrossShards, isAggregation) { + if (isAggregation) { + // $collStats should output one document per shard. + assert.eq(clusterCollStatsResult.length, + numShards, + "Expected " + numShards + + "documents to be returned: " + tojson(clusterCollStatsResult)); + + checkAllFieldsAreInResult(clusterCollStatsResult[0]); + checkAllFieldsAreInResult(clusterCollStatsResult[1]); + } + + assert(sumTimeseriesStatsAcrossShards, + "Expected an aggregated 'timeseries' field but didn't find one: " + + tojson(clusterCollStatsResult)); + + const primaryShardStats = isAggregation + ? clusterCollStatsResult[0].storageStats.timeseries + : clusterCollStatsResult.shards[primaryShard.shardName].timeseries; + + const otherShardStats = isAggregation + ? clusterCollStatsResult[1].storageStats.timeseries + : clusterCollStatsResult.shards[otherShard.shardName].timeseries; + + // Check that the top-level 'timeseries' fields match the sum of two shard's, that the stats + // were correctly aggregated. + assert(primaryShardStats, + "Expected a shard 'timeseries' field on shard " + primaryShard.shardName + + " but didn't find one: " + tojson(clusterCollStatsResult)); + assert(otherShardStats, + "Expected a shard 'timeseries' field on shard " + otherShard.shardName + + " but didn't find one: " + tojson(clusterCollStatsResult)); + + assertTimeseriesAggregationCorrectness(sumTimeseriesStatsAcrossShards, + [primaryShardStats, otherShardStats]); +} + +// Tests that the output of the collStats command returns results from both the shards and +// includes all the expected fields. +clusterCollStatsResult = assert.commandWorked(mongosDB.runCommand({collStats: collName})); +jsTestLog("Sharded cluster collStats command result: " + tojson(clusterCollStatsResult)); +const sumTimeseriesStatsAcrossShards = clusterCollStatsResult.timeseries; +verifyClusterCollStatsResult( + clusterCollStatsResult, sumTimeseriesStatsAcrossShards, false // isAggregation +); + +// Tests that the output of the $collStats stage returns results from both the shards and includes +// all the expected fields. 
+clusterCollStatsResult = + mongosColl + .aggregate( + [{$collStats: {latencyStats: {}, storageStats: {}, count: {}, queryExecStats: {}}}]) + .toArray(); +jsTestLog("Sharded cluster collStats aggregation result: " + tojson(clusterCollStatsResult)); + +// Use the same sumTimeseriesStatsAcrossShards value as the collStats command since +// aggregation does not sum up timeseries stats results. This will also verify that the results +// output by collStats in find and aggregation are the same. +verifyClusterCollStatsResult( + clusterCollStatsResult, sumTimeseriesStatsAcrossShards, true // isAggregation +); + +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_cluster_indexstats.js b/jstests/sharding/timeseries_cluster_indexstats.js index 339c618b59957..764c91018d20a 100644 --- a/jstests/sharding/timeseries_cluster_indexstats.js +++ b/jstests/sharding/timeseries_cluster_indexstats.js @@ -6,17 +6,10 @@ * ] */ -(function() { -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const st = new ShardingTest({shards: 2}); -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - const dbName = 'testDB'; const collName = 'testColl'; const mongosDB = st.s.getDB(dbName); @@ -124,5 +117,4 @@ if (TimeseriesTest.timeseriesScalabilityImprovementsEnabled(st.shard0)) { mongosBucketColl, [{"meta": 1}, {"control.time.min": 1}, {"control.time.max": 1}], true); } -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_coll_mod.js b/jstests/sharding/timeseries_coll_mod.js index ffe93303f3eb9..386871e1947af 100644 --- a/jstests/sharding/timeseries_coll_mod.js +++ b/jstests/sharding/timeseries_coll_mod.js @@ -6,10 +6,6 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallel_shell_helpers.js"); @@ -27,13 +23,6 @@ function runBasicTest() { const mongos = st.s0; const db = mongos.getDB(dbName); - if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog( - "Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; - } - assert.commandWorked( db.createCollection(collName, {timeseries: {timeField: timeField, metaField: metaField}})); @@ -164,5 +153,4 @@ function runReadAfterWriteTest() { runBasicTest(); -runReadAfterWriteTest(); -})(); +runReadAfterWriteTest(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_coll_mod_bucketing_parameters.js b/jstests/sharding/timeseries_coll_mod_bucketing_parameters.js index 93806f782789e..d890f4176c178 100644 --- a/jstests/sharding/timeseries_coll_mod_bucketing_parameters.js +++ b/jstests/sharding/timeseries_coll_mod_bucketing_parameters.js @@ -8,10 +8,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load("jstests/libs/fail_point_util.js"); load("jstests/libs/parallel_shell_helpers.js"); @@ -47,13 +44,6 @@ const checkConfigParametersAfterCollMod = function() { const mongos = st.s0; const db = mongos.getDB(dbName); - if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog( - "Skipping test because the sharded time-series collection feature flag is disabled"); 
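The changes to the surrounding tests follow one mechanical pattern: the IIFE wrapper and `load()` of module-aware helpers are replaced with ES module imports, while non-module helpers (for example `jstests/libs/fail_point_util.js`) keep using `load()`. A minimal sketch of the conversion:

```javascript
// Old pattern (removed throughout these tests):
//
//   (function() {
//   "use strict";
//   load("jstests/core/timeseries/libs/timeseries.js");  // defined TimeseriesTest globally
//   ...
//   })();
//
// New pattern: an ES module import at top level, no IIFE wrapper.
import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
```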
- st.stop(); - return; - } - if (!TimeseriesTest.timeseriesScalabilityImprovementsEnabled(st.shard0)) { jsTestLog("Skipping test because the timeseries scalability feature flag is disabled"); st.stop(); @@ -247,4 +237,3 @@ const checkShardRoutingAfterCollMod = function() { checkConfigParametersAfterCollMod(); checkShardRoutingAfterCollMod(); -})(); diff --git a/jstests/sharding/timeseries_delete_multi.js b/jstests/sharding/timeseries_delete_multi.js index 68db47f99c18d..e761a9dc13220 100644 --- a/jstests/sharding/timeseries_delete_multi.js +++ b/jstests/sharding/timeseries_delete_multi.js @@ -4,16 +4,13 @@ * * @tags: [ * # To avoid multiversion tests - * requires_fcv_70, + * requires_fcv_71, * # To avoid burn-in tests in in-memory build variants * requires_persistence, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; Random.setRandomSeed(); @@ -34,6 +31,10 @@ function generateTimeValue(index) { return ISODate(`${2000 + index}-01-01`); } +// The split point between two shards. This value guarantees that generated time values do not fall +// on this boundary. +const splitTimePointBetweenTwoShards = ISODate("2001-06-30"); + function generateDocsForTestCase(collConfig) { const documents = TimeseriesTest.generateHosts(collConfig.nDocs); for (let i = 0; i < collConfig.nDocs; i++) { @@ -72,7 +73,7 @@ const collectionConfigurations = { timeShardKey: { nDocs: 4, shardKey: {[timeField]: 1}, - splitPoint: {[`control.min.${timeField}`]: generateTimeValue(2)}, + splitPoint: {[`control.min.${timeField}`]: splitTimePointBetweenTwoShards}, }, // Shard key on both meta and time field. @@ -80,19 +81,19 @@ const collectionConfigurations = { nDocs: 4, metaGenerator: (id => id), shardKey: {[metaField]: 1, [timeField]: 1}, - splitPoint: {meta: 2, [`control.min.${timeField}`]: generateTimeValue(2)}, + splitPoint: {meta: 2, [`control.min.${timeField}`]: splitTimePointBetweenTwoShards}, }, metaObjectTimeShardKey: { nDocs: 4, metaGenerator: (index => ({a: index})), shardKey: {[metaField]: 1, [timeField]: 1}, - splitPoint: {meta: {a: 2}, [`control.min.${timeField}`]: generateTimeValue(2)}, + splitPoint: {meta: {a: 2}, [`control.min.${timeField}`]: splitTimePointBetweenTwoShards}, }, metaSubFieldTimeShardKey: { nDocs: 4, metaGenerator: (index => ({a: index})), shardKey: {[metaField + '.a']: 1, [timeField]: 1}, - splitPoint: {'meta.a': 1, [`control.min.${timeField}`]: generateTimeValue(2)}, + splitPoint: {'meta.a': 2, [`control.min.${timeField}`]: splitTimePointBetweenTwoShards}, }, }; @@ -111,17 +112,15 @@ const requestConfigurations = { reachesPrimary: true, reachesOther: true, }, - // Time field filter leads to broadcasted request. - // TODO SERVER-75160: Update this test case to a targeted request to shard0. + // This time field filter has the request targeted to the shard0. timeFilterOneShard: { deletePredicates: [{[timeField]: generateTimeValue(0), f: 0}, {[timeField]: generateTimeValue(1), f: 1}], remainingDocumentsIds: [2, 3], reachesPrimary: true, - reachesOther: true, + reachesOther: false, }, - // Time field filter leads to broadcasted request. - // TODO SERVER-75160: Update the above comment to a targeted request. + // This time field filter has the request targeted to both shards. 
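The split points above are fed to the same split/moveChunk admin commands used earlier in this diff. A minimal shell-style sketch, assuming a ShardingTest `st` and the namespace/field names used by these tests:

```javascript
// Sketch (names assumed): split the buckets collection of a time-sharded
// collection at a fixed time value, then move the upper chunk to the other shard.
const bucketsNs = "testDB.system.buckets.testColl";
const splitPoint = {"control.min.time": ISODate("2001-06-30")};

assert.commandWorked(st.s.adminCommand({split: bucketsNs, middle: splitPoint}));
assert.commandWorked(st.s.adminCommand(
    {moveChunk: bucketsNs, find: splitPoint, to: st.shard1.shardName, _waitForDelete: true}));
```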
timeFilterTwoShards: { deletePredicates: [{[timeField]: generateTimeValue(1), f: 1}, {[timeField]: generateTimeValue(3), f: 3}], @@ -130,8 +129,15 @@ const requestConfigurations = { reachesOther: true, }, metaFilterOneShard: { - deletePredicates: [{[metaField]: 2, f: 2}, {[metaField]: 3, f: 3}], - remainingDocumentsIds: [0, 1], + deletePredicates: [{[metaField]: 2, f: 2}], + remainingDocumentsIds: [0, 1, 3], + reachesPrimary: false, + reachesOther: true, + }, + // Meta + time filter has the request targeted to shard1. + metaTimeFilterOneShard: { + deletePredicates: [{[metaField]: 2, [timeField]: generateTimeValue(2), f: 2}], + remainingDocumentsIds: [0, 1, 3], reachesPrimary: false, reachesOther: true, }, @@ -147,6 +153,13 @@ const requestConfigurations = { reachesPrimary: false, reachesOther: true, }, + // Meta object + time filter has the request targeted to shard1. + metaObjectTimeFilterOneShard: { + deletePredicates: [{[metaField]: {a: 2}, [timeField]: generateTimeValue(2), f: 2}], + remainingDocumentsIds: [0, 1, 3], + reachesPrimary: false, + reachesOther: true, + }, metaObjectFilterTwoShards: { deletePredicates: [{[metaField]: {a: 1}, f: 1}, {[metaField]: {a: 2}, f: 2}], remainingDocumentsIds: [0, 3], @@ -159,6 +172,13 @@ const requestConfigurations = { reachesPrimary: false, reachesOther: true, }, + // Meta sub field + time filter has the request targeted to shard1. + metaSubFieldTimeFilterOneShard: { + deletePredicates: [{[metaField + '.a']: 2, [timeField]: generateTimeValue(2), f: 2}], + remainingDocumentsIds: [0, 1, 3], + reachesPrimary: false, + reachesOther: true, + }, metaSubFieldFilterTwoShards: { deletePredicates: [{[metaField + '.a']: 1, f: 1}, {[metaField + '.a']: 2, f: 2}], remainingDocumentsIds: [0, 3], @@ -358,24 +378,19 @@ runOneTestCase("timeShardKey", "timeFilterTwoShards"); runOneTestCase("metaTimeShardKey", "emptyFilter"); runOneTestCase("metaTimeShardKey", "nonShardKeyFilter"); -// TODO SERVER-75160: The shard key extractor can't extract the correct shard key when the shard -// key is meta + time fields. After the fix, uncomment the following line. -// runOneTestCase("metaTimeShardKey", "metaFilterOneShard"); +runOneTestCase("metaTimeShardKey", "metaTimeFilterOneShard"); runOneTestCase("metaTimeShardKey", "metaFilterTwoShards"); runOneTestCase("metaObjectTimeShardKey", "emptyFilter"); runOneTestCase("metaObjectTimeShardKey", "nonShardKeyFilter"); -// TODO SERVER-75160: The shard key extractor can't extract the correct shard key when the shard -// key is meta + time fields. After the fix, uncomment the following line. 
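The request configurations above boil down to the shape of the delete predicate: a filter that carries the full shard key (meta plus time in these configurations) can be routed to a single shard, while a filter on a non-shard-key field is broadcast. A small shell-style sketch with placeholder field names (the actual metaField/timeField values are defined elsewhere in this test):

```javascript
// Targeted: the filter carries the full (meta, time) shard key value.
let res = db.testColl.deleteMany({tag: 2, time: ISODate("2002-01-01")});
// Broadcast: 'f' is not part of the shard key, so every shard is consulted.
res = db.testColl.deleteMany({f: 1});
// res.deletedCount reports how many measurements were removed in each case.
```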
-// runOneTestCase("metaObjectTimeShardKey", "metaObjectFilterOneShard"); +runOneTestCase("metaObjectTimeShardKey", "metaObjectTimeFilterOneShard"); runOneTestCase("metaObjectTimeShardKey", "metaObjectFilterTwoShards"); runOneTestCase("metaObjectTimeShardKey", "metaSubFieldFilterTwoShards"); runOneTestCase("metaSubFieldTimeShardKey", "emptyFilter"); runOneTestCase("metaSubFieldTimeShardKey", "nonShardKeyFilter"); +runOneTestCase("metaSubFieldTimeShardKey", "metaSubFieldTimeFilterOneShard"); runOneTestCase("metaSubFieldTimeShardKey", "metaObjectFilterTwoShards"); -runOneTestCase("metaSubFieldTimeShardKey", "metaSubFieldFilterOneShard"); runOneTestCase("metaSubFieldTimeShardKey", "metaSubFieldFilterTwoShards"); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_delete_one_with_shard_key.js b/jstests/sharding/timeseries_delete_one_with_shard_key.js index 021e85a0ef34b..774e2fd833de5 100644 --- a/jstests/sharding/timeseries_delete_one_with_shard_key.js +++ b/jstests/sharding/timeseries_delete_one_with_shard_key.js @@ -9,11 +9,6 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. - const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); // @@ -144,5 +139,4 @@ assert.eq(originalCount - numOfDeletedMeasurements, remainingDocuments.length, "Remaining Documents: " + tojson(remainingDocuments)); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_delete_one_without_shard_key.js b/jstests/sharding/timeseries_delete_one_without_shard_key.js index 5f968d094b085..52b0ed6a151aa 100644 --- a/jstests/sharding/timeseries_delete_one_without_shard_key.js +++ b/jstests/sharding/timeseries_delete_one_without_shard_key.js @@ -3,18 +3,12 @@ * * @tags: [ * # To avoid multiversion tests - * requires_fcv_70, + * requires_fcv_71, * # To avoid burn-in tests in in-memory build variants * requires_persistence, - * featureFlagUpdateOneWithoutShardKey, * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. - const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); // @@ -40,8 +34,10 @@ const shard0RoutingValues = { }; const shard1RoutingValues = { shardNumber: 1, - timestamp: ISODate("2010-05-18T08:00:00.000Z") + timestamp1: ISODate("2010-05-18T08:00:00.000Z"), + timestamp2: ISODate("2010-05-19T08:00:00.000Z") }; + const data = [ // Cork. { @@ -67,23 +63,23 @@ const data = [ }, { location: {city: "Dublin", shardNumber: shard1RoutingValues.shardNumber}, - time: shard1RoutingValues.timestamp, + time: shard1RoutingValues.timestamp1, temperature: 12.5, }, { location: {city: "Dublin", shardNumber: shard1RoutingValues.shardNumber}, - time: shard1RoutingValues.timestamp, + time: shard1RoutingValues.timestamp1, temperature: 13, }, // Galway. { location: {city: "Galway", shardNumber: shard1RoutingValues.shardNumber}, - time: shard1RoutingValues.timestamp, + time: shard1RoutingValues.timestamp1, temperature: 20, }, { location: {city: "Galway", shardNumber: shard1RoutingValues.shardNumber}, - time: shard1RoutingValues.timestamp, + time: shard1RoutingValues.timestamp1, temperature: 20, }, // New York City. 
@@ -95,13 +91,13 @@ const data = [ }, { location: {city: "New York City", shardNumber: shard1RoutingValues.shardNumber}, - time: shard1RoutingValues.timestamp, + time: shard1RoutingValues.timestamp2, temperature: 39, }, { _id: 100, location: {city: "New York City", shardNumber: shard1RoutingValues.shardNumber}, - time: shard1RoutingValues.timestamp, + time: shard1RoutingValues.timestamp2, temperature: 20, }, ]; @@ -196,7 +192,7 @@ const runTests = function(collName) { // We expect 'deleteOne' on the time field to succeed. runDeleteOneWithQuery(collName, {"time": shard0RoutingValues.timestamp}, 1); - runDeleteOneWithQuery(collName, {"time": shard1RoutingValues.timestamp}, 1); + runDeleteOneWithQuery(collName, {"time": shard1RoutingValues.timestamp1}, 1); // We expect 'deleteOne' on the _id field to succeed. runDeleteOneWithQuery(collName, {"_id": 100}, 1); @@ -220,5 +216,4 @@ runTests(collNameWithMeta); // Run tests on a collection with no metaField specified. runTests(collNameWithoutMeta); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_delete_with_meta.js b/jstests/sharding/timeseries_delete_with_meta.js index 05e1344d83515..1a8d84304adcf 100644 --- a/jstests/sharding/timeseries_delete_with_meta.js +++ b/jstests/sharding/timeseries_delete_with_meta.js @@ -7,10 +7,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; Random.setRandomSeed(); @@ -23,19 +20,6 @@ const metaField = 'hostid'; const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); const mongos = st.s0; -// Sanity checks. -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - -const deletesEnabled = TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(st.shard0); -if (!deletesEnabled) { - jsTestLog( - "Sharded time-series updates and deletes feature flag is disabled, expecting all delete commands to fail."); -} - // Databases. assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); const mainDB = mongos.getDB(dbName); @@ -228,14 +212,6 @@ function runTest(collConfig, reqConfig, insert) { const isBulkOperation = !reqConfig.deleteQuery; if (!isBulkOperation) { - // If sharded updates and deletes feature flag is disabled, we only test that the delete - // command fails. - if (!deletesEnabled) { - assert.throwsWithCode(() => coll.deleteMany(reqConfig.deleteQuery), - ErrorCodes.NotImplemented); - return; - } - // The 'isTimeseriesNamespace' parameter is not allowed on mongos. const failingDeleteCommand = { delete: `system.buckets.${collName}`, @@ -282,12 +258,7 @@ function runTest(collConfig, reqConfig, insert) { for (let predicate of predicates) { bulk.find(predicate).remove(); } - if (deletesEnabled) { - assert.commandWorked(bulk.execute()); - } else { - assert.throws(() => bulk.execute()); - return; - } + assert.commandWorked(bulk.execute()); } // Check that the query was routed to the correct shards. 
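One way the routing assertions in these tests can be backed is the per-shard profiler, assuming profiling was enabled on each shard's database before the write and that deletes are recorded with op "remove"; a rough sketch:

```javascript
// Sketch (assumptions stated above): count delete operations a shard actually saw
// on the buckets namespace, to confirm the expected routing.
function countDeletesOnShard(shardDB, collName) {
    return shardDB.system.profile
        .find({op: "remove", ns: `${shardDB.getName()}.system.buckets.${collName}`})
        .itcount();
}

// e.g. after shardDB.setProfilingLevel(2):
// assert.eq(countDeletesOnShard(primaryShardDB, collName), reachesPrimary ? 1 : 0);
```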
@@ -366,5 +337,4 @@ TimeseriesTest.run((insert) => { } }, mainDB); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_drop.js b/jstests/sharding/timeseries_drop.js index 1d2d78ab8a867..f018157f8b5e3 100644 --- a/jstests/sharding/timeseries_drop.js +++ b/jstests/sharding/timeseries_drop.js @@ -6,10 +6,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; Random.setRandomSeed(); @@ -22,13 +19,6 @@ const metaField = 'hostid'; const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); const mongos = st.s0; -// Sanity checks. -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - // Databases. const mainDB = mongos.getDB(dbName); const configDB = mongos.getDB('config'); @@ -144,5 +134,4 @@ try { } } finally { st.stop(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/timeseries_find_and_modify_remove.js b/jstests/sharding/timeseries_find_and_modify_remove.js new file mode 100644 index 0000000000000..7f8bad7da856d --- /dev/null +++ b/jstests/sharding/timeseries_find_and_modify_remove.js @@ -0,0 +1,364 @@ +/** + * Tests findAndModify remove on a sharded timeseries collection. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * # findAndModify remove on a sharded timeseries collection is supported since 7.1 + * requires_fcv_71, + * # TODO SERVER-76583: Remove following two tags. + * does_not_support_retryable_writes, + * requires_non_retryable_writes, + * ] + */ + +import { + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, + makeBucketFilter, + metaFieldName, + setUpShardedCluster, + tearDownShardedCluster, + testFindOneAndRemoveOnShardedCollection, + timeFieldName, +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; + +const docs = [ + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, +]; + +setUpShardedCluster(); + +(function testSortOptionFailsOnShardedCollection() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {f: {$gt: 100}}, sort: {f: 1}}, + res: {errorCode: ErrorCodes.InvalidOptions}, + }); +})(); + +(function testProjectOptionHonoredOnShardedCollection() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {f: 106}, fields: {_id: 1, [timeFieldName]: 1, f: 1}}, + res: { + nDeleted: 1, + deletedDoc: { + _id: doc7_c_f106._id, + [timeFieldName]: doc7_c_f106[timeFieldName], + f: doc7_c_f106.f + }, + writeType: "twoPhaseProtocol", + dataBearingShard: "other", + rootStage: "PROJECTION_DEFAULT", + bucketFilter: makeBucketFilter({ + $and: [ + {"control.min.f": {$_internalExprLte: 106}}, + {"control.max.f": {$_internalExprGte: 106}}, + ] + }), + residualFilter: {f: {$eq: 106}}, + }, + }); +})(); + +// Verifies that the collation is properly propagated to the bucket-level filter when the +// query-level collation overrides the collection default collation. This is a two phase delete due +// to the user-specified collation. 
+(function testTwoPhaseDeleteCanHonorCollationOnShardedCollection() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: { + filter: {[metaFieldName]: "a", f: {$gt: 101}}, + // caseInsensitive collation + collation: {locale: "en", strength: 2} + }, + res: { + nDeleted: 1, + deletedDoc: doc3_a_f102, + writeType: "twoPhaseProtocol", + dataBearingShard: "primary", + rootStage: "TS_MODIFY", + bucketFilter: + makeBucketFilter({"meta": {$eq: "a"}}, {"control.max.f": {$_internalExprGt: 101}}), + residualFilter: {f: {$gt: 101}}, + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a targeted delete but no measurement is deleted. +(function testTargetedDeleteByNonMatchingFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {[metaFieldName]: "C", f: 17}}, + res: { + nDeleted: 0, + writeType: "targeted", + dataBearingShard: "other", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({"meta": {$eq: "C"}}, { + $and: [ + {"control.min.f": {$_internalExprLte: 17}}, + {"control.max.f": {$_internalExprGte: 17}}, + ] + }), + residualFilter: {f: {$eq: 17}}, + nBucketsUnpacked: 0, + nReturned: 0, + }, + }); +})(); + +// Query on the 'f' field leads to zero measurement delete. +(function testTwoPhaseDeleteByNonMatchingFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {f: 17}}, + res: { + nDeleted: 0, + writeType: "twoPhaseProtocol", + dataBearingShard: "none", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({ + $and: [ + {"control.min.f": {$_internalExprLte: 17}}, + {"control.max.f": {$_internalExprGte: 17}}, + ] + }), + residualFilter: {f: {$eq: 17}}, + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a targeted delete when the meta field is included +// in the shard key. +(function testTargetedDeleteByShardKeyAndFieldFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {[metaFieldName]: "B", f: 103}}, + res: { + nDeleted: 1, + deletedDoc: doc4_b_f103, + writeType: "targeted", + dataBearingShard: "other", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({"meta": {$eq: "B"}}, { + $and: [ + {"control.min.f": {$_internalExprLte: 103}}, + {"control.max.f": {$_internalExprGte: 103}}, + ] + }), + residualFilter: {f: {$eq: 103}}, + nBucketsUnpacked: 1, + nReturned: 1, + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a two phase delete when the meta field is not +// included in the shard key. +(function testTwoPhaseDeleteByMetaAndFieldFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + includeMeta: false, + cmd: {filter: {[metaFieldName]: "B", f: 103}}, + res: { + nDeleted: 1, + deletedDoc: doc4_b_f103, + writeType: "twoPhaseProtocol", + dataBearingShard: "other", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({ + $and: [ + { + $and: [ + {[`control.min.${metaFieldName}`]: {$_internalExprLte: "B"}}, + {[`control.max.${metaFieldName}`]: {$_internalExprGte: "B"}}, + ] + }, + { + $and: [ + {"control.min.f": {$_internalExprLte: 103}}, + {"control.max.f": {$_internalExprGte: 103}}, + ] + } + ] + }), + residualFilter: {$and: [{[metaFieldName]: {$eq: "B"}}, {f: {$eq: 103}}]}, + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a targeted delete when the meta field is included +// in the shard key. 
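The bucketFilter expectations above all follow one pattern: an equality predicate on a measurement field is widened into a bucket-level predicate on the control.min/control.max summary fields, and the original predicate is kept as a residual filter for the unpacked measurements. A pure-JavaScript sketch of that shape ($_internalExprLte/$_internalExprGte are internal operators; this only mirrors the asserted structure, not the server's rewrite code):

```javascript
// Sketch: bucket-level widening of {field: {$eq: value}}.
function bucketLevelEqFilter(field, value) {
    return {
        $and: [
            {[`control.min.${field}`]: {$_internalExprLte: value}},
            {[`control.max.${field}`]: {$_internalExprGte: value}},
        ]
    };
}

// bucketLevelEqFilter("f", 103) matches every bucket whose [min, max] range for
// 'f' could contain 103; the residual filter {f: {$eq: 103}} then runs on the
// unpacked measurements.
```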
+(function testTargetedDeleteByShardKeyAndFieldRangeFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {[metaFieldName]: "A", f: {$lt: 103}}}, + res: { + nDeleted: 1, + deletedDoc: doc2_a_f101, + writeType: "targeted", + dataBearingShard: "primary", + rootStage: "TS_MODIFY", + bucketFilter: + makeBucketFilter({"meta": {$eq: "A"}}, {"control.min.f": {$_internalExprLt: 103}}), + residualFilter: {f: {$lt: 103}}, + // 'doc1_a_nofields' and 'doc1_a_f101' are in different buckets because the time values + // are distant enough and $_internalExprLt matches no 'control.min.f' field too. So, the + // TS_MODIFY stage will unpack two buckets. + nBucketsUnpacked: 2, + nReturned: 1, + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a two phase delete when the meta field is not +// included in the shard key. +(function testTwoPhaseDeleteByShardKeyAndFieldRangeFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + includeMeta: false, + cmd: {filter: {[metaFieldName]: "A", f: {$lt: 103}}}, + res: { + nDeleted: 1, + deletedDoc: doc2_a_f101, + writeType: "twoPhaseProtocol", + dataBearingShard: "primary", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({ + $and: [ + { + $and: [ + {[`control.min.${metaFieldName}`]: {$_internalExprLte: "A"}}, + {[`control.max.${metaFieldName}`]: {$_internalExprGte: "A"}}, + ] + }, + {"control.min.f": {$_internalExprLt: 103}} + ] + }), + residualFilter: {$and: [{[metaFieldName]: {$eq: "A"}}, {f: {$lt: 103}}]}, + }, + }); +})(); + +// Query on the time field leads to a targeted delete when the time field is included in the shard +// key. +(function testTargetedDeleteByTimeShardKeyFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + includeMeta: false, + cmd: {filter: {[timeFieldName]: doc6_c_f105[timeFieldName]}}, + res: { + nDeleted: 1, + deletedDoc: doc6_c_f105, + writeType: "targeted", + dataBearingShard: "other", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({ + $and: [ + { + [`control.min.${timeFieldName}`]: + {$_internalExprLte: doc6_c_f105[timeFieldName]} + }, + // -1 hour + { + [`control.min.${timeFieldName}`]: + {$_internalExprGte: ISODate("2005-12-31T23:00:00Z")} + }, + { + [`control.max.${timeFieldName}`]: + {$_internalExprGte: doc6_c_f105[timeFieldName]} + }, + // +1 hour + { + [`control.max.${timeFieldName}`]: + {$_internalExprLte: ISODate("2006-01-01T01:00:00Z")} + }, + // The bucket's _id encodes the time info and so the bucket filter will include + // the _id range filter. + {"_id": {"$lte": ObjectId("43b71b80ffffffffffffffff")}}, + {"_id": {"$gte": ObjectId("43b70d700000000000000000")}} + ] + }), + residualFilter: {[timeFieldName]: {$eq: doc6_c_f105[timeFieldName]}}, + nBucketsUnpacked: 1, + nReturned: 1, + }, + }); +})(); + +// Query on the time field leads to a two phase delete when the time field is not included in the +// shard key. 
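The _id bounds in the bucket filters above come from how bucket _ids are generated: assuming the leading 4 bytes of a bucket _id encode the bucket's minimum time as seconds since the Unix epoch (with the remaining bytes free to range from all zeros to all ones), the bounds asserted by the test can be reproduced:

```javascript
// Sketch (assumption stated above): derive the ObjectId timestamp prefix for a time.
function bucketIdTimePrefix(date) {
    const seconds = Math.floor(date.getTime() / 1000);
    return seconds.toString(16).padStart(8, "0");
}

// 2005-12-31T23:00:00Z (the measurement time minus one hour) gives "43b70d70",
// matching the lower bound ObjectId("43b70d700000000000000000") asserted above.
bucketIdTimePrefix(new Date("2005-12-31T23:00:00Z"));  // "43b70d70"
```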
+(function testTwoPhaseDeleteByTimeFieldFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {[timeFieldName]: doc7_c_f106[timeFieldName]}}, + res: { + nDeleted: 1, + deletedDoc: doc7_c_f106, + writeType: "twoPhaseProtocol", + dataBearingShard: "other", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({ + $and: [ + { + [`control.min.${timeFieldName}`]: + {$_internalExprLte: doc7_c_f106[timeFieldName]} + }, + // -1 hour + { + [`control.min.${timeFieldName}`]: + {$_internalExprGte: ISODate("2006-12-31T23:00:00Z")} + }, + { + [`control.max.${timeFieldName}`]: + {$_internalExprGte: doc7_c_f106[timeFieldName]} + }, + // +1 hour + { + [`control.max.${timeFieldName}`]: + {$_internalExprLte: ISODate("2007-01-01T01:00:00Z")} + }, + // The bucket's _id encodes the time info and so the bucket filter will include + // the _id range filter. + {"_id": {"$lte": ObjectId("45984f00ffffffffffffffff")}}, + {"_id": {"$gte": ObjectId("459840f00000000000000000")}} + ] + }), + residualFilter: {[timeFieldName]: {$eq: doc7_c_f106[timeFieldName]}}, + }, + }); +})(); + +// Empty filter matches all docs but only deletes one. +(function testTwoPhaseDeleteByEmptyFilter() { + testFindOneAndRemoveOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {}}, + // Don't validate exact results as we could delete any doc from any shard. + res: { + nDeleted: 1, + writeType: "twoPhaseProtocol", + dataBearingShard: "any", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({}), + residualFilter: {}, + }, + }); +})(); + +tearDownShardedCluster(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_find_and_modify_update.js b/jstests/sharding/timeseries_find_and_modify_update.js new file mode 100644 index 0000000000000..d99fb3015c186 --- /dev/null +++ b/jstests/sharding/timeseries_find_and_modify_update.js @@ -0,0 +1,444 @@ +/** + * Tests findAndModify remove on a sharded timeseries collection. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * # findAndModify update on a sharded timeseries collection is supported since 7.1 + * requires_fcv_71, + * # TODO SERVER-76583: Remove following two tags. 
+ * does_not_support_retryable_writes, + * requires_non_retryable_writes, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ + +import { + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, + generateTimeValue, + makeBucketFilter, + metaFieldName, + setUpShardedCluster, + tearDownShardedCluster, + testFindOneAndUpdateOnShardedCollection, + timeFieldName +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; + +const docs = [ + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, +]; + +setUpShardedCluster(); + +(function testSortOptionFailsOnShardedCollection() { + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {f: {$gt: 100}}, update: {f: 200}, sort: {f: 1}}, + res: {errorCode: ErrorCodes.InvalidOptions}, + }); +})(); + +(function testProjectOptionHonoredOnShardedCollection() { + const returnDoc = + {_id: doc7_c_f106._id, [timeFieldName]: doc7_c_f106[timeFieldName], f: doc7_c_f106.f}; + const copyDocs = docs.map(doc => Object.assign({}, doc)); + const resultDocList = copyDocs.filter(doc => doc._id !== 7); + resultDocList.push(Object.assign({}, doc7_c_f106, {f: 300})); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: { + filter: {f: 106}, + update: {$set: {f: 300}}, + fields: {_id: 1, [timeFieldName]: 1, f: 1} + }, + res: { + resultDocList: resultDocList, + returnDoc: returnDoc, + writeType: "twoPhaseProtocol", + dataBearingShard: "other", + }, + }); +})(); + +// Verifies that the collation is properly propagated to the bucket-level filter when the +// query-level collation overrides the collection default collation. This is a two phase update due +// to the user-specified collation. +(function testTwoPhaseUpdateCanHonorCollationOnShardedCollection() { + const returnDoc = Object.assign({}, doc3_a_f102, {[metaFieldName]: "C"}); + const copyDocs = docs.map(doc => Object.assign({}, doc)); + const resultDocList = copyDocs.filter(doc => doc._id !== 3); + resultDocList.push(returnDoc); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: { + filter: {[metaFieldName]: "a", f: 102}, + // This also excercises the shard key update in the two phase update. The two phase + // update will run inside an internal transaction. So we don't need to run this update + // in a transaction. + update: {$set: {[metaFieldName]: "C"}}, + returnNew: true, + // caseInsensitive collation + collation: {locale: "en", strength: 2} + }, + res: { + resultDocList: resultDocList, + returnDoc: returnDoc, + writeType: "twoPhaseProtocol", + dataBearingShard: "primary", + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a targeted update but no measurement is updated. +(function testTargetedUpdateByNonMatchingFilter() { + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {[metaFieldName]: "C", f: 17}, update: {$set: {_id: 1000}}}, + res: { + resultDocList: docs, + writeType: "targeted", + dataBearingShard: "other", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({"meta": {$eq: "C"}}, { + $and: [ + {"control.min.f": {$_internalExprLte: 17}}, + {"control.max.f": {$_internalExprGte: 17}}, + ] + }), + residualFilter: {f: {$eq: 17}}, + nBucketsUnpacked: 0, + nMatched: 0, + nModified: 0, + }, + }); +})(); + +// Query on the 'f' field leads to zero measurement update. 
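Expressed through the shell helper rather than the test harness, the findAndModify shapes exercised above look roughly like the sketch below (collection and values assumed): 'fields' trims the returned document, 'new: true' returns the post-image, and, per the comment above, a query-level collation forces the two-phase write protocol on a sharded time-series collection.

```javascript
// Sketch (shell syntax; names and values assumed).
const updatedDoc = db.coll.findAndModify({
    query: {tag: "a", f: {$gt: 101}},
    update: {$set: {f: 200}},
    fields: {_id: 1, time: 1, f: 1},
    new: true,
    collation: {locale: "en", strength: 2},  // case-insensitive
});
```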
+(function testTwoPhaseUpdateByNonMatchingFilter() { + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {f: 17}, update: {$set: {_id: 1000}}}, + res: { + resultDocList: docs, + writeType: "twoPhaseProtocol", + dataBearingShard: "none", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({ + $and: [ + {"control.min.f": {$_internalExprLte: 17}}, + {"control.max.f": {$_internalExprGte: 17}}, + ] + }), + residualFilter: {f: {$eq: 17}}, + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a targeted update when the meta field is included +// in the shard key. Pipeline-style update. +(function testTargetedUpdateByShardKeyAndFieldFilter() { + const modifiedDoc = {_id: 1000, [metaFieldName]: "B", [timeFieldName]: generateTimeValue(4)}; + const copyDocs = docs.map(doc => Object.assign({}, doc)); + const resultDocList = copyDocs.filter(doc => doc._id !== 4); + resultDocList.push(modifiedDoc); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: { + filter: {[metaFieldName]: "B", f: 103}, + update: [{$set: {_id: 1000}}, {$unset: "f"}], + }, + res: { + resultDocList: resultDocList, + returnDoc: doc4_b_f103, + writeType: "targeted", + dataBearingShard: "other", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({"meta": {$eq: "B"}}, { + $and: [ + {"control.min.f": {$_internalExprLte: 103}}, + {"control.max.f": {$_internalExprGte: 103}}, + ] + }), + residualFilter: {f: {$eq: 103}}, + nBucketsUnpacked: 1, + nReturned: 1, + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a targeted update but fails because of unset of +// the time field. +(function testTargetedUpdateByShardKeyAndFieldFilter() { + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: { + filter: {[metaFieldName]: "B", f: 103}, + update: [{$set: {_id: 1000}}, {$unset: timeFieldName}], + }, + res: {errorCode: ErrorCodes.BadValue}, + }); +})(); + +const replacementDoc = { + _id: 1000, + [metaFieldName]: "A", + [timeFieldName]: generateTimeValue(0), + f: 2000 +}; + +// Query on the meta field and 'f' field leads to a two phase update when the meta field is not +// included in the shard key. Replacement-style update. The new time value makes the measurement +// belong to a different shard but the time field is not specified in the query and so, this update +// should fail. +(function testTwoPhaseShardKeyUpdateByMetaAndFieldFilterButNoShardKey() { + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + includeMeta: false, + cmd: {filter: {[metaFieldName]: "B", f: 103}, update: replacementDoc}, + res: {errorCode: 7717803}, + }); +})(); + +// Query on the 'f' field leads to a two phase update. Replacement-style update. The meta value +// makes the measurement belong to a different shard and the request runs in a transaction. This +// should succeed. +(function testTwoPhaseShardKeyUpdateByFieldFilter() { + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + startTxn: true, + cmd: {filter: {f: 106}, update: replacementDoc, returnNew: true}, + // Don't validate the resultDocList because we don't know which doc will be replaced. + res: { + returnDoc: replacementDoc, + writeType: "twoPhaseProtocol", + dataBearingShard: "other", + }, + }); +})(); + +// Query on the meta field and 'f' field leads to a targeted update when the meta field is not +// included in the shard key. Replacement-style update. 
The new meta value makes the measurement +// belong to a different shard but it does not run in a transaction and it should fail. +(function testTargetedShardKeyUpdateByMetaAndFieldFilterButNotInTxn() { + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {[metaFieldName]: "B", f: 103}, update: replacementDoc, returnNew: true}, + res: {errorCode: ErrorCodes.IllegalOperation}, + }); +})(); + +// Query on the meta field and 'f' field leads to a targeted update when the meta field is included +// in the shard key. Replacement-style update. The new meta value makes the measurement belong to a +// different shard. This should run in a transaction. +(function testTargetedShardKeyUpdateByMetaAndFieldFilter() { + const copyDocs = docs.map(doc => Object.assign({}, doc)); + const resultDocList = copyDocs.filter(doc => doc._id !== 4); + resultDocList.push(replacementDoc); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + startTxn: true, + cmd: {filter: {[metaFieldName]: "B", f: 103}, update: replacementDoc, returnNew: true}, + res: { + resultDocList: resultDocList, + returnDoc: replacementDoc, + writeType: "targeted", + dataBearingShard: "other", + // We can't verify explain output because explain can't run in a transaction. + }, + }); +})(); + +// Meta filter matches all docs with tag: "B" but only update one. The replacement doc has tag: "A" +// and so, the measurement will be moved to a different shard. This should run in a transaction and +// succeed. +(function testTargetedShardKeyUpdateByMetaFilter() { + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + startTxn: true, + cmd: {filter: {[metaFieldName]: "B"}, update: replacementDoc, returnNew: true}, + // Don't validate the resultDocList because we don't know which doc will be replaced. + res: { + returnDoc: replacementDoc, + writeType: "targeted", + dataBearingShard: "other", + }, + }); +})(); + +// The update is targeted but there's actually no match. So, the update becomes an upsert. +(function testTargetedPipelineUpsertByMetaAndFieldFilter() { + const returnDoc = Object.assign( + {}, {_id: -100, [metaFieldName]: "B", [timeFieldName]: generateTimeValue(10), f: 2345}); + const resultDocList = docs.map(doc => Object.assign({}, doc)); + resultDocList.push(returnDoc); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: { + filter: {[metaFieldName]: "B", f: 2345}, + update: [{$set: {_id: -100}}, {$set: {[timeFieldName]: generateTimeValue(10)}}], + upsert: true, + returnNew: true, + }, + res: { + resultDocList: resultDocList, + returnDoc: returnDoc, + writeType: "targeted", + dataBearingShard: "other", + bucketFilter: makeBucketFilter({"meta": {$eq: "B"}}, { + $and: [ + {"control.min.f": {$_internalExprLte: 2345}}, + {"control.max.f": {$_internalExprGte: 2345}}, + ] + }), + residualFilter: {f: {$eq: 2345}}, + nBucketsUnpacked: 0, + nMatched: 0, + nModified: 0, + nUpserted: 1, + }, + }); +})(); + +// The update is targeted but there's actually no match. The update becomes an upsert but the +// replacement document has a different shard key value. 
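The 'startTxn: true' cases above reflect that an update which moves a measurement to a different shard key value has to run inside a transaction here (the non-transactional case fails with IllegalOperation). A minimal shell-style sketch of that pattern, with placeholder names and values:

```javascript
// Sketch (names assumed): wrap a shard-key-changing findAndModify in a transaction.
const session = db.getMongo().startSession();
const sessionColl = session.getDatabase("test").getCollection("coll");

session.startTransaction();
sessionColl.findAndModify({
    query: {tag: "B", f: 103},
    update: {_id: 1000, tag: "A", time: ISODate("2000-01-01"), f: 2000},  // new shard key value
    new: true,
});
session.commitTransaction();
```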
+(function testTargetedReplacementUpsertByMetaAndFieldFilter() { + const replacementDoc = Object.assign( + {}, {_id: -100, [metaFieldName]: "A", [timeFieldName]: generateTimeValue(10), f: 2345}); + const resultDocList = docs.map(doc => Object.assign({}, doc)); + resultDocList.push(replacementDoc); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + startTxn: true, + cmd: { + filter: {[metaFieldName]: "B", f: 2345}, + update: replacementDoc, + upsert: true, + returnNew: true, + }, + res: { + resultDocList: resultDocList, + returnDoc: replacementDoc, + writeType: "targeted", + dataBearingShard: "other", + nUpserted: 1, + }, + }); +})(); + +(function testTwoPhaseReplacementUpsertByFieldFilter() { + const replacementDoc = Object.assign( + {}, {_id: -100, [metaFieldName]: "A", [timeFieldName]: generateTimeValue(10), f: 2345}); + const resultDocList = docs.map(doc => Object.assign({}, doc)); + resultDocList.push(replacementDoc); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: { + filter: {f: {$gt: 500}}, + update: replacementDoc, + upsert: true, + returnNew: true, + }, + res: { + resultDocList: resultDocList, + returnDoc: replacementDoc, + writeType: "twoPhaseProtocol", + // For a two-phase upsert, no shard will get the targeted findAndModify update command. + // Instead, one of them will get an insert command. + dataBearingShard: "none", + nUpserted: 1, + }, + }); +})(); + +// Query on the time field leads to a targeted update when the time field is included in the shard +// key. +(function testTargetedUpdateByTimeShardKeyFilter() { + const modifiedDoc = Object.assign({}, doc6_c_f105, {f: 1234}); + const copyDocs = docs.map(doc => Object.assign({}, doc)); + const resultDocList = copyDocs.map(doc => doc._id !== doc6_c_f105._id ? doc : modifiedDoc); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + includeMeta: false, + cmd: {filter: {[timeFieldName]: doc6_c_f105[timeFieldName]}, update: {$set: {f: 1234}}}, + res: { + resultDocList: resultDocList, + returnDoc: doc6_c_f105, + writeType: "targeted", + dataBearingShard: "other", + rootStage: "TS_MODIFY", + bucketFilter: makeBucketFilter({ + $and: [ + { + [`control.min.${timeFieldName}`]: + {$_internalExprLte: doc6_c_f105[timeFieldName]} + }, + // -1 hour + { + [`control.min.${timeFieldName}`]: + {$_internalExprGte: ISODate("2005-12-31T23:00:00Z")} + }, + { + [`control.max.${timeFieldName}`]: + {$_internalExprGte: doc6_c_f105[timeFieldName]} + }, + // +1 hour + { + [`control.max.${timeFieldName}`]: + {$_internalExprLte: ISODate("2006-01-01T01:00:00Z")} + }, + // The bucket's _id encodes the time info and so the bucket filter will include + // the _id range filter. + {"_id": {"$lte": ObjectId("43b71b80ffffffffffffffff")}}, + {"_id": {"$gte": ObjectId("43b70d700000000000000000")}} + ] + }), + residualFilter: {[timeFieldName]: {$eq: doc6_c_f105[timeFieldName]}}, + nBucketsUnpacked: 1, + nMatched: 1, + nModified: 1, + }, + }); +})(); + +// Query on the time field leads to a two phase update when the time field is not included in the +// shard key. +(function testTwoPhaseUpdateByTimeFieldFilter() { + const modifiedDoc = Object.assign({}, doc7_c_f106, {f: 107}); + const copyDocs = docs.map(doc => Object.assign({}, doc)); + const resultDocList = copyDocs.map(doc => doc._id !== doc7_c_f106._id ? 
doc : modifiedDoc); + + testFindOneAndUpdateOnShardedCollection({ + initialDocList: docs, + cmd: {filter: {[timeFieldName]: doc7_c_f106[timeFieldName]}, update: {$inc: {f: 1}}}, + res: { + resultDocList: resultDocList, + returnDoc: doc7_c_f106, + writeType: "twoPhaseProtocol", + dataBearingShard: "other", + }, + }); +})(); + +tearDownShardedCluster(); diff --git a/jstests/sharding/timeseries_indexes.js b/jstests/sharding/timeseries_indexes.js index 5b9927e2f17b7..286684527003f 100644 --- a/jstests/sharding/timeseries_indexes.js +++ b/jstests/sharding/timeseries_indexes.js @@ -7,9 +7,8 @@ * ] */ -(function() { -load("jstests/core/timeseries/libs/timeseries.js"); -load('jstests/libs/analyze_plan.js'); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {getAggPlanStages} from "jstests/libs/analyze_plan.js"; Random.setRandomSeed(); @@ -23,12 +22,6 @@ const sDB = st.s.getDB(dbName); const shard0DB = st.shard0.getDB(dbName); const shard1DB = st.shard1.getDB(dbName); -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - // Helpers. let currentId = 0; function generateId() { @@ -59,7 +52,7 @@ function generateDoc(time, metaValue) { // Split the chunks such that primary shard has chunk: [MinKey, 2020-01-01) and other shard has // chunk [2020-01-01, MaxKey]. - splitPoint = {[`control.min.${timeField}`]: ISODate(`2020-01-01`)}; + let splitPoint = {[`control.min.${timeField}`]: ISODate(`2020-01-01`)}; assert.commandWorked( mongosDB.adminCommand({split: `${dbName}.system.buckets.${collName}`, middle: splitPoint})); @@ -150,4 +143,3 @@ function generateDoc(time, metaValue) { })(); st.stop(); -})(); diff --git a/jstests/sharding/timeseries_insert.js b/jstests/sharding/timeseries_insert.js index 100e394b2d668..1a389b6684a9c 100644 --- a/jstests/sharding/timeseries_insert.js +++ b/jstests/sharding/timeseries_insert.js @@ -6,10 +6,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; Random.setRandomSeed(); @@ -22,13 +19,6 @@ const metaField = 'hostid'; const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); const mongos = st.s0; -// Sanity checks. -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - // Databases and collections. assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); const mainDB = mongos.getDB(dbName); @@ -222,5 +212,4 @@ try { }, mainDB); } finally { st.stop(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/timeseries_multiple_mongos.js b/jstests/sharding/timeseries_multiple_mongos.js index de23cb94fa5b1..f8f2aec9e8b06 100644 --- a/jstests/sharding/timeseries_multiple_mongos.js +++ b/jstests/sharding/timeseries_multiple_mongos.js @@ -7,10 +7,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. 
+import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; load('jstests/sharding/libs/shard_versioning_util.js'); Random.setRandomSeed(); @@ -28,12 +25,6 @@ const mongos1 = st.s1.getDB(dbName); const shard0DB = st.shard0.getDB(dbName); const shard1DB = st.shard1.getDB(dbName); -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - // Databases and collections. assert.commandWorked(mongos0.adminCommand({enableSharding: dbName})); @@ -289,109 +280,106 @@ runTest({ numProfilerEntries: {sharded: 1, unsharded: 1}, }); -if (TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(st.shard0)) { - // Tests for updates. - runTest({ - shardKey: {[metaField + ".a"]: 1}, - cmdObj: { - update: collName, - updates: [{ - q: {}, - u: {$inc: {[metaField + ".b"]: 1}}, - multi: true, - }] - }, - numProfilerEntries: {sharded: 2, unsharded: 1}, - }); - - runTest({ - shardKey: {[metaField + ".a"]: 1}, - cmdObj: { - update: collName, - updates: [{ - q: {[metaField + ".a"]: 1}, - u: {$inc: {[metaField + ".b"]: -1}}, - multi: true, - }] - }, - numProfilerEntries: {sharded: 1, unsharded: 1}, - }); - - runTest({ - shardKey: {[metaField + ".a"]: 1}, - cmdObj: { - update: bucketsCollName, - updates: [{ - q: {}, - u: {$inc: {["meta.b"]: 1}}, - multi: true, - }] - }, - numProfilerEntries: {sharded: 2, unsharded: 1}, - }); - - runTest({ - shardKey: {[metaField + ".a"]: 1}, - cmdObj: { - update: bucketsCollName, - updates: [{ - q: {["meta.a"]: 1}, - u: {$inc: {["meta.b"]: -1}}, - multi: true, - }] - }, - numProfilerEntries: {sharded: 1, unsharded: 1}, - }); - - // Tests for deletes. - runTest({ - shardKey: {[metaField]: 1}, - cmdObj: { - delete: collName, - deletes: [{ - q: {}, - limit: 0, - }], - }, - numProfilerEntries: {sharded: 2, unsharded: 1}, - }); - - runTest({ - shardKey: {[metaField]: 1}, - cmdObj: { - delete: collName, - deletes: [{ - q: {[metaField]: 0}, - limit: 0, - }], - }, - numProfilerEntries: {sharded: 1, unsharded: 1}, - }); - - runTest({ - shardKey: {[metaField]: 1}, - cmdObj: { - delete: bucketsCollName, - deletes: [{ - q: {}, - limit: 0, - }], - }, - numProfilerEntries: {sharded: 2, unsharded: 1}, - }); - - runTest({ - shardKey: {[metaField]: 1}, - cmdObj: { - delete: bucketsCollName, - deletes: [{ - q: {meta: 0}, - limit: 0, - }], - }, - numProfilerEntries: {sharded: 1, unsharded: 1}, - }); -} +// Tests for updates. +runTest({ + shardKey: {[metaField + ".a"]: 1}, + cmdObj: { + update: collName, + updates: [{ + q: {}, + u: {$inc: {[metaField + ".b"]: 1}}, + multi: true, + }] + }, + numProfilerEntries: {sharded: 2, unsharded: 1}, +}); + +runTest({ + shardKey: {[metaField + ".a"]: 1}, + cmdObj: { + update: collName, + updates: [{ + q: {[metaField + ".a"]: 1}, + u: {$inc: {[metaField + ".b"]: -1}}, + multi: true, + }] + }, + numProfilerEntries: {sharded: 1, unsharded: 1}, +}); + +runTest({ + shardKey: {[metaField + ".a"]: 1}, + cmdObj: { + update: bucketsCollName, + updates: [{ + q: {}, + u: {$inc: {["meta.b"]: 1}}, + multi: true, + }] + }, + numProfilerEntries: {sharded: 2, unsharded: 1}, +}); + +runTest({ + shardKey: {[metaField + ".a"]: 1}, + cmdObj: { + update: bucketsCollName, + updates: [{ + q: {["meta.a"]: 1}, + u: {$inc: {["meta.b"]: -1}}, + multi: true, + }] + }, + numProfilerEntries: {sharded: 1, unsharded: 1}, +}); + +// Tests for deletes. 
+runTest({ + shardKey: {[metaField]: 1}, + cmdObj: { + delete: collName, + deletes: [{ + q: {}, + limit: 0, + }], + }, + numProfilerEntries: {sharded: 2, unsharded: 1}, +}); + +runTest({ + shardKey: {[metaField]: 1}, + cmdObj: { + delete: collName, + deletes: [{ + q: {[metaField]: 0}, + limit: 0, + }], + }, + numProfilerEntries: {sharded: 1, unsharded: 1}, +}); + +runTest({ + shardKey: {[metaField]: 1}, + cmdObj: { + delete: bucketsCollName, + deletes: [{ + q: {}, + limit: 0, + }], + }, + numProfilerEntries: {sharded: 2, unsharded: 1}, +}); + +runTest({ + shardKey: {[metaField]: 1}, + cmdObj: { + delete: bucketsCollName, + deletes: [{ + q: {meta: 0}, + limit: 0, + }], + }, + numProfilerEntries: {sharded: 1, unsharded: 1}, +}); st.stop(); -})(); diff --git a/jstests/sharding/timeseries_orphan_buckets.js b/jstests/sharding/timeseries_orphan_buckets.js index 89e79e20fa7bf..a4c0f9d0926bd 100644 --- a/jstests/sharding/timeseries_orphan_buckets.js +++ b/jstests/sharding/timeseries_orphan_buckets.js @@ -4,10 +4,8 @@ * @tags: [requires_fcv_51] */ -(function() { - load("jstests/libs/fail_point_util.js"); -load("jstests/core/timeseries/libs/timeseries.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; Random.setRandomSeed(); const dbName = "test"; @@ -24,12 +22,6 @@ assert.commandWorked(st.s.adminCommand({movePrimary: dbName, to: st.shard0.shard const primaryShard = st.getPrimaryShard(dbName); const otherShard = st.getOther(primaryShard); -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(primaryShard)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - let currentId = 0; function generateId() { return currentId++; @@ -55,9 +47,7 @@ assert.commandWorked(sDB.adminCommand({ // Split the chunks such that primary shard has chunk: [MinKey, 2020-01-01) and other shard has // chunk [2020-01-01, MaxKey]. -splitPoint = { - [`control.min.${timeField}`]: ISODate(`2020-01-01`) -}; +let splitPoint = {[`control.min.${timeField}`]: ISODate(`2020-01-01`)}; assert.commandWorked( sDB.adminCommand({split: `${dbName}.system.buckets.${collName}`, middle: splitPoint})); @@ -117,5 +107,4 @@ assert.commandWorked(res); assert.eq(0, primaryShard.getDB("config").getCollection("rangeDeletions").count()); assert.eq(16, coll.find().itcount(), coll.find().toArray()); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_out_sharded.js b/jstests/sharding/timeseries_out_sharded.js new file mode 100644 index 0000000000000..0d1aee96560fe --- /dev/null +++ b/jstests/sharding/timeseries_out_sharded.js @@ -0,0 +1,170 @@ +/** + * Verifies that $out can write to time-series collections from sharded collections. An + * identical test for non-sharded source collections exist in + * jstests/core/timeseries/timeseries_out_nonsharded.js + * + * + * @tags: [ + * # TimeseriesAggTests doesn't handle stepdowns. + * does_not_support_stepdowns, + * # We need a timeseries collection. 
+ * requires_timeseries, + * requires_fcv_71, + * featureFlagAggOutTimeseries + * ] + */ +import {TimeseriesAggTests} from "jstests/core/timeseries/libs/timeseries_agg_helpers.js"; + +const numHosts = 10; +const numIterations = 20; + +const st = new ShardingTest({shards: 2}); +const dbName = "test"; +const testDB = st.s.getDB(dbName); +const targetCollName = "out_time"; +assert.commandWorked(testDB.adminCommand({enableSharding: dbName})); + +let [inColl, observerInColl] = + TimeseriesAggTests.prepareInputCollections(numHosts, numIterations, true, testDB); + +st.shardColl(inColl.getName(), {tags: 1}, false, dbName); +observerInColl.createIndex({tags: 1}); +st.shardColl(observerInColl.getName(), {tags: 1}, {tags: 1}, false, dbName); + +function runTest({ + observer: observerPipeline, + timeseries: timeseriesPipeline, + drop: shouldDrop = true, + value: valueToCheck = null +}) { + let expectedTSOptions = null; + if (!shouldDrop) { + // To test if an index is preserved by $out when replacing an existing collection. + assert.commandWorked(testDB[targetCollName].createIndex({usage_guest: 1})); + // To test if $out preserves the original collection options. + let collections = testDB.getCollectionInfos({name: targetCollName}); + assert.eq(collections.length, 1, collections); + expectedTSOptions = collections[0]["options"]["timeseries"]; + } else { + expectedTSOptions = timeseriesPipeline[0]["$out"]["timeseries"]; + } + + // Gets the expected results from a non time-series observer input collection. + const expectedResults = TimeseriesAggTests.getOutputAggregateResults( + observerInColl, observerPipeline, null, shouldDrop, testDB); + + // Gets the actual results from a time-series input collection. + const actualResults = TimeseriesAggTests.getOutputAggregateResults( + inColl, timeseriesPipeline, null, shouldDrop, testDB); + + // Verifies that the number of measurements is same as expected. + TimeseriesAggTests.verifyResults(actualResults, expectedResults); + if (valueToCheck) { + for (var i = 0; i < expectedResults.length; ++i) { + assert.eq(actualResults[i], {"time": valueToCheck}, actualResults); + } + } + + let collections = testDB.getCollectionInfos({name: targetCollName}); + assert.eq(collections.length, 1, collections); + + // Verifies a time-series collection was not made, if that is expected. + if (!expectedTSOptions) { + assert(!collections[0]["options"]["timeseries"], collections); + return; + } + + // Verifies the time-series options are correct, if a time-series collection is expected. + let actualOptions = collections[0]["options"]["timeseries"]; + for (let option in expectedTSOptions) { + assert.eq(expectedTSOptions[option], actualOptions[option], actualOptions); + } + + // Verifies the original index is maintained, if $out is replacing an existing collection. + if (!shouldDrop) { + let indexSpecs = testDB[targetCollName].getIndexes(); + assert.eq(indexSpecs.filter(index => index.name == "usage_guest_1").length, 1); + } +} + +// Tests that $out works with a source time-series collections writing to a non-timeseries +// collection. +runTest({observer: [{$out: "observer_out"}], timeseries: [{$out: targetCollName}]}); + +// Tests that $out creates a time-series collection when the collection does not exist. 
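The pipelines built by generateOutPipeline in this test expand to the document form of $out with explicit database, collection, and time-series options; a minimal shell-style sketch (source and target names assumed):

```javascript
// Sketch (names assumed): write aggregation results into a new time-series collection.
db.sourceColl.aggregate([
    {$out: {
        db: "test",
        coll: "out_time",
        timeseries: {timeField: "time", metaField: "tags"}
    }}
]);
```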
+let timeseriesPipeline = TimeseriesAggTests.generateOutPipeline( + targetCollName, dbName, {timeField: "time", metaField: "tags"}); +runTest({observer: [{$out: "observer_out"}], timeseries: timeseriesPipeline}); + +// Test that $out can replace an existing time-series collection without the 'timeseries' option. +// Change an option in the existing time-series collections. +assert.commandWorked(testDB.runCommand({collMod: targetCollName, expireAfterSeconds: 360})); +// Run the $out stage. +timeseriesPipeline = [{$out: targetCollName}]; +runTest({observer: [{$out: "observer_out"}], timeseries: timeseriesPipeline, drop: false}); + +// Test that $out can replace an existing time-series collection with the 'timeseries' option. +let newDate = new Date('1999-09-30T03:24:00'); +let observerPipeline = [{$set: {"time": newDate}}, {$out: "observer_out"}]; +timeseriesPipeline = TimeseriesAggTests.generateOutPipeline( + targetCollName, dbName, {timeField: "time", metaField: "tags"}, {$set: {"time": newDate}}); +// Run the $out stage and confirm all the documents have the new value. +runTest({observer: observerPipeline, timeseries: timeseriesPipeline, drop: false, value: newDate}); + +// Test that an error is raised if the target collection is a sharded time-series collection. +assert.throwsWithCode(() => observerInColl.aggregate([{$out: inColl.getName()}]), + ErrorCodes.IllegalOperation); + +// Test that an error is raised if the database does not exist. +const destDB = testDB.getSiblingDB("outDifferentDB"); +assert.commandWorked(destDB.dropDatabase()); +assert.throwsWithCode( + () => inColl.aggregate( + {$out: {db: destDB.getName(), coll: targetCollName, timeseries: {timeField: "time"}}}), + ErrorCodes.NamespaceNotFound); + +// Tests that an error is raised when trying to create a time-series collection from a non +// time-series collection. +let pipeline = TimeseriesAggTests.generateOutPipeline("observer_out", dbName, {timeField: "time"}); +assert.throwsWithCode(() => inColl.aggregate(pipeline), 7268700); +assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7268700); + +// Tests that an error is raised for invalid timeseries options. +pipeline = TimeseriesAggTests.generateOutPipeline( + targetCollName, dbName, {timeField: "time", invalidField: "invalid"}); +assert.throwsWithCode(() => inColl.aggregate(pipeline), 40415); +assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 40415); + +// Tests that an error is raised if the user changes the 'timeField'. +pipeline = + TimeseriesAggTests.generateOutPipeline(targetCollName, dbName, {timeField: "usage_guest_nice"}); +assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103); +assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103); + +// Tests that an error is raised if the user changes the 'metaField'. +pipeline = TimeseriesAggTests.generateOutPipeline( + targetCollName, dbName, {timeField: "time", metaField: "usage_guest_nice"}); +assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103); +assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103); + +// Tests that an error is raised if the user changes 'bucketManSpanSeconds'. 
+pipeline = TimeseriesAggTests.generateOutPipeline( + targetCollName, + dbName, + {timeField: "time", bucketMaxSpanSeconds: 330, bucketRoundingSeconds: 330}); +assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103); +assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103); + +// Tests that an error is raised if the user changes 'granularity'. +pipeline = TimeseriesAggTests.generateOutPipeline( + targetCollName, dbName, {timeField: "time", granularity: "minutes"}); +assert.throwsWithCode(() => inColl.aggregate(pipeline), 7406103); +assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7406103); + +// Tests that an error is raised if a conflicting view exists. +assert.commandWorked(testDB.createCollection("view_out", {viewOn: "out"})); +pipeline = TimeseriesAggTests.generateOutPipeline("view_out", dbName, {timeField: "time"}); +assert.throwsWithCode(() => inColl.aggregate(pipeline), 7268703); +assert.throwsWithCode(() => observerInColl.aggregate(pipeline), 7268703); + +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_query.js b/jstests/sharding/timeseries_query.js index e3adf118f54e2..93d775c610e86 100644 --- a/jstests/sharding/timeseries_query.js +++ b/jstests/sharding/timeseries_query.js @@ -7,9 +7,8 @@ * ] */ -(function() { -load("jstests/core/timeseries/libs/timeseries.js"); -load("jstests/libs/analyze_plan.js"); +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {isClusteredIxscan, isCollscan, isIxscan} from "jstests/libs/analyze_plan.js"; Random.setRandomSeed(); @@ -25,12 +24,6 @@ assert.commandWorked(sDB.adminCommand({enableSharding: dbName})); const shard0DB = st.shard0.getDB(dbName); const shard1DB = st.shard1.getDB(dbName); -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - // Helpers. let currentId = 0; function generateId() { @@ -142,7 +135,7 @@ function runQuery( // Split the chunks such that primary shard has chunk: [MinKey, 2020-01-01) and other shard has // chunk [2020-01-01, MaxKey]. - splitPoint = {[`control.min.${timeField}`]: ISODate(`2020-01-01`)}; + let splitPoint = {[`control.min.${timeField}`]: ISODate(`2020-01-01`)}; assert.commandWorked( sDB.adminCommand({split: `${dbName}.system.buckets.${collName}`, middle: splitPoint})); @@ -475,7 +468,7 @@ function runQuery( timeseries: {timeField, metaField} })); - splitPoint = {'meta.prefix': 0, 'meta.suffix': 0}; + let splitPoint = {'meta.prefix': 0, 'meta.suffix': 0}; assert.commandWorked( sDB.adminCommand({split: `${dbName}.system.buckets.${collName}`, middle: splitPoint})); @@ -525,5 +518,4 @@ function runQuery( assert(coll.drop()); })(); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_shard_collection.js b/jstests/sharding/timeseries_shard_collection.js index 6068743a6314a..de4dc8622a3f7 100644 --- a/jstests/sharding/timeseries_shard_collection.js +++ b/jstests/sharding/timeseries_shard_collection.js @@ -6,18 +6,10 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); - Random.setRandomSeed(); const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); -// TODO SERVER-68008 remove error codes 6235600, 6235601, 6235602 and 6235603 from all the -// assertions contained by this test. 
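// The surrounding test changes follow the jstest move from `load()`-based scripts wrapped in an
// IIFE to ES modules, visible in the hunks above. A minimal sketch of the new shape (the import
// path and export mirror the ones used above; everything else is illustrative):
import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js";
// No `(function() { ... })();` wrapper and no explicit "use strict" are needed; modules are strict
// by default, which is also why formerly implicit globals now need `let`/`const` declarations.
let splitPoint = {"control.min.time": ISODate("2020-01-01")};
jsTestLog(`module-style test running, splitPoint: ${tojson(splitPoint)}`);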
- const dbName = 'test'; assert.commandWorked(st.s.adminCommand({enableSharding: dbName})); const sDB = st.s.getDB(dbName); @@ -26,346 +18,303 @@ const timeseries = { metaField: 'hostId', }; -if (TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - function validateBucketsCollectionSharded({collName, shardKey}) { - const configColls = st.s.getDB('config').collections; - const output = configColls - .find({ - _id: 'test.system.buckets.' + collName, - key: shardKey, - timeseriesFields: {$exists: true}, - }) - .toArray(); - assert.eq(output.length, 1, configColls.find().toArray()); - assert.eq(output[0].timeseriesFields.timeField, timeseries.timeField, output[0]); - assert.eq(output[0].timeseriesFields.metaField, timeseries.metaField, output[0]); - } +function validateBucketsCollectionSharded({collName, shardKey}) { + const configColls = st.s.getDB('config').collections; + const output = configColls + .find({ + _id: 'test.system.buckets.' + collName, + key: shardKey, + timeseriesFields: {$exists: true}, + }) + .toArray(); + assert.eq(output.length, 1, configColls.find().toArray()); + assert.eq(output[0].timeseriesFields.timeField, timeseries.timeField, output[0]); + assert.eq(output[0].timeseriesFields.metaField, timeseries.metaField, output[0]); +} + +function validateViewCreated(viewName) { + const views = sDB.runCommand({listCollections: 1, filter: {type: 'timeseries', name: viewName}}) + .cursor.firstBatch; + assert.eq(views.length, 1, views); - function validateViewCreated(viewName) { - const views = - sDB.runCommand({listCollections: 1, filter: {type: 'timeseries', name: viewName}}) - .cursor.firstBatch; - assert.eq(views.length, 1, views); + const tsOpts = views[0].options.timeseries; + assert.eq(tsOpts.timeField, timeseries.timeField, tsOpts); + assert.eq(tsOpts.metaField, timeseries.metaField, tsOpts); +} - const tsOpts = views[0].options.timeseries; - assert.eq(tsOpts.timeField, timeseries.timeField, tsOpts); - assert.eq(tsOpts.metaField, timeseries.metaField, tsOpts); +// Simple shard key on the metadata field. +function metaShardKey(implicit) { + // Command should fail since the 'timeseries' specification does not match that existing + // collection. + if (!implicit) { + assert.commandWorked(sDB.createCollection('ts', {timeseries})); + // This index gets created as {meta: 1} on the buckets collection. + assert.commandWorked(sDB.ts.createIndex({hostId: 1})); + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {'hostId': 1}, + timeseries: {timeField: 'time'}, + }), + [5731500]); } - // Simple shard key on the metadata field. - function metaShardKey(implicit) { - // Command should fail since the 'timeseries' specification does not match that existing - // collection. - if (!implicit) { - assert.commandWorked(sDB.createCollection('ts', {timeseries})); - // This index gets created as {meta: 1} on the buckets collection. 
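// The two validators above inspect the sharding metadata from different angles: the config server
// entry for the buckets namespace and the user-facing time-series view. A condensed sketch of the
// same checks, assuming a mongos connection `db` and the `test.ts` names used in this test:
const bucketsEntry =
    db.getSiblingDB("config").collections.findOne({_id: "test.system.buckets.ts"});
// For a sharded time-series collection the config entry records the original time-series options
// under 'timeseriesFields', and the shard key is expressed on the buckets schema (e.g. {meta: 1}).
printjson(bucketsEntry);
const tsViews = db.getSiblingDB("test")
                    .runCommand({listCollections: 1, filter: {type: "timeseries", name: "ts"}})
                    .cursor.firstBatch;
assert.eq(tsViews.length, 1, tsViews);
printjson(tsViews[0].options.timeseries);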
- assert.commandWorked(sDB.ts.createIndex({hostId: 1})); - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {'hostId': 1}, - timeseries: {timeField: 'time'}, - }), - [5731500, 6235601]); - } + assert.commandWorked( + st.s.adminCommand({shardCollection: 'test.ts', key: {'hostId': 1}, timeseries})); - assert.commandWorked( - st.s.adminCommand({shardCollection: 'test.ts', key: {'hostId': 1}, timeseries})); + validateBucketsCollectionSharded({collName: 'ts', shardKey: {meta: 1}, timeseries}); - validateBucketsCollectionSharded({collName: 'ts', shardKey: {meta: 1}, timeseries}); + validateViewCreated("ts"); - validateViewCreated("ts"); + assert.commandWorked(st.s.adminCommand({split: 'test.system.buckets.ts', middle: {meta: 10}})); - assert.commandWorked( - st.s.adminCommand({split: 'test.system.buckets.ts', middle: {meta: 10}})); + const primaryShard = st.getPrimaryShard(dbName); + assert.commandWorked(st.s.adminCommand({ + movechunk: 'test.system.buckets.ts', + find: {meta: 10}, + to: st.getOther(primaryShard).shardName, + _waitForDelete: true, + })); - const primaryShard = st.getPrimaryShard(dbName); - assert.commandWorked(st.s.adminCommand({ - movechunk: 'test.system.buckets.ts', - find: {meta: 10}, - to: st.getOther(primaryShard).shardName, - _waitForDelete: true, - })); + let counts = st.chunkCounts('system.buckets.ts', 'test'); + assert.eq(1, counts[st.shard0.shardName]); + assert.eq(1, counts[st.shard1.shardName]); - let counts = st.chunkCounts('system.buckets.ts', 'test'); - assert.eq(1, counts[st.shard0.shardName]); - assert.eq(1, counts[st.shard1.shardName]); + assert(sDB.ts.drop()); +} + +// Sharding an existing timeseries collection. +metaShardKey(false); + +// Sharding a new timeseries collection. +metaShardKey(true); + +// Shard key on the metadata field and time fields. +function metaAndTimeShardKey(implicit) { + assert.commandWorked(st.s.adminCommand({enableSharding: 'test'})); - assert(sDB.ts.drop()); + if (!implicit) { + assert.commandWorked(sDB.createCollection('ts', {timeseries})); } - // Sharding an existing timeseries collection. - metaShardKey(false); + assert.commandWorked(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {'hostId': 1, 'time': 1}, + timeseries, + })); + + validateViewCreated("ts"); + + validateBucketsCollectionSharded({ + collName: 'ts', + // The 'time' field should be translated to 'control.min.time' on buckets collection. + shardKey: {meta: 1, 'control.min.time': 1}, + timeseries, + }); + + assert.commandWorked(st.s.adminCommand( + {split: 'test.system.buckets.ts', middle: {meta: 10, 'control.min.time': MinKey}})); + + const primaryShard = st.getPrimaryShard(dbName); + assert.commandWorked(st.s.adminCommand({ + movechunk: 'test.system.buckets.ts', + find: {meta: 10, 'control.min.time': MinKey}, + to: st.getOther(primaryShard).shardName, + _waitForDelete: true, + })); + + let counts = st.chunkCounts('system.buckets.ts', 'test'); + assert.eq(1, counts[st.shard0.shardName]); + assert.eq(1, counts[st.shard1.shardName]); + + assert(sDB.ts.drop()); +} - // Sharding a new timeseries collection. - metaShardKey(true); +// Sharding an existing timeseries collection. +metaAndTimeShardKey(false); + +// Sharding a new timeseries collection. 
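// As the comment inside metaAndTimeShardKey() notes, a user-facing shard key of
// {hostId: 1, time: 1} is stored on the buckets namespace as {meta: 1, 'control.min.time': 1}.
// A minimal sketch of issuing the same commands directly, assuming a mongos connection `db` and
// the illustrative `test.ts` names from this test:
assert.commandWorked(db.adminCommand({enableSharding: "test"}));
assert.commandWorked(db.adminCommand({
    shardCollection: "test.ts",
    key: {hostId: 1, time: 1},
    timeseries: {timeField: "time", metaField: "hostId"},
}));
// Splits and migrations are then phrased in terms of the translated buckets key.
assert.commandWorked(db.adminCommand(
    {split: "test.system.buckets.ts", middle: {meta: 10, "control.min.time": MinKey}}));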
+metaAndTimeShardKey(true); + +function timeseriesInsert(coll) { + let insertCount = 0; + for (let i = 10; i < 100; i++) { + assert.commandWorked(coll.insert([ + {hostId: 10, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 11, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 12, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 13, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 14, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 15, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 16, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 17, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 18, time: ISODate(`19` + i + `-01-01`)}, + {hostId: 19, time: ISODate(`19` + i + `-01-01`)} + ])); + insertCount += 10; + } + return insertCount; +} - // Shard key on the metadata field and time fields. - function metaAndTimeShardKey(implicit) { - assert.commandWorked(st.s.adminCommand({enableSharding: 'test'})); +// Shard key on the hashed field. - if (!implicit) { - assert.commandWorked(sDB.createCollection('ts', {timeseries})); +function runShardKeyPatternValidation(collectionExists) { + (function hashAndTimeShardKey() { + if (collectionExists) { + assert.commandWorked( + sDB.createCollection('ts', {timeseries: {timeField: 'time', metaField: 'hostId'}})); + } + + // Only range is allowed on time field. + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {time: 'hashed'}, + timeseries: {timeField: 'time', metaField: 'hostId'}, + }), + ErrorCodes.BadValue); + + if (!collectionExists) { + assert.commandWorked( + sDB.createCollection('ts', {timeseries: {timeField: 'time', metaField: 'hostId'}})); } + let coll = sDB.getCollection('ts'); + assert.commandWorked(coll.insert([ + {hostId: 10, time: ISODate(`1901-01-01`)}, + {hostId: 11, time: ISODate(`1902-01-01`)}, + ])); + assert.commandWorked(coll.createIndex({hostId: 'hashed'})); assert.commandWorked(st.s.adminCommand({ shardCollection: 'test.ts', - key: {'hostId': 1, 'time': 1}, - timeseries, + key: {hostId: 'hashed'}, + timeseries: {timeField: 'time', metaField: 'hostId'} })); - validateViewCreated("ts"); - validateBucketsCollectionSharded({ collName: 'ts', - // The 'time' field should be translated to 'control.min.time' on buckets collection. - shardKey: {meta: 1, 'control.min.time': 1}, - timeseries, + shardKey: {meta: 'hashed'}, + timeSeriesParams: {timeField: 'time', metaField: 'hostId'} }); - assert.commandWorked(st.s.adminCommand( - {split: 'test.system.buckets.ts', middle: {meta: 10, 'control.min.time': MinKey}})); + assert.eq(coll.find().itcount(), 2); // Validate count after sharding. + let insertCount = timeseriesInsert(coll); + assert.eq(coll.find().itcount(), insertCount + 2); + coll.drop(); - const primaryShard = st.getPrimaryShard(dbName); + if (collectionExists) { + assert.commandWorked( + sDB.createCollection('ts', {timeseries: {timeField: 'time', metaField: 'hostId'}})); + } + assert.commandWorked(st.s.adminCommand({enableSharding: 'test'})); + + // Sharding key with hashed meta field and time field. 
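// A standalone sketch of the hashed-meta plus range-time compound key exercised next, assuming a
// mongos connection `db` (names mirror this test). As asserted above, hashing is only allowed on
// the meta field; the time field must remain a range component and, per the checks further down,
// must come last.
assert.commandWorked(db.adminCommand({
    shardCollection: "test.ts",
    key: {hostId: "hashed", time: 1},
    timeseries: {timeField: "time", metaField: "hostId"},
    numInitialChunks: 2,
}));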
assert.commandWorked(st.s.adminCommand({ - movechunk: 'test.system.buckets.ts', - find: {meta: 10, 'control.min.time': MinKey}, - to: st.getOther(primaryShard).shardName, - _waitForDelete: true, + shardCollection: 'test.ts', + key: {hostId: 'hashed', time: 1}, + timeseries: {timeField: 'time', metaField: 'hostId'}, + numInitialChunks: 2 })); - let counts = st.chunkCounts('system.buckets.ts', 'test'); - assert.eq(1, counts[st.shard0.shardName]); - assert.eq(1, counts[st.shard1.shardName]); - - assert(sDB.ts.drop()); - } + coll = sDB.getCollection('ts'); + assert.eq(coll.find().itcount(), 0); + insertCount = timeseriesInsert(coll); + assert.eq(coll.find().itcount(), insertCount); + coll.drop(); + })(); - // Sharding an existing timeseries collection. - metaAndTimeShardKey(false); - - // Sharding a new timeseries collection. - metaAndTimeShardKey(true); - - function timeseriesInsert(coll) { - let insertCount = 0; - for (let i = 10; i < 100; i++) { - assert.commandWorked(coll.insert([ - {hostId: 10, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 11, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 12, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 13, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 14, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 15, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 16, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 17, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 18, time: ISODate(`19` + i + `-01-01`)}, - {hostId: 19, time: ISODate(`19` + i + `-01-01`)} - ])); - insertCount += 10; + // Test that invalid shard keys fail. + (function invalidShardKeyPatterns() { + if (collectionExists) { + assert.commandWorked( + sDB.createCollection('ts', {timeseries: {timeField: 'time', metaField: 'hostId'}})); } - return insertCount; - } - // Shard key on the hashed field. - - function runShardKeyPatternValidation(collectionExists) { - (function hashAndTimeShardKey() { - if (collectionExists) { - assert.commandWorked(sDB.createCollection( - 'ts', {timeseries: {timeField: 'time', metaField: 'hostId'}})); - } - - // Only range is allowed on time field. - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {time: 'hashed'}, - timeseries: {timeField: 'time', metaField: 'hostId'}, - }), - ErrorCodes.BadValue); - - if (!collectionExists) { - assert.commandWorked(sDB.createCollection( - 'ts', {timeseries: {timeField: 'time', metaField: 'hostId'}})); - } - let coll = sDB.getCollection('ts'); - assert.commandWorked(coll.insert([ - {hostId: 10, time: ISODate(`1901-01-01`)}, - {hostId: 11, time: ISODate(`1902-01-01`)}, - ])); - assert.commandWorked(coll.createIndex({hostId: 'hashed'})); - - assert.commandWorked(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {hostId: 'hashed'}, - timeseries: {timeField: 'time', metaField: 'hostId'} - })); - - validateBucketsCollectionSharded({ - collName: 'ts', - shardKey: {meta: 'hashed'}, - timeSeriesParams: {timeField: 'time', metaField: 'hostId'} - }); - - assert.eq(coll.find().itcount(), 2); // Validate count after sharding. - let insertCount = timeseriesInsert(coll); - assert.eq(coll.find().itcount(), insertCount + 2); - coll.drop(); - - if (collectionExists) { - assert.commandWorked(sDB.createCollection( - 'ts', {timeseries: {timeField: 'time', metaField: 'hostId'}})); - } - assert.commandWorked(st.s.adminCommand({enableSharding: 'test'})); - - // Sharding key with hashed meta field and time field. 
- assert.commandWorked(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {hostId: 'hashed', time: 1}, - timeseries: {timeField: 'time', metaField: 'hostId'}, - numInitialChunks: 2 - })); - - coll = sDB.getCollection('ts'); - assert.eq(coll.find().itcount(), 0); - insertCount = timeseriesInsert(coll); - assert.eq(coll.find().itcount(), insertCount); - coll.drop(); - })(); - - // Test that invalid shard keys fail. - (function invalidShardKeyPatterns() { - if (collectionExists) { - assert.commandWorked(sDB.createCollection( - 'ts', {timeseries: {timeField: 'time', metaField: 'hostId'}})); - } - - // No other fields, including _id, are allowed in the shard key pattern - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {_id: 1}, - timeseries: {timeField: 'time', metaField: 'hostId'}, - }), - [5914001, 6235603]); - - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {_id: 1, time: 1}, - timeseries: {timeField: 'time', metaField: 'hostId'}, - }), - [5914001, 6235603]); - - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {_id: 1, hostId: 1}, - timeseries: {timeField: 'time', metaField: 'hostId'}, - }), - [5914001, 6235603]); - - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {a: 1}, - timeseries: {timeField: 'time', metaField: 'hostId'}, - }), - [5914001, 6235603]); - - // Shared key where time is not the last field in shard key should fail. - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {time: 1, hostId: 1}, - timeseries: {timeField: 'time', metaField: 'hostId'} - }), - [5914000, 6235602]); - assert(sDB.getCollection("ts").drop()); - })(); - - (function noMetaFieldTimeseries() { - if (collectionExists) { - assert.commandWorked(sDB.createCollection('ts', {timeseries: {timeField: 'time'}})); - } - - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {_id: 1}, - timeseries: {timeField: 'time'}, - }), - [5914001, 6235603]); - - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {a: 1}, - timeseries: {timeField: 'time'}, - }), - [5914001, 6235603]); - - assert.commandWorked(st.s.adminCommand( - {shardCollection: 'test.ts', key: {time: 1}, timeseries: {timeField: 'time'}})); - - assert(sDB.getCollection("ts").drop()); - })(); - } + // No other fields, including _id, are allowed in the shard key pattern + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {_id: 1}, + timeseries: {timeField: 'time', metaField: 'hostId'}, + }), + [5914001]); - runShardKeyPatternValidation(true); - runShardKeyPatternValidation(false); + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {_id: 1, time: 1}, + timeseries: {timeField: 'time', metaField: 'hostId'}, + }), + [5914001]); - // Verify that the shardCollection command fails if the 'system.buckets' collection does not - // have time-series options. - sDB.getCollection("ts").drop(); - sDB.createCollection("system.buckets.ts"); - assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.ts', - key: {time: 1}, - }), - [6159000, 6235600]); - assert.commandFailedWithCode( - st.s.adminCommand( - {shardCollection: 'test.ts', key: {time: 1}, timeseries: {timeField: 'time'}}), - [6159000, 6235600]); - - // Cannot shard a system namespace. 
- assert.commandFailedWithCode(st.s.adminCommand({ - shardCollection: 'test.system.bucket.ts', - key: {time: 1}, - }), - ErrorCodes.IllegalOperation); - -} else { - (function timeseriesCollectionsCannotBeSharded() { - assert.commandFailedWithCode( - st.s.adminCommand({shardCollection: 'test.ts', key: {meta: 1}, timeseries}), 5731502); + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {_id: 1, hostId: 1}, + timeseries: {timeField: 'time', metaField: 'hostId'}, + }), + [5914001]); - assert.commandWorked(sDB.createCollection('ts', {timeseries})); + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {a: 1}, + timeseries: {timeField: 'time', metaField: 'hostId'}, + }), + [5914001]); + + // Shared key where time is not the last field in shard key should fail. + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {time: 1, hostId: 1}, + timeseries: {timeField: 'time', metaField: 'hostId'} + }), + [5914000]); + assert(sDB.getCollection("ts").drop()); + })(); - assert.commandFailedWithCode( - st.s.adminCommand({shardCollection: 'test.ts', key: {meta: 1}}), 5731502); - - // Insert directly on the primary shard because mongos does not know how to insert into a TS - // collection. - st.ensurePrimaryShard(dbName, st.shard0.shardName); - const tsColl = st.shard0.getDB(dbName).ts; - const numDocs = 20; - let docs = []; - for (let i = 0; i < numDocs; i++) { - const doc = { - time: ISODate(), - hostId: i, - _id: i, - data: Random.rand(), - }; - docs.push(doc); - assert.commandWorked(tsColl.insert(doc)); + (function noMetaFieldTimeseries() { + if (collectionExists) { + assert.commandWorked(sDB.createCollection('ts', {timeseries: {timeField: 'time'}})); } - // This index gets created as {meta: 1} on the buckets collection. - assert.commandWorked(tsColl.createIndex({hostId: 1})); + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {_id: 1}, + timeseries: {timeField: 'time'}, + }), + [5914001]); - // Trying to shard the buckets collection -> error - assert.commandFailedWithCode( - st.s.adminCommand({shardCollection: 'test.system.buckets.ts', key: {meta: 1}}), - 5731501); + assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {a: 1}, + timeseries: {timeField: 'time'}, + }), + [5914001]); + + assert.commandWorked(st.s.adminCommand( + {shardCollection: 'test.ts', key: {time: 1}, timeseries: {timeField: 'time'}})); - assert(tsColl.drop()); + assert(sDB.getCollection("ts").drop()); })(); } -st.stop(); -})(); +runShardKeyPatternValidation(true); +runShardKeyPatternValidation(false); + +// Verify that the shardCollection command fails if the 'system.buckets' collection does not +// have time-series options. +sDB.getCollection("ts").drop(); +sDB.createCollection("system.buckets.ts"); +assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.ts', + key: {time: 1}, +}), + [6159000]); +assert.commandFailedWithCode( + st.s.adminCommand( + {shardCollection: 'test.ts', key: {time: 1}, timeseries: {timeField: 'time'}}), + [6159000]); + +// Cannot shard a system namespace. 
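// Recap of the shard key rules asserted above: only the meta field (or its subfields) and the
// time field may appear in a time-series shard key, and the time field must be the last,
// range-indexed component. One accepted and one rejected call, assuming a mongos connection `db`
// (collection names are illustrative; the error code is the one asserted in this test):
assert.commandWorked(db.adminCommand(
    {shardCollection: "test.ts_ok", key: {time: 1}, timeseries: {timeField: "time"}}));
assert.commandFailedWithCode(db.adminCommand({
    shardCollection: "test.ts_bad",
    key: {time: 1, hostId: 1},  // time is not the last component
    timeseries: {timeField: "time", metaField: "hostId"},
}), 5914000);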
+assert.commandFailedWithCode(st.s.adminCommand({ + shardCollection: 'test.system.bucket.ts', + key: {time: 1}, +}), + ErrorCodes.IllegalOperation); + +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_sharding_admin_commands.js b/jstests/sharding/timeseries_sharding_admin_commands.js index 4f43152f5486d..323fd893173b9 100644 --- a/jstests/sharding/timeseries_sharding_admin_commands.js +++ b/jstests/sharding/timeseries_sharding_admin_commands.js @@ -9,21 +9,8 @@ // Cannot run the filtering metadata check on tests that run refineCollectionShardKey. TestData.skipCheckShardFilteringMetadata = true; -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); - // Connections. const mongo = new ShardingTest({shards: 2, rs: {nodes: 3}}); - -// Sanity checks. -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(mongo.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - mongo.stop(); - return; -} - const dbName = 'testDB'; const collName = 'testColl'; const timeField = 'time'; @@ -176,10 +163,7 @@ function assertRangeMatch(savedRange, paramRange) { assert.commandFailedWithCode( mongo.s0.adminCommand( {reshardCollection: viewNss, key: {[metaField]: 1, [controlTimeField]: 1}}), - [ - ErrorCodes.NotImplemented, - ErrorCodes.NamespaceNotSharded /* TODO SERVER-67929 Remove this error code */ - ]); + [ErrorCodes.NotImplemented]); assert.commandFailedWithCode( mongo.s0.adminCommand( {reshardCollection: bucketNss, key: {[metaField]: 1, [controlTimeField]: 1}}), @@ -246,10 +230,8 @@ function assertRangeMatch(savedRange, paramRange) { // Can add control.min.time as the last shard key component on the timeseries collection. (function checkRefineCollectionShardKeyCommand() { createTimeSeriesColl({index: {[metaField]: 1, [timeField]: 1}, shardKey: {[metaField]: 1}}); - assert.commandWorkedOrFailedWithCode( - mongo.s0.adminCommand( - {refineCollectionShardKey: viewNss, key: {[metaField]: 1, [controlTimeField]: 1}}), - ErrorCodes.NamespaceNotSharded /* TODO SERVER-67929 Remove this error code */); + assert.commandWorked(mongo.s0.adminCommand( + {refineCollectionShardKey: viewNss, key: {[metaField]: 1, [controlTimeField]: 1}})); assert.commandWorked(mongo.s0.adminCommand( {refineCollectionShardKey: bucketNss, key: {[metaField]: 1, [controlTimeField]: 1}})); const coll = mongo.s0.getDB(dbName)[collName]; @@ -285,21 +267,4 @@ function assertRangeMatch(savedRange, paramRange) { dropTimeSeriesColl(); })(); -// Check renameCollection command cannot modify name through the view namespace. -(function checkRenameCollectionCommand() { - createTimeSeriesColl( - {index: {[metaField]: 1, [timeField]: 1}, shardKey: {[metaField]: 1, [timeField]: 1}}); - const newCollName = `${collName}New`; - const newViewNss = `${dbName}.${newCollName}`; - // Rename collection is not supported through view namespace. 
- assert.commandFailedWithCode( - mongo.s.adminCommand({renameCollection: viewNss, to: newViewNss}), [ - ErrorCodes.IllegalOperation, - ErrorCodes.CommandNotSupportedOnView, /* TODO SERVER-67929 Remove this error code */ - ErrorCodes.NamespaceNotFound, /* TODO SERVER-67929 Remove this error code */ - ]); - dropTimeSeriesColl(); -})(); - mongo.stop(); -})(); diff --git a/jstests/sharding/timeseries_shardkey_update.js b/jstests/sharding/timeseries_shardkey_update.js new file mode 100644 index 0000000000000..5165fadefcea6 --- /dev/null +++ b/jstests/sharding/timeseries_shardkey_update.js @@ -0,0 +1,181 @@ +/** + * Tests shard key updates on a sharded timeseries collection. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * # Update on a sharded timeseries collection is supported since 7.1 + * requires_fcv_71, + * # TODO SERVER-76583: Remove following two tags. + * does_not_support_retryable_writes, + * requires_non_retryable_writes, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ + +import { + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, + generateTimeValue, + getCallerName, + metaFieldName, + prepareShardedCollection, + setUpShardedCluster, + tearDownShardedCluster, + testDB, + timeFieldName +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; + +const docs = [ + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, +]; + +setUpShardedCluster(); + +(function testUpdateMultiModifyingShardKey() { + // This will create a sharded collection with 2 chunks: (MinKey, meta: "A"] and [meta: "B", + // MaxKey). + const coll = prepareShardedCollection( + {collName: getCallerName(1), initialDocList: docs, includeMeta: true}); + + // This update command tries to update doc5_b_f104 into {_id: 5, meta: "A", f: 104}. The owning + // shard would be the shard that owns [MinKey, meta: "A"]. + const updateMultiCmd = { + update: coll.getName(), + updates: [{ + q: {[metaFieldName]: "B", f: {$gt: 103}}, + u: {$set: {[metaFieldName]: "A"}}, + multi: true + }] + }; + jsTestLog(`Running update multi: ${tojson(updateMultiCmd)}`); + + // We don't allow update multi to modify the shard key at all. + const res = assert.commandFailedWithCode(testDB.runCommand(updateMultiCmd), + ErrorCodes.InvalidOptions, + `cmd = ${tojson(updateMultiCmd)}`); + assert.sameMembers(docs, coll.find().toArray(), "Collection contents did not match"); +})(); + +(function testUpdateOneModifyingShardKey() { + // This will create a sharded collection with 2 chunks: (MinKey, meta: "A"] and [meta: "B", + // MaxKey). + const coll = prepareShardedCollection( + {collName: getCallerName(1), initialDocList: docs, includeMeta: true}); + + // Update one command as retryable write can modify the shard key. + const session = testDB.getMongo().startSession({retryWrites: true}); + const sessionDB = session.getDatabase(testDB.getName()); + + // This update command tries to update doc5_b_f104 into {_id: 5, meta: "A", f: 104}. The owning + // shard would be the shard that owns (MinKey, meta: "A"]. 
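// Shard-key-changing updates have to run as retryable writes or inside a transaction so the
// document can move between owning shards. A minimal sketch of the retryable-write form used just
// below, assuming a mongos connection `db` and an illustrative collection "ts" whose meta field
// "tag" is (part of) the shard key:
const rwSession = db.getMongo().startSession({retryWrites: true});
const rwDB = rwSession.getDatabase(db.getName());
assert.commandWorked(rwDB.runCommand({
    update: "ts",
    // multi: true cannot change the shard key (see the InvalidOptions assertion above).
    updates: [{q: {tag: "B"}, u: {$set: {tag: "A"}}, multi: false}],
    lsid: rwSession.getSessionId(),
    txnNumber: NumberLong(1),
}));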
+ const query = {[metaFieldName]: "B", f: {$gt: 103}}; + const update = {$set: {[metaFieldName]: "A"}}; + + jsTestLog(`Running updateOne: {q: ${tojson(query)}, u: ${tojson(update)}}`); + + const result = assert.commandWorked(sessionDB.runCommand({ + update: coll.getName(), + updates: [{q: query, u: update, multi: false}], + lsid: session.getSessionId(), + txnNumber: NumberLong(1) + })); + assert.eq(1, result.nModified, tojson(result)); + + assert.docEq({_id: 5, [metaFieldName]: "A", f: 104, [timeFieldName]: generateTimeValue(5)}, + coll.findOne({_id: 5}), + "Document was not updated correctly " + tojson(coll.find().toArray())); +})(); + +(function testFindOneAndUpdateModifyingMetaShardKey() { + // This will create a sharded collection with 2 chunks: (MinKey, meta: "A"] and [meta: "B", + // MaxKey). + const coll = prepareShardedCollection( + {collName: getCallerName(1), initialDocList: docs, includeMeta: true}); + + // This findAndModify command tries to update doc5_b_f104 into {_id: 5, meta: "A", f: 104}. The + // owning shard would be the shard that owns (MinKey, meta: "A"]. + const findOneAndUpdateCmd = { + findAndModify: coll.getName(), + query: {[metaFieldName]: "B", f: {$gt: 103}}, + update: {$set: {[metaFieldName]: "A"}}, + new: true, + }; + jsTestLog(`Running findAndModify update: ${tojson(findOneAndUpdateCmd)}`); + + // As of now, shard key update is only allowed in retryable writes or transactions when + // 'featureFlagUpdateDocumentShardKeyUsingTransactionApi' is turned off and findAndModify on + // timeseries collections does not support retryable writes. So we should use transaction here. + // + // TODO SERVER-67429 or SERVER-76583 Relax this restriction. + const session = testDB.getMongo().startSession(); + const sessionDB = session.getDatabase(testDB.getName()); + session.startTransaction(); + + const res = assert.commandWorked(sessionDB.runCommand(findOneAndUpdateCmd)); + assert.eq(1, res.lastErrorObject.n, "Expected 1 document to be updated"); + assert.eq( + true, res.lastErrorObject.updatedExisting, "Expected existing document to be updated"); + const updatedDoc = Object.assign(doc5_b_f104, {[metaFieldName]: "A"}); + assert.docEq(updatedDoc, res.value, "Wrong new document"); + + session.commitTransaction(); + + let expectedDocs = docs.filter(doc => doc._id !== 5); + expectedDocs.push(updatedDoc); + assert.sameMembers(expectedDocs, coll.find().toArray(), "Collection contents did not match"); +})(); + +(function testFindOneAndUpdateModifyingTimeShardKey() { + // This will create a sharded collection with 2 chunks: [MinKey, + // 'splitTimePointBetweenTwoShards') and ['splitTimePointBetweenTwoShards', MaxKey). + const coll = prepareShardedCollection( + {collName: getCallerName(1), initialDocList: docs, includeMeta: false}); + + // This findAndModify command tries to update doc1_a_nofields into {_id: 1, tag: "A", + // time: generateTimeValue(8)}. The owning shard would be the shard that owns [MinKey, + // 'splitTimePointBetweenTwoShards'). + const findOneAndUpdateCmd = { + findAndModify: coll.getName(), + query: {[timeFieldName]: generateTimeValue(1)}, + update: {$set: {[timeFieldName]: generateTimeValue(8)}}, + }; + jsTestLog(`Running findAndModify update: ${tojson(findOneAndUpdateCmd)}`); + + // As of now, shard key update is allowed in retryable writes or transactions when 'featureFlag- + // UpdateDocumentShardKeyUsingTransactionApi' is turned off and findAndModify on timeseries + // collections does not support retryable writes. So we should use transaction here. 
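// A condensed sketch of the transaction wrapper these findAndModify shard key updates rely on,
// assuming a mongos connection `db`; collection and field names are illustrative:
const txnSession = db.getMongo().startSession();
const txnDB = txnSession.getDatabase(db.getName());
txnSession.startTransaction();
const famRes = assert.commandWorked(txnDB.runCommand({
    findAndModify: "ts",
    query: {tag: "B"},
    update: {$set: {tag: "A"}},  // moves the measurement to the chunk owned by the other shard
    new: true,
}));
txnSession.commitTransaction();
printjson(famRes.value);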
+ // + // TODO SERVER-67429 or SERVER-76583 Relax this restriction. + const session = testDB.getMongo().startSession(); + const sessionDB = session.getDatabase(testDB.getName()); + session.startTransaction(); + + const res = assert.commandWorked(sessionDB.runCommand(findOneAndUpdateCmd)); + assert.eq(1, res.lastErrorObject.n, "Expected 1 document to be updated"); + assert.eq( + true, res.lastErrorObject.updatedExisting, "Expected existing document to be updated"); + assert.docEq(doc1_a_nofields, res.value, "Wrong old document"); + + session.commitTransaction(); + + const updatedDoc = Object.assign(doc1_a_nofields, {[timeFieldName]: generateTimeValue(8)}); + let expectedDocs = docs.filter(doc => doc._id !== 1); + expectedDocs.push(updatedDoc); + assert.sameMembers(expectedDocs, coll.find().toArray(), "Collection contents did not match"); +})(); + +tearDownShardedCluster(); diff --git a/jstests/sharding/timeseries_time_value_rounding.js b/jstests/sharding/timeseries_time_value_rounding.js index 2fb65f5a9ec8a..fa7f0bba1dd2d 100644 --- a/jstests/sharding/timeseries_time_value_rounding.js +++ b/jstests/sharding/timeseries_time_value_rounding.js @@ -6,10 +6,6 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); load("jstests/aggregation/extras/utils.js"); Random.setRandomSeed(); @@ -23,13 +19,6 @@ const metaField = 'hostId'; const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); const mongos = st.s0; -// Sanity checks. -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - // Databases. assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); const mainDB = mongos.getDB(dbName); @@ -134,5 +123,4 @@ try { runTest(); } finally { st.stop(); -} -})(); +} \ No newline at end of file diff --git a/jstests/sharding/timeseries_update.js b/jstests/sharding/timeseries_update.js index c11127321d97b..c77779f682101 100644 --- a/jstests/sharding/timeseries_update.js +++ b/jstests/sharding/timeseries_update.js @@ -6,10 +6,7 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); const mongos = st.s0; @@ -22,65 +19,40 @@ const dbName = 'testDB'; const collName = 'coll'; const timeField = "time"; const metaField = "tag"; -const dateTime = ISODate("2021-07-12T16:00:00Z"); - -// -// Checks for feature flags. -// - -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} +const dateTime1 = ISODate("2021-07-12T16:00:00Z"); +const dateTime2 = ISODate("2021-07-13T16:00:00Z"); const testDB = mongos.getDB(dbName); testDB.dropDatabase(); assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); -if (!TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(st.shard0)) { - // Ensure that the feature flag correctly prevents us from running an update on a sharded - // timeseries collection. 
- assert.commandWorked(testDB.createCollection(collName, {timeseries: {timeField, metaField}})); - const coll = testDB.getCollection(collName); - assert.commandWorked(coll.createIndex({[timeField]: 1})); - assert.commandWorked(mongos.adminCommand({ - shardCollection: `${dbName}.${collName}`, - key: {[timeField]: 1}, - })); - assert.commandFailedWithCode( - testDB.runCommand( - {update: coll.getName(), updates: [{q: {}, u: {[metaField]: 1}, multi: true}]}), - [ErrorCodes.NotImplemented, ErrorCodes.InvalidOptions]); - st.stop(); - return; -} +const arbitraryUpdatesEnabled = TimeseriesTest.arbitraryUpdatesEnabled(st.shard0); const doc1 = { _id: 1, - [timeField]: dateTime, + [timeField]: dateTime1, [metaField]: {a: "A", b: "B"} }; const doc2 = { _id: 2, - [timeField]: dateTime, + [timeField]: dateTime2, [metaField]: {c: "C", d: 2}, f: [{"k": "K", "v": "V"}] }; const doc3 = { _id: 3, - [timeField]: dateTime, + [timeField]: dateTime1, f: "F" }; const doc4 = { _id: 4, - [timeField]: dateTime, + [timeField]: dateTime1, [metaField]: {a: "A", b: "B"}, f: "F" }; const doc5 = { _id: 5, - [timeField]: dateTime, + [timeField]: dateTime1, [metaField]: {a: "A", b: "B", c: "C"} }; @@ -223,11 +195,19 @@ function expectFailedUpdate(initialDocList) { // function testCaseMultiFalseUpdateFails({testUpdate}) { + if (arbitraryUpdatesEnabled) { + return; + } + testUpdate({updates: [{q: {[metaField]: {b: "B"}}, u: {$set: {[metaField]: {b: "C"}}}}]}, expectFailedUpdate([doc1])); } function testCaseReplacementAndPipelineUpdateFails({testUpdate}) { + if (arbitraryUpdatesEnabled) { + return; + } + const expectFailedUpdateDoc = expectFailedUpdate([doc2]); // Replace a document to have no metaField, which should fail since updates with replacement @@ -267,6 +247,10 @@ function testCaseReplacementAndPipelineUpdateFails({testUpdate}) { } function testCaseNoMetaFieldQueryUpdateFails({testUpdate}) { + if (arbitraryUpdatesEnabled) { + return; + } + // Query on a field which is not the (nonexistent) metaField. testUpdate({ updates: [{ @@ -331,6 +315,10 @@ function testCaseNoMetaFieldQueryUpdateFails({testUpdate}) { } function testCaseIllegalMetaFieldUpdateFails({testUpdate}) { + if (arbitraryUpdatesEnabled) { + return; + } + // Query on the metaField and modify a field that is not the metaField. testUpdate({ updates: [{ @@ -399,7 +387,7 @@ function testCaseBatchUpdates({testUpdate}) { ], resultDocList: [{ _id: 2, - [timeField]: dateTime, + [timeField]: dateTime1, [metaField]: 3, f: [{"k": "K", "v": "V"}], }], @@ -447,7 +435,7 @@ function testCaseBatchUpdates({testUpdate}) { ], resultDocList: [{ _id: 2, - [timeField]: dateTime, + [timeField]: dateTime1, [metaField]: {c: "C", d: 8}, f: [{"k": "K", "v": "V"}], }], @@ -459,54 +447,56 @@ function testCaseBatchUpdates({testUpdate}) { // Multiple updates, ordered: query on the metaField and modify a field that is not the // metaField using dot notation. - testUpdate({ - updates: [ - { - q: {[metaField]: {c: "C", d: 2}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaField]: {c: "C", d: 2}}, - u: {$inc: {[metaField + ".d"]: 6}}, - multi: true, - } - ] - }, - expectFailedUpdate([doc2])); - - // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and the - // metaField. The first and last updates should succeed. 
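// The 'arbitraryUpdatesEnabled' flag read above lets the same test body run with and without
// arbitrary time-series updates. Such helpers typically reduce to a server parameter lookup along
// the lines of the sketch below (the actual implementation of
// TimeseriesTest.arbitraryUpdatesEnabled is not shown here; the flag name is taken from the test
// tags in this patch, and `conn` is an assumed shard or mongod connection):
function arbitraryUpdatesEnabledSketch(conn) {
    const res = assert.commandWorked(conn.getDB("admin").runCommand(
        {getParameter: 1, featureFlagTimeseriesUpdatesSupport: 1}));
    return res.featureFlagTimeseriesUpdatesSupport.value;
}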
- testUpdate({ - initialDocList: [doc2], - updates: [ - { - q: {[metaField]: {c: "C", d: 2}}, - u: {$inc: {[metaField + ".d"]: 6}}, - multi: true, - }, - { - q: {[metaField]: {c: "C", d: 8}}, - u: {$set: {"f1.0": "f2"}}, - multi: true, - }, - { - q: {[metaField]: {c: "C", d: 8}}, - u: {$inc: {[metaField + ".d"]: 7}}, - multi: true, - } - ], - resultDocList: [{ - _id: 2, - [timeField]: dateTime, - [metaField]: {c: "C", d: 15}, - f: [{"k": "K", "v": "V"}], - }], - ordered: false, - n: 2, - pathToMetaFieldBeingUpdated: "d", - failCode: ErrorCodes.InvalidOptions, - }); + if (!arbitraryUpdatesEnabled) { + testUpdate({ + updates: [ + { + q: {[metaField]: {c: "C", d: 2}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaField]: {c: "C", d: 2}}, + u: {$inc: {[metaField + ".d"]: 6}}, + multi: true, + } + ] + }, + expectFailedUpdate([doc2])); + + // Multiple updates, unordered: Modify the metaField, a field that is not the metaField, and + // the metaField. The first and last updates should succeed. + testUpdate({ + initialDocList: [doc2], + updates: [ + { + q: {[metaField]: {c: "C", d: 2}}, + u: {$inc: {[metaField + ".d"]: 6}}, + multi: true, + }, + { + q: {[metaField]: {c: "C", d: 8}}, + u: {$set: {"f1.0": "f2"}}, + multi: true, + }, + { + q: {[metaField]: {c: "C", d: 8}}, + u: {$inc: {[metaField + ".d"]: 7}}, + multi: true, + } + ], + resultDocList: [{ + _id: 2, + [timeField]: dateTime1, + [metaField]: {c: "C", d: 15}, + f: [{"k": "K", "v": "V"}], + }], + ordered: false, + n: 2, + pathToMetaFieldBeingUpdated: "d", + failCode: ErrorCodes.InvalidOptions, + }); + } } function testCaseValidMetaFieldUpdates({testUpdate}) { @@ -518,7 +508,7 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { u: {$rename: {[metaField + ".a"]: metaField + ".z"}}, multi: true, }], - resultDocList: [{_id: 1, [timeField]: dateTime, [metaField]: {z: "A", b: "B"}}, doc2], + resultDocList: [{_id: 1, [timeField]: dateTime1, [metaField]: {z: "A", b: "B"}}, doc2], n: 1, pathToMetaFieldBeingUpdated: "a", }); @@ -531,7 +521,7 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { u: {$set: {[metaField]: {c: "C"}}}, multi: true, }], - resultDocList: [{_id: 1, [timeField]: dateTime, [metaField]: {c: "C"}}], + resultDocList: [{_id: 1, [timeField]: dateTime1, [metaField]: {c: "C"}}], n: 1, pathToMetaFieldBeingUpdated: "", }); @@ -546,7 +536,7 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { }], resultDocList: [ doc1, - {_id: 2, [timeField]: dateTime, [metaField]: {c: 1, d: 2}, f: [{"k": "K", "v": "V"}]}, + {_id: 2, [timeField]: dateTime2, [metaField]: {c: 1, d: 2}, f: [{"k": "K", "v": "V"}]}, doc4, doc5 ], @@ -564,10 +554,15 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { multi: true, }], resultDocList: [ - {_id: 1, [timeField]: dateTime, [metaField]: {b: "B"}}, - {_id: 2, [timeField]: dateTime, [metaField]: {c: "C", d: 2}, f: [{"k": "K", "v": "V"}]}, - {_id: 4, [timeField]: dateTime, [metaField]: {b: "B"}, f: "F"}, - {_id: 5, [timeField]: dateTime, [metaField]: {b: "B", c: "C"}} + {_id: 1, [timeField]: dateTime1, [metaField]: {b: "B"}}, + { + _id: 2, + [timeField]: dateTime1, + [metaField]: {c: "C", d: 2}, + f: [{"k": "K", "v": "V"}] + }, + {_id: 4, [timeField]: dateTime1, [metaField]: {b: "B"}, f: "F"}, + {_id: 5, [timeField]: dateTime1, [metaField]: {b: "B", c: "C"}} ], ordered: false, n: 3, @@ -582,7 +577,7 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { u: {$set: {[metaField]: {c: "C"}}}, multi: true, }], - resultDocList: [{_id: 1, [timeField]: dateTime, [metaField]: {c: "C"}}], 
+ resultDocList: [{_id: 1, [timeField]: dateTime1, [metaField]: {c: "C"}}], n: 1, pathToMetaFieldBeingUpdated: "", }); @@ -595,7 +590,7 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { u: {$set: {[metaField]: {c: "C"}}}, multi: true, }], - resultDocList: [{_id: 1, [timeField]: dateTime, [metaField]: {c: "C"}}], + resultDocList: [{_id: 1, [timeField]: dateTime1, [metaField]: {c: "C"}}], n: 1, pathToMetaFieldBeingUpdated: "", }); @@ -608,9 +603,12 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { u: {$inc: {[metaField + ".d"]: 10}}, multi: true, }], - resultDocList: [ - {_id: 2, [timeField]: dateTime, [metaField]: {c: "C", d: 12}, f: [{"k": "K", "v": "V"}]} - ], + resultDocList: [{ + _id: 2, + [timeField]: dateTime2, + [metaField]: {c: "C", d: 12}, + f: [{"k": "K", "v": "V"}] + }], n: 1, pathToMetaFieldBeingUpdated: "d", }); @@ -624,8 +622,8 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { multi: true, }], resultDocList: [ - {_id: 1, [timeField]: dateTime, [metaField]: {z: "Z"}}, - {_id: 2, [timeField]: dateTime, [metaField]: {z: "Z"}, f: [{"k": "K", "v": "V"}]} + {_id: 1, [timeField]: dateTime1, [metaField]: {z: "Z"}}, + {_id: 2, [timeField]: dateTime2, [metaField]: {z: "Z"}, f: [{"k": "K", "v": "V"}]} ], n: 2, pathToMetaFieldBeingUpdated: "", @@ -636,7 +634,7 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { initialDocList: [doc1], updates: [{q: {[metaField]: {a: "A", b: "B"}}, u: {$unset: {[metaField]: ""}}, multi: true}], - resultDocList: [{_id: 1, [timeField]: dateTime}], + resultDocList: [{_id: 1, [timeField]: dateTime1}], n: 1, pathToMetaFieldBeingUpdated: "", }); @@ -652,9 +650,9 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { }, ], resultDocList: [ - {_id: 1, [timeField]: dateTime, [metaField]: {a: "A", b: "B", c: "C"}}, - {_id: 4, [timeField]: dateTime, [metaField]: {a: "A", b: "B", c: "C"}, f: "F"}, - {_id: 5, [timeField]: dateTime, [metaField]: {a: "A", b: "B", c: "C"}} + {_id: 1, [timeField]: dateTime1, [metaField]: {a: "A", b: "B", c: "C"}}, + {_id: 4, [timeField]: dateTime1, [metaField]: {a: "A", b: "B", c: "C"}, f: "F"}, + {_id: 5, [timeField]: dateTime1, [metaField]: {a: "A", b: "B", c: "C"}} ], n: 3, nModified: 2, @@ -670,8 +668,8 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { multi: true }], resultDocList: [ - {_id: 1, [timeField]: dateTime, [metaField]: "a"}, - {_id: 2, [timeField]: dateTime, [metaField]: "a", f: [{"k": "K", "v": "V"}]}, + {_id: 1, [timeField]: dateTime1, [metaField]: "a"}, + {_id: 2, [timeField]: dateTime2, [metaField]: "a", f: [{"k": "K", "v": "V"}]}, doc3 ], n: 2, @@ -686,57 +684,60 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { u: {$set: {[metaField]: "a"}}, multi: true }], - resultDocList: [{_id: 1, [timeField]: dateTime, [metaField]: "a"}, doc2, doc3], - n: 1, - pathToMetaFieldBeingUpdated: "", - }); - - // Query for documents using $jsonSchema with a field that is not the metaField required. - testUpdate({ - updates: [{ - q: {"$jsonSchema": {"required": [metaField, timeField]}}, - u: {$set: {[metaField]: "a"}}, - multi: true - }], - }, - expectFailedUpdate([doc1, doc2, doc3])); - - const nestedMetaObj = {_id: 6, [timeField]: dateTime, [metaField]: {[metaField]: "A", a: 1}}; - - // Query for documents using $jsonSchema with the metaField required and a required subfield of - // the metaField with the same name as the metaField. 
- testUpdate({ - initialDocList: [doc1, nestedMetaObj], - updates: [{ - q: { - "$jsonSchema": { - "required": [metaField], - "properties": {[metaField]: {"required": [metaField]}} - } - }, - u: {$set: {[metaField]: "a"}}, - multi: true - }], - resultDocList: [doc1, {_id: 6, [timeField]: dateTime, [metaField]: "a", a: 1}], + resultDocList: [{_id: 1, [timeField]: dateTime1, [metaField]: "a"}, doc2, doc3], n: 1, pathToMetaFieldBeingUpdated: "", }); - // Query for documents using $jsonSchema with the metaField required and an optional field that - // is not the metaField. - testUpdate({ - updates: [{ - q: { - "$jsonSchema": { - "required": [metaField], - "properties": {"measurement": {description: "can be any value"}} - } - }, - u: {$set: {[metaField]: "a"}}, - multi: true - }] - }, - expectFailedUpdate([doc1, nestedMetaObj])); + if (!arbitraryUpdatesEnabled) { + // Query for documents using $jsonSchema with a field that is not the metaField required. + testUpdate({ + updates: [{ + q: {"$jsonSchema": {"required": [metaField, timeField]}}, + u: {$set: {[metaField]: "a"}}, + multi: true + }], + }, + expectFailedUpdate([doc1, doc2, doc3])); + + const nestedMetaObj = + {_id: 6, [timeField]: dateTime1, [metaField]: {[metaField]: "A", a: 1}}; + + // Query for documents using $jsonSchema with the metaField required and a required subfield + // of the metaField with the same name as the metaField. + testUpdate({ + initialDocList: [doc1, nestedMetaObj], + updates: [{ + q: { + "$jsonSchema": { + "required": [metaField], + "properties": {[metaField]: {"required": [metaField]}} + } + }, + u: {$set: {[metaField]: "a"}}, + multi: true + }], + resultDocList: [doc1, {_id: 6, [timeField]: dateTime1, [metaField]: "a", a: 1}], + n: 1, + pathToMetaFieldBeingUpdated: "", + }); + + // Query for documents using $jsonSchema with the metaField required and an optional field + // that is not the metaField. + testUpdate({ + updates: [{ + q: { + "$jsonSchema": { + "required": [metaField], + "properties": {"measurement": {description: "can be any value"}} + } + }, + u: {$set: {[metaField]: "a"}}, + multi: true + }] + }, + expectFailedUpdate([doc1, nestedMetaObj])); + } // Query for documents on the metaField with the metaField nested within nested operators. testUpdate({ @@ -751,7 +752,7 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { u: {$set: {[metaField]: "a"}}, multi: true }], - resultDocList: [{_id: 1, [timeField]: dateTime, [metaField]: "a"}, doc2, doc3], + resultDocList: [{_id: 1, [timeField]: dateTime1, [metaField]: "a"}, doc2, doc3], n: 1, pathToMetaFieldBeingUpdated: "", }); @@ -770,16 +771,18 @@ function testCaseValidMetaFieldUpdates({testUpdate}) { n: 0, }); - // Do the same test case as above but with upsert:true, which should fail. - testUpdate({ - updates: [{ - q: {[metaField]: "Z"}, - u: {$set: {[metaField]: 5}}, - multi: true, - upsert: true, - }] - }, - expectFailedUpdate([doc1, doc4, doc5])); + if (!arbitraryUpdatesEnabled) { + // Do the same test case as above but with upsert:true, which should fail. 
+ testUpdate({ + updates: [{ + q: {[metaField]: "Z"}, + u: {$set: {[metaField]: 5}}, + multi: true, + upsert: true, + }] + }, + expectFailedUpdate([doc1, doc4, doc5])); + } } function testCaseUpdateWithLetDoc({testUpdate}) { @@ -793,9 +796,9 @@ function testCaseUpdateWithLetDoc({testUpdate}) { }], letDoc: {oldVal: "A"}, resultDocList: [ - {_id: 1, [timeField]: dateTime, [metaField]: "aaa"}, - {_id: 4, [timeField]: dateTime, [metaField]: "aaa", f: "F"}, - {_id: 5, [timeField]: dateTime, [metaField]: "aaa"} + {_id: 1, [timeField]: dateTime1, [metaField]: "aaa"}, + {_id: 4, [timeField]: dateTime1, [metaField]: "aaa", f: "F"}, + {_id: 5, [timeField]: dateTime1, [metaField]: "aaa"} ], n: 3, pathToMetaFieldBeingUpdated: "", @@ -812,7 +815,7 @@ function testCaseUpdateWithLetDoc({testUpdate}) { multi: true, }], letDoc: {myVar: "aaa"}, - resultDocList: [{_id: 1, [timeField]: dateTime, [metaField]: "$$myVar"}], + resultDocList: [{_id: 1, [timeField]: dateTime1, [metaField]: "$$myVar"}], n: 1, pathToMetaFieldBeingUpdated: "", }); @@ -834,9 +837,9 @@ function testCaseUpdateWithLetDoc({testUpdate}) { ], letDoc: {val1: "A", val2: "aaa"}, resultDocList: [ - {_id: 1, [timeField]: dateTime, [metaField]: "bbb"}, - {_id: 4, [timeField]: dateTime, [metaField]: "bbb", f: "F"}, - {_id: 5, [timeField]: dateTime, [metaField]: "bbb"} + {_id: 1, [timeField]: dateTime1, [metaField]: "bbb"}, + {_id: 4, [timeField]: dateTime1, [metaField]: "bbb", f: "F"}, + {_id: 5, [timeField]: dateTime1, [metaField]: "bbb"} ], n: 6, pathToMetaFieldBeingUpdated: "", @@ -844,9 +847,9 @@ function testCaseUpdateWithLetDoc({testUpdate}) { } function testCaseCollationUpdates({testUpdate}) { - const collationDoc1 = {_id: 1, [timeField]: dateTime, [metaField]: "café"}; - const collationDoc2 = {_id: 2, [timeField]: dateTime, [metaField]: "cafe"}; - const collationDoc3 = {_id: 3, [timeField]: dateTime, [metaField]: "cafE"}; + const collationDoc1 = {_id: 1, [timeField]: dateTime1, [metaField]: "café"}; + const collationDoc2 = {_id: 2, [timeField]: dateTime1, [metaField]: "cafe"}; + const collationDoc3 = {_id: 3, [timeField]: dateTime1, [metaField]: "cafE"}; const initialDocList = [collationDoc1, collationDoc2, collationDoc3]; // Query on the metaField and modify the metaField using collation with strength level 1. @@ -859,9 +862,9 @@ function testCaseCollationUpdates({testUpdate}) { collation: {locale: "fr", strength: 1}, }], resultDocList: [ - {_id: 1, [timeField]: dateTime, [metaField]: "Updated"}, - {_id: 2, [timeField]: dateTime, [metaField]: "Updated"}, - {_id: 3, [timeField]: dateTime, [metaField]: "Updated"} + {_id: 1, [timeField]: dateTime1, [metaField]: "Updated"}, + {_id: 2, [timeField]: dateTime1, [metaField]: "Updated"}, + {_id: 3, [timeField]: dateTime1, [metaField]: "Updated"} ], n: 3, pathToMetaFieldBeingUpdated: "", @@ -879,7 +882,7 @@ function testCaseCollationUpdates({testUpdate}) { }], resultDocList: [ collationDoc1, - {_id: 2, [timeField]: dateTime, [metaField]: "Updated"}, + {_id: 2, [timeField]: dateTime1, [metaField]: "Updated"}, collationDoc3, ], n: 1, @@ -889,9 +892,9 @@ function testCaseCollationUpdates({testUpdate}) { function testCaseNullUpdates({testUpdate}) { // Assumes shard key is meta.a. 
- const nullDoc = {_id: 1, [timeField]: dateTime, [metaField]: {a: null, b: 1}}; - const missingDoc1 = {_id: 2, [timeField]: dateTime, [metaField]: {b: 1}}; - const missingDoc2 = {_id: 3, [timeField]: dateTime, [metaField]: "foo"}; + const nullDoc = {_id: 1, [timeField]: dateTime1, [metaField]: {a: null, b: 1}}; + const missingDoc1 = {_id: 2, [timeField]: dateTime1, [metaField]: {b: 1}}; + const missingDoc2 = {_id: 3, [timeField]: dateTime1, [metaField]: "foo"}; const initialDocList = [nullDoc, missingDoc1, missingDoc2]; // Query on the metaField and modify the metaField using collation with strength level 1. @@ -903,9 +906,9 @@ function testCaseNullUpdates({testUpdate}) { multi: true, }], resultDocList: [ - {_id: 1, [timeField]: dateTime, [metaField]: "Updated"}, - {_id: 2, [timeField]: dateTime, [metaField]: "Updated"}, - {_id: 3, [timeField]: dateTime, [metaField]: "Updated"}, + {_id: 1, [timeField]: dateTime1, [metaField]: "Updated"}, + {_id: 2, [timeField]: dateTime1, [metaField]: "Updated"}, + {_id: 3, [timeField]: dateTime1, [metaField]: "Updated"}, ], n: 3, }); @@ -926,7 +929,9 @@ const tests = [ testCaseBatchUpdates, testCaseValidMetaFieldUpdates, ]; -testUpdates({shardKeyTimeField: timeField, timeseriesOptions: {timeField}, tests}); +if (!arbitraryUpdatesEnabled) { + testUpdates({shardKeyTimeField: timeField, timeseriesOptions: {timeField}, tests}); +} testUpdates({shardKeyMetaFieldPath: metaField, timeseriesOptions, tests}); testUpdates( {shardKeyTimeField: timeField, shardKeyMetaFieldPath: metaField, timeseriesOptions, tests}); @@ -952,5 +957,4 @@ testUpdates({ tests: testsForMetaSubfieldShardKey, }); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_update_multi.js b/jstests/sharding/timeseries_update_multi.js new file mode 100644 index 0000000000000..fcd31c3a1f227 --- /dev/null +++ b/jstests/sharding/timeseries_update_multi.js @@ -0,0 +1,424 @@ +/** + * Verifies multi-updates on sharded timeseries collection. These commands operate on multiple + * individual measurements by targeting them with their meta and/or time field value. + * + * @tags: [ + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ + +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import {TimeseriesMultiUpdateUtil} from "jstests/sharding/libs/timeseries_update_multi_util.js"; + +Random.setRandomSeed(); + +const dbName = jsTestName(); +const collName = 'sharded_timeseries_update_multi'; +const timeField = TimeseriesMultiUpdateUtil.timeField; +const metaField = TimeseriesMultiUpdateUtil.metaField; +const testStringNoCase = "teststring"; +const caseInsensitiveCollation = { + locale: "en", + strength: 2 +}; + +// Connections. +const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); +const mongos = st.s0; + +// Databases. +assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); +const testDB = mongos.getDB(dbName); + +const requestConfigurations = { + // Empty filter leads to broadcasted request. + emptyFilter: { + updateList: [{ + q: {}, + u: {$set: {"newField": 1}}, + multi: true, + }], + expectedUpdates: {findQuery: {"newField": {$eq: 1}}, expectedMatchingIds: [0, 1, 2, 3]}, + reachesPrimary: true, + reachesOther: true, + }, + // Non-shard key filter without meta or time field leads to broadcasted request. 
+ nonShardKeyFilter: { + updateList: [ + { + q: {f: 0}, + u: {$unset: {f: ""}}, + multi: true, + }, + { + q: {f: 2}, + u: {$unset: {f: ""}}, + multi: true, + } + ], + expectedUpdates: {findQuery: {f: {$exists: false}}, expectedMatchingIds: [0, 2]}, + reachesPrimary: true, + reachesOther: true, + }, + // This time field filter has the request targeted to the shard0. + timeFilterOneShard: { + updateList: [ + { + q: {[timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(0), f: 0}, + u: [ + {$unset: "f"}, + {$set: {"newField": 1}}, + ], + multi: true, + }, + { + q: {[timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(1), f: 1}, + u: [ + {$unset: "f"}, + {$set: {"newField": 1}}, + ], + multi: true, + } + ], + expectedUpdates: { + findQuery: {$and: [{"f": {$exists: false}}, {"newField": {$exists: true}}]}, + expectedMatchingIds: [0, 1] + }, + reachesPrimary: true, + reachesOther: false, + }, + // This time field filter has the request targeted to both shards. + timeFilterTwoShards: { + updateList: [ + { + q: {[timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(1), f: 1}, + u: {$set: {f: ["arr", "ay"]}}, + multi: true, + }, + { + q: {[timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(3), f: 3}, + u: {$set: {f: ["arr", "ay"]}}, + multi: true, + } + ], + expectedUpdates: {findQuery: {f: ["arr", "ay"]}, expectedMatchingIds: [1, 3]}, + reachesPrimary: true, + reachesOther: true, + }, + // This meta field filter targets shard1 and queries on the 'stringField' using the default + // collation. We expect no document to be modified. + metaFilterOneShard: { + updateList: [{ + q: {[metaField]: 2, f: 2, stringField: testStringNoCase}, + u: [ + {$unset: "f"}, + {$set: {"newField": 1}}, + {$set: {"_id": 200}}, + ], + multi: true, + }], + expectedUpdates: { + findQuery: {$and: [{[metaField]: {$eq: 2}}, {"_id": {$eq: 200}}]}, + expectedMatchingIds: [] + }, + reachesPrimary: false, + reachesOther: true, + }, + // This meta field filter targets shard1 and queries on the 'stringField' using a case + // insensitive collation. + metaFilterOneShardWithCaseInsensitiveCollation: { + updateList: [{ + q: {[metaField]: 2, f: 2, stringField: testStringNoCase}, + u: [ + {$unset: "f"}, + {$set: {"newField": 1}}, + {$set: {"_id": 200}}, + ], + multi: true, + collation: caseInsensitiveCollation, + }], + expectedUpdates: { + findQuery: {$and: [{[metaField]: {$eq: 2}}, {"_id": {$eq: 200}}]}, + expectedMatchingIds: [200] + }, + reachesPrimary: false, + reachesOther: true, + }, + // This string, meta field filter targets shard1 using the default collation. + metaFilterOneShardString: { + updateList: [{ + q: {[metaField]: `string:3`}, + u: [ + {$set: {"newField": 1}}, + {$set: {"_id": 300}}, + ], + multi: true, + }], + expectedUpdates: {findQuery: {"_id": {$eq: 300}}, expectedMatchingIds: [300]}, + reachesPrimary: false, + reachesOther: true, + }, + // This string, meta field filter broadcasts the request because collection routing info is + // organized by the collection default collation and modifies the corresponding doc using a case + // insensitive collation. + metaFilterTwoShardsStringCaseInsensitive: { + updateList: [{ + q: {[metaField]: `StrinG:3`}, + u: [ + {$set: {"newField": 1}}, + {$set: {"_id": 300}}, + ], + multi: true, + collation: caseInsensitiveCollation, + }], + expectedUpdates: {findQuery: {"_id": {$eq: 300}}, expectedMatchingIds: [300]}, + reachesPrimary: true, + reachesOther: true, + }, + // Meta + time filter has the request targeted to shard1. 
+ metaTimeFilterOneShard: { + updateList: [{ + q: {[metaField]: 2, [timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(2), f: 2}, + u: {$set: {f: 1000}}, + multi: true, + }], + expectedUpdates: {findQuery: {f: 1000}, expectedMatchingIds: [2]}, + reachesPrimary: false, + reachesOther: true, + }, + metaFilterTwoShards: { + updateList: [ + { + q: {[timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(1), f: 1}, + u: {$set: {"newField": 101}}, + multi: true, + }, + { + q: {[timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(3), f: 3}, + u: {$set: {"newField": 101}}, + multi: true, + } + ], + expectedUpdates: {findQuery: {"newField": 101}, expectedMatchingIds: [1, 3]}, + reachesPrimary: true, + reachesOther: true, + }, + metaObjectFilterOneShard: { + updateList: [{ + q: {[metaField]: {a: 2}, f: 2}, + u: {$set: {"newField": 101}}, + multi: true, + }], + expectedUpdates: {findQuery: {"newField": 101}, expectedMatchingIds: [2]}, + reachesPrimary: false, + reachesOther: true, + }, + // Meta object + time filter has the request targeted to shard1. + metaObjectTimeFilterOneShard: { + updateList: [{ + q: { + [metaField]: {a: 2}, + [timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(2), + f: 2 + }, + u: {$set: {f: 2000}}, + multi: true, + }], + expectedUpdates: {findQuery: {f: 2000}, expectedMatchingIds: [2]}, + reachesPrimary: false, + reachesOther: true, + }, + metaObjectFilterTwoShards: { + updateList: [ + { + q: {[metaField]: {a: 1}, f: 1}, + u: {$set: {"newField": 101}}, + multi: true, + }, + { + q: {[metaField]: {a: 2}, f: 2}, + u: {$set: {"newField": 101}}, + multi: true, + } + ], + expectedUpdates: {findQuery: {"newField": 101}, expectedMatchingIds: [1, 2]}, + reachesPrimary: true, + reachesOther: true, + }, + metaSubFieldFilterOneShard: { + updateList: [{ + q: {[metaField + '.a']: 2, f: 2}, + u: [ + {$set: {"newField": 101}}, + ], + multi: true, + }], + expectedUpdates: {findQuery: {"newField": 101}, expectedMatchingIds: [2]}, + reachesPrimary: false, + reachesOther: true, + }, + // Meta sub field + time filter has the request targeted to shard1. + metaSubFieldTimeFilterOneShard: { + updateList: [{ + q: { + [metaField + '.a']: 2, + [timeField]: TimeseriesMultiUpdateUtil.generateTimeValue(2), + f: 2 + }, + u: {$set: {"newField": 101}}, + multi: true, + }], + expectedUpdates: {findQuery: {"newField": 101}, expectedMatchingIds: [2]}, + reachesPrimary: false, + reachesOther: true, + }, + metaSubFieldFilterTwoShards: { + updateList: [ + { + q: {[metaField + '.a']: 1, f: 1}, + u: {$set: {"newField": 101}}, + multi: true, + }, + { + q: {[metaField + '.a']: 2, f: 2}, + u: {$set: {"newField": 101}}, + multi: true, + } + ], + expectedUpdates: {findQuery: {"newField": 101}, expectedMatchingIds: [1, 2]}, + reachesPrimary: true, + reachesOther: true, + }, +}; + +function getProfilerEntriesForSuccessfulMultiUpdate(db) { + const profilerFilter = { + op: 'update', + ns: `${dbName}.${collName}`, + // Filters out events recorded because of StaleConfig error. 
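+        // A first attempt that fails with StaleConfig may still be recorded with ok: 0, so
+        // requiring ok != 0 counts only the successful update execution on each shard.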
+ ok: {$ne: 0}, + }; + return db.system.profile.find(profilerFilter).toArray(); +} + +function assertAndGetProfileEntriesIfRequestIsRoutedToCorrectShards(reqConfig, primaryDB, otherDB) { + const primaryEntries = getProfilerEntriesForSuccessfulMultiUpdate(primaryDB); + const otherEntries = getProfilerEntriesForSuccessfulMultiUpdate(otherDB); + + if (reqConfig.reachesPrimary) { + assert.gt(primaryEntries.length, 0, tojson(primaryEntries)); + } else { + assert.eq(primaryEntries.length, 0, tojson(primaryEntries)); + } + + if (reqConfig.reachesOther) { + assert.gt(otherEntries.length, 0, tojson(otherEntries)); + } else { + assert.eq(otherEntries.length, 0, tojson(otherEntries)); + } + + return [primaryEntries, otherEntries]; +} + +function runTest(collConfig, reqConfig, insertFn) { + jsTestLog(`Running a test with configuration: ${tojson({collConfig, reqConfig})}`); + + // Prepares a sharded timeseries collection. + const [coll, documents] = TimeseriesMultiUpdateUtil.prepareShardedTimeseriesCollection( + mongos, st, testDB, collName, collConfig, insertFn); + + // Resets database profiler to verify that the update request is routed to the correct shards. + const primaryShard = st.getPrimaryShard(dbName); + const otherShard = st.getOther(primaryShard); + const primaryDB = primaryShard.getDB(dbName); + const otherDB = otherShard.getDB(dbName); + for (let shardDB of [primaryDB, otherDB]) { + shardDB.setProfilingLevel(0); + shardDB.system.profile.drop(); + shardDB.setProfilingLevel(2); + } + + // Performs updates. + const updateCommand = {update: coll.getName(), updates: reqConfig.updateList}; + assert.commandWorked(testDB.runCommand(updateCommand)); + + // Checks that the query was routed to the correct shards and gets profile entries if so. + const [primaryEntries, otherEntries] = + assertAndGetProfileEntriesIfRequestIsRoutedToCorrectShards(reqConfig, primaryDB, otherDB); + + // Ensures that the collection contains only expected documents. 
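+    // The update's effect is verified indirectly: 'findQuery' only matches documents carrying the
+    // update's changes, so the sorted _ids it returns must equal 'expectedMatchingIds'.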
+ const matchingPred = reqConfig.expectedUpdates.findQuery; + const updatedDocIds = + coll.find(matchingPred, {_id: 1}).sort({_id: 1}).toArray().map(x => x._id); + const updatedDocs = + coll.find(matchingPred, {time: 1, hostid: 1, f: 1}).sort({_id: 1}).toArray(); + + reqConfig.expectedUpdates.expectedMatchingIds.sort(); + + assert.eq(updatedDocIds, reqConfig.expectedUpdates.expectedMatchingIds, ` + Update list: ${tojsononeline(reqConfig.updateList)} + Input documents: + Ids: ${tojsononeline(documents.map(x => x._id))} + Meta: ${tojsononeline(documents.map(x => x[metaField]))} + Time: ${tojsononeline(documents.map(x => x[timeField]))} + Remaining ids: ${tojsononeline(updatedDocIds)} + Remaining docs: ${tojsononeline(updatedDocs)} + Match query: ${tojsononeline(reqConfig.expectedUpdates.findQuery)} + Expected remaining ids: ${tojsononeline(reqConfig.expectedUpdates.expectedMatchingIds)} + Primary shard profiler entries: ${tojson(primaryEntries)} + Other shard profiler entries: ${tojson(otherEntries)}`); +} + +function runOneTestCase(collConfigName, reqConfigName) { + const collConfig = TimeseriesMultiUpdateUtil.collectionConfigurations[collConfigName]; + const reqConfig = requestConfigurations[reqConfigName]; + + TimeseriesTest.run((insertFn) => { + runTest(collConfig, reqConfig, insertFn); + }, testDB); +} + +runOneTestCase("metaShardKey", "emptyFilter"); +runOneTestCase("metaShardKey", "nonShardKeyFilter"); +runOneTestCase("metaShardKey", "metaFilterOneShard"); +runOneTestCase("metaShardKey", "metaFilterOneShardWithCaseInsensitiveCollation"); +runOneTestCase("metaShardKeyString", "metaFilterOneShardString"); +runOneTestCase("metaShardKeyString", "metaFilterTwoShardsStringCaseInsensitive"); +runOneTestCase("metaShardKey", "metaFilterTwoShards"); + +runOneTestCase("metaObjectShardKey", "emptyFilter"); +runOneTestCase("metaObjectShardKey", "nonShardKeyFilter"); +runOneTestCase("metaObjectShardKey", "metaObjectFilterOneShard"); +runOneTestCase("metaObjectShardKey", "metaObjectFilterTwoShards"); +runOneTestCase("metaObjectShardKey", "metaSubFieldFilterTwoShards"); + +runOneTestCase("metaSubFieldShardKey", "emptyFilter"); +runOneTestCase("metaSubFieldShardKey", "nonShardKeyFilter"); +runOneTestCase("metaSubFieldShardKey", "metaObjectFilterTwoShards"); +runOneTestCase("metaSubFieldShardKey", "metaSubFieldFilterOneShard"); +runOneTestCase("metaSubFieldShardKey", "metaSubFieldFilterTwoShards"); + +runOneTestCase("timeShardKey", "nonShardKeyFilter"); +runOneTestCase("timeShardKey", "timeFilterOneShard"); +runOneTestCase("timeShardKey", "timeFilterTwoShards"); + +runOneTestCase("metaTimeShardKey", "emptyFilter"); +runOneTestCase("metaTimeShardKey", "nonShardKeyFilter"); +runOneTestCase("metaTimeShardKey", "metaTimeFilterOneShard"); +runOneTestCase("metaTimeShardKey", "metaFilterTwoShards"); + +runOneTestCase("metaObjectTimeShardKey", "emptyFilter"); +runOneTestCase("metaObjectTimeShardKey", "nonShardKeyFilter"); +runOneTestCase("metaObjectTimeShardKey", "metaObjectTimeFilterOneShard"); +runOneTestCase("metaObjectTimeShardKey", "metaObjectFilterTwoShards"); +runOneTestCase("metaObjectTimeShardKey", "metaSubFieldFilterTwoShards"); + +runOneTestCase("metaSubFieldTimeShardKey", "emptyFilter"); +runOneTestCase("metaSubFieldTimeShardKey", "nonShardKeyFilter"); +runOneTestCase("metaSubFieldTimeShardKey", "metaSubFieldTimeFilterOneShard"); +runOneTestCase("metaSubFieldTimeShardKey", "metaObjectFilterTwoShards"); +runOneTestCase("metaSubFieldTimeShardKey", "metaSubFieldFilterTwoShards"); + +st.stop(); diff 
--git a/jstests/sharding/timeseries_update_multi_explain.js b/jstests/sharding/timeseries_update_multi_explain.js new file mode 100644 index 0000000000000..eca89d87d682a --- /dev/null +++ b/jstests/sharding/timeseries_update_multi_explain.js @@ -0,0 +1,553 @@ +/** + * Verifies multi-updates explains work on sharded timeseries collection. Runs a subset of the test + * cases included in 'jstests/sharding/timeseries_update_multi.js'. + * + * @tags: [ + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ + +import {TimeseriesTest} from "jstests/core/timeseries/libs/timeseries.js"; +import { + generateTimeValue, + makeBucketFilter +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; +import {getExecutionStages} from "jstests/libs/analyze_plan.js"; +import {TimeseriesMultiUpdateUtil} from "jstests/sharding/libs/timeseries_update_multi_util.js"; + +Random.setRandomSeed(); + +// Connections. +const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); +const mongos = st.s0; + +const dbName = jsTestName(); +const collName = 'sharded_timeseries_update_multi_explain'; +const timeField = TimeseriesMultiUpdateUtil.timeField; +const metaField = TimeseriesMultiUpdateUtil.metaField; + +// Databases. +assert.commandWorked(mongos.adminCommand({enableSharding: dbName})); +const testDB = mongos.getDB(dbName); +const primaryShard = st.getPrimaryShard(dbName); +const primaryShardName = primaryShard.shardName; +const otherShard = st.getOther(primaryShard); +const otherShardName = otherShard.shardName; + +// These configurations contain expected explain results corresponding to a particular shard for +// each update in the update list. Queries including time filters avoid checking bucket filters for +// brevity since they include clustered index optimizations. +const requestConfigurations = { + // Empty filter leads to broadcasted request. + emptyFilter: { + updateList: [{ + q: {}, + u: {$set: {"newField": 1}}, + multi: true, + }], + expectedExplain: { + [primaryShardName]: { + bucketFilter: makeBucketFilter({}), + residualFilter: {}, + nBucketsUnpacked: 2, + nMeasurementsMatched: 2, + nMeasurementsUpdated: 2, + }, + [otherShardName]: { + bucketFilter: makeBucketFilter({}), + residualFilter: {}, + nBucketsUnpacked: 2, + nMeasurementsMatched: 2, + nMeasurementsUpdated: 2, + } + } + }, + // Non-shard key filter without meta or time field leads to broadcasted request. + nonShardKeyFilter: { + updateList: [{ + q: {f: 0}, + u: {$unset: {f: ""}}, + multi: true, + }], + expectedExplain: { + [primaryShardName]: { + bucketFilter: makeBucketFilter({ + "$and": [ + {"control.min.f": {"$_internalExprLte": 0}}, + {"control.max.f": {"$_internalExprGte": 0}} + ] + }), + residualFilter: {"f": {"$eq": 0}}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + }, + [otherShardName]: { + bucketFilter: makeBucketFilter({ + "$and": [ + {"control.min.f": {"$_internalExprLte": 0}}, + {"control.max.f": {"$_internalExprGte": 0}} + ] + }), + residualFilter: {"f": {"$eq": 0}}, + nBucketsUnpacked: 0, + nMeasurementsMatched: 0, + nMeasurementsUpdated: 0, + } + } + }, + // This time field filter has the request targeted to the shard0. 
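+    // The bucket filter below also bounds _id with ObjectIds derived from the time predicate;
+    // this is the clustered-index optimization mentioned above, and it is why the other
+    // time-filter cases omit their bucketFilter checks.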
+ timeFilterOneShard: { + updateList: [{ + q: {[timeField]: generateTimeValue(0), f: 0}, + u: [ + {$unset: "f"}, + {$set: {"newField": 1}}, + ], + multi: true, + }], + expectedExplain: { + [primaryShardName]: { + bucketFilter: makeBucketFilter({ + "$and": [ + { + "$and": [ + { + "control.min.time": + {"$_internalExprLte": ISODate("2000-01-01T00:00:00Z")} + }, + { + "control.min.time": + {"$_internalExprGte": ISODate("1999-12-31T23:00:00Z")} + }, + { + "control.max.time": + {"$_internalExprGte": ISODate("2000-01-01T00:00:00Z")} + }, + { + "control.max.time": + {"$_internalExprLte": ISODate("2000-01-01T01:00:00Z")} + }, + {"_id": {"$lte": ObjectId("386d4380ffffffffffffffff")}}, + {"_id": {"$gte": ObjectId("386d35700000000000000000")}} + ] + }, + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 0}}, + {"control.max.f": {"$_internalExprGte": 0}} + ] + } + ] + }), + residualFilter: + {"$and": [{[timeField]: {"$eq": generateTimeValue(0)}}, {"f": {"$eq": 0}}]}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + }, + }, + }, + // This time field filter has the request targeted to both shards. + timeFilterTwoShards: { + updateList: [{ + q: { + $or: [ + {$and: [{[timeField]: generateTimeValue(1)}, {"f": {"$eq": 1}}]}, + {$and: [{[timeField]: generateTimeValue(3)}, {"f": {"$eq": 3}}]} + ] + }, + u: {$set: {f: ["arr", "ay"]}}, + multi: true, + }], + expectedExplain: { + [primaryShardName]: { + residualFilter: { + "$or": [ + {"$and": [{[timeField]: {"$eq": generateTimeValue(1)}}, {"f": {"$eq": 1}}]}, + {"$and": [{[timeField]: {"$eq": generateTimeValue(3)}}, {"f": {"$eq": 3}}]} + ] + }, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + }, + [otherShardName]: { + residualFilter: { + "$or": [ + {"$and": [{[timeField]: {"$eq": generateTimeValue(1)}}, {"f": {"$eq": 1}}]}, + {"$and": [{[timeField]: {"$eq": generateTimeValue(3)}}, {"f": {"$eq": 3}}]} + ] + }, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + }, + // This meta field filter targets the primary shard. + metaFilterOneShard: { + updateList: [{ + q: {[metaField]: 1, f: 1}, + u: [ + {$unset: "f"}, + {$set: {"newField": 1}}, + {$set: {"_id": 200}}, + ], + multi: true, + }], + expectedExplain: { + [primaryShardName]: { + bucketFilter: makeBucketFilter( + {"meta": {"$eq": 1}}, + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 1}}, + {"control.max.f": {"$_internalExprGte": 1}} + ] + }, + ), + residualFilter: {"f": {"$eq": 1}}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + }, + // Meta + time filter has the request targeted to shard1. 
+ metaTimeFilterOneShard: { + updateList: [{ + q: {[metaField]: 2, [timeField]: generateTimeValue(2), f: 2}, + u: {$set: {f: 1000}}, + multi: true, + }], + expectedExplain: { + [otherShardName]: { + residualFilter: + {"$and": [{[timeField]: {"$eq": generateTimeValue(2)}}, {"f": {"$eq": 2}}]}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + }, + metaFilterTwoShards: { + updateList: [ + { + q: {$and: [{[metaField]: {$gt: 0}}, {$or: [{f: {$eq: 1}}, {f: {$eq: 3}}]}]}, + u: {$set: {"newField": 101}}, + multi: true, + }, + ], + expectedExplain: { + [primaryShardName]: { + bucketFilter: makeBucketFilter({"meta": {"$gt": 0}}, { + "$or": [ + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 1}}, + {"control.max.f": {"$_internalExprGte": 1}} + ] + }, + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 3}}, + {"control.max.f": {"$_internalExprGte": 3}} + ] + } + ] + }), + residualFilter: {"$or": [{"f": {"$eq": 1}}, {"f": {"$eq": 3}}]}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + }, + [otherShardName]: { + bucketFilter: makeBucketFilter({"meta": {"$gt": 0}}, { + "$or": [ + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 1}}, + {"control.max.f": {"$_internalExprGte": 1}} + ] + }, + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 3}}, + {"control.max.f": {"$_internalExprGte": 3}} + ] + } + ] + }), + residualFilter: {"$or": [{"f": {"$eq": 1}}, {"f": {"$eq": 3}}]}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + }, + } + }, + metaObjectFilterOneShard: { + updateList: [{ + q: {[metaField]: {a: 2}, f: 2}, + u: {$set: {"newField": 101}}, + multi: true, + }], + expectedExplain: { + [otherShardName]: { + bucketFilter: makeBucketFilter( + {"meta": {"$eq": {"a": 2}}}, + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 2}}, + {"control.max.f": {"$_internalExprGte": 2}} + ] + }, + ), + residualFilter: {"f": {"$eq": 2}}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + }, + // Meta object + time filter has the request targeted to shard1. 
+ metaObjectTimeFilterOneShard: { + updateList: [{ + q: {[metaField]: {a: 2}, [timeField]: generateTimeValue(2), f: 2}, + u: {$set: {f: 2000}}, + multi: true, + }], + expectedExplain: { + [otherShardName]: { + residualFilter: + {"$and": [{[timeField]: {"$eq": generateTimeValue(2)}}, {"f": {"$eq": 2}}]}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + }, + metaObjectFilterTwoShards: { + updateList: [ + { + q: {[metaField]: {a: 1}, f: 1}, + u: {$set: {"newField": 101}}, + multi: true, + }, + ], + expectedExplain: { + [primaryShardName]: { + bucketFilter: makeBucketFilter( + {"meta": {"$eq": {"a": 1}}}, + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 1}}, + {"control.max.f": {"$_internalExprGte": 1}} + ] + }, + ), + residualFilter: {"f": {"$eq": 1}}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + }, + metaSubFieldFilterOneShard: { + updateList: [{ + q: {[metaField + '.a']: 2, f: 2}, + u: [ + {$set: {"newField": 101}}, + ], + multi: true, + }], + expectedExplain: { + [otherShardName]: { + bucketFilter: makeBucketFilter({"meta.a": {"$eq": 2}}, { + "$and": [ + {"control.min.f": {"$_internalExprLte": 2}}, + {"control.max.f": {"$_internalExprGte": 2}} + ] + }), + residualFilter: {"f": {"$eq": 2}}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + }, + // Meta sub field + time filter has the request targeted to shard1. + metaSubFieldTimeFilterOneShard: { + updateList: [{ + q: {[metaField + '.a']: 2, [timeField]: generateTimeValue(2), f: 2}, + u: {$set: {"newField": 101}}, + multi: true, + }], + expectedExplain: { + [otherShardName]: { + residualFilter: + {"$and": [{[timeField]: {"$eq": generateTimeValue(2)}}, {"f": {"$eq": 2}}]}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + }, + metaSubFieldFilterTwoShards: { + updateList: [ + { + q: {$and: [{[metaField + '.a']: {$gt: 0}}, {$or: [{f: {$eq: 1}}, {f: {$eq: 2}}]}]}, + u: {$set: {"newField": 101}}, + multi: true + }, + ], + expectedExplain: { + [primaryShardName]: { + bucketFilter: makeBucketFilter({"meta.a": {"$gt": 0}}, { + "$or": [ + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 1}}, + {"control.max.f": {"$_internalExprGte": 1}} + ] + }, + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 2}}, + {"control.max.f": {"$_internalExprGte": 2}} + ] + } + ] + }), + residualFilter: {"$or": [{"f": {"$eq": 1}}, {"f": {"$eq": 2}}]}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + }, + [otherShardName]: { + bucketFilter: makeBucketFilter({"meta.a": {"$gt": 0}}, { + "$or": [ + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 1}}, + {"control.max.f": {"$_internalExprGte": 1}} + ] + }, + { + "$and": [ + {"control.min.f": {"$_internalExprLte": 2}}, + {"control.max.f": {"$_internalExprGte": 2}} + ] + } + ] + }), + residualFilter: {"$or": [{"f": {"$eq": 1}}, {"f": {"$eq": 2}}]}, + nBucketsUnpacked: 1, + nMeasurementsMatched: 1, + nMeasurementsUpdated: 1, + } + } + } +}; + +function runExplainTest(collConfig, reqConfig, insertFn) { + jsTestLog(`Running a test with configuration: ${tojson({collConfig, reqConfig})}`); + + // Prepares a sharded timeseries collection. + const [coll, _] = TimeseriesMultiUpdateUtil.prepareShardedTimeseriesCollection( + mongos, st, testDB, collName, collConfig, insertFn); + + // We can only run the explain on one update at a time. 
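+    // Explain wraps a single write, so any request configuration reused from
+    // timeseries_update_multi.js must contain exactly one entry in its update list here.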
+ assert.eq(reqConfig.updateList.length, + 1, + `The updateList can only contain one update: ${tojson(reqConfig)}`); + const update = reqConfig.updateList[0]; + const expectedExplainOutput = reqConfig.expectedExplain; + + // Run explain on the update and examine the execution stages for the expected results. + const explainOutput = assert.commandWorked(coll.runCommand( + {explain: {update: coll.getName(), updates: [update]}, verbosity: "executionStats"})); + const execStages = getExecutionStages(explainOutput); + assert.eq(execStages.length, + Object.keys(expectedExplainOutput).length, + `Mismatch in expected explain: ${tojson(expectedExplainOutput)} and exec stages: ${ + tojson(explainOutput)}`); + + for (const execStage of execStages) { + // Based off of the shard name, extract corresponding expected output. + const expectedExplainForShard = expectedExplainOutput[execStage.shardName]; + assert(expectedExplainForShard !== undefined, + `No expected explain output included for the execution stage: ${tojson(execStage)}`); + + assert.eq("TS_MODIFY", + execStage.stage, + `TS_MODIFY stage not found in executionStages: ${tojson(execStage)}`); + assert.eq("updateMany", + execStage.opType, + `TS_MODIFY stage not found in executionStages: ${tojson(execStage)}`); + + // Check the bucket and residual filters if they are provided in the expected explain + // result. + if (expectedExplainForShard.bucketFilter !== undefined) { + assert.eq(expectedExplainForShard.bucketFilter, + execStage.bucketFilter, + `TS_MODIFY bucketFilter is wrong: ${tojson(execStage)}`); + } + if (expectedExplainForShard.residualFilter !== undefined) { + assert.eq(expectedExplainForShard.residualFilter, + execStage.residualFilter, + `TS_MODIFY residualFilter is wrong: ${tojson(execStage)}`); + } + + // Check the expected metrics for the expected explain result. 
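+        // These counters are read from the TS_MODIFY stage's executionStats on this shard: how
+        // many buckets were unpacked, and how many measurements matched and were updated.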
+ assert.eq(expectedExplainForShard.nBucketsUnpacked, + execStage.nBucketsUnpacked, + `Got wrong nBucketsUnpacked: ${tojson(execStage)}`); + assert.eq(expectedExplainForShard.nMeasurementsMatched, + execStage.nMeasurementsMatched, + `Got wrong nMeasurementsMatched: ${tojson(execStage)}`); + assert.eq(expectedExplainForShard.nMeasurementsUpdated, + execStage.nMeasurementsUpdated, + `Got wrong nMeasurementsUpdated: ${tojson(execStage)}`); + } +} + +function runOneTestCase(collConfigName, reqConfigName) { + const collConfig = TimeseriesMultiUpdateUtil.collectionConfigurations[collConfigName]; + const reqConfig = requestConfigurations[reqConfigName]; + + TimeseriesTest.run((insertFn) => { + jsTestLog("req config " + reqConfigName); + runExplainTest(collConfig, reqConfig, insertFn); + }, testDB); +} + +runOneTestCase("metaShardKey", "emptyFilter"); + +runOneTestCase("metaShardKey", "nonShardKeyFilter"); + +runOneTestCase("timeShardKey", "timeFilterOneShard"); + +runOneTestCase("timeShardKey", "timeFilterTwoShards"); + +runOneTestCase("metaTimeShardKey", "metaTimeFilterOneShard"); + +runOneTestCase("metaObjectTimeShardKey", "metaObjectTimeFilterOneShard"); + +runOneTestCase("metaSubFieldTimeShardKey", "metaSubFieldTimeFilterOneShard"); + +runOneTestCase("metaShardKey", "metaFilterOneShard"); + +runOneTestCase("metaShardKey", "metaFilterTwoShards"); + +runOneTestCase("metaObjectShardKey", "metaObjectFilterOneShard"); + +runOneTestCase("metaObjectShardKey", "metaSubFieldFilterTwoShards"); + +runOneTestCase("metaObjectShardKey", "metaObjectFilterTwoShards"); + +runOneTestCase("metaSubFieldShardKey", "metaSubFieldFilterOneShard"); + +st.stop(); diff --git a/jstests/sharding/timeseries_update_one.js b/jstests/sharding/timeseries_update_one.js new file mode 100644 index 0000000000000..647a0d76ea990 --- /dev/null +++ b/jstests/sharding/timeseries_update_one.js @@ -0,0 +1,243 @@ +/** + * Tests 'updateOne' command on sharded collections. + * + * @tags: [ + * # To avoid multiversion tests + * requires_fcv_71, + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ + +import { + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc6_c_f105, + generateTimeValue, + getCallerName, + getTestDB, + metaFieldName, + prepareShardedCollection, + setUpShardedCluster, + st, + timeFieldName +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; + +setUpShardedCluster(); +const testDB = getTestDB(); + +const runTest = function({ + initialDocList, + query, + update, + nModified, + resultDocList, + includeMeta = true, + retryableWrite = false, +}) { + const collName = getCallerName(); + jsTestLog(`Running ${collName}(${tojson(arguments[0])})`); + + // Creates and shards a timeseries collection. + const coll = prepareShardedCollection({collName: collName, initialDocList, includeMeta}); + + const updateCommand = {update: collName, updates: [{q: query, u: update, multi: false}]}; + const result = (() => { + if (!retryableWrite) { + return assert.commandWorked(testDB.runCommand(updateCommand)); + } + + // Run as a retryable write to modify the shard key value. 
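+        // Updates that change a document's shard key value must run in a transaction or as a
+        // retryable write, so attach an lsid and txnNumber to the command before sending it.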
+ const session = coll.getDB().getMongo().startSession({retryWrites: true}); + const sessionDb = session.getDatabase(coll.getDB().getName()); + updateCommand["lsid"] = session.getSessionId(); + updateCommand["txnNumber"] = NumberLong(1); + const res = assert.commandWorked(sessionDb.runCommand(updateCommand)); + + return res; + })(); + assert.eq(nModified, result.nModified, tojson(result)); + + if (resultDocList) { + assert.sameMembers(resultDocList, + coll.find().toArray(), + "Collection contents did not match expected after update"); + } else { + assert.eq(coll.countDocuments({}), + initialDocList.length, + "Collection count did not match expected after update: " + + tojson(coll.find().toArray())); + } +}; + +(function testTargetSingleShardByMeta() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + query: {[metaFieldName]: "A"}, + update: {$set: {f: 110}}, + nModified: 1, + resultDocList: [ + {_id: 2, [metaFieldName]: "A", [timeFieldName]: generateTimeValue(2), f: 110}, + doc4_b_f103 + ], + }); +})(); + +(function testTargetSingleShardByMetaNoMatches() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + query: {[metaFieldName]: "C"}, + update: {$set: {f: 110}}, + nModified: 0, + resultDocList: [doc2_a_f101, doc4_b_f103], + }); +})(); + +(function testTargetSingleShardByMetaWithAdditionalMetricFilter() { + runTest({ + initialDocList: [doc2_a_f101, doc3_a_f102, doc4_b_f103], + query: {[metaFieldName]: "A", f: 102}, + update: {$set: {f: 110}}, + nModified: 1, + resultDocList: [ + doc2_a_f101, + {_id: 3, [metaFieldName]: "A", [timeFieldName]: generateTimeValue(3), f: 110}, + doc4_b_f103 + ], + }); +})(); + +(function testTargetSingleShardByTime() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + includeMeta: false, + query: {[timeFieldName]: generateTimeValue(2)}, + update: {$set: {f: 110}}, + nModified: 1, + resultDocList: [ + {_id: 2, [metaFieldName]: "A", [timeFieldName]: generateTimeValue(2), f: 110}, + doc4_b_f103 + ], + }); +})(); + +(function testTargetSingleShardUnsetShardKey() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + query: {[metaFieldName]: "A"}, + update: {$unset: {[metaFieldName]: 1}}, + nModified: 1, + resultDocList: [{_id: 2, [timeFieldName]: generateTimeValue(2), f: 101}, doc4_b_f103], + retryableWrite: true, + }); +})(); + +(function testTargetSingleShardUnsetShardKeyByReplacement() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + query: {[metaFieldName]: "A"}, + update: {[timeFieldName]: generateTimeValue(2), f: 110}, + replacement: true, + nModified: 1, + resultDocList: [{_id: 2, [timeFieldName]: generateTimeValue(2), f: 110}, doc4_b_f103], + retryableWrite: true, + }); +})(); + +(function testTargetSingleShardretryableWriteByReplacement() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + query: {[metaFieldName]: "B"}, + update: {[metaFieldName]: "C", [timeFieldName]: generateTimeValue(4), f: 110}, + replacement: true, + nModified: 1, + resultDocList: [ + doc2_a_f101, + {_id: 4, [metaFieldName]: "C", [timeFieldName]: generateTimeValue(4), f: 110} + ], + retryableWrite: true, + }); +})(); + +(function testTargetSingleShardretryableWriteByReplacementChangeShard() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + query: {[metaFieldName]: "B"}, + update: {[metaFieldName]: "A", [timeFieldName]: generateTimeValue(4), f: 110}, + replacement: true, + nModified: 1, + resultDocList: [ + doc2_a_f101, + {_id: 4, [metaFieldName]: "A", [timeFieldName]: generateTimeValue(4), f: 110} + ], + 
retryableWrite: true, + }); +})(); + +(function testTwoPhaseUpdate() { + runTest({ + initialDocList: [doc2_a_f101, doc3_a_f102, doc4_b_f103, doc6_c_f105], + query: {f: {$gt: 100}}, + update: {$set: {f: 110}}, + nModified: 1, + }); +})(); + +(function testTwoPhaseRetryableUpdate() { + runTest({ + initialDocList: [doc2_a_f101, doc3_a_f102, doc4_b_f103, doc6_c_f105], + query: {f: {$gt: 100}}, + update: {$set: {f: 110}}, + nModified: 1, + retryableWrite: true, + }); +})(); + +(function testTwoPhaseUpdateNoMatches() { + runTest({ + initialDocList: [doc2_a_f101, doc3_a_f102, doc4_b_f103, doc6_c_f105], + query: {f: {$gt: 1000}}, + update: {$set: {f: 110}}, + nModified: 0, + resultDocList: [doc2_a_f101, doc3_a_f102, doc4_b_f103, doc6_c_f105], + }); +})(); + +(function testTwoPhaseUpdateById() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + query: {_id: 4}, + update: {$set: {f: 110}}, + nModified: 1, + resultDocList: [ + doc2_a_f101, + {_id: 4, [metaFieldName]: "B", [timeFieldName]: generateTimeValue(4), f: 110}, + ], + }); +})(); + +(function testTwoPhaseUpdateByTimeField() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103], + query: {[timeFieldName]: generateTimeValue(4)}, + update: {$set: {f: 110}}, + nModified: 1, + resultDocList: [ + doc2_a_f101, + {_id: 4, [metaFieldName]: "B", [timeFieldName]: generateTimeValue(4), f: 110}, + ], + }); +})(); + +(function testTwoPhaseUpdateEmptyPredicate() { + runTest({ + initialDocList: [doc2_a_f101, doc4_b_f103, doc6_c_f105], + query: {}, + update: {$set: {f: 110}}, + nModified: 1, + }); +})(); + +st.stop(); diff --git a/jstests/sharding/timeseries_update_routing.js b/jstests/sharding/timeseries_update_routing.js index 69379a10a515f..3f86f12b7035a 100644 --- a/jstests/sharding/timeseries_update_routing.js +++ b/jstests/sharding/timeseries_update_routing.js @@ -6,11 +6,6 @@ * ] */ -(function() { -"use strict"; - -load("jstests/core/timeseries/libs/timeseries.js"); // For 'TimeseriesTest' helpers. - const st = new ShardingTest({shards: 2, rs: {nodes: 2}}); // @@ -22,23 +17,6 @@ const collName = 'weather'; const bucketCollName = `system.buckets.${collName}`; const bucketCollFullName = `${dbName}.${bucketCollName}`; -// -// Checks for feature flags. -// - -if (!TimeseriesTest.shardedtimeseriesCollectionsEnabled(st.shard0)) { - jsTestLog("Skipping test because the sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - -if (!TimeseriesTest.shardedTimeseriesUpdatesAndDeletesEnabled(st.shard0)) { - jsTestLog( - "Skipping test because the updates and deletes on sharded time-series collection feature flag is disabled"); - st.stop(); - return; -} - const mongos = st.s; const testDB = mongos.getDB(dbName); const primary = st.shard0; @@ -188,5 +166,4 @@ testUpdateRouting({ shardsTargetedCount: 2 }); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/timeseries_upsert.js b/jstests/sharding/timeseries_upsert.js new file mode 100644 index 0000000000000..0885864899f91 --- /dev/null +++ b/jstests/sharding/timeseries_upsert.js @@ -0,0 +1,339 @@ +/** + * Verifies upserts on sharded timeseries collection. 
+ * + * @tags: [ + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * # Upsert on sharded timeseries collection is only supported in FCV 7.1+ + * requires_fcv_71, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ + +import { + metaFieldName, + prepareShardedCollection, + setUpShardedCluster, + st, + testDB, + timeFieldName +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + +setUpShardedCluster(); + +const collName = 'sharded_timeseries_upsert'; +const dateTime = new ISODate(); + +function generateDocsForTestCase(collConfig) { + let documents = []; + for (let i = 0; i < 4; i++) { + documents.push({ + _id: i, + [metaFieldName]: collConfig.metaGenerator(i), + [timeFieldName]: dateTime, + f: i + }); + } + return documents; +} + +const metaShardKey = { + metaGenerator: (id => id), + shardKey: {[metaFieldName]: 1}, + splitPoint: {meta: 2}, +}; +const metaSubFieldShardKey = { + metaGenerator: (index => ({a: index})), + shardKey: {[metaFieldName + '.a']: 1}, + splitPoint: {'meta.a': 2}, +}; + +function runTest({collConfig, updateOp, upsertedDoc, errorCode, updateShardKey = false}) { + // Prepares a sharded timeseries collection. + const documents = generateDocsForTestCase(collConfig); + const coll = prepareShardedCollection({ + collName, + initialDocList: documents, + shardKey: collConfig.shardKey, + splitPoint: collConfig.splitPoint + }); + + // Performs updates. + const updateCommand = {update: coll.getName(), updates: [updateOp]}; + if (errorCode) { + assert.commandFailedWithCode(testDB.runCommand(updateCommand), errorCode); + return; + } + const res = (() => { + if (!updateShardKey) { + return assert.commandWorked(testDB.runCommand(updateCommand)); + } + + // Run as a retryable write to modify the shard key value. + const session = coll.getDB().getMongo().startSession({retryWrites: true}); + const sessionDb = session.getDatabase(coll.getDB().getName()); + updateCommand["lsid"] = session.getSessionId(); + updateCommand["txnNumber"] = NumberLong(1); + const res = assert.commandWorked(sessionDb.runCommand(updateCommand)); + + return res; + })(); + + if (upsertedDoc) { + assert.eq(res.upserted.length, 1, tojson(res)); + upsertedDoc["_id"] = res.upserted[0]._id; + documents.push(upsertedDoc); + } + + const resultDocs = coll.find().toArray(); + assert.eq(resultDocs.length, documents.length); + + assert.sameMembers( + resultDocs, documents, "Collection contents did not match expected after upsert"); +} + +// +// Tests for multi updates. 
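+// Each case below issues a single {multi: true, upsert: true} update and either checks the
+// contents of the upserted document or expects the command to fail with the listed error code.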
+// + +(function testMultiUpdateNoShardKey() { + runTest({ + collConfig: metaShardKey, + updateOp: {q: {f: 1000}, u: {$set: {[timeFieldName]: dateTime}}, multi: true, upsert: true}, + errorCode: ErrorCodes.ShardKeyNotFound, + }); +})(); + +(function testMultiUpdateNoEqualityOperator() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: {$gt: 0}, f: 1000}, + u: {$set: {[timeFieldName]: dateTime}}, + multi: true, + upsert: true + }, + errorCode: ErrorCodes.ShardKeyNotFound, + }); +})(); + +(function testMultiUpdateUnsetShardKey() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: -1}, + u: [{$set: {[timeFieldName]: dateTime, f: 15}}, {$unset: metaFieldName}], + multi: true, + upsert: true + }, + upsertedDoc: {[timeFieldName]: dateTime, f: 15}, + }); +})(); + +(function testMultiUpdateUpsertShardKeyFromQuery() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: -1}, + u: {$set: {[timeFieldName]: dateTime, f: 15}}, + multi: true, + upsert: true + }, + upsertedDoc: {[metaFieldName]: -1, [timeFieldName]: dateTime, f: 15}, + }); +})(); + +(function testMultiUpdateUpsertShardKeyFromUpdate() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: -1}, + u: {$set: {[metaFieldName]: -10, [timeFieldName]: dateTime, f: 15}}, + multi: true, + upsert: true + }, + upsertedDoc: {[metaFieldName]: -10, [timeFieldName]: dateTime, f: 15}, + }); +})(); + +(function testMultiUpdateUpsertShardKeyArrayNotAllowed() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: -1}, + u: {$set: {[metaFieldName]: [1, 2, 3], [timeFieldName]: dateTime, f: 15}}, + multi: true, + upsert: true + }, + errorCode: ErrorCodes.NotSingleValueField, + }); +})(); + +// +// Tests for a nested shard key. 
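+// The shard key here is a dotted subfield of the meta field (metaFieldName + '.a'), so an upsert
+// is only targetable when the query or update supplies an equality on that subfield.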
+// + +(function testMultiUpdateNestedShardKeyNotFound() { + runTest({ + collConfig: metaSubFieldShardKey, + updateOp: { + q: {[metaFieldName]: 1}, + u: {$set: {[timeFieldName]: dateTime}}, + multi: true, + upsert: true + }, + errorCode: ErrorCodes.ShardKeyNotFound, + }); +})(); + +(function testMultiUpdateNestedShardKeyNoEqualityOperator() { + runTest({ + collConfig: metaSubFieldShardKey, + updateOp: { + q: {[metaFieldName + '.a']: {$gt: 0}, f: 1000}, + u: {$set: {[timeFieldName]: dateTime}}, + multi: true, + upsert: true + }, + errorCode: ErrorCodes.ShardKeyNotFound, + }); +})(); + +(function testMultiUpdateUnsetNestedShardKey() { + runTest({ + collConfig: metaSubFieldShardKey, + updateOp: { + q: {[metaFieldName + '.a']: -1}, + u: [ + {$set: {[metaFieldName + '.b']: 10, [timeFieldName]: dateTime, f: 15}}, + {$unset: metaFieldName + '.a'} + ], + multi: true, + upsert: true + }, + upsertedDoc: {[metaFieldName]: {b: 10}, [timeFieldName]: dateTime, f: 15}, + }); +})(); + +(function testMultiUpdateUpsertNestedShardKeyFromQuery() { + runTest({ + collConfig: metaSubFieldShardKey, + updateOp: { + q: {[metaFieldName + '.a']: -1}, + u: {$set: {[timeFieldName]: dateTime, f: 15}}, + multi: true, + upsert: true + }, + upsertedDoc: {[metaFieldName]: {a: -1}, [timeFieldName]: dateTime, f: 15}, + }); +})(); + +(function testMultiUpdateUpsertNestedShardKeyFromUpdate() { + runTest({ + collConfig: metaSubFieldShardKey, + updateOp: { + q: {[metaFieldName + '.a']: -1}, + u: {$set: {[metaFieldName + '.a']: -10, [timeFieldName]: dateTime, f: 15}}, + multi: true, + upsert: true + }, + upsertedDoc: {[metaFieldName]: {a: -10}, [timeFieldName]: dateTime, f: 15}, + }); +})(); + +(function testMultiUpdateUpsertNestedShardKeyArrayNotAllowed() { + runTest({ + collConfig: metaSubFieldShardKey, + updateOp: { + q: {[metaFieldName + '.a']: -1}, + u: {$set: {[metaFieldName + '.a']: [1, 2, 3], [timeFieldName]: dateTime, f: 15}}, + multi: true, + upsert: true + }, + errorCode: ErrorCodes.NotSingleValueField, + }); +})(); + +// +// Tests for singleton updates. 
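+// Unlike the multi:true cases above, these {multi: false} upserts can succeed without a shard key
+// equality, since single writes may be routed through the write-without-shard-key path.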
+// + +(function testSingleUpdateNoShardKey() { + runTest({ + collConfig: metaShardKey, + updateOp: + {q: {f: 1000}, u: {$set: {[timeFieldName]: dateTime}}, multi: false, upsert: true}, + upsertedDoc: {f: 1000, [timeFieldName]: dateTime}, + }); +})(); + +(function testSingleUpdateNoEqualityOperator() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: {$gt: 0}, f: 1000}, + u: {$set: {[timeFieldName]: dateTime}}, + multi: false, + upsert: true + }, + upsertedDoc: {f: 1000, [timeFieldName]: dateTime}, + }); +})(); + +(function testSingleUpdateWithShardKey() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: -1}, + u: {$set: {[metaFieldName]: -10, [timeFieldName]: dateTime, f: 15}}, + multi: false, + upsert: true + }, + upsertedDoc: {[metaFieldName]: -10, [timeFieldName]: dateTime, f: 15}, + }); +})(); + +(function testSingleUpdateReplacementDocWouldChangeOwningShard() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: -1}, + u: {[metaFieldName]: 10, [timeFieldName]: dateTime, f: 15}, + multi: false, + upsert: true + }, + upsertedDoc: {[metaFieldName]: 10, [timeFieldName]: dateTime, f: 15}, + updateShardKey: true, + }); +})(); + +(function testSingleUpdateReplacementDocWithNoShardKey() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: -1}, + u: {[timeFieldName]: dateTime, f: 15}, + multi: false, + upsert: true + }, + upsertedDoc: {[timeFieldName]: dateTime, f: 15} + }); +})(); + +(function testSingleUpdateUpsertSuppliedWithNoShardKey() { + runTest({ + collConfig: metaShardKey, + updateOp: { + q: {[metaFieldName]: -1}, + u: [{$set: {unused: true}}], + multi: false, + upsert: true, + upsertSupplied: true, + c: {new: {[timeFieldName]: dateTime, f: 15}}, + }, + upsertedDoc: {[timeFieldName]: dateTime, f: 15} + }); +})(); + +st.stop(); diff --git a/jstests/sharding/timeseries_write_one_stale_sharding_state.js b/jstests/sharding/timeseries_write_one_stale_sharding_state.js new file mode 100644 index 0000000000000..b70a5e3a15f9f --- /dev/null +++ b/jstests/sharding/timeseries_write_one_stale_sharding_state.js @@ -0,0 +1,183 @@ +/** + * Tests two-phase write commands on a timeseries collection when the sharding state is stale. + * + * @tags: [ + * # We need a timeseries collection. + * requires_timeseries, + * # To avoid burn-in tests in in-memory build variants + * requires_persistence, + * # 'NamespaceNotSharded' error is supported since 7.1 + * requires_fcv_71, + * featureFlagTimeseriesUpdatesSupport, + * ] + */ + +import { + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, + generateTimeValue, + getCallerName, + mongos0DB, + mongos1DB, + prepareCollection, + prepareShardedCollection, + setUpShardedCluster, + tearDownShardedCluster +} from "jstests/core/timeseries/libs/timeseries_writes_util.js"; + +const docs = [ + doc1_a_nofields, + doc2_a_f101, + doc3_a_f102, + doc4_b_f103, + doc5_b_f104, + doc6_c_f105, + doc7_c_f106, +]; + +function verifyUpdateDeleteOneRes(res, nAffected) { + assert.eq(nAffected, res.n, tojson(res)); +} + +function verifyFindAndModifyRes(res, nAffected, resultDoc) { + assert.eq(nAffected, res.lastErrorObject.n, tojson(res)); + assert.docEq(resultDoc, res.value, tojson(res)); +} + +/** + * Verifies that a write one command succeed or fail with the expected error code when the sharding + * state is stale. 
+ */ +function testWriteOneOnCollectionWithStaleShardingState({ + writeCmd, + nAffected, + resultDoc, +}) { + const callerName = getCallerName(); + jsTestLog(`Running ${callerName}(${tojson(arguments[0])})`); + + let isFindAndModifyCmd = false; + // The collection name is same as the caller name. + const collName = (() => { + if (writeCmd.hasOwnProperty("findAndModify")) { + isFindAndModifyCmd = true; + writeCmd["findAndModify"] = callerName; + return writeCmd["findAndModify"]; + } else if (writeCmd.hasOwnProperty("delete") && writeCmd["deletes"].length === 1 && + writeCmd["deletes"][0].limit === 1) { + writeCmd["delete"] = callerName; + return writeCmd["delete"]; + } else if (writeCmd.hasOwnProperty("update") && writeCmd["updates"].length === 1 && + !writeCmd["updates"][0].multi) { + writeCmd["update"] = callerName; + return writeCmd["update"]; + } else { + assert(false, "Unsupported write command"); + } + })(); + + // Prepares an unsharded collection on mongos1 which will be soon sharded and then mongos1 will + // have a stale sharding state. + prepareCollection({dbToUse: mongos1DB, collName: collName, initialDocList: docs}); + + // Creates and shards a timeseries collection on mongos0. + prepareShardedCollection({dbToUse: mongos0DB, collName: collName, initialDocList: docs}); + + // This write command should succeed though mongos1 has a stale sharding state since the mongos1 + // should be able to refresh its sharding state from the config server and retry the write + // command internally. + let res = assert.commandWorked(mongos1DB[collName].runCommand(writeCmd)); + if (isFindAndModifyCmd) { + verifyFindAndModifyRes(res, nAffected, resultDoc); + } else { + verifyUpdateDeleteOneRes(res, nAffected); + } + + // This will cause mongos1 to have the up-to-date sharding state but this state will be soon + // stale again. + mongos1DB[collName].insert(resultDoc); + + // Drops and recreates the collection on mongos0. + prepareCollection({dbToUse: mongos0DB, collName: collName, initialDocList: docs}); + + // This write command will fail because mongos1 has a stale sharding state. + res = assert.commandFailedWithCode(mongos1DB[collName].runCommand(writeCmd), + ErrorCodes.NamespaceNotSharded); + + // This write command should succeed since mongos1 should have refreshed its sharding state. 
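+    // The NamespaceNotSharded failure above is what prompts mongos1 to refresh its routing
+    // information from the config server, so retrying the same command now succeeds.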
+ res = assert.commandWorked(mongos1DB[collName].runCommand(writeCmd)); + jsTestLog(tojson(res)); + if (isFindAndModifyCmd) { + verifyFindAndModifyRes(res, nAffected, resultDoc); + } else { + verifyUpdateDeleteOneRes(res, nAffected); + } +} + +setUpShardedCluster({nMongos: 2}); + +(function testFindOneAndRemoveOnCollectionWithStaleShardingState() { + testWriteOneOnCollectionWithStaleShardingState({ + writeCmd: {findAndModify: "$$$", query: {f: 101}, remove: true}, + nAffected: 1, + resultDoc: doc2_a_f101, + }); +})(); + +(function testDeleteOneOnCollectionWithStaleShardingState() { + testWriteOneOnCollectionWithStaleShardingState({ + writeCmd: {delete: "$$$", deletes: [{q: {f: 105}, limit: 1}]}, + nAffected: 1, + resultDoc: doc6_c_f105, + }); +})(); + +(function testFindOneAndUpdateOnCollectionWithStaleShardingState() { + testWriteOneOnCollectionWithStaleShardingState({ + writeCmd: {findAndModify: "$$$", query: {f: 106}, update: {$set: {f: 107}}}, + nAffected: 1, + resultDoc: doc7_c_f106, + }); +})(); + +(function testUpdateOneOnCollectionWithStaleShardingState() { + testWriteOneOnCollectionWithStaleShardingState({ + writeCmd: {update: "$$$", updates: [{q: {f: 106}, u: {$set: {f: 107}}, multi: false}]}, + nAffected: 1, + resultDoc: doc7_c_f106, + }); +})(); + +(function testFindAndModifyUpsertOnCollectionWithStaleShardingState() { + const replacementDoc = {_id: 1000, tag: "A", time: generateTimeValue(0), f: 1000}; + testWriteOneOnCollectionWithStaleShardingState({ + writeCmd: { + findAndModify: "$$$", + query: {f: 1000}, + update: replacementDoc, + upsert: true, + new: true + }, + nAffected: 1, + resultDoc: replacementDoc, + }); +})(); + +(function testUpdateOneUpsertOnCollectionWithStaleShardingState() { + const replacementDoc = {_id: 1000, tag: "A", time: generateTimeValue(0), f: 1000}; + testWriteOneOnCollectionWithStaleShardingState({ + writeCmd: { + update: "$$$", + updates: [{q: {f: 1000}, u: replacementDoc, multi: false, upsert: true}] + }, + nAffected: 1, + resultDoc: replacementDoc, + }); +})(); + +tearDownShardedCluster(); diff --git a/jstests/sharding/transactions_targeting_errors.js b/jstests/sharding/transactions_targeting_errors.js index 90ddfb52a9521..d8488a6197a48 100644 --- a/jstests/sharding/transactions_targeting_errors.js +++ b/jstests/sharding/transactions_targeting_errors.js @@ -1,11 +1,10 @@ -// Verifies targeting errors encountered in a transaction lead to write errors. +// Verifies targeting errors encountered in a transaction lead to write errors when write without +// shard key feature is not enabled. // // @tags: [uses_transactions] (function() { "use strict"; -// TODO: SERVER-72438 Change transaction_targeting_errors.js to validate writeErrors that aren't due -// to shard key targeting. 
load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); const dbName = "test"; @@ -20,6 +19,16 @@ const session = st.s.startSession(); const sessionDB = session.getDatabase("test"); if (WriteWithoutShardKeyTestUtil.isWriteWithoutShardKeyFeatureEnabled(sessionDB)) { + session.startTransaction(); + assert.commandWorked(sessionDB.runCommand( + {update: collName, updates: [{q: {skey: {$lte: 5}}, u: {$set: {x: 1}}, multi: false}]})); + assert.commandWorked(session.abortTransaction_forTesting()); + + session.startTransaction(); + assert.commandWorked( + sessionDB.runCommand({delete: collName, deletes: [{q: {skey: {$lte: 5}}, limit: 1}]})); + assert.commandWorked(session.abortTransaction_forTesting()); + st.stop(); return; } diff --git a/jstests/sharding/transfer_mods_large_batches.js b/jstests/sharding/transfer_mods_large_batches.js new file mode 100644 index 0000000000000..966285024edeb --- /dev/null +++ b/jstests/sharding/transfer_mods_large_batches.js @@ -0,0 +1,132 @@ +/** + * Verify the recipient shard continues to run the _transferMods command against the donor shard + * primary until it receives an empty _transferMods batch after the kCommitStart recipient state was + * reached. In particular, a batch of changes unrelated to the chunk migration must not cause the + * recipient shard to stop running the _transferMods command. + * + * @tags: [uses_transactions] + */ +(function() { +"use strict"; + +load('jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/fail_point_util.js'); +load('jstests/sharding/libs/create_sharded_collection_util.js'); +load('jstests/sharding/libs/sharded_transactions_helpers.js'); + +const staticMongod = MongoRunner.runMongod({}); // Mongod used for startParallelOps(). +const st = new ShardingTest({shards: {rs0: {nodes: 1}, rs1: {nodes: 1}}}); + +const dbName = "test"; +const collName = "transfer_mods_large_batches"; +const collection = st.s.getDB(dbName).getCollection(collName); + +CreateShardedCollectionUtil.shardCollectionWithChunks(collection, {x: 1}, [ + {min: {x: MinKey}, max: {x: 0}, shard: st.shard0.shardName}, + {min: {x: 0}, max: {x: 1000}, shard: st.shard0.shardName}, + {min: {x: 1000}, max: {x: MaxKey}, shard: st.shard1.shardName}, +]); + +function insertLargeDocsInTransaction(collection, docIds, shardKey) { + const lsid = {id: UUID()}; + const txnNumber = 0; + const largeStr = "x".repeat(9 * 1024 * 1024); + + for (let i = 0; i < docIds.length; ++i) { + const docToInsert = {_id: docIds[i]._id}; + Object.assign(docToInsert, shardKey); + docToInsert.note = "large document to force separate _transferMods call"; + docToInsert.padding = largeStr; + + const commandObj = { + documents: [docToInsert], + lsid: lsid, + txnNumber: NumberLong(txnNumber), + autocommit: false + }; + + if (i === 0) { + commandObj.startTransaction = true; + } + + assert.commandWorked(collection.runCommand("insert", commandObj)); + } + + assert.commandWorked(collection.getDB().adminCommand( + {commitTransaction: 1, lsid: lsid, txnNumber: NumberLong(txnNumber), autocommit: false})); +} + +assert.commandWorked(collection.insert([ + {_id: 1, x: -2, note: "keep out of chunk range being migrated"}, + {_id: 2, x: 100, note: "keep in chunk range being migrated"}, +])); + +pauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState); +const fp = configureFailPoint(st.shard1.rs.getPrimary(), "migrateThreadHangAfterSteadyTransition"); + +const joinMoveChunk = moveChunkParallel( + staticMongod, st.s.host, {x: 1}, undefined, 
collection.getFullName(), st.shard1.shardName); + +waitForMoveChunkStep(st.shard0, moveChunkStepNames.reachedSteadyState); +insertLargeDocsInTransaction(collection, [{_id: 3}, {_id: 4}], {x: -1000}); +assert.commandWorked( + collection.insert({_id: 5, x: 1, note: "inserted into range after large _transferMods"})); + +unpauseMoveChunkAtStep(st.shard0, moveChunkStepNames.reachedSteadyState); + +// The kCommitStart state isn't a separate "step" of the chunk migration procedure on the +// recipient shard. We therefore cannot use the waitForMigrateStep() helper to wait for the +// _recvChunkCommit command to have been received by the recipient shard. The problematic +// behavior of the recipient shard finishing its catch up too early only manifests after the +// _recvChunkCommit command has been received by the recipient shard. +assert.soon(() => { + const res = assert.commandWorked(fp.conn.adminCommand({_recvChunkStatus: 1})); + return res.state !== "steady"; +}); + +fp.off(); +joinMoveChunk(); + +class ArrayCursor { + constructor(arr) { + this.i = 0; + this.arr = arr; + } + + hasNext() { + return this.i < this.arr.length; + } + + next() { + return this.arr[this.i++]; + } +} + +const expected = new ArrayCursor([ + {_id: 1, x: -2, note: "keep out of chunk range being migrated"}, + {_id: 2, x: 100, note: "keep in chunk range being migrated"}, + {_id: 3, x: -1000, note: "large document to force separate _transferMods call"}, + {_id: 4, x: -1000, note: "large document to force separate _transferMods call"}, + {_id: 5, x: 1, note: "inserted into range after large _transferMods"}, +]); + +const diff = ((diff) => { + return { + docsWithDifferentContents: diff.docsWithDifferentContents.map( + ({first, second}) => ({expected: first, actual: second})), + docsExtraAfterMigration: diff.docsMissingOnFirst, + docsMissingAfterMigration: diff.docsMissingOnSecond, + }; +})( + DataConsistencyChecker.getDiff( + expected, collection.find({}, {_id: 1, x: 1, note: 1}).sort({_id: 1, x: 1}))); + +assert.eq(diff, { + docsWithDifferentContents: [], + docsExtraAfterMigration: [], + docsMissingAfterMigration: [], +}); + +st.stop(); +MongoRunner.stopMongod(staticMongod); +})(); diff --git a/jstests/sharding/transient_txn_error_labels.js b/jstests/sharding/transient_txn_error_labels.js index 036f8136e611d..db434b62be3b2 100644 --- a/jstests/sharding/transient_txn_error_labels.js +++ b/jstests/sharding/transient_txn_error_labels.js @@ -2,7 +2,6 @@ * Test TransientTransactionErrors error label in transactions. * @tags: [ * uses_transactions, - * temporary_catalog_shard_incompatible, * ] */ @@ -17,8 +16,11 @@ const collName = "no_error_labels_outside_txn"; // We are testing coordinateCommitTransaction, which requires the nodes to be started with // --shardsvr. -const st = new ShardingTest( - {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}}); +const st = new ShardingTest({ + config: TestData.configShard ? 
undefined : 1, + mongos: 1, + shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}} +}); const primary = st.rs0.getPrimary(); const secondary = st.rs0.getSecondary(); diff --git a/jstests/sharding/transient_txn_error_labels_with_write_concern.js b/jstests/sharding/transient_txn_error_labels_with_write_concern.js index db59d5d6c8179..33cc021ed2712 100644 --- a/jstests/sharding/transient_txn_error_labels_with_write_concern.js +++ b/jstests/sharding/transient_txn_error_labels_with_write_concern.js @@ -2,7 +2,6 @@ * Test TransientTransactionError error label for commands in transactions with write concern. * @tags: [ * uses_transactions, - * temporary_catalog_shard_incompatible, * ] */ (function() { @@ -17,8 +16,11 @@ const collName = "transient_txn_error_labels_with_write_concern"; // We are testing coordinateCommitTransaction, which requires the nodes to be started with // --shardsvr. -const st = new ShardingTest( - {config: 1, mongos: 1, shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}}}); +const st = new ShardingTest({ + config: TestData.configShard ? undefined : 1, + mongos: 1, + shards: {rs0: {nodes: [{}, {rsConfig: {priority: 0}}]}} +}); const rst = st.rs0; const primary = rst.getPrimary(); diff --git a/jstests/sharding/txn_addingParticipantParameter.js b/jstests/sharding/txn_addingParticipantParameter.js index e49b39ec67aab..b8b3e98929a82 100644 --- a/jstests/sharding/txn_addingParticipantParameter.js +++ b/jstests/sharding/txn_addingParticipantParameter.js @@ -3,10 +3,7 @@ * 'featureFlagAdditionalParticipants' is enabled. */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); // for FeatureFlagUtil.isEnabled +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/libs/fail_point_util.js"); load('jstests/sharding/libs/sharded_transactions_helpers.js'); @@ -14,7 +11,7 @@ const dbName = "test"; const collName = "foo"; const ns = dbName + "." + collName; -const shard0Name = TestData.catalogShard ? "config" : "txn_addingParticipantParameter-rs0"; +const shard0Name = TestData.configShard ? "config" : "txn_addingParticipantParameter-rs0"; const shard1Name = "txn_addingParticipantParameter-rs1"; const shard2Name = "txn_addingParticipantParameter-rs2"; const shard3Name = "txn_addingParticipantParameter-rs3"; @@ -166,4 +163,3 @@ const fpDataMultiple = { }; let expectedParticipantListMultiple = [shard0Name, shard1Name, shard2Name, shard3Name]; testAddingParticipant(true, expectedParticipantListMultiple, fpDataMultiple); -})(); diff --git a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js index 159662b1849ee..269bcc028642b 100644 --- a/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js +++ b/jstests/sharding/txn_commit_optimizations_for_read_only_shards.js @@ -6,8 +6,7 @@ * no failures, a participant having failed over, a participant being unable to satisfy the client's * writeConcern, and an invalid client writeConcern. * - * @tags: [requires_fcv_70, uses_transactions, uses_multi_shard_transaction, - * temporary_catalog_shard_incompatible] + * @tags: [uses_transactions, uses_multi_shard_transaction] */ (function() { @@ -64,7 +63,7 @@ TestData.transactionLifetimeLimitSeconds = 30; let st = new ShardingTest({ shards: 3, // Create shards with more than one node because we test for writeConcern majority failing. - config: 1, + config: TestData.configShard ? 
undefined : 1, other: { mongosOptions: { verbose: 3, @@ -348,6 +347,7 @@ const failureModes = { for (const failureModeName in failureModes) { for (const type in transactionTypes) { + clearRawMongoProgramOutput(); const lsid = getLSID(); txnNumber++; assert.lt(txnNumber, diff --git a/jstests/sharding/txn_single_write_shard_failover.js b/jstests/sharding/txn_single_write_shard_failover.js index a582622ed33dd..50aaa139ffebc 100644 --- a/jstests/sharding/txn_single_write_shard_failover.js +++ b/jstests/sharding/txn_single_write_shard_failover.js @@ -13,7 +13,6 @@ * @tags: [ * uses_multi_shard_transaction, * uses_transactions, - * temporary_catalog_shard_incompatible, * ] */ @@ -32,7 +31,7 @@ const ns2 = db2Name + "." + coll2Name; const st = new ShardingTest({ shards: {rs0: {nodes: 2}, rs1: {nodes: 1}}, - config: 1, + config: TestData.configShard ? undefined : 1, other: { mongosOptions: {verbose: 3}, } diff --git a/jstests/sharding/txn_two_phase_commit_server_status.js b/jstests/sharding/txn_two_phase_commit_server_status.js index 2525e16ee8603..d95a68ae0cfa7 100644 --- a/jstests/sharding/txn_two_phase_commit_server_status.js +++ b/jstests/sharding/txn_two_phase_commit_server_status.js @@ -1,5 +1,4 @@ // Basic test that the two-phase commit coordinator metrics fields appear in serverStatus output. -// @tags: [temporary_catalog_shard_incompatible] (function() { "use strict"; @@ -7,16 +6,16 @@ const st = new ShardingTest({shards: 1}); const res = assert.commandWorked(st.shard0.adminCommand({serverStatus: 1})); assert.neq(null, res.twoPhaseCommitCoordinator); -assert.eq(0, res.twoPhaseCommitCoordinator.totalCreated); -assert.eq(0, res.twoPhaseCommitCoordinator.totalStartedTwoPhaseCommit); -assert.eq(0, res.twoPhaseCommitCoordinator.totalCommittedTwoPhaseCommit); -assert.eq(0, res.twoPhaseCommitCoordinator.totalAbortedTwoPhaseCommit); +assert.hasFields(res.twoPhaseCommitCoordinator, ["totalCreated"]); +assert.hasFields(res.twoPhaseCommitCoordinator, ["totalStartedTwoPhaseCommit"]); +assert.hasFields(res.twoPhaseCommitCoordinator, ["totalCommittedTwoPhaseCommit"]); +assert.hasFields(res.twoPhaseCommitCoordinator, ["totalAbortedTwoPhaseCommit"]); assert.neq(null, res.twoPhaseCommitCoordinator.currentInSteps); -assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingParticipantList); -assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForVotes); -assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.writingDecision); -assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.waitingForDecisionAcks); -assert.eq(0, res.twoPhaseCommitCoordinator.currentInSteps.deletingCoordinatorDoc); +assert.hasFields(res.twoPhaseCommitCoordinator.currentInSteps, ["writingParticipantList"]); +assert.hasFields(res.twoPhaseCommitCoordinator.currentInSteps, ["waitingForVotes"]); +assert.hasFields(res.twoPhaseCommitCoordinator.currentInSteps, ["writingDecision"]); +assert.hasFields(res.twoPhaseCommitCoordinator.currentInSteps, ["waitingForDecisionAcks"]); +assert.hasFields(res.twoPhaseCommitCoordinator.currentInSteps, ["deletingCoordinatorDoc"]); st.stop(); })(); diff --git a/jstests/sharding/txn_writes_during_movechunk.js b/jstests/sharding/txn_writes_during_movechunk.js index 66cecf302ed28..31d40c816440b 100644 --- a/jstests/sharding/txn_writes_during_movechunk.js +++ b/jstests/sharding/txn_writes_during_movechunk.js @@ -1,5 +1,5 @@ // @tags: [uses_transactions] -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); (function() { 'use strict'; diff 
--git a/jstests/sharding/unfinished_migration_server_status.js b/jstests/sharding/unfinished_migration_server_status.js index 96ec9804f7cd6..b76e026dbc70e 100644 --- a/jstests/sharding/unfinished_migration_server_status.js +++ b/jstests/sharding/unfinished_migration_server_status.js @@ -5,7 +5,7 @@ (function() { "use strict"; -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); // Test calls step down on primaries. TestData.skipCheckingUUIDsConsistentAcrossCluster = true; diff --git a/jstests/sharding/unique_index_on_shardservers.js b/jstests/sharding/unique_index_on_shardservers.js index f34894f3c442c..6e483c5c3f5a8 100644 --- a/jstests/sharding/unique_index_on_shardservers.js +++ b/jstests/sharding/unique_index_on_shardservers.js @@ -21,7 +21,7 @@ assert.commandWorked(mongos.getDB("test").coll.createIndex({f: 1}, {"unique": tr // Add a node with --shardsvr to the replica set. let newNode; -if (TestData.catalogShard) { +if (TestData.configShard) { newNode = rs.add({'configsvr': '', rsConfig: {priority: 0, votes: 0}}); } else { newNode = rs.add({'shardsvr': '', rsConfig: {priority: 0, votes: 0}}); diff --git a/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js b/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js index 325e37815df44..0f971eb88519c 100644 --- a/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js +++ b/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_basic.js @@ -5,9 +5,7 @@ * initial request. * * @tags: [ - * requires_fcv_63, - * featureFlagUpdateOneWithoutShardKey, - * temporary_catalog_shard_incompatible, + * requires_fcv_71, * ] */ (function() { @@ -105,7 +103,7 @@ function testCommandUnshardedCollection(testCase) { startTransaction: true, autocommit: false }; - assert.commandFailedWithCode(mongosConn.runCommand(cmdObj), ErrorCodes.InvalidOptions); + assert.commandFailedWithCode(mongosConn.runCommand(cmdObj), ErrorCodes.NamespaceNotSharded); } function testCommandShardedCollectionOnSingleShard(testCase) { @@ -191,11 +189,9 @@ function testCommandShardedCollectionOnMultipleShards(testCase) { // _id, but the way the test is structured, the _id and the shard key have the same value when // inserted. if (res.targetDoc["_id"] < splitPoint) { - let hostname = st.shard0.host.split("/")[0]; - assert.eq(res.shardId, hostname); + assert.eq(res.shardId, st.shard0.shardName); } else { - let hostname = st.shard1.host.split("/")[0]; - assert.eq(res.shardId, hostname); + assert.eq(res.shardId, st.shard1.shardName); } // Check that no modifications were made to the documents. diff --git a/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_produces_upsert_document.js b/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_produces_upsert_document.js index d82cad8424fd7..3137627c3d7ed 100644 --- a/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_produces_upsert_document.js +++ b/jstests/sharding/updateOne_without_shard_key/cluster_query_without_shard_key_produces_upsert_document.js @@ -3,10 +3,10 @@ * filter and {upsert: true}. 
* * @tags: [ + * requires_fcv_71, * requires_sharding, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ diff --git a/jstests/sharding/updateOne_without_shard_key/cluster_write_without_shard_key_basic.js b/jstests/sharding/updateOne_without_shard_key/cluster_write_without_shard_key_basic.js index e2ef51ad8f91a..d063e4f6b362f 100644 --- a/jstests/sharding/updateOne_without_shard_key/cluster_write_without_shard_key_basic.js +++ b/jstests/sharding/updateOne_without_shard_key/cluster_write_without_shard_key_basic.js @@ -2,7 +2,7 @@ * Tests the internal command _clusterWriteWithoutShardKey. The command must be run in a * transaction. * - * @tags: [requires_fcv_63, featureFlagUpdateOneWithoutShardKey] + * @tags: [requires_fcv_71] */ (function() { "use strict"; @@ -359,7 +359,7 @@ function runAndVerifyCommand(testCase) { autocommit: false }; mongosConn.getCollection(unshardedCollName).insert([{_id: _id, a: aFieldValue}]); - assert.commandFailedWithCode(mongosConn.runCommand(cmdObj), ErrorCodes.InvalidOptions); + assert.commandFailedWithCode(mongosConn.runCommand(cmdObj), ErrorCodes.NamespaceNotSharded); // Must run in a transaction. cmdObj = { @@ -369,6 +369,90 @@ function runAndVerifyCommand(testCase) { targetDocId: {_id: _id}, }; assert.commandFailedWithCode(mongosConn.runCommand(cmdObj), ErrorCodes.IllegalOperation); + + // Cannot pass $_originalQuery as an external client in an update command. + cmdObj = { + update: collName, + updates: [ + { + q: {}, + u: {$set: {a: aFieldValue}}, + sampleId: UUID(), + }, + ], + $_originalQuery: {}, + }; + assert.commandFailedWithCode(shardConn.getCollection(collName).runCommand(cmdObj), + ErrorCodes.InvalidOptions); + + // Cannot pass $_originalCollation as an external client in an update command. + cmdObj = { + update: collName, + updates: [ + { + q: {}, + u: {$set: {a: aFieldValue}}, + sampleId: UUID(), + collation: {locale: "simple"}, + }, + ], + $_originalCollation: {}, + }; + assert.commandFailedWithCode(shardConn.getCollection(collName).runCommand(cmdObj), + ErrorCodes.InvalidOptions); + + // Cannot pass $_originalQuery as an external client in a delete command. + cmdObj = { + delete: collName, + deletes: [ + { + q: {}, + limit: 1, + sampleId: UUID(), + }, + ], + $_originalQuery: {}, + }; + assert.commandFailedWithCode(shardConn.getCollection(collName).runCommand(cmdObj), + ErrorCodes.InvalidOptions); + + // Cannot pass $_originalCollation as an external client in a delete command. + cmdObj = { + delete: collName, + deletes: [ + { + q: {}, + limit: 1, + sampleId: UUID(), + collation: {locale: "simple"}, + }, + ], + $_originalCollation: {}, + }; + assert.commandFailedWithCode(shardConn.getCollection(collName).runCommand(cmdObj), + ErrorCodes.InvalidOptions); + + // Cannot pass $_originalQuery as an external client in a findandmodify command. + cmdObj = { + findandmodify: collName, + query: {}, + update: [{$set: {a: aFieldValue}}], + sampleId: UUID(), + $_originalQuery: {}, + }; + assert.commandFailedWithCode(shardConn.getCollection(collName).runCommand(cmdObj), + ErrorCodes.InvalidOptions); + + // Cannot pass $_originalCollation as an external client in a findandmodify command. 
+ cmdObj = { + findandmodify: collName, + query: {}, + update: [{$set: {a: aFieldValue}}], + sampleId: UUID(), + $_originalCollation: {}, + }; + assert.commandFailedWithCode(shardConn.getCollection(collName).runCommand(cmdObj), + ErrorCodes.InvalidOptions); })(); st.stop(); diff --git a/jstests/sharding/updateOne_without_shard_key/deleteOne_without_shard_key_basic.js b/jstests/sharding/updateOne_without_shard_key/deleteOne_without_shard_key_basic.js index 685dbbeeb249e..80cfd29e91a12 100644 --- a/jstests/sharding/updateOne_without_shard_key/deleteOne_without_shard_key_basic.js +++ b/jstests/sharding/updateOne_without_shard_key/deleteOne_without_shard_key_basic.js @@ -3,8 +3,7 @@ * * @tags: [ * requires_sharding, - * requires_fcv_63, - * featureFlagUpdateOneWithoutShardKey, + * requires_fcv_71, * ] */ (function() { diff --git a/jstests/sharding/updateOne_without_shard_key/dropping_collection_during_clusterQueryWithoutShardKey_errors.js b/jstests/sharding/updateOne_without_shard_key/dropping_collection_during_clusterQueryWithoutShardKey_errors.js new file mode 100644 index 0000000000000..78f2946435267 --- /dev/null +++ b/jstests/sharding/updateOne_without_shard_key/dropping_collection_during_clusterQueryWithoutShardKey_errors.js @@ -0,0 +1,87 @@ +/** + * Verify that running _clusterQueryWithoutShardKey on a collection that is not sharded errors + * with NamespaceNotSharded. + * + * @tags: [ + * requires_sharding, + * requires_fcv_71, + * uses_transactions, + * uses_multi_shard_transaction, + * ] + */ + +(function() { +"use strict"; + +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); +load("jstests/libs/fail_point_util.js"); + +// 2 shards single node, 1 mongos, 1 config server 3-node. +const st = new ShardingTest({}); +const dbName = "testDb"; +const collName = "testColl"; +const nss = dbName + "." + collName; +const splitPoint = 0; +const docsToInsert = + [{_id: 0, x: -2, y: 1}, {_id: 1, x: -1, y: 2}, {_id: 3, x: 1, y: 3}, {_id: 4, x: 2, y: 1}]; +const testColl = st.getDB(dbName).getCollection(collName); + +const findAndModifyThread = new Thread((host, dbName, collName) => { + const conn = new Mongo(host); + const cmdObj = { + findAndModify: collName, + query: {y: 1}, + update: {y: 5}, + }; + assert.commandFailedWithCode(conn.getDB(dbName).getCollection(collName).runCommand(cmdObj), + ErrorCodes.NamespaceNotSharded); + assert.eq(null, conn.getDB(dbName).getCollection(collName).findOne({y: 5})); +}, st.s.host, dbName, collName); + +const updateOneThread = new Thread((host, dbName, collName) => { + const conn = new Mongo(host); + const cmdObj = {update: collName, updates: [{q: {y: 2}, u: {$set: {z: 0}}}]}; + assert.commandFailedWithCode(conn.getDB(dbName).getCollection(collName).runCommand(cmdObj), + ErrorCodes.NamespaceNotSharded); + assert.eq(null, conn.getDB(dbName).getCollection(collName).findOne({z: 0})); +}, st.s.host, dbName, collName); + +const deleteOneThread = new Thread((host, dbName, collName) => { + const conn = new Mongo(host); + const cmdObj = { + delete: collName, + deletes: [{q: {y: 3}, limit: 1}], + }; + assert.commandFailedWithCode(conn.getDB(dbName).getCollection(collName).runCommand(cmdObj), + ErrorCodes.NamespaceNotSharded); + assert.neq(null, conn.getDB(dbName).getCollection(collName).findOne({y: 3})); +}, st.s.host, dbName, collName); + +// Sets up a 2 shard cluster using 'x' as a shard key where Shard 0 owns x < +// splitPoint and Shard 1 x >= splitPoint. 
+WriteWithoutShardKeyTestUtil.setupShardedCollection( + st, nss, {x: 1}, [{x: splitPoint}], [{query: {x: splitPoint}, shard: st.shard1.shardName}]); + +let hangQueryFp = configureFailPoint(st.s, "hangBeforeMetadataRefreshClusterQuery"); +assert.commandWorked(testColl.insert(docsToInsert)); + +findAndModifyThread.start(); +updateOneThread.start(); +deleteOneThread.start(); +hangQueryFp.wait(3); + +// Drop sharded collection. +assert.commandWorked(st.s.getDB(dbName).runCommand({drop: collName})); + +// Create unsharded collection. +assert.commandWorked(st.s.getDB(dbName).runCommand({create: collName})); +assert.commandWorked(testColl.insert(docsToInsert)); + +hangQueryFp.off(); + +findAndModifyThread.join(); +updateOneThread.join(); +deleteOneThread.join(); + +st.stop(); +})(); diff --git a/jstests/sharding/updateOne_without_shard_key/errors.js b/jstests/sharding/updateOne_without_shard_key/errors.js index 953317d6300ff..eecd5ac9b6101 100644 --- a/jstests/sharding/updateOne_without_shard_key/errors.js +++ b/jstests/sharding/updateOne_without_shard_key/errors.js @@ -3,10 +3,9 @@ * * @tags: [ * requires_sharding, - * requires_fcv_63, + * requires_fcv_71, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ diff --git a/jstests/sharding/updateOne_without_shard_key/explain.js b/jstests/sharding/updateOne_without_shard_key/explain.js new file mode 100644 index 0000000000000..f8116c7992fd9 --- /dev/null +++ b/jstests/sharding/updateOne_without_shard_key/explain.js @@ -0,0 +1,489 @@ +/** + * Test explain output for updateOne, deleteOne, and findAndModify without shard key. + * + * @tags: [ + * requires_sharding, + * requires_fcv_71, + * ] + */ + +(function() { +"use strict"; + +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + +// 2 shards single node, 1 mongos, 1 config server 3-node. +const st = new ShardingTest({}); +const dbName = "testDb"; +const collName = "testColl"; +const nss = dbName + "." + collName; +const splitPoint = 0; +const dbConn = st.s.getDB(dbName); +const docsToInsert = [ + {_id: 0, x: -2, y: 1, a: [1, 2, 3]}, + {_id: 1, x: -1, y: 1, a: [1, 2, 3]}, + {_id: 2, x: 1, y: 1, a: [1, 2, 3]}, + {_id: 3, x: 2, y: 1, a: [1, 2, 3]} +]; + +// Sets up a 2 shard cluster using 'x' as a shard key where Shard 0 owns x < +// splitPoint and Shard 1 splitPoint >= 0. +WriteWithoutShardKeyTestUtil.setupShardedCollection( + st, nss, {x: 1}, [{x: splitPoint}], [{query: {x: splitPoint}, shard: st.shard1.shardName}]); + +assert.commandWorked(dbConn[collName].insert(docsToInsert)); + +let listCollRes = assert.commandWorked(dbConn.runCommand({listCollections: 1})); +// There should only be one collection created in this test. +const usingClusteredIndex = listCollRes.cursor.firstBatch[0].options.clusteredIndex != null; + +let testCases = [ + { + logMessage: "Running explain for findAndModify update with sort.", + hasSort: true, + opType: "UPDATE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1}, + sort: {x: 1}, + update: {$inc: {z: 1}}, + } + }, + }, + { + logMessage: "Running explain for findAndModify update with sort and upsert: true.", + hasSort: true, + isUpsert: true, + opType: "UPDATE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 5}, // Query matches no documents. 
+ sort: {x: 1}, + update: {$inc: {z: 1}}, + upsert: true + } + }, + }, + { + logMessage: "Running explain for findAndModify update.", + opType: "UPDATE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1}, + update: {$inc: {z: 1}}, + } + }, + }, + { + logMessage: "Running explain for findAndModify update without sort and upsert: true.", + isUpsert: true, + opType: "UPDATE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 5}, // Query matches no documents. + update: {$inc: {z: 1}}, + upsert: true, + } + }, + }, + { + logMessage: "Running explain for findAndModify remove with sort.", + hasSort: true, + opType: "DELETE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1}, + sort: {x: 1}, + remove: true, + } + }, + }, + { + logMessage: "Running explain for findAndModify remove without sort.", + opType: "DELETE", + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1}, + remove: true, + } + }, + }, + { + logMessage: + "Running explain for findAndModify remove with positional projection with sort.", + opType: "DELETE", + hasSort: true, + isPositionalProjection: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + fields: {'a.$': 1}, + sort: {x: 1}, + remove: true, + } + } + }, + { + logMessage: + "Running explain for findAndModify remove with positional projection without sort.", + opType: "DELETE", + isPositionalProjection: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + fields: {'a.$': 1}, + remove: true, + } + } + }, + { + logMessage: + "Running explain for findAndModify update with positional projection with sort.", + opType: "UPDATE", + hasSort: true, + isPositionalProjection: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + sort: {x: 1}, + fields: {'a.$': 1}, + update: {$inc: {z: 1}}, + } + } + }, + { + logMessage: + "Running explain for findAndModify update with positional projection without sort.", + opType: "UPDATE", + isPositionalProjection: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + fields: {'a.$': 1}, + update: {$inc: {z: 1}}, + } + } + }, + { + logMessage: "Running explain for findAndModify update with positional update with sort.", + opType: "UPDATE", + hasSort: true, + isPositionalUpdate: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + sort: {x: 1}, + update: {$set: {"a.$": 3}}, + } + } + }, + { + logMessage: "Running explain for findAndModify update with positional update without sort.", + opType: "UPDATE", + isPositionalUpdate: true, + cmdObj: { + explain: { + findAndModify: collName, + query: {y: 1, a: 1}, + update: {$set: {"a.$": 3}}, + } + } + }, + { + logMessage: "Running explain for updateOne.", + opType: "UPDATE", + cmdObj: { + explain: { + update: collName, + updates: [{ + q: {y: 1}, + u: {$set: {z: 1}}, + multi: false, + upsert: false, + }], + }, + }, + }, + { + logMessage: "Running explain for updateOne and upsert: true.", + isUpsert: true, + opType: "UPDATE", + cmdObj: { + explain: { + update: collName, + updates: [{ + q: {y: 5}, + u: {$set: {z: 1}}, + multi: false, + upsert: true, + }], // Query matches no documents. 
+ }, + }, + }, + { + logMessage: "Running explain for updateOne and with positional update.", + opType: "UPDATE", + isPositionalUpdate: true, + cmdObj: { + explain: { + update: collName, + updates: [{ + q: {y: 1, a: 1}, + u: {$set: {"a.$": 3}}, + multi: false, + upsert: false, + }], + }, + }, + }, + { + logMessage: "Running explain for deleteOne.", + opType: "DELETE", + cmdObj: { + explain: { + delete: collName, + deletes: [{q: {y: 1}, limit: 1}], + }, + }, + }, +]; + +function runTestCase(testCase) { + jsTestLog(testCase.logMessage + "\n" + tojson(testCase)); + + let verbosityLevels = ["queryPlanner", "executionStats", "allPlansExecution"]; + verbosityLevels.forEach(verbosityLevel => { + jsTestLog("Running with verbosity level: " + verbosityLevel); + let explainCmdObj = Object.assign(testCase.cmdObj, {verbosity: verbosityLevel}); + let res = assert.commandWorked(dbConn.runCommand(explainCmdObj)); + validateResponse(res, testCase, verbosityLevel); + }); +} + +function validateResponse(res, testCase, verbosity) { + assert.eq(res.queryPlanner.winningPlan.stage, "SHARD_WRITE"); + + if (testCase.hasSort) { + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE_SORT"); + } else { + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.stage, "SHARD_MERGE"); + } + + if (testCase.isPositionalProjection) { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.stage, "PROJECTION_DEFAULT"); + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + testCase.opType); + if (testCase.hasSort) { + assert.eq(res.queryPlanner.winningPlan.shards[0] + .winningPlan.inputStage.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.queryPlanner.winningPlan.shards[0] + .winningPlan.inputStage.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } else { + if (usingClusteredIndex) { + assert.eq( + res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq( + res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.inputStage.stage, + "FETCH", + res); + assert.eq(res.queryPlanner.winningPlan.shards[0] + .winningPlan.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } + } + } else if (testCase.isPositionalUpdate) { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.stage, testCase.opType); + if (testCase.hasSort) { + assert.eq( + res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.queryPlanner.winningPlan.shards[0] + .winningPlan.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } else { + if (usingClusteredIndex) { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + "FETCH"); + assert.eq( + res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.inputStage.stage, + "IXSCAN"); + } + } + } else { + if (usingClusteredIndex) { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.stage, testCase.opType); + assert.eq(res.queryPlanner.winningPlan.shards[0].winningPlan.inputStage.stage, + "IDHACK"); + } + } + + assert.eq(res.queryPlanner.winningPlan.shards.length, + 1); // Only 1 shard targeted by the write. + assert.eq(res.queryPlanner.winningPlan.inputStage.winningPlan.shards.length, + 2); // 2 shards had matching documents. 
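
The stage assertions in this explain test walk a two-level plan: an outer `SHARD_WRITE` phase that targets a single shard, whose `inputStage` is the cluster query that ran on both shards. The following is a minimal sketch of that walk, not part of the patch, assuming the `dbConn` and `collName` fixtures this file already defines and using only stage names the test itself asserts:

```js
// Sketch only (not part of the patch): walking the nested explain output for the simplest
// non-positional, non-clustered case covered by this test.
const sketchRes = assert.commandWorked(dbConn.runCommand({
    explain: {update: collName, updates: [{q: {y: 1}, u: {$set: {z: 1}}, multi: false}]},
    verbosity: "queryPlanner"
}));
const writePhase = sketchRes.queryPlanner.winningPlan;   // stage: "SHARD_WRITE"
const queryPhase = writePhase.inputStage.winningPlan;    // stage: "SHARD_MERGE" or "SHARD_MERGE_SORT"
assert.eq("SHARD_WRITE", writePhase.stage);
assert.eq(1, writePhase.shards.length);                  // the write targets exactly one shard
assert.eq(2, queryPhase.shards.length);                  // the query phase visited both shards
```
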
+ + if (verbosity === "queryPlanner") { + assert.eq(res.executionStats, null); + } else { + assert.eq(res.executionStats.executionStages.stage, "SHARD_WRITE"); + if (testCase.isPositionalProjection) { + assert.eq(res.executionStats.executionStages.shards[0].executionStages.stage, + "PROJECTION_DEFAULT"); + assert.eq(res.executionStats.executionStages.shards[0].executionStages.inputStage.stage, + testCase.opType); + if (testCase.hasSort) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } else { + if (usingClusteredIndex) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } + } + } else if (testCase.isPositionalUpdate) { + assert.eq(res.executionStats.executionStages.shards[0].executionStages.stage, + testCase.opType); + if (testCase.hasSort) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.stage, + "FETCH"); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.inputStage.stage, + "IXSCAN"); + } else { + if (usingClusteredIndex) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.stage, + "CLUSTERED_IXSCAN"); + } else { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.stage, + "FETCH"); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.inputStage.stage, + "IXSCAN"); + } + } + } else { + if (usingClusteredIndex) { + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.inputStage.stage, + "CLUSTERED_IXSCAN"); + + } else { + assert.eq(res.executionStats.executionStages.shards[0].executionStages.stage, + testCase.opType); + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.inputStage.stage, + "IDHACK"); + } + } + assert.eq(res.executionStats.executionStages.shards.length, + 1); // Only 1 shard targeted by the write. + assert.eq(res.executionStats.inputStage.executionStages.shards.length, + 2); // 2 shards had matching documents. + + // We use a dummy _id target document for the Write Phase which should not match any + // existing documents in the collection. This will at least preserve the query plan, + // but may lead to incorrect executionStats. + if (testCase.isUpsert) { + assert.eq(res.executionStats.nReturned, 0); + assert.eq(res.executionStats.executionStages.shards[0].executionStages.nWouldModify, 0); + assert.eq(res.executionStats.executionStages.shards[0].executionStages.nWouldUpsert, 1); + assert.eq(res.executionStats.inputStage.nReturned, 0); + } else { + // TODO SERVER-29449: Properly report explain results for sharded queries with a + // limit. assert.eq(res.executionStats.nReturned, 1); + if (testCase.opType === "DELETE") { + // We use a dummy _id target document for the Write Phase which should not match any + // existing documents in the collection. This will at least preserve the query plan, + // but may lead to incorrect executionStats. 
+ if (testCase.isPositionalProjection) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.nWouldDelete, + 0); + } else { + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.nWouldDelete, + 0); + } + } else { + // We use a dummy _id target document for the Write Phase which should not match any + // existing documents in the collection. This will at least preserve the query plan, + // but may lead to incorrect executionStats. + if (testCase.isPositionalProjection) { + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.nWouldModify, + 0); + assert.eq(res.executionStats.executionStages.shards[0] + .executionStages.inputStage.nWouldUpsert, + 0); + } else { + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.nWouldModify, + 0); + assert.eq( + res.executionStats.executionStages.shards[0].executionStages.nWouldUpsert, + 0); + } + } + assert.eq(res.executionStats.inputStage.nReturned, 2); + } + + if (testCase.hasSort) { + assert.eq(res.executionStats.inputStage.executionStages.stage, "SHARD_MERGE_SORT"); + } else { + assert.eq(res.executionStats.inputStage.executionStages.stage, "SHARD_MERGE"); + } + } + + assert(res.serverInfo); + assert(res.serverParameters); + assert(res.command); + + // Checks that 'command' field of the explain output is the same command that we originally + // wanted to explain. + for (const [key, value] of Object.entries(testCase.cmdObj.explain)) { + assert.eq(res.command[key], value); + } +} + +testCases.forEach(testCase => { + runTestCase(testCase); +}); + +st.stop(); +})(); diff --git a/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key.js b/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key.js index 4a3bb99dbec4c..d7a4ae121faaf 100644 --- a/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key.js +++ b/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key.js @@ -4,10 +4,9 @@ * * @tags: [ * requires_sharding, - * requires_fcv_63, + * requires_fcv_71, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ diff --git a/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key_sort.js b/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key_sort.js index 05595265f7035..f6cfe7d4ef953 100644 --- a/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key_sort.js +++ b/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key_sort.js @@ -3,10 +3,9 @@ * * @tags: [ * requires_sharding, - * requires_fcv_70, + * requires_fcv_71, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ diff --git a/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key_text_search.js b/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key_text_search.js new file mode 100644 index 0000000000000..b9426b4cdc28e --- /dev/null +++ b/jstests/sharding/updateOne_without_shard_key/find_and_modify_without_shard_key_text_search.js @@ -0,0 +1,136 @@ +/** + * Test findAndModify without shard key works with $text search predicates. 
+ * + * @tags: [ + * requires_sharding, + * requires_fcv_71, + * uses_transactions, + * uses_multi_shard_transaction, + * ] + */ + +(function() { +"use strict"; + +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + +// 2 shards single node, 1 mongos, 1 config server 3-node. +const st = new ShardingTest({}); +const dbName = "testDb"; +const collName = "testColl"; +const nss = dbName + "." + collName; +const splitPoint = 0; +const docsToInsert = [ + {_id: 0, x: -2, numbers: "one"}, + {_id: 1, x: -1, numbers: "two"}, + {_id: 2, x: 1, numbers: "one"}, + {_id: 3, x: 2, numbers: "two one"}, + {_id: 4, x: 3, numbers: "two three"}, +]; +const dbConn = st.s.getDB(dbName); +const coll = dbConn.getCollection(collName); + +// Sets up a 2 shard cluster using 'x' as a shard key where Shard 0 owns x < +// splitPoint and Shard 1 splitPoint >= 0. +WriteWithoutShardKeyTestUtil.setupShardedCollection( + st, nss, {x: 1}, [{x: splitPoint}], [{query: {x: splitPoint}, shard: st.shard1.shardName}]); + +assert.commandWorked(coll.insert(docsToInsert)); +assert.commandWorked(coll.createIndex({numbers: "text"})); + +function runTest(testCase) { + let res = assert.commandWorked(coll.runCommand(testCase.cmdObj)); + assert.eq(res.lastErrorObject.n, 1); + assert.eq(res.value.numbers, testCase.expectedResult.numbers); + if (testCase.projectTextScore) { + assert(res.value.score); + } else { + assert(!res.value.score); + } + if (testCase.opType === "update") { + assert.eq(res.lastErrorObject.updatedExisting, true); + } +} + +let testCases = [ + { + logMessage: "Running findAndModify update with textScore projection.", + opType: "update", + projectTextScore: true, + cmdObj: { + findAndModify: collName, + query: {$text: {$search: "one"}}, + fields: {score: {$meta: "textScore"}}, + update: [{$set: {a: 1}}], + }, + expectedResult: {numbers: "one"}, + }, + { + logMessage: "Running findAndModify update with textScore sort.", + opType: "update", + cmdObj: { + findAndModify: collName, + query: {$text: {$search: "two"}}, + sort: {score: {$meta: "textScore"}}, + update: [{$set: {a: 1}}], + }, + expectedResult: {numbers: "two"}, + }, + { + logMessage: "Running findAndModify update with textScore sort and projection.", + projectTextScore: true, + opType: "update", + cmdObj: { + findAndModify: collName, + query: {$text: {$search: "two"}}, + sort: {score: {$meta: "textScore"}}, + fields: {score: {$meta: "textScore"}}, + update: [{$set: {a: 1}}], + }, + expectedResult: {numbers: "two"}, + }, + { + logMessage: "Running findAndModify remove with textScore projection.", + opType: "delete", + projectTextScore: true, + cmdObj: { + findAndModify: collName, + query: {$text: {$search: "one"}}, + fields: {score: {$meta: "textScore"}}, + remove: true, + }, + expectedResult: {numbers: "one"}, + }, + { + logMessage: "Running findAndModify remove with textScore sort.", + opType: "delete", + cmdObj: { + findAndModify: collName, + query: {$text: {$search: "two one"}}, + sort: {score: {$meta: "textScore"}}, + remove: true, + }, + expectedResult: {numbers: "two one"}, + }, + { + logMessage: "Running findAndModify remove with textScore sort and projection.", + projectTextScore: true, + opType: "delete", + cmdObj: { + findAndModify: collName, + query: {$text: {$search: "two three"}}, + fields: {score: {$meta: "textScore"}}, + sort: {score: {$meta: "textScore"}}, + remove: true, + }, + expectedResult: {numbers: "two three"}, + }, +]; + +testCases.forEach(testCase => { + jsTestLog(testCase.logMessage); + 
runTest(testCase); +}); + +st.stop(); +})(); diff --git a/jstests/sharding/updateOne_without_shard_key/retryable_writes.js b/jstests/sharding/updateOne_without_shard_key/retryable_writes.js index 230a9b2976960..5982d8510c759 100644 --- a/jstests/sharding/updateOne_without_shard_key/retryable_writes.js +++ b/jstests/sharding/updateOne_without_shard_key/retryable_writes.js @@ -3,10 +3,9 @@ * * @tags: [ * requires_sharding, - * requires_fcv_63, + * requires_fcv_71, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ diff --git a/jstests/sharding/updateOne_without_shard_key/single_targetable_shard.js b/jstests/sharding/updateOne_without_shard_key/single_targetable_shard.js index f94aa26187f08..a96dc9d3a0b32 100644 --- a/jstests/sharding/updateOne_without_shard_key/single_targetable_shard.js +++ b/jstests/sharding/updateOne_without_shard_key/single_targetable_shard.js @@ -3,17 +3,12 @@ * * @tags: [ * requires_sharding, - * requires_fcv_70, + * requires_fcv_71, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); // Make sure we're testing with no implicit session. @@ -103,5 +98,4 @@ configurations.forEach(config => { }); }); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/sharding/updateOne_without_shard_key/updateOne_without_shard_key_basic.js b/jstests/sharding/updateOne_without_shard_key/updateOne_without_shard_key_basic.js index 43190e150d122..8dab64fc1102b 100644 --- a/jstests/sharding/updateOne_without_shard_key/updateOne_without_shard_key_basic.js +++ b/jstests/sharding/updateOne_without_shard_key/updateOne_without_shard_key_basic.js @@ -3,14 +3,10 @@ * * @tags: [ * requires_sharding, - * requires_fcv_63, - * featureFlagUpdateOneWithoutShardKey, + * requires_fcv_71, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); // Make sure we're testing with no implicit session. 
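
The hunks above and below also migrate these tests from the legacy IIFE-plus-`load()` skeleton to module-style imports. A minimal sketch of the resulting skeleton, not part of the patch and only illustrative, using the helper path already referenced in this diff:

```js
// Sketch only (not part of the patch): module-style test skeleton. Helpers are imported,
// the IIFE wrapper and "use strict" are dropped, and the body runs at top level.
import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js";

const st = new ShardingTest({shards: 2});
// Feature-gated behaviour can still be probed through the imported helper, e.g.
// FeatureFlagUtil.isEnabled(...), instead of relying on a featureFlag* test tag.
assert.commandWorked(st.s.getDB("testDb").getCollection("testColl").insert({_id: 0, x: 1}));
st.stop();
```
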
@@ -389,7 +385,7 @@ const testCases = [ updates: [{q: {y: 5}, u: {_id: 5, x: -1}, upsert: true}], }, options: [{ordered: true}, {ordered: false}], - expectedMods: [{_id: 5, x: -1, y: 5}], + expectedMods: [{_id: 5}, {x: -1}], expectedResponse: {n: 1, nModified: 0, upserted: [{"index": 0, _id: 5}]}, dbName: dbName, collName: collName @@ -408,7 +404,7 @@ const testCases = [ }, mustBeInRetryableWriteOrTransaction: true, options: [{ordered: true}, {ordered: false}], - expectedMods: [{_id: 0, x: xFieldValShard0_1, y: yFieldVal + 1}, {_id: 6, y: 6, x: -1}], + expectedMods: [{_id: 0}, {y: yFieldVal + 1}, {_id: 6}, {x: -1}], expectedResponse: {n: 2, nModified: 1, upserted: [{"index": 1, _id: 6}]}, dbName: dbName, collName: collName @@ -439,4 +435,3 @@ configurations.forEach(config => { }); st.stop(); -})(); diff --git a/jstests/sharding/updateOne_without_shard_key/would_change_owning_shard_test.js b/jstests/sharding/updateOne_without_shard_key/would_change_owning_shard_test.js index 7a8315adf008b..17f6f36fb5795 100644 --- a/jstests/sharding/updateOne_without_shard_key/would_change_owning_shard_test.js +++ b/jstests/sharding/updateOne_without_shard_key/would_change_owning_shard_test.js @@ -4,17 +4,13 @@ * * @tags: [ * requires_sharding, - * requires_fcv_63, + * requires_fcv_71, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ -(function() { -"use strict"; - -load("jstests/libs/feature_flag_util.js"); +import {FeatureFlagUtil} from "jstests/libs/feature_flag_util.js"; load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); // Make sure we're testing with no implicit session. @@ -92,4 +88,3 @@ configurations.forEach(config => { }); st.stop(); -})(); diff --git a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_hint.js b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_hint.js new file mode 100644 index 0000000000000..39b607a47ea76 --- /dev/null +++ b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_hint.js @@ -0,0 +1,91 @@ +/** + * Test writes without shard key uses the hint provided in the original query. + * + * @tags: [ + * requires_sharding, + * requires_fcv_71, + * uses_transactions, + * uses_multi_shard_transaction, + * ] + */ + +(function() { +"use strict"; + +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + +// 2 shards single node, 1 mongos, 1 config server 3-node. +const st = new ShardingTest({}); +const dbName = "testDb"; +const collName = "testColl"; +const nss = dbName + "." + collName; +const splitPoint = 0; +const docsToInsert = [ + {_id: 0, x: -2, y: 1}, + {_id: 1, x: -1, y: 1}, + {_id: 2, x: 1, y: 1}, + {_id: 3, x: 2, y: 1}, +]; +const dbConn = st.s.getDB(dbName); +const coll = dbConn.getCollection(collName); + +// Sets up a 2 shard cluster using 'x' as a shard key where Shard 0 owns x < +// splitPoint and Shard 1 splitPoint >= 0. +WriteWithoutShardKeyTestUtil.setupShardedCollection( + st, nss, {x: 1}, [{x: splitPoint}], [{query: {x: splitPoint}, shard: st.shard1.shardName}]); + +assert.commandWorked(coll.insert(docsToInsert)); + +// Create a sparse index on 'a' which has no documents. We use a hint for a sparse index to assert +// that we use the expected index, which in this case should match no documents even though we have +// potentially matching documents. 
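
The comment above relies on a property of sparse indexes: documents that lack the indexed field are absent from the index, so a plan forced onto it examines nothing. A small illustration of that effect, not part of the patch, assuming the `coll` handle and the four `{y: 1}` documents this test inserts:

```js
// Illustration only (not part of the patch): with a sparse index on a field no document has,
// a hinted read matches nothing even though an unhinted one would.
assert.commandWorked(coll.createIndex({a: 1}, {sparse: true}));
assert.eq(4, coll.find({y: 1}).itcount());               // collection scan sees the documents
assert.eq(0, coll.find({y: 1}).hint({a: 1}).itcount());  // sparse-index plan sees none of them
```
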
+assert.commandWorked(coll.createIndex({a: 1}, {sparse: true})); + +function runTest(testCase) { + let res = assert.commandWorked(coll.runCommand(testCase.cmdObj)); + if (testCase.op == "update") { + assert.eq(res.nModified, 0); + assert.eq(res.n, 0); + } else if (testCase.op == "findAndModify") { + assert.eq(res.lastErrorObject.n, 0); + } else { + assert.eq(res.n, 0); + } +} + +let testCases = [ + { + logMessage: "Running updateOne with hint.", + op: "update", + cmdObj: { + update: collName, + updates: [{q: {y: 1}, u: {$set: {z: 3}}, hint: {a: 1}}], + }, + }, + { + logMessage: "Running findAndModify with hint.", + op: "findAndModify", + cmdObj: { + findAndModify: collName, + query: {y: 1}, + update: {$set: {z: 4}}, + hint: {a: 1}, + }, + }, + { + logMessage: "Running deleteOne with hint.", + op: "delete", + cmdObj: { + delete: collName, + deletes: [{q: {y: 1}, limit: 1, hint: {a: 1}}], + }, + } +]; + +testCases.forEach(testCase => { + jsTestLog(testCase.logMessage); + runTest(testCase); +}); + +st.stop(); +})(); diff --git a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_metrics.js b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_metrics.js new file mode 100644 index 0000000000000..fd2670caddbe8 --- /dev/null +++ b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_metrics.js @@ -0,0 +1,417 @@ +/** + * Tests that verify metrics counters related to updateOne, deleteOne, and findAndModify commands + * are correctly incremented. + * + * @tags: [ + * requires_sharding, + * requires_fcv_71, + * ] + */ + +(function() { +"use strict"; + +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + +// 2 shards single node, 1 mongos, 1 config server 3-node. +const st = new ShardingTest({}); +const dbName = "testDb"; +const collectionName = "testColl"; +const ns = dbName + "." + collectionName; +const testColl = st.getDB(dbName).getCollection(collectionName); +const unshardedCollName = "unshardedColl"; +const unshardedColl = st.getDB(dbName).getCollection(unshardedCollName); + +const splitPoint = 0; + +// Sets up a 2 shard cluster using 'x' as a shard key where Shard 0 owns x < +// splitPoint and Shard 1 x >= splitPoint. +WriteWithoutShardKeyTestUtil.setupShardedCollection( + st, ns, {x: 1}, [{x: splitPoint}], [{query: {x: splitPoint}, shard: st.shard1.shardName}]); + +function runCommandAndVerify(testCase) { + testCase.insertDocs.forEach(function(insertDoc) { + assert.commandWorked(testCase.collName.insert(insertDoc)); + }); + const res = st.getDB(dbName).runCommand(testCase.cmdObj); + assert.commandWorked(res); + + if (testCase.resultDocs) { + testCase.resultDocs.forEach(function(resultDoc) { + assert.eq(resultDoc, testCase.collName.findOne(resultDoc)); + }); + } else { + // Check if all the inserted docs were deleted in the delete commands. + testCase.insertDocs.forEach(function(insertDoc) { + assert.eq(null, testCase.collName.findOne(insertDoc)); + }); + } +} + +function runCommandAndCheckError(testCase) { + const res = st.getDB(dbName).runCommand(testCase.cmdObj); + assert.commandFailedWithCode(res, testCase.errorCode); + + // FindAndModify is not a batch command, thus will not have a writeErrors field. 
+ if (!testCase.cmdObj.findAndModify) { + res.writeErrors.forEach(writeError => { + assert(testCase.errorCode.includes(writeError.code)); + assert(testCase.index.includes(writeError.index)); + }); + } +} + +let mongosServerStatus = st.s.getDB(dbName).adminCommand({serverStatus: 1}); + +// Verify all all counter metrics are 0 before executing write commands. +assert.eq(0, mongosServerStatus.metrics.query.updateOneTargetedShardedCount); +assert.eq(0, mongosServerStatus.metrics.query.deleteOneTargetedShardedCount); +assert.eq(0, mongosServerStatus.metrics.query.findAndModifyTargetedShardedCount); +assert.eq(0, mongosServerStatus.metrics.query.updateOneUnshardedCount); +assert.eq(0, mongosServerStatus.metrics.query.deleteOneUnshardedCount); +assert.eq(0, mongosServerStatus.metrics.query.findAndModifyUnshardedCount); +assert.eq(0, mongosServerStatus.metrics.query.updateOneNonTargetedShardedCount); +assert.eq(0, mongosServerStatus.metrics.query.deleteOneNonTargetedShardedCount); +assert.eq(0, mongosServerStatus.metrics.query.findAndModifyNonTargetedShardedCount); + +const testCases = [ + { + // This will increase updateOneNonTargetedShardedCount by 1. + logMessage: + "Running non-targeted updateOne command on sharded collection without shard key.", + collName: testColl, + insertDocs: [{_id: 0, x: 0, a: 0}], + resultDocs: [{_id: 0, x: 0, a: 5}], + cmdObj: { + update: collectionName, + updates: [{q: {a: 0}, u: {$inc: {a: 5}}}], + } + }, + { + // This will increase updateOneTargetedShardedCount by 1. + logMessage: + "Running targeted updateOne command on sharded collection without shard key but _id is specified.", + collName: testColl, + insertDocs: [{_id: 1, x: 1}], + resultDocs: [{_id: 1, x: 1, b: 1}], + cmdObj: { + update: collectionName, + updates: [{q: {_id: 1}, u: {$set: {b: 1}}}], + } + }, + { + // This will increase updateOneTargetedShardedCount by 1. + logMessage: "Running targeted updateOne command on sharded collection with shard key.", + collName: testColl, + insertDocs: [{_id: 2, x: -1}], + resultDocs: [{_id: 2, x: -1, c: 2}], + cmdObj: { + update: collectionName, + updates: [{q: {x: -1}, u: {$set: {c: 2}}}], + } + }, + { + // This will increase updateOneNonTargetedShardedCount by 2 since there are two updates. + logMessage: + "Running non-targeted updateOne command with multiple updates on sharded collection without shard key.", + collName: testColl, + insertDocs: [{_id: 3, x: 2, d: -5}, {_id: 4, x: -2, d: 5}], + resultDocs: [{_id: 3, x: 2, d: -5, a: 1}, {_id: 4, x: -2, d: 5, a: 2}], + cmdObj: { + update: collectionName, + updates: [{q: {d: -5}, u: {$set: {a: 1}}}, {q: {d: 5}, u: {$set: {a: 2}}}] + } + }, + { + // This will increase deleteOneNonTargetedShardedCount by 1. + logMessage: + "Running non-targeted deleteOne command on sharded collection without shard key.", + collName: testColl, + insertDocs: [{_id: 5, x: 3, y: 0}], + cmdObj: { + delete: collectionName, + deletes: [{q: {y: 0}, limit: 1}], + } + }, + { + // This will increase deleteOneTargetedShardedCount by 1. + logMessage: + "Running targeted deleteOne command on sharded collection without shard key but _id is specified.", + collName: testColl, + insertDocs: [{_id: 6, x: -3}], + cmdObj: { + delete: collectionName, + deletes: [{q: {_id: 6}, limit: 1}], + } + }, + { + // This will increase deleteOneTargetedShardedCount by 1. 
+ logMessage: "Running targeted deleteOne command on sharded collection with shard key.", + collName: testColl, + insertDocs: [{_id: 7, x: 4}], + cmdObj: { + delete: collectionName, + deletes: [{q: {x: 4}, limit: 1}], + } + }, + { + // This will increase deleteOneNonTargetedShardedCount by 2 since there are two deletes. + logMessage: + "Running non-targeted deleteOne commmand with multiple deletes on sharded collection without shard key.", + collName: testColl, + insertDocs: [{_id: 8, x: -4, y: 8}, {_id: 9, x: 5, y: 9}], + cmdObj: {delete: collectionName, deletes: [{q: {y: 8}, limit: 1}, {q: {y: 9}, limit: 1}]} + }, + { + // This will increase findAndModifyNonTargetedShardedCount by 1. + logMessage: + "Running non-targeted findAndModify command on sharded collection without shard key.", + collName: testColl, + insertDocs: [{_id: 10, x: -5, e: 5}], + resultDocs: [{_id: 10, x: -5, e: 10, f: 15}], + cmdObj: { + findAndModify: collectionName, + query: {e: 5}, + update: {_id: 10, x: -5, e: 10, f: 15}, + } + }, + { + // This will increase findAndModifyTargetedShardedCount by 1. + logMessage: "Running targeted findAndModify command on sharded collection with shard key.", + collName: testColl, + insertDocs: [{_id: 11, x: 6, f: 0}], + resultDocs: [{_id: 11, x: 6, f: 5}], + cmdObj: {findAndModify: collectionName, query: {x: 6}, update: {_id: 11, x: 6, f: 5}} + }, + { + // This will increase the updateOneUnshardedCount by 1. + logMessage: "Running targeted updateOne command on unsharded collection.", + collName: unshardedColl, + insertDocs: [{_id: 12, x: -6}], + resultDocs: [{_id: 12, x: -6, g: 20}], + cmdObj: { + update: unshardedCollName, + updates: [{q: {_id: 12}, u: {$set: {g: 20}}}], + } + }, + { + // This will increase the deleteOneUnshardedCount by 1. + logMessage: "Running targeted deleteOne commmand on unsharded collection.", + collName: unshardedColl, + insertDocs: [{_id: 13, x: 7}], + cmdObj: { + delete: unshardedCollName, + deletes: [{q: {_id: 13}, limit: 1}], + } + }, + { + // This will increase findAndModifyUnshardedCount by 1. + logMessage: "Running findAndModify command on unsharded collection.", + collName: unshardedColl, + insertDocs: [{_id: 14, x: -7, h: 0}], + resultDocs: [{_id: 14, x: -7, h: 25}], + cmdObj: + {findAndModify: unshardedCollName, query: {_id: 14}, update: {_id: 14, x: -7, h: 25}} + }, + { + // This will increase updateOneNonTargetedShardedCount by 1. + logMessage: + "Running a single update where no document matches on the query and {upsert: true}", + collName: testColl, + insertDocs: [], + resultDocs: [{_id: 50, x: -50}], + cmdObj: + {update: collectionName, updates: [{q: {k: 50}, u: {_id: 50, x: -50}, upsert: true}]} + }, + { + // This will increase updateOneTargetedShardedCount by 1. + logMessage: + "Running a single update where no document matches on the query and {upsert: true} with shard key", + collName: testColl, + insertDocs: [], + resultDocs: [{_id: 51, x: 51, k: 51}], + cmdObj: { + update: collectionName, + updates: [{q: {x: 51}, u: {_id: 51, x: 51, k: 51}, upsert: true}] + } + } +]; + +testCases.forEach(testCase => { + jsTest.log(testCase.logMessage); + runCommandAndVerify(testCase); +}); + +mongosServerStatus = st.s.getDB(dbName).adminCommand({serverStatus: 1}); + +// Verify all counter metrics were updated correctly after the write commands. 
+assert.eq(3, mongosServerStatus.metrics.query.updateOneTargetedShardedCount); +assert.eq(2, mongosServerStatus.metrics.query.deleteOneTargetedShardedCount); +assert.eq(1, mongosServerStatus.metrics.query.findAndModifyTargetedShardedCount); +assert.eq(1, mongosServerStatus.metrics.query.updateOneUnshardedCount); +assert.eq(1, mongosServerStatus.metrics.query.deleteOneUnshardedCount); +assert.eq(1, mongosServerStatus.metrics.query.findAndModifyUnshardedCount); +assert.eq(4, mongosServerStatus.metrics.query.updateOneNonTargetedShardedCount); +assert.eq(3, mongosServerStatus.metrics.query.deleteOneNonTargetedShardedCount); +assert.eq(1, mongosServerStatus.metrics.query.findAndModifyNonTargetedShardedCount); + +// Testing the counters with WCOS commands. + +const WCOStestCases = [ + { + // This call will increase deleteOneTargetedShardedCount by 1 and + // updateOneNonTargetedShardedCount by 2. + logMessage: "Running non-targeted WouldChangeOwningShard updateOne command.", + collName: testColl, + insertDocs: [{_id: 15, x: 8, y: 1}], + resultDocs: [{_id: 15, x: -8, y: 1}], + cmdObj: { + update: collectionName, + updates: [{q: {y: 1}, u: {x: -8, y: 1}}], + lsid: {id: UUID()}, + txnNumber: NumberLong(1) + } + }, + { + // This call will increase deleteOneTargetedShardedCount by 1 and + // findAndModifyNonTargetedShardedCount by 2. + logMessage: "Running non-targeted WouldChangeOwningShard findAndModify command.", + collName: testColl, + insertDocs: [{_id: 16, x: 9, z: 1}], + resultDocs: [{_id: 16, x: -9, z: 1}], + cmdObj: { + findAndModify: collectionName, + query: {_id: 16}, + update: {_id: 16, x: -9, z: 1}, + lsid: {id: UUID()}, + txnNumber: NumberLong(1) + } + } +]; + +WCOStestCases.forEach(testCase => { + jsTest.log(testCase.logMessage); + runCommandAndVerify(testCase); +}); + +mongosServerStatus = st.s.getDB(dbName).adminCommand({serverStatus: 1}); + +// Verify all counter metrics were updated correctly after the wcos write commands. +assert.eq(3, mongosServerStatus.metrics.query.updateOneTargetedShardedCount); +assert.eq(4, mongosServerStatus.metrics.query.deleteOneTargetedShardedCount); +assert.eq(1, mongosServerStatus.metrics.query.findAndModifyTargetedShardedCount); +assert.eq(1, mongosServerStatus.metrics.query.updateOneUnshardedCount); +assert.eq(1, mongosServerStatus.metrics.query.deleteOneUnshardedCount); +assert.eq(1, mongosServerStatus.metrics.query.findAndModifyUnshardedCount); +assert.eq(6, mongosServerStatus.metrics.query.updateOneNonTargetedShardedCount); +assert.eq(3, mongosServerStatus.metrics.query.deleteOneNonTargetedShardedCount); +assert.eq(3, mongosServerStatus.metrics.query.findAndModifyNonTargetedShardedCount); + +// Insert Docs for error testing. +const insertDocs = [{_id: 17, x: 10, y: 5}, {_id: 18, x: -10, y: 5}, {_id: 19, x: 11, y: 5}]; +assert.commandWorked(testColl.insert(insertDocs)); + +const errorTestCases = [ + { + // This call will increase findAndModifyNonTargetedShardedCount by 1 even though the + // command should fail. + logMessage: "Unknown modifier in findAndModify, FailedToParse expected.", + errorCode: [ErrorCodes.FailedToParse], + cmdObj: { + findAndModify: collectionName, + query: {y: 5}, + update: {$match: {y: 3}}, + } + }, + { + // This call will updateOneNonTargetedShardedCount by 1. 
+ logMessage: "Unknown modifier in batch update, FailedToParse expected.", + errorCode: [ErrorCodes.FailedToParse], + index: [0], + cmdObj: { + update: collectionName, + updates: [{q: {y: 5}, u: {$match: {z: 0}}}], + } + }, + { + // This call will not increase any counters because query had invalid operator. + logMessage: "Incorrect query in delete, BadValue expected.", + errorCode: [ErrorCodes.BadValue], + index: [0], + cmdObj: { + delete: collectionName, + deletes: [{q: {y: {$match: 5}}, limit: 1}], + } + }, + { + // This call will updateOneNonTargetedShardedCount by 2. + logMessage: "Two updates in a batch, one successful, one FailedToParse expected.", + errorCode: [ErrorCodes.FailedToParse], + index: [1], + cmdObj: { + update: collectionName, + updates: [{q: {y: 5}, u: {$set: {a: 0}}}, {q: {y: 5}, u: {$match: {z: 0}}}], + } + }, + { + // This call will increase updateOneNonTargetedShardedCount by 2. + logMessage: "Three updates in a batch, one BadValue and two FailedToParse expected.", + errorCode: [ErrorCodes.BadValue, ErrorCodes.FailedToParse], + index: [0, 1, 2], + cmdObj: { + update: collectionName, + updates: [ + {q: {y: {$match: 5}}, u: {$set: {z: 0}}}, + {q: {y: 5}, u: {$match: {z: 0}}}, + {q: {y: 5}, u: {$match: {z: 0}}} + ], + ordered: false + } + }, + { + // This call will increase deleteOneNonTargetedShardedCount by 1. + logMessage: "Two deletes in a batch, one successful, one BadValue expected.", + errorCode: [ErrorCodes.BadValue], + index: [1], + cmdObj: { + delete: collectionName, + deletes: [{q: {y: 5}, limit: 1}, {q: {y: {$match: 5}}, limit: 1}], + } + }, + { + // This call will increase deleteOneNonTargetedShardedCount by 1. + logMessage: "Three deletes in a batch, one successful, two BadValues expected.", + errorCode: [ErrorCodes.BadValue], + index: [0, 2], + cmdObj: { + delete: collectionName, + deletes: [ + {q: {y: {$match: 5}}, limit: 1}, + {q: {y: 5}, limit: 1}, + {q: {y: {$match: 5}}, limit: 1} + ], + ordered: false + } + } +]; + +errorTestCases.forEach(testCase => { + jsTest.log(testCase.logMessage); + runCommandAndCheckError(testCase); +}); + +mongosServerStatus = st.s.getDB(dbName).adminCommand({serverStatus: 1}); + +// Verify all counter metrics were not updated after the error write commands. 
+assert.eq(3, mongosServerStatus.metrics.query.updateOneTargetedShardedCount); +assert.eq(4, mongosServerStatus.metrics.query.deleteOneTargetedShardedCount); +assert.eq(1, mongosServerStatus.metrics.query.findAndModifyTargetedShardedCount); +assert.eq(1, mongosServerStatus.metrics.query.updateOneUnshardedCount); +assert.eq(1, mongosServerStatus.metrics.query.deleteOneUnshardedCount); +assert.eq(1, mongosServerStatus.metrics.query.findAndModifyUnshardedCount); +assert.eq(11, mongosServerStatus.metrics.query.updateOneNonTargetedShardedCount); +assert.eq(5, mongosServerStatus.metrics.query.deleteOneNonTargetedShardedCount); +assert.eq(4, mongosServerStatus.metrics.query.findAndModifyNonTargetedShardedCount); + +st.stop(); +})(); diff --git a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_stable_api_test.js b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_stable_api_test.js index ba5adfc38e940..83bd9712fd6a8 100644 --- a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_stable_api_test.js +++ b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_stable_api_test.js @@ -3,10 +3,9 @@ * * @tags: [ * requires_sharding, - * requires_fcv_70, + * requires_fcv_71, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ diff --git a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_update_shard_key_errors.js b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_update_shard_key_errors.js index 9b9fca7f006cc..fe40466eeac65 100644 --- a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_update_shard_key_errors.js +++ b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_update_shard_key_errors.js @@ -7,10 +7,9 @@ * * @tags: [ * requires_sharding, - * requires_fcv_70, + * requires_fcv_71, * uses_transactions, * uses_multi_shard_transaction, - * featureFlagUpdateOneWithoutShardKey, * ] */ diff --git a/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_validate_query_plan.js b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_validate_query_plan.js new file mode 100644 index 0000000000000..4a2ec484403be --- /dev/null +++ b/jstests/sharding/updateOne_without_shard_key/write_without_shard_key_validate_query_plan.js @@ -0,0 +1,245 @@ +/** + * Test writes without shard key uses the appropriate query plan. + * + * @tags: [ + * requires_sharding, + * requires_fcv_71, + * uses_transactions, + * uses_multi_shard_transaction, + * ] + */ + +(function() { +"use strict"; + +load("jstests/sharding/updateOne_without_shard_key/libs/write_without_shard_key_test_util.js"); + +// Make sure we're testing with no implicit session. +TestData.disableImplicitSessions = true; + +// 2 shards single node, 1 mongos, 1 config server 3-node. +const st = new ShardingTest({}); +const dbName = "testDb"; +const collName = "testColl"; +const nss = dbName + "." 
+ collName; +const splitPoint = 0; +const docsToInsert = [ + {_id: 0, x: -2, y: 1, z: [1, 2, 3]}, + {_id: 1, x: -1, y: 1, z: [1, 2, 3]}, + {_id: 2, x: 1, y: 1, z: [1, 2, 3]}, + {_id: 3, x: 2, y: 1, z: [1, 2, 3]}, + {_id: 4, x: 3, y: 1, z: [1, 2, 3]}, + {_id: 5, x: 4, y: 1, z: [1, 2, 3]} +]; +const dbConn = st.s.getDB(dbName); +const profileCollectionShard0 = st.shard0.getDB(dbName).system.profile; +const profileCollectionShard1 = st.shard1.getDB(dbName).system.profile; + +function runTest(testCase, usingClusteredIndex) { + // Turn on profiling for both shards. + assert.commandWorked(st.shard0.getDB(dbName).setProfilingLevel(2)); + assert.commandWorked(st.shard1.getDB(dbName).setProfilingLevel(2)); + + assert.commandWorked(dbConn.runCommand(testCase.cmdObj)); + + const profileOnShard0 = profileCollectionShard0.findOne(testCase.profileDocToFind); + const profileOnShard1 = profileCollectionShard1.findOne(testCase.profileDocToFind); + + // Only one shard should have recorded that it performed the write. + assert.neq(profileOnShard0, profileOnShard1); + const profileDoc = profileOnShard0 ? profileOnShard0 : profileOnShard1; + + if (usingClusteredIndex) { + if (testCase.hasPositionalProjection) { + assert.eq(profileDoc.execStats.stage, "PROJECTION_DEFAULT", profileDoc); + assert.eq( + profileDoc.execStats.inputStage.inputStage.stage, "CLUSTERED_IXSCAN", profileDoc); + } else { + assert.eq(profileDoc.execStats.inputStage.stage, "CLUSTERED_IXSCAN", profileDoc); + } + } else { + // The two phase write protocol will include the original query and collation for updates + // with a positional operator, which means it cannot only use the _id index to fulfill the + // query. + if (testCase.hasPositionalUpdate) { + assert.eq(profileDoc.execStats.inputStage.stage, "FETCH", profileDoc); + assert.eq(profileDoc.execStats.inputStage.inputStage.stage, "IXSCAN", profileDoc); + } else if (testCase.hasPositionalProjection) { + assert.eq(profileDoc.execStats.stage, "PROJECTION_DEFAULT", profileDoc); + assert.eq(profileDoc.execStats.inputStage.inputStage.stage, "FETCH", profileDoc); + assert.eq( + profileDoc.execStats.inputStage.inputStage.inputStage.stage, "IXSCAN", profileDoc); + } else { + assert.eq(profileDoc.execStats.inputStage.stage, "IDHACK", profileDoc); + } + } + + // Turn off profiling on both shards so we can clear the systems.profile collection for the next + // test. + assert.commandWorked(st.shard0.getDB(dbName).setProfilingLevel(0)); + assert.commandWorked(st.shard1.getDB(dbName).setProfilingLevel(0)); + profileCollectionShard0.drop(); + profileCollectionShard1.drop(); +} + +// Sets up a 2 shard cluster using 'x' as a shard key where Shard 0 owns x < +// splitPoint and Shard 1 splitPoint >= 0. +WriteWithoutShardKeyTestUtil.setupShardedCollection( + st, nss, {x: 1}, [{x: splitPoint}], [{query: {x: splitPoint}, shard: st.shard1.shardName}]); + +assert.commandWorked(dbConn.getCollection(collName).insert(docsToInsert)); + +// There should only be one collection created in this test. 
+const listCollRes = assert.commandWorked(dbConn.runCommand({listCollections: 1})); +const usingClusteredIndex = listCollRes.cursor.firstBatch[0].options.clusteredIndex != null; + +let testCases = [ + { + logMessage: "Running updateOne without positional update.", + cmdObj: { + update: collName, + updates: [{q: {y: 1}, u: {$set: {a: 3}}}], + }, + profileDocToFind: {"op": "update", "ns": nss} + }, + { + logMessage: "Running updateOne without positional update and non-default collation.", + cmdObj: { + update: collName, + updates: [ + {q: {y: 1}, u: {$set: {a: 3}}, collation: {locale: "en", strength: 2}}, + ], + }, + profileDocToFind: {"op": "update", "ns": nss} + }, + { + logMessage: "Running updateOne with positional update.", + cmdObj: { + update: collName, + updates: [{q: {y: 1, z: 1}, u: {$set: {"z.$": 3}}}], + }, + hasPositionalUpdate: true, + profileDocToFind: {"op": "update", "ns": nss} + }, + { + logMessage: "Running updateOne with positional update and non-default collation.", + cmdObj: { + update: collName, + updates: + [{q: {y: 1, z: 1}, u: {$set: {"z.$": 3}}, collation: {locale: "en", strength: 2}}], + }, + hasPositionalUpdate: true, + profileDocToFind: {"op": "update", "ns": nss} + }, + { + logMessage: "Running findAndModify update without positional update.", + cmdObj: { + findAndModify: collName, + query: {y: 1}, + update: {$set: {a: 4}}, + }, + profileDocToFind: {"op": "command", "ns": nss, "command.findAndModify": collName} + }, + { + logMessage: + "Running findAndModify update without positional update and non-default collation.", + cmdObj: { + findAndModify: collName, + query: {y: 1}, + update: {$set: {a: 4}}, + collation: {locale: "en", strength: 2} + }, + profileDocToFind: {"op": "command", "ns": nss, "command.findAndModify": collName} + }, + { + logMessage: "Running findAndModify update with positional update.", + cmdObj: { + findAndModify: collName, + query: {y: 1, z: 1}, + update: {$set: {"z.$": 3}}, + }, + hasPositionalUpdate: true, + profileDocToFind: {"op": "command", "ns": nss, "command.findAndModify": collName} + }, + { + logMessage: + "Running findAndModify update with positional update and non-default collation.", + cmdObj: { + findAndModify: collName, + query: {y: 1, z: 1}, + update: {$set: {"z.$": 3}}, + collation: {locale: "en", strength: 2} + }, + hasPositionalUpdate: true, + profileDocToFind: {"op": "command", "ns": nss, "command.findAndModify": collName} + }, + { + logMessage: "Running findAndModify with positional projection.", + cmdObj: { + findAndModify: collName, + query: {y: 1, z: 1}, + fields: {'z.$': 1}, + remove: true, + }, + hasPositionalProjection: true, + profileDocToFind: {"op": "command", "ns": nss, "command.findAndModify": collName} + }, + { + logMessage: "Running findAndModify with positional projection and non-default collation.", + cmdObj: { + findAndModify: collName, + query: {y: 1, z: 1}, + fields: {'z.$': 1}, + update: {$set: {a: 3}}, + collation: {locale: "en", strength: 2} + + }, + hasPositionalProjection: true, + profileDocToFind: {"op": "command", "ns": nss, "command.findAndModify": collName} + }, + { + logMessage: "Running findAndModify remove.", + cmdObj: { + findAndModify: collName, + query: {y: 1}, + remove: true, + }, + profileDocToFind: {"op": "command", "ns": nss, "command.findAndModify": collName} + }, + { + logMessage: "Running findAndModify remove and non-default collation.", + cmdObj: { + findAndModify: collName, + query: {y: 1}, + collation: {locale: "en", strength: 2}, + remove: true, + }, + profileDocToFind: {"op": 
"command", "ns": nss, "command.findAndModify": collName} + }, + { + logMessage: "Running deleteOne.", + docsToInsert: docsToInsert, + cmdObj: { + delete: collName, + deletes: [{q: {y: 1}, limit: 1}], + }, + profileDocToFind: {"op": "remove", "ns": nss} + }, + { + logMessage: "Running deleteOne and non-default collation.", + docsToInsert: docsToInsert, + cmdObj: { + delete: collName, + deletes: [{q: {y: 1}, limit: 1, collation: {locale: "en", strength: 2}}], + }, + profileDocToFind: {"op": "remove", "ns": nss} + } +]; + +testCases.forEach(testCase => { + jsTestLog(testCase.logMessage); + runTest(testCase, usingClusteredIndex); +}); + +st.stop(); +})(); diff --git a/jstests/sharding/update_zone_key_range.js b/jstests/sharding/update_zone_key_range.js index eae462f7c6f0e..cd4909638be48 100644 --- a/jstests/sharding/update_zone_key_range.js +++ b/jstests/sharding/update_zone_key_range.js @@ -151,4 +151,4 @@ compoundKeyTestCases.forEach(function(test) { }); st.stop(); -})(); \ No newline at end of file +})(); diff --git a/jstests/sharding/use_rsm_data_for_cs.js b/jstests/sharding/use_rsm_data_for_cs.js index 8bff84db6eeca..36e5fc75468d6 100644 --- a/jstests/sharding/use_rsm_data_for_cs.js +++ b/jstests/sharding/use_rsm_data_for_cs.js @@ -4,7 +4,7 @@ // init with one shard with one node rs var st = new ShardingTest({shards: 1, rs: {nodes: 1}, mongos: 1}); var mongos = st.s; -var rs = st.rs0; +const rs = st.rs0; assert.commandWorked(st.s0.adminCommand({enablesharding: "test"})); @@ -21,7 +21,7 @@ rs.nodes.forEach(function(node) { }); // add a node to shard rs -if (TestData.catalogShard) { +if (TestData.configShard) { rs.add({'configsvr': ''}); } else { rs.add({'shardsvr': ''}); diff --git a/jstests/sharding/warm_up_connection_pool.js b/jstests/sharding/warm_up_connection_pool.js index 4dceb38ef1d5d..d0f86ff133e83 100644 --- a/jstests/sharding/warm_up_connection_pool.js +++ b/jstests/sharding/warm_up_connection_pool.js @@ -74,8 +74,8 @@ var warmUpDisabledConnPoolStatsCheck = function(connPoolStats, currentShard) { return undefined === connPoolStats["hosts"][currentShard]; }; -if (!TestData.catalogShard) { - // In catalog shard mode we have RSM entries for the catalog shard without warming up its conn +if (!TestData.configShard) { + // In config shard mode we have RSM entries for the config shard without warming up its conn // pool. runTest(warmUpDisabledParams, warmUpDisabledConnPoolStatsCheck); } @@ -121,8 +121,8 @@ var shutdownNodeExtraOptions = function(test) { return {connString: nodeList[pId], nodeId: pId}; }; -if (!TestData.catalogShard) { - // In catalog shard mode this shuts down the config server, which prevents mongos from starting +if (!TestData.configShard) { + // In config shard mode this shuts down the config server, which prevents mongos from starting // up. runTest(shutdownNodeParams, shutdownNodeConnPoolStatsCheck, shutdownNodeExtraOptions); } diff --git a/jstests/sharding/write_transactions_during_migration.js b/jstests/sharding/write_transactions_during_migration.js index fa7249711bfea..17a4f1637af6f 100644 --- a/jstests/sharding/write_transactions_during_migration.js +++ b/jstests/sharding/write_transactions_during_migration.js @@ -3,7 +3,7 @@ * new writes are being sent to the source shard. 
*/ -load('./jstests/libs/chunk_manipulation_util.js'); +load('jstests/libs/chunk_manipulation_util.js'); /** * Test outline: diff --git a/jstests/sharding/zone_changes_hashed.js b/jstests/sharding/zone_changes_hashed.js index 5b62bb4524e46..c86f0e7338536 100644 --- a/jstests/sharding/zone_changes_hashed.js +++ b/jstests/sharding/zone_changes_hashed.js @@ -1,10 +1,6 @@ /** * Test that chunks and documents are moved correctly after zone changes. */ -(function() { -'use strict'; - -load("jstests/libs/feature_flag_util.js"); load("jstests/sharding/libs/zone_changes_util.js"); load("jstests/sharding/libs/find_chunks_util.js"); @@ -246,5 +242,4 @@ shardChunkBounds = { assertChunksOnShards(configDB, ns, shardChunkBounds); assertDocsOnShards(st, ns, shardChunkBounds, docs, shardKey); -st.stop(); -})(); +st.stop(); \ No newline at end of file diff --git a/jstests/slow1/repeated_crash_recovery_with_history_store.js b/jstests/slow1/repeated_crash_recovery_with_history_store.js index 2f63e9cbef0f8..f2e65244b94d9 100644 --- a/jstests/slow1/repeated_crash_recovery_with_history_store.js +++ b/jstests/slow1/repeated_crash_recovery_with_history_store.js @@ -100,4 +100,4 @@ for (let i = 0; i < 10; i++) { } rst.stopSet(); -})(); \ No newline at end of file +})(); diff --git a/jstests/slow1/replsets_priority1.js b/jstests/slow1/replsets_priority1.js index ed72b92e96072..92c820fbd62d0 100644 --- a/jstests/slow1/replsets_priority1.js +++ b/jstests/slow1/replsets_priority1.js @@ -7,7 +7,7 @@ load("jstests/replsets/rslib.js"); -var rs = new ReplSetTest({name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}}); +const rs = new ReplSetTest({name: 'testSet', nodes: 3, nodeOptions: {verbose: 2}}); var nodes = rs.startSet(); rs.initiate(); diff --git a/jstests/ssl/crl_x509_rotate.js b/jstests/ssl/crl_x509_rotate.js index e4559307a9f28..28a82ad058366 100644 --- a/jstests/ssl/crl_x509_rotate.js +++ b/jstests/ssl/crl_x509_rotate.js @@ -68,4 +68,4 @@ out = runMongoProgram("mongo", assert.eq(out, 0, "Mongo invocation failed"); MongoRunner.stopMongod(mongod); -}()); \ No newline at end of file +}()); diff --git a/jstests/ssl/disable_x509.js b/jstests/ssl/disable_x509.js index a081037003549..869b44ceaa768 100644 --- a/jstests/ssl/disable_x509.js +++ b/jstests/ssl/disable_x509.js @@ -16,7 +16,7 @@ if (cmdOut.ok) { MongoRunner.stopMongod(conn); conn = MongoRunner.runMongod( {restart: conn, setParameter: "authenticationMechanisms=MONGODB-X509"}); - external = conn.getDB("$external"); + let external = conn.getDB("$external"); // Add user using localhost exception external.createUser({ diff --git a/jstests/ssl/libs/cluster-member-bar.pem b/jstests/ssl/libs/cluster-member-bar.pem index 27b9f533afd4d..e2ee2e84be5bf 100644 --- a/jstests/ssl/libs/cluster-member-bar.pem +++ b/jstests/ssl/libs/cluster-member-bar.pem @@ -3,56 +3,56 @@ # # A server certificate with the mongoClusterMembership extension with a value of bar -----BEGIN CERTIFICATE----- -MIIEejCCA2KgAwIBAgIEWTZe1DANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEejCCA2KgAwIBAgIEOLC7HDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjMwMzE1MTU0MTU2WhcNMjUwNjE2MTU0MTU2WjBsMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQxWhcNMjUwOTEwMTQyODQxWjBsMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG -c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAu9lxe3kPI/Hk 
-ZTloS7DbcXxJOfvz6+SXkEmsQeWh8asKYl1vMj9trkwZonpUvdGy3u32aQ2OttBw -ajE6TWpNxBpLPlksrpYcvOZBHROvVek5jkQIjCFY2a/xoD6bNSUKfjXiBVl3ahDy -b7cg6oGC6X3xe+Sa9Zj7HhiOY0LaoRZr0PSuIkxBxboMpghEv/Mq0YFoxhyuS/XI -9HGcIiipp9sVZNhiP4yZPfqruSB4ACYNVjDJTbNAgYhlCT8W1lHnO2pc2BRTbIj5 -NTbjcGeIjLzRf5ARzPF1XCknnECmszJFLHCONRG/k8Z8i87vIBqf83jo0y5W0GK7 -t5hTfDci3wIDAQABo4IBGjCCARYwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYD -VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBQKCntdQt8iZZ8C -mEjgcbjEJZhO/DCBiwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8G +c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA47hiLP4/iAzu +wNEXYrx3+FIqSWGQGFzT9FF9qQtJHEXQfxldul+E9df/Buz0YHaF/OK1bNUILpV4 +GpzRW63YEmppD6jyeoZJ1RO4FhAKseDATb0B1jj0+FlYcxGwnAFJgWc3/C5vqn6B +GUICQuAEYON6zdifdeRzt6Bu+8H3Z+E6pS37YkpOD3PpXZUH9S4hQmx8UZn7wR3/ +2EPO4i0zz0OhiaFKubqlRTsPDZtr2SesLQiaIhUfS4GZEGAnM6oHoDV0ke4+bBYq +JxUx06DgevtMmGo3EB0bqsa6mZngYYWUdjG5lBFxayxPG45OIWCSXRIk2QoLbvVU +pFjrioCdMwIDAQABo4IBGjCCARYwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYD +VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBTEScJIuSkeRwLI +ttE2VM4iiIJxdjCBiwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8G A1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoM B01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3Qg -Q0GCBHvUrJMwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMBQGCysGAQQBgo4p -AgECBAUMA2JhcjANBgkqhkiG9w0BAQsFAAOCAQEAjY+PUCpyNisWgM82A+eN+ipq -xGUJE97j7ikoGTzFYeGJ4ANYXxL9MlDakZjv+fNXy+ngSDqBGvZzN/mIIa72Phkz -Q/L+jLSH2HUZL8/ptTnf6M2mdYwuABSBE7+KG6emb1ywUudHFztzxZZDlSE+JVCO -F39amF2TMnzNqb1hBOz07RdZKBqEpo3PrL8MFlZxuN9i6YHp5b5Og+Li/ktWMaBv -6kZ+drMK3E+ku5QRPTARXuGXf7vFT+eC5Rk/jTi3prwveg7n4WKmecS6BuzVlLjt -kUIe0RqTS3HqkFqtb/mb4Dc1Bbi5MD86CZ1JNkWT1m8LozsAnKhfnrHbUViPdg== +Q0GCBEreWhowGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMBQGCysGAQQBgo4p +AgECBAUMA2JhcjANBgkqhkiG9w0BAQsFAAOCAQEAibX4uOuIRewh6spaL9/2/i7p +kryJLAZMhKNzfD537M8FMUKzfblsrgrPUUixJd6dBDCRGM9EqOBABJ6lg5PoZ1TC +WB7Hhvb2N5tEO8mcd7lWk02Kkt1PNxhkundK0gnFb2XPxgZ0/ChnBXhJvE9zkyBk +1bTqs2D4LEU5icabiDJXy9xJqvUU1FRanq8RbmRKdvZ4XvaFfeYSfk2N2ZyGKSCG +A/d3yFij1EQBN2Rp0IwdxO8dw0JYyOBK7OIqyKKy2mfjBsYNq3jV1tIw11QOY9EH +cAC3M/vSov2josWyZVeefLotKtmFjp1NNbfNSjCybWLK1Tp7VKqq8Fhw/WGvSw== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQC72XF7eQ8j8eRl -OWhLsNtxfEk5+/Pr5JeQSaxB5aHxqwpiXW8yP22uTBmielS90bLe7fZpDY620HBq -MTpNak3EGks+WSyulhy85kEdE69V6TmORAiMIVjZr/GgPps1JQp+NeIFWXdqEPJv -tyDqgYLpffF75Jr1mPseGI5jQtqhFmvQ9K4iTEHFugymCES/8yrRgWjGHK5L9cj0 -cZwiKKmn2xVk2GI/jJk9+qu5IHgAJg1WMMlNs0CBiGUJPxbWUec7alzYFFNsiPk1 -NuNwZ4iMvNF/kBHM8XVcKSecQKazMkUscI41Eb+TxnyLzu8gGp/zeOjTLlbQYru3 -mFN8NyLfAgMBAAECggEAJDf8pW3l+Ww+OTYkYdOru+nWxJNLqIPepTdPOzVnUA1G -Z0jUk7+fCigqGSW1CRRRhKIlDIRMq/rscc0kDKEedV0MfOz8rHzM9a7/hvewqsPZ -EREVBM+5Ld+6msb3bfvCVitVdOqXF6BE3j1U32IxN4vM77JYHlpssJTTf1f4h25S -qgZb+b8D+J54nuxiB6Q54WYSnzCMMCGmtIVceS/Itc6CVxLUl86WJUpoZwiQTUxX -sXJoYDOahJLPnuwOT+tNzlXHiFLQ4kB1M7mYFjNhurj9X9YSOR4fxWLqy4IJGQTH -oehCLuGBPVI578gPXim1QrAf3hG8to8ViFVgeNWygQKBgQDVCljM8t9dVZEm+F1r -e4lDb6rauycK9zAwQLKzgRAAxAT7OMs0WyuCMWHD3ZZDjtpSmb5m1kP22OXy0BRB -G08xFUtNAzsQkH5RBQHScec8RJ6bFgLQ99hd++Gc7jgp0XxuxoEvMKm1bEtbfoUu -6AoCuRf/kTqL3PY1HP5Yt7L9UQKBgQDhuqzwLNpCXnGk+0gf7qEkIMPqZqFzqrdb -eWWVmC1JO4kc5TNGJnEtVlS32ow+iN1qjZBptQY0/Ykohj3F2PZHF93zQ2sbDFAy -7BQWbYjO9DAzemtBqMNVtFe+5oABzbyhqzmZtNVG4C2XKkJkw0RUCtyvmE5/0obH -xT/t//RRLwKBgBO8JKO/r+9eeNbKVSUayYlks8gVZDWA1obxx1wXjZr0jZ2UEkbk -VzB1UKArS7swZYsXUOsH2D3qs8p9ehLZ68kZNuOIdBVBvWHV++g5wvjzRloJfPNM -sk9qgOjfrHY7QLKmUttDP8VdpdFw8/d3aU39RXrYQjsomeorqGghhEQxAoGAfqIZ 
-LswazazqGGIX/kIDCJ+RCUj2PkuBfcHG6XtrvG+35gv3Dd23FHYgJNxoXRSvEn3E -jGjPyJ6Leb6FnR6wWwXasAQcbBomS8sBIevlGiUHfXmp/jXND6GSsDfjjB99OT0z -nTVDiPVu3iUJBjo9dOB7Gc9aCn9yuVPBH6W9zGUCgYB6ew5VTrbwWO0KKYpiM6aN -ZXiYTMMcaJOjlmyBYtWYNdRusshh4i+ICF/eV9CXtW1cQcXGCM5gB0Py3r9ugSWk -xQDVotkSUP3GswftggX17jEyjTQKMVtDDATyIxU3XohAWeNQYuRV98+A41lFqE0v -GXbb4Dhv87TuIEAIsHCV5Q== +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDjuGIs/j+IDO7A +0RdivHf4UipJYZAYXNP0UX2pC0kcRdB/GV26X4T11/8G7PRgdoX84rVs1QgulXga +nNFbrdgSamkPqPJ6hknVE7gWEAqx4MBNvQHWOPT4WVhzEbCcAUmBZzf8Lm+qfoEZ +QgJC4ARg43rN2J915HO3oG77wfdn4TqlLftiSk4Pc+ldlQf1LiFCbHxRmfvBHf/Y +Q87iLTPPQ6GJoUq5uqVFOw8Nm2vZJ6wtCJoiFR9LgZkQYCczqgegNXSR7j5sFion +FTHToOB6+0yYajcQHRuqxrqZmeBhhZR2MbmUEXFrLE8bjk4hYJJdEiTZCgtu9VSk +WOuKgJ0zAgMBAAECggEAIoVeJLH6UZacTxCjee1WRoe2L1+h53+TBhOgeBLk52tR ++yKOSeWXE2nGGHetaD4ve7GmjTQSRHWWOnqRZlOJUOleuegDsj1PTgTqQDX4WRTY +MjC36ZDC2i85MGB7JHsxeHN8Ujd76B27VmfiTd8F6uYxRKlVKxsRy8F/cZzq840w +/TnvVhwJyPtTbdgWQQLtGMTAOHY0vZIQbnCn3k0nuOiv5vYWWO5PG7qeTRXI9dkj +oT9n1yMxqbUnSyCRWYpB5Lav6Ua7rHWDUg1g0W2krTDZum8PgKh2EoEElrCp2AIy +2uXhGHK2khEKI1L45PUNTR3eaVsk50mAd02Lf8jwaQKBgQD85iyVfJGA3otW9AK3 +d8jqyouYwroEYcDRUDy6N5WUNhZ/5esvVWic5BW4dJI1DEq3hPVRpgeI9TVGy5xa +iLyIGDIgpsXKubY4kUox17+QwnM8r3SkmanfV5cM84rS08g2uK0vO0y0av/z75Xv +UeaX/oUkIOWl1MLG9o3h8UFsZQKBgQDmgyzhCf4rmkTVr9oZoNjG9Fbg5gkI0qcb +z6i2VSwliT3OzltYkDyyPN9NLK51tUiFy3qbxuGHrY00MNMKor5o6H9OC2b9rT4D +ep20O4ORC7IDgi2eOEzFkEC759YyNBfxQbY4Zc/RXNfb+Vwphm1Ry0kDunJqBfJY +4uZN+h8NtwKBgQDH39LUwoLs6vYNXwTWo+EzT6lMrnn+BJQz2cplEpExdh4KL1jV +LBdmCA3yno7/Rhu3N2n7gdDquYHF1h5Yn0EB/r1/eM9SDbyqsImHIxDQBB63k6QI +gznyvnMvVTwerCrMtOCLN77DyN1Jmc1SO9SXxBzOFQHrMmBOh4ZTUC140QKBgBsy +FrVmLGqQhNaBLMYhR/IRGb7m5OH5mGWbllgMrGyxSG2Bwx0rxz1wGRhN0Tdp7+h5 +HToLuMf9Hc+ugveaQ8S0yXA3wCrlqY9XhAujid4u9w2a6i7kjYoxfrGspIS/yKNa +EVmOCXq17XGydcEgbv1bcn3SHXWMxg7eljVU8Rz5AoGATaHyeuoT+ERt7eCs9eCx +Ox+/FCnyHEAqFciQpYEBq8434RZu+7IXn0P9c81c0UGd1bP8I1jW+80Xo0Qredpf +PaC29fQf6HxhY/Pqe4m1u8C+KiGUihEgquVAbdteEwIiJ3AA36ui1GhLLTKF/GHa +GLGbnCGE2p1IOzZleizN/6M= -----END PRIVATE KEY----- diff --git a/jstests/ssl/libs/cluster-member-bar.pem.digest.sha1 b/jstests/ssl/libs/cluster-member-bar.pem.digest.sha1 index 53b59e4aa3569..9400316e6319e 100644 --- a/jstests/ssl/libs/cluster-member-bar.pem.digest.sha1 +++ b/jstests/ssl/libs/cluster-member-bar.pem.digest.sha1 @@ -1 +1 @@ -D498E0A2F8CF71D5349BB91E11E6D05350C88A3C \ No newline at end of file +59F7BD4EB68A0D41F03580611411F44331B29C69 \ No newline at end of file diff --git a/jstests/ssl/libs/cluster-member-bar.pem.digest.sha256 b/jstests/ssl/libs/cluster-member-bar.pem.digest.sha256 index a0ce1bd86a813..7574f226934cc 100644 --- a/jstests/ssl/libs/cluster-member-bar.pem.digest.sha256 +++ b/jstests/ssl/libs/cluster-member-bar.pem.digest.sha256 @@ -1 +1 @@ -F957FEBEEC5C9C08C2500C17432B47635C12101E4DD42183FD333542ACD0AE5D \ No newline at end of file +5FEBC2EE21BFF475CB05DE21471339C07D89892A049304BEE80C990D452111CD \ No newline at end of file diff --git a/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem b/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem index 9b4a86dfd1798..852c7cd453fa6 100644 --- a/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem +++ b/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem @@ -3,56 +3,56 @@ # # A server certificate with the mongoClusterMembership extension with a value of foo, but an unrelated RDN -----BEGIN CERTIFICATE----- -MIIEdjCCA16gAwIBAgIEGs/cgTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEdjCCA16gAwIBAgIEDH6YBDANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV 
UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjMwMzE1MTU0MjEwWhcNMjUwNjE2MTU0MjEwWjBoMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQxWhcNMjUwOTEwMTQyODQxWjBoMQswCQYD VQQGEwJaWjEQMA4GA1UECAwHRXhhbXBsZTETMBEGA1UEBwwKRmFrZXN2aWxsZTEQ MA4GA1UECgwHQ29tcGFueTERMA8GA1UECwwIQnVzaW5lc3MxDTALBgNVBAMMBERv -ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCKB1iyc78amtXCaOfh -3wZ7jidmiLI0IMGk1KuGnUzyoRlX6PlFKm+I5/rbyVgVK0MEKIJU1rxrxBwwJyW/ -/D1NOH1FTcKk+FnkBs7T1iwct+2OocMArQVcavFayqcqubxvWFztjBNxCoh578OH -u7BBqG3iXu8HvWivm+FAkqYWNk8M0us5Ui/yQShRXcPRTYqAFyTatlcesijGMKEA -J1AE4xgVNmJI88qoUmS7ftbFW0B53ru7aJKtQ9xGcu1EtDEUSXpJAVmmSDmuAF0L -ZaGYUd/zerCweOgkmy0rEoFQPKKb9Ib9PJ4vo4VN6RKYt3DzDxpqu58pZMVJxxn+ -UjmnAgMBAAGjggEaMIIBFjAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDAdBgNVHSUE -FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFOHQkRJH13hAyapsryfr -spM9JebhMIGLBgNVHSMEgYMwgYCheKR2MHQxCzAJBgNVBAYTAlVTMREwDwYDVQQI +ZXIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCexx4Dn6nsYXtoUt03 +lKjKOQfEiZsGAgGBHAMl6n3cwoUHsLfmeyfHBwTx54rx+VbawG9YHLSx/173GQRU +cGY2ubx9I9OSi+LydJKaGBObNAzTeLkpE1AcQxWAFNUiSWmvwj/tpdIanYIaEbYx +fXTmtlC6kBD+UQkTGtGmvuM7bKdhqvbz69ANwzX3wx9XyBAXZor86h4KcEVlgHBo +u5hUJKMIHHbkonPZoklKyaHUT1g7k9cdGPEMuyR9T6ypNJGU1RHy6IKH7INDWjnf +ITOwfN0pOE7Nl7BRqmfsvNht0NCEjRwKaJ4Bzjo9WkzSC3879gAvhYTwK2dgneNr +6NAlAgMBAAGjggEaMIIBFjAJBgNVHRMEAjAAMAsGA1UdDwQEAwIFoDAdBgNVHSUE +FjAUBggrBgEFBQcDAQYIKwYBBQUHAwIwHQYDVR0OBBYEFJTjwS2tkX9xLo7YNo/A +0/RSrLSUMIGLBgNVHSMEgYMwgYCheKR2MHQxCzAJBgNVBAYTAlVTMREwDwYDVQQI DAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9u Z29EQjEPMA0GA1UECwwGS2VybmVsMRcwFQYDVQQDDA5LZXJuZWwgVGVzdCBDQYIE -e9SskzAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwFAYLKwYBBAGCjikCAQIE -BQwDZm9vMA0GCSqGSIb3DQEBCwUAA4IBAQBYpHCMUlGWm803moqfVGTkU/xGlPQd -hpMtmcf8GsSlDKmGXW335+95f5emZV7WmfKqaolAI0rjA7/sI98QuiqcloCEhSE9 -eS3jEuEEeDvySwnqKgz45eTXyjqjpH746uIXju427xQtr4z6gYYQZBls1ozEFrYp -MfQXZJqVm6Kodg72LNrjToWeuNGkeGtGikyqXAlCM3/s7FsapuN89KjNsQv1p8e0 -LTXnJAm/5yxcuQyxWq87pta11IS89RylDDwmMMBIJWwAE07O+zH/1OC6yevapKn7 -rZw/gYz4uhbmlzsQVJrHdsaZ8Dr6+Enpz5X9CmNRqijk5dbzaSS9wvbu +St5aGjAaBgNVHREEEzARgglsb2NhbGhvc3SHBH8AAAEwFAYLKwYBBAGCjikCAQIE +BQwDZm9vMA0GCSqGSIb3DQEBCwUAA4IBAQBDEmQAW9XyURar2YNHstTQk7XhCYLJ +dEUAXgUi4+l9CilHPhi0hT2Fbd4HwG+2flyfJhtac6LK0uAwVKxsa+HdVnEtZwhg +YXFd2RfIQAyNEhhaCHeh+Q7BqGlwnZ9LZd/mdqff89uJ5YOoLLPvlsgVBh2teFmX +a74/BsqUP8V8axa3P5E/RnCec0jUbP4JCSwLqvuwJ8oZZLoDll8/6H9/Ju/Y39Db +tddfLSt7YcRHChLTZZL00h+x9pB7xwNvFM9p0Sq7TdnQKMg/FL5yikwKIy4gdS2q +R4wEM+g/p97OsA7I16N+zYBAr+i4nwlu/80QsnbsUUyXxyP70bD19OC1 -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCKB1iyc78amtXC -aOfh3wZ7jidmiLI0IMGk1KuGnUzyoRlX6PlFKm+I5/rbyVgVK0MEKIJU1rxrxBww -JyW//D1NOH1FTcKk+FnkBs7T1iwct+2OocMArQVcavFayqcqubxvWFztjBNxCoh5 -78OHu7BBqG3iXu8HvWivm+FAkqYWNk8M0us5Ui/yQShRXcPRTYqAFyTatlcesijG -MKEAJ1AE4xgVNmJI88qoUmS7ftbFW0B53ru7aJKtQ9xGcu1EtDEUSXpJAVmmSDmu -AF0LZaGYUd/zerCweOgkmy0rEoFQPKKb9Ib9PJ4vo4VN6RKYt3DzDxpqu58pZMVJ -xxn+UjmnAgMBAAECggEAKHROwr653ApVbE1i6Qh81emsEpkt4alYF/9c5m9kBhjB -XMqjhGoTloSnOZOhhVLQqX9V85ecUdmAiXxvy/0Z2nAcBxvrWH6RmguEwwGanDAs -KAmxJZmQYK3XX0zWAee+GsRDODw91nvH1DU5kaao2hWLXzWDyTjyXcXKFyrkEs4Z -1sQiJHCFQW1l5j6x7kAXrbOHUCziCww+vvCUCW7ujut/Nl1MLzPrsIvtghQKwvhe -uJVB7uZxtBHjQEfycZOLWCNUEE6WOJ/muUeCtHbmVbr50omOlRSJHP/MqUQaxnpM -KS38BoUbJpbaVOKvgokjLHXF8KojNQ+Embx1Ql0AgQKBgQC/6lS/3eQKAr1FmjlN -PicKDb6t08aE7THupy/sDL9jqEIYGfJPCbu8Guyd7nCwBCdJ8KcHQehukcZ8i1O4 
-2Z/gMtuurHvD2S4+6sYjHZ9SyRiTkW8XY/jmwCYqAraS9fNNWj+maifyTcAreO+f -KVI5/2QCNPMqjqzFGaS2w5XueQKBgQC4HpMpbikIzUEK3QnpcpqIyufpYiZGhOb8 -qgwTZCw3Bqn7KKB7kfTYpghLdzQmqUch6yaBWN5+YgabFzNtOkXvstpH4m5BJ1Q0 -N1zTTiPHOxup0TUPWQo2Qa+h52p8BOvKSBNGcgNFDJ3tdDOAX5tNeywunx/w1HjA -aUUNoKLhHwKBgB+xjDNvaoR4tVc0Q/hMplfTs0Szr5ouLcvS0mgyJr1HgTrHtit1 -WQqUi7T9NqDq3q4oTv001jTEYDobLEVfszZsT7lGBN5wFGIRlY0hDDm4uhVMtELx -oJ5C50qSziHw+jAxEkfiShyK2IyVWUU4prqrQZHXuryxeTjHplsEa9NJAoGAURLV -hjbFxuRqsZfnV25pcba3K+NWK1M2SyetrZQ8i/ZZPwkCsabxg7yIhoJ06lk7w0nC -aM5zGn+bnQs4T+6LASNmTqT8G6BvyZZfP4R26LG0WrCOhrWUc5O0/Lvj/bxE/4uB -QVHO8sa9e+PhEbQHtLR6HgVfkTJeAYvZJkkHr80CgYBAo06hacP0ATWy9GetJztU -9OZfhobBuk3kBdU3tNFNe8UFRLg0MnUwv3FdXM6XsVhH/r18ApACityAzA+cMw5o -9nPyi+C8GqWum1eg3XaSPpKxNCVsAQuTJpSjL3JqeZSo07XUOAS2om5AQBLsbGHB -2hpwDA/Ccom2Sc8E/VysmQ== +MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCexx4Dn6nsYXto +Ut03lKjKOQfEiZsGAgGBHAMl6n3cwoUHsLfmeyfHBwTx54rx+VbawG9YHLSx/173 +GQRUcGY2ubx9I9OSi+LydJKaGBObNAzTeLkpE1AcQxWAFNUiSWmvwj/tpdIanYIa +EbYxfXTmtlC6kBD+UQkTGtGmvuM7bKdhqvbz69ANwzX3wx9XyBAXZor86h4KcEVl +gHBou5hUJKMIHHbkonPZoklKyaHUT1g7k9cdGPEMuyR9T6ypNJGU1RHy6IKH7IND +WjnfITOwfN0pOE7Nl7BRqmfsvNht0NCEjRwKaJ4Bzjo9WkzSC3879gAvhYTwK2dg +neNr6NAlAgMBAAECggEADBbhJCl+CK+Lc/ALQ/Yfu/YHr2FC2nxAnRIC81odZzew +ZVuBRR1PdAoGyFx26eGwSUuprq3Q3FSzaGERQVSlk7kkruqRa3EXQJq7ptv6cjDO +YTOp9YSw7tX0xyHO8KNiVq2VrISZsMkZZN7AXphM1FxBXVMm2BZXeIrsN6QFLTsD +7wK32W2hmDu//gRfO6ptfHg+4xPQxix8TW8XrFwmEINCNe7/U/DeHAew9fvA1aXx +k1QubdGQBK0uzwIhAgGHT9LkN18ocswx/EL3fEJNJ40r+A716WTozvFMogMBh+g+ +A4YP76asmPZgSWjeDg/p80qheuw7gPVtmmqYFL8wKQKBgQDTcghF9eM5TCzQKrRR +piKqZskw11DxfV/+83XQuE5c/riaf5q4eN6fBvtvQEkMbII8ntOx2lMODkrvw2GW +ImNN9UHnhbQXFEWo9htNyhXAW4gIsr2adqICs5q028i2OyM44tegKhMjwkcrGIif +h0SY7zo4zu09LC7euJ+pOgekFwKBgQDAPAr/K4OppHsyGZUm9WIHc8iU+XEqZZk6 +jCJfE3MpPcFC2rv1eMkqCq826lmnfqRp1bV8rjN6OMJRCRfY9OKjse5G7/S2DEuY +tqzTSp2XJgyObEYRy3ohkaP8XJebbjNkwGJMfMaJJQFnMKl7r6O0k7TdW5T5nzS7 +jm8rjl1HIwKBgB+yGEtkcItGP/F0dVNH9CpW/hG0NQ3+HUW6eQ6wkg6eoE2Ik0jT +1YtBIK+sk0apfVxY8qLWd1zEB7epMhevT/N/sEFkaXqML2UEf1NJ17jd/6ZOJN3i +qqZ9BdPWnl+yvbTpTClWJUhW9xQeuVwwirfiJHgfjLB+zwfT5RsY98xBAoGANw/7 +xmNcbzch4pPZWTNoROIxThq1A4PehBqnrbwQfHun49JrjEBwvo4Y6g3uaD+qMHZ6 +aaAsNpITdsq3/fkHAgRxT/hji9GxxahtkIgK48SOTC1PVMGH1h21bw/GjUrfhbhl +b6s94wKqiDg0E62Xop28bLtSzL2BiUefKEa7CSkCgYBVZY35S8Gdy6x5mXZ1UjVI +knDEfy/GSHWp23WO844Dc/gItn3EB09RkVmFaeC1FnsM2K/JJnephfnwjhbgbAs1 +gSJFiN/GWgS6aEZXfjaBNcJJDx2cHkxO4F62hBnsDMD7SEOcL5vtM9snx6RX1O+g +ocYjFO0HRaQtlVyfy2ZX7w== -----END PRIVATE KEY----- diff --git a/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem.digest.sha1 b/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem.digest.sha1 index 773e449398942..bb66718300433 100644 --- a/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem.digest.sha1 +++ b/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem.digest.sha1 @@ -1 +1 @@ -94F9962116E92EBDB4FC7007304957CCE1A41F26 \ No newline at end of file +F0BCC1A01D84E30C4F870445B82740FC65A007A0 \ No newline at end of file diff --git a/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem.digest.sha256 b/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem.digest.sha256 index 02ea263cddb7e..5309f85b90ac3 100644 --- a/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem.digest.sha256 +++ b/jstests/ssl/libs/cluster-member-foo-alt-rdn.pem.digest.sha256 @@ -1 +1 @@ -215C9A1DB0D815E937668EBE8230496B9FDB3DBE2F9700820B9F631B87C28CB5 \ No newline at end of file +3311F2299E02D351ED2FFEB045E66562BA92B3620DDB0A03A1E08F9FD48BA357 \ No newline at end of file diff --git a/jstests/ssl/libs/cluster-member-foo.pem 
b/jstests/ssl/libs/cluster-member-foo.pem index a80d90767d70e..4249b62d61a7b 100644 --- a/jstests/ssl/libs/cluster-member-foo.pem +++ b/jstests/ssl/libs/cluster-member-foo.pem @@ -3,56 +3,56 @@ # # A server certificate with the mongoClusterMembership extension with a value of foo -----BEGIN CERTIFICATE----- -MIIEejCCA2KgAwIBAgIEU7DfoTANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIEejCCA2KgAwIBAgIEWdTu/zANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjMwMzE1MTU0MTUzWhcNMjUwNjE2MTU0MTUzWjBsMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODQxWhcNMjUwOTEwMTQyODQxWjBsMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEPMA0GA1UEAwwG -c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAoj7sGnUpd3gu -MWBZD3gwilIw5IoVySUak0g9F7VESbU0nvCS6Df594TnE7v+pYUczq6U2o8fgAUi -8J1iH6Zj/osIbeQuoDbFpWyVmYGNFwDsvWcxXQEuWpdn0Fk2U6Ropaxbbp9Md9je -Xp/1kfpV2Fmg0IKvC+l3hkoalnBBJseftbVV5qs0Gw1yftyL0t8Fu4JVl/mQQKYD -19pyPxuDapgMRhGCmcjhjuNeFY0w6T17TBT/tQ9B8wM5hNlXElvWQqKnQybXF1S7 -ZRfXOHRFgBxUxJaEREPHHjt9QozFY6NS/BN9oBQyihj1PFqB54yFNoNRx/eQAM2C -LUXk+wyfZwIDAQABo4IBGjCCARYwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYD -VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBRS6fOGvmeuH1/U -CQikWX+BkLLLgzCBiwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8G +c2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAujuHzpKxR18e +dSo98LUtYqvQmOXizigAe3O1TiXaoh+vcuhVk4sKDEanVZ1EkD5wCdKe4Y7FeuGu +iFgkQJIz2cQfBd3p339BUwE86XQVopaHcTtkUm3eO1asmu6GW0uvEiYj3u7SuELH +IErcAcn8+aSfeOESD+3MBXqwgjB2lLAbpOOnfFWlL9QOQQ0JIvIn7vdkjs3TefJe +D4Ie+Sg1rVsGv8BLy0DsyVdClgV+lRKPJHsPNWhE/Jg7/Xx3B5xKcYM5/Wks+UH1 +TIKpR+wMX0/h+w3vjfDk1YUFnpLRmnL5V1jyTzlKfz1PpAO7YPdQsj874RhrBdRt +x/YWPPJueQIDAQABo4IBGjCCARYwCQYDVR0TBAIwADALBgNVHQ8EBAMCBaAwHQYD +VR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMB0GA1UdDgQWBBSsvx+7i4nG6PfW +q+Geam2haGG5ZTCBiwYDVR0jBIGDMIGAoXikdjB0MQswCQYDVQQGEwJVUzERMA8G A1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAOBgNVBAoM B01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVsIFRlc3Qg -Q0GCBHvUrJMwGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMBQGCysGAQQBgo4p -AgECBAUMA2ZvbzANBgkqhkiG9w0BAQsFAAOCAQEAPNCV4cJ+4rirKCT5Rw3p0ZUW -OBmb4ZRKVJn0VuLTBth8516ftP3N1IXtuSy7UjpqW3wSrqN3YNI9tibNlrs5CGkA -9EZiX1y0sxxUTM73EqzV9kx6dJ2g0BDolgc68sYdofIdIDNMzvfqg4cyIsH94KxJ -h4FXD8bE3fnrusaZoD0TDUwJ7/YX6Jv191R06vZHR5YXnnPzZD+Kig+tKLh5ePCN -KcgoPPMf3TPPbvpZVcyQHeceBSZ4+1lN/s4EUhSvit7TMO0TlfleLv2gC48MQt3R -YKu70fqITRKchyXu2kAgIjAhUWjtllmYrIiWjiWwCaPJYcYIxXXIn3f9itzjjA== +Q0GCBEreWhowGgYDVR0RBBMwEYIJbG9jYWxob3N0hwR/AAABMBQGCysGAQQBgo4p +AgECBAUMA2ZvbzANBgkqhkiG9w0BAQsFAAOCAQEAZRevRwSTn1JD5Uu4zW6QEhbj +eZoOE8J+/ACf2eUvbbIJmS6E9XjJ13PaRUoeXpkckHgvslzR/Yhr6OpJtolkFlAB +s6KmX8AbxDA+P/0pk54h9moRjh5gNw9mbHA8aIDrLzAIiGadOIcTctRvswMI3aLC +pLnSQDJWc+22jAgDevSht6Po7SlRSx9SVIE+24dCWNeeZZyvchI9Q/a2hHaAwS9A +4AeV7ceRmb+HlYyLau30A1p7sqV58rZzlpoU7UH2/dROmhXZjVsmzQ+7e6q35Le4 +HKpOuJFKvyQwFU1mZJPXXeXK+mOyZZczYd4gCBs9oKjb+KDxR9wglwgGG1L8qQ== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCiPuwadSl3eC4x -YFkPeDCKUjDkihXJJRqTSD0XtURJtTSe8JLoN/n3hOcTu/6lhRzOrpTajx+ABSLw -nWIfpmP+iwht5C6gNsWlbJWZgY0XAOy9ZzFdAS5al2fQWTZTpGilrFtun0x32N5e -n/WR+lXYWaDQgq8L6XeGShqWcEEmx5+1tVXmqzQbDXJ+3IvS3wW7glWX+ZBApgPX -2nI/G4NqmAxGEYKZyOGO414VjTDpPXtMFP+1D0HzAzmE2VcSW9ZCoqdDJtcXVLtl -F9c4dEWAHFTEloREQ8ceO31CjMVjo1L8E32gFDKKGPU8WoHnjIU2g1HH95AAzYIt 
-ReT7DJ9nAgMBAAECggEAD9NUY1ZHR6x00QMlXMFr9qoCs+AWNOsGFxSmROA8+3WN -3uz3X2hKXQ7dHUsqkQmVYEGeKl1ohKu7lz26uvyXZ1Y3acSmmaEOEU8wnmsJEJPa -A7WDlp9NXq/DBAsXpfv06ygPORCXvF7ufctbgDQrWHGRopUErwREUNh8lGz5pecO -FawUQoIrWfOx8bq/PFXAFiaJHfk1SaadZdHS1TX4ZUm07iYuYUTqxarffmyub8ZN -lO7G/3fdivgfuBnMETUDFOu7xSphk56AFlxLBuEVk+u8/I5XHWiRcVKTA4vtbyUX -xjM/sxO7qCZ6cY1Z/xaHOJUj0FoT5wqjjn4UltWerQKBgQDOPdiL6TLlQkF6rr6t -Jo2anm/Xs+dr70NIwsfYxNNLGoLCENxAwYIjKjD93UuS/eTEkKga141bS/05pEiM -rjv+jBxJd3El3RavBjYV+npjRiC7qlVg/hPhx6ZXCEOg2gx/6zIj9fzGmf8JEeLh -VvIDqXw2b7mJUSv80AfWb+SHLQKBgQDJY8CZRP8hsGhV3b64PquME38CGwCSIdqW -3MxAQHE0KFo98HFoElUecUklZh+AoHL9hpGuDRvSJ7AC3ynNnV5IxKYYjwM2pNL+ -nr3RNsNhrPcEMj/elW7BZoVG+zGyLjiTx7GO6e3S0rivUDhAQSEhp3G8ZM0kxsvi -/RqxLXVdYwKBgCp+RaK2Hp1r5E/htzm3ys9Du6mG0LTFbGiOcVyxWRONV8miba8N -78FNDSEROmQD2eHCKFC3ftGDu53nwmbx8zyEI8PjTzXM8sKHFhe7LwJLTa088DB2 -ySPo3dXqxvxaUN7+V6tfIIDO8+QrgkKJhn3IquYQaPro9ZY2SpcdIMnVAoGAFF08 -5YK/lcWD12Lz3SehKynxhuH6HczEkMrE8J5TlCWccnT00sQ/zTNBZUG9X8FZv18z -LflvXcHbn363eG44UX1pGkSj24uxNkQRB63U9fSKiecW5EgSCgZ25aWS8eSQngjs -YHoxLUdXm4quFXlAg2muK5G52MUtaseTQmVJX+cCgYBRFJ1zUDYHpniXEsGUx/OV -FBEZg359fRTKZUnXAtOW4gbaWMmLZ8N47pRvJakbkvqrMWUFagZKWDbA7+BxopVB -prIvNrSaIM3Q9o+H7gsA85O1qPkQQV+1Ue6OCPlxLz8AVnWb5zu1eFUNkrLeKwz0 -WREDb3ONFYMUxgzcvCDXgQ== +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQC6O4fOkrFHXx51 +Kj3wtS1iq9CY5eLOKAB7c7VOJdqiH69y6FWTiwoMRqdVnUSQPnAJ0p7hjsV64a6I +WCRAkjPZxB8F3enff0FTATzpdBWilodxO2RSbd47Vqya7oZbS68SJiPe7tK4Qscg +StwByfz5pJ944RIP7cwFerCCMHaUsBuk46d8VaUv1A5BDQki8ifu92SOzdN58l4P +gh75KDWtWwa/wEvLQOzJV0KWBX6VEo8kew81aET8mDv9fHcHnEpxgzn9aSz5QfVM +gqlH7AxfT+H7De+N8OTVhQWektGacvlXWPJPOUp/PU+kA7tg91CyPzvhGGsF1G3H +9hY88m55AgMBAAECggEAD2hJbg+41vzJmkvqX/W9UYKBeYTbZM7ua0hBYbvEANG6 +RDlwiwpfxHA7+9tNBtoDjux79JFJSUdOlz0jkqwF2ihdoLChXro6f1GE5pOSDqeB +gR62yBMK5cHf+dluCDOUwl2hbho4GMJ9kvBej09UvJqgDvIf+m1kCbgCzmKQDPD4 +z7wKeZP2TIaoK3FmwvLki9HOUg7wqMOLcz+qQwC3TaqRDvmX+aGuYVpnVd1eaQ4P +9Dd1yapDau+SE12kA9/iiDkhwoKjqMVI0csyYugEbleasBQVfCRHkGd9Q8I0Mox2 +wbvB9fqXWbrGxbfRj2mDmyn1si/rnUNmQ4jtSo64AQKBgQDjddyW9mcQ0sCiGKtp +hZaWgZf0ShBoVDSFXKBdRI2odTVTP2OD7JST7OfT3l+aMayLUjzJ9+WMq3y/OIzb +g4a9LpoeyJqScJvqhWYq4Ra6gYm8M3al+5lZUACXF/+o+rHlNOekdIuvviVSnkbT +MwsjjkbQWJCDFzmE2IZ88Gpq+QKBgQDRmWj6ifjIp6FpmnkgfsdWL5lRTUYGZ4y7 +k6rSSyxugvEp8Ds2lBb1SG0iN2Dx91kf8sE+q+WhsQBuv9mibqc8/Z1fx4hW8x8G +wsRJnEL8AyqqwJETW62VsA44jBOuHNfGUJg5R+hfJ1sCkgh8MAsyHQDIomw/UYUe +btw/tYl/gQKBgQDHlIMZctq8SC/+Cudcj3qyVMOf9dyvrjLnUX7kV2J0jWNcyCHp +f4xev2Aq5tO72BotpNrx3DHuXAn5PwVZFu8C0AErQZlfJK+p96LNvPLUcxUHraBN +w5WqaCgca1JIUrAFDYazJg/rBEx7UaWTaxY6/HvmE8+5NoehqNtYhRrbAQKBgDJw +Q3X/G9JVnJ9IIBv0xk10OOX6vX4vDwvYtkPS+UdC1XwtJmK08m1mV3TjdL3lsxng +FibZwPHPIXaFTn8rN62SrnttDd82tGGxHwFDzE7PCEZ8/qx05GngKxvqgZXltPsx +S+CE8GNtmhYdRFN2UEtn9VdS1s/J80nmjbP7ILKBAoGAE6kcmVu7ijpxvzD2Q0pX +76f/KAX4/17MPXGW/YmT7NB/NZeSt8dirnTr6VX/5RS8SB7DgMMTbfUpF9dK3yag +PmGpWaySCLHgb7AUJrodVxSB3p0SMkcuxFuX6slbmVvnUb6pq46W/SD+xaXc1067 +eraAYO2KH/1zavICZYypXiM= -----END PRIVATE KEY----- diff --git a/jstests/ssl/libs/cluster-member-foo.pem.digest.sha1 b/jstests/ssl/libs/cluster-member-foo.pem.digest.sha1 index 21209a1dc306c..832c3c13898fb 100644 --- a/jstests/ssl/libs/cluster-member-foo.pem.digest.sha1 +++ b/jstests/ssl/libs/cluster-member-foo.pem.digest.sha1 @@ -1 +1 @@ -5A081EAA0D42DED66771504EC405C5F9AE4885EA \ No newline at end of file +A87698C429A7C23785C03D2BD8F674C56535BD43 \ No newline at end of file diff --git a/jstests/ssl/libs/cluster-member-foo.pem.digest.sha256 b/jstests/ssl/libs/cluster-member-foo.pem.digest.sha256 index 
4ef362e70f50f..e776510889d31 100644 --- a/jstests/ssl/libs/cluster-member-foo.pem.digest.sha256 +++ b/jstests/ssl/libs/cluster-member-foo.pem.digest.sha256 @@ -1 +1 @@ -D0298CCEA9CEEBF3E739E331AD7A4C5A485DCB8FC66AD236F281BB5040136076 \ No newline at end of file +38C59BC364D14E531F0EA8730E48BE885D082565AD7353AE18DC0316C80B19EB \ No newline at end of file diff --git a/jstests/ssl/libs/localhost-cn-with-san.pem b/jstests/ssl/libs/localhost-cn-with-san.pem index d91b1ae5069c4..052c6c7c4dd12 100644 --- a/jstests/ssl/libs/localhost-cn-with-san.pem +++ b/jstests/ssl/libs/localhost-cn-with-san.pem @@ -3,52 +3,52 @@ # # Localhost based certificate using non-matching subject alternate name. -----BEGIN CERTIFICATE----- -MIIDkDCCAnigAwIBAgIEV9X0HjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDkDCCAnigAwIBAgIEdRykOzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQ0WhcNMjQwNDMwMjE1OTQ0WjBvMQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM5WhcNMjUwOTEwMTQyODM5WjBvMQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDESMBAGA1UEAwwJ -bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5uaoVP0l -xsKbPIr8qNsd7OQ2Vks2WzaPiwiyQ7uRd8+kRymJE/F+nKAgZvUFebjT70FJs/kI -D1dQqcAn3OxOZFSCv62i0Owp7oLrQ3fiQR2xwuQVFa2D8hV9z8vLc7CZyiKdi4EU -EZOv8N1K8KxThV7E12YI5OVXQl+oniXwbDf4Dkiex7go4mgIqv9SBDjBLvWheCkP -WTSGULtsL8Dg42neqR7Uh/4Iawm7ka0czwluM0GrLjDAirdw6OOBYNuc3Be1XhjK -/5zY6JOt6yeHce2jjte7YzLoCBv0hm1sVyIkocrxPi6kvC8crw+RGI0m9ijZIq0K -N90aMGwVq32jkwIDAQABoy8wLTATBgNVHSUEDDAKBggrBgEFBQcDATAWBgNVHREE -DzANggtleGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAKroMjfypcbZ5Nq6x -chBMOgVZRMtleqosP51zBk8KTkLSyfQCYbwM+lq+Us4Y5zzLHjHUmQltJi5Yn4l6 -Voe33UEAi5xA0SEPGBvG2uzGzeY6ShQl+BhfUxgMWxaitm8/Hr0wnpm8AqOglskb -9GozqQHfWISwY+9JDxR7aLaFofgRhR2iZEqbELkz+1KwOoUtHm8HMvS+k6tZziTE -fO6Ergg8iCVixvtV0EOKUMqmiUSXH0ZT6uOw/z1XFloJSolg95g07z5LgCRMe+zT -zwNOmnHdJwJTDYjXMQS1sEdvjQvBKObJ4xIKhkwqrTOx+PY40yBNYN2iPIViWEOv -sHxp0A== +bG9jYWxob3N0MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1I+l+f5V +5dcZVKFGOS3rj15/9IJ2wyeEykMrgxEtLGRH7ztLkTDY6UQfJJi0mffjXRAeq/Qk +u9d3OxvXfptGq4iR6VI9aL1AabTbF0qkIu5QCRwjESeJrV4Elg5YZtd1KRZ2XbFW +3uVKrJmyyVmzIadfvSEezWhEm4cLENvq1jF6kgMmkxabtRS48VTlL0ZZ4Q3/Nqnq +VtuRf4tE24Qy0fJwAH+zi9RPvi/+y8hZoEl6c1upe6TpK1toufCuGHDKTqjsNVNE +HS736VQJVUgbMipYGODMceOKkJoF3BoqszBo2sF+cYzXDA+etbuFYBHkeJxgSKw8 +ppbYLODGOpYM8wIDAQABoy8wLTATBgNVHSUEDDAKBggrBgEFBQcDATAWBgNVHREE +DzANggtleGFtcGxlLmNvbTANBgkqhkiG9w0BAQsFAAOCAQEAYZXTtZBmZtre1Gwg +zpbhqjjMPJuOOzu5UxmmExOJLYhdPbwj56pkUgBOVYLhyluaUy6zCUJjL5peNVbl +2evOe0blZiRb5L5ed0yQHLwcMQONsETJO581+ul8Xcb41L8FZLKxpqJRGI5tZ5j/ +ZDi0yFEe1YVjD5udxool6OpHfXUBHWDLgYqU5MOQtAd65Q1BeGjx2skeQpUyOAE6 +Mir18Jz2og3OQKZ2ykZrR1RGzufkdvAyqh+AVdXdUp8BWxf2HFrh4wTB1ano0XIe +D9mSP4IlHoHkXTHTy3yfUzlUhVkQ5WsGGdmAis6KiGUAblYpqP9sAf68b2ewW4qg +JHzIgA== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDm5qhU/SXGwps8 -ivyo2x3s5DZWSzZbNo+LCLJDu5F3z6RHKYkT8X6coCBm9QV5uNPvQUmz+QgPV1Cp -wCfc7E5kVIK/raLQ7CnugutDd+JBHbHC5BUVrYPyFX3Py8tzsJnKIp2LgRQRk6/w -3UrwrFOFXsTXZgjk5VdCX6ieJfBsN/gOSJ7HuCjiaAiq/1IEOMEu9aF4KQ9ZNIZQ -u2wvwODjad6pHtSH/ghrCbuRrRzPCW4zQasuMMCKt3Do44Fg25zcF7VeGMr/nNjo -k63rJ4dx7aOO17tjMugIG/SGbWxXIiShyvE+LqS8LxyvD5EYjSb2KNkirQo33Row -bBWrfaOTAgMBAAECggEBAMqFgRuaNeoRgqsllNpd5e+Dcw1ZRsHWJyzhYcn62Gpn 
-20T3b13osQ6bfBAR1M2giXMOpqCOSr157iwVeGFTeqaRYWTR3T62BUlH1yYEHpTS -gLnZ54xt9o78vq7lpvs/6IPcvKZBfuKz0NvInRRfcZpHNwteWWq6pUB4OqMLz8Z5 -3j9mo+69izUOaTbLptXngV9c0TYi2J7JHVwV7nxZga4sbq5btxN7gTv/M6dKKEvl -CB7p/aD+EVMUgIICw++FvykgOfCXVbUEWld97k68R+UY805xec15ovAIIwzPWViG -ry8yVUewVM6a03AOHEV9lOtfyJOm683EnflvFVG3nDECgYEA8zDwjk+gDfbEC0+1 -C1zsC9ojyg3tve80chrnpwQfL8hrrdUWq0eHpL1boA1vYvgl5Uukqkj6xKFaJXFZ -0QaDB6xTemxpeXX/+KffjgZbglvpm4Yr5C/tCv6U0lUx19ReQB9L+Qqpzg5kJvDc -xc7eSFW3RPo7bZJJLUwLq3Np1GcCgYEA8xAA+3N2+/TjTOae4Ks1VtcvBn5shs4Z -kg3IraLznpkL1SlQ4asxr3iy0ie7gWUIKS4TwELDjXnNprfgmJZfZMgE2MZfZwCO -9P0/p+BCtqubekVFnMGezul3XzzT+wPCGoOlqz5VeiW6ognm9RBm5e0jz42j13dp -B5z/wFatm/UCgYBVF1OkR8IWALjZyFrtjebdwsbxBOyhn5f3MOjLLIsI+hSLL1sO -NSoF/2eW2fyWYYNI9q24E28C6/4RydaGZ8PjJG3VESfaouochAiZtinAtA5KJ6kl -34sOZMOH0N1uylTsFMdNbWi6u5hZc7+byuVF5BALJ48xqJTIL6qJpAlskwKBgE3j -Xf011fYNVl1JNbZXBsOqNvaEwrA8ETOdWSZTJnA3KPSIxdNa8ZQCQINZmhtvzbqs -ekXM3y9RzdXT7JPY8/6uneb9QosWQbk+Ag0Ar0AsI6l90z3VSdeSNt989YzlemjW -sNr8IZX/yxurwqfbNq4NXMFg6RTdvfljlQ0EeaOVAoGAR002FIKL1Z/3DqbVV+tQ -FlOksFDTshG1Y9mDQAfTCSlNBeGDz/bQnSo1WWj/DXVLoHxpBglvFmWSajmOUa1R -W4JDOK/W949pPYfP1QRvbJVGmZ/Y8u90GZbBjWQF+E9yLicESOpN/BH45FIbxQHQ -2wH/9G1OCozBqCVXGhJw7CA= +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDUj6X5/lXl1xlU +oUY5LeuPXn/0gnbDJ4TKQyuDES0sZEfvO0uRMNjpRB8kmLSZ9+NdEB6r9CS713c7 +G9d+m0ariJHpUj1ovUBptNsXSqQi7lAJHCMRJ4mtXgSWDlhm13UpFnZdsVbe5Uqs +mbLJWbMhp1+9IR7NaESbhwsQ2+rWMXqSAyaTFpu1FLjxVOUvRlnhDf82qepW25F/ +i0TbhDLR8nAAf7OL1E++L/7LyFmgSXpzW6l7pOkrW2i58K4YcMpOqOw1U0QdLvfp +VAlVSBsyKlgY4Mxx44qQmgXcGiqzMGjawX5xjNcMD561u4VgEeR4nGBIrDymltgs +4MY6lgzzAgMBAAECggEAVcmMATNm05ZWgNN+FgG0em53UeV9rRG8BC3L1+AMzUfD +yIez3etLAAogT5LaAPIM7zhcSKYGVVGGq3J3LH/aq8w4p2QrqKQI5SC689GKIRbx +clvKw1ejtzakvzMW9pqqjGSmbPXvy5Ha64I0qBLtuv1kJF7Dob8dQW/lRW6cfslI +4ZQZqGJhnkc3USIUMXP+MyZrHOQ+b2V0Pnn2s3rSat/nsHZmDbuqZK69iHPSGoqT +7pxfal8ABLUQUifWk7SOBEqNrA9X155N/ufd+6MTCv+C7DgueyYoJRhuknNQHkSo +uuK/ZeI7k7SjnMF+VeN55BatjkJRugujMDiZKLKKAQKBgQD0VZXYZAgU1DmONzU5 +ZdBvC8Y7ijYtvRalJ9NaQqJTJr0C80erzwmPd8QxwqziiiOasgr5xWKJ1dn0W5ea +qM8TRqf2T2cKTpJ76PT2lXvkdBO/L4GtSsrToXnM6HjP1OKAYWeZ9k+nouC7GsjK +ynAVfJShF1QfyVCQpZOxQkMSgQKBgQDetbXOSRx7wslD7UWAbmIAhYW5da1ersDr +c/B3iv0IcCp23LpgjCFpkgn8i9BagvzBLbGRvVv/ZvSZ3wOlKX1ElTWmwnhB3qgz +A0a7h7WeCEKO9u4qSiV6VWxqenllYiiY0r+D9bxFXNTEqKlpiqOSelS783XOlHpb +n2weHQw9cwKBgCHJ4CH/m1p+CsOPAO5CFTP2FeDP+TQUDRijzEeUJWyIU+UfjY7F +39WWQ2x6NnxRjCDtdDUcRcc36p8vN2Ts6U4KHFokmuRC/2DhU1byTyLFpqFuyWkU +ib4Ae56Jm4NqNjR4w+H3vQY6sQdO6euZWSyvgMJSPpDyvxBxxV3jAvGBAoGAB1Ag +MC3maYnz4e8goR7+yNWZloVus/U5EII2GzbNKEHGozCMTi9LyZOqEEjnBpHWiuFd +O1PkeXaAAa15CStbmazsnhU0rbUKAOLSp8XFCPNsfBVPrMPShznW3Yg4BEQ84QfK +kiK3k++AbRS8u2J6DI1HywQrkiT9/5Q0kKiThlECgYEAwp0C+89RUAiDlCug0l2h +rwTPpa6iHIi9wTdrZpCNRzyu66pFBDbwny770TpSl6gVjg2oIvAiUIcASFGeMPDk +QaVt4XEhfBKDbc28GOgEUCKoKWXlpqNHbRtpSquVbEVuUTki53jY772u1HnTzb4o +bP+Z3rnjUQ0lKvzRnRCltTk= -----END PRIVATE KEY----- diff --git a/jstests/ssl/libs/localhost-cn-with-san.pem.digest.sha1 b/jstests/ssl/libs/localhost-cn-with-san.pem.digest.sha1 index 1127761edbfac..729768c9992a1 100644 --- a/jstests/ssl/libs/localhost-cn-with-san.pem.digest.sha1 +++ b/jstests/ssl/libs/localhost-cn-with-san.pem.digest.sha1 @@ -1 +1 @@ -A8FBDA18A45E8945D1D6E08E77B3070314B80458 \ No newline at end of file +183A6B4C451F5D2031D61B72FCA52B57C10E3396 \ No newline at end of file diff --git a/jstests/ssl/libs/localhost-cn-with-san.pem.digest.sha256 b/jstests/ssl/libs/localhost-cn-with-san.pem.digest.sha256 index 4bb65aa4d9042..e8cf40568f723 100644 --- 
a/jstests/ssl/libs/localhost-cn-with-san.pem.digest.sha256 +++ b/jstests/ssl/libs/localhost-cn-with-san.pem.digest.sha256 @@ -1 +1 @@ -5926109C2D0A4565771FFA92814319B5946DF5710A4F99A77FFF9A7881681376 \ No newline at end of file +3A1F8897CBD941A65C8BC2FA88A83E0B273DF821171519456EF7DD561F8F74B0 \ No newline at end of file diff --git a/jstests/ssl/libs/ssl_helpers.js b/jstests/ssl/libs/ssl_helpers.js index c144e104d07a1..c8b625c54ec32 100644 --- a/jstests/ssl/libs/ssl_helpers.js +++ b/jstests/ssl/libs/ssl_helpers.js @@ -1,5 +1,6 @@ load('jstests/multiVersion/libs/multi_rs.js'); load('jstests/libs/os_helpers.js'); +load('jstests/replsets/libs/basic_replset_test.js'); // Do not fail if this test leaves unterminated processes because this file expects replset1.js to // throw for invalid SSL options. @@ -46,26 +47,18 @@ var dhparamSSL = { // Test if ssl replset configs work -var replSetTestFile = "jstests/replsets/replset1.js"; - var replShouldSucceed = function(name, opt1, opt2) { - ssl_options1 = opt1; - ssl_options2 = opt2; - ssl_name = name; // try running this file using the given config - load(replSetTestFile); + basicReplsetTest(15, opt1, opt2, name); }; // Test if ssl replset configs fail var replShouldFail = function(name, opt1, opt2) { - ssl_options1 = opt1; - ssl_options2 = opt2; - ssl_name = name; // This will cause an assert.soon() in ReplSetTest to fail. This normally triggers the hang // analyzer, but since we do not want to run it on expected timeouts, we temporarily disable it. MongoRunner.runHangAnalyzer.disable(); try { - assert.throws(load, [replSetTestFile], "This setup should have failed"); + assert.throws(() => basicReplsetTest(15, opt1, opt2, name)); } finally { MongoRunner.runHangAnalyzer.enable(); } @@ -322,7 +315,7 @@ function sslProviderSupportsTLS1_0() { return false; } - return !isDebian10() && !isUbuntu2004(); + return !isDebian() && !isUbuntu2004(); } function sslProviderSupportsTLS1_1() { @@ -335,7 +328,7 @@ function sslProviderSupportsTLS1_1() { return false; } - return !isDebian10() && !isUbuntu2004(); + return !isDebian() && !isUbuntu2004(); } function isOpenSSL3orGreater() { @@ -362,6 +355,22 @@ function opensslVersionAsInt() { return version; } +function supportsFIPS() { + // OpenSSL supports FIPS + let expectSupportsFIPS = (determineSSLProvider() == "openssl"); + + // But OpenSSL supports FIPS only sometimes + // - Debian does not support FIPS, Fedora 37 does not, Fedora 38 does + // - Ubuntu only supports FIPS with Ubuntu pro + if (expectSupportsFIPS) { + if (isDebian() || isUbuntu()) { + expectSupportsFIPS = false; + } + } + + return expectSupportsFIPS; +} + function copyCertificateFile(a, b) { if (_isWindows()) { // correctly replace forward slashes for Windows diff --git a/jstests/ssl/mongo_uri_secondaries.js b/jstests/ssl/mongo_uri_secondaries.js index 73cca540c8031..f6ed83b0c360b 100644 --- a/jstests/ssl/mongo_uri_secondaries.js +++ b/jstests/ssl/mongo_uri_secondaries.js @@ -4,6 +4,8 @@ // for details. 
// To install trusted-ca.pem for local testing on OSX, invoke the following at a console: // security add-trusted-cert -d jstests/libs/trusted-ca.pem +// TODO BUILD-17503 Remove this tag +// @tags: [incompatible_with_macos] (function() { 'use strict'; diff --git a/jstests/ssl/openssl_ciphersuites.js b/jstests/ssl/openssl_ciphersuites.js index 770a8fb8c548f..9b3811988d439 100644 --- a/jstests/ssl/openssl_ciphersuites.js +++ b/jstests/ssl/openssl_ciphersuites.js @@ -55,4 +55,4 @@ sleep(30000); assert.eq( false, testConn(), "Client successfully connected to server with invalid ciphersuite config."); MongoRunner.stopMongod(mongod); -})(); \ No newline at end of file +})(); diff --git a/jstests/ssl/repl_ssl_noca.js b/jstests/ssl/repl_ssl_noca.js index 258530ccc9aff..1ab1a68eccac9 100644 --- a/jstests/ssl/repl_ssl_noca.js +++ b/jstests/ssl/repl_ssl_noca.js @@ -1,3 +1,6 @@ +// TODO BUILD-17503 Remove this tag +// @tags: [incompatible_with_macos] + (function() { 'use strict'; if (_isWindows()) { diff --git a/jstests/ssl/server_x509_rotate.js b/jstests/ssl/server_x509_rotate.js index 35d9f5a72c041..52c7c53df5d37 100644 --- a/jstests/ssl/server_x509_rotate.js +++ b/jstests/ssl/server_x509_rotate.js @@ -54,4 +54,4 @@ out = runMongoProgram("mongo", assert.eq(out, 0, "Mongo invocation failed"); MongoRunner.stopMongod(mongod); -}()); \ No newline at end of file +}()); diff --git a/jstests/ssl/set_parameter_ssl.js b/jstests/ssl/set_parameter_ssl.js index 0b6be77711abf..cba191854edd7 100644 --- a/jstests/ssl/set_parameter_ssl.js +++ b/jstests/ssl/set_parameter_ssl.js @@ -67,7 +67,7 @@ function testTransportTransitionCluster(scheme, oldMode, newMode) { rst.awaitReplication(); print(`=== Switching ${scheme} from ${oldMode} to ${newMode[scheme]} for all nodes in cluster`); - for (n of rst.nodes) { + for (let n of rst.nodes) { let adminDB = n.getDB("admin"); assert.commandWorked(adminDB.runCommand({"setParameter": 1, [scheme]: newMode[scheme]})); } diff --git a/jstests/ssl/ssl_client_bad_certificate_warning.js b/jstests/ssl/ssl_client_bad_certificate_warning.js new file mode 100644 index 0000000000000..1e63c5a7489bf --- /dev/null +++ b/jstests/ssl/ssl_client_bad_certificate_warning.js @@ -0,0 +1,61 @@ +// Test that the mongo shell logs the correct messages when connecting without a certificate or +// with a bad certificate. +(function() { +'use strict'; + +const SERVER_CERT = "jstests/libs/server.pem"; +const CA_CERT = "jstests/libs/ca.pem"; + +const BAD_CLIENT_CERT = 'jstests/libs/trusted-client.pem'; + +function testConnect(outputLog, ...args) { + const command = ['mongo', '--host', 'localhost', '--port', mongod.port, '--tls', ...args]; + + clearRawMongoProgramOutput(); + const clientPID = _startMongoProgram({args: command}); + + assert.soon(function() { + const output = rawMongoProgramOutput(); + if (output.includes(outputLog)) { + stopMongoProgramByPid(clientPID); + return true; + } + return false; + }); +} + +function runTests() { + // --tlsCertificateKeyFile not specified when mongod was started with --tlsCAFile or + // --tlsClusterCAFile. + testConnect('No SSL certificate provided by peer', '--tlsCAFile', CA_CERT); + + // Certificate not signed by CA_CERT used. 
+ testConnect('SSL peer certificate validation failed', + '--tlsCAFile', + CA_CERT, + '--tlsCertificateKeyFile', + BAD_CLIENT_CERT); +} + +// Use tlsClusterCAFile +let mongod = MongoRunner.runMongod({ + tlsMode: "requireTLS", + tlsCertificateKeyFile: SERVER_CERT, + tlsClusterCAFile: CA_CERT, +}); + +runTests(); + +MongoRunner.stopMongod(mongod); + +// Use tlsCAFile +mongod = MongoRunner.runMongod({ + tlsMode: "requireTLS", + tlsCertificateKeyFile: SERVER_CERT, + tlsCAFile: CA_CERT, +}); + +runTests(); + +MongoRunner.stopMongod(mongod); +})(); diff --git a/jstests/ssl/ssl_hostname_validation.js b/jstests/ssl/ssl_hostname_validation.js index d52d430dd2614..51416d420c59c 100644 --- a/jstests/ssl/ssl_hostname_validation.js +++ b/jstests/ssl/ssl_hostname_validation.js @@ -88,7 +88,7 @@ testCombination(NOSUBJ_NOSAN_CERT, false, false, false); TestData.skipCheckDBHashes = true; // 2. Initiate ReplSetTest with invalid certs -ssl_options = { +let ssl_options = { sslMode: "requireSSL", // SERVER_CERT has SAN=localhost. CLIENT_CERT is exact same except no SANS sslPEMKeyFile: CLIENT_CERT, diff --git a/jstests/ssl/ssl_ingress_conn_metrics.js b/jstests/ssl/ssl_ingress_conn_metrics.js index 333d3015a9017..b179c58224c79 100644 --- a/jstests/ssl/ssl_ingress_conn_metrics.js +++ b/jstests/ssl/ssl_ingress_conn_metrics.js @@ -117,4 +117,4 @@ let runTest = (connectionHealthLoggingOn) => { // Parameterized on turning connection health logging on/off. runTest(true); runTest(false); -}()); \ No newline at end of file +}()); diff --git a/jstests/ssl/ssl_weak.js b/jstests/ssl/ssl_weak.js index 90f41f136cbe5..7827093ea6bd3 100644 --- a/jstests/ssl/ssl_weak.js +++ b/jstests/ssl/ssl_weak.js @@ -44,4 +44,4 @@ mongo = runMongoProgram( // 1 is the exit code for failure assert(mongo == 1); -MongoRunner.stopMongod(md2); \ No newline at end of file +MongoRunner.stopMongod(md2); diff --git a/jstests/ssl/ssl_with_system_ca.js b/jstests/ssl/ssl_with_system_ca.js index 4626cceeaa967..5b0fecbbf49ce 100644 --- a/jstests/ssl/ssl_with_system_ca.js +++ b/jstests/ssl/ssl_with_system_ca.js @@ -4,6 +4,8 @@ // for details. // To install trusted-ca.pem for local testing on OSX, invoke the following at a console: // security add-trusted-cert -d jstests/libs/trusted-ca.pem +// TODO BUILD-17503 Remove this tag +// @tags: [incompatible_with_macos] (function() { 'use strict'; diff --git a/jstests/ssl/x509/certs.yml b/jstests/ssl/x509/certs.yml index f91b148a0a336..7f72dccd265a2 100644 --- a/jstests/ssl/x509/certs.yml +++ b/jstests/ssl/x509/certs.yml @@ -23,8 +23,6 @@ certs: Primary Root Certificate Authority Most Certificates are issued by this CA. Subject: {CN: 'Kernel Test CA'} - # ca-2019.pem is our "legacy" SHA-1 self-signed CA root. - append_cert: ['jstests/libs/ca-2019.pem'] Issuer: self extensions: basicConstraints: @@ -104,14 +102,6 @@ certs: pseudonym: Datum-65 role: Datum-72 -- name: 'client-custom-oids.pem' - description: Client certificate using non-standard OIDs. - Subject: - OU: 'KernelUser' - CN: 'client' - '1.2.3.56': 'RandoValue' - '1.2.3.45': 'Value,Rando' - - name: 'client_email.pem' description: >- Client certificate containing an email address. @@ -979,3 +969,37 @@ certs: description: ECDSA certificate and key for OCSP responder Issuer: 'ecdsa-ca-ocsp.pem' tags: [ecdsa, ocsp, responder] + +### +# Non-expiring chain +### + +# The below certificates were generated once, and are intended to be permanent (non-renewable). 
+# Due to changes in OpenSSL and/or the pyOpenSSL library, the definition for client-custom-oids.pem +# will no longer work, as custom OIDs are no longer supported. These certificates were minted using +# pyOpenSSL=19.0.0, OpenSSL version 1.1.1, using the following definitions: + +# - name: 'non-expiring-ca.pem' +# description: >- +# CA with an expiration date far into the future, used exclusively for testing client-custom-oids.pem. +# DO NOT regenerate this certificate or add any certificates to this certificate chain without +# consulting the Server Security team first. +# Subject: {CN: 'Kernel Test CA'} +# Issuer: self +# extensions: +# basicConstraints: +# critical: true +# CA: true +# not_after: 99999999999 +# +# - name: 'client-custom-oids.pem' +# description: >- +# Client certificate using non-standard OIDs. +# DO NOT regenerate without consulting the Server Security team first. +# Issuer: 'non-expiring-ca.pem' +# Subject: +# OU: 'KernelUser' +# CN: 'client' +# '1.2.3.56': 'RandoValue' +# '1.2.3.45': 'Value,Rando' +# not_after: 99999999999 diff --git a/jstests/ssl/x509/mkdigest.py b/jstests/ssl/x509/mkdigest.py index b9926b48dfa06..a7b0bdb448ded 100755 --- a/jstests/ssl/x509/mkdigest.py +++ b/jstests/ssl/x509/mkdigest.py @@ -39,4 +39,4 @@ def main(): make_digest(fname, args.type, args.digest) if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/jstests/ssl/x509/root-and-trusted-ca.pem b/jstests/ssl/x509/root-and-trusted-ca.pem index 18882c21d6816..147894c19d2b9 100644 --- a/jstests/ssl/x509/root-and-trusted-ca.pem +++ b/jstests/ssl/x509/root-and-trusted-ca.pem @@ -5,46 +5,46 @@ # Certificate from ca.pem -----BEGIN CERTIFICATE----- -MIIDeTCCAmGgAwIBAgIEe9SskzANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV +MIIDeTCCAmGgAwIBAgIESt5aGjANBgkqhkiG9w0BAQsFADB0MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwOS2VybmVs -IFRlc3QgQ0EwHhcNMjIwMTI3MjE1OTQyWhcNMjQwNDMwMjE1OTQyWjB0MQswCQYD +IFRlc3QgQ0EwHhcNMjMwNjA5MTQyODM1WhcNMjUwOTEwMTQyODM1WjB0MQswCQYD VQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENp dHkxEDAOBgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEXMBUGA1UEAwwO -S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDf -vZIt82obTHnc3iHgUYSc+yVkCHyERF3kdcTTFszDbN9mVPL5ZkH9lIAC3A2rj24T -pItMW1N+zOaLHU5tJB9VnCnKSFz5CHd/KEcLA3Ql2K70z7n1FvINnBmqAQdgPcPu -Et2rFgGg3atR3T3bV7ZRlla0CcoAFl/YoDI16oHRXboxAtoAzaIwvS6HUrOYQPYq -BLGt00Wws4bpILk3b04lDLEHmzDe6N3/v3FgBurPzR2tL97/sJGePE94I833hYG4 -vBdU0Kdt9FbTDEFOgrfRCisHyZY6Vw6rIiWBSLUBCjtm2vipgoD0H3DvyZLbMQRr -qmctCX4KQtOZ8dV3JQkNAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI -hvcNAQELBQADggEBAJnz4lK9GiCWhCXIPzghYRRheYWL8nhkZ+3+oC1B3/mGEf71 -2VOdND6fMPdHinD8jONH75mOpa7TanriVYX3KbrQ4WABFNJMX9uz09F+0A2D5tyc -iDkldnei+fiX4eSx80oCPgvaxdJWauiTsEi+fo2Do47PYkch9+BDXT9F/m3S3RRW -cia7URBAV8Itq6jj2BHcpS/dEqZcmN9kGWujVagcCorc0wBKSmkO/PZIjISid+TO -Db2g+AvqSBDU0lbdP7NXRSIxvZejDz4qMjcpSbhW9OS2BCYZcq5wgH2lwYkdPtmX -JkhxWKwsW11WJWDcmaXcffO3a6lDizxyjnTedoU= +S2VybmVsIFRlc3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCb +k/WPIqqv46Nv9FeodWiPtx4/v3XZJwGxTk3JEje2CLjeVjU0q6OZoofP1wgSIZSh +iO2o9iDC5O1Aedop0i+wqe9dMcn34O1K5aM4ff8c4orfBe0xqyvE3cJx4BeSTZ4n +NY00x9PkCcoq98SoU7S9vkJq+AxUzUII34GQ4xCeaM7+g43PpGo5KFDwrzI/VUJX +qaeRNXS0/j8Wwp7Gv8L1a+ZGlxrgpXTJLGamhtkWyVEWSpgcc5suA0qSwvkAE1KX +5aJoBUDL22fLRhs91xNFDUYTAvkG8X4gM0f8lBL24+nbOBkOLdpqSZZ+dk59JKHD +TFGBx0p17I1g0xjWNjMVAgMBAAGjEzARMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZI 
+hvcNAQELBQADggEBAIwWNyaQhZglJyKMIAUAwlvBL5REA99bua06xWfJwdmdlci9 +Bb6MgQzVk5K68rsNlcL0ma+Ri5FfU+j7gsYZh4pILYb9xqFxiKX7bxMZv99LR8Mi +0EImM7gz3S579qYBXWd4V6/1G864qln8neHv+X3MF/wk3O9IYqepWsC3xDRos1Zv +xQfb37Ol4pcHtue4wHXr5TV8+KPcUusfNcftnpsEHyEUHqPORdHB7xRpfhosRYvL +7WwMXNseuyHFcdA/rEhUVsca+SUeOMIW+8euuU/as3ZaEpv1ZmpHEYXHb2SlS6W+ +gTzUOtNXsKVDrm9uEcUHytp+xvp9l9NNM/IRGGA= -----END CERTIFICATE----- # Certificate from trusted-ca.pem -----BEGIN CERTIFICATE----- -MIIDojCCAoqgAwIBAgIEclbQATANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV +MIIDojCCAoqgAwIBAgIEc+efUTANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UEAwwWVHJ1c3Rl -ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMjAxMjcyMTU5NDhaFw0yNDA0MzAyMTU5NDha +ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMzA2MDkxNDI4NDdaFw0yNTA5MTAxNDI4NDda MHwxCzAJBgNVBAYTAlVTMREwDwYDVQQIDAhOZXcgWW9yazEWMBQGA1UEBwwNTmV3 IFlvcmsgQ2l0eTEQMA4GA1UECgwHTW9uZ29EQjEPMA0GA1UECwwGS2VybmVsMR8w HQYDVQQDDBZUcnVzdGVkIEtlcm5lbCBUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEF -AAOCAQ8AMIIBCgKCAQEA8h+5axgTodw8KmHz/rcPcy2N/etFkipOVL0i3Ug6JcKk -DjuSIdyLULuIQlR8nXWQ3hW9CZ2gDCeSnmnUKY6GWDQPHoSUJPhmGkXPuPBXivcL -QpLVZeOHrqR4+SHzOA3317LF/QYm9kC3dEZIz+dWUlTHs4NFwR+Yo84XNosSGaUh -o0mK5YcBx0W7y82rNrijcygOkXF9QrANUZfUz5uQ/ZPDjgoISqFvgMzJtpL6LqSC -TbsUM4NbPSYECDFzIosO+rhYCUsgZ5pE6NWZjmKzq4+zeb/2iSIoEb7U/5f6i4H4 -880y+usrcsBuNCS1OVHaEB1ZrlinJbzplB3nV9Hj1wIDAQABoywwKjAMBgNVHRME +AAOCAQ8AMIIBCgKCAQEAn4/NB8z28VxeJ2Opsvm83sjk4dZGkok1Z9QlKS9VcTZU +sfYN2nrCUEq0mMGg7mFsbSBgZq0a1IoRYP0Ci1ycaqqg0iLGlvNAsBhazVgnlr6O +P1j+hkf5JGM7r+ZgVF/0u7i9EFAgVs8EwqCH/RE5p0oJ5ncGiNf92KB/uG0r/eWz +TF3/VGuudWcOaCzs8MMMWY4iYDpm5QWUnS7eu/VWW1efGH6ZEEo63bnAFsQZu6xZ +yKOKealhiDLRVatigFqZh6oLQoEckl4+QzWKWxscAHuMuTy+fWYLdhtrGZIBEutO +DmzUMupifSy70VMt9nPcD3/Z93agswMJuU5hktpvUQIDAQABoywwKjAMBgNVHRME BTADAQH/MBoGA1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsF -AAOCAQEArn+KmfD2JEXa0G81jY+v1+XBT4BCcFExbdpOYbIoo2m0Qvx+sla5+Qu7 -nG51R+3rnkVPr03ogKYtf3hYtQJk6DqfuF0V9ESYkz09XRwyW93mh3z4yumXnk3y -d6SG2quC6iJV0EqT/OnmmveGBpxaBjf80ezRq+8t0mVGeNwZSxv0OprAkmKIIDM8 -Qa1/LlGhStiU+hN62c3m4wHdY5jreRYH7NyIZCHJ/wKgo0cDWWdJ4MeAaQhuijUI -BaNg6mFHlxVMMRGIGSduUhu7vHzjbAES6kJxdIpDM8tZMlRZQ3ORml5s9onSMb2n -NmJkjwyB62odD+yrygWRLtFMJmKODQ== +AAOCAQEAfDOw6TjvfP6w137p3z+FncTYQM8a+Ytgtniy4VvJjLXyev4ibzGyBiBk +Pj6Y5AcCVRyxzUgPnL3kNOTOPI2HMRLu6WR3vzzvJPZQcetTt91A9rGr6C/I08gS +AlPaWFsiMmJML/QxH/C5Jh1wvoRha69U0IlXITGHiGBvmYtvjUXD12S6W95zlbSO +g9zKc/MBZxe+bjaR5e4l+ieMI5QvBf3ehTg8g0kV7CEA0ZCmbuHL/yLkIz+Yvf7l +QK4NXwZCOq+ERpugG0cGh1zwk5K7N3MsBvA5NhyPQnN/4WHZ3c0Lqznf6m4h7QyW +U0F1wL+qogbpLVQ/oZOdnjUm9JzlIA== -----END CERTIFICATE----- diff --git a/jstests/ssl/x509/root-and-trusted-ca.pem.digest.sha1 b/jstests/ssl/x509/root-and-trusted-ca.pem.digest.sha1 index dbe9e3898afc7..e1ec750dc4655 100644 --- a/jstests/ssl/x509/root-and-trusted-ca.pem.digest.sha1 +++ b/jstests/ssl/x509/root-and-trusted-ca.pem.digest.sha1 @@ -1 +1 @@ -F42B9419C2EF9D431D7C0E5061A82902D385203A \ No newline at end of file +D33E7C8B0748C66DBEEE6E24410FA72A47607DF3 \ No newline at end of file diff --git a/jstests/ssl/x509/root-and-trusted-ca.pem.digest.sha256 b/jstests/ssl/x509/root-and-trusted-ca.pem.digest.sha256 index 2cffe1b5da960..4ac5afdd90414 100644 --- a/jstests/ssl/x509/root-and-trusted-ca.pem.digest.sha256 +++ b/jstests/ssl/x509/root-and-trusted-ca.pem.digest.sha256 @@ -1 +1 @@ -21A1C6A87B31AF590F5074EE716F193522B8F540081A5D571B25AE5DF72863E3 \ No newline at end of file +6568E01751761F5EC6A07B050857C77DD2D2604CD05A70A62F7DDA14829C1077 
\ No newline at end of file diff --git a/jstests/ssl/x509/trusted-client-testdb-roles.pem b/jstests/ssl/x509/trusted-client-testdb-roles.pem index 6868581d6ca47..3ea64ac336bd6 100644 --- a/jstests/ssl/x509/trusted-client-testdb-roles.pem +++ b/jstests/ssl/x509/trusted-client-testdb-roles.pem @@ -3,53 +3,53 @@ # # Client certificate with X509 role grants via trusted chain. -----BEGIN CERTIFICATE----- -MIIDwzCCAqugAwIBAgIEIEan5jANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV +MIIDwzCCAqugAwIBAgIEZUNgejANBgkqhkiG9w0BAQsFADB8MQswCQYDVQQGEwJV UzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5ldyBZb3JrIENpdHkxEDAO BgNVBAoMB01vbmdvREIxDzANBgNVBAsMBktlcm5lbDEfMB0GA1UEAwwWVHJ1c3Rl -ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMjAxMjcyMTU5NDhaFw0yNDA0MzAyMTU5NDha +ZCBLZXJuZWwgVGVzdCBDQTAeFw0yMzA2MDkxNDI4NDhaFw0yNTA5MTAxNDI4NDha MIGRMQswCQYDVQQGEwJVUzERMA8GA1UECAwITmV3IFlvcmsxFjAUBgNVBAcMDU5l dyBZb3JrIENpdHkxEDAOBgNVBAoMB01vbmdvREIxFTATBgNVBAsMDEtlcm5lbCBV c2VyczEuMCwGA1UEAwwlVHJ1c3RlZCBLZXJuZWwgVGVzdCBDbGllbnQgV2l0aCBS -b2xlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOpl8u+oSvvW1n2Q -6+XzknlDB1C0zd8ztNiTjOdSD0Nv4OHQX++ft+t3ZFCKq5GM61LcDKoa6tKr3QXf -lnHl1Ywz0swOXJ1L/FJuyM4FugSCpcWrD/zMPylrxTGWGW5YiiiwBvzh6ZZ40dVM -VlP3oXcbl96/7EufJNYRYJVOQxMw+T4elZCC4J/keMb33daluwckouOSmOrQu9lx -8croX81+uHUiHuTLH5XlIlLi/Z8BjuPwXt5JSxmHnc4cBhAC1fHjEDr/u85vfdTe -5tn2PIdA9o5dOsU+IqNbvdW5KZ74G44QlRyGr8roj1SwHfIHvNZDYxrC2uXhOKoK -1tRv11UCAwEAAaM3MDUwMwYLKwYBBAGCjikCAQEEJDEiMA8MBXJvbGUxDAZ0ZXN0 -REIwDwwFcm9sZTIMBnRlc3REQjANBgkqhkiG9w0BAQsFAAOCAQEAaEL9nJI66Qec -7KnKysAi/uaEKIajs86bnb/nkUQJxDSEOb/YDi41LQ2D2+MX52b26GD4rUAHvkjU -hHzCHprc2mgjWm0J7jCY8dlqj7ka9g5SWi56gDRfjSjuAOV93+Q4Ty/kAFLBIy8H -z0v7ed+a14i6NHqAmZAXVx61zI6nZgDhN8kcuvJICQKTWk8rVp9Cv/OhntIJgU7z -Lqqdn7eSOnwYllVG18dsQHOea6f6aCicbLDZgbSOnSPowhxMaPiIfq+WXXo1YiPl -kRHxE9OVYuGaT7qTQoda/SNfotRNJ6ApbGZLjtCr3mVA0LoK8e2HWFmNqRIUUx37 -zecLCwDx3g== +b2xlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKU4mtQcsGM9rOsu +vZw23OWnRd2/rmgw7n9c0sIB6v8E6cLtZuJseIU/4E6I7qrQbN/Cy++DyIbRFtqw +J/Ahf6EzHO4Ic0uVhK3pOrENMsjQSRPSXdnRlTlBLGrXQ2gpibJrACQ3ej9PbA86 +OJpzLkgbv+ehpizpq39/PrdetaUVOzSTlBb5wjoYai4xNUHwuCU7tw5ZT/62wcYk +SR1CbNEeJX2HvR3FkubsDEV8uLPiHzze928g3p/YjWqq4MiyKjEd6ftiq/JK85i4 +vcdXFo8u1zHCSaM1HCXudXRqOZjjn8O8lZ7GpfdbsFYnIAeIsrICtzwakcn8qjri +VdO85GcCAwEAAaM3MDUwMwYLKwYBBAGCjikCAQEEJDEiMA8MBXJvbGUxDAZ0ZXN0 +REIwDwwFcm9sZTIMBnRlc3REQjANBgkqhkiG9w0BAQsFAAOCAQEAbUmfPM9sbMCR +YAyvqUfjq0sXK6+z5ZO9WQFm0/VhsPg9iK910yQtjbU1jYBOnO7fJ0qjBglQ0wJY +KrHOz+vo9ExnkXBa3c0ErASdYQbxQ0uZyH18QltxPWwZ9Nih2FY1n5MIxMnJlKax +5o26pr7QnpV+NryigabLuZOHYrt8HfoepmLPJuSByVZQbdt0anopjN2srUVqAe8v +E2moR1Dm6on+rzqfLy9UIBx9yyN5lMby8RNgeBKXcHfIaIkqASUdMMk8D4gxO3GU +I3dL+s0hl6MqnSWPqkCuGFcuN67bmkaSWnK+rDCZ+ST6uUWAbxOsrpKpMEzjB7+i +uXwxSKRKrg== -----END CERTIFICATE----- -----BEGIN PRIVATE KEY----- -MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDqZfLvqEr71tZ9 -kOvl85J5QwdQtM3fM7TYk4znUg9Db+Dh0F/vn7frd2RQiquRjOtS3AyqGurSq90F -35Zx5dWMM9LMDlydS/xSbsjOBboEgqXFqw/8zD8pa8UxlhluWIoosAb84emWeNHV -TFZT96F3G5fev+xLnyTWEWCVTkMTMPk+HpWQguCf5HjG993WpbsHJKLjkpjq0LvZ -cfHK6F/Nfrh1Ih7kyx+V5SJS4v2fAY7j8F7eSUsZh53OHAYQAtXx4xA6/7vOb33U -3ubZ9jyHQPaOXTrFPiKjW73VuSme+BuOEJUchq/K6I9UsB3yB7zWQ2Mawtrl4Tiq -CtbUb9dVAgMBAAECggEAXvcfDenpN087Z3QnnuqoFWkKtbALGLsiMfzVwYKFnJTf -T53UwIi9QzED+32zNS0ayl9M4j41lVT8Hz0O/uZXNo5ogTPwYeo/OEaaJJ49V0Z3 -UzaTK6C0lluLcSOW+hX1I7btndcJhGU+3mQnNa3GRTNwvFaVra20huZTcypaEpOm -Xxo26QcJFloaMm2n7HAuO756HXd+63SqoQubMQxGFBga76H3ep3WTTgFR6VBejfb -brdXwT0/uS/0rQ9hkcs/RFfqZqsI8ADXhqhu7aO1+xu5cdzGN6Oa6NKnh3tLX52v 
-MQxurnWBIkZNKEczqfVwGWSnwdTlymS11ohleOGrAQKBgQD57xw9fU1VjZ5u4d7Q -IYQTcBGEf4EHzmJNcXmvz5U98UmrVCuH0TGgMlUUs4LcdvPBbOhW74cfYXPSDQVF -Afig77BGCn1o3/6T2VgYBJqf9OAMZC+mnMhEuWUcyCYTwmh8v/qHfuJbcADGtQ5B -05OXKBkjtrBWTbdekis90mk52QKBgQDwFk/Nq2jkff7q/6HwiBXnphHnvl/QLSJl -3dgNkICl+7HIVHCe+hhLEG5SlNx0drchFuYH2ZN/MMqeWju2oXgcBZmeULKct5VT -Hii7B3fOkILPCaVJTDtgYMUHsdZ4j+HG7PsjB8/V8DzdijmKMaYb0ZmJmy01ipBv -T8+XwvC/3QKBgQCJR4HOcG2yyTe1ldDJpy8hchPdIB+iRwUNnn+FRtKllEuvlGrY -jdnhMOQ0m6kMKTYYDxbK8YPZg7CXNlmnnr6OvzimMArUOPxe/yl4/8Zih6EsjTbz -H/iMbvyPw4vOnKDBrL0SAWqZaLq0aixrkafmhbrRN/5BWSyYAFdJ/LGZeQKBgDJt -LUPQfc6IHDO1j4javGcUPWyEUtGBuVjV+JwYvryeGeAuxBzQAKw7fkCAHbGkgaBE -k/oQG+e6EsShxSr9zSFtl69l2a4K7SUxD3MBBYvwVFkx+HJlvY7npFqSYq6d4dkL -S1A3QtL3i4EomB9LgE0Vf/8kBaHaQa3vgHWqrzBNAoGBAKgqghZTroNcX91Mqu0p -iriLMkiDqJkzehD9XQMhOjMfoGxyMoYFmrvo5UHSOEl6ztMFBwR5j6S8hwDHolXr -wiz0kqRQwivrHN2O613S0Ruis17cvhe0GfMJs+3fCpN8Hu3Wb3d3O1PZ6khjORTy -Mo9z1LS8fe+6pqKSMPvL24Jp +MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQClOJrUHLBjPazr +Lr2cNtzlp0Xdv65oMO5/XNLCAer/BOnC7WbibHiFP+BOiO6q0Gzfwsvvg8iG0Rba +sCfwIX+hMxzuCHNLlYSt6TqxDTLI0EkT0l3Z0ZU5QSxq10NoKYmyawAkN3o/T2wP +Ojiacy5IG7/noaYs6at/fz63XrWlFTs0k5QW+cI6GGouMTVB8LglO7cOWU/+tsHG +JEkdQmzRHiV9h70dxZLm7AxFfLiz4h883vdvIN6f2I1qquDIsioxHen7YqvySvOY +uL3HVxaPLtcxwkmjNRwl7nV0ajmY45/DvJWexqX3W7BWJyAHiLKyArc8GpHJ/Ko6 +4lXTvORnAgMBAAECggEBAKPYOXpFpQoVXIixhmkG4xJ/SkK4Vp2OgMUe6QZp3n/d +gG/E+7Voui/yA1/+XShZns5bn9+7AT3o1dAyxrhUj/vs8zB+gs+ey7+nPYPcwdLw +8rdMc7ORKnlnz0N4i6kdICcmAzLnLTzhZxjGtOE9fdUWZ72tyVxasa6zIh+4NUwp +5HqaiBs3Qv0XpJuN01rUb20/hLUWSVSstyQJUD3LFtY4l6LnnBJmwuwVRQFvMa6v +2js3za+dFUp4RWH9UFhzLNP5GF+aU+oNNWbxtvdMXm4V+6affbG1I9TVLNi/yqYF +LjyKgnSS8Td39wo0SO6eXjX4w2ykJV/ujBCcDdr5plECgYEA161PivKHb9D1vaDY +fUvgQvpw+q2S1WdmO6CHmi1ghsZbShOvkobTE73dRu46EELpav4a1nJ6m02atLmC +naYUcGChO4XN9z1MJuObwB6UbqjxpMNeTt/FdnrHX/QuLPJ3gsDOk6frC7dEjy4b +cxqabtr6FSc7on0RsXH2Mvt+2k8CgYEAxBxifEHEX+N5fa1bbtS4rwxU8aeXPp2s +AxngPn1i0N1dyWL4SP2oUu4M9WTbZ4HaGy/ZVjuWe9L6WiaAYWVGHQSIj2bDdbmI +Vg5c4MI4hhnUng3nbUkLQGWU98D7QE2TItSYnzB95vsH7UhxUi7YSjpK+OpTyxCy +2U7A7maQhmkCgYBxxF/zAV/j4ave7VuFLGMNsaGq+ag8naBVz6X38/u43CUHfD7e +4fOxtDNm2P8T2NfbslJh0F1QUq1Kq5h0pEifxrkF9tzgkBr9YI6l1vuhK8jbiprS +rEMy7i0BNoKs/vF/rUjKpobb1T568G1WWDAiTawhQGK6oZKVsXGXDwapDQKBgBqd ++6P/zSZymme7YsCNMK/ZjaHq7DU8cz9MgMoafJH6qaMOwsItY242Dx4zXTAUSMUN +3oK8vbT6sq4PsbcGZtjDZ8GG4hqVXvLHKkX7Nm7FDL2TB4kms9c+slRaJf/ZR5If +psNccooqIi5Lec38u7B/bvOP4mWaU5v4+2igFReZAoGADRSmjIUVBHmejz2EaYTy +EF9+L6o1t6pJe67yo65hHd/GIRALSpZGA3mViRs368dV1bOx0ydjuzxx186wKt9S +0VVKA+qi3kFn2rXt2EGl8nuvQtcZKoElR2urwvM5Two/z31UHM3Z4tyiDZdEWsfS +Tio1y38di//lKXPKVqgCK4s= -----END PRIVATE KEY----- diff --git a/jstests/ssl/x509/trusted-client-testdb-roles.pem.digest.sha1 b/jstests/ssl/x509/trusted-client-testdb-roles.pem.digest.sha1 index dd30990811eaa..c1fdb44a03afb 100644 --- a/jstests/ssl/x509/trusted-client-testdb-roles.pem.digest.sha1 +++ b/jstests/ssl/x509/trusted-client-testdb-roles.pem.digest.sha1 @@ -1 +1 @@ -80AA73D8FFE6C854A357A836C4657D7C03480011 \ No newline at end of file +B74393EDB81655C4CF2EC063DDCB67AB83FDB2AD \ No newline at end of file diff --git a/jstests/ssl/x509/trusted-client-testdb-roles.pem.digest.sha256 b/jstests/ssl/x509/trusted-client-testdb-roles.pem.digest.sha256 index f209638b2310a..9c8fbfe681367 100644 --- a/jstests/ssl/x509/trusted-client-testdb-roles.pem.digest.sha256 +++ b/jstests/ssl/x509/trusted-client-testdb-roles.pem.digest.sha256 @@ -1 +1 @@ -C02BAD2324CEB0793749AAF4C01CC35B58525BFE00D683B558B4A6E5BF9C2406 \ No newline at end of file 
+8BF64435BC341EA33C603EA3B2676DAF96AD6D081D038488F171EE1C407059FC \ No newline at end of file diff --git a/jstests/ssl/x509_client.js b/jstests/ssl/x509_client.js index e906cda32cb33..beb423612332b 100644 --- a/jstests/ssl/x509_client.js +++ b/jstests/ssl/x509_client.js @@ -7,8 +7,8 @@ const CLIENT_USER = "CN=client,OU=KernelUser,O=MongoDB,L=New York City,ST=New Yo const INVALID_CLIENT_USER = "C=US,ST=New York,L=New York City,O=MongoDB,OU=KernelUser,CN=invalid"; function authAndTest(mongo) { - external = mongo.getDB("$external"); - test = mongo.getDB("test"); + let external = mongo.getDB("$external"); + let test = mongo.getDB("test"); // Add user using localhost exception external.createUser({ diff --git a/jstests/ssl/x509_custom.js b/jstests/ssl/x509_custom.js index cd90acfcabfd8..7dc5bc2b1754f 100644 --- a/jstests/ssl/x509_custom.js +++ b/jstests/ssl/x509_custom.js @@ -3,9 +3,6 @@ (function() { 'use strict'; -const SERVER_CERT = 'jstests/libs/server.pem'; -const CA_CERT = 'jstests/libs/ca.pem'; - function testClient(conn, name) { let auth = {mechanism: 'MONGODB-X509'}; if (name !== null) { @@ -19,7 +16,7 @@ function testClient(conn, name) { '--sslPEMKeyFile', 'jstests/libs/client-custom-oids.pem', '--sslCAFile', - CA_CERT, + 'jstests/libs/ca.pem', '--port', conn.port, '--eval', @@ -46,8 +43,10 @@ function runTest(conn) { const mongod = MongoRunner.runMongod({ auth: '', sslMode: 'requireSSL', - sslPEMKeyFile: SERVER_CERT, - sslCAFile: CA_CERT, + // Server PEM file is server.pem to match the shell's ca.pem. + sslPEMKeyFile: 'jstests/libs/server.pem', + // Server CA file is non-expiring-ca.pem to match the shell's client-custom-oids.pem. + sslCAFile: 'jstests/libs/non-expiring-ca.pem', sslAllowInvalidCertificates: '', }); runTest(mongod); diff --git a/jstests/ssl/x509_rotate_missing_files.js b/jstests/ssl/x509_rotate_missing_files.js index 45038dabfb3f5..ff6decbb7c8ce 100644 --- a/jstests/ssl/x509_rotate_missing_files.js +++ b/jstests/ssl/x509_rotate_missing_files.js @@ -49,4 +49,4 @@ for (let certType of certTypes) { } MongoRunner.stopMongod(mongod); -})(); \ No newline at end of file +})(); diff --git a/jstests/ssl_x509/sharding_with_x509.js b/jstests/ssl_x509/sharding_with_x509.js index 348f5f7773ad9..bbfb59d5875e6 100644 --- a/jstests/ssl_x509/sharding_with_x509.js +++ b/jstests/ssl_x509/sharding_with_x509.js @@ -63,7 +63,7 @@ print("starting updating phase"); // Update a bunch of data var toUpdate = toInsert; bulk = coll.initializeUnorderedBulkOp(); -for (var i = 0; i < toUpdate; i++) { +for (let i = 0; i < toUpdate; i++) { var id = coll.findOne({insert: i})._id; bulk.find({insert: i, _id: id}).update({$inc: {counter: 1}}); } @@ -74,7 +74,7 @@ print("starting deletion"); // Remove a bunch of data var toDelete = toInsert / 2; bulk = coll.initializeUnorderedBulkOp(); -for (var i = 0; i < toDelete; i++) { +for (let i = 0; i < toDelete; i++) { bulk.find({insert: i}).removeOne(); } assert.commandWorked(bulk.execute()); diff --git a/jstests/ssl_x509/upgrade_to_x509_ssl.js b/jstests/ssl_x509/upgrade_to_x509_ssl.js index e4a7e828da4fb..7bc6134ea4233 100644 --- a/jstests/ssl_x509/upgrade_to_x509_ssl.js +++ b/jstests/ssl_x509/upgrade_to_x509_ssl.js @@ -30,7 +30,7 @@ if (jsTestOptions().storageEngine == "inMemory") { wcMajorityJournalDefault = true; } -opts = { +let opts = { sslMode: "allowSSL", sslPEMKeyFile: SERVER_CERT, sslAllowInvalidCertificates: "", diff --git a/rpm/mongod.service b/rpm/mongod.service index 81059df834343..8d4a4c4142cda 100644 --- a/rpm/mongod.service +++ 
b/rpm/mongod.service @@ -11,6 +11,7 @@ Environment="OPTIONS=-f /etc/mongod.conf" Environment="MONGODB_CONFIG_OVERRIDE_NOFORK=1" EnvironmentFile=-/etc/sysconfig/mongod ExecStart=/usr/bin/mongod $OPTIONS +RuntimeDirectory=mongodb # file size LimitFSIZE=infinity # cpu time diff --git a/site_scons/site_tools/auto_install_binaries.py b/site_scons/site_tools/auto_install_binaries.py index 7ec9810e2d26d..5fdce083dc258 100644 --- a/site_scons/site_tools/auto_install_binaries.py +++ b/site_scons/site_tools/auto_install_binaries.py @@ -387,7 +387,7 @@ def auto_install_pseudobuilder(env, target, source, **kwargs): new_installed_files = env.Install(target=target_for_source, source=s) setattr(s.attributes, INSTALLED_FILES, new_installed_files) - + setattr(new_installed_files[0].attributes, 'AIB_INSTALL_FROM', s) installed_files.extend(new_installed_files) entry.files.update(installed_files) diff --git a/site_scons/site_tools/compilation_db.py b/site_scons/site_tools/compilation_db.py index dc21a334c2f34..ada1e49534af2 100644 --- a/site_scons/site_tools/compilation_db.py +++ b/site_scons/site_tools/compilation_db.py @@ -23,6 +23,7 @@ import json import SCons import itertools +import shlex # Implements the ability for SCons to emit a compilation database for the MongoDB project. See # http://clang.llvm.org/docs/JSONCompilationDatabase.html for details on what a compilation @@ -154,6 +155,7 @@ def CompilationDbEntryAction(target, source, env, **kw): "directory": env.Dir("#").abspath, "command": ' '.join(cmd_list), "file": str(env["__COMPILATIONDB_USOURCE"][0]), + "output": shlex.quote(' '.join([str(t) for t in env["__COMPILATIONDB_UTARGET"]])), } target[0].write(entry) diff --git a/site_scons/site_tools/distsrc.py b/site_scons/site_tools/distsrc.py index 83f47f2ab3fe1..893552e6ace42 100644 --- a/site_scons/site_tools/distsrc.py +++ b/site_scons/site_tools/distsrc.py @@ -20,6 +20,7 @@ # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # +import git import SCons import os import os.path as ospath @@ -192,11 +193,32 @@ def run_distsrc_callbacks(target=None, source=None, env=None): print("Invalid file format for distsrc. 
Must be tar or zip file") env.Exit(1) - git_cmd = ('"%s" archive --format %s --output %s --prefix ${MONGO_DIST_SRC_PREFIX} HEAD' % - (git_path, target_ext, target[0])) + def create_archive(target=None, source=None, env=None): + try: + git_repo = git.Repo(os.getcwd()) + # get the original HEAD position of repo + head_commit_sha = git_repo.head.object.hexsha + + # add and commit the uncommited changes + git_repo.git.add(all=True) + # only commit changes if there are any + if len(git_repo.index.diff("HEAD")) != 0: + with git_repo.git.custom_environment(GIT_COMMITTER_NAME="Evergreen", + GIT_COMMITTER_EMAIL="evergreen@mongodb.com"): + git_repo.git.commit("--author='Evergreen <>'", "-m", "temp commit") + + # archive repo + dist_src_prefix = env.get("MONGO_DIST_SRC_PREFIX") + git_repo.git.archive("--format", target_ext, "--output", target[0], "--prefix", + dist_src_prefix, "HEAD") + + # reset branch to original state + git_repo.git.reset("--mixed", head_commit_sha) + except Exception as e: + env.FatalError(f"Error archiving: {e}") return [ - SCons.Action.Action(git_cmd, "Running git archive for $TARGET"), + SCons.Action.Action(create_archive, "Creating archive for $TARGET"), SCons.Action.Action( run_distsrc_callbacks, "Running distsrc callbacks for $TARGET", diff --git a/site_scons/site_tools/mongo_pretty_printer_tests.py b/site_scons/site_tools/mongo_pretty_printer_tests.py new file mode 100644 index 0000000000000..b45e786a7ff64 --- /dev/null +++ b/site_scons/site_tools/mongo_pretty_printer_tests.py @@ -0,0 +1,219 @@ +# Copyright 2020 MongoDB Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY +# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE +# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# +"""Pseudo-builders for building and registering tests for pretty printers.""" +import subprocess +import os +import sys + +import SCons +from SCons.Script import Chmod + +not_building_already_warned = False + + +def print_warning(message: str): + global not_building_already_warned + if not not_building_already_warned: + not_building_already_warned = True + print(message) + + +def exists(env): + return True + + +ninja_fake_testlist = None + + +def build_pretty_printer_test(env, target, **kwargs): + + if not isinstance(target, list): + target = [target] + + if env.GetOption('ninja') != 'disabled': + return [] + + gdb_bin = None + if env.get('GDB'): + gdb_bin = env.get('GDB') + elif env.ToolchainIs('gcc', 'clang'): + # Always prefer v4 gdb, otherwise try anything in the path + gdb_bin = env.WhereIs('gdb', ['/opt/mongodbtoolchain/v4/bin']) or env.WhereIs('gdb') + + if gdb_bin is None: + print_warning("Can't find gdb, not building pretty printer tests.") + return [] + + test_component = {"dist-test"} + + if "AIB_COMPONENTS_EXTRA" in kwargs: + kwargs["AIB_COMPONENTS_EXTRA"] = set(kwargs["AIB_COMPONENTS_EXTRA"]).union(test_component) + else: + kwargs["AIB_COMPONENTS_EXTRA"] = list(test_component) + + # GDB has a built in python interpreter, but it may have a python binary on the system which + # we can use to check package requirements. + python_bin = None + result = subprocess.run([gdb_bin, '--configuration'], capture_output=True, text=True) + if result.returncode == 0: + for line in result.stdout.splitlines(): + if line.strip().startswith('--with-python='): + python_root = line.strip()[len('--with-python=') - 1:] + if python_root.endswith(' (relocatable)'): + python_root = python_root[:-len(' (relocatable)')] + python_bin = os.path.join(python_root, 'bin/python3') + if not python_bin: + print( + f"Failed to find gdb's python from gdb '--configuration', defaulting to {sys.executable}" + ) + python_bin = sys.executable + + test_program = kwargs.get("TEST_PROGRAM", ['$DESTDIR/$PREFIX/bin/mongod']) + if isinstance(test_program, list): + test_program = test_program[0] + test_args = kwargs.get('TEST_ARGS', []) + gdb_test_script = env.File(target[0]).srcnode().abspath + + if not gdb_test_script: + env.FatalError( + f"{target[0]}: You must supply a gdb python script to use in the pretty printer test.") + + with open(gdb_test_script) as test_script: + verify_reqs_file = env.File('#site_scons/mongo/pip_requirements.py') + + gen_test_script = env.Textfile( + target=os.path.basename(gdb_test_script), + source=verify_reqs_file.get_contents().decode('utf-8').split('\n') + [ + "import os,subprocess,sys", + "cmd = 'python -c \"import os,sys;print(os.linesep.join(sys.path).strip())\"'", + "paths = subprocess.check_output(cmd,shell=True).decode('utf-8').split()", + "sys.path.extend(paths)", + "symbols_loaded = False", + "try:", + " if gdb.objfiles()[0].lookup_global_symbol('main') is not None:", + " symbols_loaded = True", + "except Exception:", + " pass", + "if not symbols_loaded:", + r" gdb.write('Could not find main symbol, debug info may not be loaded.\n')", + r" gdb.write('TEST FAILED -- No Symbols.\\\n')", + " gdb.execute('quit 1', to_string=True)", + "else:", + r" gdb.write('Symbols loaded.\n')", + "gdb.execute('set confirm off')", + "gdb.execute('source .gdbinit')", + "try:", + " verify_requirements('etc/pip/components/core.req', executable=f'@python_executable@')", + "except MissingRequirements as ex:", + " print(ex)", + " print('continuing testing anyways!')", + ] + [line.rstrip() for line in 
test_script.readlines()]) + + gen_test_script_install = env.AutoInstall( + target='$PREFIX_BINDIR', + source=gen_test_script, + AIB_ROLE='runtime', + AIB_COMPONENT='pretty-printer-tests', + AIB_COMPONENTS_EXTRA=kwargs["AIB_COMPONENTS_EXTRA"], + ) + + pretty_printer_test_launcher = env.Substfile( + target=f'pretty_printer_test_launcher_{target[0]}', + source='#/src/mongo/util/pretty_printer_test_launcher.py.in', SUBST_DICT={ + '@VERBOSE@': + str(env.Verbose()), + '@pretty_printer_test_py@': + gen_test_script_install[0].path, + '@gdb_path@': + gdb_bin, + '@pretty_printer_test_program@': + env.File(test_program).path, + '@test_args@': + '["' + '", "'.join([env.subst(arg, target=target) for arg in test_args]) + '"]', + }, AIB_ROLE='runtime', AIB_COMPONENT='pretty-printer-tests', + AIB_COMPONENTS_EXTRA=kwargs["AIB_COMPONENTS_EXTRA"]) + env.Depends( + pretty_printer_test_launcher[0], + [ + test_program, + gen_test_script_install, + ], + ) + env.AddPostAction(pretty_printer_test_launcher[0], + Chmod(pretty_printer_test_launcher[0], 'ugo+x')) + + pretty_printer_test_launcher_install = env.AutoInstall( + target='$PREFIX_BINDIR', + source=pretty_printer_test_launcher, + AIB_ROLE='runtime', + AIB_COMPONENT='pretty-printer-tests', + AIB_COMPONENTS_EXTRA=kwargs["AIB_COMPONENTS_EXTRA"], + ) + + def new_scanner(node, env, path=()): + source_binary = getattr( + env.File(env.get('TEST_PROGRAM')).attributes, 'AIB_INSTALL_FROM', None) + if source_binary: + debug_files = getattr(env.File(source_binary).attributes, 'separate_debug_files', None) + if debug_files: + if debug_files: + installed_debug_files = getattr( + env.File(debug_files[0]).attributes, 'AIB_INSTALLED_FILES', None) + if installed_debug_files: + if env.Verbose(): + print( + f"Found and installing pretty_printer_test {node} test_program {env.File(env.get('TEST_PROGRAM'))} debug file {installed_debug_files[0]}" + ) + return installed_debug_files + if env.Verbose(): + print(f"Did not find separate debug files for pretty_printer_test {node}") + return [] + + scanner = SCons.Scanner.Scanner(function=new_scanner) + + run_test = env.Command(target='+' + os.path.splitext(os.path.basename(gdb_test_script))[0], + source=pretty_printer_test_launcher_install, action=str( + pretty_printer_test_launcher_install[0]), TEST_PROGRAM=test_program, + target_scanner=scanner) + env.Pseudo(run_test) + env.Alias('+' + os.path.splitext(os.path.basename(gdb_test_script))[0], run_test) + env.Depends(pretty_printer_test_launcher_install, [gen_test_script_install, test_program]) + + env.RegisterTest('$PRETTY_PRINTER_TEST_LIST', pretty_printer_test_launcher_install[0]) + env.Alias("$PRETTY_PRINTER_TEST_ALIAS", pretty_printer_test_launcher_install[0]) + env.Alias('+pretty-printer-tests', run_test) + return run_test + + +def generate(env): + global ninja_fake_testlist + if env.GetOption('ninja') != 'disabled' and ninja_fake_testlist is None: + print_warning("Can't run pretty printer tests with ninja.") + ninja_fake_testlist = env.Command( + '$PRETTY_PRINTER_TEST_LIST', __file__, + "type nul >>$TARGET" if sys.platform == 'win32' else "touch $TARGET") + else: + env.TestList("$PRETTY_PRINTER_TEST_LIST", source=[]) + + env.AddMethod(build_pretty_printer_test, "PrettyPrinterTest") + alias = env.Alias("$PRETTY_PRINTER_TEST_ALIAS", "$PRETTY_PRINTER_TEST_LIST") + env.Alias('+pretty-printer-tests', alias) diff --git a/site_scons/site_tools/oom_auto_retry.py b/site_scons/site_tools/oom_auto_retry.py index 1eacc3fb40161..7ff457d2798d6 100644 --- a/site_scons/site_tools/oom_auto_retry.py 
+++ b/site_scons/site_tools/oom_auto_retry.py @@ -28,6 +28,7 @@ import time import random import os +import re from typing import Callable, List, Dict @@ -38,7 +39,10 @@ def command_spawn_func(sh: str, escape: Callable[[str], str], cmd: str, args: Li success = False build_env = target[0].get_build_env() - oom_messages = build_env.get('OOM_RETRY_MESSAGES', []) + oom_messages = [ + re.compile(msg, re.MULTILINE | re.DOTALL) + for msg in build_env.get('OOM_RETRY_MESSAGES', []) + ] oom_returncodes = [int(returncode) for returncode in build_env.get('OOM_RETRY_RETURNCODES', [])] max_retries = build_env.get('OOM_RETRY_ATTEMPTS', 10) oom_max_retry_delay = build_env.get('OOM_RETRY_MAX_DELAY_SECONDS', 120) @@ -59,7 +63,7 @@ def command_spawn_func(sh: str, escape: Callable[[str], str], cmd: str, args: Li except subprocess.CalledProcessError as exc: print(f"{os.path.basename(__file__)} captured error:") print(exc.stdout) - if any([oom_message in exc.stdout for oom_message in oom_messages]) or any( + if any([re.findall(oom_message, exc.stdout) for oom_message in oom_messages]) or any( [oom_returncode == exc.returncode for oom_returncode in oom_returncodes]): retries += 1 retry_delay = int((time.time() - start_time) + diff --git a/src/mongo/SConscript b/src/mongo/SConscript index 37e4ae0199c8c..b666ad8fc846b 100644 --- a/src/mongo/SConscript +++ b/src/mongo/SConscript @@ -120,6 +120,9 @@ if has_option("gdbserver"): elif has_option("lldb-server"): debugger_env.Append(CPPDEFINES=["USE_LLDB_SERVER"]) +if has_option("wait-for-debugger"): + debugger_env.Append(CPPDEFINES=["WAIT_FOR_DEBUGGER"]) + debugger_obj = debugger_env.LibraryObject( target='debugger', source=[ diff --git a/src/mongo/base/clonable_ptr.h b/src/mongo/base/clonable_ptr.h index 8da7e6d841f15..1406458eafc23 100644 --- a/src/mongo/base/clonable_ptr.h +++ b/src/mongo/base/clonable_ptr.h @@ -33,6 +33,7 @@ #include #include #include +#include namespace mongo { namespace clonable_ptr_detail { diff --git a/src/mongo/base/clonable_ptr_test.cpp b/src/mongo/base/clonable_ptr_test.cpp index ba59dffbf193f..0b2a07c055d99 100644 --- a/src/mongo/base/clonable_ptr_test.cpp +++ b/src/mongo/base/clonable_ptr_test.cpp @@ -31,9 +31,12 @@ #include #include +#include #include -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/base/counter_test.cpp b/src/mongo/base/counter_test.cpp index 1c413f7110b17..ea1e38962b899 100644 --- a/src/mongo/base/counter_test.cpp +++ b/src/mongo/base/counter_test.cpp @@ -28,13 +28,10 @@ */ -#include "mongo/platform/basic.h" - -#include -#include - #include "mongo/base/counter.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/base/data_builder.h b/src/mongo/base/data_builder.h index aa7cb29154b0a..0cb5060a1ab6e 100644 --- a/src/mongo/base/data_builder.h +++ b/src/mongo/base/data_builder.h @@ -30,13 +30,19 @@ #pragma once #include +#include #include +#include #include #include #include +#include #include "mongo/base/data_range_cursor.h" +#include "mongo/base/data_type.h" +#include "mongo/base/status.h" #include "mongo/util/allocator.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/base/data_builder_test.cpp b/src/mongo/base/data_builder_test.cpp index 5a0abdc09a487..4d8a15089a9e7 100644 --- 
a/src/mongo/base/data_builder_test.cpp +++ b/src/mongo/base/data_builder_test.cpp @@ -28,10 +28,17 @@ */ #include "mongo/base/data_builder.h" -#include "mongo/base/data_type_terminated.h" -#include "mongo/platform/endian.h" -#include "mongo/unittest/unittest.h" +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_type_terminated.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/base/data_cursor_test.cpp b/src/mongo/base/data_cursor_test.cpp index 4c31fa086e6d8..75a13c7283f85 100644 --- a/src/mongo/base/data_cursor_test.cpp +++ b/src/mongo/base/data_cursor_test.cpp @@ -29,9 +29,12 @@ #include "mongo/base/data_cursor.h" +#include + #include "mongo/base/data_type_endian.h" -#include "mongo/platform/endian.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/base/data_range.h b/src/mongo/base/data_range.h index 316a97fa3e37e..898d5aca199d0 100644 --- a/src/mongo/base/data_range.h +++ b/src/mongo/base/data_range.h @@ -29,6 +29,8 @@ #pragma once +#include +#include #include #include #include @@ -36,8 +38,11 @@ #include "mongo/base/data_type.h" #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/platform/endian.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -46,29 +51,32 @@ class ConstDataRange { // These are helper types to make ConstDataRange's and friends constructable either from // a range of byte-like pointers or from a container of byte-like values. template - constexpr static auto isByteV = - (((std::is_integral_v && sizeof(T) == 1) || - std::is_same_v)&&(!std::is_same_v, bool>)); - - template - struct HasDataSize : std::false_type {}; + static constexpr bool isByte = [] { + if constexpr (std::is_same_v) { + return true; + } else if constexpr (std::is_same_v) { + return false; + } else if constexpr (!std::is_integral_v) { + return false; + } else if constexpr (sizeof(T) != 1) { + return false; + } else { + return true; + } + }(); template - struct HasDataSize< - T, - std::enable_if_t().data()), decltype(std::declval().size())>>>> - : std::true_type {}; - - template - struct ContiguousContainerOfByteLike : std::false_type {}; + using DataOp = decltype(std::declval().data()); + template + using SizeOp = decltype(std::declval().size()); + template + using ValueTypeOp = typename T::value_type; template - struct ContiguousContainerOfByteLike< - T, - std::void_t().data()), - std::enable_if_t && HasDataSize::value>>> - : std::true_type {}; + static constexpr bool isContiguousContainerOfByteLike = // + stdx::is_detected_v&& // + stdx::is_detected_v&& // + isByte>; public: using byte_type = char; @@ -84,7 +92,7 @@ class ConstDataRange { // to a non-zero value, you'll change the Status messages that are // returned on failure to be offset by the amount passed to this // constructor. 
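Note on the ConstDataRange hunk above: the old HasDataSize/ContiguousContainerOfByteLike SFINAE structs are replaced by a detection-idiom formulation built on stdx::is_detected_v plus the constexpr isByte test. The sketch below shows the same pattern in self-contained C++17; the names and the std::void_t machinery are illustrative stand-ins for MongoDB's stdx helpers, not code from this patch (the real code additionally detects value_type before using it).

#include <cstddef>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

// Byte-like element check, in the spirit of isByte in the hunk above.
template <typename T>
constexpr bool isByteLike =
    std::is_same_v<T, std::byte> ||
    (std::is_integral_v<T> && sizeof(T) == 1 && !std::is_same_v<T, bool>);

// Detection idiom: true only when T has both data() and size().
template <typename T, typename = void>
constexpr bool hasDataAndSize = false;

template <typename T>
constexpr bool hasDataAndSize<
    T,
    std::void_t<decltype(std::declval<T>().data()), decltype(std::declval<T>().size())>> = true;

// A contiguous container of byte-like values: has data()/size() and a
// byte-sized, non-bool value_type.
template <typename T, typename = void>
constexpr bool isContiguousByteContainer = false;

template <typename T>
constexpr bool isContiguousByteContainer<T, std::enable_if_t<hasDataAndSize<T>>> =
    isByteLike<typename T::value_type>;

static_assert(isContiguousByteContainer<std::string>);
static_assert(isContiguousByteContainer<std::vector<unsigned char>>);
static_assert(!isContiguousByteContainer<std::vector<int>>);  // wrong element size
static_assert(!isContiguousByteContainer<int>);               // no data()/size()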
- template , int> = 0> + template , int> = 0> ConstDataRange(const ByteLike* begin, const ByteLike* end, std::ptrdiff_t debug_offset = 0) : _begin(reinterpret_cast(begin)), _end(reinterpret_cast(end)), @@ -97,7 +105,7 @@ class ConstDataRange { : _begin(nullptr), _end(nullptr), _debug_offset(debug_offset) {} // You can also construct from a pointer to a byte-like type and a size. - template , int> = 0> + template , int> = 0> ConstDataRange(const ByteLike* begin, std::size_t length, std::ptrdiff_t debug_offset = 0) : _begin(reinterpret_cast(begin)), _end(reinterpret_cast(_begin + length)), @@ -109,12 +117,12 @@ class ConstDataRange { // must have a data() function that returns a pointer to the front and a size() function // that returns the number of elements. template ::value, int> = 0> + std::enable_if_t, int> = 0> ConstDataRange(const Container& container, std::ptrdiff_t debug_offset = 0) : ConstDataRange(container.data(), container.size(), debug_offset) {} // You can also construct from a C-style array, including string literals. - template , int> = 0> + template , int> = 0> ConstDataRange(const ByteLike (&arr)[N], std::ptrdiff_t debug_offset = 0) : ConstDataRange(arr, N, debug_offset) {} @@ -195,9 +203,7 @@ class ConstDataRange { protected: // Shared implementation of split() logic between DataRange and ConstDataRange. - template , int> = 0> + template , int> = 0> std::pair doSplit(const ByteLike* splitPoint) const { const auto* typedPoint = reinterpret_cast(splitPoint); uassert(ErrorCodes::BadValue, @@ -258,16 +264,16 @@ class DataRange : public ConstDataRange { : ConstDataRange(nullptr, nullptr, debug_offset) {} template ::value, int> = 0> + std::enable_if_t, int> = 0> DataRange(Container& container, std::ptrdiff_t debug_offset = 0) : ConstDataRange(std::forward(container), debug_offset) {} template ::value, int> = 0> + std::enable_if_t, int> = 0> DataRange(const Container&, std::ptrdiff_t) = delete; template ::value, int> = 0> + std::enable_if_t, int> = 0> DataRange(const Container&) = delete; template @@ -323,8 +329,7 @@ struct DataRangeTypeHelper { // Enable for classes derived from ConstDataRange template -struct DataType::Handler::value>::type> { +struct DataType::Handler>> { static Status load( T* t, const char* ptr, size_t length, size_t* advanced, std::ptrdiff_t debug_offset) { if (t) { diff --git a/src/mongo/base/data_range_cursor.cpp b/src/mongo/base/data_range_cursor.cpp index b22ee726e2a2f..bf96d53497d89 100644 --- a/src/mongo/base/data_range_cursor.cpp +++ b/src/mongo/base/data_range_cursor.cpp @@ -29,6 +29,7 @@ #include "mongo/base/data_range_cursor.h" +#include "mongo/base/error_codes.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/base/data_range_cursor.h b/src/mongo/base/data_range_cursor.h index b11797c1a7398..06c3d1517100b 100644 --- a/src/mongo/base/data_range_cursor.h +++ b/src/mongo/base/data_range_cursor.h @@ -32,10 +32,14 @@ #include #include #include +#include #include "mongo/base/data_range.h" #include "mongo/base/data_type.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/platform/endian.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/base/data_range_cursor_test.cpp b/src/mongo/base/data_range_cursor_test.cpp index a91bf708d282d..0ad53c59c2c47 100644 --- a/src/mongo/base/data_range_cursor_test.cpp +++ b/src/mongo/base/data_range_cursor_test.cpp @@ -29,9 +29,17 @@ #include "mongo/base/data_range_cursor.h" +#include +#include + +#include 
+#include + #include "mongo/base/data_type_endian.h" -#include "mongo/platform/endian.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/base/data_range_test.cpp b/src/mongo/base/data_range_test.cpp index 9d121406ed825..70accde790bb6 100644 --- a/src/mongo/base/data_range_test.cpp +++ b/src/mongo/base/data_range_test.cpp @@ -29,11 +29,21 @@ #include "mongo/base/data_range.h" +#include +#include #include +#include +#include +#include + +#include +#include #include "mongo/base/data_type_endian.h" +#include "mongo/base/string_data.h" #include "mongo/platform/endian.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/base/data_type.cpp b/src/mongo/base/data_type.cpp index 879c7175b404d..db235d043a843 100644 --- a/src/mongo/base/data_type.cpp +++ b/src/mongo/base/data_type.cpp @@ -31,7 +31,8 @@ #include -#include "mongo/util/str.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" namespace mongo { diff --git a/src/mongo/base/data_type.h b/src/mongo/base/data_type.h index c3d2cb442fa13..ac458fa6142d4 100644 --- a/src/mongo/base/data_type.h +++ b/src/mongo/base/data_type.h @@ -29,15 +29,15 @@ #pragma once -#include "mongo/config.h" - #include +#include #include #include "mongo/base/error_codes.h" #include "mongo/base/static_assert.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/config.h" // IWYU pragma: keep namespace mongo { @@ -166,4 +166,5 @@ struct DataType { // Force the visibility of the DataType::Handler specializations. #define MONGO_BASE_DATA_TYPE_H_INCLUDE_HANDSHAKE_ #include "mongo/base/data_type_string_data.h" + #undef MONGO_BASE_DATA_TYPE_H_INCLUDE_HANDSHAKE_ diff --git a/src/mongo/base/data_type_string_data.cpp b/src/mongo/base/data_type_string_data.cpp index 51929f718a540..7242bdf86cd41 100644 --- a/src/mongo/base/data_type_string_data.cpp +++ b/src/mongo/base/data_type_string_data.cpp @@ -27,8 +27,13 @@ * it in the license file. */ -#include "mongo/base/data_type.h" +#include +#include +#include "mongo/base/data_type.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/base/data_type_string_data.h b/src/mongo/base/data_type_string_data.h index b7e3b1c3403fc..db612ce66dcaf 100644 --- a/src/mongo/base/data_type_string_data.h +++ b/src/mongo/base/data_type_string_data.h @@ -33,6 +33,7 @@ #include #include "mongo/base/data_type.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #ifndef MONGO_BASE_DATA_TYPE_H_INCLUDE_HANDSHAKE_ diff --git a/src/mongo/base/data_type_string_data_test.cpp b/src/mongo/base/data_type_string_data_test.cpp index b2fb8b29a503c..0196f433cf9f2 100644 --- a/src/mongo/base/data_type_string_data_test.cpp +++ b/src/mongo/base/data_type_string_data_test.cpp @@ -27,12 +27,13 @@ * it in the license file. 
*/ -#include "mongo/base/data_type.h" +#include -#include "mongo/base/data_range.h" #include "mongo/base/data_range_cursor.h" #include "mongo/base/data_type_terminated.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/base/data_type_terminated.cpp b/src/mongo/base/data_type_terminated.cpp index e3daa5d33dcb8..8ce4d0d51d84e 100644 --- a/src/mongo/base/data_type_terminated.cpp +++ b/src/mongo/base/data_type_terminated.cpp @@ -29,6 +29,8 @@ #include "mongo/base/data_type_terminated.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/base/data_type_terminated.h b/src/mongo/base/data_type_terminated.h index 094c101179fac..7a40a3348e289 100644 --- a/src/mongo/base/data_type_terminated.h +++ b/src/mongo/base/data_type_terminated.h @@ -30,8 +30,10 @@ #pragma once #include +#include #include "mongo/base/data_type.h" +#include "mongo/base/status.h" namespace mongo { diff --git a/src/mongo/base/data_type_terminated_test.cpp b/src/mongo/base/data_type_terminated_test.cpp index fdc5d1a6ef7d9..703f1cefd5492 100644 --- a/src/mongo/base/data_type_terminated_test.cpp +++ b/src/mongo/base/data_type_terminated_test.cpp @@ -29,10 +29,15 @@ #include "mongo/base/data_type_terminated.h" +#include +#include + #include "mongo/base/data_range.h" #include "mongo/base/data_range_cursor.h" -#include "mongo/unittest/unittest.h" -#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/base/data_type_validated.h b/src/mongo/base/data_type_validated.h index 0fa8aabdb7888..36f72f38d30b7 100644 --- a/src/mongo/base/data_type_validated.h +++ b/src/mongo/base/data_type_validated.h @@ -29,9 +29,11 @@ #pragma once +#include #include #include "mongo/base/data_type.h" +#include "mongo/base/status.h" namespace mongo { diff --git a/src/mongo/base/data_type_validated_test.cpp b/src/mongo/base/data_type_validated_test.cpp index 392ef98989b0f..ab5eb0144cfda 100644 --- a/src/mongo/base/data_type_validated_test.cpp +++ b/src/mongo/base/data_type_validated_test.cpp @@ -29,15 +29,15 @@ #include "mongo/base/data_type_validated.h" -#include #include -#include "mongo/base/data_range.h" #include "mongo/base/data_range_cursor.h" -#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { template <> diff --git a/src/mongo/base/data_view.h b/src/mongo/base/data_view.h index 562778a43abdb..396ffc3daa191 100644 --- a/src/mongo/base/data_view.h +++ b/src/mongo/base/data_view.h @@ -30,11 +30,11 @@ #pragma once #include +#include #include -#include "mongo/config.h" - #include "mongo/base/data_type.h" +#include "mongo/config.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/base/data_view_test.cpp b/src/mongo/base/data_view_test.cpp index b63c3d1adecf6..414f1c76f713f 100644 --- a/src/mongo/base/data_view_test.cpp +++ b/src/mongo/base/data_view_test.cpp @@ -29,11 +29,14 @@ #include "mongo/base/data_view.h" +#include #include #include "mongo/base/data_type_endian.h" 
+#include "mongo/base/string_data.h" #include "mongo/platform/endian.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/base/dependency_graph.cpp b/src/mongo/base/dependency_graph.cpp index 612fcbc7d3d8f..fb95ccfb07f93 100644 --- a/src/mongo/base/dependency_graph.cpp +++ b/src/mongo/base/dependency_graph.cpp @@ -30,13 +30,19 @@ #include "mongo/base/dependency_graph.h" #include +#include #include -#include -#include +#include // IWYU pragma: keep #include #include -#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/util/assert_util.h" #include "mongo/util/string_map.h" diff --git a/src/mongo/base/dependency_graph_test.cpp b/src/mongo/base/dependency_graph_test.cpp index b79533bc081d5..8dffc61f369d5 100644 --- a/src/mongo/base/dependency_graph_test.cpp +++ b/src/mongo/base/dependency_graph_test.cpp @@ -32,14 +32,21 @@ */ #include +#include +#include // IWYU pragma: keep #include #include -#include +#include +// IWYU pragma: no_include "format.h" #include "mongo/base/dependency_graph.h" -#include "mongo/base/init.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/base/encoded_value_storage_test.cpp b/src/mongo/base/encoded_value_storage_test.cpp index 2a6ed09b5e2df..ba19fc36b40bb 100644 --- a/src/mongo/base/encoded_value_storage_test.cpp +++ b/src/mongo/base/encoded_value_storage_test.cpp @@ -29,12 +29,15 @@ #include "mongo/base/encoded_value_storage.h" +#include #include #include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" #include "mongo/base/static_assert.h" -#include "mongo/platform/endian.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/base/error_codes.yml b/src/mongo/base/error_codes.yml index ff48a9c4facbd..53de55b50a61d 100644 --- a/src/mongo/base/error_codes.yml +++ b/src/mongo/base/error_codes.yml @@ -505,7 +505,7 @@ error_codes: - {code: 386, name: DuplicateKeyId} - - {code: 387, name: EncounteredFLEPayloadWhileRedacting} + - {code: 387, name: EncounteredFLEPayloadWhileApplyingHmac} - {code: 388, name: TransactionTooLargeForCache} - {code: 389, name: LibmongocryptError} @@ -515,19 +515,21 @@ error_codes: - {code: 392, name: InvalidJWT} - {code: 393, name: InvalidTenantId} - - {code: 394, name: MovePrimaryRecipientDocNotFound, categories: [InternalOnly]} - - {code: 395, name: TruncatedSerialization} # This error code is not generated internally in mongod/s servers, but must be parsed and # reserve an error code. It can be returned by a remote search index management server. 
- {code: 396, name: IndexInformationTooLarge} - - - {code: 397, name: MovePrimaryAborted} - + - {code: 398, name: StreamTerminated, categories: [CloseConnectionError]} - - {code: 399, name: MovePrimaryRecipientPastAbortableStage, categories: [InternalOnly]} + - {code: 400, name: CannotUpgrade} + + - {code: 401, name: ResumeTenantChangeStream} + + - {code: 402, name: ResourceExhausted, categories: [RetriableError]} + + - {code: 403, name: UnsupportedShardingEventNotification} # Error codes 4000-8999 are reserved. @@ -554,8 +556,9 @@ error_codes: - {code: 13436,name: NotPrimaryOrSecondary,categories: [NotPrimaryError,RetriableError]} - {code: 14031,name: OutOfDiskSpace} - {code: 17280,name: OBSOLETE_KeyTooLong} + - {code: 28769,name: NamespaceCannotBeSharded} - {code: 46841,name: ClientMarkedKilled,categories: [Interruption,CancellationError]} - {code: 50768,name: NotARetryableWriteCommand} - {code: 50915,name: BackupCursorOpenConflictWithCheckpoint, categories: [RetriableError]} - {code: 56846,name: ConfigServerUnreachable} - + - {code: 57986,name: RetryableInternalTransactionNotSupported} diff --git a/src/mongo/base/error_extra_info.cpp b/src/mongo/base/error_extra_info.cpp index f0d60cd8c29cc..6c135f68a1cbf 100644 --- a/src/mongo/base/error_extra_info.cpp +++ b/src/mongo/base/error_extra_info.cpp @@ -27,12 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/base/error_extra_info.h" - -#include "mongo/base/init.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/base/error_extra_info.h b/src/mongo/base/error_extra_info.h index c8a05f184eff2..07e7a4fb5cea5 100644 --- a/src/mongo/base/error_extra_info.h +++ b/src/mongo/base/error_extra_info.h @@ -30,6 +30,8 @@ #pragma once #include +#include +#include // This file is included by many low-level headers including status.h, so it isn't able to include // much without creating a cycle. diff --git a/src/mongo/base/init.cpp b/src/mongo/base/init.cpp index 3708841589da9..ad95ea86dd40c 100644 --- a/src/mongo/base/init.cpp +++ b/src/mongo/base/init.cpp @@ -27,10 +27,11 @@ * it in the license file. */ -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include #include +#include #include "mongo/base/initializer.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/base/initializer.cpp b/src/mongo/base/initializer.cpp index 6e0d90b7e39b1..1d019a827ddb9 100644 --- a/src/mongo/base/initializer.cpp +++ b/src/mongo/base/initializer.cpp @@ -27,10 +27,6 @@ * it in the license file. 
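Note on the include changes in these hunks: many headers gain IWYU annotations ("// IWYU pragma: keep", "// IWYU pragma: no_include ...") to steer the include-what-you-use tooling. A minimal standalone illustration of what the two pragmas mean; the headers chosen here are arbitrary examples, not taken from the patch.

#include <cassert>  // IWYU pragma: keep
// "keep" stops include-what-you-use from flagging this include as removable,
// which is how the patch protects headers pulled in only for macros or
// configuration side effects (e.g. "mongo/config.h").

// IWYU pragma: no_include "format.h"
// "no_include" forbids the tool from ever suggesting that header for this
// file, as done in dependency_graph_test.cpp above to keep fmt's internal
// header out of the include list.

int main() {
    assert(2 + 2 == 4);  // the macro from <cassert> is the only use of that header
    return 0;
}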
*/ -#include "mongo/platform/basic.h" - -#include "mongo/base/initializer.h" - #include #include #include @@ -38,12 +34,16 @@ #include #include "mongo/base/dependency_graph.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/initializer.h" #include "mongo/base/status.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_truncation.h" #include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" #include "mongo/util/quick_exit.h" -#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/base/initializer.h b/src/mongo/base/initializer.h index e71e7878da002..e9736f5632e75 100644 --- a/src/mongo/base/initializer.h +++ b/src/mongo/base/initializer.h @@ -30,7 +30,9 @@ #pragma once #include +#include #include +#include #include #include "mongo/base/status.h" diff --git a/src/mongo/base/initializer_test.cpp b/src/mongo/base/initializer_test.cpp index 907b38813846a..bdeea1301779e 100644 --- a/src/mongo/base/initializer_test.cpp +++ b/src/mongo/base/initializer_test.cpp @@ -31,11 +31,16 @@ * Unit tests of the Initializer type. */ +#include #include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/base/murmurhash3_test.cpp b/src/mongo/base/murmurhash3_test.cpp index d1da7121fc7cb..0ef3a5878e1b1 100644 --- a/src/mongo/base/murmurhash3_test.cpp +++ b/src/mongo/base/murmurhash3_test.cpp @@ -28,37 +28,38 @@ */ #include +#include #include - -#include - -#include "mongo/unittest/unittest.h" +#include #include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" - -#define TEST_STRING32(str, seed, expected) ASSERT_EQUALS(compute32(str, seed), expected) -#define TEST_STRING64(str, seed, a, b) \ - do { \ - auto pair = compute128(str, seed); \ - ASSERT_EQUALS(pair.first, a); \ - ASSERT_EQUALS(pair.second, b); \ +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/murmur3.h" + +#define TEST_STRING32(str, seed, expected) \ + ASSERT_EQUALS(murmur3(StringData{str}, seed), expected) + +#define TEST_STRING64(str, seed, expected) \ + ASSERT_EQUALS(murmur3(StringData{str}, seed), expected) + +#define TEST_STRING128(str, seed, a, b) \ + do { \ + auto pair = compute128(StringData{str}, seed); \ + ASSERT_EQUALS(pair.first, a); \ + ASSERT_EQUALS(pair.second, b); \ } while (0) namespace mongo { namespace { -uint32_t compute32(StringData input, uint32_t seed) { - char hash[4]; - MurmurHash3_x86_32(input.rawData(), input.size(), seed, &hash); - return ConstDataView(hash).read>(); -} - std::pair compute128(StringData input, uint32_t seed) { - char hash[16]; - MurmurHash3_x64_128(input.rawData(), input.size(), seed, &hash); - return {ConstDataView(hash).read>(), - ConstDataView(hash).read>(8)}; + std::array hash; + murmur3(input, seed, hash); + return {ConstDataView(hash.data()).read>(), + ConstDataView(hash.data()).read>(8)}; } TEST(MurmurHash3, TestVectors32) { @@ -92,52 +93,79 @@ TEST(MurmurHash3, TestVectors32) { } -TEST(MurmurHash3, TestVectors64) { - TEST_STRING64("", 0, 0ULL, 0ULL); - - TEST_STRING64("", 1ULL, 
5048724184180415669ULL, 5864299874987029891ULL); - TEST_STRING64("", - 0xffffffffULL, - 7706185961851046380ULL, - 9616347466054386795ULL); // make sure seed value is handled unsigned - TEST_STRING64("\0\0\0\0"_sd, - 0ULL, - 14961230494313510588ULL, - 6383328099726337777ULL); // make sure we handle embedded nulls - - - TEST_STRING64( - "aaaa", 0x9747b28cULL, 13033599803469372400ULL, 11949150323828610719ULL); // one full chunk - TEST_STRING64("aaa", - 0x9747b28cULL, - 10278871841506805355ULL, - 17952965428487426844ULL); // three characters - TEST_STRING64( - "aa", 0x9747b28cULL, 1343929393636293407ULL, 16804672932933964801ULL); // two characters - TEST_STRING64( - "a", 0x9747b28cULL, 6694838689256856093ULL, 11415968713816993796ULL); // one character +TEST(MurmurHash3, TestVectors128) { + TEST_STRING128("", 0, 0ULL, 0ULL); + + TEST_STRING128("", 1ULL, 5048724184180415669ULL, 5864299874987029891ULL); + // Make sure seed value is handled unsigned. + TEST_STRING128("", 0xffffffffULL, 7706185961851046380ULL, 9616347466054386795ULL); + // Make sure we handle embedded nulls. + TEST_STRING128("\0\0\0\0"_sd, 0ULL, 14961230494313510588ULL, 6383328099726337777ULL); + + // One full chunk. + TEST_STRING128("aaaa", 0x9747b28cULL, 13033599803469372400ULL, 11949150323828610719ULL); + // Three characters. + TEST_STRING128("aaa", 0x9747b28cULL, 10278871841506805355ULL, 17952965428487426844ULL); + // Two characters. + TEST_STRING128("aa", 0x9747b28cULL, 1343929393636293407ULL, 16804672932933964801ULL); + // One character. + TEST_STRING128("a", 0x9747b28cULL, 6694838689256856093ULL, 11415968713816993796ULL); // Endian order within the chunks - TEST_STRING64( - "abcd", 0x9747b28cULL, 5310993687375067025ULL, 9979528070057666491ULL); // one full chunk - TEST_STRING64("abc", 0x9747b28cULL, 3982135406228655836ULL, 14835035517329147071ULL); - TEST_STRING64("ab", 0x9747b28cULL, 9526501539032868875ULL, 9131386788375312171ULL); - TEST_STRING64("a", 0x9747b28cULL, 6694838689256856093ULL, 11415968713816993796ULL); + TEST_STRING128("abcd", 0x9747b28cULL, 5310993687375067025ULL, 9979528070057666491ULL); + TEST_STRING128("abc", 0x9747b28cULL, 3982135406228655836ULL, 14835035517329147071ULL); + TEST_STRING128("ab", 0x9747b28cULL, 9526501539032868875ULL, 9131386788375312171ULL); + TEST_STRING128("a", 0x9747b28cULL, 6694838689256856093ULL, 11415968713816993796ULL); - TEST_STRING64("Hello, world!", 0x9747b28cULL, 17132966038248896814ULL, 17896881015324243642ULL); + TEST_STRING128( + "Hello, world!", 0x9747b28cULL, 17132966038248896814ULL, 17896881015324243642ULL); - // Make sure you handle UTF-8 high characters. A bcrypt implementation messed this up - TEST_STRING64("ππππππππ", - 0x9747b28cULL, - 10874605236735318559ULL, - 17921841414653337979ULL); // U+03C0: Greek Small Letter Pi + // Make sure to handle UTF-8 high characters. A bcrypt implementation messed this up. Here we + // use U+03C0: Greek Small Letter Pi. + TEST_STRING128("ππππππππ", 0x9747b28cULL, 10874605236735318559ULL, 17921841414653337979ULL); - // String of 256 characters. - // Make sure you don't store string lengths in a char, and overflow at 255 bytes (as OpenBSD's - // canonical BCrypt implementation did) - TEST_STRING64( + // String of 256 characters. Make sure you don't store string lengths in a char, and overflow at + // 255 bytes (as OpenBSD's canonical BCrypt implementation did). 
+ TEST_STRING128( std::string(256, 'a'), 0x9747b28cULL, 557766291455132100ULL, 14184293241195392597ULL); } +// Output of the 64-bit version of murmur3() should be the same as the first 8 bytes of the 128-bit +// version. +TEST(MurmurHash3, TestVectors64) { + TEST_STRING64("", 0, 0ULL); + + TEST_STRING64("", 1ULL, 5048724184180415669ULL); + // Make sure seed value is handled unsigned. + TEST_STRING64("", 0xffffffffULL, 7706185961851046380ULL); + // Make sure we handle embedded nulls. + TEST_STRING64("\0\0\0\0"_sd, 0ULL, 14961230494313510588ULL); + + // One full chunk. + TEST_STRING64("aaaa", 0x9747b28cULL, 13033599803469372400ULL); + // Three characters. + TEST_STRING64("aaa", 0x9747b28cULL, 10278871841506805355ULL); + // Two characters. + TEST_STRING64("aa", 0x9747b28cULL, 1343929393636293407ULL); + // One character. + TEST_STRING64("a", 0x9747b28cULL, 6694838689256856093ULL); + + // Endian order within the chunks + TEST_STRING64("abcd", 0x9747b28cULL, 5310993687375067025ULL); + TEST_STRING64("abc", 0x9747b28cULL, 3982135406228655836ULL); + TEST_STRING64("ab", 0x9747b28cULL, 9526501539032868875ULL); + TEST_STRING64("a", 0x9747b28cULL, 6694838689256856093ULL); + + TEST_STRING64("Hello, world!", 0x9747b28cULL, 17132966038248896814ULL); + + // Make sure to handle UTF-8 high characters. A bcrypt implementation messed this up. Here we + // use U+03C0: Greek Small Letter Pi. + TEST_STRING64("ππππππππ", 0x9747b28cULL, 10874605236735318559ULL); + + // String of 256 characters. Make sure you don't store string lengths in a char, and overflow at + // 255 bytes (as OpenBSD's canonical BCrypt implementation did). + TEST_STRING64(std::string(256, 'a'), 0x9747b28cULL, 557766291455132100ULL); +} + } // namespace } // namespace mongo diff --git a/src/mongo/base/parse_number.cpp b/src/mongo/base/parse_number.cpp index 2bb46f0ba1003..914b9929e7ea5 100644 --- a/src/mongo/base/parse_number.cpp +++ b/src/mongo/base/parse_number.cpp @@ -27,17 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/base/parse_number.h" - #include #include #include #include +#include #include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/parse_number.h" +#include "mongo/base/static_assert.h" #include "mongo/base/status_with.h" #include "mongo/platform/decimal128.h" #include "mongo/platform/overflow_arithmetic.h" diff --git a/src/mongo/base/parse_number.h b/src/mongo/base/parse_number.h index dcdbcc50424bc..ffd07f2b8d5c2 100644 --- a/src/mongo/base/parse_number.h +++ b/src/mongo/base/parse_number.h @@ -33,6 +33,8 @@ #pragma once +#include + #include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/platform/decimal128.h" diff --git a/src/mongo/base/parse_number_test.cpp b/src/mongo/base/parse_number_test.cpp index d3c0cda1abce0..b663b3130c0a6 100644 --- a/src/mongo/base/parse_number_test.cpp +++ b/src/mongo/base/parse_number_test.cpp @@ -27,18 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include -#include +#include +#include #include -#include +#include +#include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/parse_number.h" #include "mongo/base/status.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/str.h" // for str::stream()! 
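As the rewritten test comments state, the 64-bit murmur3() overload is expected to return the first 8 bytes of the 128-bit output, read little-endian (which is exactly what compute128's LittleEndian<uint64_t> read produces). The following sketch shows only that truncation relationship; it assumes the mongo/util/murmur3.h helpers behave as the test vectors imply and is not that header's implementation.

#include <array>
#include <cstdint>
#include <cstring>
#include <iostream>

// Given a 16-byte x64_128 MurmurHash3 output, the 64-bit convenience value the
// tests compare against is the little-endian read of bytes 0..7.
uint64_t low64(const std::array<char, 16>& hash128) {
    uint64_t out = 0;
    std::memcpy(&out, hash128.data(), sizeof(out));
    return out;  // equals the LittleEndian<uint64_t> read on little-endian hosts
}

int main() {
    std::array<char, 16> h{};  // placeholder bytes, not a real hash value
    h[0] = 0x35;               // least significant byte of the 64-bit result
    h[7] = 0x46;               // most significant byte of the 64-bit result
    std::cout << std::hex << low64(h) << '\n';  // prints 4600000000000035
    return 0;
}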
#define ASSERT_PARSES_WITH_PARSER(type, input_string, parser, expected_value) \ diff --git a/src/mongo/base/secure_allocator.cpp b/src/mongo/base/secure_allocator.cpp index 2bdd87a2c5405..22a5cb5c2e42a 100644 --- a/src/mongo/base/secure_allocator.cpp +++ b/src/mongo/base/secure_allocator.cpp @@ -28,32 +28,34 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/base/secure_allocator.h" - +#include #include #include +#include + +#include + +#include "mongo/base/initializer.h" +#include "mongo/base/secure_allocator.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/errno_util.h" #ifdef _WIN32 #include #include #else #include -#include -#include #endif -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" #include "mongo/util/processinfo.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/secure_zero_memory.h" #include "mongo/util/static_immortal.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/base/secure_allocator.h b/src/mongo/base/secure_allocator.h index 2dedc89472a3d..f492c5534c39b 100644 --- a/src/mongo/base/secure_allocator.h +++ b/src/mongo/base/secure_allocator.h @@ -29,18 +29,24 @@ #pragma once -#include "mongo/config.h" - +#include +#include #include +#include #include #include +#include #include #include +#include #include #include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/server_options.h" #include "mongo/stdx/type_traits.h" +#include "mongo/util/allocator.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/base/secure_allocator_test.cpp b/src/mongo/base/secure_allocator_test.cpp index f8d5befbe7269..2139ad411082d 100644 --- a/src/mongo/base/secure_allocator_test.cpp +++ b/src/mongo/base/secure_allocator_test.cpp @@ -27,13 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/base/secure_allocator.h" - #include +#include -#include "mongo/unittest/unittest.h" +#include "mongo/base/secure_allocator.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/base/shim.h b/src/mongo/base/shim.h index 5a97f82647517..10fe7de2f710c 100644 --- a/src/mongo/base/shim.h +++ b/src/mongo/base/shim.h @@ -33,9 +33,10 @@ #include #include #include +#include -#include "mongo/base/init.h" -#include "mongo/config.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/util/assert_util.h" /** diff --git a/src/mongo/base/simple_string_data_comparator.cpp b/src/mongo/base/simple_string_data_comparator.cpp index d366832563a49..59d2fd4862443 100644 --- a/src/mongo/base/simple_string_data_comparator.cpp +++ b/src/mongo/base/simple_string_data_comparator.cpp @@ -27,38 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/base/simple_string_data_comparator.h" - -#include - -#include "mongo/base/data_type_endian.h" -#include "mongo/base/data_view.h" +#include "mongo/util/murmur3.h" namespace mongo { -namespace { - -template -size_t murmur3(StringData str, size_t seed); - -template <> -size_t murmur3<4>(StringData str, size_t seed) { - char hash[4]; - MurmurHash3_x86_32(str.rawData(), str.size(), seed, &hash); - return ConstDataView(hash).read>(); -} - -template <> -size_t murmur3<8>(StringData str, size_t seed) { - char hash[16]; - MurmurHash3_x64_128(str.rawData(), str.size(), seed, hash); - return static_cast(ConstDataView(hash).read>()); -} - -} // namespace - const SimpleStringDataComparator SimpleStringDataComparator::kInstance{}; int SimpleStringDataComparator::compare(StringData left, StringData right) const { diff --git a/src/mongo/base/simple_string_data_comparator.h b/src/mongo/base/simple_string_data_comparator.h index d9bce9969ef33..9107fd9e04c00 100644 --- a/src/mongo/base/simple_string_data_comparator.h +++ b/src/mongo/base/simple_string_data_comparator.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/string_data.h" #include "mongo/base/string_data_comparator_interface.h" namespace mongo { diff --git a/src/mongo/base/status.cpp b/src/mongo/base/status.cpp index b7f0b29302f96..a8d97a4c1e9fa 100644 --- a/src/mongo/base/status.cpp +++ b/src/mongo/base/status.cpp @@ -28,13 +28,21 @@ */ +#include #include -#include + +#include +#include #include "mongo/base/status.h" -#include "mongo/db/jsobj.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/base/status.h b/src/mongo/base/status.h index d5f79718d874d..5e39483b8d6da 100644 --- a/src/mongo/base/status.h +++ b/src/mongo/base/status.h @@ -29,15 +29,21 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include #include #include - -#include -#include +#include #include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" +#include "mongo/base/static_assert.h" #include "mongo/base/string_data.h" #include "mongo/bson/util/builder_fwd.h" #include "mongo/platform/compiler.h" diff --git a/src/mongo/base/status_bm.cpp b/src/mongo/base/status_bm.cpp index c59cc5b20d0bf..ce28847128d66 100644 --- a/src/mongo/base/status_bm.cpp +++ b/src/mongo/base/status_bm.cpp @@ -27,17 +27,14 @@ * it in the license file. */ +#include #include +#include -#include -#include -#include - -#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/base/string_data.h" -#include "mongo/util/processinfo.h" namespace mongo { namespace { diff --git a/src/mongo/base/status_test.cpp b/src/mongo/base/status_test.cpp index 9e129388231d8..6531f406a345c 100644 --- a/src/mongo/base/status_test.cpp +++ b/src/mongo/base/status_test.cpp @@ -27,19 +27,23 @@ * it in the license file. 
*/ -#include +#include +#include +#include #include #include +#include -#include -#include +#include #include "mongo/base/status.h" -#include "mongo/config.h" -#include "mongo/db/json.h" +#include "mongo/bson/json.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/base/status_with.h b/src/mongo/base/status_with.h index 0781fd9948095..a0c8bbdb9d2d1 100644 --- a/src/mongo/base/status_with.h +++ b/src/mongo/base/status_with.h @@ -30,10 +30,14 @@ #pragma once #include +#include +#include #include +#include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/static_assert.h" #include "mongo/base/status.h" #include "mongo/bson/util/builder_fwd.h" @@ -248,22 +252,22 @@ bool operator!=(const Status& status, const StatusWith& sw) { // template -bool operator==(const StatusWith& sw, const ErrorCodes::Error code) { +bool operator==(const StatusWith& sw, ErrorCodes::Error code) { return sw.getStatus() == code; } template -bool operator==(const ErrorCodes::Error code, const StatusWith& sw) { +bool operator==(ErrorCodes::Error code, const StatusWith& sw) { return code == sw.getStatus(); } template -bool operator!=(const StatusWith& sw, const ErrorCodes::Error code) { +bool operator!=(const StatusWith& sw, ErrorCodes::Error code) { return !(sw == code); } template -bool operator!=(const ErrorCodes::Error code, const StatusWith& sw) { +bool operator!=(ErrorCodes::Error code, const StatusWith& sw) { return !(code == sw); } diff --git a/src/mongo/base/status_with_test.cpp b/src/mongo/base/status_with_test.cpp index daa22b63f132b..1b4ff04f22cbd 100644 --- a/src/mongo/base/status_with_test.cpp +++ b/src/mongo/base/status_with_test.cpp @@ -31,11 +31,14 @@ #include #include +#include + #include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/base/string_data.cpp b/src/mongo/base/string_data.cpp index e87fdb8b63ad6..6efe6a81fd93a 100644 --- a/src/mongo/base/string_data.cpp +++ b/src/mongo/base/string_data.cpp @@ -29,9 +29,10 @@ #include "mongo/base/string_data.h" +#include #include -#include +#include namespace mongo { diff --git a/src/mongo/base/string_data.h b/src/mongo/base/string_data.h index 2b1d3c0df7b56..72ee02a09c300 100644 --- a/src/mongo/base/string_data.h +++ b/src/mongo/base/string_data.h @@ -30,13 +30,14 @@ #pragma once #include // for min +#include #include +#include #include #include #include #include - -#include +#include #include "mongo/platform/compiler.h" #include "mongo/stdx/type_traits.h" diff --git a/src/mongo/base/string_data_test.cpp b/src/mongo/base/string_data_test.cpp index b4119eb99efc5..139b8af68dacc 100644 --- a/src/mongo/base/string_data_test.cpp +++ b/src/mongo/base/string_data_test.cpp @@ -29,16 +29,21 @@ #include #include +#include #include #include #include +#include #include +#include + #include "mongo/base/simple_string_data_comparator.h" #include "mongo/base/string_data.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" 
-#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/base/system_error.cpp b/src/mongo/base/system_error.cpp index 18c76d5f47056..f3a6d4933d442 100644 --- a/src/mongo/base/system_error.cpp +++ b/src/mongo/base/system_error.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include #include "mongo/base/system_error.h" diff --git a/src/mongo/base/system_error_test.cpp b/src/mongo/base/system_error_test.cpp index 08481f8ca7a07..ba9628786baf0 100644 --- a/src/mongo/base/system_error_test.cpp +++ b/src/mongo/base/system_error_test.cpp @@ -27,12 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include +#include "mongo/base/string_data.h" #include "mongo/base/system_error.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/base/uuid_test.cpp b/src/mongo/base/uuid_test.cpp index a0742d8e8aa3e..d172f78d03cae 100644 --- a/src/mongo/base/uuid_test.cpp +++ b/src/mongo/base/uuid_test.cpp @@ -27,13 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/stdx/unordered_set.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/base/validate_locale.cpp b/src/mongo/base/validate_locale.cpp index 7a102f5bf24a8..669fff374a421 100644 --- a/src/mongo/base/validate_locale.cpp +++ b/src/mongo/base/validate_locale.cpp @@ -27,13 +27,16 @@ * it in the license file. 
*/ -#include - #include -#include -#include +#include // IWYU pragma: keep +#include // IWYU pragma: keep +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/bson/bson_bm.cpp b/src/mongo/bson/bson_bm.cpp index 29d12bbd6d3e4..9a745735f3e9f 100644 --- a/src/mongo/bson/bson_bm.cpp +++ b/src/mongo/bson/bson_bm.cpp @@ -28,13 +28,26 @@ */ -#include "mongo/platform/basic.h" - #include +#include +#include +#include +#include +#include + +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/bson/bson_comparator_interface_base.cpp b/src/mongo/bson/bson_comparator_interface_base.cpp index e0d85f3e92653..986afbfa47443 100644 --- a/src/mongo/bson/bson_comparator_interface_base.cpp +++ b/src/mongo/bson/bson_comparator_interface_base.cpp @@ -27,16 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/bson/bson_comparator_interface_base.h" - -#include +#include +#include +#include #include "mongo/base/simple_string_data_comparator.h" +#include "mongo/base/string_data_comparator_interface.h" +#include "mongo/bson/bson_comparator_interface_base.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/bson/bson_comparator_interface_base.h b/src/mongo/bson/bson_comparator_interface_base.h index 8f918062a5037..a9f72d0eb6cc7 100644 --- a/src/mongo/bson/bson_comparator_interface_base.h +++ b/src/mongo/bson/bson_comparator_interface_base.h @@ -29,11 +29,17 @@ #pragma once +#include +#include +#include #include +#include #include #include #include +#include "mongo/base/error_extra_info.h" +#include "mongo/base/string_data.h" #include "mongo/base/string_data_comparator_interface.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" diff --git a/src/mongo/bson/bson_depth.cpp b/src/mongo/bson/bson_depth.cpp index 94277fba3f687..a1d514e74c16c 100644 --- a/src/mongo/bson/bson_depth.cpp +++ b/src/mongo/bson/bson_depth.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/bson/bson_depth.h" namespace mongo { diff --git a/src/mongo/bson/bson_field.h b/src/mongo/bson/bson_field.h index dd8a9e2ffd090..197fbc6acfdf3 100644 --- a/src/mongo/bson/bson_field.h +++ b/src/mongo/bson/bson_field.h @@ -29,9 +29,9 @@ #pragma once -#include - #include +#include +#include #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/bson/bson_field_test.cpp b/src/mongo/bson/bson_field_test.cpp index 523dbc27a3ec1..7aab9db354a17 100644 --- a/src/mongo/bson/bson_field_test.cpp +++ b/src/mongo/bson/bson_field_test.cpp @@ -27,8 +27,14 @@ * it in the license file. 
*/ -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/mutable_bson_test_utils.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/bson/bson_obj_data_type_test.cpp b/src/mongo/bson/bson_obj_data_type_test.cpp index d0073d58f6225..615b690eb8645 100644 --- a/src/mongo/bson/bson_obj_data_type_test.cpp +++ b/src/mongo/bson/bson_obj_data_type_test.cpp @@ -27,12 +27,15 @@ * it in the license file. */ -#include "mongo/base/data_range.h" +#include + #include "mongo/base/data_range_cursor.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/bson/bson_obj_test.cpp b/src/mongo/bson/bson_obj_test.cpp index 3745f77f13500..7823214debace 100644 --- a/src/mongo/bson/bson_obj_test.cpp +++ b/src/mongo/bson/bson_obj_test.cpp @@ -27,16 +27,40 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement_comparator.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/bson/simple_bsonelement_comparator.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" #include "mongo/platform/decimal128.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/string_map.h" namespace { using namespace mongo; diff --git a/src/mongo/bson/bson_validate.cpp b/src/mongo/bson/bson_validate.cpp index bdb5be5983a13..57491a1b844cb 100644 --- a/src/mongo/bson/bson_validate.cpp +++ b/src/mongo/bson/bson_validate.cpp @@ -29,17 +29,35 @@ #include "mongo/bson/bson_validate.h" +#include +#include #include +#include +#include +#include +#include +#include #include +#include + +#include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelementvalue.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/util/bsoncolumn.h" #include "mongo/crypto/encryption_fields_util.h" #include "mongo/crypto/fle_field_schema_gen.h" -#include "mongo/logv2/log.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decimal_counter.h" +#include "mongo/util/str.h" #include "mongo/util/str_escape.h" #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/bson/bson_validate_test.cpp b/src/mongo/bson/bson_validate_test.cpp index 4f38726a69a11..b1cabd00de23f 100644 --- a/src/mongo/bson/bson_validate_test.cpp +++ b/src/mongo/bson/bson_validate_test.cpp @@ -28,19 +28,42 @@ */ -#include "mongo/base/status.h" -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bsoncolumnbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/crypto/fle_field_schema_gen.h" -#include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression_type.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/decimal128.h" #include "mongo/platform/random.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/bson/bsonelement.cpp b/src/mongo/bson/bsonelement.cpp index 1d62d0c3491d9..f94ecc9b15e67 100644 --- a/src/mongo/bson/bsonelement.cpp +++ b/src/mongo/bson/bsonelement.cpp @@ -30,35 +30,32 @@ #include "mongo/bson/bsonelement.h" -#include -#include +#include #include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include "mongo/base/compare_numbers.h" #include "mongo/base/data_cursor.h" #include "mongo/base/parse_number.h" -#include "mongo/base/simple_string_data_comparator.h" +#include "mongo/base/static_assert.h" +#include "mongo/base/string_data_comparator_interface.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/generator_extended_canonical_2_0_0.h" #include "mongo/bson/generator_extended_relaxed_2_0_0.h" #include "mongo/bson/generator_legacy_strict.h" -#include "mongo/db/jsobj.h" #include "mongo/logv2/log.h" -#include "mongo/platform/strnlen.h" -#include "mongo/util/base64.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/util/duration.h" #include "mongo/util/hex.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" -#include "mongo/util/string_map.h" -#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - namespace mongo { using std::dec; @@ -456,7 +453,7 @@ std::vector BSONElement::Array() const { unsigned u; Status status = NumberParser{}(f, &u); if (status.isOK()) { - verify(u < 1000000); + MONGO_verify(u < 1000000); if (u >= v.size()) v.resize(u + 1); v[u] = e; @@ -612,12 +609,12 @@ BSONObj BSONElement::embeddedObjectUserCheck() const { } BSONObj BSONElement::embeddedObject() const { - verify(isABSONObj()); + MONGO_verify(isABSONObj()); return BSONObj(value(), 
BSONObj::LargeSizeTrait{}); } BSONObj BSONElement::codeWScopeObject() const { - verify(type() == CodeWScope); + MONGO_verify(type() == CodeWScope); int strSizeWNull = ConstDataView(value() + 4).read>(); return BSONObj(value() + 4 + 4 + strSizeWNull); } diff --git a/src/mongo/bson/bsonelement.h b/src/mongo/bson/bsonelement.h index 55c47d9c940db..b2da3725d6590 100644 --- a/src/mongo/bson/bsonelement.h +++ b/src/mongo/bson/bsonelement.h @@ -30,30 +30,47 @@ #pragma once #include +#include +#include #include #include #include // strlen #include +#include #include +#include +#include #include #include "mongo/base/data_range.h" #include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/base/string_data_comparator_interface.h" #include "mongo/bson/bson_comparator_interface_base.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" -#include "mongo/config.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/platform/decimal128.h" +#include "mongo/platform/mutex.h" #include "mongo/platform/strnlen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { class BSONObj; + class BSONElement; class BSONObjBuilder; class Timestamp; + class ExtendedCanonicalV200Generator; class ExtendedRelaxedV200Generator; class LegacyStrictGenerator; @@ -619,7 +636,7 @@ class BSONElement { */ const char* binData(int& len) const { // BinData: - verify(type() == BinData); + MONGO_verify(type() == BinData); len = valuestrsize(); return value() + 5; } @@ -639,14 +656,14 @@ class BSONElement { static BinDataType binDataType(const char* raw, size_t length) { // BinData: - verify(length >= 5); + MONGO_verify(length >= 5); unsigned char c = raw[4]; return static_cast(c); } BinDataType binDataType() const { // BinData: - verify(type() == BinData); + MONGO_verify(type() == BinData); unsigned char c = (value() + 4)[0]; return static_cast(c); } @@ -663,7 +680,7 @@ class BSONElement { * Retrieve the regex std::string for a Regex element */ const char* regex() const { - verify(type() == RegEx); + MONGO_verify(type() == RegEx); return value(); } @@ -866,6 +883,14 @@ class BSONElement { } } + /** + * Construct a BSONElement where you already know the length of the name and the total size + * of the element. fieldNameSize includes the null terminator. + */ + struct TrustedInitTag {}; + constexpr BSONElement(const char* d, int fieldNameSize, int totSize, TrustedInitTag) + : data(d), fieldNameSize_(fieldNameSize), totalSize(totSize) {} + std::string _asCode() const; bool coerce(std::string* out) const; diff --git a/src/mongo/bson/bsonelement_test.cpp b/src/mongo/bson/bsonelement_test.cpp index 4e5bc3f6f07f2..c8547042ff1d8 100644 --- a/src/mongo/bson/bsonelement_test.cpp +++ b/src/mongo/bson/bsonelement_test.cpp @@ -27,17 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +// IWYU pragma: no_include "ext/type_traits.h" #include #include -#include #include +#include +#include "mongo/base/data_range.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/bson/bsonelementvalue.h b/src/mongo/bson/bsonelementvalue.h index 22b083f40f6b9..af7e0ef79599b 100644 --- a/src/mongo/bson/bsonelementvalue.h +++ b/src/mongo/bson/bsonelementvalue.h @@ -29,15 +29,19 @@ #pragma once +#include #include // strlen #include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/bsontypes_util.h" #include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" #include "mongo/platform/decimal128.h" +#include "mongo/util/time_support.h" namespace mongo { class BSONObj; diff --git a/src/mongo/bson/bsonmisc.cpp b/src/mongo/bson/bsonmisc.cpp index f65de3a2af70b..e69765a463b0c 100644 --- a/src/mongo/bson/bsonmisc.cpp +++ b/src/mongo/bson/bsonmisc.cpp @@ -27,7 +27,16 @@ * it in the license file. */ -#include "mongo/db/jsobj.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -93,7 +102,7 @@ Labeler BSONObjBuilderValueStream::operator<<(const Labeler::Label& l) { void BSONObjBuilderValueStream::endField(StringData nextFieldName) { if (haveSubobj()) { - verify(_fieldName.rawData()); + MONGO_verify(_fieldName.rawData()); _builder->append(_fieldName, subobj()->done()); _subobj.reset(); } diff --git a/src/mongo/bson/bsonmisc.h b/src/mongo/bson/bsonmisc.h index c4c20b98efabb..82f8688b57b32 100644 --- a/src/mongo/bson/bsonmisc.h +++ b/src/mongo/bson/bsonmisc.h @@ -29,9 +29,14 @@ #pragma once +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/util/builder.h" namespace mongo { diff --git a/src/mongo/bson/bsonobj.cpp b/src/mongo/bson/bsonobj.cpp index 6f5ca8aec3f6f..197bec06d1de4 100644 --- a/src/mongo/bson/bsonobj.cpp +++ b/src/mongo/bson/bsonobj.cpp @@ -27,20 +27,45 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/data_type.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" - -#include "mongo/db/jsobj.h" - -#include "mongo/base/data_range.h" #include "mongo/bson/bsonelement_comparator_interface.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/generator_extended_canonical_2_0_0.h" #include "mongo/bson/generator_extended_relaxed_2_0_0.h" #include "mongo/bson/generator_legacy_strict.h" -#include "mongo/db/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/logv2/log.h" -#include "mongo/util/allocator.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/hex.h" +#include "mongo/util/shared_buffer.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -875,7 +900,7 @@ BSONIteratorSorted::BSONIteratorSorted(const BSONObj& o, const ElementFieldCmp& auto elem = i.next(); _fields[x++] = {elem.fieldNameStringData(), elem.size()}; } - verify(x == _nfields); + MONGO_verify(x == _nfields); std::sort(_fields.get(), _fields.get() + _nfields, cmp); _cur = 0; } diff --git a/src/mongo/bson/bsonobj.h b/src/mongo/bson/bsonobj.h index 0c24e4cd67678..67f8ad17801c5 100644 --- a/src/mongo/bson/bsonobj.h +++ b/src/mongo/bson/bsonobj.h @@ -29,8 +29,20 @@ #pragma once +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include #include #include @@ -38,6 +50,10 @@ #include #include "mongo/base/data_type.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/static_assert.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/base/string_data_comparator_interface.h" #include "mongo/bson/bson_comparator_interface_base.h" @@ -46,7 +62,10 @@ #include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/bufreader.h" #include "mongo/util/shared_buffer.h" #include "mongo/util/string_map.h" @@ -54,6 +73,7 @@ namespace mongo { class BSONObjBuilder; + class BSONObjStlIterator; class ExtendedCanonicalV200Generator; class ExtendedRelaxedV200Generator; @@ -677,7 +697,7 @@ class BSONObj { iterator end() const; void appendSelfToBufBuilder(BufBuilder& b) const { - verify(objsize()); + MONGO_verify(objsize()); b.appendBuf(objdata(), objsize()); } @@ -881,7 +901,7 @@ class BSONObjIterator { } BSONElement next() { - verify(_pos <= _theend); + MONGO_verify(_pos <= _theend); BSONElement e(_pos); _pos += e.size(); return e; @@ -905,7 +925,7 @@ class BSONObjIterator { } BSONElement operator*() { - verify(_pos <= _theend); + MONGO_verify(_pos <= _theend); return BSONElement(_pos); } @@ -932,7 +952,7 @@ class BSONIteratorSorted { public: ~BSONIteratorSorted() { - verify(_fields); + MONGO_verify(_fields); } bool more() { @@ -940,7 +960,7 @@ class BSONIteratorSorted 
{ } BSONElement next() { - verify(_fields); + MONGO_verify(_fields); if (_cur < _nfields) { const auto& element = _fields[_cur++]; return BSONElement(element.fieldName.rawData() - 1, // Include type byte diff --git a/src/mongo/bson/bsonobjbuilder.cpp b/src/mongo/bson/bsonobjbuilder.cpp index bde3526c543c2..61db0272f490e 100644 --- a/src/mongo/bson/bsonobjbuilder.cpp +++ b/src/mongo/bson/bsonobjbuilder.cpp @@ -28,8 +28,13 @@ */ #include "mongo/bson/bsonobjbuilder.h" + +#include + #include "mongo/bson/timestamp.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/bson/bsonobjbuilder.h b/src/mongo/bson/bsonobjbuilder.h index be826d5a824cd..c928924d66dde 100644 --- a/src/mongo/bson/bsonobjbuilder.h +++ b/src/mongo/bson/bsonobjbuilder.h @@ -29,22 +29,38 @@ #pragma once +#include #include #include +#include #include +#include #include +#include +#include #include +#include +#include +#include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" #include "mongo/base/parse_number.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_field.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/builder.h" #include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" #include "mongo/util/decimal_counter.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -143,7 +159,7 @@ class BSONObjBuilderBase { /** append element to the object we are building */ Derived& append(const BSONElement& e) { // do not append eoo, that would corrupt us. the builder auto appends when done() is called. - verify(!e.eoo()); + MONGO_verify(!e.eoo()); _b.appendBuf((void*)e.rawdata(), e.size()); return static_cast(*this); } @@ -151,7 +167,7 @@ class BSONObjBuilderBase { /** append an element but with a new name */ Derived& appendAs(const BSONElement& e, StringData fieldName) { // do not append eoo, that would corrupt us. the builder auto appends when done() is called. - verify(!e.eoo()); + MONGO_verify(!e.eoo()); _b.appendNum((char)e.type()); _b.appendStr(fieldName); _b.appendBuf((void*)e.value(), e.valuesize()); @@ -168,12 +184,12 @@ class BSONObjBuilderBase { /** add a subobject as a member */ Derived& appendObject(StringData fieldName, const char* objdata, int size = 0) { - verify(objdata); + MONGO_verify(objdata); if (size == 0) { size = ConstDataView(objdata).read>(); } - verify(size > 4 && size < 100000000); + MONGO_verify(size > 4 && size < 100000000); _b.appendNum((char)Object); _b.appendStr(fieldName); @@ -539,9 +555,9 @@ class BSONObjBuilderBase { * Append a map of values as a sub-object. * Note: the keys of the map should be StringData-compatible (i.e. strings). 
*/ - TEMPLATE(typename Map) - REQUIRES(std::is_convertible_v().begin()->first), StringData>) - Derived& append(StringData fieldName, const Map& map) { + template + requires std::is_convertible_v().begin()->first), StringData> + Derived& append(StringData fieldName, const Map& map) { typename std::remove_reference::type bob; for (auto&& [k, v] : map) { bob.append(k, v); @@ -701,6 +717,7 @@ class BSONObjBuilderBase { // without being sure that you are not undoing the advantages of the // extern template declaration. class BSONObjBuilder; + extern template class BSONObjBuilderBase; // BSONObjBuilder needs this forward declared in order to declare the diff --git a/src/mongo/bson/bsonobjbuilder_test.cpp b/src/mongo/bson/bsonobjbuilder_test.cpp index 386b68f99c4c3..99f1f45945863 100644 --- a/src/mongo/bson/bsonobjbuilder_test.cpp +++ b/src/mongo/bson/bsonobjbuilder_test.cpp @@ -27,13 +27,39 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include #include - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/unittest/unittest.h" +#include +#include + +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/shared_buffer.h" namespace mongo { namespace { diff --git a/src/mongo/bson/bsontypes.cpp b/src/mongo/bson/bsontypes.cpp index 8be3c5e1d04d2..321890db1d093 100644 --- a/src/mongo/bson/bsontypes.cpp +++ b/src/mongo/bson/bsontypes.cpp @@ -27,12 +27,17 @@ * it in the license file. 
*/ +#include +#include #include +#include +#include -#include "mongo/bson/bsontypes.h" +#include -#include "mongo/config.h" -#include "mongo/db/jsobj.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/bson/bsontypes.h b/src/mongo/bson/bsontypes.h index 0dc2fe8a4ab33..909dd065c983e 100644 --- a/src/mongo/bson/bsontypes.h +++ b/src/mongo/bson/bsontypes.h @@ -30,13 +30,18 @@ #pragma once #include +#include #include +#include #include +#include #include #include "mongo/base/counter.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/platform/decimal128.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/bson/json.cpp b/src/mongo/bson/json.cpp index fe572941c2a25..80824e8788386 100644 --- a/src/mongo/bson/json.cpp +++ b/src/mongo/bson/json.cpp @@ -32,18 +32,29 @@ #include #include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/parse_number.h" -#include "mongo/db/jsobj.h" -#include "mongo/logv2/log.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/platform/decimal128.h" -#include "mongo/platform/strtoll.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" #include "mongo/util/ctype.h" #include "mongo/util/decimal_counter.h" #include "mongo/util/hex.h" -#include "mongo/util/str.h" #include "mongo/util/time_support.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/bson/json.h b/src/mongo/bson/json.h index a2be12c93e749..02fc381c6184b 100644 --- a/src/mongo/bson/json.h +++ b/src/mongo/bson/json.h @@ -29,11 +29,14 @@ #pragma once +#include #include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/bson/mutable/document.cpp b/src/mongo/bson/mutable/document.cpp index 8a5ae28067ebe..7b8de727d4a05 100644 --- a/src/mongo/bson/mutable/document.cpp +++ b/src/mongo/bson/mutable/document.cpp @@ -27,18 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/bson/mutable/document.h" - -#include -#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/static_assert.h" +#include "mongo/base/status.h" #include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/util/builder.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/debug_util.h" namespace mongo { diff --git a/src/mongo/bson/mutable/document.h b/src/mongo/bson/mutable/document.h index 6573a6559f5c8..d5bcb31bcb400 100644 --- a/src/mongo/bson/mutable/document.h +++ b/src/mongo/bson/mutable/document.h @@ -29,15 +29,26 @@ #pragma once +#include #include +#include +#include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/const_element.h" #include "mongo/bson/mutable/damage_vector.h" #include "mongo/bson/mutable/element.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" +#include "mongo/platform/decimal128.h" #include "mongo/platform/visibility.h" #include "mongo/util/safe_num.h" +#include "mongo/util/time_support.h" namespace mongo { namespace mutablebson { diff --git a/src/mongo/bson/mutable/element.cpp b/src/mongo/bson/mutable/element.cpp index 6fe877903378f..32b1457b7b1b8 100644 --- a/src/mongo/bson/mutable/element.cpp +++ b/src/mongo/bson/mutable/element.cpp @@ -27,11 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/bson/mutable/element.h" -#include "mongo/bson/mutable/algorithm.h" +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/mutable/document.h" namespace mongo { diff --git a/src/mongo/bson/mutable/element.h b/src/mongo/bson/mutable/element.h index 4fb43be5ab489..881acd1aad0f4 100644 --- a/src/mongo/bson/mutable/element.h +++ b/src/mongo/bson/mutable/element.h @@ -29,13 +29,25 @@ #pragma once +#include +#include #include +#include #include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" +#include "mongo/platform/decimal128.h" #include "mongo/platform/visibility.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/safe_num.h" +#include "mongo/util/time_support.h" namespace mongo { namespace mutablebson { diff --git a/src/mongo/bson/mutable/mutable_bson_algo_test.cpp b/src/mongo/bson/mutable/mutable_bson_algo_test.cpp index 20d26d83b6707..d8297da0ad0c7 100644 --- a/src/mongo/bson/mutable/mutable_bson_algo_test.cpp +++ b/src/mongo/bson/mutable/mutable_bson_algo_test.cpp @@ -27,16 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" - +#include "mongo/bson/mutable/const_element.h" #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" #include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/platform/basic.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/safe_num.h" namespace { diff --git a/src/mongo/bson/mutable/mutable_bson_test.cpp b/src/mongo/bson/mutable/mutable_bson_test.cpp index 95f1919b06c0c..c92f19d60af4e 100644 --- a/src/mongo/bson/mutable/mutable_bson_test.cpp +++ b/src/mongo/bson/mutable/mutable_bson_test.cpp @@ -27,20 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/bson/mutable/document.h" +#include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/mutable/const_element.h" #include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" #include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/platform/decimal128.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/safe_num.h" +#include "mongo/util/time_support.h" namespace { diff --git a/src/mongo/bson/mutable/mutable_bson_test_utils.cpp b/src/mongo/bson/mutable/mutable_bson_test_utils.cpp index 337d9c42560f3..285e3c3091d8c 100644 --- a/src/mongo/bson/mutable/mutable_bson_test_utils.cpp +++ b/src/mongo/bson/mutable/mutable_bson_test_utils.cpp @@ -27,19 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/bson/mutable/mutable_bson_test_utils.h" - #include -#include +#include +#include #include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" #include "mongo/bson/mutable/const_element.h" #include "mongo/bson/mutable/document.h" -#include "mongo/unittest/unittest.h" +#include "mongo/bson/mutable/mutable_bson_test_utils.h" +#include "mongo/unittest/assert.h" namespace mongo { namespace mutablebson { diff --git a/src/mongo/bson/mutable/mutable_bson_test_utils.h b/src/mongo/bson/mutable/mutable_bson_test_utils.h index c7223131fd2b5..5ca758e76e159 100644 --- a/src/mongo/bson/mutable/mutable_bson_test_utils.h +++ b/src/mongo/bson/mutable/mutable_bson_test_utils.h @@ -27,9 +27,14 @@ * it in the license file. 
*/ +#pragma once + #include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/const_element.h" #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" namespace mongo { diff --git a/src/mongo/bson/oid.cpp b/src/mongo/bson/oid.cpp index 78850acbba174..f23e35600b682 100644 --- a/src/mongo/bson/oid.cpp +++ b/src/mongo/bson/oid.cpp @@ -27,20 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/bson/oid.h" - -#include +#include +#include +#include #include #include -#include "mongo/base/init.h" +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/string_data.h" -#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/random.h" #include "mongo/util/hex.h" +#include "mongo/util/str.h" namespace mongo { @@ -153,7 +157,7 @@ void OID::initFromTermNumber(int64_t term) { } void OID::init(StringData s) { - verify(s.size() == (2 * kOIDSize)); + MONGO_verify(s.size() == (2 * kOIDSize)); std::string blob = hexblob::decode(s.substr(0, 2 * kOIDSize)); std::copy(blob.begin(), blob.end(), _data); } diff --git a/src/mongo/bson/oid.h b/src/mongo/bson/oid.h index 3ff6a0e12aba4..6c0b000504f0b 100644 --- a/src/mongo/bson/oid.h +++ b/src/mongo/bson/oid.h @@ -29,12 +29,21 @@ #pragma once +#include +#include +#include #include +#include +#include #include "mongo/base/data_range.h" #include "mongo/base/data_view.h" #include "mongo/base/static_assert.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/util/assert_util.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/bson/oid_test.cpp b/src/mongo/bson/oid_test.cpp index 5a34e6af7c161..1e5c8315f8153 100644 --- a/src/mongo/bson/oid_test.cpp +++ b/src/mongo/bson/oid_test.cpp @@ -29,9 +29,14 @@ #include "mongo/bson/oid.h" +#include + +#include + #include "mongo/base/parse_number.h" #include "mongo/platform/endian.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/bson/simple_bsonelement_comparator.cpp b/src/mongo/bson/simple_bsonelement_comparator.cpp index 640db437b943a..3e1b65d483615 100644 --- a/src/mongo/bson/simple_bsonelement_comparator.cpp +++ b/src/mongo/bson/simple_bsonelement_comparator.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/bson/simple_bsonelement_comparator.h" namespace mongo { diff --git a/src/mongo/bson/simple_bsonelement_comparator.h b/src/mongo/bson/simple_bsonelement_comparator.h index e724f198722f3..23eeccfc46761 100644 --- a/src/mongo/bson/simple_bsonelement_comparator.h +++ b/src/mongo/bson/simple_bsonelement_comparator.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/bson/bson_comparator_interface_base.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement_comparator_interface.h" namespace mongo { diff --git a/src/mongo/bson/simple_bsonobj_comparator.cpp b/src/mongo/bson/simple_bsonobj_comparator.cpp index 39af07dcf7748..506c2dad6aba5 100644 --- a/src/mongo/bson/simple_bsonobj_comparator.cpp +++ b/src/mongo/bson/simple_bsonobj_comparator.cpp @@ -28,8 +28,6 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/bson/simple_bsonobj_comparator.h" namespace mongo { diff --git a/src/mongo/bson/simple_bsonobj_comparator.h b/src/mongo/bson/simple_bsonobj_comparator.h index ed08cbdd939ad..2fd78a91e919c 100644 --- a/src/mongo/bson/simple_bsonobj_comparator.h +++ b/src/mongo/bson/simple_bsonobj_comparator.h @@ -29,9 +29,12 @@ #pragma once +#include #include #include +#include "mongo/bson/bson_comparator_interface_base.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator_interface.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" diff --git a/src/mongo/bson/simple_bsonobj_comparator_test.cpp b/src/mongo/bson/simple_bsonobj_comparator_test.cpp index b633fb0f236f6..af5a790e9ffd6 100644 --- a/src/mongo/bson/simple_bsonobj_comparator_test.cpp +++ b/src/mongo/bson/simple_bsonobj_comparator_test.cpp @@ -27,18 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include -#include +#include +#include +#include #include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/stdx/unordered_map.h" -#include "mongo/stdx/unordered_set.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/bson/timestamp.cpp b/src/mongo/bson/timestamp.cpp index c9f65f46cc172..c4693458a550b 100644 --- a/src/mongo/bson/timestamp.cpp +++ b/src/mongo/bson/timestamp.cpp @@ -28,14 +28,11 @@ */ #include "mongo/bson/timestamp.h" -#include "mongo/bson/bsontypes.h" -#include -#include #include #include -#include +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/bson/timestamp.h b/src/mongo/bson/timestamp.h index 3d0befd7450c0..c2d34af01d2ca 100644 --- a/src/mongo/bson/timestamp.h +++ b/src/mongo/bson/timestamp.h @@ -29,9 +29,19 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/bson/ugly_bson_integration_test.cpp b/src/mongo/bson/ugly_bson_integration_test.cpp index 5b9d7a2c28ccf..00291cd9806f7 100644 --- a/src/mongo/bson/ugly_bson_integration_test.cpp +++ b/src/mongo/bson/ugly_bson_integration_test.cpp @@ -27,17 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include -#include - -#include "mongo/client/connection_string.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/executor/network_interface_integration_fixture.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/op_msg.h" -#include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/scopeguard.h" namespace mongo { diff --git a/src/mongo/bson/util/SConscript b/src/mongo/bson/util/SConscript index 57ea076ecfe44..51a237aa3a111 100644 --- a/src/mongo/bson/util/SConscript +++ b/src/mongo/bson/util/SConscript @@ -19,7 +19,7 @@ env.Library( source=[ 'bsoncolumn.cpp', 'bsoncolumnbuilder.cpp', - 'simple8b.cpp', + 'simple8b_builder.cpp', 'simple8b_type_util.cpp', ], LIBDEPS=[ diff --git a/src/mongo/bson/util/bson_check.h b/src/mongo/bson/util/bson_check.h index 2b5fe87e0b4c9..7f46921bd970e 100644 --- a/src/mongo/bson/util/bson_check.h +++ b/src/mongo/bson/util/bson_check.h @@ -29,10 +29,18 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands.h" #include "mongo/db/jsobj.h" #include "mongo/idl/command_generic_argument.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #include "mongo/util/string_map.h" diff --git a/src/mongo/bson/util/bson_check_test.cpp b/src/mongo/bson/util/bson_check_test.cpp index 93716c84a6bdc..42b48455dd929 100644 --- a/src/mongo/bson/util/bson_check_test.cpp +++ b/src/mongo/bson/util/bson_check_test.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ +#include +#include #include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_check.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/bson/util/bson_column_compressed_data.inl b/src/mongo/bson/util/bson_column_compressed_data.inl index 0fbf12a0717a4..bfabaf7ef0531 100644 --- a/src/mongo/bson/util/bson_column_compressed_data.inl +++ b/src/mongo/bson/util/bson_column_compressed_data.inl @@ -9420,4 +9420,4 @@ "F1E85yEwAAAAAAhKwLCwyO4HIXDJkJzt2htw+" "s6xp2VGIICuxPCgzQgKYMTg0NAAAAAACBCQwYsMABHVAKCGACQ2ABC4H6PwAAAAAAAAsAAAAAAAAAgcuM0AtgGbQgi7NRD9jHB" "BSACCQGAghSDgSACwAAAPw/ABCACwAAAPw/" -"ABCATAAABgAgAACALAAAAAAgAACADAAAAAAAAACArQUOAAAAAACADZ8AAAAAAACADZ8AAAAAAAAAAA==" \ No newline at end of file +"ABCATAAABgAgAACALAAAAAAgAACADAAAAAAAAACArQUOAAAAAACADZ8AAAAAAACADZ8AAAAAAAAAAA==" diff --git a/src/mongo/bson/util/bson_extract.cpp b/src/mongo/bson/util/bson_extract.cpp index 4335d1c369941..d6d02025eefa8 100644 --- a/src/mongo/bson/util/bson_extract.cpp +++ b/src/mongo/bson/util/bson_extract.cpp @@ -29,7 +29,11 @@ #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/bson/util/bson_extract_test.cpp b/src/mongo/bson/util/bson_extract_test.cpp index 2573e6739abfe..9367f9289206a 100644 --- a/src/mongo/bson/util/bson_extract_test.cpp +++ b/src/mongo/bson/util/bson_extract_test.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include +#include +// IWYU pragma: no_include "ext/type_traits.h" #include #include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/repl/optime.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using namespace mongo; diff --git a/src/mongo/bson/util/bsoncolumn.cpp b/src/mongo/bson/util/bsoncolumn.cpp index 78808eb7c41f6..29c5124ed7009 100644 --- a/src/mongo/bson/util/bsoncolumn.cpp +++ b/src/mongo/bson/util/bsoncolumn.cpp @@ -30,10 +30,31 @@ #include "mongo/bson/util/bsoncolumn.h" #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/bson/util/bsoncolumn_util.h" +#include "mongo/bson/util/builder.h" #include "mongo/bson/util/simple8b_type_util.h" +#include "mongo/platform/decimal128.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/time_support.h" namespace mongo { using namespace bsoncolumn; @@ -49,9 +70,6 @@ constexpr int kMaxCapacity = BSONObjMaxUserSize; // Memory offset to get to BSONElement value when field name is an empty string. constexpr int kElementValueOffset = 2; -// Sentinel to indicate end index for BSONColumn Iterator. 
-constexpr size_t kEndIndex = 0xFFFFFFFFFFFFFFFF; - // Lookup table to go from Control byte (high 4 bits) to scale index. constexpr uint8_t kInvalidScaleIndex = 0xFF; constexpr std::array kControlToScaleIndex = { @@ -143,7 +161,10 @@ int BSONColumn::ElementStorage::Element::size() const { } BSONElement BSONColumn::ElementStorage::Element::element() const { - return {_buffer, _nameSize + 1, _valueSize + _nameSize + kElementValueOffset}; + return {_buffer, + _nameSize + 1, + _valueSize + _nameSize + kElementValueOffset, + BSONElement::TrustedInitTag{}}; } BSONColumn::ElementStorage::ContiguousBlock::ContiguousBlock(ElementStorage& storage) @@ -287,101 +308,64 @@ struct BSONColumn::SubObjectAllocator { bool _allowEmpty; }; -BSONColumn::Iterator::Iterator(BSONColumn& column, const char* pos, const char* end) - : _column(&column), _control(pos), _end(end) {} - -void BSONColumn::Iterator::_initialize(size_t index) { - _index = index; - - if (_isInterleavedStart(*_control)) { - _initializeInterleaving(); - return; - } - - const BSONElement* current = nullptr; - if (index < _column->_decompressed.size()) { - current = &_column->_decompressed[index]; - _state._lastValue = *current; - } - // If we are at EOO then start at end. - if (*_control == EOO) { - _handleEOO(); - return; - } - - // previous doesn't matter when we load literals - auto result = _state._loadControl(*_column, _control, _end, current); - if (!current) { - _column->_decompressed.push_back(result.element); - } - _control += result.size; +BSONColumn::Iterator::Iterator(boost::intrusive_ptr allocator, + const char* pos, + const char* end) + : _index(0), _control(pos), _end(end), _allocator(std::move(allocator)), _mode(Regular{}) { + // Initialize the iterator state to the first element + _incrementRegular(stdx::get(_mode)); } void BSONColumn::Iterator::_initializeInterleaving() { - _interleavedArrays = *_control == bsoncolumn::kInterleavedStartControlByte || - *_control == bsoncolumn::kInterleavedStartArrayRootControlByte; - _interleavedRootType = - *_control == bsoncolumn::kInterleavedStartArrayRootControlByte ? Array : Object; - _interleavedReferenceObj = BSONObj(_control + 1); + Interleaved& interleaved = _mode.emplace( + BSONObj(_control + 1), + *_control == bsoncolumn::kInterleavedStartArrayRootControlByte ? 
Array : Object, + *_control == bsoncolumn::kInterleavedStartControlByte || + *_control == bsoncolumn::kInterleavedStartArrayRootControlByte); BSONObjTraversal t( - _interleavedArrays, - _interleavedRootType, + interleaved.arrays, + interleaved.rootType, [](StringData fieldName, const BSONObj& obj, BSONType type) { return true; }, - [this](const BSONElement& elem) { - _states.emplace_back(); - _states.back()._loadLiteral(elem); + [&interleaved](const BSONElement& elem) { + interleaved.states.emplace_back(); + interleaved.states.back().loadUncompressed(elem); return true; }); - t.traverse(_interleavedReferenceObj); - uassert(6067610, "Invalid BSONColumn encoding", !_states.empty()); + t.traverse(interleaved.referenceObj); + uassert(6067610, "Invalid BSONColumn encoding", !interleaved.states.empty()); - _control += _interleavedReferenceObj.objsize() + 1; - _incrementInterleaved(); + _control += interleaved.referenceObj.objsize() + 1; + _incrementInterleaved(interleaved); } BSONColumn::Iterator& BSONColumn::Iterator::operator++() { - // We need to setup iterator state even if this is not the first time we iterate in case we need - // to decompress elements further along ++_index; - if (_states.empty()) { - _incrementRegular(); - } else { - _incrementInterleaved(); - } + stdx::visit(OverloadedVisitor{[&](Regular& regular) { _incrementRegular(regular); }, + [&](Interleaved& interleaved) { + _incrementInterleaved(interleaved); + }}, + _mode); return *this; } -BSONColumn::Iterator BSONColumn::Iterator::operator++(int) { - auto ret = *this; - operator++(); - return ret; -} - -void BSONColumn::Iterator::_incrementRegular() { - DecodingState& state = _state; +void BSONColumn::Iterator::_incrementRegular(Regular& regular) { + DecodingState& state = regular.state; - // Get pointer to current element if we are already decompressed - const BSONElement* current = - _index < _column->_decompressed.size() ? &_column->_decompressed[_index] : nullptr; - // Traverse current Simple8b block for 64bit values if it exists - if (state._decoder64 && ++state._decoder64->pos != state._decoder64->end) { - auto elem = state._loadDelta(*_column, *state._decoder64->pos, current); - if (!current) { - _column->_decompressed.emplace_back(elem); + if (auto d64 = stdx::get_if(&state.decoder)) { + // Traverse current Simple8b block for 64bit values if it exists + if (d64->pos.valid() && (++d64->pos).more()) { + _decompressed = state.loadDelta(*_allocator, *d64); + return; } - return; - } - - // Traverse current Simple8b block for 128bit values if it exists - if (state._decoder128 && ++state._decoder128->pos != state._decoder128->end) { - auto elem = state._loadDelta(*_column, *state._decoder128->pos, current); - if (!current) { - _column->_decompressed.emplace_back(elem); + } else if (auto d128 = stdx::get_if(&state.decoder)) { + // Traverse current Simple8b block for 128bit values if it exists + if (d128->pos.valid() && (++d128->pos).more()) { + _decompressed = state.loadDelta(*_allocator, *d128); + return; } - return; } // We don't have any more delta values in current block so we need to load next control byte. @@ -396,31 +380,17 @@ void BSONColumn::Iterator::_incrementRegular() { // Load new control byte if (_isInterleavedStart(*_control)) { - // Remember this position to speed up "random access" for further access. 
- _column->_maxDecodingStartPos.setIfLarger(_index, _control); - _initializeInterleaving(); return; } - auto result = state._loadControl(*_column, _control, _end, current); - if (!current) { - _column->_decompressed.emplace_back(result.element); - } - auto prevControl = _control; + auto result = state.loadControl(*_allocator, _control, _end); + _decompressed = result.element; _control += result.size; - if (result.full) { - // Remember this position to speed up "random access" for further access. - _column->_maxDecodingStartPos.setIfLarger(_index, prevControl); - } } -void BSONColumn::Iterator::_incrementInterleaved() { - // Get pointer to current element if we are already decompressed - const BSONElement* current = - _index < _column->_decompressed.size() ? &_column->_decompressed[_index] : nullptr; - +void BSONColumn::Iterator::_incrementInterleaved(Interleaved& interleaved) { // Notify the internal allocator to keep all allocations in contigous memory. That way we can // produce the full BSONObj that we need to return. - auto contiguous = _column->_elementStorage.startContiguous(); + auto contiguous = _allocator->startContiguous(); // Iterate over the reference interleaved object. We match scalar subfields with our interleaved // states in order. Internally the necessary recursion is performed and the second lambda below @@ -429,16 +399,16 @@ void BSONColumn::Iterator::_incrementInterleaved() { // cases where re-materialization of the Element wasn't required (same as previous for example). // The first lambda outputs an RAII object that is instantiated every time we recurse deeper. // This handles writing the BSONObj size and EOO bytes for subobjects. - auto stateIt = _states.begin(); - auto stateEnd = _states.end(); + auto stateIt = interleaved.states.begin(); + auto stateEnd = interleaved.states.end(); int processed = 0; BSONObjTraversal t( - _interleavedArrays, - _interleavedRootType, + interleaved.arrays, + interleaved.rootType, [this](StringData fieldName, const BSONObj& obj, BSONType type) { // Called every time we recurse into a subobject. It makes sure we write the size and // EOO bytes. - return SubObjectAllocator(_column->_elementStorage, fieldName, obj, type); + return SubObjectAllocator(*_allocator, fieldName, obj, type); }, [this, &stateIt, &stateEnd, &processed](const BSONElement& referenceField) { // Called for every scalar field in the reference interleaved BSONObj. We have as many @@ -448,16 +418,18 @@ void BSONColumn::Iterator::_incrementInterleaved() { // Remember the iterator position before writing anything. This is to detect that // nothing was written and we need to copy the element into the allocator position. - auto allocatorPosition = _column->_elementStorage.position(); + auto allocatorPosition = _allocator->position(); BSONElement elem; // Load deltas if decoders are setup. nullptr is always used for "current". So even if // we are iterating the second time we are going to allocate new memory. This is a // tradeoff to avoid a decoded list of literals for every state that will only be used // if we iterate multiple times. 
- if (state._decoder64 && ++state._decoder64->pos != state._decoder64->end) { - elem = state._loadDelta(*_column, *state._decoder64->pos, nullptr); - } else if (state._decoder128 && ++state._decoder128->pos != state._decoder128->end) { - elem = state._loadDelta(*_column, *state._decoder128->pos, nullptr); + if (auto d64 = stdx::get_if(&state.decoder); + d64 && d64->pos.valid() && (++d64->pos).more()) { + elem = state.loadDelta(*_allocator, *d64); + } else if (auto d128 = stdx::get_if(&state.decoder); + d128 && d128->pos.valid() && (++d128->pos).more()) { + elem = state.loadDelta(*_allocator, *d128); } else if (*_control == EOO) { // Decoders are exhausted and the next control byte was EOO then we should exit // interleaved mode. Return false to end the recursion early. @@ -466,7 +438,7 @@ void BSONColumn::Iterator::_incrementInterleaved() { } else { // Decoders are exhausted so we need to load the next control byte that by // definition belong to this decoder state as we iterate in the same known order. - auto result = state._loadControl(*_column, _control, _end, nullptr); + auto result = state.loadControl(*_allocator, _control, _end); _control += result.size; elem = result.element; @@ -476,23 +448,23 @@ void BSONColumn::Iterator::_incrementInterleaved() { auto fieldName = referenceField.fieldNameStringData(); if (!elem.eoo() && elem.fieldNameStringData() != fieldName) { auto allocatedElem = - _column->_elementStorage.allocate(elem.type(), fieldName, elem.valuesize()); + _allocator->allocate(elem.type(), fieldName, elem.valuesize()); memcpy(allocatedElem.value(), elem.value(), elem.valuesize()); elem = allocatedElem.element(); - state._lastValue = elem; + state.lastValue = elem; } } // If the encoded element wasn't stored in the allocator above we need to copy it here // as we're building a full BSONObj. if (!elem.eoo()) { - if (_column->_elementStorage.position() == allocatorPosition) { + if (_allocator->position() == allocatorPosition) { auto size = elem.size(); - memcpy(_column->_elementStorage.allocate(size), elem.rawdata(), size); + memcpy(_allocator->allocate(size), elem.rawdata(), size); } // Remember last known value, needed for further decompression. - state._lastValue = elem; + state.lastValue = elem; } ++processed; @@ -500,28 +472,22 @@ void BSONColumn::Iterator::_incrementInterleaved() { }); // Traverse interleaved reference object, we will match interleaved states with literals. - auto res = t.traverse(_interleavedReferenceObj); + auto res = t.traverse(interleaved.referenceObj); if (!res) { // Exit interleaved mode and load as regular. Re-instantiate the state and set last known // value. - _states.clear(); uassert(6067604, "Invalid BSON Column interleaved encoding", processed == 0); - _state = {}; - _state._lastValue = _column->_decompressed[_index - 1]; - - _incrementRegular(); + // This invalidates 'interleaved' reference, may no longer be dereferenced. + Regular& regular = _mode.emplace(); + get<0>(regular.state.decoder).deltaOfDelta = false; + regular.state.lastValue = _decompressed; + _incrementRegular(regular); return; } // There should have been as many interleaved states as scalar fields. uassert(6067605, "Invalid BSON Column interleaved encoding", stateIt == stateEnd); - // If this element has been decompressed in a previous iteration we don't need to store it in - // our decompressed list. 
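For orientation, here is a purely conceptual illustration of what interleaved mode achieves: each scalar field of the reference object gets its own decoded stream, and every iterator increment pulls one value per field and re-assembles a document in reference-object field order. This sketch uses plain standard containers and is not the mongo implementation, which materializes real BSONObjs into contiguous ElementStorage memory:

```cpp
#include <cstddef>
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <vector>

int main() {
    // Scalar fields of the (hypothetical) reference object, in traversal order.
    std::vector<std::string> referenceFields{"temp", "pressure"};

    // One decoded stream per field; nullopt stands in for a skipped value.
    std::map<std::string, std::vector<std::optional<int>>> streams{
        {"temp", {20, 21, std::nullopt}},
        {"pressure", {1012, 1013, 1014}},
    };

    // Each "increment" re-zips one value per field back into a document.
    for (size_t i = 0; i < 3; ++i) {
        std::cout << "{ ";
        for (const auto& field : referenceFields) {
            auto v = streams[field][i];
            if (v)
                std::cout << field << ": " << *v << ' ';  // skipped fields are omitted
        }
        std::cout << "}\n";
    }
}
```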
- if (current) { - return; - } - // Store built BSONObj in the decompressed list const char* objdata = contiguous.done(); BSONElement obj(objdata); @@ -531,13 +497,14 @@ void BSONColumn::Iterator::_incrementInterleaved() { obj = BSONElement(); } - _column->_decompressed.emplace_back(obj); + _decompressed = obj; } void BSONColumn::Iterator::_handleEOO() { ++_control; + uassert(7482200, "Invalid BSONColumn encoding", _control == _end); _index = kEndIndex; - _column->_fullyDecompressed = true; + _decompressed = {}; } bool BSONColumn::Iterator::_isLiteral(char control) { @@ -554,96 +521,74 @@ uint8_t BSONColumn::Iterator::_numSimple8bBlocks(char control) { return (control & 0x0F) + 1; } -bool BSONColumn::Iterator::operator==(const Iterator& rhs) const { - return _index == rhs._index; -} -bool BSONColumn::Iterator::operator!=(const Iterator& rhs) const { - return !operator==(rhs); -} - -BSONColumn::Iterator BSONColumn::Iterator::moveTo(BSONColumn& column) { - auto copy = *this; - _column = nullptr; - copy._column = &column; - return copy; -} - -void BSONColumn::Iterator::DecodingState::_loadLiteral(const BSONElement& elem) { - _lastType = elem.type(); - _deltaOfDelta = usesDeltaOfDelta(_lastType); - switch (_lastType) { - case String: - case Code: - _lastEncodedValue128 = - Simple8bTypeUtil::encodeString(elem.valueStringData()).value_or(0); - break; - case BinData: { - int size; - const char* binary = elem.binData(size); - _lastEncodedValue128 = Simple8bTypeUtil::encodeBinary(binary, size).value_or(0); - break; +void BSONColumn::Iterator::DecodingState::loadUncompressed(const BSONElement& elem) { + BSONType type = elem.type(); + if (uses128bit(type)) { + auto& d128 = decoder.emplace(); + switch (type) { + case String: + case Code: + d128.lastEncodedValue = + Simple8bTypeUtil::encodeString(elem.valueStringData()).value_or(0); + break; + case BinData: { + int size; + const char* binary = elem.binData(size); + d128.lastEncodedValue = Simple8bTypeUtil::encodeBinary(binary, size).value_or(0); + break; + } + case NumberDecimal: + d128.lastEncodedValue = Simple8bTypeUtil::encodeDecimal128(elem._numberDecimal()); + break; + default: + MONGO_UNREACHABLE; + }; + } else { + auto& d64 = decoder.emplace(); + d64.deltaOfDelta = usesDeltaOfDelta(type); + switch (type) { + case jstOID: + d64.lastEncodedValue = Simple8bTypeUtil::encodeObjectId(elem.__oid()); + break; + case Date: + d64.lastEncodedValue = elem.date().toMillisSinceEpoch(); + break; + case Bool: + d64.lastEncodedValue = elem.boolean(); + break; + case NumberInt: + d64.lastEncodedValue = elem._numberInt(); + break; + case NumberLong: + d64.lastEncodedValue = elem._numberLong(); + break; + case bsonTimestamp: + d64.lastEncodedValue = elem.timestampValue(); + break; + default: + break; + }; + if (d64.deltaOfDelta) { + d64.lastEncodedValueForDeltaOfDelta = d64.lastEncodedValue; + d64.lastEncodedValue = 0; } - case jstOID: - _lastEncodedValue64 = Simple8bTypeUtil::encodeObjectId(elem.__oid()); - break; - case Date: - _lastEncodedValue64 = elem.date().toMillisSinceEpoch(); - break; - case Bool: - _lastEncodedValue64 = elem.boolean(); - break; - case NumberInt: - _lastEncodedValue64 = elem._numberInt(); - break; - case NumberLong: - _lastEncodedValue64 = elem._numberLong(); - break; - case bsonTimestamp: - _lastEncodedValue64 = elem.timestampValue(); - break; - case NumberDecimal: - _lastEncodedValue128 = Simple8bTypeUtil::encodeDecimal128(elem._numberDecimal()); - break; - default: - break; - }; - if (_deltaOfDelta) { - _lastEncodedValueForDeltaOfDelta 
= _lastEncodedValue64; - _lastEncodedValue64 = 0; } - _lastValue = elem; + + lastValue = elem; } BSONColumn::Iterator::DecodingState::LoadControlResult -BSONColumn::Iterator::DecodingState::_loadControl(BSONColumn& column, - const char* buffer, - const char* end, - const BSONElement* current) { +BSONColumn::Iterator::DecodingState::loadControl(ElementStorage& allocator, + const char* buffer, + const char* end) { // Load current control byte, it can be either a literal or Simple-8b deltas uint8_t control = *buffer; if (_isLiteral(control)) { // Load BSONElement from the literal and set last encoded in case we need to calculate // deltas from this literal BSONElement literalElem(buffer, 1, -1); - _loadLiteral(literalElem); - - _decoder64 = boost::none; - _decoder128 = boost::none; - _lastValue = literalElem; - - return {literalElem, literalElem.size(), true}; - } - - // Simple-8b delta block, load its scale factor and validate for sanity - _scaleIndex = kControlToScaleIndex[(control & 0xF0) >> 4]; - uassert(6067606, "Invalid control byte in BSON Column", _scaleIndex != kInvalidScaleIndex); - - // If Double, scale last value according to this scale factor - auto type = _lastValue.type(); - if (type == NumberDouble) { - auto encoded = Simple8bTypeUtil::encodeDouble(_lastValue._numberDouble(), _scaleIndex); - uassert(6067607, "Invalid double encoding in BSON Column", encoded); - _lastEncodedValue64 = *encoded; + loadUncompressed(literalElem); + return {literalElem, literalElem.size()}; } // Setup decoder for this range of Simple-8b blocks @@ -654,63 +599,81 @@ BSONColumn::Iterator::DecodingState::_loadControl(BSONColumn& column, // Instantiate decoder and load first value, every Simple-8b block should have at least one // value BSONElement deltaElem; - if (!uses128bit(type)) { - // We can read the last known value from the decoder iterator even as it has reached end. - boost::optional lastSimple8bValue = _decoder64 ? *_decoder64->pos : 0; - _decoder64.emplace(buffer + 1, size, lastSimple8bValue); - deltaElem = _loadDelta(column, *_decoder64->pos, current); - } else { - // We can read the last known value from the decoder iterator even as it has reached end. - boost::optional lastSimple8bValue = - _decoder128 ? *_decoder128->pos : uint128_t(0); - _decoder128.emplace(buffer + 1, size, lastSimple8bValue); - deltaElem = _loadDelta(column, *_decoder128->pos, current); - } - - return {deltaElem, size + 1, false}; -} - -BSONElement BSONColumn::Iterator::DecodingState::_loadDelta(BSONColumn& column, - const boost::optional& delta, - const BSONElement* current) { + stdx::visit(OverloadedVisitor{ + [&](DecodingState::Decoder64& d64) { + // Simple-8b delta block, load its scale factor and validate for sanity + d64.scaleIndex = kControlToScaleIndex[(control & 0xF0) >> 4]; + uassert(6067606, + "Invalid control byte in BSON Column", + d64.scaleIndex != kInvalidScaleIndex); + + // If Double, scale last value according to this scale factor + auto type = lastValue.type(); + if (type == NumberDouble) { + auto encoded = Simple8bTypeUtil::encodeDouble(lastValue._numberDouble(), + d64.scaleIndex); + uassert(6067607, "Invalid double encoding in BSON Column", encoded); + d64.lastEncodedValue = *encoded; + } + + // We can read the last known value from the decoder iterator even as it has + // reached end. + boost::optional lastSimple8bValue = + d64.pos.valid() ? 
*d64.pos : 0; + d64.pos = Simple8b(buffer + 1, size, lastSimple8bValue).begin(); + deltaElem = loadDelta(allocator, d64); + }, + [&](DecodingState::Decoder128& d128) { + // We can read the last known value from the decoder iterator even as it has + // reached end. + boost::optional lastSimple8bValue = + d128.pos.valid() ? *d128.pos : uint128_t(0); + d128.pos = Simple8b(buffer + 1, size, lastSimple8bValue).begin(); + deltaElem = loadDelta(allocator, d128); + }}, + decoder); + + return {deltaElem, size + 1}; +} + +BSONElement BSONColumn::Iterator::DecodingState::loadDelta(ElementStorage& allocator, + Decoder64& d64) { + const auto& delta = *d64.pos; // boost::none represent skip, just append EOO BSONElement. if (!delta) { return BSONElement(); } // If we have a zero delta no need to allocate a new Element, we can just use previous. - if (!_deltaOfDelta && *delta == 0) { - return _lastValue; + if (!d64.deltaOfDelta && *delta == 0) { + return lastValue; } // Expand delta or delta-of-delta as last encoded. - _lastEncodedValue64 = expandDelta(_lastEncodedValue64, Simple8bTypeUtil::decodeInt64(*delta)); - if (_deltaOfDelta) { - _lastEncodedValueForDeltaOfDelta = - expandDelta(_lastEncodedValueForDeltaOfDelta, _lastEncodedValue64); + d64.lastEncodedValue = expandDelta(d64.lastEncodedValue, Simple8bTypeUtil::decodeInt64(*delta)); + if (d64.deltaOfDelta) { + d64.lastEncodedValueForDeltaOfDelta = + expandDelta(d64.lastEncodedValueForDeltaOfDelta, d64.lastEncodedValue); } - // Decoder state is now setup, no need to create BSONElement if already exist decompressed, - // return dummy EOO element. - if (current) { - _lastValue = *current; - return *current; - } - // Allocate a new BSONElement that fits same value size as previous - ElementStorage::Element elem = column._elementStorage.allocate( - _lastType, _lastValue.fieldNameStringData(), _lastValue.valuesize()); + // Decoder state is now setup, materialize new value. We allocate a new BSONElement that fits + // same value size as previous + BSONType type = lastValue.type(); + ElementStorage::Element elem = + allocator.allocate(type, lastValue.fieldNameStringData(), lastValue.valuesize()); // Write value depending on type - int64_t valueToWrite = _deltaOfDelta ? _lastEncodedValueForDeltaOfDelta : _lastEncodedValue64; - switch (_lastType) { + int64_t valueToWrite = + d64.deltaOfDelta ? d64.lastEncodedValueForDeltaOfDelta : d64.lastEncodedValue; + switch (type) { case NumberDouble: DataView(elem.value()) .write>( - Simple8bTypeUtil::decodeDouble(valueToWrite, _scaleIndex)); + Simple8bTypeUtil::decodeDouble(valueToWrite, d64.scaleIndex)); break; case jstOID: { Simple8bTypeUtil::decodeObjectIdInto( - elem.value(), valueToWrite, _lastValue.__oid().getInstanceUnique()); + elem.value(), valueToWrite, lastValue.__oid().getInstanceUnique()); } break; case Date: case NumberLong: @@ -738,13 +701,13 @@ BSONElement BSONColumn::Iterator::DecodingState::_loadDelta(BSONColumn& column, MONGO_UNREACHABLE; } - _lastValue = elem.element(); - return _lastValue; + lastValue = elem.element(); + return lastValue; } -BSONElement BSONColumn::Iterator::DecodingState::_loadDelta(BSONColumn& column, - const boost::optional& delta, - const BSONElement* current) { +BSONElement BSONColumn::Iterator::DecodingState::loadDelta(ElementStorage& allocator, + Decoder128& d128) { + const auto& delta = *d128.pos; // boost::none represent skip, just append EOO BSONElement. 
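The loadDelta overloads above accumulate each decoded delta onto the last encoded value; for delta-of-delta types the accumulated delta then feeds a second accumulator that produces the materialized value. A hedged arithmetic sketch of that expansion (the zig-zag sign handling done by Simple8bTypeUtil::decodeInt64 is omitted, and expandDelta is reduced to plain addition):

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Simplified stand-in for the real expandDelta helper.
int64_t expandDelta(int64_t last, int64_t delta) {
    return last + delta;
}

int main() {
    // nullopt represents a skipped value in the Simple8b stream.
    std::vector<std::optional<int64_t>> deltas{5, 5, std::nullopt, 5};

    // Plain delta stream starting from an uncompressed literal of 100.
    int64_t lastEncoded = 100;
    for (auto d : deltas) {
        if (!d) { std::cout << "skip\n"; continue; }
        lastEncoded = expandDelta(lastEncoded, *d);
        std::cout << "delta value: " << lastEncoded << '\n';  // 105, 110, 115
    }

    // Delta-of-delta stream (e.g. timestamps): the decoded delta feeds a second accumulator.
    int64_t lastDelta = 0;    // corresponds to d64.lastEncodedValue
    int64_t lastValue = 1000; // corresponds to d64.lastEncodedValueForDeltaOfDelta
    for (auto d : deltas) {
        if (!d) continue;
        lastDelta = expandDelta(lastDelta, *d);
        lastValue = expandDelta(lastValue, lastDelta);
        std::cout << "delta-of-delta value: " << lastValue << '\n';  // 1005, 1015, 1030
    }
}
```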
if (!delta) { return BSONElement(); @@ -752,31 +715,24 @@ BSONElement BSONColumn::Iterator::DecodingState::_loadDelta(BSONColumn& column, // If we have a zero delta no need to allocate a new Element, we can just use previous. if (*delta == 0) { - return _lastValue; + return lastValue; } // Expand delta as last encoded. - _lastEncodedValue128 = - expandDelta(_lastEncodedValue128, Simple8bTypeUtil::decodeInt128(*delta)); - - // Decoder state is now setup, no need to create BSONElement if already exist decompressed, - // return dummy EOO element. - if (current) { - _lastValue = *current; - return *current; - } + d128.lastEncodedValue = + expandDelta(d128.lastEncodedValue, Simple8bTypeUtil::decodeInt128(*delta)); - // Write value depending on type + // Decoder state is now setup, write value depending on type auto elemFn = [&]() -> ElementStorage::Element { - switch (_lastType) { + BSONType type = lastValue.type(); + switch (type) { case String: case Code: { Simple8bTypeUtil::SmallString ss = - Simple8bTypeUtil::decodeString(_lastEncodedValue128); + Simple8bTypeUtil::decodeString(d128.lastEncodedValue); // Add 5 bytes to size, strings begin with a 4 byte count and ends with a null // terminator - auto elem = column._elementStorage.allocate( - _lastType, _lastValue.fieldNameStringData(), ss.size + 5); + auto elem = allocator.allocate(type, lastValue.fieldNameStringData(), ss.size + 5); // Write count, size includes null terminator DataView(elem.value()).write>(ss.size + 1); // Write string value @@ -786,22 +742,22 @@ BSONElement BSONColumn::Iterator::DecodingState::_loadDelta(BSONColumn& column, return elem; } case BinData: { - auto elem = column._elementStorage.allocate( - _lastType, _lastValue.fieldNameStringData(), _lastValue.valuesize()); + auto elem = allocator.allocate( + type, lastValue.fieldNameStringData(), lastValue.valuesize()); // The first 5 bytes in binData is a count and subType, copy them from previous - memcpy(elem.value(), _lastValue.value(), 5); + memcpy(elem.value(), lastValue.value(), 5); Simple8bTypeUtil::decodeBinary( - _lastEncodedValue128, elem.value() + 5, _lastValue.valuestrsize()); + d128.lastEncodedValue, elem.value() + 5, lastValue.valuestrsize()); return elem; } case NumberDecimal: { - auto elem = column._elementStorage.allocate( - _lastType, _lastValue.fieldNameStringData(), _lastValue.valuesize()); - Decimal128 d128 = Simple8bTypeUtil::decodeDecimal128(_lastEncodedValue128); - Decimal128::Value d128Val = d128.getValue(); - DataView(elem.value()).write>(d128Val.low64); + auto elem = allocator.allocate( + type, lastValue.fieldNameStringData(), lastValue.valuesize()); + Decimal128 dec128 = Simple8bTypeUtil::decodeDecimal128(d128.lastEncodedValue); + Decimal128::Value dec128Val = dec128.getValue(); + DataView(elem.value()).write>(dec128Val.low64); DataView(elem.value() + sizeof(long long)) - .write>(d128Val.high64); + .write>(dec128Val.high64); return elem; } default: @@ -810,8 +766,18 @@ BSONElement BSONColumn::Iterator::DecodingState::_loadDelta(BSONColumn& column, } }(); - _lastValue = elemFn.element(); - return _lastValue; + lastValue = elemFn.element(); + return lastValue; +} + +BSONColumn::Iterator::Interleaved::Interleaved(BSONObj refObj, + BSONType referenceObjType, + bool interleavedArrays) + : referenceObj(std::move(refObj)), arrays(interleavedArrays), rootType(referenceObjType) {} + +BSONColumn::BSONColumn(const char* buffer, size_t size) + : _binary(buffer), _size(size), _allocator(new ElementStorage()) { + _initialValidate(); } 
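With the constructors above and the iterator changes earlier in this patch, read-side usage of the reworked interface looks roughly like the following sketch (hypothetical function; 'binElem' is assumed to be a BinData element of subtype Column obtained elsewhere, e.g. from a time-series bucket document):

```cpp
#include "mongo/bson/util/bsoncolumn.h"

void readColumn(const mongo::BSONElement& binElem) {
    mongo::BSONColumn col(binElem);

    // STL-style iteration; an EOO element represents a skipped (missing) value.
    for (const mongo::BSONElement& elem : col) {
        if (!elem.eoo()) {
            // use elem; it remains valid while 'col' (or a release()d storage handle) lives
        }
    }

    // Equivalent non-STL iteration using more().
    for (auto it = col.begin(); it.more(); ++it) {
        mongo::BSONElement elem = *it;
        (void)elem;
    }

    // Random access and size() now simply iterate from the start, so both are O(N).
    boost::optional<mongo::BSONElement> tenth = col[10];
    size_t count = col.size();
    (void)tenth;
    (void)count;
}
```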
BSONColumn::BSONColumn(BSONElement bin) { @@ -820,75 +786,44 @@ BSONColumn::BSONColumn(BSONElement bin) { bin.type() == BSONType::BinData && bin.binDataType() == BinDataType::Column); _binary = bin.binData(_size); - _name = bin.fieldNameStringData().toString(); - _init(); + _allocator = new ElementStorage(); + _initialValidate(); } -BSONColumn::BSONColumn(BSONBinData bin, StringData name) { +BSONColumn::BSONColumn(BSONBinData bin) + : BSONColumn(static_cast(bin.data), bin.length) { tassert(6179300, "Invalid BSON type for column", bin.type == BinDataType::Column); - _binary = static_cast(bin.data); - _size = bin.length; - _name = name.toString(); - _init(); } -void BSONColumn::_init() { +void BSONColumn::_initialValidate() { uassert(6067609, "Invalid BSON Column encoding", _size > 0); - _maxDecodingStartPos._control = _binary; } -BSONColumn::Iterator BSONColumn::begin() { - Iterator it{*this, _binary, _binary + _size}; - it._initialize(0); - return it; +BSONColumn::Iterator BSONColumn::begin() const { + return {_allocator, _binary, _binary + _size}; } -BSONColumn::Iterator BSONColumn::end() { - Iterator it{*this, _binary + _size, _binary + _size}; - it._index = kEndIndex; - return it; +BSONColumn::Iterator BSONColumn::end() const { + return {}; } -boost::optional BSONColumn::operator[](size_t index) { - // If index is already decompressed, we can just return the element - if (index < _decompressed.size()) { - return _decompressed[index]; - } - - // No more elements to be found if we are fully decompressed, return EOO - if (_fullyDecompressed) - return boost::none; - - // We can begin iterating from last known literal - Iterator it{*this, _maxDecodingStartPos._control, _binary + _size}; - it._initialize(_maxDecodingStartPos._index); - +boost::optional BSONColumn::operator[](size_t index) const { // Traverse until we reach desired index or end + auto it = begin(); auto e = end(); - for (size_t i = _maxDecodingStartPos._index; it != e && i < index; ++it, ++i) { + for (size_t i = 0; it != e && i < index; ++it, ++i) { } - // Return EOO if not found - if (it == e) + // Return none if out of bounds + if (it == e) { return boost::none; + } return *it; } -size_t BSONColumn::size() { - if (_fullyDecompressed) - return _decompressed.size(); - - // We can begin iterating from last known literal - Iterator it{*this, _maxDecodingStartPos._control, _binary + _size}; - it._initialize(_maxDecodingStartPos._index); - - // Traverse until we reach end - for (auto e = end(); it != e; ++it) { - } - - invariant(_fullyDecompressed); - return _decompressed.size(); +size_t BSONColumn::size() const { + return std::distance(begin(), end()); } bool BSONColumn::contains_forTest(BSONType elementType) const { @@ -928,11 +863,10 @@ bool BSONColumn::contains_forTest(BSONType elementType) const { return false; } -void BSONColumn::DecodingStartPosition::setIfLarger(size_t index, const char* control) { - if (_index < index) { - _control = control; - _index = index; - } +boost::intrusive_ptr BSONColumn::release() { + auto previous = _allocator; + _allocator = new ElementStorage(); + return previous; } } // namespace mongo diff --git a/src/mongo/bson/util/bsoncolumn.h b/src/mongo/bson/util/bsoncolumn.h index 2285f878d35f0..9353e5f9362d9 100644 --- a/src/mongo/bson/util/bsoncolumn.h +++ b/src/mongo/bson/util/bsoncolumn.h @@ -29,58 +29,91 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include 
"mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/util/simple8b.h" - -#include -#include -#include +#include "mongo/platform/int128.h" +#include "mongo/stdx/variant.h" namespace mongo { /** - * The BSONColumn class represents a reference to a BSONElement of BinDataType 7, which can - * efficiently store any BSONArray and also allows for missing values. At a high level, two - * optimizations are applied: - * - implied field names: do not store decimal keys representing index keys. - * - delta compression using Simple-8b: store difference between subsequent scalars of the same - * type + * The BSONColumn class represents an implementation to interpret a BSONElement of BinDataType 7, + * which can efficiently store any BSONArray in a compact representation. The format has the + * following high-level features and capabilities: + * - implied field names: decimal keys representing index keys are not stored. + * - type specific delta/delta-of-delta compression stored using Simple-8b: difference between + * subsequent scalars of the same type are stored with as few bits as possible. + * - doubles are scaled and rounded to nearest integer for efficient storage. + * - internal encoding for missing values. + * - run-length-encoding for efficient storage of large number of repeated values + * - object/array compression where scalars are internally stored as separate interleaved + * BSONColumn compressed binary streams. * - * The BSONColumn will not take ownership of the BinData element, but otherwise implements - * an interface similar to BSONObj. Because iterators over the BSONColumn need to rematerialize - * deltas, they use additional storage owned by the BSONColumn for this. As all iterators will - * create new delta's in the same order, they share a single ElementStore, with a worst-case memory - * usage bounded to a total size on the order of that of the size of the expanded BSONColumn. + * The BSONColumn will not take ownership of the provided binary, but otherwise implements an + * interface similar to BSONObj. * - * All iterators are invalidated when moving the BSONColumn. + * Iterators over the BSONColumn need to materialize BSONElement from deltas and use additional + * storage owned by the BSONColumn. All BSONElements returned remain valid while the BSONColumn is + * kept in scope. Multiple passes grows memory usage which is not free'd until the BSONColumn goes + * out of scope or the release() function is called. + * + * Thread safety: The BSONColumn class is generally NOT thread-safe, unless declared otherwise. This + * also applies to functions declared 'const'. */ class BSONColumn { +private: + class ElementStorage; + public: - BSONColumn(BSONElement bin); - BSONColumn(BSONBinData bin, StringData name); + BSONColumn(const char* buffer, size_t size); + explicit BSONColumn(BSONElement bin); + explicit BSONColumn(BSONBinData bin); /** - * Forward iterator type to access BSONElement from BSONColumn. + * Input iterator type to access BSONElement from BSONColumn. + * + * A default-constructed BSONElement (EOO type) represents a missing value. Returned + * BSONElements are owned by the BSONColumn instance and should not be kept after the BSONColumn + * instance goes out of scope. * - * Default-constructed BSONElement (EOO type) represent missing value. 
- * Returned BSONElement are owned by BSONColumn instance and should not be kept after the - * BSONColumn instance goes out of scope. + * Iterator can be used either as an STL iterator with begin() and end() or as a non-STL + * iterator via begin() and incrementing until more() returns false. */ class Iterator { public: friend class BSONColumn; // typedefs expected in iterators - using iterator_category = std::forward_iterator_tag; + using iterator_category = std::input_iterator_tag; using difference_type = ptrdiff_t; using value_type = BSONElement; using pointer = const BSONElement*; using reference = const BSONElement&; + // Constructs an end iterator + Iterator() = default; + reference operator*() const { - return _column->_decompressed.at(_index); + return _decompressed; } pointer operator->() const { return &operator*(); @@ -89,32 +122,29 @@ class BSONColumn { // pre-increment operator Iterator& operator++(); - // post-increment operator - Iterator operator++(int); - - bool operator==(const Iterator& rhs) const; - bool operator!=(const Iterator& rhs) const; + bool operator==(const Iterator& rhs) const { + return _index == rhs._index; + } + bool operator!=(const Iterator& rhs) const { + return !operator==(rhs); + } - // Move this Iterator to a new BSONColumn instance. Should only be used when moving - // BSONColumn instances and we want to re-attach the iterator to the new instance without - // losing position - Iterator moveTo(BSONColumn& column); + /** + * Returns true if iterator may be incremented. Equivalent to comparing not equal with the + * end iterator. + */ + bool more() const { + return _control != _end; + } private: - Iterator(BSONColumn& column, const char* pos, const char* end); - - // Initializes Iterator and makes it ready for iteration. Provided index must be 0 or point - // to a full literal. - void _initialize(size_t index); + // Constructs a begin iterator + Iterator(boost::intrusive_ptr allocator, const char* pos, const char* end); // Initialize sub-object interleaving from current control byte position. Must be on a // interleaved start byte. void _initializeInterleaving(); - // Helpers to increment the iterator in regular and interleaved mode. - void _incrementRegular(); - void _incrementInterleaved(); - // Handles EOO when in regular mode. Iterator is set to end. void _handleEOO(); @@ -127,31 +157,23 @@ class BSONColumn { // Returns number of Simple-8b blocks from control byte static uint8_t _numSimple8bBlocks(char control); - // Pointer to BSONColumn this Iterator is created from, this will be stale when moving the - // BSONColumn. All iterators are invalidated on move! - BSONColumn* _column; + // Sentinel to represent end iterator + static constexpr uint32_t kEndIndex = 0xFFFFFFFF; + + // Current iterator value + BSONElement _decompressed; // Current iterator position - size_t _index = 0; + uint32_t _index = kEndIndex; // Current control byte on iterator position - const char* _control; + const char* _control = nullptr; // End of BSONColumn memory block, we may not dereference any memory past this. - const char* _end; - - // Helper to create Simple8b decoding iterators for 64bit and 128bit value types. - // previousValue is used in case the first Simple8b block is RLE and this value will then be - // used for the RLE repeat. 
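The header comments above note that materialized BSONElements are backed by the column's internal ElementStorage and that repeated passes grow that storage until release() is called. A hedged sketch of using release() to keep a first pass's elements alive independently of later passes (hypothetical helper name):

```cpp
#include <vector>

#include "mongo/bson/util/bsoncolumn.h"

void keepFirstPass(mongo::BSONColumn& col) {
    std::vector<mongo::BSONElement> firstPass;
    for (const auto& elem : col) {
        if (!elem.eoo())
            firstPass.push_back(elem);  // points into col's current ElementStorage
    }

    // Detach the storage backing 'firstPass'. The elements stay valid for as long as
    // 'storage' is kept alive; subsequent passes over 'col' allocate into fresh storage.
    auto storage = col.release();

    size_t secondPassCount = col.size();  // re-materializes into the new allocator
    (void)secondPassCount;
    (void)storage;
}
```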
- template - struct Decoder { - Decoder(const char* buf, size_t size, const boost::optional& previousValue) - : s8b(buf, size, previousValue), pos(s8b.begin()), end(s8b.end()) {} - - Simple8b s8b; - typename Simple8b::Iterator pos; - typename Simple8b::Iterator end; - }; + const char* _end = nullptr; + + // Allocator to use when materializing elements + boost::intrusive_ptr _allocator; /** * Decoding state for decoding compressed binary into BSONElement. It is detached from the @@ -159,74 +181,100 @@ class BSONColumn { * states. */ struct DecodingState { + DecodingState(); + + /** + * Internal decoding state for types using 64bit aritmetic + */ + struct Decoder64 { + Decoder64(); + + Simple8b::Iterator pos; + int64_t lastEncodedValue = 0; + int64_t lastEncodedValueForDeltaOfDelta = 0; + uint8_t scaleIndex; + bool deltaOfDelta; + }; + + /** + * Internal decoding state for types using 128bit aritmetic + */ + struct Decoder128 { + Simple8b::Iterator pos; + int128_t lastEncodedValue = 0; + }; + struct LoadControlResult { BSONElement element; int size; - bool full; }; // Loads a literal - void _loadLiteral(const BSONElement& elem); + void loadUncompressed(const BSONElement& elem); // Loads current control byte - LoadControlResult _loadControl(BSONColumn& column, - const char* buffer, - const char* end, - const BSONElement* current); + LoadControlResult loadControl(ElementStorage& allocator, + const char* buffer, + const char* end); // Loads delta value - BSONElement _loadDelta(BSONColumn& column, - const boost::optional& delta, - const BSONElement* current); - BSONElement _loadDelta(BSONColumn& column, - const boost::optional& delta, - const BSONElement* current); - - // Decoders, only one should be instantiated at a time. - boost::optional> _decoder64; - boost::optional> _decoder128; + BSONElement loadDelta(ElementStorage& allocator, Decoder64& decoder); + BSONElement loadDelta(ElementStorage& allocator, Decoder128& decoder); // Last encoded values used to calculate delta and delta-of-delta - BSONType _lastType; - bool _deltaOfDelta; - BSONElement _lastValue; - int64_t _lastEncodedValue64 = 0; - int64_t _lastEncodedValueForDeltaOfDelta = 0; - int128_t _lastEncodedValue128 = 0; - - // Current scale index - uint8_t _scaleIndex; + BSONElement lastValue; + stdx::variant decoder = Decoder64{}; + }; + + /** + * Internal state for regular decoding mode (decoding of scalars) + */ + struct Regular { + DecodingState state; }; - // Decoding states. Interleaved mode is active when '_states' is not empty. When in regular - // mode we use '_state'. - DecodingState _state; - std::vector _states; + /** + * Internal state for interleaved decoding mode (decoding of objects/arrays) + */ + struct Interleaved { + Interleaved(BSONObj refObj, BSONType referenceObjType, bool interleavedArrays); + + std::vector states; - // Interleaving reference object read when encountered the interleaving start control byte. - // We setup a decoding state for each scalar field in this object. The object hierarchy is - // used to re-construct with full objects with the correct hierachy to the user. - BSONObj _interleavedReferenceObj; + // Interleaving reference object read when encountered the interleaving start control + // byte. We setup a decoding state for each scalar field in this object. The object + // hierarchy is used to re-construct with full objects with the correct hierachy to the + // user. 
+ BSONObj referenceObj; - // Indicates if decoding states should be opened when encountering arrays - bool _interleavedArrays; + // Indicates if decoding states should be opened when encountering arrays + bool arrays; + + // Type for root object/reference object. May be Object or Array. + BSONType rootType; + }; + + // Helpers to increment the iterator in regular and interleaved mode. + void _incrementRegular(Regular& regular); + void _incrementInterleaved(Interleaved& interleaved); - // Type for root object/reference object. May be Object or Array. - BSONType _interleavedRootType; + stdx::variant _mode = Regular{}; }; /** - * Forward iterator access. + * Input iterator access. * * Iterator value is EOO when element is skipped. * * Iterators materialize compressed BSONElement as they iterate over the compressed binary. - * It is NOT safe to do this from multiple threads concurrently. + * Grows memory usage for this BSONColumn. + * + * It is NOT safe to call this or iterate from multiple threads concurrently. * * Throws if invalid encoding is encountered. */ - Iterator begin(); - Iterator end(); + Iterator begin() const; + Iterator end() const; /** * Element lookup by index @@ -234,34 +282,28 @@ class BSONColumn { * Returns EOO if index represent skipped element. * Returns boost::none if index is out of bounds. * - * O(1) time complexity if element has been previously accessed - * O(N) time complexity otherwise + * O(N) time complexity + * + * Materializes BSONElement as needed and grows memory usage for this BSONColumn. * - * Materializes compressed BSONElement as needed. It is NOT safe to do this from multiple - * threads concurrently. + * It is NOT safe to call this from multiple threads concurrently. * * Throws if invalid encoding is encountered. */ - boost::optional operator[](size_t index); + boost::optional operator[](size_t index) const; /** * Number of elements stored in this BSONColumn * - * O(1) time complexity if BSONColumn is fully decompressed (iteration reached end). - * O(N) time complexity otherwise, will fully decompress BSONColumn. + * O(N) time complexity * - * * Throws if invalid encoding is encountered. - */ - size_t size(); + * Materializes BSONElements internally and grows memory usage for this BSONColumn. - /** - * Field name that this BSONColumn represents. + * It is NOT safe to call this from multiple threads concurrently. * - * O(1) time complexity + * Throws if invalid encoding is encountered. */ - StringData name() const { - return _name; - } + size_t size() const; // Scans the compressed BSON Column format to efficiently determine if the // column contains an element of type `elementType`. @@ -270,12 +312,23 @@ class BSONColumn { // TODO SERVER-74926: add interleaved support bool contains_forTest(BSONType elementType) const; + /** + * Releases memory that has been used to materialize BSONElements for this BSONColumn. + * + * The returned reference counted pointer holds are reference to the previously materialized + * BSONElements and can be used to extend their lifetime over the BSONColumn. + * + * It is NOT safe to call this from multiple threads concurrently. + */ + boost::intrusive_ptr release(); + private: /** * BSONElement storage, owns materialised BSONElement returned by BSONColumn. * Allocates memory in blocks which double in size as they grow. */ - class ElementStorage { + class ElementStorage + : public boost::intrusive_ref_counter { public: /** * "Writable" BSONElement. 
Provides access to a writable pointer for writing the value of @@ -388,29 +441,25 @@ class BSONColumn { }; /** - * Validates the BSONColumn on init(). Should be the last call in the constructor when all + * Validates the BSONColumn on construction, should be the last call in the constructor when all * members are initialized. */ - void _init(); + void _initialValidate(); struct SubObjectAllocator; - std::deque _decompressed; - ElementStorage _elementStorage; - const char* _binary; int _size; - struct DecodingStartPosition { - void setIfLarger(size_t index, const char* control); + // Reference counted allocator, used to allocate memory when materializing BSONElements. + boost::intrusive_ptr _allocator; +}; - const char* _control = nullptr; - size_t _index = 0; - }; - DecodingStartPosition _maxDecodingStartPos; +// Avoid GCC/Clang compiler issues +// See +// https://stackoverflow.com/questions/53408962/try-to-understand-compiler-error-message-default-member-initializer-required-be +inline BSONColumn::Iterator::DecodingState::DecodingState() = default; +inline BSONColumn::Iterator::DecodingState::Decoder64::Decoder64() = default; - bool _fullyDecompressed = false; - std::string _name; -}; } // namespace mongo diff --git a/src/mongo/bson/util/bsoncolumn_bm.cpp b/src/mongo/bson/util/bsoncolumn_bm.cpp index 9a2154127bf88..bf9834e724e74 100644 --- a/src/mongo/bson/util/bsoncolumn_bm.cpp +++ b/src/mongo/bson/util/bsoncolumn_bm.cpp @@ -27,16 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include +#include +#include +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bsoncolumn.h" #include "mongo/bson/util/bsoncolumnbuilder.h" #include "mongo/util/base64.h" - -#include +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -148,9 +158,7 @@ BSONObj getCompressedFTDC() { // The large literal emits this on Visual Studio: Fatal error C1091: compiler limit: string exceeds // 65535 bytes in length #if !defined(_MSC_VER) - StringData compressedBase64Encoded = { -#include "mongo/bson/util/bson_column_compressed_data.inl" - }; + StringData compressedBase64Encoded = {}; std::string compressed = base64::decode(compressedBase64Encoded); BSONObjBuilder builder; diff --git a/src/mongo/bson/util/bsoncolumn_test.cpp b/src/mongo/bson/util/bsoncolumn_test.cpp index 5d25de91d1989..c5ef188b8d92e 100644 --- a/src/mongo/bson/util/bsoncolumn_test.cpp +++ b/src/mongo/bson/util/bsoncolumn_test.cpp @@ -28,10 +28,35 @@ */ #include "mongo/bson/util/bsoncolumn.h" + +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bsoncolumnbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/simple8b_builder.h" #include "mongo/bson/util/simple8b_type_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" +#include 
"mongo/util/time_support.h" namespace mongo { namespace { @@ -429,9 +454,12 @@ class BSONColumnTest : public unittest::Test { auto it = col.begin(); for (auto elem : expected) { - BSONElement other = *(it++); + BSONElement other = *it; ASSERT(elem.binaryEqualValues(other)); + ASSERT_TRUE(it.more()); + ++it; } + ASSERT_FALSE(it.more()); } // Verify that we can traverse BSONColumn and extract values on the first pass @@ -440,8 +468,9 @@ class BSONColumnTest : public unittest::Test { auto it = col.begin(); for (auto elem : expected) { - BSONElement other = *(it++); + BSONElement other = *it; ASSERT(elem.binaryEqualValues(other)); + ++it; } } @@ -486,11 +515,27 @@ class BSONColumnTest : public unittest::Test { for (; it1 != itEnd && it2 != itEnd; ++it1, ++it2) { ASSERT(it1->binaryEqualValues(*it2)); - ASSERT_EQ(&*it1, &*it2); // Iterators should point to same reference } ASSERT(it1 == it2); } + + // Verify iterator equality operator + { + BSONColumn col(columnElement); + + auto iIt = col.begin(); + for (size_t i = 0; i < expected.size(); ++i, ++iIt) { + auto jIt = col.begin(); + for (size_t j = 0; j < expected.size(); ++j, ++jIt) { + if (i == j) { + ASSERT(iIt == jIt); + } else { + ASSERT(iIt != jIt); + } + } + } + } } /** @@ -6368,7 +6413,7 @@ TEST_F(BSONColumnTest, Intermediate) { #if !defined(_MSC_VER) || _MSC_VER >= 1929 TEST_F(BSONColumnTest, FTDCRoundTrip) { StringData compressedBase64Encoded = { -#include "mongo/bson/util/bson_column_compressed_data.inl" +#include "mongo/bson/util/bson_column_compressed_data.inl" // IWYU pragma: keep }; std::string compressed = base64::decode(compressedBase64Encoded); diff --git a/src/mongo/bson/util/bsoncolumnbuilder.cpp b/src/mongo/bson/util/bsoncolumnbuilder.cpp index 762fc11fb7258..ccf787b6fd071 100644 --- a/src/mongo/bson/util/bsoncolumnbuilder.cpp +++ b/src/mongo/bson/util/bsoncolumnbuilder.cpp @@ -29,12 +29,31 @@ #include "mongo/bson/util/bsoncolumnbuilder.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/util/bsoncolumn_util.h" - +#include "mongo/bson/util/simple8b.h" #include "mongo/bson/util/simple8b_type_util.h" - -#include +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { using namespace bsoncolumn; diff --git a/src/mongo/bson/util/bsoncolumnbuilder.h b/src/mongo/bson/util/bsoncolumnbuilder.h index 283f36ce7b372..568fb7c849a31 100644 --- a/src/mongo/bson/util/bsoncolumnbuilder.h +++ b/src/mongo/bson/util/bsoncolumnbuilder.h @@ -29,17 +29,26 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelementvalue.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/util/builder.h" -#include "mongo/bson/util/simple8b.h" +#include "mongo/bson/util/simple8b_builder.h" #include "mongo/platform/int128.h" -#include -#include -#include - namespace mongo { /** diff --git a/src/mongo/bson/util/builder.h b/src/mongo/bson/util/builder.h index 5d7d2d5673c1a..72831d56c4ac9 100644 --- a/src/mongo/bson/util/builder.h +++ b/src/mongo/bson/util/builder.h @@ -29,31 +29,36 @@ #pragma once +#include +#include +#include +#include #include #include 
+#include #include #include +#include #include #include #include +#include #include - -#include - -#include "mongo/bson/util/builder_fwd.h" +#include #include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" #include "mongo/base/static_assert.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/platform/bits.h" #include "mongo/platform/compiler.h" #include "mongo/platform/decimal128.h" #include "mongo/stdx/type_traits.h" #include "mongo/util/allocator.h" #include "mongo/util/assert_util.h" -#include "mongo/util/concepts.h" #include "mongo/util/itoa.h" #include "mongo/util/shared_buffer.h" #include "mongo/util/shared_buffer_fragment.h" @@ -380,8 +385,8 @@ class BasicBufBuilder { appendNumImpl(high); } - REQUIRES_FOR_NON_TEMPLATE(!std::is_same_v) - void appendNum(int64_t j) { + template + requires(!std::is_same_v) void appendNum(int64_t j) { appendNumImpl(j); } @@ -465,7 +470,8 @@ class BasicBufBuilder { * Replaces the buffer backing this BufBuilder with the passed in SharedBuffer. * Only legal to call when this builder is empty and when the SharedBuffer isn't shared. */ - REQUIRES_FOR_NON_TEMPLATE(std::is_same_v) + template + requires std::is_same_v void useSharedBuffer(SharedBuffer buf) { invariant(len() == 0); // Can only do this while empty. invariant(reservedBytes() == 0); @@ -737,8 +743,8 @@ class StringBuilderImpl { const int maxSize = 32; char* start = _buf.grow(maxSize); int z = snprintf(start, maxSize, "%.16g", x); - verify(z >= 0); - verify(z < maxSize); + MONGO_verify(z >= 0); + MONGO_verify(z < maxSize); _buf.setlen(prev + z); if (strchr(start, '.') == nullptr && strchr(start, 'E') == nullptr && strchr(start, 'N') == nullptr) { @@ -805,8 +811,8 @@ class StringBuilderImpl { StringBuilderImpl& SBNUM(T val, int maxSize, const char* macro) { int prev = _buf.len(); int z = snprintf(_buf.grow(maxSize), maxSize, macro, (val)); - verify(z >= 0); - verify(z < maxSize); + MONGO_verify(z >= 0); + MONGO_verify(z < maxSize); _buf.setlen(prev + z); return *this; } diff --git a/src/mongo/bson/util/builder_test.cpp b/src/mongo/bson/util/builder_test.cpp index 8344c43b83980..d180331b2cac4 100644 --- a/src/mongo/bson/util/builder_test.cpp +++ b/src/mongo/bson/util/builder_test.cpp @@ -27,9 +27,12 @@ * it in the license file. */ -#include "mongo/unittest/unittest.h" +#include #include "mongo/bson/util/builder.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo { TEST(Builder, String1) { diff --git a/src/mongo/bson/util/simple8b.cpp b/src/mongo/bson/util/simple8b.cpp deleted file mode 100644 index 54114375e8310..0000000000000 --- a/src/mongo/bson/util/simple8b.cpp +++ /dev/null @@ -1,921 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/bson/util/simple8b.h" - -#include "mongo/base/data_type_endian.h" -#include "mongo/platform/bits.h" - -#include -#include - -namespace mongo { - -namespace { -/* - * Simple8B is a compression method for storing unsigned int 64 values. In this case - * we make a few optimizations detailed below. We reserve the 4 lsbs for a baseSelector value. And - * then we encode integers based on the following selector choice: - * - * Selector value: 0 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 | 15 (RLE) - * Integers coded: 0 | 60 30 20 15 12 10 8 7 6 5 4 3 2 1 | up to 1920 - * Value Bits/integer: 0 | 1 2 3 4 5 6 7 8 10 12 15 20 30 60 | Last Value added - * Wasted bits: 0 | 0 0 0 0 0 0 4 4 0 0 0 0 0 0 | 56 - * Total Bits/Integer: 0 | 1 2 3 4 5 6 7 8 10 12 15 20 30 60 | Last Valued added - * - * However, we make optimizations for selector value 7 and 8. We can see there are 4 - * wasted trailing bits. Using these 4 bits we can consider compression of trailing zeros. - * For a selector extension value of 7, we store 4 bits and these represent up to 15 trailing zeros. - * The extension bits are stored directly after the initial selector bits so that the simple8b word - * looks like: | Base Selector (0-3) | Selector Extension (4-7) | Bits for Values (8 - 63) - * - * Selector Value: 0 | 7 7 7 7 7 7 7 7 7 - * Selector 7 Extension Value: 0 | 1 2 3 4 5 6 7 8 9 - * Value Bits/Integer: 0 | 2 3 4 5 7 10 14 24 52 - * TrailingZeroBits: 0 | 4 4 4 4 4 4 4 4 4 - * MaxTrailingZeroSize: 0 |15 15 15 15 15 15 15 15 15 - * Total Bits/Integer: 0 | 6 7 8 9 11 14 18 28 56 - * - * Additionally, we consider larger trailing zero counts in selector 8. In this case the value - * of the trailing zero bits is multiplied by a nibble shift of 4. We consider trailing zero sizes - * of both 4 and 5 bits and thus, we split selector 8 in our implementation into Selector8Small and - * Selector8Large - * - * Selector Value: 0 | 8 8 8 8 8 8 8 8 8 8 8 8 8 - * Selector 8 Extension Value: 0 | 1 2 3 4 5 6 7 8 9 10 11 12 13 - * Value Bits/Integer: 0 | 4 5 7 10 14 24 52 4 6 9 13 23 51 - * TrailingZeroBits: 0 | 4 4 4 4 4 4 4 5 5 5 5 5 5 - * MaxTrailingZerosSize: 0 |60 60 60 60 60 60 60 124 124 124 124 124 124 - * Total Bits/Integer: 0 | 8 9 11 14 18 28 56 9 11 14 18 28 56 - * - * The simple8b words are according to this spec of selectors and their extension types. - */ - -// Map selectorNames to their indexs. 
-static constexpr uint8_t kBaseSelector = 0; -static constexpr uint8_t kSevenSelector = 1; -static constexpr uint8_t kEightSelectorSmall = 2; -static constexpr uint8_t kEightSelectorLarge = 3; - -// Variables to handle RLE -static constexpr uint8_t kRleSelector = 15; -static constexpr uint8_t kMaxRleCount = 16; -static constexpr uint8_t kRleMultiplier = 120; - -// Mask to obtain the base and extended selectors. -static constexpr uint64_t kBaseSelectorMask = 0x000000000000000F; - -// Selectors are always of size 4 -static constexpr uint8_t kSelectorBits = 4; - -// Nibble Shift is always of size 4 -static constexpr uint8_t kNibbleShiftSize = 4; - -// The max selector value for each extension -constexpr std::array kMaxSelector = {14, 9, 7, 13}; - -// The min selector value for each extension -constexpr std::array kMinSelector = {1, 1, 1, 8}; - -// The max amount of data bits each selector type can store. This is the amount of bits in the 64bit -// word that are not used for selector values. -constexpr std::array kDataBits = {60, 56, 56, 56}; - -// The amount of bits allocated to store a set of trailing zeros -constexpr std::array kTrailingZeroBitSize = {0, 4, 4, 5}; - -// The amount of possible trailing zeros each selector can handle in the trailingZeroBitSize -constexpr std::array kTrailingZerosMaxCount = {0, 15, 60, 124}; - -// Obtain a mask for the trailing zeros for the seven and eight selectors. We shift 4 and 5 bits to -// create the mask The trailingZeroBitSize variable is used as an index, but must be shifted - 4 to -// correspond to indexes 0 and 1. -constexpr std::array kTrailingZerosMask = { - 0, (1ull << 4) - 1, (1ull << 4) - 1, (1ull << 5) - 1}; - -// The amount of zeros each value in the trailing zero count represents -constexpr std::array kTrailingZerosMultiplier = { - 0, 1, kNibbleShiftSize, kNibbleShiftSize}; - -// Transfer from the base selector to the shift size. -constexpr std::array kBaseSelectorToShiftSize = { - 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0}; - -// Transfer from a selector to a specific extension type -// This is for selector 7 and 8 extensions where the selector value is passed along with -// selector index. -constexpr std::array, 2> kSelectorToExtension = { - std::array{0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, - std::array{0, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3}}; - -// Transfer from a extensionType and selectorIdx to the selector value to be held in the 4 lsb (base -// selector) -constexpr std::array, 4> kExtensionToBaseSelector = { - std::array{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, - std::array{7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}, - std::array{8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}, - std::array{8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}}; - - -// A mask to obtain the value bits for each selector including the trailing zero bits. 
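As a concrete illustration of the base-selector layout documented above, the following hedged sketch decodes one simple8b word for the plain selectors 1-14. Skip encoding, RLE (selector 15), and the selector 7/8 trailing-zero extensions are omitted, and values are assumed to be packed least-significant-first above the four selector bits:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Bits per integer and integers per word for the base selector, per the table above.
std::vector<uint64_t> decodeBaseSelector(uint64_t word) {
    static constexpr uint8_t kBitsPerInt[16] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60, 0};
    static constexpr uint8_t kIntsPerWord[16] = {0, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1, 0};

    uint8_t selector = word & 0xF;  // 4 lsbs hold the base selector
    uint64_t bits = word >> 4;      // remaining 60 bits hold the packed values
    uint8_t width = kBitsPerInt[selector];
    uint64_t mask = (1ull << width) - 1;

    std::vector<uint64_t> out;
    for (int i = 0; i < kIntsPerWord[selector]; ++i) {
        out.push_back(bits & mask);
        bits >>= width;
    }
    return out;
}

int main() {
    // Selector 3: twenty 3-bit values. Pack 1..4 followed by zeros.
    uint64_t word = 3;
    for (int i = 0; i < 4; ++i)
        word |= static_cast<uint64_t>(i + 1) << (4 + 3 * i);
    for (uint64_t v : decodeBaseSelector(word))
        std::cout << v << ' ';  // 1 2 3 4 0 0 ... (20 values total)
}
```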
The masks are -// calculated as the following: Mask = 2^(kBitsPerInteger+kTrailingZeroBitSize) - 1 -constexpr std::array, 4> kDecodeMask = { - std::array{0, - 1, - (1ull << 2) - 1, - (1ull << 3) - 1, - (1ull << 4) - 1, - (1ull << 5) - 1, - (1ull << 6) - 1, - (1ull << 7) - 1, - (1ull << 8) - 1, - (1ull << 10) - 1, - (1ull << 12) - 1, - (1ull << 15) - 1, - (1ull << 20) - 1, - (1ull << 30) - 1, - (1ull << 60) - 1, - 1}, - std::array{0, - (1ull << 6) - 1, - (1ull << 7) - 1, - (1ull << 8) - 1, - (1ull << 9) - 1, - (1ull << 11) - 1, - (1ull << 14) - 1, - (1ull << 18) - 1, - (1ull << 28) - 1, - (1ull << 56) - 1, - 0, - 0, - 0, - 0, - 0, - 0}, - std::array{0, - (1ull << 8) - 1, - (1ull << 9) - 1, - (1ull << 11) - 1, - (1ull << 14) - 1, - (1ull << 18) - 1, - (1ull << 28) - 1, - (1ull << 56) - 1, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0}, - std::array{ - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - (1ull << 9) - 1, - (1ull << 11) - 1, - (1ull << 14) - 1, - (1ull << 18) - 1, - (1ull << 28) - 1, - (1ull << 56) - 1, - 0, - 0}}; - -// The number of meaningful bits for each selector. This does not include any trailing zero bits. -// We use 64 bits for all invalid selectors, this is to make sure iteration does not get stuck. -constexpr std::array, 4> kBitsPerIntForSelector = { - std::array{64, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60, 64}, - std::array{64, 2, 3, 4, 5, 7, 10, 14, 24, 52, 64, 64, 64, 64, 64, 64}, - std::array{64, 4, 5, 7, 10, 14, 24, 52, 0, 0, 64, 64, 64, 64, 64, 64}, - std::array{64, 0, 0, 0, 0, 0, 0, 0, 4, 6, 9, 13, 23, 51, 64, 64}}; - -// The number of integers coded for each selector. -constexpr std::array, 4> kIntsStoreForSelector = { - std::array{0, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1, 0}, - std::array{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0}, - std::array{0, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, - std::array{0, 0, 0, 0, 0, 0, 0, 0, 6, 5, 4, 3, 2, 1, 0, 0}}; - -// Calculates number of bits needed to store value. Must be less than -// numeric_limits::max(). -uint8_t _countBitsWithoutLeadingZeros(uint64_t value) { - // All 1s is reserved for skip encoding so we add 1 to value to account for that case. - return 64 - countLeadingZerosNonZero64(value + 1); -} - -uint8_t _countTrailingZerosWithZero(uint64_t value) { - // countTrailingZeros64 returns 64 if the value is 0 but we consider this to be 0 trailing - // zeros. - return value == 0 ? 0 : countTrailingZerosNonZero64(value); -} - -uint8_t _countTrailingZerosWithZero(uint128_t value) { - uint64_t low = absl::Uint128Low64(value); - uint64_t high = absl::Uint128High64(value); - - // If value == 0 then we cannot add 64 - if (low == 0 && high != 0) { - return countTrailingZerosNonZero64(high) + 64; - } else { - return _countTrailingZerosWithZero(low); - } -} - -// Calculates number of bits needed to store value. Must be less than -// numeric_limits::max(). -uint8_t _countBitsWithoutLeadingZeros(uint128_t value) { - uint64_t high = absl::Uint128High64(value); - if (high == 0) { - uint64_t low = absl::Uint128Low64(value); - // We can't call _countBitsWithoutLeadingZeros() with numeric_limits::max as it - // would overflow and yield the wrong result. Just return the correct value instead. - if (low == std::numeric_limits::max()) - return 65; - return _countBitsWithoutLeadingZeros(low); - } else { - return 128 - countLeadingZerosNonZero64(high); - } -} - -/* - * This method takes a number of intsNeeded and an extensionType and returns the selector index for - * that type. 
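The two counting helpers above have close C++20 <bit> equivalents; this sketch restates them with std::countl_zero/std::countr_zero instead of the platform wrappers (the leading-zero variant assumes its input is below the maximum 64-bit value, since the all-ones pattern is reserved for skip encoding):

```cpp
#include <bit>
#include <cstdint>
#include <iostream>

// Bits needed to store 'value'; the +1 reserves the all-ones pattern for skip.
uint8_t bitsWithoutLeadingZeros(uint64_t value) {
    return 64 - std::countl_zero(value + 1);
}

// Trailing zero count, treating 0 as having no trailing zeros.
uint8_t trailingZerosWithZero(uint64_t value) {
    return value == 0 ? 0 : std::countr_zero(value);
}

int main() {
    std::cout << int(bitsWithoutLeadingZeros(0)) << '\n';  // 1: even zero needs one bit
    std::cout << int(bitsWithoutLeadingZeros(7)) << '\n';  // 4: 7 + 1 == 0b1000
    std::cout << int(trailingZerosWithZero(80)) << '\n';   // 4: 80 == 0b1010000
}
```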
This method should never fail as it is called when we are encoding a largest value. - */ -uint8_t _getSelectorIndex(uint8_t intsNeeded, uint8_t extensionType) { - auto iteratorIdx = std::find_if( - kIntsStoreForSelector[extensionType].begin() + kMinSelector[extensionType], - kIntsStoreForSelector[extensionType].begin() + kMaxSelector[extensionType], - [intsNeeded](uint8_t intsPerSelectorIdx) { return intsNeeded >= intsPerSelectorIdx; }); - return iteratorIdx - kIntsStoreForSelector[extensionType].begin(); -} - -} // namespace - -// This is called in _encode while iterating through _pendingValues. For the base selector, we just -// return val. Contains unsed vars in order to seamlessly integrate with seven and eight selector -// extensions. -template -struct Simple8bBuilder::BaseSelectorEncodeFunctor { - uint64_t operator()(const PendingValue& value) { - return static_cast(value.value()); - }; -}; - -// This is called in _encode while iterating through _pendingValues. It creates part of a simple8b -// word according to the specifications of the sevenSelector extension. This value is then appended -// to the full simple8b word in _encode. -template -struct Simple8bBuilder::SevenSelectorEncodeFunctor { - uint64_t operator()(const PendingValue& value) { - uint8_t trailingZeros = value.trailingZerosCount[kSevenSelector]; - uint64_t currWord = trailingZeros; - // We do two shifts here to account for the case where trailingZeros is > kTrailingZero bit - // size. If we subtracted this could lead to shift by a negative value which is undefined. - currWord |= static_cast((value.value() >> trailingZeros) - << kTrailingZeroBitSize[kSevenSelector]); - return currWord; - }; -}; - -// This is a helper functor that is extended by the EightSelectorSmall and EightSelectorLarge encode -// functors. It provides the logic for encoding with the eight selector where the extension type is -// designated by the inheritance in the EightSelectorSmall and EightSelectorLarge functors. -template -template -struct Simple8bBuilder::EightSelectorEncodeFunctor { - uint64_t operator()(const PendingValue& value) { - // integer division. We have a nibble shift of size 4 - uint8_t trailingZeros = value.trailingZerosCount[ExtensionType] / kNibbleShiftSize; - uint64_t currWord = trailingZeros; - // Shift to remove trailing zeros * 4 and then shift over for the 4 bits to hold - // the trailingZerosCount - currWord |= static_cast((value.value() >> (trailingZeros * kNibbleShiftSize)) - << kTrailingZeroBitSize[ExtensionType]); - return currWord; - } -}; - -// This is called in _encode while iterating through _pendingValues. It creates part of a simple8b -// word according to the specifications of the eightSelectorSmall extension. This value is then -// appended to the full simple8b word in _encode. -template -struct Simple8bBuilder::EightSelectorSmallEncodeFunctor - : public EightSelectorEncodeFunctor {}; - -// This is called in _encode while iterating through _pendingValues. It creates part of a simple8b -// word according to the specifications of the eightSelectorLarge extension. This value is then -// appended to the full simple8b word in _encode. 
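To make the seven-selector slot format concrete, here is a small standalone sketch of the packing that SevenSelectorEncodeFunctor performs (an illustration under the layout described above, not the MongoDB functor itself): the 4 low bits of each slot carry the trailing-zero count and the bits above carry the value with those zeros stripped.

```cpp
#include <cstdint>
#include <iostream>

// Pack one "seven selector" slot: trailing-zero count (at most 15 for this
// extension) in the low 4 bits, meaningful value bits shifted above it.
uint64_t packSevenSelectorSlot(uint64_t value, uint8_t trailingZeros) {
    constexpr uint8_t kTrailingZeroBits = 4;
    uint64_t slot = trailingZeros;
    slot |= (value >> trailingZeros) << kTrailingZeroBits;
    return slot;
}

int main() {
    // 0xB00 has 8 trailing zero bits and meaningful bits 0xB,
    // so the slot becomes (0xB << 4) | 8 == 0xB8.
    std::cout << std::hex << packSevenSelectorSlot(0xB00, 8) << '\n';
}
```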
-template -struct Simple8bBuilder::EightSelectorLargeEncodeFunctor - : public EightSelectorEncodeFunctor {}; - -// Base Constructor for PendingValue -template -Simple8bBuilder::PendingValue::PendingValue( - boost::optional val, - std::array bitCount, - std::array trailingZerosCount) - : val(val), bitCount(bitCount), trailingZerosCount(trailingZerosCount){}; - -template -Simple8bBuilder::PendingIterator::PendingIterator( - typename std::deque::const_iterator beginning, - typename std::deque::const_iterator it, - reference rleValue, - uint32_t rleCount) - : _begin(beginning), _it(it), _rleValue(rleValue), _rleCount(rleCount) {} - -template -auto Simple8bBuilder::PendingIterator::operator->() const -> pointer { - return &operator*(); -} - -template -auto Simple8bBuilder::PendingIterator::operator*() const -> reference { - if (_rleCount > 0) - return _rleValue; - - return _it->val; -} - -template -auto Simple8bBuilder::PendingIterator::operator++() -> PendingIterator& { - if (_rleCount > 0) { - --_rleCount; - return *this; - } - - ++_it; - return *this; -} - -template -auto Simple8bBuilder::PendingIterator::operator++(int) -> PendingIterator { - auto ret = *this; - ++(*this); - return ret; -} - -template -auto Simple8bBuilder::PendingIterator::operator--() -> PendingIterator& { - if (_rleCount > 0 || _it == _begin) { - ++_rleCount; - return *this; - } - - --_it; - return *this; -} - -template -auto Simple8bBuilder::PendingIterator::operator--(int) -> PendingIterator { - auto ret = *this; - --(*this); - return ret; -} - -template -bool Simple8bBuilder::PendingIterator::operator==( - const Simple8bBuilder::PendingIterator& rhs) const { - return _it == rhs._it && _rleCount == rhs._rleCount; -} - -template -bool Simple8bBuilder::PendingIterator::operator!=( - const Simple8bBuilder::PendingIterator& rhs) const { - return !operator==(rhs); -} - -template -Simple8bBuilder::Simple8bBuilder(Simple8bWriteFn writeFunc) : _writeFn(std::move(writeFunc)) {} - -template -Simple8bBuilder::~Simple8bBuilder() = default; - -template -bool Simple8bBuilder::append(T value) { - if (_rlePossible()) { - if (_lastValueInPrevWord.val == value) { - ++_rleCount; - return true; - } - _handleRleTermination(); - } - - return _appendValue(value, true); -} - -template -void Simple8bBuilder::skip() { - if (_rlePossible() && _lastValueInPrevWord.isSkip()) { - ++_rleCount; - return; - } - - _handleRleTermination(); - _appendSkip(true /* tryRle */); -} - -template -void Simple8bBuilder::flush() { - // Flush repeating integers that have been kept for RLE. - _handleRleTermination(); - // Flush buffered values in _pendingValues. - if (!_pendingValues.empty()) { - // always flush with the most recent valid selector. This value is the baseSelector if we - // have not have a valid selector yet. - do { - uint64_t simple8bWord = _encodeLargestPossibleWord(_lastValidExtensionType); - _writeFn(simple8bWord); - } while (!_pendingValues.empty()); - - // There are no more words in _pendingValues and RLE is possible. - // However the _rleCount is 0 because we have not read any of the values in the next word. - _rleCount = 0; - } - - // Always reset _lastValueInPrevWord. We may only start RLE after flush on 0 value. - _lastValueInPrevWord = {}; -} - -template -bool Simple8bBuilder::_appendValue(T value, bool tryRle) { - // Early exit if we try to store max value. They are not handled when counting zeros. 
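Since append, skip and flush above form the public write path, a hypothetical usage sketch may help. The write callback's exact signature is given by Simple8bWriteFn (its template arguments are not visible in this hunk), so the lambda below simply assumes it receives each finalized 64-bit word; the include path refers to the simple8b_builder.h header introduced later in this change.

```cpp
#include <cstdint>
#include <vector>

#include "mongo/bson/util/simple8b_builder.h"

// Collect finalized Simple-8b words into a vector. Treating an out-of-range
// value as a skip is only one possible policy, chosen here for brevity.
std::vector<uint64_t> encodeAll(const std::vector<uint64_t>& values) {
    std::vector<uint64_t> words;
    mongo::Simple8bBuilder<uint64_t> builder([&](uint64_t word) {
        words.push_back(word);  // machine endian; convert before persisting
        return true;            // harmless whether the callback type returns bool or void
    });
    for (uint64_t v : values) {
        if (!builder.append(v)) {
            builder.skip();
        }
    }
    builder.flush();  // drain pending values and any active RLE run
    return words;
}
```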
- if (value == std::numeric_limits::max()) - return false; - - uint8_t trailingZerosCount = _countTrailingZerosWithZero(value); - // Initially set every selector as invalid. - uint8_t bitCountWithoutLeadingZeros = _countBitsWithoutLeadingZeros(value); - uint8_t trailingZerosStoredInCountSeven = - (std::min(trailingZerosCount, kTrailingZerosMaxCount[kSevenSelector])); - uint8_t meaningfulValueBitsStoredWithSeven = - bitCountWithoutLeadingZeros - trailingZerosStoredInCountSeven; - // We use integer division to ensure that a multiple of 4 is stored in - // trailingZerosStoredInCount when we have the nibble shift. - uint8_t trailingZerosStoredInCountEightSmall = - (std::min(trailingZerosCount, kTrailingZerosMaxCount[kEightSelectorSmall]) / - kNibbleShiftSize) * - kNibbleShiftSize; - uint8_t meaningfulValueBitsStoredWithEightSmall = - bitCountWithoutLeadingZeros - trailingZerosStoredInCountEightSmall; - // We use integer division to ensure that a multiple of 4 is stored in - // trailingZerosStoredInCount when we have the nibble shift. - uint8_t trailingZerosStoredInCountEightLarge = - (std::min(trailingZerosCount, kTrailingZerosMaxCount[kEightSelectorLarge]) / - kNibbleShiftSize) * - kNibbleShiftSize; - uint8_t meaningfulValueBitsStoredWithEightLarge = - bitCountWithoutLeadingZeros - trailingZerosStoredInCountEightLarge; - - // Edge cases where we have the number of trailing zeros bits as all ones and we need to add a - // padded zero to the meaningful bits to avoid confilicts with skip storage. Otherwise, we can - // reuse the bitCountWithoutLeadingZeros already calculated above. - if (trailingZerosCount == kTrailingZerosMaxCount[kSevenSelector]) { - meaningfulValueBitsStoredWithSeven = - _countBitsWithoutLeadingZeros(value >> trailingZerosCount); - } else if (trailingZerosCount == kTrailingZerosMaxCount[kEightSelectorSmall]) { - meaningfulValueBitsStoredWithEightSmall = - _countBitsWithoutLeadingZeros(value >> trailingZerosCount); - } - - // This case is specifically for 128 bit types where we have 124 zeros or max zeros - // count. We do not need to even check this for 64 bit types - if constexpr (std::is_same::value) { - if (trailingZerosCount == kTrailingZerosMaxCount[kEightSelectorLarge]) { - meaningfulValueBitsStoredWithEightLarge = - _countBitsWithoutLeadingZeros(value >> trailingZerosCount); - } - } - - std::array zeroCount = {0, - trailingZerosStoredInCountSeven, - trailingZerosStoredInCountEightSmall, - trailingZerosStoredInCountEightLarge}; - - // Check if the amount of bits needed is more than we can store using all selector combinations. - if ((bitCountWithoutLeadingZeros > kDataBits[kBaseSelector]) && - (meaningfulValueBitsStoredWithSeven + kTrailingZeroBitSize[kSevenSelector] > - kDataBits[kSevenSelector]) && - (meaningfulValueBitsStoredWithEightSmall + kTrailingZeroBitSize[kEightSelectorSmall] > - kDataBits[kEightSelectorSmall]) && - (meaningfulValueBitsStoredWithEightLarge + kTrailingZeroBitSize[kEightSelectorLarge] > - kDataBits[kEightSelectorLarge])) { - return false; - } - - PendingValue pendingValue(value, - {bitCountWithoutLeadingZeros, - meaningfulValueBitsStoredWithSeven, - meaningfulValueBitsStoredWithEightSmall, - meaningfulValueBitsStoredWithEightLarge}, - zeroCount); - // Check if we have a valid selector for the current word. This method update the global - // isSelectorValid to avoid redundant computation. - if (_doesIntegerFitInCurrentWord(pendingValue)) { - // If the integer fits in the current word, add it. 
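The nibble-based trailing-zero storage above is the least obvious part of this computation; the following standalone arithmetic sketch shows the rounding, with the constants taken from the tables earlier in this file.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

int main() {
    // Eight-selector-small extension: trailing zeros are stored in units of a
    // nibble (4 zeros per count increment), capped at kTrailingZerosMaxCount (60).
    const uint8_t trailingZeros = 11;  // zeros actually present in the value
    const uint8_t maxStorable = 60;
    const uint8_t kNibble = 4;
    uint8_t stored = std::min(trailingZeros, maxStorable) / kNibble * kNibble;
    std::cout << int(stored) << '\n';  // prints 8; the remaining 3 zeros stay in the value bits
}
```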
- _pendingValues.push_back(pendingValue); - _updateSimple8bCurrentState(pendingValue); - } else { - // If the integer does not fit in the current word, convert the integers into simple8b - // word(s) with no unused buckets until the new value can be added to _pendingValues. Then - // add the Simple8b word(s) to the buffer. Finally add the new integer and update any global - // variables. We add based on the lastSelector that was valid where priority ordering is the - // following: base, seven, eightSmall, eightLarge. Store pending last value for RLE. - PendingValue lastPendingValue = _pendingValues.back(); - do { - uint64_t simple8bWord = _encodeLargestPossibleWord(_lastValidExtensionType); - _writeFn(simple8bWord); - } while (!(_doesIntegerFitInCurrentWord(pendingValue))); - - if (tryRle && _pendingValues.empty() && lastPendingValue.val == value) { - // There are no more words in _pendingValues and the last element of the last Simple8b - // word is the same as the new value. Therefore, start RLE. - _rleCount = 1; - _lastValueInPrevWord = lastPendingValue; - } else { - _pendingValues.push_back(pendingValue); - _updateSimple8bCurrentState(pendingValue); - } - } - return true; -} - -template -void Simple8bBuilder::_appendSkip(bool tryRle) { - if (!_pendingValues.empty()) { - bool isLastValueSkip = _pendingValues.back().isSkip(); - - // There is never a case where we need to write more than one Simple8b wrod - // because we only need 1 bit for skip - if (!_doesIntegerFitInCurrentWord({boost::none, kMinDataBits, {0, 0, 0, 0}})) { - // Form simple8b word if skip can not fit with last selector - uint64_t simple8bWord = _encodeLargestPossibleWord(_lastValidExtensionType); - _writeFn(simple8bWord); - _lastValidExtensionType = kBaseSelector; - } - - if (_pendingValues.empty() && isLastValueSkip && tryRle) { - // It is possible to start rle - _rleCount = 1; - _lastValueInPrevWord = {boost::none, {0, 0, 0, 0}, {0, 0, 0, 0}}; - return; - } - } - // Push true into skip and the dummy value, 0, into currNum. We use the dummy value, 0 because - // it takes 1 bit and it will not affect our global curr bit length calculations. - _pendingValues.push_back({boost::none, {0, 0, 0, 0}, {0, 0, 0, 0}}); -} - -template -void Simple8bBuilder::_handleRleTermination() { - if (_rleCount == 0) - return; - - // Try to create a RLE Simple8b word. - _appendRleEncoding(); - // Add any values that could not be encoded in RLE. - while (_rleCount > 0) { - if (_lastValueInPrevWord.isSkip()) { - _appendSkip(false /* tryRle */); - } else { - _appendValue(_lastValueInPrevWord.value(), false); - } - --_rleCount; - } -} - -template -void Simple8bBuilder::_appendRleEncoding() { - // This encodes a value using rle. The selector is set as 15 and the count is added in the next - // 4 bits. The value is the previous value stored by simple8b or 0 if no previous value was - // stored. - auto createRleEncoding = [this](uint8_t count) { - uint64_t rleEncoding = kRleSelector; - // We will store (count - 1) during encoding and execute (count + 1) during decoding. - rleEncoding |= (count - 1) << kSelectorBits; - _writeFn(rleEncoding); - }; - - uint32_t count = _rleCount / kRleMultiplier; - // Check to make sure count is big enough for RLE encoding - if (count >= 1) { - while (count > kMaxRleCount) { - // If one RLE word is insufficient use multiple RLE words. 
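A standalone sketch of the RLE word layout this loop produces, with the constants restated from the top of the file (illustrative only, not the MongoDB code): selector 15 in the low 4 bits, count minus one in the next 4 bits, and each count unit standing for 120 repetitions.

```cpp
#include <cstdint>
#include <vector>

std::vector<uint64_t> makeRleWords(uint32_t repeats) {
    constexpr uint64_t kRleSelector = 15;
    constexpr uint32_t kSelectorBits = 4;
    constexpr uint32_t kMaxRleCount = 16;    // largest count one RLE word can carry
    constexpr uint32_t kRleMultiplier = 120;

    std::vector<uint64_t> words;
    uint32_t count = repeats / kRleMultiplier;  // leftover repeats are re-appended as plain values
    while (count > kMaxRleCount) {
        words.push_back(kRleSelector | (uint64_t(kMaxRleCount - 1) << kSelectorBits));
        count -= kMaxRleCount;
    }
    if (count >= 1) {
        words.push_back(kRleSelector | (uint64_t(count - 1) << kSelectorBits));
    }
    return words;  // e.g. 2000 repeats -> one word with count 16 (1920 values); the rest is re-appended
}
```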
- createRleEncoding(kMaxRleCount); - count -= kMaxRleCount; - } - createRleEncoding(count); - _rleCount %= kRleMultiplier; - } -} - -template -bool Simple8bBuilder::_rlePossible() const { - return _pendingValues.empty() || _rleCount != 0; -} - - -template -bool Simple8bBuilder::_doesIntegerFitInCurrentWord(const PendingValue& value) { - bool fitsInCurrentWord = false; - for (uint8_t i = 0; i < kNumOfSelectorTypes; ++i) { - if (isSelectorPossible[i]) { - fitsInCurrentWord = - fitsInCurrentWord || _doesIntegerFitInCurrentWordWithGivenSelectorType(value, i); - } - // Stop loop early if we find a valid selector. - if (fitsInCurrentWord) - return fitsInCurrentWord; - } - return false; -} - -template -bool Simple8bBuilder::_doesIntegerFitInCurrentWordWithGivenSelectorType( - const PendingValue& value, uint8_t extensionType) { - uint64_t numBitsWithValue = - (std::max(_currMaxBitLen[extensionType], value.bitCount[extensionType]) + - kTrailingZeroBitSize[extensionType]) * - (_pendingValues.size() + 1); - // If the numBitswithValue is greater than max bits or we cannot fit the trailingZeros we update - // this selector as false and return false. Special case for baseSelector where we never add - // trailingZeros so we always pass the zeros comparison. - if (kDataBits[extensionType] < numBitsWithValue) { - isSelectorPossible[extensionType] = false; - return false; - } - // Update so we remember the last validExtensionType when its time to encode a word - _lastValidExtensionType = extensionType; - return true; -} - -template -int64_t Simple8bBuilder::_encodeLargestPossibleWord(uint8_t extensionType) { - // Since this is always called right after _doesIntegerFitInCurrentWord fails for the first - // time, we know all values in _pendingValues fits in the slots for the selector that can store - // this many values. Find the smallest selector that doesn't leave any unused slots. 
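The fit test above reduces to one multiplication; here is a minimal standalone restatement of that arithmetic (the parameter names are mine, the per-extension constants come from the tables at the top of the file).

```cpp
#include <algorithm>
#include <cstddef>
#include <cstdint>

// A value fits under a given selector extension if every buffered slot, widened
// to the largest pending value (including the candidate) plus the extension's
// trailing-zero field, still fits in the data bits that extension leaves free.
bool fitsWithExtension(uint8_t currMaxBitLen,          // widest pending value so far
                       uint8_t valueBitLen,            // bit width of the candidate value
                       uint8_t trailingZeroFieldBits,  // 0, 4, 4 or 5 by extension
                       uint8_t dataBits,               // 60 or 56 by extension
                       size_t pendingCount) {          // values already buffered
    uint64_t bitsPerSlot = uint64_t(std::max(currMaxBitLen, valueBitLen)) + trailingZeroFieldBits;
    return bitsPerSlot * (pendingCount + 1) <= dataBits;
}
```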
- uint8_t selector = _getSelectorIndex(_pendingValues.size(), extensionType); - uint8_t integersCoded = kIntsStoreForSelector[extensionType][selector]; - uint64_t encodedWord; - switch (extensionType) { - case kEightSelectorSmall: - encodedWord = _encode(EightSelectorSmallEncodeFunctor(), selector, extensionType); - break; - case kEightSelectorLarge: - encodedWord = _encode(EightSelectorLargeEncodeFunctor(), selector, extensionType); - break; - case kSevenSelector: - encodedWord = _encode(SevenSelectorEncodeFunctor(), selector, extensionType); - break; - default: - encodedWord = _encode(BaseSelectorEncodeFunctor(), selector, extensionType); - } - - _pendingValues.erase(_pendingValues.begin(), _pendingValues.begin() + integersCoded); - _currMaxBitLen = kMinDataBits; - for (const auto& val : _pendingValues) { - _updateSimple8bCurrentState(val); - } - // Reset which selectors are possible to use for next word - isSelectorPossible.fill(true); - return encodedWord; -} - -template -template -uint64_t Simple8bBuilder::_encode(Func func, uint8_t selectorIdx, uint8_t extensionType) { - uint8_t baseSelector = kExtensionToBaseSelector[extensionType][selectorIdx]; - uint8_t bitShiftExtension = kBaseSelectorToShiftSize[baseSelector]; - uint64_t encodedWord = baseSelector; - uint8_t bitsPerInteger = kBitsPerIntForSelector[extensionType][selectorIdx]; - uint8_t integersCoded = kIntsStoreForSelector[extensionType][selectorIdx]; - uint64_t unshiftedMask = kDecodeMask[extensionType][selectorIdx]; - uint8_t bitsForTrailingZeros = kTrailingZeroBitSize[extensionType]; - for (uint8_t i = 0; i < integersCoded; ++i) { - uint8_t shiftSize = - (bitsPerInteger + bitsForTrailingZeros) * i + kSelectorBits + bitShiftExtension; - uint64_t currEncodedWord; - if (_pendingValues[i].isSkip()) { - currEncodedWord = unshiftedMask; - } else { - currEncodedWord = func(_pendingValues[i]); - } - encodedWord |= currEncodedWord << shiftSize; - } - if (extensionType != kBaseSelector) { - encodedWord |= (uint64_t(selectorIdx) << kSelectorBits); - } - return encodedWord; -} - -template -void Simple8bBuilder::_updateSimple8bCurrentState(const PendingValue& val) { - for (uint8_t i = 0; i < kNumOfSelectorTypes; ++i) { - _currMaxBitLen[i] = std::max(_currMaxBitLen[i], val.bitCount[i]); - } -} - -template -typename Simple8bBuilder::PendingIterator Simple8bBuilder::begin() const { - return {_pendingValues.begin(), _pendingValues.begin(), _lastValueInPrevWord.val, _rleCount}; -} - -template -typename Simple8bBuilder::PendingIterator Simple8bBuilder::end() const { - return {_pendingValues.begin(), _pendingValues.end(), _lastValueInPrevWord.val, 0}; -} - -template -std::reverse_iterator::PendingIterator> Simple8bBuilder::rbegin() - const { - return std::reverse_iterator::PendingIterator>(end()); -} - -template -std::reverse_iterator::PendingIterator> Simple8bBuilder::rend() - const { - return std::reverse_iterator::PendingIterator>(begin()); -} - -template -void Simple8bBuilder::setWriteCallback(Simple8bWriteFn writer) { - _writeFn = std::move(writer); -} - -template -Simple8b::Iterator::Iterator(const char* pos, - const char* end, - const boost::optional& previous) - : _pos(pos), _end(end), _value(previous), _rleRemaining(0), _shift(0) { - if (pos != end) { - _loadBlock(); - } -} - -template -void Simple8b::Iterator::_loadBlock() { - _current = ConstDataView(_pos).read>(); - - _selector = _current & kBaseSelectorMask; - uint8_t selectorExtension = ((_current >> kSelectorBits) & kBaseSelectorMask); - - // If RLE selector, just load remaining 
count. Keep value from previous. - if (_selector == kRleSelector) { - // Set shift to something larger than 64bit to force a new block to be loaded when - // we've extinguished RLE count. - _shift = (sizeof(_current) * 8) + 1; - _rleRemaining = _rleCountInCurrent(selectorExtension) - 1; - return; - } - - _extensionType = kBaseSelector; - uint8_t extensionBits = 0; - - // If Selectors 7 or 8 check if we are using extended selectors - if (_selector == 7 || _selector == 8) { - _extensionType = kSelectorToExtension[_selector - 7][selectorExtension]; - // Use the extended selector if extension is != 0 - if (_extensionType != kBaseSelector) { - _selector = selectorExtension; - // Make shift the size of 2 selectors to handle extensions - } - extensionBits = 4; - } - - // Initialize all variables needed to advance the iterator for this block - _mask = kDecodeMask[_extensionType][_selector]; - _countMask = kTrailingZerosMask[_extensionType]; - _countBits = kTrailingZeroBitSize[_extensionType]; - _countMultiplier = kTrailingZerosMultiplier[_extensionType]; - _bitsPerValue = kBitsPerIntForSelector[_extensionType][_selector] + _countBits; - _shift = kSelectorBits + extensionBits; - _rleRemaining = 0; - - // Finally load the first value in the block. - _loadValue(); -} - -template -void Simple8b::Iterator::_loadValue() { - // Mask out the value of current slot - auto shiftedMask = _mask << _shift; - uint64_t value = (_current & shiftedMask) >> _shift; - - // Check if this a skip - if (value == _mask) { - _value = boost::none; - return; - } - - // Shift in any trailing zeros that are stored in the count for extended selectors 7 and 8. - auto trailingZeros = (value & _countMask); - _value = static_cast((value >> _countBits)) << (trailingZeros * _countMultiplier); -} - -template -size_t Simple8b::Iterator::blockSize() const { - if (_selector == kRleSelector) { - uint8_t selectorExtension = (_current >> kSelectorBits) & kBaseSelectorMask; - return _rleCountInCurrent(selectorExtension); - } - return kIntsStoreForSelector[_extensionType][_selector]; -} - -template -uint16_t Simple8b::Iterator::_rleCountInCurrent(uint8_t selectorExtension) const { - // SelectorExtension holds the rle count in this case - return (selectorExtension + 1) * kRleMultiplier; -} - -template -typename Simple8b::Iterator& Simple8b::Iterator::operator++() { - if (_rleRemaining > 0) { - --_rleRemaining; - return *this; - } - - _shift += _bitsPerValue; - if (_shift + _bitsPerValue > sizeof(_current) * 8) { - return advanceBlock(); - } - - _loadValue(); - return *this; -} - -template -typename Simple8b::Iterator& Simple8b::Iterator::advanceBlock() { - _pos += sizeof(uint64_t); - if (_pos == _end) { - _rleRemaining = 0; - _shift = 0; - return *this; - } - - _loadBlock(); - return *this; -} - -template -bool Simple8b::Iterator::operator==(const Simple8b::Iterator& rhs) const { - return _pos == rhs._pos && _rleRemaining == rhs._rleRemaining && _shift == rhs._shift; -} - -template -bool Simple8b::Iterator::operator!=(const Simple8b::Iterator& rhs) const { - return !operator==(rhs); -} - -template -Simple8b::Simple8b(const char* buffer, int size, boost::optional previous) - : _buffer(buffer), _size(size), _previous(previous) { - invariant(size % sizeof(uint64_t) == 0); -} - -template -typename Simple8b::Iterator Simple8b::begin() const { - return {_buffer, _buffer + _size, _previous}; -} - -template -typename Simple8b::Iterator Simple8b::end() const { - return {_buffer + _size, _buffer + _size, boost::none}; -} - -template class Simple8b; 
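For completeness, a hypothetical read-side sketch of the Simple8b class instantiated above; it assumes, as the _loadValue logic suggests, that dereferencing the iterator yields an optional value where an empty optional marks a skip.

```cpp
#include <cstdint>
#include <iostream>

#include "mongo/bson/util/simple8b.h"

// Walk every slot in a buffer of finalized Simple-8b blocks. 'buffer' and
// 'size' come from wherever the blocks were persisted; size must be a
// multiple of 8 bytes.
void printAll(const char* buffer, int size) {
    mongo::Simple8b<uint64_t> s8b(buffer, size);
    for (const auto& slot : s8b) {
        if (slot) {
            std::cout << *slot << '\n';
        } else {
            std::cout << "(skip)\n";  // slot was encoded as missing
        }
    }
}
```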
-template class Simple8b; -template class Simple8bBuilder; -template class Simple8bBuilder; -} // namespace mongo diff --git a/src/mongo/bson/util/simple8b.h b/src/mongo/bson/util/simple8b.h index f12a94c4e1597..ccaa4dd955209 100644 --- a/src/mongo/bson/util/simple8b.h +++ b/src/mongo/bson/util/simple8b.h @@ -30,272 +30,23 @@ #pragma once #include -#include -#include - -#include "mongo/bson/util/builder.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/bson/util/simple8b_constants.h" #include "mongo/platform/int128.h" namespace mongo { /** - * Callback type to implement writing of 64 bit Simple8b words. - */ -using Simple8bWriteFn = std::function; - -/** - * Simple8bBuilder compresses a series of integers into chains of 64 bit Simple8b blocks. - * - * T may be uint64_t and uint128_t only. - */ -template -class Simple8bBuilder { -private: - struct PendingValue; - -public: - // Callback to handle writing of finalized Simple-8b blocks. Machine Endian byte order, the - // value need to be converted to Little Endian before persisting. - Simple8bBuilder(Simple8bWriteFn writeFunc = nullptr); - ~Simple8bBuilder(); - - /** - * Appends val to Simple8b. Returns true if the append was successful and false if the value was - * outside the range of possible values we can store in Simple8b. - * - * A call to append may result in multiple Simple8b blocks being finalized. - */ - bool append(T val); - - /** - * Appends a missing value to Simple8b. - * - * May result in a single Simple8b being finalized. - */ - void skip(); - - /** - * Flushes all buffered values into finalized Simple8b blocks. - * - * It is allowed to continue to append values after this call. - */ - void flush(); - - /** - * Iterator for reading pending values in Simple8bBuilder that has not yet been written to - * Simple-8b blocks. - * - * Provides bidirectional iteration - */ - class PendingIterator { - public: - friend class Simple8bBuilder; - // typedefs expected in iterators - using iterator_category = std::bidirectional_iterator_tag; - using difference_type = ptrdiff_t; - using value_type = boost::optional; - using pointer = const boost::optional*; - using reference = const boost::optional&; - - pointer operator->() const; - reference operator*() const; - - PendingIterator& operator++(); - PendingIterator operator++(int); - - PendingIterator& operator--(); - PendingIterator operator--(int); - - bool operator==(const PendingIterator& rhs) const; - bool operator!=(const PendingIterator& rhs) const; - - private: - PendingIterator(typename std::deque::const_iterator beginning, - typename std::deque::const_iterator it, - reference rleValue, - uint32_t rleCount); - - typename std::deque::const_iterator _begin; - typename std::deque::const_iterator _it; - - const boost::optional& _rleValue; - uint32_t _rleCount; - }; - - /** - * Forward iterators to read pending values - */ - PendingIterator begin() const; - PendingIterator end() const; - - /** - * Reverse iterators to read pending values - */ - std::reverse_iterator rbegin() const; - std::reverse_iterator rend() const; - - /** - * Set write callback - */ - void setWriteCallback(Simple8bWriteFn writer); - -private: - // Number of different type of selectors and their extensions available - static constexpr uint8_t kNumOfSelectorTypes = 4; - - /** - * This stores a value that has yet to be added to the buffer. 
It also stores the number of bits - * required to store the value for each selector extension type. Furthermore, it stores the - * number of trailing zeros that would be stored if this value was stored according to the - * respective selector type. The arrays are indexed using the same selector indexes as defined - * in the cpp file. - */ - struct PendingValue { - PendingValue() = default; - PendingValue(boost::optional val, - std::array bitCount, - std::array trailingZerosCount); - - bool isSkip() const { - return !val.has_value(); - } - - T value() const { - return val.value(); - } - - boost::optional val = T{0}; - std::array bitCount = {0, 0, 0, 0}; - // This is not the total number of trailing zeros, but the trailing zeros that will be - // stored given the selector chosen. - std::array trailingZerosCount = {0, 0, 0, 0}; - }; - - // The min number of meaningful bits each selector can store - static constexpr std::array kMinDataBits = {1, 2, 4, 4}; - /** - * Function objects to encode Simple8b blocks for the different extension types. - * - * See .cpp file for more information. - */ - struct BaseSelectorEncodeFunctor; - struct SevenSelectorEncodeFunctor; - - template - struct EightSelectorEncodeFunctor; - - struct EightSelectorSmallEncodeFunctor; - struct EightSelectorLargeEncodeFunctor; - - /** - * Appends a value to the Simple8b chain of words. - * Return true if successfully appended and false otherwise. - * - * 'tryRle' indicates if we are allowed to put this skip in RLE count or not. Should only be set - * to true when terminating RLE and we are flushing excess values. - */ - bool _appendValue(T value, bool tryRle); - - /** - * Appends a skip to _pendingValues and forms a new Simple8b word if there is no space. - * - * 'tryRle' indicates if we are allowed to put this value in RLE count or not. Should only be - * set to true when terminating RLE and we are flushing excess values. - */ - void _appendSkip(bool tryRle); - - /** - * When an RLE ends because of inconsecutive values, check if there are enough - * consecutive values for a RLE value and/or any values to be appended to _pendingValues. - */ - void _handleRleTermination(); - - /** - * Based on _rleCount, create a RLE Simple8b word if possible. - * If _rleCount is not large enough, do nothing. - */ - void _appendRleEncoding(); - - /* - * Checks to see if RLE is possible and/or ongoing - */ - bool _rlePossible() const; - - /** - * Tests if a value would fit inside the current simple8b word using any of the selectors - * selector. Returns true if adding the value fits in the current simple8b word and false - * otherwise. - */ - bool _doesIntegerFitInCurrentWord(const PendingValue& value); - - /* - * This is a helper method for testing if a given selector will allow an integer to fit in a - * simple8b word. Takes in a value to be stored and an extensionType representing the selector - * compression method to check. Returns true if the word fits and updates the global - * _lastValidExtensionType with the extensionType passed. If false, updates - * isSelectorPossible[extensionType] to false so we do not need to recheck that extension if we - * find a valid type and more values are added into the current word. - */ - bool _doesIntegerFitInCurrentWordWithGivenSelectorType(const PendingValue& value, - uint8_t extensionType); - - /** - * Encodes the largest possible simple8b word from _pendingValues without unused buckets using - * the selector compression method passed in extensionType. 
Assumes is always called right after - * _doesIntegerFitInCurrentWord fails for the first time. It removes the integers used to form - * the simple8b word from _pendingValues permanently and updates our global state with any - * remaining integers in _pendingValues. - */ - int64_t _encodeLargestPossibleWord(uint8_t extensionType); - - /** - * Takes a vector of integers to be compressed into a 64 bit word via the selector type given. - * The values will be stored from right to left in little endian order. - * For now, we will assume that all ints in the vector are greater or equal to zero. - * We will also assume that the selector and all values will fit into the 64 bit word. - * Returns the encoded Simple8b word if the inputs are valid and errCode otherwise. - */ - template - uint64_t _encode(Func func, uint8_t selectorIdx, uint8_t extensionType); - - /** - * Updates the simple8b current state with the passed parameters. The maximum is always taken - * between the current state and the new value passed. This is used to keep track of the size of - * the simple8b word that we will need to encode. - */ - void _updateSimple8bCurrentState(const PendingValue& val); - - // If RLE is ongoing, the number of consecutive repeats fo lastValueInPrevWord. - uint32_t _rleCount = 0; - // If RLE is ongoing, the last value in the previous Simple8b word. - PendingValue _lastValueInPrevWord; - - // These variables hold the max amount of bits for each value in _pendingValues. They are - // updated whenever values are added or removed from _pendingValues to always reflect the max - // value in the deque. - std::array _currMaxBitLen = kMinDataBits; - std::array _currTrailingZerosCount = {0, 0, 0, 0}; - - // This holds the last valid selector compression method that succeded for - // doesIntegerFitInCurrentWord and is used to designate the compression type when we need to - // write a simple8b word to buffer. - uint8_t _lastValidExtensionType = 0; - - // Holds whether the selector compression method is possible. This is updated in - // doesIntegerFitInCurrentWordWithSelector to avoid unnecessary calls when a selector is already - // invalid for the current set of words in _pendingValues. - std::array isSelectorPossible = {true, true, true, true}; - - // This holds values that have not be encoded to the simple8b buffer, but are waiting for a full - // simple8b word to be filled before writing to buffer. - std::deque _pendingValues; - - // User-defined callback to handle writing of finalized Simple-8b blocks - Simple8bWriteFn _writeFn; -}; - -/** - * Simple8b provides an interface to read Simple8b encoded data built by Simple8bBuilder above + * Simple8b provides an interface to read Simple8b encoded data built by Simple8bBuilder */ template class Simple8b { @@ -304,6 +55,8 @@ class Simple8b { public: friend class Simple8b; + Iterator() = default; + // typedefs expected in iterators using iterator_category = std::input_iterator_tag; using difference_type = ptrdiff_t; @@ -340,7 +93,19 @@ class Simple8b { bool operator==(const Iterator& rhs) const; bool operator!=(const Iterator& rhs) const; + /** + * Returns true if iterator can be incremented. Equivalent to comparing not equal with the + * end iterator. + */ + bool more() const; + + /** + * Returns true if iterator was instantiated with a valid memory block. 
+ */ + bool valid() const; + private: + Iterator(const char* end); Iterator(const char* pos, const char* end, const boost::optional& previous); /** @@ -354,8 +119,8 @@ class Simple8b { */ uint16_t _rleCountInCurrent(uint8_t selectorExtension) const; - const char* _pos; - const char* _end; + const char* _pos = nullptr; + const char* _end = nullptr; // Current Simple8b block in native endian uint64_t _current; @@ -366,10 +131,10 @@ class Simple8b { uint64_t _mask; // Remaining RLE count for repeating previous value - uint16_t _rleRemaining; + uint16_t _rleRemaining = 0; // Number of positions to shift the mask to get slot for current iterator position - uint8_t _shift; + uint8_t _shift = 0; // Number of bits in single Simple-8b slot, used to increment _shift when updating iterator // position @@ -395,6 +160,7 @@ class Simple8b { /** * Does not take ownership of buffer, must remain valid during the lifetime of this class. */ + Simple8b() = default; Simple8b(const char* buffer, int size, boost::optional previous = T{}); /** @@ -404,10 +170,168 @@ class Simple8b { Iterator end() const; private: - const char* _buffer; - int _size; + const char* _buffer = nullptr; + int _size = 0; // Previous value to be used in case the first block in the buffer is RLE. - boost::optional _previous; + boost::optional _previous = boost::none; }; +template +Simple8b::Iterator::Iterator(const char* end) + : _pos(end), _end(end), _rleRemaining(0), _shift(0) {} + +template +Simple8b::Iterator::Iterator(const char* pos, + const char* end, + const boost::optional& previous) + : _pos(pos), _end(end), _value(previous), _rleRemaining(0), _shift(0) { + _loadBlock(); +} + +template +void Simple8b::Iterator::_loadBlock() { + using namespace simple8b_internal; + + _current = ConstDataView(_pos).read>(); + + _selector = _current & kBaseSelectorMask; + uint8_t selectorExtension = ((_current >> kSelectorBits) & kBaseSelectorMask); + + // If RLE selector, just load remaining count. Keep value from previous. + if (_selector == kRleSelector) { + // Set shift to something larger than 64bit to force a new block to be loaded when + // we've extinguished RLE count. + _shift = (sizeof(_current) * 8) + 1; + _rleRemaining = _rleCountInCurrent(selectorExtension) - 1; + return; + } + + _extensionType = kBaseSelector; + uint8_t extensionBits = 0; + + // If Selectors 7 or 8 check if we are using extended selectors + if (_selector == 7 || _selector == 8) { + _extensionType = kSelectorToExtension[_selector - 7][selectorExtension]; + // Use the extended selector if extension is != 0 + if (_extensionType != kBaseSelector) { + _selector = selectorExtension; + // Make shift the size of 2 selectors to handle extensions + } + extensionBits = 4; + } + + // Initialize all variables needed to advance the iterator for this block + _mask = kDecodeMask[_extensionType][_selector]; + _countMask = kTrailingZerosMask[_extensionType]; + _countBits = kTrailingZeroBitSize[_extensionType]; + _countMultiplier = kTrailingZerosMultiplier[_extensionType]; + _bitsPerValue = kBitsPerIntForSelector[_extensionType][_selector] + _countBits; + _shift = kSelectorBits + extensionBits; + _rleRemaining = 0; + + // Finally load the first value in the block. 
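A short sketch of how the additions in this hunk might be used: the iterator is now default constructible, valid() reports whether it was bound to a memory block, and more() replaces an explicit comparison with end(). The helper function below is hypothetical.

```cpp
#include <cstdint>

#include "mongo/bson/util/simple8b.h"

uint64_t sumValues(const mongo::Simple8b<uint64_t>& s8b) {
    auto it = s8b.begin();
    if (!it.valid()) {
        return 0;  // s8b was default constructed without a buffer
    }
    uint64_t sum = 0;
    for (; it.more(); ++it) {
        if (*it) {
            sum += **it;  // engaged optional: a decoded value rather than a skip
        }
    }
    return sum;
}
```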
+ _loadValue(); +} + +template +void Simple8b::Iterator::_loadValue() { + // Mask out the value of current slot + auto shiftedMask = _mask << _shift; + uint64_t value = (_current & shiftedMask) >> _shift; + + // Check if this a skip + if (value == _mask) { + _value = boost::none; + return; + } + + // Shift in any trailing zeros that are stored in the count for extended selectors 7 and 8. + auto trailingZeros = (value & _countMask); + _value = static_cast((value >> _countBits)) << (trailingZeros * _countMultiplier); +} + +template +size_t Simple8b::Iterator::blockSize() const { + using namespace simple8b_internal; + + if (_selector == kRleSelector) { + uint8_t selectorExtension = (_current >> kSelectorBits) & kBaseSelectorMask; + return _rleCountInCurrent(selectorExtension); + } + return kIntsStoreForSelector[_extensionType][_selector]; +} + +template +uint16_t Simple8b::Iterator::_rleCountInCurrent(uint8_t selectorExtension) const { + using namespace simple8b_internal; + // SelectorExtension holds the rle count in this case + return (selectorExtension + 1) * kRleMultiplier; +} + +template +typename Simple8b::Iterator& Simple8b::Iterator::operator++() { + if (_rleRemaining > 0) { + --_rleRemaining; + return *this; + } + + _shift += _bitsPerValue; + if (_shift + _bitsPerValue > sizeof(_current) * 8) { + return advanceBlock(); + } + + _loadValue(); + return *this; +} + +template +typename Simple8b::Iterator& Simple8b::Iterator::advanceBlock() { + _pos += sizeof(uint64_t); + if (_pos == _end) { + _rleRemaining = 0; + _shift = 0; + return *this; + } + + _loadBlock(); + return *this; +} + +template +bool Simple8b::Iterator::operator==(const Simple8b::Iterator& rhs) const { + return _pos == rhs._pos && _rleRemaining == rhs._rleRemaining && _shift == rhs._shift; +} + +template +bool Simple8b::Iterator::operator!=(const Simple8b::Iterator& rhs) const { + return !operator==(rhs); +} + +template +bool Simple8b::Iterator::more() const { + return _pos != _end; +} + +template +bool Simple8b::Iterator::valid() const { + return _pos != nullptr; +} + +template +Simple8b::Simple8b(const char* buffer, int size, boost::optional previous) + : _buffer(buffer), _size(size), _previous(previous) {} + +template +typename Simple8b::Iterator Simple8b::begin() const { + if (_size == 0) { + return {_buffer}; + } + return {_buffer, _buffer + _size, _previous}; +} + +template +typename Simple8b::Iterator Simple8b::end() const { + return {_buffer + _size}; +} + } // namespace mongo diff --git a/src/mongo/bson/util/simple8b_bm.cpp b/src/mongo/bson/util/simple8b_bm.cpp index 892d542f55964..f0a65d3b3c11b 100644 --- a/src/mongo/bson/util/simple8b_bm.cpp +++ b/src/mongo/bson/util/simple8b_bm.cpp @@ -28,9 +28,16 @@ */ #include +#include +#include +#include +#include + +#include "mongo/bson/util/builder.h" #include "mongo/bson/util/simple8b.h" -#include "mongo/platform/bits.h" +#include "mongo/bson/util/simple8b_builder.h" +#include "mongo/util/shared_buffer.h" namespace mongo { diff --git a/src/mongo/bson/util/simple8b_builder.cpp b/src/mongo/bson/util/simple8b_builder.cpp new file mode 100644 index 0000000000000..74456ff5be506 --- /dev/null +++ b/src/mongo/bson/util/simple8b_builder.cpp @@ -0,0 +1,606 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/bson/util/simple8b_builder.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/util/simple8b_constants.h" +#include "mongo/platform/bits.h" +#include "mongo/platform/int128.h" + +namespace mongo { + +namespace { +using namespace simple8b_internal; + + +// Calculates number of bits needed to store value. Must be less than +// numeric_limits::max(). +uint8_t _countBitsWithoutLeadingZeros(uint64_t value) { + // All 1s is reserved for skip encoding so we add 1 to value to account for that case. + return 64 - countLeadingZerosNonZero64(value + 1); +} + +uint8_t _countTrailingZerosWithZero(uint64_t value) { + // countTrailingZeros64 returns 64 if the value is 0 but we consider this to be 0 trailing + // zeros. + return value == 0 ? 0 : countTrailingZerosNonZero64(value); +} + +uint8_t _countTrailingZerosWithZero(uint128_t value) { + uint64_t low = absl::Uint128Low64(value); + uint64_t high = absl::Uint128High64(value); + + // If value == 0 then we cannot add 64 + if (low == 0 && high != 0) { + return countTrailingZerosNonZero64(high) + 64; + } else { + return _countTrailingZerosWithZero(low); + } +} + +// Calculates number of bits needed to store value. Must be less than +// numeric_limits::max(). +uint8_t _countBitsWithoutLeadingZeros(uint128_t value) { + uint64_t high = absl::Uint128High64(value); + if (high == 0) { + uint64_t low = absl::Uint128Low64(value); + // We can't call _countBitsWithoutLeadingZeros() with numeric_limits::max as it + // would overflow and yield the wrong result. Just return the correct value instead. + if (low == std::numeric_limits::max()) + return 65; + return _countBitsWithoutLeadingZeros(low); + } else { + return 128 - countLeadingZerosNonZero64(high); + } +} + +/* + * This method takes a number of intsNeeded and an extensionType and returns the selector index for + * that type. This method should never fail as it is called when we are encoding a largest value. 
+ */ +uint8_t _getSelectorIndex(uint8_t intsNeeded, uint8_t extensionType) { + auto iteratorIdx = std::find_if( + kIntsStoreForSelector[extensionType].begin() + kMinSelector[extensionType], + kIntsStoreForSelector[extensionType].begin() + kMaxSelector[extensionType], + [intsNeeded](uint8_t intsPerSelectorIdx) { return intsNeeded >= intsPerSelectorIdx; }); + return iteratorIdx - kIntsStoreForSelector[extensionType].begin(); +} + +} // namespace + + +// This is called in _encode while iterating through _pendingValues. For the base selector, we just +// return val. Contains unsed vars in order to seamlessly integrate with seven and eight selector +// extensions. +template +struct Simple8bBuilder::BaseSelectorEncodeFunctor { + uint64_t operator()(const PendingValue& value) { + return static_cast(value.value()); + }; +}; + +// This is called in _encode while iterating through _pendingValues. It creates part of a simple8b +// word according to the specifications of the sevenSelector extension. This value is then appended +// to the full simple8b word in _encode. +template +struct Simple8bBuilder::SevenSelectorEncodeFunctor { + uint64_t operator()(const PendingValue& value) { + uint8_t trailingZeros = value.trailingZerosCount[kSevenSelector]; + uint64_t currWord = trailingZeros; + // We do two shifts here to account for the case where trailingZeros is > kTrailingZero bit + // size. If we subtracted this could lead to shift by a negative value which is undefined. + currWord |= static_cast((value.value() >> trailingZeros) + << kTrailingZeroBitSize[kSevenSelector]); + return currWord; + }; +}; + +// This is a helper functor that is extended by the EightSelectorSmall and EightSelectorLarge encode +// functors. It provides the logic for encoding with the eight selector where the extension type is +// designated by the inheritance in the EightSelectorSmall and EightSelectorLarge functors. +template +template +struct Simple8bBuilder::EightSelectorEncodeFunctor { + uint64_t operator()(const PendingValue& value) { + // integer division. We have a nibble shift of size 4 + uint8_t trailingZeros = value.trailingZerosCount[ExtensionType] / kNibbleShiftSize; + uint64_t currWord = trailingZeros; + // Shift to remove trailing zeros * 4 and then shift over for the 4 bits to hold + // the trailingZerosCount + currWord |= static_cast((value.value() >> (trailingZeros * kNibbleShiftSize)) + << kTrailingZeroBitSize[ExtensionType]); + return currWord; + } +}; + +// This is called in _encode while iterating through _pendingValues. It creates part of a simple8b +// word according to the specifications of the eightSelectorSmall extension. This value is then +// appended to the full simple8b word in _encode. +template +struct Simple8bBuilder::EightSelectorSmallEncodeFunctor + : public EightSelectorEncodeFunctor {}; + +// This is called in _encode while iterating through _pendingValues. It creates part of a simple8b +// word according to the specifications of the eightSelectorLarge extension. This value is then +// appended to the full simple8b word in _encode. 
+template +struct Simple8bBuilder::EightSelectorLargeEncodeFunctor + : public EightSelectorEncodeFunctor {}; + +// Base Constructor for PendingValue +template +Simple8bBuilder::PendingValue::PendingValue( + boost::optional val, + std::array bitCount, + std::array trailingZerosCount) + : val(val), bitCount(bitCount), trailingZerosCount(trailingZerosCount){}; + +template +Simple8bBuilder::PendingIterator::PendingIterator( + typename std::deque::const_iterator beginning, + typename std::deque::const_iterator it, + reference rleValue, + uint32_t rleCount) + : _begin(beginning), _it(it), _rleValue(rleValue), _rleCount(rleCount) {} + +template +auto Simple8bBuilder::PendingIterator::operator->() const -> pointer { + return &operator*(); +} + +template +auto Simple8bBuilder::PendingIterator::operator*() const -> reference { + if (_rleCount > 0) + return _rleValue; + + return _it->val; +} + +template +auto Simple8bBuilder::PendingIterator::operator++() -> PendingIterator& { + if (_rleCount > 0) { + --_rleCount; + return *this; + } + + ++_it; + return *this; +} + +template +auto Simple8bBuilder::PendingIterator::operator++(int) -> PendingIterator { + auto ret = *this; + ++(*this); + return ret; +} + +template +auto Simple8bBuilder::PendingIterator::operator--() -> PendingIterator& { + if (_rleCount > 0 || _it == _begin) { + ++_rleCount; + return *this; + } + + --_it; + return *this; +} + +template +auto Simple8bBuilder::PendingIterator::operator--(int) -> PendingIterator { + auto ret = *this; + --(*this); + return ret; +} + +template +bool Simple8bBuilder::PendingIterator::operator==( + const Simple8bBuilder::PendingIterator& rhs) const { + return _it == rhs._it && _rleCount == rhs._rleCount; +} + +template +bool Simple8bBuilder::PendingIterator::operator!=( + const Simple8bBuilder::PendingIterator& rhs) const { + return !operator==(rhs); +} + +template +Simple8bBuilder::Simple8bBuilder(Simple8bWriteFn writeFunc) : _writeFn(std::move(writeFunc)) {} + +template +Simple8bBuilder::~Simple8bBuilder() = default; + +template +bool Simple8bBuilder::append(T value) { + if (_rlePossible()) { + if (_lastValueInPrevWord.val == value) { + ++_rleCount; + return true; + } + _handleRleTermination(); + } + + return _appendValue(value, true); +} + +template +void Simple8bBuilder::skip() { + if (_rlePossible() && _lastValueInPrevWord.isSkip()) { + ++_rleCount; + return; + } + + _handleRleTermination(); + _appendSkip(true /* tryRle */); +} + +template +void Simple8bBuilder::flush() { + // Flush repeating integers that have been kept for RLE. + _handleRleTermination(); + // Flush buffered values in _pendingValues. + if (!_pendingValues.empty()) { + // always flush with the most recent valid selector. This value is the baseSelector if we + // have not have a valid selector yet. + do { + uint64_t simple8bWord = _encodeLargestPossibleWord(_lastValidExtensionType); + _writeFn(simple8bWord); + } while (!_pendingValues.empty()); + + // There are no more words in _pendingValues and RLE is possible. + // However the _rleCount is 0 because we have not read any of the values in the next word. + _rleCount = 0; + } + + // Always reset _lastValueInPrevWord. We may only start RLE after flush on 0 value. + _lastValueInPrevWord = {}; +} + +template +bool Simple8bBuilder::_appendValue(T value, bool tryRle) { + // Early exit if we try to store max value. They are not handled when counting zeros. 
+ if (value == std::numeric_limits::max()) + return false; + + uint8_t trailingZerosCount = _countTrailingZerosWithZero(value); + // Initially set every selector as invalid. + uint8_t bitCountWithoutLeadingZeros = _countBitsWithoutLeadingZeros(value); + uint8_t trailingZerosStoredInCountSeven = + (std::min(trailingZerosCount, kTrailingZerosMaxCount[kSevenSelector])); + uint8_t meaningfulValueBitsStoredWithSeven = + bitCountWithoutLeadingZeros - trailingZerosStoredInCountSeven; + // We use integer division to ensure that a multiple of 4 is stored in + // trailingZerosStoredInCount when we have the nibble shift. + uint8_t trailingZerosStoredInCountEightSmall = + (std::min(trailingZerosCount, kTrailingZerosMaxCount[kEightSelectorSmall]) / + kNibbleShiftSize) * + kNibbleShiftSize; + uint8_t meaningfulValueBitsStoredWithEightSmall = + bitCountWithoutLeadingZeros - trailingZerosStoredInCountEightSmall; + // We use integer division to ensure that a multiple of 4 is stored in + // trailingZerosStoredInCount when we have the nibble shift. + uint8_t trailingZerosStoredInCountEightLarge = + (std::min(trailingZerosCount, kTrailingZerosMaxCount[kEightSelectorLarge]) / + kNibbleShiftSize) * + kNibbleShiftSize; + uint8_t meaningfulValueBitsStoredWithEightLarge = + bitCountWithoutLeadingZeros - trailingZerosStoredInCountEightLarge; + + // Edge cases where we have the number of trailing zeros bits as all ones and we need to add a + // padded zero to the meaningful bits to avoid confilicts with skip storage. Otherwise, we can + // reuse the bitCountWithoutLeadingZeros already calculated above. + if (trailingZerosCount == kTrailingZerosMaxCount[kSevenSelector]) { + meaningfulValueBitsStoredWithSeven = + _countBitsWithoutLeadingZeros(value >> trailingZerosCount); + } else if (trailingZerosCount == kTrailingZerosMaxCount[kEightSelectorSmall]) { + meaningfulValueBitsStoredWithEightSmall = + _countBitsWithoutLeadingZeros(value >> trailingZerosCount); + } + + // This case is specifically for 128 bit types where we have 124 zeros or max zeros + // count. We do not need to even check this for 64 bit types + if constexpr (std::is_same::value) { + if (trailingZerosCount == kTrailingZerosMaxCount[kEightSelectorLarge]) { + meaningfulValueBitsStoredWithEightLarge = + _countBitsWithoutLeadingZeros(value >> trailingZerosCount); + } + } + + std::array zeroCount = {0, + trailingZerosStoredInCountSeven, + trailingZerosStoredInCountEightSmall, + trailingZerosStoredInCountEightLarge}; + + // Check if the amount of bits needed is more than we can store using all selector combinations. + if ((bitCountWithoutLeadingZeros > kDataBits[kBaseSelector]) && + (meaningfulValueBitsStoredWithSeven + kTrailingZeroBitSize[kSevenSelector] > + kDataBits[kSevenSelector]) && + (meaningfulValueBitsStoredWithEightSmall + kTrailingZeroBitSize[kEightSelectorSmall] > + kDataBits[kEightSelectorSmall]) && + (meaningfulValueBitsStoredWithEightLarge + kTrailingZeroBitSize[kEightSelectorLarge] > + kDataBits[kEightSelectorLarge])) { + return false; + } + + PendingValue pendingValue(value, + {bitCountWithoutLeadingZeros, + meaningfulValueBitsStoredWithSeven, + meaningfulValueBitsStoredWithEightSmall, + meaningfulValueBitsStoredWithEightLarge}, + zeroCount); + // Check if we have a valid selector for the current word. This method update the global + // isSelectorValid to avoid redundant computation. + if (_doesIntegerFitInCurrentWord(pendingValue)) { + // If the integer fits in the current word, add it. 
+ _pendingValues.push_back(pendingValue); + _updateSimple8bCurrentState(pendingValue); + } else { + // If the integer does not fit in the current word, convert the integers into simple8b + // word(s) with no unused buckets until the new value can be added to _pendingValues. Then + // add the Simple8b word(s) to the buffer. Finally add the new integer and update any global + // variables. We add based on the lastSelector that was valid where priority ordering is the + // following: base, seven, eightSmall, eightLarge. Store pending last value for RLE. + PendingValue lastPendingValue = _pendingValues.back(); + do { + uint64_t simple8bWord = _encodeLargestPossibleWord(_lastValidExtensionType); + _writeFn(simple8bWord); + } while (!(_doesIntegerFitInCurrentWord(pendingValue))); + + if (tryRle && _pendingValues.empty() && lastPendingValue.val == value) { + // There are no more words in _pendingValues and the last element of the last Simple8b + // word is the same as the new value. Therefore, start RLE. + _rleCount = 1; + _lastValueInPrevWord = lastPendingValue; + } else { + _pendingValues.push_back(pendingValue); + _updateSimple8bCurrentState(pendingValue); + } + } + return true; +} + +template +void Simple8bBuilder::_appendSkip(bool tryRle) { + if (!_pendingValues.empty()) { + bool isLastValueSkip = _pendingValues.back().isSkip(); + + // There is never a case where we need to write more than one Simple8b wrod + // because we only need 1 bit for skip + if (!_doesIntegerFitInCurrentWord({boost::none, kMinDataBits, {0, 0, 0, 0}})) { + // Form simple8b word if skip can not fit with last selector + uint64_t simple8bWord = _encodeLargestPossibleWord(_lastValidExtensionType); + _writeFn(simple8bWord); + _lastValidExtensionType = kBaseSelector; + } + + if (_pendingValues.empty() && isLastValueSkip && tryRle) { + // It is possible to start rle + _rleCount = 1; + _lastValueInPrevWord = {boost::none, {0, 0, 0, 0}, {0, 0, 0, 0}}; + return; + } + } + // Push true into skip and the dummy value, 0, into currNum. We use the dummy value, 0 because + // it takes 1 bit and it will not affect our global curr bit length calculations. + _pendingValues.push_back({boost::none, {0, 0, 0, 0}, {0, 0, 0, 0}}); +} + +template +void Simple8bBuilder::_handleRleTermination() { + if (_rleCount == 0) + return; + + // Try to create a RLE Simple8b word. + _appendRleEncoding(); + // Add any values that could not be encoded in RLE. + while (_rleCount > 0) { + if (_lastValueInPrevWord.isSkip()) { + _appendSkip(false /* tryRle */); + } else { + _appendValue(_lastValueInPrevWord.value(), false); + } + --_rleCount; + } +} + +template +void Simple8bBuilder::_appendRleEncoding() { + // This encodes a value using rle. The selector is set as 15 and the count is added in the next + // 4 bits. The value is the previous value stored by simple8b or 0 if no previous value was + // stored. + auto createRleEncoding = [this](uint8_t count) { + uint64_t rleEncoding = kRleSelector; + // We will store (count - 1) during encoding and execute (count + 1) during decoding. + rleEncoding |= (count - 1) << kSelectorBits; + _writeFn(rleEncoding); + }; + + uint32_t count = _rleCount / kRleMultiplier; + // Check to make sure count is big enough for RLE encoding + if (count >= 1) { + while (count > kMaxRleCount) { + // If one RLE word is insufficient use multiple RLE words. 
+ createRleEncoding(kMaxRleCount); + count -= kMaxRleCount; + } + createRleEncoding(count); + _rleCount %= kRleMultiplier; + } +} + +template +bool Simple8bBuilder::_rlePossible() const { + return _pendingValues.empty() || _rleCount != 0; +} + + +template +bool Simple8bBuilder::_doesIntegerFitInCurrentWord(const PendingValue& value) { + bool fitsInCurrentWord = false; + for (uint8_t i = 0; i < kNumOfSelectorTypes; ++i) { + if (isSelectorPossible[i]) { + fitsInCurrentWord = + fitsInCurrentWord || _doesIntegerFitInCurrentWordWithGivenSelectorType(value, i); + } + // Stop loop early if we find a valid selector. + if (fitsInCurrentWord) + return fitsInCurrentWord; + } + return false; +} + +template +bool Simple8bBuilder::_doesIntegerFitInCurrentWordWithGivenSelectorType( + const PendingValue& value, uint8_t extensionType) { + uint64_t numBitsWithValue = + (std::max(_currMaxBitLen[extensionType], value.bitCount[extensionType]) + + kTrailingZeroBitSize[extensionType]) * + (_pendingValues.size() + 1); + // If the numBitswithValue is greater than max bits or we cannot fit the trailingZeros we update + // this selector as false and return false. Special case for baseSelector where we never add + // trailingZeros so we always pass the zeros comparison. + if (kDataBits[extensionType] < numBitsWithValue) { + isSelectorPossible[extensionType] = false; + return false; + } + // Update so we remember the last validExtensionType when its time to encode a word + _lastValidExtensionType = extensionType; + return true; +} + +template +int64_t Simple8bBuilder::_encodeLargestPossibleWord(uint8_t extensionType) { + // Since this is always called right after _doesIntegerFitInCurrentWord fails for the first + // time, we know all values in _pendingValues fits in the slots for the selector that can store + // this many values. Find the smallest selector that doesn't leave any unused slots. 
+ uint8_t selector = _getSelectorIndex(_pendingValues.size(), extensionType); + uint8_t integersCoded = kIntsStoreForSelector[extensionType][selector]; + uint64_t encodedWord; + switch (extensionType) { + case kEightSelectorSmall: + encodedWord = _encode(EightSelectorSmallEncodeFunctor(), selector, extensionType); + break; + case kEightSelectorLarge: + encodedWord = _encode(EightSelectorLargeEncodeFunctor(), selector, extensionType); + break; + case kSevenSelector: + encodedWord = _encode(SevenSelectorEncodeFunctor(), selector, extensionType); + break; + default: + encodedWord = _encode(BaseSelectorEncodeFunctor(), selector, extensionType); + } + + _pendingValues.erase(_pendingValues.begin(), _pendingValues.begin() + integersCoded); + _currMaxBitLen = kMinDataBits; + for (const auto& val : _pendingValues) { + _updateSimple8bCurrentState(val); + } + // Reset which selectors are possible to use for next word + isSelectorPossible.fill(true); + return encodedWord; +} + +template +template +uint64_t Simple8bBuilder::_encode(Func func, uint8_t selectorIdx, uint8_t extensionType) { + uint8_t baseSelector = kExtensionToBaseSelector[extensionType][selectorIdx]; + uint8_t bitShiftExtension = kBaseSelectorToShiftSize[baseSelector]; + uint64_t encodedWord = baseSelector; + uint8_t bitsPerInteger = kBitsPerIntForSelector[extensionType][selectorIdx]; + uint8_t integersCoded = kIntsStoreForSelector[extensionType][selectorIdx]; + uint64_t unshiftedMask = kDecodeMask[extensionType][selectorIdx]; + uint8_t bitsForTrailingZeros = kTrailingZeroBitSize[extensionType]; + for (uint8_t i = 0; i < integersCoded; ++i) { + uint8_t shiftSize = + (bitsPerInteger + bitsForTrailingZeros) * i + kSelectorBits + bitShiftExtension; + uint64_t currEncodedWord; + if (_pendingValues[i].isSkip()) { + currEncodedWord = unshiftedMask; + } else { + currEncodedWord = func(_pendingValues[i]); + } + encodedWord |= currEncodedWord << shiftSize; + } + if (extensionType != kBaseSelector) { + encodedWord |= (uint64_t(selectorIdx) << kSelectorBits); + } + return encodedWord; +} + +template +void Simple8bBuilder::_updateSimple8bCurrentState(const PendingValue& val) { + for (uint8_t i = 0; i < kNumOfSelectorTypes; ++i) { + _currMaxBitLen[i] = std::max(_currMaxBitLen[i], val.bitCount[i]); + } +} + +template +typename Simple8bBuilder::PendingIterator Simple8bBuilder::begin() const { + return {_pendingValues.begin(), _pendingValues.begin(), _lastValueInPrevWord.val, _rleCount}; +} + +template +typename Simple8bBuilder::PendingIterator Simple8bBuilder::end() const { + return {_pendingValues.begin(), _pendingValues.end(), _lastValueInPrevWord.val, 0}; +} + +template +std::reverse_iterator::PendingIterator> Simple8bBuilder::rbegin() + const { + return std::reverse_iterator::PendingIterator>(end()); +} + +template +std::reverse_iterator::PendingIterator> Simple8bBuilder::rend() + const { + return std::reverse_iterator::PendingIterator>(begin()); +} + +template +void Simple8bBuilder::setWriteCallback(Simple8bWriteFn writer) { + _writeFn = std::move(writer); +} + +template class Simple8bBuilder; +template class Simple8bBuilder; +} // namespace mongo diff --git a/src/mongo/bson/util/simple8b_builder.h b/src/mongo/bson/util/simple8b_builder.h new file mode 100644 index 0000000000000..f8ef751f06f12 --- /dev/null +++ b/src/mongo/bson/util/simple8b_builder.h @@ -0,0 +1,306 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/util/builder.h" +#include "mongo/platform/int128.h" + +namespace mongo { + + +/** + * Callback type to implement writing of 64 bit Simple8b words. + */ +using Simple8bWriteFn = std::function; + +/** + * Simple8bBuilder compresses a series of integers into chains of 64 bit Simple8b blocks. + * + * T may be uint64_t and uint128_t only. + */ +template +class Simple8bBuilder { +private: + struct PendingValue; + +public: + // Callback to handle writing of finalized Simple-8b blocks. Machine Endian byte order, the + // value need to be converted to Little Endian before persisting. + Simple8bBuilder(Simple8bWriteFn writeFunc = nullptr); + ~Simple8bBuilder(); + + /** + * Appends val to Simple8b. Returns true if the append was successful and false if the value was + * outside the range of possible values we can store in Simple8b. + * + * A call to append may result in multiple Simple8b blocks being finalized. + */ + bool append(T val); + + /** + * Appends a missing value to Simple8b. + * + * May result in a single Simple8b being finalized. + */ + void skip(); + + /** + * Flushes all buffered values into finalized Simple8b blocks. + * + * It is allowed to continue to append values after this call. + */ + void flush(); + + /** + * Iterator for reading pending values in Simple8bBuilder that has not yet been written to + * Simple-8b blocks. 
+ * + * Provides bidirectional iteration + */ + class PendingIterator { + public: + friend class Simple8bBuilder; + // typedefs expected in iterators + using iterator_category = std::bidirectional_iterator_tag; + using difference_type = ptrdiff_t; + using value_type = boost::optional; + using pointer = const boost::optional*; + using reference = const boost::optional&; + + pointer operator->() const; + reference operator*() const; + + PendingIterator& operator++(); + PendingIterator operator++(int); + + PendingIterator& operator--(); + PendingIterator operator--(int); + + bool operator==(const PendingIterator& rhs) const; + bool operator!=(const PendingIterator& rhs) const; + + private: + PendingIterator(typename std::deque::const_iterator beginning, + typename std::deque::const_iterator it, + reference rleValue, + uint32_t rleCount); + + typename std::deque::const_iterator _begin; + typename std::deque::const_iterator _it; + + const boost::optional& _rleValue; + uint32_t _rleCount; + }; + + /** + * Forward iterators to read pending values + */ + PendingIterator begin() const; + PendingIterator end() const; + + /** + * Reverse iterators to read pending values + */ + std::reverse_iterator rbegin() const; + std::reverse_iterator rend() const; + + /** + * Set write callback + */ + void setWriteCallback(Simple8bWriteFn writer); + +private: + // Number of different type of selectors and their extensions available + static constexpr uint8_t kNumOfSelectorTypes = 4; + + /** + * This stores a value that has yet to be added to the buffer. It also stores the number of bits + * required to store the value for each selector extension type. Furthermore, it stores the + * number of trailing zeros that would be stored if this value was stored according to the + * respective selector type. The arrays are indexed using the same selector indexes as defined + * in the cpp file. + */ + struct PendingValue { + + PendingValue() = default; + PendingValue(boost::optional val, + std::array bitCount, + std::array trailingZerosCount); + + bool isSkip() const { + return !val.has_value(); + } + + T value() const { + return val.value(); + } + + boost::optional val = T{0}; + std::array bitCount = {0, 0, 0, 0}; + // This is not the total number of trailing zeros, but the trailing zeros that will be + // stored given the selector chosen. + std::array trailingZerosCount = {0, 0, 0, 0}; + }; + + // The min number of meaningful bits each selector can store + static constexpr std::array kMinDataBits = {1, 2, 4, 4}; + /** + * Function objects to encode Simple8b blocks for the different extension types. + * + * See .cpp file for more information. + */ + struct BaseSelectorEncodeFunctor; + struct SevenSelectorEncodeFunctor; + + template + struct EightSelectorEncodeFunctor; + + struct EightSelectorSmallEncodeFunctor; + struct EightSelectorLargeEncodeFunctor; + + /** + * Appends a value to the Simple8b chain of words. + * Return true if successfully appended and false otherwise. + * + * 'tryRle' indicates if we are allowed to put this skip in RLE count or not. Should only be set + * to true when terminating RLE and we are flushing excess values. + */ + bool _appendValue(T value, bool tryRle); + + /** + * Appends a skip to _pendingValues and forms a new Simple8b word if there is no space. + * + * 'tryRle' indicates if we are allowed to put this value in RLE count or not. Should only be + * set to true when terminating RLE and we are flushing excess values. 
+ */ + void _appendSkip(bool tryRle); + + /** + * When an RLE ends because of inconsecutive values, check if there are enough + * consecutive values for a RLE value and/or any values to be appended to _pendingValues. + */ + void _handleRleTermination(); + + /** + * Based on _rleCount, create a RLE Simple8b word if possible. + * If _rleCount is not large enough, do nothing. + */ + void _appendRleEncoding(); + + /* + * Checks to see if RLE is possible and/or ongoing + */ + bool _rlePossible() const; + + /** + * Tests if a value would fit inside the current simple8b word using any of the selectors + * selector. Returns true if adding the value fits in the current simple8b word and false + * otherwise. + */ + bool _doesIntegerFitInCurrentWord(const PendingValue& value); + + /* + * This is a helper method for testing if a given selector will allow an integer to fit in a + * simple8b word. Takes in a value to be stored and an extensionType representing the selector + * compression method to check. Returns true if the word fits and updates the global + * _lastValidExtensionType with the extensionType passed. If false, updates + * isSelectorPossible[extensionType] to false so we do not need to recheck that extension if we + * find a valid type and more values are added into the current word. + */ + bool _doesIntegerFitInCurrentWordWithGivenSelectorType(const PendingValue& value, + uint8_t extensionType); + + /** + * Encodes the largest possible simple8b word from _pendingValues without unused buckets using + * the selector compression method passed in extensionType. Assumes is always called right after + * _doesIntegerFitInCurrentWord fails for the first time. It removes the integers used to form + * the simple8b word from _pendingValues permanently and updates our global state with any + * remaining integers in _pendingValues. + */ + int64_t _encodeLargestPossibleWord(uint8_t extensionType); + + /** + * Takes a vector of integers to be compressed into a 64 bit word via the selector type given. + * The values will be stored from right to left in little endian order. + * For now, we will assume that all ints in the vector are greater or equal to zero. + * We will also assume that the selector and all values will fit into the 64 bit word. + * Returns the encoded Simple8b word if the inputs are valid and errCode otherwise. + */ + template + uint64_t _encode(Func func, uint8_t selectorIdx, uint8_t extensionType); + + /** + * Updates the simple8b current state with the passed parameters. The maximum is always taken + * between the current state and the new value passed. This is used to keep track of the size of + * the simple8b word that we will need to encode. + */ + void _updateSimple8bCurrentState(const PendingValue& val); + + // If RLE is ongoing, the number of consecutive repeats fo lastValueInPrevWord. + uint32_t _rleCount = 0; + // If RLE is ongoing, the last value in the previous Simple8b word. + PendingValue _lastValueInPrevWord; + + // These variables hold the max amount of bits for each value in _pendingValues. They are + // updated whenever values are added or removed from _pendingValues to always reflect the max + // value in the deque. + std::array _currMaxBitLen = kMinDataBits; + std::array _currTrailingZerosCount = {0, 0, 0, 0}; + + // This holds the last valid selector compression method that succeded for + // doesIntegerFitInCurrentWord and is used to designate the compression type when we need to + // write a simple8b word to buffer. 
+ uint8_t _lastValidExtensionType = 0; + + // Holds whether the selector compression method is possible. This is updated in + // doesIntegerFitInCurrentWordWithSelector to avoid unnecessary calls when a selector is already + // invalid for the current set of words in _pendingValues. + std::array isSelectorPossible = {true, true, true, true}; + + // This holds values that have not be encoded to the simple8b buffer, but are waiting for a full + // simple8b word to be filled before writing to buffer. + std::deque _pendingValues; + + // User-defined callback to handle writing of finalized Simple-8b blocks + Simple8bWriteFn _writeFn; +}; + +} // namespace mongo diff --git a/src/mongo/bson/util/simple8b_constants.h b/src/mongo/bson/util/simple8b_constants.h new file mode 100644 index 0000000000000..8cedf7d79ecaa --- /dev/null +++ b/src/mongo/bson/util/simple8b_constants.h @@ -0,0 +1,225 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +namespace mongo::simple8b_internal { +/* + * Simple8B is a compression method for storing unsigned int 64 values. In this case + * we make a few optimizations detailed below. We reserve the 4 lsbs for a baseSelector value. And + * then we encode integers based on the following selector choice: + * + * Selector value: 0 | 1 2 3 4 5 6 7 8 9 10 11 12 13 14 | 15 (RLE) + * Integers coded: 0 | 60 30 20 15 12 10 8 7 6 5 4 3 2 1 | up to 1920 + * Value Bits/integer: 0 | 1 2 3 4 5 6 7 8 10 12 15 20 30 60 | Last Value added + * Wasted bits: 0 | 0 0 0 0 0 0 4 4 0 0 0 0 0 0 | 56 + * Total Bits/Integer: 0 | 1 2 3 4 5 6 7 8 10 12 15 20 30 60 | Last Valued added + * + * However, we make optimizations for selector value 7 and 8. We can see there are 4 + * wasted trailing bits. Using these 4 bits we can consider compression of trailing zeros. + * For a selector extension value of 7, we store 4 bits and these represent up to 15 trailing zeros. 
+ * The extension bits are stored directly after the initial selector bits so that the simple8b word + * looks like: | Base Selector (0-3) | Selector Extension (4-7) | Bits for Values (8 - 63) + * + * Selector Value: 0 | 7 7 7 7 7 7 7 7 7 + * Selector 7 Extension Value: 0 | 1 2 3 4 5 6 7 8 9 + * Value Bits/Integer: 0 | 2 3 4 5 7 10 14 24 52 + * TrailingZeroBits: 0 | 4 4 4 4 4 4 4 4 4 + * MaxTrailingZeroSize: 0 |15 15 15 15 15 15 15 15 15 + * Total Bits/Integer: 0 | 6 7 8 9 11 14 18 28 56 + * + * Additionally, we consider larger trailing zero counts in selector 8. In this case the value + * of the trailing zero bits is multiplied by a nibble shift of 4. We consider trailing zero sizes + * of both 4 and 5 bits and thus, we split selector 8 in our implementation into Selector8Small and + * Selector8Large + * + * Selector Value: 0 | 8 8 8 8 8 8 8 8 8 8 8 8 8 + * Selector 8 Extension Value: 0 | 1 2 3 4 5 6 7 8 9 10 11 12 13 + * Value Bits/Integer: 0 | 4 5 7 10 14 24 52 4 6 9 13 23 51 + * TrailingZeroBits: 0 | 4 4 4 4 4 4 4 5 5 5 5 5 5 + * MaxTrailingZerosSize: 0 |60 60 60 60 60 60 60 124 124 124 124 124 124 + * Total Bits/Integer: 0 | 8 9 11 14 18 28 56 9 11 14 18 28 56 + * + * The simple8b words are according to this spec of selectors and their extension types. + */ + +// Map selectorNames to their indexs. +static constexpr uint8_t kBaseSelector = 0; +static constexpr uint8_t kSevenSelector = 1; +static constexpr uint8_t kEightSelectorSmall = 2; +static constexpr uint8_t kEightSelectorLarge = 3; + +// Variables to handle RLE +static constexpr uint8_t kRleSelector = 15; +static constexpr uint8_t kMaxRleCount = 16; +static constexpr uint8_t kRleMultiplier = 120; + +// Mask to obtain the base and extended selectors. +static constexpr uint64_t kBaseSelectorMask = 0x000000000000000F; + +// Selectors are always of size 4 +static constexpr uint8_t kSelectorBits = 4; + +// Nibble Shift is always of size 4 +static constexpr uint8_t kNibbleShiftSize = 4; + +// The max selector value for each extension +constexpr std::array kMaxSelector = {14, 9, 7, 13}; + +// The min selector value for each extension +constexpr std::array kMinSelector = {1, 1, 1, 8}; + +// The max amount of data bits each selector type can store. This is the amount of bits in the 64bit +// word that are not used for selector values. +constexpr std::array kDataBits = {60, 56, 56, 56}; + +// The amount of bits allocated to store a set of trailing zeros +constexpr std::array kTrailingZeroBitSize = {0, 4, 4, 5}; + +// The amount of possible trailing zeros each selector can handle in the trailingZeroBitSize +constexpr std::array kTrailingZerosMaxCount = {0, 15, 60, 124}; + +// Obtain a mask for the trailing zeros for the seven and eight selectors. We shift 4 and 5 bits to +// create the mask The trailingZeroBitSize variable is used as an index, but must be shifted - 4 to +// correspond to indexes 0 and 1. +constexpr std::array kTrailingZerosMask = { + 0, (1ull << 4) - 1, (1ull << 4) - 1, (1ull << 5) - 1}; + +// The amount of zeros each value in the trailing zero count represents +constexpr std::array kTrailingZerosMultiplier = { + 0, 1, kNibbleShiftSize, kNibbleShiftSize}; + +// Transfer from the base selector to the shift size. +constexpr std::array kBaseSelectorToShiftSize = { + 0, 0, 0, 0, 0, 0, 0, 4, 4, 0, 0, 0, 0, 0}; + +// Transfer from a selector to a specific extension type +// This is for selector 7 and 8 extensions where the selector value is passed along with +// selector index. 
+constexpr std::array, 2> kSelectorToExtension = { + std::array{0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, + std::array{0, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3}}; + +// Transfer from a extensionType and selectorIdx to the selector value to be held in the 4 lsb (base +// selector) +constexpr std::array, 4> kExtensionToBaseSelector = { + std::array{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + std::array{7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7}, + std::array{8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}, + std::array{8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8}}; + + +// A mask to obtain the value bits for each selector including the trailing zero bits. The masks are +// calculated as the following: Mask = 2^(kBitsPerInteger+kTrailingZeroBitSize) - 1 +constexpr std::array, 4> kDecodeMask = { + std::array{0, + 1, + (1ull << 2) - 1, + (1ull << 3) - 1, + (1ull << 4) - 1, + (1ull << 5) - 1, + (1ull << 6) - 1, + (1ull << 7) - 1, + (1ull << 8) - 1, + (1ull << 10) - 1, + (1ull << 12) - 1, + (1ull << 15) - 1, + (1ull << 20) - 1, + (1ull << 30) - 1, + (1ull << 60) - 1, + 1}, + std::array{0, + (1ull << 6) - 1, + (1ull << 7) - 1, + (1ull << 8) - 1, + (1ull << 9) - 1, + (1ull << 11) - 1, + (1ull << 14) - 1, + (1ull << 18) - 1, + (1ull << 28) - 1, + (1ull << 56) - 1, + 0, + 0, + 0, + 0, + 0, + 0}, + std::array{0, + (1ull << 8) - 1, + (1ull << 9) - 1, + (1ull << 11) - 1, + (1ull << 14) - 1, + (1ull << 18) - 1, + (1ull << 28) - 1, + (1ull << 56) - 1, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0}, + std::array{ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + (1ull << 9) - 1, + (1ull << 11) - 1, + (1ull << 14) - 1, + (1ull << 18) - 1, + (1ull << 28) - 1, + (1ull << 56) - 1, + 0, + 0}}; + +// The number of meaningful bits for each selector. This does not include any trailing zero bits. +// We use 64 bits for all invalid selectors, this is to make sure iteration does not get stuck. +constexpr std::array, 4> kBitsPerIntForSelector = { + std::array{64, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 15, 20, 30, 60, 64}, + std::array{64, 2, 3, 4, 5, 7, 10, 14, 24, 52, 64, 64, 64, 64, 64, 64}, + std::array{64, 4, 5, 7, 10, 14, 24, 52, 0, 0, 64, 64, 64, 64, 64, 64}, + std::array{64, 0, 0, 0, 0, 0, 0, 0, 4, 6, 9, 13, 23, 51, 64, 64}}; + +// The number of integers coded for each selector. 
+constexpr std::array, 4> kIntsStoreForSelector = { + std::array{0, 60, 30, 20, 15, 12, 10, 8, 7, 6, 5, 4, 3, 2, 1, 0}, + std::array{0, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0}, + std::array{0, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, + std::array{0, 0, 0, 0, 0, 0, 0, 0, 6, 5, 4, 3, 2, 1, 0, 0}}; + +} // namespace mongo::simple8b_internal diff --git a/src/mongo/bson/util/simple8b_test.cpp b/src/mongo/bson/util/simple8b_test.cpp index a1c5f9d1ee595..fe048239b3cb4 100644 --- a/src/mongo/bson/util/simple8b_test.cpp +++ b/src/mongo/bson/util/simple8b_test.cpp @@ -28,25 +28,48 @@ */ #include "mongo/bson/util/simple8b.h" -#include "mongo/unittest/unittest.h" +#include +#include +#include +#include #include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/simple8b_builder.h" +#include "mongo/platform/int128.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/shared_buffer.h" + using namespace mongo; template void assertValuesEqual(const Simple8b& actual, const std::vector>& expected) { auto it = actual.begin(); auto end = actual.end(); - size_t i = 0; + ASSERT_TRUE(it.valid()); + size_t i = 0; for (; i < expected.size() && it != end; ++i, ++it) { ASSERT_EQ(*it, expected[i]); + ASSERT_TRUE(it.more()); } ASSERT(it == end); ASSERT_EQ(i, expected.size()); + ASSERT_TRUE(it.valid()); + ASSERT_FALSE(it.more()); } template @@ -97,6 +120,13 @@ TEST(Simple8b, NoValues) { testSimple8b(expectedInts, expectedBinary); } +TEST(Simple8b, Null) { + Simple8b s8b; + ASSERT_FALSE(s8b.begin().valid()); + ASSERT_FALSE(s8b.begin().more()); + ASSERT(s8b.begin() == s8b.end()); +} + TEST(Simple8b, OnlySkip) { std::vector> expectedInts = {boost::none}; diff --git a/src/mongo/bson/util/simple8b_type_util.cpp b/src/mongo/bson/util/simple8b_type_util.cpp index 1625148319d52..4243bd6599ce3 100644 --- a/src/mongo/bson/util/simple8b_type_util.cpp +++ b/src/mongo/bson/util/simple8b_type_util.cpp @@ -29,11 +29,20 @@ #include "mongo/bson/util/simple8b_type_util.h" +#include +#include +#include +#include +#include +#include + +#include +#include + #include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" #include "mongo/bson/bsonelement.h" -#include - namespace mongo { namespace { int128_t encodeCharArray(const char (&arr)[16]) { @@ -43,34 +52,6 @@ int128_t encodeCharArray(const char (&arr)[16]) { } } // namespace -uint64_t Simple8bTypeUtil::encodeInt64(int64_t val) { - return (static_cast(val) << 1) ^ (val >> 63); -} - -int64_t Simple8bTypeUtil::decodeInt64(uint64_t val) { - return (val >> 1) ^ (~(val & 1) + 1); -} - -uint128_t Simple8bTypeUtil::encodeInt128(int128_t val) { -// The Abseil right shift implementation on signed int128 is not correct as an arithmetic shift in -// their non-intrinsic implementation. When we detect this case we replace the right arithmetic -// shift of 127 positions that needs to produce 0xFF..FF or 0x00..00 depending on the sign bit. We -// take the high 64 bits and performing a right arithmetic shift 63 positions which produces -// 0xFF..FF if the sign bit is set and 0x00..00 otherwise. We can then use this value in both the -// high and low components of int128 to produce the value that we need. 
-#if defined(ABSL_HAVE_INTRINSIC_INT128) - return (static_cast(val) << 1) ^ (val >> 127); -#else - // get signed bit - uint64_t component = absl::Int128High64(val) >> 63; - return (static_cast(val) << 1) ^ absl::MakeUint128(component, component); -#endif -} - -int128_t Simple8bTypeUtil::decodeInt128(uint128_t val) { - return static_cast((val >> 1) ^ (~(val & 1) + 1)); -} - int64_t Simple8bTypeUtil::encodeObjectId(const OID& oid) { uint64_t encoded = 0; uint8_t* encodedBytes = reinterpret_cast(&encoded); diff --git a/src/mongo/bson/util/simple8b_type_util.h b/src/mongo/bson/util/simple8b_type_util.h index 00a5e826a6087..feddb70bb6f50 100644 --- a/src/mongo/bson/util/simple8b_type_util.h +++ b/src/mongo/bson/util/simple8b_type_util.h @@ -27,14 +27,19 @@ * it in the license file. */ -#include "mongo/bson/oid.h" -#include "mongo/platform/decimal128.h" -#include "mongo/platform/int128.h" - +#include +#include #include #include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/platform/decimal128.h" +#include "mongo/platform/int128.h" + namespace mongo { /* @@ -49,10 +54,32 @@ class Simple8bTypeUtil { // store as an unsigned integer // the most significant bit position to the least significant bit and call simple8b as an // unsigned integer. - static uint64_t encodeInt64(int64_t val); - static int64_t decodeInt64(uint64_t val); - static uint128_t encodeInt128(int128_t val); - static int128_t decodeInt128(uint128_t val); + static uint64_t encodeInt64(int64_t val) { + return (static_cast(val) << 1) ^ (val >> 63); + } + static int64_t decodeInt64(uint64_t val) { + return (val >> 1) ^ (~(val & 1) + 1); + } + static uint128_t encodeInt128(int128_t val) { + // The Abseil right shift implementation on signed int128 is not correct as an arithmetic + // shift in their non-intrinsic implementation. When we detect this case we replace the + // right arithmetic shift of 127 positions that needs to produce 0xFF..FF or 0x00..00 + // depending on the sign bit. We take the high 64 bits and performing a right arithmetic + // shift 63 positions which produces 0xFF..FF if the sign bit is set and 0x00..00 otherwise. + // We can then use this value in both the high and low components of int128 to produce the + // value that we need. +#if defined(ABSL_HAVE_INTRINSIC_INT128) + return (static_cast(val) << 1) ^ (val >> 127); +#else + // get signed bit + uint64_t component = absl::Int128High64(val) >> 63; + return (static_cast(val) << 1) ^ absl::MakeUint128(component, component); +#endif + } + + static int128_t decodeInt128(uint128_t val) { + return static_cast((val >> 1) ^ (~(val & 1) + 1)); + } // These methods are for encoding OID with simple8b. The unique identifier is not part of // the encoded integer and must thus be provided when decoding. 
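[Editor's note] The `encodeInt64`/`decodeInt64` pair moved inline into `simple8b_type_util.h` above implements the usual zig-zag mapping: signed values are interleaved so that small-magnitude integers, positive or negative, become small unsigned integers, which Simple8b can then pack into few bits. A minimal standalone sketch of the same arithmetic (independent of the MongoDB headers, using the formulas shown in the diff):

```cpp
#include <cassert>
#include <cstdint>

// Same zig-zag formulas as Simple8bTypeUtil::encodeInt64/decodeInt64 above:
// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ...
uint64_t zigZagEncode(int64_t val) {
    return (static_cast<uint64_t>(val) << 1) ^ (val >> 63);
}

int64_t zigZagDecode(uint64_t val) {
    return (val >> 1) ^ (~(val & 1) + 1);
}

int main() {
    assert(zigZagEncode(0) == 0);
    assert(zigZagEncode(-1) == 1);
    assert(zigZagEncode(1) == 2);
    assert(zigZagEncode(-2) == 3);

    const int64_t vals[] = {0, -1, 1, -123456789, 123456789};
    for (int64_t v : vals) {
        assert(zigZagDecode(zigZagEncode(v)) == v);  // round-trips exactly
    }
    return 0;
}
```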
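[Editor's note] A companion sketch of the base-selector word layout described in `simple8b_constants.h` above: the low 4 bits hold the selector and the remaining 60 bits are split among the packed values, with values stored right to left starting just above the selector nibble. The concrete numbers (selector 12 stores 3 integers of 20 bits each) come from the tables in that header; the packing code here is illustrative, not the library's encoder.

```cpp
#include <cassert>
#include <cstdint>

int main() {
    const uint64_t kSelectorBits = 4;
    const uint64_t kSelector = 12;   // per the table: 3 slots of 20 bits each
    const uint64_t kBitsPerInt = 20;

    uint64_t values[3] = {1, 2, 3};

    // Pack: each value lands at kSelectorBits + i * kBitsPerInt, mirroring the
    // base-selector shift described for _encode in the builder above.
    uint64_t word = kSelector;
    for (int i = 0; i < 3; ++i) {
        word |= values[i] << (kSelectorBits + i * kBitsPerInt);
    }

    // Unpack and verify the round trip.
    assert((word & 0xF) == kSelector);  // the low nibble is the selector (kBaseSelectorMask)
    for (int i = 0; i < 3; ++i) {
        uint64_t mask = (1ull << kBitsPerInt) - 1;  // kDecodeMask entry for selector 12
        uint64_t decoded = (word >> (kSelectorBits + i * kBitsPerInt)) & mask;
        assert(decoded == values[i]);
    }

    // An RLE word instead uses selector 15: its 4-bit count field stores (count - 1) and each
    // count unit represents 120 repeats of the previous value, so one RLE word covers up to
    // 16 * 120 = 1920 repeats, matching the "up to 1920" column in the selector table.
    return 0;
}
```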
diff --git a/src/mongo/bson/util/simple8b_type_util_test.cpp b/src/mongo/bson/util/simple8b_type_util_test.cpp index c8775cdfd3bcd..e0c3a086a4862 100644 --- a/src/mongo/bson/util/simple8b_type_util_test.cpp +++ b/src/mongo/bson/util/simple8b_type_util_test.cpp @@ -28,9 +28,18 @@ */ #include "mongo/bson/util/simple8b_type_util.h" -#include "mongo/unittest/unittest.h" +#include +#include #include +#include + +#include +#include + +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using namespace mongo; diff --git a/src/mongo/client/README.md b/src/mongo/client/README.md index b79a397125465..4bc8afea3c7e7 100644 --- a/src/mongo/client/README.md +++ b/src/mongo/client/README.md @@ -1,13 +1,37 @@ # Internal Client ## Replica set monitoring and host targeting -The internal client driver responsible for routing a command request to a replica set must determine which member to target. Host targeting involves finding which nodes in a topology satisfy the $readPreference. Node eligibility depends on the type of a node (i.e primary, secondary, etc.) and its average network latency round-trip-time (RTT). For example, { $readPreference: secondary } requires the client to know which nodes are secondaries and, of those nodes, which nodes fit within a delta of the node with the smallest RTT. A FailedToSatisfyReadPreference error occurs when there is a host selection time out and no eligible nodes are found. +The internal client driver responsible for routing a command request to a replica set must determine +which member to target. Host targeting involves finding which nodes in a topology satisfy the +$readPreference. Node eligibility depends on the type of a node (i.e primary, secondary, etc.) and +its average network latency round-trip-time (RTT). For example, { $readPreference: secondary } +requires the client to know which nodes are secondaries and, of those nodes, which nodes fit within +a delta of the node with the smallest RTT. A FailedToSatisfyReadPreference error occurs when there +is a host selection time out and no eligible nodes are found. -Nodes in a topology are discovered and monitored through replica set monitoring. Replica set monitoring entails periodically refreshing the local view of topologies for which the client needs to perform targeting. The client has a ReplicaSetMonitor for each replica set it needs to target in the cluster. So, if a mongos needs to target 2 shards for a query, it either has or creates a ReplicaSetMonitor for each of the corresponding shards. +Nodes in a topology are discovered and monitored through replica set monitoring. Replica set +monitoring entails periodically refreshing the local view of topologies for which the client needs +to perform targeting. The client has a ReplicaSetMonitor for each replica set it needs to target in +the cluster. So, if a mongos needs to target 2 shards for a query, it either has or creates a +ReplicaSetMonitor for each of the corresponding shards. -The ReplicaSetMonitorInterface supports the replica set monitoring protocol. The replica set monitoring protocol supports the "awaitable hello" command feature and abides by the Server Discovery and Monitoring (SDAM) specifications. The "awaitable hello" command feature allows the isMaster/hello command to wait for a significant topology change or timeout before replying. For more information about why MongoDB supports both hello and isMaster, please refer to the Replication Arch Guide. 
Two different versions of the protocol are supported - "sdam", which does not support awaitable hello with exhaust, and "streamable", which does support exhaust and is on by default. Clients who enable the awaitable hello (with or without exhaust) will learn much sooner about stepdowns, elections, reconfigs, and other events. +The ReplicaSetMonitorInterface supports the replica set monitoring protocol. The replica set +monitoring protocol supports the "awaitable hello" command feature and abides by the Server +Discovery and Monitoring (SDAM) specifications. The "awaitable hello" command feature allows the +isMaster/hello command to wait for a significant topology change or timeout before replying. For +more information about why MongoDB supports both hello and isMaster, please refer to the Replication +Arch Guide. Two different versions of the protocol are supported - "sdam", which does not support +awaitable hello with exhaust, and "streamable", which does support exhaust and is on by default. +Clients who enable the awaitable hello (with or without exhaust) will learn much sooner about +stepdowns, elections, reconfigs, and other events. -In the streamable protocol, the StreamableReplicaSetMonitor is used to gather and maintain information regarding the client's local topology description. The topology description holds the learned states of each member in the replica set. Since the new protocol supports exhaust, the RTT is measured by sending a 'ping' to each node in the topology at a fixed frequency rather than through the isMaster response latency. Aside from the RTT, the remaining information for satisfying read preferences is gathered through awaitable isMaster commands asynchronously sent to each node in the topology. +In the streamable protocol, the StreamableReplicaSetMonitor is used to gather and maintain +information regarding the client's local topology description. The topology description holds the +learned states of each member in the replica set. Since the new protocol supports exhaust, the RTT +is measured by sending a 'ping' to each node in the topology at a fixed frequency rather than +through the hello response latency. Aside from the RTT, the remaining information for satisfying +read preferences is gathered through awaitable hello commands asynchronously sent to each node in +the topology. 
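[Editor's note] The streamable protocol described above keeps an awaitable "hello" request outstanding against each node; the server replies early only when its topology changes or the wait period expires. The following is a rough sketch of the shape of such a request built with BSONObjBuilder. The `topologyVersion` and `maxAwaitTimeMS` field names follow the awaitable-hello wire protocol, and `makeAwaitableHelloRequest`/`lastTopologyVersion` are hypothetical names for illustration, not functions in this patch.

```cpp
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {

// Hedged sketch: approximate shape of an awaitable "hello" command as described above.
// 'lastTopologyVersion' stands in for the topologyVersion document returned by the
// previous hello response; it is not a real variable in the server code.
BSONObj makeAwaitableHelloRequest(const BSONObj& lastTopologyVersion) {
    BSONObjBuilder bob;
    bob.append("hello", 1);
    // Echo back the last seen topology version so the server can block until it changes.
    bob.append("topologyVersion", lastTopologyVersion);
    // Upper bound on how long the server may wait before replying anyway.
    bob.append("maxAwaitTimeMS", 10000);
    return bob.obj();
}

}  // namespace mongo
```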
#### Code references diff --git a/src/mongo/client/SConscript b/src/mongo/client/SConscript index 6bc147b7c90f7..abd741193499d 100644 --- a/src/mongo/client/SConscript +++ b/src/mongo/client/SConscript @@ -85,7 +85,7 @@ saslClientSource = [ 'sasl_client_conversation.cpp', 'sasl_client_session.cpp', 'sasl_oidc_client_conversation.cpp', - 'sasl_oidc_client_params.idl', + 'sasl_oidc_client_types.idl', 'sasl_plain_client_conversation.cpp', 'sasl_scram_client_conversation.cpp', ] diff --git a/src/mongo/client/async_client.cpp b/src/mongo/client/async_client.cpp index 7f4406007549d..93aa3294c06a0 100644 --- a/src/mongo/client/async_client.cpp +++ b/src/mongo/client/async_client.cpp @@ -28,34 +28,57 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/client/async_client.h" - +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/async_client.h" #include "mongo/client/authenticate.h" +#include "mongo/client/internal_auth.h" #include "mongo/client/sasl_client_authenticate.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/sasl_command_constants.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/connection_health_metrics_parameter_gen.h" -#include "mongo/db/dbmessage.h" -#include "mongo/db/server_feature_flags_gen.h" #include "mongo/db/server_options.h" #include "mongo/db/wire_version.h" #include "mongo/executor/egress_tag_closer_manager.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/log_severity_suppressor.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/factory.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/rpc/protocol.h" #include "mongo/rpc/reply_interface.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/net/ssl_manager.h" #include "mongo/util/net/ssl_peer_info.h" +#include "mongo/util/net/ssl_types.h" +#include "mongo/util/str.h" #include "mongo/util/version.h" @@ -66,12 +89,8 @@ MONGO_FAIL_POINT_DEFINE(pauseBeforeMarkKeepOpen); MONGO_FAIL_POINT_DEFINE(alwaysLogConnAcquisitionToWireTime) namespace { -bool connHealthMetricsEnabled() { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
- return gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe(); -} CounterMetric totalTimeForEgressConnectionAcquiredToWireMicros( - "network.totalTimeForEgressConnectionAcquiredToWireMicros", connHealthMetricsEnabled); + "network.totalTimeForEgressConnectionAcquiredToWireMicros"); } // namespace Future AsyncDBClient::connect( @@ -91,17 +110,17 @@ Future AsyncDBClient::connect( }); } -BSONObj AsyncDBClient::_buildIsMasterRequest(const std::string& appName, - executor::NetworkConnectionHook* hook) { +BSONObj AsyncDBClient::_buildHelloRequest(const std::string& appName, + executor::NetworkConnectionHook* hook) { BSONObjBuilder bob; - bob.append("isMaster", 1); + bob.append("hello", 1); const auto versionString = VersionInfoInterface::instance().version(); ClientMetadata::serialize(appName, versionString, &bob); if (getTestCommandsEnabled()) { - // Only include the host:port of this process in the isMaster command request if test + // Only include the host:port of this process in the "hello" command request if test // commands are enabled. mongobridge uses this field to identify the process opening a // connection to it. StringBuilder sb; @@ -116,16 +135,16 @@ BSONObj AsyncDBClient::_buildIsMasterRequest(const std::string& appName, } if (hook) { - return hook->augmentIsMasterRequest(remote(), bob.obj()); + return hook->augmentHelloRequest(remote(), bob.obj()); } else { return bob.obj(); } } -void AsyncDBClient::_parseIsMasterResponse(BSONObj request, - const std::unique_ptr& response) { +void AsyncDBClient::_parseHelloResponse(BSONObj request, + const std::unique_ptr& response) { uassert(50786, - "Expected OP_MSG response to isMaster", + "Expected OP_MSG response to 'hello'", response->getProtocol() == rpc::Protocol::kOpMsg); auto wireSpec = WireSpec::instance().get(); auto responseBody = response->getCommandReply(); @@ -224,7 +243,7 @@ Future AsyncDBClient::completeSpeculativeAuth(std::shared_ptr AsyncDBClient::completeSpeculativeAuth(std::shared_ptr AsyncDBClient::initWireVersion(const std::string& appName, executor::NetworkConnectionHook* const hook) { - auto requestObj = _buildIsMasterRequest(appName, hook); + auto requestObj = _buildHelloRequest(appName, hook); auto opMsgRequest = OpMsgRequest::fromDBAndBody("admin", requestObj); auto msgId = nextMessageId(); @@ -258,7 +277,7 @@ Future AsyncDBClient::initWireVersion(const std::string& appName, .then([msgId, this]() { return _waitForResponse(msgId); }) .then([this, requestObj, hook, timer = Timer{}](Message response) { auto cmdReply = rpc::makeReply(&response); - _parseIsMasterResponse(requestObj, cmdReply); + _parseHelloResponse(requestObj, cmdReply); if (hook) { executor::RemoteCommandResponse cmdResp(*cmdReply, timer.elapsed()); uassertStatusOK(hook->validateHost(_peer, requestObj, std::move(cmdResp))); diff --git a/src/mongo/client/async_client.h b/src/mongo/client/async_client.h index bbcec51b7111c..b3452859d25be 100644 --- a/src/mongo/client/async_client.h +++ b/src/mongo/client/async_client.h @@ -29,21 +29,37 @@ #pragma once +#include +#include +#include #include +#include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/client/authenticate.h" +#include "mongo/client/sasl_client_session.h" +#include "mongo/db/baton.h" #include "mongo/db/service_context.h" #include "mongo/executor/connection_metrics.h" #include "mongo/executor/network_connection_hook.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log_severity_suppressor.h" +#include 
"mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" #include "mongo/rpc/unique_message.h" #include "mongo/transport/baton.h" #include "mongo/transport/message_compressor_manager.h" +#include "mongo/transport/session.h" #include "mongo/transport/ssl_connection_context.h" #include "mongo/transport/transport_layer.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/timer.h" namespace mongo { @@ -112,10 +128,8 @@ class AsyncDBClient : public std::enable_shared_from_this { Future _waitForResponse(boost::optional msgId, const BatonHandle& baton = nullptr); Future _call(Message request, int32_t msgId, const BatonHandle& baton = nullptr); - BSONObj _buildIsMasterRequest(const std::string& appName, - executor::NetworkConnectionHook* hook); - void _parseIsMasterResponse(BSONObj request, - const std::unique_ptr& response); + BSONObj _buildHelloRequest(const std::string& appName, executor::NetworkConnectionHook* hook); + void _parseHelloResponse(BSONObj request, const std::unique_ptr& response); auth::RunCommandHook _makeAuthRunCommandHook(); const HostAndPort _peer; diff --git a/src/mongo/client/async_remote_command_targeter_adapter.h b/src/mongo/client/async_remote_command_targeter_adapter.h index ace8c3360fa7d..65fed1388facf 100644 --- a/src/mongo/client/async_remote_command_targeter_adapter.h +++ b/src/mongo/client/async_remote_command_targeter_adapter.h @@ -29,12 +29,21 @@ #pragma once +#include +#include +#include +#include + +#include + +#include "mongo/base/status.h" #include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/executor/async_rpc_targeter.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" #include "mongo/util/future.h" -#include +#include "mongo/util/net/hostandport.h" namespace mongo { namespace async_rpc { diff --git a/src/mongo/client/async_remote_command_targeter_adapter_test.cpp b/src/mongo/client/async_remote_command_targeter_adapter_test.cpp index ef6e83a5e1088..7ffae94821bc7 100644 --- a/src/mongo/client/async_remote_command_targeter_adapter_test.cpp +++ b/src/mongo/client/async_remote_command_targeter_adapter_test.cpp @@ -28,15 +28,22 @@ */ #include "mongo/client/async_remote_command_targeter_adapter.h" + +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/client/remote_command_targeter_rs.h" -#include "mongo/unittest/unittest.h" -#include -#include -#include +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace async_rpc { diff --git a/src/mongo/client/authenticate.cpp b/src/mongo/client/authenticate.cpp index a31dbb5467266..52079ce9dc78f 100644 --- a/src/mongo/client/authenticate.cpp +++ b/src/mongo/client/authenticate.cpp @@ -28,28 +28,41 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/client/authenticate.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" 
-#include "mongo/bson/json.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/authenticate.h" #include "mongo/client/internal_auth.h" #include "mongo/client/sasl_client_authenticate.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/sasl_command_constants.h" +#include "mongo/db/auth/user.h" #include "mongo/db/server_options.h" +#include "mongo/db/wire_version.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/rpc/op_msg_rpc_impls.h" -#include "mongo/util/net/ssl_manager.h" -#include "mongo/util/net/ssl_options.h" -#include "mongo/util/password_digest.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -215,7 +228,7 @@ Future negotiateSaslMechanism(RunCommandHook runCommand, } BSONObjBuilder builder; - builder.append("ismaster", 1); + builder.append("hello", 1); builder.append("saslSupportedMechs", username.getUnambiguousName()); if (stepDownBehavior == StepDownBehavior::kKeepConnectionOpen) { builder.append("hangUpOnStepDown", false); @@ -299,11 +312,12 @@ StringData getSaslCommandUserFieldName() { namespace { -StatusWith> _speculateSaslStart(BSONObjBuilder* isMaster, - const std::string& mechanism, - const HostAndPort& host, - StringData authDB, - BSONObj params) { +StatusWith> _speculateSaslStart( + BSONObjBuilder* helloRequestBuilder, + const std::string& mechanism, + const HostAndPort& host, + StringData authDB, + BSONObj params) { if (mechanism == kMechanismSaslPlain) { return {ErrorCodes::BadValue, "PLAIN mechanism not supported with speculativeSaslStart"}; } @@ -325,13 +339,13 @@ StatusWith> _speculateSaslStart(BSONObjBuilde saslStart.append("mechanism", mechanism); saslStart.appendBinData("payload", int(payload.size()), BinDataGeneral, payload.c_str()); saslStart.append("db", authDB); - isMaster->append(kSpeculativeAuthenticate, saslStart.obj()); + helloRequestBuilder->append(kSpeculativeAuthenticate, saslStart.obj()); return session; } StatusWith _speculateAuth( - BSONObjBuilder* isMaster, + BSONObjBuilder* helloRequestBuilder, const std::string& mechanism, const HostAndPort& host, StringData authDB, @@ -339,17 +353,18 @@ StatusWith _speculateAuth( std::shared_ptr* saslClientSession) { if (mechanism == kMechanismMongoX509) { // MONGODB-X509 - isMaster->append(kSpeculativeAuthenticate, - BSON(kAuthenticateCommand << "1" << saslCommandMechanismFieldName - << mechanism << saslCommandUserDBFieldName - << "$external")); + helloRequestBuilder->append(kSpeculativeAuthenticate, + BSON(kAuthenticateCommand + << "1" << saslCommandMechanismFieldName << mechanism + << saslCommandUserDBFieldName << "$external")); return SpeculativeAuthType::kAuthenticate; } // Proceed as if this is a SASL mech and we either have a password, // or we don't need one (e.g. MONGODB-AWS). // Failure is absolutely an option. 
- auto swSaslClientSession = _speculateSaslStart(isMaster, mechanism, host, authDB, params); + auto swSaslClientSession = + _speculateSaslStart(helloRequestBuilder, mechanism, host, authDB, params); if (!swSaslClientSession.isOK()) { return swSaslClientSession.getStatus(); } @@ -368,7 +383,7 @@ std::string getBSONString(BSONObj container, StringData field) { } } // namespace -SpeculativeAuthType speculateAuth(BSONObjBuilder* isMasterRequest, +SpeculativeAuthType speculateAuth(BSONObjBuilder* helloRequestBuilder, const MongoURI& uri, std::shared_ptr* saslClientSession) { auto mechanism = uri.getOption("authMechanism").get_value_or(kMechanismScramSha256.toString()); @@ -380,7 +395,7 @@ SpeculativeAuthType speculateAuth(BSONObjBuilder* isMasterRequest, auto params = std::move(optParams.value()); - auto ret = _speculateAuth(isMasterRequest, + auto ret = _speculateAuth(helloRequestBuilder, mechanism, uri.getServers().front(), uri.getAuthenticationDatabase(), @@ -396,7 +411,7 @@ SpeculativeAuthType speculateAuth(BSONObjBuilder* isMasterRequest, SpeculativeAuthType speculateInternalAuth( const HostAndPort& remoteHost, - BSONObjBuilder* isMasterRequest, + BSONObjBuilder* helloRequestBuilder, std::shared_ptr* saslClientSession) try { auto params = getInternalAuthParams(0, kMechanismScramSha256.toString()); if (params.isEmpty()) { @@ -406,8 +421,8 @@ SpeculativeAuthType speculateInternalAuth( auto mechanism = getBSONString(params, saslCommandMechanismFieldName); auto authDB = getBSONString(params, saslCommandUserDBFieldName); - auto ret = - _speculateAuth(isMasterRequest, mechanism, remoteHost, authDB, params, saslClientSession); + auto ret = _speculateAuth( + helloRequestBuilder, mechanism, remoteHost, authDB, params, saslClientSession); if (!ret.isOK()) { return SpeculativeAuthType::kNone; } diff --git a/src/mongo/client/authenticate.h b/src/mongo/client/authenticate.h index fc4a8df8e7883..93cd134084935 100644 --- a/src/mongo/client/authenticate.h +++ b/src/mongo/client/authenticate.h @@ -29,6 +29,9 @@ #pragma once +#include +#include +#include #include #include #include @@ -36,6 +39,7 @@ #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/internal_auth.h" #include "mongo/client/mongo_uri.h" #include "mongo/client/sasl_client_session.h" @@ -137,7 +141,7 @@ Future authenticateClient(const BSONObj& params, * but the __system user's credentials will be filled in automatically. * * The "mechanismHint" parameter will force authentication with a specific mechanism - * (e.g. SCRAM-SHA-256). If it is boost::none, then an isMaster will be called to negotiate + * (e.g. SCRAM-SHA-256). If it is boost::none, then a "hello" will be called to negotiate * a SASL mechanism with the server. * * The "stepDownBehavior" parameter controls whether replication will kill the connection on @@ -169,7 +173,7 @@ BSONObj buildAuthParams(StringData dbname, StringData mechanism); /** - * Run an isMaster exchange to negotiate a SASL mechanism for authentication. + * Run a "hello" exchange to negotiate a SASL mechanism for authentication. */ Future negotiateSaslMechanism(RunCommandHook runCommand, const UserName& username, @@ -196,19 +200,19 @@ enum class SpeculativeAuthType { }; /** - * Constructs a "speculativeAuthenticate" or "speculativeSaslStart" - * payload for an isMaster request based on a given URI. 
+ * Constructs a "speculativeAuthenticate" or "speculativeSaslStart" payload for an "hello" request + * based on a given URI. */ -SpeculativeAuthType speculateAuth(BSONObjBuilder* isMasterRequest, +SpeculativeAuthType speculateAuth(BSONObjBuilder* helloRequestBuilder, const MongoURI& uri, std::shared_ptr* saslClientSession); /** - * Constructs a "speculativeAuthenticate" or "speculativeSaslStart" - * payload for an isMaster request using internal (intracluster) authentication. + * Constructs a "speculativeAuthenticate" or "speculativeSaslStart" payload for an "hello" request + * using internal (intracluster) authentication. */ SpeculativeAuthType speculateInternalAuth(const HostAndPort& remoteHost, - BSONObjBuilder* isMasterRequest, + BSONObjBuilder* helloRequestBuilder, std::shared_ptr* saslClientSession); } // namespace auth diff --git a/src/mongo/client/authenticate_test.cpp b/src/mongo/client/authenticate_test.cpp index c72f6ddba0413..d00765562e2c4 100644 --- a/src/mongo/client/authenticate_test.cpp +++ b/src/mongo/client/authenticate_test.cpp @@ -27,16 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include + +#include -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/client/authenticate.h" -#include "mongo/config.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/base64.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/md5.h" #include "mongo/util/md5.hpp" #include "mongo/util/net/hostandport.h" #include "mongo/util/password_digest.h" diff --git a/src/mongo/client/connection_pool.cpp b/src/mongo/client/connection_pool.cpp index 2000e86f539d9..211cd18c5add4 100644 --- a/src/mongo/client/connection_pool.cpp +++ b/src/mongo/client/connection_pool.cpp @@ -29,13 +29,29 @@ #include "mongo/client/connection_pool.h" -#include "mongo/client/authenticate.h" -#include "mongo/client/mongo_uri.h" +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/internal_auth.h" #include "mongo/executor/network_connection_hook.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" +#include "mongo/rpc/op_msg.h" #include "mongo/rpc/reply_interface.h" #include "mongo/rpc/unique_message.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/ssl_options.h" namespace mongo { namespace { @@ -173,8 +189,8 @@ ConnectionPool::ConnectionList::iterator ConnectionPool::acquireConnection( false, // auto reconnect 0, // socket timeout {}, // MongoURI - [this, target](const executor::RemoteCommandResponse& isMasterReply) { - return _hook->validateHost(target, BSONObj(), isMasterReply); + [this, target](const executor::RemoteCommandResponse& helloReply) { + return _hook->validateHost(target, BSONObj(), helloReply); })); } else { conn.reset(new DBClientConnection()); diff --git a/src/mongo/client/connection_pool.h b/src/mongo/client/connection_pool.h index 15ca704313f25..2219219e13006 100644 --- a/src/mongo/client/connection_pool.h +++ b/src/mongo/client/connection_pool.h @@ -31,10 +31,12 @@ #include #include 
+#include #include "mongo/client/dbclient_connection.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/client/connection_string.cpp b/src/mongo/client/connection_string.cpp index df126ba4a22e5..841c06926b5be 100644 --- a/src/mongo/client/connection_string.cpp +++ b/src/mongo/client/connection_string.cpp @@ -27,13 +27,17 @@ * it in the license file. */ +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include -#include "mongo/platform/basic.h" - -#include "mongo/client/connection_string.h" - +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/client/connection_string.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/client/connection_string.h b/src/mongo/client/connection_string.h index f6ee47df22b09..002b57e0ad805 100644 --- a/src/mongo/client/connection_string.h +++ b/src/mongo/client/connection_string.h @@ -29,7 +29,9 @@ #pragma once +#include #include +#include #include #include #include @@ -37,6 +39,7 @@ #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/platform/mutex.h" #include "mongo/util/assert_util.h" #include "mongo/util/net/hostandport.h" diff --git a/src/mongo/client/connection_string_connect.cpp b/src/mongo/client/connection_string_connect.cpp index 9af07f5a97487..d4879b7c73b54 100644 --- a/src/mongo/client/connection_string_connect.cpp +++ b/src/mongo/client/connection_string_connect.cpp @@ -28,18 +28,35 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/client/connection_string.h" - -#include #include - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/client/client_api_version_parameters_gen.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/client/dbclient_connection.h" #include "mongo/client/dbclient_rs.h" #include "mongo/client/mongo_uri.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" #include "mongo/util/assert_util.h" +#include "mongo/util/net/ssl_options.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/connection_string_test.cpp b/src/mongo/client/connection_string_test.cpp index 355a62c0910e4..9baa2ee97bf77 100644 --- a/src/mongo/client/connection_string_test.cpp +++ b/src/mongo/client/connection_string_test.cpp @@ -27,11 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/status.h" #include "mongo/client/connection_string.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/client/connpool.cpp b/src/mongo/client/connpool.cpp index 1442275d9f0e4..aa71149bab35a 100644 --- a/src/mongo/client/connpool.cpp +++ b/src/mongo/client/connpool.cpp @@ -30,30 +30,44 @@ // _ todo: reconnect? 
-#include "mongo/platform/basic.h" - -#include "mongo/client/connpool.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include #include +#include +#include #include - -#include "mongo/base/init.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/connection_string.h" +#include "mongo/client/connpool.h" #include "mongo/client/dbclient_connection.h" #include "mongo/client/global_conn_pool.h" -#include "mongo/client/replica_set_monitor.h" -#include "mongo/config.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/executor/connection_pool_stats.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/chrono.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/util/exit.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/net/socket_exception.h" - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif +#include "mongo/util/str.h" #if __has_feature(address_sanitizer) #include @@ -73,8 +87,7 @@ auto makeDuration(double secs) { } void recordWaitTime(PoolForHost& p, DBClientBase* conn, Date_t connRequestedAt) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe() && conn) { + if (conn) { p.recordConnectionWaitTime(connRequestedAt); } } @@ -201,7 +214,7 @@ DBClientBase* PoolForHost::get(DBConnectionPool* pool, double socketTimeout) { continue; } - verify(sc.conn->getSoTimeout() == socketTimeout); + MONGO_verify(sc.conn->getSoTimeout() == socketTimeout); #ifdef MONGO_CONFIG_SSL invariant(!sc.conn->isUsingTransientSSLParams()); #endif @@ -626,10 +639,7 @@ void DBConnectionPool::appendConnectionStats(executor::ConnectionPoolStats* stat 0, 0, Milliseconds{0}}; - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
- if (gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe()) { - hostStats.acquisitionWaitTimes = i->second.connectionWaitTimeStats(); - } + hostStats.acquisitionWaitTimes = i->second.connectionWaitTimeStats(); stats->updateStatsForHost("global", host, hostStats); } } @@ -658,7 +668,7 @@ bool DBConnectionPool::serverNameCompare::operator()(const string& a, const stri ++ap; ++bp; } - verify(false); + MONGO_verify(false); } bool DBConnectionPool::poolKeyCompare::operator()(const PoolKey& a, const PoolKey& b) const { diff --git a/src/mongo/client/connpool.h b/src/mongo/client/connpool.h index ba514fc46bb24..da5bf79bd2acd 100644 --- a/src/mongo/client/connpool.h +++ b/src/mongo/client/connpool.h @@ -29,22 +29,34 @@ #pragma once +#include #include +#include +#include +#include +#include #include +#include +#include +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" #include "mongo/client/mongo_uri.h" #include "mongo/executor/connection_pool_stats.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util.h" #include "mongo/util/background.h" #include "mongo/util/concurrency/mutex.h" +#include "mongo/util/duration.h" #include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/time_support.h" namespace mongo { class BSONObjBuilder; + class DBConnectionPool; namespace executor { @@ -140,7 +152,7 @@ class PoolForHost { } ConnectionString::ConnectionType type() const { - verify(_created); + MONGO_verify(_created); return _type; } diff --git a/src/mongo/client/connpool_integration_test.cpp b/src/mongo/client/connpool_integration_test.cpp index 5bfa62259870c..50d6801c51733 100644 --- a/src/mongo/client/connpool_integration_test.cpp +++ b/src/mongo/client/connpool_integration_test.cpp @@ -27,12 +27,22 @@ * it in the license file. 
*/ +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/client/connection_string.h" #include "mongo/client/connpool.h" -#include "mongo/client/global_conn_pool.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/client/cyrus_sasl_client_session.cpp b/src/mongo/client/cyrus_sasl_client_session.cpp index 6d1434085d312..6345eb9a9a521 100644 --- a/src/mongo/client/cyrus_sasl_client_session.cpp +++ b/src/mongo/client/cyrus_sasl_client_session.cpp @@ -49,7 +49,8 @@ void saslSetError(sasl_conn_t* conn, const std::string& msg) { SaslClientSession* createCyrusSaslClientSession(const std::string& mech) { if ((mech == auth::kMechanismScramSha1) || (mech == auth::kMechanismScramSha256) || - (mech == auth::kMechanismMongoAWS) || (mech == auth::kMechanismMongoOIDC)) { + (mech == auth::kMechanismSaslPlain) || (mech == auth::kMechanismMongoAWS) || + (mech == auth::kMechanismMongoOIDC)) { return new NativeSaslClientSession(); } return new CyrusSaslClientSession(); diff --git a/src/mongo/client/dbclient_base.cpp b/src/mongo/client/dbclient_base.cpp index 4be14e256b1f2..10fa8e8b6e063 100644 --- a/src/mongo/client/dbclient_base.cpp +++ b/src/mongo/client/dbclient_base.cpp @@ -32,45 +32,55 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/client/dbclient_base.h" - -#include +#include +#include +#include +#include #include +#include +#include +#include + #include "mongo/base/status.h" #include "mongo/base/status_with.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/bson/util/builder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/client/authenticate.h" #include "mongo/client/client_api_version_parameters_gen.h" -#include "mongo/client/constants.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_cursor.h" -#include "mongo/config.h" +#include "mongo/client/internal_auth.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/api_parameters_gen.h" -#include "mongo/db/auth/validated_tenancy_scope.h" -#include "mongo/db/commands.h" -#include "mongo/db/json.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/kill_cursors_gen.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/server_options.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/wire_version.h" #include "mongo/executor/remote_command_request.h" -#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/factory.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata.h" -#include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/rpc/protocol.h" #include "mongo/rpc/reply_interface.h" -#include "mongo/s/stale_exception.h" #include "mongo/util/assert_util.h" -#include "mongo/util/concurrency/mutex.h" -#include "mongo/util/debug_util.h" 
-#include "mongo/util/net/ssl_manager.h" -#include "mongo/util/net/ssl_options.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -283,11 +293,11 @@ long long DBClientBase::count(const NamespaceStringOrUUID nsOrUuid, int limit, int skip, boost::optional readConcernObj) { - auto dbName = (nsOrUuid.uuid() ? nsOrUuid.dbName() : (*nsOrUuid.nss()).dbName()); + auto dbName = nsOrUuid.dbName(); BSONObj cmd = _countCmd(nsOrUuid, query, options, limit, skip, readConcernObj); BSONObj res; - if (!runCommand(*dbName, cmd, res, options)) { + if (!runCommand(dbName, cmd, res, options)) { auto status = getStatusFromCommandResult(res); uassertStatusOK(status.withContext("count fails:")); } @@ -302,11 +312,11 @@ BSONObj DBClientBase::_countCmd(const NamespaceStringOrUUID nsOrUuid, int skip, boost::optional readConcernObj) { BSONObjBuilder b; - if (nsOrUuid.uuid()) { - const auto uuid = *nsOrUuid.uuid(); + if (nsOrUuid.isUUID()) { + const auto uuid = nsOrUuid.uuid(); uuid.appendToBuilder(&b, "count"); } else { - b.append("count", (*nsOrUuid.nss()).coll()); + b.append("count", nsOrUuid.nss().coll()); } b.append("query", query); if (limit) @@ -448,12 +458,12 @@ bool DBClientBase::auth(const string& dbname, } void DBClientBase::logout(const string& dbname, BSONObj& info) { - runCommand(DatabaseName(boost::none, dbname), BSON("logout" << 1), info); + runCommand(DatabaseNameUtil::deserialize(boost::none, dbname), BSON("logout" << 1), info); } bool DBClientBase::isPrimary(bool& isPrimary, BSONObj* info) { BSONObjBuilder bob; - bob.append(_apiParameters.getVersion() ? "hello" : "ismaster", 1); + bob.append("hello", 1); if (auto wireSpec = WireSpec::instance().get(); wireSpec->isInternalClient) { WireSpec::appendInternalClientWireVersion(wireSpec->outgoing, &bob); } @@ -461,9 +471,8 @@ bool DBClientBase::isPrimary(bool& isPrimary, BSONObj* info) { BSONObj o; if (info == nullptr) info = &o; - bool ok = runCommand(DatabaseName(boost::none, "admin"), bob.obj(), *info); - isPrimary = - info->getField(_apiParameters.getVersion() ? 
"isWritablePrimary" : "ismaster").trueValue(); + bool ok = runCommand(DatabaseName::kAdmin, bob.obj(), *info); + isPrimary = info->getField("isWritablePrimary").trueValue(); return ok; } @@ -473,7 +482,7 @@ bool DBClientBase::createCollection(const NamespaceString& nss, int max, BSONObj* info, boost::optional writeConcernObj) { - verify(!capped || size); + MONGO_verify(!capped || size); BSONObj o; if (info == nullptr) info = &o; @@ -553,7 +562,7 @@ vector DBClientBase::getDatabaseInfos(const BSONObj& filter, BSONObj cmd = bob.done(); BSONObj res; - if (runCommand(DatabaseName(boost::none, "admin"), cmd, res, QueryOption_SecondaryOk)) { + if (runCommand(DatabaseName::kAdmin, cmd, res, QueryOption_SecondaryOk)) { BSONObj dbs = res["databases"].Obj(); BSONObjIterator it(dbs); while (it.more()) { @@ -751,11 +760,11 @@ namespace { */ BSONObj makeListIndexesCommand(const NamespaceStringOrUUID& nsOrUuid, bool includeBuildUUIDs) { BSONObjBuilder bob; - if (nsOrUuid.nss()) { - bob.append("listIndexes", (*nsOrUuid.nss()).coll()); + if (nsOrUuid.isNamespaceString()) { + bob.append("listIndexes", nsOrUuid.nss().coll()); bob.append("cursor", BSONObj()); } else { - const auto uuid = (*nsOrUuid.uuid()); + const auto& uuid = nsOrUuid.uuid(); uuid.appendToBuilder(&bob, "listIndexes"); bob.append("cursor", BSONObj()); } @@ -778,10 +787,10 @@ std::list DBClientBase::_getIndexSpecs(const NamespaceStringOrUUID& nsO const BSONObj& cmd, int options) { list specs; - auto dbName = (nsOrUuid.uuid() ? nsOrUuid.dbName() : (*nsOrUuid.nss()).dbName()); + auto dbName = nsOrUuid.dbName(); BSONObj res; - if (runCommand(*dbName, cmd, res, options)) { + if (runCommand(dbName, cmd, res, options)) { BSONObj cursorObj = res["cursor"].Obj(); BSONObjIterator i(cursorObj["firstBatch"].Obj()); while (i.more()) { @@ -795,9 +804,9 @@ std::list DBClientBase::_getIndexSpecs(const NamespaceStringOrUUID& nsO const long long id = cursorObj["id"].Long(); if (id != 0) { const auto cursorNs = - NamespaceStringUtil::deserialize(dbName->tenantId(), cursorObj["ns"].String()); - if (nsOrUuid.nss()) { - invariant((*nsOrUuid.nss()) == cursorNs); + NamespaceStringUtil::deserialize(dbName.tenantId(), cursorObj["ns"].String()); + if (nsOrUuid.isNamespaceString()) { + invariant(nsOrUuid.nss() == cursorNs); } unique_ptr cursor = getMore(cursorNs, id); while (cursor->more()) { @@ -815,7 +824,7 @@ std::list DBClientBase::_getIndexSpecs(const NamespaceStringOrUUID& nsO // "NamespaceNotFound" is an error for UUID but returns an empty list for NamespaceString; this // matches the behavior for other commands such as 'find' and 'count'. 
- if (nsOrUuid.nss() && status.code() == ErrorCodes::NamespaceNotFound) { + if (nsOrUuid.isNamespaceString() && status.code() == ErrorCodes::NamespaceNotFound) { return specs; } uassertStatusOK(status.withContext(str::stream() << "listIndexes failed: " << res)); diff --git a/src/mongo/client/dbclient_base.h b/src/mongo/client/dbclient_base.h index b05c7f6945386..e17dd465ef2db 100644 --- a/src/mongo/client/dbclient_base.h +++ b/src/mongo/client/dbclient_base.h @@ -29,10 +29,26 @@ #pragma once +#include +#include +#include #include #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/authenticate.h" #include "mongo/client/client_api_version_parameters_gen.h" #include "mongo/client/connection_string.h" @@ -40,9 +56,12 @@ #include "mongo/client/index_spec.h" #include "mongo/client/mongo_uri.h" #include "mongo/client/read_preference.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/database_name.h" #include "mongo/db/dbmessage.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/write_concern_options.h" #include "mongo/logv2/log_severity.h" #include "mongo/platform/atomic_word.h" @@ -53,6 +72,8 @@ #include "mongo/transport/message_compressor_manager.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/ssl_types.h" #include "mongo/util/str.h" namespace mongo { @@ -496,7 +517,7 @@ class DBClientBase { * Used by QueryOption_Exhaust. To use that your subclass must implement this. 
*/ virtual Status recv(Message& m, int lastRequestId) { - verify(false); + MONGO_verify(false); return {ErrorCodes::NotImplemented, "recv() not implemented"}; } diff --git a/src/mongo/client/dbclient_connection.cpp b/src/mongo/client/dbclient_connection.cpp index b35954c14d47a..3034e8edbafdc 100644 --- a/src/mongo/client/dbclient_connection.cpp +++ b/src/mongo/client/dbclient_connection.cpp @@ -32,52 +32,64 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/client/dbclient_connection.h" - -#include +#include +#include +#include #include #include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/client/authenticate.h" -#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/dbclient_connection.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/client/sasl_client_authenticate.h" #include "mongo/client/sasl_client_session.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/sasl_command_constants.h" #include "mongo/db/auth/user_name.h" -#include "mongo/db/client.h" -#include "mongo/db/commands.h" #include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/json.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/query/kill_cursors_gen.h" #include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/wire_version.h" -#include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata/client_metadata.h" -#include "mongo/s/stale_exception.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/transport/transport_layer.h" #include "mongo/util/assert_util.h" -#include "mongo/util/concurrency/mutex.h" -#include "mongo/util/debug_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #include "mongo/util/net/socket_exception.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/net/ssl_manager.h" #include "mongo/util/net/ssl_options.h" #include "mongo/util/net/ssl_peer_info.h" -#include "mongo/util/password_digest.h" -#include "mongo/util/testing_proctor.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" #include "mongo/util/version.h" @@ -99,28 +111,27 @@ StatusWith completeSpeculativeAuth(DBClientConnection* conn, auth::SpeculativeAuthType speculativeAuthType, std::shared_ptr session, const MongoURI& uri, - BSONObj isMaster) { - auto specAuthElem = isMaster[auth::kSpeculativeAuthenticate]; + BSONObj helloReply) { + auto specAuthElem = helloReply[auth::kSpeculativeAuthenticate]; if (specAuthElem.eoo()) { return false; } if (speculativeAuthType == auth::SpeculativeAuthType::kNone) { return {ErrorCodes::BadValue, - str::stream() << "Unexpected isMaster." 
<< auth::kSpeculativeAuthenticate - << " reply"}; + str::stream() << "Unexpected hello." << auth::kSpeculativeAuthenticate << " reply"}; } if (specAuthElem.type() != Object) { return {ErrorCodes::BadValue, - str::stream() << "isMaster." << auth::kSpeculativeAuthenticate + str::stream() << "hello." << auth::kSpeculativeAuthenticate << " reply must be an object"}; } auto specAuth = specAuthElem.Obj(); if (specAuth.isEmpty()) { return {ErrorCodes::BadValue, - str::stream() << "isMaster." << auth::kSpeculativeAuthenticate + str::stream() << "hello." << auth::kSpeculativeAuthenticate << " reply must be a non-empty obejct"}; } @@ -154,7 +165,7 @@ StatusWith completeSpeculativeAuth(DBClientConnection* conn, } /** - * Initializes the wire version of conn, and returns the isMaster reply. + * Initializes the wire version of conn, and returns the "hello" reply. */ executor::RemoteCommandResponse initWireVersion( DBClientConnection* conn, @@ -165,7 +176,7 @@ executor::RemoteCommandResponse initWireVersion( std::shared_ptr* saslClientSession) try { BSONObjBuilder bob; - bob.append(conn->getApiParameters().getVersion() ? "hello" : "isMaster", 1); + bob.append("hello", 1); if (uri.isHelloOk()) { // Attach "helloOk: true" to the initial handshake to indicate that the client supports the @@ -185,7 +196,7 @@ executor::RemoteCommandResponse initWireVersion( } if (getTestCommandsEnabled()) { - // Only include the host:port of this process in the isMaster command request if test + // Only include the host:port of this process in the "hello" command request if test // commands are enabled. mongobridge uses this field to identify the process opening a // connection to it. StringBuilder sb; @@ -211,25 +222,24 @@ executor::RemoteCommandResponse initWireVersion( auto result = conn->runCommand(OpMsgRequest::fromDBAndBody("admin", bob.obj())); Date_t finish{Date_t::now()}; - BSONObj isMasterObj = result->getCommandReply().getOwned(); + BSONObj helloObj = result->getCommandReply().getOwned(); - auto replyWireVersion = wire_version::parseWireVersionFromHelloReply(isMasterObj); + auto replyWireVersion = wire_version::parseWireVersionFromHelloReply(helloObj); if (replyWireVersion.isOK()) { conn->setWireVersions(replyWireVersion.getValue().minWireVersion, replyWireVersion.getValue().maxWireVersion); } - if (isMasterObj.hasField("saslSupportedMechs") && - isMasterObj["saslSupportedMechs"].type() == Array) { - auto array = isMasterObj["saslSupportedMechs"].Array(); + if (helloObj.hasField("saslSupportedMechs") && helloObj["saslSupportedMechs"].type() == Array) { + auto array = helloObj["saslSupportedMechs"].Array(); for (const auto& elem : array) { saslMechsForAuth->push_back(elem.checkAndGetStringData().toString()); } } - conn->getCompressorManager().clientFinish(isMasterObj); + conn->getCompressorManager().clientFinish(helloObj); - return executor::RemoteCommandResponse{std::move(isMasterObj), finish - start}; + return executor::RemoteCommandResponse{std::move(helloObj), finish - start}; } catch (...) 
{ return exceptionToStatus(); @@ -287,41 +297,41 @@ Status DBClientConnection::connect(const HostAndPort& serverAddress, auto speculativeAuthType = auth::SpeculativeAuthType::kNone; std::shared_ptr saslClientSession; - auto swIsMasterReply = initWireVersion( + auto swHelloReply = initWireVersion( this, _applicationName, _uri, &_saslMechsForAuth, &speculativeAuthType, &saslClientSession); - if (!swIsMasterReply.isOK()) { + if (!swHelloReply.isOK()) { _markFailed(kSetFlag); - swIsMasterReply.status.addContext( + swHelloReply.status.addContext( "Connection handshake failed. Is your mongod/mongos 3.4 or older?"_sd); - return swIsMasterReply.status; + return swHelloReply.status; } - // Ensure that the isMaster response is "ok:1". - auto isMasterStatus = getStatusFromCommandResult(swIsMasterReply.data); - if (!isMasterStatus.isOK()) { - return isMasterStatus; + // Ensure that the "hello" response is "ok:1". + auto helloStatus = getStatusFromCommandResult(swHelloReply.data); + if (!helloStatus.isOK()) { + return helloStatus; } - auto replyWireVersion = wire_version::parseWireVersionFromHelloReply(swIsMasterReply.data); + auto replyWireVersion = wire_version::parseWireVersionFromHelloReply(swHelloReply.data); if (!replyWireVersion.isOK()) { return replyWireVersion.getStatus(); } { // The Server Discovery and Monitoring (SDAM) specification identifies a replica set member - // as either (a) having a "setName" field in the isMaster response, or (b) having - // "isreplicaset: true" in the isMaster response. + // as either (a) having a "setName" field in the "hello" response, or (b) having + // "isreplicaset: true" in the "hello" response. // // https://github.com/mongodb/specifications/blob/c386e23724318e2fa82f4f7663d77581b755b2c3/ // source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#type - const bool hasSetNameField = swIsMasterReply.data.hasField("setName"); - const bool isReplicaSetField = swIsMasterReply.data.getBoolField("isreplicaset"); + const bool hasSetNameField = swHelloReply.data.hasField("setName"); + const bool isReplicaSetField = swHelloReply.data.getBoolField("isreplicaset"); _isReplicaSetMember = hasSetNameField || isReplicaSetField; } { std::string msgField; - auto msgFieldExtractStatus = bsonExtractStringField(swIsMasterReply.data, "msg", &msgField); + auto msgFieldExtractStatus = bsonExtractStringField(swHelloReply.data, "msg", &msgField); if (msgFieldExtractStatus == ErrorCodes::NoSuchKey) { _isMongos = false; @@ -345,7 +355,7 @@ Status DBClientConnection::connect(const HostAndPort& serverAddress, } if (_hook) { - auto validationStatus = _hook(swIsMasterReply); + auto validationStatus = _hook(swHelloReply); if (!validationStatus.isOK()) { // Disconnect and mark failed. 
_markFailed(kReleaseSession); @@ -355,7 +365,7 @@ Status DBClientConnection::connect(const HostAndPort& serverAddress, { auto swAuth = completeSpeculativeAuth( - this, speculativeAuthType, saslClientSession, _uri, swIsMasterReply.data); + this, speculativeAuthType, saslClientSession, _uri, swHelloReply.data); if (!swAuth.isOK()) { return swAuth.getStatus(); } @@ -435,7 +445,7 @@ Status DBClientConnection::connectSocketOnly( void DBClientConnection::logout(const string& dbname, BSONObj& info) { authCache.erase(dbname); _internalAuthOnReconnect = false; - runCommand(DatabaseName(boost::none, dbname), BSON("logout" << 1), info); + runCommand(DatabaseNameUtil::deserialize(boost::none, dbname), BSON("logout" << 1), info); } std::pair DBClientConnection::runCommandWithTarget( @@ -560,7 +570,6 @@ void DBClientConnection::_checkConnection() { "Trying to reconnect to {connString}", "Trying to reconnect", "connString"_attr = toString()); - string errmsg; auto connectStatus = connect(_serverAddress, _applicationName, _transientSSLParams); if (!connectStatus.isOK()) { @@ -570,7 +579,7 @@ void DBClientConnection::_checkConnection() { "Reconnect attempt to {connString} failed: {reason}", "Reconnect attempt failed", "connString"_attr = toString(), - "error"_attr = errmsg); + "error"_attr = connectStatus); if (connectStatus == ErrorCodes::IncompatibleCatalogManager) { uassertStatusOK(connectStatus); // Will always throw } else { diff --git a/src/mongo/client/dbclient_connection.h b/src/mongo/client/dbclient_connection.h index ff939eec0a660..37e0508ceda5b 100644 --- a/src/mongo/client/dbclient_connection.h +++ b/src/mongo/client/dbclient_connection.h @@ -29,19 +29,32 @@ #pragma once +#include +#include #include #include +#include +#include +#include +#include +#include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/authenticate.h" +#include "mongo/client/client_api_version_parameters_gen.h" #include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" #include "mongo/client/index_spec.h" #include "mongo/client/mongo_uri.h" #include "mongo/client/read_preference.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/dbmessage.h" #include "mongo/db/jsobj.h" #include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log_severity.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" @@ -52,8 +65,13 @@ #include "mongo/transport/message_compressor_manager.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer.h" +#include "mongo/util/duration.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/net/ssl_types.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -74,13 +92,13 @@ class DBClientCursor; class DBClientConnection : public DBClientBase { public: /** - * A hook used to validate the reply of an 'isMaster' command during connection. If the hook + * A hook used to validate the reply of a "hello" command during connection. If the hook * returns a non-OK Status, the DBClientConnection object will disconnect from the remote * server. This function must not throw - it can only indicate failure by returning a non-OK * status. 
*/ using HandshakeValidationHook = - std::function; + std::function; /** @param _autoReconnect if true, automatically reconnect on a connection failure @@ -123,8 +141,8 @@ class DBClientConnection : public DBClientBase { boost::optional transientSSLParams); /** - * This version of connect does not run 'isMaster' after creating a TCP connection to the - * remote host. This method should be used only when calling 'isMaster' would create a deadlock, + * This version of connect does not run "hello" after creating a TCP connection to the + * remote host. This method should be used only when calling "hello" would create a deadlock, * such as in 'isSelf'. * * @param server The server to connect to. diff --git a/src/mongo/client/dbclient_connection_integration_test.cpp b/src/mongo/client/dbclient_connection_integration_test.cpp index 65b3311583bde..7dbbdb2d10650 100644 --- a/src/mongo/client/dbclient_connection_integration_test.cpp +++ b/src/mongo/client/dbclient_connection_integration_test.cpp @@ -27,14 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" - -#include "mongo/base/checked_cast.h" +#include "mongo/db/database_name.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -58,7 +73,7 @@ class DBClientConnectionFixture : public unittest::Test { auto conn = makeConn(kAppName + "-cleanup"); BSONObj currOp; - if (!conn->runCommand({boost::none, "admin"}, BSON("currentOp" << 1), currOp)) + if (!conn->runCommand(DatabaseName::kAdmin, BSON("currentOp" << 1), currOp)) uassertStatusOK(getStatusFromCommandResult(currOp)); for (auto&& op : currOp["inprog"].Obj()) { @@ -75,7 +90,7 @@ class DBClientConnectionFixture : public unittest::Test { // Ignore failures to clean up. BSONObj ignored; (void)conn->runCommand( - {boost::none, "admin"}, BSON("killOp" << 1 << "op" << op["opid"]), ignored); + DatabaseName::kAdmin, BSON("killOp" << 1 << "op" << op["opid"]), ignored); } } }; @@ -86,7 +101,7 @@ TEST_F(DBClientConnectionFixture, shutdownWorksIfCalledFirst) { conn->shutdownAndDisallowReconnect(); BSONObj reply; - ASSERT_THROWS(conn->runCommand({boost::none, "admin"}, sleepCmd, reply), + ASSERT_THROWS(conn->runCommand(DatabaseName::kAdmin, sleepCmd, reply), ExceptionForCat); // Currently SocketException. } @@ -101,7 +116,7 @@ TEST_F(DBClientConnectionFixture, shutdownWorksIfRunCommandInProgress) { ON_BLOCK_EXIT([&] { shutdownThread.join(); }); BSONObj reply; - ASSERT_THROWS(conn->runCommand({boost::none, "admin"}, sleepCmd, reply), + ASSERT_THROWS(conn->runCommand(DatabaseName::kAdmin, sleepCmd, reply), ExceptionForCat); // Currently HostUnreachable. 
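The dbclient_connection hunks above drop the last isMaster fallbacks: the handshake always sends "hello", the reply is always read through "isWritablePrimary", and admin commands target DatabaseName::kAdmin instead of a hand-built {boost::none, "admin"} pair. A rough sketch of the resulting caller-side pattern, assuming an already-connected conn; the wrapper function is illustrative and not part of the patch:

    #include "mongo/bson/bsonobjbuilder.h"
    #include "mongo/client/dbclient_connection.h"
    #include "mongo/db/database_name.h"

    namespace mongo {
    // Sketch: post-change handshake check. Only the new spellings appear; there is
    // no apiVersion-dependent choice between "hello" and "isMaster" any more.
    bool checkWritablePrimary(DBClientConnection& conn) {
        BSONObjBuilder bob;
        bob.append("hello", 1);
        BSONObj reply;
        bool ok = conn.runCommand(DatabaseName::kAdmin, bob.obj(), reply);
        return ok && reply.getField("isWritablePrimary").trueValue();
    }
    }  // namespace mongo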
} diff --git a/src/mongo/client/dbclient_cursor.cpp b/src/mongo/client/dbclient_cursor.cpp index 333a2988814c3..b7711fa9e9daa 100644 --- a/src/mongo/client/dbclient_cursor.cpp +++ b/src/mongo/client/dbclient_cursor.cpp @@ -29,23 +29,43 @@ #include "mongo/client/dbclient_cursor.h" +#include +#include +#include #include - +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/connection_string.h" #include "mongo/client/connpool.h" +#include "mongo/client/dbclient_base.h" #include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbmessage.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/getmore_command_gen.h" -#include "mongo/db/query/query_request_helper.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/factory.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata.h" -#include "mongo/s/stale_exception.h" -#include "mongo/util/debug_util.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" #include "mongo/util/exit.h" #include "mongo/util/scopeguard.h" @@ -124,7 +144,7 @@ Message DBClientCursor::assembleGetMore() { bool DBClientCursor::init() { invariant(!_connectionHasPendingReplies); Message toSend = assembleInit(); - verify(_client); + MONGO_verify(_client); Message reply; try { _client->call(toSend, reply, &_originalHost); @@ -152,7 +172,7 @@ void DBClientCursor::requestMore() { } invariant(!_connectionHasPendingReplies); - verify(_cursorId && _batch.pos == _batch.objs.size()); + MONGO_verify(_cursorId && _batch.pos == _batch.objs.size()); auto doRequestMore = [&] { Message toSend = assembleGetMore(); @@ -176,10 +196,10 @@ void DBClientCursor::requestMore() { * cursor id of 0. */ void DBClientCursor::exhaustReceiveMore() { - verify(_cursorId); - verify(_batch.pos == _batch.objs.size()); + MONGO_verify(_cursorId); + MONGO_verify(_batch.pos == _batch.objs.size()); Message response; - verify(_client); + MONGO_verify(_client); uassertStatusOK( _client->recv(response, _lastRequestId).withContext("recv failed while exhausting cursor")); dataReceived(response); @@ -300,11 +320,11 @@ bool DBClientCursor::peekError(BSONObj* error) { vector v; peek(v, 1); - verify(v.size() == 1); + MONGO_verify(v.size() == 1); // We check both the legacy error format, and the new error format. hasErrField checks for // $err, and getStatusFromCommandResult checks for modern errors of the form '{ok: 0.0, code: // <...>, errmsg: ...}'. 
- verify(hasErrField(v[0]) || !getStatusFromCommandResult(v[0]).isOK()); + MONGO_verify(hasErrField(v[0]) || !getStatusFromCommandResult(v[0]).isOK()); if (error) *error = v[0].getOwned(); @@ -312,9 +332,9 @@ bool DBClientCursor::peekError(BSONObj* error) { } void DBClientCursor::attach(AScopedConnection* conn) { - verify(_scopedHost.size() == 0); - verify(conn); - verify(conn->get()); + MONGO_verify(_scopedHost.size() == 0); + MONGO_verify(conn); + MONGO_verify(conn->get()); if (conn->get()->type() == ConnectionString::ConnectionType::kReplicaSet) { if (_client) @@ -341,7 +361,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client, _originalHost(_client->getServerAddress()), _nsOrUuid(nsOrUuid), _isInitialized(true), - _ns(nsOrUuid.nss() ? *nsOrUuid.nss() : NamespaceString(nsOrUuid.dbName().value())), + _ns(nsOrUuid.isNamespaceString() ? nsOrUuid.nss() : NamespaceString{nsOrUuid.dbName()}), _cursorId(cursorId), _isExhaust(isExhaust), _operationTime(operationTime), @@ -354,7 +374,7 @@ DBClientCursor::DBClientCursor(DBClientBase* client, : _client(client), _originalHost(_client->getServerAddress()), _nsOrUuid(findRequest.getNamespaceOrUUID()), - _ns(_nsOrUuid.nss() ? *_nsOrUuid.nss() : NamespaceString(_nsOrUuid.dbName().value())), + _ns(_nsOrUuid.isNamespaceString() ? _nsOrUuid.nss() : NamespaceString{_nsOrUuid.dbName()}), _batchSize(findRequest.getBatchSize().value_or(0)), _findRequest(std::move(findRequest)), _readPref(readPref), diff --git a/src/mongo/client/dbclient_cursor.h b/src/mongo/client/dbclient_cursor.h index c5cfd1484bb55..5f07c97093071 100644 --- a/src/mongo/client/dbclient_cursor.h +++ b/src/mongo/client/dbclient_cursor.h @@ -29,18 +29,35 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/read_preference.h" +#include "mongo/db/basic_types.h" #include "mongo/db/dbmessage.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/optime.h" #include "mongo/rpc/message.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" namespace mongo { class AScopedConnection; class DBClientBase; + class AggregateCommandRequest; /** @@ -190,10 +207,6 @@ class DBClientCursor { return _originalHost; } - std::string getns() const { - return _ns.ns(); - } - const NamespaceString& getNamespaceString() const { return _ns; } diff --git a/src/mongo/client/dbclient_cursor_test.cpp b/src/mongo/client/dbclient_cursor_test.cpp index e9b0adaf20368..b5274c4040f17 100644 --- a/src/mongo/client/dbclient_cursor_test.cpp +++ b/src/mongo/client/dbclient_cursor_test.cpp @@ -28,13 +28,34 @@ */ +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" #include "mongo/client/dbclient_connection.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" 
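Alongside the MONGO_verify rename, the dbclient_cursor.h hunk above deletes DBClientCursor::getns(); callers keep only the NamespaceString-returning accessor, and test code that still wants the string form goes through ns_forTest(), as the fetcher_test hunks later in this patch show. Roughly, for some existing cursor (the wrapper function is illustrative):

    #include "mongo/client/dbclient_cursor.h"

    namespace mongo {
    // Sketch: migrating a caller of the removed string accessor.
    const NamespaceString& cursorNamespace(const DBClientCursor& cursor) {
        // old: std::string ns = cursor.getns();
        return cursor.getNamespaceString();
    }
    }  // namespace mongo

In test code the string form is reached explicitly, e.g. ASSERT_EQUALS("db.coll", cursor.getNamespaceString().ns_forTest()).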
+#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/client/dbclient_mockcursor.cpp b/src/mongo/client/dbclient_mockcursor.cpp index 0e33d4360d18c..693c1c2cc5b03 100644 --- a/src/mongo/client/dbclient_mockcursor.cpp +++ b/src/mongo/client/dbclient_mockcursor.cpp @@ -27,12 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/client/dbclient_mockcursor.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/client/dbclient_mockcursor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/client/dbclient_mockcursor.h b/src/mongo/client/dbclient_mockcursor.h index 7430a1aa3cbc0..9ec20766db0df 100644 --- a/src/mongo/client/dbclient_mockcursor.h +++ b/src/mongo/client/dbclient_mockcursor.h @@ -29,9 +29,12 @@ #pragma once -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/platform/basic.h" namespace mongo { diff --git a/src/mongo/client/dbclient_rs.cpp b/src/mongo/client/dbclient_rs.cpp index 3257ee82712bc..098dcdc203f60 100644 --- a/src/mongo/client/dbclient_rs.cpp +++ b/src/mongo/client/dbclient_rs.cpp @@ -29,20 +29,39 @@ #include "mongo/client/dbclient_rs.h" +#include +#include +#include +#include #include +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/client/connpool.h" #include "mongo/client/dbclient_cursor.h" #include "mongo/client/global_conn_pool.h" +#include "mongo/client/internal_auth.h" #include "mongo/client/read_preference.h" #include "mongo/client/replica_set_monitor.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/sasl_command_constants.h" -#include "mongo/db/dbmessage.h" -#include "mongo/db/jsobj.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -211,14 +230,14 @@ bool _isSecondaryCommand(StringData commandName, const BSONObj& commandArgs) { } // Internal implementation of isSecondaryQuery, takes previously-parsed read preference -bool _isSecondaryQuery(const string& ns, +bool _isSecondaryQuery(const NamespaceString& ns, const BSONObj& filter, const ReadPreferenceSetting& readPref) { // If the read pref is primary only, this is not a secondary query if (readPref.pref == ReadPreference::PrimaryOnly) return false; - if (ns.find(".$cmd") == string::npos) { + if (ns.coll().find("$cmd") == string::npos) { return true; } @@ -470,7 +489,7 @@ void DBClientReplicaSet::logout(const string& dbname, 
BSONObj& info) { _lastSecondaryOkConn->logout(dbname, dummy); } catch (const DBException&) { // Make sure we can't use this connection again. - verify(_lastSecondaryOkConn->isFailed()); + MONGO_verify(_lastSecondaryOkConn->isFailed()); } } } @@ -501,9 +520,9 @@ void DBClientReplicaSet::remove(const NamespaceString& nss, std::unique_ptr DBClientReplicaSet::find(FindCommandRequest findRequest, const ReadPreferenceSetting& readPref, ExhaustMode exhaustMode) { - invariant(findRequest.getNamespaceOrUUID().nss()); - const std::string nss = findRequest.getNamespaceOrUUID().nss()->ns(); - if (_isSecondaryQuery(nss, findRequest.toBSON(BSONObj{}), readPref)) { + invariant(findRequest.getNamespaceOrUUID().isNamespaceString()); + if (_isSecondaryQuery( + findRequest.getNamespaceOrUUID().nss(), findRequest.toBSON(BSONObj{}), readPref)) { LOGV2_DEBUG(5951202, 3, "dbclient_rs query using secondary or tagged node selection", @@ -558,7 +577,7 @@ void DBClientReplicaSet::killCursor(const NamespaceString& ns, long long cursorI // since we don't know which server it belongs to // can't assume primary because of secondary ok // and can have a cursor survive a primary change - verify(0); + MONGO_verify(0); } void DBClientReplicaSet::isNotPrimary() { @@ -714,7 +733,7 @@ void DBClientReplicaSet::say(Message& toSend, bool isRetry, string* actualServer } Status DBClientReplicaSet::recv(Message& m, int lastRequestId) { - verify(_lastClient); + MONGO_verify(_lastClient); try { return _lastClient->recv(m, lastRequestId); diff --git a/src/mongo/client/dbclient_rs.h b/src/mongo/client/dbclient_rs.h index 32cc8f241860c..c872c8f1bd5c2 100644 --- a/src/mongo/client/dbclient_rs.h +++ b/src/mongo/client/dbclient_rs.h @@ -33,18 +33,41 @@ * Connect to a Replica Set, from C++. */ +#include +#include +#include +#include +#include #include - +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/authenticate.h" +#include "mongo/client/client_api_version_parameters_gen.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/client/mongo_uri.h" -#include "mongo/config.h" +#include "mongo/client/read_preference.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/metadata.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/unique_message.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_types.h" namespace mongo { class ReplicaSetMonitor; class TagSet; struct ReadPreferenceSetting; + typedef std::shared_ptr ReplicaSetMonitorPtr; /** Use this class to connect to a replica set of servers. The class will manage diff --git a/src/mongo/client/dbclient_rs_test.cpp b/src/mongo/client/dbclient_rs_test.cpp index 9a7f3cc07835c..fa986ec306450 100644 --- a/src/mongo/client/dbclient_rs_test.cpp +++ b/src/mongo/client/dbclient_rs_test.cpp @@ -32,27 +32,39 @@ * the DBClientReplicaSet talks to, so the tests only covers the client side logic. 
*/ -#include "mongo/platform/basic.h" - #include #include #include #include -#include "mongo/base/init.h" +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/connpool.h" #include "mongo/client/dbclient_rs.h" #include "mongo/client/replica_set_monitor.h" -#include "mongo/client/replica_set_monitor_protocol_test_util.h" +#include "mongo/client/sdam/mock_topology_manager.h" #include "mongo/client/streamable_replica_set_monitor_for_testing.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/service_context.h" #include "mongo/dbtests/mock/mock_conn_registry.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/dbtests/mock/mock_replica_set.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/rpc/reply_interface.h" #include "mongo/stdx/unordered_set.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" namespace mongo { diff --git a/src/mongo/client/fetcher.cpp b/src/mongo/client/fetcher.cpp index 868ffb90a9786..65379f9140d7d 100644 --- a/src/mongo/client/fetcher.cpp +++ b/src/mongo/client/fetcher.cpp @@ -27,16 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/client/fetcher.h" - +#include +#include #include #include -#include "mongo/db/jsobj.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/client/fetcher.h" #include "mongo/db/namespace_string.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" @@ -167,7 +176,7 @@ Status parseCursorResponse(const BSONObj& obj, Fetcher::Fetcher(executor::TaskExecutor* executor, const HostAndPort& source, - const std::string& dbname, + StringData dbname, const BSONObj& findCmdObj, CallbackFn work, const BSONObj& metadata, diff --git a/src/mongo/client/fetcher.h b/src/mongo/client/fetcher.h index 90ba61b00fca4..8109b010b290d 100644 --- a/src/mongo/client/fetcher.h +++ b/src/mongo/client/fetcher.h @@ -29,21 +29,35 @@ #pragma once +#include +#include +#include #include #include #include #include +#include #include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_retry_scheduler.h" #include "mongo/db/clientcursor.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/namespace_string.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/transport/transport_layer.h" +#include 
"mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/interruptible.h" #include "mongo/util/net/hostandport.h" namespace mongo { @@ -130,7 +144,7 @@ class Fetcher { */ Fetcher(executor::TaskExecutor* executor, const HostAndPort& source, - const std::string& dbname, + StringData dbname, const BSONObj& cmdObj, CallbackFn work, const BSONObj& metadata = ReadPreferenceSetting::secondaryPreferredMetadata(), diff --git a/src/mongo/client/fetcher_test.cpp b/src/mongo/client/fetcher_test.cpp index 37ec7f5f0e7fe..c85ff896abb9d 100644 --- a/src/mongo/client/fetcher_test.cpp +++ b/src/mongo/client/fetcher_test.cpp @@ -27,19 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include + +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/client/fetcher.h" -#include "mongo/db/jsobj.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_test_fixture.h" +#include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/rpc/metadata.h" +#include "mongo/stdx/thread.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/future_test_utils.h" -#include "mongo/unittest/unittest.h" - namespace { using namespace mongo; @@ -271,7 +284,7 @@ TEST_F(FetcherTest, FetcherCompletionFutureBecomesReadyAfterCompletingWork) { FetcherState::kInactive); ASSERT_OK(status); ASSERT_EQUALS(0, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); @@ -304,7 +317,7 @@ TEST_F(FetcherTest, FetcherCompletionFutureBecomesReadyEvenWhenWorkIsInterrupted FetcherState::kInactive); ASSERT_OK(status); ASSERT_EQUALS(0, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); @@ -675,7 +688,7 @@ TEST_F(FetcherTest, FirstBatchEmptyArray) { FetcherState::kInactive); ASSERT_OK(status); ASSERT_EQUALS(0, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_TRUE(documents.empty()); } @@ -690,7 +703,7 @@ TEST_F(FetcherTest, FetchOneDocument) { FetcherState::kInactive); ASSERT_OK(status); ASSERT_EQUALS(0, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); } @@ -716,7 +729,7 @@ TEST_F(FetcherTest, SetNextActionToContinueWhenNextBatchIsNotAvailable) { FetcherState::kInactive); ASSERT_OK(status); ASSERT_EQUALS(0, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); } @@ -749,7 +762,7 @@ TEST_F(FetcherTest, FetchMultipleBatches) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); ASSERT_EQUALS(elapsedMillis, Milliseconds(100)); @@ 
-768,7 +781,7 @@ TEST_F(FetcherTest, FetchMultipleBatches) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc2, documents.front()); ASSERT_EQUALS(elapsedMillis, Milliseconds(200)); @@ -787,7 +800,7 @@ TEST_F(FetcherTest, FetchMultipleBatches) { ASSERT_OK(status); ASSERT_EQUALS(0, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc3, documents.front()); ASSERT_EQUALS(elapsedMillis, Milliseconds(300)); @@ -811,7 +824,7 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction); @@ -826,7 +839,7 @@ TEST_F(FetcherTest, ScheduleGetMoreAndCancel) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc2, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction); @@ -889,7 +902,7 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction); @@ -905,7 +918,7 @@ TEST_F(FetcherTest, ScheduleGetMoreButShutdown) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc2, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction); @@ -943,7 +956,7 @@ TEST_F(FetcherTest, EmptyGetMoreRequestAfterFirstBatchMakesFetcherInactiveAndKil ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction); @@ -957,7 +970,7 @@ TEST_F(FetcherTest, EmptyGetMoreRequestAfterFirstBatchMakesFetcherInactiveAndKil request = noi->getRequest(); } - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); auto&& cmdObj = request.cmdObj; auto firstElement = cmdObj.firstElement(); ASSERT_EQUALS("killCursors", firstElement.fieldNameStringData()); @@ -1002,7 +1015,7 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction); @@ -1020,7 +1033,7 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc2, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kNoAction == nextAction); @@ -1032,7 +1045,7 @@ TEST_F(FetcherTest, UpdateNextActionAfterSecondBatch) { auto noi = 
net->getNextReadyRequest(); auto request = noi->getRequest(); - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); auto&& cmdObj = request.cmdObj; auto firstElement = cmdObj.firstElement(); ASSERT_EQUALS("killCursors", firstElement.fieldNameStringData()); @@ -1100,7 +1113,7 @@ TEST_F(FetcherTest, ShutdownDuringSecondBatch) { ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction); @@ -1163,7 +1176,7 @@ TEST_F(FetcherTest, FetcherAppliesRetryPolicyToFirstCommandButNotToGetMoreReques FetcherState::kActive); ASSERT_OK(status); ASSERT_EQUALS(1LL, cursorId); - ASSERT_EQUALS("db.coll", nss.ns()); + ASSERT_EQUALS("db.coll", nss.ns_forTest()); ASSERT_EQUALS(1U, documents.size()); ASSERT_BSONOBJ_EQ(doc, documents.front()); ASSERT_TRUE(Fetcher::NextAction::kGetMore == nextAction); diff --git a/src/mongo/client/global_conn_pool.cpp b/src/mongo/client/global_conn_pool.cpp index 0533c1a760ca7..053a20a403ba6 100644 --- a/src/mongo/client/global_conn_pool.cpp +++ b/src/mongo/client/global_conn_pool.cpp @@ -27,11 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/client/global_conn_pool.h" - -#include "mongo/base/init.h" #include "mongo/client/global_conn_pool_gen.h" namespace mongo { diff --git a/src/mongo/client/index_spec.cpp b/src/mongo/client/index_spec.cpp index 5cc1348ef7e04..4c97b3ea342c3 100644 --- a/src/mongo/client/index_spec.cpp +++ b/src/mongo/client/index_spec.cpp @@ -27,12 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/client/index_spec.h" - +#include "mongo/base/error_codes.h" #include "mongo/client/dbclient_base.h" -#include "mongo/client/read_preference.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/client/index_spec.h b/src/mongo/client/index_spec.h index 62180daadff78..8c33378079e5c 100644 --- a/src/mongo/client/index_spec.h +++ b/src/mongo/client/index_spec.h @@ -33,6 +33,10 @@ #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/jsobj.h" namespace mongo { diff --git a/src/mongo/client/index_spec_test.cpp b/src/mongo/client/index_spec_test.cpp index 4c574a77087b3..b881a58b790ba 100644 --- a/src/mongo/client/index_spec_test.cpp +++ b/src/mongo/client/index_spec_test.cpp @@ -27,11 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/bson/bsonmisc.h" #include "mongo/client/index_spec.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define ASSERT_UASSERTS(STATEMENT) ASSERT_THROWS(STATEMENT, AssertionException) diff --git a/src/mongo/client/internal_auth.cpp b/src/mongo/client/internal_auth.cpp index f016f3a4f06a8..ab5c8db606277 100644 --- a/src/mongo/client/internal_auth.cpp +++ b/src/mongo/client/internal_auth.cpp @@ -27,17 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/authenticate.h" #include "mongo/client/internal_auth.h" - -#include "mongo/bson/json.h" -#include "mongo/client/sasl_client_authenticate.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/sasl_command_constants.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" #include "mongo/util/password_digest.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" namespace mongo { namespace auth { diff --git a/src/mongo/client/internal_auth.h b/src/mongo/client/internal_auth.h index 2ba33c9e136a0..37085bbdca241 100644 --- a/src/mongo/client/internal_auth.h +++ b/src/mongo/client/internal_auth.h @@ -29,10 +29,15 @@ #pragma once +#include +#include +#include #include #include #include +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" namespace mongo { diff --git a/src/mongo/client/mongo_uri.cpp b/src/mongo/client/mongo_uri.cpp index 786b7ea3c7066..d837b1f250e43 100644 --- a/src/mongo/client/mongo_uri.cpp +++ b/src/mongo/client/mongo_uri.cpp @@ -27,22 +27,42 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/client/mongo_uri.h" - -#include - #include #include +#include #include -#include #include #include +// IWYU pragma: no_include "boost/algorithm/string/detail/classification.hpp" +// IWYU pragma: no_include "boost/algorithm/string/detail/finder.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/client/sasl_client_authenticate.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/authenticate.h" +#include "mongo/client/mongo_uri.h" #include "mongo/db/auth/sasl_command_constants.h" #include "mongo/db/namespace_string.h" #include "mongo/stdx/utility.h" diff --git a/src/mongo/client/mongo_uri.h b/src/mongo/client/mongo_uri.h index 741ddf869c8d2..bd3c3e3bded20 100644 --- a/src/mongo/client/mongo_uri.h +++ b/src/mongo/client/mongo_uri.h @@ -29,15 +29,23 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include #include +#include #include #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/client/connection_string.h" #include "mongo/platform/mutex.h" #include "mongo/transport/transport_layer.h" diff --git a/src/mongo/client/mongo_uri_connect.cpp b/src/mongo/client/mongo_uri_connect.cpp index 1ac571a7e30ca..d379e695afa26 100644 --- a/src/mongo/client/mongo_uri_connect.cpp +++ b/src/mongo/client/mongo_uri_connect.cpp @@ -27,12 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/client/mongo_uri.h" +#include +#include -#include "mongo/client/authenticate.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/client/client_api_version_parameters_gen.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/client/mongo_uri_test.cpp b/src/mongo/client/mongo_uri_test.cpp index 2555121ae0fde..3f141808f0cfb 100644 --- a/src/mongo/client/mongo_uri_test.cpp +++ b/src/mongo/client/mongo_uri_test.cpp @@ -28,22 +28,34 @@ */ -#include "mongo/platform/basic.h" - -#include - +#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" #include "mongo/client/mongo_uri.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" - #include "mongo/logv2/log.h" -#include -#include +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/client/native_sasl_client_session.cpp b/src/mongo/client/native_sasl_client_session.cpp index 172a950c3cccc..1129c889a12e4 100644 --- a/src/mongo/client/native_sasl_client_session.cpp +++ b/src/mongo/client/native_sasl_client_session.cpp @@ -27,21 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/client/native_sasl_client_session.h" - -#include "mongo/base/init.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/authenticate.h" +#include "mongo/client/native_sasl_client_session.h" #include "mongo/client/sasl_client_conversation.h" #include "mongo/client/sasl_oidc_client_conversation.h" #include "mongo/client/sasl_plain_client_conversation.h" #include "mongo/client/sasl_scram_client_conversation.h" #include "mongo/client/scram_client_cache.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/crypto/sha1_block.h" #include "mongo/crypto/sha256_block.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/util/str.h" #ifdef MONGO_CONFIG_SSL diff --git a/src/mongo/client/native_sasl_client_session.h b/src/mongo/client/native_sasl_client_session.h index f3c8571fdffe9..2b6e2848eb0ff 100644 --- a/src/mongo/client/native_sasl_client_session.h +++ b/src/mongo/client/native_sasl_client_session.h @@ -28,6 +28,11 @@ */ +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/client/sasl_client_session.h" namespace mongo { diff --git a/src/mongo/client/read_preference.cpp b/src/mongo/client/read_preference.cpp index 900375df114bc..58e9a95e156f0 100644 --- a/src/mongo/client/read_preference.cpp +++ b/src/mongo/client/read_preference.cpp @@ -27,19 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/client/read_preference.h" - #include +#include + +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/client/read_preference.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" namespace mongo { @@ -106,13 +111,17 @@ ReadPreferenceSetting::ReadPreferenceSetting(ReadPreference pref, hedgingMode(std::move(hedgingMode)) {} ReadPreferenceSetting::ReadPreferenceSetting(ReadPreference pref, Seconds maxStalenessSeconds) - : ReadPreferenceSetting(pref, defaultTagSetForMode(pref), maxStalenessSeconds) {} + : ReadPreferenceSetting(pref, defaultTagSetForMode(pref), maxStalenessSeconds) { + _usedDefaultReadPrefValue = true; +} ReadPreferenceSetting::ReadPreferenceSetting(ReadPreference pref, TagSet tags) : pref(std::move(pref)), tags(std::move(tags)) {} ReadPreferenceSetting::ReadPreferenceSetting(ReadPreference pref) - : ReadPreferenceSetting(pref, defaultTagSetForMode(pref)) {} + : ReadPreferenceSetting(pref, defaultTagSetForMode(pref)) { + _usedDefaultReadPrefValue = true; +} StatusWith ReadPreferenceSetting::fromInnerBSON(const BSONObj& readPrefObj) { std::string modeStr; diff --git a/src/mongo/client/read_preference.h b/src/mongo/client/read_preference.h index 368117d91b5e8..51bb4784e70fb 100644 --- a/src/mongo/client/read_preference.h +++ b/src/mongo/client/read_preference.h @@ -29,12 +29,23 @@ #pragma once +#include +#include +#include +#include + +#include 
"mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/timestamp.h" #include "mongo/client/hedging_mode_gen.h" #include "mongo/client/read_preference_gen.h" #include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/duration.h" namespace mongo { @@ -123,7 +134,9 @@ struct ReadPreferenceSetting { ReadPreferenceSetting(ReadPreference pref, Seconds maxStalenessSeconds); ReadPreferenceSetting(ReadPreference pref, TagSet tags); explicit ReadPreferenceSetting(ReadPreference pref); - ReadPreferenceSetting() : ReadPreferenceSetting(ReadPreference::PrimaryOnly) {} + ReadPreferenceSetting() : ReadPreferenceSetting(ReadPreference::PrimaryOnly) { + _usedDefaultReadPrefValue = true; + } inline bool equals(const ReadPreferenceSetting& other) const { auto hedgingModeEquals = [](const boost::optional& hedgingModeA, @@ -168,7 +181,9 @@ struct ReadPreferenceSetting { toContainingBSON(&bob); return bob.obj(); } - + bool usedDefaultReadPrefValue() const { + return _usedDefaultReadPrefValue; + } /** * Parses a ReadPreferenceSetting from a BSON document of the form: * { mode: , tags: , maxStalenessSeconds: Number, hedge: }. @@ -226,6 +241,9 @@ struct ReadPreferenceSetting { * Either way, it must be that a node opTime of X implies ClusterTime >= X. */ Timestamp minClusterTime{}; + +private: + bool _usedDefaultReadPrefValue = false; }; } // namespace mongo diff --git a/src/mongo/client/read_preference_test.cpp b/src/mongo/client/read_preference_test.cpp index e11c67aa73afe..97d57d244416a 100644 --- a/src/mongo/client/read_preference_test.cpp +++ b/src/mongo/client/read_preference_test.cpp @@ -26,11 +26,18 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/client/read_preference.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/duration.h" namespace { diff --git a/src/mongo/client/remote_command_retry_scheduler.cpp b/src/mongo/client/remote_command_retry_scheduler.cpp index 351bf68ecf11e..3da00cb787b9f 100644 --- a/src/mongo/client/remote_command_retry_scheduler.cpp +++ b/src/mongo/client/remote_command_retry_scheduler.cpp @@ -27,15 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include #include -#include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/remote_command_retry_scheduler.h" #include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/client/remote_command_retry_scheduler.h b/src/mongo/client/remote_command_retry_scheduler.h index 4b6cdd798f446..131ea57e65d9d 100644 --- a/src/mongo/client/remote_command_retry_scheduler.h +++ b/src/mongo/client/remote_command_retry_scheduler.h @@ -29,16 +29,21 @@ #pragma once +#include #include +#include #include #include - -#include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/duration.h" #include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/client/remote_command_retry_scheduler_test.cpp b/src/mongo/client/remote_command_retry_scheduler_test.cpp index 3e97425f3a140..0827a81aaabfc 100644 --- a/src/mongo/client/remote_command_retry_scheduler_test.cpp +++ b/src/mongo/client/remote_command_retry_scheduler_test.cpp @@ -28,21 +28,31 @@ */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include #include +#include #include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/remote_command_retry_scheduler.h" -#include "mongo/db/jsobj.h" -#include "mongo/executor/remote_command_response.h" +#include "mongo/db/baton.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/task_executor_proxy.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" #include "mongo/util/net/hostandport.h" diff --git a/src/mongo/client/remote_command_targeter_factory_impl.cpp b/src/mongo/client/remote_command_targeter_factory_impl.cpp index c140a184a31eb..b78bfcfef38af 100644 --- a/src/mongo/client/remote_command_targeter_factory_impl.cpp +++ b/src/mongo/client/remote_command_targeter_factory_impl.cpp @@ -27,14 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/client/remote_command_targeter_factory_impl.h" - #include +#include + +#include -#include "mongo/base/status_with.h" #include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_factory_impl.h" #include "mongo/client/remote_command_targeter_rs.h" #include "mongo/client/remote_command_targeter_standalone.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/client/remote_command_targeter_factory_impl.h b/src/mongo/client/remote_command_targeter_factory_impl.h index 235947402a76c..243f15e7b279a 100644 --- a/src/mongo/client/remote_command_targeter_factory_impl.h +++ b/src/mongo/client/remote_command_targeter_factory_impl.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter.h" #include "mongo/client/remote_command_targeter_factory.h" namespace mongo { diff --git a/src/mongo/client/remote_command_targeter_factory_mock.cpp b/src/mongo/client/remote_command_targeter_factory_mock.cpp index 478dbfd96ec5d..d5bfa34edd5db 100644 --- a/src/mongo/client/remote_command_targeter_factory_mock.cpp +++ b/src/mongo/client/remote_command_targeter_factory_mock.cpp @@ -27,9 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter_factory_mock.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace { diff --git a/src/mongo/client/remote_command_targeter_factory_mock.h b/src/mongo/client/remote_command_targeter_factory_mock.h index 470d79e21a844..cca2922a9b1a0 100644 --- a/src/mongo/client/remote_command_targeter_factory_mock.h +++ b/src/mongo/client/remote_command_targeter_factory_mock.h @@ -30,7 +30,10 @@ #pragma once #include +#include +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter.h" #include "mongo/client/remote_command_targeter_factory.h" #include "mongo/client/remote_command_targeter_mock.h" diff --git a/src/mongo/client/remote_command_targeter_mock.cpp b/src/mongo/client/remote_command_targeter_mock.cpp index 6ae7c0dabcee2..03fedf3bf4962 100644 --- a/src/mongo/client/remote_command_targeter_mock.cpp +++ b/src/mongo/client/remote_command_targeter_mock.cpp @@ -27,9 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/future_impl.h" namespace mongo { diff --git a/src/mongo/client/remote_command_targeter_mock.h b/src/mongo/client/remote_command_targeter_mock.h index 108764b08a2fd..92092804d1b76 100644 --- a/src/mongo/client/remote_command_targeter_mock.h +++ b/src/mongo/client/remote_command_targeter_mock.h @@ -29,9 +29,22 @@ #pragma once +#include #include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter.h" +#include "mongo/db/operation_context.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/client/remote_command_targeter_rs.cpp b/src/mongo/client/remote_command_targeter_rs.cpp index cb19576086e62..9b34324edbe55 100644 --- a/src/mongo/client/remote_command_targeter_rs.cpp +++ b/src/mongo/client/remote_command_targeter_rs.cpp @@ -28,20 +28,28 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/client/remote_command_targeter_rs.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" #include "mongo/client/connection_string.h" #include "mongo/client/read_preference.h" +#include "mongo/client/remote_command_targeter_rs.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/client/replica_set_monitor_server_parameters_gen.h" #include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" -#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/remote_command_targeter_rs.h b/src/mongo/client/remote_command_targeter_rs.h index 49c2b6850f6de..c7c64c59fe250 100644 --- a/src/mongo/client/remote_command_targeter_rs.h +++ b/src/mongo/client/remote_command_targeter_rs.h @@ -33,7 +33,15 @@ #include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/client/remote_command_targeter_standalone.cpp b/src/mongo/client/remote_command_targeter_standalone.cpp index d719f1fd44f2a..21f0e2913b782 100644 --- a/src/mongo/client/remote_command_targeter_standalone.cpp +++ b/src/mongo/client/remote_command_targeter_standalone.cpp @@ -27,12 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/client/connection_string.h" -#include "mongo/client/remote_command_targeter_standalone.h" +#include +#include #include "mongo/base/status_with.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_standalone.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/client/remote_command_targeter_standalone.h b/src/mongo/client/remote_command_targeter_standalone.h index 05c7bcbc54592..31f2c538bc21d 100644 --- a/src/mongo/client/remote_command_targeter_standalone.h +++ b/src/mongo/client/remote_command_targeter_standalone.h @@ -29,7 +29,16 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" #include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/client/replica_set_change_notifier.cpp b/src/mongo/client/replica_set_change_notifier.cpp index 6f788b884188c..9a31b94565500 100644 --- a/src/mongo/client/replica_set_change_notifier.cpp +++ b/src/mongo/client/replica_set_change_notifier.cpp @@ -28,13 +28,16 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/client/replica_set_change_notifier.h" +#include +#include +#include "mongo/client/replica_set_change_notifier.h" #include "mongo/logv2/log.h" -#include "mongo/util/fail_point.h" -#include "mongo/util/stacktrace.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/replica_set_change_notifier.h b/src/mongo/client/replica_set_change_notifier.h index 4408cd46d5371..2d9fd41a882da 100644 --- a/src/mongo/client/replica_set_change_notifier.h +++ b/src/mongo/client/replica_set_change_notifier.h @@ -29,8 +29,13 @@ #pragma once +#include +#include +#include +#include #include #include +#include #include #include "mongo/client/connection_string.h" @@ -39,6 +44,7 @@ #include "mongo/stdx/unordered_map.h" #include "mongo/util/functional.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/client/replica_set_monitor.cpp b/src/mongo/client/replica_set_monitor.cpp index 343168425e859..523a8be5493a5 100644 --- a/src/mongo/client/replica_set_monitor.cpp +++ b/src/mongo/client/replica_set_monitor.cpp @@ -29,26 +29,17 @@ #include "mongo/client/replica_set_monitor.h" -#include #include -#include +#include -#include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/connpool.h" #include "mongo/client/global_conn_pool.h" -#include "mongo/client/read_preference.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/repl/bson_extract_optime.h" -#include "mongo/db/server_options.h" +#include "mongo/client/replica_set_monitor_manager.h" #include "mongo/logv2/log.h" -#include "mongo/platform/atomic_word.h" -#include "mongo/platform/mutex.h" -#include "mongo/stdx/condition_variable.h" -#include "mongo/util/background.h" -#include "mongo/util/debug_util.h" -#include "mongo/util/exit.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/fail_point.h" 
-#include "mongo/util/string_map.h" -#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/replica_set_monitor.h b/src/mongo/client/replica_set_monitor.h index 4683e927123da..9a40495244872 100644 --- a/src/mongo/client/replica_set_monitor.h +++ b/src/mongo/client/replica_set_monitor.h @@ -34,6 +34,7 @@ #include #include +#include "mongo/base/string_data.h" #include "mongo/client/mongo_uri.h" #include "mongo/client/replica_set_change_notifier.h" #include "mongo/client/replica_set_monitor_interface.h" diff --git a/src/mongo/client/replica_set_monitor_integration_test.cpp b/src/mongo/client/replica_set_monitor_integration_test.cpp index 383411d0d61b9..a614c442ed053 100644 --- a/src/mongo/client/replica_set_monitor_integration_test.cpp +++ b/src/mongo/client/replica_set_monitor_integration_test.cpp @@ -26,17 +26,49 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/client/read_preference.h" +#include "mongo/client/replica_set_change_notifier.h" +#include "mongo/client/replica_set_monitor.h" #include "mongo/client/replica_set_monitor_manager.h" -#include "mongo/client/streamable_replica_set_monitor.h" #include "mongo/db/wire_version.h" +#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/hierarchical_acquisition.h" namespace mongo { namespace executor { @@ -157,7 +189,6 @@ class ReplicaSetMonitorFixture : public mongo::unittest::Test { TEST_F(ReplicaSetMonitorFixture, StreamableRSMWireVersion) { auto rsm = ReplicaSetMonitorManager::get()->getOrCreateMonitor(replSetUri, nullptr); - // Schedule isMaster requests and wait for the responses. 
auto primaryFuture = rsm->getHostOrRefresh(ReadPreferenceSetting(mongo::ReadPreference::PrimaryOnly), CancellationToken::uncancelable()); diff --git a/src/mongo/client/replica_set_monitor_manager.cpp b/src/mongo/client/replica_set_monitor_manager.cpp index 09394f8d2e6b2..3caaef530cc8f 100644 --- a/src/mongo/client/replica_set_monitor_manager.cpp +++ b/src/mongo/client/replica_set_monitor_manager.cpp @@ -28,18 +28,28 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/client/replica_set_monitor_manager.h" - +#include +#include +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/connection_string.h" #include "mongo/client/mongo_uri.h" #include "mongo/client/replica_set_monitor.h" +#include "mongo/client/replica_set_monitor_manager.h" #include "mongo/client/replica_set_monitor_server_parameters.h" +#include "mongo/client/sdam/topology_listener.h" #include "mongo/client/streamable_replica_set_monitor.h" +#include "mongo/db/service_context.h" #include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" @@ -47,9 +57,14 @@ #include "mongo/executor/task_executor_pool.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/util/duration.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -74,8 +89,8 @@ const auto getGlobalRSMMonitorManager = Status ReplicaSetMonitorManagerNetworkConnectionHook::validateHost( const HostAndPort& remoteHost, - const BSONObj& isMasterRequest, - const executor::RemoteCommandResponse& isMasterReply) { + const BSONObj& helloRequest, + const executor::RemoteCommandResponse& helloReply) { auto monitor = ReplicaSetMonitorManager::get()->getMonitorForHost(remoteHost); if (!monitor) { return Status::OK(); @@ -87,19 +102,19 @@ Status ReplicaSetMonitorManagerNetworkConnectionHook::validateHost( auto publisher = streamableMonitor->getEventsPublisher(); if (publisher) { try { - if (isMasterReply.status.isOK()) { + if (helloReply.status.isOK()) { publisher->onServerHandshakeCompleteEvent( - *isMasterReply.elapsed, remoteHost, isMasterReply.data); + *helloReply.elapsed, remoteHost, helloReply.data); } else { publisher->onServerHandshakeFailedEvent( - remoteHost, isMasterReply.status, isMasterReply.data); + remoteHost, helloReply.status, helloReply.data); } } catch (const DBException& exception) { LOGV2_ERROR(4712101, "An error occurred publishing a ReplicaSetMonitor handshake event", "error"_attr = exception.toStatus(), "replicaSet"_attr = monitor->getName(), - "handshakeStatus"_attr = isMasterReply.status); + "handshakeStatus"_attr = helloReply.status); return exception.toStatus(); } } diff --git a/src/mongo/client/replica_set_monitor_manager.h b/src/mongo/client/replica_set_monitor_manager.h index 2e26ba8776943..edf21ee000f57 100644 --- a/src/mongo/client/replica_set_monitor_manager.h +++ b/src/mongo/client/replica_set_monitor_manager.h @@ -29,18 +29,37 @@ #pragma once +#include +#include +#include #include +#include 
+#include #include #include +#include "mongo/base/counter.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/mongo_uri.h" #include "mongo/client/replica_set_change_notifier.h" #include "mongo/client/replica_set_monitor_stats.h" #include "mongo/executor/egress_tag_closer.h" #include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/string_map.h" namespace mongo { @@ -56,8 +75,8 @@ class ReplicaSetMonitorManagerNetworkConnectionHook final : public executor::Net virtual ~ReplicaSetMonitorManagerNetworkConnectionHook() = default; Status validateHost(const HostAndPort& remoteHost, - const BSONObj& isMasterRequest, - const executor::RemoteCommandResponse& isMasterReply) override; + const BSONObj& helloRequest, + const executor::RemoteCommandResponse& helloReply) override; StatusWith> makeRequest( const HostAndPort& remoteHost) override; diff --git a/src/mongo/client/replica_set_monitor_protocol_test_util.cpp b/src/mongo/client/replica_set_monitor_protocol_test_util.cpp index cb78bbaf097bb..f526a360a520f 100644 --- a/src/mongo/client/replica_set_monitor_protocol_test_util.cpp +++ b/src/mongo/client/replica_set_monitor_protocol_test_util.cpp @@ -26,9 +26,16 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/client/replica_set_monitor_protocol_test_util.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/client/replica_set_monitor_protocol_test_util.h b/src/mongo/client/replica_set_monitor_protocol_test_util.h index 7cd45f4ed998d..6ca47ef4e24eb 100644 --- a/src/mongo/client/replica_set_monitor_protocol_test_util.h +++ b/src/mongo/client/replica_set_monitor_protocol_test_util.h @@ -29,10 +29,16 @@ #pragma once -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/replica_set_monitor_server_parameters.h" #include "mongo/client/replica_set_monitor_server_parameters_gen.h" +#include "mongo/db/server_parameter.h" +#include "mongo/platform/basic.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/client/replica_set_monitor_server_parameters.cpp b/src/mongo/client/replica_set_monitor_server_parameters.cpp index 9b831d768c409..840cf8cc87af9 100644 --- a/src/mongo/client/replica_set_monitor_server_parameters.cpp +++ b/src/mongo/client/replica_set_monitor_server_parameters.cpp @@ -29,8 +29,15 @@ #include "mongo/client/replica_set_monitor_server_parameters.h" +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/replica_set_monitor_server_parameters_gen.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/client/replica_set_monitor_server_parameters.h b/src/mongo/client/replica_set_monitor_server_parameters.h index 40bcf3f701632..36074b83b76c7 100644 --- a/src/mongo/client/replica_set_monitor_server_parameters.h +++ b/src/mongo/client/replica_set_monitor_server_parameters.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/platform/atomic_word.h" #include "mongo/platform/basic.h" #include "mongo/util/str.h" diff --git a/src/mongo/client/replica_set_monitor_server_parameters.idl b/src/mongo/client/replica_set_monitor_server_parameters.idl index c1cf9f3322af4..6baf9ee6253f4 100644 --- a/src/mongo/client/replica_set_monitor_server_parameters.idl +++ b/src/mongo/client/replica_set_monitor_server_parameters.idl @@ -32,7 +32,7 @@ server_parameters: replicaSetMonitorProtocol: description: >- Select which replica set monitor protocol to use - the new 'streamable' protocol that is - both SDAM compliant and allows for awaitable isMaster with exhaust, the 'sdam' compliant + both SDAM compliant and allows for awaitable "hello" with exhaust, the 'sdam' compliant protocol or the old 'scanning' protocol. set_at: startup cpp_class: diff --git a/src/mongo/client/replica_set_monitor_server_parameters_test.cpp b/src/mongo/client/replica_set_monitor_server_parameters_test.cpp index 7a994cf3f95f1..f7cb837c1d0b6 100644 --- a/src/mongo/client/replica_set_monitor_server_parameters_test.cpp +++ b/src/mongo/client/replica_set_monitor_server_parameters_test.cpp @@ -27,14 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/client/replica_set_monitor.h" #include "mongo/client/replica_set_monitor_protocol_test_util.h" #include "mongo/client/replica_set_monitor_server_parameters.h" -#include "mongo/client/streamable_replica_set_monitor.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/client/replica_set_monitor_stats.h b/src/mongo/client/replica_set_monitor_stats.h index 2512479782536..139ef0ff9f4ae 100644 --- a/src/mongo/client/replica_set_monitor_stats.h +++ b/src/mongo/client/replica_set_monitor_stats.h @@ -31,8 +31,11 @@ #include #include +#include +#include #include "mongo/base/counter.h" +#include "mongo/platform/mutex.h" #include "mongo/util/duration.h" #include "mongo/util/scopeguard.h" #include "mongo/util/timer.h" diff --git a/src/mongo/client/sasl_aws_client_conversation.cpp b/src/mongo/client/sasl_aws_client_conversation.cpp index 861741be9d18e..c554b67f684ce 100644 --- a/src/mongo/client/sasl_aws_client_conversation.cpp +++ b/src/mongo/client/sasl_aws_client_conversation.cpp @@ -27,20 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/client/sasl_aws_client_conversation.h" - -#include +#include +#include #include +#include + +#include "mongo/base/data_builder.h" +#include "mongo/base/data_range.h" +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" -#include "mongo/bson/json.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/client/sasl_aws_client_conversation.h" #include "mongo/client/sasl_aws_client_options.h" #include "mongo/client/sasl_aws_client_protocol.h" -#include "mongo/client/sasl_aws_client_protocol_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/http_client.h" +#include "mongo/util/str.h" namespace mongo { namespace awsIam { diff --git a/src/mongo/client/sasl_aws_client_conversation.h b/src/mongo/client/sasl_aws_client_conversation.h index 63eed69a42cb5..2217b0476c84e 100644 --- a/src/mongo/client/sasl_aws_client_conversation.h +++ b/src/mongo/client/sasl_aws_client_conversation.h @@ -29,10 +29,12 @@ #pragma once +#include #include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/client/sasl_aws_protocol_common.h" #include "mongo/client/sasl_client_conversation.h" diff --git a/src/mongo/client/sasl_aws_client_protocol.cpp b/src/mongo/client/sasl_aws_client_protocol.cpp index 70b5b9d91580d..5a58e8b04fd0c 100644 --- a/src/mongo/client/sasl_aws_client_protocol.cpp +++ b/src/mongo/client/sasl_aws_client_protocol.cpp @@ -27,22 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/client/sasl_aws_client_protocol.h" - -#include - -#include "mongo/base/data_range_cursor.h" -#include "mongo/base/data_type_validated.h" -#include "mongo/base/init.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/client/sasl_aws_client_protocol.h" #include "mongo/client/sasl_aws_client_protocol_gen.h" +#include "mongo/client/sasl_aws_protocol_common_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/platform/mutex.h" #include "mongo/platform/random.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" #include "mongo/util/kms_message_support.h" +#include "mongo/util/str.h" namespace mongo { namespace awsIam { diff --git a/src/mongo/client/sasl_client_authenticate.cpp b/src/mongo/client/sasl_client_authenticate.cpp index aafc6523494a7..00c351a8f6678 100644 --- a/src/mongo/client/sasl_client_authenticate.cpp +++ b/src/mongo/client/sasl_client_authenticate.cpp @@ -31,9 +31,13 @@ #include -#include "mongo/base/string_data.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/auth/sasl_command_constants.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" #include "mongo/util/str.h" diff --git a/src/mongo/client/sasl_client_authenticate.h b/src/mongo/client/sasl_client_authenticate.h index 7ac1419308fe3..d3ef4059ab9b2 100644 --- a/src/mongo/client/sasl_client_authenticate.h +++ b/src/mongo/client/sasl_client_authenticate.h @@ -29,15 +29,21 @@ #pragma once +#include #include #include +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsontypes.h" #include "mongo/client/authenticate.h" +#include "mongo/client/sasl_client_session.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" #include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" namespace mongo { class BSONObj; diff --git a/src/mongo/client/sasl_client_authenticate_impl.cpp b/src/mongo/client/sasl_client_authenticate_impl.cpp index ffba7ba146db5..a5c6c1f4d4249 100644 --- a/src/mongo/client/sasl_client_authenticate_impl.cpp +++ b/src/mongo/client/sasl_client_authenticate_impl.cpp @@ -35,21 +35,43 @@ */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include #include +#include + +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/client/authenticate.h" #include "mongo/client/sasl_client_authenticate.h" #include "mongo/client/sasl_client_session.h" #include "mongo/db/auth/sasl_command_constants.h" +#include "mongo/db/database_name.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" 
+#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/password_digest.h" @@ -147,7 +169,7 @@ Status saslConfigureSession(SaslClientSession* session, status = bsonExtractStringField(saslParameters, saslCommandUserFieldName, &value); if (status.isOK()) { session->setParameter(SaslClientSession::parameterUser, value); - } else if ((targetDatabase != NamespaceString::kExternalDb) || + } else if ((targetDatabase != DatabaseName::kExternal.db()) || ((mechanism != auth::kMechanismMongoAWS) && (mechanism != auth::kMechanismMongoOIDC))) { return status; @@ -164,7 +186,7 @@ Status saslConfigureSession(SaslClientSession* session, if (status.isOK()) { session->setParameter(SaslClientSession::parameterPassword, value); } else if (!(status == ErrorCodes::NoSuchKey && - targetDatabase == NamespaceString::kExternalDb)) { + targetDatabase == DatabaseName::kExternal.db())) { // $external users do not have passwords, hence NoSuchKey is expected return status; } diff --git a/src/mongo/client/sasl_client_session.cpp b/src/mongo/client/sasl_client_session.cpp index 4c92134a094de..74966f2bde104 100644 --- a/src/mongo/client/sasl_client_session.cpp +++ b/src/mongo/client/sasl_client_session.cpp @@ -29,12 +29,12 @@ #include "mongo/client/sasl_client_session.h" -#include "mongo/base/init.h" -#include "mongo/util/allocator.h" +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/util/assert_util.h" -#include "mongo/util/concurrency/mutex.h" -#include "mongo/util/signal_handlers_synchronous.h" -#include "mongo/util/str.h" namespace mongo { SaslClientSession::SaslClientSessionFactoryFn SaslClientSession::create; diff --git a/src/mongo/client/sasl_client_session.h b/src/mongo/client/sasl_client_session.h index 340b1a76440be..280975309f50d 100644 --- a/src/mongo/client/sasl_client_session.h +++ b/src/mongo/client/sasl_client_session.h @@ -29,6 +29,7 @@ #pragma once +#include #include #include #include diff --git a/src/mongo/client/sasl_oidc_client_conversation.cpp b/src/mongo/client/sasl_oidc_client_conversation.cpp index e0e15b6c39527..7bc05586fd893 100644 --- a/src/mongo/client/sasl_oidc_client_conversation.cpp +++ b/src/mongo/client/sasl_oidc_client_conversation.cpp @@ -27,20 +27,37 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/client/sasl_oidc_client_conversation.h" +#include +#include +#include "mongo/base/data_builder.h" #include "mongo/base/data_range.h" +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/data_type_validated.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/client/mongo_uri.h" -#include "mongo/client/sasl_client_session.h" -#include "mongo/client/sasl_oidc_client_params_gen.h" +#include "mongo/client/sasl_oidc_client_conversation.h" +#include "mongo/client/sasl_oidc_client_types_gen.h" +#include "mongo/db/auth/oauth_authorization_server_metadata_gen.h" #include "mongo/db/auth/oauth_discovery_factory.h" #include "mongo/db/auth/oidc_protocol_gen.h" -#include "mongo/rpc/object_check.h" -#include "mongo/shell/program_runner.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/net/http_client.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -51,6 +68,7 @@ constexpr auto kGrantTypeParameterDeviceCodeValue = "urn:ietf:params:oauth:grant-type:device_code"_sd; constexpr auto kGrantTypeParameterRefreshTokenValue = "refresh_token"_sd; constexpr auto kDeviceCodeParameterName = "device_code"_sd; +constexpr auto kCodeParameterName = "code"_sd; constexpr auto kRefreshTokenParameterName = kGrantTypeParameterRefreshTokenValue; inline void appendPostBodyRequiredParams(StringBuilder* sb, StringData clientId) { @@ -71,8 +89,10 @@ inline void appendPostBodyDeviceCodeRequestParams( } inline void appendPostBodyTokenRequestParams(StringBuilder* sb, StringData deviceCode) { + // kDeviceCodeParameterName and kCodeParameterName are the same, IDP's use different names. *sb << "&" << kGrantTypeParameterName << "=" << kGrantTypeParameterDeviceCodeValue << "&" - << kDeviceCodeParameterName << "=" << uriEncode(deviceCode); + << kDeviceCodeParameterName << "=" << uriEncode(deviceCode) << "&" << kCodeParameterName + << "=" << uriEncode(deviceCode); } inline void appendPostBodyRefreshFlowParams(StringBuilder* sb, StringData refreshToken) { @@ -122,8 +142,18 @@ std::pair doDeviceAuthorizationGrantFlow( // Simulate end user login via user verification URI. auto deviceAuthorizationResponse = OIDCDeviceAuthorizationResponse::parse( IDLParserContext{"oidcDeviceAuthorizationResponse"}, deviceAuthorizationResponseObj); + + // IDP's use different names to refer to the verification url. + const auto& optURI = deviceAuthorizationResponse.getVerificationUri(); + const auto& optURL = deviceAuthorizationResponse.getVerificationUrl(); + uassert(ErrorCodes::BadValue, "Encountered empty device authorization url", optURI || optURL); + uassert(ErrorCodes::BadValue, + "Encounterd both verification_uri and verification_url", + !(optURI && optURL)); + auto deviceAuthURL = optURI ? optURI.get() : optURL.get(); + oidcClientGlobalParams.oidcIdPAuthCallback( - principalName, deviceAuthorizationResponse.getVerificationUriComplete()); + principalName, deviceAuthURL, deviceAuthorizationResponse.getUserCode()); // Poll token endpoint for access and refresh tokens. It should return immediately since // the shell blocks on the authenticationSimulator until it completes, but poll anyway. 
diff --git a/src/mongo/client/sasl_oidc_client_conversation.h b/src/mongo/client/sasl_oidc_client_conversation.h index ee2f91c9a32bb..4dc69b1ae46cc 100644 --- a/src/mongo/client/sasl_oidc_client_conversation.h +++ b/src/mongo/client/sasl_oidc_client_conversation.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/client/sasl_client_conversation.h" #include "mongo/client/sasl_oidc_client_params.h" @@ -46,9 +51,8 @@ class SaslOIDCClientConversation : public SaslClientConversation { _principalName(principalName.rawData()), _accessToken(accessToken.rawData()) {} - static void setOIDCIdPAuthCallback( - const std::function& callback) { - oidcClientGlobalParams.oidcIdPAuthCallback = callback; + static void setOIDCIdPAuthCallback(std::function callback) { + oidcClientGlobalParams.oidcIdPAuthCallback = std::move(callback); } StatusWith step(StringData inputData, std::string* outputData) override; diff --git a/src/mongo/client/sasl_oidc_client_params.h b/src/mongo/client/sasl_oidc_client_params.h index dfa63a6e34492..536e562370e46 100644 --- a/src/mongo/client/sasl_oidc_client_params.h +++ b/src/mongo/client/sasl_oidc_client_params.h @@ -36,6 +36,8 @@ #include "mongo/base/string_data.h" namespace mongo { +using oidcIdPAuthCallbackT = void(StringData, StringData, StringData); + /** * OIDC Client parameters */ @@ -51,10 +53,10 @@ struct OIDCClientGlobalParams { std::string oidcRefreshToken; /* - * Callback function that accepts the username and IdP endpoint and then performs IdP - * authentication. This should be provided by tests, presumably as a JS function. + * Callback function that accepts the username, activation code and IdP endpoint and then + * performs IdP authentication. This should be provided by tests, presumably as a JS function. */ - std::function oidcIdPAuthCallback; + std::function oidcIdPAuthCallback; /** * Client ID. Populated via server SASL reply. */ diff --git a/src/mongo/client/sasl_oidc_client_params.idl b/src/mongo/client/sasl_oidc_client_params.idl deleted file mode 100644 index 0ea16c025caeb..0000000000000 --- a/src/mongo/client/sasl_oidc_client_params.idl +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. - -global: - cpp_namespace: "mongo" - configs: - section: 'OIDC Options' - source: [ cli ] - cpp_includes: - - mongo/client/sasl_oidc_client_params.h - -imports: - - "mongo/db/basic_types.idl" - -configs: - oidcAccessToken: - description: >- - If set, the shell will pass this token to the server for any user that tries - authenticating with the MONGODB-OIDC mechanism. This will bypass the device authorization - grant flow. - arg_vartype: String - cpp_varname: oidcClientGlobalParams.oidcAccessToken - -structs: - OIDCDeviceAuthorizationResponse: - description: "IdP response from the deviceAuthorization endpoint." - strict: false - fields: - device_code: - description: "Device code to use in token request" - cpp_name: deviceCode - type: string - verification_uri_complete: - description: "URI for end user authentication" - cpp_name: verificationUriComplete - type: string - - OIDCTokenResponse: - description: IdP response from the token endpoint. - strict: false - fields: - access_token: - description: "Access token returned to be sent to the server." - cpp_name: accessToken - type: string - optional: true - refresh_token: - description: "Refresh token returned to be used for token reacquisition." 
- cpp_name: refreshToken - type: string - optional: true - error: - description: "Error message returned by the token endpoint." - type: string - optional: true diff --git a/src/mongo/client/sasl_oidc_client_types.idl b/src/mongo/client/sasl_oidc_client_types.idl new file mode 100644 index 0000000000000..9a38bc9c9313f --- /dev/null +++ b/src/mongo/client/sasl_oidc_client_types.idl @@ -0,0 +1,75 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + +structs: + OIDCDeviceAuthorizationResponse: + description: "IdP response from the deviceAuthorization endpoint." + strict: false + fields: + device_code: + description: "Device code to use in token request" + cpp_name: deviceCode + type: string + user_code: + description: "Code the end user enters at the verification URI to authenticate" + cpp_name: userCode + type: string + verification_uri: + description: "URI for end user authentication" + cpp_name: verificationUri + type: string + optional: true + verification_url: + description: "Same as verification_uri; some providers use this name" + cpp_name: verificationUrl + type: string + optional: true + OIDCTokenResponse: + description: IdP response from the token endpoint. + strict: false + fields: + access_token: + description: "Access token returned to be sent to the server." + cpp_name: accessToken + type: string + optional: true + refresh_token: + description: "Refresh token returned to be used for token reacquisition." + cpp_name: refreshToken + type: string + optional: true + error: + description: "Error message returned by the token endpoint." + type: string + optional: true diff --git a/src/mongo/client/sasl_plain_client_conversation.cpp b/src/mongo/client/sasl_plain_client_conversation.cpp index 06755f19facfc..6f5b87eed5973 100644 --- a/src/mongo/client/sasl_plain_client_conversation.cpp +++ b/src/mongo/client/sasl_plain_client_conversation.cpp @@ -27,14 +27,13 @@ * it in the license file.
*/ -#include "mongo/platform/basic.h" - -#include "mongo/client/sasl_plain_client_conversation.h" +#include #include "mongo/base/status_with.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/client/sasl_client_session.h" -#include "mongo/util/password_digest.h" +#include "mongo/client/sasl_plain_client_conversation.h" namespace mongo { diff --git a/src/mongo/client/sasl_scram_client_conversation.cpp b/src/mongo/client/sasl_scram_client_conversation.cpp index 52bc641479787..e7ccae8ed5ef0 100644 --- a/src/mongo/client/sasl_scram_client_conversation.cpp +++ b/src/mongo/client/sasl_scram_client_conversation.cpp @@ -27,19 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/client/sasl_scram_client_conversation.h" - #include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/parse_number.h" -#include "mongo/client/scram_client_cache.h" +#include "mongo/base/status.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/sasl_scram_client_conversation.h" #include "mongo/platform/random.h" #include "mongo/util/base64.h" -#include "mongo/util/password_digest.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/client/sasl_scram_client_conversation.h b/src/mongo/client/sasl_scram_client_conversation.h index 0d5f4eafc6c05..2cac3cfca259e 100644 --- a/src/mongo/client/sasl_scram_client_conversation.h +++ b/src/mongo/client/sasl_scram_client_conversation.h @@ -29,16 +29,24 @@ #pragma once +#include +#include #include +#include +#include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/client/sasl_client_conversation.h" #include "mongo/client/sasl_client_session.h" #include "mongo/client/scram_client_cache.h" #include "mongo/crypto/mechanism_scram.h" +#include "mongo/crypto/sha1_block.h" +#include "mongo/util/assert_util.h" #include "mongo/util/icu.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/client/scoped_db_connection_test.cpp b/src/mongo/client/scoped_db_connection_test.cpp index 141003d7253a5..c9e0473fd713f 100644 --- a/src/mongo/client/scoped_db_connection_test.cpp +++ b/src/mongo/client/scoped_db_connection_test.cpp @@ -27,11 +27,27 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/connpool.h" #include "mongo/client/global_conn_pool.h" #include "mongo/dbtests/mock/mock_conn_registry.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/executor/connection_pool_stats.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/assert_that.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/matcher.h" +#include "mongo/unittest/matcher_core.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace { @@ -59,7 +75,6 @@ class ConnectionPoolTest : public unittest::Test { TEST_F(ConnectionPoolTest, ConnectionPoolHistogramStats) { using namespace unittest::match; - RAIIServerParameterControllerForTest controller("featureFlagConnHealthMetrics", true); FailPointEnableBlock fp("injectWaitTimeForConnpoolAcquisition", BSON("sleepTimeMillis" << 60)); const auto host = getServerHostAndPort().toString(); diff --git a/src/mongo/client/sdam/election_id_set_version_pair.h b/src/mongo/client/sdam/election_id_set_version_pair.h index 0325eed418df9..c8a906f94723c 100644 --- a/src/mongo/client/sdam/election_id_set_version_pair.h +++ b/src/mongo/client/sdam/election_id_set_version_pair.h @@ -30,8 +30,12 @@ #include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" + namespace mongo::sdam { // Comparable pair or ElectionId (term) and SetVersion. diff --git a/src/mongo/client/sdam/election_id_set_version_pair_test.cpp b/src/mongo/client/sdam/election_id_set_version_pair_test.cpp index dbb6550aa0eee..b7a1da026e39c 100644 --- a/src/mongo/client/sdam/election_id_set_version_pair_test.cpp +++ b/src/mongo/client/sdam/election_id_set_version_pair_test.cpp @@ -28,7 +28,18 @@ */ #include "mongo/client/sdam/election_id_set_version_pair.h" -#include "mongo/unittest/unittest.h" + +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo::sdam { namespace { diff --git a/src/mongo/client/sdam/json_test_arg_parser.cpp b/src/mongo/client/sdam/json_test_arg_parser.cpp index f5d4c8e39f205..154aff11903ea 100644 --- a/src/mongo/client/sdam/json_test_arg_parser.cpp +++ b/src/mongo/client/sdam/json_test_arg_parser.cpp @@ -29,10 +29,21 @@ #include "mongo/client/sdam/json_test_arg_parser.h" +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/client/sdam/json_test_runner_cli_options_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/options_parser.h" +#include "mongo/util/options_parser/value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/client/sdam/json_test_arg_parser.h b/src/mongo/client/sdam/json_test_arg_parser.h index 40d68d5e2db04..0d8f1d9036723 100644 --- a/src/mongo/client/sdam/json_test_arg_parser.h +++ b/src/mongo/client/sdam/json_test_arg_parser.h @@ -27,14 +27,16 @@ * it in the 
license file. */ -#include -#include -#include - #include #include #include #include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include #include "mongo/client/sdam/json_test_runner_cli_options_gen.h" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/README.rst b/src/mongo/client/sdam/json_tests/sdam_tests/README.rst index f8bd43f8d4d68..3bc0127f0576e 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/README.rst +++ b/src/mongo/client/sdam/json_tests/sdam_tests/README.rst @@ -35,12 +35,12 @@ Each phase object has two keys: A response is a pair of values: - The source, for example "a:27017". - This is the address the client sent the "ismaster" command to. -- An ismaster response, for example `{ok: 1, ismaster: true}`. + This is the address the client sent the "hello" command to. +- A "hello" response, for example `{ok: 1, isWritablePrimary: true}`. If the response includes an electionId it is shown in extended JSON like `{"$oid": "000000000000000000000002"}`. The empty response `{}` indicates a network error - when attempting to call "ismaster". + when attempting to call "hello". In non-monitoring tests, an "outcome" represents the correct TopologyDescription that results from processing the responses in the phases @@ -67,10 +67,10 @@ current TopologyDescription. It has the following keys: - maxWireVersion: absent or an integer. In monitoring tests, an "outcome" contains a list of SDAM events that should -have been published by the client as a result of processing ismaster responses +have been published by the client as a result of processing "hello" responses in the current phase. Any SDAM events published by the client during its construction (that is, prior to processing any of the responses) should be -combined with the events published during processing of ismaster responses +combined with the events published during processing of "hello" responses of the first phase of the test. A test MAY explicitly verify events published during client construction by providing an empty responses array for the first phase. @@ -83,7 +83,7 @@ Mocking ~~~~~~~ Drivers should be able to test their server discovery and monitoring logic -without any network I/O, by parsing ismaster responses from the test file +without any network I/O, by parsing "hello" responses from the test file and passing them into the driver code. Parts of the client and monitoring code may need to be mocked or subclassed to achieve this. 
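As an aside (not part of the diff): a minimal, illustrative sketch of the single-phase test-file shape the README above describes. Every value below is made up for illustration; only the structure — a "responses" array of [address, "hello" reply] pairs and an "outcome" giving the expected TopologyDescription — follows the documented format.

```json
{
  "description": "Illustrative only: discover a primary from one hello reply",
  "uri": "mongodb://a",
  "phases": [
    {
      "responses": [
        [
          "a:27017",
          {
            "ok": 1,
            "isWritablePrimary": true,
            "setName": "rs",
            "hosts": ["a:27017"],
            "minWireVersion": 0,
            "maxWireVersion": 6
          }
        ]
      ],
      "outcome": {
        "servers": {
          "a:27017": {"type": "RSPrimary", "setName": "rs"}
        },
        "topologyType": "ReplicaSetWithPrimary",
        "setName": "rs"
      }
    }
  ]
}
```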
`A reference implementation for PyMongo 3.x is available here diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/compatible.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/compatible.json index 0e2c68bf1f765..2ff33116a40a8 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/compatible.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/compatible.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/compatible_unknown.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/compatible_unknown.json index 878c1d8df543c..3ad398be5c403 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/compatible_unknown.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/compatible_unknown.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_arbiters.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_arbiters.json index 98d72b5ccbed5..f7931d6f484a2 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_arbiters.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_arbiters.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_passives.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_passives.json index 5ee3b27478db6..58bc42e6b2fbe 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_passives.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_passives.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -47,7 +47,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "passive": true, "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_primary.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_primary.json index a8ff093cb8edf..fe8464aad5257 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_primary.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_primary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_secondary.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_secondary.json index 7210b3845c782..473ae8fd0e873 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_secondary.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discover_secondary.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discovery.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discovery.json index f464134f12ed1..7d8a3777c230b 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discovery.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discovery.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, 
"secondary": true, "setName": "rs", "hosts": [ @@ -47,7 +47,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "primary": "d:27017", @@ -91,7 +91,7 @@ "d:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "b:27017", @@ -138,7 +138,7 @@ "c:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discovery.json.notused b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discovery.json.notused index 57ed568e3b0c3..4a489f68ab5ac 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/discovery.json.notused +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/discovery.json.notused @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ @@ -47,7 +47,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "primary": "d:27017", @@ -91,7 +91,7 @@ "d:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "b:27017", @@ -134,7 +134,7 @@ "c:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/equal_electionids.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/equal_electionids.json index f8d20b350df42..0e8efc46cf703 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/equal_electionids.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/equal_electionids.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/ghost_discovered.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/ghost_discovered.json index bf22cbb0eb5ef..994b69cece6cd 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/ghost_discovered.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/ghost_discovered.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/hosts_differ_from_seeds.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/hosts_differ_from_seeds.json index 4f2351cf05cd9..2d5b577cd670e 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/hosts_differ_from_seeds.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/hosts_differ_from_seeds.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_arbiter.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_arbiter.json index 1d59b967c40c4..2aae7c37fe727 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_arbiter.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_arbiter.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_ghost.json 
b/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_ghost.json index 193eea0c7678f..589671a1a75b5 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_ghost.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_ghost.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_other.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_other.json index 93ba88e661777..dd485e0113be9 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_other.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/incompatible_other.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/ls_timeout.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/ls_timeout.json index 0b0c2c6a885e5..963f8801a546e 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/ls_timeout.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/ls_timeout.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -57,7 +57,7 @@ "d:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "isreplicaset": true, "setVersion": 1, "electionId": { @@ -98,7 +98,7 @@ "e:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "hosts": [ "a:27017", "b:27017", @@ -144,7 +144,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", @@ -192,7 +192,7 @@ "c:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "setName": "rs", "hidden": true, "logicalSessionTimeoutMinutes": 1, @@ -234,7 +234,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/member_reconfig.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/member_reconfig.json index 9f970baccea4f..f815374842d01 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/member_reconfig.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/member_reconfig.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -45,7 +45,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/member_standalone.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/member_standalone.json index bcd9571934235..cb2eaec426cec 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/member_standalone.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/member_standalone.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setVersion": 1, "electionId": { "$oid": "000000000000000000000001" @@ -40,7 +40,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary.json index d22450decc78b..600dba83d3e23 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary.json +++ 
b/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -45,7 +45,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_new_electionid.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_new_electionid.json index 67f314b1edc50..6c88dc2399f35 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_new_electionid.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_new_electionid.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -54,7 +54,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -100,7 +100,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_new_setversion.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_new_setversion.json index c1ec50c845cd2..cebdf9ab4e2d7 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_new_setversion.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_new_setversion.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -54,7 +54,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -100,7 +100,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_wrong_set_name.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_wrong_set_name.json index 9940b47b6e4bc..399776090755c 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_wrong_set_name.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/new_primary_wrong_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -45,7 +45,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/normalize_case.json.disabled b/src/mongo/client/sdam/json_tests/sdam_tests/rs/normalize_case.json.disabled index 4d0b0ae629b08..6cfd75168fc7f 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/normalize_case.json.disabled +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/normalize_case.json.disabled @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "A:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/normalize_case_me.json.disabled b/src/mongo/client/sdam/json_tests/sdam_tests/rs/normalize_case_me.json.disabled index e854e7fb432e2..c89522275c165 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/normalize_case_me.json.disabled +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/normalize_case_me.json.disabled @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "me": "A:27017", "hosts": [ @@ -51,7 +51,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": 
true, "setName": "rs", "me": "B:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/null_election_id.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/null_election_id.json index 3d6f76555762f..0f03c0b6c7f64 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/null_election_id.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/null_election_id.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -52,7 +52,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -104,7 +104,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", @@ -154,7 +154,7 @@ "c:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_ghost.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_ghost.json index 6ff0ecc0b067d..881b6cf07a299 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_ghost.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_ghost.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -40,7 +40,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "isreplicaset": true, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_mongos.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_mongos.json index 92acf6cca2f24..18b3e3e4427ce 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_mongos.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_mongos.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -40,7 +40,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "setVersion": 1, "electionId": { diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_standalone.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_standalone.json index 3e7144057ca5e..9bb846dcd6a42 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_standalone.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_becomes_standalone.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_changes_set_name.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_changes_set_name.json index 366df6e3500ac..247cab35cc47b 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_changes_set_name.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_changes_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -40,7 +40,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect.json index 67c95de6b1476..627518d4c886c 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect.json +++ 
b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect_electionid.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect_electionid.json index 59c8faf180536..a374e3e4fbbda 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect_electionid.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect_electionid.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -101,7 +101,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -144,7 +144,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -190,7 +190,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect_setversion.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect_setversion.json index beb023e4f414f..f1e2c0097c195 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect_setversion.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_disconnect_setversion.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -101,7 +101,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -144,7 +144,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -190,7 +190,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_hint_from_secondary_with_mismatched_me.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_hint_from_secondary_with_mismatched_me.json index bad86c8175da9..d516af6f715ed 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_hint_from_secondary_with_mismatched_me.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_hint_from_secondary_with_mismatched_me.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "me": "c:27017", "hosts": [ @@ -39,7 +39,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "me": "b:27017", "hosts": [ "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_mismatched_me.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_mismatched_me.json index 381edc7e4b1c4..5259d2398e65d 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_mismatched_me.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_mismatched_me.json @@ -26,7 +26,7 @@ "a:27017", "b:27017" ], - "ismaster": true, + "isWritablePrimary": true, "ok": 1, "setName": "rs", "setVersion": 1, diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_reports_new_member.json 
b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_reports_new_member.json index 4595bb93a78ce..65eedff59bdec 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_reports_new_member.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_reports_new_member.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ @@ -42,7 +42,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -79,7 +79,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -121,7 +121,7 @@ "c:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "primary": "b:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_to_no_primary_mismatched_me.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_to_no_primary_mismatched_me.json index ade818740ac01..e28a7211bb411 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_to_no_primary_mismatched_me.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_to_no_primary_mismatched_me.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -46,7 +46,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "c:27017", "d:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_wrong_set_name.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_wrong_set_name.json index c656ebf35d6fc..2bb9523b9f566 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_wrong_set_name.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/primary_wrong_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/response_from_removed.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/response_from_removed.json index 36e5d1e44f25c..e0cb697d51917 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/response_from_removed.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/response_from_removed.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017" @@ -40,7 +40,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/rsother_discovered.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/rsother_discovered.json index c575501d80388..2c90cdc717228 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/rsother_discovered.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/rsother_discovered.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hidden": true, "hosts": [ @@ -24,7 +24,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": false, "hosts": [ "c:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/sec_not_auth.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/sec_not_auth.json index 28fc85fc494f2..99c3aa6622e66 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/sec_not_auth.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/sec_not_auth.json 
@@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_ignore_ok_0.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_ignore_ok_0.json index 4867b07844ece..133aafdc406c2 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_ignore_ok_0.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_ignore_ok_0.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_mismatched_me.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_mismatched_me.json index d2a70f67889ab..7df79c73c0144 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_mismatched_me.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_mismatched_me.json @@ -26,7 +26,7 @@ "a:27017", "b:27017" ], - "ismaster": false, + "isWritablePrimary": false, "ok": 1, "setName": "rs", "minWireVersion": 0, diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_wrong_set_name.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_wrong_set_name.json index 4c132b633e19b..be621c06c831a 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_wrong_set_name.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_wrong_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_wrong_set_name_with_primary.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_wrong_set_name_with_primary.json index 0bca723e5c3b3..3e7b1f83ed97d 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_wrong_set_name_with_primary.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/secondary_wrong_set_name_with_primary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -45,7 +45,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/set_version_can_rollback.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/set_version_can_rollback.json index d3fa9acb3c2f4..d90e097de75c3 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/set_version_can_rollback.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/set_version_can_rollback.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -55,7 +55,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -102,7 +102,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/setversion_without_electionid.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/setversion_without_electionid.json index 07ec55cee1ca3..c5c8107d0afa2 100644 
--- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/setversion_without_electionid.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/setversion_without_electionid.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -46,7 +46,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/stepdown_change_set_name.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/stepdown_change_set_name.json index fcf35ce50418f..ca6b0e77b4547 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/stepdown_change_set_name.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/stepdown_change_set_name.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017" ], @@ -40,7 +40,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/too_new.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/too_new.json index 299b6fa9aabf1..f2b65b843be59 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/too_new.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/too_new.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/too_old.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/too_old.json index e1ad154ba6911..0056594f9432a 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/too_old.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/too_old.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "setName": "rs", "hosts": [ "a:27017", @@ -26,7 +26,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "setName": "rs", "hosts": [ diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/unexpected_mongos.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/unexpected_mongos.json index b9440de58053d..b89547959d664 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/unexpected_mongos.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/unexpected_mongos.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "setVersion": 1, "electionId": { diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/use_setversion_without_electionid.json b/src/mongo/client/sdam/json_tests/sdam_tests/rs/use_setversion_without_electionid.json index 365af2f7e198b..63635c9c419d3 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/use_setversion_without_electionid.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/use_setversion_without_electionid.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -54,7 +54,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" @@ -97,7 +97,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/rs/wrong_set_name.json 
b/src/mongo/client/sdam/json_tests/sdam_tests/rs/wrong_set_name.json index 45be2f502b201..cc4e3e963ce35 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/rs/wrong_set_name.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/rs/wrong_set_name.json @@ -8,7 +8,7 @@ "b:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "b:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/compatible.json b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/compatible.json index 3dae1f7ea1e39..3f6df3e09e55e 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/compatible.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/compatible.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 1000 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/ls_timeout_mongos.json b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/ls_timeout_mongos.json index 96f8dec17ac61..7a46adee5173f 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/ls_timeout_mongos.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/ls_timeout_mongos.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, @@ -19,7 +19,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 2, "minWireVersion": 0, @@ -49,7 +49,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "logicalSessionTimeoutMinutes": 1, "minWireVersion": 0, @@ -60,7 +60,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/mongos_disconnect.json b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/mongos_disconnect.json index 04015694a8d06..f0f98648a78a4 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/mongos_disconnect.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/mongos_disconnect.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -70,7 +70,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/multiple_mongoses.json b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/multiple_mongoses.json index 6e60fd05c7d92..753983609941e 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/multiple_mongoses.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/multiple_mongoses.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git 
a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/non_mongos_removed.json b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/non_mongos_removed.json index 7bf039d9bc892..553c8debf749c 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/non_mongos_removed.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/non_mongos_removed.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "b:27017" ], diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/too_new.json b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/too_new.json index 9521e11789918..b4f9f1495167b 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/too_new.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/too_new.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 999, "maxWireVersion": 1000 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid" } ] diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/too_old.json b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/too_old.json index 6bd187f61dba6..41ffed925d2f0 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/sharded/too_old.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/sharded/too_old.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 2, "maxWireVersion": 6 @@ -18,7 +18,7 @@ "b:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid" } ] diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/compatible.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/compatible.json index ee6b847ade773..9c91ae1db1b6c 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/compatible.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/compatible.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_external_ip.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_external_ip.json index 44581501862f2..f0b2cfe0a484d 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_external_ip.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_external_ip.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "b:27017" ], diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_mongos.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_mongos.json index a7fa0794901e8..d1ec4ad53b5af 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_mongos.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_mongos.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "msg": "isdbgrid", "minWireVersion": 0, "maxWireVersion": 6 diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rsarbiter.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rsarbiter.json index 3ef374d6f1e64..461a032ac0cbe 
100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rsarbiter.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rsarbiter.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "arbiterOnly": true, "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rsprimary.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rsprimary.json index bd5aaf7f044fc..10d0b5452aa0c 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rsprimary.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rsprimary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "hosts": [ "a:27017", "b:27017" diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rssecondary.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rssecondary.json index 3b4f3c8c5adb9..48595ac4d73df 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rssecondary.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_rssecondary.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "secondary": true, "hosts": [ "a:27017", diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_slave.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_slave.json index a40debd1838ab..37bfa86fcf949 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_slave.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_slave.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": false, + "isWritablePrimary": false, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_standalone.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_standalone.json index 2ecff9b9ae2a1..373d4ebc9a979 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_standalone.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/direct_connection_standalone.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/ls_timeout_standalone.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/ls_timeout_standalone.json index ae6c8ba11beaa..e48f6151f982c 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/ls_timeout_standalone.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/ls_timeout_standalone.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "logicalSessionTimeoutMinutes": 7, "minWireVersion": 0, "maxWireVersion": 6 diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/not_ok_response.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/not_ok_response.json index 06f71305dc014..c27ed1d9c4aef 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/not_ok_response.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/not_ok_response.json @@ -1,5 +1,5 @@ { - "description": "Handle a not-ok ismaster response", + "description": "Handle a not-ok isWritablePrimary response", "uri": "mongodb://a", "phases": [ { @@ -8,7 +8,7 @@ "a:27017", { "ok": 
1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } @@ -17,7 +17,7 @@ "a:27017", { "ok": 0, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/standalone_removed.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/standalone_removed.json index be1a73d30b868..de6ffaa84dc02 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/standalone_removed.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/standalone_removed.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 0, "maxWireVersion": 6 } diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/too_new.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/too_new.json index 38e4621d60fd9..5320c4a261354 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/too_new.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/too_new.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true, + "isWritablePrimary": true, "minWireVersion": 999, "maxWireVersion": 1000 } diff --git a/src/mongo/client/sdam/json_tests/sdam_tests/single/too_old.json b/src/mongo/client/sdam/json_tests/sdam_tests/single/too_old.json index fbf68262c02f2..55ef82acb7199 100644 --- a/src/mongo/client/sdam/json_tests/sdam_tests/single/too_old.json +++ b/src/mongo/client/sdam/json_tests/sdam_tests/single/too_old.json @@ -8,7 +8,7 @@ "a:27017", { "ok": 1, - "ismaster": true + "isWritablePrimary": true } ] ], diff --git a/src/mongo/client/sdam/mock_topology_manager.cpp b/src/mongo/client/sdam/mock_topology_manager.cpp index e13f175d669be..a41f55ee9d206 100644 --- a/src/mongo/client/sdam/mock_topology_manager.cpp +++ b/src/mongo/client/sdam/mock_topology_manager.cpp @@ -30,11 +30,11 @@ #include "mongo/client/sdam/mock_topology_manager.h" -#include +#include +#include -#include "mongo/client/sdam/topology_state_machine.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/topology_version_gen.h" +#include "mongo/client/sdam/topology_description.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/sdam/mock_topology_manager.h b/src/mongo/client/sdam/mock_topology_manager.h index c12afc758f2c7..f12072c087178 100644 --- a/src/mongo/client/sdam/mock_topology_manager.h +++ b/src/mongo/client/sdam/mock_topology_manager.h @@ -28,9 +28,15 @@ */ #pragma once +#include #include +#include +#include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/topology_manager.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" namespace mongo::sdam { diff --git a/src/mongo/client/sdam/sdam_configuration.cpp b/src/mongo/client/sdam/sdam_configuration.cpp index b42616aef1fc0..e83d8d275d4f3 100644 --- a/src/mongo/client/sdam/sdam_configuration.cpp +++ b/src/mongo/client/sdam/sdam_configuration.cpp @@ -28,6 +28,16 @@ */ #include "sdam_configuration.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/assert_util.h" + namespace mongo::sdam { SdamConfiguration::SdamConfiguration(boost::optional> seedList, TopologyType initialType, diff --git a/src/mongo/client/sdam/sdam_configuration.h b/src/mongo/client/sdam/sdam_configuration.h index f1b1122936c4f..9601f98ae56ca 100644 --- 
a/src/mongo/client/sdam/sdam_configuration.h +++ b/src/mongo/client/sdam/sdam_configuration.h @@ -28,9 +28,20 @@ */ #pragma once +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/client/sdam/sdam_configuration_parameters_gen.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/db/server_options.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" + namespace mongo::sdam { class SdamConfiguration { public: @@ -90,7 +101,7 @@ class SdamConfiguration { const boost::optional& getSetName() const; /** - * The frequency at which we measure RTT and IsMaster responses. + * The frequency at which we measure RTT and "hello" responses. */ Milliseconds getHeartBeatFrequency() const; diff --git a/src/mongo/client/sdam/sdam_configuration_parameters.idl b/src/mongo/client/sdam/sdam_configuration_parameters.idl index f36a6d9144b14..851cfa307d357 100644 --- a/src/mongo/client/sdam/sdam_configuration_parameters.idl +++ b/src/mongo/client/sdam/sdam_configuration_parameters.idl @@ -32,7 +32,9 @@ global: server_parameters: heartBeatFrequencyMs: - description: For the 'sdam' replicaSetMonitorProtocol, determines how long to wait between isMaster requests. For the 'streamable' replicaSetMonitorProtocol, duration between rtt measurements. + description: "For the 'sdam' replicaSetMonitorProtocol, determines how long to wait between + 'hello' requests. For the 'streamable' replicaSetMonitorProtocol, duration between rtt + measurements." set_at: startup cpp_vartype: int cpp_varname: sdamHeartBeatFrequencyMs diff --git a/src/mongo/client/sdam/sdam_datatypes.cpp b/src/mongo/client/sdam/sdam_datatypes.cpp index 3efea4140cb74..b10012f6060aa 100644 --- a/src/mongo/client/sdam/sdam_datatypes.cpp +++ b/src/mongo/client/sdam/sdam_datatypes.cpp @@ -29,6 +29,15 @@ #include "mongo/client/sdam/sdam_datatypes.h" +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/assert_util.h" + namespace mongo::sdam { std::string toString(const ServerType serverType) { switch (serverType) { diff --git a/src/mongo/client/sdam/sdam_datatypes.h b/src/mongo/client/sdam/sdam_datatypes.h index b70bcb2e8ea74..07665db62bb77 100644 --- a/src/mongo/client/sdam/sdam_datatypes.h +++ b/src/mongo/client/sdam/sdam_datatypes.h @@ -29,11 +29,22 @@ #pragma once +#include +#include #include +#include #include +#include +#include #include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/topology_version_gen.h" #include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" diff --git a/src/mongo/client/sdam/sdam_json_test_runner.cpp b/src/mongo/client/sdam/sdam_json_test_runner.cpp index 5cd4a4ddafabb..b10f7a2597103 100644 --- a/src/mongo/client/sdam/sdam_json_test_runner.cpp +++ b/src/mongo/client/sdam/sdam_json_test_runner.cpp @@ -27,28 +27,54 @@ * it in the license file. 
*/ -#include +#include +#include +#include +#include // IWYU pragma: keep #include #include - -#include -#include -#include -#include -#include - +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/client/mongo_uri.h" +#include "mongo/client/sdam/election_id_set_version_pair.h" #include "mongo/client/sdam/json_test_arg_parser.h" +#include "mongo/client/sdam/sdam_configuration.h" #include "mongo/client/sdam/sdam_configuration_parameters_gen.h" +#include "mongo/client/sdam/sdam_datatypes.h" +#include "mongo/client/sdam/server_description.h" +#include "mongo/client/sdam/topology_description.h" #include "mongo/client/sdam/topology_manager.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_options.h" #include "mongo/util/optional_util.h" -#include "mongo/util/options_parser/environment.h" -#include "mongo/util/options_parser/option_section.h" -#include "mongo/util/options_parser/options_parser.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -85,13 +111,13 @@ class TestCasePhase { for (auto& response : bsonResponses) { const auto pair = response.Array(); const auto address = HostAndPort(pair[0].String()); - const auto bsonIsMaster = pair[1].Obj(); + const auto bsonHello = pair[1].Obj(); - if (bsonIsMaster.nFields() == 0) { - _isMasterResponses.push_back(HelloOutcome(address, BSONObj(), "network error")); + if (bsonHello.nFields() == 0) { + _helloResponses.push_back(HelloOutcome(address, BSONObj(), "network error")); } else { - _isMasterResponses.push_back( - HelloOutcome(address, bsonIsMaster, duration_cast(kLatency))); + _helloResponses.push_back( + HelloOutcome(address, bsonHello, duration_cast(kLatency))); } } _topologyOutcome = phase["outcome"].Obj(); @@ -112,7 +138,7 @@ class TestCasePhase { PhaseResult execute(TopologyManager& topology) const { PhaseResult testResult{{}, _phaseNum}; - for (const auto& response : _isMasterResponses) { + for (const auto& response : _helloResponses) { auto descriptionStr = (response.getResponse()) ? 
response.getResponse()->toString() : "[ Network Error ]"; LOGV2(20202, @@ -429,7 +455,7 @@ class TestCasePhase { MongoURI _testUri; int _phaseNum; - std::vector _isMasterResponses; + std::vector _helloResponses; BSONObj _topologyOutcome; }; diff --git a/src/mongo/client/sdam/server_description.cpp b/src/mongo/client/sdam/server_description.cpp index 5766db050b419..2db82a4665e26 100644 --- a/src/mongo/client/sdam/server_description.cpp +++ b/src/mongo/client/sdam/server_description.cpp @@ -30,14 +30,27 @@ #include "mongo/client/sdam/server_description.h" #include -#include #include +#include +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/oid.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -210,7 +223,7 @@ void ServerDescription::parseTypeFromHelloReply(const BSONObj helloReply) { t = ServerType::kMongos; } else if (hasSetName && helloReply.getBoolField("hidden")) { t = ServerType::kRSOther; - } else if (hasSetName && helloReply.getBoolField("ismaster")) { + } else if (hasSetName && helloReply.getBoolField("isWritablePrimary")) { t = ServerType::kRSPrimary; } else if (hasSetName && helloReply.getBoolField("secondary")) { t = ServerType::kRSSecondary; diff --git a/src/mongo/client/sdam/server_description.h b/src/mongo/client/sdam/server_description.h index 0be4cfa426c93..ef8e12dd9d48a 100644 --- a/src/mongo/client/sdam/server_description.h +++ b/src/mongo/client/sdam/server_description.h @@ -28,13 +28,22 @@ */ #pragma once +#include #include +#include +#include #include +#include #include +#include #include #include +#include #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" #include "mongo/client/sdam/election_id_set_version_pair.h" #include "mongo/client/sdam/sdam_datatypes.h" @@ -42,6 +51,8 @@ #include "mongo/platform/basic.h" #include "mongo/rpc/topology_version_gen.h" #include "mongo/util/clock_source.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo::sdam { class ServerDescription { diff --git a/src/mongo/client/sdam/server_description_builder.cpp b/src/mongo/client/sdam/server_description_builder.cpp index cfd65eed60c26..ea8197e4db53a 100644 --- a/src/mongo/client/sdam/server_description_builder.cpp +++ b/src/mongo/client/sdam/server_description_builder.cpp @@ -28,6 +28,14 @@ */ #include "mongo/client/sdam/server_description_builder.h" +#include +#include +#include +#include +#include + +#include + namespace mongo::sdam { ServerDescriptionPtr ServerDescriptionBuilder::instance() const { return _instance; diff --git a/src/mongo/client/sdam/server_description_builder.h b/src/mongo/client/sdam/server_description_builder.h index 916017b68cab3..07819c2f24f56 100644 --- a/src/mongo/client/sdam/server_description_builder.h +++ b/src/mongo/client/sdam/server_description_builder.h @@ -28,8 +28,17 @@ */ #pragma once #include +#include +#include + +#include "mongo/bson/oid.h" +#include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/server_description.h" +#include "mongo/db/repl/optime.h" 
+#include "mongo/rpc/topology_version_gen.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo::sdam { diff --git a/src/mongo/client/sdam/server_description_test.cpp b/src/mongo/client/sdam/server_description_test.cpp index 07ded9ddb8405..53c7a9e1af400 100644 --- a/src/mongo/client/sdam/server_description_test.cpp +++ b/src/mongo/client/sdam/server_description_test.cpp @@ -26,18 +26,31 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/client/sdam/sdam_test_base.h" - -#include -#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/sdam/sdam_test_base.h" #include "mongo/client/sdam/server_description.h" #include "mongo/client/sdam/server_description_builder.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/optime.h" +#include "mongo/idl/idl_parser.h" #include "mongo/platform/random.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/str.h" #include "mongo/util/system_clock_source.h" @@ -240,7 +253,7 @@ class ServerDescriptionTestFixture : public SdamTestFixture { static inline const auto kBsonMissingOk = BSONObjBuilder().obj(); static inline const auto kBsonMongos = okBuilder().append("msg", "isdbgrid").obj(); static inline const auto kBsonRsPrimary = - okBuilder().append("ismaster", true).append("setName", "foo").obj(); + okBuilder().append("isWritablePrimary", true).append("setName", "foo").obj(); static inline const auto kBsonRsSecondary = okBuilder().append("secondary", true).append("setName", "foo").obj(); static inline const auto kBsonRsArbiter = @@ -288,7 +301,7 @@ class ServerDescriptionTestFixture : public SdamTestFixture { okBuilder().append("topologyVersion", TopologyVersion(OID::max(), 0).toBSON()).obj(); }; -TEST_F(ServerDescriptionTestFixture, ShouldParseTypeAsUnknownForIsMasterError) { +TEST_F(ServerDescriptionTestFixture, ShouldParseTypeAsUnknownForHelloError) { auto response = HelloOutcome(HostAndPort("foo:1234"), kTopologyVersion, "an error occurred"); auto description = ServerDescription(clockSource, response); ASSERT_EQUALS(ServerType::kUnknown, description.getType()); @@ -315,7 +328,7 @@ TEST_F(ServerDescriptionTestFixture, ShouldParseTypeAsMongos) { } TEST_F(ServerDescriptionTestFixture, ShouldParseTypeAsRSPrimary) { - // "ismaster: true", "setName" in response + // "isWritablePrimary: true", "setName" in response auto response = HelloOutcome(HostAndPort("foo:1234"), kBsonRsPrimary, HelloRTT::min()); auto description = ServerDescription(clockSource, response); ASSERT_EQUALS(ServerType::kRSPrimary, description.getType()); diff --git a/src/mongo/client/sdam/server_selection_json_test_runner.cpp b/src/mongo/client/sdam/server_selection_json_test_runner.cpp index 484677efadb0e..d70eaa4c408e7 100644 --- a/src/mongo/client/sdam/server_selection_json_test_runner.cpp +++ b/src/mongo/client/sdam/server_selection_json_test_runner.cpp @@ -27,29 +27,59 @@ * it in the license file. 
*/ -#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include // IWYU pragma: keep #include +#include #include - -#include -#include -#include -#include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/client/read_preference.h" #include "mongo/client/sdam/json_test_arg_parser.h" +#include "mongo/client/sdam/sdam_configuration.h" #include "mongo/client/sdam/sdam_configuration_parameters_gen.h" +#include "mongo/client/sdam/sdam_datatypes.h" +#include "mongo/client/sdam/server_description.h" #include "mongo/client/sdam/server_description_builder.h" #include "mongo/client/sdam/server_selector.h" -#include "mongo/client/sdam/topology_manager.h" +#include "mongo/client/sdam/topology_description.h" +#include "mongo/db/server_options.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/unordered_set.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/util/assert_util.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/ctype.h" -#include "mongo/util/optional_util.h" -#include "mongo/util/options_parser/environment.h" -#include "mongo/util/options_parser/option_section.h" -#include "mongo/util/options_parser/options_parser.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/options_parser/value.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -132,7 +162,7 @@ class JsonRttTestCase : public JsonTestCase { HelloOutcome(HostAndPort("dummy"), BSON("ok" << 1 << "setname" << "replSet" - << "ismaster" << true), + << "isWritablePrimary" << true), HelloRTT(Milliseconds(_newRtt))))); } diff --git a/src/mongo/client/sdam/server_selector.cpp b/src/mongo/client/sdam/server_selector.cpp index f09761a91106e..d30456be4927f 100644 --- a/src/mongo/client/sdam/server_selector.cpp +++ b/src/mongo/client/sdam/server_selector.cpp @@ -28,12 +28,33 @@ */ #include "server_selector.h" +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/sdam/sdam_configuration_parameters_gen.h" #include "mongo/client/sdam/topology_description.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/wire_version.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/random.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/sdam/server_selector.h b/src/mongo/client/sdam/server_selector.h index 3d6877fcf156b..72d7f78d1c32d 100644 --- a/src/mongo/client/sdam/server_selector.h +++ 
b/src/mongo/client/sdam/server_selector.h @@ -27,15 +27,27 @@ * it in the license file. */ #pragma once +#include +#include #include +#include #include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/client/read_preference.h" #include "mongo/client/sdam/sdam_configuration.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/server_description.h" #include "mongo/client/sdam/topology_description.h" #include "mongo/platform/random.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo::sdam { /** diff --git a/src/mongo/client/sdam/server_selector_test.cpp b/src/mongo/client/sdam/server_selector_test.cpp index 22e7d2f534601..352dadea88566 100644 --- a/src/mongo/client/sdam/server_selector_test.cpp +++ b/src/mongo/client/sdam/server_selector_test.cpp @@ -28,11 +28,30 @@ */ #include "mongo/client/sdam/server_selector.h" +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/sdam/sdam_test_base.h" #include "mongo/client/sdam/server_description_builder.h" #include "mongo/client/sdam/topology_description.h" -#include "mongo/client/sdam/topology_manager.h" +#include "mongo/client/sdam/topology_state_machine.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/wire_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/system_clock_source.h" namespace mongo::sdam { diff --git a/src/mongo/client/sdam/topology_description.cpp b/src/mongo/client/sdam/topology_description.cpp index 6c4bc2dfea148..9691a39b292f0 100644 --- a/src/mongo/client/sdam/topology_description.cpp +++ b/src/mongo/client/sdam/topology_description.cpp @@ -29,15 +29,28 @@ #include "mongo/client/sdam/topology_description.h" +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include -#include +#include #include #include +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/server_description.h" #include "mongo/db/wire_version.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/sdam/topology_description.h b/src/mongo/client/sdam/topology_description.h index 41a39f8849286..8a99be9683b56 100644 --- a/src/mongo/client/sdam/topology_description.h +++ b/src/mongo/client/sdam/topology_description.h @@ -29,11 +29,17 @@ #pragma once +#include +#include #include +#include #include #include +#include #include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/bson/oid.h" #include "mongo/client/read_preference.h" #include "mongo/client/sdam/election_id_set_version_pair.h" @@ -41,6 +47,9 @@ #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/server_description.h" #include "mongo/platform/basic.h" +#include 
"mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo::sdam { class TopologyDescription : public std::enable_shared_from_this { diff --git a/src/mongo/client/sdam/topology_description_builder.cpp b/src/mongo/client/sdam/topology_description_builder.cpp index 6afc3d8126bf4..c28caac935aca 100644 --- a/src/mongo/client/sdam/topology_description_builder.cpp +++ b/src/mongo/client/sdam/topology_description_builder.cpp @@ -29,6 +29,11 @@ #include "mongo/client/sdam/topology_description_builder.h" +#include +#include + +#include "mongo/client/sdam/election_id_set_version_pair.h" + namespace mongo::sdam { diff --git a/src/mongo/client/sdam/topology_description_builder.h b/src/mongo/client/sdam/topology_description_builder.h index 7c77bed86db22..30c74c0782ab8 100644 --- a/src/mongo/client/sdam/topology_description_builder.h +++ b/src/mongo/client/sdam/topology_description_builder.h @@ -27,7 +27,14 @@ * it in the license file. */ #pragma once +#include +#include +#include + +#include "mongo/bson/oid.h" +#include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/topology_description.h" +#include "mongo/util/uuid.h" namespace mongo::sdam { diff --git a/src/mongo/client/sdam/topology_description_test.cpp b/src/mongo/client/sdam/topology_description_test.cpp index a401b87061202..9f93ad633b31f 100644 --- a/src/mongo/client/sdam/topology_description_test.cpp +++ b/src/mongo/client/sdam/topology_description_test.cpp @@ -28,14 +28,28 @@ */ +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" #include "mongo/client/sdam/sdam_test_base.h" -#include "mongo/client/sdam/topology_description.h" - #include "mongo/client/sdam/server_description.h" #include "mongo/client/sdam/server_description_builder.h" +#include "mongo/client/sdam/topology_description.h" #include "mongo/db/wire_version.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/death_test.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/client/sdam/topology_listener.cpp b/src/mongo/client/sdam/topology_listener.cpp index 215b64d2871c2..5517c4b2c0a4f 100644 --- a/src/mongo/client/sdam/topology_listener.cpp +++ b/src/mongo/client/sdam/topology_listener.cpp @@ -28,7 +28,15 @@ */ #include "mongo/client/sdam/topology_listener.h" + +#include +#include +#include +#include + #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/client/sdam/topology_listener.h b/src/mongo/client/sdam/topology_listener.h index 304009aacc3ff..4fe73a1f4b4f9 100644 --- a/src/mongo/client/sdam/topology_listener.h +++ b/src/mongo/client/sdam/topology_listener.h @@ -31,8 +31,15 @@ #include #include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/net/hostandport.h" namespace mongo::sdam { diff --git 
a/src/mongo/client/sdam/topology_listener_mock.cpp b/src/mongo/client/sdam/topology_listener_mock.cpp index fb306d9158d99..0f573b2459476 100644 --- a/src/mongo/client/sdam/topology_listener_mock.cpp +++ b/src/mongo/client/sdam/topology_listener_mock.cpp @@ -26,20 +26,24 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include #include "mongo/client/sdam/topology_listener_mock.h" +#include "mongo/util/assert_util_core.h" namespace mongo::sdam { void TopologyListenerMock::onServerHeartbeatSucceededEvent(const HostAndPort& hostAndPort, const BSONObj reply) { stdx::lock_guard lk(_mutex); - auto it = _serverIsMasterReplies.find(hostAndPort); - if (it != _serverIsMasterReplies.end()) { + auto it = _serverHelloReplies.find(hostAndPort); + if (it != _serverHelloReplies.end()) { it->second.emplace_back(Status::OK()); } else { - _serverIsMasterReplies.emplace(hostAndPort, std::vector{Status::OK()}); + _serverHelloReplies.emplace(hostAndPort, std::vector{Status::OK()}); } } @@ -49,30 +53,30 @@ void TopologyListenerMock::onServerHeartbeatFailureEvent(Status errorStatus, stdx::lock_guard lk(_mutex); // If the map already contains an element for hostAndPort, append to its already existing // vector. Otherwise, create a new vector. - auto it = _serverIsMasterReplies.find(hostAndPort); - if (it != _serverIsMasterReplies.end()) { + auto it = _serverHelloReplies.find(hostAndPort); + if (it != _serverHelloReplies.end()) { it->second.emplace_back(errorStatus); } else { - _serverIsMasterReplies.emplace(hostAndPort, std::vector{errorStatus}); + _serverHelloReplies.emplace(hostAndPort, std::vector{errorStatus}); } } -bool TopologyListenerMock::hasIsMasterResponse(const HostAndPort& hostAndPort) { +bool TopologyListenerMock::hasHelloResponse(const HostAndPort& hostAndPort) { stdx::lock_guard lock(_mutex); - return _hasIsMasterResponse(lock, hostAndPort); + return _hasHelloResponse(lock, hostAndPort); } -bool TopologyListenerMock::_hasIsMasterResponse(WithLock, const HostAndPort& hostAndPort) { - return _serverIsMasterReplies.find(hostAndPort) != _serverIsMasterReplies.end(); +bool TopologyListenerMock::_hasHelloResponse(WithLock, const HostAndPort& hostAndPort) { + return _serverHelloReplies.find(hostAndPort) != _serverHelloReplies.end(); } -std::vector TopologyListenerMock::getIsMasterResponse(const HostAndPort& hostAndPort) { +std::vector TopologyListenerMock::getHelloResponse(const HostAndPort& hostAndPort) { stdx::lock_guard lock(_mutex); - invariant(_hasIsMasterResponse(lock, hostAndPort)); - auto it = _serverIsMasterReplies.find(hostAndPort); - auto statusWithIsMasterResponse = it->second; - _serverIsMasterReplies.erase(it); - return statusWithIsMasterResponse; + invariant(_hasHelloResponse(lock, hostAndPort)); + auto it = _serverHelloReplies.find(hostAndPort); + auto statusWithHelloResponse = it->second; + _serverHelloReplies.erase(it); + return statusWithHelloResponse; } void TopologyListenerMock::onServerPingSucceededEvent(HelloRTT latency, diff --git a/src/mongo/client/sdam/topology_listener_mock.h b/src/mongo/client/sdam/topology_listener_mock.h index ad8db6f46dae8..b07ccd377af7c 100644 --- a/src/mongo/client/sdam/topology_listener_mock.h +++ b/src/mongo/client/sdam/topology_listener_mock.h @@ -29,8 +29,17 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include 
"mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/topology_listener.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/uuid.h" namespace mongo::sdam { @@ -47,15 +56,15 @@ class TopologyListenerMock : public TopologyListener { BSONObj reply) override; /** - * Returns true if _serverIsMasterReplies contains an element corresponding to hostAndPort. + * Returns true if _serverHelloReplies contains an element corresponding to hostAndPort. */ - bool hasIsMasterResponse(const HostAndPort& hostAndPort); - bool _hasIsMasterResponse(WithLock, const HostAndPort& hostAndPort); + bool hasHelloResponse(const HostAndPort& hostAndPort); + bool _hasHelloResponse(WithLock, const HostAndPort& hostAndPort); /** * Returns the responses for the most recent onServerHeartbeat events. */ - std::vector getIsMasterResponse(const HostAndPort& hostAndPort); + std::vector getHelloResponse(const HostAndPort& hostAndPort); void onServerPingSucceededEvent(HelloRTT latency, const HostAndPort& hostAndPort) override; @@ -74,7 +83,7 @@ class TopologyListenerMock : public TopologyListener { private: Mutex _mutex; - stdx::unordered_map> _serverIsMasterReplies; + stdx::unordered_map> _serverHelloReplies; stdx::unordered_map>> _serverPingRTTs; }; diff --git a/src/mongo/client/sdam/topology_listener_test.cpp b/src/mongo/client/sdam/topology_listener_test.cpp index e763919272976..5a611ba59af49 100644 --- a/src/mongo/client/sdam/topology_listener_test.cpp +++ b/src/mongo/client/sdam/topology_listener_test.cpp @@ -27,15 +27,23 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/client/replica_set_monitor_protocol_test_util.h" +#include "mongo/client/replica_set_monitor_server_parameters.h" #include "mongo/client/sdam/topology_listener.h" #include "mongo/client/sdam/topology_listener_mock.h" -#include "mongo/executor/thread_pool_mock.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/client/sdam/topology_manager.cpp b/src/mongo/client/sdam/topology_manager.cpp index cd845d68d1476..dee6811961ab4 100644 --- a/src/mongo/client/sdam/topology_manager.cpp +++ b/src/mongo/client/sdam/topology_manager.cpp @@ -29,10 +29,20 @@ #include "mongo/client/sdam/topology_manager.h" -#include +#include +#include +#include +#include +#include + +#include "mongo/bson/oid.h" +#include "mongo/client/sdam/server_description.h" +#include "mongo/client/sdam/topology_description.h" #include "mongo/client/sdam/topology_state_machine.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/topology_version_gen.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/sdam/topology_manager.h b/src/mongo/client/sdam/topology_manager.h index 6f70e3270082a..b4dfd92a01a41 100644 --- a/src/mongo/client/sdam/topology_manager.h +++ b/src/mongo/client/sdam/topology_manager.h @@ -27,12 +27,19 @@ * it in the 
license file. */ #pragma once +#include #include +#include +#include "mongo/client/sdam/sdam_configuration.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/topology_description.h" #include "mongo/client/sdam/topology_listener.h" #include "mongo/client/sdam/topology_state_machine.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" namespace mongo::sdam { diff --git a/src/mongo/client/sdam/topology_manager_test.cpp b/src/mongo/client/sdam/topology_manager_test.cpp index e084e6072e5a9..a6e7b1301fc48 100644 --- a/src/mongo/client/sdam/topology_manager_test.cpp +++ b/src/mongo/client/sdam/topology_manager_test.cpp @@ -28,8 +28,28 @@ */ #include "mongo/client/sdam/topology_manager.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/client/sdam/sdam_test_base.h" -#include "mongo/unittest/death_test.h" +#include "mongo/client/sdam/server_description.h" +#include "mongo/client/sdam/topology_description.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/system_clock_source.h" namespace mongo { @@ -58,7 +78,7 @@ class TopologyManagerTestFixture : public SdamTestFixture { static inline const auto kBsonTopologyVersionHigh = okBuilder().append("topologyVersion", TopologyVersion(OID::max(), 1).toBSON()).obj(); static inline const auto kBsonRsPrimary = okBuilder() - .append("ismaster", true) + .append("isWritablePrimary", true) .append("setName", kSetName) .append("minWireVersion", 2) .append("maxWireVersion", 10) diff --git a/src/mongo/client/sdam/topology_state_machine.cpp b/src/mongo/client/sdam/topology_state_machine.cpp index 6cdff657f826c..025b7724cceb6 100644 --- a/src/mongo/client/sdam/topology_state_machine.cpp +++ b/src/mongo/client/sdam/topology_state_machine.cpp @@ -29,12 +29,19 @@ #include "mongo/client/sdam/topology_state_machine.h" -#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include #include "mongo/client/sdam/election_id_set_version_pair.h" -#include "mongo/client/sdam/sdam_test_base.h" +#include "mongo/client/sdam/server_description.h" #include "mongo/logv2/log.h" -#include "mongo/util/fail_point.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -61,7 +68,7 @@ inline int idx(T enumType) { */ void mongo::sdam::TopologyStateMachine::initTransitionTable() { auto bindThis = [&](auto&& pmf) { - return [=](auto&&... a) { + return [=, this](auto&&... 
a) { (this->*pmf)(a...); }; }; @@ -160,14 +167,11 @@ void TopologyStateMachine::onServerDescription(TopologyDescription& topologyDesc const ServerDescriptionPtr& serverDescription) { if (!topologyDescription.containsServerAddress(serverDescription->getAddress())) { const auto& setName = topologyDescription.getSetName(); - LOGV2_DEBUG( - 20219, - kLogLevel, - "{replSetName}: Ignoring isMaster reply from server that is not in the topology: " - "{serverAddress}", - "Ignoring isMaster reply from server that is not in the topology", - "replicaSet"_attr = setName ? *setName : std::string(""), - "serverAddress"_attr = serverDescription->getAddress()); + LOGV2_DEBUG(20219, + kLogLevel, + "Ignoring 'hello' reply from server that is not in the topology", + "replicaSet"_attr = setName ? *setName : std::string(""), + "serverAddress"_attr = serverDescription->getAddress()); return; } diff --git a/src/mongo/client/sdam/topology_state_machine.h b/src/mongo/client/sdam/topology_state_machine.h index 158377c3346ec..86dcdef925473 100644 --- a/src/mongo/client/sdam/topology_state_machine.h +++ b/src/mongo/client/sdam/topology_state_machine.h @@ -28,13 +28,18 @@ */ #pragma once +#include #include #include +#include #include +#include "mongo/client/sdam/sdam_configuration.h" +#include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/server_description.h" #include "mongo/client/sdam/topology_description.h" #include "mongo/platform/mutex.h" +#include "mongo/util/net/hostandport.h" namespace mongo::sdam { // Actions that mutate the state of the topology description via events. diff --git a/src/mongo/client/sdam/topology_state_machine_test.cpp b/src/mongo/client/sdam/topology_state_machine_test.cpp index 0481b896e1eef..1669f37203059 100644 --- a/src/mongo/client/sdam/topology_state_machine_test.cpp +++ b/src/mongo/client/sdam/topology_state_machine_test.cpp @@ -28,10 +28,23 @@ */ #include "mongo/client/sdam/topology_state_machine.h" +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/client/sdam/election_id_set_version_pair.h" #include "mongo/client/sdam/sdam_test_base.h" #include "mongo/client/sdam/server_description.h" #include "mongo/client/sdam/server_description_builder.h" #include "mongo/client/sdam/topology_description.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo::sdam { class TopologyStateMachineTestFixture : public SdamTestFixture { diff --git a/src/mongo/client/server_discovery_monitor.cpp b/src/mongo/client/server_discovery_monitor.cpp index a7c88b6372ce9..5b640316501e5 100644 --- a/src/mongo/client/server_discovery_monitor.cpp +++ b/src/mongo/client/server_discovery_monitor.cpp @@ -29,18 +29,41 @@ #include "mongo/client/server_discovery_monitor.h" +#include +#include #include +#include +#include +#include #include +#include +#include +#include +#include -#include "mongo/client/replica_set_monitor.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/replica_set_monitor_server_parameters.h" -#include "mongo/client/sdam/sdam.h" +#include "mongo/client/sdam/server_description.h" +#include "mongo/client/sdam/topology_description.h" #include "mongo/db/wire_version.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" 
#include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -241,7 +264,7 @@ StatusWith SingleServerDiscoveryMonitor::_schedule }); BSONObjBuilder bob; - bob.append("isMaster", 1); + bob.append("hello", 1); bob.append("maxAwaitTimeMS", maxAwaitTimeMS); bob.append("topologyVersion", _topologyVersion->toBSON()); @@ -301,7 +324,7 @@ StatusWith SingleServerDiscoveryMonitor::_schedule StatusWith SingleServerDiscoveryMonitor::_scheduleSingleHello() { BSONObjBuilder bob; - bob.append("isMaster", 1); + bob.append("hello", 1); if (auto wireSpec = WireSpec::instance().get(); wireSpec->isInternalClient) { WireSpec::appendInternalClientWireVersion(wireSpec->outgoing, &bob); } diff --git a/src/mongo/client/server_discovery_monitor.h b/src/mongo/client/server_discovery_monitor.h index 93bcd4c53a811..3f77b5b0de49c 100644 --- a/src/mongo/client/server_discovery_monitor.h +++ b/src/mongo/client/server_discovery_monitor.h @@ -26,12 +26,32 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ + +#pragma once + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/mongo_uri.h" #include "mongo/client/replica_set_monitor_stats.h" +#include "mongo/client/sdam/election_id_set_version_pair.h" #include "mongo/client/sdam/sdam.h" +#include "mongo/client/sdam/sdam_configuration.h" +#include "mongo/client/sdam/sdam_datatypes.h" +#include "mongo/client/sdam/topology_listener.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/topology_version_gen.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { using namespace sdam; diff --git a/src/mongo/client/server_discovery_monitor_expedited_test.cpp b/src/mongo/client/server_discovery_monitor_expedited_test.cpp index 4268985ba4c65..41bda00261db3 100644 --- a/src/mongo/client/server_discovery_monitor_expedited_test.cpp +++ b/src/mongo/client/server_discovery_monitor_expedited_test.cpp @@ -26,11 +26,18 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ -#include "mongo/client/server_discovery_monitor.h" +#include +#include +#include -#include "mongo/executor/task_executor.h" +#include "mongo/base/string_data.h" +#include "mongo/client/server_discovery_monitor.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -39,7 +46,7 @@ namespace mongo { class SingleServerDiscoveryMonitorExpeditedFixture : public unittest::Test { public: struct TestCase { - boost::optional timeElapsedSinceLastIsMaster; + boost::optional timeElapsedSinceLastHello; Milliseconds previousRefreshPeriod; boost::optional expectedResult; }; @@ -47,11 +54,11 @@ class SingleServerDiscoveryMonitorExpeditedFixture : public unittest::Test { void verifyTestCase(TestCase testCase) { LOGV2_INFO(4712103, "TestCase", - "timeElapsedSinceLastIsMaster"_attr = testCase.timeElapsedSinceLastIsMaster, + "timeElapsedSinceLastHello"_attr = testCase.timeElapsedSinceLastHello, "previousRefreshPeriod"_attr = testCase.previousRefreshPeriod, "expeditedRefreshPeriod"_attr = kExpeditedRefreshPeriod); auto result = SingleServerDiscoveryMonitor::calculateExpeditedDelayUntilNextCheck( - testCase.timeElapsedSinceLastIsMaster, + testCase.timeElapsedSinceLastHello, kExpeditedRefreshPeriod, testCase.previousRefreshPeriod); ASSERT_EQUALS(testCase.expectedResult, result); diff --git a/src/mongo/client/server_discovery_monitor_test.cpp b/src/mongo/client/server_discovery_monitor_test.cpp index 7d52799019a89..b593d51fe7338 100644 --- a/src/mongo/client/server_discovery_monitor_test.cpp +++ b/src/mongo/client/server_discovery_monitor_test.cpp @@ -28,24 +28,43 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include +#include +#include + +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/client/replica_set_monitor_protocol_test_util.h" -#include "mongo/client/sdam/sdam.h" -#include "mongo/client/sdam/sdam_configuration_parameters_gen.h" +#include "mongo/client/replica_set_monitor_server_parameters.h" #include "mongo/client/sdam/topology_description.h" #include "mongo/client/sdam/topology_listener_mock.h" #include "mongo/client/server_discovery_monitor.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/dbtests/mock/mock_replica_set.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -120,53 +139,53 @@ class 
ServerDiscoveryMonitorTestFixture : public unittest::Test { } /** - * Sets up a SingleServerDiscoveryMonitor that starts sending isMasters to the server. + * Sets up a SingleServerDiscoveryMonitor that starts sending "hello" to the server. */ std::shared_ptr initSingleServerDiscoveryMonitor( const sdam::SdamConfiguration& sdamConfiguration, const HostAndPort& hostAndPort, MockReplicaSet* replSet) { - auto ssIsMasterMonitor = std::make_shared(replSet->getURI(), - hostAndPort, - boost::none, - sdamConfiguration, - _eventsPublisher, - _executor, - _stats); - ssIsMasterMonitor->init(); - - // Ensure that the clock has not advanced since setUp() and _startDate is representative - // of when the first isMaster request was sent. + auto ssHelloMonitor = std::make_shared(replSet->getURI(), + hostAndPort, + boost::none, + sdamConfiguration, + _eventsPublisher, + _executor, + _stats); + ssHelloMonitor->init(); + + // Ensure that the clock has not advanced since setUp() and _startDate is representative of + // when the first "hello" request was sent. ASSERT_EQ(getStartDate(), getNet()->now()); - return ssIsMasterMonitor; + return ssHelloMonitor; } std::shared_ptr initServerDiscoveryMonitor( const MongoURI& setUri, const sdam::SdamConfiguration& sdamConfiguration, const sdam::TopologyDescriptionPtr topologyDescription) { - auto serverIsMasterMonitor = std::make_shared( + auto serverHelloMonitor = std::make_shared( setUri, sdamConfiguration, _eventsPublisher, topologyDescription, _stats, _executor); // Ensure that the clock has not advanced since setUp() and _startDate is representative - // of when the first isMaster request was sent. + // of when the first "hello" request was sent. ASSERT_EQ(getStartDate(), getNet()->now()); - return serverIsMasterMonitor; + return serverHelloMonitor; } /** - * Checks that an isMaster request has been sent to some server and schedules a response. If - * assertHostCheck is true, asserts that the isMaster was sent to the server at hostAndPort. + * Checks that an "hello" request has been sent to some server and schedules a response. If + * assertHostCheck is true, asserts that the "hello" was sent to the server at hostAndPort. */ - void processIsMasterRequest(MockReplicaSet* replSet, - boost::optional hostAndPort = boost::none) { + void processHelloRequest(MockReplicaSet* replSet, + boost::optional hostAndPort = boost::none) { ASSERT(hasReadyRequests()); InNetworkGuard guard(_net); _net->runReadyNetworkOperations(); auto noi = _net->getNextReadyRequest(); auto request = noi->getRequest(); - executor::TaskExecutorTest::assertRemoteCommandNameEquals("isMaster", request); + executor::TaskExecutorTest::assertRemoteCommandNameEquals("hello", request); auto requestHost = request.target.toString(); if (hostAndPort) { ASSERT_EQ(request.target, hostAndPort); @@ -201,55 +220,55 @@ class ServerDiscoveryMonitorTestFixture : public unittest::Test { } /** - * Checks that exactly one successful isMaster occurs within a time interval of + * Checks that exactly one successful "hello" occurs within a time interval of * heartbeatFrequency. 
*/ - void checkSingleIsMaster(Milliseconds heartbeatFrequency, - const HostAndPort& hostAndPort, - MockReplicaSet* replSet) { + void checkSingleHello(Milliseconds heartbeatFrequency, + const HostAndPort& hostAndPort, + MockReplicaSet* replSet) { auto deadline = elapsed() + heartbeatFrequency; - processIsMasterRequest(replSet, hostAndPort); + processHelloRequest(replSet, hostAndPort); - while (elapsed() < deadline && !_topologyListener->hasIsMasterResponse(hostAndPort)) { + while (elapsed() < deadline && !_topologyListener->hasHelloResponse(hostAndPort)) { advanceTime(Milliseconds(1)); } - validateIsMasterResponse(hostAndPort, deadline); + validateHelloResponse(hostAndPort, deadline); checkNoActivityBefore(deadline, hostAndPort); } - void validateIsMasterResponse(const HostAndPort& hostAndPort, Milliseconds deadline) { - ASSERT_TRUE(_topologyListener->hasIsMasterResponse(hostAndPort)); + void validateHelloResponse(const HostAndPort& hostAndPort, Milliseconds deadline) { + ASSERT_TRUE(_topologyListener->hasHelloResponse(hostAndPort)); ASSERT_LT(elapsed(), deadline); - auto isMasterResponse = _topologyListener->getIsMasterResponse(hostAndPort); + auto helloResponse = _topologyListener->getHelloResponse(hostAndPort); - // There should only be one isMaster response queued up. - ASSERT_EQ(isMasterResponse.size(), 1); - ASSERT(isMasterResponse[0].isOK()); + // There should only be one "hello" response queued up. + ASSERT_EQ(helloResponse.size(), 1); + ASSERT(helloResponse[0].isOK()); } /** - * Confirms no more isMaster requests are sent between elapsed() and deadline. Confirms no more - * isMaster responses are received between elapsed() and deadline when hostAndPort is specified. + * Confirms no more "hello" requests are sent between elapsed() and deadline. Confirms no more + * "hello" responses are received between elapsed() and deadline when hostAndPort is specified. */ void checkNoActivityBefore(Milliseconds deadline, boost::optional hostAndPort = boost::none) { while (elapsed() < deadline) { ASSERT_FALSE(hasReadyRequests()); if (hostAndPort) { - ASSERT_FALSE(_topologyListener->hasIsMasterResponse(hostAndPort.value())); + ASSERT_FALSE(_topologyListener->hasHelloResponse(hostAndPort.value())); } advanceTime(Milliseconds(1)); } } /** - * Waits up to timeoutMS for the next isMaster request to go out. - * Causes the test to fail if timeoutMS time passes and no request is ready. + * Waits up to timeoutMS for the next "hello" request to go out. Causes the test to fail if + * timeoutMS time passes and no request is ready. * - * NOTE: The time between each isMaster request is the heartbeatFrequency compounded by response + * NOTE: The time between each "hello" request is the heartbeatFrequency compounded by response * time. */ - void waitForNextIsMaster(Milliseconds timeoutMS) { + void waitForNextHello(Milliseconds timeoutMS) { auto deadline = elapsed() + timeoutMS; while (!hasReadyRequests() && elapsed() < deadline) { advanceTime(Milliseconds(1)); @@ -272,7 +291,7 @@ class ServerDiscoveryMonitorTestFixture : public unittest::Test { }; /** - * Checks that a SingleServerDiscoveryMonitor sends isMaster requests at least heartbeatFrequency + * Checks that a SingleServerDiscoveryMonitor sends "hello" requests at least heartbeatFrequency * apart. 
*/ TEST_F(ServerDiscoveryMonitorTestFixture, heartbeatFrequencyCheck) { @@ -281,28 +300,28 @@ TEST_F(ServerDiscoveryMonitorTestFixture, heartbeatFrequencyCheck) { auto hostAndPort = HostAndPort(replSet->getSecondaries()[0]); const auto config = SdamConfiguration(std::vector{hostAndPort}); - auto ssIsMasterMonitor = initSingleServerDiscoveryMonitor(config, hostAndPort, replSet.get()); - ssIsMasterMonitor->disableExpeditedChecking(); + auto ssHelloMonitor = initSingleServerDiscoveryMonitor(config, hostAndPort, replSet.get()); + ssHelloMonitor->disableExpeditedChecking(); - // An isMaster command fails if it takes as long or longer than timeoutMS. + // A "hello" command fails if it takes as long or longer than timeoutMS. auto timeoutMS = config.getConnectionTimeout(); auto heartbeatFrequency = config.getHeartBeatFrequency(); - checkSingleIsMaster(heartbeatFrequency, hostAndPort, replSet.get()); - waitForNextIsMaster(timeoutMS); + checkSingleHello(heartbeatFrequency, hostAndPort, replSet.get()); + waitForNextHello(timeoutMS); - checkSingleIsMaster(heartbeatFrequency, hostAndPort, replSet.get()); - waitForNextIsMaster(timeoutMS); + checkSingleHello(heartbeatFrequency, hostAndPort, replSet.get()); + waitForNextHello(timeoutMS); - checkSingleIsMaster(heartbeatFrequency, hostAndPort, replSet.get()); - waitForNextIsMaster(timeoutMS); + checkSingleHello(heartbeatFrequency, hostAndPort, replSet.get()); + waitForNextHello(timeoutMS); - checkSingleIsMaster(heartbeatFrequency, hostAndPort, replSet.get()); - waitForNextIsMaster(timeoutMS); + checkSingleHello(heartbeatFrequency, hostAndPort, replSet.get()); + waitForNextHello(timeoutMS); } /** - * Confirms that a SingleServerDiscoveryMonitor reports to the TopologyListener when an isMaster + * Confirms that a SingleServerDiscoveryMonitor reports to the TopologyListener when a "hello" * command generates an error. */ TEST_F(ServerDiscoveryMonitorTestFixture, singleServerDiscoveryMonitorReportsFailure) { @@ -317,23 +336,23 @@ TEST_F(ServerDiscoveryMonitorTestFixture, singleServerDiscoveryMonitorReportsFai } const auto config = SdamConfiguration(std::vector{hostAndPort}); - auto ssIsMasterMonitor = initSingleServerDiscoveryMonitor(config, hostAndPort, replSet.get()); - ssIsMasterMonitor->disableExpeditedChecking(); + auto ssHelloMonitor = initSingleServerDiscoveryMonitor(config, hostAndPort, replSet.get()); + ssHelloMonitor->disableExpeditedChecking(); - processIsMasterRequest(replSet.get(), hostAndPort); + processHelloRequest(replSet.get(), hostAndPort); auto topologyListener = getTopologyListener(); auto timeoutMS = config.getConnectionTimeout(); - while (elapsed() < timeoutMS && !topologyListener->hasIsMasterResponse(hostAndPort)) { - // Advance time in small increments to ensure we stop before another isMaster is sent. + while (elapsed() < timeoutMS && !topologyListener->hasHelloResponse(hostAndPort)) { + // Advance time in small increments to ensure we stop before another "hello" is sent. 
advanceTime(Milliseconds(1)); } - ASSERT_TRUE(topologyListener->hasIsMasterResponse(hostAndPort)); - auto response = topologyListener->getIsMasterResponse(hostAndPort); + ASSERT_TRUE(topologyListener->hasHelloResponse(hostAndPort)); + auto response = topologyListener->getHelloResponse(hostAndPort); ASSERT_EQ(response.size(), 1); ASSERT_EQ(response[0], ErrorCodes::HostUnreachable); } -TEST_F(ServerDiscoveryMonitorTestFixture, serverIsMasterMonitorOnTopologyDescriptionChangeAddHost) { +TEST_F(ServerDiscoveryMonitorTestFixture, ServerHelloMonitorOnTopologyDescriptionChangeAddHost) { auto replSet = std::make_unique( "test", 2, /* hasPrimary = */ false, /* dollarPrefixHosts = */ false); @@ -345,11 +364,11 @@ TEST_F(ServerDiscoveryMonitorTestFixture, serverIsMasterMonitorOnTopologyDescrip auto sdamConfig0 = sdam::SdamConfiguration(host0Vec); auto topologyDescription0 = std::make_shared(sdamConfig0); auto uri = replSet->getURI(); - auto isMasterMonitor = initServerDiscoveryMonitor(uri, sdamConfig0, topologyDescription0); - isMasterMonitor->disableExpeditedChecking(); + auto helloMonitor = initServerDiscoveryMonitor(uri, sdamConfig0, topologyDescription0); + helloMonitor->disableExpeditedChecking(); auto host1Delay = Milliseconds(100); - checkSingleIsMaster(host1Delay, host0, replSet.get()); + checkSingleHello(host1Delay, host0, replSet.get()); ASSERT_FALSE(hasReadyRequests()); // Start monitoring host1. @@ -358,21 +377,20 @@ TEST_F(ServerDiscoveryMonitorTestFixture, serverIsMasterMonitorOnTopologyDescrip auto sdamConfigAllHosts = sdam::SdamConfiguration(allHostsVec); auto topologyDescriptionAllHosts = std::make_shared(sdamConfigAllHosts); - isMasterMonitor->onTopologyDescriptionChangedEvent(topologyDescription0, - topologyDescriptionAllHosts); + helloMonitor->onTopologyDescriptionChangedEvent(topologyDescription0, + topologyDescriptionAllHosts); // Ensure expedited checking is disabled for the SingleServerDiscoveryMonitor corresponding to // host1 as well. - isMasterMonitor->disableExpeditedChecking(); + helloMonitor->disableExpeditedChecking(); // Confirm host0 and host1 are monitored. auto heartbeatFrequency = sdamConfigAllHosts.getHeartBeatFrequency(); - checkSingleIsMaster(heartbeatFrequency - host1Delay, host1, replSet.get()); - waitForNextIsMaster(sdamConfigAllHosts.getConnectionTimeout()); - checkSingleIsMaster(host1Delay, host0, replSet.get()); + checkSingleHello(heartbeatFrequency - host1Delay, host1, replSet.get()); + waitForNextHello(sdamConfigAllHosts.getConnectionTimeout()); + checkSingleHello(host1Delay, host0, replSet.get()); } -TEST_F(ServerDiscoveryMonitorTestFixture, - serverIsMasterMonitorOnTopologyDescriptionChangeRemoveHost) { +TEST_F(ServerDiscoveryMonitorTestFixture, ServerHelloMonitorOnTopologyDescriptionChangeRemoveHost) { auto replSet = std::make_unique( "test", 2, /* hasPrimary = */ false, /* dollarPrefixHosts = */ false); @@ -386,45 +404,45 @@ TEST_F(ServerDiscoveryMonitorTestFixture, auto topologyDescriptionAllHosts = std::make_shared(sdamConfigAllHosts); auto uri = replSet->getURI(); - auto isMasterMonitor = + auto helloMonitor = initServerDiscoveryMonitor(uri, sdamConfigAllHosts, topologyDescriptionAllHosts); - isMasterMonitor->disableExpeditedChecking(); + helloMonitor->disableExpeditedChecking(); // Confirm that both hosts are monitored. 
auto heartbeatFrequency = sdamConfigAllHosts.getHeartBeatFrequency(); while (hasReadyRequests()) { - processIsMasterRequest(replSet.get()); + processHelloRequest(replSet.get()); } auto deadline = elapsed() + heartbeatFrequency; auto topologyListener = getTopologyListener(); auto hasResponses = [&]() { - return topologyListener->hasIsMasterResponse(host0) && - topologyListener->hasIsMasterResponse(host1); + return topologyListener->hasHelloResponse(host0) && + topologyListener->hasHelloResponse(host1); }; while (elapsed() < heartbeatFrequency && !hasResponses()) { advanceTime(Milliseconds(1)); } - validateIsMasterResponse(host0, deadline); - validateIsMasterResponse(host1, deadline); + validateHelloResponse(host0, deadline); + validateHelloResponse(host1, deadline); // Remove host1 from the TopologyDescription to stop monitoring it. std::vector host0Vec{host0}; auto sdamConfig0 = sdam::SdamConfiguration(host0Vec); auto topologyDescription0 = std::make_shared(sdamConfig0); - isMasterMonitor->onTopologyDescriptionChangedEvent(topologyDescriptionAllHosts, - topologyDescription0); + helloMonitor->onTopologyDescriptionChangedEvent(topologyDescriptionAllHosts, + topologyDescription0); checkNoActivityBefore(deadline); - waitForNextIsMaster(sdamConfig0.getConnectionTimeout()); + waitForNextHello(sdamConfig0.getConnectionTimeout()); - checkSingleIsMaster(heartbeatFrequency, host0, replSet.get()); - waitForNextIsMaster(sdamConfig0.getConnectionTimeout()); + checkSingleHello(heartbeatFrequency, host0, replSet.get()); + waitForNextHello(sdamConfig0.getConnectionTimeout()); - // Confirm the next isMaster request is sent to host0 and not host1. - checkSingleIsMaster(heartbeatFrequency, host0, replSet.get()); + // Confirm the next "hello" request is sent to host0 and not host1. + checkSingleHello(heartbeatFrequency, host0, replSet.get()); } -TEST_F(ServerDiscoveryMonitorTestFixture, serverIsMasterMonitorShutdownStopsIsMasterRequests) { +TEST_F(ServerDiscoveryMonitorTestFixture, ServerHelloMonitorShutdownStopsHelloRequests) { auto replSet = std::make_unique( "test", 1, /* hasPrimary = */ false, /* dollarPrefixHosts = */ false); @@ -432,13 +450,13 @@ TEST_F(ServerDiscoveryMonitorTestFixture, serverIsMasterMonitorShutdownStopsIsMa auto sdamConfig = sdam::SdamConfiguration(hostVec); auto topologyDescription = std::make_shared(sdamConfig); auto uri = replSet->getURI(); - auto isMasterMonitor = initServerDiscoveryMonitor(uri, sdamConfig, topologyDescription); - isMasterMonitor->disableExpeditedChecking(); + auto helloMonitor = initServerDiscoveryMonitor(uri, sdamConfig, topologyDescription); + helloMonitor->disableExpeditedChecking(); auto heartbeatFrequency = sdamConfig.getHeartBeatFrequency(); - checkSingleIsMaster(heartbeatFrequency - Milliseconds(200), hostVec[0], replSet.get()); + checkSingleHello(heartbeatFrequency - Milliseconds(200), hostVec[0], replSet.get()); - isMasterMonitor->shutdown(); + helloMonitor->shutdown(); // After the ServerDiscoveryMonitor shuts down, the TopologyListener may have responses until // heartbeatFrequency has passed, but none of them should indicate Status::OK. @@ -448,27 +466,26 @@ TEST_F(ServerDiscoveryMonitorTestFixture, serverIsMasterMonitorShutdownStopsIsMa // Drain any requests already scheduled. 
while (elapsed() < deadline) { while (hasReadyRequests()) { - processIsMasterRequest(replSet.get(), hostVec[0]); + processHelloRequest(replSet.get(), hostVec[0]); } - if (topologyListener->hasIsMasterResponse(hostVec[0])) { - auto isMasterResponses = topologyListener->getIsMasterResponse(hostVec[0]); - for (auto& response : isMasterResponses) { + if (topologyListener->hasHelloResponse(hostVec[0])) { + auto helloResponses = topologyListener->getHelloResponse(hostVec[0]); + for (auto& response : helloResponses) { ASSERT_FALSE(response.isOK()); } } advanceTime(Milliseconds(1)); } - ASSERT_FALSE(topologyListener->hasIsMasterResponse(hostVec[0])); + ASSERT_FALSE(topologyListener->hasHelloResponse(hostVec[0])); } /** * Tests that the ServerDiscoveryMonitor waits until SdamConfiguration::kMinHeartbeatFrequency has - * passed since the last isMaster was received if requestImmediateCheck() is called before enough + * passed since the last "hello" was received if requestImmediateCheck() is called before enough * time has passed. */ -TEST_F(ServerDiscoveryMonitorTestFixture, - serverIsMasterMonitorRequestImmediateCheckWaitMinHeartbeat) { +TEST_F(ServerDiscoveryMonitorTestFixture, ServerHelloMonitorRequestImmediateCheckWaitMinHeartbeat) { auto replSet = std::make_unique( "test", 1, /* hasPrimary = */ false, /* dollarPrefixHosts = */ false); @@ -478,41 +495,41 @@ TEST_F(ServerDiscoveryMonitorTestFixture, auto sdamConfig0 = sdam::SdamConfiguration(hostVec); auto topologyDescription0 = std::make_shared(sdamConfig0); auto uri = replSet->getURI(); - auto isMasterMonitor = initServerDiscoveryMonitor(uri, sdamConfig0, topologyDescription0); + auto helloMonitor = initServerDiscoveryMonitor(uri, sdamConfig0, topologyDescription0); // Ensure the server is not in expedited mode *before* requestImmediateCheck(). - isMasterMonitor->disableExpeditedChecking(); + helloMonitor->disableExpeditedChecking(); - // Check that there is only one isMaster request at time t=0 up until - // timeAdvanceFromFirstIsMaster. + // Check that there is only one "hello" request at time t=0 up until + // timeAdvanceFromFirstHello. auto minHeartbeatFrequency = SdamConfiguration::kMinHeartbeatFrequency; - auto timeAdvanceFromFirstIsMaster = Milliseconds(10); - ASSERT_LT(timeAdvanceFromFirstIsMaster, minHeartbeatFrequency); - checkSingleIsMaster(timeAdvanceFromFirstIsMaster, hostVec[0], replSet.get()); + auto timeAdvanceFromFirstHello = Milliseconds(10); + ASSERT_LT(timeAdvanceFromFirstHello, minHeartbeatFrequency); + checkSingleHello(timeAdvanceFromFirstHello, hostVec[0], replSet.get()); - // It's been less than SdamConfiguration::kMinHeartbeatFrequency since the last isMaster was - // received. The next isMaster should be sent SdamConfiguration::kMinHeartbeatFrequency since - // the last isMaster was received rather than immediately. + // It's been less than SdamConfiguration::kMinHeartbeatFrequency since the last "hello" was + // received. The next "hello" should be sent SdamConfiguration::kMinHeartbeatFrequency since + // the last "hello" was received rather than immediately. 
auto timeRequestImmediateSent = elapsed(); - isMasterMonitor->requestImmediateCheck(); - waitForNextIsMaster(minHeartbeatFrequency); + helloMonitor->requestImmediateCheck(); + waitForNextHello(minHeartbeatFrequency); - auto timeIsMasterSent = elapsed(); - ASSERT_LT(timeRequestImmediateSent, timeIsMasterSent); - ASSERT_LT(timeIsMasterSent, timeRequestImmediateSent + minHeartbeatFrequency); - checkSingleIsMaster(minHeartbeatFrequency, hostVec[0], replSet.get()); + auto timeHelloSent = elapsed(); + ASSERT_LT(timeRequestImmediateSent, timeHelloSent); + ASSERT_LT(timeHelloSent, timeRequestImmediateSent + minHeartbeatFrequency); + checkSingleHello(minHeartbeatFrequency, hostVec[0], replSet.get()); // Confirm expedited requests continue since there is no primary. - waitForNextIsMaster(sdamConfig0.getConnectionTimeout()); - checkSingleIsMaster(minHeartbeatFrequency, hostVec[0], replSet.get()); + waitForNextHello(sdamConfig0.getConnectionTimeout()); + checkSingleHello(minHeartbeatFrequency, hostVec[0], replSet.get()); } /** * Tests that if more than SdamConfiguration::kMinHeartbeatFrequency has passed since the last - * isMaster response was received, the ServerDiscoveryMonitor sends an isMaster immediately after + * "hello" response was received, the ServerDiscoveryMonitor sends an "hello" immediately after * requestImmediateCheck() is called. */ -TEST_F(ServerDiscoveryMonitorTestFixture, serverIsMasterMonitorRequestImmediateCheckNoWait) { +TEST_F(ServerDiscoveryMonitorTestFixture, ServerHelloMonitorRequestImmediateCheckNoWait) { auto replSet = std::make_unique( "test", 1, /* hasPrimary = */ false, /* dollarPrefixHosts = */ false); @@ -522,23 +539,23 @@ TEST_F(ServerDiscoveryMonitorTestFixture, serverIsMasterMonitorRequestImmediateC auto sdamConfig0 = sdam::SdamConfiguration(hostVec); auto topologyDescription0 = std::make_shared(sdamConfig0); auto uri = replSet->getURI(); - auto isMasterMonitor = initServerDiscoveryMonitor(uri, sdamConfig0, topologyDescription0); + auto helloMonitor = initServerDiscoveryMonitor(uri, sdamConfig0, topologyDescription0); // Ensure the server is not in expedited mode *before* requestImmediateCheck(). - isMasterMonitor->disableExpeditedChecking(); + helloMonitor->disableExpeditedChecking(); // No less than SdamConfiguration::kMinHeartbeatFrequency must pass before // requestImmediateCheck() is called in order to ensure the server reschedules for an immediate // check. auto minHeartbeatFrequency = SdamConfiguration::kMinHeartbeatFrequency; - checkSingleIsMaster(minHeartbeatFrequency + Milliseconds(10), hostVec[0], replSet.get()); + checkSingleHello(minHeartbeatFrequency + Milliseconds(10), hostVec[0], replSet.get()); - isMasterMonitor->requestImmediateCheck(); - checkSingleIsMaster(minHeartbeatFrequency, hostVec[0], replSet.get()); + helloMonitor->requestImmediateCheck(); + checkSingleHello(minHeartbeatFrequency, hostVec[0], replSet.get()); // Confirm expedited requests continue since there is no primary. 
- waitForNextIsMaster(sdamConfig0.getConnectionTimeout()); - checkSingleIsMaster(minHeartbeatFrequency, hostVec[0], replSet.get()); + waitForNextHello(sdamConfig0.getConnectionTimeout()); + checkSingleHello(minHeartbeatFrequency, hostVec[0], replSet.get()); } } // namespace diff --git a/src/mongo/client/server_ping_monitor.cpp b/src/mongo/client/server_ping_monitor.cpp index f4ccc1d97a5d0..f9151011d8caa 100644 --- a/src/mongo/client/server_ping_monitor.cpp +++ b/src/mongo/client/server_ping_monitor.cpp @@ -28,19 +28,39 @@ */ +#include +#include #include - -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/sdam/topology_description.h" #include "mongo/client/server_ping_monitor.h" - -#include "mongo/client/sdam/sdam.h" -#include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/server_ping_monitor.h b/src/mongo/client/server_ping_monitor.h index b4667e856067d..337c8237dc504 100644 --- a/src/mongo/client/server_ping_monitor.h +++ b/src/mongo/client/server_ping_monitor.h @@ -29,11 +29,18 @@ #pragma once +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/client/mongo_uri.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/client/sdam/topology_listener.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -150,7 +157,7 @@ class ServerPingMonitor : public sdam::TopologyListener { void shutdown(); /** - * The first isMaster exchange for a connection to the server succeeded. Creates a new + * The first "hello" exchange for a connection to the server succeeded. Creates a new * SingleServerPingMonitor to monitor the new replica set member. 
*/ void onServerHandshakeCompleteEvent(sdam::HelloRTT durationMs, diff --git a/src/mongo/client/server_ping_monitor_test.cpp b/src/mongo/client/server_ping_monitor_test.cpp index 9a3f53982b58b..177d6db927b6d 100644 --- a/src/mongo/client/server_ping_monitor_test.cpp +++ b/src/mongo/client/server_ping_monitor_test.cpp @@ -28,20 +28,42 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include - -#include "mongo/client/sdam/sdam.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/sdam/sdam_configuration.h" +#include "mongo/client/sdam/topology_description.h" #include "mongo/client/sdam/topology_listener_mock.h" #include "mongo/client/server_ping_monitor.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/dbtests/mock/mock_replica_set.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -164,7 +186,7 @@ class ServerPingMonitorTestFixture : public unittest::Test { ASSERT_LT(elapsed(), deadline); auto pingResponse = _topologyListener->getPingResponse(hostAndPort); - // There should only be one isMaster response queued up. + // There should only be one "hello" response queued up. ASSERT_EQ(pingResponse.size(), 1); ASSERT(pingResponse[0].isOK()); diff --git a/src/mongo/client/streamable_replica_set_monitor.cpp b/src/mongo/client/streamable_replica_set_monitor.cpp index fcbf2e99a0380..baddfb728875d 100644 --- a/src/mongo/client/streamable_replica_set_monitor.cpp +++ b/src/mongo/client/streamable_replica_set_monitor.cpp @@ -27,33 +27,41 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/client/streamable_replica_set_monitor.h" - #include +#include #include +#include +#include +#include #include +#include +#include + +#include +#include +#include -#include "mongo/bson/simple_bsonelement_comparator.h" -#include "mongo/client/connpool.h" -#include "mongo/client/global_conn_pool.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/read_preference.h" +#include "mongo/client/replica_set_change_notifier.h" +#include "mongo/client/replica_set_monitor_interface.h" +#include "mongo/client/replica_set_monitor_manager.h" #include "mongo/client/replica_set_monitor_server_parameters_gen.h" +#include "mongo/client/sdam/election_id_set_version_pair.h" +#include "mongo/client/sdam/server_description.h" +#include "mongo/client/sdam/topology_description.h" +#include "mongo/client/sdam/topology_manager.h" +#include "mongo/client/streamable_replica_set_monitor.h" #include "mongo/client/streamable_replica_set_monitor_discovery_time_processor.h" #include "mongo/client/streamable_replica_set_monitor_query_processor.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/repl/bson_extract_optime.h" -#include "mongo/db/server_options.h" -#include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/stdx/condition_variable.h" -#include "mongo/stdx/unordered_set.h" -#include "mongo/util/string_map.h" -#include "mongo/util/timer.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -414,12 +422,12 @@ SemiFuture> StreamableReplicaSetMonitor::_enqueueOutsta query->start = _executor->now(); // Add the query to the list of outstanding queries. - auto queryIter = _outstandingQueries.insert(_outstandingQueries.end(), query); + _outstandingQueries.insert(_outstandingQueries.end(), query); // After a deadline or when the input cancellation token is canceled, cancel this query. If the // query completes first, the deadlineCancelSource will be used to cancel this task. _executor->sleepUntil(deadline, query->deadlineCancelSource.token()) - .getAsync([this, query, queryIter, self = shared_from_this(), cancelToken](Status status) { + .getAsync([this, query, self = shared_from_this(), cancelToken](Status status) { // If the deadline was reached or cancellation occurred on the input cancellation token, // mark the query as canceled. Otherwise, the deadlineCancelSource must have been // canceled due to the query completing successfully. @@ -439,7 +447,7 @@ SemiFuture> StreamableReplicaSetMonitor::_enqueueOutsta // been cleared) before erasing. 
if (!_isDropped.load()) { invariant(_outstandingQueries.size() > 0); - _eraseQueryFromOutstandingQueries(lk, queryIter); + _eraseQueryFromOutstandingQueries(lk, query); } } } @@ -797,11 +805,16 @@ void StreamableReplicaSetMonitor::_failOutstandingWithStatus(WithLock, Status st } std::list::iterator -StreamableReplicaSetMonitor::_eraseQueryFromOutstandingQueries( +StreamableReplicaSetMonitor::_eraseQueryIterFromOutstandingQueries( WithLock, std::list::iterator iter) { return _outstandingQueries.erase(iter); } +void StreamableReplicaSetMonitor::_eraseQueryFromOutstandingQueries(WithLock, + const HostQueryPtr& query) { + std::erase_if(_outstandingQueries, [&query](const HostQueryPtr& _q) { return _q == query; }); +} + void StreamableReplicaSetMonitor::_processOutstanding( const TopologyDescriptionPtr& topologyDescription) { @@ -835,7 +848,7 @@ void StreamableReplicaSetMonitor::_processOutstanding( "readPref"_attr = readPrefToStringFull(query->criteria), "duration"_attr = Milliseconds(latency)); - it = _eraseQueryFromOutstandingQueries(lock, it); + it = _eraseQueryIterFromOutstandingQueries(lock, it); } else { // The query was canceled, so skip to the next entry without erasing it. ++it; diff --git a/src/mongo/client/streamable_replica_set_monitor.h b/src/mongo/client/streamable_replica_set_monitor.h index 3601494a95512..e4eac1f4232fa 100644 --- a/src/mongo/client/streamable_replica_set_monitor.h +++ b/src/mongo/client/streamable_replica_set_monitor.h @@ -29,26 +29,49 @@ #pragma once +#include +#include +#include +#include #include +#include #include #include #include +#include +#include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/mongo_uri.h" +#include "mongo/client/read_preference.h" #include "mongo/client/replica_set_change_notifier.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/client/replica_set_monitor_stats.h" #include "mongo/client/sdam/sdam.h" +#include "mongo/client/sdam/sdam_configuration.h" +#include "mongo/client/sdam/sdam_datatypes.h" +#include "mongo/client/sdam/server_selector.h" +#include "mongo/client/sdam/topology_listener.h" #include "mongo/client/server_discovery_monitor.h" #include "mongo/client/server_ping_monitor.h" #include "mongo/client/streamable_replica_set_monitor_error_handler.h" #include "mongo/executor/egress_tag_closer.h" #include "mongo/executor/task_executor.h" #include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" #include "mongo/util/concurrency/with_lock.h" #include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" namespace mongo { @@ -57,6 +80,7 @@ class BSONObj; class ReplicaSetMonitor; class ReplicaSetMonitorTest; struct ReadPreferenceSetting; + using ReplicaSetMonitorPtr = std::shared_ptr; /** @@ -222,9 +246,12 @@ class StreamableReplicaSetMonitor final const Date_t& deadline); // Removes the query pointed to by iter and returns an iterator to the next item in the list. - std::list::iterator _eraseQueryFromOutstandingQueries( + std::list::iterator _eraseQueryIterFromOutstandingQueries( WithLock, std::list::iterator iter); + // Removes the given query from the list, if it is there. 
+ void _eraseQueryFromOutstandingQueries(WithLock, const HostQueryPtr& query); + std::vector _extractHosts( const std::vector& serverDescriptions); diff --git a/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor.cpp b/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor.cpp index cd5f9850a91dd..765721c94ceac 100644 --- a/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor.cpp +++ b/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor.cpp @@ -29,9 +29,16 @@ #include "mongo/client/streamable_replica_set_monitor_discovery_time_processor.h" #include +#include -#include "mongo/client/global_conn_pool.h" +#include + +#include "mongo/client/sdam/server_description.h" +#include "mongo/client/sdam/topology_description.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor.h b/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor.h index 494c9e82fcc50..64cf6868a251d 100644 --- a/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor.h +++ b/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor.h @@ -28,8 +28,16 @@ */ #pragma once +#include + #include "mongo/client/sdam/sdam.h" +#include "mongo/client/sdam/sdam_datatypes.h" +#include "mongo/client/sdam/topology_listener.h" #include "mongo/client/streamable_replica_set_monitor.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/duration.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/timer.h" namespace mongo { class StreamableReplicaSetMonitor::StreamableReplicaSetMonitorDiscoveryTimeProcessor final diff --git a/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor_test.cpp b/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor_test.cpp index 7532bfd3074c0..89450ab1499a5 100644 --- a/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor_test.cpp +++ b/src/mongo/client/streamable_replica_set_monitor_discovery_time_processor_test.cpp @@ -27,11 +27,25 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/client/sdam/sdam_configuration.h" #include "mongo/client/sdam/sdam_test_base.h" #include "mongo/client/sdam/server_description_builder.h" +#include "mongo/client/sdam/server_selector.h" #include "mongo/client/sdam/topology_description.h" -#include "mongo/client/sdam/topology_manager.h" +#include "mongo/client/sdam/topology_state_machine.h" #include "mongo/client/streamable_replica_set_monitor_discovery_time_processor.h" +#include "mongo/db/wire_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/client/streamable_replica_set_monitor_error_handler.cpp b/src/mongo/client/streamable_replica_set_monitor_error_handler.cpp index f4696942354d1..de3e6f3a92ae6 100644 --- a/src/mongo/client/streamable_replica_set_monitor_error_handler.cpp +++ b/src/mongo/client/streamable_replica_set_monitor_error_handler.cpp @@ -28,7 +28,18 @@ */ #include "mongo/client/streamable_replica_set_monitor_error_handler.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/client/streamable_replica_set_monitor_error_handler.h b/src/mongo/client/streamable_replica_set_monitor_error_handler.h index 2b44f404bfa29..94afca9f6c972 100644 --- a/src/mongo/client/streamable_replica_set_monitor_error_handler.h +++ b/src/mongo/client/streamable_replica_set_monitor_error_handler.h @@ -27,12 +27,20 @@ * it in the license file. 
*/ #pragma once +#include #include +#include +#include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/sdam/sdam.h" +#include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/executor/network_interface.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/net/hostandport.h" namespace mongo { class StreamableReplicaSetMonitorErrorHandler { diff --git a/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp b/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp index 413dca44722c0..e435d1d2bb8ae 100644 --- a/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp +++ b/src/mongo/client/streamable_replica_set_monitor_error_handler_test.cpp @@ -28,10 +28,22 @@ */ #include "mongo/client/streamable_replica_set_monitor_error_handler.h" -#include "mongo/client/sdam/sdam.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -200,7 +212,7 @@ TEST_F(StreamableReplicaSetMonitorErrorHandlerTestFixture, MonitoringNonNetworkE } TEST_F(StreamableReplicaSetMonitorErrorHandlerTestFixture, - ApplicationNonNetworkIsMasterOrRecoveringError) { + ApplicationNonNetworkHelloOrRecoveringError) { testScenario( HandshakeStage::kPostHandshake, kMonitoringOperation, diff --git a/src/mongo/client/streamable_replica_set_monitor_for_testing.cpp b/src/mongo/client/streamable_replica_set_monitor_for_testing.cpp index f32837ed8792d..48e0820912f09 100644 --- a/src/mongo/client/streamable_replica_set_monitor_for_testing.cpp +++ b/src/mongo/client/streamable_replica_set_monitor_for_testing.cpp @@ -29,10 +29,15 @@ #include "mongo/client/streamable_replica_set_monitor_for_testing.h" +#include + +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" namespace mongo { diff --git a/src/mongo/client/streamable_replica_set_monitor_for_testing.h b/src/mongo/client/streamable_replica_set_monitor_for_testing.h index 4efbe8c3d413c..1fa0913418db6 100644 --- a/src/mongo/client/streamable_replica_set_monitor_for_testing.h +++ b/src/mongo/client/streamable_replica_set_monitor_for_testing.h @@ -29,9 +29,14 @@ #pragma once +#include + +#include "mongo/client/mongo_uri.h" #include "mongo/client/replica_set_monitor_manager.h" +#include "mongo/client/replica_set_monitor_stats.h" #include "mongo/client/sdam/mock_topology_manager.h" #include "mongo/client/streamable_replica_set_monitor.h" +#include "mongo/executor/task_executor.h" namespace mongo { diff --git a/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp b/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp index fb048b546711a..bb73eb1e80ea7 100644 --- a/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp +++ 
b/src/mongo/client/streamable_replica_set_monitor_query_processor.cpp @@ -30,8 +30,14 @@ #include -#include "mongo/client/global_conn_pool.h" +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/client/replica_set_monitor_manager.h" +#include "mongo/client/sdam/topology_description.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -58,7 +64,6 @@ void StreamableReplicaSetMonitor::StreamableReplicaSetMonitorQueryProcessor:: if (!replicaSetMonitor) { LOGV2_DEBUG(4333215, kLogLevel, - "Could not find rsm instance {replicaSet} for query processing", "Could not find rsm instance for query processing", "replicaSet"_attr = *setName); return; @@ -66,7 +71,7 @@ void StreamableReplicaSetMonitor::StreamableReplicaSetMonitorQueryProcessor:: replicaSetMonitor->_processOutstanding(newDescription); } - // No set name occurs when there is an error monitoring isMaster replies (e.g. HostUnreachable). + // No set name occurs when there is an error monitoring "hello" replies (e.g. HostUnreachable). // There is nothing to do in that case. } }; // namespace mongo diff --git a/src/mongo/client/streamable_replica_set_monitor_query_processor.h b/src/mongo/client/streamable_replica_set_monitor_query_processor.h index 0b7b1b5e41a51..58907923940bb 100644 --- a/src/mongo/client/streamable_replica_set_monitor_query_processor.h +++ b/src/mongo/client/streamable_replica_set_monitor_query_processor.h @@ -29,7 +29,10 @@ #pragma once #include "mongo/client/sdam/sdam.h" +#include "mongo/client/sdam/sdam_datatypes.h" +#include "mongo/client/sdam/topology_listener.h" #include "mongo/client/streamable_replica_set_monitor.h" +#include "mongo/platform/mutex.h" namespace mongo { class StreamableReplicaSetMonitor::StreamableReplicaSetMonitorQueryProcessor final diff --git a/src/mongo/crypto/SConscript b/src/mongo/crypto/SConscript index 67aadbffd7674..f6d5b6c5cddc4 100644 --- a/src/mongo/crypto/SConscript +++ b/src/mongo/crypto/SConscript @@ -116,6 +116,7 @@ fleCryptoEnv.Library( '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/idl/cluster_server_parameter', '$BUILD_DIR/mongo/shell/kms_idl', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', '$BUILD_DIR/mongo/util/testing_options', '$BUILD_DIR/third_party/shim_libmongocrypt', 'aead_encryption', @@ -156,6 +157,8 @@ env.CppUnitTest( LIBDEPS=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/base/secure_allocator', + '$BUILD_DIR/mongo/db/service_context_non_d', + '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/shell/kms_idl', '$BUILD_DIR/mongo/util/net/http_client_impl', '$BUILD_DIR/mongo/util/net/openssl_init' if ssl_provider == 'openssl' else [], diff --git a/src/mongo/crypto/aead_encryption.cpp b/src/mongo/crypto/aead_encryption.cpp index 9372463451315..cbb2068341bea 100644 --- a/src/mongo/crypto/aead_encryption.cpp +++ b/src/mongo/crypto/aead_encryption.cpp @@ -27,16 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" #include "mongo/crypto/aead_encryption.h" - -#include "mongo/base/data_view.h" +#include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/crypto/sha256_block.h" #include "mongo/crypto/sha512_block.h" #include "mongo/crypto/symmetric_crypto.h" -#include "mongo/db/matcher/schema/encrypt_schema_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/secure_compare_memory.h" +#include "mongo/util/str.h" namespace mongo { namespace crypto { diff --git a/src/mongo/crypto/aead_encryption.h b/src/mongo/crypto/aead_encryption.h index 2414c622cb5c7..19081ff505567 100644 --- a/src/mongo/crypto/aead_encryption.h +++ b/src/mongo/crypto/aead_encryption.h @@ -32,10 +32,12 @@ #include #include -#include "mongo/crypto/fle_data_frames.h" - +#include "mongo/base/data_range.h" #include "mongo/base/data_view.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/crypto/fle_data_frames.h" +#include "mongo/crypto/symmetric_crypto.h" #include "mongo/crypto/symmetric_key.h" namespace mongo { diff --git a/src/mongo/crypto/aead_encryption_test.cpp b/src/mongo/crypto/aead_encryption_test.cpp index aa3318ce152ea..e9675c41d058c 100644 --- a/src/mongo/crypto/aead_encryption_test.cpp +++ b/src/mongo/crypto/aead_encryption_test.cpp @@ -28,16 +28,28 @@ */ #include +#include +#include +#include +#include +#include +#include +#include -#include "mongo/base/data_range.h" +#include "aead_encryption.h" +#include +#include +#include +#include "mongo/base/data_range.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/secure_allocator.h" +#include "mongo/base/string_data.h" #include "mongo/platform/random.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/hex.h" -#include "aead_encryption.h" - namespace mongo { namespace { diff --git a/src/mongo/crypto/crypto_bm.cpp b/src/mongo/crypto/crypto_bm.cpp index ece39e91b323a..ac4bd29a64d1e 100644 --- a/src/mongo/crypto/crypto_bm.cpp +++ b/src/mongo/crypto/crypto_bm.cpp @@ -27,15 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include +#include +#include +#include -#include "mongo/crypto/aead_encryption.h" +#include "mongo/base/data_range.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" #include "mongo/crypto/fle_crypto.h" -#include "mongo/crypto/fle_fields_util.h" +#include "mongo/crypto/fle_crypto_types.h" #include "mongo/crypto/sha256_block.h" -#include "mongo/crypto/symmetric_key.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/crypto/encryption_fields.idl b/src/mongo/crypto/encryption_fields.idl index 5e68ba8d9d427..ad6a67df82e20 100644 --- a/src/mongo/crypto/encryption_fields.idl +++ b/src/mongo/crypto/encryption_fields.idl @@ -56,20 +56,11 @@ enums: RangePreview: "rangePreview" feature_flags: - featureFlagFLE2Range: - description: "Enable support for range indexes in Queryable Encryption" - version: 6.2 - cpp_varname: gFeatureFlagFLE2Range - default: true - featureFlagFLE2CompactForProtocolV2: - description: "Enable support for version 2 of Queryable Encryption wire protocol" - version: 7.0 - cpp_varname: gFeatureFlagFLE2CompactForProtocolV2 - default: true featureFlagFLE2CleanupCommand: description: "Enable support for the Cleanup Structured Encryption Data command" cpp_varname: gFeatureFlagFLE2CleanupCommand default: false + shouldBeFCVGated: true structs: diff --git a/src/mongo/crypto/encryption_fields_util.cpp b/src/mongo/crypto/encryption_fields_util.cpp index bf7bfbeccf1aa..6571c6929f01d 100644 --- a/src/mongo/crypto/encryption_fields_util.cpp +++ b/src/mongo/crypto/encryption_fields_util.cpp @@ -29,7 +29,13 @@ #include "mongo/crypto/encryption_fields_util.h" +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" #include +#include +#include + +#include namespace mongo { diff --git a/src/mongo/crypto/encryption_fields_util.h b/src/mongo/crypto/encryption_fields_util.h index cd723950c35ea..5793148be9652 100644 --- a/src/mongo/crypto/encryption_fields_util.h +++ b/src/mongo/crypto/encryption_fields_util.h @@ -29,6 +29,8 @@ #pragma once #include +#include +#include #include "mongo/base/status.h" #include "mongo/base/string_data.h" diff --git a/src/mongo/crypto/encryption_fields_util_test.cpp b/src/mongo/crypto/encryption_fields_util_test.cpp index f223ce0227777..0b68e76f9b05c 100644 --- a/src/mongo/crypto/encryption_fields_util_test.cpp +++ b/src/mongo/crypto/encryption_fields_util_test.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/crypto/encryption_fields_util.h" +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/crypto/encryption_fields_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/crypto/encryption_fields_validation.cpp b/src/mongo/crypto/encryption_fields_validation.cpp index 42379f293ee7b..5cb38e7002f32 100644 --- a/src/mongo/crypto/encryption_fields_validation.cpp +++ b/src/mongo/crypto/encryption_fields_validation.cpp @@ -29,11 +29,31 @@ #include "encryption_fields_validation.h" +#include +#include +#include +#include +#include + +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsontypes.h" #include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/encryption_fields_util.h" #include "mongo/db/field_ref.h" -#include +#include "mongo/db/namespace_string.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -240,12 +260,6 @@ void validateEncryptedFieldConfig(const EncryptedFieldConfig* config) { "Encrypted State Collection name should follow enxcol_..esc naming pattern", NamespaceString("", config->getEscCollection().get()).isFLE2StateCollection()); } - if (config->getEccCollection()) { - uassert( - 7406901, - "Encrypted Cache Collection name should follow enxcol_..ecc naming pattern", - NamespaceString("", config->getEccCollection().get()).isFLE2StateCollection()); - } if (config->getEcocCollection()) { uassert(7406902, "Encrypted Compaction Collection name should follow enxcol_..ecoc " diff --git a/src/mongo/crypto/encryption_fields_validation.h b/src/mongo/crypto/encryption_fields_validation.h index 6735d7b2b1ec2..b29de550cd3c6 100644 --- a/src/mongo/crypto/encryption_fields_validation.h +++ b/src/mongo/crypto/encryption_fields_validation.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/platform/decimal128.h" diff --git a/src/mongo/crypto/encryption_fields_validation_test.cpp b/src/mongo/crypto/encryption_fields_validation_test.cpp index 5ab9b2a432a16..3ed170bfab2cb 100644 --- a/src/mongo/crypto/encryption_fields_validation_test.cpp +++ b/src/mongo/crypto/encryption_fields_validation_test.cpp @@ -27,12 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" #include "mongo/crypto/encryption_fields_validation.h" - #include "mongo/platform/decimal128.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/crypto/fle_crypto.cpp b/src/mongo/crypto/fle_crypto.cpp index a30e9951b73e1..dc35f2a95c6c9 100644 --- a/src/mongo/crypto/fle_crypto.cpp +++ b/src/mongo/crypto/fle_crypto.cpp @@ -29,12 +29,34 @@ #include "mongo/crypto/fle_crypto.h" -#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/multiprecision/detail/default_ops.hpp" +// IWYU pragma: no_include "boost/multiprecision/detail/integer_ops.hpp" +// IWYU pragma: no_include "boost/multiprecision/detail/no_et_ops.hpp" +// IWYU pragma: no_include "boost/multiprecision/detail/number_base.hpp" +// IWYU pragma: no_include "boost/multiprecision/detail/number_compare.hpp" +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include #include +#include #include -#include #include #include #include @@ -42,12 +64,10 @@ #include #include #include -#include #include #include extern "C" { -#include #include #include } @@ -75,18 +95,22 @@ extern "C" { #include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/crypto/fle_fields_util.h" #include "mongo/crypto/sha256_block.h" -#include "mongo/crypto/symmetric_key.h" -#include "mongo/db/basic_types_gen.h" +#include "mongo/db/basic_types.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/bits.h" #include "mongo/platform/decimal128.h" #include "mongo/platform/random.h" -#include "mongo/shell/kms_gen.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/stdx/variant.h" #include "mongo/util/assert_util.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/debug_util.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -1806,9 +1830,101 @@ BSONObj runStateMachineForDecryption(mongocrypt_ctx_t* ctx, FLEKeyVault* keyVaul return result; } +/** + * Reads the anchor document identified by anchorId, and if found, decrypts the value + * and returns the parsed positions as a pair. If the anchor is not found, returns none. + */ +boost::optional readAndDecodeAnchor(const FLEStateCollectionReader& reader, + const ESCTwiceDerivedValueToken& valueToken, + const PrfBlock& anchorId) { + auto anchor = reader.getById(anchorId); + if (anchor.isEmpty()) { + return boost::none; + } + + auto anchorDoc = uassertStatusOK(ESCCollection::decryptAnchorDocument(valueToken, anchor)); + ESCCountsPair positions; + positions.apos = anchorDoc.position; + positions.cpos = anchorDoc.count; + return positions; +} + +/** + * Performs all the ESC reads required by the QE cleanup algorithm. 
+ */ +FLEEdgeCountInfo getEdgeCountInfoForCleanup(const FLEStateCollectionReader& reader, + ConstDataRange tag) { + auto escToken = EDCServerPayloadInfo::getESCToken(tag); + + auto tagToken = FLETwiceDerivedTokenGenerator::generateESCTwiceDerivedTagToken(escToken); + auto valueToken = FLETwiceDerivedTokenGenerator::generateESCTwiceDerivedValueToken(escToken); + + // step (C) + // positions.cpos is a_1 + // positions.apos is a_2 + auto positions = ESCCollection::emuBinaryV2(reader, tagToken, valueToken); + + // step (D) + // nullAnchorPositions is r + auto nullAnchorPositions = + readAndDecodeAnchor(reader, valueToken, ESCCollection::generateNullAnchorId(tagToken)); + + // This holds what value of a_1 should be used when inserting/updating the null anchor. + auto latestCpos = 0; + + if (positions.apos == boost::none) { + // case (E) + // Null anchor exists & contains the latest anchor position, + // and *maybe* the latest non-anchor position. + uassert(7295004, "ESC null anchor is expected but not found", nullAnchorPositions); + + // emuBinary must not return 0 for cpos if an anchor exists + uassert(7295005, "Invalid non-anchor position encountered", positions.cpos.value_or(1) > 0); + + // If emuBinary returns none for a_1, then the null anchor has the latest non-anchor pos. + // This may happen if a prior cleanup was interrupted after the null anchors were updated, + // but before the ECOC temp collection could be dropped, and on resume, no new insertions + // or compactions have occurred since the previous cleanup. + latestCpos = positions.cpos.value_or(nullAnchorPositions->cpos); + + } else if (positions.apos.value() == 0) { + // case (F) + // No anchors yet exist, so null anchor cannot exist and emuBinary must have + // returned a value for cpos. + uassert(7295006, "Unexpected ESC null anchor is found", !nullAnchorPositions); + uassert(7295007, "Invalid non-anchor position encountered", positions.cpos); + + latestCpos = positions.cpos.value(); + } else /* (apos > 0) */ { + // case (G) + // New anchors exist - if null anchor exists, then it contains stale positions. + + // emuBinary must not return 0 for cpos if an anchor exists + uassert(7295008, "Invalid non-anchor position encountered", positions.cpos.value_or(1) > 0); + + // If emuBinary returns none for cpos, then the newest anchor has the latest non-anchor pos. + // This may happen if a prior compact was interrupted after it inserted a new anchor, but + // before the ECOC temp collection could be dropped, and cleanup started immediately + // after. + latestCpos = positions.cpos.value_or_eval([&]() { + auto anchorPositions = readAndDecodeAnchor( + reader, + valueToken, + ESCCollection::generateAnchorId(tagToken, positions.apos.value())); + uassert(7295009, "ESC anchor is expected but not found", anchorPositions); + return anchorPositions->cpos; + }); + } + + return FLEEdgeCountInfo( + latestCpos, tagToken, positions, nullAnchorPositions, reader.getStats(), boost::none); +} + +/** + * Performs all the ESC reads required by the QE compact algorithm. + */ FLEEdgeCountInfo getEdgeCountInfoForCompact(const FLEStateCollectionReader& reader, - ConstDataRange tag, - const boost::optional& edc) { + ConstDataRange tag) { auto escToken = EDCServerPayloadInfo::getESCToken(tag); @@ -1823,7 +1939,7 @@ FLEEdgeCountInfo getEdgeCountInfoForCompact(const FLEStateCollectionReader& read // was interrupted. 
On restart, the compaction will run emuBinaryV2 again, but since the // anchor was already inserted for this value, it may return null cpos if there have been no // new insertions for that value since the first compact attempt. - if (!positions.cpos.has_value()) { + if (positions.cpos == boost::none) { // No new non-anchors since the last compact/cleanup. // There must be at least one anchor. uassert(7293602, @@ -1832,33 +1948,24 @@ FLEEdgeCountInfo getEdgeCountInfoForCompact(const FLEStateCollectionReader& read // the anchor with the latest cpos already exists so no more work needed return FLEEdgeCountInfo( - 0, tagToken, positions.cpos, positions.apos, reader.getStats(), boost::none); + 0, tagToken, positions, boost::none, reader.getStats(), boost::none); } uint64_t nextAnchorPos = 0; - if (!positions.apos.has_value()) { - auto r_esc = reader.getById(ESCCollection::generateNullAnchorId(tagToken)); + if (positions.apos == boost::none) { + auto nullAnchorPositions = + readAndDecodeAnchor(reader, valueToken, ESCCollection::generateNullAnchorId(tagToken)); - uassert(7293601, "ESC null anchor document not found", !r_esc.isEmpty()); + uassert(7293601, "ESC null anchor document not found", nullAnchorPositions); - auto nullAnchorDoc = - uassertStatusOK(ESCCollection::decryptAnchorDocument(valueToken, r_esc)); - nextAnchorPos = nullAnchorDoc.position + 1; + nextAnchorPos = nullAnchorPositions->apos + 1; } else { nextAnchorPos = positions.apos.value() + 1; } return FLEEdgeCountInfo( - nextAnchorPos, - tagToken, - positions.cpos, - positions.apos, - reader.getStats(), - edc.map([](const PrfBlock& prf) { - return FLETokenFromCDR( - prf); - })); + nextAnchorPos, tagToken, positions, boost::none, reader.getStats(), boost::none); } FLEEdgeCountInfo getEdgeCountInfo(const FLEStateCollectionReader& reader, @@ -1893,12 +2000,10 @@ FLEEdgeCountInfo getEdgeCountInfo(const FLEStateCollectionReader& reader, anchorId = ESCCollection::generateAnchorId(tagToken, positions.apos.value()); } - BSONObj anchorDoc = reader.getById(anchorId); - uassert(7291903, "ESC anchor document not found", !anchorDoc.isEmpty()); + auto anchorPositions = readAndDecodeAnchor(reader, valueToken, anchorId); + uassert(7291903, "ESC anchor document not found", anchorPositions); - auto escAnchor = - uassertStatusOK(ESCCollection::decryptAnchorDocument(valueToken, anchorDoc)); - count = escAnchor.count + 1; + count = anchorPositions->cpos + 1; } @@ -2662,10 +2767,9 @@ boost::optional binarySearchCommon(const FLEStateCollectionReader& rea } } // namespace -ESCCollection::EmuBinaryResult ESCCollection::emuBinaryV2( - const FLEStateCollectionReader& reader, - const ESCTwiceDerivedTagToken& tagToken, - const ESCTwiceDerivedValueToken& valueToken) { +EmuBinaryResult ESCCollection::emuBinaryV2(const FLEStateCollectionReader& reader, + const ESCTwiceDerivedTagToken& tagToken, + const ESCTwiceDerivedValueToken& valueToken) { auto tracker = FLEStatusSection::get().makeEmuBinaryTracker(); auto x = ESCCollection::anchorBinaryHops(reader, tagToken, valueToken, tracker); @@ -2771,10 +2875,19 @@ std::vector> ESCCollection::getTags( countInfos.reserve(tokens.size()); for (const auto& token : tokens) { - if (type == FLETagQueryInterface::TagQueryType::kCompact) { - countInfos.push_back(getEdgeCountInfoForCompact(reader, token.esc, token.edc)); - } else { - countInfos.push_back(getEdgeCountInfo(reader, token.esc, type, token.edc)); + switch (type) { + case FLETagQueryInterface::TagQueryType::kCompact: + 
countInfos.push_back(getEdgeCountInfoForCompact(reader, token.esc)); + break; + case FLETagQueryInterface::TagQueryType::kCleanup: + countInfos.push_back(getEdgeCountInfoForCleanup(reader, token.esc)); + break; + case FLETagQueryInterface::TagQueryType::kInsert: + case FLETagQueryInterface::TagQueryType::kQuery: + countInfos.push_back(getEdgeCountInfo(reader, token.esc, type, token.edc)); + break; + default: + MONGO_UNREACHABLE; } } @@ -3943,7 +4056,8 @@ BSONObj EncryptionInformationHelpers::encryptionInformationSerialize( EncryptionInformation ei; ei.setType(kEncryptionInformationSchemaVersion); - ei.setSchema(BSON(nss.toString() << encryptedFields)); + // Do not include tenant id in nss in the schema as the command request has "$tenant". + ei.setSchema(BSON(nss.serializeWithoutTenantPrefix_UNSAFE() << encryptedFields)); return ei.toBSON(); } @@ -3952,7 +4066,8 @@ EncryptedFieldConfig EncryptionInformationHelpers::getAndValidateSchema( const NamespaceString& nss, const EncryptionInformation& ei) { BSONObj schema = ei.getSchema(); - auto element = schema.getField(nss.toString()); + // Do not include tenant id in nss in the schema as the command request has "$tenant". + auto element = schema.getField(nss.serializeWithoutTenantPrefix_UNSAFE()); uassert(6371205, "Expected an object for schema in EncryptionInformation", @@ -4061,14 +4176,23 @@ std::vector CompactionHelpers::parseCompactionTokens(BSONObj co void CompactionHelpers::validateCompactionTokens(const EncryptedFieldConfig& efc, BSONObj compactionTokens) { + _validateTokens(efc, compactionTokens, "Compaction"_sd); +} + +void CompactionHelpers::validateCleanupTokens(const EncryptedFieldConfig& efc, + BSONObj cleanupTokens) { + _validateTokens(efc, cleanupTokens, "Cleanup"_sd); +} + +void CompactionHelpers::_validateTokens(const EncryptedFieldConfig& efc, + BSONObj tokens, + StringData cmd) { for (const auto& field : efc.getFields()) { - const auto& tokenElement = compactionTokens.getField(field.getPath()); - uassert( - 6346806, - str::stream() - << "Compaction tokens object is missing compaction token for the encrypted path '" - << field.getPath() << "'", - !tokenElement.eoo()); + const auto& tokenElement = tokens.getField(field.getPath()); + uassert(7294900, + str::stream() << cmd << " tokens object is missing " << cmd + << " token for the encrypted path '" << field.getPath() << "'", + !tokenElement.eoo()); } } @@ -4894,6 +5018,14 @@ PrfBlock FLEUtil::prf(ConstDataRange key, uint64_t value) { return prf(key, bufValue); } +void FLEUtil::checkEFCForECC(const EncryptedFieldConfig& efc) { + uassert(7568300, + str::stream() + << "Queryable Encryption version 2 collections must not contain the eccCollection" + << " in EncryptedFieldConfig", + !efc.getEccCollection()); +} + StatusWith> FLEUtil::decryptData(ConstDataRange key, ConstDataRange cipherText) { auto plainTextLength = fle2GetPlainTextLength(cipherText.length()); diff --git a/src/mongo/crypto/fle_crypto.h b/src/mongo/crypto/fle_crypto.h index 24ac7425baedd..ef0d6f9024d7c 100644 --- a/src/mongo/crypto/fle_crypto.h +++ b/src/mongo/crypto/fle_crypto.h @@ -30,30 +30,44 @@ #pragma once #include +#include #include #include +#include +#include #include +#include +#include +#include #include #include #include #include "mongo/base/data_range.h" +#include "mongo/base/data_range_cursor.h" #include "mongo/base/data_type_validated.h" #include "mongo/base/secure_allocator.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include 
"mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto_types.h" #include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/crypto/fle_stats.h" +#include "mongo/crypto/fle_stats_gen.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/crypto/symmetric_crypto.h" +#include "mongo/crypto/symmetric_key.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" -#include "mongo/rpc/object_check.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/decimal128.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep #include "mongo/util/uuid.h" namespace mongo { @@ -331,7 +345,7 @@ struct ESCDocument { */ class FLETagQueryInterface { public: - enum class TagQueryType { kInsert, kQuery, kCompact }; + enum class TagQueryType { kInsert, kQuery, kCompact, kCleanup }; virtual ~FLETagQueryInterface(); @@ -516,10 +530,6 @@ class ESCCollection { * (x > 0) means non-null anchors exist without a null anchor OR new non-null anchors * have been added since the last-recorded apos in the null anchor. */ - struct EmuBinaryResult { - boost::optional cpos; - boost::optional apos; - }; static EmuBinaryResult emuBinaryV2(const FLEStateCollectionReader& reader, const ESCTwiceDerivedTagToken& tagToken, const ESCTwiceDerivedValueToken& valueToken); @@ -1330,6 +1340,15 @@ class CompactionHelpers { * in the encrypted field config */ static void validateCompactionTokens(const EncryptedFieldConfig& efc, BSONObj compactionTokens); + + /** + * Validates the compaction tokens BSON contains an element for each field + * in the encrypted field config + */ + static void validateCleanupTokens(const EncryptedFieldConfig& efc, BSONObj cleanupTokens); + +private: + static void _validateTokens(const EncryptedFieldConfig& efc, BSONObj tokens, StringData cmd); }; /** @@ -1547,6 +1566,8 @@ class FLEUtil { static PrfBlock prf(ConstDataRange key, uint64_t value); + static void checkEFCForECC(const EncryptedFieldConfig& efc); + /** * Decrypt AES-256-CTR encrypted data. Exposed for benchmarking purposes. */ diff --git a/src/mongo/crypto/fle_crypto_test.cpp b/src/mongo/crypto/fle_crypto_test.cpp index 94fff514108b9..b416330d4591c 100644 --- a/src/mongo/crypto/fle_crypto_test.cpp +++ b/src/mongo/crypto/fle_crypto_test.cpp @@ -27,24 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/crypto/fle_crypto.h" - #include -#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include +#include #include -#include #include #include #include #include #include "mongo/base/data_range.h" -#include "mongo/base/data_type_validated.h" #include "mongo/base/error_codes.h" +#include "mongo/base/secure_allocator.h" +#include "mongo/base/status.h" +#include "mongo/bson/bson_depth.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" @@ -52,21 +58,24 @@ #include "mongo/bson/json.h" #include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/crypto/aead_encryption.h" +#include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_data_frames.h" #include "mongo/crypto/fle_field_schema_gen.h" -#include "mongo/crypto/fle_fields_util.h" #include "mongo/crypto/symmetric_crypto.h" -#include "mongo/db/matcher/schema/encrypt_schema_gen.h" -#include "mongo/db/operation_context.h" +#include "mongo/db/basic_types.h" #include "mongo/idl/idl_parser.h" -#include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/decimal128.h" -#include "mongo/rpc/object_check.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep #include "mongo/shell/kms_gen.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/hex.h" #include "mongo/util/time_support.h" @@ -667,14 +676,13 @@ std::tuple generateEmuBinary return std::tie(escTwiceTag, escTwiceValue); } -mongo::ESCCollection::EmuBinaryResult EmuBinaryV2Test( - boost::optional> nullAnchor, - uint64_t anchorStart, - uint64_t anchorCount, - uint64_t anchorCposStart, - uint64_t anchorCposEnd, - uint64_t nonAnchorStart, - uint64_t nonAnchorCount) { +EmuBinaryResult EmuBinaryV2Test(boost::optional> nullAnchor, + uint64_t anchorStart, + uint64_t anchorCount, + uint64_t anchorCposStart, + uint64_t anchorCposEnd, + uint64_t nonAnchorStart, + uint64_t nonAnchorCount) { TestDocumentCollection coll; ConstDataRange value(testValue); @@ -2843,7 +2851,7 @@ TEST(CompactionHelpersTest, validateCompactionTokensTest) { // validate fails until all fields are present ASSERT_THROWS_CODE(CompactionHelpers::validateCompactionTokens(efc, builder.asTempObj()), DBException, - 6346806); + 7294900); // validate doesn't care about the value, so this is fine builder.append(field.getPath(), "foo"); diff --git a/src/mongo/crypto/fle_crypto_test_vectors.cpp b/src/mongo/crypto/fle_crypto_test_vectors.cpp index 448da457457f1..4e316a1d050df 100644 --- a/src/mongo/crypto/fle_crypto_test_vectors.cpp +++ b/src/mongo/crypto/fle_crypto_test_vectors.cpp @@ -27,32 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include #include +#include #include -#include -#include +#include #include -#include +#include #include -#include "mongo/base/data_range.h" -#include "mongo/bson/bsonmisc.h" -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/bsontypes.h" -#include "mongo/bson/json.h" -#include "mongo/config.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/crypto/fle_crypto.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/object_check.h" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/hex.h" -#include "mongo/util/time_support.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/decimal128.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/stdx/unordered_set.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -122,7 +120,7 @@ struct EdgeCalcTestVector { TEST(EdgeCalcTest, Int32_TestVectors) { std::vector> testVectors = { -#include "test_vectors/edges_int32.cstruct" +#include "test_vectors/edges_int32.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate()); @@ -131,7 +129,7 @@ TEST(EdgeCalcTest, Int32_TestVectors) { TEST(EdgeCalcTest, Int64_TestVectors) { std::vector> testVectors = { -#include "test_vectors/edges_int64.cstruct" +#include "test_vectors/edges_int64.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate()); @@ -148,7 +146,7 @@ std::unique_ptr getEdgesDoubleForTest(double value, TEST(EdgeCalcTest, Double_TestVectors) { std::vector> testVectors = { -#include "test_vectors/edges_double.cstruct" +#include "test_vectors/edges_double.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate()); @@ -168,7 +166,7 @@ std::unique_ptr getEdgesDecimal128ForTest(Decimal128 value, TEST(EdgeCalcTest, Decimal128_TestVectors) { std::vector> testVectors = { -#include "test_vectors/edges_decimal128.cstruct" +#include "test_vectors/edges_decimal128.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate()); @@ -215,7 +213,7 @@ struct MinCoverTestVector { TEST(MinCoverCalcTest, Int32_TestVectors) { const MinCoverTestVector testVectors[] = { -#include "test_vectors/mincover_int32.cstruct" +#include "test_vectors/mincover_int32.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate(minCoverInt32)); @@ -224,7 +222,7 @@ TEST(MinCoverCalcTest, Int32_TestVectors) { TEST(MinCoverCalcTest, Int64_TestVectors) { const MinCoverTestVector testVectors[] = { -#include "test_vectors/mincover_int64.cstruct" +#include "test_vectors/mincover_int64.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate(minCoverInt64)); @@ -251,7 +249,7 @@ std::vector minCoverDoubleForTest(double lowerBound, TEST(MinCoverCalcTest, Double_TestVectors) { MinCoverTestVector testVectors[] = { -#include "test_vectors/mincover_double.cstruct" +#include "test_vectors/mincover_double.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { 
ASSERT_TRUE(testVector.validate(minCoverDoubleForTest)); @@ -280,7 +278,7 @@ std::vector minCoverDecimal128ForTest(Decimal128 lowerBound, TEST(MinCoverCalcTest, Decimal128_TestVectors) { MinCoverTestVector testVectors[] = { -#include "test_vectors/mincover_decimal128.cstruct" +#include "test_vectors/mincover_decimal128.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate(minCoverDecimal128ForTest)); @@ -330,7 +328,7 @@ struct MinCoverTestVectorPrecision { TEST(MinCoverCalcPrecisionTest, Double_TestVectors) { MinCoverTestVectorPrecision testVectors[] = { -#include "test_vectors/mincover_double_precision.cstruct" +#include "test_vectors/mincover_double_precision.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate(minCoverDouble)); @@ -339,7 +337,7 @@ TEST(MinCoverCalcPrecisionTest, Double_TestVectors) { TEST(MinCoverCalcPrecisionTest, Decimal128_TestVectors) { MinCoverTestVectorPrecision testVectors[] = { -#include "test_vectors/mincover_decimal128_precision.cstruct" +#include "test_vectors/mincover_decimal128_precision.cstruct" // IWYU pragma: keep }; for (const auto& testVector : testVectors) { ASSERT_TRUE(testVector.validate(minCoverDecimal128)); diff --git a/src/mongo/crypto/fle_crypto_types.h b/src/mongo/crypto/fle_crypto_types.h index 12bb268b2944b..676cd4ab5ffee 100644 --- a/src/mongo/crypto/fle_crypto_types.h +++ b/src/mongo/crypto/fle_crypto_types.h @@ -250,6 +250,22 @@ struct FLEEdgePrfBlock { boost::optional edc; // EDCDerivedFromDataTokenAndContentionFactorToken }; +/** + * A pair of non-anchor and anchor positions. + */ +struct ESCCountsPair { + uint64_t cpos; + uint64_t apos; +}; + +/** + * A pair of optional non-anchor and anchor positions returned by emulated binary search. + */ +struct EmuBinaryResult { + boost::optional cpos; + boost::optional apos; +}; + /** * The information retrieved from ESC for a given ESC token. Count may reflect a count suitable for * insert or query. @@ -264,11 +280,16 @@ struct FLEEdgeCountInfo { FLEEdgeCountInfo(uint64_t c, ESCTwiceDerivedTagToken t, - boost::optional cpos, - boost::optional apos, + boost::optional searchedCounts, + boost::optional nullAnchorCounts, boost::optional stats, boost::optional edcParam) - : count(c), tagToken(t), cpos(cpos), apos(apos), stats(stats), edc(edcParam) {} + : count(c), + tagToken(t), + searchedCounts(searchedCounts), + nullAnchorCounts(nullAnchorCounts), + stats(stats), + edc(edcParam) {} // May reflect a value suitable for insert or query. 
@@ -276,9 +297,11 @@ struct FLEEdgeCountInfo { ESCTwiceDerivedTagToken tagToken; - boost::optional cpos; + // Positions returned by emuBinary (used by compact & cleanup) + boost::optional searchedCounts; - boost::optional apos; + // Positions obtained from null anchor decode (used by cleanup) + boost::optional nullAnchorCounts; boost::optional stats; diff --git a/src/mongo/crypto/fle_fields_util.cpp b/src/mongo/crypto/fle_fields_util.cpp index 94e6f107ef0bf..87c55289d65e6 100644 --- a/src/mongo/crypto/fle_fields_util.cpp +++ b/src/mongo/crypto/fle_fields_util.cpp @@ -29,11 +29,19 @@ #include "mongo/crypto/fle_fields_util.h" +#include +#include + +#include + #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/crypto/fle_field_schema_gen.h" -#include "mongo/db/basic_types_gen.h" +#include "mongo/db/basic_types.h" #include "mongo/db/exec/document_value/value.h" -#include +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { void validateIDLFLE2EncryptionPlaceholder(const FLE2EncryptionPlaceholder* placeholder) { diff --git a/src/mongo/crypto/fle_options.idl b/src/mongo/crypto/fle_options.idl index 39a0088e26d11..f21eacf8a7c4e 100644 --- a/src/mongo/crypto/fle_options.idl +++ b/src/mongo/crypto/fle_options.idl @@ -68,4 +68,4 @@ server_parameters: protocol version 2" set_at: cluster cpp_varname: "fleCompactionOptions" - cpp_vartype: FLECompactionOptions \ No newline at end of file + cpp_vartype: FLECompactionOptions diff --git a/src/mongo/crypto/fle_stats.cpp b/src/mongo/crypto/fle_stats.cpp index 1798682a12f87..5cc04ccb68855 100644 --- a/src/mongo/crypto/fle_stats.cpp +++ b/src/mongo/crypto/fle_stats.cpp @@ -27,11 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/crypto/fle_stats.h" +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/fle_options_gen.h" +#include "mongo/crypto/fle_stats.h" #include "mongo/util/system_tick_source.h" #include "mongo/util/testing_options_gen.h" @@ -50,6 +51,8 @@ FLEStatusSection::FLEStatusSection(TickSource* tickSource) _compactStats.setEsc(zeroStats); _compactStats.setEcoc(zeroECOC); + _cleanupStats.setEsc(zeroStats); + _cleanupStats.setEcoc(zeroECOC); } FLEStatusSection& FLEStatusSection::get() { @@ -62,12 +65,21 @@ BSONObj FLEStatusSection::generateSection(OperationContext* opCtx, { CompactStats temp; { - stdx::lock_guard lock(_mutex); + stdx::lock_guard lock(_compactMutex); temp = _compactStats; } auto sub = BSONObjBuilder(builder.subobjStart("compactStats")); temp.serialize(&sub); } + { + CleanupStats temp; + { + stdx::lock_guard lock(_cleanupMutex); + temp = _cleanupStats; + } + auto sub = BSONObjBuilder(builder.subobjStart("cleanupStats")); + temp.serialize(&sub); + } if (gTestingDiagnosticsEnabledAtStartup && gUnsupportedDangerousTestingFLEDiagnosticsEnabledAtStartup) { diff --git a/src/mongo/crypto/fle_stats.h b/src/mongo/crypto/fle_stats.h index 39eec42809704..dec484134d3f7 100644 --- a/src/mongo/crypto/fle_stats.h +++ b/src/mongo/crypto/fle_stats.h @@ -30,13 +30,34 @@ #pragma once +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/crypto/fle_stats_gen.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/util/tick_source.h" +#include "mongo/util/timer.h" namespace mongo { +namespace 
FLEStatsUtil { +static void accumulateStats(ECStats& left, const ECStats& right) { + left.setRead(left.getRead() + right.getRead()); + left.setInserted(left.getInserted() + right.getInserted()); + left.setUpdated(left.getUpdated() + right.getUpdated()); + left.setDeleted(left.getDeleted() + right.getDeleted()); +} +static void accumulateStats(ECOCStats& left, const ECOCStats& right) { + left.setRead(left.getRead() + right.getRead()); + left.setDeleted(left.getDeleted() + right.getDeleted()); +} +} // namespace FLEStatsUtil + /** * Tracks and reports statistics about the server-side Queryable Encryption integration. */ @@ -96,26 +117,20 @@ class FLEStatusSection : public ServerStatusSection { EmuBinaryTracker makeEmuBinaryTracker(); void updateCompactionStats(const CompactStats& stats) { - stdx::lock_guard lock(_mutex); - + stdx::lock_guard lock(_compactMutex); _hasStats.store(true); - accumulateStats(_compactStats.getEsc(), stats.getEsc()); - accumulateStats(_compactStats.getEcoc(), stats.getEcoc()); + FLEStatsUtil::accumulateStats(_compactStats.getEsc(), stats.getEsc()); + FLEStatsUtil::accumulateStats(_compactStats.getEcoc(), stats.getEcoc()); } -private: - static void accumulateStats(ECStats& left, const ECStats& right) { - left.setRead(left.getRead() + right.getRead()); - left.setInserted(left.getInserted() + right.getInserted()); - left.setUpdated(left.getUpdated() + right.getUpdated()); - left.setDeleted(left.getDeleted() + right.getDeleted()); - } - - static void accumulateStats(ECOCStats& left, const ECOCStats& right) { - left.setRead(left.getRead() + right.getRead()); - left.setDeleted(left.getDeleted() + right.getDeleted()); + void updateCleanupStats(const CleanupStats& stats) { + stdx::lock_guard lock(_cleanupMutex); + _hasStats.store(true); + FLEStatsUtil::accumulateStats(_cleanupStats.getEsc(), stats.getEsc()); + FLEStatsUtil::accumulateStats(_cleanupStats.getEcoc(), stats.getEcoc()); } +private: TickSource* _tickSource; AtomicWord _hasStats{false}; @@ -124,8 +139,11 @@ class FLEStatusSection : public ServerStatusSection { AtomicWord emuBinarySuboperation; AtomicWord emuBinaryTotalMillis; - mutable Mutex _mutex = MONGO_MAKE_LATCH("FLECompactStats::_mutex"); + mutable Mutex _compactMutex = MONGO_MAKE_LATCH("FLECompactStats::_mutex"); CompactStats _compactStats; + + mutable Mutex _cleanupMutex = MONGO_MAKE_LATCH("FLECleanupStats::_mutex"); + CleanupStats _cleanupStats; }; } // namespace mongo diff --git a/src/mongo/crypto/fle_stats.idl b/src/mongo/crypto/fle_stats.idl index 485a59f8f5058..ba44dd8b0a758 100644 --- a/src/mongo/crypto/fle_stats.idl +++ b/src/mongo/crypto/fle_stats.idl @@ -63,7 +63,10 @@ structs: description: "Stats about records in ECOC, and ESC compact touched" fields: ecoc: ECOCStats - ecc: # TODO: SERVER-68373 remove ecc when 7.0 becomes last LTS - type: ECStats - optional: true + esc: ECStats + + CleanupStats: + description: "Stats about records in ECOC, and ESC cleanup touched" + fields: + ecoc: ECOCStats esc: ECStats diff --git a/src/mongo/crypto/fle_stats_test.cpp b/src/mongo/crypto/fle_stats_test.cpp index 1336c0ee4c642..cee1fb2148c43 100644 --- a/src/mongo/crypto/fle_stats_test.cpp +++ b/src/mongo/crypto/fle_stats_test.cpp @@ -27,24 +27,37 @@ * it in the license file. 
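The cleanup path above gets its own stats bucket and its own latch instead of sharing the compaction mutex, and the accumulate helpers move into the FLEStatsUtil namespace so both update methods can reuse them. Below is a simplified, self-contained sketch of that locking/accumulation pattern; std::mutex stands in for MONGO_MAKE_LATCH and a plain struct stands in for the IDL-generated ECStats, so this is an illustration of the shape, not the server code.

```cpp
// Sketch: one mutex per stats bucket, shared accumulate helper.
#include <cstdint>
#include <iostream>
#include <mutex>

struct ECStats { uint64_t read = 0, inserted = 0, updated = 0, deleted = 0; };

static void accumulateStats(ECStats& left, const ECStats& right) {
    left.read += right.read;
    left.inserted += right.inserted;
    left.updated += right.updated;
    left.deleted += right.deleted;
}

class StatsSection {
public:
    void updateCompactionStats(const ECStats& esc) {
        std::lock_guard<std::mutex> lk(_compactMutex);
        accumulateStats(_compactEsc, esc);
    }
    void updateCleanupStats(const ECStats& esc) {
        std::lock_guard<std::mutex> lk(_cleanupMutex);
        accumulateStats(_cleanupEsc, esc);
    }
    void report() {
        ECStats compactCopy, cleanupCopy;
        {
            std::lock_guard<std::mutex> lk(_compactMutex);
            compactCopy = _compactEsc;
        }
        {
            std::lock_guard<std::mutex> lk(_cleanupMutex);
            cleanupCopy = _cleanupEsc;
        }
        std::cout << "compact.read=" << compactCopy.read
                  << " cleanup.read=" << cleanupCopy.read << '\n';
    }

private:
    std::mutex _compactMutex;
    ECStats _compactEsc;
    std::mutex _cleanupMutex;
    ECStats _cleanupEsc;
};

int main() {
    StatsSection section;
    section.updateCompactionStats({5, 1, 0, 0});
    section.updateCleanupStats({2, 0, 0, 1});
    section.report();
    return 0;
}
```

Copying each bucket under its own lock before serializing, as generateSection does, keeps the report consistent without holding either latch across BSON building.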
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/fle_stats.h" - -#include "mongo/bson/unordered_fields_bsonobj_comparator.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/testing_options_gen.h" #include "mongo/util/tick_source_mock.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest namespace mongo { +namespace { -class FLEStatsTest : public unittest::Test { +class FLEStatsTest : public ServiceContextTest { public: + FLEStatsTest() { + opCtxPtr = makeOperationContext(); + opCtx = opCtxPtr.get(); + } + void setUp() final { + ServiceContextTest::setUp(); oldDiagnosticsFlag = gTestingDiagnosticsEnabledAtStartup; tickSource = std::make_unique>(); instance = std::make_unique(tickSource.get()); @@ -52,9 +65,12 @@ class FLEStatsTest : public unittest::Test { void tearDown() final { gTestingDiagnosticsEnabledAtStartup = oldDiagnosticsFlag; + ServiceContextTest::tearDown(); } -protected: + ServiceContext::UniqueOperationContext opCtxPtr; + OperationContext* opCtx; + CompactStats zeroStats = CompactStats::parse( IDLParserContext("compactStats"), BSON("ecoc" << BSON("deleted" << 0 << "read" << 0) << "esc" @@ -64,18 +80,26 @@ class FLEStatsTest : public unittest::Test { IDLParserContext("compactStats"), BSON("ecoc" << BSON("deleted" << 1 << "read" << 1) << "esc" << BSON("deleted" << 1 << "inserted" << 1 << "read" << 1 << "updated" << 1))); + + CleanupStats cleanupStats = CleanupStats::parse( + IDLParserContext("cleanupStats"), + BSON("ecoc" << BSON("deleted" << 1 << "read" << 1) << "esc" + << BSON("deleted" << 1 << "inserted" << 1 << "read" << 1 << "updated" << 1))); + std::unique_ptr> tickSource; std::unique_ptr instance; - OperationContextNoop opCtx; + bool oldDiagnosticsFlag; }; TEST_F(FLEStatsTest, NoopStats) { ASSERT_FALSE(instance->includeByDefault()); - auto obj = instance->generateSection(&opCtx, BSONElement()); + auto obj = instance->generateSection(opCtx, BSONElement()); ASSERT_TRUE(obj.hasField("compactStats")); ASSERT_BSONOBJ_EQ(zeroStats.toBSON(), obj["compactStats"].Obj()); + ASSERT_TRUE(obj.hasField("cleanupStats")); + ASSERT_BSONOBJ_EQ(zeroStats.toBSON(), obj["cleanupStats"].Obj()); ASSERT_FALSE(obj.hasField("emuBinaryStats")); } @@ -84,10 +108,25 @@ TEST_F(FLEStatsTest, CompactStats) { ASSERT_TRUE(instance->includeByDefault()); - auto obj = instance->generateSection(&opCtx, BSONElement()); + auto obj = instance->generateSection(opCtx, BSONElement()); ASSERT_TRUE(obj.hasField("compactStats")); ASSERT_BSONOBJ_NE(zeroStats.toBSON(), obj["compactStats"].Obj()); ASSERT_BSONOBJ_EQ(compactStats.toBSON(), obj["compactStats"].Obj()); + ASSERT_TRUE(obj.hasField("cleanupStats")); + ASSERT_BSONOBJ_EQ(zeroStats.toBSON(), obj["cleanupStats"].Obj()); + ASSERT_FALSE(obj.hasField("emuBinaryStats")); +} + +TEST_F(FLEStatsTest, CleanupStats) { + instance->updateCleanupStats(cleanupStats); + + ASSERT_TRUE(instance->includeByDefault()); + + auto obj = instance->generateSection(opCtx, BSONElement()); + ASSERT_TRUE(obj.hasField("compactStats")); + 
ASSERT_BSONOBJ_EQ(zeroStats.toBSON(), obj["compactStats"].Obj()); + ASSERT_TRUE(obj.hasField("cleanupStats")); + ASSERT_BSONOBJ_EQ(cleanupStats.toBSON(), obj["cleanupStats"].Obj()); ASSERT_FALSE(obj.hasField("emuBinaryStats")); } @@ -99,9 +138,11 @@ TEST_F(FLEStatsTest, BinaryEmuStatsAreEmptyWithoutTesting) { ASSERT_FALSE(instance->includeByDefault()); - auto obj = instance->generateSection(&opCtx, BSONElement()); + auto obj = instance->generateSection(opCtx, BSONElement()); ASSERT_TRUE(obj.hasField("compactStats")); ASSERT_BSONOBJ_EQ(zeroStats.toBSON(), obj["compactStats"].Obj()); + ASSERT_TRUE(obj.hasField("cleanupStats")); + ASSERT_BSONOBJ_EQ(zeroStats.toBSON(), obj["cleanupStats"].Obj()); ASSERT_FALSE(obj.hasField("emuBinaryStats")); } @@ -118,14 +159,16 @@ TEST_F(FLEStatsTest, BinaryEmuStatsArePopulatedWithTesting) { ASSERT_TRUE(instance->includeByDefault()); - auto obj = instance->generateSection(&opCtx, BSONElement()); + auto obj = instance->generateSection(opCtx, BSONElement()); ASSERT_TRUE(obj.hasField("compactStats")); ASSERT_BSONOBJ_EQ(zeroStats.toBSON(), obj["compactStats"].Obj()); + ASSERT_TRUE(obj.hasField("cleanupStats")); + ASSERT_BSONOBJ_EQ(zeroStats.toBSON(), obj["cleanupStats"].Obj()); ASSERT_TRUE(obj.hasField("emuBinaryStats")); ASSERT_EQ(1, obj["emuBinaryStats"]["calls"].Long()); ASSERT_EQ(1, obj["emuBinaryStats"]["suboperations"].Long()); ASSERT_EQ(100, obj["emuBinaryStats"]["totalMillis"].Long()); } - +} // namespace } // namespace mongo diff --git a/src/mongo/crypto/fle_tags.cpp b/src/mongo/crypto/fle_tags.cpp index ffd901e356784..886f78e2403b6 100644 --- a/src/mongo/crypto/fle_tags.cpp +++ b/src/mongo/crypto/fle_tags.cpp @@ -27,13 +27,20 @@ * it in the license file. */ -#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_tags.h" -#include "mongo/db/fle_crud.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" namespace mongo::fle { @@ -95,16 +102,12 @@ size_t sizeArrayElementsMemory(size_t tagCount) { return size; } -// A positive contention factor (cm) means we must run the above algorithm (cm) times. -std::vector readTags(FLETagQueryInterface* queryImpl, - const NamespaceString& nssEsc, - ESCDerivedFromDataToken s, - EDCDerivedFromDataToken d, - boost::optional cm) { - - auto memoryLimit = static_cast(internalQueryFLERewriteMemoryLimit.load()); +std::vector> getCountInfoSets(FLETagQueryInterface* queryImpl, + const NamespaceString& nssEsc, + ESCDerivedFromDataToken s, + EDCDerivedFromDataToken d, + boost::optional cm) { auto contentionMax = cm.value_or(0); - std::vector binaryTags; std::vector blocks; blocks.reserve(contentionMax + 1); @@ -123,9 +126,21 @@ std::vector readTags(FLETagQueryInterface* queryImpl, std::vector> blockSets; blockSets.push_back(blocks); - auto countInfoSets = - queryImpl->getTags(nssEsc, blockSets, FLETagQueryInterface::TagQueryType::kQuery); + return queryImpl->getTags(nssEsc, blockSets, FLETagQueryInterface::TagQueryType::kQuery); +} + + +// A positive contention factor (cm) means we must run the above algorithm (cm) times. 
+std::vector readTags(FLETagQueryInterface* queryImpl, + const NamespaceString& nssEsc, + ESCDerivedFromDataToken s, + EDCDerivedFromDataToken d, + boost::optional cm) { + + auto memoryLimit = static_cast(internalQueryFLERewriteMemoryLimit.load()); + std::vector binaryTags; + auto countInfoSets = getCountInfoSets(queryImpl, nssEsc, s, d, cm); // Count how many tags we will need and check once if we they will fit // diff --git a/src/mongo/crypto/fle_tags.h b/src/mongo/crypto/fle_tags.h index ac69de0350a87..b2bf4708ea50f 100644 --- a/src/mongo/crypto/fle_tags.h +++ b/src/mongo/crypto/fle_tags.h @@ -30,8 +30,13 @@ #pragma once #include +#include +#include +#include #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/db/namespace_string.h" namespace mongo { class FLETagQueryInterface; @@ -39,6 +44,12 @@ class FLETagQueryInterface; namespace mongo::fle { +std::vector> getCountInfoSets(FLETagQueryInterface* queryImpl, + const NamespaceString& nssEsc, + ESCDerivedFromDataToken s, + EDCDerivedFromDataToken d, + boost::optional cm); + /** * Read a list of binary tags given ESC and and EDC derived tokens and a maximum contention * factor. diff --git a/src/mongo/crypto/hash_block.h b/src/mongo/crypto/hash_block.h index e16d6301d41a6..df4bdd64c1575 100644 --- a/src/mongo/crypto/hash_block.h +++ b/src/mongo/crypto/hash_block.h @@ -29,10 +29,11 @@ #pragma once -#include +#include #include #include #include +#include #include #include "mongo/base/data_range.h" @@ -43,6 +44,7 @@ #include "mongo/bson/util/builder.h" #include "mongo/util/base64.h" #include "mongo/util/hex.h" +#include "mongo/util/murmur3.h" #include "mongo/util/secure_compare_memory.h" namespace mongo { @@ -301,15 +303,29 @@ class HashBlock { * Custom hasher so HashBlocks can be used in unordered data structures. * * ex: std::unordered_set shaSet; + * + * Cryptographically secure hashes are good hashes so no need to hash them again. Just truncate + * the hash and return it. 
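readTags() is split so that the token-block construction and the queryImpl->getTags() call live in a reusable getCountInfoSets(), leaving readTags() to consume the returned count-info sets against the memory limit. The sketch below shows only the shape of that split, with placeholder types in place of the real FLE tokens and query interface.

```cpp
// Sketch: extract the "build inputs + run query" step into a helper the
// tag-reading path (and other callers) can share.
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

struct CountInfo { uint64_t count; };

// Stand-in for the tag query interface call.
std::vector<std::vector<CountInfo>> queryTags(const std::vector<uint64_t>& contentionFactors) {
    std::vector<std::vector<CountInfo>> out;
    for (auto cf : contentionFactors)
        out.push_back({CountInfo{cf + 1}});  // fabricated counts for the sketch
    return out;
}

// One block per contention factor in [0, cm], mirroring the loop in the diff.
std::vector<std::vector<CountInfo>> getCountInfoSets(std::optional<int64_t> cm) {
    std::vector<uint64_t> factors;
    for (int64_t i = 0; i <= cm.value_or(0); ++i)
        factors.push_back(static_cast<uint64_t>(i));
    return queryTags(factors);
}

std::vector<uint64_t> readTags(std::optional<int64_t> cm) {
    std::vector<uint64_t> tags;
    for (const auto& set : getCountInfoSets(cm))
        for (const auto& info : set)
            tags.push_back(info.count);
    return tags;
}

int main() {
    for (auto t : readTags(int64_t{2}))
        std::cout << t << ' ';
    std::cout << '\n';
    return 0;
}
```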
*/ struct Hash { std::size_t operator()(const HashBlock& HashBlock) const { - uint32_t hash; - MurmurHash3_x86_32(HashBlock.data(), HashBlock::kHashLength, 0, &hash); - return hash; + static_assert(kHashLength >= sizeof(std::size_t)); + + return ConstDataView(reinterpret_cast(HashBlock.data())) + .read>(); } }; + /** + * Hash function compatible with absl::Hash for absl::unordered_{map,set} + */ + template + friend H AbslHashValue(H h, const HashBlock& HashBlock) { + static_assert(kHashLength >= sizeof(std::size_t)); + + return H::combine(std::move(h), Hash()(HashBlock)); + } + private: // The backing array of bytes for the sha block HashType _hash; diff --git a/src/mongo/crypto/jwk_manager.cpp b/src/mongo/crypto/jwk_manager.cpp index 79a65f0db195a..47730b7aa91a2 100644 --- a/src/mongo/crypto/jwk_manager.cpp +++ b/src/mongo/crypto/jwk_manager.cpp @@ -29,10 +29,23 @@ #include "mongo/crypto/jwk_manager.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/crypto/jws_validator.h" #include "mongo/crypto/jwt_types_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl diff --git a/src/mongo/crypto/jwk_manager.h b/src/mongo/crypto/jwk_manager.h index e3c5e1986b00e..30d588fc19fa4 100644 --- a/src/mongo/crypto/jwk_manager.h +++ b/src/mongo/crypto/jwk_manager.h @@ -29,14 +29,18 @@ #pragma once -#include "mongo/crypto/jwt_types_gen.h" +#include #include +#include #include +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/jwks_fetcher.h" #include "mongo/crypto/jws_validator.h" +#include "mongo/crypto/jwt_types_gen.h" namespace mongo::crypto { diff --git a/src/mongo/crypto/jwks_fetcher_impl.cpp b/src/mongo/crypto/jwks_fetcher_impl.cpp index 69f0d24ea6e1a..63e4e1f15282c 100644 --- a/src/mongo/crypto/jwks_fetcher_impl.cpp +++ b/src/mongo/crypto/jwks_fetcher_impl.cpp @@ -29,10 +29,22 @@ #include "mongo/crypto/jwks_fetcher_impl.h" +#include + +#include + +#include "mongo/base/data_builder.h" +#include "mongo/base/data_range.h" +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/error_codes.h" #include "mongo/bson/json.h" +#include "mongo/db/auth/oauth_authorization_server_metadata_gen.h" #include "mongo/db/auth/oauth_discovery_factory.h" #include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/http_client.h" +#include "mongo/util/str.h" namespace mongo::crypto { diff --git a/src/mongo/crypto/jwks_fetcher_impl.h b/src/mongo/crypto/jwks_fetcher_impl.h index 582f986def54f..312a1b89a79cb 100644 --- a/src/mongo/crypto/jwks_fetcher_impl.h +++ b/src/mongo/crypto/jwks_fetcher_impl.h @@ -29,9 +29,11 @@ #pragma once -#include "mongo/crypto/jwks_fetcher.h" +#include #include "mongo/base/string_data.h" +#include "mongo/crypto/jwks_fetcher.h" +#include "mongo/crypto/jwt_types_gen.h" namespace mongo::crypto { diff --git a/src/mongo/crypto/jws_validated_token.cpp b/src/mongo/crypto/jws_validated_token.cpp index e8a0738e905c1..bf4b63020405b 100644 --- a/src/mongo/crypto/jws_validated_token.cpp +++ b/src/mongo/crypto/jws_validated_token.cpp @@ -29,13 +29,23 @@ #include 
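The hasher above no longer re-hashes the digest with MurmurHash3_x86_32: because the block is already a uniformly distributed cryptographic hash, the first sizeof(std::size_t) bytes are reinterpreted as the hash value (guarded by the static_assert), and AbslHashValue forwards to the same functor. A standalone sketch of the truncation idea, with std::memcpy standing in for the ConstDataView read in the diff:

```cpp
// Sketch: "truncate, don't re-hash" - reuse the leading bytes of an
// already-uniform digest as the container hash value.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>

constexpr size_t kHashLength = 32;  // e.g. a SHA-256 sized block

size_t truncateDigest(const std::array<uint8_t, kHashLength>& digest) {
    static_assert(kHashLength >= sizeof(size_t), "digest must cover size_t");
    size_t out;
    std::memcpy(&out, digest.data(), sizeof(out));
    return out;
}

int main() {
    std::array<uint8_t, kHashLength> digest{};
    digest[0] = 0xAB;  // pretend this came from a real SHA-256 computation
    std::cout << std::hex << truncateDigest(digest) << '\n';
    return 0;
}
```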
"mongo/crypto/jws_validated_token.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" -#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/crypto/jws_validator.h" -#include "mongo/db/basic_types_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/assert_util.h" #include "mongo/util/base64.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo::crypto { namespace { diff --git a/src/mongo/crypto/jws_validated_token.h b/src/mongo/crypto/jws_validated_token.h index 58474ec3e17f4..4217bf8ec31b9 100644 --- a/src/mongo/crypto/jws_validated_token.h +++ b/src/mongo/crypto/jws_validated_token.h @@ -29,6 +29,11 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/crypto/jwk_manager.h" #include "mongo/crypto/jwt_types_gen.h" diff --git a/src/mongo/crypto/jws_validated_token_test.cpp b/src/mongo/crypto/jws_validated_token_test.cpp index 1f1998d4bec0e..4a4b74fc10647 100644 --- a/src/mongo/crypto/jws_validated_token_test.cpp +++ b/src/mongo/crypto/jws_validated_token_test.cpp @@ -29,22 +29,21 @@ #include "mongo/crypto/jws_validated_token.h" -#include -#include -#include - +#include #include +#include #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/crypto/jwk_manager.h" +#include "mongo/crypto/jwks_fetcher.h" #include "mongo/crypto/jwks_fetcher_mock.h" -#include "mongo/crypto/jws_validator.h" +#include "mongo/idl/idl_parser.h" #include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/base64.h" diff --git a/src/mongo/crypto/jws_validator.h b/src/mongo/crypto/jws_validator.h index 3bb599d4643de..9e096f5c2e40b 100644 --- a/src/mongo/crypto/jws_validator.h +++ b/src/mongo/crypto/jws_validator.h @@ -29,8 +29,12 @@ #pragma once +#include + +#include "mongo/base/error_extra_info.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" namespace mongo::crypto { diff --git a/src/mongo/crypto/jws_validator_openssl.cpp b/src/mongo/crypto/jws_validator_openssl.cpp index e6d8cfc385fba..0d96f54e27b12 100644 --- a/src/mongo/crypto/jws_validator_openssl.cpp +++ b/src/mongo/crypto/jws_validator_openssl.cpp @@ -27,16 +27,27 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/crypto/jws_validator.h" #include "mongo/crypto/jwt_types_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/assert_util.h" #include "mongo/util/net/ssl_manager.h" - -#include -#include -#include +#include "mongo/util/str.h" #if OPENSSL_VERSION_NUMBER < 0x10100000L || \ (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x2070000fL) diff --git a/src/mongo/crypto/jws_validator_test.cpp b/src/mongo/crypto/jws_validator_test.cpp index 36f81731193a5..a8e67ecc47457 100644 --- a/src/mongo/crypto/jws_validator_test.cpp +++ b/src/mongo/crypto/jws_validator_test.cpp @@ -29,15 +29,14 @@ #include "mongo/crypto/jws_validator.h" -#include #include -#include #include "mongo/base/data_range.h" #include "mongo/base/status.h" -#include "mongo/bson/bsontypes.h" -#include "mongo/config.h" -#include "mongo/unittest/unittest.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/base64.h" #include "mongo/util/hex.h" diff --git a/src/mongo/crypto/jwt_test.cpp b/src/mongo/crypto/jwt_test.cpp index 0cc4baa02c26b..c7405902cc3fe 100644 --- a/src/mongo/crypto/jwt_test.cpp +++ b/src/mongo/crypto/jwt_test.cpp @@ -27,12 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/bson/json.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/jwk_manager.h" +#include "mongo/crypto/jwks_fetcher.h" #include "mongo/crypto/jwks_fetcher_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/net/http_client.h" diff --git a/src/mongo/crypto/jwt_types.idl b/src/mongo/crypto/jwt_types.idl index f8cc4034e8b7e..ed23a57ae4752 100644 --- a/src/mongo/crypto/jwt_types.idl +++ b/src/mongo/crypto/jwt_types.idl @@ -27,6 +27,8 @@ global: cpp_namespace: "mongo::crypto" + cpp_includes: + - "mongo/util/base64.h" imports: - "mongo/db/basic_types.idl" diff --git a/src/mongo/crypto/mechanism_scram.h b/src/mongo/crypto/mechanism_scram.h index fd70e585cfde2..7c2eaf53d10b6 100644 --- a/src/mongo/crypto/mechanism_scram.h +++ b/src/mongo/crypto/mechanism_scram.h @@ -29,17 +29,30 @@ #pragma once +#include #include +#include +#include +#include #include #include +#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/base/secure_allocator.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/sha1_block.h" #include "mongo/db/jsobj.h" #include "mongo/platform/random.h" #include "mongo/util/assert_util.h" +#include "mongo/util/base64.h" +#include "mongo/util/secure_compare_memory.h" namespace mongo { namespace scram { diff --git a/src/mongo/crypto/mechanism_scram_test.cpp b/src/mongo/crypto/mechanism_scram_test.cpp index 6185844fe711a..cc08ded7dcb8d 100644 --- 
a/src/mongo/crypto/mechanism_scram_test.cpp +++ b/src/mongo/crypto/mechanism_scram_test.cpp @@ -28,12 +28,16 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/crypto/mechanism_scram.h" #include "mongo/crypto/sha1_block.h" #include "mongo/crypto/sha256_block.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/crypto/scripts/encryption_fle2_test_vectors.sh b/src/mongo/crypto/scripts/encryption_fle2_test_vectors.sh index 85fef45c3e682..dde59c37106e7 100644 --- a/src/mongo/crypto/scripts/encryption_fle2_test_vectors.sh +++ b/src/mongo/crypto/scripts/encryption_fle2_test_vectors.sh @@ -15,4 +15,4 @@ cat < +#include +#include +#include "mongo/base/data_range.h" +#include "mongo/base/string_data.h" +#include "mongo/crypto/hash_block.h" #include "mongo/util/make_array_type.h" namespace mongo { diff --git a/src/mongo/crypto/sha1_block_test.cpp b/src/mongo/crypto/sha1_block_test.cpp index 2c22374340893..c1dd222f787db 100644 --- a/src/mongo/crypto/sha1_block_test.cpp +++ b/src/mongo/crypto/sha1_block_test.cpp @@ -27,12 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/bson/bsonmisc.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/crypto/sha1_block.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/crypto/sha256_block.cpp b/src/mongo/crypto/sha256_block.cpp index 3ca20d3a4d1de..a232a5ef93291 100644 --- a/src/mongo/crypto/sha256_block.cpp +++ b/src/mongo/crypto/sha256_block.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/crypto/sha256_block.h" namespace mongo { diff --git a/src/mongo/crypto/sha256_block.h b/src/mongo/crypto/sha256_block.h index 606e36cf516ad..f630ce54f778f 100644 --- a/src/mongo/crypto/sha256_block.h +++ b/src/mongo/crypto/sha256_block.h @@ -29,8 +29,13 @@ #pragma once -#include "mongo/crypto/hash_block.h" +#include +#include +#include +#include "mongo/base/data_range.h" +#include "mongo/base/string_data.h" +#include "mongo/crypto/hash_block.h" #include "mongo/util/make_array_type.h" namespace mongo { diff --git a/src/mongo/crypto/sha256_block_test.cpp b/src/mongo/crypto/sha256_block_test.cpp index 8c62a032943c8..e415a6a85acfd 100644 --- a/src/mongo/crypto/sha256_block_test.cpp +++ b/src/mongo/crypto/sha256_block_test.cpp @@ -27,12 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/bson/bsonmisc.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/crypto/sha256_block.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/crypto/sha512_block.h b/src/mongo/crypto/sha512_block.h index 58b0e1c1d161e..9dc29e2be6b3f 100644 --- a/src/mongo/crypto/sha512_block.h +++ b/src/mongo/crypto/sha512_block.h @@ -29,8 +29,13 @@ #pragma once -#include "mongo/crypto/hash_block.h" +#include +#include +#include +#include "mongo/base/data_range.h" +#include "mongo/base/string_data.h" +#include "mongo/crypto/hash_block.h" #include "mongo/util/make_array_type.h" namespace mongo { diff --git a/src/mongo/crypto/sha512_block_test.cpp b/src/mongo/crypto/sha512_block_test.cpp index 79ef4e3ffcc03..2f73b84eb557f 100644 --- a/src/mongo/crypto/sha512_block_test.cpp +++ b/src/mongo/crypto/sha512_block_test.cpp @@ -27,12 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/bson/bsonmisc.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/crypto/sha512_block.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/crypto/sha_block_openssl.cpp b/src/mongo/crypto/sha_block_openssl.cpp index 80a752fbafa54..1a8abb2031472 100644 --- a/src/mongo/crypto/sha_block_openssl.cpp +++ b/src/mongo/crypto/sha_block_openssl.cpp @@ -27,25 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include "mongo/base/data_range.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/crypto/sha1_block.h" #include "mongo/crypto/sha256_block.h" #include "mongo/crypto/sha512_block.h" - -#include "mongo/config.h" #include "mongo/util/assert_util.h" #ifndef MONGO_CONFIG_SSL #error This file should only be included in SSL-enabled builds #endif +#include #include #include #include -#include +#include +#include #if OPENSSL_VERSION_NUMBER < 0x10100000L || \ (defined(LIBRESSL_VERSION_NUMBER) && LIBRESSL_VERSION_NUMBER < 0x2070000fL) @@ -87,6 +89,66 @@ namespace mongo { namespace { + +/** + * Class to load singleton instances of each SHA algorithm. 
+ */ +#if OPENSSL_VERSION_NUMBER > 0x30000000L +class OpenSSLHashLoader { +public: + OpenSSLHashLoader() { + _algoSHA1 = EVP_MD_fetch(NULL, "SHA1", NULL); + _algoSHA256 = EVP_MD_fetch(NULL, "SHA2-256", NULL); + _algoSHA512 = EVP_MD_fetch(NULL, "SHA2-512", NULL); + } + + ~OpenSSLHashLoader() { + EVP_MD_free(_algoSHA1); + EVP_MD_free(_algoSHA256); + EVP_MD_free(_algoSHA512); + } + + const EVP_MD* getSHA512() { + return _algoSHA512; + } + + const EVP_MD* getSHA256() { + return _algoSHA256; + } + + const EVP_MD* getSHA1() { + return _algoSHA1; + } + +private: + EVP_MD* _algoSHA512; + EVP_MD* _algoSHA256; + EVP_MD* _algoSHA1; +}; +#else + +class OpenSSLHashLoader { +public: + const EVP_MD* getSHA512() { + return EVP_sha512(); + } + + const EVP_MD* getSHA256() { + return EVP_sha256(); + } + + const EVP_MD* getSHA1() { + return EVP_sha1(); + } +}; +#endif + +static OpenSSLHashLoader& getOpenSSLHashLoader() { + static OpenSSLHashLoader* loader = new OpenSSLHashLoader(); + return *loader; +} + + /* * Computes a SHA hash of 'input'. */ @@ -131,38 +193,41 @@ void computeHmacImpl(const EVP_MD* md, void SHA1BlockTraits::computeHash(std::initializer_list input, HashType* const output) { - computeHashImpl(EVP_sha1(), input, output); + computeHashImpl(getOpenSSLHashLoader().getSHA1(), input, output); } void SHA256BlockTraits::computeHash(std::initializer_list input, HashType* const output) { - computeHashImpl(EVP_sha256(), input, output); + computeHashImpl(getOpenSSLHashLoader().getSHA256(), input, output); } void SHA512BlockTraits::computeHash(std::initializer_list input, HashType* const output) { - computeHashImpl(EVP_sha512(), input, output); + computeHashImpl(getOpenSSLHashLoader().getSHA512(), input, output); } void SHA1BlockTraits::computeHmac(const uint8_t* key, size_t keyLen, std::initializer_list input, SHA1BlockTraits::HashType* const output) { - return computeHmacImpl(EVP_sha1(), key, keyLen, input, output); + return computeHmacImpl( + getOpenSSLHashLoader().getSHA1(), key, keyLen, input, output); } void SHA256BlockTraits::computeHmac(const uint8_t* key, size_t keyLen, std::initializer_list input, SHA256BlockTraits::HashType* const output) { - return computeHmacImpl(EVP_sha256(), key, keyLen, input, output); + return computeHmacImpl( + getOpenSSLHashLoader().getSHA256(), key, keyLen, input, output); } void SHA512BlockTraits::computeHmac(const uint8_t* key, size_t keyLen, std::initializer_list input, SHA512BlockTraits::HashType* const output) { - return computeHmacImpl(EVP_sha512(), key, keyLen, input, output); + return computeHmacImpl( + getOpenSSLHashLoader().getSHA512(), key, keyLen, input, output); } } // namespace mongo diff --git a/src/mongo/crypto/symmetric_crypto.cpp b/src/mongo/crypto/symmetric_crypto.cpp index 7c7dde399baf1..93cc5f52417f7 100644 --- a/src/mongo/crypto/symmetric_crypto.cpp +++ b/src/mongo/crypto/symmetric_crypto.cpp @@ -28,20 +28,18 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/crypto/symmetric_crypto.h" - -#include +#include -#include "mongo/base/data_cursor.h" -#include "mongo/base/init.h" -#include "mongo/base/status.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/secure_allocator.h" +#include "mongo/crypto/symmetric_crypto.h" #include "mongo/crypto/symmetric_key.h" #include "mongo/platform/random.h" #include "mongo/util/assert_util.h" -#include "mongo/util/net/ssl_manager.h" -#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
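On OpenSSL 3.x, EVP_MD_fetch() resolves the digest through the provider machinery, so fetching each algorithm once at startup and reusing the handles avoids a per-call lookup; on older OpenSSL the loader simply returns the static EVP_sha*() objects. The following standalone sketch shows the fetch-once pattern for a single digest, assuming OpenSSL 3.x headers are available; error handling is kept minimal and the leaked function-local singleton mirrors the getOpenSSLHashLoader() shape above.

```cpp
// Sketch: fetch the EVP_MD once, reuse it for every digest computation.
#include <openssl/evp.h>
#include <cstdio>

class HashLoader {
public:
    HashLoader() : _sha256(EVP_MD_fetch(nullptr, "SHA2-256", nullptr)) {}
    ~HashLoader() { EVP_MD_free(_sha256); }
    const EVP_MD* sha256() const { return _sha256; }

private:
    EVP_MD* _sha256;
};

// Deliberately leaked singleton so the handle outlives static destruction.
static HashLoader& loader() {
    static HashLoader* instance = new HashLoader();
    return *instance;
}

int main() {
    unsigned char digest[EVP_MAX_MD_SIZE];
    unsigned int len = 0;
    const char msg[] = "hello";
    if (EVP_Digest(msg, sizeof(msg) - 1, digest, &len, loader().sha256(), nullptr) != 1)
        return 1;
    std::printf("digest length: %u bytes\n", len);
    return 0;
}
```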
::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/crypto/symmetric_crypto.h b/src/mongo/crypto/symmetric_crypto.h index d739fbe9c074a..159af19a0235b 100644 --- a/src/mongo/crypto/symmetric_crypto.h +++ b/src/mongo/crypto/symmetric_crypto.h @@ -31,6 +31,7 @@ #include #include +#include #include #include diff --git a/src/mongo/crypto/symmetric_crypto_openssl.cpp b/src/mongo/crypto/symmetric_crypto_openssl.cpp index 42efc9a1d8d48..ceaeab2c66849 100644 --- a/src/mongo/crypto/symmetric_crypto_openssl.cpp +++ b/src/mongo/crypto/symmetric_crypto_openssl.cpp @@ -28,18 +28,28 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include +#include +#include +#include #include #include +#include +#include + +#include +#include -#include "mongo/base/data_cursor.h" -#include "mongo/base/init.h" +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/crypto/symmetric_crypto.h" #include "mongo/crypto/symmetric_key.h" -#include "mongo/platform/random.h" #include "mongo/util/assert_util.h" #include "mongo/util/net/ssl_manager.h" #include "mongo/util/str.h" @@ -50,6 +60,79 @@ namespace mongo { namespace crypto { + +/** + * Class to load singleton instances of each Encryption Cipher algorithm. + */ +#if OPENSSL_VERSION_NUMBER > 0x30000000L + +class OpenSSLCipherLoader { +public: + OpenSSLCipherLoader() { + _algoAES256CBC = EVP_CIPHER_fetch(NULL, "AES-256-CBC", NULL); + _algoAES256GCM = EVP_CIPHER_fetch(NULL, "AES-256-GCM", NULL); + _algoAES256CTR = EVP_CIPHER_fetch(NULL, "AES-256-CTR", NULL); + } + + ~OpenSSLCipherLoader() { + EVP_CIPHER_free(_algoAES256CBC); + EVP_CIPHER_free(_algoAES256GCM); + EVP_CIPHER_free(_algoAES256CTR); + } + + const EVP_CIPHER* getAES256CTR() { + return _algoAES256CTR; + } + + const EVP_CIPHER* getAES256GCM() { + return _algoAES256GCM; + } + + const EVP_CIPHER* getAES256CBC() { + return _algoAES256CBC; + } + +private: + EVP_CIPHER* _algoAES256CTR; + EVP_CIPHER* _algoAES256GCM; + EVP_CIPHER* _algoAES256CBC; +}; +#else + +class OpenSSLCipherLoader { +public: + OpenSSLCipherLoader() { + _algoAES256CBC = EVP_get_cipherbyname("aes-256-cbc"); + _algoAES256GCM = EVP_get_cipherbyname("aes-256-gcm"); + _algoAES256CTR = EVP_get_cipherbyname("aes-256-ctr"); + } + + const EVP_CIPHER* getAES256CTR() { + return _algoAES256CTR; + } + + const EVP_CIPHER* getAES256GCM() { + return _algoAES256GCM; + } + + const EVP_CIPHER* getAES256CBC() { + return _algoAES256CBC; + } + +private: + const EVP_CIPHER* _algoAES256CTR; + const EVP_CIPHER* _algoAES256GCM; + const EVP_CIPHER* _algoAES256CBC; +}; + +#endif + +static OpenSSLCipherLoader& getOpenSSLCipherLoader() { + static OpenSSLCipherLoader* loader = new OpenSSLCipherLoader(); + return *loader; +} + + namespace { template void initCipherContext( @@ -58,11 +141,11 @@ void initCipherContext( const EVP_CIPHER* cipher = nullptr; if (keySize == sym256KeySize) { if (mode == crypto::aesMode::cbc) { - cipher = EVP_get_cipherbyname("aes-256-cbc"); + cipher = getOpenSSLCipherLoader().getAES256CBC(); } else if (mode == crypto::aesMode::gcm) { - cipher = EVP_get_cipherbyname("aes-256-gcm"); + cipher = getOpenSSLCipherLoader().getAES256GCM(); } else if (mode == crypto::aesMode::ctr) { - cipher = EVP_get_cipherbyname("aes-256-ctr"); + cipher = getOpenSSLCipherLoader().getAES256CTR(); } } uassert(ErrorCodes::BadValue, @@ -84,6 +167,26 @@ class SymmetricEncryptorOpenSSL : public 
SymmetricEncryptor { } StatusWith update(ConstDataRange in, DataRange out) final { + size_t cipherBlockSize = EVP_CIPHER_CTX_block_size(_ctx.get()); + + + if (out.data() == nullptr) { + // Presumed intentional null output buffer + invariant(out.length() == 0); + } else { + // Data is padded to the next multiple of cipherBlockSize + size_t minimumOutputSize = in.length(); + if (auto remainder = in.length() % cipherBlockSize) { + minimumOutputSize += cipherBlockSize - remainder; + } + + if (out.length() < minimumOutputSize) { + return Status(ErrorCodes::Overflow, + str::stream() << "Write buffer too small for Encryptor update: " + << static_cast(out.length())); + } + } + int len = 0; if (1 != EVP_EncryptUpdate( @@ -114,6 +217,14 @@ class SymmetricEncryptorOpenSSL : public SymmetricEncryptor { } StatusWith finalize(DataRange out) final { + + size_t cipherBlockSize = EVP_CIPHER_CTX_block_size(_ctx.get()); + + if (cipherBlockSize > 1 && out.length() < cipherBlockSize) { + return Status(ErrorCodes::Overflow, + str::stream() << "Write buffer too small for Encryptor finalize: " + << static_cast(out.length())); + } int len = 0; if (1 != EVP_EncryptFinal_ex(_ctx.get(), out.data(), &len)) { return Status(ErrorCodes::UnknownError, @@ -157,6 +268,25 @@ class SymmetricDecryptorOpenSSL : public SymmetricDecryptor { StatusWith update(ConstDataRange in, DataRange out) final { int len = 0; + + if (out.data() == nullptr) { + // Presumed intentional null output buffer + invariant(out.length() == 0); + } else { + + size_t minimumOutputSize = in.length(); + size_t cipherBlockSize = EVP_CIPHER_CTX_block_size(_ctx.get()); + if (in.length() % cipherBlockSize) { + minimumOutputSize += cipherBlockSize; + } + + if (out.length() < minimumOutputSize) { + return Status(ErrorCodes::Overflow, + str::stream() << "Write buffer too small for Decryptor update: " + << static_cast(out.length())); + } + } + if (1 != EVP_DecryptUpdate( _ctx.get(), out.data(), &len, in.data(), in.length())) { @@ -187,6 +317,14 @@ class SymmetricDecryptorOpenSSL : public SymmetricDecryptor { StatusWith finalize(DataRange out) final { int len = 0; + + size_t cipherBlockSize = EVP_CIPHER_CTX_block_size(_ctx.get()); + if (cipherBlockSize > 1 && out.length() < cipherBlockSize) { + return Status(ErrorCodes::Overflow, + str::stream() << "Write buffer too small for Encryptor finalize: " + << static_cast(out.length())); + } + if (1 != EVP_DecryptFinal_ex(_ctx.get(), out.data(), &len)) { return Status(ErrorCodes::UnknownError, str::stream() diff --git a/src/mongo/crypto/symmetric_crypto_test.cpp b/src/mongo/crypto/symmetric_crypto_test.cpp index 8f7acbd03dc45..361add269210b 100644 --- a/src/mongo/crypto/symmetric_crypto_test.cpp +++ b/src/mongo/crypto/symmetric_crypto_test.cpp @@ -28,12 +28,33 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/secure_allocator.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/crypto/block_packer.h" -#include "mongo/unittest/unittest.h" +#include "mongo/crypto/symmetric_crypto.h" +#include "mongo/crypto/symmetric_key.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/hex.h" namespace mongo { @@ -295,6 +316,72 @@ TEST(BlockPacker, AlignedThenOverfill) { ASSERT_EQ(1, 
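The new update()/finalize() guards reject output buffers that cannot hold a whole number of cipher blocks: update() needs the input length rounded up to the next multiple of EVP_CIPHER_CTX_block_size(), and finalize() needs at least one block whenever the block size is greater than one. A small worked sketch of that arithmetic, with 16-byte AES blocks and purely illustrative input lengths:

```cpp
// Sketch: minimum output buffer size for a block cipher update() call.
#include <cstddef>
#include <initializer_list>
#include <iostream>

size_t minimumUpdateOutputSize(size_t inputLen, size_t cipherBlockSize) {
    size_t minimum = inputLen;
    if (size_t remainder = inputLen % cipherBlockSize)
        minimum += cipherBlockSize - remainder;  // pad up to the next full block
    return minimum;
}

int main() {
    constexpr size_t kAesBlockSize = 16;
    for (size_t in : {13u, 16u, 31u}) {  // hypothetical plaintext lengths
        std::cout << "input=" << in
                  << " minimum output=" << minimumUpdateOutputSize(in, kAesBlockSize)
                  << '\n';
    }
    return 0;
}
```

So a 13-byte input needs a 16-byte buffer, a 16-byte input passes unchanged, and a 31-byte input needs 32 bytes; a null output range of length zero is still allowed as an intentional no-output call.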
leftovers.length()); } +#ifdef __linux__ +// (Only for OpenSSL, i.e. on Linux) +// ... Try using insufficiently large output buffers for encryption and decryption +TEST(SymmetricEncryptor, InsufficientOutputBuffer) { + SymmetricKey key = crypto::aesGenerate(crypto::sym256KeySize, "InsufficientOutputBufferTest"); + constexpr auto plaintextMessage = "DOLOREM IPSUM"_sd; + std::vector encodedPlaintext(plaintextMessage.begin(), plaintextMessage.end()); + const std::array iv = {}; + std::array cryptoBuffer; + DataRange cryptoRange(cryptoBuffer.data(), cryptoBuffer.size()); + + auto swEnc = crypto::SymmetricEncryptor::create(key, crypto::aesMode::cbc, iv); + ASSERT_OK(swEnc.getStatus()); + auto encryptor = std::move(swEnc.getValue()); + DataRangeCursor cryptoCursor(cryptoRange); + + // Validate that encryption with insufficient output buffer does not succeed + DataRange smallOutputBuffer(cryptoBuffer.data(), 1); + ASSERT_NOT_OK(encryptor->update(encodedPlaintext, smallOutputBuffer)); + + // Validate that encryption with zero output buffer does not succeed + DataRange zeroOutputBuffer(cryptoBuffer.data(), 0); + ASSERT_NOT_OK( + encryptor->update({plaintextMessage.rawData(), plaintextMessage.size()}, zeroOutputBuffer)); + + auto swSize = encryptor->update(encodedPlaintext, cryptoCursor); + ASSERT_OK(swSize); + cryptoCursor.advance(swSize.getValue()); + + swSize = encryptor->finalize(cryptoCursor); + ASSERT_OK(swSize); + + // finalize is guaranteed to output at least 16 bytes for the CBC blockmode + ASSERT_GTE(swSize.getValue(), 16); + cryptoCursor.advance(swSize.getValue()); + + // Validate beginning of decryption process + auto swDec = crypto::SymmetricDecryptor::create(key, crypto::aesMode::cbc, iv); + ASSERT_OK(swDec.getStatus()); + auto decryptor = std::move(swDec.getValue()); + + // Validate that decryption with insufficient output buffer does not succeed + std::array shortOutputBuffer; + DataRangeCursor shortOutputCursor(shortOutputBuffer); + ASSERT_NOT_OK(decryptor->update( + {cryptoRange.data(), cryptoRange.length() - cryptoCursor.length()}, shortOutputCursor)); + + // Validate that decryption with zero output buffer does not succeed + DataRangeCursor zeroOutputCursor(zeroOutputBuffer); + ASSERT_NOT_OK(decryptor->update( + {cryptoRange.data(), cryptoRange.length() - cryptoCursor.length()}, zeroOutputCursor)); + + // Validate that decryption update/finalize with sufficient output buffer succeeds + std::array decryptionBuffer; + DataRangeCursor decryptionCursor(decryptionBuffer); + auto swUpdateSize = decryptor->update( + {cryptoRange.data(), cryptoRange.length() - cryptoCursor.length()}, decryptionCursor); + ASSERT_OK(swUpdateSize.getStatus()); + decryptionCursor.advance(swUpdateSize.getValue()); + auto swFinalizeSize = decryptor->finalize(decryptionCursor); + ASSERT_OK(swFinalizeSize.getStatus()); + + // Validate that the decrypted ciphertext matches the original plaintext + ASSERT(std::equal(plaintextMessage.begin(), plaintextMessage.end(), decryptionBuffer.begin())); +} +#endif // The following tests validate that SymmetricEncryptors function when called with inputs with // varying block alignments. @@ -739,9 +826,6 @@ TEST_F(AESTestVectors, CTRTestCase1234) { "dfc9c58db67aada613c2dd08457941a6")); } -// The tests vectors below are generated using random data. 
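The test above drives the same update()/finalize() sequence end to end. For reference, the standalone OpenSSL EVP sketch below shows the equivalent AES-256-CBC round trip with generously sized buffers; the all-zero key and IV are made up solely for illustration and return codes are mostly ignored for brevity.

```cpp
// Sketch: AES-256-CBC round trip using the EVP update/finalize flow.
#include <openssl/evp.h>
#include <cstdio>
#include <cstring>
#include <vector>

int main() {
    unsigned char key[32] = {};  // illustration only - never use a fixed key
    unsigned char iv[16] = {};
    const char* msg = "DOLOREM IPSUM";
    const int msgLen = static_cast<int>(std::strlen(msg));

    std::vector<unsigned char> ciphertext(msgLen + 16);  // room for CBC padding
    int outLen = 0, total = 0;

    EVP_CIPHER_CTX* enc = EVP_CIPHER_CTX_new();
    EVP_EncryptInit_ex(enc, EVP_aes_256_cbc(), nullptr, key, iv);
    EVP_EncryptUpdate(enc, ciphertext.data(), &outLen,
                      reinterpret_cast<const unsigned char*>(msg), msgLen);
    total = outLen;
    EVP_EncryptFinal_ex(enc, ciphertext.data() + total, &outLen);
    total += outLen;
    EVP_CIPHER_CTX_free(enc);

    std::vector<unsigned char> plaintext(total + 16);
    int plainLen = 0;
    EVP_CIPHER_CTX* dec = EVP_CIPHER_CTX_new();
    EVP_DecryptInit_ex(dec, EVP_aes_256_cbc(), nullptr, key, iv);
    EVP_DecryptUpdate(dec, plaintext.data(), &outLen, ciphertext.data(), total);
    plainLen = outLen;
    EVP_DecryptFinal_ex(dec, plaintext.data() + plainLen, &outLen);
    plainLen += outLen;
    EVP_CIPHER_CTX_free(dec);

    std::printf("%d ciphertext bytes, round trip %s\n", total,
                (plainLen == msgLen && std::memcmp(plaintext.data(), msg, msgLen) == 0)
                    ? "ok" : "failed");
    return 0;
}
```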
Since they do not contain logic, -// we will have them in a separate file so that they do not overtake the code space -#include "symmetric_crypto_tests.gen" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/crypto/symmetric_key.cpp b/src/mongo/crypto/symmetric_key.cpp index 66e084e3a74e1..2621dbec0d654 100644 --- a/src/mongo/crypto/symmetric_key.cpp +++ b/src/mongo/crypto/symmetric_key.cpp @@ -28,15 +28,14 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/crypto/symmetric_key.h" -#include +#include #include "mongo/crypto/symmetric_crypto.h" #include "mongo/logv2/log.h" -#include "mongo/util/secure_zero_memory.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/crypto/symmetric_key.h b/src/mongo/crypto/symmetric_key.h index 9f9c3c42a44d6..26b33a5de3a34 100644 --- a/src/mongo/crypto/symmetric_key.h +++ b/src/mongo/crypto/symmetric_key.h @@ -29,12 +29,19 @@ #pragma once -#include +#include +#include +#include #include #include +#include +#include +#include #include "mongo/base/secure_allocator.h" +#include "mongo/base/string_data.h" #include "mongo/platform/atomic_word.h" +#include "mongo/util/murmur3.h" namespace mongo { class Status; @@ -74,9 +81,7 @@ class SymmetricKeyId { struct Hash { std::size_t operator()(const SymmetricKeyId& keyid) const { auto rep = keyid.toString(); - uint32_t hash; - MurmurHash3_x86_32(rep.data(), rep.size(), 0, &hash); - return hash; + return murmur3(StringData{rep}, 0 /*seed*/); } }; diff --git a/src/mongo/db/SConscript b/src/mongo/db/SConscript index 0f014dd8848bb..41cd6a9666c19 100644 --- a/src/mongo/db/SConscript +++ b/src/mongo/db/SConscript @@ -124,6 +124,7 @@ env.Library( '$BUILD_DIR/mongo/db/session/logical_session_id_helpers', '$BUILD_DIR/mongo/db/storage/key_string', '$BUILD_DIR/mongo/db/transaction/transaction', + 'dbhelpers', 'index_builds_coordinator_interface', 'query_exec', 'shard_role', @@ -343,9 +344,9 @@ env.Library( ) env.Library( - target="global_settings", + target='global_settings', source=[ - "global_settings.cpp", + 'global_settings.cpp', ], LIBDEPS=[ 'repl/repl_settings', @@ -364,11 +365,10 @@ env.Library( ], ) -# mongod options env.Library( - target="mongod_options", + target='mongod_options', source=[ - "mongod_options.cpp", + 'mongod_options.cpp', 'mongod_options_general.idl', 'mongod_options_legacy.idl', 'mongod_options_replication.idl', @@ -531,6 +531,8 @@ env.Library( '$BUILD_DIR/mongo/db/dbhelpers', '$BUILD_DIR/mongo/db/server_feature_flags', '$BUILD_DIR/mongo/db/service_context', + 'change_streams_cluster_parameter', + 'record_id_helpers', ], ) @@ -548,11 +550,27 @@ env.Library( ], ) +env.Library( + target='change_stream_pre_image_util', + source=[ + 'change_stream_pre_image_util.cpp', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/pipeline/change_stream_preimage', + 'change_stream_options_manager', + 'change_stream_serverless_helpers', + 'query_exec', + 'record_id_helpers', + 'server_base', + ], +) + env.Library( target='change_stream_pre_images_collection_manager', source=[ 'change_stream_pre_images_collection_manager.cpp', - 'change_stream_pre_images_truncate_markers.cpp' + 'change_stream_pre_images_truncate_manager.cpp', + 'change_stream_pre_images_truncate_markers_per_nsUUID.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/db/pipeline/change_stream_preimage', @@ -565,6 +583,7 @@ env.Library( '$BUILD_DIR/mongo/db/query/op_metrics', 
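SymmetricKeyId::Hash now funnels through the murmur3(StringData, seed) helper instead of calling MurmurHash3_x86_32 and copying the 32-bit result by hand. The sketch below shows only the functor shape; std::hash<std::string> stands in for the mongo murmur3 utility so it compiles on its own.

```cpp
// Sketch: hash a key id's string form once so it can live in unordered sets.
#include <cstdint>
#include <iostream>
#include <string>
#include <unordered_set>

class SymmetricKeyId {
public:
    SymmetricKeyId(std::string name, uint64_t id) : _name(std::move(name)), _id(id) {}
    std::string toString() const { return _name + "-" + std::to_string(_id); }

    bool operator==(const SymmetricKeyId& other) const {
        return toString() == other.toString();
    }

    struct Hash {
        std::size_t operator()(const SymmetricKeyId& keyId) const {
            // std::hash stands in for murmur3(StringData{rep}, 0 /*seed*/).
            return std::hash<std::string>{}(keyId.toString());
        }
    };

private:
    std::string _name;
    uint64_t _id;
};

int main() {
    std::unordered_set<SymmetricKeyId, SymmetricKeyId::Hash> keys;
    keys.insert(SymmetricKeyId("local", 1));
    std::cout << "stored " << keys.size() << " key id(s)\n";
    return 0;
}
```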
'$BUILD_DIR/mongo/db/repl/storage_interface', 'change_stream_options_manager', + 'change_stream_pre_image_util', 'change_stream_serverless_helpers', 'query_exec', 'record_id_helpers', @@ -572,6 +591,30 @@ env.Library( ], ) +env.CppUnitTest( + target='change_stream_pre_images_manager_test', + source=[ + 'change_stream_pre_images_remover_test.cpp', + 'change_stream_pre_images_truncate_manager_test.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/db/catalog/catalog_test_fixture', + '$BUILD_DIR/mongo/db/catalog/collection_catalog', + '$BUILD_DIR/mongo/db/catalog/collection_crud', + '$BUILD_DIR/mongo/db/op_observer/op_observer', + '$BUILD_DIR/mongo/db/op_observer/op_observer_impl', + '$BUILD_DIR/mongo/db/op_observer/oplog_writer_impl', + '$BUILD_DIR/mongo/db/storage/record_store_base', + 'change_stream_options', + 'change_stream_options_manager', + 'change_stream_pre_image_util', + 'change_stream_pre_images_collection_manager', + 'change_stream_serverless_helpers', + 'change_streams_cluster_parameter', + 'shard_role', + ], +) + env.Library( target='write_block_bypass', source=[ @@ -584,9 +627,9 @@ env.Library( ) env.Library( - target="read_write_concern_defaults_mock", + target='read_write_concern_defaults_mock', source=[ - "read_write_concern_defaults_cache_lookup_mock.cpp", + 'read_write_concern_defaults_cache_lookup_mock.cpp', ], LIBDEPS=[ 'read_write_concern_defaults', @@ -654,7 +697,7 @@ env.Library( env.Library( target='not_primary_error_tracker', source=[ - "not_primary_error_tracker.cpp", + 'not_primary_error_tracker.cpp', ], LIBDEPS=[ 'service_context', @@ -749,7 +792,7 @@ env.Library( ) env.Library( - target="metadata_consistency_types_idl", + target='metadata_consistency_types_idl', source=[ 'metadata_consistency_types.idl', ], @@ -764,6 +807,7 @@ env.Library( source=[ 'coll_mod_reply_validation.cpp', 'commands.cpp', + 'curop.cpp', 'drop.idl', 'drop_database.idl', 'explain.idl', @@ -774,25 +818,36 @@ env.Library( 'api_parameters', ], LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/bson/mutable/mutable_bson', - '$BUILD_DIR/mongo/db/auth/auth', - '$BUILD_DIR/mongo/db/auth/authprivilege', - '$BUILD_DIR/mongo/db/catalog/collection_options', - '$BUILD_DIR/mongo/db/commands/create_command', - '$BUILD_DIR/mongo/db/commands/server_status_core', - '$BUILD_DIR/mongo/db/commands/test_commands_enabled', - '$BUILD_DIR/mongo/db/pipeline/change_stream_pre_and_post_images_options', - '$BUILD_DIR/mongo/db/query/common_query_enums_and_helpers', - '$BUILD_DIR/mongo/db/timeseries/timeseries_options', '$BUILD_DIR/mongo/rpc/command_status', '$BUILD_DIR/mongo/rpc/rewrite_state_change_errors', '$BUILD_DIR/mongo/rpc/rpc', + '$BUILD_DIR/mongo/transport/service_executor', + '$BUILD_DIR/mongo/util/concurrency/admission_context', + '$BUILD_DIR/mongo/util/diagnostic_info' + if get_option('use-diagnostic-latches') == 'on' else [], '$BUILD_DIR/mongo/util/namespace_string_database_name_util', + '$BUILD_DIR/mongo/util/progress_meter', 'audit', + 'auth/auth', + 'auth/authprivilege', + 'auth/user_acquisition_stats', + 'catalog/collection_options', 'coll_mod_command_idl', + 'commands/create_command', + 'commands/server_status_core', + 'commands/test_commands_enabled', + 'concurrency/lock_manager', + 'generic_cursor', 'index_commands_idl', + 'pipeline/change_stream_pre_and_post_images_options', + 'prepare_conflict_tracker', + 'query/common_query_enums_and_helpers', + 'repl/read_concern_args', 'server_base', + 'stats/resource_consumption_metrics', + 'stats/timer_stats', + 'timeseries/timeseries_options', ], ) @@ 
-811,9 +866,22 @@ env.Library( ) env.Library( - target='shard_role_api', + target='shard_role_api_stor_ex', source=[ 'catalog/collection.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/s/common_s', + 'catalog/clustered_collection_options', + 'catalog/index_catalog', + 'query/collation/collator_factory_interface', + 'server_base', + ], +) + +env.Library( + target='shard_role_api', + source=[ 'catalog/collection_operation_source.cpp', 's/collection_metadata.cpp', 's/collection_sharding_state.cpp', @@ -831,15 +899,17 @@ env.Library( 's/transaction_coordinator_curop.cpp', 's/transaction_coordinator_factory.cpp', 's/transaction_coordinator_worker_curop_repository.cpp', + 'transaction_resources.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/s/grid', - 'catalog/index_catalog', 'concurrency/lock_manager', 'index/index_access_method', + 'rw_concern_d', + 'shard_role_api_stor_ex', + 'views/views', ], LIBDEPS_PRIVATE=[ - 'catalog/clustered_collection_options', 'server_base', 'write_block_bypass', ], @@ -848,13 +918,12 @@ env.Library( env.Library( target='shard_role', source=[ - 'catalog_raii.cpp', 'catalog/catalog_helper.cpp', 'catalog/collection_uuid_mismatch.cpp', 'catalog/collection_yield_restore.cpp', + 'catalog_raii.cpp', 'db_raii.cpp', 'shard_role.cpp', - 'transaction_resources.cpp', ], LIBDEPS=[ 'catalog/collection_catalog', @@ -862,7 +931,6 @@ env.Library( 'shard_role_api', 'stats/top', 'storage/write_unit_of_work', - 'views/views', ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/catalog/collection_uuid_mismatch_info', @@ -911,7 +979,6 @@ env.Library( '$BUILD_DIR/mongo/db/session/logical_session_id', '$BUILD_DIR/mongo/executor/inline_executor', '$BUILD_DIR/mongo/executor/task_executor_pool', - '$BUILD_DIR/mongo/s/grid', '$BUILD_DIR/mongo/s/sharding_router_api', 'dbdirectclient', 'query_expressions', @@ -925,22 +992,20 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/crypto/fle_crypto', - '$BUILD_DIR/mongo/db/concurrency/exception_util', - '$BUILD_DIR/mongo/db/query/query_request', - '$BUILD_DIR/mongo/db/record_id_helpers', - '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', - '$BUILD_DIR/mongo/db/session/logical_session_id', - '$BUILD_DIR/mongo/db/session/session_catalog', - '$BUILD_DIR/mongo/db/session/session_catalog_mongod', - '$BUILD_DIR/mongo/db/transaction/transaction', '$BUILD_DIR/mongo/executor/inline_executor', '$BUILD_DIR/mongo/executor/network_interface_factory', '$BUILD_DIR/mongo/executor/task_executor_pool', '$BUILD_DIR/mongo/executor/thread_pool_task_executor', - '$BUILD_DIR/mongo/s/grid', '$BUILD_DIR/mongo/util/concurrency/thread_pool', + 'concurrency/exception_util', 'fle_crud', + 'query/query_request', + 'record_id_helpers', + 'repl/repl_coordinator_interface', + 'session/logical_session_id', + 'session/session_catalog_mongod', 'shard_role', + 'transaction/transaction', ], ) @@ -1026,16 +1091,12 @@ env.Library( target='service_context_d', source=[ 'service_entry_point_mongod.cpp', + 'transaction_resources_init_mongod.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/base', - '$BUILD_DIR/mongo/db/query/op_metrics', '$BUILD_DIR/mongo/transport/service_entry_point', - 'curop_metrics', - 'rw_concern_d', + 'service_context', 'storage/storage_engine_common', - 'storage/storage_engine_lock_file', - 'storage/storage_engine_metadata', ], LIBDEPS_PRIVATE=[ 'auth/auth', @@ -1043,13 +1104,31 @@ env.Library( 'commands/fsync_locked', 'concurrency/lock_manager', 'not_primary_error_tracker', + 'query/op_metrics', 'read_concern_d_impl', + 'rw_concern_d', 's/sharding_runtime_d', + 'server_base', 
'service_entry_point_common', + 'storage/storage_engine_lock_file', + 'storage/storage_engine_metadata', 'storage/storage_options', ], ) +env.Library( + target='service_context_non_d', + source=[ + 'transaction_resources_init_non_mongod.cpp', + ], + LIBDEPS=[ + 'service_context', + ], + LIBDEPS_PRIVATE=[ + 'concurrency/lock_manager', + ], +) + env.Library( target='service_entry_point_common', source=[ @@ -1058,6 +1137,7 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/s/query_analysis_sampler', + '$BUILD_DIR/mongo/transport/service_executor', 'audit', 'auth/auth', 'auth/auth_umc', @@ -1142,6 +1222,7 @@ env.Library( 'catalog/index_builds_manager', ], LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/executor/task_executor_interface', '$BUILD_DIR/mongo/util/fail_point', 'catalog/collection_catalog', @@ -1160,6 +1241,7 @@ env.Library( 'resumable_index_builds_idl', 'server_base', 'shard_role', + 'storage/disk_space_util', 'storage/encryption_hooks', 'storage/storage_util', 'storage/two_phase_index_build_knobs_idl', @@ -1265,7 +1347,6 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/query/op_metrics', - '$BUILD_DIR/mongo/s/grid', 'concurrency/exception_util', 'repl/repl_coordinator_interface', 'repl/speculative_majority_read_info', @@ -1368,7 +1449,9 @@ env.Library( ], ) -env.Library( +execEnv = env.Clone() +execEnv.InjectThirdParty(libraries=['snappy']) +execEnv.Library( target='query_exec', source=[ 'clientcursor.cpp', @@ -1383,7 +1466,6 @@ env.Library( 'exec/count.cpp', 'exec/count_scan.cpp', 'exec/delete_stage.cpp', - 'exec/disk_use_options.idl', 'exec/distinct_scan.cpp', 'exec/eof.cpp', 'exec/fetch.cpp', @@ -1415,6 +1497,7 @@ env.Library( 'exec/text_match.cpp', 'exec/text_or.cpp', 'exec/timeseries_modify.cpp', + 'exec/timeseries_upsert.cpp', 'exec/trial_period_utils.cpp', 'exec/trial_stage.cpp', 'exec/unpack_timeseries_bucket.cpp', @@ -1422,6 +1505,7 @@ env.Library( 'exec/upsert_stage.cpp', 'ops/delete_request.idl', 'ops/parsed_delete.cpp', + 'ops/parsed_update.cpp', 'ops/update_result.cpp', 'pipeline/document_source_cursor.cpp', 'pipeline/document_source_geo_near_cursor.cpp', @@ -1492,6 +1576,7 @@ env.Library( 'curop_failpoint_helpers', 'cursor_server_params', 'dbdirectclient', + 'disk_use_options_idl', 'exec/projection_executor', 'exec/sbe/query_sbe_stages', 'exec/sbe/query_sbe_storage', @@ -1515,7 +1600,6 @@ env.Library( 'shared_request_handling', 'stats/latency_server_stats', 'stats/serveronly_stats', - 'storage/remove_saver', 'storage/storage_options', 'update/update_driver', ], @@ -1536,7 +1620,9 @@ env.Library( '$BUILD_DIR/mongo/db/query/ce/query_ce_histogram', '$BUILD_DIR/mongo/db/query/ce/query_ce_sampling', '$BUILD_DIR/mongo/db/query/optimizer/optimizer', + '$BUILD_DIR/mongo/db/repl/wait_for_majority_service', '$BUILD_DIR/mongo/db/session/kill_sessions', + '$BUILD_DIR/mongo/db/sorter/sorter_idl', '$BUILD_DIR/mongo/db/sorter/sorter_stats', '$BUILD_DIR/mongo/db/stats/resource_consumption_metrics', '$BUILD_DIR/mongo/db/storage/record_store_base', @@ -1550,6 +1636,15 @@ env.Library( ], ) +env.Library( + target='disk_use_options_idl', + source=['exec/disk_use_options.idl'], + LIBDEPS_PRIVATE=[ + 'query/query_knobs', + 'server_base', + ], +) + env.Library( target='query_expressions', source=[ @@ -1560,6 +1655,7 @@ env.Library( 'matcher/expression_array.cpp', 'matcher/expression_expr.cpp', 'matcher/expression_geo.cpp', + 'matcher/expression_geo_serializer.cpp', 'matcher/expression_internal_bucket_geo_within.cpp', 
'matcher/expression_leaf.cpp', 'matcher/expression_parameterization.cpp', @@ -1613,9 +1709,9 @@ env.Library( LIBDEPS=[ '$BUILD_DIR/mongo/bson/util/bson_extract', '$BUILD_DIR/mongo/crypto/fle_crypto', + '$BUILD_DIR/mongo/db/query/str_trim_utils', '$BUILD_DIR/mongo/scripting/scripting', '$BUILD_DIR/mongo/scripting/scripting_common', - '$BUILD_DIR/mongo/util/intrusive_counter', '$BUILD_DIR/mongo/util/pcre_util', '$BUILD_DIR/mongo/util/pcre_wrapper', '$BUILD_DIR/mongo/util/summation', @@ -1634,6 +1730,7 @@ env.Library( 'query/collation/collator_interface', 'query/datetime/date_time_support', 'query/query_knobs', + 'serialization_options', 'stats/counters', 'update/pattern_cmp', ], @@ -1645,6 +1742,19 @@ env.Library( ], ) +env.Library( + target='serialization_options', + source=[ + 'query/serialization_options.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/exec/document_value/document_value', + '$BUILD_DIR/mongo/db/pipeline/field_path', + ], + LIBDEPS_PRIVATE=[], +) + env.Library( target='startup_recovery', source=[ @@ -1654,7 +1764,6 @@ env.Library( LIBDEPS_PRIVATE=[ 'catalog/catalog_helpers', 'catalog/collection_catalog', - 'catalog/collection_crud', 'catalog/collection_validation', 'catalog/database_holder', 'catalog/document_validation', @@ -1759,23 +1868,23 @@ env.Library( 'error_labels.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/db/pipeline/lite_parsed_document_source', - '$BUILD_DIR/mongo/db/query/op_metrics', - '$BUILD_DIR/mongo/db/session/logical_session_id', 'commands', + 'pipeline/lite_parsed_document_source', + 'session/logical_session_id', ], ) env.Library( target='shared_request_handling', source=[ + 'initialize_operation_session_info.cpp', 'transaction_validation.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', 'api_parameters', 'error_labels', - 'session/logical_session_cache_impl', + 'repl/repl_coordinator_interface', + 'session/logical_session_cache', ], ) @@ -1882,6 +1991,19 @@ env.Library( ], ) +env.Library( + target='keys_collection_util', + source=[ + 'keys_collection_util.cpp', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/concurrency/exception_util', + 'dbhelpers', + 'keys_collection_document', + 'shard_role', + ], +) + env.Library( target='keys_collection_client_direct', source=[ @@ -2004,23 +2126,13 @@ env.Library( ], ) -env.Library( - target='vector_clock_test_fixture', - source=[ - 'vector_clock_test_fixture.cpp', - ], - LIBDEPS=[ - '$BUILD_DIR/mongo/db/auth/authmocks', - '$BUILD_DIR/mongo/util/clock_source_mock', - 's/sharding_mongod_test_fixture', - 'signed_logical_time', - 'vector_clock', - ], - LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/db/op_observer/oplog_writer_impl', - ], -) - +# This library is the base class for tests which require the presence of ServiceContext. However it +# does not perform any default initialisation of the relevant services, such as lockers and +# transaction resources, so any test which directly references it must perform its own +# initialisation. +# +# If a test requires the presence of a storage engine, it should use the +# service_context_d_test_fixture below directly. 
env.Library( target='service_context_test_fixture', source=[ @@ -2032,7 +2144,6 @@ env.Library( 'service_context', ], LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/db/op_observer/op_observer', '$BUILD_DIR/mongo/util/clock_source_mock', ], ) @@ -2048,9 +2159,7 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/util/clock_source_mock', 'catalog/catalog_impl', - 'catalog/database_holder', 'commands/mongod', - 'index/index_access_method', 'index_builds_coordinator_mongod', 's/sharding_runtime_d', 'service_context_d', @@ -2061,20 +2170,6 @@ env.Library( ], ) -env.Library( - target='service_context_devnull_test_fixture', - source=[ - 'service_context_devnull_test_fixture.cpp', - ], - LIBDEPS=[ - 'service_context_d_test_fixture', - ], - LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/db/auth/authmocks', - '$BUILD_DIR/mongo/db/storage/devnull/storage_devnull', - ], -) - env.Library( target='log_process_details', source=[ @@ -2200,11 +2295,11 @@ env.Library( ) env.Library( - target="mongod_initializers", + target='mongod_initializers', source=[ # NOTE: If you add an additional source file here, please delete # the file db/mongod_initializers.cpp. - "mongod_initializers.cpp", + 'mongod_initializers.cpp', ], LIBDEPS_PRIVATE=[ # NOTE: If you need to add a static or mongo initializer to mongod startup, @@ -2286,7 +2381,6 @@ env.Library( 'serverless/shard_split_commands', 'service_liaison_mongod', 'session/kill_sessions_local', - 'session/logical_session_cache_impl', 'session/sessions_collection_rs', 'session/sessions_collection_standalone', 'shard_role', @@ -2327,27 +2421,15 @@ env.Library( 'read_write_concern_defaults_cache_lookup_mongod.cpp', ], LIBDEPS_PRIVATE=[ - # NOTE: Do not add new libdeps (public or private) here unless - # required by the linker to satisfy symbol dependencies from - # the files listed above in `sources`. If you need to add a - # library to inject a static or mongo initializer to mongod, - # please add that library as a private libdep of - # mongod_initializers. + # NOTE: Do not add new libdeps (public or private) here unless required by the linker to + # satisfy symbol dependencies from the files listed above in `sources`. If you need to add a + # library to inject a static or mongo initializer to mongod, please add that library as a + # private libdep of mongod_initializers. 
'$BUILD_DIR/mongo/client/clientdriver_minimal', - '$BUILD_DIR/mongo/db/catalog/collection_crud', - '$BUILD_DIR/mongo/db/change_collection_expired_change_remover', - '$BUILD_DIR/mongo/db/change_stream_change_collection_manager', - '$BUILD_DIR/mongo/db/change_stream_options_manager', - '$BUILD_DIR/mongo/db/change_streams_cluster_parameter', - '$BUILD_DIR/mongo/db/pipeline/change_stream_expired_pre_image_remover', - '$BUILD_DIR/mongo/db/query/stats/query_stats', - '$BUILD_DIR/mongo/db/s/query_analysis_writer', - '$BUILD_DIR/mongo/db/set_change_stream_state_coordinator', '$BUILD_DIR/mongo/idl/cluster_server_parameter', '$BUILD_DIR/mongo/idl/cluster_server_parameter_initializer', '$BUILD_DIR/mongo/idl/cluster_server_parameter_op_observer', '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl', - '$BUILD_DIR/mongo/s/grid', '$BUILD_DIR/mongo/s/sessions_collection_sharded', '$BUILD_DIR/mongo/scripting/scripting', '$BUILD_DIR/mongo/transport/ingress_handshake_metrics', @@ -2364,9 +2446,15 @@ env.Library( 'auth/auth_op_observer', 'catalog/catalog_helpers', 'catalog/catalog_impl', + 'catalog/collection_crud', 'catalog/health_log', 'catalog/health_log_interface', + 'change_collection_expired_change_remover', + 'change_stream_change_collection_manager', + 'change_stream_options_manager', + 'change_streams_cluster_parameter', 'commands/mongod', + 'commands/mongod_fsync', 'commands/test_commands', 'concurrency/flow_control_ticketholder', 'concurrency/lock_manager', @@ -2380,13 +2468,17 @@ env.Library( 'mirror_maestro', 'mongod_initializers', 'mongod_options', + 'op_observer/fallback_op_observer', 'op_observer/fcv_op_observer', 'op_observer/op_observer', 'op_observer/oplog_writer_impl', 'op_observer/oplog_writer_transaction_proxy', 'op_observer/user_write_block_mode_op_observer', 'periodic_runner_job_abort_expired_transactions', + 'pipeline/change_stream_expired_pre_image_remover', 'pipeline/process_interface/mongod_process_interface_factory', + 'query/query_settings_manager', + 'query/stats/query_stats', 'repl/drop_pending_collection_reaper', 'repl/initial_syncer', 'repl/repl_coordinator_impl', @@ -2398,18 +2490,22 @@ env.Library( 'repl/tenant_migration_recipient_service', 'repl/topology_coordinator', 'repl/wait_for_majority_service', + 's/query_analysis_writer', 's/sessions_collection_config_server', 's/sharding_commands_d', 's/sharding_runtime_d', 'serverinit', + 'serverless/multitenancy_check', 'serverless/shard_split_donor_service', 'service_context_d', 'service_liaison_mongod', 'session/kill_sessions', 'session/kill_sessions_local', + 'session/logical_session_cache_impl', 'session/session_catalog_mongod', 'session/sessions_collection_rs', 'session/sessions_collection_standalone', + 'set_change_stream_state_coordinator', 'shard_role', 'startup_recovery', 'startup_warnings_mongod', @@ -2419,8 +2515,8 @@ env.Library( 'storage/flow_control_parameters', 'storage/oplog_cap_maintainer_thread', 'storage/storage_control', - 'storage/storage_engine_common', 'system_index', + 'timeseries/timeseries_op_observer', 'ttl_d', 'vector_clock', ], @@ -2446,29 +2542,27 @@ if env.TargetOSIs('windows'): env.Depends("mongod.res", generatedDbManifest) env.Program( - target="mongod", - source=['mongod.cpp'] + env.WindowsResourceFile("mongod.rc"), - LIBDEPS=[ - # NOTE: Do not add new libdeps (public or private) here unless - # required by the linker to satisfy symbol dependencies from - # the files listed above in `sources`. 
If you need to add a - # library to inject a static or mongo initializer to mongod, - # please add that library as a private libdep of - # mongod_initializers. - '$BUILD_DIR/mongo/base', + target='mongod', + source=['mongod.cpp'] + env.WindowsResourceFile('mongod.rc'), + LIBDEPS=[ + # NOTE: Do not add new libdeps (public or private) here unless required by the linker to + # satisfy symbol dependencies from the files listed above in `sources`. If you need to add a + # library to inject a static or mongo initializer to mongod, please add that library as a + # private libdep of mongod_initializers. 'mongod_main', + 'server_base', ], - AIB_COMPONENT="mongod", + AIB_COMPONENT='mongod', AIB_COMPONENTS_EXTRA=[ - "core", - "default", - "devcore", - "dist", - "dist-test", - "integration-tests", - "serverless", - "serverless-test", - "servers", + 'core', + 'default', + 'devcore', + 'dist', + 'dist-test', + 'integration-tests', + 'serverless', + 'serverless-test', + 'servers', ], ) @@ -2566,6 +2660,7 @@ if wiredtiger: 'record_id_test.cpp', 'server_options_test.cpp', 'session/internal_session_pool_test.cpp', + 'session/internal_transactions_reap_service_test.cpp', 'session/logical_session_cache_test.cpp', 'session/logical_session_id_test.cpp', 'session/session_catalog_mongod_test.cpp', @@ -2580,6 +2675,7 @@ if wiredtiger: 'update_index_data_test.cpp', 'vector_clock_mongod_test.cpp', 'vector_clock_test.cpp', + 'vector_clock_test_fixture.cpp', 'wire_version_test.cpp', 'write_concern_options_test.cpp', ], @@ -2589,41 +2685,6 @@ if wiredtiger: '$BUILD_DIR/mongo/crypto/aead_encryption', '$BUILD_DIR/mongo/crypto/encrypted_field_config', '$BUILD_DIR/mongo/crypto/fle_crypto', - '$BUILD_DIR/mongo/db/auth/auth', - '$BUILD_DIR/mongo/db/auth/authmocks', - '$BUILD_DIR/mongo/db/auth/security_token', - '$BUILD_DIR/mongo/db/catalog/catalog_test_fixture', - '$BUILD_DIR/mongo/db/catalog/collection_crud', - '$BUILD_DIR/mongo/db/catalog/collection_uuid_mismatch_info', - '$BUILD_DIR/mongo/db/catalog/database_holder', - '$BUILD_DIR/mongo/db/catalog/index_build_entry_idl', - '$BUILD_DIR/mongo/db/change_collection_expired_change_remover', - '$BUILD_DIR/mongo/db/change_stream_change_collection_manager', - '$BUILD_DIR/mongo/db/change_stream_serverless_helpers', - '$BUILD_DIR/mongo/db/change_streams_cluster_parameter', - '$BUILD_DIR/mongo/db/commands/bulk_write_command', - '$BUILD_DIR/mongo/db/mongohasher', - '$BUILD_DIR/mongo/db/ops/write_ops', - '$BUILD_DIR/mongo/db/pipeline/change_stream_expired_pre_image_remover', - '$BUILD_DIR/mongo/db/query/common_query_enums_and_helpers', - '$BUILD_DIR/mongo/db/query/op_metrics', - '$BUILD_DIR/mongo/db/query/query_test_service_context', - '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', - '$BUILD_DIR/mongo/db/repl/repl_server_parameters', - '$BUILD_DIR/mongo/db/repl/replmocks', - '$BUILD_DIR/mongo/db/repl/storage_interface_impl', - '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', - '$BUILD_DIR/mongo/db/s/shard_server_test_fixture', - '$BUILD_DIR/mongo/db/session/logical_session_cache', - '$BUILD_DIR/mongo/db/session/logical_session_cache_impl', - '$BUILD_DIR/mongo/db/session/logical_session_id', - '$BUILD_DIR/mongo/db/session/logical_session_id_helpers', - '$BUILD_DIR/mongo/db/session/session_catalog', - '$BUILD_DIR/mongo/db/session/session_catalog_mongod', - '$BUILD_DIR/mongo/db/session/sessions_collection', - '$BUILD_DIR/mongo/db/session/sessions_collection_mock', - '$BUILD_DIR/mongo/db/stats/fill_locker_info', - 
'$BUILD_DIR/mongo/db/storage/wiredtiger/storage_wiredtiger', '$BUILD_DIR/mongo/executor/async_timer_mock', '$BUILD_DIR/mongo/rpc/command_status', '$BUILD_DIR/mongo/rpc/rpc', @@ -2632,9 +2693,23 @@ if wiredtiger: '$BUILD_DIR/mongo/util/clock_source_mock', '$BUILD_DIR/mongo/util/net/network', '$BUILD_DIR/mongo/util/net/ssl_options_server', + 'auth/auth', + 'auth/authentication_restriction', + 'auth/authmocks', + 'auth/security_token', + 'catalog/catalog_test_fixture', + 'catalog/collection_crud', + 'catalog/collection_uuid_mismatch_info', + 'catalog/database_holder', + 'catalog/index_build_entry_idl', + 'change_collection_expired_change_remover', + 'change_stream_change_collection_manager', 'change_stream_options_manager', + 'change_stream_serverless_helpers', + 'change_streams_cluster_parameter', 'collection_index_usage_tracker', 'commands', + 'commands/bulk_write_command', 'common', 'dbmessage', 'fle_crud', @@ -2645,29 +2720,44 @@ if wiredtiger: 'keys_collection_client_direct', 'keys_collection_document', 'mirror_maestro', + 'mongohasher', 'multitenancy', + 'op_observer/oplog_writer_impl', 'operation_time_tracker', + 'ops/write_ops', + 'pipeline/change_stream_expired_pre_image_remover', + 'query/common_query_enums_and_helpers', + 'query/op_metrics', + 'query/query_test_service_context', 'query_exec', 'read_write_concern_defaults_mock', 'record_id_helpers', + 'repl/image_collection_entry', + 'repl/repl_coordinator_interface', + 'repl/repl_server_parameters', + 'repl/replica_set_aware_service', + 'repl/replmocks', + 'repl/storage_interface_impl', + 'repl/tenant_migration_access_blocker', 'rw_concern_d', + 's/shard_server_test_fixture', 'server_base', 'server_options_servers', - 'service_context', - 'service_context_d', - 'service_context_d_test_fixture', - 'service_context_devnull_test_fixture', - 'service_context_test_fixture', 'service_liaison_mock', - 'shard_role', + 'session/logical_session_cache_impl', + 'session/logical_session_id_helpers', + 'session/session_catalog_mongod', + 'session/sessions_collection', + 'session/sessions_collection_mock', 'signed_logical_time', 'snapshot_window_options', 'startup_warnings_mongod', + 'stats/fill_locker_info', + 'storage/wiredtiger/storage_wiredtiger', 'ttl_collection_cache', 'ttl_d', 'update_index_data', 'vector_clock', - 'vector_clock_test_fixture', 'write_concern_options', ], ) diff --git a/src/mongo/db/active_index_builds.cpp b/src/mongo/db/active_index_builds.cpp index a8086c9955f63..e071b1c362ef8 100644 --- a/src/mongo/db/active_index_builds.cpp +++ b/src/mongo/db/active_index_builds.cpp @@ -27,13 +27,27 @@ * it in the license file. 
*/ +#include +#include +#include +#include #include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/active_index_builds.h" #include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" - -#include +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -44,7 +58,11 @@ ActiveIndexBuilds::~ActiveIndexBuilds() { invariant(_allIndexBuilds.empty()); } -void ActiveIndexBuilds::waitForAllIndexBuildsToStopForShutdown(OperationContext* opCtx) { +void ActiveIndexBuilds::waitForAllIndexBuildsToStopForShutdown() { + waitForAllIndexBuildsToStop(OperationContext::notInterruptible()); +} + +void ActiveIndexBuilds::waitForAllIndexBuildsToStop(Interruptible* interruptible) { stdx::unique_lock lk(_mutex); // All index builds should have been signaled to stop via the ServiceContext. @@ -66,7 +84,7 @@ void ActiveIndexBuilds::waitForAllIndexBuildsToStopForShutdown(OperationContext* auto pred = [this]() { return _allIndexBuilds.empty(); }; - _indexBuildsCondVar.wait(lk, pred); + interruptible->waitForConditionOrInterrupt(_indexBuildsCondVar, lk, pred); } void ActiveIndexBuilds::assertNoIndexBuildInProgress() const { @@ -135,6 +153,11 @@ StatusWith> ActiveIndexBuilds::getIndexBuil return it->second; } +std::vector> ActiveIndexBuilds::getAllIndexBuilds() const { + stdx::unique_lock lk(_mutex); + return _filterIndexBuilds_inlock(lk, [](const auto& replState) { return true; }); +} + void ActiveIndexBuilds::unregisterIndexBuild( IndexBuildsManager* indexBuildsManager, std::shared_ptr replIndexBuildState) { @@ -216,7 +239,7 @@ Status ActiveIndexBuilds::registerIndexBuild( return Status::OK(); } -size_t ActiveIndexBuilds::getActiveIndexBuilds() const { +size_t ActiveIndexBuilds::getActiveIndexBuildsCount() const { stdx::unique_lock lk(_mutex); return _allIndexBuilds.size(); } diff --git a/src/mongo/db/active_index_builds.h b/src/mongo/db/active_index_builds.h index b6b3e3ecb45a6..8c1c38ba88b7d 100644 --- a/src/mongo/db/active_index_builds.h +++ b/src/mongo/db/active_index_builds.h @@ -29,11 +29,26 @@ #pragma once +#include +#include +#include #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl_index_build_state.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -51,12 +66,22 @@ class ActiveIndexBuilds { */ ~ActiveIndexBuilds(); + /** + * Waits for all index builds to stop after they have been interrupted during shutdown. + * Leaves the index builds in a recoverable state. + * + * This should only be called when certain the server will not start any new index builds -- + * i.e. when the server is not accepting user requests and no internal operations are + * concurrently starting new index builds. 
+ */ + void waitForAllIndexBuildsToStopForShutdown(); + /** * The following functions all have equivalent definitions in IndexBuildsCoordinator. The * IndexBuildsCoordinator functions forward to these functions. For descriptions of what they * do, see IndexBuildsCoordinator. */ - void waitForAllIndexBuildsToStopForShutdown(OperationContext* opCtx); + void waitForAllIndexBuildsToStop(Interruptible* opCtx); void assertNoIndexBuildInProgress() const; @@ -68,6 +93,8 @@ class ActiveIndexBuilds { StatusWith> getIndexBuild(const UUID& buildUUID) const; + std::vector> getAllIndexBuilds() const; + void awaitNoIndexBuildInProgressForCollection(OperationContext* opCtx, const UUID& collectionUUID, IndexBuildProtocol protocol); @@ -98,7 +125,7 @@ class ActiveIndexBuilds { /** * Get the number of in-progress index builds. */ - size_t getActiveIndexBuilds() const; + size_t getActiveIndexBuildsCount() const; /** * Provides passthrough access to ReplIndexBuildState for index build info. diff --git a/src/mongo/db/aggregated_index_usage_tracker.cpp b/src/mongo/db/aggregated_index_usage_tracker.cpp index 77266e257051b..93b8f2cddfeac 100644 --- a/src/mongo/db/aggregated_index_usage_tracker.cpp +++ b/src/mongo/db/aggregated_index_usage_tracker.cpp @@ -29,8 +29,19 @@ #include "mongo/db/aggregated_index_usage_tracker.h" +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/aggregated_index_usage_tracker.h b/src/mongo/db/aggregated_index_usage_tracker.h index 96a08cd3b35a2..b795a37370925 100644 --- a/src/mongo/db/aggregated_index_usage_tracker.h +++ b/src/mongo/db/aggregated_index_usage_tracker.h @@ -29,7 +29,9 @@ #pragma once +#include #include +#include #include "mongo/db/index_names.h" #include "mongo/platform/atomic_word.h" diff --git a/src/mongo/db/allocate_cursor_id.cpp b/src/mongo/db/allocate_cursor_id.cpp index e9b8371aadf2f..9a5c1a3b045be 100644 --- a/src/mongo/db/allocate_cursor_id.cpp +++ b/src/mongo/db/allocate_cursor_id.cpp @@ -29,6 +29,9 @@ #include "mongo/db/allocate_cursor_id.h" +#include +#include + #include "mongo/util/assert_util.h" namespace mongo::generic_cursor { diff --git a/src/mongo/db/api_parameters.cpp b/src/mongo/db/api_parameters.cpp index 5823069898972..324799e05fabe 100644 --- a/src/mongo/db/api_parameters.cpp +++ b/src/mongo/db/api_parameters.cpp @@ -28,9 +28,13 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/db/api_parameters.h" +#include "mongo/idl/idl_parser.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/api_parameters.h b/src/mongo/db/api_parameters.h index 92a56f26b3c2b..cfbedac446a54 100644 --- a/src/mongo/db/api_parameters.h +++ b/src/mongo/db/api_parameters.h @@ -29,8 +29,19 @@ #pragma once +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/api_parameters_gen.h" #include "mongo/db/operation_context.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/api_parameters.idl b/src/mongo/db/api_parameters.idl index 68b823caa05ca..b67e1b582b23c 100644 --- 
a/src/mongo/db/api_parameters.idl +++ b/src/mongo/db/api_parameters.idl @@ -25,7 +25,7 @@ # exception statement from all source files in the program, then also delete # it in the license file. -# This IDL file describes the BSON format for an APIParametersFromClient. +# This IDL file describes the BSON format for an APIParametersFromClient. # It also handles the serialization to / deserialization from its # BSON representation for that class. @@ -35,25 +35,26 @@ global: imports: - "mongo/db/basic_types.idl" -structs: +structs: - APIParametersFromClient: + APIParametersFromClient: description: "Parser for pulling out VersionedAPI parameters from commands" strict: false + unsafe_dangerous_disable_extra_field_duplicate_checks: true fields: apiVersion: - description: "The api version specified by the command" + description: "The api version specified by the command" type: string - optional: true - apiStrict: - description: "With apiVersion: 'V' and apiStrict: true, the server rejects requests to + optional: true + apiStrict: + description: "With apiVersion: 'V' and apiStrict: true, the server rejects requests to use behaviors not included in V" - type: bool - optional: true - apiDeprecationErrors: + type: bool + optional: true + apiDeprecationErrors: description: "With apiVersion: 'V' and apiDeprecationErrors: true, the server rejects requests to use behaviors deprecated in V in the current MongoDB release" - type: bool + type: bool optional: true server_parameters: diff --git a/src/mongo/db/audit.cpp b/src/mongo/db/audit.cpp index 1f6c375d8e93a..bd53c253cedd9 100644 --- a/src/mongo/db/audit.cpp +++ b/src/mongo/db/audit.cpp @@ -29,11 +29,21 @@ #include "mongo/db/audit.h" +#include + +#include + +#include "mongo/util/assert_util_core.h" + namespace mongo { namespace audit { std::function initializeManager; std::function opObserverRegistrar; std::function initializeSynchronizeJob; +std::function shutdownSynchronizeJob; +std::function)> migrateOldToNew; +std::function removeOldConfig; +std::function updateAuditConfigOnDowngrade; #if !MONGO_ENTERPRISE_AUDIT @@ -76,7 +86,7 @@ void logDropUser(Client* client, const UserName& username) { invariant(client); } -void logDropAllUsersFromDatabase(Client* client, StringData dbname) { +void logDropAllUsersFromDatabase(Client* client, const DatabaseName& dbname) { invariant(client); } @@ -121,7 +131,7 @@ void logDropRole(Client* client, const RoleName& role) { invariant(client); } -void logDropAllRolesFromDatabase(Client* client, StringData dbname) { +void logDropAllRolesFromDatabase(Client* client, const DatabaseName& dbname) { invariant(client); } @@ -194,7 +204,7 @@ void logImportCollection(Client* client, const NamespaceString& nsname) { invariant(client); } -void logCreateDatabase(Client* client, StringData dbname) { +void logCreateDatabase(Client* client, const DatabaseName& dbname) { invariant(client); } @@ -215,7 +225,7 @@ void logDropView(Client* client, invariant(client); } -void logDropDatabase(Client* client, StringData dbname) { +void logDropDatabase(Client* client, const DatabaseName& dbname) { invariant(client); } diff --git a/src/mongo/db/audit.h b/src/mongo/db/audit.h index 324c7d675c7a6..f12a5ec2ae95a 100644 --- a/src/mongo/db/audit.h +++ b/src/mongo/db/audit.h @@ -34,12 +34,32 @@ #pragma once +#include #include +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" 
+#include "mongo/bson/mutable/document.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/rpc/op_msg.h" #include "mongo/util/functional.h" @@ -51,6 +71,7 @@ class BSONObjBuilder; class Client; class NamespaceString; class OperationContext; + class OpObserverRegistry; class ServiceContext; class StringData; @@ -66,6 +87,10 @@ namespace audit { extern std::function initializeManager; extern std::function opObserverRegistrar; extern std::function initializeSynchronizeJob; +extern std::function shutdownSynchronizeJob; +extern std::function)> migrateOldToNew; +extern std::function removeOldConfig; +extern std::function updateAuditConfigOnDowngrade; /** * Struct that temporarily stores client information when an audit hook @@ -202,7 +227,7 @@ void logDropUser(Client* client, const UserName& username); /** * Logs the result of a dropAllUsersFromDatabase command. */ -void logDropAllUsersFromDatabase(Client* client, StringData dbname); +void logDropAllUsersFromDatabase(Client* client, const DatabaseName& dbname); /** * Logs the result of a updateUser command. @@ -254,7 +279,7 @@ void logDropRole(Client* client, const RoleName& role); /** * Logs the result of a dropAllRolesForDatabase command. */ -void logDropAllRolesFromDatabase(Client* client, StringData dbname); +void logDropAllRolesFromDatabase(Client* client, const DatabaseName& dbname); /** * Logs the result of a grantRolesToRole command. @@ -342,7 +367,7 @@ void logImportCollection(Client* client, const NamespaceString& nsname); /** * Logs the result of a createDatabase command. */ -void logCreateDatabase(Client* client, StringData dbname); +void logCreateDatabase(Client* client, const DatabaseName& dbname); /** @@ -367,7 +392,7 @@ void logDropView(Client* client, /** * Logs the result of a dropDatabase command. */ -void logDropDatabase(Client* client, StringData dbname); +void logDropDatabase(Client* client, const DatabaseName& dbname); /** * Logs a collection rename event. diff --git a/src/mongo/db/auth/README.md b/src/mongo/db/auth/README.md index 5d1cdd32c1594..4d4be71e8e24b 100644 --- a/src/mongo/db/auth/README.md +++ b/src/mongo/db/auth/README.md @@ -9,6 +9,8 @@ - [SASL Supported Mechs](#sasl-supported-mechs) - [X509 Authentication](#x509-authentication) - [Cluster Authentication](#cluster-authentication) + - [X509 Intracluster Auth](#x509-intracluster-auth-and-member-certificate-rotation) + - [Keyfile Intracluster Auth](#keyfile-intracluster-auth) - [Localhost Auth Bypass](#localhost-auth-bypass) - [Authorization](#authorization) - [AuthName](#authname) (`UserName` and `RoleName`) @@ -203,11 +205,23 @@ The specific properties that each SASL mechanism provides is outlined in this ta certificate key exchange. When the peer certificate validation happens during the SSL handshake, an [`SSLPeerInfo`](https://github.com/mongodb/mongo/blob/r4.4.0/src/mongo/util/net/ssl_types.h#L113-L143) is created and attached to the transport layer SessionHandle. 
During `MONGODB-X509` auth, the server -grabs the client's username from the `SSLPeerInfo` struct and, if the client is a driver, verifies -that the client name matches the username provided by the command object. If the client is -performing intracluster authentication, see the details below in the authentication section and the -code comments -[here](https://github.com/mongodb/mongo/blob/r4.4.0/src/mongo/db/commands/authentication_commands.cpp#L74-L139). +first determines whether or not the client is a driver or a peer server. The server inspects the +following criteria in this order to determine whether the connecting client is a peer server node: +1. `net.tls.clusterAuthX509.attributes` is set on the server and the parsed certificate's subject name + contains all of the attributes and values specified in that option. +2. `net.tls.clusterAuthX509.extensionValue` is set on the server and the parsed certificate contains + the OID 1.3.6.1.4.1.34601.2.1.2 with a value matching the one specified in that option. This OID + is reserved for the MongoDB cluster membership extension. +3. Neither of the above options are set on the server and the parsed certificate's subject name contains + the same DC, O, and OU as the certificate the server presents to inbound connections (`tls.certificateKeyFile`). +4. `tlsClusterAuthX509Override.attributes` is set on the server and the parsed certificate's subject name + contains all of the attributes and values specified in that option. +5. `tlsClusterAuthX509Override.extensionValue` is set on the server and the parsed certificate contains + the OID 1.3.6.1.4.1.34601.2.1.2 with a value matching the one specified in that option. +If all of these conditions fail, then the server grabs the client's username from the `SSLPeerInfo` +struct and verifies that the client name matches the username provided by the command object and exists +in the `$external` database. In that case, the client is authenticated as that user in `$external`. +Otherwise, authentication fails with ErrorCodes.UserNotFound. ### Cluster Authentication @@ -217,9 +231,43 @@ a server, they can use any of the authentication mechanisms described [below in section](#sasl). When a mongod or a mongos needs to authenticate to a mongodb server, it does not pass in distinguishing user credentials to authenticate (all servers authenticate to other servers as the `__system` user), so most of the options described below will not necessarily work. However, -two options are available for authentication - keyfile auth and X509 auth. X509 auth is described in -more detail above, but a precondition to using it is having TLS enabled. - +two options are available for authentication - keyfile auth and X509 auth. + +#### X509 Intracluster Auth and Member Certificate Rotation +`X509` auth is described in more detail above, but a precondition to using it is having TLS enabled. +It is possible for customers to rotate their certificates or change the criteria that is used to +determine X.509 cluster membership without any downtime. When the server uses the default criteria +(matching DC, O, and OU), its certificates can be rotated via the following procedure: + +1. Update server nodes' config files to contain the old certificate subject DN in + `setParameter.tlsX509ClusterAuthDNOverride`. +2. Perform a rolling restart of server nodes so that they all load in the override value. +3. Update server nodes' config files to contain the new certificates in `net.tls.clusterFile` + and `net.tls.certificateKeyFile`. +4. 
Perform a rolling restart of server nodes. During this process, some nodes will use new certificates + while others will use old, but they will still all recognize each other as cluster members either + via the standard process or the override, respectively. +5. Remove `setParameter.tlsX509ClusterAuthDNOverride` from all server node config files. +6. Perform a rolling restart of server nodes so they stop treating clients presenting the old certificate + as peers. + +An administrator can update the criteria the server uses to determine cluster membership alongside +certificate rotation without downtime via the following procedure: +1. Update server nodes' config files to contain the old certificate subject DN attributes or extension + value in `setParameter.tlsClusterAuthX509Override` and the new certificate subject DN attributes + or extension value in `net.tls.clusterAuthX509.attributes` or `net.tls.clusterAuthX509.extensionValue`. +2. Perform a rolling restart of server nodes so that they all load in the override value and new + config options. +3. Update server nodes' config files to contain the new certificates in `net.tls.clusterFile` + and `net.tls.certificateKeyFile`. +4. Perform a rolling restart of server nodes so that they start using the new certificates. During + this process, some nodes will use new certificates while others will use old, but they will still + recognize each other via the new config option or the override. +5. Remove `setParameter.tlsClusterAuthX509Override` from all server node config files. +6. Perform a rolling restart of server nodes so they stop treating clients presenting certificates + meeting the old criteria as peers. + +#### Keyfile Intracluster Auth `keyfile` auth instructs servers to authenticate to each other using the `SCRAM-SHA-256` mechanism as the `local.__system` user whose password can be found in the named key file.
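As a rough illustration of the keyfile option just described, a member's configuration might look like the following minimal sketch; the file path and port are hypothetical placeholders, not values taken from this change:

```yaml
# Illustrative keyfile-based intracluster auth config (hypothetical paths/values).
security:
  keyFile: /etc/mongodb/keyfile   # same shared secret file distributed to every member
  clusterAuthMode: keyFile        # members authenticate to each other as local.__system
net:
  port: 27017
```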
A keyfile is a file stored on disk that servers load on startup, sending them when they behave as clients to another diff --git a/src/mongo/db/auth/SConscript b/src/mongo/db/auth/SConscript index 14e01e752c918..8588e6d9cbf6b 100644 --- a/src/mongo/db/auth/SConscript +++ b/src/mongo/db/auth/SConscript @@ -81,6 +81,7 @@ env.Library( '$BUILD_DIR/mongo/db/op_observer/op_observer', '$BUILD_DIR/mongo/db/op_observer/op_observer_util', '$BUILD_DIR/mongo/db/repl/oplog_entry', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', 'auth', ], ) @@ -95,6 +96,7 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/crypto/sha_block_${MONGO_CRYPTO}', + '$BUILD_DIR/mongo/db/server_base', 'auth', 'authentication_restriction', 'authprivilege', @@ -144,6 +146,7 @@ env.Library( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', 'auth', 'auth_options', 'authprivilege', @@ -237,10 +240,8 @@ env.Library( '$BUILD_DIR/mongo/db/catalog/document_validation', '$BUILD_DIR/mongo/db/common', '$BUILD_DIR/mongo/db/pipeline/lite_parsed_document_source', - '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/update/update_driver', 'auth', - 'authorization_manager_global', 'authprivilege', 'builtin_roles', 'user', @@ -248,6 +249,7 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/audit', + '$BUILD_DIR/mongo/db/server_base', ], ) @@ -259,8 +261,8 @@ env.Library( 'action_type.cpp', 'action_type.idl', 'authorization_contract.cpp', + 'parsed_privilege.idl', 'privilege.cpp', - 'privilege_parser.cpp', 'resource_pattern.cpp', ], LIBDEPS_PRIVATE=[ @@ -268,6 +270,7 @@ env.Library( '$BUILD_DIR/mongo/bson/mutable/mutable_bson', '$BUILD_DIR/mongo/db/common', '$BUILD_DIR/mongo/idl/idl_parser', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', ], ) @@ -490,6 +493,7 @@ env.Library( 'authz_session_external_state_mock.cpp', ], LIBDEPS=[ + '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/query_expressions', '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/update/update_driver', @@ -555,6 +559,7 @@ env.CppUnitTest( 'builtin_roles_test.cpp', 'oauth_discovery_factory_test.cpp', 'privilege_parser_test.cpp', + 'resource_pattern_search_list_test.cpp', 'restriction_test.cpp', 'sasl_authentication_session_test.cpp', 'sasl_mechanism_registry_test.cpp', @@ -573,7 +578,6 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/repl/oplog_interface_local', '$BUILD_DIR/mongo/db/repl/replmocks', '$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/transport/transport_layer_common', '$BUILD_DIR/mongo/transport/transport_layer_mock', '$BUILD_DIR/mongo/util/net/mock_http_client', diff --git a/src/mongo/db/auth/action_set.cpp b/src/mongo/db/auth/action_set.cpp index 37a4ede2b0d29..7b95ef8516001 100644 --- a/src/mongo/db/auth/action_set.cpp +++ b/src/mongo/db/auth/action_set.cpp @@ -27,15 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/action_set.h" - #include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/util/str.h" +#include "mongo/base/status_with.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type_gen.h" namespace mongo { @@ -93,29 +93,28 @@ bool ActionSet::isSupersetOf(const ActionSet& other) const { return (_actions & other._actions) == other._actions; } -Status ActionSet::parseActionSetFromStringVector(const std::vector& actionsVector, - ActionSet* result, - std::vector* unrecognizedActions) { - result->removeAllActions(); - for (StringData actionName : actionsVector) { - auto parseResult = parseActionFromString(actionName); - if (!parseResult.isOK()) { - const auto& status = parseResult.getStatus(); - if (status == ErrorCodes::FailedToParse) { - unrecognizedActions->push_back(std::string{actionName}); - } else { - invariant(status); - } - } else { - const auto& action = parseResult.getValue(); - if (action == ActionType::anyAction) { - result->addAllActions(); - return Status::OK(); +ActionSet ActionSet::parseFromStringVector(const std::vector& actions, + std::vector* unrecognizedActions) { + ActionSet ret; + + for (auto action : actions) { + auto swActionType = parseActionFromString(action); + if (!swActionType.isOK()) { + if ((swActionType.getStatus() == ErrorCodes::FailedToParse) && unrecognizedActions) { + unrecognizedActions->push_back(action.toString()); } - result->addAction(action); + continue; } + + if (swActionType.getValue() == ActionType::anyAction) { + ret.addAllActions(); + return ret; + } + + ret.addAction(swActionType.getValue()); } - return Status::OK(); + + return ret; } std::string ActionSet::toString() const { @@ -137,17 +136,16 @@ std::string ActionSet::toString() const { return str; } -std::vector ActionSet::getActionsAsStrings() const { - using mongo::toString; - std::vector result; +std::vector ActionSet::getActionsAsStringDatas() const { if (contains(ActionType::anyAction)) { - result.push_back(toString(ActionType::anyAction)); - return result; + return {ActionType_serializer(ActionType::anyAction)}; } + + std::vector result; for (size_t i = 0; i < kNumActionTypes; ++i) { auto action = static_cast(i); if (contains(action)) { - result.push_back(toString(action)); + result.push_back(toStringData(action)); } } return result; diff --git a/src/mongo/db/auth/action_set.h b/src/mongo/db/auth/action_set.h index 7d4d3599b9c73..2ae8dfd608df6 100644 --- a/src/mongo/db/auth/action_set.h +++ b/src/mongo/db/auth/action_set.h @@ -31,9 +31,11 @@ #include #include +#include #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/auth/action_type.h" namespace mongo { @@ -49,6 +51,11 @@ class ActionSet { ActionSet() = default; ActionSet(std::initializer_list actions); + // Parse a human-readable set of ActionTypes into a bitset of actions. + // unrecognizedActions will be populated with a copy of any unexpected action, if present. + static ActionSet parseFromStringVector(const std::vector& actions, + std::vector* unrecognizedActions = nullptr); + void addAction(ActionType action); void addAllActionsFromSet(const ActionSet& actionSet); void addAllActions(); @@ -80,15 +87,9 @@ class ActionSet { std::string toString() const; // Returns a vector of strings representing the actions in the ActionSet. 
- std::vector getActionsAsStrings() const; - - // Takes a vector of action type std::string representations and writes into *result an - // ActionSet of all valid actions encountered. - // If it encounters any actions that it doesn't recognize, will put those into - // *unrecognizedActions, while still returning the valid actions in *result, and returning OK. - static Status parseActionSetFromStringVector(const std::vector& actionsVector, - ActionSet* result, - std::vector* unrecognizedActions); + // The storage for these StringDatas comes from IDL constexpr definitions for + // ActionTypes and is therefore guaranteed for the life of the process. + std::vector getActionsAsStringDatas() const; friend bool operator==(const ActionSet& lhs, const ActionSet& rhs) { return lhs.equals(rhs); diff --git a/src/mongo/db/auth/action_set_test.cpp b/src/mongo/db/auth/action_set_test.cpp index 1c125f8a2eace..a67ab314d8b96 100644 --- a/src/mongo/db/auth/action_set_test.cpp +++ b/src/mongo/db/auth/action_set_test.cpp @@ -32,53 +32,58 @@ */ #include "mongo/db/auth/action_set.h" + +#include + #include "mongo/db/auth/action_type.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { TEST(ActionSetTest, ParseActionSetFromStringVector) { - ActionSet result; - std::vector actions1 = {"find", "insert", "update", "remove"}; - std::vector actions2 = {"update", "find", "remove", "insert"}; + const std::vector actions1 = {"find"_sd, "insert"_sd, "update"_sd, "remove"_sd}; + const std::vector actions2 = {"update"_sd, "find"_sd, "remove"_sd, "insert"_sd}; std::vector unrecognized; - ASSERT_OK(ActionSet::parseActionSetFromStringVector(actions1, &result, &unrecognized)); - ASSERT_TRUE(result.contains(ActionType::find)); - ASSERT_TRUE(result.contains(ActionType::insert)); - ASSERT_TRUE(result.contains(ActionType::update)); - ASSERT_TRUE(result.contains(ActionType::remove)); + auto set1 = ActionSet::parseFromStringVector(actions1, &unrecognized); + ASSERT_TRUE(set1.contains(ActionType::find)); + ASSERT_TRUE(set1.contains(ActionType::insert)); + ASSERT_TRUE(set1.contains(ActionType::update)); + ASSERT_TRUE(set1.contains(ActionType::remove)); ASSERT_TRUE(unrecognized.empty()); // Order of the strings doesn't matter - ASSERT_OK(ActionSet::parseActionSetFromStringVector(actions2, &result, &unrecognized)); - ASSERT_TRUE(result.contains(ActionType::find)); - ASSERT_TRUE(result.contains(ActionType::insert)); - ASSERT_TRUE(result.contains(ActionType::update)); - ASSERT_TRUE(result.contains(ActionType::remove)); + auto set2 = ActionSet::parseFromStringVector(actions2, &unrecognized); + ASSERT_TRUE(set2.contains(ActionType::find)); + ASSERT_TRUE(set2.contains(ActionType::insert)); + ASSERT_TRUE(set2.contains(ActionType::update)); + ASSERT_TRUE(set2.contains(ActionType::remove)); ASSERT_TRUE(unrecognized.empty()); - ASSERT_OK(ActionSet::parseActionSetFromStringVector({"find"}, &result, &unrecognized)); - - ASSERT_TRUE(result.contains(ActionType::find)); - ASSERT_FALSE(result.contains(ActionType::insert)); - ASSERT_FALSE(result.contains(ActionType::update)); - ASSERT_FALSE(result.contains(ActionType::remove)); + // Only one ActionType + auto findSet = ActionSet::parseFromStringVector({"find"}, &unrecognized); + ASSERT_TRUE(findSet.contains(ActionType::find)); + ASSERT_FALSE(findSet.contains(ActionType::insert)); + ASSERT_FALSE(findSet.contains(ActionType::update)); + ASSERT_FALSE(findSet.contains(ActionType::remove)); 
ASSERT_TRUE(unrecognized.empty()); - ASSERT_OK(ActionSet::parseActionSetFromStringVector({""}, &result, &unrecognized)); - - ASSERT_FALSE(result.contains(ActionType::find)); - ASSERT_FALSE(result.contains(ActionType::insert)); - ASSERT_FALSE(result.contains(ActionType::update)); - ASSERT_FALSE(result.contains(ActionType::remove)); + // Empty string as an ActionType + auto nonEmptyBlankSet = ActionSet::parseFromStringVector({""}, &unrecognized); + ASSERT_FALSE(nonEmptyBlankSet.contains(ActionType::find)); + ASSERT_FALSE(nonEmptyBlankSet.contains(ActionType::insert)); + ASSERT_FALSE(nonEmptyBlankSet.contains(ActionType::update)); + ASSERT_FALSE(nonEmptyBlankSet.contains(ActionType::remove)); ASSERT_TRUE(unrecognized.size() == 1); ASSERT_TRUE(unrecognized.front().empty()); - unrecognized.clear(); - ASSERT_OK(ActionSet::parseActionSetFromStringVector({"INVALID INPUT"}, &result, &unrecognized)); - ASSERT_TRUE(unrecognized.size() == 1); + + // Unknown ActionType + auto unknownSet = ActionSet::parseFromStringVector({"INVALID INPUT"}, &unrecognized); + ASSERT_TRUE(unknownSet.empty()); + ASSERT_EQ(unrecognized.size(), 1UL); ASSERT_TRUE(unrecognized.front() == "INVALID INPUT"); } @@ -109,15 +114,9 @@ TEST(ActionSetTest, ToString) { } TEST(ActionSetTest, IsSupersetOf) { - ActionSet set1, set2, set3; - std::vector actions1 = {"find", "update", "insert"}; - std::vector actions2 = {"find", "update", "remove"}; - std::vector actions3 = {"find", "update"}; - std::vector unrecognized; - - ASSERT_OK(ActionSet::parseActionSetFromStringVector(actions1, &set1, &unrecognized)); - ASSERT_OK(ActionSet::parseActionSetFromStringVector(actions2, &set2, &unrecognized)); - ASSERT_OK(ActionSet::parseActionSetFromStringVector(actions3, &set3, &unrecognized)); + ActionSet set1({ActionType::find, ActionType::update, ActionType::insert}); + ActionSet set2({ActionType::find, ActionType::update, ActionType::remove}); + ActionSet set3({ActionType::find, ActionType::update}); ASSERT_FALSE(set1.isSupersetOf(set2)); ASSERT_TRUE(set1.isSupersetOf(set3)); @@ -130,11 +129,7 @@ TEST(ActionSetTest, IsSupersetOf) { } TEST(ActionSetTest, anyAction) { - ActionSet set; - std::vector actions = {"anyAction"}; - std::vector unrecognized; - - ASSERT_OK(ActionSet::parseActionSetFromStringVector(actions, &set, &unrecognized)); + ActionSet set{ActionType::anyAction}; ASSERT_TRUE(set.contains(ActionType::find)); ASSERT_TRUE(set.contains(ActionType::insert)); ASSERT_TRUE(set.contains(ActionType::anyAction)); @@ -188,5 +183,15 @@ TEST(ActionSetTest, constructor) { ASSERT_TRUE(set3.contains(ActionType::insert)); } +TEST(ActionSetTest, DuplicateActions) { + auto fromString = ActionSet::parseFromStringVector({"find"_sd, "find"_sd, "insert"_sd}); + ASSERT_TRUE(fromString.contains(ActionType::find)); + ASSERT_TRUE(fromString.contains(ActionType::insert)); + + ActionSet fromEnum({ActionType::find, ActionType::find, ActionType::insert}); + ASSERT_TRUE(fromEnum.contains(ActionType::find)); + ASSERT_TRUE(fromEnum.contains(ActionType::insert)); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/auth/action_type.cpp b/src/mongo/db/auth/action_type.cpp index 3b6604182052c..e6ab322bcdab2 100644 --- a/src/mongo/db/auth/action_type.cpp +++ b/src/mongo/db/auth/action_type.cpp @@ -27,17 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/action_type.h" - #include #include #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/action_type.h b/src/mongo/db/auth/action_type.h index 4cc4d67798a79..77bbd6c64d4d1 100644 --- a/src/mongo/db/auth/action_type.h +++ b/src/mongo/db/auth/action_type.h @@ -37,6 +37,7 @@ #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/auth/action_type_gen.h" namespace mongo { diff --git a/src/mongo/db/auth/action_type.idl b/src/mongo/db/auth/action_type.idl index 26d993be4c1fb..6837625e6f18c 100644 --- a/src/mongo/db/auth/action_type.idl +++ b/src/mongo/db/auth/action_type.idl @@ -67,6 +67,7 @@ enums: checkMetadataConsistency : "checkMetadataConsistency" cleanupOrphaned : "cleanupOrphaned" clearJumboFlag : "clearJumboFlag" + cleanupStructuredEncryptionData: "cleanupStructuredEncryptionData" closeAllDatabases : "closeAllDatabases" # Deprecated (backwards compatibility) collMod : "collMod" collStats : "collStats" @@ -148,7 +149,7 @@ enums: planCacheIndexFilter : "planCacheIndexFilter" # view/update index filters planCacheRead : "planCacheRead" # view contents of plan cache planCacheWrite : "planCacheWrite" # clear cache, drop cache entry, pin/unpin/shun plans - telemetryRead: "telemetryRead" # view contents of telemetry store + queryStatsRead: "queryStatsRead" # view contents of queryStats store refineCollectionShardKey : "refineCollectionShardKey" reIndex : "reIndex" remove : "remove" @@ -190,7 +191,7 @@ enums: top : "top" touch : "touch" trafficRecord : "trafficRecord" - transitionToCatalogShard : "transitionToCatalogShard" + transitionFromDedicatedConfigServer : "transitionFromDedicatedConfigServer" transitionToDedicatedConfigServer : "transitionToDedicatedConfigServer" unlock : "unlock" useTenant : "useTenant" @@ -242,6 +243,7 @@ enums: - analyze - bypassDocumentValidation - changeStream + - cleanupStructuredEncryptionData - collMod - collStats - compact @@ -290,6 +292,7 @@ enums: - analyze - bypassDocumentValidation - changeStream + - cleanupStructuredEncryptionData - collMod - collStats - compact @@ -363,6 +366,7 @@ enums: - analyze - bypassDocumentValidation - changeStream + - cleanupStructuredEncryptionData - collMod - collStats - compact diff --git a/src/mongo/db/auth/address_restriction.cpp b/src/mongo/db/auth/address_restriction.cpp index c6aa462dcee35..17ece7a59fc33 100644 --- a/src/mongo/db/auth/address_restriction.cpp +++ b/src/mongo/db/auth/address_restriction.cpp @@ -27,13 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include +#include +#include + +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/auth/address_restriction.h" #include "mongo/db/auth/address_restriction_gen.h" -#include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" constexpr mongo::StringData mongo::address_restriction_detail::ClientSource::label; constexpr mongo::StringData mongo::address_restriction_detail::ClientSource::field; diff --git a/src/mongo/db/auth/address_restriction.h b/src/mongo/db/auth/address_restriction.h index 0bdce14580ee8..77007369cf1a8 100644 --- a/src/mongo/db/auth/address_restriction.h +++ b/src/mongo/db/auth/address_restriction.h @@ -29,19 +29,25 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/restriction.h" #include "mongo/db/auth/restriction_environment.h" #include "mongo/db/auth/restriction_set.h" #include "mongo/util/net/cidr.h" -#include -#include -#include - namespace mongo { namespace address_restriction_detail { diff --git a/src/mongo/db/auth/address_restriction_test.cpp b/src/mongo/db/auth/address_restriction_test.cpp index 56f881b8faef6..b1be9cee50c22 100644 --- a/src/mongo/db/auth/address_restriction_test.cpp +++ b/src/mongo/db/auth/address_restriction_test.cpp @@ -27,10 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/auth/address_restriction.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/net/sockaddr.h" #include "mongo/util/net/socket_utils.h" diff --git a/src/mongo/db/auth/auth_decorations.cpp b/src/mongo/db/auth/auth_decorations.cpp index c4eb1af64af7b..71af7ca9c8de6 100644 --- a/src/mongo/db/auth/auth_decorations.cpp +++ b/src/mongo/db/auth/auth_decorations.cpp @@ -27,20 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include +#include #include +#include + +#include +#include "mongo/base/string_data.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/auth/sasl_options.h" #include "mongo/db/client.h" #include "mongo/db/commands/authentication_commands.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/sequence_util.h" +#include "mongo/util/synchronized_value.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/auth_identifier_test.cpp b/src/mongo/db/auth/auth_identifier_test.cpp index f8905afeefcf0..ce49acd67393b 100644 --- a/src/mongo/db/auth/auth_identifier_test.cpp +++ b/src/mongo/db/auth/auth_identifier_test.cpp @@ -31,17 +31,31 @@ * Unit tests of the UserName and RoleName types. 
*/ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include #include +#include #include "mongo/base/status.h" #include "mongo/base/string_data.h" -#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user_name.h" #include "mongo/db/tenant_id.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/auth_name.cpp b/src/mongo/db/auth/auth_name.cpp index 4eb73e327be2d..5805f7c5801a8 100644 --- a/src/mongo/db/auth/auth_name.cpp +++ b/src/mongo/db/auth/auth_name.cpp @@ -29,8 +29,20 @@ #include "mongo/db/auth/auth_name.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user_name.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -164,6 +176,15 @@ BSONObj AuthName::toBSON(bool encodeTenant) const { return bob.obj(); } +template +std::size_t AuthName::getBSONObjSize() const { + return 4UL + // BSONObj size + 1UL + T::kFieldName.size() + 1UL + // FieldName elem type, FieldName, terminating NULL. + 4UL + getName().size() + 1UL + // Length of name data, name data, terminating NULL. + 1UL + ("db"_sd).size() + 1UL + // DB field elem type, "db", terminating NULL. + 4UL + getDB().size() + 1UL + // DB value length, DB value, terminating NULL. + 1UL; // EOD marker. +} // Materialize the types we care about. template class AuthName; diff --git a/src/mongo/db/auth/auth_name.h b/src/mongo/db/auth/auth_name.h index 6c5b052e5c7cc..2198361da31de 100644 --- a/src/mongo/db/auth/auth_name.h +++ b/src/mongo/db/auth/auth_name.h @@ -29,19 +29,28 @@ #pragma once +#include +#include #include +#include +#include +#include #include #include #include +#include +#include #include "mongo/base/clonable_ptr.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/database_name.h" #include "mongo/db/tenant_id.h" #include "mongo/stdx/variant.h" +#include "mongo/util/str.h" namespace mongo { @@ -96,6 +105,8 @@ class AuthName { void appendToBSON(BSONObjBuilder* bob, bool encodeTenant = false) const; BSONObj toBSON(bool encodeTenant = false) const; + std::size_t getBSONObjSize() const; + /** * Gets the name part of a AuthName. */ @@ -111,7 +122,7 @@ class AuthName { } DatabaseName getDatabaseName() const { - return DatabaseName(_tenant, _db); + return DatabaseName::createDatabaseNameForAuth(_tenant, _db); } /** diff --git a/src/mongo/db/auth/auth_op_observer.cpp b/src/mongo/db/auth/auth_op_observer.cpp index 1a61ba188dd22..e2d0a4b80c820 100644 --- a/src/mongo/db/auth/auth_op_observer.cpp +++ b/src/mongo/db/auth/auth_op_observer.cpp @@ -27,22 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/auth/auth_op_observer.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/audit.h" +#include "mongo/db/auth/auth_op_observer.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/op_observer/op_observer_util.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace { -const auto documentIdDecoration = OperationContext::declareDecoration(); +const auto documentIdDecoration = OplogDeleteEntryArgs::declareDecoration(); } // namespace @@ -55,7 +65,8 @@ void AuthOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { for (auto it = first; it != last; it++) { audit::logInsertOperation(opCtx->getClient(), coll->ns(), it->doc); AuthorizationManager::get(opCtx->getServiceContext()) @@ -63,7 +74,9 @@ void AuthOpObserver::onInserts(OperationContext* opCtx, } } -void AuthOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) { +void AuthOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.updateArgs->update.isEmpty()) { return; } @@ -76,19 +89,22 @@ void AuthOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg void AuthOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { audit::logRemoveOperation(opCtx->getClient(), coll->ns(), doc); // Extract the _id field from the document. If it does not have an _id, use the // document itself as the _id. - documentIdDecoration(opCtx) = doc["_id"] ? doc["_id"].wrap() : doc; + documentIdDecoration(args) = doc["_id"] ? doc["_id"].wrap() : doc; } void AuthOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { - auto& documentId = documentIdDecoration(opCtx); + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { + auto& documentId = documentIdDecoration(args); invariant(!documentId.isEmpty()); AuthorizationManager::get(opCtx->getServiceContext()) ->logOp(opCtx, "d", coll->ns(), documentId, nullptr); @@ -119,7 +135,7 @@ void AuthOpObserver::onCollMod(OperationContext* opCtx, const auto cmdNss = nss.getCommandNS(); // Create the 'o' field object. 
- const auto cmdObj = repl::makeCollModCmdObj(collModCmd, oldCollOptions, indexInfo); + const auto cmdObj = makeCollModCmdObj(collModCmd, oldCollOptions, indexInfo); AuthorizationManager::get(opCtx->getServiceContext()) ->logOp(opCtx, "c", cmdNss, cmdObj, nullptr); @@ -137,7 +153,8 @@ repl::OpTime AuthOpObserver::onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { const auto cmdNss = collectionName.getCommandNS(); const auto cmdObj = BSON("drop" << collectionName.coll()); @@ -168,8 +185,8 @@ void AuthOpObserver::postRenameCollection(OperationContext* const opCtx, const auto cmdNss = fromCollection.getCommandNS(); BSONObjBuilder builder; - builder.append("renameCollection", fromCollection.ns()); - builder.append("to", toCollection.ns()); + builder.append("renameCollection", NamespaceStringUtil::serialize(fromCollection)); + builder.append("to", NamespaceStringUtil::serialize(toCollection)); builder.append("stayTemp", stayTemp); if (dropTargetUUID) { dropTargetUUID->appendToBuilder(&builder, "dropTarget"); @@ -187,7 +204,8 @@ void AuthOpObserver::onRenameCollection(OperationContext* const opCtx, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) { + bool stayTemp, + bool markFromMigrate) { postRenameCollection(opCtx, fromCollection, toCollection, uuid, dropTargetUUID, stayTemp); } @@ -222,8 +240,8 @@ void AuthOpObserver::onEmptyCapped(OperationContext* opCtx, ->logOp(opCtx, "c", cmdNss, cmdObj, nullptr); } -void AuthOpObserver::_onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) { +void AuthOpObserver::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { // Invalidate any in-memory auth data if necessary. const auto& rollbackNamespaces = rbInfo.rollbackNamespaces; if (rollbackNamespaces.count(NamespaceString::kServerConfigurationNamespace) == 1 || diff --git a/src/mongo/db/auth/auth_op_observer.h b/src/mongo/db/auth/auth_op_observer.h index c3ef4a6ecb1f4..cce625f816af1 100644 --- a/src/mongo/db/auth/auth_op_observer.h +++ b/src/mongo/db/auth/auth_op_observer.h @@ -29,7 +29,24 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -37,7 +54,7 @@ namespace mongo { * OpObserver for authentication. Observes all secondary replication traffic and filters down to * relevant entries for authentication. 
*/ -class AuthOpObserver final : public OpObserver { +class AuthOpObserver final : public OpObserverNoop { AuthOpObserver(const AuthOpObserver&) = delete; AuthOpObserver& operator=(const AuthOpObserver&) = delete; @@ -45,91 +62,29 @@ class AuthOpObserver final : public OpObserver { AuthOpObserver(); ~AuthOpObserver(); - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} - void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) final; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final{}; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void onCreateCollection(OperationContext* opCtx, const CollectionPtr& coll, @@ -148,12 +103,12 @@ class AuthOpObserver final : public OpObserver { void onDropDatabase(OperationContext* opCtx, const 
DatabaseName& dbName) final; - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; + CollectionDropType dropType, + bool markFromMigrate) final; void onDropIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -161,14 +116,14 @@ class AuthOpObserver final : public OpObserver { const std::string& indexName, const BSONObj& indexInfo) final; - using OpObserver::onRenameCollection; void onRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) final; + bool stayTemp, + bool markFromMigrate) final; void onImportCollection(OperationContext* opCtx, const UUID& importUUID, @@ -179,22 +134,13 @@ class AuthOpObserver final : public OpObserver { const BSONObj& storageMetadata, bool isDryRun) final; - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } void postRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, bool stayTemp) final; + void onApplyOps(OperationContext* opCtx, const DatabaseName& dbName, const BSONObj& applyOpCmd) final; @@ -203,56 +149,7 @@ class AuthOpObserver final : public OpObserver { const NamespaceString& collectionName, const UUID& uuid) final; - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) final {} - - // Contains the fields of the document that are in the collection's shard key, and "_id". 
- static BSONObj getDocumentKey(OperationContext* opCtx, - NamespaceString const& nss, - BSONObj const& doc); - -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo); + void onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; }; } // namespace mongo diff --git a/src/mongo/db/auth/auth_op_observer_test.cpp b/src/mongo/db/auth/auth_op_observer_test.cpp index 4912b71a13322..88171e5273f26 100644 --- a/src/mongo/db/auth/auth_op_observer_test.cpp +++ b/src/mongo/db/auth/auth_op_observer_test.cpp @@ -27,28 +27,42 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/auth/auth_op_observer.h" + +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/concurrency/locker_noop.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/keys_collection_client_sharded.h" -#include "mongo/db/keys_collection_manager.h" -#include "mongo/db/logical_time_validator.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" -#include "mongo/db/repl/oplog_interface_local.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -77,7 +91,7 @@ class AuthOpObserverTest : public ServiceContextMongoDTest { ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY)); // Create test collection - writeConflictRetry(opCtx.get(), "createColl", _nss.ns(), [&] { + writeConflictRetry(opCtx.get(), "createColl", _nss, [&] { opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp); opCtx->recoveryUnit()->abandonSnapshot(); @@ -146,29 +160,30 @@ TEST_F(AuthOpObserverTest, MultipleAboutToDeleteAndOnDelete) { NamespaceString nss = NamespaceString::createNamespaceString_forTest("test", "coll"); WriteUnitOfWork wunit(opCtx.get()); AutoGetCollection autoColl(opCtx.get(), nss, MODE_IX); - opObserver.aboutToDelete(opCtx.get(), *autoColl, BSON("_id" << 1)); - opObserver.onDelete(opCtx.get(), *autoColl, {}, {}); - opObserver.aboutToDelete(opCtx.get(), *autoColl, BSON("_id" << 1)); - opObserver.onDelete(opCtx.get(), *autoColl, {}, {}); + OplogDeleteEntryArgs args; + opObserver.aboutToDelete(opCtx.get(), *autoColl, BSON("_id" << 
1), &args); + opObserver.onDelete(opCtx.get(), *autoColl, {}, args); + opObserver.aboutToDelete(opCtx.get(), *autoColl, BSON("_id" << 1), &args); + opObserver.onDelete(opCtx.get(), *autoColl, {}, args); } DEATH_TEST_F(AuthOpObserverTest, AboutToDeleteMustPreceedOnDelete, "invariant") { AuthOpObserver opObserver; auto opCtx = cc().makeOperationContext(); - cc().swapLockState(std::make_unique()); NamespaceString nss = NamespaceString::createNamespaceString_forTest("test", "coll"); AutoGetCollection autoColl(opCtx.get(), nss, MODE_IX); - opObserver.onDelete(opCtx.get(), *autoColl, {}, {}); + OplogDeleteEntryArgs args; + opObserver.onDelete(opCtx.get(), *autoColl, {}, args); } DEATH_TEST_F(AuthOpObserverTest, EachOnDeleteRequiresAboutToDelete, "invariant") { AuthOpObserver opObserver; auto opCtx = cc().makeOperationContext(); - cc().swapLockState(std::make_unique()); AutoGetCollection autoColl(opCtx.get(), _nss, MODE_IX); - opObserver.aboutToDelete(opCtx.get(), *autoColl, {}); - opObserver.onDelete(opCtx.get(), *autoColl, {}, {}); - opObserver.onDelete(opCtx.get(), *autoColl, {}, {}); + OplogDeleteEntryArgs args; + opObserver.aboutToDelete(opCtx.get(), *autoColl, {}, &args); + opObserver.onDelete(opCtx.get(), *autoColl, {}, args); + opObserver.onDelete(opCtx.get(), *autoColl, {}, args); } } // namespace diff --git a/src/mongo/db/auth/auth_types.idl b/src/mongo/db/auth/auth_types.idl index 032ec847055f4..bbc2c0a80109d 100644 --- a/src/mongo/db/auth/auth_types.idl +++ b/src/mongo/db/auth/auth_types.idl @@ -59,13 +59,6 @@ types: deserializer: "mongo::RoleNameOrString::parseFromBSON" serializer: "mongo::RoleNameOrString::serializeToBSON" - Privilege: - bson_serialization_type: object - description: "A struct representing a privilege grant" - cpp_type: "Privilege" - deserializer: "mongo::Privilege::fromBSON" - serializer: "mongo::Privilege::toBSON" - structs: authLocalGetUserFailPoint: description: Data for authLocalGetUser failpoint diff --git a/src/mongo/db/auth/authentication_session.cpp b/src/mongo/db/auth/authentication_session.cpp index d75eb8cb8eee6..fdbaa0873603d 100644 --- a/src/mongo/db/auth/authentication_session.cpp +++ b/src/mongo/db/auth/authentication_session.cpp @@ -29,12 +29,31 @@ #include "mongo/db/auth/authentication_session.h" + +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" #include "mongo/client/authenticate.h" #include "mongo/db/audit.h" -#include "mongo/db/auth/authentication_metrics.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/client.h" +#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/connection_health_metrics_parameter_gen.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl diff --git a/src/mongo/db/auth/authentication_session.h b/src/mongo/db/auth/authentication_session.h index f06d6d304dc41..99436a9170ad4 100644 --- a/src/mongo/db/auth/authentication_session.h +++ b/src/mongo/db/auth/authentication_session.h @@ -29,14 +29,25 @@ #pragma once +#include +#include +#include #include #include +#include -#include - +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include 
"mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/sasl_mechanism_registry.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/stats/counters.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/timer.h" namespace mongo { diff --git a/src/mongo/db/auth/authentication_session_test.cpp b/src/mongo/db/auth/authentication_session_test.cpp index 18caaaeb94604..80a48ce988f08 100644 --- a/src/mongo/db/auth/authentication_session_test.cpp +++ b/src/mongo/db/auth/authentication_session_test.cpp @@ -28,8 +28,21 @@ */ #include "mongo/db/auth/authentication_session.h" + +#include +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/concurrency/thread_name.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/authorization_checks.cpp b/src/mongo/db/auth/authorization_checks.cpp index ac37ab4b000bb..d1b7c85ffff5c 100644 --- a/src/mongo/db/auth/authorization_checks.cpp +++ b/src/mongo/db/auth/authorization_checks.cpp @@ -27,14 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_checks.h" - +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/commands/create_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/pipeline/aggregation_request_helper.h" -#include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { namespace auth { @@ -77,21 +95,23 @@ Status checkAuthForFind(AuthorizationSession* authSession, bool hasTerm) { if (MONGO_unlikely(ns.isCommand())) { return Status(ErrorCodes::InternalError, - str::stream() << "Checking query auth on command namespace " << ns.ns()); + str::stream() << "Checking query auth on command namespace " + << ns.toStringForErrorMsg()); } if (!authSession->isAuthorizedForActionsOnNamespace(ns, ActionType::find)) { return Status(ErrorCodes::Unauthorized, - str::stream() << "not authorized for query on " << ns.ns()); + str::stream() << "not authorized for query on " << ns.toStringForErrorMsg()); } // Only internal clients (such as other nodes in a replica set) are allowed to use // the 'term' field in a find operation. Use of this field could trigger changes // in the receiving server's replication state and should be protected. 
if (hasTerm && - !authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + !authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(ns.tenantId()), ActionType::internal)) { return Status(ErrorCodes::Unauthorized, - str::stream() << "not authorized for query with term on " << ns.ns()); + str::stream() + << "not authorized for query with term on " << ns.toStringForErrorMsg()); } return Status::OK(); @@ -113,10 +133,11 @@ Status checkAuthForGetMore(AuthorizationSession* authSession, // the 'term' field in a getMore operation. Use of this field could trigger changes // in the receiving server's replication state and should be protected. if (hasTerm && - !authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + !authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(ns.tenantId()), ActionType::internal)) { return Status(ErrorCodes::Unauthorized, - str::stream() << "not authorized for getMore with term on " << ns.ns()); + str::stream() << "not authorized for getMore with term on " + << ns.toStringForErrorMsg()); } return Status::OK(); @@ -131,7 +152,7 @@ Status checkAuthForInsert(AuthorizationSession* authSession, } if (!authSession->isAuthorizedForActionsOnNamespace(ns, required)) { return Status(ErrorCodes::Unauthorized, - str::stream() << "not authorized for insert on " << ns.ns()); + str::stream() << "not authorized for insert on " << ns.toStringForErrorMsg()); } return Status::OK(); @@ -157,7 +178,8 @@ Status checkAuthForUpdate(AuthorizationSession* authSession, if (!authSession->isAuthorizedForActionsOnNamespace(ns, required)) { return Status(ErrorCodes::Unauthorized, - str::stream() << "not authorized for " << operationType << " on " << ns.ns()); + str::stream() << "not authorized for " << operationType << " on " + << ns.toStringForErrorMsg()); } return Status::OK(); @@ -169,7 +191,8 @@ Status checkAuthForDelete(AuthorizationSession* authSession, const BSONObj& query) { if (!authSession->isAuthorizedForActionsOnNamespace(ns, ActionType::remove)) { return Status(ErrorCodes::Unauthorized, - str::stream() << "not authorized to remove from " << ns.ns()); + str::stream() + << "not authorized to remove from " << ns.toStringForErrorMsg()); } return Status::OK(); } @@ -177,8 +200,8 @@ Status checkAuthForDelete(AuthorizationSession* authSession, Status checkAuthForKillCursors(AuthorizationSession* authSession, const NamespaceString& ns, const boost::optional& cursorOwner) { - if (authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::killAnyCursor)) { + if (authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(ns.tenantId()), ActionType::killAnyCursor)) { return Status::OK(); } @@ -188,7 +211,7 @@ Status checkAuthForKillCursors(AuthorizationSession* authSession, ResourcePattern target; if (ns.isListCollectionsCursorNS()) { - target = ResourcePattern::forDatabaseName(ns.db()); + target = ResourcePattern::forDatabaseName(ns.dbName()); } else { target = ResourcePattern::forExactNamespace(ns); } @@ -198,7 +221,7 @@ Status checkAuthForKillCursors(AuthorizationSession* authSession, } return Status(ErrorCodes::Unauthorized, - str::stream() << "not authorized to kill cursor on " << ns.ns()); + str::stream() << "not authorized to kill cursor on " << ns.toStringForErrorMsg()); } Status checkAuthForCreate(OperationContext* opCtx, @@ -284,7 +307,7 @@ StatusWith 
getPrivilegesForAggregate(AuthorizationSession* auth bool isMongos) { if (!nss.isValid()) { return Status(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid input namespace, " << nss.ns()); + str::stream() << "Invalid input namespace, " << nss.toStringForErrorMsg()); } PrivilegeVector privileges; diff --git a/src/mongo/db/auth/authorization_checks.h b/src/mongo/db/auth/authorization_checks.h index 10979dc263c00..02806f1eec485 100644 --- a/src/mongo/db/auth/authorization_checks.h +++ b/src/mongo/db/auth/authorization_checks.h @@ -30,11 +30,19 @@ #pragma once +#include + #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/aggregate_command_gen.h" diff --git a/src/mongo/db/auth/authorization_contract.cpp b/src/mongo/db/auth/authorization_contract.cpp index 18b5dfd24da78..01ed08276a0a4 100644 --- a/src/mongo/db/auth/authorization_contract.cpp +++ b/src/mongo/db/auth/authorization_contract.cpp @@ -30,11 +30,19 @@ #include "mongo/db/auth/authorization_contract.h" -#include "mongo/bson/bsontypes.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/access_checks_gen.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/action_type_gen.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/debug_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -52,6 +60,10 @@ void AuthorizationContract::clear() { } void AuthorizationContract::addAccessCheck(AccessCheckEnum check) { + if (!_isTestModeEnabled) { + return; + } + stdx::lock_guard lck(_mutex); _checks.set(static_cast(check), true); @@ -64,6 +76,10 @@ bool AuthorizationContract::hasAccessCheck(AccessCheckEnum check) const { } void AuthorizationContract::addPrivilege(const Privilege& p) { + if (!_isTestModeEnabled) { + return; + } + stdx::lock_guard lck(_mutex); auto matchType = p.getResourcePattern().matchType(); diff --git a/src/mongo/db/auth/authorization_contract.h b/src/mongo/db/auth/authorization_contract.h index f5400557d3c39..460a21fd64653 100644 --- a/src/mongo/db/auth/authorization_contract.h +++ b/src/mongo/db/auth/authorization_contract.h @@ -36,7 +36,9 @@ #include "mongo/db/auth/access_checks_gen.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/action_type_gen.h" #include "mongo/db/auth/privilege.h" +#include "mongo/platform/mutex.h" namespace mongo { @@ -56,6 +58,7 @@ namespace mongo { class AuthorizationContract { public: AuthorizationContract() = default; + AuthorizationContract(bool isTestModeEnabled) : _isTestModeEnabled(isTestModeEnabled){}; template AuthorizationContract(const Checks& checks, const Privileges& privileges) { @@ -110,6 +113,9 @@ class AuthorizationContract { // Set of privileges performed per resource pattern type std::array _privilegeChecks; + + // If false accounting and mutex guards are disabled + bool _isTestModeEnabled{true}; }; } // namespace mongo diff --git 
a/src/mongo/db/auth/authorization_contract_test.cpp b/src/mongo/db/auth/authorization_contract_test.cpp index 733fe27bb187d..3de4c4c0704ab 100644 --- a/src/mongo/db/auth/authorization_contract_test.cpp +++ b/src/mongo/db/auth/authorization_contract_test.cpp @@ -27,12 +27,21 @@ * it in the license file. */ +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/auth/access_checks_gen.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_contract.h" - #include "mongo/db/auth/privilege.h" -#include "mongo/unittest/unittest.h" -#include +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -47,7 +56,8 @@ TEST(AuthContractTest, Basic) { enableShardingActions.addAction(ActionType::enableSharding); enableShardingActions.addAction(ActionType::refineCollectionShardKey); enableShardingActions.addAction(ActionType::reshardCollection); - ac.addPrivilege(Privilege(ResourcePattern::forAnyNormalResource(), enableShardingActions)); + ac.addPrivilege( + Privilege(ResourcePattern::forAnyNormalResource(boost::none), enableShardingActions)); ASSERT_TRUE(ac.hasAccessCheck(AccessCheckEnum::kIsAuthenticated)); ASSERT_TRUE(ac.hasAccessCheck(AccessCheckEnum::kIsCoAuthorized)); @@ -56,20 +66,20 @@ TEST(AuthContractTest, Basic) { ASSERT_TRUE(ac.hasPrivileges( - Privilege(ResourcePattern::forAnyNormalResource(), ActionType::enableSharding))); - ASSERT_TRUE(ac.hasPrivileges( - Privilege(ResourcePattern::forAnyNormalResource(), ActionType::refineCollectionShardKey))); - ASSERT_TRUE(ac.hasPrivileges( - Privilege(ResourcePattern::forAnyNormalResource(), ActionType::reshardCollection))); + Privilege(ResourcePattern::forAnyNormalResource(boost::none), ActionType::enableSharding))); + ASSERT_TRUE(ac.hasPrivileges(Privilege(ResourcePattern::forAnyNormalResource(boost::none), + ActionType::refineCollectionShardKey))); + ASSERT_TRUE(ac.hasPrivileges(Privilege(ResourcePattern::forAnyNormalResource(boost::none), + ActionType::reshardCollection))); - ASSERT_FALSE( - ac.hasPrivileges(Privilege(ResourcePattern::forAnyNormalResource(), ActionType::shutdown))); ASSERT_FALSE(ac.hasPrivileges( - Privilege(ResourcePattern::forClusterResource(), ActionType::enableSharding))); + Privilege(ResourcePattern::forAnyNormalResource(boost::none), ActionType::shutdown))); + ASSERT_FALSE(ac.hasPrivileges( + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::enableSharding))); ASSERT_TRUE(ac.hasPrivileges( - Privilege(ResourcePattern::forAnyNormalResource(), enableShardingActions))); + Privilege(ResourcePattern::forAnyNormalResource(boost::none), enableShardingActions))); ASSERT_TRUE(ac.contains(ac)); } @@ -103,7 +113,8 @@ TEST(AuthContractTest, DifferentAccessCheck) { TEST(AuthContractTest, SimplePrivilege) { AuthorizationContract ac; - ac.addPrivilege(Privilege(ResourcePattern::forAnyNormalResource(), ActionType::enableSharding)); + ac.addPrivilege( + Privilege(ResourcePattern::forAnyNormalResource(boost::none), ActionType::enableSharding)); AuthorizationContract empty; @@ -116,10 +127,11 @@ TEST(AuthContractTest, DifferentResoucePattern) { AuthorizationContract ac1; ac1.addPrivilege( - Privilege(ResourcePattern::forAnyNormalResource(), ActionType::enableSharding)); + Privilege(ResourcePattern::forAnyNormalResource(boost::none), ActionType::enableSharding)); AuthorizationContract ac2; - 
ac2.addPrivilege(Privilege(ResourcePattern::forClusterResource(), ActionType::enableSharding)); + ac2.addPrivilege( + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::enableSharding)); ASSERT_FALSE(ac1.contains(ac2)); ASSERT_FALSE(ac2.contains(ac1)); @@ -130,17 +142,16 @@ TEST(AuthContractTest, DifferentActionType) { AuthorizationContract ac1; ac1.addPrivilege( - Privilege(ResourcePattern::forAnyNormalResource(), ActionType::enableSharding)); + Privilege(ResourcePattern::forAnyNormalResource(boost::none), ActionType::enableSharding)); AuthorizationContract ac2; - ac2.addPrivilege( - Privilege(ResourcePattern::forAnyNormalResource(), ActionType::grantPrivilegesToRole)); + ac2.addPrivilege(Privilege(ResourcePattern::forAnyNormalResource(boost::none), + ActionType::grantPrivilegesToRole)); ASSERT_FALSE(ac1.contains(ac2)); ASSERT_FALSE(ac2.contains(ac1)); } - TEST(AuthContractTest, InitializerList) { AuthorizationContract ac1; @@ -151,19 +162,45 @@ TEST(AuthContractTest, InitializerList) { enableShardingActions.addAction(ActionType::enableSharding); enableShardingActions.addAction(ActionType::refineCollectionShardKey); enableShardingActions.addAction(ActionType::reshardCollection); - ac1.addPrivilege(Privilege(ResourcePattern::forAnyNormalResource(), enableShardingActions)); + ac1.addPrivilege( + Privilege(ResourcePattern::forAnyNormalResource(boost::none), enableShardingActions)); AuthorizationContract ac2( std::initializer_list{AccessCheckEnum::kIsAuthenticated, AccessCheckEnum::kIsCoAuthorized}, - std::initializer_list{Privilege(ResourcePattern::forAnyNormalResource(), - {ActionType::enableSharding, - ActionType::refineCollectionShardKey, - ActionType::reshardCollection})}); + std::initializer_list{ + Privilege(ResourcePattern::forAnyNormalResource(boost::none), + {ActionType::enableSharding, + ActionType::refineCollectionShardKey, + ActionType::reshardCollection})}); ASSERT_TRUE(ac1.contains(ac2)); ASSERT_TRUE(ac2.contains(ac1)); } +TEST(AuthContractTest, NonTestModeCheck) { + AuthorizationContract ac(/* isTestModeEnabled */ false); + ac.addAccessCheck(AccessCheckEnum::kIsAuthenticated); + ac.addAccessCheck(AccessCheckEnum::kIsCoAuthorized); + + ActionSet enableShardingActions; + enableShardingActions.addAction(ActionType::enableSharding); + enableShardingActions.addAction(ActionType::refineCollectionShardKey); + enableShardingActions.addAction(ActionType::reshardCollection); + ac.addPrivilege( + Privilege(ResourcePattern::forAnyNormalResource(boost::none), enableShardingActions)); + + // Non-test mode will not keep accounting and will not take any mutex guard + ASSERT_FALSE(ac.hasAccessCheck(AccessCheckEnum::kIsAuthenticated)); + ASSERT_FALSE(ac.hasAccessCheck(AccessCheckEnum::kIsCoAuthorized)); + + ASSERT_FALSE(ac.hasPrivileges( + Privilege(ResourcePattern::forAnyNormalResource(boost::none), ActionType::enableSharding))); + ASSERT_FALSE(ac.hasPrivileges(Privilege(ResourcePattern::forAnyNormalResource(boost::none), + ActionType::refineCollectionShardKey))); + ASSERT_FALSE(ac.hasPrivileges(Privilege(ResourcePattern::forAnyNormalResource(boost::none), + ActionType::reshardCollection))); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/auth/authorization_manager.cpp b/src/mongo/db/auth/authorization_manager.cpp index b7b32531f1688..a7e5b468f53db 100644 --- a/src/mongo/db/auth/authorization_manager.cpp +++ b/src/mongo/db/auth/authorization_manager.cpp @@ -27,12 +27,15 @@ * it in the license file. 
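The new `NonTestModeCheck` test above exercises the `_isTestModeEnabled` flag introduced in `AuthorizationContract`: outside test mode, `addAccessCheck` and `addPrivilege` return early, so no accounting and no mutex acquisition happen on the hot path. A compact standalone sketch of that early-return gating (simplified types; the real class tracks a bitset of checks and per-resource action sets):

```cpp
#include <cassert>
#include <mutex>
#include <set>
#include <string>

// Simplified stand-in for AuthorizationContract: recording is skipped entirely
// unless the object was built in "test mode".
class FakeContract {
public:
    explicit FakeContract(bool isTestModeEnabled = true)
        : _isTestModeEnabled(isTestModeEnabled) {}

    void addAccessCheck(const std::string& check) {
        if (!_isTestModeEnabled)
            return;                          // no accounting, no lock
        std::lock_guard<std::mutex> lk(_mutex);
        _checks.insert(check);
    }

    bool hasAccessCheck(const std::string& check) const {
        std::lock_guard<std::mutex> lk(_mutex);
        return _checks.count(check) != 0;
    }

private:
    mutable std::mutex _mutex;
    std::set<std::string> _checks;
    bool _isTestModeEnabled;
};

int main() {
    FakeContract testMode(true), production(false);
    testMode.addAccessCheck("isAuthenticated");
    production.addAccessCheck("isAuthenticated");
    assert(testMode.hasAccessCheck("isAuthenticated"));
    assert(!production.hasAccessCheck("isAuthenticated"));  // mirrors NonTestModeCheck
}
```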
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/auth/authorization_manager.h" -#include "mongo/base/init.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/shim.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/db/auth/authorization_manager.h b/src/mongo/db/auth/authorization_manager.h index 58e19c21032bb..f23ed01326941 100644 --- a/src/mongo/db/auth/authorization_manager.h +++ b/src/mongo/db/auth/authorization_manager.h @@ -30,18 +30,31 @@ #pragma once #include +#include +#include #include +#include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/oid.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/builtin_roles.h" +#include "mongo/db/auth/privilege.h" #include "mongo/db/auth/privilege_format.h" #include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/restriction_set.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { diff --git a/src/mongo/db/auth/authorization_manager_global.cpp b/src/mongo/db/auth/authorization_manager_global.cpp index 2f5e5059a4c59..0a1248e001894 100644 --- a/src/mongo/db/auth/authorization_manager_global.cpp +++ b/src/mongo/db/auth/authorization_manager_global.cpp @@ -27,21 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/client/authenticate.h" -#include "mongo/config.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/internal_auth.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_global_parameters_gen.h" -#include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/cluster_auth_mode.h" -#include "mongo/db/auth/sasl_command_constants.h" #include "mongo/db/auth/security_key.h" #include "mongo/db/operation_context.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/assert_util.h" #include "mongo/util/net/ssl_manager.h" +#include "mongo/util/net/ssl_types.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/authorization_manager_impl.cpp b/src/mongo/db/auth/authorization_manager_impl.cpp index 48d464814e6d5..d9c9a4d633033 100644 --- a/src/mongo/db/auth/authorization_manager_impl.cpp +++ b/src/mongo/db/auth/authorization_manager_impl.cpp @@ -28,39 +28,55 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authorization_manager_impl.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/shim.h" #include "mongo/base/status.h" -#include "mongo/bson/util/bson_extract.h" -#include 
"mongo/config.h" -#include "mongo/crypto/mechanism_scram.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/auth/action_set.h" #include "mongo/db/auth/address_restriction.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/auth_types_gen.h" -#include "mongo/db/auth/authorization_manager_global_parameters_gen.h" -#include "mongo/db/auth/authorization_manager_impl_parameters_gen.h" +#include "mongo/db/auth/authorization_manager_impl.h" +#include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authorization_session_impl.h" #include "mongo/db/auth/authz_manager_external_state.h" -#include "mongo/db/auth/sasl_options.h" -#include "mongo/db/auth/user_document_parser.h" -#include "mongo/db/auth/user_management_commands_parser.h" +#include "mongo/db/auth/builtin_roles.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/restriction_set.h" +#include "mongo/db/auth/user_acquisition_stats.h" #include "mongo/db/commands/authentication_commands.h" #include "mongo/db/curop.h" #include "mongo/db/global_settings.h" -#include "mongo/db/mongod_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" -#include "mongo/util/net/ssl_peer_info.h" -#include "mongo/util/net/ssl_types.h" +#include "mongo/util/future.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -75,7 +91,7 @@ std::shared_ptr createSystemUserHandle() { ActionSet allActions; allActions.addAllActions(); PrivilegeVector privileges; - auth::generateUniversalPrivileges(&privileges); + auth::generateUniversalPrivileges(&privileges, boost::none /* tenantId */); (*user)->addPrivileges(privileges); if (internalSecurity.credentials) { @@ -411,7 +427,7 @@ StatusWith AuthorizationManagerImpl::acquireUser(OperationContext* o // X.509 will give us our roles for initial acquire, but we have to lose them during // reacquire (for now) so reparse those roles into the request if not already present. if ((request.roles == boost::none) && request.mechanismData.empty() && - (userName.getDB() == "$external"_sd)) { + (userName.getDatabaseName().isExternalDB())) { userRequest = getX509UserRequest(opCtx, std::move(userRequest)); } #endif @@ -530,7 +546,7 @@ Status AuthorizationManagerImpl::refreshExternalUsers(OperationContext* opCtx) { // First, get a snapshot of the UserHandles in the cache. 
auto cachedUsers = _userCache.peekLatestCachedIf([&](const UserRequest& userRequest, const User&) { - return userRequest.name.getDB() == "$external"_sd; + return userRequest.name.getDatabaseName().isExternalDB(); }); // Then, retrieve the corresponding Users from the backing store for users in the $external diff --git a/src/mongo/db/auth/authorization_manager_impl.h b/src/mongo/db/auth/authorization_manager_impl.h index 891841b8999c7..fc29d6ea8026c 100644 --- a/src/mongo/db/auth/authorization_manager_impl.h +++ b/src/mongo/db/auth/authorization_manager_impl.h @@ -29,14 +29,40 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/privilege_format.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/thread_pool_interface.h" +#include "mongo/util/invalidating_lru_cache.h" +#include "mongo/util/read_through_cache.h" namespace mongo { diff --git a/src/mongo/db/auth/authorization_manager_test.cpp b/src/mongo/db/auth/authorization_manager_test.cpp index 4a9f4db10ed5f..7a617ce877662 100644 --- a/src/mongo/db/auth/authorization_manager_test.cpp +++ b/src/mongo/db/auth/authorization_manager_test.cpp @@ -27,32 +27,44 @@ * it in the license file. 
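Several call sites in authorization_manager_impl.cpp above stop comparing against the literal `"$external"` and ask the database name for `isExternalDB()` instead, keeping the magic string behind one named predicate. A tiny illustrative sketch of that refactor with stand-in types:

```cpp
#include <cassert>
#include <string>

struct FakeDatabaseName {
    std::string db;
    // Named predicate instead of scattering the "$external" literal around.
    bool isExternalDB() const { return db == "$external"; }
};

bool shouldRefreshFromExternalSource(const FakeDatabaseName& dbName) {
    // Before: dbName.db == "$external"; after: a self-describing predicate.
    return dbName.isExternalDB();
}

int main() {
    assert(shouldRefreshFromExternalSource(FakeDatabaseName{"$external"}));
    assert(!shouldRefreshFromExternalSource(FakeDatabaseName{"admin"}));
}
```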
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include -#include "mongo/base/status.h" -#include "mongo/config.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/crypto/mechanism_scram.h" #include "mongo/crypto/sha1_block.h" #include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_impl.h" -#include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" -#include "mongo/db/auth/authz_session_external_state_mock.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/sasl_options.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/client.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/db/storage/recovery_unit_noop.h" +#include "mongo/platform/atomic_word.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/concurrency/thread_name.h" #include "mongo/util/net/ssl_peer_info.h" +#include "mongo/util/net/ssl_types.h" +#include "mongo/util/read_through_cache.h" #define ASSERT_NULL(EXPR) ASSERT_FALSE(EXPR) #define ASSERT_NON_NULL(EXPR) ASSERT_TRUE(EXPR) @@ -74,6 +86,9 @@ void setX509PeerInfo(const std::shared_ptr& session, SSLPeer #endif +const auto kTestDB = DatabaseName::createDatabaseName_forTest(boost::none, "test"_sd); +const auto kTestRsrc = ResourcePattern::forDatabaseName(kTestDB); + class AuthorizationManagerTest : public ServiceContextTest { public: AuthorizationManagerTest() { @@ -149,7 +164,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) { ASSERT_EQUALS(RoleName("read", "test"), roles.next()); ASSERT_FALSE(roles.more()); auto privilegeMap = v2read->getPrivileges(); - auto testDBPrivilege = privilegeMap[ResourcePattern::forDatabaseName("test")]; + auto testDBPrivilege = privilegeMap[kTestRsrc]; ASSERT(testDBPrivilege.getActions().contains(ActionType::find)); // Make sure user's refCount is 0 at the end of the test to avoid an assertion failure @@ -162,7 +177,7 @@ TEST_F(AuthorizationManagerTest, testAcquireV2User) { ASSERT_EQUALS(RoleName("clusterAdmin", "admin"), clusterRoles.next()); ASSERT_FALSE(clusterRoles.more()); privilegeMap = v2cluster->getPrivileges(); - auto clusterPrivilege = privilegeMap[ResourcePattern::forClusterResource()]; + auto clusterPrivilege = privilegeMap[ResourcePattern::forClusterResource(boost::none)]; ASSERT(clusterPrivilege.getActions().contains(ActionType::serverStatus)); // Make sure user's refCount is 0 at the end of the test to avoid an assertion failure } @@ -185,7 +200,7 @@ TEST_F(AuthorizationManagerTest, testLocalX509Authorization) { const User::ResourcePrivilegeMap& privileges = x509User->getPrivileges(); ASSERT_FALSE(privileges.empty()); - auto privilegeIt = privileges.find(ResourcePattern::forDatabaseName("test")); + auto privilegeIt = privileges.find(kTestRsrc); ASSERT(privilegeIt != privileges.end()); ASSERT(privilegeIt->second.includesAction(ActionType::insert)); } @@ -246,7 +261,7 @@ TEST_F(AuthorizationManagerTest, 
testAcquireV2UserWithUnrecognizedActions) { ASSERT_EQUALS(RoleName("myRole", "test"), roles.next()); ASSERT_FALSE(roles.more()); auto privilegeMap = myUser->getPrivileges(); - auto testDBPrivilege = privilegeMap[ResourcePattern::forDatabaseName("test")]; + auto testDBPrivilege = privilegeMap[kTestRsrc]; ActionSet actions = testDBPrivilege.getActions(); ASSERT(actions.contains(ActionType::find)); ASSERT(actions.contains(ActionType::insert)); @@ -349,7 +364,7 @@ TEST_F(AuthorizationManagerTest, testRefreshExternalV2User) { // Assert that all checked-out $external users are now marked invalid. for (const auto& checkedOutUser : checkedOutUsers) { - if (checkedOutUser->getName().getDB() == "$external"_sd) { + if (checkedOutUser->getName().getDatabaseName().isExternalDB()) { ASSERT(!checkedOutUser.isValid()); } else { ASSERT(checkedOutUser.isValid()); @@ -368,7 +383,7 @@ TEST_F(AuthorizationManagerTest, testRefreshExternalV2User) { ASSERT(user.isValid()); RoleNameIterator cachedUserRolesIt = user->getRoles(); - if (userDoc.getStringField(kDbFieldName) == "$external"_sd) { + if (userDoc.getStringField(kDbFieldName) == DatabaseName::kExternal.db()) { for (const auto& userDocRole : updatedRoles) { ASSERT_EQUALS(cachedUserRolesIt.next(), RoleName(userDocRole.getStringField(kRoleFieldName), diff --git a/src/mongo/db/auth/authorization_session.cpp b/src/mongo/db/auth/authorization_session.cpp index 807d2bc66d7e1..3074e4eaa1661 100644 --- a/src/mongo/db/auth/authorization_session.cpp +++ b/src/mongo/db/auth/authorization_session.cpp @@ -27,30 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authorization_session.h" - +#include #include #include +#include + #include "mongo/base/shim.h" -#include "mongo/base/status.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authz_session_external_state.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/auth/restriction_environment.h" -#include "mongo/db/auth/security_key.h" -#include "mongo/db/auth/user_management_commands_parser.h" -#include "mongo/db/bson/dotted_path_support.h" -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/client.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/pipeline/lite_parsed_pipeline.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/str.h" +#include "mongo/db/auth/authorization_session.h" namespace mongo { diff --git a/src/mongo/db/auth/authorization_session.h b/src/mongo/db/auth/authorization_session.h index 4c63ebeb1e6d7..eb4a9ce65eb3e 100644 --- a/src/mongo/db/auth/authorization_session.h +++ b/src/mongo/db/auth/authorization_session.h @@ -29,25 +29,44 @@ #pragma once +#include +#include #include #include +#include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authz_session_external_state.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include 
"mongo/db/read_write_concern_provenance_base_gen.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/time_support.h" namespace mongo { class Client; class AuthorizationContract; +class ListCollections; + /** * Contains all the authorization logic for a single client connection. It contains a set of * the users which have been authenticated, as well as a set of privileges that have been @@ -157,6 +176,9 @@ class AuthorizationSession { // Get the authenticated user's object handle, if any. virtual boost::optional getAuthenticatedUser() = 0; + // Get the authenticated user's tenant ID, if any. + virtual boost::optional getUserTenantId() const = 0; + // Is auth disabled? Returns true if auth is disabled. virtual bool shouldIgnoreAuthChecks() = 0; @@ -199,8 +221,8 @@ class AuthorizationSession { // Checks if the current session is authorized to list the collections in the given // database. If it is, return a privilegeVector containing the privileges used to authorize // this command. - virtual StatusWith checkAuthorizedToListCollections(StringData dbname, - const BSONObj& cmdObj) = 0; + virtual StatusWith checkAuthorizedToListCollections( + const ListCollections&) = 0; // Checks if this connection is using the localhost bypass virtual bool isUsingLocalhostBypass() = 0; @@ -254,7 +276,7 @@ class AuthorizationSession { // Returns true if the current session possesses a privilege which could apply to the // database resource, or a specific or arbitrary resource within the database. - virtual bool isAuthorizedForAnyActionOnAnyResourceInDB(StringData dbname) = 0; + virtual bool isAuthorizedForAnyActionOnAnyResourceInDB(const DatabaseName&) = 0; // Returns true if the current session possesses a privilege which applies to the resource. virtual bool isAuthorizedForAnyActionOnResource(const ResourcePattern& resource) = 0; diff --git a/src/mongo/db/auth/authorization_session_for_test.cpp b/src/mongo/db/auth/authorization_session_for_test.cpp index e88dd65ebc4ca..a3fbfdce7db5b 100644 --- a/src/mongo/db/auth/authorization_session_for_test.cpp +++ b/src/mongo/db/auth/authorization_session_for_test.cpp @@ -27,17 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/auth/authorization_session_for_test.h" - -#include -#include +#include +#include +#include +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/authorization_session_for_test.h" #include "mongo/db/auth/builtin_roles.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/user.h" #include "mongo/db/auth/user_name.h" +#include "mongo/util/read_through_cache.h" namespace mongo { constexpr StringData AuthorizationSessionForTest::kTestDBName; diff --git a/src/mongo/db/auth/authorization_session_for_test.h b/src/mongo/db/auth/authorization_session_for_test.h index 61145661a9523..b6ec7c28b5a69 100644 --- a/src/mongo/db/auth/authorization_session_for_test.h +++ b/src/mongo/db/auth/authorization_session_for_test.h @@ -32,8 +32,11 @@ #include #include +#include "mongo/base/string_data.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authorization_session_impl.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user.h" namespace mongo { diff --git a/src/mongo/db/auth/authorization_session_impl.cpp b/src/mongo/db/auth/authorization_session_impl.cpp index 82fb7078846fb..1ef40586d985e 100644 --- a/src/mongo/db/auth/authorization_session_impl.cpp +++ b/src/mongo/db/auth/authorization_session_impl.cpp @@ -28,29 +28,63 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authorization_session_impl.h" - -#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/shim.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/audit.h" +#include "mongo/db/auth/access_checks_gen.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/action_type_gen.h" +#include "mongo/db/auth/auth_name.h" +#include "mongo/db/auth/authorization_session_impl.h" #include "mongo/db/auth/authz_session_external_state.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern_search_list.h" #include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/client.h" +#include "mongo/db/list_collections_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/read_through_cache.h" #include "mongo/util/str.h" #include "mongo/util/testing_proctor.h" @@ -101,25 +135,17 @@ MONGO_INITIALIZER(ServerlessPrivilegePermittedMap)(InitializerContext*) try { auto matchTypeName = MatchType_serializer(matchType); auto dataObj = 
MatchType_get_extra_data(matchType); auto data = MatchTypeExtraData::parse(IDLParserContext{matchTypeName}, dataObj); - auto actionTypes = data.getServerlessActionTypes(); - - std::vector actionsToParse; - std::transform(actionTypes.cbegin(), - actionTypes.cend(), - std::back_inserter(actionsToParse), - [](const auto& at) { return at.toString(); }); - ActionSet actions; std::vector unknownActions; - auto status = - ActionSet::parseActionSetFromStringVector(actionsToParse, &actions, &unknownActions); - if (!status.isOK()) { + auto actions = + ActionSet::parseFromStringVector(data.getServerlessActionTypes(), &unknownActions); + if (!unknownActions.empty()) { StringBuilder sb; sb << "Unknown actions listed for match type '" << matchTypeName << "':"; for (const auto& unknownAction : unknownActions) { sb << " '" << unknownAction << "'"; } - uassertStatusOK(status.withContext(sb.str())); + uasserted(ErrorCodes::FailedToParse, sb.str()); } ret[matchType] = std::move(actions); @@ -154,14 +180,13 @@ void validateSecurityTokenUserPrivileges(const User::ResourcePrivilegeMap& privs MONGO_FAIL_POINT_DEFINE(allowMultipleUsersWithApiStrict); -const Privilege kBypassWriteBlockingModeOnClusterPrivilege(ResourcePattern::forClusterResource(), - ActionType::bypassWriteBlockingMode); } // namespace AuthorizationSessionImpl::AuthorizationSessionImpl( std::unique_ptr externalState, InstallMockForTestingOrAuthImpl) : _externalState(std::move(externalState)), _impersonationFlag(false), + _contract(TestingProctor::instance().isEnabled()), _mayBypassWriteBlockingMode(false) {} AuthorizationSessionImpl::~AuthorizationSessionImpl() { @@ -433,15 +458,17 @@ PrivilegeVector AuthorizationSessionImpl::_getDefaultPrivileges() { // return a vector of the minimum privileges required to bootstrap // a system and add the first user. 
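The serverless initializer above now uses `ActionSet::parseFromStringVector`, which returns the parsed set and reports unrecognized action names through an out-parameter; the caller then fails once with a message listing everything it did not recognize. A generic standalone sketch of that parse-and-collect-unknowns shape (simplified types, not the ActionSet API):

```cpp
#include <iostream>
#include <set>
#include <sstream>
#include <string>
#include <vector>

// Parse known names into a set; anything unrecognized goes into *unknown
// instead of aborting the whole parse on the first bad entry.
std::set<std::string> parseActions(const std::vector<std::string>& names,
                                   std::vector<std::string>* unknown) {
    static const std::set<std::string> kKnown{"find", "insert", "remove", "update"};
    std::set<std::string> out;
    for (const auto& n : names) {
        if (kKnown.count(n))
            out.insert(n);
        else
            unknown->push_back(n);
    }
    return out;
}

int main() {
    std::vector<std::string> unknown;
    auto actions = parseActions({"find", "frobnicate", "insert"}, &unknown);
    if (!unknown.empty()) {
        std::ostringstream sb;
        sb << "Unknown actions listed:";
        for (const auto& u : unknown) sb << " '" << u << "'";
        std::cout << sb.str() << "\n";  // the real code calls uasserted(FailedToParse, ...)
        return 1;
    }
    std::cout << "parsed " << actions.size() << " actions\n";
}
```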
if (_externalState->shouldAllowLocalhost()) { - ResourcePattern adminDBResource = ResourcePattern::forDatabaseName(ADMIN_DBNAME); - ActionSet setupAdminUserActionSet; - setupAdminUserActionSet.addAction(ActionType::createUser); - setupAdminUserActionSet.addAction(ActionType::grantRole); - Privilege setupAdminUserPrivilege = Privilege(adminDBResource, setupAdminUserActionSet); - ResourcePattern externalDBResource = ResourcePattern::forDatabaseName("$external"); - Privilege setupExternalUserPrivilege = - Privilege(externalDBResource, ActionType::createUser); + const DatabaseName kAdminDB = + DatabaseName::createDatabaseNameForAuth(boost::none, ADMIN_DBNAME); + const ResourcePattern adminDBResource = ResourcePattern::forDatabaseName(kAdminDB); + const ActionSet setupAdminUserActionSet{ActionType::createUser, ActionType::grantRole}; + Privilege setupAdminUserPrivilege(adminDBResource, setupAdminUserActionSet); + + const DatabaseName kExternalDB = + DatabaseName::createDatabaseNameForAuth(boost::none, "$external"_sd); + const ResourcePattern externalDBResource = ResourcePattern::forDatabaseName(kExternalDB); + Privilege setupExternalUserPrivilege(externalDBResource, ActionType::createUser); ActionSet setupServerConfigActionSet; @@ -457,8 +484,9 @@ PrivilegeVector AuthorizationSessionImpl::_getDefaultPrivileges() { setupServerConfigActionSet.addAction(ActionType::addShard); setupServerConfigActionSet.addAction(ActionType::replSetConfigure); setupServerConfigActionSet.addAction(ActionType::replSetGetStatus); + setupServerConfigActionSet.addAction(ActionType::issueDirectShardOperations); Privilege setupServerConfigPrivilege = - Privilege(ResourcePattern::forClusterResource(), setupServerConfigActionSet); + Privilege(ResourcePattern::forClusterResource(boost::none), setupServerConfigActionSet); Privilege::addPrivilegeToPrivilegeVector(&defaultPrivileges, setupAdminUserPrivilege); Privilege::addPrivilegeToPrivilegeVector(&defaultPrivileges, setupExternalUserPrivilege); @@ -469,6 +497,10 @@ PrivilegeVector AuthorizationSessionImpl::_getDefaultPrivileges() { return defaultPrivileges; } +boost::optional AuthorizationSessionImpl::getUserTenantId() const { + return _authenticatedUser ? 
_authenticatedUser.value()->getName().getTenant() : boost::none; +} + bool AuthorizationSessionImpl::isAuthorizedToParseNamespaceElement(const BSONElement& element) { const bool isUUID = element.type() == BinData && element.binDataType() == BinDataType::newUUID; _contract.addAccessCheck(AccessCheckEnum::kIsAuthorizedToParseNamespaceElement); @@ -478,8 +510,8 @@ bool AuthorizationSessionImpl::isAuthorizedToParseNamespaceElement(const BSONEle element.type() == String || isUUID); if (isUUID) { - return isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::useUUID); + return isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(getUserTenantId()), ActionType::useUUID); } return true; @@ -489,9 +521,9 @@ bool AuthorizationSessionImpl::isAuthorizedToParseNamespaceElement( const NamespaceStringOrUUID& nss) { _contract.addAccessCheck(AccessCheckEnum::kIsAuthorizedToParseNamespaceElement); - if (nss.uuid()) { - return isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::useUUID); + if (nss.isUUID()) { + return isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(getUserTenantId()), ActionType::useUUID); } return true; } @@ -502,8 +534,8 @@ bool AuthorizationSessionImpl::isAuthorizedToCreateRole(const RoleName& roleName // A user is allowed to create a role under either of two conditions. // The user may create a role if the authorization system says they are allowed to. - if (isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(roleName.getDB()), - ActionType::createRole)) { + if (isAuthorizedForActionsOnResource( + ResourcePattern::forDatabaseName(roleName.getDatabaseName()), ActionType::createRole)) { return true; } @@ -562,89 +594,6 @@ bool AuthorizationSessionImpl::isAuthorizedForActionsOnNamespace(const Namespace return isAuthorizedForPrivilege(Privilege(ResourcePattern::forExactNamespace(ns), actions)); } -constexpr int resourceSearchListCapacity = 7; -/** - * Builds from "target" an exhaustive list of all ResourcePatterns that match "target". - * - * Some resources are considered to be "normal resources", and are matched by the - * forAnyNormalResource pattern. Collections which are not prefixed with "system.", - * and which do not belong inside of the "local" or "config" databases are "normal". - * Database other than "local" and "config" are normal. - * - * Most collections are matched by their database's resource. Collections prefixed with "system." - * are not. Neither are collections on the "local" database, whose name are prefixed with "replset." - * - * - * Stores the resulting list into resourceSearchList, and returns the length. 
- * - * The seach lists are as follows, depending on the type of "target": - * - * target is ResourcePattern::forAnyResource(): - * searchList = { ResourcePattern::forAnyResource(), ResourcePattern::forAnyResource() } - * target is the ResourcePattern::forClusterResource(): - * searchList = { ResourcePattern::forAnyResource(), ResourcePattern::forClusterResource() } - * target is a database, db: - * searchList = { ResourcePattern::forAnyResource(), - * ResourcePattern::forAnyNormalResource(), - * db } - * target is a non-system collection, db.coll: - * searchList = { ResourcePattern::forAnyResource(), - * ResourcePattern::forAnyNormalResource(), - * db, - * coll, - * db.coll } - * target is a system buckets collection, db.system.buckets.coll: - * searchList = { ResourcePattern::forAnyResource(), - * ResourcePattern::forAnySystemBuckets(), - * ResourcePattern::forAnySystemBucketsInDatabase("db"), - * ResourcePattern::forAnySystemBucketsInAnyDatabase("coll"), - * ResourcePattern::forExactSystemBucketsCollection("db", "coll"), - * system.buckets.coll, - * db.system.buckets.coll } - * target is a system collection, db.system.coll: - * searchList = { ResourcePattern::forAnyResource(), - * system.coll, - * db.system.coll } - */ -static int buildResourceSearchList(const ResourcePattern& target, - ResourcePattern resourceSearchList[resourceSearchListCapacity]) { - int size = 0; - resourceSearchList[size++] = ResourcePattern::forAnyResource(); - if (target.isExactNamespacePattern()) { - // Normal collections can be matched by anyNormalResource, or their database's resource. - if (target.ns().isNormalCollection()) { - // But even normal collections in non-normal databases should not be matchable with - // ResourcePattern::forAnyNormalResource. 'local' and 'config' are - // used to store special system collections, which user level - // administrators should not be able to manipulate. 
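The comment block above documents the exhaustive match list that the old `buildResourceSearchList` produced into a fixed-size array; later hunks in this diff replace that array with `auth::ResourcePatternSearchList` iterated via `std::any_of`. A minimal sketch of how a caller consumes the new type, mirroring the replacement code further down; the helper name is illustrative, and `UserHandle`/`hasActionsForResource()` are taken from the surrounding code.

```cpp
#include <algorithm>

// Minimal sketch, not the server's implementation: consume the search list the
// way the rewritten checks in this diff do.
bool hasAnyActionOnTarget(const UserHandle& user, const ResourcePattern& target) {
    // Expands "target" into every pattern that could match it (anyResource,
    // anyNormalResource, database, collection name, exact namespace, ...).
    auth::ResourcePatternSearchList search(target);
    return std::any_of(search.cbegin(), search.cend(), [&](const auto& pattern) {
        return user->hasActionsForResource(pattern);
    });
}
```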
- if (target.ns().db() != "local" && target.ns().db() != "config") { - resourceSearchList[size++] = ResourcePattern::forAnyNormalResource(); - } - resourceSearchList[size++] = ResourcePattern::forDatabaseName(target.ns().db()); - } else if (target.ns().coll().startsWith(SYSTEM_BUCKETS_PREFIX) && - target.ns().coll().size() > SYSTEM_BUCKETS_PREFIX.size()) { - auto bucketColl = target.ns().coll().substr(SYSTEM_BUCKETS_PREFIX.size()); - resourceSearchList[size++] = - ResourcePattern::forExactSystemBucketsCollection(target.ns().db(), bucketColl); - resourceSearchList[size++] = ResourcePattern::forAnySystemBuckets(); - resourceSearchList[size++] = - ResourcePattern::forAnySystemBucketsInDatabase(target.ns().db()); - resourceSearchList[size++] = - ResourcePattern::forAnySystemBucketsInAnyDatabase(bucketColl); - } - - // All collections can be matched by a collection resource for their name - resourceSearchList[size++] = ResourcePattern::forCollectionName(target.ns().coll()); - } else if (target.isDatabasePattern()) { - if (target.ns().db() != "local" && target.ns().db() != "config") { - resourceSearchList[size++] = ResourcePattern::forAnyNormalResource(); - } - } - resourceSearchList[size++] = target; - dassert(size <= resourceSearchListCapacity); - return size; -} - bool AuthorizationSessionImpl::isAuthorizedToChangeAsUser(const UserName& userName, ActionType actionType) { _contract.addAccessCheck(AccessCheckEnum::kIsAuthorizedToChangeAsUser); @@ -653,22 +602,20 @@ bool AuthorizationSessionImpl::isAuthorizedToChangeAsUser(const UserName& userNa if (!user) { return false; } - ResourcePattern resourceSearchList[resourceSearchListCapacity]; - const int resourceSearchListLength = buildResourceSearchList( - ResourcePattern::forDatabaseName(userName.getDB()), resourceSearchList); - ActionSet actions; - for (int i = 0; i < resourceSearchListLength; ++i) { - actions.addAllActionsFromSet(user->getActionsForResource(resourceSearchList[i])); - } - return actions.contains(actionType); + auth::ResourcePatternSearchList search( + ResourcePattern::forDatabaseName(userName.getDatabaseName())); + return std::any_of(search.cbegin(), search.cend(), [&user, &actionType](const auto& pattern) { + return user->getActionsForResource(pattern).contains(actionType); + }); } StatusWith AuthorizationSessionImpl::checkAuthorizedToListCollections( - StringData dbname, const BSONObj& cmdObj) { + const ListCollections& cmd) { + const auto& dbname = cmd.getDbName(); _contract.addAccessCheck(AccessCheckEnum::kCheckAuthorizedToListCollections); - if (cmdObj["authorizedCollections"].trueValue() && cmdObj["nameOnly"].trueValue() && + if (cmd.getAuthorizedCollections() && cmd.getNameOnly() && AuthorizationSessionImpl::isAuthorizedForAnyActionOnAnyResourceInDB(dbname)) { return PrivilegeVector(); } @@ -681,7 +628,7 @@ StatusWith AuthorizationSessionImpl::checkAuthorizedToListColle } return Status(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to list collections on db: " << dbname); + str::stream() << "Not authorized to list collections on db: " << dbname.db()); } bool AuthorizationSessionImpl::isAuthenticatedAsUserWithRole(const RoleName& roleName) { @@ -728,20 +675,6 @@ void AuthorizationSessionImpl::_refreshUserInfoAsNeeded(OperationContext* opCtx) auto swUser = getAuthorizationManager().reacquireUser(opCtx, currentUser); if (!swUser.isOK()) { auto& status = swUser.getStatus(); - // If an LDAP user is no longer in the cache and cannot be acquired from the cache's - // backing LDAP host, it should be cleared from 
_authenticatedUser. This - // guarantees that no operations can be performed until the LDAP host comes back up. - // TODO SERVER-72678 avoid this edge case hack when rearchitecting user acquisition. - if (name.getDB() == "$external"_sd && currentUser->getUserRequest().mechanismData.empty()) { - clearUser(); - LOGV2(5914804, - "Removed external user from session cache of user information because of " - "error status", - "user"_attr = name, - "status"_attr = status); - return; - } - switch (status.code()) { case ErrorCodes::UserNotFound: { // User does not exist anymore. @@ -820,8 +753,10 @@ void AuthorizationSessionImpl::_refreshUserInfoAsNeeded(OperationContext* opCtx) updateUser(std::move(user)); } -bool AuthorizationSessionImpl::isAuthorizedForAnyActionOnAnyResourceInDB(StringData db) { +bool AuthorizationSessionImpl::isAuthorizedForAnyActionOnAnyResourceInDB( + const DatabaseName& dbname) { _contract.addAccessCheck(AccessCheckEnum::kIsAuthorizedForAnyActionOnAnyResourceInDB); + const auto& tenantId = dbname.tenantId(); if (_externalState->shouldIgnoreAuthChecks()) { return true; @@ -833,25 +768,25 @@ bool AuthorizationSessionImpl::isAuthorizedForAnyActionOnAnyResourceInDB(StringD const auto& user = _authenticatedUser.value(); // First lookup any Privileges on this database specifying Database resources - if (user->hasActionsForResource(ResourcePattern::forDatabaseName(db))) { + if (user->hasActionsForResource(ResourcePattern::forDatabaseName(dbname))) { return true; } // Any resource will match any collection in the database - if (user->hasActionsForResource(ResourcePattern::forAnyResource())) { + if (user->hasActionsForResource(ResourcePattern::forAnyResource(tenantId))) { return true; } // Any resource will match any system_buckets collection in the database - if (user->hasActionsForResource(ResourcePattern::forAnySystemBuckets()) || - user->hasActionsForResource(ResourcePattern::forAnySystemBucketsInDatabase(db))) { + if (user->hasActionsForResource(ResourcePattern::forAnySystemBuckets(tenantId)) || + user->hasActionsForResource(ResourcePattern::forAnySystemBucketsInDatabase(dbname))) { return true; } // If the user is authorized for anyNormalResource, then they implicitly have access // to most databases. - if (db != "local" && db != "config" && - user->hasActionsForResource(ResourcePattern::forAnyNormalResource())) { + if (!dbname.isLocalDB() && !dbname.isConfigDB() && + user->hasActionsForResource(ResourcePattern::forAnyNormalResource(tenantId))) { return true; } @@ -859,28 +794,29 @@ bool AuthorizationSessionImpl::isAuthorizedForAnyActionOnAnyResourceInDB(StringD // iterate all privileges, until we see something that could reside in the target database. auto map = user->getPrivileges(); for (const auto& privilege : map) { + const auto& privRsrc = privilege.first; + // If the user has a Collection privilege, then they're authorized for this resource // on all databases. - if (privilege.first.isCollectionPattern()) { + if (privRsrc.isCollectionPattern()) { return true; } // User can see system_buckets in any database so we consider them to have permission in // this database - if (privilege.first.isAnySystemBucketsCollectionInAnyDB()) { + if (privRsrc.isAnySystemBucketsCollectionInAnyDB()) { return true; } // If the user has an exact namespace privilege on a collection in this database, they // have access to a resource in this database. 
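The hunk above threads the database's tenant id (`dbname.tenantId()`) into every `ResourcePattern` factory it consults. A minimal sketch of the same fall-through checks, written against only the tenant-aware factory signatures that appear in this diff; the helper name and its `UserHandle` parameter are illustrative.

```cpp
// Sketch only: tenant-aware pattern lookups as used in the rewritten
// isAuthorizedForAnyActionOnAnyResourceInDB above. tenantId may be boost::none.
bool mayTouchSomethingInDb(const UserHandle& user, const DatabaseName& dbname) {
    const boost::optional<TenantId>& tenantId = dbname.tenantId();
    return user->hasActionsForResource(ResourcePattern::forDatabaseName(dbname)) ||
        user->hasActionsForResource(ResourcePattern::forAnyResource(tenantId)) ||
        user->hasActionsForResource(ResourcePattern::forAnySystemBuckets(tenantId)) ||
        user->hasActionsForResource(ResourcePattern::forAnySystemBucketsInDatabase(dbname)) ||
        (!dbname.isLocalDB() && !dbname.isConfigDB() &&
         user->hasActionsForResource(ResourcePattern::forAnyNormalResource(tenantId)));
}
```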
- if (privilege.first.isExactNamespacePattern() && privilege.first.databaseToMatch() == db) { + if (privRsrc.isExactNamespacePattern() && (privRsrc.dbNameToMatch() == dbname)) { return true; } // If the user has an exact namespace privilege on a system.buckets collection in this // database, they have access to a resource in this database. - if (privilege.first.isExactSystemBucketsCollection() && - privilege.first.databaseToMatch() == db) { + if (privRsrc.isExactSystemBucketsCollection() && (privRsrc.dbNameToMatch() == dbname)) { return true; } } @@ -899,39 +835,26 @@ bool AuthorizationSessionImpl::isAuthorizedForAnyActionOnResource(const Resource return false; } - std::array resourceSearchList; - const int resourceSearchListLength = - buildResourceSearchList(resource, resourceSearchList.data()); - const auto& user = _authenticatedUser.value(); - for (int i = 0; i < resourceSearchListLength; ++i) { - if (user->hasActionsForResource(resourceSearchList[i])) { - return true; - } - } - - return false; + auth::ResourcePatternSearchList search(resource); + return std::any_of(search.cbegin(), search.cend(), [&user](const auto& pattern) { + return user->hasActionsForResource(pattern); + }); } bool AuthorizationSessionImpl::_isAuthorizedForPrivilege(const Privilege& privilege) { _contract.addPrivilege(privilege); - const ResourcePattern& target(privilege.getResourcePattern()); - - ResourcePattern resourceSearchList[resourceSearchListCapacity]; - const int resourceSearchListLength = buildResourceSearchList(target, resourceSearchList); - + auth::ResourcePatternSearchList search(privilege.getResourcePattern()); ActionSet unmetRequirements = privilege.getActions(); for (const auto& priv : _getDefaultPrivileges()) { - for (int i = 0; i < resourceSearchListLength; ++i) { - if (!(priv.getResourcePattern() == resourceSearchList[i])) { + for (auto patternIt = search.cbegin(); patternIt != search.cend(); ++patternIt) { + if (!priv.getResourcePattern().matchesIgnoringTenant(*patternIt)) { continue; } - ActionSet userActions = priv.getActions(); - unmetRequirements.removeAllActionsFromSet(userActions); - + unmetRequirements.removeAllActionsFromSet(priv.getActions()); if (unmetRequirements.empty()) { return true; } @@ -943,16 +866,10 @@ bool AuthorizationSessionImpl::_isAuthorizedForPrivilege(const Privilege& privil } const auto& user = _authenticatedUser.value(); - for (int i = 0; i < resourceSearchListLength; ++i) { - ActionSet userActions = user->getActionsForResource(resourceSearchList[i]); - unmetRequirements.removeAllActionsFromSet(userActions); - - if (unmetRequirements.empty()) { - return true; - } - } - - return false; + return std::any_of(search.cbegin(), search.cend(), [&](const auto& pattern) { + unmetRequirements.removeAllActionsFromSet(user->getActionsForResource(pattern)); + return unmetRequirements.empty(); + }); } void AuthorizationSessionImpl::setImpersonatedUserData(const UserName& username, @@ -1029,7 +946,8 @@ auto AuthorizationSessionImpl::checkCursorSessionPrivilege( auto authHasImpersonatePrivilege = [authSession = this] { return authSession->isAuthorizedForPrivilege( - Privilege(ResourcePattern::forClusterResource(), ActionType::impersonate)); + Privilege(ResourcePattern::forClusterResource(authSession->getUserTenantId()), + ActionType::impersonate)); }; auto authIsOn = [authSession = this] { @@ -1102,15 +1020,16 @@ void AuthorizationSessionImpl::verifyContract(const AuthorizationContract* contr // "internal" comes from readRequestMetadata and sharded clusters // "advanceClusterTime" is 
an implicit check in clusters in metadata handling - tempContract.addPrivilege(Privilege(ResourcePattern::forClusterResource(), + tempContract.addPrivilege(Privilege(ResourcePattern::forClusterResource(boost::none), {ActionType::advanceClusterTime, ActionType::internal})); // Implicitly checked often to keep mayBypassWriteBlockingMode() fast - tempContract.addPrivilege(kBypassWriteBlockingModeOnClusterPrivilege); + tempContract.addPrivilege(Privilege(ResourcePattern::forClusterResource(boost::none), + ActionType::bypassWriteBlockingMode)); // Needed for internal sessions started by the server. - tempContract.addPrivilege( - Privilege(ResourcePattern::forClusterResource(), ActionType::issueDirectShardOperations)); + tempContract.addPrivilege(Privilege(ResourcePattern::forClusterResource(boost::none), + ActionType::issueDirectShardOperations)); uassert(5452401, "Authorization Session contains more authorization checks then permitted by contract.", @@ -1132,7 +1051,9 @@ void AuthorizationSessionImpl::_updateInternalAuthorizationState() { // Update cached _mayBypassWriteBlockingMode to reflect current state. _mayBypassWriteBlockingMode = getAuthorizationManager().isAuthEnabled() - ? _isAuthorizedForPrivilege(kBypassWriteBlockingModeOnClusterPrivilege) + ? _isAuthorizedForPrivilege( + Privilege(ResourcePattern::forClusterResource(getUserTenantId()), + ActionType::bypassWriteBlockingMode)) : true; } diff --git a/src/mongo/db/auth/authorization_session_impl.h b/src/mongo/db/auth/authorization_session_impl.h index 7aab611d2c222..b064e537f3d93 100644 --- a/src/mongo/db/auth/authorization_session_impl.h +++ b/src/mongo/db/auth/authorization_session_impl.h @@ -29,18 +29,35 @@ #pragma once +#include +#include #include +#include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_contract.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authz_session_external_state.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -88,6 +105,8 @@ class AuthorizationSessionImpl : public AuthorizationSession { boost::optional getAuthenticatedUser() override; + boost::optional getUserTenantId() const override; + boost::optional getAuthenticatedUserName() override; RoleNameIterator getAuthenticatedRoleNames() override; @@ -104,8 +123,7 @@ class AuthorizationSessionImpl : public AuthorizationSession { void grantInternalAuthorization(OperationContext* opCtx) override; - StatusWith checkAuthorizedToListCollections(StringData dbname, - const BSONObj& cmdObj) override; + StatusWith checkAuthorizedToListCollections(const ListCollections&) override; bool isUsingLocalhostBypass() override; @@ -134,7 +152,7 @@ class AuthorizationSessionImpl : public AuthorizationSession { bool isAuthorizedForActionsOnNamespace(const NamespaceString& ns, const ActionSet& actions) override; 
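The checks rewritten above (the cursor-session `impersonate` check and the cached `bypassWriteBlockingMode` check) now scope the cluster resource to the authenticated user's tenant through the `getUserTenantId()` accessor this header declares. A minimal sketch of that pattern; the wrapper function is illustrative, not part of the diff.

```cpp
// Illustrative wrapper: a cluster-scoped action check that follows the
// authenticated user's tenant, as the rewritten checks above do.
bool mayBypassWriteBlocking(AuthorizationSession* authSession) {
    return authSession->isAuthorizedForPrivilege(
        Privilege(ResourcePattern::forClusterResource(authSession->getUserTenantId()),
                  ActionType::bypassWriteBlockingMode));
}
```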
- bool isAuthorizedForAnyActionOnAnyResourceInDB(StringData dbname) override; + bool isAuthorizedForAnyActionOnAnyResourceInDB(const DatabaseName&) override; bool isAuthorizedForAnyActionOnResource(const ResourcePattern& resource) override; @@ -200,7 +218,6 @@ class AuthorizationSessionImpl : public AuthorizationSession { return std::make_tuple(&_impersonatedUserName, &_impersonatedRoleNames); } - // Generates a vector of default privileges that are granted to any user, // regardless of which roles that user does or does not possess. // If localhost exception is active, the permissions include the ability to create diff --git a/src/mongo/db/auth/authorization_session_test.cpp b/src/mongo/db/auth/authorization_session_test.cpp index cd81c6f1ce34f..827148a0ece0b 100644 --- a/src/mongo/db/auth/authorization_session_test.cpp +++ b/src/mongo/db/auth/authorization_session_test.cpp @@ -27,35 +27,73 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include #include +#include +#include +#include + +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/crypto/mechanism_scram.h" #include "mongo/crypto/sha1_block.h" #include "mongo/crypto/sha256_block.h" +#include "mongo/db/auth/access_checks_gen.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_checks.h" +#include "mongo/db/auth/authorization_contract.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_impl.h" +#include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authorization_session_for_test.h" +#include "mongo/db/auth/authorization_session_impl.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" #include "mongo/db/auth/authz_session_external_state_mock.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/restriction_environment.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/sasl_options.h" #include "mongo/db/auth/security_token_gen.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/list_collections_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/atomic_word.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/sockaddr.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ 
-96,7 +134,7 @@ class AuthorizationSessionTest : public ServiceContextMongoDTest { _opCtx = _client->makeOperationContext(); auto localManagerState = std::make_unique(); managerState = localManagerState.get(); - managerState->setAuthzVersion(AuthorizationManager::schemaVersion26Final); + managerState->setAuthzVersion(_opCtx.get(), AuthorizationManager::schemaVersion26Final); auto uniqueAuthzManager = std::make_unique( getServiceContext(), std::move(localManagerState)); authzManager = uniqueAuthzManager.get(); @@ -136,8 +174,7 @@ class AuthorizationSessionTest : public ServiceContextMongoDTest { return managerState->insert( _opCtx.get(), - NamespaceString::createNamespaceString_forTest( - username.getTenant(), DatabaseName::kAdmin.db(), NamespaceString::kSystemUsers), + NamespaceString::makeTenantUsersCollection(username.getTenant()), userDoc.obj(), {}); } @@ -201,9 +238,14 @@ const NamespaceString testFooNss = NamespaceString::createNamespaceString_forTes const NamespaceString testBarNss = NamespaceString::createNamespaceString_forTest("test.bar"); const NamespaceString testQuxNss = NamespaceString::createNamespaceString_forTest("test.qux"); -const ResourcePattern testDBResource(ResourcePattern::forDatabaseName("test")); -const ResourcePattern otherDBResource(ResourcePattern::forDatabaseName("other")); -const ResourcePattern adminDBResource(ResourcePattern::forDatabaseName("admin")); +const DatabaseName testDB = DatabaseName::createDatabaseName_forTest(boost::none, "test"_sd); +const DatabaseName otherDB = DatabaseName::createDatabaseName_forTest(boost::none, "other"_sd); +const DatabaseName adminDB = DatabaseName::createDatabaseName_forTest(boost::none, "admin"_sd); +const DatabaseName ignoredDB = DatabaseName::createDatabaseName_forTest(boost::none, "ignored"_sd); + +const ResourcePattern testDBResource = ResourcePattern::forDatabaseName(testDB); +const ResourcePattern otherDBResource = ResourcePattern::forDatabaseName(otherDB); +const ResourcePattern adminDBResource = ResourcePattern::forDatabaseName(adminDB); const ResourcePattern testFooCollResource(ResourcePattern::forExactNamespace(testFooNss)); const ResourcePattern testBarCollResource(ResourcePattern::forExactNamespace(testBarNss)); const ResourcePattern testQuxCollResource(ResourcePattern::forExactNamespace(testQuxNss)); @@ -326,7 +368,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) { AuthorizationContract ac( std::initializer_list{}, std::initializer_list{ - Privilege(ResourcePattern::forDatabaseName("ignored"), + Privilege(ResourcePattern::forDatabaseName(ignoredDB), {ActionType::insert, ActionType::dbStats}), Privilege(ResourcePattern::forExactNamespace( NamespaceString::createNamespaceString_forTest("ignored.ignored")), @@ -338,7 +380,7 @@ TEST_F(AuthorizationSessionTest, AddUserAndCheckAuthorization) { // Verify against a smaller contract that verifyContract fails AuthorizationContract acMissing(std::initializer_list{}, std::initializer_list{ - Privilege(ResourcePattern::forDatabaseName("ignored"), + Privilege(ResourcePattern::forDatabaseName(ignoredDB), {ActionType::insert, ActionType::dbStats}), }); ASSERT_THROWS_CODE(authzSession->verifyContract(&acMissing), AssertionException, 5452401); @@ -611,7 +653,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateEmptyPipelineWithoutFindAction) auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + 
<< "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -623,7 +665,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateEmptyPipelineWithFindAction) { auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << BSONArray() << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -639,7 +681,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateWithoutFindActionIfFirstStageNot auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -653,7 +695,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateWithFindActionIfPipelineContains auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -666,7 +708,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateCollStatsWithoutCollStatsAction) auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -679,7 +721,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateCollStatsWithCollStatsAction) { auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -692,7 +734,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateIndexStatsWithoutIndexStatsActio auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -705,7 
+747,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateIndexStatsWithIndexStatsAction) { auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -718,7 +760,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateCurrentOpAllUsersFalseWithoutInprog auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -731,7 +773,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateCurrentOpAllUsersFalseWithoutInp auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, true)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -742,7 +784,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateCurrentOpAllUsersFalseIfNotAuthe auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); ASSERT_FALSE(authzSession->isAuthenticated()); } @@ -751,7 +793,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateCurrentOpAllUsersFalseIfNotAuthe auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, true)); @@ -765,7 +807,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateCurrentOpAllUsersTrueWithoutInpr auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -778,7 +820,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateCurrentOpAllUsersTrueWithoutInpr auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), 
testFooNss, aggReq, true)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -786,13 +828,13 @@ TEST_F(AuthorizationSessionTest, CannotAggregateCurrentOpAllUsersTrueWithoutInpr TEST_F(AuthorizationSessionTest, CanAggregateCurrentOpAllUsersTrueWithInprogActionOnMongoD) { authzSession->assumePrivilegesForDB( - Privilege(ResourcePattern::forClusterResource(), ActionType::inprog)); + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::inprog)); BSONArray pipeline = BSON_ARRAY(BSON("$currentOp" << BSON("allUsers" << true))); auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -800,13 +842,13 @@ TEST_F(AuthorizationSessionTest, CanAggregateCurrentOpAllUsersTrueWithInprogActi TEST_F(AuthorizationSessionTest, CanAggregateCurrentOpAllUsersTrueWithInprogActionOnMongoS) { authzSession->assumePrivilegesForDB( - Privilege(ResourcePattern::forClusterResource(), ActionType::inprog)); + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::inprog)); BSONArray pipeline = BSON_ARRAY(BSON("$currentOp" << BSON("allUsers" << true))); auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, true)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -820,7 +862,7 @@ TEST_F(AuthorizationSessionTest, CannotSpoofAllUsersTrueWithoutInprogActionOnMon auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -834,7 +876,7 @@ TEST_F(AuthorizationSessionTest, CannotSpoofAllUsersTrueWithoutInprogActionOnMon auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, true)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -848,7 +890,7 @@ TEST_F(AuthorizationSessionTest, AddPrivilegesForStageFailsIfOutNamespaceIsNotVa auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); ASSERT_THROWS_CODE( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false), AssertionException, @@ -863,7 +905,7 @@ 
TEST_F(AuthorizationSessionTest, CannotAggregateOutWithoutInsertAndRemoveOnTarge auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -888,7 +930,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateOutWithInsertAndRemoveOnTargetNames auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -898,7 +940,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateOutWithInsertAndRemoveOnTargetNames testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "bypassDocumentValidation" << false << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); privileges = uassertStatusOK(auth::getPrivilegesForAggregate( authzSession.get(), testFooNss, aggNoBypassDocumentValidationReq, false)); @@ -915,7 +957,8 @@ TEST_F(AuthorizationSessionTest, auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "bypassDocumentValidation" << true << "$db" << testFooNss.db()))); + << "bypassDocumentValidation" << true << "$db" + << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -933,7 +976,8 @@ TEST_F(AuthorizationSessionTest, auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "bypassDocumentValidation" << true << "$db" << testFooNss.db()))); + << "bypassDocumentValidation" << true << "$db" + << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, true)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -946,7 +990,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateLookupWithoutFindOnJoinedNamespa auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -960,7 +1004,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateLookupWithFindOnJoinedNamespace) { auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << 
testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, true)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -977,7 +1021,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateLookupWithoutFindOnNestedJoinedN auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -994,7 +1038,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateLookupWithFindOnNestedJoinedNamespa auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -1036,7 +1080,7 @@ TEST_F(AuthorizationSessionTest, CheckAuthForAggregateWithDeeplyNestedLookup) { BSONArrayBuilder pipelineBuilder(cmdBuilder.subarrayStart("pipeline")); addNestedPipeline(&pipelineBuilder, maxLookupDepth); pipelineBuilder.doneFast(); - cmdBuilder << "cursor" << BSONObj() << "$db" << testFooNss.db(); + cmdBuilder << "cursor" << BSONObj() << "$db" << testFooNss.db_forTest(); auto aggReq = uassertStatusOK( aggregation_request_helper::parseFromBSONForTests(testFooNss, cmdBuilder.obj())); @@ -1053,7 +1097,7 @@ TEST_F(AuthorizationSessionTest, CannotAggregateGraphLookupWithoutFindOnJoinedNa auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -1067,7 +1111,7 @@ TEST_F(AuthorizationSessionTest, CanAggregateGraphLookupWithFindOnJoinedNamespac auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -1084,7 +1128,7 @@ TEST_F(AuthorizationSessionTest, auto aggReq = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, false)); ASSERT_FALSE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -1113,7 +1157,7 @@ TEST_F(AuthorizationSessionTest, auto aggReq 
= uassertStatusOK(aggregation_request_helper::parseFromBSONForTests( testFooNss, BSON("aggregate" << testFooNss.coll() << "pipeline" << pipeline << "cursor" << BSONObj() - << "$db" << testFooNss.db()))); + << "$db" << testFooNss.db_forTest()))); PrivilegeVector privileges = uassertStatusOK( auth::getPrivilegesForAggregate(authzSession.get(), testFooNss, aggReq, true)); ASSERT_TRUE(authzSession->isAuthorizedForPrivileges(privileges)); @@ -1147,77 +1191,87 @@ TEST_F(AuthorizationSessionTest, AuthorizedSessionIsCoauthorizedNobodyWhenAuthIs authzSession->logoutDatabase(_client.get(), "test", "Kill the test!"); } +const auto listTestCollectionsPayload = BSON("listCollections"_sd << 1 << "$db" + << "test"_sd); +const auto listTestCollectionsCmd = + ListCollections::parse(IDLParserContext("listTestCollectionsCmd"), listTestCollectionsPayload); +const auto listOtherCollectionsPayload = BSON("listCollections"_sd << 1 << "$db" + << "other"_sd); +const auto listOtherCollectionsCmd = ListCollections::parse( + IDLParserContext("listOtherCollectionsCmd"), listOtherCollectionsPayload); +const auto listOwnTestCollectionsPayload = + BSON("listCollections"_sd << 1 << "$db" + << "test"_sd + << "nameOnly"_sd << true << "authorizedCollections"_sd << true); +const auto listOwnTestCollectionsCmd = ListCollections::parse( + IDLParserContext("listOwnTestCollectionsCmd"), listOwnTestCollectionsPayload); + TEST_F(AuthorizationSessionTest, CannotListCollectionsWithoutListCollectionsPrivilege) { - BSONObj cmd = BSON("listCollections" << 1); - // With no privileges, there is not authorization to list collections - ASSERT_EQ(ErrorCodes::Unauthorized, - authzSession->checkAuthorizedToListCollections(testFooNss.db(), cmd).getStatus()); + // With no privileges, there is no authorization to list collections ASSERT_EQ(ErrorCodes::Unauthorized, - authzSession->checkAuthorizedToListCollections(testBarNss.db(), cmd).getStatus()); + authzSession->checkAuthorizedToListCollections(listTestCollectionsCmd).getStatus()); ASSERT_EQ(ErrorCodes::Unauthorized, - authzSession->checkAuthorizedToListCollections(testQuxNss.db(), cmd).getStatus()); + authzSession->checkAuthorizedToListCollections(listOtherCollectionsCmd).getStatus()); } TEST_F(AuthorizationSessionTest, CanListCollectionsWithListCollectionsPrivilege) { - BSONObj cmd = BSON("listCollections" << 1); - // The listCollections privilege authorizes the list collections command. + // The listCollections privilege authorizes the list collections command on the named database + // only. authzSession->assumePrivilegesForDB(Privilege(testDBResource, ActionType::listCollections)); - ASSERT_OK(authzSession->checkAuthorizedToListCollections(testFooNss.db(), cmd).getStatus()); - ASSERT_OK(authzSession->checkAuthorizedToListCollections(testBarNss.db(), cmd).getStatus()); - ASSERT_OK(authzSession->checkAuthorizedToListCollections(testQuxNss.db(), cmd).getStatus()); + // "test" DB is okay. + ASSERT_OK(authzSession->checkAuthorizedToListCollections(listTestCollectionsCmd).getStatus()); + + // "other" DB is not. + ASSERT_EQ(ErrorCodes::Unauthorized, + authzSession->checkAuthorizedToListCollections(listOtherCollectionsCmd).getStatus()); } TEST_F(AuthorizationSessionTest, CanListOwnCollectionsWithPrivilege) { - BSONObj cmd = - BSON("listCollections" << 1 << "nameOnly" << true << "authorizedCollections" << true); - // The listCollections privilege authorizes the list collections command. + // Any privilege on a DB implies authorization to list one's own collections. 
authzSession->assumePrivilegesForDB(Privilege(testFooCollResource, ActionType::find)); - ASSERT_OK(authzSession->checkAuthorizedToListCollections(testFooNss.db(), cmd).getStatus()); - ASSERT_OK(authzSession->checkAuthorizedToListCollections(testBarNss.db(), cmd).getStatus()); - ASSERT_OK(authzSession->checkAuthorizedToListCollections(testQuxNss.db(), cmd).getStatus()); + // Just own collections is okay. + ASSERT_OK( + authzSession->checkAuthorizedToListCollections(listOwnTestCollectionsCmd).getStatus()); + // All collections is not. ASSERT_EQ(ErrorCodes::Unauthorized, - authzSession->checkAuthorizedToListCollections("other", cmd).getStatus()); + authzSession->checkAuthorizedToListCollections(listTestCollectionsCmd).getStatus()); } +const auto kAnyResource = ResourcePattern::forAnyResource(boost::none); +const auto kAnyNormalResource = ResourcePattern::forAnyNormalResource(boost::none); + TEST_F(AuthorizationSessionTest, CanCheckIfHasAnyPrivilegeOnResource) { ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(testFooCollResource)); // If we have a collection privilege, we have actions on that collection authzSession->assumePrivilegesForDB(Privilege(testFooCollResource, ActionType::find)); ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource(testFooCollResource)); - ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource( - ResourcePattern::forDatabaseName(testFooNss.db()))); ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyNormalResource())); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyResource())); + authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forDatabaseName(testDB))); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyNormalResource)); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyResource)); // If we have a database privilege, we have actions on that database and all collections it // contains authzSession->assumePrivilegesForDB( - Privilege(ResourcePattern::forDatabaseName(testFooNss.db()), ActionType::find)); + Privilege(ResourcePattern::forDatabaseName(testDB), ActionType::find)); ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource(testFooCollResource)); - ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource( - ResourcePattern::forDatabaseName(testFooNss.db()))); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyNormalResource())); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyResource())); + ASSERT_TRUE( + authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forDatabaseName(testDB))); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyNormalResource)); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyResource)); // If we have a privilege on anyNormalResource, we have actions on all databases and all // collections they contain - authzSession->assumePrivilegesForDB( - Privilege(ResourcePattern::forAnyNormalResource(), ActionType::find)); + authzSession->assumePrivilegesForDB(Privilege(kAnyNormalResource, ActionType::find)); ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource(testFooCollResource)); - ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource( - ResourcePattern::forDatabaseName(testFooNss.db()))); ASSERT_TRUE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyNormalResource())); - ASSERT_FALSE( - 
authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyResource())); + authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forDatabaseName(testDB))); + ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource(kAnyNormalResource)); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyResource)); } TEST_F(AuthorizationSessionTest, CanUseUUIDNamespacesWithPrivilege) { @@ -1239,7 +1293,7 @@ TEST_F(AuthorizationSessionTest, CanUseUUIDNamespacesWithPrivilege) { // The useUUID privilege allows UUIDs to be parsed authzSession->assumePrivilegesForDB( - Privilege(ResourcePattern::forClusterResource(), ActionType::useUUID)); + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::useUUID)); ASSERT_TRUE(authzSession->isAuthorizedToParseNamespaceElement(stringObj.firstElement())); ASSERT_TRUE(authzSession->isAuthorizedToParseNamespaceElement(uuidObj.firstElement())); @@ -1252,7 +1306,7 @@ TEST_F(AuthorizationSessionTest, CanUseUUIDNamespacesWithPrivilege) { std::initializer_list{ AccessCheckEnum::kIsAuthorizedToParseNamespaceElement}, std::initializer_list{ - Privilege(ResourcePattern::forClusterResource(), ActionType::useUUID)}); + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::useUUID)}); authzSession->verifyContract(&ac); } @@ -1395,23 +1449,19 @@ TEST_F(AuthorizationSessionTest, ExpiredSessionWithReauth) { ActionType::insert); } -/** - * TODO (SERVER-75289): This test was disabled by SERVER-69653, which added a privilege action - * called 'configureQueryAnalyzer' to the 'dbAdmin' role but not to the list for Serverless since - * the action is not meant to be supported in multitenant configurations. This has caused this unit - * test to fail. - */ -/**** + TEST_F(AuthorizationSessionTest, ExpirationWithSecurityTokenNOK) { + const auto kTenantOID = OID::gen(); + const TenantId kTenantId(kTenantOID); + // Tests authorization flow from unauthenticated to active (via token) to unauthenticated to // active (via stateful connection) to unauthenticated. using VTS = auth::ValidatedTenancyScope; // Create and authorize a security token user. constexpr auto authUserFieldName = auth::SecurityToken::kAuthenticatedUserFieldName; - auto kOid = OID::gen(); - auto body = BSON("ping" << 1 << "$tenant" << kOid); - const UserName user("spencer", "test", TenantId(kOid)); + auto body = BSON("ping" << 1 << "$tenant" << kTenantOID); + const UserName user("spencer", "test", kTenantId); const UserRequest userRequest(user, boost::none); const UserName adminUser("admin", "admin"); const UserRequest adminUserRequest(adminUser, boost::none); @@ -1419,8 +1469,9 @@ TEST_F(AuthorizationSessionTest, ExpirationWithSecurityTokenNOK) { ASSERT_OK(createUser(user, {{"readWrite", "test"}, {"dbAdmin", "test"}})); ASSERT_OK(createUser(adminUser, {{"readWriteAnyDatabase", "admin"}})); - VTS validatedTenancyScope = VTS(BSON(authUserFieldName << user.toBSON(true / * encodeTenant * -/)), VTS::TokenForTestingTag{}); VTS::set(_opCtx.get(), validatedTenancyScope); + VTS validatedTenancyScope = VTS(BSON(authUserFieldName << user.toBSON(true /* encodeTenant */)), + VTS::TokenForTestingTag{}); + VTS::set(_opCtx.get(), validatedTenancyScope); // Make sure that security token users can't be authorized with an expiration date. 
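The re-enabled ExpirationWithSecurityTokenNOK test builds its expectations from tenant-scoped names rather than plain strings. A minimal sketch of that construction using the `_forTest` factories shown in this diff; the names and the generated OID are illustrative.

```cpp
// Illustrative only: a tenant-scoped namespace and its exact-namespace pattern.
// A privilege on kFooRsrc matches only this tenant's "test.foo" collection.
const TenantId kTenantId{OID::gen()};
const NamespaceString kFooNss =
    NamespaceString::createNamespaceString_forTest(kTenantId, "test"_sd, "foo"_sd);
const ResourcePattern kFooRsrc = ResourcePattern::forExactNamespace(kFooNss);
```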
Date_t expirationTime = clockSource()->now() + Hours(1); @@ -1428,7 +1479,13 @@ TEST_F(AuthorizationSessionTest, ExpirationWithSecurityTokenNOK) { ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), userRequest, boost::none)); // Assert that the session is authenticated and authorized as expected. - assertSecurityToken(testFooCollResource, ActionType::insert); + const auto kFooCollNss = + NamespaceString::createNamespaceString_forTest(kTenantId, "test"_sd, "foo"_sd); + const auto kFooCollRsrc = ResourcePattern::forExactNamespace(kFooCollNss); + assertSecurityToken(kFooCollRsrc, ActionType::insert); + + // TODO (SERVER-76195) Remove legacy non-tenant aware APIs from ResourcePattern + // Add additional tests for cross-tenancy authorizations. // Assert that another user can't be authorized while the security token is auth'd. ASSERT_NOT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), adminUserRequest, boost::none)); @@ -1441,24 +1498,21 @@ TEST_F(AuthorizationSessionTest, ExpirationWithSecurityTokenNOK) { // Assert that a connection-based user with an expiration policy can be authorized after token // logout. + const auto kSomeCollNss = NamespaceString::createNamespaceString_forTest( + boost::none, "anydb"_sd, "somecollection"_sd); + const auto kSomeCollRsrc = ResourcePattern::forExactNamespace(kSomeCollNss); ASSERT_OK(authzSession->addAndAuthorizeUser(_opCtx.get(), adminUserRequest, expirationTime)); - assertActive(ResourcePattern::forExactNamespace( - NamespaceString::createNamespaceString_forTest("anydb.somecollection")), - ActionType::insert); + assertActive(kSomeCollRsrc, ActionType::insert); // Check that logout proceeds normally. authzSession->logoutDatabase(_client.get(), "admin", "Kill the test!"); - assertLogout(ResourcePattern::forExactNamespace( - NamespaceString::createNamespaceString_forTest("anydb.somecollection")), - ActionType::insert); + assertLogout(kSomeCollRsrc, ActionType::insert); } -****/ class SystemBucketsTest : public AuthorizationSessionTest { protected: - static constexpr auto sb_db_test = "sb_db_test"_sd; - static constexpr auto sb_db_other = "sb_db_other"_sd; - static constexpr auto sb_coll_test = "sb_coll_test"_sd; + static const DatabaseName sb_db_test; + static const DatabaseName sb_db_other; static const ResourcePattern testMissingSystemBucketResource; static const ResourcePattern otherMissingSystemBucketResource; @@ -1471,8 +1525,15 @@ class SystemBucketsTest : public AuthorizationSessionTest { static const ResourcePattern testBucketResource; static const ResourcePattern otherBucketResource; static const ResourcePattern otherDbBucketResource; + + static const ResourcePattern sbCollTestInAnyDB; }; +const DatabaseName SystemBucketsTest::sb_db_test = + DatabaseName::createDatabaseName_forTest(boost::none, "sb_db_test"_sd); +const DatabaseName SystemBucketsTest::sb_db_other = + DatabaseName::createDatabaseName_forTest(boost::none, "sb_db_other"_sd); + const ResourcePattern SystemBucketsTest::testMissingSystemBucketResource( ResourcePattern::forExactNamespace( NamespaceString::createNamespaceString_forTest("sb_db_test.sb_coll_test"))); @@ -1494,11 +1555,17 @@ const ResourcePattern SystemBucketsTest::otherDbSystemBucketResource( NamespaceString::createNamespaceString_forTest("sb_db_other.system.buckets.sb_coll_test"))); const ResourcePattern SystemBucketsTest::testBucketResource( - ResourcePattern::forExactSystemBucketsCollection("sb_db_test", "sb_coll_test")); + 
ResourcePattern::forExactSystemBucketsCollection(NamespaceString::createNamespaceString_forTest( + boost::none /* tenantId */, "sb_db_test"_sd, "sb_coll_test"_sd))); const ResourcePattern SystemBucketsTest::otherBucketResource( - ResourcePattern::forExactSystemBucketsCollection("sb_db_test", "sb_coll_other")); + ResourcePattern::forExactSystemBucketsCollection(NamespaceString::createNamespaceString_forTest( + boost::none /* tenantId */, "sb_db_test"_sd, "sb_coll_other"_sd))); const ResourcePattern SystemBucketsTest::otherDbBucketResource( - ResourcePattern::forExactSystemBucketsCollection("sb_db_other", "sb_coll_test")); + ResourcePattern::forExactSystemBucketsCollection(NamespaceString::createNamespaceString_forTest( + boost::none /* tenantId */, "sb_db_other"_sd, "sb_coll_test"_sd))); + +const ResourcePattern SystemBucketsTest::sbCollTestInAnyDB( + ResourcePattern::forAnySystemBucketsInAnyDatabase(boost::none, "sb_coll_test"_sd)); TEST_F(SystemBucketsTest, CheckExactSystemBucketsCollection) { // If we have a system_buckets exact priv @@ -1527,7 +1594,7 @@ TEST_F(SystemBucketsTest, CheckExactSystemBucketsCollection) { TEST_F(SystemBucketsTest, CheckAnySystemBuckets) { // If we have an any system_buckets priv authzSession->assumePrivilegesForDB( - Privilege(ResourcePattern::forAnySystemBuckets(), ActionType::find)); + Privilege(ResourcePattern::forAnySystemBuckets(boost::none), ActionType::find)); ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(testSystemBucketResource, ActionType::insert)); @@ -1552,7 +1619,7 @@ TEST_F(SystemBucketsTest, CheckAnySystemBuckets) { TEST_F(SystemBucketsTest, CheckAnySystemBucketsInDatabase) { // If we have a system_buckets in a db priv authzSession->assumePrivilegesForDB( - Privilege(ResourcePattern::forAnySystemBucketsInDatabase("sb_db_test"), ActionType::find)); + Privilege(ResourcePattern::forAnySystemBucketsInDatabase(sb_db_test), ActionType::find)); ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(testSystemBucketResource, ActionType::insert)); @@ -1576,8 +1643,7 @@ TEST_F(SystemBucketsTest, CheckAnySystemBucketsInDatabase) { TEST_F(SystemBucketsTest, CheckforAnySystemBucketsInAnyDatabase) { // If we have a system_buckets for a coll in any db priv - authzSession->assumePrivilegesForDB(Privilege( - ResourcePattern::forAnySystemBucketsInAnyDatabase("sb_coll_test"), ActionType::find)); + authzSession->assumePrivilegesForDB(Privilege(sbCollTestInAnyDB, ActionType::find)); ASSERT_FALSE(authzSession->isAuthorizedForActionsOnResource(testSystemBucketResource, @@ -1606,10 +1672,8 @@ TEST_F(SystemBucketsTest, CanCheckIfHasAnyPrivilegeOnResourceForSystemBuckets) { ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource(testSystemBucketResource)); ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource( ResourcePattern::forDatabaseName(sb_db_test))); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyNormalResource())); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyResource())); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyNormalResource)); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyResource)); // If we have any buckets in a database privilege, we have actions on that database and all // system.buckets collections it contains @@ -1620,22 +1684,17 @@ TEST_F(SystemBucketsTest, CanCheckIfHasAnyPrivilegeOnResourceForSystemBuckets) { 
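The SystemBucketsTest constants above move `forExactSystemBucketsCollection` to a `NamespaceString` argument and `forAnySystemBucketsInAnyDatabase` to a (tenant id, collection) pair. A minimal sketch of constructing these patterns with the signatures shown here; the constant names are illustrative and the tenant is left unset.

```cpp
// Sketch: tenant-aware system-buckets patterns using the factories above.
const NamespaceString kBucketsNss = NamespaceString::createNamespaceString_forTest(
    boost::none /* tenantId */, "sb_db_test"_sd, "sb_coll_test"_sd);
const ResourcePattern kExactBuckets =
    ResourcePattern::forExactSystemBucketsCollection(kBucketsNss);
const ResourcePattern kCollInAnyDb =
    ResourcePattern::forAnySystemBucketsInAnyDatabase(boost::none, "sb_coll_test"_sd);
```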
ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource(testSystemBucketResource)); ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource( ResourcePattern::forDatabaseName(sb_db_test))); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyNormalResource())); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyResource())); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyNormalResource)); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyResource)); // If we have a privilege on any systems buckets in any db, we have actions on all databases and // system.buckets. they contain - authzSession->assumePrivilegesForDB(Privilege( - ResourcePattern::forAnySystemBucketsInAnyDatabase(sb_coll_test), ActionType::find)); + authzSession->assumePrivilegesForDB(Privilege(sbCollTestInAnyDB, ActionType::find)); ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnResource(testSystemBucketResource)); ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource( ResourcePattern::forDatabaseName(sb_db_test))); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyNormalResource())); - ASSERT_FALSE( - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnyResource())); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyNormalResource)); + ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnResource(kAnyResource)); } TEST_F(SystemBucketsTest, CheckBuiltinRolesForSystemBuckets) { @@ -1701,13 +1760,12 @@ TEST_F(SystemBucketsTest, CanCheckIfHasAnyPrivilegeInResourceDBForSystemBuckets) ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnAnyResourceInDB(sb_db_test)); ASSERT_FALSE(authzSession->isAuthorizedForAnyActionOnAnyResourceInDB(sb_db_other)); - authzSession->assumePrivilegesForDB(Privilege( - ResourcePattern::forAnySystemBucketsInAnyDatabase(sb_coll_test), ActionType::find)); + authzSession->assumePrivilegesForDB(Privilege(sbCollTestInAnyDB, ActionType::find)); ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnAnyResourceInDB(sb_db_test)); ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnAnyResourceInDB(sb_db_other)); authzSession->assumePrivilegesForDB( - Privilege(ResourcePattern::forAnySystemBuckets(), ActionType::find)); + Privilege(ResourcePattern::forAnySystemBuckets(boost::none), ActionType::find)); ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnAnyResourceInDB(sb_db_test)); ASSERT_TRUE(authzSession->isAuthorizedForAnyActionOnAnyResourceInDB(sb_db_other)); } diff --git a/src/mongo/db/auth/authz_manager_external_state.cpp b/src/mongo/db/auth/authz_manager_external_state.cpp index ee8fc2b7118fb..d2378b1629a5d 100644 --- a/src/mongo/db/auth/authz_manager_external_state.cpp +++ b/src/mongo/db/auth/authz_manager_external_state.cpp @@ -27,14 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/shim.h" -#include "mongo/config.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authz_manager_external_state.h" -#include "mongo/db/auth/user_name.h" -#include "mongo/db/operation_context.h" -#include "mongo/util/net/ssl_types.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/authz_manager_external_state.h b/src/mongo/db/auth/authz_manager_external_state.h index 2702d2cde3686..c6f91114a5853 100644 --- a/src/mongo/db/auth/authz_manager_external_state.h +++ b/src/mongo/db/auth/authz_manager_external_state.h @@ -29,12 +29,16 @@ #pragma once +#include #include #include #include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_impl.h" #include "mongo/db/auth/privilege.h" @@ -44,6 +48,10 @@ #include "mongo/db/auth/user_name.h" #include "mongo/db/database_name.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_manager_external_state_d.cpp b/src/mongo/db/auth/authz_manager_external_state_d.cpp index c7404c0b58bc7..3e9a04e8ac19b 100644 --- a/src/mongo/db/auth/authz_manager_external_state_d.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_d.cpp @@ -27,24 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authz_manager_external_state_d.h" - #include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/shim.h" #include "mongo/base/status.h" +#include "mongo/db/auth/authz_manager_external_state_d.h" +#include "mongo/db/auth/authz_session_external_state.h" #include "mongo/db/auth/authz_session_external_state_d.h" -#include "mongo/db/auth/user_name.h" -#include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" -#include "mongo/db/service_context.h" -#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/record_id.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -87,7 +85,8 @@ Status AuthzManagerExternalStateMongod::findOne(OperationContext* opCtx, return Status::OK(); } return {ErrorCodes::NoMatchingDocument, - str::stream() << "No document in " << nss.ns() << " matches " << query}; + str::stream() << "No document in " << nss.toStringForErrorMsg() << " matches " + << query}; } bool AuthzManagerExternalStateMongod::hasOne(OperationContext* opCtx, diff --git a/src/mongo/db/auth/authz_manager_external_state_d.h b/src/mongo/db/auth/authz_manager_external_state_d.h index 62bd7a08fcebd..3a35f394ea591 100644 --- a/src/mongo/db/auth/authz_manager_external_state_d.h +++ b/src/mongo/db/auth/authz_manager_external_state_d.h @@ -30,12 +30,18 @@ #pragma once #include +#include #include #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/authz_manager_external_state_local.h" #include "mongo/db/auth/builtin_roles.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_manager_external_state_local.cpp b/src/mongo/db/auth/authz_manager_external_state_local.cpp index f799c2fb1baa8..564011bfc0b20 100644 --- a/src/mongo/db/auth/authz_manager_external_state_local.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_local.cpp @@ -28,27 +28,48 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authz_manager_external_state_local.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/auth/address_restriction.h" -#include "mongo/db/auth/auth_options_gen.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/auth_types_gen.h" -#include "mongo/db/auth/privilege_parser.h" +#include "mongo/db/auth/authz_manager_external_state_local.h" +#include "mongo/db/auth/builtin_roles.h" +#include "mongo/db/auth/parsed_privilege_gen.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/restriction_set.h" #include "mongo/db/auth/user_document_parser.h" -#include "mongo/db/multitenancy.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/operation_context.h" -#include "mongo/db/server_options.h" -#include "mongo/db/storage/snapshot_manager.h" +#include 
"mongo/db/storage/storage_options.h" #include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" #include "mongo/util/fail_point.h" -#include "mongo/util/net/ssl_types.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -461,6 +482,7 @@ StatusWith AuthzManagerExternalStateLocal::resolveRoles( const bool processPrivs = option & ResolveRoleOption::kPrivileges; const bool processRests = option & ResolveRoleOption::kRestrictions; const bool walkIndirect = (option & ResolveRoleOption::kDirectOnly) == 0; + IDLParserContext idlctx("resolveRoles"); RoleNameSet inheritedRoles; PrivilegeVector inheritedPrivileges; @@ -520,8 +542,15 @@ StatusWith AuthzManagerExternalStateLocal::resolveRoles( << "Invalid 'privileges' field in role document '" << role << "'"}; } for (const auto& privElem : elem.Obj()) { - auto priv = Privilege::fromBSON(privElem); - Privilege::addPrivilegeToPrivilegeVector(&inheritedPrivileges, priv); + if (privElem.type() != Object) { + return {ErrorCodes::UnsupportedFormat, + "Expected privilege document as object, got {}"_format( + typeName(privElem.type()))}; + } + auto pp = auth::ParsedPrivilege::parse(idlctx, privElem.Obj()); + Privilege::addPrivilegeToPrivilegeVector( + &inheritedPrivileges, + Privilege::resolvePrivilegeWithTenant(role.getTenant(), pp)); } } @@ -752,7 +781,7 @@ class AuthzCollection { // invalidators will purge cache on a per-tenant basis as needed. auto db = nss.dbName(); auto coll = nss.coll(); - if (db.db() != DatabaseName::kAdmin.db()) { + if (!db.isAdminDB()) { return; } diff --git a/src/mongo/db/auth/authz_manager_external_state_local.h b/src/mongo/db/auth/authz_manager_external_state_local.h index c93641176d280..5bece24f8a15a 100644 --- a/src/mongo/db/auth/authz_manager_external_state_local.h +++ b/src/mongo/db/auth/authz_manager_external_state_local.h @@ -29,16 +29,31 @@ #pragma once +#include #include +#include #include +#include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/authorization_manager_impl.h" #include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/builtin_roles.h" +#include "mongo/db/auth/privilege_format.h" #include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.cpp b/src/mongo/db/auth/authz_manager_external_state_mock.cpp index d5e6b41552294..7c01324925dc7 100644 --- a/src/mongo/db/auth/authz_manager_external_state_mock.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_mock.cpp @@ -27,29 +27,45 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/auth/authz_manager_external_state_mock.h" -#include #include +#include + +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/shim.h" #include "mongo/base/status.h" -#include "mongo/bson/mutable/algorithm.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/element.h" +#include "mongo/bson/oid.h" +#include "mongo/db/auth/authz_session_external_state.h" #include "mongo/db/auth/authz_session_external_state_mock.h" -#include "mongo/db/auth/privilege_parser.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/field_ref_set.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_with_placeholder.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/update_driver.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/safe_num.h" namespace mongo { - namespace { std::unique_ptr authzManagerExternalStateCreateImpl() { @@ -75,18 +91,15 @@ void addRoleNameObjectsToArrayElement(mutablebson::Element array, RoleNameIterat void addPrivilegeObjectsOrWarningsToArrayElement(mutablebson::Element privilegesElement, mutablebson::Element warningsElement, const PrivilegeVector& privileges) { - std::string errmsg; - for (size_t i = 0; i < privileges.size(); ++i) { - ParsedPrivilege pp; - if (ParsedPrivilege::privilegeToParsedPrivilege(privileges[i], &pp, &errmsg)) { - fassert(17178, privilegesElement.appendObject("", pp.toBSON())); - } else { + for (const auto& privilege : privileges) { + try { + fassert(17178, privilegesElement.appendObject("", privilege.toBSON())); + } catch (const DBException& ex) { fassert(17179, warningsElement.appendString( "", - std::string(str::stream() << "Skipped privileges on resource " - << privileges[i].getResourcePattern().toString() - << ". Reason: " << errmsg))); + "Skipped privileges on resource {}. 
Reason: {}"_format( + privilege.getResourcePattern().toString(), ex.what()))); } } } @@ -99,10 +112,9 @@ void AuthzManagerExternalStateMock::setAuthorizationManager(AuthorizationManager _authzManager = authzManager; } -void AuthzManagerExternalStateMock::setAuthzVersion(int version) { - OperationContextNoop opCtx; +void AuthzManagerExternalStateMock::setAuthzVersion(OperationContext* opCtx, int version) { uassertStatusOK( - updateOne(&opCtx, + updateOne(opCtx, NamespaceString::kServerConfigurationNamespace, AuthorizationManager::versionDocumentQuery, BSON("$set" << BSON(AuthorizationManager::schemaVersionFieldName << version)), diff --git a/src/mongo/db/auth/authz_manager_external_state_mock.h b/src/mongo/db/auth/authz_manager_external_state_mock.h index 0eb82b74e4a1e..10ce9fca0685a 100644 --- a/src/mongo/db/auth/authz_manager_external_state_mock.h +++ b/src/mongo/db/auth/authz_manager_external_state_mock.h @@ -29,16 +29,23 @@ #pragma once +#include #include #include +#include #include #include #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/authz_manager_external_state_local.h" #include "mongo/db/auth/builtin_roles.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/tenant_id.h" namespace mongo { @@ -56,7 +63,7 @@ class AuthzManagerExternalStateMock : public AuthzManagerExternalStateLocal { virtual ~AuthzManagerExternalStateMock(); void setAuthorizationManager(AuthorizationManager* authzManager); - void setAuthzVersion(int version); + void setAuthzVersion(OperationContext* opCtx, int version); std::unique_ptr makeAuthzSessionExternalState( AuthorizationManager* authzManager) override; diff --git a/src/mongo/db/auth/authz_manager_external_state_s.cpp b/src/mongo/db/auth/authz_manager_external_state_s.cpp index 4f415fd724725..4df96d67920e1 100644 --- a/src/mongo/db/auth/authz_manager_external_state_s.cpp +++ b/src/mongo/db/auth/authz_manager_external_state_s.cpp @@ -27,23 +27,36 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authz_manager_external_state_s.h" - +#include +#include +#include +#include +#include #include +#include #include +#include + #include "mongo/base/shim.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/auth_name.h" +#include "mongo/db/auth/authz_manager_external_state_s.h" +#include "mongo/db/auth/authz_session_external_state.h" #include "mongo/db/auth/authz_session_external_state_s.h" #include "mongo/db/auth/user_document_parser.h" -#include "mongo/db/auth/user_management_commands_parser.h" #include "mongo/db/auth/user_name.h" #include "mongo/db/multitenancy.h" #include "mongo/db/operation_context.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/grid.h" -#include "mongo/util/net/ssl_types.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_manager_external_state_s.h b/src/mongo/db/auth/authz_manager_external_state_s.h index 5789b46433737..5c05f171fb34e 100644 --- a/src/mongo/db/auth/authz_manager_external_state_s.h +++ b/src/mongo/db/auth/authz_manager_external_state_s.h @@ -29,14 +29,25 @@ #pragma once +#include #include #include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/privilege_format.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/tenant_id.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state.cpp b/src/mongo/db/auth/authz_session_external_state.cpp index 73a3db66a5e01..ccc474fa51413 100644 --- a/src/mongo/db/auth/authz_session_external_state.cpp +++ b/src/mongo/db/auth/authz_session_external_state.cpp @@ -27,13 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authz_session_external_state.h" +#include #include "mongo/base/shim.h" -#include "mongo/base/status.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/auth/authz_session_external_state.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state.h b/src/mongo/db/auth/authz_session_external_state.h index d6af2dbfc4ebb..e8b9ffa6fb3c7 100644 --- a/src/mongo/db/auth/authz_session_external_state.h +++ b/src/mongo/db/auth/authz_session_external_state.h @@ -29,11 +29,13 @@ #pragma once +#include #include #include "mongo/base/status.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state_d.cpp b/src/mongo/db/auth/authz_session_external_state_d.cpp index 37246f91054b0..bad99e954fc11 100644 --- a/src/mongo/db/auth/authz_session_external_state_d.cpp +++ b/src/mongo/db/auth/authz_session_external_state_d.cpp @@ -27,17 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/auth/authz_session_external_state_d.h" +#include #include "mongo/base/shim.h" -#include "mongo/base/status.h" +#include "mongo/db/auth/authz_session_external_state.h" +#include "mongo/db/auth/authz_session_external_state_d.h" #include "mongo/db/client.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/scripting/engine.h" +#include "mongo/db/service_context.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state_d.h b/src/mongo/db/auth/authz_session_external_state_d.h index 1b29c027b97d8..85047c31cc88e 100644 --- a/src/mongo/db/auth/authz_session_external_state_d.h +++ b/src/mongo/db/auth/authz_session_external_state_d.h @@ -30,7 +30,9 @@ #pragma once #include "mongo/base/status.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authz_session_external_state_server_common.h" +#include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state_mock.cpp b/src/mongo/db/auth/authz_session_external_state_mock.cpp index fa9c141d2306b..9d1c711143bcc 100644 --- a/src/mongo/db/auth/authz_session_external_state_mock.cpp +++ b/src/mongo/db/auth/authz_session_external_state_mock.cpp @@ -28,6 +28,10 @@ */ #include "mongo/db/auth/authz_session_external_state_mock.h" + +#include +#include + #include "mongo/base/shim.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state_mock.h b/src/mongo/db/auth/authz_session_external_state_mock.h index ade50f65f98b5..9baf148cb3a4e 100644 --- a/src/mongo/db/auth/authz_session_external_state_mock.h +++ b/src/mongo/db/auth/authz_session_external_state_mock.h @@ -30,7 +30,9 @@ #pragma once #include "mongo/base/status.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authz_session_external_state.h" +#include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state_s.cpp b/src/mongo/db/auth/authz_session_external_state_s.cpp index 2313564224c80..6857f7d829ea0 100644 --- a/src/mongo/db/auth/authz_session_external_state_s.cpp +++ b/src/mongo/db/auth/authz_session_external_state_s.cpp @@ -27,15 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authz_session_external_state_s.h" - +#include #include #include "mongo/base/shim.h" -#include "mongo/base/status.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/auth/authz_session_external_state.h" +#include "mongo/db/auth/authz_session_external_state_s.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state_s.h b/src/mongo/db/auth/authz_session_external_state_s.h index 46046e3bef141..d34e1948dfec5 100644 --- a/src/mongo/db/auth/authz_session_external_state_s.h +++ b/src/mongo/db/auth/authz_session_external_state_s.h @@ -30,7 +30,9 @@ #pragma once #include "mongo/base/status.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authz_session_external_state_server_common.h" +#include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.cpp b/src/mongo/db/auth/authz_session_external_state_server_common.cpp index cf22ce96aae69..083c6f06c6faf 100644 --- a/src/mongo/db/auth/authz_session_external_state_server_common.cpp +++ b/src/mongo/db/auth/authz_session_external_state_server_common.cpp @@ -28,17 +28,13 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authz_session_external_state_server_common.h" - #include -#include "mongo/base/status.h" +#include "mongo/db/auth/authz_session_external_state_server_common.h" #include "mongo/db/auth/enable_localhost_auth_bypass_parameter_gen.h" #include "mongo/db/client.h" #include "mongo/logv2/log.h" -#include "mongo/util/debug_util.h" +#include "mongo/logv2/log_component.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl diff --git a/src/mongo/db/auth/authz_session_external_state_server_common.h b/src/mongo/db/auth/authz_session_external_state_server_common.h index 51dcd1b2bc2c3..480e1cb1b9ca1 100644 --- a/src/mongo/db/auth/authz_session_external_state_server_common.h +++ b/src/mongo/db/auth/authz_session_external_state_server_common.h @@ -30,7 +30,9 @@ #pragma once #include "mongo/base/status.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authz_session_external_state.h" +#include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/auth/builtin_roles.h b/src/mongo/db/auth/builtin_roles.h index e20dbdaa86dcf..798f54e19e916 100644 --- a/src/mongo/db/auth/builtin_roles.h +++ b/src/mongo/db/auth/builtin_roles.h @@ -29,9 +29,13 @@ #pragma once +#include +#include + #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/role_name.h" #include "mongo/db/database_name.h" +#include "mongo/db/tenant_id.h" #include "mongo/stdx/unordered_set.h" namespace mongo { @@ -53,7 +57,7 @@ stdx::unordered_set getBuiltinRoleNamesForDB(const DatabaseName& dbnam /** * Adds to "privileges" the necessary privileges to do absolutely anything on the system. */ -void generateUniversalPrivileges(PrivilegeVector* privileges); +void generateUniversalPrivileges(PrivilegeVector* privileges, const boost::optional&); /** * Returns whether the given role corresponds to a built-in role. 
diff --git a/src/mongo/db/auth/builtin_roles.tpl.cpp b/src/mongo/db/auth/builtin_roles.tpl.cpp index b698f34fce772..562761d44a2c2 100644 --- a/src/mongo/db/auth/builtin_roles.tpl.cpp +++ b/src/mongo/db/auth/builtin_roles.tpl.cpp @@ -48,7 +48,7 @@ constexpr auto kAdminDB = "admin"_sd; */ //#for $role in $roles -void addPrivileges_${role.name}(PrivilegeVector* privileges, StringData dbName); +void addPrivileges_${role.name}(PrivilegeVector* privileges, const DatabaseName& dbName); //#end for /* Implemenations */ @@ -59,15 +59,15 @@ void addPrivileges_${role.name}(PrivilegeVector* privileges, StringData dbName); //#if $db is None //#echo 'dbName' //#else -//#echo '"' + $db + '"_sd' +//#echo 'DatabaseName::createDatabaseNameForAuth(dbName.tenantId(), "' + $db + '"_sd)' //#end if //#end def //#for $role in $roles -void addPrivileges_${role.name}(PrivilegeVector* privileges, StringData dbName) { +void addPrivileges_${role.name}(PrivilegeVector* privileges, const DatabaseName& dbName) { //#if $role.adminOnly /* Admin only builtin role */ - fassert(6837401, dbName == kAdminDB); + fassert(6837401, dbName.db() == kAdminDB); //#end if //#for $subrole in $role.roles @@ -79,22 +79,23 @@ void addPrivileges_${role.name}(PrivilegeVector* privileges, StringData dbName) privileges, Privilege( //#if $priv.matchType == 'any' - ResourcePattern::forAnyResource(), + ResourcePattern::forAnyResource(dbName.tenantId()), //#elif $priv.matchType == 'any_normal' - ResourcePattern::forAnyNormalResource(), + ResourcePattern::forAnyNormalResource(dbName.tenantId()), //#elif $priv.matchType == 'cluster' - ResourcePattern::forClusterResource(), + ResourcePattern::forClusterResource(dbName.tenantId()), //#elif $priv.matchType == 'database' ResourcePattern::forDatabaseName($dbName($priv.db)), //#elif $priv.matchType == 'collection' - ResourcePattern::forCollectionName("$priv.collection"_sd), + ResourcePattern::forCollectionName(dbName.tenantId(), "$priv.collection"_sd), //#elif $priv.matchType == 'exact_namespace' - ResourcePattern::forExactNamespace( - NamespaceString($dbName($priv.db), "$priv.collection"_sd)), + ResourcePattern::forExactNamespace(NamespaceStringUtil::parseNamespaceFromDoc( + $dbName($priv.db), "$priv.collection"_sd)), //#elif $priv.matchType == 'any_system_buckets' - ResourcePattern::forAnySystemBuckets(), + ResourcePattern::forAnySystemBuckets(dbName.tenantId()), //#elif $priv.matchType == 'system_buckets_in_any_db' - ResourcePattern::forAnySystemBucketsInAnyDatabase("$priv.system_buckets"_sd), + ResourcePattern::forAnySystemBucketsInAnyDatabase(dbName.tenantId(), + "$priv.system_buckets"_sd), //#elif $priv.matchType == 'system_buckets' ResourcePattern::forExactSystemBucketsCollection($dbName($priv.db), "$priv.system_buckets"_sd), @@ -116,13 +117,13 @@ void addPrivileges_${role.name}(PrivilegeVector* privileges, StringData dbName) ActionSet allActions; allActions.addAllActions(); Privilege::addPrivilegeToPrivilegeVector( - privileges, Privilege(ResourcePattern::forAnyResource(), allActions)); + privileges, Privilege(ResourcePattern::forAnyResource(dbName.tenantId()), allActions)); //#end if } //#end for -using addPrivilegesFn = void (*)(PrivilegeVector*, StringData); +using addPrivilegesFn = void (*)(PrivilegeVector*, const DatabaseName&); struct BuiltinRoleAttributes { bool adminOnly; addPrivilegesFn addPrivileges; @@ -142,9 +143,9 @@ const std::map kBuiltinRoleMap = { //#end for }; -const stdx::unordered_set kAdminBuiltinRoles = { +const stdx::unordered_set kAdminBuiltinRolesNoTenant = { //#for $role in 
$roles - RoleName("$role.name"_sd, kAdminDB), + RoleName("$role.name"_sd, DatabaseName::createDatabaseNameForAuth(boost::none, kAdminDB)), //#end for }; @@ -153,7 +154,7 @@ const stdx::unordered_set kAdminBuiltinRoles = { // Therefore, granting privileges on this database does not make sense. bool isValidDB(const DatabaseName& dbname) { return NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow) && - (dbname.db() != NamespaceString::kExternalDb); + (!dbname.isExternalDB()); } } // namespace @@ -167,18 +168,28 @@ stdx::unordered_set auth::getBuiltinRoleNamesForDB(const DatabaseName& } if (dbName.db() == kAdminDB) { - return kAdminBuiltinRoles; + if (dbName.tenantId() == boost::none) { + // Specialcase for the admin DB in non-multitenancy mode. + return kAdminBuiltinRolesNoTenant; + } + return stdx::unordered_set({ + //#for $role in $roles + RoleName("$role.name"_sd, dbName), + //#end for + }); + + } else { + return stdx::unordered_set({ + //#for $role in $global_roles + RoleName("$role.name"_sd, dbName), + //#end for + }); } - - return stdx::unordered_set({ - //#for $role in $global_roles - RoleName("$role.name"_sd, dbName), - //#end for - }); } -void auth::generateUniversalPrivileges(PrivilegeVector* privileges) { - addPrivileges___system(privileges, kAdminDB); +void auth::generateUniversalPrivileges(PrivilegeVector* privileges, + const boost::optional& tenantId) { + addPrivileges___system(privileges, DatabaseName::createDatabaseNameForAuth(tenantId, kAdminDB)); } bool auth::addPrivilegesForBuiltinRole(const RoleName& role, PrivilegeVector* privileges) { @@ -196,7 +207,7 @@ bool auth::addPrivilegesForBuiltinRole(const RoleName& role, PrivilegeVector* pr return false; } - def.addPrivileges(privileges, role.getDB()); + def.addPrivileges(privileges, role.getDatabaseName()); return true; } diff --git a/src/mongo/db/auth/builtin_roles.yml b/src/mongo/db/auth/builtin_roles.yml index 1babdd934865e..7afbe27b4bc38 100644 --- a/src/mongo/db/auth/builtin_roles.yml +++ b/src/mongo/db/auth/builtin_roles.yml @@ -51,6 +51,7 @@ roles: - matchType: database actions: &readWriteRoleActions - *readRoleActions + - cleanupStructuredEncryptionData - compactStructuredEncryptionData - convertToCapped # db admin gets this also - createCollection # db admin gets this also @@ -63,6 +64,7 @@ roles: - remove - renameCollectionSameDB # db admin gets this also - update + - updateSearchIndex - matchType: exact_namespace collection: 'system.js' actions: *readWriteRoleActions @@ -352,9 +354,9 @@ roles: - getClusterParameter - setChangeStreamState - getChangeStreamState - - telemetryRead + - queryStatsRead - checkMetadataConsistency - - transitionToCatalogShard + - transitionFromDedicatedConfigServer - transitionToDedicatedConfigServer - matchType: any_normal diff --git a/src/mongo/db/auth/builtin_roles_test.cpp b/src/mongo/db/auth/builtin_roles_test.cpp index 22c8d5af51546..d6549e0b93547 100644 --- a/src/mongo/db/auth/builtin_roles_test.cpp +++ b/src/mongo/db/auth/builtin_roles_test.cpp @@ -31,16 +31,33 @@ * Unit tests of the builtin roles psuedo-collection. 
*/ -#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/builtin_roles.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/sequence_util.h" -#include "mongo/util/str.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/database_name_util.h" namespace mongo { namespace { +const auto kAdminDB = DatabaseNameUtil::deserialize(boost::none, "admin"_sd); +const auto kAdminRsrc = ResourcePattern::forDatabaseName(kAdminDB); +const auto kAdminSystemJSNSS = + NamespaceString::createNamespaceString_forTest(kAdminDB, "system.js"_sd); +const auto kAdminSystemJSRsrc = ResourcePattern::forExactNamespace(kAdminSystemJSNSS); + TEST(BuiltinRoles, BuiltinRolesOnlyOnAppropriateDatabases) { ASSERT(auth::isBuiltinRole(RoleName("read", "test"))); ASSERT(auth::isBuiltinRole(RoleName("readWrite", "test"))); @@ -75,7 +92,7 @@ TEST(BuiltinRoles, BuiltinRolesOnlyOnAppropriateDatabases) { } TEST(BuiltinRoles, getBuiltinRolesForDB) { - auto adminRoles = auth::getBuiltinRoleNamesForDB({boost::none, "admin"}); + auto adminRoles = auth::getBuiltinRoleNamesForDB(DatabaseName::kAdmin); ASSERT(adminRoles.contains(RoleName("read", "admin"))); ASSERT(adminRoles.contains(RoleName("readAnyDatabase", "admin"))); for (const auto& role : adminRoles) { @@ -83,7 +100,8 @@ TEST(BuiltinRoles, getBuiltinRolesForDB) { ASSERT(auth::isBuiltinRole(role)); } - auto testRoles = auth::getBuiltinRoleNamesForDB({boost::none, "test"}); + auto testRoles = auth::getBuiltinRoleNamesForDB( + DatabaseName::createDatabaseName_forTest(boost::none, "test")); ASSERT(testRoles.contains(RoleName("read", "test"))); ASSERT(!testRoles.contains(RoleName("readAnyDatabase", "test"))); for (const auto& role : testRoles) { @@ -111,13 +129,10 @@ TEST(BuiltinRoles, addPrivilegesForBuiltinRole) { ActionType::listSearchIndexes, ActionType::planCacheRead, }); - const auto adminDB = ResourcePattern::forDatabaseName("admin"); - const auto adminSystemJS = ResourcePattern::forExactNamespace( - NamespaceString::createNamespaceString_forTest("admin", "system.js")); for (const auto& priv : privs) { auto resource = priv.getResourcePattern(); - ASSERT((resource == adminDB) || (resource == adminSystemJS)); + ASSERT((resource == kAdminRsrc) || (resource == kAdminSystemJSRsrc)); ASSERT(priv.getActions() == expSet); } } @@ -127,7 +142,7 @@ TEST(BuiltinRoles, addSystemBucketsPrivilegesForBuiltinRoleClusterManager) { ASSERT(auth::addPrivilegesForBuiltinRole(RoleName("clusterManager", "admin"), &privs)); ASSERT_EQ(privs.size(), 11); - const auto systemBucketsResourcePattern = ResourcePattern::forAnySystemBuckets(); + const auto systemBucketsResourcePattern = ResourcePattern::forAnySystemBuckets(boost::none); const ActionSet clusterManagerRoleDatabaseActionSet({ ActionType::clearJumboFlag, diff --git a/src/mongo/db/auth/cluster_auth_mode.cpp b/src/mongo/db/auth/cluster_auth_mode.cpp index 43757c112467f..0ac5a8cd1c6c5 100644 --- a/src/mongo/db/auth/cluster_auth_mode.cpp +++ b/src/mongo/db/auth/cluster_auth_mode.cpp @@ -28,11 +28,13 @@ */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/auth/cluster_auth_mode.h" - -#include "mongo/logv2/log.h" +#include "mongo/util/assert_util.h" 
+#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl diff --git a/src/mongo/db/auth/impersonation_session.cpp b/src/mongo/db/auth/impersonation_session.cpp index 1cf9d790e06ce..ecc571a9f8386 100644 --- a/src/mongo/db/auth/impersonation_session.cpp +++ b/src/mongo/db/auth/impersonation_session.cpp @@ -27,22 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/impersonation_session.h" - #include -#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/impersonation_session.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/resource_pattern.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/operation_context.h" #include "mongo/rpc/metadata/impersonated_user_metadata.h" +#include "mongo/rpc/metadata/impersonated_user_metadata_gen.h" #include "mongo/util/assert_util.h" -#include "mongo/util/destructor_guard.h" namespace mongo { @@ -53,7 +54,8 @@ ImpersonationSessionGuard::ImpersonationSessionGuard(OperationContext* opCtx) : uassert(ErrorCodes::Unauthorized, "Unauthorized use of impersonation metadata.", authSession->isAuthorizedForPrivilege( - Privilege(ResourcePattern::forClusterResource(), ActionType::impersonate))); + Privilege(ResourcePattern::forClusterResource(authSession->getUserTenantId()), + ActionType::impersonate))); fassert(ErrorCodes::InternalError, !authSession->isImpersonating()); if (impersonatedUsersAndRoles->getUser()) { fassert(ErrorCodes::InternalError, diff --git a/src/mongo/db/auth/impersonation_session.h b/src/mongo/db/auth/impersonation_session.h index 9c15797b36416..a581525ab8f54 100644 --- a/src/mongo/db/auth/impersonation_session.h +++ b/src/mongo/db/auth/impersonation_session.h @@ -27,6 +27,7 @@ * it in the license file. */ +#include "mongo/db/operation_context.h" #include "mongo/rpc/metadata/impersonated_user_metadata.h" namespace mongo { diff --git a/src/mongo/db/auth/ldap_cumulative_operation_stats.cpp b/src/mongo/db/auth/ldap_cumulative_operation_stats.cpp index d135e9e9a9a4b..0d1adc65cd721 100644 --- a/src/mongo/db/auth/ldap_cumulative_operation_stats.cpp +++ b/src/mongo/db/auth/ldap_cumulative_operation_stats.cpp @@ -27,13 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/base/error_extra_info.h" #include "mongo/db/auth/ldap_cumulative_operation_stats.h" + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/auth/ldap_operation_stats.h" -#include "mongo/db/curop_metrics.h" #include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/ldap_cumulative_operation_stats.h b/src/mongo/db/auth/ldap_cumulative_operation_stats.h index c24bce49c473b..fd015d4ecaf9d 100644 --- a/src/mongo/db/auth/ldap_cumulative_operation_stats.h +++ b/src/mongo/db/auth/ldap_cumulative_operation_stats.h @@ -34,6 +34,7 @@ #include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/platform/mutex.h" #include "mongo/util/duration.h" #include "mongo/util/tick_source.h" diff --git a/src/mongo/db/auth/ldap_operation_stats.cpp b/src/mongo/db/auth/ldap_operation_stats.cpp index a103c308a8fa4..9cdb62fdc1340 100644 --- a/src/mongo/db/auth/ldap_operation_stats.cpp +++ b/src/mongo/db/auth/ldap_operation_stats.cpp @@ -27,11 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include -#include "mongo/base/error_extra_info.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/auth/ldap_operation_stats.h" #include "mongo/util/duration.h" #include "mongo/util/tick_source.h" diff --git a/src/mongo/db/auth/ldap_operation_stats.h b/src/mongo/db/auth/ldap_operation_stats.h index 3ef49bf9ae196..32e46bb042087 100644 --- a/src/mongo/db/auth/ldap_operation_stats.h +++ b/src/mongo/db/auth/ldap_operation_stats.h @@ -29,12 +29,15 @@ #pragma once +#include #include #include #include #include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/duration.h" #include "mongo/util/tick_source.h" diff --git a/src/mongo/db/auth/oauth_discovery_factory.cpp b/src/mongo/db/auth/oauth_discovery_factory.cpp index cfa7b3633d80b..40c2cb9eab00d 100644 --- a/src/mongo/db/auth/oauth_discovery_factory.cpp +++ b/src/mongo/db/auth/oauth_discovery_factory.cpp @@ -31,8 +31,14 @@ #include +#include + +#include "mongo/base/data_builder.h" +#include "mongo/base/data_range_cursor.h" #include "mongo/base/string_data.h" #include "mongo/bson/json.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/auth/oauth_discovery_factory.h b/src/mongo/db/auth/oauth_discovery_factory.h index 8287fb6910186..7f80ada27edde 100644 --- a/src/mongo/db/auth/oauth_discovery_factory.h +++ b/src/mongo/db/auth/oauth_discovery_factory.h @@ -29,12 +29,14 @@ #pragma once +#include +#include +#include + #include "mongo/base/string_data.h" #include "mongo/db/auth/oauth_authorization_server_metadata_gen.h" #include "mongo/util/net/http_client.h" -#include - namespace mongo { /** diff --git a/src/mongo/db/auth/oauth_discovery_factory_test.cpp b/src/mongo/db/auth/oauth_discovery_factory_test.cpp index c5341fc64f978..3039c8a2f5ffe 100644 --- a/src/mongo/db/auth/oauth_discovery_factory_test.cpp +++ b/src/mongo/db/auth/oauth_discovery_factory_test.cpp @@ -27,15 +27,20 @@ * it in the license file. 
*/ -#include "mongo/idl/idl_parser.h" -#include "mongo/platform/basic.h" +#include + +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/oauth_discovery_factory.h" +#include "mongo/idl/idl_parser.h" #include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/http_client_mock.h" -#include "mongo/unittest/unittest.h" - namespace mongo { namespace { diff --git a/src/mongo/db/auth/parsed_privilege.idl b/src/mongo/db/auth/parsed_privilege.idl new file mode 100644 index 0000000000000..de2faf40c9810 --- /dev/null +++ b/src/mongo/db/auth/parsed_privilege.idl @@ -0,0 +1,76 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. 
+# + +global: + cpp_namespace: "mongo::auth" + +imports: + - "mongo/db/basic_types.idl" + - "mongo/db/auth/auth_types.idl" + +structs: + ParsedResource: + description: ResourcePattern as read from disk + strict: false + fields: + cluster: + # Conflicts with all other resource subfields + description: 'ResourcePattern::forClusterResource' + type: safeBool + optional: true + anyResource: + # Conflicts with all other resource subfields + description: 'ResourcePattern::forAnyResource' + type: safeBool + optional: true + db: + # Conflicts with all but 'collection' and 'system_buckets' + description: 'Used with anyNormalResource or partial/exact namespace match' + type: string + optional: true + collection: + # Conflicts with all but 'db', which is required + description: 'Used with anyNormalResource or partial/exact namespace match' + type: string + optional: true + system_buckets: + # conflicts with all but 'db', which is optional + type: string + cpp_name: systemBuckets + optional: true + + ParsedPrivilege: + description: Privilege document as read from disk + strict: false + fields: + resource: + description: A description of the ResourcePattern used by this Privilege + type: ParsedResource + actions: + description: A list of ActionType names granted for the described resource + type: array diff --git a/src/mongo/db/auth/privilege.cpp b/src/mongo/db/auth/privilege.cpp index dc3cd1d75a171..ea8838668d923 100644 --- a/src/mongo/db/auth/privilege.cpp +++ b/src/mongo/db/auth/privilege.cpp @@ -29,11 +29,48 @@ #include "mongo/db/auth/privilege.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/privilege_parser.h" +#include "mongo/db/auth/action_type_gen.h" +#include "mongo/db/auth/parsed_privilege_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/util/assert_util.h" namespace mongo { +namespace { +void uassertNoConflict(StringData resource, StringData found, bool cond) { + uassert( + ErrorCodes::BadValue, "{} conflicts with resource type '{}'"_format(resource, found), cond); +} + +bool isValidCollectionName(StringData db, StringData coll) { + if (NamespaceString::validCollectionName(coll)) { + return true; + } + + // local.oplog.$main is a real collection that the server will create. But, collection + // names with a '$' character are illegal. We must make an exception for this collection + // here so we can grant users access to it. 
+ if ((db == "local"_sd) && (coll == "oplog.$main"_sd)) { + return true; + } + + return false; +} +} // namespace void Privilege::addPrivilegeToPrivilegeVector(PrivilegeVector* privileges, const Privilege& privilegeToAdd) { @@ -61,6 +98,105 @@ Privilege::Privilege(const ResourcePattern& resource, const ActionType action) Privilege::Privilege(const ResourcePattern& resource, const ActionSet& actions) : _resource(resource), _actions(actions) {} +Privilege Privilege::resolvePrivilegeWithTenant(const boost::optional& tenantId, + const auth::ParsedPrivilege& pp, + std::vector* unrecognizedActions) { + using PR = auth::ParsedResource; + const auto& rsrc = pp.getResource(); + Privilege ret; + + if (auto cluster = rsrc.getCluster()) { + // { cluster: 1 } + constexpr StringData kClusterRsrc = "resource: {cluster: true}"_sd; + uassert(ErrorCodes::BadValue, "resource: {cluster: false} must be true", cluster.get()); + uassertNoConflict(kClusterRsrc, PR::kAnyResourceFieldName, !rsrc.getAnyResource()); + uassertNoConflict(kClusterRsrc, PR::kDbFieldName, !rsrc.getDb()); + uassertNoConflict(kClusterRsrc, PR::kCollectionFieldName, !rsrc.getCollection()); + uassertNoConflict(kClusterRsrc, PR::kSystemBucketsFieldName, !rsrc.getSystemBuckets()); + ret._resource = ResourcePattern::forClusterResource(tenantId); + } else if (auto any = rsrc.getAnyResource()) { + // { anyResource: 1 } + constexpr StringData kAnyRsrc = "resource: {anyResource: true}"_sd; + uassert(ErrorCodes::BadValue, "resource: {anyResource: false} must be true", any.get()); + uassertNoConflict(kAnyRsrc, PR::kDbFieldName, !rsrc.getDb()); + uassertNoConflict(kAnyRsrc, PR::kCollectionFieldName, !rsrc.getCollection()); + uassertNoConflict(kAnyRsrc, PR::kSystemBucketsFieldName, !rsrc.getSystemBuckets()); + ret._resource = ResourcePattern::forAnyResource(tenantId); + } else { + // db, collection, systemBuckets format + const bool hasCollection = (rsrc.getCollection() != boost::none); + const bool hasSystemBuckets = (rsrc.getSystemBuckets() != boost::none); + uassertNoConflict("resource: {collection: '...'}", + PR::kSystemBucketsFieldName, + !(hasCollection && hasSystemBuckets)); + if (hasCollection) { + // { db: '...', collection: '...' } + uassert(ErrorCodes::BadValue, + "resource {collection: '...'} must include 'db' field as well", + rsrc.getDb()); + + auto db = rsrc.getDb().get(); + auto coll = rsrc.getCollection().get(); + uassert(ErrorCodes::BadValue, + "'{}' is not a valid collection name"_format(coll), + coll.empty() || isValidCollectionName(db, coll)); + + if (db.empty() && coll.empty()) { + ret._resource = ResourcePattern::forAnyNormalResource(tenantId); + } else if (db.empty()) { + ret._resource = ResourcePattern::forCollectionName(tenantId, coll); + } else if (coll.empty()) { + ret._resource = ResourcePattern::forDatabaseName( + DatabaseName::createDatabaseNameForAuth(tenantId, db)); + } else { + ret._resource = ResourcePattern::forExactNamespace( + NamespaceString::createNamespaceStringForAuth(tenantId, db, coll)); + } + } else if (hasSystemBuckets) { + // { systemBuckets: '...' 
} + auto bucket = rsrc.getSystemBuckets().get(); + const bool emptyDb = !rsrc.getDb() || rsrc.getDb()->empty(); + if (emptyDb && bucket.empty()) { + ret._resource = ResourcePattern::forAnySystemBuckets(tenantId); + } else if (bucket.empty()) { + ret._resource = ResourcePattern::forAnySystemBucketsInDatabase( + DatabaseName::createDatabaseNameForAuth(tenantId, rsrc.getDb().get())); + } else if (emptyDb) { + ret._resource = ResourcePattern::forAnySystemBucketsInAnyDatabase(tenantId, bucket); + } else { + ret._resource = ResourcePattern::forExactSystemBucketsCollection( + NamespaceString::createNamespaceStringForAuth( + tenantId, rsrc.getDb().get(), bucket)); + } + } else { + uasserted(ErrorCodes::BadValue, + "resource pattern must contain 'collection' or 'systemBuckets' specifier"); + } + } + + uassert(ErrorCodes::BadValue, + "'actions' field of privilege resource must not be empty", + !pp.getActions().empty()); + ret._actions = ActionSet::parseFromStringVector(pp.getActions(), unrecognizedActions); + + return ret; +} + +PrivilegeVector Privilege::privilegeVectorFromParsedPrivilegeVector( + const boost::optional& tenantId, + const std::vector& parsedPrivileges, + std::vector* unrecognizedActions) { + PrivilegeVector privileges; + std::transform(parsedPrivileges.cbegin(), + parsedPrivileges.cend(), + std::back_inserter(privileges), + [&](const auto& pp) { + return Privilege::resolvePrivilegeWithTenant( + tenantId, pp, unrecognizedActions); + }); + return privileges; +} + void Privilege::addActions(const ActionSet& actionsToAdd) { _actions.addAllActionsFromSet(actionsToAdd); } @@ -78,62 +214,86 @@ bool Privilege::includesActions(const ActionSet& actions) const { } BSONObj Privilege::toBSON() const { - ParsedPrivilege pp; - std::string errmsg; - invariant(ParsedPrivilege::privilegeToParsedPrivilege(*this, &pp, &errmsg)); - return pp.toBSON(); -} - -Privilege Privilege::fromBSON(const BSONElement elem) { - uassert( - ErrorCodes::BadValue, "Privilege documents must be of type object", elem.type() == Object); - return fromBSON(elem.Obj()); -} - -Privilege Privilege::fromBSON(BSONObj obj) { - ParsedPrivilege pp; - std::string errmsg; - if (!pp.parseBSON(obj, &errmsg)) { - uasserted(ErrorCodes::BadValue, - str::stream() << "Unable to parse privilege document: " << obj - << ", error: " << errmsg); - } - Privilege ret; - std::vector unrecognized; - uassertStatusOK(ParsedPrivilege::parsedPrivilegeToPrivilege(pp, &ret, &unrecognized)); - - if (!unrecognized.empty()) { - StringBuilder sb; - sb << "Unrecognized action"; - if (unrecognized.size() > 1) { - sb << 's'; - } - sb << ": "; - for (std::size_t i = 0; i < unrecognized.size(); ++i) { - if (i > 0) { - sb << ", "; - } - sb << unrecognized[i]; - } - uasserted(ErrorCodes::BadValue, sb.str()); - } - - return ret; + BSONObjBuilder builder; + toParsedPrivilege().serialize(&builder); + return builder.obj(); } Status Privilege::getBSONForPrivileges(const PrivilegeVector& privileges, mutablebson::Element resultArray) try { - for (auto& currPriv : privileges) { - std::string errmsg; - ParsedPrivilege privilege; - if (!ParsedPrivilege::privilegeToParsedPrivilege(currPriv, &privilege, &errmsg)) { - return Status(ErrorCodes::BadValue, errmsg); - } - uassertStatusOK(resultArray.appendObject("privileges", privilege.toBSON())); + for (const auto& currPriv : privileges) { + uassertStatusOK( + resultArray.appendObject("privileges", currPriv.toParsedPrivilege().toBSON())); } return Status::OK(); } catch (...) 
{ return exceptionToStatus(); } +auth::ParsedPrivilege Privilege::toParsedPrivilege() const { + auth::ParsedPrivilege pp; + pp.setActions(_actions.getActionsAsStringDatas()); + + auth::ParsedResource rsrc; + switch (_resource.matchType()) { + case MatchTypeEnum::kMatchClusterResource: + // { cluster: true } + rsrc.setCluster(true); + break; + case MatchTypeEnum::kMatchAnyResource: + // { anyResource: true } + rsrc.setAnyResource(true); + break; + + case MatchTypeEnum::kMatchExactNamespace: + // { db: '...', collection: '...' } + rsrc.setDb(_resource.dbNameToMatch().db()); + rsrc.setCollection(_resource.collectionToMatch()); + break; + case MatchTypeEnum::kMatchDatabaseName: + // { db: '...', collection: '' } + rsrc.setDb(_resource.dbNameToMatch().db()); + rsrc.setCollection(""_sd); + break; + case MatchTypeEnum::kMatchCollectionName: + // { db: '', collection: '...' } + rsrc.setDb(""_sd); + rsrc.setCollection(_resource.collectionToMatch()); + break; + case MatchTypeEnum::kMatchAnyNormalResource: + // { db: '', collection: '' } + rsrc.setDb(""_sd); + rsrc.setCollection(""_sd); + break; + + case MatchTypeEnum::kMatchExactSystemBucketResource: + // { db: '...', system_buckets: '...' } + rsrc.setDb(_resource.dbNameToMatch().db()); + rsrc.setSystemBuckets(_resource.collectionToMatch()); + break; + case MatchTypeEnum::kMatchSystemBucketInAnyDBResource: + // { system_buckets: '...' } + rsrc.setSystemBuckets(_resource.collectionToMatch()); + break; + case MatchTypeEnum::kMatchAnySystemBucketInDBResource: + // { db: '...', system_buckets: '' } + rsrc.setDb(_resource.dbNameToMatch().db()); + rsrc.setSystemBuckets(""_sd); + break; + case MatchTypeEnum::kMatchAnySystemBucketResource: + // { system_buckets: '' } + rsrc.setSystemBuckets(""_sd); + break; + + default: + uasserted( + ErrorCodes::InvalidOptions, + "{} is not a valid user-grantable resource pattern"_format(_resource.toString())); + } + + pp.setResource(rsrc); + + return pp; +} + } // namespace mongo diff --git a/src/mongo/db/auth/privilege.h b/src/mongo/db/auth/privilege.h index c04ca3c873424..2ca27d9a0a566 100644 --- a/src/mongo/db/auth/privilege.h +++ b/src/mongo/db/auth/privilege.h @@ -29,17 +29,29 @@ #pragma once +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/mutable/element.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/tenant_id.h" namespace mongo { class Privilege; -typedef std::vector PrivilegeVector; +class TenantId; + +using PrivilegeVector = std::vector; + +namespace auth { +class ParsedPrivilege; +} // namespace auth /** * A representation of the permission to perform a set of actions on a resource. @@ -58,6 +70,14 @@ class Privilege { static void addPrivilegesToPrivilegeVector(PrivilegeVector* privileges, const PrivilegeVector& privilegesToAdd); + /** + * Promote a vector of ParsedPrivilege documents into tenant aware privileges. + */ + static PrivilegeVector privilegeVectorFromParsedPrivilegeVector( + const boost::optional&, + const std::vector&, + std::vector*); + /** * Takes a vector of privileges and fills the output param "resultArray" with a BSON array * representation of the privileges. 
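For reference, a minimal sketch of consuming the privilegeVectorFromParsedPrivilegeVector() declaration above; it assumes the elided template arguments are TenantId, auth::ParsedPrivilege, and std::string (matching the definitions in privilege.cpp earlier in this diff), and parsePrivilegeDocs is an illustrative helper, not part of the change:

// Sketch only: promote on-disk privilege documents into tenant-aware Privileges.
// 'privilegeDocs' is assumed to hold an array of privilege objects, e.g. from a role document.
PrivilegeVector parsePrivilegeDocs(const boost::optional<TenantId>& tenantId,
                                   const BSONObj& privilegeDocs,
                                   std::vector<std::string>* unrecognizedActions) {
    IDLParserContext idlctx("parsePrivilegeDocs");
    std::vector<auth::ParsedPrivilege> parsed;
    for (const auto& elem : privilegeDocs) {
        parsed.push_back(auth::ParsedPrivilege::parse(idlctx, elem.Obj()));
    }
    // Unknown action names are collected into 'unrecognizedActions' rather than throwing,
    // unlike the removed Privilege::fromBSON() path.
    return Privilege::privilegeVectorFromParsedPrivilegeVector(tenantId, parsed, unrecognizedActions);
}
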
@@ -78,6 +98,14 @@ class Privilege { Privilege(const ResourcePattern& resource, ActionType action); Privilege(const ResourcePattern& resource, const ActionSet& actions); + // Transform a ParsedPrivilege into a concrete Privilege by adding tenantId + // and turning string actions into ActionSet bits. + // unrecognizedActions will be populated with unexpected ActionType names, if present. + static Privilege resolvePrivilegeWithTenant( + const boost::optional&, + const auth::ParsedPrivilege&, + std::vector* unrecognizedActions = nullptr); + const ResourcePattern& getResourcePattern() const { return _resource; } @@ -86,6 +114,8 @@ class Privilege { return _actions; } + auth::ParsedPrivilege toParsedPrivilege() const; + void addActions(const ActionSet& actionsToAdd); void removeActions(const ActionSet& actionsToRemove); @@ -94,8 +124,6 @@ class Privilege { // Checks if the given actions are present in the Privilege. bool includesActions(const ActionSet& actions) const; - static Privilege fromBSON(BSONElement obj); - static Privilege fromBSON(BSONObj obj); BSONObj toBSON() const; private: diff --git a/src/mongo/db/auth/privilege_parser.cpp b/src/mongo/db/auth/privilege_parser.cpp deleted file mode 100644 index 801e731af084e..0000000000000 --- a/src/mongo/db/auth/privilege_parser.cpp +++ /dev/null @@ -1,563 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/auth/privilege_parser.h" - -#include - -#include "mongo/db/auth/privilege.h" -#include "mongo/db/field_parser.h" -#include "mongo/db/namespace_string.h" -#include "mongo/util/str.h" - -namespace mongo { - -using std::string; -using std::vector; - -using str::stream; - -const BSONField ParsedResource::anyResource("anyResource"); -const BSONField ParsedResource::cluster("cluster"); -const BSONField ParsedResource::systemBuckets("system_buckets"); -const BSONField ParsedResource::db("db"); -const BSONField ParsedResource::collection("collection"); - -ParsedResource::ParsedResource() { - clear(); -} - -ParsedResource::~ParsedResource() {} - -bool ParsedResource::isValid(std::string* errMsg) const { - std::string dummy; - if (errMsg == nullptr) { - errMsg = &dummy; - } - - int numCandidateTypes = 0; - if (isAnyResourceSet()) - ++numCandidateTypes; - if (isClusterSet()) - ++numCandidateTypes; - if (isDbSet() || isCollectionSet() || isSystemBucketsSet()) - ++numCandidateTypes; - - if (!isSystemBucketsSet() && isDbSet() != isCollectionSet()) { - *errMsg = stream() << "resource must set both " << db.name() << " and " << collection.name() - << " or neither, but not exactly one."; - return false; - } else if (isSystemBucketsSet()) { - if (isCollectionSet()) { - *errMsg = stream() << "system_buckets and collection cannot both be set"; - return false; - } - } - - if (numCandidateTypes != 1) { - *errMsg = stream() << "resource must have exactly " << db.name() << " and " - << collection.name() << " set, or have only " << cluster.name() - << " set " - << " or have only " << anyResource.name() << " set"; - return false; - } - - if (isAnyResourceSet() && !getAnyResource()) { - *errMsg = stream() << anyResource.name() << " must be true when specified"; - return false; - } - - if (isClusterSet() && !getCluster()) { - *errMsg = stream() << cluster.name() << " must be true when specified"; - return false; - } - - if (isDbSet() && - (!NamespaceString::validDBName(getDb(), NamespaceString::DollarInDbNameBehavior::Allow) && - !getDb().empty())) { - *errMsg = stream() << getDb() << " is not a valid database name"; - return false; - } - - if (isCollectionSet() && - (!NamespaceString::validCollectionName(getCollection()) && !getCollection().empty())) { - // local.oplog.$main is a real collection that the server will create. But, collection - // names with a '$' character are illegal. We must make an exception for this collection - // here so we can grant users access to it. 
- if (!(getDb() == "local" && getCollection() == "oplog.$main")) { - *errMsg = stream() << getCollection() << " is not a valid collection name"; - return false; - } - } - - return true; -} - -BSONObj ParsedResource::toBSON() const { - BSONObjBuilder builder; - - if (_isAnyResourceSet) - builder.append(anyResource(), _anyResource); - - if (_isClusterSet) - builder.append(cluster(), _cluster); - - if (_isSystemBucketsSet) - builder.append(systemBuckets(), _systemBuckets); - - if (_isDbSet) - builder.append(db(), _db); - - if (_isCollectionSet) - builder.append(collection(), _collection); - - return builder.obj(); -} - -bool ParsedResource::parseBSON(const BSONObj& source, string* errMsg) { - clear(); - - std::string dummy; - if (!errMsg) - errMsg = &dummy; - - FieldParser::FieldState fieldState; - fieldState = FieldParser::extract(source, anyResource, &_anyResource, errMsg); - if (fieldState == FieldParser::FIELD_INVALID) - return false; - _isAnyResourceSet = fieldState == FieldParser::FIELD_SET; - - fieldState = FieldParser::extract(source, cluster, &_cluster, errMsg); - if (fieldState == FieldParser::FIELD_INVALID) - return false; - _isClusterSet = fieldState == FieldParser::FIELD_SET; - - fieldState = FieldParser::extract(source, systemBuckets, &_systemBuckets, errMsg); - if (fieldState == FieldParser::FIELD_INVALID) - return false; - _isSystemBucketsSet = fieldState == FieldParser::FIELD_SET; - - fieldState = FieldParser::extract(source, db, &_db, errMsg); - if (fieldState == FieldParser::FIELD_INVALID) - return false; - _isDbSet = fieldState == FieldParser::FIELD_SET; - - fieldState = FieldParser::extract(source, collection, &_collection, errMsg); - if (fieldState == FieldParser::FIELD_INVALID) - return false; - _isCollectionSet = fieldState == FieldParser::FIELD_SET; - - return true; -} - -void ParsedResource::clear() { - _anyResource = false; - _isAnyResourceSet = false; - - _cluster = false; - _isClusterSet = false; - - _systemBuckets.clear(); - _isSystemBucketsSet = false; - - _db.clear(); - _isDbSet = false; - - _collection.clear(); - _isCollectionSet = false; -} - -void ParsedResource::cloneTo(ParsedResource* other) const { - other->clear(); - - other->_anyResource = _anyResource; - other->_isAnyResourceSet = _isAnyResourceSet; - - other->_cluster = _cluster; - other->_isClusterSet = _isClusterSet; - - other->_systemBuckets = _systemBuckets; - other->_isSystemBucketsSet = _isSystemBucketsSet; - - other->_db = _db; - other->_isDbSet = _isDbSet; - - other->_collection = _collection; - other->_isCollectionSet = _isCollectionSet; -} - -std::string ParsedResource::toString() const { - return toBSON().toString(); -} - -void ParsedResource::setAnyResource(bool anyResource) { - _anyResource = anyResource; - _isAnyResourceSet = true; -} - -void ParsedResource::unsetAnyResource() { - _isAnyResourceSet = false; -} - -bool ParsedResource::isAnyResourceSet() const { - return _isAnyResourceSet; -} - -bool ParsedResource::getAnyResource() const { - dassert(_isAnyResourceSet); - return _anyResource; -} - -void ParsedResource::setCluster(bool cluster) { - _cluster = cluster; - _isClusterSet = true; -} - -void ParsedResource::unsetCluster() { - _isClusterSet = false; -} - -bool ParsedResource::isClusterSet() const { - return _isClusterSet; -} - -bool ParsedResource::getCluster() const { - dassert(_isClusterSet); - return _cluster; -} - -void ParsedResource::setSystemBuckets(StringData collection) { - _systemBuckets = collection.toString(); - _isSystemBucketsSet = true; -} - -void 
ParsedResource::unsetSystemBuckets() { - _isSystemBucketsSet = false; -} - -bool ParsedResource::isSystemBucketsSet() const { - return _isSystemBucketsSet; -} - -const std::string& ParsedResource::getSystemBuckets() const { - dassert(_isSystemBucketsSet); - return _systemBuckets; -} - -void ParsedResource::setDb(StringData db) { - _db = db.toString(); - _isDbSet = true; -} - -void ParsedResource::unsetDb() { - _isDbSet = false; -} - -bool ParsedResource::isDbSet() const { - return _isDbSet; -} - -const std::string& ParsedResource::getDb() const { - dassert(_isDbSet); - return _db; -} - -void ParsedResource::setCollection(StringData collection) { - _collection = collection.toString(); - _isCollectionSet = true; -} - -void ParsedResource::unsetCollection() { - _isCollectionSet = false; -} - -bool ParsedResource::isCollectionSet() const { - return _isCollectionSet; -} - -const std::string& ParsedResource::getCollection() const { - dassert(_isCollectionSet); - return _collection; -} - -const BSONField> ParsedPrivilege::actions("actions"); -const BSONField ParsedPrivilege::resource("resource"); - -ParsedPrivilege::ParsedPrivilege() { - clear(); -} - -ParsedPrivilege::~ParsedPrivilege() {} - -bool ParsedPrivilege::isValid(std::string* errMsg) const { - std::string dummy; - if (errMsg == nullptr) { - errMsg = &dummy; - } - - // All the mandatory fields must be present. - if (!_isActionsSet || !_actions.size()) { - *errMsg = stream() << "missing " << actions.name() << " field"; - return false; - } - - if (!_isResourceSet) { - *errMsg = stream() << "missing " << resource.name() << " field"; - return false; - } - - return getResource().isValid(errMsg); -} - -BSONObj ParsedPrivilege::toBSON() const { - BSONObjBuilder builder; - - if (_isResourceSet) - builder.append(resource(), _resource.toBSON()); - - if (_isActionsSet) { - BSONArrayBuilder actionsBuilder(builder.subarrayStart(actions())); - for (std::vector::const_iterator it = _actions.begin(); it != _actions.end(); - ++it) { - actionsBuilder.append(*it); - } - actionsBuilder.doneFast(); - } - - return builder.obj().getOwned(); -} - -bool ParsedPrivilege::parseBSON(const BSONObj& source, string* errMsg) { - clear(); - - std::string dummy; - if (!errMsg) - errMsg = &dummy; - - FieldParser::FieldState fieldState; - fieldState = FieldParser::extract(source, actions, &_actions, errMsg); - if (fieldState == FieldParser::FIELD_INVALID) - return false; - _isActionsSet = fieldState == FieldParser::FIELD_SET; - - fieldState = FieldParser::extract(source, resource, &_resource, errMsg); - if (fieldState == FieldParser::FIELD_INVALID) - return false; - _isResourceSet = fieldState == FieldParser::FIELD_SET; - - return true; -} - -void ParsedPrivilege::clear() { - _actions.clear(); - _isActionsSet = false; - _resource.clear(); - _isResourceSet = false; -} - -std::string ParsedPrivilege::toString() const { - return toBSON().toString(); -} - -void ParsedPrivilege::setActions(const std::vector& actions) { - for (std::vector::const_iterator it = actions.begin(); it != actions.end(); ++it) { - addToActions((*it)); - } - _isActionsSet = actions.size() > 0; -} - -void ParsedPrivilege::addToActions(const string& actions) { - _actions.push_back(actions); - _isActionsSet = true; -} - -void ParsedPrivilege::unsetActions() { - _actions.clear(); - _isActionsSet = false; -} - -bool ParsedPrivilege::isActionsSet() const { - return _isActionsSet; -} - -size_t ParsedPrivilege::sizeActions() const { - return _actions.size(); -} - -const std::vector& ParsedPrivilege::getActions() 
const { - dassert(_isActionsSet); - return _actions; -} - -const string& ParsedPrivilege::getActionsAt(size_t pos) const { - dassert(_isActionsSet); - dassert(_actions.size() > pos); - return _actions.at(pos); -} - -void ParsedPrivilege::setResource(const ParsedResource& resource) { - resource.cloneTo(&_resource); - _isResourceSet = true; -} - -void ParsedPrivilege::unsetResource() { - _isResourceSet = false; -} - -bool ParsedPrivilege::isResourceSet() const { - return _isResourceSet; -} - -const ParsedResource& ParsedPrivilege::getResource() const { - dassert(_isResourceSet); - return _resource; -} - -Status ParsedPrivilege::parsedPrivilegeToPrivilege(const ParsedPrivilege& parsedPrivilege, - Privilege* result, - std::vector* unrecognizedActions) { - std::string errmsg; - if (!parsedPrivilege.isValid(&errmsg)) { - return Status(ErrorCodes::FailedToParse, errmsg); - } - - // Build actions - ActionSet actions; - const vector& parsedActions = parsedPrivilege.getActions(); - Status status = - ActionSet::parseActionSetFromStringVector(parsedActions, &actions, unrecognizedActions); - if (!status.isOK()) { - return status; - } - - // Build resource - ResourcePattern resource; - const ParsedResource& parsedResource = parsedPrivilege.getResource(); - if (parsedResource.isAnyResourceSet() && parsedResource.getAnyResource()) { - resource = ResourcePattern::forAnyResource(); - } else if (parsedResource.isClusterSet() && parsedResource.getCluster()) { - resource = ResourcePattern::forClusterResource(); - } else if (parsedResource.isSystemBucketsSet()) { - if (parsedResource.isDbSet()) { - if (parsedResource.getDb().empty()) { - if (parsedResource.getSystemBuckets().empty()) { - // {db: "", system_buckets: ""} - match any system buckets in any db - resource = ResourcePattern::forAnySystemBuckets(); - } else { - // {db: "", system_buckets: ""} - match any system.buckets. in - // any db - resource = ResourcePattern::forAnySystemBucketsInAnyDatabase( - parsedResource.getSystemBuckets()); - } - } else { - if (parsedResource.getSystemBuckets().empty()) { - // {db: "", system_buckets: ""} - match any system buckets in db - resource = - ResourcePattern::forAnySystemBucketsInDatabase(parsedResource.getDb()); - } else { - // {db: "", system_buckets: ""} - match .system.buckets. - resource = ResourcePattern::forExactSystemBucketsCollection( - parsedResource.getDb(), parsedResource.getSystemBuckets()); - } - } - } else { - if (parsedResource.getSystemBuckets().empty()) { - // {system_buckets: ""} - match any system buckets in any db - resource = ResourcePattern::forAnySystemBuckets(); - } else { - // {system_buckets: ""} - match any system.buckets. 
in any db - resource = ResourcePattern::forAnySystemBucketsInAnyDatabase( - parsedResource.getSystemBuckets()); - } - } - - } else { - if (parsedResource.isDbSet() && !parsedResource.getDb().empty()) { - if (parsedResource.isCollectionSet() && !parsedResource.getCollection().empty()) { - resource = ResourcePattern::forExactNamespace( - NamespaceString::createNamespaceStringForAuth( - boost::none, parsedResource.getDb(), parsedResource.getCollection())); - } else { - resource = ResourcePattern::forDatabaseName(parsedResource.getDb()); - } - } else { - if (parsedResource.isCollectionSet() && !parsedResource.getCollection().empty()) { - resource = ResourcePattern::forCollectionName(parsedResource.getCollection()); - } else { - resource = ResourcePattern::forAnyNormalResource(); - } - } - } - - *result = Privilege(resource, actions); - return Status::OK(); -} - -bool ParsedPrivilege::privilegeToParsedPrivilege(const Privilege& privilege, - ParsedPrivilege* result, - std::string* errmsg) { - ParsedResource parsedResource; - if (privilege.getResourcePattern().isExactNamespacePattern()) { - parsedResource.setDb(privilege.getResourcePattern().databaseToMatch()); - parsedResource.setCollection(privilege.getResourcePattern().collectionToMatch()); - } else if (privilege.getResourcePattern().isDatabasePattern()) { - parsedResource.setDb(privilege.getResourcePattern().databaseToMatch()); - parsedResource.setCollection(""); - } else if (privilege.getResourcePattern().isCollectionPattern()) { - parsedResource.setDb(""); - parsedResource.setCollection(privilege.getResourcePattern().collectionToMatch()); - } else if (privilege.getResourcePattern().isAnyNormalResourcePattern()) { - parsedResource.setDb(""); - parsedResource.setCollection(""); - } else if (privilege.getResourcePattern().isClusterResourcePattern()) { - parsedResource.setCluster(true); - } else if (privilege.getResourcePattern().isAnySystemBucketsCollection()) { - parsedResource.setSystemBuckets(""); - } else if (privilege.getResourcePattern().isAnySystemBucketsCollectionInDB()) { - parsedResource.setSystemBuckets(""); - parsedResource.setDb(privilege.getResourcePattern().databaseToMatch()); - } else if (privilege.getResourcePattern().isAnySystemBucketsCollectionInAnyDB()) { - parsedResource.setSystemBuckets(privilege.getResourcePattern().collectionToMatch()); - } else if (privilege.getResourcePattern().isExactSystemBucketsCollection()) { - parsedResource.setDb(privilege.getResourcePattern().databaseToMatch()); - parsedResource.setSystemBuckets(privilege.getResourcePattern().collectionToMatch()); - } else if (privilege.getResourcePattern().isAnyResourcePattern()) { - parsedResource.setAnyResource(true); - } else { - *errmsg = stream() << privilege.getResourcePattern().toString() - << " is not a valid user-grantable resource pattern"; - return false; - } - - result->clear(); - result->setResource(parsedResource); - result->setActions(privilege.getActions().getActionsAsStrings()); - return result->isValid(errmsg); -} -} // namespace mongo diff --git a/src/mongo/db/auth/privilege_parser.h b/src/mongo/db/auth/privilege_parser.h deleted file mode 100644 index de315a65a74ca..0000000000000 --- a/src/mongo/db/auth/privilege_parser.h +++ /dev/null @@ -1,211 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include -#include - -#include "mongo/base/string_data.h" -#include "mongo/db/jsobj.h" - -namespace mongo { - -class Privilege; - -/** - * This class is used to parse documents describing resources as they are represented as part - * of privileges granted to roles in the role management commands. - */ -class ParsedResource { - ParsedResource(const ParsedResource&) = delete; - ParsedResource& operator=(const ParsedResource&) = delete; - -public: - // - // schema declarations - // - - static const BSONField anyResource; - static const BSONField cluster; - static const BSONField systemBuckets; - static const BSONField db; - static const BSONField collection; - - // - // construction / destruction - // - - ParsedResource(); - ~ParsedResource(); - - /** Copies all the fields present in 'this' to 'other'. */ - void cloneTo(ParsedResource* other) const; - - // - // bson serializable interface implementation - // - - bool isValid(std::string* errMsg) const; - BSONObj toBSON() const; - bool parseBSON(const BSONObj& source, std::string* errMsg); - void clear(); - std::string toString() const; - - // - // individual field accessors - // - - void setAnyResource(bool anyResource); - void unsetAnyResource(); - bool isAnyResourceSet() const; - bool getAnyResource() const; - - void setCluster(bool cluster); - void unsetCluster(); - bool isClusterSet() const; - bool getCluster() const; - - void setDb(StringData db); - void unsetDb(); - bool isDbSet() const; - const std::string& getDb() const; - - void setCollection(StringData collection); - void unsetCollection(); - bool isCollectionSet() const; - const std::string& getCollection() const; - - void setSystemBuckets(StringData collection); - void unsetSystemBuckets(); - bool isSystemBucketsSet() const; - const std::string& getSystemBuckets() const; - -private: - // Convention: (M)andatory, (O)ptional - - // (O) Only present if the resource matches anything. - bool _anyResource; - bool _isAnyResourceSet; - - // (O) Only present if the resource is the cluster - bool _cluster; - bool _isClusterSet; - - // (O) Only present if the resource is the system.buckets. 
or system.buckets.* - // resource - std::string _systemBuckets; - bool _isSystemBucketsSet; - - // (O) database portion of the resource - std::string _db; - bool _isDbSet; - - // (O) collection portion of the resource - std::string _collection; - bool _isCollectionSet; -}; - -/** - * This class is used to parse documents describing privileges in the role managment commands. - */ -class ParsedPrivilege { - ParsedPrivilege(const ParsedPrivilege&) = delete; - ParsedPrivilege& operator=(const ParsedPrivilege&) = delete; - -public: - // - // schema declarations - // - - static const BSONField> actions; - static const BSONField resource; - - // - // construction / destruction - // - - ParsedPrivilege(); - ~ParsedPrivilege(); - - /** - * Takes a parsedPrivilege and turns it into a true Privilege object. - * If the parsedPrivilege contains any unrecognized privileges it will add those to - * unrecognizedActions. - */ - static Status parsedPrivilegeToPrivilege(const ParsedPrivilege& parsedPrivilege, - Privilege* result, - std::vector* unrecognizedActions); - /** - * Takes a Privilege object and turns it into a ParsedPrivilege. - */ - static bool privilegeToParsedPrivilege(const Privilege& privilege, - ParsedPrivilege* result, - std::string* errmsg); - - // - // bson serializable interface implementation - // - - bool isValid(std::string* errMsg) const; - BSONObj toBSON() const; - bool parseBSON(const BSONObj& source, std::string* errMsg); - void clear(); - std::string toString() const; - - // - // individual field accessors - // - - void setActions(const std::vector& actions); - void addToActions(const std::string& actions); - void unsetActions(); - bool isActionsSet() const; - size_t sizeActions() const; - const std::vector& getActions() const; - const std::string& getActionsAt(size_t pos) const; - - void setResource(const ParsedResource& resource); - void unsetResource(); - bool isResourceSet() const; - const ParsedResource& getResource() const; - -private: - // Convention: (M)andatory, (O)ptional - - // (M) Array of action types - std::vector _actions; - bool _isActionsSet; - - // (M) Object describing the resource pattern of this privilege - ParsedResource _resource; - bool _isResourceSet; -}; - -} // namespace mongo diff --git a/src/mongo/db/auth/privilege_parser_test.cpp b/src/mongo/db/auth/privilege_parser_test.cpp index 36dc18d3ada72..c676905f014a6 100644 --- a/src/mongo/db/auth/privilege_parser_test.cpp +++ b/src/mongo/db/auth/privilege_parser_test.cpp @@ -31,442 +31,288 @@ * Unit tests of the ParsedPrivilege class. 
*/ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/parsed_privilege_gen.h" #include "mongo/db/auth/privilege.h" -#include "mongo/db/auth/privilege_parser.h" -#include "mongo/db/server_options.h" -#include "mongo/unittest/unittest.h" - -namespace mongo { +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/database_name.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault + +namespace mongo::auth { namespace { +constexpr auto kActions = "actions"_sd; +constexpr auto kResource = "resource"_sd; +const BSONObj kClusterResource = BSON("cluster"_sd << true); +const BSONArray kFindActions = BSON_ARRAY("find"_sd); -TEST(PrivilegeParserTest, IsValidTest) { - ParsedPrivilege parsedPrivilege; - std::string errmsg; +TEST(PrivilegeParserTest, IsNotValidTest) { + const ErrorCodes::Error kParseFailure{40414}; + IDLParserContext ctx("IsNotValidTest"); // must have resource - parsedPrivilege.parseBSON(BSON("actions" << BSON_ARRAY("find")), &errmsg); - ASSERT_FALSE(parsedPrivilege.isValid(&errmsg)); + const BSONObj noRsrc = BSON(kActions << kFindActions); + constexpr auto noRsrcExpect = + "BSON field 'IsNotValidTest.resource' is missing but a required field"_sd; + ASSERT_THROWS_CODE_AND_WHAT( + ParsedPrivilege::parse(ctx, noRsrc), DBException, kParseFailure, noRsrcExpect); // must have actions - parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true)), &errmsg); - ASSERT_FALSE(parsedPrivilege.isValid(&errmsg)); - - // resource can't have cluster as well as db or collection - parsedPrivilege.parseBSON(BSON("resource" << BSON("cluster" << true << "db" - << "" - << "collection" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT_FALSE(parsedPrivilege.isValid(&errmsg)); - - // resource can't have db without collection - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT_FALSE(parsedPrivilege.isValid(&errmsg)); + const BSONObj noActions = BSON(kResource << kClusterResource); + constexpr auto noActionsExpect = + "BSON field 'IsNotValidTest.actions' is missing but a required field"_sd; + ASSERT_THROWS_CODE_AND_WHAT( + ParsedPrivilege::parse(ctx, noActions), DBException, kParseFailure, noActionsExpect); +} - // resource can't have collection without db - parsedPrivilege.parseBSON(BSON("resource" << BSON("collection" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT_FALSE(parsedPrivilege.isValid(&errmsg)); +Privilege resolvePrivilege(BSONObj obj, std::vector* unrecognized = nullptr) { + IDLParserContext ctx("resolvePrivilege"); + auto pp = ParsedPrivilege::parse(ctx, obj); + return Privilege::resolvePrivilegeWithTenant(boost::none /* tenantId */, pp, unrecognized); +} - // Works with wildcard db and resource - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "" - << "collection" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); 
+const std::set kBoolResourceTypes = { + "cluster"_sd, + "anyResource"_sd, +}; + +const std::set kAllResourceTypes = { + "cluster"_sd, + "anyResource"_sd, + "db"_sd, + "collection"_sd, + "system_buckets"_sd, +}; + +BSONObj makeResource(const boost::optional& db, + const boost::optional& collection, + const boost::optional& system_buckets) { + BSONObjBuilder builder; + if (db) { + builder.append("db"_sd, db.get()); + } + if (collection) { + builder.append("collection"_sd, collection.get()); + } + if (system_buckets) { + builder.append("system_buckets"_sd, system_buckets.get()); + } + return builder.obj(); +} - // Works with real db and collection - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << "collection" - << "foo") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); +TEST(PrivilegeParserTest, CombiningTypesNegative) { + // resource can't have cluster or anyResource with other keys + for (StringData primary : {"cluster"_sd, "anyResource"_sd}) { + for (StringData secondary : kAllResourceTypes) { + if (primary == secondary) { + continue; + } + + BSONObjBuilder builder; + { + BSONObjBuilder rsrcBuilder(builder.subobjStart("resource"_sd)); + rsrcBuilder.append(primary, true); + if (kBoolResourceTypes.count(secondary) > 0) { + rsrcBuilder.append(secondary, true); + } else { + rsrcBuilder.append(secondary, "foo"_sd); + } + rsrcBuilder.doneFast(); + } + builder.append("actions"_sd, kFindActions); + + if (secondary == "cluster"_sd) { + // Error messages treat cluster as always primary. + std::swap(primary, secondary); + } + const std::string expect = str::stream() + << "resource: {" << primary << ": true} conflicts with resource type '" << secondary + << "'"; + + ASSERT_THROWS_CODE_AND_WHAT( + resolvePrivilege(builder.obj()), DBException, ErrorCodes::BadValue, expect); + } + } + + // collection and system_buckets may not co-exist + ASSERT_THROWS_CODE_AND_WHAT( + resolvePrivilege(BSON("resource"_sd << makeResource("db"_sd, "coll"_sd, "bucket"_sd) + << "actions"_sd << kFindActions)), + DBException, + ErrorCodes::BadValue, + "resource: {collection: '...'} conflicts with resource type 'system_buckets'"); + + // db requires collection (or system_buckets) + ASSERT_THROWS_CODE_AND_WHAT( + resolvePrivilege(BSON("resource"_sd << makeResource("db"_sd, boost::none, boost::none) + << "actions"_sd << kFindActions)), + DBException, + ErrorCodes::BadValue, + "resource pattern must contain 'collection' or 'systemBuckets' specifier"); - // Works with cluster resource - parsedPrivilege.parseBSON( - BSON("resource" << BSON("cluster" << true) << "actions" << BSON_ARRAY("find")), &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - - - // Works with no db and system_buckets any - parsedPrivilege.parseBSON(BSON("resource" << BSON("system_buckets" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - - // Works with empty db and system_buckets any - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "" - << "system_buckets" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - - // Works with real db and system_buckets foo - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << "system_buckets" - << "foo") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - - // Works with real db and system_buckets any - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << 
"system_buckets" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - - // Works with only system_buckets and no db - parsedPrivilege.parseBSON(BSON("resource" << BSON("system_buckets" - << "foo") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - - // Fails with real db and system_buckets foo and any - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << "system_buckets" - << "foo" - << "anyResource" << true) - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT_FALSE(parsedPrivilege.isValid(&errmsg)); - - // Fails with real db and system_buckets foo and any - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << "system_buckets" - << "foo" - << "cluster" << true) - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT_FALSE(parsedPrivilege.isValid(&errmsg)); - - - // Fails with real collection and system_buckets foo - parsedPrivilege.parseBSON(BSON("resource" << BSON("collection" - << "test" - << "system_buckets" - << "foo") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT_FALSE(parsedPrivilege.isValid(&errmsg)); + // resource can't have collection without db + ASSERT_THROWS_CODE_AND_WHAT( + resolvePrivilege(BSON("resource"_sd << makeResource(boost::none, "coll"_sd, boost::none) + << "actions"_sd << kFindActions)), + DBException, + ErrorCodes::BadValue, + "resource {collection: '...'} must include 'db' field as well"); } -TEST(PrivilegeParserTest, ConvertBetweenPrivilegeTest) { - ParsedPrivilege parsedPrivilege; - Privilege privilege; - std::string errmsg; - std::vector actionsVector; - std::vector unrecognizedActions; - actionsVector.push_back("find"); +TEST(PrivilegeParserTest, IsValidTest) { + // Works with cluster resource + auto clusterPriv = + resolvePrivilege(BSON("resource"_sd << kClusterResource << "actions"_sd << kFindActions)); + ASSERT_TRUE(clusterPriv.getResourcePattern().isClusterResourcePattern()); + + // Works with anyResource resource + auto anyResourcePriv = resolvePrivilege( + BSON("resource"_sd << BSON("anyResource"_sd << true) << "actions"_sd << kFindActions)); + ASSERT_TRUE(anyResourcePriv.getResourcePattern().isAnyResourcePattern()); // Works with wildcard db and resource - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "" - << "collection" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forAnyNormalResource()); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT(parsedPrivilege.getResource().isDbSet()); - ASSERT(parsedPrivilege.getResource().isCollectionSet()); - ASSERT_EQUALS("", parsedPrivilege.getResource().getDb()); - ASSERT_EQUALS("", parsedPrivilege.getResource().getCollection()); - ASSERT_FALSE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); - - // Works with exact namespaces - 
parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << "collection" - << "foo") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), - ResourcePattern::forExactNamespace( - NamespaceString::createNamespaceString_forTest("test.foo"))); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT(parsedPrivilege.getResource().isDbSet()); - ASSERT(parsedPrivilege.getResource().isCollectionSet()); - ASSERT_EQUALS("test", parsedPrivilege.getResource().getDb()); - ASSERT_EQUALS("foo", parsedPrivilege.getResource().getCollection()); - ASSERT_FALSE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); - - // Works with database resource - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << "collection" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forDatabaseName("test")); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT(parsedPrivilege.getResource().isDbSet()); - ASSERT(parsedPrivilege.getResource().isCollectionSet()); - ASSERT_EQUALS("test", parsedPrivilege.getResource().getDb()); - ASSERT_EQUALS("", parsedPrivilege.getResource().getCollection()); - ASSERT_FALSE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); - - // Works with collection resource - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "" - << "collection" - << "foo") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forCollectionName("foo")); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT(parsedPrivilege.getResource().isDbSet()); - ASSERT(parsedPrivilege.getResource().isCollectionSet()); - ASSERT_EQUALS("", parsedPrivilege.getResource().getDb()); - ASSERT_EQUALS("foo", 
parsedPrivilege.getResource().getCollection()); - ASSERT_FALSE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); + auto anyNormalPriv = resolvePrivilege(BSON( + "resource"_sd << makeResource(""_sd, ""_sd, boost::none) << "actions"_sd << kFindActions)); + ASSERT_TRUE(anyNormalPriv.getResourcePattern().isAnyNormalResourcePattern()); - // Works with cluster resource - parsedPrivilege.parseBSON( - BSON("resource" << BSON("cluster" << true) << "actions" << BSON_ARRAY("find")), &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forClusterResource()); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT(parsedPrivilege.getResource().isClusterSet()); - ASSERT(parsedPrivilege.getResource().getCluster()); - ASSERT_FALSE(parsedPrivilege.getResource().isDbSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isCollectionSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); - - // Works with any system.buckets resource - parsedPrivilege.parseBSON(BSON("resource" << BSON("system_buckets" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forAnySystemBuckets()); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_TRUE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT_EQUALS(parsedPrivilege.getResource().getSystemBuckets(), ""); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isDbSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isCollectionSet()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); - - // Works with any system.buckets resource with empty db - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "" - << "system_buckets" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forAnySystemBuckets()); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - 
ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_TRUE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT_EQUALS(parsedPrivilege.getResource().getSystemBuckets(), ""); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isDbSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isCollectionSet()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); - - // Works with system.buckets.foo resource in test db - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << "system_buckets" - << "foo") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), - ResourcePattern::forExactSystemBucketsCollection("test", "foo")); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_TRUE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT_EQUALS(parsedPrivilege.getResource().getSystemBuckets(), "foo"); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT_TRUE(parsedPrivilege.getResource().isDbSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isCollectionSet()); - ASSERT_EQUALS("test", parsedPrivilege.getResource().getDb()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); - - - // Works with any system.buckets resource named foo - parsedPrivilege.parseBSON(BSON("resource" << BSON("system_buckets" - << "foo") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), - ResourcePattern::forAnySystemBucketsInAnyDatabase("foo")); - - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_TRUE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT_EQUALS(parsedPrivilege.getResource().getSystemBuckets(), "foo"); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isDbSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isCollectionSet()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); - - - // Works with any system.buckets resource in db test - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "test" - << "system_buckets" - << "") - << "actions" << BSON_ARRAY("find")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(unrecognizedActions.empty()); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - 
ASSERT_EQUALS(privilege.getResourcePattern(), - ResourcePattern::forAnySystemBucketsInDatabase("test")); - ASSERT(ParsedPrivilege::privilegeToParsedPrivilege(privilege, &parsedPrivilege, &errmsg)); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT(parsedPrivilege.isResourceSet()); - ASSERT_TRUE(parsedPrivilege.getResource().isSystemBucketsSet()); - ASSERT_EQUALS(parsedPrivilege.getResource().getSystemBuckets(), ""); - ASSERT_FALSE(parsedPrivilege.getResource().isClusterSet()); - ASSERT_TRUE(parsedPrivilege.getResource().isDbSet()); - ASSERT_FALSE(parsedPrivilege.getResource().isCollectionSet()); - ASSERT_EQUALS("test", parsedPrivilege.getResource().getDb()); - ASSERT(parsedPrivilege.isActionsSet()); - ASSERT(actionsVector == parsedPrivilege.getActions()); + // Works with real db and collection + auto exactNSSPriv = + resolvePrivilege(BSON("resource"_sd << makeResource("db1"_sd, "coll"_sd, boost::none) + << "actions"_sd << kFindActions)); + ASSERT_TRUE(exactNSSPriv.getResourcePattern().isExactNamespacePattern()); + ASSERT_EQ(exactNSSPriv.getResourcePattern().dbNameToMatch().toString_forTest(), "db1"_sd); + ASSERT_EQ(exactNSSPriv.getResourcePattern().collectionToMatch(), "coll"_sd); + + // Works with any bucket in any db (implicit) + auto anyBucketImplicit = + resolvePrivilege(BSON("resource"_sd << makeResource(boost::none, boost::none, ""_sd) + << "actions"_sd << kFindActions)); + ASSERT_TRUE(anyBucketImplicit.getResourcePattern().isAnySystemBucketsCollection()); + ASSERT_EQ(anyBucketImplicit.getResourcePattern().dbNameToMatch().toString_forTest(), ""_sd); + ASSERT_EQ(anyBucketImplicit.getResourcePattern().collectionToMatch(), ""_sd); + + // Works with any bucket in any db (explicit) + auto anyBucketExplicit = resolvePrivilege(BSON( + "resource"_sd << makeResource(""_sd, boost::none, ""_sd) << "actions"_sd << kFindActions)); + ASSERT_TRUE(anyBucketExplicit.getResourcePattern().isAnySystemBucketsCollection()); + ASSERT_EQ(anyBucketExplicit.getResourcePattern().dbNameToMatch().toString_forTest(), ""_sd); + ASSERT_EQ(anyBucketExplicit.getResourcePattern().collectionToMatch(), ""_sd); + + // Works with system_buckets in any db (implicit) + auto bucketAnyDBImplicit = + resolvePrivilege(BSON("resource"_sd << makeResource(boost::none, boost::none, "bucket"_sd) + << "actions"_sd << kFindActions)); + ASSERT_TRUE(bucketAnyDBImplicit.getResourcePattern().isAnySystemBucketsCollectionInAnyDB()); + ASSERT_EQ(bucketAnyDBImplicit.getResourcePattern().dbNameToMatch().toString_forTest(), ""_sd); + ASSERT_EQ(bucketAnyDBImplicit.getResourcePattern().collectionToMatch(), "bucket"_sd); + + // Works with system_buckets in any db (explicit) + auto bucketAnyDBExplicit = + resolvePrivilege(BSON("resource"_sd << makeResource(""_sd, boost::none, "bucket"_sd) + << "actions"_sd << kFindActions)); + ASSERT_TRUE(bucketAnyDBExplicit.getResourcePattern().isAnySystemBucketsCollectionInAnyDB()); + ASSERT_EQ(bucketAnyDBExplicit.getResourcePattern().dbNameToMatch().toString_forTest(), ""_sd); + ASSERT_EQ(bucketAnyDBExplicit.getResourcePattern().collectionToMatch(), "bucket"_sd); + + // Works with any system_bucket in specific db + auto bucketInDB = + resolvePrivilege(BSON("resource"_sd << makeResource("db1"_sd, boost::none, ""_sd) + << "actions"_sd << kFindActions)); + ASSERT_TRUE(bucketInDB.getResourcePattern().isAnySystemBucketsCollectionInDB()); + ASSERT_EQ(bucketInDB.getResourcePattern().dbNameToMatch().toString_forTest(), "db1"_sd); + ASSERT_EQ(bucketInDB.getResourcePattern().collectionToMatch(), ""_sd); + + // Works 
with exact system buckets namespace. + auto exactBucket = + resolvePrivilege(BSON("resource"_sd << makeResource("db1"_sd, boost::none, "bucket"_sd) + << "actions"_sd << kFindActions)); + ASSERT_TRUE(exactBucket.getResourcePattern().isExactSystemBucketsCollection()); + ASSERT_EQ(exactBucket.getResourcePattern().dbNameToMatch().toString_forTest(), "db1"_sd); + ASSERT_EQ(exactBucket.getResourcePattern().collectionToMatch(), "bucket"_sd); +} + +TEST(PrivilegeParserTest, RoundTrip) { + const std::vector resourcePatterns = { + BSON("cluster"_sd << true), + BSON("anyResource"_sd << true), + BSON("db"_sd + << "" + << "collection"_sd + << ""), + BSON("db"_sd + << "" + << "collection"_sd + << "coll1"), + BSON("db"_sd + << "db1" + << "collection"_sd + << ""), + BSON("db"_sd + << "db1" + << "collection"_sd + << "coll1"), + BSON("system_buckets"_sd + << "bucket"_sd), + BSON("db"_sd + << "db1" + << "system_buckets"_sd + << "bucket"_sd), + }; + const std::vector actionTypes = { + BSON_ARRAY("find"_sd), + BSON_ARRAY("anyAction"_sd), + BSON_ARRAY("find"_sd + << "insert"_sd + << "remove"_sd + << "update"_sd), + }; + + for (const auto& pattern : resourcePatterns) { + for (const auto& actions : actionTypes) { + auto obj = BSON("resource"_sd << pattern << "actions"_sd << actions); + auto priv = resolvePrivilege(obj); + auto serialized = priv.toBSON(); + ASSERT_BSONOBJ_EQ(obj, serialized); + } + } } TEST(PrivilegeParserTest, ParseInvalidActionsTest) { - ParsedPrivilege parsedPrivilege; - Privilege privilege; - std::string errmsg; - std::vector actionsVector; - std::vector unrecognizedActions; - actionsVector.push_back("find"); - - parsedPrivilege.parseBSON(BSON("resource" << BSON("db" - << "" - << "collection" - << "") - << "actions" - << BSON_ARRAY("find" - << "fakeAction")), - &errmsg); - ASSERT(parsedPrivilege.isValid(&errmsg)); - ASSERT_OK(ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions)); - ASSERT(privilege.getActions().contains(ActionType::find)); - ASSERT(!privilege.getActions().contains(ActionType::insert)); - ASSERT_EQUALS(privilege.getResourcePattern(), ResourcePattern::forAnyNormalResource()); - ASSERT_EQUALS(1U, unrecognizedActions.size()); - ASSERT_EQUALS("fakeAction", unrecognizedActions[0]); + auto obj = BSON("resource"_sd << kClusterResource << "actions"_sd + << BSON_ARRAY("find"_sd + << "fakeAction"_sd)); + std::vector unrecognized; + auto priv = resolvePrivilege(obj, &unrecognized); + + ASSERT_TRUE(priv.getResourcePattern().isClusterResourcePattern()); + ASSERT_TRUE(priv.getActions().contains(ActionType::find)); + ASSERT_FALSE(priv.getActions().contains(ActionType::insert)); + ASSERT_EQUALS(1U, unrecognized.size()); + ASSERT_EQUALS("fakeAction", unrecognized[0]); } + } // namespace -} // namespace mongo +} // namespace mongo::auth diff --git a/src/mongo/db/auth/resource_pattern.cpp b/src/mongo/db/auth/resource_pattern.cpp index 75ede19ebb06a..27f300dac1a72 100644 --- a/src/mongo/db/auth/resource_pattern.cpp +++ b/src/mongo/db/auth/resource_pattern.cpp @@ -27,11 +27,8 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include - #include "mongo/db/auth/resource_pattern.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { @@ -47,7 +44,7 @@ std::string ResourcePattern::toString() const { case MatchTypeEnum::kMatchCollectionName: return ""; case MatchTypeEnum::kMatchExactNamespace: - return "<" + _ns.ns() + ">"; + return "<" + NamespaceStringUtil::serializeForAuth(_ns) + ">"; case MatchTypeEnum::kMatchAnyNormalResource: return ""; case MatchTypeEnum::kMatchAnyResource: diff --git a/src/mongo/db/auth/resource_pattern.h b/src/mongo/db/auth/resource_pattern.h index 036c12df65562..5e564c28a49ee 100644 --- a/src/mongo/db/auth/resource_pattern.h +++ b/src/mongo/db/auth/resource_pattern.h @@ -29,12 +29,21 @@ #pragma once +#include +#include +#include +#include +#include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" #include "mongo/db/auth/action_type_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -52,46 +61,50 @@ class ResourcePattern { friend class AuthorizationContract; public: + // TODO (SERVER-76195) Remove legacy non-tenant aware APIs from ResourcePattern + // databaseToMatch() - Remove in favor of dbNameToMatch. + /** * Returns a pattern that matches absolutely any resource. */ - static ResourcePattern forAnyResource() { - return ResourcePattern(MatchTypeEnum::kMatchAnyResource); + static ResourcePattern forAnyResource(const boost::optional& tenantId) { + return ResourcePattern(MatchTypeEnum::kMatchAnyResource, tenantId); } /** * Returns a pattern that matches any database or collection resource except collections for * which ns.isSystem(). */ - static ResourcePattern forAnyNormalResource() { - return ResourcePattern(MatchTypeEnum::kMatchAnyNormalResource); + static ResourcePattern forAnyNormalResource(const boost::optional& tenantId) { + return ResourcePattern(MatchTypeEnum::kMatchAnyNormalResource, tenantId); } /** * Returns a pattern that matches the "cluster" resource. */ - static ResourcePattern forClusterResource() { - return ResourcePattern(MatchTypeEnum::kMatchClusterResource); + static ResourcePattern forClusterResource(const boost::optional& tenantId) { + return ResourcePattern(MatchTypeEnum::kMatchClusterResource, tenantId); } /** * Returns a pattern that matches the named database, and NamespaceStrings * "ns" for which ns.isSystem() is false and ns.db() == dbname. */ - static ResourcePattern forDatabaseName(StringData dbName) { + static ResourcePattern forDatabaseName(const DatabaseName& dbName) { return ResourcePattern( MatchTypeEnum::kMatchDatabaseName, - NamespaceString::createNamespaceStringForAuth(boost::none, dbName, "")); + NamespaceString::createNamespaceStringForAuth(dbName.tenantId(), dbName.db(), ""_sd)); } /** * Returns a pattern that matches NamespaceStrings "ns" for which ns.coll() == * collectionName. */ - static ResourcePattern forCollectionName(StringData collectionName) { + static ResourcePattern forCollectionName(const boost::optional& tenantId, + StringData collectionName) { return ResourcePattern( MatchTypeEnum::kMatchCollectionName, - NamespaceString::createNamespaceStringForAuth(boost::none, "", collectionName)); + NamespaceString::createNamespaceStringForAuth(tenantId, ""_sd, collectionName)); } /** @@ -105,25 +118,25 @@ class ResourcePattern { * Returns a pattern that matches any collection with the prefix "system.buckets." in any * database. 
*/ - static ResourcePattern forAnySystemBuckets() { - return ResourcePattern(MatchTypeEnum::kMatchAnySystemBucketResource); + static ResourcePattern forAnySystemBuckets(const boost::optional& tenantId) { + return ResourcePattern(MatchTypeEnum::kMatchAnySystemBucketResource, tenantId); } /** * Returns a pattern that matches any collection with the prefix "system.buckets." in database * "db". */ - static ResourcePattern forAnySystemBucketsInDatabase(StringData dbName) { - return ResourcePattern( - MatchTypeEnum::kMatchAnySystemBucketInDBResource, - NamespaceString::createNamespaceStringForAuth(boost::none, dbName, "")); + static ResourcePattern forAnySystemBucketsInDatabase(const DatabaseName& dbName) { + return ResourcePattern(MatchTypeEnum::kMatchAnySystemBucketInDBResource, + NamespaceString(dbName)); } /** * Returns a pattern that matches any collection with the prefix "system.buckets." * in any database. */ - static ResourcePattern forAnySystemBucketsInAnyDatabase(StringData collectionName) { + static ResourcePattern forAnySystemBucketsInAnyDatabase( + const boost::optional& tenantId, StringData collectionName) { return ResourcePattern( MatchTypeEnum::kMatchSystemBucketInAnyDBResource, NamespaceString::createNamespaceStringForAuth(boost::none, "", collectionName)); @@ -133,12 +146,11 @@ class ResourcePattern { * Returns a pattern that matches a collection with the name * ".system.buckets." */ - static ResourcePattern forExactSystemBucketsCollection(StringData dbName, - StringData collectionName) { - invariant(!collectionName.startsWith("system.buckets.")); - return ResourcePattern( - MatchTypeEnum::kMatchExactSystemBucketResource, - NamespaceString::createNamespaceStringForAuth(boost::none, dbName, collectionName)); + static ResourcePattern forExactSystemBucketsCollection(const NamespaceString& nss) { + uassert(ErrorCodes::InvalidNamespace, + "Invalid namespace '{}.system.buckets.{}'"_format(nss.db(), nss.coll()), + !nss.coll().startsWith("system.buckets.")); + return ResourcePattern(MatchTypeEnum::kMatchExactSystemBucketResource, nss); } /** @@ -225,12 +237,23 @@ class ResourcePattern { return _ns; } + /** + * Returns the tenantId that this pattern matches. + */ + boost::optional tenantId() const { + return _ns.tenantId(); + } + /** * Returns the database that this pattern matches. * * Behavior is undefined unless the pattern is of type matchDatabaseName or * matchExactNamespace or matchExactSystemBucketResource or matchAnySystemBucketInDBResource */ + DatabaseName dbNameToMatch() const { + return _ns.dbName(); + } + StringData databaseToMatch() const { return _ns.db(); } @@ -248,11 +271,24 @@ class ResourcePattern { std::string toString() const; bool operator==(const ResourcePattern& other) const { - if (_matchType != other._matchType) - return false; - if (_ns != other._ns) - return false; - return true; + return (_matchType == other._matchType) && (_ns == other._ns); + } + + bool operator<(const ResourcePattern& other) const { + if (_matchType < other._matchType) { + return true; + } + return (_matchType == other._matchType) && (_ns < other._ns); + } + + /** + * Perform an equality comparison ignoring the TenantID component of NamespaceString. + * This is necessary during migration of ResourcePattern to be tenant aware. 
+ * TODO (SERVER-76195) Remove legacy non-tenant aware APIs from ResourcePattern + */ + bool matchesIgnoringTenant(const ResourcePattern& other) const { + return (_matchType == other._matchType) && (_ns.db() == other._ns.db()) && + (_ns.coll() == other._ns.coll()); } template @@ -264,7 +300,7 @@ class ResourcePattern { * Returns a pattern for IDL generated code to use. */ static ResourcePattern forAuthorizationContract(MatchTypeEnum e) { - return ResourcePattern(e); + return ResourcePattern(e, boost::none); } // AuthorizationContract works directly with MatchTypeEnum. Users should not be concerned with @@ -274,7 +310,9 @@ class ResourcePattern { } private: - explicit ResourcePattern(MatchTypeEnum type) : _matchType(type) {} + ResourcePattern(MatchTypeEnum type, const boost::optional& tenantId) + : ResourcePattern(type, + NamespaceString::createNamespaceStringForAuth(tenantId, ""_sd, ""_sd)) {} ResourcePattern(MatchTypeEnum type, const NamespaceString& ns) : _matchType(type), _ns(ns) {} MatchTypeEnum _matchType; diff --git a/src/mongo/db/auth/resource_pattern_search_list.h b/src/mongo/db/auth/resource_pattern_search_list.h new file mode 100644 index 0000000000000..2a58c630c5033 --- /dev/null +++ b/src/mongo/db/auth/resource_pattern_search_list.h @@ -0,0 +1,152 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/namespace_string_util.h" + +namespace mongo::auth { +/** + * A ResourcePatternSearchList represents up to kMaxResourcePatternLookups elements + * of ResourcePattern objects representing the breakdown of the target ResourcePattern + * into the subpatterns which may potentially match it. 
+ * + * The search lists are as follows, depending on the type of "target": + * + * target is ResourcePattern::forAnyResource(): + * searchList = { ResourcePattern::forAnyResource() } + * target is the ResourcePattern::forClusterResource(): + * searchList = { ResourcePattern::forAnyResource(), ResourcePattern::forClusterResource() } + * target is a database, db: + * searchList = { ResourcePattern::forAnyResource(), + * ResourcePattern::forAnyNormalResource(), + * db } + * target is a non-system collection, db.coll: + * searchList = { ResourcePattern::forAnyResource(), + * ResourcePattern::forAnyNormalResource(), + * db, + * coll, + * db.coll } + * target is a system buckets collection, db.system.buckets.coll: + * searchList = { ResourcePattern::forAnyResource(), + * ResourcePattern::forAnySystemBuckets(), + * ResourcePattern::forAnySystemBucketsInDatabase("db"), + * ResourcePattern::forAnySystemBucketsInAnyDatabase("coll"), + * ResourcePattern::forExactSystemBucketsCollection("db", "coll"), + * system.buckets.coll, + * db.system.buckets.coll } + * target is a system collection, db.system.coll: + * searchList = { ResourcePattern::forAnyResource(), + * system.coll, + * db.system.coll } + */ +class ResourcePatternSearchList { +private: + static constexpr StringData kSystemBucketsPrefix = "system.buckets."_sd; + static constexpr std::size_t kMaxResourcePatternLookups = 7; + using ListType = std::array; + +public: + ResourcePatternSearchList() = delete; + explicit ResourcePatternSearchList(const ResourcePattern& target) { + _list[_size++] = ResourcePattern::forAnyResource(target.tenantId()); + if (target.isExactNamespacePattern()) { + const auto& nss = target.ns(); + + // Normal collections can be matched by anyNormalResource, or their database's resource. + if (nss.isNormalCollection()) { + // But even normal collections in non-normal databases should not be matchable with + // ResourcePattern::forAnyNormalResource. 'local' and 'config' are + // used to store special system collections, which user level + // administrators should not be able to manipulate. + if (!nss.isLocalDB() && !nss.isConfigDB()) { + _list[_size++] = ResourcePattern::forAnyNormalResource(target.tenantId()); + } + _list[_size++] = ResourcePattern::forDatabaseName(nss.dbName()); + } else if ((nss.coll().size() > kSystemBucketsPrefix.size()) && + nss.coll().startsWith(kSystemBucketsPrefix)) { + // System bucket patterns behave similarly to any/db/coll/exact patterns, + // but with a fixed "system.buckets." prefix to the collection name.
StringData coll = nss.coll().substr(kSystemBucketsPrefix.size()); + _list[_size++] = ResourcePattern::forExactSystemBucketsCollection( + NamespaceStringUtil::parseNamespaceFromRequest(nss.dbName(), coll)); + _list[_size++] = ResourcePattern::forAnySystemBuckets(target.tenantId()); + _list[_size++] = ResourcePattern::forAnySystemBucketsInDatabase(nss.dbName()); + _list[_size++] = + ResourcePattern::forAnySystemBucketsInAnyDatabase(target.tenantId(), coll); + } + + // All collections can be matched by a collection resource for their name + _list[_size++] = ResourcePattern::forCollectionName(target.tenantId(), nss.coll()); + } else if (target.isDatabasePattern()) { + if (!target.ns().isLocalDB() && !target.ns().isConfigDB()) { + _list[_size++] = ResourcePattern::forAnyNormalResource(target.tenantId()); + } + } + + if (!target.isAnyResourcePattern()) { + _list[_size++] = target; + } + dassert(_size <= _list.size()); + } + + using const_iterator = ListType::const_iterator; + using size_type = ListType::size_type; + + const_iterator cbegin() const noexcept { + return _list.cbegin(); + } + + const_iterator cend() const noexcept { + return _list.cbegin() + _size; + } + + size_type size() const noexcept { + return _size; + } + + bool empty() const noexcept { + return _size == 0; + } + +private: + ListType _list; + size_type _size{0}; +}; + +} // namespace mongo::auth diff --git a/src/mongo/db/auth/resource_pattern_search_list_test.cpp b/src/mongo/db/auth/resource_pattern_search_list_test.cpp new file mode 100644 index 0000000000000..c91716f1daa21 --- /dev/null +++ b/src/mongo/db/auth/resource_pattern_search_list_test.cpp @@ -0,0 +1,113 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file.
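For readers skimming the new header above, here is a compact standalone sketch of the same expansion logic using plain strings in place of ResourcePattern and NamespaceString. It only mirrors the cases enumerated in the class comment (normal, system, and system.buckets collections) and is not the real implementation.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Build a simplified search list for an exact-namespace target "db.coll".
std::vector<std::string> buildSearchList(const std::string& db, const std::string& coll) {
    const std::string bucketsPrefix = "system.buckets.";
    std::vector<std::string> list;
    list.push_back("anyResource");

    const bool isSystemColl = coll.rfind("system.", 0) == 0;
    if (!isSystemColl) {
        // Normal collections also match "any normal resource" and the database
        // pattern, except in the special 'local' and 'config' databases.
        if (db != "local" && db != "config") {
            list.push_back("anyNormalResource");
        }
        list.push_back("db:" + db);
    } else if (coll.size() > bucketsPrefix.size() && coll.rfind(bucketsPrefix, 0) == 0) {
        // system.buckets.<coll> targets match the dedicated bucket patterns.
        const std::string shortColl = coll.substr(bucketsPrefix.size());
        list.push_back("exactSystemBuckets:" + db + "." + shortColl);
        list.push_back("anySystemBuckets");
        list.push_back("anySystemBucketsInDatabase:" + db);
        list.push_back("anySystemBucketsInAnyDatabase:" + shortColl);
    }

    // Every exact-namespace target also matches its collection-name pattern and itself.
    list.push_back("coll:" + coll);
    list.push_back("exact:" + db + "." + coll);
    return list;
}

int main() {
    for (const auto& entry : buildSearchList("test", "system.buckets.bar")) {
        std::cout << entry << '\n';
    }
}
```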
+ */ + +#include + +#include +#include + +#include "mongo/db/auth/resource_pattern_search_list.h" +#include "mongo/db/database_name.h" +#include "mongo/db/tenant_id.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +namespace mongo::auth { +namespace { + +struct TestCase { + ResourcePattern target; + std::set lookups; +}; + +const auto kAdminDB = DatabaseName::createDatabaseName_forTest(boost::none, "admin"_sd); +const auto kTestDB = DatabaseName::createDatabaseName_forTest(boost::none, "test"_sd); +const auto kTestFooNSS = NamespaceString::createNamespaceString_forTest(kTestDB, "foo"_sd); +const auto kTestViewsNSS = + NamespaceString::createNamespaceString_forTest(kTestDB, "system.views"_sd); +const auto kTestBarBucketNSS = + NamespaceString::createNamespaceString_forTest(kTestDB, "system.buckets.bar"_sd); +const auto kTestBarNSS = NamespaceString::createNamespaceString_forTest(kTestDB, "bar"_sd); + +const auto kAnyRsrc = ResourcePattern::forAnyResource(boost::none); +const auto kAnyNormalRsrc = ResourcePattern::forAnyNormalResource(boost::none); +const auto kClusterRsrc = ResourcePattern::forClusterResource(boost::none); +const auto kAnySystemBuckets = ResourcePattern::forAnySystemBuckets(boost::none); + +const auto kAdminDBRsrc = ResourcePattern::forDatabaseName(kAdminDB); +const auto kTestDBRsrc = ResourcePattern::forDatabaseName(kTestDB); +const auto kFooCollRsrc = ResourcePattern::forCollectionName(boost::none, "foo"_sd); +const auto kTestFooRsrc = ResourcePattern::forExactNamespace(kTestFooNSS); +const auto kViewsCollRsrc = ResourcePattern::forCollectionName(boost::none, "system.views"_sd); +const auto kTestViewsRsrc = ResourcePattern::forExactNamespace(kTestViewsNSS); +const auto kTestBarBucketRsrc = ResourcePattern::forExactNamespace(kTestBarBucketNSS); +const auto kTestBarSystemBucketsRsrc = + ResourcePattern::forExactSystemBucketsCollection(kTestBarNSS); +const auto kTestDBSystemBucketsRsrc = ResourcePattern::forAnySystemBucketsInDatabase(kTestDB); +const auto kBarCollSystemBucketsRsrc = + ResourcePattern::forAnySystemBucketsInAnyDatabase(boost::none, "bar"_sd); +const auto kBucketsBarCollRsrc = + ResourcePattern::forCollectionName(boost::none, "system.buckets.bar"_sd); + +const TestCase kTestCases[] = { + {kAnyRsrc, {kAnyRsrc}}, + {kClusterRsrc, {kAnyRsrc, kClusterRsrc}}, + {kAdminDBRsrc, {kAnyRsrc, kAnyNormalRsrc, kAdminDBRsrc}}, + {kTestFooRsrc, {kAnyRsrc, kAnyNormalRsrc, kTestDBRsrc, kFooCollRsrc, kTestFooRsrc}}, + {kTestViewsRsrc, {kAnyRsrc, kTestDBRsrc, kViewsCollRsrc, kTestViewsRsrc}}, + {kTestBarBucketRsrc, + {kAnyRsrc, + kAnySystemBuckets, + kTestBarSystemBucketsRsrc, + kAnySystemBuckets, + kTestDBSystemBucketsRsrc, + kBarCollSystemBucketsRsrc, + kBucketsBarCollRsrc, + kTestBarBucketRsrc}}, +}; + + +TEST(ResourcePatternSearchListTest, ExpectedSearchLists) { + for (const auto& testCase : kTestCases) { + LOGV2(7705501, "Building search list", "target"_attr = testCase.target); + const ResourcePatternSearchList searchList(testCase.target); + for (auto it = searchList.cbegin(); it != searchList.cend(); ++it) { + LOGV2(7705502, "Built search pattern", "search"_attr = *it); + ASSERT_TRUE(testCase.lookups.find(*it) != testCase.lookups.end()); + } + } +} + +} // namespace +} // namespace mongo::auth diff --git a/src/mongo/db/auth/restriction.h 
b/src/mongo/db/auth/restriction.h index cdc4586ea2e2e..2bfa94470ab26 100644 --- a/src/mongo/db/auth/restriction.h +++ b/src/mongo/db/auth/restriction.h @@ -33,7 +33,10 @@ #include #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/auth/restriction_environment.h" namespace mongo { diff --git a/src/mongo/db/auth/restriction_environment.cpp b/src/mongo/db/auth/restriction_environment.cpp index 1b1beed50cc8b..f368e8eb50e3b 100644 --- a/src/mongo/db/auth/restriction_environment.cpp +++ b/src/mongo/db/auth/restriction_environment.cpp @@ -27,10 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/auth/restriction_environment.h" #include "mongo/transport/session.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/restriction_environment.h b/src/mongo/db/auth/restriction_environment.h index 7f47be118caac..d05b8a5cc77ab 100644 --- a/src/mongo/db/auth/restriction_environment.h +++ b/src/mongo/db/auth/restriction_environment.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/db/client.h" #include "mongo/transport/session.h" diff --git a/src/mongo/db/auth/restriction_test.cpp b/src/mongo/db/auth/restriction_test.cpp index f48e379f638d9..2f786f105816c 100644 --- a/src/mongo/db/auth/restriction_test.cpp +++ b/src/mongo/db/auth/restriction_test.cpp @@ -27,13 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/auth/restriction.h" #include "mongo/db/auth/restriction_environment.h" #include "mongo/db/auth/restriction_mock.h" #include "mongo/db/auth/restriction_set.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/sockaddr.h" namespace mongo { diff --git a/src/mongo/db/auth/role_name_or_string.cpp b/src/mongo/db/auth/role_name_or_string.cpp index 72d7b4773caeb..0d99195c436d8 100644 --- a/src/mongo/db/auth/role_name_or_string.cpp +++ b/src/mongo/db/auth/role_name_or_string.cpp @@ -29,6 +29,12 @@ #include "mongo/db/auth/role_name_or_string.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/util/assert_util.h" + namespace mongo { /** diff --git a/src/mongo/db/auth/role_name_or_string.h b/src/mongo/db/auth/role_name_or_string.h index ccc933cce0c46..a8007b90ccf15 100644 --- a/src/mongo/db/auth/role_name_or_string.h +++ b/src/mongo/db/auth/role_name_or_string.h @@ -30,13 +30,14 @@ #pragma once #include +#include +#include #include -#include "mongo/db/auth/role_name.h" - #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/database_name.h" namespace mongo { diff --git a/src/mongo/db/auth/sasl_authentication_session_test.cpp b/src/mongo/db/auth/sasl_authentication_session_test.cpp index f63fa24776417..945de4f15e4d7 100644 --- a/src/mongo/db/auth/sasl_authentication_session_test.cpp +++ b/src/mongo/db/auth/sasl_authentication_session_test.cpp @@ -27,25 +27,42 @@ * it in the license file. 
*/ +#include +#include +#include +#include #include -#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/sasl_client_session.h" #include "mongo/crypto/mechanism_scram.h" +#include "mongo/crypto/sha1_block.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_impl.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" -#include "mongo/db/auth/authz_session_external_state_mock.h" #include "mongo/db/auth/sasl_command_constants.h" #include "mongo/db/auth/sasl_mechanism_registry.h" #include "mongo/db/auth/sasl_options.h" #include "mongo/db/auth/sasl_plain_server_conversation.h" #include "mongo/db/auth/sasl_scram_server_conversation.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/operation_context.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/password_digest.h" namespace mongo { diff --git a/src/mongo/db/auth/sasl_commands.cpp b/src/mongo/db/auth/sasl_commands.cpp index ff160e753e674..3b606b43b82a2 100644 --- a/src/mongo/db/auth/sasl_commands.cpp +++ b/src/mongo/db/auth/sasl_commands.cpp @@ -28,33 +28,49 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include - -#include "mongo/base/init.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/authenticate.h" -#include "mongo/client/sasl_client_authenticate.h" -#include "mongo/db/auth/authentication_metrics.h" #include "mongo/db/auth/authentication_session.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/authz_manager_external_state_mock.h" -#include "mongo/db/auth/authz_session_external_state_mock.h" -#include "mongo/db/auth/sasl_command_constants.h" #include "mongo/db/auth/sasl_commands_gen.h" +#include "mongo/db/auth/sasl_mechanism_registry.h" #include "mongo/db/auth/sasl_options.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/sasl_payload.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/authentication_commands.h" -#include "mongo/db/server_options.h" -#include "mongo/logv2/attribute_storage.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/util/base64.h" -#include "mongo/util/sequence_util.h" -#include "mongo/util/str.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" +#include 
"mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -207,9 +223,10 @@ void warnIfCompressed(OperationContext* opCtx) { SaslReply doSaslStart(OperationContext* opCtx, AuthenticationSession* session, const SaslStartCommand& request) { - auto mechanism = uassertStatusOK( - SASLServerMechanismRegistry::get(opCtx->getServiceContext()) - .getServerMechanism(request.getMechanism(), request.getDbName().toString())); + auto mechanism = + uassertStatusOK(SASLServerMechanismRegistry::get(opCtx->getServiceContext()) + .getServerMechanism(request.getMechanism(), + DatabaseNameUtil::serialize(request.getDbName()))); uassert(ErrorCodes::BadValue, "Plaintext mechanisms may not be used with speculativeSaslStart", @@ -232,7 +249,7 @@ SaslReply runSaslStart(OperationContext* opCtx, opCtx->markKillOnClientDisconnect(); // Note that while updateDatabase can throw, it should not be able to for saslStart. - session->updateDatabase(request.getDbName().toStringWithTenantId()); + session->updateDatabase(DatabaseNameUtil::serializeForAuth(request.getDbName())); session->setMechanismName(request.getMechanism()); return doSaslStart(opCtx, session, request); diff --git a/src/mongo/db/auth/sasl_commands.h b/src/mongo/db/auth/sasl_commands.h index 054756745e672..57dcb585794e5 100644 --- a/src/mongo/db/auth/sasl_commands.h +++ b/src/mongo/db/auth/sasl_commands.h @@ -36,7 +36,7 @@ namespace mongo { class OperationContext; /** - * Handle isMaster: { speculativeAuthenticate: {...} } + * Handle hello: { speculativeAuthenticate: {...} } */ void doSpeculativeSaslStart(OperationContext* opCtx, const BSONObj& sourceObj, diff --git a/src/mongo/db/auth/sasl_mechanism_registry.cpp b/src/mongo/db/auth/sasl_mechanism_registry.cpp index fd2f45363be9a..f28b0860009eb 100644 --- a/src/mongo/db/auth/sasl_mechanism_registry.cpp +++ b/src/mongo/db/auth/sasl_mechanism_registry.cpp @@ -27,22 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/auth/sasl_mechanism_registry.h" +#include +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/client/authenticate.h" +#include "mongo/db/auth/auth_name.h" +#include "mongo/db/auth/sasl_mechanism_registry.h" #include "mongo/db/auth/sasl_options.h" #include "mongo/db/auth/user.h" -#include "mongo/db/connection_health_metrics_parameter_gen.h" +#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/exit_code.h" -#include "mongo/util/icu.h" -#include "mongo/util/net/socket_utils.h" #include "mongo/util/quick_exit.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/sequence_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -102,20 +110,7 @@ void SASLServerMechanismRegistry::advertiseMechanismNamesForUser(OperationContex } AuthorizationManager* authManager = AuthorizationManager::get(opCtx->getServiceContext()); - - UserHandle user; - const auto swUser = [&] { - if (gEnableDetailedConnectionHealthMetricLogLines) { - ScopedCallbackTimer timer([&](Microseconds elapsed) { - LOGV2(6788603, - "Auth metrics report", - "metric"_attr = "sasl_acquireUser", - "micros"_attr = elapsed.count()); - }); - } - - return authManager->acquireUser(opCtx, UserRequest(userName, boost::none)); - }(); + const auto swUser = authManager->acquireUser(opCtx, UserRequest(userName, boost::none)); if (!swUser.isOK()) { auto& status = swUser.getStatus(); @@ -128,7 +123,7 @@ void SASLServerMechanismRegistry::advertiseMechanismNamesForUser(OperationContex uassertStatusOK(status); } - user = std::move(swUser.getValue()); + UserHandle user = std::move(swUser.getValue()); BSONArrayBuilder mechanismsBuilder; const auto& mechList = _getMapRef(userName.getDB()); diff --git a/src/mongo/db/auth/sasl_mechanism_registry.h b/src/mongo/db/auth/sasl_mechanism_registry.h index 2112ad06e7dfc..c829c8e93bdcd 100644 --- a/src/mongo/db/auth/sasl_mechanism_registry.h +++ b/src/mongo/db/auth/sasl_mechanism_registry.h @@ -29,17 +29,42 @@ #pragma once +#include +#include +#include +#include #include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/authentication_metrics.h" #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/commands.h" #include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -105,12 +130,12 @@ class SaslServerCommonBase { virtual SecurityPropertySet properties() const = 0; /** - * This returns a number that represents the "amount" of security provided by this mechanism - * to determine the order in which it is offered to clients in the isMaster - * saslSupportedMechs response. 
+ * This returns a number that represents the "amount" of security provided by this mechanism to + * determine the order in which it is offered to clients in the "hello" saslSupportedMechs + * response. * - * The value of securityLevel is arbitrary so long as the more secure mechanisms return a - * higher value than the less secure mechanisms. + * The value of securityLevel is arbitrary so long as the more secure mechanisms return a higher + * value than the less secure mechanisms. * * For example, SCRAM-SHA-256 > SCRAM-SHA-1 > PLAIN */ @@ -405,7 +430,7 @@ class SASLServerMechanismRegistry { using MechList = std::vector>; MechList& _getMapRef(StringData dbName) { - return _getMapRef(dbName != "$external"_sd); + return _getMapRef(dbName != DatabaseName::kExternal.db()); } MechList& _getMapRef(bool internal) { diff --git a/src/mongo/db/auth/sasl_mechanism_registry_test.cpp b/src/mongo/db/auth/sasl_mechanism_registry_test.cpp index 11bdb67385930..49774a0cf9fd0 100644 --- a/src/mongo/db/auth/sasl_mechanism_registry_test.cpp +++ b/src/mongo/db/auth/sasl_mechanism_registry_test.cpp @@ -27,14 +27,27 @@ * it in the license file. */ +#include + +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" #include "mongo/crypto/mechanism_scram.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_impl.h" +#include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" #include "mongo/db/auth/sasl_mechanism_registry.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -251,7 +264,7 @@ class MechanismRegistryTest : public ServiceContextTest { SASLServerMechanismRegistry registry; const UserName internalSajack = {"sajack"_sd, "test"_sd}; - const UserName externalSajack = {"sajack"_sd, "$external"_sd}; + const UserName externalSajack = {"sajack"_sd, DatabaseName::kExternal.db()}; }; TEST_F(MechanismRegistryTest, acquireInternalMechanism) { diff --git a/src/mongo/db/auth/sasl_options.cpp b/src/mongo/db/auth/sasl_options.cpp index 03dd0d862458c..433dda80cc9b8 100644 --- a/src/mongo/db/auth/sasl_options.cpp +++ b/src/mongo/db/auth/sasl_options.cpp @@ -28,9 +28,12 @@ */ #include "mongo/db/auth/sasl_options.h" + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/db/auth/sasl_options_gen.h" #include "mongo/db/stats/counters.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/db/auth/sasl_options_init.cpp b/src/mongo/db/auth/sasl_options_init.cpp index 6d22544af6da7..6aff2c0321431 100644 --- a/src/mongo/db/auth/sasl_options_init.cpp +++ b/src/mongo/db/auth/sasl_options_init.cpp @@ -27,16 +27,23 @@ * it in the license file. 
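The reflowed securityLevel comment in sasl_mechanism_registry.h above describes a simple contract: mechanisms are advertised in descending order of an arbitrary numeric level, with more secure mechanisms ranked higher. A minimal sketch with made-up level values:

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    // Made-up securityLevel values; only their relative order matters,
    // mirroring the comment above (SCRAM-SHA-256 > SCRAM-SHA-1 > PLAIN).
    std::vector<std::pair<std::string, int>> mechanisms = {
        {"PLAIN", 0}, {"SCRAM-SHA-1", 1}, {"SCRAM-SHA-256", 2}};

    // Advertise the most secure mechanisms first.
    std::sort(mechanisms.begin(), mechanisms.end(),
              [](const auto& a, const auto& b) { return a.second > b.second; });

    for (const auto& [name, level] : mechanisms) {
        std::cout << name << " (securityLevel " << level << ")\n";
    }
}
```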
*/ -#include "mongo/db/auth/sasl_options.h" -#include "mongo/db/auth/sasl_options_gen.h" +#include +#include +#include -#include +#include +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/status.h" +#include "mongo/db/auth/sasl_options.h" +#include "mongo/db/auth/sasl_options_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/socket_utils.h" -#include "mongo/util/options_parser/startup_option_init.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_options.h" -#include "mongo/util/str.h" +#include "mongo/util/options_parser/value.h" namespace mongo { diff --git a/src/mongo/db/auth/sasl_payload.cpp b/src/mongo/db/auth/sasl_payload.cpp index 5061c96ba36a8..f580597baae4c 100644 --- a/src/mongo/db/auth/sasl_payload.cpp +++ b/src/mongo/db/auth/sasl_payload.cpp @@ -29,7 +29,12 @@ #include "mongo/db/auth/sasl_payload.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" +#include "mongo/util/str.h" namespace mongo { namespace auth { diff --git a/src/mongo/db/auth/sasl_payload.h b/src/mongo/db/auth/sasl_payload.h index 671c75fbce837..f11b5805db471 100644 --- a/src/mongo/db/auth/sasl_payload.h +++ b/src/mongo/db/auth/sasl_payload.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.cpp b/src/mongo/db/auth/sasl_plain_server_conversation.cpp index 2a579bb3f5a1f..da1c98e077748 100644 --- a/src/mongo/db/auth/sasl_plain_server_conversation.cpp +++ b/src/mongo/db/auth/sasl_plain_server_conversation.cpp @@ -27,23 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include #include +#include -#include "mongo/db/auth/sasl_plain_server_conversation.h" +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/secure_allocator.h" #include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/crypto/mechanism_scram.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/sasl_mechanism_registry.h" +#include "mongo/db/auth/sasl_plain_server_conversation.h" #include "mongo/db/auth/user.h" #include "mongo/db/connection_health_metrics_parameter_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/base64.h" +#include "mongo/util/duration.h" #include "mongo/util/password_digest.h" -#include "mongo/util/text.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" +#include "mongo/util/text.h" // IWYU pragma: keep #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl diff --git a/src/mongo/db/auth/sasl_plain_server_conversation.h b/src/mongo/db/auth/sasl_plain_server_conversation.h index 8ade19b30f0b2..60e6571fec934 100644 --- a/src/mongo/db/auth/sasl_plain_server_conversation.h +++ b/src/mongo/db/auth/sasl_plain_server_conversation.h @@ -29,8 +29,22 @@ #pragma once +#include +#include +#include + +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/crypto/sha1_block.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/sasl_mechanism_policies.h" #include "mongo/db/auth/sasl_mechanism_registry.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/auth/sasl_scram_server_conversation.cpp b/src/mongo/db/auth/sasl_scram_server_conversation.cpp index 109de067ea2f5..38ea9d6d9b247 100644 --- a/src/mongo/db/auth/sasl_scram_server_conversation.cpp +++ b/src/mongo/db/auth/sasl_scram_server_conversation.cpp @@ -27,29 +27,49 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/sasl_scram_server_conversation.h" -#include +#include #include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/crypto/mechanism_scram.h" -#include "mongo/crypto/sha1_block.h" +#include "mongo/db/auth/auth_name.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/cluster_auth_mode.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/sasl_command_constants.h" #include "mongo/db/auth/sasl_mechanism_policies.h" #include "mongo/db/auth/sasl_mechanism_registry.h" #include "mongo/db/auth/sasl_options.h" +#include "mongo/db/auth/sasl_scram_server_conversation.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/connection_health_metrics_parameter_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/random.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/base64.h" +#include "mongo/util/duration.h" +#include "mongo/util/read_through_cache.h" #include "mongo/util/sequence_util.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl diff --git a/src/mongo/db/auth/sasl_scram_server_conversation.h b/src/mongo/db/auth/sasl_scram_server_conversation.h index a25487af8b099..1530133fa771d 100644 --- a/src/mongo/db/auth/sasl_scram_server_conversation.h +++ b/src/mongo/db/auth/sasl_scram_server_conversation.h @@ -29,9 +29,26 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/crypto/mechanism_scram.h" +#include "mongo/crypto/sha1_block.h" #include "mongo/db/auth/sasl_mechanism_policies.h" #include "mongo/db/auth/sasl_mechanism_registry.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/operation_context.h" #include "mongo/util/icu.h" namespace mongo { diff --git a/src/mongo/db/auth/sasl_scram_test.cpp b/src/mongo/db/auth/sasl_scram_test.cpp index 431157c9b9d25..ffc677c2cc376 100644 --- a/src/mongo/db/auth/sasl_scram_test.cpp +++ b/src/mongo/db/auth/sasl_scram_test.cpp @@ -28,11 +28,31 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/native_sasl_client_session.h" +#include "mongo/client/sasl_client_session.h" #include "mongo/client/scram_client_cache.h" #include "mongo/crypto/mechanism_scram.h" #include "mongo/crypto/sha1_block.h" @@ -47,9 +67,13 @@ #include "mongo/db/auth/sasl_scram_server_conversation.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include 
"mongo/unittest/unittest.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/base64.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/password_digest.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/auth/security_file.cpp b/src/mongo/db/auth/security_file.cpp index 9539eb958c09f..722157fbf4fdc 100644 --- a/src/mongo/db/auth/security_file.cpp +++ b/src/mongo/db/auth/security_file.cpp @@ -29,13 +29,26 @@ #include +#include +#include +#include +#include #include #include #include -#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" -#include "mongo/db/auth/security_key.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/auth/security_key.cpp b/src/mongo/db/auth/security_key.cpp index 9c77e9fd5819f..e5a47031e40c9 100644 --- a/src/mongo/db/auth/security_key.cpp +++ b/src/mongo/db/auth/security_key.cpp @@ -28,23 +28,33 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/security_key.h" - +#include +#include +#include #include +#include #include +#include +#include +#include + #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/internal_auth.h" #include "mongo/crypto/mechanism_scram.h" #include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/sasl_options.h" #include "mongo/db/auth/security_file.h" +#include "mongo/db/auth/security_key.h" #include "mongo/db/auth/user.h" -#include "mongo/db/server_options.h" +#include "mongo/db/auth/user_name.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/icu.h" #include "mongo/util/password_digest.h" diff --git a/src/mongo/db/auth/security_key_test.cpp b/src/mongo/db/auth/security_key_test.cpp index 7a33cfbbad758..7a720d3edc962 100644 --- a/src/mongo/db/auth/security_key_test.cpp +++ b/src/mongo/db/auth/security_key_test.cpp @@ -27,17 +27,36 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include #include - +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/cluster_auth_mode.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/security_file.h" #include "mongo/db/auth/security_key.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/auth/security_token_authentication_guard.cpp b/src/mongo/db/auth/security_token_authentication_guard.cpp index 04462ee0a3d4e..f59c0919fb884 100644 --- a/src/mongo/db/auth/security_token_authentication_guard.cpp +++ b/src/mongo/db/auth/security_token_authentication_guard.cpp @@ -30,8 +30,20 @@ #include "mongo/db/auth/security_token_authentication_guard.h" +#include + +#include +#include +#include + #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl diff --git a/src/mongo/db/auth/user.cpp b/src/mongo/db/auth/user.cpp index c50de72a5461d..0f7e77fc41415 100644 --- a/src/mongo/db/auth/user.cpp +++ b/src/mongo/db/auth/user.cpp @@ -29,18 +29,28 @@ #include "mongo/db/auth/user.h" +#include +#include +#include +#include #include +#include + +#include "mongo/base/data_range.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/crypto/sha1_block.h" #include "mongo/crypto/sha256_block.h" -#include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/restriction_environment.h" #include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user_name.h" -#include "mongo/platform/atomic_word.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/sequence_util.h" +#include "mongo/db/multitenancy_gen.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -104,11 +114,25 @@ const User::CredentialData& User::getCredentials() const { } ActionSet User::getActionsForResource(const ResourcePattern& resource) const { - stdx::unordered_map::const_iterator it = _privileges.find(resource); - if (it == _privileges.end()) { - return ActionSet(); + if (gMultitenancySupport) { + // TODO (SERVER-76195) Remove legacy non-tenant aware APIs from ResourcePattern + // During migration of resource patterns, we may have a mismatch between privileges and + // privilege checks where one may have a tenantId attached and the other doesn't. + // Once all checks have been updated, we can remove this branch and use the find below. 
+ ActionSet actions; + for (const auto& priv : _privileges) { + if (priv.second.getResourcePattern().matchesIgnoringTenant(resource)) { + actions.addAllActionsFromSet(priv.second.getActions()); + } + } + return actions; } - return it->second.getActions(); + + if (auto it = _privileges.find(resource); it != _privileges.end()) { + return it->second.getActions(); + } + + return ActionSet(); } bool User::hasActionsForResource(const ResourcePattern& resource) const { diff --git a/src/mongo/db/auth/user.h b/src/mongo/db/auth/user.h index f9ca505322b62..eea1bbd6b2147 100644 --- a/src/mongo/db/auth/user.h +++ b/src/mongo/db/auth/user.h @@ -29,19 +29,35 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/sha1_block.h" #include "mongo/crypto/sha256_block.h" +#include "mongo/db/auth/action_set.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/restriction_set.h" #include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/operation_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/base64.h" #include "mongo/util/read_through_cache.h" namespace mongo { diff --git a/src/mongo/db/auth/user_cache_acquisition_stats.cpp b/src/mongo/db/auth/user_cache_acquisition_stats.cpp index 9b0df3cd45207..de346e28a008c 100644 --- a/src/mongo/db/auth/user_cache_acquisition_stats.cpp +++ b/src/mongo/db/auth/user_cache_acquisition_stats.cpp @@ -27,12 +27,10 @@ * it in the license file. 
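The User::getActionsForResource() change in user.cpp above falls back, while multitenancy is enabled, to a linear scan that unions actions from every privilege whose pattern matches the resource once the tenant component is ignored, and keeps the exact map lookup otherwise. A simplified standalone sketch of that shape (toy types, not the real ActionSet/Privilege classes):

```cpp
#include <iostream>
#include <map>
#include <optional>
#include <set>
#include <string>
#include <tuple>

// Simplified stand-ins: a resource is (optional tenant, name); privileges map
// a resource pattern to a set of action names.
struct ResourceSketch {
    std::optional<std::string> tenant;
    std::string name;
    bool operator<(const ResourceSketch& o) const {
        return std::tie(tenant, name) < std::tie(o.tenant, o.name);
    }
};

using ActionSetSketch = std::set<std::string>;
using PrivilegeMap = std::map<ResourceSketch, ActionSetSketch>;

// When tenant-aware and legacy patterns may coexist, fall back to a linear scan
// that ignores the tenant component; otherwise use the direct lookup.
ActionSetSketch actionsForResource(const PrivilegeMap& privileges,
                                   const ResourceSketch& resource,
                                   bool migrationInProgress) {
    if (migrationInProgress) {
        ActionSetSketch actions;
        for (const auto& [pattern, patternActions] : privileges) {
            if (pattern.name == resource.name) {  // tenant intentionally ignored
                actions.insert(patternActions.begin(), patternActions.end());
            }
        }
        return actions;
    }

    if (auto it = privileges.find(resource); it != privileges.end()) {
        return it->second;
    }
    return {};
}

int main() {
    ResourceSketch key{std::string("tenantA"), "test.foo"};
    PrivilegeMap privs;
    privs[key] = {"find", "insert"};
    for (const auto& a : actionsForResource(privs, {std::nullopt, "test.foo"}, true)) {
        std::cout << a << '\n';  // find, insert
    }
}
```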
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/auth/user_cache_acquisition_stats.h" - +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" #include "mongo/util/duration.h" -#include "mongo/util/system_tick_source.h" namespace mongo { diff --git a/src/mongo/db/auth/user_cache_acquisition_stats.h b/src/mongo/db/auth/user_cache_acquisition_stats.h index c7323de86590b..836813dfd3875 100644 --- a/src/mongo/db/auth/user_cache_acquisition_stats.h +++ b/src/mongo/db/auth/user_cache_acquisition_stats.h @@ -29,10 +29,16 @@ #pragma once +#include +#include #include +#include #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/client.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" #include "mongo/util/tick_source.h" namespace mongo { diff --git a/src/mongo/db/auth/user_cache_invalidator_job.cpp b/src/mongo/db/auth/user_cache_invalidator_job.cpp index fe8650325f546..c385ff43b11eb 100644 --- a/src/mongo/db/auth/user_cache_invalidator_job.cpp +++ b/src/mongo/db/auth/user_cache_invalidator_job.cpp @@ -28,22 +28,37 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/user_cache_invalidator_job.h" - #include +#include + +#include +#include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/user_cache_invalidator_job.h" #include "mongo/db/auth/user_cache_invalidator_job_parameters_gen.h" #include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/grid.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" -#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -145,7 +160,9 @@ void UserCacheInvalidator::start(ServiceContext* serviceCtx, OperationContext* o PeriodicRunner::PeriodicJob job( "UserCacheInvalidator", [serviceCtx](Client* client) { getUserCacheInvalidator(serviceCtx)->run(); }, - loadInterval()); + loadInterval(), + // TODO(SERVER-74660): Please revisit if this periodic job could be made killable. 
+ false /*isKillableByStepdown*/); invalidator->_job = std::make_unique(periodicRunner->makeJob(std::move(job))); @@ -197,7 +214,7 @@ void UserCacheInvalidator::run() { "users in cache", "error"_attr = refreshStatus); try { - _authzManager->invalidateUsersFromDB(opCtx.get(), "$external"_sd); + _authzManager->invalidateUsersFromDB(opCtx.get(), DatabaseName::kExternal); } catch (const DBException& e) { LOGV2_WARNING(5914805, "Error invalidating $external users from user cache", diff --git a/src/mongo/db/auth/user_cache_invalidator_job.h b/src/mongo/db/auth/user_cache_invalidator_job.h index 15f8d4f6903d1..2b5c23b96aa99 100644 --- a/src/mongo/db/auth/user_cache_invalidator_job.h +++ b/src/mongo/db/auth/user_cache_invalidator_job.h @@ -28,9 +28,14 @@ */ #pragma once +#include +#include + +#include "mongo/base/status.h" #include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" #include "mongo/db/service_context.h" +#include "mongo/util/duration.h" #include "mongo/util/periodic_runner.h" namespace mongo { diff --git a/src/mongo/db/auth/user_document_parser.cpp b/src/mongo/db/auth/user_document_parser.cpp index 738cf5214b57d..c37325f15d92a 100644 --- a/src/mongo/db/auth/user_document_parser.cpp +++ b/src/mongo/db/auth/user_document_parser.cpp @@ -30,17 +30,34 @@ #include "mongo/db/auth/user_document_parser.h" +#include +#include +#include #include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/auth/address_restriction.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/auth/privilege_parser.h" +#include "mongo/db/auth/parsed_privilege_gen.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/restriction_set.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -204,11 +221,11 @@ Status V2UserDocumentParser::checkValidUserDocument(const BSONObj& doc) const { return Status::OK(); }; - const auto sha1status = validateScram(SCRAMSHA1_CREDENTIAL_FIELD_NAME); + auto sha1status = validateScram(SCRAMSHA1_CREDENTIAL_FIELD_NAME); if (!sha1status.isOK() && (sha1status.code() != ErrorCodes::NoSuchKey)) { return sha1status; } - const auto sha256status = validateScram(SCRAMSHA256_CREDENTIAL_FIELD_NAME); + auto sha256status = validateScram(SCRAMSHA256_CREDENTIAL_FIELD_NAME); if (!sha256status.isOK() && (sha256status.code() != ErrorCodes::NoSuchKey)) { return sha256status; } @@ -413,7 +430,7 @@ Status V2UserDocumentParser::initializeUserIndirectRolesFromUserDocument(const B } Status V2UserDocumentParser::initializeUserPrivilegesFromUserDocument(const BSONObj& doc, - User* user) const { + User* user) const try { BSONElement privilegesElement = doc[PRIVILEGES_FIELD_NAME]; if (privilegesElement.eoo()) return Status::OK(); @@ -422,34 +439,20 @@ Status V2UserDocumentParser::initializeUserPrivilegesFromUserDocument(const BSON "User document 'inheritedPrivileges' 
element must be Array if present."); } PrivilegeVector privileges; - std::string errmsg; - for (BSONObjIterator it(privilegesElement.Obj()); it.more(); it.next()) { - if ((*it).type() != Object) { + + for (const auto& element : privilegesElement.Obj()) { + if (element.type() != Object) { LOGV2_WARNING(23743, "Wrong type of element in inheritedPrivileges array", "user"_attr = user->getName(), - "element"_attr = *it); - continue; - } - Privilege privilege; - ParsedPrivilege pp; - if (!pp.parseBSON((*it).Obj(), &errmsg)) { - LOGV2_WARNING(23744, - "Could not parse privilege element in user document", - "user"_attr = user->getName(), - "error"_attr = errmsg); + "element"_attr = element); continue; } + + auto pp = auth::ParsedPrivilege::parse(IDLParserContext("userPrivilegeDoc"), element.Obj()); std::vector unrecognizedActions; - Status status = - ParsedPrivilege::parsedPrivilegeToPrivilege(pp, &privilege, &unrecognizedActions); - if (!status.isOK()) { - LOGV2_WARNING(23745, - "Could not parse privilege element in user document", - "user"_attr = user->getName(), - "error"_attr = causedBy(status)); - continue; - } + auto privilege = Privilege::resolvePrivilegeWithTenant( + user->getName().getTenant(), pp, &unrecognizedActions); if (unrecognizedActions.size()) { std::string unrecognizedActionsString; str::joinStringDelim(unrecognizedActions, &unrecognizedActionsString, ','); @@ -464,6 +467,8 @@ Status V2UserDocumentParser::initializeUserPrivilegesFromUserDocument(const BSON } user->setPrivileges(privileges); return Status::OK(); +} catch (const DBException& ex) { + return ex.toStatus(); } Status V2UserDocumentParser::initializeUserFromUserDocument(const BSONObj& privDoc, diff --git a/src/mongo/db/auth/user_document_parser.h b/src/mongo/db/auth/user_document_parser.h index d4593a5ae125b..f76eb7ff26685 100644 --- a/src/mongo/db/auth/user_document_parser.h +++ b/src/mongo/db/auth/user_document_parser.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include +#include + #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/user.h" #include "mongo/db/tenant_id.h" diff --git a/src/mongo/db/auth/user_document_parser_test.cpp b/src/mongo/db/auth/user_document_parser_test.cpp index 9773927ad5035..6134dc8047967 100644 --- a/src/mongo/db/auth/user_document_parser_test.cpp +++ b/src/mongo/db/auth/user_document_parser_test.cpp @@ -31,17 +31,31 @@ * Unit tests of the UserDocumentParser type. 
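One detail of the user_document_parser.cpp change above worth calling out is the function-try-block: the whole body of initializeUserPrivilegesFromUserDocument() is wrapped so an exception thrown by the IDL parser comes back as a Status instead of propagating. A small self-contained example of the idiom, using a toy Status type rather than the real one:

```cpp
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Minimal stand-in for Status: an ok flag and a reason string.
struct StatusSketch {
    bool ok;
    std::string reason;
};

// A function-try-block wraps the entire body, so a throwing parser is turned
// into an error return, mirroring the `const try { ... } catch (...)` shape above.
StatusSketch parsePrivilegesSketch(const std::vector<std::string>& docs) try {
    for (const auto& doc : docs) {
        if (doc.empty()) {
            throw std::runtime_error("empty privilege document");
        }
        // ... parse and accumulate the privilege ...
    }
    return {true, ""};
} catch (const std::exception& ex) {
    return {false, ex.what()};
}

int main() {
    auto status = parsePrivilegesSketch({"{resource: ..., actions: [...]}", ""});
    std::cout << (status.ok ? "OK" : "failed: " + status.reason) << '\n';
}
```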
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/mechanism_scram.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_manager.h" +#include "mongo/crypto/sha1_block.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/auth/auth_name.h" +#include "mongo/db/auth/restriction_environment.h" +#include "mongo/db/auth/restriction_set.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/sasl_options.h" #include "mongo/db/auth/user_document_parser.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/sockaddr.h" #include "mongo/util/net/socket_utils.h" #define ASSERT_NULL(EXPR) ASSERT_FALSE(EXPR) diff --git a/src/mongo/db/auth/user_management_commands_parser.cpp b/src/mongo/db/auth/user_management_commands_parser.cpp index 01f94f81d7af2..45a1de98ac0c6 100644 --- a/src/mongo/db/auth/user_management_commands_parser.cpp +++ b/src/mongo/db/auth/user_management_commands_parser.cpp @@ -27,27 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/user_management_commands_parser.h" - -#include #include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/address_restriction.h" #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/parsed_privilege_gen.h" #include "mongo/db/auth/privilege.h" -#include "mongo/db/auth/privilege_parser.h" -#include "mongo/db/auth/user_document_parser.h" +#include "mongo/db/auth/user_management_commands_parser.h" #include "mongo/db/auth/user_name.h" -#include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/command_generic_argument.h" +#include "mongo/idl/idl_parser.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -149,30 +150,18 @@ Status parseRoleNamesFromBSONArray(const BSONArray& rolesArray, * If parsedPrivileges is not NULL, adds to it the privileges parsed out of the input BSONArray. 
*/ Status parseAndValidatePrivilegeArray(const BSONArray& privileges, - PrivilegeVector* parsedPrivileges) { - for (BSONObjIterator it(privileges); it.more(); it.next()) { - BSONElement element = *it; + PrivilegeVector* parsedPrivileges) try { + for (const auto& element : privileges) { if (element.type() != Object) { return Status(ErrorCodes::FailedToParse, "Elements in privilege arrays must be objects"); } - ParsedPrivilege parsedPrivilege; - std::string errmsg; - if (!parsedPrivilege.parseBSON(element.Obj(), &errmsg)) { - return Status(ErrorCodes::FailedToParse, errmsg); - } - if (!parsedPrivilege.isValid(&errmsg)) { - return Status(ErrorCodes::FailedToParse, errmsg); - } - - Privilege privilege; + auto parsedPrivilege = + auth::ParsedPrivilege::parse(IDLParserContext("privilege"), element.Obj()); std::vector unrecognizedActions; - Status status = ParsedPrivilege::parsedPrivilegeToPrivilege( - parsedPrivilege, &privilege, &unrecognizedActions); - if (!status.isOK()) { - return status; - } + auto privilege = Privilege::resolvePrivilegeWithTenant( + boost::none /* tenantId */, parsedPrivilege, &unrecognizedActions); if (unrecognizedActions.size()) { std::string unrecognizedActionsString; str::joinStringDelim(unrecognizedActions, &unrecognizedActionsString, ','); @@ -184,6 +173,8 @@ Status parseAndValidatePrivilegeArray(const BSONArray& privileges, parsedPrivileges->push_back(privilege); } return Status::OK(); +} catch (const DBException& ex) { + return Status(ErrorCodes::FailedToParse, ex.toStatus().reason()); } } // namespace auth diff --git a/src/mongo/db/auth/user_management_commands_parser.h b/src/mongo/db/auth/user_management_commands_parser.h index 639c6cde630df..1846209351056 100644 --- a/src/mongo/db/auth/user_management_commands_parser.h +++ b/src/mongo/db/auth/user_management_commands_parser.h @@ -34,6 +34,7 @@ #include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/privilege_format.h" diff --git a/src/mongo/db/auth/validated_tenancy_scope.cpp b/src/mongo/db/auth/validated_tenancy_scope.cpp index 2c05409607f48..5364f2cea65be 100644 --- a/src/mongo/db/auth/validated_tenancy_scope.cpp +++ b/src/mongo/db/auth/validated_tenancy_scope.cpp @@ -29,14 +29,39 @@ #include "mongo/db/auth/validated_tenancy_scope.h" -#include "mongo/base/init.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/crypto/hash_block.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/security_token_gen.h" -#include "mongo/db/multitenancy.h" +#include "mongo/db/client.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/multitenancy_gen.h" +#include "mongo/db/operation_context.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/log_detail.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -46,28 +71,33 @@ const auto validatedTenancyScopeDecoration = OperationContext::declareDecoration>(); MONGO_INITIALIZER(SecurityTokenOptionValidate)(InitializerContext*) { if (gMultitenancySupport) { - logv2::detail::setGetTenantIDCallback([]() -> boost::optional { + logv2::detail::setGetTenantIDCallback([]() -> std::string { auto* client = Client::getCurrent(); if (!client) { - return boost::none; + return std::string(); } if (auto* opCtx = client->getOperationContext()) { if (auto token = ValidatedTenancyScope::get(opCtx)) { - return token->tenantId(); + return token->tenantId().toString(); } } - return boost::none; + return std::string(); }); } + + if (gFeatureFlagSecurityToken.isEnabledAndIgnoreFCVUnsafeAtStartup()) { + LOGV2_WARNING( + 7539600, + "featureFlagSecurityToken is enabled. This flag MUST NOT be enabled in production"); + } } } // namespace ValidatedTenancyScope::ValidatedTenancyScope(BSONObj obj, InitTag tag) : _originalToken(obj) { - // (Ignore FCV check): TODO(SERVER-75396): add why FCV is ignored here. - const bool enabled = - gMultitenancySupport && gFeatureFlagSecurityToken.isEnabledAndIgnoreFCVUnsafe(); + const bool enabled = gMultitenancySupport && + gFeatureFlagSecurityToken.isEnabled(serverGlobalParams.featureCompatibility); uassert(ErrorCodes::InvalidOptions, "Multitenancy not enabled, refusing to accept securityToken", @@ -97,11 +127,16 @@ ValidatedTenancyScope::ValidatedTenancyScope(Client* client, TenantId tenant) "Multitenancy not enabled, refusing to accept $tenant parameter", gMultitenancySupport); - uassert(ErrorCodes::Unauthorized, - "'$tenant' may only be specified with the useTenant action type", - client && - AuthorizationSession::get(client)->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::useTenant)); + auto as = AuthorizationSession::get(client); + // The useTenant action type allows the action of impersonating any tenant, so we check against + // the cluster resource with the current authenticated user's tenant ID rather than the specific + // tenant ID being impersonated. + uassert( + ErrorCodes::Unauthorized, + "'$tenant' may only be specified with the useTenant action type", + client && + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(as->getUserTenantId()), ActionType::useTenant)); } boost::optional ValidatedTenancyScope::create(Client* client, diff --git a/src/mongo/db/auth/validated_tenancy_scope.h b/src/mongo/db/auth/validated_tenancy_scope.h index 98b5c06c302be..0742a01cb9f47 100644 --- a/src/mongo/db/auth/validated_tenancy_scope.h +++ b/src/mongo/db/auth/validated_tenancy_scope.h @@ -30,12 +30,15 @@ #pragma once #include +#include +#include +#include #include "mongo/bson/bsonobj.h" #include "mongo/db/auth/user_name.h" #include "mongo/db/tenant_id.h" #include "mongo/stdx/variant.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/db/auth/validated_tenancy_scope_test.cpp b/src/mongo/db/auth/validated_tenancy_scope_test.cpp index ee2859f70a7bb..1ab4b712069db 100644 --- a/src/mongo/db/auth/validated_tenancy_scope_test.cpp +++ b/src/mongo/db/auth/validated_tenancy_scope_test.cpp @@ -27,18 +27,42 @@ * it in the license file. 
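The MONGO_INITIALIZER change in validated_tenancy_scope.cpp above swaps the log-attribution callback from returning an optional tenant id to returning a plain std::string, with the empty string meaning "no tenant". Below is a standalone sketch of the client, operation context, and token chain it walks; the types are toy stand-ins, not the real Client or ValidatedTenancyScope APIs.

```cpp
#include <functional>
#include <iostream>
#include <optional>
#include <string>

// Simplified stand-ins for the chain walked by the log callback above.
struct TokenSketch {
    std::string tenantId;
};
struct OpCtxSketch {
    std::optional<TokenSketch> token;
};
struct ClientSketch {
    OpCtxSketch* opCtx = nullptr;
};

ClientSketch* currentClient = nullptr;  // stand-in for "the current client"

// The callback returns a plain std::string, using "" when no tenant is active.
std::function<std::string()> getTenantIdForLog = [] {
    if (!currentClient || !currentClient->opCtx || !currentClient->opCtx->token) {
        return std::string();
    }
    return currentClient->opCtx->token->tenantId;
};

int main() {
    OpCtxSketch opCtx{TokenSketch{"someTenantId"}};
    ClientSketch client{&opCtx};
    currentClient = &client;
    std::cout << getTenantIdForLog() << '\n';  // "someTenantId"
}
```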
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/auth_name.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_impl.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authorization_session_impl.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/security_token_gen.h" +#include "mongo/db/auth/user.h" #include "mongo/db/auth/validated_tenancy_scope.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -50,7 +74,7 @@ class AuthorizationSessionImplTestHelper { static void grantUseTenant(Client& client) { User user(UserRequest(UserName("useTenant"_sd, "admin"_sd), boost::none)); user.setPrivileges( - {Privilege(ResourcePattern::forClusterResource(), ActionType::useTenant)}); + {Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::useTenant)}); auto* as = dynamic_cast(AuthorizationSession::get(client)); if (as->_authenticatedUser != boost::none) { as->logoutAllDatabases(&client, "AuthorizationSessionImplTestHelper"_sd); diff --git a/src/mongo/db/baton.cpp b/src/mongo/db/baton.cpp index 937a8a616647f..64a8982017c4a 100644 --- a/src/mongo/db/baton.cpp +++ b/src/mongo/db/baton.cpp @@ -27,17 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include #include #include -#include "mongo/db/baton.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/db/baton.h" #include "mongo/platform/mutex.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/functional.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/baton.h b/src/mongo/db/baton.h index b822ac5ddace3..3f4f9b45d9a76 100644 --- a/src/mongo/db/baton.h +++ b/src/mongo/db/baton.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/util/functional.h" #include "mongo/util/future.h" diff --git a/src/mongo/db/bson/dotted_path_support.cpp b/src/mongo/db/bson/dotted_path_support.cpp index c6c4d107a3bf6..8ba0ddf2ec240 100644 --- a/src/mongo/db/bson/dotted_path_support.cpp +++ b/src/mongo/db/bson/dotted_path_support.cpp @@ -27,16 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/bson/dotted_path_support.h" - +#include +#include +#include #include +#include + +#include "mongo/bson/bson_depth.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/bson/dotted_path_support.h" +#include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" namespace mongo { @@ -54,70 +59,70 @@ void _extractAllElementsAlongPath(const BSONObj& obj, bool expandArrayOnTrailingField, BSONDepthIndex depth, MultikeyComponents* arrayComponents) { - BSONElement e = obj.getField(path); - - if (e.eoo()) { - size_t idx = path.find('.'); - if (idx != std::string::npos) { - invariant(depth != std::numeric_limits::max()); - StringData left = path.substr(0, idx); - StringData next = path.substr(idx + 1, path.size()); - - BSONElement e = obj.getField(left); - - if (e.type() == Object) { + size_t idx = path.find('.'); + if (idx != std::string::npos) { + invariant(depth != std::numeric_limits::max()); + StringData left = path.substr(0, idx); + StringData next = path.substr(idx + 1, path.size()); + + BSONElement e = obj.getField(left); + + if (e.type() == Object) { + _extractAllElementsAlongPath(e.embeddedObject(), + next, + elements, + expandArrayOnTrailingField, + depth + 1, + arrayComponents); + } else if (e.type() == Array) { + bool allDigits = false; + if (next.size() > 0 && ctype::isDigit(next[0])) { + unsigned temp = 1; + while (temp < next.size() && ctype::isDigit(next[temp])) + temp++; + allDigits = temp == next.size() || next[temp] == '.'; + } + if (allDigits) { _extractAllElementsAlongPath(e.embeddedObject(), next, elements, expandArrayOnTrailingField, depth + 1, arrayComponents); - } else if (e.type() == Array) { - bool allDigits = false; - if (next.size() > 0 && ctype::isDigit(next[0])) { - unsigned temp = 1; - while (temp < next.size() && ctype::isDigit(next[temp])) - temp++; - allDigits = temp == next.size() || next[temp] == '.'; + } else { + BSONObjIterator i(e.embeddedObject()); + while (i.more()) { + BSONElement e2 = i.next(); + if (e2.type() == Object || e2.type() == Array) + _extractAllElementsAlongPath(e2.embeddedObject(), + next, + elements, + expandArrayOnTrailingField, + depth + 1, + arrayComponents); } - if (allDigits) { - _extractAllElementsAlongPath(e.embeddedObject(), - next, - elements, - expandArrayOnTrailingField, - depth + 1, - arrayComponents); - } else { - BSONObjIterator i(e.embeddedObject()); - while (i.more()) { - BSONElement e2 = i.next(); - if (e2.type() == Object || e2.type() == Array) - _extractAllElementsAlongPath(e2.embeddedObject(), - next, - elements, - expandArrayOnTrailingField, - depth + 1, - arrayComponents); - } - if (arrayComponents) { - arrayComponents->insert(depth); - } + if (arrayComponents) { + arrayComponents->insert(depth); } - } else { - // do nothing: no match } + } else { + // do nothing: no match } } else { - if (e.type() == Array && expandArrayOnTrailingField) { - BSONObjIterator i(e.embeddedObject()); - while (i.more()) { - elements.insert(i.next()); - } - if (arrayComponents) { - arrayComponents->insert(depth); + BSONElement e = obj.getField(path); + + if (e.ok()) { + if (e.type() == Array && expandArrayOnTrailingField) { + BSONObjIterator i(e.embeddedObject()); + while (i.more()) { + elements.insert(i.next()); + } + if (arrayComponents) { + arrayComponents->insert(depth); + } + } else { + elements.insert(e); } - } else { - 
elements.insert(e); } } } diff --git a/src/mongo/db/bson/dotted_path_support.h b/src/mongo/db/bson/dotted_path_support.h index b41dd3df3ba2c..bfd3f13474ba2 100644 --- a/src/mongo/db/bson/dotted_path_support.h +++ b/src/mongo/db/bson/dotted_path_support.h @@ -31,6 +31,8 @@ #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement_comparator_interface.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/index/multikey_paths.h" diff --git a/src/mongo/db/bson/dotted_path_support_test.cpp b/src/mongo/db/bson/dotted_path_support_test.cpp index facdf459860fa..197aba455ab45 100644 --- a/src/mongo/db/bson/dotted_path_support_test.cpp +++ b/src/mongo/db/bson/dotted_path_support_test.cpp @@ -27,11 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include #include +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include + #include "mongo/base/simple_string_data_comparator.h" #include "mongo/bson/bson_depth.h" #include "mongo/bson/bsonelement.h" @@ -40,8 +45,12 @@ #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/bson/dotted_path_support.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -626,5 +635,19 @@ TEST(ExtractElementAtPathOrArrayAlongPath, NumericalPathElementTreatedAsFieldNam ASSERT(StringData(pathData).empty()); } +TEST(ExtractElementAtPathOrArrayAlongPath, FieldWithDotsDontHideNestedObjects) { + BSONObj obj(fromjson("{b: {c: 'foo'}, \"b.c\": 'bar'}")); + BSONElementSet actualElements; + const bool expandArrayOnTrailingField = true; + MultikeyComponents actualArrayComponents; + dps::extractAllElementsAlongPath( + obj, "b.c", actualElements, expandArrayOnTrailingField, &actualArrayComponents); + + assertBSONElementSetsAreEqual({BSON("c" + << "foo")}, + actualElements); + assertArrayComponentsAreEqual(MultikeyComponents{}, actualArrayComponents); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/bulk_write_shard_test.cpp b/src/mongo/db/bulk_write_shard_test.cpp index c53a3961d57de..93cbf71bad95c 100644 --- a/src/mongo/db/bulk_write_shard_test.cpp +++ b/src/mongo/db/bulk_write_shard_test.cpp @@ -27,23 +27,57 @@ * it in the license file. 
*/ -#include "mongo/db/catalog/collection_uuid_mismatch_info.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands/bulk_write.h" #include "mongo/db/commands/bulk_write_gen.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/commands/bulk_write_parser.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/shard_role.h" +#include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/type_collection_common_types_gen.h" #include "mongo/unittest/assert.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -65,20 +99,19 @@ namespace { // | testDB1 | sharded.porcupine.tree | YES | dbV1 | sV1 | // | testDB2 | sharded.oasis | YES | dbV2 | sV2 | // +---------+-------------------------+-------------+---------------+---------------+ -class BulkWriteShardTest : public ServiceContextMongoDTest { +class BulkWriteShardTest : public ShardServerTestFixture { protected: OperationContext* opCtx() { - return _opCtx.get(); + return operationContext(); } void setUp() override; - void tearDown() override; - const ShardId thisShardId{"this"}; - - const DatabaseName dbNameTestDb1{"testDB1"}; + const DatabaseName dbNameTestDb1 = + DatabaseName::createDatabaseName_forTest(boost::none, "testDB1"); const DatabaseVersion dbVersionTestDb1{UUID::gen(), Timestamp(1, 0)}; - const DatabaseName dbNameTestDb2{"testDB2"}; + const DatabaseName dbNameTestDb2 = + DatabaseName::createDatabaseName_forTest(boost::none, "testDB2"); const DatabaseVersion dbVersionTestDb2{UUID::gen(), Timestamp(2, 0)}; const NamespaceString nssUnshardedCollection1 = @@ -103,21 +136,20 @@ class BulkWriteShardTest : public ServiceContextMongoDTest { ShardVersionFactory::make(ChunkVersion(CollectionGeneration{OID::gen(), Timestamp(12, 0)}, CollectionPlacement(10, 1)), boost::optional(boost::none)); - -private: - ServiceContext::UniqueOperationContext 
_opCtx; }; void createTestCollection(OperationContext* opCtx, const NamespaceString& nss) { + OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE unsafeCreateCollection( + opCtx); uassertStatusOK(createCollection(opCtx, nss.dbName(), BSON("create" << nss.coll()))); } void installDatabaseMetadata(OperationContext* opCtx, const DatabaseName& dbName, const DatabaseVersion& dbVersion) { - AutoGetDb autoDb(opCtx, dbName, MODE_X, {}); + AutoGetDb autoDb(opCtx, dbName, MODE_X); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, dbName); - scopedDss->setDbInfo(opCtx, {dbName.db(), ShardId("this"), dbVersion}); + scopedDss->setDbInfo(opCtx, {dbName.toString_forTest(), ShardId("this"), dbVersion}); } void installUnshardedCollectionMetadata(OperationContext* opCtx, const NamespaceString& nss) { @@ -177,21 +209,7 @@ UUID getCollectionUUID(OperationContext* opCtx, const NamespaceString& nss) { } void BulkWriteShardTest::setUp() { - ServiceContextMongoDTest::setUp(); - _opCtx = getGlobalServiceContext()->makeOperationContext(&cc()); - serverGlobalParams.clusterRole = ClusterRole::ShardServer; - - const repl::ReplSettings replSettings = {}; - repl::ReplicationCoordinator::set( - getGlobalServiceContext(), - std::unique_ptr( - new repl::ReplicationCoordinatorMock(_opCtx->getServiceContext(), replSettings))); - ASSERT_OK(repl::ReplicationCoordinator::get(getGlobalServiceContext()) - ->setFollowerMode(repl::MemberState::RS_PRIMARY)); - - repl::createOplog(_opCtx.get()); - - ShardingState::get(getServiceContext())->setInitialized(ShardId("this"), OID::gen()); + ShardServerTestFixture::setUp(); // Setup test collections and metadata installDatabaseMetadata(opCtx(), dbNameTestDb1, dbVersionTestDb1); @@ -203,7 +221,7 @@ void BulkWriteShardTest::setUp() { // Create nssShardedCollection1 createTestCollection(opCtx(), nssShardedCollection1); - const auto uuidShardedCollection1 = getCollectionUUID(_opCtx.get(), nssShardedCollection1); + const auto uuidShardedCollection1 = getCollectionUUID(opCtx(), nssShardedCollection1); installShardedCollectionMetadata( opCtx(), nssShardedCollection1, @@ -211,12 +229,12 @@ void BulkWriteShardTest::setUp() { {ChunkType(uuidShardedCollection1, ChunkRange{BSON("skey" << MINKEY), BSON("skey" << MAXKEY)}, shardVersionShardedCollection1.placementVersion(), - thisShardId)}, - thisShardId); + _myShardName)}, + _myShardName); // Create nssShardedCollection2 createTestCollection(opCtx(), nssShardedCollection2); - const auto uuidShardedCollection2 = getCollectionUUID(_opCtx.get(), nssShardedCollection2); + const auto uuidShardedCollection2 = getCollectionUUID(opCtx(), nssShardedCollection2); installShardedCollectionMetadata( opCtx(), nssShardedCollection2, @@ -224,14 +242,8 @@ void BulkWriteShardTest::setUp() { {ChunkType(uuidShardedCollection2, ChunkRange{BSON("skey" << MINKEY), BSON("skey" << MAXKEY)}, shardVersionShardedCollection2.placementVersion(), - thisShardId)}, - thisShardId); -} - -void BulkWriteShardTest::tearDown() { - _opCtx.reset(); - ServiceContextMongoDTest::tearDown(); - repl::ReplicationCoordinator::set(getGlobalServiceContext(), nullptr); + _myShardName)}, + _myShardName); } NamespaceInfoEntry nsInfoWithShardDatabaseVersions(NamespaceString nss, @@ -258,12 +270,14 @@ TEST_F(BulkWriteShardTest, ThreeSuccessfulInsertsOrdered) { nssShardedCollection2, dbVersionTestDb2, shardVersionShardedCollection2), }); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + 
bulk_write::performWrites(opCtx(), request); ASSERT_EQ(3, replyItems.size()); for (const auto& reply : replyItems) { ASSERT_OK(reply.getStatus()); } + ASSERT_EQ(0, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -278,10 +292,12 @@ TEST_F(BulkWriteShardTest, OneFailingShardedOneSkippedUnshardedSuccessInsertOrde nsInfoWithShardDatabaseVersions( nssUnshardedCollection1, dbVersionTestDb1, ShardVersion::UNSHARDED())}); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(1, replyItems.size()); ASSERT_EQ(ErrorCodes::StaleConfig, replyItems.back().getStatus().code()); + ASSERT_EQ(1, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -296,10 +312,12 @@ TEST_F(BulkWriteShardTest, TwoFailingShardedInsertsOrdered) { nssShardedCollection1, dbVersionTestDb1, incorrectShardVersion), }); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(1, replyItems.size()); ASSERT_EQ(ErrorCodes::StaleConfig, replyItems.back().getStatus().code()); + ASSERT_EQ(1, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -314,11 +332,13 @@ TEST_F(BulkWriteShardTest, OneSuccessfulShardedOneFailingShardedOrdered) { nsInfoWithShardDatabaseVersions( nssShardedCollection2, dbVersionTestDb2, incorrectShardVersion)}); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(2, replyItems.size()); ASSERT_OK(replyItems.front().getStatus()); ASSERT_EQ(ErrorCodes::StaleConfig, replyItems.back().getStatus().code()); + ASSERT_EQ(1, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -332,10 +352,12 @@ TEST_F(BulkWriteShardTest, OneFailingShardedOneSkippedShardedUnordered) { nssShardedCollection1, dbVersionTestDb1, incorrectShardVersion)}); request.setOrdered(false); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(1, replyItems.size()); ASSERT_EQ(ErrorCodes::StaleConfig, replyItems.back().getStatus().code()); + ASSERT_EQ(1, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -351,10 +373,12 @@ TEST_F(BulkWriteShardTest, OneSuccessfulShardedOneFailingShardedUnordered) { nssShardedCollection2, dbVersionTestDb2, shardVersionShardedCollection2)}); request.setOrdered(false); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(1, replyItems.size()); ASSERT_EQ(ErrorCodes::StaleConfig, replyItems.back().getStatus().code()); + ASSERT_EQ(1, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -373,12 +397,14 @@ TEST_F(BulkWriteShardTest, InsertsAndUpdatesSuccessOrdered) { nsInfoWithShardDatabaseVersions( nssUnshardedCollection1, dbVersionTestDb1, ShardVersion::UNSHARDED())}); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(4, replyItems.size()); for (const auto& 
reply : replyItems) { ASSERT_OK(reply.getStatus()); } + ASSERT_EQ(0, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -399,12 +425,14 @@ TEST_F(BulkWriteShardTest, InsertsAndUpdatesSuccessUnordered) { request.setOrdered(false); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(4, replyItems.size()); for (const auto& reply : replyItems) { ASSERT_OK(reply.getStatus()); } + ASSERT_EQ(0, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -425,38 +453,64 @@ TEST_F(BulkWriteShardTest, InsertsAndUpdatesFailUnordered) { request.setOrdered(false); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(2, replyItems.size()); ASSERT_OK(replyItems.front().getStatus()); ASSERT_EQ(ErrorCodes::StaleConfig, replyItems.back().getStatus().code()); + ASSERT_EQ(1, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } -// TODO (SERVER-75202): Re-enable this test & write a test for deletes. -// Unordered updates into different collections where some fail. -// TEST_F(BulkWriteShardTest, UpdatesFailUnordered) { -// BulkWriteCommandRequest request( -// { -// BulkWriteUpdateOp(1, BSON("x" << BSON("$gt" << 0)), BSON("x" << -99)), -// BulkWriteUpdateOp(0, BSON("x" << BSON("$gt" << 0)), BSON("x" << -9)), -// BulkWriteInsertOp(1, BSON("x" << -1))}, -// {nsInfoWithShardDatabaseVersions( -// nssShardedCollection1, dbVersionTestDb, incorrectShardVersion), -// nsInfoWithShardDatabaseVersions( -// nssShardedCollection2, dbVersionTestDb, shardVersionShardedCollection2)}); +// Ordered updates into different collections where some fail. +TEST_F(BulkWriteShardTest, UpdatesFailOrdered) { + BulkWriteCommandRequest request( + {BulkWriteUpdateOp(1, BSON("x" << BSON("$gt" << 0)), BSON("x" << -99)), + BulkWriteUpdateOp(0, BSON("x" << BSON("$gt" << 0)), BSON("x" << -9)), + BulkWriteInsertOp(1, BSON("x" << -1))}, + {nsInfoWithShardDatabaseVersions( + nssShardedCollection1, dbVersionTestDb1, incorrectShardVersion), + nsInfoWithShardDatabaseVersions( + nssShardedCollection2, dbVersionTestDb2, shardVersionShardedCollection2)}); -// request.setOrdered(false); + request.setOrdered(true); -// auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); -// ASSERT_EQ(2, replyItems.size()); -// ASSERT_OK(replyItems.front().getStatus()); -// ASSERT_EQ(ErrorCodes::StaleConfig, replyItems[1].getStatus().code()); + ASSERT_EQ(2, replyItems.size()); + ASSERT_OK(replyItems.front().getStatus()); + ASSERT_EQ(ErrorCodes::StaleConfig, replyItems[1].getStatus().code()); + ASSERT_EQ(1, numErrors); -// OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); -// } + OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); +} + +// Ordered deletes into different collections where some fail. 
+TEST_F(BulkWriteShardTest, DeletesFailOrdered) { + BulkWriteCommandRequest request( + {BulkWriteInsertOp(1, BSON("x" << -1)), + BulkWriteDeleteOp(0, BSON("x" << BSON("$gt" << 0))), + BulkWriteInsertOp(1, BSON("x" << -1))}, + {nsInfoWithShardDatabaseVersions( + nssShardedCollection1, dbVersionTestDb1, incorrectShardVersion), + nsInfoWithShardDatabaseVersions( + nssShardedCollection2, dbVersionTestDb2, shardVersionShardedCollection2)}); + + request.setOrdered(true); + + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); + + ASSERT_EQ(2, replyItems.size()); + ASSERT_OK(replyItems.front().getStatus()); + ASSERT_EQ(ErrorCodes::StaleConfig, replyItems[1].getStatus().code()); + ASSERT_EQ(1, numErrors); + + OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); +} // After the first insert fails due to an incorrect database version, the rest // of the writes are skipped when operations are ordered. @@ -470,10 +524,12 @@ TEST_F(BulkWriteShardTest, FirstFailsRestSkippedStaleDbVersionOrdered) { nsInfoWithShardDatabaseVersions( nssShardedCollection2, dbVersionTestDb2, shardVersionShardedCollection2)}); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(1, replyItems.size()); ASSERT_EQ(ErrorCodes::StaleDbVersion, replyItems.back().getStatus().code()); + ASSERT_EQ(1, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } @@ -491,11 +547,13 @@ TEST_F(BulkWriteShardTest, FirstFailsRestSkippedStaleDbVersionUnordered) { nssShardedCollection2, dbVersionTestDb2, shardVersionShardedCollection2)}); request.setOrdered(false); - auto replyItems = bulk_write::performWrites(opCtx(), request); + const auto& [replyItems, retriedStmtIds, numErrors] = + bulk_write::performWrites(opCtx(), request); ASSERT_EQ(2, replyItems.size()); ASSERT_OK(replyItems.front().getStatus()); ASSERT_EQ(ErrorCodes::StaleDbVersion, replyItems.back().getStatus().code()); + ASSERT_EQ(1, numErrors); OperationShardingState::get(opCtx()).resetShardingOperationFailedStatus(); } diff --git a/src/mongo/db/cancelable_operation_context.cpp b/src/mongo/db/cancelable_operation_context.cpp index abcee949cb844..d6318873019d2 100644 --- a/src/mongo/db/cancelable_operation_context.cpp +++ b/src/mongo/db/cancelable_operation_context.cpp @@ -27,12 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/cancelable_operation_context.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/cancelable_operation_context.h" #include "mongo/db/operation_context.h" -#include "mongo/stdx/mutex.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future_impl.h" namespace mongo { diff --git a/src/mongo/db/cancelable_operation_context.h b/src/mongo/db/cancelable_operation_context.h index 7f51b7bfd383c..8d24dab92cab1 100644 --- a/src/mongo/db/cancelable_operation_context.h +++ b/src/mongo/db/cancelable_operation_context.h @@ -29,7 +29,9 @@ #pragma once +#include #include +#include #include "mongo/db/client.h" #include "mongo/db/service_context.h" diff --git a/src/mongo/db/cancelable_operation_context_test.cpp b/src/mongo/db/cancelable_operation_context_test.cpp index fd08402085bbb..14b0165520249 100644 --- a/src/mongo/db/cancelable_operation_context_test.cpp +++ b/src/mongo/db/cancelable_operation_context_test.cpp @@ -27,13 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/cancelable_operation_context.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" -#include "mongo/stdx/mutex.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/concurrency/thread_pool.h" namespace mongo { diff --git a/src/mongo/db/catalog/README.md b/src/mongo/db/catalog/README.md index c16383f56c544..b0da804492a47 100644 --- a/src/mongo/db/catalog/README.md +++ b/src/mongo/db/catalog/README.md @@ -365,27 +365,30 @@ cache of the [durable catalog](#durable-catalog) state. It provides the followin * Ensures `Collection` objects are in-sync with opened storage snapshots. ### Synchronization -Catalog access is synchronized using [read-copy-update][] where reads operate on an immutable -instance and writes on a new instance with its contents copied from the previous immutable instance -used for reads. Readers holding on to a catalog instance will thus not observe any writes that -happen after requesting an instance. If it is desired to observe writes while holding a catalog +Catalog access is synchronized using [Multiversion concurrency control] where readers operate on +immutable catalog, collection and index instances. Writes use [copy-on-write][] to create newer +versions of the catalog, collection and index instances to be changed, contents are copied from the +previous latest version. Readers holding on to a catalog instance will thus not observe any writes +that happen after requesting an instance. If it is desired to observe writes while holding a catalog instance then the reader must refresh it. Catalog writes are handled with the `CollectionCatalog::write(callback)` interface. It provides the -necessary [read-copy-update][] abstractions. A writable catalog instance is created by making a +necessary [copy-on-write][] abstractions. A writable catalog instance is created by making a shallow copy of the existing catalog. The actual write is implemented in the supplied callback which is allowed to throw. Execution of the write callbacks are serialized and may run on a different -thread than the thread calling `CollectionCatalog::write`. +thread than the thread calling `CollectionCatalog::write`. 
Users should take care of not performing +any blocking operations in these callbacks as it would block all other DDL writes in the system. To avoid a bottleneck in the case the catalog contains a large number of collections (being slow to -copy), concurrent writes are batched together. Any thread that enters `CollectionCatalog::write` -while a catalog instance is being copied is enqueued. When the copy finishes, all enqueued write -jobs are run on that catalog instance by the copying thread. +copy), immutable data structures are used, concurrent writes are also batched together. Any thread +that enters `CollectionCatalog::write` while a catalog instance is being copied or while executing +write callbacks is enqueued. When the copy finishes, all enqueued write jobs are run on that catalog +instance by the copying thread. ### Collection objects Objects of the `Collection` class provide access to a collection's properties between [DDL](#glossary) operations that modify these properties. Modifications are synchronized using -[read-copy-update][]. Reads access immutable `Collection` instances. Writes, such as rename +[copy-on-write][]. Reads access immutable `Collection` instances. Writes, such as rename collection, apply changes to a clone of the latest `Collection` instance and then atomically install the new `Collection` instance in the catalog. It is possible for operations that read at different points in time to use different `Collection` objects. @@ -407,16 +410,17 @@ In addition `Collection` objects have shared ownership of: by the storage engine. A writable `Collection` may only be requested in an active [WriteUnitOfWork](#WriteUnitOfWork). The -new `Collection` instance is installed in the catalog when the storage transaction commits, but only -after all other `onCommit` [Changes](#Changes) have run. This ensures `onCommit` operations can -write to the writable `Collection` before it becomes visible to readers in the catalog. If the -storage transaction rolls back then the writable `Collection` object is simply discarded and no -change is ever made to the catalog. +new `Collection` instance is installed in the catalog when the storage transaction commits as the +first `onCommit` [Changes](#Changes) that run. This means that it is not allowed to perform any +modification to catalog, collection or index instances in `onCommit` handlers. Such modifications +would break the immutability property of these instances for readers. If the storage transaction +rolls back then the writable `Collection` object is simply discarded and no change is ever made to +the catalog. A writable `Collection` is a clone of the existing `Collection`, members are either deep or shallowed copied. Notably, a shallow copy is made for the [`IndexCatalog`](#index-catalog). -The oplog `Collection` follows special rules, it does not use [read-copy-update][] or any other form +The oplog `Collection` follows special rules, it does not use [copy-on-write][] or any other form of synchronization. Modifications operate directly on the instance installed in the catalog. It is not allowed to read concurrently with writes on the oplog `Collection`. @@ -438,6 +442,8 @@ The `Collection` object is brought to existence in two ways: is not present in the `CollectionCatalog`, or the `Collection` is there, but incompatible with the snapshot. See [here](#catalog-changes-versioning-and-the-minimum-valid-snapshot) how a `Collection` is determined to be incompatible. + 3. 
When we read at latest concurrently with a DDL operation that is also performing multikey + changes. For (1) and (2.1) the `Collection` objects are stored as shared pointers in the `CollectionCatalog` and available to all operations running in the database. These `Collection` objects are released @@ -452,6 +458,9 @@ that instantiated them. When the snapshot is abandoned, such as during query yie previously instantiated `Collection` instead of performing the instantiation at every lookup for the same operation. +(2.3) is an edge case where neither latest or pending `Collection` match the opened snapshot due to +concurrent multikey changes. + Users of `Collection` instances have a few responsibilities to keep the object valid. 1. Hold a collection-level lock. 2. Use an AutoGetCollection helper. @@ -925,6 +934,14 @@ synchronize shutdown, so that all operations are finished with the storage engin Certain types of global storage engine operations, such as recoverToStableTimestamp(), also require this lock to be held in exclusive mode. +### Tenant Lock + +A resource of ResourceType Tenant is used when a database belongs to a tenant. It is used to synchronize +change streams enablement and disablement for a tenant operation with other operations associated with the tenant. +Enabling or disabling of change streams (by creating or dropping a change collection) for a tenant takes this lock +in exclusive (X) mode. Acquiring this resource with an intent lock is an indication that the operation is doing reads (IS) +or writes (IX) at the database or lower level. + ### Database Lock Any resource of ResourceType Database protects certain database-wide operations such as database @@ -942,7 +959,7 @@ ResourceType, as locking at this level is done in the storage engine itself for ### Document Level Concurrency Control Each storage engine is responsible for locking at the document level. The WiredTiger storage engine -uses MVCC (multiversion concurrency control) along with optimistic locking in order to provide +uses MVCC [multiversion concurrency control][] along with optimistic locking in order to provide concurrency guarantees. ## Two-Phase Locking @@ -1127,6 +1144,16 @@ index build does not need to be the same node that decides to commit it. See [Index Builds in Replicated Environments - MongoDB Manual](https://docs.mongodb.com/master/core/index-creation/#index-builds-in-replicated-environments). +Server 7.1 introduces the following improvements: + +* Index builds abort immediately after detecting errors other than duplicate key +violations. Before 7.1, index builds aborted the index build close to +completion, potentially long after detection. +* A secondary member can abort a two-phase index build. Before 7.1, a secondary was forced +to crash instead. See the [Voting for Abort](#voting-for-abort) section. +* Index builds are cancelled if there isn't enough storage space available. See the + [Disk Space](#disk-space) section. + ### Commit Quorum The purpose of `commitQuorm` is to ensure secondaries are ready to commit an index build quickly. @@ -1150,10 +1177,10 @@ data on a collection and performed the first drain of side-writes. Voting is imp `voteCommitIndexBuild` command, and is persisted as a write to the replicated `config.system.indexBuilds` collection. -While waiting for a commit decision, primaries and secondaries continue recieving and applying new +While waiting for a commit decision, primaries and secondaries continue receiving and applying new side writes. 
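For illustration only (not taken from this patch), the quorum described above is what a `createIndexes` command carries in its `commitQuorum` field; the index spec below is a made-up example and `"votingMembers"` is the default quorum value.

```
// Hypothetical command document: the index build commits once the voting,
// data-bearing members covered by "votingMembers" have voted to commit.
BSONObj cmd = BSON("createIndexes"
                   << "coll"
                   << "indexes"
                   << BSON_ARRAY(BSON("key" << BSON("x" << 1) << "name"
                                             << "x_1"))
                   << "commitQuorum"
                   << "votingMembers");
```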
When a quorum is reached, the current primary, under a collection X lock, will check -all index constraints. If there are errors, it will replicate an `abortIndexBuild` oplog entry. If -the index build is successful, it will replicate a `commitIndexBuild` oplog entry. +the remaining index constraints. If there are errors, it will replicate an `abortIndexBuild` oplog +entry. If the index build is successful, it will replicate a `commitIndexBuild` oplog entry. Secondaries that were not included in the commit quorum and receive a `commitIndexBuild` oplog entry will block replication until their index build is complete. @@ -1165,6 +1192,28 @@ server command. See [IndexBuildsCoordinator::_waitForNextIndexBuildActionAndCommit](https://github.com/mongodb/mongo/blob/r4.4.0-rc9/src/mongo/db/index_builds_coordinator_mongod.cpp#L632). +### Voting for Abort + +As of 7.1, a secondary can abort a two-phase index build by sending a `voteAbortIndexBuild` signal +to the primary. In contrast, before 7.1 it was forced to crash. Common causes for aborting the index +build are a killOp on the index build or running low on storage space. +The primary, upon receiving a vote to abort the index build from a secondary, will replicate an +`abortIndexBuild` oplog entry. This will cause all secondaries to gracefully abort the index build, +even if a specific secondary had already voted to commit the index build. + +Note that once a secondary has voted to commit the index build, it cannot retract the vote. In the +unlikely event that a secondary has voted for commit and for some reason it must abort while waiting +for the primary to replicate a `commitIndexBuild` oplog entry, the secondary is forced to crash. + +### Disk Space + +As of 7.1, an index build can abort due to a replica set member running low on disk space. This +applies both to primary and secondary nodes. Additionally, on a primary the index build won't start +if the available disk space is low. +The minimum amount of disk space is controlled by +[indexBuildMinAvailableDiskSpaceMB](https://github.com/mongodb/mongo/blob/406e69f6f5dee8b698c4e4308de2e9e5cef6c12c/src/mongo/db/storage/two_phase_index_build_knobs.idl#L71) +which defaults to 500MB. + ## Resumable Index Builds On clean shutdown, index builds save their progress in internal idents that will be used for resuming @@ -1220,7 +1269,7 @@ See [createIndexForApplyOps](https://github.com/mongodb/mongo/blob/6ea7d1923619b # KeyString The `KeyString` format is an alternative serialization format for `BSON`. In the text below, -`KeyString` may refer to values in this format, the C++ namespace of that name or the format itself. +`KeyString` may refer to values in this format or the format itself, while `key_string` refers to the C++ namespace. Indexes sort keys based on their BSON sorting order. In this order all numerical values compare according to their mathematical value. Given a BSON document `{ x: 42.0, y : "hello"}` and an index with the compound key `{ x : 1, y : 1}`, the document is sorted as the BSON document @@ -1280,18 +1329,18 @@ validation to check if there are keys in the old format in unique secondary inde ## Building KeyString values and passing them around There are three kinds of builders for constructing `KeyString` values: -* `KeyString::Builder`: starts building using a small allocation on the stack, and +* `key_string::Builder`: starts building using a small allocation on the stack, and dynamically switches to allocating memory from the heap. 
This is generally preferable if the value is only needed in the scope where it was created. -* `KeyString::HeapBuilder`: always builds using dynamic memory allocation. This has advantage that +* `key_string::HeapBuilder`: always builds using dynamic memory allocation. This has advantage that calling the `release` method can transfer ownership of the memory without copying. -* `KeyString::PooledBuilder`: This class allow building many `KeyString` values tightly packed into +* `key_string::PooledBuilder`: This class allow building many `KeyString` values tightly packed into larger blocks. The advantage is fewer, larger memory allocations and no wasted space due to internal fragmentation. This is a good approach when a large number of values is needed, such as for index building. However, memory for a block is only released after _no_ references to that block remain. -The `KeyString::Value` class holds a reference to a `SharedBufferFragment` with the `KeyString` and +The `key_string::Value` class holds a reference to a `SharedBufferFragment` with the `KeyString` and its `TypeBits` if any and can be used for passing around values. # The External Sorter @@ -1496,8 +1545,6 @@ that checkpoint's timestamp is known as the ## Recovery To A Stable Timestamp -## Table Ident Resolution - # File-System Backups Backups represent a full copy of the data files at a point-in-time. These copies of the data files can be used to recover data from a consistent state at an earlier time. This technique is commonly @@ -1623,19 +1670,35 @@ Flow Control is only concerned whether an operation is 'immediate' priority and * `kNormal` - An operation that should be throttled when the server is under load. If an operation is throttled, it will not affect availability or observability. Most operations, both user and internal, should use this priority unless they qualify as 'kLow' or 'kImmediate' priority. * `kLow` - It's of low importance that the operation acquires a ticket in Execution Admission Control. Reserved for background tasks that have no other operations dependent on them. The operation will be throttled under load and make significantly less progress compared to operations of higher priorities in the Execution Admission Control. -Developers should consciously decide admission priority when adding new features. Admission priority can be set through the [ScopedAdmissionPriorityForLock](https://github.com/mongodb/mongo/blob/r6.3.0-rc0/src/mongo/db/concurrency/lock_state.h#L428) RAII. +[See AdmissionContext::Priority for more details](https://github.com/mongodb/mongo/blob/r7.0.0-rc0/src/mongo/util/concurrency/admission_context.h#L45-L67). + +### How to Set Admission Priority +The preferred method for setting an operation's priority is through the RAII type [ScopedAdmissionPriorityForLock](https://github.com/mongodb/mongo/blob/r7.0.0-rc0/src/mongo/db/concurrency/locker.h#L747). + +``` +ScopedAdmissionPriorityForLock priority(opCtx->lockState(), AdmissionContext::Priority::kLow); +``` + +Since the GlobalLock may be acquired and released multiple times throughout an operation's lifetime, it's important to limit the scope of reprioritization to prevent unintentional side-effects. However, if there is a special circumstance where the RAII cannot possibly be used, the priority can be set directly through [Locker::setAdmissionPriority()](https://github.com/10gen/mongo/blob/r7.0.0-rc0/src/mongo/db/concurrency/locker.h#L525). 
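To make the scoping point above concrete, here is a small hedged sketch (not taken from the patch) that limits the low-priority window to a single `GlobalLock` acquisition; the two-argument `GlobalLock` constructor is assumed.

```
// Hedged sketch: only work inside this block is admitted with low priority; once the
// scope ends, later GlobalLock acquisitions by this operation use normal priority again.
{
    ScopedAdmissionPriorityForLock lowPriority(opCtx->lockState(),
                                               AdmissionContext::Priority::kLow);
    Lock::GlobalLock globalLock(opCtx, MODE_IS);
    // ... perform the deprioritized background reads here ...
}
```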
### Developer Guidelines for Declaring Low Admission Priority -Developers must evaluate the consequences of each low priority operation from falling too far behind, and implement safeguards to avoid any undesirable behaviors for excessive delays in low priority operations. +Developers must evaluate the consequences of each low priority operation from falling too far behind, and should try to implement safeguards to avoid any undesirable behaviors for excessive delays in low priority operations. -An operation should dynamically choose when to be deprioritized or re-prioritized. More -specifically, all low-priority candidates must assess the state of the system before taking the +Whenever possible, an operation should dynamically choose when to be deprioritized or re-prioritized. More +specifically, all low-priority candidates should assess the impact of deprioritizing their operation with respect to the state of the system before taking the GlobalLock with low priority. For example, since TTL deletes can be an expensive background task, they should default to low priority. However, it's important they don't fall too far behind TTL inserts - otherwise, there is a risk of unbounded collection growth. To remedy this issue, TTL deletes on a collection [are reprioritized](https://github.com/mongodb/mongo/blob/d1a0e34e1e67d4a2b23104af2512d14290b25e5f/src/mongo/db/ttl.idl#L96) to normal priority if they can't catch up after n-subpasses. +Examples of Deprioritized Operations: +* [TTL deletes](https://github.com/mongodb/mongo/blob/0ceb784512f81f77f0bc55001f83ca77d1aa1d84/src/mongo/db/ttl.cpp#L488) +* [Persisting sampled queries for analyze shard key](https://github.com/10gen/mongo/blob/0ef2c68f58ea20c2dde99e5ce3ea10b79e18453d/src/mongo/db/commands/write_commands.cpp#L295) +* [Unbounded Index Scans](https://github.com/10gen/mongo/blob/0ef2c68f58ea20c2dde99e5ce3ea10b79e18453d/src/mongo/db/query/planner_access.cpp#L1913) +* [Unbounded Collection Scans](https://github.com/10gen/mongo/blob/0ef2c68f58ea20c2dde99e5ce3ea10b79e18453d/src/mongo/db/query/planner_analysis.cpp#L1254) +* Index Builds [(1)](https://github.com/10gen/mongo/blob/0ef2c68f58ea20c2dde99e5ce3ea10b79e18453d/src/mongo/db/index_builds_coordinator.cpp#L3064), [(2)](https://github.com/10gen/mongo/blob/0ef2c68f58ea20c2dde99e5ce3ea10b79e18453d/src/mongo/db/index_builds_coordinator.cpp#L3105) + ## Execution Admission Control A ticketing mechanism that limits the number of concurrent storage engine transactions in a single mongod to reduce contention on storage engine resources. @@ -1722,7 +1785,7 @@ by another configurable constant (the ticket "multiplier" constant). This produc of tickets to be assigned in the next period. When the Flow Control mechanism is disabled, the ticket refresher mechanism always allows one -billion flow control ticket acquisitions per second. The Flow Control mechanism can be disabled via +billion flow control ticket acquisitions per second. The Flow Control mechanism can be disabled via a server parameter. Additionally, the mechanism is disabled on nodes that cannot accept writes. Criteria #2 and #3 are determined using a sampling mechanism that periodically stores the necessary @@ -2342,4 +2405,5 @@ oplog. for removal). - `ops.key-hex` and `ops.value-bson` are specific to the pretty printing tool used. 
-[read-copy-update]: https://en.wikipedia.org/wiki/Read-copy-update +[copy-on-write]: https://en.wikipedia.org/wiki/Copy-on-write +[Multiversion concurrency control]: https://en.wikipedia.org/wiki/Multiversion_concurrency_control diff --git a/src/mongo/db/catalog/SConscript b/src/mongo/db/catalog/SConscript index 531064815d8cf..6be7ecf63ef16 100644 --- a/src/mongo/db/catalog/SConscript +++ b/src/mongo/db/catalog/SConscript @@ -147,6 +147,7 @@ env.Library( '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/storage/record_store_base', '$BUILD_DIR/mongo/db/storage/storage_repair_observer', + '$BUILD_DIR/mongo/util/progress_meter', 'index_repair', 'multi_index_block', ], @@ -216,6 +217,7 @@ env.Library( '$BUILD_DIR/mongo/db/index/index_access_method', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/ttl_collection_cache', '$BUILD_DIR/mongo/db/vector_clock', 'collection_catalog', @@ -293,6 +295,7 @@ env.Library( target='collection_catalog', source=[ 'collection_catalog.cpp', + 'historical_catalogid_tracker.cpp', 'uncommitted_catalog_updates.cpp', 'uncommitted_multikey.cpp', 'views_for_database.cpp', @@ -310,8 +313,8 @@ env.Library( '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/storage/bson_collection_catalog_entry', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/storage_options', - '$BUILD_DIR/mongo/db/views/util', '$BUILD_DIR/mongo/db/views/views', '$BUILD_DIR/mongo/util/namespace_string_database_name_util', 'collection_crud', @@ -326,6 +329,7 @@ env.Benchmark( LIBDEPS=[ '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/multitenancy', + '$BUILD_DIR/mongo/db/service_context_non_d', 'collection_catalog', ], ) @@ -359,6 +363,7 @@ env.Library( '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/shard_role', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/storage_options', '$BUILD_DIR/mongo/db/views/views', '$BUILD_DIR/mongo/util/fail_point', @@ -402,9 +407,8 @@ env.Library( '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/storage/capped_snapshots', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/execution_context', - '$BUILD_DIR/mongo/db/storage/historical_ident_tracker', '$BUILD_DIR/mongo/db/storage/key_string', '$BUILD_DIR/mongo/db/storage/record_store_base', '$BUILD_DIR/mongo/db/storage/storage_engine_common', @@ -417,6 +421,7 @@ env.Library( '$BUILD_DIR/mongo/db/ttl_collection_cache', '$BUILD_DIR/mongo/db/vector_clock', '$BUILD_DIR/mongo/db/views/view_catalog_helpers', + '$BUILD_DIR/mongo/util/progress_meter', 'capped_visibility', 'catalog_helpers', 'catalog_stats', @@ -474,6 +479,7 @@ env.Library( '$BUILD_DIR/mongo/db/storage/record_store_base', '$BUILD_DIR/mongo/db/timeseries/bucket_catalog/bucket_catalog', '$BUILD_DIR/mongo/db/timeseries/timeseries_options', + '$BUILD_DIR/mongo/util/progress_meter', 'catalog_impl', 'clustered_collection_options', 'collection_options', @@ -566,7 +572,6 @@ env.Library( '$BUILD_DIR/mongo/db/ttl_collection_cache', '$BUILD_DIR/mongo/db/views/view_catalog_helpers', '$BUILD_DIR/mongo/db/views/views', - '$BUILD_DIR/mongo/util/namespace_string_database_name_util', 'cannot_convert_index_to_unique_info', 'clustered_collection_options', 
'collection_crud', @@ -600,7 +605,8 @@ env.Library( '$BUILD_DIR/mongo/db/query/query_planner', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/service_context', - '$BUILD_DIR/mongo/db/shard_role_api', + #TODO SERVER-77571 this should only depend on the shard_role_api library. + '$BUILD_DIR/mongo/db/shard_role', 'index_catalog', ], ) @@ -649,6 +655,7 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', ], ) @@ -680,6 +687,7 @@ if wiredtiger: 'create_collection_test.cpp', 'database_test.cpp', 'drop_database_test.cpp', + 'historical_catalogid_tracker_test.cpp', 'index_build_entry_test.cpp', 'index_builds_manager_test.cpp', 'index_key_validate_test.cpp', @@ -692,7 +700,9 @@ if wiredtiger: ], LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', + '$BUILD_DIR/mongo/db/commands/create_command', '$BUILD_DIR/mongo/db/commands/test_commands_enabled', + '$BUILD_DIR/mongo/db/concurrency/exception_util', '$BUILD_DIR/mongo/db/index/index_access_method', '$BUILD_DIR/mongo/db/index_builds_coordinator_mongod', '$BUILD_DIR/mongo/db/multitenancy', @@ -708,11 +718,10 @@ if wiredtiger: '$BUILD_DIR/mongo/db/repl/replmocks', '$BUILD_DIR/mongo/db/repl/storage_interface_impl', '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/storage/wiredtiger/storage_wiredtiger', + '$BUILD_DIR/mongo/db/timeseries/timeseries_collmod', '$BUILD_DIR/mongo/db/timeseries/timeseries_options', '$BUILD_DIR/mongo/unittest/unittest', '$BUILD_DIR/mongo/util/clock_source_mock', diff --git a/src/mongo/db/catalog/cannot_convert_index_to_unique_info.cpp b/src/mongo/db/catalog/cannot_convert_index_to_unique_info.cpp index e86b73ef1a754..cb625f0f3cb3d 100644 --- a/src/mongo/db/catalog/cannot_convert_index_to_unique_info.cpp +++ b/src/mongo/db/catalog/cannot_convert_index_to_unique_info.cpp @@ -29,7 +29,8 @@ #include "mongo/db/catalog/cannot_convert_index_to_unique_info.h" -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/db/catalog/cannot_convert_index_to_unique_info.h b/src/mongo/db/catalog/cannot_convert_index_to_unique_info.h index 076230db3d0fb..086e609bcd9dd 100644 --- a/src/mongo/db/catalog/cannot_convert_index_to_unique_info.h +++ b/src/mongo/db/catalog/cannot_convert_index_to_unique_info.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" diff --git a/src/mongo/db/catalog/capped_collection_maintenance.cpp b/src/mongo/db/catalog/capped_collection_maintenance.cpp index 7167da6873f4d..0583ba8107a94 100644 --- a/src/mongo/db/catalog/capped_collection_maintenance.cpp +++ b/src/mongo/db/catalog/capped_collection_maintenance.cpp @@ -29,8 +29,36 @@ #include "mongo/db/catalog/capped_collection_maintenance.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/namespace_string.h" #include 
"mongo/db/op_observer/op_observer.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/storage/capped_snapshots.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/mutex.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" + namespace mongo { namespace collection_internal { namespace { @@ -101,8 +129,7 @@ void cappedDeleteUntilBelowConfiguredMaximum(OperationContext* opCtx, // 'cappedFirstRecord' until the outermost WriteUnitOfWork commits or aborts. Locking the // metadata resource exclusively on the collection gives us that guarantee as it uses // two-phase locking semantics. - invariant(opCtx->lockState()->getLockMode(ResourceId(RESOURCE_METADATA, nss.ns())) == - MODE_X); + invariant(opCtx->lockState()->getLockMode(ResourceId(RESOURCE_METADATA, nss)) == MODE_X); } else { // Capped deletes not performed under the capped lock need the 'cappedFirstRecordMutex' // mutex. @@ -165,9 +192,10 @@ void cappedDeleteUntilBelowConfiguredMaximum(OperationContext* opCtx, BSONObj doc = record->data.toBson(); if (nss.isReplicated()) { OpObserver* opObserver = opCtx->getServiceContext()->getOpObserver(); - opObserver->aboutToDelete(opCtx, collection, doc); OplogDeleteEntryArgs args; + opObserver->aboutToDelete(opCtx, collection, doc, &args); + // Explicitly setting values despite them being the defaults. args.deletedDoc = nullptr; args.fromMigrate = false; diff --git a/src/mongo/db/catalog/capped_collection_maintenance.h b/src/mongo/db/catalog/capped_collection_maintenance.h index b767547213cfa..a7c18e6694424 100644 --- a/src/mongo/db/catalog/capped_collection_maintenance.h +++ b/src/mongo/db/catalog/capped_collection_maintenance.h @@ -30,6 +30,8 @@ #pragma once #include "mongo/db/catalog/collection.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" namespace mongo { namespace collection_internal { diff --git a/src/mongo/db/catalog/capped_collection_test.cpp b/src/mongo/db/catalog/capped_collection_test.cpp index b95317f207dee..40fdbcbe472da 100644 --- a/src/mongo/db/catalog/capped_collection_test.cpp +++ b/src/mongo/db/catalog/capped_collection_test.cpp @@ -27,17 +27,46 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/capped_visibility.h" -#include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" #include "mongo/db/db_raii.h" -#include "mongo/db/record_id_helpers.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/catalog/capped_utils.cpp b/src/mongo/db/catalog/capped_utils.cpp index 93dd61ed5a91f..2bda46196ac37 100644 --- a/src/mongo/db/catalog/capped_utils.cpp +++ b/src/mongo/db/catalog/capped_utils.cpp @@ -29,29 +29,54 @@ #include "mongo/db/catalog/capped_utils.h" +#include +#include +#include +#include +#include + +#include +#include +#include + #include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/catalog/drop_collection.h" -#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/catalog/rename_collection.h" #include "mongo/db/catalog/unique_collection_name.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/replication_coordinator.h" #include 
"mongo/db/s/collection_sharding_state.h" #include "mongo/db/service_context.h" -#include "mongo/util/scopeguard.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -65,8 +90,8 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam if (userInitiatedWritesAndNotPrimary) { return Status(ErrorCodes::NotWritablePrimary, - str::stream() - << "Not primary while truncating collection: " << collectionName); + str::stream() << "Not primary while truncating collection: " + << collectionName.toStringForErrorMsg()); } Database* db = autoDb.getDb(); @@ -74,21 +99,23 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam CollectionWriter collection(opCtx, collectionName); uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "emptycapped not supported on view: " << collectionName.ns(), + str::stream() << "emptycapped not supported on view: " + << collectionName.toStringForErrorMsg(), collection || !CollectionCatalog::get(opCtx)->lookupView(opCtx, collectionName)); uassert(ErrorCodes::NamespaceNotFound, "no such collection", collection); if (collectionName.isSystem() && !collectionName.isSystemDotProfile()) { return Status(ErrorCodes::IllegalOperation, - str::stream() << "Cannot truncate a system collection: " << collectionName); + str::stream() << "Cannot truncate a system collection: " + << collectionName.toStringForErrorMsg()); } if ((repl::ReplicationCoordinator::get(opCtx)->getReplicationMode() != repl::ReplicationCoordinator::modeNone) && collectionName.isOplog()) { return Status(ErrorCodes::OplogOperationUnsupported, - str::stream() - << "Cannot truncate a live oplog while replicating: " << collectionName); + str::stream() << "Cannot truncate a live oplog while replicating: " + << collectionName.toStringForErrorMsg()); } IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(collection->uuid()); @@ -101,14 +128,6 @@ Status emptyCapped(OperationContext* opCtx, const NamespaceString& collectionNam return status; } - opCtx->recoveryUnit()->onCommit( - [writableCollection](OperationContext*, boost::optional commitTime) { - // Ban reading from this collection on snapshots before now. 
- if (commitTime) { - writableCollection->setMinimumVisibleSnapshot(commitTime.value()); - } - }); - const auto service = opCtx->getServiceContext(); service->getOpObserver()->onEmptyCapped(opCtx, collection->ns(), collection->uuid()); @@ -127,11 +146,13 @@ void cloneCollectionAsCapped(OperationContext* opCtx, CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, fromNss)); if (!fromCollection) { uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "cloneCollectionAsCapped not supported for views: " << fromNss, + str::stream() << "cloneCollectionAsCapped not supported for views: " + << fromNss.toStringForErrorMsg(), !CollectionCatalog::get(opCtx)->lookupView(opCtx, fromNss)); uasserted(ErrorCodes::NamespaceNotFound, - str::stream() << "source collection " << fromNss << " does not exist"); + str::stream() << "source collection " << fromNss.toStringForErrorMsg() + << " does not exist"); } uassert(6367302, @@ -139,13 +160,14 @@ void cloneCollectionAsCapped(OperationContext* opCtx, !fromCollection->getCollectionOptions().encryptedFieldConfig); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "source collection " << fromNss + str::stream() << "source collection " << fromNss.toStringForErrorMsg() << " is currently in a drop-pending state.", !fromNss.isDropPendingNamespace()); uassert(ErrorCodes::NamespaceExists, - str::stream() << "cloneCollectionAsCapped failed - destination collection " << toNss - << " already exists. source collection: " << fromNss, + str::stream() << "cloneCollectionAsCapped failed - destination collection " + << toNss.toStringForErrorMsg() << " already exists. source collection: " + << fromNss.toStringForErrorMsg(), !CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, toNss)); // create new collection @@ -251,10 +273,11 @@ void cloneCollectionAsCapped(OperationContext* opCtx, // Go to the next document retries = 0; - } catch (const WriteConflictException&) { + } catch (const WriteConflictException& e) { CurOp::get(opCtx)->debug().additiveMetrics.incrementWriteConflicts(1); retries++; // logAndBackoff expects this to be 1 on first call. - logWriteConflictAndBackoff(retries, "cloneCollectionAsCapped", fromNss.ns()); + logWriteConflictAndBackoff( + retries, "cloneCollectionAsCapped", e.reason(), NamespaceStringOrUUID(fromNss)); // Can't use writeConflictRetry since we need to save/restore exec around call to // abandonSnapshot. @@ -279,7 +302,8 @@ void convertToCapped(OperationContext* opCtx, const NamespaceString& ns, long lo !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns); uassert(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while converting " << ns << " to a capped collection", + str::stream() << "Not primary while converting " << ns.toStringForErrorMsg() + << " to a capped collection", !userInitiatedWritesAndNotPrimary); Database* const db = coll.getDb(); @@ -297,8 +321,8 @@ void convertToCapped(OperationContext* opCtx, const NamespaceString& ns, long lo while (true) { auto tmpName = uassertStatusOKWithContext( makeUniqueCollectionName(opCtx, dbname, "tmp%%%%%.convertToCapped." 
+ shortSource), - str::stream() << "Cannot generate temporary collection namespace to convert " << ns - << " to a capped collection"); + str::stream() << "Cannot generate temporary collection namespace to convert " + << ns.toStringForErrorMsg() << " to a capped collection"); collLock.emplace(opCtx, tmpName, MODE_X); if (!CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, tmpName)) { diff --git a/src/mongo/db/catalog/capped_utils_test.cpp b/src/mongo/db/catalog/capped_utils_test.cpp index 70386a62bd0e7..cb878e3e438ec 100644 --- a/src/mongo/db/catalog/capped_utils_test.cpp +++ b/src/mongo/db/catalog/capped_utils_test.cpp @@ -27,18 +27,42 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include +#include + +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/capped_utils.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/uuid.h" namespace { @@ -96,7 +120,7 @@ bool collectionExists(OperationContext* opCtx, const NamespaceString& nss) { */ CollectionOptions getCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) { AutoGetCollectionForRead collection(opCtx, nss); - ASSERT_TRUE(collection) << "Unable to get collections options for " << nss + ASSERT_TRUE(collection) << "Unable to get collections options for " << nss.toStringForErrorMsg() << " because collection does not exist."; return collection->getCollectionOptions(); } diff --git a/src/mongo/db/catalog/capped_visibility.cpp b/src/mongo/db/catalog/capped_visibility.cpp index fd2e8f10f73a5..39ac0f3431a70 100644 --- a/src/mongo/db/catalog/capped_visibility.cpp +++ b/src/mongo/db/catalog/capped_visibility.cpp @@ -31,9 +31,21 @@ #include "mongo/db/catalog/capped_visibility.h" -#include "mongo/db/concurrency/exception_util.h" +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/operation_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/catalog/capped_visibility.h b/src/mongo/db/catalog/capped_visibility.h index d23bac0c7147f..69f16826d5ce3 100644 --- a/src/mongo/db/catalog/capped_visibility.h +++ b/src/mongo/db/catalog/capped_visibility.h @@ -29,11 +29,27 @@ #pragma once -#include 
"mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/record_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/basic.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/decorable.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/string_map.h" namespace mongo { class OperationContext; diff --git a/src/mongo/db/catalog/capped_visibility_test.cpp b/src/mongo/db/catalog/capped_visibility_test.cpp index d235c4cb8fb0d..c4f7f16bd2db7 100644 --- a/src/mongo/db/catalog/capped_visibility_test.cpp +++ b/src/mongo/db/catalog/capped_visibility_test.cpp @@ -27,40 +27,44 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/catalog/capped_visibility.h" -#include "mongo/db/operation_context_noop.h" -#include "mongo/db/storage/recovery_unit_noop.h" -#include "mongo/unittest/unittest.h" + +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { -using OpCtxAndRecoveryUnit = - std::pair, std::unique_ptr>; +struct ClientAndOpCtx { + ClientAndOpCtx(ServiceContext* service, std::string desc) + : client(service->makeClient(std::move(desc), nullptr)), + opCtx(client->makeOperationContext()) {} -OpCtxAndRecoveryUnit makeOpCtxAndRecoveryUnit() { - auto opCtx = std::make_unique(); - auto ru = std::make_unique(); - ru->setOperationContext(opCtx.get()); - return {std::move(opCtx), std::move(ru)}; -} + ServiceContext::UniqueClient client; + ServiceContext::UniqueOperationContext opCtx; +}; + +class CappedVisibilityTest : public unittest::Test, public ScopedGlobalServiceContextForTest {}; -// Basic RecordId hole -TEST(CappedVisibilityTest, BasicHole) { +TEST_F(CappedVisibilityTest, BasicRecordIdHole) { CappedVisibilityObserver observer("test"); observer.setRecordImmediatelyVisible(RecordId(1)); + ClientAndOpCtx cando1(getServiceContext(), "Client1"); + ClientAndOpCtx cando2(getServiceContext(), "Client2"); - auto [op1, ru1] = makeOpCtxAndRecoveryUnit(); - auto [op2, ru2] = makeOpCtxAndRecoveryUnit(); - auto writer1 = observer.registerWriter(ru1.get()); - auto writer2 = observer.registerWriter(ru2.get()); + auto writer1 = observer.registerWriter(cando1.opCtx->recoveryUnit()); + auto writer2 = observer.registerWriter(cando2.opCtx->recoveryUnit()); writer1->registerRecordId(RecordId(2)); writer2->registerRecordId(RecordId(3)); - ru2->commitUnitOfWork(); + + cando2.opCtx->recoveryUnit()->commitUnitOfWork(); // Only RecordId 1 should be visible. { @@ -70,7 +74,7 @@ TEST(CappedVisibilityTest, BasicHole) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(3))); } - ru1->commitUnitOfWork(); + cando1.opCtx->recoveryUnit()->commitUnitOfWork(); // All RecordIds should be visible now. 
{ @@ -82,12 +86,12 @@ TEST(CappedVisibilityTest, BasicHole) { } } -TEST(CappedVisibilityTest, RollBack) { +TEST_F(CappedVisibilityTest, RollBack) { CappedVisibilityObserver observer("test"); observer.setRecordImmediatelyVisible(RecordId(1)); - auto [op1, ru1] = makeOpCtxAndRecoveryUnit(); - auto writer1 = observer.registerWriter(ru1.get()); + ClientAndOpCtx cando1(getServiceContext(), "Client1"); + auto writer1 = observer.registerWriter(cando1.opCtx->recoveryUnit()); writer1->registerRecordId(RecordId(2)); // Only RecordId 1 should be visible. @@ -97,7 +101,7 @@ TEST(CappedVisibilityTest, RollBack) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(2))); } - ru1->abortUnitOfWork(); + cando1.opCtx->recoveryUnit()->abortUnitOfWork(); { auto snapshot = observer.makeSnapshot(); @@ -108,18 +112,18 @@ TEST(CappedVisibilityTest, RollBack) { } } -TEST(CappedVisibilityTest, RollBackHole) { +TEST_F(CappedVisibilityTest, RollBackHole) { CappedVisibilityObserver observer("test"); observer.setRecordImmediatelyVisible(RecordId(1)); - auto [op1, ru1] = makeOpCtxAndRecoveryUnit(); - auto [op2, ru2] = makeOpCtxAndRecoveryUnit(); - auto writer1 = observer.registerWriter(ru1.get()); - auto writer2 = observer.registerWriter(ru2.get()); + ClientAndOpCtx cando1(getServiceContext(), "Client1"); + ClientAndOpCtx cando2(getServiceContext(), "Client2"); + auto writer1 = observer.registerWriter(cando1.opCtx->recoveryUnit()); + auto writer2 = observer.registerWriter(cando2.opCtx->recoveryUnit()); writer1->registerRecordId(RecordId(2)); writer2->registerRecordId(RecordId(3)); - ru2->commitUnitOfWork(); + cando2.opCtx->recoveryUnit()->commitUnitOfWork(); // Only RecordId 1 should be visible. { @@ -129,7 +133,7 @@ TEST(CappedVisibilityTest, RollBackHole) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(3))); } - ru1->abortUnitOfWork(); + cando1.opCtx->recoveryUnit()->abortUnitOfWork(); // All committed RecordIds should be visible now. { @@ -143,14 +147,14 @@ TEST(CappedVisibilityTest, RollBackHole) { } // Hole with multiple uncommitted writers and one writer hasn't register any records yet. -TEST(CappedVisibilityTest, UnregisteredRecords) { +TEST_F(CappedVisibilityTest, UnregisteredRecords) { CappedVisibilityObserver observer("test"); observer.setRecordImmediatelyVisible(RecordId(1)); - auto [op1, ru1] = makeOpCtxAndRecoveryUnit(); - auto [op2, ru2] = makeOpCtxAndRecoveryUnit(); - auto writer1 = observer.registerWriter(ru1.get()); - auto writer2 = observer.registerWriter(ru2.get()); + ClientAndOpCtx cando1(getServiceContext(), "Client1"); + ClientAndOpCtx cando2(getServiceContext(), "Client2"); + auto writer1 = observer.registerWriter(cando1.opCtx->recoveryUnit()); + auto writer2 = observer.registerWriter(cando2.opCtx->recoveryUnit()); writer1->registerRecordId(RecordId(2)); @@ -172,7 +176,7 @@ TEST(CappedVisibilityTest, UnregisteredRecords) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(3))); } - ru1->commitUnitOfWork(); + cando1.opCtx->recoveryUnit()->commitUnitOfWork(); // RecordIds except for 3 should be visible. { @@ -182,7 +186,7 @@ TEST(CappedVisibilityTest, UnregisteredRecords) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(3))); } - ru2->commitUnitOfWork(); + cando2.opCtx->recoveryUnit()->commitUnitOfWork(); // All RecordIds should be visible now. 
{ @@ -194,17 +198,16 @@ TEST(CappedVisibilityTest, UnregisteredRecords) { } } -TEST(CappedVisibilityTest, RegisterRange) { +TEST_F(CappedVisibilityTest, RegisterRange) { CappedVisibilityObserver observer("test"); observer.setRecordImmediatelyVisible(RecordId(1)); - auto [op1, ru1] = makeOpCtxAndRecoveryUnit(); - auto [op2, ru2] = makeOpCtxAndRecoveryUnit(); - auto writer1 = observer.registerWriter(ru1.get()); - auto writer2 = observer.registerWriter(ru2.get()); + ClientAndOpCtx cando1(getServiceContext(), "Client1"); + ClientAndOpCtx cando2(getServiceContext(), "Client2"); + auto writer1 = observer.registerWriter(cando1.opCtx->recoveryUnit()); + auto writer2 = observer.registerWriter(cando2.opCtx->recoveryUnit()); writer1->registerRecordIds(RecordId(2), RecordId(5)); - writer2->registerRecordIds(RecordId(6), RecordId(10)); // The highest visible record should be 1. @@ -216,7 +219,7 @@ TEST(CappedVisibilityTest, RegisterRange) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(10))); } - ru2->commitUnitOfWork(); + cando2.opCtx->recoveryUnit()->commitUnitOfWork(); // The highest visible record should be 1. { @@ -227,7 +230,8 @@ TEST(CappedVisibilityTest, RegisterRange) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(10))); } - ru1->commitUnitOfWork(); + cando1.opCtx->recoveryUnit()->commitUnitOfWork(); + // All records should be visible. { auto snapshot = observer.makeSnapshot(); @@ -238,14 +242,14 @@ TEST(CappedVisibilityTest, RegisterRange) { } } -TEST(CappedVisibilityTest, MultiRegistration) { +TEST_F(CappedVisibilityTest, MultiRegistration) { CappedVisibilityObserver observer("test"); observer.setRecordImmediatelyVisible(RecordId(1)); - auto [op1, ru1] = makeOpCtxAndRecoveryUnit(); - auto [op2, ru2] = makeOpCtxAndRecoveryUnit(); - auto writer1 = observer.registerWriter(ru1.get()); - auto writer2 = observer.registerWriter(ru2.get()); + ClientAndOpCtx cando1(getServiceContext(), "Client1"); + ClientAndOpCtx cando2(getServiceContext(), "Client2"); + auto writer1 = observer.registerWriter(cando1.opCtx->recoveryUnit()); + auto writer2 = observer.registerWriter(cando2.opCtx->recoveryUnit()); writer1->registerRecordId(RecordId(2)); writer2->registerRecordId(RecordId(3)); @@ -262,7 +266,7 @@ TEST(CappedVisibilityTest, MultiRegistration) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(5))); } - ru2->commitUnitOfWork(); + cando2.opCtx->recoveryUnit()->commitUnitOfWork(); // The highest visible record should still be 1. { @@ -274,7 +278,7 @@ TEST(CappedVisibilityTest, MultiRegistration) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(5))); } - ru1->commitUnitOfWork(); + cando1.opCtx->recoveryUnit()->commitUnitOfWork(); // All records should be visible. 
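Taken together, the converted tests above all exercise the same protocol; a condensed sketch using only the CappedVisibilityObserver calls that appear in this diff (the opCtx is assumed to come from a ClientAndOpCtx-style helper as above):

```cpp
// Condensed from the tests above: a record registered by an uncommitted writer
// stays hidden, as does every later record, until that writer commits or aborts.
CappedVisibilityObserver observer("example");
observer.setRecordImmediatelyVisible(RecordId(1));

auto writer = observer.registerWriter(opCtx->recoveryUnit());
writer->registerRecordId(RecordId(2));

auto before = observer.makeSnapshot();
ASSERT_TRUE(before.isRecordVisible(RecordId(1)));
ASSERT_FALSE(before.isRecordVisible(RecordId(2)));  // still hidden

opCtx->recoveryUnit()->commitUnitOfWork();

auto after = observer.makeSnapshot();
ASSERT_TRUE(after.isRecordVisible(RecordId(2)));  // visible once committed
```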
{ @@ -309,21 +313,21 @@ class CappedCollection { }; // Tests writes to multiple capped collections at once -TEST(CappedVisibilityTest, MultiCollection) { +TEST_F(CappedVisibilityTest, MultiCollection) { CappedCollection coll1("coll1"); CappedCollection coll2("coll2"); coll1.insertRecordImmediately(RecordId(1)); coll2.insertRecordImmediately(RecordId(11)); - auto [op1, ru1] = makeOpCtxAndRecoveryUnit(); - auto [op2, ru2] = makeOpCtxAndRecoveryUnit(); + ClientAndOpCtx cando1(getServiceContext(), "Client1"); + ClientAndOpCtx cando2(getServiceContext(), "Client2"); - coll1.insertRecord(ru1.get(), RecordId(2)); - coll1.insertRecord(ru2.get(), RecordId(3)); + coll1.insertRecord(cando1.opCtx->recoveryUnit(), RecordId(2)); + coll1.insertRecord(cando2.opCtx->recoveryUnit(), RecordId(3)); - coll2.insertRecord(ru1.get(), RecordId(12)); - coll2.insertRecord(ru2.get(), RecordId(13)); + coll2.insertRecord(cando1.opCtx->recoveryUnit(), RecordId(12)); + coll2.insertRecord(cando2.opCtx->recoveryUnit(), RecordId(13)); // Only the first record should be visible to both collections. { @@ -340,7 +344,7 @@ TEST(CappedVisibilityTest, MultiCollection) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(13))); } - ru2->commitUnitOfWork(); + cando2.opCtx->recoveryUnit()->commitUnitOfWork(); // Nothing should become newly visible { @@ -357,7 +361,7 @@ TEST(CappedVisibilityTest, MultiCollection) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(13))); } - ru1->commitUnitOfWork(); + cando1.opCtx->recoveryUnit()->commitUnitOfWork(); // All RecordIds should be visible now. { @@ -376,5 +380,6 @@ TEST(CappedVisibilityTest, MultiCollection) { ASSERT_FALSE(snapshot.isRecordVisible(RecordId(14))); } } + } // namespace } // namespace mongo diff --git a/src/mongo/db/catalog/catalog_control.cpp b/src/mongo/db/catalog/catalog_control.cpp index c7ccba9bdb05e..8d6204223d1de 100644 --- a/src/mongo/db/catalog/catalog_control.cpp +++ b/src/mongo/db/catalog/catalog_control.cpp @@ -30,23 +30,43 @@ #define LOGV2_FOR_RECOVERY(ID, DLEVEL, MESSAGE, ...) 
\ LOGV2_DEBUG_OPTIONS(ID, DLEVEL, {logv2::LogComponent::kStorageRecovery}, MESSAGE, ##__VA_ARGS__) -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/catalog_control.h" - #include "mongo/db/catalog/catalog_stats.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/historical_catalogid_tracker.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/database_name.h" -#include "mongo/db/ftdc/ftdc_mongod.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" #include "mongo/db/rebuild_indexes.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/timeseries/timeseries_extended_range.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -73,32 +93,14 @@ void reopenAllDatabasesAndReloadCollectionCatalog(OperationContext* opCtx, for (auto&& dbName : databasesToOpen) { LOGV2_FOR_RECOVERY(23992, 1, "openCatalog: dbholder reopening database", logAttrs(dbName)); auto db = databaseHolder->openDb(opCtx, dbName); - invariant(db, str::stream() << "failed to reopen database " << dbName.toString()); + invariant(db, + str::stream() << "failed to reopen database " << dbName.toStringForErrorMsg()); for (auto&& collNss : catalogWriter.value()->getAllCollectionNamesFromDb(opCtx, dbName)) { // Note that the collection name already includes the database component. auto collection = catalogWriter.value()->lookupCollectionByNamespace(opCtx, collNss); invariant(collection, - str::stream() - << "failed to get valid collection pointer for namespace " << collNss); - - if (previousCatalogState.minVisibleTimestampMap.count(collection->uuid()) > 0) { - // After rolling back to a stable timestamp T, the minimum visible timestamp for - // each collection must be reset to (at least) its value at T. Additionally, there - // cannot exist a minimum visible timestamp greater than lastApplied. This allows us - // to upper bound what the minimum visible timestamp can be coming out of rollback. - // - // Because we only save the latest minimum visible timestamp for each collection, we - // bound the minimum visible timestamp (where necessary) to the stable timestamp. - // The benefit of fine grained tracking is assumed to be low-value compared to the - // cost/effort. 
- auto minVisible = std::min( - stableTimestamp, - previousCatalogState.minVisibleTimestampMap.find(collection->uuid())->second); - auto writableCollection = - catalogWriter.value()->lookupCollectionByUUIDForMetadataWrite( - opCtx, collection->uuid()); - writableCollection->setMinimumVisibleSnapshot(minVisible); - } + str::stream() << "failed to get valid collection pointer for namespace " + << collNss.toStringForErrorMsg()); if (auto it = previousCatalogState.minValidTimestampMap.find(collection->uuid()); it != previousCatalogState.minValidTimestampMap.end()) { @@ -165,35 +167,20 @@ PreviousCatalogState closeCatalog(OperationContext* opCtx) { auto databaseHolder = DatabaseHolder::get(opCtx); auto catalog = CollectionCatalog::get(opCtx); for (auto&& dbName : allDbs) { - for (auto collIt = catalog->begin(opCtx, dbName); collIt != catalog->end(opCtx); ++collIt) { - auto coll = *collIt; + for (auto&& coll : catalog->range(dbName)) { if (!coll) { break; } - boost::optional minVisible = coll->getMinimumVisibleSnapshot(); - - // If there's a minimum visible, invariant there's also a UUID. - if (minVisible) { - LOGV2_DEBUG(20269, - 1, - "closeCatalog: preserving min visible timestamp.", - "coll_ns"_attr = coll->ns(), - "uuid"_attr = coll->uuid(), - "minVisible"_attr = minVisible); - previousCatalogState.minVisibleTimestampMap[coll->uuid()] = *minVisible; - } - - boost::optional minValid = coll->getMinimumValidSnapshot(); - // If there's a minimum valid, invariant there's also a UUID. + boost::optional minValid = coll->getMinimumValidSnapshot(); if (minValid) { LOGV2_DEBUG(6825500, 1, "closeCatalog: preserving min valid timestamp.", "ns"_attr = coll->ns(), "uuid"_attr = coll->uuid(), - "minVisible"_attr = minValid); + "minValid"_attr = minValid); previousCatalogState.minValidTimestampMap[coll->uuid()] = *minValid; } @@ -244,7 +231,7 @@ void openCatalog(OperationContext* opCtx, // Remove catalogId mappings for larger timestamp than 'stableTimestamp'. 
CollectionCatalog::write(opCtx, [stableTimestamp](CollectionCatalog& catalog) { - catalog.cleanupForCatalogReopen(stableTimestamp); + catalog.catalogIdTracker().rollback(stableTimestamp); }); // Ignore orphaned idents because this function is used during rollback and not at @@ -269,8 +256,9 @@ void openCatalog(OperationContext* opCtx, if (!indexSpecs.isOK() || indexSpecs.getValue().first.empty()) { fassert(40689, {ErrorCodes::InternalError, - str::stream() << "failed to get index spec for index " << indexName - << " in collection " << indexIdentifier.nss}); + str::stream() + << "failed to get index spec for index " << indexName << " in collection " + << indexIdentifier.nss.toStringForErrorMsg()}); } auto indexesToRebuild = indexSpecs.getValue(); invariant( @@ -291,7 +279,8 @@ void openCatalog(OperationContext* opCtx, NamespaceString collNss(entry.first); auto collection = catalog->lookupCollectionByNamespace(opCtx, collNss); - invariant(collection, str::stream() << "couldn't get collection " << collNss.toString()); + invariant(collection, + str::stream() << "couldn't get collection " << collNss.toStringForErrorMsg()); for (const auto& indexName : entry.second.first) { LOGV2(20275, @@ -301,7 +290,7 @@ void openCatalog(OperationContext* opCtx, "index"_attr = indexName); } - std::vector indexSpecs = entry.second.second; + const std::vector& indexSpecs = entry.second.second; fassert(40690, rebuildIndexesOnCollection(opCtx, collection, indexSpecs, RepairData::kNo)); } diff --git a/src/mongo/db/catalog/catalog_control.h b/src/mongo/db/catalog/catalog_control.h index 20fe8ab511b79..090292cea28d2 100644 --- a/src/mongo/db/catalog/catalog_control.h +++ b/src/mongo/db/catalog/catalog_control.h @@ -29,7 +29,9 @@ #include +#include "mongo/bson/timestamp.h" #include "mongo/db/operation_context.h" +#include "mongo/util/uuid.h" namespace mongo { namespace catalog { @@ -38,7 +40,6 @@ using MinVisibleTimestamp = Timestamp; using MinVisibleTimestampMap = std::map; using RequiresTimestampExtendedRangeSupportMap = std::map; struct PreviousCatalogState { - MinVisibleTimestampMap minVisibleTimestampMap; MinVisibleTimestampMap minValidTimestampMap; RequiresTimestampExtendedRangeSupportMap requiresTimestampExtendedRangeSupportMap; }; diff --git a/src/mongo/db/catalog/catalog_control_test.cpp b/src/mongo/db/catalog/catalog_control_test.cpp index 901c69450eeb6..886b0128bd0e0 100644 --- a/src/mongo/db/catalog/catalog_control_test.cpp +++ b/src/mongo/db/catalog/catalog_control_test.cpp @@ -29,13 +29,22 @@ #include "mongo/db/catalog/catalog_control.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_mock.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/index_builds_coordinator.h" #include "mongo/db/index_builds_coordinator_mongod.h" -#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -76,7 +85,7 @@ TEST_F(CatalogControlTest, CloseAndOpenCatalog) { Lock::GlobalLock globalLk(opCtx.get(), MODE_X); auto previousState = catalog::closeCatalog(opCtx.get()); - ASSERT_EQUALS(0U, previousState.minVisibleTimestampMap.size()); + ASSERT_EQUALS(0U, 
previousState.minValidTimestampMap.size()); catalog::openCatalog(opCtx.get(), {}, Timestamp()); } diff --git a/src/mongo/db/catalog/catalog_helper.cpp b/src/mongo/db/catalog/catalog_helper.cpp index 84a6e31632197..8d412d22100ef 100644 --- a/src/mongo/db/catalog/catalog_helper.cpp +++ b/src/mongo/db/catalog/catalog_helper.cpp @@ -29,7 +29,18 @@ #include "mongo/db/catalog/catalog_helper.h" +#include +#include +#include +#include + +#include + #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/database_name.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" namespace mongo::catalog_helper { namespace { @@ -68,10 +79,11 @@ void acquireCollectionLocksInResourceIdOrder( // ResourceId(RESOURCE_COLLECTION, nss). temp.insert(catalog->resolveNamespaceStringOrUUID(opCtx, nsOrUUID)); for (const auto& secondaryNssOrUUID : secondaryNssOrUUIDs) { - invariant(secondaryNssOrUUID.db() == nsOrUUID.db(), + invariant(secondaryNssOrUUID.dbName() == nsOrUUID.dbName(), str::stream() << "Unable to acquire locks for collections across different databases (" - << secondaryNssOrUUID << " vs " << nsOrUUID << ")"); + << secondaryNssOrUUID.toStringForErrorMsg() << " vs " + << nsOrUUID.toStringForErrorMsg() << ")"); temp.insert(catalog->resolveNamespaceStringOrUUID(opCtx, secondaryNssOrUUID)); } diff --git a/src/mongo/db/catalog/catalog_helper.h b/src/mongo/db/catalog/catalog_helper.h index cd2d70e6d7fca..22ae97c56ace9 100644 --- a/src/mongo/db/catalog/catalog_helper.h +++ b/src/mongo/db/catalog/catalog_helper.h @@ -29,8 +29,15 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/util/time_support.h" namespace mongo::catalog_helper { diff --git a/src/mongo/db/catalog/catalog_stats.cpp b/src/mongo/db/catalog/catalog_stats.cpp index 4c456a8d7fab5..66143681435fa 100644 --- a/src/mongo/db/catalog/catalog_stats.cpp +++ b/src/mongo/db/catalog/catalog_stats.cpp @@ -30,11 +30,20 @@ #include "mongo/db/catalog/catalog_stats.h" +#include + +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/views_for_database.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/db_raii.h" -#include "mongo/logv2/log.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/catalog/catalog_test_fixture.cpp b/src/mongo/db/catalog/catalog_test_fixture.cpp index 47af379fdc22f..d370ebe6475d0 100644 --- a/src/mongo/db/catalog/catalog_test_fixture.cpp +++ b/src/mongo/db/catalog/catalog_test_fixture.cpp @@ -27,15 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include #include "mongo/db/catalog/catalog_test_fixture.h" - +#include "mongo/db/client.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/unittest/assert.h" namespace mongo { @@ -44,7 +46,6 @@ void CatalogTestFixture::setUp() { ServiceContextMongoDTest::setUp(); auto service = getServiceContext(); - _storage = std::make_unique(); _opCtx = cc().makeOperationContext(); // Set up ReplicationCoordinator and ensure that we are primary. @@ -52,13 +53,17 @@ void CatalogTestFixture::setUp() { ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY)); repl::ReplicationCoordinator::set(service, std::move(replCoord)); + // Setup ReplicationInterface + auto storageInterface = std::make_unique(); + _storage = storageInterface.get(); + repl::StorageInterface::set(service, std::move(storageInterface)); + // Set up oplog collection. If the WT storage engine is used, the oplog collection is expected // to exist when fetching the next opTime (LocalOplogInfo::getNextOpTimes) to use for a write. repl::createOplog(operationContext()); } void CatalogTestFixture::tearDown() { - _storage.reset(); _opCtx.reset(); // Tear down mongod. @@ -70,7 +75,7 @@ OperationContext* CatalogTestFixture::operationContext() { } repl::StorageInterface* CatalogTestFixture::storageInterface() { - return _storage.get(); + return _storage; } } // namespace mongo diff --git a/src/mongo/db/catalog/catalog_test_fixture.h b/src/mongo/db/catalog/catalog_test_fixture.h index b60b2de088f79..7cc1460aaaee2 100644 --- a/src/mongo/db/catalog/catalog_test_fixture.h +++ b/src/mongo/db/catalog/catalog_test_fixture.h @@ -29,8 +29,12 @@ #pragma once +#include + #include "mongo/db/operation_context.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" namespace mongo { @@ -54,7 +58,7 @@ class CatalogTestFixture : public ServiceContextMongoDTest { void tearDown() override; private: - std::unique_ptr _storage; + repl::StorageInterface* _storage; ServiceContext::UniqueOperationContext _opCtx; }; diff --git a/src/mongo/db/catalog/clustered_collection_util.cpp b/src/mongo/db/catalog/clustered_collection_util.cpp index 4255e8c82f245..6190c79418195 100644 --- a/src/mongo/db/catalog/clustered_collection_util.cpp +++ b/src/mongo/db/catalog/clustered_collection_util.cpp @@ -28,13 +28,21 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/db/catalog/clustered_collection_util.h" +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/namespace_string.h" -#include "mongo/logv2/log.h" -#include "mongo/util/represent_as.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/catalog/clustered_collection_util.h b/src/mongo/db/catalog/clustered_collection_util.h index 39671c1de6e76..4941c2457d690 100644 --- a/src/mongo/db/catalog/clustered_collection_util.h +++ b/src/mongo/db/catalog/clustered_collection_util.h @@ -29,7 
+29,13 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/catalog/coll_mod.cpp b/src/mongo/db/catalog/coll_mod.cpp index d100a95814f9a..436f058abe7bd 100644 --- a/src/mongo/db/catalog/coll_mod.cpp +++ b/src/mongo/db/catalog/coll_mod.cpp @@ -29,39 +29,85 @@ #include "mongo/db/catalog/coll_mod.h" -#include "mongo/db/stats/counters.h" #include - -#include "mongo/db/catalog/clustered_collection_util.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/coll_mod_index.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_options_gen.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" -#include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/coll_mod_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/s/shard_key_index_util.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/stats/counters.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_options.h" #include "mongo/db/ttl_collection_cache.h" +#include "mongo/db/views/view.h" #include "mongo/db/views/view_catalog_helpers.h" -#include "mongo/idl/command_generic_argument.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" -#include "mongo/util/overloaded_visitor.h" +#include 
"mongo/util/namespace_string_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -87,7 +133,7 @@ void assertNoMovePrimaryInProgress(OperationContext* opCtx, NamespaceString cons LOGV2(4945200, "assertNoMovePrimaryInProgress", logAttrs(nss)); uasserted(ErrorCodes::MovePrimaryInProgress, - "movePrimary is in progress for namespace " + nss.toString()); + "movePrimary is in progress for namespace " + nss.toStringForErrorMsg()); } } } catch (const DBException& ex) { @@ -135,6 +181,10 @@ Status getOnlySupportedOnTimeseriesError(StringData fieldName) { boost::optional getShardKeyPattern(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, const CollMod& cmd) { + if (!Grid::get(opCtx)->isInitialized()) { + return boost::none; + } + try { const NamespaceString nss = CollectionCatalog::get(opCtx)->resolveNamespaceStringOrUUID(opCtx, nsOrUUID); @@ -144,6 +194,7 @@ boost::optional getShardKeyPattern(OperationContext* opCtx, } catch (ExceptionFor&) { // The collection is unsharded or doesn't exist. } + return boost::none; } @@ -287,7 +338,8 @@ StatusWith> parseCollModRequest( cmrIndex->idx = coll->getIndexCatalog()->findIndexByName(opCtx, indexName); if (!cmrIndex->idx) { return {ErrorCodes::IndexNotFound, - str::stream() << "cannot find index " << indexName << " for ns " << nss}; + str::stream() << "cannot find index " << indexName << " for ns " + << nss.toStringForErrorMsg()}; } } else { std::vector indexes; @@ -303,7 +355,8 @@ StatusWith> parseCollModRequest( << indexes[1]->infoObj()}; } else if (indexes.empty()) { return {ErrorCodes::IndexNotFound, - str::stream() << "cannot find index " << keyPattern << " for ns " << nss}; + str::stream() << "cannot find index " << keyPattern << " for ns " + << nss.toStringForErrorMsg()}; } cmrIndex->idx = indexes[0]; @@ -703,7 +756,8 @@ StatusWith _setUpCollModIndexUnique( if (!collection) { checkCollectionUUIDMismatch(opCtx, nss, CollectionPtr(), cmd.getCollectionUUID()); return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "ns does not exist for unique index conversion: " << nss); + str::stream() << "ns does not exist for unique index conversion: " + << nss.toStringForErrorMsg()); } // Scan index for duplicates without exclusive access. @@ -821,7 +875,8 @@ Status _collModInternal(OperationContext* opCtx, if (userInitiatedWritesAndNotPrimary) { return Status(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while setting collection options on " << nss); + str::stream() << "Not primary while setting collection options on " + << nss.toStringForErrorMsg()); } auto statusW = parseCollModRequest(opCtx, nss, coll.getCollection(), cmd, shardKeyPattern); @@ -845,7 +900,7 @@ Status _collModInternal(OperationContext* opCtx, LOGV2(5324200, "CMD: collMod", "cmdObj"_attr = cmd.toBSON(BSONObj())); } - return writeConflictRetry(opCtx, "collMod", nss.ns(), [&] { + return writeConflictRetry(opCtx, "collMod", nss, [&] { WriteUnitOfWork wunit(opCtx); // Handle collMod on a view and return early. 
The CollectionCatalog handles the creation of @@ -930,6 +985,8 @@ Status _collModInternal(OperationContext* opCtx, auto [newOptions, changed] = res.getValue(); if (changed) { coll.getWritableCollection(opCtx)->setTimeseriesOptions(opCtx, newOptions); + coll.getWritableCollection(opCtx)->setTimeseriesBucketingParametersChanged(opCtx, + true); } } diff --git a/src/mongo/db/catalog/coll_mod.h b/src/mongo/db/catalog/coll_mod.h index f2b0c1702d5e6..51f37bc7f262a 100644 --- a/src/mongo/db/catalog/coll_mod.h +++ b/src/mongo/db/catalog/coll_mod.h @@ -29,8 +29,11 @@ #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/coll_mod_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog.h" namespace mongo { diff --git a/src/mongo/db/catalog/coll_mod_index.cpp b/src/mongo/db/catalog/coll_mod_index.cpp index 807a15372e2b8..0903d938e98cf 100644 --- a/src/mongo/db/catalog/coll_mod_index.cpp +++ b/src/mongo/db/catalog/coll_mod_index.cpp @@ -30,18 +30,39 @@ #include "mongo/db/catalog/coll_mod_index.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/catalog/cannot_convert_index_to_unique_info.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/catalog/throttle_cursor.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/storage/index_entry_comparison.h" #include "mongo/db/storage/key_string.h" #include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/sorted_data_interface.h" #include "mongo/db/ttl_collection_cache.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" -#include "mongo/util/shared_buffer_fragment.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -141,28 +162,6 @@ void _processCollModIndexRequestHidden(OperationContext* opCtx, } } -/** - * Returns set of keys for a document in an index. - */ -void getKeysForIndex(OperationContext* opCtx, - const CollectionPtr& collection, - const SortedDataIndexAccessMethod* accessMethod, - const BSONObj& doc, - KeyStringSet* keys) { - SharedBufferFragmentBuilder pooledBuilder(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); - - accessMethod->getKeys(opCtx, - collection, - pooledBuilder, - doc, - InsertDeleteOptions::ConstraintEnforcementMode::kEnforceConstraints, - SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, - keys, - nullptr, // multikeyMetadataKeys - nullptr, // multikeyPaths - boost::none); // loc -} - /** * Adjusts unique setting on an index to true. 
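As in the _collModInternal hunk above, writeConflictRetry now takes the NamespaceString itself instead of nss.ns(). A stripped-down sketch of the call shape; the lambda body is a placeholder, not the real collMod logic:

```cpp
// Shape of the retry loop only. writeConflictRetry re-runs the lambda when a
// WriteConflictException is thrown; the real body applies the parsed request.
return writeConflictRetry(opCtx, "collMod", nss, [&] {
    WriteUnitOfWork wunit(opCtx);
    // ... apply the requested collection / index option changes ...
    wunit.commit();
    return Status::OK();
});
```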
*/ @@ -353,7 +352,7 @@ std::list> scanIndexForDuplicates( OperationContext* opCtx, const CollectionPtr& collection, const IndexDescriptor* idx, - boost::optional firstKeyString) { + boost::optional firstKeyString) { auto entry = idx->getEntry(); auto accessMethod = entry->accessMethod()->asSortedData(); // Only scans for the duplicates on one key if 'firstKeyString' is provided. @@ -362,10 +361,10 @@ std::list> scanIndexForDuplicates( // Starting point of index traversal. if (!firstKeyString) { auto keyStringVersion = accessMethod->getSortedDataInterface()->getKeyStringVersion(); - KeyString::Builder firstKeyStringBuilder(keyStringVersion, - BSONObj(), - entry->ordering(), - KeyString::Discriminator::kExclusiveBefore); + key_string::Builder firstKeyStringBuilder(keyStringVersion, + BSONObj(), + entry->ordering(), + key_string::Discriminator::kExclusiveBefore); firstKeyString = firstKeyStringBuilder.getValueCopy(); } diff --git a/src/mongo/db/catalog/coll_mod_index.h b/src/mongo/db/catalog/coll_mod_index.h index a00413a33bb38..4528dfd357dea 100644 --- a/src/mongo/db/catalog/coll_mod_index.h +++ b/src/mongo/db/catalog/coll_mod_index.h @@ -27,12 +27,19 @@ * it in the license file. */ +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/coll_mod_gen.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/storage/key_string.h" @@ -77,7 +84,7 @@ std::list> scanIndexForDuplicates( OperationContext* opCtx, const CollectionPtr& collection, const IndexDescriptor* idx, - boost::optional firstKeyString = {}); + boost::optional firstKeyString = {}); /** * Builds a BSONArray of the violations with duplicate index keys and returns the formatted error diff --git a/src/mongo/db/catalog/coll_mod_test.cpp b/src/mongo/db/catalog/coll_mod_test.cpp index 818b0cc8fdb99..14c157d932ece 100644 --- a/src/mongo/db/catalog/coll_mod_test.cpp +++ b/src/mongo/db/catalog/coll_mod_test.cpp @@ -29,10 +29,41 @@ #include "mongo/db/catalog/coll_mod.h" -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/coll_mod_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_collmod.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -102,5 +133,103 @@ TEST(CollModOptionTest, makeDryRunRequest) { 
ASSERT_TRUE(dryRunRequest.getIndex()->getUnique() && *dryRunRequest.getIndex()->getUnique()); ASSERT_TRUE(dryRunRequest.getDryRun() && *dryRunRequest.getDryRun()); } + +class CollModTest : public ServiceContextMongoDTest { +protected: + void setUp() override { + // Set up mongod. + ServiceContextMongoDTest::setUp(); + + auto service = getServiceContext(); + + // Set up ReplicationCoordinator and ensure that we are primary. + auto replCoord = std::make_unique(service); + ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY)); + repl::ReplicationCoordinator::set(service, std::move(replCoord)); + } + void tearDown() override { + // Tear down mongod. + ServiceContextMongoDTest::tearDown(); + } +}; + +ServiceContext::UniqueOperationContext makeOpCtx() { + auto opCtx = cc().makeOperationContext(); + repl::createOplog(opCtx.get()); + return opCtx; +} + +TEST_F(CollModTest, CollModTimeseriesWithFixedBucket) { + NamespaceString curNss = NamespaceString::createNamespaceString_forTest("test.curColl"); + auto bucketsColl = + NamespaceString::createNamespaceString_forTest("test.system.buckets.curColl"); + + auto opCtx = makeOpCtx(); + auto tsOptions = TimeseriesOptions("t"); + tsOptions.setBucketRoundingSeconds(100); + tsOptions.setBucketMaxSpanSeconds(100); + CreateCommand cmd = CreateCommand(curNss); + cmd.setTimeseries(std::move(tsOptions)); + uassertStatusOK(createCollection(opCtx.get(), cmd)); + + // Run collMod without changing the bucket span and validate that the + // timeseriesBucketingParametersMayHaveChanged() returns false. + CollMod collModCmd(curNss); + CollModRequest collModRequest; + stdx::variant expireAfterSeconds = 100; + collModRequest.setExpireAfterSeconds(expireAfterSeconds); + collModCmd.setCollModRequest(collModRequest); + BSONObjBuilder result; + uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation( + opCtx.get(), curNss, collModCmd, true, &result)); + { + AutoGetCollectionForRead bucketsCollForRead(opCtx.get(), bucketsColl); + ASSERT_FALSE(bucketsCollForRead->timeseriesBucketingParametersMayHaveChanged()); + } + + // Run collMod which changes the bucket span and validate that the + // timeseriesBucketingParametersMayHaveChanged() returns true. 
+ CollModTimeseries collModTs; + collModTs.setBucketMaxSpanSeconds(200); + collModTs.setBucketRoundingSeconds(200); + collModRequest.setTimeseries(std::move(collModTs)); + collModCmd.setCollModRequest(std::move(collModRequest)); + uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation( + opCtx.get(), curNss, collModCmd, true, &result)); + { + AutoGetCollectionForRead bucketsCollForRead(opCtx.get(), bucketsColl); + ASSERT_TRUE(bucketsCollForRead->timeseriesBucketingParametersMayHaveChanged()); + } +} + +TEST_F(CollModTest, TimeseriesBucketingParameterChanged) { + NamespaceString curNss = NamespaceString::createNamespaceString_forTest("test.curColl"); + auto bucketsColl = + NamespaceString::createNamespaceString_forTest("test.system.buckets.curColl"); + + auto opCtx = makeOpCtx(); + auto tsOptions = TimeseriesOptions("t"); + tsOptions.setBucketRoundingSeconds(100); + tsOptions.setBucketMaxSpanSeconds(100); + CreateCommand cmd = CreateCommand(curNss); + cmd.setTimeseries(std::move(tsOptions)); + uassertStatusOK(createCollection(opCtx.get(), cmd)); + + uassertStatusOK(writeConflictRetry( + opCtx.get(), "unitTestTimeseriesBucketingParameterChanged", bucketsColl, [&] { + WriteUnitOfWork wunit(opCtx.get()); + + AutoGetCollection collection(opCtx.get(), bucketsColl, MODE_X); + auto writableColl = collection.getWritableCollection(opCtx.get()); + writableColl->setTimeseriesBucketingParametersChanged(opCtx.get(), boost::none); + + wunit.commit(); + return Status::OK(); + })); + + AutoGetCollectionForRead bucketsCollForRead(opCtx.get(), bucketsColl); + ASSERT_TRUE(bucketsCollForRead->timeseriesBucketingParametersMayHaveChanged()); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/catalog/collection.cpp b/src/mongo/db/catalog/collection.cpp index 0107be9830862..616fce109c1cf 100644 --- a/src/mongo/db/catalog/collection.cpp +++ b/src/mongo/db/catalog/collection.cpp @@ -29,9 +29,9 @@ #include "mongo/db/catalog/collection.h" -#include - -#include "mongo/logv2/log.h" +#include +#include +#include #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -98,4 +98,28 @@ void Collection::Factory::set(ServiceContext* service, factory = std::move(newFactory); } +std::pair, ExpressionContext::CollationMatchesDefault> +resolveCollator(OperationContext* opCtx, BSONObj userCollation, const CollectionPtr& collection) { + if (!collection || !collection->getDefaultCollator()) { + if (userCollation.isEmpty()) { + return {nullptr, ExpressionContext::CollationMatchesDefault::kYes}; + } else { + return {getUserCollator(opCtx, userCollation), + ExpressionContext::CollationMatchesDefault::kYes}; + } + } + + auto defaultCollator = collection->getDefaultCollator()->clone(); + if (userCollation.isEmpty()) { + return {std::move(defaultCollator), ExpressionContext::CollationMatchesDefault::kYes}; + } + auto userCollator = getUserCollator(opCtx, userCollation); + + if (CollatorInterface::collatorsMatch(defaultCollator.get(), userCollator.get())) { + return {std::move(defaultCollator), ExpressionContext::CollationMatchesDefault::kYes}; + } else { + return {std::move(userCollator), ExpressionContext::CollationMatchesDefault::kNo}; + } +} + } // namespace mongo diff --git a/src/mongo/db/catalog/collection.h b/src/mongo/db/catalog/collection.h index 295286bea6026..126d59d93416d 100644 --- a/src/mongo/db/catalog/collection.h +++ b/src/mongo/db/catalog/collection.h @@ -29,33 +29,62 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include #include 
+#include +#include #include +#include #include #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" #include "mongo/db/catalog/capped_visibility.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/catalog/collection_options.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/catalog/collection_options_gen.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_interface.h" -#include "mongo/db/query/plan_executor.h" #include "mongo/db/record_id.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/storage/bson_collection_catalog_entry.h" #include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/storage/ident.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/snapshot.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/yieldable.h" #include "mongo/logv2/log_attr.h" #include "mongo/platform/mutex.h" #include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" #include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { @@ -132,17 +161,6 @@ class Collection : public Decorable { static Factory* get(ServiceContext* service); static Factory* get(OperationContext* opCtx); static void set(ServiceContext* service, std::unique_ptr factory); - - /** - * Constructs a Collection object. This does not persist any state to the storage engine, - * only constructs an in-memory representation of what already exists on disk. - */ - virtual std::shared_ptr make(OperationContext* opCtx, - const NamespaceString& nss, - RecordId catalogId, - const CollectionOptions& options, - std::unique_ptr rs) const = 0; - /** * Constructs a Collection object. This does not persist any state to the storage engine, * only constructs an in-memory representation of what already exists on disk. @@ -234,16 +252,6 @@ class Collection : public Decorable { const DurableCatalogEntry& catalogEntry, boost::optional readTimestamp) = 0; - virtual bool isCommitted() const { - return true; - } - - /** - * Update the visibility of this collection in the Collection Catalog. Updates to this value - * are not idempotent, as successive updates with the same `val` should not occur. 
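The resolveCollator() helper added to collection.cpp above returns both a collator and a flag saying whether a user-supplied collation matches the collection default. A small hypothetical caller; only resolveCollator and ExpressionContext::CollationMatchesDefault come from this change, the rest is illustrative:

```cpp
// userCollation is whatever collation spec the command supplied; it may be empty,
// in which case the collection-default collator (if any) is returned.
auto [collator, matchesDefault] = resolveCollator(opCtx, userCollation, collection);
if (matchesDefault == ExpressionContext::CollationMatchesDefault::kNo) {
    // The request asked for a collation that differs from the collection default;
    // callers can branch on this when the distinction matters.
}
```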
- */ - virtual void setCommitted(bool val) {} - virtual bool isInitialized() const { return false; } @@ -386,6 +394,17 @@ class Collection : public Decorable { virtual void setTimeseriesBucketsMayHaveMixedSchemaData(OperationContext* opCtx, boost::optional setting) = 0; + virtual bool timeseriesBucketingParametersMayHaveChanged() const = 0; + + /** + * Sets the 'timeseriesBucketingParametersHaveChanged' catalog entry flag to 'value' for this + * collection. + * + * Throws if this is not a time-series collection. + */ + virtual void setTimeseriesBucketingParametersChanged(OperationContext* opCtx, + boost::optional value) = 0; + /** * Returns true if the passed in time-series bucket document contains mixed-schema data. */ @@ -643,24 +662,13 @@ class Collection : public Decorable { */ virtual uint64_t getIndexFreeStorageBytes(OperationContext* opCtx) const = 0; - /** - * If return value is not boost::none, reads with majority read concern using an older snapshot - * must error. - */ - virtual boost::optional getMinimumVisibleSnapshot() const = 0; - - virtual void setMinimumVisibleSnapshot(Timestamp name) = 0; - - /** * Get the timestamp this Collection instance was most recently changed at. - * TODO SERVER-68270: Should currently not be used until min visible snapshot is removed */ virtual boost::optional getMinimumValidSnapshot() const = 0; /** * Sets the timestamp this Collection instance was most recently changed at. - * TODO SERVER-68270: Should currently not be used until min visible snapshot is removed */ virtual void setMinimumValidSnapshot(Timestamp name) = 0; @@ -810,4 +818,25 @@ inline ValidationLevelEnum validationLevelOrDefault(boost::optional getUserCollator(OperationContext* opCtx, + const BSONObj& userCollation) { + tassert(7542402, "Empty user collation", !userCollation.isEmpty()); + return uassertStatusOK( + CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(userCollation)); +} + +/** + * Resolves the collator to either the user-specified collation or, if none was specified, to + * the collection-default collation and also returns a flag indicating whether the user-provided + * collation matches the collection default collation. + */ +std::pair, ExpressionContext::CollationMatchesDefault> +resolveCollator(OperationContext* opCtx, BSONObj userCollation, const CollectionPtr& collection); + } // namespace mongo diff --git a/src/mongo/db/catalog/collection_catalog.cpp b/src/mongo/db/catalog/collection_catalog.cpp index 857a6618ad627..1dcac042a861d 100644 --- a/src/mongo/db/catalog/collection_catalog.cpp +++ b/src/mongo/db/catalog/collection_catalog.cpp @@ -27,24 +27,65 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include #include "collection_catalog.h" -#include "mongo/db/catalog/database.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/uncommitted_catalog_updates.h" +#include "mongo/db/client.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/resource_catalog.h" -#include "mongo/db/multitenancy_gen.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" #include "mongo/db/server_options.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" +#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/ident.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/recovery_unit.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -52,13 +93,6 @@ namespace mongo { namespace { -// Sentinel id for marking a catalogId mapping range as unknown. Must use an invalid RecordId. -static RecordId kUnknownRangeMarkerId = RecordId::minLong(); -// Maximum number of entries in catalogId mapping when inserting catalogId missing at timestamp. -// Used to avoid quadratic behavior when inserting entries at the beginning. When threshold is -// reached we will fall back to more durable catalog scans. -static constexpr int kMaxCatalogIdMappingLengthForMissingInsert = 1000; - constexpr auto kNumDurableCatalogScansDueToMissingMapping = "numScansDueToMissingMapping"_sd; struct LatestCollectionCatalog { @@ -71,6 +105,7 @@ const ServiceContext::Decoration getCatalog = // batched write is ongoing without having to take locks. 
std::shared_ptr batchedCatalogWriteInstance; AtomicWord ongoingBatchedWrite{false}; +absl::flat_hash_set batchedCatalogClonedCollections; const RecoveryUnit::Snapshot::Decoration> stashedCatalog = RecoveryUnit::Snapshot::declareDecoration>(); @@ -99,20 +134,26 @@ void assertViewCatalogValid(const ViewsForDatabase& viewsForDb) { viewsForDb.valid()); } -const auto maxUuid = UUID::parse("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF").getValue(); -const auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue(); - - -// Copy existing value from immutable data structure or default-construct if not existing -template -auto copyIfExists(const Container& container, const Key& key) { - const auto* value = container.find(key); - if (value) { - return *value; +ViewsForDatabase loadViewsForDatabase(OperationContext* opCtx, + const CollectionCatalog& catalog, + const DatabaseName& dbName) { + ViewsForDatabase viewsForDb; + auto systemDotViews = NamespaceString::makeSystemDotViewsNamespace(dbName); + if (auto status = viewsForDb.reload( + opCtx, CollectionPtr(catalog.lookupCollectionByNamespace(opCtx, systemDotViews))); + !status.isOK()) { + LOGV2_WARNING_OPTIONS(20326, + {logv2::LogTag::kStartupWarnings}, + "Unable to parse views; remove any invalid views from the " + "collection to restore server functionality", + "error"_attr = redact(status), + logAttrs(systemDotViews)); } - return typename Container::mapped_type(); + return viewsForDb; } +const auto maxUuid = UUID::parse("FFFFFFFF-FFFF-FFFF-FFFF-FFFFFFFFFFFF").getValue(); +const auto minUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue(); } // namespace /** @@ -177,7 +218,7 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan catalog._collections = catalog._collections.set(collection->ns(), collection); catalog._catalog = catalog._catalog.set(collection->uuid(), collection); auto dbIdPair = std::make_pair(collection->ns().dbName(), collection->uuid()); - catalog._orderedCollections[dbIdPair] = collection; + catalog._orderedCollections = catalog._orderedCollections.set(dbIdPair, collection); catalog._pendingCommitNamespaces = catalog._pendingCommitNamespaces.erase(collection->ns()); catalog._pendingCommitUUIDs = catalog._pendingCommitUUIDs.erase(collection->uuid()); @@ -188,20 +229,18 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan static void ensureRegisteredWithRecoveryUnit( OperationContext* opCtx, UncommittedCatalogUpdates& uncommittedCatalogUpdates) { - if (opCtx->recoveryUnit()->hasRegisteredChangeForCatalogVisibility()) + if (uncommittedCatalogUpdates.hasRegisteredWithRecoveryUnit()) return; - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
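The catalog's internal containers are now updated only through value-returning set()/erase() calls (for example _orderedCollections = _orderedCollections.set(dbIdPair, collection)), so every write publishes a fresh map version instead of mutating in place. The sketch below shows the general copy-on-write shape with an ordinary std::map behind a shared_ptr; the server's containers are persistent structures with structural sharing, so CowMap here is only an illustration of the idea, not the real data structure.

    // Copy-on-write map sketch: readers hold a snapshot; writers copy, modify the
    // copy, and publish a new version, leaving existing snapshots untouched.
    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>

    template <typename K, typename V>
    class CowMap {
    public:
        using Map = std::map<K, V>;

        CowMap() : _current(std::make_shared<const Map>()) {}

        std::shared_ptr<const Map> snapshot() const { return _current; }

        // Returns a new map version with the key set; the original version is unchanged.
        CowMap set(const K& key, const V& value) const {
            Map copy(*_current);
            copy[key] = value;
            return CowMap(std::make_shared<const Map>(std::move(copy)));
        }

        CowMap erase(const K& key) const {
            Map copy(*_current);
            copy.erase(key);
            return CowMap(std::make_shared<const Map>(std::move(copy)));
        }

    private:
        explicit CowMap(std::shared_ptr<const Map> m) : _current(std::move(m)) {}

        std::shared_ptr<const Map> _current;
    };

    int main() {
        CowMap<std::string, int> catalog;
        auto before = catalog.snapshot();

        catalog = catalog.set("test.coll", 1);  // publish a new version
        assert(before->empty());                // older snapshot is unaffected
        assert(catalog.snapshot()->count("test.coll") == 1);
    }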
- if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - opCtx->recoveryUnit()->registerPreCommitHook( - [](OperationContext* opCtx) { PublishCatalogUpdates::preCommit(opCtx); }); - } + opCtx->recoveryUnit()->registerPreCommitHook( + [](OperationContext* opCtx) { PublishCatalogUpdates::preCommit(opCtx); }); opCtx->recoveryUnit()->registerChangeForCatalogVisibility( std::make_unique(uncommittedCatalogUpdates)); + uncommittedCatalogUpdates.markRegisteredWithRecoveryUnit(); } static void preCommit(OperationContext* opCtx) { - const auto& uncommittedCatalogUpdates = UncommittedCatalogUpdates::get(opCtx); + auto& uncommittedCatalogUpdates = UncommittedCatalogUpdates::get(opCtx); const auto& entries = uncommittedCatalogUpdates.entries(); if (std::none_of( @@ -210,6 +249,16 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan return; } CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { + // First do a pass to check that we are not conflicting with any namespace that we are + // trying to create. + for (auto&& entry : entries) { + if (entry.action == UncommittedCatalogUpdates::Entry::Action::kCreatedCollection) { + catalog._ensureNamespaceDoesNotExist( + opCtx, entry.collection->ns(), NamespaceType::kAll); + } + } + + // We did not conflict with any namespace, mark all the collections as pending commit. for (auto&& entry : entries) { if (!UncommittedCatalogUpdates::isTwoPhaseCommitEntry(entry)) { continue; @@ -230,6 +279,11 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan catalog._pendingCommitUUIDs.set(*entry.externalUUID, nullptr); } } + + // Mark that we've successfully run preCommit, this allows rollback to clean up the + // collections marked as pending commit. We need to make sure we do not clean anything + // up for other transactions. + uncommittedCatalogUpdates.markPrecommitted(); }); } @@ -249,22 +303,21 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan break; } case UncommittedCatalogUpdates::Entry::Action::kRenamedCollection: { - writeJobs.push_back([opCtx, - &from = entry.nss, - &to = entry.renameTo, - commitTime](CollectionCatalog& catalog) { - // We just need to do modifications on 'from' here. 'to' is taken care - // of by a separate kWritableCollection entry. - catalog._collections = catalog._collections.erase(from); - catalog._pendingCommitNamespaces = - catalog._pendingCommitNamespaces.erase(from); + writeJobs.push_back( + [opCtx, &from = entry.nss, &to = entry.renameTo, commitTime]( + CollectionCatalog& catalog) { + // We just need to do modifications on 'from' here. 'to' is taken care + // of by a separate kWritableCollection entry. 
+ catalog._collections = catalog._collections.erase(from); + catalog._pendingCommitNamespaces = + catalog._pendingCommitNamespaces.erase(from); - auto& resourceCatalog = ResourceCatalog::get(opCtx->getServiceContext()); - resourceCatalog.remove({RESOURCE_COLLECTION, from}, from); - resourceCatalog.add({RESOURCE_COLLECTION, to}, to); + auto& resourceCatalog = ResourceCatalog::get(); + resourceCatalog.remove({RESOURCE_COLLECTION, from}, from); + resourceCatalog.add({RESOURCE_COLLECTION, to}, to); - catalog._pushCatalogIdForRename(from, to, commitTime); - }); + catalog._catalogIdTracker.rename(from, to, commitTime); + }); break; } case UncommittedCatalogUpdates::Entry::Action::kDroppedCollection: { @@ -283,39 +336,17 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan commitTime](CollectionCatalog& catalog) { // Override existing Collection on this namespace catalog._registerCollection(opCtx, - uuid, std::move(collection), - /*twoPhase=*/false, /*ts=*/commitTime); }); - // Fallthrough to the createCollection case to finish committing the collection. - [[fallthrough]]; + break; } case UncommittedCatalogUpdates::Entry::Action::kCreatedCollection: { - // By this point, we may or may not have reserved an oplog slot for the - // collection creation. - // For example, multi-document transactions will only reserve the oplog slot at - // commit time. As a result, we may or may not have a reliable value to use to - // set the new collection's minimum visible snapshot until commit time. - // Pre-commit hooks do not presently have awareness of the commit timestamp, so - // we must update the minVisibleTimestamp with the appropriate value. This is - // fine because the collection should not be visible in the catalog until we - // call setCommitted(true). 
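The commit handler gathers its catalog modifications as a vector of callbacks (writeJobs) and applies them to the catalog in one pass. A small sketch of that queue-then-apply pattern follows; ToyCatalog and the namespaces it holds are made-up stand-ins.

    // Sketch of the "writeJobs" pattern: each catalog action contributes a closure,
    // and all of them are applied together against one writable catalog instance.
    #include <functional>
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    struct ToyCatalog {
        std::set<std::string> namespaces;
    };

    int main() {
        std::vector<std::function<void(ToyCatalog&)>> writeJobs;

        writeJobs.push_back([](ToyCatalog& c) { c.namespaces.insert("db.created"); });
        writeJobs.push_back([](ToyCatalog& c) { c.namespaces.erase("db.dropped"); });
        writeJobs.push_back([](ToyCatalog& c) {
            c.namespaces.erase("db.from");
            c.namespaces.insert("db.to");  // rename = erase old + insert new
        });

        ToyCatalog catalog;
        catalog.namespaces = {"db.dropped", "db.from"};
        for (auto&& job : writeJobs)  // applied together, e.g. under one catalog write
            job(catalog);

        for (auto&& ns : catalog.namespaces)
            std::cout << ns << "\n";  // db.created, db.to
    }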
- writeJobs.push_back( - [coll = entry.collection.get(), commitTime](CollectionCatalog& catalog) { - if (commitTime) { - coll->setMinimumVisibleSnapshot(commitTime.value()); - coll->setMinimumValidSnapshot(commitTime.value()); - } - catalog._pushCatalogIdForNSSAndUUID( - coll->ns(), coll->uuid(), coll->getCatalogId(), commitTime); - - catalog._pendingCommitNamespaces = - catalog._pendingCommitNamespaces.erase(coll->ns()); - catalog._pendingCommitUUIDs = - catalog._pendingCommitUUIDs.erase(coll->uuid()); - coll->setCommitted(true); - }); + writeJobs.push_back([opCtx, + collection = std::move(entry.collection), + commitTime](CollectionCatalog& catalog) { + catalog._registerCollection(opCtx, std::move(collection), commitTime); + }); break; } case UncommittedCatalogUpdates::Entry::Action::kReplacedViewsForDatabase: { @@ -328,16 +359,14 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan } case UncommittedCatalogUpdates::Entry::Action::kAddViewResource: { writeJobs.push_back([opCtx, &viewName = entry.nss](CollectionCatalog& catalog) { - ResourceCatalog::get(opCtx->getServiceContext()) - .add({RESOURCE_COLLECTION, viewName}, viewName); + ResourceCatalog::get().add({RESOURCE_COLLECTION, viewName}, viewName); catalog.deregisterUncommittedView(viewName); }); break; } case UncommittedCatalogUpdates::Entry::Action::kRemoveViewResource: { writeJobs.push_back([opCtx, &viewName = entry.nss](CollectionCatalog& catalog) { - ResourceCatalog::get(opCtx->getServiceContext()) - .remove({RESOURCE_COLLECTION, viewName}, viewName); + ResourceCatalog::get().remove({RESOURCE_COLLECTION, viewName}, viewName); }); break; } @@ -365,9 +394,12 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan void rollback(OperationContext* opCtx) override { auto entries = _uncommittedCatalogUpdates.releaseEntries(); - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) + + // Skip rollback logic if we failed to preCommit this transaction. We must make sure we + // don't clean anything up for other transactions. + if (!_uncommittedCatalogUpdates.hasPrecommitted()) { return; + } if (std::none_of( entries.begin(), entries.end(), UncommittedCatalogUpdates::isTwoPhaseCommitEntry)) { @@ -398,83 +430,53 @@ class CollectionCatalog::PublishCatalogUpdates final : public RecoveryUnit::Chan UncommittedCatalogUpdates& _uncommittedCatalogUpdates; }; -CollectionCatalog::iterator::iterator(OperationContext* opCtx, - const DatabaseName& dbName, - const CollectionCatalog& catalog) - : _opCtx(opCtx), _dbName(dbName), _catalog(&catalog) { - - _mapIter = _catalog->_orderedCollections.lower_bound(std::make_pair(_dbName, minUuid)); - - // Start with the first collection that is visible outside of its transaction. 
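Catalog commit is now two-phase: a pre-commit hook first checks that none of the namespaces being created conflict and only then marks the updates as pre-committed, and rollback skips its cleanup when that mark was never set, so a failed pre-commit cannot disturb state belonging to other transactions. The sketch below models just that control flow; CreateCollectionChange and CatalogState are hypothetical types, not the server's Change interface.

    // Minimal model of the preCommit / commit / rollback flow. The key piece is the
    // `precommitted` flag: rollback only undoes work that preCommit actually did.
    #include <iostream>
    #include <set>
    #include <stdexcept>
    #include <string>

    struct CatalogState {
        std::set<std::string> committed;
        std::set<std::string> pendingCommit;
    };

    class CreateCollectionChange {
    public:
        explicit CreateCollectionChange(std::string nss) : _nss(std::move(nss)) {}

        void preCommit(CatalogState& state) {
            if (state.committed.count(_nss) || state.pendingCommit.count(_nss))
                throw std::runtime_error("namespace already in use: " + _nss);
            state.pendingCommit.insert(_nss);
            _precommitted = true;  // only now is there anything of ours to roll back
        }

        void commit(CatalogState& state) {
            state.pendingCommit.erase(_nss);
            state.committed.insert(_nss);
        }

        void rollback(CatalogState& state) {
            if (!_precommitted)
                return;  // preCommit never marked anything; nothing here belongs to us
            state.pendingCommit.erase(_nss);
        }

    private:
        std::string _nss;
        bool _precommitted = false;
    };

    int main() {
        CatalogState state;
        CreateCollectionChange change("test.coll");
        try {
            change.preCommit(state);
            change.commit(state);
        } catch (const std::exception&) {
            change.rollback(state);
        }
        std::cout << state.committed.count("test.coll") << "\n";  // 1
    }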
- while (!_exhausted() && !_mapIter->second->isCommitted()) { - _mapIter++; - } - - if (!_exhausted()) { - _uuid = _mapIter->first.second; - } -} - -CollectionCatalog::iterator::iterator( - OperationContext* opCtx, - std::map, std::shared_ptr>::const_iterator mapIter, - const CollectionCatalog& catalog) - : _opCtx(opCtx), _mapIter(mapIter), _catalog(&catalog) {} +CollectionCatalog::iterator::iterator(const DatabaseName& dbName, + OrderedCollectionMap::iterator it, + const OrderedCollectionMap& map) + : _map{map}, _mapIter{it} {} CollectionCatalog::iterator::value_type CollectionCatalog::iterator::operator*() { - if (_exhausted()) { + if (_mapIter == _map.end()) { return nullptr; } - return _mapIter->second.get(); } -UUID CollectionCatalog::iterator::uuid() const { - invariant(_uuid); - return *_uuid; -} - CollectionCatalog::iterator CollectionCatalog::iterator::operator++() { - _mapIter++; + invariant(_mapIter != _map.end()); + ++_mapIter; + return *this; +} - // Skip any collections that are not yet visible outside of their respective transactions. - while (!_exhausted() && !_mapIter->second->isCommitted()) { - _mapIter++; - } +bool CollectionCatalog::iterator::operator==(const iterator& other) const { + invariant(_map == other._map); - if (_exhausted()) { - // If the iterator is at the end of the map or now points to an entry that does not - // correspond to the correct database. - _mapIter = _catalog->_orderedCollections.end(); - _uuid = boost::none; - return *this; + if (other._mapIter == other._map.end()) { + return _mapIter == _map.end(); + } else if (_mapIter == _map.end()) { + return other._mapIter == other._map.end(); } - _uuid = _mapIter->first.second; - return *this; + return _mapIter->first.second == other._mapIter->first.second; } -CollectionCatalog::iterator CollectionCatalog::iterator::operator++(int) { - auto oldPosition = *this; - ++(*this); - return oldPosition; +bool CollectionCatalog::iterator::operator!=(const iterator& other) const { + return !(*this == other); } -bool CollectionCatalog::iterator::operator==(const iterator& other) const { - invariant(_catalog == other._catalog); - if (other._mapIter == _catalog->_orderedCollections.end()) { - return _uuid == boost::none; - } +CollectionCatalog::Range::Range(const OrderedCollectionMap& map, const DatabaseName& dbName) + : _map{map}, _dbName{dbName} {} - return _uuid == other._uuid; +CollectionCatalog::iterator CollectionCatalog::Range::begin() const { + return {_dbName, _map.lower_bound(std::make_pair(_dbName, minUuid)), _map}; } -bool CollectionCatalog::iterator::operator!=(const iterator& other) const { - return !(*this == other); +CollectionCatalog::iterator CollectionCatalog::Range::end() const { + return {_dbName, _map.upper_bound(std::make_pair(_dbName, maxUuid)), _map}; } -bool CollectionCatalog::iterator::_exhausted() { - return _mapIter == _catalog->_orderedCollections.end() || _mapIter->first.first != _dbName; +bool CollectionCatalog::Range::empty() const { + return begin() == end(); } std::shared_ptr CollectionCatalog::latest(ServiceContext* svcCtx) { @@ -719,7 +721,8 @@ Status CollectionCatalog::modifyView( auto viewPtr = viewsForDb.lookup(viewName); if (!viewPtr) return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "cannot modify missing view " << viewName.ns()); + str::stream() + << "cannot modify missing view " << viewName.toStringForErrorMsg()); if (!NamespaceString::validCollectionName(viewOn.coll())) return Status(ErrorCodes::InvalidNamespace, @@ -787,11 +790,8 @@ Status 
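Per-database iteration is now expressed as a Range whose begin() and end() come from lower_bound/upper_bound on the (dbName, uuid) key using minimum and maximum UUID sentinels. The standalone sketch below shows the same bounding trick over a std::map, with ints standing in for UUIDs.

    // With keys ordered as (dbName, uuid), one database's collections form the range
    // [lower_bound({db, minUuid}), upper_bound({db, maxUuid})).
    #include <iostream>
    #include <limits>
    #include <map>
    #include <string>
    #include <utility>

    using Key = std::pair<std::string, int>;  // (dbName, uuid)

    int main() {
        std::map<Key, std::string> orderedCollections{
            {{"admin", 7}, "admin.system.version"},
            {{"test", 1}, "test.a"},
            {{"test", 4}, "test.b"},
            {{"zoo", 2}, "zoo.c"},
        };

        const int minUuid = std::numeric_limits<int>::min();
        const int maxUuid = std::numeric_limits<int>::max();

        const std::string dbName = "test";
        auto begin = orderedCollections.lower_bound({dbName, minUuid});
        auto end = orderedCollections.upper_bound({dbName, maxUuid});

        for (auto it = begin; it != end; ++it)
            std::cout << it->second << "\n";  // test.a, test.b
    }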
CollectionCatalog::dropView(OperationContext* opCtx, const NamespaceStrin } void CollectionCatalog::reloadViews(OperationContext* opCtx, const DatabaseName& dbName) const { - // Two-phase locking ensures that all locks are held while a Change's commit() or - // rollback()function runs, for thread saftey. And, MODE_X locks always opt for two-phase - // locking. - invariant(opCtx->lockState()->isCollectionLockedForMode( - NamespaceString::makeSystemDotViewsNamespace(dbName), MODE_X)); + invariantHasExclusiveAccessToCollection(opCtx, + NamespaceString::makeSystemDotViewsNamespace(dbName)); auto& uncommittedCatalogUpdates = UncommittedCatalogUpdates::get(opCtx); if (uncommittedCatalogUpdates.shouldIgnoreExternalViewChanges(dbName)) { @@ -800,24 +800,8 @@ void CollectionCatalog::reloadViews(OperationContext* opCtx, const DatabaseName& LOGV2_DEBUG(22546, 1, "Reloading view catalog for database", logAttrs(dbName)); - ViewsForDatabase viewsForDb; - auto status = viewsForDb.reload(opCtx, CollectionPtr(_lookupSystemViews(opCtx, dbName))); - if (!status.isOK()) { - // If we encountered an error while reloading views, then the 'viewsForDb' variable will be - // empty, and marked invalid. Any further operations that attempt to use a view will fail - // until the view catalog is fixed. Most of the time, this means the system.views collection - // needs to be dropped. - // - // Unfortunately, we don't have a good way to respond to this error, as when we're calling - // this function, we're in an op observer, and we expect the operation to succeed once it's - // gotten to that point since it's passed all our other checks. Instead, we can log this - // information to aid in diagnosing the problem. - LOGV2(7267300, - "Encountered an error while reloading the view catalog", - "error"_attr = status); - } - - uncommittedCatalogUpdates.replaceViewsForDatabase(dbName, std::move(viewsForDb)); + uncommittedCatalogUpdates.replaceViewsForDatabase(dbName, + loadViewsForDatabase(opCtx, *this, dbName)); PublishCatalogUpdates::ensureRegisteredWithRecoveryUnit(opCtx, uncommittedCatalogUpdates); } @@ -835,18 +819,13 @@ const Collection* CollectionCatalog::establishConsistentCollection( bool CollectionCatalog::_needsOpenCollection(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, boost::optional readTimestamp) const { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - return false; - } - // Don't need to open the collection if it was already previously instantiated. 
- if (nsOrUUID.nss()) { - if (OpenedCollections::get(opCtx).lookupByNamespace(*nsOrUUID.nss())) { + if (nsOrUUID.isNamespaceString()) { + if (OpenedCollections::get(opCtx).lookupByNamespace(nsOrUUID.nss())) { return false; } } else { - if (OpenedCollections::get(opCtx).lookupByUUID(*nsOrUUID.uuid())) { + if (OpenedCollections::get(opCtx).lookupByUUID(nsOrUUID.uuid())) { return false; } } @@ -855,10 +834,10 @@ bool CollectionCatalog::_needsOpenCollection(OperationContext* opCtx, auto coll = lookupCollectionByNamespaceOrUUID(opCtx, nsOrUUID); return !coll || *readTimestamp < coll->getMinimumValidSnapshot(); } else { - if (nsOrUUID.nss()) { - return _pendingCommitNamespaces.find(*nsOrUUID.nss()); + if (nsOrUUID.isNamespaceString()) { + return _pendingCommitNamespaces.find(nsOrUUID.nss()); } else { - return _pendingCommitUUIDs.find(*nsOrUUID.uuid()); + return _pendingCommitUUIDs.find(nsOrUUID.uuid()); } } } @@ -867,11 +846,6 @@ const Collection* CollectionCatalog::_openCollection( OperationContext* opCtx, const NamespaceStringOrUUID& nssOrUUID, boost::optional readTimestamp) const { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - return nullptr; - } - // The implementation of openCollection() is quite different at a timestamp compared to at // latest. Separated the implementation into helper functions and we call the right one // depending on the input parameters. @@ -890,22 +864,23 @@ const Collection* CollectionCatalog::_openCollectionAtLatestByNamespaceOrUUID( // compare the collection instance in _pendingCommitNamespaces and the collection instance in // the in-memory catalog with the durable catalog entry to determine which instance to return. const auto& pendingCollection = [&]() -> std::shared_ptr { - if (const auto& nss = nssOrUUID.nss()) { - const std::shared_ptr* pending = _pendingCommitNamespaces.find(*nss); + if (nssOrUUID.isNamespaceString()) { + const std::shared_ptr* pending = + _pendingCommitNamespaces.find(nssOrUUID.nss()); invariant(pending); return *pending; } - const std::shared_ptr* pending = _pendingCommitUUIDs.find(*nssOrUUID.uuid()); + const std::shared_ptr* pending = _pendingCommitUUIDs.find(nssOrUUID.uuid()); invariant(pending); return *pending; }(); auto latestCollection = [&]() -> std::shared_ptr { - if (const auto& nss = nssOrUUID.nss()) { - return _getCollectionByNamespace(opCtx, *nss); + if (nssOrUUID.isNamespaceString()) { + return _getCollectionByNamespace(opCtx, nssOrUUID.nss()); } - return _getCollectionByUUID(opCtx, *nssOrUUID.uuid()); + return _getCollectionByUUID(opCtx, nssOrUUID.uuid()); }(); // At least one of latest and pending should be a valid pointer. @@ -921,18 +896,18 @@ const Collection* CollectionCatalog::_openCollectionAtLatestByNamespaceOrUUID( return latestCollection->getCatalogId(); }(); - auto catalogEntry = DurableCatalog::get(opCtx)->getCatalogEntry(opCtx, catalogId); + auto catalogEntry = DurableCatalog::get(opCtx)->getParsedCatalogEntry(opCtx, catalogId); const NamespaceString& nss = [&]() { - if (auto nss = nssOrUUID.nss()) { - return *nss; + if (nssOrUUID.isNamespaceString()) { + return nssOrUUID.nss(); } return latestCollection ? 
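Lookups now branch on nssOrUUID.isNamespaceString()/isUUID() and read the held value directly through nss()/uuid(), rather than dereferencing the old optional-returning accessors. The sketch below models that "exactly one of the two" shape with std::variant; NamespaceOrUuid and Uuid are simplified stand-ins, not the server's NamespaceStringOrUUID.

    // Namespace-or-UUID modeled as a tagged choice: ask which alternative is held,
    // then read it directly instead of unwrapping an optional.
    #include <cassert>
    #include <iostream>
    #include <string>
    #include <variant>

    struct Uuid { std::string hex; };

    class NamespaceOrUuid {
    public:
        explicit NamespaceOrUuid(std::string nss) : _value(std::move(nss)) {}
        explicit NamespaceOrUuid(Uuid uuid) : _value(std::move(uuid)) {}

        bool isNamespaceString() const { return std::holds_alternative<std::string>(_value); }
        bool isUUID() const { return std::holds_alternative<Uuid>(_value); }

        const std::string& nss() const { return std::get<std::string>(_value); }
        const Uuid& uuid() const { return std::get<Uuid>(_value); }

    private:
        std::variant<std::string, Uuid> _value;
    };

    std::string describe(const NamespaceOrUuid& nssOrUuid) {
        if (nssOrUuid.isNamespaceString())
            return "namespace " + nssOrUuid.nss();
        return "uuid " + nssOrUuid.uuid().hex;
    }

    int main() {
        NamespaceOrUuid byName{std::string{"test.coll"}};
        NamespaceOrUuid byUuid{Uuid{"00000000-0000-0000-0000-000000000000"}};
        assert(byName.isNamespaceString() && byUuid.isUUID());
        std::cout << describe(byName) << "\n" << describe(byUuid) << "\n";
    }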
latestCollection->ns() : pendingCollection->ns(); }(); const UUID uuid = [&]() { - if (auto uuid = nssOrUUID.uuid()) { - return *uuid; + if (nssOrUUID.isUUID()) { + return nssOrUUID.uuid(); } // If pendingCollection is nullptr, the collection is being dropped, so latestCollection @@ -942,13 +917,13 @@ const Collection* CollectionCatalog::_openCollectionAtLatestByNamespaceOrUUID( // If the catalog entry is not found in our snapshot then the collection is being dropped and we // can observe the drop. Lookups by this namespace or uuid should not find a collection. - if (catalogEntry.isEmpty()) { + if (!catalogEntry) { // If we performed this lookup by UUID we could be in a case where we're looking up // concurrently with a rename with dropTarget=true where the UUID that we use is the target // that got dropped. If that rename has committed we need to put the correct collection // under open collection for this namespace. We can detect this case by comparing the // catalogId with what is pending for this namespace. - if (nssOrUUID.uuid()) { + if (nssOrUUID.isUUID()) { const std::shared_ptr& pending = *_pendingCommitNamespaces.find(nss); if (pending && pending->getCatalogId() != catalogId) { openedCollections.store(nullptr, boost::none, uuid); @@ -962,8 +937,8 @@ const Collection* CollectionCatalog::_openCollectionAtLatestByNamespaceOrUUID( // When trying to open the latest collection by namespace and the catalog entry has a different // namespace in our snapshot, then there is a rename operation concurrent with this call. - NamespaceString nsInDurableCatalog = DurableCatalog::getNamespaceFromCatalogEntry(catalogEntry); - if (nssOrUUID.nss() && nss != nsInDurableCatalog) { + NamespaceString nsInDurableCatalog = catalogEntry->metadata->nss; + if (nssOrUUID.isNamespaceString() && nss != nsInDurableCatalog) { // There are two types of rename depending on the dropTarget flag. if (pendingCollection && latestCollection && pendingCollection->getCatalogId() != latestCollection->getCatalogId()) { @@ -1007,7 +982,7 @@ const Collection* CollectionCatalog::_openCollectionAtLatestByNamespaceOrUUID( // entries under uncommitted catalog changes for two namespaces (rename 'from' and 'to') so we // can make sure lookups by UUID is supported and will return a Collection with its namespace in // sync with the storage snapshot. - if (nssOrUUID.uuid() && latestCollection && pendingCollection && + if (nssOrUUID.isUUID() && latestCollection && pendingCollection && latestCollection->ns() != pendingCollection->ns()) { if (latestCollection->ns() == nsInDurableCatalog) { // If this is a rename with dropTarget=true and we're looking up with the 'from' UUID @@ -1032,33 +1007,41 @@ const Collection* CollectionCatalog::_openCollectionAtLatestByNamespaceOrUUID( } } - auto metadata = DurableCatalog::getMetadataFromCatalogEntry(catalogEntry); + auto metadataObj = catalogEntry->metadata->toBSON(); - if (latestCollection && latestCollection->isMetadataEqual(metadata)) { + if (latestCollection && latestCollection->isMetadataEqual(metadataObj)) { openedCollections.store(latestCollection, nss, uuid); return latestCollection.get(); } // Use the pendingCollection if there is no latestCollection or if the metadata of the // latestCollection doesn't match the durable catalogEntry. 
- if (pendingCollection && pendingCollection->isMetadataEqual(metadata)) { + if (pendingCollection && pendingCollection->isMetadataEqual(metadataObj)) { // If the latest collection doesn't exist then the pending collection must exist as it's // being created in this snapshot. Otherwise, if the latest collection is incompatible // with this snapshot, then the change came from an uncommitted update by an operation - // operating on this snapshot. + // operating on this snapshot. If both latestCollection and pendingCollection exists check + // if their uuid differs in which case this is a rename with dropTarget=true that just + // committed. + if (pendingCollection && latestCollection && + pendingCollection->uuid() != latestCollection->uuid()) { + openedCollections.store(nullptr, boost::none, latestCollection->uuid()); + } openedCollections.store(pendingCollection, nss, uuid); return pendingCollection.get(); } // If neither `latestCollection` or `pendingCollection` match the metadata we fully instantiate // a new collection instance from durable storage that is guaranteed to match. This can happen - // when multikey is not consistent with the storage snapshot. + // when multikey is not consistent with the storage snapshot. We use 'pendingCollection' as the + // base when available as it might contain an index that is about to be added. Dropped indexes + // can be found through other means in the drop pending state. invariant(latestCollection || pendingCollection); auto durableCatalogEntry = DurableCatalog::get(opCtx)->getParsedCatalogEntry(opCtx, catalogId); invariant(durableCatalogEntry); auto compatibleCollection = _createCompatibleCollection(opCtx, - latestCollection ? latestCollection : pendingCollection, + pendingCollection ? pendingCollection : latestCollection, /*readTimestamp=*/boost::none, durableCatalogEntry.get()); @@ -1079,7 +1062,20 @@ const Collection* CollectionCatalog::_openCollectionAtPointInTimeByNamespaceOrUU // Try to find a catalog entry matching 'readTimestamp'. auto catalogEntry = _fetchPITCatalogEntry(opCtx, nssOrUUID, readTimestamp); if (!catalogEntry) { - openedCollections.store(nullptr, nssOrUUID.nss(), nssOrUUID.uuid()); + openedCollections.store( + nullptr, + [nssOrUUID]() -> boost::optional { + if (nssOrUUID.isNamespaceString()) { + return nssOrUUID.nss(); + } + return boost::none; + }(), + [nssOrUUID]() -> boost::optional { + if (nssOrUUID.isUUID()) { + return nssOrUUID.uuid(); + } + return boost::none; + }()); return nullptr; } @@ -1109,102 +1105,86 @@ const Collection* CollectionCatalog::_openCollectionAtPointInTimeByNamespaceOrUU return newCollection.get(); } - openedCollections.store(nullptr, nssOrUUID.nss(), nssOrUUID.uuid()); + openedCollections.store( + nullptr, + [nssOrUUID]() -> boost::optional { + if (nssOrUUID.isNamespaceString()) { + return nssOrUUID.nss(); + } + return boost::none; + }(), + [nssOrUUID]() -> boost::optional { + if (nssOrUUID.isUUID()) { + return nssOrUUID.uuid(); + } + return boost::none; + }()); return nullptr; } -CollectionCatalog::CatalogIdLookup CollectionCatalog::_checkWithOldestCatalogIdTimestampMaintained( - boost::optional ts) const { - // If the request was with a time prior to the oldest maintained time it is unknown, otherwise - // we know it is not existing. - return {RecordId{}, - ts && *ts < _oldestCatalogIdTimestampMaintained - ? 
CollectionCatalog::CatalogIdLookup::Existence::kUnknown - : CollectionCatalog::CatalogIdLookup::Existence::kNotExists}; -} - -CollectionCatalog::CatalogIdLookup CollectionCatalog::_findCatalogIdInRange( - boost::optional ts, const std::vector& range) const { - if (!ts) { - auto catalogId = range.back().id; - if (catalogId) { - return {*catalogId, CatalogIdLookup::Existence::kExists}; - } - return {RecordId{}, CatalogIdLookup::Existence::kNotExists}; - } - - auto rangeIt = - std::upper_bound(range.begin(), range.end(), *ts, [](const auto& ts, const auto& entry) { - return ts < entry.ts; - }); - if (rangeIt == range.begin()) { - return _checkWithOldestCatalogIdTimestampMaintained(ts); - } - // Upper bound returns an iterator to the first entry with a larger timestamp. Decrement the - // iterator to get the last entry where the time is less or equal. - auto catalogId = (--rangeIt)->id; - if (catalogId) { - if (*catalogId != kUnknownRangeMarkerId) { - return {*catalogId, CatalogIdLookup::Existence::kExists}; - } else { - return {RecordId{}, CatalogIdLookup::Existence::kUnknown}; - } - } - return {RecordId{}, CatalogIdLookup::Existence::kNotExists}; -} - boost::optional CollectionCatalog::_fetchPITCatalogEntry( OperationContext* opCtx, const NamespaceStringOrUUID& nssOrUUID, boost::optional readTimestamp) const { - auto [catalogId, result] = nssOrUUID.nss() - ? lookupCatalogIdByNSS(*nssOrUUID.nss(), readTimestamp) - : lookupCatalogIdByUUID(*nssOrUUID.uuid(), readTimestamp); - if (result == CatalogIdLookup::Existence::kNotExists) { + auto [catalogId, result] = nssOrUUID.isNamespaceString() + ? _catalogIdTracker.lookup(nssOrUUID.nss(), readTimestamp) + : _catalogIdTracker.lookup(nssOrUUID.uuid(), readTimestamp); + if (result == HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists) { return boost::none; } auto writeCatalogIdAfterScan = [&](const boost::optional& catalogEntry) { + if (!catalogEntry) { + if (nssOrUUID.isNamespaceString()) { + if (!_catalogIdTracker.canRecordNonExisting(nssOrUUID.nss())) { + return; + } + } else { + if (!_catalogIdTracker.canRecordNonExisting(nssOrUUID.uuid())) { + return; + } + } + } + CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { - // Convert from 'const boost::optional&' to 'boost::optional' without copy. - auto nss = [&]() -> boost::optional { - if (const boost::optional& ns = nssOrUUID.nss()) - return ns.value(); - return boost::none; - }(); // Insert catalogId for both the namespace and UUID if the catalog entry is found. - catalog._insertCatalogIdForNSSAndUUIDAfterScan( - catalogEntry ? catalogEntry->metadata->nss : nss, - catalogEntry ? catalogEntry->metadata->options.uuid : nssOrUUID.uuid(), - catalogEntry ? boost::make_optional(catalogEntry->catalogId) : boost::none, - *readTimestamp); + if (catalogEntry) { + catalog._catalogIdTracker.recordExistingAtTime( + catalogEntry->metadata->nss, + *catalogEntry->metadata->options.uuid, + catalogEntry->catalogId, + *readTimestamp); + } else if (nssOrUUID.isNamespaceString()) { + catalog._catalogIdTracker.recordNonExistingAtTime(nssOrUUID.nss(), *readTimestamp); + } else { + catalog._catalogIdTracker.recordNonExistingAtTime(nssOrUUID.uuid(), *readTimestamp); + } }); }; - if (result == CatalogIdLookup::Existence::kUnknown) { + if (result == HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown) { // We shouldn't receive kUnknown when we don't have a timestamp since no timestamp means // we're operating on the latest. 
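The removed _findCatalogIdInRange() shows the lookup that HistoricalCatalogIdTracker now owns: each namespace or UUID maps to a timestamp-ordered list of (timestamp, catalogId) entries, and a point-in-time query takes the last entry at or before the read timestamp, distinguishing exists, not-exists, and unknown (older than the maintained history). A standalone sketch of that search, with simplified types and a made-up lookupAt() helper, follows.

    // Point-in-time id lookup: binary-search the timestamp-ordered history with
    // upper_bound, then step back one entry to get the value in effect at `ts`.
    #include <algorithm>
    #include <iostream>
    #include <optional>
    #include <utility>
    #include <vector>

    struct TimestampedId {
        long long ts;           // commit timestamp of the change
        std::optional<int> id;  // catalog id, or nullopt if dropped at `ts`
    };

    enum class Existence { kExists, kNotExists, kUnknown };

    std::pair<std::optional<int>, Existence> lookupAt(const std::vector<TimestampedId>& history,
                                                      long long ts,
                                                      long long oldestMaintainedTs) {
        auto it = std::upper_bound(history.begin(), history.end(), ts,
                                   [](long long t, const TimestampedId& e) { return t < e.ts; });
        if (it == history.begin()) {
            // Nothing recorded at or before `ts`: unknown if older than what we maintain.
            return {std::nullopt,
                    ts < oldestMaintainedTs ? Existence::kUnknown : Existence::kNotExists};
        }
        const auto& entry = *(--it);  // last change with entry.ts <= ts
        if (entry.id)
            return {entry.id, Existence::kExists};
        return {std::nullopt, Existence::kNotExists};
    }

    int main() {
        // created at ts=10 with id 42, dropped at ts=20
        std::vector<TimestampedId> history{{10, 42}, {20, std::nullopt}};
        auto [id, existence] = lookupAt(history, 15, /*oldestMaintainedTs=*/5);
        std::cout << (existence == Existence::kExists ? *id : -1) << "\n";  // 42
    }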
invariant(readTimestamp); // Scan durable catalog when we don't have accurate catalogId mapping for this timestamp. gCollectionCatalogSection.numScansDueToMissingMapping.fetchAndAdd(1); - auto catalogEntry = nssOrUUID.nss() - ? DurableCatalog::get(opCtx)->scanForCatalogEntryByNss(opCtx, *nssOrUUID.nss()) - : DurableCatalog::get(opCtx)->scanForCatalogEntryByUUID(opCtx, *nssOrUUID.uuid()); + auto catalogEntry = nssOrUUID.isNamespaceString() + ? DurableCatalog::get(opCtx)->scanForCatalogEntryByNss(opCtx, nssOrUUID.nss()) + : DurableCatalog::get(opCtx)->scanForCatalogEntryByUUID(opCtx, nssOrUUID.uuid()); writeCatalogIdAfterScan(catalogEntry); return catalogEntry; } auto catalogEntry = DurableCatalog::get(opCtx)->getParsedCatalogEntry(opCtx, catalogId); - if (const auto& nss = nssOrUUID.nss(); - !catalogEntry || (nss && nss != catalogEntry->metadata->nss)) { + if (!catalogEntry || + (nssOrUUID.isNamespaceString() && nssOrUUID.nss() != catalogEntry->metadata->nss)) { invariant(readTimestamp); // If no entry is found or the entry contains a different namespace, the mapping might be // incorrect since it is incomplete after startup; scans durable catalog to confirm. - auto catalogEntry = nss - ? DurableCatalog::get(opCtx)->scanForCatalogEntryByNss(opCtx, *nss) - : DurableCatalog::get(opCtx)->scanForCatalogEntryByUUID(opCtx, *nssOrUUID.uuid()); + auto catalogEntry = nssOrUUID.isNamespaceString() + ? DurableCatalog::get(opCtx)->scanForCatalogEntryByNss(opCtx, nssOrUUID.nss()) + : DurableCatalog::get(opCtx)->scanForCatalogEntryByUUID(opCtx, nssOrUUID.uuid()); writeCatalogIdAfterScan(catalogEntry); return catalogEntry; } @@ -1326,12 +1306,12 @@ std::shared_ptr CollectionCatalog::findDropPendingIndex(Strin void CollectionCatalog::onCreateCollection(OperationContext* opCtx, std::shared_ptr coll) const { invariant(coll); + const auto& nss = coll->ns(); auto& uncommittedCatalogUpdates = UncommittedCatalogUpdates::get(opCtx); - auto [found, existingColl, newColl] = - UncommittedCatalogUpdates::lookupCollection(opCtx, coll->ns()); + auto [found, existingColl, newColl] = UncommittedCatalogUpdates::lookupCollection(opCtx, nss); uassert(31370, - str::stream() << "collection already exists. ns: " << coll->ns(), + str::stream() << "collection already exists. 
ns: " << nss.toStringForErrorMsg(), existingColl == nullptr); // When we already have a drop and recreate the collection, we want to seamlessly swap out the @@ -1344,6 +1324,10 @@ void CollectionCatalog::onCreateCollection(OperationContext* opCtx, uncommittedCatalogUpdates.createCollection(opCtx, std::move(coll)); } + if (!storageGlobalParams.repair && nss.isSystemDotViews()) { + reloadViews(opCtx, nss.dbName()); + } + PublishCatalogUpdates::ensureRegisteredWithRecoveryUnit(opCtx, uncommittedCatalogUpdates); } @@ -1381,7 +1365,7 @@ void CollectionCatalog::dropCollection(OperationContext* opCtx, void CollectionCatalog::onCloseDatabase(OperationContext* opCtx, DatabaseName dbName) { invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X)); - ResourceCatalog::get(opCtx->getServiceContext()).remove({RESOURCE_DATABASE, dbName}, dbName); + ResourceCatalog::get().remove({RESOURCE_DATABASE, dbName}, dbName); _viewsForDatabase = _viewsForDatabase.erase(dbName); } @@ -1405,6 +1389,10 @@ uint64_t CollectionCatalog::getEpoch() const { return _epoch; } +CollectionCatalog::Range CollectionCatalog::range(const DatabaseName& dbName) const { + return {_orderedCollections, dbName}; +} + std::shared_ptr CollectionCatalog::_getCollectionByUUID(OperationContext* opCtx, const UUID& uuid) const { // It's important to look in UncommittedCatalogUpdates before OpenedCollections because in a @@ -1422,8 +1410,7 @@ std::shared_ptr CollectionCatalog::_getCollectionByUUID(Operat return openedColl.value(); } - auto coll = _lookupCollectionByUUID(uuid); - return (coll && coll->isCommitted()) ? coll : nullptr; + return _lookupCollectionByUUID(uuid); } Collection* CollectionCatalog::lookupCollectionByUUIDForMetadataWrite(OperationContext* opCtx, @@ -1440,13 +1427,13 @@ Collection* CollectionCatalog::lookupCollectionByUUIDForMetadataWrite(OperationC auto nss = uncommittedPtr->ns(); // If the collection is newly created, invariant on the collection being locked in MODE_IX. invariant(!newColl || opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX), - nss.toString()); + nss.toStringForErrorMsg()); return uncommittedPtr.get(); } std::shared_ptr coll = _lookupCollectionByUUID(uuid); - if (!coll || !coll->isCommitted()) + if (!coll) return nullptr; if (coll->ns().isOplog()) @@ -1467,6 +1454,7 @@ Collection* CollectionCatalog::lookupCollectionByUUIDForMetadataWrite(OperationC // on the thread doing the batch write and it would trigger the regular path where we do a // copy-on-write on the catalog when committing. if (_isCatalogBatchWriter()) { + batchedCatalogClonedCollections.emplace(cloned.get()); // Do not update min valid timestamp in batched write as the write is not corresponding to // an oplog entry. If the write require an update to this timestamp it is the responsibility // of the user. @@ -1500,20 +1488,16 @@ const Collection* CollectionCatalog::lookupCollectionByUUID(OperationContext* op return openedColl.value() ? openedColl->get() : nullptr; } - auto coll = _lookupCollectionByUUID(uuid); - return (coll && coll->isCommitted()) ? 
coll.get() : nullptr; + return _lookupCollectionByUUID(uuid).get(); } const Collection* CollectionCatalog::lookupCollectionByNamespaceOrUUID( OperationContext* opCtx, const NamespaceStringOrUUID& nssOrUUID) const { - if (boost::optional uuid = nssOrUUID.uuid()) - return lookupCollectionByUUID(opCtx, *uuid); - return lookupCollectionByNamespace(opCtx, *nssOrUUID.nss()); -} + if (nssOrUUID.isUUID()) { + return lookupCollectionByUUID(opCtx, nssOrUUID.uuid()); + } -bool CollectionCatalog::isCollectionAwaitingVisibility(UUID uuid) const { - auto coll = _lookupCollectionByUUID(uuid); - return coll && !coll->isCommitted(); + return lookupCollectionByNamespace(opCtx, nssOrUUID.nss()); } std::shared_ptr CollectionCatalog::_lookupCollectionByUUID(UUID uuid) const { @@ -1544,8 +1528,7 @@ std::shared_ptr CollectionCatalog::_getCollectionByNamespace( } const std::shared_ptr* collPtr = _collections.find(nss); - auto coll = collPtr ? *collPtr : nullptr; - return (coll && coll->isCommitted()) ? coll : nullptr; + return collPtr ? *collPtr : nullptr; } Collection* CollectionCatalog::lookupCollectionByNamespaceForMetadataWrite( @@ -1565,7 +1548,7 @@ Collection* CollectionCatalog::lookupCollectionByNamespaceForMetadataWrite( if (uncommittedPtr) { // If the collection is newly created, invariant on the collection being locked in MODE_IX. invariant(!newColl || opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX), - nss.toString()); + nss.toStringForErrorMsg()); return uncommittedPtr.get(); } @@ -1577,7 +1560,7 @@ Collection* CollectionCatalog::lookupCollectionByNamespaceForMetadataWrite( const std::shared_ptr* collPtr = _collections.find(nss); auto coll = collPtr ? *collPtr : nullptr; - if (!coll || !coll->isCommitted()) + if (!coll) return nullptr; invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X)); @@ -1595,6 +1578,7 @@ Collection* CollectionCatalog::lookupCollectionByNamespaceForMetadataWrite( // on the thread doing the batch write and it would trigger the regular path where we do a // copy-on-write on the catalog when committing. if (_isCatalogBatchWriter()) { + batchedCatalogClonedCollections.emplace(cloned.get()); // Do not update min valid timestamp in batched write as the write is not corresponding to // an oplog entry. If the write require an update to this timestamp it is the responsibility // of the user. @@ -1633,8 +1617,7 @@ const Collection* CollectionCatalog::lookupCollectionByNamespace(OperationContex } const std::shared_ptr* collPtr = _collections.find(nss); - auto coll = collPtr ? *collPtr : nullptr; - return (coll && coll->isCommitted()) ? coll.get() : nullptr; + return collPtr ? collPtr->get() : nullptr; } boost::optional CollectionCatalog::lookupNSSByUUID(OperationContext* opCtx, @@ -1665,9 +1648,7 @@ boost::optional CollectionCatalog::lookupNSSByUUID(OperationCon const std::shared_ptr* collPtr = _catalog.find(uuid); if (collPtr) { auto coll = *collPtr; - boost::optional ns = coll->ns(); - invariant(!ns.value().isEmpty()); - return coll->isCommitted() ? ns : boost::none; + return coll->ns(); } // Only in the case that the catalog is closed and a UUID is currently unknown, resolve it @@ -1708,13 +1689,12 @@ boost::optional CollectionCatalog::lookupUUIDByNSS(OperationContext* opCtx const std::shared_ptr* collPtr = _collections.find(nss); if (collPtr) { auto coll = *collPtr; - const boost::optional& uuid = coll->uuid(); - return coll->isCommitted() ? 
uuid : boost::none; + return coll->uuid(); } return boost::none; } -bool CollectionCatalog::containsCollection(OperationContext* opCtx, +bool CollectionCatalog::isLatestCollection(OperationContext* opCtx, const Collection* collection) const { // Any writable Collection instance created under MODE_X lock is considered to belong to this // catalog instance @@ -1730,26 +1710,16 @@ bool CollectionCatalog::containsCollection(OperationContext* opCtx, // Verify that we store the same instance in this catalog const std::shared_ptr* coll = _catalog.find(collection->uuid()); - if (!coll) - return false; - - return coll->get() == collection; -} - -CollectionCatalog::CatalogIdLookup CollectionCatalog::lookupCatalogIdByNSS( - const NamespaceString& nss, boost::optional ts) const { - if (const std::vector* mapping = _nssCatalogIds.find(nss)) { - return _findCatalogIdInRange(ts, *mapping); + if (!coll) { + // If there is nothing in the main catalog check for pending commit, we could have just + // committed a newly created collection which would be considered latest. + coll = _pendingCommitUUIDs.find(collection->uuid()); + if (!coll || !coll->get()) { + return false; + } } - return _checkWithOldestCatalogIdTimestampMaintained(ts); -} -CollectionCatalog::CatalogIdLookup CollectionCatalog::lookupCatalogIdByUUID( - const UUID& uuid, boost::optional ts) const { - if (const std::vector* mapping = _uuidCatalogIds.find(uuid)) { - return _findCatalogIdInRange(ts, *mapping); - } - return _checkWithOldestCatalogIdTimestampMaintained(ts); + return coll->get() == collection; } void CollectionCatalog::iterateViews( @@ -1774,7 +1744,7 @@ std::shared_ptr CollectionCatalog::lookupView( if (!viewsForDb->valid() && opCtx->getClient()->isFromUserConnection()) { // We want to avoid lookups on invalid collection names. 
- if (!NamespaceString::validCollectionName(ns.ns())) { + if (!NamespaceString::validCollectionName(NamespaceStringUtil::serializeForCatalog(ns))) { return nullptr; } @@ -1799,24 +1769,26 @@ std::shared_ptr CollectionCatalog::lookupViewWithoutValida NamespaceString CollectionCatalog::resolveNamespaceStringOrUUID( OperationContext* opCtx, NamespaceStringOrUUID nsOrUUID) const { - if (auto& nss = nsOrUUID.nss()) { + if (nsOrUUID.isNamespaceString()) { uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Namespace " << *nss << " is not a valid collection name", - nss->isValid()); - return std::move(*nss); + str::stream() << "Namespace " << nsOrUUID.toStringForErrorMsg() + << " is not a valid collection name", + nsOrUUID.nss().isValid()); + return nsOrUUID.nss(); } - auto resolvedNss = lookupNSSByUUID(opCtx, *nsOrUUID.uuid()); + auto resolvedNss = lookupNSSByUUID(opCtx, nsOrUUID.uuid()); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Unable to resolve " << nsOrUUID.toString(), + str::stream() << "Unable to resolve " << nsOrUUID.toStringForErrorMsg(), resolvedNss && resolvedNss->isValid()); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "UUID: " << nsOrUUID.toString() << " specified in provided db name: " - << nsOrUUID.dbName()->toStringForErrorMsg() + str::stream() << "UUID: " << nsOrUUID.toStringForErrorMsg() + << " specified in provided db name: " + << nsOrUUID.dbName().toStringForErrorMsg() << " resolved to a collection in a different database, resolved nss: " - << *resolvedNss, + << (*resolvedNss).toStringForErrorMsg(), resolvedNss->dbName() == nsOrUUID.dbName()); return std::move(*resolvedNss); @@ -1839,9 +1811,7 @@ std::vector CollectionCatalog::getAllCollectionUUIDsFromDb(const DatabaseN std::vector ret; while (it != _orderedCollections.end() && it->first.first == dbName) { - if (it->second->isCommitted()) { - ret.push_back(it->first.second); - } + ret.push_back(it->first.second); ++it; } return ret; @@ -1855,9 +1825,7 @@ std::vector CollectionCatalog::getAllCollectionNamesFromDb( for (auto it = _orderedCollections.lower_bound(std::make_pair(dbName, minUuid)); it != _orderedCollections.end() && it->first.first == dbName; ++it) { - if (it->second->isCommitted()) { - ret.push_back(it->second->ns()); - } + ret.push_back(it->second->ns()); } return ret; } @@ -1868,25 +1836,19 @@ Status CollectionCatalog::_iterAllDbNamesHelper( const std::function(const DatabaseName&)>& nextUpperBound) const { // _orderedCollections is sorted by . upper_bound will return the iterator to the // first element in _orderedCollections greater than . - auto iter = - _orderedCollections.upper_bound(std::make_pair(DatabaseName(tenantId, ""), maxUuid)); + auto iter = _orderedCollections.upper_bound( + std::make_pair(DatabaseNameUtil::deserialize(tenantId, ""), maxUuid)); while (iter != _orderedCollections.end()) { auto dbName = iter->first.first; if (tenantId && dbName.tenantId() != tenantId) { break; } - if (iter->second->isCommitted()) { - auto status = callback(dbName); - if (!status.isOK()) { - return status; - } - } else { - // If the first collection found for `dbName` is not yet committed, increment the - // iterator to find the next visible collection (possibly under a different - // `dbName`). - iter++; - continue; + + auto status = callback(dbName); + if (!status.isOK()) { + return status; } + // Move on to the next database after `dbName`. 
iter = _orderedCollections.upper_bound(nextUpperBound(dbName)); } @@ -1921,7 +1883,8 @@ std::set CollectionCatalog::getAllTenants() const { return Status::OK(); }, [](const DatabaseName& dbName) { - return std::make_pair(DatabaseName(dbName.tenantId(), "\xff"), maxUuid); + return std::make_pair(DatabaseNameUtil::deserialize(dbName.tenantId(), "\xff"), + maxUuid); }); return ret; } @@ -1976,27 +1939,26 @@ CollectionCatalog::ViewCatalogSet CollectionCatalog::getViewCatalogDbNames( } void CollectionCatalog::registerCollection(OperationContext* opCtx, - const UUID& uuid, std::shared_ptr coll, boost::optional commitTime) { invariant(opCtx->lockState()->isW()); - _registerCollection(opCtx, uuid, std::move(coll), /*twoPhase=*/false, commitTime); -} -void CollectionCatalog::registerCollectionTwoPhase(OperationContext* opCtx, - const UUID& uuid, - std::shared_ptr coll, - boost::optional commitTime) { - _registerCollection(opCtx, uuid, std::move(coll), /*twoPhase=*/true, commitTime); + const auto& nss = coll->ns(); + + _ensureNamespaceDoesNotExist(opCtx, coll->ns(), NamespaceType::kAll); + _registerCollection(opCtx, coll, commitTime); + + if (!storageGlobalParams.repair && coll->ns().isSystemDotViews()) { + _viewsForDatabase = + _viewsForDatabase.set(nss.dbName(), loadViewsForDatabase(opCtx, *this, nss.dbName())); + } } void CollectionCatalog::_registerCollection(OperationContext* opCtx, - const UUID& uuid, std::shared_ptr coll, - bool twoPhase, boost::optional commitTime) { - auto nss = coll->ns(); - _ensureNamespaceDoesNotExist(opCtx, nss, NamespaceType::kAll); + const auto& nss = coll->ns(); + auto uuid = coll->uuid(); LOGV2_DEBUG(20280, 1, @@ -2013,23 +1975,18 @@ void CollectionCatalog::_registerCollection(OperationContext* opCtx, _catalog = _catalog.set(uuid, coll); _collections = _collections.set(nss, coll); - _orderedCollections[dbIdPair] = coll; - if (twoPhase) { - _pendingCommitNamespaces = _pendingCommitNamespaces.set(nss, coll); - _pendingCommitUUIDs = _pendingCommitUUIDs.set(uuid, coll); - } else { - _pendingCommitNamespaces = _pendingCommitNamespaces.erase(nss); - _pendingCommitUUIDs = _pendingCommitUUIDs.erase(uuid); - } + _orderedCollections = _orderedCollections.set(dbIdPair, coll); + _pendingCommitNamespaces = _pendingCommitNamespaces.erase(nss); + _pendingCommitUUIDs = _pendingCommitUUIDs.erase(uuid); if (commitTime) { coll->setMinimumValidSnapshot(commitTime.value()); - - // When restarting from standalone mode to a replica set, the stable timestamp may be null. - // We still need to register the nss and UUID with the catalog. - _pushCatalogIdForNSSAndUUID(nss, uuid, coll->getCatalogId(), commitTime); } + // When restarting from standalone mode to a replica set, the stable timestamp may be null. + // We still need to register the nss and UUID with the catalog. 
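_iterAllDbNamesHelper() walks the (dbName, uuid)-ordered map one database at a time: after visiting a database it jumps past all of that database's entries with upper_bound on (dbName, maxUuid). The sketch below reproduces that skip-ahead enumeration over a plain std::map, again with ints standing in for UUIDs.

    // Enumerate distinct database names from a map ordered by (dbName, uuid):
    // visit the first entry of each database, then jump past the rest of that
    // database with upper_bound({dbName, maxUuid}).
    #include <iostream>
    #include <limits>
    #include <map>
    #include <string>
    #include <utility>

    int main() {
        using Key = std::pair<std::string, int>;  // (dbName, uuid)
        const int maxUuid = std::numeric_limits<int>::max();

        std::map<Key, std::string> orderedCollections{
            {{"admin", 1}, "admin.system.version"},
            {{"test", 2}, "test.a"},
            {{"test", 5}, "test.b"},
            {{"zoo", 3}, "zoo.c"},
        };

        for (auto it = orderedCollections.begin(); it != orderedCollections.end();
             it = orderedCollections.upper_bound({it->first.first, maxUuid})) {
            std::cout << it->first.first << "\n";  // admin, test, zoo -- each printed once
        }
    }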
+ _catalogIdTracker.create(nss, uuid, coll->getCatalogId(), commitTime); + if (!nss.isOnInternalDb() && !nss.isSystem()) { _stats.userCollections += 1; @@ -2045,24 +2002,9 @@ void CollectionCatalog::_registerCollection(OperationContext* opCtx, invariant(static_cast(_stats.internal + _stats.userCollections) == _collections.size()); - auto& resourceCatalog = ResourceCatalog::get(opCtx->getServiceContext()); + auto& resourceCatalog = ResourceCatalog::get(); resourceCatalog.add({RESOURCE_DATABASE, nss.dbName()}, nss.dbName()); resourceCatalog.add({RESOURCE_COLLECTION, nss}, nss); - - if (!storageGlobalParams.repair && coll->ns().isSystemDotViews()) { - ViewsForDatabase viewsForDb; - if (auto status = viewsForDb.reload( - opCtx, CollectionPtr(_lookupSystemViews(opCtx, coll->ns().dbName()))); - !status.isOK()) { - LOGV2_WARNING_OPTIONS(20326, - {logv2::LogTag::kStartupWarnings}, - "Unable to parse views; remove any invalid views from the " - "collection to restore server functionality", - "error"_attr = redact(status), - logAttrs(coll->ns())); - } - _viewsForDatabase = _viewsForDatabase.set(coll->ns().dbName(), std::move(viewsForDb)); - } } std::shared_ptr CollectionCatalog::deregisterCollection( @@ -2082,9 +2024,7 @@ std::shared_ptr CollectionCatalog::deregisterCollection( invariant(_collections.find(ns)); invariant(_orderedCollections.find(dbIdPair) != _orderedCollections.end()); - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe() && isDropPending) { + if (isDropPending) { if (auto sharedIdent = coll->getSharedIdent(); sharedIdent) { auto ident = sharedIdent->getIdent(); LOGV2_DEBUG( @@ -2095,16 +2035,13 @@ std::shared_ptr CollectionCatalog::deregisterCollection( } } - _orderedCollections.erase(dbIdPair); + _orderedCollections = _orderedCollections.erase(dbIdPair); _collections = _collections.erase(ns); _catalog = _catalog.erase(uuid); _pendingCommitNamespaces = _pendingCommitNamespaces.erase(ns); _pendingCommitUUIDs = _pendingCommitUUIDs.erase(uuid); - // Push drop unless this is a rollback of a create - if (coll->isCommitted()) { - _pushCatalogIdForNSSAndUUID(ns, uuid, boost::none, commitTime); - } + _catalogIdTracker.drop(ns, uuid, commitTime); if (!ns.isOnInternalDb() && !ns.isSystem()) { _stats.userCollections -= 1; @@ -2122,7 +2059,7 @@ std::shared_ptr CollectionCatalog::deregisterCollection( coll->onDeregisterFromCatalog(opCtx); - ResourceCatalog::get(opCtx->getServiceContext()).remove({RESOURCE_COLLECTION, ns}, ns); + ResourceCatalog::get().remove({RESOURCE_COLLECTION, ns}, ns); if (!storageGlobalParams.repair && coll->ns().isSystemDotViews()) { _viewsForDatabase = _viewsForDatabase.erase(coll->ns().dbName()); @@ -2155,8 +2092,19 @@ void CollectionCatalog::_ensureNamespaceDoesNotExist(OperationContext* opCtx, LOGV2(5725001, "Conflicted registering namespace, already have a collection with the same namespace", "nss"_attr = nss); - throwWriteConflictException(str::stream() << "Collection namespace '" << nss.ns() - << "' is already in use."); + throwWriteConflictException(str::stream() + << "Collection namespace '" << nss.toStringForErrorMsg() + << "' is already in use."); + } + + existingCollection = _pendingCommitNamespaces.find(nss); + if (existingCollection && existingCollection->get()) { + LOGV2(7683900, + "Conflicted registering namespace, already have a collection with the same namespace", + "nss"_attr = nss); + 
throwWriteConflictException(str::stream() + << "Collection namespace '" << nss.toStringForErrorMsg() + << "' is already in use."); } if (type == NamespaceType::kAll) { @@ -2164,8 +2112,9 @@ void CollectionCatalog::_ensureNamespaceDoesNotExist(OperationContext* opCtx, LOGV2(5725002, "Conflicted registering namespace, already have a view with the same namespace", "nss"_attr = nss); - throwWriteConflictException(str::stream() << "Collection namespace '" << nss.ns() - << "' is already in use."); + throwWriteConflictException(str::stream() + << "Collection namespace '" << nss.toStringForErrorMsg() + << "' is already in use."); } if (auto viewsForDb = _getViewsForDatabase(opCtx, nss.dbName())) { @@ -2182,285 +2131,6 @@ void CollectionCatalog::_ensureNamespaceDoesNotExist(OperationContext* opCtx, } } -void CollectionCatalog::_pushCatalogIdForNSSAndUUID(const NamespaceString& nss, - const UUID& uuid, - boost::optional catalogId, - boost::optional ts) { - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - // No-op. - return; - } - - auto doPushCatalogId = [this, &ts, &catalogId](auto& catalogIdsContainer, - auto& catalogIdChangesContainer, - const auto& key) { - auto ids = copyIfExists(catalogIdsContainer, key); - - // Helper to write updated id mapping back into container at scope exit - ScopeGuard scopedGuard([&] { - // Make sure we erase mapping for namespace or UUID if the list is left empty as - // lookups expect at least one entry for existing namespaces or UUIDs. - if (ids.empty()) { - catalogIdsContainer = catalogIdsContainer.erase(key); - } else { - catalogIdsContainer = catalogIdsContainer.set(key, std::move(ids)); - } - }); - - if (!ts) { - // Make sure untimestamped writes have a single entry in mapping. If we're mixing - // timestamped with untimestamped (such as repair). Ignore the untimestamped writes as - // an untimestamped deregister will correspond with an untimestamped register. We should - // leave the mapping as-is in this case. - if (ids.empty() && catalogId) { - // This namespace or UUID was added due to an untimestamped write, add an entry - // with min timestamp - ids.push_back(TimestampedCatalogId{catalogId, Timestamp::min()}); - } else if (ids.size() == 1 && !catalogId) { - // This namespace or UUID was removed due to an untimestamped write, clear entries. - ids.clear(); - } else if (ids.size() > 1 && catalogId && !storageGlobalParams.repair) { - // This namespace or UUID was added due to an untimestamped write. But this - // namespace or UUID already had some timestamped writes performed. In this case, we - // re-write the history. The only known area that does this today is when profiling - // is enabled (untimestamped collection creation), followed by dropping the database - // (timestamped collection drop). - // TODO SERVER-75740: Remove this branch. - invariant(!ids.back().ts.isNull()); - - ids.clear(); - ids.push_back(TimestampedCatalogId{catalogId, Timestamp::min()}); - } - - return; - } - - // An entry could exist already if concurrent writes are performed, keep the latest change - // in that case. - if (!ids.empty() && ids.back().ts == *ts) { - ids.back().id = catalogId; - return; - } - - // Otherwise, push new entry at the end. Timestamp is always increasing - invariant(ids.empty() || ids.back().ts < *ts); - // If the catalogId is the same as last entry, there's nothing we need to do. 
This can - // happen when the catalog is reopened. - if (!ids.empty() && ids.back().id == catalogId) { - return; - } - - // A drop entry can't be pushed in the container if it's empty. This is because we cannot - // initialize the namespace or UUID with a single drop. - invariant(!ids.empty() || catalogId); - - ids.push_back(TimestampedCatalogId{catalogId, *ts}); - - auto changes = catalogIdChangesContainer.transient(); - _markForCatalogIdCleanupIfNeeded(key, changes, ids); - catalogIdChangesContainer = changes.persistent(); - }; - - doPushCatalogId(_nssCatalogIds, _nssCatalogIdChanges, nss); - doPushCatalogId(_uuidCatalogIds, _uuidCatalogIdChanges, uuid); -} - -void CollectionCatalog::_pushCatalogIdForRename(const NamespaceString& from, - const NamespaceString& to, - boost::optional ts) { - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - // No-op. - return; - } - - // Get 'toIds' first, it may need to instantiate in the container which invalidates all - // references. - auto idsWriter = _nssCatalogIds.transient(); - auto changesWriter = _nssCatalogIdChanges.transient(); - auto toIds = copyIfExists(idsWriter, to); - auto fromIds = copyIfExists(idsWriter, from); - invariant(!fromIds.empty()); - - // Helper to write updated id mappings back into containers at scope exit - ScopeGuard scopedGuard([&] { - // Make sure we erase mapping for namespace or UUID if the list is left empty as - // lookups expect at least one entry for existing namespaces or UUIDs. - idsWriter.set(to, std::move(toIds)); - if (fromIds.empty()) { - idsWriter.erase(from); - } else { - idsWriter.set(from, std::move(fromIds)); - } - _nssCatalogIds = idsWriter.persistent(); - _nssCatalogIdChanges = changesWriter.persistent(); - }); - - // Make sure untimestamped writes have a single entry in mapping. We move the single entry from - // 'from' to 'to'. We do not have to worry about mixing timestamped and untimestamped like - // _pushCatalogId. - if (!ts) { - // We should never perform rename in a mixed-mode environment. 'from' should contain a - // single entry and there should be nothing in 'to' . - invariant(fromIds.size() == 1); - invariant(toIds.empty()); - toIds.push_back(TimestampedCatalogId{fromIds.back().id, Timestamp::min()}); - fromIds.clear(); - return; - } - - // An entry could exist already if concurrent writes are performed, keep the latest change in - // that case. - if (!toIds.empty() && toIds.back().ts == *ts) { - toIds.back().id = fromIds.back().id; - } else { - invariant(toIds.empty() || toIds.back().ts < *ts); - toIds.push_back(TimestampedCatalogId{fromIds.back().id, *ts}); - _markForCatalogIdCleanupIfNeeded(to, changesWriter, toIds); - } - - // Re-write latest entry if timestamp match (multiple changes occured in this transaction), - // otherwise push at end - if (!fromIds.empty() && fromIds.back().ts == *ts) { - fromIds.back().id = boost::none; - } else { - invariant(fromIds.empty() || fromIds.back().ts < *ts); - fromIds.push_back(TimestampedCatalogId{boost::none, *ts}); - _markForCatalogIdCleanupIfNeeded(from, changesWriter, fromIds); - } -} - -void CollectionCatalog::_insertCatalogIdForNSSAndUUIDAfterScan( - boost::optional nss, - boost::optional uuid, - boost::optional catalogId, - Timestamp ts) { - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
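
The removed `_pushCatalogIdForNSSAndUUID` and `_pushCatalogIdForRename` helpers above (whose responsibilities this diff consolidates into `HistoricalCatalogIdTracker`) keep, per namespace or UUID, a vector of (catalogId, timestamp) entries sorted by timestamp, where an empty id records a drop, a same-timestamp write collapses onto the existing entry, and a history can never begin with a drop. A minimal self-contained sketch of that push rule follows; `TimestampedId` and `pushCatalogId` are simplified stand-ins (plain integers, `std::optional`), not MongoDB's types.

```cpp
// Sketch of the timestamped catalogId "push" rule outlined above.
// TimestampedId and pushCatalogId are illustrative stand-ins, not MongoDB code.
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

struct TimestampedId {
    std::optional<std::int64_t> id;  // std::nullopt models a drop
    std::uint64_t ts;                // timestamps only move forward per key
};

void pushCatalogId(std::vector<TimestampedId>& ids,
                   std::optional<std::int64_t> catalogId,
                   std::uint64_t ts) {
    // Concurrent writes at the same timestamp collapse onto the latest change.
    if (!ids.empty() && ids.back().ts == ts) {
        ids.back().id = catalogId;
        return;
    }
    assert(ids.empty() || ids.back().ts < ts);  // history grows forward only
    // Re-registering the same id (e.g. after a catalog reopen) needs no entry.
    if (!ids.empty() && ids.back().id == catalogId) {
        return;
    }
    // A history cannot be initialized with a drop.
    assert(!ids.empty() || catalogId.has_value());
    ids.push_back({catalogId, ts});
}

int main() {
    std::vector<TimestampedId> ids;
    pushCatalogId(ids, 7, 10);             // create at ts=10
    pushCatalogId(ids, std::nullopt, 20);  // drop at ts=20
    assert(ids.size() == 2 && !ids.back().id);
}
```
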
- if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - // No-op. - return; - } - - auto doInsert = [this, &catalogId, &ts](auto& catalogIdsContainer, - auto& catalogIdChangesContainer, - const auto& key) { - auto changesWriter = catalogIdChangesContainer.transient(); - auto ids = copyIfExists(catalogIdsContainer, key); - - // Helper to write updated id mapping back into container at scope exit - ScopeGuard scopedGuard([&] { - // Make sure we erase mapping for namespace or UUID if the list is left empty as - // lookups expect at least one entry for existing namespaces or UUIDs. - if (ids.empty()) { - catalogIdsContainer = catalogIdsContainer.erase(key); - } else { - catalogIdsContainer = catalogIdsContainer.set(key, std::move(ids)); - } - catalogIdChangesContainer = changesWriter.persistent(); - }); - - // Binary search for to the entry with same or larger timestamp - auto it = std::lower_bound( - ids.begin(), ids.end(), ts, [](const auto& entry, const Timestamp& ts) { - return entry.ts < ts; - }); - - // The logic of what we need to do differs whether we are inserting a valid catalogId or - // not. - if (catalogId) { - if (it != ids.end()) { - // An entry could exist already if concurrent writes are performed, keep the latest - // change in that case. - if (it->ts == ts) { - it->id = catalogId; - return; - } - - // If next element has same catalogId, we can adjust its timestamp to cover a longer - // range - if (it->id == catalogId) { - it->ts = ts; - _markForCatalogIdCleanupIfNeeded(key, changesWriter, ids); - return; - } - } - - // Otherwise insert new entry at timestamp - ids.insert(it, TimestampedCatalogId{catalogId, ts}); - _markForCatalogIdCleanupIfNeeded(key, changesWriter, ids); - return; - } - - // Avoid inserting missing mapping when the list has grown past the threshold. Will cause - // the system to fall back to scanning the durable catalog. - if (ids.size() >= kMaxCatalogIdMappingLengthForMissingInsert) { - return; - } - - if (it != ids.end() && it->ts == ts) { - // An entry could exist already if concurrent writes are performed, keep the latest - // change in that case. - it->id = boost::none; - } else { - // Otherwise insert new entry - it = ids.insert(it, TimestampedCatalogId{boost::none, ts}); - } - - // The iterator is positioned on the added/modified element above, reposition it to the next - // entry - ++it; - - // We don't want to assume that the namespace or UUID remains not existing until the next - // entry, as there can be times where the namespace or UUID actually does exist. To make - // sure we trigger the scanning of the durable catalog in this range we will insert a bogus - // entry using an invalid RecordId at the next timestamp. This will treat the range forward - // as unknown. - auto nextTs = ts + 1; - - // If the next entry is on the next timestamp already, we can skip adding the bogus entry. - // If this function is called for a previously unknown namespace or UUID, we may not have - // any future valid entries and the iterator would be positioned at and at this point. 
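
The removed `_insertCatalogIdForNSSAndUUIDAfterScan` above back-fills a mapping discovered by scanning the durable catalog: it binary-searches the timestamp-sorted vector, merges with an equal-timestamp or equal-id neighbour, and otherwise inserts in place. A sketch of just that insertion step, reusing the illustrative stand-in types from the previous sketch (not MongoDB's actual implementation):

```cpp
// Sketch of inserting a scanned (id, ts) entry into a timestamp-sorted history.
#include <algorithm>
#include <cstdint>
#include <optional>
#include <vector>

struct TimestampedId {
    std::optional<std::int64_t> id;
    std::uint64_t ts;
};

void insertAfterScan(std::vector<TimestampedId>& ids,
                     std::optional<std::int64_t> catalogId,
                     std::uint64_t ts) {
    // Find the first entry at or after 'ts'.
    auto it = std::lower_bound(
        ids.begin(), ids.end(), ts,
        [](const TimestampedId& entry, std::uint64_t t) { return entry.ts < t; });

    if (it != ids.end() && it->ts == ts) {
        it->id = catalogId;  // keep the latest change for an existing timestamp
        return;
    }
    if (it != ids.end() && it->id == catalogId) {
        it->ts = ts;  // same id as the next entry: extend its range backwards
        return;
    }
    ids.insert(it, {catalogId, ts});
}
```
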
- if (it == ids.end() || it->ts != nextTs) { - ids.insert(it, TimestampedCatalogId{kUnknownRangeMarkerId, nextTs}); - } - - _markForCatalogIdCleanupIfNeeded(key, changesWriter, ids); - }; - - if (nss) { - doInsert(_nssCatalogIds, _nssCatalogIdChanges, *nss); - } - - if (uuid) { - doInsert(_uuidCatalogIds, _uuidCatalogIdChanges, *uuid); - } -} - -template -void CollectionCatalog::_markForCatalogIdCleanupIfNeeded( - const Key& key, - CatalogIdChangesContainer& catalogIdChangesContainer, - const std::vector& ids) { - - auto markForCleanup = [this, &key, &catalogIdChangesContainer](Timestamp ts) { - catalogIdChangesContainer.insert(key); - if (ts < _lowestCatalogIdTimestampForCleanup) { - _lowestCatalogIdTimestampForCleanup = ts; - } - }; - - // Cleanup may occur if we have more than one entry for the namespace. - if (ids.size() > 1) { - // When we have multiple entries, use the time at the second entry as the cleanup time, - // when the oldest timestamp advances past this we no longer need the first entry. - markForCleanup(ids.at(1).ts); - } -} - void CollectionCatalog::deregisterAllCollectionsAndViews(ServiceContext* svcCtx) { LOGV2(20282, "Deregistering all the collections"); for (auto& entry : _catalog) { @@ -2471,14 +2141,14 @@ void CollectionCatalog::deregisterAllCollectionsAndViews(ServiceContext* svcCtx) } _collections = {}; - _orderedCollections.clear(); + _orderedCollections = {}; _catalog = {}; _viewsForDatabase = {}; _dropPendingCollection = {}; _dropPendingIndex = {}; _stats = {}; - ResourceCatalog::get(svcCtx).clear(); + ResourceCatalog::get().clear(); } void CollectionCatalog::clearViews(OperationContext* opCtx, const DatabaseName& dbName) const { @@ -2499,14 +2169,6 @@ void CollectionCatalog::clearViews(OperationContext* opCtx, const DatabaseName& void CollectionCatalog::deregisterIndex(OperationContext* opCtx, std::shared_ptr indexEntry, bool isDropPending) { - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe() || - !isDropPending) { - // No-op. - return; - } - // Unfinished index builds return a nullptr for getSharedIdent(). Use getIdent() instead. std::string ident = indexEntry->getIdent(); @@ -2531,160 +2193,9 @@ void CollectionCatalog::notifyIdentDropped(const std::string& ident) { _dropPendingIndex = _dropPendingIndex.erase(ident); } -CollectionCatalog::iterator CollectionCatalog::begin(OperationContext* opCtx, - const DatabaseName& dbName) const { - return iterator(opCtx, dbName, *this); -} - -CollectionCatalog::iterator CollectionCatalog::end(OperationContext* opCtx) const { - return iterator(opCtx, _orderedCollections.end(), *this); -} - -bool CollectionCatalog::needsCleanupForOldestTimestamp(Timestamp oldest) const { - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - // No-op. 
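
Throughout this diff the catalog members become value-semantic immutable containers, which is why mutations are written as reassignments such as `_orderedCollections = _orderedCollections.erase(dbIdPair)` and resets as `_orderedCollections = {}`. The toy wrapper below illustrates that shape with the standard library only: `set`/`erase` return a new snapshot instead of mutating, so readers holding the old snapshot are unaffected. It is a teaching stand-in for the catalog's `immutable::map`, not its real (structurally shared) implementation.

```cpp
// Toy value-semantic ("persistent") map showing why the diff reassigns the
// result of erase()/set() instead of mutating in place.
#include <cassert>
#include <map>
#include <string>
#include <utility>

template <typename K, typename V>
class PersistentMap {
public:
    PersistentMap() = default;

    PersistentMap set(const K& k, const V& v) const {
        auto copy = _data;  // a real persistent map shares structure instead of copying
        copy[k] = v;
        return PersistentMap(std::move(copy));
    }
    PersistentMap erase(const K& k) const {
        auto copy = _data;
        copy.erase(k);
        return PersistentMap(std::move(copy));
    }
    const V* find(const K& k) const {
        auto it = _data.find(k);
        return it == _data.end() ? nullptr : &it->second;
    }

private:
    explicit PersistentMap(std::map<K, V> data) : _data(std::move(data)) {}
    std::map<K, V> _data;
};

int main() {
    PersistentMap<std::string, int> catalog;
    auto v1 = catalog.set("coll-a", 1);
    auto v2 = v1.erase("coll-a");          // note the reassignment pattern
    assert(v1.find("coll-a") != nullptr);  // the old snapshot still sees the entry
    assert(v2.find("coll-a") == nullptr);
}
```
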
- return false; - } - - return _lowestCatalogIdTimestampForCleanup <= oldest; -} - -void CollectionCatalog::cleanupForOldestTimestampAdvanced(Timestamp oldest) { - Timestamp nextLowestCleanupTimestamp = Timestamp::max(); - // Helper to calculate the smallest entry that needs to be kept and its timestamp - auto assignLowestCleanupTimestamp = [&nextLowestCleanupTimestamp](const auto& range) { - // The second entry is cleanup time as at that point the first entry is no longer needed. - // The input range have at a minimum two entries. - auto it = range.begin() + 1; - nextLowestCleanupTimestamp = std::min(nextLowestCleanupTimestamp, it->ts); - }; - - auto doCleanup = [this, &oldest, &assignLowestCleanupTimestamp]( - auto& catalogIdsContainer, auto& catalogIdChangesContainer) { - // Batch all changes together - auto ids = catalogIdsContainer.transient(); - auto changes = catalogIdChangesContainer.transient(); - - for (auto it = catalogIdChangesContainer.begin(), end = catalogIdChangesContainer.end(); - it != end;) { - auto range = ids[*it]; - - // Binary search for next larger timestamp - auto rangeIt = std::upper_bound( - range.begin(), range.end(), oldest, [](const auto& ts, const auto& entry) { - return ts < entry.ts; - }); - - // Continue if there is nothing to cleanup for this timestamp yet - if (rangeIt == range.begin()) { - // There should always be at least two entries in the range when we hit this - // branch. For the namespace to be put in '_nssCatalogIdChanges' we normally - // need at least two entries. The namespace could require cleanup with just a - // single entry if 'cleanupForCatalogReopen' leaves a single drop entry in the - // range. But because we cannot initialize the namespace with a single drop - // there must have been a non-drop entry earlier that got cleaned up in a - // previous call to 'cleanupForOldestTimestampAdvanced', which happens when the - // oldest timestamp advances past the drop timestamp. This guarantees that the - // oldest timestamp is larger than the timestamp in the single drop entry - // resulting in this branch cannot be taken when we only have a drop in the - // range. - invariant(range.size() > 1); - assignLowestCleanupTimestamp(range); - ++it; - continue; - } - - // The iterator is positioned to the closest entry that has a larger timestamp, - // decrement to get a lower or equal timestamp - --rangeIt; - - // Erase range, we will leave at least one element due to the decrement above - range.erase(range.begin(), rangeIt); - - // If more changes are needed for this namespace, keep it in the set and keep track - // of lowest timestamp. - if (range.size() > 1) { - assignLowestCleanupTimestamp(range); - ids.set(*it, std::move(range)); - ++it; - continue; - } - // If the last remaining element is a drop earlier than the oldest timestamp, we can - // remove tracking this namespace - if (range.back().id == boost::none) { - ids.erase(*it); - } else { - ids.set(*it, std::move(range)); - } - - // Unmark this namespace or UUID for needing changes. 
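
The removed `cleanupForOldestTimestampAdvanced` above prunes each timestamp-sorted history once the oldest timestamp passes its older entries, while always keeping the entry that is still effective at the oldest timestamp. A compact sketch of that pruning step under the same stand-in types as the earlier sketches (not the tracker's real interface):

```cpp
// Sketch of pruning a timestamp-sorted history when the oldest timestamp advances.
#include <algorithm>
#include <cstdint>
#include <iterator>
#include <optional>
#include <vector>

struct TimestampedId {
    std::optional<std::int64_t> id;
    std::uint64_t ts;
};

void pruneForOldest(std::vector<TimestampedId>& ids, std::uint64_t oldest) {
    // First entry strictly newer than 'oldest'.
    auto it = std::upper_bound(
        ids.begin(), ids.end(), oldest,
        [](std::uint64_t t, const TimestampedId& entry) { return t < entry.ts; });
    if (it == ids.begin())
        return;  // nothing is old enough to prune yet
    // Keep the last entry at or before 'oldest'; erase everything before it.
    ids.erase(ids.begin(), std::prev(it));
    // If the single remaining entry records a drop, the caller can forget the
    // key entirely (the real code erases the map entry in that case).
}
```
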
- changes.erase(*it); - ++it; - } - - // Write back all changes to main container - catalogIdChangesContainer = changes.persistent(); - catalogIdsContainer = ids.persistent(); - }; - - // Iterate over all namespaces and UUIDs that is marked that they need cleanup - doCleanup(_nssCatalogIds, _nssCatalogIdChanges); - doCleanup(_uuidCatalogIds, _uuidCatalogIdChanges); - - _lowestCatalogIdTimestampForCleanup = nextLowestCleanupTimestamp; - _oldestCatalogIdTimestampMaintained = std::max(_oldestCatalogIdTimestampMaintained, oldest); -} - -void CollectionCatalog::cleanupForCatalogReopen(Timestamp stable) { - _nssCatalogIdChanges = {}; - _uuidCatalogIdChanges = {}; - _lowestCatalogIdTimestampForCleanup = Timestamp::max(); - _oldestCatalogIdTimestampMaintained = std::min(_oldestCatalogIdTimestampMaintained, stable); - - auto removeLargerTimestamps = [this, &stable](auto& catalogIdsContainer, - auto& catalogIdChangesContainer) { - // Batch all changes together - auto idsWriter = catalogIdsContainer.transient(); - auto changesWriter = catalogIdChangesContainer.transient(); - - for (auto it = catalogIdsContainer.begin(); it != catalogIdsContainer.end();) { - auto ids = it->second; - - // Remove all larger timestamps in this range - ids.erase( - std::upper_bound(ids.begin(), - ids.end(), - stable, - [](Timestamp ts, const auto& entry) { return ts < entry.ts; }), - ids.end()); - - // Remove namespace or UUID if there are no entries left - if (ids.empty()) { - idsWriter.erase(it->first); - ++it; - continue; - } - - // Calculate when this namespace needs to be cleaned up next - _markForCatalogIdCleanupIfNeeded(it->first, changesWriter, ids); - idsWriter.set(it->first, std::move(ids)); - ++it; - } - - // Write back all changes to main container - catalogIdChangesContainer = changesWriter.persistent(); - catalogIdsContainer = idsWriter.persistent(); - }; - - removeLargerTimestamps(_nssCatalogIds, _nssCatalogIdChanges); - removeLargerTimestamps(_uuidCatalogIds, _uuidCatalogIdChanges); -} - void CollectionCatalog::invariantHasExclusiveAccessToCollection(OperationContext* opCtx, const NamespaceString& nss) { - invariant(hasExclusiveAccessToCollection(opCtx, nss), nss.toString()); + invariant(hasExclusiveAccessToCollection(opCtx, nss), nss.toStringForErrorMsg()); } bool CollectionCatalog::hasExclusiveAccessToCollection(OperationContext* opCtx, @@ -2720,65 +2231,29 @@ void CollectionCatalog::_replaceViewsForDatabase(const DatabaseName& dbName, _viewsForDatabase = _viewsForDatabase.set(dbName, std::move(views)); } +const HistoricalCatalogIdTracker& CollectionCatalog::catalogIdTracker() const { + return _catalogIdTracker; +} +HistoricalCatalogIdTracker& CollectionCatalog::catalogIdTracker() { + return _catalogIdTracker; +} + bool CollectionCatalog::_isCatalogBatchWriter() const { return ongoingBatchedWrite.load() && batchedCatalogWriteInstance.get() == this; } bool CollectionCatalog::_alreadyClonedForBatchedWriter( const std::shared_ptr& collection) const { - // We may skip cloning the Collection instance if and only if we are currently in a batched - // catalog write and all references to this Collection is owned by the cloned CollectionCatalog - // instance owned by the batch writer. i.e. the Collection is uniquely owned by the batch - // writer. When the batch writer initially clones the catalog, all collections will have a - // 'use_count' of at least kNumCollectionReferencesStored*2 (because there are at least 2 - // catalog instances). 
To check for uniquely owned we need to check that the reference count is - // exactly kNumCollectionReferencesStored (owned by a single catalog) while also account for the - // instance that is extracted from the catalog and provided as a parameter to this function, we - // therefore need to add 1. - return _isCatalogBatchWriter() && collection.use_count() == kNumCollectionReferencesStored + 1; -} - -CollectionCatalogStasher::CollectionCatalogStasher(OperationContext* opCtx) - : _opCtx(opCtx), _stashed(false) {} - -CollectionCatalogStasher::CollectionCatalogStasher(OperationContext* opCtx, - std::shared_ptr catalog) - : _opCtx(opCtx), _stashed(true) { - invariant(catalog); - CollectionCatalog::stash(_opCtx, std::move(catalog)); -} - -CollectionCatalogStasher::CollectionCatalogStasher(CollectionCatalogStasher&& other) - : _opCtx(other._opCtx), _stashed(other._stashed) { - other._stashed = false; -} - -CollectionCatalogStasher::~CollectionCatalogStasher() { - if (_opCtx->isLockFreeReadsOp()) { - // Leave the catalog stashed on the opCtx because there is another Stasher instance still - // using it. - return; - } - - reset(); -} - -void CollectionCatalogStasher::stash(std::shared_ptr catalog) { - CollectionCatalog::stash(_opCtx, std::move(catalog)); - _stashed = true; -} - -void CollectionCatalogStasher::reset() { - if (_stashed) { - CollectionCatalog::stash(_opCtx, nullptr); - _stashed = false; - } + // We may skip cloning the Collection instance if and only if have already cloned it for write + // use in this batch writer. + return _isCatalogBatchWriter() && batchedCatalogClonedCollections.contains(collection.get()); } BatchedCollectionCatalogWriter::BatchedCollectionCatalogWriter(OperationContext* opCtx) : _opCtx(opCtx) { invariant(_opCtx->lockState()->isW()); invariant(!batchedCatalogWriteInstance); + invariant(batchedCatalogClonedCollections.empty()); auto& storage = getCatalog(_opCtx->getServiceContext()); // hold onto base so if we need to delete it we can do it outside of the lock @@ -2803,6 +2278,7 @@ BatchedCollectionCatalogWriter::~BatchedCollectionCatalogWriter() { ongoingBatchedWrite.store(false); _batchedInstance = nullptr; batchedCatalogWriteInstance = nullptr; + batchedCatalogClonedCollections.clear(); } } // namespace mongo diff --git a/src/mongo/db/catalog/collection_catalog.h b/src/mongo/db/catalog/collection_catalog.h index 482b1ee5036b8..3171327438ec5 100644 --- a/src/mongo/db/catalog/collection_catalog.h +++ b/src/mongo/db/catalog/collection_catalog.h @@ -29,68 +29,85 @@ #pragma once +#include +#include +#include +#include +#include #include +#include #include +#include #include - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/historical_catalogid_tracker.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/views_for_database.h" #include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/profile_filter.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/views/view.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/functional.h" +#include "mongo/util/immutable/map.h" #include "mongo/util/immutable/unordered_map.h" #include 
"mongo/util/immutable/unordered_set.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #include "mongo/util/uuid.h" namespace mongo { class CollectionCatalog { friend class iterator; + using OrderedCollectionMap = + immutable::map, std::shared_ptr>; public: using CollectionInfoFn = std::function; - // Number of how many Collection references for a single Collection that is stored in the - // catalog. Used to determine whether there are external references (uniquely owned). Needs to - // be kept in sync with the data structures below. - static constexpr size_t kNumCollectionReferencesStored = 3; class iterator { public: using value_type = const Collection*; - iterator(OperationContext* opCtx, - const DatabaseName& dbName, - const CollectionCatalog& catalog); - iterator(OperationContext* opCtx, - std::map, - std::shared_ptr>::const_iterator mapIter, - const CollectionCatalog& catalog); + iterator(const DatabaseName& dbName, + OrderedCollectionMap::iterator it, + const OrderedCollectionMap& catalog); value_type operator*(); iterator operator++(); - iterator operator++(int); - UUID uuid() const; - - /* - * Equality operators == and != do not attempt to reposition the iterators being compared. - * The behavior for comparing invalid iterators is undefined. - */ bool operator==(const iterator& other) const; bool operator!=(const iterator& other) const; private: - bool _exhausted(); + const OrderedCollectionMap& _map; + immutable::map, std::shared_ptr>::iterator + _mapIter; + }; + + class Range { + public: + Range(const OrderedCollectionMap&, const DatabaseName& dbName); + iterator begin() const; + iterator end() const; + bool empty() const; - OperationContext* _opCtx; + private: + OrderedCollectionMap _map; DatabaseName _dbName; - boost::optional _uuid; - std::map, std::shared_ptr>::const_iterator - _mapIter; - const CollectionCatalog* _catalog; }; struct ProfileSettings { @@ -307,21 +324,9 @@ class CollectionCatalog { * The global lock must be held in exclusive mode. */ void registerCollection(OperationContext* opCtx, - const UUID& uuid, std::shared_ptr collection, boost::optional commitTime); - /** - * Like 'registerCollection' above but allows the Collection to be registered using just a - * MODE_IX lock on the namespace. The collection will be added to the catalog using a two-phase - * commit where it is marked as 'pending commit' internally. The user must call - * 'onCreateCollection' which sets up the necessary state for finishing the two-phase commit. - */ - void registerCollectionTwoPhase(OperationContext* opCtx, - const UUID& uuid, - std::shared_ptr collection, - boost::optional commitTime); - /** * Deregister the collection. * @@ -420,23 +425,6 @@ class CollectionCatalog { Collection* lookupCollectionByNamespaceForMetadataWrite(OperationContext* opCtx, const NamespaceString& nss) const; - /** - * Returns true if the collection has been registered in the CollectionCatalog but not yet made - * visible. 
- */ - bool isCollectionAwaitingVisibility(UUID uuid) const; - - // TODO SERVER-74468: Remove this function - std::shared_ptr lookupCollectionByNamespaceForRead_DONT_USE( - OperationContext* opCtx, const NamespaceString& nss) const { - return _getCollectionByNamespace(opCtx, nss); - } - // TODO SERVER-74468: Remove this function - std::shared_ptr lookupCollectionByUUIDForRead_DONT_USE( - OperationContext* opCtx, const UUID& uuid) const { - return _getCollectionByUUID(opCtx, uuid); - } - /** * This function gets the NamespaceString from the collection catalog entry that * corresponds to UUID uuid. If no collection exists with the uuid, return @@ -452,30 +440,11 @@ class CollectionCatalog { const NamespaceString& nss) const; /** - * Returns true if this CollectionCatalog contains the provided collection instance - */ - bool containsCollection(OperationContext* opCtx, const Collection* collection) const; - - /** - * Returns the CatalogId for a given 'nss' or 'uuid' at timestamp 'ts'. + * Checks if the provided instance is the latest version for this catalog version. This check + * should be used to determine if the collection instance is safe to perform CRUD writes on. For + * the check to be meaningful it should be performed against CollectionCatalog::latest. */ - struct CatalogIdLookup { - enum class Existence { - // Namespace or UUID exists at time 'ts' and catalogId set in 'id'. - kExists, - // Namespace or UUID does not exist at time 'ts'. - kNotExists, - // Namespace or UUID existence at time 'ts' is unknown. The durable catalog must be - // scanned to determine. - kUnknown - }; - RecordId id; - Existence result; - }; - CatalogIdLookup lookupCatalogIdByNSS(const NamespaceString& nss, - boost::optional ts = boost::none) const; - CatalogIdLookup lookupCatalogIdByUUID(const UUID& uuid, - boost::optional ts = boost::none) const; + bool isLatestCollection(OperationContext* opCtx, const Collection* collection) const; /** * Iterates through the views in the catalog associated with database `dbName`, applying @@ -663,25 +632,13 @@ class CollectionCatalog { */ uint64_t getEpoch() const; - iterator begin(OperationContext* opCtx, const DatabaseName& dbName) const; - iterator end(OperationContext* opCtx) const; - - /** - * Checks if 'cleanupForOldestTimestampAdvanced' should be called when the oldest timestamp - * advanced. Used to avoid a potentially expensive call to 'cleanupForOldestTimestampAdvanced' - * if no write is needed. - */ - bool needsCleanupForOldestTimestamp(Timestamp oldest) const; - - /** - * Cleans up internal structures when the oldest timestamp advances - */ - void cleanupForOldestTimestampAdvanced(Timestamp oldest); - /** - * Cleans up internal structures after catalog reopen + * Provides an iterable range for the collections belonging to the specified database. + * + * Will not observe any updates made to the catalog after the creation of the 'Range'. The + * 'Range' object just remain in scope for the duration of the iteration. */ - void cleanupForCatalogReopen(Timestamp stable); + Range range(const DatabaseName& dbName) const; /** * Ensures we have a MODE_X lock on a collection or MODE_IX lock for newly created collections. @@ -690,6 +647,13 @@ class CollectionCatalog { const NamespaceString& nss); static bool hasExclusiveAccessToCollection(OperationContext* opCtx, const NamespaceString& nss); + /** + * Returns HistoricalCatalogIdTracker for historical namespace/uuid mappings to catalogId based + * on timestamp. 
+ */ + const HistoricalCatalogIdTracker& catalogIdTracker() const; + HistoricalCatalogIdTracker& catalogIdTracker(); + private: friend class CollectionCatalog::iterator; class PublishCatalogUpdates; @@ -704,15 +668,10 @@ class CollectionCatalog { const UUID& uuid) const; /** - * Register the collection with `uuid`. - * - * If 'twoPhase' is true, this call must be followed by 'onCreateCollection' which continues the - * two-phase commit process. + * Register the collection. */ void _registerCollection(OperationContext* opCtx, - const UUID& uuid, std::shared_ptr collection, - bool twoPhase, boost::optional commitTime); std::shared_ptr _lookupCollectionByUUID(UUID uuid) const; @@ -796,43 +755,6 @@ class CollectionCatalog { const NamespaceString& nss, NamespaceType type) const; - /** - * CatalogId with Timestamp - */ - struct TimestampedCatalogId { - boost::optional id; - Timestamp ts; - }; - - // Push a catalogId for namespace and UUID at given Timestamp. Timestamp needs to be larger than - // other entries for this namespace and UUID. boost::none for catalogId represent drop, - // boost::none for timestamp turns this operation into a no-op. - void _pushCatalogIdForNSSAndUUID(const NamespaceString& nss, - const UUID& uuid, - boost::optional catalogId, - boost::optional ts); - - // Push a catalogId for 'from' and 'to' for a rename operation at given Timestamp. Timestamp - // needs to be larger than other entries for these namespaces. boost::none for timestamp turns - // this operation into a no-op. - void _pushCatalogIdForRename(const NamespaceString& from, - const NamespaceString& to, - boost::optional ts); - - // Inserts a catalogId for namespace and UUID at given Timestamp, if not boost::none. Used after - // scanning the durable catalog for a correct mapping at the given timestamp. - void _insertCatalogIdForNSSAndUUIDAfterScan(boost::optional nss, - boost::optional uuid, - boost::optional catalogId, - Timestamp ts); - - // Helper to calculate if a namespace or UUID needs to be marked for cleanup for a set of - // timestamped catalogIds - template - void _markForCatalogIdCleanupIfNeeded(const Key& key, - CatalogIdChangesContainer& catalogIdChangesContainer, - const std::vector& ids); - /** * Returns true if catalog information about this namespace or UUID should be looked up from the * durable catalog rather than using the in-memory state of the catalog. @@ -868,12 +790,6 @@ class CollectionCatalog { const NamespaceStringOrUUID& nssOrUUID, Timestamp readTimestamp) const; - // Helpers for 'lookupCatalogIdByNSS' and 'lookupCatalogIdByUUID'. - CatalogIdLookup _checkWithOldestCatalogIdTimestampMaintained( - boost::optional ts) const; - CatalogIdLookup _findCatalogIdInRange(boost::optional ts, - const std::vector& range) const; - /** * When present, indicates that the catalog is in closed state, and contains a map from UUID * to pre-close NSS. See also onCloseCatalog. @@ -882,8 +798,6 @@ class CollectionCatalog { using CollectionCatalogMap = immutable::unordered_map, UUID::Hash>; - using OrderedCollectionMap = - std::map, std::shared_ptr>; using NamespaceCollectionMap = immutable::unordered_map>; using UncommittedViewsSet = immutable::unordered_set; @@ -901,21 +815,8 @@ class CollectionCatalog { immutable::unordered_map> _pendingCommitNamespaces; immutable::unordered_map, UUID::Hash> _pendingCommitUUIDs; - // CatalogId mappings for all known namespaces and UUIDs for the CollectionCatalog. The vector - // is sorted on timestamp. UUIDs will have at most two entries. 
One for the create and another - // for the drop. UUIDs stay the same across collection renames. - immutable::unordered_map> _nssCatalogIds; - immutable::unordered_map, UUID::Hash> _uuidCatalogIds; - // Set of namespaces and UUIDs that need cleanup when the oldest timestamp advances - // sufficiently. - immutable::unordered_set _nssCatalogIdChanges; - immutable::unordered_set _uuidCatalogIdChanges; - // Point at which the oldest timestamp need to advance for there to be any catalogId namespace - // that can be cleaned up - Timestamp _lowestCatalogIdTimestampForCleanup = Timestamp::max(); - // The oldest timestamp at which the catalog maintains catalogId mappings. Anything older than - // this is unknown and must be discovered by scanning the durable catalog. - Timestamp _oldestCatalogIdTimestampMaintained = Timestamp::max(); + // Provides functionality to lookup catalogId by namespace/uuid for a given timestamp. + HistoricalCatalogIdTracker _catalogIdTracker; // Map of database names to their corresponding views and other associated state. ViewsForDatabaseMap _viewsForDatabase; @@ -952,48 +853,6 @@ class CollectionCatalog { Stats _stats; }; -/** - * RAII style object to stash a versioned CollectionCatalog on the OperationContext. - * Calls to CollectionCatalog::get(OperationContext*) will return this instance. - * - * Unstashes the CollectionCatalog at destruction if the OperationContext::isLockFreeReadsOp() - * flag is no longer set. This is handling for the nested Stasher use case. - */ -class CollectionCatalogStasher { -public: - CollectionCatalogStasher(OperationContext* opCtx); - CollectionCatalogStasher(OperationContext* opCtx, - std::shared_ptr catalog); - - /** - * Unstashes the catalog if _opCtx->isLockFreeReadsOp() is no longer set. - */ - ~CollectionCatalogStasher(); - - /** - * Moves ownership of the stash to the new instance, and marks the old one unstashed. - */ - CollectionCatalogStasher(CollectionCatalogStasher&& other); - - CollectionCatalogStasher(const CollectionCatalogStasher&) = delete; - CollectionCatalogStasher& operator=(const CollectionCatalogStasher&) = delete; - CollectionCatalogStasher& operator=(CollectionCatalogStasher&&) = delete; - - /** - * Stashes 'catalog' on the _opCtx. - */ - void stash(std::shared_ptr catalog); - - /** - * Resets the OperationContext so CollectionCatalog::get() returns latest catalog again - */ - void reset(); - -private: - OperationContext* _opCtx; - bool _stashed; -}; - /** * RAII class to perform multiple writes to the CollectionCatalog on a single copy of the * CollectionCatalog instance. 
Requires the global lock to be held in exclusive write mode (MODE_X) diff --git a/src/mongo/db/catalog/collection_catalog_bm.cpp b/src/mongo/db/catalog/collection_catalog_bm.cpp index 0d0336531884e..2438cf2806954 100644 --- a/src/mongo/db/catalog/collection_catalog_bm.cpp +++ b/src/mongo/db/catalog/collection_catalog_bm.cpp @@ -28,43 +28,34 @@ */ #include - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_mock.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/uuid.h" namespace mongo { - namespace { -class LockerImplClientObserver : public ServiceContext::ClientObserver { -public: - LockerImplClientObserver() = default; - ~LockerImplClientObserver() = default; - - void onCreateClient(Client* client) final {} - - void onDestroyClient(Client* client) final {} - - void onCreateOperationContext(OperationContext* opCtx) override { - opCtx->setLockState(std::make_unique(opCtx->getServiceContext())); - } - - void onDestroyOperationContext(OperationContext* opCtx) final {} -}; - -const ServiceContext::ConstructorActionRegisterer clientObserverRegisterer{ - "CollectionCatalogBenchmarkClientObserver", - [](ServiceContext* service) { - service->registerClientObserver(std::make_unique()); - }, - [](ServiceContext* serviceContext) { - }}; - ServiceContext* setupServiceContext() { auto serviceContext = ServiceContext::make(); auto serviceContextPtr = serviceContext.get(); @@ -77,10 +68,10 @@ void createCollections(OperationContext* opCtx, int numCollections) { BatchedCollectionCatalogWriter batched(opCtx); for (auto i = 0; i < numCollections; i++) { - const NamespaceString nss("collection_catalog_bm", std::to_string(i)); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + "collection_catalog_bm", std::to_string(i)); CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection(opCtx, - UUID::gen(), std::make_shared(nss), /*ts=*/boost::none); }); @@ -107,6 +98,12 @@ void BM_CollectionCatalogWriteBatchedWithGlobalExclusiveLock(benchmark::State& s ThreadClient threadClient(serviceContext); ServiceContext::UniqueOperationContext opCtx = threadClient->makeOperationContext(); + // TODO(SERVER-74657): Please revisit if this thread could be made killable. 
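
The benchmark file below gains several micro-benchmarks that all follow the usual Google Benchmark shape: a `State` loop, an argument range, and `DoNotOptimize`/`ClobberMemory` to keep the optimizer from eliding the measured work. A minimal standalone skeleton of that shape is shown here for orientation; `BM_Placeholder` and its body are placeholders, not the MongoDB benchmarks themselves.

```cpp
// Minimal Google Benchmark skeleton mirroring the shape of the new benchmarks.
#include <benchmark/benchmark.h>
#include <vector>

static void BM_Placeholder(benchmark::State& state) {
    std::vector<int> v(state.range(0), 1);  // setup stays outside the timed loop
    for (auto _ : state) {
        benchmark::ClobberMemory();
        long sum = 0;
        for (int x : v)
            sum += x;
        benchmark::DoNotOptimize(sum);  // keep the result observable
    }
}
// Runs with arguments spanning 1 .. 100'000 (powers of 8 by default).
BENCHMARK(BM_Placeholder)->Ranges({{1, 100'000}});
BENCHMARK_MAIN();
```
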
+ { + stdx::lock_guard lk(*threadClient.get()); + threadClient.get()->setSystemOperationUnkillableByStepdown(lk); + } + createCollections(opCtx.get(), state.range(0)); Lock::GlobalLock globalLk(opCtx.get(), MODE_X); @@ -118,7 +115,132 @@ void BM_CollectionCatalogWriteBatchedWithGlobalExclusiveLock(benchmark::State& s } } +void BM_CollectionCatalogCreateDropCollection(benchmark::State& state) { + auto serviceContext = setupServiceContext(); + ThreadClient threadClient(serviceContext); + ServiceContext::UniqueOperationContext opCtx = threadClient->makeOperationContext(); + Lock::GlobalLock globalLk(opCtx.get(), MODE_X); + + createCollections(opCtx.get(), state.range(0)); + + for (auto _ : state) { + benchmark::ClobberMemory(); + CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& catalog) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + "collection_catalog_bm", std::to_string(state.range(0))); + const UUID uuid = UUID::gen(); + catalog.registerCollection( + opCtx.get(), std::make_shared(uuid, nss), boost::none); + catalog.deregisterCollection(opCtx.get(), uuid, false, boost::none); + }); + } +} + +void BM_CollectionCatalogCreateNCollectionsBatched(benchmark::State& state) { + for (auto _ : state) { + benchmark::ClobberMemory(); + + auto serviceContext = setupServiceContext(); + ThreadClient threadClient(serviceContext); + ServiceContext::UniqueOperationContext opCtx = threadClient->makeOperationContext(); + + Lock::GlobalLock globalLk(opCtx.get(), MODE_X); + BatchedCollectionCatalogWriter batched(opCtx.get()); + + auto numCollections = state.range(0); + for (auto i = 0; i < numCollections; i++) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + "collection_catalog_bm", std::to_string(i)); + CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& catalog) { + catalog.registerCollection( + opCtx.get(), std::make_shared(nss), boost::none); + }); + } + } +} + +void BM_CollectionCatalogCreateNCollections(benchmark::State& state) { + for (auto _ : state) { + benchmark::ClobberMemory(); + + auto serviceContext = setupServiceContext(); + ThreadClient threadClient(serviceContext); + ServiceContext::UniqueOperationContext opCtx = threadClient->makeOperationContext(); + Lock::GlobalLock globalLk(opCtx.get(), MODE_X); + + auto numCollections = state.range(0); + for (auto i = 0; i < numCollections; i++) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + "collection_catalog_bm", std::to_string(i)); + CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& catalog) { + catalog.registerCollection( + opCtx.get(), std::make_shared(nss), boost::none); + }); + } + } +} + +void BM_CollectionCatalogLookupCollectionByNamespace(benchmark::State& state) { + auto serviceContext = setupServiceContext(); + ThreadClient threadClient(serviceContext); + ServiceContext::UniqueOperationContext opCtx = threadClient->makeOperationContext(); + + createCollections(opCtx.get(), state.range(0)); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + "collection_catalog_bm", std::to_string(state.range(0) / 2)); + + for (auto _ : state) { + benchmark::ClobberMemory(); + auto coll = + CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss); + invariant(coll); + } +} + +void BM_CollectionCatalogLookupCollectionByUUID(benchmark::State& state) { + auto serviceContext = setupServiceContext(); + ThreadClient threadClient(serviceContext); + 
ServiceContext::UniqueOperationContext opCtx = threadClient->makeOperationContext(); + + createCollections(opCtx.get(), state.range(0)); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + "collection_catalog_bm", std::to_string(state.range(0) / 2)); + auto coll = CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss); + invariant(coll->ns() == nss); + const UUID uuid = coll->uuid(); + + for (auto _ : state) { + benchmark::ClobberMemory(); + auto res = CollectionCatalog::get(opCtx.get())->lookupCollectionByUUID(opCtx.get(), uuid); + invariant(res == coll); + } +} + +void BM_CollectionCatalogIterateCollections(benchmark::State& state) { + auto serviceContext = setupServiceContext(); + ThreadClient threadClient(serviceContext); + ServiceContext::UniqueOperationContext opCtx = threadClient->makeOperationContext(); + + createCollections(opCtx.get(), state.range(0)); + + for (auto _ : state) { + benchmark::ClobberMemory(); + auto catalog = CollectionCatalog::get(opCtx.get()); + auto count = 0; + for ([[maybe_unused]] auto&& coll : catalog->range( + DatabaseName::createDatabaseName_forTest(boost::none, "collection_catalog_bm"))) { + benchmark::DoNotOptimize(count++); + } + } +} + BENCHMARK(BM_CollectionCatalogWrite)->Ranges({{{1}, {100'000}}}); BENCHMARK(BM_CollectionCatalogWriteBatchedWithGlobalExclusiveLock)->Ranges({{{1}, {100'000}}}); +BENCHMARK(BM_CollectionCatalogCreateDropCollection)->Ranges({{{1}, {100'000}}}); +BENCHMARK(BM_CollectionCatalogCreateNCollectionsBatched)->Ranges({{{1}, {100'000}}}); +BENCHMARK(BM_CollectionCatalogCreateNCollections)->Ranges({{{1}, {32'768}}}); +BENCHMARK(BM_CollectionCatalogLookupCollectionByNamespace)->Ranges({{{1}, {100'000}}}); +BENCHMARK(BM_CollectionCatalogLookupCollectionByUUID)->Ranges({{{1}, {100'000}}}); +BENCHMARK(BM_CollectionCatalogIterateCollections)->Ranges({{{1}, {100'000}}}); } // namespace mongo diff --git a/src/mongo/db/catalog/collection_catalog_helper.cpp b/src/mongo/db/catalog/collection_catalog_helper.cpp index 249a1b3f5b6ac..4a0b72c1fae33 100644 --- a/src/mongo/db/catalog/collection_catalog_helper.cpp +++ b/src/mongo/db/catalog/collection_catalog_helper.cpp @@ -29,9 +29,24 @@ #include "mongo/db/catalog/collection_catalog_helper.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/views/view.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" namespace mongo { @@ -43,7 +58,8 @@ Status checkIfNamespaceExists(OperationContext* opCtx, const NamespaceString& ns auto catalog = CollectionCatalog::get(opCtx); if (catalog->lookupCollectionByNamespace(opCtx, nss)) { return Status(ErrorCodes::NamespaceExists, - str::stream() << "Collection " << nss.ns() << " already exists."); + str::stream() + << "Collection " << nss.toStringForErrorMsg() << " already exists."); } auto view = catalog->lookupView(opCtx, nss); @@ -52,11 +68,12 @@ Status checkIfNamespaceExists(OperationContext* opCtx, const NamespaceString& ns if (view->timeseries()) { return Status(ErrorCodes::NamespaceExists, - str::stream() << "A timeseries collection already exists. NS: " << nss); + str::stream() << "A timeseries collection already exists. 
NS: " + << nss.toStringForErrorMsg()); } return Status(ErrorCodes::NamespaceExists, - str::stream() << "A view already exists. NS: " << nss); + str::stream() << "A view already exists. NS: " << nss.toStringForErrorMsg()); } @@ -67,11 +84,9 @@ void forEachCollectionFromDb(OperationContext* opCtx, CollectionCatalog::CollectionInfoFn predicate) { auto catalogForIteration = CollectionCatalog::get(opCtx); - for (auto collectionIt = catalogForIteration->begin(opCtx, dbName); - collectionIt != catalogForIteration->end(opCtx);) { - auto uuid = collectionIt.uuid(); + for (auto&& coll : catalogForIteration->range(dbName)) { + auto uuid = coll->uuid(); if (predicate && !catalogForIteration->checkIfCollectionSatisfiable(uuid, predicate)) { - ++collectionIt; continue; } @@ -95,10 +110,6 @@ void forEachCollectionFromDb(OperationContext* opCtx, clk.reset(); } - // Increment iterator before calling callback. This allows for collection deletion inside - // this callback even if we are in batched inplace mode. - ++collectionIt; - // The NamespaceString couldn't be resolved from the uuid, so the collection was dropped. if (!collection) continue; diff --git a/src/mongo/db/catalog/collection_catalog_helper.h b/src/mongo/db/catalog/collection_catalog_helper.h index 7b47eca2f8bf3..1765a2296c758 100644 --- a/src/mongo/db/catalog/collection_catalog_helper.h +++ b/src/mongo/db/catalog/collection_catalog_helper.h @@ -29,8 +29,11 @@ #pragma once +#include "mongo/base/status.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/catalog/collection_catalog_test.cpp b/src/mongo/db/catalog/collection_catalog_test.cpp index 124af7ceb36a8..0e3f4274bc4ed 100644 --- a/src/mongo/db/catalog/collection_catalog_test.cpp +++ b/src/mongo/db/catalog/collection_catalog_test.cpp @@ -29,22 +29,59 @@ #include "mongo/db/catalog/collection_catalog.h" +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/client/index_spec.h" #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog/collection_catalog_helper.h" #include "mongo/db/catalog/collection_mock.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_yield_restore.h" +#include "mongo/db/catalog/index_build_block.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/uncommitted_catalog_updates.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/resource_catalog.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/index_names.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/resumable_index_builds_gen.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" #include "mongo/db/storage/durable_catalog.h" +#include 
"mongo/db/storage/ident.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -55,7 +92,7 @@ namespace { class CollectionCatalogTest : public ServiceContextMongoDTest { public: CollectionCatalogTest() - : nss("testdb", "testcol"), + : nss(NamespaceString::createNamespaceString_forTest("testdb", "testcol")), col(nullptr), colUUID(UUID::gen()), nextUUID(UUID::gen()), @@ -78,12 +115,7 @@ class CollectionCatalogTest : public ServiceContextMongoDTest { std::shared_ptr collection = std::make_shared(colUUID, nss); col = CollectionPtr(collection.get()); // Register dummy collection in catalog. - catalog.registerCollection(opCtx.get(), colUUID, collection, boost::none); - - // Validate that kNumCollectionReferencesStored is correct, add one reference for the one we - // hold in this function. - ASSERT_EQUALS(collection.use_count(), - CollectionCatalog::kNumCollectionReferencesStored + 1); + catalog.registerCollection(opCtx.get(), collection, boost::none); } void tearDown() { @@ -115,17 +147,14 @@ class CollectionCatalogIterationTest : public ServiceContextMongoDTest { NamespaceString barNss = NamespaceString::createNamespaceString_forTest( "bar", "coll" + std::to_string(counter)); - auto fooUuid = UUID::gen(); std::shared_ptr fooColl = std::make_shared(fooNss); - - auto barUuid = UUID::gen(); std::shared_ptr barColl = std::make_shared(barNss); - dbMap["foo"].insert(std::make_pair(fooUuid, fooColl.get())); - dbMap["bar"].insert(std::make_pair(barUuid, barColl.get())); + dbMap["foo"].insert(std::make_pair(fooColl->uuid(), fooColl.get())); + dbMap["bar"].insert(std::make_pair(barColl->uuid(), barColl.get())); - catalog.registerCollection(opCtx.get(), fooUuid, fooColl, boost::none); - catalog.registerCollection(opCtx.get(), barUuid, barColl, boost::none); + catalog.registerCollection(opCtx.get(), fooColl, boost::none); + catalog.registerCollection(opCtx.get(), barColl, boost::none); } } @@ -153,11 +182,12 @@ class CollectionCatalogIterationTest : public ServiceContextMongoDTest { void checkCollections(const DatabaseName& dbName) { unsigned long counter = 0; + const auto dbNameStr = dbName.toString_forTest(); - for (auto [orderedIt, catalogIt] = - std::tuple{collsIterator(dbName.toString()), catalog.begin(opCtx.get(), dbName)}; - catalogIt != catalog.end(opCtx.get()) && - orderedIt != collsIteratorEnd(dbName.toString()); + auto orderedIt = collsIterator(dbNameStr); + auto catalogRange = catalog.range(dbName); + auto catalogIt = catalogRange.begin(); + for (; catalogIt != catalogRange.end() && orderedIt != collsIteratorEnd(dbNameStr); ++catalogIt, ++orderedIt) { auto catalogColl = *catalogIt; @@ -167,7 +197,7 @@ class CollectionCatalogIterationTest : public ServiceContextMongoDTest { ++counter; } - ASSERT_EQUALS(counter, dbMap[dbName.toString()].size()); + ASSERT_EQUALS(counter, dbMap[dbNameStr].size()); } void dropColl(const std::string dbName, UUID uuid) { @@ -192,20 +222,17 @@ class CollectionCatalogResourceTest : public ServiceContextMongoDTest { 
NamespaceString nss = NamespaceString::createNamespaceString_forTest( "resourceDb", "coll" + std::to_string(i)); std::shared_ptr collection = std::make_shared(nss); - auto uuid = collection->uuid(); - catalog.registerCollection(opCtx.get(), uuid, std::move(collection), boost::none); + catalog.registerCollection(opCtx.get(), std::move(collection), boost::none); } int numEntries = 0; - for (auto it = catalog.begin(opCtx.get(), DatabaseName(boost::none, "resourceDb")); - it != catalog.end(opCtx.get()); - it++) { - auto coll = *it; + for (auto&& coll : + catalog.range(DatabaseName::createDatabaseName_forTest(boost::none, "resourceDb"))) { auto collName = coll->ns(); ResourceId rid(RESOURCE_COLLECTION, collName); - ASSERT_NE(ResourceCatalog::get(getServiceContext()).name(rid), boost::none); + ASSERT_NE(ResourceCatalog::get().name(rid), boost::none); numEntries++; } ASSERT_EQ(5, numEntries); @@ -213,10 +240,8 @@ class CollectionCatalogResourceTest : public ServiceContextMongoDTest { void tearDown() { std::vector collectionsToDeregister; - for (auto it = catalog.begin(opCtx.get(), DatabaseName(boost::none, "resourceDb")); - it != catalog.end(opCtx.get()); - ++it) { - auto coll = *it; + for (auto&& coll : + catalog.range(DatabaseName::createDatabaseName_forTest(boost::none, "resourceDb"))) { auto uuid = coll->uuid(); if (!coll) { break; @@ -230,9 +255,8 @@ class CollectionCatalogResourceTest : public ServiceContextMongoDTest { } int numEntries = 0; - for (auto it = catalog.begin(opCtx.get(), DatabaseName(boost::none, "resourceDb")); - it != catalog.end(opCtx.get()); - it++) { + for ([[maybe_unused]] auto&& coll : + catalog.range(DatabaseName::createDatabaseName_forTest(boost::none, "resourceDb"))) { numEntries++; } ASSERT_EQ(0, numEntries); @@ -248,48 +272,48 @@ class CollectionCatalogResourceTest : public ServiceContextMongoDTest { TEST_F(CollectionCatalogResourceTest, RemoveAllResources) { catalog.deregisterAllCollectionsAndViews(getServiceContext()); - const DatabaseName dbName = DatabaseName(boost::none, "resourceDb"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "resourceDb"); auto rid = ResourceId(RESOURCE_DATABASE, dbName); - ASSERT_EQ(boost::none, ResourceCatalog::get(getServiceContext()).name(rid)); + ASSERT_EQ(boost::none, ResourceCatalog::get().name(rid)); for (int i = 0; i < 5; i++) { NamespaceString nss = NamespaceString::createNamespaceString_forTest( "resourceDb", "coll" + std::to_string(i)); rid = ResourceId(RESOURCE_COLLECTION, nss); - ASSERT_EQ(boost::none, ResourceCatalog::get(getServiceContext()).name(rid)); + ASSERT_EQ(boost::none, ResourceCatalog::get().name(rid)); } } TEST_F(CollectionCatalogResourceTest, LookupDatabaseResource) { - const DatabaseName dbName = DatabaseName(boost::none, "resourceDb"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "resourceDb"); auto rid = ResourceId(RESOURCE_DATABASE, dbName); - auto ridStr = ResourceCatalog::get(getServiceContext()).name(rid); + auto ridStr = ResourceCatalog::get().name(rid); ASSERT(ridStr); - ASSERT(ridStr->find(dbName.toStringWithTenantId()) != std::string::npos); + ASSERT(ridStr->find(dbName.toStringWithTenantId_forTest()) != std::string::npos); } TEST_F(CollectionCatalogResourceTest, LookupMissingDatabaseResource) { - const DatabaseName dbName = DatabaseName(boost::none, "missingDb"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "missingDb"); auto rid = ResourceId(RESOURCE_DATABASE, dbName); - 
ASSERT(!ResourceCatalog::get(getServiceContext()).name(rid)); + ASSERT(!ResourceCatalog::get().name(rid)); } TEST_F(CollectionCatalogResourceTest, LookupCollectionResource) { const NamespaceString collNs = NamespaceString::createNamespaceString_forTest(boost::none, "resourceDb.coll1"); auto rid = ResourceId(RESOURCE_COLLECTION, collNs); - auto ridStr = ResourceCatalog::get(getServiceContext()).name(rid); + auto ridStr = ResourceCatalog::get().name(rid); ASSERT(ridStr); - ASSERT(ridStr->find(collNs.toStringWithTenantId()) != std::string::npos); + ASSERT(ridStr->find(collNs.toStringWithTenantId_forTest()) != std::string::npos); } TEST_F(CollectionCatalogResourceTest, LookupMissingCollectionResource) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest(boost::none, "resourceDb.coll5"); auto rid = ResourceId(RESOURCE_COLLECTION, nss); - ASSERT(!ResourceCatalog::get(getServiceContext()).name(rid)); + ASSERT(!ResourceCatalog::get().name(rid)); } TEST_F(CollectionCatalogResourceTest, RemoveCollection) { @@ -298,30 +322,31 @@ TEST_F(CollectionCatalogResourceTest, RemoveCollection) { auto coll = catalog.lookupCollectionByNamespace(opCtx.get(), NamespaceString(collNs)); catalog.deregisterCollection(opCtx.get(), coll->uuid(), /*isDropPending=*/false, boost::none); auto rid = ResourceId(RESOURCE_COLLECTION, collNs); - ASSERT(!ResourceCatalog::get(getServiceContext()).name(rid)); + ASSERT(!ResourceCatalog::get().name(rid)); } // Create an iterator over the CollectionCatalog and assert that all collections are present. // Iteration ends when the end of the catalog is reached. TEST_F(CollectionCatalogIterationTest, EndAtEndOfCatalog) { - checkCollections(DatabaseName(boost::none, "foo")); + checkCollections(DatabaseName::createDatabaseName_forTest(boost::none, "foo")); } // Create an iterator over the CollectionCatalog and test that all collections are present. // Iteration ends // when the end of a database-specific section of the catalog is reached. TEST_F(CollectionCatalogIterationTest, EndAtEndOfSection) { - checkCollections(DatabaseName(boost::none, "bar")); + checkCollections(DatabaseName::createDatabaseName_forTest(boost::none, "bar")); } TEST_F(CollectionCatalogIterationTest, GetUUIDWontRepositionEvenIfEntryIsDropped) { - auto it = catalog.begin(opCtx.get(), DatabaseName(boost::none, "bar")); + auto range = catalog.range(DatabaseName::createDatabaseName_forTest(boost::none, "bar")); + auto it = range.begin(); auto collsIt = collsIterator("bar"); auto uuid = collsIt->first; catalog.deregisterCollection(opCtx.get(), uuid, /*isDropPending=*/false, boost::none); dropColl("bar", uuid); - ASSERT_EQUALS(uuid, it.uuid()); + ASSERT_EQUALS(uuid, (*it)->uuid()); } TEST_F(CollectionCatalogTest, OnCreateCollection) { @@ -330,15 +355,16 @@ TEST_F(CollectionCatalogTest, OnCreateCollection) { TEST_F(CollectionCatalogTest, LookupCollectionByUUID) { // Ensure the string value of the NamespaceString of the obtained Collection is equal to - // nss.ns(). - ASSERT_EQUALS(catalog.lookupCollectionByUUID(opCtx.get(), colUUID)->ns().ns(), nss.ns()); + // nss.ns_forTest(). + ASSERT_EQUALS(catalog.lookupCollectionByUUID(opCtx.get(), colUUID)->ns().ns_forTest(), + nss.ns_forTest()); // Ensure lookups of unknown UUIDs result in null pointers. ASSERT(catalog.lookupCollectionByUUID(opCtx.get(), UUID::gen()) == nullptr); } TEST_F(CollectionCatalogTest, LookupNSSByUUID) { - // Ensure the string value of the obtained NamespaceString is equal to nss.ns(). 
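
These test hunks track the API change from `ResourceCatalog::get(getServiceContext())` to the argument-free `ResourceCatalog::get()`, i.e. the resource catalog is now reached through a process-global accessor rather than through a `ServiceContext` decoration. The snippet below is only an assumption about what such an accessor pattern can look like (a function-local static plus internal locking); it is not MongoDB's actual `ResourceCatalog` implementation.

```cpp
// Hypothetical sketch of a process-global, internally synchronized registry
// reached via get() with no arguments. Illustrative only.
#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>

class Registry {
public:
    static Registry& get() {
        static Registry instance;  // constructed once, on first use
        return instance;
    }

    void add(int id, std::string name) {
        std::lock_guard<std::mutex> lk(_mutex);
        _names[id] = std::move(name);
    }

    std::optional<std::string> name(int id) const {
        std::lock_guard<std::mutex> lk(_mutex);
        if (auto it = _names.find(id); it != _names.end())
            return it->second;
        return std::nullopt;
    }

private:
    Registry() = default;
    mutable std::mutex _mutex;
    std::unordered_map<int, std::string> _names;
};
```
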
- ASSERT_EQUALS(catalog.lookupNSSByUUID(opCtx.get(), colUUID)->ns(), nss.ns()); + // Ensure the string value of the obtained NamespaceString is equal to nss.ns_forTest(). + ASSERT_EQUALS(catalog.lookupNSSByUUID(opCtx.get(), colUUID)->ns_forTest(), nss.ns_forTest()); // Ensure namespace lookups of unknown UUIDs result in empty NamespaceStrings. ASSERT_EQUALS(catalog.lookupNSSByUUID(opCtx.get(), UUID::gen()), boost::none); } @@ -346,13 +372,13 @@ TEST_F(CollectionCatalogTest, LookupNSSByUUID) { TEST_F(CollectionCatalogTest, InsertAfterLookup) { auto newUUID = UUID::gen(); NamespaceString newNss = NamespaceString::createNamespaceString_forTest(nss.dbName(), "newcol"); - std::shared_ptr newCollShared = std::make_shared(newNss); + std::shared_ptr newCollShared = std::make_shared(newUUID, newNss); auto newCol = newCollShared.get(); // Ensure that looking up non-existing UUIDs doesn't affect later registration of those UUIDs. ASSERT(catalog.lookupCollectionByUUID(opCtx.get(), newUUID) == nullptr); ASSERT_EQUALS(catalog.lookupNSSByUUID(opCtx.get(), newUUID), boost::none); - catalog.registerCollection(opCtx.get(), newUUID, std::move(newCollShared), boost::none); + catalog.registerCollection(opCtx.get(), std::move(newCollShared), boost::none); ASSERT_EQUALS(catalog.lookupCollectionByUUID(opCtx.get(), newUUID), newCol); ASSERT_EQUALS(*catalog.lookupNSSByUUID(opCtx.get(), colUUID), nss); } @@ -374,7 +400,7 @@ TEST_F(CollectionCatalogTest, OnDropCollection) { // The global catalog is used to refresh the CollectionPtr's internal state, so we temporarily // replace the global instance initialized in the service context test fixture with our own. - CollectionCatalogStasher catalogStasher(opCtx.get(), sharedCatalog); + CollectionCatalog::stash(opCtx.get(), sharedCatalog); // Before dropping collection, confirm that the CollectionPtr can be restored successfully. yieldableColl.restore(); @@ -396,10 +422,10 @@ TEST_F(CollectionCatalogTest, OnDropCollection) { TEST_F(CollectionCatalogTest, RenameCollection) { auto uuid = UUID::gen(); - NamespaceString oldNss = NamespaceString::createNamespaceString_forTest(nss.db(), "oldcol"); + NamespaceString oldNss = NamespaceString::createNamespaceString_forTest(nss.dbName(), "oldcol"); std::shared_ptr collShared = std::make_shared(uuid, oldNss); auto collection = collShared.get(); - catalog.registerCollection(opCtx.get(), uuid, std::move(collShared), boost::none); + catalog.registerCollection(opCtx.get(), std::move(collShared), boost::none); CollectionPtr yieldableColl(catalog.lookupCollectionByUUID(opCtx.get(), uuid)); ASSERT(yieldableColl); ASSERT_EQUALS(yieldableColl, CollectionPtr(collection)); @@ -416,7 +442,7 @@ TEST_F(CollectionCatalogTest, RenameCollection) { // The global catalog is used to refresh the CollectionPtr's internal state, so we temporarily // replace the global instance initialized in the service context test fixture with our own. - CollectionCatalogStasher catalogStasher(opCtx.get(), sharedCatalog); + CollectionCatalog::stash(opCtx.get(), sharedCatalog); // Before renaming collection, confirm that the CollectionPtr can be restored successfully. 
yieldableColl.restore(); @@ -458,7 +484,7 @@ TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsOldNSSIfDrop TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsNewlyCreatedNSS) { auto newUUID = UUID::gen(); NamespaceString newNss = NamespaceString::createNamespaceString_forTest(nss.dbName(), "newcol"); - std::shared_ptr<Collection> newCollShared = std::make_shared<CollectionMock>(newNss); + std::shared_ptr<Collection> newCollShared = std::make_shared<CollectionMock>(newUUID, newNss); auto newCol = newCollShared.get(); // Ensure that looking up non-existing UUIDs doesn't affect later registration of those UUIDs. @@ -469,7 +495,7 @@ TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsNewlyCreated ASSERT(catalog.lookupCollectionByUUID(opCtx.get(), newUUID) == nullptr); ASSERT_EQUALS(catalog.lookupNSSByUUID(opCtx.get(), newUUID), boost::none); - catalog.registerCollection(opCtx.get(), newUUID, std::move(newCollShared), boost::none); + catalog.registerCollection(opCtx.get(), std::move(newCollShared), boost::none); ASSERT_EQUALS(catalog.lookupCollectionByUUID(opCtx.get(), newUUID), newCol); ASSERT_EQUALS(*catalog.lookupNSSByUUID(opCtx.get(), colUUID), nss); @@ -485,7 +511,7 @@ TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsNewlyCreated TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsFreshestNSS) { NamespaceString newNss = NamespaceString::createNamespaceString_forTest(nss.dbName(), "newcol"); - std::shared_ptr<Collection> newCollShared = std::make_shared<CollectionMock>(newNss); + std::shared_ptr<Collection> newCollShared = std::make_shared<CollectionMock>(colUUID, newNss); auto newCol = newCollShared.get(); { @@ -498,7 +524,7 @@ TEST_F(CollectionCatalogTest, LookupNSSByUUIDForClosedCatalogReturnsFreshestNSS) ASSERT_EQUALS(*catalog.lookupNSSByUUID(opCtx.get(), colUUID), nss); { Lock::GlobalWrite lk(opCtx.get()); - catalog.registerCollection(opCtx.get(), colUUID, std::move(newCollShared), boost::none); + catalog.registerCollection(opCtx.get(), std::move(newCollShared), boost::none); } ASSERT_EQUALS(catalog.lookupCollectionByUUID(opCtx.get(), colUUID), newCol); @@ -540,8 +566,7 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNames) { std::vector<NamespaceString> nsss = {aColl, b1Coll, b2Coll, cColl, d1Coll, d2Coll, d3Coll}; for (auto& nss : nsss) { std::shared_ptr<Collection> newColl = std::make_shared<CollectionMock>(nss); - auto uuid = UUID::gen(); - catalog.registerCollection(opCtx.get(), uuid, std::move(newColl), boost::none); + catalog.registerCollection(opCtx.get(), std::move(newColl), boost::none); } std::vector<NamespaceString> dCollList = {d1Coll, d2Coll, d3Coll}; @@ -551,11 +576,12 @@ TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNames) { std::sort(res.begin(), res.end()); ASSERT(res == dCollList); - std::vector<DatabaseName> dbNames = {DatabaseName(boost::none, "dbA"), - DatabaseName(boost::none, "dbB"), - DatabaseName(boost::none, "dbC"), - DatabaseName(boost::none, "dbD"), - DatabaseName(boost::none, "testdb")}; + std::vector<DatabaseName> dbNames = { + DatabaseName::createDatabaseName_forTest(boost::none, "dbA"), + DatabaseName::createDatabaseName_forTest(boost::none, "dbB"), + DatabaseName::createDatabaseName_forTest(boost::none, "dbC"), + DatabaseName::createDatabaseName_forTest(boost::none, "dbD"), + DatabaseName::createDatabaseName_forTest(boost::none, "testdb")}; ASSERT(catalog.getAllDbNames() == dbNames); catalog.deregisterAllCollectionsAndViews(getServiceContext()); @@ -572,24 +598,50 @@ TEST_F(CollectionCatalogTest, GetAllDbNamesForTenant) { std::vector<NamespaceString> nsss = {dbA, dbB, dbC, dbD}; for (auto& nss : nsss) { std::shared_ptr<Collection> newColl =
std::make_shared<CollectionMock>(nss); - auto uuid = UUID::gen(); - catalog.registerCollection(opCtx.get(), uuid, std::move(newColl), boost::none); + catalog.registerCollection(opCtx.get(), std::move(newColl), boost::none); } std::vector<DatabaseName> dbNamesForTid1 = { - DatabaseName(tid1, "dbA"), DatabaseName(tid1, "dbB"), DatabaseName(tid1, "dbC")}; + DatabaseName::createDatabaseName_forTest(tid1, "dbA"), + DatabaseName::createDatabaseName_forTest(tid1, "dbB"), + DatabaseName::createDatabaseName_forTest(tid1, "dbC")}; ASSERT(catalog.getAllDbNamesForTenant(tid1) == dbNamesForTid1); - std::vector<DatabaseName> dbNamesForTid2 = {DatabaseName(tid2, "dbB")}; + std::vector<DatabaseName> dbNamesForTid2 = { + DatabaseName::createDatabaseName_forTest(tid2, "dbB")}; ASSERT(catalog.getAllDbNamesForTenant(tid2) == dbNamesForTid2); catalog.deregisterAllCollectionsAndViews(getServiceContext()); } +TEST_F(CollectionCatalogTest, GetAllTenants) { + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); + TenantId tid1 = TenantId(OID::gen()); + TenantId tid2 = TenantId(OID::gen()); + std::vector<NamespaceString> nsss = { + NamespaceString::createNamespaceString_forTest(boost::none, "a"), + NamespaceString::createNamespaceString_forTest(boost::none, "c"), + NamespaceString::createNamespaceString_forTest(boost::none, "l"), + NamespaceString::createNamespaceString_forTest(tid1, "c"), + NamespaceString::createNamespaceString_forTest(tid2, "c")}; + + for (auto& nss : nsss) { + std::shared_ptr<Collection> newColl = std::make_shared<CollectionMock>(nss); + catalog.registerCollection(opCtx.get(), std::move(newColl), boost::none); + } + + std::set<TenantId> expectedTenants = {tid1, tid2}; + ASSERT_EQ(catalog.getAllTenants(), expectedTenants); + + catalog.deregisterAllCollectionsAndViews(getServiceContext()); +} + // Test setting and fetching the profile level for a database. TEST_F(CollectionCatalogTest, DatabaseProfileLevel) { - DatabaseName testDBNameFirst(boost::none, "testdbfirst"); - DatabaseName testDBNameSecond(boost::none, "testdbsecond"); + DatabaseName testDBNameFirst = + DatabaseName::createDatabaseName_forTest(boost::none, "testdbfirst"); + DatabaseName testDBNameSecond = + DatabaseName::createDatabaseName_forTest(boost::none, "testdbsecond"); // Requesting a profile level that is not in the _databaseProfileLevel map should return the // default server-wide setting @@ -608,78 +660,6 @@ TEST_F(CollectionCatalogTest, DatabaseProfileLevel) { serverGlobalParams.defaultProfile + 1); } -TEST_F(CollectionCatalogTest, GetAllCollectionNamesAndGetAllDbNamesWithUncommittedCollections) { - NamespaceString aColl = NamespaceString::createNamespaceString_forTest("dbA", "collA"); - NamespaceString b1Coll = NamespaceString::createNamespaceString_forTest("dbB", "collB1"); - NamespaceString b2Coll = NamespaceString::createNamespaceString_forTest("dbB", "collB2"); - NamespaceString cColl = NamespaceString::createNamespaceString_forTest("dbC", "collC"); - NamespaceString d1Coll = NamespaceString::createNamespaceString_forTest("dbD", "collD1"); - NamespaceString d2Coll = NamespaceString::createNamespaceString_forTest("dbD", "collD2"); - NamespaceString d3Coll = NamespaceString::createNamespaceString_forTest("dbD", "collD3"); - - std::vector<NamespaceString> nsss = {aColl, b1Coll, b2Coll, cColl, d1Coll, d2Coll, d3Coll}; - for (auto& nss : nsss) { - std::shared_ptr<Collection> newColl = std::make_shared<CollectionMock>(nss); - auto uuid = UUID::gen(); - catalog.registerCollection(opCtx.get(), uuid, std::move(newColl), boost::none); - } - - // One dbName with only an invisible collection does not appear in dbNames.
Use const_cast to - // modify the collection in the catalog inplace, this bypasses copy-on-write behavior. - auto invisibleCollA = - const_cast(catalog.lookupCollectionByNamespace(opCtx.get(), aColl)); - invisibleCollA->setCommitted(false); - - Lock::DBLock dbLock(opCtx.get(), aColl.dbName(), MODE_S); - auto res = catalog.getAllCollectionNamesFromDb(opCtx.get(), DatabaseName(boost::none, "dbA")); - ASSERT(res.empty()); - - std::vector dbNames = {DatabaseName(boost::none, "dbB"), - DatabaseName(boost::none, "dbC"), - DatabaseName(boost::none, "dbD"), - DatabaseName(boost::none, "testdb")}; - ASSERT(catalog.getAllDbNames() == dbNames); - - // One dbName with both visible and invisible collections is still visible. - std::vector dbDNss = {d1Coll, d2Coll, d3Coll}; - for (auto& nss : dbDNss) { - // Test each combination of one collection in dbD being invisible while the other two are - // visible. - std::vector dCollList = dbDNss; - dCollList.erase(std::find(dCollList.begin(), dCollList.end(), nss)); - - // Use const_cast to modify the collection in the catalog inplace, this bypasses - // copy-on-write behavior. - auto invisibleCollD = - const_cast(catalog.lookupCollectionByNamespace(opCtx.get(), nss)); - invisibleCollD->setCommitted(false); - - Lock::DBLock dbLock(opCtx.get(), d1Coll.dbName(), MODE_S); - res = catalog.getAllCollectionNamesFromDb(opCtx.get(), DatabaseName(boost::none, "dbD")); - std::sort(res.begin(), res.end()); - ASSERT(res == dCollList); - - ASSERT(catalog.getAllDbNames() == dbNames); - invisibleCollD->setCommitted(true); - } - - invisibleCollA->setCommitted(true); // reset visibility. - - // If all dbNames consist only of invisible collections, none of these dbs is visible. - for (auto& nss : nsss) { - // Use const_cast to modify the collection in the catalog inplace, this bypasses - // copy-on-write behavior. 
- auto invisibleColl = - const_cast(catalog.lookupCollectionByNamespace(opCtx.get(), nss)); - invisibleColl->setCommitted(false); - } - - std::vector dbList = {DatabaseName(boost::none, "testdb")}; - ASSERT(catalog.getAllDbNames() == dbList); - - catalog.deregisterAllCollectionsAndViews(getServiceContext()); -} - class ForEachCollectionFromDbTest : public CatalogTestFixture { public: void createTestData() { @@ -712,7 +692,7 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDb) { auto opCtx = operationContext(); { - const DatabaseName dbName(boost::none, "db"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "db"); auto dbLock = std::make_unique(opCtx, dbName, MODE_IX); int numCollectionsTraversed = 0; catalog::forEachCollectionFromDb(opCtx, dbName, MODE_X, [&](const Collection* collection) { @@ -725,7 +705,7 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDb) { } { - const DatabaseName dbName(boost::none, "db2"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "db2"); auto dbLock = std::make_unique(opCtx, dbName, MODE_IX); int numCollectionsTraversed = 0; catalog::forEachCollectionFromDb(opCtx, dbName, MODE_IS, [&](const Collection* collection) { @@ -738,7 +718,7 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDb) { } { - const DatabaseName dbName(boost::none, "db3"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "db3"); auto dbLock = std::make_unique(opCtx, dbName, MODE_IX); int numCollectionsTraversed = 0; catalog::forEachCollectionFromDb(opCtx, dbName, MODE_S, [&](const Collection* collection) { @@ -755,7 +735,7 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDbWithPredicate) { auto opCtx = operationContext(); { - const DatabaseName dbName(boost::none, "db"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "db"); auto dbLock = std::make_unique(opCtx, dbName, MODE_IX); int numCollectionsTraversed = 0; catalog::forEachCollectionFromDb( @@ -778,7 +758,7 @@ TEST_F(ForEachCollectionFromDbTest, ForEachCollectionFromDbWithPredicate) { } { - const DatabaseName dbName(boost::none, "db"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "db"); auto dbLock = std::make_unique(opCtx, dbName, MODE_IX); int numCollectionsTraversed = 0; catalog::forEachCollectionFromDb( @@ -853,19 +833,6 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { return uuid; } - CollectionCatalog::CatalogIdLookup lookupCatalogId(const NamespaceString& nss, - const UUID& uuid, - boost::optional ts) { - // Verify that lookups and NSS and UUID yield the same result. 
- CollectionCatalog::CatalogIdLookup nssLookup = catalog()->lookupCatalogIdByNSS(nss, ts); - CollectionCatalog::CatalogIdLookup uuidLookup = catalog()->lookupCatalogIdByUUID(uuid, ts); - - ASSERT_EQ(nssLookup.result, uuidLookup.result); - ASSERT_EQ(nssLookup.id, uuidLookup.id); - - return nssLookup; - } - void dropCollection(OperationContext* opCtx, const NamespaceString& nss, Timestamp timestamp) { _setupDDLOperation(opCtx, timestamp); WriteUnitOfWork wuow(opCtx); @@ -877,7 +844,7 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { const NamespaceString& from, const NamespaceString& to, Timestamp timestamp) { - invariant(from.db() == to.db()); + invariant(from == to); _setupDDLOperation(opCtx, timestamp); WriteUnitOfWork wuow(opCtx); @@ -930,7 +897,7 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { auto indexBuildBlock = std::make_unique( writableColl->ns(), indexSpec, IndexBuildMethod::kForeground, UUID::gen()); uassertStatusOK(indexBuildBlock->init(opCtx, writableColl, /*forRecover=*/false)); - uassertStatusOK(indexBuildBlock->getEntry(opCtx, writableColl) + uassertStatusOK(indexBuildBlock->getWritableEntry(opCtx, writableColl) ->accessMethod() ->initializeAsEmpty(opCtx)); wuow.commit(); @@ -1029,13 +996,17 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { Timestamp timestamp, bool openSnapshotBeforeCommit, bool expectedExistence, - int expectedNumIndexes) { + int expectedNumIndexes, + std::function extraOpHook = {}) { _concurrentDDLOperationAndEstablishConsistentCollection( opCtx, readNssOrUUID, timestamp, - [this, &nss, &indexSpec](OperationContext* opCtx) { + [this, &nss, &indexSpec, extraOpHook](OperationContext* opCtx) { _createIndex(opCtx, nss, indexSpec); + if (extraOpHook) { + extraOpHook(opCtx); + } }, openSnapshotBeforeCommit, expectedExistence, @@ -1050,13 +1021,17 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { Timestamp timestamp, bool openSnapshotBeforeCommit, bool expectedExistence, - int expectedNumIndexes) { + int expectedNumIndexes, + std::function extraOpHook = {}) { _concurrentDDLOperationAndEstablishConsistentCollection( opCtx, readNssOrUUID, timestamp, - [this, &nss, &indexName](OperationContext* opCtx) { + [this, &nss, &indexName, extraOpHook](OperationContext* opCtx) { _dropIndex(opCtx, nss, indexName); + if (extraOpHook) { + extraOpHook(opCtx); + } }, openSnapshotBeforeCommit, expectedExistence, @@ -1099,10 +1074,11 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { uassertStatusOK(storageEngine->getCatalog()->createCollection( opCtx, nss, options, /*allocateDefaultSpace=*/true)); auto& catalogId = catalogIdRecordStorePair.first; + auto catalogEntry = DurableCatalog::get(opCtx)->getParsedCatalogEntry(opCtx, catalogId); + auto metadata = catalogEntry->metadata; std::shared_ptr ownedCollection = Collection::Factory::get(opCtx)->make( - opCtx, nss, catalogId, options, std::move(catalogIdRecordStorePair.second)); + opCtx, nss, catalogId, metadata, std::move(catalogIdRecordStorePair.second)); ownedCollection->init(opCtx); - ownedCollection->setCommitted(false); // Adds the collection to the in-memory catalog. 
CollectionCatalog::get(opCtx)->onCreateCollection(opCtx, std::move(ownedCollection)); @@ -1110,7 +1086,7 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { } void _dropCollection(OperationContext* opCtx, const NamespaceString& nss, Timestamp timestamp) { - Lock::DBLock dbLk(opCtx, nss.db(), MODE_IX); + Lock::DBLock dbLk(opCtx, nss.dbName(), MODE_IX); Lock::CollectionLock collLk(opCtx, nss, MODE_X); CollectionWriter collection(opCtx, nss); @@ -1121,11 +1097,11 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { writableCollection->getAllIndexes(&indexNames); for (const auto& indexName : indexNames) { IndexCatalog* indexCatalog = writableCollection->getIndexCatalog(); - auto indexDescriptor = indexCatalog->findIndexByName( + auto writableEntry = indexCatalog->getWritableEntryByName( opCtx, indexName, IndexCatalog::InclusionPolicy::kReady); // This also adds the index ident to the drop-pending reaper. - ASSERT_OK(indexCatalog->dropIndex(opCtx, writableCollection, indexDescriptor)); + ASSERT_OK(indexCatalog->dropIndexEntry(opCtx, writableCollection, writableEntry)); } // Add the collection ident to the drop-pending reaper. @@ -1146,7 +1122,7 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { const NamespaceString& from, const NamespaceString& to, Timestamp timestamp) { - Lock::DBLock dbLk(opCtx, from.db(), MODE_IX); + Lock::DBLock dbLk(opCtx, from.dbName(), MODE_IX); Lock::CollectionLock fromLk(opCtx, from, MODE_X); Lock::CollectionLock toLk(opCtx, to, MODE_X); @@ -1180,11 +1156,11 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { Collection* writableCollection = collection.getWritableCollection(opCtx); IndexCatalog* indexCatalog = writableCollection->getIndexCatalog(); - auto indexDescriptor = - indexCatalog->findIndexByName(opCtx, indexName, IndexCatalog::InclusionPolicy::kReady); + auto writableEntry = indexCatalog->getWritableEntryByName( + opCtx, indexName, IndexCatalog::InclusionPolicy::kReady); // This also adds the index ident to the drop-pending reaper. - ASSERT_OK(indexCatalog->dropIndex(opCtx, writableCollection, indexDescriptor)); + ASSERT_OK(indexCatalog->dropIndexEntry(opCtx, writableCollection, writableEntry)); } /** @@ -1233,10 +1209,24 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { // The onCommit handler must be registered prior to the DDL operation so it's executed // before any onCommit handlers set up in the operation. if (!openSnapshotBeforeCommit) { - newOpCtx.get()->recoveryUnit()->onCommit( - [&commitHandler](OperationContext*, boost::optional) { - commitHandler(); - }); + // Need to use 'registerChangeForCatalogVisibility' so it can happen after storage + // engine commit but before the changes become visible in the catalog. 
+ class ChangeForCatalogVisibility : public RecoveryUnit::Change { + public: + ChangeForCatalogVisibility(std::function<void()> commitHandler) + : callback(std::move(commitHandler)) {} + + void commit(OperationContext* opCtx, boost::optional<Timestamp>) final { + callback(); + } + + void rollback(OperationContext* opCtx) final {} + + std::function<void()> callback; + }; + + newOpCtx.get()->recoveryUnit()->registerChangeForCatalogVisibility( + std::make_unique<ChangeForCatalogVisibility>(commitHandler)); } ddlOperation(newOpCtx.get()); @@ -1289,32 +1279,31 @@ class CollectionCatalogTimestampTest : public ServiceContextMongoDTest { ASSERT_EQ(coll->getIndexCatalog()->numIndexesTotal(), expectedNumIndexes); auto catalogEntry = - DurableCatalog::get(opCtx)->getCatalogEntry(opCtx, coll->getCatalogId()); - ASSERT(!catalogEntry.isEmpty()); - ASSERT( - coll->isMetadataEqual(DurableCatalog::getMetadataFromCatalogEntry(catalogEntry))); + DurableCatalog::get(opCtx)->getParsedCatalogEntry(opCtx, coll->getCatalogId()); + ASSERT(catalogEntry); + ASSERT(coll->isMetadataEqual(catalogEntry->metadata->toBSON())); // Lookups from the catalog should return the newly opened collection. ASSERT_EQ(catalog->lookupCollectionByNamespace(opCtx, coll->ns()), coll); ASSERT_EQ(catalog->lookupCollectionByUUID(opCtx, coll->uuid()), coll); } else { ASSERT(!coll); - if (auto nss = nssOrUUID.nss()) { + if (nssOrUUID.isNamespaceString()) { auto catalogEntry = - DurableCatalog::get(opCtx)->scanForCatalogEntryByNss(opCtx, *nss); + DurableCatalog::get(opCtx)->scanForCatalogEntryByNss(opCtx, nssOrUUID.nss()); ASSERT(!catalogEntry); // Lookups from the catalog should return the newly opened collection (in this case // nullptr). - ASSERT_EQ(catalog->lookupCollectionByNamespace(opCtx, *nss), coll); - } else if (auto uuid = nssOrUUID.uuid()) { + ASSERT_EQ(catalog->lookupCollectionByNamespace(opCtx, nssOrUUID.nss()), coll); + } else { auto catalogEntry = - DurableCatalog::get(opCtx)->scanForCatalogEntryByUUID(opCtx, *uuid); + DurableCatalog::get(opCtx)->scanForCatalogEntryByUUID(opCtx, nssOrUUID.uuid()); ASSERT(!catalogEntry); // Lookups from the catalog should return the newly opened collection (in this case // nullptr). - ASSERT_EQ(catalog->lookupCollectionByUUID(opCtx, *uuid), coll); + ASSERT_EQ(catalog->lookupCollectionByUUID(opCtx, nssOrUUID.uuid()), coll); } } @@ -1354,31 +1343,18 @@ TEST_F(CollectionCatalogTimestampTest, MinimumValidSnapshot) { auto coll = CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss); ASSERT(coll); - ASSERT_EQ(coll->getMinimumVisibleSnapshot(), createCollectionTs); ASSERT_EQ(coll->getMinimumValidSnapshot(), createYIndexTs); - const IndexDescriptor* desc = coll->getIndexCatalog()->findIndexByName(opCtx.get(), "x_1"); - const IndexCatalogEntry* entry = coll->getIndexCatalog()->getEntry(desc); - ASSERT_EQ(entry->getMinimumVisibleSnapshot(), createXIndexTs); - - desc = coll->getIndexCatalog()->findIndexByName(opCtx.get(), "y_1"); - entry = coll->getIndexCatalog()->getEntry(desc); - ASSERT_EQ(entry->getMinimumVisibleSnapshot(), createYIndexTs); - dropIndex(opCtx.get(), nss, "x_1", dropIndexTs); dropIndex(opCtx.get(), nss, "y_1", dropIndexTs); // Fetch the latest collection instance without the indexes.
coll = CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss); ASSERT(coll); - ASSERT_EQ(coll->getMinimumVisibleSnapshot(), createCollectionTs); ASSERT_EQ(coll->getMinimumValidSnapshot(), dropIndexTs); } TEST_F(CollectionCatalogTimestampTest, OpenCollectionBeforeCreateTimestamp) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -1398,9 +1374,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenCollectionBeforeCreateTimestamp) { } TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollection) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -1439,9 +1412,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollection) { } TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollectionWithIndex) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createXIndexTs = Timestamp(20, 20); @@ -1492,9 +1462,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollectionWithIndex) { } TEST_F(CollectionCatalogTimestampTest, OpenLatestCollectionWithIndex) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createXIndexTs = Timestamp(20, 20); @@ -1532,9 +1499,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenLatestCollectionWithIndex) { } TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollectionWithDropPendingIndex) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -1603,9 +1567,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollectionWithDropPendingIndex TEST_F(CollectionCatalogTimestampTest, OpenEarlierCollectionWithDropPendingIndexDoesNotCrashWhenCheckingMultikey) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const std::string xIndexName{"x_1"}; @@ -1689,9 +1650,6 @@ TEST_F(CollectionCatalogTimestampTest, } TEST_F(CollectionCatalogTimestampTest, OpenEarlierAlreadyDropPendingCollection) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString firstNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString secondNss = NamespaceString::createNamespaceString_forTest("c.d"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -1756,9 +1714,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenEarlierAlreadyDropPendingCollection) } TEST_F(CollectionCatalogTimestampTest, 
OpenNewCollectionUsingDropPendingCollectionSharedState) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -1800,9 +1755,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenNewCollectionUsingDropPendingCollecti } TEST_F(CollectionCatalogTimestampTest, OpenExistingCollectionWithReaper) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp dropCollectionTs = Timestamp(20, 20); @@ -1861,9 +1813,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenExistingCollectionWithReaper) { } TEST_F(CollectionCatalogTimestampTest, OpenNewCollectionWithReaper) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp dropCollectionTs = Timestamp(20, 20); @@ -1915,9 +1864,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenNewCollectionWithReaper) { } TEST_F(CollectionCatalogTimestampTest, OpenExistingCollectionAndIndexesWithReaper) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -2034,9 +1980,6 @@ TEST_F(CollectionCatalogTimestampTest, OpenExistingCollectionAndIndexesWithReape } TEST_F(CollectionCatalogTimestampTest, OpenNewCollectionAndIndexesWithReaper) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -2138,748 +2081,7 @@ TEST_F(CollectionCatalogTimestampTest, OpenNewCollectionAndIndexesWithReaper) { } } -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingCreate) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Initialize the oldest timestamp to (1, 1) - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(1, 1)); - }); - - // Create collection and extract the catalogId - UUID uuid = createCollection(opCtx.get(), nss, Timestamp(1, 2)); - RecordId rid = catalog()->lookupCollectionByNamespace(opCtx.get(), nss)->getCatalogId(); - - // Lookup without timestamp returns latest catalogId - ASSERT_EQ(lookupCatalogId(nss, uuid, boost::none).id, rid); - // Lookup before create returns unknown if looking before oldest - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 0)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - // Lookup before create returns not exists if looking after oldest - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 1)).result, - 
CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup at create returns catalogId - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 2)).id, rid); - // Lookup after create returns catalogId - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 3)).id, rid); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingDrop) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Initialize the oldest timestamp to (1, 1) - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(1, 1)); - }); - - // Create and drop collection. We have a time window where the namespace exists - UUID uuid = createCollection(opCtx.get(), nss, Timestamp(1, 5)); - RecordId rid = catalog()->lookupCollectionByNamespace(opCtx.get(), nss)->getCatalogId(); - dropCollection(opCtx.get(), nss, Timestamp(1, 10)); - - // Lookup without timestamp returns none - ASSERT_EQ(lookupCatalogId(nss, uuid, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup before create and oldest returns unknown - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 0)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - // Lookup before create returns not exists - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 4)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup at create returns catalogId - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 5)).id, rid); - // Lookup after create returns catalogId - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 6)).id, rid); - // Lookup at drop returns none - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 10)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup after drop returns none - ASSERT_EQ(lookupCatalogId(nss, uuid, Timestamp(1, 20)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingRename) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString from = NamespaceString::createNamespaceString_forTest("a.b"); - NamespaceString to = NamespaceString::createNamespaceString_forTest("a.c"); - - // Initialize the oldest timestamp to (1, 1) - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(1, 1)); - }); - - // Create and rename collection. We have two windows where the collection exists but for - // different namespaces - UUID uuid = createCollection(opCtx.get(), from, Timestamp(1, 5)); - RecordId rid = catalog()->lookupCollectionByNamespace(opCtx.get(), from)->getCatalogId(); - renameCollection(opCtx.get(), from, to, Timestamp(1, 10)); - - // Lookup without timestamp on 'from' returns none. 
By 'uuid' returns catalogId - ASSERT_EQ(catalog()->lookupCatalogIdByNSS(from, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(uuid, boost::none).id, rid); - // Lookup before create and oldest returns unknown - ASSERT_EQ(lookupCatalogId(from, uuid, Timestamp(1, 0)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - // Lookup before create returns not exists - ASSERT_EQ(lookupCatalogId(from, uuid, Timestamp(1, 4)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup at create returns catalogId - ASSERT_EQ(lookupCatalogId(from, uuid, Timestamp(1, 5)).id, rid); - // Lookup after create returns catalogId - ASSERT_EQ(lookupCatalogId(from, uuid, Timestamp(1, 6)).id, rid); - // Lookup at rename on 'from' returns none. By 'uuid' returns catalogId - ASSERT_EQ(catalog()->lookupCatalogIdByNSS(from, Timestamp(1, 10)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(uuid, Timestamp(1, 10)).id, rid); - // Lookup after rename on 'from' returns none. By 'uuid' returns catalogId - ASSERT_EQ(catalog()->lookupCatalogIdByNSS(from, Timestamp(1, 20)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(uuid, Timestamp(1, 20)).id, rid); - - // Lookup without timestamp on 'to' returns catalogId - ASSERT_EQ(lookupCatalogId(to, uuid, boost::none).id, rid); - // Lookup before rename and oldest on 'to' returns unknown - ASSERT_EQ(lookupCatalogId(to, uuid, Timestamp(1, 0)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - // Lookup before rename on 'to' returns not exists. By 'uuid' returns catalogId - ASSERT_EQ(catalog()->lookupCatalogIdByNSS(to, Timestamp(1, 9)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(uuid, Timestamp(1, 9)).id, rid); - // Lookup at rename on 'to' returns catalogId - ASSERT_EQ(lookupCatalogId(to, uuid, Timestamp(1, 10)).id, rid); - // Lookup after rename on 'to' returns catalogId - ASSERT_EQ(lookupCatalogId(to, uuid, Timestamp(1, 20)).id, rid); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingRenameDropTarget) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString from = NamespaceString::createNamespaceString_forTest("a.b"); - NamespaceString to = NamespaceString::createNamespaceString_forTest("a.c"); - - // Initialize the oldest timestamp to (1, 1) - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(1, 1)); - }); - - // Create collections. The 'to' namespace will exist for one collection from Timestamp(1, 6) - // until it is dropped by the rename at Timestamp(1, 10), after which the 'to' namespace will - // correspond to the renamed collection. - UUID uuid = createCollection(opCtx.get(), from, Timestamp(1, 5)); - UUID originalUUID = createCollection(opCtx.get(), to, Timestamp(1, 6)); - RecordId rid = catalog()->lookupCollectionByNamespace(opCtx.get(), from)->getCatalogId(); - RecordId originalToRid = - catalog()->lookupCollectionByNamespace(opCtx.get(), to)->getCatalogId(); - renameCollection(opCtx.get(), from, to, Timestamp(1, 10)); - - // Lookup without timestamp on 'to' and 'uuid' returns latest catalog id. By 'originalUUID' - // returns not exists as the target was dropped. 
- ASSERT_EQ(lookupCatalogId(to, uuid, boost::none).id, rid); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(originalUUID, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup before rename and oldest on 'to' returns unknown - ASSERT_EQ(lookupCatalogId(to, uuid, Timestamp(1, 0)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(to, originalUUID, Timestamp(1, 0)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - // Lookup before rename on 'to' returns the original rid - ASSERT_EQ(lookupCatalogId(to, originalUUID, Timestamp(1, 9)).id, originalToRid); - // Lookup before rename on 'from' returns the rid - ASSERT_EQ(lookupCatalogId(from, uuid, Timestamp(1, 9)).id, rid); - // Lookup at rename timestamp on 'to' and 'uuid' returns catalogId - ASSERT_EQ(lookupCatalogId(to, uuid, Timestamp(1, 10)).id, rid); - // Lookup at rename timestamp on 'originalUUID' returns not exists as it was dropped during the - // rename. - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(originalUUID, Timestamp(1, 10)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup after rename on 'to' and 'uuid' returns catalogId - ASSERT_EQ(lookupCatalogId(to, uuid, Timestamp(1, 20)).id, rid); - // Lookup after rename timestamp on 'originalUUID' returns not exists as it was dropped during - // the rename. - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(originalUUID, Timestamp(1, 20)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingDropCreate) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create, drop and recreate collection on the same namespace. We have different catalogId. 
- UUID firstUUID = createCollection(opCtx.get(), nss, Timestamp(1, 5)); - RecordId rid1 = catalog()->lookupCollectionByNamespace(opCtx.get(), nss)->getCatalogId(); - dropCollection(opCtx.get(), nss, Timestamp(1, 10)); - UUID secondUUID = createCollection(opCtx.get(), nss, Timestamp(1, 15)); - RecordId rid2 = catalog()->lookupCollectionByNamespace(opCtx.get(), nss)->getCatalogId(); - - // Lookup without timestamp returns latest catalogId - ASSERT_EQ(lookupCatalogId(nss, secondUUID, boost::none).id, rid2); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(firstUUID, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup before first create returns not exists - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 4)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 4)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup at first create returns first catalogId - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).id, rid1); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(secondUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup after first create returns first catalogId - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 6)).id, rid1); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(secondUUID, Timestamp(1, 6)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup at drop returns none - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 10)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 10)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup after drop returns none - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 13)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 13)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup at second create returns second catalogId - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).id, rid2); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(firstUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // Lookup after second create returns second catalogId - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 20)).id, rid2); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(firstUUID, Timestamp(1, 20)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingCleanupEqDrop) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create collection and verify we have nothing to cleanup - UUID firstUUID = createCollection(opCtx.get(), nss, Timestamp(1, 5)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - - // Drop collection and verify we have nothing to cleanup as long as the oldest timestamp is - // before the drop - dropCollection(opCtx.get(), nss, Timestamp(1, 10)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 5))); - ASSERT_TRUE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 10))); - - // Create new 
collection and nothing changed with answers to needsCleanupForOldestTimestamp. - UUID secondUUID = createCollection(opCtx.get(), nss, Timestamp(1, 15)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 5))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 7))); - ASSERT_TRUE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 10))); - - // We can lookup the old catalogId before we advance the oldest timestamp and cleanup - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(secondUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - - // Cleanup at drop timestamp - CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& c) { - c.cleanupForOldestTimestampAdvanced(Timestamp(1, 10)); - }); - // After cleanup, we cannot find the old catalogId anymore. Also verify that we don't need - // anymore cleanup - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 10))); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(firstUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingCleanupGtDrop) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create collection and verify we have nothing to cleanup - UUID firstUUID = createCollection(opCtx.get(), nss, Timestamp(1, 5)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - - // Drop collection and verify we have nothing to cleanup as long as the oldest timestamp is - // before the drop - dropCollection(opCtx.get(), nss, Timestamp(1, 10)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 5))); - ASSERT_TRUE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 10))); - - // Create new collection and nothing changed with answers to needsCleanupForOldestTimestamp. 
- UUID secondUUID = createCollection(opCtx.get(), nss, Timestamp(1, 15)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 5))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 7))); - ASSERT_TRUE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 12))); - - // We can lookup the old catalogId before we advance the oldest timestamp and cleanup - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(secondUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - - // Cleanup after the drop timestamp - CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& c) { - c.cleanupForOldestTimestampAdvanced(Timestamp(1, 12)); - }); - - // After cleanup, we cannot find the old catalogId anymore. Also verify that we don't need - // anymore cleanup - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 12))); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(firstUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingCleanupGtRecreate) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create collection and verify we have nothing to cleanup - UUID firstUUID = createCollection(opCtx.get(), nss, Timestamp(1, 5)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - - // Drop collection and verify we have nothing to cleanup as long as the oldest timestamp is - // before the drop - dropCollection(opCtx.get(), nss, Timestamp(1, 10)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 5))); - ASSERT_TRUE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 10))); - - // Create new collection and nothing changed with answers to needsCleanupForOldestTimestamp. 
- UUID secondUUID = createCollection(opCtx.get(), nss, Timestamp(1, 15)); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 1))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 5))); - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 7))); - ASSERT_TRUE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 20))); - - // We can lookup the old catalogId before we advance the oldest timestamp and cleanup - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(secondUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - - // Cleanup after the recreate timestamp - CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& c) { - c.cleanupForOldestTimestampAdvanced(Timestamp(1, 20)); - }); - - // After cleanup, we cannot find the old catalogId anymore. Also verify that we don't need - // anymore cleanup - ASSERT_FALSE(catalog()->needsCleanupForOldestTimestamp(Timestamp(1, 20))); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(catalog()->lookupCatalogIdByUUID(firstUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingCleanupMultiple) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create and drop multiple namespace on the same namespace - UUID firstUUID = createCollection(opCtx.get(), nss, Timestamp(1, 5)); - dropCollection(opCtx.get(), nss, Timestamp(1, 10)); - UUID secondUUID = createCollection(opCtx.get(), nss, Timestamp(1, 15)); - dropCollection(opCtx.get(), nss, Timestamp(1, 20)); - UUID thirdUUID = createCollection(opCtx.get(), nss, Timestamp(1, 25)); - dropCollection(opCtx.get(), nss, Timestamp(1, 30)); - UUID fourthUUID = createCollection(opCtx.get(), nss, Timestamp(1, 35)); - dropCollection(opCtx.get(), nss, Timestamp(1, 40)); - - // Lookup can find all four collections - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, thirdUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, fourthUUID, Timestamp(1, 35)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - - // Cleanup oldest - CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& c) { - c.cleanupForOldestTimestampAdvanced(Timestamp(1, 10)); - }); - - // Lookup can find the three remaining collections - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, 
thirdUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, fourthUUID, Timestamp(1, 35)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - - // Cleanup - CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& c) { - c.cleanupForOldestTimestampAdvanced(Timestamp(1, 21)); - }); - - // Lookup can find the two remaining collections - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, thirdUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, fourthUUID, Timestamp(1, 35)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - - // Cleanup - CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& c) { - c.cleanupForOldestTimestampAdvanced(Timestamp(1, 32)); - }); - - // Lookup can find the last remaining collections - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, thirdUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, fourthUUID, Timestamp(1, 35)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - - // Cleanup - CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& c) { - c.cleanupForOldestTimestampAdvanced(Timestamp(1, 50)); - }); - - // Lookup now result in unknown as the oldest timestamp has advanced where mapping has been - // removed - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, thirdUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, fourthUUID, Timestamp(1, 35)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingCleanupMultipleSingleCall) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create and drop multiple namespace on the same namespace - UUID firstUUID = createCollection(opCtx.get(), nss, Timestamp(1, 5)); - dropCollection(opCtx.get(), nss, Timestamp(1, 10)); - UUID secondUUID = createCollection(opCtx.get(), nss, Timestamp(1, 15)); - dropCollection(opCtx.get(), nss, Timestamp(1, 20)); - UUID thirdUUID = createCollection(opCtx.get(), nss, Timestamp(1, 25)); - dropCollection(opCtx.get(), nss, Timestamp(1, 30)); - UUID fourthUUID = createCollection(opCtx.get(), nss, Timestamp(1, 35)); - dropCollection(opCtx.get(), nss, Timestamp(1, 40)); - - // Lookup can find all four collections - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - 
CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, thirdUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, fourthUUID, Timestamp(1, 35)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - - // Cleanup all - CollectionCatalog::write(opCtx.get(), [&](CollectionCatalog& c) { - c.cleanupForOldestTimestampAdvanced(Timestamp(1, 50)); - }); - - // Lookup now result in unknown as the oldest timestamp has advanced where mapping has been - // removed - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 5)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, thirdUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, fourthUUID, Timestamp(1, 35)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingRollback) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString a = NamespaceString::createNamespaceString_forTest("b.a"); - NamespaceString b = NamespaceString::createNamespaceString_forTest("b.b"); - NamespaceString c = NamespaceString::createNamespaceString_forTest("b.c"); - NamespaceString d = NamespaceString::createNamespaceString_forTest("b.d"); - NamespaceString e = NamespaceString::createNamespaceString_forTest("b.e"); - - // Create and drop multiple namespace on the same namespace - UUID firstUUID = createCollection(opCtx.get(), a, Timestamp(1, 1)); - dropCollection(opCtx.get(), a, Timestamp(1, 2)); - UUID secondUUID = createCollection(opCtx.get(), a, Timestamp(1, 3)); - UUID thirdUUID = createCollection(opCtx.get(), b, Timestamp(1, 5)); - UUID fourthUUID = createCollection(opCtx.get(), c, Timestamp(1, 7)); - UUID fifthUUID = createCollection(opCtx.get(), d, Timestamp(1, 8)); - UUID sixthUUID = createCollection(opCtx.get(), e, Timestamp(1, 9)); - dropCollection(opCtx.get(), b, Timestamp(1, 10)); - - // Rollback to Timestamp(1, 8) - CollectionCatalog::write( - opCtx.get(), [&](CollectionCatalog& c) { c.cleanupForCatalogReopen(Timestamp(1, 8)); }); - - ASSERT_EQ(lookupCatalogId(e, firstUUID, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(a, secondUUID, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(b, thirdUUID, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(c, fourthUUID, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(d, fifthUUID, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(e, sixthUUID, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingInsert) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create a collection on the namespace - UUID firstUUID = createCollection(opCtx.get(), nss, Timestamp(1, 10)); - 
dropCollection(opCtx.get(), nss, Timestamp(1, 20)); - UUID secondUUID = createCollection(opCtx.get(), nss, Timestamp(1, 30)); - - auto rid1 = lookupCatalogId(nss, firstUUID, Timestamp(1, 10)).id; - auto rid2 = lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).id; - - // Simulate startup where we have a range [oldest, stable] by creating and dropping collections - // and then advancing the oldest timestamp and then reading behind it. - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(1, 40)); - }); - - // Confirm that the mappings have been cleaned up - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - - { - OneOffRead oor(opCtx.get(), Timestamp(1, 17)); - Lock::GlobalLock globalLock(opCtx.get(), MODE_IS); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 17)); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), {nss.db(), firstUUID}, Timestamp(1, 17)); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), {nss.db(), secondUUID}, Timestamp(1, 17)); - - // Lookups before the inserted timestamp is still unknown - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 11)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 11)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - - // Lookups at or after the inserted timestamp is found, even if they don't match with WT - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 19)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 19)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 25)).id, rid1); - // The entry at Timestamp(1, 30) is unaffected - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).id, rid2); - } - - { - OneOffRead oor(opCtx.get(), Timestamp(1, 12)); - Lock::GlobalLock globalLock(opCtx.get(), MODE_IS); - - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 12)); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), {nss.db(), firstUUID}, Timestamp(1, 12)); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), {nss.db(), secondUUID}, Timestamp(1, 12)); - - // We should now have extended the range from Timestamp(1, 17) to Timestamp(1, 12) - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 12)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 12)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 16)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - 
ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 16)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 19)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 19)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 25)).id, rid1); - // The entry at Timestamp(1, 30) is unaffected - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).id, rid2); - } - - { - OneOffRead oor(opCtx.get(), Timestamp(1, 25)); - Lock::GlobalLock globalLock(opCtx.get(), MODE_IS); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 25)); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), {nss.db(), firstUUID}, Timestamp(1, 25)); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), {nss.db(), secondUUID}, Timestamp(1, 25)); - - // Check the entries, most didn't change - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 19)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 19)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 22)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 22)).id, rid1); - // At Timestamp(1, 25) we now return kNotExists - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // But next timestamp returns unknown - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 26)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 26)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - // The entry at Timestamp(1, 30) is unaffected - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).id, rid2); - } - - { - OneOffRead oor(opCtx.get(), Timestamp(1, 25)); - Lock::GlobalLock globalLock(opCtx.get(), MODE_IS); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 26)); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), {nss.db(), firstUUID}, Timestamp(1, 26)); - CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), {nss.db(), secondUUID}, Timestamp(1, 26)); - - // We should not have re-written the existing entry at Timestamp(1, 26) - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).result, - 
CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 17)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 19)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 19)).id, rid1); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 22)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 22)).id, rid1); - // At Timestamp(1, 25) we now return kNotExists - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 25)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - // But next timestamp returns unknown - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 26)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 26)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 27)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 27)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - // The entry at Timestamp(1, 30) is unaffected - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 30)).id, rid2); - } - - - // Clean up, check so we are back to the original state - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(1, 41)); - }); - - ASSERT_EQ(lookupCatalogId(nss, firstUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - ASSERT_EQ(lookupCatalogId(nss, secondUUID, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); -} - -TEST_F(CollectionCatalogTimestampTest, CatalogIdMappingInsertUnknown) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Simulate startup where we have a range [oldest, stable] by advancing the oldest timestamp and - // then reading behind it. - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(1, 40)); - }); - - // Reading before the oldest is unknown - ASSERT_EQ(catalog()->lookupCatalogIdByNSS(nss, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kUnknown); - - // Try to instantiate a non existing collection at this timestamp. 
- CollectionCatalog::get(opCtx.get()) - ->establishConsistentCollection(opCtx.get(), nss, Timestamp(1, 15)); - - // Lookup should now be not existing - ASSERT_EQ(catalog()->lookupCatalogIdByNSS(nss, Timestamp(1, 15)).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - TEST_F(CollectionCatalogTimestampTest, CollectionLifetimeTiedToStorageTransactionLifetime) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -2963,89 +2165,7 @@ TEST_F(CollectionCatalogTimestampTest, CollectionLifetimeTiedToStorageTransactio } } -TEST_F(CollectionCatalogNoTimestampTest, CatalogIdMappingNoTimestamp) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create a collection on the namespace and confirm that we can lookup - UUID uuid = createCollection(opCtx.get(), nss, Timestamp()); - ASSERT_EQ(lookupCatalogId(nss, uuid, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - - // Drop the collection and confirm it is also removed from mapping - dropCollection(opCtx.get(), nss, Timestamp()); - ASSERT_EQ(lookupCatalogId(nss, uuid, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - -TEST_F(CollectionCatalogNoTimestampTest, CatalogIdMappingNoTimestampRename) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString a = NamespaceString::createNamespaceString_forTest("a.a"); - NamespaceString b = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create a collection on the namespace and confirm that we can lookup - UUID uuid = createCollection(opCtx.get(), a, Timestamp()); - ASSERT_EQ(lookupCatalogId(a, uuid, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(catalog()->lookupCatalogIdByNSS(b).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - - // Rename the collection and check lookup behavior - renameCollection(opCtx.get(), a, b, Timestamp()); - ASSERT_EQ(catalog()->lookupCatalogIdByNSS(a).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(b, uuid, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kExists); - - // Drop the collection and confirm it is also removed from mapping - dropCollection(opCtx.get(), b, Timestamp()); - ASSERT_EQ(lookupCatalogId(a, uuid, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(b, uuid, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - -TEST_F(CollectionCatalogNoTimestampTest, CatalogIdMappingNoTimestampRenameDropTarget) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString a = NamespaceString::createNamespaceString_forTest("a.a"); - NamespaceString b = NamespaceString::createNamespaceString_forTest("a.b"); - - // Create collections on the namespaces and confirm that we can lookup - UUID uuidA = createCollection(opCtx.get(), a, Timestamp()); - UUID uuidB = createCollection(opCtx.get(), b, Timestamp()); - auto [aId, 
aResult] = lookupCatalogId(a, uuidA, boost::none); - auto [bId, bResult] = lookupCatalogId(b, uuidB, boost::none); - ASSERT_EQ(aResult, CollectionCatalog::CatalogIdLookup::Existence::kExists); - ASSERT_EQ(bResult, CollectionCatalog::CatalogIdLookup::Existence::kExists); - - // Rename the collection and check lookup behavior - renameCollection(opCtx.get(), a, b, Timestamp()); - auto [aIdAfter, aResultAfter] = catalog()->lookupCatalogIdByNSS(a, boost::none); - auto [bIdAfter, bResultAfter] = lookupCatalogId(b, uuidA, boost::none); - ASSERT_EQ(aResultAfter, CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(bResultAfter, CollectionCatalog::CatalogIdLookup::Existence::kExists); - // Verify that the the recordId on b is now what was on a. We performed a rename with - // dropTarget=true. - ASSERT_EQ(aId, bIdAfter); - - // Drop the collection and confirm it is also removed from mapping - dropCollection(opCtx.get(), b, Timestamp()); - ASSERT_EQ(lookupCatalogId(a, uuidA, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); - ASSERT_EQ(lookupCatalogId(b, uuidB, boost::none).result, - CollectionCatalog::CatalogIdLookup::Existence::kNotExists); -} - DEATH_TEST_F(CollectionCatalogTimestampTest, OpenCollectionInWriteUnitOfWork, "invariant") { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -3068,9 +2188,6 @@ DEATH_TEST_F(CollectionCatalogTimestampTest, OpenCollectionInWriteUnitOfWork, "i } TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateCollectionAndOpenCollectionBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -3081,9 +2198,6 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateCollectionAndOpenCollecti } TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateCollectionAndOpenCollectionAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -3095,9 +2209,6 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateCollectionAndOpenCollecti TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateCollectionAndOpenCollectionByUUIDBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); UUID uuid = UUID::gen(); @@ -3110,9 +2221,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateCollectionAndOpenCollectionByUUIDAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); UUID uuid = UUID::gen(); @@ -3124,9 +2232,6 @@ TEST_F(CollectionCatalogTimestampTest, } TEST_F(CollectionCatalogTimestampTest, 
ConcurrentDropCollectionAndOpenCollectionBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp dropCollectionTs = Timestamp(20, 20); @@ -3140,9 +2245,6 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentDropCollectionAndOpenCollection } TEST_F(CollectionCatalogTimestampTest, ConcurrentDropCollectionAndOpenCollectionAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp dropCollectionTs = Timestamp(20, 20); @@ -3157,9 +2259,6 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentDropCollectionAndOpenCollection TEST_F(CollectionCatalogTimestampTest, ConcurrentDropCollectionAndOpenCollectionByUUIDBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp dropCollectionTs = Timestamp(20, 20); @@ -3176,9 +2275,6 @@ TEST_F(CollectionCatalogTimestampTest, } TEST_F(CollectionCatalogTimestampTest, ConcurrentDropCollectionAndOpenCollectionByUUIDAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp dropCollectionTs = Timestamp(20, 20); @@ -3196,9 +2292,6 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentDropCollectionAndOpenCollection TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionAndOpenCollectionWithOriginalNameBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString newNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -3214,9 +2307,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionAndOpenCollectionWithOriginalNameAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString newNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -3246,9 +2336,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionAndOpenCollectionWithNewNameBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString newNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -3278,9 +2365,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, 
ConcurrentRenameCollectionAndOpenCollectionWithNewNameAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString newNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -3296,9 +2380,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionAndOpenCollectionWithUUIDBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString newNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -3323,9 +2404,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionAndOpenCollectionWithUUIDAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString newNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createCollectionTs = Timestamp(10, 10); @@ -3360,9 +2438,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionWithDropTargetAndOpenCollectionBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createOriginalCollectionTs = Timestamp(10, 10); @@ -3394,9 +2469,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionWithDropTargetAndOpenCollectionAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createOriginalCollectionTs = Timestamp(10, 10); @@ -3406,10 +2478,13 @@ TEST_F(CollectionCatalogTimestampTest, createCollection(opCtx.get(), originalNss, createOriginalCollectionTs); createCollection(opCtx.get(), targetNss, createTargetCollectionTs); - // We expect to find the UUID for the target collection + // We expect to find the UUID for the original collection UUID uuid = CollectionCatalog::get(opCtx.get()) - ->lookupCollectionByNamespace(opCtx.get(), targetNss) + ->lookupCollectionByNamespace(opCtx.get(), originalNss) ->uuid(); + UUID uuidDropped = CollectionCatalog::get(opCtx.get()) + ->lookupCollectionByNamespace(opCtx.get(), targetNss) + ->uuid(); // When the snapshot is opened right after the rename is committed to the durable catalog, and // the openCollection looks for the targetNss, we find the original collection. 
@@ -3420,6 +2495,8 @@ TEST_F(CollectionCatalogTimestampTest, CollectionCatalog::get(opCtx.get())->lookupCollectionByUUID(opCtx.get(), uuid); ASSERT(coll); ASSERT_EQ(coll->ns(), targetNss); + ASSERT(!CollectionCatalog::get(opCtx.get()) + ->lookupCollectionByUUID(opCtx.get(), uuidDropped)); ASSERT_EQ(CollectionCatalog::get(opCtx.get())->lookupNSSByUUID(opCtx.get(), uuid), targetNss); @@ -3428,9 +2505,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionWithDropTargetAndOpenCollectionWithOriginalUUIDBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createOriginalCollectionTs = Timestamp(10, 10); @@ -3479,9 +2553,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionWithDropTargetAndOpenCollectionWithOriginalUUIDAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createOriginalCollectionTs = Timestamp(10, 10); @@ -3524,9 +2595,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionWithDropTargetAndOpenCollectionWithTargetUUIDBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createOriginalCollectionTs = Timestamp(10, 10); @@ -3574,9 +2642,6 @@ TEST_F(CollectionCatalogTimestampTest, TEST_F(CollectionCatalogTimestampTest, ConcurrentRenameCollectionWithDropTargetAndOpenCollectionWithTargetUUIDAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString originalNss = NamespaceString::createNamespaceString_forTest("a.b"); const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest("a.c"); const Timestamp createOriginalCollectionTs = Timestamp(10, 10); @@ -3616,9 +2681,6 @@ TEST_F(CollectionCatalogTimestampTest, } TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createXIndexTs = Timestamp(20, 20); @@ -3646,10 +2708,42 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionBef 1); } -TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); +TEST_F(CollectionCatalogTimestampTest, + ConcurrentCreateIndexAndOpenCollectionBeforeCommitWithUnrelatedMultikey) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + const 
Timestamp createCollectionTs = Timestamp(10, 10); + const Timestamp createXIndexTs = Timestamp(20, 20); + const Timestamp createYIndexTs = Timestamp(30, 30); + createCollection(opCtx.get(), nss, createCollectionTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "x_1" + << "key" << BSON("x" << 1)), + createXIndexTs); + + auto makeIndexMultikey = [nss](OperationContext* opCtx) { + auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + coll->setIndexIsMultikey(opCtx, "x_1", {{0U}}); + }; + + // When the snapshot is opened right before the second index create is committed to the durable + // catalog, the collection instance should not have the second index. + concurrentCreateIndexAndEstablishConsistentCollection(opCtx.get(), + nss, + nss, + BSON("v" << 2 << "name" + << "y_1" + << "key" << BSON("y" << 1)), + createYIndexTs, + true, + true, + 1, + makeIndexMultikey); +} + +TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionAfterCommit) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createXIndexTs = Timestamp(20, 20); @@ -3677,10 +2771,42 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionAft 2); } -TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionByUUIDBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); +TEST_F(CollectionCatalogTimestampTest, + ConcurrentCreateIndexAndOpenCollectionAfterCommitWithUnrelatedMultikey) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + const Timestamp createCollectionTs = Timestamp(10, 10); + const Timestamp createXIndexTs = Timestamp(20, 20); + const Timestamp createYIndexTs = Timestamp(30, 30); + + createCollection(opCtx.get(), nss, createCollectionTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "x_1" + << "key" << BSON("x" << 1)), + createXIndexTs); + + auto makeIndexMultikey = [nss](OperationContext* opCtx) { + auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + coll->setIndexIsMultikey(opCtx, "x_1", {{0U}}); + }; + + // When the snapshot is opened right after the second index create is committed to the durable + // catalog, the collection instance should have both indexes. 
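+    // The makeIndexMultikey callback defined above performs an unrelated multikey write on the
+    // 'x_1' index as part of the concurrent scenario driven by the helper below.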
+ concurrentCreateIndexAndEstablishConsistentCollection(opCtx.get(), + nss, + nss, + BSON("v" << 2 << "name" + << "y_1" + << "key" << BSON("y" << 1)), + createYIndexTs, + false, + true, + 2, + makeIndexMultikey); +} +TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionByUUIDBeforeCommit) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createXIndexTs = Timestamp(20, 20); @@ -3711,10 +2837,46 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionByU 1); } -TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionByUUIDAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); +TEST_F(CollectionCatalogTimestampTest, + ConcurrentCreateIndexAndOpenCollectionByUUIDBeforeCommitWithUnrelatedMultikey) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + const Timestamp createCollectionTs = Timestamp(10, 10); + const Timestamp createXIndexTs = Timestamp(20, 20); + const Timestamp createYIndexTs = Timestamp(30, 30); + + createCollection(opCtx.get(), nss, createCollectionTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "x_1" + << "key" << BSON("x" << 1)), + createXIndexTs); + + auto makeIndexMultikey = [nss](OperationContext* opCtx) { + auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + coll->setIndexIsMultikey(opCtx, "x_1", {{0U}}); + }; + + UUID uuid = + CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss)->uuid(); + NamespaceStringOrUUID uuidWithDbName(nss.dbName(), uuid); + + // When the snapshot is opened right before the second index create is committed to the durable + // catalog, the collection instance should not have the second index. 
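+    // Same scenario as above, but the collection is established through its UUID rather than by
+    // namespace.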
+ concurrentCreateIndexAndEstablishConsistentCollection(opCtx.get(), + nss, + uuidWithDbName, + BSON("v" << 2 << "name" + << "y_1" + << "key" << BSON("y" << 1)), + createYIndexTs, + true, + true, + 1, + makeIndexMultikey); +} +TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionByUUIDAfterCommit) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createXIndexTs = Timestamp(20, 20); @@ -3745,10 +2907,46 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentCreateIndexAndOpenCollectionByU 2); } -TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); +TEST_F(CollectionCatalogTimestampTest, + ConcurrentCreateIndexAndOpenCollectionByUUIDAfterCommitWithUnrelatedMultikey) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + const Timestamp createCollectionTs = Timestamp(10, 10); + const Timestamp createXIndexTs = Timestamp(20, 20); + const Timestamp createYIndexTs = Timestamp(30, 30); + + createCollection(opCtx.get(), nss, createCollectionTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "x_1" + << "key" << BSON("x" << 1)), + createXIndexTs); + auto makeIndexMultikey = [nss](OperationContext* opCtx) { + auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + coll->setIndexIsMultikey(opCtx, "x_1", {{0U}}); + }; + + UUID uuid = + CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss)->uuid(); + NamespaceStringOrUUID uuidWithDbName(nss.dbName(), uuid); + + // When the snapshot is opened right after the second index create is committed to the durable + // catalog, the collection instance should have both indexes. 
+ concurrentCreateIndexAndEstablishConsistentCollection(opCtx.get(), + nss, + uuidWithDbName, + BSON("v" << 2 << "name" + << "y_1" + << "key" << BSON("y" << 1)), + createYIndexTs, + false, + true, + 2, + makeIndexMultikey); +} + +TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionBeforeCommit) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -3774,10 +2972,39 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionBefor opCtx.get(), nss, nss, "y_1", dropIndexTs, true, true, 2); } -TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); +TEST_F(CollectionCatalogTimestampTest, + ConcurrentDropIndexAndOpenCollectionBeforeCommitWithUnrelatedMultikey) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + const Timestamp createCollectionTs = Timestamp(10, 10); + const Timestamp createIndexTs = Timestamp(20, 20); + const Timestamp dropIndexTs = Timestamp(30, 30); + + createCollection(opCtx.get(), nss, createCollectionTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "x_1" + << "key" << BSON("x" << 1)), + createIndexTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "y_1" + << "key" << BSON("y" << 1)), + createIndexTs); + auto makeIndexMultikey = [nss](OperationContext* opCtx) { + auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + coll->setIndexIsMultikey(opCtx, "x_1", {{0U}}); + }; + + // When the snapshot is opened right before the index drop is committed to the durable + // catalog, the collection instance should not have the second index. 
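+    // makeIndexMultikey again applies the unrelated multikey write on 'x_1', this time while the
+    // drop of 'y_1' is in flight.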
+ concurrentDropIndexAndEstablishConsistentCollection( + opCtx.get(), nss, nss, "y_1", dropIndexTs, true, true, 2, makeIndexMultikey); +} + +TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionAfterCommit) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -3803,10 +3030,39 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionAfter opCtx.get(), nss, nss, "y_1", dropIndexTs, false, true, 1); } -TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionByUUIDBeforeCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); +TEST_F(CollectionCatalogTimestampTest, + ConcurrentDropIndexAndOpenCollectionAfterCommitWithUnrelatedMultikey) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + const Timestamp createCollectionTs = Timestamp(10, 10); + const Timestamp createIndexTs = Timestamp(20, 20); + const Timestamp dropIndexTs = Timestamp(30, 30); + createCollection(opCtx.get(), nss, createCollectionTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "x_1" + << "key" << BSON("x" << 1)), + createIndexTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "y_1" + << "key" << BSON("y" << 1)), + createIndexTs); + + auto makeIndexMultikey = [nss](OperationContext* opCtx) { + auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + coll->setIndexIsMultikey(opCtx, "x_1", {{0U}}); + }; + + // When the snapshot is opened right after the index drop is committed to the durable + // catalog, the collection instance should not have the second index. 
+ concurrentDropIndexAndEstablishConsistentCollection( + opCtx.get(), nss, nss, "y_1", dropIndexTs, false, true, 1, makeIndexMultikey); +} + +TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionByUUIDBeforeCommit) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -3835,10 +3091,43 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionByUUI opCtx.get(), nss, uuidWithDbName, "y_1", dropIndexTs, true, true, 2); } -TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionByUUIDAfterCommit) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); +TEST_F(CollectionCatalogTimestampTest, + ConcurrentDropIndexAndOpenCollectionByUUIDBeforeCommitWithUnrelatedMultikey) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + const Timestamp createCollectionTs = Timestamp(10, 10); + const Timestamp createIndexTs = Timestamp(20, 20); + const Timestamp dropIndexTs = Timestamp(30, 30); + createCollection(opCtx.get(), nss, createCollectionTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "x_1" + << "key" << BSON("x" << 1)), + createIndexTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "y_1" + << "key" << BSON("y" << 1)), + createIndexTs); + + auto makeIndexMultikey = [nss](OperationContext* opCtx) { + auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + coll->setIndexIsMultikey(opCtx, "x_1", {{0U}}); + }; + + UUID uuid = + CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss)->uuid(); + NamespaceStringOrUUID uuidWithDbName(nss.dbName(), uuid); + + // When the snapshot is opened right before the index drop is committed to the durable + // catalog, the collection instance should not have the second index. 
+ concurrentDropIndexAndEstablishConsistentCollection( + opCtx.get(), nss, uuidWithDbName, "y_1", dropIndexTs, true, true, 2, makeIndexMultikey); +} + +TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionByUUIDAfterCommit) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -3867,10 +3156,43 @@ TEST_F(CollectionCatalogTimestampTest, ConcurrentDropIndexAndOpenCollectionByUUI opCtx.get(), nss, uuidWithDbName, "y_1", dropIndexTs, false, true, 1); } -TEST_F(CollectionCatalogTimestampTest, OpenCollectionBetweenIndexBuildInProgressAndReady) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); +TEST_F(CollectionCatalogTimestampTest, + ConcurrentDropIndexAndOpenCollectionByUUIDAfterCommitWithUnrelatedMultikey) { + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + const Timestamp createCollectionTs = Timestamp(10, 10); + const Timestamp createIndexTs = Timestamp(20, 20); + const Timestamp dropIndexTs = Timestamp(30, 30); + createCollection(opCtx.get(), nss, createCollectionTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "x_1" + << "key" << BSON("x" << 1)), + createIndexTs); + createIndex(opCtx.get(), + nss, + BSON("v" << 2 << "name" + << "y_1" + << "key" << BSON("y" << 1)), + createIndexTs); + + auto makeIndexMultikey = [nss](OperationContext* opCtx) { + auto coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + coll->setIndexIsMultikey(opCtx, "x_1", {{0U}}); + }; + + UUID uuid = + CollectionCatalog::get(opCtx.get())->lookupCollectionByNamespace(opCtx.get(), nss)->uuid(); + NamespaceStringOrUUID uuidWithDbName(nss.dbName(), uuid); + + // When the snapshot is opened right after the index drop is committed to the durable + // catalog, the collection instance should not have the second index. + concurrentDropIndexAndEstablishConsistentCollection( + opCtx.get(), nss, uuidWithDbName, "y_1", dropIndexTs, false, true, 1, makeIndexMultikey); +} + +TEST_F(CollectionCatalogTimestampTest, OpenCollectionBetweenIndexBuildInProgressAndReady) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const Timestamp createIndexTs = Timestamp(20, 20); @@ -3928,13 +3250,10 @@ TEST_F(CollectionCatalogTimestampTest, OpenCollectionBetweenIndexBuildInProgress } TEST_F(CollectionCatalogTimestampTest, ResolveNamespaceStringOrUUIDAtLatest) { - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - const NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); const Timestamp createCollectionTs = Timestamp(10, 10); const UUID uuid = createCollection(opCtx.get(), nss, createCollectionTs); - const NamespaceStringOrUUID nssOrUUID = NamespaceStringOrUUID(nss.db(), uuid); + const NamespaceStringOrUUID nssOrUUID = NamespaceStringOrUUID(nss.dbName(), uuid); NamespaceString resolvedNss = CollectionCatalog::get(opCtx.get())->resolveNamespaceStringOrUUID(opCtx.get(), nssOrUUID); @@ -3964,39 +3283,57 @@ TEST_F(CollectionCatalogTimestampTest, ResolveNamespaceStringOrUUIDAtLatest) { } } -TEST_F(CollectionCatalogTimestampTest, MixedModeWrites) { - // This test simulates the creation and dropping of system.profile collections. 
This collection - // is created untimestamped, but dropped with a timestamp. - // TODO SERVER-75740: Remove this test. - RAIIServerParameterControllerForTest featureFlagController( - "featureFlagPointInTimeCatalogLookups", true); - - NamespaceString nss = NamespaceString::createNamespaceString_forTest("system.profile"); - - // Initialize the oldest timestamp. - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(1, 1)); - }); - - // Create and drop the collection. We have a time window where the namespace exists. +TEST_F(CollectionCatalogTimestampTest, IndexCatalogEntryCopying) { + const NamespaceString nss("test.abc"); createCollection(opCtx.get(), nss, Timestamp::min()); - dropCollection(opCtx.get(), nss, Timestamp(10, 10)); - // Before performing cleanup, re-create the collection. - createCollection(opCtx.get(), nss, Timestamp::min()); + { + // Start but do not finish an index build. + IndexSpec spec; + spec.version(1).name("x_1").addKeys(BSON("x" << 1)); + auto desc = IndexDescriptor(IndexNames::BTREE, spec.toBSON()); + AutoGetCollection autoColl(opCtx.get(), nss, MODE_X); + WriteUnitOfWork wuow(opCtx.get()); + auto collWriter = autoColl.getWritableCollection(opCtx.get()); + ASSERT_OK(collWriter->prepareForIndexBuild(opCtx.get(), &desc, boost::none, false)); + collWriter->getIndexCatalog()->createIndexEntry( + opCtx.get(), collWriter, std::move(desc), CreateIndexEntryFlags::kNone); + wuow.commit(); + } + + // In a different client, open the latest collection instance and verify the index is not ready. + auto newClient = opCtx->getServiceContext()->makeClient("alternativeClient"); + auto newOpCtx = newClient->makeOperationContext(); + auto latestCatalog = CollectionCatalog::latest(newOpCtx.get()); + auto latestColl = + latestCatalog->establishConsistentCollection(newOpCtx.get(), nss, boost::none); - // Perform collection catalog cleanup. - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(20, 20)); - }); + ASSERT_EQ(1, latestColl->getIndexCatalog()->numIndexesTotal()); + ASSERT_EQ(0, latestColl->getIndexCatalog()->numIndexesReady()); + ASSERT_EQ(1, latestColl->getIndexCatalog()->numIndexesInProgress()); + const IndexDescriptor* desc = latestColl->getIndexCatalog()->findIndexByName( + newOpCtx.get(), "x_1", IndexCatalog::InclusionPolicy::kUnfinished); + const IndexCatalogEntry* entry = latestColl->getIndexCatalog()->getEntry(desc); + ASSERT(!entry->isReady()); - // Drop the re-created collection. - dropCollection(opCtx.get(), nss, Timestamp(30, 30)); + { + // Now finish the index build on the original client. + AutoGetCollection autoColl(opCtx.get(), nss, MODE_X); + WriteUnitOfWork wuow(opCtx.get()); + auto collWriter = autoColl.getWritableCollection(opCtx.get()); + auto writableEntry = collWriter->getIndexCatalog()->getWritableEntryByName( + opCtx.get(), "x_1", IndexCatalog::InclusionPolicy::kUnfinished); + ASSERT_NOT_EQUALS(desc, writableEntry->descriptor()); + collWriter->getIndexCatalog()->indexBuildSuccess(opCtx.get(), collWriter, writableEntry); + ASSERT(writableEntry->isReady()); + wuow.commit(); + } - // Cleanup again. - CollectionCatalog::write(opCtx.get(), [](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(Timestamp(25, 25)); - }); + // The index entry in the different client remains untouched. 
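+    // (indexBuildSuccess went through a writable copy of the entry, so the entry pointer obtained
+    // from the previously established collection instance is not mutated.)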
+ ASSERT_EQ(1, latestColl->getIndexCatalog()->numIndexesTotal()); + ASSERT_EQ(0, latestColl->getIndexCatalog()->numIndexesReady()); + ASSERT_EQ(1, latestColl->getIndexCatalog()->numIndexesInProgress()); + ASSERT(!entry->isReady()); } } // namespace } // namespace mongo diff --git a/src/mongo/db/catalog/collection_compact.cpp b/src/mongo/db/catalog/collection_compact.cpp index 1186fb92b7aeb..316c4dbe312e8 100644 --- a/src/mongo/db/catalog/collection_compact.cpp +++ b/src/mongo/db/catalog/collection_compact.cpp @@ -30,24 +30,43 @@ #include "mongo/db/catalog/collection_compact.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/catalog/index_key_validate.h" -#include "mongo/db/catalog/multi_index_block.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/db_raii.h" -#include "mongo/db/index/index_access_method.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index_builds_coordinator.h" #include "mongo/db/operation_context.h" #include "mongo/db/timeseries/catalog_helper.h" +#include "mongo/db/views/view.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - namespace mongo { +MONGO_FAIL_POINT_DEFINE(pauseCompactCommandBeforeWTCompact); + using logv2::LogComponent; namespace { @@ -118,6 +137,8 @@ StatusWith compactCollection(OperationContext* opCtx, auto bytesBefore = recordStore->storageSize(opCtx) + collection->getIndexSize(opCtx); auto indexCatalog = collection->getIndexCatalog(); + pauseCompactCommandBeforeWTCompact.pauseWhileSet(); + Status status = recordStore->compact(opCtx); if (!status.isOK()) return status; diff --git a/src/mongo/db/catalog/collection_compact.h b/src/mongo/db/catalog/collection_compact.h index ab22cf0715516..7609747c75104 100644 --- a/src/mongo/db/catalog/collection_compact.h +++ b/src/mongo/db/catalog/collection_compact.h @@ -29,7 +29,11 @@ #pragma once +#include + #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/storage/record_store.h" namespace mongo { diff --git a/src/mongo/db/catalog/collection_impl.cpp b/src/mongo/db/catalog/collection_impl.cpp index e04547b392a7a..5459b6e1e9620 100644 --- a/src/mongo/db/catalog/collection_impl.cpp +++ b/src/mongo/db/catalog/collection_impl.cpp @@ -29,44 +29,85 @@ #include "mongo/db/catalog/collection_impl.h" -#include "mongo/bson/ordering.h" -#include "mongo/bson/simple_bsonelement_comparator.h" -#include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/crypto/fle_crypto.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" 
+#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/catalog/catalog_stats.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_catalog_impl.h" #include "mongo/db/catalog/index_key_validate.h" -#include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/catalog/uncommitted_multikey.h" -#include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" +#include "mongo/db/catalog_shard_feature_flag_gen.h" // IWYU pragma: keep +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/keypattern.h" +#include "mongo/db/index_names.h" #include "mongo/db/matcher/doc_validation_error.h" #include "mongo/db/matcher/doc_validation_util.h" -#include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/matcher/implicit_validator.h" -#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_interface.h" -#include "mongo/db/query/collection_query_info.h" +#include "mongo/db/query/util/make_data_structure.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/capped_snapshots.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_extended_range.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/ttl_collection_cache.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -97,16 +138,16 @@ Status checkValidatorCanBeUsedOnNs(const BSONObj& validator, if (nss.isSystem() && !nss.isDropPendingNamespace()) { return {ErrorCodes::InvalidOptions, - str::stream() << "Document validators not allowed on system collection " << nss - << " with UUID " << uuid}; + str::stream() << "Document validators not allowed on system collection " + << nss.toStringForErrorMsg() << " with UUID " 
<< uuid}; } // Allow schema on config.settings. This is created internally, and user changes to this // validator are disallowed in the createCollection and collMod commands. if (nss.isOnInternalDb() && nss != NamespaceString::kConfigSettingsNamespace) { return {ErrorCodes::InvalidOptions, - str::stream() << "Document validators are not allowed on collection " << nss.ns() - << " with UUID " << uuid << " in the " + str::stream() << "Document validators are not allowed on collection " + << nss.toStringForErrorMsg() << " with UUID " << uuid << " in the " << nss.dbName().toStringForErrorMsg() << " internal database"}; } return Status::OK(); @@ -145,7 +186,7 @@ Status validateIsNotInDbs(const NamespaceString& ns, // Validates that the option is not used on admin, local or config db as well as not being used on // config servers. Status validateChangeStreamPreAndPostImagesOptionIsPermitted(const NamespaceString& ns) { - const auto validationStatus = + auto validationStatus = validateIsNotInDbs(ns, {DatabaseName::kAdmin, DatabaseName::kLocal, DatabaseName::kConfig}, "changeStreamPreAndPostImages"); @@ -153,13 +194,6 @@ Status validateChangeStreamPreAndPostImagesOptionIsPermitted(const NamespaceStri return validationStatus; } - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - !gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility)) { - return { - ErrorCodes::InvalidOptions, - "changeStreamPreAndPostImages collection option is not supported on config servers"}; - } - return Status::OK(); } @@ -304,23 +338,15 @@ CollectionImpl::SharedState::~SharedState() { CollectionImpl::CollectionImpl(OperationContext* opCtx, const NamespaceString& nss, RecordId catalogId, - const CollectionOptions& options, + std::shared_ptr metadata, std::unique_ptr recordStore) : _ns(nss), _catalogId(std::move(catalogId)), - _uuid(options.uuid.value()), - _shared(std::make_shared(this, std::move(recordStore), options)), + _uuid(metadata->options.uuid.value()), + _shared(std::make_shared(this, std::move(recordStore), metadata->options)), + _metadata(std::move(metadata)), _indexCatalog(std::make_unique()) {} -CollectionImpl::CollectionImpl(OperationContext* opCtx, - const NamespaceString& nss, - RecordId catalogId, - std::shared_ptr metadata, - std::unique_ptr recordStore) - : CollectionImpl(opCtx, nss, std::move(catalogId), metadata->options, std::move(recordStore)) { - _metadata = std::move(metadata); -} - CollectionImpl::~CollectionImpl() = default; void CollectionImpl::onDeregisterFromCatalog(OperationContext* opCtx) { @@ -329,16 +355,6 @@ void CollectionImpl::onDeregisterFromCatalog(OperationContext* opCtx) { } } -std::shared_ptr CollectionImpl::FactoryImpl::make( - OperationContext* opCtx, - const NamespaceString& nss, - RecordId catalogId, - const CollectionOptions& options, - std::unique_ptr rs) const { - return std::make_shared( - opCtx, nss, std::move(catalogId), options, std::move(rs)); -} - std::shared_ptr CollectionImpl::FactoryImpl::make( OperationContext* opCtx, const NamespaceString& nss, @@ -350,10 +366,7 @@ std::shared_ptr CollectionImpl::FactoryImpl::make( } std::shared_ptr CollectionImpl::clone() const { - auto cloned = std::make_shared(*this); - // We are per definition committed if we get cloned - cloned->_cachedCommitted = true; - return cloned; + return std::make_shared(*this); } SharedCollectionDecorations* CollectionImpl::getSharedDecorations() const { @@ -361,7 +374,6 @@ SharedCollectionDecorations* CollectionImpl::getSharedDecorations() const { } void 
CollectionImpl::init(OperationContext* opCtx) { - _metadata = DurableCatalog::get(opCtx)->getMetaData(opCtx, getCatalogId()); const auto& collectionOptions = _metadata->options; _initShared(opCtx, collectionOptions); @@ -394,9 +406,6 @@ Status CollectionImpl::initFromExisting(OperationContext* opCtx, const std::shared_ptr& collection, const DurableCatalogEntry& catalogEntry, boost::optional readTimestamp) { - // We are per definition committed if we initialize from an existing collection. - _cachedCommitted = true; - if (collection) { // Use the shared state from the existing collection. LOGV2_DEBUG( @@ -409,7 +418,6 @@ Status CollectionImpl::initFromExisting(OperationContext* opCtx, // When initializing a collection from an earlier point-in-time, we don't know when the last DDL // operation took place at that point-in-time. We conservatively set the minimum valid snapshot // to the read point-in-time. - _minVisibleSnapshot = readTimestamp; _minValidSnapshot = readTimestamp; _initCommon(opCtx); @@ -450,10 +458,9 @@ Status CollectionImpl::initFromExisting(OperationContext* opCtx, // objects from existing indexes to prevent the index idents from being dropped by the drop // pending ident reaper while this collection is still using them. for (const auto& sharedIdent : sharedIdents) { - auto desc = getIndexCatalog()->findIndexByName(opCtx, sharedIdent.first); - invariant(desc); - auto entry = getIndexCatalog()->getEntryShared(desc); - entry->setIdent(sharedIdent.second); + auto writableEntry = getIndexCatalog()->getWritableEntryByName(opCtx, sharedIdent.first); + invariant(writableEntry); + writableEntry->setIdent(sharedIdent.second); } _initialized = true; @@ -503,22 +510,6 @@ bool CollectionImpl::isInitialized() const { return _initialized; } -bool CollectionImpl::isCommitted() const { - return _cachedCommitted || _shared->_committed.load(); -} - -void CollectionImpl::setCommitted(bool val) { - bool previous = isCommitted(); - invariant((!previous && val) || (previous && !val)); - _shared->_committed.store(val); - - // Going from false->true need to be synchronized by an atomic. Leave this as false and read - // from the atomic in the shared state that will be flipped to true at first clone. - if (!val) { - _cachedCommitted = val; - } -} - bool CollectionImpl::requiresIdIndex() const { if (_ns.isOplog()) { // No indexes on the oplog. @@ -550,7 +541,7 @@ std::unique_ptr CollectionImpl::getCursor(OperationContext CollectionCatalog::hasExclusiveAccessToCollection(opCtx, ns()) || snapshot, fmt::format("Capped visibility snapshot was not initialized before reading from " "collection non-exclusively: {}", - _ns.ns())); + _ns.toStringForErrorMsg())); } else { // We can lazily initialize the capped snapshot because no storage snapshot has been // opened yet. 
@@ -793,12 +784,6 @@ bool CollectionImpl::isCappedAndNeedsDelete(OperationContext* opCtx) const { return false; } -void CollectionImpl::setMinimumVisibleSnapshot(Timestamp newMinimumVisibleSnapshot) { - if (!_minVisibleSnapshot || (newMinimumVisibleSnapshot > _minVisibleSnapshot.value())) { - _minVisibleSnapshot = newMinimumVisibleSnapshot; - } -} - void CollectionImpl::setMinimumValidSnapshot(Timestamp newMinimumValidSnapshot) { if (!_minValidSnapshot || (newMinimumValidSnapshot > _minValidSnapshot.value())) { _minValidSnapshot = newMinimumValidSnapshot; @@ -820,6 +805,21 @@ boost::optional CollectionImpl::getTimeseriesBucketsMayHaveMixedSchemaData return _metadata->timeseriesBucketsMayHaveMixedSchemaData; } +bool CollectionImpl::timeseriesBucketingParametersMayHaveChanged() const { + return _metadata->timeseriesBucketingParametersHaveChanged + ? *_metadata->timeseriesBucketingParametersHaveChanged + : true; +} + +void CollectionImpl::setTimeseriesBucketingParametersChanged(OperationContext* opCtx, + boost::optional value) { + tassert(7625800, "This is not a time-series collection", _metadata->options.timeseries); + + _writeMetadata(opCtx, [&](BSONCollectionCatalogEntry::MetaData& md) { + md.timeseriesBucketingParametersHaveChanged = value; + }); +} + void CollectionImpl::setTimeseriesBucketsMayHaveMixedSchemaData(OperationContext* opCtx, boost::optional setting) { uassert(6057500, "This is not a time-series collection", _metadata->options.timeseries); @@ -898,7 +898,8 @@ Status CollectionImpl::updateCappedSize(OperationContext* opCtx, if (!_shared->_isCapped) { return Status(ErrorCodes::InvalidNamespace, - str::stream() << "Cannot update size on a non-capped collection " << ns()); + str::stream() << "Cannot update size on a non-capped collection " + << ns().toStringForErrorMsg()); } if (ns().isOplog() && newCappedSize) { @@ -993,7 +994,7 @@ void CollectionImpl::registerCappedInserts(OperationContext* opCtx, // we never get here while holding an uninterruptible, read-ticketed lock. That would indicate // that we are operating with the wrong global lock semantics, and either hold too weak a lock // (e.g. IS) or that we upgraded in a way we shouldn't (e.g. IS -> IX). - invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->hasReadTicket() || + invariant(!opCtx->lockState()->hasReadTicket() || !opCtx->lockState()->uninterruptibleLocksRequested()); auto* uncommitted = @@ -1067,9 +1068,18 @@ uint64_t CollectionImpl::getIndexSize(OperationContext* opCtx, } uint64_t CollectionImpl::getIndexFreeStorageBytes(OperationContext* const opCtx) const { + // Unfinished index builds are excluded to avoid a potential deadlock when trying to collect + // statistics from the index table while the index build is in the bulk load phase. See + // SERVER-77018. This should not be too impactful as: + // - During the collection scan phase, the index table is unused. + // - During the bulk load phase, getFreeStorageBytes will probably return EBUSY, as the ident is + // in use by the index builder (and in the worst case would result in the deadlock). + // - It might be possible to return meaningful data post bulk-load, but reusable bytes should be + // low anyway as the collection has just been bulk loaded. Additionally, this would be + // inaccurate anyway as the build is in progress. + // - Once the index build is finished, this will eventually be accounted for.
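+    // Hence only indexes that are ready are iterated below.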
const auto idxCatalog = getIndexCatalog(); - auto indexIt = idxCatalog->getIndexIterator( - opCtx, IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished); + auto indexIt = idxCatalog->getIndexIterator(opCtx, IndexCatalog::InclusionPolicy::kReady); uint64_t totalSize = 0; while (indexIt->more()) { @@ -1543,7 +1553,9 @@ bool CollectionImpl::isIndexMultikey(OperationContext* opCtx, // We need to read from the durable catalog if there are concurrent multikey writers to avoid // reading between the multikey write committing in the storage engine but before its onCommit // handler made the write visible for readers. - auto snapshotMetadata = DurableCatalog::get(opCtx)->getMetaData(opCtx, getCatalogId()); + const auto catalogEntry = + DurableCatalog::get(opCtx)->getParsedCatalogEntry(opCtx, getCatalogId()); + const auto snapshotMetadata = catalogEntry->metadata; int snapshotOffset = snapshotMetadata->findIndexOffset(indexName); invariant(snapshotOffset >= 0, str::stream() << "cannot get multikey for index " << indexName << " @ " @@ -1648,7 +1660,9 @@ bool CollectionImpl::setIndexIsMultikey(OperationContext* opCtx, // collection. We cannot use the cached metadata in this collection as we may have just // committed a multikey change concurrently to the storage engine without being able to // observe it if its onCommit handlers haven't run yet. - auto metadataLocal = *DurableCatalog::get(opCtx)->getMetaData(opCtx, getCatalogId()); + const auto catalogEntry = + DurableCatalog::get(opCtx)->getParsedCatalogEntry(opCtx, getCatalogId()); + auto metadataLocal = *catalogEntry->metadata; // When reading from the durable catalog the index offsets are different because when // removing indexes in-memory just zeros out the slot instead of actually removing it. 
We // must adjust the entries so they match how they are stored in _metadata so we can rely on diff --git a/src/mongo/db/catalog/collection_impl.h b/src/mongo/db/catalog/collection_impl.h index 01a92cd511311..1140a387669c5 100644 --- a/src/mongo/db/catalog/collection_impl.h +++ b/src/mongo/db/catalog/collection_impl.h @@ -29,8 +29,51 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/capped_visibility.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_options_gen.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" +#include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { @@ -43,12 +86,6 @@ class CollectionImpl final : public Collection { const NamespaceString& nss, BSONObj collationSpec); - // TODO SERVER-56999: We should just need one API to create Collections - explicit CollectionImpl(OperationContext* opCtx, - const NamespaceString& nss, - RecordId catalogId, - const CollectionOptions& options, - std::unique_ptr recordStore); explicit CollectionImpl(OperationContext* opCtx, const NamespaceString& nss, @@ -62,13 +99,6 @@ class CollectionImpl final : public Collection { class FactoryImpl : public Factory { public: - // TODO SERVER-56999: We should just need one API to create Collections - std::shared_ptr make(OperationContext* opCtx, - const NamespaceString& nss, - RecordId catalogId, - const CollectionOptions& options, - std::unique_ptr rs) const final; - std::shared_ptr make( OperationContext* opCtx, const NamespaceString& nss, @@ -85,8 +115,6 @@ class CollectionImpl final : public Collection { const DurableCatalogEntry& catalogEntry, boost::optional readTimestamp) final; bool isInitialized() const final; - bool isCommitted() const final; - void setCommitted(bool val) final; const NamespaceString& ns() const final { return _ns; @@ -209,9 +237,15 @@ class CollectionImpl final : public Collection { bool isTemporary() const final; boost::optional getTimeseriesBucketsMayHaveMixedSchemaData() const final; + void setTimeseriesBucketsMayHaveMixedSchemaData(OperationContext* opCtx, boost::optional setting) final; + bool timeseriesBucketingParametersMayHaveChanged() const final; + + void 
setTimeseriesBucketingParametersChanged(OperationContext* opCtx, + boost::optional value) final; + bool doesTimeseriesBucketsDocContainMixedSchemaData(const BSONObj& bucketsDoc) const final; bool getRequiresTimeseriesExtendedRangeSupport() const final; @@ -274,22 +308,14 @@ class CollectionImpl final : public Collection { uint64_t getIndexFreeStorageBytes(OperationContext* opCtx) const final; - /** - * If return value is not boost::none, reads with majority read concern using an older snapshot - * must error. - */ - boost::optional getMinimumVisibleSnapshot() const final { - return _minVisibleSnapshot; - } boost::optional getMinimumValidSnapshot() const final { return _minValidSnapshot; } /** - * Updates the minimum visible snapshot. The 'newMinimumVisibleSnapshot' is ignored if it would - * set the minimum visible snapshot backwards in time. + * Updates the minimum valid snapshot. The 'newMinimumValidSnapshot' is ignored if it would + * set the minimum valid snapshot backwards in time. */ - void setMinimumVisibleSnapshot(Timestamp newMinimumVisibleSnapshot) final; void setMinimumValidSnapshot(Timestamp newMinimumValidSnapshot) final; boost::optional getTimeseriesOptions() const final; @@ -421,8 +447,6 @@ class CollectionImpl final : public Collection { const bool _isCapped; const bool _needCappedLock; - AtomicWord _committed{true}; - // Tracks in-progress capped inserts to inform visibility for forward scans so that no // uncommitted records are skipped. CappedVisibilityObserver _cappedObserver; @@ -448,7 +472,6 @@ class CollectionImpl final : public Collection { NamespaceString _ns; RecordId _catalogId; UUID _uuid; - bool _cachedCommitted = true; std::shared_ptr _shared; // Collection metadata cached from the DurableCatalog. Is kept separate from the SharedState @@ -462,7 +485,6 @@ class CollectionImpl final : public Collection { Validator _validator; // The earliest snapshot that is allowed to use this collection. 
- boost::optional _minVisibleSnapshot; boost::optional _minValidSnapshot; bool _initialized = false; diff --git a/src/mongo/db/catalog/collection_mock.h b/src/mongo/db/catalog/collection_mock.h index f6587a9c69d92..806e46312cc18 100644 --- a/src/mongo/db/catalog/collection_mock.h +++ b/src/mongo/db/catalog/collection_mock.h @@ -31,6 +31,8 @@ #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" @@ -101,7 +103,7 @@ class CollectionMock : public Collection { MONGO_UNREACHABLE; } std::shared_ptr getSharedIdent() const { - return std::make_shared(_nss.toString()); + return std::make_shared(_nss.toString_forTest()); } void setIdent(std::shared_ptr newIdent) { MONGO_UNREACHABLE; @@ -199,6 +201,15 @@ class CollectionMock : public Collection { MONGO_UNREACHABLE; } + bool timeseriesBucketingParametersMayHaveChanged() const { + MONGO_UNREACHABLE; + } + + void setTimeseriesBucketingParametersChanged(OperationContext* opCtx, + boost::optional value) { + MONGO_UNREACHABLE; + } + bool doesTimeseriesBucketsDocContainMixedSchemaData(const BSONObj& bucketsDoc) const { MONGO_UNREACHABLE; } @@ -297,14 +308,6 @@ class CollectionMock : public Collection { MONGO_UNREACHABLE; } - boost::optional getMinimumVisibleSnapshot() const { - MONGO_UNREACHABLE; - } - - void setMinimumVisibleSnapshot(Timestamp name) { - MONGO_UNREACHABLE; - } - boost::optional getMinimumValidSnapshot() const { MONGO_UNREACHABLE; } @@ -349,14 +352,6 @@ class CollectionMock : public Collection { return _uuid; } - bool isCommitted() const final { - return _committed; - } - - void setCommitted(bool val) final { - _committed = val; - } - void indexBuildSuccess(OperationContext* opCtx, IndexCatalogEntry* index) { MONGO_UNREACHABLE; } diff --git a/src/mongo/db/catalog/collection_operation_source.cpp b/src/mongo/db/catalog/collection_operation_source.cpp index 84b291f19ce31..198e929c7f473 100644 --- a/src/mongo/db/catalog/collection_operation_source.cpp +++ b/src/mongo/db/catalog/collection_operation_source.cpp @@ -38,6 +38,8 @@ StringData toString(OperationSource source) { static constexpr StringData kTimeseriesInsertString = "time-series insert"_sd; static constexpr StringData kTimeseriesUpdateString = "time-series update"_sd; static constexpr StringData kTimeseriesDeleteString = "time-series delete"_sd; + static constexpr StringData kTimeseriesBucketCompressionString = + "time-series bucket compression"_sd; switch (source) { case OperationSource::kStandard: @@ -50,6 +52,8 @@ StringData toString(OperationSource source) { return kTimeseriesUpdateString; case OperationSource::kTimeseriesDelete: return kTimeseriesDeleteString; + case OperationSource::kTimeseriesBucketCompression: + return kTimeseriesBucketCompressionString; } MONGO_UNREACHABLE; diff --git a/src/mongo/db/catalog/collection_operation_source.h b/src/mongo/db/catalog/collection_operation_source.h index 6cfff61882fa9..7546d68185002 100644 --- a/src/mongo/db/catalog/collection_operation_source.h +++ b/src/mongo/db/catalog/collection_operation_source.h @@ -43,6 +43,7 @@ enum class OperationSource { kTimeseriesInsert, kTimeseriesUpdate, kTimeseriesDelete, + kTimeseriesBucketCompression }; StringData toString(OperationSource source); diff --git a/src/mongo/db/catalog/collection_options.cpp b/src/mongo/db/catalog/collection_options.cpp index 86ccb5e4a6fa1..3ae3cd3d83d98 100644 --- 
a/src/mongo/db/catalog/collection_options.cpp +++ b/src/mongo/db/catalog/collection_options.cpp @@ -27,44 +27,44 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/collection_options.h" - -#include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_options_validation.h" -#include "mongo/db/commands.h" #include "mongo/db/commands/create_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_interface.h" -#include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/idl/command_generic_argument.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage namespace mongo { namespace { -long long adjustCappedSize(long long cappedSize) { - if (serverGlobalParams.featureCompatibility.isVersionInitialized() && - !feature_flags::gfeatureFlagCappedCollectionsRelaxedSize.isEnabled( - serverGlobalParams.featureCompatibility)) { - auto originalCappedSize = cappedSize; - cappedSize += 0xff; - cappedSize &= 0xffffffffffffff00LL; - LOGV2(7386100, - "Capped collection maxSize being rounded up to nearest 256-byte size.", - "originalSize"_attr = originalCappedSize, - "adjustedSize"_attr = cappedSize); - } - return cappedSize; -} - long long adjustCappedMaxDocs(long long cappedMaxDocs) { if (cappedMaxDocs <= 0 || cappedMaxDocs == std::numeric_limits::max()) { auto originalCappedMaxDocs = cappedMaxDocs; @@ -103,7 +103,7 @@ StatusWith CollectionOptions::checkAndAdjustCappedSize(long long capp return Status(ErrorCodes::BadValue, "size cannot exceed 1 PB"); } - return adjustCappedSize(cappedSize); + return cappedSize; } StatusWith CollectionOptions::checkAndAdjustCappedMaxDocs(long long cappedMaxDocs) { @@ -344,7 +344,7 @@ CollectionOptions CollectionOptions::fromCreateCommand(const CreateCommand& cmd) options.validationAction = cmd.getValidationAction(); options.capped = cmd.getCapped(); if (auto size = cmd.getSize()) { - options.cappedSize = adjustCappedSize(*size); + options.cappedSize = *size; } if (auto max = cmd.getMax()) { options.cappedMaxDocs = adjustCappedMaxDocs(*max); diff --git a/src/mongo/db/catalog/collection_options.h b/src/mongo/db/catalog/collection_options.h index f5dc1dd0c55d0..b6e90f3f52bd4 100644 --- a/src/mongo/db/catalog/collection_options.h +++ b/src/mongo/db/catalog/collection_options.h @@ -29,16 +29,24 @@ #pragma once +#include #include +#include +#include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection_options_gen.h" #include "mongo/db/jsobj.h" 
+#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" #include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/util/string_map.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/catalog/collection_options.idl b/src/mongo/db/catalog/collection_options.idl index d8e5e1b30eb27..762e664f57d83 100644 --- a/src/mongo/db/catalog/collection_options.idl +++ b/src/mongo/db/catalog/collection_options.idl @@ -34,13 +34,6 @@ global: imports: - "mongo/db/basic_types.idl" -feature_flags: - featureFlagCappedCollectionsRelaxedSize: - description: Enables capped collections to have a size non multiple of 256 bytes. - cpp_varname: feature_flags::gfeatureFlagCappedCollectionsRelaxedSize - default: true - version: 6.2 - enums: ValidationLevel: description: "Determines how strictly MongoDB applies the validation rules to existing documents during an update." diff --git a/src/mongo/db/catalog/collection_options_test.cpp b/src/mongo/db/catalog/collection_options_test.cpp index cfe1b20f66ec9..81f99c84bd085 100644 --- a/src/mongo/db/catalog/collection_options_test.cpp +++ b/src/mongo/db/catalog/collection_options_test.cpp @@ -27,16 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/collection_options.h" - +#include #include +#include +#include -#include "mongo/db/json.h" -#include "mongo/idl/server_parameter_test_util.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/server_options.h" #include "mongo/platform/decimal128.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/version/releases.h" namespace mongo { using unittest::assertGet; @@ -115,21 +127,6 @@ TEST(CollectionOptions, CappedSizeNotRoundUpForAlignment) { ASSERT_EQUALS(options.cappedMaxDocs, 0); } -TEST(CollectionOptions, CappedSizeRoundsUpForAlignment) { - serverGlobalParams.mutableFeatureCompatibility.setVersion( - multiversion::FeatureCompatibilityVersion::kVersion_6_0); - const long long kUnalignedCappedSize = 1000; - const long long kAlignedCappedSize = 1024; - - // Check size rounds up to multiple of alignment. 
- auto options = assertGet( - CollectionOptions::parse((BSON("capped" << true << "size" << kUnalignedCappedSize)))); - - ASSERT_EQUALS(options.capped, true); - ASSERT_EQUALS(options.cappedSize, kAlignedCappedSize); - ASSERT_EQUALS(options.cappedMaxDocs, 0); -} - TEST(CollectionOptions, IgnoreSizeWrongType) { auto options = assertGet(CollectionOptions::parse(fromjson("{size: undefined, capped: undefined}"))); @@ -647,9 +644,6 @@ TEST(FLECollectionOptions, Equality_DisAllowedTypes) { TEST(FLECollectionOptions, Range_AllowedTypes) { - // TODO: SERVER-67760 remove feature flag - RAIIServerParameterControllerForTest featureFlagController("featureFlagFLE2Range", true); - ASSERT_OK(CollectionOptions::parse(fromjson(str::stream() << R"({ encryptedFields: { "fields": [ @@ -744,9 +738,6 @@ TEST(FLECollectionOptions, Range_AllowedTypes) { TEST(FLECollectionOptions, Range_DisAllowedTypes) { - // TODO: SERVER-67760 remove feature flag - RAIIServerParameterControllerForTest featureFlagController("featureFlagFLE2Range", true); - std::vector typesDisallowedIndexed({ "array", "binData", @@ -782,9 +773,6 @@ TEST(FLECollectionOptions, Range_DisAllowedTypes) { } TEST(FLECollectionOptions, Range_MissingFields) { - // TODO: SERVER-67760 remove feature flag - RAIIServerParameterControllerForTest featureFlagController("featureFlagFLE2Range", true); - ASSERT_STATUS_CODE(6775202, CollectionOptions::parse(fromjson(R"({ encryptedFields: { "fields": [ @@ -826,9 +814,6 @@ TEST(FLECollectionOptions, Range_MissingFields) { } TEST(FLECollectionOptions, Equality_ExtraFields) { - // TODO: SERVER-67760 remove feature flag - RAIIServerParameterControllerForTest featureFlagController("featureFlagFLE2Range", true); - ASSERT_STATUS_CODE(6775205, CollectionOptions::parse(fromjson(R"({ encryptedFields: { "fields": [ @@ -871,10 +856,6 @@ TEST(FLECollectionOptions, Equality_ExtraFields) { TEST(FLECollectionOptions, Range_MinMax) { - // TODO: SERVER-67760 remove feature flag - RAIIServerParameterControllerForTest featureFlagController("featureFlagFLE2Range", true); - - { auto doc = BSON("encryptedFields" << BSON("fields" << BSON_ARRAY(BSON("path" @@ -1003,7 +984,6 @@ TEST(FLECollectionOptions, Range_MinMax) { } TEST(FLECollectionOptions, Range_BoundTypeMismatch) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagFLE2Range", true); ASSERT_STATUS_CODE(7018200, CollectionOptions::parse(fromjson(str::stream() << R"({ encryptedFields: { "fields": [ diff --git a/src/mongo/db/catalog/collection_options_validation.cpp b/src/mongo/db/catalog/collection_options_validation.cpp index 67b3f7c47f108..6710c9b0d5d6e 100644 --- a/src/mongo/db/catalog/collection_options_validation.cpp +++ b/src/mongo/db/catalog/collection_options_validation.cpp @@ -29,8 +29,10 @@ #include "mongo/db/catalog/collection_options_validation.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsontypes.h" -#include "mongo/crypto/encryption_fields_util.h" +#include "mongo/util/str.h" namespace mongo::collection_options_validation { Status validateStorageEngineOptions(const BSONObj& storageEngine) { diff --git a/src/mongo/db/catalog/collection_test.cpp b/src/mongo/db/catalog/collection_test.cpp index b1e2ea74d1a7e..229c7ad69e4e6 100644 --- a/src/mongo/db/catalog/collection_test.cpp +++ b/src/mongo/db/catalog/collection_test.cpp @@ -27,21 +27,62 @@ * it in the license file. 
*/ -#include "mongo/bson/oid.h" -#include "mongo/db/catalog/capped_utils.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_mock.h" -#include "mongo/db/catalog/collection_validation.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update/document_diff_calculator.h" #include "mongo/stdx/thread.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -94,7 +135,7 @@ TEST_F(CollectionTest, CappedNotifierTimeouts) { ASSERT_EQ(notifier->getVersion(), 0u); auto before = Date_t::now(); - notifier->waitUntil(0u, before + Milliseconds(25)); + notifier->waitUntil(operationContext(), 0u, before + Milliseconds(25)); auto after = Date_t::now(); ASSERT_GTE(after - before, Milliseconds(25)); ASSERT_EQ(notifier->getVersion(), 0u); @@ -114,7 +155,7 @@ TEST_F(CollectionTest, CappedNotifierWaitAfterNotifyIsImmediate) { ASSERT_EQ(notifier->getVersion(), thisVersion); auto before = Date_t::now(); - notifier->waitUntil(prevVersion, before + Seconds(25)); + notifier->waitUntil(operationContext(), prevVersion, before + Seconds(25)); auto after = Date_t::now(); ASSERT_LT(after - before, Seconds(25)); } @@ -130,13 +171,15 @@ TEST_F(CollectionTest, CappedNotifierWaitUntilAsynchronousNotifyAll) { auto thisVersion = prevVersion + 1; auto before = Date_t::now(); - stdx::thread thread([before, prevVersion, ¬ifier] { - notifier->waitUntil(prevVersion, before + Milliseconds(25)); + stdx::thread thread([this, before, prevVersion, ¬ifier] { + ThreadClient client(getServiceContext()); + auto opCtx = cc().makeOperationContext(); + notifier->waitUntil(opCtx.get(), prevVersion, before + Milliseconds(25)); auto after = Date_t::now(); 
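These tests now pass an OperationContext into CappedInsertNotifier::waitUntil(), making a blocked waiter interruptible (exercised by the new CappedNotifierWaitUntilInterrupt test below). A standalone toy model of a wait that can end on notify, deadline, or interrupt; the real notifier is built on MongoDB's interruptible primitives, which are not shown here:

```cpp
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <iostream>
#include <mutex>
#include <stdexcept>
#include <thread>

// Toy notifier: waiters block until the version advances, the deadline passes,
// or the waiter is interrupted (the role the OperationContext plays in the real code).
class ToyCappedNotifier {
public:
    void notifyAll() {
        std::lock_guard<std::mutex> lk(_m);
        ++_version;
        _cv.notify_all();
    }

    void interrupt() {
        std::lock_guard<std::mutex> lk(_m);
        _interrupted = true;
        _cv.notify_all();
    }

    // Returns true if the version advanced, false on deadline; throws if interrupted.
    bool waitUntil(uint64_t prevVersion, std::chrono::steady_clock::time_point deadline) {
        std::unique_lock<std::mutex> lk(_m);
        _cv.wait_until(lk, deadline, [&] { return _interrupted || _version != prevVersion; });
        if (_interrupted)
            throw std::runtime_error("interrupted");
        return _version != prevVersion;
    }

    uint64_t version() {
        std::lock_guard<std::mutex> lk(_m);
        return _version;
    }

private:
    std::mutex _m;
    std::condition_variable _cv;
    uint64_t _version = 0;
    bool _interrupted = false;
};

int main() {
    ToyCappedNotifier notifier;
    const auto prev = notifier.version();

    std::thread killer([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(25));
        notifier.interrupt();  // analogous to ServiceContext::killOperation in the test
    });

    try {
        notifier.waitUntil(prev, std::chrono::steady_clock::now() + std::chrono::seconds(25));
    } catch (const std::runtime_error& e) {
        std::cout << "wait ended early: " << e.what() << '\n';
    }
    killer.join();
    return 0;
}
```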
ASSERT_GTE(after - before, Milliseconds(25)); notifier->notifyAll(); }); - notifier->waitUntil(prevVersion, before + Seconds(25)); + notifier->waitUntil(operationContext(), prevVersion, before + Seconds(25)); auto after = Date_t::now(); ASSERT_LT(after - before, Seconds(25)); ASSERT_GTE(after - before, Milliseconds(25)); @@ -147,20 +190,54 @@ TEST_F(CollectionTest, CappedNotifierWaitUntilAsynchronousNotifyAll) { TEST_F(CollectionTest, CappedNotifierWaitUntilAsynchronousKill) { NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); makeCapped(nss); - AutoGetCollectionForRead acfr(operationContext(), nss); const CollectionPtr& col = acfr.getCollection(); auto notifier = col->getRecordStore()->getCappedInsertNotifier(); auto prevVersion = notifier->getVersion(); auto before = Date_t::now(); - stdx::thread thread([before, prevVersion, ¬ifier] { - notifier->waitUntil(prevVersion, before + Milliseconds(25)); + stdx::thread thread([this, before, prevVersion, ¬ifier] { + ThreadClient client(getServiceContext()); + auto opCtx = cc().makeOperationContext(); + notifier->waitUntil(opCtx.get(), prevVersion, before + Milliseconds(25)); auto after = Date_t::now(); ASSERT_GTE(after - before, Milliseconds(25)); notifier->kill(); }); - notifier->waitUntil(prevVersion, before + Seconds(25)); + notifier->waitUntil(operationContext(), prevVersion, before + Seconds(25)); + auto after = Date_t::now(); + ASSERT_LT(after - before, Seconds(25)); + ASSERT_GTE(after - before, Milliseconds(25)); + thread.join(); + ASSERT_EQ(notifier->getVersion(), prevVersion); +} + +TEST_F(CollectionTest, CappedNotifierWaitUntilInterrupt) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); + makeCapped(nss); + + AutoGetCollectionForRead acfr(operationContext(), nss); + const CollectionPtr& col = acfr.getCollection(); + auto notifier = col->getRecordStore()->getCappedInsertNotifier(); + auto prevVersion = notifier->getVersion(); + + auto& clientToInterrupt = cc(); + auto before = Date_t::now(); + stdx::thread thread([this, before, prevVersion, ¬ifier, &clientToInterrupt] { + ThreadClient client(getServiceContext()); + auto opCtx = cc().makeOperationContext(); + notifier->waitUntil(opCtx.get(), prevVersion, before + Milliseconds(25)); + auto after = Date_t::now(); + ASSERT_GTE(after - before, Milliseconds(25)); + + stdx::lock_guard lk(clientToInterrupt); + getServiceContext()->killOperation( + lk, clientToInterrupt.getOperationContext(), ErrorCodes::Interrupted); + }); + + ASSERT_THROWS(notifier->waitUntil(operationContext(), prevVersion, before + Seconds(25)), + ExceptionFor); + auto after = Date_t::now(); ASSERT_LT(after - before, Seconds(25)); ASSERT_GTE(after - before, Milliseconds(25)); @@ -208,13 +285,13 @@ TEST_F(CollectionTest, AsynchronouslyNotifyCappedWaitersIfNeeded) { auto thisVersion = prevVersion + 1; auto before = Date_t::now(); - notifier->waitUntil(prevVersion, before + Milliseconds(25)); + notifier->waitUntil(operationContext(), prevVersion, before + Milliseconds(25)); stdx::thread thread([before, prevVersion, notifier] { auto after = Date_t::now(); ASSERT_GTE(after - before, Milliseconds(25)); notifier->notifyAll(); }); - notifier->waitUntil(prevVersion, before + Seconds(25)); + notifier->waitUntil(operationContext(), prevVersion, before + Seconds(25)); auto after = Date_t::now(); ASSERT_LT(after - before, Seconds(25)); ASSERT_GTE(after - before, Milliseconds(25)); @@ -262,9 +339,9 @@ TEST_F(CollectionTest, VerifyIndexIsUpdated) { auto idIndex = 
idxCatalog->findIdIndex(opCtx); auto userIdx = idxCatalog->findIndexByName(opCtx, indexName); auto oldRecordId = idIndex->getEntry()->accessMethod()->asSortedData()->findSingle( - opCtx, coll, BSON("_id" << 1)); + opCtx, coll, idIndex->getEntry(), BSON("_id" << 1)); auto oldIndexRecordID = userIdx->getEntry()->accessMethod()->asSortedData()->findSingle( - opCtx, coll, BSON("a" << 1)); + opCtx, coll, userIdx->getEntry(), BSON("a" << 1)); ASSERT_TRUE(!oldRecordId.isNull()); ASSERT_EQ(oldRecordId, oldIndexRecordID); { @@ -284,15 +361,16 @@ TEST_F(CollectionTest, VerifyIndexIsUpdated) { oldSnap, newDoc, collection_internal::kUpdateAllIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); wuow.commit(); } auto indexRecordId = userIdx->getEntry()->accessMethod()->asSortedData()->findSingle( - opCtx, coll, BSON("a" << 1)); + opCtx, coll, userIdx->getEntry(), BSON("a" << 1)); ASSERT_TRUE(indexRecordId.isNull()); indexRecordId = userIdx->getEntry()->accessMethod()->asSortedData()->findSingle( - opCtx, coll, BSON("a" << 5)); + opCtx, coll, userIdx->getEntry(), BSON("a" << 5)); ASSERT_EQ(indexRecordId, oldRecordId); } @@ -317,13 +395,13 @@ TEST_F(CollectionTest, VerifyIndexIsUpdatedWithDamages) { auto idIndex = idxCatalog->findIdIndex(opCtx); auto userIdx = idxCatalog->findIndexByName(opCtx, indexName); auto oldRecordId = idIndex->getEntry()->accessMethod()->asSortedData()->findSingle( - opCtx, coll, BSON("_id" << 1)); + opCtx, coll, idIndex->getEntry(), BSON("_id" << 1)); ASSERT_TRUE(!oldRecordId.isNull()); auto newDoc = BSON("_id" << 1 << "a" << 5 << "b" << 32); - auto diff = doc_diff::computeOplogDiff(oldDoc, newDoc, 0, nullptr); + auto diff = doc_diff::computeOplogDiff(oldDoc, newDoc, 0); ASSERT(diff); - auto damagesOutput = doc_diff::computeDamages(oldDoc, diff->diff, false); + auto damagesOutput = doc_diff::computeDamages(oldDoc, *diff, false); { WriteUnitOfWork wuow(opCtx); Snapshotted oldSnap(opCtx->recoveryUnit()->getSnapshotId(), oldDoc); @@ -336,17 +414,18 @@ TEST_F(CollectionTest, VerifyIndexIsUpdatedWithDamages) { damagesOutput.damageSource.get(), damagesOutput.damages, collection_internal::kUpdateAllIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); ASSERT_OK(newDocStatus); ASSERT_BSONOBJ_EQ(newDoc, newDocStatus.getValue()); wuow.commit(); } auto indexRecordId = userIdx->getEntry()->accessMethod()->asSortedData()->findSingle( - opCtx, coll, BSON("a" << 1)); + opCtx, coll, userIdx->getEntry(), BSON("a" << 1)); ASSERT_TRUE(indexRecordId.isNull()); indexRecordId = userIdx->getEntry()->accessMethod()->asSortedData()->findSingle( - opCtx, coll, BSON("a" << 5)); + opCtx, coll, userIdx->getEntry(), BSON("a" << 5)); ASSERT_EQ(indexRecordId, oldRecordId); } diff --git a/src/mongo/db/catalog/collection_uuid_mismatch.cpp b/src/mongo/db/catalog/collection_uuid_mismatch.cpp index 6c68130f70148..dac04f3296ad8 100644 --- a/src/mongo/db/catalog/collection_uuid_mismatch.cpp +++ b/src/mongo/db/catalog/collection_uuid_mismatch.cpp @@ -29,9 +29,16 @@ #include "mongo/db/catalog/collection_uuid_mismatch.h" +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -39,18 +46,18 @@ void checkCollectionUUIDMismatch(OperationContext* opCtx, const NamespaceString& ns, const Collection* coll, const 
boost::optional& uuid) { - checkCollectionUUIDMismatch(opCtx, CollectionCatalog::get(opCtx), ns, coll, uuid); + checkCollectionUUIDMismatch(opCtx, *CollectionCatalog::get(opCtx), ns, coll, uuid); } void checkCollectionUUIDMismatch(OperationContext* opCtx, const NamespaceString& ns, const CollectionPtr& coll, const boost::optional& uuid) { - checkCollectionUUIDMismatch(opCtx, CollectionCatalog::get(opCtx), ns, coll.get(), uuid); + checkCollectionUUIDMismatch(opCtx, *CollectionCatalog::get(opCtx), ns, coll.get(), uuid); } void checkCollectionUUIDMismatch(OperationContext* opCtx, - const std::shared_ptr& catalog, + const CollectionCatalog& catalog, const NamespaceString& ns, const CollectionPtr& coll, const boost::optional& uuid) { @@ -58,7 +65,7 @@ void checkCollectionUUIDMismatch(OperationContext* opCtx, } void checkCollectionUUIDMismatch(OperationContext* opCtx, - const std::shared_ptr& catalog, + const CollectionCatalog& catalog, const NamespaceString& ns, const Collection* coll, const boost::optional& uuid) { @@ -66,7 +73,7 @@ void checkCollectionUUIDMismatch(OperationContext* opCtx, return; } - auto actualNamespace = catalog->lookupNSSByUUID(opCtx, *uuid); + auto actualNamespace = catalog.lookupNSSByUUID(opCtx, *uuid); uassert( (CollectionUUIDMismatchInfo{ns.dbName(), *uuid, diff --git a/src/mongo/db/catalog/collection_uuid_mismatch.h b/src/mongo/db/catalog/collection_uuid_mismatch.h index 03561276e73be..ad0f95c2d9719 100644 --- a/src/mongo/db/catalog/collection_uuid_mismatch.h +++ b/src/mongo/db/catalog/collection_uuid_mismatch.h @@ -29,9 +29,15 @@ #pragma once -#include "mongo/db/operation_context.h" +#include + +#include +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -49,13 +55,14 @@ void checkCollectionUUIDMismatch(OperationContext* opCtx, * Same as above, but with the catalog passed explicitly. 
*/ void checkCollectionUUIDMismatch(OperationContext* opCtx, - const std::shared_ptr& catalog, + const CollectionCatalog& catalog, const NamespaceString& ns, const Collection* coll, const boost::optional& uuid); + void checkCollectionUUIDMismatch(OperationContext* opCtx, - const std::shared_ptr& catalog, + const CollectionCatalog& catalog, const NamespaceString& ns, const CollectionPtr& coll, const boost::optional& uuid); diff --git a/src/mongo/db/catalog/collection_uuid_mismatch_info.cpp b/src/mongo/db/catalog/collection_uuid_mismatch_info.cpp index 6b267d8829abb..01d5240fd41b0 100644 --- a/src/mongo/db/catalog/collection_uuid_mismatch_info.cpp +++ b/src/mongo/db/catalog/collection_uuid_mismatch_info.cpp @@ -29,8 +29,19 @@ #include "mongo/db/catalog/collection_uuid_mismatch_info.h" -#include "mongo/base/init.h" +#include +#include + +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/database_name_util.h" namespace mongo { namespace { @@ -45,7 +56,7 @@ constexpr StringData kActualCollectionFieldName = "actualCollection"_sd; std::shared_ptr CollectionUUIDMismatchInfo::parse(const BSONObj& obj) { auto actualNamespace = obj[kActualCollectionFieldName]; return std::make_shared( - DatabaseName(boost::none, obj[kDbFieldName].str()), + DatabaseNameUtil::deserialize(boost::none, obj[kDbFieldName].str()), UUID::parse(obj[kCollectionUUIDFieldName]).getValue(), obj[kExpectedCollectionFieldName].str(), actualNamespace.isNull() ? boost::none : boost::make_optional(actualNamespace.str())); diff --git a/src/mongo/db/catalog/collection_uuid_mismatch_info.h b/src/mongo/db/catalog/collection_uuid_mismatch_info.h index 01147676076ef..a86fa33d26cfb 100644 --- a/src/mongo/db/catalog/collection_uuid_mismatch_info.h +++ b/src/mongo/db/catalog/collection_uuid_mismatch_info.h @@ -29,7 +29,17 @@ #pragma once +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/database_name.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/db/catalog/collection_validation.cpp b/src/mongo/db/catalog/collection_validation.cpp index bb3b39414a268..d20f8635178ef 100644 --- a/src/mongo/db/catalog/collection_validation.cpp +++ b/src/mongo/db/catalog/collection_validation.cpp @@ -28,25 +28,73 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/collection_validation.h" - +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/index_consistency.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_options_gen.h" +#include "mongo/db/catalog/collection_validation.h" +#include "mongo/db/catalog/index_catalog.h" +#include 
"mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/catalog/validate_adaptor.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog/validate_state.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/operation_context.h" -#include "mongo/db/record_id_helpers.h" -#include "mongo/db/storage/key_string.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -116,21 +164,32 @@ void _validateIndexes(OperationContext* opCtx, ValidateAdaptor* indexValidator, ValidateResults* results) { // Validate Indexes, checking for mismatch between index entries and collection records. - for (const auto& index : validateState->getIndexes()) { + for (const auto& indexIdent : validateState->getIndexIdents()) { opCtx->checkForInterrupt(); - const IndexDescriptor* descriptor = index->descriptor(); + // Make a copy of the index name. The `traverseIndex()` function below will yield + // periodically, so it's unsafe to hold a pointer to the index here. + const std::string indexName = validateState->getCollection() + ->getIndexCatalog() + ->findIndexByIdent(opCtx, indexIdent) + ->indexName(); LOGV2_OPTIONS(20296, {LogComponent::kIndex}, "Validating index consistency", - "index"_attr = descriptor->indexName(), + "index"_attr = indexName, logAttrs(validateState->nss())); int64_t numTraversedKeys; - indexValidator->traverseIndex(opCtx, index.get(), &numTraversedKeys, results); - - auto& curIndexResults = (results->indexResultsMap)[descriptor->indexName()]; + indexValidator->traverseIndex(opCtx, + validateState->getCollection() + ->getIndexCatalog() + ->findIndexByIdent(opCtx, indexIdent) + ->getEntry(), + &numTraversedKeys, + results); + + auto& curIndexResults = (results->indexResultsMap)[indexName]; curIndexResults.keysTraversed = numTraversedKeys; if (!curIndexResults.valid) { @@ -170,10 +229,11 @@ void _gatherIndexEntryErrors(OperationContext* opCtx, // Iterate through all the indexes in the collection and only record the index entry keys that // had inconsistencies during the first phase. 
- for (const auto& index : validateState->getIndexes()) { + for (const auto& indexIdent : validateState->getIndexIdents()) { opCtx->checkForInterrupt(); - const IndexDescriptor* descriptor = index->descriptor(); + const IndexDescriptor* descriptor = + validateState->getCollection()->getIndexCatalog()->findIndexByIdent(opCtx, indexIdent); LOGV2_OPTIONS(20300, {LogComponent::kIndex}, @@ -181,7 +241,7 @@ void _gatherIndexEntryErrors(OperationContext* opCtx, "index"_attr = descriptor->indexName()); indexValidator->traverseIndex(opCtx, - index.get(), + descriptor->getEntry(), /*numTraversedKeys=*/nullptr, result); } @@ -205,28 +265,24 @@ void _validateIndexKeyCount(OperationContext* opCtx, ValidateState* validateState, ValidateAdaptor* indexValidator, ValidateResultsMap* indexNsResultsMap) { - for (const auto& index : validateState->getIndexes()) { - const IndexDescriptor* descriptor = index->descriptor(); + for (const auto& indexIdent : validateState->getIndexIdents()) { + const IndexDescriptor* descriptor = + validateState->getCollection()->getIndexCatalog()->findIndexByIdent(opCtx, indexIdent); auto& curIndexResults = (*indexNsResultsMap)[descriptor->indexName()]; if (curIndexResults.valid) { - indexValidator->validateIndexKeyCount(opCtx, index.get(), curIndexResults); + indexValidator->validateIndexKeyCount(opCtx, descriptor->getEntry(), curIndexResults); } } } -void _printIndexSpec(const ValidateState* validateState, StringData indexName) { - auto& indexes = validateState->getIndexes(); - auto indexEntry = - std::find_if(indexes.begin(), - indexes.end(), - [&](const std::shared_ptr indexEntry) -> bool { - return indexEntry->descriptor()->indexName() == indexName; - }); - if (indexEntry != indexes.end()) { - auto indexSpec = (*indexEntry)->descriptor()->infoObj(); - LOGV2_ERROR(7463100, "Index failed validation", "spec"_attr = indexSpec); - } +void _printIndexSpec(OperationContext* opCtx, + const ValidateState* validateState, + StringData indexName) { + const IndexDescriptor* descriptor = + validateState->getCollection()->getIndexCatalog()->findIndexByName(opCtx, indexName); + auto indexSpec = descriptor->infoObj(); + LOGV2_ERROR(7463100, "Index failed validation", "spec"_attr = indexSpec); } /** @@ -310,7 +366,7 @@ void _reportValidationResults(OperationContext* opCtx, for (const auto& [indexName, vr] : results->indexResultsMap) { if (!vr.valid) { results->valid = false; - _printIndexSpec(validateState, indexName); + _printIndexSpec(opCtx, validateState, indexName); } if (validateState->getSkippedIndexes().contains(indexName)) { @@ -465,9 +521,9 @@ void _validateCatalogEntry(OperationContext* opCtx, const IndexCatalogEntry* indexEntry = indexIt->next(); const std::string indexName = indexEntry->descriptor()->indexName(); - Status status = index_key_validate::validateIndexSpec( - opCtx, indexEntry->descriptor()->infoObj(), true /* inCollValidation */) - .getStatus(); + Status status = + index_key_validate::validateIndexSpec(opCtx, indexEntry->descriptor()->infoObj()) + .getStatus(); if (!status.isOK()) { results->valid = false; results->errors.push_back( @@ -478,7 +534,7 @@ void _validateCatalogEntry(OperationContext* opCtx, status.reason())); } - if (!indexEntry->isReady(opCtx)) { + if (!indexEntry->isReady()) { continue; } @@ -522,7 +578,9 @@ Status validate(OperationContext* opCtx, uassertStatusOK(replCoord->checkCanServeReadsFor( opCtx, nss, ReadPreferenceSetting::get(opCtx).canRunOnSecondary())); - output->append("ns", NamespaceStringUtil::serialize(validateState.nss())); + 
SerializationContext sc = SerializationContext::stateCommandReply(); + sc.setTenantIdSource(auth::ValidatedTenancyScope::get(opCtx) != boost::none); + output->append("ns", NamespaceStringUtil::serialize(validateState.nss(), sc)); validateState.uuid().appendToBuilder(output, "uuid"); @@ -535,6 +593,14 @@ Status validate(OperationContext* opCtx, opCtx->recoveryUnit()->abandonSnapshot(); opCtx->recoveryUnit()->setPrepareConflictBehavior(oldPrepareConflictBehavior); }); + + // Relax corruption detection so that we log and continue scanning instead of failing early. + auto oldDataCorruptionMode = opCtx->recoveryUnit()->getDataCorruptionDetectionMode(); + opCtx->recoveryUnit()->setDataCorruptionDetectionMode( + DataCorruptionDetectionMode::kLogAndContinue); + ON_BLOCK_EXIT( + [&] { opCtx->recoveryUnit()->setDataCorruptionDetectionMode(oldDataCorruptionMode); }); + if (validateState.fixErrors()) { // Note: cannot set PrepareConflictBehavior here, since the validate command with repair // needs kIngnoreConflictsAllowWrites, but validate repair at startup cannot set that here diff --git a/src/mongo/db/catalog/collection_validation.h b/src/mongo/db/catalog/collection_validation.h index d0597f2198d62..d43810ac899b2 100644 --- a/src/mongo/db/catalog/collection_validation.h +++ b/src/mongo/db/catalog/collection_validation.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/validate_results.h" #include "mongo/db/namespace_string.h" diff --git a/src/mongo/db/catalog/collection_validation_test.cpp b/src/mongo/db/catalog/collection_validation_test.cpp index db67369add099..1dd556ab6a13e 100644 --- a/src/mongo/db/catalog/collection_validation_test.cpp +++ b/src/mongo/db/catalog/collection_validation_test.cpp @@ -27,22 +27,66 @@ * it in the license file. 
*/ +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/builder.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_validation.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/column_index_consistency.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/index/column_key_generator.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/index/columns_access_method.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/column_store.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/stdx/thread.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/bufreader.h" #include "mongo/util/fail_point.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -747,19 +791,19 @@ TEST_F(CollectionValidationDiskTest, BackgroundValidateRunsConcurrentlyWithWrite /** * Generates a KeyString suitable for positioning a cursor at the beginning of an index. */ -KeyString::Value makeFirstKeyString(const SortedDataInterface& sortedDataInterface) { - KeyString::Builder firstKeyStringBuilder(sortedDataInterface.getKeyStringVersion(), - BSONObj(), - sortedDataInterface.getOrdering(), - KeyString::Discriminator::kExclusiveBefore); +key_string::Value makeFirstKeyString(const SortedDataInterface& sortedDataInterface) { + key_string::Builder firstKeyStringBuilder(sortedDataInterface.getKeyStringVersion(), + BSONObj(), + sortedDataInterface.getOrdering(), + key_string::Discriminator::kExclusiveBefore); return firstKeyStringBuilder.getValueCopy(); } /** * Extracts KeyString without RecordId. 
*/ -KeyString::Value makeKeyStringWithoutRecordId(const KeyString::Value& keyStringWithRecordId, - KeyString::Version version) { +key_string::Value makeKeyStringWithoutRecordId(const key_string::Value& keyStringWithRecordId, + key_string::Version version) { BufBuilder bufBuilder; keyStringWithRecordId.serializeWithoutRecordIdLong(bufBuilder); auto builderSize = bufBuilder.len(); @@ -767,7 +811,7 @@ KeyString::Value makeKeyStringWithoutRecordId(const KeyString::Value& keyStringW auto buffer = bufBuilder.release(); BufReader bufReader(buffer.get(), builderSize); - return KeyString::Value::deserialize(bufReader, version); + return key_string::Value::deserialize(bufReader, version); } // Verify calling validate() on a collection with old (pre-4.2) keys in a WT unique index. @@ -811,7 +855,7 @@ TEST_F(CollectionValidationTest, ValidateOldUniqueIndexKeyWarning) { // Check key in index for only document. auto firstKeyString = makeFirstKeyString(*sortedDataInterface); - KeyString::Value keyStringWithRecordId; + key_string::Value keyStringWithRecordId; RecordId recordId; { auto cursor = sortedDataInterface->newCursor(opCtx); @@ -888,7 +932,7 @@ TEST_F(CollectionValidationColumnStoreIndexTest, SingleInvalidIndexEntryCSI) { for (int corruptedFldIndex = 1; corruptedFldIndex <= numFields; ++corruptedFldIndex) { for (int corruptedDocIndex = 0; corruptedDocIndex < numDocs; ++corruptedDocIndex) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest( - kNss.toString() + std::to_string(++testCaseIdx)); + kNss.toString_forTest() + std::to_string(++testCaseIdx)); // Create collection nss for unit tests to use. const CollectionOptions defaultCollectionOptions; @@ -1011,7 +1055,7 @@ TEST_F(CollectionValidationColumnStoreIndexTest, SingleExtraIndexEntry) { const int corruptedDocIndex = corruption.second; const auto nss = NamespaceString::createNamespaceString_forTest( - kNss.toString() + std::to_string(++testCaseIdx)); + kNss.toString_forTest() + std::to_string(++testCaseIdx)); // Create collection nss for unit tests to use. const CollectionOptions defaultCollectionOptions; @@ -1099,7 +1143,7 @@ TEST_F(CollectionValidationColumnStoreIndexTest, SingleMissingIndexEntryCSI) { for (int corruptedFldIndex = 1; corruptedFldIndex <= numFields; ++corruptedFldIndex) { for (int corruptedDocIndex = 0; corruptedDocIndex < numDocs; ++corruptedDocIndex) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest( - kNss.toString() + std::to_string(++testCaseIdx)); + kNss.toString_forTest() + std::to_string(++testCaseIdx)); // Create collection nss for unit tests to use. 
const CollectionOptions defaultCollectionOptions; diff --git a/src/mongo/db/catalog/collection_write_path.cpp b/src/mongo/db/catalog/collection_write_path.cpp index 6017c78fe7d07..0d6efd396ca5e 100644 --- a/src/mongo/db/catalog/collection_write_path.cpp +++ b/src/mongo/db/catalog/collection_write_path.cpp @@ -29,19 +29,61 @@ #include "mongo/db/catalog/collection_write_path.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonelement_comparator.h" -#include "mongo/crypto/fle_crypto.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/fle_crypto_types.h" #include "mongo/db/catalog/capped_collection_maintenance.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_options_gen.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/local_oplog_info.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/write_stage_common.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/db/transaction/transaction_participant.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -185,8 +227,7 @@ Status insertDocumentsImpl(OperationContext* opCtx, // SERVER-21646. On the other hand, capped clustered collections with a monotonically // increasing cluster key natively guarantee preservation of the insertion order, and don't // need serialisation. We allow concurrent inserts for clustered capped collections. 
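The hunk below keeps the per-collection metadata lock for the whole write unit of work so that non-clustered capped inserts commit in insertion order; only the ResourceId construction changes, from nss.ns() to nss. A standalone toy model of "hold the lock until the unit of work ends"; it is an analogy, not the real Lock::ResourceLock semantics:

```cpp
#include <iostream>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

// Toy model: take a per-collection lock and hold it until the write unit of work
// ends, so capped inserts commit in insertion order. Clustered capped collections
// with monotonically increasing keys would not need this serialization.
struct ToyCappedCollection {
    std::mutex metadataLock;           // stands in for the RESOURCE_METADATA lock
    std::vector<std::string> records;  // insertion order must match commit order
};

void insertSerialized(ToyCappedCollection& coll, std::string doc) {
    // Held for the whole "unit of work", not just the insert call itself.
    std::lock_guard<std::mutex> heldUntilEndOfWUOW(coll.metadataLock);
    coll.records.push_back(std::move(doc));
    // ... the rest of the unit of work would run here, still under the lock ...
}

int main() {
    ToyCappedCollection coll;
    std::thread t1(insertSerialized, std::ref(coll), "doc-1");
    std::thread t2(insertSerialized, std::ref(coll), "doc-2");
    t1.join();
    t2.join();
    std::cout << coll.records.size() << " documents inserted, one unit of work at a time\n";
    return 0;
}
```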
- Lock::ResourceLock heldUntilEndOfWUOW{ - opCtx, ResourceId(RESOURCE_METADATA, nss.ns()), MODE_X}; + Lock::ResourceLock heldUntilEndOfWUOW{opCtx, ResourceId(RESOURCE_METADATA, nss), MODE_X}; } std::vector records; @@ -398,7 +439,7 @@ Status insertDocuments(OperationContext* opCtx, return Status(ErrorCodes::InternalError, str::stream() << "Collection::insertDocument got document without _id for ns:" - << nss.toString()); + << nss.toStringForErrorMsg()); } auto status = collection->checkValidationAndParseResult(opCtx, it->doc); @@ -450,10 +491,10 @@ Status insertDocuments(OperationContext* opCtx, hangAfterCollectionInserts.pauseWhileSet(opCtx); }, [&](const BSONObj& data) { - const auto& collElem = data["collectionNS"]; + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "collectionNS"); const auto& firstIdElem = data["first_id"]; // If the failpoint specifies no collection or matches the existing one, hang. - return (!collElem || nss.ns() == collElem.str()) && + return (fpNss.isEmpty() || nss == fpNss) && (!firstIdElem || (begin != end && firstIdElem.type() == mongo::String && begin->doc["_id"].str() == firstIdElem.str())); @@ -487,8 +528,8 @@ Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSON }, [&](const BSONObj& data) { // If the failpoint specifies no collection or matches the existing one, fail. - const auto collElem = data["collectionNS"]; - return !collElem || ns.ns() == collElem.str(); + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "collectionNS"); + return fpNss.isEmpty() || ns == fpNss; }); return s; } @@ -499,6 +540,7 @@ void updateDocument(OperationContext* opCtx, const Snapshotted& oldDoc, const BSONObj& newDoc, const BSONObj* opDiff, + bool* indexesAffected, OpDebug* opDebug, CollectionUpdateArgs* args) { { @@ -584,6 +626,9 @@ void updateDocument(OperationContext* opCtx, oldLocation, &keysInserted, &keysDeleted)); + if (indexesAffected) { + *indexesAffected = (keysInserted > 0 || keysDeleted > 0); + } if (opDebug) { opDebug->additiveMetrics.incrementKeysInserted(keysInserted); @@ -612,6 +657,7 @@ StatusWith updateDocumentWithDamages(OperationContext* opCtx, const char* damageSource, const mutablebson::DamageVector& damages, const BSONObj* opDiff, + bool* indexesAffected, OpDebug* opDebug, CollectionUpdateArgs* args) { dassert(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_IX)); @@ -663,6 +709,9 @@ StatusWith updateDocumentWithDamages(OperationContext* opCtx, loc, &keysInserted, &keysDeleted)); + if (indexesAffected) { + *indexesAffected = (keysInserted > 0 || keysDeleted > 0); + } if (opDebug) { opDebug->additiveMetrics.incrementKeysInserted(keysInserted); @@ -727,27 +776,27 @@ void deleteDocument(OperationContext* opCtx, Lock::ResourceLock heldUntilEndOfWUOW{opCtx, ResourceId(RESOURCE_METADATA, nss), MODE_X}; } - std::vector oplogSlots; - auto retryableFindAndModifyLocation = RetryableFindAndModifyLocation::kNone; - if (storeDeletedDoc == StoreDeletedDoc::On && retryableWrite == RetryableWrite::kYes) { - retryableFindAndModifyLocation = RetryableFindAndModifyLocation::kSideCollection; - oplogSlots = reserveOplogSlotsForRetryableFindAndModify(opCtx); - } - OplogDeleteEntryArgs deleteArgs{nullptr /* deletedDoc */, - fromMigrate, - collection->isChangeStreamPreAndPostImagesEnabled(), - retryableFindAndModifyLocation, - oplogSlots}; + OplogDeleteEntryArgs deleteArgs; + opCtx->getServiceContext()->getOpObserver()->aboutToDelete( + opCtx, collection, doc.value(), &deleteArgs); + + 
deleteArgs.deletedDoc = nullptr; + deleteArgs.fromMigrate = fromMigrate; + deleteArgs.changeStreamPreAndPostImagesEnabledForCollection = + collection->isChangeStreamPreAndPostImagesEnabled(); - opCtx->getServiceContext()->getOpObserver()->aboutToDelete(opCtx, collection, doc.value()); + const bool shouldRecordPreImageForRetryableWrite = + storeDeletedDoc == StoreDeletedDoc::On && retryableWrite == RetryableWrite::kYes; + if (shouldRecordPreImageForRetryableWrite) { + deleteArgs.retryableFindAndModifyLocation = RetryableFindAndModifyLocation::kSideCollection; + deleteArgs.oplogSlots = reserveOplogSlotsForRetryableFindAndModify(opCtx); + } boost::optional deletedDoc; - const bool isRecordingPreImageForRetryableWrite = - retryableFindAndModifyLocation != RetryableFindAndModifyLocation::kNone; const bool isTimeseriesCollection = collection->getTimeseriesOptions() || nss.isTimeseriesBucketsCollection(); - if (isRecordingPreImageForRetryableWrite || + if (shouldRecordPreImageForRetryableWrite || collection->isChangeStreamPreAndPostImagesEnabled() || (isTimeseriesCollection && feature_flags::gTimeseriesScalabilityImprovements.isEnabled( diff --git a/src/mongo/db/catalog/collection_write_path.h b/src/mongo/db/catalog/collection_write_path.h index e4cd984e8c20e..a51b3504cd13a 100644 --- a/src/mongo/db/catalog/collection_write_path.h +++ b/src/mongo/db/catalog/collection_write_path.h @@ -29,11 +29,22 @@ #pragma once +#include +#include + #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/damage_vector.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/curop.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/snapshot.h" namespace mongo { namespace collection_internal { @@ -99,11 +110,13 @@ Status checkFailCollectionInsertsFailPoint(const NamespaceString& ns, const BSON * Updates the document @ oldLocation with newDoc. * * If the document fits in the old space, it is put there; if not, it is moved. - * Sets 'args.updatedDoc' to the updated version of the document with damages applied, on success. - * 'opDiff' Optional argument. If set to kUpdateAllIndexes, all the indexes are updated. If it is - * set to kUpdateNoIndexes, no indexes are updated. Otherwise, it is the precomputed difference - * between 'oldDoc' and 'newDoc', used to determine which indexes need to be updated. - * 'opDebug' Optional argument. When not null, will be used to record operation statistics. + * + * 'args.updatedDoc' is set to the updated version of the document with damages applied, on success. + * 'opDiff' is optional. If set to kUpdateAllIndexes, all the indexes are updated. If it is set to + * kUpdateNoIndexes, no indexes are updated. Otherwise, it is the precomputed difference between + * 'oldDoc' and 'newDoc', used to determine which indexes need to be updated. + * 'indexesAffected' is optional. When not null, will be set to whether any indexes were updated. + * 'opDebug' is optional. When not null, will be used to record operation statistics.
*/ void updateDocument(OperationContext* opCtx, const CollectionPtr& collection, @@ -111,6 +124,7 @@ void updateDocument(OperationContext* opCtx, const Snapshotted& oldDoc, const BSONObj& newDoc, const BSONObj* opDiff, + bool* indexesAffected, OpDebug* opDebug, CollectionUpdateArgs* args); @@ -126,6 +140,7 @@ StatusWith updateDocumentWithDamages(OperationContext* opCtx, const char* damageSource, const mutablebson::DamageVector& damages, const BSONObj* opDiff, + bool* indexesAffected, OpDebug* opDebug, CollectionUpdateArgs* args); diff --git a/src/mongo/db/catalog/collection_writer_test.cpp b/src/mongo/db/catalog/collection_writer_test.cpp index 15368437ff439..f9c2907d704d6 100644 --- a/src/mongo/db/catalog/collection_writer_test.cpp +++ b/src/mongo/db/catalog/collection_writer_test.cpp @@ -27,23 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include +#include + +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_mock.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/mutex.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +namespace mongo { namespace { -using namespace mongo; - /** * Sets up the catalog (via CatalogTestFixture), installs a collection in the catalog and provides * helper function to access the collection from the catalog. 
@@ -59,9 +76,8 @@ class CollectionWriterTest : public CatalogTestFixture { std::shared_ptr collection = std::make_shared(kNss); CollectionCatalog::write(getServiceContext(), [&](CollectionCatalog& catalog) { - auto uuid = collection->uuid(); catalog.registerCollection( - operationContext(), uuid, std::move(collection), /*ts=*/boost::none); + operationContext(), std::move(collection), /*ts=*/boost::none); }); } @@ -255,7 +271,6 @@ class CatalogReadCopyUpdateTest : public CatalogTestFixture { for (size_t i = 0; i < NumCollections; ++i) { catalog.registerCollection( operationContext(), - UUID::gen(), std::make_shared(NamespaceString::createNamespaceString_forTest( "many", fmt::format("coll{}", i))), /*ts=*/boost::none); @@ -328,3 +343,4 @@ TEST_F(BatchedCollectionCatalogWriterTest, BatchedTest) { } } // namespace +} // namespace mongo diff --git a/src/mongo/db/catalog/collection_yield_restore.cpp b/src/mongo/db/catalog/collection_yield_restore.cpp index 0e6d5cad14617..b54ab386c98c6 100644 --- a/src/mongo/db/catalog/collection_yield_restore.cpp +++ b/src/mongo/db/catalog/collection_yield_restore.cpp @@ -29,9 +29,16 @@ #include "mongo/db/catalog/collection_yield_restore.h" +#include + +#include + #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/storage/capped_snapshots.h" #include "mongo/db/storage/snapshot_helper.h" +#include "mongo/util/assert_util_core.h" namespace mongo { LockedCollectionYieldRestore::LockedCollectionYieldRestore(OperationContext* opCtx, @@ -47,7 +54,7 @@ const Collection* LockedCollectionYieldRestore::operator()(OperationContext* opC // Confirm that we were set with a valid collection instance at construction if yield is // performed. invariant(!_nss.isEmpty()); - // Confirm that we are holding the neccessary collection level lock. + // Confirm that we are holding the necessary collection level lock. invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_IS)); // Hold reference to the catalog for collection lookup without locks to be safe. 
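Aside (illustrative only, not part of this patch): the test-fixture hunks above show that `CollectionCatalog::registerCollection()` no longer takes an explicit UUID argument; presumably the catalog now reads the UUID from the `Collection` instance itself. A hedged sketch of registering a mock collection with the new overload follows; the helper name is invented and only the catalog calls visible above are assumed to exist.

```cpp
#include <memory>

#include "mongo/db/catalog/collection_catalog.h"
#include "mongo/db/catalog/collection_mock.h"

namespace mongo {
void exampleRegisterMockCollection(ServiceContext* svcCtx,
                                   OperationContext* opCtx,
                                   const NamespaceString& nss) {
    auto coll = std::make_shared<CollectionMock>(nss);
    CollectionCatalog::write(svcCtx, [&](CollectionCatalog& catalog) {
        // No separate UUID argument any more; the mock's own identity is used.
        catalog.registerCollection(opCtx, std::move(coll), /*ts=*/boost::none);
    });
}
}  // namespace mongo
```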
diff --git a/src/mongo/db/catalog/collection_yield_restore.h b/src/mongo/db/catalog/collection_yield_restore.h index e9aa98aca33bf..b4b5ad8468a09 100644 --- a/src/mongo/db/catalog/collection_yield_restore.h +++ b/src/mongo/db/catalog/collection_yield_restore.h @@ -30,6 +30,9 @@ #pragma once #include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/catalog/column_index_consistency.cpp b/src/mongo/db/catalog/column_index_consistency.cpp index 768d328436d27..c73f3e850e64e 100644 --- a/src/mongo/db/catalog/column_index_consistency.cpp +++ b/src/mongo/db/catalog/column_index_consistency.cpp @@ -28,7 +28,33 @@ */ #include "mongo/db/catalog/column_index_consistency.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/throttle_cursor.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/index/column_cell.h" +#include "mongo/db/index/column_key_generator.h" +#include "mongo/db/index/columns_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -57,7 +83,19 @@ int64_t ColumnIndexConsistency::traverseIndex(OperationContext* opCtx, if (numIndexEntries % kInterruptIntervalNumRecords == 0) { // Periodically checks for interrupts and yields. opCtx->checkForInterrupt(); + + // Make a copy of the ident as the IndexCatalogEntry pointer may be invalidated after + // yielding. + const std::string indexIdent = index->getIdent(); _validateState->yield(opCtx); + + // After yielding, the latest instance of the collection is fetched and can be different + // from the collection instance prior to yielding. For this reason we need to refresh + // the index entry pointer. 
+ index = _validateState->getCollection() + ->getIndexCatalog() + ->findIndexByIdent(opCtx, indexIdent) + ->getEntry(); } if (_firstPhase) { @@ -179,9 +217,9 @@ void ColumnIndexConsistency::_addIndexEntryErrors(OperationContext* opCtx, if (!_missingIndexEntries.empty() || !_extraIndexEntries.empty()) { StringBuilder ss; - ss << "Index with name '" << csam->indexName() << "' has inconsistencies."; + ss << "Index with name '" << csam->indexName(index) << "' has inconsistencies."; results->errors.push_back(ss.str()); - results->indexResultsMap.at(csam->indexName()).valid = false; + results->indexResultsMap.at(csam->indexName(index)).valid = false; } if (!_missingIndexEntries.empty()) { StringBuilder ss; @@ -189,8 +227,8 @@ void ColumnIndexConsistency::_addIndexEntryErrors(OperationContext* opCtx, results->warnings.push_back(ss.str()); results->valid = false; for (const auto& ent : _missingIndexEntries) { - results->missingIndexEntries.push_back( - _generateInfo(csam->indexName(), RecordId(ent.rid), ent.path, ent.rid, ent.value)); + results->missingIndexEntries.push_back(_generateInfo( + csam->indexName(index), RecordId(ent.rid), ent.path, ent.rid, ent.value)); } } if (!_extraIndexEntries.empty()) { @@ -199,8 +237,8 @@ void ColumnIndexConsistency::_addIndexEntryErrors(OperationContext* opCtx, results->warnings.push_back(ss.str()); results->valid = false; for (const auto& ent : _extraIndexEntries) { - results->extraIndexEntries.push_back( - _generateInfo(csam->indexName(), RecordId(ent.rid), ent.path, ent.rid, ent.value)); + results->extraIndexEntries.push_back(_generateInfo( + csam->indexName(index), RecordId(ent.rid), ent.path, ent.rid, ent.value)); } } } @@ -208,14 +246,15 @@ void ColumnIndexConsistency::_addIndexEntryErrors(OperationContext* opCtx, void ColumnIndexConsistency::addIndexEntryErrors(OperationContext* opCtx, ValidateResults* results) { int numColumnStoreIndexes = 0; - for (const auto& index : _validateState->getIndexes()) { - const IndexDescriptor* descriptor = index->descriptor(); + for (const auto& indexIdent : _validateState->getIndexIdents()) { + const IndexDescriptor* descriptor = + _validateState->getCollection()->getIndexCatalog()->findIndexByIdent(opCtx, indexIdent); if (descriptor->getAccessMethodName() == IndexNames::COLUMN) { ++numColumnStoreIndexes; uassert(7106138, "The current implementation only supports a single column-store index.", numColumnStoreIndexes <= 1); - _addIndexEntryErrors(opCtx, index.get(), results); + _addIndexEntryErrors(opCtx, descriptor->getEntry(), results); } } } @@ -226,9 +265,9 @@ void ColumnIndexConsistency::repairIndexEntries(OperationContext* opCtx, ColumnStoreAccessMethod* csam = checked_cast(index->accessMethod()); - writeConflictRetry(opCtx, "removingExtraColumnIndexEntries", _validateState->nss().ns(), [&] { + writeConflictRetry(opCtx, "removingExtraColumnIndexEntries", _validateState->nss(), [&] { WriteUnitOfWork wunit(opCtx); - auto& indexResults = results->indexResultsMap[csam->indexName()]; + auto& indexResults = results->indexResultsMap[csam->indexName(index)]; auto cursor = csam->writableStorage()->newWriteCursor(opCtx); for (auto it = _extraIndexEntries.begin(); it != _extraIndexEntries.end();) { @@ -252,14 +291,15 @@ void ColumnIndexConsistency::repairIndexEntries(OperationContext* opCtx, void ColumnIndexConsistency::repairIndexEntries(OperationContext* opCtx, ValidateResults* results) { int numColumnStoreIndexes = 0; - for (const auto& index : _validateState->getIndexes()) { - const IndexDescriptor* descriptor = 
index->descriptor(); + for (const auto& indexIdent : _validateState->getIndexIdents()) { + const IndexDescriptor* descriptor = + _validateState->getCollection()->getIndexCatalog()->findIndexByIdent(opCtx, indexIdent); if (descriptor->getAccessMethodName() == IndexNames::COLUMN) { ++numColumnStoreIndexes; uassert(7106123, "The current implementation only supports a single column-store index.", numColumnStoreIndexes <= 1); - repairIndexEntries(opCtx, index.get(), results); + repairIndexEntries(opCtx, descriptor->getEntry(), results); } } } diff --git a/src/mongo/db/catalog/column_index_consistency.h b/src/mongo/db/catalog/column_index_consistency.h index defe030ba98cd..5c05b9f0442f5 100644 --- a/src/mongo/db/catalog/column_index_consistency.h +++ b/src/mongo/db/catalog/column_index_consistency.h @@ -31,14 +31,31 @@ #include #include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/index_consistency.h" #include "mongo/db/catalog/throttle_cursor.h" +#include "mongo/db/catalog/validate_results.h" #include "mongo/db/catalog/validate_state.h" #include "mongo/db/index/columns_access_method.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/column_store.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/progress_meter.h" namespace mongo { diff --git a/src/mongo/db/catalog/column_index_consistency_test.cpp b/src/mongo/db/catalog/column_index_consistency_test.cpp index 46ba7d88600e1..27d6be3adb2fd 100644 --- a/src/mongo/db/catalog/column_index_consistency_test.cpp +++ b/src/mongo/db/catalog/column_index_consistency_test.cpp @@ -27,11 +27,21 @@ * it in the license file. 
*/ +#include +#include + +#include + #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_validation.h" #include "mongo/db/catalog/column_index_consistency.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context_d_test_fixture.h" #include "mongo/stdx/unordered_set.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/catalog/commit_quorum_options.cpp b/src/mongo/db/catalog/commit_quorum_options.cpp index 9a12abdfe2329..2dd4ebef42f86 100644 --- a/src/mongo/db/catalog/commit_quorum_options.cpp +++ b/src/mongo/db/catalog/commit_quorum_options.cpp @@ -29,10 +29,17 @@ #include "mongo/db/catalog/commit_quorum_options.h" +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/string_data.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/catalog/commit_quorum_options.h b/src/mongo/db/catalog/commit_quorum_options.h index a910e0a083176..897242b3f4e13 100644 --- a/src/mongo/db/catalog/commit_quorum_options.h +++ b/src/mongo/db/catalog/commit_quorum_options.h @@ -31,6 +31,11 @@ #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/jsobj.h" namespace mongo { diff --git a/src/mongo/db/catalog/commit_quorum_options_test.cpp b/src/mongo/db/catalog/commit_quorum_options_test.cpp index e0c6e4d6e4e7c..9256203600bc7 100644 --- a/src/mongo/db/catalog/commit_quorum_options_test.cpp +++ b/src/mongo/db/catalog/commit_quorum_options_test.cpp @@ -27,11 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/catalog/commit_quorum_options.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/catalog/create_collection.cpp b/src/mongo/db/catalog/create_collection.cpp index 2707a8457b244..e88fd90469967 100644 --- a/src/mongo/db/catalog/create_collection.cpp +++ b/src/mongo/db/catalog/create_collection.cpp @@ -29,41 +29,78 @@ #include "mongo/db/catalog/create_collection.h" -#include - +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/json.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_catalog_helper.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/catalog/unique_collection_name.h" #include "mongo/db/catalog/virtual_collection_options.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/commands/create_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/insert.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/db/timeseries/timeseries_options.h" #include "mongo/idl/command_generic_argument.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -163,7 +200,7 @@ Status _createView(OperationContext* opCtx, << "': this is a reserved system namespace", !nss.isSystemDotViews()); - return writeConflictRetry(opCtx, "create", nss.ns(), [&] { + return writeConflictRetry(opCtx, "create", nss, [&] { AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IX); Lock::CollectionLock collLock(opCtx, nss, MODE_IX); // Operations all lock system.views in the end to prevent deadlock. @@ -175,7 +212,8 @@ Status _createView(OperationContext* opCtx, if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) { return Status(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while creating collection " << nss); + str::stream() << "Not primary while creating collection " + << nss.toStringForErrorMsg()); } CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss) @@ -186,14 +224,6 @@ Status _createView(OperationContext* opCtx, "option not supported on a view: changeStreamPreAndPostImages"); } - // Cannot directly create a view on a system.buckets collection, only by creating a - // time-series collection. - auto viewOnNss = NamespaceString{collectionOptions.viewOn}; - uassert(ErrorCodes::InvalidNamespace, - "Cannot create view on a system.buckets namespace except by creating a time-series " - "collection", - !viewOnNss.isTimeseriesBucketsCollection()); - _createSystemDotViewsIfNecessary(opCtx, db); WriteUnitOfWork wunit(opCtx); @@ -252,84 +282,122 @@ Status _createDefaultTimeseriesIndex(OperationContext* opCtx, CollectionWriter& } BSONObj _generateTimeseriesValidator(int bucketVersion, StringData timeField) { - switch (bucketVersion) { - case timeseries::kTimeseriesControlCompressedVersion: - return fromjson(fmt::sprintf(R"( -{ -'$jsonSchema' : { - bsonType: 'object', - required: ['_id', 'control', 'data'], - properties: { - _id: {bsonType: 'objectId'}, - control: { - bsonType: 'object', - required: ['version', 'min', 'max'], - properties: { - version: {bsonType: 'number'}, - min: { - bsonType: 'object', - required: ['%s'], - properties: {'%s': {bsonType: 'date'}} - }, - max: { - bsonType: 'object', - required: ['%s'], - properties: {'%s': {bsonType: 'date'}} - }, - closed: {bsonType: 'bool'}, - count: {bsonType: 'number', minimum: 1} - }, - additionalProperties: false - }, - data: {bsonType: 'object'}, - meta: {} - }, - additionalProperties: false -} -})", - timeField, - timeField, - timeField, - timeField)); - case timeseries::kTimeseriesControlUncompressedVersion: - return fromjson(fmt::sprintf(R"( -{ -'$jsonSchema' : { - bsonType: 'object', - required: ['_id', 'control', 'data'], - properties: { - _id: {bsonType: 'objectId'}, - control: { - bsonType: 'object', - required: ['version', 'min', 'max'], - properties: { - version: {bsonType: 'number'}, - min: { - bsonType: 'object', - required: ['%s'], - properties: {'%s': {bsonType: 'date'}} - }, - max: { - bsonType: 'object', - required: ['%s'], - properties: {'%s': {bsonType: 'date'}} - }, - closed: {bsonType: 'bool'} + if (bucketVersion != timeseries::kTimeseriesControlCompressedVersion && + bucketVersion != timeseries::kTimeseriesControlUncompressedVersion) { + MONGO_UNREACHABLE; + } + // '$jsonSchema' : { + // bsonType: 'object', + // required: ['_id', 'control', 'data'], + // properties: { + // _id: {bsonType: 'objectId'}, + // control: { + // bsonType: 'object', + // required: ['version', 'min', 'max'], + // properties: { + // version: {bsonType: 'number'}, + // 
min: { + // bsonType: 'object', + // required: ['%s'], + // properties: {'%s': {bsonType: 'date'}} + // }, + // max: { + // bsonType: 'object', + // required: ['%s'], + // properties: {'%s': {bsonType: 'date'}} + // }, + // closed: {bsonType: 'bool'}, + // count: {bsonType: 'number', minimum: 1} // only if bucketVersion == + // timeseries::kTimeseriesControlCompressedVersion + // }, + // additionalProperties: false // only if bucketVersion == + // timeseries::kTimeseriesControlCompressedVersion + // }, + // data: {bsonType: 'object'}, + // meta: {} + // }, + // additionalProperties: false + // } + BSONObjBuilder validator; + BSONObjBuilder schema(validator.subobjStart("$jsonSchema")); + schema.append("bsonType", "object"); + schema.append("required", + BSON_ARRAY("_id" + << "control" + << "data")); + { + BSONObjBuilder properties(schema.subobjStart("properties")); + { + BSONObjBuilder _id(properties.subobjStart("_id")); + _id.append("bsonType", "objectId"); + _id.done(); + } + { + BSONObjBuilder control(properties.subobjStart("control")); + control.append("bsonType", "object"); + control.append("required", + BSON_ARRAY("version" + << "min" + << "max")); + { + BSONObjBuilder innerProperties(control.subobjStart("properties")); + { + BSONObjBuilder version(innerProperties.subobjStart("version")); + version.append("bsonType", "number"); + version.done(); + } + { + BSONObjBuilder min(innerProperties.subobjStart("min")); + min.append("bsonType", "object"); + min.append("required", BSON_ARRAY(timeField)); + BSONObjBuilder minProperties(min.subobjStart("properties")); + BSONObjBuilder timeFieldObj(minProperties.subobjStart(timeField)); + timeFieldObj.append("bsonType", "date"); + timeFieldObj.done(); + minProperties.done(); + min.done(); + } + + { + BSONObjBuilder max(innerProperties.subobjStart("max")); + max.append("bsonType", "object"); + max.append("required", BSON_ARRAY(timeField)); + BSONObjBuilder maxProperties(max.subobjStart("properties")); + BSONObjBuilder timeFieldObj(maxProperties.subobjStart(timeField)); + timeFieldObj.append("bsonType", "date"); + timeFieldObj.done(); + maxProperties.done(); + max.done(); + } + { + BSONObjBuilder closed(innerProperties.subobjStart("closed")); + closed.append("bsonType", "bool"); + closed.done(); + } + if (bucketVersion == timeseries::kTimeseriesControlCompressedVersion) { + BSONObjBuilder count(innerProperties.subobjStart("count")); + count.append("bsonType", "number"); + count.append("minimum", 1); + count.done(); + } + innerProperties.done(); } - }, - data: {bsonType: 'object'}, - meta: {} - }, - additionalProperties: false -} -})", - timeField, - timeField, - timeField, - timeField)); - default: - MONGO_UNREACHABLE; - }; + if (bucketVersion == timeseries::kTimeseriesControlCompressedVersion) { + control.append("additionalProperties", false); + } + control.done(); + } + { + BSONObjBuilder data(properties.subobjStart("data")); + data.append("bsonType", "object"); + data.done(); + } + properties.append("meta", BSONObj{}); + properties.done(); + } + schema.append("additionalProperties", false); + schema.done(); + return validator.obj(); } Status _createTimeseries(OperationContext* opCtx, @@ -361,108 +429,112 @@ Status _createTimeseries(OperationContext* opCtx, bool existingBucketCollectionIsCompatible = false; - Status ret = - writeConflictRetry(opCtx, "createBucketCollection", bucketsNs.ns(), [&]() -> Status { - AutoGetDb autoDb(opCtx, bucketsNs.dbName(), MODE_IX); - Lock::CollectionLock bucketsCollLock(opCtx, bucketsNs, MODE_X); - auto db = 
autoDb.ensureDbExists(opCtx); - - // Check if there already exist a Collection on the namespace we will later create a - // view on. We're not holding a Collection lock for this Collection so we may only check - // if the pointer is null or not. The answer may also change at any point after this - // call which is fine as we properly handle an orphaned bucket collection. This check is - // just here to prevent it from being created in the common case. - Status status = catalog::checkIfNamespaceExists(opCtx, ns); - if (!status.isOK()) { - return status; - } + Status ret = writeConflictRetry(opCtx, "createBucketCollection", bucketsNs, [&]() -> Status { + AutoGetDb autoDb(opCtx, bucketsNs.dbName(), MODE_IX); + Lock::CollectionLock bucketsCollLock(opCtx, bucketsNs, MODE_X); + auto db = autoDb.ensureDbExists(opCtx); - if (opCtx->writesAreReplicated() && - !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, bucketsNs)) { - // Report the error with the user provided namespace - return Status(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while creating collection " << ns); - } + // Check if there already exist a Collection on the namespace we will later create a + // view on. We're not holding a Collection lock for this Collection so we may only check + // if the pointer is null or not. The answer may also change at any point after this + // call which is fine as we properly handle an orphaned bucket collection. This check is + // just here to prevent it from being created in the common case. + Status status = catalog::checkIfNamespaceExists(opCtx, ns); + if (!status.isOK()) { + return status; + } - CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, bucketsNs) - ->checkShardVersionOrThrow(opCtx); + if (opCtx->writesAreReplicated() && + !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, bucketsNs)) { + // Report the error with the user provided namespace + return Status(ErrorCodes::NotWritablePrimary, + str::stream() << "Not primary while creating collection " + << ns.toStringForErrorMsg()); + } - WriteUnitOfWork wuow(opCtx); - AutoStatsTracker bucketsStatsTracker( - opCtx, - bucketsNs, - Top::LockType::NotLocked, - AutoStatsTracker::LogMode::kUpdateTopAndCurOp, - CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(ns.dbName())); - - // If the buckets collection and time-series view creation roll back, ensure that their - // Top entries are deleted. - opCtx->recoveryUnit()->onRollback( - [serviceContext = opCtx->getServiceContext(), bucketsNs](OperationContext*) { - Top::get(serviceContext).collectionDropped(bucketsNs); - }); + CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, bucketsNs) + ->checkShardVersionOrThrow(opCtx); + WriteUnitOfWork wuow(opCtx); + AutoStatsTracker bucketsStatsTracker( + opCtx, + bucketsNs, + Top::LockType::NotLocked, + AutoStatsTracker::LogMode::kUpdateTopAndCurOp, + CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(ns.dbName())); - // Prepare collection option and index spec using the provided options. In case the - // collection already exist we use these to validate that they are the same as being - // requested here. - CollectionOptions bucketsOptions = options; - bucketsOptions.validator = validatorObj; + // If the buckets collection and time-series view creation roll back, ensure that their + // Top entries are deleted. 
+ opCtx->recoveryUnit()->onRollback( + [serviceContext = opCtx->getServiceContext(), bucketsNs](OperationContext*) { + Top::get(serviceContext).collectionDropped(bucketsNs); + }); - // Cluster time-series buckets collections by _id. - auto expireAfterSeconds = options.expireAfterSeconds; - if (expireAfterSeconds) { - uassertStatusOK(index_key_validate::validateExpireAfterSeconds( - *expireAfterSeconds, - index_key_validate::ValidateExpireAfterSecondsMode::kClusteredTTLIndex)); - bucketsOptions.expireAfterSeconds = expireAfterSeconds; - } - bucketsOptions.clusteredIndex = - clustered_util::makeCanonicalClusteredInfoForLegacyFormat(); + // Prepare collection option and index spec using the provided options. In case the + // collection already exist we use these to validate that they are the same as being + // requested here. + CollectionOptions bucketsOptions = options; + bucketsOptions.validator = validatorObj; + + // Cluster time-series buckets collections by _id. + auto expireAfterSeconds = options.expireAfterSeconds; + if (expireAfterSeconds) { + uassertStatusOK(index_key_validate::validateExpireAfterSeconds( + *expireAfterSeconds, + index_key_validate::ValidateExpireAfterSecondsMode::kClusteredTTLIndex)); + bucketsOptions.expireAfterSeconds = expireAfterSeconds; + } + + bucketsOptions.clusteredIndex = clustered_util::makeCanonicalClusteredInfoForLegacyFormat(); + + if (auto coll = + CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, bucketsNs)) { + // Compare CollectionOptions and eventual TTL index to see if this bucket collection + // may be reused for this request. + existingBucketCollectionIsCompatible = + coll->getCollectionOptions().matchesStorageOptions( + bucketsOptions, CollatorFactoryInterface::get(opCtx->getServiceContext())); + + // We may have a bucket collection created with a previous version of mongod, this + // is also OK as we do not convert bucket collections to latest version during + // upgrade. + while (!existingBucketCollectionIsCompatible && + bucketVersion > timeseries::kTimeseriesControlMinVersion) { + validatorObj = _generateTimeseriesValidator(--bucketVersion, timeField); + bucketsOptions.validator = validatorObj; - if (auto coll = - CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, bucketsNs)) { - // Compare CollectionOptions and eventual TTL index to see if this bucket collection - // may be reused for this request. existingBucketCollectionIsCompatible = coll->getCollectionOptions().matchesStorageOptions( bucketsOptions, CollatorFactoryInterface::get(opCtx->getServiceContext())); + } - // We may have a bucket collection created with a previous version of mongod, this - // is also OK as we do not convert bucket collections to latest version during - // upgrade. - while (!existingBucketCollectionIsCompatible && - bucketVersion > timeseries::kTimeseriesControlMinVersion) { - validatorObj = _generateTimeseriesValidator(--bucketVersion, timeField); - bucketsOptions.validator = validatorObj; - - existingBucketCollectionIsCompatible = - coll->getCollectionOptions().matchesStorageOptions( - bucketsOptions, - CollatorFactoryInterface::get(opCtx->getServiceContext())); - } + return Status(ErrorCodes::NamespaceExists, + str::stream() + << "Bucket Collection already exists. NS: " + << bucketsNs.toStringForErrorMsg() << ". UUID: " << coll->uuid()); + } - return Status(ErrorCodes::NamespaceExists, - str::stream() << "Bucket Collection already exists. NS: " << bucketsNs - << ". 
UUID: " << coll->uuid()); - } + // Create the buckets collection that will back the view. + const bool createIdIndex = false; + uassertStatusOK(db->userCreateNS(opCtx, bucketsNs, bucketsOptions, createIdIndex)); + CollectionWriter collectionWriter(opCtx, bucketsNs); + collectionWriter.getWritableCollection(opCtx)->setTimeseriesBucketingParametersChanged( + opCtx, false); - CollectionWriter collectionWriter(opCtx, bucketsNs); - uassertStatusOK(_createDefaultTimeseriesIndex(opCtx, collectionWriter)); - wuow.commit(); - return Status::OK(); - }); + uassertStatusOK(_createDefaultTimeseriesIndex(opCtx, collectionWriter)); + wuow.commit(); + return Status::OK(); + }); - // If compatible bucket collection already exists then proceed with creating view definition. - if (!ret.isOK() && !existingBucketCollectionIsCompatible) + // If compatible bucket collection already exists then proceed with creating view definition. + // If the 'temp' flag is true, we are in the $out stage, and should return without creating the + // view definition. + if ((!ret.isOK() && !existingBucketCollectionIsCompatible) || options.temp) return ret; - ret = writeConflictRetry(opCtx, "create", ns.ns(), [&]() -> Status { + ret = writeConflictRetry(opCtx, "create", ns, [&]() -> Status { AutoGetCollection autoColl( opCtx, ns, @@ -483,7 +555,8 @@ Status _createTimeseries(OperationContext* opCtx, if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) { return {ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while creating collection " << ns}; + str::stream() << "Not primary while creating collection " + << ns.toStringForErrorMsg()}; } CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, ns) @@ -507,13 +580,15 @@ Status _createTimeseries(OperationContext* opCtx, Top::get(serviceContext).collectionDropped(ns); }); - if (MONGO_unlikely(failTimeseriesViewCreation.shouldFail( - [&ns](const BSONObj& data) { return data["ns"_sd].String() == ns.ns(); }))) { + if (MONGO_unlikely(failTimeseriesViewCreation.shouldFail([&ns](const BSONObj& data) { + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "ns"); + return fpNss == ns; + }))) { LOGV2(5490200, "failTimeseriesViewCreation fail point enabled. Failing creation of view " "definition after bucket collection was created successfully."); return {ErrorCodes::OperationFailed, - str::stream() << "Timeseries view definition " << ns + str::stream() << "Timeseries view definition " << ns.toStringForErrorMsg() << " creation failed due to 'failTimeseriesViewCreation' " "fail point enabled."}; } @@ -527,9 +602,10 @@ Status _createTimeseries(OperationContext* opCtx, // Create the time-series view.
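Aside (illustrative only, not part of this patch): the failpoint predicate above now parses the "ns" payload with `NamespaceStringUtil::parseFailPointData()` and compares `NamespaceString` values instead of raw strings. A test would still pass the namespace as a string in the failpoint data; the use of `FailPointEnableBlock` below is a hedged sketch of that, and the helper name is invented for illustration.

```cpp
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/util/fail_point.h"

namespace mongo {
void exampleFailTimeseriesViewCreation(const NamespaceString& viewNss) {
    // While this block is in scope, _createTimeseries() is expected to fail view creation for
    // viewNss; the "ns" field is what the lambda above parses back into a NamespaceString.
    FailPointEnableBlock fp("failTimeseriesViewCreation", BSON("ns" << viewNss.ns()));
    // ... run the create command under test here ...
}
}  // namespace mongo
```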
status = db->userCreateNS(opCtx, ns, viewOptions); if (!status.isOK()) { - return status.withContext(str::stream() << "Failed to create view on " << bucketsNs - << " for time-series collection " << ns - << " with options " << viewOptions.toBSON()); + return status.withContext( + str::stream() << "Failed to create view on " << bucketsNs.toStringForErrorMsg() + << " for time-series collection " << ns.toStringForErrorMsg() + << " with options " << viewOptions.toBSON()); } wuow.commit(); @@ -545,8 +621,13 @@ Status _createCollection( const CollectionOptions& collectionOptions, const boost::optional& idIndex, const boost::optional& virtualCollectionOptions = boost::none) { - return writeConflictRetry(opCtx, "create", nss.ns(), [&] { - AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IX); + return writeConflictRetry(opCtx, "create", nss, [&] { + // If a change collection is to be created, that is, the change streams are being enabled + // for a tenant, acquire exclusive tenant lock. + AutoGetDb autoDb(opCtx, + nss.dbName(), + MODE_IX /* database lock mode*/, + boost::make_optional(nss.tenantId() && nss.isChangeCollection(), MODE_X)); Lock::CollectionLock collLock(opCtx, nss, MODE_IX); auto db = autoDb.ensureDbExists(opCtx); @@ -591,7 +672,8 @@ Status _createCollection( if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) { return Status(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while creating collection " << nss); + str::stream() << "Not primary while creating collection " + << nss.toStringForErrorMsg()); } CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss) @@ -703,6 +785,18 @@ Status createCollection(OperationContext* opCtx, } } // namespace +Status createTimeseries(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& options) { + StatusWith statusWith = + CollectionOptions::parse(options, CollectionOptions::parseForCommand); + if (!statusWith.isOK()) { + return statusWith.getStatus(); + } + auto collectionOptions = statusWith.getValue(); + return _createTimeseries(opCtx, ns, collectionOptions); +} + Status createCollection(OperationContext* opCtx, const DatabaseName& dbName, const BSONObj& cmdObj, @@ -768,9 +862,10 @@ Status createCollectionForApplyOps(OperationContext* opCtx, "conflictingUUID"_attr = uuid, "existingCollection"_attr = *currentName); return Status(ErrorCodes::NamespaceExists, - str::stream() << "existing collection " << currentName->toString() - << " with conflicting UUID " << uuid.toString() - << " is in a drop-pending state."); + str::stream() + << "existing collection " << currentName->toStringForErrorMsg() + << " with conflicting UUID " << uuid.toString() + << " is in a drop-pending state."); } // In the case of oplog replay, a future command may have created or renamed a @@ -781,8 +876,9 @@ Status createCollectionForApplyOps(OperationContext* opCtx, auto futureColl = db ? catalog->lookupCollectionByNamespace(opCtx, newCollName) : nullptr; bool needsRenaming(futureColl); invariant(!needsRenaming || allowRenameOutOfTheWay, - str::stream() << "Current collection name: " << currentName << ", UUID: " << uuid - << ". Future collection name: " << newCollName); + str::stream() << "Current collection name: " << currentName->toStringForErrorMsg() + << ", UUID: " << uuid << ". 
Future collection name: " + << newCollName.toStringForErrorMsg()); for (int tries = 0; needsRenaming && tries < 10; ++tries) { auto tmpNameResult = makeUniqueCollectionName(opCtx, dbName, "tmp%%%%%.create"); @@ -791,7 +887,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx, << "Cannot generate temporary " "collection namespace for applyOps " "create command: collection: " - << newCollName); + << newCollName.toStringForErrorMsg()); } const auto& tmpName = tmpNameResult.getValue(); @@ -809,7 +905,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx, "conflictingUUID"_attr = uuid, "tempName"_attr = tmpName); Status status = - writeConflictRetry(opCtx, "createCollectionForApplyOps", newCollName.ns(), [&] { + writeConflictRetry(opCtx, "createCollectionForApplyOps", newCollName, [&] { WriteUnitOfWork wuow(opCtx); Status status = db->renameCollection(opCtx, newCollName, tmpName, stayTemp); if (!status.isOK()) @@ -821,7 +917,8 @@ Status createCollectionForApplyOps(OperationContext* opCtx, uuid, /*dropTargetUUID*/ {}, /*numRecords*/ 0U, - stayTemp); + stayTemp, + /*markFromMigrate=*/false); wuow.commit(); // Re-fetch collection after commit to get a valid pointer @@ -848,7 +945,7 @@ Status createCollectionForApplyOps(OperationContext* opCtx, str::stream() << "Cannot generate temporary " "collection namespace for applyOps " "create command: collection: " - << newCollName); + << newCollName.toStringForErrorMsg()); } // If the collection with the requested UUID already exists, but with a different @@ -856,9 +953,10 @@ Status createCollectionForApplyOps(OperationContext* opCtx, if (catalog->lookupCollectionByUUID(opCtx, uuid)) { invariant(currentName); uassert(40655, - str::stream() << "Invalid name " << newCollName << " for UUID " << uuid, + str::stream() << "Invalid name " << newCollName.toStringForErrorMsg() + << " for UUID " << uuid, currentName->db() == newCollName.db()); - return writeConflictRetry(opCtx, "createCollectionForApplyOps", newCollName.ns(), [&] { + return writeConflictRetry(opCtx, "createCollectionForApplyOps", newCollName, [&] { WriteUnitOfWork wuow(opCtx); Status status = db->renameCollection(opCtx, *currentName, newCollName, stayTemp); if (!status.isOK()) @@ -869,7 +967,8 @@ Status createCollectionForApplyOps(OperationContext* opCtx, uuid, /*dropTargetUUID*/ {}, /*numRecords*/ 0U, - stayTemp); + stayTemp, + /*markFromMigrate=*/false); wuow.commit(); return Status::OK(); @@ -926,7 +1025,7 @@ Status createCollection(OperationContext* opCtx, return _createTimeseries(opCtx, ns, options); } else { uassert(ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Cannot create system collection " << ns + str::stream() << "Cannot create system collection " << ns.toStringForErrorMsg() << " within a transaction.", !opCtx->inMultiDocumentTransaction() || !ns.isSystem()); return _createCollection(opCtx, ns, options, idIndex); diff --git a/src/mongo/db/catalog/create_collection.h b/src/mongo/db/catalog/create_collection.h index 655644c4e36c2..d78d92cc560df 100644 --- a/src/mongo/db/catalog/create_collection.h +++ b/src/mongo/db/catalog/create_collection.h @@ -29,7 +29,9 @@ #pragma once +#include #include +#include #include #include "mongo/base/status.h" @@ -37,6 +39,10 @@ #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/virtual_collection_options.h" #include "mongo/db/commands/create_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include 
"mongo/util/uuid.h" namespace mongo { @@ -86,5 +92,8 @@ Status createCollectionForApplyOps(OperationContext* opCtx, const BSONObj& cmdObj, bool allowRenameOutOfTheWay, const boost::optional& idIndex = boost::none); - +/** + * Creates a time-series collection as described in 'option' on the namespace 'ns'. + */ +Status createTimeseries(OperationContext* opCtx, const NamespaceString& ns, const BSONObj& options); } // namespace mongo diff --git a/src/mongo/db/catalog/create_collection_test.cpp b/src/mongo/db/catalog/create_collection_test.cpp index 1d443428ad82d..a4d0cc956aa18 100644 --- a/src/mongo/db/catalog/create_collection_test.cpp +++ b/src/mongo/db/catalog/create_collection_test.cpp @@ -27,22 +27,55 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include + +#include + #include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/virtual_collection_impl.h" #include "mongo/db/catalog/virtual_collection_options.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/external_data_source_option_gen.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/stdx/utility.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/uuid.h" namespace mongo { @@ -112,11 +145,12 @@ void CreateCollectionTest::validateValidator(const std::string& validatorStr, options.validator = fromjson(validatorStr); options.uuid = UUID::gen(); - return writeConflictRetry(opCtx.get(), "create", newNss.ns(), [&] { + return writeConflictRetry(opCtx.get(), "create", newNss, [&] { AutoGetCollection autoColl(opCtx.get(), newNss, MODE_IX); auto db = autoColl.ensureDbExists(opCtx.get()); - ASSERT_TRUE(db) << "Cannot create collection " << newNss << " because database " - << newNss.dbName().toStringForErrorMsg() << " does not exist."; + ASSERT_TRUE(db) << "Cannot create collection " << newNss.toStringForErrorMsg() + << " because database " << newNss.dbName().toStringForErrorMsg() + << " does not exist."; WriteUnitOfWork wuow(opCtx.get()); const auto status = @@ -137,7 +171,7 @@ bool collectionExists(OperationContext* opCtx, const NamespaceString& nss) { */ 
CollectionOptions getCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) { AutoGetCollectionForRead collection(opCtx, nss); - ASSERT_TRUE(collection) << "Unable to get collections options for " << nss + ASSERT_TRUE(collection) << "Unable to get collections options for " << nss.toStringForErrorMsg() << " because collection does not exist."; return collection->getCollectionOptions(); } @@ -252,7 +286,7 @@ TEST_F(CreateCollectionTest, ASSERT(renamedCollectionNss); ASSERT_TRUE(collectionExists(opCtx.get(), *renamedCollectionNss)) << "old renamed collection with UUID " << existingCollectionUuid - << " missing: " << *renamedCollectionNss; + << " missing: " << (*renamedCollectionNss).toStringForErrorMsg(); } TEST_F(CreateCollectionTest, @@ -288,6 +322,33 @@ TEST_F(CreateCollectionTest, ASSERT_FALSE(collectionExists(opCtx.get(), newNss)); } +TEST_F(CreateCollectionTest, TimeseriesBucketingParametersChangedFlagTrue) { + NamespaceString curNss = NamespaceString::createNamespaceString_forTest("test.curColl"); + auto bucketsColl = + NamespaceString::createNamespaceString_forTest("test.system.buckets.curColl"); + + auto opCtx = makeOpCtx(); + auto tsOptions = TimeseriesOptions("t"); + CreateCommand cmd = CreateCommand(curNss); + cmd.setTimeseries(std::move(tsOptions)); + uassertStatusOK(createCollection(opCtx.get(), cmd)); + + ASSERT_TRUE(collectionExists(opCtx.get(), bucketsColl)); + AutoGetCollectionForRead bucketsCollForRead(opCtx.get(), bucketsColl); + ASSERT_FALSE(bucketsCollForRead->timeseriesBucketingParametersMayHaveChanged()); +} + +TEST_F(CreateCollectionTest, TimeseriesBucketingParametersChangedFlagFalse) { + NamespaceString curNss = NamespaceString::createNamespaceString_forTest("test.curColl"); + + auto opCtx = makeOpCtx(); + uassertStatusOK(createCollection(opCtx.get(), CreateCommand(curNss))); + + ASSERT_TRUE(collectionExists(opCtx.get(), curNss)); + AutoGetCollectionForRead collForRead(opCtx.get(), curNss); + ASSERT_TRUE(collForRead->timeseriesBucketingParametersMayHaveChanged()); +} + TEST_F(CreateCollectionTest, ValidationOptions) { // Try a valid validator before trying invalid validators. validateValidator("", static_cast(ErrorCodes::Error::OK)); diff --git a/src/mongo/db/catalog/database.h b/src/mongo/db/catalog/database.h index 8d3f410ae9fd0..881a9021d3a35 100644 --- a/src/mongo/db/catalog/database.h +++ b/src/mongo/db/catalog/database.h @@ -39,7 +39,9 @@ #include "mongo/db/database_name.h" #include "mongo/db/dbcommands_gen.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" +#include "mongo/util/decorable.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/catalog/database_holder.cpp b/src/mongo/db/catalog/database_holder.cpp index 4d6c60212bfca..14f55cdab6360 100644 --- a/src/mongo/db/catalog/database_holder.cpp +++ b/src/mongo/db/catalog/database_holder.cpp @@ -27,9 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/db/catalog/database_holder.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/catalog/database_holder.h b/src/mongo/db/catalog/database_holder.h index 628995ee1ae4c..b8cba39ad954d 100644 --- a/src/mongo/db/catalog/database_holder.h +++ b/src/mongo/db/catalog/database_holder.h @@ -29,12 +29,17 @@ #pragma once +#include +#include #include #include +#include #include "mongo/base/string_data.h" #include "mongo/db/catalog/database.h" #include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { @@ -105,9 +110,10 @@ class DatabaseHolder { virtual void closeAll(OperationContext* opCtx) = 0; /** - * Returns the set of existing database names that differ only in casing. + * Returns the name of the database with conflicting casing if one exists. */ - virtual std::set getNamesWithConflictingCasing(const DatabaseName& dbName) = 0; + virtual boost::optional getNameWithConflictingCasing( + const DatabaseName& dbName) = 0; /** * Returns all the database names (including those which are empty). diff --git a/src/mongo/db/catalog/database_holder_impl.cpp b/src/mongo/db/catalog/database_holder_impl.cpp index 835fd7bb8de3e..ae48ebbfdea2a 100644 --- a/src/mongo/db/catalog/database_holder_impl.cpp +++ b/src/mongo/db/catalog/database_holder_impl.cpp @@ -29,27 +29,53 @@ #include "mongo/db/catalog/database_holder_impl.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/audit.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/database_impl.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" #include "mongo/db/stats/top.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage namespace mongo { Database* DatabaseHolderImpl::getDb(OperationContext* opCtx, const DatabaseName& dbName) const { - uassert( - 13280, - "invalid db name: " + dbName.toStringForErrorMsg(), - NamespaceString::validDBName(dbName.db(), NamespaceString::DollarInDbNameBehavior::Allow)); + uassert(13280, + "invalid db name: " + dbName.toStringForErrorMsg(), + NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)); invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_IS) || (dbName.db().compare("local") == 0 && opCtx->lockState()->isLocked())); @@ -64,31 +90,29 @@ Database* DatabaseHolderImpl::getDb(OperationContext* opCtx, const DatabaseName& } bool DatabaseHolderImpl::dbExists(OperationContext* opCtx, const DatabaseName& dbName) const { - uassert( - 6198702, - 
"invalid db name: " + dbName.toStringForErrorMsg(), - NamespaceString::validDBName(dbName.db(), NamespaceString::DollarInDbNameBehavior::Allow)); + uassert(6198702, + "invalid db name: " + dbName.toStringForErrorMsg(), + NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)); stdx::lock_guard lk(_m); auto it = _dbs.find(dbName); return it != _dbs.end() && it->second != nullptr; } -std::set DatabaseHolderImpl::_getNamesWithConflictingCasing_inlock( +boost::optional DatabaseHolderImpl::_getNameWithConflictingCasing_inlock( const DatabaseName& dbName) { - std::set duplicates; - for (const auto& nameAndPointer : _dbs) { - // A name that's equal with case-insensitive match must be identical, or it's a duplicate. + // A case insensitive match indicates that 'dbName' is a duplicate of an existing database. if (dbName.equalCaseInsensitive(nameAndPointer.first) && dbName != nameAndPointer.first) - duplicates.insert(nameAndPointer.first); + return nameAndPointer.first; } - return duplicates; + + return boost::none; } -std::set DatabaseHolderImpl::getNamesWithConflictingCasing( +boost::optional DatabaseHolderImpl::getNameWithConflictingCasing( const DatabaseName& dbName) { stdx::lock_guard lk(_m); - return _getNamesWithConflictingCasing_inlock(dbName); + return _getNameWithConflictingCasing_inlock(dbName); } std::vector DatabaseHolderImpl::getNames() { @@ -103,10 +127,9 @@ std::vector DatabaseHolderImpl::getNames() { Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, const DatabaseName& dbName, bool* justCreated) { - uassert( - 6198701, - "invalid db name: " + dbName.toStringForErrorMsg(), - NamespaceString::validDBName(dbName.db(), NamespaceString::DollarInDbNameBehavior::Allow)); + uassert(6198701, + "invalid db name: " + dbName.toStringForErrorMsg(), + NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)); invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_IX)); if (justCreated) @@ -115,7 +138,7 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, stdx::unique_lock lk(_m); // The following will insert a nullptr for dbname, which will treated the same as a non- - // existant database by the get method, yet still counts in getNamesWithConflictingCasing. + // existant database by the get method, yet still counts in getNameWithConflictingCasing. if (auto db = _dbs[dbName]) return db; @@ -131,19 +154,19 @@ Database* DatabaseHolderImpl::openDb(OperationContext* opCtx, }); // Check casing in lock to avoid transient duplicates. - auto duplicates = _getNamesWithConflictingCasing_inlock(dbName); + auto duplicate = _getNameWithConflictingCasing_inlock(dbName); uassert(ErrorCodes::DatabaseDifferCase, str::stream() << "db already exists with different case already have: [" - << (*duplicates.cbegin()) << "] trying to create [" + << duplicate->toStringForErrorMsg() << "] trying to create [" << dbName.toStringForErrorMsg() << "]", - duplicates.empty()); + !duplicate); // Do the catalog lookup and database creation outside of the scoped lock, because these may // block. 
lk.unlock(); if (CollectionCatalog::get(opCtx)->getAllCollectionUUIDsFromDb(dbName).empty()) { - audit::logCreateDatabase(opCtx->getClient(), dbName.toString()); + audit::logCreateDatabase(opCtx->getClient(), dbName); if (justCreated) *justCreated = true; } @@ -179,8 +202,7 @@ void DatabaseHolderImpl::dropDb(OperationContext* opCtx, Database* db) { invariant(opCtx->lockState()->isDbLockedForMode(name, MODE_X)); auto catalog = CollectionCatalog::get(opCtx); - for (auto collIt = catalog->begin(opCtx, name); collIt != catalog->end(opCtx); ++collIt) { - auto coll = *collIt; + for (auto&& coll : catalog->range(name)) { if (!coll) { break; } @@ -188,15 +210,15 @@ void DatabaseHolderImpl::dropDb(OperationContext* opCtx, Database* db) { // It is the caller's responsibility to ensure that no index builds are active in the // database. invariant(!coll->getIndexCatalog()->haveAnyIndexesInProgress(), - str::stream() << "An index is building on collection '" << coll->ns() << "'."); + str::stream() << "An index is building on collection '" + << coll->ns().toStringForErrorMsg() << "'."); } - audit::logDropDatabase(opCtx->getClient(), name.toString()); + audit::logDropDatabase(opCtx->getClient(), name); auto const serviceContext = opCtx->getServiceContext(); - for (auto collIt = catalog->begin(opCtx, name); collIt != catalog->end(opCtx); ++collIt) { - auto coll = *collIt; + for (auto&& coll : catalog->range(name)) { if (!coll) { break; } @@ -212,7 +234,8 @@ void DatabaseHolderImpl::dropDb(OperationContext* opCtx, Database* db) { coll->ns(), coll->uuid(), coll->numRecords(opCtx), - OpObserver::CollectionDropType::kOnePhase); + OpObserver::CollectionDropType::kOnePhase, + /*markFromMigrate=*/false); } Top::get(serviceContext).collectionDropped(coll->ns()); @@ -230,16 +253,15 @@ void DatabaseHolderImpl::dropDb(OperationContext* opCtx, Database* db) { }); auto const storageEngine = serviceContext->getStorageEngine(); - writeConflictRetry(opCtx, "dropDatabase", name.toString(), [&] { + writeConflictRetry(opCtx, "dropDatabase", NamespaceString(name), [&] { storageEngine->dropDatabase(opCtx, name).transitional_ignore(); }); } void DatabaseHolderImpl::close(OperationContext* opCtx, const DatabaseName& dbName) { - uassert( - 6198700, - "invalid db name: " + dbName.toStringForErrorMsg(), - NamespaceString::validDBName(dbName.db(), NamespaceString::DollarInDbNameBehavior::Allow)); + uassert(6198700, + "invalid db name: " + dbName.toStringForErrorMsg(), + NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)); invariant(opCtx->lockState()->isDbLockedForMode(dbName, MODE_X)); stdx::lock_guard lk(_m); diff --git a/src/mongo/db/catalog/database_holder_impl.h b/src/mongo/db/catalog/database_holder_impl.h index 1c04808db430d..86305cfeda1b9 100644 --- a/src/mongo/db/catalog/database_holder_impl.h +++ b/src/mongo/db/catalog/database_holder_impl.h @@ -29,9 +29,14 @@ #pragma once -#include "mongo/db/catalog/database_holder.h" +#include +#include +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/concurrency/mutex.h" #include "mongo/util/string_map.h" @@ -55,12 +60,12 @@ class DatabaseHolderImpl : public DatabaseHolder { void closeAll(OperationContext* opCtx) override; - std::set getNamesWithConflictingCasing(const DatabaseName& dbName) override; + boost::optional getNameWithConflictingCasing(const DatabaseName& dbName) 
override; std::vector getNames() override; private: - std::set _getNamesWithConflictingCasing_inlock(const DatabaseName& dbName); + boost::optional _getNameWithConflictingCasing_inlock(const DatabaseName& dbName); typedef stdx::unordered_map DBs; mutable SimpleMutex _m; diff --git a/src/mongo/db/catalog/database_holder_mock.h b/src/mongo/db/catalog/database_holder_mock.h index d21558b4be014..af29483734ca5 100644 --- a/src/mongo/db/catalog/database_holder_mock.h +++ b/src/mongo/db/catalog/database_holder_mock.h @@ -30,6 +30,7 @@ #pragma once #include "mongo/db/catalog/database_holder.h" +#include namespace mongo { @@ -57,8 +58,9 @@ class DatabaseHolderMock : public DatabaseHolder { void closeAll(OperationContext* opCtx) override {} - std::set getNamesWithConflictingCasing(const DatabaseName& dbName) override { - return std::set(); + boost::optional getNameWithConflictingCasing( + const DatabaseName& dbName) override { + return boost::none; } std::vector getNames() override { diff --git a/src/mongo/db/catalog/database_impl.cpp b/src/mongo/db/catalog/database_impl.cpp index 47dc2b10a4660..1d24df901ab24 100644 --- a/src/mongo/db/catalog/database_impl.cpp +++ b/src/mongo/db/catalog/database_impl.cpp @@ -31,53 +31,86 @@ #include #include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/audit.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_catalog_helper.h" -#include "mongo/db/catalog/collection_impl.h" #include "mongo/db/catalog/collection_options.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/drop_indexes.h" +#include "mongo/db/catalog/collection_options_gen.h" +#include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/uncommitted_catalog_updates.h" #include "mongo/db/catalog/virtual_collection_impl.h" #include "mongo/db/catalog/virtual_collection_options.h" -#include "mongo/db/clientcursor.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" -#include "mongo/db/index/index_access_method.h" -#include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/introspect.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/oplog.h" -#include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_decoration.h" -#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/service_context.h" +#include "mongo/db/stats/counters.h" #include 
"mongo/db/stats/top.h" #include "mongo/db/storage/durable_catalog.h" -#include "mongo/db/storage/historical_ident_tracker.h" +#include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_util.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/system_index.h" +#include "mongo/db/views/resolved_view.h" +#include "mongo/db/views/view.h" #include "mongo/db/views/view_catalog_helpers.h" #include "mongo/logv2/log.h" -#include "mongo/platform/random.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/ctype.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/synchronized_value.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -88,7 +121,6 @@ MONGO_FAIL_POINT_DEFINE(throwWCEDuringTxnCollCreate); MONGO_FAIL_POINT_DEFINE(hangBeforeLoggingCreateCollection); MONGO_FAIL_POINT_DEFINE(hangAndFailAfterCreateCollectionReservesOpTime); MONGO_FAIL_POINT_DEFINE(openCreateCollectionWindowFp); -MONGO_FAIL_POINT_DEFINE(allowSystemViewsDrop); // When active, a column index will be created for all new collections. This is used for the column // index JS test passthrough suite. Other passthroughs work by overriding javascript methods on the @@ -123,7 +155,7 @@ void assertNoMovePrimaryInProgress(OperationContext* opCtx, NamespaceString cons LOGV2(4909100, "assertNoMovePrimaryInProgress", logAttrs(nss)); uasserted(ErrorCodes::MovePrimaryInProgress, - "movePrimary is in progress for namespace " + nss.toString()); + "movePrimary is in progress for namespace " + nss.toStringForErrorMsg()); } } @@ -135,11 +167,12 @@ static const BSONObj kColumnStoreSpec = BSON("name" << "v" << 2); } // namespace -Status DatabaseImpl::validateDBName(StringData dbname) { +Status DatabaseImpl::validateDBName(const DatabaseName& dbName) { + const auto dbname = DatabaseNameUtil::serializeForCatalog(dbName); if (dbname.size() <= 0) return Status(ErrorCodes::BadValue, "db name is empty"); - if (dbname.size() >= 64) + if (dbname.size() > DatabaseName::kMaxDatabaseNameLength) return Status(ErrorCodes::BadValue, "db name is too long"); if (dbname.find('.') != std::string::npos) @@ -159,7 +192,7 @@ DatabaseImpl::DatabaseImpl(const DatabaseName& dbName) : _name(dbName), _viewsName(NamespaceString::makeSystemDotViewsNamespace(_name)) {} void DatabaseImpl::init(OperationContext* const opCtx) { - Status status = validateDBName(_name.db()); + Status status = validateDBName(_name); if (!status.isOK()) { LOGV2_WARNING( @@ -368,34 +401,8 @@ Status DatabaseImpl::dropCollection(OperationContext* opCtx, invariant(nss.dbName() == _name); - // Returns true if the supplied namespace 'nss' is a system collection that can be dropped, - // false otherwise. 
- auto isDroppableSystemCollection = [](const auto& nss) { - return nss.isHealthlog() || nss == NamespaceString::kLogicalSessionsNamespace || - nss == NamespaceString::kKeysCollectionNamespace || - nss.isTemporaryReshardingCollection() || nss.isTimeseriesBucketsCollection() || - nss.isChangeStreamPreImagesCollection() || - nss == NamespaceString::kConfigsvrRestoreNamespace || nss.isChangeCollection() || - nss.isSystemDotJavascript() || nss.isSystemStatsCollection(); - }; - - if (nss.isSystem()) { - if (nss.isSystemDotProfile()) { - if (catalog->getDatabaseProfileLevel(_name) != 0) - return Status(ErrorCodes::IllegalOperation, - "turn off profiling before dropping system.profile collection"); - } else if (nss.isSystemDotViews()) { - if (!MONGO_unlikely(allowSystemViewsDrop.shouldFail())) { - const auto viewStats = catalog->getViewStatsForDatabase(opCtx, _name); - uassert(ErrorCodes::CommandFailed, - str::stream() << "cannot drop collection " << nss - << " when time-series collections are present.", - viewStats && viewStats->userTimeseries == 0); - } - } else if (!isDroppableSystemCollection(nss)) { - return Status(ErrorCodes::IllegalOperation, - str::stream() << "can't drop system collection " << nss); - } + if (auto droppable = isDroppableCollection(opCtx, nss); !droppable.isOK()) { + return droppable; } return dropCollectionEvenIfSystem(opCtx, nss, dropOpTime, markFromMigrate); @@ -430,8 +437,8 @@ Status DatabaseImpl::dropCollectionEvenIfSystem(OperationContext* opCtx, // Use massert() to be consistent with IndexCatalog::dropAllIndexes(). auto numIndexesInProgress = collection->getIndexCatalog()->numIndexesInProgress(); massert(ErrorCodes::BackgroundOperationInProgressForNamespace, - str::stream() << "cannot drop collection " << nss << " (" << uuid << ") when " - << numIndexesInProgress << " index builds in progress.", + str::stream() << "cannot drop collection " << nss.toStringForErrorMsg() << " (" << uuid + << ") when " << numIndexesInProgress << " index builds in progress.", numIndexesInProgress == 0); audit::logDropCollection(opCtx->getClient(), nss); @@ -589,16 +596,6 @@ Status DatabaseImpl::_finishDropCollection(OperationContext* opCtx, opCtx, collection->ns(), collection->getCatalogId(), sharedIdent); if (!status.isOK()) return status; - - opCtx->recoveryUnit()->onCommit( - [nss, uuid, ident = sharedIdent->getIdent()](OperationContext* opCtx, - boost::optional commitTime) { - if (!commitTime) { - return; - } - - HistoricalIdentTracker::get(opCtx).recordDrop(ident, nss, uuid, commitTime.value()); - }); } CollectionCatalog::get(opCtx)->dropCollection( @@ -621,7 +618,8 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx, invariant(toNss.dbName() == _name); if (CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, toNss)) { return Status(ErrorCodes::NamespaceExists, - str::stream() << "Cannot rename '" << fromNss << "' to '" << toNss + str::stream() << "Cannot rename '" << fromNss.toStringForErrorMsg() + << "' to '" << toNss.toStringForErrorMsg() << "' because the destination namespace already exists"); } @@ -650,30 +648,6 @@ Status DatabaseImpl::renameCollection(OperationContext* opCtx, return status; CollectionCatalog::get(opCtx)->onCollectionRename(opCtx, writableCollection, fromNss); - - opCtx->recoveryUnit()->onCommit([fromNss, - writableCollection](OperationContext* opCtx, - boost::optional commitTime) { - if (!commitTime) { - return; - } - - HistoricalIdentTracker::get(opCtx).recordRename( - writableCollection->getSharedIdent()->getIdent(), - fromNss, - 
writableCollection->uuid(), - commitTime.value()); - - const auto readyIndexes = writableCollection->getIndexCatalog()->getAllReadyEntriesShared(); - for (const auto& readyIndex : readyIndexes) { - HistoricalIdentTracker::get(opCtx).recordRename( - readyIndex->getIdent(), fromNss, writableCollection->uuid(), commitTime.value()); - } - - // Ban reading from this collection on committed reads on snapshots before now. - writableCollection->setMinimumVisibleSnapshot(commitTime.value()); - }); - return status; } @@ -683,11 +657,12 @@ void DatabaseImpl::_checkCanCreateCollection(OperationContext* opCtx, if (CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)) { if (options.isView()) { uasserted(ErrorCodes::NamespaceExists, - str::stream() - << "Cannot create collection " << nss << " - collection already exists."); + str::stream() << "Cannot create collection " << nss.toStringForErrorMsg() + << " - collection already exists."); } else { - throwWriteConflictException(str::stream() << "Collection namespace '" << nss.ns() - << "' is already in use."); + throwWriteConflictException(str::stream() + << "Collection namespace '" << nss.toStringForErrorMsg() + << "' is already in use."); } } @@ -701,7 +676,7 @@ void DatabaseImpl::_checkCanCreateCollection(OperationContext* opCtx, uassert(28838, "cannot create a non-capped oplog collection", options.capped || !nss.isOplog()); uassert(ErrorCodes::DatabaseDropPending, - str::stream() << "Cannot create collection " << nss + str::stream() << "Cannot create collection " << nss.toStringForErrorMsg() << " - database is in the process of being dropped.", !_dropPending.load()); } @@ -722,7 +697,8 @@ Status DatabaseImpl::createView(OperationContext* opCtx, auto status = Status::OK(); if (viewName.isOplog()) { status = {ErrorCodes::InvalidNamespace, - str::stream() << "invalid namespace name for a view: " + viewName.toString()}; + str::stream() << "invalid namespace name for a view: " + + viewName.toStringForErrorMsg()}; } else { status = CollectionCatalog::get(opCtx)->createView(opCtx, viewName, @@ -732,8 +708,11 @@ Status DatabaseImpl::createView(OperationContext* opCtx, options.collation); } - audit::logCreateView( - opCtx->getClient(), viewName, viewOnNss.toString(), pipeline, status.code()); + audit::logCreateView(opCtx->getClient(), + viewName, + NamespaceStringUtil::serialize(viewOnNss), + pipeline, + status.code()); return status; } @@ -769,10 +748,8 @@ Collection* DatabaseImpl::createVirtualCollection(OperationContext* opCtx, * it. 
*/ bool doesCollectionModificationsUpdateIndexes(const NamespaceString& nss) { - const auto& collName = nss.ns(); - return collName != "config.transactions" && - collName != - repl::ReplicationConsistencyMarkersImpl::kDefaultOplogTruncateAfterPointNamespace; + return nss != NamespaceString::kSessionTransactionsTableNamespace && + nss != NamespaceString::kDefaultOplogTruncateAfterPointNamespace; } Collection* DatabaseImpl::_createCollection( @@ -827,8 +804,8 @@ Collection* DatabaseImpl::_createCollection( uasserted(51267, "hangAndFailAfterCreateCollectionReservesOpTime fail point enabled"); }, [&](const BSONObj& data) { - auto fpNss = data["nss"].str(); - return fpNss.empty() || fpNss == nss.toString(); + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd); + return fpNss.isEmpty() || fpNss == nss; }); _checkCanCreateCollection(opCtx, nss, optionsWithUUID); @@ -851,13 +828,23 @@ Collection* DatabaseImpl::_createCollection( // Create Collection object auto ownedCollection = [&]() -> std::shared_ptr { if (!vopts) { + if (CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)) { + throwWriteConflictException(str::stream() + << "Namespace '" << nss.toStringForErrorMsg() + << "' is already in use."); + } + auto storageEngine = opCtx->getServiceContext()->getStorageEngine(); std::pair> catalogIdRecordStorePair = uassertStatusOK(storageEngine->getCatalog()->createCollection( opCtx, nss, optionsWithUUID, true /*allocateDefaultSpace*/)); auto& catalogId = catalogIdRecordStorePair.first; + + auto catalogEntry = DurableCatalog::get(opCtx)->getParsedCatalogEntry(opCtx, catalogId); + auto metadata = catalogEntry->metadata; + return Collection::Factory::get(opCtx)->make( - opCtx, nss, catalogId, optionsWithUUID, std::move(catalogIdRecordStorePair.second)); + opCtx, nss, catalogId, metadata, std::move(catalogIdRecordStorePair.second)); } else { // Virtual collection stays only in memory and its metadata need not persist on disk and // therefore we bypass DurableCatalog. @@ -866,14 +853,14 @@ Collection* DatabaseImpl::_createCollection( }(); auto collection = ownedCollection.get(); ownedCollection->init(opCtx); - ownedCollection->setCommitted(false); CollectionCatalog::get(opCtx)->onCreateCollection(opCtx, std::move(ownedCollection)); - openCreateCollectionWindowFp.executeIf([&](const BSONObj& data) { sleepsecs(3); }, - [&](const BSONObj& data) { - const auto collElem = data["collectionNS"]; - return !collElem || nss.toString() == collElem.str(); - }); + openCreateCollectionWindowFp.executeIf( + [&](const BSONObj& data) { sleepsecs(3); }, + [&](const BSONObj& data) { + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "collectionNS"_sd); + return fpNss.isEmpty() || nss == fpNss; + }); BSONObj fullIdIndexSpec; @@ -892,8 +879,8 @@ Collection* DatabaseImpl::_createCollection( } else { // autoIndexId: false is only allowed on unreplicated collections. 
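The fail points in _createCollection now scope themselves to a namespace by parsing the fail point payload with NamespaceStringUtil::parseFailPointData() and comparing NamespaceString values directly, rather than matching serialized strings. A hedged sketch of that executeIf shape, using only the calls that appear in the hunk (the fail point name and surrounding variables are placeholders):

```cpp
// Sketch only: assumes MongoDB's FailPoint::executeIf and NamespaceStringUtil as used above.
myFailPoint.executeIf(
    [&](const BSONObj&) {
        // Side effect to inject when the predicate below matches.
    },
    [&](const BSONObj& data) {
        // An empty/missing "nss" field means "match every namespace"; otherwise require a
        // typed, tenant-aware match against the collection being created.
        const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd);
        return fpNss.isEmpty() || fpNss == nss;
    });
```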
uassert(50001, - str::stream() << "autoIndexId:false is not allowed for collection " << nss - << " because it can be replicated", + str::stream() << "autoIndexId:false is not allowed for collection " + << nss.toStringForErrorMsg() << " because it can be replicated", !nss.isReplicated()); } } @@ -968,8 +955,9 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx, "create collection {namespace} {collectionOptions}", logAttrs(nss), "collectionOptions"_attr = collectionOptions.toBSON()); - if (!NamespaceString::validCollectionComponent(nss.ns())) - return Status(ErrorCodes::InvalidNamespace, str::stream() << "invalid ns: " << nss); + if (!NamespaceString::validCollectionComponent(nss)) + return Status(ErrorCodes::InvalidNamespace, + str::stream() << "invalid ns: " << nss.toStringForErrorMsg()); // Validate the collation, if there is one. auto swCollator = _validateCollator(opCtx, collectionOptions); @@ -1050,7 +1038,8 @@ Status DatabaseImpl::userCreateNS(OperationContext* opCtx, } else { invariant(_createCollection( opCtx, nss, collectionOptions, createDefaultIndexes, idIndex, fromMigrate), - str::stream() << "Collection creation failed after validating options: " << nss + str::stream() << "Collection creation failed after validating options: " + << nss.toStringForErrorMsg() << ". Options: " << collectionOptions.toBSON()); } @@ -1066,8 +1055,9 @@ Status DatabaseImpl::userCreateVirtualNS(OperationContext* opCtx, "create collection {namespace} {collectionOptions}", logAttrs(nss), "collectionOptions"_attr = opts.toBSON()); - if (!NamespaceString::validCollectionComponent(nss.ns())) - return Status(ErrorCodes::InvalidNamespace, str::stream() << "invalid ns: " << nss); + if (!NamespaceString::validCollectionComponent(nss)) + return Status(ErrorCodes::InvalidNamespace, + str::stream() << "invalid ns: " << nss.toStringForErrorMsg()); // Validate the collation, if there is one. if (auto swCollator = _validateCollator(opCtx, opts); !swCollator.isOK()) { @@ -1081,8 +1071,8 @@ Status DatabaseImpl::userCreateVirtualNS(OperationContext* opCtx, /*idIndex=*/BSONObj(), /*fromMigrate=*/false, vopts), - str::stream() << "Collection creation failed after validating options: " << nss - << ". Options: " << opts.toBSON()); + str::stream() << "Collection creation failed after validating options: " + << nss.toStringForErrorMsg() << ". 
Options: " << opts.toBSON()); return Status::OK(); } diff --git a/src/mongo/db/catalog/database_impl.h b/src/mongo/db/catalog/database_impl.h index 12c77ba766345..495dcec8d5905 100644 --- a/src/mongo/db/catalog/database_impl.h +++ b/src/mongo/db/catalog/database_impl.h @@ -29,8 +29,25 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/virtual_collection_options.h" #include "mongo/db/database_name.h" +#include "mongo/db/dbcommands_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/optime.h" +#include "mongo/platform/atomic_word.h" namespace mongo { @@ -110,7 +127,7 @@ class DatabaseImpl final : public Database { NamespaceString toNss, bool stayTemp) const final; - static Status validateDBName(StringData dbname); + static Status validateDBName(const DatabaseName& dbname); const NamespaceString& getSystemViewsName() const final { return _viewsName; diff --git a/src/mongo/db/catalog/database_test.cpp b/src/mongo/db/catalog/database_test.cpp index 80a32d017915d..37ebeb1a02f77 100644 --- a/src/mongo/db/catalog/database_test.cpp +++ b/src/mongo/db/catalog/database_test.cpp @@ -27,17 +27,40 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_build_block.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/unique_collection_name.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" @@ -45,16 +68,27 @@ #include "mongo/db/op_observer/oplog_writer_mock.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include 
"mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/pcre.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -114,7 +148,7 @@ void DatabaseTest::tearDown() { } TEST_F(DatabaseTest, SetDropPendingThrowsExceptionIfDatabaseIsAlreadyInADropPendingState) { - writeConflictRetry(_opCtx.get(), "testSetDropPending", _nss.ns(), [this] { + writeConflictRetry(_opCtx.get(), "testSetDropPending", _nss, [this] { AutoGetDb autoDb(_opCtx.get(), _nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(_opCtx.get()); ASSERT_TRUE(db); @@ -137,7 +171,7 @@ TEST_F(DatabaseTest, SetDropPendingThrowsExceptionIfDatabaseIsAlreadyInADropPend TEST_F(DatabaseTest, CreateCollectionThrowsExceptionWhenDatabaseIsInADropPendingState) { writeConflictRetry( - _opCtx.get(), "testÇreateCollectionWhenDatabaseIsInADropPendingState", _nss.ns(), [this] { + _opCtx.get(), "testÇreateCollectionWhenDatabaseIsInADropPendingState", _nss, [this] { AutoGetDb autoDb(_opCtx.get(), _nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(_opCtx.get()); ASSERT_TRUE(db); @@ -146,13 +180,13 @@ TEST_F(DatabaseTest, CreateCollectionThrowsExceptionWhenDatabaseIsInADropPending WriteUnitOfWork wuow(_opCtx.get()); - ASSERT_THROWS_CODE_AND_WHAT(db->createCollection(_opCtx.get(), _nss), - AssertionException, - ErrorCodes::DatabaseDropPending, - (StringBuilder() - << "Cannot create collection " << _nss - << " - database is in the process of being dropped.") - .stringData()); + ASSERT_THROWS_CODE_AND_WHAT( + db->createCollection(_opCtx.get(), _nss), + AssertionException, + ErrorCodes::DatabaseDropPending, + (StringBuilder() << "Cannot create collection " << _nss.toStringForErrorMsg() + << " - database is in the process of being dropped.") + .stringData()); }); } @@ -162,7 +196,7 @@ void _testDropCollection(OperationContext* opCtx, const repl::OpTime& dropOpTime = {}, const CollectionOptions& collOpts = {}) { if (createCollectionBeforeDrop) { - writeConflictRetry(opCtx, "testDropCollection", nss.ns(), [=] { + writeConflictRetry(opCtx, "testDropCollection", nss, [=] { WriteUnitOfWork wuow(opCtx); AutoGetDb autoDb(opCtx, nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(opCtx); @@ -172,7 +206,7 @@ void _testDropCollection(OperationContext* opCtx, }); } - writeConflictRetry(opCtx, "testDropCollection", nss.ns(), [=] { + writeConflictRetry(opCtx, "testDropCollection", nss, [=] { AutoGetDb autoDb(opCtx, nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(opCtx); ASSERT_TRUE(db); @@ -216,7 +250,7 @@ TEST_F(DatabaseTest, DropCollectionRejectsProvidedDropOpTimeIfWritesAreReplicate auto nss = _nss; AutoGetDb autoDb(opCtx, nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(opCtx); - writeConflictRetry(opCtx, "testDropOpTimeWithReplicated", nss.ns(), [&] { + writeConflictRetry(opCtx, "testDropOpTimeWithReplicated", nss, [&] { ASSERT_TRUE(db); WriteUnitOfWork wuow(opCtx); @@ -231,7 +265,7 @@ TEST_F(DatabaseTest, DropCollectionRejectsProvidedDropOpTimeIfWritesAreReplicate void _testDropCollectionThrowsExceptionIfThereAreIndexesInProgress(OperationContext* opCtx, const NamespaceString& nss) { - writeConflictRetry(opCtx, "testDropCollectionWithIndexesInProgress", nss.ns(), [opCtx, nss] { + writeConflictRetry(opCtx, 
"testDropCollectionWithIndexesInProgress", nss, [opCtx, nss] { AutoGetDb autoDb(opCtx, nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(opCtx); ASSERT_TRUE(db); @@ -295,7 +329,7 @@ TEST_F(DatabaseTest, RenameCollectionPreservesUuidOfSourceCollectionAndUpdatesUu ASSERT_TRUE(db); auto fromUuid = UUID::gen(); - writeConflictRetry(opCtx, "create", fromNss.ns(), [&] { + writeConflictRetry(opCtx, "create", fromNss, [&] { auto catalog = CollectionCatalog::get(opCtx); ASSERT_EQUALS(boost::none, catalog->lookupNSSByUUID(opCtx, fromUuid)); @@ -307,7 +341,7 @@ TEST_F(DatabaseTest, RenameCollectionPreservesUuidOfSourceCollectionAndUpdatesUu wuow.commit(); }); - writeConflictRetry(opCtx, "rename", fromNss.ns(), [&] { + writeConflictRetry(opCtx, "rename", fromNss, [&] { WriteUnitOfWork wuow(opCtx); auto stayTemp = false; ASSERT_OK(db->renameCollection(opCtx, fromNss, toNss, stayTemp)); @@ -331,7 +365,7 @@ TEST_F(DatabaseTest, RenameCollectionPreservesUuidOfSourceCollectionAndUpdatesUu TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReturnsFailedToParseIfModelDoesNotContainPercentSign) { - writeConflictRetry(_opCtx.get(), "testMakeUniqueCollectionNamespace", _nss.ns(), [this] { + writeConflictRetry(_opCtx.get(), "testMakeUniqueCollectionNamespace", _nss, [this] { AutoGetDb autoDb(_opCtx.get(), _nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(_opCtx.get()); ASSERT_TRUE(db); @@ -342,26 +376,26 @@ TEST_F(DatabaseTest, } TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandomCharacters) { - writeConflictRetry(_opCtx.get(), "testMakeUniqueCollectionNamespace", _nss.ns(), [this] { + writeConflictRetry(_opCtx.get(), "testMakeUniqueCollectionNamespace", _nss, [this] { AutoGetDb autoDb(_opCtx.get(), _nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(_opCtx.get()); ASSERT_TRUE(db); auto model = "tmp%%%%"_sd; - pcre::Regex re(_nss.db() + "\\.tmp[0-9A-Za-z][0-9A-Za-z][0-9A-Za-z][0-9A-Za-z]", + pcre::Regex re(_nss.db_forTest() + "\\.tmp[0-9A-Za-z][0-9A-Za-z][0-9A-Za-z][0-9A-Za-z]", pcre::ANCHORED | pcre::ENDANCHORED); auto nss1 = unittest::assertGet(makeUniqueCollectionName(_opCtx.get(), db->name(), model)); - if (!re.matchView(nss1.ns())) { - FAIL((StringBuilder() << "First generated namespace \"" << nss1.ns() + if (!re.matchView(nss1.ns_forTest())) { + FAIL((StringBuilder() << "First generated namespace \"" << nss1.ns_forTest() << "\" does not match regular expression \"" << re.pattern() << "\"") .str()); } - // Create collection using generated namespace so that makeUniqueCollectionNamespace() will - // not return the same namespace the next time. This is because we check the existing - // collections in the database for collisions while generating the namespace. + // Create collection using generated namespace so that makeUniqueCollectionNamespace() + // will not return the same namespace the next time. This is because we check the + // existing collections in the database for collisions while generating the namespace. 
{ WriteUnitOfWork wuow(_opCtx.get()); ASSERT_TRUE(db->createCollection(_opCtx.get(), nss1)); @@ -369,15 +403,15 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom } auto nss2 = unittest::assertGet(makeUniqueCollectionName(_opCtx.get(), db->name(), model)); - if (!re.matchView(nss2.ns())) { - FAIL((StringBuilder() << "Second generated namespace \"" << nss2.ns() + if (!re.matchView(nss2.ns_forTest())) { + FAIL((StringBuilder() << "Second generated namespace \"" << nss2.ns_forTest() << "\" does not match regular expression \"" << re.pattern() << "\"") .str()); } - // Second generated namespace should not collide with the first because a collection with - // name matching nss1 now exists. + // Second generated namespace should not collide with the first because a collection + // with name matching nss1 now exists. ASSERT_NOT_EQUALS(nss1, nss2); }); } @@ -385,7 +419,7 @@ TEST_F(DatabaseTest, MakeUniqueCollectionNamespaceReplacesPercentSignsWithRandom TEST_F( DatabaseTest, MakeUniqueCollectionNamespaceReturnsNamespaceExistsIfGeneratedNamesMatchExistingCollections) { - writeConflictRetry(_opCtx.get(), "testMakeUniqueCollectionNamespace", _nss.ns(), [this] { + writeConflictRetry(_opCtx.get(), "testMakeUniqueCollectionNamespace", _nss, [this] { AutoGetDb autoDb(_opCtx.get(), _nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(_opCtx.get()); ASSERT_TRUE(db); @@ -467,7 +501,7 @@ TEST_F(DatabaseTest, AutoGetCollectionForReadCommandSucceedsWithDeadlineMin) { TEST_F(DatabaseTest, CreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex) { writeConflictRetry(_opCtx.get(), "testÇreateCollectionProhibitsReplicatedCollectionsWithoutIdIndex", - _nss.ns(), + _nss, [this] { AutoGetDb autoDb(_opCtx.get(), _nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(_opCtx.get()); @@ -483,8 +517,8 @@ TEST_F(DatabaseTest, CreateCollectionProhibitsReplicatedCollectionsWithoutIdInde AssertionException, 50001, (StringBuilder() - << "autoIndexId:false is not allowed for collection " << _nss - << " because it can be replicated") + << "autoIndexId:false is not allowed for collection " + << _nss.toStringForErrorMsg() << " because it can be replicated") .stringData()); }); } diff --git a/src/mongo/db/catalog/document_validation.cpp b/src/mongo/db/catalog/document_validation.cpp index 3d660de21ca8a..0aa77538ed49e 100644 --- a/src/mongo/db/catalog/document_validation.cpp +++ b/src/mongo/db/catalog/document_validation.cpp @@ -27,7 +27,7 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/db/catalog/document_validation.h" diff --git a/src/mongo/db/catalog/document_validation.h b/src/mongo/db/catalog/document_validation.h index 47db304d79de9..586a9179c01cd 100644 --- a/src/mongo/db/catalog/document_validation.h +++ b/src/mongo/db/catalog/document_validation.h @@ -29,8 +29,18 @@ #pragma once +#include + +#include +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/operation_context.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/catalog/drop_collection.cpp b/src/mongo/db/catalog/drop_collection.cpp index d91043545a611..eb7049fdbe178 100644 --- a/src/mongo/db/catalog/drop_collection.cpp +++ b/src/mongo/db/catalog/drop_collection.cpp @@ -29,17 +29,39 @@ #include "mongo/db/catalog/drop_collection.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/audit.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_catalog_helper.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/views_for_database.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/db_raii.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/operation_context.h" @@ -47,9 +69,21 @@ #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/server_options.h" -#include "mongo/db/service_context.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/views/view.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -58,6 +92,7 @@ namespace { MONGO_FAIL_POINT_DEFINE(hangDropCollectionBeforeLockAcquisition); MONGO_FAIL_POINT_DEFINE(hangDuringDropCollection); +MONGO_FAIL_POINT_DEFINE(allowSystemViewsDrop); /** * Checks that the collection has the 'expectedUUID' if given. 
@@ -76,7 +111,8 @@ Status _checkUUIDAndReplState(OperationContext* opCtx, if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)) { return Status(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while dropping collection " << nss); + str::stream() + << "Not primary while dropping collection " << nss.toStringForErrorMsg()); } return Status::OK(); @@ -92,7 +128,7 @@ void checkForCollection(std::shared_ptr collectionCatal auto nss = NamespaceString(baseNss.db(), collName.value()); if (collectionCatalog->lookupCollectionByNamespace(opCtx, nss)) { - pLeaked->push_back(nss.toString()); + pLeaked->push_back(toStringForLogging(nss)); } } } @@ -165,13 +201,17 @@ Status _dropView(OperationContext* opCtx, if (opCtx->writesAreReplicated() && !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, collectionName)) { return Status(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while dropping collection " << collectionName); + str::stream() << "Not primary while dropping collection " + << collectionName.toStringForErrorMsg()); } WriteUnitOfWork wunit(opCtx); - audit::logDropView( - opCtx->getClient(), collectionName, view->viewOn().ns(), view->pipeline(), ErrorCodes::OK); + audit::logDropView(opCtx->getClient(), + collectionName, + NamespaceStringUtil::serialize(view->viewOn()), + view->pipeline(), + ErrorCodes::OK); Status status = db->dropView(opCtx, collectionName); if (!status.isOK()) { @@ -246,12 +286,12 @@ Status _abortIndexBuildsAndDrop(OperationContext* opCtx, // Send the abort signal to any active index builds on the collection. This waits until all // aborted index builds complete. - indexBuildsCoord->abortCollectionIndexBuilds(opCtx, - collectionNs, - collectionUUID, - str::stream() - << "Collection " << collectionNs << "(" - << collectionUUID << ") is being dropped"); + indexBuildsCoord->abortCollectionIndexBuilds( + opCtx, + collectionNs, + collectionUUID, + str::stream() << "Collection " << toStringForLogging(collectionNs) << "(" + << collectionUUID << ") is being dropped"); // Take an exclusive lock to finish the collection drop. optionalAutoDb.emplace(opCtx, startingNss.dbName(), MODE_IX); @@ -368,8 +408,15 @@ Status _dropCollection(OperationContext* opCtx, boost::optional dropIfUUIDNotMatching = boost::none) { try { - return writeConflictRetry(opCtx, "drop", collectionName.ns(), [&] { - AutoGetDb autoDb(opCtx, collectionName.dbName(), MODE_IX); + return writeConflictRetry(opCtx, "drop", collectionName, [&] { + // If a change collection is to be dropped, that is, the change streams are being + // disabled for a tenant, acquire exclusive tenant lock. + AutoGetDb autoDb(opCtx, + collectionName.dbName(), + MODE_IX /* database lock mode*/, + boost::make_optional(collectionName.tenantId() && + collectionName.isChangeCollection(), + MODE_X)); auto db = autoDb.getDb(); if (!db) { return expectedUUID @@ -443,7 +490,7 @@ Status _dropCollection(OperationContext* opCtx, // Drop the buckets collection in its own writeConflictRetry so that if // it throws a WCE, only the buckets collection drop is retried. 
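_dropCollection now asks AutoGetDb for an exclusive tenant lock when the target is a serverless change collection, passing an optional tenant lock mode next to the database lock mode. A hedged sketch of that acquisition using the same calls as the hunk (collectionName stands for the namespace being dropped):

```cpp
// Sketch: take the tenant lock in MODE_X only when dropping a tenant's change collection
// (i.e. when change streams are being disabled); otherwise no extra tenant lock is requested.
AutoGetDb autoDb(opCtx,
                 collectionName.dbName(),
                 MODE_IX /* database lock mode */,
                 boost::make_optional(collectionName.tenantId() &&
                                          collectionName.isChangeCollection(),
                                      MODE_X) /* tenant lock mode */);
```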
writeConflictRetry( - opCtx, "drop", bucketsNs.ns(), [opCtx, db, &bucketsNs, fromMigrate] { + opCtx, "drop", bucketsNs, [opCtx, db, &bucketsNs, fromMigrate] { WriteUnitOfWork wuow(opCtx); db->dropCollectionEvenIfSystem(opCtx, bucketsNs, {}, fromMigrate) .ignore(); @@ -574,7 +621,7 @@ Status dropCollectionForApplyOps(OperationContext* opCtx, LOGV2(20333, "Hanging drop collection before lock acquisition while fail point is set"); hangDropCollectionBeforeLockAcquisition.pauseWhileSet(); } - return writeConflictRetry(opCtx, "drop", collectionName.ns(), [&] { + return writeConflictRetry(opCtx, "drop", collectionName, [&] { AutoGetDb autoDb(opCtx, collectionName.dbName(), MODE_IX); Database* db = autoDb.getDb(); if (!db) { @@ -679,4 +726,39 @@ void clearTempCollections(OperationContext* opCtx, const DatabaseName& dbName) { }); } +Status isDroppableCollection(OperationContext* opCtx, const NamespaceString& nss) { + if (!nss.isSystem()) { + return Status::OK(); + } + + auto isDroppableSystemCollection = [](const auto& nss) { + return nss.isHealthlog() || nss == NamespaceString::kLogicalSessionsNamespace || + nss == NamespaceString::kKeysCollectionNamespace || + nss.isTemporaryReshardingCollection() || nss.isTimeseriesBucketsCollection() || + nss.isChangeStreamPreImagesCollection() || + nss == NamespaceString::kConfigsvrRestoreNamespace || nss.isChangeCollection() || + nss.isSystemDotJavascript() || nss.isSystemStatsCollection(); + }; + + if (nss.isSystemDotProfile()) { + if (CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(nss.dbName()) != 0) + return Status(ErrorCodes::IllegalOperation, + "turn off profiling before dropping system.profile collection"); + } else if (nss.isSystemDotViews()) { + if (!MONGO_unlikely(allowSystemViewsDrop.shouldFail())) { + const auto viewStats = + CollectionCatalog::get(opCtx)->getViewStatsForDatabase(opCtx, nss.dbName()); + uassert(ErrorCodes::CommandFailed, + "cannot drop collection {} when time-series collections are present"_format( + nss.toStringForErrorMsg()), + viewStats && viewStats->userTimeseries == 0); + } + } else if (!isDroppableSystemCollection(nss)) { + return Status(ErrorCodes::IllegalOperation, + "cannot drop system collection {}"_format(nss.toStringForErrorMsg())); + } + + return Status::OK(); +} + } // namespace mongo diff --git a/src/mongo/db/catalog/drop_collection.h b/src/mongo/db/catalog/drop_collection.h index 64c834a722111..1bd1c46401585 100644 --- a/src/mongo/db/catalog/drop_collection.h +++ b/src/mongo/db/catalog/drop_collection.h @@ -29,9 +29,15 @@ #pragma once -#include "mongo/base/status.h" +#include +#include "mongo/base/status.h" +#include "mongo/db/database_name.h" #include "mongo/db/drop_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" +#include "mongo/util/uuid.h" namespace mongo { class NamespaceString; @@ -99,4 +105,12 @@ void checkForIdIndexesAndDropPendingCollections(OperationContext* opCtx, */ void clearTempCollections(OperationContext* opCtx, const DatabaseName& dbName); +/** + * Checks that the namespace complies with naming restrictions and therefore can be dropped. It + * returns a Status with details of that evaluation. + * + * TODO (SERVER-76936): Normalize raised errors adopting a consistent approach. 
+ */ +Status isDroppableCollection(OperationContext* opCtx, const NamespaceString& nss); + } // namespace mongo diff --git a/src/mongo/db/catalog/drop_database.cpp b/src/mongo/db/catalog/drop_database.cpp index 3dcd730e9f716..d013feb510330 100644 --- a/src/mongo/db/catalog/drop_database.cpp +++ b/src/mongo/db/catalog/drop_database.cpp @@ -28,27 +28,55 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/drop_database.h" - #include - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/drop_database.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -71,8 +99,8 @@ Status _checkNssAndReplState(OperationContext* opCtx, Database* db, const Databa } auto replCoord = repl::ReplicationCoordinator::get(opCtx); - bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && - !replCoord->canAcceptWritesForDatabase(opCtx, dbName.toStringWithTenantId()); + bool userInitiatedWritesAndNotPrimary = + opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName); if (userInitiatedWritesAndNotPrimary) { return Status(ErrorCodes::NotWritablePrimary, @@ -104,18 +132,20 @@ void _finishDropDatabase(OperationContext* opCtx, IndexBuildsCoordinator::get(opCtx)->assertNoBgOpInProgForDb(dbName); } - writeConflictRetry(opCtx, "dropDatabase_database", dbName.toString(), [&] { + // Testing depends on this failpoint stopping execution before the dropDatabase oplog entry is + // written, as well as before the in-memory state is cleared. + if (MONGO_unlikely(dropDatabaseHangBeforeInMemoryDrop.shouldFail())) { + LOGV2(20334, "dropDatabase - fail point dropDatabaseHangBeforeInMemoryDrop enabled"); + dropDatabaseHangBeforeInMemoryDrop.pauseWhileSet(opCtx); + } + + writeConflictRetry(opCtx, "dropDatabase_database", NamespaceString(dbName), [&] { // We need to replicate the dropDatabase oplog entry and clear the collection catalog in the // same transaction. 
This is to prevent stepdown from interrupting between these two // operations and leaving this node in an inconsistent state. WriteUnitOfWork wunit(opCtx); opCtx->getServiceContext()->getOpObserver()->onDropDatabase(opCtx, dbName); - if (MONGO_unlikely(dropDatabaseHangBeforeInMemoryDrop.shouldFail())) { - LOGV2(20334, "dropDatabase - fail point dropDatabaseHangBeforeInMemoryDrop enabled"); - dropDatabaseHangBeforeInMemoryDrop.pauseWhileSet(opCtx); - } - auto databaseHolder = DatabaseHolder::get(opCtx); databaseHolder->dropDb(opCtx, db); dropPendingGuard.dismiss(); @@ -145,7 +175,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a uassert(ErrorCodes::IllegalOperation, str::stream() << "Dropping the '" << dbName.toStringForErrorMsg() << "' database is prohibited.", - dbName.db() != DatabaseName::kAdmin.db()); + !dbName.isAdminDB()); { CurOp::get(opCtx)->ensureStarted(); @@ -161,9 +191,11 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a // collections to drop. repl::OpTime latestDropPendingOpTime; + const auto tenantLockMode{ + boost::make_optional(dbName.tenantId() && dbName.isConfigDB(), MODE_X)}; { boost::optional autoDB; - autoDB.emplace(opCtx, dbName, MODE_X); + autoDB.emplace(opCtx, dbName, MODE_X /* database lock mode*/, tenantLockMode); Database* db = autoDB->getDb(); Status status = _checkNssAndReplState(opCtx, db, dbName); @@ -188,7 +220,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a 20337, "dropDatabase {dbName} - starting", "dropDatabase - starting", logAttrs(dbName)); db->setDropPending(opCtx, true); - // If Database::dropCollectionEventIfSystem() fails, we should reset the drop-pending state + // If Database::dropCollectionEvenIfSystem() fails, we should reset the drop-pending state // on Database. ScopeGuard dropPendingGuard([&db, opCtx] { db->setDropPending(opCtx, false); }); auto indexBuildsCoord = IndexBuildsCoordinator::get(opCtx); @@ -200,15 +232,18 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a // Create a scope guard to reset the drop-pending state on the database to false if // there is a replica state change that kills this operation while the locks were // yielded. - ScopeGuard dropPendingGuardWhileUnlocked([dbName, opCtx, &dropPendingGuard] { - // TODO (SERVER-71610): Fix to be interruptible or document exception. - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. - AutoGetDb autoDB(opCtx, dbName, MODE_IX); - if (auto db = autoDB.getDb()) { - db->setDropPending(opCtx, false); - } - dropPendingGuard.dismiss(); - }); + ScopeGuard dropPendingGuardWhileUnlocked( + [dbName, opCtx, &dropPendingGuard, tenantLockMode] { + // This scope guard must succeed in acquiring locks and reverting the drop + // pending state even when the failure is due to an interruption. + UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. + AutoGetDb autoDB( + opCtx, dbName, MODE_X /* database lock mode*/, tenantLockMode); + if (auto db = autoDB.getDb()) { + db->setDropPending(opCtx, false); + } + dropPendingGuard.dismiss(); + }); // Drop locks. The drop helper will acquire locks on our behalf. 
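The dropDatabaseHangBeforeInMemoryDrop fail point is now consulted before the write unit of work that emits the dropDatabase oplog entry, since tests depend on pausing before either the oplog write or the in-memory catalog drop happens. A hedged sketch of that ordering, recomposed from the calls shown in the hunk:

```cpp
// Sketch: consult the fail point outside the WriteUnitOfWork so a paused test observes
// neither the oplog entry nor the in-memory drop.
if (MONGO_unlikely(dropDatabaseHangBeforeInMemoryDrop.shouldFail())) {
    LOGV2(20334, "dropDatabase - fail point dropDatabaseHangBeforeInMemoryDrop enabled");
    dropDatabaseHangBeforeInMemoryDrop.pauseWhileSet(opCtx);
}

writeConflictRetry(opCtx, "dropDatabase_database", NamespaceString(dbName), [&] {
    WriteUnitOfWork wunit(opCtx);
    opCtx->getServiceContext()->getOpObserver()->onDropDatabase(opCtx, dbName);
    // ... drop the in-memory Database object, then commit ...
    wunit.commit();
});
```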
autoDB = boost::none; @@ -224,7 +259,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a dropDatabaseHangAfterWaitingForIndexBuilds.pauseWhileSet(); } - autoDB.emplace(opCtx, dbName, MODE_X); + autoDB.emplace(opCtx, dbName, MODE_X /* database lock mode*/, tenantLockMode); db = autoDB->getDb(); dropPendingGuardWhileUnlocked.dismiss(); @@ -258,20 +293,50 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a logAttrs(dbName), "namespace"_attr = nss); - writeConflictRetry(opCtx, "dropDatabase_views_collection", nss.ns(), [&] { + writeConflictRetry(opCtx, "dropDatabase_views_collection", nss, [&] { WriteUnitOfWork wunit(opCtx); fassert(7193701, db->dropCollectionEvenIfSystem(opCtx, nss)); wunit.commit(); }); } - // Refresh the catalog so the views collection isn't present. + // The system.profile collection is created using an untimestamped write to the catalog when + // enabling profiling on a database. So we drop it untimestamped as well to avoid mixed-mode + // timestamp usage. + auto systemProfilePtr = catalog->lookupCollectionByNamespace( + opCtx, NamespaceString::makeSystemDotProfileNamespace(dbName)); + if (systemProfilePtr) { + const Timestamp commitTs = opCtx->recoveryUnit()->getCommitTimestamp(); + if (!commitTs.isNull()) { + opCtx->recoveryUnit()->clearCommitTimestamp(); + } + + // Ensure this block exits with the same commit timestamp state that it was called with. + ScopeGuard addCommitTimestamp([&opCtx, commitTs] { + if (!commitTs.isNull()) { + opCtx->recoveryUnit()->setCommitTimestamp(commitTs); + } + }); + + const auto& nss = systemProfilePtr->ns(); + LOGV2(7574000, + "dropDatabase - dropping collection", + logAttrs(dbName), + "namespace"_attr = nss); + + invariant(!opCtx->lockState()->inAWriteUnitOfWork()); + writeConflictRetry(opCtx, "dropDatabase_system.profile_collection", nss, [&] { + WriteUnitOfWork wunit(opCtx); + fassert(7574001, db->dropCollectionEvenIfSystem(opCtx, nss)); + wunit.commit(); + }); + } + + // Refresh the catalog so the views and profile collections aren't present. catalog = CollectionCatalog::get(opCtx); std::vector collectionsToDrop; - for (auto collIt = catalog->begin(opCtx, db->name()); collIt != catalog->end(opCtx); - ++collIt) { - auto collection = *collIt; + for (auto&& collection : catalog->range(db->name())) { if (!collection) { break; } @@ -308,9 +373,9 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a // Dropping a database on a primary replicates individual collection drops followed // by a database drop oplog entry. When a secondary observes the database drop oplog // entry, all of the replicated collections that were dropped must have been - // processed. Only non-replicated collections like `system.profile` should be left - // to remove. Collections with the `tmp.mr` namespace may or may not be getting - // replicated; be conservative and assume they are not. + // processed. Only non-replicated collections should be left to remove. Collections + // with the `tmp.mr` namespace may or may not be getting replicated; be conservative + // and assume they are not. 
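dropDatabase now drops system.profile explicitly and does so untimestamped, because the collection is created with an untimestamped catalog write and mixing timestamp modes on that drop would be inconsistent. The clear-then-restore pattern around that drop, sketched with the same RecoveryUnit and ScopeGuard calls as the hunk above:

```cpp
// Sketch: temporarily clear the commit timestamp for the untimestamped drop, and restore
// it on scope exit so the surrounding operation keeps its original timestamp state.
const Timestamp commitTs = opCtx->recoveryUnit()->getCommitTimestamp();
if (!commitTs.isNull()) {
    opCtx->recoveryUnit()->clearCommitTimestamp();
}
ScopeGuard restoreCommitTimestamp([&opCtx, commitTs] {
    if (!commitTs.isNull()) {
        opCtx->recoveryUnit()->setCommitTimestamp(commitTs);
    }
});
// ... perform the untimestamped system.profile drop inside its own writeConflictRetry ...
```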
invariant(!nss.isReplicated() || nss.coll().startsWith("tmp.mr")); } @@ -319,7 +384,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a catalog->lookupCollectionByNamespace(opCtx, nss)->uuid()); } - writeConflictRetry(opCtx, "dropDatabase_collection", nss.ns(), [&] { + writeConflictRetry(opCtx, "dropDatabase_collection", nss, [&] { WriteUnitOfWork wunit(opCtx); // A primary processing this will assign a timestamp when the operation is written // to the oplog. As stated above, a secondary processing must only observe @@ -344,7 +409,8 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a // any errors while we await the replication of any collection drops and then reacquire the // locks (which can throw) needed to finish the drop database. ScopeGuard dropPendingGuardWhileUnlocked([dbName, opCtx] { - // TODO (SERVER-71610): Fix to be interruptible or document exception. + // This scope guard must succeed in acquiring locks and reverting the drop pending state + // even when the failure is due to an interruption. UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. AutoGetDb autoDB(opCtx, dbName, MODE_IX); @@ -428,7 +494,7 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a dropDatabaseHangAfterAllCollectionsDrop.pauseWhileSet(); } - AutoGetDb autoDB(opCtx, dbName, MODE_X); + AutoGetDb autoDB(opCtx, dbName, MODE_X /* database lock mode*/, tenantLockMode); auto db = autoDB.getDb(); if (!db) { return Status(ErrorCodes::NamespaceNotFound, @@ -437,8 +503,8 @@ Status _dropDatabase(OperationContext* opCtx, const DatabaseName& dbName, bool a << numCollectionsToDrop << " collection(s)."); } - bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && - !replCoord->canAcceptWritesForDatabase(opCtx, dbName.toStringWithTenantId()); + bool userInitiatedWritesAndNotPrimary = + opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName); if (userInitiatedWritesAndNotPrimary) { return Status(ErrorCodes::PrimarySteppedDown, diff --git a/src/mongo/db/catalog/drop_database_test.cpp b/src/mongo/db/catalog/drop_database_test.cpp index 9f9ea38f8f65f..240742a949683 100644 --- a/src/mongo/db/catalog/drop_database_test.cpp +++ b/src/mongo/db/catalog/drop_database_test.cpp @@ -27,33 +27,55 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include #include - -#include "mongo/db/catalog/create_collection.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/drop_database.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" -#include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace { @@ -68,12 +90,12 @@ class OpObserverMock : public OpObserverNoop { public: void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) override; - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) override; + CollectionDropType dropType, + bool markFromMigrate) override; std::set droppedDatabaseNames; std::set droppedCollectionNames; @@ -93,10 +115,11 @@ repl::OpTime OpObserverMock::onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { ASSERT_TRUE(opCtx->lockState()->inAWriteUnitOfWork()); - auto opTime = - OpObserverNoop::onDropCollection(opCtx, collectionName, uuid, numRecords, dropType); + auto opTime = OpObserverNoop::onDropCollection( + opCtx, collectionName, uuid, numRecords, dropType, markFromMigrate); invariant(opTime.isNull()); // Do not update 'droppedCollectionNames' if OpObserverNoop::onDropCollection() throws. droppedCollectionNames.insert(collectionName); @@ -181,7 +204,7 @@ void DropDatabaseTest::tearDown() { * Creates a collection without any namespace restrictions. 
*/ void _createCollection(OperationContext* opCtx, const NamespaceString& nss) { - writeConflictRetry(opCtx, "testDropCollection", nss.ns(), [=] { + writeConflictRetry(opCtx, "testDropCollection", nss, [=] { AutoGetDb autoDb(opCtx, nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(opCtx); ASSERT_TRUE(db); @@ -198,7 +221,7 @@ void _createCollection(OperationContext* opCtx, const NamespaceString& nss) { * Removes database from catalog, bypassing dropDatabase(). */ void _removeDatabaseFromCatalog(OperationContext* opCtx, StringData dbName) { - AutoGetDb autoDB(opCtx, dbName, MODE_X); + AutoGetDb autoDB(opCtx, DatabaseName::createDatabaseName_forTest(boost::none, dbName), MODE_X); auto db = autoDB.getDb(); // dropDatabase can call awaitReplication more than once, so do not attempt to drop the database // twice. @@ -220,7 +243,7 @@ TEST_F(DropDatabaseTest, DropDatabaseReturnsNotWritablePrimaryIfNotPrimary) { _createCollection(_opCtx.get(), _nss); ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY)); ASSERT_TRUE(_opCtx->writesAreReplicated()); - ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.db())); + ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.dbName())); ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, dropDatabaseForApplyOps(_opCtx.get(), _nss.dbName())); } @@ -303,11 +326,11 @@ TEST_F(DropDatabaseTest, DropDatabasePassedThroughAwaitReplicationErrorForDropPe dropDatabaseForApplyOps(_opCtx.get(), _nss.dbName())); } -TEST_F(DropDatabaseTest, DropDatabaseSkipsSystemProfileCollectionWhenDroppingCollections) { +TEST_F(DropDatabaseTest, DropDatabaseDropsSystemProfileCollectionWhenDroppingCollections) { repl::OpTime dropOpTime(Timestamp(Seconds(100), 0), 1LL); NamespaceString profileNss = NamespaceString::createNamespaceString_forTest(_nss.getSisterNS("system.profile")); - _testDropDatabase(_opCtx.get(), _opObserver, profileNss, false); + _testDropDatabase(_opCtx.get(), _opObserver, profileNss, true); } TEST_F(DropDatabaseTest, DropDatabaseResetsDropPendingStateOnException) { @@ -369,7 +392,7 @@ TEST_F(DropDatabaseTest, // Update ReplicationCoordinatorMock so that awaitReplication() fails. _replCoord->setAwaitReplicationReturnValueFunction( [this](OperationContext*, const repl::OpTime&) { - _removeDatabaseFromCatalog(_opCtx.get(), _nss.db()); + _removeDatabaseFromCatalog(_opCtx.get(), _nss.db_forTest()); return repl::ReplicationCoordinator::StatusAndDuration( Status(ErrorCodes::WriteConcernFailed, ""), Milliseconds(0)); }); @@ -382,7 +405,7 @@ TEST_F(DropDatabaseTest, // Update ReplicationCoordinatorMock so that awaitReplication() fails. 
_replCoord->setAwaitReplicationReturnValueFunction( [this](OperationContext*, const repl::OpTime&) { - _removeDatabaseFromCatalog(_opCtx.get(), _nss.db()); + _removeDatabaseFromCatalog(_opCtx.get(), _nss.db_forTest()); return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0)); }); @@ -394,7 +417,7 @@ TEST_F(DropDatabaseTest, ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status); ASSERT_EQUALS(status.reason(), std::string(str::stream() - << "Could not drop database " << _nss.db() + << "Could not drop database " << _nss.db_forTest() << " because it does not exist after dropping 1 collection(s).")); ASSERT_FALSE(AutoGetDb(_opCtx.get(), _nss.dbName(), MODE_X).getDb()); @@ -407,7 +430,7 @@ TEST_F(DropDatabaseTest, [this](OperationContext*, const repl::OpTime&) { ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY)); ASSERT_TRUE(_opCtx->writesAreReplicated()); - ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.db())); + ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _nss.dbName())); return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0)); }); @@ -418,7 +441,7 @@ TEST_F(DropDatabaseTest, auto status = dropDatabaseForApplyOps(_opCtx.get(), _nss.dbName()); ASSERT_EQUALS(ErrorCodes::PrimarySteppedDown, status); ASSERT_EQUALS(status.reason(), - std::string(str::stream() << "Could not drop database " << _nss.db() + std::string(str::stream() << "Could not drop database " << _nss.db_forTest() << " because we transitioned from PRIMARY to SECONDARY" << " while waiting for 1 pending collection drop(s).")); diff --git a/src/mongo/db/catalog/drop_indexes.cpp b/src/mongo/db/catalog/drop_indexes.cpp index ba72d795c5c69..f1ea5ff8864e0 100644 --- a/src/mongo/db/catalog/drop_indexes.cpp +++ b/src/mongo/db/catalog/drop_indexes.cpp @@ -30,25 +30,57 @@ #include "mongo/db/catalog/drop_indexes.h" #include - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/collection_uuid_mismatch.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl_set_member_in_standalone_mode.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/idl/idl_parser.h" #include 
"mongo/logv2/log.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -66,9 +98,11 @@ Status checkView(OperationContext* opCtx, if (!collection) { if (CollectionCatalog::get(opCtx)->lookupView(opCtx, nss)) { return Status(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Cannot drop indexes on view " << nss); + str::stream() + << "Cannot drop indexes on view " << nss.toStringForErrorMsg()); } - return Status(ErrorCodes::NamespaceNotFound, str::stream() << "ns not found " << nss); + return Status(ErrorCodes::NamespaceNotFound, + str::stream() << "ns not found " << nss.toStringForErrorMsg()); } return Status::OK(); } @@ -83,7 +117,7 @@ Status checkReplState(OperationContext* opCtx, if (writesAreReplicatedAndNotPrimary) { return Status(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while dropping indexes on database " - << dbAndUUID.dbName()->toStringForErrorMsg() + << dbAndUUID.dbName().toStringForErrorMsg() << " with collection " << dbAndUUID.uuid()); } @@ -92,9 +126,10 @@ Status checkReplState(OperationContext* opCtx, const auto& nss = collection->ns(); if (isPrimary && nss.isDropPendingNamespace()) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "Cannot drop indexes on drop-pending namespace " << nss - << " in database " << dbAndUUID.dbName()->toStringForErrorMsg() - << " with uuid " << dbAndUUID.uuid()); + str::stream() << "Cannot drop indexes on drop-pending namespace " + << nss.toStringForErrorMsg() << " in database " + << dbAndUUID.dbName().toStringForErrorMsg() << " with uuid " + << dbAndUUID.uuid()); } return Status::OK(); @@ -226,35 +261,37 @@ std::vector abortIndexBuildByIndexNames(OperationContext* opCtx, Status dropIndexByDescriptor(OperationContext* opCtx, Collection* collection, IndexCatalog* indexCatalog, - const IndexDescriptor* desc) { - if (desc->isIdIndex()) { + IndexCatalogEntry* entry) { + if (entry->descriptor()->isIdIndex()) { return Status(ErrorCodes::InvalidOptions, "cannot drop _id index"); } // Support dropping unfinished indexes, but only if the index is 'frozen'. These indexes only // exist in standalone mode. - auto entry = indexCatalog->getEntry(desc); if (entry->isFrozen()) { - invariant(!entry->isReady(opCtx)); + invariant(!entry->isReady()); invariant(getReplSetMemberInStandaloneMode(opCtx->getServiceContext())); // Return here. No need to fall through to op observer on standalone. - return indexCatalog->dropUnfinishedIndex(opCtx, collection, desc); + return indexCatalog->dropUnfinishedIndex(opCtx, collection, entry); } // Do not allow dropping unfinished indexes that are not frozen. - if (!entry->isReady(opCtx)) { + if (!entry->isReady()) { return Status(ErrorCodes::IndexNotFound, - str::stream() - << "can't drop unfinished index with name: " << desc->indexName()); + str::stream() << "can't drop unfinished index with name: " + << entry->descriptor()->indexName()); } // Log the operation first, which reserves an optime in the oplog and sets the timestamp for // future writes. This guarantees the durable catalog's metadata change to share the same // timestamp when dropping the index below. 
- opCtx->getServiceContext()->getOpObserver()->onDropIndex( - opCtx, collection->ns(), collection->uuid(), desc->indexName(), desc->infoObj()); + opCtx->getServiceContext()->getOpObserver()->onDropIndex(opCtx, + collection->ns(), + collection->uuid(), + entry->descriptor()->indexName(), + entry->descriptor()->infoObj()); - auto s = indexCatalog->dropIndex(opCtx, collection, desc); + auto s = indexCatalog->dropIndexEntry(opCtx, collection, entry); if (!s.isOK()) { return s; } @@ -351,16 +388,16 @@ void dropReadyIndexes(OperationContext* opCtx, opCtx, CollectionPtr(collection), indexName, collDescription.getKeyPattern())); } - auto desc = indexCatalog->findIndexByName(opCtx, - indexName, - IndexCatalog::InclusionPolicy::kReady | - IndexCatalog::InclusionPolicy::kUnfinished | - IndexCatalog::InclusionPolicy::kFrozen); - if (!desc) { + auto writableEntry = indexCatalog->getWritableEntryByName( + opCtx, + indexName, + IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished | + IndexCatalog::InclusionPolicy::kFrozen); + if (!writableEntry) { uasserted(ErrorCodes::IndexNotFound, str::stream() << "index not found with name [" << indexName << "]"); } - uassertStatusOK(dropIndexByDescriptor(opCtx, collection, indexCatalog, desc)); + uassertStatusOK(dropIndexByDescriptor(opCtx, collection, indexCatalog, writableEntry)); } } @@ -378,7 +415,7 @@ void assertNoMovePrimaryInProgress(OperationContext* opCtx, const NamespaceStrin LOGV2(4976500, "assertNoMovePrimaryInProgress", logAttrs(nss)); uasserted(ErrorCodes::MovePrimaryInProgress, - "movePrimary is in progress for namespace " + nss.toString()); + "movePrimary is in progress for namespace " + nss.toStringForErrorMsg()); } } } catch (const DBException& ex) { @@ -473,9 +510,10 @@ DropIndexesReply dropIndexes(OperationContext* opCtx, if (!*collection) { uasserted(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection '" << nss << "' with UUID " << dbAndUUID.uuid() - << " in database " << dbAndUUID.dbName()->toStringForErrorMsg() - << " does not exist."); + str::stream() + << "Collection '" << nss.toStringForErrorMsg() << "' with UUID " + << dbAndUUID.uuid() << " in database " + << dbAndUUID.dbName().toStringForErrorMsg() << " does not exist."); } // The collection could have been renamed when we dropped locks. @@ -504,7 +542,7 @@ DropIndexesReply dropIndexes(OperationContext* opCtx, // The index catalog requires that no active index builders are running when dropping ready // indexes. IndexBuildsCoordinator::get(opCtx)->assertNoIndexBuildInProgForCollection(collectionUUID); - writeConflictRetry(opCtx, "dropIndexes", dbAndUUID.toString(), [&] { + writeConflictRetry(opCtx, "dropIndexes", dbAndUUID, [&] { WriteUnitOfWork wuow(opCtx); // This is necessary to check shard version. @@ -527,19 +565,19 @@ DropIndexesReply dropIndexes(OperationContext* opCtx, collDesc.getKeyPattern())); } - auto desc = - indexCatalog->findIndexByName(opCtx, - indexName, - IndexCatalog::InclusionPolicy::kReady | - IndexCatalog::InclusionPolicy::kUnfinished | - IndexCatalog::InclusionPolicy::kFrozen); - if (!desc) { + auto writableEntry = indexCatalog->getWritableEntryByName( + opCtx, + indexName, + IndexCatalog::InclusionPolicy::kReady | + IndexCatalog::InclusionPolicy::kUnfinished | + IndexCatalog::InclusionPolicy::kFrozen); + if (!writableEntry) { // A similar index wasn't created while we yielded the locks during abort. 
continue; } uassertStatusOK(dropIndexByDescriptor( - opCtx, collection->getWritableCollection(opCtx), indexCatalog, desc)); + opCtx, collection->getWritableCollection(opCtx), indexCatalog, writableEntry)); } wuow.commit(); @@ -557,16 +595,15 @@ DropIndexesReply dropIndexes(OperationContext* opCtx, invariant((*collection)->getIndexCatalog()->numIndexesInProgress() == 0); } - writeConflictRetry( - opCtx, "dropIndexes", dbAndUUID.toString(), [opCtx, &collection, &indexNames, &reply] { - WriteUnitOfWork wunit(opCtx); + writeConflictRetry(opCtx, "dropIndexes", dbAndUUID, [opCtx, &collection, &indexNames, &reply] { + WriteUnitOfWork wunit(opCtx); - // This is necessary to check shard version. - OldClientContext ctx(opCtx, (*collection)->ns()); - dropReadyIndexes( - opCtx, collection->getWritableCollection(opCtx), indexNames, &reply, false); - wunit.commit(); - }); + // This is necessary to check shard version. + OldClientContext ctx(opCtx, (*collection)->ns()); + dropReadyIndexes( + opCtx, collection->getWritableCollection(opCtx), indexNames, &reply, false); + wunit.commit(); + }); return reply; } @@ -575,12 +612,12 @@ Status dropIndexesForApplyOps(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& cmdObj) try { BSONObjBuilder bob(cmdObj); - bob.append("$db", nss.dbName().db()); + bob.append("$db", nss.dbName().serializeWithoutTenantPrefix()); auto cmdObjWithDb = bob.obj(); auto parsed = DropIndexes::parse( IDLParserContext{"dropIndexes", false /* apiStrict */, nss.tenantId()}, cmdObjWithDb); - return writeConflictRetry(opCtx, "dropIndexes", nss.db(), [opCtx, &nss, &cmdObj, &parsed] { + return writeConflictRetry(opCtx, "dropIndexes", nss, [opCtx, &nss, &cmdObj, &parsed] { AutoGetCollection collection(opCtx, nss, MODE_X); // If db/collection does not exist, short circuit and return. diff --git a/src/mongo/db/catalog/drop_indexes.h b/src/mongo/db/catalog/drop_indexes.h index f0fa9dad5ef0d..499df67eb3a38 100644 --- a/src/mongo/db/catalog/drop_indexes.h +++ b/src/mongo/db/catalog/drop_indexes.h @@ -27,9 +27,17 @@ * it in the license file. 
*/ -#include "mongo/base/status.h" +#include +#include +#include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/drop_indexes_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/uuid.h" namespace mongo { class BSONObj; diff --git a/src/mongo/db/catalog/external_data_source_scope_guard.cpp b/src/mongo/db/catalog/external_data_source_scope_guard.cpp index 0f314a1cb3a33..270488775e631 100644 --- a/src/mongo/db/catalog/external_data_source_scope_guard.cpp +++ b/src/mongo/db/catalog/external_data_source_scope_guard.cpp @@ -29,6 +29,7 @@ #include "mongo/db/catalog/external_data_source_scope_guard.h" +#include "mongo/base/status.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/virtual_collection_options.h" @@ -36,7 +37,11 @@ #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/external_data_source_option_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/catalog/external_data_source_scope_guard.h b/src/mongo/db/catalog/external_data_source_scope_guard.h index 0d14bcbf87056..7149dff1d2eeb 100644 --- a/src/mongo/db/catalog/external_data_source_scope_guard.h +++ b/src/mongo/db/catalog/external_data_source_scope_guard.h @@ -29,10 +29,15 @@ #pragma once +#include +#include +#include + #include "mongo/db/clientcursor.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/external_data_source_option_gen.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/catalog/health_log.cpp b/src/mongo/db/catalog/health_log.cpp index 9626d4561115d..11f5c9d9caaa8 100644 --- a/src/mongo/db/catalog/health_log.cpp +++ b/src/mongo/db/catalog/health_log.cpp @@ -28,8 +28,14 @@ */ #include "mongo/db/catalog/health_log.h" + +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/health_log_gen.h" -#include "mongo/db/db_raii.h" #include "mongo/db/namespace_string.h" namespace mongo { diff --git a/src/mongo/db/catalog/health_log.h b/src/mongo/db/catalog/health_log.h index f9fe3c5b4c356..4b79eecb16ebe 100644 --- a/src/mongo/db/catalog/health_log.h +++ b/src/mongo/db/catalog/health_log.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/db/catalog/health_log_gen.h" #include "mongo/db/catalog/health_log_interface.h" #include "mongo/db/concurrency/deferred_writer.h" diff --git a/src/mongo/db/catalog/health_log_interface.cpp b/src/mongo/db/catalog/health_log_interface.cpp index 69e8f03020fe4..dac05a7d49436 100644 --- a/src/mongo/db/catalog/health_log_interface.cpp +++ b/src/mongo/db/catalog/health_log_interface.cpp @@ -28,7 +28,14 @@ */ #include "mongo/db/catalog/health_log_interface.h" + +#include + +#include + #include "mongo/db/operation_context.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/catalog/health_log_interface.h b/src/mongo/db/catalog/health_log_interface.h index 885015fb6bec5..38ac25bec8aa1 100644 --- a/src/mongo/db/catalog/health_log_interface.h +++ 
b/src/mongo/db/catalog/health_log_interface.h @@ -29,6 +29,9 @@ #pragma once +#include +#include + #include "mongo/db/catalog/health_log_gen.h" #include "mongo/db/service_context.h" diff --git a/src/mongo/db/catalog/historical_catalogid_tracker.cpp b/src/mongo/db/catalog/historical_catalogid_tracker.cpp new file mode 100644 index 0000000000000..5f7d2cc853c8a --- /dev/null +++ b/src/mongo/db/catalog/historical_catalogid_tracker.cpp @@ -0,0 +1,706 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/catalog/historical_catalogid_tracker.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/util/assert_util_core.h" + +namespace mongo { +namespace { +// Sentinel id for marking a catalogId mapping range as unknown. Must use an invalid RecordId. +static RecordId kUnknownRangeMarkerId = RecordId::minLong(); +// Maximum number of entries in catalogId mapping when inserting catalogId missing at timestamp. +// Used to avoid quadratic behavior when inserting entries at the beginning. When threshold is +// reached we will fall back to more durable catalog scans. +static constexpr int kMaxCatalogIdMappingLengthForMissingInsert = 1000; + +// Copy existing value from immutable data structure or default-construct if not existing +template +auto copyIfExists(const Container& container, const Key& key) { + const auto* value = container.find(key); + if (value) { + return *value; + } + return typename Container::mapped_type(); +} + +// Returns true if cleanup is needed for a catalogId range +bool needsCleanup(const std::vector& ids) { + // Cleanup may occur if we have more than one entry for the namespace. + return ids.size() > 1; +} + +// Returns the lowest time a catalogId range may be cleaned up. needsCleanup() needs to have been +// checked prior to calling this function +Timestamp cleanupTime(const std::vector& ids) { + // When we have multiple entries, use the time at the second entry as the cleanup time, + // when the oldest timestamp advances past this we no longer need the first entry. 
+ return ids.at(1).ts; +} + +// Converts a not found lookup timestamp to a LookupResult based on the oldest maintained timestamp +HistoricalCatalogIdTracker::LookupResult resultForNotFound(boost::optional ts, + Timestamp oldestMaintained) { + // If the request was with a time prior to the oldest maintained time it is unknown, otherwise + // we know it is not existing. + return {RecordId{}, + ts && *ts < oldestMaintained + ? HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown + : HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists}; +} + +// Converts a catalogId range into a lookup result that represents the latest state +HistoricalCatalogIdTracker::LookupResult latestInRange( + const std::vector& range) { + auto catalogId = range.back().id; + if (catalogId) { + return {*catalogId, HistoricalCatalogIdTracker::LookupResult::Existence::kExists}; + } + return {RecordId{}, HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists}; +} + +HistoricalCatalogIdTracker::LookupResult findInRange( + Timestamp ts, + const std::vector& range, + Timestamp oldestMaintained) { + // The algorithm is as follows for an input range of the following format that is sorted on + // timestamp: (ts1, id1), (ts2, id2), ..., (tsN, idN). + // + // We use upper_bound to perform binary search to the timestamp that is strictly larger than our + // query timestamp ts. The iterator can then be decremented to get the entry where the time is + // less or equal, this is the entry we are looking for. If upper_bound returns begin() or the + // 'id' in our found entry is the unknown marker the lookup result is unknown. + auto rangeIt = + std::upper_bound(range.begin(), range.end(), ts, [](const auto& ts, const auto& entry) { + return ts < entry.ts; + }); + if (rangeIt == range.begin()) { + return resultForNotFound(ts, oldestMaintained); + } + // Upper bound returns an iterator to the first entry with a larger timestamp. Decrement the + // iterator to get the last entry where the time is less or equal. + auto catalogId = (--rangeIt)->id; + if (catalogId) { + if (*catalogId != kUnknownRangeMarkerId) { + return {*catalogId, HistoricalCatalogIdTracker::LookupResult::Existence::kExists}; + } else { + return {RecordId{}, HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown}; + } + } + return {RecordId{}, HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists}; +} +} // namespace + +HistoricalCatalogIdTracker::LookupResult HistoricalCatalogIdTracker::lookup( + const NamespaceString& nss, boost::optional ts) const { + if (const std::vector* mapping = _nss.find(nss)) { + // Mapping found for namespace, get result depending on timestamp. + if (ts) { + return findInRange(*ts, *mapping, _oldestTimestampMaintained); + } + return latestInRange(*mapping); + } + // No mapping found for namespace, result is either not found or unknown depending on timestamp + return resultForNotFound(ts, _oldestTimestampMaintained); +} + + +HistoricalCatalogIdTracker::LookupResult HistoricalCatalogIdTracker::lookup( + const UUID& uuid, boost::optional ts) const { + if (const std::vector* mapping = _uuid.find(uuid)) { + // Mapping found for namespace, get result depending on timestamp. 
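The findInRange() comment above describes the lookup idiom: std::upper_bound to the first entry strictly after the query timestamp, then step back one entry to get the latest change at or before it. A minimal standalone sketch of that idiom follows, using plain standard-library types in place of RecordId and omitting the unknown-range marker and oldest-timestamp handling.

```cpp
// Binary-search lookup over a timestamp-sorted history of (id, ts) entries.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

struct Entry {
    std::optional<int64_t> id;  // nullopt == dropped at this timestamp
    uint64_t ts;
};

std::optional<int64_t> lookupAt(const std::vector<Entry>& range, uint64_t ts) {
    auto it = std::upper_bound(range.begin(), range.end(), ts,
                               [](uint64_t t, const Entry& e) { return t < e.ts; });
    if (it == range.begin())
        return std::nullopt;  // nothing known at or before 'ts'
    return (--it)->id;        // last entry whose timestamp is <= ts
}

int main() {
    // Created with id 1 at ts 5, dropped at ts 10, re-created with id 2 at ts 15.
    std::vector<Entry> range{{1, 5}, {std::nullopt, 10}, {2, 15}};
    assert(!lookupAt(range, 4));        // before the create
    assert(*lookupAt(range, 7) == 1);   // between create and drop
    assert(!lookupAt(range, 12));       // after the drop
    assert(*lookupAt(range, 20) == 2);  // after the re-create
    return 0;
}
```

Using upper_bound rather than lower_bound is what lets an entry whose timestamp equals the query timestamp be returned after the single decrement.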
+ if (ts) { + return findInRange(*ts, *mapping, _oldestTimestampMaintained); + } + return latestInRange(*mapping); + } + + // No mapping found for namespace, result is either not found or unknown depending on timestamp + return resultForNotFound(ts, _oldestTimestampMaintained); +} + +void HistoricalCatalogIdTracker::create(const NamespaceString& nss, + const UUID& uuid, + const RecordId& catalogId, + boost::optional ts) { + + if (!ts) { + _createNoTimestamp(nss, uuid, catalogId); + return; + } + + _createTimestamp(nss, uuid, catalogId, *ts); +} + +void HistoricalCatalogIdTracker::drop(const NamespaceString& nss, + const UUID& uuid, + boost::optional ts) { + if (!ts) { + _dropNoTimestamp(nss, uuid); + return; + } + + _dropTimestamp(nss, uuid, *ts); +} + +void HistoricalCatalogIdTracker::rename(const NamespaceString& from, + const NamespaceString& to, + boost::optional ts) { + if (!ts) { + _renameNoTimestamp(from, to); + return; + } + + _renameTimestamp(from, to, *ts); +} + +bool HistoricalCatalogIdTracker::canRecordNonExisting(const NamespaceString& nss) const { + // recordNonExistingAtTime can use a lot of entries because of the unknown marker that is + // needed. Constrain the memory usage. + if (const std::vector* ids = _nss.find(nss)) { + return ids->size() < kMaxCatalogIdMappingLengthForMissingInsert; + } + return true; +} + +bool HistoricalCatalogIdTracker::canRecordNonExisting(const UUID& uuid) const { + // recordNonExistingAtTime can use a lot of entries because of the unknown marker that is + // needed. Constrain the memory usage. + if (const std::vector* ids = _uuid.find(uuid)) { + return ids->size() < kMaxCatalogIdMappingLengthForMissingInsert; + } + return true; +} + +void HistoricalCatalogIdTracker::recordExistingAtTime(const NamespaceString& nss, + const UUID& uuid, + const RecordId& catalogId, + Timestamp ts) { + + // Helper lambda to perform the operation on both namespace and UUID + auto doRecord = + [this, &catalogId, &ts](auto& idsContainer, auto& changesContainer, const auto& key) { + // Helper to update the cleanup time after we've performed an insert. + auto markForCleanupIfNeeded = [&](const auto& ids) { + if (!needsCleanup(ids)) { + return; + } + + changesContainer = changesContainer.insert(key); + _recordCleanupTime(cleanupTime(ids)); + }; + + // Get copy of existing mapping, or default-construct new. + auto ids = copyIfExists(idsContainer, key); + // Helper to write updated id mapping back into container at scope exit. This allows us + // to write to 'ids' as if we were doing inplace updates to the container. + ScopeGuard scopedGuard([&] { idsContainer = idsContainer.set(key, std::move(ids)); }); + + // Binary search to the entry with same or larger timestamp. This represents the insert + // position in the container. + auto it = std::lower_bound( + ids.begin(), ids.end(), ts, [](const auto& entry, const Timestamp& ts) { + return entry.ts < ts; + }); + + if (it != ids.end()) { + // An entry could exist already if concurrent writes are performed, keep the latest + // change in that case. + if (it->ts == ts) { + it->id = catalogId; + return; + } + + // If next element has same catalogId, we can adjust its timestamp to cover a longer + // range + if (it->id == catalogId) { + it->ts = ts; + + markForCleanupIfNeeded(ids); + return; + } + } + + // Otherwise insert new entry at timestamp + ids.insert(it, {{catalogId, ts}}); + markForCleanupIfNeeded(ids); + }; + + // Apply the insert to both namespace and uuid. 
+ doRecord(_nss, _nssChanges, nss); + doRecord(_uuid, _uuidChanges, uuid); +} + +void HistoricalCatalogIdTracker::recordNonExistingAtTime(const NamespaceString& nss, Timestamp ts) { + // Get copy of existing mapping, or default-construct new. + auto ids = copyIfExists(_nss, nss); + + // Avoid inserting missing mapping when the list has grown past the threshold. Will cause + // the system to fall back to scanning the durable catalog. + if (ids.size() >= kMaxCatalogIdMappingLengthForMissingInsert) { + return; + } + + // Helper to write updated id mapping back into container at scope exit + ScopeGuard scopedGuard([&] { _nss = _nss.set(nss, std::move(ids)); }); + + // Binary search to the entry with same or larger timestamp. This represents the insert position + // in the container. + auto it = + std::lower_bound(ids.begin(), ids.end(), ts, [](const auto& entry, const Timestamp& ts) { + return entry.ts < ts; + }); + + if (it != ids.end() && it->ts == ts) { + // An entry could exist already if concurrent writes are performed, keep the latest + // change in that case. + it->id = boost::none; + } else { + // Otherwise insert new entry + it = ids.insert(it, {boost::none, ts}); + } + + // The iterator is positioned on the added/modified element above, reposition it to the next + // entry + ++it; + + // We don't want to assume that the namespace remains not existing until the next entry, as + // there can be times where the namespace actually does exist. To make sure we trigger the + // scanning of the durable catalog in this range we will insert a bogus entry using an invalid + // RecordId at the next timestamp. This will treat the range forward as unknown. + auto nextTs = ts + 1; + + // If the next entry is on the next timestamp already, we can skip adding the bogus entry. + // If this function is called for a previously unknown namespace or UUID, we may not have + // any future valid entries and the iterator would be positioned at and at this point. + if (it == ids.end() || it->ts != nextTs) { + ids.insert(it, {kUnknownRangeMarkerId, nextTs}); + } + + // Update cleanup time if needed + if (!needsCleanup(ids)) { + return; + } + + _nssChanges = _nssChanges.insert(nss); + _recordCleanupTime(cleanupTime(ids)); +} + +void HistoricalCatalogIdTracker::recordNonExistingAtTime(const UUID& uuid, Timestamp ts) { + auto ids = copyIfExists(_uuid, uuid); + + // Avoid inserting missing mapping when the list has grown past the threshold. Will cause + // the system to fall back to scanning the durable catalog. + if (ids.size() >= kMaxCatalogIdMappingLengthForMissingInsert) { + return; + } + + // Helper to write updated id mapping back into container at scope exit + ScopeGuard scopedGuard([&] { _uuid = _uuid.set(uuid, std::move(ids)); }); + + // Binary search to the entry with same or larger timestamp. This represents the insert position + // in the container. + auto it = + std::lower_bound(ids.begin(), ids.end(), ts, [](const auto& entry, const Timestamp& ts) { + return entry.ts < ts; + }); + + if (it != ids.end() && it->ts == ts) { + // An entry could exist already if concurrent writes are performed, keep the latest + // change in that case. 
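recordNonExistingAtTime() above relies on a sentinel entry: after recording that the namespace did not exist at ts, it inserts an invalid RecordId at ts + 1 so that later timestamps are reported as unknown (forcing a durable-catalog scan) rather than as non-existent. Below is a small sketch of that behaviour with simplified types; the integer sentinel merely stands in for RecordId::minLong().

```cpp
// Sketch of the "unknown range" sentinel that follows a recorded non-existence entry.
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

constexpr int64_t kUnknownMarker = -1;  // stand-in for the invalid-RecordId sentinel

struct Entry {
    std::optional<int64_t> id;  // nullopt == known to not exist
    uint64_t ts;
};

enum class Existence { kExists, kNotExists, kUnknown };

Existence existenceAt(const std::vector<Entry>& range, uint64_t ts) {
    auto it = std::upper_bound(range.begin(), range.end(), ts,
                               [](uint64_t t, const Entry& e) { return t < e.ts; });
    if (it == range.begin())
        return Existence::kUnknown;
    auto id = (--it)->id;
    if (!id)
        return Existence::kNotExists;
    return *id == kUnknownMarker ? Existence::kUnknown : Existence::kExists;
}

int main() {
    // recordNonExistingAtTime(ts = 10): a "not existing" entry plus an unknown marker at 11.
    std::vector<Entry> range{{std::nullopt, 10}, {kUnknownMarker, 11}};
    assert(existenceAt(range, 10) == Existence::kNotExists);  // known gap at ts 10
    assert(existenceAt(range, 50) == Existence::kUnknown);    // later times stay unknown
    return 0;
}
```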
+ it->id = boost::none; + } else { + // Otherwise insert new entry + it = ids.insert(it, {boost::none, ts}); + } + + // The iterator is positioned on the added/modified element above, reposition it to the next + // entry + ++it; + + // We don't want to assume that the namespace remains not existing until the next entry, as + // there can be times where the namespace actually does exist. To make sure we trigger the + // scanning of the durable catalog in this range we will insert a bogus entry using an invalid + // RecordId at the next timestamp. This will treat the range forward as unknown. + auto nextTs = ts + 1; + + // If the next entry is on the next timestamp already, we can skip adding the bogus entry. + // If this function is called for a previously unknown namespace or UUID, we may not have + // any future valid entries and the iterator would be positioned at and at this point. + if (it == ids.end() || it->ts != nextTs) { + ids.insert(it, {kUnknownRangeMarkerId, nextTs}); + } + + // Update cleanup time if needed + if (!needsCleanup(ids)) { + return; + } + + _uuidChanges = _uuidChanges.insert(uuid); + _recordCleanupTime(cleanupTime(ids)); +} + +bool HistoricalCatalogIdTracker::dirty(Timestamp oldest) const { + return _lowestTimestampForCleanup <= oldest; +} + +void HistoricalCatalogIdTracker::cleanup(Timestamp oldest) { + Timestamp nextLowestCleanupTimestamp = Timestamp::max(); + + // Helper lambda to perform the operation on both namespace and UUID + auto doCleanup = [this, &oldest, &nextLowestCleanupTimestamp](auto& idsContainer, + auto& changesContainer) { + // Batch all changes together + auto ids = idsContainer.transient(); + auto changes = changesContainer.transient(); + + for (auto&& key : changesContainer) { + // + auto range = ids.at(key); + + // Binary search for next larger timestamp + auto rangeIt = std::upper_bound( + range.begin(), range.end(), oldest, [](const auto& ts, const auto& entry) { + return ts < entry.ts; + }); + + // Continue if there is nothing to cleanup for this timestamp yet + if (rangeIt == range.begin()) { + // There should always be at least two entries in the range when we hit this + // branch. For the namespace to be put in '_nssChanges' we need at least two + // entries. + invariant(range.size() > 1); + nextLowestCleanupTimestamp = + std::min(nextLowestCleanupTimestamp, cleanupTime(range)); + continue; + } + + // The iterator is positioned to the closest entry that has a larger timestamp, + // decrement to get a lower or equal timestamp. This represents the first entry that we + // may not cleanup. + --rangeIt; + + // Erase range, we will leave at least one element due to the decrement above + range.erase(range.begin(), rangeIt); + + // If more changes are needed for this namespace, keep it in the set and keep track + // of lowest timestamp. + if (range.size() > 1) { + nextLowestCleanupTimestamp = + std::min(nextLowestCleanupTimestamp, cleanupTime(range)); + ids.set(key, std::move(range)); + continue; + } + // If the last remaining element is a drop earlier than the oldest timestamp, we can + // remove tracking this namespace + if (range.back().id == boost::none) { + ids.erase(key); + } else { + ids.set(key, std::move(range)); + } + + // Unmark this namespace or UUID for needing changes. 
+ changes.erase(key); + } + + // Write back all changes to main container + changesContainer = changes.persistent(); + idsContainer = ids.persistent(); + }; + + // Iterate over all namespaces and UUIDs that is marked that they need cleanup + doCleanup(_nss, _nssChanges); + doCleanup(_uuid, _uuidChanges); + + _lowestTimestampForCleanup = nextLowestCleanupTimestamp; + _oldestTimestampMaintained = std::max(_oldestTimestampMaintained, oldest); +} + +void HistoricalCatalogIdTracker::rollback(Timestamp stable) { + _nssChanges = {}; + _uuidChanges = {}; + _lowestTimestampForCleanup = Timestamp::max(); + _oldestTimestampMaintained = std::min(_oldestTimestampMaintained, stable); + + // Helper lambda to perform the operation on both namespace and UUID + auto removeLargerTimestamps = [this, &stable](auto& idsContainer, auto& changesContainer) { + // Batch all changes together + auto idsWriter = idsContainer.transient(); + auto changesWriter = changesContainer.transient(); + + // Go through all known mappings and remove entries larger than input stable timestamp + for (const auto& [key, ids] : idsContainer) { + // Binary search to the first entry with a too large timestamp + auto end = std::upper_bound( + ids.begin(), ids.end(), stable, [](Timestamp ts, const auto& entry) { + return ts < entry.ts; + }); + + // Create a new range without the timestamps that are too large + std::vector removed(ids.begin(), end); + + // If the resulting range is empty, remove the key from the container + if (removed.empty()) { + idsWriter.erase(key); + continue; + } + + // Calculate when this namespace needs to be cleaned up next + if (needsCleanup(removed)) { + Timestamp cleanTime = cleanupTime(removed); + changesWriter.insert(key); + _recordCleanupTime(cleanTime); + } + idsWriter.set(key, std::move(removed)); + } + + // Write back all changes to main container + changesContainer = changesWriter.persistent(); + idsContainer = idsWriter.persistent(); + }; + + // Rollback on both namespace and uuid containers. + removeLargerTimestamps(_nss, _nssChanges); + removeLargerTimestamps(_uuid, _uuidChanges); +} + +void HistoricalCatalogIdTracker::_recordCleanupTime(Timestamp ts) { + if (ts < _lowestTimestampForCleanup) { + _lowestTimestampForCleanup = ts; + } +} + +void HistoricalCatalogIdTracker::_createTimestamp(const NamespaceString& nss, + const UUID& uuid, + const RecordId& catalogId, + Timestamp ts) { + // Helper lambda to perform the operation on both namespace and UUID + auto doCreate = [&catalogId, &ts](auto& idsContainer, const auto& key) { + // Make a copy of the vector stored at 'key' + auto ids = copyIfExists(idsContainer, key); + + // An entry could exist already if concurrent writes are performed, keep the latest + // change in that case. + if (!ids.empty() && ids.back().ts == ts) { + ids.back().id = catalogId; + idsContainer = idsContainer.set(key, std::move(ids)); + return; + } + + // Otherwise, push new entry at the end. Timestamp is always increasing + invariant(ids.empty() || ids.back().ts < ts); + // If the catalogId is the same as last entry, there's nothing we need to do. This can + // happen when the catalog is reopened. + if (!ids.empty() && ids.back().id == catalogId) { + return; + } + + // Push new mapping to the end and write back to the container. As this is a create, we do + // not need to update the cleanup time as a create can never yield an updated (lower) + // cleanup time for this namespace/uuid. 
+ ids.push_back({catalogId, ts}); + idsContainer = idsContainer.set(key, std::move(ids)); + }; + + // Create on both namespace and uuid containers. + doCreate(_nss, nss); + doCreate(_uuid, uuid); +} + +void HistoricalCatalogIdTracker::_createNoTimestamp(const NamespaceString& nss, + const UUID& uuid, + const RecordId& catalogId) { + // Make sure untimestamped writes have a single entry in mapping. If we're mixing + // timestamped with untimestamped (such as repair). Ignore the untimestamped writes + // as an untimestamped deregister will correspond with an untimestamped register. We + // should leave the mapping as-is in this case. + + auto doCreate = [&catalogId](auto& idsContainer, auto& changesContainer, const auto& key) { + const std::vector* ids = idsContainer.find(key); + if (!ids) { + // This namespace or UUID was added due to an untimestamped write, add an entry + // with min timestamp + idsContainer = idsContainer.set(key, {{catalogId, Timestamp::min()}}); + + // Nothing to cleanup after untimestamped write + changesContainer = changesContainer.erase(key); + return; + } + }; + + // Create on both namespace and uuid containers. + doCreate(_nss, _nssChanges, nss); + doCreate(_uuid, _uuidChanges, uuid); +} + +void HistoricalCatalogIdTracker::_dropTimestamp(const NamespaceString& nss, + const UUID& uuid, + Timestamp ts) { + // Helper lambda to perform the operation on both namespace and UUID + auto doDrop = [this, &ts](auto& idsContainer, auto& changesContainer, const auto& key) { + // Make a copy of the vector stored at 'key' + auto ids = copyIfExists(idsContainer, key); + // An entry could exist already if concurrent writes are performed, keep the latest change + // in that case. + if (!ids.empty() && ids.back().ts == ts) { + ids.back().id = boost::none; + idsContainer = idsContainer.set(key, std::move(ids)); + return; + } + + // Otherwise, push new entry at the end. Timestamp is always increasing + invariant(ids.empty() || ids.back().ts < ts); + // If the catalogId is the same as last entry, there's nothing we need to do. This can + // happen when the catalog is reopened. + if (!ids.empty() && !ids.back().id.has_value()) { + return; + } + + // A drop entry can't be pushed in the container if it's empty. This is because we cannot + // initialize the namespace or UUID with a single drop. + invariant(!ids.empty()); + + // Push the drop at the end our or mapping + ids.push_back({boost::none, ts}); + + // This drop may result in the possibility of cleanup in the future + if (needsCleanup(ids)) { + Timestamp cleanTime = cleanupTime(ids); + changesContainer = changesContainer.insert(key); + _recordCleanupTime(cleanTime); + } + + // Write back the updated mapping into our container + idsContainer = idsContainer.set(key, std::move(ids)); + }; + + // Drop on both namespace and uuid containers + doDrop(_nss, _nssChanges, nss); + doDrop(_uuid, _uuidChanges, uuid); +} + +void HistoricalCatalogIdTracker::_dropNoTimestamp(const NamespaceString& nss, const UUID& uuid) { + // Make sure untimestamped writes have a single entry in mapping. If we're mixing + // timestamped with untimestamped (such as repair). Ignore the untimestamped writes as + // an untimestamped deregister will correspond with an untimestamped register. We should + // leave the mapping as-is in this case. 
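The untimestamped paths (_createNoTimestamp above, _dropNoTimestamp below) keep at most one entry per namespace so that untimestamped register/deregister pairs, such as during repair, cancel out without disturbing any timestamped history. A simplified sketch of that rule, using a std::map in place of the immutable containers:

```cpp
// Untimestamped create keeps a single min-timestamp entry; untimestamped drop only
// erases the mapping when that single entry is all there is.
#include <cassert>
#include <cstdint>
#include <map>
#include <optional>
#include <string>
#include <vector>

struct Entry {
    std::optional<int64_t> id;
    uint64_t ts;  // 0 plays the role of Timestamp::min()
};

using Mapping = std::map<std::string, std::vector<Entry>>;

void createNoTimestamp(Mapping& m, const std::string& nss, int64_t id) {
    // Only add when the namespace is unknown; timestamped history is left untouched.
    if (m.find(nss) == m.end())
        m[nss] = {{id, 0}};
}

void dropNoTimestamp(Mapping& m, const std::string& nss) {
    // Only erase when the mapping is the single untimestamped entry.
    auto it = m.find(nss);
    if (it != m.end() && it->second.size() == 1)
        m.erase(it);
}

int main() {
    Mapping m;
    createNoTimestamp(m, "a.b", 1);
    assert(m.count("a.b") == 1 && m["a.b"].size() == 1);
    dropNoTimestamp(m, "a.b");
    assert(m.count("a.b") == 0);

    // With timestamped history present, the untimestamped drop leaves the mapping as-is.
    m["a.c"] = {{2, 5}, {std::nullopt, 10}};
    dropNoTimestamp(m, "a.c");
    assert(m.count("a.c") == 1);
    return 0;
}
```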
+ + auto doDrop = [](auto& idsContainer, auto& changesContainer, const auto& key) { + const std::vector* ids = idsContainer.find(key); + if (ids && ids->size() == 1) { + // This namespace or UUID was removed due to an untimestamped write, clear entries. + idsContainer = idsContainer.erase(key); + + // Nothing to cleanup after untimestamped write + changesContainer = changesContainer.erase(key); + } + }; + + // Drop on both namespace and uuid containers + doDrop(_nss, _nssChanges, nss); + doDrop(_uuid, _uuidChanges, uuid); +} + +void HistoricalCatalogIdTracker::_renameTimestamp(const NamespaceString& from, + const NamespaceString& to, + Timestamp ts) { + // Make copies of existing mappings on these namespaces. + auto toIds = copyIfExists(_nss, to); + auto fromIds = copyIfExists(_nss, from); + + // First update 'to' mapping. This is similar to a 'create'. + if (!toIds.empty() && toIds.back().ts == ts) { + // An entry could exist already if concurrent writes are performed, keep the latest change + // in that case. + toIds.back().id = fromIds.back().id; + } else { + // Timestamps should always be increasing. + invariant(toIds.empty() || toIds.back().ts < ts); + + // Push to end, we can take the catalogId from 'from'. We don't need to check if timestamp + // needs to be cleaned up as this is equivalent of a 'create'. + toIds.push_back({fromIds.back().id, ts}); + } + + // Then, update 'from' mapping. This is similar to a 'drop'. + if (!fromIds.empty() && fromIds.back().ts == ts) { + // Re-write latest entry if timestamp match (multiple changes occured in this transaction), + // otherwise push at end. + fromIds.back().id = boost::none; + } else { + // Timestamps should always be increasing. + invariant(fromIds.empty() || fromIds.back().ts < ts); + // Push to end and calculate cleanup timestamp. + fromIds.push_back({boost::none, ts}); + if (needsCleanup(fromIds)) { + Timestamp cleanTime = cleanupTime(fromIds); + _nssChanges = std::move(_nssChanges).insert(from); + _recordCleanupTime(cleanTime); + } + } + + // Store updates mappings back into container. + auto writer = _nss.transient(); + writer.set(from, std::move(fromIds)); + writer.set(to, std::move(toIds)); + _nss = writer.persistent(); +} + +void HistoricalCatalogIdTracker::_renameNoTimestamp(const NamespaceString& from, + const NamespaceString& to) { + // We should never perform rename in a mixed-mode environment. 'from' should contain a + // single entry and there should be nothing in 'to' . + const std::vector* fromIds = _nss.find(from); + invariant(fromIds && fromIds->size() == 1); + invariant(!_nss.find(to)); + + auto writer = _nss.transient(); + // Take the last known catalogId from 'from'. + writer.set(to, {{fromIds->back().id, Timestamp::min()}}); + writer.erase(from); + _nss = writer.persistent(); +} + +} // namespace mongo diff --git a/src/mongo/db/catalog/historical_catalogid_tracker.h b/src/mongo/db/catalog/historical_catalogid_tracker.h new file mode 100644 index 0000000000000..491effd7ca874 --- /dev/null +++ b/src/mongo/db/catalog/historical_catalogid_tracker.h @@ -0,0 +1,186 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/record_id.h" +#include "mongo/util/immutable/unordered_map.h" +#include "mongo/util/immutable/unordered_set.h" +#include "mongo/util/uuid.h" + +namespace mongo { +/** + * Data structure to keep track of mappings between namespace or uuid to a catalogId. The mapping is + * maintained for a range of time [oldest, now) to mirror the time range the server can service + * queries. + * + * Uses immutable data structures internally to be cheap to copy. + */ +class HistoricalCatalogIdTracker { +public: + HistoricalCatalogIdTracker(Timestamp oldest = Timestamp::max()) + : _oldestTimestampMaintained(oldest) {} + + /** + * CatalogId with Timestamp + */ + struct TimestampedCatalogId { + boost::optional + id; // none represents non-existing at timestamp (due to drop or rename) + Timestamp ts; + }; + + /** + * Returns the CatalogId for a given 'nss' or 'uuid' at timestamp 'ts'. + */ + struct LookupResult { + enum class Existence { + // Namespace or UUID exists at time 'ts' and catalogId set in 'id'. + kExists, + // Namespace or UUID does not exist at time 'ts'. + kNotExists, + // Namespace or UUID existence at time 'ts' is unknown. The durable catalog must be + // scanned to determine existence. + kUnknown + }; + RecordId id; + Existence result; + }; + + /** + * Returns the CatalogId for a given 'nss' or 'uuid' at timestamp 'ts'. + * + * Timestamp 'none' returns mapping at latest. + */ + LookupResult lookup(const NamespaceString& nss, boost::optional ts) const; + LookupResult lookup(const UUID& uuid, boost::optional ts) const; + + /** + * Register that a namespace/uuid was created with given 'catalogId' at timestamp 'ts'. + * + * Timestamp 'none' indicates that the namespace was created without a timestamp. + */ + void create(const NamespaceString& nss, + const UUID& uuid, + const RecordId& catalogId, + boost::optional ts); + + /** + * Register that a namespace/uuid was dropped at timestamp 'ts'. + * + * Timestamp 'none' indicates that the namespace was dropped without a timestamp. + */ + void drop(const NamespaceString& nss, const UUID& uuid, boost::optional ts); + + /** + * Register that a namespace was renamed at timestamp 'ts'. + * + * Timestamp 'none' indicates that the namespace was renamed without a timestamp. 
+ */ + void rename(const NamespaceString& from, + const NamespaceString& to, + boost::optional ts); + + /** + * Records existence of a namespace at timestamp 'ts' that was previously unknown. + */ + void recordExistingAtTime(const NamespaceString& nss, + const UUID& uuid, + const RecordId& catalogId, + Timestamp ts); + + /** + * Records non-existence of a namespace at timestamp 'ts' that was previously unknown. + */ + void recordNonExistingAtTime(const NamespaceString& nss, Timestamp ts); + void recordNonExistingAtTime(const UUID& uuid, Timestamp ts); + + /** + * Returns true if the structure has space to record non-existence of a namespace/uuid. + */ + bool canRecordNonExisting(const NamespaceString& nss) const; + bool canRecordNonExisting(const UUID& uuid) const; + + /** + * Returns true if a call to 'cleanup' with the given timestemp would perform any cleanup. + */ + bool dirty(Timestamp oldest) const; + + /** + * Performs cleanup of historical data when the oldest timestamp advances. Should be performed + * regularly to free up data for time ranges that are no longer needed for lookups. + */ + void cleanup(Timestamp oldest); + + /** + * Rollback any mappings with larger timestamps than provided stable timestamp. Needs to be + * performed as part of replication rollback. + */ + void rollback(Timestamp stable); + +private: + void _recordCleanupTime(Timestamp ts); + + void _createTimestamp(const NamespaceString& nss, + const UUID& uuid, + const RecordId& catalogId, + Timestamp ts); + void _createNoTimestamp(const NamespaceString& nss, + const UUID& uuid, + const RecordId& catalogId); + void _dropTimestamp(const NamespaceString& nss, const UUID& uuid, Timestamp ts); + void _dropNoTimestamp(const NamespaceString& nss, const UUID& uuid); + void _renameTimestamp(const NamespaceString& from, const NamespaceString& to, Timestamp ts); + void _renameNoTimestamp(const NamespaceString& from, const NamespaceString& to); + + + // CatalogId mappings for all known namespaces and UUIDs for the CollectionCatalog. The vector + // is sorted on timestamp. UUIDs will have at most two entries. One for the create and another + // for the drop. UUIDs stay the same across collection renames. + immutable::unordered_map> _nss; + immutable::unordered_map, UUID::Hash> _uuid; + // Set of namespaces and UUIDs that need cleanup when the oldest timestamp advances + // sufficiently. + immutable::unordered_set _nssChanges; + immutable::unordered_set _uuidChanges; + // Point at which the oldest timestamp need to advance for there to be any catalogId namespace + // that can be cleaned up + Timestamp _lowestTimestampForCleanup = Timestamp::max(); + // The oldest timestamp at which the tracker maintains mappings. Anything older than this is + // unknown. + Timestamp _oldestTimestampMaintained; +}; + +} // namespace mongo diff --git a/src/mongo/db/catalog/historical_catalogid_tracker_test.cpp b/src/mongo/db/catalog/historical_catalogid_tracker_test.cpp new file mode 100644 index 0000000000000..b0dc016585118 --- /dev/null +++ b/src/mongo/db/catalog/historical_catalogid_tracker_test.cpp @@ -0,0 +1,1075 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/catalog/historical_catalogid_tracker.h" + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + +namespace mongo { +namespace { + +TEST(HistoricalCatalogIdTrackerTest, Create) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuid = UUID::gen(); + RecordId rid{1}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create entry + tracker.create(nss, uuid, rid, Timestamp(1, 2)); + + // Lookup without timestamp returns latest catalogId + ASSERT_EQ(tracker.lookup(nss, boost::none).id, rid); + ASSERT_EQ(tracker.lookup(uuid, boost::none).id, rid); + // Lookup before create returns unknown if looking before oldest + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + // Lookup before create returns not exists if looking after oldest + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 1)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 1)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup at create returns catalogId + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 2)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 2)).id, rid); + // Lookup after create returns catalogId + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 3)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 3)).id, rid); +} + +TEST(HistoricalCatalogIdTrackerTest, Drop) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuid = UUID::gen(); + RecordId rid{1}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create and drop collection. 
We have a time window where the namespace exists + tracker.create(nss, uuid, rid, Timestamp(1, 5)); + tracker.drop(nss, uuid, Timestamp(1, 10)); + + // Lookup without timestamp returns none + ASSERT_EQ(tracker.lookup(nss, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup before create and oldest returns unknown + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + // Lookup before create returns not exists + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 4)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 4)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup at create returns catalogId + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 5)).id, rid); + // Lookup after create returns catalogId + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 6)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 6)).id, rid); + // Lookup at drop returns none + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 10)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 10)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup after drop returns none + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 20)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 20)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, Rename) { + NamespaceString from = NamespaceString::createNamespaceString_forTest("a.b"); + NamespaceString to = NamespaceString::createNamespaceString_forTest("a.c"); + UUID uuid = UUID::gen(); + RecordId rid{1}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create and rename collection. We have two windows where the collection exists but for + // different namespaces + tracker.create(from, uuid, rid, Timestamp(1, 5)); + tracker.rename(from, to, Timestamp(1, 10)); + + // Lookup without timestamp on 'from' returns none. 
By 'uuid' returns catalogId + ASSERT_EQ(tracker.lookup(from, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, boost::none).id, rid); + // Lookup before create and oldest returns unknown + ASSERT_EQ(tracker.lookup(from, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + // Lookup before create returns not exists + ASSERT_EQ(tracker.lookup(from, Timestamp(1, 4)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 4)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup at create returns catalogId + ASSERT_EQ(tracker.lookup(from, Timestamp(1, 5)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 5)).id, rid); + // Lookup after create returns catalogId + ASSERT_EQ(tracker.lookup(from, Timestamp(1, 6)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 6)).id, rid); + // Lookup at rename on 'from' returns none. By 'uuid' returns catalogId + ASSERT_EQ(tracker.lookup(from, Timestamp(1, 10)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 10)).id, rid); + // Lookup after rename on 'from' returns none. By 'uuid' returns catalogId + ASSERT_EQ(tracker.lookup(from, Timestamp(1, 20)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 20)).id, rid); + + // Lookup without timestamp on 'to' returns catalogId + ASSERT_EQ(tracker.lookup(to, boost::none).id, rid); + ASSERT_EQ(tracker.lookup(uuid, boost::none).id, rid); + // Lookup before rename and oldest on 'to' returns unknown + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + // Lookup before rename on 'to' returns not exists. By 'uuid' returns catalogId + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 9)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 9)).id, rid); + // Lookup at rename on 'to' returns catalogId + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 10)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 10)).id, rid); + // Lookup after rename on 'to' returns catalogId + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 20)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 20)).id, rid); +} + +TEST(HistoricalCatalogIdTrackerTest, RenameDropTarget) { + NamespaceString from = NamespaceString::createNamespaceString_forTest("a.b"); + NamespaceString to = NamespaceString::createNamespaceString_forTest("a.c"); + UUID uuid = UUID::gen(); + UUID originalUUID = UUID::gen(); + RecordId rid{1}; + RecordId originalToRid{2}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create collections. The 'to' namespace will exist for one collection from Timestamp(1, 6) + // until it is dropped by the rename at Timestamp(1, 10), after which the 'to' namespace will + // correspond to the renamed collection. 
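The Create, Drop, and Rename tests above pin down the point-in-time lookup contract: a timestamp older than the oldest timestamp resolves to kUnknown, a timestamp before the create or at/after the drop resolves to kNotExists, and anything in between yields the catalogId of the latest change at or before the lookup timestamp (rename moves the namespace mapping, while lookups by UUID keep following the collection). Below is a minimal standalone sketch of that contract, assuming nothing beyond what the assertions state; it is not MongoDB's HistoricalCatalogIdTracker and every name in it is hypothetical.

```cpp
// Toy model only -- not mongo::HistoricalCatalogIdTracker. All names are hypothetical.
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>
#include <string>

enum class Existence { kExists, kNotExists, kUnknown };

struct ToyTracker {
    uint64_t oldest;  // history older than this has been cleaned up
    // namespace -> (timestamp -> catalogId); nullopt means "dropped at this timestamp"
    std::map<std::string, std::map<uint64_t, std::optional<int>>> history;

    Existence lookup(const std::string& ns, uint64_t ts) const {
        if (ts < oldest)
            return Existence::kUnknown;  // nothing is known before the oldest timestamp
        auto nsIt = history.find(ns);
        if (nsIt == history.end())
            return Existence::kNotExists;
        const auto& byTime = nsIt->second;
        auto it = byTime.upper_bound(ts);  // first change strictly after ts
        if (it == byTime.begin())
            return Existence::kNotExists;  // no change had happened yet at ts
        --it;                              // latest change at or before ts
        return it->second ? Existence::kExists : Existence::kNotExists;
    }
};

int main() {
    ToyTracker t{/*oldest=*/1, {}};
    t.history["a.b"][2] = 1;              // create at (1, 2) with catalogId 1
    t.history["a.b"][10] = std::nullopt;  // drop at (1, 10)
    std::cout << (t.lookup("a.b", 0) == Existence::kUnknown) << '\n';    // before oldest
    std::cout << (t.lookup("a.b", 1) == Existence::kNotExists) << '\n';  // before create
    std::cout << (t.lookup("a.b", 5) == Existence::kExists) << '\n';     // while it exists
    std::cout << (t.lookup("a.b", 10) == Existence::kNotExists) << '\n'; // at the drop
}
```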
+ tracker.create(from, uuid, rid, Timestamp(1, 5)); + tracker.create(to, originalUUID, originalToRid, Timestamp(1, 6)); + // Drop and rename with the same timestamp, this is the same as dropTarget=true + tracker.drop(to, originalUUID, Timestamp(1, 10)); + tracker.rename(from, to, Timestamp(1, 10)); + + // Lookup without timestamp on 'to' and 'uuid' returns latest catalog id. By 'originalUUID' + // returns not exists as the target was dropped. + ASSERT_EQ(tracker.lookup(to, boost::none).id, rid); + ASSERT_EQ(tracker.lookup(uuid, boost::none).id, rid); + ASSERT_EQ(tracker.lookup(originalUUID, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup before rename and oldest on 'to' returns unknown + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(originalUUID, Timestamp(1, 0)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + // Lookup before rename on 'to' returns the original rid + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 9)).id, originalToRid); + ASSERT_EQ(tracker.lookup(originalUUID, Timestamp(1, 9)).id, originalToRid); + // Lookup before rename on 'from' returns the rid + ASSERT_EQ(tracker.lookup(from, Timestamp(1, 9)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 9)).id, rid); + // Lookup at rename timestamp on 'to' and 'uuid' returns catalogId + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 10)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 10)).id, rid); + // Lookup at rename timestamp on 'originalUUID' returns not exists as it was dropped during the + // rename. + ASSERT_EQ(tracker.lookup(originalUUID, Timestamp(1, 10)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup after rename on 'to' and 'uuid' returns catalogId + ASSERT_EQ(tracker.lookup(to, Timestamp(1, 20)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 20)).id, rid); + // Lookup after rename timestamp on 'originalUUID' returns not exists as it was dropped during + // the rename. + ASSERT_EQ(tracker.lookup(originalUUID, Timestamp(1, 20)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, DropCreate) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID firstUUID = UUID::gen(); + UUID secondUUID = UUID::gen(); + RecordId rid1{1}; + RecordId rid2{2}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create, drop and recreate collection on the same namespace. We have different catalogId. 
+ tracker.create(nss, firstUUID, rid1, Timestamp(1, 5)); + tracker.drop(nss, firstUUID, Timestamp(1, 10)); + tracker.create(nss, secondUUID, rid2, Timestamp(1, 15)); + + // Lookup without timestamp returns latest catalogId + ASSERT_EQ(tracker.lookup(nss, boost::none).id, rid2); + ASSERT_EQ(tracker.lookup(secondUUID, boost::none).id, rid2); + ASSERT_EQ(tracker.lookup(firstUUID, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup before first create returns not exists + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 4)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 4)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 4)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 4)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup at first create returns first catalogId + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).id, rid1); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).id, rid1); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup after first create returns first catalogId + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 6)).id, rid1); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 6)).id, rid1); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 6)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup at drop returns none + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 10)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 10)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 10)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 10)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup after drop returns none + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 13)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 13)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 13)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 13)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup at second create returns second catalogId + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).id, rid2); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).id, rid2); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup after second create returns second catalogId + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 20)).id, rid2); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 20)).id, rid2); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 20)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, CleanupEqDrop) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID firstUUID = 
UUID::gen(); + UUID secondUUID = UUID::gen(); + RecordId rid1{1}; + RecordId rid2{2}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create collection and verify we have nothing to cleanup + tracker.create(nss, firstUUID, rid1, Timestamp(1, 5)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + + // Drop collection and verify we have nothing to cleanup as long as the oldest timestamp is + // before the drop + tracker.drop(nss, firstUUID, Timestamp(1, 10)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 5))); + ASSERT_TRUE(tracker.dirty(Timestamp(1, 10))); + + // Create new collection and nothing changed with answers to needsCleanupForOldestTimestamp. + tracker.create(nss, secondUUID, rid2, Timestamp(1, 15)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 5))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 7))); + ASSERT_TRUE(tracker.dirty(Timestamp(1, 10))); + + // We can lookup the old catalogId before we advance the oldest timestamp and cleanup + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + + // Cleanup at drop timestamp, advance the oldest timestamp + tracker.cleanup(Timestamp(1, 10)); + + // After cleanup, we cannot find the old catalogId anymore. Also verify that we don't need + // anymore cleanup + ASSERT_FALSE(tracker.dirty(Timestamp(1, 10))); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, CleanupGtDrop) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID firstUUID = UUID::gen(); + UUID secondUUID = UUID::gen(); + RecordId rid1{1}; + RecordId rid2{2}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create collection and verify we have nothing to cleanup + tracker.create(nss, firstUUID, rid1, Timestamp(1, 5)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + + // Drop collection and verify we have nothing to cleanup as long as the oldest timestamp is + // before the drop + tracker.drop(nss, firstUUID, Timestamp(1, 10)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 5))); + ASSERT_TRUE(tracker.dirty(Timestamp(1, 10))); + + // Create new collection and nothing changed with answers to 
needsCleanupForOldestTimestamp. + tracker.create(nss, secondUUID, rid2, Timestamp(1, 15)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 5))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 7))); + ASSERT_TRUE(tracker.dirty(Timestamp(1, 12))); + + // We can lookup the old catalogId before we advance the oldest timestamp and cleanup + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + + // Cleanup after the drop timestamp + tracker.cleanup(Timestamp(1, 12)); + + // After cleanup, we cannot find the old catalogId anymore. Also verify that we don't need + // anymore cleanup + ASSERT_FALSE(tracker.dirty(Timestamp(1, 12))); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, CleanupGtRecreate) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID firstUUID = UUID::gen(); + UUID secondUUID = UUID::gen(); + RecordId rid1{1}; + RecordId rid2{2}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create collection and verify we have nothing to cleanup + tracker.create(nss, firstUUID, rid1, Timestamp(1, 5)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + + // Drop collection and verify we have nothing to cleanup as long as the oldest timestamp is + // before the drop + tracker.drop(nss, firstUUID, Timestamp(1, 10)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 5))); + ASSERT_TRUE(tracker.dirty(Timestamp(1, 10))); + + // Create new collection and nothing changed with answers to needsCleanupForOldestTimestamp. 
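The Cleanup* tests above exercise dirty() and cleanup() against an advancing oldest timestamp: once no lookup at or after the oldest timestamp can reach an older history entry, that entry is removable, dirty() reports whether cleanup() would remove anything, and after cleanup() lookups behind the oldest timestamp answer kUnknown. A rough standalone sketch of that pruning rule follows; the names are hypothetical and this is not the real tracker.

```cpp
// Hypothetical sketch of the cleanup/dirty idea only, not MongoDB's implementation.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>
#include <optional>

// One namespace's history: timestamp -> catalogId (nullopt = dropped at that time).
using History = std::map<uint64_t, std::optional<int>>;

// A lookup at ts >= oldest only ever sees the latest change at or before ts, so every
// change strictly older than that one becomes unreachable once 'oldest' has advanced.
bool dirty(const History& h, uint64_t oldest) {
    return std::distance(h.begin(), h.upper_bound(oldest)) > 1;
}

void cleanup(History& h, uint64_t oldest) {
    auto it = h.upper_bound(oldest);
    if (it == h.begin())
        return;                              // no change at or before 'oldest' yet
    h.erase(h.begin(), std::prev(it));       // keep only the last change visible at 'oldest'
}

int main() {
    History h;
    h[5] = 1;              // create at (1, 5)
    h[10] = std::nullopt;  // drop at (1, 10)
    h[15] = 2;             // re-create at (1, 15)
    std::cout << dirty(h, 5) << ' ' << dirty(h, 10) << '\n';  // 0 1
    cleanup(h, 10);        // oldest timestamp advanced to (1, 10)
    std::cout << dirty(h, 10) << ' ' << h.size() << '\n';     // 0 2
}
```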
+ tracker.create(nss, secondUUID, rid2, Timestamp(1, 15)); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 1))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 5))); + ASSERT_FALSE(tracker.dirty(Timestamp(1, 7))); + ASSERT_TRUE(tracker.dirty(Timestamp(1, 20))); + + // We can lookup the old catalogId before we advance the oldest timestamp and cleanup + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + + // Cleanup after the recreate timestamp + tracker.cleanup(Timestamp(1, 20)); + + // After cleanup, we cannot find the old catalogId anymore. Also verify that we don't need + // anymore cleanup + ASSERT_FALSE(tracker.dirty(Timestamp(1, 20))); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); +} + +TEST(HistoricalCatalogIdTrackerTest, CleanupMultiple) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID firstUUID = UUID::gen(); + UUID secondUUID = UUID::gen(); + UUID thirdUUID = UUID::gen(); + UUID fourthUUID = UUID::gen(); + RecordId rid1{1}; + RecordId rid2{2}; + RecordId rid3{3}; + RecordId rid4{4}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create and drop multiple namespace on the same namespace + tracker.create(nss, firstUUID, rid1, Timestamp(1, 5)); + tracker.drop(nss, firstUUID, Timestamp(1, 10)); + tracker.create(nss, secondUUID, rid2, Timestamp(1, 15)); + tracker.drop(nss, secondUUID, Timestamp(1, 20)); + tracker.create(nss, thirdUUID, rid3, Timestamp(1, 25)); + tracker.drop(nss, thirdUUID, Timestamp(1, 30)); + tracker.create(nss, fourthUUID, rid4, Timestamp(1, 35)); + tracker.drop(nss, fourthUUID, Timestamp(1, 40)); + + // Lookup can find all four collections + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(thirdUUID, Timestamp(1, 25)).result, + 
HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(fourthUUID, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + + // Cleanup oldest + tracker.cleanup(Timestamp(1, 10)); + + // Lookup can find the three remaining collections + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(thirdUUID, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(fourthUUID, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + + // Cleanup + tracker.cleanup(Timestamp(1, 21)); + + // Lookup can find the two remaining collections + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(thirdUUID, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(fourthUUID, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + + // Cleanup + tracker.cleanup(Timestamp(1, 32)); + + // Lookup can find the last remaining collections + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(thirdUUID, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + 
ASSERT_EQ(tracker.lookup(fourthUUID, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + + // Cleanup + tracker.cleanup(Timestamp(1, 50)); + + // Lookup now result in unknown as the oldest timestamp has advanced where mapping has been + // removed + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(thirdUUID, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(fourthUUID, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); +} + +TEST(HistoricalCatalogIdTrackerTest, CleanupMultipleSingleCall) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID firstUUID = UUID::gen(); + UUID secondUUID = UUID::gen(); + UUID thirdUUID = UUID::gen(); + UUID fourthUUID = UUID::gen(); + RecordId rid1{1}; + RecordId rid2{2}; + RecordId rid3{3}; + RecordId rid4{4}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create and drop multiple namespace on the same namespace + tracker.create(nss, firstUUID, rid1, Timestamp(1, 5)); + tracker.drop(nss, firstUUID, Timestamp(1, 10)); + tracker.create(nss, secondUUID, rid2, Timestamp(1, 15)); + tracker.drop(nss, secondUUID, Timestamp(1, 20)); + tracker.create(nss, thirdUUID, rid3, Timestamp(1, 25)); + tracker.drop(nss, thirdUUID, Timestamp(1, 30)); + tracker.create(nss, fourthUUID, rid4, Timestamp(1, 35)); + tracker.drop(nss, fourthUUID, Timestamp(1, 40)); + + // Lookup can find all four collections + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(thirdUUID, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(fourthUUID, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + + // Cleanup all + tracker.cleanup(Timestamp(1, 50)); + + // Lookup now result in unknown as the oldest timestamp has advanced where mapping has been + // removed + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 5)).result, + 
HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(firstUUID, Timestamp(1, 5)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(secondUUID, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(thirdUUID, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(fourthUUID, Timestamp(1, 35)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); +} + +TEST(HistoricalCatalogIdTrackerTest, Rollback) { + NamespaceString a = NamespaceString::createNamespaceString_forTest("b.a"); + NamespaceString b = NamespaceString::createNamespaceString_forTest("b.b"); + NamespaceString c = NamespaceString::createNamespaceString_forTest("b.c"); + NamespaceString d = NamespaceString::createNamespaceString_forTest("b.d"); + NamespaceString e = NamespaceString::createNamespaceString_forTest("b.e"); + + UUID firstUUID = UUID::gen(); + UUID secondUUID = UUID::gen(); + UUID thirdUUID = UUID::gen(); + UUID fourthUUID = UUID::gen(); + UUID fifthUUID = UUID::gen(); + UUID sixthUUID = UUID::gen(); + RecordId rid1{1}; + RecordId rid2{2}; + RecordId rid3{3}; + RecordId rid4{4}; + RecordId rid5{5}; + RecordId rid6{6}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create and drop multiple namespace on the same namespace + tracker.create(a, firstUUID, rid1, Timestamp(1, 1)); + tracker.drop(a, firstUUID, Timestamp(1, 2)); + tracker.create(a, secondUUID, rid2, Timestamp(1, 3)); + tracker.create(b, thirdUUID, rid3, Timestamp(1, 5)); + tracker.create(c, fourthUUID, rid4, Timestamp(1, 7)); + tracker.create(d, fifthUUID, rid5, Timestamp(1, 8)); + tracker.create(e, sixthUUID, rid6, Timestamp(1, 9)); + tracker.drop(b, thirdUUID, Timestamp(1, 10)); + + // Rollback to Timestamp(1, 8) + tracker.rollback(Timestamp(1, 8)); + + ASSERT_EQ(tracker.lookup(e, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(firstUUID, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(a, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(secondUUID, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(b, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(thirdUUID, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(c, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(fourthUUID, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(d, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(fifthUUID, boost::none).result, + 
HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(e, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(sixthUUID, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, Insert) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuid = UUID::gen(); + RecordId rid{1}; + + // Simulate startup where we have a range [oldest, stable] by initializing the oldest timestamp + // to something high and then insert mappings behind it where the range is unknown. + HistoricalCatalogIdTracker tracker(Timestamp(1, 40)); + + // Record that the collection is known to exist + tracker.recordExistingAtTime(nss, uuid, rid, Timestamp(1, 17)); + + // Lookups before the inserted timestamp is still unknown + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 11)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 11)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + + // Lookups at or after the inserted timestamp is found + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 17)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 17)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 17)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 17)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 19)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 19)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 19)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 19)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 25)).id, rid); + + // Record that the collection is known to exist at an even earlier timestamp + tracker.recordExistingAtTime(nss, uuid, rid, Timestamp(1, 12)); + + // We should now have extended the range from Timestamp(1, 17) to Timestamp(1, 12) + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 12)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 12)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 12)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 12)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 16)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 16)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 16)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 16)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 17)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 17)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + 
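Stepping back to the Rollback test above: rolling back behaves as if every catalog change newer than the requested timestamp never happened, so collections created after it disappear and drops after it are undone. A rough standalone model under that assumption (hypothetical names, not the real implementation):

```cpp
// Rough model assuming rollback(stable) discards every change newer than 'stable'.
#include <cstdint>
#include <iostream>
#include <iterator>
#include <map>
#include <optional>
#include <string>

using History = std::map<uint64_t, std::optional<int>>;  // ts -> catalogId (nullopt = drop)

void rollback(std::map<std::string, History>& catalog, uint64_t stable) {
    for (auto it = catalog.begin(); it != catalog.end();) {
        History& h = it->second;
        h.erase(h.upper_bound(stable), h.end());           // forget changes newer than 'stable'
        it = h.empty() ? catalog.erase(it) : std::next(it);
    }
}

int main() {
    std::map<std::string, History> catalog;
    catalog["b.b"][5] = 3;              // created at (1, 5)
    catalog["b.b"][10] = std::nullopt;  // dropped at (1, 10)
    catalog["b.e"][9] = 6;              // created at (1, 9)
    rollback(catalog, 8);               // roll back to the stable timestamp (1, 8)
    std::cout << catalog.count("b.e") << '\n';                         // 0: create at 9 is gone
    std::cout << catalog["b.b"].rbegin()->second.has_value() << '\n';  // 1: drop at 10 is undone
}
```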
ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 17)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 17)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 19)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 19)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 19)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 19)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 25)).id, rid); + + // Record that the collection is unknown to exist at an later timestamp + tracker.recordNonExistingAtTime(nss, Timestamp(1, 25)); + tracker.recordNonExistingAtTime(uuid, Timestamp(1, 25)); + + // Check the entries, most didn't change + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 17)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 17)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 17)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 17)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 19)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 19)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 19)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 19)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 22)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 22)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 22)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 22)).id, rid); + // At Timestamp(1, 25) we now return kNotExists + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // But next timestamp returns unknown + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 26)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 26)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + + // Record that the collection is unknown to exist at the timestamp + tracker.recordNonExistingAtTime(nss, Timestamp(1, 26)); + tracker.recordNonExistingAtTime(uuid, Timestamp(1, 26)); + + // We should not have re-written the existing entry at Timestamp(1, 26) + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 17)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 17)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 17)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 17)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 19)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + 
ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 19)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 19)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 19)).id, rid); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 22)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 22)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 22)).id, rid); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 22)).id, rid); + // At Timestamp(1, 25) we now return kNotExists + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 25)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // But next timestamp returns unknown + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 26)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 26)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 27)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 27)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + + // Clean up, check so we are back to the original state + tracker.cleanup(Timestamp(1, 41)); + + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); +} + +TEST(HistoricalCatalogIdTrackerTest, InsertUnknown) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuid = UUID::gen(); + + // Simulate startup where we have a range [oldest, stable] by initializing the oldest timestamp + // to something high and then insert mappings behind it where the range is unknown. 
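The Insert and InsertUnknown tests cover exactly this backfilling: recordExistingAtTime() and recordNonExistingAtTime() patch knowledge in behind the oldest timestamp. Per the assertions above, an "exists" record answers lookups from its timestamp forward, while a "does not exist" record answers only for its own timestamp and leaves later timestamps unknown. The standalone sketch below encodes that reading; how the real tracker generalizes beyond these assertions is an assumption, and none of the names are MongoDB's.

```cpp
// Illustrative model of backfilling history behind the oldest timestamp; the
// generalization is an assumption chosen to match the assertions above.
#include <cstdint>
#include <iostream>
#include <map>
#include <optional>

enum class Existence { kExists, kNotExists, kUnknown };

struct Backfill {
    // Timestamps at or after 'oldest' would be served by the normal history (omitted here).
    uint64_t oldest;
    std::map<uint64_t, std::optional<int>> recorded;  // ts -> catalogId, nullopt = "did not exist"

    void recordExistingAtTime(uint64_t ts, int catalogId) { recorded[ts] = catalogId; }
    void recordNonExistingAtTime(uint64_t ts) { recorded[ts] = std::nullopt; }

    Existence lookup(uint64_t ts) const {
        auto it = recorded.upper_bound(ts);
        if (it == recorded.begin())
            return Existence::kUnknown;  // nothing recorded at or before ts
        --it;
        if (it->second)
            return Existence::kExists;   // an "exists" record extends forward
        // A "does not exist" record only answers for its own timestamp.
        return ts == it->first ? Existence::kNotExists : Existence::kUnknown;
    }
};

int main() {
    Backfill b{/*oldest=*/40, {}};
    b.recordExistingAtTime(17, 1);
    std::cout << (b.lookup(11) == Existence::kUnknown) << '\n';   // before any record
    std::cout << (b.lookup(25) == Existence::kExists) << '\n';    // covered by record at 17
    b.recordNonExistingAtTime(25);
    std::cout << (b.lookup(25) == Existence::kNotExists) << '\n'; // capped at 25
    std::cout << (b.lookup(26) == Existence::kUnknown) << '\n';   // past the cap: unknown again
}
```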
+ HistoricalCatalogIdTracker tracker(Timestamp(1, 40)); + + // Reading before the oldest is unknown + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kUnknown); + + // Record that the collection is unknown to exist at the timestamp + tracker.recordNonExistingAtTime(nss, Timestamp(1, 15)); + tracker.recordNonExistingAtTime(uuid, Timestamp(1, 15)); + + // Lookup should now be not existing + ASSERT_EQ(tracker.lookup(nss, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + // Lookup should now be not existing + ASSERT_EQ(tracker.lookup(uuid, Timestamp(1, 15)).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, NoTimestamp) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuid = UUID::gen(); + RecordId rid{1}; + + HistoricalCatalogIdTracker tracker; + + // Create a collection on the namespace and confirm that we can lookup + tracker.create(nss, uuid, rid, boost::none); + ASSERT_EQ(tracker.lookup(nss, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + + // Drop the collection and confirm it is also removed from mapping + tracker.drop(nss, uuid, boost::none); + ASSERT_EQ(tracker.lookup(nss, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, NoTimestampRename) { + NamespaceString a = NamespaceString::createNamespaceString_forTest("a.a"); + NamespaceString b = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuid = UUID::gen(); + RecordId rid{1}; + + HistoricalCatalogIdTracker tracker; + + // Create a collection on the namespace and confirm that we can lookup + tracker.create(a, uuid, rid, boost::none); + ASSERT_EQ(tracker.lookup(a, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(b, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + + // Rename the collection and check lookup behavior + tracker.rename(a, b, boost::none); + ASSERT_EQ(tracker.lookup(a, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(b, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(tracker.lookup(uuid, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + + + // Drop the collection and confirm it is also removed from mapping + tracker.drop(b, uuid, boost::none); + ASSERT_EQ(tracker.lookup(a, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(b, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuid, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, 
NoTimestampRenameDropTarget) { + NamespaceString a = NamespaceString::createNamespaceString_forTest("a.a"); + NamespaceString b = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuidA = UUID::gen(); + UUID uuidB = UUID::gen(); + RecordId ridA{1}; + RecordId ridB{2}; + + HistoricalCatalogIdTracker tracker; + + // Create collections on the namespaces and confirm that we can lookup + tracker.create(a, uuidA, ridA, boost::none); + tracker.create(b, uuidB, ridB, boost::none); + auto [aId, aResult] = tracker.lookup(a, boost::none); + auto [bId, bResult] = tracker.lookup(b, boost::none); + ASSERT_EQ(aResult, HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(bResult, HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(aResult, tracker.lookup(uuidA, boost::none).result); + ASSERT_EQ(bResult, tracker.lookup(uuidB, boost::none).result); + ASSERT_EQ(aId, tracker.lookup(uuidA, boost::none).id); + ASSERT_EQ(bId, tracker.lookup(uuidB, boost::none).id); + + // Rename the collection and check lookup behavior + tracker.drop(b, uuidB, boost::none); + tracker.rename(a, b, boost::none); + auto [aIdAfter, aResultAfter] = tracker.lookup(a, boost::none); + auto [bIdAfter, bResultAfter] = tracker.lookup(b, boost::none); + ASSERT_EQ(aResultAfter, HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(bResultAfter, HistoricalCatalogIdTracker::LookupResult::Existence::kExists); + ASSERT_EQ(bResultAfter, tracker.lookup(uuidA, boost::none).result); + ASSERT_EQ(bIdAfter, tracker.lookup(uuidA, boost::none).id); + // Verify that the the recordId on b is now what was on a. We performed a rename with + // dropTarget=true. + ASSERT_EQ(aId, bIdAfter); + + // Drop the collection and confirm it is also removed from mapping + tracker.drop(b, uuidA, boost::none); + ASSERT_EQ(tracker.lookup(a, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuidA, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(b, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); + ASSERT_EQ(tracker.lookup(uuidB, boost::none).result, + HistoricalCatalogIdTracker::LookupResult::Existence::kNotExists); +} + +TEST(HistoricalCatalogIdTrackerTest, CleanupAfterMixedMode1) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuid1 = UUID::gen(); + RecordId rid1{1}; + UUID uuid2 = UUID::gen(); + RecordId rid2{2}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // Create and drop collection with timestamp, this leaves a history that may be cleaned up at + // the drop timestamp + tracker.create(nss, uuid1, rid1, Timestamp(1, 2)); + tracker.drop(nss, uuid1, Timestamp(1, 3)); + + // Re-create and drop without timestamp + tracker.create(nss, uuid2, rid2, boost::none); + tracker.drop(nss, uuid2, boost::none); + + // Try and cleanup for the namespace that was previously marked for needing cleanup + tracker.cleanup(Timestamp(1, 2)); +} + +TEST(HistoricalCatalogIdTrackerTest, CleanupAfterMixedMode2) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); + UUID uuid1 = UUID::gen(); + RecordId rid1{1}; + UUID uuid2 = UUID::gen(); + RecordId rid2{2}; + UUID uuid3 = UUID::gen(); + RecordId rid3{3}; + + // Initialize the oldest timestamp to (1, 1) + HistoricalCatalogIdTracker tracker(Timestamp(1, 1)); + + // 
Create and drop collection with timestamp, this leaves a history that may be cleaned up at + // the drop timestamp + tracker.create(nss, uuid1, rid1, Timestamp(1, 2)); + tracker.drop(nss, uuid1, Timestamp(1, 3)); + + // Re-create and drop without timestamp + tracker.create(nss, uuid2, rid2, boost::none); + tracker.drop(nss, uuid2, boost::none); + + // Create namespace again, will have a single entry and must therefore not require cleanup + tracker.create(nss, uuid3, rid3, Timestamp(1, 5)); + + // Try and cleanup for the namespace that was previously marked for needing cleanup + tracker.cleanup(Timestamp(1, 2)); +} + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/catalog/index_build_block.cpp b/src/mongo/db/catalog/index_build_block.cpp index 584331ef3ea7f..1ffaf46789629 100644 --- a/src/mongo/db/catalog/index_build_block.cpp +++ b/src/mongo/db/catalog/index_build_block.cpp @@ -28,25 +28,40 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/catalog/index_build_block.h" - -#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/aggregated_index_usage_tracker.h" #include "mongo/db/audit.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/index_build_block.h" #include "mongo/db/catalog/index_key_validate.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/collection_index_usage_tracker.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/skipped_record_tracker.h" +#include "mongo/db/index_names.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/collection_index_usage_tracker_decoration.h" #include "mongo/db/query/collection_query_info.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/ttl_collection_cache.h" -#include "mongo/db/vector_clock.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -54,8 +69,6 @@ namespace mongo { -class IndexCatalog; - IndexBuildBlock::IndexBuildBlock(const NamespaceString& nss, const BSONObj& spec, IndexBuildMethod method, @@ -72,7 +85,7 @@ void IndexBuildBlock::_completeInit(OperationContext* opCtx, Collection* collect // Register this index with the CollectionQueryInfo to regenerate the cache. This way, updates // occurring while an index is being build in the background will be aware of whether or not // they need to modify any indexes. 
- auto desc = getEntry(opCtx, collection)->descriptor(); + auto desc = getEntry(opCtx, CollectionPtr(collection))->descriptor(); CollectionQueryInfo::get(collection).rebuildIndexData(opCtx, CollectionPtr(collection)); CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations()) .registerIndex(desc->indexName(), @@ -91,16 +104,14 @@ Status IndexBuildBlock::initForResume(OperationContext* opCtx, IndexBuildPhaseEnum phase) { _indexName = _spec.getStringField("name").toString(); - auto descriptor = collection->getIndexCatalog()->findIndexByName( + auto writableEntry = collection->getIndexCatalog()->getWritableEntryByName( opCtx, _indexName, IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished); - auto indexCatalogEntry = descriptor->getEntry(); - uassert(4945000, "Index catalog entry not found while attempting to resume index build", - indexCatalogEntry); + writableEntry); uassert( 4945001, "Cannot resume a non-hybrid index build", _method == IndexBuildMethod::kHybrid); @@ -111,19 +122,19 @@ Status IndexBuildBlock::initForResume(OperationContext* opCtx, opCtx, collection->ns(), collection->getCollectionOptions(), - descriptor, - indexCatalogEntry->getIdent()); + writableEntry->descriptor(), + writableEntry->getIdent()); if (!status.isOK()) return status; } _indexBuildInterceptor = std::make_unique(opCtx, - indexCatalogEntry, + writableEntry, stateInfo.getSideWritesTable(), stateInfo.getDuplicateKeyTrackerTable(), stateInfo.getSkippedRecordTrackerTable()); - indexCatalogEntry->setIndexBuildInterceptor(_indexBuildInterceptor.get()); + writableEntry->setIndexBuildInterceptor(_indexBuildInterceptor.get()); _completeInit(opCtx, collection); @@ -136,10 +147,9 @@ Status IndexBuildBlock::init(OperationContext* opCtx, Collection* collection, bo // need this first for names, etc... BSONObj keyPattern = _spec.getObjectField("key"); - auto descriptor = - std::make_unique(IndexNames::findPluginName(keyPattern), _spec); + auto descriptor = IndexDescriptor(IndexNames::findPluginName(keyPattern), _spec); - _indexName = descriptor->indexName(); + _indexName = descriptor.indexName(); // Since the index build block is being initialized, the index build for _indexName is // beginning. Accordingly, emit an audit event indicating this. @@ -162,7 +172,7 @@ Status IndexBuildBlock::init(OperationContext* opCtx, Collection* collection, bo // Setup on-disk structures. We skip this during startup recovery for unfinished indexes as // everything is already in-place. 
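A recurring change in the index_build_block.cpp hunks above is replacing the findIndexByName() + getEntry() pattern with getWritableEntryByName(), so call sites that intend to mutate an index catalog entry obtain a writable one directly from the index catalog. Roughly, side by side (excerpted from the hunks above; not a compilable unit on its own):

```cpp
// Before: fetch a descriptor, then reach through it for the entry.
auto descriptor = collection->getIndexCatalog()->findIndexByName(
    opCtx, _indexName,
    IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished);
auto indexCatalogEntry = descriptor->getEntry();

// After: ask the catalog for the writable entry in one step.
auto writableEntry = collection->getIndexCatalog()->getWritableEntryByName(
    opCtx, _indexName,
    IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished);
```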
Status status = collection->prepareForIndexBuild( - opCtx, descriptor.get(), _buildUUID, isBackgroundSecondaryBuild); + opCtx, &descriptor, _buildUUID, isBackgroundSecondaryBuild); if (!status.isOK()) return status; } @@ -170,9 +180,8 @@ Status IndexBuildBlock::init(OperationContext* opCtx, Collection* collection, bo auto indexCatalog = collection->getIndexCatalog(); IndexCatalogEntry* indexCatalogEntry = nullptr; if (forRecovery) { - auto desc = indexCatalog->findIndexByName( + indexCatalogEntry = indexCatalog->getWritableEntryByName( opCtx, _indexName, IndexCatalog::InclusionPolicy::kUnfinished); - indexCatalogEntry = indexCatalog->getEntryShared(desc).get(); } else { indexCatalogEntry = indexCatalog->createIndexEntry( opCtx, collection, std::move(descriptor), CreateIndexEntryFlags::kNone); @@ -183,17 +192,6 @@ Status IndexBuildBlock::init(OperationContext* opCtx, Collection* collection, bo indexCatalogEntry->setIndexBuildInterceptor(_indexBuildInterceptor.get()); } - if (isBackgroundIndex) { - opCtx->recoveryUnit()->onCommit( - [entry = indexCatalogEntry, coll = collection](OperationContext*, - boost::optional commitTime) { - // This will prevent the unfinished index from being visible on index iterators. - if (commitTime) { - entry->setMinimumVisibleSnapshot(commitTime.value()); - } - }); - } - _completeInit(opCtx, collection); return Status::OK(); @@ -215,7 +213,7 @@ void IndexBuildBlock::fail(OperationContext* opCtx, Collection* collection) { "IndexBuildAborted", ErrorCodes::IndexBuildAborted); - auto indexCatalogEntry = getEntry(opCtx, collection); + auto indexCatalogEntry = getWritableEntry(opCtx, collection); if (indexCatalogEntry) { invariant(collection->getIndexCatalog() ->dropIndexEntry(opCtx, collection, indexCatalogEntry) @@ -246,7 +244,7 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) { _indexBuildInterceptor->invariantAllWritesApplied(opCtx); } - auto indexCatalogEntry = getEntry(opCtx, collection); + auto indexCatalogEntry = getWritableEntry(opCtx, collection); collection->indexBuildSuccess(opCtx, indexCatalogEntry); auto svcCtx = opCtx->getClient()->getServiceContext(); @@ -279,10 +277,6 @@ void IndexBuildBlock::success(OperationContext* opCtx, Collection* collection) { "collectionIdent"_attr = coll->getSharedIdent()->getIdent(), "commitTimestamp"_attr = commitTime); - if (commitTime) { - entry->setMinimumVisibleSnapshot(commitTime.value()); - } - // Add the index to the TTLCollectionCache upon successfully committing the index build. // TTL indexes are not compatible with capped collections. 
Note that TTL deletion is // supported on capped clustered collections via bounded collection scan, which does not @@ -307,13 +301,12 @@ const IndexCatalogEntry* IndexBuildBlock::getEntry(OperationContext* opCtx, return descriptor->getEntry(); } -IndexCatalogEntry* IndexBuildBlock::getEntry(OperationContext* opCtx, Collection* collection) { - auto descriptor = collection->getIndexCatalog()->findIndexByName( +IndexCatalogEntry* IndexBuildBlock::getWritableEntry(OperationContext* opCtx, + Collection* collection) { + return collection->getIndexCatalog()->getWritableEntryByName( opCtx, _indexName, IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished); - - return descriptor->getEntry(); } } // namespace mongo diff --git a/src/mongo/db/catalog/index_build_block.h b/src/mongo/db/catalog/index_build_block.h index 2b94849b46c7f..ee7a85fc5302c 100644 --- a/src/mongo/db/catalog/index_build_block.h +++ b/src/mongo/db/catalog/index_build_block.h @@ -29,7 +29,21 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/index_build_interceptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/resumable_index_builds_gen.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -95,7 +109,7 @@ class IndexBuildBlock { */ const IndexCatalogEntry* getEntry(OperationContext* opCtx, const CollectionPtr& collection) const; - IndexCatalogEntry* getEntry(OperationContext* opCtx, Collection* collection); + IndexCatalogEntry* getWritableEntry(OperationContext* opCtx, Collection* collection); /** * Returns the name of the index managed by this index builder. diff --git a/src/mongo/db/catalog/index_build_entry_test.cpp b/src/mongo/db/catalog/index_build_entry_test.cpp index dc0972f308519..2d264075cffbd 100644 --- a/src/mongo/db/catalog/index_build_entry_test.cpp +++ b/src/mongo/db/catalog/index_build_entry_test.cpp @@ -27,19 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include #include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_validate.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/bsontypes.h" #include "mongo/db/catalog/commit_quorum_options.h" #include "mongo/db/catalog/index_build_entry_gen.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/db/catalog/index_build_oplog_entry.cpp b/src/mongo/db/catalog/index_build_oplog_entry.cpp index 5cb54358a3d9a..d91918178549a 100644 --- a/src/mongo/db/catalog/index_build_oplog_entry.cpp +++ b/src/mongo/db/catalog/index_build_oplog_entry.cpp @@ -27,13 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/catalog/index_build_oplog_entry.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/db/catalog/index_build_oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/logv2/redaction.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { StatusWith IndexBuildOplogEntry::parse(const repl::OplogEntry& entry) { @@ -133,8 +143,8 @@ StatusWith IndexBuildOplogEntry::parse(const repl::OplogEn commandType, commandName.toString(), swBuildUUID.getValue(), - indexNames, - indexSpecs, + std::move(indexNames), + std::move(indexSpecs), cause}; } } // namespace mongo diff --git a/src/mongo/db/catalog/index_build_oplog_entry.h b/src/mongo/db/catalog/index_build_oplog_entry.h index 3da9a6e03aed2..d61a4959e96e8 100644 --- a/src/mongo/db/catalog/index_build_oplog_entry.h +++ b/src/mongo/db/catalog/index_build_oplog_entry.h @@ -29,9 +29,14 @@ #pragma once +#include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/commit_quorum_options.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/db/catalog/index_builds_manager.cpp b/src/mongo/db/catalog/index_builds_manager.cpp index 4233138c74499..73084dd2146be 100644 --- a/src/mongo/db/catalog/index_builds_manager.cpp +++ b/src/mongo/db/catalog/index_builds_manager.cpp @@ -28,24 +28,42 @@ */ -#include "mongo/db/catalog/multi_index_block.h" -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/db/catalog/index_builds_manager.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_validate.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_builds_manager.h" #include "mongo/db/catalog/index_repair.h" +#include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/storage_repair_observer.h" #include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" #include "mongo/util/progress_meter.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -94,7 +112,7 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx, const auto& nss = collection->ns(); invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X), str::stream() << "Unable to set up index build " << buildUUID << ": collection " - << nss.ns() << " is not locked in 
exclusive mode."); + << nss.toStringForErrorMsg() << " is not locked in exclusive mode."); auto builder = invariant(_getBuilder(buildUUID)); if (options.protocol == IndexBuildProtocol::kTwoPhase) { @@ -115,7 +133,7 @@ Status IndexBuildsManager::setUpIndexBuild(OperationContext* opCtx, std::vector indexes; try { - indexes = writeConflictRetry(opCtx, "IndexBuildsManager::setUpIndexBuild", nss.ns(), [&]() { + indexes = writeConflictRetry(opCtx, "IndexBuildsManager::setUpIndexBuild", nss, [&]() { MultiIndexBlock::InitMode mode = options.forRecovery ? MultiIndexBlock::InitMode::Recovery : MultiIndexBlock::InitMode::SteadyState; @@ -172,7 +190,7 @@ StatusWith> IndexBuildsManager::startBuildingInd opCtx->checkForInterrupt(); // Cursor is left one past the end of the batch inside writeConflictRetry auto beginBatchId = record->id; - Status status = writeConflictRetry(opCtx, "repairDatabase", ns.ns(), [&] { + Status status = writeConflictRetry(opCtx, "repairDatabase", ns, [&] { // In the case of WCE in a partial batch, we need to go back to the beginning if (!record || (beginBatchId != record->id)) { record = cursor->seekExact(beginBatchId); @@ -216,7 +234,7 @@ StatusWith> IndexBuildsManager::startBuildingInd writeConflictRetry( opCtx, "insertSingleDocumentForInitialSyncOrRecovery-restoreCursor", - ns.ns(), + ns, [&cursor] { cursor->restore(); }); }); if (!insertStatus.isOK()) { @@ -239,7 +257,7 @@ StatusWith> IndexBuildsManager::startBuildingInd ON_BLOCK_EXIT([opCtx, ns, &cursor]() { // restore CAN throw WCE per API writeConflictRetry( - opCtx, "retryRestoreCursor", ns.ns(), [&cursor] { cursor->restore(); }); + opCtx, "retryRestoreCursor", ns, [&cursor] { cursor->restore(); }); }); wunit.commit(); return Status::OK(); @@ -282,9 +300,9 @@ StatusWith> IndexBuildsManager::startBuildingInd if (recordsRemoved > 0) { StorageRepairObserver::get(opCtx->getServiceContext()) - ->invalidatingModification(str::stream() - << "Moved " << recordsRemoved - << " records to lost and found: " << lostAndFoundNss.ns()); + ->invalidatingModification(str::stream() << "Moved " << recordsRemoved + << " records to lost and found: " + << toStringForLogging(lostAndFoundNss)); LOGV2(3956200, "Moved records to lost and found.", @@ -336,7 +354,7 @@ Status IndexBuildsManager::commitIndexBuild(OperationContext* opCtx, return writeConflictRetry( opCtx, "IndexBuildsManager::commitIndexBuild", - nss.ns(), + nss, [this, builder, buildUUID, opCtx, &collection, nss, &onCreateEachFn, &onCommitFn] { WriteUnitOfWork wunit(opCtx); auto status = builder->commit( diff --git a/src/mongo/db/catalog/index_builds_manager.h b/src/mongo/db/catalog/index_builds_manager.h index 57d90f463a1f0..94dbbf98c8d55 100644 --- a/src/mongo/db/catalog/index_builds_manager.h +++ b/src/mongo/db/catalog/index_builds_manager.h @@ -29,17 +29,33 @@ #pragma once +#include +#include #include #include +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/index/index_build_interceptor.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/rebuild_indexes.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl_index_build_state.h" +#include "mongo/db/resumable_index_builds_gen.h" +#include 
"mongo/db/storage/recovery_unit.h" #include "mongo/platform/mutex.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/catalog/index_builds_manager_test.cpp b/src/mongo/db/catalog/index_builds_manager_test.cpp index 3e9e9ee5ce5a7..07eebe61b812a 100644 --- a/src/mongo/db/catalog/index_builds_manager_test.cpp +++ b/src/mongo/db/catalog/index_builds_manager_test.cpp @@ -27,11 +27,18 @@ * it in the license file. */ +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/shard_role.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/catalog/index_catalog.cpp b/src/mongo/db/catalog/index_catalog.cpp index 686091ac564f7..51cde29b8121e 100644 --- a/src/mongo/db/catalog/index_catalog.cpp +++ b/src/mongo/db/catalog/index_catalog.cpp @@ -28,11 +28,10 @@ */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -64,37 +63,9 @@ ReadyIndexesIterator::ReadyIndexesIterator(OperationContext* const opCtx, : _opCtx(opCtx), _iterator(beginIterator), _endIterator(endIterator) {} const IndexCatalogEntry* ReadyIndexesIterator::_advance() { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - auto pitFeatureEnabled = - feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe(); while (_iterator != _endIterator) { - IndexCatalogEntry* entry = _iterator->get(); + const IndexCatalogEntry* entry = _iterator->get(); ++_iterator; - - // When the PointInTimeCatalogLookups feature flag is not enabled, it is necessary to check - // whether the operation's read timestamp is before or after the most recent index - // modification (indicated by the minimum visible snapshot on the IndexCatalogEntry). If the - // read timestamp is before the most recent index modification, we must not include this - // entry in the iterator. - // - // When the PointInTimeCatalogLookups feature flag is enabled, the index catalog entry will - // be constructed from the durable catalog for reads with a read timestamp older than the - // minimum valid snapshot for the collection (which reflects the most recent catalog - // modification for that collection, including index modifications), so there's no need to - // check the minimum visible snapshot of the entry here. - if (!pitFeatureEnabled) { - if (auto minSnapshot = entry->getMinimumVisibleSnapshot()) { - auto mySnapshot = - _opCtx->recoveryUnit()->getPointInTimeReadTimestamp(_opCtx).get_value_or( - _opCtx->recoveryUnit()->getCatalogConflictingTimestamp()); - - if (!mySnapshot.isNull() && mySnapshot < minSnapshot.value()) { - // This index isn't finished in my snapshot. 
- continue; - } - } - } - return entry; } @@ -102,7 +73,8 @@ const IndexCatalogEntry* ReadyIndexesIterator::_advance() { } AllIndexesIterator::AllIndexesIterator( - OperationContext* const opCtx, std::unique_ptr> ownedContainer) + OperationContext* const opCtx, + std::unique_ptr> ownedContainer) : _opCtx(opCtx), _ownedContainer(std::move(ownedContainer)) { // Explicitly order calls onto the ownedContainer with respect to its move. _iterator = _ownedContainer->begin(); @@ -114,7 +86,7 @@ const IndexCatalogEntry* AllIndexesIterator::_advance() { return nullptr; } - IndexCatalogEntry* entry = *_iterator; + const IndexCatalogEntry* entry = *_iterator; ++_iterator; return entry; } diff --git a/src/mongo/db/catalog/index_catalog.h b/src/mongo/db/catalog/index_catalog.h index 5a3aa01e23046..b8c48bb98db79 100644 --- a/src/mongo/db/catalog/index_catalog.h +++ b/src/mongo/db/catalog/index_catalog.h @@ -29,17 +29,30 @@ #pragma once +#include +#include +#include #include +#include +#include #include #include "mongo/base/clonable_ptr.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" +#include "mongo/db/resumable_index_builds_gen.h" #include "mongo/db/server_options.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/record_store.h" namespace mongo { @@ -47,8 +60,8 @@ namespace mongo { class Client; class Collection; class CollectionPtr; - class IndexDescriptor; + struct InsertDeleteOptions; struct BsonRecord { @@ -186,15 +199,15 @@ class IndexCatalog { * it should pass in a null value. */ AllIndexesIterator(OperationContext* opCtx, - std::unique_ptr> ownedContainer); + std::unique_ptr> ownedContainer); private: const IndexCatalogEntry* _advance() override; OperationContext* const _opCtx; - std::vector::const_iterator _iterator; - std::vector::const_iterator _endIterator; - std::unique_ptr> _ownedContainer; + std::vector::const_iterator _iterator; + std::vector::const_iterator _endIterator; + std::unique_ptr> _ownedContainer; }; enum class InclusionPolicy { @@ -314,13 +327,27 @@ class IndexCatalog { */ virtual const IndexCatalogEntry* getEntry(const IndexDescriptor* desc) const = 0; + /** + * Returns a writable IndexCatalogEntry copy that will be returned by current and future calls + * to this function. Any previous IndexCatalogEntry/IndexDescriptor pointers that were returned + * may be invalidated. + */ + virtual IndexCatalogEntry* getWritableEntryByName( + OperationContext* opCtx, + StringData name, + InclusionPolicy inclusionPolicy = InclusionPolicy::kReady) = 0; + virtual IndexCatalogEntry* getWritableEntryByKeyPatternAndOptions( + OperationContext* opCtx, + const BSONObj& key, + const BSONObj& indexSpec, + InclusionPolicy inclusionPolicy = InclusionPolicy::kReady) = 0; + /** * Returns a pointer to the index catalog entry associated with 'desc', where the caller assumes * shared ownership of the entry. Returns null if the entry does not exist. */ virtual std::shared_ptr getEntryShared( const IndexDescriptor*) const = 0; - virtual std::shared_ptr getEntryShared(const IndexDescriptor*) = 0; /** * Returns a vector of shared pointers to all index entries. 
Excludes unfinished indexes. @@ -343,7 +370,7 @@ class IndexCatalog { virtual IndexCatalogEntry* createIndexEntry(OperationContext* opCtx, Collection* collection, - std::unique_ptr descriptor, + IndexDescriptor&& descriptor, CreateIndexEntryFlags flags) = 0; /** @@ -398,11 +425,15 @@ class IndexCatalog { * * This should only be used when we are confident in the specs, such as when specs are received * via replica set cloning or chunk migrations. + * + * 'removeInProgressIndexBuilds' controls whether in-progress index builds are also filtered + * out. */ virtual std::vector removeExistingIndexesNoChecks( OperationContext* opCtx, const CollectionPtr& collection, - const std::vector& indexSpecsToBuild) const = 0; + const std::vector& indexSpecsToBuild, + bool removeInProgressIndexBuilds = true) const = 0; /** * Drops indexes in the index catalog that returns true when it's descriptor returns true for @@ -424,16 +455,6 @@ class IndexCatalog { bool includingIdIndex, std::function onDropFn) = 0; - /** - * Drops the index given its descriptor. - * - * The caller must hold the collection X lock and ensure no index builds are in progress on the - * collection. - */ - virtual Status dropIndex(OperationContext* opCtx, - Collection* collection, - const IndexDescriptor* desc) = 0; - /** * Resets the index given its descriptor. * @@ -442,7 +463,7 @@ class IndexCatalog { */ virtual Status resetUnfinishedIndexForRecovery(OperationContext* opCtx, Collection* collection, - const IndexDescriptor* desc) = 0; + IndexCatalogEntry* entry) = 0; /** * Drops an unfinished index given its descriptor. @@ -451,7 +472,7 @@ class IndexCatalog { */ virtual Status dropUnfinishedIndex(OperationContext* opCtx, Collection* collection, - const IndexDescriptor* desc) = 0; + IndexCatalogEntry* entry) = 0; /** * Drops the index given its catalog entry. 
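The new `getWritableEntryByName()` declared above pairs with `dropIndexEntry()`. Below is a minimal sketch, assuming a writable `Collection* collection` and an index named `"a_1"` (both placeholders); as the header comment warns, entry/descriptor pointers obtained earlier may be invalidated once a writable copy is handed out.

```cpp
// Not part of the patch: hypothetical caller of the writable-entry API.
IndexCatalogEntry* writableEntry = collection->getIndexCatalog()->getWritableEntryByName(
    opCtx,
    "a_1",
    IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished);
if (writableEntry) {
    // Re-read the descriptor through the writable entry; older pointers may be stale.
    const IndexDescriptor* desc = writableEntry->descriptor();
    invariant(desc);
    invariant(collection->getIndexCatalog()
                  ->dropIndexEntry(opCtx, collection, writableEntry)
                  .isOK());
}
```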
diff --git a/src/mongo/db/catalog/index_catalog_entry.cpp b/src/mongo/db/catalog/index_catalog_entry.cpp index 299e4e30e1046..4c78968ad63be 100644 --- a/src/mongo/db/catalog/index_catalog_entry.cpp +++ b/src/mongo/db/catalog/index_catalog_entry.cpp @@ -28,11 +28,7 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/db/catalog/index_catalog_entry.h" - -#include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -40,7 +36,7 @@ namespace mongo { -std::shared_ptr IndexCatalogEntryContainer::release( +std::shared_ptr IndexCatalogEntryContainer::release( const IndexDescriptor* desc) { for (auto i = _entries.begin(); i != _entries.end(); ++i) { if ((*i)->descriptor() != desc) diff --git a/src/mongo/db/catalog/index_catalog_entry.h b/src/mongo/db/catalog/index_catalog_entry.h index 71bad9953b3da..bd4ca8bfcfd34 100644 --- a/src/mongo/db/catalog/index_catalog_entry.h +++ b/src/mongo/db/catalog/index_catalog_entry.h @@ -31,7 +31,10 @@ #include #include +#include #include +#include +#include #include "mongo/bson/ordering.h" #include "mongo/bson/timestamp.h" @@ -47,11 +50,14 @@ namespace mongo { class CollatorInterface; class Collection; + class CollectionPtr; class CollectionCatalogEntry; + class Ident; class IndexAccessMethod; class SortedDataIndexAccessMethod; + class IndexBuildInterceptor; class IndexDescriptor; class MatchExpression; @@ -63,9 +69,6 @@ class IndexCatalogEntry : public std::enable_shared_from_this IndexCatalogEntry() = default; virtual ~IndexCatalogEntry() = default; - inline IndexCatalogEntry(IndexCatalogEntry&&) = delete; - inline IndexCatalogEntry& operator=(IndexCatalogEntry&&) = delete; - virtual const std::string& getIdent() const = 0; virtual std::shared_ptr getSharedIdent() const = 0; virtual void setIdent(std::shared_ptr newIdent) = 0; @@ -156,23 +159,9 @@ class IndexCatalogEntry : public std::enable_shared_from_this const MultikeyPaths& multikeyPaths) const = 0; /** - * Returns whether this index is ready for queries. This is potentially unsafe in that it does - * not consider whether the index is visible or ready in the current storage snapshot. For - * that, use isReadyInMySnapshot() or isPresentInMySnapshot(). - */ - virtual bool isReady(OperationContext* opCtx) const = 0; - - /** - * Safely check whether this index is visible in the durable catalog in the current storage - * snapshot. + * Returns whether this index is ready for queries. */ - virtual bool isPresentInMySnapshot(OperationContext* opCtx) const = 0; - - /** - * Check whether this index is ready in the durable catalog in the current storage snapshot. It - * is unsafe to call this if isPresentInMySnapshot() has not also been checked. - */ - virtual bool isReadyInMySnapshot(OperationContext* opCtx) const = 0; + virtual bool isReady() const = 0; /** * Returns true if this index is not ready, and it is not currently in the process of being @@ -185,21 +174,13 @@ class IndexCatalogEntry : public std::enable_shared_from_this */ virtual bool shouldValidateDocument() const = 0; - /** - * If return value is not boost::none, reads with majority read concern using an older snapshot - * must treat this index as unfinished. 
- */ - virtual boost::optional getMinimumVisibleSnapshot() const = 0; - - virtual void setMinimumVisibleSnapshot(Timestamp name) = 0; - virtual const UpdateIndexData& getIndexedPaths() const = 0; }; class IndexCatalogEntryContainer { public: - typedef std::vector>::const_iterator const_iterator; - typedef std::vector>::const_iterator iterator; + typedef std::vector>::const_iterator const_iterator; + typedef std::vector>::const_iterator iterator; const_iterator begin() const { return _entries.begin(); @@ -226,13 +207,13 @@ class IndexCatalogEntryContainer { /** * Removes from _entries and returns the matching entry or NULL if none matches. */ - std::shared_ptr release(const IndexDescriptor* desc); + std::shared_ptr release(const IndexDescriptor* desc); bool remove(const IndexDescriptor* desc) { return static_cast(release(desc)); } - void add(std::shared_ptr&& entry) { + void add(std::shared_ptr&& entry) { _entries.push_back(std::move(entry)); } @@ -241,6 +222,6 @@ class IndexCatalogEntryContainer { } private: - std::vector> _entries; + std::vector> _entries; }; } // namespace mongo diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.cpp b/src/mongo/db/catalog/index_catalog_entry_impl.cpp index 58c9a7b479df2..d42b82f9dcf67 100644 --- a/src/mongo/db/catalog/index_catalog_entry_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_entry_impl.cpp @@ -27,69 +27,91 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - #include "mongo/db/catalog/index_catalog_entry_impl.h" #include -#include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/multi_key_path_tracker.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collection_query_info.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/logv2/log.h" -#include "mongo/util/scopeguard.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include 
"mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex - namespace mongo { -MONGO_FAIL_POINT_DEFINE(skipUpdateIndexMultikey); -using std::string; +MONGO_FAIL_POINT_DEFINE(skipUpdateIndexMultikey); IndexCatalogEntryImpl::IndexCatalogEntryImpl(OperationContext* const opCtx, const CollectionPtr& collection, const std::string& ident, - std::unique_ptr descriptor, + IndexDescriptor&& descriptor, bool isFrozen) - : _ident(ident), - _descriptor(std::move(descriptor)), - _catalogId(collection->getCatalogId()), + : _shared(make_intrusive(ident, collection->getCatalogId())), + _descriptor(descriptor), _isReady(false), _isFrozen(isFrozen), _shouldValidateDocument(false), _isDropped(false), _indexOffset(invariantStatusOK( - collection->checkMetaDataForIndex(_descriptor->indexName(), _descriptor->infoObj()))) { - - _descriptor->_entry = this; - _isReady = collection->isIndexReady(_descriptor->indexName()); + collection->checkMetaDataForIndex(_descriptor.indexName(), _descriptor.infoObj()))) { + _descriptor._entry = this; + _isReady = collection->isIndexReady(_descriptor.indexName()); // For time-series collections, we need to check that the indexed metric fields do not have // expanded array values. _shouldValidateDocument = collection->getTimeseriesOptions() && timeseries::doesBucketsIndexIncludeMeasurement( - opCtx, collection->ns(), *collection->getTimeseriesOptions(), _descriptor->infoObj()); + opCtx, collection->ns(), *collection->getTimeseriesOptions(), _descriptor.infoObj()); - const BSONObj& collation = _descriptor->collation(); + const BSONObj& collation = _descriptor.collation(); if (!collation.isEmpty()) { auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation); @@ -97,56 +119,36 @@ IndexCatalogEntryImpl::IndexCatalogEntryImpl(OperationContext* const opCtx, // Index spec should have already been validated. invariant(statusWithCollator.getStatus()); - _collator = std::move(statusWithCollator.getValue()); + _shared->_collator = std::move(statusWithCollator.getValue()); } - if (_descriptor->isPartial()) { - const BSONObj& filter = _descriptor->partialFilterExpression(); + if (_descriptor.isPartial()) { + const BSONObj& filter = _descriptor.partialFilterExpression(); - _expCtxForFilter = make_intrusive( - opCtx, CollatorInterface::cloneCollator(_collator.get()), collection->ns()); + _shared->_expCtxForFilter = make_intrusive( + opCtx, CollatorInterface::cloneCollator(_shared->_collator.get()), collection->ns()); // Parsing the partial filter expression is not expected to fail here since the // expression would have been successfully parsed upstream during index creation. 
- _filterExpression = + _shared->_filterExpression = MatchExpressionParser::parseAndNormalize(filter, - _expCtxForFilter, + _shared->_expCtxForFilter, ExtensionsCallbackNoop(), MatchExpressionParser::kBanAllSpecialFeatures); LOGV2_DEBUG(20350, 2, "have filter expression for {namespace} {indexName} {filter}", logAttrs(collection->ns()), - "indexName"_attr = _descriptor->indexName(), + "indexName"_attr = _descriptor.indexName(), "filter"_attr = redact(filter)); } } void IndexCatalogEntryImpl::setAccessMethod(std::unique_ptr accessMethod) { - invariant(!_accessMethod); - _accessMethod = std::move(accessMethod); - CollectionQueryInfo::computeUpdateIndexData(this, _accessMethod.get(), &_indexedPaths); -} - -bool IndexCatalogEntryImpl::isReady(OperationContext* opCtx) const { - // For multi-document transactions, we can open a snapshot prior to checking the - // minimumSnapshotVersion on a collection. This means we are unprotected from reading - // out-of-sync index catalog entries. To fix this, we uassert if we detect that the - // in-memory catalog is out-of-sync with the on-disk catalog. This check is not necessary when - // point-in-time catalog lookups are enabled as the snapshot is always in sync. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (opCtx->inMultiDocumentTransaction() && - !feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - if (!isPresentInMySnapshot(opCtx) || isReadyInMySnapshot(opCtx) != _isReady) { - uasserted(ErrorCodes::SnapshotUnavailable, - str::stream() << "Unable to read from a snapshot due to pending collection" - " catalog changes; please retry the operation."); - } - } - - if (kDebugBuild) - invariant(_isReady == isReadyInMySnapshot(opCtx)); - return _isReady; + invariant(!_shared->_accessMethod); + _shared->_accessMethod = std::move(accessMethod); + CollectionQueryInfo::computeUpdateIndexData( + this, _shared->_accessMethod.get(), &_shared->_indexedPaths); } bool IndexCatalogEntryImpl::isFrozen() const { @@ -173,12 +175,6 @@ MultikeyPaths IndexCatalogEntryImpl::getMultikeyPaths(OperationContext* opCtx, // --- -void IndexCatalogEntryImpl::setMinimumVisibleSnapshot(Timestamp newMinimumVisibleSnapshot) { - if (!_minVisibleSnapshot || (newMinimumVisibleSnapshot > _minVisibleSnapshot.value())) { - _minVisibleSnapshot = newMinimumVisibleSnapshot; - } -} - void IndexCatalogEntryImpl::setIsReady(bool newIsReady) { _isReady = newIsReady; } @@ -256,7 +252,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx, if (MultikeyPathTracker::get(opCtx).isTrackingMultikeyPathInfo()) { MultikeyPathTracker::get(opCtx).addMultikeyPathInfo({collection->ns(), collection->uuid(), - _descriptor->indexName(), + _descriptor.indexName(), multikeyMetadataKeys, std::move(paths)}); return; @@ -267,7 +263,7 @@ void IndexCatalogEntryImpl::setMultikey(OperationContext* opCtx, // RecordId. An attempt to write a duplicate key will therefore be ignored. if (!multikeyMetadataKeys.empty()) { uassertStatusOK(accessMethod()->asSortedData()->insertKeys( - opCtx, collection, multikeyMetadataKeys, {}, {}, nullptr)); + opCtx, collection, _descriptor.getEntry(), multikeyMetadataKeys, {}, {}, nullptr)); } // Mark the catalog as multikey, and record the multikey paths if applicable. @@ -294,7 +290,7 @@ void IndexCatalogEntryImpl::forceSetMultikey(OperationContext* const opCtx, // caller wants to upgrade this index because it knows exactly which paths are multikey. 
We rely // on the following function to make sure this upgrade only takes place on index types that // currently support path-level multikey path tracking. - coll->forceSetIndexIsMultikey(opCtx, _descriptor.get(), isMultikey, multikeyPaths); + coll->forceSetIndexIsMultikey(opCtx, &_descriptor, isMultikey, multikeyPaths); // Since multikey metadata has changed, invalidate the query cache. CollectionQueryInfo::get(coll).clearQueryCacheForSetMultikey(coll); @@ -317,12 +313,13 @@ Status IndexCatalogEntryImpl::_setMultikeyInMultiDocumentTransaction( // If the index is not visible within the side transaction, the index may have been created, // but not committed, in the parent transaction. Therefore, we abandon the side transaction // and set the multikey flag in the parent transaction. - if (!isPresentInMySnapshot(opCtx)) { + if (!DurableCatalog::get(opCtx)->isIndexPresent( + opCtx, _shared->_catalogId, _descriptor.indexName())) { return {ErrorCodes::SnapshotUnavailable, "index not visible in side transaction"}; } writeConflictRetry( - opCtx, "set index multikey", collection->ns().ns(), [&] { + opCtx, "set index multikey", collection->ns(), [&] { WriteUnitOfWork wuow(opCtx); // If we have a prepare optime for recovery, then we always use that. This is safe since @@ -364,8 +361,9 @@ Status IndexCatalogEntryImpl::_setMultikeyInMultiDocumentTransaction( auto msg = BSON("msg" << "Setting index to multikey" - << "coll" << collection->ns().ns() << "index" - << _descriptor->indexName()); + << "coll" + << NamespaceStringUtil::serializeForCatalog(collection->ns()) + << "index" << _descriptor.indexName()); opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(opCtx, msg); } @@ -378,38 +376,29 @@ Status IndexCatalogEntryImpl::_setMultikeyInMultiDocumentTransaction( } std::shared_ptr IndexCatalogEntryImpl::getSharedIdent() const { - return _accessMethod ? _accessMethod->getSharedIdent() : nullptr; + return _shared->_accessMethod ? 
_shared->_accessMethod->getSharedIdent() : nullptr; } const Ordering& IndexCatalogEntryImpl::ordering() const { - return _descriptor->ordering(); + return _descriptor.ordering(); } void IndexCatalogEntryImpl::setIdent(std::shared_ptr newIdent) { - if (!_accessMethod) + if (!_shared->_accessMethod) return; - _accessMethod->setIdent(std::move(newIdent)); + _shared->_accessMethod->setIdent(std::move(newIdent)); } // ---- NamespaceString IndexCatalogEntryImpl::getNSSFromCatalog(OperationContext* opCtx) const { - return DurableCatalog::get(opCtx)->getEntry(_catalogId).nss; -} - -bool IndexCatalogEntryImpl::isReadyInMySnapshot(OperationContext* opCtx) const { - return DurableCatalog::get(opCtx)->isIndexReady(opCtx, _catalogId, _descriptor->indexName()); -} - -bool IndexCatalogEntryImpl::isPresentInMySnapshot(OperationContext* opCtx) const { - return DurableCatalog::get(opCtx)->isIndexPresent(opCtx, _catalogId, _descriptor->indexName()); + return DurableCatalog::get(opCtx)->getEntry(_shared->_catalogId).nss; } bool IndexCatalogEntryImpl::_catalogIsMultikey(OperationContext* opCtx, const CollectionPtr& collection, MultikeyPaths* multikeyPaths) const { - return collection->isIndexMultikey( - opCtx, _descriptor->indexName(), multikeyPaths, _indexOffset); + return collection->isIndexMultikey(opCtx, _descriptor.indexName(), multikeyPaths, _indexOffset); } void IndexCatalogEntryImpl::_catalogSetMultikey(OperationContext* opCtx, @@ -420,15 +409,15 @@ void IndexCatalogEntryImpl::_catalogSetMultikey(OperationContext* opCtx, // CollectionCatalogEntry::setIndexIsMultikey() requires that we discard the path-level // multikey information in order to avoid unintentionally setting path-level multikey // information on an index created before 3.4. - auto indexMetadataHasChanged = collection->setIndexIsMultikey( - opCtx, _descriptor->indexName(), multikeyPaths, _indexOffset); + auto indexMetadataHasChanged = + collection->setIndexIsMultikey(opCtx, _descriptor.indexName(), multikeyPaths, _indexOffset); if (indexMetadataHasChanged) { LOGV2_DEBUG(4718705, 1, "Index set to multi key, clearing query plan cache", logAttrs(collection->ns()), - "keyPattern"_attr = _descriptor->keyPattern()); + "keyPattern"_attr = _descriptor.keyPattern()); CollectionQueryInfo::get(collection).clearQueryCacheForSetMultikey(collection); } } diff --git a/src/mongo/db/catalog/index_catalog_entry_impl.h b/src/mongo/db/catalog/index_catalog_entry_impl.h index 00df93261dcbc..0488fde08fe1c 100644 --- a/src/mongo/db/catalog/index_catalog_entry_impl.h +++ b/src/mongo/db/catalog/index_catalog_entry_impl.h @@ -29,18 +29,31 @@ #pragma once +#include #include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/bson/ordering.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/record_id.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/update_index_data.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -52,18 +65,15 @@ class OperationContext; class ExpressionContext; class 
IndexCatalogEntryImpl : public IndexCatalogEntry { - IndexCatalogEntryImpl(const IndexCatalogEntryImpl&) = delete; - IndexCatalogEntryImpl& operator=(const IndexCatalogEntryImpl&) = delete; - public: IndexCatalogEntryImpl(OperationContext* opCtx, const CollectionPtr& collection, const std::string& ident, - std::unique_ptr descriptor, // ownership passes to me + IndexDescriptor&& descriptor, bool isFrozen); const std::string& getIdent() const final { - return _ident; + return _shared->_ident; } std::shared_ptr getSharedIdent() const final; @@ -71,14 +81,14 @@ class IndexCatalogEntryImpl : public IndexCatalogEntry { void setIdent(std::shared_ptr newIdent) final; IndexDescriptor* descriptor() final { - return _descriptor.get(); + return &_descriptor; } const IndexDescriptor* descriptor() const final { - return _descriptor.get(); + return &_descriptor; } IndexAccessMethod* accessMethod() const final { - return _accessMethod.get(); + return _shared->_accessMethod.get(); } void setAccessMethod(std::unique_ptr accessMethod) final; @@ -98,11 +108,11 @@ class IndexCatalogEntryImpl : public IndexCatalogEntry { const Ordering& ordering() const final; const MatchExpression* getFilterExpression() const final { - return _filterExpression.get(); + return _shared->_filterExpression.get(); } const CollatorInterface* getCollator() const final { - return _collator.get(); + return _shared->_collator.get(); } NamespaceString getNSSFromCatalog(OperationContext* opCtx) const final; @@ -114,11 +124,11 @@ class IndexCatalogEntryImpl : public IndexCatalogEntry { void setIsFrozen(bool newIsFrozen) final; void setDropped() final { - _isDropped.store(true); + _isDropped = true; } bool isDropped() const final { - return _isDropped.load(); + return _isDropped; } // -- @@ -165,32 +175,16 @@ class IndexCatalogEntryImpl : public IndexCatalogEntry { bool isMultikey, const MultikeyPaths& multikeyPaths) const final; - bool isReady(OperationContext* opCtx) const final; + bool isReady() const final { + return _isReady; + } bool isFrozen() const final; bool shouldValidateDocument() const final; - bool isPresentInMySnapshot(OperationContext* opCtx) const final; - - bool isReadyInMySnapshot(OperationContext* opCtx) const final; - - /** - * If return value is not boost::none, reads with majority read concern using an older snapshot - * must treat this index as unfinished. - */ - boost::optional getMinimumVisibleSnapshot() const final { - return _minVisibleSnapshot; - } - - /** - * Updates the minimum visible snapshot. The 'newMinimumVisibleSnapshot' is ignored if it would - * set the minimum visible snapshot backwards in time. - */ - void setMinimumVisibleSnapshot(Timestamp newMinimumVisibleSnapshot) final; - const UpdateIndexData& getIndexedPaths() const final { - return _indexedPaths; + return _shared->_indexedPaths; } private: @@ -218,39 +212,43 @@ class IndexCatalogEntryImpl : public IndexCatalogEntry { const CollectionPtr& collection, const MultikeyPaths& multikeyPaths) const; - // ----- + /** + * Holder of shared state between IndexCatalogEntryImpl clones + */ + struct SharedState : public RefCountable { + SharedState(const std::string& ident, const RecordId& catalogId) + : _ident(ident), _catalogId(catalogId) {} + + const std::string _ident; - const std::string _ident; + const RecordId _catalogId; // Location in the durable catalog of the collection entry + // containing this index entry. 
- std::unique_ptr _descriptor; // owned here + std::unique_ptr _accessMethod; - std::unique_ptr _accessMethod; + std::unique_ptr _collator; + std::unique_ptr _filterExpression; + + // Special ExpressionContext used to evaluate the partial filter expression. + boost::intrusive_ptr _expCtxForFilter; + + // Describes the paths indexed by this index. + UpdateIndexData _indexedPaths; + }; IndexBuildInterceptor* _indexBuildInterceptor = nullptr; // not owned here - std::unique_ptr _collator; - std::unique_ptr _filterExpression; - // Special ExpressionContext used to evaluate the partial filter expression. - boost::intrusive_ptr _expCtxForFilter; + boost::intrusive_ptr _shared; - // cached stuff + IndexDescriptor _descriptor; - const RecordId _catalogId; // Location in the durable catalog of the collection entry - // containing this index entry. - bool _isReady; // cache of NamespaceDetails info + bool _isReady; bool _isFrozen; bool _shouldValidateDocument; - AtomicWord _isDropped; // Whether the index drop is committed. + bool _isDropped; // Whether the index drop is committed. - // The earliest snapshot that is allowed to read this index. - boost::optional _minVisibleSnapshot; - - // Offset of this index within the Collection metadata. - // Used to improve lookups without having to search for the index name - // accessing the collection metadata. + // Offset of this index within the Collection metadata. Used to improve lookups without having + // to search for the index name accessing the collection metadata. int _indexOffset; - - // Describes the paths indexed by this index. - UpdateIndexData _indexedPaths; }; } // namespace mongo diff --git a/src/mongo/db/catalog/index_catalog_impl.cpp b/src/mongo/db/catalog/index_catalog_impl.cpp index 4be9e44a494e9..d8f04f72162dd 100644 --- a/src/mongo/db/catalog/index_catalog_impl.cpp +++ b/src/mongo/db/catalog/index_catalog_impl.cpp @@ -29,22 +29,42 @@ #include "mongo/db/catalog/index_catalog_impl.h" +#include +#include +#include +#include +#include +#include +#include +#include #include -#include "mongo/base/init.h" -#include "mongo/bson/simple_bsonelement_comparator.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/aggregated_index_usage_tracker.h" #include "mongo/db/audit.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/index_build_block.h" #include "mongo/db/catalog/index_catalog_entry_impl.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/catalog/uncommitted_catalog_updates.h" -#include "mongo/db/client.h" -#include "mongo/db/clientcursor.h" -#include "mongo/db/curop.h" -#include "mongo/db/field_ref.h" +#include "mongo/db/collection_index_usage_tracker.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/fts/fts_spec.h" #include "mongo/db/global_settings.h" #include "mongo/db/index/index_access_method.h" @@ -52,37 +72,51 @@ #include "mongo/db/index/s2_access_method.h" #include 
"mongo/db/index/s2_bucket_access_method.h" #include "mongo/db/index_names.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/keypattern.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/multi_key_path_tracker.h" #include "mongo/db/operation_context.h" -#include "mongo/db/ops/delete.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collection_index_usage_tracker_decoration.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/internal_plans.h" #include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl_set_member_in_standalone_mode.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/durable_catalog.h" -#include "mongo/db/storage/execution_context.h" -#include "mongo/db/storage/historical_ident_tracker.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_init.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/storage/storage_util.h" #include "mongo/db/ttl_collection_cache.h" #include "mongo/db/update/document_diff_calculator.h" -#include "mongo/db/vector_clock.h" +#include "mongo/db/update_index_data.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/represent_as.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/shared_buffer_fragment.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -215,7 +249,7 @@ void IndexCatalogImpl::init(OperationContext* opCtx, "spec"_attr = spec); } - auto descriptor = std::make_unique(_getAccessMethodName(keyPattern), spec); + auto descriptor = IndexDescriptor(_getAccessMethodName(keyPattern), spec); if (spec.hasField(IndexDescriptor::kExpireAfterSecondsFieldName)) { // TTL indexes with an invalid 'expireAfterSeconds' field cause problems in multiversion @@ -269,8 +303,8 @@ void IndexCatalogImpl::init(OperationContext* opCtx, // single-phase index builds are dropped during startup and rollback. 
auto buildUUID = collection->getIndexBuildUUID(indexName); invariant(buildUUID, - str::stream() - << "collection: " << collection->ns() << "index:" << indexName); + str::stream() << "collection: " << collection->ns().toStringForErrorMsg() + << "index:" << indexName); } // We intentionally do not drop or rebuild unfinished two-phase index builds before @@ -284,28 +318,19 @@ void IndexCatalogImpl::init(OperationContext* opCtx, auto flags = CreateIndexEntryFlags::kInitFromDisk | CreateIndexEntryFlags::kFrozen; IndexCatalogEntry* entry = createIndexEntry(opCtx, collection, std::move(descriptor), flags); - fassert(31433, !entry->isReady(opCtx)); + fassert(31433, !entry->isReady()); } else { // Initializing with unfinished indexes may occur during rollback or startup. auto flags = CreateIndexEntryFlags::kInitFromDisk; IndexCatalogEntry* entry = createIndexEntry(opCtx, collection, std::move(descriptor), flags); - fassert(4505500, !entry->isReady(opCtx)); + fassert(4505500, !entry->isReady()); } } else { auto flags = CreateIndexEntryFlags::kInitFromDisk | CreateIndexEntryFlags::kIsReady; IndexCatalogEntry* entry = createIndexEntry(opCtx, collection, std::move(descriptor), flags); - fassert(17340, entry->isReady(opCtx)); - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe() && - recoveryTs && !entry->descriptor()->isIdIndex()) { - // When initializing indexes from disk, we conservatively set the - // minimumVisibleSnapshot to non _id indexes to the recovery timestamp. The _id - // index is left visible. It's assumed if the collection is visible, it's _id is - // valid to be used. - entry->setMinimumVisibleSnapshot(recoveryTs.value()); - } + fassert(17340, entry->isReady()); } } @@ -334,7 +359,7 @@ std::unique_ptr IndexCatalogImpl::getIndexIterator( // the pointers to a new vector. The vector's ownership is passed to the iterator. The query // code path from an external client is not expected to hit this case so the cost isn't paid by // the important code path. - auto allIndexes = std::make_unique>(); + auto allIndexes = std::make_unique>(); if (inclusionPolicy & InclusionPolicy::kReady) { for (auto it = _readyIndexes.begin(); it != _readyIndexes.end(); ++it) { @@ -540,7 +565,8 @@ StatusWith IndexCatalogImpl::prepareSpecForCreate( std::vector IndexCatalogImpl::removeExistingIndexesNoChecks( OperationContext* const opCtx, const CollectionPtr& collection, - const std::vector& indexSpecsToBuild) const { + const std::vector& indexSpecsToBuild, + bool removeInProgressIndexBuilds) const { std::vector result; // Filter out ready and in-progress index builds, and any non-_id indexes if 'buildIndexes' is // set to false in the replica set's config. @@ -552,12 +578,11 @@ std::vector IndexCatalogImpl::removeExistingIndexesNoChecks( // _doesSpecConflictWithExisting currently does more work than we require here: we are only // interested in the index already exists error. + auto inclusionPolicy = removeInProgressIndexBuilds + ? 
IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished + : IndexCatalog::InclusionPolicy::kReady; if (ErrorCodes::IndexAlreadyExists == - _doesSpecConflictWithExisting(opCtx, - collection, - spec, - IndexCatalog::InclusionPolicy::kReady | - IndexCatalog::InclusionPolicy::kUnfinished)) { + _doesSpecConflictWithExisting(opCtx, collection, spec, inclusionPolicy)) { continue; } @@ -586,26 +611,27 @@ std::vector IndexCatalogImpl::removeExistingIndexes( IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx, Collection* collection, - std::unique_ptr descriptor, + IndexDescriptor&& descriptor, CreateIndexEntryFlags flags) { - Status status = _isSpecOk(opCtx, CollectionPtr(collection), descriptor->infoObj()); + invariant(!descriptor.getEntry()); + + Status status = _isSpecOk(opCtx, CollectionPtr(collection), descriptor.infoObj()); if (!status.isOK()) { LOGV2_FATAL(28782, "Found an invalid index", - "descriptor"_attr = descriptor->infoObj(), + "descriptor"_attr = descriptor.infoObj(), logAttrs(collection->ns()), "error"_attr = redact(status)); } auto engine = opCtx->getServiceContext()->getStorageEngine(); std::string ident = engine->getCatalog()->getIndexIdent( - opCtx, collection->getCatalogId(), descriptor->indexName()); + opCtx, collection->getCatalogId(), descriptor.indexName()); bool isReadyIndex = CreateIndexEntryFlags::kIsReady & flags; bool frozen = CreateIndexEntryFlags::kFrozen & flags; invariant(!frozen || !isReadyIndex); - auto* const descriptorPtr = descriptor.get(); auto entry = std::make_shared( opCtx, CollectionPtr(collection), ident, std::move(descriptor), frozen); @@ -636,7 +662,7 @@ IndexCatalogEntry* IndexCatalogImpl::createIndexEntry(OperationContext* opCtx, bool initFromDisk = CreateIndexEntryFlags::kInitFromDisk & flags; if (!initFromDisk && !UncommittedCatalogUpdates::isCreatedCollection(opCtx, collection->ns())) { - const std::string indexName = descriptorPtr->indexName(); + const std::string indexName = desc->indexName(); opCtx->recoveryUnit()->onRollback( [collectionDecorations = collection->getSharedDecorations(), indexName = std::move(indexName)](OperationContext*) { @@ -654,7 +680,8 @@ StatusWith IndexCatalogImpl::createIndexOnEmptyCollection(OperationCont invariant(collection->uuid() == collection->uuid()); CollectionCatalog::get(opCtx)->invariantHasExclusiveAccessToCollection(opCtx, collection->ns()); invariant(collection->isEmpty(opCtx), - str::stream() << "Collection must be empty. Collection: " << collection->ns() + str::stream() << "Collection must be empty. Collection: " + << collection->ns().toStringForErrorMsg() << " UUID: " << collection->uuid() << " Count (from size storer): " << collection->numRecords(opCtx)); @@ -674,7 +701,7 @@ StatusWith IndexCatalogImpl::createIndexOnEmptyCollection(OperationCont return status; // sanity checks, etc... 
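The new `removeInProgressIndexBuilds` flag above switches the inclusion policy between ready-only and ready-plus-unfinished. A hedged usage sketch, where `requestedSpecs` is a placeholder `std::vector<BSONObj>` and `collection` is assumed to be a `CollectionPtr`:

```cpp
// Not part of the patch: keep specs whose builds are still in progress so the caller can
// wait on them, while dropping specs that already exist as ready indexes.
std::vector<BSONObj> remainingSpecs =
    collection->getIndexCatalog()->removeExistingIndexesNoChecks(
        opCtx, collection, requestedSpecs, /*removeInProgressIndexBuilds=*/false);
```

With the default of `true`, behavior matches the old path, which always filtered out unfinished builds as well.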
- IndexCatalogEntry* entry = indexBuildBlock.getEntry(opCtx, collection); + IndexCatalogEntry* entry = indexBuildBlock.getWritableEntry(opCtx, collection); invariant(entry); IndexDescriptor* descriptor = entry->descriptor(); invariant(descriptor); @@ -1191,8 +1218,8 @@ Status IndexCatalogImpl::_doesSpecConflictWithExisting(OperationContext* opCtx, } if (numIndexesTotal() >= kMaxNumIndexesAllowed) { - string s = str::stream() << "add index fails, too many indexes for " << collection->ns() - << " key:" << key; + string s = str::stream() << "add index fails, too many indexes for " + << collection->ns().toStringForErrorMsg() << " key:" << key; LOGV2(20354, "Exceeded maximum number of indexes", logAttrs(collection->ns()), @@ -1265,22 +1292,24 @@ void IndexCatalogImpl::dropIndexes(OperationContext* opCtx, for (size_t i = 0; i < indexNamesToDrop.size(); i++) { string indexName = indexNamesToDrop[i]; - const IndexDescriptor* desc = findIndexByName( + IndexCatalogEntry* writableEntry = getWritableEntryByName( opCtx, indexName, IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished | IndexCatalog::InclusionPolicy::kFrozen); - invariant(desc); - LOGV2_DEBUG(20355, 1, "\t dropAllIndexes dropping: {desc}", "desc"_attr = *desc); - IndexCatalogEntry* entry = desc->getEntry(); - invariant(entry); + invariant(writableEntry); + LOGV2_DEBUG(20355, + 1, + "\t dropAllIndexes dropping: {desc}", + "desc"_attr = *writableEntry->descriptor()); + // If the onDrop function creates an oplog entry, it should run first so that the drop is // timestamped at the same optime. if (onDropFn) { - onDropFn(desc); + onDropFn(writableEntry->descriptor()); } - invariant(dropIndexEntry(opCtx, collection, entry).isOK()); + invariant(dropIndexEntry(opCtx, collection, writableEntry).isOK()); } // verify state is sane post cleaning @@ -1317,27 +1346,12 @@ void IndexCatalogImpl::dropAllIndexes(OperationContext* opCtx, onDropFn); } -Status IndexCatalogImpl::dropIndex(OperationContext* opCtx, - Collection* collection, - const IndexDescriptor* desc) { - IndexCatalogEntry* entry = desc->getEntry(); - - if (!entry) - return Status(ErrorCodes::InternalError, "cannot find index to delete"); - - if (!entry->isReady(opCtx)) - return Status(ErrorCodes::InternalError, "cannot delete not ready index"); - - return dropIndexEntry(opCtx, collection, entry); -} - Status IndexCatalogImpl::resetUnfinishedIndexForRecovery(OperationContext* opCtx, Collection* collection, - const IndexDescriptor* desc) { + IndexCatalogEntry* entry) { invariant(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X)); invariant(opCtx->lockState()->inAWriteUnitOfWork()); - IndexCatalogEntry* entry = desc->getEntry(); const std::string indexName = entry->descriptor()->indexName(); // Only indexes that aren't ready can be reset. @@ -1384,12 +1398,12 @@ Status IndexCatalogImpl::resetUnfinishedIndexForRecovery(OperationContext* opCtx } // Update the index entry state in preparation to rebuild the index. 
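Both recovery helpers now take the `IndexCatalogEntry*` directly instead of an `IndexDescriptor*`. Below is a sketch of a startup-recovery style caller under the new signatures; `indexName`, `repair`, and `collection` are placeholders, not patch code.

```cpp
// Not part of the patch: resolve the writable entry once, then hand it to the helper.
IndexCatalogEntry* entry = collection->getIndexCatalog()->getWritableEntryByName(
    opCtx,
    indexName,
    IndexCatalog::InclusionPolicy::kUnfinished | IndexCatalog::InclusionPolicy::kFrozen);
invariant(entry);
if (repair) {
    uassertStatusOK(collection->getIndexCatalog()->resetUnfinishedIndexForRecovery(
        opCtx, collection, entry));
} else {
    uassertStatusOK(collection->getIndexCatalog()->dropUnfinishedIndex(opCtx, collection, entry));
}
```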
- if (!released->accessMethod()) { - released->setAccessMethod(IndexAccessMethod::make( - opCtx, collection->ns(), collection->getCollectionOptions(), released.get(), ident)); + if (!entry->accessMethod()) { + entry->setAccessMethod(IndexAccessMethod::make( + opCtx, collection->ns(), collection->getCollectionOptions(), entry, ident)); } - released->setIsFrozen(false); + entry->setIsFrozen(false); _buildingIndexes.add(std::move(released)); return Status::OK(); @@ -1397,13 +1411,11 @@ Status IndexCatalogImpl::resetUnfinishedIndexForRecovery(OperationContext* opCtx Status IndexCatalogImpl::dropUnfinishedIndex(OperationContext* opCtx, Collection* collection, - const IndexDescriptor* desc) { - IndexCatalogEntry* entry = desc->getEntry(); - + IndexCatalogEntry* entry) { if (!entry) return Status(ErrorCodes::InternalError, "cannot find index to delete"); - if (entry->isReady(opCtx)) + if (entry->isReady()) return Status(ErrorCodes::InternalError, "expected unfinished index, but it is ready"); return dropIndexEntry(opCtx, collection, entry); @@ -1421,12 +1433,7 @@ class IndexRemoveChange final : public RecoveryUnit::Change { _entry(std::move(entry)), _collectionDecorations(collectionDecorations) {} - void commit(OperationContext* opCtx, boost::optional commitTime) final { - if (commitTime) { - HistoricalIdentTracker::get(opCtx).recordDrop( - _entry->getIdent(), _nss, _uuid, commitTime.value()); - } - + void commit(OperationContext* opCtx, boost::optional) final { _entry->setDropped(); } @@ -1473,13 +1480,17 @@ Status IndexCatalogImpl::dropIndexEntry(OperationContext* opCtx, }(); invariant(released.get() == entry); - opCtx->recoveryUnit()->registerChange(std::make_unique( - collection->ns(), collection->uuid(), released, collection->getSharedDecorations())); + // TODO SERVER-77131: Remove index catalog entry instance in commit handler. 
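The TODO above refers to the commit handler registered just below. For orientation, here is a minimal, hypothetical `RecoveryUnit::Change` in the spirit of the trimmed-down `IndexRemoveChange`: commit only marks the entry dropped, and nothing is recorded with the historical ident tracker anymore. The class name and single-argument constructor are illustrative; the `commit` signature follows the one visible in this patch, and the `rollback` signature is assumed to match the `onRollback` callbacks shown elsewhere in it.

```cpp
// Not part of the patch: a pared-down change whose commit handler mirrors the new behavior.
class MarkIndexEntryDroppedChange final : public RecoveryUnit::Change {
public:
    explicit MarkIndexEntryDroppedChange(std::shared_ptr<IndexCatalogEntry> entry)
        : _entry(std::move(entry)) {}

    void commit(OperationContext*, boost::optional<Timestamp>) final {
        _entry->setDropped();
    }

    void rollback(OperationContext*) final {
        // Nothing to undo; the in-memory catalog still owns the entry if the drop rolls back.
    }

private:
    std::shared_ptr<IndexCatalogEntry> _entry;
};

// Registered the same way the patch registers its change:
// opCtx->recoveryUnit()->registerChange(
//     std::make_unique<MarkIndexEntryDroppedChange>(entry->shared_from_this()));
```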
+ opCtx->recoveryUnit()->registerChange( + std::make_unique(collection->ns(), + collection->uuid(), + entry->shared_from_this(), + collection->getSharedDecorations())); CollectionQueryInfo::get(collection).rebuildIndexData(opCtx, CollectionPtr(collection)); CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations()) .unregisterIndex(indexName); - _deleteIndexFromDisk(opCtx, collection, indexName, released); + _deleteIndexFromDisk(opCtx, collection, indexName, entry->shared_from_this()); return Status::OK(); } @@ -1514,7 +1525,7 @@ void IndexCatalogImpl::setMultikeyPaths(OperationContext* const opCtx, const IndexDescriptor* desc, const KeyStringSet& multikeyMetadataKeys, const MultikeyPaths& multikeyPaths) const { - IndexCatalogEntry* entry = desc->getEntry(); + const IndexCatalogEntry* entry = desc->getEntry(); invariant(entry); entry->setMultikey(opCtx, coll, multikeyMetadataKeys, multikeyPaths); }; @@ -1630,13 +1641,56 @@ const IndexCatalogEntry* IndexCatalogImpl::getEntry(const IndexDescriptor* desc) return entry; } -std::shared_ptr IndexCatalogImpl::getEntryShared( - const IndexDescriptor* indexDescriptor) const { - return indexDescriptor->getEntry()->shared_from_this(); +IndexCatalogEntry* IndexCatalogImpl::getWritableEntryByName(OperationContext* opCtx, + StringData name, + InclusionPolicy inclusionPolicy) { + return _getWritableEntry(findIndexByName(opCtx, name, inclusionPolicy)); } -std::shared_ptr IndexCatalogImpl::getEntryShared( - const IndexDescriptor* indexDescriptor) { +IndexCatalogEntry* IndexCatalogImpl::getWritableEntryByKeyPatternAndOptions( + OperationContext* opCtx, + const BSONObj& key, + const BSONObj& indexSpec, + InclusionPolicy inclusionPolicy) { + return _getWritableEntry( + findIndexByKeyPatternAndOptions(opCtx, key, indexSpec, inclusionPolicy)); +} + +IndexCatalogEntry* IndexCatalogImpl::_getWritableEntry(const IndexDescriptor* descriptor) { + if (!descriptor) { + return nullptr; + } + + auto getWritableEntry = [&](auto& container) -> IndexCatalogEntry* { + std::shared_ptr oldEntry = container.release(descriptor); + + // This collection instance already uniquely owns this IndexCatalogEntry, return it. + if (oldEntry.use_count() == 1) { + IndexCatalogEntry* entryToReturn = const_cast(oldEntry.get()); + container.add(std::move(oldEntry)); + return entryToReturn; + } + + std::shared_ptr writableEntry = + std::make_shared( + *static_cast(oldEntry.get())); + writableEntry->descriptor()->setEntry(writableEntry.get()); + IndexCatalogEntry* entryToReturn = writableEntry.get(); + container.add(std::move(writableEntry)); + return entryToReturn; + }; + + if (descriptor->getEntry()->isReady()) { + return getWritableEntry(_readyIndexes); + } else if (descriptor->getEntry()->isFrozen()) { + return getWritableEntry(_frozenIndexes); + } else { + return getWritableEntry(_buildingIndexes); + } +} + +std::shared_ptr IndexCatalogImpl::getEntryShared( + const IndexDescriptor* indexDescriptor) const { return indexDescriptor->getEntry()->shared_from_this(); } @@ -1654,15 +1708,20 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx, const std::string indexName = oldDesc->indexName(); invariant(collection->isIndexReady(indexName)); - // Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is - // invalid and should not be dereferenced. Also, invalidate the index from the + // Delete the IndexCatalogEntry that owns this descriptor. After deletion, 'oldDesc' is invalid + // and should not be dereferenced. 
Also, invalidate the index from the // CollectionIndexUsageTrackerDecoration (shared state among Collection instances). - auto oldEntry = _readyIndexes.release(oldDesc); - invariant(oldEntry); + IndexCatalogEntry* writableEntry = _getWritableEntry(oldDesc); + invariant(writableEntry); + std::shared_ptr deletedEntry = + _readyIndexes.release(writableEntry->descriptor()); + invariant(writableEntry == deletedEntry.get()); + + // TODO SERVER-77131: Remove index catalog entry instance in commit handler. opCtx->recoveryUnit()->registerChange( std::make_unique(collection->ns(), collection->uuid(), - std::move(oldEntry), + writableEntry->shared_from_this(), collection->getSharedDecorations())); CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations()) .unregisterIndex(indexName); @@ -1673,9 +1732,9 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx, // Re-register this index in the index catalog with the new spec. Also, add the new index // to the CollectionIndexUsageTrackerDecoration (shared state among Collection instances). - auto newDesc = std::make_unique(_getAccessMethodName(keyPattern), spec); + auto newDesc = IndexDescriptor(_getAccessMethodName(keyPattern), spec); auto newEntry = createIndexEntry(opCtx, collection, std::move(newDesc), flags); - invariant(newEntry->isReady(opCtx)); + invariant(newEntry->isReady()); auto desc = newEntry->descriptor(); CollectionIndexUsageTrackerDecoration::get(collection->getSharedDecorations()) .registerIndex(desc->indexName(), @@ -1685,13 +1744,6 @@ const IndexDescriptor* IndexCatalogImpl::refreshEntry(OperationContext* opCtx, // Last rebuild index data for CollectionQueryInfo for this Collection. CollectionQueryInfo::get(collection).rebuildIndexData(opCtx, CollectionPtr(collection)); - opCtx->recoveryUnit()->onCommit( - [newEntry](OperationContext*, boost::optional commitTime) { - if (commitTime) { - newEntry->setMinimumVisibleSnapshot(*commitTime); - } - }); - // Return the new descriptor. 
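The `_getWritableEntry` helper introduced above clones a shared catalog entry only when other Collection instances still hold references to it, and `refreshEntry` now obtains its writable entry through that path. The following is a minimal copy-on-write sketch of the idea, assuming (as the catalog does) that a single thread mutates the container; the container and entry types are simplified stand-ins.

```cpp
#include <cassert>
#include <memory>
#include <string>
#include <vector>

struct Entry {
    std::string name;
    bool ready = true;
};

// Simplified container of shared, read-only entries.
class EntryContainer {
public:
    void add(std::shared_ptr<const Entry> e) { _entries.push_back(std::move(e)); }

    // Returns a pointer that is safe to mutate: reuse the stored object when this container
    // is the sole owner, otherwise clone it so readers of the old instance are unaffected.
    Entry* getWritable(const std::string& name) {
        for (auto& slot : _entries) {
            if (slot->name != name)
                continue;
            if (slot.use_count() == 1) {
                return const_cast<Entry*>(slot.get());  // already uniquely owned
            }
            auto clone = std::make_shared<Entry>(*slot);  // writable copy
            slot = clone;
            return clone.get();
        }
        return nullptr;
    }

private:
    std::vector<std::shared_ptr<const Entry>> _entries;
};

int main() {
    EntryContainer catalog;
    auto shared = std::make_shared<const Entry>(Entry{"a_1", true});
    catalog.add(shared);  // another reader still holds 'shared', so use_count == 2

    Entry* writable = catalog.getWritable("a_1");
    writable->ready = false;  // mutates the clone
    assert(shared->ready);    // the reader's snapshot is untouched
}
```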
return newEntry->descriptor(); } @@ -1704,13 +1756,13 @@ Status IndexCatalogImpl::_indexFilteredRecords(OperationContext* opCtx, const IndexCatalogEntry* index, const std::vector& bsonRecords, int64_t* keysInsertedOut) const { - SharedBufferFragmentBuilder pooledBuilder(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder pooledBuilder(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); InsertDeleteOptions options; prepareInsertDeleteOptions(opCtx, coll->ns(), index->descriptor(), &options); return index->accessMethod()->insert( - opCtx, pooledBuilder, coll, bsonRecords, options, keysInsertedOut); + opCtx, pooledBuilder, coll, index, bsonRecords, options, keysInsertedOut); } Status IndexCatalogImpl::_indexRecords(OperationContext* opCtx, @@ -1743,7 +1795,7 @@ Status IndexCatalogImpl::_updateRecord(OperationContext* const opCtx, const RecordId& recordId, int64_t* const keysInsertedOut, int64_t* const keysDeletedOut) const { - SharedBufferFragmentBuilder pooledBuilder(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder pooledBuilder(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); InsertDeleteOptions options; prepareInsertDeleteOptions(opCtx, coll->ns(), index->descriptor(), &options); @@ -1751,8 +1803,16 @@ Status IndexCatalogImpl::_updateRecord(OperationContext* const opCtx, int64_t keysInserted = 0; int64_t keysDeleted = 0; - auto status = index->accessMethod()->update( - opCtx, pooledBuilder, oldDoc, newDoc, recordId, coll, options, &keysInserted, &keysDeleted); + auto status = index->accessMethod()->update(opCtx, + pooledBuilder, + oldDoc, + newDoc, + recordId, + coll, + index, + options, + &keysInserted, + &keysDeleted); if (!status.isOK()) return status; @@ -1781,7 +1841,7 @@ void IndexCatalogImpl::_unindexRecord(OperationContext* opCtx, } } - SharedBufferFragmentBuilder pooledBuilder(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder pooledBuilder(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); InsertDeleteOptions options; prepareInsertDeleteOptions(opCtx, collection->ns(), entry->descriptor(), &options); @@ -1789,6 +1849,7 @@ void IndexCatalogImpl::_unindexRecord(OperationContext* opCtx, entry->accessMethod()->remove(opCtx, pooledBuilder, collection, + entry, obj, loc, logIfError, @@ -1867,9 +1928,9 @@ Status IndexCatalogImpl::indexRecords(OperationContext* opCtx, IndexCatalog::InclusionPolicy::kUnfinished); if (!idx) { return Status(ErrorCodes::IndexNotFound, - str::stream() - << "Could not find index " << newPath.indexName << " in " - << coll->ns() << " (" << coll->uuid() << ") to set to multikey."); + str::stream() << "Could not find index " << newPath.indexName << " in " + << coll->ns().toStringForErrorMsg() << " (" << coll->uuid() + << ") to set to multikey."); } setMultikeyPaths(opCtx, coll, idx, newPath.multikeyMetadataKeys, newPath.multikeyPaths); } @@ -1936,7 +1997,7 @@ void IndexCatalogImpl::unindexRecord(OperationContext* opCtx, for (IndexCatalogEntryContainer::const_iterator it = _readyIndexes.begin(); it != _readyIndexes.end(); ++it) { - IndexCatalogEntry* entry = it->get(); + const IndexCatalogEntry* entry = it->get(); bool logIfError = !noWarn; _unindexRecord( @@ -1946,10 +2007,10 @@ void IndexCatalogImpl::unindexRecord(OperationContext* opCtx, for (IndexCatalogEntryContainer::const_iterator it = _buildingIndexes.begin(); it != _buildingIndexes.end(); ++it) { - IndexCatalogEntry* entry = it->get(); + const IndexCatalogEntry* entry = it->get(); // If it's a 
background index, we DO NOT want to log anything. - bool logIfError = entry->isReady(opCtx) ? !noWarn : false; + bool logIfError = entry->isReady() ? !noWarn : false; _unindexRecord( opCtx, collection, entry, obj, loc, logIfError, keysDeletedOut, checkRecordId); } @@ -1959,7 +2020,7 @@ Status IndexCatalogImpl::compactIndexes(OperationContext* opCtx) const { for (IndexCatalogEntryContainer::const_iterator it = _readyIndexes.begin(); it != _readyIndexes.end(); ++it) { - IndexCatalogEntry* entry = it->get(); + const IndexCatalogEntry* entry = it->get(); LOGV2_DEBUG(20363, 1, @@ -2025,16 +2086,14 @@ void IndexCatalogImpl::prepareInsertDeleteOptions(OperationContext* opCtx, void IndexCatalogImpl::indexBuildSuccess(OperationContext* opCtx, Collection* coll, IndexCatalogEntry* index) { + // This function can be called inside of a WriteUnitOfWork, which can still encounter a write + // conflict. We don't need to reset any in-memory state as a new writable collection is fetched + // when retrying. auto releasedEntry = _buildingIndexes.release(index->descriptor()); invariant(releasedEntry.get() == index); _readyIndexes.add(std::move(releasedEntry)); - // Wait to unset the interceptor until the index actually commits. If a write conflict is - // encountered and the index commit process is restated, the multikey information from the - // interceptor may still be needed. - opCtx->recoveryUnit()->onCommit([index](OperationContext*, boost::optional) { - index->setIndexBuildInterceptor(nullptr); - }); + index->setIndexBuildInterceptor(nullptr); index->setIsReady(true); } diff --git a/src/mongo/db/catalog/index_catalog_impl.h b/src/mongo/db/catalog/index_catalog_impl.h index acedcfd85acd1..8da88018ac635 100644 --- a/src/mongo/db/catalog/index_catalog_impl.h +++ b/src/mongo/db/catalog/index_catalog_impl.h @@ -29,17 +29,33 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/index_catalog.h" - #include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_build_interceptor.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/resumable_index_builds_gen.h" #include "mongo/db/server_options.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/record_store.h" namespace mongo { @@ -47,7 +63,6 @@ namespace mongo { class Client; class Collection; class CollectionPtr; - class IndexDescriptor; struct InsertDeleteOptions; @@ -117,9 +132,18 @@ class IndexCatalogImpl : public IndexCatalog { const IndexCatalogEntry* getEntry(const IndexDescriptor* desc) const override; - std::shared_ptr getEntryShared(const IndexDescriptor*) const override; + IndexCatalogEntry* getWritableEntryByName( + OperationContext* opCtx, + StringData name, + InclusionPolicy inclusionPolicy = InclusionPolicy::kReady) override; - std::shared_ptr getEntryShared(const IndexDescriptor*) override; + IndexCatalogEntry* getWritableEntryByKeyPatternAndOptions( + OperationContext* opCtx, + const BSONObj& key, + const BSONObj& indexSpec, + InclusionPolicy inclusionPolicy = InclusionPolicy::kReady) override; + 
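The header above declares the new writable-entry getters with an `InclusionPolicy` argument defaulting to ready indexes. The sketch below shows one way such a policy can be modelled as a bitmask filter over entry states; the enumerator names and states are illustrative guesses, not the actual MongoDB definitions.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical bitmask policy selecting which entry states a lookup may return.
enum InclusionPolicy : uint8_t {
    kReady = 1 << 0,
    kUnfinished = 1 << 1,
    kFrozen = 1 << 2,
    kAll = kReady | kUnfinished | kFrozen,
};

struct Entry {
    std::string name;
    InclusionPolicy state;  // one of kReady / kUnfinished / kFrozen
};

// Returns the first entry whose name matches and whose state is included in the policy.
const Entry* findByName(const std::vector<Entry>& entries,
                        const std::string& name,
                        InclusionPolicy policy = kReady) {
    for (const auto& e : entries) {
        if (e.name == name && (e.state & policy)) {
            return &e;
        }
    }
    return nullptr;
}

int main() {
    std::vector<Entry> entries{{"a_1", kReady}, {"b_1", kUnfinished}};

    std::cout << (findByName(entries, "b_1") != nullptr) << "\n";        // 0: building index excluded
    std::cout << (findByName(entries, "b_1", kAll) != nullptr) << "\n";  // 1: included by wider policy
}
```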
+ std::shared_ptr getEntryShared(const IndexDescriptor*) const override; std::vector> getAllReadyEntriesShared() const override; @@ -129,7 +153,7 @@ class IndexCatalogImpl : public IndexCatalog { IndexCatalogEntry* createIndexEntry(OperationContext* opCtx, Collection* collection, - std::unique_ptr descriptor, + IndexDescriptor&& descriptor, CreateIndexEntryFlags flags) override; StatusWith createIndexOnEmptyCollection(OperationContext* opCtx, @@ -150,7 +174,8 @@ class IndexCatalogImpl : public IndexCatalog { std::vector removeExistingIndexesNoChecks( OperationContext* opCtx, const CollectionPtr& collection, - const std::vector& indexSpecsToBuild) const override; + const std::vector& indexSpecsToBuild, + bool removeInProgressIndexBuilds) const override; void dropIndexes(OperationContext* opCtx, Collection* collection, @@ -162,17 +187,13 @@ class IndexCatalogImpl : public IndexCatalog { bool includingIdIndex, std::function onDropFn) override; - Status dropIndex(OperationContext* opCtx, - Collection* collection, - const IndexDescriptor* desc) override; - Status resetUnfinishedIndexForRecovery(OperationContext* opCtx, Collection* collection, - const IndexDescriptor* desc) override; + IndexCatalogEntry* entry) override; Status dropUnfinishedIndex(OperationContext* opCtx, Collection* collection, - const IndexDescriptor* desc) override; + IndexCatalogEntry* entry) override; Status dropIndexEntry(OperationContext* opCtx, Collection* collection, @@ -340,6 +361,13 @@ class IndexCatalogImpl : public IndexCatalog { long long numIndexesInCollectionCatalogEntry, const std::vector& indexNamesToDrop); + /** + * Returns a writable IndexCatalogEntry copy that will be returned by current and future calls + * to this function. Any previous IndexCatalogEntry/IndexDescriptor pointers that were returned + * may be invalidated. 
+ */ + IndexCatalogEntry* _getWritableEntry(const IndexDescriptor* descriptor); + IndexCatalogEntryContainer _readyIndexes; IndexCatalogEntryContainer _buildingIndexes; IndexCatalogEntryContainer _frozenIndexes; diff --git a/src/mongo/db/catalog/index_consistency.cpp b/src/mongo/db/catalog/index_consistency.cpp index 6bc7a834deb44..b6fba9ae87fae 100644 --- a/src/mongo/db/catalog/index_consistency.cpp +++ b/src/mongo/db/catalog/index_consistency.cpp @@ -28,27 +28,69 @@ */ -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/storage/key_string.h" #include - -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/index_consistency.h" - +#include +#include +#include +#include +#include +#include +#include + +#include +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/algorithm.hpp" +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +// IWYU pragma: no_include "boost/move/algo/detail/set_difference.hpp" +#include +// IWYU pragma: no_include "boost/move/detail/iterator_to_raw_pointer.hpp" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_consistency.h" #include "mongo/db/catalog/index_repair.h" #include "mongo/db/catalog/validate_gen.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" #include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/record_id_helpers.h" #include "mongo/db/storage/execution_context.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/shared_buffer_fragment.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" #include "mongo/util/testing_proctor.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -77,7 +119,7 @@ StringSet::hasher hash; * To distinguish these as different index keys, return a pair of index name and index key. 
*/ std::pair _generateKeyForMap(const IndexInfo& indexInfo, - const KeyString::Value& ks) { + const key_string::Value& ks) { return std::make_pair(indexInfo.indexName, std::string(ks.getBuffer(), ks.getSize())); } @@ -102,18 +144,18 @@ BSONObj _rehydrateKey(const BSONObj& keyPattern, const BSONObj& indexKey) { } // namespace -IndexInfo::IndexInfo(const IndexDescriptor* descriptor, IndexAccessMethod* indexAccessMethod) +IndexInfo::IndexInfo(const IndexDescriptor* descriptor) : indexName(descriptor->indexName()), keyPattern(descriptor->keyPattern()), indexNameHash(hash(descriptor->indexName())), ord(Ordering::make(descriptor->keyPattern())), unique(descriptor->unique()), - accessMethod(indexAccessMethod) {} + accessMethod(descriptor->getEntry()->accessMethod()) {} IndexEntryInfo::IndexEntryInfo(const IndexInfo& indexInfo, RecordId entryRecordId, BSONObj entryIdKey, - KeyString::Value entryKeyString) + key_string::Value entryKeyString) : indexName(indexInfo.indexName), keyPattern(indexInfo.keyPattern), ord(indexInfo.ord), @@ -138,14 +180,14 @@ KeyStringIndexConsistency::KeyStringIndexConsistency( CollectionValidation::ValidateState* validateState, const size_t numHashBuckets) : IndexConsistency(opCtx, validateState, numHashBuckets) { - for (const auto& index : _validateState->getIndexes()) { - const auto descriptor = index->descriptor(); - IndexAccessMethod* accessMethod = const_cast(index->accessMethod()); - _indexesInfo.emplace(descriptor->indexName(), IndexInfo(descriptor, accessMethod)); + for (const auto& indexIdent : _validateState->getIndexIdents()) { + const IndexDescriptor* descriptor = + validateState->getCollection()->getIndexCatalog()->findIndexByIdent(opCtx, indexIdent); + _indexesInfo.emplace(descriptor->indexName(), IndexInfo(descriptor)); } } -void KeyStringIndexConsistency::addMultikeyMetadataPath(const KeyString::Value& ks, +void KeyStringIndexConsistency::addMultikeyMetadataPath(const key_string::Value& ks, IndexInfo* indexInfo) { auto hash = _hashKeyString(ks, indexInfo->indexNameHash); if (MONGO_unlikely(_validateState->logDiagnostics())) { @@ -157,7 +199,7 @@ void KeyStringIndexConsistency::addMultikeyMetadataPath(const KeyString::Value& indexInfo->hashedMultikeyMetadataPaths.emplace(hash); } -void KeyStringIndexConsistency::removeMultikeyMetadataPath(const KeyString::Value& ks, +void KeyStringIndexConsistency::removeMultikeyMetadataPath(const key_string::Value& ks, IndexInfo* indexInfo) { auto hash = _hashKeyString(ks, indexInfo->indexNameHash); if (MONGO_unlikely(_validateState->logDiagnostics())) { @@ -198,26 +240,17 @@ bool KeyStringIndexConsistency::haveEntryMismatch() const { void KeyStringIndexConsistency::repairIndexEntries(OperationContext* opCtx, ValidateResults* results) { - invariant(_validateState->getIndexes().size() > 0); - std::shared_ptr index = _validateState->getIndexes().front(); + invariant(_validateState->getIndexIdents().size() > 0); for (auto it = _missingIndexEntries.begin(); it != _missingIndexEntries.end();) { - const KeyString::Value& ks = it->second.keyString; + const key_string::Value& ks = it->second.keyString; const KeyFormat keyFormat = _validateState->getCollection()->getRecordStore()->keyFormat(); const std::string& indexName = it->first.first; - if (indexName != index->descriptor()->indexName()) { - // Assuming that _missingIndexEntries is sorted by indexName, this lookup should not - // happen often. 
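`_generateKeyForMap` above disambiguates byte-identical key strings produced by different indexes by pairing the index name with the raw key bytes. A small standalone illustration of that map-key choice (the key contents here are placeholders):

```cpp
#include <iostream>
#include <map>
#include <string>
#include <utility>

// Two indexes can emit identical serialized keys; keying by (indexName, keyBytes) keeps them distinct.
using EntryKey = std::pair<std::string, std::string>;

int main() {
    std::map<EntryKey, int> missingEntries;

    const std::string keyBytes = "2B-xyz";  // same serialized key produced by two different indexes
    ++missingEntries[{"a_1", keyBytes}];
    ++missingEntries[{"a_1_b_1", keyBytes}];

    std::cout << missingEntries.size() << "\n";  // 2, not 1
}
```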
- for (const auto& currIndex : _validateState->getIndexes()) { - if (currIndex->descriptor()->indexName() == indexName) { - index = currIndex; - break; - } - } - } - + const IndexDescriptor* descriptor = + _validateState->getCollection()->getIndexCatalog()->findIndexByName(opCtx, indexName); + const IndexCatalogEntry* entry = descriptor->getEntry(); int64_t numInserted = index_repair::repairMissingIndexEntry(opCtx, - index, + entry, ks, keyFormat, _validateState->nss(), @@ -241,7 +274,7 @@ void KeyStringIndexConsistency::repairIndexEntries(OperationContext* opCtx, << results->numDocumentsMovedToLostAndFound + results->numOutdatedMissingIndexEntry << " missing index entries. Removed documents can be found in '" - << lostAndFoundNss.ns() << "'."); + << lostAndFoundNss.toStringForErrorMsg() << "'."); } } @@ -280,9 +313,9 @@ void KeyStringIndexConsistency::addIndexEntryErrors(OperationContext* opCtx, bool first = true; for (const auto& missingIndexEntry : missingIndexEntriesBySize) { const IndexEntryInfo& entryInfo = missingIndexEntry->second; - KeyString::Value ks = entryInfo.keyString; + key_string::Value ks = entryInfo.keyString; auto indexKey = - KeyString::toBsonSafe(ks.getBuffer(), ks.getSize(), entryInfo.ord, ks.getTypeBits()); + key_string::toBsonSafe(ks.getBuffer(), ks.getSize(), entryInfo.ord, ks.getTypeBits()); const BSONObj entry = _generateInfo(entryInfo.indexName, entryInfo.keyPattern, entryInfo.recordId, @@ -397,7 +430,7 @@ void KeyStringIndexConsistency::addDocumentMultikeyPaths(IndexInfo* indexInfo, } void KeyStringIndexConsistency::addDocKey(OperationContext* opCtx, - const KeyString::Value& ks, + const key_string::Value& ks, IndexInfo* indexInfo, const RecordId& recordId, ValidateResults* results) { @@ -422,9 +455,9 @@ void KeyStringIndexConsistency::addDocKey(OperationContext* opCtx, "hashUpper"_attr = hashUpper, "hashLower"_attr = hashLower); const BSONObj& keyPatternBson = indexInfo->keyPattern; - auto keyStringBson = KeyString::toBsonSafe( + auto keyStringBson = key_string::toBsonSafe( ks.getBuffer(), ks.getSize(), indexInfo->ord, ks.getTypeBits()); - KeyString::logKeyString( + key_string::logKeyString( recordId, ks, keyPatternBson, keyStringBson, "[validate](record)"); } } else if (lower.indexKeyCount || upper.indexKeyCount) { @@ -450,7 +483,8 @@ void KeyStringIndexConsistency::addDocKey(OperationContext* opCtx, } void KeyStringIndexConsistency::addIndexKey(OperationContext* opCtx, - const KeyString::Value& ks, + const IndexCatalogEntry* entry, + const key_string::Value& ks, IndexInfo* indexInfo, const RecordId& recordId, ValidateResults* results) { @@ -475,9 +509,9 @@ void KeyStringIndexConsistency::addIndexKey(OperationContext* opCtx, "hashUpper"_attr = hashUpper, "hashLower"_attr = hashLower); const BSONObj& keyPatternBson = indexInfo->keyPattern; - auto keyStringBson = KeyString::toBsonSafe( + auto keyStringBson = key_string::toBsonSafe( ks.getBuffer(), ks.getSize(), indexInfo->ord, ks.getTypeBits()); - KeyString::logKeyString( + key_string::logKeyString( recordId, ks, keyPatternBson, keyStringBson, "[validate](index)"); } } else if (lower.indexKeyCount || upper.indexKeyCount) { @@ -486,7 +520,7 @@ void KeyStringIndexConsistency::addIndexKey(OperationContext* opCtx, // the '_missingIndexEntries' map. However if there was no document key for the index entry // key, we add the key to the '_extraIndexEntries' map. 
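The key-tracking logic above records a suspected missing index entry when a document produces a key with no index counterpart, and an extra index entry when an index key has no owning document; when the two sides later meet, the earlier record is cancelled. The following is a compact approximation of that reconciliation with hypothetical names, not the exact bucket-based algorithm.

```cpp
#include <iostream>
#include <map>
#include <string>

// Keys seen from documents but not (yet) from the index, and vice versa.
std::map<std::string, int> missingIndexEntries;
std::map<std::string, int> extraIndexEntries;

// Called for each document-generated key that had no match so far.
void addDocKey(const std::string& key) {
    if (auto it = extraIndexEntries.find(key); it != extraIndexEntries.end()) {
        extraIndexEntries.erase(it);  // the index side already saw it, so it is not extra
    } else {
        ++missingIndexEntries[key];   // otherwise the index appears to be missing this key
    }
}

// Called for each index key that had no match so far.
void addIndexKey(const std::string& key) {
    if (auto it = missingIndexEntries.find(key); it != missingIndexEntries.end()) {
        missingIndexEntries.erase(it);  // the document side already saw it, so it is not missing
    } else {
        ++extraIndexEntries[key];       // otherwise this index key has no owning document
    }
}

int main() {
    addDocKey("k1");    // suspected missing
    addIndexKey("k1");  // reconciled
    addIndexKey("k2");  // genuinely extra
    std::cout << missingIndexEntries.size() << " " << extraIndexEntries.size() << "\n";  // 0 1
}
```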
auto indexKey = - KeyString::toBsonSafe(ks.getBuffer(), ks.getSize(), indexInfo->ord, ks.getTypeBits()); + key_string::toBsonSafe(ks.getBuffer(), ks.getSize(), indexInfo->ord, ks.getTypeBits()); BSONObj info = _generateInfo( indexInfo->indexName, indexInfo->keyPattern, recordId, indexKey, BSONObj()); @@ -497,13 +531,12 @@ void KeyStringIndexConsistency::addIndexKey(OperationContext* opCtx, InsertDeleteOptions options; options.dupsAllowed = !indexInfo->unique; int64_t numDeleted = 0; - writeConflictRetry( - opCtx, "removingExtraIndexEntries", _validateState->nss().ns(), [&] { - WriteUnitOfWork wunit(opCtx); - Status status = indexInfo->accessMethod->asSortedData()->removeKeys( - opCtx, {ks}, options, &numDeleted); - wunit.commit(); - }); + writeConflictRetry(opCtx, "removingExtraIndexEntries", _validateState->nss(), [&] { + WriteUnitOfWork wunit(opCtx); + Status status = indexInfo->accessMethod->asSortedData()->removeKeys( + opCtx, entry, {ks}, options, &numDeleted); + wunit.commit(); + }); auto& indexResults = results->indexResultsMap[indexInfo->indexName]; indexResults.keysTraversed -= numDeleted; results->numRemovedExtraIndexEntries += numDeleted; @@ -623,21 +656,12 @@ void KeyStringIndexConsistency::validateIndexKeyCount(OperationContext* opCtx, // when validating index consistency (*numRecords) -= results.keysRemovedFromRecordStore; - // Do not fail on finding too few index entries compared to collection entries when full:false. - bool hasTooFewKeys = false; - const bool noErrorOnTooFewKeys = !_validateState->isFullIndexValidation(); - if (desc->isIdIndex() && numTotalKeys != (*numRecords)) { - hasTooFewKeys = (numTotalKeys < (*numRecords)); const std::string msg = str::stream() << "number of _id index entries (" << numTotalKeys << ") does not match the number of documents in the index (" << (*numRecords) << ")"; - if (noErrorOnTooFewKeys && (numTotalKeys < (*numRecords))) { - results.warnings.push_back(msg); - } else { - results.errors.push_back(msg); - results.valid = false; - } + results.errors.push_back(msg); + results.valid = false; } // Hashed indexes may never be multikey. @@ -665,23 +689,11 @@ void KeyStringIndexConsistency::validateIndexKeyCount(OperationContext* opCtx, // index may be a full text, geo or special index plugin with different semantics. if (results.valid && !desc->isSparse() && !desc->isPartial() && !desc->isIdIndex() && desc->getAccessMethodName() == "" && numTotalKeys < (*numRecords)) { - hasTooFewKeys = true; const std::string msg = str::stream() << "index " << desc->indexName() << " is not sparse or partial, but has fewer entries (" << numTotalKeys << ") than documents in the index (" << (*numRecords) << ")"; - if (noErrorOnTooFewKeys) { - results.warnings.push_back(msg); - } else { - results.errors.push_back(msg); - results.valid = false; - } - } - - if (!_validateState->isFullIndexValidation() && hasTooFewKeys) { - const std::string warning = str::stream() - << "index " << desc->indexName() << " has fewer keys than records." - << " Please re-run the validate command with {full: true}"; - results.warnings.push_back(warning); + results.errors.push_back(msg); + results.valid = false; } } @@ -689,8 +701,8 @@ namespace { // Ensures that index entries are in increasing or decreasing order. 
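`_validateKeyOrder`, defined next, compares each index entry against the previous one to catch out-of-order entries and, for unique indexes, keys that differ only in their record id. A simplified standalone version of that comparison (keys reduced to strings, record ids to integers) to show its shape:

```cpp
#include <iostream>
#include <string>
#include <vector>

struct IndexEntry {
    std::string key;  // stand-in for the serialized key without the record id
    long long recordId;
};

struct ValidateResults {
    std::vector<std::string> errors;
    bool valid = true;
};

// Entries must be non-decreasing; for a unique index, equal keys are duplicates.
void validateKeyOrder(const IndexEntry& prev,
                      const IndexEntry& curr,
                      bool unique,
                      ValidateResults* results) {
    if (curr.key < prev.key) {
        results->errors.push_back("index entries out of order near record " +
                                  std::to_string(curr.recordId));
        results->valid = false;
    } else if (unique && curr.key == prev.key) {
        results->errors.push_back("unique index has duplicate key, records " +
                                  std::to_string(prev.recordId) + " and " +
                                  std::to_string(curr.recordId));
        results->valid = false;
    }
}

int main() {
    ValidateResults results;
    std::vector<IndexEntry> entries{{"a", 1}, {"a", 2}, {"b", 3}};
    for (size_t i = 1; i < entries.size(); ++i) {
        validateKeyOrder(entries[i - 1], entries[i], /*unique=*/true, &results);
    }
    for (const auto& e : results.errors) std::cout << e << "\n";  // reports the duplicate "a"
}
```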
void _validateKeyOrder(OperationContext* opCtx, const IndexCatalogEntry* index, - const KeyString::Value& currKey, - const KeyString::Value& prevKey, + const key_string::Value& currKey, + const key_string::Value& prevKey, IndexValidateResults* results) { const auto descriptor = index->descriptor(); const bool unique = descriptor->unique(); @@ -718,11 +730,11 @@ void _validateKeyOrder(OperationContext* opCtx, if (results && results->valid) { const auto bsonKey = - KeyString::toBson(currKey, Ordering::make(descriptor->keyPattern())); + key_string::toBson(currKey, Ordering::make(descriptor->keyPattern())); const auto firstRecordId = - KeyString::decodeRecordIdLongAtEnd(prevKey.getBuffer(), prevKey.getSize()); + key_string::decodeRecordIdLongAtEnd(prevKey.getBuffer(), prevKey.getSize()); const auto secondRecordId = - KeyString::decodeRecordIdLongAtEnd(currKey.getBuffer(), currKey.getSize()); + key_string::decodeRecordIdLongAtEnd(currKey.getBuffer(), currKey.getSize()); results->errors.push_back(str::stream() << "Unique index '" << descriptor->indexName() << "' has duplicate key: " << bsonKey << ", first record: " << firstRecordId @@ -739,7 +751,7 @@ int64_t KeyStringIndexConsistency::traverseIndex(OperationContext* opCtx, const IndexCatalogEntry* index, ProgressMeterHolder& _progress, ValidateResults* results) { - const auto descriptor = index->descriptor(); + const IndexDescriptor* descriptor = index->descriptor(); const auto indexName = descriptor->indexName(); auto& indexResults = results->indexResultsMap[indexName]; IndexInfo& indexInfo = this->getIndexInfo(indexName); @@ -747,13 +759,13 @@ int64_t KeyStringIndexConsistency::traverseIndex(OperationContext* opCtx, bool isFirstEntry = true; - const KeyString::Version version = + const key_string::Version version = index->accessMethod()->asSortedData()->getSortedDataInterface()->getKeyStringVersion(); - KeyString::Builder firstKeyStringBuilder( - version, BSONObj(), indexInfo.ord, KeyString::Discriminator::kExclusiveBefore); - const KeyString::Value firstKeyString = firstKeyStringBuilder.getValueCopy(); - KeyString::Value prevIndexKeyStringValue; + key_string::Builder firstKeyStringBuilder( + version, BSONObj(), indexInfo.ord, key_string::Discriminator::kExclusiveBefore); + const key_string::Value firstKeyString = firstKeyStringBuilder.getValueCopy(); + key_string::Value prevIndexKeyStringValue; // Ensure that this index has an open index cursor. const auto indexCursorIt = _validateState->getIndexCursors().find(indexName); @@ -805,7 +817,7 @@ int64_t KeyStringIndexConsistency::traverseIndex(OperationContext* opCtx, } else { try { this->addIndexKey( - opCtx, indexEntry->keyString, &indexInfo, indexEntry->loc, results); + opCtx, index, indexEntry->keyString, &indexInfo, indexEntry->loc, results); } catch (const DBException& e) { StringBuilder ss; ss << "Parsing index key for " << indexInfo.indexName << " recId " @@ -825,7 +837,16 @@ int64_t KeyStringIndexConsistency::traverseIndex(OperationContext* opCtx, if (numKeys % kInterruptIntervalNumRecords == 0) { // Periodically checks for interrupts and yields. opCtx->checkForInterrupt(); + + const std::string indexIdent = index->getIdent(); _validateState->yield(opCtx); + + // After yielding, the latest instance of the collection is fetched and can be different + // from the collection instance prior to yielding. For this reason we need to refresh + // the index entry pointer. 
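The comment above explains why the index entry pointer must be re-resolved after a yield: the yield may install a newer Collection instance, invalidating raw pointers taken before it, so the code that follows saves the index ident and looks the entry up again by ident. A generic sketch of that "save a stable identifier, yield, re-resolve" discipline, with the catalog types reduced to stand-ins:

```cpp
#include <cassert>
#include <map>
#include <memory>
#include <string>

struct IndexEntry {
    std::string ident;
};

// Stand-in catalog: a yield may replace every entry with a fresh instance.
struct Catalog {
    std::map<std::string, std::shared_ptr<IndexEntry>> byIdent;

    const IndexEntry* findByIdent(const std::string& ident) const {
        auto it = byIdent.find(ident);
        return it == byIdent.end() ? nullptr : it->second.get();
    }
};

void yieldAndRefreshCatalog(Catalog& catalog) {
    // Simulate the collection instance being replaced while locks are released:
    // every entry is swapped for a new object, so old raw pointers dangle.
    for (auto& [ident, entry] : catalog.byIdent) {
        entry = std::make_shared<IndexEntry>(*entry);
    }
}

int main() {
    Catalog catalog;
    catalog.byIdent["index-7"] = std::make_shared<IndexEntry>(IndexEntry{"index-7"});

    const IndexEntry* index = catalog.findByIdent("index-7");
    const std::string savedIdent = index->ident;  // stable across yields

    yieldAndRefreshCatalog(catalog);              // 'index' would now be dangling

    index = catalog.findByIdent(savedIdent);      // re-resolve instead of reusing the old pointer
    assert(index != nullptr);
}
```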
+ descriptor = _validateState->getCollection()->getIndexCatalog()->findIndexByIdent( + opCtx, indexIdent); + index = descriptor->getEntry(); } try { @@ -871,7 +892,7 @@ int64_t KeyStringIndexConsistency::traverseIndex(OperationContext* opCtx, // 2. This index was built before 3.4, and there is no multikey path information for // the index. We can effectively 'upgrade' the index so that it does not need to be // rebuilt to update this information. - writeConflictRetry(opCtx, "updateMultikeyPaths", _validateState->nss().ns(), [&]() { + writeConflictRetry(opCtx, "updateMultikeyPaths", _validateState->nss(), [&]() { WriteUnitOfWork wuow(opCtx); auto writeableIndex = const_cast(index); const bool isMultikey = true; @@ -898,7 +919,7 @@ int64_t KeyStringIndexConsistency::traverseIndex(OperationContext* opCtx, // This makes an improvement in the case that no documents make the index multikey and // the flag can be unset entirely. This may be due to a change in the data or historical // multikey bugs that have persisted incorrect multikey infomation. - writeConflictRetry(opCtx, "unsetMultikeyPaths", _validateState->nss().ns(), [&]() { + writeConflictRetry(opCtx, "unsetMultikeyPaths", _validateState->nss(), [&]() { WriteUnitOfWork wuow(opCtx); auto writeableIndex = const_cast(index); const bool isMultikey = false; @@ -927,7 +948,7 @@ void KeyStringIndexConsistency::traverseRecord(OperationContext* opCtx, const auto iam = index->accessMethod()->asSortedData(); const auto descriptor = index->descriptor(); - SharedBufferFragmentBuilder pool(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder pool(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); auto& executionCtx = StorageExecutionContext::get(opCtx); const auto documentKeySet = executionCtx.keys(); @@ -936,6 +957,7 @@ void KeyStringIndexConsistency::traverseRecord(OperationContext* opCtx, iam->getKeys(opCtx, coll, + index, pool, recordBson, InsertDeleteOptions::ConstraintEnforcementMode::kEnforceConstraints, @@ -957,10 +979,10 @@ void KeyStringIndexConsistency::traverseRecord(OperationContext* opCtx, "recordId"_attr = recordId, "record"_attr = redact(recordBson)); for (auto& key : *documentKeySet) { - auto indexKey = KeyString::toBsonSafe(key.getBuffer(), - key.getSize(), - iam->getSortedDataInterface()->getOrdering(), - key.getTypeBits()); + auto indexKey = key_string::toBsonSafe(key.getBuffer(), + key.getSize(), + iam->getSortedDataInterface()->getOrdering(), + key.getTypeBits()); const BSONObj rehydratedKey = _rehydrateKey(descriptor->keyPattern(), indexKey); LOGV2(7556101, "Index key for document with multikey inconsistency", @@ -972,7 +994,7 @@ void KeyStringIndexConsistency::traverseRecord(OperationContext* opCtx, if (!index->isMultikey(opCtx, coll) && shouldBeMultikey) { if (_validateState->fixErrors()) { - writeConflictRetry(opCtx, "setIndexAsMultikey", coll->ns().ns(), [&] { + writeConflictRetry(opCtx, "setIndexAsMultikey", coll->ns(), [&] { WriteUnitOfWork wuow(opCtx); coll->getIndexCatalog()->setMultikeyPaths( opCtx, coll, descriptor, *multikeyMetadataKeys, *documentMultikeyPaths); @@ -982,7 +1004,7 @@ void KeyStringIndexConsistency::traverseRecord(OperationContext* opCtx, LOGV2(4614700, "Index set to multikey", "indexName"_attr = descriptor->indexName(), - "collection"_attr = coll->ns().ns()); + "collection"_attr = coll->ns()); results->warnings.push_back(str::stream() << "Index " << descriptor->indexName() << " set to multikey."); results->repaired = true; @@ -1009,7 +1031,7 @@ void 
KeyStringIndexConsistency::traverseRecord(OperationContext* opCtx, const MultikeyPaths& indexPaths = index->getMultikeyPaths(opCtx, coll); if (!MultikeyPathTracker::covers(indexPaths, *documentMultikeyPaths.get())) { if (_validateState->fixErrors()) { - writeConflictRetry(opCtx, "increaseMultikeyPathCoverage", coll->ns().ns(), [&] { + writeConflictRetry(opCtx, "increaseMultikeyPathCoverage", coll->ns(), [&] { WriteUnitOfWork wuow(opCtx); coll->getIndexCatalog()->setMultikeyPaths( opCtx, coll, descriptor, *multikeyMetadataKeys, *documentMultikeyPaths); @@ -1019,7 +1041,7 @@ void KeyStringIndexConsistency::traverseRecord(OperationContext* opCtx, LOGV2(4614701, "Multikey paths updated to cover multikey document", "indexName"_attr = descriptor->indexName(), - "collection"_attr = coll->ns().ns()); + "collection"_attr = coll->ns()); results->warnings.push_back(str::stream() << "Index " << descriptor->indexName() << " multikey paths updated."); results->repaired = true; @@ -1079,7 +1101,7 @@ BSONObj KeyStringIndexConsistency::_generateInfo(const std::string& indexName, return infoBuilder.obj(); } -uint32_t KeyStringIndexConsistency::_hashKeyString(const KeyString::Value& ks, +uint32_t KeyStringIndexConsistency::_hashKeyString(const key_string::Value& ks, const uint32_t indexNameHash) const { return ks.hash(indexNameHash); } diff --git a/src/mongo/db/catalog/index_consistency.h b/src/mongo/db/catalog/index_consistency.h index e4322ab4569ca..8945bc5612318 100644 --- a/src/mongo/db/catalog/index_consistency.h +++ b/src/mongo/db/catalog/index_consistency.h @@ -29,8 +29,26 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/ordering.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog/throttle_cursor.h" +#include "mongo/db/catalog/validate_results.h" #include "mongo/db/catalog/validate_state.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/key_string.h" #include "mongo/util/progress_meter.h" @@ -42,7 +60,7 @@ class IndexDescriptor; * Contains all the index information and stats throughout the validation. */ struct IndexInfo { - IndexInfo(const IndexDescriptor* descriptor, IndexAccessMethod* indexAccessMethod); + IndexInfo(const IndexDescriptor* descriptor); // Index name. const std::string indexName; // Contains the indexes key pattern. @@ -66,7 +84,7 @@ struct IndexInfo { // Indicates whether key entries must be unique. const bool unique; // Index access method pointer. - IndexAccessMethod* accessMethod; + const IndexAccessMethod* accessMethod; }; /** @@ -76,13 +94,13 @@ struct IndexEntryInfo { IndexEntryInfo(const IndexInfo& indexInfo, RecordId entryRecordId, BSONObj entryIdKey, - KeyString::Value entryKeyString); + key_string::Value entryKeyString); const std::string indexName; const BSONObj keyPattern; const Ordering ord; RecordId recordId; BSONObj idKey; - KeyString::Value keyString; + key_string::Value keyString; }; @@ -248,7 +266,7 @@ class KeyStringIndexConsistency final : protected IndexConsistency { * inconsistent hash buckets during the first phase of validation. 
*/ void addDocKey(OperationContext* opCtx, - const KeyString::Value& ks, + const key_string::Value& ks, IndexInfo* indexInfo, const RecordId& recordId, ValidateResults* results); @@ -260,7 +278,8 @@ class KeyStringIndexConsistency final : protected IndexConsistency { * inconsistent hash buckets during the first phase of validation to document keys. */ void addIndexKey(OperationContext* opCtx, - const KeyString::Value& ks, + const IndexCatalogEntry* entry, + const key_string::Value& ks, IndexInfo* indexInfo, const RecordId& recordId, ValidateResults* results); @@ -276,8 +295,8 @@ class KeyStringIndexConsistency final : protected IndexConsistency { * entries and remove any path encountered. As we expect the index to contain a super-set of * the collection paths, a non-empty set represents an invalid index. */ - void addMultikeyMetadataPath(const KeyString::Value& ks, IndexInfo* indexInfo); - void removeMultikeyMetadataPath(const KeyString::Value& ks, IndexInfo* indexInfo); + void addMultikeyMetadataPath(const key_string::Value& ks, IndexInfo* indexInfo); + void removeMultikeyMetadataPath(const key_string::Value& ks, IndexInfo* indexInfo); size_t getMultikeyMetadataPathCount(IndexInfo* indexInfo); /** @@ -301,7 +320,7 @@ class KeyStringIndexConsistency final : protected IndexConsistency { /** * Returns a hashed value from the given KeyString and index namespace. */ - uint32_t _hashKeyString(const KeyString::Value& ks, uint32_t indexNameHash) const; + uint32_t _hashKeyString(const key_string::Value& ks, uint32_t indexNameHash) const; /** * Prints the collection document's and index entry's metadata. diff --git a/src/mongo/db/catalog/index_key_validate.cpp b/src/mongo/db/catalog/index_key_validate.cpp index 38dd3153a921a..8dde9fd9551c6 100644 --- a/src/mongo/db/catalog/index_key_validate.cpp +++ b/src/mongo/db/catalog/index_key_validate.cpp @@ -28,17 +28,36 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/index_key_validate.h" - -#include +#include #include +#include #include +#include #include - +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/field_ref.h" #include "mongo/db/index/column_key_generator.h" #include "mongo/db/index/columns_access_method.h" @@ -46,17 +65,26 @@ #include "mongo/db/index/wildcard_key_generator.h" #include "mongo/db/index/wildcard_validation.h" #include "mongo/db/index_names.h" -#include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/service_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include 
"mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/represent_as.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -135,7 +163,7 @@ BSONObj buildRepairedIndexSpec( Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion indexVersion, - bool inCollValidation) { + bool checkFCV) { const ErrorCodes::Error code = ErrorCodes::CannotCreateIndex; if (key.objsize() > 2048) @@ -158,7 +186,7 @@ Status validateKeyPattern(const BSONObj& key, // still be able to use the feature even if the FCV is downgraded. auto compoundWildcardIndexesAllowed = feature_flags::gFeatureFlagCompoundWildcardIndexes.isEnabledAndIgnoreFCVUnsafe(); - if (serverGlobalParams.featureCompatibility.isVersionInitialized() && !inCollValidation) { + if (serverGlobalParams.featureCompatibility.isVersionInitialized() && checkFCV) { compoundWildcardIndexesAllowed = feature_flags::gFeatureFlagCompoundWildcardIndexes.isEnabled( serverGlobalParams.featureCompatibility); @@ -336,7 +364,7 @@ BSONObj repairIndexSpec(const NamespaceString& ns, StatusWith validateIndexSpec(OperationContext* opCtx, const BSONObj& indexSpec, - bool inCollValidation) { + bool checkFCV) { bool hasKeyPatternField = false; bool hasIndexNameField = false; bool hasNamespaceField = false; @@ -389,8 +417,8 @@ StatusWith validateIndexSpec(OperationContext* opCtx, // Here we always validate the key pattern according to the most recent rules, in order // to enforce that all new indexes have well-formed key patterns. 
- Status keyPatternValidateStatus = validateKeyPattern( - keyPattern, IndexDescriptor::kLatestIndexVersion, inCollValidation); + Status keyPatternValidateStatus = + validateKeyPattern(keyPattern, IndexDescriptor::kLatestIndexVersion, checkFCV); if (!keyPatternValidateStatus.isOK()) { return keyPatternValidateStatus; } @@ -738,10 +766,8 @@ StatusWith validateIndexSpec(OperationContext* opCtx, } if (hasOriginalSpecField) { - StatusWith modifiedOriginalSpec = - validateIndexSpec(opCtx, - indexSpec.getObjectField(IndexDescriptor::kOriginalSpecFieldName), - inCollValidation); + StatusWith modifiedOriginalSpec = validateIndexSpec( + opCtx, indexSpec.getObjectField(IndexDescriptor::kOriginalSpecFieldName), checkFCV); if (!modifiedOriginalSpec.isOK()) { return modifiedOriginalSpec.getStatus(); } @@ -997,13 +1023,15 @@ bool isIndexAllowedInAPIVersion1(const IndexDescriptor& indexDesc) { !indexDesc.isSparse(); } -BSONObj parseAndValidateIndexSpecs(OperationContext* opCtx, const BSONObj& indexSpecObj) { +BSONObj parseAndValidateIndexSpecs(OperationContext* opCtx, + const BSONObj& indexSpecObj, + bool checkFCV) { constexpr auto k_id_ = "_id_"_sd; constexpr auto kStar = "*"_sd; BSONObj parsedIndexSpec = indexSpecObj; - auto indexSpecStatus = index_key_validate::validateIndexSpec(opCtx, parsedIndexSpec); + auto indexSpecStatus = index_key_validate::validateIndexSpec(opCtx, parsedIndexSpec, checkFCV); uassertStatusOK(indexSpecStatus.getStatus().withContext( str::stream() << "Error in specification " << parsedIndexSpec.toString())); diff --git a/src/mongo/db/catalog/index_key_validate.h b/src/mongo/db/catalog/index_key_validate.h index 67ec14be31d6f..575d313460ba8 100644 --- a/src/mongo/db/catalog/index_key_validate.h +++ b/src/mongo/db/catalog/index_key_validate.h @@ -29,9 +29,21 @@ #pragma once +#include #include - +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/util/duration.h" namespace mongo { class BSONObj; @@ -79,32 +91,36 @@ static std::set allowedFieldNames = { IndexDescriptor::kPrepareUniqueFieldName, IndexDescriptor::kColumnStoreCompressorFieldName, // Index creation under legacy writeMode can result in an index spec with an _id field. - "_id"}; + "_id", + // TODO SERVER-76108: Field names are not validated to match index type. This was used for the + // removed 'geoHaystack' index type, but users could have set it for other index types as well. + // We need to keep allowing it until FCV upgrade is implemented to clean this up. + "bucketSize"_sd, +}; /** * Checks if the key is valid for building an index according to the validation rules for the given - * index version. If 'inCollValidation' is true we skip checking FCV for compound wildcard indexes - * validation. + * index version. If 'checkFCV' is true we will check FCV for compound wildcard indexes validation. * - * TODO SERVER-68303: Consider removing 'inCollValidation' flag when 'CompoundWildcardIndexes' + * TODO SERVER-68303: Consider removing 'checkFCV' flag when 'CompoundWildcardIndexes' * feature flag is removed. 
*/ Status validateKeyPattern(const BSONObj& key, IndexDescriptor::IndexVersion indexVersion, - bool inCollValidation = false); + bool checkFCV = false); /** * Validates the index specification 'indexSpec' and returns an equivalent index specification that * has any missing attributes filled in. If the index specification is malformed, then an error - * status is returned. If 'inCollValidation' is true we skip checking FCV for compound wildcard - * indexes validation. + * status is returned. If 'checkFCV' is true we will check FCV for compound wildcard indexes + * validation. * - * TODO SERVER-68303: Consider removing 'inCollValidation' flag when 'CompoundWildcardIndexes' + * TODO SERVER-68303: Consider removing 'checkFCV' flag when 'CompoundWildcardIndexes' * feature flag is removed. */ StatusWith validateIndexSpec(OperationContext* opCtx, const BSONObj& indexSpec, - bool inCollValidation = false); + bool checkFCV = false); /** * Returns a new index spec with any unknown field names removed from 'indexSpec'. @@ -171,9 +187,13 @@ bool isIndexAllowedInAPIVersion1(const IndexDescriptor& indexDesc); /** * Parses the index specifications from 'indexSpecObj', validates them, and returns equivalent index * specifications that have any missing attributes filled in. If any index specification is - * malformed, then an error status is returned. + * malformed, then an error status is returned. If 'checkFCV' is true we should validate the index + * spec taking into account the FCV value. Some certain type of index cannot be created with + * downgraded FCV but can be continuously used if it's already created before FCV downgrade. */ -BSONObj parseAndValidateIndexSpecs(OperationContext* opCtx, const BSONObj& indexSpecObj); +BSONObj parseAndValidateIndexSpecs(OperationContext* opCtx, + const BSONObj& indexSpecObj, + bool checkFCV); /** * Optional filtering function to adjust allowed index field names at startup. diff --git a/src/mongo/db/catalog/index_key_validate_test.cpp b/src/mongo/db/catalog/index_key_validate_test.cpp index 2becd45689782..9353905e390f7 100644 --- a/src/mongo/db/catalog/index_key_validate_test.cpp +++ b/src/mongo/db/catalog/index_key_validate_test.cpp @@ -27,19 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/index_key_validate.h" - +#include +#include +#include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/query/query_knobs_gen.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/catalog/index_repair.cpp b/src/mongo/db/catalog/index_repair.cpp index 96fca0781dc58..c4d32732e3b70 100644 --- a/src/mongo/db/catalog/index_repair.cpp +++ b/src/mongo/db/catalog/index_repair.cpp @@ -28,14 +28,44 @@ */ #include "mongo/db/catalog/index_repair.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/collection_yield_restore.h" -#include "mongo/db/catalog/validate_state.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/logv2/log_debug.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace index_repair { @@ -47,28 +77,32 @@ StatusWith moveRecordToLostAndFound(OperationContext* opCtx, AutoGetCollection autoColl(opCtx, lostAndFoundNss, MODE_IX); auto catalog = CollectionCatalog::get(opCtx); auto originalCollection = catalog->lookupCollectionByNamespace(opCtx, nss); - CollectionPtr localCollection(catalog->lookupCollectionByNamespace(opCtx, lostAndFoundNss)); + + auto localCollection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(lostAndFoundNss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); // Creates the collection if it doesn't exist. - if (!localCollection) { + if (!localCollection.exists()) { Status status = - writeConflictRetry(opCtx, "createLostAndFoundCollection", lostAndFoundNss.ns(), [&]() { + writeConflictRetry(opCtx, "createLostAndFoundCollection", lostAndFoundNss, [&]() { // Ensure the database exists. 
auto db = autoColl.ensureDbExists(opCtx); - invariant(db, lostAndFoundNss.ns()); + invariant(db, lostAndFoundNss.toStringForErrorMsg()); WriteUnitOfWork wuow(opCtx); + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx, &localCollection); // Since we are potentially deleting a document with duplicate _id values, we need // to be able to insert into the lost and found collection without generating any // duplicate key errors on the _id value. CollectionOptions collOptions; collOptions.setNoIdIndex(); - localCollection = - CollectionPtr(db->createCollection(opCtx, lostAndFoundNss, collOptions)); - - // Ensure the collection exists. - invariant(localCollection, lostAndFoundNss.ns()); + db->createCollection(opCtx, lostAndFoundNss, collOptions); wuow.commit(); return Status::OK(); @@ -78,10 +112,11 @@ StatusWith moveRecordToLostAndFound(OperationContext* opCtx, } } - localCollection.makeYieldable(opCtx, LockedCollectionYieldRestore(opCtx, localCollection)); + // Ensure the collection exists. + invariant(localCollection.exists(), lostAndFoundNss.toStringForErrorMsg()); return writeConflictRetry( - opCtx, "writeDupDocToLostAndFoundCollection", nss.ns(), [&]() -> StatusWith { + opCtx, "writeDupDocToLostAndFoundCollection", nss, [&]() -> StatusWith { WriteUnitOfWork wuow(opCtx); Snapshotted doc; int docSize = 0; @@ -94,7 +129,7 @@ StatusWith moveRecordToLostAndFound(OperationContext* opCtx, // Write document to lost_and_found collection and delete from original collection. Status status = collection_internal::insertDocument( - opCtx, localCollection, InsertStatement(doc.value()), nullptr); + opCtx, localCollection.getCollectionPtr(), InsertStatement(doc.value()), nullptr); if (!status.isOK()) { return status; } @@ -118,8 +153,8 @@ StatusWith moveRecordToLostAndFound(OperationContext* opCtx, } int repairMissingIndexEntry(OperationContext* opCtx, - std::shared_ptr& index, - const KeyString::Value& ks, + const IndexCatalogEntry* index, + const key_string::Value& ks, const KeyFormat& keyFormat, const NamespaceString& nss, const CollectionPtr& coll, @@ -130,11 +165,12 @@ int repairMissingIndexEntry(OperationContext* opCtx, int64_t numInserted = 0; Status insertStatus = Status::OK(); - writeConflictRetry(opCtx, "insertingMissingIndexEntries", nss.ns(), [&] { + writeConflictRetry(opCtx, "insertingMissingIndexEntries", nss, [&] { WriteUnitOfWork wunit(opCtx); insertStatus = accessMethod->insertKeysAndUpdateMultikeyPaths(opCtx, coll, + index, {ks}, {}, {}, @@ -160,10 +196,10 @@ int repairMissingIndexEntry(OperationContext* opCtx, RecordId rid; if (keyFormat == KeyFormat::Long) { - rid = KeyString::decodeRecordIdLongAtEnd(ks.getBuffer(), ks.getSize()); + rid = key_string::decodeRecordIdLongAtEnd(ks.getBuffer(), ks.getSize()); } else { invariant(keyFormat == KeyFormat::String); - rid = KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()); + rid = key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()); } auto dupKeyInfo = insertStatus.extraInfo(); @@ -195,10 +231,10 @@ int repairMissingIndexEntry(OperationContext* opCtx, // duplicate records is in the index, so we need to add the newer record to the // index. 
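The repair path above handles a duplicate-key failure by deciding which of the two conflicting records keeps its index entry: if the record moved to the lost-and-found collection is the one that currently owns the key, the insert is retried so the surviving record gets an entry. A condensed sketch of that control flow with all storage details stubbed out; which record survives is chosen here by the larger record id purely for illustration, not by the real repair rules.

```cpp
#include <algorithm>
#include <iostream>
#include <optional>

// Record id currently owning the key in a stubbed unique index, if any.
std::optional<long long> indexedOwner;

bool insertKey(long long recordId, std::optional<long long>* duplicateOf) {
    if (indexedOwner && *indexedOwner != recordId) {
        *duplicateOf = indexedOwner;  // duplicate key: report the conflicting record
        return false;
    }
    indexedOwner = recordId;
    return true;
}

long long moveToLostAndFound(long long a, long long b) {
    // Keep the newer record (larger id) in place; the older one is moved aside.
    const long long moved = std::min(a, b);
    if (indexedOwner == moved) indexedOwner.reset();  // its index entry goes with it
    std::cout << "moved record " << moved << " to lost-and-found\n";
    return moved;
}

int main() {
    indexedOwner = 3;                 // record 3 already owns the key
    std::optional<long long> dupKeyRid;
    if (!insertKey(7, &dupKeyRid)) {  // record 7 is missing its index entry
        const long long moved = moveToLostAndFound(7, *dupKeyRid);
        if (moved == *dupKeyRid) {
            insertKey(7, &dupKeyRid);  // the surviving record still needs its entry
        }
    }
    std::cout << "key now owned by record " << *indexedOwner << "\n";  // 7
}
```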
if (dupKeyRid && ridToMove == *dupKeyRid) { - writeConflictRetry(opCtx, "insertingMissingIndexEntries", nss.ns(), [&] { + writeConflictRetry(opCtx, "insertingMissingIndexEntries", nss, [&] { WriteUnitOfWork wunit(opCtx); insertStatus = accessMethod->insertKeysAndUpdateMultikeyPaths( - opCtx, coll, {ks}, {}, {}, options, nullptr, nullptr); + opCtx, coll, index, {ks}, {}, {}, options, nullptr, nullptr); wunit.commit(); }); if (!insertStatus.isOK()) { @@ -208,7 +244,7 @@ int repairMissingIndexEntry(OperationContext* opCtx, } } else { results->errors.push_back(str::stream() << "unable to move record " << rid << " to " - << lostAndFoundNss.ns()); + << lostAndFoundNss.toStringForErrorMsg()); } } else { // If the missing index entry does not exist in the record store, then it has diff --git a/src/mongo/db/catalog/index_repair.h b/src/mongo/db/catalog/index_repair.h index 493f3943b65fe..ef7ad2cd5f1f8 100644 --- a/src/mongo/db/catalog/index_repair.h +++ b/src/mongo/db/catalog/index_repair.h @@ -30,8 +30,15 @@ #pragma once #include "mongo/base/status_with.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog/validate_results.h" #include "mongo/db/catalog/validate_state.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" namespace mongo { namespace index_repair { @@ -52,8 +59,8 @@ StatusWith moveRecordToLostAndFound(OperationContext* opCtx, * in a local lost and found collection. */ int repairMissingIndexEntry(OperationContext* opCtx, - std::shared_ptr& index, - const KeyString::Value& ks, + const IndexCatalogEntry* index, + const key_string::Value& ks, const KeyFormat& keyFormat, const NamespaceString& nss, const CollectionPtr& coll, diff --git a/src/mongo/db/catalog/index_signature_test.cpp b/src/mongo/db/catalog/index_signature_test.cpp index 8a3851c9da021..552a14a6f3baf 100644 --- a/src/mongo/db/catalog/index_signature_test.cpp +++ b/src/mongo/db/catalog/index_signature_test.cpp @@ -27,16 +27,43 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/index_catalog_entry_impl.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/query/collection_query_info.h" +#include "mongo/db/index_names.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -346,12 +373,17 @@ TEST_F(IndexSignatureTest, // signature. auto* wildcardIndex = unittest::assertGet(createIndex(fromjson("{v: 2, name: 'wc_all', key: {'$**': 1}}"))); + const std::string wildcardIndexIdent = wildcardIndex->getIdent(); + + auto getWildcardIndex = [&]() -> const IndexCatalogEntry* { + return coll()->getIndexCatalog()->findIndexByIdent(opCtx(), wildcardIndexIdent)->getEntry(); + }; // Verifies that another wildcard index with empty wildcardProjection compares identical // to 'wildcardIndex' after normalizing the index spec. auto anotherWcAllSpec = normalizeIndexSpec(fromjson("{v: 2, key: {'$**': 1}}")); auto anotherWcAllProjDesc = makeIndexDescriptor(anotherWcAllSpec); - ASSERT(anotherWcAllProjDesc->compareIndexOptions(opCtx(), coll()->ns(), wildcardIndex) == + ASSERT(anotherWcAllProjDesc->compareIndexOptions(opCtx(), coll()->ns(), getWildcardIndex()) == IndexDescriptor::Comparison::kIdentical); ASSERT_EQ(createIndex(anotherWcAllProjDesc->infoObj().addFields(fromjson("{name: 'wc_all'}"))), ErrorCodes::IndexAlreadyExists); @@ -361,20 +393,25 @@ TEST_F(IndexSignatureTest, // Verifies that an index with non-empty value for 'wildcardProjection' option compares // different from the base wildcard index and thus can be created. 
- auto wcProjADesc = - makeIndexDescriptor(normalizeIndexSpec(wildcardIndex->descriptor()->infoObj().addFields( + auto wcProjADesc = makeIndexDescriptor( + normalizeIndexSpec(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{wildcardProjection: {a: 1}}")))); - ASSERT(wcProjADesc->compareIndexOptions(opCtx(), coll()->ns(), wildcardIndex) == + ASSERT(wcProjADesc->compareIndexOptions(opCtx(), coll()->ns(), getWildcardIndex()) == IndexDescriptor::Comparison::kDifferent); auto* wcProjAIndex = unittest::assertGet( createIndex(wcProjADesc->infoObj().addFields(fromjson("{name: 'wc_a'}")))); + const std::string wcProjAIndexIdent = wcProjAIndex->getIdent(); + + auto getWcProjAIndex = [&]() -> const IndexCatalogEntry* { + return coll()->getIndexCatalog()->findIndexByIdent(opCtx(), wcProjAIndexIdent)->getEntry(); + }; // Verifies that an index with the same value for 'wildcardProjection' option as the // wcProjAIndex compares identical. - auto anotherWcProjADesc = - makeIndexDescriptor(normalizeIndexSpec(wildcardIndex->descriptor()->infoObj().addFields( + auto anotherWcProjADesc = makeIndexDescriptor( + normalizeIndexSpec(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{wildcardProjection: {a: 1}}")))); - ASSERT(anotherWcProjADesc->compareIndexOptions(opCtx(), coll()->ns(), wcProjAIndex) == + ASSERT(anotherWcProjADesc->compareIndexOptions(opCtx(), coll()->ns(), getWcProjAIndex()) == IndexDescriptor::Comparison::kIdentical); // Verifies that creating an index with the same value for 'wildcardProjection' option and the @@ -388,24 +425,24 @@ TEST_F(IndexSignatureTest, // Verifies that an index with a different value for 'wildcardProjection' option compares // different from the base wildcard index or 'wc_a' and thus can be created. - auto wcProjABDesc = - makeIndexDescriptor(normalizeIndexSpec(wildcardIndex->descriptor()->infoObj().addFields( + auto wcProjABDesc = makeIndexDescriptor( + normalizeIndexSpec(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{wildcardProjection: {a: 1, b: 1}}")))); - ASSERT(wcProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), wildcardIndex) == + ASSERT(wcProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), getWildcardIndex()) == IndexDescriptor::Comparison::kDifferent); - ASSERT(wcProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), wcProjAIndex) == + ASSERT(wcProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), getWcProjAIndex()) == IndexDescriptor::Comparison::kDifferent); auto* wcProjABIndex = unittest::assertGet( createIndex(wcProjABDesc->infoObj().addFields(fromjson("{name: 'wc_a_b'}")))); // Verifies that an index with sub fields for 'wildcardProjection' option compares // different from the base wildcard index or 'wc_a' or 'wc_a_b' and thus can be created. 
- auto wcProjASubBCDesc = - makeIndexDescriptor(normalizeIndexSpec(wildcardIndex->descriptor()->infoObj().addFields( + auto wcProjASubBCDesc = makeIndexDescriptor( + normalizeIndexSpec(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{wildcardProjection: {a: {b: 1, c: 1}}}")))); - ASSERT(wcProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), wildcardIndex) == + ASSERT(wcProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), getWildcardIndex()) == IndexDescriptor::Comparison::kDifferent); - ASSERT(wcProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), wcProjAIndex) == + ASSERT(wcProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), getWcProjAIndex()) == IndexDescriptor::Comparison::kDifferent); ASSERT(wcProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), wcProjABIndex) == IndexDescriptor::Comparison::kDifferent); @@ -413,8 +450,8 @@ TEST_F(IndexSignatureTest, createIndex(wcProjASubBCDesc->infoObj().addFields(fromjson("{name: 'wc_a_sub_b_c'}")))); // Verifies that two indexes with the same projection in different order compares identical. - auto wcProjASubCBDesc = - makeIndexDescriptor(normalizeIndexSpec(wildcardIndex->descriptor()->infoObj().addFields( + auto wcProjASubCBDesc = makeIndexDescriptor( + normalizeIndexSpec(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{wildcardProjection: {a: {c: 1, b: 1}}}")))); ASSERT(wcProjASubCBDesc->compareIndexOptions(opCtx(), coll()->ns(), wcProjASubBCIndex) == IndexDescriptor::Comparison::kIdentical); @@ -430,7 +467,7 @@ TEST_F(IndexSignatureTest, // non-signature index option compares equivalent as the 'wcProjAIndex' auto wcProjAWithNonSigDesc = makeIndexDescriptor( anotherWcProjADesc->infoObj().addFields(fromjson("{storageEngine: {wiredTiger: {}}}"))); - ASSERT(wcProjAWithNonSigDesc->compareIndexOptions(opCtx(), coll()->ns(), wcProjAIndex) == + ASSERT(wcProjAWithNonSigDesc->compareIndexOptions(opCtx(), coll()->ns(), getWcProjAIndex()) == IndexDescriptor::Comparison::kEquivalent); // Verifies that an index with the same value for 'wildcardProjection' option, non-signature @@ -452,6 +489,11 @@ TEST_F(IndexSignatureTest, // signature. auto* wildcardIndex = unittest::assertGet(createIndex( fromjson("{v: 2, name: 'cwi_all', key: {'$**': 1, b: 1}, wildcardProjection: {b: 0}}"))); + const std::string wildcardIndexIdent = wildcardIndex->getIdent(); + + auto getWildcardIndex = [&]() -> const IndexCatalogEntry* { + return coll()->getIndexCatalog()->findIndexByIdent(opCtx(), wildcardIndexIdent)->getEntry(); + }; // Verifies that an index with the same value for 'wildcardProjection' option as the // wcProjAIndex compares identical. @@ -473,20 +515,20 @@ TEST_F(IndexSignatureTest, // Verifies that an index with a different value for 'wildcardProjection' option compares // different from the base wildcard index or 'cwi_a' and thus can be created. 
- auto wcProjABDesc = - makeIndexDescriptor(normalizeIndexSpec(wildcardIndex->descriptor()->infoObj().addFields( + auto wcProjABDesc = makeIndexDescriptor( + normalizeIndexSpec(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{wildcardProjection: {a: 1}}")))); - ASSERT(wcProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), wildcardIndex) == + ASSERT(wcProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), getWildcardIndex()) == IndexDescriptor::Comparison::kDifferent); auto* wcProjABIndex = unittest::assertGet( createIndex(wcProjABDesc->infoObj().addFields(fromjson("{name: 'cwi_a_b'}")))); // Verifies that an index with sub fields for 'wildcardProjection' option compares // different from the base wildcard index or 'cwi_a' or 'cwi_a_b' and thus can be created. - auto wcProjASubBCDesc = - makeIndexDescriptor(normalizeIndexSpec(wildcardIndex->descriptor()->infoObj().addFields( + auto wcProjASubBCDesc = makeIndexDescriptor( + normalizeIndexSpec(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{wildcardProjection: {a: {b: 1, c: 1}}}")))); - ASSERT(wcProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), wildcardIndex) == + ASSERT(wcProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), getWildcardIndex()) == IndexDescriptor::Comparison::kDifferent); ASSERT(wcProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), wcProjABIndex) == IndexDescriptor::Comparison::kDifferent); @@ -494,8 +536,8 @@ TEST_F(IndexSignatureTest, createIndex(wcProjASubBCDesc->infoObj().addFields(fromjson("{name: 'cwi_a_sub_b_c'}")))); // Verifies that two indexes with the same projection in different order compares identical. - auto wcProjASubCBDesc = - makeIndexDescriptor(normalizeIndexSpec(wildcardIndex->descriptor()->infoObj().addFields( + auto wcProjASubCBDesc = makeIndexDescriptor( + normalizeIndexSpec(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{wildcardProjection: {a: {c: 1, b: 1}}}")))); ASSERT(wcProjASubCBDesc->compareIndexOptions(opCtx(), coll()->ns(), wcProjASubBCIndex) == IndexDescriptor::Comparison::kIdentical); @@ -510,9 +552,9 @@ TEST_F(IndexSignatureTest, // Verifies that an index with the same value for 'wildcardProjection' option and an // non-signature index option compares equivalent as the 'wcProjAIndex' auto wcProjAWithNonSigDesc = - makeIndexDescriptor(wildcardIndex->descriptor()->infoObj().addFields( + makeIndexDescriptor(getWildcardIndex()->descriptor()->infoObj().addFields( fromjson("{storageEngine: {wiredTiger: {}}}"))); - ASSERT(wcProjAWithNonSigDesc->compareIndexOptions(opCtx(), coll()->ns(), wildcardIndex) == + ASSERT(wcProjAWithNonSigDesc->compareIndexOptions(opCtx(), coll()->ns(), getWildcardIndex()) == IndexDescriptor::Comparison::kEquivalent); // Verifies that an index with the same value for 'wildcardProjection' option, non-signature @@ -550,14 +592,23 @@ TEST_F(IndexSignatureTest, // signature. auto* columnstoreIndex = unittest::assertGet( createIndex(fromjson("{v: 2, name: 'cs_all', key: {'$**': 'columnstore'}}"))); + const std::string columnstoreIndexIdent = columnstoreIndex->getIdent(); + + auto getColumnstoreIndex = [&]() -> const IndexCatalogEntry* { + return coll() + ->getIndexCatalog() + ->findIndexByIdent(opCtx(), columnstoreIndexIdent) + ->getEntry(); + }; // Verifies that another columnstore index with empty columnstoreProjection compares identical // to 'columnstoreIndex' after normalizing the index spec. 
auto anotherCsAllSpec = normalizeIndexSpec(fromjson("{v: 2, key: {'$**': 'columnstore'}}")); auto anotherCsAllProjDesc = makeIndexDescriptor(anotherCsAllSpec); - ASSERT(anotherCsAllProjDesc->compareIndexOptions(opCtx(), coll()->ns(), columnstoreIndex) == - IndexDescriptor::Comparison::kIdentical); + ASSERT( + anotherCsAllProjDesc->compareIndexOptions(opCtx(), coll()->ns(), getColumnstoreIndex()) == + IndexDescriptor::Comparison::kIdentical); ASSERT_EQ(createIndex(anotherCsAllProjDesc->infoObj().addFields(fromjson("{name: 'cs_all'}"))), ErrorCodes::IndexAlreadyExists); @@ -567,19 +618,24 @@ TEST_F(IndexSignatureTest, // Verifies that an index with non-empty value for 'columnstoreProjection' option compares // different from the base columnstore index and thus can be created. - auto csProjADesc = - makeIndexDescriptor(normalizeIndexSpec(columnstoreIndex->descriptor()->infoObj().addFields( + auto csProjADesc = makeIndexDescriptor( + normalizeIndexSpec(getColumnstoreIndex()->descriptor()->infoObj().addFields( fromjson("{columnstoreProjection: {a: 1}}")))); - ASSERT(csProjADesc->compareIndexOptions(opCtx(), coll()->ns(), columnstoreIndex) == + ASSERT(csProjADesc->compareIndexOptions(opCtx(), coll()->ns(), getColumnstoreIndex()) == IndexDescriptor::Comparison::kDifferent); auto* csProjAIndex = unittest::assertGet( createIndex(csProjADesc->infoObj().addFields(fromjson("{name: 'cs_a'}")))); + const std::string csProjAIndexIdent = csProjAIndex->getIdent(); + + auto getCsProjAIndex = [&]() -> const IndexCatalogEntry* { + return coll()->getIndexCatalog()->findIndexByIdent(opCtx(), csProjAIndexIdent)->getEntry(); + }; // Verifies that an index with the same value for 'columnstoreProjection' option as the // csProjAIndex compares identical. - auto anotherCsProjADesc = - makeIndexDescriptor(normalizeIndexSpec(columnstoreIndex->descriptor()->infoObj().addFields( + auto anotherCsProjADesc = makeIndexDescriptor( + normalizeIndexSpec(getColumnstoreIndex()->descriptor()->infoObj().addFields( fromjson("{columnstoreProjection: {a: 1}}")))); ASSERT(anotherCsProjADesc->compareIndexOptions(opCtx(), coll()->ns(), csProjAIndex) == @@ -596,24 +652,24 @@ TEST_F(IndexSignatureTest, // Verifies that an index with a different value for 'columnstoreProjection' option compares // different from the base columnstore index or 'cs_a' and thus can be created. - auto csProjABDesc = - makeIndexDescriptor(normalizeIndexSpec(columnstoreIndex->descriptor()->infoObj().addFields( + auto csProjABDesc = makeIndexDescriptor( + normalizeIndexSpec(getColumnstoreIndex()->descriptor()->infoObj().addFields( fromjson("{columnstoreProjection: {a: 1, b: 1}}")))); - ASSERT(csProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), columnstoreIndex) == + ASSERT(csProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), getColumnstoreIndex()) == IndexDescriptor::Comparison::kDifferent); - ASSERT(csProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), csProjAIndex) == + ASSERT(csProjABDesc->compareIndexOptions(opCtx(), coll()->ns(), getCsProjAIndex()) == IndexDescriptor::Comparison::kDifferent); auto* csProjABIndex = unittest::assertGet( createIndex(csProjABDesc->infoObj().addFields(fromjson("{name: 'cs_a_b'}")))); // Verifies that an index with sub fields for 'columnstoreProjection' option compares // different from the base columnstore index or 'cs_a' or 'cs_a_b' and thus can be created. 
- auto csProjASubBCDesc = - makeIndexDescriptor(normalizeIndexSpec(columnstoreIndex->descriptor()->infoObj().addFields( + auto csProjASubBCDesc = makeIndexDescriptor( + normalizeIndexSpec(getColumnstoreIndex()->descriptor()->infoObj().addFields( fromjson("{columnstoreProjection: {a: {b: 1, c: 1}}}")))); - ASSERT(csProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), columnstoreIndex) == + ASSERT(csProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), getColumnstoreIndex()) == IndexDescriptor::Comparison::kDifferent); - ASSERT(csProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), csProjAIndex) == + ASSERT(csProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), getCsProjAIndex()) == IndexDescriptor::Comparison::kDifferent); ASSERT(csProjASubBCDesc->compareIndexOptions(opCtx(), coll()->ns(), csProjABIndex) == IndexDescriptor::Comparison::kDifferent); @@ -621,8 +677,8 @@ TEST_F(IndexSignatureTest, createIndex(csProjASubBCDesc->infoObj().addFields(fromjson("{name: 'cs_a_sub_b_c'}")))); // Verifies that two indexes with the same projection in different order compares identical. - auto csProjASubCBDesc = - makeIndexDescriptor(normalizeIndexSpec(columnstoreIndex->descriptor()->infoObj().addFields( + auto csProjASubCBDesc = makeIndexDescriptor( + normalizeIndexSpec(getColumnstoreIndex()->descriptor()->infoObj().addFields( fromjson("{columnstoreProjection: {a: {c: 1, b: 1}}}")))); ASSERT(csProjASubCBDesc->compareIndexOptions(opCtx(), coll()->ns(), csProjASubBCIndex) == IndexDescriptor::Comparison::kIdentical); @@ -638,7 +694,7 @@ TEST_F(IndexSignatureTest, // non-signature index option compares equivalent as the 'csProjAIndex' auto csProjAWithNonSigDesc = makeIndexDescriptor( anotherCsProjADesc->infoObj().addFields(fromjson("{storageEngine: {wiredTiger: {}}}"))); - ASSERT(csProjAWithNonSigDesc->compareIndexOptions(opCtx(), coll()->ns(), csProjAIndex) == + ASSERT(csProjAWithNonSigDesc->compareIndexOptions(opCtx(), coll()->ns(), getCsProjAIndex()) == IndexDescriptor::Comparison::kEquivalent); // Verifies that an index with the same value for 'columnstoreProjection' option, non-signature diff --git a/src/mongo/db/catalog/index_spec_validate_test.cpp b/src/mongo/db/catalog/index_spec_validate_test.cpp index 38d4e4d94ad1a..43a4eceb24d7a 100644 --- a/src/mongo/db/catalog/index_spec_validate_test.cpp +++ b/src/mongo/db/catalog/index_spec_validate_test.cpp @@ -27,24 +27,25 @@ * it in the license file. 
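The index_signature_test.cpp hunks above replace long-lived IndexCatalogEntry pointers with a stored index ident plus a small lambda that re-resolves the entry through findIndexByIdent() on every use. Below is a minimal, self-contained sketch of that "keep a stable key, re-look the object up" pattern; the Registry and Entry types are hypothetical stand-ins for illustration only, not the real catalog classes.

    #include <cassert>
    #include <map>
    #include <string>

    // Hypothetical stand-ins: a record that may be re-created, and a registry keyed by a
    // stable ident string. Only the lookup-by-key pattern mirrors the test changes above.
    struct Entry {
        int generation = 0;
    };

    class Registry {
    public:
        void add(const std::string& ident) {
            _entries[ident] = Entry{};
        }
        void rebuild(const std::string& ident) {
            // Erase and re-insert: any Entry* a caller cached before this call is now dangling.
            int nextGeneration = _entries.at(ident).generation + 1;
            _entries.erase(ident);
            _entries[ident] = Entry{nextGeneration};
        }
        const Entry* findByIdent(const std::string& ident) const {
            auto it = _entries.find(ident);
            return it == _entries.end() ? nullptr : &it->second;
        }

    private:
        std::map<std::string, Entry> _entries;
    };

    int main() {
        Registry registry;
        const std::string ident = "index-7-1234567890";
        registry.add(ident);

        // Keep the stable ident, not the pointer, and re-resolve before every use.
        auto getEntry = [&]() -> const Entry* { return registry.findByIdent(ident); };

        assert(getEntry() != nullptr && getEntry()->generation == 0);
        registry.rebuild(ident);  // a cached Entry* would now point at freed memory
        assert(getEntry() != nullptr && getEntry()->generation == 1);
        return 0;
    }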
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/index_key_validate.h" - #include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/db/server_options.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/catalog/list_indexes.cpp b/src/mongo/db/catalog/list_indexes.cpp index dd7aa0d7f4ad2..240aa69dba028 100644 --- a/src/mongo/db/catalog/list_indexes.cpp +++ b/src/mongo/db/catalog/list_indexes.cpp @@ -28,23 +28,34 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/list_indexes.h" - +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/list_indexes.h" #include "mongo/db/curop_failpoint_helpers.h" #include "mongo/db/db_raii.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/storage/storage_engine.h" -#include "mongo/logv2/log.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -61,9 +72,9 @@ StatusWith> listIndexes(OperationContext* opCtx, AutoGetCollectionForReadCommandMaybeLockFree collection(opCtx, ns); auto nss = collection.getNss(); if (!collection) { - return StatusWith>(ErrorCodes::NamespaceNotFound, - str::stream() << "ns does not exist: " - << collection.getNss().ns()); + return StatusWith>( + ErrorCodes::NamespaceNotFound, + str::stream() << "ns does not exist: " << collection.getNss().toStringForErrorMsg()); } return StatusWith>( listIndexesInLock(opCtx, collection.getCollection(), nss, additionalInclude)); @@ -154,6 +165,6 @@ std::list listIndexesEmptyListIfMissing(OperationContext* opCtx, const NamespaceStringOrUUID& nss, ListIndexesInclude additionalInclude) { auto listStatus = listIndexes(opCtx, nss, additionalInclude); - return listStatus.isOK() ? listStatus.getValue() : std::list(); + return listStatus.isOK() ? 
std::move(listStatus.getValue()) : std::list(); } } // namespace mongo diff --git a/src/mongo/db/catalog/list_indexes.h b/src/mongo/db/catalog/list_indexes.h index 03df1c200c9bb..9a7b8baaae4d1 100644 --- a/src/mongo/db/catalog/list_indexes.h +++ b/src/mongo/db/catalog/list_indexes.h @@ -34,6 +34,7 @@ #include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/storage/record_store.h" namespace mongo { diff --git a/src/mongo/db/catalog/local_oplog_info.cpp b/src/mongo/db/catalog/local_oplog_info.cpp index 594b6b1e8b69e..2b057a50bfeca 100644 --- a/src/mongo/db/catalog/local_oplog_info.cpp +++ b/src/mongo/db/catalog/local_oplog_info.cpp @@ -30,13 +30,26 @@ #include "mongo/db/catalog/local_oplog_info.h" +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/db/curop.h" +#include "mongo/db/logical_time.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/storage/flow_control.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/recovery_unit.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCatalog @@ -77,7 +90,6 @@ void LocalOplogInfo::resetCollection() { } void LocalOplogInfo::setNewTimestamp(ServiceContext* service, const Timestamp& newTime) { - stdx::lock_guard lk(_newOpMutex); VectorClockMutable::get(service)->tickClusterTimeTo(LogicalTime(newTime)); } diff --git a/src/mongo/db/catalog/local_oplog_info.h b/src/mongo/db/catalog/local_oplog_info.h index 55ebff65f6ad5..0867006f874c0 100644 --- a/src/mongo/db/catalog/local_oplog_info.h +++ b/src/mongo/db/catalog/local_oplog_info.h @@ -36,7 +36,9 @@ #include "mongo/db/catalog/collection.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/mutex.h" namespace mongo { diff --git a/src/mongo/db/catalog/multi_index_block.cpp b/src/mongo/db/catalog/multi_index_block.cpp index 99d9c0a72419e..89c06b5dbb271 100644 --- a/src/mongo/db/catalog/multi_index_block.cpp +++ b/src/mongo/db/catalog/multi_index_block.cpp @@ -29,35 +29,79 @@ #include "mongo/db/catalog/multi_index_block.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + #include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_yield_restore.h" #include "mongo/db/catalog/multi_index_block_gen.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index/skipped_record_tracker.h" #include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/namespace_string.h" 
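The listIndexesEmptyListIfMissing() change above now moves the value out of the StatusWith inside the conditional return rather than copying it. A self-contained sketch of why the explicit std::move matters in that position follows; Result is a hypothetical stand-in for the real StatusWith type.

    #include <list>
    #include <string>
    #include <utility>

    // Hypothetical stand-in for a StatusWith-style wrapper holding a list payload.
    struct Result {
        bool ok = false;
        std::list<std::string> value;
        bool isOK() const { return ok; }
        std::list<std::string>& getValue() { return value; }
    };

    std::list<std::string> listOrEmpty(Result r) {
        // Without std::move the conditional operator produces a brand-new list that is
        // *copy*-constructed from the lvalue getValue() returns:
        //     return r.isOK() ? r.getValue() : std::list<std::string>();
        // Casting to an rvalue lets that same construction steal the nodes instead.
        return r.isOK() ? std::move(r.getValue()) : std::list<std::string>();
    }

    int main() {
        Result r{true, {"a_1", "b_1"}};
        return listOrEmpty(std::move(r)).size() == 2 ? 0 : 1;
    }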
#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/collection_query_info.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_conflict_info.h" -#include "mongo/db/storage/durable_catalog.h" -#include "mongo/db/storage/storage_options.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/temporary_record_store.h" #include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/log_and_backoff.h" #include "mongo/util/progress_meter.h" -#include "mongo/util/quick_exit.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -125,8 +169,8 @@ auto makeOnSuppressedErrorFn(const std::function& saveCursorBeforeWrite, } bool shouldRelaxConstraints(OperationContext* opCtx, const CollectionPtr& collection) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gIndexBuildGracefulErrorHandling.isEnabledAndIgnoreFCVUnsafe()) { + if (!feature_flags::gIndexBuildGracefulErrorHandling.isEnabled( + serverGlobalParams.featureCompatibility)) { // Always suppress. return true; } @@ -253,8 +297,9 @@ StatusWith> MultiIndexBlock::init( InitMode initMode, const boost::optional& resumeInfo) { invariant(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X), - str::stream() << "Collection " << collection->ns() << " with UUID " - << collection->uuid() << " is holding the incorrect lock"); + str::stream() << "Collection " << collection->ns().toStringForErrorMsg() + << " with UUID " << collection->uuid() + << " is holding the incorrect lock"); _collectionUUID = collection->uuid(); _buildIsCleanedUp = false; @@ -319,9 +364,10 @@ StatusWith> MultiIndexBlock::init( // indexes and start the build while holding a lock throughout. if (status == ErrorCodes::IndexBuildAlreadyInProgress) { invariant(indexSpecs.size() > 1, - str::stream() << "Collection: " << collection->ns() << " (" - << _collectionUUID - << "), Index spec: " << indexSpecs.front()); + str::stream() + << "Collection: " << collection->ns().toStringForErrorMsg() + << " (" << _collectionUUID + << "), Index spec: " << indexSpecs.front()); return {ErrorCodes::OperationFailed, "Cannot build two identical indexes. 
Try again without duplicate " "indexes."}; @@ -357,7 +403,8 @@ StatusWith> MultiIndexBlock::init( uassert(ErrorCodes::NoSuchKey, str::stream() << "Unable to locate resume information for " << info << " due to inconsistent resume information for index build " - << _buildUUID << " on namespace " << collection->ns() << "(" + << _buildUUID << " on namespace " + << collection->ns().toStringForErrorMsg() << "(" << _collectionUUID << ")", stateInfoIt != resumeInfoIndexes.end()); @@ -374,14 +421,16 @@ StatusWith> MultiIndexBlock::init( return status; auto indexCatalogEntry = - index.block->getEntry(opCtx, collection.getWritableCollection(opCtx)); + index.block->getWritableEntry(opCtx, collection.getWritableCollection(opCtx)); index.real = indexCatalogEntry->accessMethod(); status = index.real->initializeAsEmpty(opCtx); if (!status.isOK()) return status; - index.bulk = index.real->initiateBulk( - eachIndexBuildMaxMemoryUsageBytes, stateInfo, collection->ns().db()); + index.bulk = index.real->initiateBulk(indexCatalogEntry, + eachIndexBuildMaxMemoryUsageBytes, + stateInfo, + collection->ns().dbName()); const IndexDescriptor* descriptor = indexCatalogEntry->descriptor(); @@ -439,12 +488,13 @@ StatusWith> MultiIndexBlock::init( // Avoid converting TenantMigrationCommittedException to Status. throw; } catch (...) { - return exceptionToStatus().withContext( - str::stream() << "Caught exception during index builder (" << _buildUUID - << ") initialization on namespace" << collection->ns() << " (" - << _collectionUUID << "). " << indexSpecs.size() - << " index specs provided. First index spec: " - << (indexSpecs.empty() ? BSONObj() : indexSpecs[0])); + return exceptionToStatus().withContext(str::stream() + << "Caught exception during index builder (" + << _buildUUID << ") initialization on namespace" + << collection->ns().toStringForErrorMsg() << " (" + << _collectionUUID << "). " << indexSpecs.size() + << " index specs provided. First index spec: " + << (indexSpecs.empty() ? BSONObj() : indexSpecs[0])); } } @@ -453,7 +503,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection( const CollectionPtr& collection, const boost::optional& resumeAfterRecordId) { invariant(!_buildIsCleanedUp); - invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork()); + invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // UUIDs are not guaranteed during startup because the check happens after indexes are rebuilt. if (_collectionUUID) { @@ -504,7 +554,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection( // Unlock before hanging so replication recognizes we've completed. collection.yield(); Locker::LockSnapshot lockInfo; - invariant(opCtx->lockState()->saveLockStateAndUnlock(&lockInfo)); + opCtx->lockState()->saveLockStateAndUnlock(&lockInfo); LOGV2(4585201, "Hanging index build with no locks due to " @@ -547,10 +597,12 @@ Status MultiIndexBlock::insertAllDocumentsInCollection( _lastRecordIdInserted = boost::none; for (auto& index : _indexes) { + auto indexCatalogEntry = index.block->getEntry(opCtx, collection); index.bulk = - index.real->initiateBulk(getEachIndexBuildMaxMemoryUsageBytes(_indexes.size()), + index.real->initiateBulk(indexCatalogEntry, + getEachIndexBuildMaxMemoryUsageBytes(_indexes.size()), /*stateInfo=*/boost::none, - collection->ns().db()); + collection->ns().dbName()); } }; @@ -619,7 +671,7 @@ Status MultiIndexBlock::insertAllDocumentsInCollection( // Unlock before hanging so replication recognizes we've completed. 
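In shouldRelaxConstraints() (multi_index_block.cpp hunks above), the gIndexBuildGracefulErrorHandling flag is now evaluated against serverGlobalParams.featureCompatibility instead of through the FCV-ignoring variant. The following is a rough standalone sketch of the difference between the two query styles; the flag and version types are hypothetical and much simpler than the real FCV machinery.

    #include <iostream>

    // Hypothetical stand-ins: an ordered feature-compatibility version and a flag that
    // records the version in which its feature became available.
    enum class Fcv { kV70 = 70, kV71 = 71 };

    struct FeatureFlag {
        bool enabledByDefault;  // hypothetical compiled-in default
        Fcv introducedIn;

        // Unconditional variant: reports the compiled-in state regardless of the running
        // FCV (the style being removed above).
        bool isEnabledAndIgnoreFCVUnsafe() const { return enabledByDefault; }

        // FCV-aware variant: only enabled once the running FCV has reached the version
        // that introduced the feature (the style the patch switches to).
        bool isEnabled(Fcv current) const { return enabledByDefault && current >= introducedIn; }
    };

    int main() {
        FeatureFlag gracefulErrorHandling{true, Fcv::kV71};
        Fcv runningFcv = Fcv::kV70;  // e.g. mid-upgrade, still at the older FCV

        std::cout << gracefulErrorHandling.isEnabledAndIgnoreFCVUnsafe() << '\n';  // 1
        std::cout << gracefulErrorHandling.isEnabled(runningFcv) << '\n';          // 0
        return 0;
    }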
collection.yield(); Locker::LockSnapshot lockInfo; - invariant(opCtx->lockState()->saveLockStateAndUnlock(&lockInfo)); + opCtx->lockState()->saveLockStateAndUnlock(&lockInfo); LOGV2(20390, "Hanging index build with no locks due to " @@ -781,6 +833,18 @@ Status MultiIndexBlock::_insert( } } + // Cache the collection and index catalog entry pointers during the collection scan phase. This + // is necessary for index build performance to avoid looking up the index catalog entry for each + // insertion into the index table. + if (_collForScan != collection.get()) { + _collForScan = collection.get(); + + // Reset cached index catalog entry pointers. + for (size_t i = 0; i < _indexes.size(); i++) { + _indexes[i].entryForScan = _indexes[i].block->getEntry(opCtx, collection); + } + } + for (size_t i = 0; i < _indexes.size(); i++) { if (_indexes[i].filterExpression && !_indexes[i].filterExpression->matchesBSON(doc)) { continue; @@ -793,6 +857,7 @@ Status MultiIndexBlock::_insert( try { idxStatus = _indexes[i].bulk->insert(opCtx, collection, + _indexes[i].entryForScan, doc, loc, _indexes[i].options, @@ -822,7 +887,7 @@ Status MultiIndexBlock::dumpInsertsFromBulk( const IndexAccessMethod::RecordIdHandlerFn& onDuplicateRecord) { opCtx->checkForInterrupt(); invariant(!_buildIsCleanedUp); - invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork()); + invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Initial sync adds documents to the sorter using // insertSingleDocumentForInitialSyncOrRecovery() instead of delegating to @@ -856,21 +921,23 @@ Status MultiIndexBlock::dumpInsertsFromBulk( // SERVER-41918 This call to bulk->commit() results in file I/O that may result in an // exception. try { + const IndexCatalogEntry* entry = _indexes[i].block->getEntry(opCtx, collection); Status status = _indexes[i].bulk->commit( opCtx, collection, + entry, dupsAllowed, kYieldIterations, - [=](const KeyString::Value& duplicateKey) { + [=, this](const key_string::Value& duplicateKey) { // Do not record duplicates when explicitly ignored. This may be the case on // secondaries. return writeConflictRetry( - opCtx, "recordingDuplicateKey", entry->getNSSFromCatalog(opCtx).ns(), [&] { + opCtx, "recordingDuplicateKey", entry->getNSSFromCatalog(opCtx), [&] { if (dupsAllowed && !onDuplicateRecord && !_ignoreUnique && entry->indexBuildInterceptor()) { WriteUnitOfWork wuow(opCtx); Status status = entry->indexBuildInterceptor()->recordDuplicateKey( - opCtx, duplicateKey); + opCtx, entry, duplicateKey); if (!status.isOK()) { return status; } @@ -926,8 +993,12 @@ Status MultiIndexBlock::drainBackgroundWrites( // _ignoreUnique is set explicitly. auto trackDups = !_ignoreUnique ? 
IndexBuildInterceptor::TrackDuplicates::kTrack : IndexBuildInterceptor::TrackDuplicates::kNoTrack; - auto status = interceptor->drainWritesIntoIndex( - opCtx, coll, _indexes[i].options, trackDups, drainYieldPolicy); + auto status = interceptor->drainWritesIntoIndex(opCtx, + coll, + _indexes[i].block->getEntry(opCtx, coll), + _indexes[i].options, + trackDups, + drainYieldPolicy); if (!status.isOK()) { return status; } @@ -944,7 +1015,8 @@ Status MultiIndexBlock::retrySkippedRecords(OperationContext* opCtx, if (!interceptor) continue; - auto status = interceptor->retrySkippedRecords(opCtx, collection, mode); + auto status = interceptor->retrySkippedRecords( + opCtx, collection, index.block->getEntry(opCtx, collection), mode); if (!status.isOK()) { return status; } @@ -963,7 +1035,8 @@ Status MultiIndexBlock::checkConstraints(OperationContext* opCtx, const Collecti if (!interceptor) continue; - auto status = interceptor->checkDuplicateKeyConstraints(opCtx); + auto status = interceptor->checkDuplicateKeyConstraints( + opCtx, _indexes[i].block->getEntry(opCtx, collection)); if (!status.isOK()) { return status; } @@ -982,8 +1055,9 @@ Status MultiIndexBlock::commit(OperationContext* opCtx, OnCommitFn onCommit) { invariant(!_buildIsCleanedUp); invariant(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_X), - str::stream() << "Collection " << collection->ns() << " with UUID " - << collection->uuid() << " is holding the incorrect lock"); + str::stream() << "Collection " << collection->ns().toStringForErrorMsg() + << " with UUID " << collection->uuid() + << " is holding the incorrect lock"); // UUIDs are not guaranteed during startup because the check happens after indexes are rebuilt. if (_collectionUUID) { @@ -1003,7 +1077,8 @@ Status MultiIndexBlock::commit(OperationContext* opCtx, return { ErrorCodes::CannotCreateIndex, str::stream() - << "Index build on collection '" << collection->ns() << "' (" << collection->uuid() + << "Index build on collection '" << collection->ns().toStringForErrorMsg() << "' (" + << collection->uuid() << ") failed due to the detection of mixed-schema data in the time-series buckets " "collection. Starting as of v5.2, time-series measurement bucketing has been " "modified to ensure that newly created time-series buckets do not contain " @@ -1026,7 +1101,7 @@ Status MultiIndexBlock::commit(OperationContext* opCtx, // catalog entry. The interceptor will write multikey metadata keys into the index during // IndexBuildInterceptor::sideWrite, so we only need to pass the cached MultikeyPaths into // IndexCatalogEntry::setMultikey here. - auto indexCatalogEntry = _indexes[i].block->getEntry(opCtx, collection); + auto indexCatalogEntry = _indexes[i].block->getWritableEntry(opCtx, collection); auto interceptor = indexCatalogEntry->indexBuildInterceptor(); if (interceptor) { auto multikeyPaths = interceptor->getMultikeyPaths(); @@ -1093,7 +1168,8 @@ void MultiIndexBlock::abortWithoutCleanup(OperationContext* opCtx, const CollectionPtr& collection, bool isResumable) { invariant(!_buildIsCleanedUp); - // TODO (SERVER-71610): Fix to be interruptible or document exception. + // Aborting without cleanup is done during shutdown. At this point the operation context is + // killed, but acquiring locks must succeed. UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. // Lock if it's not already locked, to ensure storage engine cannot be destructed out from // underneath us. 
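Two lambdas in multi_index_block.cpp (the duplicate-key callback in dumpInsertsFromBulk above and the failpoint handler just below) change their capture list from [=] to [=, this]. Here is a short standalone illustration of that capture style, which C++20 expects whenever a copy-default capture also uses the enclosing object.

    #include <functional>
    #include <iostream>
    #include <string>

    // Minimal illustration: inside a member function, [=] used to capture `this` implicitly;
    // C++20 deprecates that, so the intent is spelled out as [=, this] (copy the locals,
    // capture the object pointer).
    class BuildObserver {
    public:
        std::function<void()> makeHangMessage(unsigned long long iteration) const {
            return [=, this] {
                std::cout << "Hanging index build '" << _name << "' at iteration " << iteration
                          << '\n';
            };
        }

    private:
        std::string _name = "collection scan";
    };

    int main() {
        BuildObserver observer;
        auto logHang = observer.makeHangMessage(128);
        logHang();
        return 0;
    }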
@@ -1220,7 +1296,7 @@ Status MultiIndexBlock::_failPointHangDuringBuild(OperationContext* opCtx, unsigned long long iteration) const { try { fp->executeIf( - [=, &doc](const BSONObj& data) { + [=, this, &doc](const BSONObj& data) { LOGV2(20386, "Hanging index build during collection scan phase", "where"_attr = where, diff --git a/src/mongo/db/catalog/multi_index_block.h b/src/mongo/db/catalog/multi_index_block.h index 4f6ec4a7bd0da..3797bd977c32d 100644 --- a/src/mongo/db/catalog/multi_index_block.h +++ b/src/mongo/db/catalog/multi_index_block.h @@ -29,6 +29,9 @@ #pragma once +#include +#include +#include #include #include #include @@ -40,16 +43,23 @@ #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/index_build_block.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_build_interceptor.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" #include "mongo/db/resumable_index_builds_gen.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/platform/mutex.h" #include "mongo/util/fail_point.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -60,6 +70,7 @@ class CollectionPtr; class MatchExpression; class NamespaceString; class OperationContext; + class ProgressMeterHolder; /** @@ -325,6 +336,10 @@ class MultiIndexBlock { std::unique_ptr bulk; InsertDeleteOptions options; + + // We cache index catalog entry pointer for the collection scan phase. This is necessary for + // index build performance in the insert path. + const IndexCatalogEntry* entryForScan = nullptr; }; void _writeStateToDisk(OperationContext* opCtx, const CollectionPtr& collection) const; @@ -384,5 +399,10 @@ class MultiIndexBlock { // The current phase of the index build. IndexBuildPhaseEnum _phase = IndexBuildPhaseEnum::kInitialized; + + // We cache the collection pointer for the collection scan phase. The collection pointer is + // compared after yielding, which is used to indicate whether we need to refetch the index + // catalog entry pointers in IndexToBuild. This is necessary for index build performance. + const Collection* _collForScan = nullptr; }; } // namespace mongo diff --git a/src/mongo/db/catalog/multi_index_block_test.cpp b/src/mongo/db/catalog/multi_index_block_test.cpp index 5cedf85fa5b24..bb14b7b8b5a6f 100644 --- a/src/mongo/db/catalog/multi_index_block_test.cpp +++ b/src/mongo/db/catalog/multi_index_block_test.cpp @@ -27,12 +27,27 @@ * it in the license file. 
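The new entryForScan and _collForScan members declared in multi_index_block.h above back the caching added to MultiIndexBlock::_insert(): catalog-entry pointers are resolved once per collection instance rather than once per document, and refreshed only when the Collection pointer observed during the scan changes (for example after a yield). A rough, self-contained sketch of that refresh-on-identity-change pattern follows, with placeholder types standing in for the MongoDB classes.

    #include <cassert>
    #include <vector>

    // Placeholder types: only pointer identity matters for the pattern being shown.
    struct Collection {};
    struct IndexCatalogEntry {};

    struct IndexToBuild {
        IndexCatalogEntry storage;                        // stands in for the real catalog entry
        const IndexCatalogEntry* entryForScan = nullptr;  // cached pointer used on the hot path
    };

    class ScanCache {
    public:
        // Called once per inserted document: only re-resolves the cached entries when the
        // collection instance differs from the one seen last time (a new catalog snapshot).
        void refreshIfNeeded(const Collection* coll, std::vector<IndexToBuild>& indexes) {
            if (_collForScan == coll)
                return;
            _collForScan = coll;
            for (auto& index : indexes)
                index.entryForScan = &index.storage;  // stands in for block->getEntry(opCtx, collection)
        }

    private:
        const Collection* _collForScan = nullptr;
    };

    int main() {
        Collection collA, collB;
        std::vector<IndexToBuild> indexes(2);
        ScanCache cache;

        cache.refreshIfNeeded(&collA, indexes);  // first document: resolve and cache
        assert(indexes[0].entryForScan != nullptr);
        cache.refreshIfNeeded(&collA, indexes);  // same collection: cache reused
        cache.refreshIfNeeded(&collB, indexes);  // yield restored a new instance: refresh
        return 0;
    }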
*/ +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/shard_role.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/catalog/rename_collection.cpp b/src/mongo/db/catalog/rename_collection.cpp index e399d47b76a94..ed00394238299 100644 --- a/src/mongo/db/catalog/rename_collection.cpp +++ b/src/mongo/db/catalog/rename_collection.cpp @@ -29,40 +29,76 @@ #include "mongo/db/catalog/rename_collection.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/list_indexes.h" #include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/catalog/unique_collection_name.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/batched_write_policy.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/insert.h" -#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" #include 
"mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -94,8 +130,9 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx, auto replCoord = repl::ReplicationCoordinator::get(opCtx); if (opCtx->writesAreReplicated() && !replCoord->canAcceptWritesFor(opCtx, source)) return Status(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while renaming collection " << source << " to " - << target); + str::stream() << "Not primary while renaming collection " + << source.toStringForErrorMsg() << " to " + << target.toStringForErrorMsg()); if (isReplicatedChanged(opCtx, source, target)) return {ErrorCodes::IllegalOperation, @@ -112,15 +149,17 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx, if (!sourceColl) { if (CollectionCatalog::get(opCtx)->lookupView(opCtx, source)) return Status(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "cannot rename view: " << source); + str::stream() << "cannot rename view: " << source.toStringForErrorMsg()); return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "Source collection " << source.ns() << " does not exist"); + str::stream() << "Source collection " << source.toStringForErrorMsg() + << " does not exist"); } if (sourceColl->getCollectionOptions().encryptedFieldConfig && !AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::setUserWriteBlockMode)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(target.tenantId()), + ActionType::setUserWriteBlockMode)) { return Status(ErrorCodes::IllegalOperation, "Cannot rename an encrypted collection"); } @@ -131,12 +170,14 @@ Status checkSourceAndTargetNamespaces(OperationContext* opCtx, if (!targetColl) { if (CollectionCatalog::get(opCtx)->lookupView(opCtx, target)) return Status(ErrorCodes::NamespaceExists, - str::stream() << "a view already exists with that name: " << target); + str::stream() << "a view already exists with that name: " + << target.toStringForErrorMsg()); } else { if (targetColl->getCollectionOptions().encryptedFieldConfig && !AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::setUserWriteBlockMode)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(target.tenantId()), + ActionType::setUserWriteBlockMode)) { return Status(ErrorCodes::IllegalOperation, "Cannot rename to an existing encrypted collection"); } @@ -163,12 +204,14 @@ Status renameTargetCollectionToTmp(OperationContext* opCtx, if (!tmpNameResult.isOK()) { return tmpNameResult.getStatus().withContext( str::stream() << "Cannot generate a temporary collection name for the target " - << targetNs << " (" << targetUUID << ") so that the source" << sourceNs - << " (" << sourceUUID << ") could be renamed to " << targetNs); + << targetNs.toStringForErrorMsg() << " (" << targetUUID + << ") so that the source" << sourceNs.toStringForErrorMsg() << " (" + << sourceUUID << ") could be renamed to " + << 
targetNs.toStringForErrorMsg()); } const auto& tmpName = tmpNameResult.getValue(); const bool stayTemp = true; - return writeConflictRetry(opCtx, "renameCollection", targetNs.ns(), [&] { + return writeConflictRetry(opCtx, "renameCollection", targetNs, [&] { WriteUnitOfWork wunit(opCtx); auto status = targetDB->renameCollection(opCtx, targetNs, tmpName, stayTemp); if (!status.isOK()) @@ -197,7 +240,7 @@ Status renameCollectionDirectly(OperationContext* opCtx, NamespaceString source, NamespaceString target, RenameCollectionOptions options) { - return writeConflictRetry(opCtx, "renameCollection", target.ns(), [&] { + return writeConflictRetry(opCtx, "renameCollection", target, [&] { WriteUnitOfWork wunit(opCtx); { @@ -228,7 +271,7 @@ Status renameCollectionAndDropTarget(OperationContext* opCtx, const CollectionPtr& targetColl, RenameCollectionOptions options, repl::OpTime renameOpTimeFromApplyOps) { - return writeConflictRetry(opCtx, "renameCollection", target.ns(), [&] { + return writeConflictRetry(opCtx, "renameCollection", target, [&] { WriteUnitOfWork wunit(opCtx); // Target collection exists - drop it. @@ -370,7 +413,7 @@ Status renameCollectionWithinDBForApplyOps(OperationContext* opCtx, AutoStatsTracker::LogMode::kUpdateCurOp, CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(source.dbName())); - return writeConflictRetry(opCtx, "renameCollection", target.ns(), [&] { + return writeConflictRetry(opCtx, "renameCollection", target, [&] { auto targetColl = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, target); WriteUnitOfWork wuow(opCtx); if (targetColl) { @@ -455,13 +498,14 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, source.db() != target.db(), str::stream() << "cannot rename within same database (use renameCollectionWithinDB instead): source: " - << source << "; target: " << target); + << source.toStringForErrorMsg() << "; target: " << target.toStringForErrorMsg()); // Refer to txnCmdAllowlist in commands.cpp. 
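The rename_collection.cpp hunks above and below change every writeConflictRetry() call to pass the NamespaceString itself rather than the string produced by ns(). The helper's job is unchanged: run a write unit of work and start it over whenever it loses a storage-level write conflict. Below is a standalone sketch of that retry shape; the exception and helper names are illustrative only, not the real implementation.

    #include <iostream>
    #include <stdexcept>
    #include <string>

    // Illustrative stand-in for a storage-layer write conflict.
    struct WriteConflictException : std::runtime_error {
        WriteConflictException() : std::runtime_error("write conflict") {}
    };

    // Illustrative retry helper: keep re-running the unit of work until it completes
    // without a write conflict; the namespace is carried along for diagnostics.
    template <typename Work>
    auto retryOnWriteConflict(const std::string& opName, const std::string& ns, Work&& work) {
        for (int attempt = 1;; ++attempt) {
            try {
                return work();
            } catch (const WriteConflictException&) {
                std::cout << opName << " on " << ns << ": conflict, retrying (attempt "
                          << attempt << ")\n";
            }
        }
    }

    int main() {
        int triesLeft = 2;
        int result = retryOnWriteConflict("renameCollection", "test.target", [&] {
            if (--triesLeft > 0)
                throw WriteConflictException();
            return 42;  // the committed result of the write unit of work
        });
        return result == 42 ? 0 : 1;
    }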
invariant(!opCtx->inMultiDocumentTransaction(), str::stream() << "renameCollectionAcrossDatabases not supported in multi-document " "transaction: source: " - << source << "; target: " << target); + << source.toStringForErrorMsg() + << "; target: " << target.toStringForErrorMsg()); uassert(ErrorCodes::InvalidOptions, "Cannot provide an expected collection UUID when renaming across databases", @@ -482,7 +526,7 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, targetDBLock.emplace(opCtx, target.dbName(), MODE_X); } - DatabaseShardingState::assertMatchingDbVersion(opCtx, source.db()); + DatabaseShardingState::assertMatchingDbVersion(opCtx, source.dbName()); DisableDocumentValidation validationDisabler(opCtx); @@ -503,7 +547,7 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, if (!sourceColl) { if (CollectionCatalog::get(opCtx)->lookupView(opCtx, source)) return Status(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "cannot rename view: " << source); + str::stream() << "cannot rename view: " << source.toStringForErrorMsg()); return Status(ErrorCodes::NamespaceNotFound, "source namespace does not exist"); } @@ -532,7 +576,8 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, } else if (CollectionCatalog::get(opCtx)->lookupView(opCtx, target)) { return Status(ErrorCodes::NamespaceExists, - str::stream() << "a view already exists with that name: " << target); + str::stream() << "a view already exists with that name: " + << target.toStringForErrorMsg()); } // Create a temporary collection in the target database. It will be removed if we fail to @@ -551,8 +596,9 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, makeUniqueCollectionName(opCtx, target.dbName(), "tmp%%%%%.renameCollection"); if (!tmpNameResult.isOK()) { return tmpNameResult.getStatus().withContext( - str::stream() << "Cannot generate temporary collection name to rename " << source - << " to " << target); + str::stream() << "Cannot generate temporary collection name to rename " + << source.toStringForErrorMsg() << " to " + << target.toStringForErrorMsg()); } const auto& tmpName = tmpNameResult.getValue(); @@ -568,7 +614,7 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, auto collectionOptions = sourceColl->getCollectionOptions(); collectionOptions.uuid = tmpCollUUID.uuid(); - writeConflictRetry(opCtx, "renameCollection", tmpName.ns(), [&] { + writeConflictRetry(opCtx, "renameCollection", tmpName, [&] { WriteUnitOfWork wunit(opCtx); targetDB->createCollection(opCtx, tmpName, collectionOptions); wunit.commit(); @@ -625,11 +671,11 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, // index in an unfinished state. For more information on assigning timestamps to multiple index // builds, please see SERVER-35780 and SERVER-35070. 
if (!indexesToCopy.empty()) { - Status status = writeConflictRetry(opCtx, "renameCollection", tmpName.ns(), [&] { + Status status = writeConflictRetry(opCtx, "renameCollection", tmpName, [&] { WriteUnitOfWork wunit(opCtx); auto fromMigrate = false; try { - CollectionWriter tmpCollWriter(opCtx, *tmpCollUUID.uuid()); + CollectionWriter tmpCollWriter(opCtx, tmpCollUUID.uuid()); IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection( opCtx, tmpCollWriter, indexesToCopy, fromMigrate); } catch (DBException& ex) { @@ -654,7 +700,7 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, AutoGetCollection autoTmpColl(opCtx, tmpCollUUID, MODE_IX); if (!autoTmpColl) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "Temporary collection '" << tmpName + str::stream() << "Temporary collection '" << tmpName.toStringForErrorMsg() << "' was removed while renaming collection across DBs"); } @@ -673,7 +719,7 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, opCtx->checkForInterrupt(); // Cursor is left one past the end of the batch inside writeConflictRetry. auto beginBatchId = record->id; - Status status = writeConflictRetry(opCtx, "renameCollection", tmpName.ns(), [&] { + Status status = writeConflictRetry(opCtx, "renameCollection", tmpName, [&] { // Always reposition cursor in case it gets a WCE midway through. record = cursor->seekExact(beginBatchId); @@ -725,7 +771,7 @@ Status renameCollectionAcrossDatabases(OperationContext* opCtx, cursor->save(); // When this exits via success or WCE, we need to restore the cursor. - ON_BLOCK_EXIT([opCtx, ns = tmpName.ns(), &cursor]() { + ON_BLOCK_EXIT([opCtx, ns = tmpName, &cursor]() { writeConflictRetry( opCtx, "retryRestoreCursor", ns, [&cursor] { cursor->restore(); }); }); @@ -773,7 +819,8 @@ void doLocalRenameIfOptionsAndIndexesHaveNotChanged(OperationContext* opCtx, } uassert(ErrorCodes::CommandFailed, - str::stream() << "collection options of target collection " << targetNs.ns() + str::stream() << "collection options of target collection " + << targetNs.toStringForErrorMsg() << " changed during processing. 
Original options: " << originalCollectionOptions << ", new options: " << collectionOptions, SimpleBSONObjComparator::kInstance.evaluate( @@ -785,7 +832,7 @@ void doLocalRenameIfOptionsAndIndexesHaveNotChanged(OperationContext* opCtx, UnorderedFieldsBSONObjComparator comparator; uassert( ErrorCodes::CommandFailed, - str::stream() << "indexes of target collection " << targetNs.ns() + str::stream() << "indexes of target collection " << targetNs.toStringForErrorMsg() << " changed during processing.", originalIndexes.size() == currentIndexes.size() && std::equal(originalIndexes.begin(), @@ -798,12 +845,13 @@ void doLocalRenameIfOptionsAndIndexesHaveNotChanged(OperationContext* opCtx, void validateNamespacesForRenameCollection(OperationContext* opCtx, const NamespaceString& source, - const NamespaceString& target) { + const NamespaceString& target, + const RenameCollectionOptions& options) { uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid source namespace: " << source.ns(), + str::stream() << "Invalid source namespace: " << source.toStringForErrorMsg(), source.isValid()); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid target namespace: " << target.ns(), + str::stream() << "Invalid target namespace: " << target.toStringForErrorMsg(), target.isValid()); if ((repl::ReplicationCoordinator::get(opCtx)->getReplicationMode() != @@ -839,7 +887,7 @@ void validateNamespacesForRenameCollection(OperationContext* opCtx, uassert(ErrorCodes::NamespaceNotFound, str::stream() << "renameCollection cannot accept a source collection that is in a " "drop-pending state: " - << source, + << source.toStringForErrorMsg(), !source.isDropPendingNamespace()); uassert(ErrorCodes::IllegalOperation, @@ -850,9 +898,19 @@ void validateNamespacesForRenameCollection(OperationContext* opCtx, "renaming system.js collection or renaming to system.js is not allowed", !source.isSystemDotJavascript() && !target.isSystemDotJavascript()); - uassert(ErrorCodes::IllegalOperation, - "Renaming system.buckets collections is not allowed", - !source.isTimeseriesBucketsCollection()); + if (!source.isOutTmpBucketsCollection() && source.isTimeseriesBucketsCollection()) { + uassert(ErrorCodes::IllegalOperation, + "Renaming system.buckets collections is not allowed", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(target.tenantId()), + ActionType::setUserWriteBlockMode)); + + uassert(ErrorCodes::IllegalOperation, + str::stream() << "Cannot rename time-series buckets collection {" << source.ns() + << "} to a non-time-series buckets namespace {" << target.ns() << "}", + target.isTimeseriesBucketsCollection()); + } } void validateAndRunRenameCollection(OperationContext* opCtx, @@ -861,7 +919,7 @@ void validateAndRunRenameCollection(OperationContext* opCtx, const RenameCollectionOptions& options) { invariant(source != target, "Can't rename a collection to itself"); - validateNamespacesForRenameCollection(opCtx, source, target); + validateNamespacesForRenameCollection(opCtx, source, target, options); OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE unsafeCreateCollection( opCtx); @@ -876,7 +934,7 @@ Status renameCollection(OperationContext* opCtx, return Status(ErrorCodes::NamespaceNotFound, str::stream() << "renameCollection() cannot accept a source " "collection that is in a drop-pending state: " - << source); + << source.toStringForErrorMsg()); } if (source.isSystemDotViews() || target.isSystemDotViews()) { @@ -993,7 
+1051,7 @@ Status renameCollectionForApplyOps(OperationContext* opCtx, str::stream() << "renameCollection() cannot accept a source " "collection that does not exist or is in a drop-pending state: " - << sourceNss.toString()); + << sourceNss.toStringForErrorMsg()); } const std::string uuidToDropString = uuidToDrop ? uuidToDrop->toString() : ""; @@ -1024,7 +1082,8 @@ Status renameCollectionForRollback(OperationContext* opCtx, invariant(source->db() == target.db(), str::stream() << "renameCollectionForRollback: source and target namespaces must " "have the same database. source: " - << *source << ". target: " << target); + << (*source).toStringForErrorMsg() + << ". target: " << target.toStringForErrorMsg()); LOGV2(20402, "renameCollectionForRollback: rename {source} ({uuid}) to {target}.", diff --git a/src/mongo/db/catalog/rename_collection.h b/src/mongo/db/catalog/rename_collection.h index 8697b70c2dc37..71755af3056e3 100644 --- a/src/mongo/db/catalog/rename_collection.h +++ b/src/mongo/db/catalog/rename_collection.h @@ -27,10 +27,18 @@ * it in the license file. */ +#include +#include +#include + #include "mongo/base/status.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/uuid.h" namespace mongo { @@ -93,9 +101,11 @@ Status renameCollectionForRollback(OperationContext* opCtx, /** * Performs validation checks to ensure source and target namespaces are eligible for rename. */ -void validateNamespacesForRenameCollection(OperationContext* opCtx, - const NamespaceString& source, - const NamespaceString& target); +void validateNamespacesForRenameCollection( + OperationContext* opCtx, + const NamespaceString& source, + const NamespaceString& target, + const RenameCollectionOptions& options = RenameCollectionOptions()); /** * Runs renameCollection() with preliminary validation checks to ensure source diff --git a/src/mongo/db/catalog/rename_collection_test.cpp b/src/mongo/db/catalog/rename_collection_test.cpp index 6fcbbf47d8ebc..db7a1e0879a1f 100644 --- a/src/mongo/db/catalog/rename_collection_test.cpp +++ b/src/mongo/db/catalog/rename_collection_test.cpp @@ -27,39 +27,66 @@ * it in the license file. 
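rename_collection.h above extends validateNamespacesForRenameCollection() with a RenameCollectionOptions parameter that defaults to RenameCollectionOptions(). A tiny sketch of what the defaulted parameter buys, with simplified stand-in types: pre-existing two-argument callers keep compiling while the rename path can forward its options explicitly.

    #include <string>

    // Simplified stand-in for RenameCollectionOptions.
    struct RenameOptions {
        bool dropTarget = false;
    };

    // The defaulted third parameter keeps the old two-argument call shape valid.
    bool validateNamespacesForRename(const std::string& source,
                                     const std::string& target,
                                     const RenameOptions& options = RenameOptions()) {
        (void)options;  // the real checks that consult options are elided in this sketch
        return !source.empty() && !target.empty() && source != target;
    }

    int main() {
        bool a = validateNamespacesForRename("db.src", "db.dst");                       // old callers
        bool b = validateNamespacesForRename("db.src", "db.dst", RenameOptions{true});  // new callers
        return (a && b) ? 0 : 1;
    }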
*/ +#include +#include +#include +#include +#include +#include #include -#include +#include #include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/str.h" namespace mongo { @@ -105,7 +132,8 @@ class OpObserverMock : public OpObserverNoop { std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override; void onCreateCollection(OperationContext* opCtx, const CollectionPtr& coll, @@ -115,36 +143,38 @@ class OpObserverMock : public OpObserverNoop { const OplogSlot& createOpTime, bool fromMigrate) override; - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) override; + CollectionDropType dropType, + bool markFromMigrate) override; - using OpObserver::onRenameCollection; void onRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) override; + bool stayTemp, + bool markFromMigrate) override; - using OpObserver::preRenameCollection; repl::OpTime 
preRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) override; + bool stayTemp, + bool markFromMigrate) override; + void postRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, bool stayTemp) override; + // Operations written to the oplog. These are operations for which // ReplicationCoordinator::isOplogDisabled() returns false. std::vector oplogEntries; @@ -213,7 +243,8 @@ void OpObserverMock::onInserts(OperationContext* opCtx, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (onInsertsThrows) { uasserted(ErrorCodes::OperationFailed, "insert failed"); } @@ -241,15 +272,16 @@ repl::OpTime OpObserverMock::onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { _logOp(opCtx, collectionName, "drop"); // If the oplog is not disabled for this namespace, then we need to reserve an op time for the // drop. if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(opCtx, collectionName)) { OpObserver::Times::get(opCtx).reservedOpTimes.push_back(dropOpTime); } - auto noopOptime = - OpObserverNoop::onDropCollection(opCtx, collectionName, uuid, numRecords, dropType); + auto noopOptime = OpObserverNoop::onDropCollection( + opCtx, collectionName, uuid, numRecords, dropType, markFromMigrate); invariant(noopOptime.isNull()); return {}; } @@ -260,11 +292,24 @@ void OpObserverMock::onRenameCollection(OperationContext* opCtx, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) { - preRenameCollection( - opCtx, fromCollection, toCollection, uuid, dropTargetUUID, numRecords, stayTemp); - OpObserverNoop::onRenameCollection( - opCtx, fromCollection, toCollection, uuid, dropTargetUUID, numRecords, stayTemp); + bool stayTemp, + bool markFromMigrate) { + preRenameCollection(opCtx, + fromCollection, + toCollection, + uuid, + dropTargetUUID, + numRecords, + stayTemp, + markFromMigrate); + OpObserverNoop::onRenameCollection(opCtx, + fromCollection, + toCollection, + uuid, + dropTargetUUID, + numRecords, + stayTemp, + markFromMigrate); onRenameCollectionCalled = true; onRenameCollectionDropTarget = dropTargetUUID; } @@ -287,11 +332,18 @@ repl::OpTime OpObserverMock::preRenameCollection(OperationContext* opCtx, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) { + bool stayTemp, + bool markFromMigrate) { _logOp(opCtx, fromCollection, "rename"); OpObserver::Times::get(opCtx).reservedOpTimes.push_back(renameOpTime); - OpObserverNoop::preRenameCollection( - opCtx, fromCollection, toCollection, uuid, dropTargetUUID, numRecords, stayTemp); + OpObserverNoop::preRenameCollection(opCtx, + fromCollection, + toCollection, + uuid, + dropTargetUUID, + numRecords, + stayTemp, + /*markFromMigrate=*/false); return {}; } @@ -380,15 +432,16 @@ void RenameCollectionTest::tearDown() { void _createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions options = {}) { - writeConflictRetry(opCtx, 
"_createCollection", nss.ns(), [=] { + writeConflictRetry(opCtx, "_createCollection", nss, [=] { AutoGetDb autoDb(opCtx, nss.dbName(), MODE_X); auto db = autoDb.ensureDbExists(opCtx); - ASSERT_TRUE(db) << "Cannot create collection " << nss << " because database " << nss.db() - << " does not exist."; + ASSERT_TRUE(db) << "Cannot create collection " << nss.toStringForErrorMsg() + << " because database " << nss.toStringForErrorMsg() << " does not exist."; WriteUnitOfWork wuow(opCtx); ASSERT_TRUE(db->createCollection(opCtx, nss, options)) - << "Failed to create collection " << nss << " due to unknown error."; + << "Failed to create collection " << nss.toStringForErrorMsg() + << " due to unknown error."; wuow.commit(); }); @@ -425,7 +478,7 @@ bool _collectionExists(OperationContext* opCtx, const NamespaceString& nss) { */ CollectionOptions _getCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) { AutoGetCollectionForRead collection(opCtx, nss); - ASSERT_TRUE(collection) << "Unable to get collections options for " << nss + ASSERT_TRUE(collection) << "Unable to get collections options for " << nss.toStringForErrorMsg() << " because collection does not exist."; return collection->getCollectionOptions(); } @@ -452,7 +505,7 @@ NamespaceString _getCollectionNssFromUUID(OperationContext* opCtx, const UUID& u */ bool _isTempCollection(OperationContext* opCtx, const NamespaceString& nss) { AutoGetCollectionForRead collection(opCtx, nss); - ASSERT_TRUE(collection) << "Unable to check if " << nss + ASSERT_TRUE(collection) << "Unable to check if " << nss.toStringForErrorMsg() << " is a temporary collection because collection does not exist."; auto options = _getCollectionOptions(opCtx, nss); return options.temp; @@ -464,10 +517,11 @@ bool _isTempCollection(OperationContext* opCtx, const NamespaceString& nss) { void _createIndexOnEmptyCollection(OperationContext* opCtx, const NamespaceString& nss, const std::string& indexName) { - writeConflictRetry(opCtx, "_createIndexOnEmptyCollection", nss.ns(), [=] { + writeConflictRetry(opCtx, "_createIndexOnEmptyCollection", nss, [=] { AutoGetCollection collection(opCtx, nss, MODE_X); - ASSERT_TRUE(collection) << "Cannot create index on empty collection " << nss - << " because collection " << nss << " does not exist."; + ASSERT_TRUE(collection) << "Cannot create index on empty collection " + << nss.toStringForErrorMsg() << " because collection " + << nss.toStringForErrorMsg() << " does not exist."; auto indexInfoObj = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << BSON("a" << 1) << "name" << indexName); @@ -488,10 +542,11 @@ void _createIndexOnEmptyCollection(OperationContext* opCtx, * Inserts a single document into a collection. 
*/ void _insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) { - writeConflictRetry(opCtx, "_insertDocument", nss.ns(), [=] { + writeConflictRetry(opCtx, "_insertDocument", nss, [=] { AutoGetCollection collection(opCtx, nss, MODE_X); - ASSERT_TRUE(collection) << "Cannot insert document " << doc << " into collection " << nss - << " because collection " << nss << " does not exist."; + ASSERT_TRUE(collection) << "Cannot insert document " << doc << " into collection " + << nss.toStringForErrorMsg() << " because collection " + << nss.toStringForErrorMsg() << " does not exist."; WriteUnitOfWork wuow(opCtx); OpDebug* const opDebug = nullptr; @@ -501,20 +556,6 @@ void _insertDocument(OperationContext* opCtx, const NamespaceString& nss, const }); } -/** - * Retrieves the pointer to a collection associated with the given namespace string from the - * catalog. The caller must hold the appropriate locks from the lock manager. - */ -CollectionPtr _getCollection_inlock(OperationContext* opCtx, const NamespaceString& nss) { - invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IS)); - auto databaseHolder = DatabaseHolder::get(opCtx); - auto* db = databaseHolder->getDb(opCtx, DatabaseName(boost::none, nss.db())); - if (!db) { - return CollectionPtr(); - } - return CollectionPtr(CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)); -} - TEST_F(RenameCollectionTest, RenameCollectionReturnsNamespaceNotFoundIfDatabaseDoesNotExist) { ASSERT_FALSE(AutoGetDb(_opCtx.get(), _sourceNss.dbName(), MODE_X).getDb()); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, @@ -539,7 +580,7 @@ TEST_F(RenameCollectionTest, RenameCollectionReturnsNotWritablePrimaryIfNotPrima _createCollection(_opCtx.get(), _sourceNss); ASSERT_OK(_replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY)); ASSERT_TRUE(_opCtx->writesAreReplicated()); - ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _sourceNss.db())); + ASSERT_FALSE(_replCoord->canAcceptWritesForDatabase(_opCtx.get(), _sourceNss.dbName())); ASSERT_EQUALS(ErrorCodes::NotWritablePrimary, renameCollection(_opCtx.get(), _sourceNss, _targetNss, {})); } @@ -547,8 +588,8 @@ TEST_F(RenameCollectionTest, RenameCollectionReturnsNotWritablePrimaryIfNotPrima TEST_F(RenameCollectionTest, TargetCollectionNameLong) { _createCollection(_opCtx.get(), _sourceNss); const std::string targetCollectionName(255, 'a'); - NamespaceString longTargetNss = - NamespaceString::createNamespaceString_forTest(_sourceNss.db(), targetCollectionName); + NamespaceString longTargetNss = NamespaceString::createNamespaceString_forTest( + _sourceNss.db_forTest(), targetCollectionName); ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, longTargetNss, {})); } @@ -592,7 +633,8 @@ TEST_F(RenameCollectionTest, auto dropPendingNss = _sourceNss.makeDropPendingNamespace(dropOpTime); _createCollection(_opCtx.get(), dropPendingNss); - auto cmd = BSON("renameCollection" << dropPendingNss.ns() << "to" << _targetNss.ns()); + auto cmd = + BSON("renameCollection" << dropPendingNss.ns_forTest() << "to" << _targetNss.ns_forTest()); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, renameCollectionForApplyOps(_opCtx.get(), boost::none, boost::none, cmd, {})); @@ -611,7 +653,8 @@ TEST_F( NamespaceString ignoredSourceNss = NamespaceString::createNamespaceString_forTest(_sourceNss.dbName(), "ignored"); - auto cmd = BSON("renameCollection" << ignoredSourceNss.ns() << "to" << _targetNss.ns()); + auto cmd = BSON("renameCollection" << ignoredSourceNss.ns_forTest() 
<< "to" + << _targetNss.ns_forTest()); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, renameCollectionForApplyOps(_opCtx.get(), options.uuid, boost::none, cmd, {})); @@ -623,16 +666,16 @@ TEST_F( TEST_F(RenameCollectionTest, RenameCollectionToItselfByNsForApplyOps) { auto uuid = _createCollectionWithUUID(_opCtx.get(), _sourceNss); - auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _sourceNss.ns() << "dropTarget" - << true); + auto cmd = BSON("renameCollection" << _sourceNss.ns_forTest() << "to" << _sourceNss.ns_forTest() + << "dropTarget" << true); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), uuid, boost::none, cmd, {})); ASSERT_TRUE(_collectionExists(_opCtx.get(), _sourceNss)); } TEST_F(RenameCollectionTest, RenameCollectionToItselfByUUIDForApplyOps) { auto uuid = _createCollectionWithUUID(_opCtx.get(), _targetNss); - auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _targetNss.ns() << "dropTarget" - << true); + auto cmd = BSON("renameCollection" << _sourceNss.ns_forTest() << "to" << _targetNss.ns_forTest() + << "dropTarget" << true); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), uuid, boost::none, cmd, {})); ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)); } @@ -640,8 +683,8 @@ TEST_F(RenameCollectionTest, RenameCollectionToItselfByUUIDForApplyOps) { TEST_F(RenameCollectionTest, RenameCollectionByUUIDRatherThanNsForApplyOps) { auto realRenameFromNss = NamespaceString::createNamespaceString_forTest("test.bar2"); auto uuid = _createCollectionWithUUID(_opCtx.get(), realRenameFromNss); - auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _targetNss.ns() << "dropTarget" - << true); + auto cmd = BSON("renameCollection" << _sourceNss.ns_forTest() << "to" << _targetNss.ns_forTest() + << "dropTarget" << true); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), uuid, boost::none, cmd, {})); ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)); } @@ -653,8 +696,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDTargetDo auto collAUUID = _createCollectionWithUUID(_opCtx.get(), collA); auto collCUUID = _createCollectionWithUUID(_opCtx.get(), collC); // Rename A to B, drop C, where B is not an existing collection - auto cmd = - BSON("renameCollection" << collA.ns() << "to" << collB.ns() << "dropTarget" << collCUUID); + auto cmd = BSON("renameCollection" << collA.ns_forTest() << "to" << collB.ns_forTest() + << "dropTarget" << collCUUID); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), collAUUID, boost::none, cmd, {})); // A and C should be dropped ASSERT_FALSE(_collectionExists(_opCtx.get(), collA)); @@ -676,8 +719,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDTargetEx auto collCUUID = _createCollectionWithUUID(_opCtx.get(), collC); // Rename A to B, drop C, where B is an existing collection // B should be kept but with a temporary name - auto cmd = - BSON("renameCollection" << collA.ns() << "to" << collB.ns() << "dropTarget" << collCUUID); + auto cmd = BSON("renameCollection" << collA.ns_forTest() << "to" << collB.ns_forTest() + << "dropTarget" << collCUUID); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), collAUUID, boost::none, cmd, {})); // A and C should be dropped ASSERT_FALSE(_collectionExists(_opCtx.get(), collA)); @@ -708,8 +751,8 @@ TEST_F(RenameCollectionTest, auto collCUUID = _createCollectionWithUUID(_opCtx.get(), collC); // Rename A to B, drop C, where B is an existing collection // B should be kept but with a temporary name - auto cmd = - 
BSON("renameCollection" << collA.ns() << "to" << collB.ns() << "dropTarget" << collCUUID); + auto cmd = BSON("renameCollection" << collA.ns_forTest() << "to" << collB.ns_forTest() + << "dropTarget" << collCUUID); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), collAUUID, boost::none, cmd, {})); // A and C should be dropped ASSERT_FALSE(_collectionExists(_opCtx.get(), collA)); @@ -734,8 +777,8 @@ TEST_F(RenameCollectionTest, auto collCUUID = UUID::gen(); // Rename A to B, drop C, where B is an existing collection // B should be kept but with a temporary name - auto cmd = - BSON("renameCollection" << collA.ns() << "to" << collB.ns() << "dropTarget" << collCUUID); + auto cmd = BSON("renameCollection" << collA.ns_forTest() << "to" << collB.ns_forTest() + << "dropTarget" << collCUUID); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), collAUUID, boost::none, cmd, {})); // A and C should be dropped ASSERT_FALSE(_collectionExists(_opCtx.get(), collA)); @@ -766,9 +809,11 @@ TEST_F(RenameCollectionTest, options.dropTarget = true; ASSERT_OK(renameCollection(_opCtx.get(), _sourceNss, _targetNss, options)); ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss)) - << "source collection " << _sourceNss << " still exists after successful rename"; + << "source collection " << _sourceNss.toStringForErrorMsg() + << " still exists after successful rename"; ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)) - << "target collection " << _targetNss << " missing after successful rename"; + << "target collection " << _targetNss.toStringForErrorMsg() + << " missing after successful rename"; ASSERT_TRUE(_opObserver->onRenameCollectionCalled); ASSERT_FALSE(_opObserver->onRenameCollectionDropTarget); @@ -778,7 +823,8 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsRejectsRenameOpTimeIfWri ASSERT_TRUE(_opCtx->writesAreReplicated()); _createCollection(_opCtx.get(), _sourceNss); - auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _targetNss.ns()); + auto cmd = + BSON("renameCollection" << _sourceNss.ns_forTest() << "to" << _targetNss.ns_forTest()); auto renameOpTime = _opObserver->renameOpTime; ASSERT_EQUALS( ErrorCodes::BadValue, @@ -793,8 +839,8 @@ DEATH_TEST_F(RenameCollectionTest, _createCollection(_opCtx.get(), _sourceNss); auto dropTargetUUID = _createCollectionWithUUID(_opCtx.get(), _targetNss); - auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _targetNss.ns() << "dropTarget" - << dropTargetUUID); + auto cmd = BSON("renameCollection" << _sourceNss.ns_forTest() << "to" << _targetNss.ns_forTest() + << "dropTarget" << dropTargetUUID); repl::OpTime renameOpTime = {Timestamp(Seconds(200), 1U), 1LL}; ASSERT_OK( @@ -803,7 +849,8 @@ DEATH_TEST_F(RenameCollectionTest, TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsSourceAndTargetDoNotExist) { auto uuid = UUID::gen(); - auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << _targetNss.ns() << "dropTarget" + auto cmd = BSON("renameCollection" << _sourceNss.ns_forTest() << "to" << _targetNss.ns_forTest() + << "dropTarget" << "true"); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), uuid, boost::none, cmd, {})); ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNss)); @@ -814,9 +861,9 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetEvenIfSourceDo _createCollectionWithUUID(_opCtx.get(), _targetNss); auto missingSourceNss = NamespaceString::createNamespaceString_forTest("test.bar2"); auto uuid = UUID::gen(); - auto cmd = - BSON("renameCollection" << missingSourceNss.ns() << 
"to" << _targetNss.ns() << "dropTarget" - << "true"); + auto cmd = BSON("renameCollection" << missingSourceNss.ns_forTest() << "to" + << _targetNss.ns_forTest() << "dropTarget" + << "true"); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), uuid, boost::none, cmd, {})); ASSERT_FALSE(_collectionExists(_opCtx.get(), _targetNss)); } @@ -827,8 +874,9 @@ TEST_F(RenameCollectionTest, RenameCollectionForApplyOpsDropTargetByUUIDEvenIfSo _createCollectionWithUUID(_opCtx.get(), _targetNss); auto dropTargetUUID = _createCollectionWithUUID(_opCtx.get(), dropTargetNss); auto uuid = UUID::gen(); - auto cmd = BSON("renameCollection" << missingSourceNss.ns() << "to" << _targetNss.ns() - << "dropTarget" << dropTargetUUID); + auto cmd = + BSON("renameCollection" << missingSourceNss.ns_forTest() << "to" << _targetNss.ns_forTest() + << "dropTarget" << dropTargetUUID); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), uuid, boost::none, cmd, {})); ASSERT_TRUE(_collectionExists(_opCtx.get(), _targetNss)); ASSERT_FALSE(_collectionExists(_opCtx.get(), dropTargetNss)); @@ -847,19 +895,20 @@ void _testRenameCollectionStayTemp(OperationContext* opCtx, options.stayTemp = stayTemp; ASSERT_OK(renameCollection(opCtx, sourceNss, targetNss, options)); ASSERT_FALSE(_collectionExists(opCtx, sourceNss)) - << "source collection " << sourceNss << " still exists after successful rename"; + << "source collection " << sourceNss.toStringForErrorMsg() + << " still exists after successful rename"; if (!isSourceCollectionTemporary) { ASSERT_FALSE(_isTempCollection(opCtx, targetNss)) - << "target collection " << targetNss + << "target collection " << targetNss.toStringForErrorMsg() << " cannot not be temporary after rename if source collection is not temporary."; } else if (stayTemp) { ASSERT_TRUE(_isTempCollection(opCtx, targetNss)) - << "target collection " << targetNss + << "target collection " << targetNss.toStringForErrorMsg() << " is no longer temporary after rename with stayTemp set to true."; } else { ASSERT_FALSE(_isTempCollection(opCtx, targetNss)) - << "target collection " << targetNss + << "target collection " << targetNss.toStringForErrorMsg() << " still temporary after rename with stayTemp set to false."; } } @@ -928,14 +977,14 @@ void _testRenameCollectionAcrossDatabaseOplogEntries( std::vector* oplogEntries, bool forApplyOps, const std::vector& expectedOplogEntries) { - ASSERT_NOT_EQUALS(sourceNss.db(), targetNss.db()); + ASSERT_NOT_EQUALS(sourceNss.db_forTest(), targetNss.db_forTest()); _createCollection(opCtx, sourceNss); _createIndexOnEmptyCollection(opCtx, sourceNss, "a_1"); _insertDocument(opCtx, sourceNss, BSON("_id" << 0)); oplogEntries->clear(); if (forApplyOps) { - auto cmd = BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns() - << "dropTarget" << true); + auto cmd = BSON("renameCollection" << sourceNss.ns_forTest() << "to" + << targetNss.ns_forTest() << "dropTarget" << true); ASSERT_OK(renameCollectionForApplyOps(opCtx, boost::none, sourceNss.tenantId(), cmd, {})); } else { RenameCollectionOptions options; @@ -1090,7 +1139,8 @@ TEST_F(RenameCollectionTest, NamespaceString invalidTargetNss = NamespaceString::createNamespaceString_forTest("invalidNamespace"); - auto cmd = BSON("renameCollection" << _sourceNss.ns() << "to" << invalidTargetNss.ns()); + auto cmd = BSON("renameCollection" << _sourceNss.ns_forTest() << "to" + << invalidTargetNss.ns_forTest()); ASSERT_EQUALS(ErrorCodes::InvalidNamespace, renameCollectionForApplyOps(_opCtx.get(), boost::none, boost::none, cmd, {})); @@ -1179,8 
+1229,8 @@ TEST_F(RenameCollectionTestMultitenancy, RenameCollectionForApplyOps) { // unable to locate the source collection. If the targetNss tenantId doesn't match, well we're // going to rename the db/collection within the _tenantId, so this effectively ensures it isn't // possible to rename across tenantIds. - auto cmd = - BSON("renameCollection" << _sourceNssTid.toString() << "to" << targetNssTid.toString()); + auto cmd = BSON("renameCollection" << _sourceNssTid.toString_forTest() << "to" + << targetNssTid.toString_forTest()); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), boost::none, _tenantId, cmd, {})); ASSERT_TRUE(_collectionExists(_opCtx.get(), targetNssTid)); @@ -1196,8 +1246,8 @@ TEST_F(RenameCollectionTestMultitenancy, RenameCollectionForApplyOpsCommonRandom ASSERT_NOT_EQUALS(_sourceNssTid, targetNssTid); // This test only has a single tenantId that belongs to neither source nor target. - auto cmd = - BSON("renameCollection" << _sourceNssTid.toString() << "to" << targetNssTid.toString()); + auto cmd = BSON("renameCollection" << _sourceNssTid.toString_forTest() << "to" + << targetNssTid.toString_forTest()); // Because the tenantId doesn't belong to the source, we should see a collection not found // error. @@ -1216,12 +1266,12 @@ TEST_F(RenameCollectionTestMultitenancy, RenameCollectionForApplyOpsCommonTid) { ASSERT_NOT_EQUALS(_sourceNssTid, targetNssTid); // A tid field supersedes tenantIds maintained in source or target. See above. - auto cmd = - BSON("renameCollection" << _sourceNssTid.toString() << "to" << targetNssTid.toString()); + auto cmd = BSON("renameCollection" << _sourceNssTid.toString_forTest() << "to" + << targetNssTid.toString_forTest()); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), boost::none, _tenantId, cmd, {})); - ASSERT_TRUE(_collectionExists( - _opCtx.get(), - NamespaceString::createNamespaceString_forTest(_tenantId, targetNssTid.toString()))); + ASSERT_TRUE(_collectionExists(_opCtx.get(), + NamespaceString::createNamespaceString_forTest( + _tenantId, targetNssTid.toString_forTest()))); ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNssTid)); } @@ -1236,8 +1286,8 @@ TEST_F(RenameCollectionTestMultitenancy, RenameCollectionForApplyOpsSourceExists ASSERT_NOT_EQUALS(otherSourceNssTid, targetNssTid); // A tid field supersedes tenantIds maintained in source or target. See above. - auto cmd = - BSON("renameCollection" << otherSourceNssTid.toString() << "to" << targetNssTid.toString()); + auto cmd = BSON("renameCollection" << otherSourceNssTid.toString_forTest() << "to" + << targetNssTid.toString_forTest()); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, renameCollectionForApplyOps(_opCtx.get(), boost::none, _otherTenantId, cmd, {})); ASSERT_TRUE(_collectionExists(_opCtx.get(), _sourceNssTid)); @@ -1257,8 +1307,8 @@ TEST_F(RenameCollectionTestMultitenancy, ASSERT_NOT_EQUALS(otherSourceNssTid, targetNssTid); // A tid field supersedes tenantIds maintained in source or target. See above. 
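For reference, a minimal sketch of the call shape these test hunks converge on: tests now opt in to NamespaceString serialization through the explicit `*_forTest` accessors when building the applyOps-style `renameCollection` command, while error text goes through `toStringForErrorMsg()`. The standalone helper below and its bare `boost::none` UUID/tenant arguments are illustrative only, not part of the change.

```cpp
#include <boost/none.hpp>

#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/catalog/rename_collection.h"
#include "mongo/db/namespace_string.h"
#include "mongo/util/assert_util.h"

namespace mongo {
// Hypothetical helper mirroring the updated test pattern: build the applyOps-style
// rename command from test-only string forms of the namespaces and apply it.
void renameViaApplyOpsForTest(OperationContext* opCtx) {
    const auto sourceNss = NamespaceString::createNamespaceString_forTest("test.foo");
    const auto targetNss = NamespaceString::createNamespaceString_forTest("test.bar");

    const auto cmd = BSON("renameCollection" << sourceNss.ns_forTest() << "to"
                                             << targetNss.ns_forTest() << "dropTarget" << true);

    // No UUID and no tenantId in this sketch; real tests pass these where relevant.
    uassertStatusOK(renameCollectionForApplyOps(opCtx, boost::none, boost::none, cmd, {}));
}
}  // namespace mongo
```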
- auto cmd = BSON("renameCollection" << otherSourceNssTid.toStringWithTenantId() << "to" - << targetNssTid.toStringWithTenantId()); + auto cmd = BSON("renameCollection" << otherSourceNssTid.toStringWithTenantId_forTest() << "to" + << targetNssTid.toStringWithTenantId_forTest()); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, renameCollectionForApplyOps(_opCtx.get(), boost::none, boost::none, cmd, {})); ASSERT_TRUE(_collectionExists(_opCtx.get(), _sourceNssTid)); @@ -1274,8 +1324,8 @@ TEST_F(RenameCollectionTestMultitenancy, RenameCollectionForApplyOpsRequireTenan NamespaceString::createNamespaceString_forTest(_tenantId, _otherNs); ASSERT_NOT_EQUALS(_sourceNssTid, targetNssTid); - auto cmd = BSON("renameCollection" << _sourceNssTid.toStringWithTenantId() << "to" - << targetNssTid.toStringWithTenantId()); + auto cmd = BSON("renameCollection" << _sourceNssTid.toStringWithTenantId_forTest() << "to" + << targetNssTid.toStringWithTenantId_forTest()); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), boost::none, boost::none, cmd, {})); ASSERT_TRUE(_collectionExists(_opCtx.get(), targetNssTid)); ASSERT_FALSE(_collectionExists(_opCtx.get(), _sourceNssTid)); @@ -1286,8 +1336,8 @@ TEST_F(RenameCollectionTestMultitenancy, RenameCollectionForApplyOpsSameNS) { RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); // A tid field supersedes tenantIds maintained in source or target. See above. - auto cmd = - BSON("renameCollection" << _sourceNssTid.toString() << "to" << _sourceNssTid.toString()); + auto cmd = BSON("renameCollection" << _sourceNssTid.toString_forTest() << "to" + << _sourceNssTid.toString_forTest()); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), boost::none, _tenantId, cmd, {})); } @@ -1295,8 +1345,8 @@ TEST_F(RenameCollectionTestMultitenancy, RenameCollectionForApplyOpsSameNSRequir RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", false); - auto cmd = BSON("renameCollection" << _sourceNssTid.toStringWithTenantId() << "to" - << _sourceNssTid.toStringWithTenantId()); + auto cmd = BSON("renameCollection" << _sourceNssTid.toStringWithTenantId_forTest() << "to" + << _sourceNssTid.toStringWithTenantId_forTest()); ASSERT_OK(renameCollectionForApplyOps(_opCtx.get(), boost::none, boost::none, cmd, {})); } @@ -1310,8 +1360,8 @@ TEST_F(RenameCollectionTestMultitenancy, RenameCollectionForApplyOpsAcrossTenant // This test is valid during the transition period, before featureFlagRequireTenantID is // enforced, and will prefix the tenantIds onto the ns fields. - auto cmd = BSON("renameCollection" << _sourceNssTid.toStringWithTenantId() << "to" - << targetNssTid.toStringWithTenantId()); + auto cmd = BSON("renameCollection" << _sourceNssTid.toStringWithTenantId_forTest() << "to" + << targetNssTid.toStringWithTenantId_forTest()); ASSERT_THROWS_CODE(renameCollectionForApplyOps(_opCtx.get(), boost::none, boost::none, cmd, {}), AssertionException, ErrorCodes::IllegalOperation); diff --git a/src/mongo/db/catalog/throttle_cursor.cpp b/src/mongo/db/catalog/throttle_cursor.cpp index 97756e7768209..a82145a81961a 100644 --- a/src/mongo/db/catalog/throttle_cursor.cpp +++ b/src/mongo/db/catalog/throttle_cursor.cpp @@ -27,14 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/db/catalog/throttle_cursor.h" +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/throttle_cursor.h" #include "mongo/db/catalog/validate_gen.h" +#include "mongo/db/client.h" #include "mongo/db/curop.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/operation_context.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" namespace mongo { @@ -78,7 +91,7 @@ SortedDataInterfaceThrottleCursor::SortedDataInterfaceThrottleCursor( } boost::optional SortedDataInterfaceThrottleCursor::seek( - OperationContext* opCtx, const KeyString::Value& key) { + OperationContext* opCtx, const key_string::Value& key) { boost::optional entry = _cursor->seek(key); if (entry) { const int64_t dataSize = entry->key.objsize() + sizeof(entry->loc); @@ -89,7 +102,7 @@ boost::optional SortedDataInterfaceThrottleCursor::seek( } boost::optional SortedDataInterfaceThrottleCursor::seekForKeyString( - OperationContext* opCtx, const KeyString::Value& key) { + OperationContext* opCtx, const key_string::Value& key) { boost::optional entry = _cursor->seekForKeyString(key); if (entry) { const int64_t dataSize = entry->keyString.getSize() + sizeof(entry->loc); diff --git a/src/mongo/db/catalog/throttle_cursor.h b/src/mongo/db/catalog/throttle_cursor.h index 39c8dc7b111bd..3f28c4915461f 100644 --- a/src/mongo/db/catalog/throttle_cursor.h +++ b/src/mongo/db/catalog/throttle_cursor.h @@ -29,12 +29,21 @@ #pragma once +#include #include #include +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/clock_source.h" #include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -95,9 +104,9 @@ class SortedDataInterfaceThrottleCursor { const SortedDataIndexAccessMethod* iam, DataThrottle* dataThrottle); - boost::optional seek(OperationContext* opCtx, const KeyString::Value& key); + boost::optional seek(OperationContext* opCtx, const key_string::Value& key); boost::optional seekForKeyString(OperationContext* opCtx, - const KeyString::Value& key); + const key_string::Value& key); boost::optional next(OperationContext* opCtx); boost::optional nextKeyString(OperationContext* opCtx); diff --git a/src/mongo/db/catalog/throttle_cursor_test.cpp b/src/mongo/db/catalog/throttle_cursor_test.cpp index 80800882ae9a0..93ccae5fa110d 100644 --- a/src/mongo/db/catalog/throttle_cursor_test.cpp +++ b/src/mongo/db/catalog/throttle_cursor_test.cpp @@ -27,16 +27,38 @@ * it in the license file. 
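The throttle-cursor hunks above are part of the mechanical `KeyString` to `key_string` namespace rename. Below is a hedged sketch of a seek against the renamed API; it assumes the cursor still yields `boost::optional<IndexKeyEntry>` (the template argument is not visible in this patch) and that `kMinBSONKey` is available from the BSON headers, as in the test fixture that follows.

```cpp
#include <boost/optional.hpp>

#include "mongo/bson/bsonmisc.h"  // assumed home of kMinBSONKey
#include "mongo/db/catalog/throttle_cursor.h"
#include "mongo/db/storage/key_string.h"

namespace mongo {
// Illustrative only: position a throttled index cursor at the smallest possible key
// using the renamed key_string namespace.
boost::optional<IndexKeyEntry> seekToMinKey(OperationContext* opCtx,
                                            SortedDataInterfaceThrottleCursor& cursor) {
    const key_string::Value minKey =
        key_string::Builder{
            key_string::Version::kLatestVersion, kMinBSONKey, key_string::ALL_ASCENDING}
            .getValueCopy();
    return cursor.seek(opCtx, minKey);
}
}  // namespace mongo
```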
*/ +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/throttle_cursor.h" #include "mongo/db/catalog/validate_gen.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/time_support.h" namespace mongo { @@ -52,10 +74,10 @@ class ThrottleCursorTest : public CatalogTestFixture { void tearDown() override; protected: - const KeyString::Value kMinKeyString = KeyString::Builder{KeyString::Version::kLatestVersion, - kMinBSONKey, - KeyString::ALL_ASCENDING} - .getValueCopy(); + const key_string::Value kMinKeyString = key_string::Builder{key_string::Version::kLatestVersion, + kMinBSONKey, + key_string::ALL_ASCENDING} + .getValueCopy(); explicit ThrottleCursorTest(Milliseconds clockIncrement = Milliseconds{kTickDelay}) : CatalogTestFixture(Options{}.useMockClock(true, clockIncrement)) {} diff --git a/src/mongo/db/catalog/uncommitted_catalog_updates.cpp b/src/mongo/db/catalog/uncommitted_catalog_updates.cpp index 751c0b25f6f5c..3dc076b3e3a9a 100644 --- a/src/mongo/db/catalog/uncommitted_catalog_updates.cpp +++ b/src/mongo/db/catalog/uncommitted_catalog_updates.cpp @@ -29,6 +29,21 @@ #include "uncommitted_catalog_updates.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/util/decorable.h" + namespace mongo { namespace { @@ -105,39 +120,6 @@ void UncommittedCatalogUpdates::_createCollection(OperationContext* opCtx, const auto& nss = coll->ns(); auto uuid = coll->uuid(); _entries.push_back({action, coll, nss, uuid}); - - // When we create a collection after a drop we skip registering the collection in the - // preCommitHook and register it during the same commit handler that we unregister the - // collection. - if (action == Entry::Action::kCreatedCollection) { - opCtx->recoveryUnit()->registerPreCommitHook([uuid](OperationContext* opCtx) { - auto uncommittedCatalogUpdates = UncommittedCatalogUpdates::get(opCtx); - auto [found, createdColl, newColl] = lookupCollection(opCtx, uuid); - if (!createdColl) { - return; - } - - // Invariant that a collection is found. - invariant(createdColl.get(), uuid.toString()); - - // This will throw when registering a namespace which is already in use. 
- CollectionCatalog::write(opCtx, [&, coll = createdColl](CollectionCatalog& catalog) { - catalog.registerCollectionTwoPhase(opCtx, uuid, coll, /*ts=*/boost::none); - }); - - opCtx->recoveryUnit()->onRollback([uuid](OperationContext* opCtx) { - CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { - catalog.deregisterCollection( - opCtx, uuid, /*isDropPending=*/false, /*ts=*/boost::none); - }); - }); - }); - } - - // We hold a reference to prevent the collection from being deleted when `PublishCatalogUpdates` - // runs its rollback handler as that happens first. Other systems may have setup some rollback - // handler that need to interact with this collection. - opCtx->recoveryUnit()->onRollback([coll](OperationContext*) {}); } void UncommittedCatalogUpdates::writableCollection(std::shared_ptr collection) { diff --git a/src/mongo/db/catalog/uncommitted_catalog_updates.h b/src/mongo/db/catalog/uncommitted_catalog_updates.h index 9c2a7057229fa..9d834c5f2cc27 100644 --- a/src/mongo/db/catalog/uncommitted_catalog_updates.h +++ b/src/mongo/db/catalog/uncommitted_catalog_updates.h @@ -29,11 +29,24 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/views_for_database.h" #include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/views/view.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/uuid.h" namespace mongo { @@ -139,7 +152,8 @@ class UncommittedCatalogUpdates { * collection must be used to drop an index. */ static bool isTwoPhaseCommitEntry(const Entry& entry) { - return (entry.action == Entry::Action::kWritableCollection || + return (entry.action == Entry::Action::kCreatedCollection || + entry.action == Entry::Action::kWritableCollection || entry.action == Entry::Action::kRenamedCollection || entry.action == Entry::Action::kDroppedCollection || entry.action == Entry::Action::kRecreatedCollection); @@ -251,6 +265,38 @@ class UncommittedCatalogUpdates { return _entries.empty(); } + /** + * Flag to check of callbacks with the RecoveryUnit has been registered for this instance. + */ + bool hasRegisteredWithRecoveryUnit() const { + return _callbacksRegisteredWithRecoveryUnit; + } + + /** + * Mark that callbacks with the RecoveryUnit has been registered for this instance. 
+ */ + void markRegisteredWithRecoveryUnit() { + invariant(!_callbacksRegisteredWithRecoveryUnit); + _callbacksRegisteredWithRecoveryUnit = true; + } + + /** + * Flag to check if precommit has executed successfully and all uncommitted collections have + * been registered as pending commit + */ + bool hasPrecommitted() const { + return _preCommitted; + } + + /** + * Mark that precommit has executed successfully and all uncommitted collections have + * been registered as pending commit + */ + void markPrecommitted() { + invariant(!_preCommitted); + _preCommitted = true; + } + static UncommittedCatalogUpdates& get(OperationContext* opCtx); private: @@ -269,6 +315,9 @@ class UncommittedCatalogUpdates { std::vector _entries; stdx::unordered_set _ignoreExternalViewChanges; + + bool _callbacksRegisteredWithRecoveryUnit = false; + bool _preCommitted = false; }; /** @@ -312,6 +361,7 @@ class OpenedCollections { private: struct Entry { std::shared_ptr collection; + // TODO(SERVER-78226): Replace `nss` and `uuid` with a type which can express "nss and uuid" boost::optional nss; boost::optional uuid; }; diff --git a/src/mongo/db/catalog/uncommitted_multikey.cpp b/src/mongo/db/catalog/uncommitted_multikey.cpp index 8727a1a12d7c4..77fd78af15e9d 100644 --- a/src/mongo/db/catalog/uncommitted_multikey.cpp +++ b/src/mongo/db/catalog/uncommitted_multikey.cpp @@ -27,10 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/catalog/uncommitted_multikey.h" #include "mongo/db/operation_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/catalog/uncommitted_multikey.h b/src/mongo/db/catalog/uncommitted_multikey.h index 8a50a72ce3472..1ba026117a700 100644 --- a/src/mongo/db/catalog/uncommitted_multikey.h +++ b/src/mongo/db/catalog/uncommitted_multikey.h @@ -29,8 +29,12 @@ #pragma once -#include "mongo/db/storage/bson_collection_catalog_entry.h" #include +#include + +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" namespace mongo { class Collection; diff --git a/src/mongo/db/catalog/unique_collection_name.cpp b/src/mongo/db/catalog/unique_collection_name.cpp index a5efa4b22dfec..6c6895e2932d6 100644 --- a/src/mongo/db/catalog/unique_collection_name.cpp +++ b/src/mongo/db/catalog/unique_collection_name.cpp @@ -29,7 +29,25 @@ #include "mongo/db/catalog/unique_collection_name.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/catalog/unique_collection_name.h b/src/mongo/db/catalog/unique_collection_name.h index 2dccf435b9c32..344b737254957 100644 --- a/src/mongo/db/catalog/unique_collection_name.h +++ b/src/mongo/db/catalog/unique_collection_name.h @@ -30,6 +30,8 @@ #pragma once #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" diff 
--git a/src/mongo/db/catalog/util/partitioned.h b/src/mongo/db/catalog/util/partitioned.h index f7fa6ea5aa8a9..5c4eb6e522bb4 100644 --- a/src/mongo/db/catalog/util/partitioned.h +++ b/src/mongo/db/catalog/util/partitioned.h @@ -30,15 +30,16 @@ #pragma once #include +#include +#include #include #include #include +#include #include #include #include -#include - #include "mongo/platform/mutex.h" #include "mongo/util/aligned.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/db/catalog/util/partitioned_test.cpp b/src/mongo/db/catalog/util/partitioned_test.cpp index 2521aa7651d5c..62fab0220fe02 100644 --- a/src/mongo/db/catalog/util/partitioned_test.cpp +++ b/src/mongo/db/catalog/util/partitioned_test.cpp @@ -27,14 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include +// IWYU pragma: no_include "boost/align/detail/aligned_alloc_posix.hpp" + +#include "mongo/base/string_data.h" #include "mongo/db/catalog/util/partitioned.h" +#include "mongo/platform/atomic_word.h" #include "mongo/stdx/thread.h" #include "mongo/stdx/unordered_map.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/catalog/validate_adaptor.cpp b/src/mongo/db/catalog/validate_adaptor.cpp index 04f00e597810e..b40809fe0875d 100644 --- a/src/mongo/db/catalog/validate_adaptor.cpp +++ b/src/mongo/db/catalog/validate_adaptor.cpp @@ -28,43 +28,71 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/validate_adaptor.h" - +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/bson/util/bsoncolumn.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/column_index_consistency.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_consistency.h" #include "mongo/db/catalog/throttle_cursor.h" +#include "mongo/db/catalog/validate_adaptor.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/curop.h" -#include "mongo/db/index/columns_access_method.h" -#include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index/wildcard_access_method.h" +#include "mongo/db/index_names.h" #include "mongo/db/matcher/expression.h" -#include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/record_id_helpers.h" -#include "mongo/db/server_feature_flags_gen.h" -#include "mongo/db/storage/execution_context.h" #include "mongo/db/storage/key_string.h" #include "mongo/db/storage/record_store.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/bucket_catalog/flat_bson.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include 
"mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_options.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/object_check.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/shared_buffer_fragment.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #include "mongo/util/testing_proctor.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -73,6 +101,8 @@ namespace mongo { namespace { +MONGO_FAIL_POINT_DEFINE(failRecordStoreTraversal); + // Set limit for size of corrupted records that will be reported. const long long kMaxErrorSizeBytes = 1 * 1024 * 1024; const long long kInterruptIntervalNumBytes = 50 * 1024 * 1024; // 50MB. @@ -106,8 +136,8 @@ void _validateClusteredCollectionRecordId(OperationContext* opCtx, } const auto ksFromBSON = - KeyString::Builder(KeyString::Version::kLatestVersion, ridFromDoc.getValue()); - const auto ksFromRid = KeyString::Builder(KeyString::Version::kLatestVersion, rid); + key_string::Builder(key_string::Version::kLatestVersion, ridFromDoc.getValue()); + const auto ksFromRid = key_string::Builder(key_string::Version::kLatestVersion, rid); const auto clusterKeyField = clustered_util::getClusterKeyFieldName(indexSpec); if (ksFromRid != ksFromBSON) { @@ -539,12 +569,8 @@ Status ValidateAdaptor::validateRecord(OperationContext* opCtx, long long* nNonCompliantDocuments, size_t* dataSize, ValidateResults* results) { - auto validateBSONMode = BSONValidateMode::kDefault; - if (serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gExtendValidateCommand.isEnabled(serverGlobalParams.featureCompatibility)) { - validateBSONMode = _validateState->getBSONValidateMode(); - } - const Status status = validateBSON(record.data(), record.size(), validateBSONMode); + Status status = + validateBSON(record.data(), record.size(), _validateState->getBSONValidateMode()); if (!status.isOK()) { if (status.code() != ErrorCodes::NonConformantBSON) { return status; @@ -574,15 +600,17 @@ Status ValidateAdaptor::validateRecord(OperationContext* opCtx, results); } - SharedBufferFragmentBuilder pool(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder pool(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); - for (const auto& index : _validateState->getIndexes()) { - const IndexDescriptor* descriptor = index->descriptor(); - if (descriptor->isPartial() && !index->getFilterExpression()->matchesBSON(recordBson)) + for (const auto& indexIdent : _validateState->getIndexIdents()) { + const IndexDescriptor* descriptor = + coll->getIndexCatalog()->findIndexByIdent(opCtx, indexIdent); + if (descriptor->isPartial() && + !descriptor->getEntry()->getFilterExpression()->matchesBSON(recordBson)) continue; - this->traverseRecord(opCtx, coll, index.get(), recordId, recordBson, results); + this->traverseRecord(opCtx, coll, descriptor->getEntry(), recordId, recordBson, results); } return Status::OK(); } @@ -651,6 +679,24 @@ void ValidateAdaptor::traverseRecordStore(OperationContext* opCtx, Status status = validateRecord( opCtx, record->id, record->data, &nNonCompliantDocuments, &validatedSize, results); + // Log the out-of-order entries as errors. 
+ // + // Validate uses a DataCorruptionDetectionMode::kLogAndContinue mode such that data + // corruption errors are logged without throwing, so certain checks must be duplicated here + // as well. + if ((prevRecordId.isValid() && prevRecordId > record->id) || + MONGO_unlikely(failRecordStoreTraversal.shouldFail())) { + // TODO SERVER-78040: Clean this up once we can insert errors blindly into the list and + // not care about deduplication. + static constexpr auto kErrorMessage = "Detected out-of-order documents. See logs."; + if (results->valid || + std::find(results->errors.begin(), results->errors.end(), kErrorMessage) == + results->errors.end()) { + results->errors.push_back(kErrorMessage); + results->valid = false; + } + } + // validatedSize = dataSize is not a general requirement as some storage engines may use // padding, but we still require that they return the unpadded record data. if (!status.isOK() || validatedSize != static_cast(dataSize)) { @@ -669,18 +715,23 @@ void ValidateAdaptor::traverseRecordStore(OperationContext* opCtx, } if (_validateState->fixErrors()) { - writeConflictRetry( - opCtx, "corrupt record removal", _validateState->nss().ns(), [&] { - WriteUnitOfWork wunit(opCtx); - rs->deleteRecord(opCtx, record->id); - wunit.commit(); - }); + writeConflictRetry(opCtx, "corrupt record removal", _validateState->nss(), [&] { + WriteUnitOfWork wunit(opCtx); + rs->deleteRecord(opCtx, record->id); + wunit.commit(); + }); results->repaired = true; results->numRemovedCorruptRecords++; _numRecords--; } else { - if (results->valid) { - results->errors.push_back("Detected one or more invalid documents. See logs."); + // TODO SERVER-78040: Clean this up once we can insert errors blindly into the list + // and not care about deduplication. + static constexpr auto kErrorMessage = + "Detected one or more invalid documents. See logs."; + if (results->valid || + std::find(results->errors.begin(), results->errors.end(), kErrorMessage) == + results->errors.end()) { + results->errors.push_back(kErrorMessage); results->valid = false; } @@ -710,10 +761,7 @@ void ValidateAdaptor::traverseRecordStore(OperationContext* opCtx, nNonCompliantDocuments++; schemaValidationFailed(_validateState, result.first, results); - } else if (serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gExtendValidateCommand.isEnabled( - serverGlobalParams.featureCompatibility) && - coll->getTimeseriesOptions()) { + } else if (coll->getTimeseriesOptions()) { // Checks for time-series collection consistency. 
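Both the new out-of-order check and the reworked invalid-document path above repeat the same deduplication idiom called out by the SERVER-78040 TODOs: append the canned error string only once, then mark the results invalid. A sketch of that idiom in isolation; the free-function wrapper is hypothetical, the diff inlines it at each call site.

```cpp
#include <algorithm>
#include <string>

#include "mongo/db/catalog/validate_results.h"

namespace mongo {
// Append 'error' to the validation results unless an identical message is already
// recorded, then mark the overall result invalid.
void addValidationErrorOnce(ValidateResults* results, const std::string& error) {
    if (results->valid ||
        std::find(results->errors.begin(), results->errors.end(), error) ==
            results->errors.end()) {
        results->errors.push_back(error);
        results->valid = false;
    }
}
}  // namespace mongo
```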
Status bucketStatus = _validateTimeSeriesBucketRecord(coll, record->data.toBson(), results); @@ -752,9 +800,10 @@ void ValidateAdaptor::traverseRecordStore(OperationContext* opCtx, const auto fastCount = coll->numRecords(opCtx); if (_validateState->shouldEnforceFastCount() && fastCount != _numRecords) { - results->errors.push_back( - str::stream() << "fast count (" << fastCount << ") does not match number of records (" - << _numRecords << ") for collection '" << coll->ns() << "'"); + results->errors.push_back(str::stream() << "fast count (" << fastCount + << ") does not match number of records (" + << _numRecords << ") for collection '" + << coll->ns().toStringForErrorMsg() << "'"); results->valid = false; } diff --git a/src/mongo/db/catalog/validate_adaptor.h b/src/mongo/db/catalog/validate_adaptor.h index 07d0516462368..7bd5df52d926d 100644 --- a/src/mongo/db/catalog/validate_adaptor.h +++ b/src/mongo/db/catalog/validate_adaptor.h @@ -30,10 +30,21 @@ #pragma once #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/column_index_consistency.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/index_consistency.h" +#include "mongo/db/catalog/validate_results.h" #include "mongo/db/catalog/validate_state.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/record_data.h" #include "mongo/util/progress_meter.h" namespace mongo { diff --git a/src/mongo/db/catalog/validate_results.cpp b/src/mongo/db/catalog/validate_results.cpp index b1ce131a2de9b..9ee8cb3db7289 100644 --- a/src/mongo/db/catalog/validate_results.cpp +++ b/src/mongo/db/catalog/validate_results.cpp @@ -29,6 +29,12 @@ #include "mongo/db/catalog/validate_results.h" +#include + +#include + +#include "mongo/base/string_data.h" + namespace mongo { void ValidateResults::appendToResultObj(BSONObjBuilder* resultObj, bool debugging) const { diff --git a/src/mongo/db/catalog/validate_results.h b/src/mongo/db/catalog/validate_results.h index c02235a68bc40..2103a0e69bc20 100644 --- a/src/mongo/db/catalog/validate_results.h +++ b/src/mongo/db/catalog/validate_results.h @@ -29,12 +29,18 @@ #pragma once +#include #include #include #include #include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/record_id.h" namespace mongo { diff --git a/src/mongo/db/catalog/validate_state.cpp b/src/mongo/db/catalog/validate_state.cpp index 05e40ccef789e..66ec76499e171 100644 --- a/src/mongo/db/catalog/validate_state.cpp +++ b/src/mongo/db/catalog/validate_state.cpp @@ -28,22 +28,39 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/catalog/validate_state.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/index_consistency.h" -#include "mongo/db/catalog/validate_adaptor.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/validate_state.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/index/columns_access_method.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include 
"mongo/db/index_names.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/views/view.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -93,12 +110,11 @@ ValidateState::ValidateState(OperationContext* opCtx, auto view = CollectionCatalog::get(opCtx)->lookupView(opCtx, _nss); if (!view) { uasserted(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection '" << _nss << "' does not exist to validate."); + str::stream() << "Collection '" << _nss.toStringForErrorMsg() + << "' does not exist to validate."); } else { // Uses the bucket collection in place of the time-series collection view. - if (!view->timeseries() || - !feature_flags::gExtendValidateCommand.isEnabled( - serverGlobalParams.featureCompatibility)) { + if (!view->timeseries()) { uasserted(ErrorCodes::CommandNotSupportedOnView, "Cannot validate a view"); } _nss = _nss.makeTimeseriesBucketsNamespace(); @@ -113,7 +129,7 @@ ValidateState::ValidateState(OperationContext* opCtx, ErrorCodes::NamespaceNotFound, fmt::format( "Cannot validate a time-series collection without its bucket collection {}.", - _nss.toString()), + _nss.toStringForErrorMsg()), _collection); } } @@ -135,13 +151,14 @@ ValidateState::ValidateState(OperationContext* opCtx, bool ValidateState::shouldEnforceFastCount() const { if (_mode == ValidateMode::kForegroundFullEnforceFastCount) { - if (_nss.isOplog() || _nss.isChangeCollection()) { + if (_nss.isOplog() || _nss.isChangeCollection() || + _nss.isChangeStreamPreImagesCollection()) { // Oplog writers only take a global IX lock, so the oplog can still be written to even // during full validation despite its collection X lock. This can cause validate to // incorrectly report an incorrect fast count on the oplog when run in enforceFastCount // mode. - // The oplog entries are also written to the change collections and are prone to fast - // count failures. + // The oplog entries are also written to the change collections and pre-images + // collections, these collections are also prone to fast count failures. return false; } else if (_nss == NamespaceString::kIndexBuildEntryNamespace) { // Do not enforce fast count on the 'config.system.indexBuilds' collection. This is an @@ -181,18 +198,22 @@ void ValidateState::_yieldLocks(OperationContext* opCtx) { _relockDatabaseAndCollection(opCtx); uassert(ErrorCodes::Interrupted, - str::stream() << "Interrupted due to: catalog restart: " << _nss << " (" << *_uuid - << ") while validating the collection", + str::stream() << "Interrupted due to: catalog restart: " << _nss.toStringForErrorMsg() + << " (" << *_uuid << ") while validating the collection", _catalogGeneration == opCtx->getServiceContext()->getCatalogGeneration()); // Check if any of the indexes we were validating were dropped. Indexes created while // yielding will be ignored. 
- for (const auto& index : _indexes) { + for (const auto& indexIdent : _indexIdents) { + const IndexDescriptor* desc = + _collection->getIndexCatalog()->findIndexByIdent(opCtx, indexIdent); uassert(ErrorCodes::Interrupted, str::stream() << "Interrupted due to: index being validated was dropped from collection: " - << _nss << " (" << *_uuid << "), index: " << index->descriptor()->indexName(), - !index->isDropped()); + << _nss.toStringForErrorMsg() << " (" << *_uuid + << "), index ident: " << indexIdent, + desc); + invariant(!desc->getEntry()->isDropped()); } }; @@ -226,7 +247,7 @@ void ValidateState::_yieldCursors(OperationContext* opCtx) { void ValidateState::initializeCursors(OperationContext* opCtx) { invariant(!_traverseRecordStoreCursor && !_seekRecordStoreCursor && _indexCursors.size() == 0 && - _columnStoreIndexCursors.size() == 0 && _indexes.size() == 0); + _columnStoreIndexCursors.size() == 0 && _indexIdents.size() == 0); // Background validation reads from the last stable checkpoint instead of the latest data. This // allows concurrent writes to go ahead without interfering with validation's view of the data. @@ -251,7 +272,7 @@ void ValidateState::initializeCursors(OperationContext* opCtx) { uint64_t currCheckpointId = 0; do { _indexCursors.clear(); - _indexes.clear(); + _indexIdents.clear(); StringSet readyDurableIndexes; try { _traverseRecordStoreCursor = std::make_unique( @@ -349,7 +370,7 @@ void ValidateState::initializeCursors(OperationContext* opCtx) { continue; } - _indexes.push_back(indexCatalog->getEntryShared(desc)); + _indexIdents.push_back(desc->getEntry()->getIdent()); } // For foreground validation which doesn't use checkpoint cursors, the checkpoint id will // always be zero. @@ -389,15 +410,16 @@ void ValidateState::_relockDatabaseAndCollection(OperationContext* opCtx) { std::string dbErrMsg = str::stream() << "Interrupted due to: database drop: " << _nss.db() - << " while validating collection: " << _nss << " (" << *_uuid << ")"; + << " while validating collection: " << _nss.toStringForErrorMsg() << " (" << *_uuid << ")"; _databaseLock.emplace(opCtx, _nss.dbName(), MODE_IS); _database = DatabaseHolder::get(opCtx)->getDb(opCtx, _nss.dbName()); uassert(ErrorCodes::Interrupted, dbErrMsg, _database); uassert(ErrorCodes::Interrupted, dbErrMsg, !_database->isDropPending(opCtx)); - std::string collErrMsg = str::stream() << "Interrupted due to: collection drop: " << _nss - << " (" << *_uuid << ") while validating the collection"; + std::string collErrMsg = str::stream() + << "Interrupted due to: collection drop: " << _nss.toStringForErrorMsg() << " (" << *_uuid + << ") while validating the collection"; try { NamespaceStringOrUUID nssOrUUID(_nss.dbName(), *_uuid); diff --git a/src/mongo/db/catalog/validate_state.h b/src/mongo/db/catalog/validate_state.h index 77c6d376fc4ae..508c78da63e08 100644 --- a/src/mongo/db/catalog/validate_state.h +++ b/src/mongo/db/catalog/validate_state.h @@ -29,14 +29,35 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bson_validate.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_validation.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/throttle_cursor.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/locker.h" 
#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/server_options.h" +#include "mongo/db/storage/column_store.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/string_map.h" #include "mongo/util/uuid.h" namespace mongo { @@ -82,11 +103,8 @@ class ValidateState { } BSONValidateMode getBSONValidateMode() const { - return serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gExtendValidateCommand.isEnabled( - serverGlobalParams.featureCompatibility) && - (_mode == ValidateMode::kForegroundCheckBSON || - _mode == ValidateMode::kBackgroundCheckBSON || isFullValidation()) + return isFullValidation() || _mode == ValidateMode::kForegroundCheckBSON || + _mode == ValidateMode::kBackgroundCheckBSON ? BSONValidateMode::kFull : BSONValidateMode::kExtended; } @@ -137,8 +155,8 @@ class ValidateState { return _collection; } - const std::vector>& getIndexes() const { - return _indexes; + const std::vector& getIndexIdents() const { + return _indexIdents; } const StringSet& getSkippedIndexes() const { @@ -249,11 +267,11 @@ class ValidateState { // constructor boost::optional _uuid; - // Stores the indexes that are going to be validated. When validate yields periodically we'll - // use this list to determine if validation should abort when an existing index that was + // Stores the index idents that are going to be validated. When validate yields periodically + // we'll use this list to determine if validation should abort when an existing index that was // being validated is dropped. Additionally we'll use this list to determine which indexes to // skip during validation that may have been created in-between yields. - std::vector> _indexes; + std::vector _indexIdents; // Shared cursors to be used during validation, created in 'initializeCursors()'. StringMap> _indexCursors; diff --git a/src/mongo/db/catalog/validate_state_test.cpp b/src/mongo/db/catalog/validate_state_test.cpp index 816871e78a61a..55d61204d5776 100644 --- a/src/mongo/db/catalog/validate_state_test.cpp +++ b/src/mongo/db/catalog/validate_state_test.cpp @@ -27,18 +27,43 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/validate_state.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_mock.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/snapshot_manager.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -129,7 +154,7 @@ void createIndex(OperationContext* opCtx, const NamespaceString& nss, const BSON AutoGetCollection collection(opCtx, nss, MODE_X); ASSERT(collection); - ASSERT_EQ(1, indexKey.nFields()) << nss << "/" << indexKey; + ASSERT_EQ(1, indexKey.nFields()) << nss.toStringForErrorMsg() << "/" << indexKey; auto spec = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << indexKey << "name" << (indexKey.firstElementFieldNameStringData() + "_1")); @@ -147,10 +172,12 @@ void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const std::s WriteUnitOfWork wuow(opCtx); - auto indexDescriptor = collection->getIndexCatalog()->findIndexByName(opCtx, indexName); - ASSERT(indexDescriptor); - ASSERT_OK(collection.getWritableCollection(opCtx)->getIndexCatalog()->dropIndex( - opCtx, collection.getWritableCollection(opCtx), indexDescriptor)); + auto writableCollection = collection.getWritableCollection(opCtx); + auto writableEntry = + writableCollection->getIndexCatalog()->getWritableEntryByName(opCtx, indexName); + ASSERT(writableEntry); + ASSERT_OK(writableCollection->getIndexCatalog()->dropIndexEntry( + opCtx, collection.getWritableCollection(opCtx), writableEntry)); ASSERT_OK(opCtx->recoveryUnit()->setTimestamp( repl::ReplicationCoordinator::get(opCtx)->getMyLastAppliedOpTime().getTimestamp() + 1)); @@ -230,7 +257,7 @@ TEST_F(ValidateStateTest, OpenCursorsOnAllIndexes) { // Make sure all of the indexes were found and cursors opened against them. Including the // _id index. 
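With the feature-flag and FCV guards removed, `getBSONValidateMode()` in the validate_state.h hunk above reduces to a plain mode check: full validation and the explicit checkBSON modes get `BSONValidateMode::kFull`, everything else stays on `kExtended`. A hedged restatement of that selection as a standalone function (only the mode names visible in the diff are real; the rest is illustrative):

```cpp
enum class BSONValidateMode { kExtended, kFull };

// Illustrative stand-ins for the validate modes named in the hunk above.
enum class ValidateMode {
    kForeground,
    kBackground,
    kForegroundCheckBSON,
    kBackgroundCheckBSON,
    kForegroundFull
};

inline BSONValidateMode bsonValidateModeFor(ValidateMode mode, bool isFullValidation) {
    // Full validation and the explicit checkBSON modes get the strictest BSON
    // checks; everything else keeps the cheaper extended checks.
    if (isFullValidation || mode == ValidateMode::kForegroundCheckBSON ||
        mode == ValidateMode::kBackgroundCheckBSON) {
        return BSONValidateMode::kFull;
    }
    return BSONValidateMode::kExtended;
}
```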
- ASSERT_EQ(validateState.getIndexes().size(), 5); + ASSERT_EQ(validateState.getIndexIdents().size(), 5); } // Checkpoint of all of the data: it should not make any difference for foreground validation @@ -246,7 +273,7 @@ TEST_F(ValidateStateTest, OpenCursorsOnAllIndexes) { CollectionValidation::RepairMode::kNone, /*logDiagnostics=*/false); validateState.initializeCursors(opCtx); - ASSERT_EQ(validateState.getIndexes().size(), 5); + ASSERT_EQ(validateState.getIndexIdents().size(), 5); } // Open cursors against checkpoint'ed indexes with {background:true}. @@ -277,7 +304,7 @@ TEST_F(ValidateStateDiskTest, OpenCursorsOnCheckpointedIndexes) { // Make sure the uncheckpoint'ed indexes are not found. // (Note the _id index was create with collection creation, so we have 3 indexes.) - ASSERT_EQ(validateState.getIndexes().size(), 3); + ASSERT_EQ(validateState.getIndexIdents().size(), 3); } // Indexes in the checkpoint that were dropped in the present should not have cursors opened against @@ -312,7 +339,7 @@ TEST_F(ValidateStateDiskTest, CursorsAreNotOpenedAgainstCheckpointedIndexesThatW CollectionValidation::RepairMode::kNone, /*logDiagnostics=*/false); validateState.initializeCursors(opCtx); - ASSERT_EQ(validateState.getIndexes().size(), 3); + ASSERT_EQ(validateState.getIndexIdents().size(), 3); } // Checkpoint the index drops and recheck that the indexes are not found. @@ -326,7 +353,7 @@ TEST_F(ValidateStateDiskTest, CursorsAreNotOpenedAgainstCheckpointedIndexesThatW CollectionValidation::RepairMode::kNone, /*logDiagnostics=*/false); validateState.initializeCursors(opCtx); - ASSERT_EQ(validateState.getIndexes().size(), 3); + ASSERT_EQ(validateState.getIndexIdents().size(), 3); } } // namespace diff --git a/src/mongo/db/catalog/views_for_database.cpp b/src/mongo/db/catalog/views_for_database.cpp index ac17cf47838eb..c04c22358c32b 100644 --- a/src/mongo/db/catalog/views_for_database.cpp +++ b/src/mongo/db/catalog/views_for_database.cpp @@ -30,12 +30,46 @@ #include "views_for_database.h" +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/audit.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/views/util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -45,12 +79,9 @@ namespace { RecordId find(OperationContext* opCtx, const CollectionPtr& systemViews, const NamespaceString& viewName) 
{ - return systemViews->getIndexCatalog() - ->findIdIndex(opCtx) - ->getEntry() - ->accessMethod() - ->asSortedData() - ->findSingle(opCtx, systemViews, BSON("_id" << NamespaceStringUtil::serialize(viewName))); + const IndexCatalogEntry* entry = systemViews->getIndexCatalog()->findIdIndex(opCtx)->getEntry(); + return entry->accessMethod()->asSortedData()->findSingle( + opCtx, systemViews, entry, BSON("_id" << NamespaceStringUtil::serialize(viewName))); } StatusWith> parseCollator(OperationContext* opCtx, @@ -207,17 +238,6 @@ Status ViewsForDatabase::update(OperationContext* opCtx, Status ViewsForDatabase::_upsertIntoMap(OperationContext* opCtx, std::shared_ptr view) { - // Cannot have a secondary view on a system.buckets collection, only the time-series - // collection view. - if (view->viewOn().isTimeseriesBucketsCollection() && - view->name() != view->viewOn().getTimeseriesViewNamespace()) { - return { - ErrorCodes::InvalidNamespace, - "Invalid view: cannot define a view over a system.buckets namespace except by " - "creating a time-series collection", - }; - } - if (!view->name().isOnInternalDb() && !view->name().isSystem()) { if (view->timeseries()) { _stats.userTimeseries += 1; @@ -246,7 +266,7 @@ Status ViewsForDatabase::_upsertIntoGraph(OperationContext* opCtx, if (needsValidation) { uassertStatusOKWithContext(pipelineStatus.getStatus(), str::stream() << "Invalid pipeline for view " - << viewDef.name().ns()); + << viewDef.name().toStringForErrorMsg()); } return pipelineStatus.getStatus(); } @@ -332,6 +352,7 @@ Status ViewsForDatabase::_upsertIntoCatalog(OperationContext* opCtx, oldView, viewObj, collection_internal::kUpdateAllIndexes, + nullptr /* indexesAffected */, &CurOp::get(opCtx)->debug(), &args); } @@ -369,7 +390,7 @@ void ViewsForDatabase::clear(OperationContext* opCtx) { for (auto&& [name, view] : _viewMap) { audit::logDropView(opCtx->getClient(), view->name(), - view->viewOn().ns(), + NamespaceStringUtil::serialize(view->viewOn()), view->pipeline(), ErrorCodes::OK); } @@ -390,9 +411,9 @@ Status ViewsForDatabase::_validateCollation(OperationContext* opCtx, !CollatorInterface::collatorsMatch(view.defaultCollator(), otherView->defaultCollator())) { return {ErrorCodes::OptionNotSupportedOnView, - str::stream() << "View " << view.name().toString() + str::stream() << "View " << view.name().toStringForErrorMsg() << " has conflicting collation with view " - << otherView->name().toString()}; + << otherView->name().toStringForErrorMsg()}; } } diff --git a/src/mongo/db/catalog/views_for_database.h b/src/mongo/db/catalog/views_for_database.h index de9b40978dabe..f411189aef8ec 100644 --- a/src/mongo/db/catalog/views_for_database.h +++ b/src/mongo/db/catalog/views_for_database.h @@ -30,12 +30,21 @@ #pragma once #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/views/view.h" #include "mongo/db/views/view_graph.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/catalog/virtual_collection_impl.cpp b/src/mongo/db/catalog/virtual_collection_impl.cpp index 1d6c8873482dd..03336be59a9f9 100644 --- a/src/mongo/db/catalog/virtual_collection_impl.cpp 
+++ b/src/mongo/db/catalog/virtual_collection_impl.cpp @@ -29,11 +29,14 @@ #include "mongo/db/catalog/virtual_collection_impl.h" +#include + #include "mongo/db/catalog/collection_impl.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/index_catalog_impl.h" #include "mongo/db/operation_context.h" #include "mongo/db/storage/external_record_store.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -57,6 +60,6 @@ std::shared_ptr VirtualCollectionImpl::make(OperationContext* opCtx, const CollectionOptions& options, const VirtualCollectionOptions& vopts) { return std::make_shared( - opCtx, nss, options, std::make_unique(nss.ns(), options.uuid, vopts)); + opCtx, nss, options, std::make_unique(nss, options.uuid, vopts)); } } // namespace mongo diff --git a/src/mongo/db/catalog/virtual_collection_impl.h b/src/mongo/db/catalog/virtual_collection_impl.h index 82ec6745a7a7e..c6557de13d896 100644 --- a/src/mongo/db/catalog/virtual_collection_impl.h +++ b/src/mongo/db/catalog/virtual_collection_impl.h @@ -29,12 +29,51 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/capped_visibility.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_options_gen.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/virtual_collection_options.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" +#include "mongo/db/storage/durable_catalog_entry.h" #include "mongo/db/storage/external_record_store.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/snapshot.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { class VirtualCollectionImpl final : public Collection { @@ -74,8 +113,6 @@ class VirtualCollectionImpl final : public Collection { return Status(ErrorCodes::UnknownError, "unknown"); }; - void setCommitted(bool) {} - bool isInitialized() const final { return true; }; @@ -241,6 +278,16 @@ class VirtualCollectionImpl final : public Collection { return false; } + bool timeseriesBucketingParametersMayHaveChanged() const final { + unimplementedTasserted(); + return false; + } + + void setTimeseriesBucketingParametersChanged(OperationContext* opCtx, + boost::optional value) final { + unimplementedTasserted(); + } + bool getRequiresTimeseriesExtendedRangeSupport() const final { // A virtual collection is never a time-series collection, so it never requires // 
extended-range support. @@ -454,16 +501,10 @@ class VirtualCollectionImpl final : public Collection { return 0; } - boost::optional getMinimumVisibleSnapshot() const final { - return boost::none; - } - boost::optional getMinimumValidSnapshot() const final { return boost::none; } - void setMinimumVisibleSnapshot(Timestamp newMinimumVisibleSnapshot) final {} - void setMinimumValidSnapshot(Timestamp newMinimumValidSnapshot) final {} boost::optional getTimeseriesOptions() const final { diff --git a/src/mongo/db/catalog_raii.cpp b/src/mongo/db/catalog_raii.cpp index bf8cdc581b84e..875c411d26462 100644 --- a/src/mongo/db/catalog_raii.cpp +++ b/src/mongo/db/catalog_raii.cpp @@ -29,6 +29,18 @@ #include "mongo/db/catalog_raii.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/catalog_helper.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" @@ -39,19 +51,20 @@ #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/shard_role.h" -#include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/logv2/log.h" -#include "mongo/util/fail_point.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage namespace mongo { namespace { -MONGO_FAIL_POINT_DEFINE(hangBeforeAutoGetCollectionLockFreeShardedStateAccess); - /** * Performs some sanity checks on the collection and database. */ @@ -62,13 +75,15 @@ void verifyDbAndCollection(OperationContext* opCtx, const Collection* coll, Database* db, bool verifyWriteEligible) { - invariant(!nsOrUUID.uuid() || coll, - str::stream() << "Collection for " << resolvedNss.ns() - << " disappeared after successfully resolving " << nsOrUUID.toString()); + invariant(!nsOrUUID.isUUID() || coll, + str::stream() << "Collection for " << resolvedNss.toStringForErrorMsg() + << " disappeared after successfully resolving " + << nsOrUUID.toStringForErrorMsg()); - invariant(!nsOrUUID.uuid() || db, - str::stream() << "Database for " << resolvedNss.ns() - << " disappeared after successfully resolving " << nsOrUUID.toString()); + invariant(!nsOrUUID.isUUID() || db, + str::stream() << "Database for " << resolvedNss.toStringForErrorMsg() + << " disappeared after successfully resolving " + << nsOrUUID.toStringForErrorMsg()); // In most cases we expect modifications for system.views to upgrade MODE_IX to MODE_X before // taking the lock. One exception is a query by UUID of system.views in a transaction. Usual @@ -84,21 +99,20 @@ void verifyDbAndCollection(OperationContext* opCtx, } // Verify that we are using the latest instance if we intend to perform writes. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
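In the `verifyDbAndCollection()` hunk that follows, the write-eligibility check no longer sits behind `gPointInTimeCatalogLookups`: a collection instance that is not the latest one published by the catalog is turned into a write conflict so the operation retries with a fresh lookup. A simplified, standard-library-only model of that idea, using a hypothetical generation counter in place of `isLatestCollection()`:

```cpp
#include <cstdint>
#include <stdexcept>
#include <string>

struct WriteConflict : std::runtime_error {
    using std::runtime_error::runtime_error;
};

// Hypothetical model: each catalog publication bumps a generation counter and
// stamps the collection instances it contains.
struct CollectionInstance {
    std::string ns;
    std::uint64_t catalogGeneration;
};

void verifyWriteEligible(const CollectionInstance& coll, std::uint64_t latestGeneration) {
    if (coll.catalogGeneration != latestGeneration) {
        // The caller is expected to catch this, abandon its snapshot and retry
        // with a freshly looked-up instance.
        throw WriteConflict("Unable to write to collection '" + coll.ns +
                            "' due to catalog changes; please retry the operation");
    }
}
```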
- if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe() && - verifyWriteEligible) { + if (verifyWriteEligible) { auto latest = CollectionCatalog::latest(opCtx); - if (!latest->containsCollection(opCtx, coll)) { - throwWriteConflictException(str::stream() - << "Unable to write to collection '" << coll->ns() - << "' due to catalog changes; please " - "retry the operation"); + if (!latest->isLatestCollection(opCtx, coll)) { + throwWriteConflictException(str::stream() << "Unable to write to collection '" + << coll->ns().toStringForErrorMsg() + << "' due to catalog changes; please " + "retry the operation"); } if (opCtx->recoveryUnit()->isActive()) { const auto mySnapshot = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); if (mySnapshot && *mySnapshot < coll->getMinimumValidSnapshot()) { throwWriteConflictException(str::stream() - << "Unable to write to collection '" << coll->ns() + << "Unable to write to collection '" + << coll->ns().toStringForErrorMsg() << "' due to snapshot timestamp " << *mySnapshot << " being older than collection minimum " << *coll->getMinimumValidSnapshot() @@ -106,25 +120,6 @@ void verifyDbAndCollection(OperationContext* opCtx, } } } - - // If we are in a transaction, we cannot yield and wait when there are pending catalog - // changes. Instead, we must return an error in such situations. We ignore this restriction - // for the oplog, since it never has pending catalog changes. - if (opCtx->inMultiDocumentTransaction() && resolvedNss != NamespaceString::kRsOplogNamespace) { - if (auto minSnapshot = coll->getMinimumVisibleSnapshot()) { - auto mySnapshot = - opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx).get_value_or( - opCtx->recoveryUnit()->getCatalogConflictingTimestamp()); - - uassert( - ErrorCodes::SnapshotUnavailable, - str::stream() << "Unable to read from a snapshot due to pending collection catalog " - "changes; please retry the operation. Snapshot timestamp is " - << mySnapshot.toString() << ". 
Collection minimum is " - << minSnapshot->toString(), - mySnapshot.isNull() || mySnapshot >= minSnapshot.value()); - } - } } } // namespace @@ -132,8 +127,9 @@ void verifyDbAndCollection(OperationContext* opCtx, AutoGetDb::AutoGetDb(OperationContext* opCtx, const DatabaseName& dbName, LockMode mode, + boost::optional tenantLockMode, Date_t deadline) - : AutoGetDb(opCtx, dbName, mode, deadline, [] { + : AutoGetDb(opCtx, dbName, mode, tenantLockMode, deadline, [] { Lock::GlobalLockSkipOptions options; return options; }()) {} @@ -141,9 +137,12 @@ AutoGetDb::AutoGetDb(OperationContext* opCtx, AutoGetDb::AutoGetDb(OperationContext* opCtx, const DatabaseName& dbName, LockMode mode, + boost::optional tenantLockMode, Date_t deadline, Lock::DBLockSkipOptions options) - : _dbName(dbName), _dbLock(opCtx, dbName, mode, deadline, std::move(options)), _db([&] { + : _dbName(dbName), + _dbLock(opCtx, dbName, mode, deadline, std::move(options), tenantLockMode), + _db([&] { auto databaseHolder = DatabaseHolder::get(opCtx); return databaseHolder->getDb(opCtx, dbName); }()) { @@ -152,20 +151,15 @@ AutoGetDb::AutoGetDb(OperationContext* opCtx, } bool AutoGetDb::canSkipRSTLLock(const NamespaceStringOrUUID& nsOrUUID) { - const auto& maybeNss = nsOrUUID.nss(); - - if (maybeNss) { - const auto& nss = *maybeNss; - return repl::canCollectionSkipRSTLLockAcquisition(nss); + if (nsOrUUID.isNamespaceString()) { + return repl::canCollectionSkipRSTLLockAcquisition(nsOrUUID.nss()); } return false; } bool AutoGetDb::canSkipFlowControlTicket(const NamespaceStringOrUUID& nsOrUUID) { - const auto& maybeNss = nsOrUUID.nss(); - - if (maybeNss) { - const auto& nss = *maybeNss; + if (nsOrUUID.isNamespaceString()) { + const auto& nss = nsOrUUID.nss(); bool notReplicated = !nss.isReplicated(); // TODO: Improve comment // @@ -194,14 +188,20 @@ AutoGetDb AutoGetDb::createForAutoGetCollection( dbLockOptions.skipRSTLLock = canSkipRSTLLock(nsOrUUID); dbLockOptions.skipFlowControlTicket = canSkipFlowControlTicket(nsOrUUID); - // TODO SERVER-67817 Use NamespaceStringOrUUID::db() instead. return AutoGetDb(opCtx, - nsOrUUID.nss() ? nsOrUUID.nss()->dbName() : *nsOrUUID.dbName(), + nsOrUUID.dbName(), isSharedLockMode(modeColl) ? MODE_IS : MODE_IX, + boost::none /* tenantLockMode */, deadline, std::move(dbLockOptions)); } +AutoGetDb::AutoGetDb(OperationContext* opCtx, + const DatabaseName& dbName, + LockMode mode, + Date_t deadline) + : AutoGetDb(opCtx, dbName, mode, boost::none, deadline) {} + Database* AutoGetDb::ensureDbExists(OperationContext* opCtx) { if (_db) { return _db; @@ -229,8 +229,8 @@ CollectionNamespaceOrUUIDLock::CollectionNamespaceOrUUIDLock(OperationContext* o LockMode mode, Date_t deadline) : _lock([opCtx, &nsOrUUID, mode, deadline] { - if (auto& ns = nsOrUUID.nss()) { - return Lock::CollectionLock{opCtx, *ns, mode, deadline}; + if (nsOrUUID.isNamespaceString()) { + return Lock::CollectionLock{opCtx, nsOrUUID.nss(), mode, deadline}; } auto resolveNs = [opCtx, &nsOrUUID] { @@ -286,14 +286,19 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx, // exclusive lock. if (modeColl == MODE_X) { invariant(!opCtx->recoveryUnit()->isActive(), - str::stream() << "Snapshot opened before acquiring X lock for " << nsOrUUID); + str::stream() << "Snapshot opened before acquiring X lock for " + << toStringForLogging(nsOrUUID)); } // Acquire the collection locks. If there's only one lock, then it can simply be taken. 
If // there are many, however, the locks must be taken in _ascending_ ResourceId order to avoid // deadlocks across threads. if (secondaryNssOrUUIDs.empty()) { - uassertStatusOK(nsOrUUID.isNssValid()); + uassert(ErrorCodes::InvalidNamespace, + fmt::format("Namespace {} is not a valid collection name", + nsOrUUID.toStringForErrorMsg()), + nsOrUUID.isUUID() || (nsOrUUID.isNamespaceString() && nsOrUUID.nss().isValid())); + _collLocks.emplace_back(opCtx, nsOrUUID, modeColl, deadline); } else { catalog_helper::acquireCollectionLocksInResourceIdOrder( @@ -321,21 +326,19 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx, _autoDb.refreshDbReferenceIfNull(opCtx); } - checkCollectionUUIDMismatch(opCtx, _resolvedNss, _coll, options._expectedUUID); verifyDbAndCollection( opCtx, modeColl, nsOrUUID, _resolvedNss, _coll.get(), _autoDb.getDb(), verifyWriteEligible); for (auto& secondaryNssOrUUID : secondaryNssOrUUIDs) { auto secondaryResolvedNss = catalog->resolveNamespaceStringOrUUID(opCtx, secondaryNssOrUUID); auto secondaryColl = catalog->lookupCollectionByNamespace(opCtx, secondaryResolvedNss); - auto secondaryDbName = secondaryNssOrUUID.dbName() ? secondaryNssOrUUID.dbName() - : secondaryNssOrUUID.nss()->dbName(); + auto secondaryDbName = secondaryNssOrUUID.dbName(); verifyDbAndCollection(opCtx, MODE_IS, secondaryNssOrUUID, secondaryResolvedNss, secondaryColl, - databaseHolder->getDb(opCtx, *secondaryDbName), + databaseHolder->getDb(opCtx, secondaryDbName), verifyWriteEligible); } @@ -354,6 +357,8 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx, _coll.setShardKeyPattern(collDesc.getKeyPattern()); } + checkCollectionUUIDMismatch(opCtx, *catalog, _resolvedNss, _coll, options._expectedUUID); + return; } @@ -365,13 +370,13 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx, // namespace were a view, the collection UUID mismatch check would have failed above. 
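The comment above keeps the long-standing rule that multiple collection locks must be taken in ascending ResourceId order so that two threads can never acquire an overlapping pair in opposite orders and deadlock. A self-contained illustration of that ordering discipline with plain `std::mutex` (an integer id stands in for a ResourceId):

```cpp
#include <algorithm>
#include <mutex>
#include <utility>
#include <vector>

// Purely illustrative: a resource is an integer id paired with its mutex.
using Resource = std::pair<int, std::mutex*>;

// Lock every requested resource in ascending id order. Two threads that need
// an overlapping set then acquire the shared mutexes in the same order, so
// neither can end up holding one while waiting forever for the other.
std::vector<std::unique_lock<std::mutex>> lockAllInOrder(std::vector<Resource> resources) {
    std::sort(resources.begin(), resources.end(),
              [](const Resource& a, const Resource& b) { return a.first < b.first; });
    std::vector<std::unique_lock<std::mutex>> held;
    held.reserve(resources.size());
    for (auto& res : resources) {
        held.emplace_back(*res.second);
    }
    return held;
}
```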
if ((_view = catalog->lookupView(opCtx, _resolvedNss))) { uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Taking " << _resolvedNss.ns() + str::stream() << "Taking " << _resolvedNss.toStringForErrorMsg() << " lock for timeseries is not allowed", viewMode == auto_get_collection::ViewMode::kViewsPermitted || !_view->timeseries()); uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Namespace " << _resolvedNss.ns() + str::stream() << "Namespace " << _resolvedNss.toStringForErrorMsg() << " is a view, not a collection", viewMode == auto_get_collection::ViewMode::kViewsPermitted); @@ -379,11 +384,10 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx, *receivedShardVersion, ShardVersion::UNSHARDED() /* wantedVersion */, ShardingState::get(opCtx)->shardId()), - str::stream() << "Namespace " << _resolvedNss + str::stream() << "Namespace " << _resolvedNss.toStringForErrorMsg() << " is a view therefore the shard " << "version attached to the request must be unset or UNSHARDED", !receivedShardVersion || *receivedShardVersion == ShardVersion::UNSHARDED()); - return; } } @@ -404,10 +408,13 @@ AutoGetCollection::AutoGetCollection(OperationContext* opCtx, *receivedShardVersion, boost::none /* wantedVersion */, ShardingState::get(opCtx)->shardId()), - str::stream() << "No metadata for namespace " << _resolvedNss << " therefore the shard " + str::stream() << "No metadata for namespace " << _resolvedNss.toStringForErrorMsg() + << " therefore the shard " << "version attached to the request must be unset, UNSHARDED or IGNORED", !receivedShardVersion || *receivedShardVersion == ShardVersion::UNSHARDED() || ShardVersion::isPlacementVersionIgnored(*receivedShardVersion)); + + checkCollectionUUIDMismatch(opCtx, *catalog, _resolvedNss, _coll, options._expectedUUID); } Collection* AutoGetCollection::getWritableCollection(OperationContext* opCtx) { @@ -438,107 +445,6 @@ Collection* AutoGetCollection::getWritableCollection(OperationContext* opCtx) { return _writableColl; } -AutoGetCollectionLockFree::AutoGetCollectionLockFree(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - RestoreFromYieldFn restoreFromYield, - Options options) - : _lockFreeReadsBlock(opCtx), - _globalLock(opCtx, MODE_IS, options._deadline, Lock::InterruptBehavior::kThrow, [] { - Lock::GlobalLockSkipOptions options; - options.skipRSTLLock = true; - return options; - }()) { - - auto& viewMode = options._viewMode; - - // Wait for a configured amount of time after acquiring locks if the failpoint is enabled - catalog_helper::setAutoGetCollectionWaitFailpointExecute( - [&](const BSONObj& data) { sleepFor(Milliseconds(data["waitForMillis"].numberInt())); }); - - auto catalog = CollectionCatalog::get(opCtx); - _resolvedNss = catalog->resolveNamespaceStringOrUUID(opCtx, nsOrUUID); - _collection = catalog->lookupCollectionByNamespaceForRead_DONT_USE(opCtx, _resolvedNss); - - // When we restore from yield on this CollectionPtr we will update _collection above and use its - // new pointer in the CollectionPtr - _collectionPtr = CollectionPtr(_collection.get()); - _collectionPtr.makeYieldable( - opCtx, - [this, restoreFromYield = std::move(restoreFromYield)](OperationContext* opCtx, UUID uuid) { - restoreFromYield(_collection, opCtx, uuid); - return _collection.get(); - }); - - // Check that the sharding database version matches our read. 
- // Note: this must always be checked, regardless of whether the collection exists, so that the - // dbVersion of this node or the caller gets updated quickly in case either is stale. - DatabaseShardingState::assertMatchingDbVersion(opCtx, _resolvedNss.db()); - - checkCollectionUUIDMismatch(opCtx, _resolvedNss, _collectionPtr, options._expectedUUID); - - hangBeforeAutoGetCollectionLockFreeShardedStateAccess.executeIf( - [&](auto&) { hangBeforeAutoGetCollectionLockFreeShardedStateAccess.pauseWhileSet(opCtx); }, - [&](const BSONObj& data) { - return opCtx->getLogicalSessionId() && - opCtx->getLogicalSessionId()->getId() == UUID::fromCDR(data["lsid"].uuid()); - }); - - if (_collection) { - // Fetch and store the sharding collection description data needed for use during the - // operation. The shardVersion will be checked later if the shard filtering metadata is - // fetched, ensuring both that the collection description info fetched here and the routing - // table are consistent with the read request's shardVersion. - auto scopedCss = CollectionShardingState::acquire(opCtx, _collection->ns()); - auto collDesc = scopedCss->getCollectionDescription(opCtx); - if (collDesc.isSharded()) { - _collectionPtr.setShardKeyPattern(collDesc.getKeyPattern()); - } - - // If the collection exists, there is no need to check for views. - return; - } - - invariant(!options._expectedUUID); - _view = catalog->lookupView(opCtx, _resolvedNss); - uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Taking " << _resolvedNss.ns() - << " lock for timeseries is not allowed", - !_view || viewMode == auto_get_collection::ViewMode::kViewsPermitted || - !_view->timeseries()); - uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Namespace " << _resolvedNss.ns() << " is a view, not a collection", - !_view || viewMode == auto_get_collection::ViewMode::kViewsPermitted); - if (_view) { - // We are about to succeed setup as a view. No LockFree state was setup so do not mark the - // OperationContext as LFR. 
- _lockFreeReadsBlock.reset(); - } -} - -AutoGetCollectionMaybeLockFree::AutoGetCollectionMaybeLockFree( - OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - LockMode modeColl, - auto_get_collection::ViewMode viewMode, - Date_t deadline) { - if (opCtx->isLockFreeReadsOp()) { - _autoGetLockFree.emplace( - opCtx, - nsOrUUID, - [](std::shared_ptr& collection, OperationContext* opCtx, UUID uuid) { - LOGV2_FATAL(5342700, - "This is a nested lock helper and there was an attempt to " - "yield locks, which should be impossible"); - }, - AutoGetCollectionLockFree::Options{}.viewMode(viewMode).deadline(deadline)); - } else { - _autoGet.emplace(opCtx, - nsOrUUID, - modeColl, - AutoGetCollection::Options{}.viewMode(viewMode).deadline(deadline)); - } -} - struct CollectionWriter::SharedImpl { SharedImpl(CollectionWriter* parent) : _parent(parent) {} @@ -546,8 +452,7 @@ struct CollectionWriter::SharedImpl { std::function _writableCollectionInitializer; }; -CollectionWriter::CollectionWriter(OperationContext* opCtx, - ScopedCollectionAcquisition* acquisition) +CollectionWriter::CollectionWriter(OperationContext* opCtx, CollectionAcquisition* acquisition) : _acquisition(acquisition), _collection(&_storedCollection), _managed(true), @@ -558,8 +463,9 @@ CollectionWriter::CollectionWriter(OperationContext* opCtx, _storedCollection.makeYieldable(opCtx, LockedCollectionYieldRestore(opCtx, _storedCollection)); _sharedImpl->_writableCollectionInitializer = [this, opCtx]() mutable { - invariant(!_fence); - _fence = std::make_unique(opCtx, _acquisition); + if (!_fence) { + _fence = std::make_unique(opCtx, _acquisition); + } return CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForMetadataWrite( opCtx, _acquisition->nss()); @@ -638,7 +544,6 @@ Collection* CollectionWriter::getWritableCollection(OperationContext* opCtx) { [shared = _sharedImpl](OperationContext* opCtx, boost::optional) { if (shared->_parent) { shared->_parent->_writableCollection = nullptr; - shared->_parent->_fence.reset(); // Make the stored collection yieldable again as we now operate with the // same instance as is in the catalog. @@ -650,7 +555,6 @@ Collection* CollectionWriter::getWritableCollection(OperationContext* opCtx) { OperationContext* opCtx) mutable { if (shared->_parent) { shared->_parent->_writableCollection = nullptr; - shared->_parent->_fence.reset(); // Restore stored collection to its previous state. The rollback // instance is already yieldable. @@ -714,39 +618,45 @@ AutoGetOplog::AutoGetOplog(OperationContext* opCtx, OplogAccessMode mode, Date_t _oplog.makeYieldable(opCtx, LockedCollectionYieldRestore(opCtx, _oplog)); } - AutoGetChangeCollection::AutoGetChangeCollection(OperationContext* opCtx, AutoGetChangeCollection::AccessMode mode, - boost::optional tenantId, + const TenantId& tenantId, Date_t deadline) { - if (mode == AccessMode::kWriteInOplogContext) { - // The global lock must already be held. - invariant(opCtx->lockState()->isWriteLocked()); - } - - if (mode != AccessMode::kRead) { - // TODO SERVER-66715 avoid taking 'AutoGetCollection' and remove - // 'AllowLockAcquisitionOnTimestampedUnitOfWork'. - _allowLockAcquisitionTsWuow.emplace(opCtx->lockState()); + const auto changeCollectionNamespaceString = NamespaceString::makeChangeCollectionNSS(tenantId); + if (AccessMode::kRead == mode || AccessMode::kWrite == mode) { + // Treat this as a regular AutoGetCollection. + _coll.emplace(opCtx, + changeCollectionNamespaceString, + mode == AccessMode::kRead ? 
MODE_IS : MODE_IX, + AutoGetCollection::Options{}.deadline(deadline)); + return; } - - _coll.emplace(opCtx, - NamespaceString::makeChangeCollectionNSS(tenantId), - mode == AccessMode::kRead ? MODE_IS : MODE_IX, - AutoGetCollection::Options{}.deadline(deadline)); + tassert(6671506, "Invalid lock mode", AccessMode::kWriteInOplogContext == mode); + + // When writing to the change collection as part of normal operation, we avoid taking any new + // locks. The caller must already have the tenant lock that protects the tenant specific change + // stream collection from being dropped. That's sufficient for acquiring a raw collection + // pointer. + tassert(6671500, + str::stream() << "Lock not held in IX mode for the tenant " << tenantId, + opCtx->lockState()->isLockHeldForMode( + ResourceId(ResourceType::RESOURCE_TENANT, tenantId), LockMode::MODE_IX)); + auto changeCollectionPtr = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace( + opCtx, changeCollectionNamespaceString); + _changeCollection = CollectionPtr(changeCollectionPtr); + _changeCollection.makeYieldable(opCtx, LockedCollectionYieldRestore(opCtx, _changeCollection)); } const Collection* AutoGetChangeCollection::operator->() const { - return _coll ? _coll->getCollection().get() : nullptr; + return (**this).get(); } const CollectionPtr& AutoGetChangeCollection::operator*() const { - return _coll->getCollection(); + return (_coll) ? *(*_coll) : _changeCollection; } AutoGetChangeCollection::operator bool() const { - return _coll && _coll->getCollection().get(); + return static_cast(**this); } - } // namespace mongo diff --git a/src/mongo/db/catalog_raii.h b/src/mongo/db/catalog_raii.h index b03ca225f54a1..cc6ded9325715 100644 --- a/src/mongo/db/catalog_raii.h +++ b/src/mongo/db/catalog_raii.h @@ -29,14 +29,31 @@ #pragma once +#include +#include +#include +#include +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/views/view.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace auto_get_collection { @@ -94,13 +111,37 @@ class AutoGetDb { AutoGetDb(OperationContext* opCtx, const DatabaseName& dbName, LockMode mode, + boost::optional tenantLockMode, Date_t deadline, Lock::DBLockSkipOptions options); public: + /** + * Acquires a lock on the specified database 'dbName' in the requested 'mode'. + * + * If the database belongs to a tenant, then acquires a tenant lock before the database lock. + * For 'mode' MODE_IS or MODE_S acquires tenant lock in intent-shared (IS) mode, otherwise, + * acquires a tenant lock in intent-exclusive (IX) mode. + */ + AutoGetDb(OperationContext* opCtx, + const DatabaseName& dbName, + LockMode mode, + Date_t deadline = Date_t::max()); + + /** + * Acquires a lock on the specified database 'dbName' in the requested 'mode'. + * + * If the database belongs to a tenant, then acquires a tenant lock before the database lock. 
+ * For 'mode' MODE_IS or MODE_S acquires tenant lock in intent-shared (IS) mode, otherwise, + * acquires a tenant lock in intent-exclusive (IX) mode. A different, stronger tenant lock mode + * to acquire can be specified with 'tenantLockMode' parameter. Passing boost::none for the + * tenant lock mode does not skip the tenant lock, but indicates that the tenant lock in default + * mode should be acquired. + */ AutoGetDb(OperationContext* opCtx, const DatabaseName& dbName, LockMode mode, + boost::optional tenantLockMode, Date_t deadline = Date_t::max()); AutoGetDb(AutoGetDb&&) = default; @@ -303,128 +344,8 @@ class AutoGetCollection { Collection* _writableColl = nullptr; }; -/** - * RAII-style class that acquires the global MODE_IS lock. This class should only be used for reads. - * - * NOTE: Throws NamespaceNotFound if the collection UUID cannot be resolved to a nss. - * - * The collection references returned by this class will no longer be safe to retain after this - * object goes out of scope. This object ensures the continued existence of a Collection reference, - * if the collection exists when this object is instantiated. - * - * NOTE: this class is not safe to instantiate outside of AutoGetCollectionForReadLockFree. For - * example, it does not perform database or collection level shard version checks; nor does it - * establish a consistent storage snapshot with which to read. - */ -class AutoGetCollectionLockFree { - AutoGetCollectionLockFree(const AutoGetCollectionLockFree&) = delete; - AutoGetCollectionLockFree& operator=(const AutoGetCollectionLockFree&) = delete; - -public: - /** - * Function used to customize restore after yield behavior - */ - using RestoreFromYieldFn = - std::function&, OperationContext*, UUID)>; - - using Options = auto_get_collection::Options; - - /** - * Used by AutoGetCollectionForReadLockFree where it provides implementation for restore after - * yield. - */ - AutoGetCollectionLockFree(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - RestoreFromYieldFn restoreFromYield, - Options options = {}); - - explicit operator bool() const { - // Use the CollectionPtr because it is updated if it yields whereas _collection is not until - // restore. - return static_cast(_collectionPtr); - } - - /** - * AutoGetCollectionLockFree can be used as a Collection pointer with the -> operator. - */ - const Collection* operator->() const { - return getCollection().get(); - } - - const CollectionPtr& operator*() const { - return getCollection(); - } - - /** - * Returns nullptr if the collection didn't exist. - * - * Deprecated in favor of the new ->(), *() and bool() accessors above! - */ - const CollectionPtr& getCollection() const { - return _collectionPtr; - } - - /** - * Returns nullptr if the view didn't exist. - */ - const ViewDefinition* getView() const { - return _view.get(); - } - - /** - * Returns the resolved namespace of the collection or view. - */ - const NamespaceString& getNss() const { - return _resolvedNss; - } - -private: - // Indicate that we are lock-free on code paths that can run either lock-free or locked for - // different kinds of operations. Note: this class member is currently declared first so that it - // destructs last, as a safety measure, but not because it is currently depended upon behavior. 
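The new `AutoGetDb` overloads documented above pick a default tenant lock mode from the database lock mode (MODE_IS/MODE_S map to a tenant IS lock, anything else to IX) unless the caller supplies a stronger `tenantLockMode`. A small sketch of that defaulting rule; only the mode names come from the comment, the helper itself is illustrative:

```cpp
#include <optional>

enum class LockMode { MODE_IS, MODE_IX, MODE_S, MODE_X };

inline bool isSharedLockMode(LockMode mode) {
    return mode == LockMode::MODE_IS || mode == LockMode::MODE_S;
}

// Default the tenant lock to an intent mode matching the database lock, unless
// the caller explicitly asked for something stronger.
inline LockMode effectiveTenantLockMode(LockMode dbMode, std::optional<LockMode> requested) {
    if (requested) {
        return *requested;
    }
    return isSharedLockMode(dbMode) ? LockMode::MODE_IS : LockMode::MODE_IX;
}
```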
- boost::optional _lockFreeReadsBlock; - - Lock::GlobalLock _globalLock; - - // If the object was instantiated with a UUID, contains the resolved namespace, otherwise it is - // the same as the input namespace string - NamespaceString _resolvedNss; - - // The Collection shared_ptr will keep the Collection instance alive even if it is removed from - // the CollectionCatalog while this lock-free operation runs. - std::shared_ptr _collection; - - // The CollectionPtr is the access point to the Collection instance for callers. - CollectionPtr _collectionPtr; - - std::shared_ptr _view; -}; - -/** - * This is a nested lock helper. If a higher level operation is running a lock-free read, then this - * helper will follow suite and instantiate a AutoGetCollectionLockFree. Otherwise, it will - * instantiate a regular AutoGetCollection helper. - */ -class AutoGetCollectionMaybeLockFree { - AutoGetCollectionMaybeLockFree(const AutoGetCollectionMaybeLockFree&) = delete; - AutoGetCollectionMaybeLockFree& operator=(const AutoGetCollectionMaybeLockFree&) = delete; - -public: - /** - * Decides whether to instantiate a lock-free or locked helper based on whether a lock-free - * operation is set on the opCtx. - */ - AutoGetCollectionMaybeLockFree( - OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - LockMode modeColl, - auto_get_collection::ViewMode viewMode = auto_get_collection::ViewMode::kViewsForbidden, - Date_t deadline = Date_t::max()); - -private: - boost::optional _autoGet; - boost::optional _autoGetLockFree; -}; +class CollectionAcquisition; +class ScopedLocalCatalogWriteFence; /** * RAII-style class to handle the lifetime of writable Collections. @@ -432,22 +353,44 @@ class AutoGetCollectionMaybeLockFree { * AutoGetCollection. This class can serve as an adaptor to unify different methods of acquiring a * writable collection. * - * It is safe to re-use an instance for multiple WriteUnitOfWorks or to destroy it before the active - * WriteUnitOfWork finishes. + * It is safe to re-use an instance for multiple WriteUnitOfWorks. It is not safe to destroy it + * before the active WriteUnitOfWork finishes. */ -class ScopedCollectionAcquisition; -class ScopedLocalCatalogWriteFence; - class CollectionWriter final { public: - // This constructor indicates to the shard role subsystem that the subsequent code enteres into + // This constructor indicates to the shard role subsystem that the subsequent code enters into // local DDL land and that the content of the local collection should not be trusted until it // goes out of scope. // - // See the comments on ScopedCollectionAcquisition for more details. + // On destruction, if `getWritableCollection` been called during the object lifetime, the + // `acquisition` will be advanced to reflect the local catalog changes. It is important that + // when this destructor is called, the WUOW under which the catalog changes have been performed + // has already been commited or rollbacked. If it hasn't and the WUOW later rollbacks, the + // acquisition is left in an invalid state and must not be used. + // + // Example usage pattern: + // writeConflictRetry { + // auto coll = acquireCollection(...); + // CollectionWriter collectionWriter(opCtx, &coll); + // WriteUnitOfWork wuow(); + // collectionWriter.getWritableCollection().xxxx(); + // wouw.commit(); + // } + // + // Example usage pattern when the acquisition is held higher up by the caller: + // auto coll = acquireCollection(...); + // ... 
+ // writeConflictRetry { + // // It is important that ~CollectionWriter will be executed after the ~WriteUnitOfWork + // // commits or rollbacks. + // CollectionWriter collectionWriter(opCtx, &coll); + // WriteUnitOfWork wuow(); + // collectionWriter.getWritableCollection().xxxx(); + // wouw.commit(); + // } // // TODO (SERVER-73766): Only this constructor should remain in use - CollectionWriter(OperationContext* opCtx, ScopedCollectionAcquisition* acquisition); + CollectionWriter(OperationContext* opCtx, CollectionAcquisition* acquisition); // Gets the collection from the catalog for the provided uuid CollectionWriter(OperationContext* opCtx, const UUID& uuid); @@ -490,8 +433,8 @@ class CollectionWriter final { private: // This group of values is only operated on for code paths that go through the - // `ScopedCollectionAcquisition` constructor. - ScopedCollectionAcquisition* _acquisition = nullptr; + // `CollectionAcquisition` constructor. + CollectionAcquisition* _acquisition = nullptr; std::unique_ptr _fence; // If this class is instantiated with the constructors that take UUID or nss we need somewhere @@ -585,12 +528,10 @@ class AutoGetOplog { * A RAII-style class to acquire lock to a particular tenant's change collection. * * A change collection can be accessed in the following modes: - * kWriteInOplogContext - perform writes to the change collection by taking the IX lock on a - * tenant's change collection. The change collection is written along with - * the oplog in the same 'WriteUnitOfWork' and assumes that the global IX - * lock is already held. - * kWrite - takes the IX lock on a tenant's change collection to perform any writes. - * kRead - takes the IS lock on a tenant's change collection to perform any reads. + * kWriteInOplogContext - assumes that the tenant IX lock has been pre-acquired. The user can + * perform reads and writes to the change collection. + * kWrite - behaves the same as 'AutoGetCollection::AutoGetCollection()' with lock mode MODE_IX. + * kRead - behaves the same as 'AutoGetCollection::AutoGetCollection()' with lock mode MODE_IS. */ class AutoGetChangeCollection { public: @@ -598,7 +539,7 @@ class AutoGetChangeCollection { AutoGetChangeCollection(OperationContext* opCtx, AccessMode mode, - boost::optional tenantId, + const TenantId& tenantId, Date_t deadline = Date_t::max()); AutoGetChangeCollection(const AutoGetChangeCollection&) = delete; @@ -609,9 +550,10 @@ class AutoGetChangeCollection { explicit operator bool() const; private: + // Used when the 'kWrite' or 'kRead' access mode is used. boost::optional _coll; - - boost::optional _allowLockAcquisitionTsWuow; + // Used when the 'kWriteInOplogContext' access mode is used. + CollectionPtr _changeCollection; }; } // namespace mongo diff --git a/src/mongo/db/catalog_raii_test.cpp b/src/mongo/db/catalog_raii_test.cpp index 7ed2fc9f3b50e..524c053aeeb52 100644 --- a/src/mongo/db/catalog_raii_test.cpp +++ b/src/mongo/db/catalog_raii_test.cpp @@ -27,15 +27,32 @@ * it in the license file. 
*/ +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_mock.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/storage/recovery_unit_noop.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -240,8 +257,8 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionSecondaryNamespacesSingleDb) { ASSERT(opCtx1->lockState()->isRSTLLocked()); ASSERT(opCtx1->lockState()->isReadLocked()); // Global lock check ASSERT(opCtx1->lockState()->isDbLockedForMode(nss.dbName(), MODE_IS)); - ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss1.db(), MODE_IS)); - ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss2.db(), MODE_IS)); + ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss1.dbName(), MODE_IS)); + ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss2.dbName(), MODE_IS)); ASSERT(opCtx1->lockState()->isCollectionLockedForMode(nss, MODE_IS)); ASSERT(opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNss1, MODE_IS)); ASSERT(opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNss2, MODE_IS)); @@ -249,8 +266,8 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionSecondaryNamespacesSingleDb) { ASSERT(!opCtx1->lockState()->isRSTLExclusive()); ASSERT(!opCtx1->lockState()->isGlobalLockedRecursively()); ASSERT(!opCtx1->lockState()->isWriteLocked()); - ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb1.db(), MODE_IS)); - ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb2.db(), MODE_IS)); + ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb1.dbName(), MODE_IS)); + ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb2.dbName(), MODE_IS)); ASSERT(!opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNssOtherDb1, MODE_IS)); ASSERT(!opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNssOtherDb2, MODE_IS)); @@ -276,16 +293,16 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionMultiNamespacesMODEIX) { ASSERT(opCtx1->lockState()->isRSTLLocked()); ASSERT(opCtx1->lockState()->isWriteLocked()); // Global lock check ASSERT(opCtx1->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); - ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss1.db(), MODE_IX)); - ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss2.db(), MODE_IX)); + ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss1.dbName(), MODE_IX)); + ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss2.dbName(), MODE_IX)); ASSERT(opCtx1->lockState()->isCollectionLockedForMode(nss, MODE_IX)); ASSERT(opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNss1, MODE_IX)); ASSERT(opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNss2, MODE_IX)); 
ASSERT(!opCtx1->lockState()->isRSTLExclusive()); ASSERT(!opCtx1->lockState()->isGlobalLockedRecursively()); - ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb1.db(), MODE_IX)); - ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb2.db(), MODE_IX)); + ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb1.dbName(), MODE_IX)); + ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb2.dbName(), MODE_IX)); ASSERT(!opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNssOtherDb1, MODE_IX)); ASSERT(!opCtx1->lockState()->isCollectionLockedForMode(kSecondaryNssOtherDb2, MODE_IX)); @@ -303,11 +320,11 @@ TEST_F(CatalogRAIITestFixture, AutoGetDbSecondaryNamespacesSingleDb) { ASSERT(opCtx1->lockState()->isRSTLLocked()); ASSERT(opCtx1->lockState()->isReadLocked()); // Global lock check ASSERT(opCtx1->lockState()->isDbLockedForMode(nss.dbName(), MODE_IS)); - ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss1.db(), MODE_IS)); - ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss2.db(), MODE_IS)); + ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss1.dbName(), MODE_IS)); + ASSERT(opCtx1->lockState()->isDbLockedForMode(kSecondaryNss2.dbName(), MODE_IS)); - ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb1.db(), MODE_IS)); - ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb2.db(), MODE_IS)); + ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb1.dbName(), MODE_IS)); + ASSERT(!opCtx1->lockState()->isDbLockedForMode(kSecondaryNssOtherDb2.dbName(), MODE_IS)); ASSERT(!opCtx1->lockState()->isRSTLExclusive()); ASSERT(!opCtx1->lockState()->isGlobalLockedRecursively()); ASSERT(!opCtx1->lockState()->isWriteLocked()); @@ -321,7 +338,7 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionMultiNssCollLockDeadline) { // Take a MODE_X collection lock on kSecondaryNss1. boost::optional autoGetCollWithXLock; autoGetCollWithXLock.emplace(client1.second.get(), kSecondaryNss1, MODE_X); - ASSERT(client1.second->lockState()->isDbLockedForMode(kSecondaryNss1.db(), MODE_IX)); + ASSERT(client1.second->lockState()->isDbLockedForMode(kSecondaryNss1.dbName(), MODE_IX)); ASSERT(client1.second->lockState()->isCollectionLockedForMode(kSecondaryNss1, MODE_X)); // Now trying to take a MODE_IS lock on kSecondaryNss1 as a secondary collection should fail. 
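The deadline tests that remain above all follow one pattern: one client holds a conflicting exclusive lock while another attempts the acquisition with a deadline and is expected to time out rather than block indefinitely. A standard-library-only analogue of that pattern using `std::timed_mutex` (names and timeouts are illustrative):

```cpp
#include <cassert>
#include <chrono>
#include <mutex>
#include <thread>

int main() {
    std::timed_mutex resource;

    // "client1" holds the exclusive lock for the duration of the test.
    std::unique_lock<std::timed_mutex> exclusive(resource);

    // "client2" tries to acquire the same resource with a deadline and is
    // expected to give up instead of blocking indefinitely.
    std::thread client2([&resource] {
        bool acquired = resource.try_lock_for(std::chrono::milliseconds(50));
        assert(!acquired);  // the conflicting holder never released within the deadline
        if (acquired) {
            resource.unlock();
        }
    });
    client2.join();
}
```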
@@ -361,52 +378,6 @@ TEST_F(CatalogRAIITestFixture, AutoGetCollectionMultiNssCollLockDeadline) { AutoGetCollection::Options{}.secondaryNssOrUUIDs(secondaryNamespacesConflict)); } -TEST_F(CatalogRAIITestFixture, AutoGetCollectionLockFreeGlobalLockDeadline) { - Lock::GlobalLock gLock1(client1.second.get(), MODE_X); - ASSERT(client1.second->lockState()->isLocked()); - failsWithLockTimeout( - [&] { - AutoGetCollectionLockFree coll( - client2.second.get(), - nss, - [](std::shared_ptr&, OperationContext*, UUID) {}, - AutoGetCollectionLockFree::Options{}.deadline(Date_t::now() + timeoutMs)); - }, - timeoutMs); -} - -TEST_F(CatalogRAIITestFixture, AutoGetCollectionLockFreeCompatibleWithCollectionExclusiveLock) { - Lock::DBLock dbLock1(client1.second.get(), nss.dbName(), MODE_IX); - ASSERT(client1.second->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); - Lock::CollectionLock collLock1(client1.second.get(), nss, MODE_X); - ASSERT(client1.second->lockState()->isCollectionLockedForMode(nss, MODE_X)); - - AutoGetCollectionLockFree coll( - client2.second.get(), nss, [](std::shared_ptr&, OperationContext*, UUID) { - }); - ASSERT(client2.second->lockState()->isLocked()); -} - -TEST_F(CatalogRAIITestFixture, AutoGetCollectionLockFreeCompatibleWithDatabaseExclusiveLock) { - Lock::DBLock dbLock1(client1.second.get(), nss.dbName(), MODE_X); - ASSERT(client1.second->lockState()->isDbLockedForMode(nss.dbName(), MODE_X)); - - AutoGetCollectionLockFree coll( - client2.second.get(), nss, [](std::shared_ptr&, OperationContext*, UUID) { - }); - ASSERT(client2.second->lockState()->isLocked()); -} - -TEST_F(CatalogRAIITestFixture, AutoGetCollectionLockFreeCompatibleWithRSTLExclusiveLock) { - Lock::ResourceLock rstl(client1.second.get(), resourceIdReplicationStateTransitionLock, MODE_X); - ASSERT(client1.second->lockState()->isRSTLExclusive()); - - AutoGetCollectionLockFree coll( - client2.second.get(), nss, [](std::shared_ptr&, OperationContext*, UUID) { - }); - ASSERT(client2.second->lockState()->isLocked()); -} - using ReadSource = RecoveryUnit::ReadSource; class RecoveryUnitMock : public RecoveryUnitNoop { @@ -467,8 +438,8 @@ TEST_F(CatalogRAIITestFixture, AutoGetDBDifferentTenantsConflictingNamespaces) { auto tenant1 = TenantId(OID::gen()); auto tenant2 = TenantId(OID::gen()); - DatabaseName dbName1(tenant1, db); - DatabaseName dbName2(tenant2, db); + DatabaseName dbName1 = DatabaseName::createDatabaseName_forTest(tenant1, db); + DatabaseName dbName2 = DatabaseName::createDatabaseName_forTest(tenant2, db); AutoGetDb db1(client1.second.get(), dbName1, MODE_X); AutoGetDb db2(client2.second.get(), dbName2, MODE_X); @@ -479,7 +450,7 @@ TEST_F(CatalogRAIITestFixture, AutoGetDBDifferentTenantsConflictingNamespaces) { TEST_F(CatalogRAIITestFixture, AutoGetDBWithTenantHitsDeadline) { auto db = "db1"; - DatabaseName dbName(TenantId(OID::gen()), db); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), db); Lock::DBLock dbLock1(client1.second.get(), dbName, MODE_X); ASSERT(client1.second->lockState()->isDbLockedForMode(dbName, MODE_X)); diff --git a/src/mongo/db/catalog_shard_feature_flag.idl b/src/mongo/db/catalog_shard_feature_flag.idl index be166156b9d0e..777ad1aa79ffd 100644 --- a/src/mongo/db/catalog_shard_feature_flag.idl +++ b/src/mongo/db/catalog_shard_feature_flag.idl @@ -30,14 +30,9 @@ global: cpp_namespace: "mongo" feature_flags: - featureFlagCatalogShard: - description: "Feature flag for enabling shared config server/shard server cluster role" - cpp_varname: 
gFeatureFlagCatalogShard - default: true - version: 7.0 - featureFlagTransitionToCatalogShard: - description: "Feature flag for transitioning a config server in and out of catalog shard mode" + description: "Feature flag for transitioning a config server in and out of config shard mode" cpp_varname: gFeatureFlagTransitionToCatalogShard default: true version: 7.0 + shouldBeFCVGated: true diff --git a/src/mongo/db/change_collection_expired_change_remover_test.cpp b/src/mongo/db/change_collection_expired_change_remover_test.cpp index 28779c57af730..073d7d4e9f941 100644 --- a/src/mongo/db/change_collection_expired_change_remover_test.cpp +++ b/src/mongo/db/change_collection_expired_change_remover_test.cpp @@ -27,32 +27,59 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/change_collection_expired_documents_remover.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/change_collection_truncate_markers.h" #include "mongo/db/change_stream_change_collection_manager.h" -#include "mongo/db/change_stream_options_manager.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/change_streams_cluster_parameter_gen.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/internal_plans.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/server_parameter_with_storage.h" #include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction_resources.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/duration.h" -#include "pipeline/change_stream_test_helpers.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -101,7 +128,7 @@ class ChangeCollectionExpiredChangeRemoverTest : public CatalogTestFixture { } std::vector readChangeCollection(OperationContext* opCtx, - boost::optional tenantId) { + const TenantId& tenantId) { auto changeCollection = AutoGetChangeCollection{opCtx, AutoGetChangeCollection::AccessMode::kRead, tenantId}; @@ -127,20 +154,26 @@ class ChangeCollectionExpiredChangeRemoverTest : public CatalogTestFixture { } size_t 
removeExpiredChangeCollectionsDocuments(OperationContext* opCtx, - boost::optional tenantId, + const TenantId& tenantId, Date_t expirationTime) { // Acquire intent-exclusive lock on the change collection. Early exit if the collection // doesn't exist. - const auto changeCollection = - AutoGetChangeCollection{opCtx, AutoGetChangeCollection::AccessMode::kWrite, tenantId}; + const auto changeCollection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::makeChangeCollectionNSS(tenantId), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); // Get the 'maxRecordIdBound' and perform the removal of the expired documents. const auto maxRecordIdBound = ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata( - opCtx, &*changeCollection) + opCtx, changeCollection) ->maxRecordIdBound; - return ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocuments( - opCtx, &*changeCollection, maxRecordIdBound, expirationTime); + return ChangeStreamChangeCollectionManager:: + removeExpiredChangeCollectionsDocumentsWithCollScan( + opCtx, changeCollection, maxRecordIdBound, expirationTime); } const TenantId _tenantId; @@ -168,58 +201,26 @@ class ChangeCollectionTruncateExpirationTest : public ChangeCollectionExpiredCha changeStreamsParam->setValue(oldSettings, _tenantId).ignore(); } - void insertDocumentToChangeCollection(OperationContext* opCtx, - const TenantId& tenantId, - const BSONObj& obj) { - WriteUnitOfWork wuow(opCtx); - ChangeCollectionExpiredChangeRemoverTest::insertDocumentToChangeCollection( - opCtx, tenantId, obj); - const auto wallTime = now(); - Timestamp timestamp{wallTime}; - RecordId recordId = - record_id_helpers::keyForOptime(timestamp, KeyFormat::String).getValue(); - - _truncateMarkers->updateCurrentMarkerAfterInsertOnCommit( - opCtx, obj.objsize(), recordId, wallTime, 1); - wuow.commit(); - } - - void dropAndRecreateChangeCollection(OperationContext* opCtx, - const TenantId& tenantId, - int64_t minBytesPerMarker) { - auto& changeCollectionManager = ChangeStreamChangeCollectionManager::get(opCtx); - changeCollectionManager.dropChangeCollection(opCtx, tenantId); - _truncateMarkers.reset(); - changeCollectionManager.createChangeCollection(opCtx, tenantId); - _truncateMarkers = std::make_unique( - tenantId, std::deque{}, 0, 0, minBytesPerMarker); - } - size_t removeExpiredChangeCollectionsDocuments(OperationContext* opCtx, - boost::optional tenantId, + const TenantId& tenantId, Date_t expirationTime) { // Acquire intent-exclusive lock on the change collection. Early exit if the collection // doesn't exist. 
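Rather than passing a `minBytesPerMarker` argument through `dropAndRecreateChangeCollection`, the updated fixture and tests in the following hunks pin the relevant server parameters with scoped controllers (`featureFlagUseUnreplicatedTruncatesForDeletions`, `changeCollectionTruncateMarkersMinBytes`). A hedged sketch of that pattern, assuming each controller restores the prior value when it leaves scope:

```cpp
// Scoped parameter overrides as used by the truncate-expiration tests below.
// Assumption: each controller restores the previous value on destruction, so the
// override cannot leak into other tests.
{
    RAIIServerParameterControllerForTest truncateFeatureFlag{
        "featureFlagUseUnreplicatedTruncatesForDeletions", true};
    RAIIServerParameterControllerForTest minBytesPerMarker{
        "changeCollectionTruncateMarkersMinBytes", 1};
    // ... exercise the truncate-based expiration path ...
}  // previous values restored here
```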
- const auto changeCollection = - AutoGetChangeCollection{opCtx, AutoGetChangeCollection::AccessMode::kWrite, tenantId}; - - WriteUnitOfWork wuow(opCtx); - size_t numRecordsDeleted = 0; - while (boost::optional marker = - _truncateMarkers->peekOldestMarkerIfNeeded(opCtx)) { - auto recordStore = changeCollection->getRecordStore(); - - ASSERT_OK(recordStore->rangeTruncate( - opCtx, RecordId(), marker->lastRecord, -marker->bytes, -marker->records)); - - _truncateMarkers->popOldestMarker(); - numRecordsDeleted += marker->records; - } - wuow.commit(); - return numRecordsDeleted; + const auto changeCollection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::makeChangeCollectionNSS(tenantId), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + return ChangeStreamChangeCollectionManager:: + removeExpiredChangeCollectionsDocumentsWithTruncate( + opCtx, changeCollection, expirationTime); } - std::unique_ptr _truncateMarkers; + RAIIServerParameterControllerForTest truncateFeatureFlag{ + "featureFlagUseUnreplicatedTruncatesForDeletions", true}; }; // Tests that the last expired focument retrieved is the expected one. @@ -244,18 +245,23 @@ TEST_F(ChangeCollectionExpiredChangeRemoverTest, VerifyLastExpiredDocument) { clockSource()->advance(Milliseconds(1)); } - auto changeCollection = - AutoGetChangeCollection{opCtx, AutoGetChangeCollection::AccessMode::kRead, _tenantId}; + const auto changeCollection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::makeChangeCollectionNSS(_tenantId), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kRead), + MODE_IS); auto maxExpiredRecordId = - ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata( - opCtx, &*changeCollection) + ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata(opCtx, + changeCollection) ->maxRecordIdBound; // Get the document found at 'maxExpiredRecordId' and test it against 'lastExpiredDocument'. auto scanExecutor = InternalPlanner::collectionScan(opCtx, - &(*changeCollection), + changeCollection, PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY, InternalPlanner::Direction::FORWARD, boost::none, @@ -325,10 +331,13 @@ TEST_F(ChangeCollectionTruncateExpirationTest, ShouldRemoveOnlyExpiredDocument_M const BSONObj notExpired = BSON("_id" << "notExpired"); + RAIIServerParameterControllerForTest minBytesPerMarker{ + "changeCollectionTruncateMarkersMinBytes", + firstExpired.objsize() + secondExpired.objsize()}; + const auto timeAtStart = now(); const auto opCtx = operationContext(); - dropAndRecreateChangeCollection( - opCtx, _tenantId, firstExpired.objsize() + secondExpired.objsize()); + dropAndRecreateChangeCollection(opCtx, _tenantId); insertDocumentToChangeCollection(opCtx, _tenantId, firstExpired); clockSource()->advance(Hours(1)); @@ -352,8 +361,11 @@ TEST_F(ChangeCollectionTruncateExpirationTest, ShouldRemoveOnlyExpiredDocument_M // Tests that the last expired document is never deleted. 
TEST_F(ChangeCollectionTruncateExpirationTest, ShouldLeaveAtLeastOneDocument_Markers) { + RAIIServerParameterControllerForTest minBytesPerMarker{ + "changeCollectionTruncateMarkersMinBytes", 1}; const auto opCtx = operationContext(); - dropAndRecreateChangeCollection(opCtx, _tenantId, 1); + + dropAndRecreateChangeCollection(opCtx, _tenantId); setExpireAfterSeconds(opCtx, Seconds{1}); diff --git a/src/mongo/db/change_collection_expired_documents_remover.cpp b/src/mongo/db/change_collection_expired_documents_remover.cpp index bdab02c8d9d1d..8012bfb83e1d0 100644 --- a/src/mongo/db/change_collection_expired_documents_remover.cpp +++ b/src/mongo/db/change_collection_expired_documents_remover.cpp @@ -29,20 +29,50 @@ #include "mongo/db/change_collection_expired_documents_remover.h" -#include "mongo/db/catalog_raii.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/change_stream_change_collection_manager.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/change_streams_cluster_parameter_gen.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_feature_flags_gen.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/periodic_runner.h" -#include -#include +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -66,9 +96,18 @@ change_stream_serverless_helpers::TenantSet getConfigDbTenants(OperationContext* return tenantIds; } +bool usesUnreplicatedTruncates() { + // (Ignore FCV check): This feature flag is potentially backported to previous version of the + // server. We can't rely on the FCV version to see whether it's enabled or not. + return feature_flags::gFeatureFlagUseUnreplicatedTruncatesForDeletions + .isEnabledAndIgnoreFCVUnsafe(); +} + void removeExpiredDocuments(Client* client) { hangBeforeRemovingExpiredChanges.pauseWhileSet(); + bool useUnreplicatedTruncates = usesUnreplicatedTruncates(); + try { auto opCtx = client->makeOperationContext(); const auto clock = client->getServiceContext()->getFastClockSource(); @@ -76,11 +115,9 @@ void removeExpiredDocuments(Client* client) { // If the fail point 'injectCurrentWallTimeForRemovingDocuments' is enabled then set the // 'currentWallTime' with the provided wall time. 
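The simplification in the next hunk relies on `FailPoint::execute()` invoking its callback only while the fail point is enabled, which makes the separate `shouldFail()` guard redundant. For context, a test could inject a wall time along these lines (a sketch; the scoped enable helper is an assumption, while the `currentWallTime` payload field comes from the code in this patch):

```cpp
// Hedged sketch: enable the fail point with a payload for the duration of a scope so
// removeExpiredDocuments() picks up the injected wall time instead of the clock.
FailPointEnableBlock injectWallTime("injectCurrentWallTimeForRemovingExpiredDocuments",
                                    BSON("currentWallTime" << Date_t::now() - Hours(2)));
// ... trigger a pass of the expired-documents remover here ...
```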
- if (injectCurrentWallTimeForRemovingExpiredDocuments.shouldFail()) { - injectCurrentWallTimeForRemovingExpiredDocuments.execute([&](const BSONObj& data) { - currentWallTime = data.getField("currentWallTime").date(); - }); - } + injectCurrentWallTimeForRemovingExpiredDocuments.execute([&](const BSONObj& data) { + currentWallTime = data.getField("currentWallTime").date(); + }); // Number of documents removed in the current pass. size_t removedCount = 0; @@ -88,46 +125,71 @@ void removeExpiredDocuments(Client* client) { auto& changeCollectionManager = ChangeStreamChangeCollectionManager::get(opCtx.get()); for (const auto& tenantId : getConfigDbTenants(opCtx.get())) { - auto expiredAfterSeconds = - change_stream_serverless_helpers::getExpireAfterSeconds(tenantId); - + // Change stream collections can multiply the amount of user data inserted and deleted + // on each node. It is imperative that removal is prioritized so it can keep up with + // inserts and prevent users from running out of disk space. + ScopedAdmissionPriorityForLock skipAdmissionControl( + opCtx->lockState(), AdmissionContext::Priority::kImmediate); // Acquire intent-exclusive lock on the change collection. - AutoGetChangeCollection changeCollection{ - opCtx.get(), AutoGetChangeCollection::AccessMode::kWrite, tenantId}; + const auto changeCollection = + acquireCollection(opCtx.get(), + CollectionAcquisitionRequest( + NamespaceString::makeChangeCollectionNSS(tenantId), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx.get()), + AcquisitionPrerequisites::kWrite), + MODE_IX); - // Early exit if collection does not exist or if running on a secondary (requires - // opCtx->lockState()->isRSTLLocked()). - if (!changeCollection || + // Early exit if collection does not exist. + if (!changeCollection.exists()) { + continue; + } + // Early exit if running on a secondary and we haven't enabled the unreplicated truncate + // maintenance flag (requires opCtx->lockState()->isRSTLLocked()). + if (!useUnreplicatedTruncates && !repl::ReplicationCoordinator::get(opCtx.get()) - ->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kConfig.toString())) { + ->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kConfig)) { continue; } - // Get the metadata required for the removal of the expired change collection - // documents. Early exit if the metadata is missing, indicating that there is nothing - // to remove. - auto purgingJobMetadata = - ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata( - opCtx.get(), &*changeCollection); - if (!purgingJobMetadata) { - continue; + auto expiredAfterSeconds = + change_stream_serverless_helpers::getExpireAfterSeconds(tenantId); + + if (useUnreplicatedTruncates) { + removedCount += ChangeStreamChangeCollectionManager:: + removeExpiredChangeCollectionsDocumentsWithTruncate( + opCtx.get(), + changeCollection, + currentWallTime - Seconds(expiredAfterSeconds)); + } else { + // Get the metadata required for the removal of the expired change collection + // documents. Early exit if the metadata is missing, indicating that there is + // nothing to remove. 
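Condensed, the per-tenant branch below reduces to choosing one of the two removal entry points introduced by this patch; the restatement here elides the early exits and stats bookkeeping:

```cpp
// Restatement of the dispatch below, using the names introduced in this patch and
// omitting the early-exit and stats-reporting details.
const Date_t expirationTime = currentWallTime - Seconds(expiredAfterSeconds);
if (useUnreplicatedTruncates) {
    removedCount += ChangeStreamChangeCollectionManager::
        removeExpiredChangeCollectionsDocumentsWithTruncate(
            opCtx.get(), changeCollection, expirationTime);
} else if (auto purgingJobMetadata = ChangeStreamChangeCollectionManager::
               getChangeCollectionPurgingJobMetadata(opCtx.get(), changeCollection)) {
    removedCount += ChangeStreamChangeCollectionManager::
        removeExpiredChangeCollectionsDocumentsWithCollScan(
            opCtx.get(), changeCollection,
            purgingJobMetadata->maxRecordIdBound, expirationTime);
}
```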
+ auto purgingJobMetadata = + ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata( + opCtx.get(), changeCollection); + if (!purgingJobMetadata) { + continue; + } + + removedCount += ChangeStreamChangeCollectionManager:: + removeExpiredChangeCollectionsDocumentsWithCollScan( + opCtx.get(), + changeCollection, + purgingJobMetadata->maxRecordIdBound, + currentWallTime - Seconds(expiredAfterSeconds)); + maxStartWallTime = + std::max(maxStartWallTime, purgingJobMetadata->firstDocWallTimeMillis); } - removedCount += - ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocuments( - opCtx.get(), - &*changeCollection, - purgingJobMetadata->maxRecordIdBound, - currentWallTime - Seconds(expiredAfterSeconds)); changeCollectionManager.getPurgingJobStats().scannedCollections.fetchAndAddRelaxed(1); - maxStartWallTime = - std::max(maxStartWallTime, purgingJobMetadata->firstDocWallTimeMillis); } // The purging job metadata will be 'boost::none' if none of the change collections have // more than one oplog entry, as such the 'maxStartWallTimeMillis' will be zero. Avoid - // reporting 0 as 'maxStartWallTimeMillis'. - if (maxStartWallTime > 0) { + // reporting 0 as 'maxStartWallTimeMillis'. If using unreplicated truncates, this is + // maintained by the call to removeExpiredChangeCollectionsDocumentsWithTruncate. + if (!useUnreplicatedTruncates && maxStartWallTime > 0) { changeCollectionManager.getPurgingJobStats().maxStartWallTimeMillis.store( maxStartWallTime); } @@ -167,7 +229,11 @@ class ChangeCollectionExpiredDocumentsRemover { ChangeCollectionExpiredDocumentsRemover(ServiceContext* serviceContext) { const auto period = Seconds{gChangeCollectionExpiredDocumentsRemoverJobSleepSeconds.load()}; _jobAnchor = serviceContext->getPeriodicRunner()->makeJob( - {"ChangeCollectionExpiredDocumentsRemover", removeExpiredDocuments, period}); + {"ChangeCollectionExpiredDocumentsRemover", + removeExpiredDocuments, + period, + // TODO(SERVER-74662): Please revisit if this periodic job could be made killable. 
+ false /*isKillableByStepdown*/}); _jobAnchor.start(); } diff --git a/src/mongo/db/change_collection_truncate_markers.cpp b/src/mongo/db/change_collection_truncate_markers.cpp index f1670f93d5803..61e691f9dc574 100644 --- a/src/mongo/db/change_collection_truncate_markers.cpp +++ b/src/mongo/db/change_collection_truncate_markers.cpp @@ -28,10 +28,59 @@ */ #include "mongo/db/change_collection_truncate_markers.h" + +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/operation_context.h" +#include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" namespace mongo { + +namespace { +MONGO_FAIL_POINT_DEFINE(injectCurrentWallTimeForCheckingMarkers); + +Date_t getWallTimeToUse(OperationContext* opCtx) { + auto now = opCtx->getServiceContext()->getFastClockSource()->now(); + injectCurrentWallTimeForCheckingMarkers.execute( + [&](const BSONObj& data) { now = data.getField("currentWallTime").date(); }); + return now; +} + +bool hasMarkerWallTimeExpired(OperationContext* opCtx, + Date_t markerWallTime, + const TenantId& tenantId) { + auto now = getWallTimeToUse(opCtx); + auto expireAfterSeconds = + Seconds{change_stream_serverless_helpers::getExpireAfterSeconds(tenantId)}; + auto expirationTime = now - expireAfterSeconds; + return markerWallTime <= expirationTime; +} +} // namespace + ChangeCollectionTruncateMarkers::ChangeCollectionTruncateMarkers(TenantId tenantId, std::deque markers, int64_t leftoverRecordsCount, @@ -56,11 +105,88 @@ bool ChangeCollectionTruncateMarkers::_hasExcessMarkers(OperationContext* opCtx) return false; } - auto now = opCtx->getServiceContext()->getFastClockSource()->now(); - auto expireAfterSeconds = - Seconds{change_stream_serverless_helpers::getExpireAfterSeconds(_tenantId)}; - auto expirationTime = now - expireAfterSeconds; + return hasMarkerWallTimeExpired(opCtx, oldestMarker.wallTime, _tenantId); +} + +bool ChangeCollectionTruncateMarkers::_hasPartialMarkerExpired(OperationContext* opCtx) const { + const auto& [_, highestSeenWallTime] = getPartialMarker(); + + return hasMarkerWallTimeExpired(opCtx, highestSeenWallTime, _tenantId); +} + +void ChangeCollectionTruncateMarkers::expirePartialMarker(OperationContext* opCtx, + const Collection* changeCollection) { + createPartialMarkerIfNecessary(opCtx); + // We can't use the normal peekOldestMarkerIfNeeded method since that calls _hasExcessMarkers + // and it will return false since the new oldest marker will have the last entry. + auto oldestMarker = + checkMarkersWith([&](const std::deque& markers) + -> boost::optional { + // Partial marker did not get generated, early exit. + if (markers.empty()) { + return {}; + } + auto firstMarker = markers.front(); + // We will only consider the case of an expired marker. 
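Stripped of storage-engine details, the bookkeeping in `_hasExcessMarkers` and `expirePartialMarker` treats the collection as a time-ordered deque of chunks: a chunk may be truncated only once its newest entry has expired, and the newest chunk is never dropped so the collection always keeps its last entry. A standalone sketch of that idea with illustrative types (not the real `CollectionTruncateMarkers` API):

```cpp
#include <cstdint>
#include <deque>

// Illustrative stand-ins for RecordId / Date_t; not the real MongoDB types.
struct Marker {
    int64_t records;       // documents covered by this marker
    int64_t bytes;         // bytes covered by this marker
    int64_t lastRecordId;  // highest record id covered
    int64_t wallTimeMs;    // wall time of the newest document covered
};

// Drop whole markers whose newest entry is at or before the cutoff, but always keep
// at least one marker so the last entry of the collection survives.
int64_t removeExpired(std::deque<Marker>& markers, int64_t expirationTimeMs) {
    int64_t docsDeleted = 0;
    while (markers.size() > 1 && markers.front().wallTimeMs <= expirationTimeMs) {
        // A real implementation range-truncates up to markers.front().lastRecordId here.
        docsDeleted += markers.front().records;
        markers.pop_front();
    }
    return docsDeleted;
}
```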
+ if (!hasMarkerWallTimeExpired(opCtx, firstMarker.wallTime, _tenantId)) { + return {}; + } + return firstMarker; + }); + + if (!oldestMarker) { + // The oldest marker hasn't expired, nothing to do here. + return; + } + + // Abandon the snapshot so we can fetch the most recent version of the table. This increases the + // chances the last entry isn't present in the new partial marker. + opCtx->recoveryUnit()->abandonSnapshot(); + WriteUnitOfWork wuow(opCtx); + + auto backCursor = changeCollection->getRecordStore()->getCursor(opCtx, false); + // If the oldest marker does not contain the last entry it's a normal marker, don't perform any + // modifications to it. + auto obj = backCursor->next(); + if (!obj || obj->id > oldestMarker->lastRecord) { + return; + } + + // At this point the marker contains the last entry of the collection, we have to shift the last + // entry to the next marker so we can expire the previous entries. + auto bytesNotTruncated = obj->data.size(); + const auto& doc = obj->data.toBson(); + auto wallTime = doc[repl::OplogEntry::kWallClockTimeFieldName].Date(); + + updateCurrentMarkerAfterInsertOnCommit(opCtx, bytesNotTruncated, obj->id, wallTime, 1); + + auto bytesDeleted = oldestMarker->bytes - bytesNotTruncated; + auto docsDeleted = oldestMarker->records - 1; + + // We build the previous record id based on the extracted value + auto previousRecordId = [&] { + auto currId = doc[repl::OplogEntry::k_idFieldName].timestamp(); + invariant(currId > Timestamp::min(), "Last entry timestamp must be larger than 0"); + + auto fixedBson = BSON(repl::OplogEntry::k_idFieldName << (currId - 1)); + + auto recordId = invariantStatusOK( + record_id_helpers::keyForDoc(fixedBson, + changeCollection->getClusteredInfo()->getIndexSpec(), + changeCollection->getDefaultCollator())); + return recordId; + }(); + auto newMarker = + CollectionTruncateMarkers::Marker{docsDeleted, bytesDeleted, previousRecordId, wallTime}; - return oldestMarker.wallTime <= expirationTime; + // Replace now the oldest marker with a version that doesn't contain the last entry. This is + // susceptible to races with concurrent inserts. But the invariant of metrics being correct in + // aggregate still holds. Ignoring this issue is a valid strategy here as we move the ignored + // bytes to the next partial marker and we only guarantee eventual correctness. + modifyMarkersWith([&](std::deque& markers) { + markers.pop_front(); + markers.emplace_front(std::move(newMarker)); + }); + wuow.commit(); } } // namespace mongo diff --git a/src/mongo/db/change_collection_truncate_markers.h b/src/mongo/db/change_collection_truncate_markers.h index 1da45896c2b7b..72dcfa2d5e5a8 100644 --- a/src/mongo/db/change_collection_truncate_markers.h +++ b/src/mongo/db/change_collection_truncate_markers.h @@ -29,7 +29,15 @@ #pragma once -#include "mongo/db/storage/collection_markers.h" +#include +#include + +#include "mongo/db/catalog/collection.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/time_support.h" /** * Implementation of truncate markers for Change Collections. Respects the requirement of always @@ -45,9 +53,26 @@ class ChangeCollectionTruncateMarkers final int64_t leftoverRecordsBytes, int64_t minBytesPerMarker); + // Expires the partial marker with proper care for the last entry. 
Expiring here means: + // * Turning the partial marker into an actual marker + // * Ensuring the last entry isn't present in the generated marker + // The last entry is necessary for correctness of the change collection. This method will shift + // the last entry size and count to the next partial marker. + void expirePartialMarker(OperationContext* opCtx, const Collection* changeCollection); + + // Performs post initialisation work. The constructor doesn't specify the highest element seen, + // so we must update it after initialisation. + void performPostInitialisation(OperationContext* opCtx, + const RecordId& highestRecordId, + Date_t highestWallTime) { + updateCurrentMarker(opCtx, 0, highestRecordId, highestWallTime, 0); + } + private: bool _hasExcessMarkers(OperationContext* opCtx) const override; + bool _hasPartialMarkerExpired(OperationContext* opCtx) const override; + TenantId _tenantId; }; } // namespace mongo diff --git a/src/mongo/db/change_stream_change_collection_manager.cpp b/src/mongo/db/change_stream_change_collection_manager.cpp index 644c9da46b3a1..91362d53e7ed6 100644 --- a/src/mongo/db/change_stream_change_collection_manager.cpp +++ b/src/mongo/db/change_stream_change_collection_manager.cpp @@ -32,23 +32,70 @@ #include "mongo/db/change_stream_change_collection_manager.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/catalog/coll_mod.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/change_stream_serverless_helpers.h" +#include "mongo/db/change_streams_cluster_parameter_gen.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" +#include "mongo/db/drop_gen.h" +#include "mongo/db/exec/batched_delete_stage.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/record_id.h" +#include "mongo/db/record_id_helpers.h" #include "mongo/db/repl/apply_ops_command_info.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/server_feature_flags_gen.h" -#include "mongo/db/server_options.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" 
+#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -165,13 +212,29 @@ boost::optional createChangeCollectionEntryFromOplog(const BSONObj& opl return readyChangeCollDoc.toBson(); } +bool usesUnreplicatedTruncates() { + // (Ignore FCV check): This feature flag is potentially backported to previous version of the + // server. We can't rely on the FCV version to see whether it's enabled or not. + return feature_flags::gFeatureFlagUseUnreplicatedTruncatesForDeletions + .isEnabledAndIgnoreFCVUnsafe(); +} +} // namespace + /** - * Helper to write insert statements to respective change collections based on tenant ids. + * Locks respective change collections, writes insert statements to respective change collections + * based on tenant ids. */ -class ChangeCollectionsWriter { +class ChangeStreamChangeCollectionManager::ChangeCollectionsWriterInternal { public: - explicit ChangeCollectionsWriter(const AutoGetChangeCollection::AccessMode& accessMode) - : _accessMode{accessMode} {} + explicit ChangeCollectionsWriterInternal( + OperationContext* opCtx, + OpDebug* opDebug, + const AutoGetChangeCollection::AccessMode& accessMode, + ConcurrentSharedValuesMap* map) + : _accessMode{accessMode}, + _opCtx{opCtx}, + _opDebug{opDebug}, + _tenantTruncateMarkersMap(map) {} /** * Adds the insert statement for the provided tenant that will be written to the change @@ -179,8 +242,22 @@ class ChangeCollectionsWriter { */ void add(InsertStatement insertStatement) { if (auto tenantId = _extractTenantId(insertStatement); tenantId) { - _tenantStatementsMap[*tenantId].push_back(std::move(insertStatement)); + _tenantToStatementsAndChangeCollectionMap[*tenantId].insertStatements.push_back( + std::move(insertStatement)); + } + } + + /** + * Acquires locks to change collections of all tenants referred to by added insert statements. + */ + void acquireLocks() { + tassert(6671503, "Locks cannot be acquired twice", !_locksAcquired); + for (auto&& [tenantId, insertStatementsAndChangeCollection] : + _tenantToStatementsAndChangeCollectionMap) { + insertStatementsAndChangeCollection.tenantChangeCollection.emplace( + _opCtx, _accessMode, tenantId); } + _locksAcquired = true; } /** @@ -188,10 +265,15 @@ class ChangeCollectionsWriter { * encountered, the write is skipped and the remaining inserts are attempted individually. Bails * out further writes if any other type of failure is encountered in writing to any change * collection. + * + * Locks should be acquired before calling this method by calling 'acquireLocks()'. */ - Status write(OperationContext* opCtx, OpDebug* opDebug) { - for (auto&& [tenantId, insertStatements] : _tenantStatementsMap) { - AutoGetChangeCollection tenantChangeCollection(opCtx, _accessMode, tenantId); + Status write() { + tassert(6671504, "Locks should be acquired first", _locksAcquired); + for (auto&& [tenantId, insertStatementsAndChangeCollection] : + _tenantToStatementsAndChangeCollectionMap) { + AutoGetChangeCollection& tenantChangeCollection = + *insertStatementsAndChangeCollection.tenantChangeCollection; // The change collection does not exist for a particular tenant because either the // change collection is not enabled or is in the process of enablement. 
Ignore this @@ -201,7 +283,15 @@ class ChangeCollectionsWriter { } // Writes to the change collection should not be replicated. - repl::UnreplicatedWritesBlock unReplBlock(opCtx); + repl::UnreplicatedWritesBlock unReplBlock(_opCtx); + + // To avoid creating a lot of unnecessary calls to + // CollectionTruncateMarkers::updateCurrentMarkerAfterInsertOnCommit we aggregate all + // the results and make a singular call. This requires storing the highest + // RecordId/WallTime seen from the insert statements. + RecordId maxRecordIdSeen; + Date_t maxWallTimeSeen; + int64_t bytesInserted = 0; /** * For a serverless shard merge, we clone all change collection entries from the donor @@ -210,9 +300,9 @@ class ChangeCollectionsWriter { * If we encounter a DuplicateKey error and the entry is identical to the existing one, * we can safely skip and continue. */ - for (auto&& insertStatement : insertStatements) { + for (auto&& insertStatement : insertStatementsAndChangeCollection.insertStatements) { Status status = collection_internal::insertDocument( - opCtx, *tenantChangeCollection, insertStatement, opDebug, false); + _opCtx, *tenantChangeCollection, insertStatement, _opDebug, false); if (status.code() == ErrorCodes::DuplicateKey) { const auto dupKeyInfo = status.extraInfo(); @@ -222,21 +312,69 @@ class ChangeCollectionsWriter { LOGV2(7282901, "Ignoring DuplicateKey error for change collection insert", "doc"_attr = insertStatement.doc.toString()); + // Continue to the next insert statement as we've ommitted the current one. + continue; } else if (!status.isOK()) { return Status(status.code(), str::stream() << "Write to change collection: " - << tenantChangeCollection->ns().toStringWithTenantId() + << tenantChangeCollection->ns().toStringForErrorMsg() << "failed") .withReason(status.reason()); } + + // Right now we assume that the tenant change collection is clustered and + // reconstruct the RecordId used in the KV store. Ideally we want the write path to + // return the record ids used for the insert but as it isn't available we + // reconstruct the key here. + dassert(tenantChangeCollection->isClustered()); + auto recordId = invariantStatusOK(record_id_helpers::keyForDoc( + insertStatement.doc, + tenantChangeCollection->getClusteredInfo()->getIndexSpec(), + tenantChangeCollection->getDefaultCollator())); + + if (maxRecordIdSeen < recordId) { + maxRecordIdSeen = std::move(recordId); + } + auto docWallTime = + insertStatement.doc[repl::OplogEntry::kWallClockTimeFieldName].Date(); + if (maxWallTimeSeen < docWallTime) { + maxWallTimeSeen = docWallTime; + } + + bytesInserted += insertStatement.doc.objsize(); } - } + std::shared_ptr truncateMarkers = + usesUnreplicatedTruncates() + ? _tenantTruncateMarkersMap->find(tenantChangeCollection->uuid()) + : nullptr; + if (truncateMarkers && bytesInserted > 0) { + // We update the TruncateMarkers instance if it exists. Creation is performed + // asynchronously by the remover thread. + truncateMarkers->updateCurrentMarkerAfterInsertOnCommit( + _opCtx, + bytesInserted, + maxRecordIdSeen, + maxWallTimeSeen, + insertStatementsAndChangeCollection.insertStatements.size()); + } + } return Status::OK(); } private: + /** + * Field 'insertStatements' contains insert statements to be written to the tenant's change + * collection associated with 'tenantChangeCollection' field. 
+ */ + struct TenantStatementsAndChangeCollection { + + std::vector insertStatements; + + boost::optional tenantChangeCollection; + }; + boost::optional _extractTenantId(const InsertStatement& insertStatement) { // Parse the oplog entry to fetch the tenant id from 'tid' field. The oplog entry will not // written to the change collection if 'tid' field is missing. @@ -255,12 +393,80 @@ class ChangeCollectionsWriter { // Mode required to access change collections. const AutoGetChangeCollection::AccessMode _accessMode; - // Maps inserts statements for each tenant. - stdx::unordered_map, TenantId::Hasher> - _tenantStatementsMap; + // A mapping from a tenant id to insert statements and the change collection of the tenant. + stdx::unordered_map + _tenantToStatementsAndChangeCollectionMap; + + // An operation context to use while performing all operations in this class. + OperationContext* const _opCtx; + + // An OpDebug to use while performing all operations in this class. + OpDebug* const _opDebug; + + // Indicates if locks have been acquired. + bool _locksAcquired{false}; + + ConcurrentSharedValuesMap* + _tenantTruncateMarkersMap; }; -} // namespace +ChangeStreamChangeCollectionManager::ChangeCollectionsWriter::ChangeCollectionsWriter( + OperationContext* opCtx, + std::vector::const_iterator beginOplogEntries, + std::vector::const_iterator endOplogEntries, + OpDebug* opDebug, + ConcurrentSharedValuesMap* tenantMarkerMap) { + // This method must be called within a 'WriteUnitOfWork'. The caller must be responsible for + // commiting the unit of work. + invariant(opCtx->lockState()->inAWriteUnitOfWork()); + + _writer = std::make_unique( + opCtx, opDebug, AutoGetChangeCollection::AccessMode::kWrite, tenantMarkerMap); + + // Transform oplog entries to change collections entries and group them by tenant id. + for (auto oplogEntryIter = beginOplogEntries; oplogEntryIter != endOplogEntries; + oplogEntryIter++) { + auto& oplogDoc = oplogEntryIter->doc; + + // The initial seed oplog insertion is not timestamped as such the 'oplogSlot' is not + // initialized. The corresponding change collection insertion will not be timestamped. 
+ auto oplogSlot = oplogEntryIter->oplogSlot; + + auto changeCollDoc = createChangeCollectionEntryFromOplog(oplogDoc); + + if (changeCollDoc) { + _writer->add(InsertStatement{ + std::move(*changeCollDoc), oplogSlot.getTimestamp(), oplogSlot.getTerm()}); + } + } +} + +ChangeStreamChangeCollectionManager::ChangeCollectionsWriter::ChangeCollectionsWriter( + ChangeStreamChangeCollectionManager::ChangeCollectionsWriter&& other) = default; + +ChangeStreamChangeCollectionManager::ChangeCollectionsWriter& +ChangeStreamChangeCollectionManager::ChangeCollectionsWriter::operator=( + ChangeStreamChangeCollectionManager::ChangeCollectionsWriter&& other) = default; + +ChangeStreamChangeCollectionManager::ChangeCollectionsWriter::~ChangeCollectionsWriter() = default; + +void ChangeStreamChangeCollectionManager::ChangeCollectionsWriter::acquireLocks() { + _writer->acquireLocks(); +} + +Status ChangeStreamChangeCollectionManager::ChangeCollectionsWriter::write() { + return _writer->write(); +} + +ChangeStreamChangeCollectionManager::ChangeCollectionsWriter +ChangeStreamChangeCollectionManager::createChangeCollectionsWriter( + OperationContext* opCtx, + std::vector::const_iterator beginOplogEntries, + std::vector::const_iterator endOplogEntries, + OpDebug* opDebug) { + return ChangeCollectionsWriter{ + opCtx, beginOplogEntries, endOplogEntries, opDebug, &_tenantTruncateMarkersMap}; +} BSONObj ChangeStreamChangeCollectionManager::PurgingJobStats::toBSON() const { return BSON("totalPass" << totalPass.load() << "docsDeleted" << docsDeleted.load() @@ -296,7 +502,7 @@ void ChangeStreamChangeCollectionManager::createChangeCollection(OperationContex const auto status = createCollection(opCtx, changeCollNss, changeCollectionOptions, BSONObj()); uassert(status.code(), str::stream() << "Failed to create change collection: " - << changeCollNss.toStringWithTenantId() << causedBy(status.reason()), + << changeCollNss.toStringForErrorMsg() << causedBy(status.reason()), status.isOK() || status.code() == ErrorCodes::NamespaceExists); } @@ -305,6 +511,22 @@ void ChangeStreamChangeCollectionManager::dropChangeCollection(OperationContext* DropReply dropReply; const auto changeCollNss = NamespaceString::makeChangeCollectionNSS(tenantId); + const bool useUnreplicatedDeletes = usesUnreplicatedTruncates(); + // We get the UUID now in order to remove the collection from the map later. We can't get the + // UUID once the collection has been dropped. + auto collUUID = [&]() -> boost::optional { + if (!useUnreplicatedDeletes) { + // Won't update the truncate markers map so no need to get the UUID. + return boost::none; + } + AutoGetDb lk(opCtx, changeCollNss.dbName(), MODE_IS); + auto collection = + CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, changeCollNss); + if (collection) { + return collection->uuid(); + } + return boost::none; + }(); const auto status = dropCollection(opCtx, changeCollNss, @@ -312,8 +534,14 @@ void ChangeStreamChangeCollectionManager::dropChangeCollection(OperationContext* DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops); uassert(status.code(), str::stream() << "Failed to drop change collection: " - << changeCollNss.toStringWithTenantId() << causedBy(status.reason()), + << changeCollNss.toStringForErrorMsg() << causedBy(status.reason()), status.isOK() || status.code() == ErrorCodes::NamespaceNotFound); + + if (useUnreplicatedDeletes && collUUID) { + // Remove the collection from the TruncateMarkers map. 
As we are dropping the collection + // there's no need to keep it for the remover. Data will be deleted anyways. + _tenantTruncateMarkersMap.erase(*collUUID); + } } void ChangeStreamChangeCollectionManager::insertDocumentsToChangeCollection( @@ -326,8 +554,11 @@ void ChangeStreamChangeCollectionManager::insertDocumentsToChangeCollection( // commiting the unit of work. invariant(opCtx->lockState()->inAWriteUnitOfWork()); - ChangeCollectionsWriter changeCollectionsWriter{ - AutoGetChangeCollection::AccessMode::kWriteInOplogContext}; + ChangeCollectionsWriterInternal changeCollectionsWriter{ + opCtx, + nullptr /*opDebug*/, + AutoGetChangeCollection::AccessMode::kWriteInOplogContext, + &_tenantTruncateMarkersMap}; for (size_t idx = 0; idx < oplogRecords.size(); idx++) { auto& record = oplogRecords[idx]; @@ -341,53 +572,19 @@ void ChangeStreamChangeCollectionManager::insertDocumentsToChangeCollection( } } + changeCollectionsWriter.acquireLocks(); + // Write documents to change collections and throw exception in case of any failure. - Status status = changeCollectionsWriter.write(opCtx, nullptr /* opDebug */); + Status status = changeCollectionsWriter.write(); if (!status.isOK()) { LOGV2_FATAL( 6612300, "Failed to write to change collection", "reason"_attr = status.reason()); } } -Status ChangeStreamChangeCollectionManager::insertDocumentsToChangeCollection( - OperationContext* opCtx, - std::vector::const_iterator beginOplogEntries, - std::vector::const_iterator endOplogEntries, - bool isGlobalIXLockAcquired, - OpDebug* opDebug) { - // This method must be called within a 'WriteUnitOfWork'. The caller must be responsible for - // commiting the unit of work. - invariant(opCtx->lockState()->inAWriteUnitOfWork()); - - // If the global IX lock is already acquired, then change collections entries will be written - // within the oplog context as such acquire the correct access mode for change collections. - const auto changeCollAccessMode = isGlobalIXLockAcquired - ? AutoGetChangeCollection::AccessMode::kWriteInOplogContext - : AutoGetChangeCollection::AccessMode::kWrite; - ChangeCollectionsWriter changeCollectionsWriter{changeCollAccessMode}; - - // Transform oplog entries to change collections entries and group them by tenant id. - for (auto oplogEntryIter = beginOplogEntries; oplogEntryIter != endOplogEntries; - oplogEntryIter++) { - auto& oplogDoc = oplogEntryIter->doc; - - // The initial seed oplog insertion is not timestamped as such the 'oplogSlot' is not - // initialized. The corresponding change collection insertion will not be timestamped. - auto oplogSlot = oplogEntryIter->oplogSlot; - - if (auto changeCollDoc = createChangeCollectionEntryFromOplog(oplogDoc)) { - changeCollectionsWriter.add(InsertStatement{ - std::move(changeCollDoc.get()), oplogSlot.getTimestamp(), oplogSlot.getTerm()}); - } - } - - // Write documents to change collections. 
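The iterator-based `insertDocumentsToChangeCollection` overload removed below is superseded by the two-phase `ChangeCollectionsWriter` defined above. Going by the comments added in this patch, a caller would drive it roughly as follows (a hedged sketch; `oplogEntries` stands for whatever insert-statement range the caller already holds, and the ordering relative to the write unit of work is inferred from the constructor's invariant):

```cpp
// Hedged usage sketch of the new writer, per the documentation added in this patch:
// construct inside a WriteUnitOfWork, acquire the change-collection locks before the
// first write, then write and let the caller commit.
WriteUnitOfWork wuow(opCtx);
auto writer = ChangeStreamChangeCollectionManager::get(opCtx).createChangeCollectionsWriter(
    opCtx, oplogEntries.begin(), oplogEntries.end(), nullptr /* opDebug */);
writer.acquireLocks();
uassertStatusOK(writer.write());
wuow.commit();
```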
- return changeCollectionsWriter.write(opCtx, opDebug); -} - boost::optional ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata( - OperationContext* opCtx, const CollectionPtr* changeCollection) { + OperationContext* opCtx, const CollectionAcquisition& changeCollection) { auto findWallTimeAndRecordIdForFirstDocument = [&](InternalPlanner::Direction direction) -> boost::optional> { BSONObj currChangeDoc; @@ -415,9 +612,9 @@ ChangeStreamChangeCollectionManager::getChangeCollectionPurgingJobMetadata( return {{firstDocAttributes->first, RecordIdBound(std::move(lastDocRecordId))}}; } -size_t ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocuments( +size_t ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocumentsWithCollScan( OperationContext* opCtx, - const CollectionPtr* changeCollection, + const CollectionAcquisition& changeCollection, RecordIdBound maxRecordIdBound, Date_t expirationTime) { auto params = std::make_unique(); @@ -427,7 +624,7 @@ size_t ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocume LTEMatchExpression filter{"wall"_sd, Value(expirationTime)}; auto deleteExecutor = InternalPlanner::deleteWithCollectionScan( opCtx, - &(*changeCollection), + changeCollection, std::move(params), PlanYieldPolicy::YieldPolicy::YIELD_AUTO, InternalPlanner::Direction::FORWARD, @@ -448,6 +645,11 @@ size_t ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocume changeCollectionManager.getPurgingJobStats().bytesDeleted.fetchAndAddRelaxed( batchedDeleteStats.bytesDeleted); + // As we are using collection scans this means we aren't using truncate markers. Clear the + // map since they will not get updated anyways. The markers will get recreated if the + // feature flag is turned on again. + changeCollectionManager._tenantTruncateMarkersMap.clear(); + return batchedDeleteStats.docsDeleted; } catch (const ExceptionFor&) { // It is expected that a collection drop can kill a query plan while deleting an old @@ -455,4 +657,121 @@ size_t ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocume return 0; } } + +namespace { +std::shared_ptr initialiseTruncateMarkers( + OperationContext* opCtx, + const CollectionAcquisition& changeCollection, + ConcurrentSharedValuesMap& truncateMap) { + const auto& ns = changeCollection.nss(); + + auto minBytesPerMarker = gChangeCollectionTruncateMarkersMinBytes; + + YieldableCollectionIterator iterator{opCtx, changeCollection}; + + CollectionTruncateMarkers::InitialSetOfMarkers initialSetOfMarkers = + CollectionTruncateMarkers::createFromCollectionIterator( + opCtx, iterator, ns, minBytesPerMarker, [](const Record& record) { + const auto obj = record.data.toBson(); + auto wallTime = obj[repl::OplogEntry::kWallClockTimeFieldName].Date(); + return CollectionTruncateMarkers::RecordIdAndWallTime{record.id, wallTime}; + }); + // Leftover bytes contains the difference between the amount of bytes we had for the + // markers and the latest collection size/count. This is susceptible to a race + // condition, but metrics are already assumed to be approximate. Ignoring this issue is + // a valid strategy here. 
+ auto truncateMarkers = truncateMap.getOrEmplace(changeCollection.uuid(), + *ns.tenantId(), + std::move(initialSetOfMarkers.markers), + initialSetOfMarkers.leftoverRecordsCount, + initialSetOfMarkers.leftoverRecordsBytes, + minBytesPerMarker); + + auto backScan = InternalPlanner::collectionScan(opCtx, + changeCollection, + PlanYieldPolicy::YieldPolicy::YIELD_AUTO, + InternalPlanner::BACKWARD); + // Update the truncate markers with the last collection entry's RecordId and wall time. + // This is necessary for correct marker expiration. Otherwise the highest seen points + // would be null. Nothing would expire since we have to maintain the last entry in the + // change collection and null RecordId < any initialised RecordId. This would only get + // fixed once an entry has been inserted, initialising the data points. + RecordId rId; + BSONObj doc; + if (backScan->getNext(&doc, &rId) == PlanExecutor::ADVANCED) { + auto wallTime = doc[repl::OplogEntry::kWallClockTimeFieldName].Date(); + truncateMarkers->performPostInitialisation(opCtx, rId, wallTime); + } + + return truncateMarkers; +} +} // namespace + +size_t ChangeStreamChangeCollectionManager::removeExpiredChangeCollectionsDocumentsWithTruncate( + OperationContext* opCtx, const CollectionAcquisition& changeCollection, Date_t expirationTime) { + auto& changeCollectionManager = ChangeStreamChangeCollectionManager::get(opCtx); + auto& truncateMap = changeCollectionManager._tenantTruncateMarkersMap; + + const auto& changeCollectionPtr = changeCollection.getCollectionPtr(); + auto truncateMarkers = truncateMap.find(changeCollectionPtr->uuid()); + + // No marker means it's a new collection, or we've just performed startup. Initialize + // the TruncateMarkers instance. + if (!truncateMarkers) { + writeConflictRetry( + opCtx, "initialise change collection truncate markers", changeCollectionPtr->ns(), [&] { + truncateMarkers = initialiseTruncateMarkers(opCtx, changeCollection, truncateMap); + }); + } + + int64_t numRecordsDeleted = 0; + + auto removeExpiredMarkers = [&] { + auto rs = changeCollectionPtr->getRecordStore(); + while (auto marker = truncateMarkers->peekOldestMarkerIfNeeded(opCtx)) { + writeConflictRetry(opCtx, "truncate change collection", changeCollectionPtr->ns(), [&] { + // The session might be in use from marker initialisation so we must reset it + // here in order to allow an untimestamped write. + opCtx->recoveryUnit()->abandonSnapshot(); + opCtx->recoveryUnit()->allowOneUntimestampedWrite(); + WriteUnitOfWork wuow(opCtx); + + auto bytesDeleted = marker->bytes; + auto docsDeleted = marker->records; + + auto status = + rs->rangeTruncate(opCtx, + // Truncate from the beginning of the collection, this will + // cover cases where some leftover documents are present. + RecordId(), + marker->lastRecord, + -bytesDeleted, + -docsDeleted); + invariantStatusOK(status); + + wuow.commit(); + + truncateMarkers->popOldestMarker(); + numRecordsDeleted += docsDeleted; + + auto& purgingJobStats = changeCollectionManager.getPurgingJobStats(); + purgingJobStats.docsDeleted.fetchAndAddRelaxed(docsDeleted); + purgingJobStats.bytesDeleted.fetchAndAddRelaxed(bytesDeleted); + + auto millisWallTime = marker->wallTime.toMillisSinceEpoch(); + if (purgingJobStats.maxStartWallTimeMillis.load() < millisWallTime) { + purgingJobStats.maxStartWallTimeMillis.store(millisWallTime); + } + }); + } + }; + + removeExpiredMarkers(); + // We now create a partial marker that will shift the last entry to the next marker if it's + // present there. 
This will allow us to expire all entries up to the last one. + truncateMarkers->expirePartialMarker(opCtx, changeCollectionPtr.get()); + // Second pass to remove the potentially new partial marker. + removeExpiredMarkers(); + return numRecordsDeleted; +} } // namespace mongo diff --git a/src/mongo/db/change_stream_change_collection_manager.h b/src/mongo/db/change_stream_change_collection_manager.h index 4ba82355bcf99..8e48603c90566 100644 --- a/src/mongo/db/change_stream_change_collection_manager.h +++ b/src/mongo/db/change_stream_change_collection_manager.h @@ -29,10 +29,31 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/change_collection_truncate_markers.h" +#include "mongo/db/exec/delete_stage.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/concurrent_shared_values_map.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -123,33 +144,73 @@ class ChangeStreamChangeCollectionManager { void dropChangeCollection(OperationContext* opCtx, const TenantId& tenantId); /** - * Inserts documents to change collections. The parameter 'oplogRecords' is a vector of oplog - * records and the parameter 'oplogTimestamps' is a vector for respective timestamp for each - * oplog record. + * Inserts documents to change collections. The parameter 'oplogRecords' is a vector of + * oplog records and the parameter 'oplogTimestamps' is a vector for respective timestamp + * for each oplog record. * - * The method fetches the tenant-id from the oplog entry, performs necessary modification to the - * document and then write to the tenant's change collection at the specified oplog timestamp. + * The method fetches the tenant-id from the oplog entry, performs necessary modification to + * the document and then write to the tenant's change collection at the specified oplog + * timestamp. * - * Failure in insertion to any change collection will result in a fatal exception and will bring - * down the node. + * Failure in insertion to any change collection will result in a fatal exception and will + * bring down the node. */ void insertDocumentsToChangeCollection(OperationContext* opCtx, const std::vector& oplogRecords, const std::vector& oplogTimestamps); + class ChangeCollectionsWriterInternal; /** - * Performs a range inserts on respective change collections using the oplog entries as - * specified by 'beginOplogEntries' and 'endOplogEntries'. - * - * Bails out if a failure is encountered in inserting documents to a particular change - * collection. + * Change Collection Writer. After acquiring ChangeCollectionsWriter the user should trigger + * acquisition of the locks by calling 'acquireLocks()' before the first write in the Write + * Unit of Work. Then the write of documents to change collections can be triggered by + * calling 'write()'. 
+ */ + class ChangeCollectionsWriter { + friend class ChangeStreamChangeCollectionManager; + + /** + * Constructs a writer from a range ['beginOplogEntries', 'endOplogEntries') of oplog + * entries. + */ + ChangeCollectionsWriter( + OperationContext* opCtx, + std::vector::const_iterator beginOplogEntries, + std::vector::const_iterator endOplogEntries, + OpDebug* opDebug, + ConcurrentSharedValuesMap* + tenantMarkerMap); + + public: + ChangeCollectionsWriter(ChangeCollectionsWriter&&); + ChangeCollectionsWriter& operator=(ChangeCollectionsWriter&&); + + /** + * Acquires locks needed to write documents to change collections. + */ + void acquireLocks(); + + /** + * Writes documents to change collections. + */ + Status write(); + + ~ChangeCollectionsWriter(); + + private: + std::unique_ptr _writer; + }; + + /** + * Returns a change collection writer that can insert change collection entries into + * respective change collections. The entries are constructed from a range + * ['beginOplogEntries', 'endOplogEntries') of oplog entries. */ - Status insertDocumentsToChangeCollection( + ChangeCollectionsWriter createChangeCollectionsWriter( OperationContext* opCtx, std::vector::const_iterator beginOplogEntries, std::vector::const_iterator endOplogEntries, - bool isGlobalIXLockAcquired, OpDebug* opDebug); PurgingJobStats& getPurgingJobStats() { @@ -158,24 +219,43 @@ class ChangeStreamChangeCollectionManager { /** * Scans the provided change collection and returns its metadata that will be used by the - * purging job to perform deletion on it. The method returns 'boost::none' if the collection is - * empty. + * purging job to perform deletion on it. The method returns 'boost::none' if the collection + * is empty. */ static boost::optional getChangeCollectionPurgingJobMetadata(OperationContext* opCtx, - const CollectionPtr* changeCollection); + const CollectionAcquisition& changeCollection); - /** Removes documents from a change collection whose wall time is less than the + /** + * Removes documents from a change collection whose wall time is less than the * 'expirationTime'. Returns the number of documents deleted. The 'maxRecordIdBound' is the * maximum record id bound that will not be included in the collection scan. + * + * The removal process is performed with a collection scan + batch delete. */ - static size_t removeExpiredChangeCollectionsDocuments(OperationContext* opCtx, - const CollectionPtr* changeCollection, - RecordIdBound maxRecordIdBound, - Date_t expirationTime); + static size_t removeExpiredChangeCollectionsDocumentsWithCollScan( + OperationContext* opCtx, + const CollectionAcquisition& changeCollection, + RecordIdBound maxRecordIdBound, + Date_t expirationTime); + + /** + * Removes documents from a change collection whose wall time is less than the + * 'expirationTime'. Returns the number of documents deleted. + * + * The removal process is performed with a series of range truncate calls to the record + * store. Some documents might survive this process as deletion happens in chunks and we can + * only delete a chunk if we guarantee it is fully expired. + */ + static size_t removeExpiredChangeCollectionsDocumentsWithTruncate( + OperationContext* opCtx, + const CollectionAcquisition& changeCollection, + Date_t expirationTime); private: // Change collections purging job stats. 
PurgingJobStats _purgingJobStats; + ConcurrentSharedValuesMap + _tenantTruncateMarkersMap; }; } // namespace mongo diff --git a/src/mongo/db/change_stream_options_manager.cpp b/src/mongo/db/change_stream_options_manager.cpp index 37e82659886a2..a4a96aa824931 100644 --- a/src/mongo/db/change_stream_options_manager.cpp +++ b/src/mongo/db/change_stream_options_manager.cpp @@ -28,13 +28,34 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/change_stream_options_manager.h" #include "mongo/db/change_stream_options_parameter_gen.h" #include "mongo/db/change_stream_serverless_helpers.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -136,7 +157,7 @@ Status ChangeStreamOptionsParameter::validate(const BSONElement& newValueElement } }, [&](const std::int64_t& expireAfterSeconds) { - if (change_stream_serverless_helpers::isChangeCollectionsModeActive()) { + if (change_stream_serverless_helpers::isServerlessEnvironment()) { validateStatus = { ErrorCodes::CommandNotSupported, "The 'changeStreamOptions.preAndPostImages.expireAfterSeconds' is " diff --git a/src/mongo/db/change_stream_options_manager.h b/src/mongo/db/change_stream_options_manager.h index 364c51c535c19..d66227eb86a9f 100644 --- a/src/mongo/db/change_stream_options_manager.h +++ b/src/mongo/db/change_stream_options_manager.h @@ -29,7 +29,9 @@ #pragma once +#include "mongo/base/status_with.h" #include "mongo/db/change_stream_options_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/change_stream_options_parameter.idl b/src/mongo/db/change_stream_options_parameter.idl index 122e2793e3b16..8dcb85bcec1c9 100644 --- a/src/mongo/db/change_stream_options_parameter.idl +++ b/src/mongo/db/change_stream_options_parameter.idl @@ -39,4 +39,4 @@ server_parameters: name: ChangeStreamOptionsParameter override_set: true override_validate: true - \ No newline at end of file + diff --git a/src/mongo/db/change_stream_pre_image_util.cpp b/src/mongo/db/change_stream_pre_image_util.cpp new file mode 100644 index 0000000000000..c97207505d695 --- /dev/null +++ b/src/mongo/db/change_stream_pre_image_util.cpp @@ -0,0 +1,186 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/change_stream_pre_image_util.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/change_stream_options_gen.h" +#include "mongo/db/change_stream_options_manager.h" +#include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id_helpers.h" +#include "mongo/db/service_context.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" + +namespace mongo { +// Fail point to set current time for time-based expiration of pre-images. +MONGO_FAIL_POINT_DEFINE(changeStreamPreImageRemoverCurrentTime); + +namespace change_stream_pre_image_util { + +namespace { +// Get the 'expireAfterSeconds' from the 'ChangeStreamOptions' if not 'off', boost::none otherwise. +boost::optional getExpireAfterSecondsFromChangeStreamOptions( + ChangeStreamOptions& changeStreamOptions) { + const stdx::variant& expireAfterSeconds = + changeStreamOptions.getPreAndPostImages().getExpireAfterSeconds(); + + if (!stdx::holds_alternative(expireAfterSeconds)) { + return stdx::get(expireAfterSeconds); + } + + return boost::none; +} +} // namespace + +boost::optional getPreImageExpirationTime(OperationContext* opCtx, Date_t currentTime) { + // Non-serverless and serverless environments expire pre-images according to different logic and + // parameters. This method retrieves the 'expireAfterSeconds' for a single-tenant environment. + boost::optional expireAfterSeconds = boost::none; + + // Get the expiration time directly from the change stream manager. + auto changeStreamOptions = ChangeStreamOptionsManager::get(opCtx).getOptions(opCtx); + expireAfterSeconds = getExpireAfterSecondsFromChangeStreamOptions(changeStreamOptions); + + // A pre-image is eligible for deletion if: + // pre-image's op-time + expireAfterSeconds < currentTime. + return expireAfterSeconds ? boost::optional(currentTime - Seconds(*expireAfterSeconds)) + : boost::none; +} + +Timestamp getPreImageTimestamp(const RecordId& rid) { + static constexpr auto kTopLevelFieldName = "ridAsBSON"_sd; + auto ridAsNestedBSON = record_id_helpers::toBSONAs(rid, kTopLevelFieldName); + // 'toBSONAs()' discards type bits of the underlying KeyString of the RecordId. 
However, since + // the 'ts' field of 'ChangeStreamPreImageId' is distinct CType::kTimestamp, type bits aren't + // necessary to obtain the original value. + + auto ridBSON = ridAsNestedBSON.getObjectField(kTopLevelFieldName); + + // Callers must ensure the 'rid' represents an underlying 'ChangeStreamPreImageId'. Otherwise, + // the behavior of this method is undefined. + invariant(ridBSON.hasField(ChangeStreamPreImageId::kTsFieldName)); + + auto tsElem = ridBSON.getField(ChangeStreamPreImageId::kTsFieldName); + return tsElem.timestamp(); +} + +RecordId toRecordId(ChangeStreamPreImageId id) { + return record_id_helpers::keyForElem( + BSON(ChangeStreamPreImage::kIdFieldName << id.toBSON()).firstElement()); +} + +RecordIdBound getAbsoluteMinPreImageRecordIdBoundForNs(const UUID& nsUUID) { + return RecordIdBound( + change_stream_pre_image_util::toRecordId(ChangeStreamPreImageId(nsUUID, Timestamp(), 0))); +} + +RecordIdBound getAbsoluteMaxPreImageRecordIdBoundForNs(const UUID& nsUUID) { + return RecordIdBound(change_stream_pre_image_util::toRecordId( + ChangeStreamPreImageId(nsUUID, Timestamp::max(), std::numeric_limits::max()))); +} + +UUID getPreImageNsUUID(const BSONObj& preImageObj) { + auto parsedUUID = UUID::parse(preImageObj[ChangeStreamPreImage::kIdFieldName] + .Obj()[ChangeStreamPreImageId::kNsUUIDFieldName]); + tassert(7027400, "Pre-image collection UUID must be of UUID type", parsedUUID.isOK()); + return std::move(parsedUUID.getValue()); +} + +boost::optional findNextCollectionUUID(OperationContext* opCtx, + const CollectionPtr* preImagesCollPtr, + boost::optional currentNsUUID, + Date_t& firstDocWallTime) { + BSONObj preImageObj; + + // Make the minRecordId for the next collection UUID the maximum RecordId for the current + // 'currentNsUUID'. + auto minRecordId = currentNsUUID + ? boost::make_optional( + change_stream_pre_image_util::getAbsoluteMaxPreImageRecordIdBoundForNs( + *currentNsUUID)) + : boost::none; + auto planExecutor = + InternalPlanner::collectionScan(opCtx, + preImagesCollPtr, + PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY, + InternalPlanner::Direction::FORWARD, + boost::none /* resumeAfterRecordId */, + std::move(minRecordId)); + if (planExecutor->getNext(&preImageObj, nullptr) == PlanExecutor::IS_EOF) { + return boost::none; + } + + firstDocWallTime = preImageObj[ChangeStreamPreImage::kOperationTimeFieldName].date(); + return getPreImageNsUUID(preImageObj); +} + +Date_t getCurrentTimeForPreImageRemoval(OperationContext* opCtx) { + auto currentTime = opCtx->getServiceContext()->getFastClockSource()->now(); + changeStreamPreImageRemoverCurrentTime.execute([&](const BSONObj& data) { + // Populate the current time for time based expiration of pre-images. + if (auto currentTimeElem = data["currentTimeForTimeBasedExpiration"]) { + const BSONType bsonType = currentTimeElem.type(); + tassert(7500501, + str::stream() << "Expected type for 'currentTimeForTimeBasedExpiration' is " + "'date', but found: " + << bsonType, + bsonType == BSONType::Date); + + currentTime = currentTimeElem.Date(); + } + }); + + return currentTime; +} + +} // namespace change_stream_pre_image_util +} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_image_util.h b/src/mongo/db/change_stream_pre_image_util.h new file mode 100644 index 0000000000000..0f7eccdf7896d --- /dev/null +++ b/src/mongo/db/change_stream_pre_image_util.h @@ -0,0 +1,92 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/change_stream_preimage_gen.h" +#include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/record_id.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" + +namespace mongo { +namespace change_stream_pre_image_util { +/** + * Returns pre-images expiry time in milliseconds since the epoch time if configured, boost::none + * otherwise. + * + * Only suitable for a single-tenant environment. Otherwise, callers should defer to serverless + * methods which compute expireAfterSeconds according to the tenantId. + */ +boost::optional getPreImageExpirationTime(OperationContext* opCtx, Date_t currentTime); + +/** + * Parses the 'ts' field from the 'ChangeStreamPreImageId' associated with the 'rid'. The 'rid' MUST +be + * generated from a pre-image. + */ +Timestamp getPreImageTimestamp(const RecordId& rid); + +RecordId toRecordId(ChangeStreamPreImageId id); + +/** + * A given pre-images collection consists of segments of pre-images generated from different UUIDs. + * Returns the absolute min/max RecordIdBounds for the segment of pre-images generated from + * 'nsUUID'. + */ +RecordIdBound getAbsoluteMinPreImageRecordIdBoundForNs(const UUID& nsUUID); +RecordIdBound getAbsoluteMaxPreImageRecordIdBoundForNs(const UUID& nsUUID); + +UUID getPreImageNsUUID(const BSONObj& preImageObj); + +/** + * Finds the next collection UUID in 'preImagesCollPtr' greater than 'currentNsUUID'. Returns + * boost::none if the next collection is not found. Stores the wall time of the first record in the + * next collection in 'firstDocWallTime'. + */ +boost::optional findNextCollectionUUID(OperationContext* opCtx, + const CollectionPtr* preImagesCollPtr, + boost::optional currentNsUUID, + Date_t& firstDocWallTime); + +/** + * Preferred method for getting the current time in pre-image removal code - in testing + * environments, the 'changeStreamPreImageRemoverCurrentTime' failpoint can alter the return value. 
+ * + * Returns the current time. + */ +Date_t getCurrentTimeForPreImageRemoval(OperationContext* opCtx); +} // namespace change_stream_pre_image_util +} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_collection_manager.cpp b/src/mongo/db/change_stream_pre_images_collection_manager.cpp index 0bb0ed06c961c..3e83b274602f8 100644 --- a/src/mongo/db/change_stream_pre_images_collection_manager.cpp +++ b/src/mongo/db/change_stream_pre_images_collection_manager.cpp @@ -29,86 +29,141 @@ #include "mongo/db/change_stream_pre_images_collection_manager.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include + #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog/drop_collection.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/change_stream_options_manager.h" +#include "mongo/db/change_stream_pre_image_util.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/drop_gen.h" +#include "mongo/db/exec/batched_delete_stage.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/delete_stage.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_tree.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" #include "mongo/util/assert_util.h" -#include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery namespace mongo { namespace { -// Fail point to set current time for time-based expiration of pre-images. -MONGO_FAIL_POINT_DEFINE(changeStreamPreImageRemoverCurrentTime); MONGO_FAIL_POINT_DEFINE(failPreimagesCollectionCreation); -} // namespace - -namespace change_stream_pre_image_helpers { - -// Get the 'expireAfterSeconds' from the 'ChangeStreamOptions' if not 'off', boost::none otherwise. 
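The utility introduced above centralizes the wall-time expiration rule ("pre-image op-time + expireAfterSeconds < currentTime"). Expressed against `getPreImageExpirationTime()`, the check looks like the sketch below; the helper name and its parameters are illustrative only, and the production deletion path applies the same rule through a match expression and RecordId bounds rather than per-document checks.

```cpp
#include "mongo/db/change_stream_pre_image_util.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/pipeline/change_stream_preimage_gen.h"

namespace mongo {
// Illustrative helper (not part of this patch): a pre-image is wall-time expired when
// operationTime < currentTime - expireAfterSeconds, the boundary returned by
// getPreImageExpirationTime(). The strict comparison follows the comment in the utility;
// the real delete filter may be inclusive.
bool isPreImageExpiredByWallTime(OperationContext* opCtx,
                                 const ChangeStreamPreImage& preImage,
                                 Date_t currentTime) {
    auto expirationTime =
        change_stream_pre_image_util::getPreImageExpirationTime(opCtx, currentTime);
    // boost::none means 'expireAfterSeconds' is "off": no wall-time based expiration applies.
    return expirationTime && preImage.getOperationTime() < *expirationTime;
}
}  // namespace mongo
```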
-boost::optional getExpireAfterSecondsFromChangeStreamOptions( - ChangeStreamOptions& changeStreamOptions) { - const stdx::variant& expireAfterSeconds = - changeStreamOptions.getPreAndPostImages().getExpireAfterSeconds(); - - if (!stdx::holds_alternative(expireAfterSeconds)) { - return stdx::get(expireAfterSeconds); - } - return boost::none; +const auto getPreImagesCollectionManager = + ServiceContext::declareDecoration(); + +std::unique_ptr getDeleteExpiredPreImagesExecutor( + OperationContext* opCtx, + CollectionAcquisition preImageColl, + const MatchExpression* filterPtr, + Timestamp maxRecordIdTimestamp, + UUID currentCollectionUUID) { + auto params = std::make_unique(); + params->isMulti = true; + + std::unique_ptr batchedDeleteParams; + batchedDeleteParams = std::make_unique(); + RecordIdBound minRecordId = + change_stream_pre_image_util::getAbsoluteMinPreImageRecordIdBoundForNs( + currentCollectionUUID); + RecordIdBound maxRecordId = + RecordIdBound(change_stream_pre_image_util::toRecordId(ChangeStreamPreImageId( + currentCollectionUUID, maxRecordIdTimestamp, std::numeric_limits::max()))); + + return InternalPlanner::deleteWithCollectionScan( + opCtx, + std::move(preImageColl), + std::move(params), + PlanYieldPolicy::YieldPolicy::YIELD_AUTO, + InternalPlanner::Direction::FORWARD, + std::move(minRecordId), + std::move(maxRecordId), + CollectionScanParams::ScanBoundInclusion::kIncludeBothStartAndEndRecords, + std::move(batchedDeleteParams), + filterPtr, + filterPtr != nullptr); } -// Returns pre-images expiry time in milliseconds since the epoch time if configured, boost::none -// otherwise. -boost::optional getPreImageExpirationTime(OperationContext* opCtx, Date_t currentTime) { - invariant(!change_stream_serverless_helpers::isChangeCollectionsModeActive()); - boost::optional expireAfterSeconds = boost::none; - - // Get the expiration time directly from the change stream manager. - auto changeStreamOptions = ChangeStreamOptionsManager::get(opCtx).getOptions(opCtx); - expireAfterSeconds = getExpireAfterSecondsFromChangeStreamOptions(changeStreamOptions); - - // A pre-image is eligible for deletion if: - // pre-image's op-time + expireAfterSeconds < currentTime. - return expireAfterSeconds ? boost::optional(currentTime - Seconds(*expireAfterSeconds)) - : boost::none; +bool useUnreplicatedTruncates() { + bool res = feature_flags::gFeatureFlagUseUnreplicatedTruncatesForDeletions.isEnabled( + serverGlobalParams.featureCompatibility); + return res; } +} // namespace -// TODO SERVER-74981: Investigate whether there is a safer way to extract the Timestamp. -Timestamp getPreImageTimestamp(const RecordId& rid) { - static constexpr auto kTopLevelFieldName = "ridAsBSON"_sd; - auto ridAsNestedBSON = record_id_helpers::toBSONAs(rid, kTopLevelFieldName); - // 'toBSONAs()' discards type bits of the underlying KeyString of the RecordId. However, since - // the 'ts' field of 'ChangeStreamPreImageId' is distinct CType::kTimestamp, type bits aren't - // necessary to obtain the original value. 
- - auto ridBSON = ridAsNestedBSON.getObjectField(kTopLevelFieldName); - auto tsElem = ridBSON.getField(ChangeStreamPreImageId::kTsFieldName); - return tsElem.timestamp(); +BSONObj ChangeStreamPreImagesCollectionManager::PurgingJobStats::toBSON() const { + BSONObjBuilder builder; + builder.append("totalPass", totalPass.loadRelaxed()) + .append("docsDeleted", docsDeleted.loadRelaxed()) + .append("bytesDeleted", bytesDeleted.loadRelaxed()) + .append("scannedCollections", scannedCollections.loadRelaxed()) + .append("scannedInternalCollections", scannedInternalCollections.loadRelaxed()) + .append("maxStartWallTimeMillis", maxStartWallTime.loadRelaxed().toMillisSinceEpoch()) + .append("timeElapsedMillis", timeElapsedMillis.loadRelaxed()); + return builder.obj(); } -RecordId toRecordId(ChangeStreamPreImageId id) { - return record_id_helpers::keyForElem( - BSON(ChangeStreamPreImage::kIdFieldName << id.toBSON()).firstElement()); +ChangeStreamPreImagesCollectionManager& ChangeStreamPreImagesCollectionManager::get( + ServiceContext* service) { + return getPreImagesCollectionManager(service); } -} // namespace change_stream_pre_image_helpers +ChangeStreamPreImagesCollectionManager& ChangeStreamPreImagesCollectionManager::get( + OperationContext* opCtx) { + return getPreImagesCollectionManager(opCtx->getServiceContext()); +} void ChangeStreamPreImagesCollectionManager::createPreImagesCollection( OperationContext* opCtx, boost::optional tenantId) { @@ -127,7 +182,7 @@ void ChangeStreamPreImagesCollectionManager::createPreImagesCollection( opCtx, preImagesCollectionNamespace, preImagesCollectionOptions, BSONObj()); uassert(status.code(), str::stream() << "Failed to create the pre-images collection: " - << preImagesCollectionNamespace.toStringWithTenantId() + << preImagesCollectionNamespace.toStringForErrorMsg() << causedBy(status.reason()), status.isOK() || status.code() == ErrorCodes::NamespaceExists); } @@ -144,9 +199,13 @@ void ChangeStreamPreImagesCollectionManager::dropPreImagesCollection( DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops); uassert(status.code(), str::stream() << "Failed to drop the pre-images collection: " - << preImagesCollectionNamespace.toStringWithTenantId() + << preImagesCollectionNamespace.toStringForErrorMsg() << causedBy(status.reason()), status.isOK() || status.code() == ErrorCodes::NamespaceNotFound); + + if (useUnreplicatedTruncates()) { + _truncateManager.dropAllMarkersForTenant(tenantId); + } } void ChangeStreamPreImagesCollectionManager::insertPreImage(OperationContext* opCtx, @@ -167,17 +226,28 @@ void ChangeStreamPreImagesCollectionManager::insertPreImage(OperationContext* op // the pre-images collection. There are no known cases where an operation holding an // exclusive lock on the pre-images collection also waits for oplog visibility. 
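With the manager now installed as a `ServiceContext` decoration and `PurgingJobStats::toBSON()` available, the purging-job counters can be serialized from anywhere an `OperationContext` is at hand. A minimal sketch; the function name and the `"changeStreamPreImages"` field name are assumptions for illustration, not a hook added by this hunk.

```cpp
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/change_stream_pre_images_collection_manager.h"

namespace mongo {
// Illustrative diagnostics helper: emit the purging-job counters as a sub-document.
void appendPreImagePurgingJobStats(OperationContext* opCtx, BSONObjBuilder* bob) {
    const auto& stats = ChangeStreamPreImagesCollectionManager::get(opCtx).getPurgingJobStats();
    bob->append("changeStreamPreImages", stats.toBSON());
}
}  // namespace mongo
```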
AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); - AutoGetCollection preImagesCollectionRaii( - opCtx, preImagesCollectionNamespace, LockMode::MODE_IX); - auto& changeStreamPreImagesCollection = preImagesCollectionRaii.getCollection(); + const auto changeStreamPreImagesCollection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(preImagesCollectionNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + if (preImagesCollectionNamespace.tenantId() && + !change_stream_serverless_helpers::isChangeStreamEnabled( + opCtx, *preImagesCollectionNamespace.tenantId())) { + return; + } tassert(6646201, "The change stream pre-images collection is not present", - changeStreamPreImagesCollection); + changeStreamPreImagesCollection.exists()); + auto insertStatement = InsertStatement{preImage.toBSON()}; const auto insertionStatus = collection_internal::insertDocument(opCtx, - changeStreamPreImagesCollection, - InsertStatement{preImage.toBSON()}, + changeStreamPreImagesCollection.getCollectionPtr(), + insertStatement, &CurOp::get(opCtx)->debug()); tassert(5868601, str::stream() << "Attempted to insert a duplicate document into the pre-images " @@ -185,121 +255,137 @@ void ChangeStreamPreImagesCollectionManager::insertPreImage(OperationContext* op << preImage.getId().toBSON().toString(), insertionStatus != ErrorCodes::DuplicateKey); uassertStatusOK(insertionStatus); + + if (useUnreplicatedTruncates()) { + // This is a no-op until the 'tenantId' is registered with the 'truncateManager' in the + // expired pre-image removal path. + auto bytesInserted = insertStatement.doc.objsize(); + _truncateManager.updateMarkersOnInsert(opCtx, tenantId, preImage, bytesInserted); + } } -namespace { -/** - * Finds the next collection UUID in the change stream pre-images collection 'preImagesCollPtr' for - * which collection UUID is greater than 'collectionUUID'. Returns boost::none if the next - * collection is not found. - */ -boost::optional findNextCollectionUUID(OperationContext* opCtx, - const CollectionPtr* preImagesCollPtr, - boost::optional collectionUUID - -) { - BSONObj preImageObj; - auto minRecordId = collectionUUID - ? 
boost::make_optional( - RecordIdBound(change_stream_pre_image_helpers::toRecordId(ChangeStreamPreImageId( - *collectionUUID, Timestamp::max(), std::numeric_limits::max())))) - : boost::none; - auto planExecutor = - InternalPlanner::collectionScan(opCtx, - preImagesCollPtr, - PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY, - InternalPlanner::Direction::FORWARD, - boost::none /* resumeAfterRecordId */, - std::move(minRecordId)); - if (planExecutor->getNext(&preImageObj, nullptr) == PlanExecutor::IS_EOF) { - return boost::none; +void ChangeStreamPreImagesCollectionManager::performExpiredChangeStreamPreImagesRemovalPass( + Client* client) { + Timer timer; + + const auto startTime = Date_t::now(); + ServiceContext::UniqueOperationContext opCtx; + try { + opCtx = client->makeOperationContext(); + Date_t currentTimeForTimeBasedExpiration = + change_stream_pre_image_util::getCurrentTimeForPreImageRemoval(opCtx.get()); + size_t numberOfRemovals = 0; + + if (useUnreplicatedTruncates()) { + if (change_stream_serverless_helpers::isChangeCollectionsModeActive()) { + const auto tenantIds = + change_stream_serverless_helpers::getConfigDbTenants(opCtx.get()); + for (const auto& tenantId : tenantIds) { + numberOfRemovals += _deleteExpiredPreImagesWithTruncate(opCtx.get(), tenantId); + } + } else { + numberOfRemovals = + _deleteExpiredPreImagesWithTruncate(opCtx.get(), boost::none /** tenantId **/); + } + } else { + if (change_stream_serverless_helpers::isChangeCollectionsModeActive()) { + // A serverless environment is enabled and removal logic must take the tenantId into + // account. + const auto tenantIds = + change_stream_serverless_helpers::getConfigDbTenants(opCtx.get()); + for (const auto& tenantId : tenantIds) { + numberOfRemovals += _deleteExpiredPreImagesWithCollScanForTenants( + opCtx.get(), tenantId, currentTimeForTimeBasedExpiration); + } + } else { + numberOfRemovals = _deleteExpiredPreImagesWithCollScan( + opCtx.get(), currentTimeForTimeBasedExpiration); + } + } + + if (numberOfRemovals > 0) { + LOGV2_DEBUG(5869104, + 3, + "Periodic expired pre-images removal job finished executing", + "numberOfRemovals"_attr = numberOfRemovals, + "jobDuration"_attr = (Date_t::now() - startTime).toString()); + } + } catch (const DBException& exception) { + Status interruptStatus = opCtx ? opCtx.get()->checkForInterruptNoAssert() : Status::OK(); + if (!interruptStatus.isOK()) { + LOGV2_DEBUG(5869105, + 3, + "Periodic expired pre-images removal job operation was interrupted", + "errorCode"_attr = interruptStatus); + } else { + LOGV2_ERROR(5869106, + "Periodic expired pre-images removal job failed", + "reason"_attr = exception.reason()); + } } - auto parsedUUID = UUID::parse(preImageObj["_id"].Obj()["nsUUID"]); - tassert(7027400, "Pre-image collection UUID must be of UUID type", parsedUUID.isOK()); - return {std::move(parsedUUID.getValue())}; + + _purgingJobStats.timeElapsedMillis.fetchAndAddRelaxed(timer.millis()); + _purgingJobStats.totalPass.fetchAndAddRelaxed(1); } -/** - * Scans the 'config.system.preimages' collection and deletes the expired pre-images from it. - * - * Pre-images are ordered by collection UUID, ie. if UUID of collection A is ordered before UUID of - * collection B, then pre-images of collection A will be stored before pre-images of collection B. - * - * Pre-images are considered expired based on expiration parameter. In case when expiration - * parameter is not set a pre-image is considered expired if its timestamp is smaller than the - * timestamp of the earliest oplog entry. 
In case when expiration parameter is specified, aside from - * timestamp check a check on the wall clock time of the pre-image recording ('operationTime') is - * performed. If the difference between 'currentTimeForTimeBasedExpiration' and 'operationTime' is - * larger than expiration parameter, the pre-image is considered expired. One of those two - * conditions must be true for a pre-image to be eligible for deletion. - * - * +-------------------------+ - * | config.system.preimages | - * +------------+------------+ - * | - * +--------------------+---------+---------+-----------------------+ - * | | | | - * +-----------+-------+ +----------+--------+ +--------+----------+ +----------+--------+ - * | collA.preImageA | | collA.preImageB | | collB.preImageC | | collB.preImageD | - * +-----------+-------+ +----------+--------+ +---------+---------+ +----------+--------+ - * | timestamp: 1 | | timestamp: 10 | | timestamp: 5 | | timestamp: 9 | - * | applyIndex: 0 | | applyIndex: 0 | | applyIndex: 0 | | applyIndex: 1 | - * +-------------------+ +-------------------+ +-------------------+ +-------------------+ - */ -size_t _deleteExpiredChangeStreamPreImagesCommon(OperationContext* opCtx, - const CollectionPtr& preImageColl, - const MatchExpression* filterPtr, - Timestamp maxRecordIdTimestamp) { +size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredPreImagesWithCollScanCommon( + OperationContext* opCtx, + const CollectionAcquisition& preImageColl, + const MatchExpression* filterPtr, + Timestamp maxRecordIdTimestamp) { size_t numberOfRemovals = 0; boost::optional currentCollectionUUID = boost::none; - while ((currentCollectionUUID = - findNextCollectionUUID(opCtx, &preImageColl, currentCollectionUUID))) { + + // Placeholder for the wall time of the first document of the current pre-images internal + // collection being examined. 
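The rewritten scan loop that follows visits the pre-images collection one nsUUID segment at a time by repeatedly calling `findNextCollectionUUID()` with the previously returned UUID. Isolated from the delete machinery, the traversal pattern is sketched below; the `visit` callback is hypothetical, whereas the real loop runs a batched delete per segment inside `writeConflictRetry`.

```cpp
#include <functional>

#include <boost/optional.hpp>

#include "mongo/db/catalog/collection.h"
#include "mongo/db/change_stream_pre_image_util.h"

namespace mongo {
// Illustrative traversal of the per-collection (nsUUID) segments of the pre-images collection.
void forEachNsUUIDSegment(OperationContext* opCtx,
                          const CollectionPtr* preImagesCollPtr,
                          const std::function<void(const UUID&, Date_t)>& visit) {
    boost::optional<UUID> currentNsUUID;
    Date_t firstDocWallTime{};
    while ((currentNsUUID = change_stream_pre_image_util::findNextCollectionUUID(
                opCtx, preImagesCollPtr, currentNsUUID, firstDocWallTime))) {
        visit(*currentNsUUID, firstDocWallTime);
    }
}
}  // namespace mongo
```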
+ Date_t firstDocWallTime{}; + + while ( + (currentCollectionUUID = change_stream_pre_image_util::findNextCollectionUUID( + opCtx, &preImageColl.getCollectionPtr(), currentCollectionUUID, firstDocWallTime))) { writeConflictRetry( opCtx, "ChangeStreamExpiredPreImagesRemover", - NamespaceString::makePreImageCollectionNSS(boost::none).ns(), + NamespaceString::makePreImageCollectionNSS(boost::none), [&] { - auto params = std::make_unique(); - params->isMulti = true; - - std::unique_ptr batchedDeleteParams; - batchedDeleteParams = std::make_unique(); - RecordIdBound minRecordId(change_stream_pre_image_helpers::toRecordId( - ChangeStreamPreImageId(*currentCollectionUUID, Timestamp(), 0))); - RecordIdBound maxRecordId = - RecordIdBound(change_stream_pre_image_helpers::toRecordId( - ChangeStreamPreImageId(*currentCollectionUUID, - maxRecordIdTimestamp, - std::numeric_limits::max()))); - - auto exec = InternalPlanner::deleteWithCollectionScan( - opCtx, - &preImageColl, - std::move(params), - PlanYieldPolicy::YieldPolicy::YIELD_AUTO, - InternalPlanner::Direction::FORWARD, - std::move(minRecordId), - std::move(maxRecordId), - CollectionScanParams::ScanBoundInclusion::kIncludeBothStartAndEndRecords, - std::move(batchedDeleteParams), - filterPtr, - filterPtr != nullptr); + auto exec = getDeleteExpiredPreImagesExecutor( + opCtx, preImageColl, filterPtr, maxRecordIdTimestamp, *currentCollectionUUID); numberOfRemovals += exec->executeDelete(); + auto batchedDeleteStats = exec->getBatchedDeleteStats(); + + _purgingJobStats.docsDeleted.fetchAndAddRelaxed(batchedDeleteStats.docsDeleted); + _purgingJobStats.bytesDeleted.fetchAndAddRelaxed(batchedDeleteStats.bytesDeleted); + _purgingJobStats.scannedInternalCollections.fetchAndAddRelaxed(1); }); + if (firstDocWallTime > _purgingJobStats.maxStartWallTime.load()) { + _purgingJobStats.maxStartWallTime.store(firstDocWallTime); + } } + _purgingJobStats.scannedCollections.fetchAndAddRelaxed(1); return numberOfRemovals; } -size_t deleteExpiredChangeStreamPreImages(OperationContext* opCtx, - Date_t currentTimeForTimeBasedExpiration) { +size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredPreImagesWithCollScan( + OperationContext* opCtx, Date_t currentTimeForTimeBasedExpiration) { + // Change stream collections can multiply the amount of user data inserted and deleted on each + // node. It is imperative that removal is prioritized so it can keep up with inserts and prevent + // users from running out of disk space. + ScopedAdmissionPriorityForLock skipAdmissionControl(opCtx->lockState(), + AdmissionContext::Priority::kImmediate); + // Acquire intent-exclusive lock on the change collection. - AutoGetCollection preImageColl( - opCtx, NamespaceString::makePreImageCollectionNSS(boost::none), MODE_IX); + const auto preImageColl = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::makePreImageCollectionNSS(boost::none), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); // Early exit if the collection doesn't exist or running on a secondary. 
- if (!preImageColl || + if (!preImageColl.exists() || !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase( - opCtx, DatabaseName::kConfig.toString())) { + opCtx, DatabaseName::kConfig)) { return 0; } @@ -307,7 +393,7 @@ size_t deleteExpiredChangeStreamPreImages(OperationContext* opCtx, const auto currentEarliestOplogEntryTs = repl::StorageInterface::get(opCtx->getServiceContext())->getEarliestOplogTimestamp(opCtx); - const auto preImageExpirationTime = change_stream_pre_image_helpers::getPreImageExpirationTime( + const auto preImageExpirationTime = change_stream_pre_image_util::getPreImageExpirationTime( opCtx, currentTimeForTimeBasedExpiration); // Configure the filter for the case when expiration parameter is set. @@ -320,33 +406,41 @@ size_t deleteExpiredChangeStreamPreImages(OperationContext* opCtx, // If 'preImageExpirationTime' is set, set 'maxRecordIdTimestamp' is set to the maximum // RecordId for this collection. Whether the pre-image has to be deleted will be determined // by the 'filter' parameter. - return _deleteExpiredChangeStreamPreImagesCommon( - opCtx, *preImageColl, &filter, Timestamp::max() /* maxRecordIdTimestamp */); + return _deleteExpiredPreImagesWithCollScanCommon( + opCtx, preImageColl, &filter, Timestamp::max() /* maxRecordIdTimestamp */); } // 'preImageExpirationTime' is not set, so the last expired pre-image timestamp is less than // 'currentEarliestOplogEntryTs'. - return _deleteExpiredChangeStreamPreImagesCommon( + return _deleteExpiredPreImagesWithCollScanCommon( opCtx, - *preImageColl, + preImageColl, nullptr /* filterPtr */, Timestamp(currentEarliestOplogEntryTs.asULL() - 1) /* maxRecordIdTimestamp */); } -size_t deleteExpiredChangeStreamPreImagesForTenants(OperationContext* opCtx, - const TenantId& tenantId, - Date_t currentTimeForTimeBasedExpiration) { - +size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredPreImagesWithCollScanForTenants( + OperationContext* opCtx, const TenantId& tenantId, Date_t currentTimeForTimeBasedExpiration) { + // Change stream collections can multiply the amount of user data inserted and deleted on each + // node. It is imperative that removal is prioritized so it can keep up with inserts and prevent + // users from running out of disk space. + ScopedAdmissionPriorityForLock skipAdmissionControl(opCtx->lockState(), + AdmissionContext::Priority::kImmediate); // Acquire intent-exclusive lock on the change collection. - AutoGetCollection preImageColl(opCtx, - NamespaceString::makePreImageCollectionNSS( - change_stream_serverless_helpers::resolveTenantId(tenantId)), - MODE_IX); + const auto preImageColl = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + NamespaceString::makePreImageCollectionNSS( + change_stream_serverless_helpers::resolveTenantId(tenantId)), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); // Early exit if the collection doesn't exist or running on a secondary. - if (!preImageColl || + if (!preImageColl.exists() || !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase( - opCtx, DatabaseName::kConfig.toString())) { + opCtx, DatabaseName::kConfig)) { return 0; } @@ -357,67 +451,45 @@ size_t deleteExpiredChangeStreamPreImagesForTenants(OperationContext* opCtx, // Set the 'maxRecordIdTimestamp' parameter (upper scan boundary) to maximum possible. Whether // the pre-image has to be deleted will be determined by the 'filter' parameter. 
- return _deleteExpiredChangeStreamPreImagesCommon( - opCtx, *preImageColl, &filter, Timestamp::max() /* maxRecordIdTimestamp */); + return _deleteExpiredPreImagesWithCollScanCommon( + opCtx, preImageColl, &filter, Timestamp::max() /* maxRecordIdTimestamp */); } -} // namespace -void ChangeStreamPreImagesCollectionManager::performExpiredChangeStreamPreImagesRemovalPass( - Client* client) { - Date_t currentTimeForTimeBasedExpiration = Date_t::now(); - - changeStreamPreImageRemoverCurrentTime.execute([&](const BSONObj& data) { - // Populate the current time for time based expiration of pre-images. - if (auto currentTimeElem = data["currentTimeForTimeBasedExpiration"]) { - const BSONType bsonType = currentTimeElem.type(); - tassert(5869300, - str::stream() << "Expected type for 'currentTimeForTimeBasedExpiration' is " - "'date', but found: " - << bsonType, - bsonType == BSONType::Date); - - currentTimeForTimeBasedExpiration = currentTimeElem.Date(); - } - }); +size_t ChangeStreamPreImagesCollectionManager::_deleteExpiredPreImagesWithTruncate( + OperationContext* opCtx, boost::optional tenantId) { + // Change stream collections can multiply the amount of user data inserted and deleted + // on each node. It is imperative that removal is prioritized so it can keep up with + // inserts and prevent users from running out of disk space. + ScopedAdmissionPriorityForLock skipAdmissionControl(opCtx->lockState(), + AdmissionContext::Priority::kImmediate); + const auto preImagesColl = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::makePreImageCollectionNSS(tenantId), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); - const auto startTime = Date_t::now(); - ServiceContext::UniqueOperationContext opCtx; - try { - opCtx = client->makeOperationContext(); - size_t numberOfRemovals = 0; - if (change_stream_serverless_helpers::isChangeCollectionsModeActive()) { - const auto tenantIds = - change_stream_serverless_helpers::getConfigDbTenants(opCtx.get()); - for (const auto& tenantId : tenantIds) { - numberOfRemovals += deleteExpiredChangeStreamPreImagesForTenants( - opCtx.get(), tenantId, currentTimeForTimeBasedExpiration); - } - } else { - numberOfRemovals = - deleteExpiredChangeStreamPreImages(opCtx.get(), currentTimeForTimeBasedExpiration); - } - - if (numberOfRemovals > 0) { - LOGV2_DEBUG(5869104, - 3, - "Periodic expired pre-images removal job finished executing", - "numberOfRemovals"_attr = numberOfRemovals, - "jobDuration"_attr = (Date_t::now() - startTime).toString()); - } - } catch (const DBException& exception) { - Status interruptStatus = opCtx ? opCtx.get()->checkForInterruptNoAssert() : Status::OK(); - if (!interruptStatus.isOK()) { - LOGV2_DEBUG(5869105, - 3, - "Periodic expired pre-images removal job operation was interrupted", - "errorCode"_attr = interruptStatus); - } else { - LOGV2_ERROR(5869106, - "Periodic expired pre-images removal job failed", - "reason"_attr = exception.reason()); - } + if (!preImagesColl.exists()) { + return 0; } + + // Prevent unnecessary latency on an end-user write operation by intialising the truncate + // markers lazily during the background cleanup. 
+ _truncateManager.ensureMarkersInitialized(opCtx, tenantId, preImagesColl); + + auto truncateStats = _truncateManager.truncateExpiredPreImages( + opCtx, tenantId, preImagesColl.getCollectionPtr()); + + _purgingJobStats.docsDeleted.fetchAndAddRelaxed(truncateStats.docsDeleted); + _purgingJobStats.bytesDeleted.fetchAndAddRelaxed(truncateStats.bytesDeleted); + _purgingJobStats.scannedInternalCollections.fetchAndAddRelaxed( + truncateStats.scannedInternalCollections); + + _purgingJobStats.scannedCollections.fetchAndAddRelaxed(1); + + return truncateStats.docsDeleted; } } // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_collection_manager.h b/src/mongo/db/change_stream_pre_images_collection_manager.h index 9b55118de6b58..adce0f4291729 100644 --- a/src/mongo/db/change_stream_pre_images_collection_manager.h +++ b/src/mongo/db/change_stream_pre_images_collection_manager.h @@ -30,53 +30,197 @@ #pragma once #include +#include +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/change_stream_pre_images_truncate_manager.h" +#include "mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.h" +#include "mongo/db/client.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/change_stream_preimage_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/background.h" +#include "mongo/util/concurrent_shared_values_map.h" +#include "mongo/util/time_support.h" namespace mongo { -namespace change_stream_pre_image_helpers { -boost::optional getPreImageExpirationTime(OperationContext* opCtx, Date_t currentTime); - -Timestamp getPreImageTimestamp(const RecordId& rid); - -RecordId toRecordId(ChangeStreamPreImageId id); -} // namespace change_stream_pre_image_helpers - /** * Manages the lifecycle of the change stream pre-images collection(s). Also is responsible for * inserting the pre-images into the pre-images collection. */ class ChangeStreamPreImagesCollectionManager { public: + struct PurgingJobStats { + /** + * Total number of deletion passes completed by the purging job. + */ + AtomicWord totalPass; + + /** + * Cumulative number of pre-image documents deleted by the purging job. + */ + AtomicWord docsDeleted; + + /** + * Cumulative size in bytes of all deleted documents from all pre-image collections by the + * purging job. + */ + AtomicWord bytesDeleted; + + /** + * Cumulative number of pre-image collections scanned by the purging job. In single-tenant + * environments this is the same as totalPass as there is 1 pre-image collection per tenant. + */ + AtomicWord scannedCollections; + + /** + * Cumulative number of internal pre-image collections scanned by the purging job. Internal + * collections are the segments of actual pre-images of collections within system.preimages. + */ + AtomicWord scannedInternalCollections; + + /** + * Cumulative number of milliseconds elapsed since the first pass by the purging job. + */ + AtomicWord timeElapsedMillis; + + /** + * Maximum wall time from the first document of each pre-image collection. + */ + AtomicWord maxStartWallTime; + + /** + * Serializes the purging job statistics to the BSON object. 
+ */ + BSONObj toBSON() const; + }; + + explicit ChangeStreamPreImagesCollectionManager() {} + + ~ChangeStreamPreImagesCollectionManager() = default; + + /** + * Gets the instance of the class using the service context. + */ + static ChangeStreamPreImagesCollectionManager& get(ServiceContext* service); + + /** + * Gets the instance of the class using the operation context. + */ + static ChangeStreamPreImagesCollectionManager& get(OperationContext* opCtx); + /** * Creates the pre-images collection, clustered by the primary key '_id'. The collection is * created for the specific tenant if the 'tenantId' is specified. */ - static void createPreImagesCollection(OperationContext* opCtx, - boost::optional tenantId); + void createPreImagesCollection(OperationContext* opCtx, boost::optional tenantId); /** * Drops the pre-images collection. The collection is dropped for the specific tenant if * the 'tenantId' is specified. */ - static void dropPreImagesCollection(OperationContext* opCtx, - boost::optional tenantId); + void dropPreImagesCollection(OperationContext* opCtx, boost::optional tenantId); /** * Inserts the document into the pre-images collection. The document is inserted into the * tenant's pre-images collection if the 'tenantId' is specified. */ - static void insertPreImage(OperationContext* opCtx, - boost::optional tenantId, - const ChangeStreamPreImage& preImage); + void insertPreImage(OperationContext* opCtx, + boost::optional tenantId, + const ChangeStreamPreImage& preImage); /** * Scans the system pre-images collection and deletes the expired pre-images from it. */ - static void performExpiredChangeStreamPreImagesRemovalPass(Client* client); + void performExpiredChangeStreamPreImagesRemovalPass(Client* client); + + const PurgingJobStats& getPurgingJobStats() { + return _purgingJobStats; + } + +private: + /** + * Scans the 'config.system.preimages' collection and deletes the expired pre-images from it. + * + * Pre-images are ordered by collection UUID, ie. if UUID of collection A is ordered before UUID + * of collection B, then pre-images of collection A will be stored before pre-images of + * collection B. + * + * Pre-images are considered expired based on expiration parameter. In case when expiration + * parameter is not set a pre-image is considered expired if its timestamp is smaller than the + * timestamp of the earliest oplog entry. In case when expiration parameter is specified, aside + * from timestamp check a check on the wall clock time of the pre-image recording + * ('operationTime') is performed. If the difference between 'currentTimeForTimeBasedExpiration' + * and 'operationTime' is larger than expiration parameter, the pre-image is considered expired. + * One of those two conditions must be true for a pre-image to be eligible for deletion. 
+ * + * +-------------------------+ + * | config.system.preimages | + * +------------+------------+ + * | + * +--------------------+---------+---------+-----------------------+ + * | | | | + * +-----------+-------+ +----------+--------+ +--------+----------+ +----------+--------+ + * | collA.preImageA | | collA.preImageB | | collB.preImageC | | collB.preImageD | + * +-----------+-------+ +----------+--------+ +---------+---------+ +----------+--------+ + * | timestamp: 1 | | timestamp: 10 | | timestamp: 5 | | timestamp: 9 | + * | applyIndex: 0 | | applyIndex: 0 | | applyIndex: 0 | | applyIndex: 1 | + * +-------------------+ +-------------------+ +-------------------+ +-------------------+ + */ + + /** + * Common logic for removing expired pre-images with a collection scan. + * + * Returns the number of pre-image documents removed. + */ + size_t _deleteExpiredPreImagesWithCollScanCommon(OperationContext* opCtx, + const CollectionAcquisition& preImageColl, + const MatchExpression* filterPtr, + Timestamp maxRecordIdTimestamp); + + /** + * Removes expired pre-images in a single tenant environment. + * + * Returns the number of pre-image documents removed. + */ + size_t _deleteExpiredPreImagesWithCollScan(OperationContext* opCtx, + Date_t currentTimeForTimeBasedExpiration); + + /** + * Removes expired pre-images for the tenant with 'tenantId'. + * + * Returns the number of pre-image documents removed. + */ + size_t _deleteExpiredPreImagesWithCollScanForTenants(OperationContext* opCtx, + const TenantId& tenantId, + Date_t currentTimeForTimeBasedExpiration); + + /** + * Removes expired pre-images with truncate. Suitable for both serverless and single tenant + * environments. 'tenantId' is boost::none in a single tenant environment. + * + * If 'tenantId' is not yet registered with the '_truncateManager', performs lazy registration + * and initialisation of the tenant's corresponding truncate markers before removing expired + * pre-images. + * + * Returns the number of pre-image documents removed. + */ + size_t _deleteExpiredPreImagesWithTruncate(OperationContext* opCtx, + boost::optional tenantId); + + PurgingJobStats _purgingJobStats; + + /** + * Manages truncate markers and truncation across tenants. Treats a single tenant environment + * the same as a multi-tenant environment, but with only one tenant of TenantId boost::none. + */ + PreImagesTruncateManager _truncateManager; }; } // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_remover_test.cpp b/src/mongo/db/change_stream_pre_images_remover_test.cpp new file mode 100644 index 0000000000000..bfd61c0c0fd2c --- /dev/null +++ b/src/mongo/db/change_stream_pre_images_remover_test.cpp @@ -0,0 +1,590 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/change_stream_options_gen.h" +#include "mongo/db/change_stream_options_manager.h" +#include "mongo/db/change_stream_pre_image_util.h" +#include "mongo/db/change_stream_pre_images_collection_manager.h" +#include "mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.h" +#include "mongo/db/change_stream_serverless_helpers.h" +#include "mongo/db/change_streams_cluster_parameter_gen.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_impl.h" +#include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/change_stream_preimage_gen.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" + +namespace mongo { + +namespace { +std::unique_ptr populateChangeStreamPreImageOptions( + stdx::variant expireAfterSeconds) { + PreAndPostImagesOptions 
preAndPostImagesOptions; + preAndPostImagesOptions.setExpireAfterSeconds(expireAfterSeconds); + + auto changeStreamOptions = std::make_unique(); + changeStreamOptions->setPreAndPostImages(std::move(preAndPostImagesOptions)); + + return changeStreamOptions; +} + +void setChangeStreamOptionsToManager(OperationContext* opCtx, + ChangeStreamOptions& changeStreamOptions) { + auto& changeStreamOptionsManager = ChangeStreamOptionsManager::get(opCtx); + ASSERT_EQ(changeStreamOptionsManager.setOptions(opCtx, changeStreamOptions).getStatus(), + ErrorCodes::OK); +} + +class ChangeStreamPreImageExpirationPolicyTest : public ServiceContextTest { +public: + ChangeStreamPreImageExpirationPolicyTest() { + ChangeStreamOptionsManager::create(getServiceContext()); + } +}; + +TEST_F(ChangeStreamPreImageExpirationPolicyTest, getPreImageExpirationTimeWithValidIntegralValue) { + auto opCtx = cc().makeOperationContext(); + const int64_t expireAfterSeconds = 10; + + auto changeStreamOptions = populateChangeStreamPreImageOptions(expireAfterSeconds); + setChangeStreamOptionsToManager(opCtx.get(), *changeStreamOptions.get()); + + auto currentTime = Date_t::now(); + auto receivedExpireAfterSeconds = + change_stream_pre_image_util::getPreImageExpirationTime(opCtx.get(), currentTime); + ASSERT(receivedExpireAfterSeconds); + ASSERT_EQ(*receivedExpireAfterSeconds, currentTime - Seconds(expireAfterSeconds)); +} + +TEST_F(ChangeStreamPreImageExpirationPolicyTest, getPreImageExpirationTimeWithUnsetValue) { + auto opCtx = cc().makeOperationContext(); + + auto currentTime = Date_t::now(); + auto receivedExpireAfterSeconds = + change_stream_pre_image_util::getPreImageExpirationTime(opCtx.get(), currentTime); + ASSERT_FALSE(receivedExpireAfterSeconds); +} + +TEST_F(ChangeStreamPreImageExpirationPolicyTest, getPreImageExpirationTimeWithOffValue) { + auto opCtx = cc().makeOperationContext(); + + auto changeStreamOptions = populateChangeStreamPreImageOptions("off"); + setChangeStreamOptionsToManager(opCtx.get(), *changeStreamOptions.get()); + + auto currentTime = Date_t::now(); + auto receivedExpireAfterSeconds = + change_stream_pre_image_util::getPreImageExpirationTime(opCtx.get(), currentTime); + ASSERT_FALSE(receivedExpireAfterSeconds); +} +} // namespace + +class PreImagesRemoverTest : public CatalogTestFixture { +protected: + const NamespaceString kPreImageEnabledCollection = + NamespaceString::createNamespaceString_forTest("test.collection"); + + // All truncate markers require a creation method. Unless specifically testing the creation + // method, the creation method is arbitrary and should not impact post-initialisation behavior. + const CollectionTruncateMarkers::MarkersCreationMethod kArbitraryMarkerCreationMethod{ + CollectionTruncateMarkers::MarkersCreationMethod::Scanning}; + + PreImagesRemoverTest() : CatalogTestFixture(Options{}.useMockClock(true)) {} + + ChangeStreamPreImage generatePreImage(const UUID& nsUUID, Timestamp ts) { + auto preImageId = ChangeStreamPreImageId(nsUUID, ts, 0); + const BSONObj doc = BSON("x" << 1); + auto operationTime = Date_t::fromDurationSinceEpoch(Seconds{ts.getSecs()}); + return ChangeStreamPreImage(preImageId, operationTime, doc); + } + + // Populates the pre-images collection with 'numRecords'. Generates pre-images with Timestamps 1 + // millisecond apart starting at 'startOperationTime'. 
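The remover test drives time through the mocked fast clock (see `clockSource()` and `performPass()` below). An alternative, sketched here and not part of this fixture, is to pin the removal pass's notion of "now" with the `changeStreamPreImageRemoverCurrentTime` failpoint added in change_stream_pre_image_util.cpp; the helper name is illustrative.

```cpp
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/change_stream_pre_images_collection_manager.h"
#include "mongo/db/client.h"
#include "mongo/util/fail_point.h"

namespace mongo {
// Test-only sketch: force getCurrentTimeForPreImageRemoval() to observe 'fixedTime' for the
// duration of one removal pass.
void runRemovalPassAtFixedTime(ChangeStreamPreImagesCollectionManager& manager,
                               Client* client,
                               Date_t fixedTime) {
    FailPointEnableBlock fp("changeStreamPreImageRemoverCurrentTime",
                            BSON("currentTimeForTimeBasedExpiration" << fixedTime));
    manager.performExpiredChangeStreamPreImagesRemovalPass(client);
}
}  // namespace mongo
```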
+ void prePopulatePreImagesCollection(boost::optional tenantId, + const NamespaceString& nss, + int64_t numRecords, + Date_t startOperationTime) { + auto preImagesCollectionNss = NamespaceString::makePreImageCollectionNSS(tenantId); + auto opCtx = operationContext(); + auto nsUUID = CollectionCatalog::get(opCtx) + ->lookupCollectionByNamespace(operationContext(), nss) + ->uuid(); + + std::vector preImages; + for (int64_t i = 0; i < numRecords; i++) { + preImages.push_back( + generatePreImage(nsUUID, Timestamp{startOperationTime + Milliseconds{i}})); + } + + std::vector preImageInsertStatements; + std::transform(preImages.begin(), + preImages.end(), + std::back_inserter(preImageInsertStatements), + [](const auto& preImage) { return InsertStatement{preImage.toBSON()}; }); + + AutoGetCollection preImagesCollectionRaii(opCtx, preImagesCollectionNss, MODE_IX); + ASSERT(preImagesCollectionRaii); + WriteUnitOfWork wuow(opCtx); + auto& changeStreamPreImagesCollection = preImagesCollectionRaii.getCollection(); + + auto status = collection_internal::insertDocuments(opCtx, + changeStreamPreImagesCollection, + preImageInsertStatements.begin(), + preImageInsertStatements.end(), + nullptr); + wuow.commit(); + }; + + void insertPreImage(NamespaceString nss, Timestamp operationTime) { + auto uuid = CollectionCatalog::get(operationContext()) + ->lookupCollectionByNamespace(operationContext(), nss) + ->uuid(); + auto& manager = ChangeStreamPreImagesCollectionManager::get(getServiceContext()); + auto opCtx = operationContext(); + WriteUnitOfWork wuow(opCtx); + auto image = generatePreImage(uuid, operationTime); + manager.insertPreImage(opCtx, boost::none, image); + wuow.commit(); + } + + ClockSourceMock* clockSource() { + return static_cast(getServiceContext()->getFastClockSource()); + } + + BSONObj performPass(Milliseconds timeToAdvance) { + auto clock = clockSource(); + clock->advance(timeToAdvance); + auto& manager = ChangeStreamPreImagesCollectionManager::get(getServiceContext()); + auto newClient = getServiceContext()->makeClient(""); + AlternativeClientRegion acr(newClient); + manager.performExpiredChangeStreamPreImagesRemovalPass(&cc()); + return manager.getPurgingJobStats().toBSON(); + } + + void setExpirationTime(Seconds seconds) { + auto opCtx = operationContext(); + auto& optionsManager = ChangeStreamOptionsManager::get(opCtx); + auto options = optionsManager.getOptions(opCtx); + auto preAndPostOptions = options.getPreAndPostImages(); + preAndPostOptions.setExpireAfterSeconds(seconds.count()); + options.setPreAndPostImages(preAndPostOptions); + invariantStatusOK(optionsManager.setOptions(opCtx, options)); + } + + void setExpirationTime(const TenantId& tenantId, Seconds seconds) { + auto* clusterParameters = ServerParameterSet::getClusterParameterSet(); + auto* changeStreamsParam = + clusterParameters + ->get>( + "changeStreams"); + + auto oldSettings = changeStreamsParam->getValue(tenantId); + oldSettings.setExpireAfterSeconds(seconds.count()); + changeStreamsParam->setValue(oldSettings, tenantId).ignore(); + } + + RecordId generatePreImageRecordId(Timestamp timestamp) { + const UUID uuid{UUID::gen()}; + ChangeStreamPreImageId preImageId(uuid, timestamp, 0); + return change_stream_pre_image_util::toRecordId(preImageId); + } + + + RecordId generatePreImageRecordId(Date_t wallTime) { + const UUID uuid{UUID::gen()}; + Timestamp timestamp{wallTime}; + ChangeStreamPreImageId preImageId(uuid, timestamp, 0); + return change_stream_pre_image_util::toRecordId(preImageId); + } + + bool 
hasExcessMarkers(OperationContext* opCtx, PreImagesTruncateMarkersPerNsUUID& markers) { + return markers._hasExcessMarkers(opCtx); + } + + void setUp() override { + CatalogTestFixture::setUp(); + ChangeStreamOptionsManager::create(getServiceContext()); + + // Set up OpObserver so that the test will append actual oplog entries to the oplog using + // repl::logOp(). + auto opObserverRegistry = + dynamic_cast(getServiceContext()->getOpObserver()); + opObserverRegistry->addObserver( + std::make_unique(std::make_unique())); + + auto& manager = ChangeStreamPreImagesCollectionManager::get(getServiceContext()); + manager.createPreImagesCollection(operationContext(), boost::none); + + invariantStatusOK(storageInterface()->createCollection( + operationContext(), kPreImageEnabledCollection, CollectionOptions{})); + } + + // A 'boost::none' tenantId implies a single tenant environment. + boost::optional nullTenantId() { + return boost::none; + } +}; + +// When 'expireAfterSeconds' is off, defaults to comparing the 'lastRecord's Timestamp of oldest +// marker with the Timestamp of the ealiest oplog entry. +// +// When 'expireAfterSeconds' is on, defaults to comparing the 'lastRecord's wallTime with +// the current time - 'expireAfterSeconds', which is already tested as a part of the +// ChangeStreamPreImageExpirationPolicyTest. +TEST_F(PreImagesRemoverTest, hasExcessMarkersExpiredAfterSecondsOff) { + auto opCtx = operationContext(); + + // With no explicit 'expireAfterSeconds', excess markers are determined by whether the Timestamp + // of the 'lastRecord' in the oldest marker is greater than the Timestamp of the earliest oplog + // entry. + auto changeStreamOptions = populateChangeStreamPreImageOptions("off"); + setChangeStreamOptionsToManager(opCtx, *changeStreamOptions.get()); + + const auto currentEarliestOplogEntryTs = + repl::StorageInterface::get(opCtx->getServiceContext())->getEarliestOplogTimestamp(opCtx); + + // Ensure that the generated Timestamp associated with the lastRecord of the marker is less than + // the earliest oplog entry Timestamp. + auto ts = currentEarliestOplogEntryTs - 1; + ASSERT_GT(currentEarliestOplogEntryTs, ts); + auto wallTime = Date_t::fromMillisSinceEpoch(ts.asInt64()); + auto lastRecordId = generatePreImageRecordId(wallTime); + + auto numRecords = 1; + auto numBytes = 100; + std::deque initialMarkers{ + {numRecords, numBytes, lastRecordId, wallTime}}; + + PreImagesTruncateMarkersPerNsUUID markers(nullTenantId() /* tenantId */, + std::move(initialMarkers), + 0, + 0, + 100, + kArbitraryMarkerCreationMethod); + bool excessMarkers = hasExcessMarkers(opCtx, markers); + ASSERT_TRUE(excessMarkers); +} + +TEST_F(PreImagesRemoverTest, hasNoExcessMarkersExpiredAfterSecondsOff) { + auto opCtx = operationContext(); + + // With no explicit 'expireAfterSeconds', excess markers are determined by whether the Timestamp + // of the 'lastRecord' in the oldest marker is greater than the Timestamp of the earliest oplog + // entry. + auto changeStreamOptions = populateChangeStreamPreImageOptions("off"); + setChangeStreamOptionsToManager(opCtx, *changeStreamOptions.get()); + + const auto currentEarliestOplogEntryTs = + repl::StorageInterface::get(opCtx->getServiceContext())->getEarliestOplogTimestamp(opCtx); + + // Ensure that the generated Timestamp associated with the lastRecord of the marker is less than + // the earliest oplog entry Timestamp. 
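In other words, for the "off" case the tests in this block reduce the excess-marker check to a single comparison against the earliest oplog entry: the oldest marker is removable only if the pre-images it covers fall behind that entry. A tiny stand-in for that predicate, with plain integers in place of the server's Timestamp type (hypothetical, for illustration only):

```cpp
#include <cstdint>

// Simplified sketch of the "expireAfterSeconds == off" expiry rule checked above:
// the oldest marker is an "excess" marker when its last record's Timestamp falls
// behind the earliest oplog entry's Timestamp.
bool oldestMarkerIsExcess(std::uint64_t markerLastRecordTs, std::uint64_t earliestOplogTs) {
    return markerLastRecordTs < earliestOplogTs;
}
```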
+ auto ts = currentEarliestOplogEntryTs + 1; + ASSERT_LT(currentEarliestOplogEntryTs, ts); + auto wallTime = Date_t::fromMillisSinceEpoch(ts.asInt64()); + auto lastRecordId = generatePreImageRecordId(wallTime); + + auto numRecords = 1; + auto numBytes = 100; + std::deque initialMarkers{ + {numRecords, numBytes, lastRecordId, wallTime}}; + + PreImagesTruncateMarkersPerNsUUID markers(nullTenantId() /* tenantId */, + std::move(initialMarkers), + 0, + 0, + 100, + kArbitraryMarkerCreationMethod); + bool excessMarkers = hasExcessMarkers(opCtx, markers); + ASSERT_FALSE(excessMarkers); +} + +TEST_F(PreImagesRemoverTest, serverlessHasNoExcessMarkers) { + Seconds expireAfter{1000}; + auto tenantId = change_stream_serverless_helpers::getTenantIdForTesting(); + setExpirationTime(tenantId, expireAfter); + + auto opCtx = operationContext(); + auto wallTime = opCtx->getServiceContext()->getFastClockSource()->now() + Minutes(120); + auto lastRecordId = generatePreImageRecordId(wallTime); + auto numRecords = 1; + auto numBytes = 100; + std::deque initialMarkers{ + {numRecords, numBytes, lastRecordId, wallTime}}; + + PreImagesTruncateMarkersPerNsUUID markers( + tenantId, std::move(initialMarkers), 0, 0, 100, kArbitraryMarkerCreationMethod); + bool excessMarkers = hasExcessMarkers(opCtx, markers); + ASSERT_FALSE(excessMarkers); +} + +TEST_F(PreImagesRemoverTest, serverlessHasExcessMarkers) { + Seconds expireAfter{1}; + auto tenantId = change_stream_serverless_helpers::getTenantIdForTesting(); + setExpirationTime(tenantId, expireAfter); + + auto opCtx = operationContext(); + auto wallTime = opCtx->getServiceContext()->getFastClockSource()->now() - Minutes(120); + auto lastRecordId = generatePreImageRecordId(wallTime); + auto numRecords = 1; + auto numBytes = 100; + std::deque initialMarkers{ + {numRecords, numBytes, lastRecordId, wallTime}}; + + PreImagesTruncateMarkersPerNsUUID markers( + tenantId, std::move(initialMarkers), 0, 0, 100, kArbitraryMarkerCreationMethod); + bool excessMarkers = hasExcessMarkers(opCtx, markers); + ASSERT_TRUE(excessMarkers); +} + +TEST_F(PreImagesRemoverTest, RecordIdToPreImageTimstampRetrieval) { + // Basic case. + { + Timestamp ts0(Date_t::now()); + int64_t applyOpsIndex = 0; + + ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); + auto preImageRecordId = change_stream_pre_image_util::toRecordId(preImageId); + + auto ts1 = change_stream_pre_image_util::getPreImageTimestamp(preImageRecordId); + ASSERT_EQ(ts0, ts1); + } + + // Min Timestamp. + { + Timestamp ts0 = Timestamp::min(); + int64_t applyOpsIndex = 0; + + ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); + auto preImageRecordId = change_stream_pre_image_util::toRecordId(preImageId); + + auto ts1 = change_stream_pre_image_util::getPreImageTimestamp(preImageRecordId); + ASSERT_EQ(ts0, ts1); + } + + // Max Timestamp + { + Timestamp ts0 = Timestamp::max(); + int64_t applyOpsIndex = 0; + + ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); + auto preImageRecordId = change_stream_pre_image_util::toRecordId(preImageId); + + auto ts1 = change_stream_pre_image_util::getPreImageTimestamp(preImageRecordId); + ASSERT_EQ(ts0, ts1); + } + + // Extra large 'applyOpsIndex'. + // + // Parsing a RecordId with an underlying KeyString representation into BSON discards type bits. 
+ // Since the 'applyOpsIndex' is the only field in 'ChangeStreamPreImageId' that requires type + // bits to generate the original value from KeyString, ensure different numeric values of + // 'applyOpsIndex' don't impact the Timestamp retrieval. + { + Timestamp ts0(Date_t::now()); + int64_t applyOpsIndex = std::numeric_limits::max(); + + ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); + auto preImageRecordId = change_stream_pre_image_util::toRecordId(preImageId); + + auto ts1 = change_stream_pre_image_util::getPreImageTimestamp(preImageRecordId); + ASSERT_EQ(ts0, ts1); + } + + // Extra large 'applyOpsIndex' with Timestamp::max(). + { + Timestamp ts0 = Timestamp::max(); + int64_t applyOpsIndex = std::numeric_limits::max(); + + ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); + auto preImageRecordId = change_stream_pre_image_util::toRecordId(preImageId); + + auto ts1 = change_stream_pre_image_util::getPreImageTimestamp(preImageRecordId); + ASSERT_EQ(ts0, ts1); + } +} + +// TODO SERVER-70591: Remove this test as the feature flag will be removed. +TEST_F(PreImagesRemoverTest, EnsureNoMoreInternalScansWithCollectionScans) { + RAIIServerParameterControllerForTest truncateFeatureFlag{ + "featureFlagUseUnreplicatedTruncatesForDeletions", false}; + + auto clock = clockSource(); + insertPreImage(kPreImageEnabledCollection, Timestamp{clock->now()}); + clock->advance(Milliseconds{1}); + insertPreImage(kPreImageEnabledCollection, Timestamp{clock->now()}); + + setExpirationTime(Seconds{1}); + // Verify that expiration works as expected. + auto passStats = performPass(Milliseconds{2000}); + ASSERT_EQ(passStats["totalPass"].numberLong(), 1); + ASSERT_EQ(passStats["docsDeleted"].numberLong(), 2); + ASSERT_EQ(passStats["scannedInternalCollections"].numberLong(), 1); + + // Assert that internal scans do not occur in the old collection scan approach. + passStats = performPass(Milliseconds{2000}); + ASSERT_EQ(passStats["totalPass"].numberLong(), 2); + ASSERT_EQ(passStats["docsDeleted"].numberLong(), 2); + ASSERT_EQ(passStats["scannedInternalCollections"].numberLong(), 1); +} + +TEST_F(PreImagesRemoverTest, EnsureNoMoreInternalScansWithTruncates) { + RAIIServerParameterControllerForTest minBytesPerMarker{ + "preImagesCollectionTruncateMarkersMinBytes", 1}; + RAIIServerParameterControllerForTest truncateFeatureFlag{ + "featureFlagUseUnreplicatedTruncatesForDeletions", true}; + + auto clock = clockSource(); + insertPreImage(kPreImageEnabledCollection, Timestamp{clock->now()}); + clock->advance(Milliseconds{1}); + insertPreImage(kPreImageEnabledCollection, Timestamp{clock->now()}); + + setExpirationTime(Seconds{1}); + // Verify that expiration works as expected. + auto passStats = performPass(Milliseconds{2000}); + ASSERT_EQ(passStats["totalPass"].numberLong(), 1); + ASSERT_EQ(passStats["docsDeleted"].numberLong(), 2); + ASSERT_EQ(passStats["scannedInternalCollections"].numberLong(), 1); + + // Assert that internal scans still occur while the collection exists. + passStats = performPass(Milliseconds{2000}); + ASSERT_EQ(passStats["totalPass"].numberLong(), 2); + ASSERT_EQ(passStats["docsDeleted"].numberLong(), 2); + ASSERT_EQ(passStats["scannedInternalCollections"].numberLong(), 2); + + // Assert that internal scans don't occur if the collection is dropped and no more documents + // exist. 
+ invariantStatusOK( + storageInterface()->dropCollection(operationContext(), kPreImageEnabledCollection)); + passStats = performPass(Milliseconds{2000}); + ASSERT_EQ(passStats["totalPass"].numberLong(), 3); + ASSERT_EQ(passStats["docsDeleted"].numberLong(), 2); + // One more scan occurs after the drop verifying there's no more data and it is safe to ignore + // in the future. + ASSERT_EQ(passStats["scannedInternalCollections"].numberLong(), 3); + + passStats = performPass(Milliseconds{2000}); + ASSERT_EQ(passStats["totalPass"].numberLong(), 4); + ASSERT_EQ(passStats["docsDeleted"].numberLong(), 2); + ASSERT_EQ(passStats["scannedInternalCollections"].numberLong(), 3); +} + +TEST_F(PreImagesRemoverTest, EnsureAllDocsEventualyTruncatedFromPrePopulatedCollection) { + RAIIServerParameterControllerForTest truncateFeatureFlag{ + "featureFlagUseUnreplicatedTruncatesForDeletions", true}; + + auto clock = clockSource(); + auto startOperationTime = clock->now(); + auto numRecords = 1000; + prePopulatePreImagesCollection( + nullTenantId(), kPreImageEnabledCollection, numRecords, startOperationTime); + + // Advance the clock to align with the most recent pre-image inserted. + clock->advance(Milliseconds{numRecords}); + + // Move the clock further ahead to simulate startup with a collection of expired pre-images. + clock->advance(Seconds{10}); + + setExpirationTime(Seconds{1}); + + auto passStats = performPass(Milliseconds{0}); + ASSERT_EQ(passStats["totalPass"].numberLong(), 1); + ASSERT_EQ(passStats["docsDeleted"].numberLong(), numRecords); + ASSERT_EQ(passStats["scannedInternalCollections"].numberLong(), 1); +} + +TEST_F(PreImagesRemoverTest, RemoverPassWithTruncateOnEmptyCollection) { + RAIIServerParameterControllerForTest truncateFeatureFlag{ + "featureFlagUseUnreplicatedTruncatesForDeletions", true}; + + setExpirationTime(Seconds{1}); + + auto passStats = performPass(Milliseconds{0}); + ASSERT_EQ(passStats["totalPass"].numberLong(), 1); + ASSERT_EQ(passStats["docsDeleted"].numberLong(), 0); + ASSERT_EQ(passStats["scannedInternalCollections"].numberLong(), 0); +} + +} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_truncate_manager.cpp b/src/mongo/db/change_stream_pre_images_truncate_manager.cpp new file mode 100644 index 0000000000000..8afebdede68f8 --- /dev/null +++ b/src/mongo/db/change_stream_pre_images_truncate_manager.cpp @@ -0,0 +1,703 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. 
If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/change_stream_pre_images_truncate_manager.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/change_stream_pre_image_util.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/timer.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault + +namespace mongo { +namespace { +struct InitialSamplingEstimates { + int64_t numRecords; + int64_t dataSize; + int64_t estimatedRecordsPerMarker; + int64_t estimatedBytesPerMarker; + int64_t minBytesPerMarker; +}; + +// Container for samples of pre-images keyed by their 'nsUUID'. +using NsUUIDToSamplesMap = stdx:: + unordered_map, UUID::Hash>; + +int64_t countTotalSamples(const NsUUIDToSamplesMap& samplesMap) { + int64_t totalSamples{0}; + for (const auto& [_, nsUUIDSamples] : samplesMap) { + totalSamples = totalSamples + nsUUIDSamples.size(); + } + return totalSamples; +} + +void appendSample(const BSONObj& preImageObj, const RecordId& rId, NsUUIDToSamplesMap& samplesMap) { + auto uuid = change_stream_pre_image_util::getPreImageNsUUID(preImageObj); + if (auto it = samplesMap.find(uuid); it != samplesMap.end()) { + it->second.push_back(CollectionTruncateMarkers::RecordIdAndWallTime{ + rId, PreImagesTruncateMarkersPerNsUUID::getWallTime(preImageObj)}); + } else { + // It's possible concurrent inserts have occurred since the initial point sampling + // to establish the number of NsUUIDs. + samplesMap[uuid] = {CollectionTruncateMarkers::RecordIdAndWallTime{ + rId, PreImagesTruncateMarkersPerNsUUID::getWallTime(preImageObj)}}; + } +} + +// Iterates over each 'nsUUID' captured by the pre-images in 'rs', and populates the 'samplesMap' to +// include the 'RecordIdAndWallTime' for the most recent pre-image inserted for each 'nsUUID'. 
+void sampleLastRecordPerNsUUID(OperationContext* opCtx, + RecordStore* rs, + NsUUIDToSamplesMap& samplesMap) { + auto cursor = rs->getCursor(opCtx, true /** forward **/); + boost::optional record{}; + while ((record = cursor->next())) { + UUID currentNsUUID = change_stream_pre_image_util::getPreImageNsUUID(record->data.toBson()); + RecordId maxRecordIdForCurrentNsUUID = + change_stream_pre_image_util::getAbsoluteMaxPreImageRecordIdBoundForNs(currentNsUUID) + .recordId(); + + // A forward 'seekNear' will return the previous entry if one does not match exactly. This + // should ensure that the 'record's id is greater than the 'maxRecordIdForCurrentNsUUID' and + // no less than the initial record for 'currentNsUUID'. + record = cursor->seekNear(maxRecordIdForCurrentNsUUID); + invariant(record); + invariant(currentNsUUID == + change_stream_pre_image_util::getPreImageNsUUID(record->data.toBson())); + appendSample(record->data.toBson(), record->id, samplesMap); + } +} + +int64_t getBytesAccountedFor( + const CollectionTruncateMarkers::InitialSetOfMarkers& initialSetOfMarkers) { + int64_t totalBytes{0}; + for (const auto& marker : initialSetOfMarkers.markers) { + totalBytes = totalBytes + marker.bytes; + } + totalBytes = totalBytes + initialSetOfMarkers.leftoverRecordsBytes; + return totalBytes; +} + +int64_t getRecordsAccountedFor( + const CollectionTruncateMarkers::InitialSetOfMarkers& initialSetOfMarkers) { + int64_t totalRecords{0}; + for (const auto& marker : initialSetOfMarkers.markers) { + totalRecords = totalRecords + marker.records; + } + totalRecords = totalRecords + initialSetOfMarkers.leftoverRecordsCount; + return totalRecords; +} + +// Returns a map of NsUUID to corresponding samples from the 'preImagesCollectionPtr'. +// +// Guarantees: +// (1) The result will contain at least 1 sample per 'nsUUID' in the pre-images collection. +// (2) For each 'nsUUID', the samples will be ordered as they appear in the underlying pre-images +// collection. +NsUUIDToSamplesMap gatherOrderedSamplesAcrossNsUUIDs( + OperationContext* opCtx, const CollectionAcquisition& preImagesCollection, int64_t numSamples) { + // First, try to obtain 1 sample per 'nsUUID'. + NsUUIDToSamplesMap samplesMap; + sampleLastRecordPerNsUUID( + opCtx, preImagesCollection.getCollectionPtr()->getRecordStore(), samplesMap); + auto numLastRecords = countTotalSamples(samplesMap); + + Timer lastProgressTimer; + + auto samplingLogIntervalSeconds = gCollectionSamplingLogIntervalSeconds.load(); + auto numSamplesRemaining = numSamples - numLastRecords; + auto exec = InternalPlanner::sampleCollection( + opCtx, preImagesCollection, PlanYieldPolicy::YieldPolicy::YIELD_AUTO); + + BSONObj doc; + RecordId rId; + for (int i = 0; i < numSamplesRemaining; i++) { + if (exec->getNext(&doc, &rId) == PlanExecutor::IS_EOF) { + // This really shouldn't happen unless the collection is empty and the size storer was + // really off on its collection size estimate. 
+ break; + } + appendSample(doc, rId, samplesMap); + if (samplingLogIntervalSeconds > 0 && + lastProgressTimer.elapsed() >= Seconds(samplingLogIntervalSeconds)) { + LOGV2(7658600, + "Pre-images collection random sampling progress", + "namespace"_attr = preImagesCollection.nss(), + "completed"_attr = (i + 1), + "totalRandomSamples"_attr = numSamplesRemaining, + "totalSamples"_attr = numSamples); + lastProgressTimer.reset(); + } + } + + for (auto& [_, samples] : samplesMap) { + std::sort( + samples.begin(), + samples.end(), + [](const CollectionTruncateMarkers::RecordIdAndWallTime& a, + const CollectionTruncateMarkers::RecordIdAndWallTime& b) { return a.id < b.id; }); + } + + return samplesMap; +} + +// Each 'PreImagesTruncateMarkersPerNsUUID' accounts for a set of "whole truncate markers" as well +// as the leftover bytes and records not yet captured in a "whole" truncate marker (aka a partial +// marker). +// +// The 'initialEstimates' specifies the estimated number of samples needed to generate a whole +// marker. +// +// Given a set of samples for each 'nsUUID', returns a map with 'PreImagesTruncateMarkersPerNsUUID' +// for each 'nsUUID'. The created 'PreImagesTruncateMarkersPerNsUUID's will only generate whole +// markers. All partial markers will be empty in the result. +PreImagesTruncateManager::TenantTruncateMarkers createWholeMarkersFromSamples( + OperationContext* opCtx, + boost::optional tenantId, + const NsUUIDToSamplesMap& samplesMap, + const InitialSamplingEstimates& initialEstimates, + int64_t& wholeMarkersCreatedOutput) { + PreImagesTruncateManager::TenantTruncateMarkers truncateMarkersMap; + for (const auto& [nsUUID, samples] : samplesMap) { + auto initialWholeMarkers = + PreImagesTruncateMarkersPerNsUUID::createInitialMarkersFromSamples( + opCtx, + nsUUID, + samples, + initialEstimates.estimatedRecordsPerMarker, + initialEstimates.estimatedBytesPerMarker); + wholeMarkersCreatedOutput = wholeMarkersCreatedOutput + initialWholeMarkers.markers.size(); + + auto truncateMarkersForNsUUID = std::make_shared( + tenantId, + std::move(initialWholeMarkers.markers), + 0, + 0, + initialEstimates.minBytesPerMarker, + CollectionTruncateMarkers::MarkersCreationMethod::Sampling); + truncateMarkersMap.emplace(nsUUID, std::move(truncateMarkersForNsUUID)); + } + return truncateMarkersMap; +} + +void distributeUnaccountedBytesAndRecords( + OperationContext* opCtx, + RecordStore* rs, + int64_t recordsAccountedForByMarkers, + int64_t bytesAccountedForByMarkers, + PreImagesTruncateManager::TenantTruncateMarkers& tenantTruncateMarkers) { + // Use a new snapshot of the pre-images collection's numRecords and dataSize to account for + // any concurrent inserts unaccounted for during the initial creation of the + // 'tenantTruncateMarkers'. + auto updatedNumRecords = rs->numRecords(opCtx); + auto updatedDataSize = rs->dataSize(opCtx); + auto totalLeftoverRecords = updatedNumRecords - recordsAccountedForByMarkers; + auto totalLeftoverBytes = updatedDataSize - bytesAccountedForByMarkers; + + if (totalLeftoverRecords < 0 || totalLeftoverBytes < 0) { + // The 'updatedNumRecords' and 'updatedDataSize' are both retrieved by the SizeStorer, which + // can be incorrect after startup. If the records/ bytes accounted for were retrieved via + // scanning, its completely possible they are more accurate than the metrics reported. If + // they were retrieved from sampling, this scenario should be investigated further. + // + // Early exit if there are no more bytes / records to distribute across partial markers. 
+ LOGV2_INFO(7658603, + "Pre-images inital truncate markers account for more bytes and/or records than " + "reported by the size storer", + "initialMarkersRecordsAccountedFor"_attr = recordsAccountedForByMarkers, + "initialMarkersBytesAccountedFor"_attr = bytesAccountedForByMarkers, + "reportedNumRecords"_attr = updatedNumRecords, + "reportedDataSize"_attr = updatedDataSize); + return; + } + + auto numNsUUIDs = tenantTruncateMarkers.size(); + if (totalLeftoverRecords == 0 || totalLeftoverBytes == 0 || numNsUUIDs == 0) { + return; + } + + auto leftoverRecordsPerNsUUID = totalLeftoverRecords / numNsUUIDs; + auto leftoverBytesPerNsUUID = totalLeftoverBytes / numNsUUIDs; + + for (auto& [nsUUID, preImagesTruncateMarkersPerNsUUID] : tenantTruncateMarkers) { + preImagesTruncateMarkersPerNsUUID->updatePartialMarkerForInitialisation( + opCtx, leftoverBytesPerNsUUID, RecordId{}, Date_t{}, leftoverRecordsPerNsUUID); + } + + // Arbitrarily append the remaining records and bytes to one of the marker sets. + int64_t remainderRecords = totalLeftoverRecords % numNsUUIDs; + int64_t remainderBytes = totalLeftoverBytes % numNsUUIDs; + tenantTruncateMarkers.begin()->second->updatePartialMarkerForInitialisation( + opCtx, remainderBytes, RecordId{}, Date_t{}, remainderRecords); +} + +void distributeUnaccountedBytesAndRecords( + OperationContext* opCtx, + RecordStore* rs, + const InitialSamplingEstimates& initialSamplingEstimates, + int64_t numWholeMarkersCreated, + PreImagesTruncateManager::TenantTruncateMarkers& tenantTruncateMarkers) { + auto recordsAccountedForByWholeMarkers = + numWholeMarkersCreated * initialSamplingEstimates.estimatedRecordsPerMarker; + auto bytesAccountedForByWholeMarkers = + numWholeMarkersCreated * initialSamplingEstimates.estimatedBytesPerMarker; + + distributeUnaccountedBytesAndRecords(opCtx, + rs, + recordsAccountedForByWholeMarkers, + bytesAccountedForByWholeMarkers, + tenantTruncateMarkers); +} + +PreImagesTruncateManager::TenantTruncateMarkers getInitialTruncateMarkersForTenantScanning( + OperationContext* opCtx, + boost::optional tenantId, + const CollectionAcquisition& preImagesCollection) { + auto rs = preImagesCollection.getCollectionPtr()->getRecordStore(); + + PreImagesTruncateManager::TenantTruncateMarkers truncateMap; + auto minBytesPerMarker = gPreImagesCollectionTruncateMarkersMinBytes; + + // Number of bytes and records accounted for by truncate markers. + int64_t numBytesAcrossMarkers{0}; + int64_t numRecordsAcrossMarkers{0}; + + boost::optional currentCollectionUUID = boost::none; + + // Step 1: perform a forward scan of the collection. This could take a while for larger + // collections. 
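The distributeUnaccountedBytesAndRecords step above splits whatever bytes and records the initial markers missed evenly across every 'nsUUID', and parks the integer remainder on an arbitrary marker set. A self-contained sketch of just that arithmetic (hypothetical names, no storage types involved):

```cpp
#include <cstddef>
#include <cstdint>
#include <vector>

struct LeftoverShare {
    std::int64_t records{0};
    std::int64_t bytes{0};
};

// Splits unaccounted totals evenly across 'numNsUUIDs' marker sets; the integer
// remainder is assigned to the first set, mirroring the "arbitrarily append the
// remaining records and bytes to one of the marker sets" step above.
std::vector<LeftoverShare> distributeLeftovers(std::int64_t totalRecords,
                                               std::int64_t totalBytes,
                                               std::size_t numNsUUIDs) {
    std::vector<LeftoverShare> shares(numNsUUIDs);
    if (numNsUUIDs == 0 || totalRecords <= 0 || totalBytes <= 0) {
        return shares;  // nothing to distribute, or metrics were off
    }
    const auto n = static_cast<std::int64_t>(numNsUUIDs);
    for (auto& share : shares) {
        share.records = totalRecords / n;
        share.bytes = totalBytes / n;
    }
    shares.front().records += totalRecords % n;
    shares.front().bytes += totalBytes % n;
    return shares;
}
```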
+ Date_t firstWallTime{}; + while ((currentCollectionUUID = change_stream_pre_image_util::findNextCollectionUUID( + opCtx, + &preImagesCollection.getCollectionPtr(), + currentCollectionUUID, + firstWallTime))) { + auto initialSetOfMarkers = PreImagesTruncateMarkersPerNsUUID::createInitialMarkersScanning( + opCtx, preImagesCollection, currentCollectionUUID.get(), minBytesPerMarker); + + numBytesAcrossMarkers = numBytesAcrossMarkers + getBytesAccountedFor(initialSetOfMarkers); + numRecordsAcrossMarkers = + numRecordsAcrossMarkers + getRecordsAccountedFor(initialSetOfMarkers); + + auto truncateMarkers = std::make_shared( + tenantId, + std::move(initialSetOfMarkers.markers), + initialSetOfMarkers.leftoverRecordsCount, + initialSetOfMarkers.leftoverRecordsBytes, + minBytesPerMarker, + CollectionTruncateMarkers::MarkersCreationMethod::Scanning); + truncateMap.emplace(currentCollectionUUID.get(), truncateMarkers); + } + + // Step 2: See if there are records unaccounted for in the initial markers. This can happen if + // there are concurrent inserts into a given 'nsUUID' after the segment was scanned. + distributeUnaccountedBytesAndRecords( + opCtx, rs, numRecordsAcrossMarkers, numBytesAcrossMarkers, truncateMap); + + return truncateMap; +} + +PreImagesTruncateManager::TenantTruncateMarkers getInitialTruncateMarkersForTenantSampling( + OperationContext* opCtx, + boost::optional tenantId, + const CollectionAcquisition& preImagesCollection, + InitialSamplingEstimates&& initialEstimates) { + + uint64_t numSamples = + (CollectionTruncateMarkers::kRandomSamplesPerMarker * initialEstimates.numRecords) / + initialEstimates.estimatedRecordsPerMarker; + + /////////////////////////////////////////////////////////////// + // + // PHASE 1: Gather ordered sample points across the 'nsUUIDs' captured in the pre-images + // collection. + // + // + // {nsUUID: } + // + /////////////////////////////////////////////////////////////// + auto orderedSamples = gatherOrderedSamplesAcrossNsUUIDs(opCtx, preImagesCollection, numSamples); + auto totalSamples = countTotalSamples(orderedSamples); + if (totalSamples != (int64_t)numSamples) { + // Given the distribution of pre-images to 'nsUUID', the number of samples collected cannot + // effectively represent the pre-images collection. Default to scanning instead. + LOGV2(7658601, + "Reverting to scanning for initial pre-images truncate markers. The number of " + "samples collected does not match the desired number of samples", + "samplesTaken"_attr = totalSamples, + "samplesDesired"_attr = numSamples); + return getInitialTruncateMarkersForTenantScanning(opCtx, tenantId, preImagesCollection); + } + + //////////////////////////////////////////////////////////////// + // + // Phase 2: Create the whole truncate markers from the samples generated according to the + // 'initialEstimates'. + // + //////////////////////////////////////////////////////////////// + + int64_t wholeMarkersCreated{0}; + auto tenantTruncateMarkers = createWholeMarkersFromSamples( + opCtx, tenantId, orderedSamples, initialEstimates, wholeMarkersCreated); + + //////////////////////////////////////////////////////////////// + // + // Phase 3: Update 'tenantTruncateMarkers' partial markers with the remaining bytes and records + // not accounted for in the 'wholeMarkersCreated' and distribute them across the 'nsUUID's. 
+ // + //////////////////////////////////////////////////////////////// + auto rs = preImagesCollection.getCollectionPtr()->getRecordStore(); + distributeUnaccountedBytesAndRecords( + opCtx, rs, initialEstimates, wholeMarkersCreated, tenantTruncateMarkers); + + return tenantTruncateMarkers; +} + +// Guarantee: Individual truncate markers and metrics for each 'nsUUID' may not be accurate, but +// cumulatively, the total 'dataSize' and 'numRecords' captured by the set of +// 'TenantTruncateMarkers' should reflect the actual 'dataSize' and 'numRecords' reported by the +// SizeStorer. +PreImagesTruncateManager::TenantTruncateMarkers getInitialTruncateMarkersForTenant( + OperationContext* opCtx, + boost::optional tenantId, + const CollectionAcquisition& preImageCollection) { + auto minBytesPerMarker = gPreImagesCollectionTruncateMarkersMinBytes; + auto rs = preImageCollection.getCollectionPtr()->getRecordStore(); + long long numRecords = rs->numRecords(opCtx); + long long dataSize = rs->dataSize(opCtx); + + // The creationMethod returned is the initial creationMethod to try. However, there is no + // guarantee at this point initialisation won't default to another creation method later in the + // initalisation process. + auto creationMethod = CollectionTruncateMarkers::computeInitialCreationMethod( + numRecords, dataSize, minBytesPerMarker); + LOGV2_INFO(7658604, + "Decided on initial creation method for pre-images truncate markers initialization", + "creationMethod"_attr = CollectionTruncateMarkers::toString(creationMethod), + "datatSize"_attr = dataSize, + "numRecords"_attr = numRecords, + "ns"_attr = preImageCollection.nss()); + + switch (creationMethod) { + case CollectionTruncateMarkers::MarkersCreationMethod::EmptyCollection: + // Default to scanning since 'dataSize' and 'numRecords' could be incorrect. + case CollectionTruncateMarkers::MarkersCreationMethod::Scanning: + return getInitialTruncateMarkersForTenantScanning(opCtx, tenantId, preImageCollection); + case CollectionTruncateMarkers::MarkersCreationMethod::Sampling: { + // Use the collection's average record size to estimate the number of records in + // each marker, and thus estimate the combined size of the records. + double avgRecordSize = double(dataSize) / double(numRecords); + double estimatedRecordsPerMarker = std::ceil(minBytesPerMarker / avgRecordSize); + double estimatedBytesPerMarker = estimatedRecordsPerMarker * avgRecordSize; + + return getInitialTruncateMarkersForTenantSampling( + opCtx, + tenantId, + preImageCollection, + InitialSamplingEstimates{numRecords, + dataSize, + (int64_t)estimatedRecordsPerMarker, + (int64_t)estimatedBytesPerMarker, + minBytesPerMarker}); + } + default: + MONGO_UNREACHABLE; + } +} + +void truncateRange(OperationContext* opCtx, + const CollectionPtr& preImagesColl, + const RecordId& minRecordId, + const RecordId& maxRecordId, + int64_t bytesDeleted, + int64_t docsDeleted) { + // The session might be in use from marker initialisation so we must + // reset it here in order to allow an untimestamped write. + opCtx->recoveryUnit()->abandonSnapshot(); + opCtx->recoveryUnit()->allowOneUntimestampedWrite(); + + WriteUnitOfWork wuow(opCtx); + auto rs = preImagesColl->getRecordStore(); + auto status = rs->rangeTruncate(opCtx, minRecordId, maxRecordId, -bytesDeleted, -docsDeleted); + invariantStatusOK(status); + wuow.commit(); +} + +// Performs a ranged truncate over each expired marker in 'truncateMarkersForNss'. 
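The sampling branch above sizes markers from the collection's average record size: avgRecordSize = dataSize / numRecords and estimatedRecordsPerMarker = ceil(minBytesPerMarker / avgRecordSize). As a rough worked example with assumed numbers (not taken from the change), a 100 MB collection of 1,000,000 records with a 32 MB minimum marker size gives an average record size of about 104.86 bytes, hence roughly 320,000 records and 32 MB per marker. The same arithmetic in isolation:

```cpp
#include <cmath>
#include <cstdint>

struct MarkerEstimates {
    std::int64_t recordsPerMarker;
    std::int64_t bytesPerMarker;
};

// Mirrors the sampling estimate above. Example (assumed numbers): dataSize = 100 MB,
// numRecords = 1'000'000, minBytesPerMarker = 32 MB  =>  avgRecordSize ~ 104.86 B,
// recordsPerMarker = 320'000, bytesPerMarker = 32 MB.
MarkerEstimates estimateMarkerSize(std::int64_t numRecords,
                                   std::int64_t dataSize,
                                   std::int64_t minBytesPerMarker) {
    double avgRecordSize = static_cast<double>(dataSize) / static_cast<double>(numRecords);
    double recordsPerMarker = std::ceil(static_cast<double>(minBytesPerMarker) / avgRecordSize);
    return {static_cast<std::int64_t>(recordsPerMarker),
            static_cast<std::int64_t>(recordsPerMarker * avgRecordSize)};
}
```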
Updates the +// "Output" parameters to communicate the respective docs deleted, bytes deleted, and and maximum +// wall time of documents deleted to the caller. +void truncateExpiredMarkersForNsUUID( + OperationContext* opCtx, + std::shared_ptr truncateMarkersForNsUUID, + const CollectionPtr& preImagesColl, + const UUID& nsUUID, + const RecordId& minRecordIdForNs, + int64_t& totalDocsDeletedOutput, + int64_t& totalBytesDeletedOutput, + Date_t& maxWallTimeForNsTruncateOutput) { + while (auto marker = truncateMarkersForNsUUID->peekOldestMarkerIfNeeded(opCtx)) { + writeConflictRetry(opCtx, "truncate pre-images collection", preImagesColl->ns(), [&] { + auto bytesDeleted = marker->bytes; + auto docsDeleted = marker->records; + + truncateRange(opCtx, + preImagesColl, + minRecordIdForNs, + marker->lastRecord, + bytesDeleted, + docsDeleted); + + if (marker->wallTime > maxWallTimeForNsTruncateOutput) { + maxWallTimeForNsTruncateOutput = marker->wallTime; + } + + truncateMarkersForNsUUID->popOldestMarker(); + + totalDocsDeletedOutput += docsDeleted; + totalBytesDeletedOutput += bytesDeleted; + }); + } +} +} // namespace + +void PreImagesTruncateManager::ensureMarkersInitialized( + OperationContext* opCtx, + boost::optional tenantId, + const CollectionAcquisition& preImagesColl) { + + auto tenantTruncateMarkers = _tenantMap.find(tenantId); + if (!tenantTruncateMarkers) { + _registerAndInitialiseMarkersForTenant(opCtx, tenantId, preImagesColl); + } +} + +PreImagesTruncateManager::TruncateStats PreImagesTruncateManager::truncateExpiredPreImages( + OperationContext* opCtx, + boost::optional tenantId, + const CollectionPtr& preImagesColl) { + TruncateStats stats; + auto tenantTruncateMarkers = _tenantMap.find(tenantId); + if (!tenantTruncateMarkers) { + return stats; + } + + auto snapShottedTruncateMarkers = tenantTruncateMarkers->getUnderlyingSnapshot(); + for (auto& [nsUUID, truncateMarkersForNsUUID] : *snapShottedTruncateMarkers) { + RecordId minRecordId = + change_stream_pre_image_util::getAbsoluteMinPreImageRecordIdBoundForNs(nsUUID) + .recordId(); + + int64_t docsDeletedForNs = 0; + int64_t bytesDeletedForNs = 0; + Date_t maxWallTimeForNsTruncate{}; + truncateExpiredMarkersForNsUUID(opCtx, + truncateMarkersForNsUUID, + preImagesColl, + nsUUID, + minRecordId, + docsDeletedForNs, + bytesDeletedForNs, + maxWallTimeForNsTruncate); + + // Best effort for removing all expired pre-images from 'nsUUID'. If there is a partial + // marker which can be made into an expired marker, try to remove the new marker as well. + truncateMarkersForNsUUID->createPartialMarkerIfNecessary(opCtx); + truncateExpiredMarkersForNsUUID(opCtx, + truncateMarkersForNsUUID, + preImagesColl, + nsUUID, + minRecordId, + docsDeletedForNs, + bytesDeletedForNs, + maxWallTimeForNsTruncate); + + if (maxWallTimeForNsTruncate > stats.maxStartWallTime) { + stats.maxStartWallTime = maxWallTimeForNsTruncate; + } + stats.docsDeleted = stats.docsDeleted + docsDeletedForNs; + stats.bytesDeleted = stats.bytesDeleted + bytesDeletedForNs; + stats.scannedInternalCollections++; + + // If the source collection doesn't exist and there's no more data to erase we can safely + // remove the markers. Perform a final truncate to remove all elements just in case. 
+ if (CollectionCatalog::get(opCtx)->lookupCollectionByUUID(opCtx, nsUUID) == nullptr && + truncateMarkersForNsUUID->isEmpty()) { + + RecordId maxRecordId = + change_stream_pre_image_util::getAbsoluteMaxPreImageRecordIdBoundForNs(nsUUID) + .recordId(); + + writeConflictRetry(opCtx, "final truncate", preImagesColl->ns(), [&] { + truncateRange(opCtx, preImagesColl, minRecordId, maxRecordId, 0, 0); + }); + + tenantTruncateMarkers->erase(nsUUID); + } + } + + return stats; +} + +void PreImagesTruncateManager::dropAllMarkersForTenant(boost::optional tenantId) { + _tenantMap.erase(tenantId); +} + +void PreImagesTruncateManager::updateMarkersOnInsert(OperationContext* opCtx, + boost::optional tenantId, + const ChangeStreamPreImage& preImage, + int64_t bytesInserted) { + dassert(bytesInserted != 0); + auto tenantTruncateMarkers = _tenantMap.find(tenantId); + if (!tenantTruncateMarkers) { + return; + } + + auto nsUuid = preImage.getId().getNsUUID(); + auto truncateMarkersForNsUUID = tenantTruncateMarkers->find(nsUuid); + + if (!truncateMarkersForNsUUID) { + // There are 2 possible scenarios here: + // (1) The 'tenantTruncateMarkers' was created, but isn't done with + // initialisation. In this case, the truncate markers created for 'nsUUID' may or may not + // be overwritten in the initialisation process. This is okay, lazy initialisation is + // performed by the remover thread to avoid blocking writes on startup and is strictly best + // effort. + // + // (2) Pre-images were either recently enabled on 'nsUUID' or 'nsUUID' was just created. + // Either way, the first pre-images enabled insert to call 'getOrEmplace()' creates the + // truncate markers for the 'nsUUID'. Any following calls to 'getOrEmplace()' return a + // pointer to the existing truncate markers. + truncateMarkersForNsUUID = tenantTruncateMarkers->getOrEmplace( + nsUuid, + tenantId, + std::deque{}, + 0, + 0, + gPreImagesCollectionTruncateMarkersMinBytes, + CollectionTruncateMarkers::MarkersCreationMethod::EmptyCollection); + } + + auto wallTime = preImage.getOperationTime(); + auto recordId = change_stream_pre_image_util::toRecordId(preImage.getId()); + truncateMarkersForNsUUID->updateCurrentMarkerAfterInsertOnCommit( + opCtx, bytesInserted, recordId, wallTime, 1); +} + +void PreImagesTruncateManager::_registerAndInitialiseMarkersForTenant( + OperationContext* opCtx, + boost::optional tenantId, + const CollectionAcquisition& preImagesCollection) { + // First register the 'tenantId' in the '_tenantMap' without any truncate markers. This allows + // for concurrent inserts to be temporarily create their own truncate markers while + // initialisation proceeds. + auto tenantMapEntry = _tenantMap.getOrEmplace(tenantId); + auto initialisedTenantTruncateMarkers = + getInitialTruncateMarkersForTenant(opCtx, tenantId, preImagesCollection); + + tenantMapEntry->updateWith( + [&](const PreImagesTruncateManager::TenantTruncateMarkers& tenantMapEntryPlaceHolder) { + // Critical section where no other threads can modify the 'tenantMapEntry'. + + // If the 'tenantMapEntryPlaceHolder' contains markers for an 'nsUUID' not + // captured in the 'initialisedTenantTruncateMarkers', it is safe to append them to the + // resulting map entry. Otherwise, they will be overwritten since initalisation is best + // effort and there are no guarantees the data tracked is completely correct, only that + // it will eventually be correct once the inital truncate markers created are truncated. 
+ for (const auto& [nsUUID, nsTruncateMarkers] : tenantMapEntryPlaceHolder) { + if (initialisedTenantTruncateMarkers.find(nsUUID) == + initialisedTenantTruncateMarkers.end()) { + initialisedTenantTruncateMarkers.emplace(nsUUID, nsTruncateMarkers); + } + } + return initialisedTenantTruncateMarkers; + }); + + // Partial marker expiration relies on the highest recordId and wallTime seen for each 'nsUUID' + // to be greater than or equal to those of the latest pre-image inserted for the 'nsUUID'. Since + // concurrent inserts during initialisation are not guaranteed to be captured by the truncate + // markers now placed in the '_tenantMap', perform a forward pass to ensure each 'nsUUID' has an + // accurate highest recordId and wallTime. + // + // This step is also necessary for markers generated through samples, which are produced with + // default highest recordId and wallTimes up to this point. + auto rs = preImagesCollection.getCollectionPtr()->getRecordStore(); + NsUUIDToSamplesMap highestRecordIdAndWallTimeSamples; + sampleLastRecordPerNsUUID(opCtx, rs, highestRecordIdAndWallTimeSamples); + auto snapShottedTruncateMarkers = tenantMapEntry->getUnderlyingSnapshot(); + for (auto& [nsUUID, truncateMarkersForNsUUID] : *snapShottedTruncateMarkers) { + // At this point, truncation could not possible occur yet, so the lastRecordIdAndWallTimes + // is expected to always contain an entry for the 'nsUUID'. + auto nsUUIDHighestRidAndWallTime = highestRecordIdAndWallTimeSamples.find(nsUUID); + invariant(nsUUIDHighestRidAndWallTime != highestRecordIdAndWallTimeSamples.end()); + + auto [highestRid, highestWallTime] = nsUUIDHighestRidAndWallTime->second[0]; + truncateMarkersForNsUUID->updatePartialMarkerForInitialisation( + opCtx, 0, highestRid, highestWallTime, 0); + } +} + +} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_truncate_manager.h b/src/mongo/db/change_stream_pre_images_truncate_manager.h new file mode 100644 index 0000000000000..eb2ac7bb4f391 --- /dev/null +++ b/src/mongo/db/change_stream_pre_images_truncate_manager.h @@ -0,0 +1,138 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */
+
+#pragma once
+
+#include
+#include
+#include
+#include
+
+#include "mongo/db/catalog/collection.h"
+#include "mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/pipeline/change_stream_preimage_gen.h"
+#include "mongo/db/shard_role.h"
+#include "mongo/db/storage/collection_truncate_markers.h"
+#include "mongo/db/tenant_id.h"
+#include "mongo/util/concurrent_shared_values_map.h"
+#include "mongo/util/time_support.h"
+#include "mongo/util/uuid.h"
+
+/**
+ * There is up to one 'config.system.preimages' collection per tenant. This pre-images
+ * collection contains pre-images for every collection 'nsUUID' with pre-images enabled on the
+ * tenant. The pre-images collection is ordered by collection 'nsUUID', so that pre-images belonging
+ * to a given collection are grouped together. Additionally, pre-images for a given collection
+ * 'nsUUID' are stored in timestamp order, which makes range truncation possible.
+ *
+ * Implementation of truncate markers for pre-images associated with a single collection 'nsUUID'
+ * within a pre-images collection.
+ */
+namespace mongo {
+/**
+ * Manages the truncation of expired pre-images for pre-images collection(s). There is up to one
+ * "config.system.preimages" pre-images collection per tenant.
+ *
+ * In a single-tenant environment, there is only one "config.system.preimages" pre-images
+ * collection. In that case, the corresponding truncate markers are mapped to TenantId
+ * 'boost::none'.
+ *
+ * Responsible for constructing and managing truncate markers across tenants - and for each tenant,
+ * across all 'nsUUID's with pre-images enabled on the tenant.
+ */
+class PreImagesTruncateManager {
+public:
+    /**
+     * Statistics for a truncate pass over a given tenant's pre-images collection.
+     */
+    struct TruncateStats {
+        int64_t bytesDeleted{0};
+        int64_t docsDeleted{0};
+
+        // The number of 'nsUUID's scanned in the truncate pass.
+        int64_t scannedInternalCollections{0};
+
+        // The maximum wall time from the pre-images truncated across the collection.
+        Date_t maxStartWallTime{};
+    };
+
+    /**
+     * If truncate markers do not exist for 'tenantId', create the initial set of markers by
+     * sampling or scanning records in the collection. Otherwise, this is a no-op.
+     */
+    void ensureMarkersInitialized(OperationContext* opCtx,
+                                  boost::optional<TenantId> tenantId,
+                                  const CollectionAcquisition& preImagesColl);
+
+    /*
+     * Truncates expired pre-images spanning the 'preImagesColl' associated with the 'tenantId'.
+     * Performs in-memory cleanup of the tenant's truncate markers whenever an underlying 'nsUUID'
+     * associated with pre-images is dropped.
+     */
+    TruncateStats truncateExpiredPreImages(OperationContext* opCtx,
+                                           boost::optional<TenantId> tenantId,
+                                           const CollectionPtr& preImagesColl);
+
+    /**
+     * Exclusively used when the 'config.system.preimages' collection associated with 'tenantId' is
+     * dropped. All markers will be dropped immediately.
+     */
+    void dropAllMarkersForTenant(boost::optional<TenantId> tenantId);
+
+    /**
+     * Updates truncate markers to account for a newly inserted 'preImage' into the tenant's
+     * pre-images collection. If no truncate markers have been created for the 'tenantId's
+     * pre-images collection, this is a no-op.
+ */ + void updateMarkersOnInsert(OperationContext* opCtx, + boost::optional tenantId, + const ChangeStreamPreImage& preImage, + int64_t bytesInserted); + + using TenantTruncateMarkers = + absl::flat_hash_map, UUID::Hash>; + +private: + friend class PreImagesTruncateManagerTest; + + void _registerAndInitialiseMarkersForTenant(OperationContext* opCtx, + boost::optional tenantId, + const CollectionAcquisition& preImagesCollection); + /** + * Similar to the 'TenantTruncateMarkers' type, but with an added wrapper which enables copy on + * write semantics. + */ + using TenantTruncateMarkersCopyOnWrite = + ConcurrentSharedValuesMap; + using TenantMap = + ConcurrentSharedValuesMap, TenantTruncateMarkersCopyOnWrite>; + TenantMap _tenantMap; +}; +} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_truncate_manager_test.cpp b/src/mongo/db/change_stream_pre_images_truncate_manager_test.cpp new file mode 100644 index 0000000000000..447bbf104a31e --- /dev/null +++ b/src/mongo/db/change_stream_pre_images_truncate_manager_test.cpp @@ -0,0 +1,555 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
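Before the truncate-manager tests below, a brief sketch of how the public API declared in the header above is expected to be driven for one tenant. This is a hypothetical driver for illustration, not code from the change; it assumes the caller has already acquired the tenant's pre-images collection and holds an operation context.

```cpp
#include <boost/optional.hpp>

#include "mongo/db/change_stream_pre_images_truncate_manager.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/shard_role.h"
#include "mongo/db/tenant_id.h"

namespace mongo {
// Hypothetical remover-pass driver over a single tenant's pre-images collection.
// Collection acquisition, locking, and error handling are assumed to happen in the caller.
PreImagesTruncateManager::TruncateStats runTruncatePassSketch(
    OperationContext* opCtx,
    boost::optional<TenantId> tenantId,
    const CollectionAcquisition& preImagesColl,
    PreImagesTruncateManager& manager) {
    // Lazily builds the per-nsUUID truncate markers (by scanning or sampling) the first
    // time this tenant is seen; subsequent calls are no-ops.
    manager.ensureMarkersInitialized(opCtx, tenantId, preImagesColl);

    // Range-truncates every expired marker and reports what was removed.
    return manager.truncateExpiredPreImages(opCtx, tenantId, preImagesColl.getCollectionPtr());
}
}  // namespace mongo
```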
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/change_stream_options_manager.h" +#include "mongo/db/change_stream_pre_images_collection_manager.h" +#include "mongo/db/change_stream_pre_images_truncate_manager.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_impl.h" +#include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/pipeline/change_stream_preimage_gen.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +namespace mongo { +class PreImagesTruncateManagerTest : public CatalogTestFixture { +protected: + PreImagesTruncateManagerTest() : CatalogTestFixture(Options{}.useMockClock(true)) {} + ChangeStreamPreImage generatePreImage(const UUID& nsUUID, Timestamp ts, int64_t dataFieldSize) { + auto preImageId = ChangeStreamPreImageId(nsUUID, ts, 0); + const auto strField = std::string(dataFieldSize, 'a'); + const BSONObj doc = BSON("dataField" << strField); + auto operationTime = Date_t::fromDurationSinceEpoch(Seconds{ts.getSecs()}); + return ChangeStreamPreImage(preImageId, operationTime, doc); + } + + void prePopulatePreImagesCollection(const NamespaceString& preImagesNss, + const UUID& nsUUID, + int64_t dataFieldSize, + int64_t numPreImages) { + std::vector preImages; + for (int64_t i = 0; i < numPreImages; i++) { + preImages.push_back( + generatePreImage(nsUUID, Timestamp{Date_t::now()} + i, dataFieldSize)); + } + + std::vector preImageInsertStatements; + std::transform(preImages.begin(), + preImages.end(), + std::back_inserter(preImageInsertStatements), + [](const auto& preImage) { return InsertStatement{preImage.toBSON()}; }); + + auto opCtx = operationContext(); + AutoGetCollection preImagesCollectionRaii(opCtx, preImagesNss, MODE_IX); + ASSERT(preImagesCollectionRaii); + WriteUnitOfWork wuow(opCtx); + auto& changeStreamPreImagesCollection = preImagesCollectionRaii.getCollection(); + + auto status = collection_internal::insertDocuments(opCtx, + changeStreamPreImagesCollection, + preImageInsertStatements.begin(), + preImageInsertStatements.end(), + nullptr); + wuow.commit(); + } + + ClockSourceMock* clockSource() { + return 
static_cast(getServiceContext()->getFastClockSource()); + } + + void ensureMarkersInitialized(const NamespaceString& preImagesCollectionNss, + boost::optional tenantId, + PreImagesTruncateManager& truncateManager) { + auto opCtx = operationContext(); + const auto preImagesCollRAII = acquireCollection( + opCtx, + CollectionAcquisitionRequest(preImagesCollectionNss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kRead), + MODE_IS); + + truncateManager.ensureMarkersInitialized(opCtx, tenantId, preImagesCollRAII); + }; + + void createPreImagesCollection(boost::optional tenantId) { + auto preImagesCollectionNss = NamespaceString::makePreImageCollectionNSS(tenantId); + const auto opCtx = operationContext(); + ChangeStreamPreImagesCollectionManager::get(opCtx).createPreImagesCollection(opCtx, + tenantId); + } + + void setUp() override { + CatalogTestFixture::setUp(); + ChangeStreamOptionsManager::create(getServiceContext()); + + // Set up OpObserver so that the test will append actual oplog entries to the oplog + // using repl::logOp(). + auto opObserverRegistry = + dynamic_cast(getServiceContext()->getOpObserver()); + opObserverRegistry->addObserver( + std::make_unique(std::make_unique())); + } + + std::tuple getNumRecordsAndDataSize( + const NamespaceString& preImagesCollectionNss) { + auto opCtx = operationContext(); + const auto preImagesCollRAII = acquireCollection( + opCtx, + CollectionAcquisitionRequest(preImagesCollectionNss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kRead), + MODE_IS); + // Retrieve the actual data size and number of records for the collection. + const auto& preImagesColl = preImagesCollRAII.getCollectionPtr(); + return std::make_tuple(preImagesColl->numRecords(opCtx), preImagesColl->dataSize(opCtx)); + } + + void validateNumRecordsInMarkers(const PreImagesTruncateManager& truncateManager, + boost::optional tenantId, + int64_t expectedNumRecords) { + auto tenantCollectionMarkers = truncateManager._tenantMap.find(tenantId); + ASSERT(tenantCollectionMarkers); + + auto markersSnapshot = tenantCollectionMarkers->getUnderlyingSnapshot(); + int64_t numRecords{0}; + for (auto& [nsUUID, truncateMarkersForNsUUID] : *markersSnapshot) { + auto markers = truncateMarkersForNsUUID->getMarkers_forTest(); + for (const auto& marker : markers) { + numRecords = numRecords + marker.records; + } + numRecords = numRecords + truncateMarkersForNsUUID->currentRecords_forTest(); + } + ASSERT_EQ(numRecords, expectedNumRecords); + } + + void validateNumBytesInMarkers(const PreImagesTruncateManager& truncateManager, + boost::optional tenantId, + int64_t expectedNumBytes) { + auto tenantCollectionMarkers = truncateManager._tenantMap.find(tenantId); + ASSERT(tenantCollectionMarkers); + + auto markersSnapshot = tenantCollectionMarkers->getUnderlyingSnapshot(); + int64_t numBytes{0}; + for (auto& [nsUUID, truncateMarkersForNsUUID] : *markersSnapshot) { + auto markers = truncateMarkersForNsUUID->getMarkers_forTest(); + for (const auto& marker : markers) { + numBytes = numBytes + marker.bytes; + } + numBytes = numBytes + truncateMarkersForNsUUID->currentBytes_forTest(); + } + ASSERT_EQ(numBytes, expectedNumBytes); + } + + void validateMarkersDontExistForNsUUID(const PreImagesTruncateManager& truncateManager, + boost::optional tenantId, + const UUID& nsUUID) { + auto tenantCollectionMarkers = truncateManager._tenantMap.find(tenantId); + 
ASSERT(tenantCollectionMarkers); + + ASSERT(!tenantCollectionMarkers->find(nsUUID)); + } + + void validateCreationMethod( + const PreImagesTruncateManager& truncateManager, + boost::optional tenantId, + const UUID& nsUUID, + CollectionTruncateMarkers::MarkersCreationMethod expectedCreationMethod) { + auto tenantCollectionMarkers = truncateManager._tenantMap.find(tenantId); + ASSERT(tenantCollectionMarkers); + + auto nsUUIDTruncateMarkers = tenantCollectionMarkers->find(nsUUID); + ASSERT(nsUUIDTruncateMarkers); + ASSERT_EQ(nsUUIDTruncateMarkers->markersCreationMethod(), expectedCreationMethod); + } + + void validateMarkersExistForNsUUID(const PreImagesTruncateManager& truncateManager, + boost::optional tenantId, + const UUID& nsUUID) { + auto tenantCollectionMarkers = truncateManager._tenantMap.find(tenantId); + ASSERT(tenantCollectionMarkers); + + ASSERT(tenantCollectionMarkers->find(nsUUID)); + } + + void validateIncreasingRidAndWallTimesInMarkers(const PreImagesTruncateManager& truncateManager, + boost::optional tenantId) { + auto tenantCollectionMarkers = truncateManager._tenantMap.find(tenantId); + ASSERT(tenantCollectionMarkers); + + auto markersSnapshot = tenantCollectionMarkers->getUnderlyingSnapshot(); + for (auto& [nsUUID, truncateMarkersForNsUUID] : *markersSnapshot) { + auto markers = truncateMarkersForNsUUID->getMarkers_forTest(); + + RecordId highestSeenRecordId{}; + Date_t highestSeenWallTime{}; + for (const auto& marker : markers) { + auto currentRid = marker.lastRecord; + auto currentWallTime = marker.wallTime; + if (currentRid < highestSeenRecordId || currentWallTime < highestSeenWallTime) { + // Something went wrong during marker initialisation. Log the details of which + // 'nsUUID' failed for debugging before failing the test. + LOGV2_ERROR(7658610, + "Truncate markers created for pre-images with nsUUID were not " + "initialised in increasing order of highest wall time and RecordId", + "nsUUID"_attr = nsUUID, + "tenant"_attr = tenantId, + "highestSeenWallTime"_attr = highestSeenWallTime, + "highestSeenRecordId"_attr = highestSeenRecordId, + "markerRecordId"_attr = currentRid, + "markerWallTime"_attr = currentWallTime); + } + ASSERT_GTE(currentRid, highestSeenRecordId); + ASSERT_GTE(currentWallTime, highestSeenWallTime); + highestSeenRecordId = currentRid; + highestSeenWallTime = currentWallTime; + } + + const auto& [partialMarkerHighestRid, partialMarkerHighestWallTime] = + truncateMarkersForNsUUID->getPartialMarker_forTest(); + ASSERT_GTE(partialMarkerHighestRid, highestSeenRecordId); + ASSERT_GTE(partialMarkerHighestWallTime, highestSeenWallTime); + } + } +}; + +TEST_F(PreImagesTruncateManagerTest, ScanningSingleNsUUIDSingleTenant) { + auto minBytesPerMarker = 1; + RAIIServerParameterControllerForTest minBytesPerMarkerController{ + "preImagesCollectionTruncateMarkersMinBytes", minBytesPerMarker}; + + boost::optional nullTenantId = boost::none; + createPreImagesCollection(nullTenantId); + + auto preImagesCollectionNss = NamespaceString::makePreImageCollectionNSS(nullTenantId); + auto nsUUID0 = UUID::gen(); + + prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID0, 1, 3000); + auto [preImagesRecordStoreNumRecords, preImagesRecordStoreDataSize] = + getNumRecordsAndDataSize(preImagesCollectionNss); + + PreImagesTruncateManager truncateManager; + ensureMarkersInitialized(preImagesCollectionNss, nullTenantId, truncateManager); + + validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID0); + + validateCreationMethod(truncateManager, + nullTenantId, + 
nsUUID0, + CollectionTruncateMarkers::MarkersCreationMethod::Scanning); + + validateNumRecordsInMarkers(truncateManager, nullTenantId, preImagesRecordStoreNumRecords); + validateNumBytesInMarkers(truncateManager, nullTenantId, preImagesRecordStoreDataSize); + + validateIncreasingRidAndWallTimesInMarkers(truncateManager, nullTenantId); +} + +TEST_F(PreImagesTruncateManagerTest, ScanningTwoNsUUIDsSingleTenant) { + auto minBytesPerMarker = 1; + RAIIServerParameterControllerForTest minBytesPerMarkerController{ + "preImagesCollectionTruncateMarkersMinBytes", minBytesPerMarker}; + + boost::optional nullTenantId = boost::none; + createPreImagesCollection(nullTenantId); + + auto preImagesCollectionNss = NamespaceString::makePreImageCollectionNSS(nullTenantId); + auto nsUUID0 = UUID::gen(); + auto nsUUID1 = UUID::gen(); + + prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID0, 100, 10); + prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID1, 1, 1990); + + auto [preImagesRecordStoreNumRecords, preImagesRecordStoreDataSize] = + getNumRecordsAndDataSize(preImagesCollectionNss); + + PreImagesTruncateManager truncateManager; + ensureMarkersInitialized(preImagesCollectionNss, nullTenantId, truncateManager); + + validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID0); + validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID1); + + validateCreationMethod(truncateManager, + nullTenantId, + nsUUID0, + CollectionTruncateMarkers::MarkersCreationMethod::Scanning); + validateCreationMethod(truncateManager, + nullTenantId, + nsUUID1, + CollectionTruncateMarkers::MarkersCreationMethod::Scanning); + + validateNumRecordsInMarkers(truncateManager, nullTenantId, preImagesRecordStoreNumRecords); + validateNumBytesInMarkers(truncateManager, nullTenantId, preImagesRecordStoreDataSize); +} + +TEST_F(PreImagesTruncateManagerTest, SamplingSingleNsUUIDSingleTenant) { + auto minBytesPerMarker = 1024 * 25; // 25KB to downsize for testing. + RAIIServerParameterControllerForTest minBytesPerMarkerController{ + "preImagesCollectionTruncateMarkersMinBytes", minBytesPerMarker}; + + boost::optional nullTenantId = boost::none; + createPreImagesCollection(nullTenantId); + + auto preImagesCollectionNss = NamespaceString::makePreImageCollectionNSS(nullTenantId); + auto nsUUID0 = UUID::gen(); + + prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID0, 1, 4000); + auto [preImagesRecordStoreNumRecords, preImagesRecordStoreDataSize] = + getNumRecordsAndDataSize(preImagesCollectionNss); + + PreImagesTruncateManager truncateManager; + ensureMarkersInitialized(preImagesCollectionNss, nullTenantId, truncateManager); + + validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID0); + + validateCreationMethod(truncateManager, + nullTenantId, + nsUUID0, + CollectionTruncateMarkers::MarkersCreationMethod::Sampling); + + validateNumRecordsInMarkers(truncateManager, nullTenantId, preImagesRecordStoreNumRecords); + validateNumBytesInMarkers(truncateManager, nullTenantId, preImagesRecordStoreDataSize); + + validateIncreasingRidAndWallTimesInMarkers(truncateManager, nullTenantId); +} + +// Tests that markers initialized from a pre-populated pre-images collection guarantee that the +// total size and number of records across the pre-images collection are captured in the generated +// truncate markers. No other guarantees can be made aside from that the cumulative size and number +// of records across the tenant's nsUUIDs will be consistent. 
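+// Illustrative note: during sampling, each whole marker is assigned the same estimated
+// records/bytes derived from collection-wide totals, so an individual nsUUID's markers may over-
+// or under-count that nsUUID's actual share; only the sums across all of the tenant's markers are
+// expected to match the record store's numRecords() and dataSize().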
+TEST_F(PreImagesTruncateManagerTest, SamplingTwoNsUUIDsSingleTenant) { + auto minBytesPerMarker = 1024 * 100; // 100KB. + RAIIServerParameterControllerForTest minBytesPerMarkerController{ + "preImagesCollectionTruncateMarkersMinBytes", minBytesPerMarker}; + + boost::optional nullTenantId = boost::none; + createPreImagesCollection(nullTenantId); + + auto preImagesCollectionNss = NamespaceString::makePreImageCollectionNSS(nullTenantId); + auto nsUUID0 = UUID::gen(); + auto nsUUID1 = UUID::gen(); + + prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID0, 100, 1000); + prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID1, 1, 1000); + auto [preImagesRecordStoreNumRecords, preImagesRecordStoreDataSize] = + getNumRecordsAndDataSize(preImagesCollectionNss); + + PreImagesTruncateManager truncateManager; + ensureMarkersInitialized(preImagesCollectionNss, nullTenantId, truncateManager); + + validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID0); + validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID1); + + validateCreationMethod(truncateManager, + nullTenantId, + nsUUID0, + CollectionTruncateMarkers::MarkersCreationMethod::Sampling); + validateCreationMethod(truncateManager, + nullTenantId, + nsUUID1, + CollectionTruncateMarkers::MarkersCreationMethod::Sampling); + + validateNumRecordsInMarkers(truncateManager, nullTenantId, preImagesRecordStoreNumRecords); + validateNumBytesInMarkers(truncateManager, nullTenantId, preImagesRecordStoreDataSize); + + validateIncreasingRidAndWallTimesInMarkers(truncateManager, nullTenantId); +} + +TEST_F(PreImagesTruncateManagerTest, SamplingTwoNsUUIDsManyRecordsToFewSingleTenant) { + auto minBytesPerMarker = 1024 * 100; // 100KB. + RAIIServerParameterControllerForTest minBytesPerMarkerController{ + "preImagesCollectionTruncateMarkersMinBytes", minBytesPerMarker}; + + // For a single tenant environment. + boost::optional nullTenantId = boost::none; + createPreImagesCollection(nullTenantId); + + auto preImagesCollectionNss = NamespaceString::makePreImageCollectionNSS(nullTenantId); + auto nsUUID0 = UUID::gen(); + auto nsUUID1 = UUID::gen(); + + prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID0, 100, 1999); + prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID1, 1, 1); + + auto [preImagesRecordStoreNumRecords, preImagesRecordStoreDataSize] = + getNumRecordsAndDataSize(preImagesCollectionNss); + + PreImagesTruncateManager truncateManager; + ensureMarkersInitialized(preImagesCollectionNss, nullTenantId, truncateManager); + + validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID0); + validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID1); + + validateCreationMethod(truncateManager, + nullTenantId, + nsUUID0, + CollectionTruncateMarkers::MarkersCreationMethod::Sampling); + validateCreationMethod(truncateManager, + nullTenantId, + nsUUID1, + CollectionTruncateMarkers::MarkersCreationMethod::Sampling); + + validateNumRecordsInMarkers(truncateManager, nullTenantId, preImagesRecordStoreNumRecords); + validateNumBytesInMarkers(truncateManager, nullTenantId, preImagesRecordStoreDataSize); + + validateIncreasingRidAndWallTimesInMarkers(truncateManager, nullTenantId); +} + +TEST_F(PreImagesTruncateManagerTest, SamplingManyNsUUIDsSingleTenant) { + auto minBytesPerMarker = 1024 * 100; // 100KB. 
+    RAIIServerParameterControllerForTest minBytesPerMarkerController{
+        "preImagesCollectionTruncateMarkersMinBytes", minBytesPerMarker};
+
+    boost::optional<TenantId> nullTenantId = boost::none;
+    createPreImagesCollection(nullTenantId);
+
+    auto preImagesCollectionNss = NamespaceString::makePreImageCollectionNSS(nullTenantId);
+    std::vector<UUID> nsUUIDs{};
+    auto numNssUUIDs = 11;
+    for (int i = 0; i < numNssUUIDs; i++) {
+        nsUUIDs.push_back(UUID::gen());
+    }
+
+    for (const auto& nsUUID : nsUUIDs) {
+        prePopulatePreImagesCollection(preImagesCollectionNss, nsUUID, 100, 555);
+    }
+
+    auto [preImagesRecordStoreNumRecords, preImagesRecordStoreDataSize] =
+        getNumRecordsAndDataSize(preImagesCollectionNss);
+
+    PreImagesTruncateManager truncateManager;
+    ensureMarkersInitialized(preImagesCollectionNss, nullTenantId, truncateManager);
+
+    for (const auto& nsUUID : nsUUIDs) {
+        validateMarkersExistForNsUUID(truncateManager, nullTenantId, nsUUID);
+        validateCreationMethod(truncateManager,
+                               nullTenantId,
+                               nsUUID,
+                               CollectionTruncateMarkers::MarkersCreationMethod::Sampling);
+    }
+
+    validateNumRecordsInMarkers(truncateManager, nullTenantId, preImagesRecordStoreNumRecords);
+    validateNumBytesInMarkers(truncateManager, nullTenantId, preImagesRecordStoreDataSize);
+    validateIncreasingRidAndWallTimesInMarkers(truncateManager, nullTenantId);
+}
+
+// Test that the PreImagesTruncateManager correctly separates collection truncate markers for each
+// tenant.
+TEST_F(PreImagesTruncateManagerTest, TwoTenantsGeneratesTwoSeparateSetsOfTruncateMarkers) {
+    auto minBytesPerMarker = 1024 * 25;  // 25KB.
+    RAIIServerParameterControllerForTest minBytesPerMarkerController{
+        "preImagesCollectionTruncateMarkersMinBytes", minBytesPerMarker};
+    RAIIServerParameterControllerForTest serverlessFeatureFlagController{
+        "featureFlagServerlessChangeStreams", true};
+
+    TenantId tid1 = TenantId(OID::gen());
+    TenantId tid2 = TenantId(OID::gen());
+
+    createPreImagesCollection(tid1);
+    createPreImagesCollection(tid2);
+
+    auto preImagesCollectionNss1 = NamespaceString::makePreImageCollectionNSS(tid1);
+    auto preImagesCollectionNss2 = NamespaceString::makePreImageCollectionNSS(tid2);
+    auto nsUUID1 = UUID::gen();
+    auto nsUUID2 = UUID::gen();
+
+    prePopulatePreImagesCollection(preImagesCollectionNss1, nsUUID1, 100, 1000);
+    prePopulatePreImagesCollection(preImagesCollectionNss2, nsUUID2, 100, 1000);
+
+    auto [tid1NumRecords, tid1DataSize] = getNumRecordsAndDataSize(preImagesCollectionNss1);
+    auto [tid2NumRecords, tid2DataSize] = getNumRecordsAndDataSize(preImagesCollectionNss2);
+
+    // Initialise the truncate markers for each collection.
+    PreImagesTruncateManager truncateManager;
+    ensureMarkersInitialized(preImagesCollectionNss1, tid1, truncateManager);
+    ensureMarkersInitialized(preImagesCollectionNss2, tid2, truncateManager);
+
+    validateNumRecordsInMarkers(truncateManager, tid1, tid1NumRecords);
+    validateNumRecordsInMarkers(truncateManager, tid2, tid2NumRecords);
+
+    validateNumBytesInMarkers(truncateManager, tid1, tid1DataSize);
+    validateNumBytesInMarkers(truncateManager, tid2, tid2DataSize);
+
+    validateIncreasingRidAndWallTimesInMarkers(truncateManager, tid1);
+    validateIncreasingRidAndWallTimesInMarkers(truncateManager, tid2);
+
+    // Confirm that markers for each nsUUID are correctly isolated on their tenant.
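+    // That is, tid1's markers should track only nsUUID1 and tid2's only nsUUID2; neither tenant's
+    // marker map should contain an entry for the other tenant's nsUUID.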
+ validateMarkersExistForNsUUID(truncateManager, tid1, nsUUID1); + validateMarkersDontExistForNsUUID(truncateManager, tid1, nsUUID2); + + validateMarkersExistForNsUUID(truncateManager, tid2, nsUUID2); + validateMarkersDontExistForNsUUID(truncateManager, tid2, nsUUID1); +} +} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_truncate_markers.cpp b/src/mongo/db/change_stream_pre_images_truncate_markers.cpp deleted file mode 100644 index 9cab9d3e5066d..0000000000000 --- a/src/mongo/db/change_stream_pre_images_truncate_markers.cpp +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/change_stream_pre_images_truncate_markers.h" - -#include "mongo/db/change_stream_pre_images_collection_manager.h" -#include "mongo/db/change_stream_serverless_helpers.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/repl/storage_interface.h" - -namespace mongo { - -PreImagesTruncateMarkers::PreImagesTruncateMarkers(boost::optional tenantId, - std::deque markers, - int64_t leftoverRecordsCount, - int64_t leftoverRecordsBytes, - int64_t minBytesPerMarker) - : CollectionTruncateMarkersWithPartialExpiration( - std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker), - _tenantId(std::move(tenantId)) {} - -bool PreImagesTruncateMarkers::_hasExcessMarkers(OperationContext* opCtx) const { - const auto& markers = getMarkers(); - if (markers.empty()) { - // If there's nothing in the markers queue then we don't have excess markers by definition. - return false; - } - - const Marker& oldestMarker = markers.front(); - Date_t currentTimeForTimeBasedExpiration = - opCtx->getServiceContext()->getFastClockSource()->now(); - - if (_tenantId) { - // In a serverless environment, the 'expireAfterSeconds' is set per tenant and is the only - // criteria considered when determining whether a marker is expired. 
- // - // The oldest marker is expired if: - // 'wallTime' of the oldest marker <= current node time - 'expireAfterSeconds' - auto expireAfterSeconds = - Seconds{change_stream_serverless_helpers::getExpireAfterSeconds(_tenantId.get())}; - auto preImageExpirationTime = currentTimeForTimeBasedExpiration - expireAfterSeconds; - return oldestMarker.wallTime <= preImageExpirationTime; - } - - // In a non-serverless enviornment, the oldest marker is expired if either: - // (1) 'wallTime' of the oldest marker <= current node time - 'expireAfterSeconds' - // OR - // (2) Timestamp of the 'lastRecord' in the oldest marker < Timestamp of earliest oplog - // entry - - // The 'expireAfterSeconds' may or may not be set in a non-serverless enviornment. - const auto preImageExpirationTime = change_stream_pre_image_helpers::getPreImageExpirationTime( - opCtx, currentTimeForTimeBasedExpiration); - bool expiredByTimeBasedExpiration = - preImageExpirationTime ? oldestMarker.wallTime <= preImageExpirationTime : false; - - const auto currentEarliestOplogEntryTs = - repl::StorageInterface::get(opCtx->getServiceContext())->getEarliestOplogTimestamp(opCtx); - auto lastRecordTS = - change_stream_pre_image_helpers::getPreImageTimestamp(oldestMarker.lastRecord); - return expiredByTimeBasedExpiration || lastRecordTS < currentEarliestOplogEntryTs; -} -} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_truncate_markers.h b/src/mongo/db/change_stream_pre_images_truncate_markers.h deleted file mode 100644 index 810dfaf60d0c2..0000000000000 --- a/src/mongo/db/change_stream_pre_images_truncate_markers.h +++ /dev/null @@ -1,56 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/storage/collection_markers.h" - -/** - * Implementation of truncate markers for the pre-images collection. 
- */ -namespace mongo { -class PreImagesTruncateMarkers final : public CollectionTruncateMarkersWithPartialExpiration { -public: - PreImagesTruncateMarkers(boost::optional tenantId, - std::deque markers, - int64_t leftoverRecordsCount, - int64_t leftoverRecordsBytes, - int64_t minBytesPerMarker); - -private: - friend class PreImagesTruncateMarkersTest; - - bool _hasExcessMarkers(OperationContext* opCtx) const override; - - /** - * When initialized, indicates this is a serverless environment. - */ - boost::optional _tenantId; -}; -} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.cpp b/src/mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.cpp new file mode 100644 index 0000000000000..e9c7b2a550953 --- /dev/null +++ b/src/mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.cpp @@ -0,0 +1,246 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/change_stream_pre_image_util.h" +#include "mongo/db/change_stream_serverless_helpers.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/change_stream_preimage_gen.h" +#include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/timer.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault + +namespace mongo { +namespace { +// Returns true if the pre-image with highestRecordId and highestWallTime is expired. 
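+// In the serverless case expiry is driven solely by the tenant's 'expireAfterSeconds'; otherwise
+// the pre-image is considered expired when either 'expireAfterSeconds' (if set) has elapsed or its
+// timestamp falls behind the earliest oplog entry timestamp.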
+bool isExpired(OperationContext* opCtx, + const boost::optional& tenantId, + const RecordId& highestRecordId, + Date_t highestWallTime) { + auto currentTimeForTimeBasedExpiration = + change_stream_pre_image_util::getCurrentTimeForPreImageRemoval(opCtx); + + if (tenantId) { + // In a serverless environment, the 'expireAfterSeconds' is set per tenant and is the only + // criteria considered when determining whether a marker is expired. + // + // The oldest marker is expired if: + // 'wallTime' of the oldest marker <= current node time - 'expireAfterSeconds' + auto expireAfterSeconds = + Seconds{change_stream_serverless_helpers::getExpireAfterSeconds(tenantId.get())}; + auto preImageExpirationTime = currentTimeForTimeBasedExpiration - expireAfterSeconds; + return highestWallTime <= preImageExpirationTime; + } + + // In a non-serverless environment, a marker is expired if either: + // (1) 'highestWallTime' of the (partial) marker <= current node time - + // 'expireAfterSeconds' OR + // (2) Timestamp of the 'highestRecordId' in the oldest marker < + // Timestamp of earliest oplog entry + + // The 'expireAfterSeconds' may or may not be set in a non-serverless environment. + const auto preImageExpirationTime = change_stream_pre_image_util::getPreImageExpirationTime( + opCtx, currentTimeForTimeBasedExpiration); + bool expiredByTimeBasedExpiration = + preImageExpirationTime ? highestWallTime <= preImageExpirationTime : false; + + const auto currentEarliestOplogEntryTs = + repl::StorageInterface::get(opCtx->getServiceContext())->getEarliestOplogTimestamp(opCtx); + auto highestRecordTimestamp = + change_stream_pre_image_util::getPreImageTimestamp(highestRecordId); + return expiredByTimeBasedExpiration || highestRecordTimestamp < currentEarliestOplogEntryTs; +} + +} // namespace + +PreImagesTruncateMarkersPerNsUUID::PreImagesTruncateMarkersPerNsUUID( + boost::optional tenantId, + std::deque markers, + int64_t leftoverRecordsCount, + int64_t leftoverRecordsBytes, + int64_t minBytesPerMarker, + CollectionTruncateMarkers::MarkersCreationMethod creationMethod) + : CollectionTruncateMarkersWithPartialExpiration( + std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker), + _tenantId(std::move(tenantId)), + _creationMethod(creationMethod) {} + +CollectionTruncateMarkers::RecordIdAndWallTime +PreImagesTruncateMarkersPerNsUUID::getRecordIdAndWallTime(const Record& record) { + BSONObj preImageObj = record.data.toBson(); + return CollectionTruncateMarkers::RecordIdAndWallTime(record.id, getWallTime(preImageObj)); +} + +Date_t PreImagesTruncateMarkersPerNsUUID::getWallTime(const BSONObj& preImageObj) { + return preImageObj[ChangeStreamPreImage::kOperationTimeFieldName].date(); +} + +CollectionTruncateMarkers::InitialSetOfMarkers +PreImagesTruncateMarkersPerNsUUID::createInitialMarkersFromSamples( + OperationContext* opCtx, + const UUID& nsUUID, + const std::vector& samples, + int64_t estimatedRecordsPerMarker, + int64_t estimatedBytesPerMarker) { + std::deque markers; + auto numSamples = samples.size(); + invariant(numSamples > 0); + for (size_t i = CollectionTruncateMarkers::kRandomSamplesPerMarker - 1; i < numSamples; + i = i + CollectionTruncateMarkers::kRandomSamplesPerMarker) { + const auto& [id, wallTime] = samples[i]; + LOGV2_DEBUG( + 7658602, + 0, + "Marking entry as a potential future truncation point for pre-images collection", + "wall"_attr = wallTime, + "ts"_attr = id); + markers.emplace_back(estimatedRecordsPerMarker, estimatedBytesPerMarker, id, wallTime); + } + + // 
Sampling is best effort estimations and at this step, only account for the whole markers + // generated and leave the 'currentRecords' and 'currentBytes' to be filled in at a later time. + // Additionally, the time taken is relatively arbitrary as the expensive part of the operation + // was retrieving the samples. + return CollectionTruncateMarkers::InitialSetOfMarkers{ + std::move(markers), + 0 /** currentRecords **/, + 0 /** currentBytes **/, + Microseconds{0} /** timeTaken **/, + CollectionTruncateMarkers::MarkersCreationMethod::Sampling}; +} + +CollectionTruncateMarkers::InitialSetOfMarkers +PreImagesTruncateMarkersPerNsUUID::createInitialMarkersScanning( + OperationContext* opCtx, + const CollectionAcquisition& collAcq, + const UUID& nsUUID, + int64_t minBytesPerMarker) { + Timer scanningTimer; + + RecordIdBound minRecordIdBound = + change_stream_pre_image_util::getAbsoluteMinPreImageRecordIdBoundForNs(nsUUID); + RecordId minRecordId = minRecordIdBound.recordId(); + + RecordIdBound maxRecordIdBound = + change_stream_pre_image_util::getAbsoluteMaxPreImageRecordIdBoundForNs(nsUUID); + RecordId maxRecordId = maxRecordIdBound.recordId(); + + auto exec = InternalPlanner::collectionScan(opCtx, + collAcq, + PlanYieldPolicy::YieldPolicy::YIELD_AUTO, + InternalPlanner::Direction::FORWARD, + boost::none, + std::move(minRecordIdBound), + std::move(maxRecordIdBound)); + int64_t currentRecords = 0; + int64_t currentBytes = 0; + std::deque markers; + BSONObj docOut; + RecordId rIdOut; + while (exec->getNext(&docOut, &rIdOut) == PlanExecutor::ADVANCED) { + currentRecords++; + currentBytes += docOut.objsize(); + + auto wallTime = getWallTime(docOut); + if (currentBytes >= minBytesPerMarker) { + LOGV2_DEBUG(7500500, + 1, + "Marking entry as a potential future truncation point for collection with " + "pre-images enabled", + "wallTime"_attr = wallTime, + "nsUuid"_attr = nsUUID); + + markers.emplace_back( + std::exchange(currentRecords, 0), std::exchange(currentBytes, 0), rIdOut, wallTime); + } + } + + if (currentRecords == 0 && markers.empty()) { + return CollectionTruncateMarkers::InitialSetOfMarkers{ + {}, 0, 0, Microseconds{0}, MarkersCreationMethod::EmptyCollection}; + } + + return CollectionTruncateMarkers::InitialSetOfMarkers{ + std::move(markers), + currentRecords, + currentBytes, + scanningTimer.elapsed(), + CollectionTruncateMarkers::MarkersCreationMethod::Scanning}; +} + +void PreImagesTruncateMarkersPerNsUUID::updatePartialMarkerForInitialisation( + OperationContext* opCtx, + int64_t numBytes, + RecordId recordId, + Date_t wallTime, + int64_t numRecords) { + updateCurrentMarker(opCtx, numBytes, recordId, wallTime, numRecords); +} + +bool PreImagesTruncateMarkersPerNsUUID::_hasExcessMarkers(OperationContext* opCtx) const { + const auto& markers = getMarkers(); + if (markers.empty()) { + // If there's nothing in the markers queue then we don't have excess markers by definition. 
+ return false; + } + + const Marker& oldestMarker = markers.front(); + return isExpired(opCtx, _tenantId, oldestMarker.lastRecord, oldestMarker.wallTime); +} + +bool PreImagesTruncateMarkersPerNsUUID::_hasPartialMarkerExpired(OperationContext* opCtx) const { + const auto& [highestSeenRecordId, highestSeenWallTime] = getPartialMarker(); + return isExpired(opCtx, _tenantId, highestSeenRecordId, highestSeenWallTime); +} +} // namespace mongo diff --git a/src/mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.h b/src/mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.h new file mode 100644 index 0000000000000..3952fbd9464ef --- /dev/null +++ b/src/mongo/db/change_stream_pre_images_truncate_markers_per_nsUUID.h @@ -0,0 +1,138 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/change_stream_preimage_gen.h" +#include "mongo/db/record_id.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/concurrent_shared_values_map.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" + +/** + * There is up to one 'config.system.preimages' collection per tenant. This pre-images + * collection contains pre-images for every collection 'nsUUID' with pre-images enabled on the + * tenant. The pre-images collection is ordered by collection 'nsUUID', so that pre-images belonging + * to a given collection are grouped together. Additionally, pre-images for a given collection + * 'nsUUID' are stored in timestamp order, which makes range truncation possible. + * + * Implementation of truncate markers for pre-images associated with a single collection 'nsUUID' + * within a pre-images collection. 
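+ * (Each pre-image's RecordId is derived from its 'nsUUID' and timestamp, which is what allows
+ * change_stream_pre_image_util to compute per-'nsUUID' min/max RecordId bounds for range scans
+ * and range truncation.)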
+ */
+namespace mongo {
+
+class PreImagesTruncateMarkersPerNsUUID final
+    : public CollectionTruncateMarkersWithPartialExpiration {
+public:
+    PreImagesTruncateMarkersPerNsUUID(
+        boost::optional<TenantId> tenantId,
+        std::deque<CollectionTruncateMarkers::Marker> markers,
+        int64_t leftoverRecordsCount,
+        int64_t leftoverRecordsBytes,
+        int64_t minBytesPerMarker,
+        CollectionTruncateMarkers::MarkersCreationMethod creationMethod);
+
+    /**
+     * Creates an 'InitialSetOfMarkers' from samples of pre-images with 'nsUUID'. The generated
+     * markers are best-effort estimates. They are not guaranteed to capture an accurate number of
+     * records and bytes corresponding to the 'nsUUID' within the pre-images collection. This is
+     * because size metrics are only available for an entire pre-images collection, not individual
+     * segments corresponding to the provided 'nsUUID'.
+     *
+     * For mathematical simplicity, the 'InitialSetOfMarkers' will only capture whole markers. Any
+     * samples not captured by whole markers will not be accounted for as a partial marker in the
+     * result.
+     */
+    static CollectionTruncateMarkers::InitialSetOfMarkers createInitialMarkersFromSamples(
+        OperationContext* opCtx,
+        const UUID& nsUUID,
+        const std::vector<CollectionTruncateMarkers::RecordIdAndWallTime>& samples,
+        int64_t estimatedRecordsPerMarker,
+        int64_t estimatedBytesPerMarker);
+
+    /**
+     * Returns an accurate 'InitialSetOfMarkers' corresponding to the segment of the pre-images
+     * collection generated from 'nsUUID'.
+     */
+    static CollectionTruncateMarkers::InitialSetOfMarkers createInitialMarkersScanning(
+        OperationContext* opCtx,
+        const CollectionAcquisition& collPtr,
+        const UUID& nsUUID,
+        int64_t minBytesPerMarker);
+
+    static CollectionTruncateMarkers::RecordIdAndWallTime getRecordIdAndWallTime(
+        const Record& record);
+
+    static Date_t getWallTime(const BSONObj& doc);
+
+    /**
+     * Returns whether there are no more markers and no partial marker pending creation.
+     */
+    bool isEmpty() const {
+        return CollectionTruncateMarkers::isEmpty();
+    }
+
+    void updatePartialMarkerForInitialisation(OperationContext* opCtx,
+                                              int64_t numBytes,
+                                              RecordId recordId,
+                                              Date_t wallTime,
+                                              int64_t numRecords);
+
+    CollectionTruncateMarkers::MarkersCreationMethod markersCreationMethod() {
+        return _creationMethod;
+    }
+
+private:
+    friend class PreImagesRemoverTest;
+
+    bool _hasExcessMarkers(OperationContext* opCtx) const override;
+
+    bool _hasPartialMarkerExpired(OperationContext* opCtx) const override;
+
+    /**
+     * When initialized, indicates this is a serverless environment.
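+     * In that case pre-image expiry is determined solely by the tenant's 'expireAfterSeconds'.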
+ */ + boost::optional _tenantId; + + CollectionTruncateMarkers::MarkersCreationMethod _creationMethod; +}; + +} // namespace mongo diff --git a/src/mongo/db/change_stream_serverless_helpers.cpp b/src/mongo/db/change_stream_serverless_helpers.cpp index 4e082c005b0e5..bc1b92d45d325 100644 --- a/src/mongo/db/change_stream_serverless_helpers.cpp +++ b/src/mongo/db/change_stream_serverless_helpers.cpp @@ -31,23 +31,41 @@ #include "mongo/db/change_stream_serverless_helpers.h" -#include "mongo/db/catalog_raii.h" +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/change_streams_cluster_parameter_gen.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_settings.h" #include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace change_stream_serverless_helpers { namespace { bool isServerlessChangeStreamFeatureFlagEnabled() { - return serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gFeatureFlagServerlessChangeStreams.isEnabled( - serverGlobalParams.featureCompatibility); + return feature_flags::gFeatureFlagServerlessChangeStreams.isEnabled( + serverGlobalParams.featureCompatibility); } } // namespace @@ -75,10 +93,10 @@ bool canInitializeServices() { return false; } - return canRunInTargetEnvironment(); + return isServerlessEnvironment(); } -bool canRunInTargetEnvironment() { +bool isServerlessEnvironment() { // A change stream services are enabled only in the multitenant serverless settings. For the // sharded cluster, 'internalChangeStreamUseTenantIdForTesting' maybe provided for the testing // purposes until the support is available. @@ -113,7 +131,7 @@ TenantSet getConfigDbTenants(OperationContext* opCtx) { auto dbNames = CollectionCatalog::get(opCtx)->getAllDbNames(); for (auto&& dbName : dbNames) { - if (dbName.db() == DatabaseName::kConfig.db() && dbName.tenantId()) { + if (dbName.isConfigDB() && dbName.tenantId()) { tenantIds.insert(*dbName.tenantId()); } } diff --git a/src/mongo/db/change_stream_serverless_helpers.h b/src/mongo/db/change_stream_serverless_helpers.h index 9298603982f37..e70e0b6633f23 100644 --- a/src/mongo/db/change_stream_serverless_helpers.h +++ b/src/mongo/db/change_stream_serverless_helpers.h @@ -30,9 +30,11 @@ #pragma once #include +#include #include "mongo/db/operation_context.h" #include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { namespace change_stream_serverless_helpers { @@ -60,7 +62,7 @@ bool canInitializeServices(); * Returns true if the target environment (replica-set or sharded-cluster) supports running change * stream in the serverless, false otherwise. */ -bool canRunInTargetEnvironment(); +bool isServerlessEnvironment(); /** * Returns an internal tenant id that will be used for testing purposes. 
This tenant id will not diff --git a/src/mongo/db/change_stream_state.idl b/src/mongo/db/change_stream_state.idl index 8972167a05872..b33c3d2fe4664 100644 --- a/src/mongo/db/change_stream_state.idl +++ b/src/mongo/db/change_stream_state.idl @@ -59,4 +59,4 @@ commands: cpp_name: GetChangeStreamStateCommandRequest api_version: "" namespace: ignored - reply_type: ChangeStreamStateParameters \ No newline at end of file + reply_type: ChangeStreamStateParameters diff --git a/src/mongo/db/change_streams_cluster_parameter.cpp b/src/mongo/db/change_streams_cluster_parameter.cpp index f3636b6e41c26..75e7b8a174ed2 100644 --- a/src/mongo/db/change_streams_cluster_parameter.cpp +++ b/src/mongo/db/change_streams_cluster_parameter.cpp @@ -31,10 +31,16 @@ #include "mongo/db/change_streams_cluster_parameter.h" +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/change_streams_cluster_parameter_gen.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" namespace mongo { @@ -49,7 +55,7 @@ Status validateChangeStreamsClusterParameter( return {ErrorCodes::IllegalOperation, "The 'changeStreams' parameter is unsupported in standalone."}; } - if (!change_stream_serverless_helpers::canRunInTargetEnvironment()) { + if (!change_stream_serverless_helpers::isServerlessEnvironment()) { return Status( ErrorCodes::CommandNotSupported, "The 'changeStreams' cluster-wide parameter is only available in serverless."); diff --git a/src/mongo/db/change_streams_cluster_parameter.h b/src/mongo/db/change_streams_cluster_parameter.h index ee1701a7b87c6..d28130f80fa23 100644 --- a/src/mongo/db/change_streams_cluster_parameter.h +++ b/src/mongo/db/change_streams_cluster_parameter.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/base/status.h" #include "mongo/db/tenant_id.h" diff --git a/src/mongo/db/change_streams_cluster_parameter.idl b/src/mongo/db/change_streams_cluster_parameter.idl index 06b1f3a73f47b..8dfc7d9c5763e 100644 --- a/src/mongo/db/change_streams_cluster_parameter.idl +++ b/src/mongo/db/change_streams_cluster_parameter.idl @@ -68,3 +68,13 @@ server_parameters: validator: gte: 1 default: 10 + changeCollectionTruncateMarkersMinBytes: + description: "Server parameter that specifies the minimum number of bytes contained in each + truncate marker for change collections. This is only used if + featureFlagUseUnreplicatedTruncatesForDeletions is enabled" + set_at: startup + cpp_varname: gChangeCollectionTruncateMarkersMinBytes + cpp_vartype: int32_t + default: 33_554_432 # 32 MiB + validator: + gt: 0 diff --git a/src/mongo/db/client.cpp b/src/mongo/db/client.cpp index f833c29eff09d..3945939154d13 100644 --- a/src/mongo/db/client.cpp +++ b/src/mongo/db/client.cpp @@ -31,23 +31,23 @@ to an open socket (or logical connection if pooling on sockets) from a client. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/client.h" - -#include +#include +#include +#include +#include #include -#include -#include "mongo/base/status.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" #include "mongo/db/operation_cpu_timer.h" #include "mongo/db/service_context.h" -#include "mongo/platform/compiler.h" -#include "mongo/stdx/thread.h" #include "mongo/util/concurrency/thread_name.h" -#include "mongo/util/exit.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/client.h b/src/mongo/db/client.h index 39c9b4703e4a4..ca479429a876e 100644 --- a/src/mongo/db/client.h +++ b/src/mongo/db/client.h @@ -37,16 +37,28 @@ #pragma once +#include +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/service_context.h" +#include "mongo/logv2/log.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/random.h" #include "mongo/stdx/thread.h" #include "mongo/transport/session.h" #include "mongo/util/assert_util.h" #include "mongo/util/concurrency/spin_lock.h" +#include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/decorable.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/net/ssl_peer_info.h" @@ -108,7 +120,7 @@ class Client final : public Decorable { } HostAndPort getRemote() const { - verify(_session); + MONGO_verify(_session); return _session->remote(); } @@ -194,15 +206,15 @@ class Client final : public Decorable { } /** - * Used to mark system operations that are allowed to be killed by the stepdown process. This - * should only be called once per Client and only from system connections. The Client should be - * locked by the caller. + * Used to mark system operations that are not allowed to be killed by the stepdown process. + * This should only be called once per Client and only from system connections. The Client + * should be locked by the caller. */ - void setSystemOperationKillableByStepdown(WithLock) { + void setSystemOperationUnkillableByStepdown(WithLock) { // This can only be changed once for system operations. invariant(isFromSystemConnection()); - invariant(!_systemOperationKillable); - _systemOperationKillable = true; + invariant(_systemOperationKillable); + _systemOperationKillable = false; } /** @@ -294,7 +306,7 @@ class Client final : public Decorable { OperationContext* _opCtx = nullptr; // If the active system client operation is allowed to be killed. - bool _systemOperationKillable = false; + bool _systemOperationKillable = true; PseudoRandom _prng; diff --git a/src/mongo/db/client_context_test.cpp b/src/mongo/db/client_context_test.cpp index ca1d02c724a01..d3204af82cb99 100644 --- a/src/mongo/db/client_context_test.cpp +++ b/src/mongo/db/client_context_test.cpp @@ -27,14 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include "mongo/base/string_data.h" #include "mongo/db/client.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/client_metadata_propagation_egress_hook.cpp b/src/mongo/db/client_metadata_propagation_egress_hook.cpp index 30d00e29b1d62..c8fba35e6d657 100644 --- a/src/mongo/db/client_metadata_propagation_egress_hook.cpp +++ b/src/mongo/db/client_metadata_propagation_egress_hook.cpp @@ -29,9 +29,11 @@ #include "mongo/db/client_metadata_propagation_egress_hook.h" +#include "mongo/db/operation_context.h" #include "mongo/db/write_block_bypass.h" #include "mongo/rpc/metadata/client_metadata.h" #include "mongo/rpc/metadata/impersonated_user_metadata.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace rpc { diff --git a/src/mongo/db/client_metadata_propagation_egress_hook.h b/src/mongo/db/client_metadata_propagation_egress_hook.h index b2831af3baf41..27dc230eb3d8d 100644 --- a/src/mongo/db/client_metadata_propagation_egress_hook.h +++ b/src/mongo/db/client_metadata_propagation_egress_hook.h @@ -29,6 +29,10 @@ #pragma once +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/service_context.h" #include "mongo/rpc/metadata/metadata_hook.h" diff --git a/src/mongo/db/client_out_of_line_executor.cpp b/src/mongo/db/client_out_of_line_executor.cpp index aba582e045528..b8d37326e05c5 100644 --- a/src/mongo/db/client_out_of_line_executor.cpp +++ b/src/mongo/db/client_out_of_line_executor.cpp @@ -30,14 +30,26 @@ #include "mongo/db/client_out_of_line_executor.h" +#include +#include +#include +#include + #include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/db/baton.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_severity_suppressor.h" -#include "mongo/util/clock_source.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -50,7 +62,7 @@ class ClientOutOfLineExecutor::Impl { /** Returns Info(), then suppresses to `Debug(2)` for a second. */ logv2::SeveritySuppressor bumpedSeverity{ Seconds{1}, logv2::LogSeverity::Info(), logv2::LogSeverity::Debug(2)}; - ClockSource::StopWatch stopWatch; + Timer timer; }; ClientOutOfLineExecutor::ClientOutOfLineExecutor() noexcept @@ -96,14 +108,14 @@ void ClientOutOfLineExecutor::consumeAllTasks() noexcept { // approximation of the acceptable overhead in the context of normal client operations. 
static constexpr auto kTimeLimit = Microseconds(30); - _impl->stopWatch.restart(); + _impl->timer.reset(); while (auto maybeTask = _taskQueue->tryPop()) { auto task = std::move(*maybeTask); task(Status::OK()); } - auto elapsed = _impl->stopWatch.elapsed(); + auto elapsed = _impl->timer.elapsed(); if (MONGO_unlikely(elapsed > kTimeLimit)) { LOGV2_DEBUG(4651401, diff --git a/src/mongo/db/client_out_of_line_executor_test.cpp b/src/mongo/db/client_out_of_line_executor_test.cpp index a72b3450c668a..45947af8ca4dd 100644 --- a/src/mongo/db/client_out_of_line_executor_test.cpp +++ b/src/mongo/db/client_out_of_line_executor_test.cpp @@ -28,15 +28,24 @@ */ #include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/client.h" #include "mongo/db/client_out_of_line_executor.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" namespace mongo { namespace { diff --git a/src/mongo/db/client_strand.cpp b/src/mongo/db/client_strand.cpp index 22cec28346d25..58df0fa66ac10 100644 --- a/src/mongo/db/client_strand.cpp +++ b/src/mongo/db/client_strand.cpp @@ -28,12 +28,17 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/client_strand.h" +#include +#include "mongo/db/client_strand.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/client_strand.h b/src/mongo/db/client_strand.h index 334e29a7c154f..9167542d5cf12 100644 --- a/src/mongo/db/client_strand.h +++ b/src/mongo/db/client_strand.h @@ -28,13 +28,21 @@ */ #pragma once +#include +#include +#include +#include #include +#include +#include +#include "mongo/base/status.h" #include "mongo/db/client.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/stdx/mutex.h" #include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/functional.h" #include "mongo/util/intrusive_counter.h" #include "mongo/util/out_of_line_executor.h" diff --git a/src/mongo/db/client_strand_test.cpp b/src/mongo/db/client_strand_test.cpp index 168fbe5796166..52423c18e79fa 100644 --- a/src/mongo/db/client_strand_test.cpp +++ b/src/mongo/db/client_strand_test.cpp @@ -27,16 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include #include +#include +#include +#include + +#include +#include "mongo/base/string_data.h" #include "mongo/db/client.h" #include "mongo/db/client_strand.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_name.h" #include "mongo/util/executor_test_util.h" diff --git a/src/mongo/db/clientcursor.cpp b/src/mongo/db/clientcursor.cpp index db7a5253ef3e5..2c11bdf27d99b 100644 --- a/src/mongo/db/clientcursor.cpp +++ b/src/mongo/db/clientcursor.cpp @@ -29,29 +29,28 @@ #include "mongo/db/clientcursor.h" -#include +#include +#include +#include +#include +#include #include -#include -#include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/external_data_source_scope_guard.h" #include "mongo/db/client.h" -#include "mongo/db/commands.h" -#include "mongo/db/commands/server_status.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/curop.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/cursor_server_params.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/query/explain.h" -#include "mongo/db/query/telemetry.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/query_stats.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/util/background.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/exit.h" @@ -124,7 +123,9 @@ ClientCursor::ClientCursor(ClientCursorParams params, _planSummary(_exec->getPlanExplainer().getPlanSummary()), _planCacheKey(CurOp::get(operationUsingCursor)->debug().planCacheKey), _queryHash(CurOp::get(operationUsingCursor)->debug().queryHash), - _telemetryStoreKey(CurOp::get(operationUsingCursor)->debug().telemetryStoreKey), + _queryStatsStoreKeyHash(CurOp::get(operationUsingCursor)->debug().queryStatsStoreKeyHash), + _queryStatsKeyGenerator( + std::move(CurOp::get(operationUsingCursor)->debug().queryStatsKeyGenerator)), _shouldOmitDiagnosticInformation( CurOp::get(operationUsingCursor)->debug().shouldOmitDiagnosticInformation), _opKey(operationUsingCursor->getOperationKey()) { @@ -158,12 +159,13 @@ void ClientCursor::dispose(OperationContext* opCtx, boost::optional now) return; } - if (_telemetryStoreKey && opCtx) { - telemetry::writeTelemetry(opCtx, - _telemetryStoreKey, - getOriginatingCommandObj(), - _metrics.executionTime.value_or(Microseconds{0}).count(), - _metrics.nreturned.value_or(0)); + if (_queryStatsStoreKeyHash && opCtx) { + query_stats::writeQueryStats(opCtx, + _queryStatsStoreKeyHash, + std::move(_queryStatsKeyGenerator), + _metrics.executionTime.value_or(Microseconds{0}).count(), + _firstResponseExecutionTime.value_or(Microseconds{0}).count(), + _metrics.nreturned.value_or(0)); } if (now) { @@ -357,6 +359,13 @@ class ClientCursorMonitor : public BackgroundJob { void run() { ThreadClient tc("clientcursormon", getGlobalServiceContext()); + + // TODO(SERVER-74662): Please 
revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + while (!globalInShutdownDeprecated()) { { const ServiceContext::UniqueOperationContext opCtx = cc().makeOperationContext(); @@ -386,29 +395,22 @@ void startClientCursorMonitor() { getClientCursorMonitor(getGlobalServiceContext()).go(); } -void collectTelemetryMongod(OperationContext* opCtx, - ClientCursorPin& pinnedCursor, - long long nreturned) { - auto curOp = CurOp::get(opCtx); - telemetry::collectMetricsOnOpDebug(curOp, nreturned); - pinnedCursor->incrementCursorMetrics(curOp->debug().additiveMetrics); +void collectQueryStatsMongod(OperationContext* opCtx, ClientCursorPin& pinnedCursor) { + pinnedCursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics); } -void collectTelemetryMongod(OperationContext* opCtx, - const BSONObj& originatingCommand, - long long nreturned) { - auto curOp = CurOp::get(opCtx); - telemetry::collectMetricsOnOpDebug(curOp, nreturned); - +void collectQueryStatsMongod(OperationContext* opCtx, + std::unique_ptr keyGenerator) { // If we haven't registered a cursor to prepare for getMore requests, we record - // telemetry directly. - auto& opDebug = curOp->debug(); - telemetry::writeTelemetry( - opCtx, - opDebug.telemetryStoreKey, - originatingCommand, - opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(), - opDebug.additiveMetrics.nreturned.value_or(0)); + // query stats directly. + auto& opDebug = CurOp::get(opCtx)->debug(); + int64_t execTime = opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(); + query_stats::writeQueryStats(opCtx, + opDebug.queryStatsStoreKeyHash, + std::move(keyGenerator), + execTime, + execTime, + opDebug.additiveMetrics.nreturned.value_or(0)); } } // namespace mongo diff --git a/src/mongo/db/clientcursor.h b/src/mongo/db/clientcursor.h index 11e7b604ba0e3..e5d8b5e6a83c6 100644 --- a/src/mongo/db/clientcursor.h +++ b/src/mongo/db/clientcursor.h @@ -29,21 +29,49 @@ #pragma once -#include "mongo/bson/bsonobj.h" +#include #include +#include +#include +#include #include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/read_preference.h" #include "mongo/db/api_parameters.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" #include "mongo/db/cursor_id.h" +#include "mongo/db/generic_cursor_gen.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/query/tailable_mode_gen.h" #include "mongo/db/record_id.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -209,6 +237,9 @@ class 
ClientCursor : public Decorable { void incrementCursorMetrics(OpDebug::AdditiveMetrics newMetrics) { _metrics.add(newMetrics); + if (!_firstResponseExecutionTime) { + _firstResponseExecutionTime = _metrics.executionTime; + } } /** @@ -447,12 +478,13 @@ class ClientCursor : public Decorable { boost::optional _planCacheKey; boost::optional _queryHash; - // The shape of the original query serialized with readConcern, application name, and namespace. - // If boost::none, telemetry should not be collected for this cursor. - boost::optional _telemetryStoreKey; + // If boost::none, query stats should not be collected for this cursor. + boost::optional _queryStatsStoreKeyHash; // Metrics that are accumulated over the lifetime of the cursor, incremented with each getMore. - // Useful for diagnostics like telemetry. + // Useful for diagnostics like queryStats. OpDebug::AdditiveMetrics _metrics; + // The KeyGenerator used by query stats to generate the query stats store key. + std::unique_ptr _queryStatsKeyGenerator; // Flag to decide if diagnostic information should be omitted. bool _shouldOmitDiagnosticInformation{false}; @@ -462,6 +494,9 @@ class ClientCursor : public Decorable { // Flag indicating that a client has requested to kill the cursor. bool _killPending = false; + + // The execution time collected from the initial operation prior to any getMore requests. + boost::optional _firstResponseExecutionTime; }; /** @@ -586,12 +621,15 @@ void startClientCursorMonitor(); /** * Records certain metrics for the current operation on OpDebug and aggregates those metrics for - * telemetry use. If a cursor pin is provided, metrics are aggregated on the cursor; otherwise, - * metrics are written directly to the telemetry store. + * query stats use. If a cursor pin is provided, metrics are aggregated on the cursor; otherwise, + * metrics are written directly to the query stats store. + * NOTE: Metrics are taken from opDebug.additiveMetrics, so CurOp::setEndOfOpMetrics must be called + * *prior* to calling these. + * + * Currently, query stats are only collected for find and aggregate requests (and their subsequent + * getMore requests), so these should only be called from those request paths. 
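The note above states the contract only in prose; here is a minimal, self-contained sketch of that call pattern. The types (OpMetrics, Cursor, QueryStatsStore) are simplified stand-ins for illustration, not the OpDebug/ClientCursor/query stats store types touched by this patch; the only assumption carried over is the ordering: finalize the operation's metrics first, then either accumulate them on a pinned cursor (flushed once on dispose) or write them straight to the store.

```cpp
// Minimal, standalone sketch of the calling contract described above.
// OpMetrics, Cursor and QueryStatsStore are hypothetical stand-ins.
#include <cstdint>
#include <iostream>
#include <optional>

struct OpMetrics {
    std::optional<int64_t> executionMicros;  // filled in at "end of op"
    int64_t nReturned = 0;
};

struct Cursor {
    OpMetrics accumulated;                       // grows with every batch
    std::optional<int64_t> firstResponseMicros;  // latched on the first batch only

    void incrementMetrics(const OpMetrics& batch) {
        accumulated.nReturned += batch.nReturned;
        accumulated.executionMicros =
            accumulated.executionMicros.value_or(0) + batch.executionMicros.value_or(0);
        if (!firstResponseMicros)
            firstResponseMicros = accumulated.executionMicros;
    }
};

struct QueryStatsStore {
    void write(int64_t totalMicros, int64_t firstResponseMicros, int64_t nReturned) {
        std::cout << "queryStats: total=" << totalMicros << "us first=" << firstResponseMicros
                  << "us nreturned=" << nReturned << "\n";
    }
};

// Path 1: a request that registered a cursor only aggregates; the store is
// written once, when the cursor is finally disposed of.
void collectWithCursor(const OpMetrics& endOfOpMetrics, Cursor& pinned) {
    pinned.incrementMetrics(endOfOpMetrics);
}

// Path 2: a request that never registered a cursor writes directly; the first
// response time equals the whole operation's time.
void collectWithoutCursor(const OpMetrics& endOfOpMetrics, QueryStatsStore& store) {
    const int64_t exec = endOfOpMetrics.executionMicros.value_or(0);
    store.write(exec, exec, endOfOpMetrics.nReturned);
}

int main() {
    QueryStatsStore store;
    Cursor cursor;

    // Metrics must be finalized ("end of op") before either collect call.
    OpMetrics firstBatch;
    firstBatch.executionMicros = 1200;
    firstBatch.nReturned = 101;
    collectWithCursor(firstBatch, cursor);

    OpMetrics getMore;
    getMore.executionMicros = 300;
    getMore.nReturned = 40;
    collectWithCursor(getMore, cursor);

    // On cursor disposal the accumulated metrics are flushed exactly once.
    store.write(*cursor.accumulated.executionMicros,
                *cursor.firstResponseMicros,
                cursor.accumulated.nReturned);

    // A request whose whole result fit in one batch writes directly.
    OpMetrics singleBatch;
    singleBatch.executionMicros = 500;
    singleBatch.nReturned = 7;
    collectWithoutCursor(singleBatch, store);
    return 0;
}
```

The first-response time is latched on the first accumulation, mirroring the new _firstResponseExecutionTime field added in the hunk above.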
*/ -void collectTelemetryMongod(OperationContext* opCtx, ClientCursorPin& cursor, long long nreturned); -void collectTelemetryMongod(OperationContext* opCtx, - const BSONObj& originatingCommand, - long long nreturned); - +void collectQueryStatsMongod(OperationContext* opCtx, ClientCursorPin& cursor); +void collectQueryStatsMongod(OperationContext* opCtx, + std::unique_ptr keyGenerator); } // namespace mongo diff --git a/src/mongo/db/cloner.cpp b/src/mongo/db/cloner.cpp index 682208b774e12..fef41e423b051 100644 --- a/src/mongo/db/cloner.cpp +++ b/src/mongo/db/cloner.cpp @@ -28,41 +28,75 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/cloner.h" - #include - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/bson/util/builder.h" -#include "mongo/client/authenticate.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/internal_auth.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/cloner.h" #include "mongo/db/cloner_gen.h" -#include "mongo/db/commands.h" #include "mongo/db/commands/list_collections_filter.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/database_name.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/insert.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/isself.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/service_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/debug_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -75,7 +109,7 @@ MONGO_FAIL_POINT_DEFINE(movePrimaryFailPoint); BSONElement getErrField(const BSONObj& o); -BSONObj 
Cloner::_getIdIndexSpec(const std::list& indexSpecs) { +BSONObj DefaultClonerImpl::_getIdIndexSpec(const std::list& indexSpecs) { for (auto&& indexSpec : indexSpecs) { BSONElement indexName; uassertStatusOK(bsonExtractTypedField( @@ -87,19 +121,28 @@ BSONObj Cloner::_getIdIndexSpec(const std::list& indexSpecs) { return BSONObj(); } -Cloner::Cloner() {} - -struct Cloner::BatchHandler { +struct DefaultClonerImpl::BatchHandler { BatchHandler(OperationContext* opCtx, const std::string& dbName) : lastLog(0), opCtx(opCtx), _dbName(dbName), numSeen(0), saveLast(0) {} void operator()(DBClientCursor& cursor) { + const auto acquireCollectionFn = [&](OperationContext* opCtx) -> CollectionAcquisition { + return acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + }; + boost::optional dbLock; // TODO SERVER-63111 Once the Cloner holds a DatabaseName obj, use _dbName directly - DatabaseName dbName(boost::none, _dbName); + DatabaseName dbName = DatabaseNameUtil::deserialize(boost::none, _dbName); dbLock.emplace(opCtx, dbName, MODE_X); uassert(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while cloning collection " << nss, + str::stream() << "Not primary while cloning collection " + << nss.toStringForErrorMsg(), !opCtx->writesAreReplicated() || repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); @@ -107,9 +150,9 @@ struct Cloner::BatchHandler { auto databaseHolder = DatabaseHolder::get(opCtx); auto db = databaseHolder->openDb(opCtx, dbName); auto catalog = CollectionCatalog::get(opCtx); - auto collection = catalog->lookupCollectionByNamespace(opCtx, nss); - if (!collection) { - writeConflictRetry(opCtx, "createCollection", nss.ns(), [&] { + boost::optional collection = acquireCollectionFn(opCtx); + if (!collection->exists()) { + writeConflictRetry(opCtx, "createCollection", nss, [&] { opCtx->checkForInterrupt(); WriteUnitOfWork wunit(opCtx); @@ -118,13 +161,14 @@ struct Cloner::BatchHandler { from_options, CollectionOptions::ParseKind::parseForCommand)); invariant(db->userCreateNS( opCtx, nss, collectionOptions, createDefaultIndexes, from_id_index), - str::stream() - << "collection creation failed during clone [" << nss << "]"); + str::stream() << "collection creation failed during clone [" + << nss.toStringForErrorMsg() << "]"); wunit.commit(); - collection = catalog->lookupCollectionByNamespace(opCtx, nss); - invariant(collection, - str::stream() << "Missing collection during clone [" << nss << "]"); }); + collection.emplace(acquireCollectionFn(opCtx)); + invariant(collection->exists(), + str::stream() << "Missing collection during clone [" + << nss.toStringForErrorMsg() << "]"); } while (cursor.moreInCurrentBatch()) { @@ -138,20 +182,22 @@ struct Cloner::BatchHandler { } opCtx->checkForInterrupt(); + collection.reset(); dbLock.reset(); CurOp::get(opCtx)->yielded(); // TODO SERVER-63111 Once the cloner takes in a DatabaseName obj, use _dbName // directly - DatabaseName dbName(boost::none, _dbName); + DatabaseName dbName = DatabaseNameUtil::deserialize(boost::none, _dbName); dbLock.emplace(opCtx, dbName, MODE_X); // Check if everything is still all right. 
if (opCtx->writesAreReplicated()) { uassert( ErrorCodes::PrimarySteppedDown, - str::stream() << "Cannot write to ns: " << nss << " after yielding", + str::stream() << "Cannot write to ns: " << nss.toStringForErrorMsg() + << " after yielding", repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); } @@ -160,10 +206,11 @@ struct Cloner::BatchHandler { str::stream() << "Database " << _dbName << " dropped while cloning", db != nullptr); - collection = catalog->lookupCollectionByNamespace(opCtx, nss); + collection.emplace(acquireCollectionFn(opCtx)); uassert(28594, - str::stream() << "Collection " << nss << " dropped while cloning", - collection); + str::stream() << "Collection " << nss.toStringForErrorMsg() + << " dropped while cloning", + collection->exists()); } BSONObj tmp = cursor.nextSafe(); @@ -180,21 +227,22 @@ struct Cloner::BatchHandler { continue; } str::stream ss; - ss << "Cloner: found corrupt document in " << nss << ": " << redact(status); + ss << "Cloner: found corrupt document in " << nss.toStringForErrorMsg() << ": " + << redact(status); msgasserted(28531, ss); } - verify(collection); + MONGO_verify(collection); ++numSeen; - writeConflictRetry(opCtx, "cloner insert", nss.ns(), [&] { + writeConflictRetry(opCtx, "cloner insert", nss, [&] { opCtx->checkForInterrupt(); WriteUnitOfWork wunit(opCtx); BSONObj doc = tmp; Status status = collection_internal::insertDocument(opCtx, - CollectionPtr(collection), + collection->getCollectionPtr(), InsertStatement(doc), nullptr /* OpDebug */, true); @@ -237,11 +285,11 @@ struct Cloner::BatchHandler { /** * Copy the specified collection. */ -void Cloner::_copy(OperationContext* opCtx, - const std::string& toDBName, - const NamespaceString& nss, - const BSONObj& from_opts, - const BSONObj& from_id_index) { +void DefaultClonerImpl::_copy(OperationContext* opCtx, + const std::string& toDBName, + const NamespaceString& nss, + const BSONObj& from_opts, + const BSONObj& from_id_index) { LOGV2_DEBUG(20414, 2, "\t\tcloning collection", @@ -267,11 +315,11 @@ void Cloner::_copy(OperationContext* opCtx, } } -void Cloner::_copyIndexes(OperationContext* opCtx, - const std::string& toDBName, - const NamespaceString& nss, - const BSONObj& from_opts, - const std::list& from_indexes) { +void DefaultClonerImpl::_copyIndexes(OperationContext* opCtx, + const std::string& toDBName, + const NamespaceString& nss, + const BSONObj& from_opts, + const std::list& from_indexes) { LOGV2_DEBUG(20415, 2, "\t\t copyIndexes", @@ -279,7 +327,8 @@ void Cloner::_copyIndexes(OperationContext* opCtx, "conn_getServerAddress"_attr = getConn()->getServerAddress()); uassert(ErrorCodes::PrimarySteppedDown, - str::stream() << "Not primary while copying indexes from " << nss << " (Cloner)", + str::stream() << "Not primary while copying indexes from " << nss.toStringForErrorMsg() + << " (Cloner)", !opCtx->writesAreReplicated() || repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); @@ -287,7 +336,8 @@ void Cloner::_copyIndexes(OperationContext* opCtx, return; CollectionWriter collection(opCtx, nss); - invariant(collection, str::stream() << "Missing collection " << nss << " (Cloner)"); + invariant(collection, + str::stream() << "Missing collection " << nss.toStringForErrorMsg() << " (Cloner)"); auto indexCatalog = collection->getIndexCatalog(); auto indexesToBuild = indexCatalog->removeExistingIndexesNoChecks( @@ -297,7 +347,7 @@ void Cloner::_copyIndexes(OperationContext* opCtx, } auto fromMigrate = false; - writeConflictRetry(opCtx, 
"_copyIndexes", nss.ns(), [&] { + writeConflictRetry(opCtx, "_copyIndexes", nss, [&] { WriteUnitOfWork wunit(opCtx); IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection( opCtx, collection, indexesToBuild, fromMigrate); @@ -305,7 +355,7 @@ void Cloner::_copyIndexes(OperationContext* opCtx, }); } -StatusWith> Cloner::_filterCollectionsForClone( +StatusWith> DefaultClonerImpl::_filterCollectionsForClone( const std::string& fromDBName, const std::list& initialCollections) { std::vector finalCollections; for (auto&& collection : initialCollections) { @@ -339,12 +389,12 @@ StatusWith> Cloner::_filterCollectionsForClone( return finalCollections; } -Status Cloner::_createCollectionsForDb( +Status DefaultClonerImpl::_createCollectionsForDb( OperationContext* opCtx, const std::vector& createCollectionParams, const std::string& dbName) { auto databaseHolder = DatabaseHolder::get(opCtx); - const DatabaseName tenantDbName(boost::none, dbName); + const DatabaseName tenantDbName = DatabaseNameUtil::deserialize(boost::none, dbName); auto db = databaseHolder->openDb(opCtx, tenantDbName); invariant(opCtx->lockState()->isDbLockedForMode(tenantDbName, MODE_X)); @@ -362,7 +412,7 @@ Status Cloner::_createCollectionsForDb( const NamespaceString nss(dbName, params.collectionName); uassertStatusOK(userAllowedCreateNS(opCtx, nss)); - Status status = writeConflictRetry(opCtx, "createCollection", nss.ns(), [&] { + Status status = writeConflictRetry(opCtx, "createCollection", nss, [&] { opCtx->checkForInterrupt(); WriteUnitOfWork wunit(opCtx); @@ -373,7 +423,7 @@ Status Cloner::_createCollectionsForDb( // we're trying to create already exists. return Status(ErrorCodes::NamespaceExists, str::stream() << "unsharded collection with same namespace " - << nss.ns() << " already exists."); + << nss.toStringForErrorMsg() << " already exists."); } // If the collection is sharded and a collection with the same name already @@ -389,7 +439,8 @@ Status Cloner::_createCollectionsForDb( return Status(ErrorCodes::InvalidOptions, str::stream() - << "sharded collection with same namespace " << nss.ns() + << "sharded collection with same namespace " + << nss.toStringForErrorMsg() << " already exists, but UUIDs don't match. 
Existing UUID is " << existingOpts.uuid << " and new UUID is " << clonedUUID); } @@ -412,7 +463,8 @@ Status Cloner::_createCollectionsForDb( { OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE - unsafeCreateCollection(opCtx); + unsafeCreateCollection(opCtx, + /* forceCSRAsUnknownAfterCollectionCreation */ true); Status createStatus = db->userCreateNS( opCtx, nss, collectionOptions, createDefaultIndexes, params.idIndexSpec); if (!createStatus.isOK()) { @@ -433,9 +485,9 @@ Status Cloner::_createCollectionsForDb( return Status::OK(); } -Status Cloner::setupConn(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost) { +Status DefaultClonerImpl::setupConn(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost) { invariant(!_conn); invariant(!opCtx->lockState()->isLocked()); auto statusWithMasterHost = ConnectionString::parse(masterHost); @@ -479,9 +531,8 @@ Status Cloner::setupConn(OperationContext* opCtx, return Status::OK(); } -StatusWith> Cloner::getListOfCollections(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost) { +StatusWith> DefaultClonerImpl::getListOfCollections( + OperationContext* opCtx, const std::string& dBName, const std::string& masterHost) { invariant(!opCtx->lockState()->isLocked()); std::vector collsToClone; if (!_conn) { @@ -492,16 +543,17 @@ StatusWith> Cloner::getListOfCollections(OperationContext* } // Gather the list of collections to clone // TODO SERVER-63111 Once the cloner takes in a DatabaseName obj, use dBName directly - std::list initialCollections = getConn()->getCollectionInfos( - DatabaseName(boost::none, dBName), ListCollectionsFilter::makeTypeCollectionFilter()); + std::list initialCollections = + getConn()->getCollectionInfos(DatabaseNameUtil::deserialize(boost::none, dBName), + ListCollectionsFilter::makeTypeCollectionFilter()); return _filterCollectionsForClone(dBName, initialCollections); } -Status Cloner::copyDb(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost, - const std::vector& shardedColls, - std::set* clonedColls) { +Status DefaultClonerImpl::copyDb(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost, + const std::vector& shardedColls, + std::set* clonedColls) { invariant(clonedColls && clonedColls->empty(), str::stream() << masterHost << ":" << dBName); // This function can potentially block for a long time on network activity, so holding of locks // is disallowed. 
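The cloner changes in the surrounding hunks all follow one shape: the logic moves into DefaultClonerImpl behind a small ClonerImpl interface, and Cloner becomes a thin facade that forwards to whichever implementation it was constructed with. Below is a stripped-down, standalone sketch of that shape; Status, FakeClonerImpl, and the single copyDb method are simplified stand-ins, not the real signatures from cloner.h.

```cpp
// Standalone sketch of the interface-plus-facade split applied to the cloner.
#include <iostream>
#include <memory>
#include <string>
#include <utility>

struct Status {
    bool ok;
    std::string reason;
};

class ClonerImpl {
public:
    virtual ~ClonerImpl() = default;
    virtual Status copyDb(const std::string& dbName, const std::string& masterHost) = 0;
};

// Production implementation: would talk to the remote host.
class DefaultClonerImpl : public ClonerImpl {
public:
    Status copyDb(const std::string& dbName, const std::string& masterHost) override {
        std::cout << "cloning " << dbName << " from " << masterHost << "\n";
        return {true, ""};
    }
};

// Test double: records the call instead of doing network work.
class FakeClonerImpl : public ClonerImpl {
public:
    Status copyDb(const std::string& dbName, const std::string&) override {
        lastDb = dbName;
        return {true, ""};
    }
    std::string lastDb;
};

// The facade keeps the public surface stable and just forwards.
class Cloner {
public:
    Cloner() : Cloner(std::make_unique<DefaultClonerImpl>()) {}
    explicit Cloner(std::unique_ptr<ClonerImpl> impl) : _impl(std::move(impl)) {}

    Cloner(const Cloner&) = delete;
    Cloner& operator=(const Cloner&) = delete;

    Status copyDb(const std::string& dbName, const std::string& masterHost) {
        return _impl->copyDb(dbName, masterHost);
    }

private:
    std::unique_ptr<ClonerImpl> _impl;
};

int main() {
    Cloner production;  // defaults to DefaultClonerImpl
    production.copyDb("test", "donor:27017");

    auto fake = std::make_unique<FakeClonerImpl>();
    auto* fakePtr = fake.get();
    Cloner underTest(std::move(fake));  // same caller-facing API, swapped behavior
    underTest.copyDb("test", "donor:27017");
    std::cout << "fake saw db: " << fakePtr->lastDb << "\n";
    return 0;
}
```

The payoff is in the last few lines of main(): callers keep constructing Cloner exactly as before, while tests can inject a fake implementation.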
@@ -546,14 +598,14 @@ Status Cloner::copyDb(OperationContext* opCtx, { // TODO SERVER-63111 Once the cloner takes in a DatabaseName obj, use dBName directly - DatabaseName dbName(boost::none, dBName); + DatabaseName dbName = DatabaseNameUtil::deserialize(boost::none, dBName); Lock::DBLock dbXLock(opCtx, dbName, MODE_X); uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while cloning database " << dBName << " (after getting list of collections to clone)", !opCtx->writesAreReplicated() || repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, - dBName)); + dbName)); auto status = _createCollectionsForDb(opCtx, createCollectionParams, dBName); if (!status.isOK()) { @@ -597,7 +649,7 @@ Status Cloner::copyDb(OperationContext* opCtx, const NamespaceString nss(dBName, params.collectionName); - clonedColls->insert(nss.ns()); + clonedColls->insert(nss.ns().toString()); LOGV2_DEBUG(20421, 1, "\t\t cloning", logAttrs(nss), "host"_attr = masterHost); @@ -607,4 +659,20 @@ Status Cloner::copyDb(OperationContext* opCtx, return Status::OK(); } +Cloner::Cloner() : Cloner(std::make_unique()) {} + +Status Cloner::copyDb(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost, + const std::vector& shardedColls, + std::set* clonedColls) { + return _clonerImpl->copyDb(opCtx, dBName, masterHost, shardedColls, clonedColls); +} + +StatusWith> Cloner::getListOfCollections(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost) { + return _clonerImpl->getListOfCollections(opCtx, dBName, masterHost); +} + } // namespace mongo diff --git a/src/mongo/db/cloner.h b/src/mongo/db/cloner.h index 761da5a052cf7..692e7b98c10db 100644 --- a/src/mongo/db/cloner.h +++ b/src/mongo/db/cloner.h @@ -33,11 +33,20 @@ * copy a database (export/import basically) */ +#include +#include +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/connpool.h" #include "mongo/client/dbclient_base.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" namespace mongo { @@ -45,15 +54,25 @@ class DBClientBase; class NamespaceString; class OperationContext; -// TODO SERVER-75657: Create an interface for the Cloner. +class ClonerImpl { +public: + virtual ~ClonerImpl() = default; + virtual Status copyDb(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost, + const std::vector& shardedColls, + std::set* clonedColls) = 0; -class Cloner { - Cloner(const Cloner&) = delete; - Cloner& operator=(const Cloner&) = delete; + virtual Status setupConn(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost) = 0; + + virtual StatusWith> getListOfCollections( + OperationContext* opCtx, const std::string& dBName, const std::string& masterHost) = 0; +}; +class DefaultClonerImpl : public ClonerImpl { public: - Cloner(); - virtual ~Cloner() {} /** * Copies an entire database from the specified host. * clonedColls: the function will return with this populated with a list of the collections that @@ -62,21 +81,21 @@ class Cloner { * that are cloned. When opts.createCollections is true, this parameter is * ignored and the collection list is fetched from the remote via _conn. 
*/ - virtual Status copyDb(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost, - const std::vector& shardedColls, - std::set* clonedColls); + Status copyDb(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost, + const std::vector& shardedColls, + std::set* clonedColls) override; - virtual Status setupConn(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost); + Status setupConn(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost) override; - virtual StatusWith> getListOfCollections(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost); + StatusWith> getListOfCollections(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost) override; -protected: +private: std::unique_ptr _conn; // Filters a database's collection list and removes collections that should not be cloned. @@ -121,4 +140,29 @@ class Cloner { } }; +class Cloner { + +public: + Cloner(std::unique_ptr clonerImpl) : _clonerImpl(std::move(clonerImpl)) {} + + Cloner(); + + Cloner(const Cloner&) = delete; + + Cloner& operator=(const Cloner&) = delete; + + Status copyDb(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost, + const std::vector& shardedColls, + std::set* clonedColls); + + StatusWith> getListOfCollections(OperationContext* opCtx, + const std::string& dBName, + const std::string& masterHost); + +private: + std::unique_ptr _clonerImpl; +}; + } // namespace mongo diff --git a/src/mongo/db/cluster_role.cpp b/src/mongo/db/cluster_role.cpp index 4095378729246..df3954a1326cc 100644 --- a/src/mongo/db/cluster_role.cpp +++ b/src/mongo/db/cluster_role.cpp @@ -28,27 +28,36 @@ */ #include "mongo/db/cluster_role.h" -#include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/feature_flag.h" + +#include "mongo/util/assert_util.h" namespace mongo { -bool ClusterRole::has(const ClusterRole& other) const { - if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafeAtStartup() && - _value == ClusterRole::ConfigServer) { - return other._value == ClusterRole::ConfigServer || - other._value == ClusterRole::ShardServer; +ClusterRole::ClusterRole(Value role) : _roleMask(role) {} + +ClusterRole::ClusterRole(std::initializer_list roles) : _roleMask(None) { + for (const auto role : roles) { + _roleMask |= role; } + invariant(!hasExclusively(ClusterRole::ConfigServer), + "Role cannot be set to config server only"); +} - return _value == other._value; +ClusterRole& ClusterRole::operator=(const ClusterRole& rhs) { + if (this != &rhs) { + _roleMask = rhs._roleMask; + } + invariant(!hasExclusively(ClusterRole::ConfigServer), + "Role cannot be set to config server only"); + return *this; } -bool ClusterRole::exclusivelyHasShardRole() { - return _value == ClusterRole::ShardServer; +bool ClusterRole::has(const ClusterRole& role) const { + return role._roleMask == None ? 
_roleMask == None : _roleMask & role._roleMask; } -bool ClusterRole::exclusivelyHasConfigRole() { - return _value == ClusterRole::ConfigServer && - !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafeAtStartup(); +bool ClusterRole::hasExclusively(const ClusterRole& role) const { + return _roleMask == role._roleMask; } + } // namespace mongo diff --git a/src/mongo/db/cluster_role.h b/src/mongo/db/cluster_role.h index 343d8c1d48d88..552de9421adf1 100644 --- a/src/mongo/db/cluster_role.h +++ b/src/mongo/db/cluster_role.h @@ -29,39 +29,58 @@ #pragma once +#include +#include + namespace mongo { + /** - * ClusterRole is not mutually exclusive when featureFlagCatalogShard is true. In this mode, a - * config server cluster role is also a shard server cluster role. + * Represents the role this node plays in a sharded cluster, based on its startup arguments. Roles + * are not mutually exclusive since a node can play different roles at the same time. */ class ClusterRole { public: - enum Value { - None, - ShardServer, - ConfigServer, - }; + enum Value : uint8_t { + /** + * The node is not part of a sharded cluster. + */ + None = 0x00, - ClusterRole(Value v = ClusterRole::None) : _value(v) {} + /** + * The node acts as a shard server (the process was started with --shardsvr argument.) This + * is implicitly set when the node is configured to act as a config server (the process was + * started with --configsvr argument). + */ + ShardServer = 0x01, - ClusterRole& operator=(const ClusterRole& rhs) { - if (this != &rhs) { - _value = rhs._value; - } - return *this; - } + /** + * The node acts as a config server (the process was started with --configsvr argument). + */ + ConfigServer = 0x02, - bool has(const ClusterRole& other) const; + /** + * The node acts as a router server (the process was started with --router argument). + */ + RouterServer = 0x04 + }; + + ClusterRole(Value role = ClusterRole::None); + ClusterRole(std::initializer_list roles); + ClusterRole& operator=(const ClusterRole& rhs); - // Returns true if this mongod was started with --shardsvr, false otherwise. - bool exclusivelyHasShardRole(); + /** + * Returns `true` if this node plays the given role, `false` otherwise. Even if the node plays + * the given role, it is not excluded that it also plays others. + */ + bool has(const ClusterRole& role) const; - // Returns true if this mongod was started with --configsvr in a non-catalog shard topology, - // false otherwise. - // TODO SERVER-75391: Remove. - bool exclusivelyHasConfigRole(); + /** + * Returns `true` if this node plays only the given role, `false` otherwise. 
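Because has() and hasExclusively() are easy to conflate, here is a small standalone model of the same mask semantics. RoleMask is a stand-in used only to make the behavior concrete, and the config-server case assumes, as the cluster_role.cpp hunk above enforces, that the config bit never appears without the shard bit.

```cpp
// Standalone model of the role-mask semantics described above.
#include <cassert>
#include <cstdint>
#include <initializer_list>

class RoleMask {
public:
    enum Value : uint8_t {
        None = 0x00,
        ShardServer = 0x01,
        ConfigServer = 0x02,
        RouterServer = 0x04,
    };

    RoleMask(std::initializer_list<Value> roles) : _mask(None) {
        for (auto r : roles)
            _mask |= r;
        // A config server always also acts as a shard server, so a mask that is
        // *only* ConfigServer is rejected (mirroring the invariant in the diff).
        assert(_mask != ConfigServer);
    }

    // "Plays this role, possibly among others". None only matches None.
    bool has(Value role) const {
        return role == None ? _mask == None : (_mask & role) != 0;
    }

    // "Plays exactly this role and nothing else".
    bool hasExclusively(Value role) const {
        return _mask == role;
    }

private:
    uint8_t _mask;
};

int main() {
    // A node started with --configsvr: shard and config bits both set.
    RoleMask configShard{RoleMask::ShardServer, RoleMask::ConfigServer};
    assert(configShard.has(RoleMask::ConfigServer));
    assert(configShard.has(RoleMask::ShardServer));            // implied by config
    assert(!configShard.hasExclusively(RoleMask::ShardServer));

    // A plain --shardsvr node.
    RoleMask shardOnly{RoleMask::ShardServer};
    assert(shardOnly.hasExclusively(RoleMask::ShardServer));
    assert(!shardOnly.has(RoleMask::ConfigServer));
    return 0;
}
```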
+ */ + bool hasExclusively(const ClusterRole& role) const; private: - Value _value; + uint8_t _roleMask; }; + } // namespace mongo diff --git a/src/mongo/db/cluster_transaction_api.cpp b/src/mongo/db/cluster_transaction_api.cpp index befd2e093137c..77e2fc5198f5a 100644 --- a/src/mongo/db/cluster_transaction_api.cpp +++ b/src/mongo/db/cluster_transaction_api.cpp @@ -30,12 +30,18 @@ #include "mongo/db/cluster_transaction_api.h" #include +#include -#include "mongo/executor/task_executor.h" -#include "mongo/rpc/factory.h" -#include "mongo/rpc/op_msg_rpc_impls.h" -#include "mongo/rpc/reply_interface.h" -#include "mongo/stdx/future.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/s/service_entry_point_mongos.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/string_map.h" namespace mongo::txn_api::details { @@ -44,6 +50,7 @@ namespace { StringMap clusterCommandTranslations = { {"abortTransaction", "clusterAbortTransaction"}, {"aggregate", "clusterAggregate"}, + {"bulkWrite", "clusterBulkWrite"}, {"commitTransaction", "clusterCommitTransaction"}, {"delete", "clusterDelete"}, {"find", "clusterFind"}, diff --git a/src/mongo/db/cluster_transaction_api.h b/src/mongo/db/cluster_transaction_api.h index 6a15cc706ac48..1470b9712cba9 100644 --- a/src/mongo/db/cluster_transaction_api.h +++ b/src/mongo/db/cluster_transaction_api.h @@ -29,9 +29,14 @@ #pragma once +#include "mongo/bson/bsonobj.h" +#include "mongo/db/dbmessage.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_api.h" +#include "mongo/rpc/message.h" #include "mongo/s/service_entry_point_mongos.h" +#include "mongo/util/future.h" namespace mongo::txn_api::details { diff --git a/src/mongo/db/coll_mod_reply_validation.cpp b/src/mongo/db/coll_mod_reply_validation.cpp index c760eb40c8232..2524a1202a551 100644 --- a/src/mongo/db/coll_mod_reply_validation.cpp +++ b/src/mongo/db/coll_mod_reply_validation.cpp @@ -29,6 +29,12 @@ #include "mongo/db/coll_mod_reply_validation.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + namespace mongo::coll_mod_reply_validation { void validateReply(const CollModReply& reply) { auto hidden_new = reply.getHidden_new().has_value(); diff --git a/src/mongo/db/collection_index_usage_tracker.cpp b/src/mongo/db/collection_index_usage_tracker.cpp index d10f728ad16c5..1681a424b02ef 100644 --- a/src/mongo/db/collection_index_usage_tracker.cpp +++ b/src/mongo/db/collection_index_usage_tracker.cpp @@ -30,10 +30,14 @@ #include "mongo/db/collection_index_usage_tracker.h" -#include +#include +#include +#include +#include + +#include #include "mongo/db/commands/server_status_metric.h" -#include "mongo/db/index/index_descriptor.h" #include "mongo/util/assert_util.h" #include "mongo/util/clock_source.h" diff --git a/src/mongo/db/collection_index_usage_tracker.h b/src/mongo/db/collection_index_usage_tracker.h index aa69ce8b6382c..207caff982e7e 100644 --- a/src/mongo/db/collection_index_usage_tracker.h +++ b/src/mongo/db/collection_index_usage_tracker.h @@ -30,6 +30,8 @@ #pragma once #include +#include +#include #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/collection_index_usage_tracker_test.cpp b/src/mongo/db/collection_index_usage_tracker_test.cpp index fb4252588f7b6..68bd31098e647 100644 --- 
a/src/mongo/db/collection_index_usage_tracker_test.cpp +++ b/src/mongo/db/collection_index_usage_tracker_test.cpp @@ -27,13 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/collection_index_usage_tracker.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/command_can_run_here.cpp b/src/mongo/db/command_can_run_here.cpp index 37089aefa68eb..5aa42c5088652 100644 --- a/src/mongo/db/command_can_run_here.cpp +++ b/src/mongo/db/command_can_run_here.cpp @@ -27,22 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/command_can_run_here.h" - #include "mongo/client/read_preference.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { bool commandCanRunHere(OperationContext* opCtx, - const std::string& dbname, + const DatabaseName& dbName, const Command* command, bool inMultiDocumentTransaction) { auto replCoord = repl::ReplicationCoordinator::get(opCtx); - if (replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname)) + if (replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbName)) return true; // primary: always ok if (!opCtx->writesAreReplicated()) return true; // standalone: always ok diff --git a/src/mongo/db/command_can_run_here.h b/src/mongo/db/command_can_run_here.h index 204b246ef887f..98d432a3d3e29 100644 --- a/src/mongo/db/command_can_run_here.h +++ b/src/mongo/db/command_can_run_here.h @@ -32,12 +32,13 @@ #include #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" namespace mongo { bool commandCanRunHere(OperationContext* opCtx, - const std::string& dbname, + const DatabaseName& dbName, const Command* command, bool inMultiDocumentTransaction); diff --git a/src/mongo/db/commands.cpp b/src/mongo/db/commands.cpp index c27d252d1b730..f2e1869fe3db5 100644 --- a/src/mongo/db/commands.cpp +++ b/src/mongo/db/commands.cpp @@ -29,39 +29,52 @@ #include "mongo/db/commands.h" +#include +#include +#include +#include +#include #include #include +#include +#include + +#include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" #include "mongo/bson/mutable/document.h" -#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/client.h" #include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/error_labels.h" -#include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/command_generic_argument.h" #include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include 
"mongo/rpc/factory.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/metadata/client_metadata.h" #include "mongo/rpc/op_msg_rpc_impls.h" #include "mongo/rpc/rewrite_state_change_errors.h" #include "mongo/rpc/write_concern_error_detail.h" -#include "mongo/s/stale_exception.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/safe_num.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -278,9 +291,9 @@ std::string CommandHelpers::parseNsFullyQualified(const BSONObj& cmdObj) { first.canonicalType() == canonicalizeBSONType(mongo::String)); const NamespaceString nss(first.valueStringData()); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace specified '" << nss.ns() << "'", + str::stream() << "Invalid namespace specified '" << nss.toStringForErrorMsg() << "'", nss.isValid()); - return nss.ns(); + return nss.ns().toString(); } NamespaceString CommandHelpers::parseNsCollectionRequired(const DatabaseName& dbName, @@ -297,7 +310,7 @@ NamespaceString CommandHelpers::parseNsCollectionRequired(const DatabaseName& db uassert(ErrorCodes::InvalidNamespace, str::stream() << "collection name has invalid type " << typeName(first.type()), first.canonicalType() == canonicalizeBSONType(mongo::String)); - const NamespaceString nss( + NamespaceString nss( NamespaceStringUtil::parseNamespaceFromRequest(dbName, first.valueStringData())); uassert(ErrorCodes::InvalidNamespace, str::stream() << "Invalid namespace specified '" << nss.toStringForErrorMsg() << "'", @@ -314,14 +327,16 @@ NamespaceStringOrUUID CommandHelpers::parseNsOrUUID(const DatabaseName& dbName, // Ensure collection identifier is not a Command const NamespaceString nss(parseNsCollectionRequired(dbName, cmdObj)); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid collection name specified '" << nss.ns(), + str::stream() << "Invalid collection name specified '" << nss.toStringForErrorMsg(), !(nss.ns().find('$') != std::string::npos && nss.ns() != "local.oplog.$main")); return nss; } } std::string CommandHelpers::parseNsFromCommand(StringData dbname, const BSONObj& cmdObj) { - return parseNsFromCommand({boost::none, dbname}, cmdObj).ns(); + return parseNsFromCommand(DatabaseNameUtil::deserialize(boost::none, dbname), cmdObj) + .ns() + .toString(); } NamespaceString CommandHelpers::parseNsFromCommand(const DatabaseName& dbName, @@ -333,11 +348,12 @@ NamespaceString CommandHelpers::parseNsFromCommand(const DatabaseName& dbName, cmdObj.firstElement().valueStringData()); } -ResourcePattern CommandHelpers::resourcePatternForNamespace(const std::string& ns) { +ResourcePattern CommandHelpers::resourcePatternForNamespace(const NamespaceString& ns) { if (!NamespaceString::validCollectionComponent(ns)) { - return ResourcePattern::forDatabaseName(ns); + const auto nss = NamespaceStringUtil::serialize(ns); + return ResourcePattern::forDatabaseName(ns.dbName()); } - return ResourcePattern::forExactNamespace(NamespaceString(ns)); + return ResourcePattern::forExactNamespace(ns); } Command* CommandHelpers::findCommand(StringData name) { @@ -570,10 +586,10 @@ void CommandHelpers::canUseTransactions(const NamespaceString& 
nss, uassert(ErrorCodes::OperationNotSupportedInTransaction, str::stream() << "Cannot run command against the '" << dbName.toStringForErrorMsg() << "' database in a transaction.", - dbName.db() != DatabaseName::kLocal.db()); + !dbName.isLocalDB()); uassert(ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Cannot run command against the '" << nss + str::stream() << "Cannot run command against the '" << nss.toStringForErrorMsg() << "' collection in a transaction.", !nss.isSystemDotProfile()); @@ -585,7 +601,7 @@ void CommandHelpers::canUseTransactions(const NamespaceString& nss, } else { uassert(ErrorCodes::OperationNotSupportedInTransaction, "Cannot run command against the config database in a transaction.", - dbName.db() != DatabaseName::kConfig.db()); + !dbName.isConfigDB()); } } @@ -640,8 +656,11 @@ bool CommandHelpers::shouldActivateFailCommandFailPoint(const BSONObj& data, return false; // only activate failpoint on connection with a certain appName } - if (data.hasField("namespace") && (nss != NamespaceString(data.getStringField("namespace")))) { - return false; + if (data.hasField("namespace")) { + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "namespace"_sd); + if (nss != fpNss) { + return false; + } } if (!(data.hasField("failInternalCommands") && data.getBoolField("failInternalCommands")) && @@ -1042,7 +1061,7 @@ bool ErrmsgCommandDeprecated::run(OperationContext* opCtx, const BSONObj& cmdObj, BSONObjBuilder& result) { std::string errmsg; - auto ok = errmsgRun(opCtx, dbName.toStringWithTenantId(), cmdObj, errmsg, result); + auto ok = errmsgRun(opCtx, DatabaseNameUtil::serialize(dbName), cmdObj, errmsg, result); if (!errmsg.empty()) { CommandHelpers::appendSimpleCommandStatus(result, ok, errmsg); } diff --git a/src/mongo/db/commands.h b/src/mongo/db/commands.h index d06b9a1280506..6f861173806e9 100644 --- a/src/mongo/db/commands.h +++ b/src/mongo/db/commands.h @@ -29,34 +29,60 @@ #pragma once +#include #include +#include +#include #include #include +#include +#include #include +#include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/element.h" #include "mongo/db/api_parameters.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/client.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" #include "mongo/db/jsobj.h" #include "mongo/db/multitenancy_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/explain_options.h" +#include "mongo/db/query/explain_verbosity_gen.h" #include "mongo/db/read_concern_support_result.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/request_execution_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" #include "mongo/rpc/reply_builder_interface.h" #include 
"mongo/transport/service_executor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" #include "mongo/util/future.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" namespace mongo { @@ -69,6 +95,7 @@ extern const std::set kApiVersions1; class AuthorizationContract; class Command; + class CommandInvocation; class OperationContext; @@ -165,7 +192,7 @@ struct CommandHelpers { * pattern or a database resource pattern, depending on whether parseNs returns a fully qualifed * collection name or just a database name. */ - static ResourcePattern resourcePatternForNamespace(const std::string& ns); + static ResourcePattern resourcePatternForNamespace(const NamespaceString& ns); static Command* findCommand(StringData name); @@ -659,6 +686,14 @@ class Command { return false; } + /** + * Override to true if this command should be allowed on a direct shard connection regardless + * of the directShardOperations ActionType. + */ + virtual bool shouldSkipDirectConnectionChecks() const { + return false; + } + private: // The full name of the command const std::string _name; @@ -873,7 +908,7 @@ class BasicCommandWithReplyBuilderInterface : public Command { } ResourcePattern parseResourcePattern(const DatabaseName& dbName, const BSONObj& cmdObj) const { - return CommandHelpers::resourcePatternForNamespace(parseNs(dbName, cmdObj).ns()); + return CommandHelpers::resourcePatternForNamespace(parseNs(dbName, cmdObj)); } // diff --git a/src/mongo/db/commands/SConscript b/src/mongo/db/commands/SConscript index 0ebbf800b6c20..e6e089d24ccb0 100644 --- a/src/mongo/db/commands/SConscript +++ b/src/mongo/db/commands/SConscript @@ -48,6 +48,7 @@ env.Library( source=[ 'end_sessions_command.cpp', 'fail_point_cmd.cpp', + 'fle2_cleanup.idl', 'fle2_compact.cpp', 'fle2_compact.idl', 'generic.cpp', @@ -83,8 +84,6 @@ env.Library( '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/session/kill_sessions', '$BUILD_DIR/mongo/db/session/logical_session_cache', - '$BUILD_DIR/mongo/db/session/logical_session_cache_impl', - '$BUILD_DIR/mongo/db/session/logical_session_id', '$BUILD_DIR/mongo/db/session/logical_session_id_helpers', '$BUILD_DIR/mongo/db/stats/counters', '$BUILD_DIR/mongo/db/transaction/transaction_api', @@ -112,6 +111,7 @@ env.Library( 'isself.cpp', 'logical_session_server_status_section.cpp', 'mr_common.cpp', + 'cpuload.cpp', 'reap_logical_session_cache_now.cpp', 'rotate_certificates_command.cpp', 'rotate_certificates.idl', @@ -135,8 +135,7 @@ env.Library( '$BUILD_DIR/mongo/db/repl/isself', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/session/logical_session_cache_impl', - '$BUILD_DIR/mongo/db/session/logical_session_id', + '$BUILD_DIR/mongo/db/session/logical_session_cache', '$BUILD_DIR/mongo/db/session/logical_session_id_helpers', '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/shared_request_handling', @@ -198,8 +197,10 @@ env.Library( '$BUILD_DIR/mongo/db/auth/authprivilege', '$BUILD_DIR/mongo/db/commands', '$BUILD_DIR/mongo/db/concurrency/exception_util', + '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/query/op_metrics', '$BUILD_DIR/mongo/db/storage/backup_cursor_hooks', + '$BUILD_DIR/mongo/util/background_job', 'fsync_locked', ], ) @@ -253,6 +254,18 @@ env.Library( ], ) +env.Library( + target='lock_info_command', + source=[ + 'lock_info.idl', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/auth/authprivilege', + 
'$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', + ], +) + env.Library( target='create_command', source=[ @@ -286,6 +299,21 @@ env.Library( ], ) +env.Library( + target='bulk_write_common', + source=[ + 'bulk_write_common.cpp', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/auth/authprivilege', + '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/logv2/logv2_options', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', + 'bulk_write_parser', + ], +) + env.Library( target='bulk_write_command', source=[ @@ -299,6 +327,7 @@ env.Library( '$BUILD_DIR/mongo/db/commands', '$BUILD_DIR/mongo/db/concurrency/exception_util', '$BUILD_DIR/mongo/db/curop_metrics', + '$BUILD_DIR/mongo/db/fle_crud_mongod', '$BUILD_DIR/mongo/db/not_primary_error_tracker', '$BUILD_DIR/mongo/db/ops/write_ops', '$BUILD_DIR/mongo/db/ops/write_ops_exec', @@ -308,9 +337,11 @@ env.Library( '$BUILD_DIR/mongo/db/server_feature_flags', '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/session/logical_session_id_helpers', + '$BUILD_DIR/mongo/db/transaction/transaction', '$BUILD_DIR/mongo/logv2/logv2_options', '$BUILD_DIR/mongo/util/log_and_backoff', '$BUILD_DIR/mongo/util/namespace_string_database_name_util', + 'bulk_write_common', 'bulk_write_parser', ], ) @@ -407,9 +438,11 @@ env.Library( '$BUILD_DIR/mongo/db/ops/write_ops_exec', '$BUILD_DIR/mongo/db/pipeline/aggregation_request_helper', '$BUILD_DIR/mongo/db/pipeline/process_interface/mongo_process_interface', + '$BUILD_DIR/mongo/db/pipeline/process_interface/mongod_process_interfaces', '$BUILD_DIR/mongo/db/query/command_request_response', '$BUILD_DIR/mongo/db/query/cursor_response_idl', '$BUILD_DIR/mongo/db/query/op_metrics', + '$BUILD_DIR/mongo/db/query/query_shape', '$BUILD_DIR/mongo/db/query/stats/query_stats', '$BUILD_DIR/mongo/db/query/stats/stats_histograms', '$BUILD_DIR/mongo/db/query_exec', @@ -428,6 +461,7 @@ env.Library( '$BUILD_DIR/mongo/db/timeseries/timeseries_collmod', '$BUILD_DIR/mongo/db/timeseries/timeseries_conversion_util', '$BUILD_DIR/mongo/db/timeseries/timeseries_options', + '$BUILD_DIR/mongo/db/timeseries/timeseries_write_util', '$BUILD_DIR/mongo/db/transaction/transaction', '$BUILD_DIR/mongo/db/views/view_catalog_helpers', '$BUILD_DIR/mongo/executor/async_request_executor', @@ -445,6 +479,7 @@ env.Library( 'list_collections_filter', 'list_databases_command', 'list_databases_for_all_tenants_command', + 'lock_info_command', 'rename_collection_idl', 'test_commands_enabled', 'validate_db_metadata_command', @@ -461,6 +496,7 @@ env.Library( '$BUILD_DIR/mongo/db/catalog/catalog_helpers', '$BUILD_DIR/mongo/db/catalog/collection_crud', '$BUILD_DIR/mongo/db/commands', + '$BUILD_DIR/mongo/db/dbhelpers', '$BUILD_DIR/mongo/db/index_builds_coordinator_interface', '$BUILD_DIR/mongo/db/ops/write_ops', '$BUILD_DIR/mongo/db/ops/write_ops_exec', @@ -535,6 +571,7 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/bson/bson_validate', '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/util/fail_point', '$BUILD_DIR/mongo/util/namespace_string_database_name_util', ], @@ -548,13 +585,13 @@ env.Library( "collection_to_capped.cpp", "compact.cpp", "change_stream_state_command.cpp", - "cpuload.cpp", "dbcheck.cpp", "dbcommands_d.cpp", "dbhash.cpp", 'get_cluster_parameter_command.cpp', "internal_rename_if_options_and_indexes_match_cmd.cpp", "internal_transactions_test_command_d.cpp", + "fle2_cleanup_cmd.cpp", 
"fle2_compact_cmd.cpp", "map_reduce_command.cpp", "oplog_application_checks.cpp", @@ -633,6 +670,7 @@ env.Library( '$BUILD_DIR/mongo/db/transaction/transaction_api', '$BUILD_DIR/mongo/executor/inline_executor', '$BUILD_DIR/mongo/util/net/ssl_manager', + '$BUILD_DIR/mongo/util/progress_meter', 'cluster_server_parameter_commands_invocation', 'core', 'create_command', @@ -805,6 +843,7 @@ env.CppUnitTest( LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/query/query_test_service_context', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/storage/two_phase_index_build_knobs_idl', 'map_reduce_agg', ], @@ -819,9 +858,10 @@ env.CppUnitTest( '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/auth/authorization_manager_global', - '$BUILD_DIR/mongo/db/commands/standalone', - '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/db/commands', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/unittest/unittest', + 'standalone', ], ) @@ -836,8 +876,8 @@ env.CppUnitTest( "$BUILD_DIR/mongo/db/auth/authorization_manager_global", "$BUILD_DIR/mongo/db/commands", "$BUILD_DIR/mongo/db/commands/standalone", + '$BUILD_DIR/mongo/db/service_context_non_d', "$BUILD_DIR/mongo/db/service_context_test_fixture", - "$BUILD_DIR/mongo/unittest/unittest", "$BUILD_DIR/mongo/util/version_impl", ], ) diff --git a/src/mongo/db/commands/analyze_cmd.cpp b/src/mongo/db/commands/analyze_cmd.cpp index 8461f9231819d..6f185383e55ef 100644 --- a/src/mongo/db/commands/analyze_cmd.cpp +++ b/src/mongo/db/commands/analyze_cmd.cpp @@ -27,21 +27,48 @@ * it in the license file. */ +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/analyze_cmd.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/allowed_contexts.h" #include "mongo/db/query/analyze_command_gen.h" #include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/stats/scalar_histogram.h" #include "mongo/db/query/stats/stats_catalog.h" #include "mongo/db/query/stats/stats_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -104,7 +131,7 @@ StatusWith analyzeCommandAsAggregationCommand(OperationContext* opCtx, << "insert")); return BSON("aggregate" << collection << "pipeline" << pipelineBuilder.arr() << "cursor" - << BSONObj()); + << BSONObj() << "allowDiskUse" << false); } class CmdAnalyze final : public TypedCommand { @@ -138,9 +165,8 @@ class CmdAnalyze final : public TypedCommand { void typedRun(OperationContext* opCtx) { uassert(6660400, "Analyze command requires common query framework feature flag to be enabled", - 
serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gFeatureFlagCommonQueryFramework.isEnabled( - serverGlobalParams.featureCompatibility)); + feature_flags::gFeatureFlagCommonQueryFramework.isEnabled( + serverGlobalParams.featureCompatibility)); const auto& cmd = request(); const NamespaceString& nss = ns(); @@ -158,8 +184,9 @@ class CmdAnalyze final : public TypedCommand { const auto& collection = autoColl.getCollection(); // Namespace exists - uassert( - 6799700, str::stream() << "Couldn't find collection " << nss.ns(), collection); + uassert(6799700, + str::stream() << "Couldn't find collection " << nss.toStringForErrorMsg(), + collection); // Namespace cannot be capped collection const bool isCapped = collection->isCapped(); @@ -171,8 +198,8 @@ class CmdAnalyze final : public TypedCommand { const bool isNormalColl = nss.isNormalCollection(); const bool isClusteredColl = collection->isClustered(); uassert(6799702, - str::stream() - << nss.toString() << " is not a normal or clustered collection", + str::stream() << nss.toStringForErrorMsg() + << " is not a normal or clustered collection", isNormalColl || isClusteredColl); if (sampleSize) { @@ -247,7 +274,8 @@ class CmdAnalyze final : public TypedCommand { const NamespaceString& ns = request().getNamespace(); uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to call analyze on collection " << ns, + str::stream() << "Not authorized to call analyze on collection " + << ns.toStringForErrorMsg(), authzSession->isAuthorizedForActionsOnNamespace(ns, ActionType::analyze)); } }; diff --git a/src/mongo/db/commands/apply_ops_cmd.cpp b/src/mongo/db/commands/apply_ops_cmd.cpp index 06d1258476f20..e2e2c0756b8bb 100644 --- a/src/mongo/db/commands/apply_ops_cmd.cpp +++ b/src/mongo/db/commands/apply_ops_cmd.cpp @@ -27,29 +27,38 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_check.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/auth/authorization_session.h" // IWYU pragma: keep #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/oplog_application_checks.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/apply_ops.h" +#include "mongo/db/repl/apply_ops_command_info.h" #include "mongo/db/repl/oplog.h" -#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/service_context.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/uuid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/async_command_execution_test.cpp b/src/mongo/db/commands/async_command_execution_test.cpp index 01bec5021182f..753cae12dfc9c 100644 --- a/src/mongo/db/commands/async_command_execution_test.cpp +++ b/src/mongo/db/commands/async_command_execution_test.cpp @@ -29,18 +29,40 @@ #include - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/client_strand.h" #include "mongo/db/commands.h" +#include "mongo/db/operation_context.h" #include "mongo/db/request_execution_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/factory.h" -#include "mongo/unittest/unittest.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/protocol.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/commands/authentication_commands.cpp b/src/mongo/db/commands/authentication_commands.cpp index 97419827ddecc..0e3261b3179d1 100644 --- a/src/mongo/db/commands/authentication_commands.cpp +++ b/src/mongo/db/commands/authentication_commands.cpp @@ -28,30 +28,54 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/authentication_commands.h" - +#include +#include +#include +#include +#include #include +#include + +#include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/client/authenticate.h" -#include 
"mongo/client/sasl_client_authenticate.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/auth_options_gen.h" #include "mongo/db/auth/authentication_session.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_global_parameters_gen.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/cluster_auth_mode.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/authentication_commands.h" #include "mongo/db/commands/authentication_commands_gen.h" #include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" #include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/net/ssl_manager.h" #include "mongo/util/net/ssl_peer_info.h" #include "mongo/util/net/ssl_types.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -59,7 +83,6 @@ namespace mongo { namespace { -constexpr auto kExternalDB = "$external"_sd; constexpr auto kDBFieldName = "db"_sd; /** @@ -92,6 +115,12 @@ class CmdLogout : public TypedCommand { return "de-authenticate"; } + // We should allow users to logout even if the user does not have the direct shard roles action + // type. + bool shouldSkipDirectConnectionChecks() const final { + return true; + } + class Invocation final : public InvocationBase { public: using InvocationBase::InvocationBase; @@ -106,8 +135,6 @@ class CmdLogout : public TypedCommand { void doCheckAuthorization(OperationContext*) const final {} - static constexpr auto kAdminDB = "admin"_sd; - static constexpr auto kLocalDB = "local"_sd; void typedRun(OperationContext* opCtx) { auto& logoutState = getLogoutCommandState(opCtx->getServiceContext()); auto hasBeenInvoked = logoutState.markAsInvoked(); @@ -120,16 +147,17 @@ class CmdLogout : public TypedCommand { auto dbname = request().getDbName(); auto* as = AuthorizationSession::get(opCtx->getClient()); - as->logoutDatabase( - opCtx->getClient(), dbname.toStringWithTenantId(), "Logging out on user request"); - if (getTestCommandsEnabled() && (dbname == kAdminDB)) { + as->logoutDatabase(opCtx->getClient(), + DatabaseNameUtil::serializeForAuth(dbname), + "Logging out on user request"); + if (getTestCommandsEnabled() && (dbname.isAdminDB())) { // Allows logging out as the internal user against the admin database, however // this actually logs out of the local database as well. This is to // support the auth passthrough test framework on mongos (since you can't use the // local database on a mongos, so you can't logout as the internal user // without this). 
as->logoutDatabase(opCtx->getClient(), - kLocalDB, + DatabaseName::kLocal.db(), "Logging out from local database for test purposes"); } } @@ -213,7 +241,7 @@ void _authenticateX509(OperationContext* opCtx, AuthenticationSession* session) uassert(ErrorCodes::ProtocolError, "X.509 authentication must always use the $external database.", - userName.getDB() == kExternalDB); + userName.getDatabaseName().isExternalDB()); auto isInternalClient = [&]() -> bool { return opCtx->getClient()->session()->getTags() & transport::Session::kInternalClient; @@ -396,7 +424,7 @@ void doSpeculativeAuthenticate(OperationContext* opCtx, if (!hasDBField) { // No "db" field was provided, so default to "$external" - cmd.append(AuthenticateCommand::kDbNameFieldName, kExternalDB); + cmd.append(AuthenticateCommand::kDbNameFieldName, DatabaseName::kExternal.db()); } auto authCmdObj = diff --git a/src/mongo/db/commands/authentication_commands.h b/src/mongo/db/commands/authentication_commands.h index 725f40afde0d0..9beaa3bacae3a 100644 --- a/src/mongo/db/commands/authentication_commands.h +++ b/src/mongo/db/commands/authentication_commands.h @@ -34,6 +34,7 @@ #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/user.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" namespace mongo { @@ -45,6 +46,6 @@ void disableX509Auth(ServiceContext* svcCtx); bool isX509AuthDisabled(ServiceContext* svcCtx); UserRequest getX509UserRequest(OperationContext* opCtx, UserRequest request); -void doSpeculativeAuthenticate(OperationContext* opCtx, BSONObj isMaster, BSONObjBuilder* result); +void doSpeculativeAuthenticate(OperationContext* opCtx, BSONObj helloCmd, BSONObjBuilder* result); } // namespace mongo diff --git a/src/mongo/db/commands/bulk_write.cpp b/src/mongo/db/commands/bulk_write.cpp index 25f64cd4eee4e..4526f853201f6 100644 --- a/src/mongo/db/commands/bulk_write.cpp +++ b/src/mongo/db/commands/bulk_write.cpp @@ -27,44 +27,116 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/client/read_preference.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/catalog/document_validation.h" +#include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" #include "mongo/db/commands/bulk_write.h" +#include "mongo/db/commands/bulk_write_common.h" #include "mongo/db/commands/bulk_write_crud_op.h" #include "mongo/db/commands/bulk_write_gen.h" #include "mongo/db/commands/bulk_write_parser.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" #include "mongo/db/curop_metrics.h" #include "mongo/db/cursor_manager.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/queued_data_stage.h" -#include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/fle_crud.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/not_primary_error_tracker.h" #include "mongo/db/ops/delete_request_gen.h" #include "mongo/db/ops/insert.h" -#include "mongo/db/ops/parsed_update.h" +#include "mongo/db/ops/parsed_writes_common.h" +#include "mongo/db/ops/single_write_result_gen.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/update_result.h" #include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/ops/write_ops_exec_util.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/ops/write_ops_retryability.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_common.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/server_feature_flags_gen.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/transaction/retryable_writes_stats.h" +#include "mongo/db/transaction/transaction_participant.h" 
#include "mongo/db/transaction_validation.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/log_and_backoff.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -72,14 +144,159 @@ namespace mongo { namespace { MONGO_FAIL_POINT_DEFINE(hangBeforeBulkWritePerformsUpdate); +MONGO_FAIL_POINT_DEFINE(hangBetweenProcessingBulkWriteOps); + +/** + * BulkWriteReplies maintains the BulkWriteReplyItems and provides an interface to add either + * Insert or Update/Delete replies. + */ +class BulkWriteReplies { +public: + BulkWriteReplies() = delete; + BulkWriteReplies(const BulkWriteCommandRequest& request, int capacity) + : _req(request), _replies() { + _replies.reserve(capacity); + } + + void addInsertReplies(OperationContext* opCtx, + size_t firstOpIdx, + write_ops_exec::WriteResult& writes) { + invariant(!writes.results.empty()); + + // Copy over retriedStmtIds. + for (auto& stmtId : writes.retriedStmtIds) { + _retriedStmtIds.emplace_back(stmtId); + } + + for (size_t i = 0; i < writes.results.size(); ++i) { + auto idx = firstOpIdx + i; + if (auto error = write_ops_exec::generateError( + opCtx, writes.results[i].getStatus(), idx, _numErrors)) { + auto replyItem = BulkWriteReplyItem(idx, error.get().getStatus()); + _replies.emplace_back(replyItem); + _numErrors++; + } else { + auto replyItem = BulkWriteReplyItem(idx); + replyItem.setN(writes.results[i].getValue().getN()); + _replies.emplace_back(replyItem); + } + } + } + + void addUpdateReply(size_t currentOpIdx, + int numMatched, + int numDocsModified, + const boost::optional& upserted, + const boost::optional& value, + const boost::optional& stmtId) { + auto replyItem = BulkWriteReplyItem(currentOpIdx); + replyItem.setNModified(numDocsModified); + if (upserted.has_value()) { + replyItem.setUpserted(upserted); + replyItem.setN(1); + } else { + replyItem.setN(numMatched); + } + + if (value) { + replyItem.setValue(value); + } + + if (stmtId) { + _retriedStmtIds.emplace_back(*stmtId); + } + + _replies.emplace_back(replyItem); + } + + void addUpdateReply(size_t currentOpIdx, + int numMatched, + int numDocsModified, + const boost::optional& upsertedAnyType, + const boost::optional& value, + const boost::optional& stmtId) { + + boost::optional upserted; + if (upsertedAnyType.has_value()) { + upserted = write_ops::Upserted(0, upsertedAnyType.value()); + } + + addUpdateReply(currentOpIdx, numMatched, numDocsModified, upserted, value, stmtId); + } + + void addUpdateReply(size_t currentOpIdx, + const UpdateResult& result, + const boost::optional& value, + const boost::optional& stmtId) { + boost::optional upserted; + if (!result.upsertedId.isEmpty()) { + upserted = IDLAnyTypeOwned(result.upsertedId.firstElement()); + } + addUpdateReply( + currentOpIdx, result.numMatched, result.numDocsModified, upserted, value, stmtId); + } + + + void addDeleteReply(size_t currentOpIdx, + long long nDeleted, + const boost::optional& value, + const boost::optional& stmtId) { + auto replyItem = 
BulkWriteReplyItem(currentOpIdx); + replyItem.setN(nDeleted); + + if (value) { + replyItem.setValue(value); + } + + if (stmtId) { + _retriedStmtIds.emplace_back(*stmtId); + } + + _replies.emplace_back(replyItem); + } + + void addUpdateErrorReply(OperationContext* opCtx, size_t currentOpIdx, const Status& status) { + auto replyItem = BulkWriteReplyItem(currentOpIdx); + replyItem.setNModified(0); + addErrorReply(opCtx, replyItem, status); + } + + void addErrorReply(OperationContext* opCtx, size_t currentOpIdx, const Status& status) { + auto replyItem = BulkWriteReplyItem(currentOpIdx); + addErrorReply(opCtx, replyItem, status); + } -using UpdateCallback = std::function& /* value */)>; + void addErrorReply(OperationContext* opCtx, + BulkWriteReplyItem& replyItem, + const Status& status) { + auto error = write_ops_exec::generateError(opCtx, status, replyItem.getIdx(), _numErrors); + invariant(error); + replyItem.setStatus(error.get().getStatus()); + replyItem.setOk(status.isOK() ? 1.0 : 0.0); + replyItem.setN(0); + _replies.emplace_back(replyItem); + _numErrors++; + } + + std::vector& getReplies() { + return _replies; + } -using DeleteCallback = std::function& /* value */)>; + std::vector& getRetriedStmtIds() { + return _retriedStmtIds; + } -using ErrorCallback = std::function; + int getNumErrors() { + return _numErrors; + } + +private: + const BulkWriteCommandRequest& _req; + std::vector _replies; + std::vector _retriedStmtIds; + /// The number of error replies contained in _replies. + int _numErrors = 0; +}; /** * Class representing an InsertBatch. Maintains a reference to the request and a callback function @@ -87,16 +304,13 @@ using ErrorCallback = std::function */ class InsertBatch { public: - using ReplyHandler = - std::function; - InsertBatch() = delete; InsertBatch(const BulkWriteCommandRequest& request, int capacity, - ReplyHandler replyCallback, + BulkWriteReplies& responses, write_ops_exec::LastOpFixer& lastOpFixer) : _req(request), - _replyFn(replyCallback), + _responses(responses), _lastOpFixer(lastOpFixer), _currentNs(), _batch(), @@ -108,7 +322,80 @@ class InsertBatch { return _batch.empty(); } - // Returns true if the write was successful and did not encounter errors. + void addRetryableWriteResult(OperationContext* opCtx, size_t idx, int32_t stmtId) { + write_ops_exec::WriteResult out; + SingleWriteResult res; + res.setN(1); + res.setNModified(0); + out.retriedStmtIds.push_back(stmtId); + out.results.emplace_back(res); + + _responses.addInsertReplies(opCtx, idx, out); + } + + // Return true if the insert was done by FLE. + // FLE skips inserts with no encrypted fields, in which case the caller of this method + // is expected to fallback to its non-FLE code path. + bool attemptProcessFLEInsert(OperationContext* opCtx, write_ops_exec::WriteResult& out) { + CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; + + // For BulkWrite, re-entry is un-expected. 
+ invariant(!_currentNs.getEncryptionInformation()->getCrudProcessed().value_or(false)); + + std::vector documents; + std::transform(_batch.cbegin(), + _batch.cend(), + std::back_inserter(documents), + [](const InsertStatement& insert) { return insert.doc; }); + + write_ops::InsertCommandRequest request(_currentNs.getNs(), documents); + request.setDollarTenant(_req.getDollarTenant()); + request.setExpectPrefix(_req.getExpectPrefix()); + auto& requestBase = request.getWriteCommandRequestBase(); + requestBase.setEncryptionInformation(_currentNs.getEncryptionInformation()); + requestBase.setOrdered(_req.getOrdered()); + requestBase.setBypassDocumentValidation(_req.getBypassDocumentValidation()); + + write_ops::InsertCommandReply insertReply; + + FLEBatchResult batchResult = processFLEInsert(opCtx, request, &insertReply); + + if (batchResult == FLEBatchResult::kProcessed) { + size_t inserted = static_cast(insertReply.getN()); + + SingleWriteResult result; + result.setN(1); + + if (documents.size() == inserted) { + invariant(!insertReply.getWriteErrors().has_value()); + out.results.reserve(inserted); + std::fill_n(std::back_inserter(out.results), inserted, std::move(result)); + } else { + invariant(insertReply.getWriteErrors().has_value()); + const auto& errors = insertReply.getWriteErrors().value(); + + out.results.reserve(inserted + errors.size()); + std::fill_n( + std::back_inserter(out.results), inserted + errors.size(), std::move(result)); + + for (const auto& error : errors) { + out.results[error.getIndex()] = error.getStatus(); + } + + if (_req.getOrdered()) { + out.canContinue = false; + } + } + + if (insertReply.getRetriedStmtIds().has_value()) { + out.retriedStmtIds = insertReply.getRetriedStmtIds().value(); + } + return true; + } + return false; + } + + // Returns true if the bulkWrite operation can continue and false if it should stop. bool flush(OperationContext* opCtx) { if (empty()) { return true; @@ -121,16 +408,47 @@ class InsertBatch { auto size = _batch.size(); out.results.reserve(size); - out.canContinue = write_ops_exec::insertBatchAndHandleErrors(opCtx, - _currentNs.getNs(), - _currentNs.getCollectionUUID(), - _req.getOrdered(), - _batch, - &_lastOpFixer, - &out, - OperationSource::kStandard); + bool insertedByFLE = false; + if (_currentNs.getEncryptionInformation().has_value()) { + insertedByFLE = attemptProcessFLEInsert(opCtx, out); + + if (!insertedByFLE) { + // It is unexpected for processFLEInsert (inside attemptProcessFLEInsert) + // to return kNotProcessed for multiple documents. In the case of retyrable write + // with FLE, we have to fallthrough to our normal code path below + // on !insertedByFLE, but we are past the point where that code path normally checks + // for checkStatementExecutedNoOplogEntryFetch (in handleInsertOp). 
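For context on the fallback just described, the contract this code relies on is that processFLEInsert returns FLEBatchResult::kProcessed when the queryable-encryption path fully handled the batch, and kNotProcessed only for a single document with no encrypted fields, in which case the caller is expected to redo its own retryable-write checks and fall through to the plain insert path. A minimal sketch of that calling convention (illustrative only, not part of the patch; it assumes an already-built write_ops::InsertCommandRequest named 'request'):

    // Sketch: how a caller interprets processFLEInsert's result.
    write_ops::InsertCommandReply insertReply;
    if (processFLEInsert(opCtx, request, &insertReply) == FLEBatchResult::kProcessed) {
        // FLE handled the insert (including any retryable-write bookkeeping) itself.
    } else {
        // kNotProcessed: a single document with no encrypted fields; the caller falls
        // back to the regular insert path and must redo its duplicate-statement check.
    }

The patch's actual handling of this fallback continues directly below.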
+ invariant(_batch.size() == 1); + + auto txnParticipant = TransactionParticipant::get(opCtx); + invariant(_batch[0].stmtIds.size() == 1); + if (opCtx->isRetryableWrite() && + txnParticipant.checkStatementExecutedNoOplogEntryFetch(opCtx, + _batch[0].stmtIds[0])) { + RetryableWritesStats::get(opCtx)->incrementRetriedStatementsCount(); + addRetryableWriteResult(opCtx, _firstOpIdx.get(), _batch[0].stmtIds[0]); + _batch.clear(); + _currentNs = NamespaceInfoEntry(); + _firstOpIdx = boost::none; + return out.canContinue; + } + } + } + + if (!insertedByFLE) { + out.canContinue = + write_ops_exec::insertBatchAndHandleErrors(opCtx, + _currentNs.getNs(), + _currentNs.getCollectionUUID(), + _req.getOrdered(), + _batch, + &_lastOpFixer, + &out, + OperationSource::kStandard); + } + _batch.clear(); - _replyFn(opCtx, _firstOpIdx.get(), out); + _responses.addInsertReplies(opCtx, _firstOpIdx.get(), out); _currentNs = NamespaceInfoEntry(); _firstOpIdx = boost::none; @@ -146,7 +464,6 @@ class InsertBatch { const NamespaceInfoEntry& nsInfo, const BSONObj& op) { // If this is a different namespace we have to flush the current batch. - // TODO SERVER-72682 refactor insertBatchAndHandleErrors to batch across namespaces. if (_isDifferentFromSavedNamespace(nsInfo)) { // Write the current batch since we have a different namespace to process. if (!flush(opCtx)) { @@ -167,7 +484,7 @@ class InsertBatch { private: const BulkWriteCommandRequest& _req; - ReplyHandler _replyFn; + BulkWriteReplies& _responses; write_ops_exec::LastOpFixer& _lastOpFixer; NamespaceInfoEntry _currentNs; std::vector _batch; @@ -188,78 +505,6 @@ class InsertBatch { } }; -/** - * BulkWriteReplies maintains the BulkWriteReplyItems and provides an interface to add either - * Insert or Update/Delete replies. - */ -class BulkWriteReplies { -public: - BulkWriteReplies() = delete; - BulkWriteReplies(const BulkWriteCommandRequest& request, int capacity) - : _req(request), _replies() { - _replies.reserve(capacity); - } - - void addInsertReplies(OperationContext* opCtx, - size_t firstOpIdx, - write_ops_exec::WriteResult& writes) { - invariant(!writes.results.empty()); - - for (size_t i = 0; i < writes.results.size(); ++i) { - auto idx = firstOpIdx + i; - // We do not pass in a proper numErrors since it causes unwanted truncation in error - // message generation. 
- if (auto error = write_ops_exec::generateError( - opCtx, writes.results[i].getStatus(), idx, 0 /* numErrors */)) { - auto replyItem = BulkWriteReplyItem(idx, error.get().getStatus()); - _replies.emplace_back(replyItem); - } else { - auto replyItem = BulkWriteReplyItem(idx); - replyItem.setN(writes.results[i].getValue().getN()); - _replies.emplace_back(replyItem); - } - } - } - - void addUpdateReply(size_t currentOpIdx, - const UpdateResult& result, - const boost::optional& value) { - auto replyItem = BulkWriteReplyItem(currentOpIdx); - replyItem.setNModified(result.numDocsModified); - if (!result.upsertedId.isEmpty()) { - replyItem.setUpserted( - write_ops::Upserted(0, IDLAnyTypeOwned(result.upsertedId.firstElement()))); - } - if (value) { - replyItem.setValue(value); - } - _replies.emplace_back(replyItem); - } - - void addDeleteReply(size_t currentOpIdx, - long long nDeleted, - const boost::optional& value) { - auto replyItem = BulkWriteReplyItem(currentOpIdx); - replyItem.setN(nDeleted); - if (value) { - replyItem.setValue(value); - } - _replies.emplace_back(replyItem); - } - - std::vector& getReplies() { - return _replies; - } - - void addErrorReply(size_t currentOpIdx, const Status& status) { - _replies.emplace_back(currentOpIdx, status); - } - -private: - const BulkWriteCommandRequest& _req; - std::vector _replies; -}; - void finishCurOp(OperationContext* opCtx, CurOp* curOp) { try { curOp->done(); @@ -269,7 +514,7 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) { recordCurOpMetrics(opCtx); Top::get(opCtx->getServiceContext()) .record(opCtx, - curOp->getNS(), + curOp->getNSS(), curOp->getLogicalOp(), Top::LockType::WriteLocked, durationCount(curOp->elapsedTimeExcludingPauses()), @@ -302,38 +547,135 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) { } } -int32_t getStatementId(OperationContext* opCtx, - const BulkWriteCommandRequest& req, - const size_t currentOpIdx) { - if (opCtx->isRetryableWrite()) { - auto stmtId = req.getStmtId(); - auto stmtIds = req.getStmtIds(); +std::tuple> getRetryResultForDelete( + OperationContext* opCtx, + const NamespaceString& nsString, + const boost::optional& entry) { + // Use a SideTransactionBlock since 'parseOplogEntryForFindAndModify' might need + // to fetch a pre/post image from the oplog and if this is a retry inside an + // in-progress retryable internal transaction, this 'opCtx' would have an active + // WriteUnitOfWork and it is illegal to read the the oplog when there is an + // active WriteUnitOfWork. + TransactionParticipant::SideTransactionBlock sideTxn(opCtx); + + // Need to create a dummy FindAndModifyRequest to use to parse the oplog entry + // using existing helpers. + // The helper only checks a couple of booleans for validation so we do not need + // to copy over all fields. + auto findAndModifyReq = write_ops::FindAndModifyCommandRequest(nsString); + findAndModifyReq.setRemove(true); + findAndModifyReq.setNew(false); + + auto findAndModifyReply = parseOplogEntryForFindAndModify(opCtx, findAndModifyReq, *entry); + + return std::make_tuple(findAndModifyReply.getLastErrorObject().getNumDocs(), + findAndModifyReply.getValue()); +} - if (stmtIds) { - return stmtIds->at(currentOpIdx); +std::tuple, + boost::optional> +getRetryResultForUpdate(OperationContext* opCtx, + const NamespaceString& nsString, + const BulkWriteUpdateOp* op, + const boost::optional& entry) { + // If 'return' is not specified then fetch this statement using the normal update + // helpers. 
If 'return' is specified we need to use the findAndModify helpers. + // findAndModify helpers do not support Updates executed with a none return so this + // split is necessary. + if (!op->getReturn()) { + auto writeResult = parseOplogEntryForUpdate(*entry); + + // Since multi cannot be true for retryable writes numDocsModified + upserted should be 1 + tassert(ErrorCodes::BadValue, + "bulkWrite retryable update must only modify one document", + writeResult.getNModified() + (writeResult.getUpsertedId().isEmpty() ? 0 : 1) == 1); + + boost::optional upserted; + if (!writeResult.getUpsertedId().isEmpty()) { + upserted = IDLAnyTypeOwned(writeResult.getUpsertedId().firstElement()); } - const int32_t firstStmtId = stmtId ? *stmtId : 0; - return firstStmtId + currentOpIdx; + // We only care about the values of numDocsModified and upserted from the Update + // result. + return std::make_tuple( + writeResult.getN(), writeResult.getNModified(), upserted, boost::none); } - return kUninitializedStmtId; + // Use a SideTransactionBlock since 'parseOplogEntryForFindAndModify' might need + // to fetch a pre/post image from the oplog and if this is a retry inside an + // in-progress retryable internal transaction, this 'opCtx' would have an active + // WriteUnitOfWork and it is illegal to read the the oplog when there is an + // active WriteUnitOfWork. + TransactionParticipant::SideTransactionBlock sideTxn(opCtx); + + // Need to create a dummy FindAndModifyRequest to use to parse the oplog entry + // using existing helpers. + // The helper only checks a couple of booleans for validation so we do not need + // to copy over all fields. + auto findAndModifyReq = write_ops::FindAndModifyCommandRequest(nsString); + findAndModifyReq.setUpsert(op->getUpsert()); + findAndModifyReq.setRemove(false); + if (op->getReturn() && op->getReturn().get() == "post") { + findAndModifyReq.setNew(true); + } + + auto findAndModifyReply = parseOplogEntryForFindAndModify(opCtx, findAndModifyReq, *entry); + + int numDocsModified = findAndModifyReply.getLastErrorObject().getNumDocs(); + + boost::optional upserted = + findAndModifyReply.getLastErrorObject().getUpserted(); + if (upserted.has_value()) { + // An 'upserted' doc does not count as a modified doc but counts in the + // numDocs total. Since numDocs is either 1 or 0 it should be 0 here. + numDocsModified = 0; + } + + // Since multi cannot be true for retryable writes numDocsModified + upserted should be 1 + tassert(ErrorCodes::BadValue, + "bulkWrite retryable update must only modify one document", + numDocsModified + (upserted.has_value() ? 1 : 0) == 1); + + // We only care about the values of numDocsModified and upserted from the Update + // result. + return std::make_tuple(findAndModifyReply.getLastErrorObject().getNumDocs(), + numDocsModified, + upserted, + findAndModifyReply.getValue()); } bool handleInsertOp(OperationContext* opCtx, const BulkWriteInsertOp* op, const BulkWriteCommandRequest& req, size_t currentOpIdx, - ErrorCallback errorCB, + BulkWriteReplies& responses, InsertBatch& batch) { const auto& nsInfo = req.getNsInfo(); auto idx = op->getInsert(); - auto stmtId = getStatementId(opCtx, req, currentOpIdx); + auto stmtId = opCtx->isRetryableWrite() ? bulk_write_common::getStatementId(req, currentOpIdx) + : kUninitializedStmtId; + + auto txnParticipant = TransactionParticipant::get(opCtx); + + // For FLE + RetryableWrite, we let FLE handle stmtIds and retryability, so we skip + // checkStatementExecutedNoOplogEntryFetch here. 
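As an aside on the statement id computed just above: it comes from bulk_write_common::getStatementId, which is added later in this patch. Roughly, an explicit stmtIds array takes precedence and supplies one id per op; otherwise ids are derived from the optional base stmtId (defaulting to 0) plus the op's index. A self-contained sketch of that derivation (illustrative only; sketchGetStatementId is a hypothetical name, not part of the patch):

    #include <cstdint>
    #include <vector>
    #include <boost/optional.hpp>

    int32_t sketchGetStatementId(const boost::optional<std::vector<int32_t>>& stmtIds,
                                 const boost::optional<int32_t>& stmtId,
                                 size_t currentOpIdx) {
        if (stmtIds) {
            return stmtIds->at(currentOpIdx);        // one id was supplied per op
        }
        int32_t firstStmtId = stmtId ? *stmtId : 0;  // single base id, default 0
        return firstStmtId + static_cast<int32_t>(currentOpIdx);
    }

The retryable-write duplicate-statement check that consumes this id follows immediately below.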
+ if (!nsInfo[idx].getEncryptionInformation().has_value() && opCtx->isRetryableWrite() && + txnParticipant.checkStatementExecutedNoOplogEntryFetch(opCtx, stmtId)) { + if (!batch.flush(opCtx)) { + return false; + } + + RetryableWritesStats::get(opCtx)->incrementRetriedStatementsCount(); + batch.addRetryableWriteResult(opCtx, currentOpIdx, stmtId); + return true; + } + bool containsDotsAndDollarsField = false; auto fixedDoc = fixDocumentForInsert(opCtx, op->getDocument(), &containsDotsAndDollarsField); - // TODO SERVER-72988: handle retryable writes. if (!fixedDoc.isOK()) { if (!batch.flush(opCtx)) { return false; @@ -344,7 +686,7 @@ bool handleInsertOp(OperationContext* opCtx, uassertStatusOK(fixedDoc.getStatus()); MONGO_UNREACHABLE; } catch (const DBException& ex) { - errorCB(currentOpIdx, ex.toStatus()); + responses.addErrorReply(opCtx, currentOpIdx, ex.toStatus()); write_ops_exec::WriteResult out; // fixDocumentForInsert can only fail for validation reasons, we only use handleError // here to tell us if we are able to continue processing further ops or not. @@ -365,13 +707,198 @@ bool handleInsertOp(OperationContext* opCtx, return batch.addToBatch(opCtx, currentOpIdx, stmtId, nsInfo[idx], toInsert); } +// Unlike attemptProcessFLEInsert, no fallback to non-FLE path is needed, +// returning false only indicate an error occurred. +bool attemptProcessFLEUpdate(OperationContext* opCtx, + const BulkWriteUpdateOp* op, + const BulkWriteCommandRequest& req, + size_t currentOpIdx, + BulkWriteReplies& responses, + const mongo::NamespaceInfoEntry& nsInfoEntry) { + CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; + + // op->getReturn() should not be set for this code path, see attemptProcessFLEFindAndModify. + uassert(ErrorCodes::InvalidOptions, + "BulkWrite update with sort is supported only with return.", + !op->getReturn() && !op->getSort()); + + write_ops::UpdateOpEntry update; + update.setQ(op->getFilter()); + update.setMulti(op->getMulti()); + update.setC(op->getConstants()); + update.setU(op->getUpdateMods()); + update.setHint(op->getHint()); + if (op->getCollation()) { + update.setCollation(op->getCollation().value()); + } + update.setArrayFilters(op->getArrayFilters().value_or(std::vector())); + update.setUpsert(op->getUpsert()); + + std::vector updates{update}; + write_ops::UpdateCommandRequest updateCommand(nsInfoEntry.getNs(), updates); + updateCommand.setDollarTenant(req.getDollarTenant()); + updateCommand.setExpectPrefix(req.getExpectPrefix()); + updateCommand.setLet(req.getLet()); + updateCommand.setLegacyRuntimeConstants(Variables::generateRuntimeConstants(opCtx)); + + updateCommand.getWriteCommandRequestBase().setEncryptionInformation( + nsInfoEntry.getEncryptionInformation()); + updateCommand.getWriteCommandRequestBase().setBypassDocumentValidation( + req.getBypassDocumentValidation()); + + write_ops::UpdateCommandReply updateReply = processFLEUpdate(opCtx, updateCommand); + + if (updateReply.getWriteErrors()) { + const auto& errors = updateReply.getWriteErrors().get(); + invariant(errors.size() == 1); + responses.addUpdateErrorReply(opCtx, currentOpIdx, errors[0].getStatus()); + return false; + } else { + boost::optional stmtId; + if (updateReply.getRetriedStmtIds()) { + const auto& retriedStmtIds = updateReply.getRetriedStmtIds().get(); + invariant(retriedStmtIds.size() == 1); + stmtId = retriedStmtIds[0]; + } + + boost::optional upserted; + if (updateReply.getUpserted()) { + const auto& upsertedDocuments = updateReply.getUpserted().get(); + 
invariant(upsertedDocuments.size() == 1); + upserted = upsertedDocuments[0]; + } + + responses.addUpdateReply(currentOpIdx, + updateReply.getN(), + updateReply.getNModified(), + upserted, + /* value */ boost::none, + stmtId); + + return true; + } +} + +// Unlike attemptProcessFLEInsert, no fallback to non-FLE path is needed, +// returning false only indicate an error occurred. +bool attemptProcessFLEFindAndModify(OperationContext* opCtx, + const BulkWriteUpdateOp* op, + const BulkWriteCommandRequest& req, + size_t currentOpIdx, + BulkWriteReplies& responses, + const mongo::NamespaceInfoEntry& nsInfoEntry) { + CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; + + uassert(ErrorCodes::InvalidOptions, + "BulkWrite update with Queryable Encryption and return does not support constants or " + "multi.", + !op->getMulti() && !op->getConstants()); + + write_ops::FindAndModifyCommandRequest findAndModifyRequest(nsInfoEntry.getNs()); + findAndModifyRequest.setDollarTenant(req.getDollarTenant()); + findAndModifyRequest.setExpectPrefix(req.getExpectPrefix()); + findAndModifyRequest.setBypassDocumentValidation(req.getBypassDocumentValidation()); + findAndModifyRequest.setQuery(op->getFilter()); + findAndModifyRequest.setLet(req.getLet()); + findAndModifyRequest.setFields(op->getReturnFields()); + findAndModifyRequest.setUpdate(op->getUpdateMods()); + findAndModifyRequest.setLegacyRuntimeConstants(Variables::generateRuntimeConstants(opCtx)); + findAndModifyRequest.setSort(op->getSort().value_or(BSONObj())); + findAndModifyRequest.setHint(op->getHint()); + if (op->getCollation()) { + findAndModifyRequest.setCollation(op->getCollation().value()); + } + findAndModifyRequest.setArrayFilters(op->getArrayFilters().value_or(std::vector())); + findAndModifyRequest.setUpsert(op->getUpsert()); + if (op->getReturn()) { + findAndModifyRequest.setNew(op->getReturn().get() != "pre"); + } + findAndModifyRequest.setEncryptionInformation(nsInfoEntry.getEncryptionInformation()); + + StatusWith> status = + processFLEFindAndModifyHelper(opCtx, findAndModifyRequest); + if (!status.isOK()) { + responses.addUpdateErrorReply(opCtx, currentOpIdx, status.getStatus()); + return false; + } else { + const auto& reply = status.getValue().first; + int numDocs = reply.getLastErrorObject().getNumDocs(); + responses.addUpdateReply(currentOpIdx, + numDocs, + numDocs, + reply.getLastErrorObject().getUpserted(), + op->getReturn() ? reply.getValue() : boost::none, + reply.getRetriedStmtId()); + return true; + } +} + +// Unlike attemptProcessFLEInsert, no fallback to non-FLE path is needed, +// returning false only indicate an error occurred. +bool attemptProcessFLEDelete(OperationContext* opCtx, + const BulkWriteDeleteOp* op, + const BulkWriteCommandRequest& req, + size_t currentOpIdx, + BulkWriteReplies& responses, + const mongo::NamespaceInfoEntry& nsInfoEntry) { + CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; + + // TODO SERVER-78678 : It is possible to support delete with return by mapping it to + // processFLEFindAndModifyHelper instead and using FindAndModifyCommandRequest.setRemove. 
+ uassert(ErrorCodes::InvalidOptions, + "BulkWrite delete with Queryable Encryption does not support return or sort.", + !op->getReturn() && !op->getReturnFields() && !op->getSort()); + + write_ops::DeleteOpEntry deleteEntry; + if (op->getCollation()) { + deleteEntry.setCollation(op->getCollation()); + } + deleteEntry.setHint(op->getHint()); + deleteEntry.setMulti(op->getMulti()); + deleteEntry.setQ(op->getFilter()); + + std::vector deletes{deleteEntry}; + write_ops::DeleteCommandRequest deleteRequest(nsInfoEntry.getNs(), deletes); + deleteRequest.setDollarTenant(req.getDollarTenant()); + deleteRequest.setExpectPrefix(req.getExpectPrefix()); + deleteRequest.setLet(req.getLet()); + deleteRequest.setLegacyRuntimeConstants(Variables::generateRuntimeConstants(opCtx)); + deleteRequest.getWriteCommandRequestBase().setEncryptionInformation( + nsInfoEntry.getEncryptionInformation()); + deleteRequest.getWriteCommandRequestBase().setBypassDocumentValidation( + req.getBypassDocumentValidation()); + + write_ops::DeleteCommandReply deleteReply = processFLEDelete(opCtx, deleteRequest); + if (deleteReply.getWriteErrors()) { + const auto& errors = deleteReply.getWriteErrors().get(); + invariant(errors.size() == 1); + auto replyItem = BulkWriteReplyItem(currentOpIdx); + responses.addErrorReply(opCtx, replyItem, errors[0].getStatus()); + + return false; + } else { + boost::optional stmtId; + if (deleteReply.getRetriedStmtIds()) { + const auto& retriedStmtIds = deleteReply.getRetriedStmtIds().get(); + invariant(retriedStmtIds.size() == 1); + stmtId = retriedStmtIds[0]; + } + + responses.addDeleteReply(currentOpIdx, + deleteReply.getN(), + /* value */ boost::none, + stmtId); + return true; + } +} + bool handleUpdateOp(OperationContext* opCtx, CurOp* curOp, const BulkWriteUpdateOp* op, const BulkWriteCommandRequest& req, size_t currentOpIdx, - ErrorCallback errorCB, - UpdateCallback replyCB) { + write_ops_exec::LastOpFixer& lastOpFixer, + BulkWriteReplies& responses) { const auto& nsInfo = req.getNsInfo(); auto idx = op->getUpdate(); try { @@ -379,6 +906,10 @@ bool handleUpdateOp(OperationContext* opCtx, uassert(ErrorCodes::InvalidOptions, "May not specify both multi and return in bulkWrite command.", !op->getReturn()); + + uassert(ErrorCodes::InvalidOptions, + "Cannot use retryable writes with multi=true", + !opCtx->isRetryableWrite()); } if (op->getReturnFields()) { @@ -389,10 +920,44 @@ bool handleUpdateOp(OperationContext* opCtx, const NamespaceString& nsString = nsInfo[idx].getNs(); uassertStatusOK(userAllowedWriteNS(opCtx, nsString)); + + if (nsInfo[idx].getEncryptionInformation().has_value()) { + // For BulkWrite, re-entry is un-expected. + invariant(!nsInfo[idx].getEncryptionInformation()->getCrudProcessed().value_or(false)); + + if (!op->getReturn()) { + // Map to processFLEUpdate. + return attemptProcessFLEUpdate( + opCtx, op, req, currentOpIdx, responses, nsInfo[idx]); + } else { + // Map to processFLEFindAndModify. + return attemptProcessFLEFindAndModify( + opCtx, op, req, currentOpIdx, responses, nsInfo[idx]); + } + } + OpDebug* opDebug = &curOp->debug(); doTransactionValidationForWrites(opCtx, nsString); + auto stmtId = opCtx->isRetryableWrite() + ? 
bulk_write_common::getStatementId(req, currentOpIdx) + : kUninitializedStmtId; + if (opCtx->isRetryableWrite()) { + const auto txnParticipant = TransactionParticipant::get(opCtx); + if (auto entry = txnParticipant.checkStatementExecuted(opCtx, stmtId)) { + RetryableWritesStats::get(opCtx)->incrementRetriedStatementsCount(); + + auto [numMatched, numDocsModified, upserted, image] = + getRetryResultForUpdate(opCtx, nsString, op, entry); + + responses.addUpdateReply( + currentOpIdx, numMatched, numDocsModified, upserted, image, stmtId); + + return true; + } + } + const bool inTransaction = opCtx->inMultiDocumentTransaction(); auto updateRequest = UpdateRequest(); @@ -401,7 +966,8 @@ bool handleUpdateOp(OperationContext* opCtx, updateRequest.setProj(op->getReturnFields().value_or(BSONObj())); updateRequest.setUpdateModification(op->getUpdateMods()); updateRequest.setLegacyRuntimeConstants(Variables::generateRuntimeConstants(opCtx)); - updateRequest.setLetParameters(op->getLet()); + updateRequest.setUpdateConstants(op->getConstants()); + updateRequest.setLetParameters(req.getLet()); updateRequest.setSort(op->getSort().value_or(BSONObj())); updateRequest.setHint(op->getHint()); updateRequest.setCollation(op->getCollation().value_or(BSONObj())); @@ -416,19 +982,16 @@ bool handleUpdateOp(OperationContext* opCtx, } updateRequest.setMulti(op->getMulti()); - updateRequest.setYieldPolicy(inTransaction ? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO); + updateRequest.setYieldPolicy(PlanYieldPolicy::YieldPolicy::YIELD_AUTO); - if (req.getStmtIds()) { - updateRequest.setStmtIds(*req.getStmtIds()); - } else if (req.getStmtId()) { - updateRequest.setStmtIds({*req.getStmtId()}); - } + // We only execute one update op at a time. + updateRequest.setStmtIds({stmtId}); // Although usually the PlanExecutor handles WCE internally, it will throw WCEs when it // is executing an update. This is done to ensure that we can always match, // modify, and return the document under concurrency, if a matching document exists. - return writeConflictRetry(opCtx, "bulkWriteUpdate", nsString.ns(), [&] { + lastOpFixer.startingOp(nsString); + return writeConflictRetry(opCtx, "bulkWriteUpdate", nsString, [&] { if (MONGO_unlikely(hangBeforeBulkWritePerformsUpdate.shouldFail())) { CurOpFailpointHelpers::waitWhileFailPointEnabled( &hangBeforeBulkWritePerformsUpdate, opCtx, "hangBeforeBulkWritePerformsUpdate"); @@ -438,11 +1001,6 @@ bool handleUpdateOp(OperationContext* opCtx, // match. 
int retryAttempts = 0; for (;;) { - const ExtensionsCallbackReal extensionsCallback( - opCtx, &updateRequest.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &updateRequest, extensionsCallback); - uassertStatusOK(parsedUpdate.parseRequest()); - try { boost::optional docFound; auto result = write_ops_exec::writeConflictRetryUpsert(opCtx, @@ -453,16 +1011,15 @@ bool handleUpdateOp(OperationContext* opCtx, false, updateRequest.isUpsert(), docFound, - &parsedUpdate); - replyCB(currentOpIdx, result, docFound); + updateRequest); + lastOpFixer.finishedOpSuccessfully(); + responses.addUpdateReply(currentOpIdx, result, docFound, boost::none); return true; } catch (const ExceptionFor& ex) { - if (!parsedUpdate.hasParsedQuery()) { - uassertStatusOK(parsedUpdate.parseQueryToCQ()); - } - + auto cq = uassertStatusOK( + parseWriteQueryToCQ(opCtx, nullptr /* expCtx */, updateRequest)); if (!write_ops_exec::shouldRetryDuplicateKeyException( - parsedUpdate, *ex.extraInfo())) { + updateRequest, *cq, *ex.extraInfo())) { throw; } @@ -477,7 +1034,11 @@ bool handleUpdateOp(OperationContext* opCtx, } }); } catch (const DBException& ex) { - errorCB(currentOpIdx, ex.toStatus()); + // IncompleteTrasactionHistory should always be command fatal. + if (ex.code() == ErrorCodes::IncompleteTransactionHistory) { + throw; + } + responses.addUpdateErrorReply(opCtx, currentOpIdx, ex.toStatus()); write_ops_exec::WriteResult out; return write_ops_exec::handleError( opCtx, ex, nsInfo[idx].getNs(), req.getOrdered(), op->getMulti(), boost::none, &out); @@ -489,8 +1050,8 @@ bool handleDeleteOp(OperationContext* opCtx, const BulkWriteDeleteOp* op, const BulkWriteCommandRequest& req, size_t currentOpIdx, - ErrorCallback errorCB, - DeleteCallback replyCB) { + write_ops_exec::LastOpFixer& lastOpFixer, + BulkWriteReplies& responses) { const auto& nsInfo = req.getNsInfo(); auto idx = op->getDeleteCommand(); try { @@ -498,6 +1059,10 @@ bool handleDeleteOp(OperationContext* opCtx, uassert(ErrorCodes::InvalidOptions, "May not specify both multi and return in bulkWrite command.", !op->getReturn()); + + uassert(ErrorCodes::InvalidOptions, + "Cannot use retryable writes with multi=true", + !opCtx->isRetryableWrite()); } if (op->getReturnFields()) { @@ -508,16 +1073,45 @@ bool handleDeleteOp(OperationContext* opCtx, const NamespaceString& nsString = nsInfo[idx].getNs(); uassertStatusOK(userAllowedWriteNS(opCtx, nsString)); + + if (nsInfo[idx].getEncryptionInformation().has_value()) { + return attemptProcessFLEDelete(opCtx, op, req, currentOpIdx, responses, nsInfo[idx]); + } + OpDebug* opDebug = &curOp->debug(); doTransactionValidationForWrites(opCtx, nsString); + auto stmtId = opCtx->isRetryableWrite() + ? bulk_write_common::getStatementId(req, currentOpIdx) + : kUninitializedStmtId; + if (opCtx->isRetryableWrite()) { + const auto txnParticipant = TransactionParticipant::get(opCtx); + // If 'return' is not specified then we do not need to parse the statement. Since + // multi:true is not allowed with retryable writes if the statement was executed + // there will always be 1 document deleted. 
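Worth noting for the retry branches that follow: the patch uses two different TransactionParticipant helpers depending on whether a prior result document must be reconstructed. checkStatementExecutedNoOplogEntryFetch is the cheap membership check used when no 'return' image is needed (a retryable delete is single-document, so a prior execution always means exactly one deletion), while checkStatementExecuted fetches the oplog entry so the earlier image can be rebuilt via getRetryResultForDelete. A condensed sketch of the pattern used immediately below (illustrative only, with the surrounding error handling elided):

    // Sketch: the two retry-check flavors for a delete op in a retryable write.
    if (!op->getReturn()) {
        if (txnParticipant.checkStatementExecutedNoOplogEntryFetch(opCtx, stmtId)) {
            responses.addDeleteReply(currentOpIdx, 1 /* nDeleted */, boost::none, stmtId);
        }
    } else if (auto entry = txnParticipant.checkStatementExecuted(opCtx, stmtId)) {
        auto [numDocs, image] = getRetryResultForDelete(opCtx, nsString, entry);
        responses.addDeleteReply(currentOpIdx, numDocs, image, stmtId);
    }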
+ if (!op->getReturn()) { + if (txnParticipant.checkStatementExecutedNoOplogEntryFetch(opCtx, stmtId)) { + RetryableWritesStats::get(opCtx)->incrementRetriedStatementsCount(); + responses.addDeleteReply(currentOpIdx, 1, boost::none, stmtId); + return true; + } + } else { + if (auto entry = txnParticipant.checkStatementExecuted(opCtx, stmtId)) { + RetryableWritesStats::get(opCtx)->incrementRetriedStatementsCount(); + auto [numDocs, image] = getRetryResultForDelete(opCtx, nsString, entry); + responses.addDeleteReply(currentOpIdx, numDocs, image, stmtId); + return true; + } + } + } + auto deleteRequest = DeleteRequest(); deleteRequest.setNsString(nsString); deleteRequest.setQuery(op->getFilter()); deleteRequest.setProj(op->getReturnFields().value_or(BSONObj())); deleteRequest.setLegacyRuntimeConstants(Variables::generateRuntimeConstants(opCtx)); - deleteRequest.setLet(op->getLet()); + deleteRequest.setLet(req.getLet()); deleteRequest.setSort(op->getSort().value_or(BSONObj())); deleteRequest.setHint(op->getHint()); deleteRequest.setCollation(op->getCollation().value_or(BSONObj())); @@ -525,42 +1119,32 @@ bool handleDeleteOp(OperationContext* opCtx, deleteRequest.setReturnDeleted(op->getReturn()); deleteRequest.setIsExplain(false); - deleteRequest.setYieldPolicy(opCtx->inMultiDocumentTransaction() - ? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO); + deleteRequest.setYieldPolicy(PlanYieldPolicy::YieldPolicy::YIELD_AUTO); - if (opCtx->getTxnNumber() && req.getStmtId()) { - deleteRequest.setStmtId(*req.getStmtId()); - } + deleteRequest.setStmtId(stmtId); const bool inTransaction = opCtx->inMultiDocumentTransaction(); - - return writeConflictRetry(opCtx, "bulkWriteDelete", nsString.ns(), [&] { + lastOpFixer.startingOp(nsString); + return writeConflictRetry(opCtx, "bulkWriteDelete", nsString, [&] { boost::optional docFound; auto nDeleted = write_ops_exec::writeConflictRetryRemove( - opCtx, nsString, &deleteRequest, curOp, opDebug, inTransaction, docFound); - replyCB(currentOpIdx, nDeleted, docFound); + opCtx, nsString, deleteRequest, curOp, opDebug, inTransaction, docFound); + lastOpFixer.finishedOpSuccessfully(); + responses.addDeleteReply(currentOpIdx, nDeleted, docFound, boost::none); return true; }); } catch (const DBException& ex) { - errorCB(currentOpIdx, ex.toStatus()); + // IncompleteTrasactionHistory should always be command fatal. + if (ex.code() == ErrorCodes::IncompleteTransactionHistory) { + throw; + } + responses.addErrorReply(opCtx, currentOpIdx, ex.toStatus()); write_ops_exec::WriteResult out; return write_ops_exec::handleError( opCtx, ex, nsInfo[idx].getNs(), req.getOrdered(), false, boost::none, &out); } } -bool haveSpaceForNext(const BSONObj& nextDoc, long long numDocs, size_t bytesBuffered) { - invariant(numDocs >= 0); - if (!numDocs) { - // Allow the first output document to exceed the limit to ensure we can always make - // progress. - return true; - } - - return (bytesBuffered + nextDoc.objsize()) <= BSONObjMaxUserSize; -} - class BulkWriteCmd : public BulkWriteCmdVersion1Gen { public: bool adminOnly() const final { @@ -614,46 +1198,19 @@ class BulkWriteCmd : public BulkWriteCmdVersion1Gen { gFeatureFlagBulkWriteCommand.isEnabled(serverGlobalParams.featureCompatibility)); auto& req = request(); - const auto& ops = req.getOps(); - const auto& nsInfo = req.getNsInfo(); - - uassert(ErrorCodes::InvalidOptions, - str::stream() - << "May not specify both stmtId and stmtIds in bulkWrite command. 
Got " - << BSON("stmtId" << *req.getStmtId() << "stmtIds" << *req.getStmtIds()) - << ". BulkWrite command: " << req.toBSON({}), - !(req.getStmtId() && req.getStmtIds())); - if (const auto& stmtIds = req.getStmtIds()) { - uassert( - ErrorCodes::InvalidLength, - str::stream() - << "Number of statement ids must match the number of batch entries. Got " - << stmtIds->size() << " statement ids but " << ops.size() - << " operations. Statement ids: " << BSON("stmtIds" << *stmtIds) - << ". BulkWrite command: " << req.toBSON({}), - stmtIds->size() == ops.size()); - } - - // Validate that every ops entry has a valid nsInfo index. - for (const auto& op : ops) { - const auto& bulkWriteOp = BulkWriteCRUDOp(op); - unsigned int nsInfoIdx = bulkWriteOp.getNsInfoIdx(); - uassert(ErrorCodes::BadValue, - str::stream() << "BulkWrite ops entry " << bulkWriteOp.toBSON() - << " has an invalid nsInfo index.", - nsInfoIdx < nsInfo.size()); - } + bulk_write_common::validateRequest(req, opCtx->isRetryableWrite()); // Apply all of the write operations. - auto replies = bulk_write::performWrites(opCtx, req); + auto [replies, retriedStmtIds, numErrors] = bulk_write::performWrites(opCtx, req); - return _populateCursorReply(opCtx, req, std::move(replies)); + return _populateCursorReply( + opCtx, req, std::move(replies), std::move(retriedStmtIds), numErrors); } void doCheckAuthorization(OperationContext* opCtx) const final try { auto session = AuthorizationSession::get(opCtx->getClient()); - auto privileges = _getPrivileges(); + auto privileges = bulk_write_common::getPrivileges(request()); // Make sure all privileges are authorized. uassert(ErrorCodes::Unauthorized, @@ -665,43 +1222,14 @@ class BulkWriteCmd : public BulkWriteCmdVersion1Gen { } private: - std::vector _getPrivileges() const { - const auto& ops = request().getOps(); - const auto& nsInfo = request().getNsInfo(); - - std::vector privileges; - privileges.reserve(nsInfo.size()); - ActionSet actions; - if (request().getBypassDocumentValidation()) { - actions.addAction(ActionType::bypassDocumentValidation); - } - - // Create initial Privilege entry for each nsInfo entry. - for (const auto& ns : nsInfo) { - privileges.emplace_back(ResourcePattern::forExactNamespace(ns.getNs()), actions); - } - - // Iterate over each op and assign the appropriate actions to the namespace privilege. 
- for (const auto& op : ops) { - const auto& bulkWriteOp = BulkWriteCRUDOp(op); - ActionSet newActions = bulkWriteOp.getActions(); - unsigned int nsInfoIdx = bulkWriteOp.getNsInfoIdx(); - uassert(ErrorCodes::BadValue, - str::stream() << "BulkWrite ops entry " << bulkWriteOp.toBSON() - << " has an invalid nsInfo index.", - nsInfoIdx < nsInfo.size()); - - auto& privilege = privileges[nsInfoIdx]; - privilege.addActions(newActions); - } - - return privileges; - } - Reply _populateCursorReply(OperationContext* opCtx, const BulkWriteCommandRequest& req, - std::vector replies) { - const NamespaceString cursorNss = NamespaceString::makeBulkWriteNSS(); + bulk_write::BulkWriteReplyItems replies, + bulk_write::RetriedStmtIds retriedStmtIds, + int numErrors) { + auto reqObj = unparsedRequest().body; + const NamespaceString cursorNss = + NamespaceString::makeBulkWriteNSS(req.getDollarTenant()); auto expCtx = make_intrusive( opCtx, std::unique_ptr(nullptr), ns()); @@ -734,8 +1262,8 @@ class BulkWriteCmd : public BulkWriteCmdVersion1Gen { batchSize = *req.getCursor()->getBatchSize(); } - size_t numReplies = 0; - size_t bytesBuffered = 0; + size_t numRepliesInFirstBatch = 0; + FindCommon::BSONArrayResponseSizeTracker responseSizeTracker; for (long long objCount = 0; objCount < batchSize; objCount++) { BSONObj nextDoc; PlanExecutor::ExecState state = exec->getNext(&nextDoc, nullptr); @@ -746,18 +1274,28 @@ class BulkWriteCmd : public BulkWriteCmdVersion1Gen { // If we can't fit this result inside the current batch, then we stash it for // later. - if (!haveSpaceForNext(nextDoc, objCount, bytesBuffered)) { + if (!responseSizeTracker.haveSpaceForNext(nextDoc)) { exec->stashResult(nextDoc); break; } - numReplies++; - bytesBuffered += nextDoc.objsize(); + numRepliesInFirstBatch++; + responseSizeTracker.add(nextDoc); } + CurOp::get(opCtx)->setEndOfOpMetrics(numRepliesInFirstBatch); if (exec->isEOF()) { - invariant(numReplies == replies.size()); - return BulkWriteCommandReply(BulkWriteCommandResponseCursor( - 0, std::vector(std::move(replies)))); + invariant(numRepliesInFirstBatch == replies.size()); + auto reply = BulkWriteCommandReply( + BulkWriteCommandResponseCursor( + 0, std::vector(std::move(replies))), + numErrors); + if (!retriedStmtIds.empty()) { + reply.setRetriedStmtIds(std::move(retriedStmtIds)); + } + + setElectionIdandOpTime(opCtx, reply); + + return reply; } exec->saveState(); @@ -772,16 +1310,35 @@ class BulkWriteCmd : public BulkWriteCmdVersion1Gen { opCtx->getWriteConcern(), repl::ReadConcernArgs::get(opCtx), ReadPreferenceSetting::get(opCtx), - unparsedRequest().body, - _getPrivileges()}); + reqObj, + bulk_write_common::getPrivileges(req)}); auto cursorId = pinnedCursor.getCursor()->cursorid(); pinnedCursor->incNBatches(); pinnedCursor->incNReturnedSoFar(replies.size()); - replies.resize(numReplies); - return BulkWriteCommandReply(BulkWriteCommandResponseCursor( - cursorId, std::vector(std::move(replies)))); + replies.resize(numRepliesInFirstBatch); + auto reply = BulkWriteCommandReply( + BulkWriteCommandResponseCursor(cursorId, + std::vector(std::move(replies))), + numErrors); + if (!retriedStmtIds.empty()) { + reply.setRetriedStmtIds(std::move(retriedStmtIds)); + } + + setElectionIdandOpTime(opCtx, reply); + + return reply; + } + + void setElectionIdandOpTime(OperationContext* opCtx, BulkWriteCommandReply& reply) { + // Undocumented repl fields that mongos depends on. 
+ auto* replCoord = repl::ReplicationCoordinator::get(opCtx->getServiceContext()); + const auto replMode = replCoord->getReplicationMode(); + if (replMode != repl::ReplicationCoordinator::modeNone) { + reply.setOpTime(repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp()); + reply.setElectionId(replCoord->getElectionId()); + } } }; @@ -791,8 +1348,7 @@ class BulkWriteCmd : public BulkWriteCmdVersion1Gen { namespace bulk_write { -std::vector performWrites(OperationContext* opCtx, - const BulkWriteCommandRequest& req) { +BulkWriteReply performWrites(OperationContext* opCtx, const BulkWriteCommandRequest& req) { const auto& ops = req.getOps(); const auto& bypassDocumentValidation = req.getBypassDocumentValidation(); @@ -804,30 +1360,10 @@ std::vector performWrites(OperationContext* opCtx, auto responses = BulkWriteReplies(req, ops.size()); - // Construct reply handler callbacks. - auto insertCB = [&responses](OperationContext* opCtx, - int currentOpIdx, - write_ops_exec::WriteResult& writes) { - responses.addInsertReplies(opCtx, currentOpIdx, writes); - }; - auto updateCB = [&responses](int currentOpIdx, - const UpdateResult& result, - const boost::optional& value) { - responses.addUpdateReply(currentOpIdx, result, value); - }; - auto deleteCB = - [&responses](int currentOpIdx, long long nDeleted, const boost::optional& value) { - responses.addDeleteReply(currentOpIdx, nDeleted, value); - }; - - auto errorCB = [&responses](int currentOpIdx, const Status& status) { - responses.addErrorReply(currentOpIdx, status); - }; - // Create a current insert batch. const size_t maxBatchSize = internalInsertMaxBatchSize.load(); write_ops_exec::LastOpFixer lastOpFixer(opCtx); - auto batch = InsertBatch(req, std::min(ops.size(), maxBatchSize), insertCB, lastOpFixer); + auto batch = InsertBatch(req, std::min(ops.size(), maxBatchSize), responses, lastOpFixer); size_t idx = 0; @@ -837,23 +1373,46 @@ std::vector performWrites(OperationContext* opCtx, if (curOp) { finishCurOp(opCtx, &*curOp); } + + const auto& retriedStmtIds = responses.getRetriedStmtIds(); + // If any statements were retried then incremement command counter. + if (!retriedStmtIds.empty()) { + RetryableWritesStats::get(opCtx)->incrementRetriedCommandsCount(); + } }); - // Tell mongod what the shard and database versions are. This will cause writes to fail in case - // there is a mismatch in the mongos request provided versions and the local (shard's) + bool hasEncryptionInformation = false; + + // Tell mongod what the shard and database versions are. This will cause writes to fail in + // case there is a mismatch in the mongos request provided versions and the local (shard's) // understanding of the version. for (const auto& nsInfo : req.getNsInfo()) { // TODO (SERVER-72767, SERVER-72804, SERVER-72805): Support timeseries collections. 
OperationShardingState::setShardRole( opCtx, nsInfo.getNs(), nsInfo.getShardVersion(), nsInfo.getDatabaseVersion()); + + if (nsInfo.getEncryptionInformation().has_value()) { + hasEncryptionInformation = true; + } + } + + if (hasEncryptionInformation) { + uassert(ErrorCodes::BadValue, + "BulkWrite with Queryable Encryption supports only a single namespace.", + req.getNsInfo().size() == 1); } for (; idx < ops.size(); ++idx) { + if (MONGO_unlikely(hangBetweenProcessingBulkWriteOps.shouldFail())) { + CurOpFailpointHelpers::waitWhileFailPointEnabled( + &hangBetweenProcessingBulkWriteOps, opCtx, "hangBetweenProcessingBulkWriteOps"); + } + auto op = BulkWriteCRUDOp(ops[idx]); auto opType = op.getType(); if (opType == BulkWriteCRUDOp::kInsert) { - if (!handleInsertOp(opCtx, op.getInsert(), req, idx, errorCB, batch)) { + if (!handleInsertOp(opCtx, op.getInsert(), req, idx, responses, batch)) { // Insert write failed can no longer continue. break; } @@ -862,7 +1421,13 @@ std::vector performWrites(OperationContext* opCtx, if (!batch.flush(opCtx)) { break; } - if (!handleUpdateOp(opCtx, curOp, op.getUpdate(), req, idx, errorCB, updateCB)) { + if (hasEncryptionInformation) { + uassert( + ErrorCodes::InvalidOptions, + "BulkWrite update with Queryable Encryption supports only a single operation.", + ops.size() == 1); + } + if (!handleUpdateOp(opCtx, curOp, op.getUpdate(), req, idx, lastOpFixer, responses)) { // Update write failed can no longer continue. break; } @@ -871,7 +1436,13 @@ std::vector performWrites(OperationContext* opCtx, if (!batch.flush(opCtx)) { break; } - if (!handleDeleteOp(opCtx, curOp, op.getDelete(), req, idx, errorCB, deleteCB)) { + if (hasEncryptionInformation) { + uassert( + ErrorCodes::InvalidOptions, + "BulkWrite delete with Queryable Encryption supports only a single operation.", + ops.size() == 1); + } + if (!handleDeleteOp(opCtx, curOp, op.getDelete(), req, idx, lastOpFixer, responses)) { // Delete write failed can no longer continue. break; } @@ -884,7 +1455,8 @@ std::vector performWrites(OperationContext* opCtx, invariant(batch.empty()); - return responses.getReplies(); + return make_tuple( + responses.getReplies(), responses.getRetriedStmtIds(), responses.getNumErrors()); } } // namespace bulk_write diff --git a/src/mongo/db/commands/bulk_write.h b/src/mongo/db/commands/bulk_write.h index 20b2647ca3781..c88235790be83 100644 --- a/src/mongo/db/commands/bulk_write.h +++ b/src/mongo/db/commands/bulk_write.h @@ -29,16 +29,22 @@ #pragma once +#include +#include #include #include "mongo/db/commands/bulk_write_gen.h" #include "mongo/db/commands/bulk_write_parser.h" +#include "mongo/db/operation_context.h" namespace mongo { namespace bulk_write { -std::vector performWrites(OperationContext* opCtx, - const BulkWriteCommandRequest& req); +using RetriedStmtIds = std::vector; +using BulkWriteReplyItems = std::vector; +using BulkWriteReply = std::tuple; + +BulkWriteReply performWrites(OperationContext* opCtx, const BulkWriteCommandRequest& req); } // namespace bulk_write } // namespace mongo diff --git a/src/mongo/db/commands/bulk_write.idl b/src/mongo/db/commands/bulk_write.idl index f64f1b12697d0..b4a9afe9909ef 100644 --- a/src/mongo/db/commands/bulk_write.idl +++ b/src/mongo/db/commands/bulk_write.idl @@ -116,7 +116,7 @@ structs: type: indexHint default: mongo::BSONObj() stability: unstable - let: + constants: description: "Specifies constant values that can be referred to in the pipeline performing a custom update." 
type: object @@ -135,7 +135,8 @@ structs: stability: unstable return: description: "When set to 'post', returns the modified document - rather than the original. Default is to return the original ('pre')." + rather than the original. 'pre' returns the original. + Default is to not return either." type: string optional: true validator: @@ -158,13 +159,13 @@ structs: validator: { gte: 0 } stability: unstable filter: - description: "The query that matches documents to update. Uses the same query + description: "The query that matches documents to delete. Uses the same query selectors as used in the 'find' operation." type: object stability: unstable multi: - description: "If true, updates all documents that meet the query criteria. If false, - limits the update to one document which meets the query criteria." + description: "If true, deletes all documents that meet the query criteria. If false, + limits the delete to one document which meets the query criteria." type: bool default: false stability: unstable @@ -173,12 +174,6 @@ structs: type: indexHint default: mongo::BSONObj() stability: unstable - let: - description: "Specifies constant values that can be referred to in the pipeline - performing a custom update." - type: object - optional: true - stability: unstable collation: description: "Specifies the collation to use for the operation." type: object @@ -255,8 +250,11 @@ structs: cursor: type: BulkWriteCommandResponseCursor stability: unstable - electionId: + numErrors: type: int + stability: unstable + electionId: + type: objectid optional: true stability: unstable opTime: @@ -325,3 +323,9 @@ commands: type: array optional: true stability: unstable + let: + description: "Specifies constant values." + type: object + optional: true + stability: unstable + diff --git a/src/mongo/db/commands/bulk_write_common.cpp b/src/mongo/db/commands/bulk_write_common.cpp new file mode 100644 index 0000000000000..d68318b0b7e36 --- /dev/null +++ b/src/mongo/db/commands/bulk_write_common.cpp @@ -0,0 +1,187 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/commands/bulk_write_common.h" + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/commands/bulk_write_crud_op.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +namespace mongo { +namespace bulk_write_common { + +void validateRequest(const BulkWriteCommandRequest& req, bool isRetryableWrite) { + const auto& ops = req.getOps(); + const auto& nsInfos = req.getNsInfo(); + + uassert(ErrorCodes::InvalidLength, + str::stream() << "Write batch sizes must be between 1 and " + << write_ops::kMaxWriteBatchSize << ". Got " << ops.size() + << " operations.", + ops.size() != 0 && ops.size() <= write_ops::kMaxWriteBatchSize); + + uassert(ErrorCodes::InvalidOptions, + str::stream() << "May not specify both stmtId and stmtIds in bulkWrite command. Got " + << BSON("stmtId" << *req.getStmtId() << "stmtIds" << *req.getStmtIds()) + << ". BulkWrite command: " << req.toBSON({}), + !(req.getStmtId() && req.getStmtIds())); + + if (const auto& stmtIds = req.getStmtIds()) { + uassert( + ErrorCodes::InvalidLength, + str::stream() << "Number of statement ids must match the number of batch entries. Got " + << stmtIds->size() << " statement ids but " << ops.size() + << " operations. Statement ids: " << BSON("stmtIds" << *stmtIds) + << ". BulkWrite command: " << req.toBSON({}), + stmtIds->size() == ops.size()); + } + + // Validate the namespaces in nsInfo. + for (const auto& nsInfo : nsInfos) { + uassert(ErrorCodes::InvalidNamespace, + str::stream() << "Invalid namespace specified for bulkWrite: '" + << nsInfo.getNs().toStringForErrorMsg() << "'", + nsInfo.getNs().isValid()); + } + + // Validate that every ops entry has a valid nsInfo index. + // Also validate that we only have one findAndModify for retryable writes. 
+ bool seenFindAndModify = false; + for (const auto& op : ops) { + const auto& bulkWriteOp = BulkWriteCRUDOp(op); + unsigned int nsInfoIdx = bulkWriteOp.getNsInfoIdx(); + uassert(ErrorCodes::BadValue, + str::stream() << "BulkWrite ops entry " << bulkWriteOp.toBSON() + << " has an invalid nsInfo index.", + nsInfoIdx < nsInfos.size()); + + if (isRetryableWrite) { + switch (bulkWriteOp.getType()) { + case BulkWriteCRUDOp::kInsert: + break; + case BulkWriteCRUDOp::kUpdate: { + auto update = bulkWriteOp.getUpdate(); + if (update->getReturn()) { + uassert( + ErrorCodes::BadValue, + "BulkWrite can only support 1 op with a return for a retryable write", + !seenFindAndModify); + seenFindAndModify = true; + } + break; + } + case BulkWriteCRUDOp::kDelete: { + auto deleteOp = bulkWriteOp.getDelete(); + if (deleteOp->getReturn()) { + uassert( + ErrorCodes::BadValue, + "BulkWrite can only support 1 op with a return for a retryable write", + !seenFindAndModify); + seenFindAndModify = true; + } + break; + } + } + } + } +} + +std::vector getPrivileges(const BulkWriteCommandRequest& req) { + const auto& ops = req.getOps(); + const auto& nsInfo = req.getNsInfo(); + + std::vector privileges; + privileges.reserve(nsInfo.size()); + ActionSet actions; + if (req.getBypassDocumentValidation()) { + actions.addAction(ActionType::bypassDocumentValidation); + } + + // Create initial Privilege entry for each nsInfo entry. + for (const auto& ns : nsInfo) { + privileges.emplace_back(ResourcePattern::forExactNamespace(ns.getNs()), actions); + } + + // Iterate over each op and assign the appropriate actions to the namespace privilege. + for (const auto& op : ops) { + const auto& bulkWriteOp = BulkWriteCRUDOp(op); + ActionSet newActions = bulkWriteOp.getActions(); + unsigned int nsInfoIdx = bulkWriteOp.getNsInfoIdx(); + uassert(ErrorCodes::BadValue, + str::stream() << "BulkWrite ops entry " << bulkWriteOp.toBSON() + << " has an invalid nsInfo index.", + nsInfoIdx < nsInfo.size()); + + auto& privilege = privileges[nsInfoIdx]; + privilege.addActions(newActions); + } + + return privileges; +} + +int32_t getStatementId(const BulkWriteCommandRequest& req, size_t currentOpIdx) { + auto stmtId = req.getStmtId(); + auto stmtIds = req.getStmtIds(); + + if (stmtIds) { + return stmtIds->at(currentOpIdx); + } + + int32_t firstStmtId = stmtId ? *stmtId : 0; + return firstStmtId + currentOpIdx; +} + +NamespaceInfoEntry getFLENamespaceInfoEntry(const BSONObj& bulkWrite) { + BulkWriteCommandRequest bulk = + BulkWriteCommandRequest::parse(IDLParserContext("bulkWrite"), bulkWrite); + const std::vector& nss = bulk.getNsInfo(); + uassert(ErrorCodes::BadValue, + "BulkWrite with Queryable Encryption supports only a single namespace", + nss.size() == 1); + return nss[0]; +} + +} // namespace bulk_write_common +} // namespace mongo diff --git a/src/mongo/db/commands/bulk_write_common.h b/src/mongo/db/commands/bulk_write_common.h new file mode 100644 index 0000000000000..9edcb4a83e44a --- /dev/null +++ b/src/mongo/db/commands/bulk_write_common.h @@ -0,0 +1,71 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. 
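The getPrivileges helper above builds one privilege per nsInfo entry and then lets each op add the action it needs at its nsInfo index, so multiple ops against the same namespace collapse into a single privilege. A minimal standalone sketch of that bookkeeping, with illustrative Privilege and Op types rather than the server's auth classes:

```cpp
#include <cstddef>
#include <iostream>
#include <set>
#include <string>
#include <vector>

// Illustrative stand-ins for the auth types used by the real helper.
struct Privilege {
    std::string ns;
    std::set<std::string> actions;
};

struct Op {
    std::size_t nsInfoIdx;  // which nsInfo namespace this op targets
    std::string action;     // e.g. "insert", "update", "remove"
};

std::vector<Privilege> derivePrivileges(const std::vector<std::string>& nsInfo,
                                        const std::vector<Op>& ops) {
    // One initial privilege entry per nsInfo namespace.
    std::vector<Privilege> privileges;
    privileges.reserve(nsInfo.size());
    for (const auto& ns : nsInfo) {
        privileges.push_back({ns, {}});
    }
    // Each op contributes its action to the privilege at its nsInfo index.
    for (const auto& op : ops) {
        privileges.at(op.nsInfoIdx).actions.insert(op.action);  // at() rejects bad indexes
    }
    return privileges;
}

int main() {
    auto privileges = derivePrivileges({"db.a", "db.b"},
                                       {{0, "insert"}, {0, "update"}, {1, "remove"}});
    for (const auto& p : privileges) {
        std::cout << p.ns << ": " << p.actions.size() << " action(s)\n";
    }
}
```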
+ * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/commands/bulk_write_parser.h" + +/** + * Contains common functionality shared between the bulkWrite command in mongos and mongod. + */ + +namespace mongo { +namespace bulk_write_common { + +/** + * Validates the given bulkWrite command request and throws if the request is malformed. + */ +void validateRequest(const BulkWriteCommandRequest& req, bool isRetryableWrite); + +/** + * Get the privileges needed to perform the given bulkWrite command. + */ +std::vector getPrivileges(const BulkWriteCommandRequest& req); + +/** + * Get the statement ID for an operation within a bulkWrite command, taking into consideration + * whether the stmtId / stmtIds fields are present on the request. + */ +int32_t getStatementId(const BulkWriteCommandRequest& req, size_t currentOpIdx); + +/** + * From a serialized BulkWriteCommandRequest containing a single NamespaceInfoEntry, + * extract that NamespaceInfoEntry. For bulkWrite with queryable encryption. 
+ */ +NamespaceInfoEntry getFLENamespaceInfoEntry(const BSONObj& bulkWrite); + +} // namespace bulk_write_common +} // namespace mongo diff --git a/src/mongo/db/commands/bulk_write_crud_op.cpp b/src/mongo/db/commands/bulk_write_crud_op.cpp index 68aeb5a6d5e2c..a014dbf322235 100644 --- a/src/mongo/db/commands/bulk_write_crud_op.cpp +++ b/src/mongo/db/commands/bulk_write_crud_op.cpp @@ -29,6 +29,9 @@ #include "mongo/db/commands/bulk_write_crud_op.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/stdx/variant.h" + namespace mongo { BulkWriteCRUDOp::BulkWriteCRUDOp(const stdx::variant +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/commands/bulk_write_gen.h" #include "mongo/stdx/variant.h" diff --git a/src/mongo/db/commands/bulk_write_parser.cpp b/src/mongo/db/commands/bulk_write_parser.cpp index 1e9404e80f4d2..6dfa4a0d1217e 100644 --- a/src/mongo/db/commands/bulk_write_parser.cpp +++ b/src/mongo/db/commands/bulk_write_parser.cpp @@ -29,9 +29,19 @@ #include "mongo/db/commands/bulk_write_parser.h" +#include +#include +#include #include +#include +#include + +#include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -146,9 +156,7 @@ BSONObj BulkWriteReplyItem::serialize() const { invariant(_ok == 1.0); } - if (_n) { - builder.append(kNFieldName, _n.get()); - } + builder.append(kNFieldName, _n.value_or(0)); if (_nModified) { builder.append(kNModifiedFieldName, _nModified.get()); diff --git a/src/mongo/db/commands/bulk_write_parser.h b/src/mongo/db/commands/bulk_write_parser.h index f30c1e18bccd5..1d196b5c9f56f 100644 --- a/src/mongo/db/commands/bulk_write_parser.h +++ b/src/mongo/db/commands/bulk_write_parser.h @@ -29,10 +29,16 @@ #pragma once +#include #include +#include +#include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/ops/write_ops_gen.h" diff --git a/src/mongo/db/commands/change_stream_state_command.cpp b/src/mongo/db/commands/change_stream_state_command.cpp index 44d186c03a1bf..f4bcb8999d89e 100644 --- a/src/mongo/db/commands/change_stream_state_command.cpp +++ b/src/mongo/db/commands/change_stream_state_command.cpp @@ -27,14 +27,32 @@ * it in the license file. 
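The statement-id rule declared above (and implemented in bulk_write_common.cpp) can be summarized in a few lines: an explicit stmtIds array wins, otherwise ids are derived from the single stmtId, defaulting to 0, plus the op's position in the batch. A self-contained sketch, using std::optional in place of the IDL accessors:

```cpp
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Minimal sketch of the statement-id resolution rule: an explicit stmtIds
// array takes precedence; otherwise ids are the single stmtId (default 0)
// plus the op's index in the batch.
std::int32_t resolveStatementId(const std::optional<std::int32_t>& stmtId,
                                const std::optional<std::vector<std::int32_t>>& stmtIds,
                                std::size_t currentOpIdx) {
    if (stmtIds) {
        return stmtIds->at(currentOpIdx);
    }
    const std::int32_t first = stmtId.value_or(0);
    return first + static_cast<std::int32_t>(currentOpIdx);
}

int main() {
    // No ids supplied: ops get 0, 1, 2, ...
    assert(resolveStatementId(std::nullopt, std::nullopt, 2) == 2);
    // A base stmtId shifts the sequence.
    assert(resolveStatementId(5, std::nullopt, 2) == 7);
    // An explicit array wins outright.
    assert(resolveStatementId(5, std::vector<std::int32_t>{10, 20, 30}, 2) == 30);
}
```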
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/change_stream_state_gen.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/set_change_stream_state_coordinator.h" -#include "mongo/logv2/log.h" +#include "mongo/db/set_change_stream_state_coordinator_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -113,8 +131,9 @@ class SetChangeStreamStateCommand final : public TypedCommandgetClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::setChangeStreamState})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::setChangeStreamState})); } }; } setChangeStreamStateCommand; @@ -179,8 +198,9 @@ class GetChangeStreamStateCommand final : public TypedCommandgetClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::getChangeStreamState})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::getChangeStreamState})); } }; } getChangeStreamStateCommand; diff --git a/src/mongo/db/commands/collection_to_capped.cpp b/src/mongo/db/commands/collection_to_capped.cpp index 563f89e009eae..d1f9a99bde845 100644 --- a/src/mongo/db/commands/collection_to_capped.cpp +++ b/src/mongo/db/commands/collection_to_capped.cpp @@ -27,20 +27,32 @@ * it in the license file. 
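Several commands in this diff, starting with the change stream state commands above, now scope cluster-level authorization checks by the tenant carried on the request's database name. A rough standalone model of why that matters, with illustrative types; this is not the AuthorizationSession API:

```cpp
#include <iostream>
#include <map>
#include <optional>
#include <set>
#include <string>

// Simplified model of the tenant-aware check: the "cluster" resource is keyed
// by the tenant taken from the request's dbName, so a grant made for one
// tenant no longer authorizes actions in another. TenantId and the action
// names are illustrative stand-ins.
using TenantId = std::optional<std::string>;  // nullopt models no multitenancy

struct AuthSession {
    // Actions granted on the cluster resource, per tenant.
    std::map<TenantId, std::set<std::string>> clusterGrants;

    bool isAuthorizedForClusterAction(const TenantId& tenant, const std::string& action) const {
        auto it = clusterGrants.find(tenant);
        return it != clusterGrants.end() && it->second.count(action) > 0;
    }
};

int main() {
    AuthSession session;
    session.clusterGrants[TenantId{"tenantA"}].insert("setChangeStreamState");

    std::cout << session.isAuthorizedForClusterAction(TenantId{"tenantA"}, "setChangeStreamState")
              << "\n";  // 1: grant exists for this tenant
    std::cout << session.isAuthorizedForClusterAction(TenantId{"tenantB"}, "setChangeStreamState")
              << "\n";  // 0: grants do not leak across tenants
}
```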
*/ -#include "mongo/platform/basic.h" - - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/capped_utils.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/query/find.h" -#include "mongo/db/query/internal_plans.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -76,7 +88,7 @@ class CmdCloneCollectionAsCapped : public BasicCommand { const NamespaceString nss( NamespaceStringUtil::parseNamespaceFromRequest(dbName, nssElt.valueStringData())); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid target namespace: " << nss.ns(), + str::stream() << "Invalid target namespace: " << nss.toStringForErrorMsg(), nss.isValid()); if (!as->isAuthorizedForActionsOnResource( diff --git a/src/mongo/db/commands/command_mirroring_test.cpp b/src/mongo/db/commands/command_mirroring_test.cpp index 6afb28d87183a..d4c78f36d854a 100644 --- a/src/mongo/db/commands/command_mirroring_test.cpp +++ b/src/mongo/db/commands/command_mirroring_test.cpp @@ -27,15 +27,31 @@ * it in the license file. */ +#include #include - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/compact.cpp b/src/mongo/db/commands/compact.cpp index 8e23ff71a5876..b9bf6a526220a 100644 --- a/src/mongo/db/commands/compact.cpp +++ b/src/mongo/db/commands/compact.cpp @@ -27,20 +27,29 @@ * it in the license file. 
*/ +#include +#include #include -#include -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog/collection.h" +#include "mongo/db/auth/authorization_session.h" #include "mongo/db/catalog/collection_compact.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/commands.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/curop.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/commands/conn_pool_stats.cpp b/src/mongo/db/commands/conn_pool_stats.cpp index da62689917c96..557ee5287761f 100644 --- a/src/mongo/db/commands/conn_pool_stats.cpp +++ b/src/mongo/db/commands/conn_pool_stats.cpp @@ -27,20 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include -#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/connpool.h" #include "mongo/client/dbclient_connection.h" #include "mongo/client/global_conn_pool.h" +#include "mongo/client/replica_set_monitor_manager.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" #include "mongo/executor/connection_pool_stats.h" -#include "mongo/executor/network_interface_factory.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/s/grid.h" @@ -67,8 +74,9 @@ class PoolStats final : public BasicCommand { const DatabaseName& dbName, const BSONObj& cmdObj) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::connPoolStats)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::connPoolStats)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -97,13 +105,15 @@ class PoolStats final : public BasicCommand { // Sharding connections, if we have any { auto const grid = Grid::get(opCtx); - if (grid->getExecutorPool()) { - grid->getExecutorPool()->appendConnectionStats(&stats); - } - - auto const customConnPoolStatsFn = grid->getCustomConnectionPoolStatsFn(); - if (customConnPoolStatsFn) { - customConnPoolStatsFn(&stats); + if (grid->isInitialized()) { + if (grid->getExecutorPool()) { + grid->getExecutorPool()->appendConnectionStats(&stats); + } + + auto const customConnPoolStatsFn = grid->getCustomConnectionPoolStatsFn(); + if (customConnPoolStatsFn) { + customConnPoolStatsFn(&stats); + } } } diff --git a/src/mongo/db/commands/conn_pool_sync.cpp b/src/mongo/db/commands/conn_pool_sync.cpp index 
e83a4ef41d3bc..fde99b4368ebd 100644 --- a/src/mongo/db/commands/conn_pool_sync.cpp +++ b/src/mongo/db/commands/conn_pool_sync.cpp @@ -27,11 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connpool.h" #include "mongo/client/global_conn_pool.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { namespace { @@ -56,8 +67,8 @@ class PoolFlushCmd : public BasicCommand { const DatabaseName& dbName, const BSONObj& cmdObj) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::connPoolSync)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::connPoolSync)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/db/commands/connection_status.cpp b/src/mongo/db/commands/connection_status.cpp index 152b9b69478f8..05c0ad371d4af 100644 --- a/src/mongo/db/commands/connection_status.cpp +++ b/src/mongo/db/commands/connection_status.cpp @@ -27,13 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/auth/authorization_manager.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/parsed_privilege_gen.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/commands.h" #include "mongo/db/commands/connection_status_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/read_through_cache.h" namespace mongo { @@ -76,7 +95,7 @@ class CmdConnectionStatus : public TypedCommand { return ret; } - static std::vector expandPrivileges(AuthorizationSession* as) { + static std::vector expandPrivileges(AuthorizationSession* as) { // Create a unified map of resources to privileges, to avoid duplicate // entries in the connection status output. 
User::ResourcePrivilegeMap unified; @@ -92,11 +111,11 @@ class CmdConnectionStatus : public TypedCommand { } } - std::vector ret; + std::vector ret; std::transform(unified.cbegin(), unified.cend(), std::back_inserter(ret), - [](const auto& it) { return it.second; }); + [](const auto& it) { return it.second.toParsedPrivilege(); }); return ret; } diff --git a/src/mongo/db/commands/connection_status.idl b/src/mongo/db/commands/connection_status.idl index 38c0ca03d57f8..152592489ed3a 100644 --- a/src/mongo/db/commands/connection_status.idl +++ b/src/mongo/db/commands/connection_status.idl @@ -30,6 +30,7 @@ global: imports: - "mongo/db/auth/auth_types.idl" + - "mongo/db/auth/parsed_privilege.idl" - "mongo/db/basic_types.idl" structs: @@ -44,7 +45,7 @@ structs: type: array authenticatedUserPrivileges: description: 'Currently authorized privileges across granted roles' - type: array + type: array optional: true ConnectionStatusReply: diff --git a/src/mongo/db/commands/count_cmd.cpp b/src/mongo/db/commands/count_cmd.cpp index 821ad1bc6de81..58705a3256141 100644 --- a/src/mongo/db/commands/count_cmd.cpp +++ b/src/mongo/db/commands/count_cmd.cpp @@ -27,27 +27,76 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/run_aggregate.h" #include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/fle_crud.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/query/collection_query_info.h" #include "mongo/db/query/count_command_as_aggregation_command.h" +#include "mongo/db/query/count_command_gen.h" #include "mongo/db/query/explain.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/get_executor.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/query/view_response_formatter.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/query_analysis_writer.h" -#include "mongo/logv2/log.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/query_analysis_sampler_util.h" 
+#include "mongo/util/assert_util.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -189,7 +238,8 @@ class CmdCount : public BasicCommand { nss, viewAggCmd, verbosity, - APIParameters::get(opCtx).getAPIStrict().value_or(false)); + APIParameters::get(opCtx).getAPIStrict().value_or(false), + request.getSerializationContext()); // An empty PrivilegeVector is acceptable because these privileges are only checked on // getMore and explain will not open a cursor. @@ -226,7 +276,14 @@ class CmdCount : public BasicCommand { auto exec = std::move(statusWithPlanExecutor.getValue()); auto bodyBuilder = result->getBodyBuilder(); - Explain::explainStages(exec.get(), collection, verbosity, BSONObj(), cmdObj, &bodyBuilder); + Explain::explainStages( + exec.get(), + collection, + verbosity, + BSONObj(), + SerializationContext::stateCommandReply(request.getSerializationContext()), + cmdObj, + &bodyBuilder); return Status::OK(); } diff --git a/src/mongo/db/commands/cpuload.cpp b/src/mongo/db/commands/cpuload.cpp index 532354d7d4eca..9fda5aed5df90 100644 --- a/src/mongo/db/commands/cpuload.cpp +++ b/src/mongo/db/commands/cpuload.cpp @@ -27,11 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/util/duration.h" +#include "mongo/util/timer.h" namespace mongo { @@ -77,6 +88,8 @@ class CPULoadCommand : public BasicCommand { if (cmdObj["cpuFactor"].isNumber()) { cpuFactor = cmdObj["cpuFactor"].number(); } + + Timer t{}; long long limit = 10000 * cpuFactor; // volatile used to ensure that loop is not optimized away volatile uint64_t lresult [[maybe_unused]] = 0; // NOLINT @@ -85,6 +98,11 @@ class CPULoadCommand : public BasicCommand { x *= 13; } lresult = x; + + // add time-consuming statistics + auto micros = t.elapsed(); + result.append("durationMillis", durationCount(micros)); + result.append("durationSeconds", durationCount(micros)); return true; } virtual bool supportsWriteConcern(const BSONObj& cmd) const { diff --git a/src/mongo/db/commands/create_command.cpp b/src/mongo/db/commands/create_command.cpp index 78573ff879aae..95d1479b73926 100644 --- a/src/mongo/db/commands/create_command.cpp +++ b/src/mongo/db/commands/create_command.cpp @@ -27,21 +27,56 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/db/auth/authorization_checks.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/create_gen.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/views/view.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -99,7 +134,7 @@ void checkCollectionOptions(OperationContext* opCtx, if (coll) { auto actualOptions = coll->getCollectionOptions(); uassert(ErrorCodes::NamespaceExists, - str::stream() << "namespace " << ns.ns() + str::stream() << "namespace " << ns.toStringForErrorMsg() << " already exists, but with different options: " << actualOptions.toBSON(), options.matchesStorageOptions(actualOptions, collatorFactory)); @@ -118,14 +153,17 @@ void checkCollectionOptions(OperationContext* opCtx, auto fullNewNamespace = NamespaceStringUtil::parseNamespaceFromRequest(ns.dbName(), options.viewOn); uassert(ErrorCodes::NamespaceExists, - str::stream() << "namespace " << ns.ns() << " already exists, but is a view on " - << view->viewOn() << " rather than " << fullNewNamespace, + str::stream() << "namespace " << ns.toStringForErrorMsg() + << " already exists, but is a view on " + << view->viewOn().toStringForErrorMsg() << " rather than " + << fullNewNamespace.toStringForErrorMsg(), view->viewOn() == fullNewNamespace); auto existingPipeline = pipelineAsBsonObj(view->pipeline()); uassert(ErrorCodes::NamespaceExists, - str::stream() << "namespace " << ns.ns() << " already exists, but with pipeline " - << existingPipeline << " rather than " << options.pipeline, + str::stream() << "namespace " << ns.toStringForErrorMsg() + << " already exists, but with pipeline " << existingPipeline + << " rather than " << options.pipeline, existingPipeline.woCompare(options.pipeline) == 0); // Note: the server can add more values to collation options which were not @@ -139,7 +177,7 @@ 
void checkCollectionOptions(OperationContext* opCtx, const auto defaultCollatorSpecBSON = view->defaultCollator() ? view->defaultCollator()->getSpec().toBSON() : BSONObj(); uasserted(ErrorCodes::NamespaceExists, - str::stream() << "namespace " << ns.ns() + str::stream() << "namespace " << ns.toStringForErrorMsg() << " already exists, but with collation: " << defaultCollatorSpecBSON << " rather than " << options.collation); } @@ -273,13 +311,7 @@ class CmdCreate final : public CreateCmdVersion1Gen { repl::ReplicationCoordinator::get(opCtx)->getReplicationMode() == repl::ReplicationCoordinator::Mode::modeReplSet); - if (hasQueryType(cmd.getEncryptedFields().get(), QueryTypeEnum::RangePreview)) { - uassert( - 6775220, - "Queryable Encryption Range support is only supported when FCV supports " - "6.1", - gFeatureFlagFLE2Range.isEnabled(serverGlobalParams.featureCompatibility)); - } + FLEUtil::checkEFCForECC(cmd.getEncryptedFields().get()); } if (auto timeseries = cmd.getTimeseries()) { @@ -306,8 +338,8 @@ class CmdCreate final : public CreateCmdVersion1Gen { uassert(ErrorCodes::InvalidOptions, str::stream() - << cmd.getNamespace() << ": 'timeseries' is not allowed with '" - << fieldName << "'", + << cmd.getNamespace().toStringForErrorMsg() + << ": 'timeseries' is not allowed with '" << fieldName << "'", timeseries::kAllowedCollectionCreationOptions.contains(fieldName)); } @@ -315,9 +347,9 @@ class CmdCreate final : public CreateCmdVersion1Gen { return field.find('.') != std::string::npos; }; auto mustBeTopLevel = [&cmd](StringData field) -> std::string { - return str::stream() - << cmd.getNamespace() << ": '" << field << "' must be a top-level field " - << "and not contain a '.'"; + return str::stream() << cmd.getNamespace().toStringForErrorMsg() << ": '" + << field << "' must be a top-level field " + << "and not contain a '.'"; }; uassert(ErrorCodes::InvalidOptions, mustBeTopLevel("timeField"), @@ -395,8 +427,8 @@ class CmdCreate final : public CreateCmdVersion1Gen { // Check for config.settings in the user command since a validator is allowed // internally on this collection but the user may not modify the validator. uassert(ErrorCodes::InvalidOptions, - str::stream() - << "Document validators not allowed on system collection " << ns(), + str::stream() << "Document validators not allowed on system collection " + << ns().toStringForErrorMsg(), ns() != NamespaceString::kConfigSettingsNamespace); } diff --git a/src/mongo/db/commands/create_indexes_cmd.cpp b/src/mongo/db/commands/create_indexes_cmd.cpp index 0607c0c9e5777..10373b3132e02 100644 --- a/src/mongo/db/commands/create_indexes_cmd.cpp +++ b/src/mongo/db/commands/create_indexes_cmd.cpp @@ -27,49 +27,84 @@ * it in the license file. 
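checkCollectionOptions above keeps create idempotent: recreating a namespace with identical options succeeds, while any mismatch fails with a NamespaceExists-style error that spells out both sets of options. A toy sketch of that rule, with a deliberately tiny Options struct that is not the real CollectionOptions:

```cpp
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

struct Options {
    bool capped = false;
    long long size = 0;
    bool operator==(const Options& o) const { return capped == o.capped && size == o.size; }
};

// Create succeeds if the namespace is new or already exists with the same
// options; a conflicting definition is reported as an error.
void createCollection(std::map<std::string, Options>& catalog,
                      const std::string& ns,
                      const Options& opts) {
    auto it = catalog.find(ns);
    if (it == catalog.end()) {
        catalog.emplace(ns, opts);
        return;
    }
    if (it->second == opts) {
        return;  // already exists with identical options: treat as success
    }
    throw std::runtime_error("namespace " + ns + " already exists, but with different options");
}

int main() {
    std::map<std::string, Options> catalog;
    createCollection(catalog, "test.c", {true, 1024});
    createCollection(catalog, "test.c", {true, 1024});   // ok, identical
    try {
        createCollection(catalog, "test.c", {false, 0});  // conflicting options
    } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
    }
}
```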
*/ +#include +#include +#include +#include #include +#include #include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/encryption_fields_util.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/auth/privilege.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/collection_uuid_mismatch.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/commit_quorum_options.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_key_validate.h" -#include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/catalog/uncommitted_catalog_updates.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/create_indexes_gen.h" #include "mongo/db/db_raii.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/index_names.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/insert.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl_index_build_state.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/storage/two_phase_index_build_knobs_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/catalog_helper.h" #include "mongo/db/timeseries/timeseries_commands_conversion_helper.h" -#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" -#include "mongo/idl/command_generic_argument.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/compiler.h" -#include "mongo/s/shard_key_pattern.h" -#include "mongo/util/scopeguard.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kIndex @@ -127,7 +162,8 @@ std::vector parseAndValidateIndexSpecs(OperationContext* opCtx, parsedIndexSpec = index_key_validate::removeUnknownFields(ns, parsedIndexSpec); } - parsedIndexSpec = index_key_validate::parseAndValidateIndexSpecs(opCtx, parsedIndexSpec); + parsedIndexSpec = index_key_validate::parseAndValidateIndexSpecs( + opCtx, parsedIndexSpec, true /* checkFCV */); uassert(ErrorCodes::BadValue, "Can't hide index on system collection", !(ns.isSystem() && !ns.isTimeseriesBucketsCollection()) || @@ -317,11 +353,11 @@ bool indexesAlreadyExist(OperationContext* opCtx, void assertNoMovePrimaryInProgress(OperationContext* opCtx, const NamespaceString& nss) { try { + Lock::CollectionLock collLock(opCtx, nss, MODE_IX); + const auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireShared(opCtx, nss.dbName()); - Lock::CollectionLock collLock(opCtx, nss, MODE_IX); - auto scopedCss = CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss); auto collDesc = scopedCss->getCollectionDescription(opCtx); @@ -330,7 +366,7 @@ void assertNoMovePrimaryInProgress(OperationContext* opCtx, const NamespaceStrin LOGV2(4909200, "assertNoMovePrimaryInProgress", logAttrs(nss)); uasserted(ErrorCodes::MovePrimaryInProgress, - "movePrimary is in progress for namespace " + nss.toString()); + "movePrimary is in progress for namespace " + nss.toStringForErrorMsg()); } } } catch (const DBException& ex) { @@ -406,8 +442,8 @@ void runCreateIndexesOnNewCollection(OperationContext* opCtx, invariant(opCtx->inMultiDocumentTransaction() || createCollImplicitly); uassert(ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Cannot create new indexes on non-empty collection " << ns - << " in a multi-document transaction.", + str::stream() << "Cannot create new indexes on non-empty collection " + << ns.toStringForErrorMsg() << " in a multi-document transaction.", collection->isEmpty(opCtx)); const int numIndexesBefore = @@ -453,12 +489,12 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx, // was optimized to not update indexes. The only exception is the partial index used to support // retryable transactions that the sessions code knows how to handle. uassert(ErrorCodes::IllegalOperation, - str::stream() << "not allowed to create index on " << ns.ns(), + str::stream() << "not allowed to create index on " << ns.toStringForErrorMsg(), ns != NamespaceString::kSessionTransactionsTableNamespace || isCreatingInternalConfigTxnsPartialIndex(cmd)); uassert(ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Cannot write to system collection " << ns.toString() + str::stream() << "Cannot write to system collection " << ns.toStringForErrorMsg() << " within a transaction.", !opCtx->inMultiDocumentTransaction() || !ns.isSystem()); @@ -484,6 +520,8 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx, // 1) We are in a replication mode that allows for index creation. // 2) Check sharding state. // 3) Check if we can create the index without handing control to the IndexBuildsCoordinator. + // 4) Check we are not in a multi-document transaction. + // 5) Check there is enough available disk space to start the index build. 
boost::optional collectionUUID; { AutoGetDb autoDb(opCtx, ns.dbName(), MODE_IX); @@ -491,10 +529,11 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx, if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, ns)) { uasserted(ErrorCodes::NotWritablePrimary, - str::stream() << "Not primary while creating indexes in " << ns.ns()); + str::stream() + << "Not primary while creating indexes in " << ns.toStringForErrorMsg()); } - bool indexExists = writeConflictRetry(opCtx, "createCollectionWithIndexes", ns.ns(), [&] { + bool indexExists = writeConflictRetry(opCtx, "createCollectionWithIndexes", ns, [&] { AutoGetCollection collection( opCtx, ns, @@ -534,9 +573,15 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx, // builds coordinator and take an exclusive lock. We should not take exclusive locks inside // of transactions, so we fail early here if we are inside of a transaction. uassert(ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Cannot create new indexes on existing collection " << ns - << " in a multi-document transaction.", + str::stream() << "Cannot create new indexes on existing collection " + << ns.toStringForErrorMsg() << " in a multi-document transaction.", !opCtx->inMultiDocumentTransaction()); + + if (feature_flags::gIndexBuildGracefulErrorHandling.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassertStatusOK( + IndexBuildsCoordinator::checkDiskSpaceSufficientToStartIndexBuild(opCtx)); + } } // Use AutoStatsTracker to update Top. @@ -631,12 +676,6 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx, // The current OperationContext may be interrupted, which would prevent us from // taking locks. Use a new OperationContext to abort the index build. auto newClient = opCtx->getServiceContext()->makeClient("abort-index-build"); - - { - stdx::lock_guard lk(*newClient.get()); - newClient.get()->setSystemOperationKillableByStepdown(lk); - } - AlternativeClientRegion acr(newClient); const auto abortCtx = cc().makeOperationContext(); @@ -682,8 +721,9 @@ CreateIndexesReply runCreateIndexesWithCoordinator(OperationContext* opCtx, } // All other errors should be forwarded to the caller with index build information included. - ex.addContext(str::stream() << "Index build failed: " << buildUUID << ": Collection " << ns - << " ( " << *collectionUUID << " )"); + ex.addContext(str::stream() + << "Index build failed: " << buildUUID << ": Collection " + << ns.toStringForErrorMsg() << " ( " << *collectionUUID << " )"); // Set last op on error to provide the client with a specific optime to read the state of // the server when the createIndexes command failed. @@ -733,8 +773,7 @@ class CmdCreateIndexes : public CreateIndexesCmdVersion1Gen { } void doCheckAuthorization(OperationContext* opCtx) const { - Privilege p(CommandHelpers::resourcePatternForNamespace(ns().toString()), - ActionType::createIndex); + Privilege p(CommandHelpers::resourcePatternForNamespace(ns()), ActionType::createIndex); uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient())->isAuthorizedForPrivilege(p)); diff --git a/src/mongo/db/commands/create_indexes_test.cpp b/src/mongo/db/commands/create_indexes_test.cpp index 3c8cc4268e302..14bb919f947e8 100644 --- a/src/mongo/db/commands/create_indexes_test.cpp +++ b/src/mongo/db/commands/create_indexes_test.cpp @@ -27,10 +27,32 @@ * it in the license file. 
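One of the new preconditions above is that, when graceful index-build error handling is enabled, the build is refused up front if there is not enough free disk space. A minimal illustration of such a precheck using std::filesystem; the path and threshold are placeholders, not the server's actual policy:

```cpp
#include <cstdint>
#include <filesystem>
#include <iostream>
#include <stdexcept>

// Illustrative version of a "check there is enough available disk space
// before starting the index build" step: query free space on the data path
// and refuse to start if it is under a configurable threshold.
void checkDiskSpaceSufficientToStartIndexBuild(const std::filesystem::path& dataPath,
                                               std::uintmax_t requiredFreeBytes) {
    const auto info = std::filesystem::space(dataPath);
    if (info.available < requiredFreeBytes) {
        throw std::runtime_error("not enough free disk space to start an index build");
    }
}

int main() {
    try {
        // e.g. require 500MB free before accepting the build
        checkDiskSpaceSufficientToStartIndexBuild(".", 500ull * 1024 * 1024);
        std::cout << "index build may start\n";
    } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
    }
}
```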
*/ +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -69,7 +91,10 @@ TEST_F(CreateIndexesTest, CreateIndexesFailsWhenIndexBuildsCollectionIsMissing) "createIndexes" << nss.coll() << "indexes" << BSON_ARRAY(index) << "commitQuorum" << 0); BSONObj result; // This should fail since config.system.indexBuilds does not exist. + startCapturingLogMessages(); ASSERT_FALSE(client.runCommand(nss.dbName(), createIndexesCmdObj, result)) << result; + stopCapturingLogMessages(); + ASSERT_EQ(1, countBSONFormatLogLinesIsSubset(BSON("id" << 7564400))); ASSERT(result.hasField("code")); ASSERT_EQ(result.getIntField("code"), 6325700); } diff --git a/src/mongo/db/commands/cst_command.cpp b/src/mongo/db/commands/cst_command.cpp index 3e44cf2429459..8848aba1f2f8a 100644 --- a/src/mongo/db/commands/cst_command.cpp +++ b/src/mongo/db/commands/cst_command.cpp @@ -61,7 +61,7 @@ class CstCommand : public BasicCommand { // The CST command constructs a Pipeline, which might hold execution resources. // We could do fine-grained permission checking similar to the find or aggregate commands, // but that seems more complicated than necessary since this is only a test command. - if (!authSession->isAuthorizedForAnyActionOnAnyResourceInDB(dbname.db())) { + if (!authSession->isAuthorizedForAnyActionOnAnyResourceInDB(dbname)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/commands/current_op.cpp b/src/mongo/db/commands/current_op.cpp index 8ba08e6c37b1d..b7f903295206b 100644 --- a/src/mongo/db/commands/current_op.cpp +++ b/src/mongo/db/commands/current_op.cpp @@ -27,19 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/current_op_common.h" - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/commands/current_op_common.h" #include "mongo/db/commands/fsync_locked.h" #include "mongo/db/commands/run_aggregate.h" -#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" -#include "mongo/db/stats/fill_locker_info.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/util/serialization_context.h" namespace mongo { @@ -51,11 +62,11 @@ class CurrentOpCommand final : public CurrentOpCommandBase { CurrentOpCommand() = default; Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj& cmdObj) const final { AuthorizationSession* authzSession = AuthorizationSession::get(opCtx->getClient()); - if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::inprog)) { + if (authzSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::inprog)) { return Status::OK(); } @@ -78,7 +89,9 @@ class CurrentOpCommand final : public CurrentOpCommandBase { PrivilegeVector privileges; if (!aggCmdObj["$ownOps"].trueValue()) { - privileges = {Privilege(ResourcePattern::forClusterResource(), ActionType::inprog)}; + privileges = { + Privilege(ResourcePattern::forClusterResource(request.getDbName().tenantId()), + ActionType::inprog)}; } auto status = runAggregate(opCtx, @@ -96,8 +109,12 @@ class CurrentOpCommand final : public CurrentOpCommandBase { CommandHelpers::appendSimpleCommandStatus(bodyBuilder, true); bodyBuilder.doneFast(); + // We need to copy the serialization context from the request to the reply object return CursorResponse::parseFromBSON( - replyBuilder.releaseBody(), nullptr, request.getNamespace().tenantId()); + replyBuilder.releaseBody(), + nullptr, + request.getNamespace().tenantId(), + SerializationContext::stateCommandReply(request.getSerializationContext())); } virtual void appendToResponse(BSONObjBuilder* result) const final { diff --git a/src/mongo/db/commands/current_op_common.cpp b/src/mongo/db/commands/current_op_common.cpp index 6cd8cb22e2657..d562e34802bad 100644 --- a/src/mongo/db/commands/current_op_common.cpp +++ b/src/mongo/db/commands/current_op_common.cpp @@ -27,15 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/commands/current_op_common.h" - -#include - #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/namespace_string.h" #include "mongo/idl/command_generic_argument.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/string_map.h" namespace mongo { namespace { @@ -110,9 +120,12 @@ bool CurrentOpCommandBase::run(OperationContext* opCtx, pipeline.push_back(groupBuilder.obj()); // Pipeline is complete; create an AggregateCommandRequest for $currentOp. - AggregateCommandRequest request( - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(dbName.tenantId(), "admin")), - std::move(pipeline)); + SerializationContext sc = SerializationContext::stateCommandRequest(); + sc.setTenantIdSource(auth::ValidatedTenancyScope::get(opCtx) != boost::none); + AggregateCommandRequest request(NamespaceString::makeCollectionlessAggregateNSS( + DatabaseNameUtil::deserialize(dbName.tenantId(), "admin")), + std::move(pipeline), + sc); // Run the pipeline and obtain a CursorResponse. auto aggResults = uassertStatusOK(runAggregation(opCtx, request)); diff --git a/src/mongo/db/commands/current_op_common.h b/src/mongo/db/commands/current_op_common.h index 4d22ef7d67b2e..bbd0df639f170 100644 --- a/src/mongo/db/commands/current_op_common.h +++ b/src/mongo/db/commands/current_op_common.h @@ -29,10 +29,18 @@ #pragma once +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/dbcheck.cpp b/src/mongo/db/commands/dbcheck.cpp index 5e1e700c8943a..b4cfd60d14d01 100644 --- a/src/mongo/db/commands/dbcheck.cpp +++ b/src/mongo/db/commands/dbcheck.cpp @@ -28,29 +28,80 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authorization_checks.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_catalog_helper.h" -#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/health_log_gen.h" #include "mongo/db/catalog/health_log_interface.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/d_concurrency.h" #include 
"mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/dbcheck.h" +#include "mongo/db/repl/dbcheck_gen.h" +#include "mongo/db/repl/dbcheck_idl.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/write_concern.h" #include "mongo/db/write_concern_options.h" #include "mongo/idl/command_generic_argument.h" -#include "mongo/util/background.h" - +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/background.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/progress_meter.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -71,7 +122,7 @@ repl::OpTime _logOp(OperationContext* opCtx, oplogEntry.setObject(obj); AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); return writeConflictRetry( - opCtx, "dbCheck oplog entry", NamespaceString::kRsOplogNamespace.ns(), [&] { + opCtx, "dbCheck oplog entry", NamespaceString::kRsOplogNamespace, [&] { auto const clockSource = opCtx->getServiceContext()->getFastClockSource(); oplogEntry.setWallClockTime(clockSource->now()); @@ -156,6 +207,27 @@ using DbCheckRun = std::vector; std::unique_ptr singleCollectionRun(OperationContext* opCtx, const DatabaseName& dbName, const DbCheckSingleInvocation& invocation) { + if (!repl::feature_flags::gSecondaryIndexChecksInDbCheck.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassert(ErrorCodes::InvalidOptions, + "When featureFlagSecondaryIndexChecksInDbCheck is not enabled, the validateMode " + "parameter cannot be set.", + !invocation.getValidateMode()); + } else { + if (invocation.getValidateMode() == mongo::DbCheckValidationModeEnum::extraIndexKeysCheck) { + uassert(ErrorCodes::InvalidOptions, + "When validateMode is set to extraIndexKeysCheck, the secondaryIndex parameter " + "must be set.", + invocation.getSecondaryIndex()); + } else { + uassert(ErrorCodes::InvalidOptions, + "When validateMode is set to dataConsistency or " + "dataConsistencyAndMissingIndexKeysCheck, the secondaryIndex parameter cannot " + "be set.", + !invocation.getSecondaryIndex()); + } + } + NamespaceString nss( NamespaceStringUtil::parseNamespaceFromRequest(dbName, invocation.getColl())); AutoGetCollectionForRead agc(opCtx, nss); @@ -165,7 +237,7 @@ std::unique_ptr singleCollectionRun(OperationContext* opCtx, agc.getCollection()); uassert(40619, - "Cannot run dbCheck on " + nss.toString() + " because it is not replicated", + "Cannot run dbCheck on " + nss.toStringForErrorMsg() + " 
because it is not replicated", nss.isReplicated()); uassert(6769500, "dbCheck no longer supports snapshotRead:false", invocation.getSnapshotRead()); @@ -197,9 +269,8 @@ std::unique_ptr singleCollectionRun(OperationContext* opCtx, std::unique_ptr fullDatabaseRun(OperationContext* opCtx, const DatabaseName& dbName, const DbCheckAllInvocation& invocation) { - uassert(ErrorCodes::InvalidNamespace, - "Cannot run dbCheck on local database", - dbName.db() != "local"); + uassert( + ErrorCodes::InvalidNamespace, "Cannot run dbCheck on local database", !dbName.isLocalDB()); AutoGetDb agd(opCtx, dbName, MODE_IS); uassert(ErrorCodes::NamespaceNotFound, @@ -276,7 +347,7 @@ std::unique_ptr getRun(OperationContext* opCtx, std::shared_ptr getConsistentCatalogAndSnapshot(OperationContext* opCtx) { // Loop until we get a consistent catalog and snapshot while (true) { - const auto catalogBeforeSnapshot = CollectionCatalog::get(opCtx); + auto catalogBeforeSnapshot = CollectionCatalog::get(opCtx); opCtx->recoveryUnit()->preallocateSnapshot(); const auto catalogAfterSnapshot = CollectionCatalog::get(opCtx); if (catalogBeforeSnapshot == catalogAfterSnapshot) { @@ -292,7 +363,7 @@ std::shared_ptr getConsistentCatalogAndSnapshot(Operati class DbCheckJob : public BackgroundJob { public: DbCheckJob(const DatabaseName& dbName, std::unique_ptr run) - : BackgroundJob(true), _done(false), _dbName(dbName.toString()), _run(std::move(run)) {} + : BackgroundJob(true), _done(false), _run(std::move(run)) {} protected: virtual std::string name() const override { @@ -302,12 +373,6 @@ class DbCheckJob : public BackgroundJob { virtual void run() override { // Every dbCheck runs in its own client. ThreadClient tc(name(), getGlobalServiceContext()); - - { - stdx::lock_guard lk(*tc.get()); - tc.get()->setSystemOperationKillableByStepdown(lk); - } - auto uniqueOpCtx = tc->makeOperationContext(); auto opCtx = uniqueOpCtx.get(); @@ -336,7 +401,8 @@ class DbCheckJob : public BackgroundJob { return; } - const std::string curOpMessage = "Scanning namespace " + info.nss.toString(); + const std::string curOpMessage = + "Scanning namespace " + NamespaceStringUtil::serialize(info.nss); ProgressMeterHolder progress; { AutoGetCollection coll(opCtx, info.nss, MODE_IS); @@ -512,7 +578,6 @@ class DbCheckJob : public BackgroundJob { // Set if the job cannot proceed. bool _done; - std::string _dbName; std::unique_ptr _run; StatusWith _runBatch(OperationContext* opCtx, @@ -532,23 +597,11 @@ class DbCheckJob : public BackgroundJob { Lock::GlobalLock glob(opCtx, MODE_IX); // The CollectionCatalog to use for lock-free reads with point-in-time catalog lookups. - std::shared_ptr catalog; - - boost::optional autoColl; - const Collection* collection = nullptr; - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - // Make sure we get a CollectionCatalog in sync with our snapshot. 
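The dbcheck.cpp hunks here (and the dbhash.cpp changes later in this diff) lean on the same pattern: fetch the CollectionCatalog, open the storage snapshot, and fetch the catalog again, retrying until both reads return the same instance. A minimal sketch of that loop follows, using only the internal APIs visible in this diff; the abandon-and-retry step is not shown in the hunk and is an assumption here, so treat this as an illustration rather than the exact function body.

```cpp
// Sketch of the consistent catalog/snapshot loop used by dbCheck and dbHash.
// Types come from the headers added in the hunks above (collection_catalog.h,
// operation_context.h, recovery_unit.h). Simplified for illustration.
std::shared_ptr<const CollectionCatalog> getConsistentCatalogAndSnapshot(
    OperationContext* opCtx) {
    while (true) {
        // Read the catalog, open the snapshot, then read the catalog again.
        auto catalogBeforeSnapshot = CollectionCatalog::get(opCtx);
        opCtx->recoveryUnit()->preallocateSnapshot();
        const auto catalogAfterSnapshot = CollectionCatalog::get(opCtx);

        // If no catalog change slipped in between the two reads, the catalog is
        // in sync with the open snapshot and is safe for lock-free lookups.
        if (catalogBeforeSnapshot == catalogAfterSnapshot) {
            return catalogBeforeSnapshot;
        }

        // Otherwise discard the snapshot and retry (assumed step; elided in the hunk).
        opCtx->recoveryUnit()->abandonSnapshot();
    }
}
```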
- catalog = getConsistentCatalogAndSnapshot(opCtx); - - collection = catalog->establishConsistentCollection( - opCtx, - {info.nss.db(), info.uuid}, - opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx)); - } else { - autoColl.emplace(opCtx, info.nss, MODE_IS); - collection = autoColl->getCollection().get(); - } + std::shared_ptr catalog = getConsistentCatalogAndSnapshot(opCtx); + const Collection* collection = catalog->establishConsistentCollection( + opCtx, + {info.nss.dbName(), info.uuid}, + opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx)); if (_stepdownHasOccurred(opCtx, info.nss)) { _done = true; @@ -564,14 +617,6 @@ class DbCheckJob : public BackgroundJob { uassert(ErrorCodes::SnapshotUnavailable, "No snapshot available yet for dbCheck", readTimestamp); - auto minVisible = collection->getMinimumVisibleSnapshot(); - if (minVisible && *readTimestamp < *collection->getMinimumVisibleSnapshot()) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - invariant(!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()); - return {ErrorCodes::SnapshotUnavailable, - str::stream() << "Unable to read from collection " << info.nss - << " due to pending catalog changes"}; - } // The CollectionPtr needs to outlive the DbCheckHasher as it's used internally. const CollectionPtr collectionPtr(collection); @@ -603,7 +648,7 @@ class DbCheckJob : public BackgroundJob { batch.setMd5(md5); batch.setMinKey(first); batch.setMaxKey(BSONKey(hasher->lastKey())); - batch.setReadTimestamp(readTimestamp); + batch.setReadTimestamp(*readTimestamp); // Send information on this batch over the oplog. BatchStats result; @@ -680,11 +725,12 @@ class DbCheckCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { - const bool isAuthorized = AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource( - ResourcePattern::forAnyResource(), ActionType::dbCheck); + const bool isAuthorized = + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forAnyResource(dbName.tenantId()), ActionType::dbCheck); return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized"); } @@ -693,14 +739,7 @@ class DbCheckCmd : public BasicCommand { const BSONObj& cmdObj, BSONObjBuilder& result) { auto job = getRun(opCtx, dbName, cmdObj); - try { - (new DbCheckJob(dbName, std::move(job)))->go(); - } catch (const DBException& e) { - result.append("ok", false); - result.append("err", e.toString()); - return false; - } - result.append("ok", true); + (new DbCheckJob(dbName, std::move(job)))->go(); return true; } } dbCheckCmd; diff --git a/src/mongo/db/commands/dbcommands.cpp b/src/mongo/db/commands/dbcommands.cpp index 3160ac847b8a7..284652c98f7bc 100644 --- a/src/mongo/db/commands/dbcommands.cpp +++ b/src/mongo/db/commands/dbcommands.cpp @@ -27,76 +27,91 @@ * it in the license file. 
*/ -#include - -#include "mongo/base/simple_string_data_comparator.h" -#include "mongo/base/status_with.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/bson/util/builder.h" -#include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/auth/user_management_commands_parser.h" -#include "mongo/db/auth/user_name.h" -#include "mongo/db/catalog/coll_mod.h" -#include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/drop_database.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/clientcursor.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/coll_mod_gen.h" #include "mongo/db/coll_mod_reply_validation.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" -#include "mongo/db/commands/server_status.h" +#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbcommands_gen.h" -#include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/drop_database_gen.h" #include "mongo/db/drop_gen.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/index/index_access_method.h" -#include "mongo/db/introspect.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" #include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/ops/insert.h" -#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" -#include "mongo/db/pipeline/storage_stats_spec_gen.h" -#include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/get_executor.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" -#include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/query/query_planner.h" -#include "mongo/db/read_concern.h" -#include "mongo/db/repl/optime.h" -#include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/request_execution_context.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/stats/storage_stats.h" 
+#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/timeseries/timeseries_collmod.h" -#include "mongo/db/write_concern.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/executor/async_request_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/grid.h" -#include "mongo/scripting/engine.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" #include "mongo/util/future.h" -#include "mongo/util/md5.hpp" -#include "mongo/util/scopeguard.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -132,8 +147,8 @@ class CmdDropDatabase : public DropDatabaseCmdVersion1Gen { } void doCheckAuthorization(OperationContext* opCtx) const final { uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to drop database '" << request().getDbName() - << "'", + str::stream() << "Not authorized to drop database '" + << request().getDbName().toStringForErrorMsg() << "'", AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnNamespace(ns(), ActionType::dropDatabase)); } @@ -199,7 +214,8 @@ class CmdDrop : public DropCmdVersion1Gen { void doCheckAuthorization(OperationContext* opCtx) const final { auto ns = request().getNamespace(); uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to drop collection '" << ns << "'", + str::stream() << "Not authorized to drop collection '" + << ns.toStringForErrorMsg() << "'", AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnNamespace(ns, ActionType::dropCollection)); } @@ -219,7 +235,9 @@ class CmdDrop : public DropCmdVersion1Gen { !storageEngine->supportsRecoveryTimestamp()); } - Reply reply; + // We need to copy the serialization context from the request to the reply object + Reply reply( + SerializationContext::stateCommandReply(request().getSerializationContext())); uassertStatusOK( dropCollection(opCtx, request().getNamespace(), @@ -421,50 +439,11 @@ class CmdDataSize final : public TypedCommand { Rarely _collStatsSampler; -class CmdCollStats final : public BasicCommandWithRequestParser { +class CmdCollStats final : public TypedCommand { public: using Request = CollStatsCommand; - Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& cmdObj) const final { - const auto nss = CommandHelpers::parseNsCollectionRequired(dbName, cmdObj); - auto as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(nss), - ActionType::collStats)) { - return {ErrorCodes::Unauthorized, "unauthorized"}; - } - return Status::OK(); - } - - bool supportsWriteConcern(const BSONObj&) const final { - return false; - } - - bool runWithRequestParser(OperationContext* opCtx, - const DatabaseName&, - const BSONObj& cmdObj, - 
const RequestParser& requestParser, - BSONObjBuilder& result) final { - if (_collStatsSampler.tick()) - LOGV2_WARNING(7024600, - "The collStats command is deprecated. For more information, see " - "https://dochub.mongodb.org/core/collStats-deprecated"); - - const auto& cmd = requestParser.request(); - const auto& nss = cmd.getNamespace(); - - uassert(ErrorCodes::OperationFailed, "No collection name specified", !nss.coll().empty()); - - result.append("ns", NamespaceStringUtil::serialize(nss)); - auto spec = StorageStatsSpec::parse(IDLParserContext("collStats"), cmdObj); - Status status = appendCollectionStorageStats(opCtx, nss, spec, &result); - if (!status.isOK() && (status.code() != ErrorCodes::NamespaceNotFound)) { - uassertStatusOK(status); // throws - } - - return true; - } + CmdCollStats() : TypedCommand(Request::kCommandName, Request::kCommandAlias) {} AllowedOnSecondary secondaryAllowed(ServiceContext*) const final { return AllowedOnSecondary::kAlways; @@ -482,28 +461,66 @@ class CmdCollStats final : public BasicCommandWithRequestParser { return true; } - // Assume that appendCollectionStorageStats() gives us a valid response. - void validateResult(const BSONObj& resultObj) final {} + class Invocation final : public MinimalInvocationBase { + public: + using MinimalInvocationBase::MinimalInvocationBase; + + private: + bool supportsWriteConcern() const override { + return false; + } + void doCheckAuthorization(OperationContext* opCtx) const override { + auto as = AuthorizationSession::get(opCtx->getClient()); + uassert(ErrorCodes::Unauthorized, + "unauthorized", + as->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns()), + ActionType::collStats)); + } + + NamespaceString ns() const final { + return request().getNamespace(); + } + + void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* reply) final { + if (_collStatsSampler.tick()) + LOGV2_WARNING(7024600, + "The collStats command is deprecated. For more information, see " + "https://dochub.mongodb.org/core/collStats-deprecated"); + + const auto nss = ns(); + uassert( + ErrorCodes::OperationFailed, "No collection name specified", !nss.coll().empty()); + + auto result = reply->getBodyBuilder(); + // We need to use the serialization context from the request when calling + // NamespaceStringUtil to build the reply. 
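The rewritten CmdCollStats in this hunk derives a reply-side SerializationContext from the request so the `ns` field is serialized consistently with how the (possibly tenant-prefixed) request was parsed. A hedged sketch of just that step, using only calls that appear in the hunk; the free function wrapper is illustrative, not part of the change.

```cpp
// Sketch: append the "ns" reply field using the serialization context carried by
// the request, mirroring the new CmdCollStats::Invocation::run().
void appendNamespaceField(const SerializationContext& requestSc,
                          const NamespaceString& nss,
                          BSONObjBuilder& result) {
    // Reply-side context derived from the request-side one, so tenant prefixing is
    // applied (or omitted) symmetrically on the way out.
    auto serializationCtx = SerializationContext::stateCommandReply(requestSc);
    result.append("ns", NamespaceStringUtil::serialize(nss, serializationCtx));
}
```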
+ auto serializationCtx = + SerializationContext::stateCommandReply(request().getSerializationContext()); + result.append("ns", NamespaceStringUtil::serialize(nss, serializationCtx)); + + const auto& spec = request().getStorageStatsSpec(); + Status status = + appendCollectionStorageStats(opCtx, nss, spec, serializationCtx, &result); + if (!status.isOK() && (status.code() != ErrorCodes::NamespaceNotFound)) { + uassertStatusOK(status); // throws + } + } + }; } cmdCollStats; -class CollectionModCommand : public BasicCommandWithRequestParser { +class CollectionModCommand : public TypedCommand { public: using Request = CollMod; using Reply = CollModReply; - CollectionModCommand() : BasicCommandWithRequestParser() {} - - virtual const std::set& apiVersions() const { + const std::set& apiVersions() const { return kApiVersions1; } AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { return AllowedOnSecondary::kNever; } - virtual bool supportsWriteConcern(const BSONObj& cmd) const override { - return true; - } bool allowedWithSecurityToken() const final { return true; @@ -520,70 +537,87 @@ class CollectionModCommand : public BasicCommandWithRequestParsergetClient(); - auto nss = parseNs(dbName, cmdObj); - return auth::checkAuthForCollMod( - client->getOperationContext(), AuthorizationSession::get(client), nss, cmdObj, false); - } - - bool runWithRequestParser(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& cmdObj, - const RequestParser& requestParser, - BSONObjBuilder& result) final { - const auto* cmd = &requestParser.request(); - - // Targeting the underlying buckets collection directly would make the time-series - // Collection out of sync with the time-series view document. Additionally, we want to - // ultimately obscure/hide the underlying buckets collection from the user, so we're - // disallowing targetting it. - uassert( - ErrorCodes::InvalidNamespace, - "collMod on a time-series collection's underlying buckets collection is not supported.", - !cmd->getNamespace().isTimeseriesBucketsCollection()); - - // Updating granularity on sharded time-series collections is not allowed. - if (Grid::get(opCtx)->catalogClient() && cmd->getTimeseries() && - cmd->getTimeseries()->getGranularity()) { - auto& nss = cmd->getNamespace(); - auto bucketNss = - nss.isTimeseriesBucketsCollection() ? nss : nss.makeTimeseriesBucketsNamespace(); - try { - auto coll = Grid::get(opCtx)->catalogClient()->getCollection(opCtx, bucketNss); - uassert(ErrorCodes::NotImplemented, - str::stream() - << "Cannot update granularity of a sharded time-series collection.", - !coll.getTimeseriesFields()); - } catch (const ExceptionFor&) { - // Collection is not sharded, skip check. - } + const AuthorizationContract* getAuthorizationContract() const final { + return &Request::kAuthorizationContract; + } + + class Invocation final : public MinimalInvocationBase { + public: + using MinimalInvocationBase::MinimalInvocationBase; + bool supportsWriteConcern() const override { + return true; } - if (cmd->getValidator() || cmd->getValidationLevel() || cmd->getValidationAction()) { - // Check for config.settings in the user command since a validator is allowed - // internally on this collection but the user may not modify the validator. 
- uassert(ErrorCodes::InvalidOptions, - str::stream() << "Document validators not allowed on system collection " - << cmd->getNamespace(), - cmd->getNamespace() != NamespaceString::kConfigSettingsNamespace); + NamespaceString ns() const final { + return request().getNamespace(); } - uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation( - opCtx, cmd->getNamespace(), *cmd, true, &result)); - return true; - } + void doCheckAuthorization(OperationContext* opCtx) const override { + uassertStatusOK(auth::checkAuthForCollMod(opCtx, + AuthorizationSession::get(opCtx->getClient()), + request().getNamespace(), + unparsedRequest().body, + false)); + } - void validateResult(const BSONObj& resultObj) final { - auto reply = Reply::parse(IDLParserContext("CollModReply"), resultObj); - coll_mod_reply_validation::validateReply(reply); - } + void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* reply) final { + const auto& cmd = request(); + const auto& nss = request().getNamespace(); + // Targeting the underlying buckets collection directly would make the time-series + // Collection out of sync with the time-series view document. Additionally, we want to + // ultimately obscure/hide the underlying buckets collection from the user, so we're + // disallowing targetting it. + uassert(ErrorCodes::InvalidNamespace, + "collMod on a time-series collection's underlying buckets collection is not " + "supported.", + !nss.isTimeseriesBucketsCollection()); + + + // Updating granularity on sharded time-series collections is not allowed. + auto catalogClient = + Grid::get(opCtx)->isInitialized() ? Grid::get(opCtx)->catalogClient() : nullptr; + if (catalogClient && cmd.getTimeseries() && cmd.getTimeseries()->getGranularity()) { + auto bucketNss = nss.isTimeseriesBucketsCollection() + ? nss + : nss.makeTimeseriesBucketsNamespace(); + try { + auto coll = catalogClient->getCollection(opCtx, bucketNss); + uassert(ErrorCodes::NotImplemented, + str::stream() + << "Cannot update granularity of a sharded time-series collection.", + !coll.getTimeseriesFields()); + } catch (const ExceptionFor&) { + // Collection is not sharded, skip check. + } + } - const AuthorizationContract* getAuthorizationContract() const final { - return &::mongo::CollMod::kAuthorizationContract; - } + if (cmd.getValidator() || cmd.getValidationLevel() || cmd.getValidationAction()) { + // Check for config.settings in the user command since a validator is allowed + // internally on this collection but the user may not modify the validator. + uassert(ErrorCodes::InvalidOptions, + str::stream() << "Document validators not allowed on system collection " + << nss.toStringForErrorMsg(), + nss != NamespaceString::kConfigSettingsNamespace); + } + + // We do not use the serialization context for reply object serialization as the reply + // object doesn't contain any nss or dbName structures. + auto result = reply->getBodyBuilder(); + uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation( + opCtx, nss, cmd, true, &result)); + + // Only validate results in test mode so that we don't expose users to errors if we + // construct an invalid reply. 
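The refactored collMod invocation in this hunk only checks the shape of its own reply when test commands are enabled, so a malformed reply fails loudly in testing without surfacing validation errors to users. A small sketch of that gating, reusing the names from the surrounding diff; the standalone helper is illustrative only.

```cpp
// Sketch: validate a freshly built CollModReply only when test commands are enabled,
// as the new CollectionModCommand::Invocation does. Types come from coll_mod_gen.h,
// coll_mod_reply_validation.h and test_commands_enabled.h, all included above.
void maybeValidateCollModReply(BSONObjBuilder& result) {
    if (getTestCommandsEnabled()) {
        // Parsing the in-progress body throws if it does not match CollModReply.
        auto reply =
            CollModReply::parse(IDLParserContext("CollModReply"), result.asTempObj());
        coll_mod_reply_validation::validateReply(reply);
    }
}
```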
+ if (getTestCommandsEnabled()) { + validateResult(result.asTempObj()); + } + } + + void validateResult(const BSONObj& resultObj) { + auto reply = Reply::parse(IDLParserContext("CollModReply"), resultObj); + coll_mod_reply_validation::validateReply(reply); + } + }; } collectionModCommand; class CmdDbStats final : public TypedCommand { @@ -607,11 +641,11 @@ class CmdDbStats final : public TypedCommand { void doCheckAuthorization(OperationContext* opCtx) const final { auto as = AuthorizationSession::get(opCtx->getClient()); - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(request().getDbName().db()), - ActionType::dbStats)); + uassert( + ErrorCodes::Unauthorized, + "Unauthorized", + as->isAuthorizedForActionsOnResource( + ResourcePattern::forDatabaseName(request().getDbName()), ActionType::dbStats)); } NamespaceString ns() const final { @@ -627,7 +661,7 @@ class CmdDbStats final : public TypedCommand { uassert(ErrorCodes::InvalidNamespace, str::stream() << "Invalid db name: " << dbname.toStringForErrorMsg(), - NamespaceString::validDBName(dbname.db(), + NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow)); { @@ -639,7 +673,8 @@ class CmdDbStats final : public TypedCommand { AutoGetDb autoDb(opCtx, dbname, MODE_IS); Database* db = autoDb.getDb(); - Reply reply; + // We need to copy the serialization context from the request to the reply object + Reply reply(SerializationContext::stateCommandReply(cmd.getSerializationContext())); reply.setDB(dbname.db()); if (!db) { diff --git a/src/mongo/db/commands/dbcommands_d.cpp b/src/mongo/db/commands/dbcommands_d.cpp index 775a08afea9a2..59bc20af84f0f 100644 --- a/src/mongo/db/commands/dbcommands_d.cpp +++ b/src/mongo/db/commands/dbcommands_d.cpp @@ -28,69 +28,62 @@ */ -#include "mongo/platform/basic.h" - -#include - -#include "mongo/base/simple_string_data_comparator.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" -#include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/bson/util/builder.h" -#include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/auth/user_name.h" -#include "mongo/db/catalog/coll_mod.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/drop_collection.h" -#include "mongo/db/catalog/drop_database.h" -#include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" #include "mongo/db/commands/profile_common.h" #include "mongo/db/commands/profile_gen.h" -#include "mongo/db/commands/server_status.h" #include "mongo/db/commands/set_profiling_filter_globally_cmd.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" 
+#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/dbhelpers.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/index/index_access_method.h" -#include "mongo/db/index/index_descriptor.h" #include "mongo/db/introspect.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/ops/insert.h" +#include "mongo/db/operation_context.h" #include "mongo/db/profile_filter_impl.h" -#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/query/internal_plans.h" -#include "mongo/db/query/query_planner.h" -#include "mongo/db/read_concern.h" -#include "mongo/db/repl/optime.h" -#include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/repl_settings.h" -#include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/stats/storage_stats.h" -#include "mongo/db/write_concern.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/query_planner_params.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/logv2/log.h" -#include "mongo/s/stale_exception.h" -#include "mongo/scripting/engine.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/md5.h" #include "mongo/util/md5.hpp" -#include "mongo/util/scopeguard.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -292,7 +285,7 @@ class CmdFileMD5 : public BasicCommand { BSONObj query = BSON("files_id" << jsobj["filemd5"] << "n" << GTE << n); BSONObj sort = BSON("files_id" << 1 << "n" << 1); - return writeConflictRetry(opCtx, "filemd5", dbName.toString(), [&] { + return writeConflictRetry(opCtx, "filemd5", NamespaceString(dbName), [&] { auto findCommand = std::make_unique(nss); findCommand->setFilter(query.getOwned()); findCommand->setSort(sort.getOwned()); @@ -322,7 +315,7 @@ class CmdFileMD5 : public BasicCommand { BSONObj obj; while (PlanExecutor::ADVANCED == exec->getNext(&obj, nullptr)) { BSONElement ne = obj["n"]; - verify(ne.isNumber()); + MONGO_verify(ne.isNumber()); int myn = ne.numberInt(); if (n != myn) { if (partialOk) { diff --git a/src/mongo/db/commands/dbhash.cpp b/src/mongo/db/commands/dbhash.cpp index 312b513de886f..9a723a03a1fdd 100644 --- a/src/mongo/db/commands/dbhash.cpp +++ b/src/mongo/db/commands/dbhash.cpp @@ -28,31 +28,59 @@ */ -#include "mongo/platform/basic.h" - -#include #include +#include #include #include - +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include 
"mongo/db/catalog/collection.h" -#include "mongo/db/catalog/collection_catalog_helper.h" -#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_engine.h" -#include "mongo/db/transaction/transaction_participant.h" #include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/md5.h" #include "mongo/util/md5.hpp" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/str.h" #include "mongo/util/timer.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -67,7 +95,7 @@ std::shared_ptr getConsistentCatalogAndSnapshot(Operati // Loop until we get a consistent catalog and snapshot. This is only used for the lock-free // implementation of dbHash which skips acquiring database and collection locks. while (true) { - const auto catalogBeforeSnapshot = CollectionCatalog::get(opCtx); + auto catalogBeforeSnapshot = CollectionCatalog::get(opCtx); opCtx->recoveryUnit()->preallocateSnapshot(); const auto catalogAfterSnapshot = CollectionCatalog::get(opCtx); if (catalogBeforeSnapshot == catalogAfterSnapshot) { @@ -126,7 +154,7 @@ class DBHashCmd : public BasicCommand { const DatabaseName& dbName, const BSONObj& cmdObj) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbName.db()), + if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbName), ActionType::dbHash)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -244,22 +272,14 @@ class DBHashCmd : public BasicCommand { Lock::GlobalLock globalLock(opCtx, MODE_IS); // The CollectionCatalog to use for lock-free reads with point-in-time catalog lookups. - std::shared_ptr catalog; - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - // Make sure we get a CollectionCatalog in sync with our snapshot. - catalog = getConsistentCatalogAndSnapshot(opCtx); - } + std::shared_ptr catalog = getConsistentCatalogAndSnapshot(opCtx); boost::optional autoDb; if (isPointInTimeRead) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
- if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - // We only need to lock the database in intent mode and then collection in intent - // mode as well to ensure that none of the collections get dropped. This is no - // longer necessary with point-in-time catalog lookups. - autoDb.emplace(opCtx, dbName, MODE_IS); - } + // We only need to lock the database in intent mode and then collection in intent + // mode as well to ensure that none of the collections get dropped. + // TODO:SERVER-75848 Make this lock-free + autoDb.emplace(opCtx, dbName, MODE_IS); } else { // We lock the entire database in S-mode in order to ensure that the contents will not // change for the snapshot when not reading at a timestamp. @@ -279,7 +299,8 @@ class DBHashCmd : public BasicCommand { auto collNss = collection->ns(); uassert(ErrorCodes::BadValue, - str::stream() << "weird fullCollectionName [" << collNss.toString() << "]", + str::stream() << "weird fullCollectionName [" << collNss.toStringForErrorMsg() + << "]", collNss.size() - 1 > dbName.db().size()); if (repl::ReplicationCoordinator::isOplogDisabledForNS(collNss)) { @@ -318,54 +339,51 @@ class DBHashCmd : public BasicCommand { return true; }; - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - for (auto it = catalog->begin(opCtx, dbName); it != catalog->end(opCtx); ++it) { - UUID uuid = it.uuid(); - - // The namespace must be found as the UUID is fetched from the same - // CollectionCatalog instance. - boost::optional nss = catalog->lookupNSSByUUID(opCtx, uuid); - invariant(nss); - - const Collection* coll = nullptr; - if (nss->isGlobalIndex()) { - // TODO SERVER-74209: Reading earlier than the minimum valid snapshot is not - // supported for global indexes. It appears that the primary and secondaries - // apply operations differently resulting in hash mismatches. This requires - // further investigation. In the meantime, global indexes use the behaviour - // prior to point-in-time lookups. - coll = *it; - - if (auto readTimestamp = - opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx)) { - auto minSnapshot = coll->getMinimumValidSnapshot(); - uassert(ErrorCodes::SnapshotUnavailable, - str::stream() - << "Unable to read from a snapshot due to pending collection" - " catalog changes; please retry the operation. Snapshot" - " timestamp is " - << readTimestamp->toString() - << ". Collection minimum timestamp is " - << minSnapshot->toString(), - !minSnapshot || *readTimestamp >= *minSnapshot); - } - } else { - coll = catalog->establishConsistentCollection( - opCtx, - {dbName, uuid}, - opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx)); - - if (!coll) { - // The collection did not exist at the read timestamp with the given UUID. - continue; - } + for (auto&& coll : catalog->range(dbName)) { + UUID uuid = coll->uuid(); + + // The namespace must be found as the UUID is fetched from the same + // CollectionCatalog instance. + boost::optional nss = catalog->lookupNSSByUUID(opCtx, uuid); + invariant(nss); + + // TODO:SERVER-75848 Make this lock-free + Lock::CollectionLock clk(opCtx, *nss, MODE_IS); + + const Collection* collection = nullptr; + if (nss->isGlobalIndex()) { + // TODO SERVER-74209: Reading earlier than the minimum valid snapshot is not + // supported for global indexes. It appears that the primary and secondaries apply + // operations differently resulting in hash mismatches. 
This requires further + // investigation. In the meantime, global indexes use the behaviour prior to + // point-in-time lookups. + collection = coll; + + if (auto readTimestamp = + opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx)) { + auto minSnapshot = coll->getMinimumValidSnapshot(); + uassert(ErrorCodes::SnapshotUnavailable, + str::stream() + << "Unable to read from a snapshot due to pending collection" + " catalog changes; please retry the operation. Snapshot" + " timestamp is " + << readTimestamp->toString() << ". Collection minimum timestamp is " + << minSnapshot->toString(), + !minSnapshot || *readTimestamp >= *minSnapshot); + } + } else { + collection = catalog->establishConsistentCollection( + opCtx, + {dbName, uuid}, + opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx)); + + if (!collection) { + // The collection did not exist at the read timestamp with the given UUID. + continue; } - - (void)checkAndHashCollection(coll); } - } else { - catalog::forEachCollectionFromDb(opCtx, dbName, MODE_IS, checkAndHashCollection); + + (void)checkAndHashCollection(collection); } BSONObjBuilder bb(result.subobjStart("collections")); @@ -414,18 +432,6 @@ class DBHashCmd : public BasicCommand { // reading from the consistent snapshot doesn't overlap with any catalog operations on // the collection. invariant(opCtx->lockState()->isCollectionLockedForMode(collection->ns(), MODE_IS)); - - auto minSnapshot = collection->getMinimumVisibleSnapshot(); - auto mySnapshot = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); - invariant(mySnapshot); - - uassert(ErrorCodes::SnapshotUnavailable, - str::stream() << "Unable to read from a snapshot due to pending collection" - " catalog changes; please retry the operation. Snapshot" - " timestamp is " - << mySnapshot->toString() << ". 
Collection minimum timestamp is " - << minSnapshot->toString(), - !minSnapshot || *mySnapshot >= *minSnapshot); } else { invariant(opCtx->lockState()->isDbLockedForMode(collection->ns().dbName(), MODE_S)); } @@ -456,7 +462,7 @@ class DBHashCmd : public BasicCommand { try { BSONObj c; - verify(nullptr != exec.get()); + MONGO_verify(nullptr != exec.get()); while (exec->getNext(&c, nullptr) == PlanExecutor::ADVANCED) { md5_append(&st, (const md5_byte_t*)c.objdata(), c.objsize()); } diff --git a/src/mongo/db/commands/distinct.cpp b/src/mongo/db/commands/distinct.cpp index fc0c1b0b98f18..ccf6e4e07ac66 100644 --- a/src/mongo/db/commands/distinct.cpp +++ b/src/mongo/db/commands/distinct.cpp @@ -28,40 +28,88 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include #include +#include +#include #include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonelement_comparator_interface.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/client.h" -#include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" #include "mongo/db/commands/run_aggregate.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/logical_time.h" #include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/cursor_response.h" #include "mongo/db/query/explain.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/find_common.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/parsed_distinct.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_summary_stats.h" -#include "mongo/db/query/query_planner_common.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/view_response_formatter.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/query_analysis_writer.h" -#include "mongo/db/views/resolved_view.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction_resources.h" #include 
"mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/query_analysis_sampler_util.h" +#include "mongo/util/assert_util.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/serialization_context.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -154,22 +202,33 @@ class DistinctCommand : public BasicCommand { const BSONObj& cmdObj = request.body; // Acquire locks. The RAII object is optional, because in the case of a view, the locks // need to be released. - boost::optional ctx; - ctx.emplace( + const auto nss = CommandHelpers::parseNsCollectionRequired(dbName, cmdObj); + + AutoStatsTracker tracker( opCtx, - CommandHelpers::parseNsCollectionRequired(dbName, cmdObj), - AutoGetCollection::Options{}.viewMode(auto_get_collection::ViewMode::kViewsPermitted)); - const auto nss = ctx->getNss(); + nss, + Top::LockType::ReadLocked, + AutoStatsTracker::LogMode::kUpdateTopAndCurOp, + CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(nss.dbName())); + + const auto acquisitionRequest = CollectionOrViewAcquisitionRequest::fromOpCtx( + opCtx, nss, AcquisitionPrerequisites::kRead); + boost::optional collectionOrView = + acquireCollectionOrViewMaybeLockFree(opCtx, acquisitionRequest); const ExtensionsCallbackReal extensionsCallback(opCtx, &nss); - auto defaultCollator = - ctx->getCollection() ? ctx->getCollection()->getDefaultCollator() : nullptr; + const CollatorInterface* defaultCollator = collectionOrView->getCollectionPtr() + ? collectionOrView->getCollectionPtr()->getDefaultCollator() + : nullptr; auto parsedDistinct = uassertStatusOK( ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, true, defaultCollator)); - if (ctx->getView()) { + SerializationContext sc(SerializationContext::stateCommandRequest()); + sc.setTenantIdSource(request.getValidatedTenantId() != boost::none); + + if (collectionOrView->isView()) { // Relinquish locks. The aggregation command will re-acquire them. - ctx.reset(); + collectionOrView.reset(); auto viewAggregation = parsedDistinct.asAggregationCommand(); if (!viewAggregation.isOK()) { @@ -185,22 +244,28 @@ class DistinctCommand : public BasicCommand { nss, viewAggCmd, verbosity, - APIParameters::get(opCtx).getAPIStrict().value_or(false)); + APIParameters::get(opCtx).getAPIStrict().value_or(false), + sc); - // An empty PrivilegeVector is acceptable because these privileges are only checked on - // getMore and explain will not open a cursor. + // An empty PrivilegeVector is acceptable because these privileges are only checked + // on getMore and explain will not open a cursor. 
return runAggregate( opCtx, nss, viewAggRequest, viewAggregation.getValue(), PrivilegeVector(), result); } - const auto& collection = ctx->getCollection(); + const auto& collection = collectionOrView->getCollectionPtr(); - auto executor = uassertStatusOK( - getExecutorDistinct(&collection, QueryPlannerParams::DEFAULT, &parsedDistinct)); + auto executor = uassertStatusOK(getExecutorDistinct( + collectionOrView->getCollection(), QueryPlannerParams::DEFAULT, &parsedDistinct)); auto bodyBuilder = result->getBodyBuilder(); - Explain::explainStages( - executor.get(), collection, verbosity, BSONObj(), cmdObj, &bodyBuilder); + Explain::explainStages(executor.get(), + collection, + verbosity, + BSONObj(), + SerializationContext::stateCommandReply(sc), + cmdObj, + &bodyBuilder); return Status::OK(); } @@ -211,14 +276,37 @@ class DistinctCommand : public BasicCommand { CommandHelpers::handleMarkKillOnClientDisconnect(opCtx); // Acquire locks and resolve possible UUID. The RAII object is optional, because in the case // of a view, the locks need to be released. - boost::optional ctx; - ctx.emplace( - opCtx, - CommandHelpers::parseNsOrUUID(dbName, cmdObj), - AutoGetCollection::Options{}.viewMode(auto_get_collection::ViewMode::kViewsPermitted)); - const auto& nss = ctx->getNss(); - if (!ctx->getView()) { + // TODO: Make nicer. We need to instantiate the AutoStatsTracker before the acquisition in + // case it would throw so we can ensure data is written to the profile collection that some + // test may rely on. However, we might not know the namespace at this point so it is wrapped + // in a boost::optional. If the request is with a UUID we instantiate it after, but this is + // fine as the request should not be for sharded collections. + boost::optional tracker; + auto const initializeTracker = [&](const NamespaceString& nss) { + tracker.emplace(opCtx, + nss, + Top::LockType::ReadLocked, + AutoStatsTracker::LogMode::kUpdateTopAndCurOp, + CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(nss.dbName())); + }; + auto const nssOrUUID = CommandHelpers::parseNsOrUUID(dbName, cmdObj); + if (nssOrUUID.isNamespaceString()) { + initializeTracker(nssOrUUID.nss()); + } + const auto acquisitionRequest = CollectionOrViewAcquisitionRequest::fromOpCtx( + opCtx, nssOrUUID, AcquisitionPrerequisites::kRead); + + boost::optional collectionOrView = + acquireCollectionOrViewMaybeLockFree(opCtx, acquisitionRequest); + const auto nss = collectionOrView->nss(); + + if (!tracker) { + initializeTracker(nss); + } + + if (collectionOrView->isCollection()) { + const auto& coll = collectionOrView->getCollection(); // Distinct doesn't filter orphan documents so it is not allowed to run on sharded // collections in multi-document transactions. uassert( @@ -226,7 +314,7 @@ class DistinctCommand : public BasicCommand { "Cannot run 'distinct' on a sharded collection in a multi-document transaction. " "Please see http://dochub.mongodb.org/core/transaction-distinct for a recommended " "alternative.", - !opCtx->inMultiDocumentTransaction() || !ctx->getCollection().isSharded()); + !opCtx->inMultiDocumentTransaction() || !coll.getShardingDescription().isSharded()); // Similarly, we ban readConcern level snapshot for sharded collections. 
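Because distinct now works against a shard-role acquisition rather than AutoGetCollection, the transaction and readConcern restrictions around this point are phrased in terms of the acquisition's sharding description. A sketch of those two checks is below; the assertion codes in the real hunk are not visible here, so ErrorCodes::InvalidOptions is a placeholder.

```cpp
// Sketch: restrictions the distinct command applies to sharded collections, expressed
// against the CollectionAcquisition as in the hunks above. Error codes are placeholders.
void checkDistinctShardingRestrictions(OperationContext* opCtx,
                                       const CollectionAcquisition& coll) {
    const bool sharded = coll.getShardingDescription().isSharded();

    // Distinct does not filter orphan documents, so it is banned on sharded
    // collections inside multi-document transactions.
    uassert(ErrorCodes::InvalidOptions,
            "Cannot run 'distinct' on a sharded collection in a multi-document transaction.",
            !opCtx->inMultiDocumentTransaction() || !sharded);

    // Similarly, readConcern level 'snapshot' is banned for sharded collections.
    uassert(ErrorCodes::InvalidOptions,
            "Cannot run 'distinct' on a sharded collection with readConcern level 'snapshot'",
            repl::ReadConcernArgs::get(opCtx).getLevel() !=
                    repl::ReadConcernLevel::kSnapshotReadConcern ||
                !sharded);
}
```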
uassert( @@ -234,12 +322,13 @@ class DistinctCommand : public BasicCommand { "Cannot run 'distinct' on a sharded collection with readConcern level 'snapshot'", repl::ReadConcernArgs::get(opCtx).getLevel() != repl::ReadConcernLevel::kSnapshotReadConcern || - !ctx->getCollection().isSharded()); + !coll.getShardingDescription().isSharded()); } const ExtensionsCallbackReal extensionsCallback(opCtx, &nss); - auto defaultCollation = - ctx->getCollection() ? ctx->getCollection()->getDefaultCollator() : nullptr; + const CollatorInterface* defaultCollation = collectionOrView->getCollectionPtr() + ? collectionOrView->getCollectionPtr()->getDefaultCollator() + : nullptr; auto parsedDistinct = uassertStatusOK( ParsedDistinct::parse(opCtx, nss, cmdObj, extensionsCallback, false, defaultCollation)); @@ -258,9 +347,9 @@ class DistinctCommand : public BasicCommand { .getAsync([](auto) {}); } - if (ctx->getView()) { + if (collectionOrView->isView()) { // Relinquish locks. The aggregation command will re-acquire them. - ctx.reset(); + collectionOrView.reset(); auto viewAggregation = parsedDistinct.asAggregationCommand(); uassertStatusOK(viewAggregation.getStatus()); @@ -284,10 +373,8 @@ class DistinctCommand : public BasicCommand { uassertStatusOK(replCoord->checkCanServeReadsFor( opCtx, nss, ReadPreferenceSetting::get(opCtx).canRunOnSecondary())); - const auto& collection = ctx->getCollection(); - - auto executor = - getExecutorDistinct(&collection, QueryPlannerParams::DEFAULT, &parsedDistinct); + auto executor = getExecutorDistinct( + collectionOrView->getCollection(), QueryPlannerParams::DEFAULT, &parsedDistinct); uassertStatusOK(executor.getStatus()); { @@ -338,8 +425,6 @@ class DistinctCommand : public BasicCommand { auto&& [stats, _] = explainer.getWinningPlanStats(ExplainOptions::Verbosity::kExecStats); LOGV2_WARNING(23797, - "Plan executor error during distinct command: {error}, " - "stats: {stats}, cmd: {cmd}", "Plan executor error during distinct command", "error"_attr = exception.toStatus(), "stats"_attr = redact(stats), @@ -350,6 +435,7 @@ class DistinctCommand : public BasicCommand { } auto curOp = CurOp::get(opCtx); + const auto& collection = collectionOrView->getCollectionPtr(); // Get summary information about the plan. PlanSummaryStats stats; @@ -388,6 +474,7 @@ class DistinctCommand : public BasicCommand { keyBob.append("distinct", 1); keyBob.append("key", 1); keyBob.append("query", 1); + keyBob.append("hint", 1); keyBob.append("collation", 1); keyBob.append("shardVersion", 1); return keyBob.obj(); diff --git a/src/mongo/db/commands/drop_connections_command.cpp b/src/mongo/db/commands/drop_connections_command.cpp index 7d6dccfeefda0..4c4f04a9ef7b4 100644 --- a/src/mongo/db/commands/drop_connections_command.cpp +++ b/src/mongo/db/commands/drop_connections_command.cpp @@ -27,13 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/drop_connections_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/executor/egress_tag_closer_manager.h" -#include "mongo/util/net/hostandport.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -82,8 +92,9 @@ class DropConnectionsCmd final : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::dropConnections)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::dropConnections)); } }; diff --git a/src/mongo/db/commands/drop_indexes_cmd.cpp b/src/mongo/db/commands/drop_indexes_cmd.cpp index 9b368532f52a2..2055adeddf5d0 100644 --- a/src/mongo/db/commands/drop_indexes_cmd.cpp +++ b/src/mongo/db/commands/drop_indexes_cmd.cpp @@ -27,34 +27,58 @@ * it in the license file. */ +#include +#include +#include #include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/drop_indexes.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/catalog/multi_index_block.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/db_raii.h" #include "mongo/db/drop_indexes_gen.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" #include "mongo/db/shard_role.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/catalog_helper.h" #include "mongo/db/timeseries/timeseries_commands_conversion_helper.h" -#include "mongo/db/vector_clock.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" #include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" +#include "mongo/util/fail_point.h" #include "mongo/util/quick_exit.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -89,7 +113,7 @@ class CmdDropIndexes : public DropIndexesCmdVersion1Gen { 
void doCheckAuthorization(OperationContext* opCtx) const final { uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to drop index(es) on collection" - << request().getNamespace(), + << request().getNamespace().toStringForErrorMsg(), AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnNamespace(request().getNamespace(), ActionType::dropIndex)); @@ -165,7 +189,7 @@ class CmdReIndex : public BasicCommand { ErrorCodes::IllegalOperation, str::stream() << "reIndex is only allowed on a standalone mongod instance. Cannot reIndex '" - << toReIndexNss << "' while replication is active"); + << toReIndexNss.toStringForErrorMsg() << "' while replication is active"); } auto acquisition = [&] { @@ -176,8 +200,8 @@ class CmdReIndex : public BasicCommand { MODE_X); uassert(ErrorCodes::CommandNotSupportedOnView, "can't re-index a view", - !std::holds_alternative(collOrViewAcquisition)); - return std::move(std::get(collOrViewAcquisition)); + !collOrViewAcquisition.isView()); + return CollectionAcquisition(std::move(collOrViewAcquisition)); }(); uassert(ErrorCodes::NamespaceNotFound, "collection does not exist", acquisition.exists()); @@ -192,7 +216,7 @@ class CmdReIndex : public BasicCommand { std::vector all; { std::vector indexNames; - writeConflictRetry(opCtx, "listIndexes", toReIndexNss.ns(), [&] { + writeConflictRetry(opCtx, "listIndexes", toReIndexNss, [&] { indexNames.clear(); acquisition.getCollectionPtr()->getAllIndexes(&indexNames); }); @@ -201,7 +225,7 @@ class CmdReIndex : public BasicCommand { for (size_t i = 0; i < indexNames.size(); i++) { const std::string& name = indexNames[i]; - BSONObj spec = writeConflictRetry(opCtx, "getIndexSpec", toReIndexNss.ns(), [&] { + BSONObj spec = writeConflictRetry(opCtx, "getIndexSpec", toReIndexNss, [&] { return acquisition.getCollectionPtr()->getIndexSpec(name); }); @@ -241,9 +265,9 @@ class CmdReIndex : public BasicCommand { indexer->setIndexBuildMethod(IndexBuildMethod::kForeground); StatusWith> swIndexesToRebuild(ErrorCodes::UnknownError, "Uninitialized"); - writeConflictRetry(opCtx, "dropAllIndexes", toReIndexNss.ns(), [&] { - WriteUnitOfWork wunit(opCtx); + writeConflictRetry(opCtx, "dropAllIndexes", toReIndexNss, [&] { CollectionWriter collection(opCtx, &acquisition); + WriteUnitOfWork wunit(opCtx); collection.getWritableCollection(opCtx)->getIndexCatalog()->dropAllIndexes( opCtx, collection.getWritableCollection(opCtx), true, {}); @@ -274,9 +298,9 @@ class CmdReIndex : public BasicCommand { uassertStatusOK(indexer->checkConstraints(opCtx, acquisition.getCollectionPtr())); - writeConflictRetry(opCtx, "commitReIndex", toReIndexNss.ns(), [&] { - WriteUnitOfWork wunit(opCtx); + writeConflictRetry(opCtx, "commitReIndex", toReIndexNss, [&] { CollectionWriter collection(opCtx, &acquisition); + WriteUnitOfWork wunit(opCtx); uassertStatusOK(indexer->commit(opCtx, collection.getWritableCollection(opCtx), MultiIndexBlock::kNoopOnCreateEachFn, diff --git a/src/mongo/db/commands/end_sessions_command.cpp b/src/mongo/db/commands/end_sessions_command.cpp index 040a2dc36f132..77b798dee843a 100644 --- a/src/mongo/db/commands/end_sessions_command.cpp +++ b/src/mongo/db/commands/end_sessions_command.cpp @@ -27,16 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/base/init.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/commands.h" #include "mongo/db/commands/sessions_commands_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/rpc/op_msg.h" namespace mongo { namespace { @@ -60,6 +63,12 @@ class EndSessionsCommand final : public EndSessionsCmdVersion1Gen +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/client.h" #include "mongo/db/command_can_run_here.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/explain_gen.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/util/assert_util.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" namespace mongo { @@ -111,14 +134,11 @@ class CmdExplain::Invocation final : public CommandInvocation { void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* result) override { // Explain is never allowed in multi-document transactions. const bool inMultiDocumentTransaction = false; - // TODO SERVER-68655 pass _dbName directly to commandCanRunHere uassert(50746, "Explain's child command cannot run on this node. " "Are you explaining a write command on a secondary?", - commandCanRunHere(opCtx, - _dbName.toStringWithTenantId(), - _innerInvocation->definition(), - inMultiDocumentTransaction)); + commandCanRunHere( + opCtx, _dbName, _innerInvocation->definition(), inMultiDocumentTransaction)); _innerInvocation->explain(opCtx, _verbosity, result); } @@ -185,7 +205,7 @@ std::unique_ptr CmdExplain::parse(OperationContext* opCtx, str::stream() << "Mismatched $db in explain command. Expected " << dbName.toStringForErrorMsg() << " but got " << innerDbName.toStringForErrorMsg(), - innerDb.checkAndGetStringData() == dbName.toString()); + innerDbName == dbName); } auto explainedCommand = CommandHelpers::findCommand(explainedObj.firstElementFieldName()); uassert(ErrorCodes::CommandNotFound, diff --git a/src/mongo/db/commands/external_data_source_commands_test.cpp b/src/mongo/db/commands/external_data_source_commands_test.cpp index 8c66244699579..f5186da194251 100644 --- a/src/mongo/db/commands/external_data_source_commands_test.cpp +++ b/src/mongo/db/commands/external_data_source_commands_test.cpp @@ -27,19 +27,47 @@ * it in the license file. 
*/ +#include +#include #include - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/client/dbclient_cursor.h" #include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/storage/named_pipe.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/mutex.h" #include "mongo/platform/random.h" -#include "mongo/util/assert_util.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/scopeguard.h" namespace mongo { @@ -137,7 +165,7 @@ class ExternalDataSourceCommandsTest : public ServiceContextMongoDTest { }; const DatabaseName ExternalDataSourceCommandsTest::kDatabaseName = - DatabaseName(boost::none, "external_data_source"); + DatabaseName::createDatabaseName_forTest(boost::none, "external_data_source"); TEST_F(ExternalDataSourceCommandsTest, SimpleScanAggRequest) { const auto nDocs = _random.nextInt32(100) + 1; diff --git a/src/mongo/db/commands/fail_point_cmd.cpp b/src/mongo/db/commands/fail_point_cmd.cpp index 04eda7ad215f4..daec8549e651c 100644 --- a/src/mongo/db/commands/fail_point_cmd.cpp +++ b/src/mongo/db/commands/fail_point_cmd.cpp @@ -27,15 +27,24 @@ * it in the license file. 
*/ -#include - -#include "mongo/base/init.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/privilege.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/wait_for_fail_point_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" namespace mongo { diff --git a/src/mongo/db/commands/feature_compatibility_version.cpp b/src/mongo/db/commands/feature_compatibility_version.cpp index f836749f782be..c0b624083725b 100644 --- a/src/mongo/db/commands/feature_compatibility_version.cpp +++ b/src/mongo/db/commands/feature_compatibility_version.cpp @@ -28,34 +28,65 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/feature_compatibility_version.h" - +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/commands/feature_compatibility_version_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_compatibility_version_document_gen.h" #include "mongo/db/feature_compatibility_version_documentation.h" #include "mongo/db/feature_compatibility_version_parser.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/storage_interface.h" -#include "mongo/db/s/collection_sharding_state.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/wire_version.h" #include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/transport/service_entry_point.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/mutex.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kCommand @@ -75,7 +106,7 @@ namespace { * Utility class for recording permitted transitions between feature compatibility versions and * their on-disk representation as FeatureCompatibilityVersionDocument objects. */ -// TODO SERVER-65269: Add back 'const' qualifier to FCVTransitions class declaration +// TODO (SERVER-74847): Add back 'const' qualifier to FCVTransitions class declaration class FCVTransitions { public: FCVTransitions() { @@ -132,6 +163,11 @@ class FCVTransitions { _transitions[{GenericFCV::kDowngradingFromLatestToLastLTS, GenericFCV::kLastLTS, isFromConfigServer}] = GenericFCV::kLastLTS; + + // Add transition from downgrading -> upgrading. + _transitions[{GenericFCV::kDowngradingFromLatestToLastLTS, + GenericFCV::kLatest, + isFromConfigServer}] = GenericFCV::kUpgradingFromLastLTSToLatest; } _fcvDocuments[GenericFCV::kDowngradingFromLatestToLastLTS] = makeFCVDoc(GenericFCV::kLastLTS /* effective */, @@ -140,22 +176,6 @@ class FCVTransitions { ); } - /** - * If feature flag gDowngradingToUpgrading is enabled, - * we add the new downgrading->upgrading->latest path. - */ - void featureFlaggedAddNewTransitionState() { - // (Ignore FCV check): This is intentional because we want to use this feature even if we - // are in downgrading fcv state. - if (repl::feature_flags::gDowngradingToUpgrading.isEnabledAndIgnoreFCVUnsafe()) { - for (auto&& isFromConfigServer : {false, true}) { - _transitions[{GenericFCV::kDowngradingFromLatestToLastLTS, - GenericFCV::kLatest, - isFromConfigServer}] = GenericFCV::kUpgradingFromLastLTSToLatest; - } - } - } - // TODO (SERVER-74847): Remove this transition once we remove testing around // downgrading from latest to last continuous. void addTransitionFromLatestToLastContinuous() { @@ -306,18 +326,13 @@ void FeatureCompatibilityVersion::validateSetFeatureCompatibilityVersionRequest( auto fcvDoc = FeatureCompatibilityVersionDocument::parse( IDLParserContext("featureCompatibilityVersionDocument"), fcvObj.value()); - // (Ignore FCV check): This is intentional because we want to use this feature even if we are in - // downgrading fcv state. - if (repl::feature_flags::gDowngradingToUpgrading.isEnabledAndIgnoreFCVUnsafe()) { - auto isCleaningServerMetadata = fcvDoc.getIsCleaningServerMetadata(); - uassert( - 7428200, + auto isCleaningServerMetadata = fcvDoc.getIsCleaningServerMetadata(); + uassert(7428200, "Cannot upgrade featureCompatibilityVersion if a previous FCV downgrade stopped in the " "middle of cleaning up internal server metadata. Retry the FCV downgrade until it " "succeeds before attempting to upgrade the FCV.", !(newVersion > fromVersion && (isCleaningServerMetadata.is_initialized() && *isCleaningServerMetadata))); - } auto setFCVPhase = setFCVRequest.getPhase(); if (!isFromConfigServer || !setFCVPhase) { @@ -373,13 +388,10 @@ void FeatureCompatibilityVersion::updateFeatureCompatibilityVersionDocument( // Only transition to fully upgraded or downgraded states when we have completed all required // upgrade/downgrade behavior, unless it is the newly added downgrading to upgrading path. - // (Ignore FCV check): This is intentional because we want to use this feature even if we are in - // downgrading fcv state. 
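// -----------------------------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the patch] The hunk above moves the
// downgrading -> upgrading FCV transition into the FCVTransitions constructor itself instead of
// gating it behind the removed gDowngradingToUpgrading feature flag. The self-contained sketch
// below only models how a (from, to, isFromConfigServer)-keyed transition table such as
// '_transitions' behaves; the enum values and function names are hypothetical stand-ins for the
// real multiversion types.
// -----------------------------------------------------------------------------------------------
#include <map>
#include <optional>
#include <tuple>

namespace fcv_transition_sketch {

// Hypothetical stand-in for the generic FCV constants used by FCVTransitions.
enum class Fcv {
    kLastLTS,
    kUpgradingFromLastLTSToLatest,
    kDowngradingFromLatestToLastLTS,
    kLatest
};

using Key = std::tuple<Fcv /*from*/, Fcv /*requested*/, bool /*isFromConfigServer*/>;

// Returns the transitional FCV to persist first, or std::nullopt if the move is not permitted.
std::optional<Fcv> transitionalFor(
    const std::map<Key, Fcv>& table, Fcv from, Fcv requested, bool isFromConfigServer) {
    auto it = table.find(Key{from, requested, isFromConfigServer});
    if (it == table.end()) {
        return std::nullopt;
    }
    return it->second;
}

}  // namespace fcv_transition_sketch

int main() {
    using namespace fcv_transition_sketch;
    std::map<Key, Fcv> table;
    for (bool isFromConfigServer : {false, true}) {
        // The downgrading -> upgrading edge that this patch now registers unconditionally.
        table[Key{Fcv::kDowngradingFromLatestToLastLTS, Fcv::kLatest, isFromConfigServer}] =
            Fcv::kUpgradingFromLastLTSToLatest;
    }
    auto next = transitionalFor(table, Fcv::kDowngradingFromLatestToLastLTS, Fcv::kLatest, false);
    return (next && *next == Fcv::kUpgradingFromLastLTSToLatest) ? 0 : 1;
}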
auto transitioningVersion = setTargetVersion && serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading(fromVersion) && - !(repl::feature_flags::gDowngradingToUpgrading.isEnabledAndIgnoreFCVUnsafe() && - (fromVersion == GenericFCV::kDowngradingFromLatestToLastLTS && - newVersion == GenericFCV::kLatest)) + !(fromVersion == GenericFCV::kDowngradingFromLatestToLastLTS && + newVersion == GenericFCV::kLatest) ? fromVersion : fcvTransitions.getTransitionalVersion(fromVersion, newVersion, isFromConfigServer); @@ -389,57 +401,60 @@ void FeatureCompatibilityVersion::updateFeatureCompatibilityVersionDocument( newFCVDoc.setChangeTimestamp(changeTimestamp); - // (Ignore FCV check): This is intentional because we want to use this feature even if we are in - // downgrading fcv state. - if (repl::feature_flags::gDowngradingToUpgrading.isEnabledAndIgnoreFCVUnsafe()) { - // The setIsCleaningServerMetadata parameter can either be true, false, or boost::none. - // True indicates we want to set the isCleaningServerMetadata FCV document field to true. - // False indicates we want to remove the isCleaningServerMetadata FCV document field. - // boost::none indicates that we don't want to change the current value of the - // isCleaningServerMetadata field. - if (setIsCleaningServerMetadata.is_initialized()) { - // True case: set isCleaningServerMetadata doc field to true. - if (*setIsCleaningServerMetadata) { - newFCVDoc.setIsCleaningServerMetadata(true); - } - // Else, false case: don't set the isCleaningServerMetadata field in newFCVDoc. This is - // because runUpdateCommand overrides the current whole FCV document with what is in - // newFCVDoc so not setting the field is effectively removing it. - } else { - // boost::none case: - // If we don't want to update the isCleaningServerMetadata, we need to make sure not to - // override the existing field if it exists, so get the current isCleaningServerMetadata - // field value from the current FCV document and set it in newFCVDoc. - // This is to protect against the case where a previous FCV downgrade failed - // in the isCleaningServerMetadata phase, and the user runs setFCV again. In that - // case we do not want to remove the existing isCleaningServerMetadata FCV doc field - // because it would not be safe to upgrade the FCV. - auto currentFCVObj = findFeatureCompatibilityVersionDocument(opCtx); - auto currentFCVDoc = FeatureCompatibilityVersionDocument::parse( - IDLParserContext("featureCompatibilityVersionDocument"), currentFCVObj.value()); - - auto currentIsCleaningServerMetadata = currentFCVDoc.getIsCleaningServerMetadata(); - if (currentIsCleaningServerMetadata.is_initialized() && - *currentIsCleaningServerMetadata) { - newFCVDoc.setIsCleaningServerMetadata(*currentIsCleaningServerMetadata); - } + // The setIsCleaningServerMetadata parameter can either be true, false, or boost::none. + // True indicates we want to set the isCleaningServerMetadata FCV document field to true. + // False indicates we want to remove the isCleaningServerMetadata FCV document field. + // boost::none indicates that we don't want to change the current value of the + // isCleaningServerMetadata field. + if (setIsCleaningServerMetadata.is_initialized()) { + // True case: set isCleaningServerMetadata doc field to true. + if (*setIsCleaningServerMetadata) { + newFCVDoc.setIsCleaningServerMetadata(true); + } + // Else, false case: don't set the isCleaningServerMetadata field in newFCVDoc. 
This is + // because runUpdateCommand overrides the current whole FCV document with what is in + // newFCVDoc so not setting the field is effectively removing it. + } else { + // boost::none case: + // If we don't want to update the isCleaningServerMetadata, we need to make sure not to + // override the existing field if it exists, so get the current isCleaningServerMetadata + // field value from the current FCV document and set it in newFCVDoc. + // This is to protect against the case where a previous FCV downgrade failed + // in the isCleaningServerMetadata phase, and the user runs setFCV again. In that + // case we do not want to remove the existing isCleaningServerMetadata FCV doc field + // because it would not be safe to upgrade the FCV. + auto currentFCVObj = findFeatureCompatibilityVersionDocument(opCtx); + auto currentFCVDoc = FeatureCompatibilityVersionDocument::parse( + IDLParserContext("featureCompatibilityVersionDocument"), currentFCVObj.value()); + + auto currentIsCleaningServerMetadata = currentFCVDoc.getIsCleaningServerMetadata(); + if (currentIsCleaningServerMetadata.is_initialized() && *currentIsCleaningServerMetadata) { + newFCVDoc.setIsCleaningServerMetadata(*currentIsCleaningServerMetadata); } } + // Replace the current FCV document with newFCVDoc. runUpdateCommand(opCtx, newFCVDoc); } void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* opCtx, repl::StorageInterface* storageInterface) { - if (!hasNoReplicatedCollections(opCtx)) + if (!hasNoReplicatedCollections(opCtx)) { + if (!gDefaultStartupFCV.empty()) { + LOGV2(7557701, + "Ignoring the provided defaultStartupFCV parameter since the FCV already exists"); + } return; + } + // If the server was not started with --shardsvr, the default featureCompatibilityVersion on // clean startup is the upgrade version. If it was started with --shardsvr, the default // featureCompatibilityVersion is the downgrade version, so that it can be safely added to a // downgrade version cluster. The config server will run setFeatureCompatibilityVersion as // part of addShard. - const bool storeUpgradeVersion = !serverGlobalParams.clusterRole.exclusivelyHasShardRole(); + const bool storeUpgradeVersion = + !serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer); UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); NamespaceString nss(NamespaceString::kServerConfigurationNamespace); @@ -450,11 +465,34 @@ void FeatureCompatibilityVersion::setIfCleanStartup(OperationContext* opCtx, uassertStatusOK(storageInterface->createCollection(opCtx, nss, options)); } + // Set FCV to lastLTS for nodes started with --shardsvr. If an FCV was specified at startup + // through a startup parameter, set it to that FCV. Otherwise, set it to latest. 
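// -----------------------------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the patch] The hunk below seeds the FCV
// document on a clean startup from the new 'defaultStartupFCV' startup parameter introduced in
// feature_compatibility_version.idl (settable via --setParameter at startup), falling back to the
// latest FCV with a startup warning when the supplied string is not a recognized FCV. The
// self-contained sketch below models only that string-matching fallback (it ignores the separate
// --shardsvr case); all names and the sample version strings are hypothetical.
// -----------------------------------------------------------------------------------------------
#include <iostream>
#include <string>

namespace startup_fcv_sketch {

enum class Fcv { kLastLTS, kLastContinuous, kLatest };

// Match the startup string against the three generic FCV strings; anything else (or an empty
// parameter) resolves to latest, with a warning for unrecognized values.
Fcv resolveDefaultStartupFcv(const std::string& requested,
                             const std::string& lastLTS,
                             const std::string& lastContinuous,
                             const std::string& latest) {
    if (requested.empty() || requested == latest) {
        return Fcv::kLatest;
    }
    if (requested == lastLTS) {
        return Fcv::kLastLTS;
    }
    if (requested == lastContinuous) {
        return Fcv::kLastContinuous;
    }
    std::cerr << "defaultStartupFCV '" << requested << "' is not a valid FCV; using " << latest
              << " instead\n";
    return Fcv::kLatest;
}

}  // namespace startup_fcv_sketch

int main() {
    using namespace startup_fcv_sketch;
    // Hypothetical invocation: mongod --setParameter defaultStartupFCV=<lastLTS string>
    return resolveDefaultStartupFcv("6.0", "6.0", "6.3", "7.0") == Fcv::kLastLTS ? 0 : 1;
}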
FeatureCompatibilityVersionDocument fcvDoc; - if (storeUpgradeVersion) { - fcvDoc.setVersion(GenericFCV::kLatest); - } else { + if (!storeUpgradeVersion) { fcvDoc.setVersion(GenericFCV::kLastLTS); + } else if (!gDefaultStartupFCV.empty()) { + StringData versionString = StringData(gDefaultStartupFCV); + FCV parsedVersion; + + if (versionString == multiversion::toString(GenericFCV::kLastLTS)) { + parsedVersion = GenericFCV::kLastLTS; + } else if (versionString == multiversion::toString(GenericFCV::kLastContinuous)) { + parsedVersion = GenericFCV::kLastContinuous; + } else if (versionString == multiversion::toString(GenericFCV::kLatest)) { + parsedVersion = GenericFCV::kLatest; + } else { + parsedVersion = GenericFCV::kLatest; + LOGV2_WARNING_OPTIONS(7557700, + {logv2::LogTag::kStartupWarnings}, + "The provided 'defaultStartupFCV' is not a valid FCV. Setting " + "the FCV to the latest FCV instead", + "defaultStartupFCV"_attr = versionString, + "latestFCV"_attr = multiversion::toString(GenericFCV::kLatest)); + } + + fcvDoc.setVersion(parsedVersion); + } else { + fcvDoc.setVersion(GenericFCV::kLatest); } // We then insert the featureCompatibilityVersion document into the server configuration @@ -485,6 +523,13 @@ bool FeatureCompatibilityVersion::hasNoReplicatedCollections(OperationContext* o void FeatureCompatibilityVersion::updateMinWireVersion() { WireSpec& wireSpec = WireSpec::instance(); const auto currentFcv = serverGlobalParams.featureCompatibility.getVersion(); + // The reason we set the minWireVersion to LATEST_WIRE_VERSION when downgrading from latest as + // well as on upgrading to latest is because we shouldn’t decrease the minWireVersion until we + // have fully downgraded to the lower FCV in case we get any backwards compatibility breakages, + // since during `kDowngradingFrom_X_to_Y` we may still be stopping/cleaning up any features from + // the upgraded FCV. In essence, a node with the upgraded FCV/binary should not be able to + // communicate with downgraded binary nodes until the FCV is completely downgraded to + // `kVersion_Y`. if (currentFcv == GenericFCV::kLatest || (serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading() && currentFcv != GenericFCV::kUpgradingFromLastLTSToLastContinuous)) { @@ -573,10 +618,6 @@ void FeatureCompatibilityVersion::fassertInitializedAfterStartup(OperationContex auto fcvDocument = findFeatureCompatibilityVersionDocument(opCtx); - // TODO SERVER-65269: Move downgrading->upgrading transition back to FCVTransitions - // constructor. 
Adding the new fcv downgrading -> upgrading path - fcvTransitions.featureFlaggedAddNewTransitionState(); - auto const storageEngine = opCtx->getServiceContext()->getStorageEngine(); auto dbNames = storageEngine->listDatabases(); bool nonLocalDatabases = std::any_of( diff --git a/src/mongo/db/commands/feature_compatibility_version.h b/src/mongo/db/commands/feature_compatibility_version.h index e524ac4bbe1a2..ff9ea72ec22cc 100644 --- a/src/mongo/db/commands/feature_compatibility_version.h +++ b/src/mongo/db/commands/feature_compatibility_version.h @@ -29,13 +29,19 @@ #pragma once +#include + #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/commands/set_feature_compatibility_version_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/feature_compatibility_version_document_gen.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/server_options.h" +#include "mongo/util/version/releases.h" namespace mongo { diff --git a/src/mongo/db/commands/feature_compatibility_version.idl b/src/mongo/db/commands/feature_compatibility_version.idl index 1161730d96506..f60cc55eda689 100644 --- a/src/mongo/db/commands/feature_compatibility_version.idl +++ b/src/mongo/db/commands/feature_compatibility_version.idl @@ -44,3 +44,8 @@ server_parameters: cpp_vartype: bool cpp_varname: gInternalValidateFeaturesAsPrimary default: true + defaultStartupFCV: + description: 'Startup parameter to set a default FCV at startup' + set_at: startup + cpp_vartype: std::string + cpp_varname: gDefaultStartupFCV diff --git a/src/mongo/db/commands/find_and_modify.cpp b/src/mongo/db/commands/find_and_modify.cpp index 5c10fb706ecf1..bf4fb1c22000e 100644 --- a/src/mongo/db/commands/find_and_modify.cpp +++ b/src/mongo/db/commands/find_and_modify.cpp @@ -27,51 +27,97 @@ * it in the license file. 
*/ -#include - -#include "mongo/base/status_with.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/catalog/collection_yield_restore.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/update_metrics.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" -#include "mongo/db/exec/update_stage.h" +#include "mongo/db/database_name.h" #include "mongo/db/fle_crud.h" -#include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/delete_request_gen.h" #include "mongo/db/ops/insert.h" #include "mongo/db/ops/parsed_delete.h" #include "mongo/db/ops/parsed_update.h" +#include "mongo/db/ops/parsed_writes_common.h" +#include "mongo/db/ops/update_request.h" #include "mongo/db/ops/update_result.h" #include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/ops/write_ops_retryability.h" -#include "mongo/db/query/collection_query_info.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/find_command_gen.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/query/plan_executor.h" -#include "mongo/db/query/plan_summary_stats.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" -#include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/query_analysis_writer.h" -#include "mongo/db/stats/counters.h" -#include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_role.h" #include "mongo/db/stats/top.h" #include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/timeseries/timeseries_update_delete_util.h" +#include "mongo/db/timeseries/timeseries_write_util.h" #include "mongo/db/transaction/retryable_writes_stats.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/transaction_validation.h" #include "mongo/db/update/update_util.h" -#include "mongo/db/write_concern.h" -#include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include 
"mongo/s/analyze_shard_key_common_gen.h" +#include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/query_analysis_sampler_util.h" #include "mongo/s/would_change_owning_shard_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #include "mongo/util/log_and_backoff.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -81,41 +127,6 @@ namespace { MONGO_FAIL_POINT_DEFINE(failAllFindAndModify); MONGO_FAIL_POINT_DEFINE(hangBeforeFindAndModifyPerformsUpdate); -/** - * If the operation succeeded, then returns either a document to return to the client, or - * boost::none if no matching document to update/remove was found. If the operation failed, throws. - */ -boost::optional advanceExecutor(OperationContext* opCtx, - const write_ops::FindAndModifyCommandRequest& request, - PlanExecutor* exec, - bool isRemove) { - BSONObj value; - PlanExecutor::ExecState state; - try { - state = exec->getNext(&value, nullptr); - } catch (DBException& exception) { - auto&& explainer = exec->getPlanExplainer(); - auto&& [stats, _] = explainer.getWinningPlanStats(ExplainOptions::Verbosity::kExecStats); - LOGV2_WARNING( - 23802, - "Plan executor error during findAndModify: {error}, stats: {stats}, cmd: {cmd}", - "Plan executor error during findAndModify", - "error"_attr = exception.toStatus(), - "stats"_attr = redact(stats), - "cmd"_attr = request.toBSON(BSONObj() /* commandPassthroughFields */)); - - exception.addContext("Plan executor error during findAndModify"); - throw; - } - - if (PlanExecutor::ADVANCED == state) { - return {std::move(value)}; - } - - invariant(state == PlanExecutor::IS_EOF); - return boost::none; -} - void validate(const write_ops::FindAndModifyCommandRequest& request) { uassert(ErrorCodes::FailedToParse, "Either an update or remove=true must be specified", @@ -162,9 +173,7 @@ void makeDeleteRequest(OperationContext* opCtx, requestOut->setReturnDeleted(true); // Always return the old value. requestOut->setIsExplain(explain); - requestOut->setYieldPolicy(opCtx->inMultiDocumentTransaction() - ? 
PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO); + requestOut->setYieldPolicy(PlanYieldPolicy::YieldPolicy::YIELD_AUTO); } write_ops::FindAndModifyCommandReply buildResponse( @@ -195,7 +204,7 @@ write_ops::FindAndModifyCommandReply buildResponse( void assertCanWrite_inlock(OperationContext* opCtx, const NamespaceString& nss) { uassert(ErrorCodes::NotWritablePrimary, str::stream() << "Not primary while running findAndModify command on collection " - << nss.ns(), + << nss.toStringForErrorMsg(), repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss) @@ -206,7 +215,7 @@ void recordStatsForTopCommand(OperationContext* opCtx) { auto curOp = CurOp::get(opCtx); Top::get(opCtx->getClient()->getServiceContext()) .record(opCtx, - curOp->getNS(), + curOp->getNSS(), curOp->getLogicalOp(), Top::LockType::WriteLocked, durationCount(curOp->elapsedTimeExcludingPauses()), @@ -218,7 +227,7 @@ void checkIfTransactionOnCappedColl(const CollectionPtr& coll, bool inTransactio if (coll && coll->isCapped()) { uassert( ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Collection '" << coll->ns() + str::stream() << "Collection '" << coll->ns().toStringForErrorMsg() << "' is a capped collection. Writes in transactions are not allowed on " "capped collections.", !inTransaction); @@ -318,8 +327,7 @@ void CmdFindAndModify::Invocation::doCheckAuthorization(OperationContext* opCtx) actions.addAction(ActionType::bypassDocumentValidation); } - ResourcePattern resource( - CommandHelpers::resourcePatternForNamespace(request.getNamespace().toString())); + ResourcePattern resource(CommandHelpers::resourcePatternForNamespace(request.getNamespace())); uassert(17138, "Invalid target namespace " + resource.toString(), resource.isExactNamespacePattern()); @@ -327,7 +335,7 @@ void CmdFindAndModify::Invocation::doCheckAuthorization(OperationContext* opCtx) uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to find and modify on database'" - << this->request().getDbName() << "'", + << this->request().getDbName().toStringForErrorMsg() << "'", AuthorizationSession::get(opCtx->getClient())->isAuthorizedForPrivileges(privileges)); } @@ -350,7 +358,8 @@ void CmdFindAndModify::Invocation::explain(OperationContext* opCtx, }(); auto request = requestAndMsg.first; - const NamespaceString& nss = request.getNamespace(); + auto [isTimeseries, nss] = timeseries::isTimeseries(opCtx, request); + uassertStatusOK(userAllowedWriteNS(opCtx, nss)); auto const curOp = CurOp::get(opCtx); OpDebug* const opDebug = &curOp->debug(); @@ -362,50 +371,77 @@ void CmdFindAndModify::Invocation::explain(OperationContext* opCtx, const bool isExplain = true; makeDeleteRequest(opCtx, request, isExplain, &deleteRequest); - ParsedDelete parsedDelete(opCtx, &deleteRequest); - uassertStatusOK(parsedDelete.parseRequest()); - // Explain calls of the findAndModify command are read-only, but we take write // locks so that the timing information is more accurate. 
- AutoGetCollection collection(opCtx, nss, MODE_IX); + const auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, nss, AcquisitionPrerequisites::OperationType::kWrite), + MODE_IX); uassert(ErrorCodes::NamespaceNotFound, str::stream() << "database " << dbName.toStringForErrorMsg() << " does not exist", - collection.getDb()); + DatabaseHolder::get(opCtx)->getDb(opCtx, nss.dbName())); + + ParsedDelete parsedDelete( + opCtx, &deleteRequest, collection.getCollectionPtr(), isTimeseries); + uassertStatusOK(parsedDelete.parseRequest()); CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss) ->checkShardVersionOrThrow(opCtx); - const auto exec = uassertStatusOK( - getExecutorDelete(opDebug, &collection.getCollection(), &parsedDelete, verbosity)); + const auto exec = + uassertStatusOK(getExecutorDelete(opDebug, collection, &parsedDelete, verbosity)); auto bodyBuilder = result->getBodyBuilder(); Explain::explainStages( - exec.get(), collection.getCollection(), verbosity, BSONObj(), cmdObj, &bodyBuilder); + exec.get(), + collection.getCollectionPtr(), + verbosity, + BSONObj(), + SerializationContext::stateCommandReply(request.getSerializationContext()), + cmdObj, + &bodyBuilder); } else { auto updateRequest = UpdateRequest(); updateRequest.setNamespaceString(nss); update::makeUpdateRequest(opCtx, request, verbosity, &updateRequest); - const ExtensionsCallbackReal extensionsCallback(opCtx, &updateRequest.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &updateRequest, extensionsCallback); - uassertStatusOK(parsedUpdate.parseRequest()); - // Explain calls of the findAndModify command are read-only, but we take write // locks so that the timing information is more accurate. - AutoGetCollection collection(opCtx, nss, MODE_IX); + const auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, nss, AcquisitionPrerequisites::OperationType::kWrite), + MODE_IX); uassert(ErrorCodes::NamespaceNotFound, str::stream() << "database " << dbName.toStringForErrorMsg() << " does not exist", - collection.getDb()); + DatabaseHolder::get(opCtx)->getDb(opCtx, nss.dbName())); + if (isTimeseries) { + timeseries::assertTimeseriesBucketsCollection(collection.getCollectionPtr().get()); + } + + ParsedUpdate parsedUpdate(opCtx, + &updateRequest, + collection.getCollectionPtr(), + false /*forgoOpCounterIncrements*/, + isTimeseries); + uassertStatusOK(parsedUpdate.parseRequest()); CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss) ->checkShardVersionOrThrow(opCtx); - const auto exec = uassertStatusOK( - getExecutorUpdate(opDebug, &collection.getCollection(), &parsedUpdate, verbosity)); + const auto exec = + uassertStatusOK(getExecutorUpdate(opDebug, collection, &parsedUpdate, verbosity)); auto bodyBuilder = result->getBodyBuilder(); Explain::explainStages( - exec.get(), collection.getCollection(), verbosity, BSONObj(), cmdObj, &bodyBuilder); + exec.get(), + collection.getCollectionPtr(), + verbosity, + BSONObj(), + SerializationContext::stateCommandReply(request.getSerializationContext()), + cmdObj, + &bodyBuilder); } } @@ -431,8 +467,8 @@ write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::typedRun( CmdFindAndModify::collectMetrics(req); auto disableDocumentValidation = req.getBypassDocumentValidation().value_or(false); - auto fleCrudProcessed = - write_ops_exec::getFleCrudProcessed(opCtx, req.getEncryptionInformation()); + auto fleCrudProcessed = write_ops_exec::getFleCrudProcessed( + opCtx, 
req.getEncryptionInformation(), nsString.tenantId()); DisableDocumentSchemaValidationIfTrue docSchemaValidationDisabler(opCtx, disableDocumentValidation); @@ -471,7 +507,7 @@ write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::typedRun( opCtx, ns(), analyze_shard_key::SampledCommandNameEnum::kFindAndModify, req); if (sampleId) { analyze_shard_key::QueryAnalysisWriter::get(opCtx) - ->addFindAndModifyQuery(*sampleId, req) + ->addFindAndModifyQuery(opCtx, *sampleId, req) .getAsync([](auto) {}); } @@ -484,7 +520,7 @@ write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::typedRun( // Although usually the PlanExecutor handles WCE internally, it will throw WCEs when it // is executing a findAndModify. This is done to ensure that we can always match, // modify, and return the document under concurrency, if a matching document exists. - return writeConflictRetry(opCtx, "findAndModify", nsString.ns(), [&] { + return writeConflictRetry(opCtx, "findAndModify", nsString, [&] { if (req.getRemove().value_or(false)) { DeleteRequest deleteRequest; makeDeleteRequest(opCtx, req, false, &deleteRequest); @@ -494,7 +530,7 @@ write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::typedRun( } boost::optional docFound; write_ops_exec::writeConflictRetryRemove( - opCtx, nsString, &deleteRequest, curOp, opDebug, inTransaction, docFound); + opCtx, nsString, deleteRequest, curOp, opDebug, inTransaction, docFound); recordStatsForTopCommand(opCtx); return buildResponse(boost::none, true /* isRemove */, docFound); } else { @@ -522,11 +558,6 @@ write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::typedRun( updateRequest.setAllowShardKeyUpdatesWithoutFullShardKeyInQuery( req.getAllowShardKeyUpdatesWithoutFullShardKeyInQuery()); - const ExtensionsCallbackReal extensionsCallback( - opCtx, &updateRequest.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &updateRequest, extensionsCallback); - uassertStatusOK(parsedUpdate.parseRequest()); - try { boost::optional docFound; auto updateResult = @@ -538,17 +569,15 @@ write_ops::FindAndModifyCommandReply CmdFindAndModify::Invocation::typedRun( req.getRemove().value_or(false), req.getUpsert().value_or(false), docFound, - &parsedUpdate); + updateRequest); recordStatsForTopCommand(opCtx); return buildResponse(updateResult, req.getRemove().value_or(false), docFound); } catch (const ExceptionFor& ex) { - if (!parsedUpdate.hasParsedQuery()) { - uassertStatusOK(parsedUpdate.parseQueryToCQ()); - } - + auto cq = uassertStatusOK( + parseWriteQueryToCQ(opCtx, nullptr /* expCtx */, updateRequest)); if (!write_ops_exec::shouldRetryDuplicateKeyException( - parsedUpdate, *ex.extraInfo())) { + updateRequest, *cq, *ex.extraInfo())) { throw; } diff --git a/src/mongo/db/commands/find_cmd.cpp b/src/mongo/db/commands/find_cmd.cpp index 206ead1851623..2a1c3e4a40252 100644 --- a/src/mongo/db/commands/find_cmd.cpp +++ b/src/mongo/db/commands/find_cmd.cpp @@ -27,49 +27,117 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_uuid_mismatch.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" #include "mongo/db/commands/run_aggregate.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/cursor_manager.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/exec/disk_use_options_gen.h" -#include "mongo/db/exec/working_set_common.h" #include "mongo/db/fle_crud.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_request_helper.h" -#include "mongo/db/pipeline/variables.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/cqf_command_utils.h" -#include "mongo/db/query/cqf_get_executor.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/explain.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/find.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/find_common.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/telemetry.h" +#include "mongo/db/query/parsed_find_command.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/query/query_stats.h" +#include "mongo/db/query/query_stats_find_key_generator.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/query_analysis_writer.h" #include "mongo/db/service_context.h" #include "mongo/db/stats/counters.h" #include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/db/stats/server_read_concern_metrics.h" 
-#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/storage_stats.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/query_analysis_sampler_util.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -97,44 +165,16 @@ boost::intrusive_ptr makeExpressionContext( // ExpressionContext. collator = collPtr->getDefaultCollator()->clone(); } - - // Although both 'find' and 'aggregate' commands have an ExpressionContext, some of the data - // members in the ExpressionContext are used exclusively by the aggregation subsystem. This - // includes the following fields which here we simply initialize to some meaningless default - // value: - // - explain - // - fromMongos - // - needsMerge - // - bypassDocumentValidation - // - mongoProcessInterface - // - resolvedNamespaces - // - uuid - // - // As we change the code to make the find and agg systems more tightly coupled, it would make - // sense to start initializing these fields for find operations as well. - auto expCtx = make_intrusive( - opCtx, - verbosity, - false, // fromMongos - false, // needsMerge - findCommand.getAllowDiskUse().value_or(allowDiskUseByDefault.load()), - false, // bypassDocumentValidation - false, // isMapReduceCommand - findCommand.getNamespaceOrUUID().nss().value_or(NamespaceString()), - findCommand.getLegacyRuntimeConstants(), - std::move(collator), - nullptr, // mongoProcessInterface - StringMap{}, - boost::none, // uuid - findCommand.getLet(), // let - CurOp::get(opCtx)->dbProfileLevel() > 0 // mayDbProfile - ); + auto expCtx = + make_intrusive(opCtx, + findCommand, + std::move(collator), + CurOp::get(opCtx)->dbProfileLevel() > 0, // mayDbProfile + verbosity, + allowDiskUseByDefault.load()); expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp"; expCtx->startExpressionCounters(); - // Set the value of $$USER_ROLES for the find command. - expCtx->setUserRoles(); - return expCtx; } @@ -148,6 +188,52 @@ void beginQueryOp(OperationContext* opCtx, const NamespaceString& nss, const BSO curOp->setNS_inlock(nss); } +/** + * Parses the grammar elements like 'filter', 'sort', and 'projection' from the raw + * 'FindCommandRequest', and tracks internal state like begining the operation's timer and recording + * query shape stats (if enabled). 
+ */ +std::unique_ptr parseQueryAndBeginOperation( + OperationContext* opCtx, + const AutoGetCollectionForReadCommandMaybeLockFree& ctx, + const NamespaceString& nss, + BSONObj requestBody, + std::unique_ptr findCommand, + const CollectionPtr& collection) { + // Fill out curop information. + beginQueryOp(opCtx, nss, requestBody); + // Finish the parsing step by using the FindCommandRequest to create a CanonicalQuery. + const ExtensionsCallbackReal extensionsCallback(opCtx, &nss); + + auto expCtx = + makeExpressionContext(opCtx, *findCommand, collection, boost::none /* verbosity */); + + auto parsedRequest = uassertStatusOK( + parsed_find_command::parse(expCtx, + std::move(findCommand), + extensionsCallback, + MatchExpressionParser::kAllowAllSpecialFeatures)); + + // After parsing to detect if $$USER_ROLES is referenced in the query, set the value of + // $$USER_ROLES for the find command. + expCtx->setUserRoles(); + // Register query stats collection. Exclude queries against collections with encrypted fields. + // It is important to do this before canonicalizing and optimizing the query, each of which + // would alter the query shape. + if (!(collection && collection.get()->getCollectionOptions().encryptedFieldConfig)) { + query_stats::registerRequest(opCtx, nss, [&]() { + BSONObj queryShape = query_shape::extractQueryShape( + *parsedRequest, + SerializationOptions::kRepresentativeQueryShapeSerializeOptions, + expCtx); + return std::make_unique( + expCtx, *parsedRequest, std::move(queryShape), ctx.getCollectionType()); + }); + } + + return uassertStatusOK( + CanonicalQuery::canonicalize(std::move(expCtx), std::move(parsedRequest))); +} /** * A command for running .find() queries. */ @@ -295,12 +381,23 @@ class FindCmd final : public Command { // Parse the command BSON to a FindCommandRequest. auto findCommand = _parseCmdObjectToFindCommandRequest(opCtx, nss, _request.body); + // check validated tenantId and correct the serialization context object on the request + auto reqSerializationContext = findCommand->getSerializationContext(); + reqSerializationContext.setTenantIdSource(_request.getValidatedTenantId() != + boost::none); + findCommand->setSerializationContext(reqSerializationContext); + // Finish the parsing step by using the FindCommandRequest to create a CanonicalQuery. const ExtensionsCallbackReal extensionsCallback(opCtx, &nss); // The collection may be NULL. If so, getExecutor() should handle it by returning an // execution tree with an EOFStage. const auto& collection = ctx->getCollection(); + if (!ctx->getView()) { + const bool isClusteredCollection = collection && collection->isClustered(); + uassertStatusOK(query_request_helper::validateResumeAfter( + findCommand->getResumeAfter(), isClusteredCollection)); + } auto expCtx = makeExpressionContext(opCtx, *findCommand, collection, verbosity); const bool isExplain = true; auto cq = uassertStatusOK( @@ -311,6 +408,10 @@ class FindCmd final : public Command { extensionsCallback, MatchExpressionParser::kAllowAllSpecialFeatures)); + // After parsing to detect if $$USER_ROLES is referenced in the query, set the value of + // $$USER_ROLES for the find command. + cq->getExpCtx()->setUserRoles(); + // If we are running a query against a view redirect this query through the aggregation // system. 
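// -----------------------------------------------------------------------------------------------
// [Editor's note -- illustrative sketch, not part of the patch] parseQueryAndBeginOperation above
// registers the request with the query stats subsystem through a callback, so the query-shape key
// is built lazily and, importantly, before canonicalization/optimization can alter the shape
// (collections with encrypted fields are skipped). The self-contained sketch below models only
// that deferred-key registration pattern; the Registry type and its behaviour are hypothetical.
// -----------------------------------------------------------------------------------------------
#include <functional>
#include <iostream>
#include <memory>
#include <string>

namespace query_stats_sketch {

// Hypothetical registry: the key builder runs only when stats collection is enabled, so callers
// can pass a lambda that captures the still-unoptimized request without paying for it up front.
class Registry {
public:
    explicit Registry(bool enabled) : _enabled(enabled) {}

    void registerRequest(const std::string& ns,
                         const std::function<std::unique_ptr<std::string>()>& makeKey) {
        if (!_enabled) {
            return;  // Key is never materialized; no extra work on the hot path.
        }
        _lastKey = makeKey();
        std::cout << "recorded query shape for " << ns << ": " << *_lastKey << "\n";
    }

private:
    bool _enabled;
    std::unique_ptr<std::string> _lastKey;
};

}  // namespace query_stats_sketch

int main() {
    query_stats_sketch::Registry registry(/*enabled=*/true);
    const std::string filterShape = "{a: '?'}";  // Stand-in for the extracted query shape.
    registry.registerRequest("test.c", [&] {
        // Built lazily, before any optimization could change the shape.
        return std::make_unique<std::string>(filterShape);
    });
    return 0;
}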
if (ctx->getView()) { @@ -335,7 +436,8 @@ class FindCmd final : public Command { nss, viewAggCmd, verbosity, - APIParameters::get(opCtx).getAPIStrict().value_or(false)); + APIParameters::get(opCtx).getAPIStrict().value_or(false), + reqSerializationContext); try { // An empty PrivilegeVector is acceptable because these privileges are only @@ -366,8 +468,13 @@ class FindCmd final : public Command { auto bodyBuilder = result->getBodyBuilder(); // Got the execution tree. Explain it. - Explain::explainStages( - exec.get(), collection, verbosity, BSONObj(), _request.body, &bodyBuilder); + Explain::explainStages(exec.get(), + collection, + verbosity, + BSONObj(), + SerializationContext::stateCommandReply(reqSerializationContext), + _request.body, + &bodyBuilder); } /** @@ -388,9 +495,18 @@ class FindCmd final : public Command { // Parse the command BSON to a FindCommandRequest. Pass in the parsedNss in case cmdObj // does not have a UUID. - const bool isExplain = false; const bool isOplogNss = (_ns == NamespaceString::kRsOplogNamespace); auto findCommand = _parseCmdObjectToFindCommandRequest(opCtx, _ns, cmdObj); + + // check validated tenantId and correct the serialization context object on the request + auto reqSerializationContext = findCommand->getSerializationContext(); + reqSerializationContext.setTenantIdSource(_request.getValidatedTenantId() != + boost::none); + findCommand->setSerializationContext(reqSerializationContext); + + auto respSerializationContext = + SerializationContext::stateCommandReply(reqSerializationContext); + CurOp::get(opCtx)->beginQueryPlanningTimer(); // Only allow speculative majority for internal commands that specify the correct flag. @@ -399,10 +515,11 @@ class FindCmd final : public Command { !(repl::ReadConcernArgs::get(opCtx).isSpeculativeMajority() && !findCommand->getAllowSpeculativeMajorityRead())); + const bool isFindByUUID = findCommand->getNamespaceOrUUID().isUUID(); uassert(ErrorCodes::InvalidOptions, "When using the find command by UUID, the collectionUUID parameter cannot also " "be specified", - !findCommand->getNamespaceOrUUID().uuid() || !findCommand->getCollectionUUID()); + !isFindByUUID || !findCommand->getCollectionUUID()); auto replCoord = repl::ReplicationCoordinator::get(opCtx); const auto txnParticipant = TransactionParticipant::get(opCtx); @@ -501,19 +618,23 @@ class FindCmd final : public Command { const auto& collection = ctx->getCollection(); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "UUID " << findCommand->getNamespaceOrUUID().uuid().value() + str::stream() << "UUID " << findCommand->getNamespaceOrUUID().uuid() << " specified in query request not found", - collection || !findCommand->getNamespaceOrUUID().uuid()); + collection || !isFindByUUID); + bool isClusteredCollection = false; if (collection) { - // Set the namespace if a collection was found, as opposed to nothing or a view. - query_request_helper::refreshNSS(ctx->getNss(), findCommand.get()); + if (isFindByUUID) { + // Replace the UUID in the find command with the fully qualified namespace of + // the looked up Collection. + findCommand->setNss(ctx->getNss()); + } // Tailing a replicated capped clustered collection requires majority read concern. 
const bool isTailable = findCommand->getTailable(); const bool isMajorityReadConcern = repl::ReadConcernArgs::get(opCtx).getLevel() == repl::ReadConcernLevel::kMajorityReadConcern; - const bool isClusteredCollection = collection->isClustered(); + isClusteredCollection = collection->isClustered(); const bool isCapped = collection->isCapped(); const bool isReplicated = collection->ns().isReplicated(); if (isClusteredCollection && isCapped && isReplicated && isTailable) { @@ -524,21 +645,16 @@ class FindCmd final : public Command { } } - // Fill out curop information. - beginQueryOp(opCtx, nss, _request.body); - - // Finish the parsing step by using the FindCommandRequest to create a CanonicalQuery. - const ExtensionsCallbackReal extensionsCallback(opCtx, &nss); + // Views use the aggregation system and the $_resumeAfter parameter is not allowed. A + // more descriptive error will be raised later, but we want to validate this parameter + // before beginning the operation. + if (!ctx->getView()) { + uassertStatusOK(query_request_helper::validateResumeAfter( + findCommand->getResumeAfter(), isClusteredCollection)); + } - auto expCtx = - makeExpressionContext(opCtx, *findCommand, collection, boost::none /* verbosity */); - auto cq = uassertStatusOK( - CanonicalQuery::canonicalize(opCtx, - std::move(findCommand), - isExplain, - std::move(expCtx), - extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures)); + auto cq = parseQueryAndBeginOperation( + opCtx, *ctx, nss, _request.body, std::move(findCommand), collection); // If we are running a query against a view redirect this query through the aggregation // system. @@ -580,16 +696,6 @@ class FindCmd final : public Command { cq->setUseCqfIfEligible(true); - if (collection) { - // Collect telemetry. Exclude queries against collections with encrypted fields. - if (!collection.get()->getCollectionOptions().encryptedFieldConfig) { - telemetry::registerFindRequest(cq->getFindCommandRequest(), - collection.get()->ns(), - opCtx, - cq->getExpCtx()); - } - } - // Get the execution plan for the query. bool permitYield = true; auto exec = @@ -617,7 +723,12 @@ class FindCmd final : public Command { const CursorId cursorId = 0; endQueryOp(opCtx, collection, *exec, numResults, boost::none, cmdObj); auto bodyBuilder = result->getBodyBuilder(); - appendCursorResponseObject(cursorId, nss, BSONArray(), boost::none, &bodyBuilder); + appendCursorResponseObject(cursorId, + nss, + BSONArray(), + boost::none, + &bodyBuilder, + respSerializationContext); return; } @@ -665,8 +776,6 @@ class FindCmd final : public Command { auto&& [stats, _] = explainer.getWinningPlanStats(ExplainOptions::Verbosity::kExecStats); LOGV2_WARNING(23798, - "Plan executor error during find command: {error}, " - "stats: {stats}, cmd: {cmd}", "Plan executor error during find command", "error"_attr = exception.toStatus(), "stats"_attr = redact(stats), @@ -722,9 +831,9 @@ class FindCmd final : public Command { if (stashResourcesForGetMore) { // Collect storage stats now before we stash the recovery unit. These stats are // normally collected in the service entry point layer just before a command - // ends, but they must be collected before stashing the - // RecoveryUnit. Otherwise, the service entry point layer will collect the - // stats from the new RecoveryUnit, which wasn't actually used for the query. + // ends, but they must be collected before stashing the RecoveryUnit. 
Otherwise, + // the service entry point layer will collect the stats from the new + // RecoveryUnit, which wasn't actually used for the query. // // The stats collected here will not get overwritten, as the service entry // point layer will only set these stats when they're not empty. @@ -739,14 +848,14 @@ class FindCmd final : public Command { } // Generate the response object to send to the client. - firstBatch.done(cursorId, nss); + firstBatch.done(cursorId, nss, respSerializationContext); // Increment this metric once we have generated a response and we know it will return // documents. auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx); metricsCollector.incrementDocUnitsReturned(nss.ns(), docUnitsReturned); - query_request_helper::validateCursorResponse(result->getBodyBuilder().asTempObj(), - nss.tenantId()); + query_request_helper::validateCursorResponse( + result->getBodyBuilder().asTempObj(), nss.tenantId(), respSerializationContext); } void appendMirrorableRequest(BSONObjBuilder* bob) const override { @@ -789,15 +898,12 @@ class FindCmd final : public Command { // Rewrite any FLE find payloads that exist in the query if this is a FLE 2 query. if (shouldDoFLERewrite(findCommand)) { - invariant(findCommand->getNamespaceOrUUID().nss()); + invariant(findCommand->getNamespaceOrUUID().isNamespaceString()); if (!findCommand->getEncryptionInformation()->getCrudProcessed().value_or(false)) { processFLEFindD( - opCtx, findCommand->getNamespaceOrUUID().nss().value(), findCommand.get()); + opCtx, findCommand->getNamespaceOrUUID().nss(), findCommand.get()); } - // Set the telemetryStoreKey to none so telemetry isn't collected when we've done a - // FLE rewrite. - CurOp::get(opCtx)->debug().telemetryStoreKey = boost::none; CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; } diff --git a/src/mongo/db/commands/fle2_cleanup.idl b/src/mongo/db/commands/fle2_cleanup.idl new file mode 100644 index 0000000000000..836897b065dd1 --- /dev/null +++ b/src/mongo/db/commands/fle2_cleanup.idl @@ -0,0 +1,55 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. 
+# + +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + - "mongo/crypto/fle_stats.idl" + +structs: + CleanupStructuredEncryptionDataCommandReply: + description: "Reply from the {cleanupStructuredEncryptionData: ...} command" + strict: true + is_command_reply: true + fields: + stats: CleanupStats + +commands: + cleanupStructuredEncryptionData: + description: "Parser for the 'cleanupStructuredEncryptionData' command" + command_name: cleanupStructuredEncryptionData + api_version: "" + namespace: concatenate_with_db + strict: true + reply_type: CleanupStructuredEncryptionDataCommandReply + fields: + cleanupTokens: + description: "Map of field path to ECOCToken" + type: object diff --git a/src/mongo/db/commands/fle2_cleanup_cmd.cpp b/src/mongo/db/commands/fle2_cleanup_cmd.cpp new file mode 100644 index 0000000000000..9a7cfda501a36 --- /dev/null +++ b/src/mongo/db/commands/fle2_cleanup_cmd.cpp @@ -0,0 +1,370 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + + +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/crypto/fle_options_gen.h" +#include "mongo/crypto/fle_stats.h" +#include "mongo/crypto/fle_stats_gen.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/drop_collection.h" +#include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/commands.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/commands/fle2_cleanup_gen.h" +#include "mongo/db/commands/fle2_compact.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/drop_gen.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/fle_crud.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage + +namespace mongo { + +namespace { + +void createQEClusteredStateCollection(OperationContext* opCtx, const NamespaceString& nss) { + CreateCommand createCmd(nss); + mongo::ClusteredIndexSpec clusterIdxSpec(BSON("_id" << 1), true); + createCmd.setClusteredIndex( + stdx::variant(std::move(clusterIdxSpec))); + auto status = createCollection(opCtx, createCmd); + if (!status.isOK()) { + if (status != ErrorCodes::NamespaceExists) { + uassertStatusOK(status); + } + LOGV2_DEBUG( + 7618801, 1, "Create collection failed because namespace already exists", logAttrs(nss)); + } +} + +void dropQEStateCollection(OperationContext* opCtx, const NamespaceString& nss) { + DropReply dropReply; + uassertStatusOK( + dropCollection(opCtx, + nss, + &dropReply, + DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops)); + LOGV2_DEBUG(7618802, 1, "QE state collection drop finished", "reply"_attr = dropReply); +} + +/** + * QE cleanup is similar to QE compact in that it also performs "compaction" of the + * ESC collection by removing stale ESC non-anchors. Unlike compact, cleanup also removes + * stale ESC anchors. It also differs from compact in that instead of inserting "anchors" + * to the ESC, cleanup only inserts or updates "null" anchors. + * + * At a high level, the cleanup algorithm works as follows: + * 1. The _ids of random ESC non-anchors are first read into an in-memory set 'P'. + * 2. (*) a temporary 'esc.deletes' collection is created. This will collection will contain + the _ids of anchor documents that cleanup will remove towards the end of the operation. 
+ * 3. The ECOC is renamed to a temporary namespace (hereby referred to as 'ecoc.compact'). + * 4. Unique entries from 'ecoc.compact' are decoded into an in-memory set of tokens: 'C'. + * 5. For each token in 'C', the following is performed: + * a. Start a transaction + * b. Run EmuBinary to collect the latest anchor and non-anchor positions for the current token. + * c. (*) Insert (or update an existing) null anchor which encodes the latest positions. + * d. (*) If there are anchors corresponding to the current token, insert their _ids + * into 'esc.deletes'. These anchors are now stale and are marked for deletion. + * e. Commit transaction + * 6. Delete every document in the ESC whose _id can be found in 'P' + * 7. (*) Delete every document in the ESC whose _id can be found in 'esc.deletes' + * 8. (*) Drop 'esc.deletes' + * 9. Drop 'ecoc.compact' + * + * Steps marked with (*) are unique to the cleanup operation. + */ +CleanupStats cleanupEncryptedCollection(OperationContext* opCtx, + const CleanupStructuredEncryptionData& request) { + CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; + + uassert(7618803, + str::stream() << "Feature flag `FLE2CleanupCommand` must be enabled to run " + << CleanupStructuredEncryptionData::kCommandName, + gFeatureFlagFLE2CleanupCommand.isEnabled(serverGlobalParams.featureCompatibility)); + + uassert(7618804, + str::stream() << CleanupStructuredEncryptionData::kCommandName + << " must be run through mongos in a sharded cluster", + !ShardingState::get(opCtx)->enabled()); + + // Since this command holds an IX lock on the DB and the global lock throughout + // the lifetime of this operation, setFCV should not be allowed to abort the transaction + // performing the cleanup. Otherwise, on retry, the transaction may attempt to + // acquire the global lock in IX mode, while setFCV is already waiting to acquire it + // in S mode, causing a deadlock. + FixedFCVRegion fixedFcv(opCtx); + + const auto& edcNss = request.getNamespace(); + + AutoGetDb autoDb(opCtx, edcNss.dbName(), MODE_IX); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Database '" << edcNss.dbName().toStringForErrorMsg() + << "' does not exist", + autoDb.getDb()); + Lock::CollectionLock edcLock(opCtx, edcNss, MODE_IS); + + // Validate the request and acquire the relevant namespaces + EncryptedStateCollectionsNamespaces namespaces; + { + auto catalog = CollectionCatalog::get(opCtx); + + // Check the data collection exists and is not a view + auto edc = catalog->lookupCollectionByNamespace(opCtx, edcNss); + if (!edc) { + uassert(ErrorCodes::CommandNotSupportedOnView, + "Cannot cleanup structured encryption data on a view", + !catalog->lookupView(opCtx, edcNss)); + uasserted(ErrorCodes::NamespaceNotFound, + str::stream() + << "Collection '" << edcNss.toStringForErrorMsg() << "' does not exist"); + } + + validateCleanupRequest(request, *edc); + + namespaces = + uassertStatusOK(EncryptedStateCollectionsNamespaces::createFromDataCollection(*edc)); + } + + // Acquire exclusive lock on the associated 'ecoc.lock' namespace to serialize calls + // to cleanup and compact on the same EDC namespace. 
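For readers skimming this new command, the nine-step algorithm in the comment block above may be easier to follow as a compact outline. The sketch below is a deliberately simplified restatement in plain C++ over hypothetical in-memory stand-ins (`Esc`, `NullAnchor`, string tokens); it is not the server's storage or transaction API, and the real code in this file performs steps 5a-5e inside per-token transactions.

```cpp
#include <cstdint>
#include <map>
#include <set>
#include <string>
#include <vector>

struct NullAnchor {
    std::uint64_t apos = 0;  // latest anchor position recorded by the null anchor
    std::uint64_t cpos = 0;  // latest non-anchor position recorded by the null anchor
};

struct Esc {
    std::set<std::string> nonAnchorIds;                      // regular ESC entries
    std::map<std::string, std::set<std::string>> anchorIds;  // token -> anchor _ids
    std::map<std::string, NullAnchor> nullAnchors;           // token -> null anchor
};

void cleanupSketch(Esc& esc, std::vector<std::string>& ecoc) {
    std::set<std::string> p = esc.nonAnchorIds;    // 1. snapshot non-anchor _ids into 'P'
    std::set<std::string> escDeletes;              // 2. temporary 'esc.deletes'
    std::vector<std::string> ecocCompact;          // 3. "rename" the ECOC to 'ecoc.compact'
    ecocCompact.swap(ecoc);
    std::set<std::string> c(ecocCompact.begin(), ecocCompact.end());  // 4. unique tokens 'C'

    for (const auto& token : c) {                  // 5. one pass (transaction) per token
        auto& anchors = esc.anchorIds[token];
        NullAnchor latest{anchors.size(), 0};      // 5b. stand-in for EmuBinary's result
        esc.nullAnchors[token] = latest;           // 5c. upsert the null anchor
        escDeletes.insert(anchors.begin(), anchors.end());  // 5d. mark stale anchors
    }

    for (const auto& id : p)                       // 6. delete non-anchors captured in 'P'
        esc.nonAnchorIds.erase(id);

    for (auto& entry : esc.anchorIds)              // 7. delete anchors listed in 'esc.deletes'
        for (const auto& id : escDeletes)
            entry.second.erase(id);

    escDeletes.clear();                            // 8. drop 'esc.deletes'
    ecocCompact.clear();                           // 9. drop 'ecoc.compact'
}

int main() {
    Esc esc;
    esc.nonAnchorIds = {"n1", "n2"};
    esc.anchorIds["tokenA"] = {"a1", "a2"};
    std::vector<std::string> ecoc{"tokenA"};
    cleanupSketch(esc, ecoc);
    return 0;
}
```

The exclusive lock taken immediately below on the shared 'ecoc.lock' namespace is what prevents a concurrent compact from interleaving with this sequence.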
+ Lock::CollectionLock compactionLock(opCtx, namespaces.ecocLockNss, MODE_X); + + LOGV2(7618805, "Cleaning up the encrypted compaction collection", logAttrs(edcNss)); + + CleanupStats stats({}, {}); + FLECompactESCDeleteSet escDeleteSet; + auto tagsPerDelete = + ServerParameterSet::getClusterParameterSet() + ->get>("fleCompactionOptions") + ->getValue(boost::none) + .getMaxESCEntriesPerCompactionDelete(); + + // If 'esc.deletes' exists, clean up the matching anchors in ESC and drop 'esc.deletes' + { + AutoGetCollection escDeletes(opCtx, namespaces.escDeletesNss, MODE_IS); + if (escDeletes) { + LOGV2(7618806, + "Cleaning up ESC deletes collection from a prior cleanup operation", + logAttrs(namespaces.escDeletesNss)); + cleanupESCAnchors( + opCtx, namespaces.escNss, namespaces.escDeletesNss, tagsPerDelete, &stats.getEsc()); + } + } + dropQEStateCollection(opCtx, namespaces.escDeletesNss); + + bool createEcoc = false; + bool renameEcoc = false; + { + AutoGetCollection ecoc(opCtx, namespaces.ecocNss, MODE_IS); + AutoGetCollection ecocCompact(opCtx, namespaces.ecocRenameNss, MODE_IS); + + // Early exit if there's no ECOC + if (!ecoc && !ecocCompact) { + LOGV2(7618807, + "Skipping cleanup as there is no ECOC collection to compact", + "ecocNss"_attr = namespaces.ecocNss, + "ecocCompactNss"_attr = namespaces.ecocRenameNss); + return stats; + } + + createEcoc = !ecoc; + + // Set up the temporary 'ecoc.compact' collection + if (ecoc && !ecocCompact) { + // Load the random set of ESC non-anchor entries to be deleted post-cleanup + auto memoryLimit = + ServerParameterSet::getClusterParameterSet() + ->get>("fleCompactionOptions") + ->getValue(boost::none) + .getMaxCompactionSize(); + escDeleteSet = + readRandomESCNonAnchorIds(opCtx, namespaces.escNss, memoryLimit, &stats.getEsc()); + renameEcoc = createEcoc = true; + } else /* ecocCompact exists */ { + LOGV2(7618808, + "Resuming compaction from a stale ECOC collection", + logAttrs(namespaces.ecocRenameNss)); + } + } + + if (renameEcoc) { + LOGV2(7618809, + "Renaming the encrypted compaction collection", + "ecocNss"_attr = namespaces.ecocNss, + "ecocRenameNss"_attr = namespaces.ecocRenameNss); + RenameCollectionOptions renameOpts; + validateAndRunRenameCollection( + opCtx, namespaces.ecocNss, namespaces.ecocRenameNss, renameOpts); + } + + if (createEcoc) { + createQEClusteredStateCollection(opCtx, namespaces.ecocNss); + } + + // Create the temporary 'esc.deletes' clustered collection + createQEClusteredStateCollection(opCtx, namespaces.escDeletesNss); + + { + AutoGetCollection ecocCompact(opCtx, namespaces.ecocRenameNss, MODE_IS); + AutoGetCollection escDeletes(opCtx, namespaces.escDeletesNss, MODE_IS); + + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Renamed encrypted compaction collection " + << namespaces.ecocRenameNss.toStringForErrorMsg() + << " no longer exists prior to cleanup", + ecocCompact.getCollection()); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "ESC deletes collection " + << namespaces.escDeletesNss.toStringForErrorMsg() + << " no longer exists prior to cleanup", + escDeletes.getCollection()); + + // Clean up entries for each encrypted field in compactionTokens + processFLECleanup(opCtx, + request, + &getTransactionWithRetriesForMongoD, + namespaces, + &stats.getEsc(), + &stats.getEcoc()); + + // Delete the entries in 'C' from the ESC + cleanupESCNonAnchors( + opCtx, namespaces.escNss, escDeleteSet, tagsPerDelete, &stats.getEsc()); + + // Delete the entries in esc.deletes collection from the ESC + 
cleanupESCAnchors( + opCtx, namespaces.escNss, namespaces.escDeletesNss, tagsPerDelete, &stats.getEsc()); + } + + // Drop the 'esc.deletes' collection + dropQEStateCollection(opCtx, namespaces.escDeletesNss); + + // Drop the 'ecoc.compact' collection + dropQEStateCollection(opCtx, namespaces.ecocRenameNss); + + LOGV2(7618810, + "Done cleaning up the encrypted compaction collection", + logAttrs(request.getNamespace())); + + FLEStatusSection::get().updateCleanupStats(stats); + return stats; +} + +class CleanupStructuredEncryptionDataCmd final + : public TypedCommand { +public: + using Request = CleanupStructuredEncryptionData; + using Reply = CleanupStructuredEncryptionData::Reply; + using TC = TypedCommand; + + class Invocation final : public TC::InvocationBase { + public: + using TC::InvocationBase::InvocationBase; + using TC::InvocationBase::request; + + Reply typedRun(OperationContext* opCtx) { + return Reply(cleanupEncryptedCollection(opCtx, request())); + } + + private: + bool supportsWriteConcern() const final { + return false; + } + + void doCheckAuthorization(OperationContext* opCtx) const final { + auto* as = AuthorizationSession::get(opCtx->getClient()); + uassert(ErrorCodes::Unauthorized, + "Not authorized to cleanup structured encryption data", + as->isAuthorizedForActionsOnResource( + ResourcePattern::forExactNamespace(request().getNamespace()), + ActionType::cleanupStructuredEncryptionData)); + } + + NamespaceString ns() const final { + return request().getNamespace(); + } + }; + + typename TC::AllowedOnSecondary secondaryAllowed(ServiceContext*) const final { + return BasicCommand::AllowedOnSecondary::kNever; + } + + bool adminOnly() const final { + return false; + } + + std::set sensitiveFieldNames() const final { + return {CleanupStructuredEncryptionData::kCleanupTokensFieldName}; + } +} cleanupStructuredEncryptionDataCmd; + + +} // namespace + +} // namespace mongo diff --git a/src/mongo/db/commands/fle2_compact.cpp b/src/mongo/db/commands/fle2_compact.cpp index 162103af76f2a..1a1b9c9f853de 100644 --- a/src/mongo/db/commands/fle2_compact.cpp +++ b/src/mongo/db/commands/fle2_compact.cpp @@ -28,30 +28,65 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/fle2_compact.h" - +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/crypto/encryption_fields_gen.h" -#include "mongo/crypto/fle_stats.h" -#include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/commands/fle2_compact_gen.h" -#include "mongo/db/commands/server_status.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/commands/fle2_compact.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include 
"mongo/db/transaction/transaction_api.h" #include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite -MONGO_FAIL_POINT_DEFINE(fleCompactFailBeforeECOCRead); +MONGO_FAIL_POINT_DEFINE(fleCompactOrCleanupFailBeforeECOCRead); MONGO_FAIL_POINT_DEFINE(fleCompactHangBeforeESCAnchorInsert); - +MONGO_FAIL_POINT_DEFINE(fleCleanupHangBeforeNullAnchorUpdate); +MONGO_FAIL_POINT_DEFINE(fleCleanupFailAfterTransactionCommit); +MONGO_FAIL_POINT_DEFINE(fleCompactFailAfterTransactionCommit); +MONGO_FAIL_POINT_DEFINE(fleCleanupFailDuringAnchorDeletes); namespace mongo { namespace { @@ -105,6 +140,136 @@ void CompactStatsCounter::add(const ECOCStats& other) { addDeletes(other.getDeleted()); } +FLEEdgeCountInfo fetchEdgeCountInfo(FLEQueryInterface* queryImpl, + const ESCDerivedFromDataTokenAndContentionFactorToken& token, + const NamespaceString& escNss, + FLEQueryInterface::TagQueryType queryType, + const StringData queryTypeStr) { + std::vector> tags; + tags.emplace_back().push_back(FLEEdgePrfBlock{token.data, boost::none}); + auto countInfoSets = queryImpl->getTags(escNss, tags, queryType); + uassert(7517100, + str::stream() << "getQueryableEncryptionCountInfo for " << queryTypeStr + << " returned an invalid number of edge count info", + countInfoSets.size() == 1 && countInfoSets[0].size() == 1); + + auto& countInfo = countInfoSets[0][0]; + uassert(7517103, + str::stream() << "getQueryableEncryptionCountInfo for " << queryTypeStr + << " returned non-existent stats", + countInfo.stats.has_value()); + + uassert(7295001, + str::stream() << "getQueryableEncryptionCountInfo for " << queryTypeStr + << " returned non-existent searched counts", + countInfo.searchedCounts.has_value()); + + return countInfo; +} + +/** + * Inserts or updates a null anchor document in ESC. + * The newNullAnchor must contain the _id of the null anchor document to update. 
+ */ +void upsertNullAnchor(FLEQueryInterface* queryImpl, + bool hasNullAnchor, + BSONObj newNullAnchor, + const NamespaceString& nss, + ECStats* stats) { + CompactStatsCounter statsCtr(stats); + + if (MONGO_unlikely(fleCleanupHangBeforeNullAnchorUpdate.shouldFail())) { + LOGV2(7618811, "Hanging due to fleCleanupHangBeforeNullAnchorUpdate fail point"); + fleCleanupHangBeforeNullAnchorUpdate.pauseWhileSet(); + } + + if (hasNullAnchor) { + // update the null doc with a replacement modification + write_ops::UpdateOpEntry updateEntry; + updateEntry.setMulti(false); + updateEntry.setUpsert(false); + updateEntry.setQ(newNullAnchor.getField("_id").wrap()); + updateEntry.setU(mongo::write_ops::UpdateModification( + newNullAnchor, write_ops::UpdateModification::ReplacementTag{})); + write_ops::UpdateCommandRequest updateRequest(nss, {std::move(updateEntry)}); + + auto reply = queryImpl->update(nss, kUninitializedStmtId, updateRequest); + checkWriteErrors(reply); + statsCtr.addUpdates(reply.getNModified()); + } else { + // insert the null anchor; translate duplicate key error to a FLE contention error + StmtId stmtId = kUninitializedStmtId; + auto reply = + uassertStatusOK(queryImpl->insertDocuments(nss, {newNullAnchor}, &stmtId, true)); + checkWriteErrors(reply); + statsCtr.addInserts(1); + } +} + +void checkSchemaAndCompactionTokens(const BSONObj& tokens, const Collection& edc) { + uassert(6346807, + "Target namespace is not an encrypted collection", + edc.getCollectionOptions().encryptedFieldConfig); + + // Validate the request contains a compaction token for each encrypted field + const auto& efc = edc.getCollectionOptions().encryptedFieldConfig.value(); + CompactionHelpers::validateCompactionTokens(efc, tokens); +} + +std::shared_ptr> readUniqueECOCEntriesInTxn( + OperationContext* opCtx, + GetTxnCallback getTxn, + const NamespaceString ecocCompactNss, + BSONObj compactionTokens, + ECOCStats* ecocStats) { + + auto innerEcocStats = std::make_shared(); + auto uniqueEcocEntries = std::make_shared>(); + + if (MONGO_unlikely(fleCompactOrCleanupFailBeforeECOCRead.shouldFail())) { + uasserted(7293605, "Failed due to fleCompactOrCleanupFailBeforeECOCRead fail point"); + } + + std::shared_ptr trun = getTxn(opCtx); + + // The function that handles the transaction may outlive this function so we need to use + // shared_ptrs + auto argsBlock = std::make_tuple(compactionTokens, ecocCompactNss); + auto sharedBlock = std::make_shared(argsBlock); + + auto swResult = trun->runNoThrow( + opCtx, + [sharedBlock, uniqueEcocEntries, innerEcocStats]( + const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + FLEQueryInterfaceImpl queryImpl(txnClient, getGlobalServiceContext()); + + auto [compactionTokens2, ecocCompactNss2] = *sharedBlock.get(); + + *uniqueEcocEntries = getUniqueCompactionDocuments( + &queryImpl, compactionTokens2, ecocCompactNss2, innerEcocStats.get()); + + return SemiFuture::makeReady(); + }); + uassertStatusOK(swResult); + uassertStatusOK(swResult.getValue().getEffectiveStatus()); + + if (ecocStats) { + CompactStatsCounter ctr(ecocStats); + ctr.add(*innerEcocStats); + } + return uniqueEcocEntries; +} + +EncryptionInformation makeEmptyProcessEncryptionInformation() { + EncryptionInformation encryptionInformation; + encryptionInformation.setCrudProcessed(true); + + // We need to set an empty BSON object here for the schema. 
+ encryptionInformation.setSchema(BSONObj()); + + return encryptionInformation; +} + } // namespace @@ -112,8 +277,9 @@ StatusWith EncryptedStateCollectionsNamespaces::createFromDataCollection(const Collection& edc) { if (!edc.getCollectionOptions().encryptedFieldConfig) { return Status(ErrorCodes::BadValue, - str::stream() << "Encrypted data collection " << edc.ns() - << " is missing encrypted fields metadata"); + str::stream() + << "Encrypted data collection " << edc.ns().toStringForErrorMsg() + << " is missing encrypted fields metadata"); } auto& cfg = *(edc.getCollectionOptions().encryptedFieldConfig); @@ -136,12 +302,16 @@ EncryptedStateCollectionsNamespaces::createFromDataCollection(const Collection& if (!missingColl.empty()) { return Status(ErrorCodes::BadValue, str::stream() - << "Encrypted data collection " << edc.ns() + << "Encrypted data collection " << edc.ns().toStringForErrorMsg() << " is missing the name of its " << missingColl << " collection"); } namespaces.ecocRenameNss = NamespaceString(db, namespaces.ecocNss.coll().toString().append(".compact")); + namespaces.ecocLockNss = + NamespaceString(db, namespaces.ecocNss.coll().toString().append(".lock")); + namespaces.escDeletesNss = + NamespaceString(db, namespaces.escNss.coll().toString().append(".deletes")); return namespaces; } @@ -151,9 +321,9 @@ EncryptedStateCollectionsNamespaces::createFromDataCollection(const Collection& * that have been encrypted with that token. All entries are returned * in a set in their decrypted form. */ -stdx::unordered_set getUniqueCompactionDocumentsV2( +stdx::unordered_set getUniqueCompactionDocuments( FLEQueryInterface* queryImpl, - const CompactStructuredEncryptionData& request, + BSONObj tokensObj, const NamespaceString& ecocNss, ECOCStats* ecocStats) { @@ -162,7 +332,7 @@ stdx::unordered_set getUniqueCompactionDocumentsV2( // Initialize a set 'C' and for each compaction token, find all entries // in ECOC with matching field name. Decrypt entries and add to set 'C'. stdx::unordered_set c; - auto compactionTokens = CompactionHelpers::parseCompactionTokens(request.getCompactionTokens()); + auto compactionTokens = CompactionHelpers::parseCompactionTokens(tokensObj); for (auto& compactionToken : compactionTokens) { auto docs = queryImpl->findDocuments( @@ -181,38 +351,54 @@ void compactOneFieldValuePairV2(FLEQueryInterface* queryImpl, const ECOCCompactionDocumentV2& ecocDoc, const NamespaceString& escNss, ECStats* escStats) { - auto escValueToken = - FLETwiceDerivedTokenGenerator::generateESCTwiceDerivedValueToken(ecocDoc.esc); + CompactStatsCounter stats(escStats); - std::vector> tags; - { - FLEEdgePrfBlock edgeSet{ecocDoc.esc.data, boost::none}; - tags.push_back({edgeSet}); + /** + * Send a getQueryableEncryptionCountInfo command with query type "compact". + * The target of this command will perform the actual search for the next anchor + * position, which happens in the getEdgeCountInfoForCompact() function in fle_crypto. + * + * It is expected to return a single reply token, whose "count" field contains the + * next anchor position, and whose "searchedCounts" field contains the result of + * emuBinary. + */ + auto countInfo = fetchEdgeCountInfo( + queryImpl, ecocDoc.esc, escNss, FLEQueryInterface::TagQueryType::kCompact, "compact"_sd); + auto& emuBinaryResult = countInfo.searchedCounts.value(); + + stats.add(countInfo.stats.get()); + + // Check for the invalid case where emuBinary returned (0,0). + // This means that the tokens can't be trusted or the state collections are already hosed. 
+ if (emuBinaryResult.cpos.value_or(1) == 0) { + // apos must also be 0 if cpos is 0 + uassert(7666501, + "getQueryableEncryptionCountInfo returned an invalid position for the next anchor", + emuBinaryResult.apos.has_value() && emuBinaryResult.apos.value() == 0); + uasserted(7666502, + str::stream() << "Queryable Encryption compaction encountered invalid searched " + "ESC positions for field " + << ecocDoc.fieldName + << ". This may be due to invalid compaction tokens or corrupted " + "state collections."); } - auto countInfoSets = - queryImpl->getTags(escNss, tags, FLEQueryInterface::TagQueryType::kCompact); - - uassert(7517100, - "CountInfoSets cannot be empty and must have one value.", - countInfoSets.size() == 1 && countInfoSets[0].size() == 1); - - auto& val = countInfoSets[0][0]; - CompactStatsCounter stats(escStats); + if (emuBinaryResult.cpos == boost::none) { + // no new non-anchors since the last compact/cleanup, so don't insert a new anchor + return; + } - auto& tagToken = val.tagToken; - auto cpos = val.cpos; + // the "count" field contains the next anchor position and must not be zero + uassert(7295002, + "getQueryableEncryptionCountInfo returned an invalid position for the next anchor", + countInfo.count > 0); - uassert( - 7517103, "Stats cannot be empty for compacting a field value pair.", val.stats.has_value()); - stats.add(val.stats.get()); + auto valueToken = FLETwiceDerivedTokenGenerator::generateESCTwiceDerivedValueToken(ecocDoc.esc); + auto latestCpos = emuBinaryResult.cpos.value(); - if (!val.cpos) { - return; - } + auto anchorDoc = ESCCollection::generateAnchorDocument( + countInfo.tagToken, valueToken, countInfo.count, latestCpos); - auto anchorDoc = - ESCCollection::generateAnchorDocument(tagToken, escValueToken, val.count, cpos.value()); StmtId stmtId = kUninitializedStmtId; if (MONGO_unlikely(fleCompactHangBeforeESCAnchorInsert.shouldFail())) { @@ -226,48 +412,187 @@ void compactOneFieldValuePairV2(FLEQueryInterface* queryImpl, stats.addInserts(1); } + +void cleanupOneFieldValuePair(FLEQueryInterface* queryImpl, + const ECOCCompactionDocumentV2& ecocDoc, + const NamespaceString& escNss, + const NamespaceString& escDeletesNss, + ECStats* escStats) { + + CompactStatsCounter stats(escStats); + auto valueToken = FLETwiceDerivedTokenGenerator::generateESCTwiceDerivedValueToken(ecocDoc.esc); + + /** + * Send a getQueryableEncryptionCountInfo command with query type "cleanup". + * The target of this command will perform steps (B), (C), and (D) of the cleanup + * algorithm, and is implemented in getEdgeCountInfoForCleanup() function in fle_crypto. + * + * It is expected to return a single reply token, whose "searchedCounts" field contains + * the result of emuBinary (C), and whose "nullAnchorCounts" field may contain the result + * of the null anchor lookup (D). The "count" field shall contain the value of a_1 that + * the null anchor should be updated with. + */ + auto countInfo = fetchEdgeCountInfo( + queryImpl, ecocDoc.esc, escNss, FLEQueryInterface::TagQueryType::kCleanup, "cleanup"_sd); + auto& emuBinaryResult = countInfo.searchedCounts.value(); + + stats.add(countInfo.stats.get()); + + // Check for the invalid case where emuBinary returned (0,0). + // This means that the tokens can't be trusted or the state collections are already hosed. 
+ if (emuBinaryResult.cpos.value_or(1) == 0) { + // apos must also be 0 if cpos is 0 + uassert(7618815, + "getQueryableEncryptionCountInfo returned an invalid position for the next anchor", + emuBinaryResult.apos.has_value() && emuBinaryResult.apos.value() == 0); + uasserted(7618816, + str::stream() << "Queryable Encryption cleanup encountered invalid searched " + "ESC positions for field " + << ecocDoc.fieldName + << ". This may be due to invalid compaction tokens or corrupted " + "state collections."); + } + + if (emuBinaryResult.apos == boost::none) { + // case (E) + // Null anchor exists & contains the latest anchor position, + // and *maybe* the latest non-anchor position. + uassert(7295003, + str::stream() << "getQueryableEncryptionCountInfo for cleanup returned " + "non-existent null anchor counts", + countInfo.nullAnchorCounts.has_value()); + + if (emuBinaryResult.cpos == boost::none) { + // if cpos is none, then the null anchor also contains the latest + // non-anchor position, so no need to update it. + return; + } + + auto latestApos = countInfo.nullAnchorCounts->apos; + auto latestCpos = countInfo.count; + + // Update null anchor with the latest positions + auto newAnchor = ESCCollection::generateNullAnchorDocument( + countInfo.tagToken, valueToken, latestApos, latestCpos); + upsertNullAnchor(queryImpl, true, newAnchor, escNss, escStats); + + } else if (emuBinaryResult.apos.value() == 0) { + // case (F) + // No anchors yet exist, so latest apos is 0. + + uint64_t latestApos = 0; + auto latestCpos = countInfo.count; + + // Insert a new null anchor. + auto newAnchor = ESCCollection::generateNullAnchorDocument( + countInfo.tagToken, valueToken, latestApos, latestCpos); + upsertNullAnchor(queryImpl, false, newAnchor, escNss, escStats); + + } else /* (apos > 0) */ { + // case (G) + // New anchors exist - if null anchor exists, then it contains stale positions. + // Latest apos is returned by emuBinary. + + auto latestApos = emuBinaryResult.apos.value(); + auto latestCpos = countInfo.count; + + bool nullAnchorExists = countInfo.nullAnchorCounts.has_value(); + + // upsert the null anchor with the latest positions + auto newAnchor = ESCCollection::generateNullAnchorDocument( + countInfo.tagToken, valueToken, latestApos, latestCpos); + upsertNullAnchor(queryImpl, nullAnchorExists, newAnchor, escNss, escStats); + + // insert the _id of stale anchors (anchors in range [bottomApos + 1, latestApos]) + // into the deletes collection. + uint64_t bottomApos = 0; + if (nullAnchorExists) { + bottomApos = countInfo.nullAnchorCounts->apos; + } + + StmtId stmtId = kUninitializedStmtId; + + for (auto i = bottomApos + 1; i <= latestApos; i++) { + auto block = ESCCollection::generateAnchorId(countInfo.tagToken, i); + auto doc = BSON("_id"_sd << BSONBinData(block.data(), block.size(), BinDataGeneral)); + + auto swReply = queryImpl->insertDocuments(escDeletesNss, {doc}, &stmtId, false); + if (swReply.getStatus() == ErrorCodes::DuplicateKey) { + // ignore duplicate _id error, which can happen in case of a restart. 
+ LOGV2_DEBUG(7295010, + 2, + "Duplicate anchor ID found in ESC deletes collection", + "namespace"_attr = escDeletesNss); + continue; + } + uassertStatusOK(swReply.getStatus()); + checkWriteErrors(swReply.getValue()); + } + } +} + void processFLECompactV2(OperationContext* opCtx, const CompactStructuredEncryptionData& request, GetTxnCallback getTxn, const EncryptedStateCollectionsNamespaces& namespaces, ECStats* escStats, ECOCStats* ecocStats) { - auto innerEcocStats = std::make_shared(); auto innerEscStats = std::make_shared(); /* uniqueEcocEntries corresponds to the set 'C_f' in OST-1 */ - auto uniqueEcocEntries = std::make_shared>(); + auto uniqueEcocEntries = readUniqueECOCEntriesInTxn( + opCtx, getTxn, namespaces.ecocRenameNss, request.getCompactionTokens(), ecocStats); - if (MONGO_unlikely(fleCompactFailBeforeECOCRead.shouldFail())) { - uasserted(7293605, "Failed compact due to fleCompactFailBeforeECOCRead fail point"); - } - - // Read the ECOC documents in a transaction - { + // Each entry in 'C_f' represents a unique field/value pair. For each field/value pair, + // compact the ESC entries for that field/value pair in one transaction. + for (auto& ecocDoc : *uniqueEcocEntries) { + // start a new transaction std::shared_ptr trun = getTxn(opCtx); // The function that handles the transaction may outlive this function so we need to use // shared_ptrs - auto argsBlock = std::make_tuple(request, namespaces.ecocRenameNss); + auto argsBlock = std::make_tuple(ecocDoc, namespaces.escNss); auto sharedBlock = std::make_shared(argsBlock); auto swResult = trun->runNoThrow( opCtx, - [sharedBlock, uniqueEcocEntries, innerEcocStats]( - const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + [sharedBlock, innerEscStats](const txn_api::TransactionClient& txnClient, + ExecutorPtr txnExec) { FLEQueryInterfaceImpl queryImpl(txnClient, getGlobalServiceContext()); - auto [request2, ecocRenameNss] = *sharedBlock.get(); + auto [ecocDoc2, escNss] = *sharedBlock.get(); - *uniqueEcocEntries = getUniqueCompactionDocumentsV2( - &queryImpl, request2, ecocRenameNss, innerEcocStats.get()); + compactOneFieldValuePairV2(&queryImpl, ecocDoc2, escNss, innerEscStats.get()); return SemiFuture::makeReady(); }); uassertStatusOK(swResult); uassertStatusOK(swResult.getValue().getEffectiveStatus()); + + if (MONGO_unlikely(fleCompactFailAfterTransactionCommit.shouldFail())) { + uasserted(7663001, "Failed due to fleCompactFailAfterTransactionCommit fail point"); + } + } + + // Update stats + if (escStats) { + CompactStatsCounter ctr(escStats); + ctr.add(*innerEscStats); } +} + +void processFLECleanup(OperationContext* opCtx, + const CleanupStructuredEncryptionData& request, + GetTxnCallback getTxn, + const EncryptedStateCollectionsNamespaces& namespaces, + ECStats* escStats, + ECOCStats* ecocStats) { + auto innerEscStats = std::make_shared(); + + /* uniqueEcocEntries corresponds to the set 'C_f' in OST-1 */ + auto uniqueEcocEntries = readUniqueECOCEntriesInTxn( + opCtx, getTxn, namespaces.ecocRenameNss, request.getCleanupTokens(), ecocStats); // Each entry in 'C_f' represents a unique field/value pair. For each field/value pair, // compact the ESC entries for that field/value pair in one transaction. 
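The refactored compact path above, and the cleanup path that follows, run one transaction per unique ECOC entry and copy the lambda's inputs into a shared_ptr-owned tuple because the transaction machinery may retry the callback or outlive the enclosing frame. The following is a minimal, self-contained sketch of that ownership pattern; `runInTxn`, `Stats`, and the string arguments are hypothetical stand-ins for the server's txn_api, not its actual interfaces.

```cpp
#include <functional>
#include <future>
#include <memory>
#include <string>
#include <tuple>
#include <vector>

struct Stats {
    long pairsProcessed = 0;
};

// Hypothetical transaction runner: it takes ownership of the callback and may
// execute it on another thread, i.e. outside the caller's stack frame.
std::future<void> runInTxn(std::function<void()> body) {
    return std::async(std::launch::async, std::move(body));
}

void runPerEntryTransactions(const std::vector<std::string>& uniqueEcocEntries,
                             const std::string& escNss,
                             Stats* outStats) {
    auto innerStats = std::make_shared<Stats>();

    for (const auto& entry : uniqueEcocEntries) {
        // Copy everything the callback needs into shared ownership, mirroring
        // the sharedBlock tuple used by processFLECompactV2/processFLECleanup.
        auto sharedBlock =
            std::make_shared<std::tuple<std::string, std::string>>(entry, escNss);

        // One "transaction" per unique field/value pair, completed before the
        // next one starts.
        runInTxn([sharedBlock, innerStats] {
            const auto& [ecocDoc, nss] = *sharedBlock;
            if (!ecocDoc.empty() && !nss.empty()) {
                innerStats->pairsProcessed += 1;  // stand-in for the real compact/cleanup work
            }
        }).get();
    }

    if (outStats) {
        outStats->pairsProcessed += innerStats->pairsProcessed;
    }
}

int main() {
    Stats stats;
    runPerEntryTransactions({"tokenA", "tokenB"}, "test.enxcol_.coll.esc", &stats);
    return 0;
}
```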
@@ -277,7 +602,7 @@ void processFLECompactV2(OperationContext* opCtx, // The function that handles the transaction may outlive this function so we need to use // shared_ptrs - auto argsBlock = std::make_tuple(ecocDoc, namespaces.escNss); + auto argsBlock = std::make_tuple(ecocDoc, namespaces.escNss, namespaces.escDeletesNss); auto sharedBlock = std::make_shared(argsBlock); auto swResult = trun->runNoThrow( @@ -286,15 +611,20 @@ void processFLECompactV2(OperationContext* opCtx, ExecutorPtr txnExec) { FLEQueryInterfaceImpl queryImpl(txnClient, getGlobalServiceContext()); - auto [ecocDoc2, escNss] = *sharedBlock.get(); + auto [ecocDoc2, escNss, escDeletesNss] = *sharedBlock.get(); - compactOneFieldValuePairV2(&queryImpl, ecocDoc2, escNss, innerEscStats.get()); + cleanupOneFieldValuePair( + &queryImpl, ecocDoc2, escNss, escDeletesNss, innerEscStats.get()); return SemiFuture::makeReady(); }); uassertStatusOK(swResult); uassertStatusOK(swResult.getValue().getEffectiveStatus()); + + if (MONGO_unlikely(fleCleanupFailAfterTransactionCommit.shouldFail())) { + uasserted(7663002, "Failedg due to fleCleanupFailAfterTransactionCommit fail point"); + } } // Update stats @@ -302,20 +632,14 @@ void processFLECompactV2(OperationContext* opCtx, CompactStatsCounter ctr(escStats); ctr.add(*innerEscStats); } - if (ecocStats) { - CompactStatsCounter ctr(ecocStats); - ctr.add(*innerEcocStats); - } } void validateCompactRequest(const CompactStructuredEncryptionData& request, const Collection& edc) { - uassert(6346807, - "Target namespace is not an encrypted collection", - edc.getCollectionOptions().encryptedFieldConfig); + checkSchemaAndCompactionTokens(request.getCompactionTokens(), edc); +} - // Validate the request contains a compaction token for each encrypted field - const auto& efc = edc.getCollectionOptions().encryptedFieldConfig.value(); - CompactionHelpers::validateCompactionTokens(efc, request.getCompactionTokens()); +void validateCleanupRequest(const CleanupStructuredEncryptionData& request, const Collection& edc) { + checkSchemaAndCompactionTokens(request.getCleanupTokens(), edc); } const PrfBlock& FLECompactESCDeleteSet::at(size_t index) const { @@ -355,11 +679,16 @@ FLECompactESCDeleteSet readRandomESCNonAnchorIds(OperationContext* opCtx, aggCmd.setPipeline(std::move(pipeline)); } + aggCmd.setEncryptionInformation(makeEmptyProcessEncryptionInformation()); + auto swCursor = DBClientCursor::fromAggregationRequest(&client, aggCmd, false, false); uassertStatusOK(swCursor.getStatus()); auto cursor = std::move(swCursor.getValue()); - uassert(7293607, "Got an invalid cursor while reading the Queryable Encryption ESC", cursor); + uassert(7293607, + str::stream() << "Got an invalid cursor while reading the Queryable Encryption ESC " + << escNss.toStringForErrorMsg(), + cursor); while (cursor->more()) { auto& deleteIds = deleteSet.deleteIdSets.emplace_back(); @@ -408,6 +737,9 @@ void cleanupESCNonAnchors(OperationContext* opCtx, for (size_t idIndex = 0; idIndex < deleteSet.size();) { write_ops::DeleteCommandRequest deleteRequest(escNss, std::vector{}); + deleteRequest.getWriteCommandRequestBase().setEncryptionInformation( + makeEmptyProcessEncryptionInformation()); + auto& opEntry = deleteRequest.getDeletes().emplace_back(); opEntry.setMulti(true); @@ -440,4 +772,75 @@ void cleanupESCNonAnchors(OperationContext* opCtx, } } +void cleanupESCAnchors(OperationContext* opCtx, + const NamespaceString& escNss, + const NamespaceString& escDeletesNss, + size_t maxTagsPerDelete, + ECStats* escStats) { + + DBDirectClient 
client(opCtx); + std::int64_t deleted = 0; + + FindCommandRequest findCmd{escDeletesNss}; + + auto cursor = client.find(std::move(findCmd)); + + uassert(7618812, + str::stream() << "Got an invalid cursor while reading the Queryable Encryption ESC " + << escDeletesNss.toStringForErrorMsg(), + cursor); + + std::vector deleteSet; + deleteSet.reserve(maxTagsPerDelete); + + while (cursor->more()) { + write_ops::DeleteCommandRequest deleteRequest(escNss, + std::vector{}); + deleteRequest.getWriteCommandRequestBase().setEncryptionInformation( + makeEmptyProcessEncryptionInformation()); + + auto& opEntry = deleteRequest.getDeletes().emplace_back(); + opEntry.setMulti(true); + + BSONObjBuilder queryBuilder; + { + BSONObjBuilder idBuilder(queryBuilder.subobjStart(kId)); + BSONArrayBuilder array = idBuilder.subarrayStart("$in"); + + for (size_t tagCount = 0; tagCount < maxTagsPerDelete && cursor->more(); tagCount++) { + const auto doc = cursor->nextSafe(); + BSONElement id; + auto status = bsonExtractTypedField(doc, kId, BinData, &id); + uassertStatusOK(status); + uassert(7618813, + "Found a document in esc.deletes with _id of incorrect BinDataType", + id.binDataType() == BinDataType::BinDataGeneral); + + array.append(id); + } + } + + opEntry.setQ(queryBuilder.obj()); + + if (MONGO_unlikely(fleCleanupFailDuringAnchorDeletes.shouldFail())) { + uasserted(7723800, "Failing due to fleCleanupFailDuringAnchorDeletes failpoint"); + } + + auto reply = client.remove(deleteRequest); + if (reply.getWriteCommandReplyBase().getWriteErrors()) { + LOGV2_WARNING(7618814, + "Queryable Encryption compaction encountered write errors", + "namespace"_attr = escNss, + "reply"_attr = reply); + checkWriteErrors(reply.getWriteCommandReplyBase()); + } + deleted += reply.getN(); + } + + if (escStats) { + CompactStatsCounter stats(escStats); + stats.addDeletes(deleted); + } +} + } // namespace mongo diff --git a/src/mongo/db/commands/fle2_compact.h b/src/mongo/db/commands/fle2_compact.h index 2af0e930f3a68..9dad508659c7e 100644 --- a/src/mongo/db/commands/fle2_compact.h +++ b/src/mongo/db/commands/fle2_compact.h @@ -29,9 +29,22 @@ #pragma once +#include +#include +#include + #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_stats_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/commands/fle2_cleanup_gen.h" #include "mongo/db/commands/fle2_compact_gen.h" #include "mongo/db/fle_crud.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { @@ -44,6 +57,8 @@ struct EncryptedStateCollectionsNamespaces { NamespaceString escNss; NamespaceString ecocNss; NamespaceString ecocRenameNss; + NamespaceString ecocLockNss; + NamespaceString escDeletesNss; }; /** @@ -51,6 +66,12 @@ struct EncryptedStateCollectionsNamespaces { */ void validateCompactRequest(const CompactStructuredEncryptionData& request, const Collection& edc); +/** + * Validate a cleanup request has the right encryption tokens. 
+ */ +void validateCleanupRequest(const CleanupStructuredEncryptionData& request, const Collection& edc); + + void processFLECompactV2(OperationContext* opCtx, const CompactStructuredEncryptionData& request, GetTxnCallback getTxn, @@ -58,14 +79,21 @@ void processFLECompactV2(OperationContext* opCtx, ECStats* escStats, ECOCStats* ecocStats); +void processFLECleanup(OperationContext* opCtx, + const CleanupStructuredEncryptionData& request, + GetTxnCallback getTxn, + const EncryptedStateCollectionsNamespaces& namespaces, + ECStats* escStats, + ECOCStats* ecocStats); + /** * Get all unique documents in the ECOC collection in their decrypted form. * * Used by unit tests. */ -stdx::unordered_set getUniqueCompactionDocumentsV2( +stdx::unordered_set getUniqueCompactionDocuments( FLEQueryInterface* queryImpl, - const CompactStructuredEncryptionData& request, + BSONObj tokensObj, const NamespaceString& ecocNss, ECOCStats* ecocStats); @@ -81,6 +109,19 @@ void compactOneFieldValuePairV2(FLEQueryInterface* queryImpl, const NamespaceString& escNss, ECStats* escStats); + +/** + * Performs cleanup of the ESC entries for the encrypted field/value pair + * whose tokens are in the provided ECOC compaction document. + * + * Used by unit tests. + */ +void cleanupOneFieldValuePair(FLEQueryInterface* queryImpl, + const ECOCCompactionDocumentV2& ecocDoc, + const NamespaceString& escNss, + const NamespaceString& escDeletesNss, + ECStats* escStats); + /** * Container for the _id values of ESC entries that are slated for deletion * at the end of a compact or cleanup operation. @@ -112,8 +153,8 @@ FLECompactESCDeleteSet readRandomESCNonAnchorIds(OperationContext* opCtx, ECStats* escStats); /** - * Deletes from the ESC collection the non-anchor documents whose _id - * appears in the list deleteIds + * Deletes from the ESC collection the non-anchor documents whose _ids + * appear in the list deleteIds */ void cleanupESCNonAnchors(OperationContext* opCtx, const NamespaceString& escNss, @@ -121,4 +162,14 @@ void cleanupESCNonAnchors(OperationContext* opCtx, size_t maxTagsPerDelete, ECStats* escStats); +/** + * Deletes from the ESC collection the anchor documents whose _ids + * appear in the collection escDeletesNss + */ +void cleanupESCAnchors(OperationContext* opCtx, + const NamespaceString& escNss, + const NamespaceString& escDeletesNss, + size_t maxTagsPerDelete, + ECStats* escStats); + } // namespace mongo diff --git a/src/mongo/db/commands/fle2_compact.idl b/src/mongo/db/commands/fle2_compact.idl index 3ba9a9c96868c..3ac36f10da098 100644 --- a/src/mongo/db/commands/fle2_compact.idl +++ b/src/mongo/db/commands/fle2_compact.idl @@ -35,7 +35,7 @@ imports: structs: CompactStructuredEncryptionDataCommandReply: - description: "Reply from the {compactStructuredEncryptedData: ...} command" + description: "Reply from the {compactStructuredEncryptionData: ...} command" strict: true is_command_reply: true fields: diff --git a/src/mongo/db/commands/fle2_compact_cmd.cpp b/src/mongo/db/commands/fle2_compact_cmd.cpp index 3862fea1f56e6..80a7bd09a7a75 100644 --- a/src/mongo/db/commands/fle2_compact_cmd.cpp +++ b/src/mongo/db/commands/fle2_compact_cmd.cpp @@ -28,14 +28,28 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/fle2_compact.h" - -#include "mongo/crypto/encryption_fields_gen.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" 
+#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/fle_options_gen.h" #include "mongo/crypto/fle_stats.h" +#include "mongo/crypto/fle_stats_gen.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog/drop_collection.h" @@ -44,10 +58,29 @@ #include "mongo/db/commands.h" #include "mongo/db/commands/create_gen.h" #include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/commands/fle2_compact.h" +#include "mongo/db/commands/fle2_compact_gen.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/drop_gen.h" +#include "mongo/db/fle_crud.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -57,11 +90,6 @@ MONGO_FAIL_POINT_DEFINE(fleCompactSkipECOCDropUnsharded); namespace mongo { namespace { -/** - * Ensures that only one compactStructuredEncryptionData can run at a given time. - */ -Lock::ResourceMutex commandMutex("compactStructuredEncryptionDataCommandMutex"); - CompactStats compactEncryptedCompactionCollection(OperationContext* opCtx, const CompactStructuredEncryptionData& request) { @@ -72,15 +100,6 @@ CompactStats compactEncryptedCompactionCollection(OperationContext* opCtx, << " must be run through mongos in a sharded cluster", !ShardingState::get(opCtx)->enabled()); - uassert( - 7592901, - "The preview version of compactStructuredEncryptionData is no longer supported in this " - "binary version", - gFeatureFlagFLE2CompactForProtocolV2.isEnabled(serverGlobalParams.featureCompatibility)); - - // Only allow one instance of compactStructuredEncryptionData to run at a time. - Lock::ExclusiveLock fleCompactCommandLock(opCtx, commandMutex); - // Since this command holds an IX lock on the DB and the global lock throughout // the lifetime of this operation, setFCV should not be allowed to abort the transaction // performing the compaction. 
Otherwise, on retry, the transaction may attempt to @@ -108,7 +127,8 @@ CompactStats compactEncryptedCompactionCollection(OperationContext* opCtx, "Cannot compact structured encryption data on a view", !catalog->lookupView(opCtx, edcNss)); uasserted(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection '" << edcNss << "' does not exist"); + str::stream() << "Collection '" << edcNss.toStringForErrorMsg() + << "' does not exist"); } validateCompactRequest(request, *edc); @@ -116,7 +136,12 @@ CompactStats compactEncryptedCompactionCollection(OperationContext* opCtx, auto namespaces = uassertStatusOK(EncryptedStateCollectionsNamespaces::createFromDataCollection(*edc)); + // Acquire exclusive lock on the associated 'ecoc.lock' namespace to serialize calls + // to cleanup and compact on the same EDC namespace + Lock::CollectionLock compactionLock(opCtx, namespaces.ecocLockNss, MODE_X); + // Step 1: rename the ECOC collection if it exists + catalog = CollectionCatalog::get(opCtx); auto ecoc = catalog->lookupCollectionByNamespace(opCtx, namespaces.ecocNss); auto ecocRename = catalog->lookupCollectionByNamespace(opCtx, namespaces.ecocRenameNss); @@ -183,7 +208,7 @@ CompactStats compactEncryptedCompactionCollection(OperationContext* opCtx, uassert(ErrorCodes::NamespaceNotFound, str::stream() << "Renamed encrypted compaction collection " - << namespaces.ecocRenameNss + << namespaces.ecocRenameNss.toStringForErrorMsg() << " no longer exists prior to compaction", tempEcocColl.getCollection()); diff --git a/src/mongo/db/commands/fle2_get_count_info_command.cpp b/src/mongo/db/commands/fle2_get_count_info_command.cpp index 5564233556472..f7146d4c07481 100644 --- a/src/mongo/db/commands/fle2_get_count_info_command.cpp +++ b/src/mongo/db/commands/fle2_get_count_info_command.cpp @@ -27,15 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/bson/bsonobj.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_stats_gen.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/fle2_get_count_info_command_gen.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/fle_crud.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/util/assert_util.h" @@ -78,16 +96,23 @@ QECountInfoReplyTokens tokenFromCountInfo(const FLEEdgeCountInfo& countInfo) { token.setEDCDerivedFromDataTokenAndContentionFactorToken(countInfo.edc.value().toCDR()); } - if (countInfo.cpos || countInfo.apos) { + token.setSearchedPositions(countInfo.searchedCounts.map([](const auto& pair) { ESCOptionalPositionsPair spos; - if (countInfo.cpos) { - spos.setCpos(countInfo.cpos.get()); + if (pair.cpos) { + spos.setCpos(pair.cpos.get()); } - if (countInfo.apos) { - spos.setApos(countInfo.apos.get()); + if (pair.apos) { + spos.setApos(pair.apos.get()); } - token.setSearchedPositions(spos); - } + return spos; + })); + + token.setNullAnchorPositions(countInfo.nullAnchorCounts.map([](const auto& pair) { + ESCPositionsPair newPair; + newPair.setApos(pair.apos); + newPair.setCpos(pair.cpos); + return newPair; + })); if (countInfo.stats) { token.setStats(countInfo.stats.get()); @@ -125,6 +150,8 @@ FLEQueryInterface::TagQueryType queryTypeTranslation(QECountInfoQueryTypeEnum ty return FLEQueryInterface::TagQueryType::kQuery; case QECountInfoQueryTypeEnum::Compact: return FLEQueryInterface::TagQueryType::kCompact; + case QECountInfoQueryTypeEnum::Cleanup: + return FLEQueryInterface::TagQueryType::kCleanup; default: uasserted(7517102, "Invalid QECountInfoQueryTypeEnum value."); } @@ -177,8 +204,9 @@ class GetQueryableEncryptionCountInfoCmd final auto* as = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, "Not authorized to read tags", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } NamespaceString ns() const final { diff --git a/src/mongo/db/commands/fle2_get_count_info_command.idl b/src/mongo/db/commands/fle2_get_count_info_command.idl index 0914ee9f5cc92..2caf9a38816db 100644 --- a/src/mongo/db/commands/fle2_get_count_info_command.idl +++ b/src/mongo/db/commands/fle2_get_count_info_command.idl @@ -86,7 +86,7 @@ structs: cpp_name: NullAnchorPositions optional: true stats: - description: "stats returned for compaction algorithm" + description: "stats returned for compaction or cleanup algorithms" type: ECStats optional: true @@ -134,6 +134,7 @@ enums: Insert: "insert" Query: "query" Compact: "compact" + Cleanup: "cleanup" commands: getQueryableEncryptionCountInfo: @@ -148,6 +149,6 @@ commands: description: "Array of tokens to fetch" type: array queryType: - description: "Purpose of command, either for 
insert, query, or compact" + description: "Purpose of command, either for insert, query, compact, or cleanup" type: QECountInfoQueryType diff --git a/src/mongo/db/commands/fle_compact_test.cpp b/src/mongo/db/commands/fle_compact_test.cpp index da83ffe3436e4..7bf3c3796e44e 100644 --- a/src/mongo/db/commands/fle_compact_test.cpp +++ b/src/mongo/db/commands/fle_compact_test.cpp @@ -27,24 +27,66 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/secure_allocator.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/crypto/aead_encryption.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/crypto/fle_stats_gen.h" +#include "mongo/crypto/symmetric_key.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" #include "mongo/db/commands/fle2_compact.h" #include "mongo/db/fle_crud.h" #include "mongo/db/fle_query_interface_mock.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/platform/random.h" #include "mongo/shell/kms_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/hex.h" +#include "mongo/util/murmur3.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -134,9 +176,8 @@ BSONObj TestKeyVault::getEncryptedKey(const UUID& uuid) { } UUID fieldNameToUUID(StringData field) { - std::array buf; - - MurmurHash3_x86_128(field.rawData(), field.size(), 123456, buf.data()); + std::array buf; + murmur3(field, 123456 /*seed*/, buf); return UUID::fromCDR(buf); } @@ -147,11 +188,6 @@ class FleCompactTest : public ServiceContextMongoDTest { ESCTwiceDerivedTagToken twiceDerivedTag; ESCTwiceDerivedValueToken twiceDerivedValue; }; - struct ECCTestTokens { - ECCDerivedFromDataTokenAndContentionFactorToken contentionDerived; - ECCTwiceDerivedTagToken twiceDerivedTag; - ECCTwiceDerivedValueToken twiceDerivedValue; - }; struct InsertionState { uint64_t count{0}; uint64_t pos{0}; @@ -167,10 +203,15 @@ class FleCompactTest : public ServiceContextMongoDTest { void createCollection(const NamespaceString& ns); - void assertDocumentCounts(uint64_t edc, uint64_t esc, uint64_t ecc, uint64_t ecoc); + void assertDocumentCounts(uint64_t edc, + uint64_t esc, + uint64_t ecoc, + boost::optional escDeletes = boost::none); void assertESCNonAnchorDocument(BSONObj obj, bool exists, uint64_t cpos); - void 
assertESCAnchorDocument(BSONObj obj, bool exists, uint64_t apos, uint64_t cpos); + void assertESCAnchorDocument( + BSONObj obj, bool exists, uint64_t apos, uint64_t cpos, bool nullAnchor = false); + void assertESCNullAnchorDocument(BSONObj obj, bool exists, uint64_t apos, uint64_t cpos); ESCTestTokens getTestESCTokens(BSONObj obj); @@ -179,8 +220,7 @@ class FleCompactTest : public ServiceContextMongoDTest { EncryptedFieldConfig generateEncryptedFieldConfig( const std::set& encryptedFieldNames); - CompactStructuredEncryptionData generateCompactCommand( - const std::set& encryptedFieldNames); + BSONObj generateCompactionTokens(const std::set& encryptedFieldNames); std::vector generatePlaceholder(UUID keyId, BSONElement value); @@ -188,6 +228,12 @@ class FleCompactTest : public ServiceContextMongoDTest { void insertFieldValues(StringData fieldName, std::map& values); + void doInsertAndCompactCycles(StringData fieldName, + std::map& values, + bool compactOnLastCycle, + uint64_t cycles, + uint64_t insertsPerCycle = 1); + protected: ServiceContext::UniqueOperationContext _opCtx; @@ -224,10 +270,12 @@ void FleCompactTest::setUp() { _namespaces.escNss = NamespaceString::createNamespaceString_forTest("test.enxcol_.coll.esc"); _namespaces.ecocNss = NamespaceString::createNamespaceString_forTest("test.enxcol_.coll.ecoc"); _namespaces.ecocRenameNss = NamespaceString::createNamespaceString_forTest("test.ecoc.compact"); + _namespaces.escDeletesNss = NamespaceString::createNamespaceString_forTest("test.esc.deletes"); createCollection(_namespaces.edcNss); createCollection(_namespaces.escNss); createCollection(_namespaces.ecocNss); + createCollection(_namespaces.escDeletesNss); } void FleCompactTest::tearDown() { @@ -246,10 +294,16 @@ void FleCompactTest::createCollection(const NamespaceString& ns) { ASSERT_OK(statusCC); } -void FleCompactTest::assertDocumentCounts(uint64_t edc, uint64_t esc, uint64_t ecc, uint64_t ecoc) { +void FleCompactTest::assertDocumentCounts(uint64_t edc, + uint64_t esc, + uint64_t ecoc, + boost::optional escDeletes) { ASSERT_EQ(_queryImpl->countDocuments(_namespaces.edcNss), edc); ASSERT_EQ(_queryImpl->countDocuments(_namespaces.escNss), esc); ASSERT_EQ(_queryImpl->countDocuments(_namespaces.ecocNss), ecoc); + if (escDeletes) { + ASSERT_EQ(_queryImpl->countDocuments(_namespaces.escDeletesNss), *escDeletes); + } } void FleCompactTest::assertESCNonAnchorDocument(BSONObj obj, bool exists, uint64_t cpos) { @@ -260,23 +314,29 @@ void FleCompactTest::assertESCNonAnchorDocument(BSONObj obj, bool exists, uint64 ASSERT(!doc.hasField("value"_sd)); } -void FleCompactTest::assertESCAnchorDocument(BSONObj obj, - bool exists, - uint64_t apos, - uint64_t cpos) { +void FleCompactTest::assertESCAnchorDocument( + BSONObj obj, bool exists, uint64_t apos, uint64_t cpos, bool nullAnchor) { auto tokens = getTestESCTokens(obj); - auto doc = _queryImpl->getById(_namespaces.escNss, - ESCCollection::generateAnchorId(tokens.twiceDerivedTag, apos)); + auto id = nullAnchor ? ESCCollection::generateNullAnchorId(tokens.twiceDerivedTag) + : ESCCollection::generateAnchorId(tokens.twiceDerivedTag, apos); + auto doc = _queryImpl->getById(_namespaces.escNss, id); ASSERT_EQ(doc.isEmpty(), !exists); if (exists) { auto anchorDoc = uassertStatusOK(ESCCollection::decryptAnchorDocument(tokens.twiceDerivedValue, doc)); - ASSERT_EQ(anchorDoc.position, 0); + ASSERT_EQ(anchorDoc.position, nullAnchor ? 
apos : 0); ASSERT_EQ(anchorDoc.count, cpos); } } +void FleCompactTest::assertESCNullAnchorDocument(BSONObj obj, + bool exists, + uint64_t apos, + uint64_t cpos) { + return assertESCAnchorDocument(obj, exists, apos, cpos, true); +} + FleCompactTest::ESCTestTokens FleCompactTest::getTestESCTokens(BSONObj obj) { auto element = obj.firstElement(); auto indexKeyId = fieldNameToUUID(element.fieldNameStringData()); @@ -328,13 +388,9 @@ EncryptedFieldConfig FleCompactTest::generateEncryptedFieldConfig( return efc; } -CompactStructuredEncryptionData FleCompactTest::generateCompactCommand( - const std::set& encryptedFieldNames) { - CompactStructuredEncryptionData cmd(_namespaces.edcNss); +BSONObj FleCompactTest::generateCompactionTokens(const std::set& encryptedFieldNames) { auto efc = generateEncryptedFieldConfig(encryptedFieldNames); - auto compactionTokens = FLEClientCrypto::generateCompactionTokens(efc, &_keyVault); - cmd.setCompactionTokens(compactionTokens); - return cmd; + return FLEClientCrypto::generateCompactionTokens(efc, &_keyVault); } std::vector FleCompactTest::generatePlaceholder(UUID keyId, BSONElement value) { @@ -400,12 +456,38 @@ void FleCompactTest::insertFieldValues(StringData field, } } +void FleCompactTest::doInsertAndCompactCycles(StringData fieldName, + std::map& values, + bool compactOnLastCycle, + uint64_t cycles, + uint64_t insertsPerCycle) { + ECStats escStats; + + for (; cycles > 0; cycles--) { + for (auto& [value, state] : values) { + state.toInsertCount = insertsPerCycle; + } + + insertFieldValues(fieldName, values); + + if (!compactOnLastCycle && cycles == 1) { + break; + } + + for (auto& [value, state] : values) { + auto testPair = BSON(fieldName << value); + auto ecocDoc = generateTestECOCDocumentV2(testPair); + compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats); + } + } +} + TEST_F(FleCompactTest, GetUniqueECOCDocsFromEmptyECOC) { ECOCStats stats; std::set fieldSet = {"first", "ssn"}; - auto cmd = generateCompactCommand(fieldSet); - auto docs = getUniqueCompactionDocumentsV2(_queryImpl.get(), cmd, _namespaces.ecocNss, &stats); + auto tokens = generateCompactionTokens(fieldSet); + auto docs = getUniqueCompactionDocuments(_queryImpl.get(), tokens, _namespaces.ecocNss, &stats); ASSERT(docs.empty()); } @@ -427,10 +509,10 @@ TEST_F(FleCompactTest, GetUniqueECOCDocsMultipleFieldsWithManyDuplicateValues) { insertFieldValues(field, values); } - assertDocumentCounts(numInserted, numInserted, 0, numInserted); + assertDocumentCounts(numInserted, numInserted, numInserted); - auto cmd = generateCompactCommand(fieldSet); - auto docs = getUniqueCompactionDocumentsV2(_queryImpl.get(), cmd, _namespaces.ecocNss, &stats); + auto tokens = generateCompactionTokens(fieldSet); + auto docs = getUniqueCompactionDocuments(_queryImpl.get(), tokens, _namespaces.ecocNss, &stats); ASSERT(docs == expected); } @@ -440,18 +522,16 @@ TEST_F(FleCompactTest, CompactValueV2_NoNonAnchors) { auto testPair = BSON("first" << "brian"); auto ecocDoc = generateTestECOCDocumentV2(testPair); - assertDocumentCounts(0, 0, 0, 0); + assertDocumentCounts(0, 0, 0); - // Compact an empty ESC; assert compact inserts anchor at apos = 1 with cpos = 0 + // Compact an empty ESC; assert an error is thrown because compact should not be called + // if there are no ESC entries that correspond to the ECOC document. 
// Note: this tests compact where EmuBinary returns (cpos = 0, apos = 0) - compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats); - assertDocumentCounts(0, 1, 0, 0); - assertESCAnchorDocument(testPair, true, 1, 0); - - // Compact an ESC containing only non-null anchors - // Note: this tests compact where EmuBinary returns (cpos = null, apos > 0) - compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats); - assertDocumentCounts(0, 1, 0, 0); + ASSERT_THROWS_CODE( + compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats), + DBException, + 7666502); + assertDocumentCounts(0, 0, 0); } TEST_F(FleCompactTest, CompactValueV2_NoNullAnchors) { @@ -465,7 +545,7 @@ TEST_F(FleCompactTest, CompactValueV2_NoNullAnchors) { // Insert 15 of the same value; assert non-anchors 1 thru 15 values[value].toInsertCount = 15; insertFieldValues(key, values); - assertDocumentCounts(15, 15, 0, 15); + assertDocumentCounts(15, 15, 15); for (uint64_t i = 1; i <= 15; i++) { assertESCNonAnchorDocument(testPair, true, i); } @@ -473,18 +553,18 @@ TEST_F(FleCompactTest, CompactValueV2_NoNullAnchors) { // Compact ESC which should only have non-anchors // Note: this tests compact where EmuBinary returns (cpos > 0, apos = 0) compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats); - assertDocumentCounts(15, 16, 0, 15); + assertDocumentCounts(15, 16, 15); assertESCAnchorDocument(testPair, true, 1, 15); // Compact ESC which should now have a fresh anchor and stale non-anchors // Note: this tests compact where EmuBinary returns (cpos = null, apos > 0) compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats); - assertDocumentCounts(15, 16, 0, 15); + assertDocumentCounts(15, 16, 15); // Insert another 15 of the same value; assert non-anchors 16 thru 30 values[value].toInsertCount = 15; insertFieldValues(key, values); - assertDocumentCounts(30, 31, 0, 30); + assertDocumentCounts(30, 31, 30); for (uint64_t i = 16; i <= 30; i++) { assertESCNonAnchorDocument(testPair, true, i); } @@ -492,10 +572,58 @@ TEST_F(FleCompactTest, CompactValueV2_NoNullAnchors) { // Compact ESC which should now have fresh anchors and fresh non-anchors // Note: this tests compact where EmuBinary returns (cpos > 0, apos > 0) compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats); - assertDocumentCounts(30, 32, 0, 30); + assertDocumentCounts(30, 32, 30); assertESCAnchorDocument(testPair, true, 2, 30); } +TEST_F(FleCompactTest, CompactValueV2_WithNullAnchor) { + ECStats escStats; + constexpr auto key = "first"_sd; + const std::string value = "roger"; + auto testPair = BSON(key << value); + auto ecocDoc = generateTestECOCDocumentV2(testPair); + std::map values = {{value, {}}}; + + uint64_t edcCount = 0; + uint64_t escCount = 0; + uint64_t ecocCount = 0; + + // Insert new documents + values[value].toInsertCount = 5; + insertFieldValues(key, values); + edcCount = values[value].count; + escCount += values[value].count; + ecocCount = edcCount; + assertDocumentCounts(edcCount, escCount, ecocCount); + + // Run cleanup on ESC to insert the null anchor + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + escCount++; + assertDocumentCounts(edcCount, escCount, ecocCount, 0); + assertESCNullAnchorDocument(testPair, true, 0 /*apos*/, 5 /*cpos*/); + + // Compact ESC which now contains the null anchor + stale non-anchors; assert no change + // 
Note: this tests compact where EmuBinary returns (cpos = null, apos = null) + compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats); + assertDocumentCounts(edcCount, escCount, ecocCount); + + // Insert new documents + values[value].toInsertCount = 5; + insertFieldValues(key, values); + edcCount += 5; + escCount += 5; + ecocCount += 5; + assertDocumentCounts(edcCount, escCount, ecocCount); + + // Compact ESC which now has non-anchors + null anchor; assert anchor is inserted with apos=1 + // Note: this tests compact where EmuBinary returns (cpos > 0, apos = null) + compactOneFieldValuePairV2(_queryImpl.get(), ecocDoc, _namespaces.escNss, &escStats); + escCount++; + assertDocumentCounts(edcCount, escCount, ecocCount); + assertESCAnchorDocument(testPair, true, 1, edcCount); +} + TEST_F(FleCompactTest, RandomESCNonAnchorDeletions) { ECStats escStats; constexpr auto key = "first"_sd; @@ -512,7 +640,7 @@ TEST_F(FleCompactTest, RandomESCNonAnchorDeletions) { // populate the ESC with 300 non-anchors values[value].toInsertCount = 300; insertFieldValues(key, values); - assertDocumentCounts(300, 300, 0, 300); + assertDocumentCounts(300, 300, 300); // read from non-empty ESC; limit to 0 tags idSet = readRandomESCNonAnchorIds(_opCtx.get(), _namespaces.escNss, 0, &escStats); @@ -528,7 +656,7 @@ TEST_F(FleCompactTest, RandomESCNonAnchorDeletions) { // delete the tags from the ESC; 30 tags at a time cleanupESCNonAnchors(_opCtx.get(), _namespaces.escNss, idSet, 30, &escStats); ASSERT_EQ(escStats.getDeleted(), deleteCount); - assertDocumentCounts(300, 300 - deleteCount, 0, 300); + assertDocumentCounts(300, 300 - deleteCount, 300); // assert the deletes are scattered // (ie. less than 150 deleted in first half of the original set of 300) @@ -544,5 +672,209 @@ TEST_F(FleCompactTest, RandomESCNonAnchorDeletions) { ASSERT_LT(counter, deleteCount); } +// Tests cleanup on an empty ESC +TEST_F(FleCompactTest, CleanupValueV2_EmptyESC) { + ECStats escStats; + auto testPair = BSON("first" + << "brian"); + auto ecocDoc = generateTestECOCDocumentV2(testPair); + assertDocumentCounts(0, 0, 0); + + // Cleanup an empty ESC; assert an error is thrown because cleanup should not be called + // if there are no ESC entries that correspond to the ECOC document. + // Note: this tests cleanup where EmuBinary returns (cpos = 0, apos = 0) + ASSERT_THROWS_CODE( + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats), + DBException, + 7618816); + assertDocumentCounts(0, 0, 0); +} + +// Tests case (E) in cleanup algorithm, where apos = null and (cpos = null or cpos > 0). +// No regular anchors exist during cleanup. 
+TEST_F(FleCompactTest, CleanupValue_NullAnchorHasLatestApos_NoAnchors) { + ECStats escStats; + constexpr auto key = "first"_sd; + const std::string value = "roger"; + auto testPair = BSON(key << value); + auto ecocDoc = generateTestECOCDocumentV2(testPair); + std::map values = {{value, {}}}; + + uint64_t edcCount = 0; + uint64_t escCount = 0; + uint64_t ecocCount = 0; + + // Insert new documents + values[value].toInsertCount = 5; + insertFieldValues(key, values); + edcCount = values[value].count; + escCount += values[value].count; + ecocCount = edcCount; + assertDocumentCounts(edcCount, escCount, ecocCount); + + // Run cleanup on empty ESC to insert the null anchor with (apos = 0, cpos = 5) + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + escCount++; + assertDocumentCounts(edcCount, escCount, ecocCount, 0); + assertESCNullAnchorDocument(testPair, true, 0 /*apos*/, 5 /*cpos*/); + + // Run cleanup again; (tests emuBinary apos = null, cpos = null) + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + assertDocumentCounts(edcCount, escCount, ecocCount, 0); + + // Assert null anchor is unchanged + assertESCNullAnchorDocument(testPair, true, 0 /*apos*/, 5 /*cpos*/); + + // Insert new documents + values[value].toInsertCount = 5; + insertFieldValues(key, values); + edcCount += 5; + escCount += 5; + ecocCount += 5; + + // Run cleanup again; (tests emuBinary apos = null, cpos > 0) + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + assertDocumentCounts(edcCount, escCount, ecocCount, 0); + + // Assert null anchor is updated with new cpos + assertESCNullAnchorDocument(testPair, true, 0 /*apos*/, 10 /*cpos*/); +} + +// Tests case (E) in cleanup algorithm, where apos = null and (cpos = null or cpos > 0). +// Regular anchors exist during cleanup. 
+TEST_F(FleCompactTest, CleanupValue_NullAnchorHasLatestApos_WithAnchors) { + ECStats escStats; + constexpr auto key = "first"_sd; + const std::string value = "roger"; + auto testPair = BSON(key << value); + auto ecocDoc = generateTestECOCDocumentV2(testPair); + std::map values = {{value, {}}}; + + uint64_t numAnchors, edcCount, ecocCount, escCount; + + // Run a few insert & compact cycles to populate the ESC with non-anchors & plain anchors + doInsertAndCompactCycles(key, values, true, 5 /*cycles*/, 25 /*insertsPerCycle*/); + numAnchors = 5; + edcCount = 25 * 5; + escCount = edcCount + numAnchors; + ecocCount = edcCount; + assertDocumentCounts(edcCount, escCount, ecocCount, 0); + assertESCAnchorDocument(testPair, true, numAnchors /*apos*/, edcCount /*cpos*/); + + // Run cleanup to insert the null anchor + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + escCount++; // null anchor inserted + assertDocumentCounts(edcCount, escCount, ecocCount, numAnchors); + assertESCNullAnchorDocument(testPair, true, numAnchors, edcCount); + + // Run cleanup again; (tests emuBinary apos = null, cpos = null) + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + assertDocumentCounts(edcCount, escCount, ecocCount, numAnchors); + + // Assert null anchor is unchanged + assertESCNullAnchorDocument(testPair, true, numAnchors, edcCount); + + // Insert 25 new documents + doInsertAndCompactCycles(key, values, false /*no compact*/, 1, 25); + edcCount += 25; + escCount += 25; + ecocCount = edcCount; + assertDocumentCounts(edcCount, escCount, ecocCount, numAnchors); + + // Run cleanup again; (tests emuBinary apos = null, cpos > 0) + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + assertDocumentCounts(edcCount, escCount, ecocCount, numAnchors); + + // Assert null anchor is updated with new cpos + assertESCNullAnchorDocument(testPair, true, numAnchors, edcCount); +} + +// Tests case (F) in cleanup algorithm, where apos = 0 and (cpos > 0) +TEST_F(FleCompactTest, CleanupValue_NoAnchorsExist) { + ECStats escStats; + constexpr auto key = "first"_sd; + const std::string value = "roger"; + auto testPair = BSON(key << value); + auto ecocDoc = generateTestECOCDocumentV2(testPair); + std::map values = {{value, {}}}; + + uint64_t edcCount = 0; + uint64_t escCount = 0; + uint64_t ecocCount = 0; + + // Insert new documents + values[value].toInsertCount = 5; + insertFieldValues(key, values); + edcCount = values[value].count; + escCount += edcCount; + ecocCount = edcCount; + assertDocumentCounts(edcCount, escCount, ecocCount, 0); + + // Cleanup ESC with new non-anchors; (tests emuBinary apos = 0, cpos > 0) + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + escCount++; // null anchor inserted + assertDocumentCounts(edcCount, escCount, ecocCount, 0); + + // Assert null doc is inserted with apos = 0, cpos = 5 + assertESCNullAnchorDocument(testPair, true, 0, edcCount); +} + +// Tests case (G) in cleanup algorithm, where apos > 0 and (cpos = null or cpos > 0) +TEST_F(FleCompactTest, CleanupValue_NewAnchorsExist) { + ECStats escStats; + constexpr auto key = "first"_sd; + const std::string value = "roger"; + auto testPair = BSON(key << value); + auto ecocDoc = generateTestECOCDocumentV2(testPair); + std::map values = {{value, {}}}; + + uint64_t numAnchors, edcCount, ecocCount, 
escCount; + uint64_t escDeletesCount = 0; + + // Run a few insert & compact cycles to populate the ESC with non-anchors & plain anchors + doInsertAndCompactCycles(key, values, true, 5 /*cycles*/, 25 /*insertsPerCycle*/); + numAnchors = 5; + edcCount = 25 * 5; + escCount = edcCount + numAnchors; + ecocCount = edcCount; + assertDocumentCounts(edcCount, escCount, ecocCount, 0); + assertESCAnchorDocument(testPair, true, numAnchors /*apos*/, edcCount /*cpos*/); + + // Run cleanup; (tests emuBinary apos > 0, cpos = null) + // Assert null doc is inserted & has the latest apos & cpos + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + escCount++; // null anchor inserted + escDeletesCount = numAnchors; + assertDocumentCounts(edcCount, escCount, ecocCount, escDeletesCount); + assertESCNullAnchorDocument(testPair, true, numAnchors, edcCount); + + // Run a few more insert & compact cycles, but don't compact on last cycle + doInsertAndCompactCycles(key, values, false /*compactOnLastCycle*/, 5, 25); + numAnchors += 4; + edcCount += (25 * 5); + escCount += (4 + 25 * 5); + ecocCount = edcCount; + assertDocumentCounts(edcCount, escCount, ecocCount, escDeletesCount); + // assert latest anchor has cpos that is 25 inserts stale. + assertESCAnchorDocument(testPair, true, numAnchors, edcCount - 25); + + // Run cleanup; (tests emuBinary apos > 0, cpos > 0) + // Assert null doc is updated with the latest apos & cpos + cleanupOneFieldValuePair( + _queryImpl.get(), ecocDoc, _namespaces.escNss, _namespaces.escDeletesNss, &escStats); + escDeletesCount = numAnchors; + assertDocumentCounts(edcCount, escCount, ecocCount, escDeletesCount); + assertESCNullAnchorDocument(testPair, true, numAnchors, edcCount); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/commands/fsync.cpp b/src/mongo/db/commands/fsync.cpp index db6dd91ff33c0..7a088b1c9cfd7 100644 --- a/src/mongo/db/commands/fsync.cpp +++ b/src/mongo/db/commands/fsync.cpp @@ -28,65 +28,64 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/fsync.h" - +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include #include -#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/fsync.h" #include "mongo/db/commands/fsync_locked.h" +#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/service_context.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/storage/backup_cursor_hooks.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/stdx/condition_variable.h" #include 
"mongo/util/assert_util.h" -#include "mongo/util/background.h" -#include "mongo/util/exit.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - namespace mongo { -namespace { -// Ensures that only one command is operating on fsyncLock state at a time. As a 'ResourceMutex', -// lock time will be reported for a given user operation. -Lock::ResourceMutex commandMutex("fsyncCommandMutex"); +// Protects access to globalFsyncLockThread and other global fsync state. +Mutex fsyncStateMutex = MONGO_MAKE_LATCH("fsyncStateMutex"); -/** - * Maintains a global read lock while mongod is fsyncLocked. - */ -class FSyncLockThread : public BackgroundJob { -public: - FSyncLockThread(ServiceContext* serviceContext, bool allowFsyncFailure) - : BackgroundJob(false), - _serviceContext(serviceContext), - _allowFsyncFailure(allowFsyncFailure) {} +// Globally accessible FsyncLockThread to allow shutdown to coordinate with any active fsync cmds. +// Must acquire the 'fsyncStateMutex' before accessing. +std::unique_ptr globalFsyncLockThread = nullptr; - std::string name() const override { - return "FSyncLockThread"; - } +// Exposed publically via extern in fsync.h. +SimpleMutex filesLockedFsync; - void run() override; +namespace { -private: - ServiceContext* const _serviceContext; - bool _allowFsyncFailure; - static bool _shutdownTaskRegistered; -}; +// Ensures that only one command is operating on fsyncLock state at a time. As a 'ResourceMutex', +// lock time will be reported for a given user operation. +Lock::ResourceMutex fsyncSingleCommandExclusionMutex("fsyncSingleCommandExclusionMutex"); class FSyncCommand : public BasicCommand { public: @@ -99,12 +98,12 @@ class FSyncCommand : public BasicCommand { virtual ~FSyncCommand() { // The FSyncLockThread is owned by the FSyncCommand and accesses FsyncCommand state. It must // be shut down prior to FSyncCommand destruction. 
- stdx::unique_lock lk(lockStateMutex); + stdx::unique_lock lk(fsyncStateMutex); if (_lockCount > 0) { _lockCount = 0; releaseFsyncLockSyncCV.notify_one(); - _lockThread->wait(); - _lockThread.reset(nullptr); + globalFsyncLockThread->wait(); + globalFsyncLockThread.reset(nullptr); } } @@ -128,8 +127,8 @@ class FSyncCommand : public BasicCommand { const DatabaseName& dbName, const BSONObj& cmdObj) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::fsync)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::fsync)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -164,28 +163,28 @@ class FSyncCommand : public BasicCommand { return true; } - Lock::ExclusiveLock lk(opCtx, commandMutex); + Lock::ExclusiveLock lk(opCtx, fsyncSingleCommandExclusionMutex); const auto lockCountAtStart = getLockCount(); - invariant(lockCountAtStart > 0 || !_lockThread); + invariant(lockCountAtStart > 0 || !globalFsyncLockThread); acquireLock(); if (lockCountAtStart == 0) { Status status = Status::OK(); { - stdx::unique_lock lk(lockStateMutex); + stdx::unique_lock lk(fsyncStateMutex); threadStatus = Status::OK(); threadStarted = false; - _lockThread = std::make_unique(opCtx->getServiceContext(), - allowFsyncFailure); - _lockThread->go(); + globalFsyncLockThread = std::make_unique( + opCtx->getServiceContext(), allowFsyncFailure); + globalFsyncLockThread->go(); while (!threadStarted && threadStatus.isOK()) { acquireFsyncLockSyncCV.wait(lk); } - // 'threadStatus' must be copied while 'lockStateMutex' is held. + // 'threadStatus' must be copied while 'fsyncStateMutex' is held. status = threadStatus; } @@ -211,29 +210,39 @@ class FSyncCommand : public BasicCommand { return true; } - // Returns whether we are currently fsyncLocked. For use by callers not holding lockStateMutex. + /** + * Returns whether we are currently fsyncLocked. For use by callers not holding fsyncStateMutex. + */ bool fsyncLocked() { stdx::unique_lock lkFsyncLocked(_fsyncLockedMutex); return _fsyncLocked; } - // For callers not already holding 'lockStateMutex'. + /** + * For callers not already holding 'fsyncStateMutex'. + */ int64_t getLockCount() { - stdx::unique_lock lk(lockStateMutex); + stdx::unique_lock lk(fsyncStateMutex); return getLockCount_inLock(); } - // 'lockStateMutex' must be held when calling. + /** + * 'fsyncStateMutex' must be held when calling. + */ int64_t getLockCount_inLock() { return _lockCount; } void releaseLock() { - stdx::unique_lock lk(lockStateMutex); + stdx::unique_lock lk(fsyncStateMutex); releaseLock_inLock(lk); } - void releaseLock_inLock(stdx::unique_lock& lk) { + /** + * Returns false if the fsync lock was recursively locked. Returns true if the fysnc lock is + * released. + */ + bool releaseLock_inLock(stdx::unique_lock& lk) { invariant(_lockCount >= 1); _lockCount--; @@ -244,25 +253,26 @@ class FSyncCommand : public BasicCommand { } releaseFsyncLockSyncCV.notify_one(); lk.unlock(); - _lockThread->wait(); - _lockThread.reset(nullptr); + globalFsyncLockThread->wait(); + globalFsyncLockThread.reset(nullptr); + return true; } + return false; } // Allows for control of lock state change between the fsyncLock and fsyncUnlock commands and // the FSyncLockThread that maintains the global read lock. 
- Mutex lockStateMutex = MONGO_MAKE_LATCH("FSyncCommand::lockStateMutex"); stdx::condition_variable acquireFsyncLockSyncCV; stdx::condition_variable releaseFsyncLockSyncCV; - // 'lockStateMutex' must be held to modify or read. + // 'fsyncStateMutex' must be held to modify or read. Status threadStatus = Status::OK(); - // 'lockStateMutex' must be held to modify or read. + // 'fsyncStateMutex' must be held to modify or read. bool threadStarted = false; private: void acquireLock() { - stdx::unique_lock lk(lockStateMutex); + stdx::unique_lock lk(fsyncStateMutex); _lockCount++; if (_lockCount == 1) { @@ -271,10 +281,8 @@ class FSyncCommand : public BasicCommand { } } - std::unique_ptr _lockThread; - // The number of lock requests currently held. We will only release the fsyncLock when this - // number is decremented to 0. May only be accessed while 'lockStateMutex' is held. + // number is decremented to 0. May only be accessed while 'fsyncStateMutex' is held. int64_t _lockCount = 0; Mutex _fsyncLockedMutex = MONGO_MAKE_LATCH("FSyncCommand::_fsyncLockedMutex"); @@ -299,11 +307,12 @@ class FSyncUnlockCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { - bool isAuthorized = AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::unlock); + bool isAuthorized = + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::unlock); return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized"); } @@ -314,9 +323,9 @@ class FSyncUnlockCommand : public BasicCommand { BSONObjBuilder& result) override { LOGV2(20465, "command: unlock requested"); - Lock::ExclusiveLock lk(opCtx, commandMutex); + Lock::ExclusiveLock lk(opCtx, fsyncSingleCommandExclusionMutex); - stdx::unique_lock stateLock(fsyncCmd.lockStateMutex); + stdx::unique_lock stateLock(fsyncStateMutex); auto lockCount = fsyncCmd.getLockCount_inLock(); @@ -344,12 +353,26 @@ class FSyncUnlockCommand : public BasicCommand { } fsyncUnlockCmd; -bool FSyncLockThread::_shutdownTaskRegistered = false; +} // namespace + +void FSyncLockThread::shutdown(stdx::unique_lock& stateLock) { + if (fsyncCmd.getLockCount_inLock() > 0) { + LOGV2_WARNING(20469, "Interrupting fsync because the server is shutting down"); + while (!fsyncCmd.releaseLock_inLock(stateLock)) + ; + } +} void FSyncLockThread::run() { ThreadClient tc("fsyncLockWorker", _serviceContext); stdx::lock_guard lkf(filesLockedFsync); - stdx::unique_lock lk(fsyncCmd.lockStateMutex); + stdx::unique_lock stateLock(fsyncStateMutex); + + // TODO(SERVER-74657): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } invariant(fsyncCmd.getLockCount_inLock() == 1); @@ -360,24 +383,6 @@ void FSyncLockThread::run() { StorageEngine* storageEngine = _serviceContext->getStorageEngine(); - // The fsync shutdown task has to be registered once the server is running otherwise it - // conflicts with the servers shutdown task. 
- if (!_shutdownTaskRegistered) { - _shutdownTaskRegistered = true; - registerShutdownTask([&] { - stdx::unique_lock stateLock(fsyncCmd.lockStateMutex); - if (fsyncCmd.getLockCount_inLock() > 0) { - LOGV2_WARNING(20469, "Interrupting fsync because the server is shutting down"); - while (fsyncCmd.getLockCount_inLock()) { - // Relies on the lock to be released in 'releaseLock_inLock()' when the - // release brings the lock count to 0. - invariant(stateLock); - fsyncCmd.releaseLock_inLock(stateLock); - } - } - }); - } - try { storageEngine->flushAllFiles(&opCtx, /*callerHoldsReadLock*/ true); } catch (const std::exception& e) { @@ -395,9 +400,11 @@ void FSyncLockThread::run() { bool successfulFsyncLock = false; auto backupCursorHooks = BackupCursorHooks::get(_serviceContext); try { + // TODO SERVER-65920: Create a NamespaceString for logging with the "global" ns in + // writeConflictRetry. writeConflictRetry(&opCtx, "beginBackup", - "global", + NamespaceString("global"), [&opCtx, backupCursorHooks, &successfulFsyncLock, storageEngine] { if (backupCursorHooks->enabled()) { backupCursorHooks->fsyncLock(&opCtx); @@ -436,7 +443,7 @@ void FSyncLockThread::run() { 20471, "WARNING: instance is locked, blocking all writes. The fsync command has " "finished execution, remember to unlock the instance using fsyncUnlock()."); - fsyncCmd.releaseFsyncLockSyncCV.wait_for(lk, Seconds(60).toSystemDuration()); + fsyncCmd.releaseFsyncLockSyncCV.wait_for(stateLock, Seconds(60).toSystemDuration()); } if (successfulFsyncLock) { @@ -459,9 +466,4 @@ MONGO_INITIALIZER(fsyncLockedForWriting)(InitializerContext* context) { setLockedForWritingImpl([]() { return fsyncCmd.fsyncLocked(); }); } -} // namespace - -// Exposed publically via extern in fsync.h. -SimpleMutex filesLockedFsync; - } // namespace mongo diff --git a/src/mongo/db/commands/fsync.h b/src/mongo/db/commands/fsync.h index c091d5f04cd16..2c51a39fce055 100644 --- a/src/mongo/db/commands/fsync.h +++ b/src/mongo/db/commands/fsync.h @@ -29,13 +29,59 @@ #pragma once +#include +#include +#include + +#include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/background.h" #include "mongo/util/concurrency/mutex.h" namespace mongo { +/** + * Maintains a global read lock while mongod is fsyncLocked. + */ +class FSyncLockThread : public BackgroundJob { +public: + FSyncLockThread(ServiceContext* serviceContext, bool allowFsyncFailure) + : BackgroundJob(false), + _serviceContext(serviceContext), + _allowFsyncFailure(allowFsyncFailure) {} + + std::string name() const override { + return "FSyncLockThread"; + } + + void run() override; + + /** + * Releases the fsync lock for shutdown. + */ + void shutdown(stdx::unique_lock& lk); + +private: + ServiceContext* const _serviceContext; + bool _allowFsyncFailure; +}; + /** * Allows holders to block on an active fsyncLock. */ extern SimpleMutex filesLockedFsync; +/** + * Must be taken before accessing globalFsyncLockThread below. + */ +extern Mutex fsyncStateMutex; + +/** + * The FSyncLockThread must be external available for interruption during shutdown. + * Must lock the 'fsyncStateMutex' before accessing. + * + * TODO (SERVER-76131): consider whether this should decorate the service context. 
+ */ +extern std::unique_ptr globalFsyncLockThread; + } // namespace mongo diff --git a/src/mongo/db/commands/fsync_locked.cpp b/src/mongo/db/commands/fsync_locked.cpp index 322506aeb5a7e..34972061f4653 100644 --- a/src/mongo/db/commands/fsync_locked.cpp +++ b/src/mongo/db/commands/fsync_locked.cpp @@ -28,6 +28,7 @@ */ #include +#include #include "mongo/db/commands/fsync_locked.h" diff --git a/src/mongo/db/commands/generic.cpp b/src/mongo/db/commands/generic.cpp index 43ff09312ad37..b8aa25f966eb1 100644 --- a/src/mongo/db/commands/generic.cpp +++ b/src/mongo/db/commands/generic.cpp @@ -28,21 +28,43 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include -#include "mongo/bson/util/bson_extract.h" -#include "mongo/bson/util/builder.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/generic_gen.h" -#include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/log_process_details.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" -#include "mongo/util/processinfo.h" - -#include -#include -#include +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -280,8 +302,9 @@ class CmdLogMessage : public TypedCommand { auto* as = AuthorizationSession::get(client); uassert(ErrorCodes::Unauthorized, "Not authorized to send custom message to log", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::applicationMessage)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::applicationMessage)); } NamespaceString ns() const final { diff --git a/src/mongo/db/commands/generic_servers.cpp b/src/mongo/db/commands/generic_servers.cpp index a565f4924f586..73d8067ebd161 100644 --- a/src/mongo/db/commands/generic_servers.cpp +++ b/src/mongo/db/commands/generic_servers.cpp @@ -28,27 +28,53 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/bson/util/bson_extract.h" -#include "mongo/bson/util/builder.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/commands.h" #include "mongo/db/commands/generic_servers_gen.h" #include "mongo/db/commands/test_commands_enabled.h" +#include 
"mongo/db/database_name.h" #include "mongo/db/log_process_details.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/log_util.h" #include "mongo/logv2/ramlog.h" +#include "mongo/platform/compiler.h" #include "mongo/scripting/engine.h" -#include "mongo/util/exit.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #include "mongo/util/net/socket_utils.h" -#include "mongo/util/ntservice.h" #include "mongo/util/processinfo.h" - -#include -#include +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -111,8 +137,9 @@ void FeaturesCmd::Invocation::doCheckAuthorization(OperationContext* opCtx) cons auto* as = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, "Not authorized to reset machine identifier", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::oidReset)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::oidReset)); } } template <> @@ -144,8 +171,9 @@ void HostInfoCmd::Invocation::doCheckAuthorization(OperationContext* opCtx) cons auto* as = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, "Not authorized to read hostInfo", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::hostInfo)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::hostInfo)); } template <> HostInfoReply HostInfoCmd::Invocation::typedRun(OperationContext*) { @@ -189,8 +217,9 @@ void GetCmdLineOptsCmd::Invocation::doCheckAuthorization(OperationContext* opCtx auto* as = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, "Not authorized to read command line options", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::getCmdLineOpts)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::getCmdLineOpts)); } template <> GetCmdLineOptsReply GetCmdLineOptsCmd::Invocation::typedRun(OperationContext*) { @@ -208,8 +237,9 @@ void LogRotateCmd::Invocation::doCheckAuthorization(OperationContext* opCtx) con auto* as = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, "Not authorized to rotate logs", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::logRotate)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::logRotate)); } template <> OkReply LogRotateCmd::Invocation::typedRun(OperationContext* opCtx) { @@ -262,11 +292,11 @@ class GetLogCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::getLog)) { + if (!as->isAuthorizedForActionsOnResource( + 
ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::getLog)) { return {ErrorCodes::Unauthorized, "Not authorized to get log"}; } return Status::OK(); diff --git a/src/mongo/db/commands/get_cluster_parameter_command.cpp b/src/mongo/db/commands/get_cluster_parameter_command.cpp index 2e11ef26363a4..80da6a60676fb 100644 --- a/src/mongo/db/commands/get_cluster_parameter_command.cpp +++ b/src/mongo/db/commands/get_cluster_parameter_command.cpp @@ -28,17 +28,30 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/audit.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" #include "mongo/db/commands/get_cluster_parameter_invocation.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_feature_flags_gen.h" -#include "mongo/idl/cluster_server_parameter_gen.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -92,7 +105,8 @@ class GetClusterParameterCommand final : public TypedCommandisAuthorizedForPrivilege(Privilege{ - ResourcePattern::forClusterResource(), ActionType::getClusterParameter})); + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::getClusterParameter})); } NamespaceString ns() const override { diff --git a/src/mongo/db/commands/get_cluster_parameter_invocation.cpp b/src/mongo/db/commands/get_cluster_parameter_invocation.cpp index 7d827971f00d4..436eab955ac1c 100644 --- a/src/mongo/db/commands/get_cluster_parameter_invocation.cpp +++ b/src/mongo/db/commands/get_cluster_parameter_invocation.cpp @@ -28,14 +28,27 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/audit.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands/get_cluster_parameter_invocation.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/idl/cluster_server_parameter_gen.h" +#include "mongo/db/server_options.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -134,77 +147,4 @@ GetClusterParameterInvocation::Reply GetClusterParameterInvocation::getCachedPar return Reply(parameterValues); } -GetClusterParameterInvocation::Reply GetClusterParameterInvocation::getDurableParameters( - OperationContext* opCtx, const GetClusterParameter& request) { - auto configServers = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - // Create the query document such that all documents 
in config.clusterParmeters with _id - // in the requested list of ServerParameters are returned. - const CmdBody& cmdBody = request.getCommandParameter(); - ServerParameterSet* clusterParameters = ServerParameterSet::getClusterParameterSet(); - - BSONObjBuilder queryDocBuilder; - BSONObjBuilder inObjBuilder = queryDocBuilder.subobjStart("_id"_sd); - BSONArrayBuilder parameterNameBuilder = inObjBuilder.subarrayStart("$in"_sd); - - auto [requestedParameterNames, parameterValues] = retrieveRequestedParameters( - opCtx, cmdBody, request.getDbName().tenantId(), false /* excludeClusterParameterTime */); - - for (const auto& parameterValue : parameterValues) { - parameterNameBuilder.append(parameterValue["_id"_sd].String()); - } - - parameterNameBuilder.doneFast(); - inObjBuilder.doneFast(); - - // Perform the majority read on the config server primary. - BSONObj query = queryDocBuilder.obj(); - LOGV2_DEBUG(6226101, 2, "Querying config servers for cluster parameters", "query"_attr = query); - auto findResponse = uassertStatusOK(configServers->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kMajorityReadConcern, - NamespaceString::makeClusterParametersNSS(request.getDbName().tenantId()), - query, - BSONObj(), - boost::none)); - - // Any parameters that are not included in the response don't have a cluster parameter - // document yet, which means they still are using the default value. - std::vector retrievedParameters = std::move(findResponse.docs); - if (retrievedParameters.size() < requestedParameterNames.size()) { - std::vector onDiskParameterNames; - onDiskParameterNames.reserve(retrievedParameters.size()); - std::transform( - retrievedParameters.begin(), - retrievedParameters.end(), - std::back_inserter(onDiskParameterNames), - [&](const auto& onDiskParameter) { return onDiskParameter["_id"_sd].String(); }); - - // Sort and find the set difference of the requested parameters and the parameters - // returned. 
- std::vector defaultParameterNames; - - defaultParameterNames.reserve(requestedParameterNames.size() - onDiskParameterNames.size()); - - std::sort(onDiskParameterNames.begin(), onDiskParameterNames.end()); - std::sort(requestedParameterNames.begin(), requestedParameterNames.end()); - std::set_difference(requestedParameterNames.begin(), - requestedParameterNames.end(), - onDiskParameterNames.begin(), - onDiskParameterNames.end(), - std::back_inserter(defaultParameterNames)); - - for (const auto& defaultParameterName : defaultParameterNames) { - auto defaultParameter = clusterParameters->get(defaultParameterName); - BSONObjBuilder bob; - defaultParameter->append( - opCtx, &bob, defaultParameterName, request.getDbName().tenantId()); - retrievedParameters.push_back(bob.obj()); - } - } - - return Reply(retrievedParameters); -} - } // namespace mongo diff --git a/src/mongo/db/commands/get_cluster_parameter_invocation.h b/src/mongo/db/commands/get_cluster_parameter_invocation.h index 374341994bad2..ac02282ca8fa4 100644 --- a/src/mongo/db/commands/get_cluster_parameter_invocation.h +++ b/src/mongo/db/commands/get_cluster_parameter_invocation.h @@ -29,8 +29,17 @@ #pragma once +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" #include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" namespace mongo { @@ -43,14 +52,9 @@ class GetClusterParameterInvocation { GetClusterParameterInvocation() = default; - // Retrieves in-memory parameters. Used by mongod getClusterParameter and mongoses - // with featureFlagClusterWideConfigM2 enabled. + // Retrieves in-memory parameters. Reply getCachedParameters(OperationContext* opCtx, const GetClusterParameter& request); - // Retrieves durable cluster server parameters from config server. Used by mongoses with - // featureFlagClusterWideConfigM2 disabled. - Reply getDurableParameters(OperationContext* opCtx, const GetClusterParameter& request); - private: // Parses the command body and retrieves the BSON representation and names of the requested // cluster parameters for the given tenant. diff --git a/src/mongo/db/commands/getmore_cmd.cpp b/src/mongo/db/commands/getmore_cmd.cpp index 8b2f980dd9835..54a4e9eae58e9 100644 --- a/src/mongo/db/commands/getmore_cmd.cpp +++ b/src/mongo/db/commands/getmore_cmd.cpp @@ -27,44 +27,101 @@ * it in the license file. 
*/ +#include +#include +#include #include +#include +#include +#include #include - -#include "mongo/bson/util/bson_extract.h" +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/read_preference.h" #include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/external_data_source_scope_guard.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/db_raii.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/change_stream_invalidation_info.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/find.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/find_common.h" #include "mongo/db/query/getmore_command_gen.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/read_concern.h" -#include "mongo/db/repl/oplog.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/speculative_majority_read_info.h" +#include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/stats/counters.h" #include "mongo/db/stats/resource_consumption_metrics.h" #include "mongo/db/stats/top.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" #include "mongo/rpc/rewrite_state_change_errors.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" 
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -164,13 +221,14 @@ void validateAuthorization(const OperationContext* opCtx, const ClientCursor& cu */ void validateNamespace(const NamespaceString& commandNss, const ClientCursor& cursor) { uassert(ErrorCodes::Unauthorized, - str::stream() << "Requested getMore on namespace '" << commandNss.ns() - << "', but cursor belongs to a different namespace " << cursor.nss().ns(), + str::stream() << "Requested getMore on namespace '" << commandNss.toStringForErrorMsg() + << "', but cursor belongs to a different namespace " + << cursor.nss().toStringForErrorMsg(), commandNss == cursor.nss()); if (commandNss.isOplog() && MONGO_unlikely(rsStopGetMoreCmd.shouldFail())) { uasserted(ErrorCodes::CommandFailed, - str::stream() << "getMore on " << commandNss.ns() + str::stream() << "getMore on " << commandNss.toStringForErrorMsg() << " rejected due to active fail point rsStopGetMoreCmd"); } } @@ -325,7 +383,7 @@ class GetMoreCmd final : public Command { NamespaceString nss(NamespaceStringUtil::parseNamespaceFromRequest( _cmd.getDbName(), _cmd.getCollection())); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace for getMore: " << nss.ns(), + str::stream() << "Invalid namespace for getMore: " << nss.toStringForErrorMsg(), nss.isValid()); } @@ -433,7 +491,6 @@ class GetMoreCmd final : public Command { auto&& [stats, _] = explainer.getWinningPlanStats(ExplainOptions::Verbosity::kExecStats); LOGV2_WARNING(20478, - "getMore command executor error: {error}, stats: {stats}, cmd: {cmd}", "getMore command executor error", "error"_attr = exception.toStatus(), "stats"_attr = redact(stats), @@ -587,6 +644,12 @@ class GetMoreCmd final : public Command { curOp->setGenericCursor_inlock(cursorPin->toGenericCursor()); } + // If this is a change stream cursor, check whether the tenant has migrated elsewhere. + if (cursorPin->getExecutor()->getPostBatchResumeToken()["_data"]) { + tenant_migration_access_blocker::assertCanGetMoreChangeStream(opCtx, + _cmd.getDbName()); + } + // If the 'failGetMoreAfterCursorCheckout' failpoint is enabled, throw an exception with // the given 'errorCode' value, or ErrorCodes::InternalError if 'errorCode' is omitted. failGetMoreAfterCursorCheckout.executeIf( @@ -687,15 +750,17 @@ class GetMoreCmd final : public Command { curOp->debug().cursorExhausted = true; } - nextBatch.done(respondWithId, nss); + nextBatch.done(respondWithId, + nss, + SerializationContext::stateCommandReply(_cmd.getSerializationContext())); // Increment this metric once we have generated a response and we know it will return // documents. 
auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx); metricsCollector.incrementDocUnitsReturned(curOp->getNS(), docUnitsReturned); curOp->debug().additiveMetrics.nBatches = 1; - - collectTelemetryMongod(opCtx, cursorPin, numResults); + curOp->setEndOfOpMetrics(numResults); + collectQueryStatsMongod(opCtx, cursorPin); if (respondWithId) { cursorDeleter.dismiss(); @@ -789,9 +854,14 @@ class GetMoreCmd final : public Command { void validateResult(rpc::ReplyBuilderInterface* reply, boost::optional tenantId) { auto ret = reply->getBodyBuilder().asTempObj(); - CursorGetMoreReply::parse( - IDLParserContext{"CursorGetMoreReply", false /* apiStrict */, tenantId}, - ret.removeField("ok")); + + // We need to copy the serialization context from the request to the reply object + CursorGetMoreReply::parse(IDLParserContext("CursorGetMoreReply", + false /* apiStrict */, + tenantId, + SerializationContext::stateCommandReply( + _cmd.getSerializationContext())), + ret.removeField("ok")); } const GetMoreCommandRequest _cmd; diff --git a/src/mongo/db/commands/hashcmd.cpp b/src/mongo/db/commands/hashcmd.cpp index 7caee18486362..bf26a4ae96231 100644 --- a/src/mongo/db/commands/hashcmd.cpp +++ b/src/mongo/db/commands/hashcmd.cpp @@ -31,18 +31,20 @@ * Defines a shell command for hashing a BSONElement value */ +#include #include -#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" #include "mongo/db/hasher.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/http_client.cpp b/src/mongo/db/commands/http_client.cpp index fe67aa7b88972..18ce882deb55e 100644 --- a/src/mongo/db/commands/http_client.cpp +++ b/src/mongo/db/commands/http_client.cpp @@ -27,15 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/base/init.h" -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" +#include + +#include "mongo/base/data_builder.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" #include "mongo/db/commands.h" #include "mongo/db/commands/http_client_gen.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/net/http_client.h" diff --git a/src/mongo/db/commands/index_filter_commands.cpp b/src/mongo/db/commands/index_filter_commands.cpp index 846d517e0e803..6969efc99a7a5 100644 --- a/src/mongo/db/commands/index_filter_commands.cpp +++ b/src/mongo/db/commands/index_filter_commands.cpp @@ -28,32 +28,52 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include #include #include +#include #include -#include "mongo/base/init.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/client.h" #include "mongo/db/commands/index_filter_commands.h" #include "mongo/db/commands/plan_cache_commands.h" #include "mongo/db/db_raii.h" -#include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query_encoder.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/plan_cache_key_factory.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_settings_decoration.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/stdx/type_traits.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -221,7 +241,7 @@ Status ClearFilters::clear(OperationContext* opCtx, // - clear index filters for single query shape when a query shape is described in the // command arguments. 
if (cmdObj.hasField("query")) { - auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, collection->ns().ns(), cmdObj); + auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, collection->ns(), cmdObj); if (!statusWithCQ.isOK()) { return statusWithCQ.getStatus(); } @@ -364,7 +384,7 @@ Status SetFilter::set(OperationContext* opCtx, } } - auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, collection->ns().ns(), cmdObj); + auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, collection->ns(), cmdObj); if (!statusWithCQ.isOK()) { return statusWithCQ.getStatus(); } diff --git a/src/mongo/db/commands/index_filter_commands.h b/src/mongo/db/commands/index_filter_commands.h index 5f7561af73c96..c99bc60acc64c 100644 --- a/src/mongo/db/commands/index_filter_commands.h +++ b/src/mongo/db/commands/index_filter_commands.h @@ -29,10 +29,19 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/query_settings.h" #include "mongo/db/query/sbe_plan_cache.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/index_filter_commands_test.cpp b/src/mongo/db/commands/index_filter_commands_test.cpp index 3ca9255094c1c..09eaea5047e93 100644 --- a/src/mongo/db/commands/index_filter_commands_test.cpp +++ b/src/mongo/db/commands/index_filter_commands_test.cpp @@ -31,33 +31,48 @@ * This file contains tests for mongo/db/commands/index_filter_commands.h */ -#include "mongo/db/commands/index_filter_commands.h" - +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/db/catalog/collection_mock.h" +#include "mongo/db/commands/index_filter_commands.h" #include "mongo/db/exec/plan_cache_util.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/stages/co_scan.h" -#include "mongo/db/json.h" -#include "mongo/db/operation_context_noop.h" -#include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/plan_cache_callbacks.h" +#include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_cache_key_factory.h" -#include "mongo/db/query/plan_ranker.h" +#include "mongo/db/query/plan_ranking_decision.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_test_service_context.h" #include "mongo/db/query/sbe_plan_cache.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" - -using namespace mongo; - +#include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" + +namespace mongo { namespace { -using std::string; -using std::unique_ptr; -using std::vector; - class IndexFilterCommandsTest : public unittest::Test { protected: 
void setUp() override { @@ -205,14 +220,14 @@ class IndexFilterCommandsTest : public unittest::Test { /** * Utility function to get list of index filters from the query settings. */ - vector getFilters() { + std::vector getFilters() { BSONObjBuilder bob; ASSERT_OK(ListFilters::list(_querySettings, &bob)); BSONObj resultObj = bob.obj(); BSONElement filtersElt = resultObj.getField("filters"); ASSERT_EQUALS(filtersElt.type(), mongo::Array); - vector filtersEltArray = filtersElt.Array(); - vector filters; + std::vector filtersEltArray = filtersElt.Array(); + std::vector filters; for (auto&& elt : filtersEltArray) { ASSERT_TRUE(elt.isABSONObj()); BSONObj obj = elt.Obj(); @@ -317,7 +332,9 @@ class IndexFilterCommandsTest : public unittest::Test { // matter to the tests. auto cacheData = std::make_unique( std::make_unique(PlanNodeId{}), - stage_builder::PlanStageData{std::make_unique()}); + stage_builder::PlanStageData( + stage_builder::PlanStageEnvironment(std::make_unique()), + std::make_unique())); auto decision = createDecision(1U); auto querySolution = std::make_unique(); @@ -360,7 +377,7 @@ const NamespaceString IndexFilterCommandsTest::_nss( */ TEST_F(IndexFilterCommandsTest, ListFiltersEmpty) { - vector filters = getFilters(); + std::vector filters = getFilters(); ASSERT_TRUE(filters.empty()); } @@ -388,7 +405,7 @@ TEST_F(IndexFilterCommandsTest, ClearNonexistentIndexFilter) { std::string clearCmdObject{"{query: {b: 1}}"}; ASSERT_OK(setIndexFilter(setCmdObject)); - vector filters = getFilters(); + std::vector filters = getFilters(); ASSERT_EQUALS(filters.size(), 1U); // Clear nonexistent index filter. @@ -440,7 +457,7 @@ TEST_F(IndexFilterCommandsTest, SetAndClearFilters) { indexes: [{a: 1}]})"); size_t expectedNumFilters = 1; - vector filters = getFilters(); + std::vector filters = getFilters(); ASSERT_EQ(expectedNumFilters, filters.size()); // Query shape should not exist in plan cache after index filter is updated. @@ -518,7 +535,7 @@ TEST_F(IndexFilterCommandsTest, SetAndClearFiltersCollation) { collation: {locale: 'mock_reverse_string'}, indexes: [{a: 1}]})"); - vector filters = getFilters(); + std::vector filters = getFilters(); ASSERT_EQUALS(filters.size(), 1U); ASSERT_BSONOBJ_EQ(filters[0].getObjectField("query"), fromjson("{a: 'foo'}")); ASSERT_BSONOBJ_EQ(filters[0].getObjectField("sort"), fromjson("{}")); @@ -571,4 +588,6 @@ TEST_F(IndexFilterCommandsTest, SetFilterAcceptsIndexNames) { ASSERT_BSONOBJ_EQ(indexes[0].embeddedObject(), fromjson("{a: 1}")); ASSERT_EQUALS(indexes[1].valueStringData(), "a_1:rev"); } + } // namespace +} // namespace mongo diff --git a/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp b/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp index 4a12c44304f5d..6e2fbae24c1f0 100644 --- a/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp +++ b/src/mongo/db/commands/internal_rename_if_options_and_indexes_match_cmd.cpp @@ -27,19 +27,43 @@ * it in the license file. 
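The index filter tests above drive SetFilter::set and ClearFilters::clear with JSON command documents built via fromjson. For reference, a rough sketch of the shapes they exercise; values are taken from the assertions above, and the clear-all behavior follows the "no query field" comment in the command implementation:

    // Illustrative only: index filter command document shapes from the tests above.
    BSONObj setCmd = fromjson(
        "{query: {a: 'foo'}, sort: {}, projection: {},"
        " collation: {locale: 'mock_reverse_string'},"
        " indexes: [{a: 1}]}");
    BSONObj clearOneCmd = fromjson("{query: {a: 'foo'}}");  // clears a single query shape
    BSONObj clearAllCmd = fromjson("{}");  // no 'query' field: clears every index filter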
*/ +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/internal_rename_if_options_and_indexes_match_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/ddl_lock_manager.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" namespace mongo { namespace { -MONGO_FAIL_POINT_DEFINE(blockBeforeInternalRenameIfOptionsAndIndexesMatch); +MONGO_FAIL_POINT_DEFINE(blockBeforeInternalRenameAndBeforeTakingDDLLocks); +MONGO_FAIL_POINT_DEFINE(blockBeforeInternalRenameAndAfterTakingDDLLocks); bool isCollectionSharded(OperationContext* opCtx, const NamespaceString& nss) { AutoGetCollectionForRead lock(opCtx, nss); @@ -70,29 +94,41 @@ class InternalRenameIfOptionsAndIndexesMatchCmd final std::list(originalIndexes.begin(), originalIndexes.end()); const auto& collectionOptions = thisRequest.getCollectionOptions(); - if (serverGlobalParams.clusterRole.has(ClusterRole::None) || - serverGlobalParams.clusterRole.exclusivelyHasConfigRole()) { + if (MONGO_unlikely(blockBeforeInternalRenameAndBeforeTakingDDLLocks.shouldFail())) { + blockBeforeInternalRenameAndBeforeTakingDDLLocks.pauseWhileSet(); + } + + if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { // No need to acquire additional locks in a non-sharded environment _internalRun(opCtx, fromNss, toNss, indexList, collectionOptions); return; } - // Check if the receiving shard is still the primary for the database - DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, fromNss.db()); - - // Acquiring the local part of the distributed locks for involved namespaces allows: - // - Serialize with sharded DDLs, ensuring no concurrent modifications of the - // collections. - // - Check safely if the target collection is sharded or not. + /** + * Acquiring the local part of the distributed locks for involved namespaces allows: + * - Serialize with sharded DDLs, ensuring no concurrent modifications of the + * collections. + * - Check safely if the target collection is sharded or not. + * - Check if the current shard is still the primary for the database. 
+ */ static constexpr StringData lockReason{"internalRenameCollection"_sd}; - auto ddlLockManager = DDLLockManager::get(opCtx); - auto fromCollDDLLock = ddlLockManager->lock( - opCtx, fromNss.ns(), lockReason, DDLLockManager::kDefaultLockTimeout); - auto toCollDDLLock = ddlLockManager->lock( - opCtx, toNss.ns(), lockReason, DDLLockManager::kDefaultLockTimeout); + const DDLLockManager::ScopedCollectionDDLLock fromCollDDLLock{ + opCtx, fromNss, lockReason, MODE_X, DDLLockManager::kDefaultLockTimeout}; + + // If we are renaming a buckets collection in the $out stage, we must acquire a lock on + // the view namespace, instead of the buckets namespace. This lock avoids concurrent + // modifications, since users run operations on the view and not the buckets namespace + // and all time-series DDL operations take a lock on the view namespace. + const DDLLockManager::ScopedCollectionDDLLock toCollDDLLock{ + opCtx, + fromNss.isOutTmpBucketsCollection() ? toNss.getTimeseriesViewNamespace() : toNss, + lockReason, + MODE_X, + DDLLockManager::kDefaultLockTimeout}; uassert(ErrorCodes::IllegalOperation, - str::stream() << "cannot rename to sharded collection '" << toNss << "'", + str::stream() << "cannot rename to sharded collection '" + << toNss.toStringForErrorMsg() << "'", !isCollectionSharded(opCtx, toNss)); _internalRun(opCtx, fromNss, toNss, indexList, collectionOptions); @@ -104,15 +140,14 @@ class InternalRenameIfOptionsAndIndexesMatchCmd final const NamespaceString& toNss, const std::list& indexList, const BSONObj& collectionOptions) { - if (MONGO_unlikely(blockBeforeInternalRenameIfOptionsAndIndexesMatch.shouldFail())) { - blockBeforeInternalRenameIfOptionsAndIndexesMatch.pauseWhileSet(); + if (MONGO_unlikely(blockBeforeInternalRenameAndAfterTakingDDLLocks.shouldFail())) { + blockBeforeInternalRenameAndAfterTakingDDLLocks.pauseWhileSet(); } - RenameCollectionOptions options; options.dropTarget = true; options.stayTemp = false; doLocalRenameIfOptionsAndIndexesHaveNotChanged( - opCtx, fromNss, toNss, options, std::move(indexList), collectionOptions); + opCtx, fromNss, toNss, options, indexList, collectionOptions); } NamespaceString ns() const override { @@ -127,17 +162,17 @@ class InternalRenameIfOptionsAndIndexesMatchCmd final auto from = thisRequest.getFrom(); auto to = thisRequest.getTo(); uassert(ErrorCodes::Unauthorized, - str::stream() << "Unauthorized to rename " << from, + str::stream() << "Unauthorized to rename " << from.toStringForErrorMsg(), AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(from), ActionType::renameCollection)); uassert(ErrorCodes::Unauthorized, - str::stream() << "Unauthorized to drop " << to, + str::stream() << "Unauthorized to drop " << to.toStringForErrorMsg(), AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(to), ActionType::dropCollection)); uassert(ErrorCodes::Unauthorized, - str::stream() << "Unauthorized to insert into " << to, + str::stream() << "Unauthorized to insert into " << to.toStringForErrorMsg(), AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(to), ActionType::insert)); diff --git a/src/mongo/db/commands/internal_transactions_test_command_d.cpp b/src/mongo/db/commands/internal_transactions_test_command_d.cpp index 6f7a4ad8babec..77b0853933b24 100644 --- a/src/mongo/db/commands/internal_transactions_test_command_d.cpp +++ 
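The rename command above now takes its collection DDL locks through RAII guards instead of storing the results of explicit lock() calls. A minimal sketch of that usage, assuming the DDLLockManager::ScopedCollectionDDLLock API shown in this hunk; opCtx, fromNss, and toNss stand for the command's operation context and namespaces:

    static constexpr StringData lockReason{"internalRenameCollection"_sd};

    // Serializes with sharded DDL operations on the source namespace.
    const DDLLockManager::ScopedCollectionDDLLock fromCollDDLLock{
        opCtx, fromNss, lockReason, MODE_X, DDLLockManager::kDefaultLockTimeout};

    // For a time-series $out temporary buckets collection, lock the view namespace
    // instead, since time-series DDL operations serialize on the view namespace.
    const NamespaceString toLockNss =
        fromNss.isOutTmpBucketsCollection() ? toNss.getTimeseriesViewNamespace() : toNss;
    const DDLLockManager::ScopedCollectionDDLLock toCollDDLLock{
        opCtx, toLockNss, lockReason, MODE_X, DDLLockManager::kDefaultLockTimeout};

    // Both guards release automatically when they go out of scope.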
b/src/mongo/db/commands/internal_transactions_test_command_d.cpp @@ -27,8 +27,22 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/cluster_transaction_api.h" +#include "mongo/db/commands.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/transaction/transaction_api.h" #include "mongo/db/transaction/transaction_participant_resource_yielder.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/commands/internal_transactions_test_command.h" namespace mongo { @@ -43,12 +57,12 @@ class InternalTransactionsTestCommandD StringData commandName, bool useClusterClient) { auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); // If a sharded mongod is acting as a mongos, it will need special routing behaviors. if (useClusterClient) { + auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); return txn_api::SyncTransactionWithRetries( opCtx, - sleepInlineExecutor, + executor, TransactionParticipantResourceYielder::make(commandName), inlineExecutor, std::make_unique( @@ -61,7 +75,7 @@ class InternalTransactionsTestCommandD return txn_api::SyncTransactionWithRetries( opCtx, - sleepInlineExecutor, + executor, TransactionParticipantResourceYielder::make(commandName), inlineExecutor); } diff --git a/src/mongo/db/commands/isself.cpp b/src/mongo/db/commands/isself.cpp index 04c10b43f184c..a5626c332e23a 100644 --- a/src/mongo/db/commands/isself.cpp +++ b/src/mongo/db/commands/isself.cpp @@ -27,11 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/isself.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp index 9c29fb6a372eb..76adc77d3cc8e 100644 --- a/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp +++ b/src/mongo/db/commands/kill_all_sessions_by_pattern_command.cpp @@ -27,25 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/base/init.h" -#include "mongo/db/auth/action_set.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/privilege.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/kill_sessions_common.h" -#include "mongo/db/session/kill_sessions_local.h" -#include "mongo/db/session/logical_session_cache.h" -#include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/logical_session_id_helpers.h" -#include "mongo/db/stats/top.h" +#include "mongo/db/session/kill_sessions_gen.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { @@ -69,11 +78,12 @@ class KillAllSessionsByPatternCommand final : public BasicCommand { return "kill logical sessions by pattern"; } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); if (!authSession->isAuthorizedForPrivilege( - Privilege{ResourcePattern::forClusterResource(), ActionType::killAnySession})) { + Privilege{ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::killAnySession})) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } @@ -88,7 +98,7 @@ class KillAllSessionsByPatternCommand final : public BasicCommand { } virtual bool run(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj& cmdObj, BSONObjBuilder& result) override { IDLParserContext ctx("KillAllSessionsByPatternCmd"); @@ -106,7 +116,8 @@ class KillAllSessionsByPatternCommand final : public BasicCommand { auto authSession = AuthorizationSession::get(opCtx->getClient()); if (!authSession->isAuthorizedForPrivilege( - Privilege(ResourcePattern::forClusterResource(), ActionType::impersonate))) { + Privilege(ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::impersonate))) { for (const auto& pattern : ksc.getKillAllSessionsByPattern()) { if (pattern.getUsers() || pattern.getRoles()) { diff --git a/src/mongo/db/commands/kill_all_sessions_command.cpp b/src/mongo/db/commands/kill_all_sessions_command.cpp index e7854c13d89f8..fea3fc8823d05 100644 --- a/src/mongo/db/commands/kill_all_sessions_command.cpp +++ b/src/mongo/db/commands/kill_all_sessions_command.cpp @@ -27,25 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/base/init.h" -#include "mongo/db/auth/action_set.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/privilege.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/kill_sessions_common.h" -#include "mongo/db/session/kill_sessions_local.h" -#include "mongo/db/session/logical_session_cache.h" -#include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/logical_session_id_helpers.h" -#include "mongo/db/stats/top.h" +#include "mongo/db/session/kill_sessions_gen.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -69,11 +75,12 @@ class KillAllSessionsCommand final : public BasicCommand { return "kill all logical sessions, for a user, and their operations"; } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); if (!authSession->isAuthorizedForPrivilege( - Privilege{ResourcePattern::forClusterResource(), ActionType::killAnySession})) { + Privilege{ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::killAnySession})) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } diff --git a/src/mongo/db/commands/kill_op.cpp b/src/mongo/db/commands/kill_op.cpp index b31f45242ffc2..26803d5f76a3d 100644 --- a/src/mongo/db/commands/kill_op.cpp +++ b/src/mongo/db/commands/kill_op.cpp @@ -28,22 +28,15 @@ */ -#include "mongo/platform/basic.h" - -#include - -#include "mongo/base/init.h" -#include "mongo/base/status.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/audit.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" -#include "mongo/db/commands.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/kill_op_cmd_base.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" -#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/util/str.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/commands/kill_op_cmd_base.cpp b/src/mongo/db/commands/kill_op_cmd_base.cpp index ebae3fcbc0a92..343cf8cf38392 100644 --- a/src/mongo/db/commands/kill_op_cmd_base.cpp +++ b/src/mongo/db/commands/kill_op_cmd_base.cpp @@ -26,17 +26,26 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ -#include "mongo/db/auth/authorization_session.h" - -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/commands/kill_op_cmd_base.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/auth/authentication_session.h" +#include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/commands/kill_op_cmd_base.h" #include "mongo/db/operation_killer.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/commands/kill_op_cmd_base.h b/src/mongo/db/commands/kill_op_cmd_base.h index 8a48d623ff65e..11fca4263f1f3 100644 --- a/src/mongo/db/commands/kill_op_cmd_base.h +++ b/src/mongo/db/commands/kill_op_cmd_base.h @@ -27,8 +27,15 @@ * it in the license file. */ +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/operation_id.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/kill_sessions_command.cpp b/src/mongo/db/commands/kill_sessions_command.cpp index 8cfd99a7d9056..443305282884a 100644 --- a/src/mongo/db/commands/kill_sessions_command.cpp +++ b/src/mongo/db/commands/kill_sessions_command.cpp @@ -27,25 +27,38 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/base/init.h" -#include "mongo/db/auth/action_set.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/user.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/kill_sessions_common.h" -#include "mongo/db/session/kill_sessions_local.h" -#include "mongo/db/session/logical_session_cache.h" -#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/kill_sessions_gen.h" #include "mongo/db/session/logical_session_id_helpers.h" -#include "mongo/db/stats/top.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/read_through_cache.h" namespace mongo { @@ -108,7 +121,7 @@ class KillSessionsCommand final : public BasicCommand { } virtual bool run(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj& cmdObj, BSONObjBuilder& result) override { IDLParserContext ctx("KillSessionsCmd"); @@ -122,7 +135,8 @@ class KillSessionsCommand final : public BasicCommand { auto lsids = makeLogicalSessionIds( ksc.getKillSessions(), opCtx, - {Privilege{ResourcePattern::forClusterResource(), ActionType::killAnySession}}); + {Privilege{ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::killAnySession}}); patterns.reserve(lsids.size()); for (const auto& lsid : lsids) { diff --git a/src/mongo/db/commands/killcursors_cmd.cpp b/src/mongo/db/commands/killcursors_cmd.cpp index c63b3e2fad93f..ac92550305707 100644 --- a/src/mongo/db/commands/killcursors_cmd.cpp +++ b/src/mongo/db/commands/killcursors_cmd.cpp @@ -27,12 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/auth/authorization_session.h" +#include "mongo/base/status.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/killcursors_common.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/kill_cursors_gen.h" #include "mongo/db/stats/top.h" diff --git a/src/mongo/db/commands/killoperations_cmd.cpp b/src/mongo/db/commands/killoperations_cmd.cpp index 7be0ad4fe51c0..855388bf66022 100644 --- a/src/mongo/db/commands/killoperations_cmd.cpp +++ b/src/mongo/db/commands/killoperations_cmd.cpp @@ -27,9 +27,23 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/killoperations_common.h" #include "mongo/db/cursor_manager.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/stdx/unordered_set.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/commands/killoperations_common.h b/src/mongo/db/commands/killoperations_common.h index c4542e8485790..187f2c4ba0955 100644 --- a/src/mongo/db/commands/killoperations_common.h +++ b/src/mongo/db/commands/killoperations_common.h @@ -80,7 +80,8 @@ class KillOperationsCmdBase : public TypedCommand { void doCheckAuthorization(OperationContext* opCtx) const override { auto client = opCtx->getClient(); auto isInternal = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::internal); + ResourcePattern::forClusterResource(Base::request().getDbName().tenantId()), + ActionType::internal); if (!getTestCommandsEnabled() && !isInternal) { // Either the mongod/mongos must be in testing mode or this command must come from // an internal user diff --git a/src/mongo/db/commands/list_collections.cpp b/src/mongo/db/commands/list_collections.cpp index 4d192815e277f..c5daf09fc7439 100644 --- a/src/mongo/db/commands/list_collections.cpp +++ b/src/mongo/db/commands/list_collections.cpp @@ -28,44 +28,82 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include #include +#include +#include +#include #include +#include +#include +#include +#include +#include + #include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_catalog_helper.h" -#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" #include "mongo/db/commands/list_collections_filter.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/db_raii.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/queued_data_stage.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/list_collections_gen.h" -#include "mongo/db/multitenancy.h" -#include "mongo/db/query/cursor_request.h" -#include "mongo/db/query/cursor_response.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" +#include 
"mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/find_common.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/service_context.h" -#include "mongo/db/storage/storage_engine.h" -#include "mongo/db/storage/storage_options.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/views/view.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -255,9 +293,12 @@ BSONObj buildCollectionBson(OperationContext* opCtx, ListCollectionsReply createListCollectionsCursorReply( CursorId cursorId, const NamespaceString& cursorNss, + const SerializationContext& respSerializationContext, std::vector&& firstBatch) { return ListCollectionsReply( - ListCollectionsReplyCursor(cursorId, cursorNss, std::move(firstBatch))); + ListCollectionsReplyCursor( + cursorId, cursorNss, std::move(firstBatch), respSerializationContext), + respSerializationContext); } class CmdListCollections : public ListCollectionsCmdVersion1Gen { @@ -295,11 +336,7 @@ class CmdListCollections : public ListCollectionsCmdVersion1GengetClient()); - - auto dbName = request().getDbName(); - auto cmdObj = request().toBSON({}); - uassertStatusOK(authzSession->checkAuthorizedToListCollections( - dbName.toStringWithTenantId(), cmdObj)); + uassertStatusOK(authzSession->checkAuthorizedToListCollections(request())); } NamespaceString ns() const final { @@ -316,6 +353,10 @@ class CmdListCollections : public ListCollectionsCmdVersion1Gen( opCtx, std::unique_ptr(nullptr), ns()); @@ -440,10 +481,8 @@ class CmdListCollections : public ListCollectionsCmdVersion1GenisLockFreeReadsOp()) { auto collectionCatalog = CollectionCatalog::get(opCtx); - for (auto it = collectionCatalog->begin(opCtx, dbName); - it != collectionCatalog->end(opCtx); - ++it) { - perCollectionWork(*it); + for (auto&& coll : collectionCatalog->range(dbName)) { + perCollectionWork(coll); } } else { mongo::catalog::forEachCollectionFromDb( @@ -525,7 +564,7 @@ class CmdListCollections : public ListCollectionsCmdVersion1GenisEOF()) { - return createListCollectionsCursorReply( - 0 /* cursorId */, cursorNss, std::move(firstBatch)); + return createListCollectionsCursorReply(0 /* cursorId */, + cursorNss, + respSerializationContext, + std::move(firstBatch)); } exec->saveState(); exec->detachFromOperationContext(); @@ -557,13 +598,14 @@ class CmdListCollections : public ListCollectionsCmdVersion1GengetClient()) - ->checkAuthorizedToListCollections( - dbName.toStringWithTenantId(), cmdObj))}); + ->checkAuthorizedToListCollections(listCollRequest))}); pinnedCursor->incNBatches(); pinnedCursor->incNReturnedSoFar(firstBatch.size()); - return createListCollectionsCursorReply( - pinnedCursor.getCursor()->cursorid(), cursorNss, 
std::move(firstBatch)); + return createListCollectionsCursorReply(pinnedCursor.getCursor()->cursorid(), + cursorNss, + respSerializationContext, + std::move(firstBatch)); } }; } cmdListCollections; diff --git a/src/mongo/db/commands/list_collections_filter.cpp b/src/mongo/db/commands/list_collections_filter.cpp index c2e742d2b2b5c..a2f015eed0ea8 100644 --- a/src/mongo/db/commands/list_collections_filter.cpp +++ b/src/mongo/db/commands/list_collections_filter.cpp @@ -27,11 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/commands/list_collections_filter.h" - -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/db/commands/list_collections_filter_test.cpp b/src/mongo/db/commands/list_collections_filter_test.cpp index a16e57432106b..8e4dd8086d5f1 100644 --- a/src/mongo/db/commands/list_collections_filter_test.cpp +++ b/src/mongo/db/commands/list_collections_filter_test.cpp @@ -27,12 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/list_collections_filter.h" +#include +#include "mongo/base/string_data.h" #include "mongo/bson/json.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/commands/list_collections_filter.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/commands/list_databases.cpp b/src/mongo/db/commands/list_databases.cpp index 3ad19b1d236ff..acd4f827bddf9 100644 --- a/src/mongo/db/commands/list_databases.cpp +++ b/src/mongo/db/commands/list_databases.cpp @@ -27,23 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/list_databases_common.h" #include "mongo/db/commands/list_databases_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop_failpoint_helpers.h" #include "mongo/db/database_name.h" #include "mongo/db/matcher/expression.h" -#include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/server_feature_flags_gen.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/serialization_context.h" namespace mongo { @@ -97,21 +111,22 @@ class CmdListDatabases final : public ListDatabasesCmdVersion1Gen& authDB) { - const bool mayListAllDatabases = as->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::listDatabases); - - if (authDB) { - uassert(ErrorCodes::Unauthorized, - "Insufficient permissions to list all databases", - authDB.value() || mayListAllDatabases); - return authDB.value(); - } - - // By default, list all databases if we can, otherwise - // only those we're allowed to find on. 
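The listCollections hunk above also replaces explicit catalog begin()/end() iteration with the catalog's range-based API. A minimal sketch of that loop, assuming the CollectionCatalog::range(DatabaseName) interface referenced in the hunk; perCollectionWork stands for the existing per-collection callback:

    auto catalog = CollectionCatalog::get(opCtx);
    for (auto&& coll : catalog->range(dbName)) {
        // Same work as before; the range yields each collection in 'dbName'.
        perCollectionWork(coll);
    }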
- return !mayListAllDatabases; - })(cmd.getAuthorizedDatabases()); + const bool authorizedDatabases = + ([as, tenantId = cmd.getDbName().tenantId()](const boost::optional& authDB) { + const bool mayListAllDatabases = as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(tenantId), ActionType::listDatabases); + + if (authDB) { + uassert(ErrorCodes::Unauthorized, + "Insufficient permissions to list all databases", + authDB.value() || mayListAllDatabases); + return authDB.value(); + } + + // By default, list all databases if we can, otherwise + // only those we're allowed to find on. + return !mayListAllDatabases; + })(cmd.getAuthorizedDatabases()); // {filter: matchExpression}. std::unique_ptr filter = list_databases::getFilter(cmd, opCtx, ns()); @@ -135,7 +150,10 @@ class CmdListDatabases final : public ListDatabasesCmdVersion1GenisAuthorizedForAnyActionOnAnyResourceInDB(dbName.toString())) { + if (authorizedDatabases && !as->isAuthorizedForAnyActionOnAnyResourceInDB(dbName)) { // We don't have listDatabases on the cluster or find on this database. continue; } // If setTenantId is true, always return the dbName without the tenantId - ReplyItemType item(setTenantId ? dbName.db() : DatabaseNameUtil::serialize(dbName)); + ReplyItemType item(setTenantId ? dbName.db().toString() + : DatabaseNameUtil::serialize(dbName)); if (setTenantId) { initializeItemWithTenantId(item, dbName); } @@ -117,7 +117,7 @@ int64_t setReplyItems(OperationContext* opCtx, continue; } - writeConflictRetry(opCtx, "sizeOnDisk", dbName.toString(), [&] { + writeConflictRetry(opCtx, "sizeOnDisk", NamespaceString(dbName), [&] { size = storageEngine->sizeOnDiskForDb(opCtx, dbName); }); item.setSizeOnDisk(size); diff --git a/src/mongo/db/commands/list_databases_for_all_tenants.cpp b/src/mongo/db/commands/list_databases_for_all_tenants.cpp index c7574aef1b4b2..01cba554d3316 100644 --- a/src/mongo/db/commands/list_databases_for_all_tenants.cpp +++ b/src/mongo/db/commands/list_databases_for_all_tenants.cpp @@ -27,21 +27,34 @@ * it in the license file. 
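The listDatabases change above keeps the immediately-invoked lambda that computes a const flag from an optional request field, now capturing the tenant id as well. A generic, self-contained illustration of that initialization pattern in standard C++ (not MongoDB code):

    #include <iostream>
    #include <optional>

    int main() {
        std::optional<bool> requested;     // e.g. an optional "authorizedDatabases" field
        const bool mayListAll = true;      // e.g. "caller may list all databases"

        // Immediately-invoked lambda: the flag stays const while the logic branches.
        const bool restrictToAuthorized = [&](const std::optional<bool>& r) {
            if (r) {
                return *r;                 // explicit request wins
            }
            return !mayListAll;            // otherwise derive a default
        }(requested);

        std::cout << std::boolalpha << restrictToAuthorized << '\n';  // prints "false"
        return 0;
    }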
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/list_databases_common.h" #include "mongo/db/commands/list_databases_for_all_tenants_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/database_name.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_verbosity_gen.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -94,7 +107,8 @@ class CmdListDatabasesForAllTenants final : public TypedCommandisAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::internal)); + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } Reply typedRun(OperationContext* opCtx) { @@ -128,7 +142,7 @@ class CmdListDatabasesForAllTenants final : public TypedCommand +#include +#include +#include #include +#include +#include #include - -#include "mongo/bson/bsonobjbuilder.h" +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/catalog/list_indexes.h" #include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" -#include "mongo/db/curop.h" -#include "mongo/db/curop_failpoint_helpers.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/db_raii.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/queued_data_stage.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/index/index_descriptor.h" #include "mongo/db/list_indexes_gen.h" -#include "mongo/db/query/cursor_request.h" -#include "mongo/db/query/cursor_response.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/find_common.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/service_context.h" -#include "mongo/db/storage/durable_catalog.h" -#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/timeseries/catalog_helper.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/util/uuid.h" +#include 
"mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -119,19 +151,19 @@ IndexSpecsWithNamespaceString getIndexSpecsWithNamespaceString(OperationContext* // Since time-series collections don't have UUIDs, we skip the time-series lookup // if the target collection is specified as a UUID. - if (const auto& origNss = origNssOrUUID.nss()) { + if (origNssOrUUID.isNamespaceString()) { auto isCommandOnTimeseriesBucketNamespace = cmd.getIsTimeseriesNamespace() && *cmd.getIsTimeseriesNamespace(); if (auto timeseriesOptions = timeseries::getTimeseriesOptions( - opCtx, *origNss, !isCommandOnTimeseriesBucketNamespace)) { + opCtx, origNssOrUUID.nss(), !isCommandOnTimeseriesBucketNamespace)) { auto bucketsNss = isCommandOnTimeseriesBucketNamespace - ? *origNss - : origNss->makeTimeseriesBucketsNamespace(); + ? origNssOrUUID.nss() + : origNssOrUUID.nss().makeTimeseriesBucketsNamespace(); AutoGetCollectionForReadCommandMaybeLockFree autoColl(opCtx, bucketsNss); const CollectionPtr& coll = autoColl.getCollection(); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "ns does not exist: " << bucketsNss, + str::stream() << "ns does not exist: " << bucketsNss.toStringForErrorMsg(), coll); return std::make_pair( @@ -146,8 +178,9 @@ IndexSpecsWithNamespaceString getIndexSpecsWithNamespaceString(OperationContext* const auto& nss = autoColl.getNss(); const CollectionPtr& coll = autoColl.getCollection(); - uassert( - ErrorCodes::NamespaceNotFound, str::stream() << "ns does not exist: " << nss.ns(), coll); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "ns does not exist: " << nss.toStringForErrorMsg(), + coll); return std::make_pair(listIndexesInLock(opCtx, coll, nss, additionalInclude), nss); } @@ -230,13 +263,12 @@ class CmdListIndexes final : public ListIndexesCmdVersion1Gen { } NamespaceString ns() const final { - auto nss = request().getNamespaceOrUUID(); - if (nss.uuid()) { + auto nssOrUUID = request().getNamespaceOrUUID(); + if (nssOrUUID.isUUID()) { // UUID requires opCtx to resolve, settle on just the dbname. 
return NamespaceString(request().getDbName()); } - invariant(nss.nss()); - return nss.nss().value(); + return nssOrUUID.nss(); } void doCheckAuthorization(OperationContext* opCtx) const final { @@ -252,7 +284,8 @@ class CmdListIndexes final : public ListIndexesCmdVersion1Gen { opCtx, cmd.getNamespaceOrUUID()); uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to list indexes on collection:" << nss.ns(), + str::stream() << "Not authorized to list indexes on collection:" + << nss.toStringForErrorMsg(), authzSession->isAuthorizedForActionsOnResource( ResourcePattern::forExactNamespace(nss), ActionType::listIndexes)); } @@ -283,6 +316,9 @@ class CmdListIndexes final : public ListIndexesCmdVersion1Gen { const NamespaceString& nss) { auto& cmd = request(); + // We need to copy the serialization context from the request to the reply object + const auto serializationContext = cmd.getSerializationContext(); + long long batchSize = std::numeric_limits::max(); if (cmd.getCursor() && cmd.getCursor()->getBatchSize()) { batchSize = *cmd.getCursor()->getBatchSize(); @@ -333,7 +369,12 @@ class CmdListIndexes final : public ListIndexesCmdVersion1Gen { try { firstBatch.push_back(ListIndexesReplyItem::parse( - IDLParserContext("ListIndexesReplyItem"), nextDoc)); + IDLParserContext( + "ListIndexesReplyItem", + false /* apiStrict */, + nss.tenantId(), + SerializationContext::stateCommandReply(serializationContext)), + nextDoc)); } catch (const DBException& exc) { LOGV2_ERROR(5254500, "Could not parse catalog entry while replying to listIndexes", @@ -349,7 +390,11 @@ class CmdListIndexes final : public ListIndexesCmdVersion1Gen { } if (exec->isEOF()) { - return ListIndexesReplyCursor(0 /* cursorId */, nss, std::move(firstBatch)); + return ListIndexesReplyCursor( + 0 /* cursorId */, + nss, + std::move(firstBatch), + SerializationContext::stateCommandReply(serializationContext)); } exec->saveState(); @@ -372,7 +417,10 @@ class CmdListIndexes final : public ListIndexesCmdVersion1Gen { pinnedCursor->incNReturnedSoFar(firstBatch.size()); return ListIndexesReplyCursor( - pinnedCursor.getCursor()->cursorid(), nss, std::move(firstBatch)); + pinnedCursor.getCursor()->cursorid(), + nss, + std::move(firstBatch), + SerializationContext::stateCommandReply(serializationContext)); } }; } cmdListIndexes; diff --git a/src/mongo/db/commands/lock_info.cpp b/src/mongo/db/commands/lock_info.cpp index d7e77df64534c..f1b7f20c7aab3 100644 --- a/src/mongo/db/commands/lock_info.cpp +++ b/src/mongo/db/commands/lock_info.cpp @@ -27,27 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/db/concurrency/lock_manager_defs.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/commands/lock_info_gen.h" +#include "mongo/db/concurrency/lock_manager.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" namespace mongo { -/** - * Admin command to display global lock information - * TODO(SERVER-61211): Convert to IDL. 
- */ -class CmdLockInfo : public BasicCommand { +class CmdLockInfo : public TypedCommand { public: + using Request = LockInfo; + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { return AllowedOnSecondary::kAlways; } @@ -64,28 +68,37 @@ class CmdLockInfo : public BasicCommand { return "show all lock info on the server"; } - Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, - const BSONObj&) const final { - bool isAuthorized = - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::serverStatus); - return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized"); - } + class Invocation final : public MinimalInvocationBase { + public: + using MinimalInvocationBase::MinimalInvocationBase; + + private: + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::serverStatus)); + } - CmdLockInfo() : BasicCommand("lockInfo") {} + NamespaceString ns() const override { + return NamespaceString(request().getDbName()); + } - bool run(OperationContext* opCtx, - const DatabaseName&, - const BSONObj& jsobj, - BSONObjBuilder& result) { - auto lockToClientMap = LockManager::getLockToClientMap(opCtx->getServiceContext()); - LockManager::get(opCtx)->getLockInfoBSON(lockToClientMap, &result); - if (jsobj["includeStorageEngineDump"].trueValue()) { - opCtx->getServiceContext()->getStorageEngine()->dump(); + bool supportsWriteConcern() const override { + return false; } - return true; - } + + void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* reply) override { + auto lockToClientMap = LockManager::getLockToClientMap(opCtx->getServiceContext()); + auto result = reply->getBodyBuilder(); + LockManager::get(opCtx)->getLockInfoBSON(lockToClientMap, &result); + const auto& includeStorageEngineDump = request().getIncludeStorageEngineDump(); + if (includeStorageEngineDump) { + opCtx->getServiceContext()->getStorageEngine()->dump(); + } + } + }; } cmdLockInfo; } // namespace mongo diff --git a/src/mongo/db/commands/lock_info.idl b/src/mongo/db/commands/lock_info.idl new file mode 100644 index 0000000000000..0c17910173c7a --- /dev/null +++ b/src/mongo/db/commands/lock_info.idl @@ -0,0 +1,45 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. 
If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + +commands: + lockInfo: + description: "Returns information on locks that are currently being held or pending." + command_name: lockInfo + namespace: ignored + api_version: "" + fields: + includeStorageEngineDump: + optional: true + description: "Dump storage engine debug information to the logs" + type: bool diff --git a/src/mongo/db/commands/logical_session_server_status_section.cpp b/src/mongo/db/commands/logical_session_server_status_section.cpp index 95d7a58895ed7..717e989750f69 100644 --- a/src/mongo/db/commands/logical_session_server_status_section.cpp +++ b/src/mongo/db/commands/logical_session_server_status_section.cpp @@ -27,12 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" #include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_cache_stats_gen.h" #include "mongo/db/session/session_catalog.h" namespace mongo { diff --git a/src/mongo/db/commands/map_reduce_agg.cpp b/src/mongo/db/commands/map_reduce_agg.cpp index 12570a99ac150..8abb9b29a33d6 100644 --- a/src/mongo/db/commands/map_reduce_agg.cpp +++ b/src/mongo/db/commands/map_reduce_agg.cpp @@ -26,34 +26,54 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
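The new lock_info.idl above defines lockInfo as an IDL command with a single optional bool field. For reference, a rough sketch of the command document the generated LockInfo request type accepts, built here with the BSON macro; because the IDL namespace is "ignored", the value after the command name is not interpreted as a collection:

    // Illustrative only: a lockInfo invocation that also requests a storage engine dump.
    BSONObj cmd = BSON("lockInfo" << 1 << "includeStorageEngineDump" << true);
    // The server appends lock information to the reply; because the optional bool is
    // set, it also dumps storage engine debug information to the logs.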
*/ -#include "mongo/platform/basic.h" - -#include -#include -#include +#include +#include +#include +#include +#include +#include #include #include -#include "mongo/base/string_data.h" +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/commands.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands/map_reduce_agg.h" -#include "mongo/db/commands/map_reduce_javascript_code.h" +#include "mongo/db/commands/map_reduce_gen.h" +#include "mongo/db/commands/map_reduce_global_variable_scope.h" +#include "mongo/db/commands/map_reduce_out_options.h" #include "mongo/db/commands/mr_common.h" #include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/exec/disk_use_options_gen.h" -#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/pipeline/document_source_cursor.h" -#include "mongo/db/pipeline/expression.h" -#include "mongo/db/pipeline/pipeline_d.h" -#include "mongo/db/pipeline/plan_executor_pipeline.h" -#include "mongo/db/query/explain_common.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain.h" #include "mongo/db/query/map_reduce_output_format.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_summary_stats.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" +#include "mongo/util/timer.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -76,7 +96,7 @@ auto makeExpressionContext(OperationContext* opCtx, "mapReduce on a view is not supported", !ctx.getView()); - auto [resolvedCollator, _] = PipelineD::resolveCollator( + auto [resolvedCollator, _] = resolveCollator( opCtx, parsedMr.getCollation().get_value_or(BSONObj()), ctx.getCollection()); // The UUID of the collection for the execution namespace of this aggregation. 
diff --git a/src/mongo/db/commands/map_reduce_agg.h b/src/mongo/db/commands/map_reduce_agg.h index 24acb70f0a7e5..6c8690a00014a 100644 --- a/src/mongo/db/commands/map_reduce_agg.h +++ b/src/mongo/db/commands/map_reduce_agg.h @@ -29,15 +29,18 @@ #pragma once +#include #include #include #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/map_reduce_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/explain_options.h" namespace mongo::map_reduce_agg { diff --git a/src/mongo/db/commands/map_reduce_agg_test.cpp b/src/mongo/db/commands/map_reduce_agg_test.cpp index 36471106b0e85..9eb1c10028d92 100644 --- a/src/mongo/db/commands/map_reduce_agg_test.cpp +++ b/src/mongo/db/commands/map_reduce_agg_test.cpp @@ -27,24 +27,45 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include + +#include +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" -#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/commands/map_reduce_agg.h" +#include "mongo/db/commands/map_reduce_gen.h" +#include "mongo/db/commands/map_reduce_javascript_code.h" +#include "mongo/db/commands/map_reduce_out_options.h" #include "mongo/db/commands/mr_common.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_merge.h" #include "mongo/db/pipeline/document_source_out.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/map_reduce_command.cpp b/src/mongo/db/commands/map_reduce_command.cpp index a29f1db0e60ec..9ce097471da57 100644 --- a/src/mongo/db/commands/map_reduce_command.cpp +++ b/src/mongo/db/commands/map_reduce_command.cpp @@ -27,16 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/document_validation.h" +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" #include "mongo/db/commands/map_reduce_agg.h" #include "mongo/db/commands/map_reduce_command_base.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/find_common.h" -#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/map_reduce_out_options.cpp b/src/mongo/db/commands/map_reduce_out_options.cpp index cbdf67f2f6393..a809ae5dd8f85 100644 --- a/src/mongo/db/commands/map_reduce_out_options.cpp +++ b/src/mongo/db/commands/map_reduce_out_options.cpp @@ -28,12 +28,22 @@ */ +#include #include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands/map_reduce_out_options.h" #include "mongo/db/namespace_string.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/commands/map_reduce_out_options.h b/src/mongo/db/commands/map_reduce_out_options.h index f1c98a568e3d2..1c42111db8cfe 100644 --- a/src/mongo/db/commands/map_reduce_out_options.h +++ b/src/mongo/db/commands/map_reduce_out_options.h @@ -30,6 +30,10 @@ #pragma once #include +#include + +#include +#include #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" diff --git a/src/mongo/db/commands/map_reduce_parse_test.cpp b/src/mongo/db/commands/map_reduce_parse_test.cpp index 5201d7c18ad26..ca8837ec0fd58 100644 --- a/src/mongo/db/commands/map_reduce_parse_test.cpp +++ b/src/mongo/db/commands/map_reduce_parse_test.cpp @@ -27,16 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/db/commands/map_reduce_gen.h" #include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" -#include "mongo/unittest/unittest.h" - namespace mongo { namespace { diff --git a/src/mongo/db/commands/mr_common.cpp b/src/mongo/db/commands/mr_common.cpp index 47828472d0512..eb215fe2b7358 100644 --- a/src/mongo/db/commands/mr_common.cpp +++ b/src/mongo/db/commands/mr_common.cpp @@ -29,31 +29,59 @@ #include "mongo/db/commands/mr_common.h" +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/map_reduce_javascript_code.h" #include "mongo/db/exec/inclusion_projection_executor.h" -#include "mongo/db/exec/projection_node.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_js_reduce.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_merge.h" +#include "mongo/db/pipeline/document_source_merge_modes_gen.h" #include "mongo/db/pipeline/document_source_out.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_function.h" #include "mongo/db/pipeline/expression_js_emit.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/assert_util.h" #include "mongo/util/intrusive_counter.h" #include "mongo/util/str.h" @@ -72,7 +100,8 @@ Status interpretTranslationError(DBException* ex, const MapReduceCommandRequest& std::string error; switch (static_cast(ex->code())) { case ErrorCodes::InvalidNamespace: - error = "Invalid output namespace {} for MapReduce"_format(outNss.ns()); + error = + "Invalid output namespace {} for MapReduce"_format(outNss.toStringForErrorMsg()); break; case 15976: error = "The mapReduce sort option must have at least one sort key"; @@ -91,7 +120,8 @@ Status interpretTranslationError(DBException* ex, const MapReduceCommandRequest& break; case 31320: case 31321: - error = "Can't output mapReduce results to internal DB {}"_format(outNss.db()); + 
error = "Can't output mapReduce results to internal DB {}"_format( + outNss.dbName().toStringForErrorMsg()); break; default: // Prepend MapReduce context in the event of an unknown exception. @@ -268,7 +298,7 @@ auto translateOut(boost::intrusive_ptr expCtx, } // namespace -OutputOptions parseOutputOptions(const std::string& dbname, const BSONObj& cmdObj) { +OutputOptions parseOutputOptions(StringData dbname, const BSONObj& cmdObj) { OutputOptions outputOptions; outputOptions.outNonAtomic = true; @@ -324,7 +354,7 @@ OutputOptions parseOutputOptions(const std::string& dbname, const BSONObj& cmdOb const StringData outDb(outputOptions.outDB.empty() ? dbname : outputOptions.outDB); const NamespaceString nss(outDb, outputOptions.collectionName); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid 'out' namespace: " << nss.ns(), + str::stream() << "Invalid 'out' namespace: " << nss.toStringForErrorMsg(), nss.isValid()); outputOptions.finalNamespace = std::move(nss); } @@ -369,7 +399,8 @@ Status checkAuthForMapReduce(const BasicCommand* commandTemplate, ResourcePattern outputResource( ResourcePattern::forExactNamespace(NamespaceString(outputOptions.finalNamespace))); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid target namespace " << outputResource.ns().ns(), + str::stream() << "Invalid target namespace " + << outputResource.ns().toStringForErrorMsg(), outputResource.ns().isValid()); // TODO: check if outputNs exists and add createCollection privilege if not diff --git a/src/mongo/db/commands/mr_common.h b/src/mongo/db/commands/mr_common.h index 965553e9ca81b..a9278fca73e92 100644 --- a/src/mongo/db/commands/mr_common.h +++ b/src/mongo/db/commands/mr_common.h @@ -29,13 +29,22 @@ #pragma once +#include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands.h" #include "mongo/db/commands/map_reduce_gen.h" +#include "mongo/db/commands/map_reduce_out_options.h" +#include "mongo/db/database_name.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" namespace mongo::map_reduce_common { @@ -49,7 +58,7 @@ struct OutputOptions { OutputType outType; }; -OutputOptions parseOutputOptions(const std::string& dbname, const BSONObj& cmdObj); +OutputOptions parseOutputOptions(StringData dbname, const BSONObj& cmdObj); Status checkAuthForMapReduce(const BasicCommand* command, OperationContext* opCtx, diff --git a/src/mongo/db/commands/mr_test.cpp b/src/mongo/db/commands/mr_test.cpp index 38cc3f73f81bc..9310aadca4f58 100644 --- a/src/mongo/db/commands/mr_test.cpp +++ b/src/mongo/db/commands/mr_test.cpp @@ -27,34 +27,69 @@ * it in the license file. 
*/ +#include +#include #include #include +#include #include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/map_reduce_gen.h" +#include "mongo/db/commands/map_reduce_out_options.h" #include "mongo/db/commands/mr_common.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/logv2/log.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/rpc/factory.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/protocol.h" #include "mongo/scripting/dbdirectclient_factory.h" #include "mongo/scripting/engine.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -112,7 +147,7 @@ void _testConfigParseOutputOptions(const std::string& dbname, _compareOutputOptionField(dbname, cmdObjStr, "finalNamespace", - outputOptions.finalNamespace.ns(), + outputOptions.finalNamespace.toString_forTest(), expectedFinalNamespace); _compareOutputOptionField( dbname, cmdObjStr, "outNonAtomic", outputOptions.outNonAtomic, expectedOutNonAtomic); @@ -265,7 +300,8 @@ class MapReduceOpObserver : public OpObserverNoop { std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override; /** * Tracks the temporary collections mapReduces creates. 
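The mr_common.cpp and mr_test.cpp hunks above also illustrate the namespace-stringification convention this change rolls out: namespaces in user-facing error text go through `toStringForErrorMsg()`, while plain string conversions in unit tests use the `_forTest` accessors. A hedged sketch of the two call sites, adapted from the hunks above (the surrounding variables are assumed to be in scope):

```cpp
// Error path: render the namespace for a user-visible message.
uassert(ErrorCodes::InvalidNamespace,
        str::stream() << "Invalid 'out' namespace: " << nss.toStringForErrorMsg(),
        nss.isValid());

// Test-only path: the _forTest suffix makes it explicit that the raw string
// form of the namespace is only meant for unit-test comparisons.
ASSERT_EQUALS(expectedFinalNamespace, outputOptions.finalNamespace.toString_forTest());
```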
@@ -278,12 +314,12 @@ class MapReduceOpObserver : public OpObserverNoop { const OplogSlot& createOpTime, bool fromMigrate) override; - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) override; + CollectionDropType dropType, + bool markFromMigrate) override; // Hook for onInserts. Defaults to a no-op function but may be overridden to inject exceptions // while mapReduce inserts its results into the temporary output collection. @@ -323,7 +359,8 @@ void MapReduceOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { onInsertsFn(); } @@ -344,7 +381,8 @@ repl::OpTime MapReduceOpObserver::onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { // If the oplog is not disabled for this namespace, then we need to reserve an op time for the // drop. if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(opCtx, collectionName)) { @@ -460,7 +498,8 @@ Status MapReduceCommandTest::_runCommand(StringData mapCode, StringData reduceCo auto command = CommandHelpers::findCommand("mapReduce"); ASSERT(command) << "Unable to look up mapReduce command"; - auto request = OpMsgRequest::fromDBAndBody(inputNss.db(), _makeCmdObj(mapCode, reduceCode)); + auto request = + OpMsgRequest::fromDBAndBody(inputNss.db_forTest(), _makeCmdObj(mapCode, reduceCode)); auto replyBuilder = rpc::makeReplyBuilder(rpc::Protocol::kOpMsg); auto result = CommandHelpers::runCommandDirectly(_opCtx.get(), request); auto status = getStatusFromCommandResult(result); @@ -476,7 +515,7 @@ void MapReduceCommandTest::_assertTemporaryCollectionsAreDropped() { for (const auto& tempNss : _opObserver->tempNamespaces) { ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, _storage.getCollectionCount(_opCtx.get(), tempNss)) - << "mapReduce did not remove temporary collection on success: " << tempNss.ns(); + << "mapReduce did not remove temporary collection on success: " << tempNss.ns_forTest(); } } @@ -528,7 +567,7 @@ TEST_F(MapReduceCommandTest, PrimaryStepDownPreventsTemporaryCollectionDrops) { _opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kLastApplied); for (const auto& tempNss : _opObserver->tempNamespaces) { ASSERT_OK(_storage.getCollectionCount(_opCtx.get(), tempNss).getStatus()) - << "missing mapReduce temporary collection: " << tempNss; + << "missing mapReduce temporary collection: " << tempNss.toStringForErrorMsg(); } } @@ -545,7 +584,7 @@ TEST_F(MapReduceCommandTest, ReplacingExistingOutputCollectionPreservesIndexes) AutoGetCollection coll(_opCtx.get(), outputNss, MODE_X); ASSERT(coll); writeConflictRetry( - _opCtx.get(), "ReplacingExistingOutputCollectionPreservesIndexes", outputNss.ns(), [&] { + _opCtx.get(), "ReplacingExistingOutputCollectionPreservesIndexes", outputNss, [&] { WriteUnitOfWork wuow(_opCtx.get()); ASSERT_OK( coll.getWritableCollection(_opCtx.get()) @@ -572,7 +611,7 @@ TEST_F(MapReduceCommandTest, ReplacingExistingOutputCollectionPreservesIndexes) ASSERT_NOT_EQUALS( *options.uuid, *CollectionCatalog::get(_opCtx.get())->lookupUUIDByNSS(_opCtx.get(), outputNss)) - << "Output collection " << outputNss 
<< " was not replaced"; + << "Output collection " << outputNss.toStringForErrorMsg() << " was not replaced"; _assertTemporaryCollectionsAreDropped(); } diff --git a/src/mongo/db/commands/notify_sharding_event.idl b/src/mongo/db/commands/notify_sharding_event.idl index 19720db4eb228..3386709284403 100644 --- a/src/mongo/db/commands/notify_sharding_event.idl +++ b/src/mongo/db/commands/notify_sharding_event.idl @@ -32,18 +32,13 @@ global: cpp_namespace: "mongo" cpp_includes: - "mongo/client/connection_string.h" + - "mongo/db/commands/notify_sharding_event_utils.h" imports: - "mongo/db/basic_types.idl" - "mongo/s/sharding_types.idl" enums: - EventType: - description: "The type of sharding event" - type: string - values: - kDatabasesAdded : "databasesAdded" - CommitPhase: description: "Specifies the phase of the event being generated (For Sharding DDL commits described through 2-phase notifications)." @@ -75,7 +70,6 @@ structs: phase: description: "The sub-phase of the event being notified." type: CommitPhase - optional: true commands: _shardsvrNotifyShardingEvent: @@ -89,7 +83,11 @@ commands: reply_type: OkReply fields: eventType: - type: EventType + type: string + description: "The type of sharding event" + validator: { callback: "notify_sharding_event::validateEventType" } details: type: object_owned + description: "A descriptor of the sharding event + (to be serialized and interpreted based on the value of eventType)." diff --git a/src/mongo/db/commands/notify_sharding_event_utils.h b/src/mongo/db/commands/notify_sharding_event_utils.h new file mode 100644 index 0000000000000..29eef8c5920ba --- /dev/null +++ b/src/mongo/db/commands/notify_sharding_event_utils.h @@ -0,0 +1,50 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" + +namespace mongo { +namespace notify_sharding_event { + +static constexpr char kDatabasesAdded[] = "databasesAdded"; + +inline Status validateEventType(const std::string& eventType) { + if (eventType != kDatabasesAdded) { + return {ErrorCodes::UnsupportedShardingEventNotification, + "Unrecognized EventType: " + eventType}; + } + + return Status::OK(); +} + +} // namespace notify_sharding_event +} // namespace mongo diff --git a/src/mongo/db/commands/oplog_application_checks.cpp b/src/mongo/db/commands/oplog_application_checks.cpp index 5588fd37f9695..60e8e48f723b7 100644 --- a/src/mongo/db/commands/oplog_application_checks.cpp +++ b/src/mongo/db/commands/oplog_application_checks.cpp @@ -26,15 +26,40 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_check.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/builtin_roles.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/commands.h" #include "mongo/db/commands/oplog_application_checks.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/tenant_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { UUID OplogApplicationChecks::getUUIDFromOplogEntry(const BSONObj& oplogEntry) { @@ -43,7 +68,7 @@ UUID OplogApplicationChecks::getUUIDFromOplogEntry(const BSONObj& oplogEntry) { }; Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj& oplogEntry, AuthorizationSession* authSession, bool alwaysUpsert) { @@ -53,8 +78,9 @@ Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opC if (opType == "n"_sd) { // oplog notes require cluster permissions, and may not have a ns - if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::appendOplogNote)) { + if (!authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::appendOplogNote)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -93,7 +119,7 @@ Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opC // renameCollection commands must be run on the 'admin' database. Its arguments are // fully qualified namespaces. Catalog internals don't know the op produced by running // renameCollection was originally run on 'admin', so we must restore this. 
- dbNameForAuthCheck = DatabaseName(nss.tenantId(), "admin"); + dbNameForAuthCheck = DatabaseNameUtil::deserialize(nss.tenantId(), "admin"); } // TODO reuse the parse result for when we run() later. Note that when running, @@ -145,8 +171,8 @@ Status OplogApplicationChecks::checkOperationAuthorization(OperationContext* opC // It seems that 'db' isn't used anymore. Require all actions to prevent casual use. ActionSet allActions; allActions.addAllActions(); - if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyResource(), - allActions)) { + if (!authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forAnyResource(nss.tenantId()), allActions)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -222,14 +248,14 @@ Status OplogApplicationChecks::checkAuthForOperation(OperationContext* opCtx, const BSONObj& cmdObj, OplogApplicationValidity validity) { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); - if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::applyOps)) { + if (!authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::applyOps)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } if (validity == OplogApplicationValidity::kNeedsSuperuser) { std::vector universalPrivileges; - auth::generateUniversalPrivileges(&universalPrivileges); + auth::generateUniversalPrivileges(&universalPrivileges, dbName.tenantId()); if (!authSession->isAuthorizedForPrivileges(universalPrivileges)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } @@ -237,15 +263,15 @@ Status OplogApplicationChecks::checkAuthForOperation(OperationContext* opCtx, } if (validity == OplogApplicationValidity::kNeedsForceAndUseUUID) { if (!authSession->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), + ResourcePattern::forClusterResource(dbName.tenantId()), {ActionType::forceUUID, ActionType::useUUID})) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } validity = OplogApplicationValidity::kOk; } if (validity == OplogApplicationValidity::kNeedsUseUUID) { - if (!authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::useUUID)) { + if (!authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::useUUID)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } validity = OplogApplicationValidity::kOk; diff --git a/src/mongo/db/commands/oplog_application_checks.h b/src/mongo/db/commands/oplog_application_checks.h index 3152510a66b89..2554c5fa2ec42 100644 --- a/src/mongo/db/commands/oplog_application_checks.h +++ b/src/mongo/db/commands/oplog_application_checks.h @@ -30,6 +30,8 @@ #include #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/commands/oplog_note.cpp b/src/mongo/db/commands/oplog_note.cpp index 3993658620445..3f4b31e233fd0 100644 --- a/src/mongo/db/commands/oplog_note.cpp +++ b/src/mongo/db/commands/oplog_note.cpp @@ -28,26 +28,45 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands.h" - -#include "mongo/base/init.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include 
"mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -73,11 +92,11 @@ Status _performNoopWrite(OperationContext* opCtx, BSONObj msgObj, StringData not } // Its a proxy for being a primary passing "local" will cause it to return true on secondary - if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) { + if (!replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)) { return {ErrorCodes::NotWritablePrimary, "Not a primary"}; } - writeConflictRetry(opCtx, note, NamespaceString::kRsOplogNamespace.ns(), [&opCtx, &msgObj] { + writeConflictRetry(opCtx, note, NamespaceString::kRsOplogNamespace, [&opCtx, &msgObj] { WriteUnitOfWork uow(opCtx); opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(opCtx, msgObj); uow.commit(); @@ -111,11 +130,12 @@ class AppendOplogNoteCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::appendOplogNote)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::appendOplogNote)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/commands/parameters.cpp b/src/mongo/db/commands/parameters.cpp index 321cd9ed91c97..a4d4bb08b3df3 100644 --- a/src/mongo/db/commands/parameters.cpp +++ b/src/mongo/db/commands/parameters.cpp @@ -27,23 +27,58 @@ * it in the license file. 
*/ -#include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" #include "mongo/bson/mutable/document.h" -#include "mongo/client/replica_set_monitor.h" -#include "mongo/config.h" -#include "mongo/db/auth/authorization_manager.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/parameters_gen.h" #include "mongo/db/commands/parse_log_component_settings.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/server_parameter_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/kv/kv_engine.h" -#include "mongo/db/storage/storage_options.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/command_generic_argument.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -220,8 +255,8 @@ class CmdGet : public BasicCommand { const DatabaseName& dbName, const BSONObj& cmdObj) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::getParameter)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::getParameter)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -292,8 +327,8 @@ class CmdSet : public BasicCommand { const DatabaseName& dbName, const BSONObj& cmdObj) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::setParameter)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::setParameter)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/db/commands/parse_log_component_settings.cpp b/src/mongo/db/commands/parse_log_component_settings.cpp index 509fff201a926..a9a484f86a5a5 100644 --- a/src/mongo/db/commands/parse_log_component_settings.cpp +++ b/src/mongo/db/commands/parse_log_component_settings.cpp @@ -27,17 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/parse_log_component_settings.h" - #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/db/commands/parse_log_component_settings.h" #include "mongo/logv2/log_component.h" -#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/commands/parse_log_component_settings_test.cpp b/src/mongo/db/commands/parse_log_component_settings_test.cpp index 3b3ab9e37e9f8..c201259c27d5f 100644 --- a/src/mongo/db/commands/parse_log_component_settings_test.cpp +++ b/src/mongo/db/commands/parse_log_component_settings_test.cpp @@ -27,13 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/parse_log_component_settings.h" - -#include "mongo/db/jsobj.h" #include "mongo/logv2/log_component.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/commands/pipeline_command.cpp b/src/mongo/db/commands/pipeline_command.cpp index 337c3e5b0678f..bdcfd733307bb 100644 --- a/src/mongo/db/commands/pipeline_command.cpp +++ b/src/mongo/db/commands/pipeline_command.cpp @@ -28,24 +28,52 @@ */ #include - -#include "mongo/bson/bsonelement.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/create_collection.h" +#include "mongo/db/auth/privilege.h" #include "mongo/db/catalog/external_data_source_scope_guard.h" #include "mongo/db/commands.h" #include "mongo/db/commands/run_aggregate.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/external_data_source_option_gen.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/explain_verbosity_gen.h" #include "mongo/db/query/query_knobs_gen.h" -#include "mongo/idl/idl_parser.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/assert_util.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include 
"mongo/util/serialization_context.h" namespace mongo { namespace { @@ -81,13 +109,18 @@ class PipelineCommand final : public Command { OperationContext* opCtx, const OpMsgRequest& opMsgRequest, boost::optional explainVerbosity) override { + + SerializationContext serializationCtx = SerializationContext::stateCommandRequest(); + serializationCtx.setTenantIdSource(opMsgRequest.getValidatedTenantId() != boost::none); + const auto aggregationRequest = aggregation_request_helper::parseFromBSON( opCtx, DatabaseNameUtil::deserialize(opMsgRequest.getValidatedTenantId(), opMsgRequest.getDatabase()), opMsgRequest.body, explainVerbosity, - APIParameters::get(opCtx).getAPIStrict().value_or(false)); + APIParameters::get(opCtx).getAPIStrict().value_or(false), + serializationCtx); auto privileges = uassertStatusOK( auth::getPrivilegesForAggregate(AuthorizationSession::get(opCtx->getClient()), @@ -125,6 +158,14 @@ class PipelineCommand final : public Command { _liteParsedPipeline(_aggregationRequest), _privileges(std::move(privileges)) { auto externalDataSources = _aggregationRequest.getExternalDataSources(); + // Support collection-less aggregate commands without $_externalDataSources. + if (_aggregationRequest.getNamespace().isCollectionlessAggregateNS()) { + uassert(7604400, + "$_externalDataSources can't be used with the collectionless aggregate", + !externalDataSources.has_value()); + return; + } + uassert(7039000, "Either $_externalDataSources must always be present when enableComputeMode=" "true or must not when enableComputeMode=false", @@ -230,7 +271,8 @@ class PipelineCommand final : public Command { if (!_aggregationRequest.getExplain() && !_aggregationRequest.getExchange()) { query_request_helper::validateCursorResponse( reply->getBodyBuilder().asTempObj(), - _aggregationRequest.getNamespace().tenantId()); + _aggregationRequest.getNamespace().tenantId(), + _aggregationRequest.getSerializationContext()); } } diff --git a/src/mongo/db/commands/plan_cache_clear_command.cpp b/src/mongo/db/commands/plan_cache_clear_command.cpp index 7fa1bd499a250..d8b104479a026 100644 --- a/src/mongo/db/commands/plan_cache_clear_command.cpp +++ b/src/mongo/db/commands/plan_cache_clear_command.cpp @@ -28,24 +28,38 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include + +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/plan_cache_commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" -#include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/canonical_query_encoder.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/plan_cache_callbacks.h" -#include "mongo/db/query/plan_cache_key_factory.h" -#include "mongo/db/query/plan_ranker.h" -#include "mongo/db/query/query_utils.h" #include "mongo/db/query/sbe_plan_cache.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/unordered_set.h" +#include 
"mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -67,7 +81,7 @@ PlanCache* getPlanCache(OperationContext* opCtx, const CollectionPtr& collection Status clear(OperationContext* opCtx, const CollectionPtr& collection, PlanCache* planCache, - const std::string& ns, + const NamespaceString& nss, const BSONObj& cmdObj) { invariant(planCache); @@ -76,7 +90,7 @@ Status clear(OperationContext* opCtx, // - clear plans for single query shape when a query shape is described in the // command arguments. if (cmdObj.hasField("query")) { - auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, ns, cmdObj); + auto statusWithCQ = plan_cache_commands::canonicalize(opCtx, nss, cmdObj); if (!statusWithCQ.isOK()) { return statusWithCQ.getStatus(); } @@ -112,8 +126,7 @@ Status clear(OperationContext* opCtx, version, false /*matchSecondaryCollections*/); - LOGV2_DEBUG( - 23908, 1, "{namespace}: Cleared plan cache", "Cleared plan cache", "namespace"_attr = ns); + LOGV2_DEBUG(23908, 1, "{namespace}: Cleared plan cache", "Cleared plan cache", logAttrs(nss)); return Status::OK(); } @@ -185,7 +198,7 @@ bool PlanCacheClearCommand::run(OperationContext* opCtx, } auto planCache = getPlanCache(opCtx, ctx.getCollection()); - uassertStatusOK(clear(opCtx, ctx.getCollection(), planCache, nss.ns(), cmdObj)); + uassertStatusOK(clear(opCtx, ctx.getCollection(), planCache, nss, cmdObj)); return true; } diff --git a/src/mongo/db/commands/plan_cache_commands.cpp b/src/mongo/db/commands/plan_cache_commands.cpp index e35658442c7a4..4b8733664c5ec 100644 --- a/src/mongo/db/commands/plan_cache_commands.cpp +++ b/src/mongo/db/commands/plan_cache_commands.cpp @@ -27,16 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/commands/plan_cache_commands.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/commands/plan_cache_commands.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::plan_cache_commands { StatusWith> canonicalize(OperationContext* opCtx, - StringData ns, + const NamespaceString& nss, const BSONObj& cmdObj) { // query - required BSONElement queryElt = cmdObj.getField("query"); @@ -85,13 +97,18 @@ StatusWith> canonicalize(OperationContext* opCtx } // Create canonical query - auto findCommand = std::make_unique(NamespaceString{ns}); + auto findCommand = std::make_unique(nss); findCommand->setFilter(queryObj.getOwned()); findCommand->setSort(sortObj.getOwned()); findCommand->setProjection(projObj.getOwned()); findCommand->setCollation(collationObj.getOwned()); - const ExtensionsCallbackReal extensionsCallback( - opCtx, findCommand->getNamespaceOrUUID().nss().get_ptr()); + + tassert(ErrorCodes::BadValue, + "Unsupported type UUID for namespace", + findCommand->getNamespaceOrUUID().isNamespaceString()); + const ExtensionsCallbackReal extensionsCallback(opCtx, + &findCommand->getNamespaceOrUUID().nss()); + const boost::intrusive_ptr expCtx; auto statusWithCQ = CanonicalQuery::canonicalize(opCtx, diff --git a/src/mongo/db/commands/plan_cache_commands.h b/src/mongo/db/commands/plan_cache_commands.h index 0315a52c8dc03..4bd71b5b0c65a 100644 --- 
a/src/mongo/db/commands/plan_cache_commands.h +++ b/src/mongo/db/commands/plan_cache_commands.h @@ -29,11 +29,18 @@ #pragma once +#include +#include + +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/sbe_plan_cache.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/uuid.h" namespace mongo::plan_cache_commands { @@ -42,7 +49,7 @@ namespace mongo::plan_cache_commands { * that command represented as a CanonicalQuery. */ StatusWith> canonicalize(OperationContext* opCtx, - StringData ns, + const NamespaceString& nss, const BSONObj& cmdObj); /** diff --git a/src/mongo/db/commands/plan_cache_commands_test.cpp b/src/mongo/db/commands/plan_cache_commands_test.cpp index 7513b9320a40b..c371e0e4ea788 100644 --- a/src/mongo/db/commands/plan_cache_commands_test.cpp +++ b/src/mongo/db/commands/plan_cache_commands_test.cpp @@ -27,13 +27,19 @@ * it in the license file. */ +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_mock.h" #include "mongo/db/commands/plan_cache_commands.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/plan_cache_key_factory.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -49,47 +55,46 @@ PlanCacheKey makeClassicKey(const CanonicalQuery& cq) { TEST(PlanCacheCommandsTest, CannotCanonicalizeWithMissingQueryField) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); - ASSERT_NOT_OK( - plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{}")).getStatus()); + ASSERT_NOT_OK(plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{}")).getStatus()); } TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenQueryFieldIsNotObject) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); - ASSERT_NOT_OK(plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: 1}")) - .getStatus()); + ASSERT_NOT_OK( + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: 1}")).getStatus()); } TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenSortFieldIsNotObject) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); ASSERT_NOT_OK( - plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {}, sort: 1}")) + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: {}, sort: 1}")) .getStatus()); } TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenProjectionFieldIsNotObject) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); - ASSERT_NOT_OK(plan_cache_commands::canonicalize( - opCtx.get(), nss.ns(), fromjson("{query: {}, projection: 1}")) - .getStatus()); + ASSERT_NOT_OK( + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: {}, projection: 1}")) + .getStatus()); } TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenCollationFieldIsNotObject) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); - ASSERT_NOT_OK(plan_cache_commands::canonicalize( - opCtx.get(), nss.ns(), 
fromjson("{query: {}, collation: 1}")) - .getStatus()); + ASSERT_NOT_OK( + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: {}, collation: 1}")) + .getStatus()); } TEST(PlanCacheCommandsTest, CannotCanonicalizeWhenSortObjectIsMalformed) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); - ASSERT_NOT_OK(plan_cache_commands::canonicalize( - opCtx.get(), nss.ns(), fromjson("{query: {}, sort: {a: 0}}")) - .getStatus()); + ASSERT_NOT_OK( + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: {}, sort: {a: 0}}")) + .getStatus()); } TEST(PlanCacheCommandsTest, CanCanonicalizeWithValidQuery) { @@ -98,13 +103,13 @@ TEST(PlanCacheCommandsTest, CanCanonicalizeWithValidQuery) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); auto statusWithCQ = - plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}")); + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: {a: 1, b: 1}}")); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr query = std::move(statusWithCQ.getValue()); // Equivalent query should generate same key. statusWithCQ = - plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {b: 3, a: 4}}")); + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: {b: 3, a: 4}}")); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr equivQuery = std::move(statusWithCQ.getValue()); ASSERT_EQUALS(makeClassicKey(*query), makeClassicKey(*equivQuery)); @@ -116,13 +121,13 @@ TEST(PlanCacheCommandsTest, SortQueryResultsInDifferentPlanCacheKeyFromUnsorted) QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); auto statusWithCQ = - plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}")); + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: {a: 1, b: 1}}")); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr query = std::move(statusWithCQ.getValue()); // Sort query should generate different key from unsorted query. 
statusWithCQ = plan_cache_commands::canonicalize( - opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}")); + opCtx.get(), nss, fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}")); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr sortQuery = std::move(statusWithCQ.getValue()); ASSERT_NOT_EQUALS(makeClassicKey(*query), makeClassicKey(*sortQuery)); @@ -135,13 +140,13 @@ TEST(PlanCacheCommandsTest, SortsAreProperlyDelimitedInPlanCacheKey) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); auto statusWithCQ = plan_cache_commands::canonicalize( - opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}")); + opCtx.get(), nss, fromjson("{query: {a: 1, b: 1}, sort: {a: 1, b: 1}}")); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr sortQuery1 = std::move(statusWithCQ.getValue()); // Confirm sort arguments are properly delimited (SERVER-17158) statusWithCQ = plan_cache_commands::canonicalize( - opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}")); + opCtx.get(), nss, fromjson("{query: {a: 1, b: 1}, sort: {aab: 1}}")); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr sortQuery2 = std::move(statusWithCQ.getValue()); ASSERT_NOT_EQUALS(makeClassicKey(*sortQuery1), makeClassicKey(*sortQuery2)); @@ -153,12 +158,12 @@ TEST(PlanCacheCommandsTest, ProjectQueryResultsInDifferentPlanCacheKeyFromUnproj QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); auto statusWithCQ = - plan_cache_commands::canonicalize(opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}}")); + plan_cache_commands::canonicalize(opCtx.get(), nss, fromjson("{query: {a: 1, b: 1}}")); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr query = std::move(statusWithCQ.getValue()); statusWithCQ = plan_cache_commands::canonicalize( - opCtx.get(), nss.ns(), fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}")); + opCtx.get(), nss, fromjson("{query: {a: 1, b: 1}, projection: {_id: 0, a: 1}}")); ASSERT_OK(statusWithCQ.getStatus()); std::unique_ptr projectionQuery = std::move(statusWithCQ.getValue()); ASSERT_NOT_EQUALS(makeClassicKey(*query), makeClassicKey(*projectionQuery)); diff --git a/src/mongo/db/commands/profile_common.cpp b/src/mongo/db/commands/profile_common.cpp index 6a33e66a561f8..aa7f80c88149a 100644 --- a/src/mongo/db/commands/profile_common.cpp +++ b/src/mongo/db/commands/profile_common.cpp @@ -27,17 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/commands/profile_common.h" #include "mongo/db/commands/profile_gen.h" -#include "mongo/db/curop.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/profile_filter_impl.h" +#include "mongo/db/profile_filter.h" +#include "mongo/db/server_options.h" #include "mongo/idl/idl_parser.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -63,8 +74,8 @@ Status ProfileCmdBase::checkAuthForOperation(OperationContext* opCtx, } } - return authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbName.db()), ActionType::enableProfiler) + return authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbName), + ActionType::enableProfiler) ? Status::OK() : Status(ErrorCodes::Unauthorized, "unauthorized"); } diff --git a/src/mongo/db/commands/profile_common.h b/src/mongo/db/commands/profile_common.h index f09bce4e99344..84c115f75e5f5 100644 --- a/src/mongo/db/commands/profile_common.h +++ b/src/mongo/db/commands/profile_common.h @@ -29,11 +29,21 @@ #pragma once +#include #include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/read_write_concern_defaults_server_status.cpp b/src/mongo/db/commands/read_write_concern_defaults_server_status.cpp index 8fbe94aab1b7b..9892e68f67b59 100644 --- a/src/mongo/db/commands/read_write_concern_defaults_server_status.cpp +++ b/src/mongo/db/commands/read_write_concern_defaults_server_status.cpp @@ -27,12 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands/rwc_defaults_commands_gen.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" namespace mongo { namespace { @@ -42,12 +47,12 @@ class ReadWriteConcernDefaultsServerStatus final : public ServerStatusSection { ReadWriteConcernDefaultsServerStatus() : ServerStatusSection("defaultRWConcern") {} bool includeByDefault() const override { - return !serverGlobalParams.clusterRole.exclusivelyHasShardRole(); + return !serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer); } BSONObj generateSection(OperationContext* opCtx, const BSONElement& configElement) const override { - if (serverGlobalParams.clusterRole.exclusivelyHasShardRole() || + if (serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer) || !repl::ReplicationCoordinator::get(opCtx)->isReplEnabled()) { return {}; } diff --git a/src/mongo/db/commands/reap_logical_session_cache_now.cpp b/src/mongo/db/commands/reap_logical_session_cache_now.cpp index 2d70454db914c..955e179fe927a 100644 --- a/src/mongo/db/commands/reap_logical_session_cache_now.cpp +++ b/src/mongo/db/commands/reap_logical_session_cache_now.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_cache.h" namespace mongo { diff --git a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp index 4e7aabd5f2468..f703a1cdb710f 100644 --- a/src/mongo/db/commands/refresh_logical_session_cache_now.cpp +++ b/src/mongo/db/commands/refresh_logical_session_cache_now.cpp @@ -27,13 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_cache.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/refresh_sessions_command.cpp b/src/mongo/db/commands/refresh_sessions_command.cpp index 6ea2b09d36cd6..c609937cac376 100644 --- a/src/mongo/db/commands/refresh_sessions_command.cpp +++ b/src/mongo/db/commands/refresh_sessions_command.cpp @@ -27,16 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/sessions_commands_gen.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/rename_collection_cmd.cpp b/src/mongo/db/commands/rename_collection_cmd.cpp index 7a2c20a53d8ce..7283c074f68c6 100644 --- a/src/mongo/db/commands/rename_collection_cmd.cpp +++ b/src/mongo/db/commands/rename_collection_cmd.cpp @@ -28,25 +28,27 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/client/dbclient_cursor.h" -#include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/index_catalog.h" +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/catalog/rename_collection.h" -#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/rename_collection_common.h" #include "mongo/db/commands/rename_collection_gen.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/index/index_descriptor.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/ops/insert.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" -#include "mongo/util/scopeguard.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -120,9 +122,7 @@ class CmdRenameCollection final : public TypedCommand { void doCheckAuthorization(OperationContext* opCtx) const override { uassertStatusOK(rename_collection::checkAuthForRenameCollectionCommand( - opCtx->getClient(), - request().getDbName().toStringWithTenantId(), - request().toBSON(BSONObj()))); + opCtx->getClient(), request())); } }; diff --git a/src/mongo/db/commands/rename_collection_common.cpp b/src/mongo/db/commands/rename_collection_common.cpp index ecdad33ec36eb..f27526b0216bf 100644 --- a/src/mongo/db/commands/rename_collection_common.cpp +++ b/src/mongo/db/commands/rename_collection_common.cpp @@ -29,41 +29,39 @@ #include "mongo/db/commands/rename_collection_common.h" -#include -#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/client.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/stdx/variant.h" namespace mongo { namespace rename_collection { -Status checkAuthForRenameCollectionCommand(Client* client, - const std::string& dbname, - const BSONObj& cmdObj) { - const auto sourceNsElt = cmdObj["renameCollection"]; - const auto targetNsElt = cmdObj["to"]; - - uassert(ErrorCodes::TypeMismatch, - "'renameCollection' must be of type String", - 
sourceNsElt.type() == BSONType::String); - uassert(ErrorCodes::TypeMismatch, - "'to' must be of type String", - targetNsElt.type() == BSONType::String); +Status checkAuthForRenameCollectionCommand(Client* client, const RenameCollectionCommand& request) { + const auto& sourceNS = request.getCommandParameter(); + const auto& targetNS = request.getTo(); + const bool dropTarget = [&] { + const auto dropTarget = request.getDropTarget(); + if (stdx::holds_alternative(dropTarget)) { + return stdx::get(dropTarget); + } - const NamespaceString sourceNS(sourceNsElt.valueStringData()); - const NamespaceString targetNS(targetNsElt.valueStringData()); - bool dropTarget = cmdObj["dropTarget"].trueValue(); + // UUID alternative is "trueish" + return true; + }(); - if (sourceNS.db() == targetNS.db() && sourceNS.isNormalCollection() && + if (sourceNS.dbName() == targetNS.dbName() && sourceNS.isNormalCollection() && targetNS.isNormalCollection()) { - bool canRename = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(sourceNS.db()), ActionType::renameCollectionSameDB); + const bool canRename = AuthorizationSession::get(client)->isAuthorizedForActionsOnResource( + ResourcePattern::forDatabaseName(sourceNS.dbName()), + ActionType::renameCollectionSameDB); bool canDropTargetIfNeeded = true; if (dropTarget) { diff --git a/src/mongo/db/commands/rename_collection_common.h b/src/mongo/db/commands/rename_collection_common.h index 3290c344e5651..2eeee9848f5e9 100644 --- a/src/mongo/db/commands/rename_collection_common.h +++ b/src/mongo/db/commands/rename_collection_common.h @@ -31,7 +31,10 @@ #include +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/commands/rename_collection_gen.h" namespace mongo { @@ -39,9 +42,7 @@ class Client; namespace rename_collection { -Status checkAuthForRenameCollectionCommand(Client* client, - const std::string& dbname, - const BSONObj& cmdObj); +Status checkAuthForRenameCollectionCommand(Client* client, const RenameCollectionCommand& request); } // namespace rename_collection } // namespace mongo diff --git a/src/mongo/db/commands/resize_oplog.cpp b/src/mongo/db/commands/resize_oplog.cpp index 0c96a36e5d4d1..c38b191dd5c0c 100644 --- a/src/mongo/db/commands/resize_oplog.cpp +++ b/src/mongo/db/commands/resize_oplog.cpp @@ -29,17 +29,36 @@ #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/commands/resize_oplog_gen.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/util/str.h" +#include 
"mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -68,11 +87,12 @@ class CmdReplSetResizeOplog : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { AuthorizationSession* authzSession = AuthorizationSession::get(opCtx->getClient()); - if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::replSetResizeOplog)) { + if (authzSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::replSetResizeOplog)) { return Status::OK(); } return Status(ErrorCodes::Unauthorized, "Unauthorized"); @@ -89,7 +109,7 @@ class CmdReplSetResizeOplog : public BasicCommand { auto params = ReplSetResizeOplogRequest::parse(IDLParserContext("replSetResizeOplog"), jsobj); - return writeConflictRetry(opCtx, "replSetResizeOplog", coll->ns().ns(), [&] { + return writeConflictRetry(opCtx, "replSetResizeOplog", coll->ns(), [&] { WriteUnitOfWork wunit(opCtx); if (auto sizeMB = params.getSize()) { diff --git a/src/mongo/db/commands/rotate_certificates_command.cpp b/src/mongo/db/commands/rotate_certificates_command.cpp index 65fd79fbaf1cf..8c3008a83dd4a 100644 --- a/src/mongo/db/commands/rotate_certificates_command.cpp +++ b/src/mongo/db/commands/rotate_certificates_command.cpp @@ -27,13 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/config.h" +#include "mongo/base/error_codes.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/rotate_certificates_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/ssl_manager.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -86,8 +99,9 @@ class RotateCertificatesCmd final : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::rotateCertificates)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::rotateCertificates)); } }; diff --git a/src/mongo/db/commands/run_aggregate.cpp b/src/mongo/db/commands/run_aggregate.cpp index c2f2701cd42f5..3b74f2a74dc97 100644 --- a/src/mongo/db/commands/run_aggregate.cpp +++ b/src/mongo/db/commands/run_aggregate.cpp @@ -29,72 +29,135 @@ #include "mongo/db/commands/run_aggregate.h" +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" 
+#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/read_preference.h" +#include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/external_data_source_scope_guard.h" -#include "mongo/db/change_stream_change_collection_manager.h" -#include "mongo/db/change_stream_pre_images_collection_manager.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/change_stream_serverless_helpers.h" +#include "mongo/db/client.h" +#include "mongo/db/clientcursor.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/cursor_manager.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/exec/disk_use_options_gen.h" -#include "mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/working_set_common.h" #include "mongo/db/fle_crud.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/change_stream_invalidation_info.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_exchange.h" #include "mongo/db/pipeline/document_source_geo_near.h" -#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/pipeline_d.h" #include "mongo/db/pipeline/plan_executor_pipeline.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/search_helper.h" -#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collection_query_info.h" #include "mongo/db/query/cqf_command_utils.h" #include "mongo/db/query/cqf_get_executor.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/explain.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/find_common.h" -#include "mongo/db/query/get_executor.h" +#include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_summary_stats.h" -#include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/query_planner_common.h" -#include "mongo/db/query/telemetry.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/query_stats.h" +#include "mongo/db/query/query_stats_aggregate_key_generator.h" +#include "mongo/db/query/query_stats_key_generator.h" #include "mongo/db/read_concern.h" -#include "mongo/db/repl/oplog.h" +#include "mongo/db/read_write_concern_provenance.h" 
#include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/speculative_majority_read_info.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/query_analysis_writer.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/db/service_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_options.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/views/resolved_view.h" #include "mongo/db/views/view.h" #include "mongo/db/views/view_catalog_helpers.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/query_analysis_sampler_util.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" +#include "mongo/util/synchronized_value.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -154,11 +217,13 @@ bool handleCursorCommand(OperationContext* opCtx, invariant(cursors[idx]); BSONObjBuilder cursorResult; - appendCursorResponseObject(cursors[idx]->cursorid(), - nsForCursor, - BSONArray(), - cursors[idx]->getExecutor()->getExecutorType(), - &cursorResult); + appendCursorResponseObject( + cursors[idx]->cursorid(), + nsForCursor, + BSONArray(), + cursors[idx]->getExecutor()->getExecutorType(), + &cursorResult, + SerializationContext::stateCommandReply(request.getSerializationContext())); cursorResult.appendBool("ok", 1); cursorsBuilder.append(cursorResult.obj()); @@ -226,7 +291,6 @@ bool handleCursorCommand(OperationContext* opCtx, auto&& [stats, _] = explainer.getWinningPlanStats(ExplainOptions::Verbosity::kExecStats); LOGV2_WARNING(23799, - "Aggregate command executor error: {error}, stats: {stats}, cmd: {cmd}", "Aggregate command executor error", "error"_attr = exception.toStatus(), "stats"_attr = redact(stats), @@ -289,7 +353,10 @@ bool handleCursorCommand(OperationContext* opCtx, } const CursorId cursorId = cursor ? 
cursor->cursorid() : 0LL; - responseBuilder.done(cursorId, nsForCursor); + responseBuilder.done( + cursorId, + nsForCursor, + SerializationContext::stateCommandReply(request.getSerializationContext())); auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx); metricsCollector.incrementDocUnitsReturned(curOp->getNS(), docUnitsReturned); @@ -331,8 +398,9 @@ StatusWith> resolveInvolvedNames auto resolveViewDefinition = [&](const NamespaceString& ns) -> Status { auto resolvedView = view_catalog_helpers::resolveView(opCtx, catalog, ns, boost::none); if (!resolvedView.isOK()) { - return resolvedView.getStatus().withContext( - str::stream() << "Failed to resolve view '" << involvedNs.ns()); + return resolvedView.getStatus().withContext(str::stream() + << "Failed to resolve view '" + << involvedNs.toStringForErrorMsg()); } auto&& underlyingNs = resolvedView.getValue().getNamespace(); @@ -426,7 +494,7 @@ Status collatorCompatibleWithPipeline(OperationContext* opCtx, if (!CollatorInterface::collatorsMatch(view->defaultCollator(), collator)) { return {ErrorCodes::OptionNotSupportedOnView, str::stream() << "Cannot override a view's default collation" - << potentialViewNs.ns()}; + << potentialViewNs.toStringForErrorMsg()}; } } return Status::OK(); @@ -450,18 +518,6 @@ boost::intrusive_ptr makeExpressionContext( allowDiskUseByDefault.load()); expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp"; expCtx->collationMatchesDefault = collationMatchesDefault; - - // If the request explicitly specified NOT to use v2 resume tokens for change streams, set this - // on the expCtx. This can happen if a the request originated from 6.0 mongos, or in test mode. - if (request.getGenerateV2ResumeTokens().has_value()) { - // We only ever expect an explicit $_generateV2ResumeTokens to be false. - uassert(6528200, "Invalid request for v2 tokens", !request.getGenerateV2ResumeTokens()); - expCtx->changeStreamTokenVersion = 1; - } - - // Set the value of $$USER_ROLES for the aggregation. - expCtx->setUserRoles(); - return expCtx; } @@ -656,6 +712,95 @@ std::vector> createLegacyEx return execs; } +Status runAggregateOnView(OperationContext* opCtx, + const NamespaceString& origNss, + const AggregateCommandRequest& request, + const MultipleCollectionAccessor& collections, + boost::optional> collatorToUse, + const ViewDefinition* view, + const boost::intrusive_ptr& expCtx, + std::shared_ptr catalog, + const PrivilegeVector& privileges, + CurOp* curOp, + rpc::ReplyBuilderInterface* result, + const std::function& resetContextFn) { + auto nss = request.getNamespace(); + checkCollectionUUIDMismatch( + opCtx, nss, collections.getMainCollection(), request.getCollectionUUID()); + + uassert(ErrorCodes::CommandNotSupportedOnView, + "mapReduce on a view is not supported", + !request.getIsMapReduceCommand()); + + // Check that the default collation of 'view' is compatible with the operation's + // collation. The check is skipped if the request did not specify a collation. + if (!request.getCollation().get_value_or(BSONObj()).isEmpty()) { + invariant(collatorToUse); // Should already be resolved at this point. 
+ if (!CollatorInterface::collatorsMatch(view->defaultCollator(), collatorToUse->get()) && + !view->timeseries()) { + + return {ErrorCodes::OptionNotSupportedOnView, + "Cannot override a view's default collation"}; + } + } + + // Queries on timeseries views may specify non-default collation whereas queries + // on all other types of views must match the default collator (the collation use + // to originally create that collections). Thus in the case of operations on TS + // views, we use the request's collation. + auto timeSeriesCollator = view->timeseries() ? request.getCollation() : boost::none; + + auto resolvedView = + uassertStatusOK(view_catalog_helpers::resolveView(opCtx, catalog, nss, timeSeriesCollator)); + + // With the view & collation resolved, we can relinquish locks. + resetContextFn(); + + // Set this operation's shard version for the underlying collection to unsharded. + // This is prerequisite for future shard versioning checks. + boost::optional scopeSetShardRole; + if (!serverGlobalParams.clusterRole.has(ClusterRole::None)) { + scopeSetShardRole.emplace(opCtx, + resolvedView.getNamespace(), + ShardVersion::UNSHARDED() /* shardVersion */, + boost::none /* databaseVersion */); + }; + uassert(std::move(resolvedView), + "Explain of a resolved view must be executed by mongos", + !ShardingState::get(opCtx)->enabled() || !request.getExplain()); + + // Parse the resolved view into a new aggregation request. + auto newRequest = resolvedView.asExpandedViewAggregation(request); + auto newCmd = aggregation_request_helper::serializeToCommandObj(newRequest); + + auto status{Status::OK()}; + try { + status = runAggregate(opCtx, origNss, newRequest, newCmd, privileges, result); + } catch (const ExceptionForCat& ex) { + // Since we expect the view to be UNSHARDED, if we reached to this point there are + // two possibilities: + // 1. The shard doesn't know what its shard version/state is and needs to recover + // it (in which case we throw so that the shard can run recovery) + // 2. The collection references by the view is actually SHARDED, in which case the + // router must execute it + if (const auto staleInfo{ex.extraInfo()}) { + uassert(std::move(resolvedView), + "Resolved views on sharded collections must be executed by mongos", + !staleInfo->getVersionWanted()); + } + throw; + } + + { + // Set the namespace of the curop back to the view namespace so ctx records + // stats on this view namespace on destruction. + stdx::lock_guard lk(*opCtx->getClient()); + curOp->setNS_inlock(nss); + } + + return status; +} + } // namespace Status runAggregate(OperationContext* opCtx, @@ -695,7 +840,10 @@ Status runAggregate(OperationContext* opCtx, options.isInitialResponse = true; CursorResponseBuilder responseBuilder(result, options); responseBuilder.setWasStatementExecuted(true); - responseBuilder.done(0LL, origNss); + responseBuilder.done( + 0LL, + origNss, + SerializationContext::stateCommandReply(request.getSerializationContext())); return Status::OK(); } } @@ -756,15 +904,6 @@ Status runAggregate(OperationContext* opCtx, collections.clear(); }; - auto registerTelemetry = [&]() -> void { - // Register telemetry. Exclude queries against collections with encrypted fields. - // We still collect telemetry on collection-less aggregations. 
- if (!(ctx && ctx->getCollection() && - ctx->getCollection()->getCollectionOptions().encryptedFieldConfig)) { - telemetry::registerAggRequest(request, opCtx); - } - }; - std::vector> execs; boost::intrusive_ptr expCtx; auto curOp = CurOp::get(opCtx); @@ -795,28 +934,23 @@ Status runAggregate(OperationContext* opCtx, nss = NamespaceString::kRsOplogNamespace; // In case of serverless the change stream will be opened on the change collection. - if (change_stream_serverless_helpers::isChangeCollectionsModeActive()) { + const bool isServerless = change_stream_serverless_helpers::isServerlessEnvironment(); + if (isServerless) { const auto tenantId = change_stream_serverless_helpers::resolveTenantId(origNss.tenantId()); uassert(ErrorCodes::BadValue, "Change streams cannot be used without tenant id", tenantId); - - uassert(ErrorCodes::ChangeStreamNotEnabled, - "Change streams must be enabled before being used.", - change_stream_serverless_helpers::isChangeStreamEnabled(opCtx, *tenantId)); - - nss = NamespaceString::makeChangeCollectionNSS(tenantId); } // Assert that a change stream on the config server is always opened on the oplog. - tassert( - 6763400, - str::stream() << "Change stream was unexpectedly opened on the namespace: " << nss - << " in the config server", - !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || nss.isOplog()); + tassert(6763400, + str::stream() << "Change stream was unexpectedly opened on the namespace: " + << nss.toStringForErrorMsg() << " in the config server", + !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || + nss.isOplog()); // Upgrade and wait for read concern if necessary. _adjustChangeStreamReadConcern(opCtx); @@ -827,25 +961,29 @@ Status runAggregate(OperationContext* opCtx, auto view = catalog->lookupView(opCtx, origNss); uassert(ErrorCodes::CommandNotSupportedOnView, str::stream() << "Cannot run aggregation on timeseries with namespace " - << origNss.ns(), + << origNss.toStringForErrorMsg(), !view || !view->timeseries()); uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() - << "Namespace " << origNss.ns() << " is a view, not a collection", + str::stream() << "Namespace " << origNss.toStringForErrorMsg() + << " is a view, not a collection", !view); } // If the user specified an explicit collation, adopt it; otherwise, use the simple // collation. We do not inherit the collection's default collation or UUID, since // the stream may be resuming from a point before the current UUID existed. - auto [collator, match] = PipelineD::resolveCollator( + auto [collator, match] = resolveCollator( opCtx, request.getCollation().get_value_or(BSONObj()), CollectionPtr()); collatorToUse.emplace(std::move(collator)); collatorToUseMatchesDefault = match; // Obtain collection locks on the execution namespace; that is, the oplog. 
initContext(auto_get_collection::ViewMode::kViewsForbidden); - registerTelemetry(); + uassert(ErrorCodes::ChangeStreamNotEnabled, + "Change streams must be enabled before being used", + !isServerless || + change_stream_serverless_helpers::isChangeStreamEnabled(opCtx, + *nss.tenantId())); } else if (nss.isCollectionlessAggregateNS() && pipelineInvolvedNamespaces.empty()) { uassert(4928901, str::stream() << AggregateCommandRequest::kCollectionUUIDFieldName @@ -858,114 +996,115 @@ Status runAggregate(OperationContext* opCtx, Top::LockType::NotLocked, AutoStatsTracker::LogMode::kUpdateTopAndCurOp, 0); - auto [collator, match] = PipelineD::resolveCollator( + auto [collator, match] = resolveCollator( opCtx, request.getCollation().get_value_or(BSONObj()), CollectionPtr()); collatorToUse.emplace(std::move(collator)); collatorToUseMatchesDefault = match; tassert(6235101, "A collection-less aggregate should not take any locks", ctx == boost::none); - registerTelemetry(); } else { // This is a regular aggregation. Lock the collection or view. initContext(auto_get_collection::ViewMode::kViewsPermitted); - registerTelemetry(); - auto [collator, match] = - PipelineD::resolveCollator(opCtx, - request.getCollation().get_value_or(BSONObj()), - collections.getMainCollection()); + auto [collator, match] = resolveCollator(opCtx, + request.getCollation().get_value_or(BSONObj()), + collections.getMainCollection()); collatorToUse.emplace(std::move(collator)); collatorToUseMatchesDefault = match; if (collections.hasMainCollection()) { uuid = collections.getMainCollection()->uuid(); } } + if (request.getResumeAfter()) { + uassert(ErrorCodes::InvalidPipelineOperator, + "$_resumeAfter is not supported on view", + !ctx->getView()); + const auto& collection = ctx->getCollection(); + const bool isClusteredCollection = collection && collection->isClustered(); + uassertStatusOK(query_request_helper::validateResumeAfter(*request.getResumeAfter(), + isClusteredCollection)); + } + + auto parsePipeline = [&](std::unique_ptr collator) { + expCtx = + makeExpressionContext(opCtx, + request, + std::move(collator), + uuid, + collatorToUseMatchesDefault, + collections.getMainCollection() + ? collections.getMainCollection()->getCollectionOptions() + : boost::optional(boost::none)); + + // If any involved collection contains extended-range data, set a flag which individual + // DocumentSource parsers can check. + collections.forEach([&](const CollectionPtr& coll) { + if (coll->getRequiresTimeseriesExtendedRangeSupport()) + expCtx->setRequiresTimeseriesExtendedRangeSupport(true); + }); + + expCtx->startExpressionCounters(); + auto pipeline = Pipeline::parse(request.getPipeline(), expCtx); + curOp->beginQueryPlanningTimer(); + expCtx->stopExpressionCounters(); + + return std::make_pair(expCtx, std::move(pipeline)); + }; // If this is a view, resolve it by finding the underlying collection and stitching view // pipelines and this request's pipeline together. We then release our locks before // recursively calling runAggregate(), which will re-acquire locks on the underlying // collection. (The lock must be released because recursively acquiring locks on the // database will prohibit yielding.) 
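The comment above describes the flow that this refactor moves into the new runAggregateOnView() helper: resolve the view, stitch the view's pipeline in front of the request's pipeline, release the locks, and recursively run the aggregate against the backing collection. As a rough illustration of just the stitching step, here is a minimal standalone C++ sketch; FakeResolvedView, FakeAggRequest and expandViewAggregation are hypothetical stand-ins for this note only, not the real ResolvedView / AggregateCommandRequest APIs used in the diff.

```cpp
// Standalone sketch (not MongoDB code) of the view-expansion idea described above:
// the view's own stages go first, the user's stages follow, and the expanded request
// is re-targeted at the backing collection before being run again from the top.
#include <iostream>
#include <string>
#include <vector>

struct FakeResolvedView {                  // hypothetical stand-in for ResolvedView
    std::string backingNamespace;          // collection the view reads from
    std::vector<std::string> viewPipeline; // stages baked into the view definition
};

struct FakeAggRequest {                    // hypothetical stand-in for AggregateCommandRequest
    std::string namespaceString;
    std::vector<std::string> pipeline;
};

// Mirrors the spirit of asExpandedViewAggregation(): view stages, then user stages,
// addressed at the underlying collection.
FakeAggRequest expandViewAggregation(const FakeResolvedView& view, const FakeAggRequest& request) {
    FakeAggRequest expanded;
    expanded.namespaceString = view.backingNamespace;
    expanded.pipeline = view.viewPipeline;
    expanded.pipeline.insert(expanded.pipeline.end(), request.pipeline.begin(), request.pipeline.end());
    return expanded;
}

int main() {
    FakeResolvedView view{"test.coll", {"{$match: {hidden: false}}"}};
    FakeAggRequest request{"test.myView", {"{$group: {_id: '$a'}}"}};
    FakeAggRequest expanded = expandViewAggregation(view, request);
    // Prints: run against test.coll with 2 stages
    std::cout << "run against " << expanded.namespaceString << " with "
              << expanded.pipeline.size() << " stages\n";
    return 0;
}
```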
- if (ctx && ctx->getView() && !liteParsedPipeline.startsWithCollStats()) { - invariant(nss != NamespaceString::kRsOplogNamespace); - invariant(!nss.isCollectionlessAggregateNS()); - - checkCollectionUUIDMismatch( - opCtx, nss, collections.getMainCollection(), request.getCollectionUUID()); - - uassert(ErrorCodes::CommandNotSupportedOnView, - "mapReduce on a view is not supported", - !request.getIsMapReduceCommand()); - - // Check that the default collation of 'view' is compatible with the operation's - // collation. The check is skipped if the request did not specify a collation. - if (!request.getCollation().get_value_or(BSONObj()).isEmpty()) { - invariant(collatorToUse); // Should already be resolved at this point. - if (!CollatorInterface::collatorsMatch(ctx->getView()->defaultCollator(), - collatorToUse->get()) && - !ctx->getView()->timeseries()) { - - return {ErrorCodes::OptionNotSupportedOnView, - "Cannot override a view's default collation"}; - } - } - - // Queries on timeseries views may specify non-default collation whereas queries - // on all other types of views must match the default collator (the collation use - // to originally create that collections). Thus in the case of operations on TS - // views, we use the request's collation. - auto timeSeriesCollator = - ctx->getView()->timeseries() ? request.getCollation() : boost::none; - - auto resolvedView = uassertStatusOK( - view_catalog_helpers::resolveView(opCtx, catalog, nss, timeSeriesCollator)); - - // With the view & collation resolved, we can relinquish locks. - resetContext(); - - // Set this operation's shard version for the underlying collection to unsharded. - // This is prerequisite for future shard versioning checks. - boost::optional scopeSetShardRole; - if (!serverGlobalParams.clusterRole.has(ClusterRole::None)) { - scopeSetShardRole.emplace(opCtx, - resolvedView.getNamespace(), - ShardVersion::UNSHARDED() /* shardVersion */, - boost::none /* databaseVersion */); - }; - uassert(std::move(resolvedView), - "Explain of a resolved view must be executed by mongos", - !ShardingState::get(opCtx)->enabled() || !request.getExplain()); - - // Parse the resolved view into a new aggregation request. - auto newRequest = resolvedView.asExpandedViewAggregation(request); - auto newCmd = aggregation_request_helper::serializeToCommandObj(newRequest); - - auto status{Status::OK()}; + // We do not need to expand the view pipeline when there is a $collStats stage, as + // $collStats is supported on a view namespace. For a time-series collection, however, the + // view is abstracted out for the users, so we needed to resolve the namespace to get the + // underlying bucket collection. + if (ctx && ctx->getView() && + (!liteParsedPipeline.startsWithCollStats() || ctx->getView()->timeseries())) { try { - status = runAggregate(opCtx, origNss, newRequest, newCmd, privileges, result); - } catch (const ExceptionForCat& ex) { - // Since we expect the view to be UNSHARDED, if we reached to this point there are - // two possibilities: - // 1. The shard doesn't know what its shard version/state is and needs to recover - // it (in which case we throw so that the shard can run recovery) - // 2. 
The collection references by the view is actually SHARDED, in which case the - // router must execute it - if (const auto staleInfo{ex.extraInfo()}) { - uassert(std::move(resolvedView), - "Resolved views on sharded collections must be executed by mongos", - !staleInfo->getVersionWanted()); + invariant(collatorToUse.has_value()); + query_stats::registerRequest(opCtx, nss, [&]() { + // In this path we haven't yet parsed the pipeline, but we need to do so for + // query shape stats - which should track the queries before views are resolved. + // Inside this callback we know we have already checked that query stats are + // enabled and know that this request has not been rate limited. + + // We can't move out of collatorToUse as it's needed for runAggregateOnView(). + // Clone instead. + auto&& [expCtx, pipeline] = parsePipeline( + *collatorToUse == nullptr ? nullptr : (*collatorToUse)->clone()); + + return std::make_unique( + request, + *pipeline, + expCtx, + pipelineInvolvedNamespaces, + origNss, + ctx->getCollectionType()); + }); + } catch (const DBException& ex) { + if (ex.code() == 6347902) { + // TODO Handle the $$SEARCH_META case in SERVER-76087. + LOGV2_WARNING(7198701, + "Failed to parse pipeline before view resolution", + "error"_attr = ex.toStatus()); + } else { + throw; } - throw; - } - - { - // Set the namespace of the curop back to the view namespace so ctx records - // stats on this view namespace on destruction. - stdx::lock_guard lk(*opCtx->getClient()); - curOp->setNS_inlock(nss); } - - return status; + return runAggregateOnView(opCtx, + origNss, + request, + collections, + std::move(collatorToUse), + ctx->getView(), + expCtx, + catalog, + privileges, + curOp, + result, + resetContext); } // If collectionUUID was provided, verify the collection exists and has the expected UUID. @@ -973,26 +1112,40 @@ Status runAggregate(OperationContext* opCtx, opCtx, nss, collections.getMainCollection(), request.getCollectionUUID()); invariant(collatorToUse); - expCtx = makeExpressionContext(opCtx, - request, - std::move(*collatorToUse), - uuid, - collatorToUseMatchesDefault, - collections.getMainCollection() - ? collections.getMainCollection()->getCollectionOptions() - : boost::optional(boost::none)); - - // If any involved collection contains extended-range data, set a flag which individual - // DocumentSource parsers can check. - collections.forEach([&](const CollectionPtr& coll) { - if (coll->getRequiresTimeseriesExtendedRangeSupport()) - expCtx->setRequiresTimeseriesExtendedRangeSupport(true); - }); - - expCtx->startExpressionCounters(); - auto pipeline = Pipeline::parse(request.getPipeline(), expCtx); - curOp->beginQueryPlanningTimer(); - expCtx->stopExpressionCounters(); + auto&& expCtxAndPipeline = parsePipeline(std::move(*collatorToUse)); + auto expCtx = expCtxAndPipeline.first; + auto pipeline = std::move(expCtxAndPipeline.second); + + // This prevents opening a new change stream in the critical section of a serverless shard + // split or merge operation to prevent resuming on the recipient with a resume token higher + // than that operation's blockTimestamp. + // + // If we do this check before picking a startTime for a change stream then the primary could + // go into a blocking state between the check and getting the timestamp resulting in a + // startTime greater than blockTimestamp. Therefore we must do this check here, after the + // pipeline has been parsed and startTime has been initialized. 
+ if (liteParsedPipeline.hasChangeStream()) { + tenant_migration_access_blocker::assertCanOpenChangeStream(expCtx->opCtx, nss.dbName()); + } + + // After parsing to detect if $$USER_ROLES is referenced in the query, set the value of + // $$USER_ROLES for the aggregation. + expCtx->setUserRoles(); + + // Register query stats with the pre-optimized pipeline. Exclude queries against collections + // with encrypted fields. We still collect query stats on collection-less aggregations. + if (!(ctx && ctx->getCollection() && + ctx->getCollection()->getCollectionOptions().encryptedFieldConfig)) { + query_stats::registerRequest(opCtx, nss, [&]() { + return std::make_unique( + request, + *pipeline, + expCtx, + pipelineInvolvedNamespaces, + nss, + ctx ? boost::make_optional(ctx->getCollectionType()) : boost::none); + }); + } if (!request.getAllowDiskUse().value_or(true)) { allowDiskUseFalseCounter.increment(); @@ -1018,10 +1171,6 @@ Status runAggregate(OperationContext* opCtx, opCtx, nss, request.getEncryptionInformation().value(), std::move(pipeline)); request.getEncryptionInformation()->setCrudProcessed(true); } - - // Set the telemetryStoreKey to none so telemetry isn't collected when we've done a FLE - // rewrite. - CurOp::get(opCtx)->debug().telemetryStoreKey = boost::none; } pipeline->optimizePipeline(); @@ -1169,12 +1318,14 @@ Status runAggregate(OperationContext* opCtx, // appropriate collection lock must be already held. Make sure it has not been released // yet. invariant(ctx); - Explain::explainStages(explainExecutor, - collections, - *(expCtx->explain), - BSON("optimizedPipeline" << true), - cmdObj, - &bodyBuilder); + Explain::explainStages( + explainExecutor, + collections, + *(expCtx->explain), + BSON("optimizedPipeline" << true), + SerializationContext::stateCommandReply(request.getSerializationContext()), + cmdObj, + &bodyBuilder); } } else { // Cursor must be specified, if explain is not. @@ -1188,12 +1339,9 @@ Status runAggregate(OperationContext* opCtx, PlanSummaryStats stats; planExplainer.getSummaryStats(&stats); curOp->debug().setPlanSummaryMetrics(stats); + curOp->setEndOfOpMetrics(stats.nReturned); - if (keepCursor) { - collectTelemetryMongod(opCtx, pins[0], stats.nReturned); - } else { - collectTelemetryMongod(opCtx, cmdObj, stats.nReturned); - } + collectQueryStatsMongod(opCtx, pins[0]); // For an optimized away pipeline, signal the cache that a query operation has completed. // For normal pipelines this is done in DocumentSourceCursor. 
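The query_stats::registerRequest() calls added in this hunk hand over a callback rather than a pre-built key, so the pipeline-shape work only happens when query stats are actually collected for the request. Below is a minimal standalone sketch of that deferred-construction pattern, under the assumption that the registry checks an enable flag and a rate limit before invoking the callback; StatsKey, registerRequest and the flags are illustrative stand-ins, not MongoDB APIs.

```cpp
// Standalone sketch (not MongoDB code): build the stats key lazily via a callback,
// so no parsing or shape computation happens when collection is disabled or rate limited.
#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct StatsKey {          // hypothetical stand-in for a query-stats key generator
    std::string shape;
};

bool statsEnabled = true;  // assumed configuration switches for the sketch
bool rateLimited = false;

void registerRequest(const std::string& ns, std::function<std::unique_ptr<StatsKey>()> makeKey) {
    if (!statsEnabled || rateLimited) {
        return;                                // callback never runs: no key is built
    }
    std::unique_ptr<StatsKey> key = makeKey(); // construct the key only when it will be used
    std::cout << "registered query stats for " << ns << " with shape " << key->shape << "\n";
}

int main() {
    registerRequest("test.coll", [] {
        // In the real code this is where the pipeline would be parsed and a key
        // generator constructed; here we just fabricate a shape string.
        return std::make_unique<StatsKey>(StatsKey{"{aggregate: ..., pipeline: [...]}"});
    });
    return 0;
}
```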
@@ -1214,8 +1362,7 @@ Status runAggregate(OperationContext* opCtx, for (const auto& [secondaryNss, coll] : collections.getSecondaryCollections()) { if (coll) { PlanSummaryStats secondaryStats; - planExplainer.getSecondarySummaryStats(secondaryNss.toString(), - &secondaryStats); + planExplainer.getSecondarySummaryStats(secondaryNss, &secondaryStats); CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, secondaryStats); } } diff --git a/src/mongo/db/commands/run_aggregate.h b/src/mongo/db/commands/run_aggregate.h index 30b7afd002af5..51d6dfb055bd9 100644 --- a/src/mongo/db/commands/run_aggregate.h +++ b/src/mongo/db/commands/run_aggregate.h @@ -29,15 +29,18 @@ #pragma once +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/catalog/external_data_source_scope_guard.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" #include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/rpc/reply_builder_interface.h" namespace mongo { diff --git a/src/mongo/db/commands/rwc_defaults_commands.cpp b/src/mongo/db/commands/rwc_defaults_commands.cpp index d29ffbc51908e..5b661ad2865c8 100644 --- a/src/mongo/db/commands/rwc_defaults_commands.cpp +++ b/src/mongo/db/commands/rwc_defaults_commands.cpp @@ -28,20 +28,46 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/rwc_defaults_commands_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/read_write_concern_defaults.h" -#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/read_write_concern_defaults_gen.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -162,8 +188,9 @@ class SetDefaultRWConcernCommand : public TypedCommandgetClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::setDefaultRWConcern})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::setDefaultRWConcern})); } NamespaceString ns() const override { @@ 
-214,8 +241,9 @@ class GetDefaultRWConcernCommand : public TypedCommandgetClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::getDefaultRWConcern})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::getDefaultRWConcern})); } NamespaceString ns() const override { diff --git a/src/mongo/db/commands/server_status.cpp b/src/mongo/db/commands/server_status.cpp index 1fab501c0a533..41243d5987481 100644 --- a/src/mongo/db/commands/server_status.cpp +++ b/src/mongo/db/commands/server_status.cpp @@ -29,8 +29,10 @@ #include "mongo/db/commands/server_status.h" -#include "mongo/db/service_context.h" -#include "mongo/util/version.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -46,7 +48,7 @@ ServerStatusSectionRegistry* ServerStatusSectionRegistry::get() { void ServerStatusSectionRegistry::addSection(ServerStatusSection* section) { // Disallow adding a section named "timing" as it is reserved for the server status command. dassert(section->getSectionName() != kTimingSection); - verify(!_runCalled.load()); + MONGO_verify(!_runCalled.load()); _sections[section->getSectionName()] = section; } diff --git a/src/mongo/db/commands/server_status.h b/src/mongo/db/commands/server_status.h index 1d10d80596849..27c37fe0278c2 100644 --- a/src/mongo/db/commands/server_status.h +++ b/src/mongo/db/commands/server_status.h @@ -29,11 +29,18 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/stats/counters.h" #include "mongo/platform/atomic_word.h" -#include namespace mongo { diff --git a/src/mongo/db/commands/server_status_command.cpp b/src/mongo/db/commands/server_status_command.cpp index 521e4a4901047..8cf5962d77fa4 100644 --- a/src/mongo/db/commands/server_status_command.cpp +++ b/src/mongo/db/commands/server_status_command.cpp @@ -28,14 +28,48 @@ */ +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/stats/counters.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/process_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/duration.h" #include "mongo/util/net/http_client.h" #include "mongo/util/net/socket_utils.h" +#include 
"mongo/util/processinfo.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -67,11 +101,11 @@ class CmdServerStatus : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::serverStatus)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::serverStatus)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/db/commands/server_status_metric.cpp b/src/mongo/db/commands/server_status_metric.cpp index 0f3a684c80743..eeef22339d510 100644 --- a/src/mongo/db/commands/server_status_metric.cpp +++ b/src/mongo/db/commands/server_status_metric.cpp @@ -29,13 +29,22 @@ #include "mongo/db/commands/server_status_metric.h" +#include #include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsontypes.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/static_immortal.h" -#include "mongo/util/str.h" #include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/commands/server_status_metric.h b/src/mongo/db/commands/server_status_metric.h index 88eada1afca26..856f9d8893ce5 100644 --- a/src/mongo/db/commands/server_status_metric.h +++ b/src/mongo/db/commands/server_status_metric.h @@ -29,12 +29,19 @@ #pragma once +#include +#include #include #include #include #include +#include +#include "mongo/base/counter.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/jsobj.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/synchronized_value.h" namespace mongo { diff --git a/src/mongo/db/commands/server_status_metric_test.cpp b/src/mongo/db/commands/server_status_metric_test.cpp index 6de4042226b98..cde7ceb5d2be9 100644 --- a/src/mongo/db/commands/server_status_metric_test.cpp +++ b/src/mongo/db/commands/server_status_metric_test.cpp @@ -29,10 +29,13 @@ #include "mongo/db/commands/server_status_metric.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/unittest/unittest.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/server_status_servers.cpp b/src/mongo/db/commands/server_status_servers.cpp index d5dd31ca34790..13165ac377739 100644 --- a/src/mongo/db/commands/server_status_servers.cpp +++ b/src/mongo/db/commands/server_status_servers.cpp @@ -27,16 +27,31 @@ * it in the license file. 
*/ -#include "mongo/config.h" +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/stats/counters.h" +#include "mongo/platform/atomic_word.h" #include "mongo/transport/message_compressor_registry.h" #include "mongo/transport/service_entry_point.h" #include "mongo/transport/service_executor_fixed.h" #include "mongo/transport/service_executor_reserved.h" #include "mongo/transport/service_executor_synchronous.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/net/hostname_canonicalization.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/net/ssl_manager.h" +#include "mongo/util/net/ssl_types.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/set_cluster_parameter_command.cpp b/src/mongo/db/commands/set_cluster_parameter_command.cpp index f44f546d143ab..452bb2b03ad26 100644 --- a/src/mongo/db/commands/set_cluster_parameter_command.cpp +++ b/src/mongo/db/commands/set_cluster_parameter_command.cpp @@ -28,17 +28,36 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" -#include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/commands/set_cluster_parameter_invocation.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_feature_flags_gen.h" -#include "mongo/idl/cluster_server_parameter_gen.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -112,8 +131,9 @@ class SetClusterParameterCommand final : public TypedCommandgetClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::setClusterParameter})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::setClusterParameter})); } }; } setClusterParameterCommand; diff --git a/src/mongo/db/commands/set_cluster_parameter_invocation.cpp b/src/mongo/db/commands/set_cluster_parameter_invocation.cpp index ddc6a21fccc32..4c6a415ab6168 100644 --- a/src/mongo/db/commands/set_cluster_parameter_invocation.cpp +++ b/src/mongo/db/commands/set_cluster_parameter_invocation.cpp @@ -28,17 +28,36 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/commands/set_cluster_parameter_invocation.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" 
+#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/audit.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/commands.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands/set_cluster_parameter_invocation.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/server_options.h" #include "mongo/db/vector_clock.h" -#include "mongo/idl/cluster_server_parameter_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -48,21 +67,22 @@ namespace mongo { bool SetClusterParameterInvocation::invoke(OperationContext* opCtx, const SetClusterParameter& cmd, boost::optional paramTime, - const WriteConcernOptions& writeConcern) { + const WriteConcernOptions& writeConcern, + bool skipValidation) { BSONObj cmdParamObj = cmd.getCommandParameter(); StringData parameterName = cmdParamObj.firstElement().fieldName(); ServerParameter* serverParameter = _sps->get(parameterName); auto tenantId = cmd.getDbName().tenantId(); - auto [query, update] = - normalizeParameter(opCtx, - cmdParamObj, - paramTime, - serverParameter, - parameterName, - tenantId, - serverGlobalParams.clusterRole.exclusivelyHasShardRole()); + auto [query, update] = normalizeParameter( + opCtx, + cmdParamObj, + paramTime, + serverParameter, + parameterName, + tenantId, + skipValidation || serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer)); BSONObjBuilder oldValueBob; serverParameter->append(opCtx, &oldValueBob, parameterName.toString(), tenantId); diff --git a/src/mongo/db/commands/set_cluster_parameter_invocation.h b/src/mongo/db/commands/set_cluster_parameter_invocation.h index d44de4274b296..a46aa5a40c442 100644 --- a/src/mongo/db/commands/set_cluster_parameter_invocation.h +++ b/src/mongo/db/commands/set_cluster_parameter_invocation.h @@ -29,9 +29,21 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/write_concern_options.h" namespace mongo { @@ -80,7 +92,8 @@ class SetClusterParameterInvocation { bool invoke(OperationContext*, const SetClusterParameter&, boost::optional, - const WriteConcernOptions&); + const WriteConcernOptions&, + bool skipValidation = false); // Validate new parameter passed to setClusterParameter and generate the query and update fields // for the on-disk update. 
diff --git a/src/mongo/db/commands/set_cluster_parameter_invocation_test.cpp b/src/mongo/db/commands/set_cluster_parameter_invocation_test.cpp index 2260b5442f0c7..81f2d16b5d49c 100644 --- a/src/mongo/db/commands/set_cluster_parameter_invocation_test.cpp +++ b/src/mongo/db/commands/set_cluster_parameter_invocation_test.cpp @@ -27,18 +27,28 @@ * it in the license file. */ -#include "mongo/db/operation_context.h" -#include "mongo/db/operation_id.h" -#include "mongo/db/service_context.h" -#include "mongo/platform/basic.h" +#include +#include #include +#include -#include "mongo/db/dbdirectclient.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" #include "mongo/db/commands/set_cluster_parameter_invocation.h" -#include "mongo/idl/idl_parser.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp index 8905e0993676d..395c4e971f77e 100644 --- a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp +++ b/src/mongo/db/commands/set_feature_compatibility_version_command.cpp @@ -28,78 +28,114 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include #include - -#include "mongo/crypto/fle_crypto.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/audit.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/coll_mod.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog_helper.h" -#include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/drop_indexes.h" -#include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/catalog/index_catalog_impl.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/coll_mod_gen.h" #include "mongo/db/commands.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/commands/set_feature_compatibility_version_gen.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" 
#include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/drop_gen.h" #include "mongo/db/feature_compatibility_version_documentation.h" -#include "mongo/db/feature_compatibility_version_parser.h" -#include "mongo/db/global_settings.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/shard_merge_recipient_service.h" #include "mongo/db/repl/tenant_migration_donor_service.h" #include "mongo/db/repl/tenant_migration_recipient_service.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/migration_coordinator_document_gen.h" -#include "mongo/db/s/range_deletion_util.h" #include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" -#include "mongo/db/s/resharding/resharding_donor_recipient_common.h" #include "mongo/db/s/shard_authoritative_catalog_gen.h" -#include "mongo/db/s/sharding_cluster_parameters_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_util.h" -#include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/server_feature_flags_gen.h" #include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/serverless/shard_split_donor_service.h" -#include "mongo/db/session/session_catalog.h" -#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/db/vector_clock.h" -#include "mongo/idl/cluster_server_parameter_gen.h" +#include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/catalog/type_config_version.h" -#include "mongo/s/catalog/type_index_catalog.h" -#include "mongo/s/grid.h" -#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" #include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/stdx/unordered_set.h" -#include "mongo/util/exit.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" #include "mongo/util/scopeguard.h" #include 
"mongo/util/str.h" +#include "mongo/util/uuid.h" #include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -126,6 +162,7 @@ MONGO_FAIL_POINT_DEFINE(hangDowngradingBeforeIsCleaningServerMetadata); MONGO_FAIL_POINT_DEFINE(failAfterReachingTransitioningState); MONGO_FAIL_POINT_DEFINE(hangAtSetFCVStart); MONGO_FAIL_POINT_DEFINE(failAfterSendingShardsToDowngradingOrUpgrading); +MONGO_FAIL_POINT_DEFINE(hangAfterBlockingIndexBuildsForFcvDowngrade); /** * Ensures that only one instance of setFeatureCompatibilityVersion can run at a given time. @@ -161,7 +198,7 @@ void abortAllReshardCollection(OperationContext* opCtx) { std::vector nsWithReshardColl; store.forEach(opCtx, {}, [&](const ReshardingCoordinatorDocument& doc) { - nsWithReshardColl.push_back(doc.getSourceNss().ns()); + nsWithReshardColl.push_back(doc.getSourceNss().ns().toString()); return true; }); @@ -193,11 +230,30 @@ void dropDistLockCollections(OperationContext* opCtx) { if (dropStatus != ErrorCodes::NamespaceNotFound) { uassertStatusOKWithContext( dropStatus, - str::stream() << "Failed to drop deprecated distributed locks collection " << nss); + str::stream() << "Failed to drop deprecated distributed locks collection " + << nss.toStringForErrorMsg()); } } } +// TODO SERVER-78330 remove this. +void deleteShardingStateRecoveryDoc(OperationContext* opCtx) { + DBDirectClient client(opCtx); + const auto commandResponse = client.runCommand([&] { + write_ops::DeleteCommandRequest deleteOp(NamespaceString::kServerConfigurationNamespace); + deleteOp.setDeletes( + {[&] { + write_ops::DeleteOpEntry entry; + entry.setQ(BSON("_id" + << "minOpTimeRecovery")); + entry.setMulti(false); + return entry; + }()}); + return deleteOp.serialize({}); + }()); + uassertStatusOK(getStatusFromWriteCommandReply(commandResponse->getCommandReply())); +} + void uassertStatusOKIgnoreNSNotFound(Status status) { if (status.isOK() || status == ErrorCodes::NamespaceNotFound) { return; @@ -253,11 +309,12 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::setFeatureCompatibilityVersion)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::setFeatureCompatibilityVersion)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } @@ -329,39 +386,17 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // the FCV but encountered failover afterwards. repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx); - // TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled. - if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && - feature_flags::gGlobalIndexesShardingCatalog.isEnabledOnVersion(requestedVersion)) { - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kRenameCollectionPre63Compatible); - } - // TODO SERVER-73627: Remove once 7.0 becomes last LTS. 
- if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && - feature_flags::gDropCollectionHoldingCriticalSection.isEnabledOnVersion( - requestedVersion)) { - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kDropCollectionPre70Compatible); - - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kDropDatabasePre70Compatible); - } - - // TODO SERVER-68373: Remove once 7.0 becomes last LTS - if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && - mongo::gFeatureFlagFLE2CompactForProtocolV2.isEnabledOnVersion(requestedVersion)) { - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, - DDLCoordinatorTypeEnum::kCompactStructuredEncryptionDataPre70Compatible); - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, - DDLCoordinatorTypeEnum::kCompactStructuredEncryptionDataPre61Compatible); - } - + // _finalizeUpgrade is only for any tasks that must be done to fully complete the FCV + // upgrade AFTER the FCV document has already been updated to the UPGRADED FCV. + // We call it here because it's possible that during an FCV upgrade, the + // replset/shard server/config server undergoes failover AFTER the FCV document has + // already been updated to the UPGRADED FCV, but before the cluster has completed + // _finalizeUpgrade. In this case, since the cluster failed over, the user/client may + // retry sending the setFCV command to the cluster, but the cluster is already in the + // requestedVersion (i.e. requestedVersion == actualVersion). However, the cluster + // should retry/complete the tasks from _finalizeUpgrade before sending ok:1 back to the + // user/client. Therefore, these tasks **must** be idempotent/retryable. + _finalizeUpgrade(opCtx, requestedVersion); return true; } @@ -396,31 +431,14 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { const auto fcvChangeRegion( FeatureCompatibilityVersion::enterFCVChangeRegion(opCtx)); - // If catalogShard is enabled and there is an entry in config.shards with _id: - // ShardId::kConfigServerId then the config server is a catalog shard. - auto isCatalogShard = - serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && - !ShardingCatalogManager::get(opCtx) - ->findOneConfigDocument(opCtx, - NamespaceString::kConfigsvrShardsNamespace, - BSON("_id" << ShardId::kConfigServerId.toString())) - .isEmpty(); - - uassert(ErrorCodes::CannotDowngrade, - "Cannot downgrade featureCompatibilityVersion to {} " - "with a catalog shard as it is not supported in earlier versions. 
" - "Please transition the config server to dedicated mode using the " - "transitionToDedicatedConfigServer command."_format( - multiversion::toString(requestedVersion)), - !isCatalogShard || - gFeatureFlagCatalogShard.isEnabledOnVersion(requestedVersion)); - uassert(ErrorCodes::Error(6744303), "Failing setFeatureCompatibilityVersion before reaching the FCV " "transitional stage due to 'failBeforeTransitioning' failpoint set", !failBeforeTransitioning.shouldFail()); + ScopedPostFCVDocumentUpdateActions postUpdateAction = + _prepareTransitionalState(opCtx, actualVersion, requestedVersion); + // We pass boost::none as the setIsCleaningServerMetadata argument in order to // indicate that we don't want to override the existing isCleaningServerMetadata FCV // doc field. This is to protect against the case where a previous FCV downgrade @@ -453,7 +471,7 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { invariant(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); // This helper function is only for any actions that should be done specifically on - // shard servers during phase 1 of the 2-phase setFCV protocol for sharded clusters. + // shard servers during phase 1 of the 3-phase setFCV protocol for sharded clusters. // For example, before completing phase 1, we must wait for backward incompatible // ShardingDDLCoordinators to finish. // We do not expect any other feature-specific work to be done in the 'start' phase. @@ -476,14 +494,10 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { _sendSetFCVRequestToShards( opCtx, request, changeTimestamp, SetFCVPhaseEnum::kStart); - // (Ignore FCV check): This feature flag is intentional to only check if it is - // enabled on this binary so the config server can be a shard. - if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()) { - // The config server may also be a shard, so have it run any shard server tasks. - // Run this after sending the first phase to shards so they enter the transition - // state even if this throws. - _shardServerPhase1Tasks(opCtx, requestedVersion); - } + // The config server may also be a shard, so have it run any shard server tasks. + // Run this after sending the first phase to shards so they enter the transition + // state even if this throws. + _shardServerPhase1Tasks(opCtx, requestedVersion); } uassert(ErrorCodes::Error(7555202), @@ -548,40 +562,15 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { false /* setIsCleaningServerMetadata */); } - // TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled. - if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && - requestedVersion > actualVersion && - feature_flags::gGlobalIndexesShardingCatalog - .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, actualVersion)) { - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kRenameCollectionPre63Compatible); - } - // TODO SERVER-73627: Remove once 7.0 becomes last LTS. 
- if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && - requestedVersion > actualVersion && - feature_flags::gDropCollectionHoldingCriticalSection - .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, actualVersion)) { - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kDropCollectionPre70Compatible); - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kDropDatabasePre70Compatible); - } - - // TODO SERVER-68373: Remove once 7.0 becomes last LTS - if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && - requestedVersion > actualVersion && - mongo::gFeatureFlagFLE2CompactForProtocolV2 - .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, actualVersion)) { - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kCompactStructuredEncryptionDataPre70Compatible); - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kCompactStructuredEncryptionDataPre61Compatible); + // _finalizeUpgrade is only for any tasks that must be done to fully complete the FCV + // upgrade AFTER the FCV document has already been updated to the UPGRADED FCV. + // This is because during _runUpgrade, the FCV is still in the transitional state (which + // behaves like the downgraded FCV), so certain tasks cannot be done yet until the FCV is + // fully upgraded. + // Everything in this function **must** be idempotent/retryable. + if (requestedVersion > actualVersion) { + _finalizeUpgrade(opCtx, requestedVersion); } LOGV2(6744302, @@ -596,7 +585,7 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { private: // This helper function is only for any actions that should be done specifically on - // shard servers during phase 1 of the 2-phase setFCV protocol for sharded clusters. + // shard servers during phase 1 of the 3-phase setFCV protocol for sharded clusters. // For example, before completing phase 1, we must wait for backward incompatible // ShardingDDLCoordinators to finish. This is important in order to ensure that no // shard that is currently a participant of such a backward-incompatible @@ -612,24 +601,6 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { getTransitionFCVFromAndTo(serverGlobalParams.featureCompatibility.getVersion()); const auto isDowngrading = originalVersion > requestedVersion; const auto isUpgrading = originalVersion < requestedVersion; - // TODO (SERVER-71309): Remove once 7.0 becomes last LTS. - if (isDowngrading && - feature_flags::gResilientMovePrimary.isDisabledOnTargetFCVButEnabledOnOriginalFCV( - requestedVersion, originalVersion)) { - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete(opCtx, - DDLCoordinatorTypeEnum::kMovePrimary); - } - - // TODO SERVER-68008: Remove collMod draining mechanism after 7.0 becomes last LTS. 
- if (isDowngrading && - feature_flags::gCollModCoordinatorV3.isDisabledOnTargetFCVButEnabledOnOriginalFCV( - requestedVersion, originalVersion)) { - // Drain all running collMod v3 coordinator because they produce backward - // incompatible on disk metadata - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete(opCtx, DDLCoordinatorTypeEnum::kCollMod); - } // TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled. if (isDowngrading && @@ -652,19 +623,9 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { DDLCoordinatorTypeEnum::kDropDatabase); } - // TODO SERVER-68373 remove once 7.0 becomes last LTS - if (isDowngrading && - mongo::gFeatureFlagFLE2CompactForProtocolV2 - .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, originalVersion)) { - // Drain the QE compact coordinator because it persists state that is - // not backwards compatible with earlier versions. - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForCoordinatorsOfGivenTypeToComplete( - opCtx, DDLCoordinatorTypeEnum::kCompactStructuredEncryptionData); - } - if (isUpgrading) { - _createShardingIndexCatalogIndexes(opCtx, requestedVersion); + _createShardingIndexCatalogIndexes( + opCtx, requestedVersion, NamespaceString::kShardIndexCatalogNamespace); } } @@ -672,9 +633,14 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // transition lock in S mode. It is required that the code in this helper function is idempotent // and could be done after _runDowngrade even if it failed at any point in the middle of // _userCollectionsUassertsForDowngrade or _internalServerCleanupForDowngrade. - void _prepareToUpgradeActions(OperationContext* opCtx) { + void _prepareToUpgradeActions(OperationContext* opCtx, + const multiversion::FeatureCompatibilityVersion requestedVersion, + boost::optional changeTimestamp) { if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { - _cancelServerlessMigrations(opCtx); + if (repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { + _cancelServerlessMigrations(opCtx); + } + _maybeMigrateAuditConfig(opCtx, requestedVersion, changeTimestamp); return; } @@ -682,6 +648,8 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // roles aren't mutually exclusive. if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { // Config server role actions. + _maybeMigrateAuditConfig(opCtx, requestedVersion, changeTimestamp); + _dropReshardingCoordinatorUniqueIndex(opCtx, requestedVersion); } if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { @@ -697,27 +665,148 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { return; } - // This helper function is for updating metadata to make sure the new features in the + void _maybeRemoveOldAuditConfig( + OperationContext* opCtx, const multiversion::FeatureCompatibilityVersion requestedVersion) { + if (feature_flags::gFeatureFlagAuditConfigClusterParameter.isEnabledOnVersion( + requestedVersion) && + audit::removeOldConfig) { + LOGV2_DEBUG(7193000, + 3, + "Upgraded to FCV with audit config cluster parameter enabled, removing old " + "config."); + audit::removeOldConfig(opCtx); + } + } + + // This helper function is for updating server metadata to make sure the new features in the // upgraded version work for sharded and non-sharded clusters. 
It is required that the code // in this helper function is idempotent and could be done after _runDowngrade even if it // failed at any point in the middle of _userCollectionsUassertsForDowngrade or // _internalServerCleanupForDowngrade. - void _completeUpgrade(OperationContext* opCtx, - const multiversion::FeatureCompatibilityVersion requestedVersion) { + void _upgradeServerMetadata(OperationContext* opCtx, + const multiversion::FeatureCompatibilityVersion requestedVersion) { if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { const auto actualVersion = serverGlobalParams.featureCompatibility.getVersion(); _cleanupConfigVersionOnUpgrade(opCtx, requestedVersion, actualVersion); - _createSchemaOnConfigSettings(opCtx, requestedVersion, actualVersion); - _setOnCurrentShardSinceFieldOnChunks(opCtx, requestedVersion, actualVersion); - // Depends on _setOnCurrentShardSinceFieldOnChunks() - _initializePlacementHistory(opCtx, requestedVersion, actualVersion); _dropConfigMigrationsCollection(opCtx); - _setShardedClusterCardinalityParam(opCtx, requestedVersion); + } + + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { + // Delete any possible leftover ShardingStateRecovery document. + // TODO SERVER-78330 remove this. + deleteShardingStateRecoveryDoc(opCtx); } _removeRecordPreImagesCollectionOption(opCtx); } + void _maybeMigrateAuditConfig(OperationContext* opCtx, + const multiversion::FeatureCompatibilityVersion requestedVersion, + boost::optional changeTimestamp) { + const auto& [fromVersion, _] = + getTransitionFCVFromAndTo(serverGlobalParams.featureCompatibility.getVersion()); + if (feature_flags::gFeatureFlagAuditConfigClusterParameter + .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, fromVersion) && + audit::migrateOldToNew) { + LOGV2_DEBUG(7193001, + 3, + "Upgrading to FCV with audit config cluster parameter enabled, migrating " + "audit config to cluster parameter."); + audit::migrateOldToNew(opCtx, changeTimestamp); + } + } + + void _createReshardingCoordinatorUniqueIndex( + OperationContext* opCtx, + const multiversion::FeatureCompatibilityVersion requestedVersion, + const multiversion::FeatureCompatibilityVersion originalVersion) { + // We're guaranteed that if the resharding metadata collection exists, it is empty; + // if it were not we would have already aborted with ManualInterventionRequired. + if (resharding::gFeatureFlagReshardingImprovements + .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, originalVersion)) { + LOGV2(7760407, + "Downgrading to FCV with resharding improvements parameter disabled, " + "creating resharding coordinator unique index."); + AutoGetCollection autoColl( + opCtx, NamespaceString::kConfigReshardingOperationsNamespace, MODE_X); + const Collection* collection = autoColl.getCollection().get(); + // This could only happen if we got a downgrade command before the service initialized; + // in that case the collection and index will be created on initialization.
+ if (!collection) { + LOGV2_DEBUG(7760408, + 2, + "The reshardingOperations collection did not exist during downgrade"); + return; + } + writeConflictRetry( + opCtx, + "createIndexOnConfigCollection", + NamespaceString::kConfigReshardingOperationsNamespace, + [&] { + WriteUnitOfWork wunit(opCtx); + CollectionWriter collWriter(opCtx, collection->uuid()); + try { + IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection( + opCtx, + collWriter, + {BSON("key" << BSON("active" << 1) << "name" + << "ReshardingCoordinatorActiveIndex" + << "v" << int(IndexDescriptor::kLatestIndexVersion) + << "unique" << true)}, + false /*fromMigrate*/); + } catch (const DBException& e) { + // The uassert should never happen, but it does not indicate corruption if + // it does. + uassert(ErrorCodes::ManualInterventionRequired, + str::stream() << "Unable to create 'active' index on " + "'config.reshardingOperations'. Consider " + "dropping 'config.reshardingOperations' and " + "trying again. Original exception " + << e.toString(), + e.code() == ErrorCodes::IndexAlreadyExists); + LOGV2_DEBUG(7760409, + 2, + "The 'active' unique index on the reshardingOperations " + "collection already existed during downgrade"); + return; + } + wunit.commit(); + }); + } + } + + void _dropReshardingCoordinatorUniqueIndex( + OperationContext* opCtx, const multiversion::FeatureCompatibilityVersion requestedVersion) { + // There is no need to re-create this index on upgrade, as the index is no longer + // needed to ensure resharding operations are unique. + const auto& [fromVersion, _] = + getTransitionFCVFromAndTo(serverGlobalParams.featureCompatibility.getVersion()); + if (resharding::gFeatureFlagReshardingImprovements + .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, fromVersion)) { + LOGV2(7760401, + "Upgrading to FCV with resharding improvements parameter enabled, " + "dropping resharding coordinator unique index."); + try { + // The index name is included literally here to avoid creating a + // construction-order catastrophe with the static std::string + // kReshardingCoordinatorActiveIndexName + auto reply = dropIndexes(opCtx, + NamespaceString::kConfigReshardingOperationsNamespace, + boost::none, + "ReshardingCoordinatorActiveIndex"); + LOGV2_DEBUG( + 7760402, 2, "Dropped resharding coordinator index", "reply"_attr = reply); + } catch (ExceptionFor&) { + LOGV2_DEBUG( + 7760403, 2, "Resharding coordinator collection did not exist during upgrade."); + } catch (ExceptionFor&) { + LOGV2_DEBUG(7760404, + 2, + "Resharding coordinator 'active' index did not exist during upgrade."); + } + } + } + + // TODO SERVER-68889 remove once 7.0 becomes last LTS + void _cleanupConfigVersionOnUpgrade( + OperationContext* opCtx, @@ -791,94 +880,22 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { } - // TODO SERVER-68217 remove once v7.0 becomes last-lts - void _initializePlacementHistory( + void _createShardingIndexCatalogIndexes( OperationContext* opCtx, const multiversion::FeatureCompatibilityVersion requestedVersion, - const multiversion::FeatureCompatibilityVersion actualVersion) { - if (feature_flags::gHistoricalPlacementShardingCatalog - .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, actualVersion)) { - ShardingCatalogManager::get(opCtx)->initializePlacementHistory(opCtx); - } - } - - void _createShardingIndexCatalogIndexes( - OperationContext* opCtx, const multiversion::FeatureCompatibilityVersion requestedVersion) { + const NamespaceString& indexCatalogNss) { // TODO SERVER-67392:
Remove once gGlobalIndexesShardingCatalog is enabled. const auto actualVersion = serverGlobalParams.featureCompatibility.getVersion(); if (feature_flags::gGlobalIndexesShardingCatalog .isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, actualVersion)) { - uassertStatusOK(sharding_util::createShardingIndexCatalogIndexes(opCtx)); - if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { + uassertStatusOK( + sharding_util::createShardingIndexCatalogIndexes(opCtx, indexCatalogNss)); + if (indexCatalogNss == NamespaceString::kShardIndexCatalogNamespace) { uassertStatusOK(sharding_util::createShardCollectionCatalogIndexes(opCtx)); } } } - // TODO (SERVER-70763): Remove once FCV 7.0 becomes last-lts. - void _createSchemaOnConfigSettings( - OperationContext* opCtx, - const multiversion::FeatureCompatibilityVersion requestedVersion, - const multiversion::FeatureCompatibilityVersion actualVersion) { - if (feature_flags::gConfigSettingsSchema.isEnabledOnTargetFCVButDisabledOnOriginalFCV( - requestedVersion, actualVersion)) { - LOGV2(6885200, "Creating schema on config.settings"); - uassertStatusOK(ShardingCatalogManager::get(opCtx)->upgradeConfigSettings(opCtx)); - } - } - - void _setShardedClusterCardinalityParam( - OperationContext* opCtx, const multiversion::FeatureCompatibilityVersion requestedVersion) { - if (feature_flags::gClusterCardinalityParameter.isEnabledOnVersion(requestedVersion)) { - // Get current cluster parameter value so that we don't run SetClusterParameter - // extraneously - auto* clusterParameters = ServerParameterSet::getClusterParameterSet(); - auto* clusterCardinalityParam = - clusterParameters->get>( - "shardedClusterCardinalityForDirectConns"); - auto currentValue = - clusterCardinalityParam->getValue(boost::none).getHasTwoOrMoreShards(); - - // config.shards is stable during FCV changes, so query that to discover the current - // number of shards. - DBDirectClient client(opCtx); - FindCommandRequest findRequest{NamespaceString::kConfigsvrShardsNamespace}; - findRequest.setLimit(2); - auto numShards = client.find(std::move(findRequest))->itcount(); - bool expectedValue = numShards >= 2; - - if (expectedValue == currentValue) { - return; - } - - ConfigsvrSetClusterParameter configsvrSetClusterParameter( - BSON("shardedClusterCardinalityForDirectConns" - << BSON("hasTwoOrMoreShards" << expectedValue))); - configsvrSetClusterParameter.setDbName(DatabaseName(boost::none, "admin")); - - const auto shardRegistry = Grid::get(opCtx)->shardRegistry(); - const auto cmdResponse = - uassertStatusOK(shardRegistry->getConfigShard()->runCommandWithFixedRetryAttempts( - opCtx, - ReadPreferenceSetting(ReadPreference::PrimaryOnly), - DatabaseName::kAdmin.toString(), - configsvrSetClusterParameter.toBSON({}), - Shard::RetryPolicy::kIdempotent)); - uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(std::move(cmdResponse))); - } - } - - // TODO (SERVER-72791): Remove once FCV 7.0 becomes last-lts. - void _setOnCurrentShardSinceFieldOnChunks( - OperationContext* opCtx, - const multiversion::FeatureCompatibilityVersion requestedVersion, - const multiversion::FeatureCompatibilityVersion actualVersion) { - if (feature_flags::gAutoMerger.isEnabledOnTargetFCVButDisabledOnOriginalFCV( - requestedVersion, actualVersion)) { - ShardingCatalogManager::get(opCtx)->setOnCurrentShardSinceFieldOnChunks(opCtx); - } - } - // Removes collection option "recordPreImages" from all collection definitions. // TODO SERVER-74036: Remove once FCV 7.0 becomes last-LTS. 
void _removeRecordPreImagesCollectionOption(OperationContext* opCtx) { @@ -916,7 +933,8 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { &dropReply, DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops); uassert(deletionStatus.code(), - str::stream() << "Failed to drop " << NamespaceString::kMigrationsNamespace + str::stream() << "Failed to drop " + << NamespaceString::kMigrationsNamespace.toStringForErrorMsg() << causedBy(deletionStatus.reason()), deletionStatus.isOK() || deletionStatus.code() == ErrorCodes::NamespaceNotFound); } @@ -937,7 +955,8 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // transition lock in S mode. It is required that the code in this helper function is // idempotent and could be done after _runDowngrade even if it failed at any point in the // middle of _userCollectionsUassertsForDowngrade or _internalServerCleanupForDowngrade. - _prepareToUpgradeActions(opCtx); + const auto requestedVersion = request.getCommandParameter(); + _prepareToUpgradeActions(opCtx, requestedVersion, changeTimestamp); { // Take the FCV full transition lock in S mode to create a barrier for operations taking @@ -966,7 +985,8 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // _runUpgrade performs all the metadata-changing actions of an FCV upgrade. Any new feature // specific upgrade code should be placed in the _runUpgrade helper functions: - // * _completeUpgrade: for updating metadata to make sure the new features in the upgraded + // * _upgradeServerMetadata: for updating server metadata to make sure the new features in the + // upgraded // version work for sharded and non-sharded clusters // Please read the comments on those helper functions for more details on what should be placed // in each function. @@ -985,18 +1005,19 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // TODO SERVER-68551: Remove once 7.0 becomes last-lts dropDistLockCollections(opCtx); - _createShardingIndexCatalogIndexes(opCtx, requestedVersion); + _createShardingIndexCatalogIndexes( + opCtx, requestedVersion, NamespaceString::kConfigsvrIndexCatalogNamespace); // Tell the shards to complete setFCV (transition to fully upgraded) _sendSetFCVRequestToShards(opCtx, request, changeTimestamp, SetFCVPhaseEnum::kComplete); } - // This helper function is for updating metadata to make sure the new features in the + // This helper function is for updating server metadata to make sure the new features in the // upgraded version work for sharded and non-sharded clusters. It is required that the code // in this helper function is idempotent and could be done after _runDowngrade even if it // failed at any point in the middle of _userCollectionsUassertsForDowngrade or // _internalServerCleanupForDowngrade. - _completeUpgrade(opCtx, requestedVersion); + _upgradeServerMetadata(opCtx, requestedVersion); hangWhileUpgrading.pauseWhileSet(opCtx); } @@ -1004,7 +1025,8 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // This helper function is for any actions that should be done before taking the FCV full // transition lock in S mode. 
void _prepareToDowngradeActions(OperationContext* opCtx) { - if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { + if (serverGlobalParams.clusterRole.has(ClusterRole::None) && + repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { _cancelServerlessMigrations(opCtx); return; } @@ -1050,14 +1072,27 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { const auto& [originalVersion, _] = getTransitionFCVFromAndTo(serverGlobalParams.featureCompatibility.getVersion()); + if (feature_flags::gFeatureFlagAuditConfigClusterParameter + .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, originalVersion)) { + // Ensure audit config cluster parameter is unset on disk. + AutoGetCollection clusterParametersColl( + opCtx, NamespaceString::kClusterParametersNamespace, MODE_IS); + BSONObj _result; + if (Helpers::findOne(opCtx, + clusterParametersColl.getCollection(), + BSON("_id" + << "auditConfig"), + _result)) { + uasserted(ErrorCodes::CannotDowngrade, + "Cannot downgrade the cluster when the auditConfig cluster parameter is " + "set. Drop the auditConfig document from the config.clusterParameters " + "collection before downgrading."); + } + } + // Note the config server is also considered a shard, so the ConfigServer and ShardServer // roles aren't mutually exclusive. if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { - if (gFeatureFlagCatalogShard.isDisabledOnTargetFCVButEnabledOnOriginalFCV( - requestedVersion, originalVersion)) { - _assertNoCollectionsHaveChangeStreamsPrePostImages(opCtx); - } - if (feature_flags::gGlobalIndexesShardingCatalog .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, originalVersion)) { @@ -1086,122 +1121,10 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { "detected global index name: '" << indexDoc[IndexCatalogType::kNameFieldName].String() << "' on collection '" - << NamespaceString(collDoc[CollectionType::kNssFieldName].String()) - << "'", + << collDoc[CollectionType::kNssFieldName].String() << "'", !hasShardingIndexCatalogEntries); } } - - if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || - serverGlobalParams.clusterRole.has(ClusterRole::None)) { - if (feature_flags::gTimeseriesScalabilityImprovements - .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, - originalVersion)) { - for (const auto& dbName : DatabaseHolder::get(opCtx)->getNames()) { - Lock::DBLock dbLock(opCtx, dbName, MODE_IX); - catalog::forEachCollectionFromDb( - opCtx, - dbName, - MODE_S, - [&](const Collection* collection) { - auto tsOptions = collection->getTimeseriesOptions(); - invariant(tsOptions); - - auto indexCatalog = collection->getIndexCatalog(); - auto indexIt = indexCatalog->getIndexIterator( - opCtx, - IndexCatalog::InclusionPolicy::kReady | - IndexCatalog::InclusionPolicy::kUnfinished); - - // Check and fail to downgrade if the time-series collection has a - // partial, TTL index. - while (indexIt->more()) { - auto indexEntry = indexIt->next(); - if (indexEntry->descriptor()->isPartial()) { - // TODO (SERVER-67659): Remove partial, TTL index check once - // FCV 7.0 becomes last-lts. - uassert( - ErrorCodes::CannotDowngrade, - str::stream() - << "Cannot downgrade the cluster when there are " - "secondary " - "TTL indexes with partial filters on time-series " - "collections. Drop all partial, TTL indexes on " - "time-series collections before downgrading. 
First " - "detected incompatible index name: '" - << indexEntry->descriptor()->indexName() - << "' on collection: '" - << collection->ns().getTimeseriesViewNamespace() << "'", - !indexEntry->descriptor()->infoObj().hasField( - IndexDescriptor::kExpireAfterSecondsFieldName)); - } - } - - // Check the time-series options for a default granularity. Fail the - // downgrade if the bucketing parameters are custom values. - uassert( - ErrorCodes::CannotDowngrade, - str::stream() - << "Cannot downgrade the cluster when there are time-series " - "collections with custom bucketing parameters. In order to " - "downgrade, the time-series collection(s) must be updated " - "with a granularity of 'seconds', 'minutes' or 'hours'. " - "First detected incompatible collection: '" - << collection->ns().getTimeseriesViewNamespace() << "'", - tsOptions->getGranularity().has_value()); - - return true; - }, - [&](const Collection* collection) { - return collection->getTimeseriesOptions() != boost::none; - }); - } - } - - // Block downgrade for collections with encrypted fields - // TODO SERVER-67760: Remove once FCV 7.0 becomes last-lts. - for (const auto& dbName : DatabaseHolder::get(opCtx)->getNames()) { - Lock::DBLock dbLock(opCtx, dbName, MODE_IX); - catalog::forEachCollectionFromDb( - opCtx, dbName, MODE_X, [&](const Collection* collection) { - auto& efc = collection->getCollectionOptions().encryptedFieldConfig; - - uassert(ErrorCodes::CannotDowngrade, - str::stream() << "Cannot downgrade the cluster as collection " - << collection->ns() - << " has 'encryptedFields' with range indexes", - !(efc.has_value() && - hasQueryType(efc.get(), QueryTypeEnum::RangePreview))); - return true; - }); - } - - if (feature_flags::gfeatureFlagCappedCollectionsRelaxedSize - .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, - originalVersion)) { - for (const auto& dbName : DatabaseHolder::get(opCtx)->getNames()) { - Lock::DBLock dbLock(opCtx, dbName, MODE_IX); - catalog::forEachCollectionFromDb( - opCtx, - dbName, - MODE_S, - [&](const Collection* collection) { - uasserted( - ErrorCodes::CannotDowngrade, - str::stream() - << "Cannot downgrade the cluster when there are capped " - "collection with a size that is non multiple of 256 bytes. " - "Drop or resize the following collection: '" - << collection->ns() << "'"); - return true; - }, - [&](const Collection* collection) { - return collection->isCapped() && - collection->getCappedMaxSize() % 256 != 0; - }); - } - } - } } // Remove cluster parameters from the clusterParameters collections which are not enabled on @@ -1229,6 +1152,20 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { } } + void _updateAuditConfigOnDowngrade( + OperationContext* opCtx, const multiversion::FeatureCompatibilityVersion requestedVersion) { + invariant(serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading()); + const auto& [fromVersion, _] = + getTransitionFCVFromAndTo(serverGlobalParams.featureCompatibility.getVersion()); + + if (feature_flags::gFeatureFlagAuditConfigClusterParameter + .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, fromVersion)) { + if (audit::updateAuditConfigOnDowngrade) { + audit::updateAuditConfigOnDowngrade(opCtx); + } + } + } + // This helper function is for any internal server downgrade cleanup, such as dropping // collections or aborting. This cleanup will happen after user collection downgrade // cleanup. 
@@ -1256,33 +1193,36 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // Note the config server is also considered a shard, so the ConfigServer and ShardServer // roles aren't mutually exclusive. if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { - _dropInternalShardingIndexCatalogCollection(opCtx, requestedVersion, originalVersion); - _removeSchemaOnConfigSettings(opCtx, requestedVersion, originalVersion); + _updateAuditConfigOnDowngrade(opCtx, requestedVersion); + _dropInternalShardingIndexCatalogCollection( + opCtx, + requestedVersion, + originalVersion, + NamespaceString::kConfigsvrIndexCatalogNamespace); // Always abort the reshardCollection regardless of version to ensure that it will // run on a consistent version from start to finish. This will ensure that it will // be able to apply the oplog entries correctly. abortAllReshardCollection(opCtx); + _createReshardingCoordinatorUniqueIndex(opCtx, requestedVersion, originalVersion); _updateConfigVersionOnDowngrade(opCtx, requestedVersion, originalVersion); } if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { - // If we are downgrading to a version that doesn't support implicit translation of - // Timeseries collection in sharding DDL Coordinators we need to drain all ongoing - // coordinators - if (feature_flags::gImplicitDDLTimeseriesNssTranslation - .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, - originalVersion)) { - ShardingDDLCoordinatorService::getService(opCtx) - ->waitForOngoingCoordinatorsToFinish(opCtx); - } - _dropInternalShardingIndexCatalogCollection(opCtx, requestedVersion, originalVersion); + _dropInternalShardingIndexCatalogCollection( + opCtx, + requestedVersion, + originalVersion, + NamespaceString::kShardIndexCatalogNamespace); + } else { + _updateAuditConfigOnDowngrade(opCtx, requestedVersion); } } void _dropInternalShardingIndexCatalogCollection( OperationContext* opCtx, const multiversion::FeatureCompatibilityVersion requestedVersion, - const multiversion::FeatureCompatibilityVersion originalVersion) { + const multiversion::FeatureCompatibilityVersion originalVersion, + const NamespaceString& indexCatalogNss) { // TODO SERVER-67392: Remove when 7.0 branches-out. // Coordinators that commits indexes to the csrs must be drained before this point. Older // FCV's must not find cluster-wide indexes. @@ -1320,6 +1260,7 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { uassert(dropStatus.code(), str::stream() << "Failed to drop " << NamespaceString::kShardCollectionCatalogNamespace + .toStringForErrorMsg() << causedBy(dropStatus.reason()), dropStatus.isOK() || dropStatus.code() == ErrorCodes::NamespaceNotFound); } @@ -1344,13 +1285,6 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { client.update(update); } - // TODO SERVER-75274: Drop both collections on a catalog shard enabled config server. 
- NamespaceString indexCatalogNss; - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { - indexCatalogNss = NamespaceString::kConfigsvrIndexCatalogNamespace; - } else { - indexCatalogNss = NamespaceString::kShardIndexCatalogNamespace; - } LOGV2(6280502, "Dropping global indexes collection", "nss"_attr = indexCatalogNss); const auto deletionStatus = dropCollection(opCtx, @@ -1358,42 +1292,86 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { &dropReply, DropCollectionSystemCollectionMode::kAllowSystemCollectionDrops); uassert(deletionStatus.code(), - str::stream() << "Failed to drop " << indexCatalogNss + str::stream() << "Failed to drop " << indexCatalogNss.toStringForErrorMsg() << causedBy(deletionStatus.reason()), deletionStatus.isOK() || deletionStatus.code() == ErrorCodes::NamespaceNotFound); } } - // TODO (SERVER-70763): Remove once FCV 7.0 becomes last-lts. - void _removeSchemaOnConfigSettings( + /** + * May contain actions to perform after the FCV document update. Execution occurs when the object + * goes out of scope. + */ + using ScopedPostFCVDocumentUpdateActions = ScopeGuard>; + + /** + * Actions to be performed before the FCV document is set into upgrading or downgrading + * transitional state. The returned object may contain post-update actions which are executed + * when it goes out of scope, so it must be properly scoped to expire after the FCV document has + * been updated. The assumption is that the provided opCtx is still valid by the time the action + * is executed. + */ + ScopedPostFCVDocumentUpdateActions _prepareTransitionalState( OperationContext* opCtx, - const multiversion::FeatureCompatibilityVersion requestedVersion, - const multiversion::FeatureCompatibilityVersion originalVersion) { - if (feature_flags::gConfigSettingsSchema.isDisabledOnTargetFCVButEnabledOnOriginalFCV( - requestedVersion, originalVersion)) { - LOGV2(6885201, "Removing schema on config.settings"); - CollMod collModCmd{NamespaceString::kConfigSettingsNamespace}; - collModCmd.getCollModRequest().setValidator(BSONObj()); - collModCmd.getCollModRequest().setValidationLevel(ValidationLevelEnum::off); - BSONObjBuilder builder; - uassertStatusOKIgnoreNSNotFound(processCollModCommand( - opCtx, {NamespaceString::kConfigSettingsNamespace}, collModCmd, &builder)); + multiversion::FeatureCompatibilityVersion actualVersion, + multiversion::FeatureCompatibilityVersion requestedVersion) { + + std::function unblockNewIndexBuilds; + + // TODO (SERVER-68290): Remove index build abort due to FCV downgrade once the + // feature flag is removed. + if (feature_flags::gIndexBuildGracefulErrorHandling + .isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, actualVersion)) { + invariant(requestedVersion < actualVersion); + const auto reason = fmt::format("FCV downgrade in progress, from {} to {}.", + toString(actualVersion), + toString(requestedVersion)); + + const auto indexBuildsCoord = IndexBuildsCoordinator::get(opCtx); + // Block new index builds before writing the transitional FCV state, which will cause + // new feature flag checks to consider it disabled. + indexBuildsCoord->setNewIndexBuildsBlocked(true, reason); + // New index builds will be unblocked after ScopedPostFCVDocumentUpdateActions goes out + // of scope once the FCV document has been updated.
+ unblockNewIndexBuilds = [indexBuildsCoord] { + indexBuildsCoord->setNewIndexBuildsBlocked(false); + }; + + if (hangAfterBlockingIndexBuildsForFcvDowngrade.shouldFail()) { + LOGV2(7738704, "Hanging for failpoint hangAfterBlockingIndexBuildsForFcvDowngrade"); + hangAfterBlockingIndexBuildsForFcvDowngrade.pauseWhileSet(opCtx); + } + + // While new index builds are blocked, abort all existing index builds and wait for + // them. + indexBuildsCoord->abortAllIndexBuildsWithReason(opCtx, reason); + // Some index builds might already be committing or aborting, in which case the above + // call does not wait for them. Wait for the rest of the index builds. + indexBuildsCoord->waitForAllIndexBuildsToStop(opCtx); } + + const auto postUpdateActions = [unblockNewIndexBuilds = + std::move(unblockNewIndexBuilds)]() { + if (unblockNewIndexBuilds) { + unblockNewIndexBuilds(); + } + }; + + return {postUpdateActions}; } // _prepareToDowngrade performs all actions and checks that need to be done before proceeding to - // make any metadata changes as part of FCV downgrade. Any new feature specific downgrade - // code should be placed in the helper functions: + // make any metadata changes as part of FCV downgrade. Any new feature specific downgrade code + // should be placed in the helper functions: // * _prepareToDowngradeActions: Any downgrade actions that should be done before taking the FCV // full transition lock in S mode should go in this function. // * _userCollectionsUassertsForDowngrade: for any checks on user data or settings that will // uassert if users need to manually clean up user data or settings. // When doing feature flag checking for downgrade, we should check the feature flag is enabled // on current FCV and will be disabled after downgrade by using - // isDisabledOnTargetFCVButEnabledOnOriginalFCV(targetFCV, originalFCV) - // Please read the comments on those helper functions for more details on what should be placed - // in each function. + // isDisabledOnTargetFCVButEnabledOnOriginalFCV(targetFCV, originalFCV) Please read the comments + // on those helper functions for more details on what should be placed in each function. void _prepareToDowngrade(OperationContext* opCtx, const SetFeatureCompatibilityVersion& request, boost::optional changeTimestamp) { @@ -1452,21 +1430,16 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { // Set the isCleaningServerMetadata field to true. This prohibits the downgrading to // upgrading transition until the isCleaningServerMetadata is unset when we successfully // finish the FCV downgrade and transition to the DOWNGRADED state. - // (Ignore FCV check): This is intentional because we want to use this feature even if we - // are in downgrading fcv state. 
- if (repl::feature_flags::gDowngradingToUpgrading.isEnabledAndIgnoreFCVUnsafe()) { - { - const auto fcvChangeRegion( - FeatureCompatibilityVersion::enterFCVChangeRegion(opCtx)); - FeatureCompatibilityVersion::updateFeatureCompatibilityVersionDocument( - opCtx, - actualVersion, - requestedVersion, - isFromConfigServer, - changeTimestamp, - true /* setTargetVersion */, - true /* setIsCleaningServerMetadata*/); - } + { + const auto fcvChangeRegion(FeatureCompatibilityVersion::enterFCVChangeRegion(opCtx)); + FeatureCompatibilityVersion::updateFeatureCompatibilityVersionDocument( + opCtx, + actualVersion, + requestedVersion, + isFromConfigServer, + changeTimestamp, + true /* setTargetVersion */, + true /* setIsCleaningServerMetadata*/); } uassert(ErrorCodes::Error(7428201), @@ -1504,35 +1477,33 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { /** * Abort all serverless migrations active on this node, for both donors and recipients. - * Called after reaching an upgrading or downgrading state. + * Called after reaching an upgrading or downgrading state for nodes with ClusterRole::None. + * Must only be called in serverless mode. */ void _cancelServerlessMigrations(OperationContext* opCtx) { + invariant(repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()); invariant(serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading()); - if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { - auto donorService = checked_cast( - repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()) - ->lookupServiceByName(TenantMigrationDonorService::kServiceName)); - donorService->abortAllMigrations(opCtx); - - auto recipientService = checked_cast( - repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()) - ->lookupServiceByName(repl::TenantMigrationRecipientService:: - kTenantMigrationRecipientServiceName)); - recipientService->abortAllMigrations(opCtx); - - if (getGlobalReplSettings().isServerless()) { - auto splitDonorService = checked_cast( - repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()) - ->lookupServiceByName(ShardSplitDonorService::kServiceName)); - splitDonorService->abortAllSplits(opCtx); - - auto mergeRecipientService = checked_cast( - repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()) - ->lookupServiceByName( - repl::ShardMergeRecipientService::kShardMergeRecipientServiceName)); - mergeRecipientService->abortAllMigrations(opCtx); - } - } + auto donorService = checked_cast( + repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()) + ->lookupServiceByName(TenantMigrationDonorService::kServiceName)); + donorService->abortAllMigrations(opCtx); + + auto recipientService = checked_cast( + repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()) + ->lookupServiceByName( + repl::TenantMigrationRecipientService::kTenantMigrationRecipientServiceName)); + recipientService->abortAllMigrations(opCtx); + + auto splitDonorService = checked_cast( + repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()) + ->lookupServiceByName(ShardSplitDonorService::kServiceName)); + splitDonorService->abortAllSplits(opCtx); + + auto mergeRecipientService = checked_cast( + repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()) + ->lookupServiceByName( + repl::ShardMergeRecipientService::kShardMergeRecipientServiceName)); + mergeRecipientService->abortAllMigrations(opCtx); } /** @@ -1576,7 +1547,7 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { [&](const Collection* 
collection) { uassert(ErrorCodes::CannotDowngrade, str::stream() << "Cannot downgrade the config server as collection " - << collection->ns() + << collection->ns().toStringForErrorMsg() << " has 'changeStreamPreAndPostImages' enabled. Please " "unset the option or drop the collection.", !collection->isChangeStreamPreAndPostImagesEnabled()); @@ -1588,6 +1559,35 @@ class SetFeatureCompatibilityVersionCommand : public BasicCommand { } } + // _finalizeUpgrade is only for any tasks that must be done to fully complete the FCV upgrade + // AFTER the FCV document has already been updated to the UPGRADED FCV. + // This is because during _runUpgrade, the FCV is still in the transitional state (which behaves + // like the downgraded FCV), so certain tasks cannot be done yet until the FCV is fully + // upgraded. + // Additionally, it's possible that during an FCV upgrade, the replset/shard server/config + // server undergoes failover AFTER the FCV document has already been updated to the UPGRADED + // FCV, but before the cluster has completed _finalizeUpgrade. In this case, since the cluster + // failed over, the user/client may retry sending the setFCV command to the cluster, but the + // cluster is already in the requestedVersion (i.e. requestedVersion == actualVersion). However, + // the cluster should retry/complete the tasks from _finalizeUpgrade before sending ok:1 + // back to the user/client. Therefore, these tasks **must** be idempotent/retryable. + void _finalizeUpgrade(OperationContext* opCtx, + const multiversion::FeatureCompatibilityVersion requestedVersion) { + // TODO SERVER-73627: Remove once 7.0 becomes last LTS. + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) && + feature_flags::gDropCollectionHoldingCriticalSection.isEnabledOnVersion( + requestedVersion)) { + ShardingDDLCoordinatorService::getService(opCtx) + ->waitForCoordinatorsOfGivenTypeToComplete( + opCtx, DDLCoordinatorTypeEnum::kDropCollectionPre70Compatible); + ShardingDDLCoordinatorService::getService(opCtx) + ->waitForCoordinatorsOfGivenTypeToComplete( + opCtx, DDLCoordinatorTypeEnum::kDropDatabasePre70Compatible); + } + + _maybeRemoveOldAuditConfig(opCtx, requestedVersion); + } + } setFeatureCompatibilityVersionCommand; } // namespace diff --git a/src/mongo/db/commands/set_index_commit_quorum_command.cpp b/src/mongo/db/commands/set_index_commit_quorum_command.cpp index 6eaf714827226..a585c40936bc5 100644 --- a/src/mongo/db/commands/set_index_commit_quorum_command.cpp +++ b/src/mongo/db/commands/set_index_commit_quorum_command.cpp @@ -27,16 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/set_index_commit_quorum_gen.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/write_concern_options.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/commands/set_profiling_filter_globally_cmd.cpp b/src/mongo/db/commands/set_profiling_filter_globally_cmd.cpp index e64844387f955..92c27600054c7 100644 --- a/src/mongo/db/commands/set_profiling_filter_globally_cmd.cpp +++ b/src/mongo/db/commands/set_profiling_filter_globally_cmd.cpp @@ -28,11 +28,29 @@ */ #include "mongo/db/commands/set_profiling_filter_globally_cmd.h" + +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/commands/profile_common.h" #include "mongo/db/commands/profile_gen.h" +#include "mongo/db/profile_filter.h" #include "mongo/db/profile_filter_impl.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -43,8 +61,8 @@ Status SetProfilingFilterGloballyCmd::checkAuthForOperation(OperationContext* op const DatabaseName& dbName, const BSONObj& cmdObj) const { AuthorizationSession* authSession = AuthorizationSession::get(opCtx->getClient()); - return authSession->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(), - ActionType::enableProfiler) + return authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forAnyNormalResource(dbName.tenantId()), ActionType::enableProfiler) ? 
Status::OK() : Status(ErrorCodes::Unauthorized, "unauthorized"); } diff --git a/src/mongo/db/commands/set_profiling_filter_globally_cmd.h b/src/mongo/db/commands/set_profiling_filter_globally_cmd.h index dce2d74a46843..63ed738cf3c73 100644 --- a/src/mongo/db/commands/set_profiling_filter_globally_cmd.h +++ b/src/mongo/db/commands/set_profiling_filter_globally_cmd.h @@ -29,9 +29,17 @@ #pragma once +#include + #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/set_user_write_block_mode_command.cpp b/src/mongo/db/commands/set_user_write_block_mode_command.cpp index b852cf00d24cf..e446299ba7ca0 100644 --- a/src/mongo/db/commands/set_user_write_block_mode_command.cpp +++ b/src/mongo/db/commands/set_user_write_block_mode_command.cpp @@ -28,17 +28,36 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/set_user_write_block_mode_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/global_user_write_block_state.h" #include "mongo/db/s/user_writes_recoverable_critical_section_service.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -129,8 +148,9 @@ class SetUserWriteBlockModeCommand final : public TypedCommandgetClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::setUserWriteBlockMode})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::setUserWriteBlockMode})); } Mutex _mutex = MONGO_MAKE_LATCH("SetUserWriteBlockModeCommand::_mutex"); diff --git a/src/mongo/db/commands/shutdown.cpp b/src/mongo/db/commands/shutdown.cpp index e97338136f2ab..c1fbc2daf58d2 100644 --- a/src/mongo/db/commands/shutdown.cpp +++ b/src/mongo/db/commands/shutdown.cpp @@ -28,20 +28,32 @@ */ -#include "mongo/logv2/log.h" +#include +#include +#include + +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/shutdown.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/stdx/thread.h" #include "mongo/util/assert_util.h" +#include "mongo/util/exit.h" #include 
"mongo/util/exit_code.h" #include "mongo/util/fail_point.h" +#include "mongo/util/static_immortal.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - namespace mongo { - namespace shutdown_detail { MONGO_FAIL_POINT_DEFINE(crashOnShutdown); @@ -96,5 +108,4 @@ void finishShutdown(OperationContext* opCtx, } } // namespace shutdown_detail - } // namespace mongo diff --git a/src/mongo/db/commands/shutdown.h b/src/mongo/db/commands/shutdown.h index 373eb4a007e49..8b11ea64a3ebe 100644 --- a/src/mongo/db/commands/shutdown.h +++ b/src/mongo/db/commands/shutdown.h @@ -30,12 +30,21 @@ #include #include -#include "mongo/db/commands/shutdown_gen.h" - +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/shutdown_gen.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/exit.h" #include "mongo/util/ntservice.h" @@ -98,7 +107,8 @@ class CmdShutdown : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(client)->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::shutdown)); + ResourcePattern::forClusterResource(Base::request().getDbName().tenantId()), + ActionType::shutdown)); } }; diff --git a/src/mongo/db/commands/shutdown_d.cpp b/src/mongo/db/commands/shutdown_d.cpp index 94d901c51c123..fccba1212d283 100644 --- a/src/mongo/db/commands/shutdown_d.cpp +++ b/src/mongo/db/commands/shutdown_d.cpp @@ -28,15 +28,25 @@ */ -#include "mongo/platform/basic.h" - +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/shutdown.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/transaction_coordinator_service.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/commands/sleep_command.cpp b/src/mongo/db/commands/sleep_command.cpp index 159a57e213f29..a004ce75f6099 100644 --- a/src/mongo/db/commands/sleep_command.cpp +++ b/src/mongo/db/commands/sleep_command.cpp @@ -27,11 +27,29 @@ * it in the license file. 
*/ +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -92,8 +110,9 @@ class CmdSleep : public BasicCommand { return; } auto nss = NamespaceString(ns); - uassert( - 50961, "lockTarget is not a valid namespace", NamespaceString::validDBName(nss.db())); + uassert(50961, + "lockTarget is not a valid namespace", + NamespaceString::validDBName(nss.dbName())); auto dbMode = mode; if (!nsIsDbOnly(ns)) { @@ -114,7 +133,7 @@ class CmdSleep : public BasicCommand { // Need to acquire DBLock before attempting to acquire a collection lock. uassert(50962, "lockTarget is not a valid namespace", - NamespaceString::validCollectionComponent(ns)); + NamespaceString::validCollectionComponent(nss)); Lock::CollectionLock collLock(opCtx, nss, mode, Date_t::max()); LOGV2(6001603, "Collection lock acquired by sleep command.", diff --git a/src/mongo/db/commands/snapshot_management.cpp b/src/mongo/db/commands/snapshot_management.cpp index d84e94c3254ae..192948b1c4c85 100644 --- a/src/mongo/db/commands/snapshot_management.cpp +++ b/src/mongo/db/commands/snapshot_management.cpp @@ -27,18 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/base/init.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/snapshot_manager.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/vector_clock.h" +#include "mongo/util/assert_util.h" namespace mongo { class CmdMakeSnapshot final : public BasicCommand { diff --git a/src/mongo/db/commands/start_session_command.cpp b/src/mongo/db/commands/start_session_command.cpp index de254ea12377a..8c36e8cafadd3 100644 --- a/src/mongo/db/commands/start_session_command.cpp +++ b/src/mongo/db/commands/start_session_command.cpp @@ -27,22 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/base/init.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/client.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_cache.h" -#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/logical_session_id_helpers.h" -#include "mongo/db/stats/top.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/tenant_migration_donor_cmds.cpp b/src/mongo/db/commands/tenant_migration_donor_cmds.cpp index b0e952a9a6391..682014d4abf58 100644 --- a/src/mongo/db/commands/tenant_migration_donor_cmds.cpp +++ b/src/mongo/db/commands/tenant_migration_donor_cmds.cpp @@ -27,16 +27,56 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_donor_service.h" +#include "mongo/db/repl/tenant_migration_pem_payload_gen.h" +#include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/repl/tenant_migration_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -72,6 +112,10 @@ class DonorStartMigrationCmd : public TypedCommand { "donorStartMigration not available while upgrading or downgrading the donor FCV", !serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading()); + 
uassert(ErrorCodes::IllegalOperation, + "tenant migrations are only available if --serverless is enabled", + repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()); + const auto& cmd = request(); const auto migrationProtocol = cmd.getProtocol().value_or(kDefaultMigrationProtocol); const auto& tenantId = cmd.getTenantId(); @@ -142,8 +186,9 @@ class DonorStartMigrationCmd : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::runTenantMigration)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::runTenantMigration)); } bool supportsWriteConcern() const override { @@ -182,6 +227,10 @@ class DonorForgetMigrationCmd : public TypedCommand { "tenant migrations are not available on config servers", !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); + uassert(ErrorCodes::IllegalOperation, + "tenant migrations are only available if --serverless is enabled", + repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()); + const auto& cmd = request(); opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); @@ -217,8 +266,9 @@ class DonorForgetMigrationCmd : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::runTenantMigration)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::runTenantMigration)); } bool supportsWriteConcern() const override { @@ -256,6 +306,10 @@ class DonorAbortMigrationCmd : public TypedCommand { "tenant migrations are not available on config servers", !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); + uassert(ErrorCodes::IllegalOperation, + "tenant migrations are only available if --serverless is enabled", + repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()); + const RequestType& cmd = request(); auto donorService = @@ -301,8 +355,9 @@ class DonorAbortMigrationCmd : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::runTenantMigration)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::runTenantMigration)); } bool supportsWriteConcern() const override { diff --git a/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp b/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp index bb6b1a0a2f89f..3ddefa144e22a 100644 --- a/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp +++ b/src/mongo/db/commands/tenant_migration_recipient_cmds.cpp @@ -27,16 +27,50 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" #include "mongo/db/commands/tenant_migration_recipient_cmds_gen.h" -#include "mongo/db/feature_compatibility_version_parser.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/shard_merge_recipient_service.h" +#include "mongo/db/repl/tenant_migration_pem_payload_gen.h" #include "mongo/db/repl/tenant_migration_recipient_service.h" +#include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/repl/tenant_migration_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -66,6 +100,10 @@ class RecipientSyncDataCmd : public TypedCommand { "tenant migrations are not available on config servers", !serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); + uassert(ErrorCodes::IllegalOperation, + "tenant migrations are only available if --serverless is enabled", + repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()); + // (Generic FCV reference): This FCV reference should exist across LTS binary versions. 
uassert( 5356101, @@ -173,8 +211,9 @@ class RecipientSyncDataCmd : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::runTenantMigration)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::runTenantMigration)); } bool supportsWriteConcern() const override { @@ -228,6 +267,10 @@ class RecipientVoteImportedFilesCommand final uassertStatusOK( repl::ReplicationCoordinator::get(opCtx)->checkReplEnabledForCommand(&result)); + uassert(ErrorCodes::IllegalOperation, + "tenant migrations are only available if --serverless is enabled", + repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()); + const auto& cmd = request(); LOGV2(6112805, "Received RecipientVoteImportedFiles request", @@ -259,8 +302,9 @@ class RecipientVoteImportedFilesCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } recipientVoteImportedFilesCommand; @@ -289,6 +333,10 @@ class RecipientForgetMigrationCmd : public TypedCommandgetSettings().isServerless()); + const auto& cmd = request(); const auto migrationProtocol = cmd.getProtocol().value_or(kDefaultMigrationProtocol); const auto& tenantId = cmd.getTenantId(); @@ -395,8 +443,9 @@ class RecipientForgetMigrationCmd : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::runTenantMigration)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::runTenantMigration)); } bool supportsWriteConcern() const override { diff --git a/src/mongo/db/commands/test_api_version_2_commands.cpp b/src/mongo/db/commands/test_api_version_2_commands.cpp index 713b5aacebdbc..7b07cf8e7f9d1 100644 --- a/src/mongo/db/commands/test_api_version_2_commands.cpp +++ b/src/mongo/db/commands/test_api_version_2_commands.cpp @@ -27,8 +27,19 @@ * it in the license file. 
*/ +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/api_parameters.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/commands/test_commands.cpp b/src/mongo/db/commands/test_commands.cpp index b28dae581e004..1a8079a59dae1 100644 --- a/src/mongo/db/commands/test_commands.cpp +++ b/src/mongo/db/commands/test_commands.cpp @@ -29,24 +29,55 @@ #include "mongo/db/commands/test_commands.h" +#include +#include +#include +#include +#include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/capped_collection_maintenance.h" #include "mongo/db/catalog/capped_utils.h" -#include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/collection_yield_restore.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" +#include "mongo/db/dbhelpers.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/ops/insert.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -102,19 +133,21 @@ class GodInsert : public BasicCommand { OldClientContext ctx(opCtx, nss); Database* db = ctx.db(); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + WriteUnitOfWork wunit(opCtx); UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); - CollectionPtr collection( - CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)); - if (!collection) { - collection = CollectionPtr(db->createCollection(opCtx, nss)); - uassert(ErrorCodes::CannotCreateCollection, "could not create collection", collection); + if (!collection.exists()) { + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx, &collection); + db->createCollection(opCtx, nss); } - 
collection.makeYieldable(opCtx, LockedCollectionYieldRestore(opCtx, collection)); + uassert( + ErrorCodes::CannotCreateCollection, "could not create collection", collection.exists()); - OpDebug* const nullOpDebug = nullptr; - Status status = collection_internal::insertDocument( - opCtx, collection, InsertStatement(obj), nullOpDebug, false); + Status status = Helpers::insert(opCtx, collection, obj); if (status.isOK()) { wunit.commit(); } @@ -175,9 +208,9 @@ class CapTrunc : public BasicCommand { RecordId end; { - // Scan backwards through the collection to find the document to start truncating from. - // We will remove 'n' documents, so start truncating from the (n + 1)th document to the - // end. + // Scan backwards through the collection to find the document to start truncating + // from. We will remove 'n' documents, so start truncating from the (n + 1)th + // document to the end. auto exec = InternalPlanner::collectionScan(opCtx, &collection.getCollection(), PlanYieldPolicy::YieldPolicy::NO_YIELD, @@ -287,22 +320,23 @@ class DurableHistoryReplicatedTestCmd : public BasicCommand { wuow.commit(); } - AutoGetCollection autoColl(opCtx, kDurableHistoryTestNss, MODE_IX); + const auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, kDurableHistoryTestNss, AcquisitionPrerequisites::kWrite), + MODE_IX); WriteUnitOfWork wuow(opCtx); - // Note, this write will replicate to secondaries, but a secondary will not in-turn pin the - // oldest timestamp. The write otherwise must be timestamped in a storage engine table with - // logging disabled. This is to test that rolling back the written document also results in - // the pin being lifted. + // Note, this write will replicate to secondaries, but a secondary will not in-turn pin + // the oldest timestamp. The write otherwise must be timestamped in a storage engine + // table with logging disabled. This is to test that rolling back the written document + // also results in the pin being lifted. Timestamp pinTs = uassertStatusOK(opCtx->getServiceContext()->getStorageEngine()->pinOldestTimestamp( opCtx, kTestingDurableHistoryPinName, requestedPinTs, round)); - uassertStatusOK(collection_internal::insertDocument( - opCtx, - *autoColl, - InsertStatement(fixDocumentForInsert(opCtx, BSON("pinTs" << pinTs)).getValue()), - nullptr)); + uassertStatusOK(Helpers::insert( + opCtx, collection, fixDocumentForInsert(opCtx, BSON("pinTs" << pinTs)).getValue())); wuow.commit(); result.append("requestedPinTs", requestedPinTs); diff --git a/src/mongo/db/commands/test_commands.h b/src/mongo/db/commands/test_commands.h index 4903cc4a8ccbf..052eb5633e9b5 100644 --- a/src/mongo/db/commands/test_commands.h +++ b/src/mongo/db/commands/test_commands.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/bson/timestamp.h" #include "mongo/db/operation_context.h" diff --git a/src/mongo/db/commands/test_commands_enabled.cpp b/src/mongo/db/commands/test_commands_enabled.cpp index 670ef0567e35d..e3de65301918a 100644 --- a/src/mongo/db/commands/test_commands_enabled.cpp +++ b/src/mongo/db/commands/test_commands_enabled.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/commands/test_commands_enabled_gen.h" diff --git a/src/mongo/db/commands/test_deprecation_command.cpp b/src/mongo/db/commands/test_deprecation_command.cpp index bdf5c0d0a4e64..4a6c3008fa152 100644 --- a/src/mongo/db/commands/test_deprecation_command.cpp +++ b/src/mongo/db/commands/test_deprecation_command.cpp @@ -27,8 +27,19 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/api_parameters.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/commands/top_command.cpp b/src/mongo/db/commands/top_command.cpp index 0eadc4fdecfb6..9846254ffcbd1 100644 --- a/src/mongo/db/commands/top_command.cpp +++ b/src/mongo/db/commands/top_command.cpp @@ -27,18 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/base/init.h" -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/stats/top.h" +#include "mongo/stdx/type_traits.h" namespace { @@ -65,11 +72,11 @@ class TopCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::top)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::top)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/db/commands/traffic_recording_cmds.cpp b/src/mongo/db/commands/traffic_recording_cmds.cpp index b82157eb08232..3fcdf9d63a707 100644 --- a/src/mongo/db/commands/traffic_recording_cmds.cpp +++ b/src/mongo/db/commands/traffic_recording_cmds.cpp @@ -28,16 +28,24 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" #include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/traffic_recorder.h" #include "mongo/db/traffic_recorder_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include 
"mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -70,8 +78,9 @@ class StartRecordingCommand final : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::trafficRecord})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::trafficRecord})); } NamespaceString ns() const override { @@ -105,8 +114,9 @@ class StopRecordingCommand final : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::trafficRecord})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::trafficRecord})); } NamespaceString ns() const override { diff --git a/src/mongo/db/commands/txn_cmds.cpp b/src/mongo/db/commands/txn_cmds.cpp index b846e0bd1fe4f..d26e95e01d5ac 100644 --- a/src/mongo/db/commands/txn_cmds.cpp +++ b/src/mongo/db/commands/txn_cmds.cpp @@ -28,22 +28,41 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/curop_failpoint_helpers.h" -#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction_validation.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction diff --git a/src/mongo/db/commands/txn_two_phase_commit_cmds.idl b/src/mongo/db/commands/txn_two_phase_commit_cmds.idl index 69ed3648c49cb..f6039f5b60a5c 100644 --- a/src/mongo/db/commands/txn_two_phase_commit_cmds.idl +++ b/src/mongo/db/commands/txn_two_phase_commit_cmds.idl @@ -40,6 +40,19 @@ structs: description: "The id of the shard" type: shard_id + PrepareReply: + description: "Reply to prepareTransactionCommand" + is_command_reply: true + fields: + prepareTimestamp: + description: "Timestamp of prepare transaction operation. Optional for backward compatibility only." + type: timestamp + optional: true + affectedNamespaces: + description: "List of namespaces, affected by the transaction. Optional for backward compatibility only." 
+ type: array + optional: true + commands: prepareTransaction: description: "Parser for the 'prepareTransaction' command." @@ -47,6 +60,7 @@ commands: strict: true namespace: ignored api_version: "" + reply_type: PrepareReply coordinateCommitTransaction: description: "Parser for the 'coordinateCommitTransaction' command." diff --git a/src/mongo/db/commands/update_metrics.cpp b/src/mongo/db/commands/update_metrics.cpp index a730e812c282b..c0ee251f7217a 100644 --- a/src/mongo/db/commands/update_metrics.cpp +++ b/src/mongo/db/commands/update_metrics.cpp @@ -29,6 +29,14 @@ #include "mongo/db/commands/update_metrics.h" +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/ops/write_ops_parsers.h" + namespace mongo { UpdateMetrics::UpdateMetrics(StringData commandName) : _commandsWithAggregationPipeline("commands." + commandName + ".pipeline"), diff --git a/src/mongo/db/commands/update_metrics.h b/src/mongo/db/commands/update_metrics.h index d485d723e81e8..0d50da464f426 100644 --- a/src/mongo/db/commands/update_metrics.h +++ b/src/mongo/db/commands/update_metrics.h @@ -33,6 +33,7 @@ #include "mongo/bson/bsonobj.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" namespace mongo { /** diff --git a/src/mongo/db/commands/user_management_commands.cpp b/src/mongo/db/commands/user_management_commands.cpp index a3c9fdb8bb397..c6de8a78a0853 100644 --- a/src/mongo/db/commands/user_management_commands.cpp +++ b/src/mongo/db/commands/user_management_commands.cpp @@ -27,63 +27,115 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - +#include +#include #include +#include +#include +#include #include +#include +#include #include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/bson/mutable/algorithm.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/element.h" +#include "mongo/bson/oid.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/config.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/crypto/mechanism_scram.h" +#include "mongo/crypto/sha1_block.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/audit.h" #include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/address_restriction.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/auth_options_gen.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege_parser.h" +#include "mongo/db/auth/builtin_roles.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/privilege_format.h" #include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/sasl_options.h" +#include "mongo/db/auth/umc_info_command_arg.h" #include "mongo/db/auth/user.h" #include "mongo/db/auth/user_document_parser.h" #include "mongo/db/auth/user_management_commands_parser.h" +#include "mongo/db/auth/user_name.h" #include 
"mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/run_aggregate.h" -#include "mongo/db/commands/test_commands.h" +#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/commands/user_management_commands_common.h" #include "mongo/db/commands/user_management_commands_gen.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/multitenancy.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/factory.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/rpc/reply_interface.h" #include "mongo/s/write_ops/batched_command_response.h" #include "mongo/stdx/unordered_set.h" #include "mongo/transport/service_entry_point.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #include "mongo/util/icu.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/net/ssl_manager.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/net/ssl_types.h" #include "mongo/util/password_digest.h" +#include "mongo/util/read_through_cache.h" #include "mongo/util/sequence_util.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" @@ -91,7 +143,6 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl - namespace mongo { namespace { @@ -724,26 +775,32 @@ class UMCTransaction { UMCTransaction(OperationContext* opCtx, StringData forCommand, - const boost::optional& tenant) { - // Don't transactionalize on standalone. - _isReplSet = repl::ReplicationCoordinator::get(opCtx)->getReplicationMode() == - repl::ReplicationCoordinator::modeReplSet; + const boost::optional& tenant) + : // Don't transactionalize on standalone. + _isReplSet{repl::ReplicationCoordinator::get(opCtx)->getReplicationMode() == + repl::ReplicationCoordinator::modeReplSet}, + // Subclient used by transaction operations. 
+ _client{opCtx->getServiceContext()->makeClient(forCommand.toString())}, + _dbName{DatabaseNameUtil::deserialize(tenant, kAdminDB)}, + _sessionInfo{LogicalSessionFromClient(UUID::gen())} { + _sessionInfo.setTxnNumber(0); + _sessionInfo.setStartTransaction(true); + _sessionInfo.setAutocommit(false); + + // TODO(SERVER-74660): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*_client.get()); + _client.get()->setSystemOperationUnkillableByStepdown(lk); + } - // Subclient used by transaction operations. - _client = opCtx->getServiceContext()->makeClient(forCommand.toString()); auto as = AuthorizationSession::get(_client.get()); if (as) { as->grantInternalAuthorization(_client.get()); } - _dbName = DatabaseNameUtil::deserialize(tenant, kAdminDB); - AlternativeClientRegion clientRegion(_client); - _sessionInfo.setStartTransaction(true); - _sessionInfo.setTxnNumber(0); - _sessionInfo.setSessionId(LogicalSessionFromClient(UUID::gen())); - _sessionInfo.setAutocommit(false); } + ~UMCTransaction() { if (_state == TransactionState::kStarted) { abort().ignore(); @@ -880,6 +937,34 @@ class UMCTransaction { TransactionState _state = TransactionState::kInit; }; +void uassertNoUnrecognizedActions(const std::vector& unrecognizedActions) { + if (unrecognizedActions.empty()) { + return; + } + + // Dedupe + std::set actions; + for (const auto& action : unrecognizedActions) { + actions.insert(StringData{action}); + } + + StringBuilder sb; + sb << "Unknown action type"; + if (actions.size() > 1) { + sb << 's'; + } + sb << " in privilege set:"; + for (const auto& action : actions) { + sb << " '" << action << "',"; + } + + // Trim last comma off. + auto msg = sb.str(); + msg.pop_back(); + + uasserted(ErrorCodes::BadValue, msg); +} + enum class SupportTenantOption { kNever, kTestOnly, @@ -986,6 +1071,12 @@ class CmdUMCTyped : public TypedCommand> { MONGO_UNREACHABLE; return false; } + + // Since the user management commands do not affect user data, we should allow these commands + // even if the user does not have the direct shard operations action type. + bool shouldSkipDirectConnectionChecks() const final { + return true; + } }; @@ -1013,7 +1104,7 @@ void CmdUMCTyped::Invocation::typedRun(OperationContext* opCt cmd.getCommandParameter().find('\0') == std::string::npos); UserName userName(cmd.getCommandParameter(), dbname); - const bool isExternal = dbname.db() == NamespaceString::kExternalDb; + const bool isExternal = dbname.isExternalDB(); uassert(ErrorCodes::BadValue, "Must provide a 'pwd' field for all user documents, except those" " with '$external' as the user's source db", @@ -1247,10 +1338,12 @@ DropAllUsersFromDatabaseReply CmdUMCTyped::Invo auto* authzManager = AuthorizationManager::get(serviceContext); auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager)); - audit::logDropAllUsersFromDatabase(client, dbname.db()); + audit::logDropAllUsersFromDatabase(client, dbname); auto swNumRemoved = removePrivilegeDocuments( - opCtx, BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname.db()), dbname.tenantId()); + opCtx, + BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname.serializeWithoutTenantPrefix()), + dbname.tenantId()); // Must invalidate even on bad status - what if the write succeeded but the GLE failed? authzManager->invalidateUsersFromDB(opCtx, dbname); @@ -1415,8 +1508,8 @@ UsersInfoReply CmdUMCTyped::Invocation::typedRu if (arg.isAllForAllDBs()) { // Leave the pipeline unconstrained, we want to return every user. 
} else if (arg.isAllOnCurrentDB()) { - pipeline.push_back( - BSON("$match" << BSON(AuthorizationManager::USER_DB_FIELD_NAME << dbname.db()))); + pipeline.push_back(BSON("$match" << BSON(AuthorizationManager::USER_DB_FIELD_NAME + << dbname.serializeWithoutTenantPrefix()))); } else { invariant(arg.isExact()); BSONArrayBuilder usersMatchArray; @@ -1505,7 +1598,7 @@ void CmdUMCTyped::Invocation::typedRun(OperationContext* opCt uassert(ErrorCodes::BadValue, "Cannot create roles in the $external database", - dbname.db() != NamespaceString::kExternalDb); + !dbname.isExternalDB()); uassert(ErrorCodes::BadValue, "Cannot create roles with the same name as a built-in role", @@ -1516,9 +1609,13 @@ void CmdUMCTyped::Invocation::typedRun(OperationContext* opCt roleObjBuilder.append(AuthorizationManager::ROLE_NAME_FIELD_NAME, roleName.getRole()); roleObjBuilder.append(AuthorizationManager::ROLE_DB_FIELD_NAME, roleName.getDB()); - BSONArray privileges; - uassertStatusOK(privilegeVectorToBSONArray(cmd.getPrivileges(), &privileges)); - roleObjBuilder.append("privileges", privileges); + std::vector unrecognizedActions; + PrivilegeVector privileges = Privilege::privilegeVectorFromParsedPrivilegeVector( + dbname.tenantId(), cmd.getPrivileges(), &unrecognizedActions); + uassertNoUnrecognizedActions(unrecognizedActions); + BSONArray privBSON; + uassertStatusOK(privilegeVectorToBSONArray(privileges, &privBSON)); + roleObjBuilder.append("privileges", privBSON); auto resolvedRoleNames = auth::resolveRoleNames(cmd.getRoles(), dbname); roleObjBuilder.append("roles", containerToBSONArray(resolvedRoleNames)); @@ -1536,10 +1633,9 @@ void CmdUMCTyped::Invocation::typedRun(OperationContext* opCt // Role existence has to be checked after acquiring the update lock uassertStatusOK(checkOkayToGrantRolesToRole(opCtx, roleName, resolvedRoleNames, authzManager)); - uassertStatusOK(checkOkayToGrantPrivilegesToRole(roleName, cmd.getPrivileges())); + uassertStatusOK(checkOkayToGrantPrivilegesToRole(roleName, privileges)); - audit::logCreateRole( - client, roleName, resolvedRoleNames, cmd.getPrivileges(), bsonAuthRestrictions); + audit::logCreateRole(client, roleName, resolvedRoleNames, privileges, bsonAuthRestrictions); uassertStatusOK(insertRoleDocument(opCtx, roleObjBuilder.done(), roleName.getTenant())); } @@ -1561,10 +1657,15 @@ void CmdUMCTyped::Invocation::typedRun(OperationContext* opCt BSONObjBuilder updateSetBuilder; BSONObjBuilder updateUnsetBuilder; + PrivilegeVector privileges; if (auto privs = cmd.getPrivileges()) { - BSONArray privileges; - uassertStatusOK(privilegeVectorToBSONArray(privs.get(), &privileges)); - updateSetBuilder.append("privileges", privileges); + std::vector unrecognizedActions; + privileges = Privilege::privilegeVectorFromParsedPrivilegeVector( + dbname.tenantId(), privs.get(), &unrecognizedActions); + uassertNoUnrecognizedActions(unrecognizedActions); + BSONArray privBSON; + uassertStatusOK(privilegeVectorToBSONArray(privileges, &privBSON)); + updateSetBuilder.append("privileges", privBSON); } boost::optional> optRoles; @@ -1595,13 +1696,15 @@ void CmdUMCTyped::Invocation::typedRun(OperationContext* opCt uassertStatusOK(checkOkayToGrantRolesToRole(opCtx, roleName, *optRoles, authzManager)); } - auto privs = cmd.getPrivileges(); - if (privs) { - uassertStatusOK(checkOkayToGrantPrivilegesToRole(roleName, privs.get())); + if (!privileges.empty()) { + uassertStatusOK(checkOkayToGrantPrivilegesToRole(roleName, privileges)); } - audit::logUpdateRole( - client, roleName, optRoles ? 
&*optRoles : nullptr, privs ? &*privs : nullptr, authRest); + audit::logUpdateRole(client, + roleName, + optRoles ? &*optRoles : nullptr, + hasPrivs ? &privileges : nullptr, + authRest); const auto updateSet = updateSetBuilder.obj(); const auto updateUnset = updateUnsetBuilder.obj(); @@ -1639,13 +1742,17 @@ void CmdUMCTyped::Invocation::typedRun(OperationCo auto* authzManager = AuthorizationManager::get(serviceContext); auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager)); - uassertStatusOK(checkOkayToGrantPrivilegesToRole(roleName, cmd.getPrivileges())); + std::vector unrecognizedActions; + PrivilegeVector newPrivileges = Privilege::privilegeVectorFromParsedPrivilegeVector( + dbname.tenantId(), cmd.getPrivileges(), &unrecognizedActions); + uassertNoUnrecognizedActions(unrecognizedActions); + uassertStatusOK(checkOkayToGrantPrivilegesToRole(roleName, newPrivileges)); // Add additional privileges to existing set. auto data = uassertStatusOK(authzManager->resolveRoles( opCtx, {roleName}, AuthorizationManager::ResolveRoleOption::kDirectPrivileges)); auto privileges = std::move(data.privileges.get()); - for (const auto& priv : cmd.getPrivileges()) { + for (const auto& priv : newPrivileges) { Privilege::addPrivilegeToPrivilegeVector(&privileges, priv); } @@ -1660,7 +1767,7 @@ void CmdUMCTyped::Invocation::typedRun(OperationCo BSONObjBuilder updateBSONBuilder; updateObj.writeTo(&updateBSONBuilder); - audit::logGrantPrivilegesToRole(client, roleName, cmd.getPrivileges()); + audit::logGrantPrivilegesToRole(client, roleName, newPrivileges); auto status = updateRoleDocument(opCtx, roleName, updateBSONBuilder.done()); // Must invalidate even on bad status - what if the write succeeded but the GLE failed? @@ -1688,10 +1795,14 @@ void CmdUMCTyped::Invocation::typedRun(Operatio auto* authzManager = AuthorizationManager::get(serviceContext); auto lk = uassertStatusOK(requireWritableAuthSchema28SCRAM(opCtx, authzManager)); + std::vector unrecognizedActions; + PrivilegeVector rmPrivs = Privilege::privilegeVectorFromParsedPrivilegeVector( + dbname.tenantId(), cmd.getPrivileges(), &unrecognizedActions); + uassertNoUnrecognizedActions(unrecognizedActions); auto data = uassertStatusOK(authzManager->resolveRoles( opCtx, {roleName}, AuthorizationManager::ResolveRoleOption::kDirectPrivileges)); auto privileges = std::move(data.privileges.get()); - for (const auto& rmPriv : cmd.getPrivileges()) { + for (const auto& rmPriv : rmPrivs) { for (auto it = privileges.begin(); it != privileges.end(); ++it) { if (it->getResourcePattern() == rmPriv.getResourcePattern()) { it->removeActions(rmPriv.getActions()); @@ -1711,7 +1822,7 @@ void CmdUMCTyped::Invocation::typedRun(Operatio uassertStatusOK(setElement.pushBack(privilegesElement)); uassertStatusOK(Privilege::getBSONForPrivileges(privileges, privilegesElement)); - audit::logRevokePrivilegesFromRole(client, roleName, cmd.getPrivileges()); + audit::logRevokePrivilegesFromRole(client, roleName, rmPrivs); BSONObjBuilder updateBSONBuilder; updateObj.writeTo(&updateBSONBuilder); @@ -1958,7 +2069,8 @@ DropAllRolesFromDatabaseReply CmdUMCTyped::Invo DropAllRolesFromDatabaseReply reply; const auto dropRoleOps = [&](UMCTransaction& txn) -> Status { - auto roleMatch = BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname.db()); + auto roleMatch = + BSON(AuthorizationManager::ROLE_DB_FIELD_NAME << dbname.serializeWithoutTenantPrefix()); auto rolesMatch = BSON("roles" << roleMatch); // Remove these roles from all users @@ -1973,7 +2085,7 @@ 
DropAllRolesFromDatabaseReply CmdUMCTyped::Invo // Remove these roles from all other roles swCount = txn.update(rolesNSS(dbname.tenantId()), - BSON("roles.db" << dbname.db()), + BSON("roles.db" << dbname.serializeWithoutTenantPrefix()), BSON("$pull" << rolesMatch)); if (!swCount.isOK()) { return useDefaultCode(swCount.getStatus(), ErrorCodes::RoleModificationFailed) @@ -1998,7 +2110,7 @@ DropAllRolesFromDatabaseReply CmdUMCTyped::Invo auto status = retryTransactionOps( opCtx, dbname.tenantId(), DropAllRolesFromDatabaseCommand::kCommandName, dropRoleOps, [&] { - audit::logDropAllRolesFromDatabase(opCtx->getClient(), dbname.db()); + audit::logDropAllRolesFromDatabase(opCtx->getClient(), dbname); }); if (!status.isOK()) { uassertStatusOK( diff --git a/src/mongo/db/commands/user_management_commands.idl b/src/mongo/db/commands/user_management_commands.idl index 06fd9ef3af7d8..a1df8c0e77715 100644 --- a/src/mongo/db/commands/user_management_commands.idl +++ b/src/mongo/db/commands/user_management_commands.idl @@ -32,6 +32,7 @@ imports: - "mongo/db/basic_types.idl" - "mongo/db/auth/auth_types.idl" - "mongo/db/auth/address_restriction.idl" + - "mongo/db/auth/parsed_privilege.idl" - "mongo/db/auth/user_management_commands_parser.idl" - "mongo/db/multitenancy.idl" @@ -227,7 +228,7 @@ commands: fields: privileges: description: "Actions explicitly granted to this role" - type: array + type: array roles: description: "Roles to inherit additional privileges from" type: array @@ -247,7 +248,7 @@ commands: fields: privileges: description: "Actions explicitly granted to this role" - type: array + type: array optional: true roles: description: "Roles to inherit additional privileges from" @@ -269,7 +270,7 @@ commands: fields: privileges: description: "Privileges to grant to this role" - type: array + type: array revokePrivilegesFromRole: description: "Grants privileges to a role" @@ -282,7 +283,7 @@ commands: fields: privileges: description: "Privileges to revoke from this role" - type: array + type: array grantRolesToRole: description: "Grant roles to a role" diff --git a/src/mongo/db/commands/user_management_commands_common.cpp b/src/mongo/db/commands/user_management_commands_common.cpp index aad109f76eb79..6ef902faf9c16 100644 --- a/src/mongo/db/commands/user_management_commands_common.cpp +++ b/src/mongo/db/commands/user_management_commands_common.cpp @@ -27,29 +27,37 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/user_management_commands_common.h" - -#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/bson/mutable/algorithm.h" -#include "mongo/config.h" +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/parsed_privilege_gen.h" #include "mongo/db/auth/resource_pattern.h" -#include "mongo/db/auth/security_token_gen.h" -#include "mongo/db/auth/user.h" -#include "mongo/db/auth/user_management_commands_parser.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/auth/umc_info_command_arg.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/commands/user_management_commands_common.h" #include "mongo/db/commands/user_management_commands_gen.h" -#include "mongo/db/jsobj.h" #include "mongo/db/multitenancy.h" -#include "mongo/db/multitenancy_gen.h" -#include "mongo/util/sequence_util.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" #include "mongo/util/str.h" namespace mongo { @@ -59,16 +67,18 @@ namespace { Status checkAuthorizedToGrantPrivilege(AuthorizationSession* authzSession, const Privilege& privilege) { const ResourcePattern& resource = privilege.getResourcePattern(); + const auto& targetDb = resource.dbNameToMatch(); if (resource.isDatabasePattern() || resource.isExactNamespacePattern()) { if (!authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(resource.databaseToMatch()), - ActionType::grantRole)) { + ResourcePattern::forDatabaseName(targetDb), ActionType::grantRole)) { return Status(ErrorCodes::Unauthorized, str::stream() << "Not authorized to grant privileges on the " - << resource.databaseToMatch() << "database"); + << targetDb.db() << "database"); } } else if (!authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName("admin"), ActionType::grantRole)) { + ResourcePattern::forDatabaseName( + DatabaseNameUtil::deserialize(targetDb.tenantId(), "admin"_sd)), + ActionType::grantRole)) { return Status(ErrorCodes::Unauthorized, "To grant privileges affecting multiple databases or the cluster," " must be authorized to grant roles from the admin database"); @@ -92,7 +102,8 @@ Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession, const std::vector& roles) { for (size_t i = 0; i < roles.size(); ++i) { if (!authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(roles[i].getDB()), ActionType::grantRole)) { + ResourcePattern::forDatabaseName(roles[i].getDatabaseName()), + ActionType::grantRole)) { return Status(ErrorCodes::Unauthorized, str::stream() << "Not authorized to grant role: " << roles[i]); } @@ -102,9 +113,11 @@ Status checkAuthorizedToGrantRoles(AuthorizationSession* authzSession, } Status checkAuthorizedToGrantPrivileges(AuthorizationSession* authzSession, - const PrivilegeVector& privileges) { - for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) { - Status status = 
checkAuthorizedToGrantPrivilege(authzSession, *it); + const boost::optional& tenantId, + const std::vector& privileges) { + for (const auto& pp : privileges) { + auto privilege = Privilege::resolvePrivilegeWithTenant(tenantId, pp); + auto status = checkAuthorizedToGrantPrivilege(authzSession, privilege); if (!status.isOK()) { return status; } @@ -117,7 +130,8 @@ Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession, const std::vector& roles) { for (size_t i = 0; i < roles.size(); ++i) { if (!authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(roles[i].getDB()), ActionType::revokeRole)) { + ResourcePattern::forDatabaseName(roles[i].getDatabaseName()), + ActionType::revokeRole)) { return Status(ErrorCodes::Unauthorized, str::stream() << "Not authorized to revoke role: " << roles[i]); } @@ -129,16 +143,18 @@ Status checkAuthorizedToRevokeRoles(AuthorizationSession* authzSession, Status checkAuthorizedToRevokePrivilege(AuthorizationSession* authzSession, const Privilege& privilege) { const ResourcePattern& resource = privilege.getResourcePattern(); + const auto& targetDb = resource.dbNameToMatch(); if (resource.isDatabasePattern() || resource.isExactNamespacePattern()) { if (!authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(resource.databaseToMatch()), - ActionType::revokeRole)) { + ResourcePattern::forDatabaseName(targetDb), ActionType::revokeRole)) { return Status(ErrorCodes::Unauthorized, str::stream() << "Not authorized to revoke privileges on the " - << resource.databaseToMatch() << "database"); + << targetDb.db() << "database"); } } else if (!authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName("admin"), ActionType::revokeRole)) { + ResourcePattern::forDatabaseName( + DatabaseNameUtil::deserialize(targetDb.tenantId(), "admin"_sd)), + ActionType::revokeRole)) { return Status(ErrorCodes::Unauthorized, "To revoke privileges affecting multiple databases or the cluster," " must be authorized to revoke roles from the admin database"); @@ -147,9 +163,11 @@ Status checkAuthorizedToRevokePrivilege(AuthorizationSession* authzSession, } Status checkAuthorizedToRevokePrivileges(AuthorizationSession* authzSession, - const PrivilegeVector& privileges) { - for (PrivilegeVector::const_iterator it = privileges.begin(); it != privileges.end(); ++it) { - Status status = checkAuthorizedToRevokePrivilege(authzSession, *it); + const boost::optional& tenantId, + const std::vector& privileges) { + for (const auto& pp : privileges) { + auto privilege = Privilege::resolvePrivilegeWithTenant(tenantId, pp); + auto status = checkAuthorizedToRevokePrivilege(authzSession, privilege); if (!status.isOK()) { return status; } @@ -163,7 +181,7 @@ Status checkAuthorizedToSetRestrictions(AuthorizationSession* authzSession, const DatabaseName& dbname) { if (hasAuthRestriction) { if (!authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), + ResourcePattern::forDatabaseName(dbname), ActionType::setAuthenticationRestriction)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } @@ -189,9 +207,8 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const CreateUserCommand& uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to create users on db: " << dbname.toStringForErrorMsg(), - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), - ActionType::createUser)); + 
as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), + ActionType::createUser)); auto resolvedRoles = resolveRoleNames(request.getRoles(), dbname); uassertStatusOK(checkAuthorizedToGrantRoles(as, resolvedRoles)); @@ -209,26 +226,25 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const UpdateUserCommand& str::stream() << "Not authorized to change password of user: " << userName, (request.getPwd() == boost::none) || isAuthorizedToChangeOwnPasswordAsUser(as, userName) || - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), - ActionType::changePassword)); + as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), + ActionType::changePassword)); uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to change customData of user: " << userName, (request.getCustomData() == boost::none) || isAuthorizedToChangeOwnCustomDataAsUser(as, userName) || - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), - ActionType::changeCustomData)); + as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), + ActionType::changeCustomData)); if (auto possibleRoles = request.getRoles()) { // You don't know what roles you might be revoking, so require the ability to // revoke any role in the system. - uassert(ErrorCodes::Unauthorized, - "In order to use updateUser to set roles array, must be " - "authorized to revoke any role in the system", - as->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(), - ActionType::revokeRole)); + uassert( + ErrorCodes::Unauthorized, + "In order to use updateUser to set roles array, must be " + "authorized to revoke any role in the system", + as->isAuthorizedForActionsOnResource( + ResourcePattern::forAnyNormalResource(dbname.tenantId()), ActionType::revokeRole)); auto resolvedRoles = resolveRoleNames(possibleRoles.value(), dbname); uassertStatusOK(checkAuthorizedToGrantRoles(as, resolvedRoles)); @@ -255,7 +271,8 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const CreateRoleCommand& as->isAuthorizedToCreateRole(roleName)); uassertStatusOK(checkAuthorizedToGrantRoles(as, resolveRoleNames(request.getRoles(), dbname))); - uassertStatusOK(checkAuthorizedToGrantPrivileges(as, request.getPrivileges())); + uassertStatusOK( + checkAuthorizedToGrantPrivileges(as, dbname.tenantId(), request.getPrivileges())); uassertStatusOK(checkAuthorizedToSetRestrictions( as, request.getAuthenticationRestrictions() != boost::none, dbname)); } @@ -268,15 +285,15 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const UpdateRoleCommand& // to revoke any role (or privilege) in the system. 
uassert(ErrorCodes::Unauthorized, "updateRole command required the ability to revoke any role in the system", - as->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(), - ActionType::revokeRole)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forAnyNormalResource(dbname.tenantId()), ActionType::revokeRole)); if (auto roles = request.getRoles()) { auto resolvedRoles = resolveRoleNames(roles.value(), dbname); uassertStatusOK(checkAuthorizedToGrantRoles(as, resolvedRoles)); } if (auto privs = request.getPrivileges()) { - uassertStatusOK(checkAuthorizedToGrantPrivileges(as, privs.value())); + uassertStatusOK(checkAuthorizedToGrantPrivileges(as, dbname.tenantId(), privs.value())); } uassertStatusOK(checkAuthorizedToSetRestrictions( as, request.getAuthenticationRestrictions() != boost::none, dbname)); @@ -291,42 +308,42 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const GrantRolesToRoleCom void checkAuthForTypedCommand(OperationContext* opCtx, const GrantPrivilegesToRoleCommand& request) { auto* as = AuthorizationSession::get(opCtx->getClient()); - uassertStatusOK(checkAuthorizedToGrantPrivileges(as, request.getPrivileges())); + uassertStatusOK(checkAuthorizedToGrantPrivileges( + as, request.getDbName().tenantId(), request.getPrivileges())); } void checkAuthForTypedCommand(OperationContext* opCtx, const DropUserCommand& request) { auto* as = AuthorizationSession::get(opCtx->getClient()); UserName userName(request.getCommandParameter(), request.getDbName()); - uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to drop users from the " << userName.getDB() - << " database", - as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(userName.getDB()), - ActionType::dropUser)); + uassert( + ErrorCodes::Unauthorized, + str::stream() << "Not authorized to drop users from the " << userName.getDB() + << " database", + as->isAuthorizedForActionsOnResource( + ResourcePattern::forDatabaseName(userName.getDatabaseName()), ActionType::dropUser)); } void checkAuthForTypedCommand(OperationContext* opCtx, const DropRoleCommand& request) { const auto& dbname = request.getDbName(); auto* as = AuthorizationSession::get(opCtx->getClient()); - uassert( - ErrorCodes::Unauthorized, - str::stream() << "Not authorized to drop roles from the " << dbname.toStringForErrorMsg() - << " database", - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), ActionType::dropRole)); + uassert(ErrorCodes::Unauthorized, + str::stream() << "Not authorized to drop roles from the " + << dbname.toStringForErrorMsg() << " database", + as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), + ActionType::dropRole)); } void checkAuthForTypedCommand(OperationContext* opCtx, const DropAllUsersFromDatabaseCommand& request) { const auto& dbname = request.getDbName(); auto* as = AuthorizationSession::get(opCtx->getClient()); - uassert( - ErrorCodes::Unauthorized, - str::stream() << "Not authorized to drop users from the " << dbname.toStringForErrorMsg() - << " database", - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), ActionType::dropUser)); + uassert(ErrorCodes::Unauthorized, + str::stream() << "Not authorized to drop users from the " + << dbname.toStringForErrorMsg() << " database", + as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), + ActionType::dropUser)); } void checkAuthForTypedCommand(OperationContext* 
opCtx, const RevokeRolesFromUserCommand& request) { @@ -350,14 +367,13 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const UsersInfoCommand& r uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to view users from the " << dbname.toStringForErrorMsg() << " database", - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), - ActionType::viewUser)); + as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), + ActionType::viewUser)); } else if (arg.isAllForAllDBs()) { uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to view users from all databases", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::viewUser)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbname.tenantId()), ActionType::viewUser)); } else { invariant(arg.isExact()); auto activeTenant = getActiveTenant(opCtx); @@ -368,7 +384,8 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const UsersInfoCommand& r "May not specify tenant in usersInfo query", !activeTenant && as->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::internal)); + ResourcePattern::forClusterResource(dbname.tenantId()), + ActionType::internal)); } if (as->lookupUser(userName)) { @@ -379,7 +396,8 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const UsersInfoCommand& r str::stream() << "Not authorized to view users from the " << dbname.toStringForErrorMsg() << " database", as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(userName.getDB()), ActionType::viewUser)); + ResourcePattern::forDatabaseName(userName.getDatabaseName()), + ActionType::viewUser)); } } } @@ -387,19 +405,19 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const UsersInfoCommand& r void checkAuthForTypedCommand(OperationContext* opCtx, const RevokePrivilegesFromRoleCommand& request) { auto* as = AuthorizationSession::get(opCtx->getClient()); - uassertStatusOK(checkAuthorizedToRevokePrivileges(as, request.getPrivileges())); + uassertStatusOK(checkAuthorizedToRevokePrivileges( + as, request.getDbName().tenantId(), request.getPrivileges())); } void checkAuthForTypedCommand(OperationContext* opCtx, const DropAllRolesFromDatabaseCommand& request) { const auto& dbname = request.getDbName(); auto* as = AuthorizationSession::get(opCtx->getClient()); - uassert( - ErrorCodes::Unauthorized, - str::stream() << "Not authorized to drop roles from the " << dbname.toStringForErrorMsg() - << " database", - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), ActionType::dropRole)); + uassert(ErrorCodes::Unauthorized, + str::stream() << "Not authorized to drop roles from the " + << dbname.toStringForErrorMsg() << " database", + as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), + ActionType::dropRole)); } void checkAuthForTypedCommand(OperationContext* opCtx, const RolesInfoCommand& request) { @@ -412,9 +430,8 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const RolesInfoCommand& r uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to view roles from the " << dbname.toStringForErrorMsg() << " database", - as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(dbname.toStringWithTenantId()), - ActionType::viewRole)); + as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), + 
ActionType::viewRole)); } else { invariant(arg.isExact()); auto roles = arg.getElements(dbname); @@ -427,7 +444,8 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const RolesInfoCommand& r str::stream() << "Not authorized to view roles from the " << role.getDB() << " database", as->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(role.getDB()), ActionType::viewRole)); + ResourcePattern::forDatabaseName(role.getDatabaseName()), + ActionType::viewRole)); } } } @@ -436,8 +454,9 @@ void checkAuthForTypedCommand(OperationContext* opCtx, const InvalidateUserCache auto* as = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, "Not authorized to invalidate user cache", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::invalidateUserCache)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request.getDbName().tenantId()), + ActionType::invalidateUserCache)); } void checkAuthForTypedCommand(OperationContext* opCtx, @@ -445,8 +464,9 @@ void checkAuthForTypedCommand(OperationContext* opCtx, auto* as = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, "Not authorized to get cache generation", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request.getDbName().tenantId()), + ActionType::internal)); } void checkAuthForTypedCommand(OperationContext* opCtx, @@ -462,25 +482,32 @@ void checkAuthForTypedCommand(OperationContext* opCtx, actions.addAction(ActionType::dropUser); actions.addAction(ActionType::dropRole); } - uassert(ErrorCodes::Unauthorized, - "Not authorized to update user/role data using _mergeAuthzCollections a command", - as->isAuthorizedForActionsOnResource(ResourcePattern::forAnyNormalResource(), actions)); auto tempUsersColl = request.getTempUsersCollection(); - uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to read " << tempUsersColl, - tempUsersColl.empty() || + if (!tempUsersColl.empty()) { + auto tempUsersNS = NamespaceString(tempUsersColl); + uassert(ErrorCodes::Unauthorized, + "Not authorized to update user data using _mergeAuthzCollections a command", + as->isAuthorizedForActionsOnResource( + ResourcePattern::forAnyNormalResource(tempUsersNS.tenantId()), actions)); + uassert(ErrorCodes::Unauthorized, + str::stream() << "Not authorized to read " << tempUsersColl, as->isAuthorizedForActionsOnResource( - ResourcePattern::forExactNamespace(NamespaceString(tempUsersColl)), - ActionType::find)); + ResourcePattern::forExactNamespace(tempUsersNS), ActionType::find)); + } auto tempRolesColl = request.getTempRolesCollection(); - uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to read " << tempRolesColl, - tempRolesColl.empty() || + if (!tempRolesColl.empty()) { + auto tempRolesNS = NamespaceString(tempRolesColl); + uassert(ErrorCodes::Unauthorized, + "Not authorized to update role data using _mergeAuthzCollections a command", as->isAuthorizedForActionsOnResource( - ResourcePattern::forExactNamespace(NamespaceString(tempRolesColl)), - ActionType::find)); + ResourcePattern::forAnyNormalResource(tempRolesNS.tenantId()), actions)); + uassert(ErrorCodes::Unauthorized, + str::stream() << "Not authorized to read " << tempRolesColl, + as->isAuthorizedForActionsOnResource( + ResourcePattern::forExactNamespace(tempRolesNS), ActionType::find)); + } } } // namespace auth 
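The user_management_commands_common.cpp hunks above all follow one pattern: authorization checks now build resource patterns from tenant-aware types (a DatabaseName, or a tenant id passed alongside the parsed privileges) rather than from plain database-name strings, so a check performed on behalf of one tenant can no longer match another tenant's identically named database. The short, self-contained sketch below illustrates that idea; TenantId, DatabaseName, and ResourcePattern here are simplified stand-ins for illustration only, not the MongoDB classes touched by this diff.

    // Minimal sketch: the tenant id participates in the resource-pattern match.
    // TenantId, DatabaseName and ResourcePattern are illustrative stand-ins,
    // not MongoDB's real classes.
    #include <iostream>
    #include <optional>
    #include <string>
    #include <utility>

    struct TenantId {
        std::string id;
        bool operator==(const TenantId& other) const { return id == other.id; }
    };

    struct DatabaseName {
        std::optional<TenantId> tenant;  // owning tenant, if any
        std::string db;
        bool operator==(const DatabaseName& other) const {
            return tenant == other.tenant && db == other.db;
        }
    };

    // A database-scoped pattern matches only the exact (tenant, db) pair.
    struct ResourcePattern {
        DatabaseName target;
        static ResourcePattern forDatabaseName(DatabaseName dbName) {
            return ResourcePattern{std::move(dbName)};
        }
        bool matches(const DatabaseName& dbName) const {
            return target == dbName;
        }
    };

    int main() {
        const DatabaseName tenantAAdmin{TenantId{"tenantA"}, "admin"};
        const DatabaseName untenantedAdmin{std::nullopt, "admin"};

        // With the tenant id carried inside DatabaseName, the two "admin" databases
        // are distinct resources; a string-only pattern could not tell them apart.
        const auto pattern = ResourcePattern::forDatabaseName(tenantAAdmin);
        std::cout << std::boolalpha << pattern.matches(tenantAAdmin) << '\n';    // true
        std::cout << std::boolalpha << pattern.matches(untenantedAdmin) << '\n'; // false
        return 0;
    }

The same reasoning appears to motivate ResourcePattern::forClusterResource and ResourcePattern::forAnyNormalResource taking the request's tenant id in the hunks above and below.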
diff --git a/src/mongo/db/commands/user_management_commands_common.h b/src/mongo/db/commands/user_management_commands_common.h index 0c2af216fb331..095549fa382c4 100644 --- a/src/mongo/db/commands/user_management_commands_common.h +++ b/src/mongo/db/commands/user_management_commands_common.h @@ -32,11 +32,14 @@ #include #include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/mutable/element.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/role_name_or_string.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" #include "mongo/db/commands/user_management_commands_gen.h" #include "mongo/db/database_name.h" diff --git a/src/mongo/db/commands/validate.cpp b/src/mongo/db/commands/validate.cpp index cd962c2681872..b32e39c278671 100644 --- a/src/mongo/db/commands/validate.cpp +++ b/src/mongo/db/commands/validate.cpp @@ -28,21 +28,45 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_validation.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/validate_results.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/query/internal_plans.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_options.h" -#include "mongo/db/storage/record_store.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_truncation.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/testing_proctor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -232,16 +256,9 @@ class ValidateCmd : public BasicCommand { << " and { enforceFastCount: true } is not supported."); } - const auto rawcheckBSONConformance = cmdObj["checkBSONConformance"]; - const bool checkBSONConformance = rawcheckBSONConformance.trueValue(); - if (rawcheckBSONConformance && - !feature_flags::gExtendValidateCommand.isEnabled( - serverGlobalParams.featureCompatibility)) { - uasserted(ErrorCodes::InvalidOptions, - str::stream() << "The 'checkBSONConformance' option is not supported by the " - "validate command."); - } - if (rawcheckBSONConformance && !checkBSONConformance && + const auto rawCheckBSONConformance = cmdObj["checkBSONConformance"]; + const bool checkBSONConformance = rawCheckBSONConformance.trueValue(); + if (rawCheckBSONConformance && !checkBSONConformance && (fullValidate || enforceFastCount)) { uasserted(ErrorCodes::InvalidOptions, str::stream() 
<< "Cannot explicitly set 'checkBSONConformance: false' with " diff --git a/src/mongo/db/commands/validate_db_metadata_cmd.cpp b/src/mongo/db/commands/validate_db_metadata_cmd.cpp index ea4a72feb8dce..0bdc63f774845 100644 --- a/src/mongo/db/commands/validate_db_metadata_cmd.cpp +++ b/src/mongo/db/commands/validate_db_metadata_cmd.cpp @@ -27,20 +27,43 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/basic_types.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/collection_catalog_helper.h" -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/commands/validate_db_metadata_common.h" #include "mongo/db/commands/validate_db_metadata_gen.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/multitenancy.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/views/view.h" #include "mongo/db/views/view_catalog_helpers.h" -#include "mongo/logv2/log.h" +#include "mongo/rpc/op_msg.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -149,12 +172,10 @@ class ValidateDBMetadataCmd : public TypedCommand { return _validateView(opCtx, view); }); - for (auto collIt = collectionCatalog->begin(opCtx, dbName); - collIt != collectionCatalog->end(opCtx); - ++collIt) { + for (auto&& coll : collectionCatalog->range(dbName)) { if (!_validateNamespace( opCtx, - collectionCatalog->lookupNSSByUUID(opCtx, collIt.uuid()).value())) { + collectionCatalog->lookupNSSByUUID(opCtx, coll->uuid()).value())) { return; } } @@ -167,7 +188,7 @@ class ValidateDBMetadataCmd : public TypedCommand { bool _validateView(OperationContext* opCtx, const ViewDefinition& view) { auto pipelineStatus = view_catalog_helpers::validatePipeline(opCtx, view); if (!pipelineStatus.isOK()) { - ErrorReplyElement error(view.name().ns(), + ErrorReplyElement error(NamespaceStringUtil::serialize(view.name()), ErrorCodes::APIStrictError, ErrorCodes::errorString(ErrorCodes::APIStrictError), pipelineStatus.getStatus().reason()); @@ -183,7 +204,7 @@ class ValidateDBMetadataCmd : public TypedCommand { /** * Returns false, if the evaluation needs to be aborted. 
*/ - bool _validateNamespace(OperationContext* opCtx, const NamespaceStringOrUUID& coll) { + bool _validateNamespace(OperationContext* opCtx, const NamespaceString& coll) { bool apiStrict = APIParameters::get(opCtx).getAPIStrict().value_or(false); auto apiVersion = APIParameters::get(opCtx).getAPIVersion().value_or(""); @@ -204,7 +225,7 @@ class ValidateDBMetadataCmd : public TypedCommand { } const auto status = collection->checkValidatorAPIVersionCompatability(opCtx); if (!status.isOK()) { - ErrorReplyElement error(coll.nss()->ns(), + ErrorReplyElement error(NamespaceStringUtil::serialize(coll), ErrorCodes::APIStrictError, ErrorCodes::errorString(ErrorCodes::APIStrictError), status.reason()); @@ -227,7 +248,7 @@ class ValidateDBMetadataCmd : public TypedCommand { const IndexDescriptor* desc = ii->next()->descriptor(); if (apiStrict && apiVersion == "1" && !index_key_validate::isIndexAllowedInAPIVersion1(*desc)) { - ErrorReplyElement error(coll.nss()->ns(), + ErrorReplyElement error(NamespaceStringUtil::serialize(coll), ErrorCodes::APIStrictError, ErrorCodes::errorString(ErrorCodes::APIStrictError), str::stream() diff --git a/src/mongo/db/commands/validate_db_metadata_common.h b/src/mongo/db/commands/validate_db_metadata_common.h index a3b76ab6ab5b5..f096efa1375d5 100644 --- a/src/mongo/db/commands/validate_db_metadata_common.h +++ b/src/mongo/db/commands/validate_db_metadata_common.h @@ -51,8 +51,11 @@ struct ValidateDBMetadataSizeTracker { void assertUserCanRunValidate(OperationContext* opCtx, const ValidateDBMetadataCommandRequest& request) { - const auto resource = request.getDb() ? ResourcePattern::forDatabaseName(*request.getDb()) - : ResourcePattern::forAnyNormalResource(); + const auto tenantId = request.getDbName().tenantId(); + const auto resource = request.getDb() + ? 
ResourcePattern::forDatabaseName( + DatabaseNameUtil::deserialize(tenantId, *request.getDb())) + : ResourcePattern::forAnyNormalResource(tenantId); uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to run validateDBMetadata command on resource: '" << resource.toString() << "'", diff --git a/src/mongo/db/commands/vote_abort_index_build_command.cpp b/src/mongo/db/commands/vote_abort_index_build_command.cpp index c5e764360730f..f8f861d00ac94 100644 --- a/src/mongo/db/commands/vote_abort_index_build_command.cpp +++ b/src/mongo/db/commands/vote_abort_index_build_command.cpp @@ -28,16 +28,31 @@ */ -#include "mongo/db/repl/replication_coordinator_fwd.h" -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/vote_index_build_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -115,8 +130,9 @@ class VoteAbortIndexBuildCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; }; diff --git a/src/mongo/db/commands/vote_commit_index_build_command.cpp b/src/mongo/db/commands/vote_commit_index_build_command.cpp index 2deaa460163f1..e1b57e00418ec 100644 --- a/src/mongo/db/commands/vote_commit_index_build_command.cpp +++ b/src/mongo/db/commands/vote_commit_index_build_command.cpp @@ -28,14 +28,29 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/vote_index_build_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -114,8 +129,9 @@ class VoteCommitIndexBuildCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + 
ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/commands/whats_my_sni_command.cpp b/src/mongo/db/commands/whats_my_sni_command.cpp index 3073fdfa03316..1db27f1aa975c 100644 --- a/src/mongo/db/commands/whats_my_sni_command.cpp +++ b/src/mongo/db/commands/whats_my_sni_command.cpp @@ -27,7 +27,19 @@ * it in the license file. */ +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/commands/whats_my_uri_cmd.cpp b/src/mongo/db/commands/whats_my_uri_cmd.cpp index e791d492a7b33..5fe9fde3180ba 100644 --- a/src/mongo/db/commands/whats_my_uri_cmd.cpp +++ b/src/mongo/db/commands/whats_my_uri_cmd.cpp @@ -27,9 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { namespace { diff --git a/src/mongo/db/commands/write_commands.cpp b/src/mongo/db/commands/write_commands.cpp index 1a41144d06f5a..9e363a91201ec 100644 --- a/src/mongo/db/commands/write_commands.cpp +++ b/src/mongo/db/commands/write_commands.cpp @@ -28,57 +28,84 @@ */ -#include "mongo/base/checked_cast.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/element.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_operation_source.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/update_metrics.h" #include "mongo/db/commands/write_commands_common.h" -#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" #include "mongo/db/fle_crud.h" -#include "mongo/db/json.h" -#include "mongo/db/matcher/doc_validation_error.h" -#include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" #include "mongo/db/not_primary_error_tracker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/delete_request_gen.h" #include "mongo/db/ops/parsed_delete.h" #include "mongo/db/ops/parsed_update.h" +#include "mongo/db/ops/single_write_result_gen.h" +#include 
"mongo/db/ops/update_request.h" #include "mongo/db/ops/write_ops.h" #include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/pipeline/process_interface/replica_set_node_process_interface.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/explain.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/get_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/tenant_migration_conflict_info.h" -#include "mongo/db/repl/tenant_migration_decoration.h" -#include "mongo/db/s/collection_sharding_state.h" -#include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/stats/counters.h" -#include "mongo/db/storage/duplicate_key_error_info.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" -#include "mongo/db/transaction/retryable_writes_stats.h" -#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/timeseries/timeseries_update_delete_util.h" +#include "mongo/db/timeseries/timeseries_write_util.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/transaction_validation.h" -#include "mongo/db/update/document_diff_applier.h" -#include "mongo/db/write_concern.h" -#include "mongo/logv2/log.h" -#include "mongo/logv2/redaction.h" -#include "mongo/s/stale_exception.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" -#include "mongo/util/string_map.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/safe_num.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -111,33 +138,6 @@ bool shouldSkipOutput(OperationContext* opCtx) { writeConcern.syncMode == WriteConcernOptions::SyncMode::UNSET); } -/** - * Returns true if 'ns' is a time-series collection. That is, this namespace is backed by a - * time-series buckets collection. - */ -template -bool isTimeseries(OperationContext* opCtx, const Request& request) { - uassert(5916400, - "'isTimeseriesNamespace' parameter can only be set when the request is sent on " - "system.buckets namespace", - !request.getIsTimeseriesNamespace() || - request.getNamespace().isTimeseriesBucketsCollection()); - const auto bucketNss = request.getIsTimeseriesNamespace() - ? request.getNamespace() - : request.getNamespace().makeTimeseriesBucketsNamespace(); - - // If the buckets collection exists now, the time-series insert path will check for the - // existence of the buckets collection later on with a lock. - // If this check is concurrent with the creation of a time-series collection and the buckets - // collection does not yet exist, this check may return false unnecessarily. 
As a result, an - // insert attempt into the time-series namespace will either succeed or fail, depending on who - // wins the race. - // Hold reference to the catalog for collection lookup without locks to be safe. - auto catalog = CollectionCatalog::get(opCtx); - auto coll = catalog->lookupCollectionByNamespace(opCtx, bucketNss); - return (coll && coll->getTimeseriesOptions()); -} - /** * Contains hooks that are used by 'populateReply' method. */ @@ -283,6 +283,11 @@ class CmdInsert final : public write_ops::InsertCmdVersion1Gen { } write_ops::InsertCommandReply typedRun(OperationContext* opCtx) final try { + // On debug builds, verify that the estimated size of the insert command is at least as + // large as the size of the actual, serialized insert command. This ensures that the + // logic which estimates the size of insert commands is correct. + dassert(write_ops::verifySizeEstimate(request(), &unparsedRequest())); + doTransactionValidationForWrites(opCtx, ns()); if (request().getEncryptionInformation().has_value()) { // Flag set here and in fle_crud.cpp since this only executes on a mongod. @@ -297,20 +302,27 @@ class CmdInsert final : public write_ops::InsertCmdVersion1Gen { } } - if (isTimeseries(opCtx, request())) { + if (auto [isTimeseries, _] = timeseries::isTimeseries(opCtx, request()); isTimeseries) { // Re-throw parsing exceptions to be consistent with CmdInsert::Invocation's // constructor. try { return write_ops_exec::performTimeseriesWrites(opCtx, request()); } catch (DBException& ex) { - ex.addContext(str::stream() << "time-series insert failed: " << ns().ns()); + ex.addContext(str::stream() + << "time-series insert failed: " << ns().toStringForErrorMsg()); throw; } } + boost::optional priority; + if (request().getNamespace() == NamespaceString::kConfigSampledQueriesNamespace || + request().getNamespace() == NamespaceString::kConfigSampledQueriesDiffNamespace) { + priority.emplace(opCtx->lockState(), AdmissionContext::Priority::kLow); + } + if (hangInsertBeforeWrite.shouldFail([&](const BSONObj& data) { - const auto ns = data.getStringField("ns"); - return ns == request().getNamespace().toString(); + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "ns"_sd); + return fpNss == request().getNamespace(); })) { hangInsertBeforeWrite.pauseWhileSet(); } @@ -436,21 +448,26 @@ class CmdUpdate final : public write_ops::UpdateCmdVersion1Gen { invariant(!_commandObj.isEmpty()); - if (const auto& shardVersion = _commandObj.getField("shardVersion"); - !shardVersion.eoo()) { - bob->append(shardVersion); - } bob->append("find", _commandObj["update"].String()); extractQueryDetails(_updateOpObj, bob); bob->append("batchSize", 1); bob->append("singleBatch", true); + + if (const auto& shardVersion = _commandObj.getField("shardVersion"); + !shardVersion.eoo()) { + bob->append(shardVersion); + } } write_ops::UpdateCommandReply typedRun(OperationContext* opCtx) final try { + // On debug builds, verify that the estimated size of the update command is at least as + // large as the size of the actual, serialized update command. This ensures that the + // logic which estimates the size of update commands is correct. + dassert(write_ops::verifySizeEstimate(request(), &unparsedRequest())); + doTransactionValidationForWrites(opCtx, ns()); write_ops::UpdateCommandReply updateReply; - OperationSource source = OperationSource::kStandard; if (request().getEncryptionInformation().has_value()) { // Flag set here and in fle_crud.cpp since this only executes on a mongod. 
CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; @@ -459,23 +476,9 @@ class CmdUpdate final : public write_ops::UpdateCmdVersion1Gen { } } - if (isTimeseries(opCtx, request())) { - uassert(ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Cannot perform a multi-document transaction on a " - "time-series collection: " - << ns(), - !opCtx->inMultiDocumentTransaction()); - source = OperationSource::kTimeseriesUpdate; - } - - // On debug builds, verify that the estimated size of the updates are at least as large - // as the actual, serialized size. This ensures that the logic that estimates the size - // of deletes for batch writes is correct. - if constexpr (kDebugBuild) { - for (auto&& updateOp : request().getUpdates()) { - invariant(write_ops::verifySizeEstimate(updateOp)); - } - } + auto [isTimeseries, bucketNs] = timeseries::isTimeseries(opCtx, request()); OperationSource source = + isTimeseries ? OperationSource::kTimeseriesUpdate : OperationSource::kStandard; long long nModified = 0; @@ -483,7 +486,19 @@ // 'postProcessHandler' and should not be accessed afterwards. std::vector upsertedInfoVec; - auto reply = write_ops_exec::performUpdates(opCtx, request(), source); + write_ops_exec::WriteResult reply; + // For retryable updates on time-series collections, we need to run them in + // transactions to ensure the multiple writes are replicated atomically. + if (isTimeseries && opCtx->isRetryableWrite() && !opCtx->inMultiDocumentTransaction()) { + auto executor = serverGlobalParams.clusterRole.has(ClusterRole::None) + ? ReplicaSetNodeProcessInterface::getReplicaSetNodeExecutor( + opCtx->getServiceContext()) + : Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); + write_ops_exec::runTimeseriesRetryableUpdates( + opCtx, bucketNs, request(), executor, &reply); + } else { + reply = write_ops_exec::performUpdates(opCtx, request(), source); + } // Handler to process each 'SingleWriteResult'. auto singleWriteHandler = [&](const SingleWriteResult& opResult, int index) { @@ -550,8 +565,10 @@ class CmdUpdate final : public write_ops::UpdateCmdVersion1Gen { "explained write batches must be of size 1", request().getUpdates().size() == 1); + auto [isRequestToTimeseries, nss] = timeseries::isTimeseries(opCtx, request()); + UpdateRequest updateRequest(request().getUpdates()[0]); - updateRequest.setNamespaceString(request().getNamespace()); + updateRequest.setNamespaceString(nss); if (shouldDoFLERewrite(request())) { CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; @@ -570,26 +587,44 @@ class CmdUpdate final : public write_ops::UpdateCmdVersion1Gen { updateRequest.setYieldPolicy(PlanYieldPolicy::YieldPolicy::YIELD_AUTO); updateRequest.setExplain(verbosity); - const ExtensionsCallbackReal extensionsCallback(opCtx, - &updateRequest.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &updateRequest, extensionsCallback); - uassertStatusOK(parsedUpdate.parseRequest()); - // Explains of write commands are read-only, but we take write locks so that timing // info is more accurate.
- AutoGetCollection collection(opCtx, request().getNamespace(), MODE_IX); + const auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + + if (isRequestToTimeseries) { + timeseries::assertTimeseriesBucketsCollection(collection.getCollectionPtr().get()); + + const auto& requestHint = request().getUpdates()[0].getHint(); + if (timeseries::isHintIndexKey(requestHint)) { + auto timeseriesOptions = collection.getCollectionPtr()->getTimeseriesOptions(); + updateRequest.setHint( + uassertStatusOK(timeseries::createBucketsIndexSpecFromTimeseriesIndexSpec( + *timeseriesOptions, requestHint))); + } + } + + ParsedUpdate parsedUpdate(opCtx, + &updateRequest, + collection.getCollectionPtr(), + false /* forgoOpCounterIncrements */, + isRequestToTimeseries); + uassertStatusOK(parsedUpdate.parseRequest()); - auto exec = uassertStatusOK(getExecutorUpdate(&CurOp::get(opCtx)->debug(), - &collection.getCollection(), - &parsedUpdate, - verbosity)); + auto exec = uassertStatusOK(getExecutorUpdate( + &CurOp::get(opCtx)->debug(), collection, &parsedUpdate, verbosity)); auto bodyBuilder = result->getBodyBuilder(); - Explain::explainStages(exec.get(), - collection.getCollection(), - verbosity, - BSONObj(), - _commandObj, - &bodyBuilder); + Explain::explainStages( + exec.get(), + collection.getCollectionPtr(), + verbosity, + BSONObj(), + SerializationContext::stateCommandReply(request().getSerializationContext()), + _commandObj, + &bodyBuilder); } BSONObj _commandObj; @@ -660,6 +695,11 @@ class CmdDelete final : public write_ops::DeleteCmdVersion1Gen { } write_ops::DeleteCommandReply typedRun(OperationContext* opCtx) final try { + // On debug builds, verify that the estimated size of the deletes are at least as large + // as the actual, serialized size. This ensures that the logic that estimates the size + // of deletes for batch writes is correct. + dassert(write_ops::verifySizeEstimate(request(), &unparsedRequest())); + doTransactionValidationForWrites(opCtx, ns()); write_ops::DeleteCommandReply deleteReply; OperationSource source = OperationSource::kStandard; @@ -673,18 +713,10 @@ class CmdDelete final : public write_ops::DeleteCmdVersion1Gen { } } - if (isTimeseries(opCtx, request())) { + if (auto [isTimeseries, _] = timeseries::isTimeseries(opCtx, request()); isTimeseries) { source = OperationSource::kTimeseriesDelete; } - // On debug builds, verify that the estimated size of the deletes are at least as large - // as the actual, serialized size. This ensures that the logic that estimates the size - // of deletes for batch writes is correct. - if constexpr (kDebugBuild) { - for (auto&& deleteOp : request().getDeletes()) { - invariant(write_ops::verifySizeEstimate(deleteOp)); - } - } auto reply = write_ops_exec::performDeletes(opCtx, request(), source); populateReply(opCtx, @@ -717,15 +749,7 @@ class CmdDelete final : public write_ops::DeleteCmdVersion1Gen { request().getDeletes().size() == 1); auto deleteRequest = DeleteRequest{}; - auto isRequestToTimeseries = isTimeseries(opCtx, request()); - auto nss = [&] { - auto nss = request().getNamespace(); - if (!isRequestToTimeseries) { - return nss; - } - return nss.isTimeseriesBucketsCollection() ? 
nss - : nss.makeTimeseriesBucketsNamespace(); - }(); + auto [isRequestToTimeseries, nss] = timeseries::isTimeseries(opCtx, request()); deleteRequest.setNsString(nss); deleteRequest.setLegacyRuntimeConstants(request().getLegacyRuntimeConstants().value_or( Variables::generateRuntimeConstants(opCtx))); @@ -751,43 +775,38 @@ class CmdDelete final : public write_ops::DeleteCmdVersion1Gen { // Explains of write commands are read-only, but we take write locks so that timing // info is more accurate. - AutoGetCollection collection(opCtx, deleteRequest.getNsString(), MODE_IX); - + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, deleteRequest.getNsString(), AcquisitionPrerequisites::kWrite), + MODE_IX); if (isRequestToTimeseries) { - uassert(ErrorCodes::NamespaceNotFound, - "Could not find time-series buckets collection for write explain", - *collection); - auto timeseriesOptions = collection->getTimeseriesOptions(); - uassert(ErrorCodes::InvalidOptions, - "Time-series buckets collection is missing time-series options", - timeseriesOptions); + timeseries::assertTimeseriesBucketsCollection(collection.getCollectionPtr().get()); if (timeseries::isHintIndexKey(firstDelete.getHint())) { + auto timeseriesOptions = collection.getCollectionPtr()->getTimeseriesOptions(); deleteRequest.setHint( uassertStatusOK(timeseries::createBucketsIndexSpecFromTimeseriesIndexSpec( *timeseriesOptions, firstDelete.getHint()))); } } - ParsedDelete parsedDelete(opCtx, - &deleteRequest, - isRequestToTimeseries && collection - ? collection->getTimeseriesOptions() - : boost::none); + ParsedDelete parsedDelete( + opCtx, &deleteRequest, collection.getCollectionPtr(), isRequestToTimeseries); uassertStatusOK(parsedDelete.parseRequest()); // Explain the plan tree. - auto exec = uassertStatusOK(getExecutorDelete(&CurOp::get(opCtx)->debug(), - &collection.getCollection(), - &parsedDelete, - verbosity)); + auto exec = uassertStatusOK(getExecutorDelete( + &CurOp::get(opCtx)->debug(), collection, &parsedDelete, verbosity)); auto bodyBuilder = result->getBodyBuilder(); - Explain::explainStages(exec.get(), - collection.getCollection(), - verbosity, - BSONObj(), - _commandObj, - &bodyBuilder); + Explain::explainStages( + exec.get(), + collection.getCollectionPtr(), + verbosity, + BSONObj(), + SerializationContext::stateCommandReply(request().getSerializationContext()), + _commandObj, + &bodyBuilder); } const BSONObj& _commandObj; diff --git a/src/mongo/db/commands/write_commands_common.cpp b/src/mongo/db/commands/write_commands_common.cpp index ac39adca7060f..945533ba3d835 100644 --- a/src/mongo/db/commands/write_commands_common.cpp +++ b/src/mongo/db/commands/write_commands_common.cpp @@ -27,20 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/write_commands_common.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" #include -#include #include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands/write_commands_common.h" +#include "mongo/db/namespace_string.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/commands_bm.cpp b/src/mongo/db/commands_bm.cpp index 94920bed08c7a..7d68a4204e969 100644 --- a/src/mongo/db/commands_bm.cpp +++ b/src/mongo/db/commands_bm.cpp @@ -27,11 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include -#include "mongo/base/string_data.h" #include "mongo/idl/command_generic_argument.h" namespace mongo { diff --git a/src/mongo/db/commands_test.cpp b/src/mongo/db/commands_test.cpp index c34814bf02711..547f835dd540f 100644 --- a/src/mongo/db/commands_test.cpp +++ b/src/mongo/db/commands_test.cpp @@ -27,26 +27,46 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/crypto/mechanism_scram.h" +#include "mongo/crypto/sha1_block.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/auth/authorization_session_for_test.h" +#include "mongo/db/auth/authorization_manager_impl.h" +#include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" -#include "mongo/db/auth/authz_session_external_state_mock.h" +#include "mongo/db/auth/restriction_environment.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/sasl_options.h" -#include "mongo/db/catalog/collection_mock.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/commands.h" #include "mongo/db/commands_test_example_gen.h" -#include "mongo/db/dbmessage.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/rpc/factory.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/op_msg_rpc_impls.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/sockaddr.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -142,7 +162,8 @@ class ParseNsOrUUID : public ServiceContextTest { TEST_F(ParseNsOrUUID, FailWrongType) { auto cmd = BSON("query" << BSON("a" << BSON("$gte" << 11))); - ASSERT_THROWS_CODE(CommandHelpers::parseNsOrUUID(DatabaseName(boost::none, "db"), cmd), + ASSERT_THROWS_CODE(CommandHelpers::parseNsOrUUID( + DatabaseName::createDatabaseName_forTest(boost::none, "db"), cmd), DBException, ErrorCodes::InvalidNamespace); } @@ -158,7 +179,8 
@@ TEST_F(ParseNsOrUUID, FailEmptyDbName) { TEST_F(ParseNsOrUUID, FailInvalidDbName) { auto cmd = BSON("query" << "coll"); - ASSERT_THROWS_CODE(CommandHelpers::parseNsOrUUID(DatabaseName(boost::none, "test.coll"), cmd), + ASSERT_THROWS_CODE(CommandHelpers::parseNsOrUUID( + DatabaseName::createDatabaseName_forTest(boost::none, "test.coll"), cmd), DBException, ErrorCodes::InvalidNamespace); } @@ -166,15 +188,17 @@ TEST_F(ParseNsOrUUID, FailInvalidDbName) { TEST_F(ParseNsOrUUID, ParseValidColl) { auto cmd = BSON("query" << "coll"); - auto parsedNss = CommandHelpers::parseNsOrUUID(DatabaseName(boost::none, "test"), cmd); - ASSERT_EQ(*parsedNss.nss(), NamespaceString::createNamespaceString_forTest("test.coll")); + auto parsedNss = CommandHelpers::parseNsOrUUID( + DatabaseName::createDatabaseName_forTest(boost::none, "test"), cmd); + ASSERT_EQ(parsedNss.nss(), NamespaceString::createNamespaceString_forTest("test.coll")); } TEST_F(ParseNsOrUUID, ParseValidUUID) { const UUID uuid = UUID::gen(); auto cmd = BSON("query" << uuid); - auto parsedNsOrUUID = CommandHelpers::parseNsOrUUID(DatabaseName(boost::none, "test"), cmd); - ASSERT_EQUALS(uuid, *parsedNsOrUUID.uuid()); + auto parsedNsOrUUID = CommandHelpers::parseNsOrUUID( + DatabaseName::createDatabaseName_forTest(boost::none, "test"), cmd); + ASSERT_EQUALS(uuid, parsedNsOrUUID.uuid()); } /** @@ -395,7 +419,11 @@ class TypedCommandTest : public ServiceContextMongoDTest { // Set up the auth subsystem to authorize the command. auto localManagerState = std::make_unique(); _managerState = localManagerState.get(); - _managerState->setAuthzVersion(AuthorizationManager::schemaVersion26Final); + { + auto opCtxHolder = makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + _managerState->setAuthzVersion(opCtx, AuthorizationManager::schemaVersion26Final); + } auto uniqueAuthzManager = std::make_unique( getServiceContext(), std::move(localManagerState)); _authzManager = uniqueAuthzManager.get(); @@ -447,7 +475,7 @@ class TypedCommandTest : public ServiceContextMongoDTest { const OpMsgRequest request = [&] { typename T::Request incr(ns); incr.setI(i); - return incr.serialize(BSON("$db" << ns.db())); + return incr.serialize(BSON("$db" << ns.db_forTest())); }(); auto opCtx = _client->makeOperationContext(); diff --git a/src/mongo/db/concurrency/SConscript b/src/mongo/db/concurrency/SConscript index 264db4ac54f87..fbe5fa04903a4 100644 --- a/src/mongo/db/concurrency/SConscript +++ b/src/mongo/db/concurrency/SConscript @@ -1,4 +1,5 @@ # -*- mode: python -*- +import sys Import("env") @@ -11,8 +12,8 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/catalog/catalog_helpers', - '$BUILD_DIR/mongo/db/catalog/collection_crud', '$BUILD_DIR/mongo/db/concurrency/exception_util', + '$BUILD_DIR/mongo/db/dbhelpers', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/util/concurrency/thread_pool', @@ -36,38 +37,30 @@ env.Library( ], ) -env.Library( - target='lock_manager_defs', - source=[ - 'lock_manager_defs.cpp', - ], - LIBDEPS=[ - '$BUILD_DIR/mongo/base', - ], -) - env.Library( target='lock_manager', source=[ 'd_concurrency.cpp', 'lock_manager.cpp', - 'lock_state.cpp', + 'lock_manager_defs.cpp', 'lock_stats.cpp', + 'locker.cpp', + 'locker_impl.cpp', 'replication_state_transition_lock_guard.cpp', 'resource_catalog.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/db/storage/concurrency_adjustment_parameters', '$BUILD_DIR/mongo/db/storage/storage_engine_parameters', - 
'$BUILD_DIR/mongo/util/background_job', - '$BUILD_DIR/mongo/util/concurrency/spin_lock', '$BUILD_DIR/mongo/util/concurrency/ticketholder', - '$BUILD_DIR/third_party/shim_boost', - 'lock_manager_defs', ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/concurrency/flow_control_ticketholder', '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/util/background_job', + '$BUILD_DIR/mongo/util/concurrency/spin_lock', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', ], ) @@ -87,24 +80,26 @@ env.Benchmark( 'd_concurrency_bm.cpp', ], LIBDEPS=[ + '$BUILD_DIR/mongo/db/service_context_non_d', 'lock_manager', ], ) env.CppUnitTest( - target='db_concurrency_test', + target='lock_manager_test', source=[ 'd_concurrency_test.cpp', 'fast_map_noalloc_test.cpp', 'lock_manager_test.cpp', - 'lock_state_test.cpp', 'lock_stats_test.cpp', + 'locker_impl_test.cpp', 'resource_catalog_test.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/query/op_metrics', - '$BUILD_DIR/mongo/db/service_context_d_test_fixture', + '$BUILD_DIR/mongo/db/service_context_non_d', + '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/transport/transport_layer_common', '$BUILD_DIR/mongo/transport/transport_layer_mock', '$BUILD_DIR/mongo/util/progress_meter', @@ -112,3 +107,5 @@ env.CppUnitTest( 'lock_manager', ], ) + +env.PrettyPrinterTest(target="lock_gdb_test.py") diff --git a/src/mongo/db/concurrency/d_concurrency.cpp b/src/mongo/db/concurrency/d_concurrency.cpp index dbc851ff1890f..ea01b321b1d39 100644 --- a/src/mongo/db/concurrency/d_concurrency.cpp +++ b/src/mongo/db/concurrency/d_concurrency.cpp @@ -29,17 +29,20 @@ #include "mongo/db/concurrency/d_concurrency.h" -#include +#include #include -#include -#include "mongo/db/concurrency/flow_control_ticketholder.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/resource_catalog.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/service_context.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/util/assert_util.h" -#include "mongo/util/stacktrace.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -47,12 +50,7 @@ namespace mongo { Lock::ResourceMutex::ResourceMutex(std::string resourceLabel) - : _rid(ResourceIdFactory::newResourceIdForMutex(std::move(resourceLabel))) {} - -std::string Lock::ResourceMutex::getName(ResourceId resourceId) { - invariant(resourceId.getType() == RESOURCE_MUTEX); - return ResourceIdFactory::nameForId(resourceId); -} + : _rid(ResourceCatalog::get().newResourceIdForMutex(std::move(resourceLabel))) {} bool Lock::ResourceMutex::isExclusivelyLocked(Locker* locker) { return locker->isLockHeldForMode(_rid, MODE_X); @@ -62,29 +60,33 @@ bool Lock::ResourceMutex::isAtLeastReadLocked(Locker* locker) { return locker->isLockHeldForMode(_rid, MODE_IS); } -ResourceId Lock::ResourceMutex::ResourceIdFactory::newResourceIdForMutex( - std::string resourceLabel) { - return _resourceIdFactory()._newResourceIdForMutex(std::move(resourceLabel)); -} +void Lock::ResourceLock::_lock(LockMode mode, Date_t deadline) { + invariant(_result == LOCK_INVALID); + if (_opCtx) + _opCtx->lockState()->lock(_opCtx, _rid, mode, deadline); + else + _locker->lock(_rid, mode, deadline); -std::string 
Lock::ResourceMutex::ResourceIdFactory::nameForId(ResourceId resourceId) { - stdx::lock_guard lk(_resourceIdFactory().labelsMutex); - return _resourceIdFactory().labels.at(resourceId.getHashId()); + _result = LOCK_OK; } -Lock::ResourceMutex::ResourceIdFactory& -Lock::ResourceMutex::ResourceIdFactory::_resourceIdFactory() { - static StaticImmortal resourceIdFactory; - return resourceIdFactory.value(); -} +void Lock::ResourceLock::_unlock() { + if (_isLocked()) { + if (_opCtx) + _opCtx->lockState()->unlock(_rid); + else + _locker->unlock(_rid); -ResourceId Lock::ResourceMutex::ResourceIdFactory::_newResourceIdForMutex( - std::string resourceLabel) { - stdx::lock_guard lk(labelsMutex); - invariant(nextId == labels.size()); - labels.push_back(std::move(resourceLabel)); + _result = LOCK_INVALID; + } +} - return ResourceId::makeMutexResourceId(nextId++); +void Lock::ExclusiveLock::lock() { + // The contract of the condition_variable-like utilities is that that the lock is returned in + // the locked state so the acquisition below must be guaranteed to always succeed. + invariant(_opCtx); + UninterruptibleLockGuard ulg(_opCtx->lockState()); // NOLINT. + _lock(MODE_X); } Lock::GlobalLock::GlobalLock(OperationContext* opCtx, @@ -178,22 +180,66 @@ Lock::GlobalLock::GlobalLock(GlobalLock&& otherLock) otherLock._result = LOCK_INVALID; } +Lock::GlobalLock::~GlobalLock() { + // Preserve the original lock result which will be overridden by unlock(). + auto lockResult = _result; + auto* locker = _opCtx->lockState(); + + if (isLocked()) { + // Abandon our snapshot if destruction of the GlobalLock object results in actually + // unlocking the global lock. Recursive locking and the two-phase locking protocol may + // prevent lock release. + const bool willReleaseLock = _isOutermostLock && !locker->inAWriteUnitOfWork(); + if (willReleaseLock) { + _opCtx->recoveryUnit()->abandonSnapshot(); + } + _unlock(); + } + + if (!_skipRSTLLock && (lockResult == LOCK_OK || lockResult == LOCK_WAITING)) { + locker->unlock(resourceIdReplicationStateTransitionLock); + } +} + void Lock::GlobalLock::_unlock() { _opCtx->lockState()->unlockGlobal(); _result = LOCK_INVALID; } +Lock::TenantLock::TenantLock(OperationContext* opCtx, + const TenantId& tenantId, + LockMode mode, + Date_t deadline) + : _id{RESOURCE_TENANT, tenantId}, _opCtx{opCtx} { + dassert(_opCtx->lockState()->isLockHeldForMode(resourceIdGlobal, + isSharedLockMode(mode) ? 
MODE_IS : MODE_IX)); + _opCtx->lockState()->lock(_opCtx, _id, mode, deadline); +} + +Lock::TenantLock::TenantLock(TenantLock&& otherLock) + : _id(otherLock._id), _opCtx(otherLock._opCtx) { + otherLock._opCtx = nullptr; +} + +Lock::TenantLock::~TenantLock() { + if (_opCtx) { + _opCtx->lockState()->unlock(_id); + } +} + Lock::DBLock::DBLock(OperationContext* opCtx, const DatabaseName& dbName, LockMode mode, - Date_t deadline) - : DBLock(opCtx, dbName, mode, deadline, DBLockSkipOptions{}) {} + Date_t deadline, + boost::optional tenantLockMode) + : DBLock(opCtx, dbName, mode, deadline, DBLockSkipOptions{}, tenantLockMode) {} Lock::DBLock::DBLock(OperationContext* opCtx, const DatabaseName& dbName, LockMode mode, Date_t deadline, - DBLockSkipOptions options) + DBLockSkipOptions options, + boost::optional tenantLockMode) : _id(RESOURCE_DATABASE, dbName), _opCtx(opCtx), _result(LOCK_INVALID), _mode(mode) { _globalLock.emplace(opCtx, @@ -204,6 +250,32 @@ Lock::DBLock::DBLock(OperationContext* opCtx, massert(28539, "need a valid database name", !dbName.db().empty()); + tassert(6671501, + str::stream() << "Tenant lock mode " << modeName(*tenantLockMode) + << " specified for database " << dbName.toStringForErrorMsg() + << " that does not belong to a tenant", + !tenantLockMode || dbName.tenantId()); + + // Acquire the tenant lock. + if (dbName.tenantId()) { + const auto effectiveTenantLockMode = [&]() { + const auto defaultTenantLockMode = isSharedLockMode(_mode) ? MODE_IS : MODE_IX; + if (tenantLockMode) { + tassert(6671505, + str::stream() << "Requested tenant lock mode " << modeName(*tenantLockMode) + << " that is weaker than the default one " + << modeName(defaultTenantLockMode) << " for database " + << dbName.toStringForErrorMsg() << " of tenant " + << dbName.tenantId()->toString(), + isModeCovered(defaultTenantLockMode, *tenantLockMode)); + return *tenantLockMode; + } else { + return defaultTenantLockMode; + } + }(); + _tenantLock.emplace(opCtx, *dbName.tenantId(), effectiveTenantLockMode, deadline); + } + _opCtx->lockState()->lock(_opCtx, _id, _mode, deadline); _result = LOCK_OK; } @@ -213,7 +285,8 @@ Lock::DBLock::DBLock(DBLock&& otherLock) _opCtx(otherLock._opCtx), _result(otherLock._result), _mode(otherLock._mode), - _globalLock(std::move(otherLock._globalLock)) { + _globalLock(std::move(otherLock._globalLock)), + _tenantLock(std::move(otherLock._tenantLock)) { // Mark as moved so the destructor doesn't invalidate the newly-constructed lock. 
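The `Lock::DBLock` constructor above derives the tenant lock mode from the database lock mode: shared database modes (MODE_IS, MODE_S) default to an intent-shared tenant lock, exclusive ones (MODE_IX, MODE_X) to intent-exclusive, and an explicit `tenantLockMode` may only strengthen that default (the `tassert` rejects weaker requests). Below is a minimal, self-contained sketch of that selection rule. The enum and helpers are simplified stand-ins for MongoDB's `LockMode`, `isSharedLockMode()` and `isModeCovered()` from `lock_manager_defs.h`, not the real definitions.

```cpp
#include <cassert>
#include <iostream>
#include <optional>

// Simplified stand-ins for the lock modes and helpers referenced by the
// DBLock constructor (assumption: the real definitions are richer than this).
enum LockMode { MODE_IS, MODE_IX, MODE_S, MODE_X };

bool isSharedLockMode(LockMode m) {
    return m == MODE_IS || m == MODE_S;
}

// Approximation of "does 'stronger' cover 'weaker'": X covers everything,
// IX covers the intent modes, S covers the shared modes.
bool isModeCovered(LockMode weaker, LockMode stronger) {
    if (stronger == MODE_X)
        return true;
    if (stronger == MODE_IX)
        return weaker == MODE_IX || weaker == MODE_IS;
    if (stronger == MODE_S)
        return weaker == MODE_S || weaker == MODE_IS;
    return weaker == MODE_IS;  // stronger == MODE_IS
}

// Mirrors the effectiveTenantLockMode lambda in Lock::DBLock: default to an
// intent mode derived from the database lock mode, and only accept an explicit
// request that is at least as strong as that default.
LockMode effectiveTenantLockMode(LockMode dbMode, std::optional<LockMode> requested) {
    const LockMode defaultMode = isSharedLockMode(dbMode) ? MODE_IS : MODE_IX;
    if (!requested)
        return defaultMode;
    assert(isModeCovered(defaultMode, *requested));  // weaker requests are rejected
    return *requested;
}

int main() {
    assert(effectiveTenantLockMode(MODE_S, std::nullopt) == MODE_IS);
    assert(effectiveTenantLockMode(MODE_IX, std::nullopt) == MODE_IX);
    assert(effectiveTenantLockMode(MODE_IX, MODE_X) == MODE_X);
    std::cout << "tenant lock mode selection behaves as expected\n";
    return 0;
}
```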
otherLock._result = LOCK_INVALID; } @@ -250,25 +323,4 @@ Lock::ParallelBatchWriterMode::ParallelBatchWriterMode(OperationContext* opCtx) : _pbwm(opCtx, resourceIdParallelBatchWriterMode, MODE_X), _shouldNotConflictBlock(opCtx->lockState()) {} -void Lock::ResourceLock::_lock(LockMode mode, Date_t deadline) { - invariant(_result == LOCK_INVALID); - if (_opCtx) - _opCtx->lockState()->lock(_opCtx, _rid, mode, deadline); - else - _locker->lock(_rid, mode, deadline); - - _result = LOCK_OK; -} - -void Lock::ResourceLock::_unlock() { - if (_isLocked()) { - if (_opCtx) - _opCtx->lockState()->unlock(_rid); - else - _locker->unlock(_rid); - - _result = LOCK_INVALID; - } -} - } // namespace mongo diff --git a/src/mongo/db/concurrency/d_concurrency.h b/src/mongo/db/concurrency/d_concurrency.h index 4c0903c101c00..c6ea8043c4476 100644 --- a/src/mongo/db/concurrency/d_concurrency.h +++ b/src/mongo/db/concurrency/d_concurrency.h @@ -29,16 +29,50 @@ #pragma once -#include // For UINT_MAX +#include +#include +#include +#include +#include +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/time_support.h" #include "mongo/util/timer.h" namespace mongo { class Lock { public: + /** + * For use as general mutex or readers/writers lock, outside the general multi-granularity + * model. A ResourceMutex is not affected by yielding and two phase locking semantics inside + * WUOWs. Lock with ResourceLock, SharedLock or ExclusiveLock. Uses same fairness as other + * LockManager locks. + */ + class ResourceMutex { + public: + ResourceMutex(std::string resourceLabel); + + /** + * Each instantiation of this class allocates a new ResourceId. + */ + ResourceId getRid() const { + return _rid; + } + + bool isExclusivelyLocked(Locker* locker); + + bool isAtLeastReadLocked(Locker* locker); + + private: + const ResourceId _rid; + }; + /** * General purpose RAII wrapper for a resource managed by the lock manager * @@ -102,57 +136,6 @@ class Lock { LockResult _result{LOCK_INVALID}; }; - /** - * For use as general mutex or readers/writers lock, outside the general multi-granularity - * model. A ResourceMutex is not affected by yielding and two phase locking semantics inside - * WUOWs. Lock with ResourceLock, SharedLock or ExclusiveLock. Uses same fairness as other - * LockManager locks. - */ - class ResourceMutex { - public: - ResourceMutex(std::string resourceLabel); - - std::string getName() const { - return getName(_rid); - } - - /** - * Each instantiation of this class allocates a new ResourceId. - */ - ResourceId getRid() const { - return _rid; - } - - static std::string getName(ResourceId resourceId); - - bool isExclusivelyLocked(Locker* locker); - - bool isAtLeastReadLocked(Locker* locker); - - private: - const ResourceId _rid; - - /** - * ResourceMutexes can be constructed during initialization, thus the code must ensure the - * vector of labels is constructed before items are added to it. This factory encapsulates - * all members that need to be initialized before first use. 
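The `ResourceIdFactory` being removed here existed only to sidestep the static initialization order problem its comment describes: `ResourceMutex` objects can be constructed during static initialization, so the label registry must be constructed before the first registration (the replacement, `ResourceCatalog::get().newResourceIdForMutex()`, has to give the same guarantee). A common way to get that ordering is the construct-on-first-use (Meyers singleton) idiom sketched below; this is a generic illustration of the idiom, not the `ResourceCatalog` implementation.

```cpp
#include <cstdint>
#include <iostream>
#include <mutex>
#include <string>
#include <vector>

// Generic construct-on-first-use registry: the function-local static is
// initialized (thread-safely, since C++11) the first time instance() runs,
// even if that happens during another object's static initialization.
class LabelRegistry {
public:
    static LabelRegistry& instance() {
        static LabelRegistry registry;  // constructed on first use
        return registry;
    }

    std::uint64_t registerLabel(std::string label) {
        std::lock_guard<std::mutex> lk(_mutex);
        _labels.push_back(std::move(label));
        return _labels.size() - 1;
    }

    std::string labelFor(std::uint64_t id) {
        std::lock_guard<std::mutex> lk(_mutex);
        return _labels.at(id);
    }

private:
    LabelRegistry() = default;
    std::mutex _mutex;
    std::vector<std::string> _labels;
};

// A static object whose constructor runs before main() still finds the
// registry ready, because instance() constructs it on demand.
struct StaticallyRegistered {
    StaticallyRegistered() : id(LabelRegistry::instance().registerLabel("earlyMutex")) {}
    std::uint64_t id;
} staticallyRegistered;

int main() {
    std::cout << LabelRegistry::instance().labelFor(staticallyRegistered.id) << "\n";
    return 0;
}
```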
- */ - class ResourceIdFactory { - public: - static ResourceId newResourceIdForMutex(std::string resourceLabel); - - static std::string nameForId(ResourceId resourceId); - - private: - static ResourceIdFactory& _resourceIdFactory(); - ResourceId _newResourceIdForMutex(std::string resourceLabel); - - std::uint64_t nextId = 0; - std::vector labels; - Mutex labelsMutex = MONGO_MAKE_LATCH("ResourceIdFactory::labelsMutex"); - }; - }; - /** * Obtains a ResourceMutex for exclusive use. */ @@ -161,21 +144,9 @@ class Lock { ExclusiveLock(OperationContext* opCtx, ResourceMutex mutex) : ResourceLock(opCtx, mutex.getRid(), MODE_X) {} - ExclusiveLock(Locker* locker, ResourceMutex mutex) - : ResourceLock(locker, mutex.getRid(), MODE_X) {} - // Lock/unlock overloads to allow ExclusiveLock to be used with condition_variable-like // utilities such as stdx::condition_variable_any and waitForConditionOrInterrupt - - void lock() { - // The contract of the condition_variable-like utilities is that that the lock is - // returned in the locked state so the acquisition below must be guaranteed to always - // succeed. - invariant(_opCtx); - UninterruptibleLockGuard ulg(_opCtx->lockState()); // NOLINT. - _lock(MODE_X); - } - + void lock(); void unlock() { _unlock(); } @@ -194,9 +165,6 @@ class Lock { public: SharedLock(OperationContext* opCtx, ResourceMutex mutex) : ResourceLock(opCtx, mutex.getRid(), MODE_IS) {} - - SharedLock(Locker* locker, ResourceMutex mutex) - : ResourceLock(locker, mutex.getRid(), MODE_IS) {} }; /** @@ -246,24 +214,7 @@ class Lock { GlobalLock(GlobalLock&&); - ~GlobalLock() { - // Preserve the original lock result which will be overridden by unlock(). - auto lockResult = _result; - if (isLocked()) { - // Abandon our snapshot if destruction of the GlobalLock object results in actually - // unlocking the global lock. Recursive locking and the two-phase locking protocol - // may prevent lock release. - const bool willReleaseLock = _isOutermostLock && - !(_opCtx->lockState() && _opCtx->lockState()->inAWriteUnitOfWork()); - if (willReleaseLock) { - _opCtx->recoveryUnit()->abandonSnapshot(); - } - _unlock(); - } - if (!_skipRSTLLock && (lockResult == LOCK_OK || lockResult == LOCK_WAITING)) { - _opCtx->lockState()->unlock(resourceIdReplicationStateTransitionLock); - } - } + ~GlobalLock(); bool isLocked() const { return _result == LOCK_OK; @@ -321,6 +272,36 @@ class Lock { using DBLockSkipOptions = GlobalLockSkipOptions; + /** + * Tenant lock. + * + * Controls access to resources belonging to a tenant. + * + * This lock supports four modes (see Lock_Mode): + * MODE_IS: concurrent access to tenant's resources, requiring further database read locks + * MODE_IX: concurrent access to tenant's resources, requiring further database read or write + * locks + * MODE_S: shared read access to tenant's resources, blocking any writers + * MODE_X: exclusive access to tenant's resources, blocking all other readers and writers. + */ + class TenantLock { + TenantLock(const TenantLock&) = delete; + TenantLock& operator=(const TenantLock&) = delete; + + public: + TenantLock(OperationContext* opCtx, + const TenantId& tenantId, + LockMode mode, + Date_t deadline = Date_t::max()); + + TenantLock(TenantLock&&); + ~TenantLock(); + + private: + ResourceId _id; + OperationContext* _opCtx; + }; + /** * Database lock. * @@ -334,19 +315,28 @@ class Lock { * for MODE_IX or MODE_X also acquires global lock in intent-exclusive (IX) mode. 
* For storage engines that do not support collection-level locking, MODE_IS will be * upgraded to MODE_S and MODE_IX will be upgraded to MODE_X. + * + * If the database belongs to a tenant, then acquires a tenant lock before the database lock. + * For 'mode' MODE_IS or MODE_S acquires tenant lock in intent-shared (IS) mode, otherwise, + * acquires a tenant lock in intent-exclusive (IX) mode. A different, stronger tenant lock mode + * to acquire can be specified with 'tenantLockMode' parameter. Passing boost::none for the + * tenant lock mode does not skip the tenant lock, but indicates that the tenant lock in default + * mode should be acquired. */ class DBLock { public: DBLock(OperationContext* opCtx, const DatabaseName& dbName, LockMode mode, - Date_t deadline = Date_t::max()); + Date_t deadline = Date_t::max(), + boost::optional tenantLockMode = boost::none); DBLock(OperationContext* opCtx, const DatabaseName& dbName, LockMode mode, Date_t deadline, - DBLockSkipOptions skipOptions); + DBLockSkipOptions skipOptions, + boost::optional tenantLockMode = boost::none); DBLock(DBLock&&); ~DBLock(); @@ -371,6 +361,9 @@ class Lock { // Acquires the global lock on our behalf. boost::optional _globalLock; + + // Acquires the tenant lock on behalf of this DB lock. + boost::optional _tenantLock; }; /** diff --git a/src/mongo/db/concurrency/d_concurrency_bm.cpp b/src/mongo/db/concurrency/d_concurrency_bm.cpp index e8c4f5639f1ad..88ab38e660abb 100644 --- a/src/mongo/db/concurrency/d_concurrency_bm.cpp +++ b/src/mongo/db/concurrency/d_concurrency_bm.cpp @@ -28,50 +28,45 @@ */ #include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/concurrency/lock_manager_test_help.h" -#include "mongo/db/storage/recovery_unit_noop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/platform/mutex.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/str.h" namespace mongo { namespace { const int kMaxPerfThreads = 16; // max number of threads to use for lock perf -MONGO_INITIALIZER_GENERAL(DConcurrencyTestServiceContext, ("DConcurrencyTestClientObserver"), ()) -(InitializerContext* context) { - setGlobalServiceContext(ServiceContext::make()); -} - -class LockerImplClientObserver : public ServiceContext::ClientObserver { +class DConcurrencyTest : public benchmark::Fixture { public: - LockerImplClientObserver() = default; - ~LockerImplClientObserver() = default; - - void onCreateClient(Client* client) final {} - - void onDestroyClient(Client* client) final {} - - void onCreateOperationContext(OperationContext* opCtx) override { - opCtx->setLockState(std::make_unique(opCtx->getServiceContext())); + void SetUp(benchmark::State& state) override { + if (state.thread_index == 0) { + setGlobalServiceContext(ServiceContext::make()); + makeKClientsWithLockers(state.threads); + } } - void onDestroyOperationContext(OperationContext* opCtx) final {} -}; - -const ServiceContext::ConstructorActionRegisterer clientObserverRegisterer{ - "DConcurrencyTestClientObserver", - [](ServiceContext* service) { - service->registerClientObserver(std::make_unique()); - }, - [](ServiceContext* serviceContext) { - }}; + void TearDown(benchmark::State& state) override { + if (state.thread_index == 0) { + clients.clear(); + 
setGlobalServiceContext({}); + } + } -class DConcurrencyTest : public benchmark::Fixture { -public: /** * Returns a vector of Clients of length 'k', each of which has an OperationContext with its * lockState set to a LockerImpl. @@ -93,10 +88,6 @@ class DConcurrencyTest : public benchmark::Fixture { }; BENCHMARK_DEFINE_F(DConcurrencyTest, BM_StdMutex)(benchmark::State& state) { - if (state.thread_index == 0) { - makeKClientsWithLockers(state.threads); - } - static auto mtx = MONGO_MAKE_LATCH(); for (auto keepRunning : state) { @@ -105,10 +96,6 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_StdMutex)(benchmark::State& state) { } BENCHMARK_DEFINE_F(DConcurrencyTest, BM_ResourceMutexShared)(benchmark::State& state) { - if (state.thread_index == 0) { - makeKClientsWithLockers(state.threads); - } - static Lock::ResourceMutex mtx("testMutex"); for (auto keepRunning : state) { @@ -117,10 +104,6 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_ResourceMutexShared)(benchmark::State& s } BENCHMARK_DEFINE_F(DConcurrencyTest, BM_ResourceMutexExclusive)(benchmark::State& state) { - if (state.thread_index == 0) { - makeKClientsWithLockers(state.threads); - } - static Lock::ResourceMutex mtx("testMutex"); for (auto keepRunning : state) { @@ -129,70 +112,42 @@ BENCHMARK_DEFINE_F(DConcurrencyTest, BM_ResourceMutexExclusive)(benchmark::State } BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionIntentSharedLock)(benchmark::State& state) { - if (state.thread_index == 0) { - makeKClientsWithLockers(state.threads); - } - - DatabaseName dbName(boost::none, "test"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "test"); for (auto keepRunning : state) { Lock::DBLock dlk(clients[state.thread_index].second.get(), dbName, MODE_IS); - Lock::CollectionLock clk( - clients[state.thread_index].second.get(), NamespaceString("test.coll"), MODE_IS); - } - - if (state.thread_index == 0) { - clients.clear(); + Lock::CollectionLock clk(clients[state.thread_index].second.get(), + NamespaceString::createNamespaceString_forTest("test.coll"), + MODE_IS); } } BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionIntentExclusiveLock)(benchmark::State& state) { - if (state.thread_index == 0) { - makeKClientsWithLockers(state.threads); - } - - DatabaseName dbName(boost::none, "test"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "test"); for (auto keepRunning : state) { Lock::DBLock dlk(clients[state.thread_index].second.get(), dbName, MODE_IX); - Lock::CollectionLock clk( - clients[state.thread_index].second.get(), NamespaceString("test.coll"), MODE_IX); - } - - if (state.thread_index == 0) { - clients.clear(); + Lock::CollectionLock clk(clients[state.thread_index].second.get(), + NamespaceString::createNamespaceString_forTest("test.coll"), + MODE_IX); } } BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionSharedLock)(benchmark::State& state) { - if (state.thread_index == 0) { - makeKClientsWithLockers(state.threads); - } - - DatabaseName dbName(boost::none, "test"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "test"); for (auto keepRunning : state) { Lock::DBLock dlk(clients[state.thread_index].second.get(), dbName, MODE_IS); - Lock::CollectionLock clk( - clients[state.thread_index].second.get(), NamespaceString("test.coll"), MODE_S); - } - - if (state.thread_index == 0) { - clients.clear(); + Lock::CollectionLock clk(clients[state.thread_index].second.get(), + NamespaceString::createNamespaceString_forTest("test.coll"), + MODE_S); } } 
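The benchmark refactor above replaces a `MONGO_INITIALIZER` plus per-benchmark setup blocks with `benchmark::Fixture`'s `SetUp`/`TearDown` hooks, doing the shared setup once on thread 0. The stripped-down sketch below shows that Google Benchmark pattern in isolation; the fixture body is a placeholder rather than the service-context and client setup in `d_concurrency_bm.cpp`, and `state.thread_index`/`state.threads` are written as members, matching the diff (newer benchmark releases expose them as methods).

```cpp
#include <benchmark/benchmark.h>

#include <mutex>
#include <vector>

// Shared state prepared once per benchmark run (placeholder for the
// per-thread Client/OperationContext setup in d_concurrency_bm.cpp).
std::vector<int> sharedSlots;

class FixtureSketch : public benchmark::Fixture {
public:
    void SetUp(benchmark::State& state) override {
        if (state.thread_index == 0) {             // only one thread does the setup
            sharedSlots.assign(state.threads, 0);  // one slot per benchmark thread
        }
    }

    void TearDown(benchmark::State& state) override {
        if (state.thread_index == 0) {  // and only one thread tears it down
            sharedSlots.clear();
        }
    }
};

std::mutex mtx;

BENCHMARK_DEFINE_F(FixtureSketch, BM_MutexLockUnlock)(benchmark::State& state) {
    for (auto _ : state) {
        std::lock_guard<std::mutex> lk(mtx);
        benchmark::DoNotOptimize(sharedSlots[state.thread_index]++);
    }
}

BENCHMARK_REGISTER_F(FixtureSketch, BM_MutexLockUnlock)->ThreadRange(1, 4);

BENCHMARK_MAIN();
```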
BENCHMARK_DEFINE_F(DConcurrencyTest, BM_CollectionExclusiveLock)(benchmark::State& state) { - if (state.thread_index == 0) { - makeKClientsWithLockers(state.threads); - } - - DatabaseName dbName(boost::none, "test"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "test"); for (auto keepRunning : state) { Lock::DBLock dlk(clients[state.thread_index].second.get(), dbName, MODE_IX); - Lock::CollectionLock clk( - clients[state.thread_index].second.get(), NamespaceString("test.coll"), MODE_X); - } - - if (state.thread_index == 0) { - clients.clear(); + Lock::CollectionLock clk(clients[state.thread_index].second.get(), + NamespaceString::createNamespaceString_forTest("test.coll"), + MODE_X); } } diff --git a/src/mongo/db/concurrency/d_concurrency_test.cpp b/src/mongo/db/concurrency/d_concurrency_test.cpp index 4320e51ecdf79..3831a315312c7 100644 --- a/src/mongo/db/concurrency/d_concurrency_test.cpp +++ b/src/mongo/db/concurrency/d_concurrency_test.cpp @@ -27,28 +27,58 @@ * it in the license file. */ +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/concurrency/lock_manager_test_help.h" +#include "mongo/db/concurrency/fast_map_noalloc.h" +#include "mongo/db/concurrency/locker_impl.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" -#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/concurrency/resource_catalog.h" +#include "mongo/db/curop.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/execution_control/concurrency_adjustment_parameters_gen.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/recovery_unit_noop.h" -#include "mongo/db/storage/storage_engine_parameters_gen.h" #include "mongo/db/storage/ticketholder_manager.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/future.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/stdx/thread.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/concurrency/priority_ticketholder.h" #include "mongo/util/concurrency/semaphore_ticketholder.h" -#include "mongo/util/debug_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/progress_meter.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -72,7 +102,7 @@ class UseReaderWriterGlobalThrottling { public: explicit UseReaderWriterGlobalThrottling(ServiceContext* svcCtx, int numTickets) : _svcCtx(svcCtx) { - gStorageEngineConcurrencyAdjustmentAlgorithm = ""; + gStorageEngineConcurrencyAdjustmentAlgorithm = "fixedConcurrentTransactions"; // TODO SERVER-72616: Remove ifdefs once PriorityTicketHolder is available 
cross-platform. #ifdef __linux__ if constexpr (std::is_same_v) { @@ -114,7 +144,7 @@ class UseReaderWriterGlobalThrottling { }; -class DConcurrencyTestFixture : public ServiceContextMongoDTest { +class DConcurrencyTestFixture : public ServiceContextTest { public: /** * Returns a vector of Clients of length 'k', each of which has an OperationContext with its @@ -182,7 +212,7 @@ class DConcurrencyTestFixture : public ServiceContextMongoDTest { TEST_F(DConcurrencyTestFixture, WriteConflictRetryInstantiatesOK) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - writeConflictRetry(opCtx.get(), "", "", [] {}); + writeConflictRetry(opCtx.get(), "", NamespaceString(), [] {}); } TEST_F(DConcurrencyTestFixture, WriteConflictRetryRetriesFunctionOnWriteConflictException) { @@ -190,7 +220,7 @@ TEST_F(DConcurrencyTestFixture, WriteConflictRetryRetriesFunctionOnWriteConflict getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); auto&& opDebug = CurOp::get(opCtx.get())->debug(); ASSERT_EQUALS(0, opDebug.additiveMetrics.writeConflicts.load()); - ASSERT_EQUALS(100, writeConflictRetry(opCtx.get(), "", "", [&opDebug] { + ASSERT_EQUALS(100, writeConflictRetry(opCtx.get(), "", NamespaceString(), [&opDebug] { if (0 == opDebug.additiveMetrics.writeConflicts.load()) { throwWriteConflictException( str::stream() @@ -207,7 +237,7 @@ TEST_F(DConcurrencyTestFixture, WriteConflictRetryPropagatesNonWriteConflictExce getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); ASSERT_THROWS_CODE(writeConflictRetry(opCtx.get(), "", - "", + NamespaceString(), [] { uassert(ErrorCodes::OperationFailed, "", false); MONGO_UNREACHABLE; @@ -225,7 +255,7 @@ TEST_F(DConcurrencyTestFixture, ASSERT_THROWS(writeConflictRetry( opCtx.get(), "", - "", + NamespaceString(), [] { throwWriteConflictException( str::stream() << "Verify that WriteConflictExceptions are propogated " @@ -361,7 +391,8 @@ TEST_F(DConcurrencyTestFixture, ASSERT_EQ(lockState->getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); { - Lock::DBLock dbWrite(opCtx.get(), DatabaseName(boost::none, "db"), MODE_IX); + Lock::DBLock dbWrite( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IX); ASSERT(lockState->isW()); ASSERT(MODE_X == lockState->getLockMode(resourceIdGlobal)) << "unexpected global lock mode " << modeName(lockState->getLockMode(resourceIdGlobal)); @@ -403,7 +434,8 @@ TEST_F(DConcurrencyTestFixture, ASSERT_EQ(lockState->getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); { - Lock::DBLock dbWrite(opCtx.get(), DatabaseName(boost::none, "db"), MODE_IX); + Lock::DBLock dbWrite( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IX); ASSERT(lockState->isW()); ASSERT(MODE_X == lockState->getLockMode(resourceIdGlobal)) << "unexpected global lock mode " << modeName(lockState->getLockMode(resourceIdGlobal)); @@ -442,7 +474,8 @@ TEST_F(DConcurrencyTestFixture, ASSERT_EQ(lockState->getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); { - Lock::DBLock dbWrite(opCtx.get(), DatabaseName(boost::none, "db"), MODE_IX); + Lock::DBLock dbWrite( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IX); ASSERT(lockState->isW()); ASSERT(MODE_X == lockState->getLockMode(resourceIdGlobal)) << "unexpected global lock mode " << modeName(lockState->getLockMode(resourceIdGlobal)); @@ -606,7 +639,10 @@ TEST_F(DConcurrencyTestFixture, 
DBLockXSetsGlobalWriteLockedOnOperationContext) ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenForWrite()); ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTaken()); - { Lock::DBLock dbWrite(opCtx, DatabaseName(boost::none, "db"), MODE_X); } + { + Lock::DBLock dbWrite( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_X); + } ASSERT_TRUE(opCtx->lockState()->wasGlobalLockTakenForWrite()); ASSERT_TRUE(opCtx->lockState()->wasGlobalLockTaken()); } @@ -617,11 +653,99 @@ TEST_F(DConcurrencyTestFixture, DBLockSDoesNotSetGlobalWriteLockedOnOperationCon ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenForWrite()); ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTaken()); - { Lock::DBLock dbRead(opCtx, DatabaseName(boost::none, "db"), MODE_S); } + { + Lock::DBLock dbRead( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_S); + } ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenForWrite()); ASSERT_TRUE(opCtx->lockState()->wasGlobalLockTaken()); } +TEST_F(DConcurrencyTestFixture, TenantLock) { + auto opCtx = makeOperationContext(); + getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); + TenantId tenantId{OID::gen()}; + ResourceId tenantResourceId{ResourceType::RESOURCE_TENANT, tenantId}; + struct TestCase { + LockMode globalLockMode; + LockMode tenantLockMode; + }; + std::vector testCases{ + {MODE_IX, MODE_IX}, {MODE_IX, MODE_X}, {MODE_IS, MODE_S}, {MODE_IS, MODE_IS}}; + for (auto&& testCase : testCases) { + { + Lock::GlobalLock globalLock{opCtx.get(), testCase.globalLockMode}; + Lock::TenantLock tenantLock{opCtx.get(), tenantId, testCase.tenantLockMode}; + ASSERT_TRUE( + opCtx->lockState()->isLockHeldForMode(tenantResourceId, testCase.tenantLockMode)); + } + ASSERT_FALSE( + opCtx->lockState()->isLockHeldForMode(tenantResourceId, testCase.tenantLockMode)); + } +} + +TEST_F(DConcurrencyTestFixture, DBLockTakesTenantLock) { + auto opCtx = makeOperationContext(); + getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); + TenantId tenantId{OID::gen()}; + ResourceId tenantResourceId{ResourceType::RESOURCE_TENANT, tenantId}; + struct TestCase { + bool tenantOwned; + LockMode databaseLockMode; + boost::optional tenantLockMode; + LockMode expectedTenantLockMode; + }; + + StringData testDatabaseName{"test"}; + const bool tenantOwned{true}; + const bool tenantless{false}; + const boost::optional none; + std::vector testCases{ + {tenantless, MODE_S, none, MODE_NONE}, + {tenantless, MODE_IS, none, MODE_NONE}, + {tenantless, MODE_X, none, MODE_NONE}, + {tenantless, MODE_IX, none, MODE_NONE}, + {tenantOwned, MODE_S, none, MODE_IS}, + {tenantOwned, MODE_IS, none, MODE_IS}, + {tenantOwned, MODE_X, none, MODE_IX}, + {tenantOwned, MODE_IX, none, MODE_IX}, + {tenantOwned, MODE_X, MODE_X, MODE_X}, + {tenantOwned, MODE_IX, MODE_X, MODE_X}, + }; + for (auto&& testCase : testCases) { + { + Lock::DBLock dbLock( + opCtx.get(), + DatabaseName::createDatabaseName_forTest( + testCase.tenantOwned ? boost::make_optional(tenantId) : boost::none, + testDatabaseName), + testCase.databaseLockMode, + Date_t::max(), + testCase.tenantLockMode); + ASSERT(opCtx->lockState()->getLockMode(tenantResourceId) == + testCase.expectedTenantLockMode) + << " db lock mode: " << modeName(testCase.databaseLockMode) + << ", tenant lock mode: " + << (testCase.tenantLockMode ? 
modeName(*testCase.tenantLockMode) : "-"); + } + ASSERT(opCtx->lockState()->getLockMode(tenantResourceId) == MODE_NONE) + << " db lock mode: " << modeName(testCase.databaseLockMode) << ", tenant lock mode: " + << (testCase.tenantLockMode ? modeName(*testCase.tenantLockMode) : "-"); + } + + // Verify that tenant lock survives move. + { + auto lockBuilder = [&]() { + return Lock::DBLock{ + opCtx.get(), + DatabaseName::createDatabaseName_forTest(tenantId, testDatabaseName), + MODE_S}; + }; + Lock::DBLock dbLockCopy{lockBuilder()}; + ASSERT(opCtx->lockState()->isLockHeldForMode(tenantResourceId, MODE_IS)); + } +} + TEST_F(DConcurrencyTestFixture, GlobalLockXDoesNotSetGlobalWriteLockedWhenLockAcquisitionTimesOut) { auto clients = makeKClientsWithLockers(2); @@ -694,7 +818,10 @@ TEST_F(DConcurrencyTestFixture, DBLockSDoesNotSetGlobalLockTakenInModeConflictin auto opCtx = clients[0].second.get(); ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites()); - { Lock::DBLock dbWrite(opCtx, DatabaseName(boost::none, "db"), MODE_S); } + { + Lock::DBLock dbWrite( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_S); + } ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites()); } @@ -703,7 +830,10 @@ TEST_F(DConcurrencyTestFixture, DBLockISDoesNotSetGlobalLockTakenInModeConflicti auto opCtx = clients[0].second.get(); ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites()); - { Lock::DBLock dbWrite(opCtx, DatabaseName(boost::none, "db"), MODE_IS); } + { + Lock::DBLock dbWrite( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IS); + } ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites()); } @@ -712,7 +842,10 @@ TEST_F(DConcurrencyTestFixture, DBLockIXSetsGlobalLockTakenInModeConflictingWith auto opCtx = clients[0].second.get(); ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites()); - { Lock::DBLock dbWrite(opCtx, DatabaseName(boost::none, "db"), MODE_IX); } + { + Lock::DBLock dbWrite( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IX); + } ASSERT_TRUE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites()); } @@ -721,7 +854,10 @@ TEST_F(DConcurrencyTestFixture, DBLockXSetsGlobalLockTakenInModeConflictingWithW auto opCtx = clients[0].second.get(); ASSERT_FALSE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites()); - { Lock::DBLock dbRead(opCtx, DatabaseName(boost::none, "db"), MODE_X); } + { + Lock::DBLock dbRead( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_X); + } ASSERT_TRUE(opCtx->lockState()->wasGlobalLockTakenInModeConflictingWithWrites()); } @@ -1046,7 +1182,7 @@ TEST_F(DConcurrencyTestFixture, DBLockWaitIsInterruptible) { // The main thread takes an exclusive lock, causing the spawned thread to wait when it attempts // to acquire a conflicting lock. - DatabaseName dbName(boost::none, "db"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "db"); Lock::DBLock dbLock(opCtx1, dbName, MODE_X); auto result = runTaskAndKill(opCtx2, [&]() { @@ -1086,14 +1222,15 @@ TEST_F(DConcurrencyTestFixture, DBLockWaitIsNotInterruptibleWithLockGuard) { // The main thread takes an exclusive lock, causing the spawned thread to wait when it attempts // to acquire a conflicting lock. 
boost::optional dbLock = - Lock::DBLock(opCtx1, DatabaseName(boost::none, "db"), MODE_X); + Lock::DBLock(opCtx1, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_X); // Killing the lock wait should not interrupt it. auto result = runTaskAndKill( opCtx2, [&]() { UninterruptibleLockGuard noInterrupt(opCtx2->lockState()); // NOLINT. - Lock::DBLock d(opCtx2, DatabaseName(boost::none, "db"), MODE_S); + Lock::DBLock d( + opCtx2, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_S); }, [&] { dbLock.reset(); }); // Should not throw an exception. @@ -1131,25 +1268,29 @@ TEST_F(DConcurrencyTestFixture, LockCompleteInterruptedWhenUncontested) { TEST_F(DConcurrencyTestFixture, DBLockTakesS) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - Lock::DBLock dbRead(opCtx.get(), DatabaseName(boost::none, "db"), MODE_S); + Lock::DBLock dbRead( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_S); - const ResourceId resIdDb(RESOURCE_DATABASE, DatabaseName(boost::none, "db")); + const ResourceId resIdDb(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "db")); ASSERT(opCtx->lockState()->getLockMode(resIdDb) == MODE_S); } TEST_F(DConcurrencyTestFixture, DBLockTakesX) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - Lock::DBLock dbWrite(opCtx.get(), DatabaseName(boost::none, "db"), MODE_X); + Lock::DBLock dbWrite( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_X); - const ResourceId resIdDb(RESOURCE_DATABASE, DatabaseName(boost::none, "db")); + const ResourceId resIdDb(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "db")); ASSERT(opCtx->lockState()->getLockMode(resIdDb) == MODE_X); } TEST_F(DConcurrencyTestFixture, DBLockTakesISForAdminIS) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - Lock::DBLock dbRead(opCtx.get(), DatabaseName(boost::none, "admin"), MODE_IS); + Lock::DBLock dbRead(opCtx.get(), DatabaseName::kAdmin, MODE_IS); ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_IS); } @@ -1157,7 +1298,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesISForAdminIS) { TEST_F(DConcurrencyTestFixture, DBLockTakesSForAdminS) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - Lock::DBLock dbRead(opCtx.get(), DatabaseName(boost::none, "admin"), MODE_S); + Lock::DBLock dbRead(opCtx.get(), DatabaseName::kAdmin, MODE_S); ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_S); } @@ -1165,7 +1306,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesSForAdminS) { TEST_F(DConcurrencyTestFixture, DBLockTakesIXForAdminIX) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - Lock::DBLock dbWrite(opCtx.get(), DatabaseName(boost::none, "admin"), MODE_IX); + Lock::DBLock dbWrite(opCtx.get(), DatabaseName::kAdmin, MODE_IX); ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_IX); } @@ -1173,7 +1314,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesIXForAdminIX) { TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminX) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - Lock::DBLock dbWrite(opCtx.get(), DatabaseName(boost::none, "admin"), MODE_X); + Lock::DBLock 
dbWrite(opCtx.get(), DatabaseName::kAdmin, MODE_X); ASSERT(opCtx->lockState()->getLockMode(resourceIdAdminDB) == MODE_X); } @@ -1181,7 +1322,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTakesXForAdminX) { TEST_F(DConcurrencyTestFixture, MultipleWriteDBLocksOnSameThread) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - DatabaseName dbName(boost::none, "db1"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "db1"); Lock::DBLock r1(opCtx.get(), dbName, MODE_X); Lock::DBLock r2(opCtx.get(), dbName, MODE_X); @@ -1192,7 +1333,7 @@ TEST_F(DConcurrencyTestFixture, MultipleConflictingDBLocksOnSameThread) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); auto lockState = opCtx->lockState(); - DatabaseName dbName(boost::none, "db1"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "db1"); Lock::DBLock r1(opCtx.get(), dbName, MODE_X); Lock::DBLock r2(opCtx.get(), dbName, MODE_S); @@ -1200,32 +1341,123 @@ TEST_F(DConcurrencyTestFixture, MultipleConflictingDBLocksOnSameThread) { ASSERT(lockState->isDbLockedForMode(dbName, MODE_S)); } -TEST_F(DConcurrencyTestFixture, IsDbLockedForSMode) { - DatabaseName dbName(boost::none, "db"); - +TEST_F(DConcurrencyTestFixture, IsDbLockedForMode_IsCollectionLockedForMode) { auto opCtx = makeOperationContext(); getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); auto lockState = opCtx->lockState(); - Lock::DBLock dbLock(opCtx.get(), dbName, MODE_S); - - ASSERT(lockState->isDbLockedForMode(dbName, MODE_IS)); - ASSERT(!lockState->isDbLockedForMode(dbName, MODE_IX)); - ASSERT(lockState->isDbLockedForMode(dbName, MODE_S)); - ASSERT(!lockState->isDbLockedForMode(dbName, MODE_X)); -} -TEST_F(DConcurrencyTestFixture, IsDbLockedForXMode) { - DatabaseName dbName(boost::none, "db"); - - auto opCtx = makeOperationContext(); - getClient()->swapLockState(std::make_unique(opCtx->getServiceContext())); - auto lockState = opCtx->lockState(); - Lock::DBLock dbLock(opCtx.get(), dbName, MODE_X); + // Database ownership options to test. + enum DatabaseOwnershipOptions { + // Owned by a tenant and not. + kAll, + // Owned by a tenant only. + kTenantOwned + }; + struct TestCase { + LockMode globalLockMode; + LockMode tenantLockMode; + DatabaseOwnershipOptions databaseOwnership; + LockMode databaseLockMode; + LockMode checkedDatabaseLockMode; + bool expectedResult; + }; - ASSERT(lockState->isDbLockedForMode(dbName, MODE_IS)); - ASSERT(lockState->isDbLockedForMode(dbName, MODE_IX)); - ASSERT(lockState->isDbLockedForMode(dbName, MODE_S)); - ASSERT(lockState->isDbLockedForMode(dbName, MODE_X)); + TenantId tenantId{OID::gen()}; + StringData testDatabaseName{"test"}; + std::vector testCases{ + // Only global lock acquired. + {MODE_X, MODE_NONE, kAll, MODE_NONE, MODE_X, true}, + {MODE_X, MODE_NONE, kAll, MODE_NONE, MODE_IX, true}, + {MODE_X, MODE_NONE, kAll, MODE_NONE, MODE_S, true}, + {MODE_X, MODE_NONE, kAll, MODE_NONE, MODE_IS, true}, + {MODE_S, MODE_NONE, kAll, MODE_NONE, MODE_X, false}, + {MODE_S, MODE_NONE, kAll, MODE_NONE, MODE_IX, false}, + {MODE_S, MODE_NONE, kAll, MODE_NONE, MODE_S, true}, + {MODE_S, MODE_NONE, kAll, MODE_NONE, MODE_IS, true}, + // Global and tenant locks acquired. 
+ {MODE_IX, MODE_NONE, kTenantOwned, MODE_NONE, MODE_X, false}, + {MODE_IX, MODE_NONE, kTenantOwned, MODE_NONE, MODE_IX, false}, + {MODE_IX, MODE_NONE, kTenantOwned, MODE_NONE, MODE_S, false}, + {MODE_IX, MODE_NONE, kTenantOwned, MODE_NONE, MODE_IS, false}, + {MODE_IX, MODE_X, kTenantOwned, MODE_NONE, MODE_X, true}, + {MODE_IX, MODE_X, kTenantOwned, MODE_NONE, MODE_IX, true}, + {MODE_IX, MODE_X, kTenantOwned, MODE_NONE, MODE_S, true}, + {MODE_IX, MODE_X, kTenantOwned, MODE_NONE, MODE_IS, true}, + {MODE_IS, MODE_NONE, kTenantOwned, MODE_NONE, MODE_X, false}, + {MODE_IS, MODE_NONE, kTenantOwned, MODE_NONE, MODE_IX, false}, + {MODE_IS, MODE_NONE, kTenantOwned, MODE_NONE, MODE_S, false}, + {MODE_IS, MODE_NONE, kTenantOwned, MODE_NONE, MODE_IS, false}, + {MODE_IS, MODE_S, kTenantOwned, MODE_NONE, MODE_X, false}, + {MODE_IS, MODE_S, kTenantOwned, MODE_NONE, MODE_IX, false}, + {MODE_IS, MODE_S, kTenantOwned, MODE_NONE, MODE_S, true}, + {MODE_IS, MODE_S, kTenantOwned, MODE_NONE, MODE_IS, true}, + // Global, tenant, db locks acquired. + {MODE_NONE, MODE_NONE, kAll, MODE_NONE, MODE_X, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_NONE, MODE_IX, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_NONE, MODE_S, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_NONE, MODE_IS, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_S, MODE_X, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_S, MODE_IX, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_S, MODE_S, true}, + {MODE_NONE, MODE_NONE, kAll, MODE_S, MODE_IS, true}, + {MODE_NONE, MODE_NONE, kAll, MODE_X, MODE_X, true}, + {MODE_NONE, MODE_NONE, kAll, MODE_X, MODE_IX, true}, + {MODE_NONE, MODE_NONE, kAll, MODE_X, MODE_S, true}, + {MODE_NONE, MODE_NONE, kAll, MODE_X, MODE_IS, true}, + {MODE_NONE, MODE_NONE, kAll, MODE_IX, MODE_X, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_IX, MODE_IX, true}, + {MODE_NONE, MODE_NONE, kAll, MODE_IX, MODE_S, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_IX, MODE_IS, true}, + {MODE_NONE, MODE_NONE, kAll, MODE_IS, MODE_X, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_IS, MODE_IX, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_IS, MODE_S, false}, + {MODE_NONE, MODE_NONE, kAll, MODE_IS, MODE_IS, true}, + }; + for (auto&& testCase : testCases) { + { + for (auto&& tenantOwned : std::vector{false, true}) { + if (!tenantOwned && kTenantOwned == testCase.databaseOwnership) { + continue; + } + const DatabaseName databaseName = DatabaseName::createDatabaseName_forTest( + tenantOwned ? boost::make_optional(tenantId) : boost::none, testDatabaseName); + boost::optional globalLock; + boost::optional tenantLock; + boost::optional dbLock; + + if (MODE_NONE != testCase.globalLockMode) { + globalLock.emplace(opCtx.get(), testCase.globalLockMode); + } + if (MODE_NONE != testCase.tenantLockMode) { + tenantLock.emplace(opCtx.get(), tenantId, testCase.tenantLockMode); + } + if (MODE_NONE != testCase.databaseLockMode) { + dbLock.emplace(opCtx.get(), databaseName, testCase.databaseLockMode); + } + ASSERT( + lockState->isDbLockedForMode(databaseName, testCase.checkedDatabaseLockMode) == + testCase.expectedResult) + << " global lock mode: " << modeName(testCase.globalLockMode) + << " tenant lock mode: " << modeName(testCase.tenantLockMode) + << " db lock mode: " << modeName(testCase.databaseLockMode) + << " tenant owned: " << tenantOwned + << " checked lock mode: " << modeName(testCase.checkedDatabaseLockMode); + + // If database is not locked with intent lock, a collection in the database is + // locked for the same lock mode. 
+ ASSERT(testCase.databaseLockMode == MODE_IS || + testCase.databaseLockMode == MODE_IX || + lockState->isCollectionLockedForMode( + NamespaceString::createNamespaceString_forTest(databaseName, "coll"), + testCase.checkedDatabaseLockMode) == testCase.expectedResult) + << " global lock mode: " << modeName(testCase.globalLockMode) + << " tenant lock mode: " << modeName(testCase.tenantLockMode) + << " db lock mode: " << modeName(testCase.databaseLockMode) + << " tenant owned: " << tenantOwned + << " checked lock mode: " << modeName(testCase.checkedDatabaseLockMode); + } + } + } } TEST_F(DConcurrencyTestFixture, IsCollectionLocked_DB_Locked_IS) { @@ -1294,8 +1526,8 @@ TEST_F(DConcurrencyTestFixture, Stress) { AtomicWord ready{0}; std::vector threads; - DatabaseName fooDb(boost::none, "foo"); - DatabaseName localDb(boost::none, "local"); + DatabaseName fooDb = DatabaseName::createDatabaseName_forTest(boost::none, "foo"); + DatabaseName localDb = DatabaseName::kLocal; for (int threadId = 0; threadId < kMaxStressThreads; threadId++) { threads.emplace_back([&, threadId]() { @@ -1330,7 +1562,7 @@ TEST_F(DConcurrencyTestFixture, Stress) { { Lock::DBLock r(clients[threadId].second.get(), fooDb, MODE_S); } { Lock::DBLock r(clients[threadId].second.get(), - DatabaseName(boost::none, "bar"), + DatabaseName::createDatabaseName_forTest(boost::none, "bar"), MODE_S); } } else if (i % 7 == 6) { @@ -1359,27 +1591,24 @@ TEST_F(DConcurrencyTestFixture, Stress) { } else if (q == 2) { { - Lock::DBLock x(clients[threadId].second.get(), - DatabaseName(boost::none, "admin"), - MODE_S); + Lock::DBLock x( + clients[threadId].second.get(), DatabaseName::kAdmin, MODE_S); } { - Lock::DBLock x(clients[threadId].second.get(), - DatabaseName(boost::none, "admin"), - MODE_X); + Lock::DBLock x( + clients[threadId].second.get(), DatabaseName::kAdmin, MODE_X); } } else if (q == 3) { Lock::DBLock x(clients[threadId].second.get(), fooDb, MODE_X); - Lock::DBLock y(clients[threadId].second.get(), - DatabaseName(boost::none, "admin"), - MODE_S); + Lock::DBLock y( + clients[threadId].second.get(), DatabaseName::kAdmin, MODE_S); } else if (q == 4) { - Lock::DBLock x(clients[threadId].second.get(), - DatabaseName(boost::none, "foo2"), - MODE_S); - Lock::DBLock y(clients[threadId].second.get(), - DatabaseName(boost::none, "admin"), - MODE_S); + Lock::DBLock x( + clients[threadId].second.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "foo2"), + MODE_S); + Lock::DBLock y( + clients[threadId].second.get(), DatabaseName::kAdmin, MODE_S); } else if (q == 5) { Lock::DBLock x(clients[threadId].second.get(), fooDb, MODE_IS); } else if (q == 6) { @@ -1443,14 +1672,14 @@ TEST_F(DConcurrencyTestFixture, StressPartitioned) { } if (i % 2 == 0) { - Lock::DBLock x( - clients[threadId].second.get(), DatabaseName(boost::none, "foo"), MODE_IS); + Lock::DBLock x(clients[threadId].second.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + MODE_IS); } else { - Lock::DBLock x( - clients[threadId].second.get(), DatabaseName(boost::none, "foo"), MODE_IX); - Lock::DBLock y(clients[threadId].second.get(), - DatabaseName(boost::none, "local"), + Lock::DBLock x(clients[threadId].second.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "foo"), MODE_IX); + Lock::DBLock y(clients[threadId].second.get(), DatabaseName::kLocal, MODE_IX); } if (threadId == kMaxStressThreads - 1) @@ -1468,10 +1697,12 @@ TEST_F(DConcurrencyTestFixture, StressPartitioned) { } TEST_F(DConcurrencyTestFixture, ResourceMutexLabels) { + auto 
opCtx = makeOperationContext(); + Lock::ResourceMutex mutex("label"); - ASSERT(mutex.getName() == "label"); + ASSERT_EQ("label", *ResourceCatalog::get().name(mutex.getRid())); Lock::ResourceMutex mutex2("label2"); - ASSERT(mutex2.getName() == "label2"); + ASSERT_EQ("label2", *ResourceCatalog::get().name(mutex2.getRid())); } TEST_F(DConcurrencyTestFixture, Throttling) { @@ -1742,7 +1973,7 @@ TEST_F(DConcurrencyTestFixture, auto opCtx1 = clientOpctxPairs[0].second.get(); auto opCtx2 = clientOpctxPairs[1].second.get(); - DatabaseName dbName{boost::none, "test"}; + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "test"); boost::optional globalIX = Lock::GlobalLock{opCtx1, LockMode::MODE_IX}; boost::optional dbIX = Lock::DBLock{opCtx1, dbName, LockMode::MODE_IX}; @@ -1825,23 +2056,27 @@ TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextThrowsEvenWhenUncontes opCtx->markKilled(); boost::optional dbWriteLock; - ASSERT_THROWS_CODE(dbWriteLock.emplace(opCtx, DatabaseName(boost::none, "db"), MODE_IX), - AssertionException, - ErrorCodes::Interrupted); + ASSERT_THROWS_CODE( + dbWriteLock.emplace( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IX), + AssertionException, + ErrorCodes::Interrupted); } TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextThrowsEvenWhenAcquiringRecursively) { auto clients = makeKClientsWithLockers(1); auto opCtx = clients[0].second.get(); - Lock::DBLock dbWriteLock(opCtx, DatabaseName(boost::none, "db"), MODE_X); + Lock::DBLock dbWriteLock( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_X); opCtx->markKilled(); { boost::optional recursiveDBWriteLock; ASSERT_THROWS_CODE( - recursiveDBWriteLock.emplace(opCtx, DatabaseName(boost::none, "db"), MODE_X), + recursiveDBWriteLock.emplace( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_X), AssertionException, ErrorCodes::Interrupted); } @@ -1853,8 +2088,10 @@ TEST_F(DConcurrencyTestFixture, DBLockInInterruptedContextRespectsUninterruptibl opCtx->markKilled(); - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. - Lock::DBLock dbWriteLock(opCtx, DatabaseName(boost::none, "db"), MODE_X); // Does not throw. + UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. + Lock::DBLock dbWriteLock(opCtx, + DatabaseName::createDatabaseName_forTest(boost::none, "db"), + MODE_X); // Does not throw. 
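These interruption tests rely on one contract: once the operation context is marked killed, any lock wait throws `ErrorCodes::Interrupted` unless an `UninterruptibleLockGuard` is in scope. The sketch below models that contract with hypothetical names (`InterruptState`, `UninterruptibleGuard`, `acquire`); it illustrates the RAII-counter idea only and is not the `Locker` implementation.

```cpp
#include <cassert>
#include <stdexcept>

// Hypothetical stand-in for the interruption-related state on an operation context.
struct InterruptState {
    bool killed = false;
    int uninterruptibleDepth = 0;  // >0 while an UninterruptibleGuard is alive
};

// RAII guard: while alive, lock waits ignore the killed flag (nests safely).
class UninterruptibleGuard {
public:
    explicit UninterruptibleGuard(InterruptState& state) : _state(state) {
        ++_state.uninterruptibleDepth;
    }
    ~UninterruptibleGuard() {
        --_state.uninterruptibleDepth;
    }

private:
    InterruptState& _state;
};

// Simplified lock acquisition: check for interruption before (and, in the real
// code, periodically during) the wait.
void acquire(InterruptState& state) {
    if (state.killed && state.uninterruptibleDepth == 0) {
        throw std::runtime_error("Interrupted");
    }
    // ... actually wait for and take the lock ...
}

int main() {
    InterruptState state;
    state.killed = true;

    bool threw = false;
    try {
        acquire(state);  // interrupted context: the wait refuses to start
    } catch (const std::runtime_error&) {
        threw = true;
    }
    assert(threw);

    {
        UninterruptibleGuard guard(state);
        acquire(state);  // does not throw while the guard is in scope
    }
    return 0;
}
```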
} TEST_F(DConcurrencyTestFixture, DBLockTimeout) { @@ -1864,7 +2101,7 @@ TEST_F(DConcurrencyTestFixture, DBLockTimeout) { const Milliseconds timeoutMillis = Milliseconds(1500); - DatabaseName testDb(boost::none, "testdb"); + DatabaseName testDb = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); Lock::DBLock L1(opctx1, testDb, MODE_X, Date_t::max()); ASSERT(opctx1->lockState()->isDbLockedForMode(testDb, MODE_X)); @@ -1889,11 +2126,12 @@ TEST_F(DConcurrencyTestFixture, DBLockTimeoutDueToGlobalLock) { ASSERT(G1.isLocked()); Date_t t1 = Date_t::now(); - ASSERT_THROWS_CODE( - Lock::DBLock( - opctx2, DatabaseName(boost::none, "testdb"), MODE_X, Date_t::now() + timeoutMillis), - AssertionException, - ErrorCodes::LockTimeout); + ASSERT_THROWS_CODE(Lock::DBLock(opctx2, + DatabaseName::createDatabaseName_forTest(boost::none, "testdb"), + MODE_X, + Date_t::now() + timeoutMillis), + AssertionException, + ErrorCodes::LockTimeout); Date_t t2 = Date_t::now(); ASSERT_GTE(t2 - t1 + kMaxClockJitterMillis, Milliseconds(timeoutMillis)); } @@ -1902,7 +2140,8 @@ TEST_F(DConcurrencyTestFixture, CollectionLockInInterruptedContextThrowsEvenWhen auto clients = makeKClientsWithLockers(1); auto opCtx = clients[0].second.get(); - Lock::DBLock dbLock(opCtx, DatabaseName(boost::none, "db"), MODE_IX); + Lock::DBLock dbLock( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IX); opCtx->markKilled(); { @@ -1920,7 +2159,8 @@ TEST_F(DConcurrencyTestFixture, auto clients = makeKClientsWithLockers(1); auto opCtx = clients[0].second.get(); - Lock::DBLock dbLock(opCtx, DatabaseName(boost::none, "db"), MODE_IX); + Lock::DBLock dbLock( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IX); Lock::CollectionLock collLock( opCtx, NamespaceString::createNamespaceString_forTest("db.coll"), MODE_IX); @@ -1940,7 +2180,8 @@ TEST_F(DConcurrencyTestFixture, CollectionLockInInterruptedContextRespectsUninte auto clients = makeKClientsWithLockers(1); auto opCtx = clients[0].second.get(); - Lock::DBLock dbLock(opCtx, DatabaseName(boost::none, "db"), MODE_IX); + Lock::DBLock dbLock( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "db"), MODE_IX); opCtx->markKilled(); @@ -1957,7 +2198,7 @@ TEST_F(DConcurrencyTestFixture, CollectionLockTimeout) { const Milliseconds timeoutMillis = Milliseconds(1500); - DatabaseName testDb(boost::none, "testdb"); + DatabaseName testDb = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); Lock::DBLock DBL1(opctx1, testDb, MODE_IX, Date_t::max()); ASSERT(opctx1->lockState()->isDbLockedForMode(testDb, MODE_IX)); @@ -2503,8 +2744,8 @@ TEST_F(DConcurrencyTestFixture, DifferentTenantsTakeDBLockOnConflictingNamespace auto tenant1 = TenantId(OID::gen()); auto tenant2 = TenantId(OID::gen()); - DatabaseName dbName1(tenant1, db); - DatabaseName dbName2(tenant2, db); + DatabaseName dbName1 = DatabaseName::createDatabaseName_forTest(tenant1, db); + DatabaseName dbName2 = DatabaseName::createDatabaseName_forTest(tenant2, db); Lock::DBLock r1(opCtx1, dbName1, MODE_X); Lock::DBLock r2(opCtx2, dbName2, MODE_X); @@ -2519,7 +2760,7 @@ TEST_F(DConcurrencyTestFixture, ConflictingTenantDBLockThrows) { auto opCtx2 = clients[1].second.get(); auto db = "db1"; - DatabaseName dbName1(TenantId(OID::gen()), db); + DatabaseName dbName1 = DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), db); Lock::DBLock r1(opCtx1, dbName1, MODE_X); ASSERT(opCtx1->lockState()->isDbLockedForMode(dbName1, MODE_X)); diff --git 
a/src/mongo/db/concurrency/deferred_writer.cpp b/src/mongo/db/concurrency/deferred_writer.cpp index 0ec030345eaaa..b50f1a9695f26 100644 --- a/src/mongo/db/concurrency/deferred_writer.cpp +++ b/src/mongo/db/concurrency/deferred_writer.cpp @@ -29,14 +29,32 @@ #include "mongo/db/concurrency/deferred_writer.h" -#include "mongo/db/catalog/collection_write_path.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/dbhelpers.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" -#include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -78,27 +96,31 @@ Status DeferredWriter::_makeCollection(OperationContext* opCtx) { } } -StatusWith> DeferredWriter::_getCollection( - OperationContext* opCtx) { - std::unique_ptr agc; - agc = std::make_unique(opCtx, _nss, MODE_IX); +StatusWith DeferredWriter::_getCollection(OperationContext* opCtx) { + while (true) { + { + auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + _nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + if (collection.exists()) { + return std::move(collection); + } + } - while (!agc->getCollection()) { - // Release the previous AGC's lock before trying to rebuild the collection. - agc.reset(); + // Release the lockS before trying to rebuild the collection. 
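The rewritten `DeferredWriter::_getCollection` keeps a simple shape: acquire the collection, return it if it exists, otherwise release the acquisition, create the collection, and loop to re-acquire it. The generic sketch below shows that acquire-or-create loop; `Acquisition`, `tryAcquire` and `createBackingCollection` are hypothetical placeholders for the shard-role `acquireCollection` machinery, which additionally threads through placement and read-concern arguments.

```cpp
#include <iostream>
#include <optional>
#include <string>

// Toy stand-ins for the catalog state and the shard-role acquisition API.
bool collectionExists = false;

struct Acquisition {
    bool exists;
};

Acquisition tryAcquire(const std::string& ns) {
    return Acquisition{collectionExists};  // placeholder for acquireCollection(...)
}

bool createBackingCollection(const std::string& ns) {
    collectionExists = true;  // placeholder for DeferredWriter::_makeCollection(...)
    return true;
}

// Acquire-or-create loop mirroring DeferredWriter::_getCollection: the
// acquisition lives in an inner scope so its locks are released before the
// create runs, then the loop re-acquires the freshly created collection.
std::optional<Acquisition> getCollection(const std::string& ns) {
    while (true) {
        {
            auto acquisition = tryAcquire(ns);
            if (acquisition.exists) {
                return acquisition;
            }
        }  // acquisition (and any locks it held) released here

        if (!createBackingCollection(ns)) {
            return std::nullopt;  // creation failed: give up and surface the error
        }
    }
}

int main() {
    auto result = getCollection("test.deferredWrites");
    std::cout << (result ? "acquired" : "failed") << "\n";
    return 0;
}
```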
Status status = _makeCollection(opCtx); - if (!status.isOK()) { return status; } - - agc = std::make_unique(opCtx, _nss, MODE_IX); } - - return std::move(agc); } -Status DeferredWriter::_worker(InsertStatement stmt) noexcept try { +Status DeferredWriter::_worker(BSONObj doc) noexcept try { auto uniqueOpCtx = Client::getCurrent()->makeOperationContext(); OperationContext* opCtx = uniqueOpCtx.get(); auto result = _getCollection(opCtx); @@ -107,14 +129,11 @@ Status DeferredWriter::_worker(InsertStatement stmt) noexcept try { return result.getStatus(); } - auto agc = std::move(result.getValue()); + const auto collection = std::move(result.getValue()); - const CollectionPtr& collection = agc->getCollection(); - - Status status = writeConflictRetry(opCtx, "deferred insert", _nss.ns(), [&] { + Status status = writeConflictRetry(opCtx, "deferred insert", _nss, [&] { WriteUnitOfWork wuow(opCtx); - Status status = - collection_internal::insertDocument(opCtx, collection, stmt, nullptr, false); + Status status = Helpers::insert(opCtx, collection, doc); if (!status.isOK()) { return status; } @@ -125,7 +144,7 @@ Status DeferredWriter::_worker(InsertStatement stmt) noexcept try { stdx::lock_guard lock(_mutex); - _numBytes -= stmt.doc.objsize(); + _numBytes -= doc.objsize(); return status; } catch (const DBException& e) { return e.toStatus(); @@ -151,9 +170,6 @@ void DeferredWriter::startup(std::string workerName) { options.maxThreads = 1; options.onCreateThread = [](const std::string& name) { Client::initThread(name); - - stdx::lock_guard lk(cc()); - cc().setSystemOperationKillableByStepdown(lk); }; _pool = std::make_unique(options); _pool->startup(); @@ -178,8 +194,8 @@ bool DeferredWriter::insertDocument(BSONObj obj) { // Check if we're allowed to insert this object. if (_numBytes + obj.objsize() >= _maxNumBytes) { - // If not, drop it. We always drop new entries rather than old ones; that way the caller - // knows at the time of the call that the entry was dropped. + // If not, drop it. We always drop new entries rather than old ones; that way the + // caller knows at the time of the call that the entry was dropped. _logDroppedEntry(); return false; } @@ -189,7 +205,7 @@ bool DeferredWriter::insertDocument(BSONObj obj) { _pool->schedule([this, obj](auto status) { fassert(40588, status); - auto workerStatus = _worker(InsertStatement(obj.getOwned())); + auto workerStatus = _worker(obj.getOwned()); if (!workerStatus.isOK()) { _logFailure(workerStatus); } diff --git a/src/mongo/db/concurrency/deferred_writer.h b/src/mongo/db/concurrency/deferred_writer.h index 1f45fbb8d155d..44b2043de8430 100644 --- a/src/mongo/db/concurrency/deferred_writer.h +++ b/src/mongo/db/concurrency/deferred_writer.h @@ -29,14 +29,26 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/repl/oplog.h" #include "mongo/platform/mutex.h" namespace mongo { -class AutoGetCollection; +class CollectionAcquisition; + class ThreadPool; /** @@ -131,12 +143,12 @@ class DeferredWriter { /** * Ensure that the backing collection exists, and pass back a lock and handle to it. 
*/ - StatusWith> _getCollection(OperationContext* opCtx); + StatusWith _getCollection(OperationContext* opCtx); /** * The method that the worker thread will run. */ - Status _worker(InsertStatement stmt) noexcept; + Status _worker(BSONObj doc) noexcept; /** * The options for the collection, in case we need to create it. diff --git a/src/mongo/db/concurrency/exception_util.cpp b/src/mongo/db/concurrency/exception_util.cpp index 37c447d3cd27e..f59c6f3858b62 100644 --- a/src/mongo/db/concurrency/exception_util.cpp +++ b/src/mongo/db/concurrency/exception_util.cpp @@ -29,10 +29,16 @@ #include "mongo/db/concurrency/exception_util.h" +#include + #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/exception_util_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" #include "mongo/util/duration.h" #include "mongo/util/log_and_backoff.h" @@ -43,14 +49,18 @@ namespace mongo { MONGO_FAIL_POINT_DEFINE(skipWriteConflictRetries); -void logWriteConflictAndBackoff(int attempt, StringData operation, StringData ns) { +void logWriteConflictAndBackoff(int attempt, + StringData operation, + StringData reason, + const NamespaceStringOrUUID& nssOrUUID) { logAndBackoff(4640401, logv2::LogComponent::kWrite, logv2::LogSeverity::Debug(1), static_cast(attempt), "Caught WriteConflictException", "operation"_attr = operation, - logAttrs(NamespaceString(ns))); + "reason"_attr = reason, + "namespace"_attr = toStringForLogging(nssOrUUID)); } namespace { @@ -70,7 +80,7 @@ CounterMetric transactionTooLargeForCacheErrorsConvertedToWriteConflict{ void handleTemporarilyUnavailableException(OperationContext* opCtx, int attempts, StringData opStr, - StringData ns, + const NamespaceStringOrUUID& nssOrUUID, const TemporarilyUnavailableException& e) { CurOp::get(opCtx)->debug().additiveMetrics.incrementTemporarilyUnavailableErrors(1); @@ -84,7 +94,7 @@ void handleTemporarilyUnavailableException(OperationContext* opCtx, "reason"_attr = e.reason(), "attempts"_attr = attempts, "operation"_attr = opStr, - logAttrs(NamespaceString(ns))); + "namespace"_attr = toStringForLogging(nssOrUUID)); temporarilyUnavailableErrorsEscaped.increment(1); throw e; } @@ -99,13 +109,12 @@ void handleTemporarilyUnavailableException(OperationContext* opCtx, "attempts"_attr = attempts, "operation"_attr = opStr, "sleepFor"_attr = sleepFor, - logAttrs(NamespaceString(ns))); + "namespace"_attr = toStringForLogging(nssOrUUID)); opCtx->sleepFor(sleepFor); } void handleTemporarilyUnavailableExceptionInTransaction(OperationContext* opCtx, StringData opStr, - StringData ns, const TemporarilyUnavailableException& e) { // Since WriteConflicts are tagged as TransientTransactionErrors and TemporarilyUnavailable // errors are not, we convert the error to a WriteConflict to allow users of multi-document @@ -118,7 +127,7 @@ void handleTemporarilyUnavailableExceptionInTransaction(OperationContext* opCtx, void handleTransactionTooLargeForCacheException(OperationContext* opCtx, int* writeConflictAttempts, StringData opStr, - StringData ns, + const NamespaceStringOrUUID& nssOrUUID, const TransactionTooLargeForCacheException& e) { transactionTooLargeForCacheErrors.increment(1); if (opCtx->writesAreReplicated()) { @@ -133,7 +142,8 @@ void handleTransactionTooLargeForCacheException(OperationContext* opCtx, // Handle as write conflict. 
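The exception_util changes above thread a `NamespaceStringOrUUID` and a reason string through the write-conflict and temporarily-unavailable handlers; the latter sleeps with a growing backoff and eventually lets the error escape (the hunk continues below). A self-contained sketch of that backoff-or-escape decision, using made-up constants rather than the server's tunable parameters:

```cpp
#include <algorithm>
#include <chrono>
#include <iostream>
#include <thread>

// Illustrative constants; the real handler derives its limits from server parameters.
constexpr int kMaxAttempts = 10;
constexpr auto kBaseBackoff = std::chrono::milliseconds(10);
constexpr auto kMaxBackoff = std::chrono::milliseconds(100);

// Returns true if the caller should sleep and retry, false if the error should escape.
bool backoffOrEscape(int attempts) {
    if (attempts > kMaxAttempts) {
        std::cout << "too many retries, letting the error escape\n";
        return false;
    }
    auto sleepFor = std::min(kBaseBackoff * attempts, kMaxBackoff);
    std::cout << "attempt " << attempts << ", backing off for " << sleepFor.count() << "ms\n";
    std::this_thread::sleep_for(sleepFor);
    return true;
}

int main() {
    for (int attempts = 1; backoffOrEscape(attempts); ++attempts) {
        if (attempts == 3)
            break;  // Pretend the operation finally succeeded.
    }
}
```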
CurOp::get(opCtx)->debug().additiveMetrics.incrementWriteConflicts(1); - logWriteConflictAndBackoff(*writeConflictAttempts, opStr, ns); + logWriteConflictAndBackoff( + *writeConflictAttempts, opStr, e.reason(), NamespaceStringOrUUID(nssOrUUID)); ++(*writeConflictAttempts); opCtx->recoveryUnit()->abandonSnapshot(); } diff --git a/src/mongo/db/concurrency/exception_util.h b/src/mongo/db/concurrency/exception_util.h index f88e3b7de7a4b..cbe2fcf7756c1 100644 --- a/src/mongo/db/concurrency/exception_util.h +++ b/src/mongo/db/concurrency/exception_util.h @@ -29,9 +29,21 @@ #pragma once +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" @@ -45,12 +57,15 @@ extern FailPoint skipWriteConflictRetries; * @param attempt - what attempt is this, 1 based * @param operation - e.g. "update" */ -void logWriteConflictAndBackoff(int attempt, StringData operation, StringData ns); +void logWriteConflictAndBackoff(int attempt, + StringData operation, + StringData reason, + const NamespaceStringOrUUID& nssOrUUID); void handleTemporarilyUnavailableException(OperationContext* opCtx, int attempts, StringData opStr, - StringData ns, + const NamespaceStringOrUUID& nssOrUUID, const TemporarilyUnavailableException& e); /** @@ -58,26 +73,34 @@ void handleTemporarilyUnavailableException(OperationContext* opCtx, */ void handleTemporarilyUnavailableExceptionInTransaction(OperationContext* opCtx, StringData opStr, - StringData ns, const TemporarilyUnavailableException& e); void handleTransactionTooLargeForCacheException(OperationContext* opCtx, int* writeConflictAttempts, StringData opStr, - StringData ns, + const NamespaceStringOrUUID& nssOrUUID, const TransactionTooLargeForCacheException& e); +namespace error_details { +/** + * A faster alternative to `iasserted`, designed to throw exceptions for unexceptional events on the + * critical execution path (e.g., `WriteConflict`). + */ +template +[[noreturn]] void throwExceptionFor(std::string reason) { + throw ExceptionFor({ec, std::move(reason)}); +} +} // namespace error_details + /** * A `WriteConflictException` is thrown if during a write, two or more operations conflict with each * other. For example if two operations get the same version of a document, and then both try to * modify that document, this exception will get thrown by one of them. */ [[noreturn]] inline void throwWriteConflictException(StringData context) { - Status status{ - ErrorCodes::WriteConflict, - str::stream() << "Caused by :: "_sd << context - << " :: Please retry your operation or multi-document transaction."_sd}; - iasserted(status); + error_details::throwExceptionFor( + "Caused by :: {} :: Please retry your operation or multi-document transaction."_format( + context)); } /** @@ -86,8 +109,8 @@ void handleTransactionTooLargeForCacheException(OperationContext* opCtx, * be retried internally by the `writeConflictRetry` helper a finite number of times before * eventually being returned. 
*/ -[[noreturn]] inline void throwTemporarilyUnavailableException(StringData context) { - iasserted({ErrorCodes::TemporarilyUnavailable, context}); +[[noreturn]] inline void throwTemporarilyUnavailableException(std::string context) { + error_details::throwExceptionFor(std::move(context)); } /** @@ -96,8 +119,8 @@ void handleTransactionTooLargeForCacheException(OperationContext* opCtx, * transaction state. This helps to avoid retrying, maybe indefinitely, a transaction which would * never be able to complete. */ -[[noreturn]] inline void throwTransactionTooLargeForCache(StringData context) { - iasserted({ErrorCodes::TransactionTooLargeForCache, context}); +[[noreturn]] inline void throwTransactionTooLargeForCache(std::string context) { + error_details::throwExceptionFor(std::move(context)); } /** @@ -114,7 +137,10 @@ void handleTransactionTooLargeForCacheException(OperationContext* opCtx, * invocation of the argument function f without any exception handling and retry logic. */ template -auto writeConflictRetry(OperationContext* opCtx, StringData opStr, StringData ns, F&& f) { +auto writeConflictRetry(OperationContext* opCtx, + StringData opStr, + const NamespaceStringOrUUID& nssOrUUID, + F&& f) { invariant(opCtx); invariant(opCtx->lockState()); invariant(opCtx->recoveryUnit()); @@ -129,7 +155,7 @@ auto writeConflictRetry(OperationContext* opCtx, StringData opStr, StringData ns return f(); } catch (TemporarilyUnavailableException const& e) { if (opCtx->inMultiDocumentTransaction()) { - handleTemporarilyUnavailableExceptionInTransaction(opCtx, opStr, ns, e); + handleTemporarilyUnavailableExceptionInTransaction(opCtx, opStr, e); } throw; } @@ -140,15 +166,17 @@ auto writeConflictRetry(OperationContext* opCtx, StringData opStr, StringData ns while (true) { try { return f(); - } catch (WriteConflictException const&) { + } catch (WriteConflictException const& e) { CurOp::get(opCtx)->debug().additiveMetrics.incrementWriteConflicts(1); - logWriteConflictAndBackoff(writeConflictAttempts, opStr, ns); + logWriteConflictAndBackoff(writeConflictAttempts, opStr, e.reason(), nssOrUUID); ++writeConflictAttempts; opCtx->recoveryUnit()->abandonSnapshot(); } catch (TemporarilyUnavailableException const& e) { - handleTemporarilyUnavailableException(opCtx, ++attemptsTempUnavailable, opStr, ns, e); + handleTemporarilyUnavailableException( + opCtx, ++attemptsTempUnavailable, opStr, nssOrUUID, e); } catch (TransactionTooLargeForCacheException const& e) { - handleTransactionTooLargeForCacheException(opCtx, &writeConflictAttempts, opStr, ns, e); + handleTransactionTooLargeForCacheException( + opCtx, &writeConflictAttempts, opStr, nssOrUUID, e); } } } diff --git a/src/mongo/db/concurrency/fast_map_noalloc.h b/src/mongo/db/concurrency/fast_map_noalloc.h index 56cef7c61dd80..acfbe7e28763a 100644 --- a/src/mongo/db/concurrency/fast_map_noalloc.h +++ b/src/mongo/db/concurrency/fast_map_noalloc.h @@ -29,7 +29,10 @@ #pragma once +#include +#include #include +#include #include "mongo/base/static_assert.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/db/concurrency/fast_map_noalloc_test.cpp b/src/mongo/db/concurrency/fast_map_noalloc_test.cpp index 3357b5608154e..2f970cf045157 100644 --- a/src/mongo/db/concurrency/fast_map_noalloc_test.cpp +++ b/src/mongo/db/concurrency/fast_map_noalloc_test.cpp @@ -29,9 +29,14 @@ #include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/concurrency/fast_map_noalloc.h" #include "mongo/db/concurrency/lock_manager_defs.h" -#include "mongo/unittest/unittest.h" 
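The updated `writeConflictRetry` above now takes the namespace as a `NamespaceStringOrUUID` and forwards the exception's `reason()` to the logger, but its overall shape is unchanged: run the callback, and on a conflict log the attempt, abandon state, and try again. A simplified standalone skeleton of that loop (stand-in exception type, no snapshot handling):

```cpp
#include <exception>
#include <iostream>
#include <string>

// Stand-in for the server's WriteConflictException; illustrative only.
struct WriteConflict : std::exception {
    std::string reason = "write conflict";
};

// Skeleton of a write-conflict retry helper: run the callback, and on a
// conflict log the attempt, do cleanup, and try again.
template <typename F>
auto retryOnWriteConflict(const std::string& opStr, const std::string& ns, F&& f) {
    int attempts = 0;
    while (true) {
        try {
            return f();
        } catch (const WriteConflict& e) {
            std::cout << "caught " << e.reason << " during " << opStr << " on " << ns
                      << ", attempt " << attempts << '\n';
            ++attempts;
            // The real helper also abandons the storage snapshot here before retrying.
        }
    }
}

int main() {
    int tries = 0;
    int result = retryOnWriteConflict("insert", "db.coll", [&] {
        if (++tries < 3)
            throw WriteConflict{};
        return tries;
    });
    std::cout << "succeeded after " << result << " tries\n";
}
```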
+#include "mongo/stdx/unordered_map.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/concurrency/flow_control_ticketholder.cpp b/src/mongo/db/concurrency/flow_control_ticketholder.cpp index cc5a9e0300f74..dc6f17dcdec7c 100644 --- a/src/mongo/db/concurrency/flow_control_ticketholder.cpp +++ b/src/mongo/db/concurrency/flow_control_ticketholder.cpp @@ -28,12 +28,23 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/concurrency/flow_control_ticketholder.h" +#include +#include "mongo/db/client.h" +#include "mongo/db/concurrency/flow_control_ticketholder.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/concurrency/flow_control_ticketholder.h b/src/mongo/db/concurrency/flow_control_ticketholder.h index 8247318a8427d..5d32fb108ec04 100644 --- a/src/mongo/db/concurrency/flow_control_ticketholder.h +++ b/src/mongo/db/concurrency/flow_control_ticketholder.h @@ -29,6 +29,9 @@ #pragma once +#include +#include + #include "mongo/bson/bsonobjbuilder.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/concurrency/lock_gdb_test.py b/src/mongo/db/concurrency/lock_gdb_test.py new file mode 100644 index 0000000000000..b9b1d8dc89fa1 --- /dev/null +++ b/src/mongo/db/concurrency/lock_gdb_test.py @@ -0,0 +1,15 @@ +"""Script to be invoked by GDB for testing lock manager pretty printer. +""" + +import gdb +import traceback + +try: + gdb.execute('break main') + gdb.execute('run') + gdb_type = lookup_type('mongo::LockManager') + assert gdb_type is not None, 'Failed to lookup type mongo::LockManager' + gdb.write('TEST PASSED\n') +except Exception as err: + gdb.write('TEST FAILED -- {!s}\n'.format(traceback.format_exc())) + gdb.execute('quit 1', to_string=True) diff --git a/src/mongo/db/concurrency/lock_manager.cpp b/src/mongo/db/concurrency/lock_manager.cpp index f727eda63e8f1..446d3bf32c9e6 100644 --- a/src/mongo/db/concurrency/lock_manager.cpp +++ b/src/mongo/db/concurrency/lock_manager.cpp @@ -27,32 +27,42 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - #include "mongo/db/concurrency/lock_manager.h" +#include +#include #include #include +#include +#include +#include +#include +#include +#include + +#include +#include +#include -#include "mongo/base/data_type_endian.h" -#include "mongo/base/data_view.h" #include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/config.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_request_list.h" #include "mongo/db/concurrency/locker.h" -#include "mongo/db/concurrency/resource_catalog.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_truncation.h" +#include "mongo/stdx/thread.h" #include "mongo/util/assert_util.h" #include "mongo/util/decorable.h" -#include "mongo/util/str.h" -#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault - namespace mongo { namespace { @@ -864,14 +874,9 @@ LockManager::Partition* LockManager::_getPartition(LockRequest* request) const { return &_partitions[request->locker->getId() % _numPartitions]; } -bool LockManager::hasConflictingRequests(const LockRequest* request) const { - auto lock = request->lock; - if (!lock) { - return false; - } - - stdx::lock_guard lk(_getBucket(lock->resourceId)->mutex); - return !lock->conflictList.empty(); +bool LockManager::hasConflictingRequests(ResourceId resId, const LockRequest* request) const { + stdx::lock_guard lk(_getBucket(resId)->mutex); + return request->lock ? !request->lock->conflictList.empty() : false; } void LockManager::dump() const { @@ -974,28 +979,6 @@ LockHead* LockManager::LockBucket::findOrInsert(ResourceId resId) { return lock; } -// -// ResourceId -// -std::string ResourceId::toString() const { - StringBuilder ss; - ss << "{" << _fullHash << ": " << resourceTypeName(getType()) << ", " << getHashId(); - if (getType() == RESOURCE_MUTEX) { - ss << ", " << Lock::ResourceMutex::getName(*this); - } - - if (getType() == RESOURCE_DATABASE || getType() == RESOURCE_COLLECTION) { - if (auto resourceName = ResourceCatalog::get(getGlobalServiceContext()).name(*this)) { - ss << ", " << *resourceName; - } - } - - ss << "}"; - - return ss.str(); -} - - // // LockRequest // diff --git a/src/mongo/db/concurrency/lock_manager.h b/src/mongo/db/concurrency/lock_manager.h index f53fd104cef0f..d0870e91f0c43 100644 --- a/src/mongo/db/concurrency/lock_manager.h +++ b/src/mongo/db/concurrency/lock_manager.h @@ -35,7 +35,9 @@ #include #include "mongo/bson/bsonobj.h" -#include "mongo/config.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/lock_request_list.h" #include "mongo/platform/atomic_word.h" @@ -63,8 +65,6 @@ class LockManager { * Retrieves the lock manager instance attached to this ServiceContext. * The lock manager is now a decoration on the service context and this is the accessor that * most callers should prefer outside of startup, lock internals, and debugger scripts. - * Using the ServiceContext and OperationContext versions where possible is preferable to - * getGlobalLockManager(). 
*/ static LockManager* get(ServiceContext* service); static LockManager* get(ServiceContext& service); @@ -146,10 +146,10 @@ class LockManager { void cleanupUnusedLocks(); /** - * Returns whether there are any conflicting lock requests for the resource associated with the - * given lock request. Note that this return value may be immediately stale. + * Returns whether there are any conflicting lock requests for the given resource and lock + * request. Note that the returned value may be immediately stale. */ - bool hasConflictingRequests(const LockRequest* request) const; + bool hasConflictingRequests(ResourceId resId, const LockRequest* request) const; /** * Dumps the contents of all locks to the log. diff --git a/src/mongo/db/concurrency/lock_manager_defs.cpp b/src/mongo/db/concurrency/lock_manager_defs.cpp index a437d3b59641e..22d06cad17c65 100644 --- a/src/mongo/db/concurrency/lock_manager_defs.cpp +++ b/src/mongo/db/concurrency/lock_manager_defs.cpp @@ -27,15 +27,19 @@ * it in the license file. */ -#include "lock_manager_defs.h" +#include "mongo/db/concurrency/lock_manager_defs.h" + +#include + +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/concurrency/resource_catalog.h" namespace mongo { // Hardcoded resource IDs. -const ResourceId resourceIdLocalDB = - ResourceId(RESOURCE_DATABASE, DatabaseName(boost::none, "local")); -const ResourceId resourceIdAdminDB = - ResourceId(RESOURCE_DATABASE, DatabaseName(boost::none, "admin")); +const ResourceId resourceIdLocalDB = ResourceId(RESOURCE_DATABASE, DatabaseName::kLocal); +const ResourceId resourceIdAdminDB = ResourceId(RESOURCE_DATABASE, DatabaseName::kAdmin); const ResourceId resourceIdGlobal = ResourceId(RESOURCE_GLOBAL, static_cast(ResourceGlobalId::kGlobal)); const ResourceId resourceIdParallelBatchWriterMode = @@ -45,4 +49,18 @@ const ResourceId resourceIdFeatureCompatibilityVersion = ResourceId( const ResourceId resourceIdReplicationStateTransitionLock = ResourceId( RESOURCE_GLOBAL, static_cast(ResourceGlobalId::kReplicationStateTransitionLock)); +std::string ResourceId::toString() const { + StringBuilder ss; + ss << "{" << _fullHash << ": " << resourceTypeName(getType()) << ", " << getHashId(); + if (getType() == RESOURCE_DATABASE || getType() == RESOURCE_COLLECTION || + getType() == RESOURCE_MUTEX) { + if (auto resourceName = ResourceCatalog::get().name(*this)) { + ss << ", " << *resourceName; + } + } + ss << "}"; + + return ss.str(); +} + } // namespace mongo diff --git a/src/mongo/db/concurrency/lock_manager_defs.h b/src/mongo/db/concurrency/lock_manager_defs.h index 026af0b28f442..b99a66fafe8b8 100644 --- a/src/mongo/db/concurrency/lock_manager_defs.h +++ b/src/mongo/db/concurrency/lock_manager_defs.h @@ -29,19 +29,23 @@ #pragma once +#include +#include #include #include #include #include -#include - #include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" #include "mongo/base/static_assert.h" #include "mongo/base/string_data.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/murmur3.h" namespace mongo { @@ -159,11 +163,21 @@ enum ResourceType { /** Used for global exclusive operations */ RESOURCE_GLOBAL, + /** Encompasses resources belonging to a tenant, if in multi-tenant mode.*/ + RESOURCE_TENANT, + /** Generic resources, used for multi-granularity locking, 
together with the above locks */ RESOURCE_DATABASE, RESOURCE_COLLECTION, RESOURCE_METADATA, + /** + * Resource DDL types used for multi-granularity locking on DDL operations. + * These resources are not related to the storage hierarchy. + */ + RESOURCE_DDL_DATABASE, + RESOURCE_DDL_COLLECTION, + /** * Resource type used for locking general resources not related to the storage hierarchy. These * can't be created manually, use Lock::ResourceMutex::ResourceMutex() instead. @@ -190,8 +204,15 @@ enum class ResourceGlobalId : uint8_t { /** * Maps the resource id to a human-readable string. */ -static const char* ResourceTypeNames[] = { - "Invalid", "Global", "Database", "Collection", "Metadata", "Mutex"}; +static const char* ResourceTypeNames[] = {"Invalid", + "Global", + "Tenant", + "Database", + "Collection", + "Metadata", + "DDLDatabase", + "DDLCollection", + "Mutex"}; /** * Maps the global resource id to a human-readable string. @@ -229,29 +250,33 @@ static const char* resourceGlobalIdName(ResourceGlobalId id) { * Uniquely identifies a lockable resource. */ class ResourceId { - // We only use 3 bits for the resource type in the ResourceId hash - enum { resourceTypeBits = 3 }; + // We only use 4 bits for the resource type in the ResourceId hash + enum { resourceTypeBits = 4 }; MONGO_STATIC_ASSERT(ResourceTypesCount <= (1 << resourceTypeBits)); public: ResourceId() : _fullHash(0) {} ResourceId(ResourceType type, const NamespaceString& nss) - : _fullHash(fullHash(type, hashStringData(nss.toStringWithTenantId()))) { + : _fullHash(fullHash(type, hashStringData(nss.toStringForResourceId()))) { verifyNoResourceMutex(type); } ResourceId(ResourceType type, const DatabaseName& dbName) - : _fullHash(fullHash(type, hashStringData(dbName.toStringWithTenantId()))) { + : _fullHash(fullHash(type, hashStringData(dbName.toStringForResourceId()))) { verifyNoResourceMutex(type); } - ResourceId(ResourceType type, const std::string& str) - : _fullHash(fullHash(type, hashStringData(str))) { - // Resources of type database or collection must never be passed as a raw string - invariant(type != RESOURCE_DATABASE && type != RESOURCE_COLLECTION); + ResourceId(ResourceType type, StringData str) : _fullHash(fullHash(type, hashStringData(str))) { + // Resources of type database, collection, or tenant must never be passed as a raw string. 
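The `ResourceId` changes above widen the type field from 3 to 4 bits to make room for the new `RESOURCE_TENANT` and DDL resource types, leaving 60 bits of hash (as the updated `Masking` test below asserts). A small sketch of one plausible packing, assuming the type sits in the top bits; the body of the real `fullHash()` is not shown in this hunk:

```cpp
#include <cstdint>
#include <iostream>

// Widening the type field from 3 to 4 bits makes room for the new TENANT and
// DDL resource types and leaves 60 bits of hash. The packing below assumes the
// type lives in the top bits; the real fullHash() is not part of this hunk.
constexpr int kResourceTypeBits = 4;
constexpr std::uint64_t kMaxHash = (1ULL << (64 - kResourceTypeBits)) - 1;  // 60 usable bits

constexpr std::uint64_t pack(std::uint64_t type, std::uint64_t hashId) {
    return (type << (64 - kResourceTypeBits)) | (hashId & kMaxHash);
}

constexpr std::uint64_t typeOf(std::uint64_t fullHash) {
    return fullHash >> (64 - kResourceTypeBits);
}

constexpr std::uint64_t hashOf(std::uint64_t fullHash) {
    return fullHash & kMaxHash;
}

int main() {
    const auto full = pack(/*type=*/5, /*hashId=*/kMaxHash);
    std::cout << "type=" << typeOf(full) << " hash=" << hashOf(full)
              << " maxHash=" << kMaxHash << '\n';  // type and hash round-trip intact
}
```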
+ invariant(type != RESOURCE_DATABASE && type != RESOURCE_COLLECTION && + type != RESOURCE_TENANT); verifyNoResourceMutex(type); } ResourceId(ResourceType type, uint64_t hashId) : _fullHash(fullHash(type, hashId)) { verifyNoResourceMutex(type); } + ResourceId(ResourceType type, const TenantId& tenantId) + : _fullHash{fullHash(type, hashStringData(tenantId.toString()))} { + verifyNoResourceMutex(type); + } bool isValid() const { return getType() != RESOURCE_INVALID; @@ -282,13 +307,10 @@ class ResourceId { } private: - ResourceId(uint64_t fullHash) : _fullHash(fullHash) {} + friend class ResourceCatalog; + friend class ResourceIdTest; - // Used to allow Lock::ResourceMutex to create ResourceIds with RESOURCE_MUTEX type - static ResourceId makeMutexResourceId(uint64_t hashId) { - return ResourceId(fullHash(ResourceType::RESOURCE_MUTEX, hashId)); - } - friend class Lock; + ResourceId(uint64_t fullHash) : _fullHash(fullHash) {} void verifyNoResourceMutex(ResourceType type) { invariant( @@ -309,9 +331,7 @@ class ResourceId { } static uint64_t hashStringData(StringData str) { - char hash[16]; - MurmurHash3_x64_128(str.rawData(), str.size(), 0, hash); - return static_cast(ConstDataView(hash).read>()); + return murmur3(str, 0 /*seed*/); } }; diff --git a/src/mongo/db/concurrency/lock_manager_test.cpp b/src/mongo/db/concurrency/lock_manager_test.cpp index 9cb8601ea2061..1e0bda05fe2f3 100644 --- a/src/mongo/db/concurrency/lock_manager_test.cpp +++ b/src/mongo/db/concurrency/lock_manager_test.cpp @@ -27,17 +27,41 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/lock_manager.h" #include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/lock_manager_test_help.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" namespace mongo { class LockManagerTest : public ServiceContextTest {}; -TEST(ResourceId, Semantics) { +class ResourceIdTest : public unittest::Test { +protected: + constexpr int getResourceTypeBits() { + return ResourceId::resourceTypeBits; + } +}; + +TEST(ResourceIdTest, Semantics) { ResourceId resIdDb(RESOURCE_DATABASE, 324334234); ASSERT(resIdDb.getType() == RESOURCE_DATABASE); ASSERT(resIdDb.getHashId() == 324334234); @@ -60,8 +84,9 @@ TEST(ResourceId, Semantics) { ASSERT_EQUALS(resId, resIdColl); } -TEST(ResourceId, Masking) { - const uint64_t maxHash = (1ULL << 61) - 1; // Only 61 bits usable for hash +TEST_F(ResourceIdTest, Masking) { + const uint64_t maxHash = + (1ULL << (64 - getResourceTypeBits())) - 1; // Only 60 bits usable for hash ResourceType resources[3] = {RESOURCE_GLOBAL, RESOURCE_COLLECTION, RESOURCE_METADATA}; uint64_t hashes[3] = {maxHash, maxHash / 3, maxHash / 3 * 2}; @@ -75,8 +100,6 @@ TEST(ResourceId, Masking) { } } -class ResourceIdTest : public unittest::Test {}; - DEATH_TEST_F(ResourceIdTest, StringConstructorMustNotBeCollection, "invariant") { ResourceId(RESOURCE_COLLECTION, "TestDB.collection"); } @@ -93,6 +116,10 @@ DEATH_TEST_F(ResourceIdTest, CantCreateResourceMutexDirectly, "invariant") { // LockManager // +TEST_F(LockManagerTest, IsModeCovered) { + 
ASSERT(isModeCovered(MODE_IS, MODE_IX)); +} + TEST_F(LockManagerTest, Grant) { LockManager lockMgr; const ResourceId resId( @@ -1012,13 +1039,13 @@ TEST_F(LockManagerTest, HasConflictingRequests) { LockerImpl lockerIX{getServiceContext()}; LockRequestCombo requestIX{&lockerIX}; ASSERT_EQ(lockMgr.lock(resId, &requestIX, LockMode::MODE_IX), LockResult::LOCK_OK); - ASSERT_FALSE(lockMgr.hasConflictingRequests(&requestIX)); + ASSERT_FALSE(lockMgr.hasConflictingRequests(resId, &requestIX)); LockerImpl lockerX{getServiceContext()}; LockRequestCombo requestX{&lockerX}; ASSERT_EQ(lockMgr.lock(resId, &requestX, LockMode::MODE_X), LockResult::LOCK_WAITING); - ASSERT_TRUE(lockMgr.hasConflictingRequests(&requestIX)); - ASSERT_TRUE(lockMgr.hasConflictingRequests(&requestX)); + ASSERT_TRUE(lockMgr.hasConflictingRequests(resId, &requestIX)); + ASSERT_TRUE(lockMgr.hasConflictingRequests(resId, &requestX)); ASSERT(lockMgr.unlock(&requestIX)); ASSERT(lockMgr.unlock(&requestX)); diff --git a/src/mongo/db/concurrency/lock_manager_test_help.h b/src/mongo/db/concurrency/lock_manager_test_help.h index 5eba9d33dbdb8..5149fa7c9e727 100644 --- a/src/mongo/db/concurrency/lock_manager_test_help.h +++ b/src/mongo/db/concurrency/lock_manager_test_help.h @@ -30,23 +30,10 @@ #pragma once #include "mongo/db/concurrency/lock_manager.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/locker_impl.h" namespace mongo { -class LockerForTests : public LockerImpl { -public: - explicit LockerForTests(OperationContext* opCtx, LockMode globalLockMode) - : LockerImpl(opCtx->getServiceContext()) { - lockGlobal(opCtx, globalLockMode); - } - - ~LockerForTests() { - unlockGlobal(); - } -}; - - class TrackingLockGrantNotification : public LockGrantNotification { public: TrackingLockGrantNotification() : numNotifies(0), lastResult(LOCK_INVALID) {} diff --git a/src/mongo/db/concurrency/lock_state.cpp b/src/mongo/db/concurrency/lock_state.cpp deleted file mode 100644 index b19fde7e391e6..0000000000000 --- a/src/mongo/db/concurrency/lock_state.cpp +++ /dev/null @@ -1,1196 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - - -#include "mongo/platform/basic.h" - -#include "mongo/db/concurrency/lock_state.h" - -#include - -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/json.h" -#include "mongo/db/concurrency/flow_control_ticketholder.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/service_context.h" -#include "mongo/db/storage/flow_control.h" -#include "mongo/db/storage/ticketholder_manager.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/compiler.h" -#include "mongo/stdx/new.h" -#include "mongo/util/background.h" -#include "mongo/util/concurrency/ticketholder.h" -#include "mongo/util/debug_util.h" -#include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/str.h" -#include "mongo/util/testing_proctor.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault - - -namespace mongo { - -MONGO_FAIL_POINT_DEFINE(failNonIntentLocksIfWaitNeeded); -MONGO_FAIL_POINT_DEFINE(enableTestOnlyFlagforRSTL); - -namespace { - -// Ignore data races in certain functions when running with TSAN. For performance reasons, -// diagnostic commands are expected to race with concurrent lock acquisitions while gathering -// statistics. -#if defined(__has_feature) && __has_feature(thread_sanitizer) -#define MONGO_TSAN_IGNORE __attribute__((no_sanitize("thread"))) -#else -#define MONGO_TSAN_IGNORE -#endif - -/** - * Tracks global (across all clients) lock acquisition statistics, partitioned into multiple - * buckets to minimize concurrent access conflicts. - * - * Each client has a LockerId that monotonically increases across all client instances. The - * LockerId % 8 is used to index into one of 8 LockStats instances. These LockStats objects must be - * atomically accessed, so maintaining 8 that are indexed by LockerId reduces client conflicts and - * improves concurrent write access. A reader, to collect global lock statics for reporting, will - * sum the results of all 8 disjoint 'buckets' of stats. - */ -class PartitionedInstanceWideLockStats { - PartitionedInstanceWideLockStats(const PartitionedInstanceWideLockStats&) = delete; - PartitionedInstanceWideLockStats& operator=(const PartitionedInstanceWideLockStats&) = delete; - -public: - PartitionedInstanceWideLockStats() {} - - void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) { - _get(id).recordAcquisition(resId, mode); - } - - void recordWait(LockerId id, ResourceId resId, LockMode mode) { - _get(id).recordWait(resId, mode); - } - - void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) { - _get(id).recordWaitTime(resId, mode, waitMicros); - } - - void report(SingleThreadedLockStats* outStats) const { - for (int i = 0; i < NumPartitions; i++) { - outStats->append(_partitions[i].stats); - } - } - - void reset() { - for (int i = 0; i < NumPartitions; i++) { - _partitions[i].stats.reset(); - } - } - -private: - // This alignment is a best effort approach to ensure that each partition falls on a - // separate page/cache line in order to avoid false sharing. 
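`lock_state.cpp` is deleted here (judging by the header changes, its contents move to `locker_impl.cpp`). The removed `PartitionedInstanceWideLockStats` keeps eight cache-line-aligned stat buckets indexed by `LockerId % 8` so concurrent writers rarely collide, and a reader sums all buckets. A compilable sketch of that partitioning pattern using plain counters:

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

// Sketch of the partitioned-stats idea: writers hash their LockerId into one of
// N cache-line-aligned buckets so they rarely contend; a reader sums every bucket.
// 64 is a typical cache-line size; the removed code aligns to
// hardware_destructive_interference_size for the same purpose.
struct alignas(64) Bucket {
    std::atomic<std::uint64_t> acquisitions{0};
};

class PartitionedStats {
public:
    static constexpr int kNumPartitions = 8;

    void recordAcquisition(std::uint64_t lockerId) {
        _buckets[lockerId % kNumPartitions].acquisitions.fetch_add(1, std::memory_order_relaxed);
    }

    std::uint64_t total() const {
        std::uint64_t sum = 0;
        for (const auto& bucket : _buckets)
            sum += bucket.acquisitions.load(std::memory_order_relaxed);
        return sum;
    }

private:
    Bucket _buckets[kNumPartitions];
};

int main() {
    PartitionedStats stats;
    for (std::uint64_t id = 1; id <= 100; ++id)
        stats.recordAcquisition(id);
    std::cout << "total acquisitions: " << stats.total() << '\n';  // prints 100
}
```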
- struct alignas(stdx::hardware_destructive_interference_size) AlignedLockStats { - AtomicLockStats stats; - }; - - enum { NumPartitions = 8 }; - - - AtomicLockStats& _get(LockerId id) { - return _partitions[id % NumPartitions].stats; - } - - - AlignedLockStats _partitions[NumPartitions]; -}; - -// How often (in millis) to check for deadlock if a lock has not been granted for some time -const Milliseconds MaxWaitTime = Milliseconds(500); - -// Dispenses unique LockerId identifiers -AtomicWord idCounter(0); - -// Tracks lock statistics across all Locker instances. Distributes stats across multiple buckets -// indexed by LockerId in order to minimize concurrent access conflicts. -PartitionedInstanceWideLockStats globalStats; - -} // namespace - -bool LockerImpl::_shouldDelayUnlock(ResourceId resId, LockMode mode) const { - switch (resId.getType()) { - case RESOURCE_MUTEX: - return false; - - case RESOURCE_GLOBAL: - case RESOURCE_DATABASE: - case RESOURCE_COLLECTION: - case RESOURCE_METADATA: - break; - - default: - MONGO_UNREACHABLE; - } - - switch (mode) { - case MODE_X: - case MODE_IX: - return true; - - case MODE_IS: - case MODE_S: - return _sharedLocksShouldTwoPhaseLock; - - default: - MONGO_UNREACHABLE; - } -} - -bool LockerImpl::isW() const { - return getLockMode(resourceIdGlobal) == MODE_X; -} - -bool LockerImpl::isR() const { - return getLockMode(resourceIdGlobal) == MODE_S; -} - -bool LockerImpl::isLocked() const { - return getLockMode(resourceIdGlobal) != MODE_NONE; -} - -bool LockerImpl::isWriteLocked() const { - return isLockHeldForMode(resourceIdGlobal, MODE_IX); -} - -bool LockerImpl::isReadLocked() const { - return isLockHeldForMode(resourceIdGlobal, MODE_IS); -} - -bool LockerImpl::isRSTLExclusive() const { - return getLockMode(resourceIdReplicationStateTransitionLock) == MODE_X; -} - -bool LockerImpl::isRSTLLocked() const { - return getLockMode(resourceIdReplicationStateTransitionLock) != MODE_NONE; -} - -void LockerImpl::dump() const { - struct Entry { - ResourceId key; - LockRequest::Status status; - LockMode mode; - unsigned int recursiveCount; - unsigned int unlockPending; - - BSONObj toBSON() const { - BSONObjBuilder b; - b.append("key", key.toString()); - b.append("status", lockRequestStatusName(status)); - b.append("recursiveCount", static_cast(recursiveCount)); - b.append("unlockPending", static_cast(unlockPending)); - b.append("mode", modeName(mode)); - return b.obj(); - } - std::string toString() const { - return tojson(toBSON()); - } - }; - std::vector entries; - { - auto lg = stdx::lock_guard(_lock); - for (auto it = _requests.begin(); !it.finished(); it.next()) - entries.push_back( - {it.key(), it->status, it->mode, it->recursiveCount, it->unlockPending}); - } - LOGV2(20523, - "Locker id {id} status: {requests}", - "Locker status", - "id"_attr = _id, - "requests"_attr = entries); -} - -void LockerImpl::_dumpLockerAndLockManagerRequests() { - // Log the _requests that this locker holds. This will provide identifying information to cross - // reference with the LockManager dump below for extra information. - dump(); - - // Log the LockManager's lock information. Given the locker 'dump()' above, we should be able to - // easily cross reference to find the lock info matching this operation. The LockManager can - // safely access (under internal locks) the LockRequest data that the locker cannot. 
- BSONObjBuilder builder; - auto lockToClientMap = LockManager::getLockToClientMap(getGlobalServiceContext()); - getGlobalLockManager()->getLockInfoBSON(lockToClientMap, &builder); - auto lockInfo = builder.done(); - LOGV2_ERROR(5736000, "Operation ending while holding locks.", "LockInfo"_attr = lockInfo); -} - - -// -// CondVarLockGrantNotification -// - -CondVarLockGrantNotification::CondVarLockGrantNotification() { - clear(); -} - -void CondVarLockGrantNotification::clear() { - _result = LOCK_INVALID; -} - -LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) { - stdx::unique_lock lock(_mutex); - return _cond.wait_for( - lock, timeout.toSystemDuration(), [this] { return _result != LOCK_INVALID; }) - ? _result - : LOCK_TIMEOUT; -} - -LockResult CondVarLockGrantNotification::wait(OperationContext* opCtx, Milliseconds timeout) { - invariant(opCtx); - stdx::unique_lock lock(_mutex); - if (opCtx->waitForConditionOrInterruptFor( - _cond, lock, timeout, [this] { return _result != LOCK_INVALID; })) { - // Because waitForConditionOrInterruptFor evaluates the predicate before checking for - // interrupt, it is possible that a killed operation can acquire a lock if the request is - // granted quickly. For that reason, it is necessary to check if the operation has been - // killed at least once before accepting the lock grant. - opCtx->checkForInterrupt(); - return _result; - } - return LOCK_TIMEOUT; -} - -void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) { - stdx::unique_lock lock(_mutex); - invariant(_result == LOCK_INVALID); - _result = result; - - _cond.notify_all(); -} - -// -// Locker -// - -LockerImpl::LockerImpl(ServiceContext* serviceCtx) - : _id(idCounter.addAndFetch(1)), - _wuowNestingLevel(0), - _threadId(stdx::this_thread::get_id()), - _ticketHolderManager(TicketHolderManager::get(serviceCtx)) {} - -stdx::thread::id LockerImpl::getThreadId() const { - return _threadId; -} - -void LockerImpl::updateThreadIdToCurrentThread() { - _threadId = stdx::this_thread::get_id(); -} - -void LockerImpl::unsetThreadId() { - _threadId = stdx::thread::id(); // Reset to represent a non-executing thread. -} - -LockerImpl::~LockerImpl() { - // Cannot delete the Locker while there are still outstanding requests, because the - // LockManager may attempt to access deleted memory. Besides it is probably incorrect - // to delete with unaccounted locks anyways. - invariant(!inAWriteUnitOfWork()); - invariant(_numResourcesToUnlockAtEndUnitOfWork == 0); - invariant(!_ticket || !_ticket->valid()); - - if (!_requests.empty()) { - _dumpLockerAndLockManagerRequests(); - } - invariant(_requests.empty()); - - invariant(_modeForTicket == MODE_NONE); -} - -Locker::ClientState LockerImpl::getClientState() const { - auto state = _clientState.load(); - if (state == kActiveReader && hasLockPending()) - state = kQueuedReader; - if (state == kActiveWriter && hasLockPending()) - state = kQueuedWriter; - - return state; -} - -void LockerImpl::reacquireTicket(OperationContext* opCtx) { - invariant(_modeForTicket != MODE_NONE); - auto clientState = _clientState.load(); - const bool reader = isSharedLockMode(_modeForTicket); - - // Ensure that either we don't have a ticket, or the current ticket mode matches the lock mode. - invariant(clientState == kInactive || (clientState == kActiveReader && reader) || - (clientState == kActiveWriter && !reader)); - - // If we already have a ticket, there's nothing to do. 
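The removed `CondVarLockGrantNotification` above blocks the requesting thread on a condition variable until the lock manager posts a result or the wait times out (with an interruptible variant that re-checks for operation interruption). A stripped-down, standalone version of the same pattern:

```cpp
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Sketch of the grant-notification pattern: the waiting thread blocks on a
// condition variable until the lock manager publishes a result or the timeout expires.
class GrantNotification {
public:
    // Returns true if a grant arrived before the deadline, false on timeout.
    bool wait(std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lk(_mutex);
        return _cond.wait_for(lk, timeout, [this] { return _granted; });
    }

    void notify() {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            _granted = true;
        }
        _cond.notify_all();
    }

private:
    std::mutex _mutex;
    std::condition_variable _cond;
    bool _granted = false;
};

int main() {
    GrantNotification notification;
    std::thread granter([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        notification.notify();
    });
    std::cout << (notification.wait(std::chrono::seconds(1)) ? "granted" : "timed out") << '\n';
    granter.join();
}
```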
- if (clientState != kInactive) - return; - - if (_acquireTicket(opCtx, _modeForTicket, Date_t::now())) { - return; - } - - do { - for (auto it = _requests.begin(); it; it.next()) { - invariant(it->mode == LockMode::MODE_IS || it->mode == LockMode::MODE_IX); - opCtx->checkForInterrupt(); - - // If we've reached this point then that means we tried to acquire a ticket but were - // unsuccessful, implying that tickets are currently exhausted. Additionally, since - // we're holding an IS or IX lock for this resource, any pending requests for the same - // resource must be S or X and will not be able to be granted. Thus, since such a - // pending lock request may also be holding a ticket, if there are any present we fail - // this ticket reacquisition in order to avoid a deadlock. - uassert(ErrorCodes::LockTimeout, - fmt::format("Unable to acquire ticket with mode '{}' due to detected lock " - "conflict for resource {}", - _modeForTicket, - it.key().toString()), - !getGlobalLockManager()->hasConflictingRequests(it.objAddr())); - } - } while (!_acquireTicket(opCtx, _modeForTicket, Date_t::now() + Milliseconds{100})); -} - -bool LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline) { - // Upon startup, the holder is not guaranteed to be initialized. - auto holder = _ticketHolderManager ? _ticketHolderManager->getTicketHolder(mode) : nullptr; - const bool reader = isSharedLockMode(mode); - - if (!shouldWaitForTicket() && holder) { - holder->reportImmediatePriorityAdmission(); - } else if (mode != MODE_X && mode != MODE_NONE && holder) { - // MODE_X is exclusive of all other locks, thus acquiring a ticket is unnecessary. - _clientState.store(reader ? kQueuedReader : kQueuedWriter); - // If the ticket wait is interrupted, restore the state of the client. - ScopeGuard restoreStateOnErrorGuard([&] { _clientState.store(kInactive); }); - - // Acquiring a ticket is a potentially blocking operation. This must not be called after a - // transaction timestamp has been set, indicating this transaction has created an oplog - // hole. - invariant(!opCtx->recoveryUnit()->isTimestamped()); - - if (auto ticket = holder->waitForTicketUntil( - _uninterruptibleLocksRequested ? nullptr : opCtx, &_admCtx, deadline)) { - _ticket = std::move(*ticket); - } else { - return false; - } - restoreStateOnErrorGuard.dismiss(); - } - - _clientState.store(reader ? kActiveReader : kActiveWriter); - return true; -} - -void LockerImpl::lockGlobal(OperationContext* opCtx, LockMode mode, Date_t deadline) { - dassert(isLocked() == (_modeForTicket != MODE_NONE)); - if (_modeForTicket == MODE_NONE) { - if (_uninterruptibleLocksRequested) { - // Ignore deadline. - invariant(_acquireTicket(opCtx, mode, Date_t::max())); - } else { - auto beforeAcquire = Date_t::now(); - uassert(ErrorCodes::LockTimeout, - str::stream() << "Unable to acquire ticket with mode '" << mode - << "' within a max lock request timeout of '" - << Date_t::now() - beforeAcquire << "' milliseconds.", - _acquireTicket(opCtx, mode, deadline)); - } - _modeForTicket = mode; - } else if (TestingProctor::instance().isEnabled() && !isModeCovered(mode, _modeForTicket)) { - LOGV2_FATAL( - 6614500, - "Ticket held does not cover requested mode for global lock. 
Global lock upgrades are " - "not allowed", - "held"_attr = modeName(_modeForTicket), - "requested"_attr = modeName(mode)); - } - - const LockResult result = _lockBegin(opCtx, resourceIdGlobal, mode); - // Fast, uncontended path - if (result == LOCK_OK) - return; - - invariant(result == LOCK_WAITING); - _lockComplete(opCtx, resourceIdGlobal, mode, deadline, nullptr); -} - -bool LockerImpl::unlockGlobal() { - if (!unlock(resourceIdGlobal)) { - return false; - } - - invariant(!inAWriteUnitOfWork()); - - LockRequestsMap::Iterator it = _requests.begin(); - while (!it.finished()) { - // If we're here we should only have one reference to any lock. It is a programming - // error for any lock used with multi-granularity locking to have more references than - // the global lock, because every scope starts by calling lockGlobal. - const auto resType = it.key().getType(); - if (resType == RESOURCE_GLOBAL || resType == RESOURCE_MUTEX) { - it.next(); - } else { - invariant(_unlockImpl(&it)); - } - } - - return true; -} - -void LockerImpl::beginWriteUnitOfWork() { - _wuowNestingLevel++; -} - -void LockerImpl::endWriteUnitOfWork() { - invariant(_wuowNestingLevel > 0); - - if (--_wuowNestingLevel > 0) { - // Don't do anything unless leaving outermost WUOW. - return; - } - - LockRequestsMap::Iterator it = _requests.begin(); - while (_numResourcesToUnlockAtEndUnitOfWork > 0) { - if (it->unlockPending) { - invariant(!it.finished()); - _numResourcesToUnlockAtEndUnitOfWork--; - } - while (it->unlockPending > 0) { - // If a lock is converted, unlock() may be called multiple times on a resource within - // the same WriteUnitOfWork. All such unlock() requests must thus be fulfilled here. - it->unlockPending--; - unlock(it.key()); - } - it.next(); - } -} - -void LockerImpl::releaseWriteUnitOfWork(WUOWLockSnapshot* stateOut) { - stateOut->wuowNestingLevel = _wuowNestingLevel; - _wuowNestingLevel = 0; - - for (auto it = _requests.begin(); _numResourcesToUnlockAtEndUnitOfWork > 0; it.next()) { - if (it->unlockPending) { - while (it->unlockPending) { - it->unlockPending--; - stateOut->unlockPendingLocks.push_back({it.key(), it->mode}); - } - _numResourcesToUnlockAtEndUnitOfWork--; - } - } -} - -void LockerImpl::restoreWriteUnitOfWork(const WUOWLockSnapshot& stateToRestore) { - invariant(_numResourcesToUnlockAtEndUnitOfWork == 0); - invariant(!inAWriteUnitOfWork()); - - for (auto& lock : stateToRestore.unlockPendingLocks) { - auto it = _requests.begin(); - while (it && !(it.key() == lock.resourceId && it->mode == lock.mode)) { - it.next(); - } - invariant(!it.finished()); - if (!it->unlockPending) { - _numResourcesToUnlockAtEndUnitOfWork++; - } - it->unlockPending++; - } - // Equivalent to call beginWriteUnitOfWork() multiple times. - _wuowNestingLevel = stateToRestore.wuowNestingLevel; -} - -bool LockerImpl::releaseWriteUnitOfWorkAndUnlock(LockSnapshot* stateOut) { - // Only the global WUOW can be released, since we never need to release and restore - // nested WUOW's. Thus we don't have to remember the nesting level. - invariant(_wuowNestingLevel == 1); - --_wuowNestingLevel; - invariant(!isGlobalLockedRecursively()); - - // All locks should be pending to unlock. - invariant(_requests.size() == _numResourcesToUnlockAtEndUnitOfWork); - for (auto it = _requests.begin(); it; it.next()) { - // No converted lock so we don't need to unlock more than once. 
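The WriteUnitOfWork plumbing being removed above implements two-phase locking: `unlock()` inside a unit of work only marks the request `unlockPending`, and the real releases happen in `endWriteUnitOfWork()`. A toy, std-only sketch of that deferred-unlock bookkeeping (resource names as strings, no lock modes or recursion):

```cpp
#include <iostream>
#include <map>
#include <string>

// Toy model of deferred (two-phase) unlocks: inside a unit of work, unlock()
// only records intent, and the locks are really released when the unit ends.
class TwoPhaseLocks {
public:
    void lock(const std::string& res) {
        ++_held[res];
    }

    void unlock(const std::string& res) {
        if (_inUnitOfWork) {
            ++_unlockPending[res];  // Defer until endUnitOfWork().
            return;
        }
        _release(res);
    }

    void beginUnitOfWork() {
        _inUnitOfWork = true;
    }

    void endUnitOfWork() {
        _inUnitOfWork = false;
        for (auto& [res, pending] : _unlockPending)
            while (pending--)
                _release(res);
        _unlockPending.clear();
    }

    bool isHeld(const std::string& res) const {
        auto it = _held.find(res);
        return it != _held.end() && it->second > 0;
    }

private:
    void _release(const std::string& res) {
        if (--_held[res] == 0)
            _held.erase(res);
    }

    bool _inUnitOfWork = false;
    std::map<std::string, int> _held;
    std::map<std::string, int> _unlockPending;
};

int main() {
    TwoPhaseLocks locker;
    locker.beginUnitOfWork();
    locker.lock("db1");
    locker.unlock("db1");
    std::cout << "held inside unit of work: " << locker.isHeld("db1") << '\n';  // 1
    locker.endUnitOfWork();
    std::cout << "held after unit of work:  " << locker.isHeld("db1") << '\n';  // 0
}
```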
- invariant(it->unlockPending == 1); - it->unlockPending--; - } - _numResourcesToUnlockAtEndUnitOfWork = 0; - - return saveLockStateAndUnlock(stateOut); -} - -void LockerImpl::restoreWriteUnitOfWorkAndLock(OperationContext* opCtx, - const LockSnapshot& stateToRestore) { - if (stateToRestore.globalMode != MODE_NONE) { - restoreLockState(opCtx, stateToRestore); - } - - invariant(_numResourcesToUnlockAtEndUnitOfWork == 0); - for (auto it = _requests.begin(); it; it.next()) { - invariant(_shouldDelayUnlock(it.key(), (it->mode))); - invariant(it->unlockPending == 0); - it->unlockPending++; - } - _numResourcesToUnlockAtEndUnitOfWork = static_cast(_requests.size()); - - beginWriteUnitOfWork(); -} - -void LockerImpl::lock(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline) { - // `lockGlobal` must be called to lock `resourceIdGlobal`. - invariant(resId != resourceIdGlobal); - - const LockResult result = _lockBegin(opCtx, resId, mode); - - // Fast, uncontended path - if (result == LOCK_OK) - return; - - invariant(result == LOCK_WAITING); - _lockComplete(opCtx, resId, mode, deadline, nullptr); -} - -void LockerImpl::downgrade(ResourceId resId, LockMode newMode) { - LockRequestsMap::Iterator it = _requests.find(resId); - getGlobalLockManager()->downgrade(it.objAddr(), newMode); -} - -bool LockerImpl::unlock(ResourceId resId) { - LockRequestsMap::Iterator it = _requests.find(resId); - - // Don't attempt to unlock twice. This can happen when an interrupted global lock is destructed. - if (it.finished()) - return false; - - if (inAWriteUnitOfWork() && _shouldDelayUnlock(it.key(), (it->mode))) { - // Only delay unlocking if the lock is not acquired more than once. Otherwise, we can simply - // call _unlockImpl to decrement recursiveCount instead of incrementing unlockPending. This - // is safe because the lock is still being held in the strongest mode necessary. - if (it->recursiveCount > 1) { - // Invariant that the lock is still being held. - invariant(!_unlockImpl(&it)); - return false; - } - if (!it->unlockPending) { - _numResourcesToUnlockAtEndUnitOfWork++; - } - it->unlockPending++; - // unlockPending will be incremented if a lock is converted or acquired in the same mode - // recursively, and unlock() is called multiple times on one ResourceId. - invariant(it->unlockPending <= it->recursiveCount); - return false; - } - - return _unlockImpl(&it); -} - -bool LockerImpl::unlockRSTLforPrepare() { - auto rstlRequest = _requests.find(resourceIdReplicationStateTransitionLock); - - // Don't attempt to unlock twice. This can happen when an interrupted global lock is destructed. - if (!rstlRequest) - return false; - - // If the RSTL is 'unlockPending' and we are fully unlocking it, then we do not want to - // attempt to unlock the RSTL when the WUOW ends, since it will already be unlocked. - if (rstlRequest->unlockPending) { - rstlRequest->unlockPending = 0; - _numResourcesToUnlockAtEndUnitOfWork--; - } - - // Reset the recursiveCount to 1 so that we fully unlock the RSTL. Since it will be fully - // unlocked, any future unlocks will be noops anyways. 
- rstlRequest->recursiveCount = 1; - - return _unlockImpl(&rstlRequest); -} - -LockMode LockerImpl::getLockMode(ResourceId resId) const { - scoped_spinlock scopedLock(_lock); - - const LockRequestsMap::ConstIterator it = _requests.find(resId); - if (!it) - return MODE_NONE; - - return it->mode; -} - -bool LockerImpl::isLockHeldForMode(ResourceId resId, LockMode mode) const { - return isModeCovered(mode, getLockMode(resId)); -} - -bool LockerImpl::isDbLockedForMode(const DatabaseName& dbName, LockMode mode) const { - if (isW()) - return true; - if (isR() && isSharedLockMode(mode)) - return true; - - const ResourceId resIdDb(RESOURCE_DATABASE, dbName); - return isLockHeldForMode(resIdDb, mode); -} - -bool LockerImpl::isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const { - invariant(nss.coll().size()); - - if (isW()) - return true; - if (isR() && isSharedLockMode(mode)) - return true; - - const ResourceId resIdDb(RESOURCE_DATABASE, nss.dbName()); - - LockMode dbMode = getLockMode(resIdDb); - if (!shouldConflictWithSecondaryBatchApplication()) - return true; - - switch (dbMode) { - case MODE_NONE: - return false; - case MODE_X: - return true; - case MODE_S: - return isSharedLockMode(mode); - case MODE_IX: - case MODE_IS: { - const ResourceId resIdColl(RESOURCE_COLLECTION, nss); - return isLockHeldForMode(resIdColl, mode); - } break; - case LockModesCount: - break; - } - - MONGO_UNREACHABLE; - return false; -} - -bool LockerImpl::wasGlobalLockTakenForWrite() const { - return _globalLockMode & ((1 << MODE_IX) | (1 << MODE_X)); -} - -bool LockerImpl::wasGlobalLockTakenInModeConflictingWithWrites() const { - return _wasGlobalLockTakenInModeConflictingWithWrites.load(); -} - -bool LockerImpl::wasGlobalLockTaken() const { - return _globalLockMode != (1 << MODE_NONE); -} - -void LockerImpl::setGlobalLockTakenInMode(LockMode mode) { - _globalLockMode |= (1 << mode); - - if (mode == MODE_IX || mode == MODE_X || mode == MODE_S) { - _wasGlobalLockTakenInModeConflictingWithWrites.store(true); - } -} - -ResourceId LockerImpl::getWaitingResource() const { - scoped_spinlock scopedLock(_lock); - - return _waitingResource; -} - -MONGO_TSAN_IGNORE -void LockerImpl::getLockerInfo(LockerInfo* lockerInfo, - const boost::optional lockStatsBase) const { - invariant(lockerInfo); - - // Zero-out the contents - lockerInfo->locks.clear(); - lockerInfo->waitingResource = ResourceId(); - lockerInfo->stats.reset(); - - _lock.lock(); - LockRequestsMap::ConstIterator it = _requests.begin(); - while (!it.finished()) { - OneLock info; - info.resourceId = it.key(); - info.mode = it->mode; - - lockerInfo->locks.push_back(info); - it.next(); - } - _lock.unlock(); - - std::sort(lockerInfo->locks.begin(), lockerInfo->locks.end()); - - lockerInfo->waitingResource = getWaitingResource(); - lockerInfo->stats.append(_stats); - - // lockStatsBase is a snapshot of lock stats taken when the sub-operation starts. Only - // sub-operations have lockStatsBase. - if (lockStatsBase) - // Adjust the lock stats by subtracting the lockStatsBase. No mutex is needed because - // lockStatsBase is immutable. - lockerInfo->stats.subtract(*lockStatsBase); -} - -boost::optional LockerImpl::getLockerInfo( - const boost::optional lockStatsBase) const { - Locker::LockerInfo lockerInfo; - getLockerInfo(&lockerInfo, lockStatsBase); - return std::move(lockerInfo); -} - -bool LockerImpl::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) { - // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork. 
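`isDbLockedForMode` and `isCollectionLockedForMode` above answer lock queries through `isModeCovered`: a held mode satisfies a requested mode if it is at least as strong. A tiny illustration of such a coverage table; the masks below are a simplified assumption for illustration, not the server's actual tables:

```cpp
#include <iostream>

enum LockMode { MODE_NONE, MODE_IS, MODE_IX, MODE_S, MODE_X, LockModesCount };

// A held mode "covers" a requested mode if it is at least as strong. These
// masks are a simplified assumption, not the server's real definitions.
bool isModeCovered(LockMode requested, LockMode held) {
    static const int coveredBy[LockModesCount] = {
        /* MODE_NONE */ 0,
        /* MODE_IS   */ (1 << MODE_IS) | (1 << MODE_IX) | (1 << MODE_S) | (1 << MODE_X),
        /* MODE_IX   */ (1 << MODE_IX) | (1 << MODE_X),
        /* MODE_S    */ (1 << MODE_S) | (1 << MODE_X),
        /* MODE_X    */ (1 << MODE_X),
    };
    return coveredBy[requested] & (1 << held);
}

int main() {
    std::cout << isModeCovered(MODE_IS, MODE_IX)  // 1: holding IX satisfies an IS request
              << isModeCovered(MODE_X, MODE_S)    // 0: holding S cannot satisfy an X request
              << '\n';
}
```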
- invariant(!inAWriteUnitOfWork()); - invariant(!(_modeForTicket == MODE_S || _modeForTicket == MODE_X), - "Yielding a strong global MODE_X/MODE_S lock is forbidden"); - // Clear out whatever is in stateOut. - stateOut->locks.clear(); - stateOut->globalMode = MODE_NONE; - - // First, we look at the global lock. There is special handling for this so we store it - // separately from the more pedestrian locks. - LockRequestsMap::Iterator globalRequest = _requests.find(resourceIdGlobal); - if (!globalRequest) { - // If there's no global lock there isn't really anything to do. Check that. - for (auto it = _requests.begin(); !it.finished(); it.next()) { - invariant(it.key().getType() == RESOURCE_MUTEX); - } - return false; - } - - // If the global lock or RSTL has been acquired more than once, we're probably somewhere in a - // DBDirectClient call. It's not safe to release and reacquire locks -- the context using - // the DBDirectClient is probably not prepared for lock release. - LockRequestsMap::Iterator rstlRequest = - _requests.find(resourceIdReplicationStateTransitionLock); - if (globalRequest->recursiveCount > 1 || (rstlRequest && rstlRequest->recursiveCount > 1)) { - return false; - } - - // If the RSTL is exclusive, then this operation should not yield. - if (rstlRequest && rstlRequest->mode != MODE_IX) { - return false; - } - - // The global lock must have been acquired just once - stateOut->globalMode = globalRequest->mode; - invariant(unlock(resourceIdGlobal)); - - // Next, the non-global locks. - for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) { - const ResourceId resId = it.key(); - const ResourceType resType = resId.getType(); - if (resType == RESOURCE_MUTEX) - continue; - - // We should never have to save and restore metadata locks. - invariant(RESOURCE_DATABASE == resType || RESOURCE_COLLECTION == resType || - (resId == resourceIdParallelBatchWriterMode && isSharedLockMode(it->mode)) || - resId == resourceIdFeatureCompatibilityVersion || - (resId == resourceIdReplicationStateTransitionLock && it->mode == MODE_IX)); - - // And, stuff the info into the out parameter. - OneLock info; - info.resourceId = resId; - info.mode = it->mode; - invariant( - !(info.mode == MODE_S || info.mode == MODE_X), - str::stream() << "Yielding a strong MODE_X/MODE_S lock is forbidden. ResourceId was " - << resId.toString()); - stateOut->locks.push_back(info); - - invariant(unlock(resId)); - } - invariant(!isLocked()); - - // Sort locks by ResourceId. They'll later be acquired in this canonical locking order. - std::sort(stateOut->locks.begin(), stateOut->locks.end()); - - return true; -} - -void LockerImpl::restoreLockState(OperationContext* opCtx, const Locker::LockSnapshot& state) { - // We shouldn't be restoring lock state from inside a WriteUnitOfWork. - invariant(!inAWriteUnitOfWork()); - invariant(_modeForTicket == MODE_NONE); - invariant(_clientState.load() == kInactive); - - getFlowControlTicket(opCtx, state.globalMode); - - std::vector::const_iterator it = state.locks.begin(); - // If we locked the PBWM, it must be locked before the - // resourceIdFeatureCompatibilityVersion, resourceIdReplicationStateTransitionLock, and - // resourceIdGlobal resources. - if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) { - lock(opCtx, it->resourceId, it->mode); - it++; - } - - // If we locked the FCV lock, it must be locked before the - // resourceIdReplicationStateTransitionLock and resourceIdGlobal resources. 
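`restoreLockState` above reacquires yielded locks in a fixed order: ParallelBatchWriterMode, then the FCV lock, then the RSTL, then the global lock, and finally the remaining locks sorted by `ResourceId`. A sketch of that sort-then-reacquire idea with illustrative ranks standing in for the real `ResourceId` ordering:

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Yielded locks are reacquired in a canonical order so that the special global
// resources always come before database and collection locks. The ranks here
// are illustrative; the server sorts by ResourceId.
struct SavedLock {
    int rank;  // Lower rank = reacquired earlier.
    std::string name;
};

void restore(std::vector<SavedLock> locks) {
    std::sort(locks.begin(), locks.end(),
              [](const SavedLock& a, const SavedLock& b) { return a.rank < b.rank; });
    for (const auto& lock : locks)
        std::cout << "reacquire " << lock.name << '\n';
}

int main() {
    restore({{4, "db.coll"},
             {0, "ParallelBatchWriterMode"},
             {3, "Global"},
             {2, "ReplicationStateTransitionLock"},
             {1, "FeatureCompatibilityVersion"},
             {4, "db"}});
}
```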
- if (it != state.locks.end() && it->resourceId == resourceIdFeatureCompatibilityVersion) { - lock(opCtx, it->resourceId, it->mode); - it++; - } - - // If we locked the RSTL, it must be locked before the resourceIdGlobal resource. - if (it != state.locks.end() && it->resourceId == resourceIdReplicationStateTransitionLock) { - lock(opCtx, it->resourceId, it->mode); - it++; - } - - lockGlobal(opCtx, state.globalMode); - for (; it != state.locks.end(); it++) { - // Ensures we don't acquire locks out of order which can lead to deadlock. - invariant(it->resourceId.getType() != ResourceType::RESOURCE_GLOBAL); - lock(opCtx, it->resourceId, it->mode); - } - invariant(_modeForTicket != MODE_NONE); -} - -MONGO_TSAN_IGNORE -FlowControlTicketholder::CurOp LockerImpl::getFlowControlStats() const { - return _flowControlStats; -} - -MONGO_TSAN_IGNORE -LockResult LockerImpl::_lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode) { - dassert(!getWaitingResource().isValid()); - - // Operations which are holding open an oplog hole cannot block when acquiring locks. - if (opCtx && !shouldAllowLockAcquisitionOnTimestampedUnitOfWork() && - resId.getType() != RESOURCE_METADATA && resId.getType() != RESOURCE_MUTEX) { - invariant(!opCtx->recoveryUnit()->isTimestamped(), - str::stream() - << "Operation holding open an oplog hole tried to acquire locks. ResourceId: " - << resId << ", mode: " << modeName(mode)); - } - - LockRequest* request; - bool isNew = true; - - LockRequestsMap::Iterator it = _requests.find(resId); - if (!it) { - scoped_spinlock scopedLock(_lock); - LockRequestsMap::Iterator itNew = _requests.insert(resId); - itNew->initNew(this, &_notify); - - request = itNew.objAddr(); - } else { - request = it.objAddr(); - isNew = false; - } - - // If unlockPending is nonzero, that means a LockRequest already exists for this resource but - // is planned to be released at the end of this WUOW due to two-phase locking. Rather than - // unlocking the existing request, we can reuse it if the existing mode matches the new mode. - if (request->unlockPending && isModeCovered(mode, request->mode)) { - request->unlockPending--; - if (!request->unlockPending) { - _numResourcesToUnlockAtEndUnitOfWork--; - } - return LOCK_OK; - } - - // Making this call here will record lock re-acquisitions and conversions as well. - globalStats.recordAcquisition(_id, resId, mode); - _stats.recordAcquisition(resId, mode); - - // Give priority to the full modes for Global, PBWM, and RSTL resources so we don't stall global - // operations such as shutdown or stepdown. - const ResourceType resType = resId.getType(); - if (resType == RESOURCE_GLOBAL) { - if (mode == MODE_S || mode == MODE_X) { - request->enqueueAtFront = true; - request->compatibleFirst = true; - } - } else if (resType != RESOURCE_MUTEX) { - // This is all sanity checks that the global locks are always be acquired - // before any other lock has been acquired and they must be in sync with the nesting. - if (kDebugBuild) { - const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal); - invariant(itGlobal->recursiveCount > 0); - invariant(itGlobal->mode != MODE_NONE); - }; - } - - // The notification object must be cleared before we invoke the lock manager, because - // otherwise we might reset state if the lock becomes granted very fast. - _notify.clear(); - - LockResult result = isNew ? 
getGlobalLockManager()->lock(resId, request, mode) - : getGlobalLockManager()->convert(resId, request, mode); - - if (result == LOCK_WAITING) { - globalStats.recordWait(_id, resId, mode); - _stats.recordWait(resId, mode); - _setWaitingResource(resId); - } else if (result == LOCK_OK && opCtx && _uninterruptibleLocksRequested == 0) { - // Lock acquisitions are not allowed to succeed when opCtx is marked as interrupted, unless - // the caller requested an uninterruptible lock. - auto interruptStatus = opCtx->checkForInterruptNoAssert(); - if (!interruptStatus.isOK()) { - auto unlockIt = _requests.find(resId); - invariant(unlockIt); - _unlockImpl(&unlockIt); - uassertStatusOK(interruptStatus); - } - } - - return result; -} - -void LockerImpl::_lockComplete(OperationContext* opCtx, - ResourceId resId, - LockMode mode, - Date_t deadline, - const LockTimeoutCallback& onTimeout) { - // Operations which are holding open an oplog hole cannot block when acquiring locks. Lock - // requests entering this function have been queued up and will be granted the lock as soon as - // the lock is released, which is a blocking operation. - if (opCtx && !shouldAllowLockAcquisitionOnTimestampedUnitOfWork() && - resId.getType() != RESOURCE_METADATA && resId.getType() != RESOURCE_MUTEX) { - invariant(!opCtx->recoveryUnit()->isTimestamped(), - str::stream() - << "Operation holding open an oplog hole tried to acquire locks. ResourceId: " - << resId << ", mode: " << modeName(mode)); - } - - // Clean up the state on any failed lock attempts. - ScopeGuard unlockOnErrorGuard([&] { - LockRequestsMap::Iterator it = _requests.find(resId); - invariant(it); - _unlockImpl(&it); - _setWaitingResource(ResourceId()); - }); - - // This failpoint is used to time out non-intent locks if they cannot be granted immediately - // for user operations. Testing-only. - const bool isUserOperation = opCtx && opCtx->getClient()->isFromUserConnection(); - if (!_uninterruptibleLocksRequested && isUserOperation && - MONGO_unlikely(failNonIntentLocksIfWaitNeeded.shouldFail())) { - uassert(ErrorCodes::LockTimeout, - str::stream() << "Cannot immediately acquire lock '" << resId.toString() - << "'. Timing out due to failpoint.", - (mode == MODE_IS || mode == MODE_IX)); - } - - LockResult result; - Milliseconds timeout; - if (deadline == Date_t::max()) { - timeout = Milliseconds::max(); - } else if (deadline <= Date_t()) { - timeout = Milliseconds(0); - } else { - timeout = deadline - Date_t::now(); - } - timeout = std::min(timeout, _maxLockTimeout ? *_maxLockTimeout : Milliseconds::max()); - if (_uninterruptibleLocksRequested) { - timeout = Milliseconds::max(); - } - - // Don't go sleeping without bound in order to be able to report long waits. - Milliseconds waitTime = std::min(timeout, MaxWaitTime); - const uint64_t startOfTotalWaitTime = curTimeMicros64(); - uint64_t startOfCurrentWaitTime = startOfTotalWaitTime; - - while (true) { - // It is OK if this call wakes up spuriously, because we re-evaluate the remaining - // wait time anyways. - // If we have an operation context, we want to use its interruptible wait so that - // pending lock acquisitions can be cancelled, so long as no callers have requested an - // uninterruptible lock. 
- if (opCtx && _uninterruptibleLocksRequested == 0) { - result = _notify.wait(opCtx, waitTime); - } else { - result = _notify.wait(waitTime); - } - - // Account for the time spent waiting on the notification object - const uint64_t curTimeMicros = curTimeMicros64(); - const uint64_t elapsedTimeMicros = curTimeMicros - startOfCurrentWaitTime; - startOfCurrentWaitTime = curTimeMicros; - - globalStats.recordWaitTime(_id, resId, mode, elapsedTimeMicros); - _stats.recordWaitTime(resId, mode, elapsedTimeMicros); - - if (result == LOCK_OK) - break; - - // If infinite timeout was requested, just keep waiting - if (timeout == Milliseconds::max()) { - continue; - } - - const auto totalBlockTime = duration_cast( - Microseconds(int64_t(curTimeMicros - startOfTotalWaitTime))); - waitTime = (totalBlockTime < timeout) ? std::min(timeout - totalBlockTime, MaxWaitTime) - : Milliseconds(0); - - // Check if the lock acquisition has timed out. If we have an operation context and client - // we can provide additional diagnostics data. - if (waitTime == Milliseconds(0)) { - if (onTimeout) { - onTimeout(); - } - std::string timeoutMessage = str::stream() - << "Unable to acquire " << modeName(mode) << " lock on '" << resId.toString() - << "' within " << timeout << "."; - if (opCtx && opCtx->getClient()) { - timeoutMessage = str::stream() - << timeoutMessage << " opId: " << opCtx->getOpID() - << ", op: " << opCtx->getClient()->desc() - << ", connId: " << opCtx->getClient()->getConnectionId() << "."; - } - uasserted(ErrorCodes::LockTimeout, timeoutMessage); - } - } - - invariant(result == LOCK_OK); - unlockOnErrorGuard.dismiss(); - _setWaitingResource(ResourceId()); -} - -void LockerImpl::getFlowControlTicket(OperationContext* opCtx, LockMode lockMode) { - auto ticketholder = FlowControlTicketholder::get(opCtx); - if (ticketholder && lockMode == LockMode::MODE_IX && _clientState.load() == kInactive && - _admCtx.getPriority() != AdmissionContext::Priority::kImmediate && - !_uninterruptibleLocksRequested) { - // FlowControl only acts when a MODE_IX global lock is being taken. The clientState is only - // being modified here to change serverStatus' `globalLock.currentQueue` metrics. This - // method must not exit with a side-effect on the clientState. That value is also used for - // tracking whether other resources need to be released. - _clientState.store(kQueuedWriter); - ScopeGuard restoreState([&] { _clientState.store(kInactive); }); - // Acquiring a ticket is a potentially blocking operation. This must not be called after a - // transaction timestamp has been set, indicating this transaction has created an oplog - // hole. 
- invariant(!opCtx->recoveryUnit()->isTimestamped()); - ticketholder->getTicket(opCtx, &_flowControlStats); - } -} - -LockResult LockerImpl::lockRSTLBegin(OperationContext* opCtx, LockMode mode) { - bool testOnly = false; - - if (MONGO_unlikely(enableTestOnlyFlagforRSTL.shouldFail())) { - testOnly = true; - } - - invariant(testOnly || mode == MODE_IX || mode == MODE_X); - return _lockBegin(opCtx, resourceIdReplicationStateTransitionLock, mode); -} - -void LockerImpl::lockRSTLComplete(OperationContext* opCtx, - LockMode mode, - Date_t deadline, - const LockTimeoutCallback& onTimeout) { - _lockComplete(opCtx, resourceIdReplicationStateTransitionLock, mode, deadline, onTimeout); -} - -void LockerImpl::releaseTicket() { - invariant(_modeForTicket != MODE_NONE); - _releaseTicket(); -} - -void LockerImpl::_releaseTicket() { - _ticket.reset(); - _clientState.store(kInactive); -} - -bool LockerImpl::_unlockImpl(LockRequestsMap::Iterator* it) { - if (getGlobalLockManager()->unlock(it->objAddr())) { - if (it->key() == resourceIdGlobal) { - invariant(_modeForTicket != MODE_NONE); - - // We may have already released our ticket through a call to releaseTicket(). - if (_clientState.load() != kInactive) { - _releaseTicket(); - } - - _modeForTicket = MODE_NONE; - } - - scoped_spinlock scopedLock(_lock); - it->remove(); - - return true; - } - - return false; -} - -bool LockerImpl::isGlobalLockedRecursively() { - auto globalLockRequest = _requests.find(resourceIdGlobal); - return !globalLockRequest.finished() && globalLockRequest->recursiveCount > 1; -} - -void LockerImpl::_setWaitingResource(ResourceId resId) { - scoped_spinlock scopedLock(_lock); - - _waitingResource = resId; -} - -// -// Auto classes -// - -namespace { -/** - * Periodically purges unused lock buckets. The first time the lock is used again after - * cleanup it needs to be allocated, and similarly, every first use by a client for an intent - * mode may need to create a partitioned lock head. Cleanup is done roughly once a minute. - */ -class UnusedLockCleaner : PeriodicTask { -public: - std::string taskName() const { - return "UnusedLockCleaner"; - } - - void taskDoWork() { - LOGV2_DEBUG(20524, 2, "cleaning up unused lock buckets of the global lock manager"); - getGlobalLockManager()->cleanupUnusedLocks(); - } -} unusedLockCleaner; -} // namespace - -// -// Standalone functions -// - -LockManager* getGlobalLockManager() { - auto serviceContext = getGlobalServiceContext(); - invariant(serviceContext); - return LockManager::get(serviceContext); -} - -void reportGlobalLockingStats(SingleThreadedLockStats* outStats) { - globalStats.report(outStats); -} - -void resetGlobalLockStats() { - globalStats.reset(); -} - -} // namespace mongo diff --git a/src/mongo/db/concurrency/lock_state.h b/src/mongo/db/concurrency/lock_state.h deleted file mode 100644 index 7ae54b1fd7488..0000000000000 --- a/src/mongo/db/concurrency/lock_state.h +++ /dev/null @@ -1,434 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. 
If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include - -#include "mongo/db/concurrency/fast_map_noalloc.h" -#include "mongo/db/concurrency/lock_manager_defs.h" -#include "mongo/db/concurrency/locker.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/storage/ticketholder_manager.h" -#include "mongo/platform/atomic_word.h" -#include "mongo/util/concurrency/spin_lock.h" -#include "mongo/util/concurrency/ticketholder.h" - -namespace mongo { - -/** - * Notfication callback, which stores the last notification result and signals a condition - * variable, which can be waited on. - */ -class CondVarLockGrantNotification : public LockGrantNotification { - CondVarLockGrantNotification(const CondVarLockGrantNotification&) = delete; - CondVarLockGrantNotification& operator=(const CondVarLockGrantNotification&) = delete; - -public: - CondVarLockGrantNotification(); - - /** - * Clears the object so it can be reused. - */ - void clear(); - - /** - * Uninterruptible blocking method, which waits for the notification to fire. - * - * @param timeout How many milliseconds to wait before returning LOCK_TIMEOUT. - */ - LockResult wait(Milliseconds timeout); - - /** - * Interruptible blocking method, which waits for the notification to fire or an interrupt from - * the operation context. - * - * @param opCtx OperationContext to wait on for an interrupt. - * @param timeout How many milliseconds to wait before returning LOCK_TIMEOUT. - */ - LockResult wait(OperationContext* opCtx, Milliseconds timeout); - -private: - virtual void notify(ResourceId resId, LockResult result); - - // These two go together to implement the conditional variable pattern. - Mutex _mutex = MONGO_MAKE_LATCH("CondVarLockGrantNotification::_mutex"); - stdx::condition_variable _cond; - - // Result from the last call to notify - LockResult _result; -}; - -/** - * Interface for acquiring locks. One of those objects will have to be instantiated for each - * request (transaction). - * - * Lock/unlock methods must always be called from a single thread. - * - * All instances reference a single global lock manager. - * - */ -class LockerImpl : public Locker { -public: - /** - * Instantiates new locker. Must be given a unique identifier for disambiguation. Lockers - * having the same identifier will not conflict on lock acquisition. 
- */ - LockerImpl(ServiceContext* serviceContext); - - virtual ~LockerImpl(); - - virtual ClientState getClientState() const; - - virtual LockerId getId() const { - return _id; - } - - stdx::thread::id getThreadId() const override; - - void updateThreadIdToCurrentThread() override; - void unsetThreadId() override; - - void setSharedLocksShouldTwoPhaseLock(bool sharedLocksShouldTwoPhaseLock) override { - _sharedLocksShouldTwoPhaseLock = sharedLocksShouldTwoPhaseLock; - } - - void setMaxLockTimeout(Milliseconds maxTimeout) override { - _maxLockTimeout = maxTimeout; - } - - bool hasMaxLockTimeout() override { - return static_cast(_maxLockTimeout); - } - - void unsetMaxLockTimeout() override { - _maxLockTimeout = boost::none; - } - - /** - * Acquires the ticket within the deadline (or _maxLockTimeout) and tries to grab the lock. - */ - virtual void lockGlobal(OperationContext* opCtx, - LockMode mode, - Date_t deadline = Date_t::max()); - - virtual bool unlockGlobal(); - - virtual LockResult lockRSTLBegin(OperationContext* opCtx, LockMode mode); - virtual void lockRSTLComplete(OperationContext* opCtx, - LockMode mode, - Date_t deadline, - const LockTimeoutCallback& onTimeout); - - virtual bool unlockRSTLforPrepare(); - - virtual void beginWriteUnitOfWork() override; - virtual void endWriteUnitOfWork() override; - - virtual bool inAWriteUnitOfWork() const { - return _wuowNestingLevel > 0; - } - - bool wasGlobalLockTakenForWrite() const override; - - bool wasGlobalLockTakenInModeConflictingWithWrites() const override; - - bool wasGlobalLockTaken() const override; - - void setGlobalLockTakenInMode(LockMode mode) override; - - /** - * Requests a lock for resource 'resId' with mode 'mode'. An OperationContext 'opCtx' must be - * provided to interrupt waiting on the locker condition variable that indicates status of - * the lock acquisition. A lock operation would otherwise wait until a timeout or the lock is - * granted. 
- */ - virtual void lock(OperationContext* opCtx, - ResourceId resId, - LockMode mode, - Date_t deadline = Date_t::max()); - - virtual void lock(ResourceId resId, LockMode mode, Date_t deadline = Date_t::max()) { - lock(nullptr, resId, mode, deadline); - } - - virtual void downgrade(ResourceId resId, LockMode newMode); - - virtual bool unlock(ResourceId resId); - - virtual LockMode getLockMode(ResourceId resId) const; - virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const; - virtual bool isDbLockedForMode(const DatabaseName& dbName, LockMode mode) const; - virtual bool isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const; - - virtual ResourceId getWaitingResource() const; - - virtual void getLockerInfo(LockerInfo* lockerInfo, - boost::optional lockStatsBase) const; - virtual boost::optional getLockerInfo( - boost::optional lockStatsBase) const final; - - virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut); - - virtual void restoreLockState(OperationContext* opCtx, const LockSnapshot& stateToRestore); - - bool releaseWriteUnitOfWorkAndUnlock(LockSnapshot* stateOut) override; - void restoreWriteUnitOfWorkAndLock(OperationContext* opCtx, - const LockSnapshot& stateToRestore) override; - - void releaseWriteUnitOfWork(WUOWLockSnapshot* stateOut) override; - void restoreWriteUnitOfWork(const WUOWLockSnapshot& stateToRestore) override; - - virtual void releaseTicket(); - virtual void reacquireTicket(OperationContext* opCtx); - - bool hasReadTicket() const override { - return _modeForTicket == MODE_IS || _modeForTicket == MODE_S; - } - - bool hasWriteTicket() const override { - return _modeForTicket == MODE_IX || _modeForTicket == MODE_X; - } - - void getFlowControlTicket(OperationContext* opCtx, LockMode lockMode) override; - - FlowControlTicketholder::CurOp getFlowControlStats() const override; - - // - // Below functions are for testing only. - // - - FastMapNoAlloc getRequestsForTest() const { - scoped_spinlock scopedLock(_lock); - return _requests; - } - - LockResult lockBeginForTest(OperationContext* opCtx, ResourceId resId, LockMode mode) { - return _lockBegin(opCtx, resId, mode); - } - - void lockCompleteForTest(OperationContext* opCtx, - ResourceId resId, - LockMode mode, - Date_t deadline) { - _lockComplete(opCtx, resId, mode, deadline, nullptr); - } - -private: - typedef FastMapNoAlloc LockRequestsMap; - - /** - * Allows for lock requests to be requested in a non-blocking way. There can be only one - * outstanding pending lock request per locker object. - * - * _lockBegin posts a request to the lock manager for the specified lock to be acquired, - * which either immediately grants the lock, or puts the requestor on the conflict queue - * and returns immediately with the result of the acquisition. The result can be one of: - * - * LOCK_OK - Nothing more needs to be done. The lock is granted. - * LOCK_WAITING - The request has been queued up and will be granted as soon as the lock - * is free. If this result is returned, typically _lockComplete needs to be called in - * order to wait for the actual grant to occur. If the caller no longer needs to wait - * for the grant to happen, unlock needs to be called with the same resource passed - * to _lockBegin. - * - * In other words for each call to _lockBegin, which does not return LOCK_OK, there needs to - * be a corresponding call to either _lockComplete or unlock. 
- * - * If an operation context is provided that represents an interrupted operation, _lockBegin will - * throw an exception whenever it would have been possible to grant the lock with LOCK_OK. This - * behavior can be disabled with an UninterruptibleLockGuard. - * - * NOTE: These methods are not public and should only be used inside the class - * implementation and for unit-tests and not called directly. - */ - LockResult _lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode); - - /** - * Waits for the completion of a lock, previously requested through _lockBegin/ - * Must only be called, if _lockBegin returned LOCK_WAITING. - * - * @param opCtx Operation context that, if not null, will be used to allow interruptible lock - * acquisition. - * @param resId Resource id which was passed to an earlier _lockBegin call. Must match. - * @param mode Mode which was passed to an earlier _lockBegin call. Must match. - * @param deadline The absolute time point when this lock acquisition will time out, if not yet - * granted. - * @param onTimeout Callback which will run if the lock acquisition is about to time out. - * - * Throws an exception if it is interrupted. - */ - void _lockComplete(OperationContext* opCtx, - ResourceId resId, - LockMode mode, - Date_t deadline, - const LockTimeoutCallback& onTimeout); - - /** - * The main functionality of the unlock method, except accepts iterator in order to avoid - * additional lookups during unlockGlobal. Frees locks immediately, so must not be called from - * inside a WUOW. - */ - bool _unlockImpl(LockRequestsMap::Iterator* it); - - /** - * Whether we should use two phase locking. Returns true if the particular lock's release should - * be delayed until the end of the operation. - * - * We delay release of write operation locks (X, IX) in order to ensure that the data changes - * they protect are committed successfully. endWriteUnitOfWork will release them afterwards. - * This protects other threads from seeing inconsistent in-memory state. - * - * Shared locks (S, IS) will also participate in two-phase locking if - * '_sharedLocksShouldTwoPhaseLock' is true. This will protect open storage engine transactions - * across network calls. - */ - bool _shouldDelayUnlock(ResourceId resId, LockMode mode) const; - - /** - * Releases the ticket for the Locker. - */ - void _releaseTicket(); - - /** - * Acquires a ticket for the Locker under 'mode'. - * Returns true if a ticket is successfully acquired. - * false if it cannot acquire a ticket within 'deadline'. - * It may throw an exception when it is interrupted. - */ - bool _acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline); - - void _setWaitingResource(ResourceId resId); - - /** - * Calls dump() on this locker instance and the lock manager. - */ - void _dumpLockerAndLockManagerRequests(); - - // Used to disambiguate different lockers - const LockerId _id; - - // The only reason we have this spin lock here is for the diagnostic tools, which could - // iterate through the LockRequestsMap on a separate thread and need it to be stable. - // Apart from that, all accesses to the LockerImpl are always from a single thread. - // - // This has to be locked inside const methods, hence the mutable. - mutable SpinLock _lock; - // Note: this data structure must always guarantee the continued validity of pointers/references - // to its contents (LockRequests). The LockManager maintains a LockRequestList of pointers to - // the LockRequests managed by this data structure. 
- LockRequestsMap _requests; - - // Reuse the notification object across requests so we don't have to create a new mutex - // and condition variable every time. - CondVarLockGrantNotification _notify; - - // Per-locker locking statistics. Reported in the slow-query log message and through - // db.currentOp. Complementary to the per-instance locking statistics. - AtomicLockStats _stats; - - // Delays release of exclusive/intent-exclusive locked resources until the write unit of - // work completes. Value of 0 means we are not inside a write unit of work. - int _wuowNestingLevel; - - // Mode for which the Locker acquired a ticket, or MODE_NONE if no ticket was acquired. - LockMode _modeForTicket = MODE_NONE; - - // Indicates whether the client is active reader/writer or is queued. - AtomicWord _clientState{kInactive}; - - // Track the thread who owns the lock for debugging purposes - stdx::thread::id _threadId; - - // If true, shared locks will participate in two-phase locking. - bool _sharedLocksShouldTwoPhaseLock = false; - - // If this is set, dictates the max number of milliseconds that we will wait for lock - // acquisition. Effectively resets lock acquisition deadlines to time out sooner. If set to 0, - // for example, lock attempts will time out immediately if the lock is not immediately - // available. Note this will be ineffective if uninterruptible lock guard is set. - boost::optional _maxLockTimeout; - - // A structure for accumulating time spent getting flow control tickets. - FlowControlTicketholder::CurOp _flowControlStats; - - // The global ticketholders of the service context. - TicketHolderManager* _ticketHolderManager; - - // This will only be valid when holding a ticket. - boost::optional _ticket; - - // Tracks the global lock modes ever acquired in this Locker's life. This value should only ever - // be accessed from the thread that owns the Locker. - unsigned char _globalLockMode = (1 << MODE_NONE); - - // Tracks whether this operation should be killed on step down. - AtomicWord _wasGlobalLockTakenInModeConflictingWithWrites{false}; - - // If isValid(), the ResourceId of the resource currently waiting for the lock. If not valid, - // there is no resource currently waiting. - ResourceId _waitingResource; - - ////////////////////////////////////////////////////////////////////////////////////////// - // - // Methods merged from LockState, which should eventually be removed or changed to methods - // on the LockerImpl interface. - // - -public: - virtual void dump() const; - - virtual bool isW() const; - virtual bool isR() const; - - virtual bool isLocked() const; - virtual bool isWriteLocked() const; - virtual bool isReadLocked() const; - - virtual bool isRSTLExclusive() const; - virtual bool isRSTLLocked() const; - - bool isGlobalLockedRecursively() override; - - virtual bool hasLockPending() const { - return getWaitingResource().isValid(); - } -}; - -/** - * Retrieves the global lock manager instance. - * Legacy global lock manager accessor for internal lock implementation * and debugger scripts - * such as gdb/mongo_lock.py. - * The lock manager is now a decoration on the service context and this accessor is retained for - * startup, lock internals, and debugger scripts. - * Using LockManager::get(ServiceContext*) where possible is preferable. 
- */ -LockManager* getGlobalLockManager(); - -} // namespace mongo diff --git a/src/mongo/db/concurrency/lock_state_test.cpp b/src/mongo/db/concurrency/lock_state_test.cpp deleted file mode 100644 index 3e99da1dd00e8..0000000000000 --- a/src/mongo/db/concurrency/lock_state_test.cpp +++ /dev/null @@ -1,1364 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - - -#include "mongo/platform/basic.h" - -#include -#include -#include - -#include "mongo/config.h" -#include "mongo/db/concurrency/lock_manager_test_help.h" -#include "mongo/db/concurrency/locker.h" -#include "mongo/db/curop.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/transport/session.h" -#include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/fail_point.h" -#include "mongo/util/timer.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - - -namespace mongo { - -class LockerImplTest : public ServiceContextTest {}; - -TEST_F(LockerImplTest, LockNoConflict) { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IX); - - locker.lock(resId, MODE_X); - - ASSERT(locker.isLockHeldForMode(resId, MODE_X)); - ASSERT(locker.isLockHeldForMode(resId, MODE_S)); - - ASSERT(locker.unlock(resId)); - - ASSERT(locker.isLockHeldForMode(resId, MODE_NONE)); - - locker.unlockGlobal(); -} - -TEST_F(LockerImplTest, ReLockNoConflict) { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IX); - - locker.lock(resId, MODE_S); - locker.lock(resId, MODE_X); - - ASSERT(!locker.unlock(resId)); - ASSERT(locker.isLockHeldForMode(resId, MODE_X)); - - ASSERT(locker.unlock(resId)); - ASSERT(locker.isLockHeldForMode(resId, MODE_NONE)); - - ASSERT(locker.unlockGlobal()); -} - 
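The two removed tests above (LockNoConflict and ReLockNoConflict) capture the behaviour the rest of this deleted file depends on: a resource held in MODE_X satisfies checks for the weaker MODE_S, and a resource acquired twice (MODE_S then MODE_X) is only truly released by the second unlock(). The sketch below is a minimal, self-contained model of those two ideas; the conflict table, the `covers` helper, and the `ToyLocker` type are invented for illustration and are not the server's LockerImpl/LockManager implementation.

```cpp
// Toy model of lock-mode coverage and recursive acquisition (illustrative
// only; not the MongoDB Locker API).
#include <cassert>

enum Mode { MODE_NONE, MODE_IS, MODE_IX, MODE_S, MODE_X, ModeCount };

// kConflicts[m] is a bitmask of the modes that conflict with m.
static const unsigned kConflicts[ModeCount] = {
    /* MODE_NONE */ 0,
    /* MODE_IS   */ (1u << MODE_X),
    /* MODE_IX   */ (1u << MODE_S) | (1u << MODE_X),
    /* MODE_S    */ (1u << MODE_IX) | (1u << MODE_X),
    /* MODE_X    */ (1u << MODE_IS) | (1u << MODE_IX) | (1u << MODE_S) | (1u << MODE_X),
};

// 'covering' satisfies a request for 'mode' when it conflicts with at least
// everything that 'mode' conflicts with.
bool covers(Mode mode, Mode covering) {
    return (kConflicts[covering] | kConflicts[mode]) == kConflicts[covering];
}

// Tracks a single resource: the strongest mode held plus a recursive count.
struct ToyLocker {
    Mode held = MODE_NONE;
    int recursiveCount = 0;

    void lock(Mode mode) {
        // Re-acquisition keeps the stronger mode and bumps the count.
        // (Simplified upgrade; the real lock manager performs the conversion.)
        if (!covers(mode, held))
            held = mode;
        ++recursiveCount;
    }

    // Returns true only when the last outstanding acquisition is released.
    bool unlock() {
        if (--recursiveCount > 0)
            return false;
        held = MODE_NONE;
        return true;
    }
};

int main() {
    ToyLocker locker;

    // LockNoConflict: an X hold satisfies a weaker S check.
    locker.lock(MODE_X);
    assert(covers(MODE_S, locker.held));
    assert(locker.unlock());

    // ReLockNoConflict: S then X needs two unlocks; only the last one frees it.
    locker.lock(MODE_S);
    locker.lock(MODE_X);
    assert(!locker.unlock());
    assert(covers(MODE_X, locker.held));
    assert(locker.unlock());
    assert(locker.held == MODE_NONE);
    return 0;
}
```

Defining coverage through conflict sets (a mode covers another when it conflicts with at least everything the weaker mode does) is what lets the tests assert isLockHeldForMode(resId, MODE_S) while only MODE_X was explicitly taken.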
-TEST_F(LockerImplTest, ConflictWithTimeout) { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker1(opCtx->getServiceContext()); - locker1.lockGlobal(opCtx.get(), MODE_IX); - locker1.lock(resId, MODE_X); - - LockerImpl locker2(opCtx->getServiceContext()); - locker2.lockGlobal(opCtx.get(), MODE_IX); - - ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resId, MODE_S, Date_t::now()), - AssertionException, - ErrorCodes::LockTimeout); - - ASSERT(locker2.getLockMode(resId) == MODE_NONE); - - ASSERT(locker1.unlock(resId)); - - ASSERT(locker1.unlockGlobal()); - ASSERT(locker2.unlockGlobal()); -} - -TEST_F(LockerImplTest, ConflictUpgradeWithTimeout) { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker1(opCtx->getServiceContext()); - locker1.lockGlobal(opCtx.get(), MODE_IS); - locker1.lock(resId, MODE_S); - - LockerImpl locker2(opCtx->getServiceContext()); - locker2.lockGlobal(opCtx.get(), MODE_IS); - locker2.lock(resId, MODE_S); - - // Try upgrading locker 1, which should block and timeout - ASSERT_THROWS_CODE(locker1.lock(opCtx.get(), resId, MODE_X, Date_t::now() + Milliseconds(1)), - AssertionException, - ErrorCodes::LockTimeout); - - locker1.unlockGlobal(); - locker2.unlockGlobal(); -} - -TEST_F(LockerImplTest, FailPointInLockFailsGlobalNonIntentLocksIfTheyCannotBeImmediatelyGranted) { - transport::TransportLayerMock transportLayer; - std::shared_ptr session = transportLayer.createSession(); - - auto newClient = getServiceContext()->makeClient("userClient", session); - AlternativeClientRegion acr(newClient); - auto newOpCtx = cc().makeOperationContext(); - - LockerImpl locker1(newOpCtx->getServiceContext()); - locker1.lockGlobal(newOpCtx.get(), MODE_IX); - - { - FailPointEnableBlock failWaitingNonPartitionedLocks("failNonIntentLocksIfWaitNeeded"); - - // MODE_S attempt. - LockerImpl locker2(newOpCtx->getServiceContext()); - ASSERT_THROWS_CODE( - locker2.lockGlobal(newOpCtx.get(), MODE_S), DBException, ErrorCodes::LockTimeout); - - // MODE_X attempt. - LockerImpl locker3(newOpCtx->getServiceContext()); - ASSERT_THROWS_CODE( - locker3.lockGlobal(newOpCtx.get(), MODE_X), DBException, ErrorCodes::LockTimeout); - } - - locker1.unlockGlobal(); -} - -TEST_F(LockerImplTest, FailPointInLockFailsNonIntentLocksIfTheyCannotBeImmediatelyGranted) { - transport::TransportLayerMock transportLayer; - std::shared_ptr session = transportLayer.createSession(); - - auto newClient = getServiceContext()->makeClient("userClient", session); - AlternativeClientRegion acr(newClient); - auto newOpCtx = cc().makeOperationContext(); - - // Granted MODE_X lock, fail incoming MODE_S and MODE_X. - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker1(newOpCtx->getServiceContext()); - locker1.lockGlobal(newOpCtx.get(), MODE_IX); - locker1.lock(newOpCtx.get(), resId, MODE_X); - - { - FailPointEnableBlock failWaitingNonPartitionedLocks("failNonIntentLocksIfWaitNeeded"); - - // MODE_S attempt. 
- LockerImpl locker2(newOpCtx->getServiceContext()); - locker2.lockGlobal(newOpCtx.get(), MODE_IS); - ASSERT_THROWS_CODE(locker2.lock(newOpCtx.get(), resId, MODE_S, Date_t::max()), - DBException, - ErrorCodes::LockTimeout); - - // The timed out MODE_S attempt shouldn't be present in the map of lock requests because it - // won't ever be granted. - ASSERT(locker2.getRequestsForTest().find(resId).finished()); - locker2.unlockGlobal(); - - // MODE_X attempt. - LockerImpl locker3(newOpCtx->getServiceContext()); - locker3.lockGlobal(newOpCtx.get(), MODE_IX); - ASSERT_THROWS_CODE(locker3.lock(newOpCtx.get(), resId, MODE_X, Date_t::max()), - DBException, - ErrorCodes::LockTimeout); - - // The timed out MODE_X attempt shouldn't be present in the map of lock requests because it - // won't ever be granted. - ASSERT(locker3.getRequestsForTest().find(resId).finished()); - locker3.unlockGlobal(); - } - - locker1.unlockGlobal(); -} - -TEST_F(LockerImplTest, ReadTransaction) { - auto opCtx = makeOperationContext(); - - LockerImpl locker(opCtx->getServiceContext()); - - locker.lockGlobal(opCtx.get(), MODE_IS); - locker.unlockGlobal(); - - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.unlockGlobal(); - - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lockGlobal(opCtx.get(), MODE_IS); - locker.unlockGlobal(); - locker.unlockGlobal(); -} - -/** - * Test that saveLockerImpl works by examining the output. - */ -TEST_F(LockerImplTest, saveAndRestoreGlobal) { - auto opCtx = makeOperationContext(); - - Locker::LockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - // No lock requests made, no locks held. - locker.saveLockStateAndUnlock(&lockInfo); - ASSERT_EQUALS(0U, lockInfo.locks.size()); - - // Lock the global lock, but just once. - locker.lockGlobal(opCtx.get(), MODE_IX); - - // We've locked the global lock. This should be reflected in the lockInfo. - locker.saveLockStateAndUnlock(&lockInfo); - ASSERT(!locker.isLocked()); - ASSERT_EQUALS(MODE_IX, lockInfo.globalMode); - - // Restore the lock(s) we had. - locker.restoreLockState(opCtx.get(), lockInfo); - - ASSERT(locker.isLocked()); - ASSERT(locker.unlockGlobal()); -} - -/** - * Test that saveLockerImpl can save and restore the RSTL. - */ -TEST_F(LockerImplTest, saveAndRestoreRSTL) { - auto opCtx = makeOperationContext(); - - Locker::LockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - const ResourceId resIdDatabase(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - - // Acquire locks. - locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resIdDatabase, MODE_IX); - - // Save the lock state. - locker.saveLockStateAndUnlock(&lockInfo); - ASSERT(!locker.isLocked()); - ASSERT_EQUALS(MODE_IX, lockInfo.globalMode); - - // Check locks are unlocked. - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resourceIdReplicationStateTransitionLock)); - ASSERT(!locker.isLocked()); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - - // Restore the lock(s) we had. - locker.restoreLockState(opCtx.get(), lockInfo); - - // Check locks are re-locked. - ASSERT(locker.isLocked()); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resourceIdReplicationStateTransitionLock)); - - ASSERT(locker.unlockGlobal()); - ASSERT(locker.unlock(resourceIdReplicationStateTransitionLock)); -} - -/** - * Test that we don't unlock when we have the global lock more than once. 
- */ -TEST_F(LockerImplTest, saveAndRestoreGlobalAcquiredTwice) { - auto opCtx = makeOperationContext(); - - Locker::LockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - // No lock requests made, no locks held. - locker.saveLockStateAndUnlock(&lockInfo); - ASSERT_EQUALS(0U, lockInfo.locks.size()); - - // Lock the global lock. - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lockGlobal(opCtx.get(), MODE_IX); - - // This shouldn't actually unlock as we're in a nested scope. - ASSERT(!locker.saveLockStateAndUnlock(&lockInfo)); - - ASSERT(locker.isLocked()); - - // We must unlockGlobal twice. - ASSERT(!locker.unlockGlobal()); - ASSERT(locker.unlockGlobal()); -} - -/** - * Tests that restoreLockerImpl works by locking a db and collection and saving + restoring. - */ -TEST_F(LockerImplTest, saveAndRestoreDBAndCollection) { - auto opCtx = makeOperationContext(); - - Locker::LockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - const ResourceId resIdDatabase(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - const ResourceId resIdCollection( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - // Lock some stuff. - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resIdDatabase, MODE_IX); - locker.lock(resIdCollection, MODE_IX); - locker.saveLockStateAndUnlock(&lockInfo); - - // Things shouldn't be locked anymore. - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - - // Restore lock state. - locker.restoreLockState(opCtx.get(), lockInfo); - - // Make sure things were re-locked. - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); - - ASSERT(locker.unlockGlobal()); -} - -TEST_F(LockerImplTest, releaseWriteUnitOfWork) { - auto opCtx = makeOperationContext(); - - Locker::LockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - const ResourceId resIdDatabase(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - const ResourceId resIdCollection( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - locker.beginWriteUnitOfWork(); - // Lock some stuff. - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resIdDatabase, MODE_IX); - locker.lock(resIdCollection, MODE_IX); - // Unlock them so that they will be pending to unlock. - ASSERT_FALSE(locker.unlock(resIdCollection)); - ASSERT_FALSE(locker.unlock(resIdDatabase)); - ASSERT_FALSE(locker.unlockGlobal()); - - ASSERT(locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo)); - - // Things shouldn't be locked anymore. - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - ASSERT_FALSE(locker.isLocked()); - - // Destructor should succeed since the locker's state should be empty. -} - -TEST_F(LockerImplTest, restoreWriteUnitOfWork) { - auto opCtx = makeOperationContext(); - - Locker::LockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - const ResourceId resIdDatabase(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - const ResourceId resIdCollection( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - locker.beginWriteUnitOfWork(); - // Lock some stuff. 
- locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resIdDatabase, MODE_IX); - locker.lock(resIdCollection, MODE_IX); - // Unlock them so that they will be pending to unlock. - ASSERT_FALSE(locker.unlock(resIdCollection)); - ASSERT_FALSE(locker.unlock(resIdDatabase)); - ASSERT_FALSE(locker.unlockGlobal()); - - ASSERT(locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo)); - - // Things shouldn't be locked anymore. - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - ASSERT_FALSE(locker.isLocked()); - - // Restore lock state. - locker.restoreWriteUnitOfWorkAndLock(opCtx.get(), lockInfo); - - // Make sure things were re-locked. - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); - ASSERT(locker.isLocked()); - - locker.endWriteUnitOfWork(); - - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - ASSERT_FALSE(locker.isLocked()); -} - -TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithoutUnlock) { - auto opCtx = makeOperationContext(); - - Locker::WUOWLockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - const ResourceId resIdDatabase(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - const ResourceId resIdCollection( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - const ResourceId resIdCollection2( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection2")); - - locker.beginWriteUnitOfWork(); - // Lock some stuff. - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resIdDatabase, MODE_IX); - locker.lock(resIdCollection, MODE_X); - - // Recursive global lock. - locker.lockGlobal(opCtx.get(), MODE_IX); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 2U); - - ASSERT_FALSE(locker.unlockGlobal()); - - // Unlock them so that they will be pending to unlock. - ASSERT_FALSE(locker.unlock(resIdCollection)); - ASSERT_FALSE(locker.unlock(resIdDatabase)); - ASSERT_FALSE(locker.unlockGlobal()); - ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 3UL); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); - - locker.releaseWriteUnitOfWork(&lockInfo); - ASSERT_EQ(lockInfo.unlockPendingLocks.size(), 3UL); - - // Things should still be locked. - ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); - ASSERT(locker.isLocked()); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); - - // The locker is no longer participating the two-phase locking. 
- ASSERT_FALSE(locker.inAWriteUnitOfWork()); - ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 0UL); - - // Start a new WUOW with the same locker. Any new locks acquired in the new WUOW - // should participate two-phase locking. - { - locker.beginWriteUnitOfWork(); - - // Grab new locks inside the new WUOW. - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resIdDatabase, MODE_IX); - locker.lock(resIdCollection2, MODE_IX); - - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection2)); - ASSERT(locker.isLocked()); - - locker.unlock(resIdCollection2); - ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 1UL); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 2U); - locker.unlock(resIdDatabase); - ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 1UL); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); - locker.unlockGlobal(); - ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 1UL); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); - locker.endWriteUnitOfWork(); - } - ASSERT_FALSE(locker.inAWriteUnitOfWork()); - ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 0UL); - - ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); - ASSERT(locker.isLocked()); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); - // The new locks has been released. - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection2)); - - // Restore lock state. - locker.restoreWriteUnitOfWork(lockInfo); - - ASSERT_TRUE(locker.inAWriteUnitOfWork()); - - // Make sure things are still locked. 
- ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection)); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); - ASSERT(locker.isLocked()); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); - - locker.endWriteUnitOfWork(); - - ASSERT_FALSE(locker.inAWriteUnitOfWork()); - - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection2)); - ASSERT_FALSE(locker.isLocked()); - ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 0U); - ASSERT(locker.getRequestsForTest().find(resourceIdGlobal).finished()); -} - -TEST_F(LockerImplTest, releaseAndRestoreReadOnlyWriteUnitOfWork) { - auto opCtx = makeOperationContext(); - - Locker::LockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - const ResourceId resIdDatabase(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - const ResourceId resIdCollection( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - // Snapshot transactions delay shared locks as well. - locker.setSharedLocksShouldTwoPhaseLock(true); - - locker.beginWriteUnitOfWork(); - // Lock some stuff in IS mode. - locker.lockGlobal(opCtx.get(), MODE_IS); - locker.lock(resIdDatabase, MODE_IS); - locker.lock(resIdCollection, MODE_IS); - // Unlock them. - ASSERT_FALSE(locker.unlock(resIdCollection)); - ASSERT_FALSE(locker.unlock(resIdDatabase)); - ASSERT_FALSE(locker.unlockGlobal()); - ASSERT_EQ(3u, locker.numResourcesToUnlockAtEndUnitOfWorkForTest()); - - // Things shouldn't be locked anymore. - ASSERT_TRUE(locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo)); - - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - ASSERT_FALSE(locker.isLocked()); - - // Restore lock state. - locker.restoreWriteUnitOfWorkAndLock(opCtx.get(), lockInfo); - - ASSERT_EQUALS(MODE_IS, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_IS, locker.getLockMode(resIdCollection)); - ASSERT_TRUE(locker.isLocked()); - - locker.endWriteUnitOfWork(); - - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - ASSERT_FALSE(locker.isLocked()); -} - -TEST_F(LockerImplTest, releaseAndRestoreEmptyWriteUnitOfWork) { - Locker::LockSnapshot lockInfo; - auto opCtx = makeOperationContext(); - LockerImpl locker(opCtx->getServiceContext()); - - // Snapshot transactions delay shared locks as well. - locker.setSharedLocksShouldTwoPhaseLock(true); - - locker.beginWriteUnitOfWork(); - - // Nothing to yield. - ASSERT_FALSE(locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo)); - ASSERT_FALSE(locker.isLocked()); - - // Restore lock state. 
- locker.restoreWriteUnitOfWorkAndLock(nullptr, lockInfo); - ASSERT_FALSE(locker.isLocked()); - - locker.endWriteUnitOfWork(); - ASSERT_FALSE(locker.isLocked()); -} - -TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithRecursiveLocks) { - auto opCtx = makeOperationContext(); - - Locker::LockSnapshot lockInfo; - - LockerImpl locker(opCtx->getServiceContext()); - - const ResourceId resIdDatabase(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - const ResourceId resIdCollection( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - locker.beginWriteUnitOfWork(); - // Lock some stuff. - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resIdDatabase, MODE_IX); - locker.lock(resIdCollection, MODE_IX); - // Recursively lock them again with a weaker mode. - locker.lockGlobal(opCtx.get(), MODE_IS); - locker.lock(resIdDatabase, MODE_IS); - locker.lock(resIdCollection, MODE_IS); - - // Make sure locks are converted. - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); - ASSERT_TRUE(locker.isWriteLocked()); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 2U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 2U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->recursiveCount, 2U); - - // Unlock them so that they will be pending to unlock. - ASSERT_FALSE(locker.unlock(resIdCollection)); - ASSERT_FALSE(locker.unlock(resIdDatabase)); - ASSERT_FALSE(locker.unlockGlobal()); - // Make sure locks are still acquired in the correct mode. - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); - ASSERT_TRUE(locker.isWriteLocked()); - // Make sure unlocking converted locks decrements the locks' recursiveCount instead of - // incrementing unlockPending. - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->unlockPending, 0U); - - // Unlock again so unlockPending == recursiveCount. - ASSERT_FALSE(locker.unlock(resIdCollection)); - ASSERT_FALSE(locker.unlock(resIdDatabase)); - ASSERT_FALSE(locker.unlockGlobal()); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->unlockPending, 1U); - - ASSERT(locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo)); - - // Things shouldn't be locked anymore. 
- ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - ASSERT_FALSE(locker.isLocked()); - - // Restore lock state. - locker.restoreWriteUnitOfWorkAndLock(opCtx.get(), lockInfo); - - // Make sure things were re-locked in the correct mode. - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); - ASSERT_TRUE(locker.isWriteLocked()); - // Make sure locks were coalesced after restore and are pending to unlock as before. - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->recursiveCount, 1U); - ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->unlockPending, 1U); - - locker.endWriteUnitOfWork(); - - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); - ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); - ASSERT_FALSE(locker.isLocked()); -} - -TEST_F(LockerImplTest, DefaultLocker) { - auto opCtx = makeOperationContext(); - - const ResourceId resId(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resId, MODE_X); - - // Make sure only Global and TestDB resources are locked. - Locker::LockerInfo info; - locker.getLockerInfo(&info, boost::none); - ASSERT(!info.waitingResource.isValid()); - ASSERT_EQUALS(2U, info.locks.size()); - ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType()); - ASSERT_EQUALS(resId, info.locks[1].resourceId); - - ASSERT(locker.unlockGlobal()); -} - -TEST_F(LockerImplTest, SharedLocksShouldTwoPhaseLockIsTrue) { - // Test that when setSharedLocksShouldTwoPhaseLock is true and we are in a WUOW, unlock on IS - // and S locks are postponed until endWriteUnitOfWork() is called. Mode IX and X locks always - // participate in two-phased locking, regardless of the setting. 
- - auto opCtx = makeOperationContext(); - - const ResourceId resId1(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB1")); - const ResourceId resId2(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB2")); - const ResourceId resId3( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection3")); - const ResourceId resId4( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection4")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.setSharedLocksShouldTwoPhaseLock(true); - - locker.lockGlobal(opCtx.get(), MODE_IS); - ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IS); - - locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS); - - locker.lock(resId1, MODE_IS); - locker.lock(resId2, MODE_IX); - locker.lock(resId3, MODE_S); - locker.lock(resId4, MODE_X); - ASSERT_EQ(locker.getLockMode(resId1), MODE_IS); - ASSERT_EQ(locker.getLockMode(resId2), MODE_IX); - ASSERT_EQ(locker.getLockMode(resId3), MODE_S); - ASSERT_EQ(locker.getLockMode(resId4), MODE_X); - - locker.beginWriteUnitOfWork(); - - ASSERT_FALSE(locker.unlock(resId1)); - ASSERT_FALSE(locker.unlock(resId2)); - ASSERT_FALSE(locker.unlock(resId3)); - ASSERT_FALSE(locker.unlock(resId4)); - ASSERT_EQ(locker.getLockMode(resId1), MODE_IS); - ASSERT_EQ(locker.getLockMode(resId2), MODE_IX); - ASSERT_EQ(locker.getLockMode(resId3), MODE_S); - ASSERT_EQ(locker.getLockMode(resId4), MODE_X); - - ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS); - - ASSERT_FALSE(locker.unlockGlobal()); - ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IS); - - locker.endWriteUnitOfWork(); - - ASSERT_EQ(locker.getLockMode(resId1), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resId2), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resId3), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resId4), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_NONE); -} - -TEST_F(LockerImplTest, ModeIXAndXLockParticipatesInTwoPhaseLocking) { - // Unlock on mode IX and X locks during a WUOW should always be postponed until - // endWriteUnitOfWork() is called. Mode IS and S locks should unlock immediately. 
- - auto opCtx = makeOperationContext(); - - const ResourceId resId1(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB1")); - const ResourceId resId2(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB2")); - const ResourceId resId3( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection3")); - const ResourceId resId4( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection4")); - - LockerImpl locker(opCtx->getServiceContext()); - - locker.lockGlobal(opCtx.get(), MODE_IX); - ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IX); - - locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); - - locker.lock(resId1, MODE_IS); - locker.lock(resId2, MODE_IX); - locker.lock(resId3, MODE_S); - locker.lock(resId4, MODE_X); - ASSERT_EQ(locker.getLockMode(resId1), MODE_IS); - ASSERT_EQ(locker.getLockMode(resId2), MODE_IX); - ASSERT_EQ(locker.getLockMode(resId3), MODE_S); - ASSERT_EQ(locker.getLockMode(resId4), MODE_X); - - locker.beginWriteUnitOfWork(); - - ASSERT_TRUE(locker.unlock(resId1)); - ASSERT_FALSE(locker.unlock(resId2)); - ASSERT_TRUE(locker.unlock(resId3)); - ASSERT_FALSE(locker.unlock(resId4)); - ASSERT_EQ(locker.getLockMode(resId1), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resId2), MODE_IX); - ASSERT_EQ(locker.getLockMode(resId3), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resId4), MODE_X); - - ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); - - ASSERT_FALSE(locker.unlockGlobal()); - ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IX); - - locker.endWriteUnitOfWork(); - - ASSERT_EQ(locker.getLockMode(resId2), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resId4), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_NONE); -} - -TEST_F(LockerImplTest, RSTLUnlocksWithNestedLock) { - auto opCtx = makeOperationContext(); - LockerImpl locker(opCtx->getServiceContext()); - - locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); - - locker.beginWriteUnitOfWork(); - - // Do a nested lock acquisition. 
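Before the nested RSTL acquisition cases continue below, here is a minimal standalone sketch (toy types only, not MongoDB's `Locker` API) of the two-phase rule the preceding tests exercise: inside a write unit of work, unlocking an IX or X lock only marks it pending and the real release happens at `endWriteUnitOfWork()`, while IS and S locks release immediately unless shared two-phase locking is enabled.

```cpp
// Toy illustration of two-phase unlock deferral; names and types are invented for this sketch.
#include <cassert>
#include <iterator>
#include <map>
#include <string>

enum Mode { MODE_NONE, MODE_IS, MODE_IX, MODE_S, MODE_X };

struct ToyLocker {
    struct Request { Mode mode = MODE_NONE; int unlockPending = 0; };
    std::map<std::string, Request> requests;
    bool inWuow = false;
    bool sharedTwoPhase = false;  // analogous to setSharedLocksShouldTwoPhaseLock(true)

    void lock(const std::string& res, Mode m) { requests[res].mode = m; }

    // Returns true if the lock was released immediately, false if the release was deferred.
    bool unlock(const std::string& res) {
        auto it = requests.find(res);
        if (it == requests.end())
            return false;
        bool delay = (it->second.mode == MODE_IX || it->second.mode == MODE_X) ||
                     (sharedTwoPhase && (it->second.mode == MODE_IS || it->second.mode == MODE_S));
        if (inWuow && delay) {
            it->second.unlockPending++;  // deferred until endWriteUnitOfWork()
            return false;
        }
        requests.erase(it);
        return true;
    }

    void beginWriteUnitOfWork() { inWuow = true; }

    void endWriteUnitOfWork() {
        inWuow = false;
        for (auto it = requests.begin(); it != requests.end();)
            it = it->second.unlockPending ? requests.erase(it) : std::next(it);
    }
};

int main() {
    ToyLocker locker;
    locker.lock("db", MODE_IX);
    locker.lock("coll", MODE_S);
    locker.beginWriteUnitOfWork();
    assert(!locker.unlock("db"));    // IX: deferred, still held
    assert(locker.unlock("coll"));   // S: released immediately (shared two-phase is off)
    locker.endWriteUnitOfWork();
    assert(locker.requests.count("db") == 0);  // released only when the WUOW ends
}
```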
- locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); - - ASSERT(locker.unlockRSTLforPrepare()); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - - ASSERT_FALSE(locker.unlockRSTLforPrepare()); - - locker.endWriteUnitOfWork(); - - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - - ASSERT_FALSE(locker.unlockRSTLforPrepare()); - ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); -} - -TEST_F(LockerImplTest, RSTLModeIXWithTwoPhaseLockingCanBeUnlockedWhenPrepared) { - auto opCtx = makeOperationContext(); - LockerImpl locker(opCtx->getServiceContext()); - - locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); - - locker.beginWriteUnitOfWork(); - - ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); - - ASSERT(locker.unlockRSTLforPrepare()); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - - ASSERT_FALSE(locker.unlockRSTLforPrepare()); - - locker.endWriteUnitOfWork(); - - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - - ASSERT_FALSE(locker.unlockRSTLforPrepare()); - ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); -} - -TEST_F(LockerImplTest, RSTLModeISWithTwoPhaseLockingCanBeUnlockedWhenPrepared) { - auto opCtx = makeOperationContext(); - LockerImpl locker(opCtx->getServiceContext()); - - locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS); - - locker.beginWriteUnitOfWork(); - - ASSERT(locker.unlockRSTLforPrepare()); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - - ASSERT_FALSE(locker.unlockRSTLforPrepare()); - - locker.endWriteUnitOfWork(); - - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - - ASSERT_FALSE(locker.unlockRSTLforPrepare()); - ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); -} - -TEST_F(LockerImplTest, RSTLTwoPhaseLockingBehaviorModeIS) { - auto opCtx = makeOperationContext(); - LockerImpl locker(opCtx->getServiceContext()); - - locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS); - - locker.beginWriteUnitOfWork(); - - ASSERT_TRUE(locker.unlock(resourceIdReplicationStateTransitionLock)); - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - - ASSERT_FALSE(locker.unlockRSTLforPrepare()); - - locker.endWriteUnitOfWork(); - - ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); - - ASSERT_FALSE(locker.unlockRSTLforPrepare()); - ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); -} - -TEST_F(LockerImplTest, OverrideLockRequestTimeout) { - auto opCtx = makeOperationContext(); - - const ResourceId resIdFirstDB(RESOURCE_DATABASE, DatabaseName(boost::none, "FirstDB")); - const ResourceId resIdSecondDB(RESOURCE_DATABASE, DatabaseName(boost::none, "SecondDB")); - - LockerImpl locker1(opCtx->getServiceContext()); - LockerImpl locker2(opCtx->getServiceContext()); - - // Set up locker2 to override lock requests' provided timeout if greater than 
1000 milliseconds. - locker2.setMaxLockTimeout(Milliseconds(1000)); - - locker1.lockGlobal(opCtx.get(), MODE_IX); - locker2.lockGlobal(opCtx.get(), MODE_IX); - - // locker1 acquires FirstDB under an exclusive lock. - locker1.lock(resIdFirstDB, MODE_X); - ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_X)); - - // locker2's attempt to acquire FirstDB with unlimited wait time should timeout after 1000 - // milliseconds and throw because _maxLockRequestTimeout is set to 1000 milliseconds. - ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resIdFirstDB, MODE_X, Date_t::max()), - AssertionException, - ErrorCodes::LockTimeout); - - // locker2's attempt to acquire an uncontested lock should still succeed normally. - locker2.lock(resIdSecondDB, MODE_X); - - ASSERT_TRUE(locker1.unlock(resIdFirstDB)); - ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_NONE)); - ASSERT_TRUE(locker2.unlock(resIdSecondDB)); - ASSERT_TRUE(locker2.isLockHeldForMode(resIdSecondDB, MODE_NONE)); - - ASSERT(locker1.unlockGlobal()); - ASSERT(locker2.unlockGlobal()); -} - -TEST_F(LockerImplTest, DoNotWaitForLockAcquisition) { - auto opCtx = makeOperationContext(); - - const ResourceId resIdFirstDB(RESOURCE_DATABASE, DatabaseName(boost::none, "FirstDB")); - const ResourceId resIdSecondDB(RESOURCE_DATABASE, DatabaseName(boost::none, "SecondDB")); - - LockerImpl locker1(opCtx->getServiceContext()); - LockerImpl locker2(opCtx->getServiceContext()); - - // Set up locker2 to immediately return if a lock is unavailable, regardless of supplied - // deadlines in the lock request. - locker2.setMaxLockTimeout(Milliseconds(0)); - - locker1.lockGlobal(opCtx.get(), MODE_IX); - locker2.lockGlobal(opCtx.get(), MODE_IX); - - // locker1 acquires FirstDB under an exclusive lock. - locker1.lock(resIdFirstDB, MODE_X); - ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_X)); - - // locker2's attempt to acquire FirstDB with unlimited wait time should fail immediately and - // throw because _maxLockRequestTimeout was set to 0. - ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resIdFirstDB, MODE_X, Date_t::max()), - AssertionException, - ErrorCodes::LockTimeout); - - // locker2's attempt to acquire an uncontested lock should still succeed normally. - locker2.lock(resIdSecondDB, MODE_X); - - ASSERT_TRUE(locker1.unlock(resIdFirstDB)); - ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_NONE)); - ASSERT_TRUE(locker2.unlock(resIdSecondDB)); - ASSERT_TRUE(locker2.isLockHeldForMode(resIdSecondDB, MODE_NONE)); - - ASSERT(locker1.unlockGlobal()); - ASSERT(locker2.unlockGlobal()); -} - -namespace { -/** - * Helper function to determine if 'lockerInfo' contains a lock with ResourceId 'resourceId' and - * lock mode 'mode' within 'lockerInfo.locks'. - */ -bool lockerInfoContainsLock(const Locker::LockerInfo& lockerInfo, - const ResourceId& resourceId, - const LockMode& mode) { - return (1U == - std::count_if(lockerInfo.locks.begin(), - lockerInfo.locks.end(), - [&resourceId, &mode](const Locker::OneLock& lock) { - return lock.resourceId == resourceId && lock.mode == mode; - })); -} -} // namespace - -TEST_F(LockerImplTest, GetLockerInfoShouldReportHeldLocks) { - auto opCtx = makeOperationContext(); - - const ResourceId dbId(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - const ResourceId collectionId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - // Take an exclusive lock on the collection. 
- LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(dbId, MODE_IX); - locker.lock(collectionId, MODE_X); - - // Assert it shows up in the output of getLockerInfo(). - Locker::LockerInfo lockerInfo; - locker.getLockerInfo(&lockerInfo, boost::none); - - ASSERT(lockerInfoContainsLock(lockerInfo, resourceIdGlobal, MODE_IX)); - ASSERT(lockerInfoContainsLock(lockerInfo, dbId, MODE_IX)); - ASSERT(lockerInfoContainsLock(lockerInfo, collectionId, MODE_X)); - ASSERT_EQ(3U, lockerInfo.locks.size()); - - ASSERT(locker.unlock(collectionId)); - ASSERT(locker.unlock(dbId)); - ASSERT(locker.unlockGlobal()); -} - -TEST_F(LockerImplTest, GetLockerInfoShouldReportPendingLocks) { - auto opCtx = makeOperationContext(); - - const ResourceId dbId(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")); - const ResourceId collectionId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - // Take an exclusive lock on the collection. - LockerImpl successfulLocker(opCtx->getServiceContext()); - successfulLocker.lockGlobal(opCtx.get(), MODE_IX); - successfulLocker.lock(dbId, MODE_IX); - successfulLocker.lock(collectionId, MODE_X); - - // Now attempt to get conflicting locks. - LockerImpl conflictingLocker(opCtx->getServiceContext()); - conflictingLocker.lockGlobal(opCtx.get(), MODE_IS); - conflictingLocker.lock(dbId, MODE_IS); - ASSERT_EQ(LOCK_WAITING, - conflictingLocker.lockBeginForTest(nullptr /* opCtx */, collectionId, MODE_IS)); - - // Assert the held locks show up in the output of getLockerInfo(). - Locker::LockerInfo lockerInfo; - conflictingLocker.getLockerInfo(&lockerInfo, boost::none); - ASSERT(lockerInfoContainsLock(lockerInfo, resourceIdGlobal, MODE_IS)); - ASSERT(lockerInfoContainsLock(lockerInfo, dbId, MODE_IS)); - ASSERT(lockerInfoContainsLock(lockerInfo, collectionId, MODE_IS)); - ASSERT_EQ(3U, lockerInfo.locks.size()); - - // Assert it reports that it is waiting for the collection lock. - ASSERT_EQ(collectionId, lockerInfo.waitingResource); - - // Make sure it no longer reports waiting once unlocked. - ASSERT(successfulLocker.unlock(collectionId)); - ASSERT(successfulLocker.unlock(dbId)); - ASSERT(successfulLocker.unlockGlobal()); - - conflictingLocker.lockCompleteForTest( - nullptr /* opCtx */, collectionId, MODE_IS, Date_t::now()); - - conflictingLocker.getLockerInfo(&lockerInfo, boost::none); - ASSERT_FALSE(lockerInfo.waitingResource.isValid()); - - ASSERT(conflictingLocker.unlock(collectionId)); - ASSERT(conflictingLocker.unlock(dbId)); - ASSERT(conflictingLocker.unlockGlobal()); -} - -TEST_F(LockerImplTest, GetLockerInfoShouldSubtractBase) { - auto opCtx = makeOperationContext(); - auto locker = opCtx->lockState(); - const ResourceId dbId(RESOURCE_DATABASE, DatabaseName(boost::none, "SubtractTestDB")); - - auto numAcquisitions = [&](boost::optional baseStats) { - Locker::LockerInfo info; - locker->getLockerInfo(&info, baseStats); - return info.stats.get(dbId, MODE_IX).numAcquisitions; - }; - auto getBaseStats = [&] { - return CurOp::get(opCtx.get())->getLockStatsBase(); - }; - - locker->lockGlobal(opCtx.get(), MODE_IX); - - // Obtain a lock before any other ops have been pushed to the stack. - locker->lock(dbId, MODE_IX); - locker->unlock(dbId); - - ASSERT_EQUALS(numAcquisitions(getBaseStats()), 1) << "The acquisition should be reported"; - - // Push another op to the stack and obtain a lock. 
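Before the test pushes further `CurOp`s below, note that the base-stats subtraction it verifies boils down to snapshotting the running totals when a sub-operation starts and reporting totals minus that snapshot. A tiny illustrative sketch, with a made-up `ToyLockStats` standing in for the real lock-stats type:

```cpp
// Hypothetical sketch of the "subtract a base snapshot" idea; not the MongoDB stats types.
#include <cassert>

struct ToyLockStats {
    long long numAcquisitions = 0;
    void subtract(const ToyLockStats& base) { numAcquisitions -= base.numAcquisitions; }
};

int main() {
    ToyLockStats running;                  // totals accumulated by the whole operation
    running.numAcquisitions = 1;           // acquisition made before the sub-operation starts

    ToyLockStats base = running;           // snapshot taken when the sub-operation is pushed
    running.numAcquisitions += 1;          // acquisition made by the sub-operation itself

    ToyLockStats report = running;
    report.subtract(base);                 // report only what happened after the snapshot
    assert(report.numAcquisitions == 1);
    assert(running.numAcquisitions == 2);  // full totals remain available when no base is given
}
```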
- CurOp superOp; - superOp.push(opCtx.get()); - locker->lock(dbId, MODE_IX); - locker->unlock(dbId); - - ASSERT_EQUALS(numAcquisitions(getBaseStats()), 1) - << "Only superOp's acquisition should be reported"; - - // Then push another op to the stack and obtain another lock. - CurOp subOp; - subOp.push(opCtx.get()); - locker->lock(dbId, MODE_IX); - locker->unlock(dbId); - - ASSERT_EQUALS(numAcquisitions(getBaseStats()), 1) - << "Only the latest acquisition should be reported"; - - ASSERT_EQUALS(numAcquisitions({}), 3) - << "All acquisitions should be reported when no base is subtracted out."; - - ASSERT(locker->unlockGlobal()); -} - -TEST_F(LockerImplTest, ReaquireLockPendingUnlock) { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IS); - - locker.lock(resId, MODE_X); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); - - locker.beginWriteUnitOfWork(); - - ASSERT_FALSE(locker.unlock(resId)); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); - - // Reacquire lock pending unlock. - locker.lock(resId, MODE_X); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 0); - - locker.endWriteUnitOfWork(); - - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); - - locker.unlockGlobal(); -} - -TEST_F(LockerImplTest, AcquireLockPendingUnlockWithCoveredMode) { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IS); - - locker.lock(resId, MODE_X); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); - - locker.beginWriteUnitOfWork(); - - ASSERT_FALSE(locker.unlock(resId)); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); - - // Attempt to lock the resource with a mode that is covered by the existing mode. 
- locker.lock(resId, MODE_IX); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 0); - - locker.endWriteUnitOfWork(); - - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); - - locker.unlockGlobal(); -} - -TEST_F(LockerImplTest, ConvertLockPendingUnlock) { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IS); - - locker.lock(resId, MODE_IX); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); - - locker.beginWriteUnitOfWork(); - - ASSERT_FALSE(locker.unlock(resId)); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 1); - - // Convert lock pending unlock. - locker.lock(resId, MODE_X); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 2); - - locker.endWriteUnitOfWork(); - - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 0); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); - - locker.unlockGlobal(); -} - -TEST_F(LockerImplTest, ConvertLockPendingUnlockAndUnlock) { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IS); - - locker.lock(resId, MODE_IX); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); - - locker.beginWriteUnitOfWork(); - - ASSERT_FALSE(locker.unlock(resId)); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 1); - - // Convert lock pending unlock. - locker.lock(resId, MODE_X); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 2); - - // Unlock the lock conversion. - ASSERT_FALSE(locker.unlock(resId)); - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); - // Make sure we still hold X lock and unlock the weaker mode to decrement recursiveCount instead - // of incrementing unlockPending. 
- ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); - ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 1); - - locker.endWriteUnitOfWork(); - - ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0); - ASSERT(locker.getRequestsForTest().find(resId).finished()); - ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_NONE)); - - locker.unlockGlobal(); -} - -TEST_F(LockerImplTest, SetTicketAcquisitionForLockRAIIType) { - auto opCtx = makeOperationContext(); - - // By default, ticket acquisition is required. - ASSERT_TRUE(opCtx->lockState()->shouldWaitForTicket()); - - { - ScopedAdmissionPriorityForLock setTicketAquisition(opCtx->lockState(), - AdmissionContext::Priority::kImmediate); - ASSERT_FALSE(opCtx->lockState()->shouldWaitForTicket()); - } - - ASSERT_TRUE(opCtx->lockState()->shouldWaitForTicket()); - - opCtx->lockState()->setAdmissionPriority(AdmissionContext::Priority::kImmediate); - ASSERT_FALSE(opCtx->lockState()->shouldWaitForTicket()); - - { - ScopedAdmissionPriorityForLock setTicketAquisition(opCtx->lockState(), - AdmissionContext::Priority::kImmediate); - ASSERT_FALSE(opCtx->lockState()->shouldWaitForTicket()); - } - - ASSERT_FALSE(opCtx->lockState()->shouldWaitForTicket()); -} - -// This test exercises the lock dumping code in ~LockerImpl in case locks are held on destruction. -DEATH_TEST_F(LockerImplTest, - LocksHeldOnDestructionCausesALocksDump, - "Operation ending while holding locks.") { - auto opCtx = makeOperationContext(); - - const ResourceId resId( - RESOURCE_COLLECTION, - NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); - - LockerImpl locker(opCtx->getServiceContext()); - locker.lockGlobal(opCtx.get(), MODE_IX); - locker.lock(resId, MODE_X); - - ASSERT(locker.isLockHeldForMode(resId, MODE_X)); - ASSERT(locker.isLockHeldForMode(resId, MODE_S)); - - // 'locker' destructor should invariant because locks are still held. -} - -} // namespace mongo diff --git a/src/mongo/db/concurrency/lock_stats.cpp b/src/mongo/db/concurrency/lock_stats.cpp index faffad4b46cd2..ddc853f99ea2d 100644 --- a/src/mongo/db/concurrency/lock_stats.cpp +++ b/src/mongo/db/concurrency/lock_stats.cpp @@ -27,11 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/concurrency/lock_stats.h" +#include #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/lock_stats.h" namespace mongo { diff --git a/src/mongo/db/concurrency/lock_stats.h b/src/mongo/db/concurrency/lock_stats.h index 8b1ecb34c4fd9..7244fa29cc219 100644 --- a/src/mongo/db/concurrency/lock_stats.h +++ b/src/mongo/db/concurrency/lock_stats.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" #include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/db/concurrency/lock_stats_test.cpp b/src/mongo/db/concurrency/lock_stats_test.cpp index b1a32ddccb154..6dbd04f21bf9e 100644 --- a/src/mongo/db/concurrency/lock_stats_test.cpp +++ b/src/mongo/db/concurrency/lock_stats_test.cpp @@ -27,14 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/concurrency/lock_manager_test_help.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/lock_stats.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { +namespace { class LockStatsTest : public ServiceContextTest {}; @@ -46,7 +62,9 @@ TEST_F(LockStatsTest, NoWait) { resetGlobalLockStats(); auto opCtx = makeOperationContext(); - LockerForTests locker(opCtx.get(), MODE_IX); + LockerImpl locker(getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + ON_BLOCK_EXIT([&] { locker.unlockGlobal(); }); locker.lock(resId, MODE_X); locker.unlock(resId); @@ -67,12 +85,16 @@ TEST_F(LockStatsTest, Wait) { resetGlobalLockStats(); auto opCtx = makeOperationContext(); - LockerForTests locker(opCtx.get(), MODE_IX); + LockerImpl locker(getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + ON_BLOCK_EXIT([&] { locker.unlockGlobal(); }); locker.lock(resId, MODE_X); { // This will block - LockerForTests lockerConflict(opCtx.get(), MODE_IX); + LockerImpl lockerConflict(getServiceContext()); + lockerConflict.lockGlobal(opCtx.get(), MODE_IX); + ON_BLOCK_EXIT([&] { lockerConflict.unlockGlobal(); }); ASSERT_EQUALS(LOCK_WAITING, lockerConflict.lockBeginForTest(opCtx.get(), resId, MODE_S)); // Sleep 1 millisecond so the wait time passes @@ -103,7 +125,9 @@ TEST_F(LockStatsTest, Reporting) { resetGlobalLockStats(); auto opCtx = makeOperationContext(); - LockerForTests locker(opCtx.get(), MODE_IX); + LockerImpl locker(getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + ON_BLOCK_EXIT([&] { locker.unlockGlobal(); }); locker.lock(resId, MODE_X); locker.unlock(resId); @@ -123,11 +147,15 @@ TEST_F(LockStatsTest, Subtraction) { resetGlobalLockStats(); auto opCtx = makeOperationContext(); - LockerForTests locker(opCtx.get(), MODE_IX); + LockerImpl locker(getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + ON_BLOCK_EXIT([&] { locker.unlockGlobal(); }); locker.lock(resId, MODE_X); { - LockerForTests lockerConflict(opCtx.get(), MODE_IX); + LockerImpl lockerConflict(getServiceContext()); + lockerConflict.lockGlobal(opCtx.get(), MODE_IX); + ON_BLOCK_EXIT([&] { lockerConflict.unlockGlobal(); }); ASSERT_THROWS_CODE( lockerConflict.lock(opCtx.get(), resId, MODE_S, Date_t::now() + Milliseconds(5)), AssertionException, @@ -141,7 +169,9 @@ TEST_F(LockStatsTest, Subtraction) { ASSERT_GREATER_THAN(stats.get(resId, MODE_S).combinedWaitTimeMicros, 0); { - LockerForTests lockerConflict(opCtx.get(), MODE_IX); + LockerImpl lockerConflict(getServiceContext()); + lockerConflict.lockGlobal(opCtx.get(), MODE_IX); + ON_BLOCK_EXIT([&] { lockerConflict.unlockGlobal(); }); ASSERT_THROWS_CODE( lockerConflict.lock(opCtx.get(), resId, MODE_S, Date_t::now() + Milliseconds(5)), AssertionException, @@ -240,4 +270,5 @@ TEST_F(LockStatsTest, ServerStatus) { .getIntField("w")); } +} // namespace } // namespace mongo diff --git 
a/src/mongo/db/concurrency/locker.cpp b/src/mongo/db/concurrency/locker.cpp new file mode 100644 index 0000000000000..5aa5684745040 --- /dev/null +++ b/src/mongo/db/concurrency/locker.cpp @@ -0,0 +1,36 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/concurrency/locker.h" + +namespace mongo { + +Locker::Locker() = default; + +} // namespace mongo diff --git a/src/mongo/db/concurrency/locker.h b/src/mongo/db/concurrency/locker.h index ae697e63b2388..64ff6cab270b8 100644 --- a/src/mongo/db/concurrency/locker.h +++ b/src/mongo/db/concurrency/locker.h @@ -29,15 +29,28 @@ #pragma once +#include +#include #include // For UINT_MAX +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/db/concurrency/flow_control_ticketholder.h" #include "mongo/db/concurrency/lock_manager.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/lock_stats.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -46,7 +59,9 @@ namespace mongo { * request (transaction). * * Lock/unlock methods must always be called from a single thread. + * */ +// TODO (SERVER-26879): Get rid of LockerImpl, devirtualise Locker and make it final class Locker { Locker(const Locker&) = delete; Locker& operator=(const Locker&) = delete; @@ -57,19 +72,7 @@ class Locker { public: using LockTimeoutCallback = std::function; - virtual ~Locker() {} - - /** - * Returns true if this is an instance of LockerNoop. Because LockerNoop doesn't implement many - * methods, some users may need to check this first to find out what is safe to call. LockerNoop - * is only used in unittests and for a brief period at startup, so you can assume you hold the - * equivalent of a MODE_X lock when using it. - * - * TODO get rid of this once we kill LockerNoop. 
- */ - virtual bool isNoop() const { - return false; - } + virtual ~Locker() = default; /** * State for reporting the number of active and queued reader and writer clients. @@ -163,7 +166,7 @@ class Locker { /** * Decrements the reference count on the global lock. If the reference count on the * global lock hits zero, the transaction is over, and unlockGlobal unlocks all other locks - * except for RESOURCE_MUTEX locks. + * except for RESOURCE_MUTEX and RESOURCE_DDL_* locks. * * @return true if this is the last endTransaction call (i.e., the global lock was * released); false if there are still references on the global lock. This value @@ -349,8 +352,7 @@ class Locker { boost::optional lockStatsBase) const = 0; /** - * Returns boost::none if this is an instance of LockerNoop, or a populated LockerInfo - * otherwise. + * Returns diagnostics information for the locker. */ virtual boost::optional getLockerInfo( boost::optional lockStatsBase) const = 0; @@ -381,23 +383,29 @@ class Locker { }; /** - * Retrieves all locks held by this transaction, other than RESOURCE_MUTEX locks, and what mode - * they're held in. - * Stores these locks in 'stateOut', destroying any previous state. Unlocks all locks - * held by this transaction. This functionality is used for yielding, which is - * voluntary/cooperative lock release and reacquisition in order to allow for interleaving - * of otherwise conflicting long-running operations. + * Determines if this operation can safely release its locks for yielding. This must precede a + * call to saveLockStateAndUnlock() at the risk of failing any invariants. + * + * Returns false when no locks are held. + */ + virtual bool canSaveLockState() = 0; + + /** + * Retrieves all locks held by this transaction, other than RESOURCE_MUTEX and RESOURCE_DDL_* + * locks, and what mode they're held in. + * + * Unlocks all locks held by this transaction, and stores them in 'stateOut'. This functionality + * is used for yielding, which is voluntary/cooperative lock release and reacquisition in order + * to allow for interleaving of otherwise conflicting long-running operations. The LockSnapshot + * can then be passed to restoreLockState() after yielding to reacquire all released locks. * * This functionality is also used for releasing locks on databases and collections * when cursors are dormant and waiting for a getMore request. * - * Returns true if locks are released. It is expected that restoreLockerImpl will be called - * in the future. - * - * Returns false if locks are not released. restoreLockState(...) does not need to be - * called in this case. + * Callers are expected to check if locks are yieldable first by calling canSaveLockState(), + * otherwise this function will invariant. */ - virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) = 0; + virtual void saveLockStateAndUnlock(LockSnapshot* stateOut) = 0; /** * Re-locks all locks whose state was stored in 'stateToRestore'. @@ -410,7 +418,7 @@ class Locker { * WUOW has been released. restoreWriteUnitOfWorkAndLock reacquires the locks and resumes the * two-phase locking behavior of WUOW. */ - virtual bool releaseWriteUnitOfWorkAndUnlock(LockSnapshot* stateOut) = 0; + virtual void releaseWriteUnitOfWorkAndUnlock(LockSnapshot* stateOut) = 0; virtual void restoreWriteUnitOfWorkAndLock(OperationContext* opCtx, const LockSnapshot& stateToRestore) = 0; @@ -564,7 +572,7 @@ class Locker { } protected: - Locker() {} + Locker(); /** * The number of callers that are guarding from lock interruptions. 
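The `canSaveLockState()` / `saveLockStateAndUnlock()` / `restoreLockState()` contract described in the `Locker` interface changes above amounts to a check-then-snapshot-then-reacquire protocol around a yield. A standalone sketch with hypothetical toy types (not the real `Locker` interface):

```cpp
// Toy illustration of the yield protocol: check first, snapshot and unlock, then restore.
#include <cassert>
#include <string>
#include <utility>
#include <vector>

struct ToyLockSnapshot { std::vector<std::string> locks; };

struct ToyLocker {
    std::vector<std::string> held;

    bool canSaveLockState() const { return !held.empty(); }

    // Precondition: canSaveLockState(). Moves every held lock into the snapshot and releases it.
    void saveLockStateAndUnlock(ToyLockSnapshot* out) {
        assert(canSaveLockState());
        out->locks = std::move(held);
        held.clear();  // no locks are held while yielded
    }

    void restoreLockState(const ToyLockSnapshot& snap) { held = snap.locks; }
};

void yieldAndDoBlockingWork(ToyLocker& locker) {
    if (!locker.canSaveLockState())
        return;  // nothing yieldable; skip the save/restore dance
    ToyLockSnapshot snap;
    locker.saveLockStateAndUnlock(&snap);
    // ... blocking work happens here with no locks held ...
    locker.restoreLockState(snap);
}

int main() {
    ToyLocker locker;
    locker.held = {"Global/IS", "TestDB/IS"};
    yieldAndDoBlockingWork(locker);
    assert(locker.held.size() == 2);  // the same locks are held again after the yield
}
```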
diff --git a/src/mongo/db/concurrency/locker_impl.cpp b/src/mongo/db/concurrency/locker_impl.cpp new file mode 100644 index 0000000000000..5b5b0297c97e5 --- /dev/null +++ b/src/mongo/db/concurrency/locker_impl.cpp @@ -0,0 +1,1274 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/concurrency/locker_impl.h" + +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/ticketholder_manager.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/new.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/background.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/concurrency/ticketholder.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/testing_proctor.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault + +namespace mongo { + +MONGO_FAIL_POINT_DEFINE(failNonIntentLocksIfWaitNeeded); +MONGO_FAIL_POINT_DEFINE(enableTestOnlyFlagforRSTL); + +namespace { + +// Ignore data races in certain functions when running with TSAN. For performance reasons, +// diagnostic commands are expected to race with concurrent lock acquisitions while gathering +// statistics. 
+#if __has_feature(thread_sanitizer) +#define MONGO_TSAN_IGNORE __attribute__((no_sanitize("thread"))) +#else +#define MONGO_TSAN_IGNORE +#endif + +/** + * Tracks global (across all clients) lock acquisition statistics, partitioned into multiple + * buckets to minimize concurrent access conflicts. + * + * Each client has a LockerId that monotonically increases across all client instances. The + * LockerId % 8 is used to index into one of 8 LockStats instances. These LockStats objects must be + * atomically accessed, so maintaining 8 that are indexed by LockerId reduces client conflicts and + * improves concurrent write access. A reader, to collect global lock statistics for reporting, will + * sum the results of all 8 disjoint 'buckets' of stats. + */ +class PartitionedInstanceWideLockStats { + PartitionedInstanceWideLockStats(const PartitionedInstanceWideLockStats&) = delete; + PartitionedInstanceWideLockStats& operator=(const PartitionedInstanceWideLockStats&) = delete; + +public: + PartitionedInstanceWideLockStats() {} + + void recordAcquisition(LockerId id, ResourceId resId, LockMode mode) { + _get(id).recordAcquisition(resId, mode); + } + + void recordWait(LockerId id, ResourceId resId, LockMode mode) { + _get(id).recordWait(resId, mode); + } + + void recordWaitTime(LockerId id, ResourceId resId, LockMode mode, uint64_t waitMicros) { + _get(id).recordWaitTime(resId, mode, waitMicros); + } + + void report(SingleThreadedLockStats* outStats) const { + for (int i = 0; i < NumPartitions; i++) { + outStats->append(_partitions[i].stats); + } + } + + void reset() { + for (int i = 0; i < NumPartitions; i++) { + _partitions[i].stats.reset(); + } + } + +private: + // This alignment is a best effort approach to ensure that each partition falls on a + // separate page/cache line in order to avoid false sharing. + struct alignas(stdx::hardware_destructive_interference_size) AlignedLockStats { + AtomicLockStats stats; + }; + + enum { NumPartitions = 8 }; + + + AtomicLockStats& _get(LockerId id) { + return _partitions[id % NumPartitions].stats; + } + + + AlignedLockStats _partitions[NumPartitions]; +}; + +// How often (in millis) to check for deadlock if a lock has not been granted for some time +const Milliseconds MaxWaitTime = Milliseconds(500); + +// Dispenses unique LockerId identifiers +AtomicWord<LockerId> idCounter(0); + +// Tracks lock statistics across all Locker instances. Distributes stats across multiple buckets +// indexed by LockerId in order to minimize concurrent access conflicts.
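`PartitionedInstanceWideLockStats` above (whose process-wide `globalStats` instance is declared next) follows a standard sharded-counter pattern. A stripped-down, standard-library-only sketch of the same idea, using a plain atomic counter per slot and a hard-coded 64-byte alignment where the real code uses `stdx::hardware_destructive_interference_size`:

```cpp
// Writers index a slot by id modulo N; a reader sums all slots. Illustrative names only.
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <thread>
#include <vector>

class PartitionedCounter {
public:
    void increment(uint64_t id) {
        _slots[id % kNumPartitions].value.fetch_add(1, std::memory_order_relaxed);
    }

    uint64_t total() const {
        uint64_t sum = 0;
        for (const auto& slot : _slots)
            sum += slot.value.load(std::memory_order_relaxed);
        return sum;
    }

private:
    static constexpr std::size_t kNumPartitions = 8;
    // Give each slot its own cache line so concurrent writers do not false-share.
    struct alignas(64) Slot {
        std::atomic<uint64_t> value{0};
    };
    Slot _slots[kNumPartitions];
};

int main() {
    PartitionedCounter counter;
    std::vector<std::thread> threads;
    for (uint64_t id = 0; id < 8; ++id)
        threads.emplace_back([&counter, id] {
            for (int i = 0; i < 10000; ++i)
                counter.increment(id);
        });
    for (auto& t : threads)
        t.join();
    std::cout << counter.total() << "\n";  // prints 80000
}
```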
+PartitionedInstanceWideLockStats globalStats; + +} // namespace + +LockManager* getGlobalLockManager() { + auto serviceContext = getGlobalServiceContext(); + invariant(serviceContext); + return LockManager::get(serviceContext); +} + +bool LockerImpl::_shouldDelayUnlock(ResourceId resId, LockMode mode) const { + switch (resId.getType()) { + case RESOURCE_MUTEX: + case RESOURCE_DDL_DATABASE: + case RESOURCE_DDL_COLLECTION: + return false; + + case RESOURCE_GLOBAL: + case RESOURCE_TENANT: + case RESOURCE_DATABASE: + case RESOURCE_COLLECTION: + case RESOURCE_METADATA: + break; + + default: + MONGO_UNREACHABLE; + } + + switch (mode) { + case MODE_X: + case MODE_IX: + return true; + + case MODE_IS: + case MODE_S: + return _sharedLocksShouldTwoPhaseLock; + + default: + MONGO_UNREACHABLE; + } +} + +bool LockerImpl::isW() const { + return getLockMode(resourceIdGlobal) == MODE_X; +} + +bool LockerImpl::isR() const { + return getLockMode(resourceIdGlobal) == MODE_S; +} + +bool LockerImpl::isLocked() const { + return getLockMode(resourceIdGlobal) != MODE_NONE; +} + +bool LockerImpl::isWriteLocked() const { + return isLockHeldForMode(resourceIdGlobal, MODE_IX); +} + +bool LockerImpl::isReadLocked() const { + return isLockHeldForMode(resourceIdGlobal, MODE_IS); +} + +bool LockerImpl::isRSTLExclusive() const { + return getLockMode(resourceIdReplicationStateTransitionLock) == MODE_X; +} + +bool LockerImpl::isRSTLLocked() const { + return getLockMode(resourceIdReplicationStateTransitionLock) != MODE_NONE; +} + +void LockerImpl::dump() const { + struct Entry { + ResourceId key; + LockRequest::Status status; + LockMode mode; + unsigned int recursiveCount; + unsigned int unlockPending; + + BSONObj toBSON() const { + BSONObjBuilder b; + b.append("key", key.toString()); + b.append("status", lockRequestStatusName(status)); + b.append("recursiveCount", static_cast(recursiveCount)); + b.append("unlockPending", static_cast(unlockPending)); + b.append("mode", modeName(mode)); + return b.obj(); + } + std::string toString() const { + return tojson(toBSON()); + } + }; + std::vector entries; + { + auto lg = stdx::lock_guard(_lock); + for (auto it = _requests.begin(); !it.finished(); it.next()) + entries.push_back( + {it.key(), it->status, it->mode, it->recursiveCount, it->unlockPending}); + } + LOGV2(20523, + "Locker id {id} status: {requests}", + "Locker status", + "id"_attr = _id, + "requests"_attr = entries); +} + +void LockerImpl::_dumpLockerAndLockManagerRequests() { + // Log the _requests that this locker holds. This will provide identifying information to cross + // reference with the LockManager dump below for extra information. + dump(); + + // Log the LockManager's lock information. Given the locker 'dump()' above, we should be able to + // easily cross reference to find the lock info matching this operation. The LockManager can + // safely access (under internal locks) the LockRequest data that the locker cannot. 
+ BSONObjBuilder builder; + auto lockToClientMap = LockManager::getLockToClientMap(getGlobalServiceContext()); + getGlobalLockManager()->getLockInfoBSON(lockToClientMap, &builder); + auto lockInfo = builder.done(); + LOGV2_ERROR(5736000, "Operation ending while holding locks.", "LockInfo"_attr = lockInfo); +} + + +// +// CondVarLockGrantNotification +// + +CondVarLockGrantNotification::CondVarLockGrantNotification() { + clear(); +} + +void CondVarLockGrantNotification::clear() { + _result = LOCK_INVALID; +} + +LockResult CondVarLockGrantNotification::wait(Milliseconds timeout) { + stdx::unique_lock lock(_mutex); + return _cond.wait_for( + lock, timeout.toSystemDuration(), [this] { return _result != LOCK_INVALID; }) + ? _result + : LOCK_TIMEOUT; +} + +LockResult CondVarLockGrantNotification::wait(OperationContext* opCtx, Milliseconds timeout) { + invariant(opCtx); + stdx::unique_lock lock(_mutex); + if (opCtx->waitForConditionOrInterruptFor( + _cond, lock, timeout, [this] { return _result != LOCK_INVALID; })) { + // Because waitForConditionOrInterruptFor evaluates the predicate before checking for + // interrupt, it is possible that a killed operation can acquire a lock if the request is + // granted quickly. For that reason, it is necessary to check if the operation has been + // killed at least once before accepting the lock grant. + opCtx->checkForInterrupt(); + return _result; + } + return LOCK_TIMEOUT; +} + +void CondVarLockGrantNotification::notify(ResourceId resId, LockResult result) { + stdx::unique_lock lock(_mutex); + invariant(_result == LOCK_INVALID); + _result = result; + + _cond.notify_all(); +} + +// +// Locker +// + +LockerImpl::LockerImpl(ServiceContext* serviceCtx) + : _id(idCounter.addAndFetch(1)), + _wuowNestingLevel(0), + _threadId(stdx::this_thread::get_id()), + _ticketHolderManager(TicketHolderManager::get(serviceCtx)) {} + +stdx::thread::id LockerImpl::getThreadId() const { + return _threadId; +} + +void LockerImpl::updateThreadIdToCurrentThread() { + _threadId = stdx::this_thread::get_id(); +} + +void LockerImpl::unsetThreadId() { + _threadId = stdx::thread::id(); // Reset to represent a non-executing thread. +} + +LockerImpl::~LockerImpl() { + // Cannot delete the Locker while there are still outstanding requests, because the + // LockManager may attempt to access deleted memory. Besides it is probably incorrect + // to delete with unaccounted locks anyways. + invariant(!inAWriteUnitOfWork()); + invariant(_numResourcesToUnlockAtEndUnitOfWork == 0); + invariant(!_ticket || !_ticket->valid()); + + if (!_requests.empty()) { + _dumpLockerAndLockManagerRequests(); + } + invariant(_requests.empty()); + + invariant(_modeForTicket == MODE_NONE); +} + +Locker::ClientState LockerImpl::getClientState() const { + auto state = _clientState.load(); + if (state == kActiveReader && hasLockPending()) + state = kQueuedReader; + if (state == kActiveWriter && hasLockPending()) + state = kQueuedWriter; + + return state; +} + +void LockerImpl::reacquireTicket(OperationContext* opCtx) { + invariant(_modeForTicket != MODE_NONE); + auto clientState = _clientState.load(); + const bool reader = isSharedLockMode(_modeForTicket); + + // Ensure that either we don't have a ticket, or the current ticket mode matches the lock mode. + invariant(clientState == kInactive || (clientState == kActiveReader && reader) || + (clientState == kActiveWriter && !reader)); + + // If we already have a ticket, there's nothing to do. 
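Before the ticket reacquisition logic continues, note that `CondVarLockGrantNotification` above is essentially a one-shot result mailbox built on a condition variable: `wait()` blocks until a result is published or the timeout expires, and `notify()` publishes the result and wakes the waiter. A self-contained analogue using only standard types (names are illustrative, not the MongoDB API):

```cpp
// Minimal grant-notification sketch: waiter blocks with a timeout, granter publishes a result.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>

enum class ToyLockResult { kOk, kTimeout };

class ToyGrantNotification {
public:
    // Returns the published result, or kTimeout if none arrives within 'timeout'.
    ToyLockResult wait(std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lock(_mutex);
        if (_cond.wait_for(lock, timeout, [this] { return _result.has_value(); }))
            return *_result;
        return ToyLockResult::kTimeout;
    }

    void notify(ToyLockResult result) {
        std::lock_guard<std::mutex> lock(_mutex);
        _result = result;  // publish the result before waking the waiter
        _cond.notify_all();
    }

private:
    std::mutex _mutex;
    std::condition_variable _cond;
    std::optional<ToyLockResult> _result;
};

int main() {
    ToyGrantNotification notification;
    std::thread granter([&notification] {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        notification.notify(ToyLockResult::kOk);
    });
    auto result = notification.wait(std::chrono::seconds(1));
    granter.join();
    std::cout << (result == ToyLockResult::kOk ? "granted" : "timed out") << "\n";
}
```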
+ if (clientState != kInactive) + return; + + if (_acquireTicket(opCtx, _modeForTicket, Date_t::now())) { + return; + } + + do { + for (auto it = _requests.begin(); it; it.next()) { + invariant(it->mode == LockMode::MODE_IS || it->mode == LockMode::MODE_IX); + opCtx->checkForInterrupt(); + + // If we've reached this point then that means we tried to acquire a ticket but were + // unsuccessful, implying that tickets are currently exhausted. Additionally, since + // we're holding an IS or IX lock for this resource, any pending requests for the same + // resource must be S or X and will not be able to be granted. Thus, since such a + // pending lock request may also be holding a ticket, if there are any present we fail + // this ticket reacquisition in order to avoid a deadlock. + uassert(ErrorCodes::LockTimeout, + fmt::format("Unable to acquire ticket with mode '{}' due to detected lock " + "conflict for resource {}", + _modeForTicket, + it.key().toString()), + !getGlobalLockManager()->hasConflictingRequests(it.key(), it.objAddr())); + } + } while (!_acquireTicket(opCtx, _modeForTicket, Date_t::now() + Milliseconds{100})); +} + +bool LockerImpl::_acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline) { + // Upon startup, the holder is not guaranteed to be initialized. + auto holder = _ticketHolderManager ? _ticketHolderManager->getTicketHolder(mode) : nullptr; + const bool reader = isSharedLockMode(mode); + + if (!shouldWaitForTicket() && holder) { + holder->reportImmediatePriorityAdmission(); + } else if (mode != MODE_X && mode != MODE_NONE && holder) { + // MODE_X is exclusive of all other locks, thus acquiring a ticket is unnecessary. + _clientState.store(reader ? kQueuedReader : kQueuedWriter); + // If the ticket wait is interrupted, restore the state of the client. + ScopeGuard restoreStateOnErrorGuard([&] { _clientState.store(kInactive); }); + + // Acquiring a ticket is a potentially blocking operation. This must not be called after a + // transaction timestamp has been set, indicating this transaction has created an oplog + // hole. + invariant(!opCtx->recoveryUnit()->isTimestamped()); + + if (auto ticket = holder->waitForTicketUntil( + _uninterruptibleLocksRequested ? nullptr : opCtx, &_admCtx, deadline)) { + _ticket = std::move(*ticket); + } else { + return false; + } + restoreStateOnErrorGuard.dismiss(); + } + + _clientState.store(reader ? kActiveReader : kActiveWriter); + return true; +} + +void LockerImpl::lockGlobal(OperationContext* opCtx, LockMode mode, Date_t deadline) { + dassert(isLocked() == (_modeForTicket != MODE_NONE)); + if (_modeForTicket == MODE_NONE) { + if (_uninterruptibleLocksRequested) { + // Ignore deadline. + invariant(_acquireTicket(opCtx, mode, Date_t::max())); + } else { + auto beforeAcquire = Date_t::now(); + uassert(ErrorCodes::LockTimeout, + str::stream() << "Unable to acquire ticket with mode '" << mode + << "' within a max lock request timeout of '" + << Date_t::now() - beforeAcquire << "' milliseconds.", + _acquireTicket(opCtx, mode, deadline)); + } + _modeForTicket = mode; + } else if (TestingProctor::instance().isEnabled() && !isModeCovered(mode, _modeForTicket)) { + LOGV2_FATAL( + 6614500, + "Ticket held does not cover requested mode for global lock. 
Global lock upgrades are " + "not allowed", + "held"_attr = modeName(_modeForTicket), + "requested"_attr = modeName(mode)); + } + + const LockResult result = _lockBegin(opCtx, resourceIdGlobal, mode); + // Fast, uncontended path + if (result == LOCK_OK) + return; + + invariant(result == LOCK_WAITING); + _lockComplete(opCtx, resourceIdGlobal, mode, deadline, nullptr); +} + +bool LockerImpl::unlockGlobal() { + if (!unlock(resourceIdGlobal)) { + return false; + } + + invariant(!inAWriteUnitOfWork()); + + LockRequestsMap::Iterator it = _requests.begin(); + while (!it.finished()) { + // If we're here we should only have one reference to any lock. It is a programming + // error for any lock used with multi-granularity locking to have more references than + // the global lock, because every scope starts by calling lockGlobal. + const auto resType = it.key().getType(); + if (resType == RESOURCE_GLOBAL || resType == RESOURCE_MUTEX || + resType == RESOURCE_DDL_DATABASE || resType == RESOURCE_DDL_COLLECTION) { + it.next(); + } else { + invariant(_unlockImpl(&it)); + } + } + + return true; +} + +void LockerImpl::beginWriteUnitOfWork() { + _wuowNestingLevel++; +} + +void LockerImpl::endWriteUnitOfWork() { + invariant(_wuowNestingLevel > 0); + + if (--_wuowNestingLevel > 0) { + // Don't do anything unless leaving outermost WUOW. + return; + } + + LockRequestsMap::Iterator it = _requests.begin(); + while (_numResourcesToUnlockAtEndUnitOfWork > 0) { + if (it->unlockPending) { + invariant(!it.finished()); + _numResourcesToUnlockAtEndUnitOfWork--; + } + while (it->unlockPending > 0) { + // If a lock is converted, unlock() may be called multiple times on a resource within + // the same WriteUnitOfWork. All such unlock() requests must thus be fulfilled here. + it->unlockPending--; + unlock(it.key()); + } + it.next(); + } +} + +void LockerImpl::releaseWriteUnitOfWork(WUOWLockSnapshot* stateOut) { + stateOut->wuowNestingLevel = _wuowNestingLevel; + _wuowNestingLevel = 0; + + for (auto it = _requests.begin(); _numResourcesToUnlockAtEndUnitOfWork > 0; it.next()) { + if (it->unlockPending) { + while (it->unlockPending) { + it->unlockPending--; + stateOut->unlockPendingLocks.push_back({it.key(), it->mode}); + } + _numResourcesToUnlockAtEndUnitOfWork--; + } + } +} + +void LockerImpl::restoreWriteUnitOfWork(const WUOWLockSnapshot& stateToRestore) { + invariant(_numResourcesToUnlockAtEndUnitOfWork == 0); + invariant(!inAWriteUnitOfWork()); + + for (auto& lock : stateToRestore.unlockPendingLocks) { + auto it = _requests.begin(); + while (it && !(it.key() == lock.resourceId && it->mode == lock.mode)) { + it.next(); + } + invariant(!it.finished()); + if (!it->unlockPending) { + _numResourcesToUnlockAtEndUnitOfWork++; + } + it->unlockPending++; + } + // Equivalent to call beginWriteUnitOfWork() multiple times. + _wuowNestingLevel = stateToRestore.wuowNestingLevel; +} + +void LockerImpl::releaseWriteUnitOfWorkAndUnlock(LockSnapshot* stateOut) { + // Only the global WUOW can be released, since we never need to release and restore + // nested WUOW's. Thus we don't have to remember the nesting level. + invariant(_wuowNestingLevel == 1); + --_wuowNestingLevel; + invariant(!isGlobalLockedRecursively()); + + // All locks should be pending to unlock. + invariant(_requests.size() == _numResourcesToUnlockAtEndUnitOfWork); + for (auto it = _requests.begin(); it; it.next()) { + // No converted lock so we don't need to unlock more than once. 
+ invariant(it->unlockPending == 1); + it->unlockPending--; + } + _numResourcesToUnlockAtEndUnitOfWork = 0; + + saveLockStateAndUnlock(stateOut); +} + +void LockerImpl::restoreWriteUnitOfWorkAndLock(OperationContext* opCtx, + const LockSnapshot& stateToRestore) { + if (stateToRestore.globalMode != MODE_NONE) { + restoreLockState(opCtx, stateToRestore); + } + + invariant(_numResourcesToUnlockAtEndUnitOfWork == 0); + for (auto it = _requests.begin(); it; it.next()) { + invariant(_shouldDelayUnlock(it.key(), (it->mode))); + invariant(it->unlockPending == 0); + it->unlockPending++; + } + _numResourcesToUnlockAtEndUnitOfWork = static_cast(_requests.size()); + + beginWriteUnitOfWork(); +} + +void LockerImpl::lock(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline) { + // `lockGlobal` must be called to lock `resourceIdGlobal`. + invariant(resId != resourceIdGlobal); + + const LockResult result = _lockBegin(opCtx, resId, mode); + + // Fast, uncontended path + if (result == LOCK_OK) + return; + + invariant(result == LOCK_WAITING); + _lockComplete(opCtx, resId, mode, deadline, nullptr); +} + +void LockerImpl::downgrade(ResourceId resId, LockMode newMode) { + LockRequestsMap::Iterator it = _requests.find(resId); + getGlobalLockManager()->downgrade(it.objAddr(), newMode); +} + +bool LockerImpl::unlock(ResourceId resId) { + LockRequestsMap::Iterator it = _requests.find(resId); + + // Don't attempt to unlock twice. This can happen when an interrupted global lock is destructed. + if (it.finished()) + return false; + + if (inAWriteUnitOfWork() && _shouldDelayUnlock(it.key(), (it->mode))) { + // Only delay unlocking if the lock is not acquired more than once. Otherwise, we can simply + // call _unlockImpl to decrement recursiveCount instead of incrementing unlockPending. This + // is safe because the lock is still being held in the strongest mode necessary. + if (it->recursiveCount > 1) { + // Invariant that the lock is still being held. + invariant(!_unlockImpl(&it)); + return false; + } + if (!it->unlockPending) { + _numResourcesToUnlockAtEndUnitOfWork++; + } + it->unlockPending++; + // unlockPending will be incremented if a lock is converted or acquired in the same mode + // recursively, and unlock() is called multiple times on one ResourceId. + invariant(it->unlockPending <= it->recursiveCount); + return false; + } + + return _unlockImpl(&it); +} + +bool LockerImpl::unlockRSTLforPrepare() { + auto rstlRequest = _requests.find(resourceIdReplicationStateTransitionLock); + + // Don't attempt to unlock twice. This can happen when an interrupted global lock is destructed. + if (!rstlRequest) + return false; + + // If the RSTL is 'unlockPending' and we are fully unlocking it, then we do not want to + // attempt to unlock the RSTL when the WUOW ends, since it will already be unlocked. + if (rstlRequest->unlockPending) { + rstlRequest->unlockPending = 0; + _numResourcesToUnlockAtEndUnitOfWork--; + } + + // Reset the recursiveCount to 1 so that we fully unlock the RSTL. Since it will be fully + // unlocked, any future unlocks will be noops anyways. 
+ rstlRequest->recursiveCount = 1; + + return _unlockImpl(&rstlRequest); +} + +LockMode LockerImpl::getLockMode(ResourceId resId) const { + scoped_spinlock scopedLock(_lock); + + const LockRequestsMap::ConstIterator it = _requests.find(resId); + if (!it) + return MODE_NONE; + + return it->mode; +} + +bool LockerImpl::isLockHeldForMode(ResourceId resId, LockMode mode) const { + return isModeCovered(mode, getLockMode(resId)); +} + +boost::optional LockerImpl::_globalAndTenantLocksImplyDBOrCollectionLockedForMode( + const boost::optional& tenantId, LockMode lockMode) const { + if (isW()) { + return true; + } + if (isR() && isSharedLockMode(lockMode)) { + return true; + } + if (tenantId) { + const ResourceId tenantResourceId{ResourceType::RESOURCE_TENANT, *tenantId}; + switch (getLockMode(tenantResourceId)) { + case MODE_NONE: + return false; + case MODE_X: + return true; + case MODE_S: + return isSharedLockMode(lockMode); + case MODE_IX: + case MODE_IS: + break; + default: + MONGO_UNREACHABLE_TASSERT(6671502); + } + } + return boost::none; +} + +bool LockerImpl::isDbLockedForMode(const DatabaseName& dbName, LockMode mode) const { + if (auto lockedForMode = + _globalAndTenantLocksImplyDBOrCollectionLockedForMode(dbName.tenantId(), mode); + lockedForMode) { + return *lockedForMode; + } + + const ResourceId resIdDb(RESOURCE_DATABASE, dbName); + return isLockHeldForMode(resIdDb, mode); +} + +bool LockerImpl::isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const { + invariant(nss.coll().size()); + + if (!shouldConflictWithSecondaryBatchApplication()) + return true; + + if (auto lockedForMode = + _globalAndTenantLocksImplyDBOrCollectionLockedForMode(nss.tenantId(), mode); + lockedForMode) { + return *lockedForMode; + } + + const ResourceId resIdDb(RESOURCE_DATABASE, nss.dbName()); + LockMode dbMode = getLockMode(resIdDb); + + switch (dbMode) { + case MODE_NONE: + return false; + case MODE_X: + return true; + case MODE_S: + return isSharedLockMode(mode); + case MODE_IX: + case MODE_IS: { + const ResourceId resIdColl(RESOURCE_COLLECTION, nss); + return isLockHeldForMode(resIdColl, mode); + } break; + case LockModesCount: + break; + } + + MONGO_UNREACHABLE; + return false; +} + +bool LockerImpl::wasGlobalLockTakenForWrite() const { + return _globalLockMode & ((1 << MODE_IX) | (1 << MODE_X)); +} + +bool LockerImpl::wasGlobalLockTakenInModeConflictingWithWrites() const { + return _wasGlobalLockTakenInModeConflictingWithWrites.load(); +} + +bool LockerImpl::wasGlobalLockTaken() const { + return _globalLockMode != (1 << MODE_NONE); +} + +void LockerImpl::setGlobalLockTakenInMode(LockMode mode) { + _globalLockMode |= (1 << mode); + + if (mode == MODE_IX || mode == MODE_X || mode == MODE_S) { + _wasGlobalLockTakenInModeConflictingWithWrites.store(true); + } +} + +ResourceId LockerImpl::getWaitingResource() const { + scoped_spinlock scopedLock(_lock); + + return _waitingResource; +} + +MONGO_TSAN_IGNORE +void LockerImpl::getLockerInfo(LockerInfo* lockerInfo, + const boost::optional lockStatsBase) const { + invariant(lockerInfo); + + // Zero-out the contents + lockerInfo->locks.clear(); + lockerInfo->waitingResource = ResourceId(); + lockerInfo->stats.reset(); + + _lock.lock(); + LockRequestsMap::ConstIterator it = _requests.begin(); + while (!it.finished()) { + OneLock info; + info.resourceId = it.key(); + info.mode = it->mode; + + lockerInfo->locks.push_back(info); + it.next(); + } + _lock.unlock(); + + std::sort(lockerInfo->locks.begin(), lockerInfo->locks.end()); + + 
lockerInfo->waitingResource = getWaitingResource(); + lockerInfo->stats.append(_stats); + + // lockStatsBase is a snapshot of lock stats taken when the sub-operation starts. Only + // sub-operations have lockStatsBase. + if (lockStatsBase) + // Adjust the lock stats by subtracting the lockStatsBase. No mutex is needed because + // lockStatsBase is immutable. + lockerInfo->stats.subtract(*lockStatsBase); +} + +boost::optional LockerImpl::getLockerInfo( + const boost::optional lockStatsBase) const { + Locker::LockerInfo lockerInfo; + getLockerInfo(&lockerInfo, lockStatsBase); + return std::move(lockerInfo); +} + +void LockerImpl::saveLockStateAndUnlock(Locker::LockSnapshot* stateOut) { + // We shouldn't be saving and restoring lock state from inside a WriteUnitOfWork. + invariant(!inAWriteUnitOfWork()); + + // Callers must guarantee that they can actually yield. + if (MONGO_unlikely(!canSaveLockState())) { + dump(); + LOGV2_FATAL(7033800, + "Attempted to yield locks but we are either not holding locks, holding a " + "strong MODE_S/MODE_X lock, or holding one recursively"); + } + + // Clear out whatever is in stateOut. + stateOut->locks.clear(); + stateOut->globalMode = MODE_NONE; + + // First, we look at the global lock. There is special handling for this so we store it + // separately from the more pedestrian locks. + auto globalRequest = _requests.find(resourceIdGlobal); + invariant(globalRequest); + + stateOut->globalMode = globalRequest->mode; + invariant(unlock(resourceIdGlobal)); + + // Next, the non-global locks. + for (LockRequestsMap::Iterator it = _requests.begin(); !it.finished(); it.next()) { + const ResourceId resId = it.key(); + const ResourceType resType = resId.getType(); + if (resType == RESOURCE_MUTEX || resType == RESOURCE_DDL_DATABASE || + resType == RESOURCE_DDL_COLLECTION) + continue; + + // We should never have to save and restore metadata locks. + invariant(RESOURCE_DATABASE == resType || RESOURCE_COLLECTION == resType || + resId == resourceIdParallelBatchWriterMode || RESOURCE_TENANT == resType || + resId == resourceIdFeatureCompatibilityVersion || + resId == resourceIdReplicationStateTransitionLock); + + // And, stuff the info into the out parameter. + OneLock info; + info.resourceId = resId; + info.mode = it->mode; + stateOut->locks.push_back(info); + invariant(unlock(resId)); + } + invariant(!isLocked()); + + // Sort locks by ResourceId. They'll later be acquired in this canonical locking order. + std::sort(stateOut->locks.begin(), stateOut->locks.end()); +} + +void LockerImpl::restoreLockState(OperationContext* opCtx, const Locker::LockSnapshot& state) { + // We shouldn't be restoring lock state from inside a WriteUnitOfWork. + invariant(!inAWriteUnitOfWork()); + invariant(_modeForTicket == MODE_NONE); + invariant(_clientState.load() == kInactive); + + getFlowControlTicket(opCtx, state.globalMode); + + std::vector::const_iterator it = state.locks.begin(); + // If we locked the PBWM, it must be locked before the + // resourceIdFeatureCompatibilityVersion, resourceIdReplicationStateTransitionLock, and + // resourceIdGlobal resources. + if (it != state.locks.end() && it->resourceId == resourceIdParallelBatchWriterMode) { + lock(opCtx, it->resourceId, it->mode); + it++; + } + + // If we locked the FCV lock, it must be locked before the + // resourceIdReplicationStateTransitionLock and resourceIdGlobal resources. 
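`saveLockStateAndUnlock()` and `restoreLockState()` follow a simple yield/restore contract: record what is held, release everything, then reacquire in one canonical order (special resources such as the PBWM, FCV, and RSTL first, then the global lock, then the remaining resources sorted by ResourceId) so that two restoring operations cannot deadlock each other. Below is a toy sketch of the sorted-reacquisition step only, using plain integers in place of ResourceId; it is illustrative, not the server's code.

```cpp
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical miniature of a lock snapshot: integer ids stand in for ResourceId, and lower
// ids are assumed to rank earlier in the canonical locking order.
struct OneLock {
    int resourceId;
    std::string mode;
    bool operator<(const OneLock& other) const {
        return resourceId < other.resourceId;
    }
};

struct Snapshot {
    std::string globalMode;
    std::vector<OneLock> locks;
};

int main() {
    Snapshot snap{"IX", {{42, "IX"}, {7, "IX"}, {3, "IX"}}};

    // Reacquire in canonical (sorted) order, global lock first, mirroring restoreLockState().
    std::sort(snap.locks.begin(), snap.locks.end());
    std::cout << "lock global in " << snap.globalMode << "\n";
    for (const auto& l : snap.locks)
        std::cout << "lock resource " << l.resourceId << " in " << l.mode << "\n";
}
```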
+ if (it != state.locks.end() && it->resourceId == resourceIdFeatureCompatibilityVersion) { + lock(opCtx, it->resourceId, it->mode); + it++; + } + + // If we locked the RSTL, it must be locked before the resourceIdGlobal resource. + if (it != state.locks.end() && it->resourceId == resourceIdReplicationStateTransitionLock) { + lock(opCtx, it->resourceId, it->mode); + it++; + } + + lockGlobal(opCtx, state.globalMode); + for (; it != state.locks.end(); it++) { + // Ensures we don't acquire locks out of order which can lead to deadlock. + invariant(it->resourceId.getType() != ResourceType::RESOURCE_GLOBAL); + lock(opCtx, it->resourceId, it->mode); + } + invariant(_modeForTicket != MODE_NONE); +} + +MONGO_TSAN_IGNORE +FlowControlTicketholder::CurOp LockerImpl::getFlowControlStats() const { + return _flowControlStats; +} + +MONGO_TSAN_IGNORE +LockResult LockerImpl::_lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode) { + dassert(!getWaitingResource().isValid()); + + // Operations which are holding open an oplog hole cannot block when acquiring locks. + const ResourceType resType = resId.getType(); + if (opCtx && !shouldAllowLockAcquisitionOnTimestampedUnitOfWork() && + resType != RESOURCE_METADATA && resType != RESOURCE_MUTEX && + resType != RESOURCE_DDL_DATABASE && resType != RESOURCE_DDL_COLLECTION) { + invariant(!opCtx->recoveryUnit()->isTimestamped(), + str::stream() + << "Operation holding open an oplog hole tried to acquire locks. ResourceId: " + << resId << ", mode: " << modeName(mode)); + } + + LockRequest* request; + bool isNew = true; + + LockRequestsMap::Iterator it = _requests.find(resId); + if (!it) { + scoped_spinlock scopedLock(_lock); + LockRequestsMap::Iterator itNew = _requests.insert(resId); + itNew->initNew(this, &_notify); + + request = itNew.objAddr(); + } else { + request = it.objAddr(); + isNew = false; + } + + // If unlockPending is nonzero, that means a LockRequest already exists for this resource but + // is planned to be released at the end of this WUOW due to two-phase locking. Rather than + // unlocking the existing request, we can reuse it if the existing mode matches the new mode. + if (request->unlockPending && isModeCovered(mode, request->mode)) { + request->unlockPending--; + if (!request->unlockPending) { + _numResourcesToUnlockAtEndUnitOfWork--; + } + return LOCK_OK; + } + + // Making this call here will record lock re-acquisitions and conversions as well. + globalStats.recordAcquisition(_id, resId, mode); + _stats.recordAcquisition(resId, mode); + + // Give priority to the full modes for Global, PBWM, and RSTL resources so we don't stall global + // operations such as shutdown or stepdown. + if (resType == RESOURCE_GLOBAL) { + if (mode == MODE_S || mode == MODE_X) { + request->enqueueAtFront = true; + request->compatibleFirst = true; + } + } else if (resType != RESOURCE_MUTEX && resType != RESOURCE_DDL_DATABASE && + resType != RESOURCE_DDL_COLLECTION) { + // This is all sanity checks that the global locks are always be acquired + // before any other lock has been acquired and they must be in sync with the nesting. + if (kDebugBuild) { + const LockRequestsMap::Iterator itGlobal = _requests.find(resourceIdGlobal); + invariant(itGlobal->recursiveCount > 0); + invariant(itGlobal->mode != MODE_NONE); + }; + } + + // The notification object must be cleared before we invoke the lock manager, because + // otherwise we might reset state if the lock becomes granted very fast. + _notify.clear(); + + LockResult result = isNew ? 
getGlobalLockManager()->lock(resId, request, mode) + : getGlobalLockManager()->convert(resId, request, mode); + + if (result == LOCK_WAITING) { + globalStats.recordWait(_id, resId, mode); + _stats.recordWait(resId, mode); + _setWaitingResource(resId); + } else if (result == LOCK_OK && opCtx && _uninterruptibleLocksRequested == 0) { + // Lock acquisitions are not allowed to succeed when opCtx is marked as interrupted, unless + // the caller requested an uninterruptible lock. + auto interruptStatus = opCtx->checkForInterruptNoAssert(); + if (!interruptStatus.isOK()) { + auto unlockIt = _requests.find(resId); + invariant(unlockIt); + _unlockImpl(&unlockIt); + uassertStatusOK(interruptStatus); + } + } + + return result; +} + +void LockerImpl::_lockComplete(OperationContext* opCtx, + ResourceId resId, + LockMode mode, + Date_t deadline, + const LockTimeoutCallback& onTimeout) { + // Operations which are holding open an oplog hole cannot block when acquiring locks. Lock + // requests entering this function have been queued up and will be granted the lock as soon as + // the lock is released, which is a blocking operation. + const ResourceType resType = resId.getType(); + if (opCtx && !shouldAllowLockAcquisitionOnTimestampedUnitOfWork() && + resType != RESOURCE_METADATA && resType != RESOURCE_MUTEX && + resType != RESOURCE_DDL_DATABASE && resType != RESOURCE_DDL_COLLECTION) { + invariant(!opCtx->recoveryUnit()->isTimestamped(), + str::stream() + << "Operation holding open an oplog hole tried to acquire locks. ResourceId: " + << resId << ", mode: " << modeName(mode)); + } + + // Clean up the state on any failed lock attempts. + ScopeGuard unlockOnErrorGuard([&] { + LockRequestsMap::Iterator it = _requests.find(resId); + invariant(it); + _unlockImpl(&it); + _setWaitingResource(ResourceId()); + }); + + // This failpoint is used to time out non-intent locks if they cannot be granted immediately + // for user operations. Testing-only. + const bool isUserOperation = opCtx && opCtx->getClient()->isFromUserConnection(); + if (!_uninterruptibleLocksRequested && isUserOperation && + MONGO_unlikely(failNonIntentLocksIfWaitNeeded.shouldFail())) { + uassert(ErrorCodes::LockTimeout, + str::stream() << "Cannot immediately acquire lock '" << resId.toString() + << "'. Timing out due to failpoint.", + (mode == MODE_IS || mode == MODE_IX)); + } + + LockResult result; + Milliseconds timeout; + if (deadline == Date_t::max()) { + timeout = Milliseconds::max(); + } else if (deadline <= Date_t()) { + timeout = Milliseconds(0); + } else { + timeout = deadline - Date_t::now(); + } + timeout = std::min(timeout, _maxLockTimeout ? *_maxLockTimeout : Milliseconds::max()); + if (_uninterruptibleLocksRequested) { + timeout = Milliseconds::max(); + } + + // Don't go sleeping without bound in order to be able to report long waits. + Milliseconds waitTime = std::min(timeout, MaxWaitTime); + const uint64_t startOfTotalWaitTime = curTimeMicros64(); + uint64_t startOfCurrentWaitTime = startOfTotalWaitTime; + + while (true) { + // It is OK if this call wakes up spuriously, because we re-evaluate the remaining + // wait time anyways. + // If we have an operation context, we want to use its interruptible wait so that + // pending lock acquisitions can be cancelled, so long as no callers have requested an + // uninterruptible lock. 
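The wait loop in `_lockComplete()` converts the caller's absolute deadline into a relative timeout and then sleeps in bounded chunks, so long waits can be reported and interruption checks can run between chunks. Here is a standalone approximation of that control flow, with `std::chrono` standing in for `Date_t`/`Milliseconds` and a fixed chunk size standing in for `MaxWaitTime`; it is a sketch of the idea, not the actual implementation.

```cpp
#include <algorithm>
#include <chrono>
#include <iostream>
#include <thread>

using namespace std::chrono;

int main() {
    const auto maxChunk = milliseconds(500);   // stands in for MaxWaitTime
    const auto timeout = milliseconds(1200);   // derived from the caller's deadline
    const auto start = steady_clock::now();

    auto waitTime = std::min(timeout, maxChunk);
    while (true) {
        std::this_thread::sleep_for(waitTime);  // stands in for _notify.wait(waitTime)

        const bool granted = false;  // pretend the lock never becomes available
        if (granted)
            break;

        const auto blocked = duration_cast<milliseconds>(steady_clock::now() - start);
        waitTime = (blocked < timeout) ? std::min(timeout - blocked, maxChunk)
                                       : milliseconds(0);
        if (waitTime == milliseconds(0)) {
            std::cout << "lock acquisition timed out after " << blocked.count() << "ms\n";
            break;
        }
    }
}
```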
+ if (opCtx && _uninterruptibleLocksRequested == 0) { + result = _notify.wait(opCtx, waitTime); + } else { + result = _notify.wait(waitTime); + } + + // Account for the time spent waiting on the notification object + const uint64_t curTimeMicros = curTimeMicros64(); + const uint64_t elapsedTimeMicros = curTimeMicros - startOfCurrentWaitTime; + startOfCurrentWaitTime = curTimeMicros; + + globalStats.recordWaitTime(_id, resId, mode, elapsedTimeMicros); + _stats.recordWaitTime(resId, mode, elapsedTimeMicros); + + if (result == LOCK_OK) + break; + + // If infinite timeout was requested, just keep waiting + if (timeout == Milliseconds::max()) { + continue; + } + + const auto totalBlockTime = duration_cast( + Microseconds(int64_t(curTimeMicros - startOfTotalWaitTime))); + waitTime = (totalBlockTime < timeout) ? std::min(timeout - totalBlockTime, MaxWaitTime) + : Milliseconds(0); + + // Check if the lock acquisition has timed out. If we have an operation context and client + // we can provide additional diagnostics data. + if (waitTime == Milliseconds(0)) { + if (onTimeout) { + onTimeout(); + } + std::string timeoutMessage = str::stream() + << "Unable to acquire " << modeName(mode) << " lock on '" << resId.toString() + << "' within " << timeout << "."; + if (opCtx && opCtx->getClient()) { + timeoutMessage = str::stream() + << timeoutMessage << " opId: " << opCtx->getOpID() + << ", op: " << opCtx->getClient()->desc() + << ", connId: " << opCtx->getClient()->getConnectionId() << "."; + } + uasserted(ErrorCodes::LockTimeout, timeoutMessage); + } + } + + invariant(result == LOCK_OK); + unlockOnErrorGuard.dismiss(); + _setWaitingResource(ResourceId()); +} + +void LockerImpl::getFlowControlTicket(OperationContext* opCtx, LockMode lockMode) { + auto ticketholder = FlowControlTicketholder::get(opCtx); + if (ticketholder && lockMode == LockMode::MODE_IX && _clientState.load() == kInactive && + _admCtx.getPriority() != AdmissionContext::Priority::kImmediate && + !_uninterruptibleLocksRequested) { + // FlowControl only acts when a MODE_IX global lock is being taken. The clientState is only + // being modified here to change serverStatus' `globalLock.currentQueue` metrics. This + // method must not exit with a side-effect on the clientState. That value is also used for + // tracking whether other resources need to be released. + _clientState.store(kQueuedWriter); + ScopeGuard restoreState([&] { _clientState.store(kInactive); }); + // Acquiring a ticket is a potentially blocking operation. This must not be called after a + // transaction timestamp has been set, indicating this transaction has created an oplog + // hole. 
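The clientState handling in `getFlowControlTicket()` is a classic temporarily-flip-then-restore pattern: the locker advertises itself as a queued writer only while it blocks on the ticket, and a scope guard guarantees the state is restored on every exit path. A minimal sketch of that pattern follows, with hypothetical names and `std::function` standing in for the server's ScopeGuard utility.

```cpp
#include <cassert>
#include <functional>
#include <iostream>

enum class ClientState { kInactive, kQueuedWriter };

// Minimal scope guard; the server has its own ScopeGuard utility, this one is illustrative.
struct ScopeGuard {
    std::function<void()> fn;
    ~ScopeGuard() {
        fn();
    }
};

int main() {
    ClientState state = ClientState::kInactive;

    {
        state = ClientState::kQueuedWriter;  // shows up in queue metrics while we block
        ScopeGuard restore{[&] { state = ClientState::kInactive; }};

        // ... the potentially blocking ticket acquisition would happen here ...
        std::cout << "waiting for flow control ticket\n";
    }  // the guard runs here, so the state is restored on every exit path

    assert(state == ClientState::kInactive);
}
```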
+ invariant(!opCtx->recoveryUnit()->isTimestamped()); + ticketholder->getTicket(opCtx, &_flowControlStats); + } +} + +LockResult LockerImpl::lockRSTLBegin(OperationContext* opCtx, LockMode mode) { + bool testOnly = false; + + if (MONGO_unlikely(enableTestOnlyFlagforRSTL.shouldFail())) { + testOnly = true; + } + + invariant(testOnly || mode == MODE_IX || mode == MODE_X); + return _lockBegin(opCtx, resourceIdReplicationStateTransitionLock, mode); +} + +void LockerImpl::lockRSTLComplete(OperationContext* opCtx, + LockMode mode, + Date_t deadline, + const LockTimeoutCallback& onTimeout) { + _lockComplete(opCtx, resourceIdReplicationStateTransitionLock, mode, deadline, onTimeout); +} + +void LockerImpl::releaseTicket() { + invariant(_modeForTicket != MODE_NONE); + _releaseTicket(); +} + +void LockerImpl::_releaseTicket() { + _ticket.reset(); + _clientState.store(kInactive); +} + +bool LockerImpl::_unlockImpl(LockRequestsMap::Iterator* it) { + if (getGlobalLockManager()->unlock(it->objAddr())) { + if (it->key() == resourceIdGlobal) { + invariant(_modeForTicket != MODE_NONE); + + // We may have already released our ticket through a call to releaseTicket(). + if (_clientState.load() != kInactive) { + _releaseTicket(); + } + + _modeForTicket = MODE_NONE; + } + + scoped_spinlock scopedLock(_lock); + it->remove(); + + return true; + } + + return false; +} + +bool LockerImpl::isGlobalLockedRecursively() { + auto globalLockRequest = _requests.find(resourceIdGlobal); + return !globalLockRequest.finished() && globalLockRequest->recursiveCount > 1; +} + +bool LockerImpl::canSaveLockState() { + // We cannot yield strong global locks. + if (_modeForTicket == MODE_S || _modeForTicket == MODE_X) { + return false; + } + + // If we don't have a global lock, we do not yield. + if (_modeForTicket == MODE_NONE) { + auto globalRequest = _requests.find(resourceIdGlobal); + invariant(!globalRequest); + + // If there's no global lock there isn't really anything to do. Check that. + for (auto it = _requests.begin(); !it.finished(); it.next()) { + const ResourceType resType = it.key().getType(); + invariant(resType == RESOURCE_MUTEX || resType == RESOURCE_DDL_DATABASE || + resType == RESOURCE_DDL_COLLECTION); + } + return false; + } + + for (auto it = _requests.begin(); !it.finished(); it.next()) { + const ResourceId resId = it.key(); + const ResourceType resType = resId.getType(); + if (resType == RESOURCE_MUTEX || resType == RESOURCE_DDL_DATABASE || + resType == RESOURCE_DDL_COLLECTION) + continue; + + // If any lock has been acquired more than once, we're probably somewhere in a + // DBDirectClient call. It's not safe to release and reacquire locks -- the context using + // the DBDirectClient is probably not prepared for lock release. This logic applies to all + // locks in the hierarchy. + if (it->recursiveCount > 1) { + return false; + } + + // We cannot yield any other lock in a strong lock mode. + if (it->mode == MODE_S || it->mode == MODE_X) { + return false; + } + } + + return true; +} + +void LockerImpl::_setWaitingResource(ResourceId resId) { + scoped_spinlock scopedLock(_lock); + + _waitingResource = resId; +} + +// +// Auto classes +// + +namespace { +/** + * Periodically purges unused lock buckets. The first time the lock is used again after + * cleanup it needs to be allocated, and similarly, every first use by a client for an intent + * mode may need to create a partitioned lock head. Cleanup is done roughly once a minute. 
+ */ +class UnusedLockCleaner : PeriodicTask { +public: + std::string taskName() const { + return "UnusedLockCleaner"; + } + + void taskDoWork() { + LOGV2_DEBUG(20524, 2, "cleaning up unused lock buckets of the global lock manager"); + getGlobalLockManager()->cleanupUnusedLocks(); + } +} unusedLockCleaner; +} // namespace + +// +// Standalone functions +// + +void reportGlobalLockingStats(SingleThreadedLockStats* outStats) { + globalStats.report(outStats); +} + +void resetGlobalLockStats() { + globalStats.reset(); +} + +} // namespace mongo diff --git a/src/mongo/db/concurrency/locker_impl.h b/src/mongo/db/concurrency/locker_impl.h new file mode 100644 index 0000000000000..9798c2b7e4da5 --- /dev/null +++ b/src/mongo/db/concurrency/locker_impl.h @@ -0,0 +1,455 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "mongo/db/concurrency/fast_map_noalloc.h" +#include "mongo/db/concurrency/flow_control_ticketholder.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/lock_stats.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/ticketholder_manager.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/util/concurrency/spin_lock.h" +#include "mongo/util/concurrency/ticketholder.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" + +namespace mongo { + +/** + * Notfication callback, which stores the last notification result and signals a condition + * variable, which can be waited on. 
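The notification object described above is essentially a one-slot mailbox built on a mutex and condition variable: `notify()` stores the result and wakes the waiter, while `wait()` blocks with a timeout until a result arrives. Below is a stripped-down, standalone version of that pattern; it is not the server's class and omits the interruptible OperationContext overload.

```cpp
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Hypothetical, stripped-down analogue: notify() stores the result and signals, wait()
// blocks with a timeout until a result has arrived.
class GrantNotification {
public:
    void clear() {
        std::lock_guard<std::mutex> lk(_mutex);
        _hasResult = false;
    }
    void notify(bool granted) {
        std::lock_guard<std::mutex> lk(_mutex);
        _hasResult = true;
        _granted = granted;
        _cond.notify_all();
    }
    // Returns true if a grant arrived before the timeout expired.
    bool wait(std::chrono::milliseconds timeout) {
        std::unique_lock<std::mutex> lk(_mutex);
        return _cond.wait_for(lk, timeout, [&] { return _hasResult; }) && _granted;
    }

private:
    std::mutex _mutex;
    std::condition_variable _cond;
    bool _hasResult = false;
    bool _granted = false;
};

int main() {
    GrantNotification notification;
    std::thread granter([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
        notification.notify(true);
    });
    std::cout << (notification.wait(std::chrono::seconds(1)) ? "granted" : "timed out") << "\n";
    granter.join();
}
```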
+ */ +class CondVarLockGrantNotification : public LockGrantNotification { + CondVarLockGrantNotification(const CondVarLockGrantNotification&) = delete; + CondVarLockGrantNotification& operator=(const CondVarLockGrantNotification&) = delete; + +public: + CondVarLockGrantNotification(); + + /** + * Clears the object so it can be reused. + */ + void clear(); + + /** + * Uninterruptible blocking method, which waits for the notification to fire. + * + * @param timeout How many milliseconds to wait before returning LOCK_TIMEOUT. + */ + LockResult wait(Milliseconds timeout); + + /** + * Interruptible blocking method, which waits for the notification to fire or an interrupt from + * the operation context. + * + * @param opCtx OperationContext to wait on for an interrupt. + * @param timeout How many milliseconds to wait before returning LOCK_TIMEOUT. + */ + LockResult wait(OperationContext* opCtx, Milliseconds timeout); + +private: + virtual void notify(ResourceId resId, LockResult result); + + // These two go together to implement the conditional variable pattern. + Mutex _mutex = MONGO_MAKE_LATCH("CondVarLockGrantNotification::_mutex"); + stdx::condition_variable _cond; + + // Result from the last call to notify + LockResult _result; +}; + +/** + * Interface for acquiring locks. One of those objects will have to be instantiated for each + * request (transaction). + * + * Lock/unlock methods must always be called from a single thread. + * + * All instances reference a single global lock manager. + * + */ +// TODO (SERVER-26879): Get rid of LockerImpl, devirtualise Locker and make it final +class LockerImpl final : public Locker { +public: + /** + * Instantiates new locker. Must be given a unique identifier for disambiguation. Lockers + * having the same identifier will not conflict on lock acquisition. + */ + LockerImpl(ServiceContext* serviceContext); + + virtual ~LockerImpl(); + + virtual ClientState getClientState() const; + + virtual LockerId getId() const { + return _id; + } + + stdx::thread::id getThreadId() const override; + + void updateThreadIdToCurrentThread() override; + void unsetThreadId() override; + + void setSharedLocksShouldTwoPhaseLock(bool sharedLocksShouldTwoPhaseLock) override { + _sharedLocksShouldTwoPhaseLock = sharedLocksShouldTwoPhaseLock; + } + + void setMaxLockTimeout(Milliseconds maxTimeout) override { + _maxLockTimeout = maxTimeout; + } + + bool hasMaxLockTimeout() override { + return static_cast(_maxLockTimeout); + } + + void unsetMaxLockTimeout() override { + _maxLockTimeout = boost::none; + } + + /** + * Acquires the ticket within the deadline (or _maxLockTimeout) and tries to grab the lock. + */ + virtual void lockGlobal(OperationContext* opCtx, + LockMode mode, + Date_t deadline = Date_t::max()); + + virtual bool unlockGlobal(); + + virtual LockResult lockRSTLBegin(OperationContext* opCtx, LockMode mode); + virtual void lockRSTLComplete(OperationContext* opCtx, + LockMode mode, + Date_t deadline, + const LockTimeoutCallback& onTimeout); + + virtual bool unlockRSTLforPrepare(); + + virtual void beginWriteUnitOfWork() override; + virtual void endWriteUnitOfWork() override; + + virtual bool inAWriteUnitOfWork() const { + return _wuowNestingLevel > 0; + } + + bool wasGlobalLockTakenForWrite() const override; + + bool wasGlobalLockTakenInModeConflictingWithWrites() const override; + + bool wasGlobalLockTaken() const override; + + void setGlobalLockTakenInMode(LockMode mode) override; + + /** + * Requests a lock for resource 'resId' with mode 'mode'. 
An OperationContext 'opCtx' must be
+     * provided to interrupt waiting on the locker condition variable that indicates status of
+     * the lock acquisition. A lock operation would otherwise wait until a timeout or the lock is
+     * granted.
+     */
+    virtual void lock(OperationContext* opCtx,
+                      ResourceId resId,
+                      LockMode mode,
+                      Date_t deadline = Date_t::max());
+
+    virtual void lock(ResourceId resId, LockMode mode, Date_t deadline = Date_t::max()) {
+        lock(nullptr, resId, mode, deadline);
+    }
+
+    virtual void downgrade(ResourceId resId, LockMode newMode);
+
+    virtual bool unlock(ResourceId resId);
+
+    virtual LockMode getLockMode(ResourceId resId) const;
+    virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const;
+    virtual bool isDbLockedForMode(const DatabaseName& dbName, LockMode mode) const;
+    virtual bool isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const;
+
+    virtual ResourceId getWaitingResource() const;
+
+    virtual void getLockerInfo(LockerInfo* lockerInfo,
+                               boost::optional<SingleThreadedLockStats> lockStatsBase) const;
+    virtual boost::optional<LockerInfo> getLockerInfo(
+        boost::optional<SingleThreadedLockStats> lockStatsBase) const final;
+
+    virtual void saveLockStateAndUnlock(LockSnapshot* stateOut);
+
+    virtual void restoreLockState(OperationContext* opCtx, const LockSnapshot& stateToRestore);
+
+    void releaseWriteUnitOfWorkAndUnlock(LockSnapshot* stateOut) override;
+    void restoreWriteUnitOfWorkAndLock(OperationContext* opCtx,
+                                       const LockSnapshot& stateToRestore) override;
+
+    void releaseWriteUnitOfWork(WUOWLockSnapshot* stateOut) override;
+    void restoreWriteUnitOfWork(const WUOWLockSnapshot& stateToRestore) override;
+
+    virtual void releaseTicket();
+    virtual void reacquireTicket(OperationContext* opCtx);
+
+    bool hasReadTicket() const override {
+        return _modeForTicket == MODE_IS || _modeForTicket == MODE_S;
+    }
+
+    bool hasWriteTicket() const override {
+        return _modeForTicket == MODE_IX || _modeForTicket == MODE_X;
+    }
+
+    void getFlowControlTicket(OperationContext* opCtx, LockMode lockMode) override;
+
+    FlowControlTicketholder::CurOp getFlowControlStats() const override;
+
+    //
+    // Below functions are for testing only.
+    //
+
+    FastMapNoAlloc<ResourceId, LockRequest> getRequestsForTest() const {
+        scoped_spinlock scopedLock(_lock);
+        return _requests;
+    }
+
+    LockResult lockBeginForTest(OperationContext* opCtx, ResourceId resId, LockMode mode) {
+        return _lockBegin(opCtx, resId, mode);
+    }
+
+    void lockCompleteForTest(OperationContext* opCtx,
+                             ResourceId resId,
+                             LockMode mode,
+                             Date_t deadline) {
+        _lockComplete(opCtx, resId, mode, deadline, nullptr);
+    }
+
+private:
+    typedef FastMapNoAlloc<ResourceId, LockRequest> LockRequestsMap;
+
+    /**
+     * Allows lock requests to be made in a non-blocking way. There can be only one
+     * outstanding pending lock request per locker object.
+     *
+     * _lockBegin posts a request to the lock manager for the specified lock to be acquired,
+     * which either immediately grants the lock, or puts the requestor on the conflict queue
+     * and returns immediately with the result of the acquisition. The result can be one of:
+     *
+     * LOCK_OK - Nothing more needs to be done. The lock is granted.
+     * LOCK_WAITING - The request has been queued up and will be granted as soon as the lock
+     *      is free. If this result is returned, typically _lockComplete needs to be called in
+     *      order to wait for the actual grant to occur. If the caller no longer needs to wait
+     *      for the grant to happen, unlock needs to be called with the same resource passed
+     *      to _lockBegin.

+ * + * In other words for each call to _lockBegin, which does not return LOCK_OK, there needs to + * be a corresponding call to either _lockComplete or unlock. + * + * If an operation context is provided that represents an interrupted operation, _lockBegin will + * throw an exception whenever it would have been possible to grant the lock with LOCK_OK. This + * behavior can be disabled with an UninterruptibleLockGuard. + * + * NOTE: These methods are not public and should only be used inside the class + * implementation and for unit-tests and not called directly. + */ + LockResult _lockBegin(OperationContext* opCtx, ResourceId resId, LockMode mode); + + /** + * Waits for the completion of a lock, previously requested through _lockBegin/ + * Must only be called, if _lockBegin returned LOCK_WAITING. + * + * @param opCtx Operation context that, if not null, will be used to allow interruptible lock + * acquisition. + * @param resId Resource id which was passed to an earlier _lockBegin call. Must match. + * @param mode Mode which was passed to an earlier _lockBegin call. Must match. + * @param deadline The absolute time point when this lock acquisition will time out, if not yet + * granted. + * @param onTimeout Callback which will run if the lock acquisition is about to time out. + * + * Throws an exception if it is interrupted. + */ + void _lockComplete(OperationContext* opCtx, + ResourceId resId, + LockMode mode, + Date_t deadline, + const LockTimeoutCallback& onTimeout); + + /** + * The main functionality of the unlock method, except accepts iterator in order to avoid + * additional lookups during unlockGlobal. Frees locks immediately, so must not be called from + * inside a WUOW. + */ + bool _unlockImpl(LockRequestsMap::Iterator* it); + + /** + * Whether we should use two phase locking. Returns true if the particular lock's release should + * be delayed until the end of the operation. + * + * We delay release of write operation locks (X, IX) in order to ensure that the data changes + * they protect are committed successfully. endWriteUnitOfWork will release them afterwards. + * This protects other threads from seeing inconsistent in-memory state. + * + * Shared locks (S, IS) will also participate in two-phase locking if + * '_sharedLocksShouldTwoPhaseLock' is true. This will protect open storage engine transactions + * across network calls. + */ + bool _shouldDelayUnlock(ResourceId resId, LockMode mode) const; + + /** + * Releases the ticket for the Locker. + */ + void _releaseTicket(); + + /** + * Acquires a ticket for the Locker under 'mode'. + * Returns true if a ticket is successfully acquired. + * false if it cannot acquire a ticket within 'deadline'. + * It may throw an exception when it is interrupted. + */ + bool _acquireTicket(OperationContext* opCtx, LockMode mode, Date_t deadline); + + void _setWaitingResource(ResourceId resId); + + /** + * Calls dump() on this locker instance and the lock manager. + */ + void _dumpLockerAndLockManagerRequests(); + + /** + * Determines whether global and tenant lock state implies that some database or lower level + * resource, such as a collection, belonging to a tenant identified by 'tenantId' is locked in + * 'lockMode'. + * + * Returns: + * true, if the global and tenant locks imply that the resource is locked for 'mode'; + * false, if the global and tenant locks imply that the resource is not locked for 'mode'; + * boost::none, if the global and tenant lock state does not imply either outcome and lower + * level locks should be consulted. 
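The tri-state return convention documented above (definitely covered, definitely not covered, or "consult the next level down") is worth seeing in isolation. The sketch below uses `std::optional<bool>` and a single higher-level mode standing in for the combined global/tenant check; it is an illustration of the convention, not the server's implementation.

```cpp
#include <iostream>
#include <optional>

enum LockMode { MODE_NONE, MODE_IS, MODE_IX, MODE_S, MODE_X };

bool isSharedMode(LockMode m) {
    return m == MODE_S || m == MODE_IS;
}

// A strong higher-level lock answers the question outright; an intent lock defers it.
std::optional<bool> coveredByHigherLevel(LockMode higherLevelMode, LockMode wanted) {
    switch (higherLevelMode) {
        case MODE_X:
            return true;                  // exclusive covers any request
        case MODE_S:
            return isSharedMode(wanted);  // shared covers only shared requests
        case MODE_NONE:
            return false;                 // nothing held at this level
        default:
            return std::nullopt;          // intent mode: consult the next level down
    }
}

int main() {
    auto covered = coveredByHigherLevel(MODE_IX, MODE_IX);
    std::cout << (covered ? (*covered ? "covered" : "not covered") : "check lower level") << "\n";
}
```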
+     */
+    boost::optional<bool> _globalAndTenantLocksImplyDBOrCollectionLockedForMode(
+        const boost::optional<TenantId>& tenantId, LockMode lockMode) const;
+
+    // Used to disambiguate different lockers
+    const LockerId _id;
+
+    // The only reason we have this spin lock here is for the diagnostic tools, which could
+    // iterate through the LockRequestsMap on a separate thread and need it to be stable.
+    // Apart from that, all accesses to the LockerImpl are always from a single thread.
+    //
+    // This has to be locked inside const methods, hence the mutable.
+    mutable SpinLock _lock;
+    // Note: this data structure must always guarantee the continued validity of pointers/references
+    // to its contents (LockRequests). The LockManager maintains a LockRequestList of pointers to
+    // the LockRequests managed by this data structure.
+    LockRequestsMap _requests;
+
+    // Reuse the notification object across requests so we don't have to create a new mutex
+    // and condition variable every time.
+    CondVarLockGrantNotification _notify;
+
+    // Per-locker locking statistics. Reported in the slow-query log message and through
+    // db.currentOp. Complementary to the per-instance locking statistics.
+    AtomicLockStats _stats;
+
+    // Delays release of exclusive/intent-exclusive locked resources until the write unit of
+    // work completes. Value of 0 means we are not inside a write unit of work.
+    int _wuowNestingLevel;
+
+    // Mode for which the Locker acquired a ticket, or MODE_NONE if no ticket was acquired.
+    LockMode _modeForTicket = MODE_NONE;
+
+    // Indicates whether the client is an active reader/writer or is queued.
+    AtomicWord<ClientState> _clientState{kInactive};
+
+    // Tracks the thread that owns the lock, for debugging purposes.
+    stdx::thread::id _threadId;
+
+    // If true, shared locks will participate in two-phase locking.
+    bool _sharedLocksShouldTwoPhaseLock = false;
+
+    // If this is set, dictates the max number of milliseconds that we will wait for lock
+    // acquisition. Effectively resets lock acquisition deadlines to time out sooner. If set to 0,
+    // for example, lock attempts will time out immediately if the lock is not immediately
+    // available. Note this will be ineffective if an uninterruptible lock guard is set.
+    boost::optional<Milliseconds> _maxLockTimeout;
+
+    // A structure for accumulating time spent getting flow control tickets.
+    FlowControlTicketholder::CurOp _flowControlStats;
+
+    // The global ticketholders of the service context.
+    TicketHolderManager* _ticketHolderManager;
+
+    // This will only be valid when holding a ticket.
+    boost::optional<Ticket> _ticket;
+
+    // Tracks the global lock modes ever acquired in this Locker's life. This value should only
+    // ever be accessed from the thread that owns the Locker.
+    unsigned char _globalLockMode = (1 << MODE_NONE);
+
+    // Tracks whether this operation should be killed on step down.
+    AtomicWord<bool> _wasGlobalLockTakenInModeConflictingWithWrites{false};
+
+    // If isValid(), the ResourceId of the resource currently waiting for the lock. If not valid,
+    // there is no resource currently waiting.
+    ResourceId _waitingResource;
+
+    //////////////////////////////////////////////////////////////////////////////////////////
+    //
+    // Methods merged from LockState, which should eventually be removed or changed to methods
+    // on the LockerImpl interface.
+ // + +public: + virtual void dump() const; + + virtual bool isW() const; + virtual bool isR() const; + + virtual bool isLocked() const; + virtual bool isWriteLocked() const; + virtual bool isReadLocked() const; + + virtual bool isRSTLExclusive() const; + virtual bool isRSTLLocked() const; + + bool isGlobalLockedRecursively() override; + bool canSaveLockState() override; + + virtual bool hasLockPending() const { + return getWaitingResource().isValid(); + } +}; + +} // namespace mongo diff --git a/src/mongo/db/concurrency/locker_impl_test.cpp b/src/mongo/db/concurrency/locker_impl_test.cpp new file mode 100644 index 0000000000000..86ee88ad0612b --- /dev/null +++ b/src/mongo/db/concurrency/locker_impl_test.cpp @@ -0,0 +1,1411 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/client.h" +#include "mongo/db/concurrency/fast_map_noalloc.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/lock_stats.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/tenant_id.h" +#include "mongo/transport/session.h" +#include "mongo/transport/transport_layer_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + + +namespace mongo { + +class LockerImplTest : public ServiceContextTest {}; + +TEST_F(LockerImplTest, LockNoConflict) { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + + locker.lock(resId, MODE_X); + + ASSERT(locker.isLockHeldForMode(resId, MODE_X)); + ASSERT(locker.isLockHeldForMode(resId, MODE_S)); + + ASSERT(locker.unlock(resId)); + + ASSERT(locker.isLockHeldForMode(resId, MODE_NONE)); + + locker.unlockGlobal(); +} + +TEST_F(LockerImplTest, ReLockNoConflict) { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + + locker.lock(resId, MODE_S); + locker.lock(resId, MODE_X); + + ASSERT(!locker.unlock(resId)); + ASSERT(locker.isLockHeldForMode(resId, MODE_X)); + + ASSERT(locker.unlock(resId)); + ASSERT(locker.isLockHeldForMode(resId, MODE_NONE)); + + ASSERT(locker.unlockGlobal()); +} + +TEST_F(LockerImplTest, ConflictWithTimeout) { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker1(opCtx->getServiceContext()); + locker1.lockGlobal(opCtx.get(), MODE_IX); + locker1.lock(resId, MODE_X); + + LockerImpl locker2(opCtx->getServiceContext()); + locker2.lockGlobal(opCtx.get(), MODE_IX); + + ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resId, MODE_S, Date_t::now()), + AssertionException, + ErrorCodes::LockTimeout); + + ASSERT(locker2.getLockMode(resId) == MODE_NONE); + + ASSERT(locker1.unlock(resId)); + + ASSERT(locker1.unlockGlobal()); + ASSERT(locker2.unlockGlobal()); +} + +TEST_F(LockerImplTest, ConflictUpgradeWithTimeout) { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker1(opCtx->getServiceContext()); + locker1.lockGlobal(opCtx.get(), MODE_IS); + 
locker1.lock(resId, MODE_S); + + LockerImpl locker2(opCtx->getServiceContext()); + locker2.lockGlobal(opCtx.get(), MODE_IS); + locker2.lock(resId, MODE_S); + + // Try upgrading locker 1, which should block and timeout + ASSERT_THROWS_CODE(locker1.lock(opCtx.get(), resId, MODE_X, Date_t::now() + Milliseconds(1)), + AssertionException, + ErrorCodes::LockTimeout); + + locker1.unlockGlobal(); + locker2.unlockGlobal(); +} + +TEST_F(LockerImplTest, FailPointInLockFailsGlobalNonIntentLocksIfTheyCannotBeImmediatelyGranted) { + transport::TransportLayerMock transportLayer; + std::shared_ptr session = transportLayer.createSession(); + + auto newClient = getServiceContext()->makeClient("userClient", session); + AlternativeClientRegion acr(newClient); + auto newOpCtx = cc().makeOperationContext(); + + LockerImpl locker1(newOpCtx->getServiceContext()); + locker1.lockGlobal(newOpCtx.get(), MODE_IX); + + { + FailPointEnableBlock failWaitingNonPartitionedLocks("failNonIntentLocksIfWaitNeeded"); + + // MODE_S attempt. + LockerImpl locker2(newOpCtx->getServiceContext()); + ASSERT_THROWS_CODE( + locker2.lockGlobal(newOpCtx.get(), MODE_S), DBException, ErrorCodes::LockTimeout); + + // MODE_X attempt. + LockerImpl locker3(newOpCtx->getServiceContext()); + ASSERT_THROWS_CODE( + locker3.lockGlobal(newOpCtx.get(), MODE_X), DBException, ErrorCodes::LockTimeout); + } + + locker1.unlockGlobal(); +} + +TEST_F(LockerImplTest, FailPointInLockFailsNonIntentLocksIfTheyCannotBeImmediatelyGranted) { + transport::TransportLayerMock transportLayer; + std::shared_ptr session = transportLayer.createSession(); + + auto newClient = getServiceContext()->makeClient("userClient", session); + AlternativeClientRegion acr(newClient); + auto newOpCtx = cc().makeOperationContext(); + + // Granted MODE_X lock, fail incoming MODE_S and MODE_X. + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker1(newOpCtx->getServiceContext()); + locker1.lockGlobal(newOpCtx.get(), MODE_IX); + locker1.lock(newOpCtx.get(), resId, MODE_X); + + { + FailPointEnableBlock failWaitingNonPartitionedLocks("failNonIntentLocksIfWaitNeeded"); + + // MODE_S attempt. + LockerImpl locker2(newOpCtx->getServiceContext()); + locker2.lockGlobal(newOpCtx.get(), MODE_IS); + ASSERT_THROWS_CODE(locker2.lock(newOpCtx.get(), resId, MODE_S, Date_t::max()), + DBException, + ErrorCodes::LockTimeout); + + // The timed out MODE_S attempt shouldn't be present in the map of lock requests because it + // won't ever be granted. + ASSERT(locker2.getRequestsForTest().find(resId).finished()); + locker2.unlockGlobal(); + + // MODE_X attempt. + LockerImpl locker3(newOpCtx->getServiceContext()); + locker3.lockGlobal(newOpCtx.get(), MODE_IX); + ASSERT_THROWS_CODE(locker3.lock(newOpCtx.get(), resId, MODE_X, Date_t::max()), + DBException, + ErrorCodes::LockTimeout); + + // The timed out MODE_X attempt shouldn't be present in the map of lock requests because it + // won't ever be granted. 
+ ASSERT(locker3.getRequestsForTest().find(resId).finished()); + locker3.unlockGlobal(); + } + + locker1.unlockGlobal(); +} + +TEST_F(LockerImplTest, ReadTransaction) { + auto opCtx = makeOperationContext(); + + LockerImpl locker(opCtx->getServiceContext()); + + locker.lockGlobal(opCtx.get(), MODE_IS); + locker.unlockGlobal(); + + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.unlockGlobal(); + + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lockGlobal(opCtx.get(), MODE_IS); + locker.unlockGlobal(); + locker.unlockGlobal(); +} + +/** + * Test that saveLockerImpl works by examining the output. + */ +TEST_F(LockerImplTest, saveAndRestoreGlobal) { + auto opCtx = makeOperationContext(); + + LockerImpl locker(opCtx->getServiceContext()); + + // No lock requests made, no locks held. + ASSERT_FALSE(locker.canSaveLockState()); + + // Lock the global lock, but just once. + locker.lockGlobal(opCtx.get(), MODE_IX); + + // We've locked the global lock. This should be reflected in the lockInfo. + Locker::LockSnapshot lockInfo; + locker.saveLockStateAndUnlock(&lockInfo); + ASSERT(!locker.isLocked()); + ASSERT_EQUALS(MODE_IX, lockInfo.globalMode); + + // Restore the lock(s) we had. + locker.restoreLockState(opCtx.get(), lockInfo); + + ASSERT(locker.isLocked()); + ASSERT(locker.unlockGlobal()); +} + +/** + * Test that saveLockerImpl can save and restore the RSTL. + */ +TEST_F(LockerImplTest, saveAndRestoreRSTL) { + auto opCtx = makeOperationContext(); + + Locker::LockSnapshot lockInfo; + + LockerImpl locker(opCtx->getServiceContext()); + + const ResourceId resIdDatabase(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + + // Acquire locks. + locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resIdDatabase, MODE_IX); + + // Save the lock state. + locker.saveLockStateAndUnlock(&lockInfo); + ASSERT(!locker.isLocked()); + ASSERT_EQUALS(MODE_IX, lockInfo.globalMode); + + // Check locks are unlocked. + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resourceIdReplicationStateTransitionLock)); + ASSERT(!locker.isLocked()); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + + // Restore the lock(s) we had. + locker.restoreLockState(opCtx.get(), lockInfo); + + // Check locks are re-locked. + ASSERT(locker.isLocked()); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resourceIdReplicationStateTransitionLock)); + + ASSERT(locker.unlockGlobal()); + ASSERT(locker.unlock(resourceIdReplicationStateTransitionLock)); +} + +/** + * Test that we don't unlock when we have the global lock more than once. + */ +TEST_F(LockerImplTest, saveAndRestoreGlobalAcquiredTwice) { + auto opCtx = makeOperationContext(); + + LockerImpl locker(opCtx->getServiceContext()); + + // No lock requests made, no locks held. + ASSERT_FALSE(locker.canSaveLockState()); + + // Lock the global lock. + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lockGlobal(opCtx.get(), MODE_IX); + + // This shouldn't actually unlock as we're in a nested scope. + ASSERT_FALSE(locker.canSaveLockState()); + + ASSERT(locker.isLocked()); + + // We must unlockGlobal twice. + ASSERT(!locker.unlockGlobal()); + ASSERT(locker.unlockGlobal()); +} + +/** + * Tests that restoreLockerImpl works by locking a db and collection and saving + restoring. 
+ */ +TEST_F(LockerImplTest, saveAndRestoreDBAndCollection) { + auto opCtx = makeOperationContext(); + + Locker::LockSnapshot lockInfo; + + LockerImpl locker(opCtx->getServiceContext()); + + const ResourceId resIdDatabase(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + const ResourceId resIdCollection( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + // Lock some stuff. + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resIdDatabase, MODE_IX); + locker.lock(resIdCollection, MODE_IX); + locker.saveLockStateAndUnlock(&lockInfo); + + // Things shouldn't be locked anymore. + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + + // Restore lock state. + locker.restoreLockState(opCtx.get(), lockInfo); + + // Make sure things were re-locked. + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); + + ASSERT(locker.unlockGlobal()); +} + +TEST_F(LockerImplTest, releaseWriteUnitOfWork) { + auto opCtx = makeOperationContext(); + + Locker::LockSnapshot lockInfo; + + LockerImpl locker(opCtx->getServiceContext()); + + const ResourceId resIdDatabase(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + const ResourceId resIdCollection( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + locker.beginWriteUnitOfWork(); + // Lock some stuff. + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resIdDatabase, MODE_IX); + locker.lock(resIdCollection, MODE_IX); + // Unlock them so that they will be pending to unlock. + ASSERT_FALSE(locker.unlock(resIdCollection)); + ASSERT_FALSE(locker.unlock(resIdDatabase)); + ASSERT_FALSE(locker.unlockGlobal()); + + locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo); + + // Things shouldn't be locked anymore. + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + ASSERT_FALSE(locker.isLocked()); + + // Destructor should succeed since the locker's state should be empty. +} + +TEST_F(LockerImplTest, restoreWriteUnitOfWork) { + auto opCtx = makeOperationContext(); + + Locker::LockSnapshot lockInfo; + + LockerImpl locker(opCtx->getServiceContext()); + + const ResourceId resIdDatabase(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + const ResourceId resIdCollection( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + locker.beginWriteUnitOfWork(); + // Lock some stuff. + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resIdDatabase, MODE_IX); + locker.lock(resIdCollection, MODE_IX); + // Unlock them so that they will be pending to unlock. + ASSERT_FALSE(locker.unlock(resIdCollection)); + ASSERT_FALSE(locker.unlock(resIdDatabase)); + ASSERT_FALSE(locker.unlockGlobal()); + + locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo); + + // Things shouldn't be locked anymore. + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + ASSERT_FALSE(locker.isLocked()); + + // Restore lock state. + locker.restoreWriteUnitOfWorkAndLock(opCtx.get(), lockInfo); + + // Make sure things were re-locked. 
+ ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); + ASSERT(locker.isLocked()); + + locker.endWriteUnitOfWork(); + + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + ASSERT_FALSE(locker.isLocked()); +} + +TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithoutUnlock) { + auto opCtx = makeOperationContext(); + + Locker::WUOWLockSnapshot lockInfo; + + LockerImpl locker(opCtx->getServiceContext()); + + const ResourceId resIdDatabase(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + const ResourceId resIdCollection( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + const ResourceId resIdCollection2( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection2")); + + locker.beginWriteUnitOfWork(); + // Lock some stuff. + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resIdDatabase, MODE_IX); + locker.lock(resIdCollection, MODE_X); + + // Recursive global lock. + locker.lockGlobal(opCtx.get(), MODE_IX); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 2U); + + ASSERT_FALSE(locker.unlockGlobal()); + + // Unlock them so that they will be pending to unlock. + ASSERT_FALSE(locker.unlock(resIdCollection)); + ASSERT_FALSE(locker.unlock(resIdDatabase)); + ASSERT_FALSE(locker.unlockGlobal()); + ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 3UL); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); + + locker.releaseWriteUnitOfWork(&lockInfo); + ASSERT_EQ(lockInfo.unlockPendingLocks.size(), 3UL); + + // Things should still be locked. + ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); + ASSERT(locker.isLocked()); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); + + // The locker is no longer participating the two-phase locking. + ASSERT_FALSE(locker.inAWriteUnitOfWork()); + ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 0UL); + + // Start a new WUOW with the same locker. Any new locks acquired in the new WUOW + // should participate two-phase locking. + { + locker.beginWriteUnitOfWork(); + + // Grab new locks inside the new WUOW. 
+ locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resIdDatabase, MODE_IX); + locker.lock(resIdCollection2, MODE_IX); + + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection2)); + ASSERT(locker.isLocked()); + + locker.unlock(resIdCollection2); + ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 1UL); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 2U); + locker.unlock(resIdDatabase); + ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 1UL); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); + locker.unlockGlobal(); + ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 1UL); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); + locker.endWriteUnitOfWork(); + } + ASSERT_FALSE(locker.inAWriteUnitOfWork()); + ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 0UL); + + ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); + ASSERT(locker.isLocked()); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); + // The new locks has been released. + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection2)); + + // Restore lock state. + locker.restoreWriteUnitOfWork(lockInfo); + + ASSERT_TRUE(locker.inAWriteUnitOfWork()); + + // Make sure things are still locked. 
+ ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_X, locker.getLockMode(resIdCollection)); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); + ASSERT(locker.isLocked()); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); + + locker.endWriteUnitOfWork(); + + ASSERT_FALSE(locker.inAWriteUnitOfWork()); + + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection2)); + ASSERT_FALSE(locker.isLocked()); + ASSERT_EQ(locker.numResourcesToUnlockAtEndUnitOfWorkForTest(), 0U); + ASSERT(locker.getRequestsForTest().find(resourceIdGlobal).finished()); +} + +TEST_F(LockerImplTest, releaseAndRestoreReadOnlyWriteUnitOfWork) { + auto opCtx = makeOperationContext(); + + Locker::LockSnapshot lockInfo; + + LockerImpl locker(opCtx->getServiceContext()); + + const ResourceId resIdDatabase(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + const ResourceId resIdCollection( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + // Snapshot transactions delay shared locks as well. + locker.setSharedLocksShouldTwoPhaseLock(true); + + locker.beginWriteUnitOfWork(); + // Lock some stuff in IS mode. + locker.lockGlobal(opCtx.get(), MODE_IS); + locker.lock(resIdDatabase, MODE_IS); + locker.lock(resIdCollection, MODE_IS); + // Unlock them. + ASSERT_FALSE(locker.unlock(resIdCollection)); + ASSERT_FALSE(locker.unlock(resIdDatabase)); + ASSERT_FALSE(locker.unlockGlobal()); + ASSERT_EQ(3u, locker.numResourcesToUnlockAtEndUnitOfWorkForTest()); + + // Things shouldn't be locked anymore. + locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo); + + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + ASSERT_FALSE(locker.isLocked()); + + // Restore lock state. + locker.restoreWriteUnitOfWorkAndLock(opCtx.get(), lockInfo); + + ASSERT_EQUALS(MODE_IS, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_IS, locker.getLockMode(resIdCollection)); + ASSERT_TRUE(locker.isLocked()); + + locker.endWriteUnitOfWork(); + + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + ASSERT_FALSE(locker.isLocked()); +} + +TEST_F(LockerImplTest, releaseAndRestoreEmptyWriteUnitOfWork) { + Locker::LockSnapshot lockInfo; + auto opCtx = makeOperationContext(); + LockerImpl locker(opCtx->getServiceContext()); + + // Snapshot transactions delay shared locks as well. + locker.setSharedLocksShouldTwoPhaseLock(true); + + locker.beginWriteUnitOfWork(); + + // Nothing to yield. 
+ ASSERT_FALSE(locker.canSaveLockState()); + ASSERT_FALSE(locker.isLocked()); + + locker.endWriteUnitOfWork(); +} + +TEST_F(LockerImplTest, releaseAndRestoreWriteUnitOfWorkWithRecursiveLocks) { + auto opCtx = makeOperationContext(); + + Locker::LockSnapshot lockInfo; + + LockerImpl locker(opCtx->getServiceContext()); + + const ResourceId resIdDatabase(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + const ResourceId resIdCollection( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + locker.beginWriteUnitOfWork(); + // Lock some stuff. + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resIdDatabase, MODE_IX); + locker.lock(resIdCollection, MODE_IX); + // Recursively lock them again with a weaker mode. + locker.lockGlobal(opCtx.get(), MODE_IS); + locker.lock(resIdDatabase, MODE_IS); + locker.lock(resIdCollection, MODE_IS); + + // Make sure locks are converted. + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); + ASSERT_TRUE(locker.isWriteLocked()); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 2U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 2U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->recursiveCount, 2U); + + // Unlock them so that they will be pending to unlock. + ASSERT_FALSE(locker.unlock(resIdCollection)); + ASSERT_FALSE(locker.unlock(resIdDatabase)); + ASSERT_FALSE(locker.unlockGlobal()); + // Make sure locks are still acquired in the correct mode. + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); + ASSERT_TRUE(locker.isWriteLocked()); + // Make sure unlocking converted locks decrements the locks' recursiveCount instead of + // incrementing unlockPending. + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 0U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->unlockPending, 0U); + + // Unlock again so unlockPending == recursiveCount. + ASSERT_FALSE(locker.unlock(resIdCollection)); + ASSERT_FALSE(locker.unlock(resIdDatabase)); + ASSERT_FALSE(locker.unlockGlobal()); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->unlockPending, 1U); + + locker.releaseWriteUnitOfWorkAndUnlock(&lockInfo); + + // Things shouldn't be locked anymore. 
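The release-and-restore calls exercised in these tests are the lock-yielding pattern they target: every held lock is captured in a `Locker::LockSnapshot`, physically dropped, and later reacquired in the same mode with its pending-unlock bookkeeping intact. Below is a condensed, illustrative sketch of that flow, not part of this patch; it uses only the `LockerImpl` calls visible in these tests, assumes the same includes as the surrounding test file, and the database name is made up.

```cpp
// Hedged sketch of the yield/restore pattern; condensed from the tests above.
void yieldAndRestoreSketch(OperationContext* opCtx) {
    LockerImpl locker(opCtx->getServiceContext());
    const ResourceId dbId(RESOURCE_DATABASE,
                          DatabaseName::createDatabaseName_forTest(boost::none, "ExampleDB"));

    locker.beginWriteUnitOfWork();
    locker.lockGlobal(opCtx, MODE_IX);
    locker.lock(dbId, MODE_IX);

    // Inside a WUOW these unlocks only mark the locks as pending-unlock.
    locker.unlock(dbId);
    locker.unlockGlobal();

    // Yield: capture the state and physically release everything.
    Locker::LockSnapshot snapshot;
    locker.releaseWriteUnitOfWorkAndUnlock(&snapshot);
    invariant(!locker.isLocked());

    // Restore: reacquire the same locks in the same modes, still pending unlock.
    locker.restoreWriteUnitOfWorkAndLock(opCtx, snapshot);
    invariant(locker.getLockMode(dbId) == MODE_IX);

    // Ending the WUOW performs the deferred (two-phase) releases.
    locker.endWriteUnitOfWork();
    invariant(!locker.isLocked());
}
```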
+ ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + ASSERT_FALSE(locker.isLocked()); + + // Restore lock state. + locker.restoreWriteUnitOfWorkAndLock(opCtx.get(), lockInfo); + + // Make sure things were re-locked in the correct mode. + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_IX, locker.getLockMode(resIdCollection)); + ASSERT_TRUE(locker.isWriteLocked()); + // Make sure locks were coalesced after restore and are pending to unlock as before. + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resourceIdGlobal).objAddr()->unlockPending, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdDatabase).objAddr()->unlockPending, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->recursiveCount, 1U); + ASSERT_EQ(locker.getRequestsForTest().find(resIdCollection).objAddr()->unlockPending, 1U); + + locker.endWriteUnitOfWork(); + + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdDatabase)); + ASSERT_EQUALS(MODE_NONE, locker.getLockMode(resIdCollection)); + ASSERT_FALSE(locker.isLocked()); +} + +TEST_F(LockerImplTest, DefaultLocker) { + auto opCtx = makeOperationContext(); + + const ResourceId resId(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resId, MODE_X); + + // Make sure only Global and TestDB resources are locked. + Locker::LockerInfo info; + locker.getLockerInfo(&info, boost::none); + ASSERT(!info.waitingResource.isValid()); + ASSERT_EQUALS(2U, info.locks.size()); + ASSERT_EQUALS(RESOURCE_GLOBAL, info.locks[0].resourceId.getType()); + ASSERT_EQUALS(resId, info.locks[1].resourceId); + + ASSERT(locker.unlockGlobal()); +} + +TEST_F(LockerImplTest, SharedLocksShouldTwoPhaseLockIsTrue) { + // Test that when setSharedLocksShouldTwoPhaseLock is true and we are in a WUOW, unlock on IS + // and S locks are postponed until endWriteUnitOfWork() is called. Mode IX and X locks always + // participate in two-phased locking, regardless of the setting. 
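The comment above states the two-phase unlock rules this test verifies: with `setSharedLocksShouldTwoPhaseLock(true)`, IS/S unlocks inside a write unit of work are deferred to `endWriteUnitOfWork()`, just as IX/X unlocks always are. The following is a minimal illustrative sketch of that behaviour, not part of this patch; the namespace is made up and only the APIs exercised by these tests are used.

```cpp
// Hedged sketch of deferred (two-phase) unlocking for a shared lock.
void twoPhaseLockingSketch(OperationContext* opCtx) {
    LockerImpl locker(opCtx->getServiceContext());
    const ResourceId collId(
        RESOURCE_COLLECTION,
        NamespaceString::createNamespaceString_forTest(boost::none, "ExampleDB.coll"));

    locker.setSharedLocksShouldTwoPhaseLock(true);

    locker.lockGlobal(opCtx, MODE_IS);
    locker.lock(collId, MODE_S);

    locker.beginWriteUnitOfWork();

    // With the setting enabled, even the shared lock is only marked pending:
    // unlock() returns false and the mode stays MODE_S.
    invariant(!locker.unlock(collId));
    invariant(locker.getLockMode(collId) == MODE_S);

    // MODE_IX and MODE_X locks behave this way regardless of the setting.

    // endWriteUnitOfWork() performs the real release.
    locker.endWriteUnitOfWork();
    invariant(locker.getLockMode(collId) == MODE_NONE);

    locker.unlockGlobal();
}
```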
+ + auto opCtx = makeOperationContext(); + + const ResourceId resId1(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB1")); + const ResourceId resId2(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB2")); + const ResourceId resId3( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection3")); + const ResourceId resId4( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection4")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.setSharedLocksShouldTwoPhaseLock(true); + + locker.lockGlobal(opCtx.get(), MODE_IS); + ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IS); + + locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS); + + locker.lock(resId1, MODE_IS); + locker.lock(resId2, MODE_IX); + locker.lock(resId3, MODE_S); + locker.lock(resId4, MODE_X); + ASSERT_EQ(locker.getLockMode(resId1), MODE_IS); + ASSERT_EQ(locker.getLockMode(resId2), MODE_IX); + ASSERT_EQ(locker.getLockMode(resId3), MODE_S); + ASSERT_EQ(locker.getLockMode(resId4), MODE_X); + + locker.beginWriteUnitOfWork(); + + ASSERT_FALSE(locker.unlock(resId1)); + ASSERT_FALSE(locker.unlock(resId2)); + ASSERT_FALSE(locker.unlock(resId3)); + ASSERT_FALSE(locker.unlock(resId4)); + ASSERT_EQ(locker.getLockMode(resId1), MODE_IS); + ASSERT_EQ(locker.getLockMode(resId2), MODE_IX); + ASSERT_EQ(locker.getLockMode(resId3), MODE_S); + ASSERT_EQ(locker.getLockMode(resId4), MODE_X); + + ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS); + + ASSERT_FALSE(locker.unlockGlobal()); + ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IS); + + locker.endWriteUnitOfWork(); + + ASSERT_EQ(locker.getLockMode(resId1), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resId2), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resId3), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resId4), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_NONE); +} + +TEST_F(LockerImplTest, ModeIXAndXLockParticipatesInTwoPhaseLocking) { + // Unlock on mode IX and X locks during a WUOW should always be postponed until + // endWriteUnitOfWork() is called. Mode IS and S locks should unlock immediately. 
+ + auto opCtx = makeOperationContext(); + + const ResourceId resId1(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB1")); + const ResourceId resId2(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB2")); + const ResourceId resId3( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection3")); + const ResourceId resId4( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection4")); + + LockerImpl locker(opCtx->getServiceContext()); + + locker.lockGlobal(opCtx.get(), MODE_IX); + ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IX); + + locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); + + locker.lock(resId1, MODE_IS); + locker.lock(resId2, MODE_IX); + locker.lock(resId3, MODE_S); + locker.lock(resId4, MODE_X); + ASSERT_EQ(locker.getLockMode(resId1), MODE_IS); + ASSERT_EQ(locker.getLockMode(resId2), MODE_IX); + ASSERT_EQ(locker.getLockMode(resId3), MODE_S); + ASSERT_EQ(locker.getLockMode(resId4), MODE_X); + + locker.beginWriteUnitOfWork(); + + ASSERT_TRUE(locker.unlock(resId1)); + ASSERT_FALSE(locker.unlock(resId2)); + ASSERT_TRUE(locker.unlock(resId3)); + ASSERT_FALSE(locker.unlock(resId4)); + ASSERT_EQ(locker.getLockMode(resId1), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resId2), MODE_IX); + ASSERT_EQ(locker.getLockMode(resId3), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resId4), MODE_X); + + ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); + + ASSERT_FALSE(locker.unlockGlobal()); + ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_IX); + + locker.endWriteUnitOfWork(); + + ASSERT_EQ(locker.getLockMode(resId2), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resId4), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + ASSERT_EQ(locker.getLockMode(resourceIdGlobal), MODE_NONE); +} + +TEST_F(LockerImplTest, RSTLUnlocksWithNestedLock) { + auto opCtx = makeOperationContext(); + LockerImpl locker(opCtx->getServiceContext()); + + locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); + + locker.beginWriteUnitOfWork(); + + // Do a nested lock acquisition. 
+ locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); + + ASSERT(locker.unlockRSTLforPrepare()); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + + ASSERT_FALSE(locker.unlockRSTLforPrepare()); + + locker.endWriteUnitOfWork(); + + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + + ASSERT_FALSE(locker.unlockRSTLforPrepare()); + ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); +} + +TEST_F(LockerImplTest, RSTLModeIXWithTwoPhaseLockingCanBeUnlockedWhenPrepared) { + auto opCtx = makeOperationContext(); + LockerImpl locker(opCtx->getServiceContext()); + + locker.lock(resourceIdReplicationStateTransitionLock, MODE_IX); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); + + locker.beginWriteUnitOfWork(); + + ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); + + ASSERT(locker.unlockRSTLforPrepare()); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + + ASSERT_FALSE(locker.unlockRSTLforPrepare()); + + locker.endWriteUnitOfWork(); + + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + + ASSERT_FALSE(locker.unlockRSTLforPrepare()); + ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); +} + +TEST_F(LockerImplTest, RSTLModeISWithTwoPhaseLockingCanBeUnlockedWhenPrepared) { + auto opCtx = makeOperationContext(); + LockerImpl locker(opCtx->getServiceContext()); + + locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS); + + locker.beginWriteUnitOfWork(); + + ASSERT(locker.unlockRSTLforPrepare()); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + + ASSERT_FALSE(locker.unlockRSTLforPrepare()); + + locker.endWriteUnitOfWork(); + + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + + ASSERT_FALSE(locker.unlockRSTLforPrepare()); + ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); +} + +TEST_F(LockerImplTest, RSTLTwoPhaseLockingBehaviorModeIS) { + auto opCtx = makeOperationContext(); + LockerImpl locker(opCtx->getServiceContext()); + + locker.lock(resourceIdReplicationStateTransitionLock, MODE_IS); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_IS); + + locker.beginWriteUnitOfWork(); + + ASSERT_TRUE(locker.unlock(resourceIdReplicationStateTransitionLock)); + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + + ASSERT_FALSE(locker.unlockRSTLforPrepare()); + + locker.endWriteUnitOfWork(); + + ASSERT_EQ(locker.getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); + + ASSERT_FALSE(locker.unlockRSTLforPrepare()); + ASSERT_FALSE(locker.unlock(resourceIdReplicationStateTransitionLock)); +} + +TEST_F(LockerImplTest, OverrideLockRequestTimeout) { + auto opCtx = makeOperationContext(); + + const ResourceId resIdFirstDB(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "FirstDB")); + const ResourceId resIdSecondDB( + RESOURCE_DATABASE, DatabaseName::createDatabaseName_forTest(boost::none, "SecondDB")); + + LockerImpl locker1(opCtx->getServiceContext()); + LockerImpl locker2(opCtx->getServiceContext()); + + // Set up 
locker2 to override lock requests' provided timeout if greater than 1000 milliseconds. + locker2.setMaxLockTimeout(Milliseconds(1000)); + + locker1.lockGlobal(opCtx.get(), MODE_IX); + locker2.lockGlobal(opCtx.get(), MODE_IX); + + // locker1 acquires FirstDB under an exclusive lock. + locker1.lock(resIdFirstDB, MODE_X); + ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_X)); + + // locker2's attempt to acquire FirstDB with unlimited wait time should timeout after 1000 + // milliseconds and throw because _maxLockRequestTimeout is set to 1000 milliseconds. + ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resIdFirstDB, MODE_X, Date_t::max()), + AssertionException, + ErrorCodes::LockTimeout); + + // locker2's attempt to acquire an uncontested lock should still succeed normally. + locker2.lock(resIdSecondDB, MODE_X); + + ASSERT_TRUE(locker1.unlock(resIdFirstDB)); + ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_NONE)); + ASSERT_TRUE(locker2.unlock(resIdSecondDB)); + ASSERT_TRUE(locker2.isLockHeldForMode(resIdSecondDB, MODE_NONE)); + + ASSERT(locker1.unlockGlobal()); + ASSERT(locker2.unlockGlobal()); +} + +TEST_F(LockerImplTest, DoNotWaitForLockAcquisition) { + auto opCtx = makeOperationContext(); + + const ResourceId resIdFirstDB(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "FirstDB")); + const ResourceId resIdSecondDB( + RESOURCE_DATABASE, DatabaseName::createDatabaseName_forTest(boost::none, "SecondDB")); + + LockerImpl locker1(opCtx->getServiceContext()); + LockerImpl locker2(opCtx->getServiceContext()); + + // Set up locker2 to immediately return if a lock is unavailable, regardless of supplied + // deadlines in the lock request. + locker2.setMaxLockTimeout(Milliseconds(0)); + + locker1.lockGlobal(opCtx.get(), MODE_IX); + locker2.lockGlobal(opCtx.get(), MODE_IX); + + // locker1 acquires FirstDB under an exclusive lock. + locker1.lock(resIdFirstDB, MODE_X); + ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_X)); + + // locker2's attempt to acquire FirstDB with unlimited wait time should fail immediately and + // throw because _maxLockRequestTimeout was set to 0. + ASSERT_THROWS_CODE(locker2.lock(opCtx.get(), resIdFirstDB, MODE_X, Date_t::max()), + AssertionException, + ErrorCodes::LockTimeout); + + // locker2's attempt to acquire an uncontested lock should still succeed normally. + locker2.lock(resIdSecondDB, MODE_X); + + ASSERT_TRUE(locker1.unlock(resIdFirstDB)); + ASSERT_TRUE(locker1.isLockHeldForMode(resIdFirstDB, MODE_NONE)); + ASSERT_TRUE(locker2.unlock(resIdSecondDB)); + ASSERT_TRUE(locker2.isLockHeldForMode(resIdSecondDB, MODE_NONE)); + + ASSERT(locker1.unlockGlobal()); + ASSERT(locker2.unlockGlobal()); +} + +namespace { +/** + * Helper function to determine if 'lockerInfo' contains a lock with ResourceId 'resourceId' and + * lock mode 'mode' within 'lockerInfo.locks'. 
+ */ +bool lockerInfoContainsLock(const Locker::LockerInfo& lockerInfo, + const ResourceId& resourceId, + const LockMode& mode) { + return (1U == + std::count_if(lockerInfo.locks.begin(), + lockerInfo.locks.end(), + [&resourceId, &mode](const Locker::OneLock& lock) { + return lock.resourceId == resourceId && lock.mode == mode; + })); +} +} // namespace + +TEST_F(LockerImplTest, GetLockerInfoShouldReportHeldLocks) { + auto opCtx = makeOperationContext(); + + const ResourceId dbId(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + const ResourceId collectionId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + // Take an exclusive lock on the collection. + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(dbId, MODE_IX); + locker.lock(collectionId, MODE_X); + + // Assert it shows up in the output of getLockerInfo(). + Locker::LockerInfo lockerInfo; + locker.getLockerInfo(&lockerInfo, boost::none); + + ASSERT(lockerInfoContainsLock(lockerInfo, resourceIdGlobal, MODE_IX)); + ASSERT(lockerInfoContainsLock(lockerInfo, dbId, MODE_IX)); + ASSERT(lockerInfoContainsLock(lockerInfo, collectionId, MODE_X)); + ASSERT_EQ(3U, lockerInfo.locks.size()); + + ASSERT(locker.unlock(collectionId)); + ASSERT(locker.unlock(dbId)); + ASSERT(locker.unlockGlobal()); +} + +TEST_F(LockerImplTest, GetLockerInfoShouldReportPendingLocks) { + auto opCtx = makeOperationContext(); + + const ResourceId dbId(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")); + const ResourceId collectionId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + // Take an exclusive lock on the collection. + LockerImpl successfulLocker(opCtx->getServiceContext()); + successfulLocker.lockGlobal(opCtx.get(), MODE_IX); + successfulLocker.lock(dbId, MODE_IX); + successfulLocker.lock(collectionId, MODE_X); + + // Now attempt to get conflicting locks. + LockerImpl conflictingLocker(opCtx->getServiceContext()); + conflictingLocker.lockGlobal(opCtx.get(), MODE_IS); + conflictingLocker.lock(dbId, MODE_IS); + ASSERT_EQ(LOCK_WAITING, + conflictingLocker.lockBeginForTest(nullptr /* opCtx */, collectionId, MODE_IS)); + + // Assert the held locks show up in the output of getLockerInfo(). + Locker::LockerInfo lockerInfo; + conflictingLocker.getLockerInfo(&lockerInfo, boost::none); + ASSERT(lockerInfoContainsLock(lockerInfo, resourceIdGlobal, MODE_IS)); + ASSERT(lockerInfoContainsLock(lockerInfo, dbId, MODE_IS)); + ASSERT(lockerInfoContainsLock(lockerInfo, collectionId, MODE_IS)); + ASSERT_EQ(3U, lockerInfo.locks.size()); + + // Assert it reports that it is waiting for the collection lock. + ASSERT_EQ(collectionId, lockerInfo.waitingResource); + + // Make sure it no longer reports waiting once unlocked. 
+ ASSERT(successfulLocker.unlock(collectionId)); + ASSERT(successfulLocker.unlock(dbId)); + ASSERT(successfulLocker.unlockGlobal()); + + conflictingLocker.lockCompleteForTest( + nullptr /* opCtx */, collectionId, MODE_IS, Date_t::now()); + + conflictingLocker.getLockerInfo(&lockerInfo, boost::none); + ASSERT_FALSE(lockerInfo.waitingResource.isValid()); + + ASSERT(conflictingLocker.unlock(collectionId)); + ASSERT(conflictingLocker.unlock(dbId)); + ASSERT(conflictingLocker.unlockGlobal()); +} + +TEST_F(LockerImplTest, GetLockerInfoShouldSubtractBase) { + auto opCtx = makeOperationContext(); + auto locker = opCtx->lockState(); + const ResourceId dbId(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "SubtractTestDB")); + + auto numAcquisitions = [&](boost::optional baseStats) { + Locker::LockerInfo info; + locker->getLockerInfo(&info, baseStats); + return info.stats.get(dbId, MODE_IX).numAcquisitions; + }; + auto getBaseStats = [&] { + return CurOp::get(opCtx.get())->getLockStatsBase(); + }; + + locker->lockGlobal(opCtx.get(), MODE_IX); + + // Obtain a lock before any other ops have been pushed to the stack. + locker->lock(dbId, MODE_IX); + locker->unlock(dbId); + + ASSERT_EQUALS(numAcquisitions(getBaseStats()), 1) << "The acquisition should be reported"; + + // Push another op to the stack and obtain a lock. + CurOp superOp; + superOp.push(opCtx.get()); + locker->lock(dbId, MODE_IX); + locker->unlock(dbId); + + ASSERT_EQUALS(numAcquisitions(getBaseStats()), 1) + << "Only superOp's acquisition should be reported"; + + // Then push another op to the stack and obtain another lock. + CurOp subOp; + subOp.push(opCtx.get()); + locker->lock(dbId, MODE_IX); + locker->unlock(dbId); + + ASSERT_EQUALS(numAcquisitions(getBaseStats()), 1) + << "Only the latest acquisition should be reported"; + + ASSERT_EQUALS(numAcquisitions({}), 3) + << "All acquisitions should be reported when no base is subtracted out."; + + ASSERT(locker->unlockGlobal()); +} + +TEST_F(LockerImplTest, ReaquireLockPendingUnlock) { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IS); + + locker.lock(resId, MODE_X); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); + + locker.beginWriteUnitOfWork(); + + ASSERT_FALSE(locker.unlock(resId)); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); + + // Reacquire lock pending unlock. 
+ locker.lock(resId, MODE_X); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 0); + + locker.endWriteUnitOfWork(); + + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); + + locker.unlockGlobal(); +} + +TEST_F(LockerImplTest, AcquireLockPendingUnlockWithCoveredMode) { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IS); + + locker.lock(resId, MODE_X); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); + + locker.beginWriteUnitOfWork(); + + ASSERT_FALSE(locker.unlock(resId)); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); + + // Attempt to lock the resource with a mode that is covered by the existing mode. + locker.lock(resId, MODE_IX); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 0); + + locker.endWriteUnitOfWork(); + + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); + + locker.unlockGlobal(); +} + +TEST_F(LockerImplTest, ConvertLockPendingUnlock) { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IS); + + locker.lock(resId, MODE_IX); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); + + locker.beginWriteUnitOfWork(); + + ASSERT_FALSE(locker.unlock(resId)); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 1); + + // Convert lock pending unlock. 
+ locker.lock(resId, MODE_X); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 2); + + locker.endWriteUnitOfWork(); + + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 0); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); + + locker.unlockGlobal(); +} + +TEST_F(LockerImplTest, ConvertLockPendingUnlockAndUnlock) { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IS); + + locker.lock(resId, MODE_IX); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); + + locker.beginWriteUnitOfWork(); + + ASSERT_FALSE(locker.unlock(resId)); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_IX)); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 1); + + // Convert lock pending unlock. + locker.lock(resId, MODE_X); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 2); + + // Unlock the lock conversion. + ASSERT_FALSE(locker.unlock(resId)); + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 1); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->unlockPending == 1); + // Make sure we still hold X lock and unlock the weaker mode to decrement recursiveCount instead + // of incrementing unlockPending. + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_X)); + ASSERT(locker.getRequestsForTest().find(resId).objAddr()->recursiveCount == 1); + + locker.endWriteUnitOfWork(); + + ASSERT(locker.numResourcesToUnlockAtEndUnitOfWorkForTest() == 0); + ASSERT(locker.getRequestsForTest().find(resId).finished()); + ASSERT_TRUE(locker.isLockHeldForMode(resId, MODE_NONE)); + + locker.unlockGlobal(); +} + +TEST_F(LockerImplTest, SetTicketAcquisitionForLockRAIIType) { + auto opCtx = makeOperationContext(); + + // By default, ticket acquisition is required. + ASSERT_TRUE(opCtx->lockState()->shouldWaitForTicket()); + + { + ScopedAdmissionPriorityForLock setTicketAquisition(opCtx->lockState(), + AdmissionContext::Priority::kImmediate); + ASSERT_FALSE(opCtx->lockState()->shouldWaitForTicket()); + } + + ASSERT_TRUE(opCtx->lockState()->shouldWaitForTicket()); + + opCtx->lockState()->setAdmissionPriority(AdmissionContext::Priority::kImmediate); + ASSERT_FALSE(opCtx->lockState()->shouldWaitForTicket()); + + { + ScopedAdmissionPriorityForLock setTicketAquisition(opCtx->lockState(), + AdmissionContext::Priority::kImmediate); + ASSERT_FALSE(opCtx->lockState()->shouldWaitForTicket()); + } + + ASSERT_FALSE(opCtx->lockState()->shouldWaitForTicket()); +} + +// This test exercises the lock dumping code in ~LockerImpl in case locks are held on destruction. 
+DEATH_TEST_F(LockerImplTest, + LocksHeldOnDestructionCausesALocksDump, + "Operation ending while holding locks.") { + auto opCtx = makeOperationContext(); + + const ResourceId resId( + RESOURCE_COLLECTION, + NamespaceString::createNamespaceString_forTest(boost::none, "TestDB.collection")); + + LockerImpl locker(opCtx->getServiceContext()); + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lock(resId, MODE_X); + + ASSERT(locker.isLockHeldForMode(resId, MODE_X)); + ASSERT(locker.isLockHeldForMode(resId, MODE_S)); + + // 'locker' destructor should invariant because locks are still held. +} + +DEATH_TEST_F(LockerImplTest, SaveAndRestoreGlobalRecursivelyIsFatal, "7033800") { + auto opCtx = makeOperationContext(); + + Locker::LockSnapshot lockInfo; + + LockerImpl locker(opCtx->getServiceContext()); + + // No lock requests made, no locks held. + locker.saveLockStateAndUnlock(&lockInfo); + ASSERT_EQUALS(0U, lockInfo.locks.size()); + + // Lock the global lock. + locker.lockGlobal(opCtx.get(), MODE_IX); + locker.lockGlobal(opCtx.get(), MODE_IX); + + // Should invariant + locker.saveLockStateAndUnlock(&lockInfo); +} + +} // namespace mongo diff --git a/src/mongo/db/concurrency/locker_noop.h b/src/mongo/db/concurrency/locker_noop.h deleted file mode 100644 index 5bd792544ac01..0000000000000 --- a/src/mongo/db/concurrency/locker_noop.h +++ /dev/null @@ -1,263 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/concurrency/locker.h" - -namespace mongo { - -/** - * Locker, which cannot be used to lock/unlock resources and just returns true for checks for - * whether a particular resource is locked. Do not use it for cases where actual locking - * behaviour is expected or locking is performed. 
- */ -class LockerNoop : public Locker { -public: - LockerNoop() {} - - virtual bool isNoop() const { - return true; - } - - virtual ClientState getClientState() const { - MONGO_UNREACHABLE; - } - - virtual LockerId getId() const { - MONGO_UNREACHABLE; - } - - stdx::thread::id getThreadId() const override { - MONGO_UNREACHABLE; - } - - void updateThreadIdToCurrentThread() override { - MONGO_UNREACHABLE; - } - - void unsetThreadId() override { - MONGO_UNREACHABLE; - } - - void setSharedLocksShouldTwoPhaseLock(bool sharedLocksShouldTwoPhaseLock) override { - MONGO_UNREACHABLE; - } - - void setMaxLockTimeout(Milliseconds maxTimeout) override { - MONGO_UNREACHABLE; - } - - bool hasMaxLockTimeout() override { - MONGO_UNREACHABLE; - } - - void unsetMaxLockTimeout() override { - MONGO_UNREACHABLE; - } - - virtual void lockGlobal(OperationContext* opCtx, LockMode mode, Date_t deadline) { - MONGO_UNREACHABLE; - } - - virtual void lockGlobal(LockMode mode, Date_t deadline) { - MONGO_UNREACHABLE; - } - - virtual bool unlockGlobal() { - MONGO_UNREACHABLE; - } - - virtual void beginWriteUnitOfWork() override {} - - virtual void endWriteUnitOfWork() override {} - - virtual bool inAWriteUnitOfWork() const { - return false; - } - - virtual bool wasGlobalLockTakenForWrite() const { - return false; - } - - virtual bool wasGlobalLockTakenInModeConflictingWithWrites() const { - return false; - } - - virtual bool wasGlobalLockTaken() const { - return false; - } - - virtual void setGlobalLockTakenInMode(LockMode mode) {} - - virtual LockResult lockRSTLBegin(OperationContext* opCtx, LockMode mode) { - MONGO_UNREACHABLE; - } - - virtual void lockRSTLComplete(OperationContext* opCtx, - LockMode mode, - Date_t deadline, - const LockTimeoutCallback& onTimeout) { - MONGO_UNREACHABLE; - } - - virtual bool unlockRSTLforPrepare() { - MONGO_UNREACHABLE; - } - - virtual void lock(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline) {} - - virtual void lock(ResourceId resId, LockMode mode, Date_t deadline) {} - - virtual void downgrade(ResourceId resId, LockMode newMode) { - MONGO_UNREACHABLE; - } - - virtual bool unlock(ResourceId resId) { - return true; - } - - virtual LockMode getLockMode(ResourceId resId) const { - MONGO_UNREACHABLE; - } - - virtual bool isLockHeldForMode(ResourceId resId, LockMode mode) const { - return true; - } - - virtual bool isDbLockedForMode(const DatabaseName& dbName, LockMode mode) const { - return true; - } - - virtual bool isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const { - return true; - } - - virtual ResourceId getWaitingResource() const { - MONGO_UNREACHABLE; - } - - virtual void getLockerInfo(LockerInfo* lockerInfo, - boost::optional lockStatsBase) const { - MONGO_UNREACHABLE; - } - - virtual boost::optional getLockerInfo( - boost::optional lockStatsBase) const { - return boost::none; - } - - virtual bool saveLockStateAndUnlock(LockSnapshot* stateOut) { - MONGO_UNREACHABLE; - } - - virtual void restoreLockState(OperationContext* opCtx, const LockSnapshot& stateToRestore) { - MONGO_UNREACHABLE; - } - virtual void restoreLockState(const LockSnapshot& stateToRestore) { - MONGO_UNREACHABLE; - } - - bool releaseWriteUnitOfWorkAndUnlock(LockSnapshot* stateOut) override { - MONGO_UNREACHABLE; - } - - void restoreWriteUnitOfWorkAndLock(OperationContext* opCtx, - const LockSnapshot& stateToRestore) override { - MONGO_UNREACHABLE; - }; - - void releaseWriteUnitOfWork(WUOWLockSnapshot* stateOut) override { - MONGO_UNREACHABLE; - } - - void 
restoreWriteUnitOfWork(const WUOWLockSnapshot& stateToRestore) override { - MONGO_UNREACHABLE; - }; - - virtual void releaseTicket() { - MONGO_UNREACHABLE; - } - - virtual void reacquireTicket(OperationContext* opCtx) { - MONGO_UNREACHABLE; - } - - virtual bool hasReadTicket() const { - MONGO_UNREACHABLE; - } - - virtual bool hasWriteTicket() const { - MONGO_UNREACHABLE; - } - - virtual void dump() const { - MONGO_UNREACHABLE; - } - - virtual bool isW() const { - return false; - } - - virtual bool isR() const { - MONGO_UNREACHABLE; - } - - virtual bool isLocked() const { - // This is necessary because replication makes decisions based on the answer to this, and - // we wrote unit tests to test the behavior specifically when this returns "false". - return false; - } - - virtual bool isWriteLocked() const { - return true; - } - - virtual bool isReadLocked() const { - return true; - } - - virtual bool isRSTLExclusive() const { - return true; - } - - virtual bool isRSTLLocked() const { - return true; - } - - virtual bool hasLockPending() const { - MONGO_UNREACHABLE; - } - - bool isGlobalLockedRecursively() override { - return false; - } -}; - -} // namespace mongo diff --git a/src/mongo/db/concurrency/locker_noop_client_observer.h b/src/mongo/db/concurrency/locker_noop_client_observer.h deleted file mode 100644 index 6f9159e83d13e..0000000000000 --- a/src/mongo/db/concurrency/locker_noop_client_observer.h +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/concurrency/locker_noop.h" -#include "mongo/db/service_context.h" - -namespace mongo { - -/** - * ServiceContext hook that ensures OperationContexts are created with a valid - * Locker instance. Intended for use in tests that do not require a storage engine. 
- */ -class LockerNoopClientObserver : public ServiceContext::ClientObserver { -public: - LockerNoopClientObserver() = default; - ~LockerNoopClientObserver() = default; - - void onCreateClient(Client* client) final {} - - void onDestroyClient(Client* client) final {} - - void onCreateOperationContext(OperationContext* opCtx) override { - opCtx->setLockState(std::make_unique()); - } - - void onDestroyOperationContext(OperationContext* opCtx) final {} -}; - -/** - * Unlike LockerNoopClientObserver, this ClientObserver will not overwrite any existing Lockers on - * the OperationContext. - * - * This class is suitable for test fixtures that may be used in executables with LockerImpl service - * hooks installed. - */ -class LockerNoopClientObserverWithReplacementPolicy : public LockerNoopClientObserver { -public: - LockerNoopClientObserverWithReplacementPolicy() = default; - ~LockerNoopClientObserverWithReplacementPolicy() = default; - - void onCreateOperationContext(OperationContext* opCtx) final { - if (opCtx->lockState()) { - return; - } - LockerNoopClientObserver::onCreateOperationContext(opCtx); - } -}; - -} // namespace mongo diff --git a/src/mongo/db/concurrency/locker_noop_service_context_test_fixture.h b/src/mongo/db/concurrency/locker_noop_service_context_test_fixture.h deleted file mode 100644 index cffd44150bdc9..0000000000000 --- a/src/mongo/db/concurrency/locker_noop_service_context_test_fixture.h +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/db/service_context_test_fixture.h" - -namespace mongo { - -/** - * Registers the LockerNoopClientObserver with the provided ServiceContext on construction. 
- */ -class LockerNoopClientObserverRegisterer { - LockerNoopClientObserverRegisterer(const LockerNoopClientObserverRegisterer&) = delete; - LockerNoopClientObserverRegisterer& operator=(const LockerNoopClientObserverRegisterer&) = - delete; - -public: - explicit LockerNoopClientObserverRegisterer(ServiceContext* service) { - service->registerClientObserver(std::make_unique()); - } -}; - -/** - * Test fixture for tests that require a properly initialized global service context - * and a stub Locker implementation whenever an OperationContext is requested. - */ -class LockerNoopServiceContextTest : public ServiceContextTest { -protected: - LockerNoopServiceContextTest() : _lockerNoopClientObserverRegisterer(getServiceContext()) {} - - ~LockerNoopServiceContextTest() = default; - -private: - LockerNoopClientObserverRegisterer _lockerNoopClientObserverRegisterer; -}; - -} // namespace mongo diff --git a/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp b/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp index c46af9278ac73..3085f435b505b 100644 --- a/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp +++ b/src/mongo/db/concurrency/replication_state_transition_lock_guard.cpp @@ -28,10 +28,11 @@ */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/operation_context.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/concurrency/replication_state_transition_lock_guard.h b/src/mongo/db/concurrency/replication_state_transition_lock_guard.h index a6079005ea20f..0ddc101afe526 100644 --- a/src/mongo/db/concurrency/replication_state_transition_lock_guard.h +++ b/src/mongo/db/concurrency/replication_state_transition_lock_guard.h @@ -33,6 +33,7 @@ #include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/locker.h" +#include "mongo/db/operation_context.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/concurrency/resource_catalog.cpp b/src/mongo/db/concurrency/resource_catalog.cpp index 62183c3a4f838..740d24d7a13e2 100644 --- a/src/mongo/db/concurrency/resource_catalog.cpp +++ b/src/mongo/db/concurrency/resource_catalog.cpp @@ -29,25 +29,38 @@ #include "mongo/db/concurrency/resource_catalog.h" -#include "mongo/db/service_context.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/util/assert_util_core.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/static_immortal.h" namespace mongo { -namespace { -const auto getResourceCatalog = ServiceContext::declareDecoration(); -} // namespace -ResourceCatalog& ResourceCatalog::get(ServiceContext* svcCtx) { - return getResourceCatalog(svcCtx); +ResourceCatalog& ResourceCatalog::get() { + static StaticImmortal resourceCatalog; + return resourceCatalog.value(); } void ResourceCatalog::add(ResourceId id, const NamespaceString& ns) { invariant(id.getType() == RESOURCE_COLLECTION); - _add(id, ns.toStringWithTenantId()); + _add(id, NamespaceStringUtil::serializeForCatalog(ns)); } void ResourceCatalog::add(ResourceId id, const DatabaseName& dbName) { invariant(id.getType() == RESOURCE_DATABASE); - _add(id, dbName.toStringWithTenantId()); + _add(id, DatabaseNameUtil::serializeForCatalog(dbName)); } void ResourceCatalog::_add(ResourceId id, 
std::string name) { @@ -57,12 +70,20 @@ void ResourceCatalog::_add(ResourceId id, std::string name) { void ResourceCatalog::remove(ResourceId id, const NamespaceString& ns) { invariant(id.getType() == RESOURCE_COLLECTION); - _remove(id, ns.toStringWithTenantId()); + _remove(id, NamespaceStringUtil::serializeForCatalog(ns)); } void ResourceCatalog::remove(ResourceId id, const DatabaseName& dbName) { invariant(id.getType() == RESOURCE_DATABASE); - _remove(id, dbName.toStringWithTenantId()); + _remove(id, DatabaseNameUtil::serializeForCatalog(dbName)); +} + +ResourceId ResourceCatalog::newResourceIdForMutex(std::string resourceLabel) { + stdx::lock_guard lk(_mutexResourceIdLabelsMutex); + _mutexResourceIdLabels.emplace_back(std::move(resourceLabel)); + + return ResourceId( + ResourceId::fullHash(ResourceType::RESOURCE_MUTEX, _mutexResourceIdLabels.size() - 1)); } void ResourceCatalog::_remove(ResourceId id, const std::string& name) { @@ -86,12 +107,20 @@ void ResourceCatalog::clear() { } boost::optional ResourceCatalog::name(ResourceId id) const { - invariant(id.getType() == RESOURCE_DATABASE || id.getType() == RESOURCE_COLLECTION); - stdx::lock_guard lk{_mutex}; + if (id.getType() == RESOURCE_DATABASE || id.getType() == RESOURCE_COLLECTION) { + stdx::lock_guard lk{_mutex}; - auto it = _resources.find(id); - return it == _resources.end() || it->second.size() > 1 - ? boost::none - : boost::make_optional(*it->second.begin()); + auto it = _resources.find(id); + return it == _resources.end() || it->second.size() > 1 + ? boost::none + : boost::make_optional(*it->second.begin()); + } else if (id.getType() == RESOURCE_MUTEX) { + stdx::lock_guard lk{_mutexResourceIdLabelsMutex}; + + return _mutexResourceIdLabels.at(id.getHashId()); + } + + return boost::none; } + } // namespace mongo diff --git a/src/mongo/db/concurrency/resource_catalog.h b/src/mongo/db/concurrency/resource_catalog.h index 44f684053b434..e8019c01a8cae 100644 --- a/src/mongo/db/concurrency/resource_catalog.h +++ b/src/mongo/db/concurrency/resource_catalog.h @@ -29,12 +29,23 @@ #pragma once +#include +#include + +#include + #include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/string_map.h" namespace mongo { + class ResourceCatalog { public: - static ResourceCatalog& get(ServiceContext* scvCtx); + static ResourceCatalog& get(); void add(ResourceId id, const NamespaceString& ns); void add(ResourceId id, const DatabaseName& dbName); @@ -42,6 +53,8 @@ class ResourceCatalog { void remove(ResourceId id, const NamespaceString& ns); void remove(ResourceId id, const DatabaseName& dbName); + ResourceId newResourceIdForMutex(std::string resourceLabel); + void clear(); /** @@ -55,7 +68,12 @@ class ResourceCatalog { void _remove(ResourceId id, const std::string& name); - mutable Mutex _mutex = MONGO_MAKE_LATCH("ResourceCatalog"); + mutable Mutex _mutex = MONGO_MAKE_LATCH("ResourceCatalog::_mutex"); stdx::unordered_map _resources; + + mutable Mutex _mutexResourceIdLabelsMutex = + MONGO_MAKE_LATCH("ResourceCatalog::_mutexResourceIdLabelsMutex"); + std::vector _mutexResourceIdLabels; }; + } // namespace mongo diff --git a/src/mongo/db/concurrency/resource_catalog_test.cpp b/src/mongo/db/concurrency/resource_catalog_test.cpp index 026e5cf587adf..bdd5c4e5bfb38 100644 --- a/src/mongo/db/concurrency/resource_catalog_test.cpp +++ b/src/mongo/db/concurrency/resource_catalog_test.cpp @@ 
-28,11 +28,21 @@ */ #include "mongo/db/concurrency/resource_catalog.h" + +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { + class ResourceCatalogTest : public unittest::Test { public: void setUp() { @@ -73,10 +83,10 @@ TEST_F(ResourceCatalogTest, InsertTest) { catalog.add(thirdResourceId, thirdCollection); resource = catalog.name(firstResourceId); - ASSERT_EQ(firstCollection.toStringWithTenantId(), *resource); + ASSERT_EQ(firstCollection.toStringWithTenantId_forTest(), *resource); resource = catalog.name(thirdResourceId); - ASSERT_EQ(thirdCollection.toStringWithTenantId(), resource); + ASSERT_EQ(thirdCollection.toStringWithTenantId_forTest(), resource); } TEST_F(ResourceCatalogTest, RemoveTest) { @@ -87,7 +97,7 @@ TEST_F(ResourceCatalogTest, RemoveTest) { catalog.remove(firstResourceId, NamespaceString::createNamespaceString_forTest(boost::none, "BadNamespace")); auto resource = catalog.name(firstResourceId); - ASSERT_EQ(firstCollection.toStringWithTenantId(), *resource); + ASSERT_EQ(firstCollection.toStringWithTenantId_forTest(), *resource); catalog.remove(firstResourceId, firstCollection); catalog.remove(firstResourceId, firstCollection); @@ -100,6 +110,11 @@ TEST_F(ResourceCatalogTest, RemoveTest) { ASSERT_EQ(boost::none, resource); } +TEST_F(ResourceCatalogTest, ResourceMutexTest) { + auto rid = catalog.newResourceIdForMutex("TestLabel"); + ASSERT_EQ("TestLabel", *catalog.name(rid)); +} + TEST_F(ResourceCatalogTest, CollisionTest) { // firstCollection and secondCollection map to the same ResourceId. catalog.add(firstResourceId, firstCollection); @@ -116,12 +131,12 @@ TEST_F(ResourceCatalogTest, CollisionTest) { // We remove a namespace, resolving the collision. catalog.remove(firstResourceId, firstCollection); resource = catalog.name(secondResourceId); - ASSERT_EQ(secondCollection.toStringWithTenantId(), *resource); + ASSERT_EQ(secondCollection.toStringWithTenantId_forTest(), *resource); // Adding the same namespace twice does not create a collision. catalog.add(secondResourceId, secondCollection); resource = catalog.name(secondResourceId); - ASSERT_EQ(secondCollection.toStringWithTenantId(), *resource); + ASSERT_EQ(secondCollection.toStringWithTenantId_forTest(), *resource); // The map should function normally for entries without collisions. 
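The `newResourceIdForMutex()` method and the singleton `ResourceCatalog::get()` introduced in this patch give RESOURCE_MUTEX ResourceIds a stored, human-readable label that `name()` can recover, as `ResourceMutexTest` checks above. A small illustrative sketch of the intended use follows; the label text is made up.

```cpp
// Hedged sketch of the label-based mutex ResourceIds added in this patch.
void resourceMutexSketch() {
    // The catalog is now a process-wide singleton rather than a
    // ServiceContext decoration.
    auto& catalog = ResourceCatalog::get();

    // Each call hands back a RESOURCE_MUTEX ResourceId whose hash indexes the
    // stored label, so diagnostics can recover a readable name later.
    ResourceId rid = catalog.newResourceIdForMutex("ExampleComponent::_mutex");
    invariant(catalog.name(rid) && *catalog.name(rid) == "ExampleComponent::_mutex");
}
```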
catalog.add(firstResourceId, firstCollection); @@ -130,7 +145,7 @@ TEST_F(ResourceCatalogTest, CollisionTest) { catalog.add(thirdResourceId, thirdCollection); resource = catalog.name(thirdResourceId); - ASSERT_EQ(thirdCollection.toStringWithTenantId(), *resource); + ASSERT_EQ(thirdCollection.toStringWithTenantId_forTest(), *resource); catalog.remove(thirdResourceId, thirdCollection); resource = catalog.name(thirdResourceId); @@ -147,11 +162,12 @@ TEST_F(ResourceCatalogTest, CollisionTest) { } DEATH_TEST_F(ResourceCatalogTest, AddDatabaseInvalidResourceType, "invariant") { - catalog.add({RESOURCE_GLOBAL, 0}, DatabaseName{"db"}); + catalog.add({RESOURCE_GLOBAL, 0}, DatabaseName::createDatabaseName_forTest(boost::none, "db")); } DEATH_TEST_F(ResourceCatalogTest, AddCollectionInvalidResourceType, "invariant") { catalog.add({RESOURCE_GLOBAL, 0}, NamespaceString::createNamespaceString_forTest("db.coll")); } + } // namespace } // namespace mongo diff --git a/src/mongo/db/create_indexes.idl b/src/mongo/db/create_indexes.idl index 0c628f75cee06..49b186b110582 100644 --- a/src/mongo/db/create_indexes.idl +++ b/src/mongo/db/create_indexes.idl @@ -185,6 +185,10 @@ structs: type: safeBool optional: true stability: unstable + originalSpec: + type: object_owned + optional: true + stability: unstable clustered: type: safeBool optional: true diff --git a/src/mongo/db/cst/SConscript b/src/mongo/db/cst/SConscript index fbb9634cfd3b7..6423db3a095b8 100644 --- a/src/mongo/db/cst/SConscript +++ b/src/mongo/db/cst/SConscript @@ -42,6 +42,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/index/index_access_method', '$BUILD_DIR/mongo/db/matcher/expressions_mongod_only', '$BUILD_DIR/mongo/db/query/query_test_service_context', + '$BUILD_DIR/mongo/db/service_context_non_d', 'cst', ], ) @@ -55,10 +56,13 @@ env.CppUnitTest( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/query/query_test_service_context', + '$BUILD_DIR/mongo/db/service_context_non_d', 'cst', ], ) + # Disabled under SERVER-64949. +# # env.Benchmark( # target='cst_bm', # source=[ diff --git a/src/mongo/db/cst/bson_lexer.cpp b/src/mongo/db/cst/bson_lexer.cpp index f1c2d9014a623..2e7da4ffae174 100644 --- a/src/mongo/db/cst/bson_lexer.cpp +++ b/src/mongo/db/cst/bson_lexer.cpp @@ -27,13 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include #include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/db/cst/bson_lexer.h" +#include "mongo/db/cst/c_node.h" #include "mongo/db/cst/parser_gen.hpp" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" #include "mongo/util/string_map.h" namespace mongo { @@ -334,7 +350,7 @@ void BSONLexer::tokenize(BSONElement elem, bool includeFieldName) { switch (elem.type()) { case BSONType::Array: { pushToken("start array", ParserGen::token::START_ARRAY); - auto index = 0; + auto index = 0U; for (auto&& nestedElem : elem.embeddedObject()) { ScopedLocationTracker arrayCtx{this, index++}; // For arrays, do not tokenize the field names. @@ -483,7 +499,7 @@ BSONLexer::BSONLexer(BSONObj obj, ParserGen::token_type startingToken) { // array index. No need to tokenize the fieldname for that case. 
if (startingToken == ParserGen::token::START_PIPELINE) { pushToken("start array", ParserGen::token::START_ARRAY); - auto index = 0; + auto index = 0U; for (auto&& elem : obj) { ScopedLocationTracker stageCtx{this, index++}; tokenize(elem, false); diff --git a/src/mongo/db/cst/bson_lexer.h b/src/mongo/db/cst/bson_lexer.h index 2bb1f0cf45c7e..1d91e69ec77c7 100644 --- a/src/mongo/db/cst/bson_lexer.h +++ b/src/mongo/db/cst/bson_lexer.h @@ -31,8 +31,10 @@ #include #include +#include #include +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/cst/bson_location.h" #include "mongo/db/cst/parser_gen.hpp" diff --git a/src/mongo/db/cst/bson_lexer_test.cpp b/src/mongo/db/cst/bson_lexer_test.cpp index e8d90b893600d..e148bfab02058 100644 --- a/src/mongo/db/cst/bson_lexer_test.cpp +++ b/src/mongo/db/cst/bson_lexer_test.cpp @@ -27,15 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include - +#include "mongo/base/string_data.h" #include "mongo/bson/json.h" #include "mongo/db/cst/bson_lexer.h" #include "mongo/db/cst/parser_gen.hpp" - -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/c_node.cpp b/src/mongo/db/cst/c_node.cpp index 462f0091e5bcf..56a09be31e160 100644 --- a/src/mongo/db/cst/c_node.cpp +++ b/src/mongo/db/cst/c_node.cpp @@ -28,15 +28,22 @@ */ #include "mongo/db/cst/c_node.h" -#include "mongo/bson/bsontypes.h" -#include "mongo/db/query/datetime/date_time_support.h" -#include "mongo/util/hex.h" -#include "mongo/util/overloaded_visitor.h" -#include +#include #include +#include #include -#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/hex.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo { using namespace std::string_literals; @@ -126,8 +133,8 @@ auto printValue(const T& payload) { [](const UserDate& userDate) { return " #include +#include +#include +#include #include #include +#include #include #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/cst/compound_key.h" #include "mongo/db/cst/key_fieldname.h" #include "mongo/db/cst/key_value.h" #include "mongo/db/cst/path.h" +#include "mongo/platform/basic.h" #include "mongo/platform/decimal128.h" #include "mongo/stdx/variant.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/cst/c_node_disambiguation.cpp b/src/mongo/db/cst/c_node_disambiguation.cpp index 15098cadd59a5..06d1935309462 100644 --- a/src/mongo/db/cst/c_node_disambiguation.cpp +++ b/src/mongo/db/cst/c_node_disambiguation.cpp @@ -27,13 +27,18 @@ * it in the license file. 
*/ -#include #include #include +#include + +#include +#include #include "mongo/db/cst/c_node_disambiguation.h" +#include "mongo/db/cst/compound_key.h" #include "mongo/stdx/variant.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo::c_node_disambiguation { namespace { diff --git a/src/mongo/db/cst/c_node_disambiguation.h b/src/mongo/db/cst/c_node_disambiguation.h index 58ef9502e5fb4..a3a994d11ea86 100644 --- a/src/mongo/db/cst/c_node_disambiguation.h +++ b/src/mongo/db/cst/c_node_disambiguation.h @@ -29,12 +29,15 @@ #pragma once -#include "mongo/platform/basic.h" - +#include +#include +#include #include #include "mongo/db/cst/c_node.h" #include "mongo/db/cst/c_node_validation.h" +#include "mongo/db/cst/path.h" +#include "mongo/platform/basic.h" /** * Functions which perform additional disambiguation beyond what a context free grammar can handle. diff --git a/src/mongo/db/cst/c_node_validation.cpp b/src/mongo/db/cst/c_node_validation.cpp index 81797d81159e9..ba7f7bf9bc74f 100644 --- a/src/mongo/db/cst/c_node_validation.cpp +++ b/src/mongo/db/cst/c_node_validation.cpp @@ -27,22 +27,37 @@ * it in the license file. */ +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include #include +#include #include +#include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/cst/c_node.h" #include "mongo/db/cst/c_node_validation.h" +#include "mongo/db/cst/compound_key.h" +#include "mongo/db/cst/key_fieldname.h" #include "mongo/db/cst/path.h" #include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/variable_validation.h" -#include "mongo/db/query/util/make_data_structure.h" #include "mongo/stdx/variant.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" namespace mongo::c_node_validation { using namespace std::string_literals; diff --git a/src/mongo/db/cst/c_node_validation.h b/src/mongo/db/cst/c_node_validation.h index 7aa3dec7ccf6d..9dcc3dbc23d96 100644 --- a/src/mongo/db/cst/c_node_validation.h +++ b/src/mongo/db/cst/c_node_validation.h @@ -29,14 +29,14 @@ #pragma once -#include "mongo/platform/basic.h" - #include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/db/cst/c_node.h" #include "mongo/db/matcher/matcher_type_set.h" +#include "mongo/platform/basic.h" /** * Functions which perform additional validation beyond what a context free grammar can handle. diff --git a/src/mongo/db/cst/compound_key.h b/src/mongo/db/cst/compound_key.h index a7380bd5c44e8..7df5aec2f6017 100644 --- a/src/mongo/db/cst/compound_key.h +++ b/src/mongo/db/cst/compound_key.h @@ -29,9 +29,10 @@ #pragma once -#include "mongo/platform/basic.h" - #include +#include + +#include "mongo/platform/basic.h" namespace mongo { diff --git a/src/mongo/db/cst/cst_error_test.cpp b/src/mongo/db/cst/cst_error_test.cpp index 600245b4a1d7c..c8dee2363042c 100644 --- a/src/mongo/db/cst/cst_error_test.cpp +++ b/src/mongo/db/cst/cst_error_test.cpp @@ -27,18 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" #include "mongo/db/cst/bson_lexer.h" -#include "mongo/db/cst/c_node.h" -#include "mongo/db/cst/key_fieldname.h" -#include "mongo/db/cst/key_value.h" #include "mongo/db/cst/parser_gen.hpp" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_expression_test.cpp b/src/mongo/db/cst/cst_expression_test.cpp index f3e2754a4abab..f8fc37c36f174 100644 --- a/src/mongo/db/cst/cst_expression_test.cpp +++ b/src/mongo/db/cst/cst_expression_test.cpp @@ -27,18 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/cst/bson_lexer.h" #include "mongo/db/cst/c_node.h" #include "mongo/db/cst/key_fieldname.h" -#include "mongo/db/cst/key_value.h" #include "mongo/db/cst/parser_gen.hpp" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_find_project_test.cpp b/src/mongo/db/cst/cst_find_project_test.cpp index 94ca4ac202619..297e68558a24a 100644 --- a/src/mongo/db/cst/cst_find_project_test.cpp +++ b/src/mongo/db/cst/cst_find_project_test.cpp @@ -27,17 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" #include "mongo/db/cst/bson_lexer.h" #include "mongo/db/cst/c_node.h" -#include "mongo/db/cst/key_fieldname.h" -#include "mongo/db/cst/key_value.h" #include "mongo/db/cst/parser_gen.hpp" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_literals_test.cpp b/src/mongo/db/cst/cst_literals_test.cpp index acffd400b20dc..78c8102507b6c 100644 --- a/src/mongo/db/cst/cst_literals_test.cpp +++ b/src/mongo/db/cst/cst_literals_test.cpp @@ -27,25 +27,42 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include #include +#include +#include + +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/cst/c_node.h" #include "mongo/db/cst/cst_pipeline_translation.h" #include "mongo/db/cst/key_fieldname.h" -#include "mongo/db/cst/key_value.h" +#include "mongo/db/cst/path.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/pipeline/document_source_limit.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" -#include "mongo/db/pipeline/document_source_skip.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/transformer_interface.h" #include "mongo/db/query/util/make_data_structure.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_match_test.cpp b/src/mongo/db/cst/cst_match_test.cpp index b266b6fa686b9..ba2d74b63a65e 100644 --- a/src/mongo/db/cst/cst_match_test.cpp +++ b/src/mongo/db/cst/cst_match_test.cpp @@ -27,19 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/platform/decimal128.h" #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" #include "mongo/db/cst/bson_lexer.h" #include "mongo/db/cst/c_node.h" -#include "mongo/db/cst/key_fieldname.h" -#include "mongo/db/cst/key_value.h" #include "mongo/db/cst/parser_gen.hpp" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_match_translation.cpp b/src/mongo/db/cst/cst_match_translation.cpp index 06222c441c9b0..030b5087d921d 100644 --- a/src/mongo/db/cst/cst_match_translation.cpp +++ b/src/mongo/db/cst/cst_match_translation.cpp @@ -27,22 +27,39 @@ * it in the license file. 
*/ -#include -#include -#include -#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/db/cst/c_node.h" #include "mongo/db/cst/cst_match_translation.h" #include "mongo/db/cst/cst_pipeline_translation.h" #include "mongo/db/cst/key_fieldname.h" -#include "mongo/db/cst/key_value.h" #include "mongo/db/matcher/expression_expr.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_text_base.h" #include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/matcher_type_set.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/platform/decimal128.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo::cst_match_translation { namespace { diff --git a/src/mongo/db/cst/cst_match_translation.h b/src/mongo/db/cst/cst_match_translation.h index ae233f64362be..19dc6b3f432e4 100644 --- a/src/mongo/db/cst/cst_match_translation.h +++ b/src/mongo/db/cst/cst_match_translation.h @@ -29,11 +29,15 @@ #pragma once -#include "mongo/platform/basic.h" +#include +#include +#include +#include #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/extensions_callback.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/platform/basic.h" namespace mongo::cst_match_translation { diff --git a/src/mongo/db/cst/cst_match_translation_test.cpp b/src/mongo/db/cst/cst_match_translation_test.cpp index 65257f9e21d1e..6c9525605de78 100644 --- a/src/mongo/db/cst/cst_match_translation_test.cpp +++ b/src/mongo/db/cst/cst_match_translation_test.cpp @@ -27,23 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include -#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/cst/bson_lexer.h" #include "mongo/db/cst/c_node.h" #include "mongo/db/cst/cst_match_translation.h" -#include "mongo/db/cst/key_fieldname.h" -#include "mongo/db/cst/key_value.h" +#include "mongo/db/cst/parser_gen.hpp" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_pipeline_translation.cpp b/src/mongo/db/cst/cst_pipeline_translation.cpp index 10e147e515cb1..cf7395a144546 100644 --- a/src/mongo/db/cst/cst_pipeline_translation.cpp +++ b/src/mongo/db/cst/cst_pipeline_translation.cpp @@ -28,31 +28,43 @@ */ +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include -#include -#include +#include #include +#include #include +#include #include +#include #include #include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/cst/c_node.h" +#include "mongo/db/cst/compound_key.h" #include "mongo/db/cst/cst_match_translation.h" #include "mongo/db/cst/cst_pipeline_translation.h" #include "mongo/db/cst/key_fieldname.h" #include "mongo/db/cst/key_value.h" +#include "mongo/db/cst/path.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/exclusion_projection_executor.h" #include "mongo/db/exec/inclusion_projection_executor.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_match.h" -#include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_sample.h" +#include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/document_source_skip.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" @@ -60,9 +72,12 @@ #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/variable_validation.h" #include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" #include "mongo/util/intrusive_counter.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo::cst_pipeline_translation { namespace { diff --git a/src/mongo/db/cst/cst_pipeline_translation.h b/src/mongo/db/cst/cst_pipeline_translation.h index 0f8bcc5ad2e6a..c3f29fd47809f 100644 --- a/src/mongo/db/cst/cst_pipeline_translation.h +++ b/src/mongo/db/cst/cst_pipeline_translation.h @@ -29,12 +29,15 @@ #pragma once -#include "mongo/platform/basic.h" - 
+#include #include +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/variables.h" +#include "mongo/platform/basic.h" namespace mongo::cst_pipeline_translation { diff --git a/src/mongo/db/cst/cst_pipeline_translation_test.cpp b/src/mongo/db/cst/cst_pipeline_translation_test.cpp index f2d11bbdcb6c5..ed21a7a374554 100644 --- a/src/mongo/db/cst/cst_pipeline_translation_test.cpp +++ b/src/mongo/db/cst/cst_pipeline_translation_test.cpp @@ -27,31 +27,44 @@ * it in the license file. */ -#include "mongo/db/cst/path.h" -#include "mongo/platform/basic.h" - -#include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/cst/c_node.h" +#include "mongo/db/cst/compound_key.h" #include "mongo/db/cst/cst_pipeline_translation.h" -#include "mongo/db/cst/cst_sort_translation.h" #include "mongo/db/cst/key_fieldname.h" #include "mongo/db/cst/key_value.h" +#include "mongo/db/cst/path.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_limit.h" -#include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_sample.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/document_source_skip.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/pipeline/variables.h" +#include "mongo/db/pipeline/transformer_interface.h" #include "mongo/db/query/util/make_data_structure.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_set_operator_translation_test.cpp b/src/mongo/db/cst/cst_set_operator_translation_test.cpp index e7403a9d5d602..2118d2846851a 100644 --- a/src/mongo/db/cst/cst_set_operator_translation_test.cpp +++ b/src/mongo/db/cst/cst_set_operator_translation_test.cpp @@ -27,21 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include #include +#include + +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/db/cst/c_node.h" #include "mongo/db/cst/cst_pipeline_translation.h" #include "mongo/db/cst/key_fieldname.h" -#include "mongo/db/cst/key_value.h" -#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/cst/path.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/util/make_data_structure.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_sort_translation.cpp b/src/mongo/db/cst/cst_sort_translation.cpp index 3dc3ef692e448..3bea7d8a69c9b 100644 --- a/src/mongo/db/cst/cst_sort_translation.cpp +++ b/src/mongo/db/cst/cst_sort_translation.cpp @@ -27,17 +27,26 @@ * it in the license file. */ -#include +#include +#include +#include #include +#include +#include #include +#include + #include "mongo/db/cst/cst_sort_translation.h" #include "mongo/db/cst/key_value.h" #include "mongo/db/cst/path.h" #include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/field_path.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo::cst_sort_translation { diff --git a/src/mongo/db/cst/cst_sort_translation.h b/src/mongo/db/cst/cst_sort_translation.h index 2a5f6210b502f..1227c45194870 100644 --- a/src/mongo/db/cst/cst_sort_translation.h +++ b/src/mongo/db/cst/cst_sort_translation.h @@ -29,12 +29,13 @@ #pragma once -#include "mongo/platform/basic.h" - +#include #include #include "mongo/db/cst/c_node.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/sort_pattern.h" +#include "mongo/platform/basic.h" namespace mongo::cst_sort_translation { diff --git a/src/mongo/db/cst/cst_sort_translation_test.cpp b/src/mongo/db/cst/cst_sort_translation_test.cpp index f35dccb7cf601..3511e8b2b098f 100644 --- a/src/mongo/db/cst/cst_sort_translation_test.cpp +++ b/src/mongo/db/cst/cst_sort_translation_test.cpp @@ -27,21 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include #include +#include + +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/db/cst/c_node.h" -#include "mongo/db/cst/cst_pipeline_translation.h" #include "mongo/db/cst/cst_sort_translation.h" #include "mongo/db/cst/key_fieldname.h" #include "mongo/db/cst/key_value.h" +#include "mongo/db/cst/path.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/sort_pattern.h" #include "mongo/db/query/util/make_data_structure.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/cst_test.cpp b/src/mongo/db/cst/cst_test.cpp index d25eebe7132ab..c902d494f4357 100644 --- a/src/mongo/db/cst/cst_test.cpp +++ b/src/mongo/db/cst/cst_test.cpp @@ -27,19 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" #include "mongo/db/cst/bson_lexer.h" #include "mongo/db/cst/c_node.h" #include "mongo/db/cst/key_fieldname.h" #include "mongo/db/cst/key_value.h" #include "mongo/db/cst/parser_gen.hpp" +#include "mongo/db/cst/path.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/cst/parser_gen.cpp b/src/mongo/db/cst/parser_gen.cpp index 2baba6a6cf7bd..b5a2ee7dc5e98 100644 --- a/src/mongo/db/cst/parser_gen.cpp +++ b/src/mongo/db/cst/parser_gen.cpp @@ -41,17 +41,32 @@ // Unqualified %code blocks. #line 82 "grammar.yy" -#include #include +#include #include +#include +#include "boost/algorithm/string/split.hpp" +#include "boost/core/addressof.hpp" +#include "boost/function/function_base.hpp" +#include "boost/iterator/iterator_facade.hpp" +#include "boost/type_index/type_index_facade.hpp" + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/db/cst/bson_lexer.h" #include "mongo/db/cst/c_node_disambiguation.h" #include "mongo/db/cst/c_node_validation.h" +#include "mongo/db/cst/compound_key.h" #include "mongo/db/cst/key_fieldname.h" +#include "mongo/db/cst/key_value.h" +#include "mongo/db/cst/path.h" #include "mongo/db/query/util/make_data_structure.h" #include "mongo/platform/decimal128.h" #include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { // Mandatory error function. @@ -72,6 +87,7 @@ void ParserGen::error(const ParserGen::location_type& loc, const std::string& ms #if defined YYENABLE_NLS && YYENABLE_NLS #if ENABLE_NLS #include // FIXME: INFRINGES ON USER NAME SPACE. 
+ #define YY_(msgid) dgettext("bison-runtime", msgid) #endif #endif diff --git a/src/mongo/db/cst/parser_gen.hpp b/src/mongo/db/cst/parser_gen.hpp index d56c0217ef518..d37ffd22eaa4f 100644 --- a/src/mongo/db/cst/parser_gen.hpp +++ b/src/mongo/db/cst/parser_gen.hpp @@ -47,8 +47,17 @@ // "%code requires" blocks. #line 66 "grammar.yy" +#include +#include +#include + +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/cst/bson_location.h" #include "mongo/db/cst/c_node.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/time_support.h" // Forward declare any parameters needed for lexing/parsing. namespace mongo { @@ -109,6 +118,7 @@ class BSONLexer; #include #ifndef YY_ASSERT #include + #define YY_ASSERT assert // NOLINT(mongo-assert-check) #endif diff --git a/src/mongo/db/curop.cpp b/src/mongo/db/curop.cpp index d1c27b922c051..95247986d254f 100644 --- a/src/mongo/db/curop.cpp +++ b/src/mongo/db/curop.cpp @@ -29,37 +29,67 @@ // CHECK_LOG_REDACTION - #include "mongo/db/curop.h" -#include "mongo/util/duration.h" -#include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/mutable/document.h" -#include "mongo/config.h" -#include "mongo/db/auth/authorization_manager.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/auth/user_name.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/json.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/prepare_conflict_tracker.h" #include "mongo/db/profile_filter.h" -#include "mongo/db/query/getmore_command_gen.h" #include "mongo/db/query/plan_summary_stats.h" -#include "mongo/db/query/telemetry.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/stats/timer_stats.h" -#include "mongo/db/storage/storage_engine_feature_flags_gen.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/metadata/client_metadata.h" #include "mongo/rpc/metadata/impersonated_user_metadata.h" +#include "mongo/rpc/metadata/impersonated_user_metadata_gen.h" #include "mongo/transport/service_executor.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/diagnostic_info.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/hex.h" #include "mongo/util/log_with_sampling.h" #include "mongo/util/namespace_string_util.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/str.h" -#include "mongo/util/system_tick_source.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -69,17 +99,20 @@ namespace { auto& 
oplogGetMoreStats = makeServerStatusMetric("repl.network.oplogGetMoresProcessed"); BSONObj serializeDollarDbInOpDescription(boost::optional tenantId, - const BSONObj& cmdObj) { + const BSONObj& cmdObj, + const SerializationContext& sc) { auto db = cmdObj["$db"]; if (!db) { return cmdObj; } - auto dbName = DatabaseNameUtil::deserialize(tenantId, db.String()); - auto newCmdObj = - cmdObj.addField(BSON("$db" << DatabaseNameUtil::serialize(dbName)).firstElement()); + auto dbName = DatabaseNameUtil::deserialize(tenantId, db.String(), sc); + auto newCmdObj = cmdObj.addField(BSON("$db" << DatabaseNameUtil::serialize( + dbName, SerializationContext::stateCommandReply(sc))) + .firstElement()); return newCmdObj; } + } // namespace /** @@ -187,7 +220,7 @@ CurOp* CurOp::get(const OperationContext& opCtx) { return _curopStack(opCtx).top(); } -void CurOp::reportCurrentOpForClient(OperationContext* opCtx, +void CurOp::reportCurrentOpForClient(const boost::intrusive_ptr& expCtx, Client* client, bool truncateOps, bool backtraceMode, @@ -214,8 +247,9 @@ void CurOp::reportCurrentOpForClient(OperationContext* opCtx, // Fill out the rest of the BSONObj with opCtx specific details. infoBuilder->appendBool("active", client->hasAnyActiveCurrentOp()); - infoBuilder->append("currentOpTime", - opCtx->getServiceContext()->getPreciseClockSource()->now().toString()); + infoBuilder->append( + "currentOpTime", + expCtx->opCtx->getServiceContext()->getPreciseClockSource()->now().toString()); auto authSession = AuthorizationSession::get(client); // Depending on whether the authenticated user is the same user which ran the command, @@ -265,7 +299,14 @@ void CurOp::reportCurrentOpForClient(OperationContext* opCtx, lsid->serialize(&lsidBuilder); } - CurOp::get(clientOpCtx)->reportState(infoBuilder, truncateOps); + tassert(7663403, + str::stream() << "SerializationContext on the expCtx should not be empty, with ns: " + << expCtx->ns.ns(), + expCtx->serializationCtxt != SerializationContext::stateDefault()); + + // reportState is used to generate a command reply + auto sc = SerializationContext::stateCommandReply(expCtx->serializationCtxt); + CurOp::get(clientOpCtx)->reportState(infoBuilder, sc, truncateOps); } #ifndef MONGO_CONFIG_USE_RAW_LATCHES @@ -305,11 +346,7 @@ OperationContext* CurOp::opCtx() { } void CurOp::setOpDescription_inlock(const BSONObj& opDescription) { - if (_nss.tenantId()) { - _opDescription = serializeDollarDbInOpDescription(_nss.tenantId(), opDescription); - } else { - _opDescription = opDescription; - } + _opDescription = opDescription; } void CurOp::setGenericCursor_inlock(GenericCursor gc) { @@ -336,11 +373,20 @@ void CurOp::setGenericOpRequestDetails(NamespaceString nss, _isCommand = _debug.iscommand = isCommand; _logicalOp = _debug.logicalOp = logicalOp; _networkOp = _debug.networkOp = op; - _opDescription = serializeDollarDbInOpDescription(nss.tenantId(), cmdObj); + _opDescription = cmdObj; _command = command; _nss = std::move(nss); } +void CurOp::setEndOfOpMetrics(long long nreturned) { + _debug.additiveMetrics.nreturned = nreturned; + // executionTime is set with the final executionTime in completeAndLogOperation, but for + // query stats collection we want it set before incrementing cursor metrics using OpDebug's + // AdditiveMetrics. The value set here will be overwritten later in + // completeAndLogOperation. 
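The `CurOp::setEndOfOpMetrics` addition that starts just above (its final statement follows below) deliberately records a provisional execution time that `completeAndLogOperation` later overwrites, so code aggregating query stats in between sees a populated value. A rough sketch of that snapshot-early, finalize-later pattern; the `AdditiveMetrics`, `Operation`, and `finalize` names here are invented, not the CurOp API:

```cpp
#include <chrono>
#include <iostream>
#include <optional>
#include <thread>

using Millis = std::chrono::milliseconds;

// Invented stand-in for the additive metrics carried on an operation.
struct AdditiveMetrics {
    std::optional<long long> nreturned;
    std::optional<Millis> executionTime;
};

struct Operation {
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
    AdditiveMetrics metrics;

    Millis elapsed() const {
        return std::chrono::duration_cast<Millis>(std::chrono::steady_clock::now() - start);
    }

    // Called when the command has produced its response: snapshot a provisional
    // execution time so consumers aggregating metrics now see a value.
    void setEndOfOpMetrics(long long nreturned) {
        metrics.nreturned = nreturned;
        metrics.executionTime = elapsed();  // provisional; overwritten below
    }

    // Called once at the very end: the authoritative time wins.
    void finalize() {
        metrics.executionTime = elapsed();
    }
};

int main() {
    Operation op;
    std::this_thread::sleep_for(Millis(5));
    op.setEndOfOpMetrics(42);
    std::cout << "provisional: " << op.metrics.executionTime->count() << "ms\n";
    std::this_thread::sleep_for(Millis(5));
    op.finalize();
    std::cout << "final:       " << op.metrics.executionTime->count() << "ms\n";
    return 0;
}
```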
+ _debug.additiveMetrics.executionTime = elapsedTimeExcludingPauses(); +} + void CurOp::setMessage_inlock(StringData message) { if (_progressMeter.isActive()) { LOGV2_ERROR(20527, @@ -348,7 +394,7 @@ void CurOp::setMessage_inlock(StringData message) { "Updating message", "old"_attr = redact(_message), "new"_attr = redact(message)); - verify(!_progressMeter.isActive()); + MONGO_verify(!_progressMeter.isActive()); } _message = message.toString(); // copy } @@ -393,8 +439,10 @@ TickSource::Tick CurOp::startTime() { void CurOp::done() { _end = _tickSource->getTicks(); +} - if (_cpuTimer) { +void CurOp::calculateCpuTime() { + if (_cpuTimer && _debug.cpuTime == Nanoseconds::zero()) { _debug.cpuTime = _cpuTimer->getElapsed(); } } @@ -471,6 +519,12 @@ bool CurOp::completeAndLogOperation(logv2::LogComponent component, bool shouldLogSlowOp, shouldProfileAtLevel1; if (filter) { + // Calculate this operation's CPU time before deciding whether logging/profiling is + // necessary only if it is needed for filtering. + if (filter->dependsOn("cpuNanos")) { + calculateCpuTime(); + } + bool passesFilter = filter->matches(opCtx, _debug, *this); shouldLogSlowOp = passesFilter; @@ -486,6 +540,13 @@ bool CurOp::completeAndLogOperation(logv2::LogComponent component, shouldProfileAtLevel1 = shouldLogSlowOp && shouldSample; } + // Defer calculating the CPU time until we know that we actually are going to write it to + // the logs or profiler. The CPU time may have been determined earlier if it was a dependency + // of 'filter' in which case this is a no-op. + if (forceLog || shouldLogSlowOp || _dbprofile >= 2) { + calculateCpuTime(); + } + if (forceLog || shouldLogSlowOp) { auto lockerInfo = opCtx->lockState()->getLockerInfo(_lockStatsBase); if (_debug.storageStats == nullptr && opCtx->lockState()->wasGlobalLockTaken() && @@ -504,7 +565,8 @@ bool CurOp::completeAndLogOperation(logv2::LogComponent component, Lock::GlobalLock lk(opCtx, MODE_IS, Date_t::now() + Milliseconds(500), - Lock::InterruptBehavior::kThrow); + Lock::InterruptBehavior::kThrow, + Lock::GlobalLockSkipOptions{.skipRSTLLock = true}); _debug.storageStats = opCtx->recoveryUnit()->computeOperationStatisticsSinceLastCall(); } catch (const DBException& ex) { @@ -685,7 +747,9 @@ BSONObj CurOp::truncateAndSerializeGenericCursor(GenericCursor* cursor, return serialized; } -void CurOp::reportState(BSONObjBuilder* builder, bool truncateOps) { +void CurOp::reportState(BSONObjBuilder* builder, + const SerializationContext& serializationContext, + bool truncateOps) { auto opCtx = this->opCtx(); auto start = _start.load(); if (start) { @@ -696,7 +760,7 @@ void CurOp::reportState(BSONObjBuilder* builder, bool truncateOps) { } builder->append("op", logicalOpToString(_logicalOp)); - builder->append("ns", NamespaceStringUtil::serialize(_nss)); + builder->append("ns", NamespaceStringUtil::serialize(_nss, serializationContext)); bool omitAndRedactInformation = CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation; builder->append("redacted", omitAndRedactInformation); @@ -707,7 +771,9 @@ void CurOp::reportState(BSONObjBuilder* builder, bool truncateOps) { // is true, limit the size of each op to 1000 bytes. Otherwise, do not truncate. 
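The CPU-time hunks above make the measurement lazy and idempotent: `calculateCpuTime` runs early only when the profile filter depends on `cpuNanos`, runs again once logging or profiling is certain, and the second call is a no-op because the value is only taken while it is still zero. A hedged sketch of that guard, with a fake timer standing in for the platform CPU timer:

```cpp
#include <chrono>
#include <iostream>
#include <optional>

using Nanos = std::chrono::nanoseconds;

// Fake timer standing in for a platform CPU timer; may be absent on some platforms.
struct FakeCpuTimer {
    Nanos getElapsed() const { return Nanos(12345); }
};

struct OpDebug {
    Nanos cpuTime{0};
};

struct Op {
    std::optional<FakeCpuTimer> cpuTimer = FakeCpuTimer{};
    OpDebug debug;

    // Idempotent: only measures once, and only if a timer exists.
    void calculateCpuTime() {
        if (cpuTimer && debug.cpuTime == Nanos::zero()) {
            debug.cpuTime = cpuTimer->getElapsed();
        }
    }

    void completeAndLog(bool filterNeedsCpu, bool willLogOrProfile) {
        if (filterNeedsCpu) {
            calculateCpuTime();  // needed to evaluate the filter
        }
        // ... filter evaluation would happen here ...
        if (willLogOrProfile) {
            calculateCpuTime();  // no-op if the filter already triggered it
            std::cout << "cpuNanos: " << debug.cpuTime.count() << '\n';
        }
    }
};

int main() {
    Op op;
    op.completeAndLog(/*filterNeedsCpu=*/false, /*willLogOrProfile=*/true);
    return 0;
}
```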
const boost::optional maxQuerySize{truncateOps, 1000}; - auto obj = appendCommentField(opCtx, _opDescription); + auto opDescription = + serializeDollarDbInOpDescription(_nss.tenantId(), _opDescription, serializationContext); + auto obj = appendCommentField(opCtx, opDescription); // If flag is true, add command field to builder without sensitive information. if (omitAndRedactInformation) { @@ -797,22 +863,15 @@ void CurOp::reportState(BSONObjBuilder* builder, bool truncateOps) { builder->append("dataThroughputAverage", *_debug.dataThroughputAverage); } - // (Ignore FCV check): This feature flag is used to initialize ticketing during storage engine - // initialization and FCV checking is ignored there, so here we also need to ignore FCV to keep - // consistent behavior. - if (feature_flags::gFeatureFlagDeprioritizeLowPriorityOperations - .isEnabledAndIgnoreFCVUnsafe()) { - auto admissionPriority = opCtx->lockState()->getAdmissionPriority(); - if (admissionPriority < AdmissionContext::Priority::kNormal) { - builder->append("admissionPriority", toString(admissionPriority)); - } + auto admissionPriority = opCtx->lockState()->getAdmissionPriority(); + if (admissionPriority < AdmissionContext::Priority::kNormal) { + builder->append("admissionPriority", toString(admissionPriority)); } if (auto start = _waitForWriteConcernStart.load(); start > 0) { auto end = _waitForWriteConcernEnd.load(); - auto elapsedTimeTotal = - duration_cast(debug().waitForWriteConcernDurationMillis); - elapsedTimeTotal += computeElapsedTimeTotal(start, end); + auto elapsedTimeTotal = _atomicWaitForWriteConcernDurationMillis.load(); + elapsedTimeTotal += duration_cast(computeElapsedTimeTotal(start, end)); builder->append("waitForWriteConcernDurationMillis", durationCount(elapsedTimeTotal)); } @@ -1025,15 +1084,9 @@ void OpDebug::report(OperationContext* opCtx, pAttrs->add("reslen", responseLength); } - // (Ignore FCV check): This feature flag is used to initialize ticketing during storage engine - // initialization and FCV checking is ignored there, so here we also need to ignore FCV to keep - // consistent behavior. 
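The write-concern wait accounting above keeps a separate atomically updated total so that `reportState`, possibly running on another thread, can report the accumulated time plus the elapsed portion of an in-flight wait without racing the non-atomic `OpDebug` field. A simplified sketch of that bookkeeping, assuming one writer and concurrent readers; all names below are illustrative:

```cpp
#include <atomic>
#include <chrono>
#include <cstdint>
#include <iostream>

using Clock = std::chrono::steady_clock;
using Millis = std::chrono::milliseconds;

struct WaitAccounting {
    std::atomic<int64_t> accumulatedMillis{0};    // completed waits
    std::atomic<int64_t> inflightStartMillis{0};  // 0 means "not currently waiting"

    static int64_t nowMillis() {
        return std::chrono::duration_cast<Millis>(Clock::now().time_since_epoch()).count();
    }

    void startWait() {
        inflightStartMillis.store(nowMillis());
    }

    void stopWait() {
        auto start = inflightStartMillis.exchange(0);
        if (start != 0) {
            accumulatedMillis.fetch_add(nowMillis() - start);
        }
    }

    // Safe to call from a reporting thread while a wait is still in progress.
    int64_t totalMillis() const {
        auto total = accumulatedMillis.load();
        if (auto start = inflightStartMillis.load(); start != 0) {
            total += nowMillis() - start;
        }
        return total;
    }
};

int main() {
    WaitAccounting wc;
    wc.startWait();
    wc.stopWait();
    std::cout << "waited ~" << wc.totalMillis() << " ms\n";
    return 0;
}
```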
- if (feature_flags::gFeatureFlagDeprioritizeLowPriorityOperations - .isEnabledAndIgnoreFCVUnsafe()) { - auto admissionPriority = opCtx->lockState()->getAdmissionPriority(); - if (admissionPriority < AdmissionContext::Priority::kNormal) { - pAttrs->add("admissionPriority", admissionPriority); - } + auto admissionPriority = opCtx->lockState()->getAdmissionPriority(); + if (admissionPriority < AdmissionContext::Priority::kNormal) { + pAttrs->add("admissionPriority", admissionPriority); } if (lockStats) { @@ -1254,6 +1307,11 @@ void OpDebug::append(OperationContext* opCtx, b.append("writeConcern", writeConcern->toBSON()); } + if (waitForWriteConcernDurationMillis > Milliseconds::zero()) { + b.append("waitForWriteConcernDuration", + durationCount(waitForWriteConcernDurationMillis)); + } + if (storageStats) { b.append("storage", storageStats->toBSON()); } diff --git a/src/mongo/db/curop.h b/src/mongo/db/curop.h index 1c480498d7c5f..8bf9d260fe8b3 100644 --- a/src/mongo/db/curop.h +++ b/src/mongo/db/curop.h @@ -30,24 +30,60 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include - -#include "mongo/config.h" +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/user_acquisition_stats.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/flow_control_ticketholder.h" +#include "mongo/db/concurrency/lock_stats.h" #include "mongo/db/cursor_id.h" +#include "mongo/db/database_name.h" +#include "mongo/db/generic_cursor_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/operation_cpu_timer.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/profile_filter.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_summary_stats.h" +#include "mongo/db/query/query_stats_key_generator.h" #include "mongo/db/server_options.h" #include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/storage/storage_stats.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/write_concern_options.h" #include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log_component.h" #include "mongo/platform/atomic_word.h" +#include "mongo/rpc/message.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/duration.h" #include "mongo/util/progress_meter.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/string_map.h" #include "mongo/util/system_tick_source.h" #include "mongo/util/tick_source.h" #include "mongo/util/time_support.h" @@ -292,8 +328,10 @@ class OpDebug { // The hash of the query's "stable" key. This represents the query's shape. boost::optional queryHash; // The shape of the original query serialized with readConcern, application name, and namespace. - // If boost::none, telemetry should not be collected for this operation. - boost::optional telemetryStoreKey; + // If boost::none, query stats should not be collected for this operation. + boost::optional queryStatsStoreKeyHash; + // The KeyGenerator used by query stats to generate the query stats store key. + std::unique_ptr queryStatsKeyGenerator; // The query framework that this operation used. 
Will be unknown for non query operations. PlanExecutor::QueryFramework queryFramework{PlanExecutor::QueryFramework::kUnknown}; @@ -426,7 +464,7 @@ class CurOp { * report, since this may be called in either a mongoD or mongoS context and the latter does not * supply lock stats. The client must be locked before calling this method. */ - static void reportCurrentOpForClient(OperationContext* opCtx, + static void reportCurrentOpForClient(const boost::intrusive_ptr& expCtx, Client* client, bool truncateOps, bool backtraceMode, @@ -469,6 +507,13 @@ class CurOp { BSONObj cmdObj, NetworkOp op); + /** + * Sets metrics collected at the end of an operation onto curOp's OpDebug instance. Note that + * this is used in tandem with OpDebug::setPlanSummaryMetrics so should not repeat any metrics + * collected there. + */ + void setEndOfOpMetrics(long long nreturned); + /** * Marks the operation end time, records the length of the client response if a valid response * exists, and then - subject to the current values of slowMs and sampleRate - logs this CurOp @@ -577,7 +622,7 @@ class CurOp { if (_dbprofile <= 0) return false; - if (CollectionCatalog::get(opCtx())->getDatabaseProfileSettings(getNSS().db()).filter) + if (CollectionCatalog::get(opCtx())->getDatabaseProfileSettings(getNSS().dbName()).filter) return true; return elapsedTimeExcludingPauses() >= Milliseconds{serverGlobalParams.slowMS.load()}; @@ -763,7 +808,7 @@ class CurOp { return computeElapsedTimeTotal(start, _end.load()) - _totalPausedDuration; } /** - * The planningTimeMicros metric, reported in the system profiler and in telemetry, is measured + * The planningTimeMicros metric, reported in the system profiler and in queryStats, is measured * using the Curop instance's _tickSource. Currently, _tickSource is only paused in places where logical work is being done. If this were to change, and _tickSource were to be paused during query planning for reasons unrelated to the work of @@ -814,12 +859,21 @@ class CurOp { auto start = _waitForWriteConcernStart.load(); if (start != 0) { _waitForWriteConcernEnd = _tickSource->getTicks(); - debug().waitForWriteConcernDurationMillis += duration_cast( + auto duration = duration_cast( computeElapsedTimeTotal(start, _waitForWriteConcernEnd.load())); + _atomicWaitForWriteConcernDurationMillis = + _atomicWaitForWriteConcernDurationMillis.load() + duration; + debug().waitForWriteConcernDurationMillis = _atomicWaitForWriteConcernDurationMillis; _waitForWriteConcernStart = 0; } } + /** + * If the platform supports the CPU timer, and we haven't collected this operation's CPU time + * already, then calculates this operation's CPU time and stores it on the 'OpDebug'. + */ + void calculateCpuTime(); + /** * 'opDescription' must be either an owned BSONObj or guaranteed to outlive the OperationContext * it is associated with. @@ -853,7 +907,9 @@ class CurOp { * If called from a thread other than the one executing the operation associated with this * CurOp, it is necessary to lock the associated Client object before executing this method. */ - void reportState(BSONObjBuilder* builder, bool truncateOps = false); + void reportState(BSONObjBuilder* builder, + const SerializationContext& serializationContext, + bool truncateOps = false); /** * Sets the message for FailPoints used. @@ -942,16 +998,6 @@ class CurOp { _tickSource = tickSource; } - /** - * Merge match counters from the current operation into the global map and stop counting. 
- */ - void stopMatchExprCounter(); - - /** - * Increment the counter for the match expression with given name in the current operation. - */ - void incrementMatchExprCounter(StringData name); - private: class CurOpStack; @@ -964,7 +1010,6 @@ class CurOp { TickSource::Tick startTime(); Microseconds computeElapsedTimeTotal(TickSource::Tick startTime, TickSource::Tick endTime) const; - /** * Handles failpoints that check whether a command has completed or not. * Used for testing purposes instead of the getLog command. @@ -1040,6 +1085,10 @@ class CurOp { // These values are used to calculate the amount of time spent waiting for write concern. std::atomic _waitForWriteConcernStart{0}; // NOLINT std::atomic _waitForWriteConcernEnd{0}; // NOLINT + // This metric is the same value as debug().waitForWriteConcernDurationMillis. + // We cannot use std::atomic in OpDebug since it is not copy assignable, but using a non-atomic + // allows for a data race between stopWaitForWriteConcernTimer and curop::reportState. + std::atomic _atomicWaitForWriteConcernDurationMillis{Milliseconds{0}}; // NOLINT }; } // namespace mongo diff --git a/src/mongo/db/curop_failpoint_helpers.cpp b/src/mongo/db/curop_failpoint_helpers.cpp index d09c1e488bfa5..3a35ee54cbae1 100644 --- a/src/mongo/db/curop_failpoint_helpers.cpp +++ b/src/mongo/db/curop_failpoint_helpers.cpp @@ -27,11 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/curop_failpoint_helpers.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" #include "mongo/db/curop.h" +#include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -78,8 +91,8 @@ void CurOpFailpointHelpers::waitWhileFailPointEnabled(FailPoint* failPoint, updateCurOpFailPointMsg(opCtx, origCurOpFailpointMsg); }, [&](const BSONObj& data) { - StringData fpNss = data.getStringField("nss"); - if (nss && !fpNss.empty() && fpNss != nss.value().toString()) { + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd); + if (nss && !fpNss.isEmpty() && fpNss != nss.value()) { return false; } return true; diff --git a/src/mongo/db/curop_failpoint_helpers.h b/src/mongo/db/curop_failpoint_helpers.h index c40a08d344011..d2115a24d58ba 100644 --- a/src/mongo/db/curop_failpoint_helpers.h +++ b/src/mongo/db/curop_failpoint_helpers.h @@ -27,7 +27,13 @@ * it in the license file. */ +#include +#include +#include +#include + #include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/util/fail_point.h" diff --git a/src/mongo/db/curop_metrics.cpp b/src/mongo/db/curop_metrics.cpp index 8248bdce16b29..b1e267236ada9 100644 --- a/src/mongo/db/curop_metrics.cpp +++ b/src/mongo/db/curop_metrics.cpp @@ -27,10 +27,15 @@ * it in the license file. 
*/ +#include + +#include + #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/curop.h" #include "mongo/db/operation_context.h" #include "mongo/db/stats/counters.h" +#include "mongo/platform/atomic_word.h" namespace mongo { namespace { diff --git a/src/mongo/db/curop_test.cpp b/src/mongo/db/curop_test.cpp index c5d2d7947ca95..c5a8735417421 100644 --- a/src/mongo/db/curop_test.cpp +++ b/src/mongo/db/curop_test.cpp @@ -27,12 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/curop.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/unittest/unittest.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/tick_source_mock.h" namespace mongo { @@ -269,7 +278,7 @@ TEST(CurOpTest, ShouldNotReportFailpointMsgIfNotSet) { BSONObjBuilder reportedStateWithoutFailpointMsg; { stdx::lock_guard lk(*opCtx->getClient()); - curop->reportState(&reportedStateWithoutFailpointMsg); + curop->reportState(&reportedStateWithoutFailpointMsg, SerializationContext()); } auto bsonObj = reportedStateWithoutFailpointMsg.done(); @@ -304,5 +313,42 @@ TEST(CurOpTest, ElapsedTimeReflectsTickSource) { ASSERT_EQ(Milliseconds{20}, duration_cast(curop->elapsedTimeTotal())); } +TEST(CurOpTest, CheckNSAgainstSerializationContext) { + RAIIServerParameterControllerForTest multitenanyController("multitenancySupport", true); + TenantId tid = TenantId(OID::gen()); + + QueryTestServiceContext serviceContext; + auto opCtx = serviceContext.makeOperationContext(); + + auto curop = CurOp::get(*opCtx); + + // Create dummy command. + BSONObj command = BSON("a" << 3); + + // Set dummy 'ns' and 'command'. + curop->setGenericOpRequestDetails( + NamespaceString::createNamespaceString_forTest(tid, "testDb.coll"), + nullptr, + command, + NetworkOp::dbQuery); + + // Test without using the expectPrefix field. + for (bool tenantIdFromDollarTenantOrSecurityToken : {false, true}) { + SerializationContext sc = SerializationContext::stateCommandReply(); + sc.setTenantIdSource(tenantIdFromDollarTenantOrSecurityToken); + + BSONObjBuilder builder; + { + stdx::lock_guard lk(*opCtx->getClient()); + curop->reportState(&builder, sc); + } + auto bsonObj = builder.done(); + + std::string serializedNs = tenantIdFromDollarTenantOrSecurityToken + ? 
"testDb.coll" + : tid.toString() + "_testDb.coll"; + ASSERT_EQ(serializedNs, bsonObj.getField("ns").String()); + } +} } // namespace } // namespace mongo diff --git a/src/mongo/db/cursor_manager.cpp b/src/mongo/db/cursor_manager.cpp index 0a499fe586cda..cc3da7f8e41cb 100644 --- a/src/mongo/db/cursor_manager.cpp +++ b/src/mongo/db/cursor_manager.cpp @@ -29,27 +29,45 @@ #include "mongo/db/cursor_manager.h" -#include "mongo/base/data_cursor.h" -#include "mongo/base/init.h" +#include +#include +#include +// IWYU pragma: no_include "boost/align/detail/aligned_alloc_posix.hpp" +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/allocate_cursor_id.h" -#include "mongo/db/audit.h" #include "mongo/db/auth/authorization_checks.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/client.h" #include "mongo/db/curop.h" #include "mongo/db/cursor_server_params.h" -#include "mongo/db/db_raii.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/plan_executor.h" -#include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions_common.h" #include "mongo/db/session/logical_session_cache.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/random.h" -#include "mongo/util/exit.h" +#include "mongo/util/aligned.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -214,8 +232,8 @@ StatusWith CursorManager::pinCursor( CurOp::get(opCtx)->debug().queryHash = cursor->_queryHash; CurOp::get(opCtx)->debug().planCacheKey = cursor->_planCacheKey; - // Pass along telemetry context so it is retrievable after query execution for storing metrics. - CurOp::get(opCtx)->debug().telemetryStoreKey = cursor->_telemetryStoreKey; + // Pass along queryStats context so it is retrievable after query execution for storing metrics. 
+ CurOp::get(opCtx)->debug().queryStatsStoreKeyHash = cursor->_queryStatsStoreKeyHash; cursor->_operationUsingCursor = opCtx; diff --git a/src/mongo/db/cursor_manager.h b/src/mongo/db/cursor_manager.h index dd0ec97bcc8c9..b07d3e7ace491 100644 --- a/src/mongo/db/cursor_manager.h +++ b/src/mongo/db/cursor_manager.h @@ -29,21 +29,39 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/db/catalog/util/partitioned.h" #include "mongo/db/clientcursor.h" #include "mongo/db/cursor_id.h" #include "mongo/db/generic_cursor.h" +#include "mongo/db/generic_cursor_gen.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_killer.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" #include "mongo/util/clock_source.h" #include "mongo/util/concurrency/mutex.h" #include "mongo/util/duration.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/cursor_server_params.cpp b/src/mongo/db/cursor_server_params.cpp index c59f7c1f3b730..abf6284f4597a 100644 --- a/src/mongo/db/cursor_server_params.cpp +++ b/src/mongo/db/cursor_server_params.cpp @@ -27,11 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/cursor_server_params.h" - #include "mongo/db/cursor_server_params_gen.h" +#include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/db/database_name.h b/src/mongo/db/database_name.h index e96aee3c6dd5b..d2cbcae97d133 100644 --- a/src/mongo/db/database_name.h +++ b/src/mongo/db/database_name.h @@ -30,13 +30,28 @@ #pragma once #include #include +#include +#include #include +#include +#include +#include +#include +#include +#include #include +#include +#include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/tenant_id.h" #include "mongo/logv2/log_attr.h" +#include "mongo/util/assert_util.h" #include "mongo/util/static_immortal.h" +#include "mongo/util/str.h" namespace mongo { @@ -87,13 +102,6 @@ class DatabaseName { return _get().toString(); } - friend std::ostream& operator<<(std::ostream& stream, const ConstantProxy& dbName) { - return stream << dbName.toString(); - } - friend StringBuilder& operator<<(StringBuilder& builder, const ConstantProxy& dbName) { - return builder << dbName.toString(); - } - private: const DatabaseName& _get() const { return _sharedState->get(); @@ -103,64 +111,91 @@ class DatabaseName { }; #define DBNAME_CONSTANT(id, db) static const ConstantProxy id; -#include "database_name_reserved.def.h" +#include "database_name_reserved.def.h" // IWYU pragma: keep + #undef DBNAME_CONSTANT + static constexpr size_t kMaxDatabaseNameLength = 63; + static constexpr size_t kMaxTenantDatabaseNameLength = 38; + /** * Constructs an empty DatabaseName. */ DatabaseName() = default; /** - * Constructs a DatabaseName from the given tenantId and database name. 
- * "dbName" is expected only consist of a db name. It is the caller's responsibility to ensure - * the dbName is a valid db name. + * This function constructs a DatabaseName without checking for presence of TenantId. It + * must only be used by auth systems which are not yet tenant aware. + * + * TODO SERVER-76294 Remove this function. Any remaining call sites must be changed to use a + * function on DatabaseNameUtil. */ - DatabaseName(boost::optional tenantId, StringData dbString) - : _tenantId(std::move(tenantId)), _dbString(dbString.toString()) { - uassert(ErrorCodes::InvalidNamespace, - "'.' is an invalid character in a db name: " + _dbString, - dbString.find('.') == std::string::npos); - - uassert(ErrorCodes::InvalidNamespace, - "database names cannot have embedded null characters", - dbString.find('\0') == std::string::npos); + static DatabaseName createDatabaseNameForAuth(const boost::optional& tenantId, + StringData dbString) { + return DatabaseName(tenantId, dbString); } /** - * Prefer to use the constructor above. - * TODO SERVER-65456 Remove this constructor. + * This function constructs a DatabaseName without checking for presence of TenantId. + * + * MUST only be used for tests. */ - DatabaseName(StringData dbName, boost::optional tenantId = boost::none) - : DatabaseName(std::move(tenantId), dbName) {} - - const boost::optional& tenantId() const { - return _tenantId; + static DatabaseName createDatabaseName_forTest(boost::optional tenantId, + StringData dbString) { + return DatabaseName(tenantId, dbString); } - const std::string& db() const { - return _dbString; + boost::optional tenantId() const { + if (!_hasTenantId()) { + return boost::none; + } + + return TenantId{OID::from(&_data[kDataOffset])}; + } + /** + * This function is deprecated. TODO SERVER-77537 Make db() private. + */ + StringData db() const { + auto offset = _hasTenantId() ? kDataOffset + OID::kOIDSize : kDataOffset; + return StringData{_data.data() + offset, _data.size() - offset}; } - const std::string& toString() const { - return db(); + bool isEmpty() const { + return _data.size() == kDataOffset; + } + bool isAdminDB() const { + return db() == DatabaseName::kAdmin.db(); + } + bool isLocalDB() const { + return db() == DatabaseName::kLocal.db(); + } + bool isConfigDB() const { + return db() == DatabaseName::kConfig.db(); + } + bool isExternalDB() const { + return db() == DatabaseName::kExternal.db(); } - std::string toStringWithTenantId() const { - if (_tenantId) - return str::stream() << *_tenantId << '_' << _dbString; + /** + * Serialize the db name to stirng, always ignoring the tenantId. + * This function should only be used when no available serialize context. + */ + std::string serializeWithoutTenantPrefix() const { + return db().toString(); + } - return _dbString; + /** + * This function should only be used when creating a resouce id for databasename. + */ + std::string toStringForResourceId() const { + return toStringWithTenantId(); } /** * This function should only be used when logging a db name in an error message. */ std::string toStringForErrorMsg() const { - if (_tenantId) - return str::stream() << *_tenantId << '_' << _dbString; - - return _dbString; + return toStringWithTenantId(); } /** @@ -171,8 +206,28 @@ class DatabaseName { return dbName.toStringWithTenantId(); } + /** + * This function returns the DatabaseName as a string, including the tenantId. + * + * MUST only be used for tests. 
+ */ + std::string toStringWithTenantId_forTest() const { + return toStringWithTenantId(); + } + + /** + * This function returns the DatabaseName as a string, ignoring the tenantId. + * + * MUST only be used for tests. + */ + std::string toString_forTest() const { + return toString(); + } + bool equalCaseInsensitive(const DatabaseName& other) const { - return (_tenantId == other._tenantId) && boost::iequals(toString(), other.toString()); + return StringData{_data.data() + kDataOffset, _data.size() - kDataOffset} + .equalCaseInsensitive( + StringData{other._data.data() + kDataOffset, other._data.size() - kDataOffset}); } friend std::ostream& operator<<(std::ostream& stream, const DatabaseName& tdb) { @@ -183,36 +238,46 @@ class DatabaseName { return builder << tdb.toString(); } - friend bool operator==(const DatabaseName& a, const DatabaseName& b) { - return a._lens() == b._lens(); + int compare(const DatabaseName& other) const { + if (_hasTenantId() && !other._hasTenantId()) { + return 1; + } + + if (other._hasTenantId() && !_hasTenantId()) { + return -1; + } + + return StringData{_data.data() + kDataOffset, _data.size() - kDataOffset}.compare( + StringData{other._data.data() + kDataOffset, other._data.size() - kDataOffset}); + } + + friend bool operator==(const DatabaseName& lhs, const DatabaseName& rhs) { + return lhs._data == rhs._data; } - friend bool operator!=(const DatabaseName& a, const DatabaseName& b) { - return a._lens() != b._lens(); + friend bool operator!=(const DatabaseName& lhs, const DatabaseName& rhs) { + return lhs._data != rhs._data; } - friend bool operator<(const DatabaseName& a, const DatabaseName& b) { - return a._lens() < b._lens(); + friend bool operator<(const DatabaseName& lhs, const DatabaseName& rhs) { + return lhs.compare(rhs) < 0; } - friend bool operator>(const DatabaseName& a, const DatabaseName& b) { - return a._lens() > b._lens(); + friend bool operator<=(const DatabaseName& lhs, const DatabaseName& rhs) { + return lhs.compare(rhs) <= 0; } - friend bool operator<=(const DatabaseName& a, const DatabaseName& b) { - return a._lens() <= b._lens(); + friend bool operator>(const DatabaseName& lhs, const DatabaseName& rhs) { + return lhs.compare(rhs) > 0; } - friend bool operator>=(const DatabaseName& a, const DatabaseName& b) { - return a._lens() >= b._lens(); + friend bool operator>=(const DatabaseName& lhs, const DatabaseName& rhs) { + return lhs.compare(rhs) >= 0; } template <typename H> friend H AbslHashValue(H h, const DatabaseName& obj) { - if (obj._tenantId) { - return H::combine(std::move(h), obj._tenantId.get(), obj._dbString); - } - return H::combine(std::move(h), obj._dbString); + return H::combine(std::move(h), obj._data); } friend auto logAttrs(const DatabaseName& obj) { @@ -220,12 +285,73 @@ class DatabaseName { private: - std::tuple<const boost::optional<TenantId>&, const std::string&> _lens() const { - return std::tie(_tenantId, _dbString); + friend class NamespaceString; + friend class NamespaceStringOrUUID; + friend class DatabaseNameUtil; + + /** + * Constructs a DatabaseName from the given tenantId and database name. + * "dbString" is expected to consist only of a db name. It is the caller's responsibility to ensure + * the dbName is a valid db name. + */ + DatabaseName(boost::optional<TenantId> tenantId, StringData dbString) { + uassert(ErrorCodes::InvalidNamespace, + "'.'
is an invalid character in a db name: " + dbString, + dbString.find('.') == std::string::npos); + uassert(ErrorCodes::InvalidNamespace, + "database names cannot have embedded null characters", + dbString.find('\0') == std::string::npos); + + size_t maxLen = tenantId ? kMaxTenantDatabaseNameLength : kMaxDatabaseNameLength; + uassert(ErrorCodes::InvalidNamespace, + fmt::format( + "db name must be at most {} characters, found: {}", maxLen, dbString.size()), + dbString.size() <= maxLen); + + uint8_t details = dbString.size() & kDatabaseNameOffsetEndMask; + size_t dbStartIndex = kDataOffset; + if (tenantId) { + dbStartIndex += OID::kOIDSize; + details |= kTenantIdMask; + } + + _data.resize(dbStartIndex + dbString.size()); + *reinterpret_cast(_data.data()) = details; + if (tenantId) { + std::memcpy(_data.data() + kDataOffset, tenantId->_oid.view().view(), OID::kOIDSize); + } + if (!dbString.empty()) { + std::memcpy(_data.data() + dbStartIndex, dbString.rawData(), dbString.size()); + } + } + + std::string toString() const { + return db().toString(); + } + + std::string toStringWithTenantId() const { + if (_hasTenantId()) { + auto tenantId = TenantId{OID::from(&_data[kDataOffset])}; + return str::stream() << tenantId.toString() << "_" << db(); + } + + return db().toString(); } - boost::optional _tenantId = boost::none; - std::string _dbString; + static constexpr size_t kDataOffset = sizeof(uint8_t); + static constexpr uint8_t kTenantIdMask = 0x80; + static constexpr uint8_t kDatabaseNameOffsetEndMask = 0x7F; + + inline bool _hasTenantId() const { + return static_cast(_data.front()) & kTenantIdMask; + } + + // Private constructor for NamespaceString to construct DatabaseName from its own internal data + struct TrustedInitTag {}; + DatabaseName(std::string data, TrustedInitTag) : _data(std::move(data)) {} + + // Same in-memory layout as NamespaceString, see documentation in its header + std::string _data{'\0'}; }; // The `constexpr` definitions for `DatabaseName::ConstantProxy` static data members are below. See @@ -233,14 +359,16 @@ class DatabaseName { // namespace_string.h for more details. 
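The private constructor above packs the whole name into a single buffer: one details byte whose low seven bits record the db-name length and whose high bit (kTenantIdMask) flags that a tenant id follows, then an optional 12-byte OID, then the raw db-name bytes. The sketch below is a minimal, stand-alone illustration of that packing and unpacking scheme, not the real class; the helper names (packDbName, unpackDb) and the raw 12-byte OID buffer are assumptions made for the example.

#include <cstdint>
#include <cstring>
#include <string>

// Pack an optional 12-byte tenant OID and a db name into one buffer,
// mirroring the layout used by DatabaseName::_data above. Db names are
// length-limited (see kMaxDatabaseNameLength), so the length fits in 7 bits.
std::string packDbName(const char* tenantOid /* 12 bytes, or nullptr */,
                       const std::string& db) {
    const std::size_t kOidSize = 12;            // OID::kOIDSize
    std::uint8_t details = db.size() & 0x7F;    // kDatabaseNameOffsetEndMask
    std::size_t dbStart = 1;                    // kDataOffset
    if (tenantOid) {
        details |= 0x80;                        // kTenantIdMask
        dbStart += kOidSize;
    }
    std::string data(dbStart + db.size(), '\0');
    data[0] = static_cast<char>(details);
    if (tenantOid)
        std::memcpy(&data[1], tenantOid, kOidSize);
    std::memcpy(&data[dbStart], db.data(), db.size());
    return data;
}

// Unpack: the flag byte tells us whether a tenant OID precedes the db name.
std::string unpackDb(const std::string& data) {
    const bool hasTenant = static_cast<std::uint8_t>(data[0]) & 0x80;
    return data.substr(hasTenant ? 1 + 12 : 1);
}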
namespace dbname_detail::const_proxy_shared_states { #define DBNAME_CONSTANT(id, db) constexpr inline DatabaseName::ConstantProxy::SharedState id{db}; -#include "database_name_reserved.def.h" +#include "database_name_reserved.def.h" // IWYU pragma: keep + #undef DBNAME_CONSTANT } // namespace dbname_detail::const_proxy_shared_states #define DBNAME_CONSTANT(id, db) \ constexpr inline DatabaseName::ConstantProxy DatabaseName::id{ \ &dbname_detail::const_proxy_shared_states::id}; -#include "database_name_reserved.def.h" +#include "database_name_reserved.def.h" // IWYU pragma: keep + #undef DBNAME_CONSTANT } // namespace mongo diff --git a/src/mongo/db/database_name_reserved.def.h b/src/mongo/db/database_name_reserved.def.h index 062fdd4317ec0..38b0cf1768496 100644 --- a/src/mongo/db/database_name_reserved.def.h +++ b/src/mongo/db/database_name_reserved.def.h @@ -46,4 +46,6 @@ DBNAME_CONSTANT(kConfig, "config"_sd) DBNAME_CONSTANT(kSystem, "system"_sd) +DBNAME_CONSTANT(kExternal, "$external"_sd) + DBNAME_CONSTANT(kEmpty, ""_sd) diff --git a/src/mongo/db/database_name_test.cpp b/src/mongo/db/database_name_test.cpp index 7488e13820f0e..195b8143647f5 100644 --- a/src/mongo/db/database_name_test.cpp +++ b/src/mongo/db/database_name_test.cpp @@ -28,11 +28,22 @@ */ #include "mongo/db/database_name.h" -#include "mongo/db/server_feature_flags_gen.h" + +#include + +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -40,56 +51,54 @@ namespace mongo { namespace { TEST(DatabaseNameTest, MultitenancySupportDisabled) { - DatabaseName dbnWithoutTenant1(boost::none, "a"); + DatabaseName dbnWithoutTenant1 = DatabaseName::createDatabaseName_forTest(boost::none, "a"); ASSERT(!dbnWithoutTenant1.tenantId()); - ASSERT_EQUALS(std::string("a"), dbnWithoutTenant1.db()); - ASSERT_EQUALS(std::string("a"), dbnWithoutTenant1.toString()); + ASSERT_EQUALS(std::string("a"), dbnWithoutTenant1.toString_forTest()); TenantId tenantId(OID::gen()); - DatabaseName dbnWithTenant(tenantId, "a"); + DatabaseName dbnWithTenant = DatabaseName::createDatabaseName_forTest(tenantId, "a"); ASSERT(dbnWithTenant.tenantId()); ASSERT_EQUALS(tenantId, *dbnWithTenant.tenantId()); - ASSERT_EQUALS(std::string("a"), dbnWithTenant.db()); - ASSERT_EQUALS(std::string("a"), dbnWithTenant.toString()); - ASSERT_EQUALS(std::string(tenantId.toString() + "_a"), dbnWithTenant.toStringWithTenantId()); + ASSERT_EQUALS(std::string("a"), dbnWithoutTenant1.toString_forTest()); + ASSERT_EQUALS(std::string(tenantId.toString() + "_a"), + dbnWithTenant.toStringWithTenantId_forTest()); } TEST(DatabaseNameTest, MultitenancySupportEnabledTenantIDNotRequired) { // TODO SERVER-62114 remove this test case. 
RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); - DatabaseName dbnWithoutTenant(boost::none, "a"); + DatabaseName dbnWithoutTenant = DatabaseName::createDatabaseName_forTest(boost::none, "a"); ASSERT(!dbnWithoutTenant.tenantId()); - ASSERT_EQUALS(std::string("a"), dbnWithoutTenant.db()); - ASSERT_EQUALS(std::string("a"), dbnWithoutTenant.toString()); + ASSERT_EQUALS(std::string("a"), dbnWithoutTenant.toString_forTest()); TenantId tenantId(OID::gen()); - DatabaseName dbnWithTenant(tenantId, "a"); + DatabaseName dbnWithTenant = DatabaseName::createDatabaseName_forTest(tenantId, "a"); ASSERT(dbnWithTenant.tenantId()); ASSERT_EQUALS(tenantId, *dbnWithTenant.tenantId()); - ASSERT_EQUALS(std::string("a"), dbnWithTenant.db()); - ASSERT_EQUALS(std::string("a"), dbnWithTenant.toString()); - ASSERT_EQUALS(std::string(tenantId.toString() + "_a"), dbnWithTenant.toStringWithTenantId()); + ASSERT_EQUALS(std::string("a"), dbnWithTenant.toString_forTest()); + ASSERT_EQUALS(std::string(tenantId.toString() + "_a"), + dbnWithTenant.toStringWithTenantId_forTest()); } TEST(DatabaseNameTest, VerifyEqualsOperator) { TenantId tenantId(OID::gen()); - DatabaseName dbn(tenantId, "a"); - ASSERT_TRUE(DatabaseName(tenantId, "a") == dbn); - ASSERT_TRUE(DatabaseName(tenantId, "b") != dbn); + DatabaseName dbn = DatabaseName::createDatabaseName_forTest(tenantId, "a"); + ASSERT_TRUE(DatabaseName::createDatabaseName_forTest(tenantId, "a") == dbn); + ASSERT_TRUE(DatabaseName::createDatabaseName_forTest(tenantId, "b") != dbn); TenantId otherTenantId = TenantId(OID::gen()); - ASSERT_TRUE(DatabaseName(otherTenantId, "a") != dbn); - ASSERT_TRUE(DatabaseName(boost::none, "a") != dbn); + ASSERT_TRUE(DatabaseName::createDatabaseName_forTest(otherTenantId, "a") != dbn); + ASSERT_TRUE(DatabaseName::createDatabaseName_forTest(boost::none, "a") != dbn); } TEST(DatabaseNameTest, VerifyHashFunction) { TenantId tenantId1(OID::gen()); TenantId tenantId2(OID::gen()); - DatabaseName dbn1 = DatabaseName(tenantId1, "a"); - DatabaseName dbn2 = DatabaseName(tenantId2, "a"); - DatabaseName dbn3 = DatabaseName(boost::none, "a"); + DatabaseName dbn1 = DatabaseName::createDatabaseName_forTest(tenantId1, "a"); + DatabaseName dbn2 = DatabaseName::createDatabaseName_forTest(tenantId2, "a"); + DatabaseName dbn3 = DatabaseName::createDatabaseName_forTest(boost::none, "a"); stdx::unordered_map dbMap; @@ -97,7 +106,7 @@ TEST(DatabaseNameTest, VerifyHashFunction) { ASSERT_EQUALS(dbMap[dbn1], "value T1 a1"); dbMap[dbn1] = "value T1 a2"; ASSERT_EQUALS(dbMap[dbn1], "value T1 a2"); - dbMap[DatabaseName(tenantId1, "a")] = "value T1 a3"; + dbMap[DatabaseName::createDatabaseName_forTest(tenantId1, "a")] = "value T1 a3"; ASSERT_EQUALS(dbMap[dbn1], "value T1 a3"); dbMap[dbn2] = "value T2 a1"; @@ -121,33 +130,93 @@ TEST(DatabaseNameTest, VerifyCompareFunction) { // OID's generated by the same process are monotonically increasing. 
ASSERT(tenantId1 < tenantId2); - DatabaseName dbn1a = DatabaseName(tenantId1, "a"); - DatabaseName dbn1b = DatabaseName(tenantId1, "b"); - DatabaseName dbn2a = DatabaseName(tenantId2, "a"); - DatabaseName dbn3a = DatabaseName(boost::none, "a"); - - ASSERT(dbn1a < dbn1b); - ASSERT(dbn1b < dbn2a); - ASSERT(dbn3a != dbn1a); - ASSERT(dbn1a != dbn2a); + DatabaseName dbn1a = DatabaseName::createDatabaseName_forTest(tenantId1, "a"); + DatabaseName dbn1A = DatabaseName::createDatabaseName_forTest(tenantId1, "A"); + DatabaseName dbn1b = DatabaseName::createDatabaseName_forTest(tenantId1, "b"); + DatabaseName dbn2a = DatabaseName::createDatabaseName_forTest(tenantId2, "a"); + DatabaseName dbn3a = DatabaseName::createDatabaseName_forTest(boost::none, "a"); + DatabaseName dbn3A = DatabaseName::createDatabaseName_forTest(boost::none, "a"); + + ASSERT_LT(dbn1a, dbn1b); + ASSERT_LT(dbn1b, dbn2a); + ASSERT_NE(dbn3a, dbn1a); + ASSERT_NE(dbn1a, dbn2a); + ASSERT_LT(dbn3a, dbn1a); + ASSERT_LT(dbn3a, dbn2a); + ASSERT_GT(dbn2a, dbn1a); + ASSERT_TRUE(dbn1a.equalCaseInsensitive(dbn1a)); + ASSERT_TRUE(dbn1a.equalCaseInsensitive(dbn1A)); + ASSERT_FALSE(dbn1a.equalCaseInsensitive(dbn2a)); + ASSERT_FALSE(dbn1a.equalCaseInsensitive(dbn3a)); + ASSERT_TRUE(dbn3a.equalCaseInsensitive(dbn3A)); } TEST(DatabaseNameTest, CheckDatabaseNameLogAttrs) { TenantId tenantId(OID::gen()); - DatabaseName dbWithTenant(tenantId, "myLongDbName"); + DatabaseName dbWithTenant = DatabaseName::createDatabaseName_forTest(tenantId, "myLongDbName"); startCapturingLogMessages(); LOGV2(7448500, "Msg db:", logAttrs(dbWithTenant)); ASSERT_EQUALS(1, countBSONFormatLogLinesIsSubset( - BSON("attr" << BSON("db" << dbWithTenant.toStringWithTenantId())))); + BSON("attr" << BSON("db" << dbWithTenant.toStringWithTenantId_forTest())))); LOGV2(7448501, "Msg database:", "database"_attr = dbWithTenant); ASSERT_EQUALS(1, - countBSONFormatLogLinesIsSubset( - BSON("attr" << BSON("database" << dbWithTenant.toStringWithTenantId())))); + countBSONFormatLogLinesIsSubset(BSON( + "attr" << BSON("database" << dbWithTenant.toStringWithTenantId_forTest())))); stopCapturingLogMessages(); } +TEST(DatabaseNameTest, EmptyDbString) { + DatabaseName empty{}; + ASSERT_FALSE(empty.tenantId()); + ASSERT_EQ(empty.toString_forTest(), ""); + ASSERT_EQ(empty.toStringWithTenantId_forTest(), ""); + + DatabaseName emptyFromStringData = + DatabaseName::createDatabaseName_forTest(boost::none, StringData()); + ASSERT_FALSE(emptyFromStringData.tenantId()); + ASSERT_EQ(emptyFromStringData.toString_forTest(), ""); + ASSERT_EQ(emptyFromStringData.toStringWithTenantId_forTest(), ""); + + TenantId tenantId(OID::gen()); + DatabaseName emptyWithTenantId = DatabaseName::createDatabaseName_forTest(tenantId, ""); + ASSERT(emptyWithTenantId.tenantId()); + ASSERT_EQ(emptyWithTenantId.toString_forTest(), ""); + ASSERT_EQ(emptyWithTenantId.toStringWithTenantId_forTest(), + fmt::format("{}_", tenantId.toString())); +} + +TEST(DatabaseNameTest, FromDataEquality) { + NamespaceString test = NamespaceString::createNamespaceString_forTest("foo"); + ASSERT_EQ(test.dbName(), DatabaseName::createDatabaseName_forTest(boost::none, "foo")); + NamespaceString testTwo{DatabaseName::createDatabaseName_forTest(boost::none, "foo")}; + ASSERT_EQ(testTwo.dbName(), DatabaseName::createDatabaseName_forTest(boost::none, "foo")); +} + +TEST(DatabaseNameTest, ValidDbNameLength) { + const std::string longStr = + "1234567890123456789012345678901234567890123456789012345678901234567890"; + const auto dbName = 
DatabaseName::createDatabaseName_forTest( + boost::none, longStr.substr(0, DatabaseName::kMaxDatabaseNameLength)); + ASSERT_EQ(dbName.toString_forTest().size(), DatabaseName::kMaxDatabaseNameLength); + ASSERT_THROWS_CODE( + DatabaseName::createDatabaseName_forTest( + boost::none, longStr.substr(0, DatabaseName::kMaxDatabaseNameLength + 1)), + DBException, + ErrorCodes::InvalidNamespace); + + const TenantId tenantId(OID::gen()); + const auto tenantDbName = DatabaseName::createDatabaseName_forTest( + tenantId, longStr.substr(0, DatabaseName::kMaxTenantDatabaseNameLength)); + ASSERT_EQ(tenantDbName.toString_forTest().size(), DatabaseName::kMaxTenantDatabaseNameLength); + ASSERT_THROWS_CODE( + DatabaseName::createDatabaseName_forTest( + tenantId, longStr.substr(0, DatabaseName::kMaxTenantDatabaseNameLength + 1)), + DBException, + ErrorCodes::InvalidNamespace); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/db_raii.cpp b/src/mongo/db/db_raii.cpp index 64b9dbe68a024..8c62c6ccee080 100644 --- a/src/mongo/db/db_raii.cpp +++ b/src/mongo/db/db_raii.cpp @@ -29,47 +29,76 @@ #include "mongo/db/db_raii.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/catalog_helper.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" #include "mongo/db/catalog/collection_yield_restore.h" #include "mongo/db/catalog/database_holder.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" -#include "mongo/db/repl/collection_utils.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/storage/capped_snapshots.h" #include "mongo/db/storage/snapshot_helper.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/message.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage namespace mongo { namespace { +MONGO_FAIL_POINT_DEFINE(hangBeforeAutoGetCollectionLockFreeShardedStateAccess); MONGO_FAIL_POINT_DEFINE(hangBeforeAutoGetShardVersionCheck); MONGO_FAIL_POINT_DEFINE(reachedAutoGetLockFreeShardConsistencyRetry); const boost::optional kDoNotChangeProfilingLevel = boost::none; -// TODO (SERVER-69813): Get rid of this when ShardServerCatalogCacheLoader will be removed. -// If set to false, secondary reads should wait behind the PBW lock. 
-const auto allowSecondaryReadsDuringBatchApplication_DONT_USE = - OperationContext::declareDecoration>(); - /** * Performs some checks to determine whether the operation is compatible with a lock-free read. * Multi-doc transactions are not supported, nor are operations holding an exclusive lock. */ bool supportsLockFreeRead(OperationContext* opCtx) { // Lock-free reads are not supported in multi-document transactions. - // Lock-free reads are not supported under an exclusive lock (nested reads under exclusive lock - // holding operations). + // Lock-free reads are not supported when performing a write. // Lock-free reads are not supported if a storage txn is already open w/o the lock-free reads // operation flag set. return !storageGlobalParams.disableLockFreeReads && !opCtx->inMultiDocumentTransaction() && @@ -77,55 +106,6 @@ bool supportsLockFreeRead(OperationContext* opCtx) { !(opCtx->recoveryUnit()->isActive() && !opCtx->isLockFreeReadsOp()); } -/** - * Type that pretends to be a Collection. It implements the minimal interface used by - * acquireCollectionAndConsistentSnapshot(). We are tricking acquireCollectionAndConsistentSnapshot - * to establish a consistent snapshot with just the catalog and not for a specific Collection. - */ -class FakeCollection { -public: - // We just need to return something that would not considered to be the oplog. A default - // constructed NamespaceString is fine. - const NamespaceString& ns() const { - return _ns; - }; - // We just need to return something that compares equal with itself here. - boost::optional getMinimumVisibleSnapshot() const { - return boost::none; - } - -private: - NamespaceString _ns; -}; - -/** - * If the given collection exists, asserts that the minimum visible timestamp of 'collection' is - * compatible with 'readTimestamp'. Throws a SnapshotUnavailable error if the assertion fails. - */ -void assertCollectionChangesCompatibleWithReadTimestamp(OperationContext* opCtx, - const Collection* collection, - boost::optional readTimestamp) { - // Check that the collection exists. - if (!collection) { - return; - } - - // Ensure the readTimestamp is not older than the collection's minimum visible timestamp. - auto minSnapshot = collection->getMinimumVisibleSnapshot(); - if (SnapshotHelper::collectionChangesConflictWithRead(minSnapshot, readTimestamp)) { - // Note: SnapshotHelper::collectionChangesConflictWithRead returns false if either - // minSnapshot or readTimestamp is not set, so it's safe to print them below. - uasserted( - ErrorCodes::SnapshotUnavailable, - str::stream() << "Unable to read from a snapshot due to pending collection catalog " - "changes to collection '" - << collection->ns() - << "'; please retry the operation. Snapshot timestamp is " - << readTimestamp->toString() << ". Collection minimum timestamp is " - << minSnapshot->toString()); - } -} - /** * Performs validation of special locking requirements for certain namespaces. */ @@ -176,18 +156,6 @@ bool isAnyNssAViewOrSharded(OperationContext* opCtx, }); } -void assertAllNamespacesAreCompatibleForReadTimestamp( - OperationContext* opCtx, - const CollectionCatalog* catalog, - const std::vector& namespaces, - const boost::optional& readTimestamp) { - for (auto&& nss : namespaces) { - auto collection = catalog->lookupCollectionByNamespace(opCtx, nss); - // Check that the collection has not had a DDL operation since readTimestamp. 
- assertCollectionChangesCompatibleWithReadTimestamp(opCtx, collection, readTimestamp); - } -} - /** * Resolves all NamespaceStringOrUUIDs in the input vector by using the input catalog to call * CollectionCatalog::resolveSecondaryNamespacesOrUUIDs. @@ -264,121 +232,6 @@ bool haveAcquiredConsistentCatalogAndSnapshot( } } -/** - * Helper function to acquire a consistent catalog and storage snapshot without holding the RSTL or - * collection locks. - * - * GetCollectionAndEstablishReadSourceFunc is called before we open a snapshot, it needs to fetch - * the Collection from the catalog and select the read source. - * - * ResetFunc is called when we failed to achieve consistency and need to retry. - * - * SetSecondaryState sets any of the secondary state that the AutoGet* needs to know about. - */ -template -auto acquireCollectionAndConsistentSnapshot( - OperationContext* opCtx, - bool isLockFreeReadSubOperation, - CollectionCatalogStasher& catalogStasher, - GetCollectionAndEstablishReadSourceFunc getCollectionAndEstablishReadSource, - ResetFunc reset, - SetSecondaryState setSecondaryState, - const std::vector& secondaryNssOrUUIDs = {}) { - // Figure out what type of Collection GetCollectionAndEstablishReadSourceFunc returns. It needs - // to behave like a pointer. - using CollectionPtrT = decltype(std::declval()( - std::declval(), - std::declval(), - std::declval()) - .first); - - CollectionPtrT collection; - catalogStasher.reset(); - while (true) { - // AutoGetCollectionForReadBase can choose a read source based on the current replication - // state. Therefore we must fetch the repl state beforehand, to compare with afterwards. - long long replTerm = repl::ReplicationCoordinator::get(opCtx)->getTerm(); - - auto catalog = CollectionCatalog::get(opCtx); - - auto [localColl, isView] = - getCollectionAndEstablishReadSource(opCtx, *catalog, isLockFreeReadSubOperation); - collection = localColl; - - auto resolvedSecondaryNamespaces = - resolveSecondaryNamespacesOrUUIDs(opCtx, catalog.get(), secondaryNssOrUUIDs); - - if (resolvedSecondaryNamespaces) { - // Note that calling getPointInTimeReadTimestamp may open a snapshot if one is not - // already open, depending on the current read source. - const auto readTimestamp = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); - assertAllNamespacesAreCompatibleForReadTimestamp( - opCtx, catalog.get(), *resolvedSecondaryNamespaces, readTimestamp); - } - - // A lock request does not always find a collection to lock. But if we found a view abort - // LFR setup, we don't need to open a storage snapshot in this case as the lock helper will - // be released and we will lock the Collection backing the view later on. - if (!collection && isView) - break; - - // If this is a nested lock acquisition, then we already have a consistent stashed catalog - // and snapshot from which to read and we can skip the below logic. - if (isLockFreeReadSubOperation) { - // A consistent in-memory and on-disk state is already set up by a higher level AutoGet* - // instance. We just need to return the requested Collection which has already been - // checked by getCollectionAndEstablishReadSource above. - return collection; - } - - // We must open a storage snapshot consistent with the fetched in-memory Catalog instance - // and chosen read source. The Catalog instance and replication state after opening a - // snapshot will be compared with the previously acquired state. 
If either does not match, - // then this loop will retry lock acquisition and read source selection until there is a - // match. - // - // Note: getCollectionAndEstablishReadSource() may open a snapshot for PIT reads, so - // preallocateSnapshot() may be a no-op, but that is OK because the snapshot is established - // by getCollectionAndEstablishReadSource() after it fetches a Collection instance. - if (collection && collection->ns().isOplog()) { - // Signal to the RecoveryUnit that the snapshot will be used for reading the oplog. - // Normally the snapshot is opened from a cursor that can take special action when - // reading from the oplog. - opCtx->recoveryUnit()->preallocateSnapshotForOplogRead(); - } else { - opCtx->recoveryUnit()->preallocateSnapshot(); - } - - // Verify that the catalog has not changed while we opened the storage snapshot. If the - // catalog is unchanged, then the requested Collection is also guaranteed to be the same. - auto newCatalog = CollectionCatalog::get(opCtx); - - if (haveAcquiredConsistentCatalogAndSnapshot( - opCtx, - catalog.get(), - newCatalog.get(), - replTerm, - repl::ReplicationCoordinator::get(opCtx)->getTerm(), - resolvedSecondaryNamespaces)) { - bool isAnySecondaryNssShardedOrAView = !resolvedSecondaryNamespaces.has_value(); - setSecondaryState(isAnySecondaryNssShardedOrAView); - catalogStasher.stash(std::move(catalog)); - break; - } - - LOGV2_DEBUG(5067701, - 3, - "Retrying acquiring state for lock-free read because collection, catalog or " - "replication state changed."); - reset(); - opCtx->recoveryUnit()->abandonSnapshot(); - } - - return collection; -} - void assertReadConcernSupported(const CollectionPtr& coll, const repl::ReadConcernArgs& readConcernArgs, const RecoveryUnit::ReadSource& readSource) { @@ -497,193 +350,13 @@ AutoStatsTracker::~AutoStatsTracker() { curOp->getReadWriteType()); } -template -AutoGetCollectionForReadBase:: - AutoGetCollectionForReadBase(OperationContext* opCtx, - const EmplaceAutoCollFunc& emplaceAutoColl, - bool isLockFreeReadSubOperation) { - // If this instance is nested and lock-free, then we do not want to adjust any setting, but we - // do need to set up the Collection reference. - if (isLockFreeReadSubOperation) { - emplaceAutoColl.emplace(_autoColl); - return; - } - - // The caller was expecting to conflict with batch application before entering this function. - // i.e. the caller does not currently have a ShouldNotConflict... block in scope. - bool callerWasConflicting = opCtx->lockState()->shouldConflictWithSecondaryBatchApplication(); - - if (allowSecondaryReadsDuringBatchApplication_DONT_USE(opCtx).value_or(true) && - opCtx->getServiceContext()->getStorageEngine()->supportsReadConcernSnapshot()) { - _shouldNotConflictWithSecondaryBatchApplicationBlock.emplace(opCtx->lockState()); - } - - emplaceAutoColl.emplace(_autoColl); - - auto readConcernArgs = repl::ReadConcernArgs::get(opCtx); - // If the collection doesn't exist or disappears after releasing locks and waiting, there is no - // need to check for pending catalog changes. - while (const auto& coll = _autoColl->getCollection()) { - assertReadConcernSupported( - coll, readConcernArgs, opCtx->recoveryUnit()->getTimestampReadSource()); - - if (coll->usesCappedSnapshots()) { - CappedSnapshots::get(opCtx).establish(opCtx, coll); - } - - // We make a copy of the namespace so we can use the variable after locks are released, - // since releasing locks will allow the value of coll->ns() to change. 
- const NamespaceString nss = coll->ns(); - // During batch application on secondaries, there is a potential to read inconsistent states - // that would normally be protected by the PBWM lock. In order to serve secondary reads - // during this period, we default to not acquiring the lock (by setting - // _shouldNotConflictWithSecondaryBatchApplicationBlock). On primaries, we always read at a - // consistent time, so not taking the PBWM lock is not a problem. On secondaries, we have to - // guarantee we read at a consistent state, so we must read at the lastApplied timestamp, - // which is set after each complete batch. - - // Once we have our locks, check whether or not we should override the ReadSource that was - // set before acquiring locks. - const bool shouldReadAtLastApplied = SnapshotHelper::changeReadSourceIfNeeded(opCtx, nss); - // Update readSource in case it was updated. - const auto readSource = opCtx->recoveryUnit()->getTimestampReadSource(); - - const auto readTimestamp = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); - - checkInvariantsForReadOptions(nss, - readConcernArgs.getArgsAfterClusterTime(), - readSource, - readTimestamp, - callerWasConflicting, - shouldReadAtLastApplied); - - auto minSnapshot = coll->getMinimumVisibleSnapshot(); - if (!SnapshotHelper::collectionChangesConflictWithRead(minSnapshot, readTimestamp)) { - return; - } - - // If we are reading at a provided timestamp earlier than the latest catalog changes, - // then we must return an error. - if (readSource == RecoveryUnit::ReadSource::kProvided) { - uasserted(ErrorCodes::SnapshotUnavailable, - str::stream() - << "Unable to read from a snapshot due to pending collection catalog " - "changes; please retry the operation. Snapshot timestamp is " - << readTimestamp->toString() << ". Collection minimum is " - << minSnapshot->toString()); - } - - invariant( - // The kMajorityCommitted and kLastApplied read sources already read from timestamps - // that are safe with respect to concurrent secondary batch application, and are - // eligible for retrying. - readSource == RecoveryUnit::ReadSource::kMajorityCommitted || - readSource == RecoveryUnit::ReadSource::kNoOverlap || - readSource == RecoveryUnit::ReadSource::kLastApplied); - - invariant(readConcernArgs.getLevel() != repl::ReadConcernLevel::kSnapshotReadConcern); - - // Yield locks in order to do the blocking call below. - _autoColl = boost::none; - - // If there are pending catalog changes when using a no-overlap or lastApplied read source, - // we yield to get a new read timestamp ahead of the minimum visible snapshot. - if (readSource == RecoveryUnit::ReadSource::kLastApplied || - readSource == RecoveryUnit::ReadSource::kNoOverlap) { - invariant(readTimestamp); - LOGV2(20576, - "Tried reading at a timestamp, but future catalog changes are pending. " - "Trying again", - "readTimestamp"_attr = *readTimestamp, - "collection"_attr = nss.ns(), - "collectionMinSnapshot"_attr = *minSnapshot); - - // If we are AutoGetting multiple collections, it is possible that we've already done - // some reads and locked in our snapshot. At this point, the only way out is to fail - // the operation. The client application will need to retry. - uassert( - ErrorCodes::SnapshotUnavailable, - str::stream() << "Unable to read from a snapshot due to pending collection catalog " - "changes and holding multiple collection locks; please retry the " - "operation. Snapshot timestamp is " - << readTimestamp->toString() << ". 
Collection minimum is " - << minSnapshot->toString(), - !opCtx->lockState()->isLocked()); - - // Abandon our snapshot. We may select a new read timestamp or ReadSource in the next - // loop iteration. - opCtx->recoveryUnit()->abandonSnapshot(); - } - - if (readSource == RecoveryUnit::ReadSource::kMajorityCommitted) { - const auto replCoord = repl::ReplicationCoordinator::get(opCtx); - replCoord->waitUntilSnapshotCommitted(opCtx, *minSnapshot); - uassertStatusOK(opCtx->recoveryUnit()->majorityCommittedSnapshotAvailable()); - } - - { - stdx::lock_guard lk(*opCtx->getClient()); - CurOp::get(opCtx)->yielded(); - } - - emplaceAutoColl.emplace(_autoColl); - } -} - -EmplaceAutoGetCollectionForRead::EmplaceAutoGetCollectionForRead( - OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options options) - : _opCtx(opCtx), - _nsOrUUID(nsOrUUID), - // Multi-document transactions need MODE_IX locks, otherwise MODE_IS. - _collectionLockMode(getLockModeForQuery(opCtx, nsOrUUID.nss())), - _options(std::move(options)) {} - -void EmplaceAutoGetCollectionForRead::emplace(boost::optional& autoColl) const { - autoColl.emplace( - _opCtx, _nsOrUUID, _collectionLockMode, _options, AutoGetCollection::ForReadTag{}); -} - -AutoGetCollectionForReadLegacy::AutoGetCollectionForReadLegacy( - OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options options) - : AutoGetCollectionForReadBase(opCtx, - EmplaceAutoGetCollectionForRead(opCtx, nsOrUUID, options)) { - const auto& secondaryNssOrUUIDs = options._secondaryNssOrUUIDs; - - // All relevant locks are held. Check secondary collections and verify they are valid for - // use. - if (getCollection() && !secondaryNssOrUUIDs.empty()) { - auto catalog = CollectionCatalog::get(opCtx); - - auto resolvedNamespaces = - resolveSecondaryNamespacesOrUUIDs(opCtx, catalog.get(), secondaryNssOrUUIDs); - - _secondaryNssIsAViewOrSharded = !resolvedNamespaces.has_value(); - - // If no secondary namespace is a view or is sharded, resolve namespaces and check their - // that their minVisible timestamps are compatible with the read timestamp. - if (resolvedNamespaces) { - // Note that calling getPointInTimeReadTimestamp may open a snapshot if one is not - // already open, depending on the current read source. 
- const auto readTimestamp = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); - assertAllNamespacesAreCompatibleForReadTimestamp( - opCtx, catalog.get(), *resolvedNamespaces, readTimestamp); - } - } -} - -AutoGetCollectionForReadPITCatalog::AutoGetCollectionForReadPITCatalog( - OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options options) +AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx, + const NamespaceStringOrUUID& nsOrUUID, + AutoGetCollection::Options options) : _callerWasConflicting(opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()), _shouldNotConflictWithSecondaryBatchApplicationBlock( [&]() -> boost::optional { - if (allowSecondaryReadsDuringBatchApplication_DONT_USE(opCtx).value_or(true) && - opCtx->getServiceContext()->getStorageEngine()->supportsReadConcernSnapshot()) { + if (opCtx->getServiceContext()->getStorageEngine()->supportsReadConcernSnapshot()) { return boost::optional( opCtx->lockState()); } @@ -691,9 +364,9 @@ AutoGetCollectionForReadPITCatalog::AutoGetCollectionForReadPITCatalog( return boost::none; }()), _autoDb(AutoGetDb::createForAutoGetCollection( - opCtx, nsOrUUID, getLockModeForQuery(opCtx, nsOrUUID.nss()), options)) { + opCtx, nsOrUUID, getLockModeForQuery(opCtx, nsOrUUID), options)) { - const auto modeColl = getLockModeForQuery(opCtx, nsOrUUID.nss()); + const auto modeColl = getLockModeForQuery(opCtx, nsOrUUID); const auto viewMode = options._viewMode; const auto deadline = options._deadline; const auto& secondaryNssOrUUIDs = options._secondaryNssOrUUIDs; @@ -702,7 +375,11 @@ AutoGetCollectionForReadPITCatalog::AutoGetCollectionForReadPITCatalog( // there are many, however, the locks must be taken in _ascending_ ResourceId order to avoid // deadlocks across threads. if (secondaryNssOrUUIDs.empty()) { - uassertStatusOK(nsOrUUID.isNssValid()); + uassert(ErrorCodes::InvalidNamespace, + fmt::format("Namespace {} is not a valid collection name", + nsOrUUID.toStringForErrorMsg()), + nsOrUUID.isUUID() || (nsOrUUID.isNamespaceString() && nsOrUUID.nss().isValid())); + _collLocks.emplace_back(opCtx, nsOrUUID, modeColl, deadline); } else { catalog_helper::acquireCollectionLocksInResourceIdOrder( @@ -740,7 +417,6 @@ AutoGetCollectionForReadPITCatalog::AutoGetCollectionForReadPITCatalog( _coll.makeYieldable(opCtx, LockedCollectionYieldRestore{opCtx, _coll}); // Validate primary collection. - checkCollectionUUIDMismatch(opCtx, _resolvedNss, _coll, options._expectedUUID); verifyNamespaceLockingRequirements(opCtx, modeColl, _resolvedNss); // Check secondary collections and verify they are valid for use. @@ -792,6 +468,8 @@ AutoGetCollectionForReadPITCatalog::AutoGetCollectionForReadPITCatalog( _callerWasConflicting, shouldReadAtLastApplied); + checkCollectionUUIDMismatch(opCtx, *catalog, _resolvedNss, _coll, options._expectedUUID); + return; } @@ -804,13 +482,13 @@ AutoGetCollectionForReadPITCatalog::AutoGetCollectionForReadPITCatalog( // namespace were a view, the collection UUID mismatch check would have failed above. 
if ((_view = catalog->lookupView(opCtx, _resolvedNss))) { uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Taking " << _resolvedNss.ns() + str::stream() << "Taking " << _resolvedNss.toStringForErrorMsg() << " lock for timeseries is not allowed", viewMode == auto_get_collection::ViewMode::kViewsPermitted || !_view->timeseries()); uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Namespace " << _resolvedNss.ns() + str::stream() << "Namespace " << _resolvedNss.toStringForErrorMsg() << " is a view, not a collection", viewMode == auto_get_collection::ViewMode::kViewsPermitted); @@ -818,7 +496,7 @@ AutoGetCollectionForReadPITCatalog::AutoGetCollectionForReadPITCatalog( *receivedShardVersion, ShardVersion::UNSHARDED() /* wantedVersion */, ShardingState::get(opCtx)->shardId()), - str::stream() << "Namespace " << _resolvedNss + str::stream() << "Namespace " << _resolvedNss.toStringForErrorMsg() << " is a view therefore the shard " << "version attached to the request must be unset or UNSHARDED", !receivedShardVersion || *receivedShardVersion == ShardVersion::UNSHARDED()); @@ -843,173 +521,31 @@ AutoGetCollectionForReadPITCatalog::AutoGetCollectionForReadPITCatalog( *receivedShardVersion, boost::none /* wantedVersion */, ShardingState::get(opCtx)->shardId()), - str::stream() << "No metadata for namespace " << _resolvedNss << " therefore the shard " + str::stream() << "No metadata for namespace " << _resolvedNss.toStringForErrorMsg() + << " therefore the shard " << "version attached to the request must be unset, UNSHARDED or IGNORED", !receivedShardVersion || *receivedShardVersion == ShardVersion::UNSHARDED() || ShardVersion::isPlacementVersionIgnored(*receivedShardVersion)); -} - -const CollectionPtr& AutoGetCollectionForReadPITCatalog::getCollection() const { - return _coll; -} - -const ViewDefinition* AutoGetCollectionForReadPITCatalog::getView() const { - return _view.get(); -} -const NamespaceString& AutoGetCollectionForReadPITCatalog::getNss() const { - return _resolvedNss; -} - -AutoGetCollectionForRead::AutoGetCollectionForRead(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options options) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - _legacy.emplace(opCtx, nsOrUUID, options); - } else { - _pitCatalog.emplace(opCtx, nsOrUUID, options); - } + checkCollectionUUIDMismatch(opCtx, *catalog, _resolvedNss, _coll, options._expectedUUID); } const CollectionPtr& AutoGetCollectionForRead::getCollection() const { - return _pitCatalog ? _pitCatalog->getCollection() : _legacy->getCollection(); + return _coll; } const ViewDefinition* AutoGetCollectionForRead::getView() const { - return _pitCatalog ? _pitCatalog->getView() : _legacy->getView(); + return _view.get(); } const NamespaceString& AutoGetCollectionForRead::getNss() const { - return _pitCatalog ? _pitCatalog->getNss() : _legacy->getNss(); -} - -bool AutoGetCollectionForRead::isAnySecondaryNamespaceAViewOrSharded() const { - return _pitCatalog ? 
_pitCatalog->isAnySecondaryNamespaceAViewOrSharded() - : _legacy->isAnySecondaryNamespaceAViewOrSharded(); -} - -AutoGetCollectionForReadLockFreeLegacy::EmplaceHelper::EmplaceHelper( - OperationContext* opCtx, - CollectionCatalogStasher& catalogStasher, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollectionLockFree::Options options, - bool isLockFreeReadSubOperation) - : _opCtx(opCtx), - _catalogStasher(catalogStasher), - _nsOrUUID(nsOrUUID), - _options(std::move(options)), - _isLockFreeReadSubOperation(isLockFreeReadSubOperation) {} - -void AutoGetCollectionForReadLockFreeLegacy::EmplaceHelper::emplace( - boost::optional& autoColl) const { - autoColl.emplace( - _opCtx, - _nsOrUUID, - /* restoreFromYield */ - [&catalogStasher = _catalogStasher, isSubOperation = _isLockFreeReadSubOperation]( - std::shared_ptr& collection, OperationContext* opCtx, UUID uuid) { - // A sub-operation should never yield because it would break the consistent in-memory - // and on-disk view of the higher level operation. - invariant(!isSubOperation); - - collection = acquireCollectionAndConsistentSnapshot( - opCtx, - /* isLockFreeReadSubOperation */ - isSubOperation, - /* CollectionCatalogStasher */ - catalogStasher, - /* GetCollectionAndEstablishReadSourceFunc */ - [uuid](OperationContext* opCtx, - const CollectionCatalog& catalog, - bool isLockFreeReadSubOperation) { - // There should only ever be one helper recovering from a query yield, so it - // should never be nested. - invariant(!isLockFreeReadSubOperation); - - auto coll = catalog.lookupCollectionByUUIDForRead_DONT_USE(opCtx, uuid); - - if (coll) { - if (coll->usesCappedSnapshots()) { - CappedSnapshots::get(opCtx).establish(opCtx, coll.get()); - } - - // After yielding and reacquiring locks, the preconditions that were used to - // select our ReadSource initially need to be checked again. We select a - // ReadSource based on replication state. After a query yields its locks, - // the replication state may have changed, invalidating our current choice - // of ReadSource. Using the same preconditions, change our ReadSource if - // necessary. - SnapshotHelper::changeReadSourceIfNeeded(opCtx, coll->ns()); - } - - return std::make_pair(coll, /* isView */ false); - }, - /* ResetFunc */ - []() {}, - /* SetSecondaryState */ - [](bool isAnySecondaryNamespaceAViewOrSharded) { - // Not necessary to check for views or sharded secondary collections, which are - // unsupported. If a read is running, changing a namespace to a view would - // require dropping the collection first, which trips other checks. A secondary - // collection becoming sharded during a read is ignored to parallel existing - // behavior for the primary collection. - }); - }, - _options); -} - -AutoGetCollectionForReadLockFreeLegacy::AutoGetCollectionForReadLockFreeLegacy( - OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options options) - : _catalogStash(opCtx) { - bool isLockFreeReadSubOperation = opCtx->isLockFreeReadsOp(); - - // Supported lock-free reads should only ever have an open storage snapshot prior to calling - // this helper if it is a nested lock-free operation. The storage snapshot and in-memory state - // used across lock=free reads must be consistent. 
- invariant(supportsLockFreeRead(opCtx) && - (!opCtx->recoveryUnit()->isActive() || isLockFreeReadSubOperation)); - - EmplaceHelper emplaceFunc(opCtx, - _catalogStash, - nsOrUUID, - AutoGetCollectionLockFree::Options{} - .viewMode(options._viewMode) - .deadline(options._deadline) - .expectedUUID(options._expectedUUID), - isLockFreeReadSubOperation); - acquireCollectionAndConsistentSnapshot( - opCtx, - /* isLockFreeReadSubOperation */ - isLockFreeReadSubOperation, - /* CollectionCatalogStasher */ - _catalogStash, - /* GetCollectionAndEstablishReadSourceFunc */ - [this, &emplaceFunc]( - OperationContext* opCtx, const CollectionCatalog&, bool isLockFreeReadSubOperation) { - _autoGetCollectionForReadBase.emplace(opCtx, emplaceFunc, isLockFreeReadSubOperation); - return std::make_pair(_autoGetCollectionForReadBase->getCollection().get(), - _autoGetCollectionForReadBase->getView()); - }, - /* ResetFunc */ - [this]() { _autoGetCollectionForReadBase.reset(); }, - /* SetSecondaryState */ - [this](bool isAnySecondaryNamespaceAViewOrSharded) { - _secondaryNssIsAViewOrSharded = isAnySecondaryNamespaceAViewOrSharded; - }, - options._secondaryNssOrUUIDs); + return _resolvedNss; } namespace { -void openSnapshot(OperationContext* opCtx, bool isForOplogRead) { - if (isForOplogRead) { - opCtx->recoveryUnit()->preallocateSnapshotForOplogRead(); - } else { - opCtx->recoveryUnit()->preallocateSnapshot(); - } +void openSnapshot(OperationContext* opCtx) { + opCtx->recoveryUnit()->preallocateSnapshot(); } /** @@ -1086,7 +622,7 @@ ConsistentCatalogAndSnapshot getConsistentCatalogAndSnapshot( try { nss = catalogBeforeSnapshot->resolveNamespaceStringOrUUID(opCtx, nsOrUUID); } catch (const ExceptionFor&) { - invariant(nsOrUUID.uuid()); + invariant(nsOrUUID.isUUID()); const auto readSource = opCtx->recoveryUnit()->getTimestampReadSource(); if (readSource == RecoveryUnit::ReadSource::kNoTimestamp || @@ -1111,7 +647,7 @@ ConsistentCatalogAndSnapshot getConsistentCatalogAndSnapshot( // catalog. establishCappedSnapshotIfNeeded(opCtx, catalogBeforeSnapshot, nsOrUUID); - openSnapshot(opCtx, nss.isOplog()); + openSnapshot(opCtx); const auto readSource = opCtx->recoveryUnit()->getTimestampReadSource(); const auto readTimestamp = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); @@ -1144,6 +680,60 @@ ConsistentCatalogAndSnapshot getConsistentCatalogAndSnapshot( } } +/** + * Helper function to acquire a consistent catalog and storage snapshot without holding the RSTL or + * collection locks. + * + * Should only be used to setup lock-free reads in a global/db context. Not safe to use for reading + * on collections. + * + * Pass in boost::none as dbName to setup for a global context + */ +void acquireConsistentCatalogAndSnapshotUnsafe(OperationContext* opCtx, + boost::optional dbName) { + + while (true) { + // AutoGetCollectionForReadBase can choose a read source based on the current replication + // state. Therefore we must fetch the repl state beforehand, to compare with afterwards. + long long replTerm = repl::ReplicationCoordinator::get(opCtx)->getTerm(); + + auto catalog = CollectionCatalog::get(opCtx); + + // Check that the sharding database version matches our read. + if (dbName) { + // Check that the sharding database version matches our read. + DatabaseShardingState::assertMatchingDbVersion(opCtx, *dbName); + } + + // We must open a storage snapshot consistent with the fetched in-memory Catalog instance. 
+ // The Catalog instance and replication state after opening a snapshot will be compared with + // the previously acquired state. If either does not match, then this loop will retry lock + // acquisition and read source selection until there is a match. + opCtx->recoveryUnit()->preallocateSnapshot(); + + // Verify that the catalog has not changed while we opened the storage snapshot. If the + // catalog is unchanged, then the requested Collection is also guaranteed to be the same. + auto newCatalog = CollectionCatalog::get(opCtx); + + if (haveAcquiredConsistentCatalogAndSnapshot( + opCtx, + catalog.get(), + newCatalog.get(), + replTerm, + repl::ReplicationCoordinator::get(opCtx)->getTerm(), + boost::none)) { + CollectionCatalog::stash(opCtx, std::move(catalog)); + return; + } + + LOGV2_DEBUG(5067701, + 3, + "Retrying acquiring state for lock-free read because collection, catalog or " + "replication state changed."); + opCtx->recoveryUnit()->abandonSnapshot(); + } +} + std::shared_ptr lookupView( OperationContext* opCtx, const std::shared_ptr& catalog, @@ -1151,11 +741,13 @@ std::shared_ptr lookupView( auto_get_collection::ViewMode viewMode) { auto view = catalog->lookupView(opCtx, nss); uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Taking " << nss.ns() << " lock for timeseries is not allowed", + str::stream() << "Taking " << nss.toStringForErrorMsg() + << " lock for timeseries is not allowed", !view || viewMode == auto_get_collection::ViewMode::kViewsPermitted || !view->timeseries()); uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Namespace " << nss.ns() << " is a view, not a collection", + str::stream() << "Namespace " << nss.toStringForErrorMsg() + << " is a view, not a collection", !view || viewMode == auto_get_collection::ViewMode::kViewsPermitted); return view; } @@ -1166,10 +758,6 @@ getCollectionForLockFreeRead(OperationContext* opCtx, boost::optional readTimestamp, const NamespaceStringOrUUID& nsOrUUID, AutoGetCollection::Options options) { - - auto& hangBeforeAutoGetCollectionLockFreeShardedStateAccess = - *globalFailPointRegistry().find("hangBeforeAutoGetCollectionLockFreeShardedStateAccess"); - hangBeforeAutoGetCollectionLockFreeShardedStateAccess.executeIf( [&](auto&) { hangBeforeAutoGetCollectionLockFreeShardedStateAccess.pauseWhileSet(opCtx); }, [&](const BSONObj& data) { @@ -1186,7 +774,7 @@ getCollectionForLockFreeRead(OperationContext* opCtx, // above, since getCollectionFromCatalog may call openCollection, which could change the result // of namespace resolution. const auto nss = catalog->resolveNamespaceStringOrUUID(opCtx, nsOrUUID); - checkCollectionUUIDMismatch(opCtx, catalog, nss, coll, options._expectedUUID); + checkCollectionUUIDMismatch(opCtx, *catalog, nss, coll, options._expectedUUID); std::shared_ptr viewDefinition = coll ? 
nullptr : lookupView(opCtx, catalog, nss, options._viewMode); @@ -1232,7 +820,6 @@ boost::optional makeShouldNotConflictWithSecondaryBatchApplicationBlock(OperationContext* opCtx, bool isLockFreeReadSubOperation) { if (!isLockFreeReadSubOperation && - allowSecondaryReadsDuringBatchApplication_DONT_USE(opCtx).value_or(true) && opCtx->getServiceContext()->getStorageEngine()->supportsReadConcernSnapshot()) { return boost::optional( opCtx->lockState()); @@ -1243,14 +830,12 @@ makeShouldNotConflictWithSecondaryBatchApplicationBlock(OperationContext* opCtx, } // namespace -CollectionPtr::RestoreFn AutoGetCollectionForReadLockFreePITCatalog::_makeRestoreFromYieldFn( +CollectionPtr::RestoreFn AutoGetCollectionForReadLockFree::_makeRestoreFromYieldFn( const AutoGetCollection::Options& options, bool callerExpectedToConflictWithSecondaryBatchApplication, const DatabaseName& dbName) { return [this, options, callerExpectedToConflictWithSecondaryBatchApplication, dbName]( OperationContext* opCtx, UUID uuid) -> const Collection* { - _catalogStasher.reset(); - auto nsOrUUID = NamespaceStringOrUUID(dbName, uuid); try { auto catalogStateForNamespace = acquireCatalogStateForNamespace( @@ -1262,7 +847,7 @@ CollectionPtr::RestoreFn AutoGetCollectionForReadLockFreePITCatalog::_makeRestor _resolvedNss = catalogStateForNamespace.resolvedNss; _view = catalogStateForNamespace.view; - _catalogStasher.stash(std::move(catalogStateForNamespace.catalog)); + CollectionCatalog::stash(opCtx, std::move(catalogStateForNamespace.catalog)); return catalogStateForNamespace.collection; } catch (const ExceptionFor&) { @@ -1278,11 +863,10 @@ CollectionPtr::RestoreFn AutoGetCollectionForReadLockFreePITCatalog::_makeRestor }; } -AutoGetCollectionForReadLockFreePITCatalog::AutoGetCollectionForReadLockFreePITCatalog( +AutoGetCollectionForReadLockFree::AutoGetCollectionForReadLockFree( OperationContext* opCtx, NamespaceStringOrUUID nsOrUUID, AutoGetCollection::Options options) : _originalReadSource(opCtx->recoveryUnit()->getTimestampReadSource()), _isLockFreeReadSubOperation(opCtx->isLockFreeReadsOp()), // This has to come before LFRBlock. 
- _catalogStasher(opCtx), _callerExpectedToConflictWithSecondaryBatchApplication( opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()), _shouldNotConflictWithSecondaryBatchApplicationBlock( @@ -1304,7 +888,7 @@ AutoGetCollectionForReadLockFreePITCatalog::AutoGetCollectionForReadLockFreePITC invariant(supportsLockFreeRead(opCtx) && (!opCtx->recoveryUnit()->isActive() || _isLockFreeReadSubOperation)); - DatabaseShardingState::assertMatchingDbVersion(opCtx, nsOrUUID.db()); + DatabaseShardingState::assertMatchingDbVersion(opCtx, nsOrUUID.dbName()); auto readConcernArgs = repl::ReadConcernArgs::get(opCtx); if (_isLockFreeReadSubOperation) { @@ -1346,7 +930,7 @@ AutoGetCollectionForReadLockFreePITCatalog::AutoGetCollectionForReadLockFreePITC if (_view) { _lockFreeReadsBlock.reset(); } - _catalogStasher.stash(std::move(catalogStateForNamespace.catalog)); + CollectionCatalog::stash(opCtx, std::move(catalogStateForNamespace.catalog)); _secondaryNssIsAViewOrSharded = catalogStateForNamespace.isAnySecondaryNssShardedOrAView; _collectionPtr = CollectionPtr(catalogStateForNamespace.collection); @@ -1371,19 +955,6 @@ AutoGetCollectionForReadLockFreePITCatalog::AutoGetCollectionForReadLockFreePITC } } -AutoGetCollectionForReadLockFree::AutoGetCollectionForReadLockFree( - OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options options) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - _impl.emplace( - opCtx, nsOrUUID, std::move(options)); - } else { - _impl.emplace(opCtx, nsOrUUID, std::move(options)); - } -} - AutoGetCollectionForReadMaybeLockFree::AutoGetCollectionForReadMaybeLockFree( OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, @@ -1433,7 +1004,10 @@ AutoGetCollectionForReadCommandBase:: const NamespaceStringOrUUID& nsOrUUID, AutoGetCollection::Options options, AutoStatsTracker::LogMode logMode) - : _autoCollForRead(opCtx, nsOrUUID, options), + : // We disable the expectedUUID option as we must check it after all the shard versioning + // checks. + _autoCollForRead( + opCtx, nsOrUUID, AutoGetCollection::Options{options}.expectedUUID(boost::none)), _statsTracker(opCtx, _autoCollForRead.getNss(), Top::LockType::ReadLocked, @@ -1453,6 +1027,9 @@ AutoGetCollectionForReadCommandBase:: auto scopedCss = CollectionShardingState::acquire(opCtx, _autoCollForRead.getNss()); scopedCss->checkShardVersionOrThrow(opCtx); } + + checkCollectionUUIDMismatch( + opCtx, _autoCollForRead.getNss(), _autoCollForRead.getCollection(), options._expectedUUID); } AutoGetCollectionForReadCommandLockFree::AutoGetCollectionForReadCommandLockFree( @@ -1558,68 +1135,47 @@ const NamespaceString& AutoGetCollectionForReadCommandMaybeLockFree::getNss() co } } +StringData AutoGetCollectionForReadCommandMaybeLockFree::getCollectionType() const { + if (auto&& view = getView()) { + if (view->timeseries()) + return "timeseries"_sd; + return "view"_sd; + } + auto&& collection = getCollection(); + if (!collection) { + return "nonExistent"_sd; + } + return "collection"_sd; +} + + bool AutoGetCollectionForReadCommandMaybeLockFree::isAnySecondaryNamespaceAViewOrSharded() const { return _autoGet ? 
_autoGet->isAnySecondaryNamespaceAViewOrSharded() : _autoGetLockFree->isAnySecondaryNamespaceAViewOrSharded(); } AutoReadLockFree::AutoReadLockFree(OperationContext* opCtx, Date_t deadline) - : _catalogStash(opCtx), - _lockFreeReadsBlock(opCtx), + : _lockFreeReadsBlock(opCtx), _globalLock(opCtx, MODE_IS, deadline, Lock::InterruptBehavior::kThrow, [] { Lock::GlobalLockSkipOptions options; options.skipRSTLLock = true; return options; }()) { - // The catalog will be stashed inside the CollectionCatalogStasher. - FakeCollection fakeColl; - acquireCollectionAndConsistentSnapshot( - opCtx, - /* isLockFreeReadSubOperation */ - false, - /* CollectionCatalogStasher */ - _catalogStash, - /* GetCollectionAndEstablishReadSourceFunc */ - [&](OperationContext* opCtx, const CollectionCatalog&, bool) { - return std::make_pair(&fakeColl, /* isView */ false); - }, - /* ResetFunc */ - []() {}, - /* SetSecondaryState */ - [](bool isAnySecondaryNamespaceAViewOrSharded) {}); + + acquireConsistentCatalogAndSnapshotUnsafe(opCtx, /*dbName*/ boost::none); } AutoGetDbForReadLockFree::AutoGetDbForReadLockFree(OperationContext* opCtx, const DatabaseName& dbName, Date_t deadline) - : _catalogStash(opCtx), - _lockFreeReadsBlock(opCtx), + : _lockFreeReadsBlock(opCtx), _globalLock(opCtx, MODE_IS, deadline, Lock::InterruptBehavior::kThrow, [] { Lock::GlobalLockSkipOptions options; options.skipRSTLLock = true; return options; }()) { - // The catalog will be stashed inside the CollectionCatalogStasher. - FakeCollection fakeColl; - acquireCollectionAndConsistentSnapshot( - opCtx, - /* isLockFreeReadSubOperation */ - false, - /* CollectionCatalogStasher */ - _catalogStash, - /* GetCollectionAndEstablishReadSourceFunc */ - [&](OperationContext* opCtx, const CollectionCatalog&, bool) { - // Check that the sharding database version matches our read. - // Note: this must always be checked, regardless of whether the collection exists, so - // that the dbVersion of this node or the caller gets updated quickly in case either is - // stale. - DatabaseShardingState::assertMatchingDbVersion(opCtx, dbName); - return std::make_pair(&fakeColl, /* isView */ false); - }, - /* ResetFunc */ - []() {}, - /* SetSecondaryState */ - [](bool isAnySecondaryNamespaceAViewOrSharded) {}); + + acquireConsistentCatalogAndSnapshotUnsafe(opCtx, dbName); } AutoGetDbForReadMaybeLockFree::AutoGetDbForReadMaybeLockFree(OperationContext* opCtx, @@ -1643,7 +1199,7 @@ OldClientContext::~OldClientContext() { auto currentOp = CurOp::get(_opCtx); Top::get(_opCtx->getClient()->getServiceContext()) .record(_opCtx, - currentOp->getNS(), + currentOp->getNSS(), currentOp->getLogicalOp(), _opCtx->lockState()->isWriteLocked() ? Top::LockType::WriteLocked : Top::LockType::ReadLocked, @@ -1652,37 +1208,20 @@ OldClientContext::~OldClientContext() { currentOp->getReadWriteType()); } -LockMode getLockModeForQuery(OperationContext* opCtx, const boost::optional& nss) { +LockMode getLockModeForQuery(OperationContext* opCtx, const NamespaceStringOrUUID& nssOrUUID) { invariant(opCtx); // Use IX locks for multi-statement transactions; otherwise, use IS locks. 
if (opCtx->inMultiDocumentTransaction()) { uassert(51071, "Cannot query system.views within a transaction", - !nss || !nss->isSystemDotViews()); + !nssOrUUID.isNamespaceString() || !nssOrUUID.nss().isSystemDotViews()); return MODE_IX; } return MODE_IS; } -BlockSecondaryReadsDuringBatchApplication_DONT_USE:: - BlockSecondaryReadsDuringBatchApplication_DONT_USE(OperationContext* opCtx) - : _opCtx(opCtx) { - auto allowSecondaryReads = &allowSecondaryReadsDuringBatchApplication_DONT_USE(opCtx); - allowSecondaryReads->swap(_originalSettings); - *allowSecondaryReads = false; -} - -BlockSecondaryReadsDuringBatchApplication_DONT_USE:: - ~BlockSecondaryReadsDuringBatchApplication_DONT_USE() { - auto allowSecondaryReads = &allowSecondaryReadsDuringBatchApplication_DONT_USE(_opCtx); - allowSecondaryReads->swap(_originalSettings); -} - -template class AutoGetCollectionForReadBase; template class AutoGetCollectionForReadCommandBase; -template class AutoGetCollectionForReadBase; template class AutoGetCollectionForReadCommandBase; } // namespace mongo diff --git a/src/mongo/db/db_raii.h b/src/mongo/db/db_raii.h index 765beaa5ed284..e603ac2c25eb9 100644 --- a/src/mongo/db/db_raii.h +++ b/src/mongo/db/db_raii.h @@ -29,12 +29,30 @@ #pragma once +#include +#include +#include +#include #include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/stats/top.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/views/view.h" #include "mongo/stdx/variant.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/time_support.h" #include "mongo/util/timer.h" namespace mongo { @@ -87,112 +105,6 @@ class AutoStatsTracker { std::set _nssSet; }; -/** - * Shared base class for AutoGetCollectionForRead and AutoGetCollectionForReadLockFree. - * Do not use directly. - */ -template -class AutoGetCollectionForReadBase { - AutoGetCollectionForReadBase(const AutoGetCollectionForReadBase&) = delete; - AutoGetCollectionForReadBase& operator=(const AutoGetCollectionForReadBase&) = delete; - -public: - AutoGetCollectionForReadBase(OperationContext* opCtx, - const EmplaceAutoGetCollectionFunc& emplaceAutoColl, - bool isLockFreeReadSubOperation = false); - - explicit operator bool() const { - return static_cast(getCollection()); - } - - const Collection* operator->() const { - return getCollection().get(); - } - - const CollectionPtr& operator*() const { - return getCollection(); - } - - const CollectionPtr& getCollection() const { - return _autoColl->getCollection(); - } - - const ViewDefinition* getView() const { - return _autoColl->getView(); - } - - const NamespaceString& getNss() const { - return _autoColl->getNss(); - } - -protected: - // If this field is set, the reader will not take the ParallelBatchWriterMode lock and conflict - // with secondary batch application. This stays in scope with the _autoColl so that locks are - // taken and released in the right order. 
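The lock-free read helpers in the db_raii hunks above no longer carry a CollectionCatalogStasher member; the catalog is pinned to the operation directly (see the CollectionCatalog::stash call earlier in this hunk). A minimal sketch of that pattern, assuming the caller first obtains whichever catalog instance its read source requires; illustrative only, not text from the patch:

    // Pin a catalog instance to the OperationContext so that every lookup performed by
    // this lock-free read observes the same catalog version.
    std::shared_ptr<const CollectionCatalog> catalog = CollectionCatalog::get(opCtx);
    CollectionCatalog::stash(opCtx, std::move(catalog));
    // Subsequent CollectionCatalog::get(opCtx) calls on this operation return the
    // stashed instance until it is replaced or cleared.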
- boost::optional - _shouldNotConflictWithSecondaryBatchApplicationBlock; - - // This field is optional, because the code to wait for majority committed snapshot needs to - // release locks in order to block waiting - boost::optional _autoColl; -}; - -/** - * Helper for AutoGetCollectionForRead below. Contains implementation on how contained - * AutoGetCollection is instantiated by AutoGetCollectionForReadBase. - */ -class EmplaceAutoGetCollectionForRead { -public: - EmplaceAutoGetCollectionForRead(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options options = {}); - - void emplace(boost::optional& autoColl) const; - -private: - OperationContext* _opCtx; - const NamespaceStringOrUUID& _nsOrUUID; - LockMode _collectionLockMode; - AutoGetCollection::Options _options; -}; - -/** - * Same as calling AutoGetCollection with MODE_IS, but in addition ensures that the read will be - * performed against an appropriately committed snapshot if the operation is using a readConcern of - * 'majority'. - * - * Use this when you want to read the contents of a collection, but you are not at the top-level of - * some command. This will ensure your reads obey any requested readConcern, but will not update the - * status of CurrentOp, or add a Top entry. - * - * Any collections specified in 'secondaryNssOrUUIDs' will be checked that their minimum visible - * timestamp supports read concern, throwing a SnapshotUnavailable on error. Additional collection - * and/or database locks will be acquired for 'secondaryNssOrUUIDs' namespaces. - * - * NOTE: Must not be used with any locks held, because it needs to block waiting on the committed - * snapshot to become available, and can potentially release and reacquire locks. - */ -class AutoGetCollectionForReadLegacy - : public AutoGetCollectionForReadBase { -public: - AutoGetCollectionForReadLegacy(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options = {}); - - /** - * Indicates whether any namespace in 'secondaryNssOrUUIDs' is a view or sharded. - * - * The secondary namespaces won't be checked if getCollection() returns nullptr. - */ - bool isAnySecondaryNamespaceAViewOrSharded() const { - return _secondaryNssIsAViewOrSharded; - } - -private: - // Tracks whether any secondary collection namespaces is a view or sharded. - bool _secondaryNssIsAViewOrSharded = false; -}; - /** * Locked version of AutoGetCollectionForRead for setting up an operation for read that ensured that * the read will be performed against an appropriately committed snapshot if the operation is using @@ -205,11 +117,11 @@ class AutoGetCollectionForReadLegacy * Additional collection and/or database locks will be acquired for 'secondaryNssOrUUIDs' * namespaces. 
*/ -class AutoGetCollectionForReadPITCatalog { +class AutoGetCollectionForRead { public: - AutoGetCollectionForReadPITCatalog(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options = {}); + AutoGetCollectionForRead(OperationContext* opCtx, + const NamespaceStringOrUUID& nsOrUUID, + AutoGetCollection::Options = {}); explicit operator bool() const { return static_cast(getCollection()); @@ -258,101 +170,6 @@ class AutoGetCollectionForReadPITCatalog { bool _secondaryNssIsAViewOrSharded = false; }; - -class AutoGetCollectionForRead { -public: - AutoGetCollectionForRead(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options = {}); - - explicit operator bool() const { - return static_cast(getCollection()); - } - - const Collection* operator->() const { - return getCollection().get(); - } - - const CollectionPtr& operator*() const { - return getCollection(); - } - - const CollectionPtr& getCollection() const; - const ViewDefinition* getView() const; - const NamespaceString& getNss() const; - - bool isAnySecondaryNamespaceAViewOrSharded() const; - -private: - boost::optional _legacy; - boost::optional _pitCatalog; -}; - -/** - * Same as AutoGetCollectionForRead above except does not take collection, database or rstl locks. - * Takes the global lock and may take the PBWM, same as AutoGetCollectionForRead. Ensures a - * consistent in-memory and on-disk view of the storage catalog. - * - * This implementation does not use the PIT catalog. - */ -class AutoGetCollectionForReadLockFreeLegacy { -public: - AutoGetCollectionForReadLockFreeLegacy(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options = {}); - - const CollectionPtr& getCollection() const { - return _autoGetCollectionForReadBase->getCollection(); - } - - const ViewDefinition* getView() const { - return _autoGetCollectionForReadBase->getView(); - } - - const NamespaceString& getNss() const { - return _autoGetCollectionForReadBase->getNss(); - } - - bool isAnySecondaryNamespaceAViewOrSharded() const { - return _secondaryNssIsAViewOrSharded; - } - -private: - /** - * Helper for how AutoGetCollectionForReadBase instantiates its owned AutoGetCollectionLockFree. - */ - class EmplaceHelper { - public: - EmplaceHelper(OperationContext* opCtx, - CollectionCatalogStasher& catalogStasher, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollectionLockFree::Options options, - bool isLockFreeReadSubOperation); - - void emplace(boost::optional& autoColl) const; - - private: - OperationContext* _opCtx; - CollectionCatalogStasher& _catalogStasher; - const NamespaceStringOrUUID& _nsOrUUID; - AutoGetCollectionLockFree::Options _options; - - // Set to true if the lock helper using this EmplaceHelper is nested under another lock-free - // helper. - bool _isLockFreeReadSubOperation; - }; - - // Tracks whether any secondary collection namespaces is a view or sharded. - bool _secondaryNssIsAViewOrSharded = false; - - // The CollectionCatalogStasher must outlive the LockFreeReadsBlock in the AutoGet* below. - // ~LockFreeReadsBlock clears a flag that the ~CollectionCatalogStasher checks. - CollectionCatalogStasher _catalogStash; - - boost::optional> - _autoGetCollectionForReadBase; -}; - /** * Same as AutoGetCollectionForRead above except does not take collection, database or rstl locks. * Takes the global lock and may take the PBWM, same as AutoGetCollectionForRead. 
Ensures a @@ -360,11 +177,11 @@ class AutoGetCollectionForReadLockFreeLegacy { * * This implementation uses the point-in-time (PIT) catalog. */ -class AutoGetCollectionForReadLockFreePITCatalog final { +class AutoGetCollectionForReadLockFree final { public: - AutoGetCollectionForReadLockFreePITCatalog(OperationContext* opCtx, - NamespaceStringOrUUID nsOrUUID, - AutoGetCollection::Options options = {}); + AutoGetCollectionForReadLockFree(OperationContext* opCtx, + NamespaceStringOrUUID nsOrUUID, + AutoGetCollection::Options options = {}); const CollectionPtr& getCollection() const { return _collectionPtr; @@ -385,7 +202,7 @@ class AutoGetCollectionForReadLockFreePITCatalog final { private: /** * Creates the std::function object used by CollectionPtrs to restore state for this - * AutoGetCollectionForReadLockFreePITCatalog object after having yielded. + * AutoGetCollectionForReadLockFree object after having yielded. */ CollectionPtr::RestoreFn _makeRestoreFromYieldFn( const AutoGetCollection::Options& options, @@ -393,19 +210,13 @@ class AutoGetCollectionForReadLockFreePITCatalog final { const DatabaseName& dbName); // Used so that we can reset the read source back to the original read source when this instance - // of AutoGetCollectionForReadLockFreePITCatalog is destroyed. + // of AutoGetCollectionForReadLockFree is destroyed. RecoveryUnit::ReadSource _originalReadSource; - // Whether or not this AutoGetCollectionForReadLockFreePITCatalog is being constructed while + // Whether or not this AutoGetCollectionForReadLockFree is being constructed while // there's already a lock-free read in progress. bool _isLockFreeReadSubOperation; - // The CollectionCatalogStasher must outlive the LockFreeReadsBlock below. ~LockFreeReadsBlock - // clears a flag that the ~CollectionCatalogStasher checks. - // - // Is not assigned-to after construction, but will have reset/stash called on yield/restore. - CollectionCatalogStasher _catalogStasher; - // Whether or not the calling context expects to conflict with secondary batch application. This // is just used for invariant checking. bool _callerExpectedToConflictWithSecondaryBatchApplication; @@ -444,89 +255,13 @@ class AutoGetCollectionForReadLockFreePITCatalog final { // May change after construction, when restoring from yield. NamespaceString _resolvedNss; - // Only set if _collectionPtr does not contain a nullptr and if the requested namesapce is a + // Only set if _collectionPtr does not contain a nullptr and if the requested namespace is a // view. // // May change after construction, when restoring from yield. std::shared_ptr _view; }; -/** - * Same as AutoGetCollectionForRead above except does not take collection, database or rstl locks. - * Takes the global lock and may take the PBWM, same as AutoGetCollectionForRead. Ensures a - * consistent in-memory and on-disk view of the storage catalog. 
- */ -class AutoGetCollectionForReadLockFree { -public: - AutoGetCollectionForReadLockFree(OperationContext* opCtx, - const NamespaceStringOrUUID& nsOrUUID, - AutoGetCollection::Options = {}); - - explicit operator bool() const { - return static_cast(getCollection()); - } - - const Collection* operator->() const { - return getCollection().get(); - } - - const CollectionPtr& operator*() const { - return getCollection(); - } - - const CollectionPtr& getCollection() const { - return stdx::visit( - OverloadedVisitor{ - [](auto&& impl) -> const CollectionPtr& { return impl.getCollection(); }, - [](stdx::monostate) -> const CollectionPtr& { MONGO_UNREACHABLE; }, - }, - _impl); - } - - const ViewDefinition* getView() const { - return stdx::visit(OverloadedVisitor{[](auto&& impl) { return impl.getView(); }, - [](stdx::monostate) -> const ViewDefinition* { - MONGO_UNREACHABLE; - }}, - _impl); - } - - const NamespaceString& getNss() const { - return stdx::visit( - OverloadedVisitor{[](auto&& impl) -> const NamespaceString& { return impl.getNss(); }, - [](stdx::monostate) -> const NamespaceString& { - MONGO_UNREACHABLE; - }}, - _impl); - } - - /** - * Indicates whether any namespace in 'secondaryNssOrUUIDs' is a view or sharded. - * - * The secondary namespaces won't be checked if getCollection() returns nullptr. - */ - bool isAnySecondaryNamespaceAViewOrSharded() const { - return stdx::visit( - OverloadedVisitor{ - [](auto&& impl) { return impl.isAnySecondaryNamespaceAViewOrSharded(); }, - [](stdx::monostate) -> bool { - MONGO_UNREACHABLE; - }}, - _impl); - } - -private: - // If the gPointInTimeCatalogLookups feature flag is enabled, this will contain an instance of - // AutoGetCollectionForReadLockFreePITCatalog. Otherwise, it will contain an instance of - // AutoGetCollectionForReadLockFreeLegacy. Note that stdx::monostate is required for default - // construction, since these other types are not movable, but after construction, the value - // should never be set to stdx::monostate. - stdx::variant - _impl; -}; - /** * Creates either an AutoGetCollectionForRead or AutoGetCollectionForReadLockFree depending on * whether a lock-free read is supported. @@ -694,6 +429,7 @@ class AutoGetCollectionForReadCommandMaybeLockFree { return getCollection(); } const CollectionPtr& getCollection() const; + StringData getCollectionType() const; const ViewDefinition* getView() const; const NamespaceString& getNss() const; bool isAnySecondaryNamespaceAViewOrSharded() const; @@ -712,10 +448,6 @@ class AutoReadLockFree { AutoReadLockFree(OperationContext* opCtx, Date_t deadline = Date_t::max()); private: - // The CollectionCatalogStasher must outlive the LockFreeReadsBlock below. ~LockFreeReadsBlock - // clears a flag that the ~CollectionCatalogStasher checks. - CollectionCatalogStasher _catalogStash; - // Sets a flag on the opCtx to inform subsequent code that the operation is running lock-free. LockFreeReadsBlock _lockFreeReadsBlock; @@ -739,10 +471,6 @@ class AutoGetDbForReadLockFree { Date_t deadline = Date_t::max()); private: - // The CollectionCatalogStasher must outlive the LockFreeReadsBlock below. ~LockFreeReadsBlock - // clears a flag that the ~CollectionCatalogStasher checks. - CollectionCatalogStasher _catalogStash; - // Sets a flag on the opCtx to inform subsequent code that the operation is running lock-free. LockFreeReadsBlock _lockFreeReadsBlock; @@ -801,7 +529,7 @@ class OldClientContext { * lock otherwise. MODE_IX acquisition will allow a read to participate in two-phase locking. 
* Throws an exception if 'system.views' is being queried within a transaction. */ -LockMode getLockModeForQuery(OperationContext* opCtx, const boost::optional& nss); +LockMode getLockModeForQuery(OperationContext* opCtx, const NamespaceStringOrUUID& nssOrUUID); /** * When in scope, enforces prepare conflicts in the storage engine. Reads and writes in this scope @@ -840,19 +568,4 @@ class EnforcePrepareConflictsBlock { PrepareConflictBehavior _originalValue; }; -/** - * TODO (SERVER-69813): Get rid of this when ShardServerCatalogCacheLoader will be removed. - * RAII type for letting secondary reads to block behind the PBW lock. - * Note: Do not add additional usage. This is only temporary for ease of backport. - */ -struct BlockSecondaryReadsDuringBatchApplication_DONT_USE { -public: - BlockSecondaryReadsDuringBatchApplication_DONT_USE(OperationContext* opCtx); - ~BlockSecondaryReadsDuringBatchApplication_DONT_USE(); - -private: - OperationContext* _opCtx{nullptr}; - boost::optional _originalSettings; -}; - } // namespace mongo diff --git a/src/mongo/db/db_raii_multi_collection_test.cpp b/src/mongo/db/db_raii_multi_collection_test.cpp index da00f5b9fa5cd..d4300cea388a6 100644 --- a/src/mongo/db/db_raii_multi_collection_test.cpp +++ b/src/mongo/db/db_raii_multi_collection_test.cpp @@ -27,12 +27,34 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" #include "mongo/db/db_raii.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -257,8 +279,8 @@ TEST_F(AutoGetCollectionMultiTest, LockedSecondaryNamespaceNotFound) { invariant(locker->isCollectionLockedForMode(_primaryNss, MODE_IS)); for (const auto& secondaryNss : _secondaryNssOrUUIDVec) { - invariant(locker->isDbLockedForMode(secondaryNss.nss()->dbName(), MODE_IS)); - invariant(locker->isCollectionLockedForMode(*secondaryNss.nss(), MODE_IS)); + invariant(locker->isDbLockedForMode(secondaryNss.nss().dbName(), MODE_IS)); + invariant(locker->isCollectionLockedForMode(secondaryNss.nss(), MODE_IS)); } const auto& coll = autoGet.getCollection(); diff --git a/src/mongo/db/db_raii_test.cpp b/src/mongo/db/db_raii_test.cpp index d2eba6919ccd7..66d190603a0d9 100644 --- a/src/mongo/db/db_raii_test.cpp +++ b/src/mongo/db/db_raii_test.cpp @@ -27,19 +27,53 @@ * it in the license file. 
*/ +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/client.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/find_common.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/snapshot_manager.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/dbdirectclient.cpp b/src/mongo/db/dbdirectclient.cpp index 2a4e030191063..3b0f2beb1ece4 100644 --- a/src/mongo/db/dbdirectclient.cpp +++ b/src/mongo/db/dbdirectclient.cpp @@ -29,17 +29,32 @@ #include "mongo/db/dbdirectclient.h" -#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/client.h" -#include "mongo/db/commands.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbmessage.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" #include "mongo/db/wire_version.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" #include "mongo/transport/service_entry_point.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -147,7 +162,8 @@ std::unique_ptr DBDirectClient::find(FindCommandRequest findRequ const ReadPreferenceSetting& readPref, ExhaustMode exhaustMode) { invariant(!findRequest.getReadConcern(), - "passing readConcern to DBDirectClient::find() is not supported"); + "passing readConcern to DBDirectClient::find() is not supported as it has to use the " + "parent operation's 
readConcern"); return DBClientBase::find(std::move(findRequest), readPref, exhaustMode); } @@ -197,11 +213,11 @@ long long DBDirectClient::count(const NamespaceStringOrUUID nsOrUuid, int limit, int skip, boost::optional readConcernObj) { - invariant(!readConcernObj, "passing readConcern to DBDirectClient functions is not supported"); + invariant(!readConcernObj, + "passing readConcern to DBDirectClient functions is not supported as it has to use " + "the parent operation's readConcern"); BSONObj cmdObj = _countCmd(nsOrUuid, query, options, limit, skip, boost::none); - - auto& dbName = (nsOrUuid.uuid() ? nsOrUuid.dbName().value() : (*nsOrUuid.nss()).dbName()); - auto request = OpMsgRequestBuilder::create(dbName, cmdObj); + auto request = OpMsgRequestBuilder::create(nsOrUuid.dbName(), cmdObj); // Calls runCommand instead of runCommandDirectly to ensure the tenant inforamtion of this // command gets validated and is used for parsing the command request. diff --git a/src/mongo/db/dbdirectclient.h b/src/mongo/db/dbdirectclient.h index 7d9982ad34f5b..91fee8fc69f16 100644 --- a/src/mongo/db/dbdirectclient.h +++ b/src/mongo/db/dbdirectclient.h @@ -29,11 +29,28 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" -#include "mongo/config.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/dbmessage.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/rpc/message.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_types.h" namespace mongo { diff --git a/src/mongo/db/dbdirectclient_test.cpp b/src/mongo/db/dbdirectclient_test.cpp index 84c5c08084092..9701d4c5752e1 100644 --- a/src/mongo/db/dbdirectclient_test.cpp +++ b/src/mongo/db/dbdirectclient_test.cpp @@ -28,8 +28,30 @@ */ #include "mongo/db/dbdirectclient.h" + +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" namespace mongo { namespace { diff --git a/src/mongo/db/dbhelpers.cpp b/src/mongo/db/dbhelpers.cpp index 67c6a3a1e91ca..11db0e987c3a6 100644 --- a/src/mongo/db/dbhelpers.cpp +++ b/src/mongo/db/dbhelpers.cpp @@ -28,30 +28,53 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/dbhelpers.h" - +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/catalog/collection.h" +#include 
"mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/index/btree_access_method.h" -#include "mongo/db/json.h" -#include "mongo/db/keypattern.h" +#include "mongo/db/dbhelpers.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/delete.h" #include "mongo/db/ops/update.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/internal_plans.h" -#include "mongo/db/query/query_planner.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/record_id_helpers.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/str.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -179,8 +202,9 @@ bool Helpers::findById(OperationContext* opCtx, if (indexFound) *indexFound = 1; - auto recordId = catalog->getEntry(desc)->accessMethod()->asSortedData()->findSingle( - opCtx, CollectionPtr(collection), query["_id"].wrap()); + const IndexCatalogEntry* entry = catalog->getEntry(desc); + auto recordId = entry->accessMethod()->asSortedData()->findSingle( + opCtx, CollectionPtr(collection), entry, query["_id"].wrap()); if (recordId.isNull()) return false; result = collection->docFor(opCtx, recordId).value(); @@ -190,7 +214,7 @@ bool Helpers::findById(OperationContext* opCtx, RecordId Helpers::findById(OperationContext* opCtx, const CollectionPtr& collection, const BSONObj& idquery) { - verify(collection); + MONGO_verify(collection); const IndexCatalog* catalog = collection->getIndexCatalog(); const IndexDescriptor* desc = catalog->findIdIndex(opCtx); if (!desc && clustered_util::isClusteredOnId(collection->getClusteredInfo())) { @@ -201,8 +225,9 @@ RecordId Helpers::findById(OperationContext* opCtx, } uassert(13430, "no _id index", desc); - return catalog->getEntry(desc)->accessMethod()->asSortedData()->findSingle( - opCtx, collection, idquery["_id"].wrap()); + const IndexCatalogEntry* entry = catalog->getEntry(desc); + return entry->accessMethod()->asSortedData()->findSingle( + opCtx, collection, entry, idquery["_id"].wrap()); } // Acquires necessary locks to read the collection with the given namespace. 
If this is an oplog @@ -271,24 +296,24 @@ bool Helpers::getLast(OperationContext* opCtx, const NamespaceString& nss, BSONO } UpdateResult Helpers::upsert(OperationContext* opCtx, - const NamespaceString& nss, + CollectionAcquisition& coll, const BSONObj& o, bool fromMigrate) { BSONElement e = o["_id"]; - verify(e.type()); + MONGO_verify(e.type()); BSONObj id = e.wrap(); - return upsert(opCtx, nss, id, o, fromMigrate); + return upsert(opCtx, coll, id, o, fromMigrate); } UpdateResult Helpers::upsert(OperationContext* opCtx, - const NamespaceString& nss, + CollectionAcquisition& coll, const BSONObj& filter, const BSONObj& updateMod, bool fromMigrate) { - OldClientContext context(opCtx, nss); + OldClientContext context(opCtx, coll.nss()); auto request = UpdateRequest(); - request.setNamespaceString(nss); + request.setNamespaceString(coll.nss()); request.setQuery(filter); request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(updateMod)); @@ -298,18 +323,18 @@ UpdateResult Helpers::upsert(OperationContext* opCtx, } request.setYieldPolicy(PlanYieldPolicy::YieldPolicy::NO_YIELD); - return ::mongo::update(opCtx, context.db(), request); + return ::mongo::update(opCtx, coll, request); } void Helpers::update(OperationContext* opCtx, - const NamespaceString& nss, + CollectionAcquisition& coll, const BSONObj& filter, const BSONObj& updateMod, bool fromMigrate) { - OldClientContext context(opCtx, nss); + OldClientContext context(opCtx, coll.nss()); auto request = UpdateRequest(); - request.setNamespaceString(nss); + request.setNamespaceString(coll.nss()); request.setQuery(filter); request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(updateMod)); @@ -318,19 +343,27 @@ void Helpers::update(OperationContext* opCtx, } request.setYieldPolicy(PlanYieldPolicy::YieldPolicy::NO_YIELD); - ::mongo::update(opCtx, context.db(), request); + ::mongo::update(opCtx, coll, request); } -void Helpers::putSingleton(OperationContext* opCtx, const NamespaceString& nss, BSONObj obj) { - OldClientContext context(opCtx, nss); +Status Helpers::insert(OperationContext* opCtx, + const CollectionAcquisition& coll, + const BSONObj& doc) { + OldClientContext context(opCtx, coll.nss()); + return collection_internal::insertDocument( + opCtx, coll.getCollectionPtr(), InsertStatement{doc}, &CurOp::get(opCtx)->debug()); +} + +void Helpers::putSingleton(OperationContext* opCtx, CollectionAcquisition& coll, BSONObj obj) { + OldClientContext context(opCtx, coll.nss()); auto request = UpdateRequest(); - request.setNamespaceString(nss); + request.setNamespaceString(coll.nss()); request.setUpdateModification(write_ops::UpdateModification::parseFromClassicUpdate(obj)); request.setUpsert(); - ::mongo::update(opCtx, context.db(), request); + ::mongo::update(opCtx, coll, request); CurOp::get(opCtx)->done(); } @@ -351,14 +384,10 @@ BSONObj Helpers::inferKeyPattern(const BSONObj& o) { return kpBuilder.obj(); } -void Helpers::emptyCollection(OperationContext* opCtx, const NamespaceString& nss) { - OldClientContext context(opCtx, nss); +void Helpers::emptyCollection(OperationContext* opCtx, const CollectionAcquisition& coll) { + OldClientContext context(opCtx, coll.nss()); repl::UnreplicatedWritesBlock uwb(opCtx); - CollectionPtr collection = CollectionPtr( - context.db() ? 
CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss) - : nullptr); - - deleteObjects(opCtx, collection, nss, BSONObj(), false); + deleteObjects(opCtx, coll, BSONObj(), false); } bool Helpers::findByIdAndNoopUpdate(OperationContext* opCtx, @@ -393,7 +422,8 @@ bool Helpers::findByIdAndNoopUpdate(OperationContext* opCtx, snapshottedDoc, result, collection_internal::kUpdateNoIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); return true; diff --git a/src/mongo/db/dbhelpers.h b/src/mongo/db/dbhelpers.h index c620351824aa0..f909524b43da2 100644 --- a/src/mongo/db/dbhelpers.h +++ b/src/mongo/db/dbhelpers.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/update_result.h" #include "mongo/db/record_id.h" @@ -40,6 +44,7 @@ class CollectionPtr; class Database; class OperationContext; class FindCommandRequest; +class CollectionAcquisition; /** * db helpers are helper functions and classes that let us easily manipulate the local @@ -121,7 +126,7 @@ struct Helpers { * Performs an upsert of "obj" into the collection "ns", with an empty update predicate. * Callers must have "ns" locked. */ - static void putSingleton(OperationContext* opCtx, const NamespaceString& nss, BSONObj obj); + static void putSingleton(OperationContext* opCtx, CollectionAcquisition& coll, BSONObj obj); /** * Callers are expected to hold the collection lock. @@ -129,7 +134,7 @@ struct Helpers { * o has to have an _id field or will assert */ static UpdateResult upsert(OperationContext* opCtx, - const NamespaceString& nss, + CollectionAcquisition& coll, const BSONObj& o, bool fromMigrate = false); @@ -140,7 +145,7 @@ struct Helpers { * on the same storage snapshot. */ static UpdateResult upsert(OperationContext* opCtx, - const NamespaceString& nss, + CollectionAcquisition& coll, const BSONObj& filter, const BSONObj& updateMod, bool fromMigrate = false); @@ -152,11 +157,18 @@ struct Helpers { * on the same storage snapshot. */ static void update(OperationContext* opCtx, - const NamespaceString& nss, + CollectionAcquisition& coll, const BSONObj& filter, const BSONObj& updateMod, bool fromMigrate = false); + /** + * Inserts document 'doc' into collection 'coll'. + */ + static Status insert(OperationContext* opCtx, + const CollectionAcquisition& coll, + const BSONObj& doc); + // TODO: this should be somewhere else probably /* Takes object o, and returns a new object with the * same field elements but the names stripped out. @@ -176,7 +188,7 @@ struct Helpers { * You do not need to set the database before calling. * Does not oplog the operation. */ - static void emptyCollection(OperationContext* opCtx, const NamespaceString& nss); + static void emptyCollection(OperationContext* opCtx, const CollectionAcquisition& coll); /* * Finds the doc and then runs a no-op update by running an update using the doc just read. Used diff --git a/src/mongo/db/dbmessage.cpp b/src/mongo/db/dbmessage.cpp index efdc725cbd326..30fbb75600545 100644 --- a/src/mongo/db/dbmessage.cpp +++ b/src/mongo/db/dbmessage.cpp @@ -27,12 +27,18 @@ * it in the license file. 
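The dbhelpers hunks above change Helpers::upsert/update/putSingleton/insert/emptyCollection to take a CollectionAcquisition instead of a NamespaceString, so the caller now owns the lock and shard-version acquisition. A caller-side sketch, assuming the shard-role API that dbhelpers.cpp now includes (acquireCollection, CollectionAcquisitionRequest::fromOpCtx, AcquisitionPrerequisites::kWrite); those names are shown for illustration and are not part of this hunk:

    // Acquire the collection (locks plus shard-version checks), then pass the
    // acquisition to the helper instead of the bare namespace.
    auto coll = acquireCollection(
        opCtx,
        CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
        MODE_IX);
    Helpers::upsert(opCtx, coll, BSON("_id" << 1 << "x" << 42), /*fromMigrate=*/false);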
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/dbmessage.h" - +#include "mongo/db/server_options.h" #include "mongo/platform/strnlen.h" -#include "mongo/rpc/object_check.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -61,7 +67,7 @@ DbMessage::DbMessage(const Message& msg) : _msg(msg), _nsStart(nullptr), _mark(n } const char* DbMessage::getns() const { - verify(messageShouldHaveNs()); + MONGO_verify(messageShouldHaveNs()); return _nsStart; } @@ -91,8 +97,8 @@ BSONObj DbMessage::nextJsObj() { } BSONObj js(_nextjsobj); - verify(js.objsize() >= 5); - verify(js.objsize() <= (_theEnd - _nextjsobj)); + MONGO_verify(js.objsize() >= 5); + MONGO_verify(js.objsize() <= (_theEnd - _nextjsobj)); _nextjsobj += js.objsize(); if (_nextjsobj >= _theEnd) @@ -105,7 +111,7 @@ void DbMessage::markReset(const char* toMark = nullptr) { toMark = _mark; } - verify(toMark); + MONGO_verify(toMark); _nextjsobj = toMark; } diff --git a/src/mongo/db/dbmessage.h b/src/mongo/db/dbmessage.h index 0b8e8ce84c718..4a0ff94e41540 100644 --- a/src/mongo/db/dbmessage.h +++ b/src/mongo/db/dbmessage.h @@ -29,8 +29,18 @@ #pragma once +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/encoded_value_storage.h" #include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" #include "mongo/client/constants.h" #include "mongo/db/jsobj.h" #include "mongo/db/server_options.h" diff --git a/src/mongo/db/dbmessage_test.cpp b/src/mongo/db/dbmessage_test.cpp index 733542537387d..0a1cec60bcb8d 100644 --- a/src/mongo/db/dbmessage_test.cpp +++ b/src/mongo/db/dbmessage_test.cpp @@ -29,9 +29,13 @@ #include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" #include "mongo/db/dbmessage.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { using std::string; diff --git a/src/mongo/db/default_baton.cpp b/src/mongo/db/default_baton.cpp index e1eeaea9a57d3..fc61fe8771be6 100644 --- a/src/mongo/db/default_baton.cpp +++ b/src/mongo/db/default_baton.cpp @@ -28,12 +28,21 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/default_baton.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/client.h" +#include "mongo/db/default_baton.h" #include "mongo/db/operation_context.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/default_baton.h b/src/mongo/db/default_baton.h index 74fd724fae14f..29b096792350f 100644 --- a/src/mongo/db/default_baton.h +++ b/src/mongo/db/default_baton.h @@ -34,7 +34,11 @@ #include "mongo/db/baton.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/clock_source.h" #include "mongo/util/functional.h" +#include "mongo/util/out_of_line_executor.h" +#include 
"mongo/util/time_support.h" +#include "mongo/util/waitable.h" namespace mongo { diff --git a/src/mongo/db/error_labels.cpp b/src/mongo/db/error_labels.cpp index 96e1b05ede6ce..e98f5e0a6747a 100644 --- a/src/mongo/db/error_labels.cpp +++ b/src/mongo/db/error_labels.cpp @@ -28,11 +28,30 @@ */ #include "mongo/db/error_labels.h" + +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/commands.h" #include "mongo/db/curop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/exit.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -105,7 +124,8 @@ bool ErrorLabelBuilder::isResumableChangeStreamError() const { (_commandName == "aggregate" || _commandName == "getMore") && _code && !_wcCode && (ErrorCodes::isRetriableError(*_code) || ErrorCodes::isNetworkError(*_code) || ErrorCodes::isNeedRetargettingError(*_code) || _code == ErrorCodes::RetryChangeStream || - _code == ErrorCodes::FailedToSatisfyReadPreference); + _code == ErrorCodes::FailedToSatisfyReadPreference || + _code == ErrorCodes::ResumeTenantChangeStream); // If the command or exception is not relevant, bail out early. if (!mayNeedResumableChangeStreamErrorLabel) { @@ -120,7 +140,7 @@ bool ErrorLabelBuilder::isResumableChangeStreamError() const { : CurOp::get(_opCtx)->originatingCommand()); // Get the namespace string from CurOp. We will need it to build the LiteParsedPipeline. - const auto nss = NamespaceString{CurOp::get(_opCtx)->getNS()}; + const auto& nss = CurOp::get(_opCtx)->getNSS(); bool apiStrict = APIParameters::get(_opCtx).getAPIStrict().value_or(false); // Do enough parsing to confirm that this is a well-formed pipeline with a $changeStream. diff --git a/src/mongo/db/error_labels.h b/src/mongo/db/error_labels.h index 08bdb9ff174b1..90da61ad22f0c 100644 --- a/src/mongo/db/error_labels.h +++ b/src/mongo/db/error_labels.h @@ -29,7 +29,16 @@ #pragma once +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" namespace mongo { static constexpr StringData kErrorLabelsFieldName = "errorLabels"_sd; diff --git a/src/mongo/db/error_labels_test.cpp b/src/mongo/db/error_labels_test.cpp index e7db1adb6ba36..01fb05a3c2bcd 100644 --- a/src/mongo/db/error_labels_test.cpp +++ b/src/mongo/db/error_labels_test.cpp @@ -27,16 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" #include "mongo/db/curop.h" #include "mongo/db/error_labels.h" -#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/db/session/logical_session_id.h" -#include "mongo/unittest/unittest.h" +#include "mongo/rpc/message.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" namespace mongo { namespace { @@ -150,7 +165,7 @@ class ErrorLabelBuilderTest : public ServiceContextTest { }; TEST_F(ErrorLabelBuilderTest, NonErrorCodesHaveNoLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; std::string commandName = "insert"; ErrorLabelBuilder builder(opCtx(), sessionInfo, @@ -168,7 +183,7 @@ TEST_F(ErrorLabelBuilderTest, NonErrorCodesHaveNoLabel) { } TEST_F(ErrorLabelBuilderTest, NonTransactionsHaveNoTransientTransactionErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; std::string commandName = "insert"; ErrorLabelBuilder builder(opCtx(), sessionInfo, @@ -183,7 +198,7 @@ TEST_F(ErrorLabelBuilderTest, NonTransactionsHaveNoTransientTransactionErrorLabe } TEST_F(ErrorLabelBuilderTest, RetryableWritesHaveNoTransientTransactionErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "insert"; ErrorLabelBuilder builder(opCtx(), @@ -199,7 +214,7 @@ TEST_F(ErrorLabelBuilderTest, RetryableWritesHaveNoTransientTransactionErrorLabe } TEST_F(ErrorLabelBuilderTest, NonTransientTransactionErrorsHaveNoTransientTransactionErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); sessionInfo.setAutocommit(false); std::string commandName = "commitTransaction"; @@ -216,7 +231,7 @@ TEST_F(ErrorLabelBuilderTest, NonTransientTransactionErrorsHaveNoTransientTransa } TEST_F(ErrorLabelBuilderTest, TransientTransactionErrorsHaveTransientTransactionErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); sessionInfo.setAutocommit(false); std::string commandName = "commitTransaction"; @@ -235,7 +250,7 @@ TEST_F(ErrorLabelBuilderTest, TransientTransactionErrorsHaveTransientTransaction TEST_F( ErrorLabelBuilderTest, TransientTransactionErrorWithRetryableWriteConcernErrorHasTransientTransactionErrorLabelOnly) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); sessionInfo.setAutocommit(false); std::string commandName = "commitTransaction"; @@ -260,7 +275,7 @@ TEST_F( } TEST_F(ErrorLabelBuilderTest, NonRetryableWritesHaveNoRetryableWriteErrorLabel) { 
- OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; std::string commandName = "insert"; ErrorLabelBuilder builder(opCtx(), sessionInfo, @@ -282,7 +297,7 @@ TEST_F(ErrorLabelBuilderTest, NonRetryableWritesHaveNoRetryableWriteErrorLabel) } TEST_F(ErrorLabelBuilderTest, NonRetryableWriteErrorsHaveNoRetryableWriteErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; ErrorLabelBuilder builder(opCtx(), @@ -298,7 +313,7 @@ TEST_F(ErrorLabelBuilderTest, NonRetryableWriteErrorsHaveNoRetryableWriteErrorLa } TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsHaveRetryableWriteErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; ErrorLabelBuilder builder(opCtx(), @@ -314,7 +329,7 @@ TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsHaveRetryableWriteErrorLabel) } TEST_F(ErrorLabelBuilderTest, NonLocalShutDownErrorsOnMongosDoNotHaveRetryableWriteErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; ErrorLabelBuilder builder(opCtx(), @@ -331,7 +346,7 @@ TEST_F(ErrorLabelBuilderTest, NonLocalShutDownErrorsOnMongosDoNotHaveRetryableWr TEST_F(ErrorLabelBuilderTest, LocalShutDownErrorsOnMongosHaveRetryableWriteErrorLabelInterruptedAtShutdown) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; FailPointEnableBlock failPoint("errorLabelBuilderMockShutdown"); @@ -349,7 +364,7 @@ TEST_F(ErrorLabelBuilderTest, TEST_F(ErrorLabelBuilderTest, LocalShutDownErrorsOnMongosHaveRetryableWriteErrorLabelCallbackCanceled) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; FailPointEnableBlock failPoint("errorLabelBuilderMockShutdown"); @@ -367,7 +382,7 @@ TEST_F(ErrorLabelBuilderTest, TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsHaveNoRetryableWriteErrorLabelForInternalClients) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; ErrorLabelBuilder builder(opCtx(), @@ -384,7 +399,7 @@ TEST_F(ErrorLabelBuilderTest, TEST_F(ErrorLabelBuilderTest, NonRetryableWriteErrorsInWriteConcernErrorsHaveNoRetryableWriteErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; ErrorLabelBuilder builder(opCtx(), @@ -401,7 +416,7 @@ TEST_F(ErrorLabelBuilderTest, TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsInWriteConcernErrorsHaveRetryableWriteErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; ErrorLabelBuilder builder(opCtx(), @@ -417,7 +432,7 @@ 
TEST_F(ErrorLabelBuilderTest, } TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsOnCommitAbortHaveRetryableWriteErrorLabel) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); sessionInfo.setAutocommit(false); std::string commandName; @@ -489,7 +504,7 @@ TEST_F(ErrorLabelBuilderTest, RetryableWriteErrorsOnCommitAbortHaveRetryableWrit } TEST_F(ErrorLabelBuilderTest, NonResumableChangeStreamError) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; std::string commandName; ErrorLabelBuilder builder(opCtx(), sessionInfo, @@ -504,12 +519,12 @@ TEST_F(ErrorLabelBuilderTest, NonResumableChangeStreamError) { } TEST_F(ErrorLabelBuilderTest, ResumableChangeStreamErrorAppliesToChangeStreamAggregations) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; // Build the aggregation command and confirm that it parses correctly, so we know that the error // is the only factor that determines the success or failure of isResumableChangeStreamError(). auto cmdObj = BSON("aggregate" << nss().coll() << "pipeline" << BSON_ARRAY(BSON("$changeStream" << BSONObj())) << "cursor" - << BSONObj() << "$db" << nss().db()); + << BSONObj() << "$db" << nss().db_forTest()); auto aggRequest = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests(nss(), cmdObj)); ASSERT_TRUE(LiteParsedPipeline(aggRequest).hasChangeStream()); @@ -543,12 +558,12 @@ TEST_F(ErrorLabelBuilderTest, ResumableChangeStreamErrorAppliesToChangeStreamAgg } TEST_F(ErrorLabelBuilderTest, ResumableChangeStreamErrorDoesNotApplyToNonResumableErrors) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; // Build the aggregation command and confirm that it parses correctly, so we know that the error // is the only factor that determines the success or failure of isResumableChangeStreamError(). auto cmdObj = BSON("aggregate" << nss().coll() << "pipeline" << BSON_ARRAY(BSON("$changeStream" << BSONObj())) << "cursor" - << BSONObj() << "$db" << nss().db()); + << BSONObj() << "$db" << nss().db_forTest()); auto aggRequest = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests(nss(), cmdObj)); ASSERT_TRUE(LiteParsedPipeline(aggRequest).hasChangeStream()); @@ -582,12 +597,12 @@ TEST_F(ErrorLabelBuilderTest, ResumableChangeStreamErrorDoesNotApplyToNonResumab } TEST_F(ErrorLabelBuilderTest, ResumableChangeStreamErrorDoesNotApplyToNonChangeStreamAggregations) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; // Build the aggregation command and confirm that it parses correctly, so we know that the error // is the only factor that determines the success or failure of isResumableChangeStreamError(). 
auto cmdObj = BSON("aggregate" << nss().coll() << "pipeline" << BSON_ARRAY(BSON("$match" << BSONObj())) - << "cursor" << BSONObj() << "$db" << nss().db()); + << "cursor" << BSONObj() << "$db" << nss().db_forTest()); auto aggRequest = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests(nss(), cmdObj)); ASSERT_FALSE(LiteParsedPipeline(aggRequest).hasChangeStream()); @@ -621,7 +636,7 @@ TEST_F(ErrorLabelBuilderTest, ResumableChangeStreamErrorDoesNotApplyToNonChangeS } TEST_F(ErrorLabelBuilderTest, ResumableChangeStreamErrorDoesNotApplyToNonAggregations) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; auto cmdObj = BSON("find" << nss().coll() << "filter" << BSONObj()); // The label does not apply to a "find" command. std::string commandName = "find"; @@ -652,7 +667,7 @@ TEST_F(ErrorLabelBuilderTest, ResumableChangeStreamErrorDoesNotApplyToNonAggrega } TEST_F(ErrorLabelBuilderTest, NoWritesPerformedLabelApplied) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; std::string commandName = "find"; ErrorLabelBuilder builder(opCtx(), sessionInfo, @@ -667,7 +682,7 @@ TEST_F(ErrorLabelBuilderTest, NoWritesPerformedLabelApplied) { } TEST_F(ErrorLabelBuilderTest, NoWritesPerformedLabelNotAppliedAfterWrite) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; std::string commandName = "update"; ErrorLabelBuilder builder(opCtx(), sessionInfo, @@ -682,7 +697,7 @@ TEST_F(ErrorLabelBuilderTest, NoWritesPerformedLabelNotAppliedAfterWrite) { } TEST_F(ErrorLabelBuilderTest, NoWritesPerformedLabelNotAppliedIfUnknown) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; std::string commandName = "update"; ErrorLabelBuilder builder(opCtx(), sessionInfo, @@ -697,7 +712,7 @@ TEST_F(ErrorLabelBuilderTest, NoWritesPerformedLabelNotAppliedIfUnknown) { } TEST_F(ErrorLabelBuilderTest, NoWritesPerformedAndRetryableWriteAppliesBothLabels) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); std::string commandName = "update"; auto actualErrorLabels = getErrorLabels(opCtx(), @@ -716,7 +731,7 @@ TEST_F(ErrorLabelBuilderTest, NoWritesPerformedAndRetryableWriteAppliesBothLabel } TEST_F(ErrorLabelBuilderTest, NoWritesPerformedNotAppliedDuringOrdinaryUpdate) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; std::string commandName = "update"; auto actualErrorLabels = getErrorLabels(opCtx(), sessionInfo, @@ -731,7 +746,7 @@ TEST_F(ErrorLabelBuilderTest, NoWritesPerformedNotAppliedDuringOrdinaryUpdate) { } TEST_F(ErrorLabelBuilderTest, NoWritesPerformedNotAppliedDuringTransientTransactionError) { - OperationSessionInfoFromClient sessionInfo; + OperationSessionInfoFromClient sessionInfo{LogicalSessionFromClient(UUID::gen())}; sessionInfo.setTxnNumber(1); sessionInfo.setAutocommit(false); std::string commandName = "commitTransaction"; diff --git a/src/mongo/db/exec/SConscript b/src/mongo/db/exec/SConscript index e772b0598ccb5..7a02d8b0ce86f 100644 --- a/src/mongo/db/exec/SConscript +++ b/src/mongo/db/exec/SConscript @@ -32,7 +32,6 @@ env.Library( target="scoped_timer", source=[ "scoped_timer.cpp", - 
"scoped_timer_factory.cpp", ], LIBDEPS=[ '$BUILD_DIR/mongo/db/service_context', @@ -56,7 +55,8 @@ env.Library( env.Library( target="bucket_unpacker", source=[ - "bucket_unpacker.cpp", + "timeseries/bucket_spec.cpp", + "timeseries/bucket_unpacker.cpp", ], LIBDEPS=[ "$BUILD_DIR/mongo/db/query_expressions", diff --git a/src/mongo/db/exec/add_fields_projection_executor.cpp b/src/mongo/db/exec/add_fields_projection_executor.cpp index fa93a3828d5d8..1ecba76297349 100644 --- a/src/mongo/db/exec/add_fields_projection_executor.cpp +++ b/src/mongo/db/exec/add_fields_projection_executor.cpp @@ -27,13 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/add_fields_projection_executor.h" - -#include +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/add_fields_projection_executor.h" #include "mongo/db/matcher/expression_algo.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo::projection_executor { namespace { diff --git a/src/mongo/db/exec/add_fields_projection_executor.h b/src/mongo/db/exec/add_fields_projection_executor.h index 48a48a91f749a..30f973a23399f 100644 --- a/src/mongo/db/exec/add_fields_projection_executor.h +++ b/src/mongo/db/exec/add_fields_projection_executor.h @@ -30,10 +30,32 @@ #pragma once #include - +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/inclusion_projection_executor.h" +#include "mongo/db/exec/projection_executor.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/string_map.h" namespace mongo::projection_executor { /** diff --git a/src/mongo/db/exec/add_fields_projection_executor_test.cpp b/src/mongo/db/exec/add_fields_projection_executor_test.cpp index 7230c6495c846..8cdcfdc3d274f 100644 --- a/src/mongo/db/exec/add_fields_projection_executor_test.cpp +++ b/src/mongo/db/exec/add_fields_projection_executor_test.cpp @@ -27,20 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include +#include +#include + #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/exec/add_fields_projection_executor.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::projection_executor { namespace { diff --git a/src/mongo/db/exec/and_common.h b/src/mongo/db/exec/and_common.h index 93abdf84d9622..cc859d52f1313 100644 --- a/src/mongo/db/exec/and_common.h +++ b/src/mongo/db/exec/and_common.h @@ -48,9 +48,9 @@ class AndCommon { // Both 'src' and 'dest' must have a RecordId (and they must be the same RecordId), as // we should have just matched them according to this RecordId while doing an // intersection. - verify(dest->hasRecordId()); - verify(src.hasRecordId()); - verify(dest->recordId == src.recordId); + MONGO_verify(dest->hasRecordId()); + MONGO_verify(src.hasRecordId()); + MONGO_verify(dest->recordId == src.recordId); dest->metadata().mergeWith(src.metadata()); diff --git a/src/mongo/db/exec/and_hash.cpp b/src/mongo/db/exec/and_hash.cpp index f620db00faf45..ed8119232daf0 100644 --- a/src/mongo/db/exec/and_hash.cpp +++ b/src/mongo/db/exec/and_hash.cpp @@ -30,12 +30,18 @@ #include "mongo/db/exec/and_hash.h" #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/exec/and_common.h" -#include "mongo/db/exec/scoped_timer.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace { @@ -179,10 +185,10 @@ PlanStage::StageState AndHashStage::doWork(WorkingSetID* out) { // hash map. // We should be EOF if we're not hashing results and the dataMap is empty. - verify(!_dataMap.empty()); + MONGO_verify(!_dataMap.empty()); // We probe _dataMap with the last child. - verify(_currentChild == _children.size() - 1); + MONGO_verify(_currentChild == _children.size() - 1); // Get the next result for the (_children.size() - 1)-th child. 
StageState childStatus = workChild(_children.size() - 1, out); @@ -228,7 +234,7 @@ PlanStage::StageState AndHashStage::workChild(size_t childNo, WorkingSetID* out) } PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) { - verify(_currentChild == 0); + MONGO_verify(_currentChild == 0); WorkingSetID id = WorkingSet::INVALID_ID; StageState childStatus = workChild(0, &id); @@ -279,7 +285,7 @@ PlanStage::StageState AndHashStage::readFirstChild(WorkingSetID* out) { } PlanStage::StageState AndHashStage::hashOtherChildren(WorkingSetID* out) { - verify(_currentChild > 0); + MONGO_verify(_currentChild > 0); WorkingSetID id = WorkingSet::INVALID_ID; StageState childStatus = workChild(_currentChild, &id); diff --git a/src/mongo/db/exec/and_hash.h b/src/mongo/db/exec/and_hash.h index 4bd591952f570..46eff96c2ac63 100644 --- a/src/mongo/db/exec/and_hash.h +++ b/src/mongo/db/exec/and_hash.h @@ -29,11 +29,17 @@ #pragma once +#include +#include #include #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" diff --git a/src/mongo/db/exec/and_sorted.cpp b/src/mongo/db/exec/and_sorted.cpp index 5d35a7a1bd245..8165e3d5a088c 100644 --- a/src/mongo/db/exec/and_sorted.cpp +++ b/src/mongo/db/exec/and_sorted.cpp @@ -29,12 +29,15 @@ #include "mongo/db/exec/and_sorted.h" +#include #include +#include +#include + +#include #include "mongo/db/exec/and_common.h" -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -83,9 +86,9 @@ PlanStage::StageState AndSortedStage::doWork(WorkingSetID* out) { } PlanStage::StageState AndSortedStage::getTargetRecordId(WorkingSetID* out) { - verify(numeric_limits::max() == _targetNode); - verify(WorkingSet::INVALID_ID == _targetId); - verify(RecordId() == _targetRecordId); + MONGO_verify(numeric_limits::max() == _targetNode); + MONGO_verify(WorkingSet::INVALID_ID == _targetId); + MONGO_verify(RecordId() == _targetRecordId); // Pick one, and get a RecordId to work toward. WorkingSetID id = WorkingSet::INVALID_ID; @@ -127,8 +130,8 @@ PlanStage::StageState AndSortedStage::getTargetRecordId(WorkingSetID* out) { } PlanStage::StageState AndSortedStage::moveTowardTargetRecordId(WorkingSetID* out) { - verify(numeric_limits::max() != _targetNode); - verify(WorkingSet::INVALID_ID != _targetId); + MONGO_verify(numeric_limits::max() != _targetNode); + MONGO_verify(WorkingSet::INVALID_ID != _targetId); // We have nodes that haven't hit _targetRecordId yet. 
size_t workingChildNumber = _workingTowardRep.front();
diff --git a/src/mongo/db/exec/and_sorted.h b/src/mongo/db/exec/and_sorted.h index 0236d4a294af9..5fa29483c4f6c 100644 --- a/src/mongo/db/exec/and_sorted.h +++ b/src/mongo/db/exec/and_sorted.h @@ -29,12 +29,18 @@ #pragma once +#include +#include #include #include #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" namespace mongo {
diff --git a/src/mongo/db/exec/batched_delete_stage.cpp b/src/mongo/db/exec/batched_delete_stage.cpp index 0d917f69e9d38..561fa09d1ddbe 100644 --- a/src/mongo/db/exec/batched_delete_stage.cpp +++ b/src/mongo/db/exec/batched_delete_stage.cpp @@ -28,25 +28,50 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/batched_delete_stage.h" - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/exec/batched_delete_stage.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/plan_stage.h" -#include "mongo/db/exec/scoped_timer.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/exec/working_set_common.h" #include "mongo/db/exec/write_stage_common.h" -#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_impl.h" #include "mongo/db/service_context.h" -#include "mongo/logv2/log.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -114,7 +139,8 @@ struct BatchedDeletesSSS : ServerStatusSection { } batchedDeletesSSS; // Wrapper for write_stage_common::ensureStillMatches() which also updates the 'refetchesDueToYield' -// serverStatus metric. +// serverStatus metric. As with ensureStillMatches, if false is returned, the WorkingSetMember +// referenced by 'id' is no longer valid, and must not be used except for freeing the WSM.
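The added comment above pins down the wrapper's contract: when ensureStillMatchesAndUpdateStats() returns false, the WorkingSetMember behind 'id' has been invalidated (typically by a yield changing the snapshot) and may only be skipped or freed. A minimal sketch of the calling pattern that contract implies is below; it is illustrative only, and the local names `id`, `offset` and `recordsToSkip` are assumptions rather than code from this patch:

```cpp
// Hedged sketch of the documented contract; not part of the patch itself.
WorkingSetID id = _stagedDeletesBuffer.at(offset);
if (!ensureStillMatchesAndUpdateStats(collectionPtr(), opCtx(), _ws, id, _params->canonicalQuery)) {
    // The member's document is no longer valid and must not be dereferenced again.
    // Record it for skipping so the buffer can free the WSM later.
    recordsToSkip->insert(id);
} else {
    WorkingSetMember* member = _ws->get(id);  // still safe to read member->doc here
    // ... proceed to stage/commit the delete for this member ...
}
```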
bool ensureStillMatchesAndUpdateStats(const CollectionPtr& collection, OperationContext* opCtx, WorkingSet* ws, @@ -132,7 +158,7 @@ BatchedDeleteStage::BatchedDeleteStage( std::unique_ptr params, std::unique_ptr batchedDeleteParams, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child) : DeleteStage::DeleteStage( kStageType.rawData(), expCtx, std::move(params), ws, collection, child), @@ -146,6 +172,8 @@ BatchedDeleteStage::BatchedDeleteStage( tassert(6303800, "batched deletions only support multi-document deletions (multi: true)", _params->isMulti); + // TODO SERVER-66279 remove the following tassert once the range-deleter uses batched + // deletions, since it's the only component expected to delete with `fromMigrate=true` tassert(6303801, "batched deletions do not support the 'fromMigrate' parameter", !_params->fromMigrate); @@ -247,7 +275,6 @@ PlanStage::StageState BatchedDeleteStage::_deleteBatch(WorkingSetID* out) { handlePlanStageYield( expCtx(), "BatchedDeleteStage saveState", - collection()->ns().ns(), [&] { child()->saveState(); return PlanStage::NEED_TIME /* unused */; }, [&] { @@ -267,7 +294,6 @@ PlanStage::StageState BatchedDeleteStage::_deleteBatch(WorkingSetID* out) { const auto ret = handlePlanStageYield( expCtx(), "BatchedDeleteStage::_deleteBatch", - collection()->ns().ns(), [&] { timeInBatch = _commitBatch(out, &recordsToSkip, &docsDeleted, &bytesDeleted, &bufferOffset); @@ -340,24 +366,40 @@ long long BatchedDeleteStage::_commitBatch(WorkingSetID* out, } auto workingSetMemberID = _stagedDeletesBuffer.at(*bufferOffset); - - // The PlanExecutor YieldPolicy may change snapshots between calls to 'doWork()'. - // Different documents may have different snapshots. - bool docStillMatches = ensureStillMatchesAndUpdateStats( - collection(), opCtx(), _ws, workingSetMemberID, _params->canonicalQuery); - WorkingSetMember* member = _ws->get(workingSetMemberID); - - // Determine whether the document being deleted is owned by this shard, and the action - // to undertake if it isn't. - bool writeToOrphan = false; - auto action = _preWriteFilter.computeActionAndLogSpecialCases( - member->doc.value(), "batched delete"_sd, collection()->ns()); - if (!docStillMatches || action == write_stage_common::PreWriteFilter::Action::kSkip) { - recordsToSkip->insert(workingSetMemberID); - continue; + bool writeToOrphan = _params->fromMigrate; + + // The assumption is that fromMigrate implies the documents cannot change, and there is no + // need to ensure they still match. + if (!_params->fromMigrate) { + using write_stage_common::PreWriteFilter; + // Warning: on Action::kSkip, the WSM's underlying document is no longer valid. + const PreWriteFilter::Action action = [&]() { + // The PlanExecutor YieldPolicy may change snapshots between calls to 'doWork()'. + // Different documents may have different snapshots. + const bool docStillMatches = ensureStillMatchesAndUpdateStats( + collectionPtr(), opCtx(), _ws, workingSetMemberID, _params->canonicalQuery); + + // Warning: if docStillMatches is false, the WSM's underlying Document/BSONObj is + // no longer valid. + if (!docStillMatches) { + return PreWriteFilter::Action::kSkip; + } + // Determine whether the document being deleted is owned by this shard, and the + // action to undertake if it isn't.
+ return _preWriteFilter.computeActionAndLogSpecialCases( + member->doc.value(), "batched delete"_sd, collectionPtr()->ns()); + }(); + + // Skip the document, as it either no longer exists, or has been filtered by the + // PreWriteFilter. + if (PreWriteFilter::Action::kSkip == action) { + recordsToSkip->insert(workingSetMemberID); + continue; + } + + writeToOrphan = action == PreWriteFilter::Action::kWriteAsFromMigrate; } - writeToOrphan = action == write_stage_common::PreWriteFilter::Action::kWriteAsFromMigrate; auto retryableWrite = write_stage_common::isRetryableWrite(opCtx()); Snapshotted memberDoc = member->doc; @@ -378,12 +420,12 @@ long long BatchedDeleteStage::_commitBatch(WorkingSetID* out, collection_internal::deleteDocument( opCtx(), - collection(), + collectionPtr(), Snapshotted(memberDoc.snapshotId(), bsonObjDoc), _params->stmtId, member->recordId, _params->opDebug, - _params->fromMigrate || writeToOrphan, + writeToOrphan, false, _params->returnDeleted ? collection_internal::StoreDeletedDoc::On : collection_internal::StoreDeletedDoc::Off, @@ -404,9 +446,9 @@ long long BatchedDeleteStage::_commitBatch(WorkingSetID* out, // committed + the number of documents deleted in the current unit of work. // Assume nDocs is positive. - return data.hasField("sleepMs") && data.hasField("ns") && - data.getStringField("ns") == collection()->ns().toString() && - data.hasField("nDocs") && + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "ns"_sd); + return data.hasField("sleepMs") && !fpNss.isEmpty() && + collectionPtr()->ns() == fpNss && data.hasField("nDocs") && _specificStats.docsDeleted + *docsDeleted >= static_cast(data.getIntField("nDocs")); }); @@ -474,9 +516,8 @@ PlanStage::StageState BatchedDeleteStage::_tryRestoreState(WorkingSetID* out) { return handlePlanStageYield( expCtx(), "BatchedDeleteStage::_tryRestoreState", - collection()->ns().ns(), [&] { - child()->restoreState(&collection()); + child()->restoreState(&collectionPtr()); return PlanStage::NEED_TIME; }, [&] { diff --git a/src/mongo/db/exec/batched_delete_stage.h b/src/mongo/db/exec/batched_delete_stage.h index 511eacee901c1..899d5d5f18b10 100644 --- a/src/mongo/db/exec/batched_delete_stage.h +++ b/src/mongo/db/exec/batched_delete_stage.h @@ -29,11 +29,25 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/batched_delete_stage_buffer.h" #include "mongo/db/exec/batched_delete_stage_gen.h" #include "mongo/db/exec/delete_stage.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/exec/write_stage_common.h" #include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/shard_role.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/duration.h" +#include "mongo/util/timer.h" namespace mongo { @@ -107,7 +121,7 @@ class BatchedDeleteStage final : public DeleteStage { std::unique_ptr params, std::unique_ptr batchedDeleteParams, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child); ~BatchedDeleteStage(); diff --git a/src/mongo/db/exec/batched_delete_stage_buffer.cpp b/src/mongo/db/exec/batched_delete_stage_buffer.cpp index 8d8575edd5f15..af712c0017804 100644 --- a/src/mongo/db/exec/batched_delete_stage_buffer.cpp +++ b/src/mongo/db/exec/batched_delete_stage_buffer.cpp @@ -28,13 +28,13 @@ */ -#include "mongo/platform/basic.h" - -#include 
"mongo/db/exec/batched_delete_stage_buffer.h" - +#include #include -#include "mongo/logv2/log.h" +#include + +#include "mongo/db/exec/batched_delete_stage_buffer.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/exec/batched_delete_stage_buffer.h b/src/mongo/db/exec/batched_delete_stage_buffer.h index 11624e90cabaa..2b0ff7b5170d5 100644 --- a/src/mongo/db/exec/batched_delete_stage_buffer.h +++ b/src/mongo/db/exec/batched_delete_stage_buffer.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include + +#include "mongo/db/exec/working_set.h" #include "mongo/db/exec/working_set_common.h" namespace mongo { diff --git a/src/mongo/db/exec/bucket_unpacker.cpp b/src/mongo/db/exec/bucket_unpacker.cpp deleted file mode 100644 index 1a177a98cb67f..0000000000000 --- a/src/mongo/db/exec/bucket_unpacker.cpp +++ /dev/null @@ -1,1804 +0,0 @@ -/** - * Copyright (C) 2020-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/exec/bucket_unpacker.h" - -#include - -#include "mongo/bson/util/bsoncolumn.h" -#include "mongo/db/matcher/expression.h" -#include "mongo/db/matcher/expression_algo.h" -#include "mongo/db/matcher/expression_always_boolean.h" -#include "mongo/db/matcher/expression_expr.h" -#include "mongo/db/matcher/expression_geo.h" -#include "mongo/db/matcher/expression_internal_bucket_geo_within.h" -#include "mongo/db/matcher/expression_internal_expr_comparison.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/expression_tree.h" -#include "mongo/db/matcher/extensions_callback_noop.h" -#include "mongo/db/matcher/rewrite_expr.h" -#include "mongo/db/pipeline/expression.h" -#include "mongo/db/timeseries/timeseries_options.h" - -#include "mongo/logv2/log.h" -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery - -namespace mongo { - -using IneligiblePredicatePolicy = BucketSpec::IneligiblePredicatePolicy; - -bool BucketSpec::fieldIsComputed(StringData field) const { - return std::any_of( - _computedMetaProjFields.begin(), _computedMetaProjFields.end(), [&](auto& s) { - return s == field || expression::isPathPrefixOf(field, s) || - expression::isPathPrefixOf(s, field); - }); -} - -namespace { - -constexpr long long max32BitEpochMillis = - static_cast(std::numeric_limits::max()) * 1000; - -/** - * Creates an ObjectId initialized with an appropriate timestamp corresponding to 'rhs' and - * returns it as a Value. - */ -template -auto constructObjectIdValue(const BSONElement& rhs, int bucketMaxSpanSeconds) { - // Indicates whether to initialize an ObjectId with a max or min value for the non-date bytes. - enum class OIDInit : bool { max, min }; - // Make an ObjectId cooresponding to a date value. As a conversion from date to ObjectId will - // truncate milliseconds, we round up when needed to prevent missing results. - auto makeDateOID = [](auto&& date, auto&& maxOrMin, bool roundMillisUpToSecond = false) { - if (roundMillisUpToSecond && (date.toMillisSinceEpoch() % 1000 != 0)) { - date += Seconds{1}; - } - - auto oid = OID{}; - oid.init(date, maxOrMin == OIDInit::max); - return oid; - }; - // Make an ObjectId corresponding to a date value adjusted by the max bucket value for the - // time series view that this query operates on. This predicate can be used in a comparison - // to gauge a max value for a given bucket, rather than a min value. - auto makeMaxAdjustedDateOID = [&](auto&& date, auto&& maxOrMin) { - // Ensure we don't underflow. - if (date.toDurationSinceEpoch() >= Seconds{bucketMaxSpanSeconds}) - // Subtract max bucket range. - return makeDateOID(date - Seconds{bucketMaxSpanSeconds}, maxOrMin); - else - // Since we're out of range, just make a predicate that is true for all dates. - // We'll never use an OID for a date < 0 due to OID range limitations, so we set the - // minimum date to 0. - return makeDateOID(Date_t::fromMillisSinceEpoch(0LL), OIDInit::min); - }; - - // Because the OID timestamp is only 4 bytes, we can't convert larger dates - invariant(rhs.date().toMillisSinceEpoch() >= 0LL); - invariant(rhs.date().toMillisSinceEpoch() <= max32BitEpochMillis); - - // An ObjectId consists of a 4-byte timestamp, as well as a unique value and a counter, thus - // two ObjectIds initialized with the same date will have different values. 
To ensure that we - // do not incorrectly include or exclude any buckets, depending on the operator we will - // construct either the largest or the smallest ObjectId possible with the corresponding date. - // If the query operand is not of type Date, the original query will not match on any documents - // because documents in a time-series collection must have a timeField of type Date. We will - // make this case faster by keeping the ObjectId as the lowest or highest possible value so as - // to eliminate all buckets. - if constexpr (std::is_same_v) { - return Value{makeDateOID(rhs.date(), OIDInit::min, true /*roundMillisUpToSecond*/)}; - } else if constexpr (std::is_same_v) { - return Value{makeDateOID(rhs.date(), OIDInit::max, true /*roundMillisUpToSecond*/)}; - } else if constexpr (std::is_same_v) { - return Value{makeMaxAdjustedDateOID(rhs.date(), OIDInit::max)}; - } else if constexpr (std::is_same_v) { - return Value{makeMaxAdjustedDateOID(rhs.date(), OIDInit::min)}; - } - MONGO_UNREACHABLE_TASSERT(5756800); -} - -/** - * Makes a disjunction of the given predicates. - * - * - The result is non-null; it may be an OrMatchExpression with zero children. - * - Any trivially-false arguments are omitted. - * - If only one argument is nontrivial, returns that argument rather than adding an extra - * OrMatchExpression around it. - */ -std::unique_ptr makeOr(std::vector> predicates) { - std::vector> nontrivial; - for (auto&& p : predicates) { - if (!p->isTriviallyFalse()) - nontrivial.push_back(std::move(p)); - } - - if (nontrivial.size() == 1) - return std::move(nontrivial[0]); - - return std::make_unique(std::move(nontrivial)); -} - -BucketSpec::BucketPredicate handleIneligible(IneligiblePredicatePolicy policy, - const MatchExpression* matchExpr, - StringData message) { - switch (policy) { - case IneligiblePredicatePolicy::kError: - uasserted( - 5916301, - "Error translating non-metadata time-series predicate to operate on buckets: " + - message + ": " + matchExpr->serialize().toString()); - case IneligiblePredicatePolicy::kIgnore: - return {}; - } - MONGO_UNREACHABLE_TASSERT(5916307); -} - -/* - * Creates a predicate that ensures that if there exists a subpath of matchExprPath such that the - * type of `control.min.subpath` is not the same as `control.max.subpath` then we will match that - * document. - * - * However, if the buckets collection has no mixed-schema data then this type-equality predicate is - * unnecessary. In that case this function returns an empty, always-true predicate. - */ -std::unique_ptr createTypeEqualityPredicate( - boost::intrusive_ptr pExpCtx, - const StringData& matchExprPath, - bool assumeNoMixedSchemaData) { - - std::vector> typeEqualityPredicates; - - if (assumeNoMixedSchemaData) - return makeOr(std::move(typeEqualityPredicates)); - - FieldPath matchExprField(matchExprPath); - using namespace timeseries; - - // Assume that we're generating a predicate on "a.b" - for (size_t i = 0; i < matchExprField.getPathLength(); i++) { - auto minPath = std::string{kControlMinFieldNamePrefix} + matchExprField.getSubpath(i); - auto maxPath = std::string{kControlMaxFieldNamePrefix} + matchExprField.getSubpath(i); - - // This whole block adds - // {$expr: {$ne: [{$type: "$control.min.a"}, {$type: "$control.max.a"}]}} - // in order to ensure that the type of `control.min.a` and `control.max.a` are the same. - - // This produces {$expr: ... } - typeEqualityPredicates.push_back(std::make_unique( - // This produces {$ne: ... 
} - make_intrusive( - pExpCtx.get(), - ExpressionCompare::CmpOp::NE, - // This produces [...] - makeVector>( - // This produces {$type: ... } - make_intrusive( - pExpCtx.get(), - // This produces [...] - makeVector>( - // This produces "$control.min.a" - ExpressionFieldPath::createPathFromString( - pExpCtx.get(), minPath, pExpCtx->variablesParseState))), - // This produces {$type: ... } - make_intrusive( - pExpCtx.get(), - // This produces [...] - makeVector>( - // This produces "$control.max.a" - ExpressionFieldPath::createPathFromString( - pExpCtx.get(), maxPath, pExpCtx->variablesParseState))))), - pExpCtx)); - } - return makeOr(std::move(typeEqualityPredicates)); -} - -boost::optional checkComparisonPredicateErrors( - const MatchExpression* matchExpr, - const StringData matchExprPath, - const BSONElement& matchExprData, - const BucketSpec& bucketSpec, - ExpressionContext::CollationMatchesDefault collationMatchesDefault) { - using namespace timeseries; - // The control field's min and max are chosen using a field-order insensitive comparator, while - // MatchExpressions use a comparator that treats field-order as significant. Because of this we - // will not perform this optimization on queries with operands of compound types. - if (matchExprData.type() == BSONType::Object || matchExprData.type() == BSONType::Array) - return "operand can't be an object or array"_sd; - - // MatchExpressions have special comparison semantics regarding null, in that {$eq: null} will - // match all documents where the field is either null or missing. Because this is different - // from both the comparison semantics that InternalExprComparison expressions and the control's - // min and max fields use, we will not perform this optimization on queries with null operands. - if (matchExprData.type() == BSONType::jstNULL) - return "can't handle {$eq: null}"_sd; - - // The control field's min and max are chosen based on the collation of the collection. If the - // query's collation does not match the collection's collation and the query operand is a - // string or compound type (skipped above) we will not perform this optimization. - if (collationMatchesDefault == ExpressionContext::CollationMatchesDefault::kNo && - matchExprData.type() == BSONType::String) { - return "can't handle string comparison with a non-default collation"_sd; - } - - // This function only handles time and measurement predicates--not metadata. - if (bucketSpec.metaField() && - (matchExprPath == bucketSpec.metaField().value() || - expression::isPathPrefixOf(bucketSpec.metaField().value(), matchExprPath))) { - tasserted( - 6707200, - str::stream() << "createComparisonPredicate() does not handle metadata predicates: " - << matchExpr); - } - - // We must avoid mapping predicates on fields computed via $addFields or a computed $project. - if (bucketSpec.fieldIsComputed(matchExprPath.toString())) { - return "can't handle a computed field"_sd; - } - - // We must avoid mapping predicates on fields removed by $project. - if (!determineIncludeField(matchExprPath, bucketSpec.behavior(), bucketSpec.fieldSet())) { - return "can't handle a field removed by projection"_sd; - } - - const auto isTimeField = (matchExprPath == bucketSpec.timeField()); - if (isTimeField && matchExprData.type() != BSONType::Date) { - // Users are not allowed to insert non-date measurements into time field. So this query - // would not match anything. We do not need to optimize for this case. 
- return "This predicate will never be true, because the time field always contains a Date"_sd; - } - - return boost::none; -} - -std::unique_ptr createComparisonPredicate( - const ComparisonMatchExpressionBase* matchExpr, - const BucketSpec& bucketSpec, - int bucketMaxSpanSeconds, - ExpressionContext::CollationMatchesDefault collationMatchesDefault, - boost::intrusive_ptr pExpCtx, - bool haveComputedMetaField, - bool includeMetaField, - bool assumeNoMixedSchemaData, - IneligiblePredicatePolicy policy) { - using namespace timeseries; - const auto matchExprPath = matchExpr->path(); - const auto matchExprData = matchExpr->getData(); - - const auto error = checkComparisonPredicateErrors( - matchExpr, matchExprPath, matchExprData, bucketSpec, collationMatchesDefault); - if (error) { - return handleIneligible(policy, matchExpr, *error).loosePredicate; - } - - const auto isTimeField = (matchExprPath == bucketSpec.timeField()); - auto minPath = std::string{kControlMinFieldNamePrefix} + matchExprPath; - const StringData minPathStringData(minPath); - auto maxPath = std::string{kControlMaxFieldNamePrefix} + matchExprPath; - const StringData maxPathStringData(maxPath); - - BSONObj minTime; - BSONObj maxTime; - bool dateIsExtended = false; - if (isTimeField) { - auto timeField = matchExprData.Date(); - minTime = BSON("" << timeField - Seconds(bucketMaxSpanSeconds)); - maxTime = BSON("" << timeField + Seconds(bucketMaxSpanSeconds)); - - // The date is in the "extended" range if it doesn't fit into the bottom - // 32 bits. - long long timestamp = timeField.toMillisSinceEpoch(); - dateIsExtended = timestamp < 0LL || timestamp > max32BitEpochMillis; - } - - switch (matchExpr->matchType()) { - case MatchExpression::EQ: - case MatchExpression::INTERNAL_EXPR_EQ: - // For $eq, make both a $lte against 'control.min' and a $gte predicate against - // 'control.max'. - // - // If the comparison is against the 'time' field and we haven't stored a time outside of - // the 32 bit range, include a predicate against the _id field which is converted to - // the maximum for the corresponding range of ObjectIds and - // is adjusted by the max range for a bucket to approximate the max bucket value given - // the min. Also include a predicate against the _id field which is converted to the - // minimum for the range of ObjectIds corresponding to the given date. In - // addition, we include a {'control.min' : {$gte: 'time - bucketMaxSpanSeconds'}} and - // a {'control.max' : {$lte: 'time + bucketMaxSpanSeconds'}} predicate which will be - // helpful in reducing bounds for index scans on 'time' field and routing on mongos. - // - // The same procedure applies to aggregation expressions of the form - // {$expr: {$eq: [...]}} that can be rewritten to use $_internalExprEq. 
- if (!isTimeField) { - return makeOr(makeVector>( - makePredicate(MatchExprPredicate( - minPathStringData, matchExprData), - MatchExprPredicate( - maxPathStringData, matchExprData)), - createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); - } else if (bucketSpec.usesExtendedRange()) { - return makePredicate( - MatchExprPredicate(minPath, matchExprData), - MatchExprPredicate(minPath, - minTime.firstElement()), - MatchExprPredicate(maxPath, matchExprData), - MatchExprPredicate(maxPath, - maxTime.firstElement())); - } else if (dateIsExtended) { - // Since by this point we know that no time value has been inserted which is - // outside the epoch range, we know that no document can meet this criteria - return std::make_unique(); - } else { - return makePredicate( - MatchExprPredicate(minPathStringData, - matchExprData), - MatchExprPredicate(minPathStringData, - minTime.firstElement()), - MatchExprPredicate(maxPathStringData, - matchExprData), - MatchExprPredicate(maxPathStringData, - maxTime.firstElement()), - MatchExprPredicate( - kBucketIdFieldName, - constructObjectIdValue(matchExprData, - bucketMaxSpanSeconds)), - MatchExprPredicate( - kBucketIdFieldName, - constructObjectIdValue(matchExprData, - bucketMaxSpanSeconds))); - } - MONGO_UNREACHABLE_TASSERT(6646903); - - case MatchExpression::GT: - case MatchExpression::INTERNAL_EXPR_GT: - // For $gt, make a $gt predicate against 'control.max'. In addition, if the comparison - // is against the 'time' field, and the collection doesn't contain times outside the - // 32 bit range, include a predicate against the _id field which is converted to the - // maximum for the corresponding range of ObjectIds and is adjusted by the max range - // for a bucket to approximate the max bucket value given the min. - // - // In addition, we include a {'control.min' : {$gt: 'time - bucketMaxSpanSeconds'}} - // predicate which will be helpful in reducing bounds for index scans on 'time' field - // and routing on mongos. - // - // The same procedure applies to aggregation expressions of the form - // {$expr: {$gt: [...]}} that can be rewritten to use $_internalExprGt. - if (!isTimeField) { - return makeOr(makeVector>( - std::make_unique(maxPathStringData, - matchExprData), - createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); - } else if (bucketSpec.usesExtendedRange()) { - return makePredicate( - MatchExprPredicate(maxPath, matchExprData), - MatchExprPredicate(minPath, - minTime.firstElement())); - } else if (matchExprData.Date().toMillisSinceEpoch() < 0LL) { - // Since by this point we know that no time value has been inserted < 0, - // every document must meet this criteria - return std::make_unique(); - } else if (matchExprData.Date().toMillisSinceEpoch() > max32BitEpochMillis) { - // Since by this point we know that no time value has been inserted > - // max32BitEpochMillis, we know that no document can meet this criteria - return std::make_unique(); - } else { - return makePredicate(MatchExprPredicate( - maxPathStringData, matchExprData), - MatchExprPredicate( - minPathStringData, minTime.firstElement()), - MatchExprPredicate( - kBucketIdFieldName, - constructObjectIdValue( - matchExprData, bucketMaxSpanSeconds))); - } - MONGO_UNREACHABLE_TASSERT(6646904); - - case MatchExpression::GTE: - case MatchExpression::INTERNAL_EXPR_GTE: - // For $gte, make a $gte predicate against 'control.max'. 
In addition, if the comparison - // is against the 'time' field, and the collection doesn't contain times outside the - // 32 bit range, include a predicate against the _id field which is - // converted to the minimum for the corresponding range of ObjectIds and is adjusted - // by the max range for a bucket to approximate the max bucket value given the min. In - // addition, we include a {'control.min' : {$gte: 'time - bucketMaxSpanSeconds'}} - // predicate which will be helpful in reducing bounds for index scans on 'time' field - // and routing on mongos. - // - // The same procedure applies to aggregation expressions of the form - // {$expr: {$gte: [...]}} that can be rewritten to use $_internalExprGte. - if (!isTimeField) { - return makeOr(makeVector>( - std::make_unique(maxPathStringData, - matchExprData), - createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); - } else if (bucketSpec.usesExtendedRange()) { - return makePredicate( - MatchExprPredicate(maxPath, matchExprData), - MatchExprPredicate(minPath, - minTime.firstElement())); - } else if (matchExprData.Date().toMillisSinceEpoch() < 0LL) { - // Since by this point we know that no time value has been inserted < 0, - // every document must meet this criteria - return std::make_unique(); - } else if (matchExprData.Date().toMillisSinceEpoch() > max32BitEpochMillis) { - // Since by this point we know that no time value has been inserted > 0xffffffff, - // we know that no value can meet this criteria - return std::make_unique(); - } else { - return makePredicate(MatchExprPredicate( - maxPathStringData, matchExprData), - MatchExprPredicate( - minPathStringData, minTime.firstElement()), - MatchExprPredicate( - kBucketIdFieldName, - constructObjectIdValue( - matchExprData, bucketMaxSpanSeconds))); - } - MONGO_UNREACHABLE_TASSERT(6646905); - - case MatchExpression::LT: - case MatchExpression::INTERNAL_EXPR_LT: - // For $lt, make a $lt predicate against 'control.min'. In addition, if the comparison - // is against the 'time' field, include a predicate against the _id field which is - // converted to the minimum for the corresponding range of ObjectIds, unless the - // collection contain extended range dates which won't fit int the 32 bits allocated - // for _id. - // - // In addition, we include a {'control.max' : {$lt: 'time + bucketMaxSpanSeconds'}} - // predicate which will be helpful in reducing bounds for index scans on 'time' field - // and routing on mongos. - // - // The same procedure applies to aggregation expressions of the form - // {$expr: {$lt: [...]}} that can be rewritten to use $_internalExprLt. 
- if (!isTimeField) { - return makeOr(makeVector>( - std::make_unique(minPathStringData, - matchExprData), - createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); - } else if (bucketSpec.usesExtendedRange()) { - return makePredicate( - MatchExprPredicate(minPath, matchExprData), - MatchExprPredicate(maxPath, - maxTime.firstElement())); - } else if (matchExprData.Date().toMillisSinceEpoch() < 0LL) { - // Since by this point we know that no time value has been inserted < 0, - // we know that no document can meet this criteria - return std::make_unique(); - } else if (matchExprData.Date().toMillisSinceEpoch() > max32BitEpochMillis) { - // Since by this point we know that no time value has been inserted > 0xffffffff - // every time value must be less than this value - return std::make_unique(); - } else { - return makePredicate(MatchExprPredicate( - minPathStringData, matchExprData), - MatchExprPredicate( - maxPathStringData, maxTime.firstElement()), - MatchExprPredicate( - kBucketIdFieldName, - constructObjectIdValue( - matchExprData, bucketMaxSpanSeconds))); - } - MONGO_UNREACHABLE_TASSERT(6646906); - - case MatchExpression::LTE: - case MatchExpression::INTERNAL_EXPR_LTE: - // For $lte, make a $lte predicate against 'control.min'. In addition, if the comparison - // is against the 'time' field, and the collection doesn't contain times outside the - // 32 bit range, include a predicate against the _id field which is - // converted to the maximum for the corresponding range of ObjectIds. In - // addition, we include a {'control.max' : {$lte: 'time + bucketMaxSpanSeconds'}} - // predicate which will be helpful in reducing bounds for index scans on 'time' field - // and routing on mongos. - // - // The same procedure applies to aggregation expressions of the form - // {$expr: {$lte: [...]}} that can be rewritten to use $_internalExprLte. 
- if (!isTimeField) { - return makeOr(makeVector>( - std::make_unique(minPathStringData, - matchExprData), - createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); - } else if (bucketSpec.usesExtendedRange()) { - return makePredicate( - MatchExprPredicate(minPath, matchExprData), - MatchExprPredicate(maxPath, - maxTime.firstElement())); - } else if (matchExprData.Date().toMillisSinceEpoch() < 0LL) { - // Since by this point we know that no time value has been inserted < 0, - // we know that no document can meet this criteria - return std::make_unique(); - } else if (matchExprData.Date().toMillisSinceEpoch() > max32BitEpochMillis) { - // Since by this point we know that no time value has been inserted > 0xffffffff - // every document must be less than this value - return std::make_unique(); - } else { - return makePredicate(MatchExprPredicate( - minPathStringData, matchExprData), - MatchExprPredicate( - maxPathStringData, maxTime.firstElement()), - MatchExprPredicate( - kBucketIdFieldName, - constructObjectIdValue( - matchExprData, bucketMaxSpanSeconds))); - } - MONGO_UNREACHABLE_TASSERT(6646907); - - default: - MONGO_UNREACHABLE_TASSERT(5348302); - } - - MONGO_UNREACHABLE_TASSERT(5348303); -} - -std::unique_ptr createTightComparisonPredicate( - const ComparisonMatchExpressionBase* matchExpr, - const BucketSpec& bucketSpec, - ExpressionContext::CollationMatchesDefault collationMatchesDefault) { - using namespace timeseries; - const auto matchExprPath = matchExpr->path(); - const auto matchExprData = matchExpr->getData(); - - const auto error = checkComparisonPredicateErrors( - matchExpr, matchExprPath, matchExprData, bucketSpec, collationMatchesDefault); - if (error) { - return handleIneligible(BucketSpec::IneligiblePredicatePolicy::kIgnore, matchExpr, *error) - .loosePredicate; - } - - // We have to disable the tight predicate for the measurement field. There might be missing - // values in the measurements and the control fields ignore them on insertion. So we cannot use - // bucket min and max to determine the property of all events in the bucket. For measurement - // fields, there's a further problem that if the control field is an array, we cannot generate - // the tight predicate because the predicate will be implicitly mapped over the array elements. - if (matchExprPath != bucketSpec.timeField()) { - return handleIneligible(BucketSpec::IneligiblePredicatePolicy::kIgnore, - matchExpr, - "can't create tight predicate on non-time field") - .tightPredicate; - } - - auto minPath = std::string{kControlMinFieldNamePrefix} + matchExprPath; - const StringData minPathStringData(minPath); - auto maxPath = std::string{kControlMaxFieldNamePrefix} + matchExprPath; - const StringData maxPathStringData(maxPath); - - switch (matchExpr->matchType()) { - // All events satisfy $eq if bucket min and max both satisfy $eq. - case MatchExpression::EQ: - return makePredicate( - MatchExprPredicate(minPathStringData, matchExprData), - MatchExprPredicate(maxPathStringData, matchExprData)); - case MatchExpression::INTERNAL_EXPR_EQ: - return makePredicate( - MatchExprPredicate(minPathStringData, matchExprData), - MatchExprPredicate(maxPathStringData, - matchExprData)); - - // All events satisfy $gt if bucket min satisfy $gt. - case MatchExpression::GT: - return std::make_unique(minPathStringData, matchExprData); - case MatchExpression::INTERNAL_EXPR_GT: - return std::make_unique(minPathStringData, - matchExprData); - - // All events satisfy $gte if bucket min satisfy $gte. 
- case MatchExpression::GTE: - return std::make_unique(minPathStringData, matchExprData); - case MatchExpression::INTERNAL_EXPR_GTE: - return std::make_unique(minPathStringData, - matchExprData); - - // All events satisfy $lt if bucket max satisfy $lt. - case MatchExpression::LT: - return std::make_unique(maxPathStringData, matchExprData); - case MatchExpression::INTERNAL_EXPR_LT: - return std::make_unique(maxPathStringData, - matchExprData); - - // All events satisfy $lte if bucket max satisfy $lte. - case MatchExpression::LTE: - return std::make_unique(maxPathStringData, matchExprData); - case MatchExpression::INTERNAL_EXPR_LTE: - return std::make_unique(maxPathStringData, - matchExprData); - - default: - MONGO_UNREACHABLE_TASSERT(7026901); - } -} - -std::unique_ptr createTightExprComparisonPredicate( - const ExprMatchExpression* matchExpr, - const BucketSpec& bucketSpec, - ExpressionContext::CollationMatchesDefault collationMatchesDefault, - boost::intrusive_ptr pExpCtx) { - using namespace timeseries; - auto rewriteMatchExpr = RewriteExpr::rewrite(matchExpr->getExpression(), pExpCtx->getCollator()) - .releaseMatchExpression(); - if (rewriteMatchExpr && - ComparisonMatchExpressionBase::isInternalExprComparison(rewriteMatchExpr->matchType())) { - auto compareMatchExpr = - checked_cast(rewriteMatchExpr.get()); - return createTightComparisonPredicate( - compareMatchExpr, bucketSpec, collationMatchesDefault); - } - - return handleIneligible(BucketSpec::IneligiblePredicatePolicy::kIgnore, - matchExpr, - "can't handle non-comparison $expr match expression") - .tightPredicate; -} - -} // namespace - -BucketSpec::BucketPredicate BucketSpec::createPredicatesOnBucketLevelField( - const MatchExpression* matchExpr, - const BucketSpec& bucketSpec, - int bucketMaxSpanSeconds, - ExpressionContext::CollationMatchesDefault collationMatchesDefault, - const boost::intrusive_ptr& pExpCtx, - bool haveComputedMetaField, - bool includeMetaField, - bool assumeNoMixedSchemaData, - IneligiblePredicatePolicy policy) { - - tassert(5916304, "BucketSpec::createPredicatesOnBucketLevelField nullptr", matchExpr); - - // If we have a leaf predicate on a meta field, we can map it to the bucket's meta field. - // This includes comparisons such as $eq and $lte, as well as other non-comparison predicates - // such as $exists, or $mod. Unrenamable expressions can't be split into a whole bucket level - // filter, when we should return nullptr. - // - // Metadata predicates are partially handled earlier, by splitting the match expression into a - // metadata-only part, and measurement/time-only part. However, splitting a $match into two - // sequential $matches only works when splitting a conjunction. A predicate like - // {$or: [ {a: 5}, {meta.b: 5} ]} can't be split, and can't be metadata-only, so we have to - // handle it here. 
- const auto matchExprPath = matchExpr->path(); - if (!matchExprPath.empty() && bucketSpec.metaField() && - (matchExprPath == bucketSpec.metaField().value() || - expression::isPathPrefixOf(bucketSpec.metaField().value(), matchExprPath))) { - - if (haveComputedMetaField) - return handleIneligible(policy, matchExpr, "can't handle a computed meta field"); - - if (!includeMetaField) - return handleIneligible(policy, matchExpr, "cannot handle an excluded meta field"); - - if (expression::hasOnlyRenameableMatchExpressionChildren(*matchExpr)) { - auto looseResult = matchExpr->clone(); - expression::applyRenamesToExpression( - looseResult.get(), - {{bucketSpec.metaField().value(), timeseries::kBucketMetaFieldName.toString()}}); - auto tightResult = looseResult->clone(); - return {std::move(looseResult), std::move(tightResult)}; - } else { - return {nullptr, nullptr}; - } - } - - if (matchExpr->matchType() == MatchExpression::AND) { - auto nextAnd = static_cast(matchExpr); - auto looseAndExpression = std::make_unique(); - auto tightAndExpression = std::make_unique(); - for (size_t i = 0; i < nextAnd->numChildren(); i++) { - auto child = createPredicatesOnBucketLevelField(nextAnd->getChild(i), - bucketSpec, - bucketMaxSpanSeconds, - collationMatchesDefault, - pExpCtx, - haveComputedMetaField, - includeMetaField, - assumeNoMixedSchemaData, - policy); - if (child.loosePredicate) { - looseAndExpression->add(std::move(child.loosePredicate)); - } - - if (tightAndExpression && child.tightPredicate) { - tightAndExpression->add(std::move(child.tightPredicate)); - } else { - // For tight expression, null means always false, we can short circuit here. - tightAndExpression = nullptr; - } - } - - // For a loose predicate, if we are unable to generate an expression we can just treat it as - // always true or an empty AND. This is because we are trying to generate a predicate that - // will match the superset of our actual results. - std::unique_ptr looseExpression = nullptr; - if (looseAndExpression->numChildren() == 1) { - looseExpression = looseAndExpression->releaseChild(0); - } else if (looseAndExpression->numChildren() > 1) { - looseExpression = std::move(looseAndExpression); - } - - // For a tight predicate, if we are unable to generate an expression we can just treat it as - // always false. This is because we are trying to generate a predicate that will match the - // subset of our actual results. - std::unique_ptr tightExpression = nullptr; - if (tightAndExpression && tightAndExpression->numChildren() == 1) { - tightExpression = tightAndExpression->releaseChild(0); - } else { - tightExpression = std::move(tightAndExpression); - } - - return {std::move(looseExpression), std::move(tightExpression)}; - } else if (matchExpr->matchType() == MatchExpression::OR) { - // Given {$or: [A, B]}, suppose A, B can be pushed down as A', B'. - // If an event matches {$or: [A, B]} then either: - // - it matches A, which means any bucket containing it matches A' - // - it matches B, which means any bucket containing it matches B' - // So {$or: [A', B']} will capture all the buckets we need to satisfy {$or: [A, B]}. 
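To make the $or argument in the comment above concrete, here is a hypothetical illustration; the field names and literal values are invented, and the control.min/control.max rewrites follow the $lt/$gt cases handled by createComparisonPredicate elsewhere in this file:

```cpp
// Illustration only. Event-level predicate:
//     {$or: [ {temp: {$gt: 5}}, {hum: {$lt: 3}} ]}
// Loose bucket-level rewrite: $gt maps to 'control.max.*' and $lt maps to 'control.min.*',
// so every bucket that could contain a matching event satisfies:
BSONObj looseBucketPredicate =
    BSON("$or" << BSON_ARRAY(BSON("control.max.temp" << BSON("$gt" << 5))
                             << BSON("control.min.hum" << BSON("$lt" << 3))));
```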
- auto nextOr = static_cast(matchExpr); - auto looseOrExpression = std::make_unique(); - auto tightOrExpression = std::make_unique(); - - for (size_t i = 0; i < nextOr->numChildren(); i++) { - auto child = createPredicatesOnBucketLevelField(nextOr->getChild(i), - bucketSpec, - bucketMaxSpanSeconds, - collationMatchesDefault, - pExpCtx, - haveComputedMetaField, - includeMetaField, - assumeNoMixedSchemaData, - policy); - if (looseOrExpression && child.loosePredicate) { - looseOrExpression->add(std::move(child.loosePredicate)); - } else { - // For loose expression, null means always true, we can short circuit here. - looseOrExpression = nullptr; - } - - // For tight predicate, we give a tighter bound so that all events in the bucket - // either all matches A or all matches B. - if (child.tightPredicate) { - tightOrExpression->add(std::move(child.tightPredicate)); - } - } - - // For a loose predicate, if we are unable to generate an expression we can just treat it as - // always true. This is because we are trying to generate a predicate that will match the - // superset of our actual results. - std::unique_ptr looseExpression = nullptr; - if (looseOrExpression && looseOrExpression->numChildren() == 1) { - looseExpression = looseOrExpression->releaseChild(0); - } else { - looseExpression = std::move(looseOrExpression); - } - - // For a tight predicate, if we are unable to generate an expression we can just treat it as - // always false or an empty OR. This is because we are trying to generate a predicate that - // will match the subset of our actual results. - std::unique_ptr tightExpression = nullptr; - if (tightOrExpression->numChildren() == 1) { - tightExpression = tightOrExpression->releaseChild(0); - } else if (tightOrExpression->numChildren() > 1) { - tightExpression = std::move(tightOrExpression); - } - - return {std::move(looseExpression), std::move(tightExpression)}; - } else if (ComparisonMatchExpression::isComparisonMatchExpression(matchExpr) || - ComparisonMatchExpressionBase::isInternalExprComparison(matchExpr->matchType())) { - return { - createComparisonPredicate(checked_cast(matchExpr), - bucketSpec, - bucketMaxSpanSeconds, - collationMatchesDefault, - pExpCtx, - haveComputedMetaField, - includeMetaField, - assumeNoMixedSchemaData, - policy), - createTightComparisonPredicate( - checked_cast(matchExpr), - bucketSpec, - collationMatchesDefault)}; - } else if (matchExpr->matchType() == MatchExpression::EXPRESSION) { - return { - // The loose predicate will be pushed before the unpacking which will be inspected by - // the - // query planner. Since the classic planner doesn't handle the $expr expression, we - // don't - // generate the loose predicate. - nullptr, - createTightExprComparisonPredicate(checked_cast(matchExpr), - bucketSpec, - collationMatchesDefault, - pExpCtx)}; - } else if (matchExpr->matchType() == MatchExpression::GEO) { - auto& geoExpr = static_cast(matchExpr)->getGeoExpression(); - if (geoExpr.getPred() == GeoExpression::WITHIN || - geoExpr.getPred() == GeoExpression::INTERSECT) { - return {std::make_unique( - geoExpr.getGeometryPtr(), geoExpr.getField()), - nullptr}; - } - } else if (matchExpr->matchType() == MatchExpression::EXISTS) { - if (assumeNoMixedSchemaData) { - // We know that every field that appears in an event will also appear in the min/max. 
- auto result = std::make_unique(); - result->add(std::make_unique(StringData( - std::string{timeseries::kControlMinFieldNamePrefix} + matchExpr->path()))); - result->add(std::make_unique(StringData( - std::string{timeseries::kControlMaxFieldNamePrefix} + matchExpr->path()))); - return {std::move(result), nullptr}; - } else { - // At time of writing, we only pass 'kError' when creating a partial index, and - // we know the collection will have no mixed-schema buckets by the time the index is - // done building. - tassert(5916305, - "Can't push down {$exists: true} when the collection may have mixed-schema " - "buckets.", - policy != IneligiblePredicatePolicy::kError); - return {}; - } - } else if (matchExpr->matchType() == MatchExpression::MATCH_IN) { - // {a: {$in: [X, Y]}} is equivalent to {$or: [ {a: X}, {a: Y} ]}. - // {$in: [/a/]} is interpreted as a regex query. - // {$in: [null]} matches any nullish value. - const auto* inExpr = static_cast(matchExpr); - if (inExpr->hasRegex()) - return handleIneligible( - policy, matchExpr, "can't handle $regex predicate (inside $in predicate)"); - if (inExpr->hasNull()) - return handleIneligible( - policy, matchExpr, "can't handle {$eq: null} predicate (inside $in predicate)"); - - auto result = std::make_unique(); - - bool alwaysTrue = false; - for (auto&& elem : inExpr->getEqualities()) { - // If inExpr is {$in: [X, Y]} then the elems are '0: X' and '1: Y'. - auto eq = std::make_unique( - inExpr->path(), elem, nullptr /*annotation*/, inExpr->getCollator()); - auto child = createComparisonPredicate(eq.get(), - bucketSpec, - bucketMaxSpanSeconds, - collationMatchesDefault, - pExpCtx, - haveComputedMetaField, - includeMetaField, - assumeNoMixedSchemaData, - policy); - - // As with OR, only add the child if it has been succesfully translated, otherwise the - // $in cannot be correctly mapped to bucket level fields and we should return nullptr. - if (child) { - result->add(std::move(child)); - } else { - alwaysTrue = true; - if (policy == IneligiblePredicatePolicy::kIgnore) - break; - } - } - if (alwaysTrue) - return {}; - - // As above, no special case for an empty IN: returning nullptr would be incorrect because - // it means 'always-true', here. - return {std::move(result), nullptr}; - } - return handleIneligible(policy, matchExpr, "can't handle this predicate"); -} - -std::pair BucketSpec::pushdownPredicate( - const boost::intrusive_ptr& expCtx, - const TimeseriesOptions& tsOptions, - ExpressionContext::CollationMatchesDefault collationMatchesDefault, - const BSONObj& predicate, - bool haveComputedMetaField, - bool includeMetaField, - bool assumeNoMixedSchemaData, - IneligiblePredicatePolicy policy) { - - auto allowedFeatures = MatchExpressionParser::kDefaultSpecialFeatures; - auto matchExpr = uassertStatusOK( - MatchExpressionParser::parse(predicate, expCtx, ExtensionsCallbackNoop(), allowedFeatures)); - - auto metaField = haveComputedMetaField ? boost::none : tsOptions.getMetaField(); - auto [metaOnlyPredicate, metricPredicate] = [&] { - if (!metaField) { - // If there's no metadata field, then none of the predicates are metadata-only - // predicates. - return std::make_pair(std::unique_ptr(nullptr), std::move(matchExpr)); - } - - return expression::splitMatchExpressionBy( - std::move(matchExpr), - {metaField->toString()}, - {{metaField->toString(), timeseries::kBucketMetaFieldName.toString()}}, - expression::isOnlyDependentOn); - }(); - - int maxSpanSeconds = tsOptions.getBucketMaxSpanSeconds() - ? 
*tsOptions.getBucketMaxSpanSeconds() - : timeseries::getMaxSpanSecondsFromGranularity( - tsOptions.getGranularity().get_value_or(BucketGranularityEnum::Seconds)); - - std::unique_ptr bucketMetricPredicate = metricPredicate - ? createPredicatesOnBucketLevelField( - metricPredicate.get(), - BucketSpec{ - tsOptions.getTimeField().toString(), - metaField.map([](StringData s) { return s.toString(); }), - // Since we are operating on a collection, not a query-result, - // there are no inclusion/exclusion projections we need to apply - // to the buckets before unpacking. So we can use default values for the rest of - // the arguments. - }, - maxSpanSeconds, - collationMatchesDefault, - expCtx, - haveComputedMetaField, - includeMetaField, - assumeNoMixedSchemaData, - policy) - .loosePredicate - : nullptr; - - BSONObjBuilder result; - if (metaOnlyPredicate) - metaOnlyPredicate->serialize(&result, {}); - if (bucketMetricPredicate) - bucketMetricPredicate->serialize(&result, {}); - return std::make_pair(bucketMetricPredicate.get(), result.obj()); -} - -class BucketUnpacker::UnpackingImpl { -public: - UnpackingImpl() = default; - virtual ~UnpackingImpl() = default; - - virtual void addField(const BSONElement& field) = 0; - virtual int measurementCount(const BSONElement& timeField) const = 0; - virtual bool getNext(MutableDocument& measurement, - const BucketSpec& spec, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) = 0; - virtual bool getNext(BSONObjBuilder& builder, - const BucketSpec& spec, - const BSONElement& metaValue, - bool includeTimeField, - bool includeMetaField) = 0; - virtual void extractSingleMeasurement(MutableDocument& measurement, - int j, - const BucketSpec& spec, - const std::set& unpackFieldsToIncludeExclude, - const BSONObj& bucket, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) = 0; - - // Provides an upper bound on the number of fields in each measurement. - virtual std::size_t numberOfFields() = 0; - -protected: - // Data field count is variable, but time and metadata are fixed. - constexpr static std::size_t kFixedFieldNumber = 2; -}; - -namespace { - - -// Unpacker for V1 uncompressed buckets -class BucketUnpackerV1 : public BucketUnpacker::UnpackingImpl { -public: - // A table that is useful for interpolations between the number of measurements in a bucket and - // the byte size of a bucket's data section timestamp column. Each table entry is a pair (b_i, - // S_i), where b_i is the number of measurements in the bucket and S_i is the byte size of the - // timestamp BSONObj. The table is bounded by 16 MB (2 << 23 bytes) where the table entries are - // pairs of b_i and S_i for the lower bounds of the row key digit intervals [0, 9], [10, 99], - // [100, 999], [1000, 9999] and so on. The last entry in the table, S7, is the first entry to - // exceed the server BSON object limit of 16 MB. 
- static constexpr std::array, 8> kTimestampObjSizeTable{ - {{0, BSONObj::kMinBSONLength}, - {10, 115}, - {100, 1195}, - {1000, 12895}, - {10000, 138895}, - {100000, 1488895}, - {1000000, 15888895}, - {10000000, 168888895}}}; - - static int computeElementCountFromTimestampObjSize(int targetTimestampObjSize); - - BucketUnpackerV1(const BSONElement& timeField); - - void addField(const BSONElement& field) override; - int measurementCount(const BSONElement& timeField) const override; - bool getNext(MutableDocument& measurement, - const BucketSpec& spec, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) override; - bool getNext(BSONObjBuilder& builder, - const BucketSpec& spec, - const BSONElement& metaValue, - bool includeTimeField, - bool includeMetaField) override; - void extractSingleMeasurement(MutableDocument& measurement, - int j, - const BucketSpec& spec, - const std::set& unpackFieldsToIncludeExclude, - const BSONObj& bucket, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) override; - std::size_t numberOfFields() override; - -private: - // Iterates the timestamp section of the bucket to drive the unpacking iteration. - BSONObjIterator _timeFieldIter; - - // Iterators used to unpack the columns of the above bucket that are populated during the reset - // phase according to the provided 'BucketSpec'. - std::vector> _fieldIters; -}; - -// Calculates the number of measurements in a bucket given the 'targetTimestampObjSize' using the -// 'BucketUnpacker::kTimestampObjSizeTable' table. If the 'targetTimestampObjSize' hits a record in -// the table, this helper returns the measurement count corresponding to the table record. -// Otherwise, the 'targetTimestampObjSize' is used to probe the table for the smallest {b_i, S_i} -// pair such that 'targetTimestampObjSize' < S_i. Once the interval is found, the upper bound of the -// pair for the interval is computed and then linear interpolation is used to compute the -// measurement count corresponding to the 'targetTimestampObjSize' provided. -int BucketUnpackerV1::computeElementCountFromTimestampObjSize(int targetTimestampObjSize) { - auto currentInterval = - std::find_if(std::begin(BucketUnpackerV1::kTimestampObjSizeTable), - std::end(BucketUnpackerV1::kTimestampObjSizeTable), - [&](const auto& entry) { return targetTimestampObjSize <= entry.second; }); - - if (currentInterval->second == targetTimestampObjSize) { - return currentInterval->first; - } - // This points to the first interval larger than the target 'targetTimestampObjSize', the actual - // interval that will cover the object size is the interval before the current one. 
- tassert(5422104, - "currentInterval should not point to the first table entry", - currentInterval > BucketUnpackerV1::kTimestampObjSizeTable.begin()); - --currentInterval; - - auto nDigitsInRowKey = 1 + (currentInterval - BucketUnpackerV1::kTimestampObjSizeTable.begin()); - - return currentInterval->first + - ((targetTimestampObjSize - currentInterval->second) / (10 + nDigitsInRowKey)); -} - -BucketUnpackerV1::BucketUnpackerV1(const BSONElement& timeField) - : _timeFieldIter(BSONObjIterator{timeField.Obj()}) {} - -void BucketUnpackerV1::addField(const BSONElement& field) { - _fieldIters.emplace_back(field.fieldNameStringData(), BSONObjIterator{field.Obj()}); -} - -int BucketUnpackerV1::measurementCount(const BSONElement& timeField) const { - return computeElementCountFromTimestampObjSize(timeField.objsize()); -} - -bool BucketUnpackerV1::getNext(MutableDocument& measurement, - const BucketSpec& spec, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) { - auto&& timeElem = _timeFieldIter.next(); - if (includeTimeField) { - measurement.addField(spec.timeFieldHashed(), Value{timeElem}); - } - - // Includes metaField when we're instructed to do so and metaField value exists. - if (includeMetaField && !metaValue.missing()) { - measurement.addField(*spec.metaFieldHashed(), metaValue); - } - - const auto& currentIdx = timeElem.fieldNameStringData(); - for (auto&& [colName, colIter] : _fieldIters) { - if (auto&& elem = *colIter; colIter.more() && elem.fieldNameStringData() == currentIdx) { - measurement.addField(colName, Value{elem}); - colIter.advance(elem); - } - } - - return _timeFieldIter.more(); -} - -bool BucketUnpackerV1::getNext(BSONObjBuilder& builder, - const BucketSpec& spec, - const BSONElement& metaValue, - bool includeTimeField, - bool includeMetaField) { - auto&& timeElem = _timeFieldIter.next(); - if (includeTimeField) { - builder.appendAs(timeElem, spec.timeField()); - } - - // Includes metaField when we're instructed to do so and metaField value exists. - if (includeMetaField && !metaValue.eoo()) { - builder.appendAs(metaValue, *spec.metaField()); - } - - const auto& currentIdx = timeElem.fieldNameStringData(); - for (auto&& [colName, colIter] : _fieldIters) { - if (auto&& elem = *colIter; colIter.more() && elem.fieldNameStringData() == currentIdx) { - builder.appendAs(elem, colName); - colIter.advance(elem); - } - } - - return _timeFieldIter.more(); -} - -void BucketUnpackerV1::extractSingleMeasurement( - MutableDocument& measurement, - int j, - const BucketSpec& spec, - const std::set& unpackFieldsToIncludeExclude, - const BSONObj& bucket, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) { - auto rowKey = std::to_string(j); - auto targetIdx = StringData{rowKey}; - auto&& dataRegion = bucket.getField(timeseries::kBucketDataFieldName).Obj(); - - if (includeMetaField && !metaValue.missing()) { - measurement.addField(*spec.metaFieldHashed(), metaValue); - } - - for (auto&& dataElem : dataRegion) { - const auto& colName = dataElem.fieldNameStringData(); - if (!determineIncludeField(colName, spec.behavior(), unpackFieldsToIncludeExclude)) { - continue; - } - auto value = dataElem[targetIdx]; - if (value) { - measurement.addField(dataElem.fieldNameStringData(), Value{value}); - } - } -} - -std::size_t BucketUnpackerV1::numberOfFields() { - // The data fields are tracked by _fieldIters, but we need to account also for the time field - // and possibly the meta field. 
- return kFixedFieldNumber + _fieldIters.size(); -} - -// Unpacker for V2 compressed buckets -class BucketUnpackerV2 : public BucketUnpacker::UnpackingImpl { -public: - BucketUnpackerV2(const BSONElement& timeField, int elementCount); - - void addField(const BSONElement& field) override; - int measurementCount(const BSONElement& timeField) const override; - bool getNext(MutableDocument& measurement, - const BucketSpec& spec, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) override; - bool getNext(BSONObjBuilder& builder, - const BucketSpec& spec, - const BSONElement& metaValue, - bool includeTimeField, - bool includeMetaField) override; - void extractSingleMeasurement(MutableDocument& measurement, - int j, - const BucketSpec& spec, - const std::set& unpackFieldsToIncludeExclude, - const BSONObj& bucket, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) override; - std::size_t numberOfFields() override; - -private: - struct ColumnStore { - ColumnStore(BSONElement elem) - : column(elem), - it(column.begin()), - end(column.end()), - hashedName(FieldNameHasher{}(column.name())) {} - ColumnStore(ColumnStore&& other) - : column(std::move(other.column)), - it(other.it.moveTo(column)), - end(other.end), - hashedName(other.hashedName) {} - - BSONColumn column; - BSONColumn::Iterator it; - BSONColumn::Iterator end; - size_t hashedName; - }; - - // Iterates the timestamp section of the bucket to drive the unpacking iteration. - ColumnStore _timeColumn; - - // Iterators used to unpack the columns of the above bucket that are populated during the reset - // phase according to the provided 'BucketSpec'. - std::vector _fieldColumns; - - // Element count - int _elementCount; -}; - -BucketUnpackerV2::BucketUnpackerV2(const BSONElement& timeField, int elementCount) - : _timeColumn(timeField), _elementCount(elementCount) { - if (_elementCount == -1) { - _elementCount = _timeColumn.column.size(); - } -} - -void BucketUnpackerV2::addField(const BSONElement& field) { - _fieldColumns.emplace_back(field); -} - -int BucketUnpackerV2::measurementCount(const BSONElement& timeField) const { - return _elementCount; -} - -bool BucketUnpackerV2::getNext(MutableDocument& measurement, - const BucketSpec& spec, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) { - // Get element and increment iterator - const auto& timeElem = *_timeColumn.it; - if (includeTimeField) { - measurement.addField(spec.timeFieldHashed(), Value{timeElem}); - } - ++_timeColumn.it; - - // Includes metaField when we're instructed to do so and metaField value exists. 
- if (includeMetaField && !metaValue.missing()) { - measurement.addField(*spec.metaFieldHashed(), metaValue); - } - - for (auto& fieldColumn : _fieldColumns) { - uassert(6067601, - "Bucket unexpectedly contained fewer values than count", - fieldColumn.it != fieldColumn.end); - const BSONElement& elem = *fieldColumn.it; - // EOO represents missing field - if (!elem.eoo()) { - measurement.addField(HashedFieldName{fieldColumn.column.name(), fieldColumn.hashedName}, - Value{elem}); - } - ++fieldColumn.it; - } - - return _timeColumn.it != _timeColumn.end; -} - -bool BucketUnpackerV2::getNext(BSONObjBuilder& builder, - const BucketSpec& spec, - const BSONElement& metaValue, - bool includeTimeField, - bool includeMetaField) { - // Get element and increment iterator - const auto& timeElem = *_timeColumn.it; - if (includeTimeField) { - builder.appendAs(timeElem, spec.timeField()); - } - ++_timeColumn.it; - - // Includes metaField when we're instructed to do so and metaField value exists. - if (includeMetaField && !metaValue.eoo()) { - builder.appendAs(metaValue, *spec.metaField()); - } - - for (auto& fieldColumn : _fieldColumns) { - uassert(7026803, - "Bucket unexpectedly contained fewer values than count", - fieldColumn.it != fieldColumn.end); - const BSONElement& elem = *fieldColumn.it; - // EOO represents missing field - if (!elem.eoo()) { - builder.appendAs(elem, fieldColumn.column.name()); - } - ++fieldColumn.it; - } - - return _timeColumn.it != _timeColumn.end; -} - -void BucketUnpackerV2::extractSingleMeasurement( - MutableDocument& measurement, - int j, - const BucketSpec& spec, - const std::set& unpackFieldsToIncludeExclude, - const BSONObj& bucket, - const Value& metaValue, - bool includeTimeField, - bool includeMetaField) { - if (includeTimeField) { - auto val = _timeColumn.column[j]; - uassert( - 6067500, "Bucket unexpectedly contained fewer values than count", val && !val->eoo()); - measurement.addField(spec.timeFieldHashed(), Value{*val}); - } - - if (includeMetaField && !metaValue.missing()) { - measurement.addField(*spec.metaFieldHashed(), metaValue); - } - - if (includeTimeField) { - for (auto& fieldColumn : _fieldColumns) { - auto val = fieldColumn.column[j]; - uassert(6067600, "Bucket unexpectedly contained fewer values than count", val); - measurement.addField(HashedFieldName{fieldColumn.column.name(), fieldColumn.hashedName}, - Value{*val}); - } - } -} - -std::size_t BucketUnpackerV2::numberOfFields() { - // The data fields are tracked by _fieldColumns, but we need to account also for the time field - // and possibly the meta field. 
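The compressed (v2) unpacker walks the time column and every selected field column in lockstep, and an EOO element in a field column means the corresponding measurement simply has no value for that field. The sketch below restates that iteration pattern with std::optional standing in for BSONColumn; the field names and value types are invented for the example.

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Column {
    std::string name;
    std::vector<std::optional<double>> values;  // nullopt plays the role of EOO (missing).
};

// Materialize one document per time value, pulling the i-th entry of each
// field column and skipping columns where that entry is missing.
void unpack(const std::vector<long long>& timeColumn, const std::vector<Column>& fieldColumns) {
    for (std::size_t i = 0; i < timeColumn.size(); ++i) {
        std::cout << "{time: " << timeColumn[i];
        for (const auto& col : fieldColumns) {
            if (i < col.values.size() && col.values[i]) {
                std::cout << ", " << col.name << ": " << *col.values[i];
            }
        }
        std::cout << "}\n";
    }
}

int main() {
    std::vector<long long> time{1000, 2000, 3000};
    std::vector<Column> fields{{"temp", {21.5, std::nullopt, 22.0}},
                               {"humidity", {std::nullopt, 0.4, 0.45}}};
    unpack(time, fields);  // Second document has no 'temp', first has no 'humidity'.
    return 0;
}
```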
- return kFixedFieldNumber + _fieldColumns.size(); -} -} // namespace - -BucketSpec::BucketSpec(const std::string& timeField, - const boost::optional& metaField, - const std::set& fields, - Behavior behavior, - const std::set& computedProjections, - bool usesExtendedRange) - : _fieldSet(fields), - _behavior(behavior), - _computedMetaProjFields(computedProjections), - _timeField(timeField), - _timeFieldHashed(FieldNameHasher().hashedFieldName(_timeField)), - _metaField(metaField), - _usesExtendedRange(usesExtendedRange) { - if (_metaField) { - _metaFieldHashed = FieldNameHasher().hashedFieldName(*_metaField); - } -} - -BucketSpec::BucketSpec(const BucketSpec& other) - : _fieldSet(other._fieldSet), - _behavior(other._behavior), - _computedMetaProjFields(other._computedMetaProjFields), - _timeField(other._timeField), - _timeFieldHashed(HashedFieldName{_timeField, other._timeFieldHashed->hash()}), - _metaField(other._metaField), - _usesExtendedRange(other._usesExtendedRange) { - if (_metaField) { - _metaFieldHashed = HashedFieldName{*_metaField, other._metaFieldHashed->hash()}; - } -} - -BucketSpec::BucketSpec(BucketSpec&& other) - : _fieldSet(std::move(other._fieldSet)), - _behavior(other._behavior), - _computedMetaProjFields(std::move(other._computedMetaProjFields)), - _timeField(std::move(other._timeField)), - _timeFieldHashed(HashedFieldName{_timeField, other._timeFieldHashed->hash()}), - _metaField(std::move(other._metaField)), - _usesExtendedRange(other._usesExtendedRange) { - if (_metaField) { - _metaFieldHashed = HashedFieldName{*_metaField, other._metaFieldHashed->hash()}; - } -} - -BucketSpec::BucketSpec(const TimeseriesOptions& tsOptions) - : BucketSpec(tsOptions.getTimeField().toString(), - tsOptions.getMetaField() - ? boost::optional(tsOptions.getMetaField()->toString()) - : boost::none) {} - -BucketSpec& BucketSpec::operator=(const BucketSpec& other) { - if (&other != this) { - _fieldSet = other._fieldSet; - _behavior = other._behavior; - _computedMetaProjFields = other._computedMetaProjFields; - _timeField = other._timeField; - _timeFieldHashed = HashedFieldName{_timeField, other._timeFieldHashed->hash()}; - _metaField = other._metaField; - if (_metaField) { - _metaFieldHashed = HashedFieldName{*_metaField, other._metaFieldHashed->hash()}; - } - _usesExtendedRange = other._usesExtendedRange; - } - return *this; -} - -void BucketSpec::setTimeField(std::string&& name) { - _timeField = std::move(name); - _timeFieldHashed = FieldNameHasher().hashedFieldName(_timeField); -} - -const std::string& BucketSpec::timeField() const { - return _timeField; -} - -HashedFieldName BucketSpec::timeFieldHashed() const { - invariant(_timeFieldHashed->key().rawData() == _timeField.data()); - invariant(_timeFieldHashed->key() == _timeField); - return *_timeFieldHashed; -} - -void BucketSpec::setMetaField(boost::optional&& name) { - _metaField = std::move(name); - if (_metaField) { - _metaFieldHashed = FieldNameHasher().hashedFieldName(*_metaField); - } else { - _metaFieldHashed = boost::none; - } -} - -const boost::optional& BucketSpec::metaField() const { - return _metaField; -} - -boost::optional BucketSpec::metaFieldHashed() const { - return _metaFieldHashed; -} - -BucketUnpacker::BucketUnpacker() = default; -BucketUnpacker::BucketUnpacker(BucketUnpacker&& other) = default; -BucketUnpacker::~BucketUnpacker() = default; -BucketUnpacker& BucketUnpacker::operator=(BucketUnpacker&& rhs) = default; - -BucketUnpacker::BucketUnpacker(BucketSpec spec) { - setBucketSpec(std::move(spec)); -} - -void 
BucketUnpacker::addComputedMetaProjFields(const std::vector& computedFieldNames) { - for (auto&& field : computedFieldNames) { - _spec.addComputedMetaProjFields(field); - - // If we're already specifically including fields, we need to add the computed fields to - // the included field set to indicate they're in the output doc. - if (_spec.behavior() == BucketSpec::Behavior::kInclude) { - _spec.addIncludeExcludeField(field); - } else { - // Since exclude is applied after addComputedMetaProjFields, we must erase the new field - // from the include/exclude fields so this doesn't get removed. - _spec.removeIncludeExcludeField(field.toString()); - } - } - - // Recalculate _includeTimeField, since both computedMetaProjFields and fieldSet may have - // changed. - determineIncludeTimeField(); -} - -Document BucketUnpacker::getNext() { - tassert(5521503, "'getNext()' requires the bucket to be owned", _bucket.isOwned()); - tassert(5422100, "'getNext()' was called after the bucket has been exhausted", hasNext()); - - // MutableDocument reserves memory based on the number of fields, but uses a fixed size of 25 - // bytes plus an allowance of 7 characters for the field name. Doubling the number of fields - // should give us enough overhead for longer field names without wasting too much memory. - auto measurement = MutableDocument{2 * _unpackingImpl->numberOfFields()}; - _hasNext = _unpackingImpl->getNext( - measurement, _spec, _metaValue, _includeTimeField, _includeMetaField); - - // Add computed meta projections. - for (auto&& name : _spec.computedMetaProjFields()) { - measurement.addField(name, Value{_computedMetaProjections[name]}); - } - - if (_includeMinTimeAsMetadata && _minTime) { - measurement.metadata().setTimeseriesBucketMinTime(*_minTime); - } - - if (_includeMaxTimeAsMetadata && _maxTime) { - measurement.metadata().setTimeseriesBucketMaxTime(*_maxTime); - } - - return measurement.freeze(); -} - -BSONObj BucketUnpacker::getNextBson() { - tassert(7026800, "'getNextBson()' requires the bucket to be owned", _bucket.isOwned()); - tassert(7026801, "'getNextBson()' was called after the bucket has been exhausted", hasNext()); - tassert(7026802, - "'getNextBson()' cannot return max and min time as metadata", - !_includeMaxTimeAsMetadata && !_includeMinTimeAsMetadata); - - BSONObjBuilder builder; - _hasNext = _unpackingImpl->getNext( - builder, _spec, _metaBSONElem, _includeTimeField, _includeMetaField); - - // Add computed meta projections. - for (auto&& name : _spec.computedMetaProjFields()) { - builder.appendAs(_computedMetaProjections[name], name); - } - - return builder.obj(); -} - -Document BucketUnpacker::extractSingleMeasurement(int j) { - tassert(5422101, - "'extractSingleMeasurment' expects j to be greater than or equal to zero and less than " - "or equal to the number of measurements in a bucket", - j >= 0 && j < _numberOfMeasurements); - - auto measurement = MutableDocument{}; - _unpackingImpl->extractSingleMeasurement(measurement, - j, - _spec, - fieldsToIncludeExcludeDuringUnpack(), - _bucket, - _metaValue, - _includeTimeField, - _includeMetaField); - - // Add computed meta projections. 
- for (auto&& name : _spec.computedMetaProjFields()) { - measurement.addField(name, Value{_computedMetaProjections[name]}); - } - - return measurement.freeze(); -} - -void BucketUnpacker::reset(BSONObj&& bucket, bool bucketMatchedQuery) { - _unpackingImpl.reset(); - _bucket = std::move(bucket); - _bucketMatchedQuery = bucketMatchedQuery; - uassert(5346510, "An empty bucket cannot be unpacked", !_bucket.isEmpty()); - - auto&& dataRegion = _bucket.getField(timeseries::kBucketDataFieldName).Obj(); - if (dataRegion.isEmpty()) { - // If the data field of a bucket is present but it holds an empty object, there's nothing to - // unpack. - return; - } - - auto&& timeFieldElem = dataRegion.getField(_spec.timeField()); - uassert(5346700, - "The $_internalUnpackBucket stage requires the data region to have a timeField object", - timeFieldElem); - - _metaBSONElem = _bucket[timeseries::kBucketMetaFieldName]; - _metaValue = Value{_metaBSONElem}; - if (_spec.metaField()) { - // The spec indicates that there might be a metadata region. Missing metadata in - // measurements is expressed with missing metadata in a bucket. But we disallow undefined - // since the undefined BSON type is deprecated. - uassert(5369600, - "The $_internalUnpackBucket stage allows metadata to be absent or otherwise, it " - "must not be the deprecated undefined bson type", - _metaValue.missing() || _metaValue.getType() != BSONType::Undefined); - } else { - // If the spec indicates that the time series collection has no metadata field, then we - // should not find a metadata region in the underlying bucket documents. - uassert(5369601, - "The $_internalUnpackBucket stage expects buckets to have missing metadata regions " - "if the metaField parameter is not provided", - _metaValue.missing()); - } - - auto&& controlField = _bucket[timeseries::kBucketControlFieldName]; - uassert(5857902, - "The $_internalUnpackBucket stage requires 'control' object to be present", - controlField && controlField.type() == BSONType::Object); - - auto&& controlClosed = controlField.Obj()[timeseries::kBucketControlClosedFieldName]; - _closedBucket = controlClosed.booleanSafe(); - - if (_includeMinTimeAsMetadata) { - auto&& controlMin = controlField.Obj()[timeseries::kBucketControlMinFieldName]; - uassert(6460203, - str::stream() << "The $_internalUnpackBucket stage requires '" - << timeseries::kControlMinFieldNamePrefix << "' object to be present", - controlMin && controlMin.type() == BSONType::Object); - auto&& minTime = controlMin.Obj()[_spec.timeField()]; - uassert(6460204, - str::stream() << "The $_internalUnpackBucket stage requires '" - << timeseries::kControlMinFieldNamePrefix << "." << _spec.timeField() - << "' to be a date", - minTime && minTime.type() == BSONType::Date); - _minTime = minTime.date(); - } - - if (_includeMaxTimeAsMetadata) { - auto&& controlMax = controlField.Obj()[timeseries::kBucketControlMaxFieldName]; - uassert(6460205, - str::stream() << "The $_internalUnpackBucket stage requires '" - << timeseries::kControlMaxFieldNamePrefix << "' object to be present", - controlMax && controlMax.type() == BSONType::Object); - auto&& maxTime = controlMax.Obj()[_spec.timeField()]; - uassert(6460206, - str::stream() << "The $_internalUnpackBucket stage requires '" - << timeseries::kControlMaxFieldNamePrefix << "." 
<< _spec.timeField() - << "' to be a date", - maxTime && maxTime.type() == BSONType::Date); - _maxTime = maxTime.date(); - } - - auto&& versionField = controlField.Obj()[timeseries::kBucketControlVersionFieldName]; - uassert(5857903, - "The $_internalUnpackBucket stage requires 'control.version' field to be present", - versionField && isNumericBSONType(versionField.type())); - auto version = versionField.Number(); - - if (version == 1) { - _unpackingImpl = std::make_unique(timeFieldElem); - } else if (version == 2) { - auto countField = controlField.Obj()[timeseries::kBucketControlCountFieldName]; - _unpackingImpl = - std::make_unique(timeFieldElem, - countField && isNumericBSONType(countField.type()) - ? static_cast(countField.Number()) - : -1); - } else { - uasserted(5857900, "Invalid bucket version"); - } - - // Walk the data region of the bucket, and decide if an iterator should be set up based on the - // include or exclude case. - for (auto&& elem : dataRegion) { - auto colName = elem.fieldNameStringData(); - if (colName == _spec.timeField()) { - // Skip adding a FieldIterator for the timeField since the timestamp value from - // _timeFieldIter can be placed accordingly in the materialized measurement. - continue; - } - - // Includes a field when '_spec.behavior()' is 'kInclude' and it's found in 'fieldSet' or - // _spec.behavior() is 'kExclude' and it's not found in 'fieldSet'. - if (determineIncludeField( - colName, _spec.behavior(), fieldsToIncludeExcludeDuringUnpack())) { - _unpackingImpl->addField(elem); - } - } - - // Update computed meta projections with values from this bucket. - for (auto&& name : _spec.computedMetaProjFields()) { - _computedMetaProjections[name] = _bucket[name]; - } - - // Save the measurement count for the bucket. - _numberOfMeasurements = _unpackingImpl->measurementCount(timeFieldElem); - _hasNext = _numberOfMeasurements > 0; -} - -int BucketUnpacker::computeMeasurementCount(const BSONObj& bucket, StringData timeField) { - auto&& controlField = bucket[timeseries::kBucketControlFieldName]; - uassert(5857904, - "The $_internalUnpackBucket stage requires 'control' object to be present", - controlField && controlField.type() == BSONType::Object); - - auto&& versionField = controlField.Obj()[timeseries::kBucketControlVersionFieldName]; - uassert(5857905, - "The $_internalUnpackBucket stage requires 'control.version' field to be present", - versionField && isNumericBSONType(versionField.type())); - - auto&& dataField = bucket[timeseries::kBucketDataFieldName]; - if (!dataField || dataField.type() != BSONType::Object) - return 0; - - auto&& time = dataField.Obj()[timeField]; - if (!time) { - return 0; - } - - auto version = versionField.Number(); - if (version == 1) { - return BucketUnpackerV1::computeElementCountFromTimestampObjSize(time.objsize()); - } else if (version == 2) { - auto countField = controlField.Obj()[timeseries::kBucketControlCountFieldName]; - if (countField && isNumericBSONType(countField.type())) { - return static_cast(countField.Number()); - } - - return BSONColumn(time).size(); - } else { - uasserted(5857901, "Invalid bucket version"); - } -} - -void BucketUnpacker::determineIncludeTimeField() { - const bool isInclude = _spec.behavior() == BucketSpec::Behavior::kInclude; - const bool fieldSetContainsTime = - _spec.fieldSet().find(_spec.timeField()) != _spec.fieldSet().end(); - - const auto& metaProjFields = _spec.computedMetaProjFields(); - const bool metaProjContains = metaProjFields.find(_spec.timeField()) != metaProjFields.cend(); - - // 
If computedMetaProjFields contains the time field, we exclude it from unpacking no matter - // what, since it will be overwritten anyway. - _includeTimeField = isInclude == fieldSetContainsTime && !metaProjContains; -} - -void BucketUnpacker::eraseMetaFromFieldSetAndDetermineIncludeMeta() { - if (!_spec.metaField() || - _spec.computedMetaProjFields().find(*_spec.metaField()) != - _spec.computedMetaProjFields().cend()) { - _includeMetaField = false; - } else if (auto itr = _spec.fieldSet().find(*_spec.metaField()); - itr != _spec.fieldSet().end()) { - _spec.removeIncludeExcludeField(*_spec.metaField()); - _includeMetaField = _spec.behavior() == BucketSpec::Behavior::kInclude; - } else { - _includeMetaField = _spec.behavior() == BucketSpec::Behavior::kExclude; - } -} - -void BucketUnpacker::eraseExcludedComputedMetaProjFields() { - if (_spec.behavior() == BucketSpec::Behavior::kExclude) { - for (const auto& field : _spec.fieldSet()) { - _spec.eraseFromComputedMetaProjFields(field); - } - } -} - -void BucketUnpacker::setBucketSpec(BucketSpec&& bucketSpec) { - _spec = std::move(bucketSpec); - - eraseMetaFromFieldSetAndDetermineIncludeMeta(); - determineIncludeTimeField(); - eraseExcludedComputedMetaProjFields(); - - _includeMinTimeAsMetadata = _spec.includeMinTimeAsMetadata; - _includeMaxTimeAsMetadata = _spec.includeMaxTimeAsMetadata; -} - -void BucketUnpacker::setIncludeMinTimeAsMetadata() { - _includeMinTimeAsMetadata = true; -} - -void BucketUnpacker::setIncludeMaxTimeAsMetadata() { - _includeMaxTimeAsMetadata = true; -} - -const std::set& BucketUnpacker::fieldsToIncludeExcludeDuringUnpack() { - if (_unpackFieldsToIncludeExclude) { - return *_unpackFieldsToIncludeExclude; - } - - _unpackFieldsToIncludeExclude = std::set(); - const auto& metaProjFields = _spec.computedMetaProjFields(); - if (_spec.behavior() == BucketSpec::Behavior::kInclude) { - // For include, we unpack fieldSet - metaProjFields. - for (auto&& field : _spec.fieldSet()) { - if (metaProjFields.find(field) == metaProjFields.cend()) { - _unpackFieldsToIncludeExclude->insert(field); - } - } - } else { - // For exclude, we unpack everything but fieldSet + metaProjFields. - _unpackFieldsToIncludeExclude->insert(_spec.fieldSet().begin(), _spec.fieldSet().end()); - _unpackFieldsToIncludeExclude->insert(metaProjFields.begin(), metaProjFields.end()); - } - - return *_unpackFieldsToIncludeExclude; -} - -const std::set BucketUnpacker::reservedBucketFieldNames = { - timeseries::kBucketIdFieldName, - timeseries::kBucketDataFieldName, - timeseries::kBucketMetaFieldName, - timeseries::kBucketControlFieldName}; - -} // namespace mongo diff --git a/src/mongo/db/exec/bucket_unpacker.h b/src/mongo/db/exec/bucket_unpacker.h deleted file mode 100644 index 9a29372ee3b21..0000000000000 --- a/src/mongo/db/exec/bucket_unpacker.h +++ /dev/null @@ -1,453 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - - -#include -#include - -#include "mongo/bson/bsonobj.h" -#include "mongo/db/exec/document_value/document.h" -#include "mongo/db/matcher/expression.h" -#include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/timeseries/timeseries_constants.h" - -namespace mongo { -/** - * Carries parameters for unpacking a bucket. The order of operations applied to determine which - * fields are in the final document are: - * If we are in include mode: - * 1. Unpack all fields from the bucket. - * 2. Remove any fields not in _fieldSet, since we are in include mode. - * 3. Add fields from _computedMetaProjFields. - * If we are in exclude mode: - * 1. Unpack all fields from the bucket. - * 2. Add fields from _computedMetaProjFields. - * 3. Remove any fields in _fieldSet, since we are in exclude mode. - */ -class BucketSpec { -public: - // When unpacking buckets with kInclude we must produce measurements that contain the - // set of fields. Otherwise, if the kExclude option is used, the measurements will include the - // set difference between all fields in the bucket and the provided fields. - enum class Behavior { kInclude, kExclude }; - - BucketSpec() = default; - BucketSpec(const std::string& timeField, - const boost::optional& metaField, - const std::set& fields = {}, - Behavior behavior = Behavior::kExclude, - const std::set& computedProjections = {}, - bool usesExtendedRange = false); - BucketSpec(const BucketSpec&); - BucketSpec(BucketSpec&&); - BucketSpec(const TimeseriesOptions& tsOptions); - - BucketSpec& operator=(const BucketSpec&); - - // The user-supplied timestamp field name specified during time-series collection creation. - void setTimeField(std::string&& field); - const std::string& timeField() const; - HashedFieldName timeFieldHashed() const; - - // An optional user-supplied metadata field name specified during time-series collection - // creation. This field name is used during materialization of metadata fields of a measurement - // after unpacking. 
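The include/exclude ordering documented for this class reduces, per field, to a single equality test, the same one the inline determineIncludeField() helper at the end of this header uses: a field is materialized exactly when "the spec is in include mode" and "the field is listed in the field set" agree. A minimal restatement with made-up field names, purely for illustration:

```cpp
#include <iostream>
#include <set>
#include <string>
#include <vector>

enum class Behavior { kInclude, kExclude };

// A field survives unpacking when include-mode and list membership agree.
bool includeField(const std::string& field,
                  Behavior behavior,
                  const std::set<std::string>& fieldSet) {
    const bool isInclude = behavior == Behavior::kInclude;
    const bool listed = fieldSet.count(field) > 0;
    return isInclude == listed;
}

int main() {
    const std::vector<std::string> bucketFields{"time", "temp", "humidity", "tag"};
    const std::set<std::string> spec{"temp", "tag"};

    std::cout << std::boolalpha;
    for (const auto& f : bucketFields) {
        std::cout << f << ": include-mode=" << includeField(f, Behavior::kInclude, spec)
                  << " exclude-mode=" << includeField(f, Behavior::kExclude, spec) << "\n";
    }
    return 0;
}
```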
- void setMetaField(boost::optional&& field); - const boost::optional& metaField() const; - boost::optional metaFieldHashed() const; - - void setFieldSet(std::set& fieldSet) { - _fieldSet = std::move(fieldSet); - } - - void addIncludeExcludeField(const StringData& field) { - _fieldSet.emplace(field); - } - - void removeIncludeExcludeField(const std::string& field) { - _fieldSet.erase(field); - } - - const std::set& fieldSet() const { - return _fieldSet; - } - - void setBehavior(Behavior behavior) { - _behavior = behavior; - } - - Behavior behavior() const { - return _behavior; - } - - void addComputedMetaProjFields(const StringData& field) { - _computedMetaProjFields.emplace(field); - } - - const std::set& computedMetaProjFields() const { - return _computedMetaProjFields; - } - - void eraseFromComputedMetaProjFields(const std::string& field) { - _computedMetaProjFields.erase(field); - } - - void setUsesExtendedRange(bool usesExtendedRange) { - _usesExtendedRange = usesExtendedRange; - } - - bool usesExtendedRange() const { - return _usesExtendedRange; - } - - // Returns whether 'field' depends on a pushed down $addFields or computed $project. - bool fieldIsComputed(StringData field) const; - - // Says what to do when an event-level predicate cannot be mapped to a bucket-level predicate. - enum class IneligiblePredicatePolicy { - // When optimizing a query, it's fine if some predicates can't be pushed down. We'll still - // run the predicate after unpacking, so the results will be correct. - kIgnore, - // When creating a partial index, it's misleading if we can't handle a predicate: the user - // expects every predicate in the partialFilterExpression to contribute, somehow, to making - // the index smaller. - kError, - }; - - struct BucketPredicate { - // A loose predicate is a predicate which returns true when any measures of a bucket - // matches. - std::unique_ptr loosePredicate; - - // A tight predicate is a predicate which returns true when all measures of a bucket - // matches. - std::unique_ptr tightPredicate; - }; - - /** - * Takes a predicate after $_internalUnpackBucket on a bucketed field as an argument and - * attempts to map it to new predicates on the 'control' field. There will be a 'loose' - * predicate that will match if some of the event field matches, also a 'tight' predicate that - * will match if all of the event field matches. For example, the event level predicate {a: - * {$gt: 5}} will generate the loose predicate {control.max.a: {$_internalExprGt: 5}}, and the - * tight predicate {control.min.a: {$_internalExprGt: 5}}. The loose predicate will be added - * before the - * $_internalUnpackBucket stage to filter out buckets with no match. The tight predicate will - * be used to evaluate predicate on bucket level to avoid unnecessary event level evaluation. - * - * If the original predicate is on the bucket's timeField we may also create a new loose - * predicate on the '_id' field to assist in index utilization. For example, the predicate - * {time: {$lt: new Date(...)}} will generate the following predicate: - * {$and: [ - * {_id: {$lt: ObjectId(...)}}, - * {control.min.time: {$_internalExprLt: new Date(...)}} - * ]} - * - * If the provided predicate is ineligible for this mapping, the function will return a nullptr. - * This should be interpreted as an always-true predicate. - * - * When using IneligiblePredicatePolicy::kIgnore, if the predicate can't be pushed down, it - * returns null. When using IneligiblePredicatePolicy::kError it raises a user error. 
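To make the loose/tight distinction concrete, the standalone sketch below only renders the rewritten predicates as text, and only for the two comparison operators mentioned in this comment; the real routine builds MatchExpression trees and handles many more predicate shapes.

```cpp
#include <iostream>
#include <string>

struct BucketPredicates {
    std::string loose;  // Matches if *some* event in the bucket could match.
    std::string tight;  // Matches only if *every* event in the bucket matches.
};

// For a comparison on 'path', the loose predicate consults the control bound
// that could admit a match, and the tight predicate the opposite bound.
BucketPredicates mapComparison(const std::string& path,
                               const std::string& op,
                               const std::string& value) {
    const std::string minPath = "control.min." + path;
    const std::string maxPath = "control.max." + path;
    if (op == "$gt") {
        return {"{" + maxPath + ": {$_internalExprGt: " + value + "}}",
                "{" + minPath + ": {$_internalExprGt: " + value + "}}"};
    }
    // $lt is symmetric: a bucket may contain a match if its minimum is small
    // enough; every event matches only if even the maximum is small enough.
    return {"{" + minPath + ": {$_internalExprLt: " + value + "}}",
            "{" + maxPath + ": {$_internalExprLt: " + value + "}}"};
}

int main() {
    auto p = mapComparison("a", "$gt", "5");
    std::cout << "loose: " << p.loose << "\n"   // {control.max.a: {$_internalExprGt: 5}}
              << "tight: " << p.tight << "\n";  // {control.min.a: {$_internalExprGt: 5}}
    return 0;
}
```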
- */ - static BucketPredicate createPredicatesOnBucketLevelField( - const MatchExpression* matchExpr, - const BucketSpec& bucketSpec, - int bucketMaxSpanSeconds, - ExpressionContext::CollationMatchesDefault collationMatchesDefault, - const boost::intrusive_ptr& pExpCtx, - bool haveComputedMetaField, - bool includeMetaField, - bool assumeNoMixedSchemaData, - IneligiblePredicatePolicy policy); - - /** - * Converts an event-level predicate to a bucket-level predicate, such that - * - * {$unpackBucket ...} {$match: } - * - * gives the same result as - * - * {$match: } {$unpackBucket ...} {$match: } - * - * This means the bucket-level predicate must include every bucket that might contain an event - * matching the event-level predicate. - * - * This helper is used when creating a partial index on a time-series collection: logically, - * we index only events that match the event-level partialFilterExpression, but physically we - * index any bucket that matches the bucket-level partialFilterExpression. - * - * When using IneligiblePredicatePolicy::kIgnore, if the predicate can't be pushed down, it - * returns null. When using IneligiblePredicatePolicy::kError it raises a user error. - * - * Returns a boolean (alongside the bucket-level predicate) describing if the result contains - * a metric predicate. - */ - static std::pair pushdownPredicate( - const boost::intrusive_ptr& expCtx, - const TimeseriesOptions& tsOptions, - ExpressionContext::CollationMatchesDefault collationMatchesDefault, - const BSONObj& predicate, - bool haveComputedMetaField, - bool includeMetaField, - bool assumeNoMixedSchemaData, - IneligiblePredicatePolicy policy); - - bool includeMinTimeAsMetadata = false; - bool includeMaxTimeAsMetadata = false; - -private: - // The set of field names in the data region that should be included or excluded. - std::set _fieldSet; - Behavior _behavior = Behavior::kExclude; - - // Set of computed meta field projection names. Added at the end of materialized - // measurements. - std::set _computedMetaProjFields; - - std::string _timeField; - boost::optional _timeFieldHashed; - - boost::optional _metaField = boost::none; - boost::optional _metaFieldHashed = boost::none; - bool _usesExtendedRange = false; -}; - -/** - * BucketUnpacker will unpack bucket fields for metadata and the provided fields. - */ -class BucketUnpacker { -public: - /** - * Returns the number of measurements in the bucket in O(1) time. - */ - static int computeMeasurementCount(const BSONObj& bucket, StringData timeField); - - // Set of field names reserved for time-series buckets. - static const std::set reservedBucketFieldNames; - - BucketUnpacker(); - BucketUnpacker(BucketSpec spec); - BucketUnpacker(const BucketUnpacker& other) = delete; - BucketUnpacker(BucketUnpacker&& other); - ~BucketUnpacker(); - BucketUnpacker& operator=(const BucketUnpacker& rhs) = delete; - BucketUnpacker& operator=(BucketUnpacker&& rhs); - - /** - * This method will continue to materialize Documents until the bucket is exhausted. A - * precondition of this method is that 'hasNext()' must be true. - */ - Document getNext(); - - /** - * Similar to the previous method, but return a BSON object instead. - */ - BSONObj getNextBson(); - - /** - * This method will extract the j-th measurement from the bucket. A precondition of this method - * is that j >= 0 && j <= the number of measurements within the underlying bucket. 
- */ - Document extractSingleMeasurement(int j); - - /** - * Returns true if there is more data to fetch, is the precondition for 'getNext'. - */ - bool hasNext() const { - return _hasNext; - } - - /** - * Makes a copy of this BucketUnpacker that is detached from current bucket. The new copy needs - * to be reset to a new bucket object to perform unpacking. - */ - BucketUnpacker copy() const { - BucketUnpacker unpackerCopy; - unpackerCopy._spec = _spec; - unpackerCopy._includeMetaField = _includeMetaField; - unpackerCopy._includeTimeField = _includeTimeField; - return unpackerCopy; - } - - /** - * This resets the unpacker to prepare to unpack a new bucket described by the given document. - */ - void reset(BSONObj&& bucket, bool bucketMatchedQuery = false); - - BucketSpec::Behavior behavior() const { - return _spec.behavior(); - } - - const BucketSpec& bucketSpec() const { - return _spec; - } - - const BSONObj& bucket() const { - return _bucket; - } - - bool bucketMatchedQuery() const { - return _bucketMatchedQuery; - } - - bool includeMetaField() const { - return _includeMetaField; - } - - bool includeTimeField() const { - return _includeTimeField; - } - - int32_t numberOfMeasurements() const { - return _numberOfMeasurements; - } - - bool includeMinTimeAsMetadata() const { - return _includeMinTimeAsMetadata; - } - - bool includeMaxTimeAsMetadata() const { - return _includeMaxTimeAsMetadata; - } - - const std::string& getTimeField() const { - return _spec.timeField(); - } - - const boost::optional& getMetaField() const { - return _spec.metaField(); - } - - std::string getMinField(StringData field) const { - return std::string{timeseries::kControlMinFieldNamePrefix} + field; - } - - std::string getMaxField(StringData field) const { - return std::string{timeseries::kControlMaxFieldNamePrefix} + field; - } - - bool isClosedBucket() const { - return _closedBucket; - } - - void setBucketSpec(BucketSpec&& bucketSpec); - void setIncludeMinTimeAsMetadata(); - void setIncludeMaxTimeAsMetadata(); - - // Add computed meta projection names to the bucket specification. - void addComputedMetaProjFields(const std::vector& computedFieldNames); - - // Fill _spec.unpackFieldsToIncludeExclude with final list of fields to include/exclude during - // unpacking. Only calculates the list the first time it is called. - const std::set& fieldsToIncludeExcludeDuringUnpack(); - - class UnpackingImpl; - -private: - // Determines if timestamp values should be included in the materialized measurements. - void determineIncludeTimeField(); - - // Removes metaField from the field set and determines whether metaField should be - // included in the materialized measurements. - void eraseMetaFromFieldSetAndDetermineIncludeMeta(); - - // Erase computed meta projection fields if they are present in the exclusion field set. - void eraseExcludedComputedMetaProjFields(); - - BucketSpec _spec; - - std::unique_ptr _unpackingImpl; - - bool _hasNext = false; - - // A flag used to mark that the entire bucket matches the following $match predicate. - bool _bucketMatchedQuery = false; - - // A flag used to mark that the timestamp value should be materialized in measurements. - bool _includeTimeField{false}; - - // A flag used to mark that a bucket's metadata value should be materialized in measurements. - bool _includeMetaField{false}; - - // A flag used to mark that a bucket's min time should be materialized as metadata. 
- bool _includeMinTimeAsMetadata{false}; - - // A flag used to mark that a bucket's max time should be materialized as metadata. - bool _includeMaxTimeAsMetadata{false}; - - // The bucket being unpacked. - BSONObj _bucket; - - // Since the metadata value is the same across all materialized measurements we can cache the - // metadata Value in the reset phase and use it to materialize the metadata in each - // measurement. - Value _metaValue; - - BSONElement _metaBSONElem; - - // Since the bucket min time is the same across all materialized measurements, we can cache the - // value in the reset phase and use it to materialize as a metadata field in each measurement - // if required by the pipeline. - boost::optional _minTime; - - // Since the bucket max time is the same across all materialized measurements, we can cache the - // value in the reset phase and use it to materialize as a metadata field in each measurement - // if required by the pipeline. - boost::optional _maxTime; - - // Flag indicating whether this bucket is closed, as determined by the presence of the - // 'control.closed' field. - bool _closedBucket; - - // Map for the computed meta field projections. Updated for - // every bucket upon reset(). - stdx::unordered_map _computedMetaProjections; - - // The number of measurements in the bucket. - int32_t _numberOfMeasurements = 0; - - // Final list of fields to include/exclude during unpacking. This is computed once during the - // first doGetNext call so we don't have to recalculate every time we reach a new bucket. - boost::optional> _unpackFieldsToIncludeExclude = boost::none; -}; - -/** - * Determines if an arbitrary field should be included in the materialized measurements. - */ -inline bool determineIncludeField(StringData fieldName, - BucketSpec::Behavior unpackerBehavior, - const std::set& unpackFieldsToIncludeExclude) { - const bool isInclude = unpackerBehavior == BucketSpec::Behavior::kInclude; - const bool unpackFieldsContains = unpackFieldsToIncludeExclude.find(fieldName.toString()) != - unpackFieldsToIncludeExclude.cend(); - return isInclude == unpackFieldsContains; -} -} // namespace mongo diff --git a/src/mongo/db/exec/bucket_unpacker_test.cpp b/src/mongo/db/exec/bucket_unpacker_test.cpp index ed073dc79cde3..d8b8911d630c2 100644 --- a/src/mongo/db/exec/bucket_unpacker_test.cpp +++ b/src/mongo/db/exec/bucket_unpacker_test.cpp @@ -27,15 +27,35 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/bson/util/bsoncolumn.h" #include "mongo/bson/util/bsoncolumnbuilder.h" -#include "mongo/db/exec/bucket_unpacker.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/timeseries/bucket_spec.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" #include "mongo/db/timeseries/bucket_compression.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decimal_counter.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/exec/cached_plan.cpp b/src/mongo/db/exec/cached_plan.cpp index a10a0f8ab0a40..cfa9b841b9b63 100644 --- a/src/mongo/db/exec/cached_plan.cpp +++ b/src/mongo/db/exec/cached_plan.cpp @@ -28,30 +28,41 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/cached_plan.h" - #include +#include +#include +#include -#include "mongo/db/catalog/collection.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/exec/cached_plan.h" #include "mongo/db/exec/multi_plan.h" #include "mongo/db/exec/plan_cache_util.h" -#include "mongo/db/exec/scoped_timer.h" #include "mongo/db/exec/trial_period_utils.h" -#include "mongo/db/exec/working_set_common.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/query/plan_cache.h" #include "mongo/db/query/plan_cache_key_factory.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_explainer_factory.h" #include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/stage_builder_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" -#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -62,7 +73,7 @@ namespace mongo { const char* CachedPlanStage::kStageType = "CACHED_PLAN"; CachedPlanStage::CachedPlanStage(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, WorkingSet* ws, CanonicalQuery* cq, const QueryPlannerParams& params, @@ -213,16 +224,16 @@ Status CachedPlanStage::replan(PlanYieldPolicy* yieldPolicy, bool shouldCache, s if (shouldCache) { // Deactivate the current cache entry. 
- const auto& coll = collection(); - auto cache = CollectionQueryInfo::get(coll).getPlanCache(); - cache->deactivate(plan_cache_key_factory::make(*_canonicalQuery, coll)); + auto cache = CollectionQueryInfo::get(collectionPtr()).getPlanCache(); + cache->deactivate( + plan_cache_key_factory::make(*_canonicalQuery, collectionPtr())); } // Use the query planning module to plan the whole query. auto statusWithMultiPlanSolns = QueryPlanner::plan(*_canonicalQuery, _plannerParams); if (!statusWithMultiPlanSolns.isOK()) { return statusWithMultiPlanSolns.getStatus().withContext( - str::stream() << "error processing query: " << _canonicalQuery->toString() + str::stream() << "error processing query: " << _canonicalQuery->toStringForErrorMsg() << " planner returned error"); } auto solutions = std::move(statusWithMultiPlanSolns.getValue()); diff --git a/src/mongo/db/exec/cached_plan.h b/src/mongo/db/exec/cached_plan.h index 35472f453cb5a..cb45329457f60 100644 --- a/src/mongo/db/exec/cached_plan.h +++ b/src/mongo/db/exec/cached_plan.h @@ -29,15 +29,24 @@ #pragma once +#include #include #include +#include +#include "mongo/base/status.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_all_indices_stage.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" namespace mongo { @@ -57,7 +66,7 @@ class PlanYieldPolicy; class CachedPlanStage final : public RequiresAllIndicesStage { public: CachedPlanStage(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, WorkingSet* ws, CanonicalQuery* cq, const QueryPlannerParams& params, diff --git a/src/mongo/db/exec/collection_scan.cpp b/src/mongo/db/exec/collection_scan.cpp index 0a3eceafa02c1..7aab4653dbed0 100644 --- a/src/mongo/db/exec/collection_scan.cpp +++ b/src/mongo/db/exec/collection_scan.cpp @@ -27,25 +27,44 @@ * it in the license file. 
*/ -#include "mongo/util/assert_util.h" - -#include "mongo/db/exec/collection_scan.h" - +#include +#include +#include +#include #include +#include +#include + +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" +#include "mongo/db/client.h" +#include "mongo/db/exec/collection_scan.h" #include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/filter.h" -#include "mongo/db/exec/scoped_timer.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/plan_executor_impl.h" -#include "mongo/db/record_id_helpers.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" -#include "mongo/util/fail_point.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -56,15 +75,17 @@ using std::unique_ptr; using std::vector; namespace { -const char* getStageName(const CollectionPtr& coll, const CollectionScanParams& params) { - return (!coll->ns().isOplog() && (params.minRecord || params.maxRecord)) ? "CLUSTERED_IXSCAN" - : "COLLSCAN"; +const char* getStageName(const VariantCollectionPtrOrAcquisition& coll, + const CollectionScanParams& params) { + return (!coll.getCollectionPtr()->ns().isOplog() && (params.minRecord || params.maxRecord)) + ? "CLUSTERED_IXSCAN" + : "COLLSCAN"; } } // namespace CollectionScan::CollectionScan(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const CollectionScanParams& params, WorkingSet* workingSet, const MatchExpression* filter) @@ -72,6 +93,7 @@ CollectionScan::CollectionScan(ExpressionContext* expCtx, _workingSet(workingSet), _filter((filter && !filter->isTriviallyTrue()) ? filter : nullptr), _params(params) { + const auto& collPtr = collection.getCollectionPtr(); // Explain reports the direction of the collection scan. _specificStats.direction = params.direction; _specificStats.minRecord = params.minRecord; @@ -81,10 +103,10 @@ CollectionScan::CollectionScan(ExpressionContext* expCtx, // The 'minRecord' and 'maxRecord' parameters are used for a special optimization that // applies only to forwards scans of the oplog and scans on clustered collections. 
invariant(!params.resumeAfterRecordId); - if (collection->ns().isOplogOrChangeCollection()) { + if (collPtr->ns().isOplogOrChangeCollection()) { invariant(params.direction == CollectionScanParams::FORWARD); } else { - invariant(collection->isClustered()); + invariant(collPtr->isClustered()); } } @@ -94,7 +116,7 @@ CollectionScan::CollectionScan(ExpressionContext* expCtx, tassert(6125000, "Only collection scans on clustered collections may specify recordId " "BoundInclusion policies", - collection->isClustered()); + collPtr->isClustered()); if (filter) { // The filter is applied after the ScanBoundInclusion is considered. @@ -112,8 +134,7 @@ CollectionScan::CollectionScan(ExpressionContext* expCtx, "max"_attr = (!_params.maxRecord) ? "none" : _params.maxRecord->toString()); tassert(6521000, "Expected an oplog or a change collection with 'shouldTrackLatestOplogTimestamp'", - !_params.shouldTrackLatestOplogTimestamp || - collection->ns().isOplogOrChangeCollection()); + !_params.shouldTrackLatestOplogTimestamp || collPtr->ns().isOplogOrChangeCollection()); if (params.assertTsHasNotFallenOff) { tassert(6521001, @@ -139,18 +160,19 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { return PlanStage::IS_EOF; } - if (_params.lowPriority && !_priority && opCtx()->getClient()->isFromUserConnection() && + if (_params.lowPriority && !_priority && gDeprioritizeUnboundedUserCollectionScans.load() && + opCtx()->getClient()->isFromUserConnection() && opCtx()->lockState()->shouldWaitForTicket()) { _priority.emplace(opCtx()->lockState(), AdmissionContext::Priority::kLow); } boost::optional record; const bool needToMakeCursor = !_cursor; + const auto& collPtr = collectionPtr(); const auto ret = handlePlanStageYield( expCtx(), "CollectionScan", - collection()->ns().ns(), [&] { if (needToMakeCursor) { const bool forward = _params.direction == CollectionScanParams::FORWARD; @@ -167,14 +189,13 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { // sure that we are using a fresh storage engine snapshot while waiting. // Otherwise, we will end up reading from the snapshot where the oplog entries // are not yet visible even after the wait. - invariant(!_params.tailable && collection()->ns().isOplog()); + invariant(!_params.tailable && collPtr->ns().isOplog()); opCtx()->recoveryUnit()->abandonSnapshot(); - collection()->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible( - opCtx()); + collPtr->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(opCtx()); } - _cursor = collection()->getCursor(opCtx(), forward); + _cursor = collPtr->getCursor(opCtx(), forward); if (!_lastSeenId.isNull()) { invariant(_params.tailable); @@ -207,9 +228,9 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { uasserted(ErrorCodes::KeyNotFound, str::stream() << "Failed to resume collection scan: the recordId from " - "which we are " - << "attempting to resume no longer exists in the collection. " - << "recordId: " << recordIdToSeek); + "which we are attempting to resume no longer exists in " + "the collection: " + << recordIdToSeek); } } } @@ -255,7 +276,7 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { // For change collections, advance '_latestOplogEntryTimestamp' to the current snapshot // timestamp, i.e. the latest available timestamp in the global oplog. 
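The new gDeprioritizeUnboundedUserCollectionScans guard shown in this hunk makes the low-priority admission decision a conjunction of independent conditions. A trivial restatement of that gate, with the server types replaced by plain booleans for illustration (the real check additionally skips if a priority has already been set):

```cpp
#include <iostream>

// All of the conditions from the diff must hold before a collection scan is
// admitted at low priority; 'flagEnabled' is the newly added server parameter.
bool shouldDeprioritizeScan(bool scanIsUnbounded,
                            bool flagEnabled,
                            bool fromUserConnection,
                            bool mustWaitForTicket) {
    return scanIsUnbounded && flagEnabled && fromUserConnection && mustWaitForTicket;
}

int main() {
    std::cout << std::boolalpha;
    // An internal (non-user) connection is never deprioritized.
    std::cout << shouldDeprioritizeScan(true, true, false, true) << "\n";  // false
    // An unbounded user scan that must queue for a ticket is deprioritized.
    std::cout << shouldDeprioritizeScan(true, true, true, true) << "\n";   // true
    return 0;
}
```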
- if (_params.shouldTrackLatestOplogTimestamp && collection()->ns().isChangeCollection()) { + if (_params.shouldTrackLatestOplogTimestamp && collPtr->ns().isChangeCollection()) { setLatestOplogEntryTimestampToReadTimestamp(); } _priority.reset(); @@ -280,6 +301,13 @@ PlanStage::StageState CollectionScan::doWork(WorkingSetID* out) { } void CollectionScan::setLatestOplogEntryTimestampToReadTimestamp() { + // Since this method is only ever called when iterating a change collection, the following check + // effectively disables optime advancement in Serverless, for reasons outlined in SERVER-76288. + // TODO SERVER-76309: re-enable optime advancement to support sharding in Serverless. + if (collectionPtr()->ns().isChangeCollection()) { + return; + } + const auto readTimestamp = opCtx()->recoveryUnit()->getPointInTimeReadTimestamp(opCtx()); // If we don't have a read timestamp, we take no action here. @@ -473,7 +501,8 @@ void CollectionScan::doDetachFromOperationContext() { } void CollectionScan::doReattachToOperationContext() { - if (_params.lowPriority && opCtx()->getClient()->isFromUserConnection() && + if (_params.lowPriority && gDeprioritizeUnboundedUserCollectionScans.load() && + opCtx()->getClient()->isFromUserConnection() && opCtx()->lockState()->shouldWaitForTicket()) { _priority.emplace(opCtx()->lockState(), AdmissionContext::Priority::kLow); } diff --git a/src/mongo/db/exec/collection_scan.h b/src/mongo/db/exec/collection_scan.h index 8a465b9cdc045..b758cbdc1f56a 100644 --- a/src/mongo/db/exec/collection_scan.h +++ b/src/mongo/db/exec/collection_scan.h @@ -29,12 +29,25 @@ #pragma once +#include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/db/storage/record_store.h" #include "mongo/s/resharding/resume_token_gen.h" namespace mongo { @@ -53,7 +66,7 @@ class OperationContext; class CollectionScan final : public RequiresCollectionStage { public: CollectionScan(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const CollectionScanParams& params, WorkingSet* workingSet, const MatchExpression* filter); diff --git a/src/mongo/db/exec/count.cpp b/src/mongo/db/exec/count.cpp index 832db7435cbd9..a19c3563c19e5 100644 --- a/src/mongo/db/exec/count.cpp +++ b/src/mongo/db/exec/count.cpp @@ -27,15 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/count.h" - #include +#include + +#include #include "mongo/db/catalog/collection.h" -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/exec/count.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/exec/count.h b/src/mongo/db/exec/count.h index b8de89a959409..01e28a232d21d 100644 --- a/src/mongo/db/exec/count.h +++ b/src/mongo/db/exec/count.h @@ -29,7 +29,13 @@ #pragma once +#include + #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" namespace mongo { diff --git a/src/mongo/db/exec/count_scan.cpp b/src/mongo/db/exec/count_scan.cpp index 20f1330fe0714..ae8337b4651c7 100644 --- a/src/mongo/db/exec/count_scan.cpp +++ b/src/mongo/db/exec/count_scan.cpp @@ -29,12 +29,23 @@ #include "mongo/db/exec/count_scan.h" +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include #include +#include -#include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/exec/scoped_timer.h" +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/ordering.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index_names.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/storage/index_entry_comparison.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -80,7 +91,7 @@ const char* CountScan::kStageType = "COUNT_SCAN"; // the CountScanParams rather than resolving them via the IndexDescriptor, since these may differ // from the descriptor's contents. CountScan::CountScan(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, CountScanParams params, WorkingSet* workingSet) : RequiresIndexStage(kStageType, expCtx, collection, params.indexDescriptor, workingSet), @@ -119,11 +130,7 @@ PlanStage::StageState CountScan::doWork(WorkingSetID* out) { const auto ret = handlePlanStageYield( expCtx(), "CountScan", - collection()->ns().ns(), [&] { - // We don't care about the keys. - const auto kWantLoc = SortedDataInterface::Cursor::kWantLoc; - if (needInit) { // First call to work(). Perform cursor init. 
_cursor = indexAccessMethod()->newCursor(opCtx()); @@ -137,7 +144,7 @@ PlanStage::StageState CountScan::doWork(WorkingSetID* out) { _startKeyInclusive); entry = _cursor->seek(keyStringForSeek); } else { - entry = _cursor->next(kWantLoc); + entry = _cursor->next(SortedDataInterface::Cursor::KeyInclusion::kExclude); } return PlanStage::ADVANCED; }, diff --git a/src/mongo/db/exec/count_scan.h b/src/mongo/db/exec/count_scan.h index b5d6a4fe4a9e5..6995160b35fc3 100644 --- a/src/mongo/db/exec/count_scan.h +++ b/src/mongo/db/exec/count_scan.h @@ -29,13 +29,29 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_index_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" #include "mongo/db/storage/sorted_data_interface.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util_core.h" namespace mongo { @@ -90,7 +106,7 @@ struct CountScanParams { class CountScan final : public RequiresIndexStage { public: CountScan(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, CountScanParams params, WorkingSet* workingSet); diff --git a/src/mongo/db/exec/delete_stage.cpp b/src/mongo/db/exec/delete_stage.cpp index 7f29f46a89e0b..f69532b814faf 100644 --- a/src/mongo/db/exec/delete_stage.cpp +++ b/src/mongo/db/exec/delete_stage.cpp @@ -28,25 +28,38 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/exec/delete_stage.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/curop.h" -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/exec/delete_stage.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/write_stage_common.h" -#include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/query/canonical_query.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/s/collection_sharding_state.h" -#include "mongo/db/service_context.h" -#include "mongo/logv2/log.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -75,7 +88,7 @@ bool shouldRestartDeleteIfNoLongerMatches(const DeleteStageParams* params) { 
DeleteStage::DeleteStage(ExpressionContext* expCtx, std::unique_ptr params, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child) : DeleteStage(kStageType.rawData(), expCtx, std::move(params), ws, collection, child) {} @@ -83,12 +96,12 @@ DeleteStage::DeleteStage(const char* stageType, ExpressionContext* expCtx, std::unique_ptr params, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child) - : RequiresMutableCollectionStage(stageType, expCtx, collection), + : RequiresWritableCollectionStage(stageType, expCtx, collection), _params(std::move(params)), _ws(ws), - _preWriteFilter(opCtx(), collection->ns()), + _preWriteFilter(opCtx(), collection.nss()), _idRetrying(WorkingSet::INVALID_ID), _idReturning(WorkingSet::INVALID_ID) { _children.emplace_back(child); @@ -169,10 +182,9 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) { const auto ret = handlePlanStageYield( expCtx(), "DeleteStage ensureStillMatches", - collection()->ns().ns(), [&] { docStillMatches = write_stage_common::ensureStillMatches( - collection(), opCtx(), _ws, id, _params->canonicalQuery); + collectionPtr(), opCtx(), _ws, id, _params->canonicalQuery); return PlanStage::NEED_TIME; }, [&] { @@ -201,7 +213,7 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) { auto [immediateReturnStageState, fromMigrate] = _preWriteFilter.checkIfNotWritable( member->doc.value(), "delete"_sd, - collection()->ns(), + collectionPtr()->ns(), [&](const ExceptionFor& ex) { planExecutorShardingCriticalSectionFuture(opCtx()) = ex->getCriticalSectionSignal(); memberFreer.dismiss(); // Keep this member around so we can retry deleting it. @@ -225,14 +237,9 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) { Snapshotted memberDoc = member->doc; BSONObj bsonObjDoc = memberDoc.value().toBson(); - if (_params->removeSaver) { - uassertStatusOK(_params->removeSaver->goingToDelete(bsonObjDoc)); - } - handlePlanStageYield( expCtx(), "DeleteStage saveState", - collection()->ns().ns(), [&] { child()->saveState(); return PlanStage::NEED_TIME /* unused */; @@ -248,12 +255,11 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) { const auto ret = handlePlanStageYield( expCtx(), "DeleteStage deleteDocument", - collection()->ns().ns(), [&] { WriteUnitOfWork wunit(opCtx()); collection_internal::deleteDocument( opCtx(), - collection(), + collectionPtr(), Snapshotted(memberDoc.snapshotId(), bsonObjDoc), _params->stmtId, recordId, @@ -301,35 +307,42 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) { member->transitionToOwnedObj(); } - // As restoreState may restore (recreate) cursors, cursors are tied to the transaction in - // which they are created, and a WriteUnitOfWork is a transaction, make sure to restore the - // state outside of the WriteUnitOfWork. - const auto restoreStateRet = handlePlanStageYield( - expCtx(), - "DeleteStage restoreState", - collection()->ns().ns(), - [&] { - child()->restoreState(&collection()); - return PlanStage::NEED_TIME; - }, - [&] { - // yieldHandler - // Note we don't need to retry anything in this case since the - // delete already was committed. However, we still need to return - // the deleted document (if it was requested). - if (_params->returnDeleted) { - // member->obj should refer to the deleted document. 
- invariant(member->getState() == WorkingSetMember::OWNED_OBJ); - - _idReturning = id; - // Keep this member around so that we can return it on - // the next work() call. - memberFreer.dismiss(); - } - *out = WorkingSet::INVALID_ID; - }); - if (restoreStateRet != PlanStage::NEED_TIME) { - return ret; + // As restoreState may restore (recreate) cursors, cursors are tied to the transaction in which + // they are created, and a WriteUnitOfWork is a transaction, make sure to restore the state + // outside of the WriteUnitOfWork. + // + // If this stage is already exhausted it won't use its children stages anymore and therefore + // there's no need to restore them. Avoid restoring them so that there's no possibility of + // requiring yielding at this point. Restoring from yield could fail due to a sharding placement + // change. Throwing a StaleConfig error is undesirable after a "delete one" operation has + // already performed a write because the router would retry. + if (!isEOF()) { + const auto restoreStateRet = handlePlanStageYield( + expCtx(), + "DeleteStage restoreState", + [&] { + child()->restoreState(&collectionPtr()); + return PlanStage::NEED_TIME; + }, + [&] { + // yieldHandler + // Note we don't need to retry anything in this case since the delete already was + // committed. However, we still need to return the deleted document (if it was + // requested). + if (_params->returnDeleted) { + // member->obj should refer to the deleted document. + invariant(member->getState() == WorkingSetMember::OWNED_OBJ); + + _idReturning = id; + // Keep this member around so that we can return it on + // the next work() call. + memberFreer.dismiss(); + } + *out = WorkingSet::INVALID_ID; + }); + if (restoreStateRet != PlanStage::NEED_TIME) { + return ret; + } } if (_params->returnDeleted) { @@ -341,16 +354,25 @@ PlanStage::StageState DeleteStage::doWork(WorkingSetID* out) { return PlanStage::ADVANCED; } - return PlanStage::NEED_TIME; + return isEOF() ? PlanStage::IS_EOF : PlanStage::NEED_TIME; } void DeleteStage::doRestoreStateRequiresCollection() { - const NamespaceString& ns = collection()->ns(); + const NamespaceString& ns = collectionPtr()->ns(); uassert(ErrorCodes::PrimarySteppedDown, - str::stream() << "Demoted from primary while removing from " << ns.ns(), + str::stream() << "Demoted from primary while removing from " + << ns.toStringForErrorMsg(), !opCtx()->writesAreReplicated() || repl::ReplicationCoordinator::get(opCtx())->canAcceptWritesFor(opCtx(), ns)); + // Single deletes never yield after having already deleted one document. Otherwise restore could + // fail (e.g. due to a sharding placement change) and we'd fail to report in the response the + // already deleted documents. 
+ const bool singleDeleteAndAlreadyDeleted = !_params->isMulti && _specificStats.docsDeleted > 0; + tassert(7711600, + "Single delete should never restore after having already deleted one document.", + !singleDeleteAndAlreadyDeleted || _params->isExplain); + _preWriteFilter.restoreState(); } diff --git a/src/mongo/db/exec/delete_stage.h b/src/mongo/db/exec/delete_stage.h index bf9b1e9cc5213..d70c8153c5c55 100644 --- a/src/mongo/db/exec/delete_stage.h +++ b/src/mongo/db/exec/delete_stage.h @@ -29,11 +29,24 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/exec/write_stage_common.h" #include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/profile_filter.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/session/logical_session_id.h" -#include "mongo/db/storage/remove_saver.h" +#include "mongo/db/shard_role.h" namespace mongo { @@ -79,15 +92,6 @@ struct DeleteStageParams { // Optional. When not null, delete metrics are recorded here. OpDebug* opDebug; - // Optional. When not null, send document about to be deleted to removeSaver. - // RemoveSaver is called before actual deletes are executed. - // Note: the differentiating factor between this and returnDeleted is that the caller will get - // the deleted document after it was already deleted. That means that if the caller would have - // to use the removeSaver at that point, they miss the document if the process dies before it - // reaches the removeSaver. However, this is still best effort since the RemoveSaver - // operates on a different persistence system from the the database storage engine. - std::unique_ptr removeSaver; - // Determines how the delete stats should be incremented. Will be incremented by 1 if the // function is empty. DocumentCounter numStatsForDoc; @@ -101,7 +105,7 @@ struct DeleteStageParams { * Callers of work() must be holding a write lock (and, for replicated deletes, callers must have * had the replication coordinator approve the write). 
*/ -class DeleteStage : public RequiresMutableCollectionStage { +class DeleteStage : public RequiresWritableCollectionStage { DeleteStage(const DeleteStage&) = delete; DeleteStage& operator=(const DeleteStage&) = delete; @@ -111,14 +115,14 @@ class DeleteStage : public RequiresMutableCollectionStage { DeleteStage(ExpressionContext* expCtx, std::unique_ptr params, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child); DeleteStage(const char* stageType, ExpressionContext* expCtx, std::unique_ptr params, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child); bool isEOF(); diff --git a/src/mongo/db/exec/distinct_scan.cpp b/src/mongo/db/exec/distinct_scan.cpp index 310fc417139b0..a49e1d69dd3e6 100644 --- a/src/mongo/db/exec/distinct_scan.cpp +++ b/src/mongo/db/exec/distinct_scan.cpp @@ -30,13 +30,18 @@ #include "mongo/db/exec/distinct_scan.h" #include +#include + +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include -#include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/exec/filter.h" -#include "mongo/db/exec/scoped_timer.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -48,7 +53,7 @@ using std::vector; const char* DistinctScan::kStageType = "DISTINCT_SCAN"; DistinctScan::DistinctScan(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, DistinctParams params, WorkingSet* workingSet) : RequiresIndexStage(kStageType, expCtx, collection, params.indexDescriptor, workingSet), @@ -84,7 +89,6 @@ PlanStage::StageState DistinctScan::doWork(WorkingSetID* out) { const auto ret = handlePlanStageYield( expCtx(), "DistinctScan", - collection()->ns().ns(), [&] { if (!_cursor) _cursor = indexAccessMethod()->newCursor(opCtx(), _scanDirection == 1); diff --git a/src/mongo/db/exec/distinct_scan.h b/src/mongo/db/exec/distinct_scan.h index 81b8dbac3cc5a..7d2685657adc9 100644 --- a/src/mongo/db/exec/distinct_scan.h +++ b/src/mongo/db/exec/distinct_scan.h @@ -30,11 +30,31 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_index_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/assert_util_core.h" namespace mongo { @@ -98,7 +118,7 @@ struct DistinctParams { class DistinctScan final : public RequiresIndexStage { public: DistinctScan(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, DistinctParams params, WorkingSet* workingSet); diff --git 
a/src/mongo/db/exec/document_value/SConscript b/src/mongo/db/exec/document_value/SConscript index 9d0947da7495c..5899cb089fc02 100644 --- a/src/mongo/db/exec/document_value/SConscript +++ b/src/mongo/db/exec/document_value/SConscript @@ -15,7 +15,6 @@ env.Library( '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/pipeline/field_path', '$BUILD_DIR/mongo/db/query/datetime/date_time_support', - '$BUILD_DIR/mongo/util/intrusive_counter', ], ) diff --git a/src/mongo/db/exec/document_value/document.cpp b/src/mongo/db/exec/document_value/document.cpp index b71b109aa65c7..686f66058c55a 100644 --- a/src/mongo/db/exec/document_value/document.cpp +++ b/src/mongo/db/exec/document_value/document.cpp @@ -29,12 +29,23 @@ #include "mongo/db/exec/document_value/document.h" -#include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" #include "mongo/bson/bson_depth.h" -#include "mongo/db/jsobj.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/pipeline/field_path.h" -#include "mongo/db/pipeline/resume_token.h" +#include "mongo/stdx/variant.h" #include "mongo/util/str.h" namespace mongo { @@ -79,7 +90,7 @@ getNestedFieldHelperBSON(BSONElement elt, const FieldPath& fp, size_t level) { } } // namespace -const DocumentStorage DocumentStorage::kEmptyDoc; +const DocumentStorage DocumentStorage::kEmptyDoc{ConstructorTag::InitApproximateSize}; const StringDataSet Document::allMetadataFieldNames{Document::metaFieldTextScore, Document::metaFieldRandVal, @@ -410,6 +421,7 @@ void DocumentStorage::reset(const BSONObj& bson, bool bsonHasMetadata) { // Clean metadata. _metadataFields = DocumentMetadataFields{}; + _metadataFields.setModified(false); } void DocumentStorage::fillCache() const { @@ -477,7 +489,7 @@ void DocumentStorage::loadLazyMetadata() const { Document::Document(const BSONObj& bson) { MutableDocument md; - md.newStorageWithBson(bson, false); + md.reset(bson, false); *this = md.freeze(); } @@ -571,7 +583,7 @@ void Document::toBsonWithMetaData(BSONObjBuilder* builder) const { Document Document::fromBsonWithMetaData(const BSONObj& bson) { MutableDocument md; - md.newStorageWithBson(bson, true); + md.reset(bson, true); return md.freeze(); } diff --git a/src/mongo/db/exec/document_value/document.h b/src/mongo/db/exec/document_value/document.h index 83db6052aeb99..abeb1e5a2935d 100644 --- a/src/mongo/db/exec/document_value/document.h +++ b/src/mongo/db/exec/document_value/document.h @@ -29,23 +29,46 @@ #pragma once -#include "mongo/db/exec/document_value/document_internal.h" - #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "mongo/base/string_data.h" #include "mongo/base/string_data_comparator_interface.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/builder.h" +#include "mongo/db/exec/document_value/document_internal.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_internal.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/string_map.h" namespace mongo { class BSONObj; + class 
FieldIterator; class FieldPath; class Value; class MutableDocument; - /** An internal class that represents the position of a field in a document. * * This is a low-level class that you usually don't need to worry about. @@ -742,16 +765,6 @@ class MutableDocument { storage().makeOwned(); } - /** - * Creates a new document storage with the BSON object. Setting 'bsonHasMetadata' to true - * signals that the BSON object contains metadata fields (the complete list is in - * Document::allMetadataFieldNames). - */ - DocumentStorage& newStorageWithBson(const BSONObj& bson, bool bsonHasMetadata) { - reset(make_intrusive(bson, bsonHasMetadata, false, 0)); - return const_cast(*storagePtr()); - } - private: friend class MutableValue; // for access to next constructor explicit MutableDocument(MutableValue mv) : _storageHolder(nullptr), _storage(mv.getDocPtr()) {} @@ -817,7 +830,7 @@ class FieldIterator { /// Get next item and advance iterator Document::FieldPair next() { - verify(more()); + MONGO_verify(more()); Document::FieldPair fp(_it->nameSD(), _it->val); _it.advance(); diff --git a/src/mongo/db/exec/document_value/document_bm.cpp b/src/mongo/db/exec/document_value/document_bm.cpp index 480f0b139208d..6f3e028063e93 100644 --- a/src/mongo/db/exec/document_value/document_bm.cpp +++ b/src/mongo/db/exec/document_value/document_bm.cpp @@ -28,11 +28,19 @@ */ #include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_internal.h" +#include "mongo/util/assert_util_core.h" namespace mongo { @@ -90,4 +98,18 @@ void BM_documentToBson(benchmark::State& state) { BENCHMARK(BM_documentToBson)->DenseRange(2'000, 10'000, 2'000)->Unit(benchmark::kMicrosecond); +void BM_FieldNameHasher(benchmark::State& state) { + std::string field; + for (auto i = 0; i < state.range(0); i++) { + field.append("a"); + } + + for (auto _ : state) { + benchmark::DoNotOptimize(FieldNameHasher{}(field)); + } +} + +BENCHMARK(BM_FieldNameHasher)->RangeMultiplier(2)->Range(1, 1 << 8); + + } // namespace mongo diff --git a/src/mongo/db/exec/document_value/document_comparator.cpp b/src/mongo/db/exec/document_value/document_comparator.cpp index 4f949e4933efa..cb75b2cb981ec 100644 --- a/src/mongo/db/exec/document_value/document_comparator.cpp +++ b/src/mongo/db/exec/document_value/document_comparator.cpp @@ -27,10 +27,7 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/exec/document_value/document_comparator.h" - #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/exec/document_value/document_comparator.h b/src/mongo/db/exec/document_value/document_comparator.h index b666e7de7111d..23a8638a9ce95 100644 --- a/src/mongo/db/exec/document_value/document_comparator.h +++ b/src/mongo/db/exec/document_value/document_comparator.h @@ -29,10 +29,16 @@ #pragma once +#include +#include +#include +#include + #include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/exec/document_value/document_comparator_test.cpp b/src/mongo/db/exec/document_value/document_comparator_test.cpp index c3145f7bbcc07..d16e0355c0320 100644 --- a/src/mongo/db/exec/document_value/document_comparator_test.cpp +++ b/src/mongo/db/exec/document_value/document_comparator_test.cpp @@ -27,15 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/exec/document_value/document_comparator.h" +#include #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/db/exec/document_value/document_comparator.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/exec/document_value/document_internal.h b/src/mongo/db/exec/document_value/document_internal.h index b380247baf623..818dff7d6b04a 100644 --- a/src/mongo/db/exec/document_value/document_internal.h +++ b/src/mongo/db/exec/document_value/document_internal.h @@ -29,7 +29,6 @@ #pragma once -#include #include #include @@ -329,10 +328,8 @@ struct FieldNameHasher { using is_transparent = void; std::size_t operator()(StringData sd) const { - // TODO consider FNV-1a once we have a better benchmark corpus - unsigned out; - MurmurHash3_x86_32(sd.rawData(), sd.size(), 0, &out); - return out; + // Use the default absl string hasher. 
+ return absl::Hash{}(absl::string_view(sd.rawData(), sd.size())); } std::size_t operator()(const std::string& s) const { @@ -416,7 +413,7 @@ class DocumentStorage : public RefCountable { // Document uses these const ValueElement& getField(Position pos) const { - verify(pos.found()); + MONGO_verify(pos.found()); return *(_firstElement->plusBytes(pos.index)); } @@ -437,7 +434,7 @@ class DocumentStorage : public RefCountable { // MutableDocument uses these ValueElement& getField(Position pos) { _modified = true; - verify(pos.found()); + MONGO_verify(pos.found()); return *(_firstElement->plusBytes(pos.index)); } @@ -631,6 +628,16 @@ class DocumentStorage : public RefCountable { } private: + enum class ConstructorTag { InitApproximateSize = 0 }; + DocumentStorage(ConstructorTag tag) : DocumentStorage() { + switch (tag) { + case ConstructorTag::InitApproximateSize: + snapshottedApproximateSize(); + return; + } + MONGO_UNREACHABLE; + } + /// Returns the position of the named field in the cache or Position() template Position findFieldInCache(T name) const; diff --git a/src/mongo/db/exec/document_value/document_metadata_fields.cpp b/src/mongo/db/exec/document_value/document_metadata_fields.cpp index d33eee6c1406d..01bdc06d530a2 100644 --- a/src/mongo/db/exec/document_value/document_metadata_fields.cpp +++ b/src/mongo/db/exec/document_value/document_metadata_fields.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include +#include "mongo/base/data_type_endian.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" namespace mongo { @@ -45,8 +49,10 @@ DocumentMetadataFields::DocumentMetadataFields(const DocumentMetadataFields& oth : _holder(other._holder ? std::make_unique(*other._holder) : nullptr) {} DocumentMetadataFields& DocumentMetadataFields::operator=(const DocumentMetadataFields& other) { - _holder = other._holder ? std::make_unique(*other._holder) : nullptr; - _modified = true; + if (this != &other) { + _holder = other._holder ? std::make_unique(*other._holder) : nullptr; + _modified = true; + } return *this; } diff --git a/src/mongo/db/exec/document_value/document_metadata_fields.h b/src/mongo/db/exec/document_value/document_metadata_fields.h index 6ca3ad3198a41..c3adf2653d660 100644 --- a/src/mongo/db/exec/document_value/document_metadata_fields.h +++ b/src/mongo/db/exec/document_value/document_metadata_fields.h @@ -30,10 +30,21 @@ #pragma once #include +#include +#include +#include +#include + +#include #include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/record_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/time_support.h" namespace mongo { /** diff --git a/src/mongo/db/exec/document_value/document_metadata_fields_test.cpp b/src/mongo/db/exec/document_value/document_metadata_fields_test.cpp index 4a76154f1f67d..490aee2180758 100644 --- a/src/mongo/db/exec/document_value/document_metadata_fields_test.cpp +++ b/src/mongo/db/exec/document_value/document_metadata_fields_test.cpp @@ -27,15 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/bson/bsonobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/exec/document_value/document_value_test.cpp b/src/mongo/db/exec/document_value/document_value_test.cpp index 6dd68199a4c2d..8f98884d371cd 100644 --- a/src/mongo/db/exec/document_value/document_value_test.cpp +++ b/src/mongo/db/exec/document_value/document_value_test.cpp @@ -27,17 +27,48 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_comparator.h" +#include "mongo/db/exec/document_value/document_internal.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/decimal128.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/exec/document_value/document_value_test_util.cpp b/src/mongo/db/exec/document_value/document_value_test_util.cpp index 3468756fb452a..af3084410293f 100644 --- a/src/mongo/db/exec/document_value/document_value_test_util.cpp +++ b/src/mongo/db/exec/document_value/document_value_test_util.cpp @@ -27,9 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/db/exec/document_value/document_comparator.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/unittest/assert.h" namespace mongo { namespace unittest { diff --git a/src/mongo/db/exec/document_value/document_value_test_util.h b/src/mongo/db/exec/document_value/document_value_test_util.h index 8b3e7d99e126b..490429b6336e0 100644 --- a/src/mongo/db/exec/document_value/document_value_test_util.h +++ b/src/mongo/db/exec/document_value/document_value_test_util.h @@ -29,8 +29,15 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_comparator.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/inline_auto_update.h" #include "mongo/unittest/unittest.h" diff --git a/src/mongo/db/exec/document_value/document_value_test_util_self_test.cpp b/src/mongo/db/exec/document_value/document_value_test_util_self_test.cpp index b7843120263f0..42d3e8e17aec3 100644 --- a/src/mongo/db/exec/document_value/document_value_test_util_self_test.cpp +++ b/src/mongo/db/exec/document_value/document_value_test_util_self_test.cpp @@ -27,12 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" - -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/exec/document_value/value.cpp b/src/mongo/db/exec/document_value/value.cpp index ccb612032f1eb..570282088eadc 100644 --- a/src/mongo/db/exec/document_value/value.cpp +++ b/src/mongo/db/exec/document_value/value.cpp @@ -27,21 +27,37 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/document_value/value.h" - -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include +#include +#include +#include +#include + +#include #include "mongo/base/compare_numbers.h" #include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" #include "mongo/base/simple_string_data_comparator.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data_comparator_interface.h" #include "mongo/bson/bson_depth.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/exec/document_value/document_internal.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/query/datetime/date_time_support.h" #include "mongo/platform/decimal128.h" #include "mongo/util/hex.h" @@ -74,7 +90,7 @@ void ValueStorage::verifyRefCountingIfShould() const { case NumberLong: case NumberDouble: // the above types never reference external data - verify(!refCounter); + MONGO_verify(!refCounter); break; case String: @@ -338,12 +354,12 @@ double Value::getDouble() const { if (type == NumberDecimal) return _storage.getDecimal().toDouble(); - verify(type == NumberDouble); + MONGO_verify(type == NumberDouble); return _storage.doubleValue; } Document Value::getDocument() const { - verify(getType() == Object); + MONGO_verify(getType() == Object); return _storage.getDocument(); } @@ -421,7 +437,7 @@ BSONObjBuilder& operator<<(BSONObjBuilderValueStream& builder, const Value& val) return builder.builder(); } } - verify(false); + MONGO_verify(false); } void Value::addToBsonObj(BSONObjBuilder* builder, @@ -508,7 +524,7 @@ bool Value::coerceToBool() const { case NumberDecimal: return !_storage.getDecimal().isZero(); } - verify(false); + MONGO_verify(false); } namespace { @@ -666,7 +682,7 @@ string Value::coerceToString() const { case Date: return uassertStatusOKWithContext( - TimeZoneDatabase::utcZone().formatDate(kISOFormatString, getDate()), + TimeZoneDatabase::utcZone().formatDate(kIsoFormatStringZ, getDate()), "failed while coercing date to string"); case EOO: @@ -883,9 +899,20 @@ int Value::compare(const Value& rL, return l->scope.woCompare(r->scope); } } - verify(false); + MONGO_verify(false); } +namespace { +/** + * Hashes the given 'StringData', combines the resulting hash with 'seed', and returns the result. 
+ */ +size_t hashStringData(StringData sd, size_t seed) { + size_t strHash = absl::Hash{}(absl::string_view(sd.rawData(), sd.size())); + boost::hash_combine(seed, strHash); + return seed; +} +} // namespace + void Value::hash_combine(size_t& seed, const StringData::ComparatorInterface* stringComparator) const { BSONType type = getType(); @@ -957,7 +984,7 @@ void Value::hash_combine(size_t& seed, case Code: case Symbol: { StringData sd = getRawData(); - MurmurHash3_x86_32(sd.rawData(), sd.size(), seed, &seed); + seed = hashStringData(sd, seed); break; } @@ -966,7 +993,7 @@ void Value::hash_combine(size_t& seed, if (stringComparator) { stringComparator->hash_combine(seed, sd); } else { - MurmurHash3_x86_32(sd.rawData(), sd.size(), seed, &seed); + seed = hashStringData(sd, seed); } break; } @@ -990,14 +1017,14 @@ void Value::hash_combine(size_t& seed, case BinData: { StringData sd = getRawData(); - MurmurHash3_x86_32(sd.rawData(), sd.size(), seed, &seed); + seed = hashStringData(sd, seed); boost::hash_combine(seed, _storage.binDataType()); break; } case RegEx: { StringData sd = getRawData(); - MurmurHash3_x86_32(sd.rawData(), sd.size(), seed, &seed); + seed = hashStringData(sd, seed); break; } @@ -1191,7 +1218,7 @@ size_t Value::getApproximateSize() const { case Undefined: return sizeof(Value); } - verify(false); + MONGO_verify(false); } string Value::toString() const { @@ -1235,7 +1262,7 @@ ostream& operator<<(ostream& out, const Value& val) { return out << "undefined"; case Date: return out << [&] { - if (auto string = TimeZoneDatabase::utcZone().formatDate(kISOFormatString, + if (auto string = TimeZoneDatabase::utcZone().formatDate(kIsoFormatStringZ, val.coerceToDate()); string.isOK()) return string.getValue(); @@ -1272,7 +1299,7 @@ ostream& operator<<(ostream& out, const Value& val) { } // Not in default case to trigger better warning if a case is missing - verify(false); + MONGO_verify(false); } void Value::fillCache() const { @@ -1453,7 +1480,7 @@ Value Value::deserializeForSorter(BufReader& buf, const SorterDeserializeSetting return Value(std::move(array)); } } - verify(false); + MONGO_verify(false); } void Value::serializeForIDL(StringData fieldName, BSONObjBuilder* builder) const { diff --git a/src/mongo/db/exec/document_value/value.h b/src/mongo/db/exec/document_value/value.h index ab2e2c029346e..8b664d9b54764 100644 --- a/src/mongo/db/exec/document_value/value.h +++ b/src/mongo/db/exec/document_value/value.h @@ -29,11 +29,34 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_range.h" #include "mongo/base/static_assert.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/exec/document_value/value_internal.h" -#include "mongo/util/concepts.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/safe_num.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { @@ -417,31 +440,27 @@ inline void swap(mongo::Value& lhs, mongo::Value& rhs) { lhs.swap(rhs); } -MONGO_MAKE_BOOL_TRAIT(CanConstructValueFrom, - (typename T), - (T), - (T val), - // - 
Value(std::forward(val))); - /** * This class is identical to Value, but supports implicit creation from any of the types explicitly * supported by Value. */ class ImplicitValue : public Value { public: - TEMPLATE(typename T) - REQUIRES(CanConstructValueFrom) - ImplicitValue(T&& arg) : Value(std::forward(arg)) {} + template + requires std::is_constructible_v ImplicitValue(T&& arg) + : Value(std::forward(arg)) {} ImplicitValue(std::initializer_list values) : Value(convertToValues(values)) {} + ImplicitValue(std::vector values) : Value(convertToValues(values)) {} - ImplicitValue(std::vector values) : Value(convertToValues(values)) {} + template + ImplicitValue(std::vector values) : Value(convertToValues(values)) {} - static std::vector convertToValues(const std::vector& vec) { + template + static std::vector convertToValues(const std::vector& vec) { std::vector values; values.reserve(vec.size()); - for_each(vec.begin(), vec.end(), ([&](const int& val) { values.emplace_back(val); })); + for_each(vec.begin(), vec.end(), ([&](const T& val) { values.emplace_back(val); })); return values; } @@ -453,7 +472,7 @@ class ImplicitValue : public Value { values.reserve(vec.size()); for_each( vec.begin(), vec.end(), ([&](const ImplicitValue& val) { values.push_back(val); })); - return Value(values); + return Value(std::move(values)); } /** @@ -475,12 +494,12 @@ class ImplicitValue : public Value { namespace mongo { inline size_t Value::getArrayLength() const { - verify(getType() == Array); + MONGO_verify(getType() == Array); return getArray().size(); } inline StringData Value::getStringData() const { - verify(getType() == String); + MONGO_verify(getType() == String); return getRawData(); } @@ -489,36 +508,36 @@ inline StringData Value::getRawData() const { } inline std::string Value::getString() const { - verify(getType() == String); + MONGO_verify(getType() == String); return _storage.getString().toString(); } inline OID Value::getOid() const { - verify(getType() == jstOID); + MONGO_verify(getType() == jstOID); return OID(_storage.oid); } inline bool Value::getBool() const { - verify(getType() == Bool); + MONGO_verify(getType() == Bool); return _storage.boolValue; } inline Date_t Value::getDate() const { - verify(getType() == Date); + MONGO_verify(getType() == Date); return Date_t::fromMillisSinceEpoch(_storage.dateValue); } inline Timestamp Value::getTimestamp() const { - verify(getType() == bsonTimestamp); + MONGO_verify(getType() == bsonTimestamp); return Timestamp(_storage.timestampValue); } inline const char* Value::getRegex() const { - verify(getType() == RegEx); + MONGO_verify(getType() == RegEx); return _storage.getString().rawData(); // this is known to be NUL terminated } inline const char* Value::getRegexFlags() const { - verify(getType() == RegEx); + MONGO_verify(getType() == RegEx); const char* pattern = _storage.getString().rawData(); // this is known to be NUL terminated const char* flags = pattern + strlen(pattern) + 1; // first byte after pattern's NUL dassert(flags + strlen(flags) == pattern + _storage.getString().size()); @@ -526,16 +545,16 @@ inline const char* Value::getRegexFlags() const { } inline std::string Value::getSymbol() const { - verify(getType() == Symbol); + MONGO_verify(getType() == Symbol); return _storage.getString().toString(); } inline std::string Value::getCode() const { - verify(getType() == Code); + MONGO_verify(getType() == Code); return _storage.getString().toString(); } inline int Value::getInt() const { - verify(getType() == NumberInt); + 
MONGO_verify(getType() == NumberInt); return _storage.intValue; } @@ -544,18 +563,18 @@ inline long long Value::getLong() const { if (type == NumberInt) return _storage.intValue; - verify(type == NumberLong); + MONGO_verify(type == NumberLong); return _storage.longValue; } inline UUID Value::getUuid() const { - verify(_storage.binDataType() == BinDataType::newUUID); + MONGO_verify(_storage.binDataType() == BinDataType::newUUID); auto stringData = _storage.getString(); return UUID::fromCDR({stringData.rawData(), stringData.size()}); } inline BSONBinData Value::getBinData() const { - verify(getType() == BinData); + MONGO_verify(getType() == BinData); auto stringData = _storage.getString(); return BSONBinData(stringData.rawData(), stringData.size(), _storage.binDataType()); } diff --git a/src/mongo/db/exec/document_value/value_comparator.cpp b/src/mongo/db/exec/document_value/value_comparator.cpp index 7a900df31217f..067e587254adf 100644 --- a/src/mongo/db/exec/document_value/value_comparator.cpp +++ b/src/mongo/db/exec/document_value/value_comparator.cpp @@ -27,10 +27,7 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/exec/document_value/value_comparator.h" - #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/exec/document_value/value_comparator.h b/src/mongo/db/exec/document_value/value_comparator.h index 9929ff938e091..a231d0bfcef5e 100644 --- a/src/mongo/db/exec/document_value/value_comparator.h +++ b/src/mongo/db/exec/document_value/value_comparator.h @@ -29,9 +29,12 @@ #pragma once +#include #include #include +#include + #include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/stdx/unordered_map.h" diff --git a/src/mongo/db/exec/document_value/value_comparator_test.cpp b/src/mongo/db/exec/document_value/value_comparator_test.cpp index 963a29bc11a33..0f13d84a81f1f 100644 --- a/src/mongo/db/exec/document_value/value_comparator_test.cpp +++ b/src/mongo/db/exec/document_value/value_comparator_test.cpp @@ -27,15 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/document_value/value_comparator.h" +#include +#include +#include +#include +#include #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -312,5 +318,33 @@ TEST(ValueComparatorTest, HashingCodeShouldNotRespectCollation) { ASSERT_NE(comparator.hash(val1), comparator.hash(val2)); } +// This test was originally designed to reproduce SERVER-78126. 
+TEST(ValueComparatorTest, ArraysDifferingByOneStringShouldHaveDifferentHashes) { + const ValueComparator comparator{}; + const Value val1{std::vector{Value{std::string{"a"}}, Value{std::string{"x"}}}}; + const Value val2{std::vector{Value{std::string{"b"}}, Value{std::string{"x"}}}}; + ASSERT_NE(comparator.compare(val1, val2), 0); + ASSERT_NE(comparator.hash(val1), comparator.hash(val2)); +} + +TEST(ValueComparatorTest, ArraysDifferingByOneStringShouldHaveDifferentHashesWithCollation) { + CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); + const ValueComparator comparator{&collator}; + const Value val1{std::vector{Value{std::string{"abc"}}, Value{std::string{"xyz"}}}}; + const Value val2{std::vector{Value{std::string{"bcd"}}, Value{std::string{"xyz"}}}}; + ASSERT_NE(comparator.compare(val1, val2), 0); + ASSERT_NE(comparator.hash(val1), comparator.hash(val2)); +} + +TEST(ValueComparatorTest, ObjectsDifferingByOneStringShouldHaveDifferentHashes) { + const ValueComparator comparator{}; + const Value val1( + Document({{"foo"_sd, Value{std::string{"abc"}}}, {"bar"_sd, Value{std::string{"xyz"}}}})); + const Value val2( + Document({{"foo"_sd, Value{std::string{"def"}}}, {"bar"_sd, Value{std::string{"xyz"}}}})); + ASSERT_NE(comparator.compare(val1, val2), 0); + ASSERT_NE(comparator.hash(val1), comparator.hash(val2)); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/exec/document_value/value_internal.h b/src/mongo/db/exec/document_value/value_internal.h index b19af0e728f27..8f36c8f800e81 100644 --- a/src/mongo/db/exec/document_value/value_internal.h +++ b/src/mongo/db/exec/document_value/value_internal.h @@ -30,14 +30,20 @@ #pragma once #include +#include +#include + #include #include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/util/assert_util.h" #include "mongo/util/debug_util.h" #include "mongo/util/intrusive_counter.h" @@ -47,6 +53,67 @@ class Document; class DocumentStorage; class Value; + +/** An immutable reference-counted string of inline data. */ +class RCString final : public RefCountable { +public: + static boost::intrusive_ptr create(StringData s) { + using namespace fmt::literals; + static constexpr size_t sizeLimit = BSONObjMaxUserSize; + uassert(16493, + "RCString too large. Requires size={} < limit={}"_format(s.size(), sizeLimit), + s.size() < sizeLimit); + return boost::intrusive_ptr{new (s) RCString{s}}; + } + + explicit operator StringData() const noexcept { + return StringData{_data(), _size}; + } + + void* operator new(size_t, StringData s) { + return ::operator new(_allocSize(s.size())); + } + + /** Used if constructor fails after placement `new (StringData)`. */ + void operator delete(void* ptr, StringData s) { + ::operator delete(ptr, _allocSize(s.size())); + } + +#if __cpp_lib_destroying_delete >= 201806L + void operator delete(RCString* ptr, std::destroying_delete_t) { + size_t sz = _allocSize(ptr->_size); + ptr->~RCString(); + ::operator delete(ptr, sz); + } +#else // !__cpp_lib_destroying_delete + /** Invoked by virtual destructor. */ + void operator delete(void* ptr) { + ::operator delete(ptr); + } +#endif // __cpp_lib_destroying_delete + +private: + static size_t _allocSize(size_t stringSize) { + return sizeof(RCString) + stringSize + 1; // Incl. 
'\0'-terminator + } + + /** Use static `create()` instead. */ + explicit RCString(StringData s) : _size{s.size()} { + if (_size) + memcpy(_data(), s.rawData(), _size); + _data()[_size] = '\0'; + } + + const char* _data() const noexcept { + return reinterpret_cast(this + 1); + } + char* _data() noexcept { + return const_cast(std::as_const(*this)._data()); + } + + size_t _size; /** Excluding '\0' terminator. */ +}; + // TODO: a MutableVector, similar to MutableDocument /// A heap-allocated reference-counted std::vector class RCVector : public RefCountable { @@ -268,7 +335,7 @@ class ValueStorage { } else { dassert(typeid(*genericRCPtr) == typeid(const RCString)); const RCString* stringPtr = static_cast(genericRCPtr); - return StringData(stringPtr->c_str(), stringPtr->size()); + return StringData{*stringPtr}; } } diff --git a/src/mongo/db/exec/eof.cpp b/src/mongo/db/exec/eof.cpp index 25c7e44ac9252..d9c4dcb64027e 100644 --- a/src/mongo/db/exec/eof.cpp +++ b/src/mongo/db/exec/eof.cpp @@ -27,13 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/eof.h" - #include +#include -#include "mongo/db/exec/scoped_timer.h" +#include "mongo/db/exec/eof.h" namespace mongo { diff --git a/src/mongo/db/exec/eof.h b/src/mongo/db/exec/eof.h index e60d6b4319b34..3e5c069185f99 100644 --- a/src/mongo/db/exec/eof.h +++ b/src/mongo/db/exec/eof.h @@ -29,7 +29,13 @@ #pragma once +#include + #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" namespace mongo { diff --git a/src/mongo/db/exec/exclusion_projection_executor.cpp b/src/mongo/db/exec/exclusion_projection_executor.cpp index 7e12bbf5e2ff1..fc431a92eb6ff 100644 --- a/src/mongo/db/exec/exclusion_projection_executor.cpp +++ b/src/mongo/db/exec/exclusion_projection_executor.cpp @@ -27,7 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include +#include +#include +#include #include "mongo/db/exec/exclusion_projection_executor.h" #include "mongo/db/query/query_knobs_gen.h" diff --git a/src/mongo/db/exec/exclusion_projection_executor.h b/src/mongo/db/exec/exclusion_projection_executor.h index 811717c73e848..e8f3c3bca06e4 100644 --- a/src/mongo/db/exec/exclusion_projection_executor.h +++ b/src/mongo/db/exec/exclusion_projection_executor.h @@ -29,14 +29,39 @@ #pragma once +#include #include +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/fastpath_projection_node.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_node.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/projection_ast.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/string_map.h" namespace mongo::projection_executor { /** diff --git a/src/mongo/db/exec/exclusion_projection_executor_test.cpp b/src/mongo/db/exec/exclusion_projection_executor_test.cpp index 29538a31b6138..c09c7eea40ee2 100644 --- a/src/mongo/db/exec/exclusion_projection_executor_test.cpp +++ b/src/mongo/db/exec/exclusion_projection_executor_test.cpp @@ -27,28 +27,35 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/exclusion_projection_executor.h" - -#include -#include -#include +#include +#include +#include #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/exclusion_projection_executor.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/projection_parser.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/record_id.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::projection_executor { namespace { diff --git a/src/mongo/db/exec/fetch.cpp b/src/mongo/db/exec/fetch.cpp index 7071807029235..c627121fa7b4f 100644 --- a/src/mongo/db/exec/fetch.cpp +++ b/src/mongo/db/exec/fetch.cpp @@ -27,20 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/fetch.h" - #include +#include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/fetch.h" #include "mongo/db/exec/filter.h" -#include "mongo/db/exec/scoped_timer.h" #include "mongo/db/exec/working_set_common.h" #include "mongo/db/query/plan_executor_impl.h" #include "mongo/util/assert_util.h" -#include "mongo/util/fail_point.h" -#include "mongo/util/str.h" namespace mongo { @@ -54,7 +51,7 @@ FetchStage::FetchStage(ExpressionContext* expCtx, WorkingSet* ws, std::unique_ptr child, const MatchExpression* filter, - const CollectionPtr& collection) + VariantCollectionPtrOrAcquisition collection) : RequiresCollectionStage(kStageType, expCtx, collection), _ws(ws), _filter((filter && !filter->isTriviallyTrue()) ? filter : nullptr), @@ -97,15 +94,14 @@ PlanStage::StageState FetchStage::doWork(WorkingSetID* out) { ++_specificStats.alreadyHasObj; } else { // We need a valid RecordId to fetch from and this is the only state that has one. - verify(WorkingSetMember::RID_AND_IDX == member->getState()); - verify(member->hasRecordId()); + MONGO_verify(WorkingSetMember::RID_AND_IDX == member->getState()); + MONGO_verify(member->hasRecordId()); const auto ret = handlePlanStageYield( expCtx(), "FetchStage", - collection()->ns().ns(), [&] { - const auto& coll = collection(); + const auto& coll = collectionPtr(); if (!_cursor) _cursor = coll->getCursor(opCtx()); diff --git a/src/mongo/db/exec/fetch.h b/src/mongo/db/exec/fetch.h index 0e3db734142f1..d700a1da4aa3d 100644 --- a/src/mongo/db/exec/fetch.h +++ b/src/mongo/db/exec/fetch.h @@ -31,10 +31,17 @@ #include +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/db/storage/record_store.h" namespace mongo { @@ -54,7 +61,7 @@ class FetchStage : public RequiresCollectionStage { WorkingSet* ws, std::unique_ptr child, const MatchExpression* filter, - const CollectionPtr& collection); + VariantCollectionPtrOrAcquisition collection); ~FetchStage(); diff --git a/src/mongo/db/exec/find_projection_executor_test.cpp b/src/mongo/db/exec/find_projection_executor_test.cpp index 9f000b8810c62..109c5680ceea1 100644 --- a/src/mongo/db/exec/find_projection_executor_test.cpp +++ b/src/mongo/db/exec/find_projection_executor_test.cpp @@ -27,15 +27,39 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/matcher/copyable_match_expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_find_internal.h" #include "mongo/db/query/projection_parser.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::projection_executor { constexpr auto kProjectionPostImageVarName = diff --git a/src/mongo/db/exec/geo_near.cpp b/src/mongo/db/exec/geo_near.cpp index ca7c1859c0b06..2104c3aecbb7d 100644 --- a/src/mongo/db/exec/geo_near.cpp +++ b/src/mongo/db/exec/geo_near.cpp @@ -31,21 +31,52 @@ #include "mongo/db/exec/geo_near.h" #include +#include +#include +#include #include #include // For s2 search +#include +#include #include -#include "mongo/db/bson/dotted_path_support.h" +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/fetch.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/geo/geoconstants.h" -#include "mongo/db/geo/geoparser.h" +#include "mongo/db/geo/geometry_container.h" #include "mongo/db/geo/hash.h" #include "mongo/db/index/expression_params.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/expression_index.h" #include "mongo/db/query/expression_index_knobs_gen.h" -#include "mongo/logv2/log.h" +#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -195,11 +226,12 @@ static R2Annulus twoDDistanceBounds(const GeoNearParams& nearParams, return fullBounds; } -GeoNear2DStage::DensityEstimator::DensityEstimator(const CollectionPtr& collection, - PlanStage::Children* children, - BSONObj infoObj, - const GeoNearParams* nearParams, - const R2Annulus& fullBounds) +GeoNear2DStage::DensityEstimator::DensityEstimator( + const VariantCollectionPtrOrAcquisition* collection, + PlanStage::Children* children, + BSONObj infoObj, + const GeoNearParams* 
nearParams, + const R2Annulus& fullBounds) : _collection(collection), _children(children), _nearParams(nearParams), @@ -223,7 +255,7 @@ void GeoNear2DStage::DensityEstimator::buildIndexScan(ExpressionContext* expCtx, const IndexDescriptor* twoDIndex) { // Scan bounds on 2D indexes are only over the 2D field - other bounds aren't applicable. // This is handled in query planning. - IndexScanParams scanParams(expCtx->opCtx, _collection, twoDIndex); + IndexScanParams scanParams(expCtx->opCtx, _collection->getCollectionPtr(), twoDIndex); scanParams.bounds = _nearParams->baseBounds; // The "2d" field is always the first in the index @@ -254,7 +286,7 @@ void GeoNear2DStage::DensityEstimator::buildIndexScan(ExpressionContext* expCtx, IndexBoundsBuilder::intersectize(oil, &scanParams.bounds.fields[twoDFieldPosition]); invariant(!_indexScan); - _indexScan = new IndexScan(expCtx, _collection, scanParams, workingSet, nullptr); + _indexScan = new IndexScan(expCtx, *_collection, scanParams, workingSet, nullptr); _children->emplace_back(_indexScan); } @@ -340,7 +372,7 @@ PlanStage::StageState GeoNear2DStage::initialize(OperationContext* opCtx, WorkingSetID* out) { if (!_densityEstimator) { _densityEstimator.reset(new DensityEstimator( - collection(), &_children, indexDescriptor()->infoObj(), &_nearParams, _fullBounds)); + &collection(), &_children, indexDescriptor()->infoObj(), &_nearParams, _fullBounds)); } double estimatedDistance; @@ -383,7 +415,7 @@ static const string kTwoDIndexNearStage("GEO_NEAR_2D"); GeoNear2DStage::GeoNear2DStage(const GeoNearParams& nearParams, ExpressionContext* expCtx, WorkingSet* workingSet, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* twoDIndex) : NearStage(expCtx, kTwoDIndexNearStage.c_str(), @@ -409,7 +441,7 @@ class FetchStageWithMatch final : public FetchStage { WorkingSet* ws, std::unique_ptr child, MatchExpression* filter, - const CollectionPtr& collection) + VariantCollectionPtrOrAcquisition collection) : FetchStage(expCtx, ws, std::move(child), filter, collection), _matcher(filter) {} private: @@ -448,8 +480,8 @@ static R2Annulus projectBoundsToTwoDDegrees(R2Annulus sphereBounds) { outerDegrees + maxErrorDegrees); } -std::unique_ptr GeoNear2DStage::nextInterval( - OperationContext* opCtx, WorkingSet* workingSet, const CollectionPtr& collection) { +std::unique_ptr GeoNear2DStage::nextInterval(OperationContext* opCtx, + WorkingSet* workingSet) { // The search is finished if we searched at least once and all the way to the edge if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) { return nullptr; @@ -541,7 +573,7 @@ std::unique_ptr GeoNear2DStage::nextInterval( // Scan bounds on 2D indexes are only over the 2D field - other bounds aren't applicable. // This is handled in query planning. - IndexScanParams scanParams(opCtx, collection, indexDescriptor()); + IndexScanParams scanParams(opCtx, collectionPtr(), indexDescriptor()); // This does force us to do our own deduping of results. 
scanParams.bounds = _nearParams.baseBounds; @@ -578,7 +610,7 @@ std::unique_ptr GeoNear2DStage::nextInterval( // 2D indexes support covered search over additional fields they contain auto scan = std::make_unique( - expCtx(), collection, scanParams, workingSet, _nearParams.filter); + expCtx(), collection(), scanParams, workingSet, _nearParams.filter); MatchExpression* docMatcher = nullptr; @@ -590,7 +622,7 @@ std::unique_ptr GeoNear2DStage::nextInterval( // FetchStage owns index scan _children.emplace_back(std::make_unique( - expCtx(), workingSet, std::move(scan), docMatcher, collection)); + expCtx(), workingSet, std::move(scan), docMatcher, collection())); return std::make_unique( _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval); @@ -626,7 +658,7 @@ static const string kS2IndexNearStage("GEO_NEAR_2DSPHERE"); GeoNear2DSphereStage::GeoNear2DSphereStage(const GeoNearParams& nearParams, ExpressionContext* expCtx, WorkingSet* workingSet, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* s2Index) : NearStage(expCtx, kS2IndexNearStage.c_str(), @@ -706,11 +738,12 @@ S2Region* buildS2Region(const R2Annulus& sphereBounds) { } } // namespace -GeoNear2DSphereStage::DensityEstimator::DensityEstimator(const CollectionPtr& collection, - PlanStage::Children* children, - const GeoNearParams* nearParams, - const S2IndexingParams& indexParams, - const R2Annulus& fullBounds) +GeoNear2DSphereStage::DensityEstimator::DensityEstimator( + const VariantCollectionPtrOrAcquisition* collection, + PlanStage::Children* children, + const GeoNearParams* nearParams, + const S2IndexingParams& indexParams, + const R2Annulus& fullBounds) : _collection(collection), _children(children), _nearParams(nearParams), @@ -727,7 +760,7 @@ GeoNear2DSphereStage::DensityEstimator::DensityEstimator(const CollectionPtr& co void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(ExpressionContext* expCtx, WorkingSet* workingSet, const IndexDescriptor* s2Index) { - IndexScanParams scanParams(expCtx->opCtx, _collection, s2Index); + IndexScanParams scanParams(expCtx->opCtx, _collection->getCollectionPtr(), s2Index); scanParams.bounds = _nearParams->baseBounds; // Because the planner doesn't yet set up 2D index bounds, do it ourselves here @@ -750,7 +783,7 @@ void GeoNear2DSphereStage::DensityEstimator::buildIndexScan(ExpressionContext* e // Index scan invariant(!_indexScan); - _indexScan = new IndexScan(expCtx, _collection, scanParams, workingSet, nullptr); + _indexScan = new IndexScan(expCtx, *_collection, scanParams, workingSet, nullptr); _children->emplace_back(_indexScan); } @@ -837,7 +870,7 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx, WorkingSetID* out) { if (!_densityEstimator) { _densityEstimator.reset(new DensityEstimator( - collection(), &_children, &_nearParams, _indexParams, _fullBounds)); + &collection(), &_children, &_nearParams, _indexParams, _fullBounds)); } double estimatedDistance; @@ -863,7 +896,7 @@ PlanStage::StageState GeoNear2DSphereStage::initialize(OperationContext* opCtx, } std::unique_ptr GeoNear2DSphereStage::nextInterval( - OperationContext* opCtx, WorkingSet* workingSet, const CollectionPtr& collection) { + OperationContext* opCtx, WorkingSet* workingSet) { // The search is finished if we searched at least once and all the way to the edge if (_currBounds.getInner() >= 0 && _currBounds.getOuter() == _fullBounds.getOuter()) { return nullptr; @@ -896,7 +929,7 @@ std::unique_ptr 
GeoNear2DSphereStage::nextInterval( // Setup the covering region and stages for this interval // - IndexScanParams scanParams(opCtx, collection, indexDescriptor()); + IndexScanParams scanParams(opCtx, collectionPtr(), indexDescriptor()); // This does force us to do our own deduping of results. scanParams.bounds = _nearParams.baseBounds; @@ -928,11 +961,12 @@ std::unique_ptr GeoNear2DSphereStage::nextInterval( OrderedIntervalList* coveredIntervals = &scanParams.bounds.fields[s2FieldPosition]; ExpressionMapping::S2CellIdsToIntervalsWithParents(cover, _indexParams, coveredIntervals); - auto scan = std::make_unique(expCtx(), collection, scanParams, workingSet, nullptr); + auto scan = + std::make_unique(expCtx(), collection(), scanParams, workingSet, nullptr); // FetchStage owns index scan _children.emplace_back(std::make_unique( - expCtx(), workingSet, std::move(scan), _nearParams.filter, collection)); + expCtx(), workingSet, std::move(scan), _nearParams.filter, collection())); return std::make_unique( _children.back().get(), nextBounds.getInner(), nextBounds.getOuter(), isLastInterval); diff --git a/src/mongo/db/exec/geo_near.h b/src/mongo/db/exec/geo_near.h index 2a5735d626149..9f4b1f6cf14bd 100644 --- a/src/mongo/db/exec/geo_near.h +++ b/src/mongo/db/exec/geo_near.h @@ -29,19 +29,27 @@ #pragma once +#include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/near.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/geo/geometry_container.h" +#include "mongo/db/geo/hash.h" #include "mongo/db/geo/r2_region_coverer.h" +#include "mongo/db/geo/shapes.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/s2_common.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_geo.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_executor.h" namespace mongo { @@ -73,13 +81,12 @@ class GeoNear2DStage final : public NearStage { GeoNear2DStage(const GeoNearParams& nearParams, ExpressionContext* expCtx, WorkingSet* workingSet, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* twoDIndex); protected: std::unique_ptr nextInterval(OperationContext* opCtx, - WorkingSet* workingSet, - const CollectionPtr& collection) final; + WorkingSet* workingSet) final; double computeDistance(WorkingSetMember* member) final; @@ -90,7 +97,7 @@ class GeoNear2DStage final : public NearStage { private: class DensityEstimator { public: - DensityEstimator(const CollectionPtr& collection, + DensityEstimator(const VariantCollectionPtrOrAcquisition* collection, PlanStage::Children* children, BSONObj infoObj, const GeoNearParams* nearParams, @@ -107,7 +114,8 @@ class GeoNear2DStage final : public NearStage { WorkingSet* workingSet, const IndexDescriptor* twoDIndex); - const CollectionPtr& _collection; + const VariantCollectionPtrOrAcquisition* + _collection; // Points to the internal stage _collection. PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage. const GeoNearParams* _nearParams; // Not owned here. 
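Both geo stages drive their search through nextInterval(), which now takes only the working set and expands an annulus (ring) around the query point until it covers the full bounds. The loop below is a toy illustration of that expanding-ring idea; the growth factor is invented, since the real stages derive interval sizes from the density estimate and index parameters.

```cpp
#include <algorithm>
#include <iostream>

int main() {
    const double fullOuter = 100.0;   // full search radius
    double inner = 0.0, outer = 5.0;  // first ring, seeded from a density estimate

    while (true) {
        std::cout << "search ring [" << inner << ", " << outer << ")\n";
        if (outer == fullOuter) {  // same termination test the patched code uses
            break;
        }
        inner = outer;
        outer = std::min(outer * 2, fullOuter);  // widen the ring, capped at the full bounds
    }
}
```
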
const R2Annulus& _fullBounds; @@ -142,13 +150,12 @@ class GeoNear2DSphereStage final : public NearStage { GeoNear2DSphereStage(const GeoNearParams& nearParams, ExpressionContext* expCtx, WorkingSet* workingSet, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* s2Index); protected: std::unique_ptr nextInterval(OperationContext* opCtx, - WorkingSet* workingSet, - const CollectionPtr& collection) final; + WorkingSet* workingSet) final; double computeDistance(WorkingSetMember* member) final; @@ -160,7 +167,7 @@ class GeoNear2DSphereStage final : public NearStage { // Estimate the density of data by search the nearest cells level by level around center. class DensityEstimator { public: - DensityEstimator(const CollectionPtr& collection, + DensityEstimator(const VariantCollectionPtrOrAcquisition* collection, PlanStage::Children* children, const GeoNearParams* nearParams, const S2IndexingParams& indexParams, @@ -179,7 +186,8 @@ class GeoNear2DSphereStage final : public NearStage { WorkingSet* workingSet, const IndexDescriptor* s2Index); - const CollectionPtr& _collection; + const VariantCollectionPtrOrAcquisition* + _collection; // Points to the internal stage _collection PlanStage::Children* _children; // Points to PlanStage::_children in the NearStage. const GeoNearParams* _nearParams; // Not owned here. const S2IndexingParams _indexParams; diff --git a/src/mongo/db/exec/idhack.cpp b/src/mongo/db/exec/idhack.cpp index 04600ac4f4a8a..e148ee447d2dc 100644 --- a/src/mongo/db/exec/idhack.cpp +++ b/src/mongo/db/exec/idhack.cpp @@ -27,19 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/idhack.h" - #include +#include +#include +#include -#include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/exec/index_scan.h" -#include "mongo/db/exec/projection.h" -#include "mongo/db/exec/scoped_timer.h" +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/idhack.h" #include "mongo/db/exec/working_set_common.h" -#include "mongo/db/index/btree_access_method.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -53,7 +60,7 @@ const char* IDHackStage::kStageType = "IDHACK"; IDHackStage::IDHackStage(ExpressionContext* expCtx, CanonicalQuery* query, WorkingSet* ws, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* descriptor) : RequiresIndexStage(kStageType, expCtx, collection, descriptor, ws), _workingSet(ws), @@ -65,7 +72,7 @@ IDHackStage::IDHackStage(ExpressionContext* expCtx, IDHackStage::IDHackStage(ExpressionContext* expCtx, const BSONObj& key, WorkingSet* ws, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* descriptor) : RequiresIndexStage(kStageType, expCtx, collection, descriptor, ws), _workingSet(ws), @@ -88,11 +95,10 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) { return handlePlanStageYield( expCtx(), "IDHackStage", - collection()->ns().ns(), [&] { // Look up the key by going directly to 
the index. - auto recordId = - indexAccessMethod()->asSortedData()->findSingle(opCtx(), collection(), _key); + auto recordId = indexAccessMethod()->asSortedData()->findSingle( + opCtx(), collectionPtr(), indexDescriptor()->getEntry(), _key); // Key not found. if (recordId.isNull()) { @@ -109,7 +115,7 @@ PlanStage::StageState IDHackStage::doWork(WorkingSetID* out) { member->recordId = std::move(recordId); _workingSet->transitionToRecordIdAndIdx(id); - const auto& coll = collection(); + const auto& coll = collectionPtr(); if (!_recordCursor) _recordCursor = coll->getCursor(opCtx()); diff --git a/src/mongo/db/exec/idhack.h b/src/mongo/db/exec/idhack.h index effe1766efa31..48115e5d2f3c8 100644 --- a/src/mongo/db/exec/idhack.h +++ b/src/mongo/db/exec/idhack.h @@ -31,9 +31,18 @@ #include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_index_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/db/storage/record_store.h" namespace mongo { @@ -51,13 +60,13 @@ class IDHackStage final : public RequiresIndexStage { IDHackStage(ExpressionContext* expCtx, CanonicalQuery* query, WorkingSet* ws, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* descriptor); IDHackStage(ExpressionContext* expCtx, const BSONObj& key, WorkingSet* ws, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* descriptor); ~IDHackStage(); diff --git a/src/mongo/db/exec/inclusion_projection_executor.cpp b/src/mongo/db/exec/inclusion_projection_executor.cpp index e9c21b75e0e81..d6fb44c3decf8 100644 --- a/src/mongo/db/exec/inclusion_projection_executor.cpp +++ b/src/mongo/db/exec/inclusion_projection_executor.cpp @@ -27,10 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include +#include +#include #include "mongo/db/exec/inclusion_projection_executor.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/expression_walker.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::projection_executor { using ComputedFieldsPolicy = ProjectionPolicies::ComputedFieldsPolicy; diff --git a/src/mongo/db/exec/inclusion_projection_executor.h b/src/mongo/db/exec/inclusion_projection_executor.h index 38fbe69f436ce..7874a9aea4ad9 100644 --- a/src/mongo/db/exec/inclusion_projection_executor.h +++ b/src/mongo/db/exec/inclusion_projection_executor.h @@ -29,13 +29,44 @@ #pragma once +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/fastpath_projection_node.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_node.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_dependencies.h" #include "mongo/db/pipeline/expression_walker.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/projection_ast.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/string_map.h" namespace mongo::projection_executor { /** diff --git a/src/mongo/db/exec/inclusion_projection_executor_test.cpp b/src/mongo/db/exec/inclusion_projection_executor_test.cpp index bb14e5c69a920..2e8b3a82fa493 100644 --- a/src/mongo/db/exec/inclusion_projection_executor_test.cpp +++ b/src/mongo/db/exec/inclusion_projection_executor_test.cpp @@ -27,26 +27,41 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/inclusion_projection_executor.h" - +#include #include +#include +#include +#include +#include +#include +#include + #include "mongo/base/exact_cast.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/inclusion_projection_executor.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/matcher/copyable_match_expression.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/record_id.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/exec/index_scan.cpp b/src/mongo/db/exec/index_scan.cpp index 9aa6f1fad0650..b6b7d038b4657 100644 --- a/src/mongo/db/exec/index_scan.cpp +++ b/src/mongo/db/exec/index_scan.cpp @@ -28,20 +28,32 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/index_scan.h" - +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include #include +#include -#include "mongo/db/catalog/index_catalog.h" +#include + +#include "mongo/bson/ordering.h" +#include "mongo/db/client.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/filter.h" -#include "mongo/db/exec/scoped_timer.h" +#include "mongo/db/exec/index_scan.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/db/index_names.h" #include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/debug_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -63,7 +75,7 @@ namespace mongo { const char* IndexScan::kStageType = "IXSCAN"; IndexScan::IndexScan(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, IndexScanParams params, WorkingSet* workingSet, const MatchExpression* filter) @@ -93,7 +105,8 @@ IndexScan::IndexScan(ExpressionContext* expCtx, } boost::optional IndexScan::initIndexScan() { - if (_lowPriority && opCtx()->getClient()->isFromUserConnection() && + if (_lowPriority && gDeprioritizeUnboundedUserIndexScans.load() && + opCtx()->getClient()->isFromUserConnection() && opCtx()->lockState()->shouldWaitForTicket()) { _priority.emplace(opCtx()->lockState(), AdmissionContext::Priority::kLow); } @@ -110,7 +123,7 @@ boost::optional IndexScan::initIndexScan() { _endKey = _bounds.endKey; _indexCursor->setEndPosition(_endKey, 
_endKeyInclusive); - KeyString::Value keyStringForSeek = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek( + key_string::Value keyStringForSeek = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek( _startKey, indexAccessMethod()->getSortedDataInterface()->getKeyStringVersion(), indexAccessMethod()->getSortedDataInterface()->getOrdering(), @@ -153,7 +166,6 @@ PlanStage::StageState IndexScan::doWork(WorkingSetID* out) { const auto ret = handlePlanStageYield( expCtx(), "IndexScan", - collection()->ns().ns(), [&] { switch (_scanState) { case INITIALIZING: @@ -294,7 +306,8 @@ void IndexScan::doDetachFromOperationContext() { } void IndexScan::doReattachToOperationContext() { - if (_lowPriority && opCtx()->getClient()->isFromUserConnection() && + if (_lowPriority && gDeprioritizeUnboundedUserIndexScans.load() && + opCtx()->getClient()->isFromUserConnection() && opCtx()->lockState()->shouldWaitForTicket()) { _priority.emplace(opCtx()->lockState(), AdmissionContext::Priority::kLow); } diff --git a/src/mongo/db/exec/index_scan.h b/src/mongo/db/exec/index_scan.h index 8efc515b6c0ca..2e4eaf9978f35 100644 --- a/src/mongo/db/exec/index_scan.h +++ b/src/mongo/db/exec/index_scan.h @@ -29,11 +29,28 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_index_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" #include "mongo/db/storage/index_entry_comparison.h" #include "mongo/db/storage/sorted_data_interface.h" @@ -115,7 +132,7 @@ class IndexScan final : public RequiresIndexStage { }; IndexScan(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, IndexScanParams params, WorkingSet* workingSet, const MatchExpression* filter); diff --git a/src/mongo/db/exec/js_function.cpp b/src/mongo/db/exec/js_function.cpp index 339a7192d2513..ee6adc5eedf80 100644 --- a/src/mongo/db/exec/js_function.cpp +++ b/src/mongo/db/exec/js_function.cpp @@ -27,15 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/js_function.h" +#include +#include +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/client.h" +#include "mongo/db/exec/js_function.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/scripting/engine.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/exec/js_function.h b/src/mongo/db/exec/js_function.h index 55f1343fb3988..9cd0be55f6bf4 100644 --- a/src/mongo/db/exec/js_function.h +++ b/src/mongo/db/exec/js_function.h @@ -27,6 +27,9 @@ * it in the license file. 
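The scan deprioritization above is now gated on the gDeprioritizeUnboundedUserIndexScans knob in addition to the existing user-connection and ticket checks, both in initIndexScan() and when reattaching to an operation context. Below is a minimal sketch of that pattern using standard-library stand-ins for the knob and the RAII admission guard; the names are invented here, not MongoDB APIs.

```cpp
#include <atomic>
#include <iostream>
#include <optional>

std::atomic<bool> gDeprioritizeUnboundedScans{true};  // runtime-settable knob (stand-in)

enum class Priority { kNormal, kLow };

struct ScopedAdmissionPriority {  // toy RAII guard
    explicit ScopedAdmissionPriority(Priority p) {
        std::cout << "admitted at priority " << static_cast<int>(p) << "\n";
    }
    ~ScopedAdmissionPriority() {
        std::cout << "priority restored\n";
    }
};

void runScan(bool unbounded, bool fromUserConnection, bool mustWaitForTicket) {
    std::optional<ScopedAdmissionPriority> priority;
    // Only deprioritize when the knob is on and all preconditions hold -- the same
    // and-chain the patched initIndexScan() uses.
    if (unbounded && gDeprioritizeUnboundedScans.load() && fromUserConnection &&
        mustWaitForTicket) {
        priority.emplace(Priority::kLow);
    }
    // ... do the scan work while (optionally) holding the low-priority admission ...
}

int main() {
    runScan(true, true, true);   // deprioritized
    runScan(true, false, true);  // runs at normal priority
}
```
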
*/ +#pragma once + +#include #include #include diff --git a/src/mongo/db/exec/limit.cpp b/src/mongo/db/exec/limit.cpp index 220d86d31be7c..cdb43abe29bd7 100644 --- a/src/mongo/db/exec/limit.cpp +++ b/src/mongo/db/exec/limit.cpp @@ -30,10 +30,8 @@ #include "mongo/db/exec/limit.h" #include - -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/util/str.h" +#include +#include namespace mongo { diff --git a/src/mongo/db/exec/limit.h b/src/mongo/db/exec/limit.h index ffc2f6a509cfd..ce6be32494c41 100644 --- a/src/mongo/db/exec/limit.h +++ b/src/mongo/db/exec/limit.h @@ -30,8 +30,14 @@ #pragma once +#include + #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" namespace mongo { diff --git a/src/mongo/db/exec/merge_sort.cpp b/src/mongo/db/exec/merge_sort.cpp index 8520a95947559..30848069fc222 100644 --- a/src/mongo/db/exec/merge_sort.cpp +++ b/src/mongo/db/exec/merge_sort.cpp @@ -29,14 +29,18 @@ #include "mongo/db/exec/merge_sort.h" +#include #include +#include +#include -#include "mongo/db/exec/scoped_timer.h" +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/exec/working_set_common.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_interface.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -140,7 +144,7 @@ PlanStage::StageState MergeSortStage::doWork(WorkingSetID* out) { } // If we're here, for each non-EOF child, we have a valid WSID. - verify(!_merging.empty()); + MONGO_verify(!_merging.empty()); // Get the 'min' WSID. _merging is a priority queue so its top is the smallest. MergingRef top = _merging.top(); @@ -173,13 +177,13 @@ bool MergeSortStage::StageWithValueComparison::operator()(const MergingRef& lhs, string fn = patternElt.fieldName(); BSONElement lhsElt; - verify(lhsMember->getFieldDotted(fn, &lhsElt)); + MONGO_verify(lhsMember->getFieldDotted(fn, &lhsElt)); // Determine if the left-hand side sort key part comes from an index key. auto lhsIsFromIndexKey = !lhsMember->hasObj(); BSONElement rhsElt; - verify(rhsMember->getFieldDotted(fn, &rhsElt)); + MONGO_verify(rhsMember->getFieldDotted(fn, &rhsElt)); // Determine if the right-hand side sort key part comes from an index key. auto rhsIsFromIndexKey = !rhsMember->hasObj(); diff --git a/src/mongo/db/exec/merge_sort.h b/src/mongo/db/exec/merge_sort.h index cdb844fde4cd0..111a3423cb9d0 100644 --- a/src/mongo/db/exec/merge_sort.h +++ b/src/mongo/db/exec/merge_sort.h @@ -30,17 +30,26 @@ #pragma once #include +#include #include #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { class CollatorInterface; + // External params for the merge sort stage. Declared below. 
class MergeSortStageParams; diff --git a/src/mongo/db/exec/mock_stage.cpp b/src/mongo/db/exec/mock_stage.cpp index ab2dd12edfa8e..d1b8ce0f2dbd0 100644 --- a/src/mongo/db/exec/mock_stage.cpp +++ b/src/mongo/db/exec/mock_stage.cpp @@ -27,11 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/exec/mock_stage.h" - -#include "mongo/util/overloaded_visitor.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/db/exec/mock_stage.h b/src/mongo/db/exec/mock_stage.h index 02e75a52bd4ba..ea832e0f08608 100644 --- a/src/mongo/db/exec/mock_stage.h +++ b/src/mongo/db/exec/mock_stage.h @@ -29,9 +29,20 @@ #pragma once +#include #include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/exec/multi_iterator.cpp b/src/mongo/db/exec/multi_iterator.cpp index 04fc77664a700..c61f7e9ca9c6a 100644 --- a/src/mongo/db/exec/multi_iterator.cpp +++ b/src/mongo/db/exec/multi_iterator.cpp @@ -27,14 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/multi_iterator.h" - #include +#include -#include "mongo/db/exec/working_set_common.h" +#include +#include +#include + +#include "mongo/db/exec/multi_iterator.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -46,7 +51,7 @@ const char* MultiIteratorStage::kStageType = "MULTI_ITERATOR"; MultiIteratorStage::MultiIteratorStage(ExpressionContext* expCtx, WorkingSet* ws, - const CollectionPtr& collection) + VariantCollectionPtrOrAcquisition collection) : RequiresCollectionStage(kStageType, expCtx, collection), _ws(ws) {} void MultiIteratorStage::addIterator(unique_ptr it) { @@ -59,7 +64,6 @@ PlanStage::StageState MultiIteratorStage::doWork(WorkingSetID* out) { const auto ret = handlePlanStageYield( expCtx(), "MultiIteratorStage", - collection()->ns().ns(), [&] { while (!_iterators.empty()) { record = _iterators.back()->next(); diff --git a/src/mongo/db/exec/multi_iterator.h b/src/mongo/db/exec/multi_iterator.h index cf6e5e322c64a..00592f972dd25 100644 --- a/src/mongo/db/exec/multi_iterator.h +++ b/src/mongo/db/exec/multi_iterator.h @@ -33,9 +33,15 @@ #include #include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/db/storage/record_store.h" namespace mongo { @@ -47,7 +53,9 @@ namespace mongo { */ class MultiIteratorStage final : public RequiresCollectionStage { public: - MultiIteratorStage(ExpressionContext* expCtx, WorkingSet* ws, const CollectionPtr& collection); + MultiIteratorStage(ExpressionContext* expCtx, + WorkingSet* ws, + VariantCollectionPtrOrAcquisition collection); void 
addIterator(std::unique_ptr it); diff --git a/src/mongo/db/exec/multi_plan.cpp b/src/mongo/db/exec/multi_plan.cpp index ac4c503004875..b585363eb9617 100644 --- a/src/mongo/db/exec/multi_plan.cpp +++ b/src/mongo/db/exec/multi_plan.cpp @@ -31,26 +31,47 @@ #include "mongo/db/exec/multi_plan.h" #include -#include +#include +#include +#include +#include #include +#include +#include +#include -#include "mongo/db/catalog/database.h" -#include "mongo/db/client.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/exec/histogram_server_status_metric.h" -#include "mongo/db/exec/scoped_timer.h" #include "mongo/db/exec/trial_period_utils.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/explain.h" #include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_cache_key_factory.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_explainer_factory.h" #include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/plan_ranker_util.h" +#include "mongo/db/query/plan_ranking_decision.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/util/histogram.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/str.h" +#include "mongo/util/tick_source.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -109,7 +130,7 @@ HistogramServerStatusMetric classicNumPlansHistogram( } // namespace MultiPlanStage::MultiPlanStage(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, CanonicalQuery* cq, PlanCachingMode cachingMode) : RequiresCollectionStage(kStageType, expCtx, collection), @@ -166,9 +187,9 @@ PlanStage::StageState MultiPlanStage::doWork(WorkingSetID* out) { LOGV2_DEBUG(20588, 5, "Best plan errored, switching to backup plan"); - CollectionQueryInfo::get(collection()) + CollectionQueryInfo::get(collectionPtr()) .getPlanCache() - ->remove(plan_cache_key_factory::make(*_query, collection())); + ->remove(plan_cache_key_factory::make(*_query, collectionPtr())); switchToBackupPlan(); return _candidates[_bestPlanIdx].root->work(out); @@ -206,7 +227,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { const size_t numWorks = trial_period::getTrialPeriodMaxWorks(opCtx(), - collection(), + collectionPtr(), internalQueryPlanEvaluationWorks.load(), internalQueryPlanEvaluationCollFraction.load()); size_t numResults = trial_period::getTrialPeriodNumToReturn(*_query); @@ -245,7 +266,7 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { invariant(ranking); _bestPlanIdx = ranking->candidateOrder[0]; - verify(_bestPlanIdx >= 0 && _bestPlanIdx < static_cast(_candidates.size())); + MONGO_verify(_bestPlanIdx >= 0 && _bestPlanIdx < static_cast(_candidates.size())); auto& bestCandidate = _candidates[_bestPlanIdx]; const auto& alreadyProduced = 
bestCandidate.results; @@ -270,12 +291,17 @@ Status MultiPlanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { } } - plan_cache_util::updatePlanCache(expCtx()->opCtx, - MultipleCollectionAccessor(collection()), - _cachingMode, - *_query, - std::move(ranking), - _candidates); + const auto& coll = collection(); + auto multipleCollection = coll.isAcquisition() + ? MultipleCollectionAccessor{coll.getAcquisition()} + : MultipleCollectionAccessor{coll.getCollectionPtr()}; + + plan_cache_util::updatePlanCacheFromCandidates(expCtx()->opCtx, + std::move(multipleCollection), + _cachingMode, + *_query, + std::move(ranking), + _candidates); removeRejectedPlans(); return Status::OK(); diff --git a/src/mongo/db/exec/multi_plan.h b/src/mongo/db/exec/multi_plan.h index 12ab61659db5b..37e6a9ea1e1b1 100644 --- a/src/mongo/db/exec/multi_plan.h +++ b/src/mongo/db/exec/multi_plan.h @@ -30,16 +30,27 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/exec/plan_cache_util.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/plan_enumerator_explain_info.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" namespace mongo { @@ -61,7 +72,7 @@ class MultiPlanStage final : public RequiresCollectionStage { * when possible. If 'shouldCache' is false, the plan cache will never be written. */ MultiPlanStage(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, CanonicalQuery* cq, PlanCachingMode cachingMode = PlanCachingMode::AlwaysCache); diff --git a/src/mongo/db/exec/near.cpp b/src/mongo/db/exec/near.cpp index 05bc2735af143..3c11074c6c4a8 100644 --- a/src/mongo/db/exec/near.cpp +++ b/src/mongo/db/exec/near.cpp @@ -46,7 +46,7 @@ NearStage::NearStage(ExpressionContext* expCtx, const char* typeName, StageType type, WorkingSet* workingSet, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* indexDescriptor) : RequiresIndexStage(typeName, expCtx, collection, indexDescriptor, workingSet), _workingSet(workingSet), @@ -136,7 +136,7 @@ PlanStage::StageState NearStage::bufferNext(WorkingSetID* toReturn) { // if (!_nextInterval) { - auto interval = nextInterval(opCtx(), _workingSet, collection()); + auto interval = nextInterval(opCtx(), _workingSet); if (!interval) { _searchState = SearchState_Finished; return PlanStage::IS_EOF; diff --git a/src/mongo/db/exec/near.h b/src/mongo/db/exec/near.h index 315b7c8f4b885..1772c85263e11 100644 --- a/src/mongo/db/exec/near.h +++ b/src/mongo/db/exec/near.h @@ -108,7 +108,7 @@ class NearStage : public RequiresIndexStage { const char* typeName, StageType type, WorkingSet* workingSet, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* indexDescriptor); // @@ -121,8 +121,7 @@ class NearStage : public RequiresIndexStage { * covering stage if required. 
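pickBestPlan() above sizes the multi-planner's trial period with trial_period::getTrialPeriodMaxWorks(), fed by the internalQueryPlanEvaluationWorks and internalQueryPlanEvaluationCollFraction knobs. The helper below sketches the presumed shape of that computation; the exact formula is an assumption for illustration, not the server's code.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>

// Each candidate plan gets a work budget: the larger of a fixed knob value and a
// fraction of the collection size (assumed formula).
size_t trialPeriodMaxWorks(size_t numRecords,
                           size_t evaluationWorks,        // internalQueryPlanEvaluationWorks
                           double evaluationCollFraction  // internalQueryPlanEvaluationCollFraction
) {
    return std::max(evaluationWorks,
                    static_cast<size_t>(evaluationCollFraction * numRecords));
}

int main() {
    std::cout << trialPeriodMaxWorks(100, 10000, 0.3) << "\n";      // small collection: 10000
    std::cout << trialPeriodMaxWorks(1000000, 10000, 0.3) << "\n";  // large collection: 300000
}
```
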
*/ virtual std::unique_ptr nextInterval(OperationContext* opCtx, - WorkingSet* workingSet, - const CollectionPtr& collection) = 0; + WorkingSet* workingSet) = 0; /** * Computes the distance value for the given member data, or -1 if the member should not be diff --git a/src/mongo/db/exec/or.cpp b/src/mongo/db/exec/or.cpp index 078765ffc84ea..00c21ecbb0caa 100644 --- a/src/mongo/db/exec/or.cpp +++ b/src/mongo/db/exec/or.cpp @@ -29,12 +29,15 @@ #include "mongo/db/exec/or.h" +#include #include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/filter.h" -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/exec/or.h b/src/mongo/db/exec/or.h index e4ddcbcb2c0c8..51d5a1b7aa38e 100644 --- a/src/mongo/db/exec/or.h +++ b/src/mongo/db/exec/or.h @@ -29,9 +29,16 @@ #pragma once +#include +#include + #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" #include "mongo/stdx/unordered_set.h" diff --git a/src/mongo/db/exec/plan_cache_util.cpp b/src/mongo/db/exec/plan_cache_util.cpp index c14f2d6034cbb..dbe8091475af4 100644 --- a/src/mongo/db/exec/plan_cache_util.cpp +++ b/src/mongo/db/exec/plan_cache_util.cpp @@ -28,7 +28,29 @@ */ #include "mongo/db/exec/plan_cache_util.h" + +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/canonical_query_encoder.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/stage_types.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/stdx/unordered_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -73,10 +95,11 @@ void updatePlanCache(OperationContext* opCtx, const CanonicalQuery& query, const QuerySolution& solution, const sbe::PlanStage& root, - const stage_builder::PlanStageData& data) { - if (shouldCacheQuery(query) && collections.getMainCollection()) { - auto key = plan_cache_key_factory::make(query, collections); - auto plan = std::make_unique(root.clone(), data); + stage_builder::PlanStageData& stageData) { + const CollectionPtr& collection = collections.getMainCollection(); + if (collection && shouldCacheQuery(query) && solution.isEligibleForPlanCache()) { + sbe::PlanCacheKey key = plan_cache_key_factory::make(query, collections); + auto plan = std::make_unique(root.clone(), stageData); plan->indexFilterApplied = solution.indexFilterApplied; bool shouldOmitDiagnosticInformation = @@ -177,7 +200,7 @@ plan_cache_debug_info::DebugInfoSBE buildDebugInfo(const QuerySolution* solution } case STAGE_EQ_LOOKUP: { auto eln = static_cast(node); - auto& secondaryStats = debugInfo.secondaryStats[eln->foreignCollection.toString()]; + auto& secondaryStats = debugInfo.secondaryStats[eln->foreignCollection]; if (eln->lookupStrategy == EqLookupNode::LookupStrategy::kIndexedLoopJoin) { 
tassert(6466200, "Index join lookup should have an index entry", eln->idxEntry); secondaryStats.indexesUsed.push_back(eln->idxEntry->identifier.catalogName); diff --git a/src/mongo/db/exec/plan_cache_util.h b/src/mongo/db/exec/plan_cache_util.h index 1567c8161b459..4ac5c03124500 100644 --- a/src/mongo/db/exec/plan_cache_util.h +++ b/src/mongo/db/exec/plan_cache_util.h @@ -29,19 +29,37 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/db/catalog/collection.h" #include "mongo/db/curop.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" #include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/plan_cache_callbacks.h" #include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_cache_key_factory.h" #include "mongo/db/query/plan_explainer_factory.h" +#include "mongo/db/query/plan_ranker.h" +#include "mongo/db/query/plan_ranking_decision.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_plan_cache.h" #include "mongo/db/query/sbe_plan_ranker.h" #include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" namespace mongo { /** @@ -90,16 +108,23 @@ plan_cache_debug_info::DebugInfo buildDebugInfo( plan_cache_debug_info::DebugInfoSBE buildDebugInfo(const QuerySolution* solution); /** - * Caches the best candidate plan, chosen from the given 'candidates' based on the 'ranking' - * decision, if the 'query' is of a type that can be cached. Otherwise, does nothing. + * Caches the best candidate execution plan for 'query', chosen from the given 'candidates' based on + * the 'ranking' decision, if the 'query' is of a type that can be cached. Otherwise, does nothing. * * The 'cachingMode' specifies whether the query should be: * * Always cached. * * Never cached. * * Cached, except in certain special cases. + * + * This method is shared between Classic and SBE. The plan roots 'candidates[i].root' have different + * types between the two: + * * Classic - mongo::PlanStage* + * * SBE - std::unique_ptr> + * This breaks polymorphism because native pointers and std::unique_ptr must be handled differently. + * std::is_same_v is used to distinguish in these cases. */ template -void updatePlanCache( +void updatePlanCacheFromCandidates( OperationContext* opCtx, const MultipleCollectionAccessor& collections, PlanCachingMode cachingMode, @@ -176,7 +201,9 @@ void updatePlanCache( // Store the choice we just made in the cache, if the query is of a type that is safe to // cache. 
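The renamed updatePlanCacheFromCandidates() stays templated over the candidate type precisely because Classic and SBE candidates carry differently typed plan roots, as the new comment explains. The snippet below illustrates the std::is_same_v / if constexpr dispatch that such a shared template relies on, using invented candidate structs rather than the real ones.

```cpp
#include <iostream>
#include <memory>
#include <type_traits>

struct ClassicCandidate {
    int* root;  // Classic flavor: a raw, non-owning pointer to the plan root
};

struct SbeCandidate {
    std::unique_ptr<int> root;  // SBE flavor: an owning smart pointer to the plan root
};

// One template body serves both candidate flavors; if constexpr picks the right way
// to reach the underlying root pointer at compile time.
template <typename Candidate>
const int* rootPointer(const Candidate& c) {
    if constexpr (std::is_same_v<decltype(c.root), int*>) {
        return c.root;  // raw pointer: use it directly
    } else {
        return c.root.get();  // unique_ptr: unwrap it
    }
}

int main() {
    int classicRoot = 1;
    ClassicCandidate classic{&classicRoot};
    SbeCandidate sbe{std::make_unique<int>(2)};
    std::cout << *rootPointer(classic) << " " << *rootPointer(sbe) << "\n";  // prints "1 2"
}
```
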
- if (shouldCacheQuery(query) && canCache) { + QuerySolution* solution = winningPlan.solution.get(); + if (canCache && shouldCacheQuery(query) && solution->isEligibleForPlanCache()) { + const CollectionPtr& collection = collections.getMainCollection(); auto rankingDecision = ranking.get(); auto cacheClassicPlan = [&]() { auto buildDebugInfoFn = [&]() -> plan_cache_debug_info::DebugInfo { @@ -188,7 +215,6 @@ void updatePlanCache( callbacks{query, buildDebugInfoFn}; winningPlan.solution->cacheData->indexFilterApplied = winningPlan.solution->indexFilterApplied; - auto& collection = collections.getMainCollection(); auto isSensitive = CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation; uassertStatusOK(CollectionQueryInfo::get(collection) .getPlanCache() @@ -243,10 +269,10 @@ void updatePlanCache( } /** - * Caches the SBE plan 'root' along with its accompanying 'data' if the 'query' is of a type that - * can be cached. Otherwise, does nothing. + * Caches the plan 'root' along with its accompanying 'data' if the 'query' is of a type that can be + * cached. Otherwise, does nothing. * - * The given plan will be "pinned" to the cache and will be not subject to replanning. One put into + * The given plan will be "pinned" to the cache and will not be subject to replanning. Once put into * the cache, the plan immediately becomes "active". */ void updatePlanCache(OperationContext* opCtx, @@ -254,6 +280,6 @@ void updatePlanCache(OperationContext* opCtx, const CanonicalQuery& query, const QuerySolution& solution, const sbe::PlanStage& root, - const stage_builder::PlanStageData& data); + stage_builder::PlanStageData& stageData); } // namespace plan_cache_util } // namespace mongo diff --git a/src/mongo/db/exec/plan_stage.cpp b/src/mongo/db/exec/plan_stage.cpp index 4976b1efed0df..dc402ac7080cf 100644 --- a/src/mongo/db/exec/plan_stage.cpp +++ b/src/mongo/db/exec/plan_stage.cpp @@ -28,12 +28,11 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/db/exec/plan_stage.h" +#include + #include "mongo/db/operation_context.h" -#include "mongo/db/service_context.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/exec/plan_stage.h b/src/mongo/db/exec/plan_stage.h index e4816cf943da3..a1a6da0f84064 100644 --- a/src/mongo/db/exec/plan_stage.h +++ b/src/mongo/db/exec/plan_stage.h @@ -29,16 +29,27 @@ #pragma once +#include +#include +#include +#include #include +#include +#include #include #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/scoped_timer_factory.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/query/restore_context.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" namespace mongo { @@ -404,9 +415,9 @@ class PlanStage { */ boost::optional getOptTimer() { if (_opCtx && _commonStats.executionTime) { - return scoped_timer_factory::make(_opCtx->getServiceContext(), - QueryExecTimerPrecision::kMillis, - _commonStats.executionTime.get_ptr()); + return boost::optional(boost::in_place_init, + _commonStats.executionTime.get_ptr(), + _opCtx->getServiceContext()->getFastClockSource()); } return boost::none; diff --git a/src/mongo/db/exec/plan_stats.h b/src/mongo/db/exec/plan_stats.h index 
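getOptTimer() above now constructs the timer directly inside the optional with boost::in_place_init instead of going through a factory. In-place construction matters because a scoped timer that writes to a stats counter on destruction is typically neither copyable nor movable. The stand-alone sketch below shows the same idea with std::optional and std::in_place; ScopedTimer here is a toy, not the server's class.

```cpp
#include <chrono>
#include <cstdint>
#include <iostream>
#include <optional>

class ScopedTimer {
public:
    explicit ScopedTimer(int64_t* counterMillis)
        : _counterMillis(counterMillis), _start(std::chrono::steady_clock::now()) {}
    ScopedTimer(const ScopedTimer&) = delete;             // non-copyable...
    ScopedTimer& operator=(const ScopedTimer&) = delete;  // ...and therefore non-movable too
    ~ScopedTimer() {
        auto elapsed = std::chrono::steady_clock::now() - _start;
        *_counterMillis +=
            std::chrono::duration_cast<std::chrono::milliseconds>(elapsed).count();
    }

private:
    int64_t* _counterMillis;
    std::chrono::steady_clock::time_point _start;
};

int64_t executionTimeMillis = 0;

std::optional<ScopedTimer> getOptTimer(bool collectTimingStats) {
    if (collectTimingStats) {
        // In-place construction: no copy or move of ScopedTimer is ever required.
        return std::optional<ScopedTimer>(std::in_place, &executionTimeMillis);
    }
    return std::nullopt;
}

int main() {
    {
        auto timer = getOptTimer(true);  // destroyed at end of scope, counter updated
    }
    std::cout << "executionTimeMillis = " << executionTimeMillis << "\n";
}
```
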
6917e6849937a..ab51d635d58e3 100644 --- a/src/mongo/db/exec/plan_stats.h +++ b/src/mongo/db/exec/plan_stats.h @@ -1170,7 +1170,7 @@ struct TimeseriesModifyStats final : public SpecificStats { } uint64_t estimateObjectSizeInBytes() const { - return sizeof(*this); + return objInserted.objsize() + sizeof(*this); } void acceptVisitor(PlanStatsConstVisitor* visitor) const final { @@ -1185,7 +1185,17 @@ struct TimeseriesModifyStats final : public SpecificStats { BSONObj bucketFilter; BSONObj residualFilter; size_t nBucketsUnpacked = 0u; - size_t nMeasurementsDeleted = 0u; + size_t nMeasurementsMatched = 0u; + size_t nMeasurementsModified = 0u; + + // Will be 1 if this is an {upsert: true} update that did an insert, 0 otherwise. + size_t nMeasurementsUpserted = 0u; + + // The object that was inserted. This is an empty document if no insert was performed. + BSONObj objInserted; + + // True iff this is a $mod update. + bool isModUpdate; }; struct SampleFromTimeseriesBucketStats final : public SpecificStats { @@ -1227,9 +1237,22 @@ struct SpoolStats : public SpecificStats { visitor->visit(this); } + // The maximum number of bytes of memory we're willing to use during execution of the spool. If + // this limit is exceeded and 'allowDiskUse' is false, the query will fail at execution time. If + // 'allowDiskUse' is true, the data will be spilled to disk. + uint64_t maxMemoryUsageBytes = 0u; + + // The maximum number of bytes of disk space we're willing to use during execution of the spool, + // if 'allowDiskUse' is true. + uint64_t maxDiskUsageBytes = 0u; + // The amount of data we've spooled in bytes. uint64_t totalDataSizeBytes = 0u; - // TODO SERVER-74437 add more stats for spilling metrics + // The number of times that we spilled data to disk during the execution of this query. + uint64_t spills = 0u; + + // The maximum size of the spill file written to disk, or 0 if no spilling occurred. 
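The new SpoolStats counters track a memory budget, a disk budget, the number of spills, and the spilled file size. The toy spool below shows how such counters might be driven: rows accumulate in memory until the budget is exceeded, at which point they are (notionally) flushed to disk and the counters updated. It is an illustrative sketch only, not the server's spool stage.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

struct SpoolStats {
    uint64_t maxMemoryUsageBytes = 0;
    uint64_t maxDiskUsageBytes = 0;
    uint64_t totalDataSizeBytes = 0;
    uint64_t spills = 0;
    uint64_t spilledDataStorageSize = 0;
};

class SpillingSpool {
public:
    SpillingSpool(uint64_t memBudget, uint64_t diskBudget) {
        _stats.maxMemoryUsageBytes = memBudget;
        _stats.maxDiskUsageBytes = diskBudget;
    }

    void add(std::string row) {
        _bufferedBytes += row.size();
        _stats.totalDataSizeBytes += row.size();
        _buffer.push_back(std::move(row));
        if (_bufferedBytes > _stats.maxMemoryUsageBytes) {
            spill();
        }
    }

    const SpoolStats& stats() const {
        return _stats;
    }

private:
    void spill() {
        // Pretend to write the buffered rows to a spill file and account for its size.
        _stats.spilledDataStorageSize += _bufferedBytes;
        ++_stats.spills;
        _buffer.clear();
        _bufferedBytes = 0;
        // A real implementation would fail the query here if spilledDataStorageSize
        // exceeded maxDiskUsageBytes, or if spilling were disallowed entirely.
    }

    std::vector<std::string> _buffer;
    uint64_t _bufferedBytes = 0;
    SpoolStats _stats;
};

int main() {
    SpillingSpool spool(16 /*bytes in memory*/, 1024 /*bytes on disk*/);
    for (int i = 0; i < 10; ++i) {
        spool.add("row-" + std::to_string(i));
    }
    std::cout << spool.stats().spills << " spills, " << spool.stats().spilledDataStorageSize
              << " bytes spilled\n";
}
```
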
+ uint64_t spilledDataStorageSize = 0u; }; } // namespace mongo diff --git a/src/mongo/db/exec/projection.cpp b/src/mongo/db/exec/projection.cpp index 9cec5eac22b68..76fae8039b3da 100644 --- a/src/mongo/db/exec/projection.cpp +++ b/src/mongo/db/exec/projection.cpp @@ -29,18 +29,28 @@ #include "mongo/db/exec/projection.h" -#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/projection_executor_builder.h" -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/record_id.h" -#include "mongo/util/str.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/exec/projection.h b/src/mongo/db/exec/projection.h index 3c86983bb8557..0d9b7a90f49fe 100644 --- a/src/mongo/db/exec/projection.h +++ b/src/mongo/db/exec/projection.h @@ -29,12 +29,27 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/projection_executor.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_ast.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/util/string_map.h" namespace mongo { /** diff --git a/src/mongo/db/exec/projection_executor.h b/src/mongo/db/exec/projection_executor.h index 0188c17f69548..b74e66c06d320 100644 --- a/src/mongo/db/exec/projection_executor.h +++ b/src/mongo/db/exec/projection_executor.h @@ -29,16 +29,32 @@ #pragma once -#include "mongo/platform/basic.h" - #include +#include +#include +#include +#include +#include #include +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/projection_ast.h" #include "mongo/db/query/projection_policies.h" +#include "mongo/platform/basic.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::projection_executor { /** @@ -73,7 +89,7 @@ class ProjectionExecutor : public TransformerInterface { /** * Apply the projection transformation. 
*/ - Document applyTransformation(const Document& input) override { + Document applyTransformation(const Document& input) const override { auto output = applyProjection(input); if (_rootReplacementExpression) { return _applyRootReplacementExpression(input, output); @@ -135,7 +151,7 @@ class ProjectionExecutor : public TransformerInterface { boost::intrusive_ptr _rootReplacementExpression; private: - Document _applyRootReplacementExpression(const Document& input, const Document& output) { + Document _applyRootReplacementExpression(const Document& input, const Document& output) const { using namespace fmt::literals; _expCtx->variables.setValue(_projectionPostImageVarId, Value{output}); diff --git a/src/mongo/db/exec/projection_executor_builder.cpp b/src/mongo/db/exec/projection_executor_builder.cpp index 6c18456966bc8..80a738c7d1c7b 100644 --- a/src/mongo/db/exec/projection_executor_builder.cpp +++ b/src/mongo/db/exec/projection_executor_builder.cpp @@ -27,17 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/exec/projection_executor_builder.h" +#include #include "mongo/base/exact_cast.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/exclusion_projection_executor.h" #include "mongo/db/exec/inclusion_projection_executor.h" +#include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_find_internal.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/projection_ast.h" #include "mongo/db/query/projection_ast_path_tracking_visitor.h" +#include "mongo/db/query/projection_ast_visitor.h" #include "mongo/db/query/tree_walker.h" -#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::projection_executor { namespace { diff --git a/src/mongo/db/exec/projection_executor_builder.h b/src/mongo/db/exec/projection_executor_builder.h index 476f2b63a25dc..01dde351ffb93 100644 --- a/src/mongo/db/exec/projection_executor_builder.h +++ b/src/mongo/db/exec/projection_executor_builder.h @@ -30,9 +30,14 @@ #pragma once #include +#include +#include #include "mongo/db/exec/projection_executor.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_ast.h" +#include "mongo/db/query/projection_policies.h" namespace mongo::projection_executor { /** diff --git a/src/mongo/db/exec/projection_executor_builder_test.cpp b/src/mongo/db/exec/projection_executor_builder_test.cpp index 1182007846eec..45811e4d54a77 100644 --- a/src/mongo/db/exec/projection_executor_builder_test.cpp +++ b/src/mongo/db/exec/projection_executor_builder_test.cpp @@ -27,20 +27,39 @@ * it in the license file. 
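The hunk above const-qualifies ProjectionExecutor::applyTransformation and its root-replacement helper, which only compiles if the virtual they override was const-qualified in the same change. A minimal standalone sketch (generic names, not the real TransformerInterface) of how a const override must track its base declaration:

```cpp
#include <cctype>
#include <string>

// Generic stand-in for an interface whose transform method became const.
struct Transformer {
    virtual ~Transformer() = default;
    // Once the base declaration is const, every override must be const as well;
    // 'override' turns a mismatch into a compile error instead of silent hiding.
    virtual std::string applyTransformation(const std::string& input) const = 0;
};

struct UppercaseFirst final : Transformer {
    std::string applyTransformation(const std::string& input) const override {
        std::string out = input;
        if (!out.empty()) {
            out[0] = static_cast<char>(std::toupper(static_cast<unsigned char>(out[0])));
        }
        return out;
    }
};
```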
*/ -#include "mongo/platform/basic.h" +#include + +#include +#include +#include +#include #include "mongo/base/exact_cast.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/exclusion_projection_executor.h" #include "mongo/db/exec/inclusion_projection_executor.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/transformer_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/projection_ast_util.h" #include "mongo/db/query/projection_parser.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/exec/projection_executor_redaction_test.cpp b/src/mongo/db/exec/projection_executor_redaction_test.cpp index b7ff9f1059c01..fef2b4cdb7ec9 100644 --- a/src/mongo/db/exec/projection_executor_redaction_test.cpp +++ b/src/mongo/db/exec/projection_executor_redaction_test.cpp @@ -27,17 +27,29 @@ * it in the license file. */ +#include +#include +#include + #include "document_value/document_value_test_util.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/projection_ast_util.h" #include "mongo/db/query/projection_parser.h" #include "mongo/db/query/projection_policies.h" #include "mongo/db/query/serialization_options.h" -#include "mongo/unittest/inline_auto_update.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -58,16 +70,16 @@ std::unique_ptr compileProjection(BSONO expCtx, &ast, policies, projection_executor::kDefaultBuilderParams); return exec; } -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } TEST(Redaction, ProjectionTest) { SerializationOptions options; - options.replacementForLiteralArgs = "?"; - options.redactIdentifiers = true; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiers = true; - options.identifierRedactionPolicy = redactFieldNameForTest; + options.transformIdentifiersCallback = applyHmacForTest; auto redactProj = [&](std::string obj) { return compileProjection(fromjson(obj))->serializeTransformation(boost::none, options); }; @@ -169,7 
+181,7 @@ TEST(Redaction, ProjectionTest) { /// Add fields projection actual = redactProj("{a: \"hi\"}"); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"HASH<_id>":true,"HASH":{"$const":"?"}})", + R"({"HASH<_id>":true,"HASH":"?string"})", actual); actual = redactProj("{a: '$field'}"); @@ -180,55 +192,38 @@ TEST(Redaction, ProjectionTest) { // Dotted path actual = redactProj("{\"a.b\": \"hi\"}"); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"HASH<_id>":true,"HASH":{"HASH":{"$const":"?"}}})", + R"({"HASH<_id>":true,"HASH":{"HASH":"?string"}})", actual); // Two fields actual = redactProj("{a: \"hi\", b: \"hello\"}"); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"HASH<_id>":true,"HASH":{"$const":"?"},"HASH":{"$const":"?"}})", + R"({"HASH<_id>":true,"HASH":"?string","HASH":"?string"})", actual); // Explicit _id: 0 actual = redactProj("{b: \"hi\", _id: \"hey\"}"); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"HASH":{"$const":"?"},"HASH<_id>":{"$const":"?"}})", + R"({"HASH":"?string","HASH<_id>":"?string"})", actual); // Two nested fields actual = redactProj("{\"b.d\": \"hello\", \"b.c\": \"world\"}"); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({ - "HASH<_id>": true, - "HASH": { - "HASH": { - "$const": "?" - }, - "HASH": { - "$const": "?" - } - } - })", + R"({"HASH<_id>":true,"HASH":{"HASH":"?string","HASH":"?string"}})", actual); actual = redactProj("{\"b.d\": \"hello\", a: \"world\", \"b.c\": \"mongodb\"}"); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT R"({ - "HASH<_id>": true, - "HASH": { - "HASH": { - "$const": "?" - }, - "HASH": { - "$const": "?" - } - }, - "HASH": { - "$const": "?" - } - })", + "HASH<_id>": true, + "HASH": { + "HASH": "?string", + "HASH": "?string" + }, + "HASH": "?string" + })", actual); } - } // namespace } // namespace mongo diff --git a/src/mongo/db/exec/projection_executor_test.cpp b/src/mongo/db/exec/projection_executor_test.cpp index 7d308bd6d2eef..935363f01e3f3 100644 --- a/src/mongo/db/exec/projection_executor_test.cpp +++ b/src/mongo/db/exec/projection_executor_test.cpp @@ -27,23 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/projection_executor.h" - +#include #include +#include + #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/inclusion_projection_executor.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/projection_parser.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::projection_executor { namespace { diff --git a/src/mongo/db/exec/projection_executor_utils.cpp b/src/mongo/db/exec/projection_executor_utils.cpp index 180d37e0fecf9..806de65fbced0 100644 --- a/src/mongo/db/exec/projection_executor_utils.cpp +++ b/src/mongo/db/exec/projection_executor_utils.cpp @@ -27,7 +27,25 @@ * it in the license file. 
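The rewritten redaction test swaps the old redactIdentifiers/replacementForLiteralArgs knobs for transformIdentifiers plus a transformIdentifiersCallback, and a literal policy that prints debug type strings such as "?string". A standalone sketch of the callback-driven identifier transform, using simplified stand-in types rather than the real SerializationOptions:

```cpp
#include <functional>
#include <iostream>
#include <string>

// Hypothetical, simplified stand-in for the options configured in the test above.
struct RedactionOptions {
    bool transformIdentifiers = false;
    std::function<std::string(const std::string&)> transformIdentifiersCallback;
};

std::string serializeFieldName(const RedactionOptions& opts, const std::string& name) {
    if (opts.transformIdentifiers && opts.transformIdentifiersCallback) {
        return opts.transformIdentifiersCallback(name);
    }
    return name;
}

int main() {
    RedactionOptions opts;
    opts.transformIdentifiers = true;
    // Mirrors the test's applyHmacForTest: wraps the identifier instead of hashing it.
    opts.transformIdentifiersCallback = [](const std::string& s) { return "HASH<" + s + ">"; };
    std::cout << serializeFieldName(opts, "_id") << "\n";  // prints HASH<_id>
    return 0;
}
```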
*/ +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/projection_executor.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::projection_executor_utils { bool applyProjectionToOneField(projection_executor::ProjectionExecutor* executor, @@ -128,7 +146,7 @@ Value applyFindSliceProjectionToArray(const std::vector& array, : elem); } - return Value{output}; + return Value{std::move(output)}; } /** diff --git a/src/mongo/db/exec/projection_executor_utils.h b/src/mongo/db/exec/projection_executor_utils.h index e36dff444f848..e35ddab7fcc2d 100644 --- a/src/mongo/db/exec/projection_executor_utils.h +++ b/src/mongo/db/exec/projection_executor_utils.h @@ -29,10 +29,17 @@ #pragma once +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/pipeline/field_path.h" +#include "mongo/stdx/unordered_set.h" namespace mongo::projection_executor_utils { /** diff --git a/src/mongo/db/exec/projection_executor_utils_test.cpp b/src/mongo/db/exec/projection_executor_utils_test.cpp index d3b977f3c2c44..44fe96194b554 100644 --- a/src/mongo/db/exec/projection_executor_utils_test.cpp +++ b/src/mongo/db/exec/projection_executor_utils_test.cpp @@ -27,14 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/projection_executor_utils.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::projection_executor_utils { namespace positional_projection_tests { diff --git a/src/mongo/db/exec/projection_executor_wildcard_access_test.cpp b/src/mongo/db/exec/projection_executor_wildcard_access_test.cpp index fc1e74a0c2eea..87c1c2787e343 100644 --- a/src/mongo/db/exec/projection_executor_wildcard_access_test.cpp +++ b/src/mongo/db/exec/projection_executor_wildcard_access_test.cpp @@ -27,18 +27,34 @@ * it in the license file. 
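The only functional change in projection_executor_utils.cpp above is returning Value{std::move(output)} instead of copying the freshly built array into the returned Value. A minimal standalone sketch of the same move-into-return pattern with standard types:

```cpp
#include <string>
#include <utility>
#include <vector>

// A wrapper that takes ownership of a vector, standing in for the Value type.
struct Wrapper {
    explicit Wrapper(std::vector<std::string> v) : data(std::move(v)) {}
    std::vector<std::string> data;
};

Wrapper build() {
    std::vector<std::string> output;
    output.reserve(3);
    for (int i = 0; i < 3; ++i) {
        output.push_back("elem" + std::to_string(i));
    }
    // std::move hands the buffer to the wrapper; without it the vector would be
    // copied and then destroyed, doing an unnecessary allocation and element copy.
    return Wrapper{std::move(output)};
}
```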
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" -#include "mongo/db/exec/projection_executor_utils.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/transformer_interface.h" #include "mongo/db/query/projection_parser.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::projection_executor { namespace { diff --git a/src/mongo/db/exec/projection_node.cpp b/src/mongo/db/exec/projection_node.cpp index 0b98219ab5da5..0f419773f6b87 100644 --- a/src/mongo/db/exec/projection_node.cpp +++ b/src/mongo/db/exec/projection_node.cpp @@ -27,9 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/projection_node.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo::projection_executor { using ArrayRecursionPolicy = ProjectionPolicies::ArrayRecursionPolicy; diff --git a/src/mongo/db/exec/projection_node.h b/src/mongo/db/exec/projection_node.h index 14ceb554e7a75..abc826b7a4356 100644 --- a/src/mongo/db/exec/projection_node.h +++ b/src/mongo/db/exec/projection_node.h @@ -29,11 +29,31 @@ #pragma once +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/projection_executor.h" - +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/projection_policies.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/string_map.h" namespace mongo::projection_executor { /** diff --git a/src/mongo/db/exec/queued_data_stage.cpp b/src/mongo/db/exec/queued_data_stage.cpp index 8ecb541d627e5..d2f183263dbce 100644 --- a/src/mongo/db/exec/queued_data_stage.cpp +++ b/src/mongo/db/exec/queued_data_stage.cpp @@ -30,9 +30,7 @@ #include "mongo/db/exec/queued_data_stage.h" #include - -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" +#include namespace mongo { diff --git a/src/mongo/db/exec/queued_data_stage.h b/src/mongo/db/exec/queued_data_stage.h index bc13988cef0ad..738c7ceb78027 100644 --- a/src/mongo/db/exec/queued_data_stage.h +++ b/src/mongo/db/exec/queued_data_stage.h @@ -29,10 +29,14 @@ #pragma once +#include #include 
#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" namespace mongo { diff --git a/src/mongo/db/exec/queued_data_stage_test.cpp b/src/mongo/db/exec/queued_data_stage_test.cpp index d0e65b6d7428a..81de11f787f39 100644 --- a/src/mongo/db/exec/queued_data_stage_test.cpp +++ b/src/mongo/db/exec/queued_data_stage_test.cpp @@ -33,15 +33,21 @@ #include "mongo/db/exec/queued_data_stage.h" -#include +#include #include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" using namespace mongo; diff --git a/src/mongo/db/exec/record_store_fast_count.cpp b/src/mongo/db/exec/record_store_fast_count.cpp index f810b55dc7944..fe18f230da476 100644 --- a/src/mongo/db/exec/record_store_fast_count.cpp +++ b/src/mongo/db/exec/record_store_fast_count.cpp @@ -27,16 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/db/catalog/collection.h" #include "mongo/db/exec/record_store_fast_count.h" +#include "mongo/util/assert_util_core.h" namespace mongo { const char* RecordStoreFastCountStage::kStageType = "RECORD_STORE_FAST_COUNT"; RecordStoreFastCountStage::RecordStoreFastCountStage(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, long long skip, long long limit) : RequiresCollectionStage(kStageType, expCtx, collection), _skip(skip), _limit(limit) { @@ -54,7 +56,7 @@ PlanStage::StageState RecordStoreFastCountStage::doWork(WorkingSetID* out) { // This stage never returns a working set member. 
*out = WorkingSet::INVALID_ID; - long long nCounted = collection()->numRecords(opCtx()); + long long nCounted = collectionPtr()->numRecords(opCtx()); if (_skip) { nCounted -= _skip; diff --git a/src/mongo/db/exec/record_store_fast_count.h b/src/mongo/db/exec/record_store_fast_count.h index 883e6e843d2c7..5cdec740c67a7 100644 --- a/src/mongo/db/exec/record_store_fast_count.h +++ b/src/mongo/db/exec/record_store_fast_count.h @@ -29,7 +29,15 @@ #pragma once +#include + +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" namespace mongo { @@ -43,7 +51,7 @@ class RecordStoreFastCountStage final : public RequiresCollectionStage { static const char* kStageType; RecordStoreFastCountStage(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, long long skip, long long limit); diff --git a/src/mongo/db/exec/requires_all_indices_stage.h b/src/mongo/db/exec/requires_all_indices_stage.h index 1c223c355869f..b164f7c18e0bd 100644 --- a/src/mongo/db/exec/requires_all_indices_stage.h +++ b/src/mongo/db/exec/requires_all_indices_stage.h @@ -44,9 +44,14 @@ class RequiresAllIndicesStage : public RequiresCollectionStage { public: RequiresAllIndicesStage(const char* stageType, ExpressionContext* expCtx, - const CollectionPtr& coll) - : RequiresCollectionStage(stageType, expCtx, coll), - _allIndicesRequiredChecker(MultipleCollectionAccessor(coll)) {} + VariantCollectionPtrOrAcquisition collectionVariant) + : RequiresCollectionStage(stageType, expCtx, collectionVariant) { + const auto& coll = collection(); + auto multipleCollection = coll.isAcquisition() + ? MultipleCollectionAccessor{coll.getAcquisition()} + : MultipleCollectionAccessor{coll.getCollectionPtr()}; + _allIndicesRequiredChecker.emplace(std::move(multipleCollection)); + } virtual ~RequiresAllIndicesStage() = default; @@ -55,7 +60,12 @@ class RequiresAllIndicesStage : public RequiresCollectionStage { void doRestoreStateRequiresCollection() override final { if (_allIndicesRequiredChecker) { - _allIndicesRequiredChecker->check(opCtx(), MultipleCollectionAccessor(collection())); + const auto& coll = collection(); + auto multipleCollection = coll.isAcquisition() + ? MultipleCollectionAccessor{coll.getAcquisition()} + : MultipleCollectionAccessor{coll.getCollectionPtr()}; + + _allIndicesRequiredChecker->check(opCtx(), std::move(multipleCollection)); } } diff --git a/src/mongo/db/exec/requires_collection_stage.cpp b/src/mongo/db/exec/requires_collection_stage.cpp index c4b5a761df524..07276417be68f 100644 --- a/src/mongo/db/exec/requires_collection_stage.cpp +++ b/src/mongo/db/exec/requires_collection_stage.cpp @@ -27,11 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/exec/requires_collection_stage.h" - #include "mongo/db/query/plan_yield_policy.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -41,15 +43,21 @@ void RequiresCollectionStage::doSaveState() { void RequiresCollectionStage::doRestoreState(const RestoreContext& context) { if (context.type() == RestoreContext::RestoreType::kExternal) { - // RequiresCollectionStage requires a collection to be provided in restore. 
However, it may - // be null in case the collection got dropped or renamed. - auto collPtr = context.collection(); - invariant(collPtr); - _collection = collPtr; + // Restore the CollectionPtr only if we're still using the legacy approach. If we're using + // CollectionAcquisition it means the restoration is performed outside of this method + // and the pointers are still valid since it will survive across external yields. + if (_collection.isCollectionPtr()) { + // RequiresCollectionStage requires a collection to be provided in restore. However, it + // may be null in case the collection got dropped or renamed. + auto collPtr = context.collection(); + invariant(collPtr); + _collection = VariantCollectionPtrOrAcquisition{collPtr}; + } // If we restore externally and get a null Collection we need to figure out if this was a // drop or rename. The external lookup could have been done for UUID or namespace. - const auto& coll = *collPtr; + const auto& coll = _collection.getCollectionPtr(); + _collectionPtr = &coll; // If collection exists uuid does not match assume lookup was over namespace and treat this // as a drop. @@ -68,7 +76,7 @@ void RequiresCollectionStage::doRestoreState(const RestoreContext& context) { } } - const auto& coll = *_collection; + const auto& coll = *_collectionPtr; if (!coll) { PlanYieldPolicy::throwCollectionDroppedError(_collectionUUID); diff --git a/src/mongo/db/exec/requires_collection_stage.h b/src/mongo/db/exec/requires_collection_stage.h index 8265323458ba1..74b1eb25b3fa3 100644 --- a/src/mongo/db/exec/requires_collection_stage.h +++ b/src/mongo/db/exec/requires_collection_stage.h @@ -29,9 +29,17 @@ #pragma once +#include +#include + #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/restore_context.h" +#include "mongo/db/shard_role.h" #include "mongo/util/uuid.h" namespace mongo { @@ -49,12 +57,13 @@ class RequiresCollectionStage : public PlanStage { public: RequiresCollectionStage(const char* stageType, ExpressionContext* expCtx, - const CollectionPtr& coll) + VariantCollectionPtrOrAcquisition coll) : PlanStage(stageType, expCtx), - _collection(&coll), - _collectionUUID(coll->uuid()), + _collection(coll), + _collectionPtr(&coll.getCollectionPtr()), + _collectionUUID(coll.getCollectionPtr()->uuid()), _catalogEpoch(getCatalogEpoch()), - _nss(coll->ns()) {} + _nss(coll.getCollectionPtr()->ns()) {} virtual ~RequiresCollectionStage() = default; @@ -73,8 +82,12 @@ class RequiresCollectionStage : public PlanStage { */ virtual void doRestoreStateRequiresCollection() = 0; - const CollectionPtr& collection() const { - return *_collection; + const VariantCollectionPtrOrAcquisition& collection() const { + return _collection; + } + + const CollectionPtr& collectionPtr() const { + return *_collectionPtr; } UUID uuid() const { @@ -91,7 +104,8 @@ class RequiresCollectionStage : public PlanStage { // helper. It needs to stay valid until the PlanExecutor saves its state. To avoid this pointer // from dangling it needs to be reset when doRestoreState() is called and it is reset to a // different CollectionPtr. 
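RequiresCollectionStage now stores a VariantCollectionPtrOrAcquisition plus a cached CollectionPtr pointer, and the external-restore path only re-seats that pointer in the legacy CollectionPtr case. The sketch below is a simplified, hypothetical stand-in built on std::variant to show the shape of such a wrapper; it is not the real type, which the hunk above pulls in via mongo/db/shard_role.h:

```cpp
#include <string>
#include <utility>
#include <variant>

// Simplified stand-ins for the two ways a stage can reference a collection.
struct CollectionPtr { std::string ns; };
struct CollectionAcquisition { CollectionPtr coll; };

// Rough sketch in the spirit of VariantCollectionPtrOrAcquisition.
class CollectionRef {
public:
    explicit CollectionRef(const CollectionPtr* p) : _ref(p) {}
    explicit CollectionRef(CollectionAcquisition a) : _ref(std::move(a)) {}

    bool isCollectionPtr() const { return std::holds_alternative<const CollectionPtr*>(_ref); }
    bool isAcquisition() const { return !isCollectionPtr(); }

    // Both alternatives can expose a CollectionPtr; only the legacy pointer case
    // ever needs to be re-seated on an external restore (see the hunk above).
    const CollectionPtr& getCollectionPtr() const {
        if (isCollectionPtr()) {
            return *std::get<const CollectionPtr*>(_ref);
        }
        return std::get<CollectionAcquisition>(_ref).coll;
    }

private:
    std::variant<const CollectionPtr*, CollectionAcquisition> _ref;
};
```

In the restore hunk above, isCollectionPtr() gates whether the stage swaps in the externally provided pointer, while acquisitions are assumed to remain valid across the yield.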
- const CollectionPtr* _collection; + VariantCollectionPtrOrAcquisition _collection; + const CollectionPtr* _collectionPtr; const UUID _collectionUUID; const uint64_t _catalogEpoch; @@ -101,6 +115,19 @@ class RequiresCollectionStage : public PlanStage { }; // Type alias for use by PlanStages that write to a Collection. -using RequiresMutableCollectionStage = RequiresCollectionStage; +class RequiresWritableCollectionStage : public RequiresCollectionStage { +public: + RequiresWritableCollectionStage(const char* stageType, + ExpressionContext* expCtx, + CollectionAcquisition coll) + : RequiresCollectionStage(stageType, expCtx, coll), _collectionAcquisition(coll) {} + + const CollectionAcquisition& collectionAcquisition() const { + return _collectionAcquisition; + } + +private: + const CollectionAcquisition _collectionAcquisition; +}; } // namespace mongo diff --git a/src/mongo/db/exec/requires_index_stage.cpp b/src/mongo/db/exec/requires_index_stage.cpp index 512ee0b14bcaf..6481dafc38216 100644 --- a/src/mongo/db/exec/requires_index_stage.cpp +++ b/src/mongo/db/exec/requires_index_stage.cpp @@ -27,15 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/exec/requires_index_stage.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { RequiresIndexStage::RequiresIndexStage(const char* stageType, ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* indexDescriptor, WorkingSet* workingSet) : RequiresCollectionStage(stageType, expCtx, collection), @@ -52,7 +55,7 @@ void RequiresIndexStage::doSaveStateRequiresCollection() { } void RequiresIndexStage::doRestoreStateRequiresCollection() { - auto desc = collection()->getIndexCatalog()->findIndexByIdent(expCtx()->opCtx, _indexIdent); + auto desc = collectionPtr()->getIndexCatalog()->findIndexByIdent(expCtx()->opCtx, _indexIdent); uassert(ErrorCodes::QueryPlanKilled, str::stream() << "query plan killed :: index '" << _indexName << "' dropped", desc && !desc->getEntry()->isDropped()); diff --git a/src/mongo/db/exec/requires_index_stage.h b/src/mongo/db/exec/requires_index_stage.h index 5491f96935221..a5aa8e7cf342a 100644 --- a/src/mongo/db/exec/requires_index_stage.h +++ b/src/mongo/db/exec/requires_index_stage.h @@ -29,10 +29,15 @@ #pragma once +#include + +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/exec/requires_collection_stage.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" namespace mongo { @@ -50,7 +55,7 @@ class RequiresIndexStage : public RequiresCollectionStage { public: RequiresIndexStage(const char* stageType, ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const IndexDescriptor* indexDescriptor, WorkingSet* workingSet); diff --git a/src/mongo/db/exec/return_key.cpp b/src/mongo/db/exec/return_key.cpp index a620fb8e5a593..400e0797bb98e 100644 --- a/src/mongo/db/exec/return_key.cpp +++ b/src/mongo/db/exec/return_key.cpp @@ -28,14 +28,17 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/return_key.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include 
"mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/pipeline/field_path.h" -#include "mongo/logv2/log.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/return_key.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/exec/return_key.h b/src/mongo/db/exec/return_key.h index 2e6db5482c071..d4925cc53dfc8 100644 --- a/src/mongo/db/exec/return_key.h +++ b/src/mongo/db/exec/return_key.h @@ -29,7 +29,17 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/stage_types.h" namespace mongo { /** diff --git a/src/mongo/db/exec/sample_from_timeseries_bucket.cpp b/src/mongo/db/exec/sample_from_timeseries_bucket.cpp index a308c61bc7fcb..6ce81004ba810 100644 --- a/src/mongo/db/exec/sample_from_timeseries_bucket.cpp +++ b/src/mongo/db/exec/sample_from_timeseries_bucket.cpp @@ -29,10 +29,30 @@ #include "mongo/db/exec/sample_from_timeseries_bucket.h" + +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/timeseries/timeseries_constants.h" - #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/random.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/exec/sample_from_timeseries_bucket.h b/src/mongo/db/exec/sample_from_timeseries_bucket.h index 03b1c685a7fea..ff01d4c3f007b 100644 --- a/src/mongo/db/exec/sample_from_timeseries_bucket.h +++ b/src/mongo/db/exec/sample_from_timeseries_bucket.h @@ -29,9 +29,24 @@ #pragma once -#include "mongo/db/exec/bucket_unpacker.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/data_view.h" +#include "mongo/bson/oid.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { /** diff --git a/src/mongo/db/exec/sbe/SConscript b/src/mongo/db/exec/sbe/SConscript index 1dd2e1fcf8687..ef566ce35d098 100644 --- a/src/mongo/db/exec/sbe/SConscript +++ b/src/mongo/db/exec/sbe/SConscript @@ -66,6 +66,7 @@ sbeEnv.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/bson/dotted_path_support', + '$BUILD_DIR/mongo/db/query/str_trim_utils', '$BUILD_DIR/mongo/db/sorter/sorter_idl', '$BUILD_DIR/mongo/db/sorter/sorter_stats', ], @@ -165,7 +166,6 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/auth/authmocks', 
'$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/db/service_context_test_fixture', ], ) @@ -183,6 +183,7 @@ env.CppUnitTest( 'expressions/sbe_date_to_parts_test.cpp', 'expressions/sbe_date_to_string_test.cpp', 'expressions/sbe_date_trunc_test.cpp', + 'expressions/sbe_exp_moving_avg_test.cpp', 'expressions/sbe_extract_sub_array_builtin_test.cpp', 'expressions/sbe_fail_test.cpp', 'expressions/sbe_get_element_builtin_test.cpp', @@ -199,6 +200,7 @@ env.CppUnitTest( 'expressions/sbe_object_array_conversion_test.cpp', 'expressions/sbe_prim_binary_test.cpp', 'expressions/sbe_prim_unary_test.cpp', + 'expressions/sbe_rank_test.cpp', 'expressions/sbe_regex_test.cpp', 'expressions/sbe_replace_one_expression_test.cpp', 'expressions/sbe_reverse_array_builtin_test.cpp', @@ -248,7 +250,6 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/index/column_store_index', '$BUILD_DIR/mongo/db/query/collation/collator_interface_mock', '$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/util/pcre_wrapper', 'sbe_plan_stage_test', ], @@ -260,14 +261,17 @@ env.Library( 'abt/sbe_abt_test_util.cpp', ], LIBDEPS=[ - "$BUILD_DIR/mongo/db/auth/authmocks", - "$BUILD_DIR/mongo/db/pipeline/abt_translation", - "$BUILD_DIR/mongo/db/query/optimizer/unit_test_utils", + '$BUILD_DIR/mongo/db/pipeline/abt_translation', + '$BUILD_DIR/mongo/db/query/optimizer/unit_test_utils', '$BUILD_DIR/mongo/db/query/query_test_service_context', '$BUILD_DIR/mongo/db/query_exec', '$BUILD_DIR/mongo/db/service_context_test_fixture', 'query_sbe_abt', ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/auth/authmocks', + '$BUILD_DIR/mongo/db/service_context_non_d', + ], ) env.CppUnitTest( @@ -277,7 +281,6 @@ env.CppUnitTest( 'abt/sbe_abt_test.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/unittest/unittest', 'sbe_abt_test_util', ], ) @@ -307,3 +310,15 @@ env.Benchmark( 'query_sbe_abt', ], ) + +env.Benchmark( + target='sbe_vm_bm', + source=[ + 'vm/vm_bm.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/db/query/collation/collator_icu', + '$BUILD_DIR/mongo/unittest/unittest', + 'query_sbe', + ], +) diff --git a/src/mongo/db/exec/sbe/abt/abt_lower.cpp b/src/mongo/db/exec/sbe/abt/abt_lower.cpp index d8304be017f78..9600cfe72363f 100644 --- a/src/mongo/db/exec/sbe/abt/abt_lower.cpp +++ b/src/mongo/db/exec/sbe/abt/abt_lower.cpp @@ -28,6 +28,29 @@ */ #include "mongo/db/exec/sbe/abt/abt_lower.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/abt/named_slots.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/co_scan.h" @@ -47,7 +70,19 @@ #include "mongo/db/exec/sbe/stages/union.h" #include "mongo/db/exec/sbe/stages/unique.h" #include "mongo/db/exec/sbe/stages/unwind.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/stage_types.h" +#include 
"mongo/db/storage/key_string.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo::optimizer { @@ -326,11 +361,16 @@ sbe::value::SlotVector SBENodeLowering::convertRequiredProjectionsToSlots( } sbe::value::SlotVector result; + // Need to dedup here, because even if 'projections' is unique, 'slotMap' can map two + // projections to the same slot. 'convertProjectionsToSlots' can't dedup because it preserves + // the order of items in the vector. + sbe::value::SlotSet seen; const auto& projections = getPropertyConst(props._physicalProps).getProjections(); for (const auto slot : convertProjectionsToSlots(slotMap, projections.getVector())) { - if (toExcludeSet.count(slot) == 0) { + if (toExcludeSet.count(slot) == 0 && seen.count(slot) == 0) { result.push_back(slot); + seen.insert(slot); } } return result; @@ -754,8 +794,11 @@ std::unique_ptr SBENodeLowering::walk(const NestedLoopJoinNode& for (const ProjectionName& projectionName : n.getCorrelatedProjectionNames()) { correlatedSlots.push_back(slotMap.at(projectionName)); } - // Soring is not essential. Here we sort only for SBE plan stability. + // Sorting is not essential. Here we sort only for SBE plan stability. std::sort(correlatedSlots.begin(), correlatedSlots.end()); + // However, we should deduplicate the slots, in case two projections mapped to the same slot. + correlatedSlots.erase(std::unique(correlatedSlots.begin(), correlatedSlots.end()), + correlatedSlots.end()); auto expr = lowerExpression(filter, slotMap); @@ -1049,11 +1092,11 @@ std::unique_ptr SBENodeLowering::walk(const PhysicalScanNode& n, NamespaceStringOrUUID nss = parseFromScanDef(def); // Unused. - boost::optional seekKeySlot; + boost::optional seekRecordIdSlot; sbe::ScanCallbacks callbacks({}, {}, {}); if (n.useParallelScan()) { - return sbe::makeS(nss.uuid().value(), + return sbe::makeS(nss.uuid(), rootSlot, scanRidSlot, boost::none, @@ -1077,23 +1120,26 @@ std::unique_ptr SBENodeLowering::walk(const PhysicalScanNode& n, } MONGO_UNREACHABLE; }(); - return sbe::makeS(nss.uuid().value(), - rootSlot, - scanRidSlot, - boost::none, - boost::none, - boost::none, - boost::none, - boost::none, - fields, - vars, - seekKeySlot, - forwardScan, - nullptr /*yieldPolicy*/, - planNodeId, - callbacks, - false, /* lowPriority */ - _scanOrder == ScanOrder::Random); + return sbe::makeS( + nss.uuid(), + rootSlot, + scanRidSlot, + boost::none, + boost::none, + boost::none, + boost::none, + boost::none, + fields, + vars, + seekRecordIdSlot, + boost::none /* minRecordIdSlot */, + boost::none /* maxRecordIdSlot */, + forwardScan, + nullptr /*yieldPolicy*/, + planNodeId, + callbacks, + gDeprioritizeUnboundedUserCollectionScans.load(), /* lowPriority */ + _scanOrder == ScanOrder::Random); } else { tasserted(6624355, "Unknown scan type."); } @@ -1127,17 +1173,17 @@ std::unique_ptr SBENodeLowering::convertBoundsToExpr( ksFnArgs.emplace_back(exprLower.optimize(expr)); } - KeyString::Discriminator discriminator; + key_string::Discriminator discriminator; // For a reverse scan, we start from the high bound and iterate until the low bound. if (isLower != reversed) { // For the start point, we want to seek ExclusiveBefore iff the bound is inclusive, // so that values equal to the seek value are included. - discriminator = bound.isInclusive() ? KeyString::Discriminator::kExclusiveBefore - : KeyString::Discriminator::kExclusiveAfter; + discriminator = bound.isInclusive() ? 
key_string::Discriminator::kExclusiveBefore + : key_string::Discriminator::kExclusiveAfter; } else { // For the end point we want the opposite. - discriminator = bound.isInclusive() ? KeyString::Discriminator::kExclusiveAfter - : KeyString::Discriminator::kExclusiveBefore; + discriminator = bound.isInclusive() ? key_string::Discriminator::kExclusiveAfter + : key_string::Discriminator::kExclusiveBefore; } ksFnArgs.emplace_back(sbe::makeE( @@ -1204,12 +1250,13 @@ std::unique_ptr SBENodeLowering::walk(const IndexScanNode& n, // Unused. boost::optional resultSlot; - return sbe::makeS(nss.uuid().value(), + return sbe::makeS(nss.uuid(), indexDefName, !reverse, resultSlot, scanRidSlot, boost::none, + boost::none, indexKeysToInclude, vars, std::move(lowerBoundExpr), @@ -1236,11 +1283,11 @@ std::unique_ptr SBENodeLowering::walk(const SeekNode& n, sbe::value::SlotVector vars; generateSlots(slotMap, n.getFieldProjectionMap(), seekRidSlot, rootSlot, fields, vars); - boost::optional seekKeySlot = slotMap.at(n.getRIDProjectionName()); + boost::optional seekRecordIdSlot = slotMap.at(n.getRIDProjectionName()); sbe::ScanCallbacks callbacks({}, {}, {}); const PlanNodeId planNodeId = _nodeToGroupPropsMap.at(&n)._planNodeId; - return sbe::makeS(nss.uuid().value(), + return sbe::makeS(nss.uuid(), rootSlot, seekRidSlot, boost::none, @@ -1250,7 +1297,9 @@ std::unique_ptr SBENodeLowering::walk(const SeekNode& n, boost::none, fields, vars, - seekKeySlot, + seekRecordIdSlot, + boost::none /* minRecordIdSlot */, + boost::none /* maxRecordIdSlot */, true /*forward*/, nullptr /*yieldPolicy*/, planNodeId, diff --git a/src/mongo/db/exec/sbe/abt/abt_lower.h b/src/mongo/db/exec/sbe/abt/abt_lower.h index 164076df6b004..95843693c4481 100644 --- a/src/mongo/db/exec/sbe/abt/abt_lower.h +++ b/src/mongo/db/exec/sbe/abt/abt_lower.h @@ -29,13 +29,28 @@ #pragma once +#include +#include +#include +#include + #include "mongo/db/exec/sbe/abt/abt_lower_defs.h" +#include "mongo/db/exec/sbe/abt/named_slots.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/node_defs.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -260,6 +275,12 @@ class SBENodeLowering { /** * Convert a vector of ProjectionNames to slot IDs from the projections that have already been * bound to slots. + * + * Preserves the order, and therefore preserves duplicates and the result .size() == the input + * .size(). + * + * Even when 'projectionNames' is free of duplicates, the output may have duplicates because two + * projections can map to the same slot. */ sbe::value::SlotVector convertProjectionsToSlots(const SlotVarMap& slotMap, const ProjectionNameVector& projectionNames); @@ -269,6 +290,10 @@ class SBENodeLowering { * the RequiredProjections node property. This function pulls out those projection names and * looks up the relevant slot IDs they are bound to. 
The optional toExclude vector can prevent * some slots from being added to the output vector. + * + * The output is free of duplicates. + * + * Does not guarantee any output order. */ sbe::value::SlotVector convertRequiredProjectionsToSlots( const SlotVarMap& slotMap, diff --git a/src/mongo/db/exec/sbe/abt/abt_lower_bm.cpp b/src/mongo/db/exec/sbe/abt/abt_lower_bm.cpp index 20e17f83a326d..d44c934e78b3a 100644 --- a/src/mongo/db/exec/sbe/abt/abt_lower_bm.cpp +++ b/src/mongo/db/exec/sbe/abt/abt_lower_bm.cpp @@ -28,20 +28,39 @@ */ #include +#include #include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/abt/abt_lower.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" #include "mongo/db/exec/sbe/abt/named_slots_mock.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/reference_tracker.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" #include "mongo/db/query/optimizer/rewrites/path_lower.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo::optimizer { namespace { @@ -135,7 +154,7 @@ class ABTNodeLoweringFixture : public benchmark::Fixture { // Create bindings (as above) and also create a scan node source. ABT createBindings(std::vector> bindingList) { - return createBindings(bindingList, _node(scanNode("scan0")), "scan0"); + return createBindings(std::move(bindingList), _node(scanNode("scan0")), "scan0"); } }; diff --git a/src/mongo/db/exec/sbe/abt/abt_lower_test.cpp b/src/mongo/db/exec/sbe/abt/abt_lower_test.cpp index 4a9f299f6cb8c..93e85f1a6efbc 100644 --- a/src/mongo/db/exec/sbe/abt/abt_lower_test.cpp +++ b/src/mongo/db/exec/sbe/abt/abt_lower_test.cpp @@ -27,24 +27,48 @@ * it in the license file. 
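Two deduplication strategies appear in the lowering changes: convertRequiredProjectionsToSlots keeps a "seen" set so it can drop duplicates while preserving input order, and the nested-loop-join correlated slots are sorted and then compacted with std::unique/erase. A small standalone sketch of both idioms, using plain ints as slot IDs:

```cpp
#include <algorithm>
#include <set>
#include <vector>

using SlotId = int;

// Order-preserving dedup, as used when filtering required-projection slots.
std::vector<SlotId> dedupPreservingOrder(const std::vector<SlotId>& slots) {
    std::vector<SlotId> result;
    std::set<SlotId> seen;
    for (SlotId s : slots) {
        if (seen.insert(s).second) {
            result.push_back(s);
        }
    }
    return result;
}

// Sort-then-unique dedup, as used for the correlated slots of a nested-loop join;
// std::unique only removes adjacent duplicates, hence the sort first.
void dedupSorted(std::vector<SlotId>& slots) {
    std::sort(slots.begin(), slots.end());
    slots.erase(std::unique(slots.begin(), slots.end()), slots.end());
}
```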
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + #include "mongo/base/string_data.h" #include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/abt/abt_lower.h" #include "mongo/db/exec/sbe/abt/named_slots_mock.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" #include "mongo/db/query/optimizer/metadata.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/node_defs.h" #include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" #include "mongo/db/query/optimizer/rewrites/path_lower.h" #include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/golden_test.h" -#include "mongo/unittest/unittest.h" -#include +#include "mongo/unittest/golden_test_base.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo::optimizer { namespace { @@ -143,7 +167,7 @@ class ABTPlanGeneration : public unittest::Test { DistributionAndPaths dnp(DistributionType::Centralized); bool exists = true; CEType ce{false}; - return ScanDefinition(opts, indexDefs, trie, dnp, exists, ce); + return ScanDefinition(opts, indexDefs, trie, dnp, exists, ce, ShardingMetadata{}); } // Does not add the node to the Node map, must be called inside '_node()'. diff --git a/src/mongo/db/exec/sbe/abt/sbe_abt_diff_test.cpp b/src/mongo/db/exec/sbe/abt/sbe_abt_diff_test.cpp index d71b3d040d680..ffb2709bad9fb 100644 --- a/src/mongo/db/exec/sbe/abt/sbe_abt_diff_test.cpp +++ b/src/mongo/db/exec/sbe/abt/sbe_abt_diff_test.cpp @@ -27,9 +27,21 @@ * it in the license file. 
*/ -#include "mongo/db/concurrency/lock_state.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/sbe/abt/sbe_abt_test_util.h" -#include "mongo/unittest/temp_dir.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer { namespace { @@ -77,30 +89,39 @@ static bool compareResults(const std::vector& expected, using TestContextFn = std::function; +static std::vector fromjson(const std::vector& jsonVector) { + std::vector bsonObjs; + bsonObjs.reserve(jsonVector.size()); + for (const std::string& jsonStr : jsonVector) { + bsonObjs.push_back(mongo::fromjson(jsonStr)); + } + return bsonObjs; +} + static bool compareSBEABTAgainstExpected(const TestContextFn& fn, const std::string& pipelineStr, - const std::vector& jsonVector, + const std::vector& inputObjs, const std::vector& expected) { - const auto& actual = runSBEAST(fn().get(), pipelineStr, jsonVector); + const auto& actual = runSBEAST(fn().get(), pipelineStr, inputObjs); return compareResults(expected, actual, true /*preserveFieldOrder*/); } static bool comparePipelineAgainstExpected(const TestContextFn& fn, const std::string& pipelineStr, - const std::vector& jsonVector, + const std::vector& inputObjs, const std::vector& expected) { - const auto& actual = runPipeline(fn().get(), pipelineStr, jsonVector); + const auto& actual = runPipeline(fn().get(), pipelineStr, inputObjs); return compareResults(expected, actual, true /*preserveFieldOrder*/); } static bool compareSBEABTAgainstPipeline(const TestContextFn& fn, const std::string& pipelineStr, - const std::vector& jsonVector, + const std::vector& inputObjs, const bool preserveFieldOrder = true) { - const auto& pipelineResults = runPipeline(fn().get(), pipelineStr, jsonVector); - const auto& sbeResults = runSBEAST(fn().get(), pipelineStr, jsonVector); + const auto& pipelineResults = runPipeline(fn().get(), pipelineStr, inputObjs); + const auto& sbeResults = runSBEAST(fn().get(), pipelineStr, inputObjs); - std::cout << "Pipeline: " << pipelineStr << ", input size: " << jsonVector.size() << "\n"; + std::cout << "Pipeline: " << pipelineStr << ", input size: " << inputObjs.size() << "\n"; const bool result = compareResults(pipelineResults, sbeResults, preserveFieldOrder); if (result) { std::cout << "Success. 
Result count: " << pipelineResults.size() << "\n"; @@ -115,38 +136,6 @@ static bool compareSBEABTAgainstPipeline(const TestContextFn& fn, return result; } -static std::vector toResultSet(const std::vector& jsonVector) { - std::vector results; - for (const std::string& jsonStr : jsonVector) { - results.emplace_back(fromjson(jsonStr)); - } - return results; -} - -class TestObserver : public ServiceContext::ClientObserver { -public: - TestObserver() = default; - ~TestObserver() = default; - - void onCreateClient(Client* client) final {} - - void onDestroyClient(Client* client) final {} - - void onCreateOperationContext(OperationContext* opCtx) override { - opCtx->setLockState(std::make_unique(opCtx->getServiceContext())); - } - - void onDestroyOperationContext(OperationContext* opCtx) final {} -}; - -const ServiceContext::ConstructorActionRegisterer clientObserverRegisterer{ - "TestObserver", - [](ServiceContext* service) { - service->registerClientObserver(std::make_unique()); - }, - [](ServiceContext* serviceContext) { - }}; - TEST_F(NodeSBE, DiffTestBasic) { const auto contextFn = [this]() { return makeOperationContext(); @@ -154,22 +143,22 @@ TEST_F(NodeSBE, DiffTestBasic) { const auto compare = [&contextFn](const std::string& pipelineStr, const std::vector& jsonVector) { return compareSBEABTAgainstPipeline( - contextFn, pipelineStr, jsonVector, true /*preserveFieldOrder*/); + contextFn, pipelineStr, fromjson(jsonVector), true /*preserveFieldOrder*/); }; ASSERT_TRUE(compareSBEABTAgainstExpected( - contextFn, "[]", {"{a:1, b:2, c:3}"}, toResultSet({"{ a: 1, b: 2, c: 3 }"}))); + contextFn, "[]", fromjson({"{a:1, b:2, c:3}"}), fromjson({"{ a: 1, b: 2, c: 3 }"}))); ASSERT_TRUE(compareSBEABTAgainstExpected(contextFn, "[{$addFields: {c: {$literal: 3}}}]", - {"{a:1, b:2}"}, - toResultSet({"{ a: 1, b: 2, c: 3 }"}))); + fromjson({"{a:1, b:2}"}), + fromjson({"{ a: 1, b: 2, c: 3 }"}))); ASSERT_TRUE(comparePipelineAgainstExpected( - contextFn, "[]", {"{a:1, b:2, c:3}"}, toResultSet({"{ a: 1, b: 2, c: 3 }"}))); + contextFn, "[]", fromjson({"{a:1, b:2, c:3}"}), fromjson({"{ a: 1, b: 2, c: 3 }"}))); ASSERT_TRUE(comparePipelineAgainstExpected(contextFn, "[{$addFields: {c: {$literal: 3}}}]", - {"{a:1, b:2}"}, - toResultSet({"{ a: 1, b: 2, c: 3 }"}))); + fromjson({"{a:1, b:2}"}), + fromjson({"{ a: 1, b: 2, c: 3 }"}))); ASSERT_TRUE(compare("[]", {"{a:1, b:2, c:3}"})); ASSERT_TRUE(compare("[{$addFields: {c: {$literal: 3}}}]", {"{a:1, b:2}"})); @@ -182,14 +171,14 @@ TEST_F(NodeSBE, DiffTest) { const auto compare = [&contextFn](const std::string& pipelineStr, const std::vector& jsonVector) { return compareSBEABTAgainstPipeline( - contextFn, pipelineStr, jsonVector, true /*preserveFieldOrder*/); + contextFn, pipelineStr, fromjson(jsonVector), true /*preserveFieldOrder*/); }; // Consider checking if compare() works first. const auto compareUnordered = [&contextFn](const std::string& pipelineStr, const std::vector& jsonVector) { return compareSBEABTAgainstPipeline( - contextFn, pipelineStr, jsonVector, false /*preserveFieldOrder*/); + contextFn, pipelineStr, fromjson(jsonVector), false /*preserveFieldOrder*/); }; ASSERT_TRUE(compare("[]", {})); diff --git a/src/mongo/db/exec/sbe/abt/sbe_abt_test.cpp b/src/mongo/db/exec/sbe/abt/sbe_abt_test.cpp index bdc44690acf33..65528c6578320 100644 --- a/src/mongo/db/exec/sbe/abt/sbe_abt_test.cpp +++ b/src/mongo/db/exec/sbe/abt/sbe_abt_test.cpp @@ -27,18 +27,59 @@ * it in the license file. 
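The diff-test helpers now take std::vector<BSONObj>, and the local fromjson(std::vector<std::string>) wrapper converts the JSON literals once at the call site. A sketch of the same conversion helper; the name fromjsonVector below is arbitrary, the test's overload is simply named fromjson:

```cpp
#include <string>
#include <vector>

#include "mongo/bson/bsonobj.h"
#include "mongo/bson/json.h"

// Convert a batch of JSON strings into BSONObjs up front, so the comparison
// helpers can take std::vector<BSONObj> directly (as in the hunk above).
std::vector<mongo::BSONObj> fromjsonVector(const std::vector<std::string>& jsonStrings) {
    std::vector<mongo::BSONObj> objs;
    objs.reserve(jsonStrings.size());
    for (const std::string& s : jsonStrings) {
        objs.push_back(mongo::fromjson(s));
    }
    return objs;
}
```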
*/ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/abt/abt_lower.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" #include "mongo/db/exec/sbe/abt/sbe_abt_test_util.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/abt/document_source_visitor.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/node_defs.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/reference_tracker.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" #include "mongo/db/query/optimizer/rewrites/path_lower.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/db/record_id.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { diff --git a/src/mongo/db/exec/sbe/abt/sbe_abt_test_util.cpp b/src/mongo/db/exec/sbe/abt/sbe_abt_test_util.cpp index 38389f7d85a71..7be99f3fa485d 100644 --- a/src/mongo/db/exec/sbe/abt/sbe_abt_test_util.cpp +++ b/src/mongo/db/exec/sbe/abt/sbe_abt_test_util.cpp @@ -29,34 +29,61 @@ #include "mongo/db/exec/sbe/abt/sbe_abt_test_util.h" +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/sbe/abt/abt_lower.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/pipeline/abt/document_source_visitor.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_queue.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/collation/collator_interface.h" +#include 
"mongo/db/query/cost_model/cost_model_gen.h" #include "mongo/db/query/cqf_command_utils.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node_defs.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/temp_dir.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery namespace mongo::optimizer { std::unique_ptr parsePipeline( - const std::string& pipelineStr, NamespaceString nss, OperationContext* opCtx) { - const BSONObj inputBson = fromjson("{pipeline: " + pipelineStr + "}"); - - std::vector rawPipeline; - for (auto&& stageElem : inputBson["pipeline"].Array()) { - ASSERT_EQUALS(stageElem.type(), BSONType::Object); - rawPipeline.push_back(stageElem.embeddedObject()); - } - + const std::vector& rawPipeline, NamespaceString nss, OperationContext* opCtx) { AggregateCommandRequest request(std::move(nss), rawPipeline); boost::intrusive_ptr ctx( new ExpressionContextForTest(opCtx, request)); @@ -67,12 +94,24 @@ std::unique_ptr parsePipeline( return Pipeline::parse(request.getPipeline(), ctx); } -ABT createValueArray(const std::vector& jsonVector) { +std::unique_ptr parsePipeline( + const std::string& pipelineStr, NamespaceString nss, OperationContext* opCtx) { + const BSONObj inputBson = fromjson("{pipeline: " + pipelineStr + "}"); + + std::vector rawPipeline; + for (auto&& stageElem : inputBson["pipeline"].Array()) { + ASSERT_EQUALS(stageElem.type(), BSONType::Object); + rawPipeline.push_back(stageElem.embeddedObject()); + } + return parsePipeline(rawPipeline, std::move(nss), opCtx); +} + +ABT createValueArray(const std::vector& inputObjs) { const auto [tag, val] = sbe::value::makeNewArray(); auto outerArrayPtr = sbe::value::getArrayView(val); // TODO: SERVER-69566. Use makeArray. 
- for (size_t i = 0; i < jsonVector.size(); i++) { + for (size_t i = 0; i < inputObjs.size(); i++) { const auto [tag1, val1] = sbe::value::makeNewArray(); auto innerArrayPtr = sbe::value::getArrayView(val1); @@ -80,7 +119,7 @@ ABT createValueArray(const std::vector& jsonVector) { const auto [recordTag, recordVal] = sbe::value::makeNewRecordId(i); innerArrayPtr->push_back(recordTag, recordVal); - const BSONObj& bsonObj = fromjson(jsonVector.at(i)); + const BSONObj& bsonObj = inputObjs.at(i); const auto [tag2, val2] = sbe::value::copyValue(sbe::value::TypeTags::bsonObject, sbe::value::bitcastFrom(bsonObj.objdata())); @@ -94,13 +133,14 @@ ABT createValueArray(const std::vector& jsonVector) { std::vector runSBEAST(OperationContext* opCtx, const std::string& pipelineStr, - const std::vector& jsonVector) { + const std::vector& inputObjs) { auto prefixId = PrefixId::createForTests(); Metadata metadata{{}}; - auto pipeline = parsePipeline(pipelineStr, NamespaceString("test"), opCtx); + auto pipeline = + parsePipeline(pipelineStr, NamespaceString::createNamespaceString_forTest("test"), opCtx); - ABT valueArray = createValueArray(jsonVector); + ABT valueArray = createValueArray(inputObjs); const ProjectionName scanProjName = prefixId.getNextId("scan"); ABT tree = translatePipelineToABT(metadata, @@ -171,14 +211,13 @@ std::vector runSBEAST(OperationContext* opCtx, std::vector runPipeline(OperationContext* opCtx, const std::string& pipelineStr, - const std::vector& jsonVector) { - NamespaceString nss("test"); + const std::vector& inputObjs) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("test"); std::unique_ptr pipeline = parsePipeline(pipelineStr, nss, opCtx); const auto queueStage = DocumentSourceQueue::create(pipeline->getContext()); - for (const std::string& s : jsonVector) { - BSONObj bsonObj = fromjson(s); + for (const auto& bsonObj : inputObjs) { queueStage->emplace_back(Document{bsonObj}); } diff --git a/src/mongo/db/exec/sbe/abt/sbe_abt_test_util.h b/src/mongo/db/exec/sbe/abt/sbe_abt_test_util.h index efab6ef258aef..0c19934df4a80 100644 --- a/src/mongo/db/exec/sbe/abt/sbe_abt_test_util.h +++ b/src/mongo/db/exec/sbe/abt/sbe_abt_test_util.h @@ -27,10 +27,17 @@ * it in the license file. */ +#include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/pipeline.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/service_context_test_fixture.h" @@ -40,6 +47,8 @@ namespace mongo::optimizer { class NodeSBE : public ServiceContextTest {}; +std::unique_ptr parsePipeline( + const std::vector& rawPipeline, NamespaceString nss, OperationContext* opCtx); std::unique_ptr parsePipeline( const std::string& pipelineStr, NamespaceString nss, OperationContext* opCtx); @@ -47,10 +56,12 @@ using ABTSBE = sbe::EExpressionTestFixture; // Create a pipeline based on the given string, use a DocumentSourceQueue as input initialized with // the provided documents encoded as json strings, and return the results as BSON. 
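To illustrate how these helpers fit together after the change, here is a minimal usage sketch. It assumes the helpers return the result documents as std::vector<BSONObj> and that ASSERT_BSONOBJ_EQ is available to the test; the test name and the sample pipeline are illustrative only and are not part of this patch.

TEST_F(NodeSBE, UsageSketch) {
    auto opCtx = makeOperationContext();

    const std::string pipelineStr = "[{$addFields: {c: {$literal: 3}}}]";
    const std::vector<BSONObj> input{fromjson("{a: 1, b: 2}")};

    // Run the same pipeline through the ABT/SBE lowering and through the classic
    // Pipeline machinery, then expect identical BSON results.
    const std::vector<BSONObj> sbeResults = runSBEAST(opCtx.get(), pipelineStr, input);
    const std::vector<BSONObj> pipelineResults = runPipeline(opCtx.get(), pipelineStr, input);

    ASSERT_EQ(sbeResults.size(), pipelineResults.size());
    for (size_t i = 0; i < sbeResults.size(); i++) {
        ASSERT_BSONOBJ_EQ(sbeResults[i], pipelineResults[i]);
    }
}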
+ +ABT createValueArray(const std::vector& inputObjs); std::vector runSBEAST(OperationContext* opCtx, const std::string& pipelineStr, - const std::vector& jsonVector); + const std::vector& inputObjs); std::vector runPipeline(OperationContext* opCtx, const std::string& pipelineStr, - const std::vector& jsonVector); + const std::vector& inputObjs); } // namespace mongo::optimizer diff --git a/src/mongo/db/exec/sbe/expressions/compile_ctx.cpp b/src/mongo/db/exec/sbe/expressions/compile_ctx.cpp index 0c4ba18f07168..e84bccf16420b 100644 --- a/src/mongo/db/exec/sbe/expressions/compile_ctx.cpp +++ b/src/mongo/db/exec/sbe/expressions/compile_ctx.cpp @@ -29,6 +29,8 @@ #include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include + namespace mongo::sbe { value::SlotAccessor* CompileCtx::getAccessor(value::SlotId slot) { diff --git a/src/mongo/db/exec/sbe/expressions/compile_ctx.h b/src/mongo/db/exec/sbe/expressions/compile_ctx.h index 2ca81325d5a6b..c7bfa2bb581ef 100644 --- a/src/mongo/db/exec/sbe/expressions/compile_ctx.h +++ b/src/mongo/db/exec/sbe/expressions/compile_ctx.h @@ -29,7 +29,14 @@ #pragma once +#include +#include +#include + #include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/values/row.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/label.h" #include "mongo/stdx/unordered_map.h" diff --git a/src/mongo/db/exec/sbe/expressions/expression.cpp b/src/mongo/db/exec/sbe/expressions/expression.cpp index a5693e47668ba..724aa9db5e541 100644 --- a/src/mongo/db/exec/sbe/expressions/expression.cpp +++ b/src/mongo/db/exec/sbe/expressions/expression.cpp @@ -27,22 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/expressions/expression.h" - -#include +#include +#include +#include #include -#include #include +#include +#include +#include + +#include "mongo/bson/ordering.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/size_estimator.h" -#include "mongo/db/exec/sbe/stages/spool.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/print_options.h" #include "mongo/db/exec/sbe/values/arith_common.h" #include "mongo/db/exec/sbe/values/value_printer.h" #include "mongo/db/exec/sbe/vm/datetime.h" +#include "mongo/db/exec/sbe/vm/label.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -644,7 +652,7 @@ static stdx::unordered_map kBuiltinFunctions = { {"abs", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::abs, false}}, {"ceil", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::ceil, false}}, {"floor", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::floor, false}}, - {"trunc", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::trunc, false}}, + {"trunc", BuiltinFn{[](size_t n) { return n == 1 || n == 2; }, vm::Builtin::trunc, false}}, {"exp", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::exp, false}}, {"ln", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::ln, false}}, {"log10", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::log10, false}}, @@ -681,8 +689,12 @@ static stdx::unordered_map kBuiltinFunctions = { {"bitTestPosition", BuiltinFn{[](size_t n) { return n == 3; }, 
vm::Builtin::bitTestPosition, false}}, {"bsonSize", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::bsonSize, false}}, + {"strLenBytes", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::strLenBytes, false}}, {"toLower", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::toLower, false}}, {"toUpper", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::toUpper, false}}, + {"trim", BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::trim, false}}, + {"ltrim", BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::ltrim, false}}, + {"rtrim", BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::rtrim, false}}, {"coerceToBool", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::coerceToBool, false}}, {"coerceToString", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::coerceToString, false}}, @@ -767,6 +779,8 @@ static stdx::unordered_map kBuiltinFunctions = { {"sortKeyComponentVectorGetElement", BuiltinFn{ [](size_t n) { return n == 2; }, vm::Builtin::sortKeyComponentVectorGetElement, false}}, + {"sortKeyComponentVectorToArray", + BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::sortKeyComponentVectorToArray, false}}, {"tsSecond", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::tsSecond, false}}, {"tsIncrement", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::tsIncrement, false}}, {"typeMatch", BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::typeMatch, false}}, @@ -779,6 +793,48 @@ static stdx::unordered_map kBuiltinFunctions = { BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::objectToArray, false}}, {"arrayToObject", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::arrayToObject, false}}, + {"array", BuiltinFn{kAnyNumberOfArgs, vm::Builtin::newArray, false}}, + {"aggFirstNNeedsMoreInput", + BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggFirstNNeedsMoreInput, false}}, + {"aggFirstN", BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::aggFirstN, false}}, + {"aggFirstNMerge", + BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggFirstNMerge, true}}, + {"aggFirstNFinalize", + BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggFirstNFinalize, false}}, + {"aggLastN", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggLastN, true}}, + {"aggLastNMerge", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggLastNMerge, true}}, + {"aggLastNFinalize", + BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggLastNFinalize, false}}, + {"aggTopN", BuiltinFn{[](size_t n) { return n == 3; }, vm::Builtin::aggTopN, true}}, + {"aggTopNMerge", BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::aggTopNMerge, true}}, + {"aggTopNFinalize", + BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::aggTopNFinalize, false}}, + {"aggBottomN", BuiltinFn{[](size_t n) { return n == 3; }, vm::Builtin::aggBottomN, true}}, + {"aggBottomNMerge", + BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::aggBottomNMerge, true}}, + {"aggBottomNFinalize", + BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::aggBottomNFinalize, false}}, + {"aggMaxN", BuiltinFn{[](size_t n) { return n == 1 || n == 2; }, vm::Builtin::aggMaxN, true}}, + {"aggMaxNMerge", + BuiltinFn{[](size_t n) { return n == 1 || n == 2; }, vm::Builtin::aggMaxNMerge, true}}, + {"aggMaxNFinalize", + BuiltinFn{[](size_t n) { return n == 1 || n == 2; }, vm::Builtin::aggMaxNFinalize, false}}, + {"aggMinN", BuiltinFn{[](size_t n) { return n == 1 || n == 2; }, vm::Builtin::aggMinN, true}}, + {"aggMinNMerge", + 
BuiltinFn{[](size_t n) { return n == 1 || n == 2; }, vm::Builtin::aggMinNMerge, true}}, + {"aggMinNFinalize", + BuiltinFn{[](size_t n) { return n == 1 || n == 2; }, vm::Builtin::aggMinNFinalize, false}}, + {"aggRank", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggRank, true}}, + {"aggRankColl", BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::aggRankColl, true}}, + {"aggDenseRank", BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggDenseRank, true}}, + {"aggDenseRankColl", + BuiltinFn{[](size_t n) { return n == 2; }, vm::Builtin::aggDenseRankColl, true}}, + {"aggRankFinalize", + BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggRankFinalize, false}}, + {"aggExpMovingAvg", + BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggExpMovingAvg, true}}, + {"aggExpMovingAvgFinalize", + BuiltinFn{[](size_t n) { return n == 1; }, vm::Builtin::aggExpMovingAvgFinalize, false}}, }; /** @@ -1119,6 +1175,18 @@ vm::CodeFragment EFunction::compileDirect(CompileCtx& ctx) const { return (it->second.generate)(ctx, _nodes, it->second.aggregate); } + if (_name == "aggState") { + uassert(7695204, + "aggregate function call: aggState occurs in the non-aggregate context.", + ctx.aggExpression && ctx.accumulator); + uassert(7695205, + str::stream() << "function call: aggState has wrong arity: " << _nodes.size(), + _nodes.size() == 0); + vm::CodeFragment code; + code.appendMoveVal(ctx.accumulator); + return code; + } + uasserted(4822847, str::stream() << "unknown function call: " << _name); } diff --git a/src/mongo/db/exec/sbe/expressions/expression.h b/src/mongo/db/exec/sbe/expressions/expression.h index 350f42f399909..bfb97323a0d96 100644 --- a/src/mongo/db/exec/sbe/expressions/expression.h +++ b/src/mongo/db/exec/sbe/expressions/expression.h @@ -29,15 +29,26 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/abt/named_slots.h" #include "mongo/db/exec/sbe/util/debug_print.h" #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace sbe { diff --git a/src/mongo/db/exec/sbe/expressions/runtime_environment.cpp b/src/mongo/db/exec/sbe/expressions/runtime_environment.cpp index e59e89e0d888f..e6c7293f6bc39 100644 --- a/src/mongo/db/exec/sbe/expressions/runtime_environment.cpp +++ b/src/mongo/db/exec/sbe/expressions/runtime_environment.cpp @@ -29,6 +29,17 @@ #include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/util/builder.h" + namespace mongo::sbe { RuntimeEnvironment::RuntimeEnvironment(const RuntimeEnvironment& other) : _state{other._state}, _isSmp{other._isSmp} { @@ -142,7 +153,7 @@ std::unique_ptr RuntimeEnvironment::makeCopyForParallelUse() return makeCopy(); } -void RuntimeEnvironment::debugString(StringBuilder* builder) { +void RuntimeEnvironment::debugString(StringBuilder* builder) const { using namespace std::literals; value::SlotMap slotName; diff --git a/src/mongo/db/exec/sbe/expressions/runtime_environment.h b/src/mongo/db/exec/sbe/expressions/runtime_environment.h index 2100bff5bb7dc..b9e049013d1c2 100644 --- a/src/mongo/db/exec/sbe/expressions/runtime_environment.h +++ b/src/mongo/db/exec/sbe/expressions/runtime_environment.h @@ -29,13 +29,27 @@ #pragma 
once +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/exec/sbe/abt/named_slots.h" #include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" namespace mongo::sbe { /** - * A holder for slots and accessors which are used in a PlanStage tree but: + * A holder for "global" slots and accessors. These are used in a PlanStage tree but: * - Cannot be made constants due to restrictions on the lifetime of such values (e.g., they're * singleton instances owned somewhere else). * - Can be changed in runtime outside of the PlanStage tree (e.g., a resume recordId changed by a @@ -43,14 +57,14 @@ namespace mongo::sbe { * * A RuntimeEnvironment object is created once per an execution thread. That means that each * producer and consumer in a parallel plan will have their own compilation environment, with their - * own slot accessors. However, slot accessors in each of such environment will access shared data, + * own slot accessors. However, slot accessors in each such environment will access shared data, * which is the same across all environments. * * To avoid data races, the values stored in the runtime environment are considered read-only when * used with a parallel plan. An attempt to change any slot with 'resetValue' will result in a user * exception. * - * If the runtime environment is used in a serial plan, modifications of the slots is allowed. + * If the runtime environment is used in a serial plan, modification of the slots is allowed. */ class RuntimeEnvironment final : public optimizer::NamedSlotsProvider { public: @@ -175,7 +189,7 @@ class RuntimeEnvironment final : public optimizer::NamedSlotsProvider { /** * Dumps all the slots currently defined in this environment into the given string builder. */ - void debugString(StringBuilder* builder); + void debugString(StringBuilder* builder) const; private: RuntimeEnvironment(const RuntimeEnvironment&); diff --git a/src/mongo/db/exec/sbe/expressions/sbe_bson_size_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_bson_size_test.cpp index cb37d2b06afb7..5c33a2ca3a787 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_bson_size_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_bson_size_test.cpp @@ -27,7 +27,21 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { using SBEBsonSizeTest = EExpressionTestFixture; diff --git a/src/mongo/db/exec/sbe/expressions/sbe_coerce_to_string_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_coerce_to_string_test.cpp index bd68938cf41ec..1fbd3321e957f 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_coerce_to_string_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_coerce_to_string_test.cpp @@ -27,9 +27,24 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" -#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_concat_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_concat_test.cpp index 891e0b2c767af..a217274356ea1 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_concat_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_concat_test.cpp @@ -27,8 +27,26 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { class SBEConcatTest : public EExpressionTestFixture { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_constant_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_constant_test.cpp index cf4059456b547..0aed0eabe0ea3 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_constant_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_constant_test.cpp @@ -27,8 +27,23 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/sbe_unittest.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert_that.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_date_add_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_date_add_test.cpp index b15be9772a914..ac2bc0049f0c2 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_date_add_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_date_add_test.cpp @@ -27,8 +27,24 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_date_diff_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_date_diff_test.cpp index 290fcbcfa983e..c184809ff6a74 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_date_diff_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_date_diff_test.cpp @@ -27,9 +27,29 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" #include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo::sbe { namespace { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_date_expression_accepting_timezone_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_date_expression_accepting_timezone_test.cpp index c49c7799bc644..85bf350b59317 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_date_expression_accepting_timezone_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_date_expression_accepting_timezone_test.cpp @@ -27,9 +27,20 @@ * it in the license file. */ -#include "mongo/bson/oid.h" +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_date_from_string_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_date_from_string_test.cpp index 10ca3e4e1db7a..2aba9936ccac4 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_date_from_string_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_date_from_string_test.cpp @@ -27,10 +27,21 @@ * it in the license file. 
*/ -#include "mongo/bson/oid.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo::sbe { namespace { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_date_to_parts_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_date_to_parts_test.cpp index 117a8e86264b0..1bc9579120295 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_date_to_parts_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_date_to_parts_test.cpp @@ -27,9 +27,18 @@ * it in the license file. */ -#include "mongo/bson/oid.h" +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_date_to_string_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_date_to_string_test.cpp index 7a25a0c0d5842..70a03d8cc6c64 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_date_to_string_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_date_to_string_test.cpp @@ -27,12 +27,21 @@ * it in the license file. */ +#include +#include +#include +#include +#include + #include "mongo/base/string_data.h" -#include "mongo/bson/oid.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/datetime/date_time_support.h" #include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo::sbe { namespace { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_date_trunc_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_date_trunc_test.cpp index 6f10d3c5eec02..c105091e5b859 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_date_trunc_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_date_trunc_test.cpp @@ -27,9 +27,26 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" #include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo::sbe { namespace { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_exp_moving_avg_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_exp_moving_avg_test.cpp new file mode 100644 index 0000000000000..2c193606b6e8c --- /dev/null +++ b/src/mongo/db/exec/sbe/expressions/sbe_exp_moving_avg_test.cpp @@ -0,0 +1,111 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
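The new sbe_exp_moving_avg_test.cpp added below exercises the aggExpMovingAvg and aggExpMovingAvgFinalize builtins registered earlier in this patch. The accumulator state it seeds is a three-element array: the running result (initially Null), the weighting factor alpha stored as a Decimal128, and a flag recording whether a decimal input has been seen. The expected values follow the usual exponential-moving-average recurrence, result = alpha * current + (1 - alpha) * previous, with Null inputs leaving the previous result unchanged; for example, with alpha = 0.75 the inputs 13 and 15.4 produce 0.75 * 15.4 + 0.25 * 13 = 14.8, matching the test's expectations. (The recurrence is stated here for orientation and is inferred from the test data; the patch itself does not spell it out.)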
+ */ + +#include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/platform/decimal128.h" + +namespace mongo::sbe { + +class SBEExpMovingAvgTest : public EExpressionTestFixture { +public: + void runAndAssertExpression(double alpha, + std::vector>& inputs, + std::vector>& expValues) { + value::OwnedValueAccessor inputAccessor; + auto inputSlot = bindAccessor(&inputAccessor); + + value::OwnedValueAccessor aggAccessor; + auto aggSlot = bindAccessor(&aggAccessor); + + auto expMovingAvgExpr = + sbe::makeE("aggExpMovingAvg", sbe::makeEs(makeE(inputSlot))); + auto compiledExpMovingAvg = compileAggExpression(*expMovingAvgExpr, &aggAccessor); + + auto expMovingAvgExprFinalize = sbe::makeE( + "aggExpMovingAvgFinalize", sbe::makeEs(makeE(aggSlot))); + auto compiledExpMovingAvgFinalize = compileExpression(*expMovingAvgExprFinalize); + + auto [stateArrayTag, stateArrayVal] = value::makeNewArray(); + auto stateArray = value::getArrayView(stateArrayVal); + stateArray->push_back(value::TypeTags::Null, 0); + stateArray->push_back(value::TypeTags::NumberDecimal, + value::makeCopyDecimal(Decimal128{alpha}).second); + stateArray->push_back(value::TypeTags::Boolean, value::bitcastFrom(false)); + + aggAccessor.reset(stateArrayTag, stateArrayVal); + + for (size_t i = 0; i < inputs.size(); ++i) { + inputAccessor.reset(inputs[i].first, inputs[i].second); + auto [runTag, runVal] = runCompiledExpression(compiledExpMovingAvg.get()); + + aggAccessor.reset(runTag, runVal); + auto [emaTag, emaVal] = runCompiledExpression(compiledExpMovingAvgFinalize.get()); + + ASSERT_EQ(expValues[i].first, emaTag); + auto [compareTag, compareVal] = + value::compareValue(expValues[i].first, expValues[i].second, emaTag, emaVal); + ASSERT_EQ(compareTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(compareVal), 0); + value::releaseValue(emaTag, emaVal); + value::releaseValue(expValues[i].first, expValues[i].second); + } + } +}; + +TEST_F(SBEExpMovingAvgTest, ExpMovingAvgTest) { + std::vector> inputs; + std::vector> expValues; + + inputs.push_back({value::TypeTags::Null, 0}); + expValues.push_back({value::TypeTags::Null, 0}); + + inputs.push_back({value::TypeTags::NumberInt64, 13}); + expValues.push_back({value::TypeTags::NumberDouble, value::bitcastFrom(13.0)}); + + inputs.push_back({value::TypeTags::NumberDouble, value::bitcastFrom(15.4)}); + expValues.push_back({value::TypeTags::NumberDouble, value::bitcastFrom(14.8)}); + + inputs.push_back({value::TypeTags::Null, 0}); + expValues.push_back({value::TypeTags::NumberDouble, value::bitcastFrom(14.8)}); + + inputs.push_back({value::TypeTags::NumberInt32, 12}); + expValues.push_back({value::TypeTags::NumberDouble, value::bitcastFrom(12.7)}); + + inputs.push_back( + {value::TypeTags::NumberDecimal, value::makeCopyDecimal(Decimal128{11.7}).second}); + expValues.push_back( + {value::TypeTags::NumberDecimal, value::makeCopyDecimal(Decimal128{11.95}).second}); + + inputs.push_back({value::TypeTags::NumberInt64, 82}); + expValues.push_back( + {value::TypeTags::NumberDecimal, value::makeCopyDecimal(Decimal128{64.4875}).second}); + + runAndAssertExpression(0.75, inputs, expValues); +} +} // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/expressions/sbe_extract_sub_array_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_extract_sub_array_builtin_test.cpp index ca6885a98bf52..64693e6f4f1f5 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_extract_sub_array_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_extract_sub_array_builtin_test.cpp @@ -27,11 
+27,26 @@ * it in the license file. */ -#include -#include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_fail_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_fail_test.cpp index 095db37f72dfb..5fd886b0f4c55 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_fail_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_fail_test.cpp @@ -27,7 +27,17 @@ * it in the license file. */ +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_get_element_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_get_element_builtin_test.cpp index a6b1c34878173..5c145c0f2eaf0 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_get_element_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_get_element_builtin_test.cpp @@ -27,11 +27,22 @@ * it in the license file. */ -#include -#include - +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_if_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_if_test.cpp index 87f7476da2af8..fec6aaf435fb1 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_if_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_if_test.cpp @@ -27,7 +27,18 @@ * it in the license file. */ +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/sbe_unittest.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_index_of_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_index_of_test.cpp index 5b6428b182a1f..f18757a4bc8f4 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_index_of_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_index_of_test.cpp @@ -27,7 +27,18 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_is_array_empty_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_is_array_empty_builtin_test.cpp index 25d775c4419dd..e080313ffff51 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_is_array_empty_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_is_array_empty_builtin_test.cpp @@ -27,11 +27,18 @@ * it in the license file. */ -#include -#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_is_member_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_is_member_builtin_test.cpp index c8b8bbb304602..dbb9cd2c81d8b 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_is_member_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_is_member_builtin_test.cpp @@ -27,8 +27,22 @@ * it in the license file. */ +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_iso_date_to_parts_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_iso_date_to_parts_test.cpp index 090bccf50f3fd..72db2e07dae2f 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_iso_date_to_parts_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_iso_date_to_parts_test.cpp @@ -27,9 +27,18 @@ * it in the license file. */ -#include "mongo/bson/oid.h" +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_ks_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_ks_builtin_test.cpp index f5a3ecd1377f9..d247019f4260a 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_ks_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_ks_builtin_test.cpp @@ -27,8 +27,22 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { @@ -38,9 +52,9 @@ class SBEBuiltinKsTest : public EExpressionTestFixture { value::Value argVal, value::TypeTags expectedTag, value::Value expectedVal) { - auto version = static_cast(KeyString::Version::V1); + auto version = static_cast(key_string::Version::V1); auto ordering = uint32_t{0}; - auto discriminator = static_cast(KeyString::Discriminator::kInclusive); + auto discriminator = static_cast(key_string::Discriminator::kInclusive); auto versionExpr = makeE(value::TypeTags::NumberInt64, value::bitcastFrom(version)); @@ -78,7 +92,7 @@ TEST_F(SBEBuiltinKsTest, NumericTests) { auto argTag = value::TypeTags::NumberInt32; auto argVal = value::bitcastFrom(int32Value); - KeyString::Builder kb(KeyString::Version::V1, KeyString::ALL_ASCENDING); + key_string::Builder kb(key_string::Version::V1, key_string::ALL_ASCENDING); kb.appendNumberInt(int32Value); auto [expectedTag, expectedVal] = value::makeCopyKeyString(kb.getValueCopy()); value::ValueGuard expectedGuard(expectedTag, expectedVal); @@ -91,7 +105,7 @@ TEST_F(SBEBuiltinKsTest, NumericTests) { auto argTag = value::TypeTags::NumberInt64; auto argVal = value::bitcastFrom(int64Value); - KeyString::Builder kb(KeyString::Version::V1, KeyString::ALL_ASCENDING); + key_string::Builder kb(key_string::Version::V1, key_string::ALL_ASCENDING); kb.appendNumberLong(int64Value); auto [expectedTag, expectedVal] = value::makeCopyKeyString(kb.getValueCopy()); value::ValueGuard expectedGuard(expectedTag, expectedVal); @@ -103,7 +117,7 @@ TEST_F(SBEBuiltinKsTest, NumericTests) { auto argTag = value::TypeTags::NumberDouble; auto argVal = value::bitcastFrom(doubleValue); - KeyString::Builder kb(KeyString::Version::V1, KeyString::ALL_ASCENDING); + key_string::Builder kb(key_string::Version::V1, key_string::ALL_ASCENDING); kb.appendNumberDouble(doubleValue); auto [expectedTag, expectedVal] = value::makeCopyKeyString(kb.getValueCopy()); value::ValueGuard expectedGuard(expectedTag, expectedVal); @@ -115,7 +129,7 @@ TEST_F(SBEBuiltinKsTest, NumericTests) { auto [argTag, argVal] = value::makeCopyDecimal(dec128Value); value::ValueGuard argGuard(argTag, argVal); - KeyString::Builder kb(KeyString::Version::V1, KeyString::ALL_ASCENDING); + key_string::Builder kb(key_string::Version::V1, key_string::ALL_ASCENDING); kb.appendNumberDecimal(dec128Value); auto [expectedTag, expectedVal] = value::makeCopyKeyString(kb.getValueCopy()); value::ValueGuard expectedGuard(expectedTag, expectedVal); diff --git a/src/mongo/db/exec/sbe/expressions/sbe_lambda_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_lambda_test.cpp index a355b12873bbf..2351e76a822cc 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_lambda_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_lambda_test.cpp @@ -27,7 +27,18 @@ * it in the license file. 
*/ +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { using SBELambdaTest = GoldenEExpressionTestFixture; diff --git a/src/mongo/db/exec/sbe/expressions/sbe_local_bind_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_local_bind_test.cpp index b554da480c1f2..0a9674b9a0a72 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_local_bind_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_local_bind_test.cpp @@ -27,7 +27,16 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { using SBELocalBindTest = GoldenEExpressionTestFixture; diff --git a/src/mongo/db/exec/sbe/expressions/sbe_mod_expression_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_mod_expression_test.cpp index 59be1d8675efc..e9c994aac6852 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_mod_expression_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_mod_expression_test.cpp @@ -27,7 +27,20 @@ * it in the license file. */ +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_new_array_from_range_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_new_array_from_range_builtin_test.cpp index 3b9e0a8586c2c..f8f6d5fd88e03 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_new_array_from_range_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_new_array_from_range_builtin_test.cpp @@ -27,7 +27,17 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { class SBEBuiltinNewArrayFromRangeTest : public EExpressionTestFixture { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_object_array_conversion_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_object_array_conversion_test.cpp index 4b4a454b981b1..567dc052b0a16 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_object_array_conversion_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_object_array_conversion_test.cpp @@ -27,9 +27,27 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_prim_binary_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_prim_binary_test.cpp index d8514e8b05c8f..66a0be119654a 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_prim_binary_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_prim_binary_test.cpp @@ -27,8 +27,23 @@ * it in the license file. */ +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/sbe_unittest.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert_that.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_prim_unary_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_prim_unary_test.cpp index 2e6149049923f..9e9d4c7adbdac 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_prim_unary_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_prim_unary_test.cpp @@ -27,7 +27,21 @@ * it in the license file. */ +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/sbe_unittest.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_rank_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_rank_test.cpp new file mode 100644 index 0000000000000..80a94443473c8 --- /dev/null +++ b/src/mongo/db/exec/sbe/expressions/sbe_rank_test.cpp @@ -0,0 +1,135 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
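The new sbe_rank_test.cpp added below covers the aggRank, aggDenseRank and aggRankFinalize builtins (plus their aggRankColl/aggDenseRankColl collation-aware variants) registered earlier in this patch. For the inputs {100, 200, 200, 200, 300} it expects standard ranks {1, 2, 2, 2, 5}, where tied values share a rank and the next distinct value's rank skips ahead by the size of the tie, and dense ranks {1, 2, 2, 2, 3}, which leave no gaps. The collation test runs the same check over the strings "aaa", "bbb", "BBB", "bBb", "ccc" with a kToLowerString mock collator, under which the three spellings of "bbb" compare equal and therefore tie.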
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + +namespace mongo::sbe { +using SBERankTest = EExpressionTestFixture; + +TEST_F(SBERankTest, ComputeRank) { + value::OwnedValueAccessor rankAccessor; + value::OwnedValueAccessor denseRankAccessor; + value::OwnedValueAccessor argAccessor; + auto rankSlot = bindAccessor(&rankAccessor); + auto denseRankSlot = bindAccessor(&denseRankAccessor); + auto argSlot = bindAccessor(&argAccessor); + auto rankExpr = sbe::makeE("aggRank", sbe::makeEs(makeE(argSlot))); + auto compiledRankExpr = compileAggExpression(*rankExpr, &rankAccessor); + auto denseRankExpr = + sbe::makeE("aggDenseRank", sbe::makeEs(makeE(argSlot))); + auto compiledDenseRankExpr = compileAggExpression(*denseRankExpr, &denseRankAccessor); + auto finalRankExpr = + sbe::makeE("aggRankFinalize", sbe::makeEs(makeE(rankSlot))); + auto finalCompiledRankExpr = compileExpression(*finalRankExpr); + auto finalDenseRankExpr = + sbe::makeE("aggRankFinalize", sbe::makeEs(makeE(denseRankSlot))); + auto finalCompiledDenseRankExpr = compileExpression(*finalDenseRankExpr); + + std::vector values{100, 200, 200, 200, 300}; + std::vector ranks{1, 2, 2, 2, 5}; + std::vector denseRanks{1, 2, 2, 2, 3}; + for (size_t i = 0; i < values.size(); i++) { + argAccessor.reset(value::TypeTags::NumberInt32, values[i]); + auto [tag, val] = runCompiledExpression(compiledRankExpr.get()); + rankAccessor.reset(tag, val); + std::tie(tag, val) = runCompiledExpression(compiledDenseRankExpr.get()); + denseRankAccessor.reset(tag, val); + + std::tie(tag, val) = runCompiledExpression(finalCompiledRankExpr.get()); + ASSERT_EQUALS(value::TypeTags::NumberInt64, tag); + ASSERT_EQUALS(value::bitcastTo(val), ranks[i]); + + std::tie(tag, val) = runCompiledExpression(finalCompiledDenseRankExpr.get()); + ASSERT_EQUALS(value::TypeTags::NumberInt64, tag); + ASSERT_EQUALS(value::bitcastTo(val), denseRanks[i]); + } +} + +TEST_F(SBERankTest, ComputeRankWithCollation) { + value::OwnedValueAccessor rankAccessor; + value::OwnedValueAccessor denseRankAccessor; + value::OwnedValueAccessor argAccessor; + value::ViewOfValueAccessor collatorAccessor; + auto rankSlot = bindAccessor(&rankAccessor); + auto denseRankSlot = bindAccessor(&denseRankAccessor); + auto argSlot = bindAccessor(&argAccessor); + auto collatorSlot = bindAccessor(&collatorAccessor); + + auto rankExpr = sbe::makeE( + 
"aggRankColl", sbe::makeEs(makeE(argSlot), makeE(collatorSlot))); + auto compiledRankExpr = compileAggExpression(*rankExpr, &rankAccessor); + auto denseRankExpr = sbe::makeE( + "aggDenseRankColl", sbe::makeEs(makeE(argSlot), makeE(collatorSlot))); + auto compiledDenseRankExpr = compileAggExpression(*denseRankExpr, &denseRankAccessor); + auto finalRankExpr = + sbe::makeE("aggRankFinalize", sbe::makeEs(makeE(rankSlot))); + auto finalCompiledRankExpr = compileExpression(*finalRankExpr); + auto finalDenseRankExpr = + sbe::makeE("aggRankFinalize", sbe::makeEs(makeE(denseRankSlot))); + auto finalCompiledDenseRankExpr = compileExpression(*finalDenseRankExpr); + + auto collator = + std::make_unique(CollatorInterfaceMock::MockType::kToLowerString); + collatorAccessor.reset(value::TypeTags::collator, + value::bitcastFrom(collator.get())); + + std::vector values{"aaa", "bbb", "BBB", "bBb", "ccc"}; + std::vector ranks{1, 2, 2, 2, 5}; + std::vector denseRanks{1, 2, 2, 2, 3}; + for (size_t i = 0; i < values.size(); i++) { + auto [tag, val] = value::makeSmallString(values[i]); + argAccessor.reset(tag, val); + std::tie(tag, val) = runCompiledExpression(compiledRankExpr.get()); + rankAccessor.reset(tag, val); + std::tie(tag, val) = runCompiledExpression(compiledDenseRankExpr.get()); + denseRankAccessor.reset(tag, val); + + std::tie(tag, val) = runCompiledExpression(finalCompiledRankExpr.get()); + ASSERT_EQUALS(value::TypeTags::NumberInt64, tag); + ASSERT_EQUALS(value::bitcastTo(val), ranks[i]); + + std::tie(tag, val) = runCompiledExpression(finalCompiledDenseRankExpr.get()); + ASSERT_EQUALS(value::TypeTags::NumberInt64, tag); + ASSERT_EQUALS(value::bitcastTo(val), denseRanks[i]); + } +} +} // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/expressions/sbe_regex_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_regex_test.cpp index e81dcd5627e51..4dc662b6b4314 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_regex_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_regex_test.cpp @@ -27,8 +27,23 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/pcre.h" #include "mongo/util/pcre_util.h" +#include "mongo/util/str.h" namespace mongo::sbe { class SBERegexTest : public EExpressionTestFixture { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_replace_one_expression_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_replace_one_expression_test.cpp index b15bcbcf6dedf..9dd15356b3adc 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_replace_one_expression_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_replace_one_expression_test.cpp @@ -27,7 +27,23 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_reverse_array_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_reverse_array_builtin_test.cpp index 4d32ddf159713..a03a4f057391e 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_reverse_array_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_reverse_array_builtin_test.cpp @@ -27,10 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_round_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_round_builtin_test.cpp index 823dcdf7c9aab..8afe9cae33ebc 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_round_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_round_builtin_test.cpp @@ -27,10 +27,23 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/platform/decimal128.h" #include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { namespace { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_runtime_environment_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_runtime_environment_test.cpp index 927b528d26195..99cb78344f28e 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_runtime_environment_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_runtime_environment_test.cpp @@ -27,8 +27,15 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expressions/runtime_environment.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_set_expressions_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_set_expressions_test.cpp index 60380c5438e99..eba5198dff89f 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_set_expressions_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_set_expressions_test.cpp @@ -27,8 +27,21 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_shard_filter_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_shard_filter_builtin_test.cpp index a94e1e48a8f15..cc49c6c26beed 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_shard_filter_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_shard_filter_builtin_test.cpp @@ -27,12 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" -#include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/shard_filterer_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_to_upper_to_lower_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_to_upper_to_lower_test.cpp index 5c3e04bfeecf9..8c467b2270e80 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_to_upper_to_lower_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_to_upper_to_lower_test.cpp @@ -27,9 +27,23 @@ * it in the license file. */ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" -#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_trigonometric_expressions_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_trigonometric_expressions_test.cpp index 9ff545a4980d8..b4959aee638d6 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_trigonometric_expressions_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_trigonometric_expressions_test.cpp @@ -27,7 +27,19 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_trunc_builtin_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_trunc_builtin_test.cpp index 3cca308fc5c85..7dc86d19217c0 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_trunc_builtin_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_trunc_builtin_test.cpp @@ -27,9 +27,21 @@ * it in the license file. */ -#include "mongo/db/exec/sbe/expression_test_base.h" - +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_ts_second_ts_increment_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_ts_second_ts_increment_test.cpp index ace39c9c4dbe7..7cf4d456403c9 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_ts_second_ts_increment_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_ts_second_ts_increment_test.cpp @@ -27,10 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/expressions/sbe_variable_test.cpp b/src/mongo/db/exec/sbe/expressions/sbe_variable_test.cpp index 37bf0ee091fce..c6d764b7a0c67 100644 --- a/src/mongo/db/exec/sbe/expressions/sbe_variable_test.cpp +++ b/src/mongo/db/exec/sbe/expressions/sbe_variable_test.cpp @@ -27,8 +27,18 @@ * it in the license file. */ +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/sbe_unittest.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/makeobj_spec.cpp b/src/mongo/db/exec/sbe/makeobj_spec.cpp index e23faac3dcf1b..3f71d921ee5ff 100644 --- a/src/mongo/db/exec/sbe/makeobj_spec.cpp +++ b/src/mongo/db/exec/sbe/makeobj_spec.cpp @@ -27,10 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/db/exec/sbe/makeobj_spec.h" - #include "mongo/db/exec/sbe/size_estimator.h" namespace mongo::sbe { @@ -42,7 +42,7 @@ IndexedStringVector MakeObjSpec::buildIndexedFieldVector(std::vector +#include +#include #include +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/exec/sbe/makeobj_enums.h" #include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/value.h" @@ -50,34 +55,24 @@ struct MakeObjSpec { std::vector fields, std::vector projects) : fieldBehavior(fieldBehavior), - numFields(fields.size()), - fieldsAndProjects(buildIndexedFieldVector(std::move(fields), std::move(projects))) {} - - MakeObjSpec(const MakeObjSpec& other) - : fieldBehavior(other.fieldBehavior), - numFields(other.numFields), - fieldsAndProjects(other.fieldsAndProjects) {} - - MakeObjSpec(MakeObjSpec&& other) - : fieldBehavior(other.fieldBehavior), - numFields(other.numFields), - fieldsAndProjects(std::move(other.fieldsAndProjects)) {} + numKeepOrDrops(fields.size()), + fieldNames(buildIndexedFieldVector(std::move(fields), std::move(projects))) {} std::string toString() const { StringBuilder builder; builder << (fieldBehavior == MakeObjSpec::FieldBehavior::keep ? "keep" : "drop") << ", ["; - for (size_t i = 0; i < fieldsAndProjects.size(); ++i) { - if (i == numFields) { + for (size_t i = 0; i < fieldNames.size(); ++i) { + if (i == numKeepOrDrops) { builder << "], ["; } else if (i != 0) { builder << ", "; } - builder << '"' << fieldsAndProjects[i] << '"'; + builder << '"' << fieldNames[i] << '"'; } - if (fieldsAndProjects.size() == numFields) { + if (fieldNames.size() == numKeepOrDrops) { builder << "], ["; } @@ -89,7 +84,7 @@ struct MakeObjSpec { size_t getApproximateSize() const; FieldBehavior fieldBehavior; - size_t numFields = 0; - IndexedStringVector fieldsAndProjects; + size_t numKeepOrDrops = 0; + IndexedStringVector fieldNames; }; } // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/sbe_code_fragment_test.cpp b/src/mongo/db/exec/sbe/sbe_code_fragment_test.cpp index 97694ec06acdb..50f1d30a71b45 100644 --- a/src/mongo/db/exec/sbe/sbe_code_fragment_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_code_fragment_test.cpp @@ -27,13 +27,20 @@ * it in the license file. 
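To make the renamed MakeObjSpec members above concrete, here is a standalone reimplementation (standard C++ only; the function name is illustrative) of the string format the refactored toString() produces: the first 'numKeepOrDrops' entries of 'fieldNames' form the keep/drop list, and the remaining entries are the projected field names.

#include <iostream>
#include <sstream>
#include <string>
#include <vector>

// Mirrors the formatting logic shown in the makeobj_spec.h hunk above.
std::string describeMakeObjSpec(bool keep,
                                const std::vector<std::string>& fieldNames,
                                std::size_t numKeepOrDrops) {
    std::ostringstream out;
    out << (keep ? "keep" : "drop") << ", [";
    for (std::size_t i = 0; i < fieldNames.size(); ++i) {
        if (i == numKeepOrDrops) {
            out << "], [";  // switch from the keep/drop list to the projected fields
        } else if (i != 0) {
            out << ", ";
        }
        out << '"' << fieldNames[i] << '"';
    }
    if (fieldNames.size() == numKeepOrDrops) {
        out << "], [";  // no projected fields at all
    }
    out << "]";
    return out.str();
}

int main() {
    // Two keep fields ("a", "b") followed by one projected field ("c").
    std::cout << describeMakeObjSpec(true, {"a", "b", "c"}, 2) << "\n";  // keep, ["a", "b"], ["c"]
    return 0;
}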
*/ +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/sbe_unittest.h" -#include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/values/value_printer.h" +#include "mongo/db/exec/sbe/vm/label.h" #include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/exec/sbe/vm/vm_printer.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/golden_test.h" -#include "mongo/unittest/unittest.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_column_scan_test.cpp b/src/mongo/db/exec/sbe/sbe_column_scan_test.cpp index 98868e3de12b2..55d0ed43e27a7 100644 --- a/src/mongo/db/exec/sbe/sbe_column_scan_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_column_scan_test.cpp @@ -31,9 +31,5 @@ * This file contains tests for sbe::ColumnScan */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/sbe_plan_stage_test.h" -#include "mongo/db/storage/column_store.h" namespace mongo::sbe {} // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/sbe_filter_test.cpp b/src/mongo/db/exec/sbe/sbe_filter_test.cpp index 15a34975c9f2e..5a83b2baae76e 100644 --- a/src/mongo/db/exec/sbe/sbe_filter_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_filter_test.cpp @@ -31,11 +31,25 @@ * This file contains tests for sbe::FilterStage. */ -#include "mongo/platform/basic.h" - - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/filter.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_hash_agg_test.cpp b/src/mongo/db/exec/sbe/sbe_hash_agg_test.cpp index 2bdb65fac0eac..0748957cd0cb3 100644 --- a/src/mongo/db/exec/sbe/sbe_hash_agg_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_hash_agg_test.cpp @@ -31,13 +31,49 @@ * This file contains tests for sbe::HashAggStage. 
*/ -#include "mongo/platform/basic.h" - - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/hash_agg.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" +#include "mongo/db/exec/sbe/stages/project.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/util/assert_util.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/scopeguard.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_hash_join_test.cpp b/src/mongo/db/exec/sbe/sbe_hash_join_test.cpp index 79b19a8ed7cdf..ce15f1a704a53 100644 --- a/src/mongo/db/exec/sbe/sbe_hash_join_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_hash_join_test.cpp @@ -31,12 +31,33 @@ * This file contains tests for sbe::HashJoinStage. */ -#include "mongo/platform/basic.h" - - +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/hash_join.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp b/src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp index 31929a32a28a6..fe197f60243ad 100644 --- a/src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_hash_lookup_test.cpp @@ -31,16 +31,44 @@ * This file contains tests for sbe::LoopJoinStage. 
*/ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/sbe_unittest.h" #include "mongo/db/exec/sbe/stages/hash_lookup.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/print_options.h" #include "mongo/db/exec/sbe/util/stage_results_printer.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/values/value_printer.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/golden_test.h" -#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" + namespace mongo::sbe { class HashLookupStageTest : public PlanStageTestFixture { diff --git a/src/mongo/db/exec/sbe/sbe_key_string_test.cpp b/src/mongo/db/exec/sbe/sbe_key_string_test.cpp index 87dfcd4b0b7e0..08b85587397c4 100644 --- a/src/mongo/db/exec/sbe/sbe_key_string_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_key_string_test.cpp @@ -27,10 +27,41 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/stages/co_scan.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/storage/key_string.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo::sbe { @@ -95,9 +126,9 @@ TEST_F(SBEKeyStringTest, Basic) { bob.appendNull("null-descending"); auto testValues = bob.done(); - // Copy each element from 'testValues' into a KeyString::Value. Each KeyString::Value has a + // Copy each element from 'testValues' into a key_string::Value. Each key_string::Value has a // maximum number of components, so we have to break the elements up into groups. 
- std::queue> keyStringQueue; + std::queue> keyStringQueue; std::vector elements; testValues.elems(elements); @@ -110,7 +141,7 @@ TEST_F(SBEKeyStringTest, Basic) { } auto ordering = Ordering::make(patternBob.done()); - KeyString::Builder keyStringBuilder(KeyString::Version::V1, ordering); + key_string::Builder keyStringBuilder(key_string::Version::V1, ordering); for (auto j = i; j < endBound; ++j) { keyStringBuilder.appendBSONElement(elements[j]); } @@ -176,7 +207,7 @@ TEST_F(SBEKeyStringTest, Basic) { } TEST(SBEKeyStringTest, KeyComponentInclusion) { - KeyString::Builder keyStringBuilder(KeyString::Version::V1, KeyString::ALL_ASCENDING); + key_string::Builder keyStringBuilder(key_string::Version::V1, key_string::ALL_ASCENDING); keyStringBuilder.appendNumberLong(12345); // Included keyStringBuilder.appendString("I've information vegetable, animal, and mineral"_sd); keyStringBuilder.appendString( @@ -193,7 +224,7 @@ TEST(SBEKeyStringTest, KeyComponentInclusion) { BufBuilder builder; readKeyStringValueIntoAccessors( - keyString, KeyString::ALL_ASCENDING, &builder, &accessors, indexKeysToInclude); + keyString, key_string::ALL_ASCENDING, &builder, &accessors, indexKeysToInclude); auto value = accessors[0].getViewOfValue(); ASSERT(value::TypeTags::NumberInt64 == value.first && diff --git a/src/mongo/db/exec/sbe/sbe_limit_skip_test.cpp b/src/mongo/db/exec/sbe/sbe_limit_skip_test.cpp index 69eec391e8038..f20e64c26f3d9 100644 --- a/src/mongo/db/exec/sbe/sbe_limit_skip_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_limit_skip_test.cpp @@ -31,10 +31,23 @@ * This file contains tests for sbe::LimitSkipStage. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" +#include "mongo/db/exec/sbe/stages/co_scan.h" #include "mongo/db/exec/sbe/stages/limit_skip.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_loop_join_test.cpp b/src/mongo/db/exec/sbe/sbe_loop_join_test.cpp index 982269154543f..ddc3f304d7284 100644 --- a/src/mongo/db/exec/sbe/sbe_loop_join_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_loop_join_test.cpp @@ -31,10 +31,23 @@ * This file contains tests for sbe::LoopJoinStage. */ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/loop_join.h" #include "mongo/db/exec/sbe/stages/spool.h" -#include "mongo/util/assert_util.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_math_builtins_test.cpp b/src/mongo/db/exec/sbe/sbe_math_builtins_test.cpp index 654c7229cdcb4..5b85ecca8697e 100644 --- a/src/mongo/db/exec/sbe/sbe_math_builtins_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_math_builtins_test.cpp @@ -27,11 +27,29 @@ * it in the license file. 
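The key string test above batches BSON elements because a single key string value can hold only a limited number of components. The standalone sketch below shows that batching in isolation (the bound used here is illustrative, not the real limit).

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    constexpr std::size_t kMaxComponents = 3;  // illustrative bound, not the real limit
    const std::vector<int> elements{1, 2, 3, 4, 5, 6, 7};
    for (std::size_t i = 0; i < elements.size(); i += kMaxComponents) {
        const std::size_t end = std::min(elements.size(), i + kMaxComponents);
        std::cout << "key string " << (i / kMaxComponents) << ":";
        for (std::size_t j = i; j < end; ++j) {
            std::cout << ' ' << elements[j];  // one component appended per element
        }
        std::cout << '\n';
    }
    return 0;
}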
*/ -#include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_merge_join_test.cpp b/src/mongo/db/exec/sbe/sbe_merge_join_test.cpp index 1b8392ced5300..07160705ac818 100644 --- a/src/mongo/db/exec/sbe/sbe_merge_join_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_merge_join_test.cpp @@ -27,11 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/merge_join.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/sbe_mkobj_test.cpp b/src/mongo/db/exec/sbe/sbe_mkobj_test.cpp index 64433238c9c2c..00d79cec67479 100644 --- a/src/mongo/db/exec/sbe/sbe_mkobj_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_mkobj_test.cpp @@ -27,10 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/makeobj.h" +#include "mongo/db/exec/sbe/stages/project.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { class MkObjStageTest : public PlanStageTestFixture { @@ -239,22 +260,24 @@ class MkObjStageTest : public PlanStageTestFixture { } auto [expectedTag, expectedVal] = - stage_builder::makeValue(BSON_ARRAY(BSON("c" << 3 << "a" - << "one" - << "b" - << "two") - << BSON("c" << 2 << "a" - << "one" - << "b" - << "two") - << BSON("c" << 3 << "a" - << "one" - << "b" - << "two") - << BSON("c" << 2 << "a" - << "one" - << "b" - << "two"))); + stage_builder::makeValue(BSON_ARRAY(BSON("a" + << "one" + << "b" + << "two" + << "c" << 3) + << BSON("a" + << "one" + << "c" << 2 << "b" + << "two") + << BSON("a" + << "one" + << "b" + << "two" + << "c" << 3) + << BSON("a" + << "one" + << "c" << 2 << "b" + << "two"))); value::ValueGuard expectedGuard{expectedTag, expectedVal}; inputGuard.reset(); diff --git a/src/mongo/db/exec/sbe/sbe_numeric_convert_test.cpp b/src/mongo/db/exec/sbe/sbe_numeric_convert_test.cpp index 35072243158d0..524609bc161f1 100644 --- a/src/mongo/db/exec/sbe/sbe_numeric_convert_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_numeric_convert_test.cpp @@ -27,7 +27,20 @@ * it in the license file. */ +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/sbe_unittest.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" namespace mongo::sbe { namespace test_detail { diff --git a/src/mongo/db/exec/sbe/sbe_plan_size_test.cpp b/src/mongo/db/exec/sbe/sbe_plan_size_test.cpp index 7d2c7d04f3389..3593042c10f81 100644 --- a/src/mongo/db/exec/sbe/sbe_plan_size_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_plan_size_test.cpp @@ -27,6 +27,25 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/makeobj_enums.h" #include "mongo/db/exec/sbe/stages/branch.h" #include "mongo/db/exec/sbe/stages/bson_scan.h" #include "mongo/db/exec/sbe/stages/co_scan.h" @@ -44,13 +63,19 @@ #include "mongo/db/exec/sbe/stages/sort.h" #include "mongo/db/exec/sbe/stages/sorted_merge.h" #include "mongo/db/exec/sbe/stages/spool.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/stages/traverse.h" #include "mongo/db/exec/sbe/stages/union.h" #include "mongo/db/exec/sbe/stages/unique.h" #include "mongo/db/exec/sbe/stages/unwind.h" #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/id_generator.h" +#include "mongo/util/uuid.h" namespace mongo::sbe { @@ -159,6 +184,7 @@ TEST_F(PlanSizeTest, SimpleIndexScanStage) { generateSlotId(), generateSlotId(), generateSlotId(), + generateSlotId(), IndexKeysInclusionSet(1), mockSV(), makeE(generateSlotId()), @@ -170,14 +196,18 @@ TEST_F(PlanSizeTest, SimpleIndexScanStage) { TEST_F(PlanSizeTest, GenericIndexScanStage) { auto collUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue(); - GenericIndexScanStageParams params{ - makeE(generateSlotId()), {}, 1, KeyString::Version{0}, Ordering::allAscending()}; + GenericIndexScanStageParams params{makeE(generateSlotId()), + {}, + 1, + key_string::Version{0}, + Ordering::allAscending()}; auto stage = makeS(collUuid, StringData(), std::move(params), generateSlotId(), generateSlotId(), generateSlotId(), + generateSlotId(), IndexKeysInclusionSet(1), mockSV(), nullptr, @@ -231,21 +261,23 @@ TEST_F(PlanSizeTest, Project) { TEST_F(PlanSizeTest, Scan) { auto collUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue(); - auto stage = makeS(collUuid, - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - boost::none, - std::vector{"field"}, - mockSV(), - generateSlotId(), - true, - nullptr, - kEmptyPlanNodeId, - ScanCallbacks()); + auto stage = makeS(collUuid, + generateSlotId() /* recordSlot */, + generateSlotId() /* recordIdSlot */, + generateSlotId() /* snapshotIdSlot */, + generateSlotId() /* indexIdSlot */, + generateSlotId() /* indexKeySlot */, + generateSlotId() /* indexKeyPatternSlot */, + boost::none /* oplogTsSlot */, + std::vector{"field"} /* scanFieldNames */, + mockSV() /* scanFieldSlots */, + generateSlotId() /* seekRecordIdSlot */, + generateSlotId() /* minRecordIdSlot */, + generateSlotId() /* maxRecordIdSlot */, + true /* forward */, + nullptr /* yieldPolicy */, + kEmptyPlanNodeId /* nodeId */, + ScanCallbacks()); assertPlanSize(*stage); } diff --git a/src/mongo/db/exec/sbe/sbe_plan_stage_test.cpp b/src/mongo/db/exec/sbe/sbe_plan_stage_test.cpp index 2a364bce19702..dadfa27ee1e20 100644 --- a/src/mongo/db/exec/sbe/sbe_plan_stage_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_plan_stage_test.cpp @@ -31,12 +31,21 @@ * This file contains a test framework for testing sbe::PlanStages. 
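The ScanStage call sites in this patch switch to annotating each positional argument with its parameter name. The toy function below (everything in it is made up) shows the convention in isolation.

#include <string>

// A long positional parameter list, similar in shape to the ScanStage constructor.
static int openScan(int recordSlot, int recordIdSlot, bool forward, const std::string& fieldName) {
    return forward ? recordSlot : recordIdSlot;
}

int main() {
    // Each argument carries a /* parameterName */ comment so the call site stays readable.
    const int slot = openScan(1 /* recordSlot */,
                              2 /* recordIdSlot */,
                              true /* forward */,
                              "field" /* fieldName */);
    return slot == 1 ? 0 : 1;
}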
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/sbe/sbe_plan_stage_test.h" +#include +#include +#include -#include "mongo/db/concurrency/locker_noop_client_observer.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -68,7 +77,12 @@ PlanStageTestFixture::generateVirtualScanMulti(int32_t numSlots, const BSONArray } void PlanStageTestFixture::prepareTree(CompileCtx* ctx, PlanStage* root) { - Lock::GlobalLock globalLock{operationContext(), MODE_IS}; + // We want to avoid recursive locking since this results in yield plans that don't yield when + // they should. + boost::optional globalLock; + if (!operationContext()->lockState()->isLocked()) { + globalLock.emplace(operationContext(), MODE_IS); + } if (_yieldPolicy) { _yieldPolicy->clearRegisteredPlans(); _yieldPolicy->registerPlan(root); diff --git a/src/mongo/db/exec/sbe/sbe_plan_stage_test.h b/src/mongo/db/exec/sbe/sbe_plan_stage_test.h index 92db73ed2f912..9630844e0ae42 100644 --- a/src/mongo/db/exec/sbe/sbe_plan_stage_test.h +++ b/src/mongo/db/exec/sbe/sbe_plan_stage_test.h @@ -33,18 +33,37 @@ #pragma once +#include +#include +#include +#include +#include + +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" + +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/stages/co_scan.h" #include "mongo/db/exec/sbe/stages/limit_skip.h" #include "mongo/db/exec/sbe/stages/project.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/stages/unwind.h" #include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/plan_yield_policy_sbe.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/yieldable.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/duration.h" +#include "mongo/util/id_generator.h" namespace mongo::sbe { @@ -258,6 +277,7 @@ class PlanStageTestFixture : public CatalogTestFixture { protected: std::unique_ptr makeYieldPolicy() { return std::make_unique( + operationContext(), PlanYieldPolicy::YieldPolicy::YIELD_AUTO, operationContext()->getServiceContext()->getFastClockSource(), 0, diff --git a/src/mongo/db/exec/sbe/sbe_sort_test.cpp b/src/mongo/db/exec/sbe/sbe_sort_test.cpp index 83b59daa53530..f036d1835a9f8 100644 --- a/src/mongo/db/exec/sbe/sbe_sort_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_sort_test.cpp @@ -27,11 +27,24 @@ * it in the license file. 
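The prepareTree() change above takes the global lock only when the operation context does not already hold one, so yield plans are not pinned by a recursive lock. The standalone sketch below (plain C++ with fake lock types, not MongoDB's Lock::GlobalLock) shows that conditional-acquisition pattern.

#include <iostream>
#include <optional>

struct FakeLockState {
    bool locked = false;
};

struct ScopedGlobalLock {  // stand-in for an RAII global lock
    explicit ScopedGlobalLock(FakeLockState& s) : state(s) { state.locked = true; }
    ~ScopedGlobalLock() { state.locked = false; }
    FakeLockState& state;
};

void prepareTree(FakeLockState& lockState) {
    std::optional<ScopedGlobalLock> globalLock;
    if (!lockState.locked) {          // mirrors !opCtx->lockState()->isLocked()
        globalLock.emplace(lockState);  // acquire only when not already held
    }
    std::cout << "prepared with lock held: " << lockState.locked << "\n";
}

int main() {
    FakeLockState state;
    prepareTree(state);  // acquires and releases the lock itself

    ScopedGlobalLock outer{state};
    prepareTree(state);  // reuses the caller's lock instead of locking recursively
    return 0;
}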
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/sort.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_sorted_merge_test.cpp b/src/mongo/db/exec/sbe/sbe_sorted_merge_test.cpp index 04410210e187e..f269c5192ae96 100644 --- a/src/mongo/db/exec/sbe/sbe_sorted_merge_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_sorted_merge_test.cpp @@ -27,11 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - - +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/sorted_merge.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/sbe_spool_test.cpp b/src/mongo/db/exec/sbe/sbe_spool_test.cpp index 8b879949aaec1..eac2dbfb0b8a8 100644 --- a/src/mongo/db/exec/sbe/sbe_spool_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_spool_test.cpp @@ -27,13 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" -#include "mongo/db/exec/sbe/stages/filter.h" +#include "mongo/db/exec/sbe/stages/limit_skip.h" #include "mongo/db/exec/sbe/stages/loop_join.h" #include "mongo/db/exec/sbe/stages/spool.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/stages/union.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_test.cpp b/src/mongo/db/exec/sbe/sbe_test.cpp index c92649647f4e9..862b4663764c9 100644 --- a/src/mongo/db/exec/sbe/sbe_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_test.cpp @@ -27,13 +27,34 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/db/exec/sbe/sbe_unittest.h" #include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/exec/sbe/vm/vm_printer.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/golden_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/represent_as.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/sbe_trial_run_tracker_test.cpp b/src/mongo/db/exec/sbe/sbe_trial_run_tracker_test.cpp index 9a6077cda7d6d..60c54fb291572 100644 --- a/src/mongo/db/exec/sbe/sbe_trial_run_tracker_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_trial_run_tracker_test.cpp @@ -31,15 +31,40 @@ * This file contains tests for sbe::HashAggStage. */ -#include "mongo/platform/basic.h" - - +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" +#include "mongo/db/exec/sbe/stages/co_scan.h" #include "mongo/db/exec/sbe/stages/hash_agg.h" +#include "mongo/db/exec/sbe/stages/limit_skip.h" #include "mongo/db/exec/sbe/stages/scan.h" #include "mongo/db/exec/sbe/stages/sort.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/stages/union.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/uuid.h" namespace mongo::sbe { @@ -47,21 +72,23 @@ using TrialRunTrackerTest = PlanStageTestFixture; TEST_F(TrialRunTrackerTest, TrackerAttachesToStreamingStage) { auto collUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue(); - auto scanStage = makeS(collUuid, - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - boost::none, - std::vector{"field"}, - makeSV(generateSlotId()), - generateSlotId(), - true, - nullptr, - kEmptyPlanNodeId, - ScanCallbacks()); + auto scanStage = makeS(collUuid, + generateSlotId() /* recordSlot */, + generateSlotId() /* recordIdSlot */, + generateSlotId() /* snapshotIdSlot */, + generateSlotId() /* indexIdSlot */, + generateSlotId() /* indexKeySlot */, + generateSlotId() /* indexKeyPatternSlot */, + boost::none /* oplogTsSlot */, + std::vector{"field"} /* scanFieldNames */, + makeSV(generateSlotId()) /* scanFieldSlots */, + generateSlotId() /* seekRecordIdSlot */, + generateSlotId() /* minRecordIdSlot */, + generateSlotId() /* maxRecordIdSlot */, + true /* forward */, + nullptr /* yieldPolicy */, + kEmptyPlanNodeId /* nodeId */, + ScanCallbacks()); auto tracker = std::make_unique(size_t{0}, size_t{0}); ON_BLOCK_EXIT([&]() { 
scanStage->detachFromTrialRunTracker(); }); @@ -91,21 +118,23 @@ TEST_F(TrialRunTrackerTest, TrackerAttachesToBlockingStage) { TEST_F(TrialRunTrackerTest, TrackerAttachesToBothBlockingAndStreamingStages) { auto collUuid = UUID::parse("00000000-0000-0000-0000-000000000000").getValue(); - auto scanStage = makeS(collUuid, - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - boost::none, - std::vector{"field"}, - makeSV(generateSlotId()), - generateSlotId(), - true, - nullptr, - kEmptyPlanNodeId, - ScanCallbacks()); + auto scanStage = makeS(collUuid, + generateSlotId() /* recordSlot */, + generateSlotId() /* recordIdSlot */, + generateSlotId() /* snapshotIdSlot */, + generateSlotId() /* indexIdSlot */, + generateSlotId() /* indexKeySlot */, + generateSlotId() /* indexKeyPatternSlot */, + boost::none /* oplogTsSlot */, + std::vector{"field"} /* scanFieldNames */, + makeSV(generateSlotId()) /* scanFieldSlots */, + generateSlotId() /* seekRecordIdSlot */, + generateSlotId() /* minRecordIdSlot */, + generateSlotId() /* maxRecordIdSlot */, + true /* forward */, + nullptr /* yieldPolicy */, + kEmptyPlanNodeId /* nodeId */, + ScanCallbacks()); auto rootSortStage = makeS(std::move(scanStage), makeSV(), @@ -342,21 +371,23 @@ TEST_F(TrialRunTrackerTest, SiblingBlockingStagesBothGetTrialRunTracker) { TEST_F(TrialRunTrackerTest, TrialRunTrackingCanBeDisabled) { auto scanStage = - makeS(UUID::parse("00000000-0000-0000-0000-000000000000").getValue(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - boost::none, - std::vector{"field"}, - makeSV(generateSlotId()), - generateSlotId(), - true, - nullptr, - kEmptyPlanNodeId, - ScanCallbacks()); + makeS(UUID::parse("00000000-0000-0000-0000-000000000000").getValue(), + generateSlotId() /* recordSlot */, + generateSlotId() /* recordIdSlot */, + generateSlotId() /* snapshotIdSlot */, + generateSlotId() /* indexIdSlot */, + generateSlotId() /* indexKeySlot */, + generateSlotId() /* indexKeyPatternSlot */, + boost::none /* oplogTsSlot */, + std::vector{"field"} /* scanFieldNames */, + makeSV(generateSlotId()) /* scanFieldSlots */, + generateSlotId() /* seekRecordIdSlot */, + generateSlotId() /* minRecordIdSlot */, + generateSlotId() /* maxRecordIdSlot */, + true /* forward */, + nullptr /* yieldPolicy */, + kEmptyPlanNodeId /* nodeId */, + ScanCallbacks()); scanStage->disableTrialRunTracking(); auto tracker = std::make_unique(size_t{0}, size_t{0}); @@ -366,21 +397,23 @@ TEST_F(TrialRunTrackerTest, TrialRunTrackingCanBeDisabled) { TEST_F(TrialRunTrackerTest, DisablingTrackingForChildDoesNotInhibitTrackingForParent) { auto scanStage = - makeS(UUID::parse("00000000-0000-0000-0000-000000000000").getValue(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - generateSlotId(), - boost::none, - std::vector{"field"}, - makeSV(generateSlotId()), - generateSlotId(), - true, - nullptr, - kEmptyPlanNodeId, - ScanCallbacks()); + makeS(UUID::parse("00000000-0000-0000-0000-000000000000").getValue(), + generateSlotId() /* recordSlot */, + generateSlotId() /* recordIdSlot */, + generateSlotId() /* snapshotIdSlot */, + generateSlotId() /* indexIdSlot */, + generateSlotId() /* indexKeySlot */, + generateSlotId() /* indexKeyPatternSlot */, + boost::none /* oplogTsSlot */, + std::vector{"field"} /* scanFieldNames */, + makeSV(generateSlotId()) /* scanFieldSlots */, + generateSlotId() /* 
seekRecordIdSlot */, + generateSlotId() /* minRecordIdSlot */, + generateSlotId() /* maxRecordIdSlot */, + true /* forward */, + nullptr /* yieldPolicy */, + kEmptyPlanNodeId /* nodeId */, + ScanCallbacks()); // Disable tracking for 'scanStage'. We should still attach the tracker for 'rootSortStage'. scanStage->disableTrialRunTracking(); diff --git a/src/mongo/db/exec/sbe/sbe_unique_test.cpp b/src/mongo/db/exec/sbe/sbe_unique_test.cpp index 1761b86c916e8..95655b20608f9 100644 --- a/src/mongo/db/exec/sbe/sbe_unique_test.cpp +++ b/src/mongo/db/exec/sbe/sbe_unique_test.cpp @@ -27,12 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/stages/unique.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/sbe_unittest.cpp b/src/mongo/db/exec/sbe/sbe_unittest.cpp index 867b57979f822..bd47f7ad7ce84 100644 --- a/src/mongo/db/exec/sbe/sbe_unittest.cpp +++ b/src/mongo/db/exec/sbe/sbe_unittest.cpp @@ -27,8 +27,11 @@ * it in the license file. */ +#include + #include "mongo/db/exec/sbe/sbe_unittest.h" -#include "mongo/logv2/log.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/exec/sbe/sbe_unittest.h b/src/mongo/db/exec/sbe/sbe_unittest.h index 4eb2ed2acbd9c..cb287ae7e6a1b 100644 --- a/src/mongo/db/exec/sbe/sbe_unittest.h +++ b/src/mongo/db/exec/sbe/sbe_unittest.h @@ -28,15 +28,23 @@ */ #pragma once -#include "mongo/unittest/assert_that.h" -#include "mongo/unittest/golden_test.h" -#include "mongo/unittest/matcher.h" -#include "mongo/unittest/unittest.h" +#include +#include +#include +#include +#include "mongo/db/exec/sbe/util/print_options.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/values/value_printer.h" #include "mongo/db/exec/sbe/vm/vm_printer.h" -#include +#include "mongo/unittest/assert_that.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/golden_test.h" +#include "mongo/unittest/golden_test_base.h" +#include "mongo/unittest/inline_auto_update.h" +#include "mongo/unittest/matcher.h" +#include "mongo/unittest/matcher_core.h" +#include "mongo/unittest/unittest.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/size_estimator.cpp b/src/mongo/db/exec/sbe/size_estimator.cpp index 0395142bce9bd..f50c1ab22b56e 100644 --- a/src/mongo/db/exec/sbe/size_estimator.cpp +++ b/src/mongo/db/exec/sbe/size_estimator.cpp @@ -29,6 +29,8 @@ #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/bson/bsonelement.h" + namespace mongo::sbe::size_estimator { size_t estimate(const IndexBounds& indexBounds) { diff --git a/src/mongo/db/exec/sbe/size_estimator.h b/src/mongo/db/exec/sbe/size_estimator.h index cf7536e73304c..02fe9f8df8889 100644 --- a/src/mongo/db/exec/sbe/size_estimator.h +++ b/src/mongo/db/exec/sbe/size_estimator.h @@ -30,17 +30,29 @@ #include #include +#include +#include #include #include +#include +#include #include +// IWYU pragma: no_include 
"boost/container/detail/std_fwd.hpp" + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/util/builder.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/hash_agg.h" #include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/row.h" #include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/interval.h" #include "mongo/db/storage/index_entry_comparison.h" #include "mongo/util/indexed_string_vector.h" diff --git a/src/mongo/db/exec/sbe/stages/branch.cpp b/src/mongo/db/exec/sbe/stages/branch.cpp index 9519a5998a33a..b14ec75740a4a 100644 --- a/src/mongo/db/exec/sbe/stages/branch.cpp +++ b/src/mongo/db/exec/sbe/stages/branch.cpp @@ -27,13 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/branch.h" - +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/branch.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace sbe { diff --git a/src/mongo/db/exec/sbe/stages/branch.h b/src/mongo/db/exec/sbe/stages/branch.h index df813e762a4df..9a9d3214c2fc8 100644 --- a/src/mongo/db/exec/sbe/stages/branch.h +++ b/src/mongo/db/exec/sbe/stages/branch.h @@ -29,9 +29,20 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/bson_scan.cpp b/src/mongo/db/exec/sbe/stages/bson_scan.cpp index 6b2e2c22e12af..751df45f9b1a6 100644 --- a/src/mongo/db/exec/sbe/stages/bson_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/bson_scan.cpp @@ -27,13 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/exec/sbe/stages/bson_scan.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" -#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/bson_scan.h" +#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -41,20 +50,24 @@ namespace sbe { BSONScanStage::BSONScanStage(std::vector bsons, boost::optional recordSlot, PlanNodeId planNodeId, - std::vector fields, - value::SlotVector vars, + std::vector scanFieldNames, + value::SlotVector scanFieldSlots, bool participateInTrialRunTracking) : PlanStage("bsonscan"_sd, planNodeId, participateInTrialRunTracking), _bsons(std::move(bsons)), _recordSlot(recordSlot), - _fields(std::move(fields)), - _vars(std::move(vars)) { + _scanFieldNames(std::move(scanFieldNames)), + _scanFieldSlots(std::move(scanFieldSlots)) { _bsonCurrent = _bsons.begin(); } std::unique_ptr BSONScanStage::clone() const { - return std::make_unique( - _bsons, _recordSlot, _commonStats.nodeId, _fields, _vars, _participateInTrialRunTracking); + return std::make_unique(_bsons, + _recordSlot, + _commonStats.nodeId, + _scanFieldNames, + _scanFieldSlots, + _participateInTrialRunTracking); } void BSONScanStage::prepare(CompileCtx& ctx) { @@ -62,12 +75,14 @@ void BSONScanStage::prepare(CompileCtx& ctx) { _recordAccessor = std::make_unique(); } - for (size_t idx = 0; idx < _fields.size(); ++idx) { - auto [it, inserted] = - _fieldAccessors.emplace(_fields[idx], std::make_unique()); - uassert(4822841, str::stream() << "duplicate field: " << _fields[idx], inserted); - auto [itRename, insertedRename] = _varAccessors.emplace(_vars[idx], it->second.get()); - uassert(4822842, str::stream() << "duplicate field: " << _vars[idx], insertedRename); + for (size_t idx = 0; idx < _scanFieldNames.size(); ++idx) { + auto [it, inserted] = _scanFieldAccessors.emplace( + _scanFieldNames[idx], std::make_unique()); + uassert(4822841, str::stream() << "duplicate field: " << _scanFieldNames[idx], inserted); + auto [itRename, insertedRename] = + _scanFieldAccessorsMap.emplace(_scanFieldSlots[idx], it->second.get()); + uassert( + 4822842, str::stream() << "duplicate field: " << _scanFieldSlots[idx], insertedRename); } } @@ -76,7 +91,7 @@ value::SlotAccessor* BSONScanStage::getAccessor(CompileCtx& ctx, value::SlotId s return _recordAccessor.get(); } - if (auto it = _varAccessors.find(slot); it != _varAccessors.end()) { + if (auto it = _scanFieldAccessorsMap.find(slot); it != _scanFieldAccessorsMap.end()) { return it->second; } @@ -99,13 +114,14 @@ PlanState BSONScanStage::getNext() { value::bitcastFrom(_bsonCurrent->objdata())); } - if (auto fieldsToMatch = _fieldAccessors.size(); fieldsToMatch != 0) { - for (auto& [name, accessor] : _fieldAccessors) { + if (auto fieldsToMatch = _scanFieldAccessors.size(); fieldsToMatch != 0) { + for (auto& [name, accessor] : _scanFieldAccessors) { accessor->reset(); } for (const auto& element : *_bsonCurrent) { auto fieldName = element.fieldNameStringData(); - if (auto it = _fieldAccessors.find(fieldName); it != _fieldAccessors.end()) { + if (auto it = _scanFieldAccessors.find(fieldName); + it != _scanFieldAccessors.end()) { // Found the field so convert it to Value. 
auto [tag, val] = bson::convertFrom(element); it->second->reset(tag, val); @@ -143,8 +159,8 @@ std::unique_ptr BSONScanStage::getStats(bool includeDebugInfo) c if (_recordSlot) { bob.appendNumber("recordSlot", static_cast(*_recordSlot)); } - bob.append("field", _fields); - bob.append("outputSlots", _vars.begin(), _vars.end()); + bob.append("field", _scanFieldNames); + bob.append("outputSlots", _scanFieldSlots.begin(), _scanFieldSlots.end()); ret->debugInfo = bob.obj(); } @@ -163,14 +179,14 @@ std::vector BSONScanStage::debugPrint() const { } ret.emplace_back(DebugPrinter::Block("[`")); - for (size_t idx = 0; idx < _fields.size(); ++idx) { + for (size_t idx = 0; idx < _scanFieldNames.size(); ++idx) { if (idx) { ret.emplace_back(DebugPrinter::Block("`,")); } - DebugPrinter::addIdentifier(ret, _vars[idx]); + DebugPrinter::addIdentifier(ret, _scanFieldSlots[idx]); ret.emplace_back("="); - DebugPrinter::addIdentifier(ret, _fields[idx]); + DebugPrinter::addIdentifier(ret, _scanFieldNames[idx]); } ret.emplace_back(DebugPrinter::Block("`]")); @@ -179,8 +195,8 @@ std::vector BSONScanStage::debugPrint() const { size_t BSONScanStage::estimateCompileTimeSize() const { size_t size = sizeof(*this); - size += size_estimator::estimate(_fields); - size += size_estimator::estimate(_vars); + size += size_estimator::estimate(_scanFieldNames); + size += size_estimator::estimate(_scanFieldSlots); return size; } diff --git a/src/mongo/db/exec/sbe/stages/bson_scan.h b/src/mongo/db/exec/sbe/stages/bson_scan.h index 63f97e8c4ce2f..111f44dbb13b1 100644 --- a/src/mongo/db/exec/sbe/stages/bson_scan.h +++ b/src/mongo/db/exec/sbe/stages/bson_scan.h @@ -29,8 +29,20 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/query/stage_types.h" namespace mongo { namespace sbe { @@ -38,18 +50,18 @@ namespace sbe { * Scans a vector of BSON documents. The resulting BSON documents are placed into the * given 'recordSlot', if provided. * - * The caller can also optionally provide a vector of top-level field names, 'fields', to extract - * from each BSON object. The resulting values are placed into the slots indicated by the 'vars' - * slot vector each time this stage advances. The provided 'fields' and 'vars' vectors must be of - * equal length. + * The caller can also optionally provide a vector of top-level field names, 'scanFieldNames', to + * extract from each BSON object. The resulting values are placed into the slots indicated by the + * 'scanFieldSlots' slot vector each time this stage advances. The provided 'scanFieldNames' and + * 'scanFieldSlots' vectors must be of equal length. 
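The contract described in the rewritten BSONScanStage comment above, parallel 'scanFieldNames' and 'scanFieldSlots' vectors with one slot written per requested top-level field on each advance, can be pictured with this standalone sketch (std:: containers only, no MongoDB types).

#include <cassert>
#include <cstddef>
#include <map>
#include <string>
#include <vector>

int main() {
    std::vector<std::string> scanFieldNames{"a", "b"};
    std::vector<int> scanFieldSlots{10, 11};                 // slot ids, one per field name
    assert(scanFieldNames.size() == scanFieldSlots.size());  // the "equal length" requirement

    std::map<std::string, int> document{{"a", 1}, {"b", 2}, {"c", 3}};
    std::map<int, int> slots;  // slot id -> extracted value
    for (std::size_t i = 0; i < scanFieldNames.size(); ++i) {
        if (auto it = document.find(scanFieldNames[i]); it != document.end()) {
            slots[scanFieldSlots[i]] = it->second;  // copy the named field into its slot
        }
    }
    assert(slots[10] == 1 && slots[11] == 2);  // "c" is not requested, so it is not extracted
    return 0;
}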
*/ class BSONScanStage final : public PlanStage { public: BSONScanStage(std::vector bsons, boost::optional recordSlot, PlanNodeId planNodeId, - std::vector fields = {}, - value::SlotVector vars = {}, + std::vector scanFieldNames = {}, + value::SlotVector scanFieldSlots = {}, bool participateInTrialRunTracking = true); std::unique_ptr clone() const final; @@ -70,13 +82,13 @@ class BSONScanStage final : public PlanStage { const std::vector _bsons; const boost::optional _recordSlot; - const std::vector _fields; - const value::SlotVector _vars; + const std::vector _scanFieldNames; + const value::SlotVector _scanFieldSlots; std::unique_ptr _recordAccessor; - value::FieldViewAccessorMap _fieldAccessors; - value::SlotAccessorMap _varAccessors; + value::FieldViewAccessorMap _scanFieldAccessors; + value::SlotAccessorMap _scanFieldAccessorsMap; std::vector::const_iterator _bsonCurrent; diff --git a/src/mongo/db/exec/sbe/stages/co_scan.cpp b/src/mongo/db/exec/sbe/stages/co_scan.cpp index 6656a6bf6dc4e..37c103d7a41d9 100644 --- a/src/mongo/db/exec/sbe/stages/co_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/co_scan.cpp @@ -27,12 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/exec/sbe/stages/co_scan.h" - +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" -#include "mongo/db/exec/sbe/expressions/expression.h" namespace mongo::sbe { CoScanStage::CoScanStage(PlanNodeId planNodeId, diff --git a/src/mongo/db/exec/sbe/stages/co_scan.h b/src/mongo/db/exec/sbe/stages/co_scan.h index 1f8c8d5404d8d..0ccdea6d6e787 100644 --- a/src/mongo/db/exec/sbe/stages/co_scan.h +++ b/src/mongo/db/exec/sbe/stages/co_scan.h @@ -29,7 +29,15 @@ #pragma once +#include +#include + +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/collection_helpers.cpp b/src/mongo/db/exec/sbe/stages/collection_helpers.cpp index 4853846a0ae18..9a71ef9d38b11 100644 --- a/src/mongo/db/exec/sbe/stages/collection_helpers.cpp +++ b/src/mongo/db/exec/sbe/stages/collection_helpers.cpp @@ -27,50 +27,55 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/sbe/stages/collection_helpers.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/exec/sbe/stages/collection_helpers.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::sbe { -std::tuple acquireCollection(OperationContext* opCtx, - const UUID& collUuid) { +void CollectionRef::acquireCollection(OperationContext* opCtx, const UUID& collUuid) { // The collection is either locked at a higher level or a snapshot of the catalog (consistent // with the storage engine snapshot from which we are reading) has been stashed on the // 'OperationContext'. Either way, this means that the UUID must still exist in our view of the // collection catalog. 
- CollectionPtr collPtr(CollectionCatalog::get(opCtx)->lookupCollectionByUUID(opCtx, collUuid)); - tassert(5071000, str::stream() << "Collection uuid " << collUuid << " does not exist", collPtr); + _collPtr.emplace(CollectionCatalog::get(opCtx)->lookupCollectionByUUID(opCtx, collUuid)); + tassert( + 5071000, str::stream() << "Collection uuid " << collUuid << " does not exist", getPtr()); - auto nss = collPtr->ns(); - return std::make_tuple( - std::move(collPtr), std::move(nss), CollectionCatalog::get(opCtx)->getEpoch()); + _collName = getPtr()->ns(); + _catalogEpoch = CollectionCatalog::get(opCtx)->getEpoch(); } -CollectionPtr restoreCollection(OperationContext* opCtx, - const NamespaceString& collName, - const UUID& collUuid, - uint64_t catalogEpoch) { +void CollectionRef::restoreCollection(OperationContext* opCtx, const UUID& collUuid) { + tassert(5777401, "Collection name should be initialized", _collName); + tassert(5777402, "Catalog epoch should be initialized", _catalogEpoch); + // Re-lookup the collection pointer, by UUID. If the collection has been dropped, then this UUID // lookup will result in a null pointer. If the collection has been renamed, then the resulting - // collection object should have a different name from the original 'collName'. In either + // collection object should have a different name from the original '_collName'. In either // scenario, we throw a 'QueryPlanKilled' error and terminate the query. - CollectionPtr collPtr(CollectionCatalog::get(opCtx)->lookupCollectionByUUID(opCtx, collUuid)); - if (!collPtr) { + _collPtr.emplace(CollectionCatalog::get(opCtx)->lookupCollectionByUUID(opCtx, collUuid)); + if (!getPtr()) { PlanYieldPolicy::throwCollectionDroppedError(collUuid); } - if (collName != collPtr->ns()) { - PlanYieldPolicy::throwCollectionRenamedError(collName, collPtr->ns(), collUuid); + if (*_collName != getPtr()->ns()) { + PlanYieldPolicy::throwCollectionRenamedError(*_collName, getPtr()->ns(), collUuid); } uassert(ErrorCodes::QueryPlanKilled, "the catalog was closed and reopened", - CollectionCatalog::get(opCtx)->getEpoch() == catalogEpoch); - - return collPtr; + CollectionCatalog::get(opCtx)->getEpoch() == *_catalogEpoch); } } // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/stages/collection_helpers.h b/src/mongo/db/exec/sbe/stages/collection_helpers.h index 4116f2980ff35..f1d7734db5731 100644 --- a/src/mongo/db/exec/sbe/stages/collection_helpers.h +++ b/src/mongo/db/exec/sbe/stages/collection_helpers.h @@ -29,51 +29,97 @@ #pragma once +#include +#include +#include +#include +#include #include +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/db_raii.h" #include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo::sbe { /** * A callback which gets called whenever a SCAN stage asks an underlying index scan for a result. 
*/ -using IndexKeyConsistencyCheckCallback = std::function; +using IndexKeyConsistencyCheckCallback = bool (*)(OperationContext* opCtx, + StringMap&, + value::SlotAccessor* snapshotIdAccessor, + value::SlotAccessor* indexIdentAccessor, + value::SlotAccessor* indexKeyAccessor, + const CollectionPtr& collection, + const Record& nextRecord); -using IndexKeyCorruptionCheckCallback = - std::function; +using IndexKeyCorruptionCheckCallback = void (*)(OperationContext* opCtx, + value::SlotAccessor* snapshotIdAccessor, + value::SlotAccessor* indexKeyAccessor, + value::SlotAccessor* indexKeyPatternAccessor, + const RecordId& rid, + const NamespaceString& nss); /** - * Given a collection UUID, looks up the UUID in the catalog and returns a pointer to the - * collection, the collection's name, and the current catalog epoch. - * - * This is intended for use during the preparation of an SBE plan. The caller must hold the - * appropriate db_raii object in order to ensure that SBE plan preparation sees a consistent view of - * the catalog. + * Helper class used by SBE PlanStages for acquiring and re-acquiring a CollectionPtr. */ -std::tuple acquireCollection(OperationContext* opCtx, - const UUID& collUuid); +class CollectionRef { +public: + bool isInitialized() const { + return _collPtr.has_value(); + } -/** - * Re-acquires a pointer to the collection, intended for use during SBE yield recovery or when a - * closed SBE plan is re-opened. In addition to acquiring the collection pointer, throws a - * UserException if the collection has been dropped or renamed, or if the catalog has been closed - * and re-opened. SBE query execution currently cannot survive such events if they occur during a - * yield or between getMores. - */ -CollectionPtr restoreCollection(OperationContext* opCtx, - const NamespaceString& collName, - const UUID& collUuid, - uint64_t catalogEpoch); + const CollectionPtr& getPtr() const { + dassert(isInitialized()); + return *_collPtr; + } + + operator bool() const { + return isInitialized() ? static_cast(getPtr()) : false; + } + + bool operator!() const { + return isInitialized() ? !getPtr() : true; + } + + void reset() { + _collPtr = boost::none; + } + + boost::optional getCollName() const { + return _collName; + } + + /** + * Given a collection UUID, looks up the UUID in the catalog and stores a pointer to the + * collection into _collPtr. This method also stores the NamespaceString for the collection + * into _collName, and it stores the current catalog epoch into _catalogEpoch. + * + * This is intended for use during the preparation of an SBE plan. The caller must hold the + * appropriate db_raii object in order to ensure that SBE plan preparation sees a consistent + * view of the catalog. + */ + void acquireCollection(OperationContext* opCtx, const UUID& collUuid); + + /** + * Re-acquires a pointer to the collection, intended for use during SBE yield recovery or when a + * closed SBE plan is re-opened. In addition to acquiring the collection pointer, throws a + * UserException if the collection has been dropped or renamed, or if the catalog has been + * closed and re-opened. SBE query execution currently cannot survive such events if they occur + * during a yield or between getMores. 
+ */ + void restoreCollection(OperationContext* opCtx, const UUID& collUuid); + +private: + boost::optional _collPtr; + boost::optional _collName; + boost::optional _catalogEpoch; +}; } // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/stages/column_scan.cpp b/src/mongo/db/exec/sbe/stages/column_scan.cpp index 7be25f90f7425..a5deff7f873aa 100644 --- a/src/mongo/db/exec/sbe/stages/column_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/column_scan.cpp @@ -28,10 +28,29 @@ */ #include "mongo/db/exec/sbe/stages/column_scan.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" #include "mongo/db/index/columns_access_method.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace sbe { @@ -115,13 +134,14 @@ void ColumnScanStage::prepare(CompileCtx& ctx) { } tassert(6610200, "'_coll' should not be initialized prior to 'acquireCollection()'", !_coll); - std::tie(_coll, _collName, _catalogEpoch) = acquireCollection(_opCtx, _collUuid); + _coll.acquireCollection(_opCtx, _collUuid); - auto indexCatalog = _coll->getIndexCatalog(); + auto indexCatalog = _coll.getPtr()->getIndexCatalog(); auto indexDesc = indexCatalog->findIndexByName(_opCtx, _columnIndexName); tassert(6610201, str::stream() << "could not find index named '" << _columnIndexName - << "' in collection '" << _collName << "'", + << "' in collection '" << _coll.getCollName()->toStringForErrorMsg() + << "'", indexDesc); _weakIndexCatalogEntry = indexCatalog->getEntryShared(indexDesc); } @@ -177,12 +197,11 @@ void ColumnScanStage::doRestoreState(bool relinquishCursor) { invariant(!_coll); // If this stage has not been prepared, then yield recovery is a no-op. - if (!_collName) { + if (!_coll.getCollName()) { return; } - tassert(6610202, "Catalog epoch should be initialized", _catalogEpoch); - _coll = restoreCollection(_opCtx, *_collName, _collUuid, *_catalogEpoch); + _coll.restoreCollection(_opCtx, _collUuid); auto indexCatalogEntry = _weakIndexCatalogEntry.lock(); uassert(ErrorCodes::QueryPlanKilled, @@ -264,14 +283,12 @@ void ColumnScanStage::open(bool reOpen) { // make some validity checks (the collection has not been dropped, renamed, etc.). 
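The acquire/restore contract that the new `CollectionRef` helper centralizes (resolve a UUID once at prepare time; on yield recovery, fail the plan if the collection was dropped, renamed, or the catalog epoch changed) can be condensed into a minimal, self-contained sketch. Everything below (`Catalog`, `CollectionRefSketch`, the exception messages) is a simplified stand-in, not the real catalog API:

```cpp
#include <cstdint>
#include <map>
#include <optional>
#include <stdexcept>
#include <string>

// Simplified stand-ins: 'Catalog' models lookupCollectionByUUID()/getEpoch(),
// and the exceptions model the QueryPlanKilled errors thrown by the real helper.
struct Catalog {
    std::map<std::string, std::string> nssByUuid;  // uuid -> current namespace
    uint64_t epoch = 0;
};

struct CollectionRefSketch {
    std::optional<std::string> collName;
    std::optional<uint64_t> catalogEpoch;

    // prepare(): resolve the UUID once and remember the name and catalog epoch.
    void acquire(const Catalog& catalog, const std::string& uuid) {
        collName = catalog.nssByUuid.at(uuid);
        catalogEpoch = catalog.epoch;
    }

    // Yield recovery / re-open: the UUID must still resolve, to the same name,
    // under the same epoch; otherwise the query cannot safely continue.
    void restore(const Catalog& catalog, const std::string& uuid) {
        auto it = catalog.nssByUuid.find(uuid);
        if (it == catalog.nssByUuid.end())
            throw std::runtime_error("QueryPlanKilled: collection dropped");
        if (it->second != *collName)
            throw std::runtime_error("QueryPlanKilled: collection renamed");
        if (catalog.epoch != *catalogEpoch)
            throw std::runtime_error("QueryPlanKilled: catalog closed and reopened");
    }
};
```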
tassert( 6610207, "ColumnScanStage is not open but have _rowStoreCursor", !_rowStoreCursor); - tassert(6610208, "Collection name should be initialized", _collName); - tassert(6610209, "Catalog epoch should be initialized", _catalogEpoch); - _coll = restoreCollection(_opCtx, *_collName, _collUuid, *_catalogEpoch); + _coll.restoreCollection(_opCtx, _collUuid); } } if (!_rowStoreCursor) { - _rowStoreCursor = _coll->getCursor(_opCtx, true /* forward */); + _rowStoreCursor = _coll.getPtr()->getCursor(_opCtx, true /* forward */); } if (_columnCursors.empty()) { diff --git a/src/mongo/db/exec/sbe/stages/column_scan.h b/src/mongo/db/exec/sbe/stages/column_scan.h index edca0c0916451..324349f2582f1 100644 --- a/src/mongo/db/exec/sbe/stages/column_scan.h +++ b/src/mongo/db/exec/sbe/stages/column_scan.h @@ -29,14 +29,40 @@ #pragma once -#include "mongo/config.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/collection_helpers.h" #include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" #include "mongo/db/exec/sbe/values/column_store_encoder.h" #include "mongo/db/exec/sbe/values/columnar.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/column_store.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo { namespace sbe { @@ -238,9 +264,7 @@ class ColumnScanStage final : public PlanStage { // The columnar index this stage is scanning and the associated row store collection. const UUID _collUuid; const std::string _columnIndexName; - CollectionPtr _coll; - boost::optional _collName; // These two members are initialized in 'prepare()' - boost::optional _catalogEpoch; // and are not changed afterwards. + CollectionRef _coll; std::weak_ptr _weakIndexCatalogEntry; // Paths to be read from the index. '_includeInOutput' defines which of the fields should be diff --git a/src/mongo/db/exec/sbe/stages/exchange.cpp b/src/mongo/db/exec/sbe/stages/exchange.cpp index 02b5c3ce7d0bd..16bbed2a843e6 100644 --- a/src/mongo/db/exec/sbe/stages/exchange.cpp +++ b/src/mongo/db/exec/sbe/stages/exchange.cpp @@ -27,14 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/exchange.h" - -#include "mongo/base/init.h" +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/string_data.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/exchange.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future_impl.h" namespace mongo::sbe { std::unique_ptr s_globalThreadPool; @@ -46,6 +58,12 @@ MONGO_INITIALIZER(s_globalThreadPool)(InitializerContext* context) { options.maxThreads = 128; options.onCreateThread = [](const std::string& name) { Client::initThread(name); + + // TODO(SERVER-74662): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } }; s_globalThreadPool = std::make_unique(options); s_globalThreadPool->startup(); diff --git a/src/mongo/db/exec/sbe/stages/exchange.h b/src/mongo/db/exec/sbe/stages/exchange.h index 2919f91955b4a..82ec0f121e3c6 100644 --- a/src/mongo/db/exec/sbe/stages/exchange.h +++ b/src/mongo/db/exec/sbe/stages/exchange.h @@ -29,13 +29,27 @@ #pragma once +#include +#include +#include +#include #include +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/future.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/future.h" diff --git a/src/mongo/db/exec/sbe/stages/filter.h b/src/mongo/db/exec/sbe/stages/filter.h index 0449f1abbb13f..5f48de3c2fddb 100644 --- a/src/mongo/db/exec/sbe/stages/filter.h +++ b/src/mongo/db/exec/sbe/stages/filter.h @@ -44,10 +44,14 @@ namespace mongo::sbe { * rather than plain "filter". The predicate is evaluated in the open() call. If the result is * false, then 'getNext()' returns EOF immediately. * - * The IsEof template parameter controls 'early out' behavior of the filter expression. If this + * The 'IsEof' template parameter controls 'early out' behavior of the filter expression. If this * template parameter is true, then the stage is notated as "efilter" rather than plain "filter". * Once the filter evaluates to false then the getNext() call returns EOF. * + * Only one of 'IsConst' and 'IsEof' may be true. + * + * Records pass through the filter when the 'filter' expression evaluates to true. 
+ * * Debug string representations: * * filter { predicate } childStage diff --git a/src/mongo/db/exec/sbe/stages/hash_agg.cpp b/src/mongo/db/exec/sbe/stages/hash_agg.cpp index d94eafa3c334d..3f1959ca6a0e1 100644 --- a/src/mongo/db/exec/sbe/stages/hash_agg.cpp +++ b/src/mongo/db/exec/sbe/stages/hash_agg.cpp @@ -28,14 +28,36 @@ */ #include "mongo/db/exec/sbe/stages/hash_agg.h" -#include "mongo/db/concurrency/d_concurrency.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/size_estimator.h" #include "mongo/db/exec/sbe/util/spilling.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/stats/counters.h" #include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" #include "mongo/util/str.h" namespace mongo { @@ -214,12 +236,12 @@ void HashAggStage::prepare(CompileCtx& ctx) { _outAccessors[slot] = _outAggAccessors.back().get(); ctx.root = this; - ctx.aggExpression = true; - ctx.accumulator = _outAggAccessors.back().get(); std::unique_ptr initCode{nullptr}; if (expr.init) { initCode = expr.init->compile(ctx); } + ctx.aggExpression = true; + ctx.accumulator = _outAggAccessors.back().get(); _aggCodes.emplace_back(std::move(initCode), expr.acc->compile(ctx)); ctx.aggExpression = false; @@ -287,7 +309,7 @@ void HashAggStage::makeTemporaryRecordStore() { void HashAggStage::spillRowToDisk(const value::MaterializedRow& key, const value::MaterializedRow& val) { - KeyString::Builder kb{KeyString::Version::kLatestVersion}; + key_string::Builder kb{key_string::Version::kLatestVersion}; key.serializeIntoKeyString(kb); // Add a unique integer to the end of the key, since record ids must be unique. We want equal // keys to be adjacent in the 'RecordStore' so that we can merge the partial aggregates with a @@ -443,6 +465,8 @@ void HashAggStage::open(bool reOpen) { auto [it, _] = _ht->emplace(std::move(key), value::MaterializedRow{0}); it->second.resize(_outAggAccessors.size()); + _htIt = it; + // Run accumulator initializer if needed. for (size_t idx = 0; idx < _outAggAccessors.size(); ++idx) { if (_aggCodes[idx].first) { @@ -450,7 +474,6 @@ void HashAggStage::open(bool reOpen) { _outHashAggAccessors[idx]->reset(owned, tag, val); } } - _htIt = it; } // Accumulate state in '_ht'. @@ -529,7 +552,8 @@ HashAggStage::SpilledRow HashAggStage::deserializeSpilledRecord(const Record& re // Read the values and type bits out of the value part of the record. 
BufReader valReader(record.data.data(), record.data.size()); auto val = value::MaterializedRow::deserializeForSorter(valReader, {}); - auto typeBits = KeyString::TypeBits::fromBuffer(KeyString::Version::kLatestVersion, &valReader); + auto typeBits = + key_string::TypeBits::fromBuffer(key_string::Version::kLatestVersion, &valReader); keyBuffer.reset(); auto key = value::MaterializedRow::deserializeFromKeyString( @@ -703,9 +727,9 @@ std::vector HashAggStage::debugPrint() const { ret.emplace_back("="); DebugPrinter::addBlocks(ret, expr.acc->debugPrint()); if (expr.init) { - ret.emplace_back(DebugPrinter::Block("(`")); + ret.emplace_back(DebugPrinter::Block("init{`")); DebugPrinter::addBlocks(ret, expr.init->debugPrint()); - ret.emplace_back(DebugPrinter::Block("`)")); + ret.emplace_back(DebugPrinter::Block("`}")); } first = false; } diff --git a/src/mongo/db/exec/sbe/stages/hash_agg.h b/src/mongo/db/exec/sbe/stages/hash_agg.h index ad262efccca72..78233b1b676dd 100644 --- a/src/mongo/db/exec/sbe/stages/hash_agg.h +++ b/src/mongo/db/exec/sbe/stages/hash_agg.h @@ -29,11 +29,33 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/row.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/temporary_record_store.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/platform/atomic_word.h" #include "mongo/stdx/unordered_map.h" namespace mongo { @@ -175,7 +197,7 @@ class HashAggStage final : public PlanStage { /** * Inserts a key and value pair to the '_recordStore'. They key is serialized to a - * 'KeyString::Value' which becomes the 'RecordId'. This makes the keys memcmp-able and ensures + * 'key_string::Value' which becomes the 'RecordId'. This makes the keys memcmp-able and ensures * that the record store ends up sorted by the group-by keys. * * Note that the 'typeBits' are needed to reconstruct the spilled 'key' to a 'MaterializedRow', diff --git a/src/mongo/db/exec/sbe/stages/hash_join.cpp b/src/mongo/db/exec/sbe/stages/hash_join.cpp index 41c512031e38f..9a0557a0fb950 100644 --- a/src/mongo/db/exec/sbe/stages/hash_join.cpp +++ b/src/mongo/db/exec/sbe/stages/hash_join.cpp @@ -27,13 +27,22 @@ * it in the license file. 
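The spill-key scheme described here (group keys serialized into a memcmp-comparable key string, with a unique integer appended so record ids stay distinct while equal keys remain adjacent) can be sketched without the real `key_string` machinery; the encoding below is a deliberately naive stand-in:

```cpp
#include <cstdint>
#include <string>

// Simplified stand-in for the spill-key scheme: the group key is encoded into a
// byte string whose memcmp order matches the key order, and a big-endian counter
// is appended so that equal keys get distinct record ids while still sorting
// next to each other. (The real code uses key_string encoding, not raw bytes.)
std::string makeSpillRecordId(const std::string& groupKey, uint64_t& counter) {
    std::string rid = groupKey;                       // order-preserving key bytes
    rid.push_back('\0');                              // separator between key and counter
    for (int shift = 56; shift >= 0; shift -= 8)      // big-endian keeps memcmp order
        rid.push_back(static_cast<char>((counter >> shift) & 0xff));
    ++counter;
    return rid;
}
```

Because equal group keys differ only in the trailing counter, iterating the spilled records in key order visits all partial aggregates for one group consecutively, so they can be merged in a single pass.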
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/exec/sbe/stages/hash_join.h" +#include +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" -#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/hash_join.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/exec/sbe/stages/hash_join.h b/src/mongo/db/exec/sbe/stages/hash_join.h index a3997074db0bb..9843988a24456 100644 --- a/src/mongo/db/exec/sbe/stages/hash_join.h +++ b/src/mongo/db/exec/sbe/stages/hash_join.h @@ -29,10 +29,22 @@ #pragma once +#include +#include +#include +#include #include +#include + +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/row.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/hash_lookup.cpp b/src/mongo/db/exec/sbe/stages/hash_lookup.cpp index 7341204fc0787..ada528cd03f70 100644 --- a/src/mongo/db/exec/sbe/stages/hash_lookup.cpp +++ b/src/mongo/db/exec/sbe/stages/hash_lookup.cpp @@ -27,16 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/curop.h" -#include "mongo/db/exec/sbe/stages/hash_lookup.h" -#include "mongo/db/exec/sbe/stages/stage_visitors.h" - #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/hash_lookup.h" +#include "mongo/db/exec/sbe/stages/stage_visitors.h" #include "mongo/db/exec/sbe/util/spilling.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" #include "mongo/util/str.h" namespace mongo { @@ -211,9 +235,9 @@ void HashLookupStage::reset() { _bufferIt = 0; } -std::pair HashLookupStage::serializeKeyForRecordStore( +std::pair HashLookupStage::serializeKeyForRecordStore( const value::MaterializedRow& key) const { - KeyString::Builder kb{KeyString::Version::kLatestVersion}; + key_string::Builder kb{key_string::Version::kLatestVersion}; return encodeKeyString(kb, key); } @@ -340,7 +364,7 @@ void HashLookupStage::spillBufferedValueToDisk(OperationContext* opCtx, size_t HashLookupStage::bufferValueOrSpill(value::MaterializedRow& value) { size_t bufferIndex = _valueId; const long long newMemUsage = _computedTotalMemUsage + size_estimator::estimate(value); - if (newMemUsage <= 
_memoryUseInBytesBeforeSpill) { + if (!hasSpilledBufToDisk() && newMemUsage <= _memoryUseInBytesBeforeSpill) { _buffer.emplace_back(std::move(value)); _computedTotalMemUsage = newMemUsage; } else { diff --git a/src/mongo/db/exec/sbe/stages/hash_lookup.h b/src/mongo/db/exec/sbe/stages/hash_lookup.h index d66f53bb2038e..c7a9d92155c97 100644 --- a/src/mongo/db/exec/sbe/stages/hash_lookup.h +++ b/src/mongo/db/exec/sbe/stages/hash_lookup.h @@ -29,12 +29,35 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include +#include + +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/row.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/temporary_record_store.h" +#include "mongo/platform/atomic_word.h" namespace mongo::sbe { /** @@ -161,7 +184,7 @@ class HashLookupStage final : public PlanStage { void makeTemporaryRecordStore(); - std::pair serializeKeyForRecordStore( + std::pair serializeKeyForRecordStore( const value::MaterializedRow& key) const; /** diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.cpp b/src/mongo/db/exec/sbe/stages/ix_scan.cpp index e02df9d3f5ef6..c5296f8efcfe7 100644 --- a/src/mongo/db/exec/sbe/stages/ix_scan.cpp +++ b/src/mongo/db/exec/sbe/stages/ix_scan.cpp @@ -27,17 +27,35 @@ * it in the license file. 
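The memory-budget check being tightened here (note the new `!hasSpilledBufToDisk()` guard) follows a common buffer-or-spill pattern; the sketch below is a self-contained approximation with toy types, not the stage's actual bookkeeping:

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Simplified stand-in for the buffer-or-spill pattern: values stay in memory
// while the running size estimate fits the budget, and once anything has been
// written to the spill store, later values go there too (mirroring the
// !hasSpilledBufToDisk() guard added in this hunk).
struct BufferOrSpill {
    size_t budgetBytes;
    size_t memUsage = 0;
    bool spilled = false;
    std::vector<std::string> buffer;      // in-memory values
    std::vector<std::string> spillStore;  // stand-in for the temporary record store

    size_t add(std::string value) {
        const size_t index = buffer.size() + spillStore.size();  // stable id for the value
        const size_t newUsage = memUsage + value.size();
        if (!spilled && newUsage <= budgetBytes) {
            buffer.push_back(std::move(value));
            memUsage = newUsage;
        } else {
            spilled = true;
            spillStore.push_back(std::move(value));  // value addressed by 'index' on disk
        }
        return index;
    }
};
```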
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/ix_scan.h" - +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/stages/ix_scan.h" #include "mongo/db/exec/trial_run_tracker.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/str.h" namespace mongo::sbe { IndexScanStageBase::IndexScanStageBase(StringData stageType, @@ -47,6 +65,7 @@ IndexScanStageBase::IndexScanStageBase(StringData stageType, boost::optional indexKeySlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, + boost::optional indexIdentSlot, IndexKeysInclusionSet indexKeysToInclude, value::SlotVector vars, PlanYieldPolicy* yieldPolicy, @@ -60,6 +79,7 @@ IndexScanStageBase::IndexScanStageBase(StringData stageType, _indexKeySlot(indexKeySlot), _recordIdSlot(recordIdSlot), _snapshotIdSlot(snapshotIdSlot), + _indexIdentSlot(indexIdentSlot), _indexKeysToInclude(indexKeysToInclude), _vars(std::move(vars)), _lowPriority(lowPriority) { @@ -67,18 +87,6 @@ IndexScanStageBase::IndexScanStageBase(StringData stageType, } void IndexScanStageBase::prepareImpl(CompileCtx& ctx) { - if (_indexKeySlot) { - _recordAccessor = std::make_unique(); - } - - if (_recordIdSlot) { - _recordIdAccessor = std::make_unique(); - } - - if (_snapshotIdSlot) { - _snapshotIdAccessor = std::make_unique(); - } - _accessors.resize(_vars.size()); for (size_t idx = 0; idx < _accessors.size(); ++idx) { auto [it, inserted] = _accessorMap.emplace(_vars[idx], &_accessors[idx]); @@ -86,37 +94,53 @@ void IndexScanStageBase::prepareImpl(CompileCtx& ctx) { } tassert(5709602, "'_coll' should not be initialized prior to 'acquireCollection()'", !_coll); - std::tie(_coll, _collName, _catalogEpoch) = acquireCollection(_opCtx, _collUuid); + _coll.acquireCollection(_opCtx, _collUuid); - auto indexCatalog = _coll->getIndexCatalog(); + auto indexCatalog = _coll.getPtr()->getIndexCatalog(); auto indexDesc = indexCatalog->findIndexByName(_opCtx, _indexName); - tassert(4938500, + // uassert here, not tassert, because it is not a programming bug if the index got dropped just + // before we looked for it. 
+ uassert(4938500, str::stream() << "could not find index named '" << _indexName << "' in collection '" - << _collName << "'", + << _coll.getCollName()->toStringForErrorMsg() << "'", indexDesc); + _entry = indexCatalog->getEntry(indexDesc); tassert(4938503, str::stream() << "expected IndexCatalogEntry for index named: " << _indexName, static_cast(_entry)); - _indexIdent = _entry->getIdent(); + _ordering = _entry->ordering(); - if (_snapshotIdAccessor) { + auto [identTag, identVal] = value::makeNewString(StringData(_entry->getIdent())); + _indexIdentAccessor.reset(identTag, identVal); + + if (_indexIdentSlot) { + _indexIdentViewAccessor.reset(identTag, identVal); + } else { + _indexIdentViewAccessor.reset(); + } + + if (_snapshotIdSlot) { _latestSnapshotId = _opCtx->recoveryUnit()->getSnapshotId().toNumber(); } } value::SlotAccessor* IndexScanStageBase::getAccessor(CompileCtx& ctx, value::SlotId slot) { if (_indexKeySlot && *_indexKeySlot == slot) { - return _recordAccessor.get(); + return &_recordAccessor; } if (_recordIdSlot && *_recordIdSlot == slot) { - return _recordIdAccessor.get(); + return &_recordIdAccessor; } if (_snapshotIdSlot && *_snapshotIdSlot == slot) { - return _snapshotIdAccessor.get(); + return &_snapshotIdAccessor; + } + + if (_indexIdentSlot && *_indexIdentSlot == slot) { + return &_indexIdentViewAccessor; } if (auto it = _accessorMap.find(slot); it != _accessorMap.end()) { @@ -128,11 +152,11 @@ value::SlotAccessor* IndexScanStageBase::getAccessor(CompileCtx& ctx, value::Slo void IndexScanStageBase::doSaveState(bool relinquishCursor) { if (relinquishCursor) { - if (_recordAccessor) { - prepareForYielding(*_recordAccessor, slotsAccessible()); + if (_indexKeySlot) { + prepareForYielding(_recordAccessor, slotsAccessible()); } - if (_recordIdAccessor) { - prepareForYielding(*_recordIdAccessor, slotsAccessible()); + if (_recordIdSlot) { + prepareForYielding(_recordIdAccessor, slotsAccessible()); } for (auto& accessor : _accessors) { prepareForYielding(accessor, slotsAccessible()); @@ -154,10 +178,13 @@ void IndexScanStageBase::doSaveState(bool relinquishCursor) { } void IndexScanStageBase::restoreCollectionAndIndex() { - tassert(5777406, "Collection name should be initialized", _collName); - tassert(5777407, "Catalog epoch should be initialized", _catalogEpoch); - _coll = restoreCollection(_opCtx, *_collName, _collUuid, *_catalogEpoch); - auto desc = _coll->getIndexCatalog()->findIndexByIdent(_opCtx, _indexIdent); + _coll.restoreCollection(_opCtx, _collUuid); + + auto [identTag, identVal] = _indexIdentAccessor.getViewOfValue(); + tassert(7566700, "Expected ident to be a string", value::isString(identTag)); + + auto indexIdent = value::getStringView(identTag, identVal); + auto desc = _coll.getPtr()->getIndexCatalog()->findIndexByIdent(_opCtx, indexIdent); uassert(ErrorCodes::QueryPlanKilled, str::stream() << "query plan killed :: index '" << _indexName << "' dropped", desc && !desc->getEntry()->isDropped()); @@ -175,7 +202,7 @@ void IndexScanStageBase::doRestoreState(bool relinquishCursor) { invariant(!_coll); // If this stage has not been prepared, then yield recovery is a no-op. - if (!_collName) { + if (!_coll.getCollName()) { return; } restoreCollectionAndIndex(); @@ -186,7 +213,7 @@ void IndexScanStageBase::doRestoreState(bool relinquishCursor) { // Yield is the only time during plan execution that the snapshotId can change. As such, we // update it accordingly as part of yield recovery. 
- if (_snapshotIdAccessor) { + if (_snapshotIdSlot) { _latestSnapshotId = _opCtx->recoveryUnit()->getSnapshotId().toNumber(); } } @@ -199,8 +226,8 @@ void IndexScanStageBase::doDetachFromOperationContext() { } void IndexScanStageBase::doAttachToOperationContext(OperationContext* opCtx) { - if (_lowPriority && _open && _opCtx->getClient()->isFromUserConnection() && - _opCtx->lockState()->shouldWaitForTicket()) { + if (_lowPriority && _open && gDeprioritizeUnboundedUserIndexScans.load() && + _opCtx->getClient()->isFromUserConnection() && _opCtx->lockState()->shouldWaitForTicket()) { _priority.emplace(opCtx->lockState(), AdmissionContext::Priority::kLow); } if (_cursor) { @@ -263,8 +290,8 @@ void IndexScanStageBase::trackRead() { PlanState IndexScanStageBase::getNext() { auto optTimer(getOptTimer(_opCtx)); - if (_lowPriority && !_priority && _opCtx->getClient()->isFromUserConnection() && - _opCtx->lockState()->shouldWaitForTicket()) { + if (_lowPriority && !_priority && gDeprioritizeUnboundedUserIndexScans.load() && + _opCtx->getClient()->isFromUserConnection() && _opCtx->lockState()->shouldWaitForTicket()) { _priority.emplace(_opCtx->lockState(), AdmissionContext::Priority::kLow); } @@ -292,21 +319,21 @@ PlanState IndexScanStageBase::getNext() { } } while (!validateKey(_nextRecord)); - if (_recordAccessor) { - _recordAccessor->reset(false, - value::TypeTags::ksValue, - value::bitcastFrom(&_nextRecord->keyString)); + if (_indexKeySlot) { + _recordAccessor.reset(false, + value::TypeTags::ksValue, + value::bitcastFrom(&_nextRecord->keyString)); } - if (_recordIdAccessor) { - _recordIdAccessor->reset( + if (_recordIdSlot) { + _recordIdAccessor.reset( false, value::TypeTags::RecordId, value::bitcastFrom(&_nextRecord->loc)); } - if (_snapshotIdAccessor) { + if (_snapshotIdSlot) { // Copy the latest snapshot ID into the 'snapshotId' slot. 
- _snapshotIdAccessor->reset(value::TypeTags::NumberInt64, - value::bitcastFrom(_latestSnapshotId)); + _snapshotIdAccessor.reset(value::TypeTags::NumberInt64, + value::bitcastFrom(_latestSnapshotId)); } if (_accessors.size()) { @@ -348,6 +375,9 @@ std::unique_ptr IndexScanStageBase::getStats(bool includeDebugIn if (_snapshotIdSlot) { bob.appendNumber("snapshotIdSlot", static_cast(*_snapshotIdSlot)); } + if (_indexIdentSlot) { + bob.appendNumber("indexIdentSlot", static_cast(*_indexIdentSlot)); + } bob.append("outputSlots", _vars.begin(), _vars.end()); bob.append("indexKeysToInclude", _indexKeysToInclude.to_string()); ret->debugInfo = bob.obj(); @@ -379,6 +409,12 @@ void IndexScanStageBase::debugPrintImpl(std::vector& blocks DebugPrinter::addIdentifier(blocks, DebugPrinter::kNoneKeyword); } + if (_indexIdentSlot) { + DebugPrinter::addIdentifier(blocks, _indexIdentSlot.value()); + } else { + DebugPrinter::addIdentifier(blocks, DebugPrinter::kNoneKeyword); + } + if (_lowPriority) { DebugPrinter::addKeyword(blocks, "lowPriority"); } @@ -428,6 +464,7 @@ SimpleIndexScanStage::SimpleIndexScanStage(UUID collUuid, boost::optional indexKeySlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, + boost::optional indexIdentSlot, IndexKeysInclusionSet indexKeysToInclude, value::SlotVector vars, std::unique_ptr seekKeyLow, @@ -443,6 +480,7 @@ SimpleIndexScanStage::SimpleIndexScanStage(UUID collUuid, indexKeySlot, recordIdSlot, snapshotIdSlot, + indexIdentSlot, indexKeysToInclude, std::move(vars), yieldPolicy, @@ -463,6 +501,7 @@ std::unique_ptr SimpleIndexScanStage::clone() const { _indexKeySlot, _recordIdSlot, _snapshotIdSlot, + _indexIdentSlot, _indexKeysToInclude, _vars, _seekKeyLow ? _seekKeyLow->clone() : nullptr, @@ -483,20 +522,21 @@ void SimpleIndexScanStage::prepare(CompileCtx& ctx) { if (_seekKeyHigh) { ctx.root = this; _seekKeyHighCode = _seekKeyHigh->compile(ctx); - _seekKeyHighHolder = std::make_unique(); } - _seekKeyLowHolder = std::make_unique(); + + _seekKeyLowHolder.reset(); + _seekKeyHighHolder.reset(); } void SimpleIndexScanStage::doSaveState(bool relinquishCursor) { // Seek points are external to the index scan and must be accessible no matter what as long // as the index scan is opened. 
if (_open && relinquishCursor) { - if (_seekKeyLowHolder) { - prepareForYielding(*_seekKeyLowHolder, true); + if (_seekKeyLow) { + prepareForYielding(_seekKeyLowHolder, true); } - if (_seekKeyHighHolder) { - prepareForYielding(*_seekKeyHighHolder, true); + if (_seekKeyHigh) { + prepareForYielding(_seekKeyHighHolder, true); } } @@ -514,7 +554,7 @@ void SimpleIndexScanStage::open(bool reOpen) { uassert(4822851, str::stream() << "seek key is wrong type: " << msgTagLow, tagLow == value::TypeTags::ksValue); - _seekKeyLowHolder->reset(ownedLow, tagLow, valLow); + _seekKeyLowHolder.reset(ownedLow, tagLow, valLow); auto [ownedHi, tagHi, valHi] = _bytecode.run(_seekKeyHighCode.get()); const auto msgTagHi = tagHi; @@ -522,36 +562,36 @@ void SimpleIndexScanStage::open(bool reOpen) { str::stream() << "seek key is wrong type: " << msgTagHi, tagHi == value::TypeTags::ksValue); - _seekKeyHighHolder->reset(ownedHi, tagHi, valHi); + _seekKeyHighHolder.reset(ownedHi, tagHi, valHi); } else if (_seekKeyLow) { auto [ownedLow, tagLow, valLow] = _bytecode.run(_seekKeyLowCode.get()); const auto msgTagLow = tagLow; uassert(4822853, str::stream() << "seek key is wrong type: " << msgTagLow, tagLow == value::TypeTags::ksValue); - _seekKeyLowHolder->reset(ownedLow, tagLow, valLow); + _seekKeyLowHolder.reset(ownedLow, tagLow, valLow); } else { auto sdi = _entry->accessMethod()->asSortedData()->getSortedDataInterface(); - KeyString::Builder kb(sdi->getKeyStringVersion(), - sdi->getOrdering(), - KeyString::Discriminator::kExclusiveBefore); - kb.appendDiscriminator(KeyString::Discriminator::kExclusiveBefore); + key_string::Builder kb(sdi->getKeyStringVersion(), + sdi->getOrdering(), + key_string::Discriminator::kExclusiveBefore); + kb.appendDiscriminator(key_string::Discriminator::kExclusiveBefore); auto [copyTag, copyVal] = value::makeCopyKeyString(kb.getValueCopy()); - _seekKeyLowHolder->reset(true, copyTag, copyVal); + _seekKeyLowHolder.reset(true, copyTag, copyVal); } } -const KeyString::Value& SimpleIndexScanStage::getSeekKeyLow() const { - auto [tag, value] = _seekKeyLowHolder->getViewOfValue(); +const key_string::Value& SimpleIndexScanStage::getSeekKeyLow() const { + auto [tag, value] = _seekKeyLowHolder.getViewOfValue(); return *value::getKeyStringView(value); } -const KeyString::Value* SimpleIndexScanStage::getSeekKeyHigh() const { - if (!_seekKeyHighHolder) { +const key_string::Value* SimpleIndexScanStage::getSeekKeyHigh() const { + if (!_seekKeyHigh) { return nullptr; } - auto [tag, value] = _seekKeyHighHolder->getViewOfValue(); + auto [tag, value] = _seekKeyHighHolder.getViewOfValue(); return value::getKeyStringView(value); } @@ -637,6 +677,7 @@ GenericIndexScanStage::GenericIndexScanStage(UUID collUuid, boost::optional indexKeySlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, + boost::optional indexIdentSlot, IndexKeysInclusionSet indexKeysToInclude, value::SlotVector vars, PlanYieldPolicy* yieldPolicy, @@ -649,6 +690,7 @@ GenericIndexScanStage::GenericIndexScanStage(UUID collUuid, indexKeySlot, recordIdSlot, snapshotIdSlot, + indexIdentSlot, indexKeysToInclude, std::move(vars), yieldPolicy, @@ -668,6 +710,7 @@ std::unique_ptr GenericIndexScanStage::clone() const { _indexKeySlot, _recordIdSlot, _snapshotIdSlot, + _indexIdentSlot, _indexKeysToInclude, _vars, _yieldPolicy, @@ -732,11 +775,11 @@ bool GenericIndexScanStage::validateKey(const boost::optional& k _keyBuffer.reset(); BSONObjBuilder keyBuilder(_keyBuffer); - KeyString::toBsonSafe(key->keyString.getBuffer(), - 
key->keyString.getSize(), - _params.ord, - key->keyString.getTypeBits(), - keyBuilder); + key_string::toBsonSafe(key->keyString.getBuffer(), + key->keyString.getSize(), + _params.ord, + key->keyString.getTypeBits(), + keyBuilder); auto bsonKey = keyBuilder.done(); switch (_checker->checkKey(bsonKey, &_seekPoint)) { diff --git a/src/mongo/db/exec/sbe/stages/ix_scan.h b/src/mongo/db/exec/sbe/stages/ix_scan.h index 4c8a206dcf22e..708e5f450c807 100644 --- a/src/mongo/db/exec/sbe/stages/ix_scan.h +++ b/src/mongo/db/exec/sbe/stages/ix_scan.h @@ -29,14 +29,41 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/ordering.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/db_raii.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/collection_helpers.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/uuid.h" namespace mongo::sbe { @@ -47,7 +74,8 @@ namespace mongo::sbe { * The "output" slots are * - 'indexKeySlot': the "KeyString" representing the index entry, * - 'recordIdSlot': a reference that can be used to fetch the entire document, - * - 'snapshotIdSlot': the storage snapshot that this index scan is reading from, and + * - 'snapshotIdSlot': the storage snapshot that this index scan is reading from, + * - 'indexIdentSlot': the ident of the index being read from, and * - 'vars': one slot for each value in the index key that should be "projected" out of the entry. * * The 'indexKeysToInclude' bitset determines which values are included in the projection based @@ -66,6 +94,7 @@ class IndexScanStageBase : public PlanStage { boost::optional indexKeySlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, + boost::optional indexIdentSlot, IndexKeysInclusionSet indexKeysToInclude, value::SlotVector vars, PlanYieldPolicy* yieldPolicy, @@ -132,21 +161,20 @@ class IndexScanStageBase : public PlanStage { const boost::optional _indexKeySlot; const boost::optional _recordIdSlot; const boost::optional _snapshotIdSlot; + const boost::optional _indexIdentSlot; const IndexKeysInclusionSet _indexKeysToInclude; const value::SlotVector _vars; vm::ByteCode _bytecode; - // These members are default constructed to boost::none and are initialized when 'prepare()' - // is called. Once they are set, they are never modified again. 
- boost::optional _collName; - boost::optional _catalogEpoch; + CollectionRef _coll; - CollectionPtr _coll; + value::OwnedValueAccessor _recordAccessor; + value::OwnedValueAccessor _recordIdAccessor; + value::OwnedValueAccessor _snapshotIdAccessor; - std::unique_ptr _recordAccessor; - std::unique_ptr _recordIdAccessor; - std::unique_ptr _snapshotIdAccessor; + value::OwnedValueAccessor _indexIdentAccessor; + value::ViewOfValueAccessor _indexIdentViewAccessor; // This field holds the latest snapshot ID that we've received from _opCtx->recoveryUnit(). // This field gets initialized by prepare(), and it gets updated each time doRestoreState() is @@ -160,7 +188,6 @@ class IndexScanStageBase : public PlanStage { std::unique_ptr _cursor; const IndexCatalogEntry* _entry{nullptr}; - std::string _indexIdent; boost::optional _ordering{boost::none}; boost::optional _nextRecord; @@ -191,12 +218,11 @@ class IndexScanStageBase : public PlanStage { * * Debug string representation: * - * ixscan indexKeySlot? recordIdSlot? snapshotIdSlot? [slot_1 = fieldNo_1, ..., slot2 = fieldNo_n] - * collectionUuid indexName forward + * ixscan indexKeySlot? recordIdSlot? snapshotIdSlot? indexIdentSlot? + * [slot_1 = fieldNo_1, ..., slot2 = fieldNo_n] collectionUuid indexName forward * - * ixseek lowKey highKey indexKeySlot? recordIdSlot? snapshotIdSlot? - * [slot_1 = fieldNo_1, ..., slot2 = fieldNo_n] - * collectionUuid indexName forward + * ixseek lowKey highKey indexKeySlot? recordIdSlot? snapshotIdSlot? indexIdentSlot? + * [slot_1 = fieldNo_1, ..., slot2 = fieldNo_n] collectionUuid indexName forward */ class SimpleIndexScanStage final : public IndexScanStageBase { public: @@ -206,6 +232,7 @@ class SimpleIndexScanStage final : public IndexScanStageBase { boost::optional indexKeySlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, + boost::optional indexIdentSlot, IndexKeysInclusionSet indexKeysToInclude, value::SlotVector vars, std::unique_ptr seekKeyLow, @@ -229,8 +256,8 @@ class SimpleIndexScanStage final : public IndexScanStageBase { bool validateKey(const boost::optional& key) override; private: - const KeyString::Value& getSeekKeyLow() const; - const KeyString::Value* getSeekKeyHigh() const; + const key_string::Value& getSeekKeyLow() const; + const key_string::Value* getSeekKeyHigh() const; std::unique_ptr _seekKeyLow; std::unique_ptr _seekKeyHigh; @@ -239,8 +266,8 @@ class SimpleIndexScanStage final : public IndexScanStageBase { std::unique_ptr _seekKeyLowCode; std::unique_ptr _seekKeyHighCode; - std::unique_ptr _seekKeyLowHolder; - std::unique_ptr _seekKeyHighHolder; + value::OwnedValueAccessor _seekKeyLowHolder; + value::OwnedValueAccessor _seekKeyHighHolder; }; /** @@ -255,15 +282,14 @@ class SimpleIndexScanStage final : public IndexScanStageBase { * * Debug string representation: * - * ixscan_generic indexBounds indexKeySlot? recordIdSlot? snapshotIdSlot? - * [slot_1 = fieldNo_1, ..., slot2 = fieldNo_n] - * collectionUuid indexName forward + * ixscan_generic indexBounds indexKeySlot? recordIdSlot? snapshotIdSlot? indexIdentSlot? 
+ * [slot_1 = fieldNo_1, ..., slot2 = fieldNo_n] collectionUuid indexName forward */ struct GenericIndexScanStageParams { std::unique_ptr indexBounds; const BSONObj keyPattern; const int direction; - const KeyString::Version version; + const key_string::Version version; const Ordering ord; }; class GenericIndexScanStage final : public IndexScanStageBase { @@ -274,6 +300,7 @@ class GenericIndexScanStage final : public IndexScanStageBase { boost::optional indexKeySlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, + boost::optional indexIdentSlot, IndexKeysInclusionSet indexKeysToInclude, value::SlotVector vars, PlanYieldPolicy* yieldPolicy, diff --git a/src/mongo/db/exec/sbe/stages/limit_skip.cpp b/src/mongo/db/exec/sbe/stages/limit_skip.cpp index 8343f56ca96e7..5bcb6679070b7 100644 --- a/src/mongo/db/exec/sbe/stages/limit_skip.cpp +++ b/src/mongo/db/exec/sbe/stages/limit_skip.cpp @@ -27,11 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/exec/sbe/stages/limit_skip.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/limit_skip.h" +#include "mongo/util/assert_util_core.h" namespace mongo::sbe { LimitSkipStage::LimitSkipStage(std::unique_ptr input, diff --git a/src/mongo/db/exec/sbe/stages/limit_skip.h b/src/mongo/db/exec/sbe/stages/limit_skip.h index 7fc366a217448..5690580265d6f 100644 --- a/src/mongo/db/exec/sbe/stages/limit_skip.h +++ b/src/mongo/db/exec/sbe/stages/limit_skip.h @@ -29,8 +29,19 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/loop_join.cpp b/src/mongo/db/exec/sbe/stages/loop_join.cpp index 6e9fd2b528cd7..36c34bfd605e2 100644 --- a/src/mongo/db/exec/sbe/stages/loop_join.cpp +++ b/src/mongo/db/exec/sbe/stages/loop_join.cpp @@ -27,13 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/size_estimator.h" #include "mongo/db/exec/sbe/stages/loop_join.h" #include "mongo/db/exec/sbe/stages/stage_visitors.h" - -#include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/stages/loop_join.h b/src/mongo/db/exec/sbe/stages/loop_join.h index 2d105e96b83db..778ac8aabf417 100644 --- a/src/mongo/db/exec/sbe/stages/loop_join.h +++ b/src/mongo/db/exec/sbe/stages/loop_join.h @@ -29,9 +29,19 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { enum class JoinType : uint8_t { Inner, Left, Right }; diff --git a/src/mongo/db/exec/sbe/stages/makeobj.cpp b/src/mongo/db/exec/sbe/stages/makeobj.cpp index 4ae15961a9c00..45bc8c5e6c1f4 100644 --- a/src/mongo/db/exec/sbe/stages/makeobj.cpp +++ b/src/mongo/db/exec/sbe/stages/makeobj.cpp @@ -27,13 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/makeobj.h" - +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/makeobj.h" #include "mongo/db/exec/sbe/values/bson.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/shared_buffer.h" namespace mongo::sbe { template @@ -56,6 +68,7 @@ MakeObjStageBase::MakeObjStageBase(std::unique_ptr input, _fieldBehavior(fieldBehavior), _fields(std::move(fields)), _projectFields(std::move(projectFields)), + _fieldNames(buildFieldNames(_fields, _projectFields)), _projectVars(std::move(projectVars)), _forceNewObject(forceNewObject), _returnOldObject(returnOldObject) { @@ -109,20 +122,14 @@ void MakeObjStageBase::prepare(CompileCtx& ctx) { if (_rootSlot) { _root = _children[0]->getAccessor(ctx, *_rootSlot); } - for (auto& p : _fields) { - // Mark the values from _fields with 'std::numeric_limits::max()'. - auto [it, inserted] = _allFieldsMap.emplace(p, std::numeric_limits::max()); - uassert(4822818, str::stream() << "duplicate field: " << p, inserted); - } for (size_t idx = 0; idx < _projectFields.size(); ++idx) { auto& p = _projectFields[idx]; - // Mark the values from _projectFields with their corresponding index. 
- auto [it, inserted] = _allFieldsMap.emplace(p, idx); - uassert(4822819, str::stream() << "duplicate field: " << p, inserted); _projects.emplace_back(p, _children[0]->getAccessor(ctx, _projectVars[idx])); } + _visited.resize(_projectFields.size()); + _compiled = true; } @@ -169,75 +176,148 @@ void MakeObjStageBase::produceObject() { _obj.reset(tag, val); + memset(_visited.data(), 0, _projectFields.size()); + + const bool isInclusion = _fieldBehavior == FieldBehavior::keep; + if (_root) { auto [tag, val] = _root->getViewOfValue(); - size_t computedFieldsSize = _projectFields.size(); - size_t projectedFieldsSize = _fields.size(); - size_t nFieldsNeededIfInclusion = projectedFieldsSize; + size_t numFieldsRemaining = _fields.size() + _projectFields.size(); + size_t numComputedFieldsRemaining = _projectFields.size(); + + const size_t numFieldsRemainingThreshold = isInclusion ? 1 : 0; + if (tag == value::TypeTags::bsonObject) { - if (!(nFieldsNeededIfInclusion == 0 && _fieldBehavior == FieldBehavior::keep)) { - auto be = value::bitcastTo(val); - auto size = ConstDataView(be).read>(); - auto end = be + size; - - // Simple heuristic to determine number of fields. - size_t approximatedNumFieldsInRoot = (size / 16); - // If the field behaviour is 'keep', then we know that the output will have - // 'projectedFieldsSize + computedFieldsSize' fields. Otherwise, we use the - // approximated number of fields in the root document and then add - // 'projectedFieldsSize' to it to achieve a better approximation (Note: we don't - // subtract 'computedFieldsSize' from the result in the latter case, as it might - // lead to a negative number) - size_t numOutputFields = _fieldBehavior == FieldBehavior::keep - ? projectedFieldsSize + computedFieldsSize - : approximatedNumFieldsInRoot + projectedFieldsSize; - obj->reserve(numOutputFields); - // Skip document length. - be += sizeof(int32_t); + auto be = value::bitcastTo(val); + const auto size = ConstDataView(be).read>(); + const auto end = be + size; + + // Skip document length. + be += sizeof(int32_t); + + // Simple heuristic to approximate the number of fields in '_root'. + size_t approxNumFieldsInRoot = size / 16; + + // If the field behaviour is 'keep', then we know that the output will have exactly + // '_fields.size() + _projectFields.size()' fields. Otherwise we use '_fields.size()' + // plus the approximated number of fields in '_root' to approximate the number of + // fields in the output object. (Note: we don't subtract '_projectFields.size()' + // from the result in the latter case, as it might lead to a negative number.) + size_t approxNumOutputFields = isInclusion ? _fields.size() + _projectFields.size() + : _fields.size() + approxNumFieldsInRoot; + obj->reserve(approxNumOutputFields); + + // Loop over _root's fields until numFieldsRemaining - numComputedFieldsRemaining == 0 + // AND until one of the follow is true: + // (1) numComputedFieldsRemaining == 1 and isInclusion == true; -OR- + // (2) numComputedFieldsRemaining == 0. 
+ if (numFieldsRemaining > numFieldsRemainingThreshold || + numFieldsRemaining != numComputedFieldsRemaining) { while (be != end - 1) { auto sv = bson::fieldNameAndLength(be); - auto key = StringMapHasher{}.hashed_key(StringData(sv)); - - if (!isFieldProjectedOrRestricted(key)) { - auto [tag, val] = bson::convertFrom(be, end, sv.size()); - auto [copyTag, copyVal] = value::copyValue(tag, val); - obj->push_back(sv, copyTag, copyVal); - --nFieldsNeededIfInclusion; + auto [found, projectIdx] = lookupField(sv); + + if (projectIdx == std::numeric_limits::max()) { + if (found == isInclusion) { + auto [fieldTag, fieldVal] = bson::convertFrom(be, end, sv.size()); + auto [copyTag, copyVal] = value::copyValue(fieldTag, fieldVal); + obj->push_back(sv, copyTag, copyVal); + } + + numFieldsRemaining -= found; + } else { + projectField(obj, projectIdx); + _visited[projectIdx] = 1; + --numFieldsRemaining; + --numComputedFieldsRemaining; } - if (nFieldsNeededIfInclusion == 0 && _fieldBehavior == FieldBehavior::keep) { + if (numFieldsRemaining <= numFieldsRemainingThreshold && + numFieldsRemaining == numComputedFieldsRemaining) { + if (!isInclusion) { + be = bson::advance(be, sv.size()); + } + break; } be = bson::advance(be, sv.size()); } } + + // If this is an exclusion projection and 'be' has not reached the end of the input + // object, copy over the remaining fields from the input object into 'bob'. + if (!isInclusion) { + while (be != end - 1) { + auto sv = bson::fieldNameAndLength(be); + + auto [fieldTag, fieldVal] = bson::convertFrom(be, end, sv.size()); + auto [copyTag, copyVal] = value::copyValue(fieldTag, fieldVal); + obj->push_back(sv, copyTag, copyVal); + + be = bson::advance(be, sv.size()); + } + } } else if (tag == value::TypeTags::Object) { - if (!(nFieldsNeededIfInclusion == 0 && _fieldBehavior == FieldBehavior::keep)) { - auto objRoot = value::getObjectView(val); - auto numOutputFields = _fieldBehavior == FieldBehavior::keep - ? projectedFieldsSize + computedFieldsSize - : objRoot->size() + projectedFieldsSize; - obj->reserve(numOutputFields); - for (size_t idx = 0; idx < objRoot->size(); ++idx) { - auto sv = objRoot->field(idx); - auto key = StringMapHasher{}.hashed_key(StringData(sv)); - - if (!isFieldProjectedOrRestricted(key)) { - auto [tag, val] = objRoot->getAt(idx); - auto [copyTag, copyVal] = value::copyValue(tag, val); - obj->push_back(sv, copyTag, copyVal); - --nFieldsNeededIfInclusion; + auto objRoot = value::getObjectView(val); + size_t idx = 0; + + // If the field behaviour is 'keep', then we know that the output will have exactly + // '_fields.size() + _projectFields.size()' fields. Otherwise use '_fields.size()' + // plus the number of fields in '_root' to approximate the number of fields in the + // output object. (Note: we don't subtract '_projectFields.size()' from the result + // in the latter case, as it might lead to a negative number.) + size_t approxNumOutputFields = isInclusion ? _fields.size() + _projectFields.size() + : _fields.size() + objRoot->size(); + obj->reserve(approxNumOutputFields); + + // Loop over _root's fields until numFieldsRemaining - numComputedFieldsRemaining == 0 + // AND until one of the follow is true: + // (1) numComputedFieldsRemaining == 1 and isInclusion == true; -OR- + // (2) numComputedFieldsRemaining == 0. 
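
The loop described by the comments above replaces the old per-field map probing with a single pass over the root object driven by two counters and a visited bitmap: the scan stops as soon as only computed fields remain to be emitted, and an exclusion projection bulk-copies whatever is left of the root. The sketch below reproduces that control flow over a toy field list; `std::find` stands in for the field-name index and plain string/int pairs stand in for BSON fields, so it is illustrative only.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

using Field = std::pair<std::string, int>;  // toy stand-in for a BSON field

std::vector<Field> makeObj(const std::vector<Field>& root,
                           const std::vector<std::string>& fields,   // kept or dropped names
                           const std::vector<Field>& computed,       // computed fields
                           bool isInclusion) {
    std::vector<Field> out;
    std::vector<bool> visited(computed.size(), false);

    size_t numFieldsRemaining = fields.size() + computed.size();
    size_t numComputedFieldsRemaining = computed.size();
    const size_t threshold = isInclusion ? 1 : 0;

    size_t idx = 0;
    if (numFieldsRemaining > threshold || numFieldsRemaining != numComputedFieldsRemaining) {
        for (; idx < root.size(); ++idx) {
            const std::string& name = root[idx].first;
            auto computedIt = std::find_if(computed.begin(), computed.end(),
                                           [&name](const Field& f) { return f.first == name; });
            if (computedIt != computed.end()) {
                // A computed field shadows the root field at this position.
                size_t c = static_cast<size_t>(computedIt - computed.begin());
                out.push_back(*computedIt);
                visited[c] = true;
                --numFieldsRemaining;
                --numComputedFieldsRemaining;
            } else {
                bool found = std::find(fields.begin(), fields.end(), name) != fields.end();
                if (found == isInclusion)
                    out.push_back(root[idx]);  // root field kept by the projection
                if (found)
                    --numFieldsRemaining;
            }
            // Stop scanning once only computed fields remain to be emitted
            // (at most one of them for an inclusion projection).
            if (numFieldsRemaining <= threshold &&
                numFieldsRemaining == numComputedFieldsRemaining) {
                ++idx;
                break;
            }
        }
    }
    // Exclusion: everything left in the root is kept, with no more name lookups.
    if (!isInclusion)
        for (; idx < root.size(); ++idx)
            out.push_back(root[idx]);
    // Computed fields never seen in the root are appended at the end.
    for (size_t c = 0; c < computed.size(); ++c)
        if (!visited[c])
            out.push_back(computed[c]);
    return out;
}

int main() {
    std::vector<Field> root{{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}};
    for (const auto& [n, v] : makeObj(root, {"a"}, {{"c", 30}}, /*isInclusion=*/true))
        std::cout << n << "=" << v << " ";   // a=1 c=30
    std::cout << "\n";
    for (const auto& [n, v] : makeObj(root, {"a"}, {{"e", 50}}, /*isInclusion=*/false))
        std::cout << n << "=" << v << " ";   // b=2 c=3 d=4 e=50
    std::cout << "\n";
}
```
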
+ if (numFieldsRemaining > numFieldsRemainingThreshold || + numFieldsRemaining != numComputedFieldsRemaining) { + for (idx = 0; idx < objRoot->size(); ++idx) { + auto sv = StringData(objRoot->field(idx)); + auto [found, projectIdx] = lookupField(sv); + + if (projectIdx == std::numeric_limits::max()) { + if (found == isInclusion) { + auto [fieldTag, fieldVal] = objRoot->getAt(idx); + auto [copyTag, copyVal] = value::copyValue(fieldTag, fieldVal); + obj->push_back(sv, copyTag, copyVal); + } + + numFieldsRemaining -= found; + } else { + projectField(obj, projectIdx); + _visited[projectIdx] = 1; + --numFieldsRemaining; + --numComputedFieldsRemaining; } - if (nFieldsNeededIfInclusion == 0 && _fieldBehavior == FieldBehavior::keep) { - return; + if (numFieldsRemaining <= numFieldsRemainingThreshold && + numFieldsRemaining == numComputedFieldsRemaining) { + ++idx; + break; } } } + + // If this is an exclusion projection and 'idx' has not reached the end of the input + // object, copy over the remaining fields from the input object into 'bob'. + if (!isInclusion) { + for (; idx < objRoot->size(); ++idx) { + auto sv = StringData(objRoot->field(idx)); + auto [fieldTag, fieldVal] = objRoot->getAt(idx); + auto [copyTag, copyVal] = value::copyValue(fieldTag, fieldVal); + + obj->push_back(sv, copyTag, copyVal); + } + } } else { - for (size_t idx = 0; idx < _projects.size(); ++idx) { + for (size_t idx = 0; idx < _projectFields.size(); ++idx) { projectField(obj, idx); } // If the result is non empty object return it. @@ -254,8 +334,10 @@ void MakeObjStageBase::produceObject() { return; } } - for (size_t idx = 0; idx < _projects.size(); ++idx) { - projectField(obj, idx); + for (size_t idx = 0; idx < _projectFields.size(); ++idx) { + if (!_visited[idx]) { + projectField(obj, idx); + } } } @@ -269,54 +351,125 @@ void MakeObjStageBase::produceObject() { _obj.reset(value::TypeTags::bsonObject, value::bitcastFrom(data)); }; + memset(_visited.data(), 0, _projectFields.size()); + + const bool isInclusion = _fieldBehavior == FieldBehavior::keep; + if (_root) { auto [tag, val] = _root->getViewOfValue(); - size_t nFieldsNeededIfInclusion = _fields.size(); - if (tag == value::TypeTags::bsonObject) { - if (!(nFieldsNeededIfInclusion == 0 && _fieldBehavior == FieldBehavior::keep)) { - auto be = value::bitcastTo(val); - const auto end = be + ConstDataView(be).read>(); + size_t numFieldsRemaining = _fields.size() + _projectFields.size(); + size_t numComputedFieldsRemaining = _projectFields.size(); - // Skip document length. - be += sizeof(int32_t); - while (be != end - 1) { - auto sv = bson::fieldNameAndLength(be); - auto key = StringMapHasher{}.hashed_key(StringData(sv)); + const size_t numFieldsRemainingThreshold = isInclusion ? 1 : 0; - auto nextBe = bson::advance(be, sv.size()); + if (tag == value::TypeTags::bsonObject) { + auto be = value::bitcastTo(val); + const auto size = ConstDataView(be).read>(); + const auto end = be + size; + + // Skip document length. + be += sizeof(int32_t); + + // Loop over _root's fields until numFieldsRemaining - numComputedFieldsRemaining == 0 + // AND until one of the follow is true: + // (1) numComputedFieldsRemaining == 1 and isInclusion == true; -OR- + // (2) numComputedFieldsRemaining == 0. 
+ if (numFieldsRemaining > numFieldsRemainingThreshold || + numFieldsRemaining != numComputedFieldsRemaining) { + while (be != end - 1) { + const char* nextBe = nullptr; - if (!isFieldProjectedOrRestricted(key)) { - bob.append(BSONElement(be, sv.size() + 1, nextBe - be)); - --nFieldsNeededIfInclusion; + auto sv = bson::fieldNameAndLength(be); + auto [found, projectIdx] = lookupField(sv); + + if (projectIdx == std::numeric_limits::max()) { + if (found == isInclusion) { + nextBe = bson::advance(be, sv.size()); + bob.append(BSONElement(be, sv.size() + 1, nextBe - be)); + } + + numFieldsRemaining -= found; + } else { + projectField(&bob, projectIdx); + _visited[projectIdx] = 1; + --numFieldsRemaining; + --numComputedFieldsRemaining; } - if (nFieldsNeededIfInclusion == 0 && _fieldBehavior == FieldBehavior::keep) { + if (numFieldsRemaining <= numFieldsRemainingThreshold && + numFieldsRemaining == numComputedFieldsRemaining) { + if (!isInclusion) { + be = nextBe ? nextBe : bson::advance(be, sv.size()); + } + break; } + be = nextBe ? nextBe : bson::advance(be, sv.size()); + } + } + + // If this is an exclusion projection and 'be' has not reached the end of the input + // object, copy over the remaining fields from the input object into 'bob'. + if (!isInclusion) { + while (be != end - 1) { + auto sv = bson::fieldNameAndLength(be); + auto nextBe = bson::advance(be, sv.size()); + + bob.append(BSONElement(be, sv.size() + 1, nextBe - be)); + be = nextBe; } } } else if (tag == value::TypeTags::Object) { - if (!(nFieldsNeededIfInclusion == 0 && _fieldBehavior == FieldBehavior::keep)) { - auto objRoot = value::getObjectView(val); - for (size_t idx = 0; idx < objRoot->size(); ++idx) { - auto key = StringMapHasher{}.hashed_key(StringData(objRoot->field(idx))); - - if (!isFieldProjectedOrRestricted(key)) { - auto [tag, val] = objRoot->getAt(idx); - bson::appendValueToBsonObj(bob, objRoot->field(idx), tag, val); - --nFieldsNeededIfInclusion; + auto objRoot = value::getObjectView(val); + size_t idx = 0; + + // Loop over _root's fields until numFieldsRemaining - numComputedFieldsRemaining == 0 + // AND until one of the follow is true: + // (1) numComputedFieldsRemaining == 1 and isInclusion == true; -OR- + // (2) numComputedFieldsRemaining == 0. + if (numFieldsRemaining > numFieldsRemainingThreshold || + numFieldsRemaining != numComputedFieldsRemaining) { + for (idx = 0; idx < objRoot->size(); ++idx) { + auto sv = StringData(objRoot->field(idx)); + auto [found, projectIdx] = lookupField(sv); + + if (projectIdx == std::numeric_limits::max()) { + if (found == isInclusion) { + auto [fieldTag, fieldVal] = objRoot->getAt(idx); + bson::appendValueToBsonObj(bob, sv, fieldTag, fieldVal); + } + + numFieldsRemaining -= found; + } else { + projectField(&bob, projectIdx); + _visited[projectIdx] = 1; + --numFieldsRemaining; + --numComputedFieldsRemaining; } - if (nFieldsNeededIfInclusion == 0 && _fieldBehavior == FieldBehavior::keep) { + if (numFieldsRemaining <= numFieldsRemainingThreshold && + numFieldsRemaining == numComputedFieldsRemaining) { + ++idx; break; } } } + + // If this is an exclusion projection and 'idx' has not reached the end of the input + // object, copy over the remaining fields from the input object into 'bob'. 
+ if (!isInclusion) { + for (; idx < objRoot->size(); ++idx) { + auto sv = StringData(objRoot->field(idx)); + auto [fieldTag, fieldVal] = objRoot->getAt(idx); + + bson::appendValueToBsonObj(bob, sv, fieldTag, fieldVal); + } + } } else { - for (size_t idx = 0; idx < _projects.size(); ++idx) { + for (size_t idx = 0; idx < _projectFields.size(); ++idx) { projectField(&bob, idx); } // If the result is non empty object return it. @@ -335,8 +488,10 @@ void MakeObjStageBase::produceObject() { return; } } - for (size_t idx = 0; idx < _projects.size(); ++idx) { - projectField(&bob, idx); + for (size_t idx = 0; idx < _projectFields.size(); ++idx) { + if (!_visited[idx]) { + projectField(&bob, idx); + } } finish(); } diff --git a/src/mongo/db/exec/sbe/stages/makeobj.h b/src/mongo/db/exec/sbe/stages/makeobj.h index 16575b5a4565f..460831746cf2e 100644 --- a/src/mongo/db/exec/sbe/stages/makeobj.h +++ b/src/mongo/db/exec/sbe/stages/makeobj.h @@ -29,9 +29,28 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/makeobj_enums.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/util/indexed_string_vector.h" namespace mongo::sbe { /** @@ -121,23 +140,23 @@ class MakeObjStageBase final : public PlanStage { void projectField(value::Object* obj, size_t idx); void projectField(UniqueBSONObjBuilder* bob, size_t idx); - bool isFieldProjectedOrRestricted(const StringMapHashedKey& key) const { - bool foundKey = false; - bool projected = false; - bool restricted = false; - - if (!_allFieldsMap.empty()) { - if (auto it = _allFieldsMap.find(key); it != _allFieldsMap.end()) { - foundKey = true; - projected = it->second != std::numeric_limits::max(); - restricted = *_fieldBehavior != FieldBehavior::keep; - } - } - if (!foundKey) { - restricted = *_fieldBehavior == FieldBehavior::keep; + std::pair lookupField(StringData sv) const { + auto pos = _fieldNames.findPos(sv); + + if (pos == IndexedStringVector::npos) { + return {false, pos}; + } else if (pos < _fields.size()) { + return {true, std::numeric_limits::max()}; + } else { + return {true, pos - _fields.size()}; } + } - return projected || restricted; + IndexedStringVector buildFieldNames(const std::vector& fields, + const std::vector& projectFields) { + auto names = fields; + names.insert(names.end(), projectFields.begin(), projectFields.end()); + return IndexedStringVector(std::move(names)); } void produceObject(); @@ -147,13 +166,13 @@ class MakeObjStageBase final : public PlanStage { const boost::optional _fieldBehavior; const std::vector _fields; const std::vector _projectFields; + const IndexedStringVector _fieldNames; const value::SlotVector _projectVars; const bool _forceNewObject; const bool _returnOldObject; - StringMap _allFieldsMap; - std::vector> _projects; + absl::InlinedVector _visited; value::OwnedValueAccessor _obj; diff --git a/src/mongo/db/exec/sbe/stages/merge_join.cpp b/src/mongo/db/exec/sbe/stages/merge_join.cpp index 4908633d020c2..52d1767f619de 100644 --- a/src/mongo/db/exec/sbe/stages/merge_join.cpp +++ b/src/mongo/db/exec/sbe/stages/merge_join.cpp @@ -27,13 +27,20 @@ * it in 
the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/exec/sbe/stages/merge_join.h" +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" -#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/merge_join.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/exec/sbe/stages/merge_join.h b/src/mongo/db/exec/sbe/stages/merge_join.h index ff94784ac0d19..52f76cb533367 100644 --- a/src/mongo/db/exec/sbe/stages/merge_join.h +++ b/src/mongo/db/exec/sbe/stages/merge_join.h @@ -29,10 +29,20 @@ #pragma once +#include +#include +#include #include +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/row.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/plan_stats.cpp b/src/mongo/db/exec/sbe/stages/plan_stats.cpp index 3fe032c60bca2..c43879a50d0ca 100644 --- a/src/mongo/db/exec/sbe/stages/plan_stats.cpp +++ b/src/mongo/db/exec/sbe/stages/plan_stats.cpp @@ -27,11 +27,7 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/plan_stats.h" - -#include +#include #include "mongo/db/exec/plan_stats_walker.h" #include "mongo/db/exec/sbe/stages/plan_stats.h" diff --git a/src/mongo/db/exec/sbe/stages/plan_stats.h b/src/mongo/db/exec/sbe/stages/plan_stats.h index cd093715921b5..6e81c31237044 100644 --- a/src/mongo/db/exec/sbe/stages/plan_stats.h +++ b/src/mongo/db/exec/sbe/stages/plan_stats.h @@ -30,7 +30,19 @@ #pragma once #include "mongo/db/exec/plan_stats.h" + +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/plan_stats_visitor.h" +#include "mongo/db/query/plan_summary_stats.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/query/tree_walker.h" #include "mongo/db/storage/column_store.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/stages/project.cpp b/src/mongo/db/exec/sbe/stages/project.cpp index 326c023879837..5c4cf99f9e51e 100644 --- a/src/mongo/db/exec/sbe/stages/project.cpp +++ b/src/mongo/db/exec/sbe/stages/project.cpp @@ -27,12 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/exec/sbe/stages/project.h" +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/project.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/util/str.h" namespace mongo { namespace sbe { diff --git a/src/mongo/db/exec/sbe/stages/project.h b/src/mongo/db/exec/sbe/stages/project.h index bf4e169c8c930..8d3931c13b352 100644 --- a/src/mongo/db/exec/sbe/stages/project.h +++ b/src/mongo/db/exec/sbe/stages/project.h @@ -29,9 +29,19 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/scan.cpp b/src/mongo/db/exec/sbe/stages/scan.cpp index 0f5fa3ef7cf2d..30707ad67841d 100644 --- a/src/mongo/db/exec/sbe/stages/scan.cpp +++ b/src/mongo/db/exec/sbe/stages/scan.cpp @@ -27,18 +27,38 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/scan.h" - -#include "mongo/config.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/client.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" -#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/scan.h" +#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/trial_run_tracker.h" -#include "mongo/db/index/index_access_method.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/repl/optime.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep #include "mongo/util/str.h" namespace mongo { @@ -47,51 +67,49 @@ ScanStage::ScanStage(UUID collectionUuid, boost::optional recordSlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, - boost::optional indexIdSlot, + boost::optional indexIdentSlot, boost::optional indexKeySlot, boost::optional indexKeyPatternSlot, boost::optional oplogTsSlot, - std::vector fields, - value::SlotVector vars, - boost::optional seekKeySlot, + std::vector scanFieldNames, + value::SlotVector scanFieldSlots, + boost::optional seekRecordIdSlot, + boost::optional minRecordIdSlot, + boost::optional maxRecordIdSlot, bool forward, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, ScanCallbacks scanCallbacks, bool lowPriority, bool useRandomCursor, - bool participateInTrialRunTracking) - : PlanStage( - seekKeySlot ? 
"seek"_sd : "scan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking), - _collUuid(collectionUuid), + bool participateInTrialRunTracking, + bool excludeScanEndRecordId) + : PlanStage(seekRecordIdSlot ? "seek"_sd : "scan"_sd, + yieldPolicy, + nodeId, + participateInTrialRunTracking), _recordSlot(recordSlot), _recordIdSlot(recordIdSlot), _snapshotIdSlot(snapshotIdSlot), - _indexIdSlot(indexIdSlot), + _indexIdentSlot(indexIdentSlot), _indexKeySlot(indexKeySlot), _indexKeyPatternSlot(indexKeyPatternSlot), _oplogTsSlot(oplogTsSlot), - _fields(std::move(fields)), - _vars(std::move(vars)), - _seekKeySlot(seekKeySlot), + _scanFieldNames(std::move(scanFieldNames)), + _scanFieldSlots(std::move(scanFieldSlots)), + _seekRecordIdSlot(seekRecordIdSlot), + _minRecordIdSlot(minRecordIdSlot), + _maxRecordIdSlot(maxRecordIdSlot), _forward(forward), - _scanCallbacks(std::move(scanCallbacks)), _useRandomCursor(useRandomCursor), + _collUuid(collectionUuid), + _scanCallbacks(std::move(scanCallbacks)), + _excludeScanEndRecordId(excludeScanEndRecordId), _lowPriority(lowPriority) { - invariant(_fields.size() == _vars.size()); - invariant(!_seekKeySlot || _forward); - tassert(5567202, - "The '_oplogTsSlot' cannot be set without 'ts' field in '_fields'", - !_oplogTsSlot || - (std::find(_fields.begin(), _fields.end(), repl::OpTime::kTimestampFieldName) != - _fields.end())); + invariant(_scanFieldNames.size() == _scanFieldSlots.size()); + invariant(!_seekRecordIdSlot || _forward); // We cannot use a random cursor if we are seeking or requesting a reverse scan. - invariant(!_useRandomCursor || (!_seekKeySlot && _forward)); - for (size_t idx = 0; idx < _fields.size(); ++idx) { - const char* str = _fields[idx].c_str(); - auto len = _fields[idx].size(); - _fieldsBloomFilter.insert(str, len); - } + invariant(!_useRandomCursor || (!_seekRecordIdSlot && _forward)); } std::unique_ptr ScanStage::clone() const { @@ -99,72 +117,59 @@ std::unique_ptr ScanStage::clone() const { _recordSlot, _recordIdSlot, _snapshotIdSlot, - _indexIdSlot, + _indexIdentSlot, _indexKeySlot, _indexKeyPatternSlot, _oplogTsSlot, - _fields, - _vars, - _seekKeySlot, + _scanFieldNames.getUnderlyingVector(), + _scanFieldSlots, + _seekRecordIdSlot, + _minRecordIdSlot, + _maxRecordIdSlot, _forward, _yieldPolicy, _commonStats.nodeId, _scanCallbacks, _lowPriority, _useRandomCursor, - _participateInTrialRunTracking); + _participateInTrialRunTracking, + _excludeScanEndRecordId); } void ScanStage::prepare(CompileCtx& ctx) { - if (_recordSlot) { - _recordAccessor = std::make_unique(); - } - - if (_recordIdSlot) { - _recordIdAccessor = std::make_unique(); - } + _scanFieldAccessors.resize(_scanFieldNames.size()); + for (size_t idx = 0; idx < _scanFieldNames.size(); ++idx) { + auto accessorPtr = &_scanFieldAccessors[idx]; - _fieldAccessors.resize(_fields.size()); - for (size_t idx = 0; idx < _fields.size(); ++idx) { - auto accessorPtr = &_fieldAccessors[idx]; + auto [itRename, insertedRename] = + _scanFieldAccessorsMap.emplace(_scanFieldSlots[idx], accessorPtr); + uassert( + 4822815, str::stream() << "duplicate field: " << _scanFieldSlots[idx], insertedRename); - auto [itRename, insertedRename] = _varAccessors.emplace(_vars[idx], accessorPtr); - uassert(4822815, str::stream() << "duplicate field: " << _vars[idx], insertedRename); - - if (_oplogTsSlot && _fields[idx] == repl::OpTime::kTimestampFieldName) { + if (_oplogTsSlot && _scanFieldNames[idx] == repl::OpTime::kTimestampFieldName) { + // Oplog scans only: cache a pointer to the "ts" field accessor for fast 
access. _tsFieldAccessor = accessorPtr; } + } + + if (_seekRecordIdSlot) { + _seekRecordIdAccessor = ctx.getAccessor(*_seekRecordIdSlot); + } - const size_t offset = computeFieldMaskOffset(_fields[idx].c_str(), _fields[idx].size()); - _maskOffsetToFieldAccessors[offset] = - stdx::visit(OverloadedVisitor{ - [&](stdx::monostate _) -> FieldAccessorVariant { - return std::make_pair(StringData{_fields[idx]}, accessorPtr); - }, - [&](std::pair pair) - -> FieldAccessorVariant { - StringMap map; - map.emplace(pair.first, pair.second); - map.emplace(_fields[idx], accessorPtr); - return map; - }, - [&](StringMap map) -> FieldAccessorVariant { - map.emplace(_fields[idx], accessorPtr); - return std::move(map); - }}, - std::move(_maskOffsetToFieldAccessors[offset])); - } - - if (_seekKeySlot) { - _seekKeyAccessor = ctx.getAccessor(*_seekKeySlot); + if (_minRecordIdSlot) { + _minRecordIdAccessor = ctx.getAccessor(*_minRecordIdSlot); + } + + if (_maxRecordIdSlot) { + _maxRecordIdAccessor = ctx.getAccessor(*_maxRecordIdSlot); } if (_snapshotIdSlot) { _snapshotIdAccessor = ctx.getAccessor(*_snapshotIdSlot); } - if (_indexIdSlot) { - _indexIdAccessor = ctx.getAccessor(*_indexIdSlot); + if (_indexIdentSlot) { + _indexIdentAccessor = ctx.getAccessor(*_indexIdentSlot); } if (_indexKeySlot) { @@ -180,23 +185,23 @@ void ScanStage::prepare(CompileCtx& ctx) { } tassert(5709600, "'_coll' should not be initialized prior to 'acquireCollection()'", !_coll); - std::tie(_coll, _collName, _catalogEpoch) = acquireCollection(_opCtx, _collUuid); + _coll.acquireCollection(_opCtx, _collUuid); } value::SlotAccessor* ScanStage::getAccessor(CompileCtx& ctx, value::SlotId slot) { if (_recordSlot && *_recordSlot == slot) { - return _recordAccessor.get(); + return &_recordAccessor; } if (_recordIdSlot && *_recordIdSlot == slot) { - return _recordIdAccessor.get(); + return &_recordIdAccessor; } if (_oplogTsSlot && *_oplogTsSlot == slot) { return _oplogTsAccessor; } - if (auto it = _varAccessors.find(slot); it != _varAccessors.end()) { + if (auto it = _scanFieldAccessorsMap.find(slot); it != _scanFieldAccessorsMap.end()) { return it->second; } @@ -206,9 +211,8 @@ value::SlotAccessor* ScanStage::getAccessor(CompileCtx& ctx, value::SlotId slot) void ScanStage::doSaveState(bool relinquishCursor) { #if defined(MONGO_CONFIG_DEBUG_BUILD) if (slotsAccessible()) { - if (_recordAccessor && - _recordAccessor->getViewOfValue().first != value::TypeTags::Nothing) { - auto [tag, val] = _recordAccessor->getViewOfValue(); + if (_recordSlot && _recordAccessor.getViewOfValue().first != value::TypeTags::Nothing) { + auto [tag, val] = _recordAccessor.getViewOfValue(); tassert(5975900, "expected scan to produce bson", tag == value::TypeTags::bsonObject); auto* raw = value::bitcastTo(val); @@ -220,23 +224,23 @@ void ScanStage::doSaveState(bool relinquishCursor) { #endif if (relinquishCursor) { - if (_recordAccessor) { - prepareForYielding(*_recordAccessor, slotsAccessible()); + if (_recordSlot) { + prepareForYielding(_recordAccessor, slotsAccessible()); } - if (_recordIdAccessor) { + if (_recordIdSlot) { // TODO: SERVER-72054 - // RecordId are currently (incorrectly) accessed after EOF, therefore - // we must treat them as always accessible ratther invalidate them when slots are - // disabled. We should use slotsAccessible() instead of true, once the bug is fixed. 
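
`prepare()` above now stores the per-field accessors by value in a vector parallel to the scanned field names, keeps a slot-id map pointing into that vector for `getAccessor()`, and caches the "ts" accessor for oplog scans. The sketch below shows that wiring with placeholder types; `SlotId`, `ValueAccessor`, and `ScanFieldAccessors` are hypothetical names, not the SBE classes.

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>

using SlotId = int64_t;
struct ValueAccessor { /* placeholder for an owned-value accessor */ };

struct ScanFieldAccessors {
    std::vector<ValueAccessor> accessors;               // one accessor per scanned field
    std::unordered_map<SlotId, ValueAccessor*> bySlot;  // output slot -> accessor
    ValueAccessor* tsAccessor = nullptr;                // cached "ts" accessor (oplog scans)

    void wire(const std::vector<std::string>& fieldNames,
              const std::vector<SlotId>& fieldSlots,
              bool wantOplogTs) {
        accessors.resize(fieldNames.size());  // addresses stay stable from here on
        for (size_t i = 0; i < fieldNames.size(); ++i) {
            if (!bySlot.emplace(fieldSlots[i], &accessors[i]).second)
                throw std::runtime_error("duplicate slot: " + std::to_string(fieldSlots[i]));
            if (wantOplogTs && fieldNames[i] == "ts")
                tsAccessor = &accessors[i];
        }
    }
};

int main() {
    ScanFieldAccessors s;
    s.wire({"a", "ts"}, {10, 11}, /*wantOplogTs=*/true);
    std::cout << (s.bySlot.at(11) == s.tsAccessor) << "\n";  // 1
}
```

Keeping the accessors by value in one contiguous vector avoids a heap allocation per field, which is the apparent motivation for dropping the old `unique_ptr` members.
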
- prepareForYielding(*_recordIdAccessor, true); + // RecordId are currently (incorrectly) accessed after EOF, therefore we must treat them + // as always accessible rather than invalidate them when slots are disabled. We should + // use slotsAccessible() instead of true, once the bug is fixed. + prepareForYielding(_recordIdAccessor, true); } - for (auto& accessor : _fieldAccessors) { + for (auto& accessor : _scanFieldAccessors) { prepareForYielding(accessor, slotsAccessible()); } } #if defined(MONGO_CONFIG_DEBUG_BUILD) - if (!_recordAccessor || !slotsAccessible()) { + if (!_recordSlot || !slotsAccessible()) { _lastReturned.clear(); } #endif @@ -249,6 +253,7 @@ void ScanStage::doSaveState(bool relinquishCursor) { cursor->setSaveStorageCursorOnDetachFromOperationContext(!relinquishCursor); } + _indexCatalogEntryMap.clear(); _coll.reset(); } @@ -257,12 +262,11 @@ void ScanStage::doRestoreState(bool relinquishCursor) { invariant(!_coll); // If this stage has not been prepared, then yield recovery is a no-op. - if (!_collName) { + if (!_coll.getCollName()) { return; } - tassert(5777408, "Catalog epoch should be initialized", _catalogEpoch); - _coll = restoreCollection(_opCtx, *_collName, _collUuid, *_catalogEpoch); + _coll.restoreCollection(_opCtx, _collUuid); if (auto cursor = getActiveCursor(); cursor != nullptr) { if (relinquishCursor) { @@ -273,7 +277,7 @@ void ScanStage::doRestoreState(bool relinquishCursor) { str::stream() << "CollectionScan died due to position in capped collection being deleted. ", couldRestore); - } else if (_coll->isCapped()) { + } else if (_coll.getPtr()->isCapped()) { // We cannot check for capped position lost here, as it requires us to reposition the // cursor, which would free the underlying value and break the contract of // restoreState(fullSave=false). So we defer the capped collection position lost check @@ -288,8 +292,8 @@ void ScanStage::doRestoreState(bool relinquishCursor) { } #if defined(MONGO_CONFIG_DEBUG_BUILD) - if (_recordAccessor && !_lastReturned.empty()) { - auto [tag, val] = _recordAccessor->getViewOfValue(); + if (_recordSlot && !_lastReturned.empty()) { + auto [tag, val] = _recordAccessor.getViewOfValue(); tassert(5975901, "expected scan to produce bson", tag == value::TypeTags::bsonObject); auto* raw = value::bitcastTo(val); @@ -313,8 +317,8 @@ void ScanStage::doDetachFromOperationContext() { } void ScanStage::doAttachToOperationContext(OperationContext* opCtx) { - if (_lowPriority && _open && opCtx->getClient()->isFromUserConnection() && - opCtx->lockState()->shouldWaitForTicket()) { + if (_lowPriority && _open && gDeprioritizeUnboundedUserCollectionScans.load() && + opCtx->getClient()->isFromUserConnection() && opCtx->lockState()->shouldWaitForTicket()) { _priority.emplace(opCtx->lockState(), AdmissionContext::Priority::kLow); } if (auto cursor = getActiveCursor()) { @@ -336,14 +340,61 @@ RecordCursor* ScanStage::getActiveCursor() const { return _useRandomCursor ? 
_randomCursor.get() : _cursor.get(); } -void ScanStage::initKey() { - auto [tag, val] = _seekKeyAccessor->getViewOfValue(); +void ScanStage::setSeekRecordId() { + auto [tag, val] = _seekRecordIdAccessor->getViewOfValue(); const auto msgTag = tag; tassert(7104002, str::stream() << "Seek key is wrong type: " << msgTag, tag == value::TypeTags::RecordId); - _key = *value::getRecordIdView(val); + _seekRecordId = *value::getRecordIdView(val); +} + +void ScanStage::setMinRecordId() { + auto [tag, val] = _minRecordIdAccessor->getViewOfValue(); + const auto msgTag = tag; + tassert(7452101, + str::stream() << "minRecordId is wrong type: " << msgTag, + tag == value::TypeTags::RecordId); + + _minRecordId = *value::getRecordIdView(val); +} + +void ScanStage::setMaxRecordId() { + auto [tag, val] = _maxRecordIdAccessor->getViewOfValue(); + const auto msgTag = tag; + tassert(7452102, + str::stream() << "maxRecordId is wrong type: " << msgTag, + tag == value::TypeTags::RecordId); + + _maxRecordId = *value::getRecordIdView(val); +} + +void ScanStage::scanResetState(bool reOpen) { + if (!_useRandomCursor) { + // Reuse existing cursor if possible in the reOpen case (i.e. when we will do a seek). + if (!reOpen || + (!_seekRecordIdAccessor && + (_forward ? !_minRecordIdAccessor : !_maxRecordIdAccessor))) { + _cursor = _coll.getPtr()->getCursor(_opCtx, _forward); + } + if (_seekRecordIdAccessor) { + setSeekRecordId(); + } else { + if (_minRecordIdAccessor) { + setMinRecordId(); + } + if (_maxRecordIdAccessor) { + setMaxRecordId(); + } + } + } else { + _randomCursor = _coll.getPtr()->getRecordStore()->getRandomCursor(_opCtx); + } + + _firstGetNext = true; + _hasScanEndRecordId = _forward ? _maxRecordIdAccessor : _minRecordIdAccessor; + _havePassedScanEndRecordId = false; } void ScanStage::open(bool reOpen) { @@ -356,16 +407,7 @@ void ScanStage::open(bool reOpen) { // Fast-path for handling the case where 'reOpen' is true. if (MONGO_likely(reOpen)) { dassert(_open && _coll && getActiveCursor()); - - if (_seekKeyAccessor) { - initKey(); - } else if (!_useRandomCursor) { - _cursor = _coll->getCursor(_opCtx, _forward); - } else { - _randomCursor = _coll->getRecordStore()->getRandomCursor(_opCtx); - } - - _firstGetNext = true; + scanResetState(reOpen); return; } @@ -373,51 +415,38 @@ void ScanStage::open(bool reOpen) { // first time ever, or this stage is being opened for the first time after calling close(). tassert(5071004, "first open to ScanStage but reOpen=true", !reOpen && !_open); tassert(5071005, "ScanStage is not open but has a cursor", !getActiveCursor()); - tassert(5777401, "Collection name should be initialized", _collName); - tassert(5777402, "Catalog epoch should be initialized", _catalogEpoch); // We need to re-acquire '_coll' in this case and make some validity checks (the collection has // not been dropped, renamed, etc). 
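
`scanResetState()` above decides whether the existing storage cursor can be kept across a re-open and which record-id bounds to refresh. The helper below condenses the cursor-reuse rule and the end-bound bookkeeping into two pure functions; the names are illustrative only.

```cpp
#include <iostream>

// A new storage cursor is needed on the first open, and on re-open only for a
// plain unbounded scan; a seek-driven or bounded re-open keeps the existing
// cursor and repositions it with seekExact()/seekNear() on the next getNext().
bool needsNewCursor(bool reOpen,
                    bool hasSeekRecordId,
                    bool hasMinRecordId,
                    bool hasMaxRecordId,
                    bool forward) {
    const bool hasStartBound = forward ? hasMinRecordId : hasMaxRecordId;
    return !reOpen || (!hasSeekRecordId && !hasStartBound);
}

// The end-of-scan bound only matters on the far side of the scan direction.
bool hasScanEndBound(bool forward, bool hasMinRecordId, bool hasMaxRecordId) {
    return forward ? hasMaxRecordId : hasMinRecordId;
}

int main() {
    std::cout << needsNewCursor(/*reOpen=*/true, /*seek=*/true, false, false, true) << "\n";   // 0
    std::cout << needsNewCursor(/*reOpen=*/true, /*seek=*/false, false, false, true) << "\n";  // 1
    std::cout << hasScanEndBound(/*forward=*/true, false, true) << "\n";                       // 1
}
```
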
- _coll = restoreCollection(_opCtx, *_collName, _collUuid, *_catalogEpoch); + _coll.restoreCollection(_opCtx, _collUuid); tassert(5959701, "restoreCollection() unexpectedly returned null in ScanStage", _coll); if (_scanCallbacks.scanOpenCallback) { - _scanCallbacks.scanOpenCallback(_opCtx, _coll); - } - - if (_seekKeyAccessor) { - initKey(); - _cursor = _coll->getCursor(_opCtx, _forward); - } else if (!_useRandomCursor) { - _cursor = _coll->getCursor(_opCtx, _forward); - } else { - _randomCursor = _coll->getRecordStore()->getRandomCursor(_opCtx); + _scanCallbacks.scanOpenCallback(_opCtx, _coll.getPtr()); } + scanResetState(reOpen); _open = true; - _firstGetNext = true; } -value::OwnedValueAccessor* ScanStage::getFieldAccessor(StringData name, size_t offset) const { - return stdx::visit( - OverloadedVisitor{ - [](const stdx::monostate& _) -> value::OwnedValueAccessor* { return nullptr; }, - [&](const std::pair pair) { - return (pair.first == name) ? pair.second : nullptr; - }, - [&](const StringMap& map) { - auto it = map.find(name); - return it == map.end() ? nullptr : it->second; - }}, - _maskOffsetToFieldAccessors[offset]); +value::OwnedValueAccessor* ScanStage::getFieldAccessor(StringData name) { + if (size_t pos = _scanFieldNames.findPos(name); pos != IndexedStringVector::npos) { + return &_scanFieldAccessors[pos]; + } + return nullptr; } PlanState ScanStage::getNext() { auto optTimer(getOptTimer(_opCtx)); - if (_lowPriority && !_priority && _opCtx->getClient()->isFromUserConnection() && - _opCtx->lockState()->shouldWaitForTicket()) { + // A clustered collection scan may have an end bound we have already passed. + if (_havePassedScanEndRecordId) { + return trackPlanState(PlanState::IS_EOF); + } + + if (_lowPriority && !_priority && gDeprioritizeUnboundedUserCollectionScans.load() && + _opCtx->getClient()->isFromUserConnection() && _opCtx->lockState()->shouldWaitForTicket()) { _priority.emplace(_opCtx->lockState(), AdmissionContext::Priority::kLow); } @@ -441,61 +470,128 @@ PlanState ScanStage::getNext() { _needsToCheckCappedPositionLost = false; } - auto res = _firstGetNext && _seekKeyAccessor; - auto nextRecord = _useRandomCursor ? _randomCursor->next() - : (res ? _cursor->seekExact(_key) : _cursor->next()); - _firstGetNext = false; + // Optimized so the most common case has as short a codepath as possible. Info on bounds edge + // enforcement: + // o '_seekRecordIdAccessor' existence means this is doing a single-record fetch or resuming a + // prior paused scan and must do seekExact() to that recordId. In the fetch case this is the + // record to be returned. In the resume case it is the last one returned before the pause, + // and if it no longer exists the scan will fail because it doesn't know where to resume + // from. If it is present, the code below expects us to leave the cursor on that record to + // do some checks, and there will be a FilterStage above the scan to filter out this record. + // o '_minRecordIdAccessor' and/or '_maxRecordIdAccessor' mean we are doing a bounded scan on + // a clustered collection, and we will do a seekNear() to the start bound on the first call. + // - If the bound(s) came in via an expression, we are to assume both bounds are inclusive. + // A FilterStage above this stage will exist to filter out any that are really exclusive. + // - If the bound(s) came in via the "min" and/or "max" keywords, this stage must enforce + // them directly as there may be no FilterStage above it. 
In this case the start bound is + // always inclusive, so the logic is unchanged, but the end bound is always exclusive, so + // we use '_excludeScanEndRecordId' to indicate this for scan termination. + // - Since there may not be a FilterStage for a bounded scan, we need to skip the first + // record here if the seekNear() positioned on a recordId before the target range. + bool doSeekExact = false; + boost::optional nextRecord; + if (!_useRandomCursor) { + if (!_firstGetNext) { + nextRecord = _cursor->next(); + } else { + _firstGetNext = false; + if (_seekRecordIdAccessor) { // fetch or scan resume + if (_seekRecordId.isNull()) { + // Attempting to resume from a null record ID gives a null '_seekRecordId'. + uasserted(ErrorCodes::KeyNotFound, + str::stream() + << "Failed to resume collection scan: the recordId from " + "which we are attempting to resume no longer exists in " + "the collection: " + << _seekRecordId); + } + doSeekExact = true; + nextRecord = _cursor->seekExact(_seekRecordId); + } else if (_minRecordIdAccessor && _forward) { + nextRecord = _cursor->seekNear(_minRecordId); + // Skip first record if seekNear() landed on the record just before the start bound. + if (nextRecord && nextRecord->id < _minRecordId) { + nextRecord = _cursor->next(); + } + } else if (_maxRecordIdAccessor && !_forward) { + nextRecord = _cursor->seekNear(_maxRecordId); + // Skip first record if seekNear() landed on the record just before the start bound. + if (nextRecord && nextRecord->id > _maxRecordId) { + nextRecord = _cursor->next(); + } + } else { + nextRecord = _cursor->next(); + } + } + } else { + nextRecord = _randomCursor->next(); + // Performance optimization: random cursors don't care about '_firstGetNext' so we do not + // need to set it to false here. + } if (!nextRecord) { - // Only check our index key for corruption during the first call to 'getNext' and while - // seeking. - if (_scanCallbacks.indexKeyCorruptionCheckCallback) { - tassert(5113712, - "Index key corruption check can only be performed on the first call " - "to getNext() during a seek", - res); - tassert(5777400, "Collection name should be initialized", _collName); + // Only check the index key for corruption if this getNext() call did seekExact(), as that + // expects the '_seekRecordId' to be found, but it was not. + if (doSeekExact && _scanCallbacks.indexKeyCorruptionCheckCallback) { + tassert(5777400, "Collection name should be initialized", _coll.getCollName()); _scanCallbacks.indexKeyCorruptionCheckCallback(_opCtx, _snapshotIdAccessor, _indexKeyAccessor, _indexKeyPatternAccessor, - _key, - *_collName); + _seekRecordId, + *_coll.getCollName()); } _priority.reset(); return trackPlanState(PlanState::IS_EOF); } // Return EOF if the index key is found to be inconsistent. 
- if (_scanCallbacks.indexKeyConsistencyCheckCallBack && - !_scanCallbacks.indexKeyConsistencyCheckCallBack( - _opCtx, _snapshotIdAccessor, _indexIdAccessor, _indexKeyAccessor, _coll, *nextRecord)) { + if (_scanCallbacks.indexKeyConsistencyCheckCallback && + !_scanCallbacks.indexKeyConsistencyCheckCallback(_opCtx, + _indexCatalogEntryMap, + _snapshotIdAccessor, + _indexIdentAccessor, + _indexKeyAccessor, + _coll.getPtr(), + *nextRecord)) { _priority.reset(); return trackPlanState(PlanState::IS_EOF); } - if (_recordAccessor) { - _recordAccessor->reset(false, - value::TypeTags::bsonObject, - value::bitcastFrom(nextRecord->data.data())); + if (_recordSlot) { + _recordAccessor.reset(false, + value::TypeTags::bsonObject, + value::bitcastFrom(nextRecord->data.data())); } - if (_recordIdAccessor) { + if (_recordIdSlot) { _recordId = std::move(nextRecord->id); - _recordIdAccessor->reset( + if (_hasScanEndRecordId) { + if (_excludeScanEndRecordId) { + _havePassedScanEndRecordId = + _forward ? (_recordId >= _maxRecordId) : (_recordId <= _minRecordId); + } else { + _havePassedScanEndRecordId = + _forward ? (_recordId > _maxRecordId) : (_recordId < _minRecordId); + } + } + if (_havePassedScanEndRecordId) { + return trackPlanState(PlanState::IS_EOF); + } + _recordIdAccessor.reset( false, value::TypeTags::RecordId, value::bitcastFrom(&_recordId)); } - if (!_fieldAccessors.empty()) { + if (!_scanFieldAccessors.empty()) { auto rawBson = nextRecord->data.data(); auto start = rawBson + 4; auto end = rawBson + ConstDataView(rawBson).read>(); auto last = end - 1; - if (_fieldAccessors.size() == 1) { + if (_scanFieldAccessors.size() == 1) { // If we're only looking for 1 field, then it's more efficient to forgo the hashtable // and just use equality comparison. - auto name = StringData{_fields[0]}; + auto name = StringData{_scanFieldNames[0]}; auto [tag, val] = [start, last, end, name] { for (auto bsonElement = start; bsonElement != last;) { auto field = bson::fieldNameAndLength(bsonElement); @@ -507,30 +603,18 @@ PlanState ScanStage::getNext() { return std::make_pair(value::TypeTags::Nothing, value::Value{0}); }(); - _fieldAccessors.front().reset(false, tag, val); + _scanFieldAccessors.front().reset(false, tag, val); } else { // If we're looking for 2 or more fields, it's more efficient to use the hashtable. - for (auto& accessor : _fieldAccessors) { + for (auto& accessor : _scanFieldAccessors) { accessor.reset(); } - auto fieldsToMatch = _fieldAccessors.size(); + auto fieldsToMatch = _scanFieldAccessors.size(); for (auto bsonElement = start; bsonElement != last;) { - // Oftentimes _fieldAccessors hashtable only has a few entries, but the object we're - // scanning could have dozens of fields. In this common scenario, most hashtable - // lookups will "miss" (i.e. they won't find a matching entry in the hashtable). To - // optimize for this, we put a very simple bloom filter (requiring only a few basic - // machine instructions) in front of the hashtable. When we "miss" in the bloom - // filter, we can quickly skip over a field without having to generate the hash for - // the field. 
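
The end-bound test introduced in the hunk above distinguishes inclusive bounds (supplied via expressions, with any exclusivity enforced by a filter stage) from the exclusive end bound produced by the "min"/"max" keywords. A condensed version, using plain integers in place of record ids:

```cpp
#include <iostream>

// A forward scan ends at maxRecordId, a reverse scan at minRecordId; 'excludeEnd'
// makes the end bound exclusive (the "min"/"max" keyword case), otherwise the
// bound itself is still returned.
bool passedScanEnd(long long recordId,
                   long long minRecordId,
                   long long maxRecordId,
                   bool forward,
                   bool excludeEnd) {
    if (excludeEnd)
        return forward ? recordId >= maxRecordId : recordId <= minRecordId;
    return forward ? recordId > maxRecordId : recordId < minRecordId;
}

int main() {
    std::cout << passedScanEnd(30, 0, 30, true, /*excludeEnd=*/true) << "\n";   // 1: 30 is excluded
    std::cout << passedScanEnd(30, 0, 30, true, /*excludeEnd=*/false) << "\n";  // 0: 30 is included
}
```
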
auto field = bson::fieldNameAndLength(bsonElement); - const size_t offset = computeFieldMaskOffset(field.rawData(), field.size()); - if (!(_fieldsBloomFilter.maybeContainsHash(computeFieldMask(offset)))) { - bsonElement = bson::advance(bsonElement, field.size()); - continue; - } + auto accessor = getFieldAccessor(field); - auto accessor = getFieldAccessor(field, offset); if (accessor != nullptr) { auto [tag, val] = bson::convertFrom(bsonElement, end, field.size()); accessor->reset(false, tag, val); @@ -544,10 +628,9 @@ PlanState ScanStage::getNext() { } if (_oplogTsAccessor) { - // If _oplogTsAccessor is set, then we check if the document had a "ts" field, and if - // so we write the value of "ts" into _oplogTsAccessor. The engine uses mechanism to - // keep track of the most recent timestamp that has been observed when scanning the - // oplog collection. + // Oplog scans only: if _oplogTsAccessor is set, the value of the "ts" field, if + // it exists in the document, will be copied to this slot for use by the clustered scan + // EOF filter above this stage and/or because the query asked for the latest "ts" value. tassert(7097200, "Expected _tsFieldAccessor to be defined", _tsFieldAccessor); auto [tag, val] = _tsFieldAccessor->getViewOfValue(); if (tag != value::TypeTags::Nothing) { @@ -574,6 +657,7 @@ void ScanStage::close() { auto optTimer(getOptTimer(_opCtx)); trackClose(); + _indexCatalogEntryMap.clear(); _cursor.reset(); _randomCursor.reset(); _coll.reset(); @@ -594,14 +678,20 @@ std::unique_ptr ScanStage::getStats(bool includeDebugInfo) const if (_recordIdSlot) { bob.appendNumber("recordIdSlot", static_cast(*_recordIdSlot)); } - if (_seekKeySlot) { - bob.appendNumber("seekKeySlot", static_cast(*_seekKeySlot)); + if (_seekRecordIdSlot) { + bob.appendNumber("seekRecordIdSlot", static_cast(*_seekRecordIdSlot)); + } + if (_minRecordIdSlot) { + bob.appendNumber("minRecordIdSlot", static_cast(*_minRecordIdSlot)); + } + if (_maxRecordIdSlot) { + bob.appendNumber("maxRecordIdSlot", static_cast(*_maxRecordIdSlot)); } if (_snapshotIdSlot) { bob.appendNumber("snapshotIdSlot", static_cast(*_snapshotIdSlot)); } - if (_indexIdSlot) { - bob.appendNumber("indexIdSlot", static_cast(*_indexIdSlot)); + if (_indexIdentSlot) { + bob.appendNumber("indexIdentSlot", static_cast(*_indexIdentSlot)); } if (_indexKeySlot) { bob.appendNumber("indexKeySlot", static_cast(*_indexKeySlot)); @@ -610,8 +700,8 @@ std::unique_ptr ScanStage::getStats(bool includeDebugInfo) const bob.appendNumber("indexKeyPatternSlot", static_cast(*_indexKeyPatternSlot)); } - bob.append("fields", _fields); - bob.append("outputSlots", _vars.begin(), _vars.end()); + bob.append("scanFieldNames", _scanFieldNames.getUnderlyingVector()); + bob.append("scanFieldSlots", _scanFieldSlots.begin(), _scanFieldSlots.end()); ret->debugInfo = bob.obj(); } return ret; @@ -622,10 +712,10 @@ const SpecificStats* ScanStage::getSpecificStats() const { } std::vector ScanStage::debugPrint() const { - auto ret = PlanStage::debugPrint(); + std::vector ret = PlanStage::debugPrint(); - if (_seekKeySlot) { - DebugPrinter::addIdentifier(ret, _seekKeySlot.value()); + if (_seekRecordIdSlot) { + DebugPrinter::addIdentifier(ret, _seekRecordIdSlot.value()); } if (_recordSlot) { @@ -646,8 +736,8 @@ std::vector ScanStage::debugPrint() const { DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword); } - if (_indexIdSlot) { - DebugPrinter::addIdentifier(ret, _indexIdSlot.value()); + if (_indexIdentSlot) { + DebugPrinter::addIdentifier(ret, _indexIdentSlot.value()); } 
else { DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword); } @@ -664,6 +754,18 @@ std::vector ScanStage::debugPrint() const { DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword); } + if (_minRecordIdSlot) { + DebugPrinter::addIdentifier(ret, _minRecordIdSlot.value()); + } else { + DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword); + } + + if (_maxRecordIdSlot) { + DebugPrinter::addIdentifier(ret, _maxRecordIdSlot.value()); + } else { + DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword); + } + if (_useRandomCursor) { DebugPrinter::addKeyword(ret, "random"); } @@ -673,14 +775,14 @@ std::vector ScanStage::debugPrint() const { } ret.emplace_back(DebugPrinter::Block("[`")); - for (size_t idx = 0; idx < _fields.size(); ++idx) { + for (size_t idx = 0; idx < _scanFieldNames.size(); ++idx) { if (idx) { ret.emplace_back(DebugPrinter::Block("`,")); } - DebugPrinter::addIdentifier(ret, _vars[idx]); + DebugPrinter::addIdentifier(ret, _scanFieldSlots[idx]); ret.emplace_back("="); - DebugPrinter::addIdentifier(ret, _fields[idx]); + DebugPrinter::addIdentifier(ret, _scanFieldNames[idx]); } ret.emplace_back(DebugPrinter::Block("`]")); @@ -697,8 +799,9 @@ std::vector ScanStage::debugPrint() const { size_t ScanStage::estimateCompileTimeSize() const { size_t size = sizeof(*this); - size += size_estimator::estimate(_fields); - size += size_estimator::estimate(_vars); + size += size_estimator::estimate(_scanFieldNames.getUnderlyingVector()); + size += size_estimator::estimate(_scanFieldNames.getUnderlyingMap()); + size += size_estimator::estimate(_scanFieldSlots); size += size_estimator::estimate(_specificStats); return size; } @@ -707,27 +810,27 @@ ParallelScanStage::ParallelScanStage(UUID collectionUuid, boost::optional recordSlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, - boost::optional indexIdSlot, + boost::optional indexIdentSlot, boost::optional indexKeySlot, boost::optional indexKeyPatternSlot, - std::vector fields, - value::SlotVector vars, + std::vector scanFieldNames, + value::SlotVector scanFieldSlots, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, ScanCallbacks callbacks, bool participateInTrialRunTracking) : PlanStage("pscan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking), - _collUuid(collectionUuid), _recordSlot(recordSlot), _recordIdSlot(recordIdSlot), _snapshotIdSlot(snapshotIdSlot), - _indexIdSlot(indexIdSlot), + _indexIdentSlot(indexIdentSlot), _indexKeySlot(indexKeySlot), _indexKeyPatternSlot(indexKeyPatternSlot), - _fields(std::move(fields)), - _vars(std::move(vars)), + _scanFieldNames(std::move(scanFieldNames)), + _scanFieldSlots(std::move(scanFieldSlots)), + _collUuid(collectionUuid), _scanCallbacks(std::move(callbacks)) { - invariant(_fields.size() == _vars.size()); + invariant(_scanFieldNames.size() == _scanFieldSlots.size()); _state = std::make_shared(); } @@ -737,28 +840,28 @@ ParallelScanStage::ParallelScanStage(const std::shared_ptr& state boost::optional recordSlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, - boost::optional indexIdSlot, + boost::optional indexIdentSlot, boost::optional indexKeySlot, boost::optional indexKeyPatternSlot, - std::vector fields, - value::SlotVector vars, + std::vector scanFieldNames, + value::SlotVector scanFieldSlots, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, ScanCallbacks callbacks, bool participateInTrialRunTracking) : PlanStage("pscan"_sd, yieldPolicy, nodeId, participateInTrialRunTracking), - _collUuid(collectionUuid), _recordSlot(recordSlot), 
_recordIdSlot(recordIdSlot), _snapshotIdSlot(snapshotIdSlot), - _indexIdSlot(indexIdSlot), + _indexIdentSlot(indexIdentSlot), _indexKeySlot(indexKeySlot), _indexKeyPatternSlot(indexKeyPatternSlot), - _fields(std::move(fields)), - _vars(std::move(vars)), - _state(state), - _scanCallbacks(std::move(callbacks)) { - invariant(_fields.size() == _vars.size()); + _scanFieldNames(std::move(scanFieldNames)), + _scanFieldSlots(std::move(scanFieldSlots)), + _collUuid(collectionUuid), + _scanCallbacks(std::move(callbacks)), + _state(state) { + invariant(_scanFieldNames.size() == _scanFieldSlots.size()); } std::unique_ptr ParallelScanStage::clone() const { @@ -767,11 +870,11 @@ std::unique_ptr ParallelScanStage::clone() const { _recordSlot, _recordIdSlot, _snapshotIdSlot, - _indexIdSlot, + _indexIdentSlot, _indexKeySlot, _indexKeyPatternSlot, - _fields, - _vars, + _scanFieldNames.getUnderlyingVector(), + _scanFieldSlots, _yieldPolicy, _commonStats.nodeId, _scanCallbacks, @@ -779,28 +882,23 @@ std::unique_ptr ParallelScanStage::clone() const { } void ParallelScanStage::prepare(CompileCtx& ctx) { - if (_recordSlot) { - _recordAccessor = std::make_unique(); - } + _scanFieldAccessors.resize(_scanFieldNames.size()); - if (_recordIdSlot) { - _recordIdAccessor = std::make_unique(); - } + for (size_t idx = 0; idx < _scanFieldNames.size(); ++idx) { + auto accessorPtr = &_scanFieldAccessors[idx]; - for (size_t idx = 0; idx < _fields.size(); ++idx) { - auto [it, inserted] = - _fieldAccessors.emplace(_fields[idx], std::make_unique()); - uassert(4822816, str::stream() << "duplicate field: " << _fields[idx], inserted); - auto [itRename, insertedRename] = _varAccessors.emplace(_vars[idx], it->second.get()); - uassert(4822817, str::stream() << "duplicate field: " << _vars[idx], insertedRename); + auto [itRename, insertedRename] = + _scanFieldAccessorsMap.emplace(_scanFieldSlots[idx], accessorPtr); + uassert( + 4822817, str::stream() << "duplicate field: " << _scanFieldSlots[idx], insertedRename); } if (_snapshotIdSlot) { _snapshotIdAccessor = ctx.getAccessor(*_snapshotIdSlot); } - if (_indexIdSlot) { - _indexIdAccessor = ctx.getAccessor(*_indexIdSlot); + if (_indexIdentSlot) { + _indexIdentAccessor = ctx.getAccessor(*_indexIdentSlot); } if (_indexKeySlot) { @@ -812,19 +910,19 @@ void ParallelScanStage::prepare(CompileCtx& ctx) { } tassert(5709601, "'_coll' should not be initialized prior to 'acquireCollection()'", !_coll); - std::tie(_coll, _collName, _catalogEpoch) = acquireCollection(_opCtx, _collUuid); + _coll.acquireCollection(_opCtx, _collUuid); } value::SlotAccessor* ParallelScanStage::getAccessor(CompileCtx& ctx, value::SlotId slot) { if (_recordSlot && *_recordSlot == slot) { - return _recordAccessor.get(); + return &_recordAccessor; } if (_recordIdSlot && *_recordIdSlot == slot) { - return _recordIdAccessor.get(); + return &_recordIdAccessor; } - if (auto it = _varAccessors.find(slot); it != _varAccessors.end()) { + if (auto it = _scanFieldAccessorsMap.find(slot); it != _scanFieldAccessorsMap.end()) { return it->second; } @@ -834,9 +932,8 @@ value::SlotAccessor* ParallelScanStage::getAccessor(CompileCtx& ctx, value::Slot void ParallelScanStage::doSaveState(bool relinquishCursor) { #if defined(MONGO_CONFIG_DEBUG_BUILD) if (slotsAccessible()) { - if (_recordAccessor && - _recordAccessor->getViewOfValue().first != value::TypeTags::Nothing) { - auto [tag, val] = _recordAccessor->getViewOfValue(); + if (_recordSlot && _recordAccessor.getViewOfValue().first != value::TypeTags::Nothing) { + auto [tag, val] = 
_recordAccessor.getViewOfValue(); tassert(5975904, "expected scan to produce bson", tag == value::TypeTags::bsonObject); auto* raw = value::bitcastTo(val); @@ -847,22 +944,22 @@ void ParallelScanStage::doSaveState(bool relinquishCursor) { } #endif - if (_recordAccessor) { - prepareForYielding(*_recordAccessor, slotsAccessible()); + if (_recordSlot) { + prepareForYielding(_recordAccessor, slotsAccessible()); } - if (_recordIdAccessor) { + if (_recordIdSlot) { // TODO: SERVER-72054 // RecordId are currently (incorrectly) accessed after EOF, therefore // we must treat them as always accessible ratther invalidate them when slots are // disabled. We should use slotsAccessible() instead of true, once the bug is fixed. - prepareForYielding(*_recordIdAccessor, true); + prepareForYielding(_recordIdAccessor, true); } - for (auto& [fieldName, accessor] : _fieldAccessors) { - prepareForYielding(*accessor, slotsAccessible()); + for (auto& accessor : _scanFieldAccessors) { + prepareForYielding(accessor, slotsAccessible()); } #if defined(MONGO_CONFIG_DEBUG_BUILD) - if (!_recordAccessor || !slotsAccessible()) { + if (!_recordSlot || !slotsAccessible()) { _lastReturned.clear(); } #endif @@ -871,6 +968,7 @@ void ParallelScanStage::doSaveState(bool relinquishCursor) { _cursor->save(); } + _indexCatalogEntryMap.clear(); _coll.reset(); } @@ -879,12 +977,11 @@ void ParallelScanStage::doRestoreState(bool relinquishCursor) { invariant(!_coll); // If this stage has not been prepared, then yield recovery is a no-op. - if (!_collName) { + if (!_coll.getCollName()) { return; } - tassert(5777409, "Catalog epoch should be initialized", _catalogEpoch); - _coll = restoreCollection(_opCtx, *_collName, _collUuid, *_catalogEpoch); + _coll.restoreCollection(_opCtx, _collUuid); if (_cursor && relinquishCursor) { const bool couldRestore = _cursor->restore(); @@ -895,8 +992,8 @@ void ParallelScanStage::doRestoreState(bool relinquishCursor) { } #if defined(MONGO_CONFIG_DEBUG_BUILD) - if (_recordAccessor && !_lastReturned.empty()) { - auto [tag, val] = _recordAccessor->getViewOfValue(); + if (_recordSlot && !_lastReturned.empty()) { + auto [tag, val] = _recordAccessor.getViewOfValue(); tassert(5975905, "expected scan to produce bson", tag == value::TypeTags::bsonObject); auto* raw = value::bitcastTo(val); @@ -934,22 +1031,20 @@ void ParallelScanStage::open(bool reOpen) { // we're being opened after 'close()'. we need to re-acquire '_coll' in this case and // make some validity checks (the collection has not been dropped, renamed, etc.). 
tassert(5071013, "ParallelScanStage is not open but have _cursor", !_cursor); - tassert(5777403, "Collection name should be initialized", _collName); - tassert(5777404, "Catalog epoch should be initialized", _catalogEpoch); - _coll = restoreCollection(_opCtx, *_collName, _collUuid, *_catalogEpoch); + _coll.restoreCollection(_opCtx, _collUuid); } { stdx::unique_lock lock(_state->mutex); if (_state->ranges.empty()) { - auto ranges = _coll->getRecordStore()->numRecords(_opCtx) / 10240; + auto ranges = _coll.getPtr()->getRecordStore()->numRecords(_opCtx) / 10240; if (ranges < 2) { _state->ranges.emplace_back(Range{RecordId{}, RecordId{}}); } else { if (ranges > 1024) { ranges = 1024; } - auto randomCursor = _coll->getRecordStore()->getRandomCursor(_opCtx); + auto randomCursor = _coll.getPtr()->getRecordStore()->getRandomCursor(_opCtx); invariant(randomCursor); std::set rids; while (ranges--) { @@ -968,7 +1063,7 @@ void ParallelScanStage::open(bool reOpen) { } } - _cursor = _coll->getCursor(_opCtx); + _cursor = _coll.getPtr()->getCursor(_opCtx); _open = true; } @@ -985,6 +1080,13 @@ boost::optional ParallelScanStage::nextRange() { } } +value::OwnedValueAccessor* ParallelScanStage::getFieldAccessor(StringData name) { + if (size_t pos = _scanFieldNames.findPos(name); pos != IndexedStringVector::npos) { + return &_scanFieldAccessors[pos]; + } + return nullptr; +} + PlanState ParallelScanStage::getNext() { auto optTimer(getOptTimer(_opCtx)); @@ -1010,13 +1112,13 @@ PlanState ParallelScanStage::getNext() { "Index key corruption check can only performed when inspecting the first " "recordId in a range", needRange); - tassert(5777405, "Collection name should be initialized", _collName); + tassert(5777405, "Collection name should be initialized", _coll.getCollName()); _scanCallbacks.indexKeyCorruptionCheckCallback(_opCtx, _snapshotIdAccessor, _indexKeyAccessor, _indexKeyPatternAccessor, _range.begin, - *_collName); + *_coll.getCollName()); } return trackPlanState(PlanState::IS_EOF); } @@ -1028,46 +1130,46 @@ PlanState ParallelScanStage::getNext() { } // Return EOF if the index key is found to be inconsistent. 
- if (_scanCallbacks.indexKeyConsistencyCheckCallBack && - !_scanCallbacks.indexKeyConsistencyCheckCallBack(_opCtx, + if (_scanCallbacks.indexKeyConsistencyCheckCallback && + !_scanCallbacks.indexKeyConsistencyCheckCallback(_opCtx, + _indexCatalogEntryMap, _snapshotIdAccessor, - _indexIdAccessor, + _indexIdentAccessor, _indexKeyAccessor, - _coll, + _coll.getPtr(), *nextRecord)) { return trackPlanState(PlanState::IS_EOF); } } while (!nextRecord); - if (_recordAccessor) { - _recordAccessor->reset(false, - value::TypeTags::bsonObject, - value::bitcastFrom(nextRecord->data.data())); + if (_recordSlot) { + _recordAccessor.reset(false, + value::TypeTags::bsonObject, + value::bitcastFrom(nextRecord->data.data())); } - if (_recordIdAccessor) { + if (_recordIdSlot) { _recordId = nextRecord->id; - _recordIdAccessor->reset( + _recordIdAccessor.reset( false, value::TypeTags::RecordId, value::bitcastFrom(&_recordId)); } - if (!_fieldAccessors.empty()) { - auto fieldsToMatch = _fieldAccessors.size(); + if (!_scanFieldAccessors.empty()) { + auto fieldsToMatch = _scanFieldAccessors.size(); auto rawBson = nextRecord->data.data(); auto be = rawBson + 4; auto end = rawBson + ConstDataView(rawBson).read>(); - for (auto& [name, accessor] : _fieldAccessors) { - accessor->reset(); + for (auto& accessor : _scanFieldAccessors) { + accessor.reset(); } while (be != end - 1) { auto sv = bson::fieldNameAndLength(be); - if (auto it = _fieldAccessors.find(sv); it != _fieldAccessors.end()) { - // Found the field so convert it to Value. - auto [tag, val] = bson::convertFrom(be, end, sv.size()); - - it->second->reset(false, tag, val); + auto accessor = getFieldAccessor(sv); + if (accessor != nullptr) { + auto [tag, val] = bson::convertFrom(be, end, sv.size()); + accessor->reset(false, tag, val); if ((--fieldsToMatch) == 0) { // No need to scan any further so bail out early. 
break; @@ -1085,6 +1187,7 @@ void ParallelScanStage::close() { auto optTimer(getOptTimer(_opCtx)); trackClose(); + _indexCatalogEntryMap.clear(); _cursor.reset(); _coll.reset(); _open = false; @@ -1120,8 +1223,8 @@ std::vector ParallelScanStage::debugPrint() const { DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword); } - if (_indexIdSlot) { - DebugPrinter::addIdentifier(ret, _indexIdSlot.value()); + if (_indexIdentSlot) { + DebugPrinter::addIdentifier(ret, _indexIdentSlot.value()); } else { DebugPrinter::addIdentifier(ret, DebugPrinter::kNoneKeyword); } @@ -1139,14 +1242,14 @@ std::vector ParallelScanStage::debugPrint() const { } ret.emplace_back(DebugPrinter::Block("[`")); - for (size_t idx = 0; idx < _fields.size(); ++idx) { + for (size_t idx = 0; idx < _scanFieldNames.size(); ++idx) { if (idx) { ret.emplace_back(DebugPrinter::Block("`,")); } - DebugPrinter::addIdentifier(ret, _vars[idx]); + DebugPrinter::addIdentifier(ret, _scanFieldSlots[idx]); ret.emplace_back("="); - DebugPrinter::addIdentifier(ret, _fields[idx]); + DebugPrinter::addIdentifier(ret, _scanFieldNames[idx]); } ret.emplace_back(DebugPrinter::Block("`]")); @@ -1159,8 +1262,9 @@ std::vector ParallelScanStage::debugPrint() const { size_t ParallelScanStage::estimateCompileTimeSize() const { size_t size = sizeof(*this); - size += size_estimator::estimate(_fields); - size += size_estimator::estimate(_vars); + size += size_estimator::estimate(_scanFieldNames.getUnderlyingVector()); + size += size_estimator::estimate(_scanFieldNames.getUnderlyingMap()); + size += size_estimator::estimate(_scanFieldSlots); return size; } diff --git a/src/mongo/db/exec/sbe/stages/scan.h b/src/mongo/db/exec/sbe/stages/scan.h index 3328495017167..9c162037ac10a 100644 --- a/src/mongo/db/exec/sbe/stages/scan.h +++ b/src/mongo/db/exec/sbe/stages/scan.h @@ -29,45 +29,74 @@ #pragma once -#include "mongo/config.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/exec/field_name_bloom_filter.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/stages/collection_helpers.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/record_store.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/indexed_string_vector.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo { namespace sbe { -using ScanOpenCallback = std::function; +using ScanOpenCallback = void (*)(OperationContext*, const CollectionPtr&); struct ScanCallbacks { - ScanCallbacks(IndexKeyCorruptionCheckCallback indexKeyCorruptionCheck = {}, - IndexKeyConsistencyCheckCallback indexKeyConsistencyCheck = {}, - ScanOpenCallback scanOpen = {}) + ScanCallbacks(IndexKeyCorruptionCheckCallback indexKeyCorruptionCheck = 
nullptr, + IndexKeyConsistencyCheckCallback indexKeyConsistencyCheck = nullptr, + ScanOpenCallback scanOpen = nullptr) : indexKeyCorruptionCheckCallback(std::move(indexKeyCorruptionCheck)), - indexKeyConsistencyCheckCallBack(std::move(indexKeyConsistencyCheck)), + indexKeyConsistencyCheckCallback(std::move(indexKeyConsistencyCheck)), scanOpenCallback(std::move(scanOpen)) {} - IndexKeyCorruptionCheckCallback indexKeyCorruptionCheckCallback; - IndexKeyConsistencyCheckCallback indexKeyConsistencyCheckCallBack; - ScanOpenCallback scanOpenCallback; + IndexKeyCorruptionCheckCallback indexKeyCorruptionCheckCallback = nullptr; + IndexKeyConsistencyCheckCallback indexKeyConsistencyCheckCallback = nullptr; + ScanOpenCallback scanOpenCallback = nullptr; }; /** * Retrieves documents from the collection with the given 'collectionUuid' using the storage API. - * Can be used as either a full scan of the collection, or as a seek. In the latter case, this stage - * is given a 'seekKeySlot' from which to read a 'RecordId'. We seek to this 'RecordId' and then - * scan from that point to the end of the collection. + * + * Iff resuming a prior scan, this stage is given a 'seekRecordIdSlot' from which to read a + * 'RecordId'. We seek to this 'RecordId' before resuming the scan. 'stageType' is set to "seek" + * instead of "scan" for this case only. * * If the 'recordSlot' is provided, then each of the records returned from the scan is placed into * an output slot with this slot id. Similarly, if 'recordIdSlot' is provided, then this slot is * populated with the record id on each advance. * * In addition, the scan/seek can extract a set of top-level fields from each document. The caller - * asks for this by passing a vector of 'fields', along with a corresponding slot vector 'vars' into - * which the resulting values should be stored. These vectors must have the same length. + * asks for this by passing a vector of 'scanFieldNames', along with a corresponding slot vector + * 'scanFieldSlots' into which the resulting values should be stored. These vectors must have the + * same length. * * The direction of the scan is controlled by the 'forward' parameter. * @@ -78,20 +107,21 @@ struct ScanCallbacks { * we must verify at runtime that no such inconsistency exists. This requires the scan to know the * value of the index key, the identity of the index from which it was obtained, and the id of the * storage snapshot from which it was obtained. This information is made available to the seek stage - * via 'snapshotIdSlot', 'indexIdSlot', 'indexKeySlot', and 'indexKeyPatternSlot'. + * via 'snapshotIdSlot', 'indexIdentSlot', 'indexKeySlot', and 'indexKeyPatternSlot'. * - * If this is an oplog scan, then the 'oplogTsSlot' will be populated with the "ts" field from each - * oplog entry. + * For oplog scans, 'oplogTsSlot' will be populated with a copy of the "ts" field (which is the + * oplog clustering key) from the doc if it is a clustered scan (for use by the EOF filter above the + * scan) or the caller asked for the latest oplog "ts" value. * * Debug string representations: * - * scan recordSlot|none recordIdSlot|none snapshotIdSlot|none indexIdSlot|none indexKeySlot|none - * indexKeyPatternSlot|none [slot1 = fieldName1, ... slot_n = fieldName_n] collectionUuid - * forward needOplogSlotForTs + * scan recordSlot? recordIdSlot? snapshotIdSlot? indexIdentSlot? indexKeySlot? + * indexKeyPatternSlot? minRecordIdSlot? maxRecordIdSlot? [slot1 = fieldName1, ... 
+ * slot_n = fieldName_n] collectionUuid forward needOplogSlotForTs * - * seek seekKeySlot recordSlot|none recordIdSlot|none snapshotIdSlot|none indexIdSlot|none - * indexKeySlot|none indexKeyPatternSlot|none [slot1 = fieldName1, ... slot_n = fieldName_n] - * collectionUuid forward needOplogSlotForTs + * seek seekKeySlot recordSlot? recordIdSlot? snapshotIdSlot? indexIdentSlot? indexKeySlot? + * indexKeyPatternSlot? minRecordIdSlot? maxRecordIdSlot? [slot1 = fieldName1, ... + * slot_n = fieldName_n] collectionUuid forward needOplogSlotForTs */ class ScanStage final : public PlanStage { public: @@ -99,20 +129,23 @@ class ScanStage final : public PlanStage { boost::optional recordSlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, - boost::optional indexIdSlot, + boost::optional indexIdentSlot, boost::optional indexKeySlot, boost::optional indexKeyPatternSlot, boost::optional oplogTsSlot, - std::vector fields, - value::SlotVector vars, - boost::optional seekKeySlot, + std::vector scanFieldNames, + value::SlotVector scanFieldSlots, + boost::optional seekRecordIdSlot, + boost::optional minRecordIdSlot, + boost::optional maxRecordIdSlot, bool forward, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, ScanCallbacks scanCallbacks, bool lowPriority = false, bool useRandomCursor = false, - bool participateInTrialRunTracking = true); + bool participateInTrialRunTracking = true, + bool excludeScanEndRecordId = false); std::unique_ptr clone() const final; @@ -140,97 +173,115 @@ class ScanStage final : public PlanStage { // Returns the primary cursor or the random cursor depending on whether _useRandomCursor is set. RecordCursor* getActiveCursor() const; - static size_t computeFieldMaskOffset(const char* name, size_t length) { - return static_cast(name[length / 2]) & 63u; - } + /** + * Resets the state data members for starting the scan in the 'reOpen' case, i.e. skipping state + * that would be correct after a prior open() call that was NOT followed by a close() call. This + * is also called by the initial open() to set the same subset of state for the first time to + * avoid duplicating this code. + */ + void scanResetState(bool reOpen); - static uint64_t computeFieldMask(size_t offset) { - return uint64_t{1} << offset; - } + // Only for a resumed scan ("seek"), this sets '_seekRecordId' to the resume point at runtime. + void setSeekRecordId(); - static uint64_t computeFieldMask(const char* name, size_t length) { - return uint64_t{1} << computeFieldMaskOffset(name, length); - } + // Only for a clustered collection scan, this sets '_minRecordId' to the lower scan bound. + void setMinRecordId(); - void initKey(); + // Only for a clustered collection scan, this sets '_maxRecordId' to the upper scan bound. 
+ void setMaxRecordId(); - value::OwnedValueAccessor* getFieldAccessor(StringData name, size_t offset) const; + value::OwnedValueAccessor* getFieldAccessor(StringData name); - const UUID _collUuid; const boost::optional _recordSlot; const boost::optional _recordIdSlot; const boost::optional _snapshotIdSlot; - const boost::optional _indexIdSlot; + const boost::optional _indexIdentSlot; const boost::optional _indexKeySlot; const boost::optional _indexKeyPatternSlot; const boost::optional _oplogTsSlot; - const std::vector _fields; - const value::SlotVector _vars; + // '_scanFieldNames' - names of the fields being scanned from the doc + // '_scanFieldSlots' - slot IDs corresponding, by index, to _scanFieldAccessors + const IndexedStringVector _scanFieldNames; + const value::SlotVector _scanFieldSlots; - const boost::optional _seekKeySlot; - const bool _forward; + const boost::optional _seekRecordIdSlot; + const boost::optional _minRecordIdSlot; + const boost::optional _maxRecordIdSlot; - // These members are default constructed to boost::none and are initialized when 'prepare()' - // is called. Once they are set, they are never modified again. - boost::optional _collName; - boost::optional _catalogEpoch; + // Tells if this is a forward (as opposed to reverse) scan. + const bool _forward; - CollectionPtr _coll; + // Used to return a random sample of the collection. + const bool _useRandomCursor; - // If provided, used during a trial run to accumulate certain execution stats. Once the trial - // run is complete, this pointer is reset to nullptr. - TrialRunTracker* _tracker{nullptr}; + const UUID _collUuid; const ScanCallbacks _scanCallbacks; - std::unique_ptr _recordAccessor; - std::unique_ptr _recordIdAccessor; + // Holds the current record. + value::OwnedValueAccessor _recordAccessor; + + // Holds the RecordId of the current record as a TypeTags::RecordId. + value::OwnedValueAccessor _recordIdAccessor; + RecordId _recordId; + value::SlotAccessor* _snapshotIdAccessor{nullptr}; - value::SlotAccessor* _indexIdAccessor{nullptr}; + value::SlotAccessor* _indexIdentAccessor{nullptr}; value::SlotAccessor* _indexKeyAccessor{nullptr}; value::SlotAccessor* _indexKeyPatternAccessor{nullptr}; - // If this ScanStage was constructed with _oplogTsSlot set, then _oplogTsAccessor will point to - // an accessor in the RuntimeEnvironment, and value of the "ts" field (if it exists) from each - // record scanned will be written to this accessor. The engine uses mechanism to keep track of - // the most recent timestamp that has been observed when scanning the oplog collection. + // For oplog scans only, holds a copy of the "ts" field of the record (which is the oplog + // clustering key) for use by the end-bound EOF filter above the scan, if applicable. RuntimeEnvironment::Accessor* _oplogTsAccessor{nullptr}; - // Used to return a random sample of the collection. - const bool _useRandomCursor; + // For oplog scans only, holds a cached pointer to the accessor for the "ts" field in the + // current document to get this accessor quickly rather than having to look it up in the + // '_scanFieldAccessors' hashtable each time. + value::SlotAccessor* _tsFieldAccessor{nullptr}; - std::vector _fieldAccessors; - value::SlotAccessorMap _varAccessors; - value::SlotAccessor* _seekKeyAccessor{nullptr}; + // These members hold info about the target fields being scanned from the record. 
+ // '_scanFieldAccessors' - slot accessors corresponding, by index, to _scanFieldNames + // '_scanFieldAccessorsMap' - a map from vector index to pointer to the corresponding + // accessor in '_scanFieldAccessors' + absl::InlinedVector _scanFieldAccessors; + value::SlotAccessorMap _scanFieldAccessorsMap; - // Variant stores pointers to field accessors for all fields with a given bloom filter mask - // offset. If there is only one field with a given offset, it is stored as a StringData and - // pointer pair, which allows us to just compare strings instead of hash map lookup. - using FieldAccessorVariant = stdx::variant, - StringMap>; + // Only for a resumed scan ("seek"). Slot holding the TypeTags::RecordId of the record to resume + // the scan from. '_seekRecordId' is the RecordId value, initialized from the slot at runtime. + value::SlotAccessor* _seekRecordIdAccessor{nullptr}; + RecordId _seekRecordId; - // Array contains FieldAccessorVariants, indexed by bloom filter mask offset, determined by - // computeFieldMaskOffset function. - std::array _maskOffsetToFieldAccessors; + // Only for clustered collection scans, holds the minimum record ID of the scan, if applicable. + value::SlotAccessor* _minRecordIdAccessor{nullptr}; + RecordId _minRecordId; - // _tsFieldAccessor points to the accessor for field "ts". We use _tsFieldAccessor to get at - // the accessor quickly rather than having to look it up in the _fieldAccessors hashtable. - value::SlotAccessor* _tsFieldAccessor{nullptr}; + // Only for clustered collection scans, holds the maximum record ID of the scan, if applicable. + value::SlotAccessor* _maxRecordIdAccessor{nullptr}; + RecordId _maxRecordId; - FieldNameBloomFilter _fieldsBloomFilter; + // Only for clustered collection scans: must ScanStage::getNext() exclude the ending bound? + bool _excludeScanEndRecordId = false; - RecordId _recordId; + // Only for clustered collection scans: does the scan have an end bound? + bool _hasScanEndRecordId = false; - bool _open{false}; + // Only for clustered collection scans: have we crossed the scan end bound if there is one? + bool _havePassedScanEndRecordId = false; + CollectionRef _coll; + + // If provided, used during a trial run to accumulate certain execution stats. Once the trial + // run is complete, this pointer is reset to nullptr. + TrialRunTracker* _tracker{nullptr}; + + bool _open{false}; std::unique_ptr _cursor; // TODO: SERVER-62647. Consider removing random cursor when no longer needed. std::unique_ptr _randomCursor; - RecordId _key; + // Tells whether this is the first getNext() call of the scan or after restarting. bool _firstGetNext{false}; // Whether the scan should have low storage admission priority. @@ -243,6 +294,8 @@ class ScanStage final : public PlanStage { // collection is still valid. Only relevant to capped collections. bool _needsToCheckCappedPositionLost = false; + StringMap _indexCatalogEntryMap; + #if defined(MONGO_CONFIG_DEBUG_BUILD) // Debug-only buffer used to track the last thing returned from the stage. Between // saves/restores this is used to check that the storage cursor has not changed position. 
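The new '_minRecordId'/'_maxRecordId' members and the '_excludeScanEndRecordId'/'_havePassedScanEndRecordId' flags above bound a clustered collection scan: the lower bound is where the scan starts and the upper bound is where getNext() reports EOF, either once the bound is passed or, for an exclusive end bound, as soon as it is reached. A small sketch of those bound checks, with RecordIds modelled as integers and the slot plumbing omitted:

```cpp
#include <cstdint>
#include <iostream>
#include <optional>
#include <vector>

// Models the clustered-scan bounds: the scan starts at the (optional) minimum
// RecordId and stops at the (optional) maximum, which may be excluded from the
// results. The real stage reads the bounds from 'minRecordIdSlot'/'maxRecordIdSlot'
// at runtime.
struct ScanBounds {
    std::optional<int64_t> minRecordId;
    std::optional<int64_t> maxRecordId;
    bool excludeEnd = false;  // corresponds to '_excludeScanEndRecordId'
};

bool beforeStart(int64_t rid, const ScanBounds& b) {
    return b.minRecordId && rid < *b.minRecordId;
}

// True once a record id lies beyond the end bound (roughly where the real scan
// would note that the end bound has been crossed and return EOF).
bool pastEnd(int64_t rid, const ScanBounds& b) {
    if (!b.maxRecordId) {
        return false;
    }
    return b.excludeEnd ? rid >= *b.maxRecordId : rid > *b.maxRecordId;
}

int main() {
    ScanBounds bounds{10, 20, /*excludeEnd=*/true};
    for (int64_t rid : std::vector<int64_t>{5, 10, 15, 20, 25}) {
        if (beforeStart(rid, bounds)) {
            continue;  // the real scan would instead seek directly to the min bound
        }
        if (pastEnd(rid, bounds)) {
            break;  // end bound reached: the scan is done
        }
        std::cout << rid << " ";  // prints: 10 15
    }
    std::cout << "\n";
}
```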
@@ -266,11 +319,11 @@ class ParallelScanStage final : public PlanStage { boost::optional recordSlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, - boost::optional indexIdSlot, + boost::optional indexIdentSlot, boost::optional indexKeySlot, boost::optional indexKeyPatternSlot, - std::vector fields, - value::SlotVector vars, + std::vector scanFieldNames, + value::SlotVector scanFieldSlots, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, ScanCallbacks callbacks, @@ -281,11 +334,11 @@ class ParallelScanStage final : public PlanStage { boost::optional recordSlot, boost::optional recordIdSlot, boost::optional snapshotIdSlot, - boost::optional indexIdSlot, + boost::optional indexIdentSlot, boost::optional indexKeySlot, boost::optional indexKeyPatternSlot, - std::vector fields, - value::SlotVector vars, + std::vector scanFieldNames, + value::SlotVector scanFieldSlots, PlanYieldPolicy* yieldPolicy, PlanNodeId nodeId, ScanCallbacks callbacks, @@ -319,46 +372,56 @@ class ParallelScanStage final : public PlanStage { _currentRange = std::numeric_limits::max(); } - const UUID _collUuid; + value::OwnedValueAccessor* getFieldAccessor(StringData name); + const boost::optional _recordSlot; const boost::optional _recordIdSlot; const boost::optional _snapshotIdSlot; - const boost::optional _indexIdSlot; + const boost::optional _indexIdentSlot; const boost::optional _indexKeySlot; const boost::optional _indexKeyPatternSlot; - const std::vector _fields; - const value::SlotVector _vars; - // These members are default constructed to boost::none and are initialized when 'prepare()' - // is called. Once they are set, they are never modified again. - boost::optional _collName; - boost::optional _catalogEpoch; + // '_scanFieldNames' - names of the fields being scanned from the doc + // '_scanFieldSlots' - slot IDs corresponding, by index, to _scanFieldAccessors + const IndexedStringVector _scanFieldNames; + const value::SlotVector _scanFieldSlots; - CollectionPtr _coll; - - std::shared_ptr _state; + const UUID _collUuid; const ScanCallbacks _scanCallbacks; - std::unique_ptr _recordAccessor; - std::unique_ptr _recordIdAccessor; + // Holds the current record. + value::OwnedValueAccessor _recordAccessor; + + // Holds the RecordId of the current record as a TypeTags::RecordId. + value::OwnedValueAccessor _recordIdAccessor; + RecordId _recordId; + value::SlotAccessor* _snapshotIdAccessor{nullptr}; - value::SlotAccessor* _indexIdAccessor{nullptr}; + value::SlotAccessor* _indexIdentAccessor{nullptr}; value::SlotAccessor* _indexKeyAccessor{nullptr}; value::SlotAccessor* _indexKeyPatternAccessor{nullptr}; - value::FieldAccessorMap _fieldAccessors; - value::SlotAccessorMap _varAccessors; + // These members hold info about the target fields being scanned from the record. + // '_scanFieldAccessors' - slot accessors corresponding, by index, to _scanFieldNames + // '_scanFieldAccessorsMap' - a map from vector index to pointer to the corresponding + // accessor in '_scanFieldAccessors' + absl::InlinedVector _scanFieldAccessors; + value::SlotAccessorMap _scanFieldAccessorsMap; + + CollectionRef _coll; + + std::shared_ptr _state; size_t _currentRange{std::numeric_limits::max()}; Range _range; - RecordId _recordId; - bool _open{false}; std::unique_ptr _cursor; + StringMap _indexCatalogEntryMap; + #if defined(MONGO_CONFIG_DEBUG_BUILD) // Debug-only buffer used to track the last thing returned from the stage. Between // saves/restores this is used to check that the storage cursor has not changed position. 
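The getNext() hunks earlier replace the per-field StringMap with an IndexedStringVector of field names ('_scanFieldNames', from "mongo/util/indexed_string_vector.h") plus a parallel, index-addressed vector of accessors ('_scanFieldAccessors'); the extraction loop resets every accessor, walks the record's fields once, and bails out as soon as the last requested field has been filled. A simplified, self-contained version of that single-pass, early-exit extraction, with the document modelled as (name, value) pairs instead of raw BSON and a linear find standing in for IndexedStringVector::findPos():

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <optional>
#include <string>
#include <string_view>
#include <utility>
#include <vector>

// Requested field names live in one vector and the output "accessors" in a
// parallel vector addressed by the same index; extract() resets the accessors,
// scans the document once, and stops as soon as every requested field is filled.
class FieldExtractor {
public:
    explicit FieldExtractor(std::vector<std::string> names)
        : _scanFieldNames(std::move(names)), _scanFieldAccessors(_scanFieldNames.size()) {}

    std::optional<int>* getFieldAccessor(std::string_view name) {
        auto it = std::find(_scanFieldNames.begin(), _scanFieldNames.end(), name);
        return it == _scanFieldNames.end()
            ? nullptr
            : &_scanFieldAccessors[static_cast<size_t>(it - _scanFieldNames.begin())];
    }

    void extract(const std::vector<std::pair<std::string, int>>& doc) {
        for (auto& accessor : _scanFieldAccessors) {
            accessor.reset();  // clear values left over from the previous record
        }
        size_t fieldsToMatch = _scanFieldNames.size();
        for (const auto& [name, value] : doc) {
            if (auto* accessor = getFieldAccessor(name)) {
                *accessor = value;
                if (--fieldsToMatch == 0) {
                    break;  // all requested fields seen: bail out early
                }
            }
        }
    }

    const std::optional<int>& at(size_t idx) const {
        return _scanFieldAccessors[idx];
    }

private:
    std::vector<std::string> _scanFieldNames;
    std::vector<std::optional<int>> _scanFieldAccessors;
};

int main() {
    FieldExtractor extractor({"a", "c"});
    extractor.extract({{"a", 1}, {"b", 2}, {"c", 3}, {"d", 4}});
    std::cout << *extractor.at(0) << " " << *extractor.at(1) << "\n";  // prints "1 3"
}
```

Addressing the accessors by index keeps them contiguous and avoids re-hashing field names on the hot path, which appears to be the motivation for the IndexedStringVector change.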
diff --git a/src/mongo/db/exec/sbe/stages/sort.cpp b/src/mongo/db/exec/sbe/stages/sort.cpp index 05840435a827d..fd7ff5c54dd2b 100644 --- a/src/mongo/db/exec/sbe/stages/sort.cpp +++ b/src/mongo/db/exec/sbe/stages/sort.cpp @@ -27,15 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/sort.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" -#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/sort.h" +#include "mongo/db/exec/sbe/values/row.h" #include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/sorter/sorter.h" #include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace { diff --git a/src/mongo/db/exec/sbe/stages/sort.h b/src/mongo/db/exec/sbe/stages/sort.h index 1b4dd9ea7ed55..ed5af79bd7415 100644 --- a/src/mongo/db/exec/sbe/stages/sort.h +++ b/src/mongo/db/exec/sbe/stages/sort.h @@ -29,12 +29,25 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/sorter/sorter_stats.h" namespace mongo { template class SortIteratorInterface; + template class Sorter; } // namespace mongo diff --git a/src/mongo/db/exec/sbe/stages/sorted_merge.cpp b/src/mongo/db/exec/sbe/stages/sorted_merge.cpp index 201f6092dd428..531a3aca135ce 100644 --- a/src/mongo/db/exec/sbe/stages/sorted_merge.cpp +++ b/src/mongo/db/exec/sbe/stages/sorted_merge.cpp @@ -27,13 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/sorted_merge.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" -#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/sorted_merge.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" namespace mongo { namespace sbe { diff --git a/src/mongo/db/exec/sbe/stages/sorted_merge.h b/src/mongo/db/exec/sbe/stages/sorted_merge.h index 436ddfce0808b..0fc7e1acfde14 100644 --- a/src/mongo/db/exec/sbe/stages/sorted_merge.h +++ b/src/mongo/db/exec/sbe/stages/sorted_merge.h @@ -29,11 +29,20 @@ #pragma once +#include +#include +#include #include +#include -#include "mongo/db/exec/sbe/stages/stages.h" - +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/sorted_stream_merger.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/spool.cpp b/src/mongo/db/exec/sbe/stages/spool.cpp index 341dcc57d906e..d26ee4cd84fb5 100644 --- a/src/mongo/db/exec/sbe/stages/spool.cpp +++ b/src/mongo/db/exec/sbe/stages/spool.cpp @@ -27,10 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/stages/spool.h" +#include "mongo/db/exec/sbe/values/row.h" namespace mongo::sbe { SpoolEagerProducerStage::SpoolEagerProducerStage(std::unique_ptr input, diff --git a/src/mongo/db/exec/sbe/stages/spool.h b/src/mongo/db/exec/sbe/stages/spool.h index 09a453e0e0ed9..52ba0ab237424 100644 --- a/src/mongo/db/exec/sbe/stages/spool.h +++ b/src/mongo/db/exec/sbe/stages/spool.h @@ -29,9 +29,29 @@ #pragma once +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/stages.h b/src/mongo/db/exec/sbe/stages/stages.h index 863c3f94bff98..f5184c327f7a3 100644 --- a/src/mongo/db/exec/sbe/stages/stages.h +++ b/src/mongo/db/exec/sbe/stages/stages.h @@ -35,7 +35,6 @@ #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/scoped_timer_factory.h" #include "mongo/db/exec/trial_run_tracker.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/plan_yield_policy.h" @@ -455,9 +454,19 @@ class CanTrackStats { */ 
boost::optional getOptTimer(OperationContext* opCtx) { if (opCtx && _commonStats.executionTime.precision != QueryExecTimerPrecision::kNoTiming) { - return scoped_timer_factory::make(opCtx->getServiceContext(), - _commonStats.executionTime.precision, - &_commonStats.executionTime.executionTimeEstimate); + + if (MONGO_likely(_commonStats.executionTime.precision == + QueryExecTimerPrecision::kMillis)) { + return boost::optional( + boost::in_place_init, + &_commonStats.executionTime.executionTimeEstimate, + opCtx->getServiceContext()->getFastClockSource()); + } else { + return boost::optional( + boost::in_place_init, + &_commonStats.executionTime.executionTimeEstimate, + opCtx->getServiceContext()->getTickSource()); + } } return boost::none; @@ -581,7 +590,8 @@ class PlanStage : public CanSwitchOperationContext, * call and avoids resource acquisition in getNext(). * * When reOpen flag is true then the plan stage should reinitizalize already acquired resources - * (e.g. re-hash, re-sort, re-seek, etc). + * (e.g. re-hash, re-sort, re-seek, etc), but it can avoid reinitializing things that do not + * contain state and are not destroyed by close(), since close() is not called before a reopen. */ virtual void open(bool reOpen) = 0; diff --git a/src/mongo/db/exec/sbe/stages/traverse.cpp b/src/mongo/db/exec/sbe/stages/traverse.cpp index d97e382023d5f..f0c35b3db4d07 100644 --- a/src/mongo/db/exec/sbe/stages/traverse.cpp +++ b/src/mongo/db/exec/sbe/stages/traverse.cpp @@ -27,11 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/size_estimator.h" #include "mongo/db/exec/sbe/stages/traverse.h" +#include "mongo/util/assert_util.h" namespace mongo::sbe { TraverseStage::TraverseStage(std::unique_ptr outer, diff --git a/src/mongo/db/exec/sbe/stages/traverse.h b/src/mongo/db/exec/sbe/stages/traverse.h index 09e5dc3dfcf7e..f6c77ce207d33 100644 --- a/src/mongo/db/exec/sbe/stages/traverse.h +++ b/src/mongo/db/exec/sbe/stages/traverse.h @@ -29,9 +29,22 @@ #pragma once +#include +#include +#include +#include + +#include + +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/union.cpp b/src/mongo/db/exec/sbe/stages/union.cpp index 587db310d9665..f2145e74d230f 100644 --- a/src/mongo/db/exec/sbe/stages/union.cpp +++ b/src/mongo/db/exec/sbe/stages/union.cpp @@ -29,11 +29,21 @@ #include "mongo/db/exec/sbe/stages/union.h" +#include +#include +#include #include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" -#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::sbe { UnionStage::UnionStage(PlanStage::Vector inputStages, diff --git 
a/src/mongo/db/exec/sbe/stages/union.h b/src/mongo/db/exec/sbe/stages/union.h index 07e85850efcf8..952a6b5ccc148 100644 --- a/src/mongo/db/exec/sbe/stages/union.h +++ b/src/mongo/db/exec/sbe/stages/union.h @@ -29,9 +29,17 @@ #pragma once +#include +#include #include +#include +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/unique.cpp b/src/mongo/db/exec/sbe/stages/unique.cpp index c88fa9ab43e6f..04db11bd4db62 100644 --- a/src/mongo/db/exec/sbe/stages/unique.cpp +++ b/src/mongo/db/exec/sbe/stages/unique.cpp @@ -27,11 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/unique.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/unique.h" namespace mongo { namespace sbe { diff --git a/src/mongo/db/exec/sbe/stages/unique.h b/src/mongo/db/exec/sbe/stages/unique.h index c344cd09d247d..60cfaca5dae2d 100644 --- a/src/mongo/db/exec/sbe/stages/unique.h +++ b/src/mongo/db/exec/sbe/stages/unique.h @@ -29,9 +29,19 @@ #pragma once +#include +#include #include +#include +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/row.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/stdx/unordered_set.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/stages/unwind.cpp b/src/mongo/db/exec/sbe/stages/unwind.cpp index 6b19c49849d40..b7bb46f3b4ca8 100644 --- a/src/mongo/db/exec/sbe/stages/unwind.cpp +++ b/src/mongo/db/exec/sbe/stages/unwind.cpp @@ -27,12 +27,19 @@ * it in the license file. 
*/ -#include "mongo/config.h" -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/stages/unwind.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/stages/unwind.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/stages/unwind.h b/src/mongo/db/exec/sbe/stages/unwind.h index 58c519d424fb7..db0ede17fdf5a 100644 --- a/src/mongo/db/exec/sbe/stages/unwind.h +++ b/src/mongo/db/exec/sbe/stages/unwind.h @@ -29,7 +29,18 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/stage_types.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/exec/sbe/util/debug_print.cpp b/src/mongo/db/exec/sbe/util/debug_print.cpp index e33397530fc5d..f3a3be78ccd6a 100644 --- a/src/mongo/db/exec/sbe/util/debug_print.cpp +++ b/src/mongo/db/exec/sbe/util/debug_print.cpp @@ -27,11 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/util/debug_print.h" +#include +#include #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" namespace mongo { namespace sbe { diff --git a/src/mongo/db/exec/sbe/util/debug_print.h b/src/mongo/db/exec/sbe/util/debug_print.h index e4a1260a67fa1..c339256043f68 100644 --- a/src/mongo/db/exec/sbe/util/debug_print.h +++ b/src/mongo/db/exec/sbe/util/debug_print.h @@ -29,9 +29,11 @@ #pragma once +#include #include #include +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/exec/sbe/util/spilling.cpp b/src/mongo/db/exec/sbe/util/spilling.cpp index 6e8c027d7feb7..cf0de795826d3 100644 --- a/src/mongo/db/exec/sbe/util/spilling.cpp +++ b/src/mongo/db/exec/sbe/util/spilling.cpp @@ -29,6 +29,23 @@ #include "mongo/db/exec/sbe/util/spilling.h" +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/str.h" + namespace mongo { namespace sbe { @@ -41,17 +58,17 @@ void assertIgnorePrepareConflictsBehavior(OperationContext* opCtx) { } -std::pair encodeKeyString(KeyString::Builder& kb, - const value::MaterializedRow& value) { +std::pair encodeKeyString(key_string::Builder& kb, + const value::MaterializedRow& value) { value.serializeIntoKeyString(kb); auto typeBits = kb.getTypeBits(); auto rid = RecordId(kb.getBuffer(), kb.getSize()); return {rid, typeBits}; } -KeyString::Value decodeKeyString(const RecordId& rid, KeyString::TypeBits typeBits) { +key_string::Value decodeKeyString(const RecordId& rid, key_string::TypeBits typeBits) { auto rawKey = 
rid.getStr(); - KeyString::Builder kb{KeyString::Version::kLatestVersion}; + key_string::Builder kb{key_string::Version::kLatestVersion}; kb.resetFromBuffer(rawKey.rawData(), rawKey.size()); kb.setTypeBits(typeBits); return kb.getValueCopy(); @@ -72,7 +89,7 @@ int upsertToRecordStore(OperationContext* opCtx, RecordStore* rs, const RecordId& key, const value::MaterializedRow& val, - const KeyString::TypeBits& typeBits, // recover type of value. + const key_string::TypeBits& typeBits, // recover type of value. bool update) { BufBuilder bufValue; val.serializeForSorter(bufValue); @@ -83,7 +100,7 @@ int upsertToRecordStore(OperationContext* opCtx, RecordStore* rs, const RecordId& key, BufBuilder& buf, - const KeyString::TypeBits& typeBits, // recover type of value. + const key_string::TypeBits& typeBits, // recover type of value. bool update) { // Append the 'typeBits' to the end of the val's buffer so the 'key' can be reconstructed when // draining HashAgg. diff --git a/src/mongo/db/exec/sbe/util/spilling.h b/src/mongo/db/exec/sbe/util/spilling.h index 0a562d7e864dc..1d200313d3412 100644 --- a/src/mongo/db/exec/sbe/util/spilling.h +++ b/src/mongo/db/exec/sbe/util/spilling.h @@ -29,10 +29,18 @@ #pragma once -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/sbe/values/row.h" #include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/temporary_record_store.h" +#include "mongo/platform/basic.h" namespace mongo { namespace sbe { @@ -44,11 +52,11 @@ namespace sbe { void assertIgnorePrepareConflictsBehavior(OperationContext* opCtx); // Encode key as a RecordId and TypeBits. -std::pair encodeKeyString(KeyString::Builder&, - const value::MaterializedRow& value); +std::pair encodeKeyString(key_string::Builder&, + const value::MaterializedRow& value); // Reconstructs the KeyString carried in RecordId using 'typeBits'. -KeyString::Value decodeKeyString(const RecordId& rid, KeyString::TypeBits typeBits); +key_string::Value decodeKeyString(const RecordId& rid, key_string::TypeBits typeBits); // Reads a materialized row from the record store. boost::optional readFromRecordStore(OperationContext* opCtx, @@ -66,13 +74,13 @@ int upsertToRecordStore(OperationContext* opCtx, RecordStore* rs, const RecordId& key, const value::MaterializedRow& val, - const KeyString::TypeBits& typeBits, + const key_string::TypeBits& typeBits, bool update); int upsertToRecordStore(OperationContext* opCtx, RecordStore* rs, const RecordId& key, BufBuilder& buf, - const KeyString::TypeBits& typeBits, // recover type of value. + const key_string::TypeBits& typeBits, // recover type of value. 
bool update); } // namespace sbe } // namespace mongo diff --git a/src/mongo/db/exec/sbe/util/stage_results_printer.cpp b/src/mongo/db/exec/sbe/util/stage_results_printer.cpp index 2a8c740cad835..4d0a223055da0 100644 --- a/src/mongo/db/exec/sbe/util/stage_results_printer.cpp +++ b/src/mongo/db/exec/sbe/util/stage_results_printer.cpp @@ -28,7 +28,16 @@ */ #include "mongo/db/exec/sbe/util/stage_results_printer.h" -#include "mongo/platform/basic.h" + +#include + +#include +#include + +#include "mongo/db/exec/plan_stats_visitor.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/util/assert_util.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/util/stage_results_printer.h b/src/mongo/db/exec/sbe/util/stage_results_printer.h index 7518ccf3a34eb..f5cc5274a6f0e 100644 --- a/src/mongo/db/exec/sbe/util/stage_results_printer.h +++ b/src/mongo/db/exec/sbe/util/stage_results_printer.h @@ -30,7 +30,11 @@ #pragma once #include +#include +#include +#include +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/util/print_options.h" #include "mongo/db/exec/sbe/values/slot.h" diff --git a/src/mongo/db/exec/sbe/util/stage_results_printer_test.cpp b/src/mongo/db/exec/sbe/util/stage_results_printer_test.cpp index f6cd96a349575..3eabab8e569be 100644 --- a/src/mongo/db/exec/sbe/util/stage_results_printer_test.cpp +++ b/src/mongo/db/exec/sbe/util/stage_results_printer_test.cpp @@ -27,9 +27,15 @@ * it in the license file. */ +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/util/stage_results_printer.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/values/arith_common.cpp b/src/mongo/db/exec/sbe/values/arith_common.cpp index f7646f2c4f1e8..366e0f898aa12 100644 --- a/src/mongo/db/exec/sbe/values/arith_common.cpp +++ b/src/mongo/db/exec/sbe/values/arith_common.cpp @@ -29,6 +29,19 @@ #include "mongo/db/exec/sbe/values/arith_common.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/platform/decimal128.h" +#include "mongo/platform/overflow_arithmetic.h" +#include "mongo/util/assert_util.h" + /** These common operations - Addition, Subtraction and Multiplication - are used in both the VM and constant folding in the optimizer. These methods are extensible for any computation with SBE values. @@ -144,6 +157,13 @@ FastTuple genericArithmeticOp(value::TypeTa // The result does not fit into int64_t so fallthru to the wider type. 
[[fallthrough]]; } + case value::TypeTags::NumberDouble: { + double result; + Op::doOperation(numericCast(lhsTag, lhsValue), + numericCast(rhsTag, rhsValue), + result); + return {false, value::TypeTags::NumberDouble, value::bitcastFrom(result)}; + } case value::TypeTags::NumberDecimal: { Decimal128 result; Op::doOperation(numericCast(lhsTag, lhsValue), @@ -152,13 +172,6 @@ FastTuple genericArithmeticOp(value::TypeTa auto [tag, val] = value::makeCopyDecimal(result); return {true, tag, val}; } - case value::TypeTags::NumberDouble: { - double result; - Op::doOperation(numericCast(lhsTag, lhsValue), - numericCast(rhsTag, rhsValue), - result); - return {false, value::TypeTags::NumberDouble, value::bitcastFrom(result)}; - } default: MONGO_UNREACHABLE; } diff --git a/src/mongo/db/exec/sbe/values/bson.cpp b/src/mongo/db/exec/sbe/values/bson.cpp index 877fd6405b511..01c2d35fe7a2a 100644 --- a/src/mongo/db/exec/sbe/values/bson.cpp +++ b/src/mongo/db/exec/sbe/values/bson.cpp @@ -27,9 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { namespace sbe { diff --git a/src/mongo/db/exec/sbe/values/bson.h b/src/mongo/db/exec/sbe/values/bson.h index 622c9c1da9823..51137e00639a7 100644 --- a/src/mongo/db/exec/sbe/values/bson.h +++ b/src/mongo/db/exec/sbe/values/bson.h @@ -29,6 +29,11 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/values/value.h" diff --git a/src/mongo/db/exec/sbe/values/column_store_encoder.h b/src/mongo/db/exec/sbe/values/column_store_encoder.h index 91edb0dae1e40..3613029c2d414 100644 --- a/src/mongo/db/exec/sbe/values/column_store_encoder.h +++ b/src/mongo/db/exec/sbe/values/column_store_encoder.h @@ -29,11 +29,31 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" #include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/storage/column_store.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo::sbe::value { /** diff --git a/src/mongo/db/exec/sbe/values/column_store_encoder_test.cpp b/src/mongo/db/exec/sbe/values/column_store_encoder_test.cpp index 35bfb95ba5c93..9ec94dd2c1a34 100644 --- a/src/mongo/db/exec/sbe/values/column_store_encoder_test.cpp +++ b/src/mongo/db/exec/sbe/values/column_store_encoder_test.cpp @@ -27,12 +27,34 @@ * it in the license file. 
*/ -#include "mongo/db/exec/sbe/expression_test_base.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/column_store_encoder.h" #include "mongo/db/index/column_cell.h" #include "mongo/db/storage/column_store.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/md5.h" #include "mongo/util/md5.hpp" +#include "mongo/util/safe_num.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" namespace mongo::sbe { TEST(SBEColumnStoreEncoder, EncodeTest) { diff --git a/src/mongo/db/exec/sbe/values/columnar.cpp b/src/mongo/db/exec/sbe/values/columnar.cpp index c2722ba943788..066fb675bb471 100644 --- a/src/mongo/db/exec/sbe/values/columnar.cpp +++ b/src/mongo/db/exec/sbe/values/columnar.cpp @@ -27,10 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/db/exec/sbe/values/columnar.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/exec/sbe/values/columnar.h b/src/mongo/db/exec/sbe/values/columnar.h index ae4b3ccf8805c..fcdd90030d1e1 100644 --- a/src/mongo/db/exec/sbe/values/columnar.h +++ b/src/mongo/db/exec/sbe/values/columnar.h @@ -29,9 +29,21 @@ #pragma once -#include "mongo/config.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/sbe/values/column_store_encoder.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/field_ref.h" #include "mongo/db/storage/column_store.h" +#include "mongo/util/assert_util_core.h" /** * Helper functions for reading values out of a columnar index for processing in SBE. diff --git a/src/mongo/db/exec/sbe/values/columnar_test.cpp b/src/mongo/db/exec/sbe/values/columnar_test.cpp index ebbed88848a4c..635b8573378fc 100644 --- a/src/mongo/db/exec/sbe/values/columnar_test.cpp +++ b/src/mongo/db/exec/sbe/values/columnar_test.cpp @@ -31,11 +31,14 @@ * Tests for columnar/SBE integration. */ -#include "mongo/platform/basic.h" - +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/values/columnar.h" -#include "mongo/db/storage/column_store.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { void makeObjFromColumns(std::vector& cells, value::Object& out) { diff --git a/src/mongo/db/exec/sbe/values/row.cpp b/src/mongo/db/exec/sbe/values/row.cpp index 6fa1c4b3a4ae8..3c3b00ba3b964 100644 --- a/src/mongo/db/exec/sbe/values/row.cpp +++ b/src/mongo/db/exec/sbe/values/row.cpp @@ -27,9 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/bson/bsonmisc.h" +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/builder.h" #include "mongo/db/exec/js_function.h" #include "mongo/db/exec/sbe/makeobj_spec.h" @@ -38,8 +51,18 @@ #include "mongo/db/exec/sbe/values/row.h" #include "mongo/db/exec/sbe/values/sort_spec.h" #include "mongo/db/exec/sbe/values/value_builder.h" +#include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/fts/fts_matcher.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/key_string.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" #include "mongo/util/bufreader.h" +#include "mongo/util/pcre.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/time_support.h" + namespace mongo::sbe::value { static std::pair deserializeValue(BufReader& buf) { @@ -171,8 +194,8 @@ static std::pair deserializeValue(BufReader& buf) { break; } case TypeTags::ksValue: { - auto version = static_cast(buf.read()); - auto ks = KeyString::Value::deserialize(buf, version); + auto version = static_cast(buf.read()); + auto ks = key_string::Value::deserialize(buf, version); auto [ksTag, ksVal] = makeCopyKeyString(ks); tag = ksTag; val = ksVal; @@ -358,7 +381,7 @@ static void serializeValue(BufBuilder& buf, TypeTags tag, Value val) { } } -static void serializeValueIntoKeyString(KeyString::Builder& buf, TypeTags tag, Value val) { +static void serializeValueIntoKeyString(key_string::Builder& buf, TypeTags tag, Value val) { switch (tag) { case TypeTags::Nothing: { buf.appendBool(false); @@ -561,7 +584,7 @@ void RowBase::serializeForSorter(BufBuilder& buf) const { template -void RowBase::serializeIntoKeyString(KeyString::Builder& buf) const { +void RowBase::serializeIntoKeyString(key_string::Builder& buf) const { const RowType& self = *static_cast(this); for (size_t idx = 0; idx < self.size(); ++idx) { auto [tag, val] = self.getViewOfValue(idx); @@ -570,17 +593,17 @@ void RowBase::serializeIntoKeyString(KeyString::Builder& buf) const { } template -RowType RowBase::deserializeFromKeyString(const KeyString::Value& keyString, +RowType RowBase::deserializeFromKeyString(const key_string::Value& keyString, BufBuilder* valueBufferBuilder, boost::optional numPrefixValsToRead) { BufReader reader(keyString.getBuffer(), keyString.getSize()); - KeyString::TypeBits typeBits(keyString.getTypeBits()); - KeyString::TypeBits::Reader typeBitsReader(typeBits); + key_string::TypeBits typeBits(keyString.getTypeBits()); + key_string::TypeBits::Reader typeBitsReader(typeBits); RowValueBuilder valBuilder(valueBufferBuilder); auto keepReading = true; do { - keepReading = KeyString::readSBEValue( + keepReading = key_string::readSBEValue( &reader, &typeBitsReader, false /* inverted */, typeBits.version, &valBuilder); } while (keepReading); diff --git a/src/mongo/db/exec/sbe/values/row.h b/src/mongo/db/exec/sbe/values/row.h index a4b13a2145214..c1a2e2311a81f 100644 --- a/src/mongo/db/exec/sbe/values/row.h +++ b/src/mongo/db/exec/sbe/values/row.h @@ -29,11 +29,26 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include -#include "mongo/config.h" +#include 
"mongo/base/string_data.h" +#include "mongo/base/string_data_comparator_interface.h" +#include "mongo/bson/util/builder.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/sbe/values/slot_util.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/bufreader.h" namespace mongo { class BufReader; @@ -116,10 +131,10 @@ class RowBase { * 'keyString' are ignored. */ static RowType deserializeFromKeyString( - const KeyString::Value& keyString, + const key_string::Value& keyString, BufBuilder* valueBufferBuilder, boost::optional numPrefixValsToRead = boost::none); - void serializeIntoKeyString(KeyString::Builder& builder) const; + void serializeIntoKeyString(key_string::Builder& builder) const; protected: void release() { diff --git a/src/mongo/db/exec/sbe/values/row_test.cpp b/src/mongo/db/exec/sbe/values/row_test.cpp index 739decdfdff0e..421bf79416949 100644 --- a/src/mongo/db/exec/sbe/values/row_test.cpp +++ b/src/mongo/db/exec/sbe/values/row_test.cpp @@ -27,12 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/exec/sbe/sbe_unittest.h" #include "mongo/db/exec/sbe/values/row.h" -#include "mongo/db/exec/sbe/values/slot.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/assert_that.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { diff --git a/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp.cpp b/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp.cpp index c7693f4fc0250..521dd09ade348 100644 --- a/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp.cpp +++ b/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp.cpp @@ -29,10 +29,12 @@ #include "mongo/db/exec/sbe/values/sbe_pattern_value_cmp.h" +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/bson/dotted_path_support.h" -#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/value.h" diff --git a/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp.h b/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp.h index 9f0a70f40bbfd..50fb6951c22a6 100644 --- a/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp.h +++ b/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/collation/collator_interface.h" diff --git a/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp_test.cpp b/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp_test.cpp index 35e9cf8272c0b..2b8a53d125f97 100644 --- a/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp_test.cpp +++ b/src/mongo/db/exec/sbe/values/sbe_pattern_value_cmp_test.cpp @@ -29,14 +29,20 @@ #include "mongo/db/exec/sbe/values/sbe_pattern_value_cmp.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/docval_to_sbeval.h" -#include "mongo/db/exec/sbe/sbe_plan_stage_test.h" -#include "mongo/db/jsobj.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" 
-#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe::value { namespace { diff --git a/src/mongo/db/exec/sbe/values/slot.h b/src/mongo/db/exec/sbe/values/slot.h index fdbf99605b7be..e457705fa47ad 100644 --- a/src/mongo/db/exec/sbe/values/slot.h +++ b/src/mongo/db/exec/sbe/values/slot.h @@ -70,6 +70,11 @@ class SlotAccessor { * make a deep copy. The returned value is owned by the caller. */ virtual std::pair copyOrMoveValue() = 0; + + template + bool is() const { + return dynamic_cast(this) != nullptr; + } }; /** @@ -402,7 +407,7 @@ typedef SingleRowAccessor MaterializedSingleRowAccessor; * might reference it. */ void readKeyStringValueIntoAccessors( - const KeyString::Value& keyString, + const key_string::Value& keyString, const Ordering& ordering, BufBuilder* valueBufferBuilder, std::vector* accessors, diff --git a/src/mongo/db/exec/sbe/values/slot_printer.cpp b/src/mongo/db/exec/sbe/values/slot_printer.cpp index 6efdf05d3ed38..9f40bcd455138 100644 --- a/src/mongo/db/exec/sbe/values/slot_printer.cpp +++ b/src/mongo/db/exec/sbe/values/slot_printer.cpp @@ -27,10 +27,9 @@ * it in the license file. */ -#include +#include #include "mongo/db/exec/sbe/values/slot_printer.h" -#include "mongo/platform/basic.h" namespace mongo::sbe::value { diff --git a/src/mongo/db/exec/sbe/values/slot_printer.h b/src/mongo/db/exec/sbe/values/slot_printer.h index f681874bdeb26..790d36d9b7d36 100644 --- a/src/mongo/db/exec/sbe/values/slot_printer.h +++ b/src/mongo/db/exec/sbe/values/slot_printer.h @@ -32,6 +32,7 @@ #include #include "mongo/db/exec/sbe/util/print_options.h" +#include "mongo/db/exec/sbe/values/row.h" #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/values/value_printer.h" diff --git a/src/mongo/db/exec/sbe/values/slot_printer_test.cpp b/src/mongo/db/exec/sbe/values/slot_printer_test.cpp index aca1200227975..5ba83337e940d 100644 --- a/src/mongo/db/exec/sbe/values/slot_printer_test.cpp +++ b/src/mongo/db/exec/sbe/values/slot_printer_test.cpp @@ -28,7 +28,13 @@ */ #include "mongo/db/exec/sbe/values/slot_printer.h" -#include "mongo/unittest/unittest.h" + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe::value { diff --git a/src/mongo/db/exec/sbe/values/sort_spec.h b/src/mongo/db/exec/sbe/values/sort_spec.h index fad29a4f09f6b..e421bab7888a5 100644 --- a/src/mongo/db/exec/sbe/values/sort_spec.h +++ b/src/mongo/db/exec/sbe/values/sort_spec.h @@ -59,7 +59,7 @@ class SortSpec { SortSpec& operator=(const SortSpec&) = delete; - KeyString::Value generateSortKey(const BSONObj& obj, const CollatorInterface* collator); + key_string::Value generateSortKey(const BSONObj& obj, const CollatorInterface* collator); /* @@ -77,6 +77,15 @@ class SortSpec { value::SortKeyComponentVector* generateSortKeyComponentVector( FastTuple obj, const CollatorInterface* collator); + /** + * Compare an array of values based on the sort pattern. 
+ */ + std::pair compare(TypeTags leftTag, + Value leftVal, + TypeTags rightTag, + Value rightVal, + const CollatorInterface* collator = nullptr) const; + const BSONObj& getPattern() const { return _sortPatternBson; } diff --git a/src/mongo/db/exec/sbe/values/value.cpp b/src/mongo/db/exec/sbe/values/value.cpp index 3f2b1bd7f638c..d4e150c46735e 100644 --- a/src/mongo/db/exec/sbe/values/value.cpp +++ b/src/mongo/db/exec/sbe/values/value.cpp @@ -27,21 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include -#include "mongo/db/exec/sbe/values/value.h" +#include #include "mongo/base/compare_numbers.h" +#include "mongo/base/string_data_comparator_interface.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/exec/js_function.h" #include "mongo/db/exec/sbe/makeobj_spec.h" -#include "mongo/db/exec/sbe/size_estimator.h" +#include "mongo/db/exec/sbe/util/print_options.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/sort_spec.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/values/value_builder.h" #include "mongo/db/exec/sbe/values/value_printer.h" +#include "mongo/db/index/btree_key_generator.h" +#include "mongo/db/index/sort_key_generator.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/query/sort_pattern.h" #include "mongo/db/storage/key_string.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/duration.h" #include "mongo/util/errno_util.h" #include "mongo/util/pcre_util.h" @@ -131,9 +150,9 @@ std::pair makeNewBsonCodeWScope(StringData code, const char* sc return {TypeTags::bsonCodeWScope, bitcastFrom(buffer.release())}; } -std::pair makeCopyKeyString(const KeyString::Value& inKey) { - auto k = new KeyString::Value(inKey); - return {TypeTags::ksValue, bitcastFrom(k)}; +std::pair makeCopyKeyString(const key_string::Value& inKey) { + auto k = new key_string::Value(inKey); + return {TypeTags::ksValue, bitcastFrom(k)}; } std::pair makeNewPcreRegex(StringData pattern, StringData options) { @@ -153,7 +172,7 @@ std::pair makeCopyTimeZone(const TimeZone& tz) { return {TypeTags::timeZone, bitcastFrom(tzCopy.release())}; } -KeyString::Value SortSpec::generateSortKey(const BSONObj& obj, const CollatorInterface* collator) { +key_string::Value SortSpec::generateSortKey(const BSONObj& obj, const CollatorInterface* collator) { _sortKeyGen.setCollator(collator); return _sortKeyGen.computeSortKeyString(obj); } @@ -199,6 +218,50 @@ value::SortKeyComponentVector* SortSpec::generateSortKeyComponentVector( return &_localSortKeyComponentStorage; } +std::pair SortSpec::compare(TypeTags leftTag, + Value leftVal, + TypeTags rightTag, + Value rightVal, + const CollatorInterface* collator) const { + if (_sortPattern.size() == 1) { + auto [cmpTag, cmpVal] = compareValue(leftTag, leftVal, rightTag, rightVal, collator); + if (cmpTag == TypeTags::NumberInt32) { + auto sign = _sortPattern[0].isAscending ? 
1 : -1; + cmpVal = bitcastFrom(bitcastTo(cmpVal) * sign); + return {cmpTag, cmpVal}; + } else { + return {TypeTags::Nothing, 0}; + } + } + + if (leftTag != TypeTags::Array || rightTag != TypeTags::Array) { + return {TypeTags::Nothing, 0}; + } + auto leftArray = getArrayView(leftVal); + auto rightArray = getArrayView(rightVal); + if (leftArray->size() != _sortPattern.size() || rightArray->size() != _sortPattern.size()) { + return {TypeTags::Nothing, 0}; + } + + for (size_t i = 0; i < _sortPattern.size(); i++) { + auto [leftElemTag, leftElemVal] = leftArray->getAt(i); + auto [rightElemTag, rightElemVal] = rightArray->getAt(i); + auto [cmpTag, cmpVal] = + compareValue(leftElemTag, leftElemVal, rightElemTag, rightElemVal, collator); + if (cmpTag == TypeTags::NumberInt32) { + if (cmpVal != 0) { + auto sign = _sortPattern[i].isAscending ? 1 : -1; + cmpVal = bitcastFrom(bitcastTo(cmpVal) * sign); + return {cmpTag, cmpVal}; + } + } else { + return {TypeTags::Nothing, 0}; + } + } + + return {TypeTags::NumberInt32, 0}; +} + BtreeKeyGenerator SortSpec::initKeyGen() const { tassert(5037003, "SortSpec should not be passed an empty sort pattern", @@ -219,7 +282,7 @@ BtreeKeyGenerator SortSpec::initKeyGen() const { } const bool isSparse = false; - auto version = KeyString::Version::kLatestVersion; + auto version = key_string::Version::kLatestVersion; auto ordering = Ordering::make(_sortPatternBson); return {std::move(fields), std::move(fixed), isSparse, version, ordering}; @@ -371,6 +434,11 @@ str::stream& operator<<(str::stream& str, const std::pair& valu ValuePrinters::make(str, PrintOptions()).writeValueToStream(value.first, value.second); return str; } +std::string print(const std::pair& value) { + auto stream = str::stream(); + stream << value; + return stream; +} BSONType tagToType(TypeTags tag) noexcept { switch (tag) { @@ -935,7 +1003,7 @@ StringData ObjectEnumerator::getFieldName() const { } } -void readKeyStringValueIntoAccessors(const KeyString::Value& keyString, +void readKeyStringValueIntoAccessors(const key_string::Value& keyString, const Ordering& ordering, BufBuilder* valueBufferBuilder, std::vector* accessors, @@ -944,8 +1012,8 @@ void readKeyStringValueIntoAccessors(const KeyString::Value& keyString, invariant(!indexKeysToInclude || indexKeysToInclude->count() == accessors->size()); BufReader reader(keyString.getBuffer(), keyString.getSize()); - KeyString::TypeBits typeBits(keyString.getTypeBits()); - KeyString::TypeBits::Reader typeBitsReader(typeBits); + key_string::TypeBits typeBits(keyString.getTypeBits()); + key_string::TypeBits::Reader typeBitsReader(typeBits); bool keepReading = true; size_t componentIndex = 0; @@ -957,7 +1025,7 @@ void readKeyStringValueIntoAccessors(const KeyString::Value& keyString, ? (ordering.get(componentIndex) == -1) : false; - keepReading = KeyString::readSBEValue( + keepReading = key_string::readSBEValue( &reader, &typeBitsReader, inverted, typeBits.version, &valBuilder); invariant(componentIndex < Ordering::kMaxCompoundIndexKeys || !keepReading); @@ -965,7 +1033,7 @@ void readKeyStringValueIntoAccessors(const KeyString::Value& keyString, // If 'indexKeysToInclude' indicates that this index key component is not part of the // projection, remove it from the list of values that will be fed to the 'accessors' // list. 
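For readers skimming the hunk above: the new `SortSpec::compare()` helper returns a three-way result as a `NumberInt32` (negative, zero, or positive) with the sign flipped for descending components, and `Nothing` when the inputs do not match the sort pattern's shape. A minimal, test-style usage sketch, modeled on the `SortSpecCompare*` unit tests added later in this diff (not new API, just a condensed illustration):

```cpp
// Sketch only: mirrors SortSpecCompareSingleValueDsc from value_test.cpp below.
value::SortSpec sortSpec(BSON("x" << -1));  // single-key, descending pattern

auto tag1 = value::TypeTags::NumberInt32;
auto val1 = value::bitcastFrom<int32_t>(1);
auto tag2 = value::TypeTags::NumberInt32;
auto val2 = value::bitcastFrom<int32_t>(2);

// The raw comparison of 1 vs 2 is -1; the descending direction flips it to +1.
auto [cmpTag, cmpVal] = sortSpec.compare(tag1, val1, tag2, val2);
ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32);
ASSERT_EQ(value::bitcastTo<int32_t>(cmpVal), 1);

// A compound pattern (e.g. {x: 1, y: -1}) instead expects both sides to be Arrays whose
// size matches the pattern; any shape mismatch yields {Nothing, 0}.
```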
Note that, even when we are excluding a key component, we can't skip the call - // to 'KeyString::readSBEValue()' because it is needed to advance the 'reader' and + // to 'key_string::readSBEValue()' because it is needed to advance the 'reader' and // 'typeBitsReader' stream. if (indexKeysToInclude && (componentIndex < Ordering::kMaxCompoundIndexKeys) && !(*indexKeysToInclude)[componentIndex]) { diff --git a/src/mongo/db/exec/sbe/values/value.h b/src/mongo/db/exec/sbe/values/value.h index 5107bbdc5e3e0..c80f40331c3d8 100644 --- a/src/mongo/db/exec/sbe/values/value.h +++ b/src/mongo/db/exec/sbe/values/value.h @@ -31,31 +31,51 @@ #include #include +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +// IWYU pragma: no_include "boost/predef/hardware/simd/x86.h" +// IWYU pragma: no_include "boost/predef/hardware/simd/x86/versions.h" +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +// IWYU pragma: no_include "emmintrin.h" +#include #include #include -#include +#include #include +#include +#include +#include #include #include +#include #include #include #include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/ordering.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/shard_filterer.h" #include "mongo/db/fts/fts_matcher.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/query/bson_typemask.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" #include "mongo/platform/bits.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/decimal128.h" #include "mongo/platform/endian.h" #include "mongo/util/assert_util.h" #include "mongo/util/pcre.h" #include "mongo/util/represent_as.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/str.h" namespace mongo { /** @@ -68,7 +88,6 @@ class Value; } class TimeZoneDatabase; - class TimeZone; class JsFunction; @@ -169,7 +188,7 @@ enum class TypeTags : uint8_t { // Local lambda value LocalLambda, - // KeyString::Value + // key_string::Value ksValue, // Pointer to a compiled PCRE regular expression object. @@ -316,6 +335,7 @@ std::ostream& operator<<(std::ostream& os, TypeTags tag); str::stream& operator<<(str::stream& str, TypeTags tag); std::ostream& operator<<(std::ostream& os, const std::pair& value); str::stream& operator<<(str::stream& str, const std::pair& value); +std::string print(const std::pair& value); /** * Three ways value comparison (aka spaceship operator). @@ -845,6 +865,13 @@ class Array { } } + void pop_back() { + if (_vals.size() > 0) { + releaseValue(_vals.back().first, _vals.back().second); + _vals.pop_back(); + } + } + auto size() const noexcept { return _vals.size(); } @@ -857,6 +884,17 @@ class Array { return _vals[idx]; } + std::pair swapAt(std::size_t idx, TypeTags tag, Value val) { + if (idx >= _vals.size() || tag == TypeTags::Nothing) { + return {TypeTags::Nothing, 0}; + } + + auto ret = _vals[idx]; + _vals[idx].first = tag; + _vals[idx].second = val; + return ret; + } + auto& values() const noexcept { return _vals; } @@ -950,7 +988,7 @@ class ArraySet { ValueSetType _values; }; -/* +/** * A vector of values representing a sort key. The values are NOT owned by this object. 
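The `Array::swapAt()` method added in this hunk is the ownership-transfer primitive that the new accumulator builtins further down (`aggFirstNMerge`, `aggLastNFinalize`, `aggTopBottomN*`) lean on. A minimal sketch of the idiom, using only `value::` helpers that already appear elsewhere in this diff:

```cpp
// Sketch of the swapAt() ownership-transfer idiom: park a cheap placeholder (Null) in the
// slot and walk away owning the element that used to live there, with no deep copy.
auto [arrTag, arrVal] = value::makeNewArray();
value::ValueGuard arrGuard{arrTag, arrVal};
auto* arr = value::getArrayView(arrVal);

auto [strTag, strVal] = value::makeBigString("owned by the array");
arr->push_back(strTag, strVal);  // the array now owns the string

// Take element 0 out: the array is left holding Null at index 0, and the caller owns the
// returned {tag, value} pair (released here by the guard).
auto [takenTag, takenVal] = arr->swapAt(0, value::TypeTags::Null, 0);
value::ValueGuard takenGuard{takenTag, takenVal};
```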
*/ struct SortKeyComponentVector { @@ -1254,8 +1292,8 @@ inline std::pair makeCopyDecimal(const Decimal128& inD) { return {TypeTags::NumberDecimal, reinterpret_cast(valueBuffer)}; } -inline KeyString::Value* getKeyStringView(Value val) noexcept { - return reinterpret_cast(val); +inline key_string::Value* getKeyStringView(Value val) noexcept { + return reinterpret_cast(val); } std::pair makeNewPcreRegex(StringData pattern, StringData options); @@ -1420,7 +1458,7 @@ inline std::pair makeCopyBsonCodeWScope(const BsonCodeWScope& c return makeNewBsonCodeWScope(cws.code, cws.scope); } -std::pair makeCopyKeyString(const KeyString::Value& inKey); +std::pair makeCopyKeyString(const key_string::Value& inKey); std::pair makeCopyJsFunction(const JsFunction&); diff --git a/src/mongo/db/exec/sbe/values/value_builder.h b/src/mongo/db/exec/sbe/values/value_builder.h index 184e9ce6d89cc..474297abbb4a8 100644 --- a/src/mongo/db/exec/sbe/values/value_builder.h +++ b/src/mongo/db/exec/sbe/values/value_builder.h @@ -57,7 +57,7 @@ namespace mongo::sbe::value { * The 'valueBufferBuilder' is _not_ owned by the ValueBuilder class, so that the caller can reuse * it without freeing and then reallocating its memory. * - * NB: The ValueBuilder is specifically intended to adapt KeyString::Value conversion, which + * NB: The ValueBuilder is specifically intended to adapt key_string::Value conversion, which * operates by appending results to a BSONObjBuilder, to instead convert to SBE values. It is not * intended as a general-purpose tool for populating SBE accessors, and no new code should construct * or use a ValueBuilder. @@ -191,8 +191,11 @@ class ValueBuilder { virtual size_t numValues() const = 0; protected: + // We expect most rows to end up containing this many values or fewer. + static constexpr int kInlinedVectorSize = 16; + std::pair getValue(size_t index, int bufferLen) { - invariant(index < _numValues); + invariant(index < _tagList.size()); auto tag = _tagList[index]; auto val = _valList[index]; @@ -224,9 +227,8 @@ class ValueBuilder { } void appendValue(TypeTags tag, Value val) noexcept { - _tagList[_numValues] = tag; - _valList[_numValues] = val; - ++_numValues; + _tagList.push_back(tag); + _valList.push_back(val); } void appendValue(std::pair in) noexcept { @@ -241,14 +243,12 @@ class ValueBuilder { // storing a pointer, we store an _offset_ into the under-construction buffer. Translation from // offset to pointer occurs as part of the 'readValues()' function. void appendValueBufferOffset(TypeTags tag) { - _tagList[_numValues] = tag; - _valList[_numValues] = value::bitcastFrom(_valueBufferBuilder->len()); - ++_numValues; + _tagList.push_back(tag); + _valList.push_back(value::bitcastFrom(_valueBufferBuilder->len())); } - std::array _tagList; - std::array _valList; - size_t _numValues = 0; + absl::InlinedVector _tagList; + absl::InlinedVector _valList; BufBuilder* _valueBufferBuilder; }; @@ -270,11 +270,12 @@ class OwnedValueAccessorValueBuilder : public ValueBuilder { // buffer, this value will remain in that buffer, even though we've removed it from the // list. It will still get deallocated along with everything else when that buffer gets // cleared or deleted, though, so there is no leak. 
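The storage change above (a fixed `std::array` plus a manual `_numValues` counter replaced by `absl::InlinedVector`) is what lets the builder handle rows wider than the old fixed capacity; the `RoundtripWideRow` test added later in this diff exercises exactly that with 40 components. A standalone sketch of the container behavior being relied on (plain `int` elements here for brevity):

```cpp
// Small-buffer vector sketch: stays in its inline storage for up to 16 elements (matching
// kInlinedVectorSize above) and spills to the heap transparently beyond that.
#include <cassert>

#include <absl/container/inlined_vector.h>

int main() {
    absl::InlinedVector<int, 16> vals;
    for (int i = 0; i < 40; ++i) {  // wider than the old fixed-size array would allow
        vals.push_back(i);
    }
    assert(vals.size() == 40);

    vals.pop_back();  // popValue() now maps onto this instead of decrementing a counter
    assert(vals.size() == 39);
    return 0;
}
```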
- --_numValues; + _tagList.pop_back(); + _valList.pop_back(); } size_t numValues() const override { - return _numValues; + return _tagList.size(); } /** @@ -284,7 +285,7 @@ class OwnedValueAccessorValueBuilder : public ValueBuilder { */ void readValues(std::vector* accessors) { auto bufferLen = _valueBufferBuilder->len(); - for (size_t i = 0; i < _numValues; ++i) { + for (size_t i = 0; i < _tagList.size(); ++i) { auto [tag, val] = getValue(i, bufferLen); invariant(i < accessors->size()); (*accessors)[i].reset(false, tag, val); @@ -304,7 +305,7 @@ class RowValueBuilder : public ValueBuilder { size_t numValues() const override { size_t nVals = 0; size_t bufIdx = 0; - while (bufIdx < _numValues) { + while (bufIdx < _tagList.size()) { auto tag = _tagList[bufIdx]; auto val = _valList[bufIdx]; if (tag == TypeTags::Boolean && !bitcastTo(val)) { @@ -353,9 +354,9 @@ class RowValueBuilder : public ValueBuilder { } case TypeTags::ksValue: { // Read the KeyString size after the 'sbeTag' byte. This gets written to the - // buffer in 'KeyString::Value::serialize'. + // buffer in 'key_string::Value::serialize'. auto ks = - KeyString::Value::deserialize(buf, KeyString::Version::kLatestVersion); + key_string::Value::deserialize(buf, key_string::Version::kLatestVersion); auto [ksTag, ksVal] = makeCopyKeyString(ks); return {true, ksTag, ksVal}; } diff --git a/src/mongo/db/exec/sbe/values/value_printer.cpp b/src/mongo/db/exec/sbe/values/value_printer.cpp index a1ff2e5d99b5b..1babbbd4e7f7c 100644 --- a/src/mongo/db/exec/sbe/values/value_printer.cpp +++ b/src/mongo/db/exec/sbe/values/value_printer.cpp @@ -27,12 +27,32 @@ * it in the license file. */ #include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/exec/sbe/makeobj_spec.h" #include "mongo/db/exec/sbe/values/sort_spec.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/values/value_printer.h" -#include "mongo/platform/basic.h" +#include "mongo/db/fts/fts_matcher.h" +#include "mongo/db/fts/fts_query_impl.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/hex.h" +#include "mongo/util/pcre.h" #include "mongo/util/pcre_util.h" namespace mongo::sbe::value { diff --git a/src/mongo/db/exec/sbe/values/value_printer.h b/src/mongo/db/exec/sbe/values/value_printer.h index 42b5f99ab8639..1a0bd5463c403 100644 --- a/src/mongo/db/exec/sbe/values/value_printer.h +++ b/src/mongo/db/exec/sbe/values/value_printer.h @@ -29,10 +29,14 @@ #pragma once +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/util/print_options.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/platform/basic.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/exec/sbe/values/value_serialization_test.cpp b/src/mongo/db/exec/sbe/values/value_serialization_test.cpp index 47cdb3fef89b9..9628c31e789d4 100644 --- a/src/mongo/db/exec/sbe/values/value_serialization_test.cpp +++ b/src/mongo/db/exec/sbe/values/value_serialization_test.cpp @@ -27,12 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/values/bson.h" -#include "mongo/db/exec/sbe/values/slot.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/sbe/values/row.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/shared_buffer.h" namespace mongo::sbe { /** @@ -142,7 +161,7 @@ TEST(ValueSerializeForSorter, Serialize) { value::bitcastFrom(bson["binDataDeprecated"].value())); testData->push_back(bsonBinDataDeprecatedTag, bsonBinDataDeprecatedVal); - KeyString::Builder keyStringBuilder(KeyString::Version::V1); + key_string::Builder keyStringBuilder(key_string::Version::V1); keyStringBuilder.appendNumberLong(1); keyStringBuilder.appendNumberLong(2); keyStringBuilder.appendNumberLong(3); @@ -209,7 +228,7 @@ class ValueSerializeForKeyString : public mongo::unittest::Test { sourceRow.reset(idx++, false, tag, val); } - KeyString::Builder kb{KeyString::Version::kLatestVersion}; + key_string::Builder kb{key_string::Version::kLatestVersion}; sourceRow.serializeIntoKeyString(kb); auto ks = kb.getValueCopy(); @@ -388,7 +407,7 @@ TEST_F(ValueSerializeForKeyString, BsonBinData) { } TEST_F(ValueSerializeForKeyString, KeyString) { - KeyString::Builder keyStringBuilder(KeyString::Version::V1); + key_string::Builder keyStringBuilder(key_string::Version::V1); keyStringBuilder.appendNumberLong(1); keyStringBuilder.appendNumberLong(2); keyStringBuilder.appendNumberLong(3); @@ -454,4 +473,17 @@ TEST_F(ValueSerializeForKeyString, BsonCodeWScope) { runTest({{cwsTag1, cwsVal1}, {cwsTag2, cwsVal2}, {cwsTag3, cwsVal3}}); } + +// Test that roundtripping through KeyString works for a wide row. KeyStrings used in indexes are +// typically constrained in the number of components they can have, since we limit compound indexes +// to at most 32 components. But roundtripping rows wider than 32 still needs to work. +// +// This test was originally designed to reproduce SERVER-76321. +TEST_F(ValueSerializeForKeyString, RoundtripWideRow) { + std::vector> row; + for (int32_t i = 0; i < 40; ++i) { + row.emplace_back(sbe::value::TypeTags::NumberInt32, sbe::value::bitcastFrom(i)); + } + runTest(row); +} } // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/values/value_test.cpp b/src/mongo/db/exec/sbe/values/value_test.cpp index 215e198876836..2f5a4f6b1304b 100644 --- a/src/mongo/db/exec/sbe/values/value_test.cpp +++ b/src/mongo/db/exec/sbe/values/value_test.cpp @@ -27,11 +27,22 @@ * it in the license file. 
*/ +#include +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/values/sort_spec.h" #include "mongo/db/exec/sbe/values/util.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/sbe_stage_builder_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::sbe { @@ -383,4 +394,143 @@ TEST_F(SbeValueTest, ArraySetForEachMoveIsDestructive) { ASSERT_EQ(arr1.size(), 0); ASSERT_EQ(arr2.size(), 2); } + +template +std::pair createArray(Args... args) { + auto [arrayTag, arrayVal] = value::makeNewArray(); + auto array = value::getArrayView(arrayVal); + for (const auto& [tag, val] : {args...}) { + array->push_back(tag, val); + } + return {arrayTag, arrayVal}; +} + +TEST_F(SbeValueTest, SortSpecCompareSingleValueAsc) { + auto sortSpecBson = BSON("x" << 1); + value::SortSpec sortSpec(sortSpecBson); + + auto [tag1, val1] = std::make_pair(value::TypeTags::NumberInt32, 1); + auto [tag2, val2] = std::make_pair(value::TypeTags::NumberInt32, 2); + + auto [cmpTag, cmpVal] = sortSpec.compare(tag1, val1, tag2, val2); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), -1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag2, val2, tag1, val1); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag1, val1, tag1, val1); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 0); +} + +TEST_F(SbeValueTest, SortSpecCompareSingleValueDsc) { + auto sortSpecBson = BSON("x" << -1); + value::SortSpec sortSpec(sortSpecBson); + + auto [tag1, val1] = std::make_pair(value::TypeTags::NumberInt32, 1); + auto [tag2, val2] = std::make_pair(value::TypeTags::NumberInt32, 2); + + auto [cmpTag, cmpVal] = sortSpec.compare(tag1, val1, tag2, val2); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag2, val2, tag1, val1); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), -1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag1, val1, tag1, val1); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 0); +} + +TEST_F(SbeValueTest, SortSpecCompareCollation) { + auto sortSpecBson = BSON("x" << 1); + value::SortSpec sortSpec(sortSpecBson); + + auto [tag1, val1] = value::makeBigString("12345678"); + value::ValueGuard guard1{tag1, val1}; + auto [tag2, val2] = value::makeBigString("87654321"); + value::ValueGuard guard2{tag2, val2}; + + auto collator = + std::make_unique(CollatorInterfaceMock::MockType::kReverseString); + + auto [cmpTag, cmpVal] = sortSpec.compare(tag1, val1, tag2, val2, collator.get()); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag2, val2, tag1, val1, collator.get()); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), -1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag1, val1, tag1, val1, collator.get()); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 0); +} + +TEST_F(SbeValueTest, SortSpecCompareMultiValueMix) { + auto sortSpecBson = BSON("x" 
<< 1 << "y" << -1); + value::SortSpec sortSpec(sortSpecBson); + + auto [tag11, val11] = + createArray(value::makeBigString("11111111"), value::makeBigString("11111111")); + value::ValueGuard guard11{tag11, val11}; + auto [tag12, val12] = + createArray(value::makeBigString("11111111"), value::makeBigString("22222222")); + value::ValueGuard guard12{tag12, val12}; + auto [tag21, val21] = + createArray(value::makeBigString("22222222"), value::makeBigString("11111111")); + value::ValueGuard guard21{tag21, val21}; + + auto [cmpTag, cmpVal] = sortSpec.compare(tag11, val11, tag21, val21); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), -1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag11, val11, tag12, val12); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag21, val21, tag11, val11); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag12, val12, tag11, val11); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), -1); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag11, val11, tag11, val11); + ASSERT_EQ(cmpTag, value::TypeTags::NumberInt32); + ASSERT_EQ(value::bitcastTo(cmpVal), 0); +} + +TEST_F(SbeValueTest, SortSpecCompareInvalid) { + auto sortSpecBson = BSON("x" << 1 << "y" << -1); + value::SortSpec sortSpec(sortSpecBson); + + auto [tag1, val1] = + createArray(value::makeBigString("11111111"), value::makeBigString("11111111")); + value::ValueGuard guard1{tag1, val1}; + auto [tag2, val2] = createArray(value::makeBigString("11111111"), + value::makeBigString("11111111"), + value::makeBigString("11111111")); + value::ValueGuard guard2{tag2, val2}; + + auto [cmpTag, cmpVal] = sortSpec.compare(value::TypeTags::NumberInt32, 0, tag1, val1); + ASSERT_EQ(cmpTag, value::TypeTags::Nothing); + ASSERT_EQ(cmpVal, 0); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag1, val1, value::TypeTags::NumberInt32, 0); + ASSERT_EQ(cmpTag, value::TypeTags::Nothing); + ASSERT_EQ(cmpVal, 0); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag1, val1, tag2, val2); + ASSERT_EQ(cmpTag, value::TypeTags::Nothing); + ASSERT_EQ(cmpVal, 0); + + std::tie(cmpTag, cmpVal) = sortSpec.compare(tag2, val2, tag1, val1); + ASSERT_EQ(cmpTag, value::TypeTags::Nothing); + ASSERT_EQ(cmpVal, 0); +} } // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/values/write_value_to_stream_test.cpp b/src/mongo/db/exec/sbe/values/write_value_to_stream_test.cpp index dc03009f3f26b..5104ad126301e 100644 --- a/src/mongo/db/exec/sbe/values/write_value_to_stream_test.cpp +++ b/src/mongo/db/exec/sbe/values/write_value_to_stream_test.cpp @@ -31,9 +31,24 @@ * This file contains tests for sbe::value::writeValueToStream. 
*/ +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/db/exec/sbe/util/print_options.h" #include "mongo/db/exec/sbe/values/value.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/hex.h" constexpr char kStringShort[] = "this is a short string!"; diff --git a/src/mongo/db/exec/sbe/vm/arith.cpp b/src/mongo/db/exec/sbe/vm/arith.cpp index 2c3d4e55c6b56..ae938c88313e9 100644 --- a/src/mongo/db/exec/sbe/vm/arith.cpp +++ b/src/mongo/db/exec/sbe/vm/arith.cpp @@ -27,15 +27,30 @@ * it in the license file. */ -#include "mongo/db/exec/sbe/vm/vm.h" - +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/accumulator_sum_value_enum.h" #include "mongo/db/exec/sbe/values/arith_common.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/platform/decimal128.h" #include "mongo/platform/overflow_arithmetic.h" +#include "mongo/util/assert_util.h" #include "mongo/util/represent_as.h" +#include "mongo/util/str.h" #include "mongo/util/summation.h" -#include "mongo/util/time_support.h" namespace mongo { namespace sbe { @@ -770,35 +785,6 @@ FastTuple ByteCode::genericFloor(value::Typ return {false, value::TypeTags::Nothing, 0}; } -FastTuple ByteCode::genericTrunc(value::TypeTags operandTag, - value::Value operandValue) { - if (!isNumber(operandTag)) { - return {false, value::TypeTags::Nothing, 0}; - } - - switch (operandTag) { - case value::TypeTags::NumberDouble: { - auto truncatedValue = std::trunc(value::bitcastTo(operandValue)); - return { - false, value::TypeTags::NumberDouble, value::bitcastFrom(truncatedValue)}; - } - case value::TypeTags::NumberDecimal: { - auto value = value::bitcastTo(operandValue); - if (!value.isNaN() && value.isFinite()) { - value = value.quantize(Decimal128::kNormalizedZero, Decimal128::kRoundTowardZero); - } - auto [resultTag, resultValue] = value::makeCopyDecimal(value); - return {true, resultTag, resultValue}; - } - case value::TypeTags::NumberInt32: - case value::TypeTags::NumberInt64: - // Trunc on integer values is the identity function. - return {false, operandTag, operandValue}; - default: - MONGO_UNREACHABLE; - } -} - FastTuple ByteCode::genericExp(value::TypeTags operandTag, value::Value operandValue) { switch (operandTag) { diff --git a/src/mongo/db/exec/sbe/vm/datetime.cpp b/src/mongo/db/exec/sbe/vm/datetime.cpp index b4363ca0de492..602e84fbb4e29 100644 --- a/src/mongo/db/exec/sbe/vm/datetime.cpp +++ b/src/mongo/db/exec/sbe/vm/datetime.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/db/exec/sbe/vm/vm.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/vm/datetime.h" +#include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/query/datetime/date_time_support.h" -#include "mongo/util/represent_as.h" +#include "mongo/util/assert_util.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/exec/sbe/vm/datetime.h b/src/mongo/db/exec/sbe/vm/datetime.h index a212a521b9857..ce37f48693755 100644 --- a/src/mongo/db/exec/sbe/vm/datetime.h +++ b/src/mongo/db/exec/sbe/vm/datetime.h @@ -32,6 +32,7 @@ #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/util/time_support.h" namespace mongo { namespace sbe { diff --git a/src/mongo/db/exec/sbe/vm/vm.cpp b/src/mongo/db/exec/sbe/vm/vm.cpp index bbf03060e3abb..1b9991e3f5791 100644 --- a/src/mongo/db/exec/sbe/vm/vm.cpp +++ b/src/mongo/db/exec/sbe/vm/vm.cpp @@ -27,42 +27,81 @@ * it in the license file. */ -#include - -#include "mongo/config.h" -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sbe/expressions/expression.h" -#include "mongo/db/exec/sbe/vm/vm.h" -#include "mongo/db/exec/sbe/vm/vm_printer.h" - -#include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/oid.h" -#include "mongo/db/client.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/js_function.h" #include "mongo/db/exec/sbe/accumulator_sum_value_enum.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/makeobj_spec.h" #include "mongo/db/exec/sbe/values/arith_common.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/column_store_encoder.h" #include "mongo/db/exec/sbe/values/columnar.h" +#include "mongo/db/exec/sbe/values/row.h" #include "mongo/db/exec/sbe/values/sbe_pattern_value_cmp.h" #include "mongo/db/exec/sbe/values/sort_spec.h" #include "mongo/db/exec/sbe/values/util.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/datetime.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/exec/sbe/vm/vm_printer.h" +#include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/fts/fts_matcher.h" #include "mongo/db/hasher.h" -#include "mongo/db/index/btree_key_generator.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/str_trim_utils.h" +#include "mongo/db/storage/column_store.h" #include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include 
"mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" -#include "mongo/util/fail_point.h" +#include "mongo/util/errno_util.h" +#include "mongo/util/indexed_string_vector.h" #include "mongo/util/pcre.h" +#include "mongo/util/shared_buffer.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" #include "mongo/util/summation.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -77,6 +116,8 @@ namespace vm { int Instruction::stackOffset[Instruction::Tags::lastInstruction] = { 1, // pushConstVal 1, // pushAccessVal + 1, // pushOwnedAccessorVal + 1, // pushEnvAccessorVal 1, // pushMoveVal 1, // pushLocalVal 1, // pushMoveLocalVal @@ -480,8 +521,15 @@ void CodeFragment::appendConstVal(value::TypeTags tag, value::Value val) { void CodeFragment::appendAccessVal(value::SlotAccessor* accessor) { Instruction i; - i.tag = Instruction::pushAccessVal; + i.tag = [](value::SlotAccessor* accessor) { + if (accessor->is()) { + return Instruction::pushOwnedAccessorVal; + } else if (accessor->is()) { + return Instruction::pushEnvAccessorVal; + } + return Instruction::pushAccessVal; + }(accessor); auto offset = allocateSpace(sizeof(Instruction) + sizeof(accessor)); offset += writeToMemory(offset, i); @@ -2027,9 +2075,8 @@ FastTuple ByteCode::builtinNewArray(ArityTy if (arity) { arr->reserve(arity); for (ArityType idx = 0; idx < arity; ++idx) { - auto [owned, tag, val] = getFromStack(idx); - auto [tagCopy, valCopy] = value::copyValue(tag, val); - arr->push_back(tagCopy, valCopy); + auto [tag, val] = moveOwnedFromStack(idx); + arr->push_back(tag, val); } } @@ -2216,8 +2263,8 @@ FastTuple ByteCode::genericNewKeyString( return {false, value::TypeTags::Nothing, 0}; } - auto ksVersion = static_cast(version); - auto ksDiscriminator = static_cast(discriminator); + auto ksVersion = static_cast(version); + auto ksDiscriminator = static_cast(discriminator); uint32_t orderingBits = value::numericCast(tagOrdering, valOrdering); BSONObjBuilder bb; @@ -2225,7 +2272,7 @@ FastTuple ByteCode::genericNewKeyString( bb.append(""_sd, (orderingBits & 1) ? 
-1 : 1); } - KeyString::HeapBuilder kb{ksVersion, Ordering::make(bb.done())}; + key_string::HeapBuilder kb{ksVersion, Ordering::make(bb.done())}; const auto stringTransformFn = [&](StringData stringData) { return collator->getComparisonString(stringData); @@ -2386,7 +2433,7 @@ FastTuple ByteCode::genericNewKeyString( return {true, value::TypeTags::ksValue, - value::bitcastFrom(new KeyString::Value(kb.release()))}; + value::bitcastFrom(new key_string::Value(kb.release()))}; } FastTuple ByteCode::builtinNewKeyString(ArityType arity) { @@ -2434,11 +2481,7 @@ FastTuple ByteCode::builtinFloor(ArityType } FastTuple ByteCode::builtinTrunc(ArityType arity) { - invariant(arity == 1); - - auto [_, tagOperand, valOperand] = getFromStack(0); - - return genericTrunc(tagOperand, valOperand); + return genericRoundTrunc("$trunc", Decimal128::kRoundTowardZero, arity); } FastTuple ByteCode::builtinExp(ArityType arity) { @@ -3683,6 +3726,22 @@ FastTuple ByteCode::builtinBsonSize(ArityTy return {false, value::TypeTags::Nothing, 0}; } +FastTuple ByteCode::builtinStrLenBytes(ArityType arity) { + invariant(arity == 1); + + auto [_, operandTag, operandVal] = getFromStack(0); + + if (value::isString(operandTag)) { + auto str = value::getStringView(operandTag, operandVal); + auto strLenBytes = str.size(); + uassert(5155801, + "string length could not be represented as an int.", + strLenBytes <= std::numeric_limits::max()); + return {false, value::TypeTags::NumberInt32, strLenBytes}; + } + return {false, value::TypeTags::Nothing, 0}; +} + FastTuple ByteCode::builtinToUpper(ArityType arity) { auto [_, operandTag, operandVal] = getFromStack(0); @@ -3757,7 +3816,7 @@ FastTuple ByteCode::builtinCoerceToString(A case value::TypeTags::Date: { std::string str = str::stream() << TimeZoneDatabase::utcZone().formatDate( - kISOFormatString, + kIsoFormatStringZ, Date_t::fromMillisSinceEpoch(value::bitcastTo(operandVal))); auto [strTag, strVal] = value::makeNewString(str); return {true, strTag, strVal}; @@ -3877,7 +3936,8 @@ static int32_t convertNumericToInt32(const value::TypeTags tag, const value::Val } } -FastTuple ByteCode::builtinRound(ArityType arity) { +FastTuple ByteCode::genericRoundTrunc( + std::string funcName, Decimal128::RoundingMode roundingMode, ArityType arity) { invariant(arity == 1 || arity == 2); int32_t place = 0; const auto [numOwn, numTag, numVal] = getFromStack(0); @@ -3896,7 +3956,7 @@ FastTuple ByteCode::builtinRound(ArityType case value::TypeTags::NumberDecimal: { auto dec = value::bitcastTo(numVal); if (!dec.isInfinite()) { - dec = dec.quantize(quantum, Decimal128::kRoundTiesToEven); + dec = dec.quantize(quantum, roundingMode); } auto [resultTag, resultValue] = value::makeCopyDecimal(dec); return {true, resultTag, resultValue}; @@ -3904,7 +3964,7 @@ FastTuple ByteCode::builtinRound(ArityType case value::TypeTags::NumberDouble: { auto asDec = Decimal128(value::bitcastTo(numVal), Decimal128::kRoundTo34Digits); if (!asDec.isInfinite()) { - asDec = asDec.quantize(quantum, Decimal128::kRoundTiesToEven); + asDec = asDec.quantize(quantum, roundingMode); } return { false, value::TypeTags::NumberDouble, value::bitcastFrom(asDec.toDouble())}; @@ -3917,11 +3977,11 @@ FastTuple ByteCode::builtinRound(ArityType auto numericArgll = numTag == value::TypeTags::NumberInt32 ? 
static_cast(value::bitcastTo(numVal)) : value::bitcastTo(numVal); - auto out = Decimal128(numericArgll).quantize(quantum, Decimal128::kRoundTiesToEven); + auto out = Decimal128(numericArgll).quantize(quantum, roundingMode); uint32_t flags = 0; auto outll = out.toLong(&flags); uassert(5155302, - "Invalid conversion to long during $round.", + "Invalid conversion to long during " + funcName + ".", !Decimal128::hasFlag(flags, Decimal128::kInvalid)); if (numTag == value::TypeTags::NumberInt64 || outll > std::numeric_limits::max()) { @@ -3937,6 +3997,10 @@ FastTuple ByteCode::builtinRound(ArityType } } +FastTuple ByteCode::builtinRound(ArityType arity) { + return genericRoundTrunc("$round", Decimal128::kRoundTiesToEven, arity); +} + FastTuple ByteCode::builtinConcat(ArityType arity) { StringBuilder result; for (ArityType idx = 0; idx < arity; ++idx) { @@ -3973,6 +4037,26 @@ FastTuple ByteCode::builtinConcatArrays(Ari return {true, resTag, resVal}; } +FastTuple ByteCode::builtinTrim(ArityType arity, + bool trimLeft, + bool trimRight) { + auto [ownedChars, tagChars, valChars] = getFromStack(1); + auto [ownedInput, tagInput, valInput] = getFromStack(0); + + if (!value::isString(tagInput)) { + return {false, value::TypeTags::Nothing, 0}; + } + + auto replacementChars = !value::isNullish(tagChars) + ? str_trim_utils::extractCodePointsFromChars(value::getStringView(tagChars, valChars)) + : str_trim_utils::kDefaultTrimWhitespaceChars; + auto inputString = value::getStringView(tagInput, valInput); + + auto [strTag, strValue] = sbe::value::makeNewString( + str_trim_utils::doTrim(inputString, replacementChars, trimLeft, trimRight)); + return {true, strTag, strValue}; +} + FastTuple ByteCode::builtinAggConcatArraysCapped( ArityType arity) { auto [ownArr, tagArr, valArr] = getFromStack(0); @@ -5263,8 +5347,8 @@ FastTuple ByteCode::builtinGenerateSortKey( return {true, value::TypeTags::ksValue, - value::bitcastFrom( - new KeyString::Value(sortSpec->generateSortKey(bsonObj, collator)))}; + value::bitcastFrom( + new key_string::Value(sortSpec->generateSortKey(bsonObj, collator)))}; } FastTuple ByteCode::builtinSortKeyComponentVectorGetElement( @@ -5286,88 +5370,210 @@ FastTuple ByteCode::builtinSortKeyComponent return {false, outTag, outVal}; } -std::pair ByteCode::produceBsonObject(const MakeObjSpec* mos, +FastTuple ByteCode::builtinSortKeyComponentVectorToArray( + ArityType arity) { + invariant(arity == 1); + + auto [sortVecOwned, sortVecTag, sortVecVal] = getFromStack(0); + if (sortVecTag != value::TypeTags::sortKeyComponentVector) { + return {false, value::TypeTags::Nothing, 0}; + } + auto* sortVec = value::getSortKeyComponentVectorView(sortVecVal); + + if (sortVec->elts.size() == 1) { + auto [tag, val] = sortVec->elts[0]; + auto [copyTag, copyVal] = value::copyValue(tag, val); + return {true, copyTag, copyVal}; + } else { + auto [arrayTag, arrayVal] = value::makeNewArray(); + value::ValueGuard arrayGuard{arrayTag, arrayVal}; + auto array = value::getArrayView(arrayVal); + array->reserve(sortVec->elts.size()); + for (size_t i = 0; i < sortVec->elts.size(); ++i) { + auto [tag, val] = sortVec->elts[i]; + auto [copyTag, copyVal] = value::copyValue(tag, val); + array->push_back(copyTag, copyVal); + } + arrayGuard.reset(); + return {true, arrayTag, arrayVal}; + } +} + +std::pair ByteCode::produceBsonObject(const MakeObjSpec* spec, value::TypeTags rootTag, value::Value rootVal, - const size_t startIdx) { - auto& fieldBehavior = mos->fieldBehavior; - auto& fieldsAndProjects = mos->fieldsAndProjects; - auto 
numFields = mos->numFields; + int stackOffset) { + auto& fieldNames = spec->fieldNames; + + const bool isInclusion = spec->fieldBehavior == MakeObjSpec::FieldBehavior::keep; + const size_t numFields = fieldNames.size(); + const size_t numKeepOrDrops = spec->numKeepOrDrops; + const size_t numComputedFields = numFields - numKeepOrDrops; + + // The "visited" array keeps track of which computed fields have been visited so far so that + // later we can append the non-visited computed fields to the end of the object. + char* visited = nullptr; + char localVisitedArr[64]; + std::unique_ptr allocatedVisitedArr; + if (MONGO_unlikely(numComputedFields > 64)) { + allocatedVisitedArr = std::make_unique(numComputedFields); + visited = allocatedVisitedArr.get(); + } else { + visited = &localVisitedArr[0]; + } - const bool isInclusion = fieldBehavior == MakeObjSpec::FieldBehavior::keep; + memset(visited, 0, numComputedFields); UniqueBSONObjBuilder bob; - if (value::isObject(rootTag)) { - size_t nFieldsIfInclusion = numFields; + size_t numFieldsRemaining = numFields; + size_t numComputedFieldsRemaining = numComputedFields; + + const size_t numFieldsRemainingThreshold = isInclusion ? 1 : 0; + if (value::isObject(rootTag)) { if (rootTag == value::TypeTags::bsonObject) { - if (!(nFieldsIfInclusion == 0 && isInclusion)) { - auto be = value::bitcastTo(rootVal); - const auto end = be + ConstDataView(be).read>(); + auto be = value::bitcastTo(rootVal); + const auto end = be + ConstDataView(be).read>(); - // Skip document length. - be += 4; + // Skip document length. + be += 4; + + // Let N = the # of "computed" fields, and let K = (isInclusion && N > 0 ? N-1 : N). + // + // When we have seen all of the "keepOrDrop" fields and when we have seen K of the + // "computed" fields, we can break out of this loop, ignore or copy the remaining + // fields from 'rootVal' (depending on whether 'isInclusion' is true or false), and + // then finally append the remaining computed field (if there is one) to the output + // object. + // + // (When isInclusion == true and a single "computed" field remains, it's okay to stop + // scanning 'rootVal' and append the remaining computed field to the end of the output + // object because it will have no observable effect on field order.) 
+ if (numFieldsRemaining > numFieldsRemainingThreshold || + numFieldsRemaining != numComputedFieldsRemaining) { while (be != end - 1) { auto sv = bson::fieldNameAndLength(be); auto nextBe = bson::advance(be, sv.size()); + size_t pos = fieldNames.findPos(sv); - bool isProjectedOrRestricted; - size_t pos = fieldsAndProjects.findPos(sv); if (pos == IndexedStringVector::npos) { - isProjectedOrRestricted = isInclusion; - } else if (pos < numFields) { - isProjectedOrRestricted = !isInclusion; + if (!isInclusion) { + bob.append(BSONElement(be, sv.size() + 1, nextBe - be)); + } + } else if (pos < numKeepOrDrops) { + --numFieldsRemaining; + + if (isInclusion) { + bob.append(BSONElement(be, sv.size() + 1, nextBe - be)); + } } else { - isProjectedOrRestricted = true; - } + --numFieldsRemaining; + --numComputedFieldsRemaining; - if (!isProjectedOrRestricted) { - bob.append(BSONElement(be, sv.size() + 1, nextBe - be)); - --nFieldsIfInclusion; + auto projectIdx = pos - numKeepOrDrops; + visited[projectIdx] = 1; + + size_t argIdx = stackOffset + projectIdx; + auto [_, tag, val] = getFromStack(argIdx); + bson::appendValueToBsonObj(bob, fieldNames[pos], tag, val); } - if (nFieldsIfInclusion == 0 && isInclusion) { + be = nextBe; + + if (numFieldsRemaining <= numFieldsRemainingThreshold && + numFieldsRemaining == numComputedFieldsRemaining) { break; } + } + } + + // If isInclusion == false and 'be' has not reached the end of 'rootVal', copy over + // the remaining fields from 'rootVal' to the output object. + if (!isInclusion) { + while (be != end - 1) { + auto sv = bson::fieldNameAndLength(be); + auto nextBe = bson::advance(be, sv.size()); + bob.append(BSONElement(be, sv.size() + 1, nextBe - be)); be = nextBe; } } } else if (rootTag == value::TypeTags::Object) { - if (!(nFieldsIfInclusion == 0 && isInclusion)) { - auto objRoot = value::getObjectView(rootVal); - for (size_t idx = 0; idx < objRoot->size(); ++idx) { + auto objRoot = value::getObjectView(rootVal); + size_t idx = 0; + + // Let N = number of "computed" fields, and let K = (isInclusion && N > 0 ? N-1 : N). + // + // When we have seen all of the "keepOrDrop" fields and when we have seen K of the + // "computed" fields, we can break out of this loop, ignore or copy the remaining + // fields from 'rootVal' (depending on whether 'isInclusion' is true or false), and + // then finally append the remaining computed field (if there is one) to the output + // object. + // + // (When isInclusion == true and a single "computed" field remains, it's okay to stop + // scanning 'rootVal' and append the remaining computed field to the end of the output + // object because it will have no observable effect on field order.) 
+ if (numFieldsRemaining > numFieldsRemainingThreshold || + numFieldsRemaining != numComputedFieldsRemaining) { + for (; idx < objRoot->size(); ++idx) { auto sv = StringData(objRoot->field(idx)); + size_t pos = fieldNames.findPos(sv); - bool isProjectedOrRestricted; - size_t pos = fieldsAndProjects.findPos(sv); if (pos == IndexedStringVector::npos) { - isProjectedOrRestricted = isInclusion; - } else if (pos < numFields) { - isProjectedOrRestricted = !isInclusion; + if (!isInclusion) { + auto [tag, val] = objRoot->getAt(idx); + bson::appendValueToBsonObj(bob, objRoot->field(idx), tag, val); + } + } else if (pos < numKeepOrDrops) { + --numFieldsRemaining; + + if (isInclusion) { + auto [tag, val] = objRoot->getAt(idx); + bson::appendValueToBsonObj(bob, objRoot->field(idx), tag, val); + } } else { - isProjectedOrRestricted = true; - } + --numFieldsRemaining; + --numComputedFieldsRemaining; - if (!isProjectedOrRestricted) { - auto [tag, val] = objRoot->getAt(idx); - bson::appendValueToBsonObj(bob, objRoot->field(idx), tag, val); - --nFieldsIfInclusion; + auto projectIdx = pos - numKeepOrDrops; + visited[projectIdx] = 1; + + size_t argIdx = stackOffset + projectIdx; + auto [_, tag, val] = getFromStack(argIdx); + bson::appendValueToBsonObj(bob, fieldNames[pos], tag, val); } - if (nFieldsIfInclusion == 0 && isInclusion) { + if (numFieldsRemaining <= numFieldsRemainingThreshold && + numFieldsRemaining == numComputedFieldsRemaining) { + ++idx; break; } } } + + // If isInclusion == false and 'be' has not reached the end of 'rootVal', copy over + // the remaining fields from 'rootVal' to the output object. + if (!isInclusion) { + for (; idx < objRoot->size(); ++idx) { + auto sv = StringData(objRoot->field(idx)); + auto [fieldTag, fieldVal] = objRoot->getAt(idx); + bson::appendValueToBsonObj(bob, sv, fieldTag, fieldVal); + } + } } } - for (size_t idx = numFields; idx < fieldsAndProjects.size(); ++idx) { - auto argIdx = startIdx + (idx - numFields); - auto [_, tag, val] = getFromStack(argIdx); - bson::appendValueToBsonObj(bob, fieldsAndProjects[idx], tag, val); + // Append the remaining computed fields (if any) to the output object. 
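The rewritten `produceBsonObject()` above interleaves three concerns in one pass over the root object: keep-or-drop filtering, overwriting "computed" fields in place so field order is preserved, and finally appending any computed fields that never appeared in the root. A simplified, self-contained sketch of that strategy follows; it uses plain strings and ints rather than SBE tags and values, omits the early-exit optimization, and `makeObj` with its parameters are illustrative names, not the real API:

```cpp
#include <map>
#include <set>
#include <string>
#include <utility>
#include <vector>

// One pass over the root document; computed fields keep their original position, and
// computed fields that were never seen in the root are appended at the end.
std::vector<std::pair<std::string, int>> makeObj(
    const std::vector<std::pair<std::string, int>>& root,
    const std::set<std::string>& keepOrDrop,  // the spec's "keepOrDrop" field names
    bool isInclusion,                         // true == keep listed fields, false == drop them
    const std::map<std::string, int>& computed) {
    std::vector<std::pair<std::string, int>> out;
    std::set<std::string> visited;  // plays the role of the 'visited' array above

    for (const auto& [name, val] : root) {
        if (auto it = computed.find(name); it != computed.end()) {
            out.emplace_back(name, it->second);  // overwrite in place, preserving order
            visited.insert(name);
        } else if (keepOrDrop.count(name) ? isInclusion : !isInclusion) {
            out.emplace_back(name, val);  // kept verbatim from the root
        }                                 // otherwise dropped
    }
    for (const auto& [name, val] : computed) {
        if (!visited.count(name)) {
            out.emplace_back(name, val);  // remaining computed fields go last
        }
    }
    return out;
}
```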
+ if (numComputedFieldsRemaining > 0) { + for (size_t pos = numKeepOrDrops; pos < fieldNames.size(); ++pos) { + auto projectIdx = pos - numKeepOrDrops; + if (!visited[projectIdx]) { + size_t argIdx = stackOffset + projectIdx; + auto [_, tag, val] = getFromStack(argIdx); + bson::appendValueToBsonObj(bob, fieldNames[pos], tag, val); + } + } } bob.doneFast(); @@ -5389,9 +5595,9 @@ FastTuple ByteCode::builtinMakeBsonObj(Arit auto mos = value::getMakeObjSpecView(mosVal); - const size_t startIdx = 2; + const int stackOffset = 2; - auto [tag, val] = produceBsonObject(mos, objTag, objVal, startIdx); + auto [tag, val] = produceBsonObject(mos, objTag, objVal, stackOffset); return {true, tag, val}; } @@ -5756,9 +5962,6 @@ FastTuple ByteCode::builtinObjectToArray(Ar objectEnumerator.advance(); } - if (objOwned) { - value::releaseValue(objTag, objVal); - } arrGuard.reset(); return {true, arrTag, arrVal}; } @@ -5781,9 +5984,6 @@ FastTuple ByteCode::builtinArrayToObject(Ar // return empty object for empty array if (arrayEnumerator.atEnd()) { - if (arrOwned) { - value::releaseValue(arrTag, arrVal); - } objGuard.reset(); return {true, objTag, objVal}; } @@ -5843,8 +6043,6 @@ FastTuple ByteCode::builtinArrayToObject(Ar auto [valueCopyTag, valueCopyVal] = value::copyValue(valueTag, valueVal); if (keyMap.contains(keyStringData)) { auto idx = keyMap[keyStringData]; - auto oldVal = object->getAt(idx); - value::ValueGuard guard{oldVal}; object->setAt(idx, valueCopyTag, valueCopyVal); } else { keyMap[keyStringData] = object->size(); @@ -5900,8 +6098,6 @@ FastTuple ByteCode::builtinArrayToObject(Ar auto [valueCopyTag, valueCopyVal] = value::copyValue(valueTag, valueVal); if (keyMap.contains(keyStringData)) { auto idx = keyMap[keyStringData]; - auto oldVal = object->getAt(idx); - value::ValueGuard guard{oldVal}; object->setAt(idx, valueCopyTag, valueCopyVal); } else { keyMap[keyStringData] = object->size(); @@ -5910,13 +6106,747 @@ FastTuple ByteCode::builtinArrayToObject(Ar } arrayEnumerator.advance(); } - if (arrOwned) { - value::releaseValue(arrTag, arrVal); - } objGuard.reset(); return {true, objTag, objVal}; } +std::tuple multiAccState( + value::TypeTags stateTag, value::Value stateVal) { + uassert( + 7548600, "The accumulator state should be an array", stateTag == value::TypeTags::Array); + auto state = value::getArrayView(stateVal); + + uassert(7548601, + "The accumulator state should have correct number of elements", + state->size() == static_cast(AggMultiElems::kSizeOfArray)); + + auto [arrayTag, arrayVal] = state->getAt(static_cast(AggMultiElems::kInternalArr)); + uassert(7548602, + "Internal array component is not of correct type", + arrayTag == value::TypeTags::Array); + auto array = value::getArrayView(arrayVal); + + auto [startIndexTag, startIndexVal] = + state->getAt(static_cast(AggMultiElems::kStartIdx)); + uassert(7548700, + "Index component be a 64-bit integer", + startIndexTag == value::TypeTags::NumberInt64); + + auto [maxSizeTag, maxSize] = state->getAt(static_cast(AggMultiElems::kMaxSize)); + uassert(7548603, + "MaxSize component should be a 64-bit integer", + maxSizeTag == value::TypeTags::NumberInt64); + + auto [memUsageTag, memUsage] = state->getAt(static_cast(AggMultiElems::kMemUsage)); + uassert(7548612, + "MemUsage component should be a 32-bit integer", + memUsageTag == value::TypeTags::NumberInt32); + + auto [memLimitTag, memLimit] = state->getAt(static_cast(AggMultiElems::kMemLimit)); + uassert(7548613, + "MemLimit component should be a 32-bit integer", + memLimitTag == 
value::TypeTags::NumberInt32); + + return {state, array, startIndexVal, maxSize, memUsage, memLimit}; +} + +FastTuple ByteCode::builtinAggFirstNNeedsMoreInput( + ArityType arity) { + auto [stateOwned, stateTag, stateVal] = getFromStack(0); + uassert(7695200, "Unexpected accumulator state ownership", !stateOwned); + + auto state = value::getArrayView(stateVal); + uassert( + 7695201, "The accumulator state should be an array", stateTag == value::TypeTags::Array); + + auto [arrayTag, arrayVal] = state->getAt(static_cast(AggMultiElems::kInternalArr)); + uassert(7695202, + "Internal array component is not of correct type", + arrayTag == value::TypeTags::Array); + auto array = value::getArrayView(arrayVal); + + auto [maxSizeTag, maxSize] = state->getAt(static_cast(AggMultiElems::kMaxSize)); + uassert(7695203, + "MaxSize component should be a 64-bit integer", + maxSizeTag == value::TypeTags::NumberInt64); + + bool needMoreInput = (array->size() < maxSize); + return {false, value::TypeTags::Boolean, value::bitcastFrom(needMoreInput)}; +} + +int32_t updateAndCheckMemUsage(value::Array* state, + int32_t memUsage, + int32_t memAdded, + int32_t memLimit) { + memUsage += memAdded; + uassert(ErrorCodes::ExceededMemoryLimit, + str::stream() + << "Accumulator used too much memory and spilling to disk cannot reduce memory " + "consumption any further. Memory limit: " + << memLimit << " bytes", + memUsage < memLimit); + state->setAt( + static_cast(AggMultiElems::kMemUsage), value::TypeTags::NumberInt32, memUsage); + return memUsage; +} + +size_t updateStartIdx(value::Array* state, size_t startIdx, size_t arrSize) { + startIdx = (startIdx + 1) % arrSize; + state->setAt( + static_cast(AggMultiElems::kStartIdx), value::TypeTags::NumberInt64, startIdx); + return startIdx; +} + +int32_t aggFirstN(value::Array* state, + value::Array* array, + size_t maxSize, + int32_t memUsage, + int32_t memLimit, + value::TypeTags fieldTag, + value::Value fieldVal) { + value::ValueGuard fieldGuard{fieldTag, fieldVal}; + if (array->size() < maxSize) { + memUsage = updateAndCheckMemUsage( + state, memUsage, value::getApproximateSize(fieldTag, fieldVal), memLimit); + + // add to array + fieldGuard.reset(); + array->push_back(fieldTag, fieldVal); + } + return memUsage; +} + +FastTuple ByteCode::builtinAggFirstN(ArityType arity) { + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard stateGuard{stateTag, stateVal}; + + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + + auto [fieldTag, fieldVal] = moveOwnedFromStack(1); + aggFirstN(state, array, maxSize, memUsage, memLimit, fieldTag, fieldVal); + + stateGuard.reset(); + return {true, stateTag, stateVal}; +} + +FastTuple ByteCode::builtinAggFirstNMerge(ArityType arity) { + auto [mergeStateTag, mergeStateVal] = moveOwnedFromStack(0); + value::ValueGuard mergeStateGuard{mergeStateTag, mergeStateVal}; + + auto [stateTag, stateVal] = moveOwnedFromStack(1); + value::ValueGuard stateGuard{stateTag, stateVal}; + + auto [mergeState, mergeArray, mergeStartIdx, mergeMaxSize, mergeMemUsage, mergeMemLimit] = + multiAccState(mergeStateTag, mergeStateVal); + auto [state, array, accStartIdx, accMaxSize, accMemUsage, accMemLimit] = + multiAccState(stateTag, stateVal); + uassert(7548604, + "Two arrays to merge should have the same MaxSize component", + accMaxSize == mergeMaxSize); + + for (size_t i = 0; i < array->size(); ++i) { + if (mergeArray->size() == mergeMaxSize) { + break; + } + + auto [tag, val] = array->swapAt(i, 
value::TypeTags::Null, 0); + mergeMemUsage = + aggFirstN(mergeState, mergeArray, mergeMaxSize, mergeMemUsage, mergeMemLimit, tag, val); + } + + mergeStateGuard.reset(); + return {true, mergeStateTag, mergeStateVal}; +} + +FastTuple ByteCode::builtinAggFirstNFinalize(ArityType arity) { + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard guard{stateTag, stateVal}; + + uassert(7548605, "expected an array", stateTag == value::TypeTags::Array); + auto state = value::getArrayView(stateVal); + + auto [outputTag, outputVal] = + state->swapAt(static_cast(AggMultiElems::kInternalArr), value::TypeTags::Null, 0); + return {true, outputTag, outputVal}; +} + +std::pair aggLastN(value::Array* state, + value::Array* array, + size_t startIdx, + size_t maxSize, + int32_t memUsage, + int32_t memLimit, + value::TypeTags fieldTag, + value::Value fieldVal) { + value::ValueGuard guard{fieldTag, fieldVal}; + if (array->size() < maxSize) { + invariant(startIdx == 0); + guard.reset(); + array->push_back(fieldTag, fieldVal); + } else { + invariant(array->size() == maxSize); + guard.reset(); + auto [oldFieldTag, oldFieldVal] = array->swapAt(startIdx, fieldTag, fieldVal); + memUsage -= value::getApproximateSize(oldFieldTag, oldFieldVal); + value::releaseValue(oldFieldTag, oldFieldVal); + startIdx = updateStartIdx(state, startIdx, maxSize); + } + memUsage = updateAndCheckMemUsage( + state, memUsage, value::getApproximateSize(fieldTag, fieldVal), memLimit); + return {startIdx, memUsage}; +} + +FastTuple ByteCode::builtinAggLastN(ArityType arity) { + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard stateGuard{stateTag, stateVal}; + + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + + auto [fieldTag, fieldVal] = moveOwnedFromStack(1); + aggLastN(state, array, startIdx, maxSize, memUsage, memLimit, fieldTag, fieldVal); + + stateGuard.reset(); + return {true, stateTag, stateVal}; +} + +FastTuple ByteCode::builtinAggLastNMerge(ArityType arity) { + auto [mergeStateTag, mergeStateVal] = moveOwnedFromStack(0); + value::ValueGuard mergeStateGuard{mergeStateTag, mergeStateVal}; + + auto [stateTag, stateVal] = moveOwnedFromStack(1); + value::ValueGuard stateGuard{stateTag, stateVal}; + + auto [mergeState, mergeArray, mergeStartIdx, mergeMaxSize, mergeMemUsage, mergeMemLimit] = + multiAccState(mergeStateTag, mergeStateVal); + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + uassert(7548703, + "Two arrays to merge should have the same MaxSize component", + maxSize == mergeMaxSize); + + if (array->size() < maxSize) { + // add values from accArr to mergeArray + for (size_t i = 0; i < array->size(); ++i) { + auto [tag, val] = array->swapAt(i, value::TypeTags::Null, 0); + std::tie(mergeStartIdx, mergeMemUsage) = aggLastN(mergeState, + mergeArray, + mergeStartIdx, + mergeMaxSize, + mergeMemUsage, + mergeMemLimit, + tag, + val); + } + mergeStateGuard.reset(); + return {true, mergeStateTag, mergeStateVal}; + } else { + // return accArray since it contains last n values + invariant(array->size() == maxSize); + stateGuard.reset(); + return {true, stateTag, stateVal}; + } +} + +FastTuple ByteCode::builtinAggLastNFinalize(ArityType arity) { + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard guard{stateTag, stateVal}; + + auto [state, arr, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + if (startIdx == 0) { + auto [outputTag, outputVal] = state->swapAt(0, 
value::TypeTags::Null, 0); + return {true, outputTag, outputVal}; + } + + invariant(arr->size() == maxSize); + auto [outArrayTag, outArrayVal] = value::makeNewArray(); + auto outArray = value::getArrayView(outArrayVal); + outArray->reserve(maxSize); + for (size_t i = 0; i < maxSize; ++i) { + auto srcIdx = (i + startIdx) % maxSize; + auto [elemTag, elemVal] = arr->swapAt(srcIdx, value::TypeTags::Null, 0); + outArray->push_back(elemTag, elemVal); + } + return {true, outArrayTag, outArrayVal}; +} + +template +int32_t aggTopBottomNAdd(value::Array* state, + value::Array* array, + size_t maxSize, + int32_t memUsage, + int32_t memLimit, + const value::SortSpec* sortSpec, + std::pair key, + std::pair output) { + auto memAdded = [](std::pair key, + std::pair output) { + return value::getApproximateSize(key.first, key.second) + + value::getApproximateSize(output.first, output.second); + }; + + value::ValueGuard keyGuard{key.first, key.second}; + value::ValueGuard outputGuard{output.first, output.second}; + auto less = Less(sortSpec); + auto keyLess = PairKeyComp(less); + auto& heap = array->values(); + + if (array->size() < maxSize) { + auto [pairTag, pairVal] = value::makeNewArray(); + value::ValueGuard pairGuard{pairTag, pairVal}; + auto pair = value::getArrayView(pairVal); + pair->reserve(2); + keyGuard.reset(); + pair->push_back(key.first, key.second); + outputGuard.reset(); + pair->push_back(output.first, output.second); + + memUsage = updateAndCheckMemUsage(state, memUsage, memAdded(key, output), memLimit); + + pairGuard.reset(); + array->push_back(pairTag, pairVal); + std::push_heap(heap.begin(), heap.end(), keyLess); + } else { + tassert(5807005, + "Heap should contain same number of elements as MaxSize", + array->size() == maxSize); + + auto [worstTag, worstVal] = heap.front(); + auto worst = value::getArrayView(worstVal); + auto worstKey = worst->getAt(0); + if (less(key, worstKey)) { + memUsage = updateAndCheckMemUsage(state, + memUsage, + -memAdded(worst->getAt(0), worst->getAt(1)) + + memAdded(key, output), + memLimit); + + std::pop_heap(heap.begin(), heap.end(), keyLess); + keyGuard.reset(); + worst->setAt(0, key.first, key.second); + outputGuard.reset(); + worst->setAt(1, output.first, output.second); + std::push_heap(heap.begin(), heap.end(), keyLess); + } + } + + return memUsage; +} + +template +FastTuple ByteCode::builtinAggTopBottomN(ArityType arity) { + auto [sortSpecOwned, sortSpecTag, sortSpecVal] = getFromStack(3); + tassert(5807024, "Argument must be of sortSpec type", sortSpecTag == value::TypeTags::sortSpec); + auto sortSpec = value::getSortSpecView(sortSpecVal); + + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard stateGuard{stateTag, stateVal}; + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + auto key = moveOwnedFromStack(1); + auto output = moveOwnedFromStack(2); + + aggTopBottomNAdd(state, array, maxSize, memUsage, memLimit, sortSpec, key, output); + + stateGuard.reset(); + return {true, stateTag, stateVal}; +} + +template +FastTuple ByteCode::builtinAggTopBottomNMerge( + ArityType arity) { + auto [sortSpecOwned, sortSpecTag, sortSpecVal] = getFromStack(2); + tassert(5807025, "Argument must be of sortSpec type", sortSpecTag == value::TypeTags::sortSpec); + auto sortSpec = value::getSortSpecView(sortSpecVal); + + auto [stateTag, stateVal] = moveOwnedFromStack(1); + value::ValueGuard stateGuard{stateTag, stateVal}; + auto [mergeStateTag, mergeStateVal] = moveOwnedFromStack(0); + value::ValueGuard 
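aggTopBottomNAdd above keeps at most maxSize (key, output) pairs in a heap ordered by the sort key, so the current "worst" candidate sits at the front and can be replaced in O(log N) when a better one arrives. Below is a simplified, self-contained version of that size-capped heap over plain integers (keeping the N smallest values); names are illustrative, and the real code orders pairs with a SortSpec-driven comparator and charges every insertion against a memory limit.

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Sketch of the size-capped heap used for $topN-style accumulators: keep the N
// "best" (here: smallest) values seen so far. A max-heap keeps the current worst
// value at the front, so it can be evicted in O(log N) when a better value arrives.
class TopN {
public:
    explicit TopN(size_t maxSize) : _maxSize(maxSize) {}

    void add(int value) {
        if (_heap.size() < _maxSize) {
            _heap.push_back(value);
            std::push_heap(_heap.begin(), _heap.end());  // default comparator: max-heap
        } else if (value < _heap.front()) {              // better than the current worst
            std::pop_heap(_heap.begin(), _heap.end());   // move the worst to the back
            _heap.back() = value;                        // overwrite it
            std::push_heap(_heap.begin(), _heap.end());  // restore the heap property
        }
    }

    // Emit the results in sorted order, mirroring builtinAggTopBottomNFinalize,
    // which sorts the heap contents by the sort pattern before building the output.
    std::vector<int> finalize() const {
        std::vector<int> out = _heap;
        std::sort(out.begin(), out.end());
        return out;
    }

private:
    size_t _maxSize;
    std::vector<int> _heap;
};
```

The heap is deliberately a max-heap even though the N smallest values are wanted: only the worst retained element ever needs to be inspected or evicted, and that is exactly the element a max-heap exposes at its root.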
mergeStateGuard{mergeStateTag, mergeStateVal}; + auto [mergeState, mergeArray, mergeStartIx, mergeMaxSize, mergeMemUsage, mergeMemLimit] = + multiAccState(mergeStateTag, mergeStateVal); + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + tassert(5807008, + "Two arrays to merge should have the same MaxSize component", + maxSize == mergeMaxSize); + + for (auto [pairTag, pairVal] : array->values()) { + auto pair = value::getArrayView(pairVal); + auto key = pair->swapAt(0, value::TypeTags::Null, 0); + auto output = pair->swapAt(1, value::TypeTags::Null, 0); + mergeMemUsage = aggTopBottomNAdd(mergeState, + mergeArray, + mergeMaxSize, + mergeMemUsage, + mergeMemLimit, + sortSpec, + key, + output); + } + + mergeStateGuard.reset(); + return {true, mergeStateTag, mergeStateVal}; +} + +FastTuple ByteCode::builtinAggTopBottomNFinalize( + ArityType arity) { + auto [sortSpecOwned, sortSpecTag, sortSpecVal] = getFromStack(1); + tassert(5807026, "Argument must be of sortSpec type", sortSpecTag == value::TypeTags::sortSpec); + auto sortSpec = value::getSortSpecView(sortSpecVal); + + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard stateGuard{stateTag, stateVal}; + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + + auto [outputArrayTag, outputArrayVal] = value::makeNewArray(); + value::ValueGuard outputArrayGuard{outputArrayTag, outputArrayVal}; + auto outputArray = value::getArrayView(outputArrayVal); + outputArray->reserve(array->size()); + + // We always output result in the order of sort pattern in according to MQL semantics. + auto less = SortPatternLess(sortSpec); + auto keyLess = PairKeyComp(less); + std::sort(array->values().begin(), array->values().end(), keyLess); + for (size_t i = 0; i < array->size(); ++i) { + auto pair = value::getArrayView(array->getAt(i).second); + auto [outputTag, outputVal] = pair->swapAt(1, value::TypeTags::Null, 0); + outputArray->push_back(outputTag, outputVal); + } + + outputArrayGuard.reset(); + return {true, outputArrayTag, outputArrayVal}; +} + +template +int32_t aggMinMaxN(value::Array* state, + value::Array* array, + size_t maxSize, + int32_t memUsage, + int32_t memLimit, + const CollatorInterface* collator, + value::TypeTags fieldTag, + value::Value fieldVal) { + value::ValueGuard guard{fieldTag, fieldVal}; + auto& heap = array->values(); + ValueCompare comp{collator}; + + if (array->size() < maxSize) { + memUsage = updateAndCheckMemUsage( + state, memUsage, value::getApproximateSize(fieldTag, fieldVal), memLimit); + guard.reset(); + + array->push_back(fieldTag, fieldVal); + std::push_heap(heap.begin(), heap.end(), comp); + } else { + uassert(7548800, + "Heap should contain same number of elements as MaxSize", + array->size() == maxSize); + + auto heapRoot = heap.front(); + if (comp({fieldTag, fieldVal}, heapRoot)) { + memUsage = + updateAndCheckMemUsage(state, + memUsage, + -value::getApproximateSize(heapRoot.first, heapRoot.second) + + value::getApproximateSize(fieldTag, fieldVal), + memLimit); + std::pop_heap(heap.begin(), heap.end(), comp); + guard.reset(); + array->setAt(maxSize - 1, fieldTag, fieldVal); + std::push_heap(heap.begin(), heap.end(), comp); + } + } + + return memUsage; +} + +template +FastTuple ByteCode::builtinAggMinMaxN(ArityType arity) { + invariant(arity == 2 || arity == 3); + + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard stateGuard{stateTag, stateVal}; + + auto [fieldTag, fieldVal] = 
moveOwnedFromStack(1); + value::ValueGuard fieldGuard{fieldTag, fieldVal}; + + if (value::isNullish(fieldTag)) { + stateGuard.reset(); + return {true, stateTag, stateVal}; + } + + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + + CollatorInterface* collator = nullptr; + if (arity == 3) { + auto [collOwned, collTag, collVal] = getFromStack(2); + uassert(7548802, "expected a collator argument", collTag == value::TypeTags::collator); + collator = value::getCollatorView(collVal); + } + fieldGuard.reset(); + aggMinMaxN(state, array, maxSize, memUsage, memLimit, collator, fieldTag, fieldVal); + + stateGuard.reset(); + return {true, stateTag, stateVal}; +} + +template +FastTuple ByteCode::builtinAggMinMaxNMerge(ArityType arity) { + invariant(arity == 2 || arity == 3); + + auto [mergeStateTag, mergeStateVal] = moveOwnedFromStack(0); + value::ValueGuard mergeStateGuard{mergeStateTag, mergeStateVal}; + + auto [stateTag, stateVal] = moveOwnedFromStack(1); + value::ValueGuard stateGuard{stateTag, stateVal}; + + auto [mergeState, mergeArray, mergeStartIdx, mergeMaxSize, mergeMemUsage, mergeMemLimit] = + multiAccState(mergeStateTag, mergeStateVal); + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + uassert(7548801, + "Two arrays to merge should have the same MaxSize component", + maxSize == mergeMaxSize); + + CollatorInterface* collator = nullptr; + if (arity == 3) { + auto [collOwned, collTag, collVal] = getFromStack(2); + uassert(7548803, "expected a collator argument", collTag == value::TypeTags::collator); + collator = value::getCollatorView(collVal); + } + + for (size_t i = 0; i < array->size(); ++i) { + auto [tag, val] = array->swapAt(i, value::TypeTags::Null, 0); + mergeMemUsage = aggMinMaxN( + mergeState, mergeArray, mergeMaxSize, mergeMemUsage, mergeMemLimit, collator, tag, val); + } + + mergeStateGuard.reset(); + return {true, mergeStateTag, mergeStateVal}; +} + +template +FastTuple ByteCode::builtinAggMinMaxNFinalize( + ArityType arity) { + invariant(arity == 2 || arity == 1); + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard stateGuard{stateTag, stateVal}; + + auto [state, array, startIdx, maxSize, memUsage, memLimit] = multiAccState(stateTag, stateVal); + + CollatorInterface* collator = nullptr; + if (arity == 2) { + auto [collOwned, collTag, collVal] = getFromStack(1); + uassert(7548804, "expected a collator argument", collTag == value::TypeTags::collator); + collator = value::getCollatorView(collVal); + } + + ValueCompare comp{collator}; + std::sort(array->values().begin(), array->values().end(), comp); + auto [arrayTag, arrayVal] = + state->swapAt(static_cast(AggMultiElems::kInternalArr), value::TypeTags::Null, 0); + return {true, arrayTag, arrayVal}; +} + +std::tuple, int64_t, int64_t> rankState( + value::TypeTags stateTag, value::Value stateVal) { + uassert( + 7795500, "The accumulator state should be an array", stateTag == value::TypeTags::Array); + auto state = value::getArrayView(stateVal); + + uassert(7795501, + "The accumulator state should have correct number of elements", + state->size() == AggRankElems::kRankArraySize); + + auto lastValue = state->getAt(AggRankElems::kLastValue); + auto [lastRankTag, lastRankVal] = state->getAt(AggRankElems::kLastRank); + auto [sameRankCountTag, sameRankCountVal] = state->getAt(AggRankElems::kSameRankCount); + + uassert(7795502, + "Last rank component should be a 64-bit integer", + lastRankTag == 
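Each of the $firstN/$lastN/$topN/$minN/$maxN builtins above threads memUsage and memLimit through updateAndCheckMemUsage (defined earlier in this file) so that an accumulator that grows past its budget fails instead of consuming unbounded memory. A rough standalone equivalent of that bookkeeping, using a plain exception rather than uassert, might look like the following; the class name and error text are illustrative, not MongoDB API.

```cpp
#include <cstdint>
#include <stdexcept>

// Illustrative memory-budget tracker: callers report how many bytes each added
// element costs; exceeding the limit aborts the accumulation, mirroring the
// updateAndCheckMemUsage(state, memUsage, memAdded, memLimit) calls above.
class MemBudget {
public:
    explicit MemBudget(int64_t limitBytes) : _limitBytes(limitBytes) {}

    int64_t add(int64_t bytes) {
        _usedBytes += bytes;  // 'bytes' may be negative when an element is evicted
        if (_usedBytes > _limitBytes) {
            throw std::runtime_error(
                "accumulator used too much memory and cannot spill to disk");
        }
        return _usedBytes;
    }

private:
    int64_t _usedBytes = 0;
    int64_t _limitBytes;
};
```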
value::TypeTags::NumberInt64); + auto lastRank = value::bitcastTo(lastRankVal); + + uassert(7795503, + "Same rank component should be a 64-bit integer", + sameRankCountTag == value::TypeTags::NumberInt64); + auto sameRankCount = value::bitcastTo(sameRankCountVal); + return {state, lastValue, lastRank, sameRankCount}; +} + +FastTuple builtinAggRankImpl( + value::TypeTags stateTag, + value::Value stateVal, + bool valueOwned, + value::TypeTags valueTag, + value::Value valueVal, + bool dense, + CollatorInterface* collator = nullptr) { + // Initialize the accumulator. + if (stateTag == value::TypeTags::Nothing) { + auto [newStateTag, newStateVal] = value::makeNewArray(); + value::ValueGuard newStateGuard{newStateTag, newStateVal}; + auto newState = value::getArrayView(newStateVal); + newState->reserve(AggRankElems::kRankArraySize); + if (!valueOwned) { + std::tie(valueTag, valueVal) = value::copyValue(valueTag, valueVal); + } + newState->push_back(valueTag, valueVal); + newState->push_back(value::TypeTags::NumberInt64, 1); + newState->push_back(value::TypeTags::NumberInt64, 1); + newStateGuard.reset(); + return {true, newStateTag, newStateVal}; + } + + value::ValueGuard stateGuard{stateTag, stateVal}; + auto [state, lastValue, lastRank, sameRankCount] = rankState(stateTag, stateVal); + auto [compareTag, compareVal] = + value::compareValue(valueTag, valueVal, lastValue.first, lastValue.second, collator); + if (compareTag == value::TypeTags::NumberInt32 && compareVal == 0) { + state->setAt(AggRankElems::kSameRankCount, value::TypeTags::NumberInt64, sameRankCount + 1); + } else { + if (!valueOwned) { + std::tie(valueTag, valueVal) = value::copyValue(valueTag, valueVal); + } + state->setAt(AggRankElems::kLastValue, valueTag, valueVal); + state->setAt(AggRankElems::kLastRank, + value::TypeTags::NumberInt64, + dense ? 
lastRank + 1 : lastRank + sameRankCount); + state->setAt(AggRankElems::kSameRankCount, value::TypeTags::NumberInt64, 1); + } + stateGuard.reset(); + return {true, stateTag, stateVal}; +} + +FastTuple ByteCode::builtinAggRank(ArityType arity) { + invariant(arity == 2); + auto [valueOwned, valueTag, valueVal] = getFromStack(1); + auto [stateTag, stateVal] = moveOwnedFromStack(0); + return builtinAggRankImpl( + stateTag, stateVal, valueOwned, valueTag, valueVal, false /* dense */); +} + +FastTuple ByteCode::builtinAggRankColl(ArityType arity) { + invariant(arity == 3); + auto [collatorOwned, collatorTag, collatorVal] = getFromStack(2); + auto [valueOwned, valueTag, valueVal] = getFromStack(1); + auto [stateTag, stateVal] = moveOwnedFromStack(0); + + tassert(7795504, + "Incorrect value type passed to aggRankColl for collator.", + collatorTag == value::TypeTags::collator); + auto collator = value::getCollatorView(collatorVal); + + return builtinAggRankImpl( + stateTag, stateVal, valueOwned, valueTag, valueVal, false /* dense */, collator); +} + +FastTuple ByteCode::builtinAggDenseRank(ArityType arity) { + invariant(arity == 2); + auto [valueOwned, valueTag, valueVal] = getFromStack(1); + auto [stateTag, stateVal] = moveOwnedFromStack(0); + return builtinAggRankImpl(stateTag, stateVal, valueOwned, valueTag, valueVal, true /* dense */); +} + +FastTuple ByteCode::builtinAggDenseRankColl(ArityType arity) { + invariant(arity == 3); + auto [collatorOwned, collatorTag, collatorVal] = getFromStack(2); + auto [valueOwned, valueTag, valueVal] = getFromStack(1); + auto [stateTag, stateVal] = moveOwnedFromStack(0); + + tassert(7795505, + "Incorrect value type passed to aggDenseRankColl for collator.", + collatorTag == value::TypeTags::collator); + auto collator = value::getCollatorView(collatorVal); + + return builtinAggRankImpl( + stateTag, stateVal, valueOwned, valueTag, valueVal, true /* dense */, collator); +} + +FastTuple ByteCode::builtinAggRankFinalize(ArityType arity) { + invariant(arity == 1); + auto [stateOwned, stateTag, stateVal] = getFromStack(0); + auto [state, lastValue, lastRank, sameRankCount] = rankState(stateTag, stateVal); + return {true, value::TypeTags::NumberInt64, value::bitcastFrom(lastRank)}; +} + +FastTuple ByteCode::builtinAggExpMovingAvg(ArityType arity) { + auto [stateTag, stateVal] = moveOwnedFromStack(0); + value::ValueGuard stateGuard{stateTag, stateVal}; + + auto [fieldOwned, fieldTag, fieldVal] = getFromStack(1); + if (!value::isNumber(fieldTag)) { + stateGuard.reset(); + return {true, stateTag, stateVal}; + } + + uassert(7821200, "State should be of array type", stateTag == value::TypeTags::Array); + auto state = value::getArrayView(stateVal); + uassert(7821201, + "Unexpected state array size", + state->size() == static_cast(AggExpMovingAvgElems::kSizeOfArray)); + + auto [alphaTag, alphaVal] = state->getAt(static_cast(AggExpMovingAvgElems::kAlpha)); + uassert(7821202, "alpha is not of decimal type", alphaTag == value::TypeTags::NumberDecimal); + auto alpha = value::bitcastTo(alphaVal); + + value::TypeTags currentResultTag; + value::Value currentResultVal; + std::tie(currentResultTag, currentResultVal) = + state->getAt(static_cast(AggExpMovingAvgElems::kResult)); + + auto decimalVal = value::numericCast(fieldTag, fieldVal); + auto result = [&]() { + if (currentResultTag == value::TypeTags::Null) { + // Accumulator result has not been yet initialised. 
We will now + // set it to decimalVal + return decimalVal; + } else { + uassert(7821203, + "currentResultTag is not of decimal type", + currentResultTag == value::TypeTags::NumberDecimal); + auto currentResult = value::bitcastTo(currentResultVal); + currentResult = decimalVal.multiply(alpha).add( + currentResult.multiply(Decimal128(1).subtract(alpha))); + return currentResult; + } + }(); + + auto [resultTag, resultVal] = value::makeCopyDecimal(result); + + state->setAt(static_cast(AggExpMovingAvgElems::kResult), resultTag, resultVal); + if (fieldTag == value::TypeTags::NumberDecimal) { + state->setAt(static_cast(AggExpMovingAvgElems::kIsDecimal), + value::TypeTags::Boolean, + value::bitcastFrom(true)); + } + + stateGuard.reset(); + return {true, stateTag, stateVal}; +} + +FastTuple ByteCode::builtinAggExpMovingAvgFinalize( + ArityType arity) { + auto [stateOwned, stateTag, stateVal] = getFromStack(0); + + uassert(7821204, "State should be of array type", stateTag == value::TypeTags::Array); + auto state = value::getArrayView(stateVal); + + auto [resultTag, resultVal] = state->getAt(static_cast(AggExpMovingAvgElems::kResult)); + if (resultTag == value::TypeTags::Null) { + return {false, value::TypeTags::Null, 0}; + } + uassert(7821205, "Unexpected result type", resultTag == value::TypeTags::NumberDecimal); + + auto [isDecimalTag, isDecimalVal] = + state->getAt(static_cast(AggExpMovingAvgElems::kIsDecimal)); + uassert(7821206, "Unexpected isDecimal type", isDecimalTag == value::TypeTags::Boolean); + + if (value::bitcastTo(isDecimalVal)) { + std::tie(resultTag, resultVal) = value::copyValue(resultTag, resultVal); + return {true, resultTag, resultVal}; + } else { + auto result = value::bitcastTo(resultVal).toDouble(); + return {false, value::TypeTags::NumberDouble, value::bitcastFrom(result)}; + } +} + FastTuple ByteCode::dispatchBuiltin(Builtin f, ArityType arity) { switch (f) { @@ -6020,10 +6950,18 @@ FastTuple ByteCode::dispatchBuiltin(Builtin return builtinBitTestPosition(arity); case Builtin::bsonSize: return builtinBsonSize(arity); + case Builtin::strLenBytes: + return builtinStrLenBytes(arity); case Builtin::toUpper: return builtinToUpper(arity); case Builtin::toLower: return builtinToLower(arity); + case Builtin::trim: + return builtinTrim(arity, true, true); + case Builtin::ltrim: + return builtinTrim(arity, true, false); + case Builtin::rtrim: + return builtinTrim(arity, false, true); case Builtin::coerceToBool: return builtinCoerceToBool(arity); case Builtin::coerceToString: @@ -6144,6 +7082,8 @@ FastTuple ByteCode::dispatchBuiltin(Builtin return builtinGenerateCheapSortKey(arity); case Builtin::sortKeyComponentVectorGetElement: return builtinSortKeyComponentVectorGetElement(arity); + case Builtin::sortKeyComponentVectorToArray: + return builtinSortKeyComponentVectorToArray(arity); case Builtin::makeBsonObj: return builtinMakeBsonObj(arity); case Builtin::tsSecond: @@ -6181,6 +7121,58 @@ FastTuple ByteCode::dispatchBuiltin(Builtin return builtinObjectToArray(arity); case Builtin::arrayToObject: return builtinArrayToObject(arity); + case Builtin::aggFirstNNeedsMoreInput: + return builtinAggFirstNNeedsMoreInput(arity); + case Builtin::aggFirstN: + return builtinAggFirstN(arity); + case Builtin::aggFirstNMerge: + return builtinAggFirstNMerge(arity); + case Builtin::aggFirstNFinalize: + return builtinAggFirstNFinalize(arity); + case Builtin::aggLastN: + return builtinAggLastN(arity); + case Builtin::aggLastNMerge: + return builtinAggLastNMerge(arity); + case Builtin::aggLastNFinalize: + 
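The $rank/$denseRank builtins and the $expMovingAvg builtin above both reduce to small recurrences over per-partition state. Two standalone sketches in plain C++ follow; names are illustrative and only the standard library is used, whereas the SBE versions operate on tagged values, use collator-aware comparison, and keep $expMovingAvg results as Decimal128.

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Sketch of the $rank / $denseRank recurrence over an already-sorted sequence.
// With dense = false, ties consume rank positions ([1, 1, 3, ...]); with
// dense = true they do not ([1, 1, 2, ...]).
std::vector<int64_t> computeRanks(const std::vector<int>& sortedValues, bool dense) {
    std::vector<int64_t> ranks;
    std::optional<int> lastValue;
    int64_t lastRank = 1;
    int64_t sameRankCount = 1;
    for (int value : sortedValues) {
        if (!lastValue) {
            lastRank = 1;      // the first value always has rank 1
            sameRankCount = 1;
        } else if (value == *lastValue) {
            ++sameRankCount;   // tie: the rank does not change
        } else {
            lastRank = dense ? lastRank + 1 : lastRank + sameRankCount;
            sameRankCount = 1;
        }
        lastValue = value;
        ranks.push_back(lastRank);
    }
    return ranks;
}
```

The exponential moving average uses the standard recurrence result = x·α + result·(1 − α), seeded with the first numeric value; a double-based sketch:

```cpp
#include <optional>

// Minimal $expMovingAvg sketch: the first value seeds the result, every later
// value is blended in with weight alpha (0 < alpha <= 1).
class ExpMovingAvg {
public:
    explicit ExpMovingAvg(double alpha) : _alpha(alpha) {}

    void add(double x) {
        if (!_result) {
            _result = x;  // the result has not been initialized yet
        } else {
            _result = x * _alpha + *_result * (1.0 - _alpha);
        }
    }

    std::optional<double> finalize() const {
        return _result;  // empty if no numeric input was seen
    }

private:
    double _alpha;
    std::optional<double> _result;
};
```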
return builtinAggLastNFinalize(arity); + case Builtin::aggTopN: + return builtinAggTopBottomN(arity); + case Builtin::aggTopNMerge: + return builtinAggTopBottomNMerge(arity); + case Builtin::aggTopNFinalize: + return builtinAggTopBottomNFinalize(arity); + case Builtin::aggBottomN: + return builtinAggTopBottomN(arity); + case Builtin::aggBottomNMerge: + return builtinAggTopBottomNMerge(arity); + case Builtin::aggBottomNFinalize: + return builtinAggTopBottomNFinalize(arity); + case Builtin::aggMaxN: + return builtinAggMinMaxN(arity); + case Builtin::aggMaxNMerge: + return builtinAggMinMaxNMerge(arity); + case Builtin::aggMaxNFinalize: + return builtinAggMinMaxNFinalize(arity); + case Builtin::aggMinN: + return builtinAggMinMaxN(arity); + case Builtin::aggMinNMerge: + return builtinAggMinMaxNMerge(arity); + case Builtin::aggMinNFinalize: + return builtinAggMinMaxNFinalize(arity); + case Builtin::aggRank: + return builtinAggRank(arity); + case Builtin::aggRankColl: + return builtinAggRankColl(arity); + case Builtin::aggDenseRank: + return builtinAggDenseRank(arity); + case Builtin::aggDenseRankColl: + return builtinAggDenseRankColl(arity); + case Builtin::aggRankFinalize: + return builtinAggRankFinalize(arity); + case Builtin::aggExpMovingAvg: + return builtinAggExpMovingAvg(arity); + case Builtin::aggExpMovingAvgFinalize: + return builtinAggExpMovingAvgFinalize(arity); } MONGO_UNREACHABLE; @@ -6289,10 +7281,18 @@ std::string builtinToString(Builtin b) { return "bitTestPosition"; case Builtin::bsonSize: return "bsonSize"; + case Builtin::strLenBytes: + return "strLenBytes"; case Builtin::toUpper: return "toUpper"; case Builtin::toLower: return "toLower"; + case Builtin::trim: + return "trim"; + case Builtin::ltrim: + return "ltrim"; + case Builtin::rtrim: + return "rtrim"; case Builtin::coerceToBool: return "coerceToBool"; case Builtin::coerceToString: @@ -6413,6 +7413,8 @@ std::string builtinToString(Builtin b) { return "generateCheapSortKey"; case Builtin::sortKeyComponentVectorGetElement: return "sortKeyComponentVectorGetElement"; + case Builtin::sortKeyComponentVectorToArray: + return "sortKeyComponentVectorToArray"; case Builtin::makeBsonObj: return "makeBsonObj"; case Builtin::tsSecond: @@ -6451,6 +7453,58 @@ std::string builtinToString(Builtin b) { return "objectToArray"; case Builtin::arrayToObject: return "arrayToObject"; + case Builtin::aggFirstNNeedsMoreInput: + return "aggFirstNNeedsMoreInput"; + case Builtin::aggFirstN: + return "aggFirstN"; + case Builtin::aggFirstNMerge: + return "aggFirstNMerge"; + case Builtin::aggFirstNFinalize: + return "aggFirstNFinalize"; + case Builtin::aggLastN: + return "aggLastN"; + case Builtin::aggLastNMerge: + return "aggLastNMerge"; + case Builtin::aggLastNFinalize: + return "aggLastNFinalize"; + case Builtin::aggTopN: + return "aggTopN"; + case Builtin::aggTopNMerge: + return "aggTopNMerge"; + case Builtin::aggTopNFinalize: + return "aggTopNFinalize"; + case Builtin::aggBottomN: + return "aggBottomN"; + case Builtin::aggBottomNMerge: + return "aggBottomNMerge"; + case Builtin::aggBottomNFinalize: + return "aggBottomNFinalize"; + case Builtin::aggMaxN: + return "aggMaxN"; + case Builtin::aggMaxNMerge: + return "aggMaxNMerge"; + case Builtin::aggMaxNFinalize: + return "aggMaxNFinalize"; + case Builtin::aggMinN: + return "aggMinN"; + case Builtin::aggMinNMerge: + return "aggMinNMerge"; + case Builtin::aggMinNFinalize: + return "aggMinNFinalize"; + case Builtin::aggRank: + return "aggRank"; + case Builtin::aggRankColl: + return "aggRankColl"; + case 
Builtin::aggDenseRank: + return "aggDenseRank"; + case Builtin::aggDenseRankColl: + return "aggDenseRankColl"; + case Builtin::aggRankFinalize: + return "aggRankFinalize"; + case Builtin::aggExpMovingAvg: + return "aggExpMovingAvg"; + case Builtin::aggExpMovingAvgFinalize: + return "aggExpMovingAvgFinalize"; default: MONGO_UNREACHABLE; } @@ -6551,6 +7605,24 @@ void ByteCode::runInternal(const CodeFragment* code, int64_t position) { break; } + case Instruction::pushOwnedAccessorVal: { + auto accessor = readFromMemory(pcPointer); + pcPointer += sizeof(accessor); + + auto [tag, val] = accessor->getViewOfValue(); + pushStack(false, tag, val); + + break; + } + case Instruction::pushEnvAccessorVal: { + auto accessor = readFromMemory(pcPointer); + pcPointer += sizeof(accessor); + + auto [tag, val] = accessor->getViewOfValue(); + pushStack(false, tag, val); + + break; + } case Instruction::pushMoveVal: { auto accessor = readFromMemory(pcPointer); pcPointer += sizeof(accessor); diff --git a/src/mongo/db/exec/sbe/vm/vm.h b/src/mongo/db/exec/sbe/vm/vm.h index f7d4012352ccb..f78ae0267862e 100644 --- a/src/mongo/db/exec/sbe/vm/vm.h +++ b/src/mongo/db/exec/sbe/vm/vm.h @@ -29,21 +29,41 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include +#include +#include +#include #include #include "mongo/base/compare_numbers.h" -#include "mongo/config.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/string_data.h" +#include "mongo/base/string_data_comparator_interface.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/sbe/makeobj_spec.h" #include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/sort_spec.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/vm/datetime.h" #include "mongo/db/exec/sbe/vm/label.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/datetime/date_time_support.h" - -#include +#include "mongo/platform/compiler.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/allocator.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" #if !defined(MONGO_CONFIG_DEBUG_BUILD) #define MONGO_COMPILER_ALWAYS_INLINE_OPT MONGO_COMPILER_ALWAYS_INLINE @@ -264,6 +284,8 @@ struct Instruction { enum Tags { pushConstVal, pushAccessVal, + pushOwnedAccessorVal, + pushEnvAccessorVal, pushMoveVal, pushLocalVal, pushMoveLocalVal, @@ -432,6 +454,10 @@ struct Instruction { return "pushConstVal"; case pushAccessVal: return "pushAccessVal"; + case pushOwnedAccessorVal: + return "pushOwnedAccessorVal"; + case pushEnvAccessorVal: + return "pushEnvAccessorVal"; case pushMoveVal: return "pushMoveVal"; case pushLocalVal: @@ -663,12 +689,16 @@ enum class Builtin : uint8_t { bitTestMask, // test bitwise mask & value is mask bitTestPosition, // test BinData with a bit position list bsonSize, // implements $bsonSize + strLenBytes, toUpper, toLower, coerceToBool, coerceToString, concat, concatArrays, + trim, + ltrim, + rtrim, // Agg function to concatenate arrays, failing when the accumulator reaches a specified size. 
aggConcatArraysCapped, @@ -732,6 +762,7 @@ enum class Builtin : uint8_t { generateSortKey, generateCheapSortKey, sortKeyComponentVectorGetElement, + sortKeyComponentVectorToArray, makeBsonObj, tsSecond, @@ -752,10 +783,128 @@ enum class Builtin : uint8_t { isoWeek, objectToArray, arrayToObject, + + aggFirstNNeedsMoreInput, + aggFirstN, + aggFirstNMerge, + aggFirstNFinalize, + aggLastN, + aggLastNMerge, + aggLastNFinalize, + aggTopN, + aggTopNMerge, + aggTopNFinalize, + aggBottomN, + aggBottomNMerge, + aggBottomNFinalize, + aggMaxN, + aggMaxNMerge, + aggMaxNFinalize, + aggMinN, + aggMinNMerge, + aggMinNFinalize, + aggRank, + aggRankColl, + aggDenseRank, + aggDenseRankColl, + aggRankFinalize, + aggExpMovingAvg, + aggExpMovingAvgFinalize, }; std::string builtinToString(Builtin b); +/** + * This enum defines indices into an 'Array' that store state for $AccumulatorN expressions. + * + * The array contains five elements: + * - The element at index `kInternalArr` is the array that holds the values. + * - The element at index `kStartIdx` is the logical start index in the internal array. This is + * used for emulating queue behaviour. + * - The element at index `kMaxSize` is the maximum number entries the data structure holds. + * - The element at index `kMemUsage` holds the current memory usage + * - The element at index `kMemLimit` holds the max memory limit allowed + */ +enum class AggMultiElems { kInternalArr, kStartIdx, kMaxSize, kMemUsage, kMemLimit, kSizeOfArray }; + +/** + * Less than comparison based on a sort pattern. + */ +struct SortPatternLess { + SortPatternLess(const value::SortSpec* sortSpec) : _sortSpec(sortSpec) {} + + bool operator()(const std::pair& lhs, + const std::pair& rhs) const { + auto [cmpTag, cmpVal] = _sortSpec->compare(lhs.first, lhs.second, rhs.first, rhs.second); + uassert(5807000, "Invalid comparison result", cmpTag == value::TypeTags::NumberInt32); + return value::bitcastTo(cmpVal) < 0; + } + +private: + const value::SortSpec* _sortSpec; +}; + +/** + * Greater than comparison based on a sort pattern. + */ +struct SortPatternGreater { + SortPatternGreater(const value::SortSpec* sortSpec) : _sortSpec(sortSpec) {} + + bool operator()(const std::pair& lhs, + const std::pair& rhs) const { + auto [cmpTag, cmpVal] = _sortSpec->compare(lhs.first, lhs.second, rhs.first, rhs.second); + uassert(5807001, "Invalid comparison result", cmpTag == value::TypeTags::NumberInt32); + return value::bitcastTo(cmpVal) > 0; + } + +private: + const value::SortSpec* _sortSpec; +}; + +/** + * Comparison based on the key of a pair of elements. 
+ */ +template +struct PairKeyComp { + PairKeyComp(const Comp& comp) : _comp(comp) {} + + bool operator()(const std::pair& lhs, + const std::pair& rhs) const { + auto [lPairTag, lPairVal] = lhs; + auto lPair = value::getArrayView(lPairVal); + auto lKey = lPair->getAt(0); + + auto [rPairTag, rPairVal] = rhs; + auto rPair = value::getArrayView(rPairVal); + auto rKey = rPair->getAt(0); + + return _comp(lKey, rKey); + } + +private: + const Comp _comp; +}; + +template +struct ValueCompare { + ValueCompare(const CollatorInterface* collator) : _collator(collator) {} + + bool operator()(const std::pair& lhs, + const std::pair& rhs) const { + auto [tag, val] = + value::compareValue(lhs.first, lhs.second, rhs.first, rhs.second, _collator); + uassert(7548805, "Invalid comparison result", tag == value::TypeTags::NumberInt32); + if constexpr (less) { + return value::bitcastTo(val) < 0; + } else { + return value::bitcastTo(val) > 0; + } + } + +private: + const CollatorInterface* _collator; +}; + /** * This enum defines indices into an 'Array' that returns the partial sum result when 'needsMerge' * is requested. @@ -786,12 +935,28 @@ enum AggStdDevValueElems { kSizeOfArray }; +/** + * This enum defines indices into an 'Array' that store state for rank expressions. + * + * The array contains three elements: + * - The element at index `kLastValue` is the last value. + * - The element at index `kLastRank` is the rank of the last value. + * - The element at index `kSameRankCount` is how many values are of the same rank as the last + * value. + */ +enum AggRankElems { kLastValue, kLastRank, kSameRankCount, kRankArraySize }; + /** * This enum defines indices into an 'Array' that returns the result of accumulators that track the * size of accumulated values, such as 'addToArrayCapped' and 'addToSetCapped'. */ enum class AggArrayWithSize { kValues = 0, kSizeOfValues, kLast = kSizeOfValues + 1 }; +/** + * This enum defines indices into an 'Array' that stores the state for $expMovingAvg accumulator + */ +enum class AggExpMovingAvgElems { kResult, kAlpha, kIsDecimal, kSizeOfArray }; + using SmallArityType = uint8_t; using ArityType = uint32_t; @@ -1096,8 +1261,6 @@ class ByteCode { value::Value operandValue); FastTuple genericFloor(value::TypeTags operandTag, value::Value operandValue); - FastTuple genericTrunc(value::TypeTags operandTag, - value::Value operandValue); FastTuple genericExp(value::TypeTags operandTag, value::Value operandValue); FastTuple genericLn(value::TypeTags operandTag, @@ -1106,6 +1269,8 @@ class ByteCode { value::Value operandValue); FastTuple genericSqrt(value::TypeTags operandTag, value::Value operandValue); + FastTuple genericRoundTrunc( + std::string funcName, Decimal128::RoundingMode roundingMode, ArityType arity); std::pair genericNot(value::TypeTags tag, value::Value value); std::pair genericIsMember(value::TypeTags lhsTag, value::Value lhsVal, @@ -1389,10 +1554,35 @@ class ByteCode { TimeZone timezone, DayOfWeek startOfWeek); + /** + * produceBsonObject() takes a MakeObjSpec ('spec'), a root value ('rootTag' and 'rootVal'), + * and 0 or more "computed" values as inputs, it builds an output BSON object based on the + * instructions provided by 'spec' and based on the contents of 'root' and the computed input + * values, and then it returns the output object. (Note the computed input values are not + * directly passed in as C++ parameters -- instead the computed input values are passed via + * the VM's stack.) 
+ * + * 'spec' provides two lists of field names: "keepOrDrop" fields and "computed" fields. These + * lists are disjoint and do not contain duplicates. The number of computed input values passed + * in by the caller on the VM stack must match the number of fields in the "computed" list. + * + * For each field F in the "computed" list, this method will retrieve the corresponding computed + * input value V from the VM stack and add {F,V} to the output object. + * + * If 'root' is not an object, it is ignored. Otherwise, for each field F in 'root' with value V + * that does not appear in the "computed" list, this method will copy {F,V} to the output object + * if either: (1) field F appears in the "keepOrDrop" list and 'spec->fieldBehavior == keep'; or + * (2) field F does _not_ appear in the "keepOrDrop" list and 'spec->fieldBehavior == drop'. If + * neither of these conditions are met, field F in 'root' will be ignored. + * + * For any two distinct fields F1 and F2 in the output object, if F1 is in 'root' and F2 does + * not appear before F1 in 'root', -OR- if both F1 and F2 are not in 'root' and F2 does not + * appear before F1 in the "computed" list, then F2 will appear after F1 in the output object. + */ std::pair produceBsonObject(const MakeObjSpec* mos, value::TypeTags rootTag, value::Value rootVal, - size_t startIdx); + int stackOffset); FastTuple builtinSplit(ArityType arity); FastTuple builtinDate(ArityType arity); @@ -1454,6 +1644,7 @@ class ByteCode { FastTuple builtinBitTestMask(ArityType arity); FastTuple builtinBitTestPosition(ArityType arity); FastTuple builtinBsonSize(ArityType arity); + FastTuple builtinStrLenBytes(ArityType arity); FastTuple builtinToUpper(ArityType arity); FastTuple builtinToLower(ArityType arity); FastTuple builtinCoerceToBool(ArityType arity); @@ -1476,6 +1667,9 @@ class ByteCode { FastTuple builtinRound(ArityType arity); FastTuple builtinConcat(ArityType arity); FastTuple builtinConcatArrays(ArityType arity); + FastTuple builtinTrim(ArityType arity, + bool trimLeft, + bool trimRight); FastTuple builtinAggConcatArraysCapped(ArityType arity); FastTuple builtinAggSetUnion(ArityType arity); FastTuple builtinAggSetUnionCapped(ArityType arity); @@ -1523,6 +1717,8 @@ class ByteCode { FastTuple builtinGenerateCheapSortKey(ArityType arity); FastTuple builtinSortKeyComponentVectorGetElement( ArityType arity); + FastTuple builtinSortKeyComponentVectorToArray( + ArityType arity); FastTuple builtinMakeBsonObj(ArityType arity); FastTuple builtinTsSecond(ArityType arity); FastTuple builtinTsIncrement(ArityType arity); @@ -1543,9 +1739,35 @@ class ByteCode { FastTuple builtinISOWeekYear(ArityType arity); FastTuple builtinISODayOfWeek(ArityType arity); FastTuple builtinISOWeek(ArityType arity); - FastTuple builtinObjectToArray(ArityType arity); FastTuple builtinArrayToObject(ArityType arity); + + FastTuple builtinAggFirstNNeedsMoreInput(ArityType arity); + FastTuple builtinAggFirstN(ArityType arity); + FastTuple builtinAggFirstNMerge(ArityType arity); + FastTuple builtinAggFirstNFinalize(ArityType arity); + FastTuple builtinAggLastN(ArityType arity); + FastTuple builtinAggLastNMerge(ArityType arity); + FastTuple builtinAggLastNFinalize(ArityType arity); + template + FastTuple builtinAggTopBottomN(ArityType arity); + template + FastTuple builtinAggTopBottomNMerge(ArityType arity); + FastTuple builtinAggTopBottomNFinalize(ArityType arity); + template + FastTuple builtinAggMinMaxN(ArityType arity); + template + FastTuple builtinAggMinMaxNMerge(ArityType arity); + template 
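The produceBsonObject() contract documented above combines a "keepOrDrop" field list, a fieldBehavior of keep or drop, and a list of "computed" fields whose values arrive on the VM stack. A rough illustration of those selection rules with ordinary standard-library containers (this is not the BSON-building code, and it ignores the field-ordering guarantees the comment describes):

```cpp
#include <algorithm>
#include <map>
#include <string>
#include <vector>

// Illustration of the produceBsonObject() field-selection rules: root fields are
// copied when they pass the keep/drop filter, and every computed field is always
// written. The two field lists are disjoint, so computed fields never collide
// with kept root fields.
std::map<std::string, int> makeObject(const std::map<std::string, int>& root,
                                      const std::vector<std::string>& keepOrDrop,
                                      bool keepBehavior,  // true: keep listed fields, false: drop them
                                      const std::map<std::string, int>& computed) {
    std::map<std::string, int> out;
    for (const auto& [field, value] : root) {
        if (computed.count(field)) {
            continue;  // computed fields are handled separately below
        }
        bool listed =
            std::find(keepOrDrop.begin(), keepOrDrop.end(), field) != keepOrDrop.end();
        if (listed == keepBehavior) {
            out[field] = value;  // passes the keep/drop filter
        }
    }
    for (const auto& [field, value] : computed) {
        out[field] = value;  // every computed input contributes a field
    }
    return out;
}
```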
+ FastTuple builtinAggMinMaxNFinalize(ArityType arity); + FastTuple builtinAggRank(ArityType arity); + FastTuple builtinAggRankColl(ArityType arity); + FastTuple builtinAggDenseRank(ArityType arity); + FastTuple builtinAggDenseRankColl(ArityType arity); + FastTuple builtinAggRankFinalize(ArityType arity); + FastTuple builtinAggExpMovingAvg(ArityType arity); + FastTuple builtinAggExpMovingAvgFinalize(ArityType arity); + FastTuple dispatchBuiltin(Builtin f, ArityType arity); static constexpr size_t offsetOwned = 0; diff --git a/src/mongo/db/exec/sbe/vm/vm_bm.cpp b/src/mongo/db/exec/sbe/vm/vm_bm.cpp new file mode 100644 index 0000000000000..c480f9574ce00 --- /dev/null +++ b/src/mongo/db/exec/sbe/vm/vm_bm.cpp @@ -0,0 +1,254 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/query/collation/collator_factory_icu.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/platform/random.h" +#include "mongo/util/assert_util_core.h" + +namespace mongo::sbe { +namespace { + +using TagValue = std::pair; + +class ValueVectorGuard { +public: + ValueVectorGuard(std::vector& values) : _values(values) {} + ~ValueVectorGuard() { + for (auto [tag, value] : _values) { + value::releaseValue(tag, value); + } + } + +private: + std::vector& _values; +}; + +class SbeVmBenchmark : public benchmark::Fixture { +private: + static constexpr int32_t kSeed = 1; + +public: + SbeVmBenchmark() : SbeVmBenchmark(std::make_unique()) {} + + void benchmarkExpression(std::unique_ptr expr, + const std::vector& inputs, + benchmark::State& state) { + vm::CodeFragment code = expr->compileDirect(_compileCtx); + vm::ByteCode vm; + auto inputAccessor = _env->getAccessor(_inputSlotId); + for (auto keepRunning : state) { + for (auto [inputTag, inputVal] : inputs) { + inputAccessor->reset(false, inputTag, inputVal); + auto [owned, tag, val] = vm.run(&code); + if (owned) { + value::releaseValue(tag, val); + } + } + benchmark::ClobberMemory(); + } + } + + TagValue generateRandomString(size_t size) { + static const std::string kAlphabet = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"; + std::string str; + str.reserve(size); + for (size_t j = 0; j < size; ++j) { + str.push_back(kAlphabet[_random.nextInt32(kAlphabet.size())]); + } + return value::makeNewString(str); + } + + std::vector generateRandomStrings(size_t count, size_t size) { + std::vector strings; + strings.reserve(count); + for (size_t i = 0; i < count; i++) { + strings.push_back(generateRandomString(size)); + } + return strings; + } + + TagValue makeArraySet(const std::vector& values, const CollatorInterface* collator) { + auto [tag, value] = value::makeNewArraySet(collator); + auto* arraySet = value::getArraySetView(value); + for (const auto& [tag, value] : values) { + auto [tagCopy, valueCopy] = value::copyValue(tag, value); + arraySet->push_back(tagCopy, valueCopy); + } + return {tag, value}; + } + + value::SlotId setCollator(const CollatorInterface* collator) { + auto collatorSlot = _env->getSlotIfExists("collator"_sd); + if (collatorSlot) { + _env->getAccessor(*collatorSlot) + ->reset(false, + value::TypeTags::collator, + value::bitcastFrom(collator)); + return *collatorSlot; + } + return _env->registerSlot("collator"_sd, + value::TypeTags::collator, + value::bitcastFrom(collator), + false, + &_slotIdGenerator); + } + + std::unique_ptr createCollator() { + auto statusWithCollator = _collatorFactory.makeFromBSON(BSON("locale" + << "en_US")); + invariant(statusWithCollator.isOK()); + return std::move(statusWithCollator.getValue()); + } + + value::SlotId inputSlotId() const { + return _inputSlotId; + } + + PseudoRandom random() const { + return _random; + } + +private: + SbeVmBenchmark(std::unique_ptr 
env) + : _env(env.get()), _compileCtx(std::move(env)), _random(kSeed) { + _env->registerSlot("timeZoneDB"_sd, + value::TypeTags::timeZoneDB, + value::bitcastFrom(&_timeZoneDB), + false, + &_slotIdGenerator); + _inputSlotId = + _env->registerSlot("input"_sd, value::TypeTags::Nothing, 0, false, &_slotIdGenerator); + } + + RuntimeEnvironment* _env; + CompileCtx _compileCtx; + value::SlotIdGenerator _slotIdGenerator; + value::SlotId _inputSlotId; + + PseudoRandom _random; + + TimeZoneDatabase _timeZoneDB; + + CollatorFactoryICU _collatorFactory; +}; + +BENCHMARK_DEFINE_F(SbeVmBenchmark, BM_IsMember_ArraySet_NoCollator)(benchmark::State& state) { + auto strings = generateRandomStrings(state.range(0) /*count*/, state.range(1) /*size*/); + ValueVectorGuard guards(strings); + + TagValue arraySet = makeArraySet(strings, nullptr /*collator*/); + auto arraySetConstant = makeE(arraySet.first, arraySet.second); + + auto expr = makeE( + "isMember"_sd, makeEs(makeE(inputSlotId()), std::move(arraySetConstant))); + TagValue searchValue = generateRandomString(state.range(1) /*size*/); + value::ValueGuard guard{searchValue.first, searchValue.second}; + benchmarkExpression(std::move(expr), {searchValue}, state); +} + +BENCHMARK_DEFINE_F(SbeVmBenchmark, BM_IsMember_ArraySet_Collator)(benchmark::State& state) { + auto strings = generateRandomStrings(state.range(0) /*count*/, state.range(1) /*size*/); + ValueVectorGuard guards(strings); + auto collator = createCollator(); + auto collatorSlotId = setCollator(collator.get()); + + TagValue arraySet = makeArraySet(strings, collator.get()); + auto arraySetConstant = makeE(arraySet.first, arraySet.second); + + auto expr = makeE("collIsMember"_sd, + makeEs(makeE(collatorSlotId), + makeE(inputSlotId()), + std::move(arraySetConstant))); + TagValue searchValue = generateRandomString(state.range(1) /*size*/); + value::ValueGuard guard{searchValue.first, searchValue.second}; + benchmarkExpression(std::move(expr), {searchValue}, state); +} + +BENCHMARK_DEFINE_F(SbeVmBenchmark, BM_IsMember_ArraySet_Collator_Linear)(benchmark::State& state) { + auto strings = generateRandomStrings(state.range(0) /*count*/, state.range(1) /*size*/); + ValueVectorGuard guards(strings); + auto collator = createCollator(); + auto collatorSlotId = setCollator(collator.get()); + + // Do not pass collator to ArraySet to make VM use linear search. 
+ TagValue arraySet = makeArraySet(strings, nullptr); + auto arraySetConstant = makeE(arraySet.first, arraySet.second); + + auto expr = makeE("collIsMember"_sd, + makeEs(makeE(collatorSlotId), + makeE(inputSlotId()), + std::move(arraySetConstant))); + + TagValue searchValue = generateRandomString(state.range(1) /*size*/); + value::ValueGuard guard{searchValue.first, searchValue.second}; + benchmarkExpression(std::move(expr), {searchValue}, state); +} + +#define ADD_ARGS() \ + Args({5, 5}) \ + ->Args({10, 5}) \ + ->Args({10, 10}) \ + ->Args({50, 10}) \ + ->Args({10, 50}) \ + ->Args({50, 50}) \ + ->Args({10, 100}) \ + ->Args({50, 100}) \ + ->Args({100, 100}) + +BENCHMARK_REGISTER_F(SbeVmBenchmark, BM_IsMember_ArraySet_NoCollator)->Args({100, 100}); + +BENCHMARK_REGISTER_F(SbeVmBenchmark, BM_IsMember_ArraySet_Collator)->ADD_ARGS(); + +BENCHMARK_REGISTER_F(SbeVmBenchmark, BM_IsMember_ArraySet_Collator_Linear)->ADD_ARGS(); + +} // namespace +} // namespace mongo::sbe diff --git a/src/mongo/db/exec/sbe/vm/vm_printer.cpp b/src/mongo/db/exec/sbe/vm/vm_printer.cpp index 921463cab28aa..24f7ec9a92a38 100644 --- a/src/mongo/db/exec/sbe/vm/vm_printer.cpp +++ b/src/mongo/db/exec/sbe/vm/vm_printer.cpp @@ -26,12 +26,26 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ +#include +#include +#include #include - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/sbe/util/print_options.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/sbe/values/value_printer.h" #include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/exec/sbe/vm/vm_printer.h" -#include "mongo/platform/basic.h" +#include "mongo/db/query/datetime/date_time_support.h" namespace mongo::sbe::vm { @@ -296,6 +310,8 @@ class CodeFragmentPrinterImpl { .writeValueToStream(tag, val); break; } + case Instruction::pushOwnedAccessorVal: + case Instruction::pushEnvAccessorVal: case Instruction::pushAccessVal: case Instruction::pushMoveVal: { auto accessor = readFromMemory(pcPointer); diff --git a/src/mongo/db/exec/scoped_timer.cpp b/src/mongo/db/exec/scoped_timer.cpp index fc1b408e97595..76f2880cd014d 100644 --- a/src/mongo/db/exec/scoped_timer.cpp +++ b/src/mongo/db/exec/scoped_timer.cpp @@ -27,19 +27,19 @@ * it in the license file. 
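The new vm_bm.cpp above follows the usual Google Benchmark fixture pattern: expensive setup (compiling the expression, generating the input strings) happens once per benchmark from the state.range() parameters, and only the per-iteration VM run is timed, with argument grids registered via ->Args(). A stripped-down, self-contained sketch of that structure, with the SBE-specific work replaced by a trivial string search (all names here are illustrative):

```cpp
#include <cstddef>
#include <string>
#include <vector>

#include <benchmark/benchmark.h>

// Standalone sketch of the vm_bm.cpp pattern: build the input data once per
// benchmark from the state.range() parameters, then time only the per-iteration work.
class SketchBenchmark : public benchmark::Fixture {
public:
    std::vector<std::string> makeStrings(size_t count, size_t size) {
        std::vector<std::string> out;
        out.reserve(count);
        for (size_t i = 0; i < count; ++i) {
            out.emplace_back(size, static_cast<char>('a' + (i % 26)));
        }
        return out;
    }
};

BENCHMARK_DEFINE_F(SketchBenchmark, BM_LinearSearch)(benchmark::State& state) {
    auto strings = makeStrings(state.range(0) /*count*/, state.range(1) /*size*/);
    std::string needle(state.range(1), 'z');
    for (auto _ : state) {
        bool found = false;
        for (const auto& s : strings) {
            found = found || (s == needle);  // the measured work
        }
        benchmark::DoNotOptimize(found);
        benchmark::ClobberMemory();
    }
}

// Register a (count, size) grid in the style of the ADD_ARGS() macro above.
BENCHMARK_REGISTER_F(SketchBenchmark, BM_LinearSearch)->Args({10, 10})->Args({100, 100});

BENCHMARK_MAIN();
```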
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/exec/scoped_timer.h" +#include "mongo/platform/compiler.h" + namespace mongo { ScopedTimer::ScopedTimer(Nanoseconds* counter, TickSource* ts) - : _counter(counter), _tickSource(ts), _startTS(ts->getTicks()) {} + : _counter(counter), _tickSource(ts), _clockSource(nullptr), _startTS(ts->getTicks()) {} ScopedTimer::ScopedTimer(Nanoseconds* counter, ClockSource* cs) - : _counter(counter), _clockSource(cs), _startCS(cs->now()) {} + : _counter(counter), _tickSource(nullptr), _clockSource(cs), _startCS(cs->now()) {} ScopedTimer::~ScopedTimer() { - if (_clockSource) { + if (MONGO_likely(_clockSource)) { *_counter += Nanoseconds{ (durationCount(_clockSource->now() - _startCS) * 1000 * 1000)}; return; diff --git a/src/mongo/db/exec/scoped_timer.h b/src/mongo/db/exec/scoped_timer.h index 3608ff0133f36..783fa2240e64c 100644 --- a/src/mongo/db/exec/scoped_timer.h +++ b/src/mongo/db/exec/scoped_timer.h @@ -31,7 +31,9 @@ #include "mongo/stdx/variant.h" #include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/system_tick_source.h" +#include "mongo/util/tick_source.h" #include "mongo/util/time_support.h" #include "mongo/util/timer.h" @@ -55,10 +57,10 @@ class ScopedTimer { private: // Reference to the counter that we are incrementing with the elapsed time. Nanoseconds* const _counter; - TickSource* _tickSource = nullptr; - ClockSource* _clockSource = nullptr; + TickSource* _tickSource; + ClockSource* _clockSource; Date_t _startCS; - TickSource::Tick _startTS = 0; + TickSource::Tick _startTS; }; } // namespace mongo diff --git a/src/mongo/db/exec/scoped_timer_factory.cpp b/src/mongo/db/exec/scoped_timer_factory.cpp deleted file mode 100644 index 310d6ab338df4..0000000000000 --- a/src/mongo/db/exec/scoped_timer_factory.cpp +++ /dev/null @@ -1,50 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
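The ScopedTimer change above initializes _tickSource and _clockSource explicitly in each constructor instead of relying on default member initializers, and marks the clock-source branch of the destructor as the likely path. The underlying RAII idea is simple; a dependency-free sketch using std::chrono rather than the MongoDB ClockSource/TickSource types:

```cpp
#include <chrono>
#include <cstdint>

// Dependency-free sketch of a scoped timer: on destruction, add the elapsed
// nanoseconds to a counter owned by the caller.
class ScopedTimerSketch {
public:
    explicit ScopedTimerSketch(int64_t* counterNanos)
        : _counterNanos(counterNanos), _start(std::chrono::steady_clock::now()) {}

    ~ScopedTimerSketch() {
        auto elapsed = std::chrono::steady_clock::now() - _start;
        *_counterNanos +=
            std::chrono::duration_cast<std::chrono::nanoseconds>(elapsed).count();
    }

private:
    int64_t* _counterNanos;
    std::chrono::steady_clock::time_point _start;
};
```

Typical use is to declare the timer at the top of the scope being measured, e.g. `int64_t nanos = 0; { ScopedTimerSketch t(&nanos); /* timed work */ }`, after which `nanos` holds the elapsed time.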
- */ - -#include "mongo/db/exec/scoped_timer_factory.h" - -namespace mongo { -namespace scoped_timer_factory { - -boost::optional make(ServiceContext* context, - QueryExecTimerPrecision precision, - Nanoseconds* counter) { - invariant(context); - if (precision == QueryExecTimerPrecision::kMillis) { - return {{counter, context->getFastClockSource()}}; - } - if (precision == QueryExecTimerPrecision::kNanos) { - return {{counter, context->getTickSource()}}; - } - - return boost::none; -} - -} // namespace scoped_timer_factory -} // namespace mongo diff --git a/src/mongo/db/exec/scoped_timer_factory.h b/src/mongo/db/exec/scoped_timer_factory.h deleted file mode 100644 index 3d7aba43eb122..0000000000000 --- a/src/mongo/db/exec/scoped_timer_factory.h +++ /dev/null @@ -1,47 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/query/plan_summary_stats.h" -#include "mongo/db/service_context.h" - -namespace mongo { -namespace scoped_timer_factory { - -/** - * A factory helper to make a 'ScopedTimer'. The type of the underlying timer is based on the value - * of 'precision'. 
- */ -boost::optional make(ServiceContext* context, - QueryExecTimerPrecision precision, - Nanoseconds* counter); -} // namespace scoped_timer_factory -} // namespace mongo diff --git a/src/mongo/db/exec/shard_filter.cpp b/src/mongo/db/exec/shard_filter.cpp index 82a0d9328abe8..b125e0a216939 100644 --- a/src/mongo/db/exec/shard_filter.cpp +++ b/src/mongo/db/exec/shard_filter.cpp @@ -28,17 +28,22 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/shard_filter.h" - #include +#include +#include -#include "mongo/db/exec/filter.h" -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/shard_filter.h" +#include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/logv2/log.h" -#include "mongo/s/shard_key_pattern.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/exec/shard_filter.h b/src/mongo/db/exec/shard_filter.h index 38e23a41aea03..35d305bfa1458 100644 --- a/src/mongo/db/exec/shard_filter.h +++ b/src/mongo/db/exec/shard_filter.h @@ -29,8 +29,15 @@ #pragma once +#include + #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/shard_filterer_impl.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/s/scoped_collection_metadata.h" namespace mongo { diff --git a/src/mongo/db/exec/shard_filterer_impl.cpp b/src/mongo/db/exec/shard_filterer_impl.cpp index ebe4f2a52a83d..c1cafc34babe5 100644 --- a/src/mongo/db/exec/shard_filterer_impl.cpp +++ b/src/mongo/db/exec/shard_filterer_impl.cpp @@ -27,12 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/shard_filterer_impl.h" +#include -#include "mongo/db/exec/filter.h" -#include "mongo/db/matcher/matchable.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/shard_filterer_impl.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/exec/shard_filterer_impl.h b/src/mongo/db/exec/shard_filterer_impl.h index f974f9da5ecbb..de5f65787088e 100644 --- a/src/mongo/db/exec/shard_filterer_impl.h +++ b/src/mongo/db/exec/shard_filterer_impl.h @@ -29,7 +29,13 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/keypattern.h" #include "mongo/db/matcher/matchable.h" #include "mongo/db/s/scoped_collection_metadata.h" diff --git a/src/mongo/db/exec/skip.cpp b/src/mongo/db/exec/skip.cpp index d3d0fc48afdde..bd8add049481c 100644 --- a/src/mongo/db/exec/skip.cpp +++ b/src/mongo/db/exec/skip.cpp @@ -30,10 +30,8 @@ #include "mongo/db/exec/skip.h" #include - -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/util/str.h" +#include +#include namespace mongo { diff --git a/src/mongo/db/exec/skip.h b/src/mongo/db/exec/skip.h index 24937662d0205..9aa2cd9b5b58d 100644 --- a/src/mongo/db/exec/skip.h +++ b/src/mongo/db/exec/skip.h @@ -30,8 +30,14 @@ #pragma once +#include + #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" namespace mongo { diff --git a/src/mongo/db/exec/sort.cpp b/src/mongo/db/exec/sort.cpp index 18975b313e923..cd742d144c44f 100644 --- a/src/mongo/db/exec/sort.cpp +++ b/src/mongo/db/exec/sort.cpp @@ -27,12 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include +#include #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/sort.h" -#include "mongo/db/exec/working_set_common.h" #include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/exec/sort.h b/src/mongo/db/exec/sort.h index f71c90dda9d6d..fa034003f579d 100644 --- a/src/mongo/db/exec/sort.h +++ b/src/mongo/db/exec/sort.h @@ -29,14 +29,24 @@ #pragma once +#include +#include +#include #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sort_executor.h" #include "mongo/db/exec/sort_key_comparator.h" #include "mongo/db/exec/sort_key_generator.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/index/sort_key_generator.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" namespace mongo { diff --git a/src/mongo/db/exec/sort_executor.cpp b/src/mongo/db/exec/sort_executor.cpp index f23475b5eac98..8f2dfbc8650e3 100644 --- a/src/mongo/db/exec/sort_executor.cpp +++ b/src/mongo/db/exec/sort_executor.cpp @@ -27,12 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/db/exec/sort_executor.h" - -#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/exec/working_set.h" +#include "mongo/platform/atomic_word.h" namespace mongo { namespace { diff --git a/src/mongo/db/exec/sort_executor.h b/src/mongo/db/exec/sort_executor.h index 5ff1ba8bfff76..19f7781eb3fc9 100644 --- a/src/mongo/db/exec/sort_executor.h +++ b/src/mongo/db/exec/sort_executor.h @@ -29,13 +29,21 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sort_key_comparator.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/query/sort_pattern.h" #include "mongo/db/sorter/sorter.h" +#include "mongo/db/sorter/sorter_stats.h" namespace mongo { /** diff --git a/src/mongo/db/exec/sort_key_comparator.cpp b/src/mongo/db/exec/sort_key_comparator.cpp index b48ea5c1f9952..42b0cd88e13cb 100644 --- a/src/mongo/db/exec/sort_key_comparator.cpp +++ b/src/mongo/db/exec/sort_key_comparator.cpp @@ -29,6 +29,13 @@ #include "mongo/db/exec/sort_key_comparator.h" +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/value_comparator.h" + namespace mongo { SortKeyComparator::SortKeyComparator(const SortPattern& sortPattern) { diff --git a/src/mongo/db/exec/sort_key_comparator.h b/src/mongo/db/exec/sort_key_comparator.h index e078bc71fa3fb..da110f5d67d4c 100644 --- a/src/mongo/db/exec/sort_key_comparator.h +++ b/src/mongo/db/exec/sort_key_comparator.h @@ -31,6 +31,7 @@ #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/query/sort_pattern.h" diff --git a/src/mongo/db/exec/sort_key_generator.cpp b/src/mongo/db/exec/sort_key_generator.cpp index 
922ea05ca1ef3..70dacc0c0b2b7 100644 --- a/src/mongo/db/exec/sort_key_generator.cpp +++ b/src/mongo/db/exec/sort_key_generator.cpp @@ -28,20 +28,15 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/sort_key_generator.h" - #include +#include #include -#include "mongo/bson/bsonobj_comparator.h" -#include "mongo/db/catalog/collection.h" -#include "mongo/db/exec/scoped_timer.h" +#include + +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/sort_key_generator.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/matcher/extensions_callback_noop.h" -#include "mongo/db/query/collation/collator_interface.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/exec/sort_key_generator.h b/src/mongo/db/exec/sort_key_generator.h index f31968619473b..d5fac8e26932f 100644 --- a/src/mongo/db/exec/sort_key_generator.h +++ b/src/mongo/db/exec/sort_key_generator.h @@ -29,10 +29,13 @@ #pragma once +#include #include #include "mongo/bson/bsonobj.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/index/sort_key_generator.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/index_bounds.h" diff --git a/src/mongo/db/exec/sort_test.cpp b/src/mongo/db/exec/sort_test.cpp index 328484902f06d..b59a1e2de8617 100644 --- a/src/mongo/db/exec/sort_test.cpp +++ b/src/mongo/db/exec/sort_test.cpp @@ -33,17 +33,33 @@ #include "mongo/db/exec/sort.h" -#include #include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/queued_data_stage.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/sort_key_generator.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_factory_mock.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" using namespace mongo; diff --git a/src/mongo/db/exec/spool.cpp b/src/mongo/db/exec/spool.cpp index 2af14488ee019..95901c0ad7d8f 100644 --- a/src/mongo/db/exec/spool.cpp +++ b/src/mongo/db/exec/spool.cpp @@ -29,15 +29,62 @@ #include "mongo/db/exec/spool.h" +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" + +namespace { +/** + * Generates a new file name on each call using a static, atomic and monotonically increasing + * number. 
+ * + * Each user of the Sorter must implement this function to ensure that all temporary files that the + * Sorter instances produce are uniquely identified using a unique file name extension with separate + * atomic variable. This is necessary because the sorter.cpp code is separately included in multiple + * places, rather than compiled in one place and linked, and so cannot provide a globally unique ID. + */ +std::string nextFileName() { + static mongo::AtomicWord spoolFileCounter; + return "ext-spool." + std::to_string(spoolFileCounter.fetchAndAdd(1)); +} + +// Helper to allocate a new working set member to hold the RecordId, set the output parameter, and +// return ADVANCED. +mongo::PlanStage::StageState allocateResultAndAdvance(mongo::WorkingSet* ws, + mongo::WorkingSetID* out, + mongo::RecordId&& recordId) { + *out = ws->allocate(); + auto member = ws->get(*out); + member->recordId = std::move(recordId); + // Only store the record id, not any index information or full objects. This is to + // reduce memory and disk usage - it is the responsibility of our caller to fetch the records. + ws->transitionToRecordIdAndIdx(*out); + return mongo::PlanStage::ADVANCED; +} +} // namespace + namespace mongo { const char* SpoolStage::kStageType = "SPOOL"; SpoolStage::SpoolStage(ExpressionContext* expCtx, WorkingSet* ws, std::unique_ptr child) - : PlanStage(expCtx, std::move(child), kStageType), _ws(ws) {} + : PlanStage(expCtx, std::move(child), kStageType), + _ws(ws), + _memTracker(expCtx->allowDiskUse, internalQueryMaxSpoolMemoryUsageBytes.load()) { + + _specificStats.maxMemoryUsageBytes = _memTracker._maxAllowedMemoryUsageBytes; + _specificStats.maxDiskUsageBytes = internalQueryMaxSpoolDiskUsageBytes.load(); +} bool SpoolStage::isEOF() { - return _nextIndex == static_cast(_buffer.size()); + return _spillFileIters.empty() && _nextIndex == static_cast(_buffer.size()); } std::unique_ptr SpoolStage::getStats() { @@ -48,6 +95,34 @@ std::unique_ptr SpoolStage::getStats() { return ret; } +void SpoolStage::spill() { + uassert(ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed, + "Exceeded memory limit for spool, but didn't allow external sort. Set " + "allowDiskUseByDefault:true to opt in.", + _memTracker._allowDiskUse); + uassert(7443700, + "Exceeded disk use limit for spool", + _specificStats.spilledDataStorageSize < _specificStats.maxDiskUsageBytes); + + // Initialize '_file' in a lazy manner only when it is needed. 
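A simplified, standalone sketch of the spilling flow this patch adds to SpoolStage may help: spill-file names come from a static atomic counter, a memory budget triggers spill(), and the read path drains spilled segments before the in-memory buffer. The sketch below uses only the standard library; SpillBuffer and nextSpillFileName are stand-ins for the Sorter file machinery, not MongoDB APIs.

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <deque>
#include <string>
#include <vector>

// Stand-in for nextFileName(): a static atomic counter keeps spill-file names
// unique across all instances, mirroring the helper described above.
std::string nextSpillFileName() {
    static std::atomic<unsigned> counter{0};
    return "ext-spool." + std::to_string(counter.fetch_add(1));
}

class SpillBuffer {
public:
    explicit SpillBuffer(std::uint64_t budgetBytes) : _budgetBytes(budgetBytes) {}

    // Mirrors the caching half of doWork(): buffer the id, track memory, spill on overflow.
    void add(std::int64_t recordId, std::uint64_t memUsage) {
        _buffer.push_back(recordId);
        _memUsedBytes += memUsage;
        if (_memUsedBytes > _budgetBytes)
            spill();
    }

    // Mirrors the returning half of doWork(): exhaust spilled segments first,
    // then hand out whatever is still buffered in memory.
    bool next(std::int64_t* out) {
        while (!_spills.empty()) {
            if (_spillPos < _spills.front().size()) {
                *out = _spills.front()[_spillPos++];
                return true;
            }
            _spills.pop_front();
            _spillPos = 0;
        }
        if (_nextIndex < _buffer.size()) {
            *out = _buffer[_nextIndex++];
            return true;
        }
        return false;  // EOF
    }

    int spillCount() const {
        return _spillCount;
    }

private:
    void spill() {
        // The real stage writes _buffer to a file named by nextFileName() through a
        // SortedFileWriter and keeps an iterator over it; here the segment simply
        // moves to a separate container so the control flow stays visible.
        _spills.push_back(std::move(_buffer));
        _buffer.clear();
        _memUsedBytes = 0;
        ++_spillCount;
    }

    std::uint64_t _budgetBytes;
    std::uint64_t _memUsedBytes = 0;
    std::vector<std::int64_t> _buffer;
    std::deque<std::vector<std::int64_t>> _spills;
    std::size_t _spillPos = 0;
    std::size_t _nextIndex = 0;
    int _spillCount = 0;
};
```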
+ if (!_file) { + _spillStats = std::make_unique(nullptr /* sorterTracker */); + _file = std::make_shared::File>( + expCtx()->tempDir + "/" + nextFileName(), _spillStats.get()); + } + + SortedFileWriter writer(SortOptions().TempDir(expCtx()->tempDir), _file); + for (size_t i = 0; i < _buffer.size(); ++i) { + writer.addAlreadySorted(std::move(_buffer[i]), NullValue()); + } + _spillFileIters.emplace_back(writer.done()); + _buffer.clear(); + + _memTracker.resetCurrent(); + ++_specificStats.spills; + _specificStats.spilledDataStorageSize = _spillStats->bytesSpilled(); +} + PlanStage::StageState SpoolStage::doWork(WorkingSetID* out) { if (isEOF()) { return PlanStage::IS_EOF; @@ -65,10 +140,16 @@ PlanStage::StageState SpoolStage::doWork(WorkingSetID* out) { tassert( 7443500, "WSM passed to spool stage must have a RecordId", member->hasRecordId()); - // TODO SERVER-74437 spill to disk if necessary - _specificStats.totalDataSizeBytes += member->recordId.memUsage(); + auto memUsage = member->recordId.memUsage(); + _specificStats.totalDataSizeBytes += memUsage; + _memTracker.update(memUsage); + _buffer.emplace_back(std::move(member->recordId)); + if (!_memTracker.withinMemoryLimit()) { + spill(); + } + // We've cached the RecordId, so go ahead and free the object in the working set. _ws->free(id); @@ -85,18 +166,25 @@ PlanStage::StageState SpoolStage::doWork(WorkingSetID* out) { // from our buffer. } + // First, return results from any spills we may have. + while (!_spillFileIters.empty()) { + if (_spillFileIters.front()->more()) { + auto [recordId, _] = _spillFileIters.front()->next(); + return allocateResultAndAdvance(_ws, out, std::move(recordId)); + } + + _spillFileIters.pop_front(); + } + // Increment to the next element in our buffer. Note that we increment the index *first* so that // we will return EOF in a call to doWork() before isEOF() returns true. if (++_nextIndex == static_cast(_buffer.size())) { return PlanStage::IS_EOF; } - *out = _ws->allocate(); - auto member = _ws->get(*out); - member->recordId = std::move(_buffer[_nextIndex]); - // Only store the record id, not any index information or full objects. This is to - // reduce memory and disk usage - it is the responsibility of our caller to fetch the records. - _ws->transitionToRecordIdAndIdx(*out); - return PlanStage::ADVANCED; + return allocateResultAndAdvance(_ws, out, std::move(_buffer[_nextIndex])); } } // namespace mongo + +#include "mongo/db/sorter/sorter.cpp" +// Explicit instantiation unneeded since we aren't exposing Sorter outside of this file. diff --git a/src/mongo/db/exec/spool.h b/src/mongo/db/exec/spool.h index d70c5a357bb75..4309eda4b28dc 100644 --- a/src/mongo/db/exec/spool.h +++ b/src/mongo/db/exec/spool.h @@ -30,8 +30,22 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" +#include "mongo/db/sorter/sorter.h" +#include "mongo/db/sorter/sorter_stats.h" +#include "mongo/logv2/log_attr.h" namespace mongo { @@ -66,6 +80,8 @@ class SpoolStage final : public PlanStage { PlanStage::StageState doWork(WorkingSetID* id); private: + void spill(); + WorkingSet* _ws; SpoolStats _specificStats; @@ -73,7 +89,17 @@ class SpoolStage final : public PlanStage { // Next index to consume from the buffer. 
If < 0, the buffer is not yet fully populated from the // child. int _nextIndex = -1; - // Buffer caching spooled results. + + // Buffer caching spooled results in-memory. std::vector _buffer; + + // Machinery for spilling to disk. + MemoryUsageTracker _memTracker; + std::unique_ptr _spillStats; + std::shared_ptr::File> _file; + + // Iterators over the file that has been spilled to disk. These must be exhausted in addition to + // '_buffer' when returning results. + std::deque::Iterator>> _spillFileIters; }; } // namespace mongo diff --git a/src/mongo/db/exec/spool_test.cpp b/src/mongo/db/exec/spool_test.cpp index 45bf3eae3bb70..830df35e268f4 100644 --- a/src/mongo/db/exec/spool_test.cpp +++ b/src/mongo/db/exec/spool_test.cpp @@ -31,9 +31,30 @@ * This file contains tests for mongo/db/exec/spool.cpp */ +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/mock_stage.h" #include "mongo/db/exec/spool.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/temp_dir.h" +#include "mongo/util/assert_util.h" using namespace mongo; @@ -55,10 +76,16 @@ class SpoolStageTest : public ServiceContextMongoDTest { /** * Create a new working set member with the given record id. */ - WorkingSetID makeRecord(long recordId) { + WorkingSetID makeRecord(const stdx::variant& recordId) { WorkingSetID id = ws.allocate(); WorkingSetMember* wsm = ws.get(id); - wsm->recordId = RecordId(recordId); + stdx::visit(OverloadedVisitor{ + [&](long value) { wsm->recordId = RecordId(value); }, + [&](const std::string& value) { + wsm->recordId = RecordId(value.c_str(), value.size()); + }, + }, + recordId); ws.transitionToRecordIdAndObj(id); return id; } @@ -67,20 +94,32 @@ class SpoolStageTest : public ServiceContextMongoDTest { * Helper that calls work() on the spool stage and validates the result according to the * expected values. 
*/ - void workAndAssertStateAndRecordId(SpoolStage& spool, - PlanStage::StageState expectedState, - boost::optional expectedId = boost::none, - bool childHasMoreRecords = true) { + void workAndAssertStateAndRecordId( + SpoolStage& spool, + PlanStage::StageState expectedState, + const stdx::variant& expectedId = stdx::monostate{}, + bool childHasMoreRecords = true) { ASSERT_FALSE(spool.isEOF()); WorkingSetID id = WorkingSet::INVALID_ID; auto state = spool.work(&id); ASSERT_EQUALS(state, expectedState); - if (expectedId) { + if (expectedId.index() != 0) { auto member = ws.get(id); ASSERT_TRUE(member->hasRecordId()); - ASSERT_EQUALS(member->recordId.getLong(), *expectedId); + stdx::visit(OverloadedVisitor{ + [&](long value) { + ASSERT_TRUE(member->recordId.isLong()); + ASSERT_EQUALS(member->recordId.getLong(), value); + }, + [&](const std::string& value) { + ASSERT_TRUE(member->recordId.isStr()); + ASSERT_EQUALS(member->recordId.getStr(), value); + }, + [&](const stdx::monostate&) {}, + }, + expectedId); _memUsage += member->recordId.memUsage(); } @@ -104,11 +143,27 @@ class SpoolStageTest : public ServiceContextMongoDTest { ASSERT_EQUALS(stats->totalDataSizeBytes, _memUsage); } + SpoolStage makeSpool(std::unique_ptr root, + long long maxAllowedMemoryUsageBytes = 1024, + boost::optional maxAllowedDiskUsageBytes = boost::none) { + if (maxAllowedDiskUsageBytes) { + _tempDir = std::make_unique("SpoolStageTest"); + expCtx()->tempDir = _tempDir->path(); + expCtx()->allowDiskUse = maxAllowedDiskUsageBytes.has_value(); + } + + internalQueryMaxSpoolMemoryUsageBytes.store(maxAllowedMemoryUsageBytes); + internalQueryMaxSpoolDiskUsageBytes.store(maxAllowedDiskUsageBytes.value_or(0)); + + return SpoolStage(expCtx(), &ws, std::move(root)); + } + WorkingSet ws; private: ServiceContext::UniqueOperationContext _opCtx; std::unique_ptr _expCtx; + std::unique_ptr _tempDir; long _memUsage = 0; }; @@ -116,18 +171,17 @@ class SpoolStageTest : public ServiceContextMongoDTest { TEST_F(SpoolStageTest, eof) { auto mock = std::make_unique(expCtx(), &ws); - auto spool = SpoolStage(expCtx(), &ws, std::move(mock)); + auto spool = makeSpool(std::move(mock)); assertEofState(spool); } TEST_F(SpoolStageTest, basic) { - std::vector docs{makeRecord(1), makeRecord(2), makeRecord(3)}; auto mock = std::make_unique(expCtx(), &ws); - mock->enqueueAdvanced(docs[0]); - mock->enqueueAdvanced(docs[1]); - mock->enqueueAdvanced(docs[2]); + mock->enqueueAdvanced(makeRecord(1)); + mock->enqueueAdvanced(makeRecord(2)); + mock->enqueueAdvanced(makeRecord(3)); - auto spool = SpoolStage(expCtx(), &ws, std::move(mock)); + auto spool = makeSpool(std::move(mock)); // There are no NEED_TIME/NEED_YIELDs to propagate so we can exhaust the input on the first call // to work() and then begin returning the cached results. @@ -138,16 +192,15 @@ TEST_F(SpoolStageTest, basic) { } TEST_F(SpoolStageTest, propagatesNeedTime) { - std::vector docs{makeRecord(1), makeRecord(2), makeRecord(3)}; auto mock = std::make_unique(expCtx(), &ws); mock->enqueueStateCode(PlanStage::NEED_TIME); - mock->enqueueAdvanced(docs[0]); + mock->enqueueAdvanced(makeRecord(1)); mock->enqueueStateCode(PlanStage::NEED_TIME); - mock->enqueueAdvanced(docs[1]); + mock->enqueueAdvanced(makeRecord(2)); mock->enqueueStateCode(PlanStage::NEED_TIME); - mock->enqueueAdvanced(docs[2]); + mock->enqueueAdvanced(makeRecord(3)); - auto spool = SpoolStage(expCtx(), &ws, std::move(mock)); + auto spool = makeSpool(std::move(mock)); // First, consume all of the NEED_TIMEs from the child. 
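The reworked test helpers above accept either a numeric or a string record id through stdx::variant and dispatch with OverloadedVisitor. The same idiom written against the plain standard library, with illustrative names not taken from the tree, looks like this:

```cpp
#include <iostream>
#include <string>
#include <variant>

// Minimal equivalent of OverloadedVisitor: inherit operator() from each lambda.
template <class... Ts>
struct Overloaded : Ts... {
    using Ts::operator()...;
};
template <class... Ts>
Overloaded(Ts...) -> Overloaded<Ts...>;

int main() {
    using IdSpec = std::variant<std::monostate, long, std::string>;
    const IdSpec ids[] = {std::monostate{}, 42L, std::string{"string record id"}};

    for (const auto& id : ids) {
        std::visit(Overloaded{
                       [](std::monostate) { std::cout << "no expected id\n"; },
                       [](long v) { std::cout << "numeric id: " << v << "\n"; },
                       [](const std::string& s) { std::cout << "string id: " << s << "\n"; },
                   },
                   id);
    }
}
```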
workAndAssertStateAndRecordId(spool, PlanStage::NEED_TIME); @@ -163,16 +216,15 @@ TEST_F(SpoolStageTest, propagatesNeedTime) { } TEST_F(SpoolStageTest, propagatesNeedYield) { - std::vector docs{makeRecord(1), makeRecord(2), makeRecord(3)}; auto mock = std::make_unique(expCtx(), &ws); - mock->enqueueAdvanced(docs[0]); + mock->enqueueAdvanced(makeRecord(1)); mock->enqueueStateCode(PlanStage::NEED_YIELD); - mock->enqueueAdvanced(docs[1]); + mock->enqueueAdvanced(makeRecord(2)); mock->enqueueStateCode(PlanStage::NEED_YIELD); mock->enqueueStateCode(PlanStage::NEED_YIELD); - mock->enqueueAdvanced(docs[2]); + mock->enqueueAdvanced(makeRecord(3)); - auto spool = SpoolStage(expCtx(), &ws, std::move(mock)); + auto spool = makeSpool(std::move(mock)); // First, consume all of the NEED_YIELDs from the child. workAndAssertStateAndRecordId(spool, PlanStage::NEED_YIELD); @@ -193,14 +245,114 @@ TEST_F(SpoolStageTest, onlyNeedYieldAndNeedTime) { mock->enqueueStateCode(PlanStage::NEED_TIME); mock->enqueueStateCode(PlanStage::NEED_YIELD); - auto spool = SpoolStage(expCtx(), &ws, std::move(mock)); + auto spool = makeSpool(std::move(mock)); // Consume all the NEED_YIELD/NEED_TIMEs, then we should see EOF immediately workAndAssertStateAndRecordId(spool, PlanStage::NEED_YIELD); workAndAssertStateAndRecordId(spool, PlanStage::NEED_TIME); workAndAssertStateAndRecordId( - spool, PlanStage::NEED_YIELD, boost::none, false /* childHasMoreRecords */); + spool, PlanStage::NEED_YIELD, stdx::monostate{}, false /* childHasMoreRecords */); + + assertEofState(spool); +} + +TEST_F(SpoolStageTest, spillEveryRecordId) { + auto mock = std::make_unique(expCtx(), &ws); + mock->enqueueAdvanced(makeRecord(1)); + mock->enqueueAdvanced(makeRecord(2)); + mock->enqueueAdvanced(makeRecord(3)); + const uint64_t maxAllowedMemoryUsageBytes = 1; + auto spool = + makeSpool(std::move(mock), maxAllowedMemoryUsageBytes, 1024 /* maxAllowedDiskUsageBytes */); + + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 1); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 2); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 3); assertEofState(spool); + + // Validate the spilling stats. We should have spilled for each record. + auto stats = static_cast(spool.getSpecificStats()); + ASSERT_EQUALS(stats->spills, 3); + ASSERT_GREATER_THAN(stats->spilledDataStorageSize, 0); + ASSERT_EQUALS(stats->maxMemoryUsageBytes, maxAllowedMemoryUsageBytes); } + +TEST_F(SpoolStageTest, spillEveryOtherRecordId) { + auto mock = std::make_unique(expCtx(), &ws); + mock->enqueueAdvanced(makeRecord(1)); + mock->enqueueAdvanced(makeRecord(2)); + mock->enqueueAdvanced(makeRecord(3)); + mock->enqueueAdvanced(makeRecord(4)); + mock->enqueueAdvanced(makeRecord(5)); + + const uint64_t maxAllowedMemoryUsageBytes = sizeof(RecordId) * 1.5; + auto spool = + makeSpool(std::move(mock), maxAllowedMemoryUsageBytes, 1024 /* maxAllowedDiskUsageBytes */); + + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 1); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 2); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 3); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 4); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 5); + assertEofState(spool); + + // Validate the spilling stats. We should have spilled every other record. 
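Assuming each long-based RecordId reports roughly sizeof(RecordId) of memory, the 1.5 × sizeof(RecordId) budget in this test is exceeded each time a second id gets buffered: records 1-2 and 3-4 are flushed in two spills, and record 5 is still in memory at EOF, which is why the assertions just below expect exactly 2 spills.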
+ auto stats = static_cast(spool.getSpecificStats()); + ASSERT_EQUALS(stats->spills, 2); + ASSERT_GREATER_THAN(stats->spilledDataStorageSize, 0); + ASSERT_EQUALS(stats->maxMemoryUsageBytes, maxAllowedMemoryUsageBytes); +} + +TEST_F(SpoolStageTest, spillStringRecordId) { + auto mock = std::make_unique(expCtx(), &ws); + mock->enqueueAdvanced(makeRecord(1)); + mock->enqueueAdvanced(makeRecord("this is a short string")); + mock->enqueueAdvanced(makeRecord(2)); + mock->enqueueAdvanced(makeRecord("this is a longer string.........")); + mock->enqueueAdvanced(makeRecord("the last string")); + + const uint64_t maxAllowedMemoryUsageBytes = sizeof(RecordId) + 1; + auto spool = + makeSpool(std::move(mock), maxAllowedMemoryUsageBytes, 1024 /* maxAllowedDiskUsageBytes */); + + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 1); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, "this is a short string"); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, 2); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, "this is a longer string........."); + workAndAssertStateAndRecordId(spool, PlanStage::ADVANCED, "the last string"); + assertEofState(spool); + + // Validate the spilling stats. We should have spilled every other record. + auto stats = static_cast(spool.getSpecificStats()); + ASSERT_EQUALS(stats->spills, 2); + ASSERT_GREATER_THAN(stats->spilledDataStorageSize, 0); + ASSERT_EQUALS(stats->maxMemoryUsageBytes, maxAllowedMemoryUsageBytes); +} + +TEST_F(SpoolStageTest, spillingDisabled) { + auto mock = std::make_unique(expCtx(), &ws); + mock->enqueueAdvanced(makeRecord(1)); + + auto spool = makeSpool(std::move(mock), + 0 /* maxAllowedMemoryUsageBytes */, + boost::none /* maxAllowedDiskUsageBytes */); + + WorkingSetID id; + ASSERT_THROWS_CODE( + spool.work(&id), AssertionException, ErrorCodes::QueryExceededMemoryLimitNoDiskUseAllowed); +} + +TEST_F(SpoolStageTest, maxDiskSpaceUsed) { + auto mock = std::make_unique(expCtx(), &ws); + mock->enqueueAdvanced(makeRecord(1)); + mock->enqueueAdvanced(makeRecord(2)); + + auto spool = makeSpool( + std::move(mock), 1 /* maxAllowedMemoryUsageBytes */, 1 /* maxAllowedDiskUsageBytes */); + + WorkingSetID id; + ASSERT_THROWS_CODE(spool.work(&id), AssertionException, 7443700); +} + } // namespace diff --git a/src/mongo/db/exec/stagedebug_cmd.cpp b/src/mongo/db/exec/stagedebug_cmd.cpp index d6d167893ae3f..10c23e17f0032 100644 --- a/src/mongo/db/exec/stagedebug_cmd.cpp +++ b/src/mongo/db/exec/stagedebug_cmd.cpp @@ -27,39 +27,60 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include - -#include "mongo/base/init.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog/database.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/and_hash.h" #include "mongo/db/exec/and_sorted.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/delete_stage.h" #include "mongo/db/exec/fetch.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/limit.h" #include "mongo/db/exec/merge_sort.h" #include "mongo/db/exec/or.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/skip.h" -#include "mongo/db/exec/sort.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/index/fts_access_method.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/expression_text_base.h" #include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -143,7 +164,7 @@ class StageDebugCmd : public BasicCommand { const NamespaceString nss( NamespaceStringUtil::parseNamespaceFromRequest(dbName, collElt.String())); uassert(ErrorCodes::InvalidNamespace, - str::stream() << nss.toString() << " is not a valid namespace", + str::stream() << nss.toStringForErrorMsg() << " is not a valid namespace", nss.isValid()); auto expCtx = make_intrusive( @@ -153,13 +174,15 @@ class StageDebugCmd : public BasicCommand { // TODO A write lock is currently taken here to accommodate stages that perform writes // (e.g. DeleteStage). This should be changed to use a read lock for read-only // execution trees. - AutoGetCollection autoColl(opCtx, nss, MODE_IX); + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); // Make sure the collection is valid. 
- const auto& collection = autoColl.getCollection(); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Couldn't find collection " << nss.ns(), - collection); + str::stream() << "Couldn't find collection " << nss.toStringForErrorMsg(), + collection.exists()); // Pull out the plan BSONElement planElt = argObj["plan"]; @@ -184,7 +207,7 @@ class StageDebugCmd : public BasicCommand { plan_executor_factory::make(expCtx, std::move(ws), std::move(rootFetch), - &collection, + collection, PlanYieldPolicy::YieldPolicy::YIELD_AUTO, false /* whether owned BSON must be returned */); fassert(28536, statusWithPlanExecutor.getStatus()); @@ -203,11 +226,12 @@ class StageDebugCmd : public BasicCommand { } PlanStage* parseQuery(const boost::intrusive_ptr& expCtx, - const CollectionPtr& collection, + CollectionAcquisition collection, BSONObj obj, WorkingSet* workingSet, const NamespaceString& nss, std::vector>* exprs) { + const auto& collectionPtr = collection.getCollectionPtr(); OperationContext* opCtx = expCtx->opCtx; BSONElement firstElt = obj.firstElement(); @@ -234,7 +258,7 @@ class StageDebugCmd : public BasicCommand { auto statusWithMatcher = MatchExpressionParser::parse(argObj, expCtx, - ExtensionsCallbackReal(opCtx, &collection->ns()), + ExtensionsCallbackReal(opCtx, &collection.nss()), MatchExpressionParser::kAllowAllSpecialFeatures); if (!statusWithMatcher.isOK()) { return nullptr; @@ -242,7 +266,7 @@ class StageDebugCmd : public BasicCommand { std::unique_ptr me = std::move(statusWithMatcher.getValue()); // exprs is what will wind up deleting this. matcher = me.get(); - verify(nullptr != matcher); + MONGO_verify(nullptr != matcher); exprs->push_back(std::move(me)); } else if (argsTag == e.fieldName()) { nodeArgs = argObj; @@ -262,7 +286,7 @@ class StageDebugCmd : public BasicCommand { // This'll throw if it's not an obj but that's OK. 
BSONObj keyPatternObj = keyPatternElement.Obj(); std::vector indexes; - collection->getIndexCatalog()->findIndexesByKeyPattern( + collectionPtr->getIndexCatalog()->findIndexesByKeyPattern( opCtx, keyPatternObj, IndexCatalog::InclusionPolicy::kReady, &indexes); uassert(16890, str::stream() << "Can't find index: " << keyPatternObj, @@ -279,18 +303,18 @@ class StageDebugCmd : public BasicCommand { str::stream() << "Index 'name' must be a string in: " << nodeArgs, nodeArgs["name"].type() == BSONType::String); StringData name = nodeArgs["name"].valueStringData(); - desc = collection->getIndexCatalog()->findIndexByName(opCtx, name); + desc = collectionPtr->getIndexCatalog()->findIndexByName(opCtx, name); uassert(40223, str::stream() << "Can't find index: " << name.toString(), desc); } - IndexScanParams params(opCtx, collection, desc); + IndexScanParams params(opCtx, collectionPtr, desc); params.bounds.isSimpleRange = true; params.bounds.startKey = BSONObj::stripFieldNames(nodeArgs["startKey"].Obj()); params.bounds.endKey = BSONObj::stripFieldNames(nodeArgs["endKey"].Obj()); params.bounds.boundInclusion = IndexBounds::makeBoundInclusionFromBoundBools( nodeArgs["startKeyInclusive"].Bool(), nodeArgs["endKeyInclusive"].Bool()); params.direction = nodeArgs["direction"].numberInt(); - params.shouldDedup = desc->getEntry()->isMultikey(opCtx, collection); + params.shouldDedup = desc->getEntry()->isMultikey(opCtx, collectionPtr); return new IndexScan(expCtx.get(), collection, params, workingSet, matcher); } else if ("andHash" == nodeName) { diff --git a/src/mongo/db/exec/subplan.cpp b/src/mongo/db/exec/subplan.cpp index eab7e0abe494a..665599de3c72f 100644 --- a/src/mongo/db/exec/subplan.cpp +++ b/src/mongo/db/exec/subplan.cpp @@ -27,27 +27,35 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/subplan.h" - +#include +#include #include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/exec/multi_plan.h" #include "mongo/db/exec/plan_cache_util.h" -#include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/exec/subplan.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/get_executor.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/plan_cache.h" #include "mongo/db/query/plan_cache_key_factory.h" #include "mongo/db/query/plan_executor.h" -#include "mongo/db/query/planner_access.h" -#include "mongo/db/query/planner_analysis.h" -#include "mongo/db/query/query_planner_common.h" +#include "mongo/db/query/query_planner.h" #include "mongo/db/query/stage_builder_util.h" +#include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" -#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" +#include "mongo/util/str.h" namespace mongo { @@ -58,7 +66,7 @@ using std::vector; const char* SubplanStage::kStageType = "SUBPLAN"; SubplanStage::SubplanStage(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, WorkingSet* ws, const QueryPlannerParams& params, CanonicalQuery* cq) @@ -114,7 +122,7 @@ Status SubplanStage::choosePlanWholeQuery(PlanYieldPolicy* yieldPolicy) { auto statusWithMultiPlanSolns = QueryPlanner::plan(*_query, _plannerParams); if (!statusWithMultiPlanSolns.isOK()) { return statusWithMultiPlanSolns.getStatus().withContext( - str::stream() << "error processing query: " << _query->toString() + str::stream() << "error processing query: " << _query->toStringForErrorMsg() << " planner returned error"); } auto solutions = std::move(statusWithMultiPlanSolns.getValue()); @@ -188,7 +196,7 @@ Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { // Plan each branch of the $or. 
auto subplanningStatus = QueryPlanner::planSubqueries( - expCtx()->opCtx, getSolutionCachedData, collection(), *_query, _plannerParams); + expCtx()->opCtx, getSolutionCachedData, collectionPtr(), *_query, _plannerParams); if (!subplanningStatus.isOK()) { return choosePlanWholeQuery(yieldPolicy); } @@ -237,7 +245,7 @@ Status SubplanStage::pickBestPlan(PlanYieldPolicy* yieldPolicy) { if (!multiPlanStage->bestPlanChosen()) { str::stream ss; - ss << "Failed to pick best plan for subchild " << cq->toString(); + ss << "Failed to pick best plan for subchild " << cq->toStringForErrorMsg(); return Status(ErrorCodes::NoQueryExecutionPlans, ss); } return multiPlanStage->bestSolution(); diff --git a/src/mongo/db/exec/subplan.h b/src/mongo/db/exec/subplan.h index f1065c32e8d2f..0b5f4553fee47 100644 --- a/src/mongo/db/exec/subplan.h +++ b/src/mongo/db/exec/subplan.h @@ -29,20 +29,31 @@ #pragma once +#include +#include #include #include #include #include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_all_indices_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/classic_plan_cache.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util_core.h" namespace mongo { @@ -70,7 +81,7 @@ class OperationContext; class SubplanStage final : public RequiresAllIndicesStage { public: SubplanStage(ExpressionContext* expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, WorkingSet* ws, const QueryPlannerParams& params, CanonicalQuery* cq); diff --git a/src/mongo/db/exec/text_match.cpp b/src/mongo/db/exec/text_match.cpp index 67468cee8433f..b36c1c413d621 100644 --- a/src/mongo/db/exec/text_match.cpp +++ b/src/mongo/db/exec/text_match.cpp @@ -30,13 +30,13 @@ #include "mongo/db/exec/text_match.h" #include +#include #include -#include "mongo/db/exec/scoped_timer.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/jsobj.h" -#include "mongo/util/str.h" +#include "mongo/db/storage/snapshot.h" namespace mongo { diff --git a/src/mongo/db/exec/text_match.h b/src/mongo/db/exec/text_match.h index d9018198ad8d0..212ab1a984d72 100644 --- a/src/mongo/db/exec/text_match.h +++ b/src/mongo/db/exec/text_match.h @@ -30,13 +30,18 @@ #pragma once #include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/fts/fts_matcher.h" #include "mongo/db/fts/fts_query_impl.h" #include "mongo/db/fts/fts_spec.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" namespace mongo { diff --git a/src/mongo/db/exec/text_or.cpp b/src/mongo/db/exec/text_or.cpp index 2746f97f0c5d0..8b1ce7792bb4e 100644 --- a/src/mongo/db/exec/text_or.cpp +++ b/src/mongo/db/exec/text_or.cpp @@ -29,16 
+29,22 @@ #include "mongo/db/exec/text_or.h" -#include +#include #include +#include +#include #include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/filter.h" -#include "mongo/db/exec/index_scan.h" -#include "mongo/db/exec/scoped_timer.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/exec/working_set_common.h" -#include "mongo/db/jsobj.h" #include "mongo/db/query/plan_executor_impl.h" #include "mongo/db/record_id.h" #include "mongo/util/assert_util.h" @@ -57,7 +63,7 @@ TextOrStage::TextOrStage(ExpressionContext* expCtx, size_t keyPrefixSize, WorkingSet* ws, const MatchExpression* filter, - const CollectionPtr& collection) + VariantCollectionPtrOrAcquisition collection) : RequiresCollectionStage(kStageType, expCtx, collection), _keyPrefixSize(keyPrefixSize), _ws(ws), @@ -154,9 +160,8 @@ PlanStage::StageState TextOrStage::initStage(WorkingSetID* out) { return handlePlanStageYield( expCtx(), "TextOrStage initStage", - collection()->ns().ns(), [&] { - _recordCursor = collection()->getCursor(opCtx()); + _recordCursor = collectionPtr()->getCursor(opCtx()); _internalState = State::kReadingTerms; return PlanStage::NEED_TIME; }, @@ -262,14 +267,13 @@ PlanStage::StageState TextOrStage::addTerm(WorkingSetID wsid, WorkingSetID* out) const auto ret = handlePlanStageYield( expCtx(), "TextOrStage addTerm", - collection()->ns().ns(), [&] { if (!WorkingSetCommon::fetch(opCtx(), _ws, wsid, _recordCursor.get(), - collection(), - collection()->ns())) { + collectionPtr(), + collectionPtr()->ns())) { _ws->free(wsid); textRecordData->score = -1; return NEED_TIME; diff --git a/src/mongo/db/exec/text_or.h b/src/mongo/db/exec/text_or.h index 87936b531aac6..f8a116936cae3 100644 --- a/src/mongo/db/exec/text_or.h +++ b/src/mongo/db/exec/text_or.h @@ -29,12 +29,21 @@ #pragma once +#include #include +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/fts/fts_spec.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/record_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/stdx/unordered_map.h" namespace mongo { @@ -71,7 +80,7 @@ class TextOrStage final : public RequiresCollectionStage { size_t keyPrefixSize, WorkingSet* ws, const MatchExpression* filter, - const CollectionPtr& collection); + VariantCollectionPtrOrAcquisition collection); void addChild(std::unique_ptr child); diff --git a/src/mongo/db/exec/timeseries/bucket_spec.cpp b/src/mongo/db/exec/timeseries/bucket_spec.cpp new file mode 100644 index 0000000000000..a8ba1289249e1 --- /dev/null +++ b/src/mongo/db/exec/timeseries/bucket_spec.cpp @@ -0,0 +1,1103 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. 
+ * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/exec/timeseries/bucket_spec.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_algo.h" +#include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression_expr.h" +#include "mongo/db/matcher/expression_geo.h" +#include "mongo/db/matcher/expression_internal_bucket_geo_within.h" +#include "mongo/db/matcher/expression_internal_expr_comparison.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/matcher/rewrite_expr.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery + +namespace mongo { + +using IneligiblePredicatePolicy = BucketSpec::IneligiblePredicatePolicy; + +bool BucketSpec::fieldIsComputed(StringData field) const { + return std::any_of( + _computedMetaProjFields.begin(), _computedMetaProjFields.end(), [&](auto& s) { + return s == field || expression::isPathPrefixOf(field, s) || + expression::isPathPrefixOf(s, field); + }); +} + +namespace { +constexpr long long max32BitEpochMillis = + static_cast(std::numeric_limits::max()) * 1000; + +/** + * Creates an ObjectId initialized with an appropriate timestamp corresponding to 'rhs' and + * returns it as a Value. + */ +template +auto constructObjectIdValue(const BSONElement& rhs, int bucketMaxSpanSeconds) { + // Indicates whether to initialize an ObjectId with a max or min value for the non-date bytes. + enum class OIDInit : bool { max, min }; + // Make an ObjectId cooresponding to a date value. As a conversion from date to ObjectId will + // truncate milliseconds, we round up when needed to prevent missing results. 
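A minimal, self-contained illustration of the rounding that makeDateOID (below) applies when roundMillisUpToSecond is set: the ObjectId timestamp only stores whole seconds, so the millisecond date is rounded up to the next second before truncation, keeping the derived _id bound from excluding buckets the original date predicate would match. The helper name and types here are placeholders, not part of the patch.

```cpp
#include <cstdint>
#include <iostream>

// Round a millisecond epoch value up to whole seconds, as the comment above
// describes; plain truncation would produce too small an upper bound.
std::int64_t roundMillisUpToSeconds(std::int64_t millisSinceEpoch) {
    std::int64_t secs = millisSinceEpoch / 1000;
    if (millisSinceEpoch % 1000 != 0)
        ++secs;
    return secs;
}

int main() {
    std::cout << roundMillisUpToSeconds(1500) << "\n";  // 2, not 1
    std::cout << roundMillisUpToSeconds(2000) << "\n";  // 2
}
```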
+ auto makeDateOID = [](auto&& date, auto&& maxOrMin, bool roundMillisUpToSecond = false) { + if (roundMillisUpToSecond && (date.toMillisSinceEpoch() % 1000 != 0)) { + date += Seconds{1}; + } + + auto oid = OID{}; + oid.init(date, maxOrMin == OIDInit::max); + return oid; + }; + // Make an ObjectId corresponding to a date value adjusted by the max bucket value for the + // time series view that this query operates on. This predicate can be used in a comparison + // to gauge a max value for a given bucket, rather than a min value. + auto makeMaxAdjustedDateOID = [&](auto&& date, auto&& maxOrMin) { + // Ensure we don't underflow. + if (date.toDurationSinceEpoch() >= Seconds{bucketMaxSpanSeconds}) + // Subtract max bucket range. + return makeDateOID(date - Seconds{bucketMaxSpanSeconds}, maxOrMin); + else + // Since we're out of range, just make a predicate that is true for all dates. + // We'll never use an OID for a date < 0 due to OID range limitations, so we set the + // minimum date to 0. + return makeDateOID(Date_t::fromMillisSinceEpoch(0LL), OIDInit::min); + }; + + // Because the OID timestamp is only 4 bytes, we can't convert larger dates + invariant(rhs.date().toMillisSinceEpoch() >= 0LL); + invariant(rhs.date().toMillisSinceEpoch() <= max32BitEpochMillis); + + // An ObjectId consists of a 4-byte timestamp, as well as a unique value and a counter, thus + // two ObjectIds initialized with the same date will have different values. To ensure that we + // do not incorrectly include or exclude any buckets, depending on the operator we will + // construct either the largest or the smallest ObjectId possible with the corresponding date. + // If the query operand is not of type Date, the original query will not match on any documents + // because documents in a time-series collection must have a timeField of type Date. We will + // make this case faster by keeping the ObjectId as the lowest or highest possible value so as + // to eliminate all buckets. + if constexpr (std::is_same_v) { + return Value{makeDateOID(rhs.date(), OIDInit::min, true /*roundMillisUpToSecond*/)}; + } else if constexpr (std::is_same_v) { + return Value{makeDateOID(rhs.date(), OIDInit::max, true /*roundMillisUpToSecond*/)}; + } else if constexpr (std::is_same_v) { + return Value{makeMaxAdjustedDateOID(rhs.date(), OIDInit::max)}; + } else if constexpr (std::is_same_v) { + return Value{makeMaxAdjustedDateOID(rhs.date(), OIDInit::min)}; + } + MONGO_UNREACHABLE_TASSERT(5756800); +} + +/** + * Makes a disjunction of the given predicates. + * + * - The result is non-null; it may be an OrMatchExpression with zero children. + * - Any trivially-false arguments are omitted. + * - If only one argument is nontrivial, returns that argument rather than adding an extra + * OrMatchExpression around it. 
+ */ +std::unique_ptr makeOr(std::vector> predicates) { + std::vector> nontrivial; + for (auto&& p : predicates) { + if (!p->isTriviallyFalse()) + nontrivial.push_back(std::move(p)); + } + + if (nontrivial.size() == 1) + return std::move(nontrivial[0]); + + return std::make_unique(std::move(nontrivial)); +} + +BucketSpec::BucketPredicate handleIneligible(IneligiblePredicatePolicy policy, + const MatchExpression* matchExpr, + StringData message) { + switch (policy) { + case IneligiblePredicatePolicy::kError: + uasserted( + 5916301, + "Error translating non-metadata time-series predicate to operate on buckets: " + + message + ": " + matchExpr->serialize().toString()); + case IneligiblePredicatePolicy::kIgnore: + return {}; + } + MONGO_UNREACHABLE_TASSERT(5916307); +} + +/* + * Creates a predicate that ensures that if there exists a subpath of matchExprPath such that the + * type of `control.min.subpath` is not the same as `control.max.subpath` then we will match that + * document. + * + * However, if the buckets collection has no mixed-schema data then this type-equality predicate is + * unnecessary. In that case this function returns an empty, always-true predicate. + */ +std::unique_ptr createTypeEqualityPredicate( + boost::intrusive_ptr pExpCtx, + const StringData& matchExprPath, + bool assumeNoMixedSchemaData) { + + std::vector> typeEqualityPredicates; + + if (assumeNoMixedSchemaData) + return makeOr(std::move(typeEqualityPredicates)); + + FieldPath matchExprField(matchExprPath); + using namespace timeseries; + + // Assume that we're generating a predicate on "a.b" + for (size_t i = 0; i < matchExprField.getPathLength(); i++) { + auto minPath = std::string{kControlMinFieldNamePrefix} + matchExprField.getSubpath(i); + auto maxPath = std::string{kControlMaxFieldNamePrefix} + matchExprField.getSubpath(i); + + // This whole block adds + // {$expr: {$ne: [{$type: "$control.min.a"}, {$type: "$control.max.a"}]}} + // in order to ensure that the type of `control.min.a` and `control.max.a` are the same. + + // This produces {$expr: ... } + typeEqualityPredicates.push_back(std::make_unique( + // This produces {$ne: ... } + make_intrusive( + pExpCtx.get(), + ExpressionCompare::CmpOp::NE, + // This produces [...] + makeVector>( + // This produces {$type: ... } + make_intrusive( + pExpCtx.get(), + // This produces [...] + makeVector>( + // This produces "$control.min.a" + ExpressionFieldPath::createPathFromString( + pExpCtx.get(), minPath, pExpCtx->variablesParseState))), + // This produces {$type: ... } + make_intrusive( + pExpCtx.get(), + // This produces [...] + makeVector>( + // This produces "$control.max.a" + ExpressionFieldPath::createPathFromString( + pExpCtx.get(), maxPath, pExpCtx->variablesParseState))))), + pExpCtx)); + } + return makeOr(std::move(typeEqualityPredicates)); +} + +boost::optional checkComparisonPredicateErrors( + const MatchExpression* matchExpr, + const StringData matchExprPath, + const BSONElement& matchExprData, + const BucketSpec& bucketSpec, + ExpressionContext::CollationMatchesDefault collationMatchesDefault) { + using namespace timeseries; + // The control field's min and max are chosen using a field-order insensitive comparator, while + // MatchExpressions use a comparator that treats field-order as significant. Because of this we + // will not perform this optimization on queries with operands of compound types. 
+ if (matchExprData.type() == BSONType::Object || matchExprData.type() == BSONType::Array) + return "operand can't be an object or array"_sd; + + // MatchExpressions have special comparison semantics regarding null, in that {$eq: null} will + // match all documents where the field is either null or missing. Because this is different + // from both the comparison semantics that InternalExprComparison expressions and the control's + // min and max fields use, we will not perform this optimization on queries with null operands. + if (matchExprData.type() == BSONType::jstNULL) + return "can't handle {$eq: null}"_sd; + + // The control field's min and max are chosen based on the collation of the collection. If the + // query's collation does not match the collection's collation and the query operand is a + // string or compound type (skipped above) we will not perform this optimization. + if (collationMatchesDefault == ExpressionContext::CollationMatchesDefault::kNo && + matchExprData.type() == BSONType::String) { + return "can't handle string comparison with a non-default collation"_sd; + } + + // This function only handles time and measurement predicates--not metadata. + if (bucketSpec.metaField() && + (matchExprPath == bucketSpec.metaField().value() || + expression::isPathPrefixOf(bucketSpec.metaField().value(), matchExprPath))) { + tasserted( + 6707200, + str::stream() << "createComparisonPredicate() does not handle metadata predicates: " + << matchExpr); + } + + // We must avoid mapping predicates on fields computed via $addFields or a computed $project. + if (bucketSpec.fieldIsComputed(matchExprPath.toString())) { + return "can't handle a computed field"_sd; + } + + // We must avoid mapping predicates on fields removed by $project. + if (!determineIncludeField(matchExprPath, bucketSpec.behavior(), bucketSpec.fieldSet())) { + return "can't handle a field removed by projection"_sd; + } + + const auto isTimeField = (matchExprPath == bucketSpec.timeField()); + if (isTimeField && matchExprData.type() != BSONType::Date) { + // Users are not allowed to insert non-date measurements into time field. So this query + // would not match anything. We do not need to optimize for this case. 
+ return "This predicate will never be true, because the time field always contains a Date"_sd; + } + + return boost::none; +} + +std::unique_ptr createComparisonPredicate( + const ComparisonMatchExpressionBase* matchExpr, + const BucketSpec& bucketSpec, + int bucketMaxSpanSeconds, + boost::intrusive_ptr pExpCtx, + bool haveComputedMetaField, + bool includeMetaField, + bool assumeNoMixedSchemaData, + IneligiblePredicatePolicy policy) { + using namespace timeseries; + const auto matchExprPath = matchExpr->path(); + const auto matchExprData = matchExpr->getData(); + + const auto error = checkComparisonPredicateErrors( + matchExpr, matchExprPath, matchExprData, bucketSpec, pExpCtx->collationMatchesDefault); + if (error) { + return handleIneligible(policy, matchExpr, *error).loosePredicate; + } + + const auto isTimeField = (matchExprPath == bucketSpec.timeField()); + auto minPath = std::string{kControlMinFieldNamePrefix} + matchExprPath; + const StringData minPathStringData(minPath); + auto maxPath = std::string{kControlMaxFieldNamePrefix} + matchExprPath; + const StringData maxPathStringData(maxPath); + + BSONObj minTime; + BSONObj maxTime; + bool dateIsExtended = false; + if (isTimeField) { + auto timeField = matchExprData.Date(); + minTime = BSON("" << timeField - Seconds(bucketMaxSpanSeconds)); + maxTime = BSON("" << timeField + Seconds(bucketMaxSpanSeconds)); + + // The date is in the "extended" range if it doesn't fit into the bottom + // 32 bits. + long long timestamp = timeField.toMillisSinceEpoch(); + dateIsExtended = timestamp < 0LL || timestamp > max32BitEpochMillis; + } + + switch (matchExpr->matchType()) { + case MatchExpression::EQ: + case MatchExpression::INTERNAL_EXPR_EQ: + // For $eq, make both a $lte against 'control.min' and a $gte predicate against + // 'control.max'. + // + // If the comparison is against the 'time' field and we haven't stored a time outside of + // the 32 bit range, include a predicate against the _id field which is converted to + // the maximum for the corresponding range of ObjectIds and + // is adjusted by the max range for a bucket to approximate the max bucket value given + // the min. Also include a predicate against the _id field which is converted to the + // minimum for the range of ObjectIds corresponding to the given date. In + // addition, we include a {'control.min' : {$gte: 'time - bucketMaxSpanSeconds'}} and + // a {'control.max' : {$lte: 'time + bucketMaxSpanSeconds'}} predicate which will be + // helpful in reducing bounds for index scans on 'time' field and routing on mongos. + // + // The same procedure applies to aggregation expressions of the form + // {$expr: {$eq: [...]}} that can be rewritten to use $_internalExprEq. 
+ if (!isTimeField) { + return makeOr(makeVector>( + makePredicate(MatchExprPredicate( + minPathStringData, matchExprData), + MatchExprPredicate( + maxPathStringData, matchExprData)), + createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); + } else if (bucketSpec.usesExtendedRange()) { + return makePredicate( + MatchExprPredicate(minPath, matchExprData), + MatchExprPredicate(minPath, + minTime.firstElement()), + MatchExprPredicate(maxPath, matchExprData), + MatchExprPredicate(maxPath, + maxTime.firstElement())); + } else if (dateIsExtended) { + // Since by this point we know that no time value has been inserted which is + // outside the epoch range, we know that no document can meet this criteria + return std::make_unique(); + } else { + return makePredicate( + MatchExprPredicate(minPathStringData, + matchExprData), + MatchExprPredicate(minPathStringData, + minTime.firstElement()), + MatchExprPredicate(maxPathStringData, + matchExprData), + MatchExprPredicate(maxPathStringData, + maxTime.firstElement()), + MatchExprPredicate( + kBucketIdFieldName, + constructObjectIdValue(matchExprData, + bucketMaxSpanSeconds)), + MatchExprPredicate( + kBucketIdFieldName, + constructObjectIdValue(matchExprData, + bucketMaxSpanSeconds))); + } + MONGO_UNREACHABLE_TASSERT(6646903); + + case MatchExpression::GT: + case MatchExpression::INTERNAL_EXPR_GT: + // For $gt, make a $gt predicate against 'control.max'. In addition, if the comparison + // is against the 'time' field, and the collection doesn't contain times outside the + // 32 bit range, include a predicate against the _id field which is converted to the + // maximum for the corresponding range of ObjectIds and is adjusted by the max range + // for a bucket to approximate the max bucket value given the min. + // + // In addition, we include a {'control.min' : {$gt: 'time - bucketMaxSpanSeconds'}} + // predicate which will be helpful in reducing bounds for index scans on 'time' field + // and routing on mongos. + // + // The same procedure applies to aggregation expressions of the form + // {$expr: {$gt: [...]}} that can be rewritten to use $_internalExprGt. + if (!isTimeField) { + return makeOr(makeVector>( + std::make_unique(maxPathStringData, + matchExprData), + createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); + } else if (bucketSpec.usesExtendedRange()) { + return makePredicate( + MatchExprPredicate(maxPath, matchExprData), + MatchExprPredicate(minPath, + minTime.firstElement())); + } else if (matchExprData.Date().toMillisSinceEpoch() < 0LL) { + // Since by this point we know that no time value has been inserted < 0, + // every document must meet this criteria + return std::make_unique(); + } else if (matchExprData.Date().toMillisSinceEpoch() > max32BitEpochMillis) { + // Since by this point we know that no time value has been inserted > + // max32BitEpochMillis, we know that no document can meet this criteria + return std::make_unique(); + } else { + return makePredicate(MatchExprPredicate( + maxPathStringData, matchExprData), + MatchExprPredicate( + minPathStringData, minTime.firstElement()), + MatchExprPredicate( + kBucketIdFieldName, + constructObjectIdValue( + matchExprData, bucketMaxSpanSeconds))); + } + MONGO_UNREACHABLE_TASSERT(6646904); + + case MatchExpression::GTE: + case MatchExpression::INTERNAL_EXPR_GTE: + // For $gte, make a $gte predicate against 'control.max'. 
In addition, if the comparison + // is against the 'time' field, and the collection doesn't contain times outside the + // 32 bit range, include a predicate against the _id field which is + // converted to the minimum for the corresponding range of ObjectIds and is adjusted + // by the max range for a bucket to approximate the max bucket value given the min. In + // addition, we include a {'control.min' : {$gte: 'time - bucketMaxSpanSeconds'}} + // predicate which will be helpful in reducing bounds for index scans on 'time' field + // and routing on mongos. + // + // The same procedure applies to aggregation expressions of the form + // {$expr: {$gte: [...]}} that can be rewritten to use $_internalExprGte. + if (!isTimeField) { + return makeOr(makeVector>( + std::make_unique(maxPathStringData, + matchExprData), + createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); + } else if (bucketSpec.usesExtendedRange()) { + return makePredicate( + MatchExprPredicate(maxPath, matchExprData), + MatchExprPredicate(minPath, + minTime.firstElement())); + } else if (matchExprData.Date().toMillisSinceEpoch() < 0LL) { + // Since by this point we know that no time value has been inserted < 0, + // every document must meet this criteria + return std::make_unique(); + } else if (matchExprData.Date().toMillisSinceEpoch() > max32BitEpochMillis) { + // Since by this point we know that no time value has been inserted > 0xffffffff, + // we know that no value can meet this criteria + return std::make_unique(); + } else { + return makePredicate(MatchExprPredicate( + maxPathStringData, matchExprData), + MatchExprPredicate( + minPathStringData, minTime.firstElement()), + MatchExprPredicate( + kBucketIdFieldName, + constructObjectIdValue( + matchExprData, bucketMaxSpanSeconds))); + } + MONGO_UNREACHABLE_TASSERT(6646905); + + case MatchExpression::LT: + case MatchExpression::INTERNAL_EXPR_LT: + // For $lt, make a $lt predicate against 'control.min'. In addition, if the comparison + // is against the 'time' field, include a predicate against the _id field which is + // converted to the minimum for the corresponding range of ObjectIds, unless the + // collection contain extended range dates which won't fit int the 32 bits allocated + // for _id. + // + // In addition, we include a {'control.max' : {$lt: 'time + bucketMaxSpanSeconds'}} + // predicate which will be helpful in reducing bounds for index scans on 'time' field + // and routing on mongos. + // + // The same procedure applies to aggregation expressions of the form + // {$expr: {$lt: [...]}} that can be rewritten to use $_internalExprLt. 
+ if (!isTimeField) { + return makeOr(makeVector>( + std::make_unique(minPathStringData, + matchExprData), + createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); + } else if (bucketSpec.usesExtendedRange()) { + return makePredicate( + MatchExprPredicate(minPath, matchExprData), + MatchExprPredicate(maxPath, + maxTime.firstElement())); + } else if (matchExprData.Date().toMillisSinceEpoch() < 0LL) { + // Since by this point we know that no time value has been inserted < 0, + // we know that no document can meet this criteria + return std::make_unique(); + } else if (matchExprData.Date().toMillisSinceEpoch() > max32BitEpochMillis) { + // Since by this point we know that no time value has been inserted > 0xffffffff + // every time value must be less than this value + return std::make_unique(); + } else { + return makePredicate(MatchExprPredicate( + minPathStringData, matchExprData), + MatchExprPredicate( + maxPathStringData, maxTime.firstElement()), + MatchExprPredicate( + kBucketIdFieldName, + constructObjectIdValue( + matchExprData, bucketMaxSpanSeconds))); + } + MONGO_UNREACHABLE_TASSERT(6646906); + + case MatchExpression::LTE: + case MatchExpression::INTERNAL_EXPR_LTE: + // For $lte, make a $lte predicate against 'control.min'. In addition, if the comparison + // is against the 'time' field, and the collection doesn't contain times outside the + // 32 bit range, include a predicate against the _id field which is + // converted to the maximum for the corresponding range of ObjectIds. In + // addition, we include a {'control.max' : {$lte: 'time + bucketMaxSpanSeconds'}} + // predicate which will be helpful in reducing bounds for index scans on 'time' field + // and routing on mongos. + // + // The same procedure applies to aggregation expressions of the form + // {$expr: {$lte: [...]}} that can be rewritten to use $_internalExprLte. 
+ if (!isTimeField) { + return makeOr(makeVector>( + std::make_unique(minPathStringData, + matchExprData), + createTypeEqualityPredicate(pExpCtx, matchExprPath, assumeNoMixedSchemaData))); + } else if (bucketSpec.usesExtendedRange()) { + return makePredicate( + MatchExprPredicate(minPath, matchExprData), + MatchExprPredicate(maxPath, + maxTime.firstElement())); + } else if (matchExprData.Date().toMillisSinceEpoch() < 0LL) { + // Since by this point we know that no time value has been inserted < 0, + // we know that no document can meet this criteria + return std::make_unique(); + } else if (matchExprData.Date().toMillisSinceEpoch() > max32BitEpochMillis) { + // Since by this point we know that no time value has been inserted > 0xffffffff + // every document must be less than this value + return std::make_unique(); + } else { + return makePredicate(MatchExprPredicate( + minPathStringData, matchExprData), + MatchExprPredicate( + maxPathStringData, maxTime.firstElement()), + MatchExprPredicate( + kBucketIdFieldName, + constructObjectIdValue( + matchExprData, bucketMaxSpanSeconds))); + } + MONGO_UNREACHABLE_TASSERT(6646907); + + default: + MONGO_UNREACHABLE_TASSERT(5348302); + } + + MONGO_UNREACHABLE_TASSERT(5348303); +} + +std::unique_ptr createTightComparisonPredicate( + const ComparisonMatchExpressionBase* matchExpr, + const BucketSpec& bucketSpec, + ExpressionContext::CollationMatchesDefault collationMatchesDefault) { + using namespace timeseries; + const auto matchExprPath = matchExpr->path(); + const auto matchExprData = matchExpr->getData(); + + const auto error = checkComparisonPredicateErrors( + matchExpr, matchExprPath, matchExprData, bucketSpec, collationMatchesDefault); + if (error) { + return handleIneligible(BucketSpec::IneligiblePredicatePolicy::kIgnore, matchExpr, *error) + .loosePredicate; + } + + // We have to disable the tight predicate for the measurement field. There might be missing + // values in the measurements and the control fields ignore them on insertion. So we cannot use + // bucket min and max to determine the property of all events in the bucket. For measurement + // fields, there's a further problem that if the control field is an array, we cannot generate + // the tight predicate because the predicate will be implicitly mapped over the array elements. + if (matchExprPath != bucketSpec.timeField()) { + return handleIneligible(BucketSpec::IneligiblePredicatePolicy::kIgnore, + matchExpr, + "can't create tight predicate on non-time field") + .tightPredicate; + } + + auto minPath = std::string{kControlMinFieldNamePrefix} + matchExprPath; + const StringData minPathStringData(minPath); + auto maxPath = std::string{kControlMaxFieldNamePrefix} + matchExprPath; + const StringData maxPathStringData(maxPath); + + switch (matchExpr->matchType()) { + // All events satisfy $eq if bucket min and max both satisfy $eq. + case MatchExpression::EQ: + return makePredicate( + MatchExprPredicate(minPathStringData, matchExprData), + MatchExprPredicate(maxPathStringData, matchExprData)); + case MatchExpression::INTERNAL_EXPR_EQ: + return makePredicate( + MatchExprPredicate(minPathStringData, matchExprData), + MatchExprPredicate(maxPathStringData, + matchExprData)); + + // All events satisfy $gt if bucket min satisfy $gt. + case MatchExpression::GT: + return std::make_unique(minPathStringData, matchExprData); + case MatchExpression::INTERNAL_EXPR_GT: + return std::make_unique(minPathStringData, + matchExprData); + + // All events satisfy $gte if bucket min satisfy $gte. 
+ case MatchExpression::GTE: + return std::make_unique(minPathStringData, matchExprData); + case MatchExpression::INTERNAL_EXPR_GTE: + return std::make_unique(minPathStringData, + matchExprData); + + // All events satisfy $lt if bucket max satisfy $lt. + case MatchExpression::LT: + return std::make_unique(maxPathStringData, matchExprData); + case MatchExpression::INTERNAL_EXPR_LT: + return std::make_unique(maxPathStringData, + matchExprData); + + // All events satisfy $lte if bucket max satisfy $lte. + case MatchExpression::LTE: + return std::make_unique(maxPathStringData, matchExprData); + case MatchExpression::INTERNAL_EXPR_LTE: + return std::make_unique(maxPathStringData, + matchExprData); + + default: + MONGO_UNREACHABLE_TASSERT(7026901); + } +} + +std::unique_ptr createTightExprComparisonPredicate( + const ExprMatchExpression* matchExpr, + const BucketSpec& bucketSpec, + boost::intrusive_ptr pExpCtx) { + using namespace timeseries; + auto rewriteMatchExpr = RewriteExpr::rewrite(matchExpr->getExpression(), pExpCtx->getCollator()) + .releaseMatchExpression(); + if (rewriteMatchExpr && + ComparisonMatchExpressionBase::isInternalExprComparison(rewriteMatchExpr->matchType())) { + auto compareMatchExpr = + checked_cast(rewriteMatchExpr.get()); + return createTightComparisonPredicate( + compareMatchExpr, bucketSpec, pExpCtx->collationMatchesDefault); + } + + return handleIneligible(BucketSpec::IneligiblePredicatePolicy::kIgnore, + matchExpr, + "can't handle non-comparison $expr match expression") + .tightPredicate; +} + +} // namespace + +BucketSpec::BucketPredicate BucketSpec::createPredicatesOnBucketLevelField( + const MatchExpression* matchExpr, + const BucketSpec& bucketSpec, + int bucketMaxSpanSeconds, + const boost::intrusive_ptr& pExpCtx, + bool haveComputedMetaField, + bool includeMetaField, + bool assumeNoMixedSchemaData, + IneligiblePredicatePolicy policy) { + + tassert(5916304, "BucketSpec::createPredicatesOnBucketLevelField nullptr", matchExpr); + + // If we have a leaf predicate on a meta field, we can map it to the bucket's meta field. + // This includes comparisons such as $eq and $lte, as well as other non-comparison predicates + // such as $exists, or $mod. Unrenamable expressions can't be split into a whole bucket level + // filter, when we should return nullptr. + // + // Metadata predicates are partially handled earlier, by splitting the match expression into a + // metadata-only part, and measurement/time-only part. However, splitting a $match into two + // sequential $matches only works when splitting a conjunction. A predicate like + // {$or: [ {a: 5}, {meta.b: 5} ]} can't be split, and can't be metadata-only, so we have to + // handle it here. 
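The loose/tight pair produced by the helpers above is consumed roughly as follows. This is an illustrative sketch only, with `std::function` stand-ins rather than the real `MatchExpression` interface; `Bucket`, `Measurement`, and `unpack` are assumed placeholder types.

```cpp
#include <functional>
#include <vector>

struct Bucket {};
struct Measurement {};

std::vector<Measurement> filterBucket(
    const Bucket& bucket,
    const std::function<bool(const Bucket&)>& loosePred,
    const std::function<bool(const Bucket&)>& tightPred,
    const std::function<std::vector<Measurement>(const Bucket&)>& unpack,
    const std::function<bool(const Measurement&)>& eventPred) {
    if (!loosePred(bucket))
        return {};  // loose predicate failed: no event in this bucket can match
    std::vector<Measurement> events = unpack(bucket);
    if (tightPred(bucket))
        return events;  // tight predicate passed: every event matches, skip the residual filter
    std::vector<Measurement> out;
    for (const auto& e : events)
        if (eventPred(e))
            out.push_back(e);
    return out;
}
```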
+ const auto matchExprPath = matchExpr->path(); + if (!matchExprPath.empty() && bucketSpec.metaField() && + (matchExprPath == bucketSpec.metaField().value() || + expression::isPathPrefixOf(bucketSpec.metaField().value(), matchExprPath))) { + + if (haveComputedMetaField) + return handleIneligible(policy, matchExpr, "can't handle a computed meta field"); + + if (!includeMetaField) + return handleIneligible(policy, matchExpr, "cannot handle an excluded meta field"); + + if (auto looseResult = expression::copyExpressionAndApplyRenames( + matchExpr, + {{bucketSpec.metaField().value(), timeseries::kBucketMetaFieldName.toString()}}); + looseResult) { + auto tightResult = looseResult->clone(); + return {std::move(looseResult), std::move(tightResult)}; + } else { + return {nullptr, nullptr}; + } + } + + if (matchExpr->matchType() == MatchExpression::AND) { + auto nextAnd = static_cast(matchExpr); + auto looseAndExpression = std::make_unique(); + auto tightAndExpression = std::make_unique(); + for (size_t i = 0; i < nextAnd->numChildren(); i++) { + auto child = createPredicatesOnBucketLevelField(nextAnd->getChild(i), + bucketSpec, + bucketMaxSpanSeconds, + pExpCtx, + haveComputedMetaField, + includeMetaField, + assumeNoMixedSchemaData, + policy); + if (child.loosePredicate) { + looseAndExpression->add(std::move(child.loosePredicate)); + } + + if (tightAndExpression && child.tightPredicate) { + tightAndExpression->add(std::move(child.tightPredicate)); + } else { + // For tight expression, null means always false, we can short circuit here. + tightAndExpression = nullptr; + } + } + + // For a loose predicate, if we are unable to generate an expression we can just treat it as + // always true or an empty AND. This is because we are trying to generate a predicate that + // will match the superset of our actual results. + std::unique_ptr looseExpression = nullptr; + if (looseAndExpression->numChildren() == 1) { + looseExpression = looseAndExpression->releaseChild(0); + } else if (looseAndExpression->numChildren() > 1) { + looseExpression = std::move(looseAndExpression); + } + + // For a tight predicate, if we are unable to generate an expression we can just treat it as + // always false. This is because we are trying to generate a predicate that will match the + // subset of our actual results. + std::unique_ptr tightExpression = nullptr; + if (tightAndExpression && tightAndExpression->numChildren() == 1) { + tightExpression = tightAndExpression->releaseChild(0); + } else { + tightExpression = std::move(tightAndExpression); + } + + return {std::move(looseExpression), std::move(tightExpression)}; + } else if (matchExpr->matchType() == MatchExpression::OR) { + // Given {$or: [A, B]}, suppose A, B can be pushed down as A', B'. + // If an event matches {$or: [A, B]} then either: + // - it matches A, which means any bucket containing it matches A' + // - it matches B, which means any bucket containing it matches B' + // So {$or: [A', B']} will capture all the buckets we need to satisfy {$or: [A, B]}. 
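The rule this reasoning justifies is implemented just below. As a minimal sketch, using `std::optional<std::function<...>>` stand-ins where `nullopt` means always-true for the loose predicate and always-false for the tight one, the $or combination looks like:

```cpp
#include <functional>
#include <optional>
#include <vector>

struct Bucket {};
using BucketPred = std::function<bool(const Bucket&)>;

struct Pushed {
    std::optional<BucketPred> loose;  // nullopt: could not be generated => always true
    std::optional<BucketPred> tight;  // nullopt: could not be generated => always false
};

// Mirrors the $or handling below: the loose $or is abandoned if any child lacks a loose
// predicate, while the tight $or merely skips such children ("every event matches B"
// already implies "every event matches A or B").
Pushed combineOr(std::vector<Pushed> children) {
    std::vector<BucketPred> loose, tight;
    bool allLoose = true;
    for (auto& c : children) {
        if (c.loose) loose.push_back(std::move(*c.loose)); else allLoose = false;
        if (c.tight) tight.push_back(std::move(*c.tight));
    }
    Pushed out;
    if (allLoose && !loose.empty())
        out.loose = [loose](const Bucket& b) {
            for (const auto& p : loose) if (p(b)) return true;
            return false;
        };
    if (!tight.empty())
        out.tight = [tight](const Bucket& b) {
            for (const auto& p : tight) if (p(b)) return true;
            return false;
        };
    return out;
}
```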
+ auto nextOr = static_cast(matchExpr); + auto looseOrExpression = std::make_unique(); + auto tightOrExpression = std::make_unique(); + + for (size_t i = 0; i < nextOr->numChildren(); i++) { + auto child = createPredicatesOnBucketLevelField(nextOr->getChild(i), + bucketSpec, + bucketMaxSpanSeconds, + pExpCtx, + haveComputedMetaField, + includeMetaField, + assumeNoMixedSchemaData, + policy); + if (looseOrExpression && child.loosePredicate) { + looseOrExpression->add(std::move(child.loosePredicate)); + } else { + // For loose expression, null means always true, we can short circuit here. + looseOrExpression = nullptr; + } + + // For tight predicate, we give a tighter bound so that all events in the bucket + // either all matches A or all matches B. + if (child.tightPredicate) { + tightOrExpression->add(std::move(child.tightPredicate)); + } + } + + // For a loose predicate, if we are unable to generate an expression we can just treat it as + // always true. This is because we are trying to generate a predicate that will match the + // superset of our actual results. + std::unique_ptr looseExpression = nullptr; + if (looseOrExpression && looseOrExpression->numChildren() == 1) { + looseExpression = looseOrExpression->releaseChild(0); + } else { + looseExpression = std::move(looseOrExpression); + } + + // For a tight predicate, if we are unable to generate an expression we can just treat it as + // always false or an empty OR. This is because we are trying to generate a predicate that + // will match the subset of our actual results. + std::unique_ptr tightExpression = nullptr; + if (tightOrExpression->numChildren() == 1) { + tightExpression = tightOrExpression->releaseChild(0); + } else if (tightOrExpression->numChildren() > 1) { + tightExpression = std::move(tightOrExpression); + } + + return {std::move(looseExpression), std::move(tightExpression)}; + } else if (ComparisonMatchExpression::isComparisonMatchExpression(matchExpr) || + ComparisonMatchExpressionBase::isInternalExprComparison(matchExpr->matchType())) { + return { + createComparisonPredicate(checked_cast(matchExpr), + bucketSpec, + bucketMaxSpanSeconds, + pExpCtx, + haveComputedMetaField, + includeMetaField, + assumeNoMixedSchemaData, + policy), + createTightComparisonPredicate( + checked_cast(matchExpr), + bucketSpec, + pExpCtx->collationMatchesDefault)}; + } else if (matchExpr->matchType() == MatchExpression::EXPRESSION) { + return { + // The loose predicate will be pushed before the unpacking which will be inspected by + // the + // query planner. Since the classic planner doesn't handle the $expr expression, we + // don't + // generate the loose predicate. + nullptr, + createTightExprComparisonPredicate( + checked_cast(matchExpr), bucketSpec, pExpCtx)}; + } else if (matchExpr->matchType() == MatchExpression::GEO) { + auto& geoExpr = static_cast(matchExpr)->getGeoExpression(); + if (geoExpr.getPred() == GeoExpression::WITHIN || + geoExpr.getPred() == GeoExpression::INTERSECT) { + return {std::make_unique( + geoExpr.getGeometryPtr(), geoExpr.getField()), + nullptr}; + } + } else if (matchExpr->matchType() == MatchExpression::EXISTS) { + if (assumeNoMixedSchemaData) { + // We know that every field that appears in an event will also appear in the min/max. 
+ auto result = std::make_unique(); + result->add(std::make_unique(StringData( + std::string{timeseries::kControlMinFieldNamePrefix} + matchExpr->path()))); + result->add(std::make_unique(StringData( + std::string{timeseries::kControlMaxFieldNamePrefix} + matchExpr->path()))); + return {std::move(result), nullptr}; + } else { + // At time of writing, we only pass 'kError' when creating a partial index, and + // we know the collection will have no mixed-schema buckets by the time the index is + // done building. + tassert(5916305, + "Can't push down {$exists: true} when the collection may have mixed-schema " + "buckets.", + policy != IneligiblePredicatePolicy::kError); + return {}; + } + } else if (matchExpr->matchType() == MatchExpression::MATCH_IN) { + // {a: {$in: [X, Y]}} is equivalent to {$or: [ {a: X}, {a: Y} ]}. + // {$in: [/a/]} is interpreted as a regex query. + // {$in: [null]} matches any nullish value. + const auto* inExpr = static_cast(matchExpr); + if (inExpr->hasRegex()) + return handleIneligible( + policy, matchExpr, "can't handle $regex predicate (inside $in predicate)"); + if (inExpr->hasNull()) + return handleIneligible( + policy, matchExpr, "can't handle {$eq: null} predicate (inside $in predicate)"); + + auto result = std::make_unique(); + + bool alwaysTrue = false; + for (auto&& elem : inExpr->getEqualities()) { + // If inExpr is {$in: [X, Y]} then the elems are '0: X' and '1: Y'. + auto eq = std::make_unique( + inExpr->path(), elem, nullptr /*annotation*/, inExpr->getCollator()); + auto child = createComparisonPredicate(eq.get(), + bucketSpec, + bucketMaxSpanSeconds, + pExpCtx, + haveComputedMetaField, + includeMetaField, + assumeNoMixedSchemaData, + policy); + + // As with OR, only add the child if it has been succesfully translated, otherwise the + // $in cannot be correctly mapped to bucket level fields and we should return nullptr. + if (child) { + result->add(std::move(child)); + } else { + alwaysTrue = true; + if (policy == IneligiblePredicatePolicy::kIgnore) + break; + } + } + if (alwaysTrue) + return {}; + + // As above, no special case for an empty IN: returning nullptr would be incorrect because + // it means 'always-true', here. + return {std::move(result), nullptr}; + } + return handleIneligible(policy, matchExpr, "can't handle this predicate"); +} + +std::pair BucketSpec::pushdownPredicate( + const boost::intrusive_ptr& expCtx, + const TimeseriesOptions& tsOptions, + const BSONObj& predicate, + bool haveComputedMetaField, + bool includeMetaField, + bool assumeNoMixedSchemaData, + IneligiblePredicatePolicy policy) { + auto [metaOnlyPred, bucketMetricPred, residualPred] = + getPushdownPredicates(expCtx, + tsOptions, + predicate, + haveComputedMetaField, + includeMetaField, + assumeNoMixedSchemaData, + policy); + BSONObjBuilder result; + if (metaOnlyPred) + metaOnlyPred->serialize(&result, {}); + if (bucketMetricPred) + bucketMetricPred->serialize(&result, {}); + return std::make_pair(bucketMetricPred.get(), result.obj()); +} + +std::pair, std::unique_ptr> +BucketSpec::splitOutMetaOnlyPredicate(std::unique_ptr expr, + boost::optional metaField) { + if (!metaField) { + // If there's no metadata field, then none of the predicates are metadata-only + // predicates. 
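`splitOutMetaOnlyPredicate` (continued below) is the step that renames the user-level meta field to the bucket-level one. A toy sketch of the idea over bare field paths; the list-of-paths representation and the name `splitMetaOnly` are illustrative only, since the real helper splits full `MatchExpression` trees.

```cpp
#include <optional>
#include <string>
#include <utility>
#include <vector>

using PathList = std::vector<std::string>;

// Given the children of a top-level AND, identified only by their field paths, separate
// those fully under the user's metaField (renamed to the bucket's "meta" prefix) from
// the residual predicates.
std::pair<PathList, PathList> splitMetaOnly(const PathList& andChildren,
                                            const std::optional<std::string>& metaField) {
    PathList metaOnly, residual;
    if (!metaField)
        return {metaOnly, andChildren};  // no metaField: everything is residual
    for (const auto& path : andChildren) {
        const bool underMeta = path == *metaField || path.rfind(*metaField + ".", 0) == 0;
        if (underMeta)
            metaOnly.push_back("meta" + path.substr(metaField->size()));
        else
            residual.push_back(path);
    }
    return {metaOnly, residual};
}
// e.g. metaField = "tags": {"tags.loc", "temp"} -> metaOnly {"meta.loc"}, residual {"temp"}.
```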
+ return std::make_pair(std::unique_ptr(nullptr), std::move(expr)); + } + + return expression::splitMatchExpressionBy( + std::move(expr), + {metaField->toString()}, + {{metaField->toString(), timeseries::kBucketMetaFieldName.toString()}}, + expression::isOnlyDependentOn); +} + +BucketSpec::SplitPredicates BucketSpec::getPushdownPredicates( + const boost::intrusive_ptr& expCtx, + const TimeseriesOptions& tsOptions, + const BSONObj& predicate, + bool haveComputedMetaField, + bool includeMetaField, + bool assumeNoMixedSchemaData, + IneligiblePredicatePolicy policy) { + + auto allowedFeatures = MatchExpressionParser::kDefaultSpecialFeatures; + auto matchExpr = uassertStatusOK( + MatchExpressionParser::parse(predicate, expCtx, ExtensionsCallbackNoop(), allowedFeatures)); + + auto metaField = haveComputedMetaField ? boost::none : tsOptions.getMetaField(); + auto [metaOnlyPred, residualPred] = splitOutMetaOnlyPredicate(std::move(matchExpr), metaField); + + std::unique_ptr bucketMetricPred = residualPred + ? createPredicatesOnBucketLevelField( + residualPred.get(), + BucketSpec{ + tsOptions.getTimeField().toString(), + metaField.map([](StringData s) { return s.toString(); }), + // Since we are operating on a collection, not a query-result, + // there are no inclusion/exclusion projections we need to apply + // to the buckets before unpacking. So we can use default values for the rest of + // the arguments. + }, + *tsOptions.getBucketMaxSpanSeconds(), + expCtx, + haveComputedMetaField, + includeMetaField, + assumeNoMixedSchemaData, + policy) + .loosePredicate + : nullptr; + + return {.metaOnlyExpr = std::move(metaOnlyPred), + .bucketMetricExpr = std::move(bucketMetricPred), + .residualExpr = std::move(residualPred)}; +} + +BucketSpec::BucketSpec(const std::string& timeField, + const boost::optional& metaField, + const std::set& fields, + Behavior behavior, + const std::set& computedProjections, + bool usesExtendedRange) + : _fieldSet(fields), + _behavior(behavior), + _computedMetaProjFields(computedProjections), + _timeField(timeField), + _timeFieldHashed(FieldNameHasher().hashedFieldName(_timeField)), + _metaField(metaField), + _usesExtendedRange(usesExtendedRange) { + if (_metaField) { + _metaFieldHashed = FieldNameHasher().hashedFieldName(*_metaField); + } +} + +BucketSpec::BucketSpec(const BucketSpec& other) + : _fieldSet(other._fieldSet), + _behavior(other._behavior), + _computedMetaProjFields(other._computedMetaProjFields), + _timeField(other._timeField), + _timeFieldHashed(HashedFieldName{_timeField, other._timeFieldHashed->hash()}), + _metaField(other._metaField), + _usesExtendedRange(other._usesExtendedRange) { + if (_metaField) { + _metaFieldHashed = HashedFieldName{*_metaField, other._metaFieldHashed->hash()}; + } +} + +BucketSpec::BucketSpec(BucketSpec&& other) + : _fieldSet(std::move(other._fieldSet)), + _behavior(other._behavior), + _computedMetaProjFields(std::move(other._computedMetaProjFields)), + _timeField(std::move(other._timeField)), + _timeFieldHashed(HashedFieldName{_timeField, other._timeFieldHashed->hash()}), + _metaField(std::move(other._metaField)), + _usesExtendedRange(other._usesExtendedRange) { + if (_metaField) { + _metaFieldHashed = HashedFieldName{*_metaField, other._metaFieldHashed->hash()}; + } +} + +BucketSpec::BucketSpec(const TimeseriesOptions& tsOptions) + : BucketSpec(tsOptions.getTimeField().toString(), + tsOptions.getMetaField() + ? 
boost::optional(tsOptions.getMetaField()->toString()) + : boost::none) {} + +BucketSpec& BucketSpec::operator=(const BucketSpec& other) { + if (&other != this) { + _fieldSet = other._fieldSet; + _behavior = other._behavior; + _computedMetaProjFields = other._computedMetaProjFields; + _timeField = other._timeField; + _timeFieldHashed = HashedFieldName{_timeField, other._timeFieldHashed->hash()}; + _metaField = other._metaField; + if (_metaField) { + _metaFieldHashed = HashedFieldName{*_metaField, other._metaFieldHashed->hash()}; + } + _usesExtendedRange = other._usesExtendedRange; + } + return *this; +} + +void BucketSpec::setTimeField(std::string&& name) { + _timeField = std::move(name); + _timeFieldHashed = FieldNameHasher().hashedFieldName(_timeField); +} + +const std::string& BucketSpec::timeField() const { + return _timeField; +} + +HashedFieldName BucketSpec::timeFieldHashed() const { + invariant(_timeFieldHashed->key().rawData() == _timeField.data()); + invariant(_timeFieldHashed->key() == _timeField); + return *_timeFieldHashed; +} + +void BucketSpec::setMetaField(boost::optional&& name) { + _metaField = std::move(name); + if (_metaField) { + _metaFieldHashed = FieldNameHasher().hashedFieldName(*_metaField); + } else { + _metaFieldHashed = boost::none; + } +} + +const boost::optional& BucketSpec::metaField() const { + return _metaField; +} + +boost::optional BucketSpec::metaFieldHashed() const { + return _metaFieldHashed; +} +} // namespace mongo diff --git a/src/mongo/db/exec/timeseries/bucket_spec.h b/src/mongo/db/exec/timeseries/bucket_spec.h new file mode 100644 index 0000000000000..1e0fb97ddb452 --- /dev/null +++ b/src/mongo/db/exec/timeseries/bucket_spec.h @@ -0,0 +1,290 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document_internal.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/timeseries/timeseries_gen.h" + +namespace mongo { +/** + * Carries parameters for unpacking a bucket. The order of operations applied to determine which + * fields are in the final document are: + * If we are in include mode: + * 1. Unpack all fields from the bucket. + * 2. Remove any fields not in _fieldSet, since we are in include mode. + * 3. Add fields from _computedMetaProjFields. + * If we are in exclude mode: + * 1. Unpack all fields from the bucket. + * 2. Add fields from _computedMetaProjFields. + * 3. Remove any fields in _fieldSet, since we are in exclude mode. + */ +class BucketSpec { +public: + // When unpacking buckets with kInclude we must produce measurements that contain the + // set of fields. Otherwise, if the kExclude option is used, the measurements will include the + // set difference between all fields in the bucket and the provided fields. + enum class Behavior { kInclude, kExclude }; + + BucketSpec() = default; + BucketSpec(const std::string& timeField, + const boost::optional& metaField, + const std::set& fields = {}, + Behavior behavior = Behavior::kExclude, + const std::set& computedProjections = {}, + bool usesExtendedRange = false); + BucketSpec(const BucketSpec&); + BucketSpec(BucketSpec&&); + BucketSpec(const TimeseriesOptions& tsOptions); + + BucketSpec& operator=(const BucketSpec&); + + // The user-supplied timestamp field name specified during time-series collection creation. + void setTimeField(std::string&& field); + const std::string& timeField() const; + HashedFieldName timeFieldHashed() const; + + // An optional user-supplied metadata field name specified during time-series collection + // creation. This field name is used during materialization of metadata fields of a measurement + // after unpacking. + void setMetaField(boost::optional&& field); + const boost::optional& metaField() const; + boost::optional metaFieldHashed() const; + + void setFieldSet(std::set& fieldSet) { + _fieldSet = std::move(fieldSet); + } + + void addIncludeExcludeField(const StringData& field) { + _fieldSet.emplace(field); + } + + void removeIncludeExcludeField(const std::string& field) { + _fieldSet.erase(field); + } + + const std::set& fieldSet() const { + return _fieldSet; + } + + void setBehavior(Behavior behavior) { + _behavior = behavior; + } + + Behavior behavior() const { + return _behavior; + } + + void addComputedMetaProjFields(const StringData& field) { + _computedMetaProjFields.emplace(field); + } + + const std::set& computedMetaProjFields() const { + return _computedMetaProjFields; + } + + void eraseFromComputedMetaProjFields(const std::string& field) { + _computedMetaProjFields.erase(field); + } + + void setUsesExtendedRange(bool usesExtendedRange) { + _usesExtendedRange = usesExtendedRange; + } + + bool usesExtendedRange() const { + return _usesExtendedRange; + } + + // Returns whether 'field' depends on a pushed down $addFields or computed $project. + bool fieldIsComputed(StringData field) const; + + // Says what to do when an event-level predicate cannot be mapped to a bucket-level predicate. 
+ enum class IneligiblePredicatePolicy { + // When optimizing a query, it's fine if some predicates can't be pushed down. We'll still + // run the predicate after unpacking, so the results will be correct. + kIgnore, + // When creating a partial index, it's misleading if we can't handle a predicate: the user + // expects every predicate in the partialFilterExpression to contribute, somehow, to making + // the index smaller. + kError, + }; + + struct BucketPredicate { + // A loose predicate is a predicate which returns true when any measures of a bucket + // matches. + std::unique_ptr loosePredicate; + + // A tight predicate is a predicate which returns true when all measures of a bucket + // matches. + std::unique_ptr tightPredicate; + }; + + /** + * Takes a predicate after $_internalUnpackBucket on a bucketed field as an argument and + * attempts to map it to new predicates on the 'control' field. There will be a 'loose' + * predicate that will match if some of the event field matches, also a 'tight' predicate that + * will match if all of the event field matches. For example, the event level predicate {a: + * {$gt: 5}} will generate the loose predicate {control.max.a: {$_internalExprGt: 5}}, and the + * tight predicate {control.min.a: {$_internalExprGt: 5}}. The loose predicate will be added + * before the $_internalUnpackBucket stage to filter out buckets with no match. The tight + * predicate will be used to evaluate predicate on bucket level to avoid unnecessary event level + * evaluation. + * + * If the original predicate is on the bucket's timeField we may also create a new loose + * predicate on the '_id' field to assist in index utilization. For example, the predicate + * {time: {$lt: new Date(...)}} will generate the following predicate: + * {$and: [ + * {_id: {$lt: ObjectId(...)}}, + * {control.min.time: {$_internalExprLt: new Date(...)}} + * ]} + * + * If the provided predicate is ineligible for this mapping, the function will return a nullptr. + * This should be interpreted as an always-true predicate. + * + * When using IneligiblePredicatePolicy::kIgnore, if the predicate can't be pushed down, it + * returns null. When using IneligiblePredicatePolicy::kError it raises a user error. + */ + static BucketPredicate createPredicatesOnBucketLevelField( + const MatchExpression* matchExpr, + const BucketSpec& bucketSpec, + int bucketMaxSpanSeconds, + const boost::intrusive_ptr& pExpCtx, + bool haveComputedMetaField, + bool includeMetaField, + bool assumeNoMixedSchemaData, + IneligiblePredicatePolicy policy); + + /** + * Converts an event-level predicate to a bucket-level predicate, such that + * + * {$unpackBucket ...} {$match: } + * + * gives the same result as + * + * {$match: } {$unpackBucket ...} {$match: } + * + * This means the bucket-level predicate must include every bucket that might contain an event + * matching the event-level predicate. + * + * This helper is used when creating a partial index on a time-series collection: logically, + * we index only events that match the event-level partialFilterExpression, but physically we + * index any bucket that matches the bucket-level partialFilterExpression. + * + * When using IneligiblePredicatePolicy::kIgnore, if the predicate can't be pushed down, it + * returns null. When using IneligiblePredicatePolicy::kError it raises a user error. + * + * Returns a boolean (alongside the bucket-level predicate) describing if the result contains + * a metric predicate. 
+ */ + static std::pair pushdownPredicate( + const boost::intrusive_ptr& expCtx, + const TimeseriesOptions& tsOptions, + const BSONObj& predicate, + bool haveComputedMetaField, + bool includeMetaField, + bool assumeNoMixedSchemaData, + IneligiblePredicatePolicy policy); + + /** + * Splits out a predicate on the meta field from a predicate on the bucket metric field. + */ + static std::pair, std::unique_ptr> + splitOutMetaOnlyPredicate(std::unique_ptr expr, + boost::optional metaField); + + // Used as the return value of getPushdownPredicates(). + struct SplitPredicates { + std::unique_ptr metaOnlyExpr; + std::unique_ptr bucketMetricExpr; + std::unique_ptr residualExpr; + }; + + /** + * Decomposes a predicate into three parts: a predicate on the meta field, a predicate on the + * bucket metric field(s), and a residual predicate. The predicate on the meta field is a + * predicate that can be evaluated on the meta field. The predicate on the bucket metric + * field(s) is a predicate that can be evaluated on the bucket metric field(s) like + * control.min|max.[field]. The residual predicate is a predicate that cannot be evaluated on + * either the meta field or the bucket metric field and exactly matches desired measurements. + */ + static SplitPredicates getPushdownPredicates( + const boost::intrusive_ptr& expCtx, + const TimeseriesOptions& tsOptions, + const BSONObj& predicate, + bool haveComputedMetaField, + bool includeMetaField, + bool assumeNoMixedSchemaData, + IneligiblePredicatePolicy policy); + + bool includeMinTimeAsMetadata = false; + bool includeMaxTimeAsMetadata = false; + +private: + // The set of field names in the data region that should be included or excluded. + std::set _fieldSet; + Behavior _behavior = Behavior::kExclude; + + // Set of computed meta field projection names. Added at the end of materialized + // measurements. + std::set _computedMetaProjFields; + + std::string _timeField; + boost::optional _timeFieldHashed; + + boost::optional _metaField = boost::none; + boost::optional _metaFieldHashed = boost::none; + bool _usesExtendedRange = false; +}; + +/** + * Determines if an arbitrary field should be included in the materialized measurements. + */ +inline bool determineIncludeField(StringData fieldName, + BucketSpec::Behavior unpackerBehavior, + const std::set& unpackFieldsToIncludeExclude) { + const bool isInclude = unpackerBehavior == BucketSpec::Behavior::kInclude; + const bool unpackFieldsContains = unpackFieldsToIncludeExclude.find(fieldName.toString()) != + unpackFieldsToIncludeExclude.cend(); + return isInclude == unpackFieldsContains; +} +} // namespace mongo diff --git a/src/mongo/db/exec/timeseries/bucket_unpacker.cpp b/src/mongo/db/exec/timeseries/bucket_unpacker.cpp new file mode 100644 index 0000000000000..73bda0ab23192 --- /dev/null +++ b/src/mongo/db/exec/timeseries/bucket_unpacker.cpp @@ -0,0 +1,782 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/exec/timeseries/bucket_unpacker.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/bsoncolumn.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_internal.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +namespace mongo { + +class BucketUnpacker::UnpackingImpl { +public: + UnpackingImpl() = default; + virtual ~UnpackingImpl() = default; + + virtual void addField(const BSONElement& field) = 0; + virtual int measurementCount(const BSONElement& timeField) const = 0; + virtual bool getNext(MutableDocument& measurement, + const BucketSpec& spec, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) = 0; + virtual bool getNext(BSONObjBuilder& builder, + const BucketSpec& spec, + const BSONElement& metaValue, + bool includeTimeField, + bool includeMetaField) = 0; + virtual void extractSingleMeasurement(MutableDocument& measurement, + int j, + const BucketSpec& spec, + const std::set& unpackFieldsToIncludeExclude, + const BSONObj& bucket, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) = 0; + + // Provides an upper bound on the number of fields in each measurement. + virtual std::size_t numberOfFields() = 0; + +protected: + // Data field count is variable, but time and metadata are fixed. + constexpr static std::size_t kFixedFieldNumber = 2; +}; + +namespace { +// Unpacker for V1 uncompressed buckets +class BucketUnpackerV1 : public BucketUnpacker::UnpackingImpl { +public: + // A table that is useful for interpolations between the number of measurements in a bucket and + // the byte size of a bucket's data section timestamp column. Each table entry is a pair (b_i, + // S_i), where b_i is the number of measurements in the bucket and S_i is the byte size of the + // timestamp BSONObj. The table is bounded by 16 MB (2 << 23 bytes) where the table entries are + // pairs of b_i and S_i for the lower bounds of the row key digit intervals [0, 9], [10, 99], + // [100, 999], [1000, 9999] and so on. The last entry in the table, S7, is the first entry to + // exceed the server BSON object limit of 16 MB. 
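The byte counts in the table follow directly from BSON's encoding of a Date element (one type byte, the decimal row key plus its NUL terminator, and an 8-byte payload) plus 5 bytes of object framing. A small self-contained check of that arithmetic, with `timestampObjSize` as an illustrative name:

```cpp
#include <cstdio>
#include <initializer_list>
#include <string>

// Size in bytes of a BSON object {"0": Date, "1": Date, ..., "n-1": Date}.
int timestampObjSize(int numMeasurements) {
    int size = 5;  // BSONObj::kMinBSONLength: int32 length prefix + terminating 0x00
    for (int i = 0; i < numMeasurements; ++i)
        size += 1 + static_cast<int>(std::to_string(i).size()) + 1 + 8;
    return size;
}

int main() {
    // Reproduces the table's lower-bound rows: 0 -> 5, 10 -> 115, 100 -> 1195, 1000 -> 12895, ...
    for (int n : {0, 10, 100, 1000, 10000})
        std::printf("%d measurements -> %d bytes\n", n, timestampObjSize(n));
}
```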
+ static constexpr std::array, 8> kTimestampObjSizeTable{ + {{0, BSONObj::kMinBSONLength}, + {10, 115}, + {100, 1195}, + {1000, 12895}, + {10000, 138895}, + {100000, 1488895}, + {1000000, 15888895}, + {10000000, 168888895}}}; + + static int computeElementCountFromTimestampObjSize(int targetTimestampObjSize); + + BucketUnpackerV1(const BSONElement& timeField); + + void addField(const BSONElement& field) override; + int measurementCount(const BSONElement& timeField) const override; + bool getNext(MutableDocument& measurement, + const BucketSpec& spec, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) override; + bool getNext(BSONObjBuilder& builder, + const BucketSpec& spec, + const BSONElement& metaValue, + bool includeTimeField, + bool includeMetaField) override; + void extractSingleMeasurement(MutableDocument& measurement, + int j, + const BucketSpec& spec, + const std::set& unpackFieldsToIncludeExclude, + const BSONObj& bucket, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) override; + std::size_t numberOfFields() override; + +private: + // Iterates the timestamp section of the bucket to drive the unpacking iteration. + BSONObjIterator _timeFieldIter; + + // Iterators used to unpack the columns of the above bucket that are populated during the reset + // phase according to the provided 'BucketSpec'. + std::vector> _fieldIters; +}; + +// Calculates the number of measurements in a bucket given the 'targetTimestampObjSize' using the +// 'BucketUnpacker::kTimestampObjSizeTable' table. If the 'targetTimestampObjSize' hits a record in +// the table, this helper returns the measurement count corresponding to the table record. +// Otherwise, the 'targetTimestampObjSize' is used to probe the table for the smallest {b_i, S_i} +// pair such that 'targetTimestampObjSize' < S_i. Once the interval is found, the upper bound of the +// pair for the interval is computed and then linear interpolation is used to compute the +// measurement count corresponding to the 'targetTimestampObjSize' provided. +int BucketUnpackerV1::computeElementCountFromTimestampObjSize(int targetTimestampObjSize) { + auto currentInterval = + std::find_if(std::begin(BucketUnpackerV1::kTimestampObjSizeTable), + std::end(BucketUnpackerV1::kTimestampObjSizeTable), + [&](const auto& entry) { return targetTimestampObjSize <= entry.second; }); + + if (currentInterval->second == targetTimestampObjSize) { + return currentInterval->first; + } + // This points to the first interval larger than the target 'targetTimestampObjSize', the actual + // interval that will cover the object size is the interval before the current one. 
+ tassert(5422104, + "currentInterval should not point to the first table entry", + currentInterval > BucketUnpackerV1::kTimestampObjSizeTable.begin()); + --currentInterval; + + auto nDigitsInRowKey = 1 + (currentInterval - BucketUnpackerV1::kTimestampObjSizeTable.begin()); + + return currentInterval->first + + ((targetTimestampObjSize - currentInterval->second) / (10 + nDigitsInRowKey)); +} + +BucketUnpackerV1::BucketUnpackerV1(const BSONElement& timeField) + : _timeFieldIter(BSONObjIterator{timeField.Obj()}) {} + +void BucketUnpackerV1::addField(const BSONElement& field) { + _fieldIters.emplace_back(field.fieldNameStringData(), BSONObjIterator{field.Obj()}); +} + +int BucketUnpackerV1::measurementCount(const BSONElement& timeField) const { + return computeElementCountFromTimestampObjSize(timeField.objsize()); +} + +bool BucketUnpackerV1::getNext(MutableDocument& measurement, + const BucketSpec& spec, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) { + auto&& timeElem = _timeFieldIter.next(); + if (includeTimeField) { + measurement.addField(spec.timeFieldHashed(), Value{timeElem}); + } + + // Includes metaField when we're instructed to do so and metaField value exists. + if (includeMetaField && !metaValue.missing()) { + measurement.addField(*spec.metaFieldHashed(), metaValue); + } + + const auto& currentIdx = timeElem.fieldNameStringData(); + for (auto&& [colName, colIter] : _fieldIters) { + if (auto&& elem = *colIter; colIter.more() && elem.fieldNameStringData() == currentIdx) { + measurement.addField(colName, Value{elem}); + colIter.advance(elem); + } + } + + return _timeFieldIter.more(); +} + +bool BucketUnpackerV1::getNext(BSONObjBuilder& builder, + const BucketSpec& spec, + const BSONElement& metaValue, + bool includeTimeField, + bool includeMetaField) { + auto&& timeElem = _timeFieldIter.next(); + if (includeTimeField) { + builder.appendAs(timeElem, spec.timeField()); + } + + // Includes metaField when we're instructed to do so and metaField value exists. + if (includeMetaField && !metaValue.eoo()) { + builder.appendAs(metaValue, *spec.metaField()); + } + + const auto& currentIdx = timeElem.fieldNameStringData(); + for (auto&& [colName, colIter] : _fieldIters) { + if (auto&& elem = *colIter; colIter.more() && elem.fieldNameStringData() == currentIdx) { + builder.appendAs(elem, colName); + colIter.advance(elem); + } + } + + return _timeFieldIter.more(); +} + +void BucketUnpackerV1::extractSingleMeasurement( + MutableDocument& measurement, + int j, + const BucketSpec& spec, + const std::set& unpackFieldsToIncludeExclude, + const BSONObj& bucket, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) { + auto rowKey = std::to_string(j); + auto targetIdx = StringData{rowKey}; + auto&& dataRegion = bucket.getField(timeseries::kBucketDataFieldName).Obj(); + + if (includeMetaField && !metaValue.missing()) { + measurement.addField(*spec.metaFieldHashed(), metaValue); + } + + for (auto&& dataElem : dataRegion) { + const auto& colName = dataElem.fieldNameStringData(); + if (!determineIncludeField(colName, spec.behavior(), unpackFieldsToIncludeExclude)) { + continue; + } + auto value = dataElem[targetIdx]; + if (value) { + measurement.addField(dataElem.fieldNameStringData(), Value{value}); + } + } +} + +std::size_t BucketUnpackerV1::numberOfFields() { + // The data fields are tracked by _fieldIters, but we need to account also for the time field + // and possibly the meta field. 
+ return kFixedFieldNumber + _fieldIters.size(); +} + +// Unpacker for V2 compressed buckets +class BucketUnpackerV2 : public BucketUnpacker::UnpackingImpl { +public: + BucketUnpackerV2(const BSONElement& timeField, int elementCount); + + void addField(const BSONElement& field) override; + int measurementCount(const BSONElement& timeField) const override; + bool getNext(MutableDocument& measurement, + const BucketSpec& spec, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) override; + bool getNext(BSONObjBuilder& builder, + const BucketSpec& spec, + const BSONElement& metaValue, + bool includeTimeField, + bool includeMetaField) override; + void extractSingleMeasurement(MutableDocument& measurement, + int j, + const BucketSpec& spec, + const std::set& unpackFieldsToIncludeExclude, + const BSONObj& bucket, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) override; + std::size_t numberOfFields() override; + +private: + struct ColumnStore { + ColumnStore(BSONElement elem) + : column(elem), + it(column.begin()), + fieldName(elem.fieldNameStringData(), FieldNameHasher{}(elem.fieldNameStringData())) { + } + + BSONColumn column; + BSONColumn::Iterator it; + HashedFieldName fieldName; + }; + + // Iterates the timestamp section of the bucket to drive the unpacking iteration. + ColumnStore _timeColumn; + + // Iterators used to unpack the columns of the above bucket that are populated during the reset + // phase according to the provided 'BucketSpec'. + std::vector _fieldColumns; + + // Element count + int _elementCount; +}; + +BucketUnpackerV2::BucketUnpackerV2(const BSONElement& timeField, int elementCount) + : _timeColumn(timeField), _elementCount(elementCount) { + if (_elementCount == -1) { + _elementCount = _timeColumn.column.size(); + } +} + +void BucketUnpackerV2::addField(const BSONElement& field) { + _fieldColumns.emplace_back(field); +} + +int BucketUnpackerV2::measurementCount(const BSONElement& timeField) const { + return _elementCount; +} + +bool BucketUnpackerV2::getNext(MutableDocument& measurement, + const BucketSpec& spec, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) { + // Get element and increment iterator + const auto& timeElem = *_timeColumn.it; + if (includeTimeField) { + measurement.addField(spec.timeFieldHashed(), Value{timeElem}); + } + ++_timeColumn.it; + + // Includes metaField when we're instructed to do so and metaField value exists. + if (includeMetaField && !metaValue.missing()) { + measurement.addField(*spec.metaFieldHashed(), metaValue); + } + + for (auto& fieldColumn : _fieldColumns) { + uassert(6067601, + "Bucket unexpectedly contained fewer values than count", + fieldColumn.it.more()); + const BSONElement& elem = *fieldColumn.it; + // EOO represents missing field + if (!elem.eoo()) { + measurement.addField(fieldColumn.fieldName, Value{elem}); + } + ++fieldColumn.it; + } + + return _timeColumn.it.more(); +} + +bool BucketUnpackerV2::getNext(BSONObjBuilder& builder, + const BucketSpec& spec, + const BSONElement& metaValue, + bool includeTimeField, + bool includeMetaField) { + // Get element and increment iterator + const auto& timeElem = *_timeColumn.it; + if (includeTimeField) { + builder.appendAs(timeElem, spec.timeField()); + } + ++_timeColumn.it; + + // Includes metaField when we're instructed to do so and metaField value exists. 
+ if (includeMetaField && !metaValue.eoo()) { + builder.appendAs(metaValue, *spec.metaField()); + } + + for (auto& fieldColumn : _fieldColumns) { + uassert(7026803, + "Bucket unexpectedly contained fewer values than count", + fieldColumn.it.more()); + const BSONElement& elem = *fieldColumn.it; + // EOO represents missing field + if (!elem.eoo()) { + builder.appendAs(elem, fieldColumn.fieldName.key()); + } + ++fieldColumn.it; + } + + return _timeColumn.it.more(); +} + +void BucketUnpackerV2::extractSingleMeasurement( + MutableDocument& measurement, + int j, + const BucketSpec& spec, + const std::set& unpackFieldsToIncludeExclude, + const BSONObj& bucket, + const Value& metaValue, + bool includeTimeField, + bool includeMetaField) { + if (includeTimeField) { + auto val = _timeColumn.column[j]; + uassert( + 6067500, "Bucket unexpectedly contained fewer values than count", val && !val->eoo()); + measurement.addField(spec.timeFieldHashed(), Value{*val}); + } + + if (includeMetaField && !metaValue.missing()) { + measurement.addField(*spec.metaFieldHashed(), metaValue); + } + + if (includeTimeField) { + for (auto& fieldColumn : _fieldColumns) { + auto val = fieldColumn.column[j]; + uassert(6067600, "Bucket unexpectedly contained fewer values than count", val); + measurement.addField(fieldColumn.fieldName, Value{*val}); + } + } +} + +std::size_t BucketUnpackerV2::numberOfFields() { + // The data fields are tracked by _fieldColumns, but we need to account also for the time field + // and possibly the meta field. + return kFixedFieldNumber + _fieldColumns.size(); +} +} // namespace + +BucketUnpacker::BucketUnpacker() = default; +BucketUnpacker::BucketUnpacker(BucketUnpacker&& other) = default; +BucketUnpacker::~BucketUnpacker() = default; +BucketUnpacker& BucketUnpacker::operator=(BucketUnpacker&& rhs) = default; + +BucketUnpacker::BucketUnpacker(BucketSpec spec) { + setBucketSpec(std::move(spec)); +} + +void BucketUnpacker::addComputedMetaProjFields(const std::vector& computedFieldNames) { + for (auto&& field : computedFieldNames) { + _spec.addComputedMetaProjFields(field); + + // If we're already specifically including fields, we need to add the computed fields to + // the included field set to indicate they're in the output doc. + if (_spec.behavior() == BucketSpec::Behavior::kInclude) { + _spec.addIncludeExcludeField(field); + } else { + // Since exclude is applied after addComputedMetaProjFields, we must erase the new field + // from the include/exclude fields so this doesn't get removed. + _spec.removeIncludeExcludeField(field.toString()); + } + } + + // Recalculate _includeTimeField, since both computedMetaProjFields and fieldSet may have + // changed. + determineIncludeTimeField(); +} + +Document BucketUnpacker::getNext() { + tassert(5521503, "'getNext()' requires the bucket to be owned", _bucket.isOwned()); + tassert(5422100, "'getNext()' was called after the bucket has been exhausted", hasNext()); + + // MutableDocument reserves memory based on the number of fields, but uses a fixed size of 25 + // bytes plus an allowance of 7 characters for the field name. Doubling the number of fields + // should give us enough overhead for longer field names without wasting too much memory. + auto measurement = MutableDocument{2 * _unpackingImpl->numberOfFields()}; + _hasNext = _unpackingImpl->getNext( + measurement, _spec, _metaValue, _includeTimeField, _includeMetaField); + + // Add computed meta projections. 
+ for (auto&& name : _spec.computedMetaProjFields()) { + measurement.addField(name, Value{_computedMetaProjections[name]}); + } + + if (_includeMinTimeAsMetadata && _minTime) { + measurement.metadata().setTimeseriesBucketMinTime(*_minTime); + } + + if (_includeMaxTimeAsMetadata && _maxTime) { + measurement.metadata().setTimeseriesBucketMaxTime(*_maxTime); + } + + return measurement.freeze(); +} + +BSONObj BucketUnpacker::getNextBson() { + tassert(7026800, "'getNextBson()' requires the bucket to be owned", _bucket.isOwned()); + tassert(7026801, "'getNextBson()' was called after the bucket has been exhausted", hasNext()); + tassert(7026802, + "'getNextBson()' cannot return max and min time as metadata", + !_includeMaxTimeAsMetadata && !_includeMinTimeAsMetadata); + + BSONObjBuilder builder; + _hasNext = _unpackingImpl->getNext( + builder, _spec, _metaBSONElem, _includeTimeField, _includeMetaField); + + // Add computed meta projections. + for (auto&& name : _spec.computedMetaProjFields()) { + builder.appendAs(_computedMetaProjections[name], name); + } + + return builder.obj(); +} + +Document BucketUnpacker::extractSingleMeasurement(int j) { + tassert(5422101, + "'extractSingleMeasurment' expects j to be greater than or equal to zero and less than " + "or equal to the number of measurements in a bucket", + j >= 0 && j < _numberOfMeasurements); + + auto measurement = MutableDocument{}; + _unpackingImpl->extractSingleMeasurement(measurement, + j, + _spec, + fieldsToIncludeExcludeDuringUnpack(), + _bucket, + _metaValue, + _includeTimeField, + _includeMetaField); + + // Add computed meta projections. + for (auto&& name : _spec.computedMetaProjFields()) { + measurement.addField(name, Value{_computedMetaProjections[name]}); + } + + return measurement.freeze(); +} + +void BucketUnpacker::reset(BSONObj&& bucket, bool bucketMatchedQuery) { + _unpackingImpl.reset(); + _bucket = std::move(bucket); + _bucketMatchedQuery = bucketMatchedQuery; + uassert(5346510, "An empty bucket cannot be unpacked", !_bucket.isEmpty()); + + auto&& dataRegion = _bucket.getField(timeseries::kBucketDataFieldName).Obj(); + if (dataRegion.isEmpty()) { + // If the data field of a bucket is present but it holds an empty object, there's nothing to + // unpack. + return; + } + + auto&& timeFieldElem = dataRegion.getField(_spec.timeField()); + uassert(5346700, + "The $_internalUnpackBucket stage requires the data region to have a timeField object", + timeFieldElem); + + _metaBSONElem = _bucket[timeseries::kBucketMetaFieldName]; + _metaValue = Value{_metaBSONElem}; + if (_spec.metaField()) { + // The spec indicates that there might be a metadata region. Missing metadata in + // measurements is expressed with missing metadata in a bucket. But we disallow undefined + // since the undefined BSON type is deprecated. + uassert(5369600, + "The $_internalUnpackBucket stage allows metadata to be absent or otherwise, it " + "must not be the deprecated undefined bson type", + _metaValue.missing() || _metaValue.getType() != BSONType::Undefined); + } else { + // If the spec indicates that the time series collection has no metadata field, then we + // should not find a metadata region in the underlying bucket documents. 
+ uassert(5369601, + "The $_internalUnpackBucket stage expects buckets to have missing metadata regions " + "if the metaField parameter is not provided", + _metaValue.missing()); + } + + auto&& controlField = _bucket[timeseries::kBucketControlFieldName]; + uassert(5857902, + "The $_internalUnpackBucket stage requires 'control' object to be present", + controlField && controlField.type() == BSONType::Object); + + auto&& controlClosed = controlField.Obj()[timeseries::kBucketControlClosedFieldName]; + _closedBucket = controlClosed.booleanSafe(); + + if (_includeMinTimeAsMetadata) { + auto&& controlMin = controlField.Obj()[timeseries::kBucketControlMinFieldName]; + uassert(6460203, + str::stream() << "The $_internalUnpackBucket stage requires '" + << timeseries::kControlMinFieldNamePrefix << "' object to be present", + controlMin && controlMin.type() == BSONType::Object); + auto&& minTime = controlMin.Obj()[_spec.timeField()]; + uassert(6460204, + str::stream() << "The $_internalUnpackBucket stage requires '" + << timeseries::kControlMinFieldNamePrefix << "." << _spec.timeField() + << "' to be a date", + minTime && minTime.type() == BSONType::Date); + _minTime = minTime.date(); + } + + if (_includeMaxTimeAsMetadata) { + auto&& controlMax = controlField.Obj()[timeseries::kBucketControlMaxFieldName]; + uassert(6460205, + str::stream() << "The $_internalUnpackBucket stage requires '" + << timeseries::kControlMaxFieldNamePrefix << "' object to be present", + controlMax && controlMax.type() == BSONType::Object); + auto&& maxTime = controlMax.Obj()[_spec.timeField()]; + uassert(6460206, + str::stream() << "The $_internalUnpackBucket stage requires '" + << timeseries::kControlMaxFieldNamePrefix << "." << _spec.timeField() + << "' to be a date", + maxTime && maxTime.type() == BSONType::Date); + _maxTime = maxTime.date(); + } + + auto&& versionField = controlField.Obj()[timeseries::kBucketControlVersionFieldName]; + uassert(5857903, + "The $_internalUnpackBucket stage requires 'control.version' field to be present", + versionField && isNumericBSONType(versionField.type())); + auto version = versionField.Number(); + + if (version == 1) { + _unpackingImpl = std::make_unique(timeFieldElem); + } else if (version == 2) { + auto countField = controlField.Obj()[timeseries::kBucketControlCountFieldName]; + _unpackingImpl = + std::make_unique(timeFieldElem, + countField && isNumericBSONType(countField.type()) + ? static_cast(countField.Number()) + : -1); + } else { + uasserted(5857900, "Invalid bucket version"); + } + + // Walk the data region of the bucket, and decide if an iterator should be set up based on the + // include or exclude case. + for (auto&& elem : dataRegion) { + auto colName = elem.fieldNameStringData(); + if (colName == _spec.timeField()) { + // Skip adding a FieldIterator for the timeField since the timestamp value from + // _timeFieldIter can be placed accordingly in the materialized measurement. + continue; + } + + // Includes a field when '_spec.behavior()' is 'kInclude' and it's found in 'fieldSet' or + // _spec.behavior() is 'kExclude' and it's not found in 'fieldSet'. + if (determineIncludeField( + colName, _spec.behavior(), fieldsToIncludeExcludeDuringUnpack())) { + _unpackingImpl->addField(elem); + } + } + + // Update computed meta projections with values from this bucket. + for (auto&& name : _spec.computedMetaProjFields()) { + _computedMetaProjections[name] = _bucket[name]; + } + + // Save the measurement count for the bucket. 
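As a concrete illustration of the document shape that reset() walks (the field values, measurement count, and meta string are made up), a version-1 bucket for a collection with timeField "t" might look like the sketch below; a control.version of 2 with BSONColumn-compressed columns would instead be routed to BucketUnpackerV2.

```cpp
// Illustrative only: a minimal v1 (uncompressed) bucket with two measurements. The
// user-level metaField value is always stored under the bucket-level 'meta' field.
BSONObj exampleV1Bucket = BSON(
    "_id" << OID::gen()
          << "control"
          << BSON("version" << 1 << "min" << BSON("t" << Date_t::fromMillisSinceEpoch(1000))
                            << "max" << BSON("t" << Date_t::fromMillisSinceEpoch(2000)))
          << "meta" << "sensor-A"
          << "data"
          << BSON("t" << BSON("0" << Date_t::fromMillisSinceEpoch(1000) << "1"
                                  << Date_t::fromMillisSinceEpoch(2000))
                      << "temp" << BSON("0" << 20.5 << "1" << 21.0)));
```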
+int BucketUnpacker::computeMeasurementCount(const BSONObj& bucket, StringData timeField) {
+    auto&& controlField = bucket[timeseries::kBucketControlFieldName];
+    uassert(5857904,
+            "The $_internalUnpackBucket stage requires 'control' object to be present",
+            controlField && controlField.type() == BSONType::Object);
+
+    auto&& versionField = controlField.Obj()[timeseries::kBucketControlVersionFieldName];
+    uassert(5857905,
+            "The $_internalUnpackBucket stage requires 'control.version' field to be present",
+            versionField && isNumericBSONType(versionField.type()));
+
+    auto&& dataField = bucket[timeseries::kBucketDataFieldName];
+    if (!dataField || dataField.type() != BSONType::Object)
+        return 0;
+
+    auto&& time = dataField.Obj()[timeField];
+    if (!time) {
+        return 0;
+    }
+
+    auto version = versionField.Number();
+    if (version == 1) {
+        return BucketUnpackerV1::computeElementCountFromTimestampObjSize(time.objsize());
+    } else if (version == 2) {
+        auto countField = controlField.Obj()[timeseries::kBucketControlCountFieldName];
+        if (countField && isNumericBSONType(countField.type())) {
+            return static_cast<int>(countField.Number());
+        }
+
+        return BSONColumn(time).size();
+    } else {
+        uasserted(5857901, "Invalid bucket version");
+    }
+}
+
+void BucketUnpacker::determineIncludeTimeField() {
+    const bool isInclude = _spec.behavior() == BucketSpec::Behavior::kInclude;
+    const bool fieldSetContainsTime =
+        _spec.fieldSet().find(_spec.timeField()) != _spec.fieldSet().end();
+
+    const auto& metaProjFields = _spec.computedMetaProjFields();
+    const bool metaProjContains = metaProjFields.find(_spec.timeField()) != metaProjFields.cend();
+
+    // If computedMetaProjFields contains the time field, we exclude it from unpacking no matter
+    // what, since it will be overwritten anyway.
+ _includeTimeField = isInclude == fieldSetContainsTime && !metaProjContains; +} + +void BucketUnpacker::eraseMetaFromFieldSetAndDetermineIncludeMeta() { + if (!_spec.metaField() || + _spec.computedMetaProjFields().find(*_spec.metaField()) != + _spec.computedMetaProjFields().cend()) { + _includeMetaField = false; + } else if (auto itr = _spec.fieldSet().find(*_spec.metaField()); + itr != _spec.fieldSet().end()) { + _spec.removeIncludeExcludeField(*_spec.metaField()); + _includeMetaField = _spec.behavior() == BucketSpec::Behavior::kInclude; + } else { + _includeMetaField = _spec.behavior() == BucketSpec::Behavior::kExclude; + } +} + +void BucketUnpacker::eraseExcludedComputedMetaProjFields() { + if (_spec.behavior() == BucketSpec::Behavior::kExclude) { + for (const auto& field : _spec.fieldSet()) { + _spec.eraseFromComputedMetaProjFields(field); + } + } +} + +void BucketUnpacker::setBucketSpec(BucketSpec&& bucketSpec) { + _spec = std::move(bucketSpec); + + eraseMetaFromFieldSetAndDetermineIncludeMeta(); + determineIncludeTimeField(); + eraseExcludedComputedMetaProjFields(); + + _includeMinTimeAsMetadata = _spec.includeMinTimeAsMetadata; + _includeMaxTimeAsMetadata = _spec.includeMaxTimeAsMetadata; +} + +void BucketUnpacker::setIncludeMinTimeAsMetadata() { + _includeMinTimeAsMetadata = true; +} + +void BucketUnpacker::setIncludeMaxTimeAsMetadata() { + _includeMaxTimeAsMetadata = true; +} + +const std::set& BucketUnpacker::fieldsToIncludeExcludeDuringUnpack() { + if (_unpackFieldsToIncludeExclude) { + return *_unpackFieldsToIncludeExclude; + } + + _unpackFieldsToIncludeExclude = std::set(); + const auto& metaProjFields = _spec.computedMetaProjFields(); + if (_spec.behavior() == BucketSpec::Behavior::kInclude) { + // For include, we unpack fieldSet - metaProjFields. + for (auto&& field : _spec.fieldSet()) { + if (metaProjFields.find(field) == metaProjFields.cend()) { + _unpackFieldsToIncludeExclude->insert(field); + } + } + } else { + // For exclude, we unpack everything but fieldSet + metaProjFields. + _unpackFieldsToIncludeExclude->insert(_spec.fieldSet().begin(), _spec.fieldSet().end()); + _unpackFieldsToIncludeExclude->insert(metaProjFields.begin(), metaProjFields.end()); + } + + return *_unpackFieldsToIncludeExclude; +} + +const std::set BucketUnpacker::reservedBucketFieldNames = { + timeseries::kBucketIdFieldName, + timeseries::kBucketDataFieldName, + timeseries::kBucketMetaFieldName, + timeseries::kBucketControlFieldName}; + +} // namespace mongo diff --git a/src/mongo/db/exec/timeseries/bucket_unpacker.h b/src/mongo/db/exec/timeseries/bucket_unpacker.h new file mode 100644 index 0000000000000..eab495e2fa845 --- /dev/null +++ b/src/mongo/db/exec/timeseries/bucket_unpacker.h @@ -0,0 +1,254 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/timeseries/bucket_spec.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/time_support.h" + +namespace mongo { +/** + * BucketUnpacker will unpack bucket fields for metadata and the provided fields. + */ +class BucketUnpacker { +public: + /** + * Returns the number of measurements in the bucket in O(1) time. + */ + static int computeMeasurementCount(const BSONObj& bucket, StringData timeField); + + // Set of field names reserved for time-series buckets. + static const std::set reservedBucketFieldNames; + + BucketUnpacker(); + BucketUnpacker(BucketSpec spec); + BucketUnpacker(const BucketUnpacker& other) = delete; + BucketUnpacker(BucketUnpacker&& other); + ~BucketUnpacker(); + BucketUnpacker& operator=(const BucketUnpacker& rhs) = delete; + BucketUnpacker& operator=(BucketUnpacker&& rhs); + + /** + * This method will continue to materialize Documents until the bucket is exhausted. A + * precondition of this method is that 'hasNext()' must be true. + */ + Document getNext(); + + /** + * Similar to the previous method, but return a BSON object instead. + */ + BSONObj getNextBson(); + + /** + * This method will extract the j-th measurement from the bucket. A precondition of this method + * is that j >= 0 && j <= the number of measurements within the underlying bucket. + */ + Document extractSingleMeasurement(int j); + + /** + * Returns true if there is more data to fetch, is the precondition for 'getNext'. + */ + bool hasNext() const { + return _hasNext; + } + + /** + * Makes a copy of this BucketUnpacker that is detached from current bucket. The new copy needs + * to be reset to a new bucket object to perform unpacking. + */ + BucketUnpacker copy() const { + BucketUnpacker unpackerCopy; + unpackerCopy._spec = _spec; + unpackerCopy._includeMetaField = _includeMetaField; + unpackerCopy._includeTimeField = _includeTimeField; + return unpackerCopy; + } + + /** + * This resets the unpacker to prepare to unpack a new bucket described by the given document. 
+ */ + void reset(BSONObj&& bucket, bool bucketMatchedQuery = false); + + BucketSpec::Behavior behavior() const { + return _spec.behavior(); + } + + const BucketSpec& bucketSpec() const { + return _spec; + } + + const BSONObj& bucket() const { + return _bucket; + } + + bool bucketMatchedQuery() const { + return _bucketMatchedQuery; + } + + bool includeMetaField() const { + return _includeMetaField; + } + + bool includeTimeField() const { + return _includeTimeField; + } + + int32_t numberOfMeasurements() const { + return _numberOfMeasurements; + } + + bool includeMinTimeAsMetadata() const { + return _includeMinTimeAsMetadata; + } + + bool includeMaxTimeAsMetadata() const { + return _includeMaxTimeAsMetadata; + } + + const std::string& getTimeField() const { + return _spec.timeField(); + } + + const boost::optional& getMetaField() const { + return _spec.metaField(); + } + + std::string getMinField(StringData field) const { + return std::string{timeseries::kControlMinFieldNamePrefix} + field; + } + + std::string getMaxField(StringData field) const { + return std::string{timeseries::kControlMaxFieldNamePrefix} + field; + } + + bool isClosedBucket() const { + return _closedBucket; + } + + void setBucketSpec(BucketSpec&& bucketSpec); + void setIncludeMinTimeAsMetadata(); + void setIncludeMaxTimeAsMetadata(); + + // Add computed meta projection names to the bucket specification. + void addComputedMetaProjFields(const std::vector& computedFieldNames); + + // Fill _spec.unpackFieldsToIncludeExclude with final list of fields to include/exclude during + // unpacking. Only calculates the list the first time it is called. + const std::set& fieldsToIncludeExcludeDuringUnpack(); + + class UnpackingImpl; + +private: + // Determines if timestamp values should be included in the materialized measurements. + void determineIncludeTimeField(); + + // Removes metaField from the field set and determines whether metaField should be + // included in the materialized measurements. + void eraseMetaFromFieldSetAndDetermineIncludeMeta(); + + // Erase computed meta projection fields if they are present in the exclusion field set. + void eraseExcludedComputedMetaProjFields(); + + BucketSpec _spec; + + std::unique_ptr _unpackingImpl; + + bool _hasNext = false; + + // A flag used to mark that the entire bucket matches the following $match predicate. + bool _bucketMatchedQuery = false; + + // A flag used to mark that the timestamp value should be materialized in measurements. + bool _includeTimeField{false}; + + // A flag used to mark that a bucket's metadata value should be materialized in measurements. + bool _includeMetaField{false}; + + // A flag used to mark that a bucket's min time should be materialized as metadata. + bool _includeMinTimeAsMetadata{false}; + + // A flag used to mark that a bucket's max time should be materialized as metadata. + bool _includeMaxTimeAsMetadata{false}; + + // The bucket being unpacked. + BSONObj _bucket; + + // Since the metadata value is the same across all materialized measurements we can cache the + // metadata Value in the reset phase and use it to materialize the metadata in each + // measurement. + Value _metaValue; + + BSONElement _metaBSONElem; + + // Since the bucket min time is the same across all materialized measurements, we can cache the + // value in the reset phase and use it to materialize as a metadata field in each measurement + // if required by the pipeline. 
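A small worked example of the include/exclude computation performed by fieldsToIncludeExcludeDuringUnpack() declared above; the field names are hypothetical:

    // behavior = kInclude, fieldSet = {"a", "b", "tag"}, computedMetaProjFields = {"tag"}
    //     => unpack {"a", "b"} from 'data'; "tag" is materialized from the computed projection.
    // behavior = kExclude, fieldSet = {"a"}, computedMetaProjFields = {"tag"}
    //     => skip {"a", "tag"} during unpacking and materialize every other column.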
+ boost::optional _minTime; + + // Since the bucket max time is the same across all materialized measurements, we can cache the + // value in the reset phase and use it to materialize as a metadata field in each measurement + // if required by the pipeline. + boost::optional _maxTime; + + // Flag indicating whether this bucket is closed, as determined by the presence of the + // 'control.closed' field. + bool _closedBucket = false; + + // Map for the computed meta field projections. Updated for + // every bucket upon reset(). + stdx::unordered_map _computedMetaProjections; + + // The number of measurements in the bucket. + int32_t _numberOfMeasurements = 0; + + // Final list of fields to include/exclude during unpacking. This is computed once during the + // first doGetNext call so we don't have to recalculate every time we reach a new bucket. + boost::optional> _unpackFieldsToIncludeExclude = boost::none; +}; +} // namespace mongo diff --git a/src/mongo/db/exec/timeseries_modify.cpp b/src/mongo/db/exec/timeseries_modify.cpp index ce477e445ab80..2d9b0e3590985 100644 --- a/src/mongo/db/exec/timeseries_modify.cpp +++ b/src/mongo/db/exec/timeseries_modify.cpp @@ -29,8 +29,52 @@ #include "mongo/db/exec/timeseries_modify.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/client.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_write_util.h" +#include "mongo/db/update/path_support.h" +#include "mongo/db/update/update_util.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/s/would_change_owning_shard_exception.h" +#include "mongo/transport/session.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -39,41 +83,63 @@ namespace mongo { const char* TimeseriesModifyStage::kStageType = "TS_MODIFY"; TimeseriesModifyStage::TimeseriesModifyStage(ExpressionContext* expCtx, - std::unique_ptr params, + TimeseriesModifyParams&& params, WorkingSet* ws, std::unique_ptr child, - const CollectionPtr& coll, + CollectionAcquisition coll, BucketUnpacker bucketUnpacker, - std::unique_ptr residualPredicate) - : RequiresCollectionStage(kStageType, expCtx, coll), + std::unique_ptr residualPredicate, + std::unique_ptr originalPredicate) + : RequiresWritableCollectionStage(kStageType, expCtx, coll), 
_params(std::move(params)), + _originalPredicate(std::move(originalPredicate)), _ws(ws), _bucketUnpacker{std::move(bucketUnpacker)}, _residualPredicate(std::move(residualPredicate)), - _preWriteFilter(opCtx(), coll->ns()) { + _preWriteFilter(opCtx(), coll.nss()) { tassert(7308200, - "multi is true and no residual predicate was specified", - _isDeleteOne() || _residualPredicate); + "Multi deletes must have a residual predicate", + _isSingletonWrite() || _residualPredicate || _params.isUpdate); + tassert(7308300, + "Can return the old measurement only if modifying one", + !_params.returnOld || _isSingletonWrite()); + tassert(7314602, + "Can return the new measurement only if updating one", + !_params.returnNew || (_isSingletonWrite() && _params.isUpdate)); + tassert(7743100, + "Updates must provide original predicate", + !_params.isUpdate || _originalPredicate); _children.emplace_back(std::move(child)); // These three properties are only used for the queryPlanner explain and will not change while // executing this stage. _specificStats.opType = [&] { - if (_isDeleteOne()) { - return "deleteOne"; - } else { - return "deleteMany"; + if (_params.isUpdate) { + return _isMultiWrite() ? "updateMany" : "updateOne"; } + return _isMultiWrite() ? "deleteMany" : "deleteOne"; }(); - _specificStats.bucketFilter = _params->canonicalQuery->getQueryObj(); + _specificStats.bucketFilter = _params.canonicalQuery->getQueryObj(); if (_residualPredicate) { _specificStats.residualFilter = _residualPredicate->serialize(); } + + tassert(7314202, + "Updates must specify an update driver", + _params.updateDriver || !_params.isUpdate); + _specificStats.isModUpdate = + _params.isUpdate && _params.updateDriver->type() == UpdateDriver::UpdateType::kOperator; + + _isUserInitiatedUpdate = _params.isUpdate && opCtx()->writesAreReplicated() && + !(_params.isFromOplogApplication || + _params.updateDriver->type() == UpdateDriver::UpdateType::kDelta || _params.fromMigrate); } bool TimeseriesModifyStage::isEOF() { - if (_isDeleteOne() && _specificStats.nMeasurementsDeleted > 0) { - return true; + if (_isSingletonWrite() && _specificStats.nMeasurementsMatched > 0) { + // If we have a measurement to return, we should not return EOF so that we can get a chance + // to get called again and return the measurement. 
+ return !_measurementToReturn; } return child()->isEOF() && _retryBucketId == WorkingSet::INVALID_ID; } @@ -88,25 +154,346 @@ std::unique_ptr TimeseriesModifyStage::getStats() { return ret; } -PlanStage::StageState TimeseriesModifyStage::_writeToTimeseriesBuckets( +const std::vector>& TimeseriesModifyStage::_getUserLevelShardKeyPaths( + const ScopedCollectionDescription& collDesc) { + _immutablePaths.clear(); + + const auto& tsFields = collDesc.getTimeseriesFields(); + for (const auto& shardKeyField : collDesc.getKeyPatternFields()) { + if (auto metaField = tsFields->getMetaField(); metaField && + shardKeyField->isPrefixOfOrEqualTo(FieldRef{timeseries::kBucketMetaFieldName})) { + auto userMetaFieldRef = std::make_unique(*metaField); + if (shardKeyField->numParts() > 1) { + userMetaFieldRef->appendPart(shardKeyField->dottedField(1)); + } + _immutablePaths.emplace_back(std::move(userMetaFieldRef)); + } else if (auto timeField = tsFields->getTimeField(); + shardKeyField->isPrefixOfOrEqualTo( + FieldRef{timeseries::kControlMinFieldNamePrefix + timeField.toString()}) || + shardKeyField->isPrefixOfOrEqualTo( + FieldRef{timeseries::kControlMaxFieldNamePrefix + timeField.toString()})) { + _immutablePaths.emplace_back(std::make_unique(timeField)); + } else { + tasserted(7687100, + "Unexpected shard key field: {}"_format(shardKeyField->dottedField())); + } + } + + return _immutablePaths; +} + +const std::vector>& TimeseriesModifyStage::_getImmutablePaths() { + if (!_isUserInitiatedUpdate) { + return _immutablePaths; + } + + const auto& collDesc = collectionAcquisition().getShardingDescription(); + if (!collDesc.isSharded() || OperationShardingState::isComingFromRouter(opCtx())) { + return _immutablePaths; + } + + return _getUserLevelShardKeyPaths(collDesc); +} + +std::vector TimeseriesModifyStage::_applyUpdate( + const std::vector& matchedMeasurements, std::vector& unchangedMeasurements) { + // Determine which documents to update based on which ones are actually being changed. + std::vector modifiedMeasurements; + + for (auto&& measurement : matchedMeasurements) { + // Timeseries updates are never in place, because we execute them as a delete of the old + // measurement plus an insert of the modified one. + mutablebson::Document doc(measurement, mutablebson::Document::kInPlaceDisabled); + + // We want to block shard key updates if the user requested an update directly to a shard, + // when shard key fields should be immutable. + FieldRefSet immutablePaths(_getImmutablePaths()); + const bool isInsert = false; + bool docWasModified = false; + + if (!_params.updateDriver->needMatchDetails()) { + uassertStatusOK(_params.updateDriver->update(opCtx(), + "", + &doc, + _isUserInitiatedUpdate, + immutablePaths, + isInsert, + nullptr, + &docWasModified)); + } else { + // If there was a matched field, obtain it. + MatchDetails matchDetails; + matchDetails.requestElemMatchKey(); + + // We have to re-apply the filter to get the matched element. + tassert(7662500, + "measurement must pass filter", + _originalPredicate->matchesBSON(measurement, &matchDetails)); + + uassertStatusOK(_params.updateDriver->update( + opCtx(), + matchDetails.hasElemMatchKey() ? matchDetails.elemMatchKey() : "", + &doc, + _isUserInitiatedUpdate, + immutablePaths, + isInsert, + nullptr, + &docWasModified)); + } + + if (docWasModified) { + modifiedMeasurements.emplace_back(doc.getObject()); + } else { + // The document wasn't modified, write it back to the original bucket unchanged. 
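The bucket-to-user path translation performed by _getUserLevelShardKeyPaths() can be pictured with a hypothetical sharded time-series collection whose metaField is "tag" and timeField is "t":

    // bucket-level shard key field            ->  user-level immutable path
    // "meta"                                  ->  "tag"
    // "meta.region"                           ->  "tag.region"
    // "control.min.t" / "control.max.t"       ->  "t"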
+ unchangedMeasurements.emplace_back(std::move(measurement)); + } + } + + return modifiedMeasurements; +} + +void TimeseriesModifyStage::_checkRestrictionsOnUpdatingShardKeyAreNotViolated( + const ScopedCollectionDescription& collDesc, const FieldRefSet& shardKeyPaths) { + using namespace fmt::literals; + // We do not allow modifying either the current shard key value or new shard key value (if + // resharding) without specifying the full current shard key in the query. + // If the query is a simple equality match on _id, then '_params.canonicalQuery' will be null. + // But if we are here, we already know that the shard key is not _id, since we have an assertion + // earlier for requests that try to modify the immutable _id field. So it is safe to uassert if + // '_params.canonicalQuery' is null OR if the query does not include equality matches on all + // shard key fields. + pathsupport::EqualityMatches equalities; + + // We do not allow updates to the shard key when 'multi' is true. + uassert(ErrorCodes::InvalidOptions, + "Multi-update operations are not allowed when updating the shard key field.", + _params.isUpdate && _isSingletonWrite()); + + // With the introduction of PM-1632, we allow updating a document shard key without providing a + // full shard key if the update is executed in a retryable write or transaction. PM-1632 uses an + // internal transaction to execute these updates, so to make sure that we can only update the + // document shard key in a retryable write or transaction, mongos only sets + // $_allowShardKeyUpdatesWithoutFullShardKeyInQuery to true if the client executed write was a + // retryable write or in a transaction. + if (_params.allowShardKeyUpdatesWithoutFullShardKeyInQuery && + feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( + serverGlobalParams.featureCompatibility)) { + bool isInternalClient = + !cc().session() || (cc().session()->getTags() & transport::Session::kInternalClient); + uassert(ErrorCodes::InvalidOptions, + "$_allowShardKeyUpdatesWithoutFullShardKeyInQuery is an internal parameter", + isInternalClient); + + // If this node is a replica set primary node, an attempted update to the shard key value + // must either be a retryable write or inside a transaction. An update without a transaction + // number is legal if gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi is enabled + // because mongos will be able to start an internal transaction to handle the + // wouldChangeOwningShard error thrown below. If this node is a replica set secondary node, + // we can skip validation. 
+ if (!feature_flags::gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassert(ErrorCodes::IllegalOperation, + "Must run update to shard key field in a multi-statement transaction or with " + "retryWrites: true.", + _params.allowShardKeyUpdatesWithoutFullShardKeyInQuery); + } + } else { + FieldRefSet userLevelShardKeyPaths(_getUserLevelShardKeyPaths(collDesc)); + uassert(7717803, + "Shard key update is not allowed without specifying the full shard key in the " + "query: pred = {}, shardKeyPaths = {}"_format( + _originalPredicate->serialize().toString(), userLevelShardKeyPaths.toString()), + (_originalPredicate && + pathsupport::extractFullEqualityMatches( + *_originalPredicate, userLevelShardKeyPaths, &equalities) + .isOK() && + equalities.size() == userLevelShardKeyPaths.size())); + + // If this node is a replica set primary node, an attempted update to the shard key value + // must either be a retryable write or inside a transaction. An update without a transaction + // number is legal if gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi is enabled + // because mongos will be able to start an internal transaction to handle the + // wouldChangeOwningShard error thrown below. If this node is a replica set secondary node, + // we can skip validation. + if (!feature_flags::gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassert(ErrorCodes::IllegalOperation, + "Must run update to shard key field in a multi-statement transaction or with " + "retryWrites: true.", + opCtx()->getTxnNumber()); + } + } +} + +void TimeseriesModifyStage::_checkUpdateChangesExistingShardKey(const BSONObj& newBucket, + const BSONObj& oldBucket, + const BSONObj& newMeasurement, + const BSONObj& oldMeasurement) { + using namespace fmt::literals; + const auto& collDesc = collectionAcquisition().getShardingDescription(); + const auto& shardKeyPattern = collDesc.getShardKeyPattern(); + + auto oldShardKey = shardKeyPattern.extractShardKeyFromDoc(oldBucket); + auto newShardKey = shardKeyPattern.extractShardKeyFromDoc(newBucket); + + // If the shard key fields remain unchanged by this update we can skip the rest of the checks. + // Using BSONObj::binaryEqual() still allows a missing shard key field to be filled in with an + // explicit null value. + if (newShardKey.binaryEqual(oldShardKey)) { + return; + } + + FieldRefSet shardKeyPaths(collDesc.getKeyPatternFields()); + + // Assert that the updated doc has no arrays or array descendants for the shard key fields. + update::assertPathsNotArray(mutablebson::Document{oldBucket}, shardKeyPaths); + + _checkRestrictionsOnUpdatingShardKeyAreNotViolated(collDesc, shardKeyPaths); + + // At this point we already asserted that the complete shardKey have been specified in the + // query, this implies that mongos is not doing a broadcast update and that it attached a + // shardVersion to the command. Thus it is safe to call getOwnershipFilter + const auto& collFilter = collectionAcquisition().getShardingFilter(); + invariant(collFilter); + + // If the shard key of an orphan document is allowed to change, and the document is allowed to + // become owned by the shard, the global uniqueness assumption for _id values would be violated. 
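A condensed sketch of the ownership check implemented here, using a hypothetical shard key pattern {"meta.region": 1}; the real code operates on the bucket rewritten from the modified measurement:

    auto oldKey = shardKeyPattern.extractShardKeyFromDoc(oldBucket);  // e.g. {"meta.region": "eu"}
    auto newKey = shardKeyPattern.extractShardKeyFromDoc(newBucket);  // e.g. {"meta.region": "us"}
    if (!newKey.binaryEqual(oldKey) && !collFilter->keyBelongsToMe(newKey)) {
        // The update would move the measurement to another shard, so a
        // WouldChangeOwningShardInfo error is surfaced for mongos to handle.
    }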
+ invariant(collFilter->keyBelongsToMe(oldShardKey)); + + if (!collFilter->keyBelongsToMe(newShardKey)) { + // We send the 'oldMeasurement' instead of the old bucket document to leverage timeseries + // deleteOne because the delete can run inside an internal transaction. + uasserted(WouldChangeOwningShardInfo(oldMeasurement, + newBucket, + false, + collectionPtr()->ns(), + collectionPtr()->uuid(), + newMeasurement), + "This update would cause the doc to change owning shards"); + } +} + +void TimeseriesModifyStage::_checkUpdateChangesReshardingKey( + const ShardingWriteRouter& shardingWriteRouter, + const BSONObj& newBucket, + const BSONObj& oldBucket, + const BSONObj& newMeasurement, + const BSONObj& oldMeasurement) { + using namespace fmt::literals; + const auto& collDesc = collectionAcquisition().getShardingDescription(); + + auto reshardingKeyPattern = collDesc.getReshardingKeyIfShouldForwardOps(); + if (!reshardingKeyPattern) + return; + + auto oldShardKey = reshardingKeyPattern->extractShardKeyFromDoc(oldBucket); + auto newShardKey = reshardingKeyPattern->extractShardKeyFromDoc(newBucket); + + if (newShardKey.binaryEqual(oldShardKey)) + return; + + FieldRefSet shardKeyPaths(collDesc.getKeyPatternFields()); + _checkRestrictionsOnUpdatingShardKeyAreNotViolated(collDesc, shardKeyPaths); + + auto oldRecipShard = *shardingWriteRouter.getReshardingDestinedRecipient(oldBucket); + auto newRecipShard = *shardingWriteRouter.getReshardingDestinedRecipient(newBucket); + + if (oldRecipShard != newRecipShard) { + // We send the 'oldMeasurement' instead of the old bucket document to leverage timeseries + // deleteOne because the delete can run inside an internal transaction. + uasserted( + WouldChangeOwningShardInfo(oldMeasurement, + newBucket, + false, + collectionPtr()->ns(), + collectionPtr()->uuid(), + newMeasurement), + "This update would cause the doc to change owning shards under the new shard key"); + } +} + +void TimeseriesModifyStage::_checkUpdateChangesShardKeyFields(const BSONObj& newBucket, + const BSONObj& oldBucket, + const BSONObj& newMeasurement, + const BSONObj& oldMeasurement) { + const auto isSharded = collectionAcquisition().getShardingDescription().isSharded(); + if (!isSharded) { + return; + } + + // It is possible that both the existing and new shard keys are being updated, so we do not want + // to short-circuit checking whether either is being modified. + _checkUpdateChangesExistingShardKey(newBucket, oldBucket, newMeasurement, oldMeasurement); + ShardingWriteRouter shardingWriteRouter(opCtx(), collectionPtr()->ns()); + _checkUpdateChangesReshardingKey( + shardingWriteRouter, newBucket, oldBucket, newMeasurement, oldMeasurement); +} + +template +std::pair TimeseriesModifyStage::_writeToTimeseriesBuckets( + ScopeGuard& bucketFreer, WorkingSetID bucketWsmId, - const std::vector& unchangedMeasurements, - const std::vector& deletedMeasurements, + std::vector&& unchangedMeasurements, + std::vector&& matchedMeasurements, bool bucketFromMigrate) { - if (_params->isExplain) { - _specificStats.nMeasurementsDeleted += deletedMeasurements.size(); - return PlanStage::NEED_TIME; + // No measurements needed to be updated or deleted from the bucket document. + if (matchedMeasurements.empty()) { + return {false, PlanStage::NEED_TIME}; } + _specificStats.nMeasurementsMatched += matchedMeasurements.size(); + + bool isUpdate = _params.isUpdate; + + // If this is a delete, we will be deleting all matched measurements. 
If this is an update, we + // may not need to modify all measurements, since some may be no-op updates. + const auto& modifiedMeasurements = + isUpdate ? _applyUpdate(matchedMeasurements, unchangedMeasurements) : matchedMeasurements; + + // Checks for shard key value changes. We will fail the command if it's a multi-update, so only + // performing the check needed for a single-update. + if (isUpdate && _isUserInitiatedUpdate && !modifiedMeasurements.empty()) { + _checkUpdateChangesShardKeyFields( + timeseries::makeBucketDocument({modifiedMeasurements[0]}, + collectionPtr()->ns(), + *collectionPtr()->getTimeseriesOptions(), + collectionPtr()->getDefaultCollator()), + _bucketUnpacker.bucket(), + modifiedMeasurements[0], + matchedMeasurements[0]); + } + + ScopeGuard setMeasurementToReturnGuard([&] { + // If asked to return the old or new measurement and the write was successful, we should + // save the measurement so that we can return it later. + if (_params.returnOld) { + _measurementToReturn = std::move(matchedMeasurements[0]); + } else if (_params.returnNew) { + if (modifiedMeasurements.empty()) { + // If we are returning the new measurement, then we must have modified at least one + // measurement. If we did not, then we should return the old measurement instead. + _measurementToReturn = std::move(matchedMeasurements[0]); + } else { + _measurementToReturn = std::move(modifiedMeasurements[0]); + } + } + }); - // No measurements needed to be deleted from the bucket document. - if (deletedMeasurements.empty()) { - return PlanStage::NEED_TIME; + // After applying the updates, no measurements needed to be updated in the bucket document. This + // case is still considered a successful write. + if (modifiedMeasurements.empty()) { + return {true, PlanStage::NEED_TIME}; + } + + // We don't actually write anything if we are in explain mode but we still need to update the + // stats and let the caller think as if the write succeeded if there's any modified measurement. 
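The ScopeGuard/dismiss() pattern relied on throughout this function reduces to the following sketch (mongo::ScopeGuard from "mongo/util/scopeguard.h"; the condition is illustrative):

    ScopeGuard guard([&] {
        // Side effect that runs when the scope exits, unless the guard is dismissed.
    });
    if (mustRetryLater) {
        guard.dismiss();  // Suppress the side effect on the retry path.
    }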
+ if (_params.isExplain) { + _specificStats.nMeasurementsModified += modifiedMeasurements.size(); + return {true, PlanStage::NEED_TIME}; } handlePlanStageYield( expCtx(), "TimeseriesModifyStage saveState", - collection()->ns().ns(), [&] { child()->saveState(); return PlanStage::NEED_TIME /* unused */; @@ -117,88 +504,90 @@ PlanStage::StageState TimeseriesModifyStage::_writeToTimeseriesBuckets( }); auto recordId = _ws->get(bucketWsmId)->recordId; - - auto yieldAndRetry = [&](unsigned logId) { - LOGV2_DEBUG(logId, - 5, - "Retrying bucket due to conflict attempting to write out changes", - "bucket_rid"_attr = recordId); - _retryBucket(bucketWsmId); - return PlanStage::NEED_YIELD; - }; - - OID bucketId = record_id_helpers::toBSONAs(recordId, "_id")["_id"].OID(); - if (unchangedMeasurements.empty()) { - write_ops::DeleteOpEntry deleteEntry(BSON("_id" << bucketId), false); - write_ops::DeleteCommandRequest op(collection()->ns(), {deleteEntry}); - - auto result = timeseries::performAtomicWrites( - opCtx(), collection(), recordId, op, bucketFromMigrate, _params->stmtId); - if (!result.isOK()) { - return yieldAndRetry(7309300); + try { + const auto modificationRet = handlePlanStageYield( + expCtx(), + "TimeseriesModifyStage writeToBuckets", + [&] { + if (isUpdate) { + timeseries::performAtomicWritesForUpdate(opCtx(), + collectionPtr(), + recordId, + unchangedMeasurements, + modifiedMeasurements, + bucketFromMigrate, + _params.stmtId); + } else { + timeseries::performAtomicWritesForDelete(opCtx(), + collectionPtr(), + recordId, + unchangedMeasurements, + bucketFromMigrate, + _params.stmtId); + } + return PlanStage::NEED_TIME; + }, + [&] { + // yieldHandler + // We need to retry the bucket, so we should not free the current bucket. + bucketFreer.dismiss(); + _retryBucket(bucketWsmId); + }); + if (modificationRet != PlanStage::NEED_TIME) { + setMeasurementToReturnGuard.dismiss(); + return {false, PlanStage::NEED_YIELD}; } - } else { - auto timeseriesOptions = collection()->getTimeseriesOptions(); - auto metaFieldName = timeseriesOptions->getMetaField(); - auto metadata = [&] { - if (!metaFieldName) { // Collection has no metadata field. - return BSONObj(); - } - // Look for the metadata field on this bucket and return it if present. - auto metaField = unchangedMeasurements[0].getField(*metaFieldName); - return metaField ? metaField.wrap() : BSONObj(); - }(); - auto replaceBucket = - timeseries::makeNewDocumentForWrite(bucketId, - unchangedMeasurements, - metadata, - timeseriesOptions, - collection()->getDefaultCollator()); - - write_ops::UpdateModification u(replaceBucket); - write_ops::UpdateOpEntry updateEntry(BSON("_id" << bucketId), std::move(u)); - write_ops::UpdateCommandRequest op(collection()->ns(), {updateEntry}); - - auto result = timeseries::performAtomicWrites( - opCtx(), collection(), recordId, op, bucketFromMigrate, _params->stmtId); - if (!result.isOK()) { - return yieldAndRetry(7309301); + } catch (const ExceptionFor& ex) { + if (ShardVersion::isPlacementVersionIgnored(ex->getVersionReceived()) && + ex->getCriticalSectionSignal()) { + // If the placement version is IGNORED and we encountered a critical section, then + // yield, wait for the critical section to finish and then we'll resume the write + // from the point we had left. We do this to prevent large multi-writes from + // repeatedly failing due to StaleConfig and exhausting the mongos retry attempts. 
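For reference, the yield-handling wrapper used around every storage interaction in this stage has the following call shape; the work and yield lambdas here are illustrative:

    auto state = handlePlanStageYield(
        expCtx(),
        "TimeseriesModifyStage exampleStep",
        [&] {
            // Perform the storage work; return the stage state to propagate on success.
            return PlanStage::NEED_TIME;
        },
        [&] {
            // yieldHandler: release anything that must not be held across a yield and
            // arrange for the work to be retried.
        });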
+ planExecutorShardingCriticalSectionFuture(opCtx()) = ex->getCriticalSectionSignal(); + // We need to retry the bucket, so we should not free the current bucket. + bucketFreer.dismiss(); + setMeasurementToReturnGuard.dismiss(); + _retryBucket(bucketWsmId); + return {false, PlanStage::NEED_YIELD}; } + throw; } - _specificStats.nMeasurementsDeleted += deletedMeasurements.size(); + _specificStats.nMeasurementsModified += modifiedMeasurements.size(); - // As restoreState may restore (recreate) cursors, cursors are tied to the - // transaction in which they are created, and a WriteUnitOfWork is a transaction, - // make sure to restore the state outside of the WriteUnitOfWork. - return handlePlanStageYield( + // As restoreState may restore (recreate) cursors, cursors are tied to the transaction in which + // they are created, and a WriteUnitOfWork is a transaction, make sure to restore the state + // outside of the WriteUnitOfWork. + auto status = handlePlanStageYield( expCtx(), "TimeseriesModifyStage restoreState", - collection()->ns().ns(), [&] { - child()->restoreState(&collection()); + child()->restoreState(&collectionPtr()); return PlanStage::NEED_TIME; }, // yieldHandler - // Note we don't need to retry anything in this case since the delete already - // was committed. However, we still need to return the deleted document (if it - // was requested). - // TODO SERVER-73089 for findAndModify we need to return the deleted doc. + // Note we don't need to retry anything in this case since the write already was committed. + // However, we still need to return the affected measurement (if it was requested). We don't + // need to rely on the storage engine to return the affected document since we already have + // it in memory. [&] { /* noop */ }); + + return {true, status}; } template std::pair, bool> TimeseriesModifyStage::_checkIfWritingToOrphanedBucket(ScopeGuard& bucketFreer, WorkingSetID id) { - // If we are in explain mode, we do not need to check if the bucket is orphaned since - // we're not writing to bucket. If we are migrating a bucket, we also do not need to - // check if the bucket is not writable and just return it. - if (_params->isExplain || _params->fromMigrate) { - return {boost::none, _params->fromMigrate}; + // If we are in explain mode, we do not need to check if the bucket is orphaned since we're not + // writing to bucket. If we are migrating a bucket, we also do not need to check if the bucket + // is not writable and just return it. + if (_params.isExplain || _params.fromMigrate) { + return {boost::none, _params.fromMigrate}; } return _preWriteFilter.checkIfNotWritable(_ws->get(id)->doc.value(), - "timeseriesDelete"_sd, - collection()->ns(), + "timeseries "_sd + _specificStats.opType, + collectionPtr()->ns(), [&](const ExceptionFor& ex) { planExecutorShardingCriticalSectionFuture( opCtx()) = ex->getCriticalSectionSignal(); @@ -220,18 +609,16 @@ PlanStage::StageState TimeseriesModifyStage::_getNextBucket(WorkingSetID& id) { _retryBucketId = WorkingSet::INVALID_ID; } - // We may not have an up-to-date bucket for this RecordId. Fetch it and ensure that it - // still exists and matches our bucket-level predicate if it is not believed to be - // up-to-date. + // We may not have an up-to-date bucket for this RecordId. Fetch it and ensure that it still + // exists and matches our bucket-level predicate if it is not believed to be up-to-date. 
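The re-fetch described in the comment above boils down to this call shape (a sketch; the surrounding yield handling is shown below):

    bool stillMatches = write_stage_common::ensureStillMatches(
        collectionPtr(), opCtx(), _ws, id, _params.canonicalQuery);
    if (!stillMatches) {
        // The bucket was deleted or no longer satisfies the bucket-level predicate; skip it.
        _ws->free(id);
    }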
bool docStillMatches; const auto status = handlePlanStageYield( expCtx(), "TimeseriesModifyStage:: ensureStillMatches", - collection()->ns().ns(), [&] { docStillMatches = write_stage_common::ensureStillMatches( - collection(), opCtx(), _ws, id, _params->canonicalQuery); + collectionPtr(), opCtx(), _ws, id, _params.canonicalQuery); return PlanStage::NEED_TIME; }, [&] { @@ -256,11 +643,33 @@ void TimeseriesModifyStage::_retryBucket(WorkingSetID bucketId) { _retryBucketId = bucketId; } +void TimeseriesModifyStage::_prepareToReturnMeasurement(WorkingSetID& out) { + tassert(7314601, + "Must be called only when need to return the old or new measurement", + _params.returnOld || _params.returnNew); + + out = _ws->allocate(); + auto member = _ws->get(out); + // The measurement does not have record id. + member->recordId = RecordId{}; + member->doc.value() = Document{std::move(*_measurementToReturn)}; + _ws->transitionToOwnedObj(out); + _measurementToReturn.reset(); +} + PlanStage::StageState TimeseriesModifyStage::doWork(WorkingSetID* out) { if (isEOF()) { return PlanStage::IS_EOF; } + if (_measurementToReturn) { + // If we fall into this case, then we were asked to return the old or new measurement but we + // were not able to do so in the previous call to doWork() because we needed to yield. Now + // that we are back, we can return it. + _prepareToReturnMeasurement(*out); + return PlanStage::ADVANCED; + } + tassert(7495500, "Expected bucketUnpacker's current bucket to be exhausted", !_bucketUnpacker.hasNext()); @@ -274,16 +683,16 @@ PlanStage::StageState TimeseriesModifyStage::doWork(WorkingSetID* out) { return status; } - // We want to free this member when we return because we either have an owned copy of - // the bucket for normal write and write to orphan cases, or we skip the bucket. + // We want to free this member when we return because we either have an owned copy of the bucket + // for normal write and write to orphan cases, or we skip the bucket. ScopeGuard bucketFreer([&] { _ws->free(id); }); auto member = _ws->get(id); tassert(7459100, "Expected a RecordId from the child stage", member->hasRecordId()); - // Determine if we are writing to an orphaned bucket - such writes should be excluded - // from user-visible change stream events. This will be achieved later by setting - // 'fromMigrate' flag when calling performAtomicWrites(). + // Determine if we are writing to an orphaned bucket - such writes should be excluded from + // user-visible change stream events. This will be achieved later by setting 'fromMigrate' flag + // when calling performAtomicWrites(). auto [immediateReturnStageState, bucketFromMigrate] = _checkIfWritingToOrphanedBucket(bucketFreer, id); if (immediateReturnStageState) { @@ -301,33 +710,42 @@ PlanStage::StageState TimeseriesModifyStage::doWork(WorkingSetID* out) { ++_specificStats.nBucketsUnpacked; std::vector unchangedMeasurements; - std::vector deletedMeasurements; + std::vector matchedMeasurements; while (_bucketUnpacker.hasNext()) { auto measurement = _bucketUnpacker.getNext().toBson(); - // We should stop deleting measurements once we hit the limit of one in the not multi case. - bool shouldContinueDeleting = _isDeleteMulti() || deletedMeasurements.empty(); - if (shouldContinueDeleting && + // We should stop matching measurements once we hit the limit of one in the non-multi case. 
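From the plan executor's perspective, draining this stage follows the usual PlanStage driver loop; a rough sketch, with illustrative names:

    WorkingSetID id = WorkingSet::INVALID_ID;
    PlanStage::StageState state;
    while ((state = stage->doWork(&id)) != PlanStage::IS_EOF) {
        if (state == PlanStage::ADVANCED) {
            // For returnOld/returnNew requests the member carries the affected measurement.
            WorkingSetMember* member = ws->get(id);
            Document measurement = member->doc.value();
        }
        // NEED_TIME and NEED_YIELD are handled by the executor as usual.
    }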
+ bool shouldContinueMatching = _isMultiWrite() || matchedMeasurements.empty(); + if (shouldContinueMatching && (!_residualPredicate || _residualPredicate->matchesBSON(measurement))) { - deletedMeasurements.push_back(measurement); + matchedMeasurements.push_back(measurement); } else { unchangedMeasurements.push_back(measurement); } } - status = _writeToTimeseriesBuckets( - id, unchangedMeasurements, deletedMeasurements, bucketFromMigrate); + auto isWriteSuccessful = false; + std::tie(isWriteSuccessful, status) = + _writeToTimeseriesBuckets(bucketFreer, + id, + std::move(unchangedMeasurements), + std::move(matchedMeasurements), + bucketFromMigrate); if (status != PlanStage::NEED_TIME) { *out = WorkingSet::INVALID_ID; - bucketFreer.dismiss(); + } else if (isWriteSuccessful && _measurementToReturn) { + // If the write was successful and if asked to return the old or new measurement, then + // '_measurementToReturn' must have been filled out and we can return it immediately. + _prepareToReturnMeasurement(*out); + status = PlanStage::ADVANCED; } return status; } void TimeseriesModifyStage::doRestoreStateRequiresCollection() { - const NamespaceString& ns = collection()->ns(); + const NamespaceString& ns = collectionPtr()->ns(); uassert(ErrorCodes::PrimarySteppedDown, - "Demoted from primary while removing from {}"_format(ns.ns()), + "Demoted from primary while removing from {}"_format(ns.toStringForErrorMsg()), !opCtx()->writesAreReplicated() || repl::ReplicationCoordinator::get(opCtx())->canAcceptWritesFor(opCtx(), ns)); diff --git a/src/mongo/db/exec/timeseries_modify.h b/src/mongo/db/exec/timeseries_modify.h index 88e687802ac20..9c761d6a43fc7 100644 --- a/src/mongo/db/exec/timeseries_modify.h +++ b/src/mongo/db/exec/timeseries_modify.h @@ -30,36 +30,131 @@ #pragma once -#include "mongo/db/exec/bucket_unpacker.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/exec/delete_stage.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" +#include "mongo/db/exec/update_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/exec/write_stage_common.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/field_ref_set.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/s/sharding_write_router.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/update/update_driver.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" namespace mongo { +struct TimeseriesModifyParams { + TimeseriesModifyParams(const DeleteStageParams* deleteParams) + : isUpdate(false), + isMulti(deleteParams->isMulti), + fromMigrate(deleteParams->fromMigrate), + isExplain(deleteParams->isExplain), + returnOld(deleteParams->returnDeleted), + stmtId(deleteParams->stmtId), + canonicalQuery(deleteParams->canonicalQuery) {} + + TimeseriesModifyParams(const UpdateStageParams* updateParams) + : isUpdate(true), + isMulti(updateParams->request->isMulti()), + fromMigrate(updateParams->request->source() == 
OperationSource::kFromMigrate), + isExplain(updateParams->request->explain()), + returnOld(updateParams->request->shouldReturnOldDocs()), + returnNew(updateParams->request->shouldReturnNewDocs()), + allowShardKeyUpdatesWithoutFullShardKeyInQuery( + updateParams->request->getAllowShardKeyUpdatesWithoutFullShardKeyInQuery()), + canonicalQuery(updateParams->canonicalQuery), + isFromOplogApplication(updateParams->request->isFromOplogApplication()), + updateDriver(updateParams->driver) { + tassert(7314203, + "timeseries updates should only have one stmtId", + updateParams->request->getStmtIds().size() == 1); + stmtId = updateParams->request->getStmtIds().front(); + } + + // Is this an update or a delete operation? + bool isUpdate; + + // Is this a multi update/delete? + bool isMulti; + + // Is this command part of a migrate operation that is essentially like a no-op when the + // cluster is observed by an external client. + bool fromMigrate; + + // Are we explaining a command rather than actually executing it? + bool isExplain; + + // Should we return the old measurement? + bool returnOld; + + // Should we return the new measurement? + bool returnNew = false; + + // Should we allow shard key updates without the full shard key in the query? + OptionalBool allowShardKeyUpdatesWithoutFullShardKeyInQuery; + + // The stmtId for this particular command. + StmtId stmtId = kUninitializedStmtId; + + // The parsed query predicate for this command. Not owned here. + CanonicalQuery* canonicalQuery; + + // True if this command was triggered by the application of an oplog entry. + bool isFromOplogApplication = false; + + // Contains the logic for applying mods to documents. Only present for updates. Not owned. Must + // outlive the TimeseriesModifyStage. + UpdateDriver* updateDriver = nullptr; +}; + /** * Unpacks time-series bucket documents and writes the modified documents. * * The stage processes one bucket at a time, unpacking all the measurements and writing the output * bucket in a single doWork() call. */ -class TimeseriesModifyStage final : public RequiresMutableCollectionStage { +class TimeseriesModifyStage : public RequiresWritableCollectionStage { public: static const char* kStageType; TimeseriesModifyStage(ExpressionContext* expCtx, - std::unique_ptr params, + TimeseriesModifyParams&& params, WorkingSet* ws, std::unique_ptr child, - const CollectionPtr& coll, + CollectionAcquisition coll, BucketUnpacker bucketUnpacker, - std::unique_ptr residualPredicate); + std::unique_ptr residualPredicate, + std::unique_ptr originalPredicate = nullptr); StageType stageType() const { return STAGE_TIMESERIES_MODIFY; } - bool isEOF() final; + bool isEOF() override; std::unique_ptr getStats(); @@ -69,12 +164,8 @@ class TimeseriesModifyStage final : public RequiresMutableCollectionStage { PlanStage::StageState doWork(WorkingSetID* id); - bool _isDeleteMulti() { - return _params->isMulti; - } - - bool _isDeleteOne() { - return !_isDeleteMulti(); + bool containsDotsAndDollarsField() const { + return _params.isUpdate && _params.updateDriver->containsDotsAndDollarsField(); } protected: @@ -84,14 +175,67 @@ class TimeseriesModifyStage final : public RequiresMutableCollectionStage { void doRestoreStateRequiresCollection() final; + /** + * Prepares returning the old or new measurement when requested so. + */ + void _prepareToReturnMeasurement(WorkingSetID& out); + + /** + * Gets the user-level shard key paths. 
+ */ + const std::vector>& _getUserLevelShardKeyPaths( + const ScopedCollectionDescription& collDesc); + + /** + * Gets immutable paths when the request is user-initiated and the timeseries collection is + * sharded and the request does not come from the router. + */ + const std::vector>& _getImmutablePaths(); + + // A user-initiated write is one which is not caused by oplog application and is not part of a + // chunk migration. + bool _isUserInitiatedUpdate; + + TimeseriesModifyParams _params; + + TimeseriesModifyStats _specificStats{}; + + // Stores the old measurement that is modified or the new measurement after update/upsert when + // requested to return it for the deleteOne or updateOne. + boost::optional _measurementToReturn = boost::none; + + // Original, untranslated and complete predicate. + std::unique_ptr _originalPredicate; + + // Temporary storage for _getImmutablePaths(). + std::vector> _immutablePaths; + private: + bool _isMultiWrite() const { + return _params.isMulti; + } + + bool _isSingletonWrite() const { + return !_isMultiWrite(); + } + + /** + * Applies update and returns the updated measurements. + */ + std::vector _applyUpdate(const std::vector& matchedMeasurements, + std::vector& unchangedMeasurements); + /** * Writes the modifications to a bucket. + * + * Returns the pair of (whether the write was successful, the stage state to propagate). */ - PlanStage::StageState _writeToTimeseriesBuckets( + template + std::pair _writeToTimeseriesBuckets( + ScopeGuard& bucketFreer, WorkingSetID bucketWsmId, - const std::vector& unchangedMeasurements, - const std::vector& deletedMeasurements, + std::vector&& unchangedMeasurements, + std::vector&& matchedMeasurements, bool bucketFromMigrate); /** @@ -109,7 +253,24 @@ class TimeseriesModifyStage final : public RequiresMutableCollectionStage { */ PlanStage::StageState _getNextBucket(WorkingSetID& id); - std::unique_ptr _params; + void _checkRestrictionsOnUpdatingShardKeyAreNotViolated( + const ScopedCollectionDescription& collDesc, const FieldRefSet& shardKeyPaths); + + void _checkUpdateChangesExistingShardKey(const BSONObj& newBucket, + const BSONObj& oldBucket, + const BSONObj& newMeasurement, + const BSONObj& oldMeasurement); + + void _checkUpdateChangesReshardingKey(const ShardingWriteRouter& shardingWriteRouter, + const BSONObj& newBucket, + const BSONObj& oldBucket, + const BSONObj& newMeasurement, + const BSONObj& oldMeasurementt); + + void _checkUpdateChangesShardKeyFields(const BSONObj& newBucket, + const BSONObj& oldBucket, + const BSONObj& newMeasurement, + const BSONObj& oldMeasurement); WorkingSet* _ws; @@ -119,7 +280,7 @@ class TimeseriesModifyStage final : public RequiresMutableCollectionStage { BucketUnpacker _bucketUnpacker; - // Determines the measurements to delete from this bucket, and by inverse, those to keep + // Determines the measurements to modify in this bucket, and by inverse, those to keep // unmodified. This predicate can be null if we have a meta-only or empty predicate on singleton // deletes or updates. std::unique_ptr _residualPredicate; @@ -134,8 +295,6 @@ class TimeseriesModifyStage final : public RequiresMutableCollectionStage { */ write_stage_common::PreWriteFilter _preWriteFilter; - TimeseriesModifyStats _specificStats{}; - // A pending retry to get to after a NEED_YIELD propagation and a new storage snapshot is // established. This can be set when a write fails or when a fetch fails. 
WorkingSetID _retryBucketId = WorkingSet::INVALID_ID; diff --git a/src/mongo/db/exec/timeseries_upsert.cpp b/src/mongo/db/exec/timeseries_upsert.cpp new file mode 100644 index 0000000000000..25655a34487ab --- /dev/null +++ b/src/mongo/db/exec/timeseries_upsert.cpp @@ -0,0 +1,237 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/exec/timeseries_upsert.h" + +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/document_validation.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/server_options.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/timeseries/timeseries_write_util.h" +#include "mongo/db/update/update_driver.h" +#include "mongo/db/update/update_util.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/would_change_owning_shard_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/uuid.h" + +namespace { + +const char idFieldName[] = "_id"; +const mongo::FieldRef idFieldRef(idFieldName); + +} // namespace + + +namespace mongo { + +TimeseriesUpsertStage::TimeseriesUpsertStage(ExpressionContext* expCtx, + TimeseriesModifyParams&& params, + WorkingSet* ws, + std::unique_ptr child, + CollectionAcquisition coll, + BucketUnpacker bucketUnpacker, + std::unique_ptr residualPredicate, + std::unique_ptr originalPredicate, + const UpdateRequest& request) + : TimeseriesModifyStage(expCtx, + std::move(params), + ws, + std::move(child), + coll, + std::move(bucketUnpacker), + std::move(residualPredicate), + 
std::move(originalPredicate)), + _request(request) { + // We should never create this stage for a non-upsert request. + tassert(7655100, "request must be an upsert", _params.isUpdate && _request.isUpsert()); +}; + +// We're done when updating is finished and we have either matched or inserted. +bool TimeseriesUpsertStage::isEOF() { + return TimeseriesModifyStage::isEOF() && + (_specificStats.nMeasurementsMatched > 0 || _specificStats.nMeasurementsUpserted > 0); +} + +PlanStage::StageState TimeseriesUpsertStage::doWork(WorkingSetID* out) { + if (isEOF()) { + return StageState::IS_EOF; + } + + // First, attempt to perform the update on a matching document. + auto updateState = TimeseriesModifyStage::doWork(out); + + // If the update returned anything other than EOF, just forward it along. There's a chance we + // still may find a document to update and will not have to insert anything. If it did return + // EOF and we do not need to insert a new document, return EOF immediately here. + if (updateState != PlanStage::IS_EOF || isEOF()) { + return updateState; + } + + // Since this is an insert, we will be logging it as such in the oplog. We don't need the + // driver's help to build the oplog record. We also set the 'nUpserted' stats counter here. + _params.updateDriver->setLogOp(false); + _specificStats.nMeasurementsUpserted = 1; + + // Generate the new document to be inserted. + _specificStats.objInserted = _produceNewDocumentForInsert(); + + // If this is an explain, skip performing the actual insert. + if (!_params.isExplain) { + _performInsert(_specificStats.objInserted); + } + + // We should always be EOF at this point. + tassert(7655101, "must be at EOF if we performed an upsert", isEOF()); + + if (!_params.returnNew) { + // If we don't need to return the inserted document, we're done. + return PlanStage::IS_EOF; + } + + // If we want to return the document we just inserted, create it as a WorkingSetMember. + _measurementToReturn = _specificStats.objInserted; + _prepareToReturnMeasurement(*out); + return PlanStage::ADVANCED; +} + +void TimeseriesUpsertStage::_performInsert(BSONObj newMeasurement) { + if (_isUserInitiatedUpdate) { + const auto& acq = collectionAcquisition(); + if (const auto& collDesc = acq.getShardingDescription(); collDesc.isSharded()) { + auto newBucket = + timeseries::makeBucketDocument({newMeasurement}, + acq.nss(), + *collectionPtr()->getTimeseriesOptions(), + collectionPtr()->getDefaultCollator()); + + // The shard key fields may not have arrays at any point along their paths. + update::assertPathsNotArray(mutablebson::Document{newBucket}, + collDesc.getKeyPatternFields()); + + const auto& collFilter = acq.getShardingFilter(); + invariant(collFilter); + + auto newShardKey = collDesc.getShardKeyPattern().extractShardKeyFromDoc(newBucket); + if (!collFilter->keyBelongsToMe(newShardKey)) { + // An attempt to upsert a document with a shard key value that belongs on + // another shard must either be a retryable write or inside a transaction. An + // upsert without a transaction number is legal if + // gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi is enabled because + // mongos will be able to start an internal transaction to handle the + // wouldChangeOwningShard error thrown below. 
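// [Editorial sketch, not part of the patch] Condensed restatement of the upsert fallback
// implemented in doWork() above: the inherited update plan runs first, and the insert
// path is entered only when that plan is exhausted without matching or upserting a
// measurement. Stats and oplog bookkeeping are elided; this is a reading aid, not the
// stage code.
auto updateState = TimeseriesModifyStage::doWork(out);
if (updateState != PlanStage::IS_EOF || isEOF()) {
    return updateState;  // still updating, or a match already satisfied the request
}
_specificStats.objInserted = _produceNewDocumentForInsert();
if (!_params.isExplain) {
    _performInsert(_specificStats.objInserted);
}
return _params.returnNew ? PlanStage::ADVANCED : PlanStage::IS_EOF;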
+ if (!feature_flags::gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassert(ErrorCodes::IllegalOperation, + "The upsert document could not be inserted onto the shard targeted " + "by the query, since its shard key belongs on a different shard. " + "Cross-shard upserts are only allowed when running in a " + "transaction or with retryWrites: true.", + opCtx()->getTxnNumber()); + } + uasserted(WouldChangeOwningShardInfo(_originalPredicate->serialize(), + newBucket, + true, // upsert + acq.nss(), + acq.uuid(), + newMeasurement), + "The document we are inserting belongs on a different shard"); + } + } + } + writeConflictRetry(opCtx(), "TimeseriesUpsert", collectionPtr()->ns(), [&] { + timeseries::performAtomicWritesForUpdate(opCtx(), + collectionPtr(), + RecordId{}, + boost::none, + {newMeasurement}, + _params.fromMigrate, + _params.stmtId); + }); +} + + +BSONObj TimeseriesUpsertStage::_produceNewDocumentForInsert() { + // Initialize immutable paths based on the shard key field(s). + _getImmutablePaths(); + + mutablebson::Document doc; + + if (_request.shouldUpsertSuppliedDocument()) { + update::generateNewDocumentFromSuppliedDoc(opCtx(), _immutablePaths, &_request, doc); + } else { + // When populating the document from the query for replacement updates, we should include + // the _id field. However, we don't want to block _id from being set/updated, so only + // include it in 'immutablePaths' for this step. + _immutablePaths.emplace_back(std::make_unique(idFieldName)); + uassertStatusOK(_params.updateDriver->populateDocumentWithQueryFields( + *_originalPredicate, _immutablePaths, doc)); + _immutablePaths.pop_back(); + + update::generateNewDocumentFromUpdateOp( + opCtx(), _immutablePaths, _params.updateDriver, doc); + } + + update::ensureIdFieldIsFirst(&doc, true); + + auto newDocument = doc.getObject(); + if (!DocumentValidationSettings::get(opCtx()).isInternalValidationDisabled()) { + uassert(7655103, + "Document to upsert is larger than {}"_format(BSONObjMaxUserSize), + newDocument.objsize() <= BSONObjMaxUserSize); + } + + return newDocument; +} + +} // namespace mongo diff --git a/src/mongo/db/exec/timeseries_upsert.h b/src/mongo/db/exec/timeseries_upsert.h new file mode 100644 index 0000000000000..63c5f4ce16a75 --- /dev/null +++ b/src/mongo/db/exec/timeseries_upsert.h @@ -0,0 +1,75 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. 
If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#pragma once + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" +#include "mongo/db/exec/timeseries_modify.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/shard_role.h" + +namespace mongo { + +/** + * Execution stage for timeseries update requests with {upsert:true}. This is a specialized + * TimeseriesModifyStage which, in the event that no documents match the update request's query, + * generates and inserts a new document into the collection. All logic related to the insertion + * phase is implemented by this class. + */ +class TimeseriesUpsertStage final : public TimeseriesModifyStage { +public: + TimeseriesUpsertStage(ExpressionContext* expCtx, + TimeseriesModifyParams&& params, + WorkingSet* ws, + std::unique_ptr child, + CollectionAcquisition coll, + BucketUnpacker bucketUnpacker, + std::unique_ptr residualPredicate, + std::unique_ptr originalPredicate, + const UpdateRequest& request); + + bool isEOF() final; + PlanStage::StageState doWork(WorkingSetID* id) final; + +private: + BSONObj _produceNewDocumentForInsert(); + void _performInsert(BSONObj newDocument); + + // The original update request. + const UpdateRequest& _request; +}; +} // namespace mongo diff --git a/src/mongo/db/exec/trial_period_utils.cpp b/src/mongo/db/exec/trial_period_utils.cpp index b158f297b4720..9388a0bfa30f8 100644 --- a/src/mongo/db/exec/trial_period_utils.cpp +++ b/src/mongo/db/exec/trial_period_utils.cpp @@ -27,11 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/trial_period_utils.h" +#include +#include +#include #include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/trial_period_utils.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" namespace mongo::trial_period { size_t getTrialPeriodMaxWorks(OperationContext* opCtx, diff --git a/src/mongo/db/exec/trial_period_utils.h b/src/mongo/db/exec/trial_period_utils.h index f8e4d4a725fdf..529890a289158 100644 --- a/src/mongo/db/exec/trial_period_utils.h +++ b/src/mongo/db/exec/trial_period_utils.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/repl/oplog.h" namespace mongo { class Collection; diff --git a/src/mongo/db/exec/trial_stage.cpp b/src/mongo/db/exec/trial_stage.cpp index 44a1d061ed0a3..6c7984d2d81b2 100644 --- a/src/mongo/db/exec/trial_stage.cpp +++ b/src/mongo/db/exec/trial_stage.cpp @@ -27,17 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/trial_stage.h" - -#include #include +#include +#include + +#include #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/exec/or.h" #include "mongo/db/exec/queued_data_stage.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/exec/trial_stage.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/exec/trial_stage.h b/src/mongo/db/exec/trial_stage.h index 674ee56cbf43e..226da1072c795 100644 --- a/src/mongo/db/exec/trial_stage.h +++ b/src/mongo/db/exec/trial_stage.h @@ -29,8 +29,16 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/stage_types.h" namespace mongo { diff --git a/src/mongo/db/exec/unpack_timeseries_bucket.cpp b/src/mongo/db/exec/unpack_timeseries_bucket.cpp index bbd4b61cf1258..7134ff878e741 100644 --- a/src/mongo/db/exec/unpack_timeseries_bucket.cpp +++ b/src/mongo/db/exec/unpack_timeseries_bucket.cpp @@ -29,6 +29,14 @@ #include "mongo/db/exec/unpack_timeseries_bucket.h" +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/snapshot.h" + namespace mongo { namespace { diff --git a/src/mongo/db/exec/unpack_timeseries_bucket.h b/src/mongo/db/exec/unpack_timeseries_bucket.h index 84a1392369eee..c034ecb274a76 100644 --- a/src/mongo/db/exec/unpack_timeseries_bucket.h +++ b/src/mongo/db/exec/unpack_timeseries_bucket.h @@ -29,8 +29,14 @@ #pragma once -#include "mongo/db/exec/bucket_unpacker.h" +#include + #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/stage_types.h" namespace mongo { /** diff --git a/src/mongo/db/exec/update_stage.cpp b/src/mongo/db/exec/update_stage.cpp index d2cec8d96ba3d..e9490e4a2c37b 100644 --- a/src/mongo/db/exec/update_stage.cpp +++ b/src/mongo/db/exec/update_stage.cpp @@ -29,25 +29,63 @@ #include "mongo/db/exec/update_stage.h" -#include - -#include "mongo/base/status_with.h" -#include "mongo/bson/mutable/algorithm.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/document_validation.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" -#include "mongo/db/query/collection_query_info.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_impl.h" #include 
"mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/update/path_support.h" #include "mongo/db/update/update_oplog_entry_serialization.h" #include "mongo/db/update/update_util.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/would_change_owning_shard_exception.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -93,7 +131,7 @@ CollectionUpdateArgs::StoreDocOption getStoreDocMode(const UpdateRequest& update UpdateStage::UpdateStage(ExpressionContext* expCtx, const UpdateStageParams& params, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child) : UpdateStage(expCtx, params, ws, collection) { // We should never reach here if the request is an upsert. @@ -105,16 +143,15 @@ UpdateStage::UpdateStage(ExpressionContext* expCtx, UpdateStage::UpdateStage(ExpressionContext* expCtx, const UpdateStageParams& params, WorkingSet* ws, - const CollectionPtr& collection) - : RequiresMutableCollectionStage(kStageType.rawData(), expCtx, collection), + CollectionAcquisition collection) + : RequiresWritableCollectionStage(kStageType.rawData(), expCtx, collection), _params(params), _ws(ws), _doc(params.driver->getDocument()), - _cachedShardingCollectionDescription(collection->ns()), _idRetrying(WorkingSet::INVALID_ID), _idReturning(WorkingSet::INVALID_ID), _updatedRecordIds(params.request->isMulti() ? new RecordIdSet() : nullptr), - _preWriteFilter(opCtx(), collection->ns()) { + _preWriteFilter(opCtx(), collection.nss()) { // Should the modifiers validate their embedded docs via storage_validation::scanDocument()? // Only user updates should be checked. Any system or replication stuff should pass through. @@ -144,7 +181,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, // only enable in-place mutations if the underlying storage engine offers support for // writing damage events. _doc.reset(oldObjValue, - (collection()->updateWithDamagesSupported() + (collectionPtr()->updateWithDamagesSupported() ? mutablebson::Document::kInPlaceEnabled : mutablebson::Document::kInPlaceDisabled)); @@ -157,11 +194,8 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, FieldRefSet immutablePaths; if (_isUserInitiatedWrite) { - // Documents coming directly from users should be validated for storage. It is safe to - // access the CollectionShardingState in this write context and to throw SSV if the sharding - // metadata has not been initialized. - const auto& collDesc = - _cachedShardingCollectionDescription.getCollectionDescription(opCtx()); + // Documents coming directly from users should be validated for storage. 
+ const auto& collDesc = collectionAcquisition().getShardingDescription(); if (collDesc.isSharded() && !OperationShardingState::isComingFromRouter(opCtx())) { immutablePaths.fillFrom(collDesc.getKeyPatternFields()); @@ -186,7 +220,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, matchDetails.requestElemMatchKey(); dassert(cq); - verify(cq->root()->matchesBSON(oldObjValue, &matchDetails)); + MONGO_verify(cq->root()->matchesBSON(oldObjValue, &matchDetails)); std::string matchedField; if (matchDetails.hasElemMatchKey()) @@ -208,7 +242,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, // Skip adding _id field if the collection is capped (since capped collection documents can // neither grow nor shrink). - const auto createIdField = !collection()->isCapped(); + const auto createIdField = !collectionPtr()->isCapped(); // Ensure _id is first if it exists, and generate a new OID if appropriate. update::ensureIdFieldIsFirst(&_doc, createIdField); @@ -238,8 +272,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, args.sampleId = request->getSampleId(); args.update = logObj; if (_isUserInitiatedWrite) { - const auto& collDesc = - _cachedShardingCollectionDescription.getCollectionDescription(opCtx()); + const auto& collDesc = collectionAcquisition().getShardingDescription(); args.criteria = collDesc.extractDocumentKey(oldObjValue); } else { const auto docId = oldObjValue[idFieldName]; @@ -256,6 +289,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, args.retryableWrite = write_stage_common::isRetryableWrite(opCtx()); + bool indexesAffected = false; if (inPlace) { if (!request->explain()) { const RecordData oldRec(oldObj.value().objdata(), oldObj.value().objsize()); @@ -270,12 +304,13 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, WriteUnitOfWork wunit(opCtx()); newObj = uassertStatusOK(collection_internal::updateDocumentWithDamages( opCtx(), - collection(), + collectionPtr(), recordId, oldObj, source, _damages, diff.has_value() ? &*diff : collection_internal::kUpdateAllIndexes, + &indexesAffected, _params.opDebug, &args)); invariant(oldObj.snapshotId() == opCtx()->recoveryUnit()->getSnapshotId()); @@ -301,11 +336,12 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, WriteUnitOfWork wunit(opCtx()); collection_internal::updateDocument( opCtx(), - collection(), + collectionPtr(), recordId, oldObj, newObj, diff.has_value() ? &*diff : collection_internal::kUpdateAllIndexes, + &indexesAffected, _params.opDebug, &args); invariant(oldObj.snapshotId() == opCtx()->recoveryUnit()->getSnapshotId()); @@ -317,7 +353,7 @@ BSONObj UpdateStage::transformAndUpdate(const Snapshotted& oldObj, // For an example, see the comment above near declaration of '_updatedRecordIds'. // // This must be done after the wunit commits so we are sure we won't be rolling back. - if (_updatedRecordIds && driver->modsAffectIndices()) { + if (_updatedRecordIds && indexesAffected) { _updatedRecordIds->insert(recordId); } } @@ -351,7 +387,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) { } boost::optional unReplBlock; - if (collection()->ns().isImplicitlyReplicated() && !_isUserInitiatedWrite) { + if (collectionPtr()->ns().isImplicitlyReplicated() && !_isUserInitiatedWrite) { // Implictly replicated collections do not replicate updates. // However, user-initiated writes and some background maintenance tasks are allowed // to replicate as they cannot be derived from the oplog. 
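// [Editorial sketch, not part of the patch] The write paths in the hunk above now report
// index impact through an out-parameter instead of querying the update driver. A
// caller-side sketch with the delta-diff handling elided:
bool indexesAffected = false;
collection_internal::updateDocument(opCtx(),
                                    collectionPtr(),
                                    recordId,
                                    oldObj,
                                    newObj,
                                    collection_internal::kUpdateAllIndexes,
                                    &indexesAffected,
                                    _params.opDebug,
                                    &args);
if (_updatedRecordIds && indexesAffected) {
    // Only multi-updates that touched index keys need to remember the updated RecordIds.
    _updatedRecordIds->insert(recordId);
}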
@@ -413,10 +449,9 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) { const auto ensureStillMatchesRet = handlePlanStageYield( expCtx(), "UpdateStage ensureStillMatches", - collection()->ns().ns(), [&] { docStillMatches = write_stage_common::ensureStillMatches( - collection(), opCtx(), _ws, id, _params.canonicalQuery); + collectionPtr(), opCtx(), _ws, id, _params.canonicalQuery); return PlanStage::NEED_TIME; }, [&] { @@ -445,7 +480,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) { auto [immediateReturnStageState, fromMigrate] = _preWriteFilter.checkIfNotWritable( member->doc.value(), "update"_sd, - collection()->ns(), + collectionPtr()->ns(), [&](const ExceptionFor& ex) { planExecutorShardingCriticalSectionFuture(opCtx()) = ex->getCriticalSectionSignal(); @@ -468,7 +503,6 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) { handlePlanStageYield( expCtx(), "UpdateStage saveState", - collection()->ns().ns(), [&] { child()->saveState(); return PlanStage::NEED_TIME /* unused */; @@ -486,7 +520,6 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) { const auto updateRet = handlePlanStageYield( expCtx(), "UpdateStage update", - collection()->ns().ns(), [&] { // Do the update, get us the new version of the doc. newObj = transformAndUpdate({oldSnapshot, oldObj}, recordId, writeToOrphan); @@ -533,33 +566,40 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) { // Restore state after modification. As restoreState may restore (recreate) cursors, make // sure to restore the state outside of the WritUnitOfWork. - const auto restoreStateRet = handlePlanStageYield( - expCtx(), - "UpdateStage restoreState", - collection()->ns().ns(), - [&] { - child()->restoreState(&collection()); - return PlanStage::NEED_TIME; - }, - [&] { - // yieldHandler - // Note we don't need to retry updating anything in this case since the update - // already was committed. However, we still need to return the updated document (if - // it was requested). - if (_params.request->shouldReturnAnyDocs()) { - // member->obj should refer to the document we want to return. - invariant(member->getState() == WorkingSetMember::OWNED_OBJ); - - _idReturning = id; - // Keep this member around so that we can return it on the next - // work() call. - memberFreer.dismiss(); - } - *out = WorkingSet::INVALID_ID; - }); + // + // If this stage is already exhausted it won't use its children stages anymore and therefore + // there's no need to restore them. Avoid restoring them so that there's no possibility of + // requiring yielding at this point. Restoring from yield could fail due to a sharding + // placement change. Throwing a StaleConfig error is undesirable after an "update one" + // operation has already performed a write because the router would retry. + if (!isEOF()) { + const auto restoreStateRet = handlePlanStageYield( + expCtx(), + "UpdateStage restoreState", + [&] { + child()->restoreState(&collectionPtr()); + return PlanStage::NEED_TIME; + }, + [&] { + // yieldHandler + // Note we don't need to retry updating anything in this case since the update + // already was committed. However, we still need to return the updated document + // (if it was requested). + if (_params.request->shouldReturnAnyDocs()) { + // member->obj should refer to the document we want to return. + invariant(member->getState() == WorkingSetMember::OWNED_OBJ); + + _idReturning = id; + // Keep this member around so that we can return it on the next + // work() call. 
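// [Editorial aside, not part of the patch] The conditional restore introduced above,
// reduced to the rule it encodes: once a single-document update has already committed
// its write the stage is EOF, its children are no longer needed, and restoring them
// could throw StaleConfig on a placement change after the write has already happened.
//
//     if (!isEOF()) {
//         child()->restoreState(&collectionPtr());
//     }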
+ memberFreer.dismiss(); + } + *out = WorkingSet::INVALID_ID; + }); - if (restoreStateRet != PlanStage::NEED_TIME) { - return restoreStateRet; + if (restoreStateRet != PlanStage::NEED_TIME) { + return restoreStateRet; + } } if (_params.request->shouldReturnAnyDocs()) { @@ -571,7 +611,7 @@ PlanStage::StageState UpdateStage::doWork(WorkingSetID* out) { return PlanStage::ADVANCED; } - return PlanStage::NEED_TIME; + return isEOF() ? PlanStage::IS_EOF : PlanStage::NEED_TIME; } else if (PlanStage::IS_EOF == status) { // The child is out of results, and therefore so are we. return PlanStage::IS_EOF; @@ -593,16 +633,19 @@ void UpdateStage::doRestoreStateRequiresCollection() { if (userInitiatedWritesAndNotPrimary) { uasserted(ErrorCodes::PrimarySteppedDown, str::stream() << "Demoted from primary while performing update on " - << nsString.ns()); + << nsString.toStringForErrorMsg()); } - // The set of indices may have changed during yield. Make sure that the update driver has up to - // date index information. - const auto& updateIndexData = CollectionQueryInfo::get(collection()).getIndexKeys(opCtx()); - _params.driver->refreshIndexKeys(&updateIndexData); + // Single updates never yield after having already modified one document. Otherwise restore + // could fail (e.g. due to a sharding placement change) and we'd fail to report in the response + // the already modified documents. + const bool singleUpdateAndAlreadyWrote = !_params.request->isMulti() && + (_specificStats.nModified > 0 || _specificStats.nUpserted > 0); + tassert(7711601, + "Single update should never restore after having already modified one document.", + !singleUpdateAndAlreadyWrote || request.explain()); _preWriteFilter.restoreState(); - _cachedShardingCollectionDescription.restoreState(); } std::unique_ptr UpdateStage::getStats() { @@ -696,9 +739,9 @@ void UpdateStage::_checkRestrictionsOnUpdatingShardKeyAreNotViolated( void UpdateStage::checkUpdateChangesReshardingKey(const ShardingWriteRouter& shardingWriteRouter, const BSONObj& newObj, const Snapshotted& oldObj) { - const auto& collDesc = shardingWriteRouter.getCollDesc(); + const auto& collDesc = collectionAcquisition().getShardingDescription(); - auto reshardingKeyPattern = collDesc->getReshardingKeyIfShouldForwardOps(); + auto reshardingKeyPattern = collDesc.getReshardingKeyIfShouldForwardOps(); if (!reshardingKeyPattern) return; @@ -708,37 +751,28 @@ void UpdateStage::checkUpdateChangesReshardingKey(const ShardingWriteRouter& sha if (newShardKey.binaryEqual(oldShardKey)) return; - FieldRefSet shardKeyPaths(collDesc->getKeyPatternFields()); - _checkRestrictionsOnUpdatingShardKeyAreNotViolated(*collDesc, shardKeyPaths); + FieldRefSet shardKeyPaths(collDesc.getKeyPatternFields()); + _checkRestrictionsOnUpdatingShardKeyAreNotViolated(collDesc, shardKeyPaths); auto oldRecipShard = *shardingWriteRouter.getReshardingDestinedRecipient(oldObj.value()); auto newRecipShard = *shardingWriteRouter.getReshardingDestinedRecipient(newObj); - uassert( - WouldChangeOwningShardInfo( - oldObj.value(), newObj, false /* upsert */, collection()->ns(), collection()->uuid()), - "This update would cause the doc to change owning shards under the new shard key", - oldRecipShard == newRecipShard); + uassert(WouldChangeOwningShardInfo(oldObj.value(), + newObj, + false /* upsert */, + collectionPtr()->ns(), + collectionPtr()->uuid()), + "This update would cause the doc to change owning shards under the new shard key", + oldRecipShard == newRecipShard); } void 
UpdateStage::checkUpdateChangesShardKeyFields(const boost::optional& newObjCopy, const Snapshotted& oldObj) { - ShardingWriteRouter shardingWriteRouter( - opCtx(), collection()->ns(), Grid::get(opCtx())->catalogCache()); - - auto* const css = shardingWriteRouter.getCss(); - - // css can be null when this is a config server. - if (css == nullptr) { - return; - } - - const auto collDesc = css->getCollectionDescription(opCtx()); - // Calling mutablebson::Document::getObject() renders a full copy of the updated document. This // can be expensive for larger documents, so we skip calling it when the collection isn't even // sharded. - if (!collDesc.isSharded()) { + const auto isSharded = collectionAcquisition().getShardingDescription().isSharded(); + if (!isSharded) { return; } @@ -746,15 +780,15 @@ void UpdateStage::checkUpdateChangesShardKeyFields(const boost::optionalns()); + checkUpdateChangesExistingShardKey(newObj, oldObj); checkUpdateChangesReshardingKey(shardingWriteRouter, newObj, oldObj); } -void UpdateStage::checkUpdateChangesExistingShardKey(const ShardingWriteRouter& shardingWriteRouter, - const BSONObj& newObj, +void UpdateStage::checkUpdateChangesExistingShardKey(const BSONObj& newObj, const Snapshotted& oldObj) { - const auto& collDesc = shardingWriteRouter.getCollDesc(); - const auto& shardKeyPattern = collDesc->getShardKeyPattern(); + const auto& collDesc = collectionAcquisition().getShardingDescription(); + const auto& shardKeyPattern = collDesc.getShardKeyPattern(); auto oldShardKey = shardKeyPattern.extractShardKeyFromDoc(oldObj.value()); auto newShardKey = shardKeyPattern.extractShardKeyFromDoc(newObj); @@ -766,25 +800,24 @@ void UpdateStage::checkUpdateChangesExistingShardKey(const ShardingWriteRouter& return; } - FieldRefSet shardKeyPaths(collDesc->getKeyPatternFields()); + FieldRefSet shardKeyPaths(collDesc.getKeyPatternFields()); // Assert that the updated doc has no arrays or array descendants for the shard key fields. update::assertPathsNotArray(_doc, shardKeyPaths); - _checkRestrictionsOnUpdatingShardKeyAreNotViolated(*collDesc, shardKeyPaths); + _checkRestrictionsOnUpdatingShardKeyAreNotViolated(collDesc, shardKeyPaths); // At this point we already asserted that the complete shardKey have been specified in the // query, this implies that mongos is not doing a broadcast update and that it attached a // shardVersion to the command. Thus it is safe to call getOwnershipFilter - auto* const css = shardingWriteRouter.getCss(); - const auto collFilter = css->getOwnershipFilter( - opCtx(), CollectionShardingState::OrphanCleanupPolicy::kAllowOrphanCleanup); + const auto& collFilter = collectionAcquisition().getShardingFilter(); + invariant(collFilter); // If the shard key of an orphan document is allowed to change, and the document is allowed to // become owned by the shard, the global uniqueness assumption for _id values would be violated. 
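// [Editorial sketch, not part of the patch] Core steps of the shard-key change detection
// above, using the same accessors as the surrounding code; everything past the early
// return (restriction checks, ownership filter, WouldChangeOwningShardInfo) is elided.
const auto& shardKeyPattern = collDesc.getShardKeyPattern();
auto oldShardKey = shardKeyPattern.extractShardKeyFromDoc(oldObj.value());
auto newShardKey = shardKeyPattern.extractShardKeyFromDoc(newObj);
if (newShardKey.binaryEqual(oldShardKey)) {
    return;  // the update leaves the shard key untouched, nothing to enforce
}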
- invariant(collFilter.keyBelongsToMe(oldShardKey)); + invariant(collFilter->keyBelongsToMe(oldShardKey)); - if (!collFilter.keyBelongsToMe(newShardKey)) { + if (!collFilter->keyBelongsToMe(newShardKey)) { if (MONGO_unlikely(hangBeforeThrowWouldChangeOwningShard.shouldFail())) { LOGV2(20605, "Hit hangBeforeThrowWouldChangeOwningShard failpoint"); hangBeforeThrowWouldChangeOwningShard.pauseWhileSet(opCtx()); @@ -793,8 +826,8 @@ void UpdateStage::checkUpdateChangesExistingShardKey(const ShardingWriteRouter& uasserted(WouldChangeOwningShardInfo(oldObj.value(), newObj, false /* upsert */, - collection()->ns(), - collection()->uuid()), + collectionPtr()->ns(), + collectionPtr()->uuid()), "This update would cause the doc to change owning shards"); } } diff --git a/src/mongo/db/exec/update_stage.h b/src/mongo/db/exec/update_stage.h index 5120ffccfe467..86a494252f255 100644 --- a/src/mongo/db/exec/update_stage.h +++ b/src/mongo/db/exec/update_stage.h @@ -29,12 +29,35 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/curop.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/requires_collection_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/exec/write_stage_common.h" +#include "mongo/db/field_ref_set.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/s/sharding_write_router.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/update/update_driver.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { @@ -85,7 +108,7 @@ struct UpdateStageParams { * * Callers of doWork() must be holding a write lock. */ -class UpdateStage : public RequiresMutableCollectionStage { +class UpdateStage : public RequiresWritableCollectionStage { UpdateStage(const UpdateStage&) = delete; UpdateStage& operator=(const UpdateStage&) = delete; @@ -95,7 +118,7 @@ class UpdateStage : public RequiresMutableCollectionStage { UpdateStage(ExpressionContext* expCtx, const UpdateStageParams& params, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child); bool isEOF() override; @@ -117,7 +140,7 @@ class UpdateStage : public RequiresMutableCollectionStage { UpdateStage(ExpressionContext* expCtx, const UpdateStageParams& params, WorkingSet* ws, - const CollectionPtr& collection); + CollectionAcquisition collection); void doSaveStateRequiresCollection() final { _preWriteFilter.saveState(); @@ -144,9 +167,6 @@ class UpdateStage : public RequiresMutableCollectionStage { mutablebson::Document& _doc; mutablebson::DamageVector _damages; - // Cached collection sharding description. It is reset when restoring from a yield. - write_stage_common::CachedShardingDescription _cachedShardingCollectionDescription; - private: /** * Computes the result of applying mods to the document 'oldObj' at RecordId 'recordId' in @@ -186,8 +206,7 @@ class UpdateStage : public RequiresMutableCollectionStage { * been updated to a value belonging to a chunk that is not owned by this shard. We cannot apply * this update atomically. 
*/ - void checkUpdateChangesExistingShardKey(const ShardingWriteRouter& shardingWriteRouter, - const BSONObj& newObj, + void checkUpdateChangesExistingShardKey(const BSONObj& newObj, const Snapshotted& oldObj); void checkUpdateChangesReshardingKey(const ShardingWriteRouter& shardingWriteRouter, diff --git a/src/mongo/db/exec/upsert_stage.cpp b/src/mongo/db/exec/upsert_stage.cpp index 502b68b07a47a..76ff0a289e833 100644 --- a/src/mongo/db/exec/upsert_stage.cpp +++ b/src/mongo/db/exec/upsert_stage.cpp @@ -29,18 +29,52 @@ #include "mongo/db/exec/upsert_stage.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/mutable/const_element.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" -#include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/update/storage_validation.h" +#include "mongo/db/update/update_driver.h" #include "mongo/db/update/update_util.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/would_change_owning_shard_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/safe_num.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -58,7 +92,7 @@ const FieldRef idFieldRef(idFieldName); UpsertStage::UpsertStage(ExpressionContext* expCtx, const UpdateStageParams& params, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child) : UpdateStage(expCtx, params, ws, collection) { // We should never create this stage for a non-upsert request. @@ -77,7 +111,7 @@ PlanStage::StageState UpsertStage::doWork(WorkingSetID* out) { } boost::optional unReplBlock; - if (collection()->ns().isImplicitlyReplicated()) { + if (collectionPtr()->ns().isImplicitlyReplicated()) { // Implictly replicated collections do not replicate updates. unReplBlock.emplace(opCtx()); } @@ -131,18 +165,14 @@ void UpsertStage::_performInsert(BSONObj newDocument) { // 'q' field belong to this shard, but those in the 'u' field do not. In this case we need to // throw so that MongoS can target the insert to the correct shard. 
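// [Editorial sketch, not part of the patch] The substitution this patch applies across
// the write stages: sharding metadata comes from the stage's CollectionAcquisition
// rather than from CollectionShardingState. 'someDoc' is a hypothetical document; the
// accessors mirror those used in the hunks below.
const auto& acq = collectionAcquisition();
const auto& collDesc = acq.getShardingDescription();
if (collDesc.isSharded()) {
    const auto& collFilter = acq.getShardingFilter();
    invariant(collFilter);
    // Ownership checks go through the acquisition's filter rather than a fresh CSS lookup.
    bool ownedHere = collFilter->keyBelongsToMe(
        collDesc.getShardKeyPattern().extractShardKeyFromDoc(someDoc));
}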
if (_isUserInitiatedWrite) { - const auto& collDesc = - _cachedShardingCollectionDescription.getCollectionDescription(opCtx()); + const auto& collDesc = collectionAcquisition().getShardingDescription(); if (collDesc.isSharded()) { - auto scopedCss = CollectionShardingState::assertCollectionLockedAndAcquire( - opCtx(), collection()->ns()); - auto collFilter = scopedCss->getOwnershipFilter( - opCtx(), CollectionShardingState::OrphanCleanupPolicy::kAllowOrphanCleanup); - const ShardKeyPattern& shardKeyPattern = collFilter.getShardKeyPattern(); - auto newShardKey = shardKeyPattern.extractShardKeyFromDoc(newDocument); - - if (!collFilter.keyBelongsToMe(newShardKey)) { + const auto& collFilter = collectionAcquisition().getShardingFilter(); + invariant(collFilter); + auto newShardKey = collDesc.getShardKeyPattern().extractShardKeyFromDoc(newDocument); + + if (!collFilter->keyBelongsToMe(newShardKey)) { // An attempt to upsert a document with a shard key value that belongs on another // shard must either be a retryable write or inside a transaction. // An upsert without a transaction number is legal if @@ -162,8 +192,8 @@ void UpsertStage::_performInsert(BSONObj newDocument) { uasserted(WouldChangeOwningShardInfo(_params.request->getQuery(), newDocument, true /* upsert */, - collection()->ns(), - collection()->uuid()), + collectionPtr()->ns(), + collectionPtr()->uuid()), "The document we are inserting belongs on a different shard"); } } @@ -174,20 +204,20 @@ void UpsertStage::_performInsert(BSONObj newDocument) { &hangBeforeUpsertPerformsInsert, opCtx(), "hangBeforeUpsertPerformsInsert"); } - writeConflictRetry(opCtx(), "upsert", collection()->ns().ns(), [&] { + writeConflictRetry(opCtx(), "upsert", collectionPtr()->ns(), [&] { WriteUnitOfWork wunit(opCtx()); InsertStatement insertStmt(_params.request->getStmtIds(), newDocument); auto replCoord = repl::ReplicationCoordinator::get(opCtx()); - if (collection()->isCapped() && - !replCoord->isOplogDisabledFor(opCtx(), collection()->ns())) { + if (collectionPtr()->isCapped() && + !replCoord->isOplogDisabledFor(opCtx(), collectionPtr()->ns())) { auto oplogInfo = LocalOplogInfo::get(opCtx()); auto oplogSlots = oplogInfo->getNextOpTimes(opCtx(), /*batchSize=*/1); insertStmt.oplogSlot = oplogSlots.front(); } uassertStatusOK(collection_internal::insertDocument(opCtx(), - collection(), + collectionPtr(), insertStmt, _params.opDebug, _params.request->source() == @@ -203,8 +233,7 @@ BSONObj UpsertStage::_produceNewDocumentForInsert() { FieldRefSet shardKeyPaths, immutablePaths; if (_isUserInitiatedWrite) { // Obtain the collection description. This will be needed to compute the shardKey paths. - const auto& collDesc = - _cachedShardingCollectionDescription.getCollectionDescription(opCtx()); + const auto& collDesc = collectionAcquisition().getShardingDescription(); // If the collection is sharded, add all fields from the shard key to the 'shardKeyPaths' // set. 
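// [Editorial sketch, not part of the patch] Shape of the insert performed above: the
// write is wrapped in writeConflictRetry(), and capped collections with the oplog
// enabled reserve an oplog slot before inserting. Arguments are abbreviated and the
// fromMigrate flag is shown as a placeholder.
writeConflictRetry(opCtx(), "upsert", collectionPtr()->ns(), [&] {
    WriteUnitOfWork wunit(opCtx());
    InsertStatement insertStmt(_params.request->getStmtIds(), newDocument);
    // ... reserve an oplog slot here for capped collections, as in the code above ...
    uassertStatusOK(collection_internal::insertDocument(
        opCtx(), collectionPtr(), insertStmt, _params.opDebug, /*fromMigrate=*/false));
    wunit.commit();
});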
diff --git a/src/mongo/db/exec/upsert_stage.h b/src/mongo/db/exec/upsert_stage.h index d3a1b1671c422..e4acb578e70c3 100644 --- a/src/mongo/db/exec/upsert_stage.h +++ b/src/mongo/db/exec/upsert_stage.h @@ -29,7 +29,14 @@ #pragma once +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/update_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/field_ref_set.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/shard_role.h" namespace mongo { @@ -54,7 +61,7 @@ class UpsertStage final : public UpdateStage { UpsertStage(ExpressionContext* expCtx, const UpdateStageParams& params, WorkingSet* ws, - const CollectionPtr& collection, + CollectionAcquisition collection, PlanStage* child); bool isEOF() final; diff --git a/src/mongo/db/exec/working_set.cpp b/src/mongo/db/exec/working_set.cpp index cf7cfaea13155..1572a1141c1ac 100644 --- a/src/mongo/db/exec/working_set.cpp +++ b/src/mongo/db/exec/working_set.cpp @@ -29,9 +29,13 @@ #include "mongo/db/exec/working_set.h" +#include + +#include +#include + +#include "mongo/base/data_type_endian.h" #include "mongo/db/bson/dotted_path_support.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/service_context.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -76,8 +80,8 @@ WorkingSetID WorkingSet::allocate() { void WorkingSet::free(WorkingSetID i) { MemberHolder& holder = _data[i]; - verify(i < _data.size()); // ID has been allocated. - verify(holder.nextFreeOrSelf == i); // ID currently in use. + MONGO_verify(i < _data.size()); // ID has been allocated. + MONGO_verify(holder.nextFreeOrSelf == i); // ID currently in use. // Free resources and push this WSM to the head of the freelist. 
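// [Editorial sketch, not part of the patch] The freelist invariants that the MONGO_verify
// calls above protect, shown as a usage sketch: an ID handed out by allocate() must be
// freed exactly once, after which it may be recycled by a later allocate().
WorkingSet ws;
WorkingSetID id = ws.allocate();
WorkingSetMember* member = ws.get(id);
member->recordId = RecordId(1);
ws.free(id);  // freeing twice, or freeing an ID that was never allocated, trips MONGO_verify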
holder.member.clear(); diff --git a/src/mongo/db/exec/working_set.h b/src/mongo/db/exec/working_set.h index 13688799b3b22..22dc8628b9ecf 100644 --- a/src/mongo/db/exec/working_set.h +++ b/src/mongo/db/exec/working_set.h @@ -29,15 +29,29 @@ #pragma once +#include +#include #include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/jsobj.h" #include "mongo/db/record_id.h" #include "mongo/db/storage/snapshot.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" namespace mongo { @@ -76,7 +90,7 @@ struct IndexKeyDatum { while (keyPatternIt.more()) { BSONElement keyPatternElt = keyPatternIt.next(); - verify(keyDataIt.more()); + MONGO_verify(keyDataIt.more()); BSONElement keyDataElt = keyDataIt.next(); if (field == keyPatternElt.fieldName()) diff --git a/src/mongo/db/exec/working_set_common.cpp b/src/mongo/db/exec/working_set_common.cpp index 513f020358d06..e91c8820be4e0 100644 --- a/src/mongo/db/exec/working_set_common.cpp +++ b/src/mongo/db/exec/working_set_common.cpp @@ -28,24 +28,57 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/working_set_common.h" - +#include #include - -#include "mongo/bson/simple_bsonobj_comparator.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/health_log_gen.h" #include "mongo/db/catalog/health_log_interface.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/exec/working_set_common.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/db/query/canonical_query.h" -#include "mongo/db/service_context.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/execution_context.h" #include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/shared_buffer_fragment.h" #include "mongo/util/stacktrace.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -135,9 +168,18 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx, HealthLogInterface::get(opCtx)->log(entry); + auto options = [&] { + if 
(opCtx->recoveryUnit()->getDataCorruptionDetectionMode() == + DataCorruptionDetectionMode::kThrow) { + return logv2::LogOptions{ + logv2::UserAssertAfterLog(ErrorCodes::DataCorruptionDetected)}; + } else { + return logv2::LogOptions(logv2::LogComponent::kAutomaticDetermination); + } + }(); LOGV2_ERROR_OPTIONS( 4615603, - {logv2::UserAssertAfterLog(ErrorCodes::DataCorruptionDetected)}, + options, "Erroneous index key found with reference to non-existent record id. Consider " "dropping and then re-creating the index and then running the validate command " "on the collection.", @@ -167,7 +209,7 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx, } auto keys = executionCtx.keys(); - SharedBufferFragmentBuilder pool(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder pool(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // There's no need to compute the prefixes of the indexed fields that cause the // index to be multikey when ensuring the keyData is still valid. KeyStringSet* multikeyMetadataKeys = nullptr; @@ -176,10 +218,11 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx, auto desc = collection->getIndexCatalog()->findIndexByIdent(opCtx, indexIdent); invariant(desc, str::stream() << "Index entry not found for index with ident " << indexIdent - << " on collection " << collection->ns()); + << " on collection " << collection->ns().toStringForErrorMsg()); auto* iam = desc->getEntry()->accessMethod()->asSortedData(); iam->getKeys(opCtx, collection, + desc->getEntry(), pool, member->doc.value().toBson(), InsertDeleteOptions::ConstraintEnforcementMode::kEnforceConstraints, @@ -188,10 +231,10 @@ bool WorkingSetCommon::fetch(OperationContext* opCtx, multikeyMetadataKeys, multikeyPaths, member->recordId); - KeyString::HeapBuilder keyString(iam->getSortedDataInterface()->getKeyStringVersion(), - memberKey.keyData, - iam->getSortedDataInterface()->getOrdering(), - member->recordId); + key_string::HeapBuilder keyString(iam->getSortedDataInterface()->getKeyStringVersion(), + memberKey.keyData, + iam->getSortedDataInterface()->getOrdering(), + member->recordId); if (!keys->count(keyString.release())) { // document would no longer be at this position in the index. return false; diff --git a/src/mongo/db/exec/working_set_test.cpp b/src/mongo/db/exec/working_set_test.cpp index 9204f74f65238..addbc83547870 100644 --- a/src/mongo/db/exec/working_set_test.cpp +++ b/src/mongo/db/exec/working_set_test.cpp @@ -27,18 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" #include "mongo/db/record_id_helpers.h" #include "mongo/db/storage/snapshot.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/exec/write_stage_common.cpp b/src/mongo/db/exec/write_stage_common.cpp index 11670a420d624..adb2b88a59ba9 100644 --- a/src/mongo/db/exec/write_stage_common.cpp +++ b/src/mongo/db/exec/write_stage_common.cpp @@ -29,19 +29,30 @@ #include "mongo/db/exec/write_stage_common.h" -#include "mongo/base/shim.h" +#include + #include "mongo/db/catalog/collection.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/shard_filterer_impl.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/exec/working_set_common.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -59,7 +70,7 @@ PreWriteFilter::PreWriteFilter(OperationContext* opCtx, NamespaceString nss) // Always allow writes on standalone and secondary nodes. 
const auto replCoord{repl::ReplicationCoordinator::get(opCtx)}; - return !replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin.toString()); + return !replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin); }()) {} PreWriteFilter::Action PreWriteFilter::computeAction(const Document& doc) { @@ -131,21 +142,6 @@ void PreWriteFilter::logFromMigrate(const Document& doc, "record"_attr = doc); } -void CachedShardingDescription::restoreState() { - _collectionDescription.reset(); -} - -const ScopedCollectionDescription& CachedShardingDescription::getCollectionDescription( - OperationContext* opCtx) { - if (!_collectionDescription) { - const auto scopedCss = - CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, _nss); - _collectionDescription = scopedCss->getCollectionDescription(opCtx); - } - - return *_collectionDescription; -} - bool ensureStillMatches(const CollectionPtr& collection, OperationContext* opCtx, WorkingSet* ws, diff --git a/src/mongo/db/exec/write_stage_common.h b/src/mongo/db/exec/write_stage_common.h index 169ef5edc6c36..1f5e8b72e2563 100644 --- a/src/mongo/db/exec/write_stage_common.h +++ b/src/mongo/db/exec/write_stage_common.h @@ -29,13 +29,26 @@ #pragma once -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/shard_filterer.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/exec/working_set_common.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_exec.h" +#include "mongo/platform/basic.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -150,27 +163,11 @@ class PreWriteFilter { std::unique_ptr _shardFilterer; }; -/** - * This class represents a cached sharding collection description. When resuming from a yield, the - * cache needs to be invalidated. - */ -class CachedShardingDescription { -public: - CachedShardingDescription(const NamespaceString& nss) : _nss(nss) {} - - void restoreState(); - - const ScopedCollectionDescription& getCollectionDescription(OperationContext* opCtx); - -private: - const NamespaceString _nss; - boost::optional _collectionDescription; -}; - /** * Returns true if the document referred to by 'id' still exists and matches the query predicate * given by 'cq'. Returns true if the document still exists and 'cq' is null. Returns false - * otherwise. + * otherwise, in which case the WorkingSetMember referred to by 'id' will no longer contain a valid + * document, and the only operation that should be performed on the WSM is to free it. * * May throw a WriteConflictException if there was a conflict while searching to see if the document * still exists. 
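// [Editorial sketch, not part of the patch] Caller-side usage implied by the expanded
// ensureStillMatches() comment above: a false return means the WorkingSetMember no
// longer holds a valid document, so the only safe operation left is to free it.
if (!write_stage_common::ensureStillMatches(
        collectionPtr(), opCtx(), _ws, id, _params.canonicalQuery)) {
    _ws->free(id);  // the member must not be read again
    return PlanStage::NEED_TIME;
}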
diff --git a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp index 788ac47f27215..82942a0913db4 100644 --- a/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp +++ b/src/mongo/db/exhaust_cursor_currentop_integration_test.cpp @@ -28,17 +28,60 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/client/dbclient_rs.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/optime.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/future.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/system_clock_source.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -68,7 +111,7 @@ void initTestCollection(DBClientBase* conn) { for (int i = 0; i < 10; i++) { auto insertCmd = BSON("insert" << testNSS.coll() << "documents" << BSON_ARRAY(BSON("a" << i))); - auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody(testNSS.db(), insertCmd)); + auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody(testNSS.db_forTest(), insertCmd)); ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply())); } } @@ -86,7 +129,7 @@ void setWaitAfterCommandFinishesExecutionFailpoint(DBClientBase* conn, bool enab auto cmdObj = BSON("configureFailPoint" << "waitAfterCommandFinishesExecution" << "mode" << (enable ? 
"alwaysOn" : "off") << "data" - << BSON("ns" << testNSS.toString())); + << BSON("ns" << testNSS.toString_forTest())); auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody("admin", cmdObj)); ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply())); } @@ -383,7 +426,7 @@ TEST(CurrentOpExhaustCursorTest, ExhaustCursorUpdatesLastKnownCommittedOpTime) { for (int i = 0; i < 5; i++) { auto insertCmd = BSON("insert" << testNSS.coll() << "documents" << BSON_ARRAY(BSON("a" << i))); - auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody(testNSS.db(), insertCmd)); + auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody(testNSS.db_forTest(), insertCmd)); ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply())); } @@ -427,7 +470,7 @@ TEST(CurrentOpExhaustCursorTest, ExhaustCursorUpdatesLastKnownCommittedOpTime) { for (int i = 5; i < 8; i++) { auto insertCmd = BSON("insert" << testNSS.coll() << "documents" << BSON_ARRAY(BSON("a" << i))); - auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody(testNSS.db(), insertCmd)); + auto reply = conn->runCommand(OpMsgRequest::fromDBAndBody(testNSS.db_forTest(), insertCmd)); ASSERT_OK(getStatusFromCommandResult(reply->getCommandReply())); } diff --git a/src/mongo/db/explain_test.cpp b/src/mongo/db/explain_test.cpp index 5c54a439b6db0..8bd0b2f354bbb 100644 --- a/src/mongo/db/explain_test.cpp +++ b/src/mongo/db/explain_test.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/explain_gen.h" #include "mongo/db/query/explain_options.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/feature_compatibility_version_parser.cpp b/src/mongo/db/feature_compatibility_version_parser.cpp index 1b7ee15572d0f..dd3d71f0fd03c 100644 --- a/src/mongo/db/feature_compatibility_version_parser.cpp +++ b/src/mongo/db/feature_compatibility_version_parser.cpp @@ -27,15 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/feature_compatibility_version_parser.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/feature_compatibility_version_document_gen.h" #include "mongo/db/feature_compatibility_version_documentation.h" +#include "mongo/db/feature_compatibility_version_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #include "mongo/util/version/releases.h" namespace mongo { @@ -110,16 +118,16 @@ StatusWith FeatureCompatibilityVersio version == targetVersion) { // Downgrading FCV must have a "previousVersion" field. if (!previousVersion) { - return Status(ErrorCodes::Error(4926902), - str::stream() - << "Missing field " - << FeatureCompatibilityVersionDocument::kPreviousVersionFieldName - << " in downgrading states for " << multiversion::kParameterName - << " document in " - << NamespaceString::kServerConfigurationNamespace.toString() - << ": " << featureCompatibilityVersionDoc << ". 
See " - << feature_compatibility_version_documentation::kCompatibilityLink - << "."); + return Status( + ErrorCodes::Error(4926902), + str::stream() + << "Missing field " + << FeatureCompatibilityVersionDocument::kPreviousVersionFieldName + << " in downgrading states for " << multiversion::kParameterName + << " document in " + << NamespaceString::kServerConfigurationNamespace.toStringForErrorMsg() + << ": " << featureCompatibilityVersionDoc << ". See " + << feature_compatibility_version_documentation::kCompatibilityLink << "."); } if (version == GenericFCV::kLastLTS) { // Downgrading to last-lts. @@ -131,16 +139,16 @@ StatusWith FeatureCompatibilityVersio // Non-downgrading FCV must not have a "previousVersion" field. if (previousVersion) { - return Status(ErrorCodes::Error(4926903), - str::stream() - << "Unexpected field " - << FeatureCompatibilityVersionDocument::kPreviousVersionFieldName - << " in non-downgrading states for " << multiversion::kParameterName - << " document in " - << NamespaceString::kServerConfigurationNamespace.toString() << ": " - << featureCompatibilityVersionDoc << ". See " - << feature_compatibility_version_documentation::kCompatibilityLink - << "."); + return Status( + ErrorCodes::Error(4926903), + str::stream() + << "Unexpected field " + << FeatureCompatibilityVersionDocument::kPreviousVersionFieldName + << " in non-downgrading states for " << multiversion::kParameterName + << " document in " + << NamespaceString::kServerConfigurationNamespace.toStringForErrorMsg() << ": " + << featureCompatibilityVersionDoc << ". See " + << feature_compatibility_version_documentation::kCompatibilityLink << "."); } // Upgrading FCV. @@ -148,13 +156,13 @@ StatusWith FeatureCompatibilityVersio // For upgrading FCV, "targetVersion" must be kLatest or kLastContinuous and "version" // must be kLastContinuous or kLastLTS. if (targetVersion == GenericFCV::kLastLTS || version == GenericFCV::kLatest) { - return Status(ErrorCodes::Error(4926904), - str::stream() - << "Invalid " << multiversion::kParameterName << " document in " - << NamespaceString::kServerConfigurationNamespace.toString() - << ": " << featureCompatibilityVersionDoc << ". See " - << feature_compatibility_version_documentation::kCompatibilityLink - << "."); + return Status( + ErrorCodes::Error(4926904), + str::stream() + << "Invalid " << multiversion::kParameterName << " document in " + << NamespaceString::kServerConfigurationNamespace.toStringForErrorMsg() + << ": " << featureCompatibilityVersionDoc << ". See " + << feature_compatibility_version_documentation::kCompatibilityLink << "."); } if (version == GenericFCV::kLastLTS) { @@ -165,8 +173,8 @@ StatusWith FeatureCompatibilityVersio uassert(5070601, str::stream() << "Invalid " << multiversion::kParameterName << " document in " - << NamespaceString::kServerConfigurationNamespace.toString() << ": " - << featureCompatibilityVersionDoc << ". See " + << NamespaceString::kServerConfigurationNamespace.toStringForErrorMsg() + << ": " << featureCompatibilityVersionDoc << ". See " << feature_compatibility_version_documentation::kCompatibilityLink << ".", version == GenericFCV::kLastContinuous); @@ -180,8 +188,8 @@ StatusWith FeatureCompatibilityVersio auto status = e.toStatus(); status.addContext(str::stream() << "Invalid " << multiversion::kParameterName << " document in " - << NamespaceString::kServerConfigurationNamespace.toString() << ": " - << featureCompatibilityVersionDoc << ". 
See " + << NamespaceString::kServerConfigurationNamespace.toStringForErrorMsg() + << ": " << featureCompatibilityVersionDoc << ". See " << feature_compatibility_version_documentation::kCompatibilityLink << "."); return status; diff --git a/src/mongo/db/feature_compatibility_version_parser.h b/src/mongo/db/feature_compatibility_version_parser.h index 0a9cc4eb2383c..58a3533ce1a0e 100644 --- a/src/mongo/db/feature_compatibility_version_parser.h +++ b/src/mongo/db/feature_compatibility_version_parser.h @@ -29,6 +29,10 @@ #pragma once +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/util/version/releases.h" namespace mongo { diff --git a/src/mongo/db/feature_flag.cpp b/src/mongo/db/feature_flag.cpp index b588b00730b51..1c7e99b83f345 100644 --- a/src/mongo/db/feature_flag.cpp +++ b/src/mongo/db/feature_flag.cpp @@ -29,14 +29,24 @@ #include "mongo/db/feature_flag.h" +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/db/feature_compatibility_version_parser.h" +#include "mongo/util/assert_util.h" #include "mongo/util/debug_util.h" +#include "mongo/util/str.h" #include "mongo/util/version/releases.h" namespace mongo { // (Generic FCV reference): feature flag support -FeatureFlag::FeatureFlag(bool enabled, StringData versionString) - : _enabled(enabled), _version(multiversion::GenericFCV::kLatest) { +FeatureFlag::FeatureFlag(bool enabled, StringData versionString, bool shouldBeFCVGated) + : _enabled(enabled), + _version(multiversion::GenericFCV::kLatest), + _shouldBeFCVGated(shouldBeFCVGated) { // Verify the feature flag invariants. IDL binder verifies these hold but we add these checks to // prevent incorrect direct instantiation. @@ -44,7 +54,7 @@ FeatureFlag::FeatureFlag(bool enabled, StringData versionString) // If default is true, then version should be present. // If default is false, then no version is allowed. if (kDebugBuild) { - if (enabled) { + if (enabled && shouldBeFCVGated) { dassert(!versionString.empty()); } else { dassert(versionString.empty()); @@ -57,27 +67,46 @@ FeatureFlag::FeatureFlag(bool enabled, StringData versionString) } bool FeatureFlag::isEnabled(const ServerGlobalParams::FeatureCompatibility& fcv) const { + // If the feature flag is not FCV gated, return whether it is enabled. + if (!_shouldBeFCVGated) { + return _enabled; + } + + + // If the FCV is not initialized yet, we check whether the feature flag is enabled on the last + // LTS FCV, which is the lowest FCV we can have on this server. Because the version of a feature + // flag is not supposed to change, we are sure that if the feature flag is enabled on the last + // LTS FCV, it is enabled on all FCVs this server can have. + if (!fcv.isVersionInitialized()) { + // (Generic FCV reference): This FCV reference should exist across LTS binary versions. + return isEnabledOnVersion(multiversion::GenericFCV::kLastLTS); + } + if (!_enabled) { return false; } + // If the feature flag is enabled, return whether the server's FCV is >= to the version the + // feature flag was enabled on. 
return fcv.isGreaterThanOrEqualTo(_version); } -bool FeatureFlag::isEnabledUseDefaultFCVWhenUninitialized( - const ServerGlobalParams::FeatureCompatibility& fcv) const { - if (fcv.isVersionInitialized()) { - return isEnabled(fcv); - } else { - return isEnabledOnVersion( - multiversion::FeatureCompatibilityVersion::kUnsetDefaultLastLTSBehavior); - } -} - +// isEnabledAndIgnoreFCVUnsafe should NOT be used in general, as it checks if the feature flag is +// turned on, regardless of which FCV we are on. It can result in unsafe scenarios +// where we enable a feature on an FCV where it is not supported or where the feature has not been +// fully implemented yet. In order to use isEnabledAndIgnoreFCVUnsafe, you **must** add a comment +// above that line starting with "(Ignore FCV check):" describing why we can safely ignore checking +// the FCV here. +// Note that if the feature flag does not have any upgrade/downgrade concerns, then shouldBeFCVGated +// should be set to false and FeatureFlag::isEnabled() should be used instead of this function. bool FeatureFlag::isEnabledAndIgnoreFCVUnsafe() const { return _enabled; } +// isEnabledAndIgnoreFCVUnsafeAtStartup should only be used on startup, if we want to check if the +// feature flag if the feature flag is turned on, regardless of which FCV we are on. +// Note that if the feature flag does not have any upgrade/downgrade concerns, then shouldBeFCVGated +// should be set to false and FeatureFlag::isEnabled() should be used instead of this function. bool FeatureFlag::isEnabledAndIgnoreFCVUnsafeAtStartup() const { return _enabled; } diff --git a/src/mongo/db/feature_flag.h b/src/mongo/db/feature_flag.h index 746f14e41c5ab..ec5e3655040bc 100644 --- a/src/mongo/db/feature_flag.h +++ b/src/mongo/db/feature_flag.h @@ -29,11 +29,18 @@ #pragma once +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/feature_compatibility_version_parser.h" #include "mongo/db/server_options.h" #include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/version/releases.h" namespace mongo { @@ -52,7 +59,7 @@ class FeatureFlag { friend class FeatureFlagServerParameter; public: - FeatureFlag(bool enabled, StringData versionString); + FeatureFlag(bool enabled, StringData versionString, bool shouldBeFCVGated); /** * Returns true if the flag is set to true and enabled for this FCV version. @@ -126,6 +133,7 @@ class FeatureFlag { private: bool _enabled; multiversion::FeatureCompatibilityVersion _version; + bool _shouldBeFCVGated; }; /** diff --git a/src/mongo/db/feature_flag_test.idl.tpl b/src/mongo/db/feature_flag_test.idl.tpl index d83d2316aaf52..5b5d6e8a0ed4f 100644 --- a/src/mongo/db/feature_flag_test.idl.tpl +++ b/src/mongo/db/feature_flag_test.idl.tpl @@ -42,11 +42,19 @@ feature_flags: description: "Create a feature flag" cpp_varname: gFeatureFlagToaster default: false + shouldBeFCVGated: true featureFlagFryer: description: "Create a feature flag" cpp_varname: gFeatureFlagFryer default: false + shouldBeFCVGated: true + + featureFlagFork: + description: "Create a feature flag that should not be FCV gated" + cpp_varname: gFeatureFlagFork + default: true + shouldBeFCVGated: false #def $ver_str(v): ${'{}.{}'.format(v.major, v.minor)} featureFlagBlender: @@ -56,6 +64,7 @@ feature_flags: # The version should be a valid FCV not equal to GenericFCV::kLastLTS in # the generated 'releases.h' file. 
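The template flags above and below (featureFlagToaster/Fryer disabled with no version, featureFlagFork enabled but not FCV gated, featureFlagBlender/Spoon enabled with a version) line up with the constructor invariant tightened in feature_flag.cpp: an enabled, FCV-gated flag must carry a version string, and otherwise the version must be empty. A standalone sketch of that invariant, with `validateFlagSpec()` as a hypothetical stand-in for the constructor's `dassert()`s:

```cpp
#include <cassert>
#include <string>

void validateFlagSpec(bool enabled, bool shouldBeFCVGated, const std::string& versionString) {
    if (enabled && shouldBeFCVGated) {
        // Enabled + gated: a release version is required (featureFlagBlender/Spoon).
        assert(!versionString.empty() && "FCV-gated enabled flags need a release version");
    } else {
        // Disabled, or enabled but not gated: no version allowed (Toaster, Fryer, Fork).
        assert(versionString.empty() && "only FCV-gated enabled flags may name a version");
    }
}

int main() {
    validateFlagSpec(true, true, "7.0");  // "7.0" is a placeholder version string
    validateFlagSpec(false, true, "");    // disabled flags carry no version
    validateFlagSpec(true, false, "");    // enabled but not FCV gated, like featureFlagFork
    return 0;
}
```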
version: $ver_str(latest) + shouldBeFCVGated: true featureFlagSpoon: description: "Create a feature flag" @@ -63,6 +72,7 @@ feature_flags: default: true # The version should match GenericFCV::kLastLTS in the generated 'releases.h' file. version: $ver_str(last_lts) + shouldBeFCVGated: true server_parameters: spTestNeedsFeatureFlagToaster: diff --git a/src/mongo/db/field_parser.cpp b/src/mongo/db/field_parser.cpp index 0afde43f427cd..0cf51b9b34002 100644 --- a/src/mongo/db/field_parser.cpp +++ b/src/mongo/db/field_parser.cpp @@ -27,7 +27,8 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/db/field_parser.h" diff --git a/src/mongo/db/field_parser.h b/src/mongo/db/field_parser.h index 078668d28f9ab..f0c54390129d6 100644 --- a/src/mongo/db/field_parser.h +++ b/src/mongo/db/field_parser.h @@ -29,12 +29,24 @@ #pragma once +#include +#include #include #include +#include +#include #include +#include #include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" +#include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/field_parser_test.cpp b/src/mongo/db/field_parser_test.cpp index adf8e2c59dc2f..6b926dc92b88e 100644 --- a/src/mongo/db/field_parser_test.cpp +++ b/src/mongo/db/field_parser_test.cpp @@ -27,15 +27,21 @@ * it in the license file. */ +#include +#include #include #include #include #include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/mutable_bson_test_utils.h" #include "mongo/db/field_parser.h" -#include "mongo/db/jsobj.h" #include "mongo/platform/decimal128.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/time_support.h" namespace { diff --git a/src/mongo/db/field_ref.cpp b/src/mongo/db/field_ref.cpp index ae4a8596debe1..92526a4e69b7c 100644 --- a/src/mongo/db/field_ref.cpp +++ b/src/mongo/db/field_ref.cpp @@ -27,12 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/field_ref.h" - +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include +#include "mongo/bson/util/builder.h" +#include "mongo/db/field_ref.h" #include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" @@ -53,6 +60,9 @@ void FieldRef::parse(StringData path) { // keep a copy in a local sting. _dotted = path.toString(); + tassert(1589700, + "the size of the path is larger than accepted", + _dotted.size() <= BSONObjMaxInternalSize); // Separate the field parts using '.' as a delimiter. std::string::iterator beg = _dotted.begin(); @@ -192,8 +202,13 @@ void FieldRef::reserialize() const { } StringData FieldRef::getPart(FieldIndex i) const { - invariant(i < _parts.size()); - + // boost::container::small_vector already checks that the index `i` is in bounds, so we don't + // bother checking here. If we change '_parts' to a different container implementation + // that no longer performs a bounds check, we should add one here. 
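The field_ref.cpp hunk above now tasserts that a dotted path fits within BSONObjMaxInternalSize before it is split into parts (the DEATH_TEST further down in field_ref_test.cpp exercises tripwire 1589700). A standalone sketch of the guard plus the '.'-splitting it protects; `kMaxInternalSize`, `parseDottedPath` and `std::length_error` are placeholders for the real constant and the tassert machinery, assuming the internal limit is roughly 16 MB.

```cpp
#include <cstddef>
#include <stdexcept>
#include <string>
#include <string_view>
#include <vector>

// Placeholder for BSONObjMaxInternalSize (about 16 MB plus slack in the real code).
constexpr std::size_t kMaxInternalSize = 16 * 1024 * 1024 + 16 * 1024;

std::vector<std::string_view> parseDottedPath(std::string_view path) {
    if (path.size() > kMaxInternalSize) {
        // Mirrors: tassert(1589700, "the size of the path is larger than accepted", ...)
        throw std::length_error("the size of the path is larger than accepted");
    }
    std::vector<std::string_view> parts;
    std::size_t begin = 0;
    while (true) {
        std::size_t dot = path.find('.', begin);
        parts.push_back(path.substr(begin, dot - begin));  // substr clamps at the end
        if (dot == std::string_view::npos)
            break;
        begin = dot + 1;
    }
    return parts;  // "a.b.c" -> {"a", "b", "c"}
}

int main() {
    return parseDottedPath("a.b.c").size() == 3 ? 0 : 1;
}
```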
+ static_assert( + std::is_same< + decltype(_parts), + boost::container::small_vector, kFewDottedFieldParts>>()); const boost::optional& part = _parts[i]; if (part) { return part->toStringData(_dotted); @@ -258,6 +273,10 @@ bool FieldRef::isNumericPathComponentStrict(FieldIndex i) const { return FieldRef::isNumericPathComponentStrict(getPart(i)); } +bool FieldRef::isNumericPathComponentLenient(FieldIndex i) const { + return FieldRef::isNumericPathComponentLenient(getPart(i)); +} + bool FieldRef::hasNumericPathComponents() const { for (size_t i = 0; i < numParts(); ++i) { if (isNumericPathComponentStrict(i)) diff --git a/src/mongo/db/field_ref.h b/src/mongo/db/field_ref.h index b2923a8450222..c7a2f0dfc3607 100644 --- a/src/mongo/db/field_ref.h +++ b/src/mongo/db/field_ref.h @@ -31,6 +31,11 @@ #include #include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include #include #include #include @@ -157,6 +162,12 @@ class FieldRef { */ bool isNumericPathComponentStrict(FieldIndex i) const; + /** + * Similar to isNumericPathComponentStrict, but returns true for 0-prefixed indices, such as + * "00" and "01". + */ + bool isNumericPathComponentLenient(FieldIndex i) const; + /** * Returns true if this FieldRef has any numeric path components. */ diff --git a/src/mongo/db/field_ref_set.cpp b/src/mongo/db/field_ref_set.cpp index 39728b5f693bd..b3858baf21a7c 100644 --- a/src/mongo/db/field_ref_set.cpp +++ b/src/mongo/db/field_ref_set.cpp @@ -27,10 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/field_ref_set.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/field_ref_set.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -52,6 +57,23 @@ StringData safeFirstPart(const FieldRef* fieldRef) { return fieldRef->getPart(0); } } + + +/** + * Helper function to check if path conflicts are all prefixes. + */ +Status checkPathIsPrefixOf(const FieldRef& path, const FieldRef& conflictingPath) { + // Conflicts are always prefixes (or equal to) the path, or vice versa + if (path.numParts() > conflictingPath.numParts()) { + string errMsg = str::stream() + << "field at '" << conflictingPath.dottedField() + << "' must be exactly specified, field at sub-path '" << path.dottedField() << "'found"; + return Status(ErrorCodes::NotExactValueField, errMsg); + } + + return Status::OK(); +} + } // namespace bool FieldRefSet::FieldRefPtrLessThan::operator()(const FieldRef* l, const FieldRef* r) const { @@ -72,7 +94,7 @@ FieldRefSet::FieldRefSet(const vector& paths) { fillFrom(paths); } -bool FieldRefSet::findConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const { +StatusWith FieldRefSet::checkForConflictsAndPrefix(const FieldRef* toCheck) const { bool foundConflict = false; // If the set is empty, there is no work to do. 
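The `checkPathIsPrefixOf()` / `checkForConflictsAndPrefix()` changes above report an error (ErrorCodes::NotExactValueField) specifically when an already-registered path is a proper prefix of the path being checked, while the reverse direction is still just a conflict. A self-contained illustration of that classification; the function names and the 0/1/-1 return convention are invented for the example.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

std::vector<std::string> split(const std::string& p) {
    std::vector<std::string> parts;
    std::size_t begin = 0, dot;
    while ((dot = p.find('.', begin)) != std::string::npos) {
        parts.push_back(p.substr(begin, dot - begin));
        begin = dot + 1;
    }
    parts.push_back(p.substr(begin));
    return parts;
}

// Returns 0: no conflict, 1: conflict (checked path is a prefix of / equal to the
// stored path), -1: stored path is a proper prefix of the checked path, which the
// new code reports as "field must be exactly specified".
int classifyConflict(const std::string& toCheck, const std::string& stored) {
    auto a = split(toCheck), b = split(stored);
    std::size_t common = 0;
    while (common < a.size() && common < b.size() && a[common] == b[common])
        ++common;
    if (common != a.size() && common != b.size())
        return 0;  // paths diverge before either ends: no conflict
    return a.size() > b.size() ? -1 : 1;
}

int main() {
    std::cout << classifyConflict("a.b", "a.c") << "\n";    // 0
    std::cout << classifyConflict("a", "a.b") << "\n";      // 1
    std::cout << classifyConflict("a.b.c", "a.b") << "\n";  // -1
}
```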
@@ -88,10 +110,9 @@ bool FieldRefSet::findConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) while (it != _fieldSet.end() && safeFirstPart(*it) == prefixStr) { size_t common = (*it)->commonPrefixSize(*toCheck); if ((*it)->numParts() == common || toCheck->numParts() == common) { - if (!conflicts) - return true; - - conflicts->_fieldSet.insert(*it); + if (auto status = checkPathIsPrefixOf(*toCheck, **it); !status.isOK()) { + return status; + } foundConflict = true; } ++it; diff --git a/src/mongo/db/field_ref_set.h b/src/mongo/db/field_ref_set.h index 001ea504e67c1..98e447992cd4d 100644 --- a/src/mongo/db/field_ref_set.h +++ b/src/mongo/db/field_ref_set.h @@ -29,10 +29,18 @@ #pragma once +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include #include +#include +#include #include -#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/field_ref.h" namespace mongo { @@ -72,6 +80,10 @@ class FieldRefSet { return _fieldSet.empty(); } + size_t size() const { + return _fieldSet.size(); + } + inline const_iterator begin() const { return _fieldSet.begin(); } @@ -130,7 +142,7 @@ class FieldRefSet { * * Return true if conflicts were found. */ - bool findConflicts(const FieldRef* toCheck, FieldRefSet* conflicts) const; + StatusWith checkForConflictsAndPrefix(const FieldRef* toCheck) const; void clear() { _fieldSet.clear(); diff --git a/src/mongo/db/field_ref_set_test.cpp b/src/mongo/db/field_ref_set_test.cpp index ae610a15cbfae..6241f24d498ce 100644 --- a/src/mongo/db/field_ref_set_test.cpp +++ b/src/mongo/db/field_ref_set_test.cpp @@ -27,12 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/field_ref_set.h" - #include "mongo/db/field_ref.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/field_ref_set.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/field_ref_test.cpp b/src/mongo/db/field_ref_test.cpp index f631b6bdb4e91..b7ae44f7a947f 100644 --- a/src/mongo/db/field_ref_test.cpp +++ b/src/mongo/db/field_ref_test.cpp @@ -27,14 +27,16 @@ * it in the license file. 
*/ +#include #include -#include "mongo/base/error_codes.h" -#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/field_ref.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/str.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -91,6 +93,17 @@ TEST(Normal, SinglePart) { ASSERT_EQUALS(fieldRef.dottedField(), field); } +DEATH_TEST_REGEX(Normal, Overflow, "Tripwire assertion.*1589700") { + std::string field = "a"; + for (size_t s = 1; s <= BSONObjMaxInternalSize / 2; s++) { + field.append(".a"); + } + ASSERT_GT(field.size(), BSONObjMaxInternalSize); + FieldRef fieldRef; + ASSERT_THROWS_CODE(fieldRef.parse(field), AssertionException, 1589700); +} + + TEST(Normal, ParseTwice) { std::string field = "a"; FieldRef fieldRef; diff --git a/src/mongo/db/fle_crud.cpp b/src/mongo/db/fle_crud.cpp index 2cf9e1a5b6f2d..5323fe35fe00e 100644 --- a/src/mongo/db/fle_crud.cpp +++ b/src/mongo/db/fle_crud.cpp @@ -28,40 +28,80 @@ */ -#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include + +#include #include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/bsontypes.h" #include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/crypto/fle_stats_gen.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/client.h" #include "mongo/db/commands/fle2_get_count_info_command_gen.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/fle_crud.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_time_tracker.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/fle/query_rewriter_interface.h" #include "mongo/db/query/fle/server_rewrite.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_api.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/factory.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" #include "mongo/s/grid.h" #include "mongo/s/transaction_router_resource_yielder.h" #include "mongo/s/write_ops/batch_write_exec.h" +#include "mongo/s/write_ops/batched_upsert_detail.h" +#include "mongo/stdx/variant.h" #include 
"mongo/util/assert_util.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -188,23 +228,28 @@ std::vector toTagSets( FLEEdgeCountInfo convertTokensToEdgeCount(const QECountInfoReplyTokens& token) { - boost::optional edc; - if (token.getEDCDerivedFromDataTokenAndContentionFactorToken()) { - edc = FLETokenFromCDR( - token.getEDCDerivedFromDataTokenAndContentionFactorToken().value()); - } + auto edc = token.getEDCDerivedFromDataTokenAndContentionFactorToken().map([](auto& t) { + return FLETokenFromCDR(t); + }); - boost::optional cpos, apos; - auto& spos = token.getSearchedPositions(); - if (spos) { - cpos = spos->getCpos(); - apos = spos->getApos(); - } + auto spos = token.getSearchedPositions().map([](auto& pair) { + EmuBinaryResult newPair; + newPair.cpos = pair.getCpos(); + newPair.apos = pair.getApos(); + return newPair; + }); + + auto npos = token.getNullAnchorPositions().map([](auto& pair) { + ESCCountsPair newPair; + newPair.cpos = pair.getCpos(); + newPair.apos = pair.getApos(); + return newPair; + }); auto esc = FLETokenFromCDR(token.getESCTwiceDerivedTagToken()); - return FLEEdgeCountInfo(token.getCount(), esc, cpos, apos, token.getStats(), edc); + return FLEEdgeCountInfo(token.getCount(), esc, spos, npos, token.getStats(), edc); } std::vector> toEdgeCounts( @@ -235,12 +280,12 @@ std::vector> toEdgeCounts( std::shared_ptr getTransactionWithRetriesForMongoS( OperationContext* opCtx) { + auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto fleInlineCrudExecutor = std::make_shared(); return std::make_shared( opCtx, - fleInlineCrudExecutor->getSleepableExecutor( - Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()), + executor, TransactionRouterResourceYielder::makeForLocalHandoff(), fleInlineCrudExecutor); } @@ -400,7 +445,7 @@ std::pair processInsert( uint32_t numDocs = 0; write_ops::WriteCommandReplyBase writeBase; - // TODO: Remove with SERVER-73714 + // This is an optimization for single-document unencrypted inserts. if (documents.size() == 1) { auto serverPayload = EDCServerCollection::getEncryptedFieldInfo(documents[0]); if (serverPayload.size() == 0) { @@ -1051,16 +1096,15 @@ write_ops::UpdateCommandReply processUpdate(FLEQueryInterface* queryImpl, // Fail if we could not find the new document uassert(6371505, "Could not find pre-image document by _id", !newDocument.isEmpty()); - if (hasIndexedFieldsInSchema(efc.getFields())) { - // Check the user did not remove/destroy the __safeContent__ array. If there are no - // indexed fields, then there will not be a safeContent array in the document. - FLEClientCrypto::validateTagsArray(newDocument); - } - // Step 5 ---- auto originalFields = EDCServerCollection::getEncryptedIndexedFields(originalDocument); auto newFields = EDCServerCollection::getEncryptedIndexedFields(newDocument); + if (hasIndexedFieldsInSchema(efc.getFields()) && !(newFields.empty())) { + // Check the user did not remove/destroy the __safeContent__ array. If there are no + // indexed fields, then there will not be a safeContent array in the document. 
+ FLEClientCrypto::validateTagsArray(newDocument); + } // Step 6 ---- // GarbageCollect steps: @@ -1314,16 +1358,16 @@ write_ops::FindAndModifyCommandReply processFindAndModify( // Fail if we could not find the new document uassert(7293302, "Could not find pre-image document by _id", !newDocument.isEmpty()); - if (hasIndexedFieldsInSchema(efc.getFields())) { + // Step 5 ---- + auto originalFields = EDCServerCollection::getEncryptedIndexedFields(originalDocument); + auto newFields = EDCServerCollection::getEncryptedIndexedFields(newDocument); + + if (hasIndexedFieldsInSchema(efc.getFields()) && !(newFields.empty())) { // Check the user did not remove/destroy the __safeContent__ array. If there are no // indexed fields, then there will not be a safeContent array in the document. FLEClientCrypto::validateTagsArray(newDocument); } - // Step 5 ---- - auto originalFields = EDCServerCollection::getEncryptedIndexedFields(originalDocument); - auto newFields = EDCServerCollection::getEncryptedIndexedFields(newDocument); - // Step 6 ---- // GarbageCollect steps: // 1. Gather the tags from the metadata block(s) of each removed field. These are stale tags. @@ -1451,6 +1495,13 @@ BSONObj FLEQueryInterfaceImpl::getById(const NamespaceString& nss, BSONElement e uint64_t FLEQueryInterfaceImpl::countDocuments(const NamespaceString& nss) { // Since count() does not work in a transaction, call count() by bypassing the transaction api auto client = _serviceContext->makeClient("SEP-int-fle-crud"); + + // TODO(SERVER-74660): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion clientRegion(client); auto opCtx = cc().makeOperationContext(); auto as = AuthorizationSession::get(cc()); @@ -1487,6 +1538,8 @@ QECountInfoQueryTypeEnum queryTypeTranslation(FLEQueryInterface::TagQueryType ty return QECountInfoQueryTypeEnum::Query; case FLEQueryInterface::TagQueryType::kCompact: return QECountInfoQueryTypeEnum::Compact; + case FLEQueryInterface::TagQueryType::kCleanup: + return QECountInfoQueryTypeEnum::Cleanup; default: uasserted(7517101, "Invalid TagQueryType value."); } @@ -1507,7 +1560,7 @@ std::vector> FLEQueryInterfaceImpl::getTags( getCountsCmd.setTokens(toTagSets(tokensSets)); getCountsCmd.setQueryType(queryTypeTranslation(type)); - auto response = _txnClient.runCommandSync(nss.db(), getCountsCmd.toBSON({})); + auto response = _txnClient.runCommandSync(nss.dbName(), getCountsCmd.toBSON({})); auto status = getStatusFromWriteCommandReply(response); uassertStatusOK(status); @@ -1787,6 +1840,13 @@ std::vector> FLETagNoTXNQuery::getTags( // Pop off the current op context so we can get a fresh set of read concern settings auto client = _opCtx->getServiceContext()->makeClient("FLETagNoTXNQuery"); + + // TODO(SERVER-74660): Please revisit if this thread could be made killable. 
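Both `countDocuments()` and `FLETagNoTXNQuery::getTags()` above now create a short-lived Client, mark it unkillable by stepdown while holding the client lock, and install it for the scope of the work via `AlternativeClientRegion`. A simplified standalone sketch of that scoping pattern; `FakeClient` and `AlternativeClientScope` are stand-ins, not the ServiceContext API.

```cpp
#include <mutex>
#include <string>

struct FakeClient {
    std::string desc;
    std::mutex lock;  // stands in for locking *client.get()
    bool killableByStepdown = true;

    // Requires the caller to hold the client lock, like the real setter.
    void setSystemOperationUnkillableByStepdown(const std::lock_guard<std::mutex>&) {
        killableByStepdown = false;
    }
};

thread_local FakeClient* currentClient = nullptr;

// RAII region that swaps the thread's current client, like AlternativeClientRegion.
class AlternativeClientScope {
public:
    explicit AlternativeClientScope(FakeClient* c) : _saved(currentClient) {
        currentClient = c;
    }
    ~AlternativeClientScope() {
        currentClient = _saved;
    }

private:
    FakeClient* _saved;
};

int main() {
    FakeClient background{"SEP-int-fle-crud"};
    {
        std::lock_guard<std::mutex> lk(background.lock);
        background.setSystemOperationUnkillableByStepdown(lk);
    }
    AlternativeClientScope scope(&background);
    // ... run the count outside the transaction API with `background` current ...
    return currentClient == &background ? 0 : 1;
}
```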
+ { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion clientRegion(client); auto opCtx = cc().makeOperationContext(); auto as = AuthorizationSession::get(cc()); diff --git a/src/mongo/db/fle_crud.h b/src/mongo/db/fle_crud.h index a2c5fe051a752..1f7faa550ab6d 100644 --- a/src/mongo/db/fle_crud.h +++ b/src/mongo/db/fle_crud.h @@ -30,24 +30,42 @@ #pragma once #include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include #include +#include +#include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" #include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/count_command_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/executor/inline_executor.h" #include "mongo/rpc/op_msg.h" #include "mongo/s/write_ops/batch_write_exec.h" +#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { class OperationContext; @@ -157,6 +175,13 @@ std::pair processFLEFindAndModifyExplainMongos( OperationContext* opCtx, const write_ops::FindAndModifyCommandRequest& findAndModifyRequest); +/** + * Process a findAndModify request from a replica set. + */ +StatusWith> +processFLEFindAndModifyHelper(OperationContext* opCtx, + const write_ops::FindAndModifyCommandRequest& findAndModifyRequest); + /** * Process a findAndModify request from a replica set. */ diff --git a/src/mongo/db/fle_crud_mongod.cpp b/src/mongo/db/fle_crud_mongod.cpp index a13e25c66859a..42f0cbda565ff 100644 --- a/src/mongo/db/fle_crud_mongod.cpp +++ b/src/mongo/db/fle_crud_mongod.cpp @@ -27,46 +27,73 @@ * it in the license file. 
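`processFLEFindAndModify()` is split above into a StatusWith-returning helper plus a thin wrapper that unwraps it, so callers that want to inspect the error can call the helper directly. A minimal sketch of that split, using a home-grown `Expected<T>` in place of `mongo::StatusWith<T>` and a plain exception in place of `uassertStatusOK()`.

```cpp
#include <stdexcept>
#include <string>
#include <utility>
#include <variant>

template <typename T>
class Expected {
public:
    Expected(T value) : _v(std::move(value)) {}
    Expected(std::string error) : _v(std::move(error)) {}
    bool isOK() const { return std::holds_alternative<T>(_v); }
    const T& getValue() const { return std::get<T>(_v); }
    const std::string& getError() const { return std::get<std::string>(_v); }

private:
    std::variant<T, std::string> _v;
};

struct Reply { int numDocs = 0; };

// Helper: reports failure as a value instead of throwing.
Expected<Reply> processFindAndModifyHelper(bool replicaSet) {
    if (!replicaSet)
        return Expected<Reply>(std::string("only supported on replica sets"));
    return Expected<Reply>(Reply{1});
}

// Wrapper: preserves the original throwing interface, like uassertStatusOK().
Reply processFindAndModify(bool replicaSet) {
    auto swReply = processFindAndModifyHelper(replicaSet);
    if (!swReply.isOK())
        throw std::runtime_error(swReply.getError());
    return swReply.getValue();
}

int main() {
    return processFindAndModify(true).numDocs == 1 ? 0 : 1;
}
```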
*/ -#include "mongo/base/status.h" -#include "mongo/db/fle_crud.h" - +#include +#include #include #include +#include + +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" -#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" -#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/crypto/fle_stats_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/db_raii.h" #include "mongo/db/fle_crud.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_gen.h" -#include "mongo/db/ops/write_ops_parsers.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/count_command_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/fle/query_rewriter_interface.h" #include "mongo/db/query/fle/server_rewrite.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/session.h" #include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/sorted_data_interface.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/db/transaction/transaction_participant_resource_yielder.h" #include "mongo/executor/inline_executor.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/idl/idl_parser.h" -#include "mongo/s/grid.h" -#include "mongo/s/transaction_router_resource_yielder.h" -#include "mongo/s/write_ops/batch_write_exec.h" +#include "mongo/rpc/op_msg.h" #include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -221,7 +248,7 @@ class StorageEngineClusteredCollectionReader : public FLEStateCollectionReader { // Check for interruption so we can be killed _opCtx->checkForInterrupt(); - KeyString::Builder builder(KeyString::Version::kLatestVersion); + key_string::Builder builder(key_string::Version::kLatestVersion); builder.appendBinData(BSONBinData(block.data(), block.size(), BinDataType::BinDataGeneral)); auto recordId = RecordId(builder.getBuffer(), builder.getSize()); @@ 
-289,11 +316,12 @@ class StorageEngineIndexCollectionReader : public FLEStateCollectionReader { // Check for interruption so we can be killed _opCtx->checkForInterrupt(); - KeyString::Builder kb( - _sdi->getKeyStringVersion(), _sdi->getOrdering(), KeyString::Discriminator::kInclusive); + key_string::Builder kb(_sdi->getKeyStringVersion(), + _sdi->getOrdering(), + key_string::Discriminator::kInclusive); kb.appendBinData(BSONBinData(block.data(), block.size(), BinDataGeneral)); - KeyString::Value id(kb.getValueCopy()); + key_string::Value id(kb.getValueCopy()); incrementRead(); @@ -304,13 +332,13 @@ class StorageEngineIndexCollectionReader : public FLEStateCollectionReader { // Seek will almost always give us a document, it just may not be a document we were // looking for. We need to check if seeked to the document we want - auto sizeWithoutRecordId = KeyString::sizeWithoutRecordIdLongAtEnd( + auto sizeWithoutRecordId = key_string::sizeWithoutRecordIdLongAtEnd( ksEntry->keyString.getBuffer(), ksEntry->keyString.getSize()); - if (KeyString::compare(ksEntry->keyString.getBuffer(), - id.getBuffer(), - sizeWithoutRecordId, - id.getSize()) == 0) { + if (key_string::compare(ksEntry->keyString.getBuffer(), + id.getBuffer(), + sizeWithoutRecordId, + id.getSize()) == 0) { // Get the document from the base collection return _cursor->seekExact(ksEntry->loc); @@ -336,11 +364,10 @@ std::shared_ptr getTransactionWithRetriesFo OperationContext* opCtx) { auto fleInlineCrudExecutor = std::make_shared(); - auto inlineSleepExecutor = fleInlineCrudExecutor->getSleepableExecutor(_fleCrudExecutor); return std::make_shared( opCtx, - inlineSleepExecutor, + _fleCrudExecutor, std::make_unique(), fleInlineCrudExecutor); } @@ -405,8 +432,9 @@ write_ops::DeleteCommandReply processFLEDelete( return deleteReply; } -write_ops::FindAndModifyCommandReply processFLEFindAndModify( - OperationContext* opCtx, const write_ops::FindAndModifyCommandRequest& findAndModifyRequest) { +StatusWith> +processFLEFindAndModifyHelper(OperationContext* opCtx, + const write_ops::FindAndModifyCommandRequest& findAndModifyRequest) { uassert(6371800, "Encrypted index operations are only supported on replica sets", @@ -416,7 +444,12 @@ write_ops::FindAndModifyCommandReply processFLEFindAndModify( auto reply = processFindAndModifyRequest( opCtx, findAndModifyRequest, &getTransactionWithRetriesForMongoD); - return uassertStatusOK(reply).first; + return reply; +} + +write_ops::FindAndModifyCommandReply processFLEFindAndModify( + OperationContext* opCtx, const write_ops::FindAndModifyCommandRequest& findAndModifyRequest) { + return uassertStatusOK(processFLEFindAndModifyHelper(opCtx, findAndModifyRequest)).first; } write_ops::UpdateCommandReply processFLEUpdate( @@ -492,7 +525,7 @@ std::vector> getTagsFromStorage( auto opStr = "getTagsFromStorage"_sd; return writeConflictRetry( - opCtx, opStr, nsOrUUID.toString(), [&]() -> std::vector> { + opCtx, opStr, nsOrUUID, [&]() -> std::vector> { AutoGetCollectionForReadMaybeLockFree autoColl(opCtx, nsOrUUID); const auto& collection = autoColl.getCollection(); @@ -532,14 +565,15 @@ std::vector> getTagsFromStorage( opCtx, kIdIndexName, IndexCatalog::InclusionPolicy::kReady); if (!indexDescriptor) { uasserted(ErrorCodes::IndexNotFound, - str::stream() << "Index not found, ns:" << nsOrUUID.toString() + str::stream() << "Index not found, ns:" << toStringForLogging(nsOrUUID) << ", index: " << kIdIndexName); } if (indexDescriptor->isPartial()) { uasserted(ErrorCodes::IndexOptionsConflict, - str::stream() << "Partial 
index is not allowed for this operation, ns:" - << nsOrUUID.toString() << ", index: " << kIdIndexName); + str::stream() + << "Partial index is not allowed for this operation, ns:" + << toStringForLogging(nsOrUUID) << ", index: " << kIdIndexName); } auto indexCatalogEntry = indexDescriptor->getEntry()->shared_from_this(); diff --git a/src/mongo/db/fle_crud_test.cpp b/src/mongo/db/fle_crud_test.cpp index e025da895729c..0f6a3adaf5097 100644 --- a/src/mongo/db/fle_crud_test.cpp +++ b/src/mongo/db/fle_crud_test.cpp @@ -28,48 +28,70 @@ */ -#include #include #include -#include +#include +#include +#include #include #include -#include #include +#include +#include +#include +#include + #include "mongo/base/data_range.h" #include "mongo/base/error_codes.h" +#include "mongo/base/secure_allocator.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/json.h" +#include "mongo/crypto/aead_encryption.h" #include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/crypto/fle_stats_gen.h" #include "mongo/crypto/fle_tags.h" +#include "mongo/crypto/symmetric_key.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" #include "mongo/db/fle_crud.h" #include "mongo/db/fle_query_interface_mock.h" -#include "mongo/db/matcher/schema/encrypt_schema_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/executor/network_interface_mock.h" #include "mongo/idl/idl_parser.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/random.h" #include "mongo/shell/kms_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/hex.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/murmur3.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { @@ -179,10 +201,8 @@ BSONObj TestKeyVault::getEncryptedKey(const UUID& uuid) { } UUID fieldNameToUUID(StringData field) { - std::array buf; - - MurmurHash3_x86_128(field.rawData(), field.size(), 123456, buf.data()); - + std::array buf; + murmur3(field, 123456 /*seed*/, buf); return UUID::fromCDR(buf); } @@ -208,7 +228,7 @@ class FleCrudTest : public ServiceContextMongoDTest { void createCollection(const NamespaceString& ns); - void assertDocumentCounts(uint64_t edc, uint64_t esc, uint64_t ecc, uint64_t ecoc); + void assertDocumentCounts(uint64_t edc, uint64_t esc, uint64_t ecoc); void 
testValidateEncryptedFieldInfo(BSONObj obj, bool bypassValidation); @@ -447,15 +467,11 @@ EncryptedFieldConfig getTestEncryptedFieldConfig( return EncryptedFieldConfig::parse(IDLParserContext("root"), fromjson(rangeSchemaV2)); } -void parseEncryptedInvalidFieldConfig(StringData esc, StringData ecc, StringData ecoc) { +void parseEncryptedInvalidFieldConfig(StringData esc, StringData ecoc) { auto invalidCollectionNameSchema = // "{" + - fmt::format( - "{{\"escCollection\": \"{}\", \"eccCollection\": \"{}\", \"ecocCollection\": \"{}\", ", - esc, - ecc, - ecoc) + + fmt::format("{{\"escCollection\": \"{}\", \"ecocCollection\": \"{}\", ", esc, ecoc) + R"( "fields": [ { @@ -475,7 +491,7 @@ void parseEncryptedInvalidFieldConfig(StringData esc, StringData ecc, StringData EncryptedFieldConfig::parse(IDLParserContext("root"), fromjson(invalidCollectionNameSchema)); } -void FleCrudTest::assertDocumentCounts(uint64_t edc, uint64_t esc, uint64_t ecc, uint64_t ecoc) { +void FleCrudTest::assertDocumentCounts(uint64_t edc, uint64_t esc, uint64_t ecoc) { ASSERT_EQ(_queryImpl->countDocuments(_edcNs), edc); ASSERT_EQ(_queryImpl->countDocuments(_escNs), esc); ASSERT_EQ(_queryImpl->countDocuments(_ecocNs), ecoc); @@ -763,11 +779,18 @@ class FleTagsTest : public FleCrudTest { FleCrudTest::tearDown(); } + std::vector> getCountInfoSets(BSONObj obj, uint64_t cm = 0) { + auto s = getTestESCDataToken(obj); + auto d = getTestEDCDataToken(obj); + auto nssEsc = NamespaceString::createNamespaceString_forTest("test.enxcol_.coll.esc"); + return mongo::fle::getCountInfoSets(_queryImpl.get(), nssEsc, s, d, cm); + } + std::vector readTags(BSONObj obj, uint64_t cm = 0) { auto s = getTestESCDataToken(obj); auto d = getTestEDCDataToken(obj); - auto nssEsc = NamespaceString("test.enxcol_.coll.esc"); - auto nssEcc = NamespaceString("test.enxcol_.coll.ecc"); + auto nssEsc = NamespaceString::createNamespaceString_forTest("test.enxcol_.coll.esc"); + auto nssEcc = NamespaceString::createNamespaceString_forTest("test.enxcol_.coll.ecc"); return mongo::fle::readTags(_queryImpl.get(), nssEsc, s, d, cm); } @@ -781,7 +804,7 @@ TEST_F(FleCrudTest, InsertOne) { doSingleInsert(1, element, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); assertECOCDocumentCountByField("encrypted", 1); ASSERT_FALSE( @@ -794,7 +817,7 @@ TEST_F(FleCrudTest, InsertOneRange) { auto element = doc.firstElement(); doSingleInsert(1, element, Fle2AlgorithmInt::kRange); - assertDocumentCounts(1, 5, 0, 5); + assertDocumentCounts(1, 5, 5); assertECOCDocumentCountByField("encrypted", 5); } @@ -807,7 +830,7 @@ TEST_F(FleCrudTest, InsertTwoSame) { doSingleInsert(1, element, Fle2AlgorithmInt::kEquality); doSingleInsert(2, element, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(2, 2, 0, 2); + assertDocumentCounts(2, 2, 2); assertECOCDocumentCountByField("encrypted", 2); auto escTagToken = getTestESCToken(element); @@ -827,7 +850,7 @@ TEST_F(FleCrudTest, InsertTwoDifferent) { BSON("encrypted" << "topsecret")); - assertDocumentCounts(2, 2, 0, 2); + assertDocumentCounts(2, 2, 2); assertECOCDocumentCountByField("encrypted", 2); ASSERT_FALSE( @@ -855,7 +878,7 @@ TEST_F(FleCrudTest, Insert100Fields) { }; doSingleWideInsert(1, fieldCount, valueGenerator); - assertDocumentCounts(1, fieldCount, 0, fieldCount); + assertDocumentCounts(1, fieldCount, fieldCount); for (uint64_t field = 0; field < fieldCount; field++) { auto fieldName = fieldNameFromInt(field); @@ -887,7 +910,7 @@ TEST_F(FleCrudTest, Insert20Fields50Rows) { 
doSingleWideInsert(row, fieldCount, valueGenerator); } - assertDocumentCounts(rowCount, rowCount * fieldCount, 0, rowCount * fieldCount); + assertDocumentCounts(rowCount, rowCount * fieldCount, rowCount * fieldCount); for (uint64_t row = 0; row < rowCount; row++) { for (uint64_t field = 0; field < fieldCount; field++) { @@ -1008,7 +1031,7 @@ TEST_F(FleCrudTest, InsertAndDeleteOne) { doSingleInsert(1, element, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); ASSERT_FALSE( _queryImpl->getById(_escNs, ESCCollection::generateNonAnchorId(getTestESCToken(element), 1)) @@ -1016,7 +1039,7 @@ TEST_F(FleCrudTest, InsertAndDeleteOne) { doSingleDelete(1, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(0, 1, 0, 1); + assertDocumentCounts(0, 1, 1); assertECOCDocumentCountByField("encrypted", 1); } @@ -1027,11 +1050,11 @@ TEST_F(FleCrudTest, InsertAndDeleteOneRange) { doSingleInsert(1, element, Fle2AlgorithmInt::kRange); - assertDocumentCounts(1, 5, 0, 5); + assertDocumentCounts(1, 5, 5); doSingleDelete(1, Fle2AlgorithmInt::kRange); - assertDocumentCounts(0, 5, 0, 5); + assertDocumentCounts(0, 5, 5); assertECOCDocumentCountByField("encrypted", 5); } @@ -1044,7 +1067,7 @@ TEST_F(FleCrudTest, InsertTwoSameAndDeleteTwo) { doSingleInsert(1, element, Fle2AlgorithmInt::kEquality); doSingleInsert(2, element, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(2, 2, 0, 2); + assertDocumentCounts(2, 2, 2); ASSERT_FALSE( _queryImpl->getById(_escNs, ESCCollection::generateNonAnchorId(getTestESCToken(element), 1)) @@ -1053,7 +1076,7 @@ TEST_F(FleCrudTest, InsertTwoSameAndDeleteTwo) { doSingleDelete(2, Fle2AlgorithmInt::kEquality); doSingleDelete(1, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(0, 2, 0, 2); + assertDocumentCounts(0, 2, 2); assertECOCDocumentCountByField("encrypted", 2); } @@ -1066,12 +1089,12 @@ TEST_F(FleCrudTest, InsertTwoDifferentAndDeleteTwo) { BSON("encrypted" << "topsecret")); - assertDocumentCounts(2, 2, 0, 2); + assertDocumentCounts(2, 2, 2); doSingleDelete(2, Fle2AlgorithmInt::kEquality); doSingleDelete(1, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(0, 2, 0, 2); + assertDocumentCounts(0, 2, 2); assertECOCDocumentCountByField("encrypted", 2); } @@ -1080,11 +1103,11 @@ TEST_F(FleCrudTest, InsertOneButDeleteAnother) { doSingleInsert(1, BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); doSingleDelete(2, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); assertECOCDocumentCountByField("encrypted", 1); } @@ -1095,13 +1118,13 @@ TEST_F(FleCrudTest, UpdateOne) { BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); doSingleUpdate(1, BSON("encrypted" << "top secret")); - assertDocumentCounts(1, 2, 0, 2); + assertDocumentCounts(1, 2, 2); assertECOCDocumentCountByField("encrypted", 2); validateDocument(1, @@ -1118,14 +1141,14 @@ TEST_F(FleCrudTest, UpdateOneRange) { doSingleInsert(1, element, Fle2AlgorithmInt::kRange); - assertDocumentCounts(1, 5, 0, 5); + assertDocumentCounts(1, 5, 5); auto doc2 = BSON("encrypted" << 2); auto elem2 = doc2.firstElement(); doSingleUpdate(1, elem2, Fle2AlgorithmInt::kRange); - assertDocumentCounts(1, 10, 0, 10); + assertDocumentCounts(1, 10, 10); validateDocument(1, BSON("_id" << 1 << "counter" << 2 << "plainText" @@ -1140,13 +1163,13 @@ TEST_F(FleCrudTest, UpdateOneSameValue) { BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + 
assertDocumentCounts(1, 1, 1); doSingleUpdate(1, BSON("encrypted" << "secret")); - assertDocumentCounts(1, 2, 0, 2); + assertDocumentCounts(1, 2, 2); assertECOCDocumentCountByField("encrypted", 2); validateDocument(1, @@ -1163,7 +1186,7 @@ TEST_F(FleCrudTest, UpdateOneReplace) { BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); auto replace = BSON("encrypted" << "top secret"); @@ -1183,7 +1206,7 @@ TEST_F(FleCrudTest, UpdateOneReplace) { Fle2AlgorithmInt::kEquality); - assertDocumentCounts(1, 2, 0, 2); + assertDocumentCounts(1, 2, 2); assertECOCDocumentCountByField("encrypted", 2); validateDocument(1, @@ -1200,7 +1223,7 @@ TEST_F(FleCrudTest, UpdateOneReplaceRange) { doSingleInsert(1, element, Fle2AlgorithmInt::kRange); - assertDocumentCounts(1, 5, 0, 5); + assertDocumentCounts(1, 5, 5); auto replace = BSON("encrypted" << 2); auto buf = generateSinglePlaceholder(replace.firstElement(), Fle2AlgorithmInt::kRange); @@ -1217,7 +1240,7 @@ TEST_F(FleCrudTest, UpdateOneReplaceRange) { write_ops::UpdateModification(result, write_ops::UpdateModification::ReplacementTag{}), Fle2AlgorithmInt::kRange); - assertDocumentCounts(1, 10, 0, 10); + assertDocumentCounts(1, 10, 10); validateDocument(1, BSON("_id" << 1 << "plaintext" @@ -1233,7 +1256,7 @@ TEST_F(FleCrudTest, RenameSafeContent) { BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); BSONObjBuilder builder; builder.append("$inc", BSON("counter" << 1)); @@ -1250,7 +1273,7 @@ TEST_F(FleCrudTest, SetSafeContent) { BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); BSONObjBuilder builder; builder.append("$inc", BSON("counter" << 1)); @@ -1273,16 +1296,10 @@ TEST_F(FleCrudTest, testValidateEncryptedFieldConfig) { // Test that EDCServerCollection::validateEncryptedFieldInfo throws an error when collection names // do not match naming rules. 
TEST_F(FleCrudTest, testValidateEncryptedFieldConfigFields) { - ASSERT_THROWS_CODE(parseEncryptedInvalidFieldConfig( - "enxcol_.coll.esc1", "enxcol_.coll.ecc", "enxcol_.coll.ecoc"), + ASSERT_THROWS_CODE(parseEncryptedInvalidFieldConfig("enxcol_.coll.esc1", "enxcol_.coll.ecoc"), DBException, 7406900); - ASSERT_THROWS_CODE(parseEncryptedInvalidFieldConfig( - "enxcol_.coll.esc", "enxcol_.coll.ecc1", "enxcol_.coll.ecoc"), - DBException, - 7406901); - ASSERT_THROWS_CODE(parseEncryptedInvalidFieldConfig( - "enxcol_.coll.esc", "enxcol_.coll.ecc", "enxcol_.coll.ecoc1"), + ASSERT_THROWS_CODE(parseEncryptedInvalidFieldConfig("enxcol_.coll.esc", "enxcol_.coll.ecoc1"), DBException, 7406902); } @@ -1293,7 +1310,7 @@ TEST_F(FleCrudTest, FindAndModify_UpdateOne) { BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); auto doc = BSON("encrypted" << "top secret"); @@ -1313,7 +1330,7 @@ TEST_F(FleCrudTest, FindAndModify_UpdateOne) { write_ops::UpdateModification(result, write_ops::UpdateModification::ModifierUpdateTag{})); doFindAndModify(req, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(1, 2, 0, 2); + assertDocumentCounts(1, 2, 2); assertECOCDocumentCountByField("encrypted", 2); validateDocument(1, @@ -1331,7 +1348,7 @@ TEST_F(FleCrudTest, FindAndModify_UpdateOneRange) { doSingleInsert(1, firstDoc.firstElement(), Fle2AlgorithmInt::kRange); - assertDocumentCounts(1, 5, 0, 5); + assertDocumentCounts(1, 5, 5); auto doc = BSON("encrypted" << 2); auto element = doc.firstElement(); @@ -1350,7 +1367,7 @@ TEST_F(FleCrudTest, FindAndModify_UpdateOneRange) { write_ops::UpdateModification(result, write_ops::UpdateModification::ModifierUpdateTag{})); doFindAndModify(req, Fle2AlgorithmInt::kRange); - assertDocumentCounts(1, 10, 0, 10); + assertDocumentCounts(1, 10, 10); assertECOCDocumentCountByField("encrypted", 10); validateDocument(1, @@ -1368,14 +1385,14 @@ TEST_F(FleCrudTest, FindAndModify_InsertAndDeleteOne) { doSingleInsert(1, element, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); write_ops::FindAndModifyCommandRequest req(_edcNs); req.setQuery(BSON("_id" << 1)); req.setRemove(true); doFindAndModify(req, Fle2AlgorithmInt::kEquality); - assertDocumentCounts(0, 1, 0, 1); + assertDocumentCounts(0, 1, 1); assertECOCDocumentCountByField("encrypted", 1); } @@ -1386,7 +1403,7 @@ TEST_F(FleCrudTest, FindAndModify_RenameSafeContent) { BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); BSONObjBuilder builder; builder.append("$inc", BSON("counter" << 1)); @@ -1412,7 +1429,7 @@ TEST_F(FleCrudTest, FindAndModify_SetSafeContent) { BSON("encrypted" << "secret")); - assertDocumentCounts(1, 1, 0, 1); + assertDocumentCounts(1, 1, 1); BSONObjBuilder builder; builder.append("$inc", BSON("counter" << 1)); @@ -1578,6 +1595,41 @@ TEST_F(FleTagsTest, ContentionFactor) { doSingleInsertWithContention(7, doc2, 4, 2, efc); doSingleInsertWithContention(8, doc2, 4, 3, efc); + { + // Test the counts of the results from individual contention factors, ensuring that + // the data stored on disk and the getTags algorithm is working correctly. + // + // This relies on the order preserving nature of the query. 
+ + auto countInfoSetDoc1 = getCountInfoSets(doc1, 4); + { + ASSERT_EQ(1, countInfoSetDoc1.size()); + + auto countInfoSet = countInfoSetDoc1[0]; + + ASSERT_EQ(5, countInfoSet.size()); + + ASSERT_EQ(2, countInfoSet[0].count); + ASSERT_EQ(0, countInfoSet[1].count); + ASSERT_EQ(0, countInfoSet[2].count); + ASSERT_EQ(1, countInfoSet[3].count); + } + + auto countInfoSetDoc2 = getCountInfoSets(doc2, 4); + { + ASSERT_EQ(1, countInfoSetDoc2.size()); + + auto countInfoSet = countInfoSetDoc2[0]; + + ASSERT_EQ(5, countInfoSet.size()); + + ASSERT_EQ(0, countInfoSet[0].count); + ASSERT_EQ(0, countInfoSet[1].count); + ASSERT_EQ(1, countInfoSet[2].count); + ASSERT_EQ(1, countInfoSet[3].count); + } + } + ASSERT_EQ(3, readTags(doc1, 4).size()); ASSERT_EQ(2, readTags(doc2, 4).size()); } diff --git a/src/mongo/db/fle_query_interface_mock.cpp b/src/mongo/db/fle_query_interface_mock.cpp index e54f18b7b15fe..c820c0aff3199 100644 --- a/src/mongo/db/fle_query_interface_mock.cpp +++ b/src/mongo/db/fle_query_interface_mock.cpp @@ -27,9 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/db/fle_query_interface_mock.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/fle_query_interface_mock.h b/src/mongo/db/fle_query_interface_mock.h index d503a5200d345..41e6614495ca3 100644 --- a/src/mongo/db/fle_query_interface_mock.h +++ b/src/mongo/db/fle_query_interface_mock.h @@ -29,9 +29,23 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/fle_crud.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/session/logical_session_id.h" namespace mongo { diff --git a/src/mongo/db/free_mon/SConscript b/src/mongo/db/free_mon/SConscript index 7b18f082cb4b6..1d22c36ba8090 100644 --- a/src/mongo/db/free_mon/SConscript +++ b/src/mongo/db/free_mon/SConscript @@ -76,6 +76,7 @@ fmEnv.CppUnitTest( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', + '$BUILD_DIR/mongo/db/op_observer/op_observer', '$BUILD_DIR/mongo/db/repl/replmocks', '$BUILD_DIR/mongo/db/repl/storage_interface_impl', '$BUILD_DIR/mongo/db/service_context_d_test_fixture', diff --git a/src/mongo/db/free_mon/free_mon_commands.cpp b/src/mongo/db/free_mon/free_mon_commands.cpp index 5d639d0355850..3c3be470cf798 100644 --- a/src/mongo/db/free_mon/free_mon_commands.cpp +++ b/src/mongo/db/free_mon/free_mon_commands.cpp @@ -27,14 +27,30 @@ * it in the license file. 
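The new block in the ContentionFactor test above asserts the per-contention-slot counts returned by `getCountInfoSets()` (doc1: 2, 0, 0, 1 across five slots; doc2: 0, 0, 1, 1), and the following `readTags()` assertions suggest those slot counts sum to the number of tags read back. A rough standalone model of that bookkeeping; `FakeEscIndex` and its layout are illustrative only, not the real ESC data format.

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <vector>

using CountInfoSet = std::vector<std::uint64_t>;  // one counter per contention slot

class FakeEscIndex {
public:
    void insert(const std::string& value, std::size_t slot, std::uint64_t cf) {
        auto& counts = _counts.try_emplace(value, CountInfoSet(cf + 1, 0)).first->second;
        counts.at(slot)++;
    }
    CountInfoSet countInfo(const std::string& value, std::uint64_t cf) const {
        auto it = _counts.find(value);
        return it == _counts.end() ? CountInfoSet(cf + 1, 0) : it->second;
    }
    std::uint64_t tagCount(const std::string& value, std::uint64_t cf) const {
        std::uint64_t total = 0;
        for (auto c : countInfo(value, cf))
            total += c;
        return total;
    }

private:
    std::map<std::string, CountInfoSet> _counts;
};

int main() {
    FakeEscIndex esc;
    // Mirrors the shape of the test: contention factor 4 means 5 slots; doc1 is
    // inserted three times (slots 0, 0, 3) and doc2 twice (slots 2, 3).
    esc.insert("doc1", 0, 4);
    esc.insert("doc1", 0, 4);
    esc.insert("doc1", 3, 4);
    esc.insert("doc2", 2, 4);
    esc.insert("doc2", 3, 4);
    bool ok = esc.tagCount("doc1", 4) == 3 && esc.tagCount("doc2", 4) == 2 &&
        esc.countInfo("doc1", 4)[0] == 2 && esc.countInfo("doc2", 4)[2] == 1;
    return ok ? 0 : 1;
}
```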
*/ -#include "mongo/platform/basic.h" - +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/free_mon/free_mon_commands_gen.h" #include "mongo/db/free_mon/free_mon_controller.h" #include "mongo/db/free_mon/free_mon_options.h" -#include "mongo/db/free_mon/free_mon_storage.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo { @@ -66,11 +82,12 @@ class GetFreeMonitoringStatusCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::checkFreeMonitoringStatus)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::checkFreeMonitoringStatus)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -84,17 +101,23 @@ class GetFreeMonitoringStatusCommand : public BasicCommand { IDLParserContext ctx("getFreeMonitoringStatus"); GetFreeMonitoringStatus::parse(ctx, cmdObj); - if (globalFreeMonParams.freeMonitoringState == EnableCloudStateEnum::kOff) { - result.append("state", "disabled"); - return true; + // FreeMonitoring has been deprecated and will be decommissioned. + // Report that FreeMon is disabled even if it's running to draw attention + // to the deprecation notice returned from the service. + result.append("state"_sd, "disabled"_sd); + + if (globalFreeMonParams.freeMonitoringState != EnableCloudStateEnum::kOff) { + // To aid discovery during the deprecation period, add true state as context.
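The getFreeMonitoringStatus change that begins above (and continues in the lines that follow) makes the command always report state: "disabled" and, when the service is in fact still enabled, attach a deprecation message plus the real status under a 'debug' subdocument. A minimal sketch of the resulting reply shape; the contents of 'debug' come from FreeMonController::getStatus(), so the "enabled" value here is only an assumption:

    // Illustration only; mirrors the BSONObjBuilder idiom used in the hunk.
    #include "mongo/bson/bsonobjbuilder.h"

    mongo::BSONObj buildDeprecatedStatusReply() {
        mongo::BSONObjBuilder result;
        result.append("state", "disabled");
        result.append("message",
                      "Free monitoring is deprecated, refer to 'debug' field for actual status");
        {
            // The sub-builder is finished when it goes out of scope.
            mongo::BSONObjBuilder debug(result.subobjStart("debug"));
            debug.append("state", "enabled");  // hypothetical true state from the controller
        }
        return result.obj();
    }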
+ auto* controller = FreeMonController::get(opCtx->getServiceContext()); + if (controller) { + result.append( + "message"_sd, + "Free monitoring is deprecated, refer to 'debug' field for actual status"_sd); + BSONObjBuilder debug(result.subobjStart("debug"_sd)); + controller->getStatus(opCtx, &debug); + } } - auto* controller = FreeMonController::get(opCtx->getServiceContext()); - if (!controller) { - result.append("state", "disabled"); - } else { - controller->getStatus(opCtx, &result); - } return true; } } getFreeMonitoringStatusCommand; @@ -123,11 +146,12 @@ class SetFreeMonitoringCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::setFreeMonitoring)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::setFreeMonitoring)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/free_mon/free_mon_commands_stub.cpp b/src/mongo/db/free_mon/free_mon_commands_stub.cpp index 6249c21f67985..d813003db7bc4 100644 --- a/src/mongo/db/free_mon/free_mon_commands_stub.cpp +++ b/src/mongo/db/free_mon/free_mon_commands_stub.cpp @@ -64,11 +64,12 @@ class GetFreeMonitoringStatusCommandStub : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::checkFreeMonitoringStatus)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::checkFreeMonitoringStatus)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/free_mon/free_mon_controller.cpp b/src/mongo/db/free_mon/free_mon_controller.cpp index d18287ecb0236..4a110e0867a5e 100644 --- a/src/mongo/db/free_mon/free_mon_controller.cpp +++ b/src/mongo/db/free_mon/free_mon_controller.cpp @@ -28,11 +28,21 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/free_mon/free_mon_controller.h" +#include +#include "mongo/db/free_mon/free_mon_controller.h" +#include "mongo/db/ftdc/collector.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/free_mon/free_mon_controller.h b/src/mongo/db/free_mon/free_mon_controller.h index f734231eeaa67..5ad1e0544343f 100644 --- a/src/mongo/db/free_mon/free_mon_controller.h +++ b/src/mongo/db/free_mon/free_mon_controller.h @@ -29,6 +29,8 @@ #pragma once +#include +#include #include #include #include @@ -36,11 +38,16 @@ #include #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/client.h" #include "mongo/db/free_mon/free_mon_message.h" #include "mongo/db/free_mon/free_mon_network.h" #include "mongo/db/free_mon/free_mon_processor.h" +#include "mongo/db/ftdc/collector.h" #include 
"mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/thread.h" #include "mongo/util/duration.h" diff --git a/src/mongo/db/free_mon/free_mon_controller_test.cpp b/src/mongo/db/free_mon/free_mon_controller_test.cpp index 92b2bbb9b5952..66cecc38355c2 100644 --- a/src/mongo/db/free_mon/free_mon_controller_test.cpp +++ b/src/mongo/db/free_mon/free_mon_controller_test.cpp @@ -28,45 +28,64 @@ */ -#include "mongo/platform/basic.h" - -#include -#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include #include #include -#include - -#include "mongo/db/free_mon/free_mon_controller.h" -#include "mongo/db/free_mon/free_mon_storage.h" +#include +#include +#include "mongo/base/data_range.h" #include "mongo/base/data_type_validated.h" -#include "mongo/bson/bson_validate.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/client.h" +#include "mongo/db/free_mon/free_mon_controller.h" #include "mongo/db/free_mon/free_mon_op_observer.h" -#include "mongo/db/ftdc/collector.h" -#include "mongo/db/ftdc/config.h" -#include "mongo/db/ftdc/constants.h" -#include "mongo/db/ftdc/controller.h" -#include "mongo/db/ftdc/ftdc_test.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/free_mon/free_mon_protocol_gen.h" +#include "mongo/db/free_mon/free_mon_storage.h" +#include "mongo/db/free_mon/free_mon_storage_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/object_check.h" -#include "mongo/unittest/barrier.h" -#include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source.h" -#include "mongo/util/hex.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/random.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/stdx/condition_variable.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/free_mon/free_mon_mongod.cpp b/src/mongo/db/free_mon/free_mon_mongod.cpp index bf247415dc837..e66264fca7cc1 100644 --- a/src/mongo/db/free_mon/free_mon_mongod.cpp +++ b/src/mongo/db/free_mon/free_mon_mongod.cpp @@ 
-28,14 +28,21 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/free_mon/free_mon_mongod.h" - +#include +#include +#include +#include +#include #include -#include +#include #include +#include +#include +#include + +#include "mongo/base/data_builder.h" +#include "mongo/base/data_range.h" #include "mongo/base/data_type_validated.h" #include "mongo/base/error_codes.h" #include "mongo/base/status.h" @@ -43,28 +50,36 @@ #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/bsontypes.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/free_mon/free_mon_controller.h" #include "mongo/db/free_mon/free_mon_message.h" +#include "mongo/db/free_mon/free_mon_mongod.h" #include "mongo/db/free_mon/free_mon_mongod_gen.h" #include "mongo/db/free_mon/free_mon_network.h" #include "mongo/db/free_mon/free_mon_op_observer.h" #include "mongo/db/free_mon/free_mon_options.h" +#include "mongo/db/free_mon/free_mon_processor.h" #include "mongo/db/free_mon/free_mon_protocol_gen.h" #include "mongo/db/free_mon/free_mon_storage.h" #include "mongo/db/ftdc/ftdc_server.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" #include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/rpc/object_check.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep #include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" #include "mongo/util/net/http_client.h" #include "mongo/util/testing_proctor.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -81,6 +96,10 @@ auto makeTaskExecutor(ServiceContext* /*serviceContext*/) { tpOptions.maxThreads = 2; tpOptions.onCreateThread = [](const std::string& threadName) { Client::initThread(threadName.c_str()); + + // TODO(SERVER-74659): Please revisit if this thread could be made killable. + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); }; return std::make_unique( std::make_unique(tpOptions), executor::makeNetworkInterface("FreeMonNet")); @@ -237,7 +256,8 @@ class FreeMonNamespaceUUIDCollector : public FreeMonCollectorInterface { for (auto& nss : _namespaces) { auto optUUID = catalog->lookupUUIDByNSS(opCtx, nss); if (optUUID) { - builder << nss.toString() << optUUID.get(); + // Always include tenant id in nss for FTDC collector. 
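The makeTaskExecutor hunk above introduces a pattern that recurs later in this patch (FreeMonProcessor::run in free_mon_processor.cpp and FTDCController::doLoop in ftdc/controller.cpp): each background thread, flagged with TODO(SERVER-74659), marks its Client as a system operation that replication stepdown may not kill. A minimal sketch of the pattern as used in the added hunks; the thread name is illustrative:

    // Illustration only; mirrors the added hunks.
    #include "mongo/db/client.h"

    void initUnkillableWorker() {
        mongo::Client::initThread("FreeMonWorker");
        mongo::Client* client = &mongo::cc();
        {
            mongo::stdx::lock_guard lk(*client);
            client->setSystemOperationUnkillableByStepdown(lk);
        }
    }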
+ builder << toStringForLogging(nss) << optUUID.get(); } } } diff --git a/src/mongo/db/free_mon/free_mon_mongod.h b/src/mongo/db/free_mon/free_mon_mongod.h index a17647c4db4c8..32f0269a9117f 100644 --- a/src/mongo/db/free_mon/free_mon_mongod.h +++ b/src/mongo/db/free_mon/free_mon_mongod.h @@ -30,8 +30,11 @@ #pragma once #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/service_context.h" #include "mongo/db/tenant_id.h" diff --git a/src/mongo/db/free_mon/free_mon_op_observer.cpp b/src/mongo/db/free_mon/free_mon_op_observer.cpp index f8126abcef3b1..9fc6232b4affa 100644 --- a/src/mongo/db/free_mon/free_mon_op_observer.cpp +++ b/src/mongo/db/free_mon/free_mon_op_observer.cpp @@ -27,13 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/free_mon/free_mon_op_observer.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/free_mon/free_mon_controller.h" +#include "mongo/db/free_mon/free_mon_op_observer.h" #include "mongo/db/free_mon/free_mon_storage.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { @@ -47,7 +52,7 @@ bool isStandaloneOrPrimary(OperationContext* opCtx) { repl::MemberState::RS_PRIMARY); } -const auto getFreeMonDeleteState = OperationContext::declareDecoration(); +const auto getFreeMonDeleteState = OplogDeleteEntryArgs::declareDecoration(); } // namespace @@ -59,7 +64,8 @@ repl::OpTime FreeMonOpObserver::onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { if (collectionName == NamespaceString::kServerConfigurationNamespace) { auto controller = FreeMonController::get(opCtx->getServiceContext()); @@ -76,7 +82,8 @@ void FreeMonOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (coll->ns() != NamespaceString::kServerConfigurationNamespace) { return; } @@ -100,7 +107,9 @@ void FreeMonOpObserver::onInserts(OperationContext* opCtx, } } -void FreeMonOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) { +void FreeMonOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.coll->ns() != NamespaceString::kServerConfigurationNamespace) { return; } @@ -120,20 +129,23 @@ void FreeMonOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntry void FreeMonOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) { + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { bool isFreeMonDoc = (coll->ns() == NamespaceString::kServerConfigurationNamespace) && (doc["_id"].str() == FreeMonStorage::kFreeMonDocIdKey); // Set a flag that indicates whether the document to be delete is the free monitoring state // document - getFreeMonDeleteState(opCtx) = isFreeMonDoc; + getFreeMonDeleteState(args) = isFreeMonDoc; } void FreeMonOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, 
StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (coll->ns() != NamespaceString::kServerConfigurationNamespace) { return; } @@ -142,7 +154,7 @@ void FreeMonOpObserver::onDelete(OperationContext* opCtx, return; } - if (getFreeMonDeleteState(opCtx) == true) { + if (getFreeMonDeleteState(args) == true) { auto controller = FreeMonController::get(opCtx->getServiceContext()); if (controller != nullptr) { @@ -151,8 +163,8 @@ void FreeMonOpObserver::onDelete(OperationContext* opCtx, } } -void FreeMonOpObserver::_onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) { +void FreeMonOpObserver::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { // Invalidate any in-memory auth data if necessary. const auto& rollbackNamespaces = rbInfo.rollbackNamespaces; if (rollbackNamespaces.count(NamespaceString::kServerConfigurationNamespace) == 1) { diff --git a/src/mongo/db/free_mon/free_mon_op_observer.h b/src/mongo/db/free_mon/free_mon_op_observer.h index f734cb77696dc..d9097c651fb75 100644 --- a/src/mongo/db/free_mon/free_mon_op_observer.h +++ b/src/mongo/db/free_mon/free_mon_op_observer.h @@ -29,7 +29,19 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -37,7 +49,7 @@ namespace mongo { * OpObserver for Free Monitoring. Observes all secondary replication traffic and filters down to * relevant entries for free monitoring. 
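The free_mon_op_observer.cpp hunks above move the "is this the free monitoring state document" flag off the OperationContext and onto the delete-specific OplogDeleteEntryArgs, so aboutToDelete() can hand state directly to the matching onDelete() call. A minimal sketch of that decoration, assuming the decorated type is bool (the template argument is not visible in this extract) and that OplogDeleteEntryArgs is declared in mongo/db/op_observer/op_observer.h:

    // Illustration only; the decorated type and header are assumptions noted above.
    #include "mongo/db/op_observer/op_observer.h"

    namespace {
    const auto getFreeMonDeleteState = mongo::OplogDeleteEntryArgs::declareDecoration<bool>();
    }  // namespace

    void markFreeMonDelete(mongo::OplogDeleteEntryArgs* args, bool isFreeMonDoc) {
        // onDelete() later reads the same flag via getFreeMonDeleteState(args).
        getFreeMonDeleteState(args) = isFreeMonDoc;
    }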
*/ -class FreeMonOpObserver final : public OpObserver { +class FreeMonOpObserver final : public OpObserverNoop { FreeMonOpObserver(const FreeMonOpObserver&) = delete; FreeMonOpObserver& operator=(const FreeMonOpObserver&) = delete; @@ -45,209 +57,42 @@ class FreeMonOpObserver final : public OpObserver { FreeMonOpObserver(); ~FreeMonOpObserver(); - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kSystem, NamespaceFilter::kSystem}; + } void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) final; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} - - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final{}; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; - void onCreateCollection(OperationContext* opCtx, - const 
CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) final {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final {} - - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) final {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const 
repl::OpTime& newCommitPoint) final {} + CollectionDropType dropType, + bool markFromMigrate) final; -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo); + void onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; }; } // namespace mongo diff --git a/src/mongo/db/free_mon/free_mon_options.cpp b/src/mongo/db/free_mon/free_mon_options.cpp index 71196ba2306e6..b66434d95aae1 100644 --- a/src/mongo/db/free_mon/free_mon_options.cpp +++ b/src/mongo/db/free_mon/free_mon_options.cpp @@ -28,16 +28,19 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/free_mon/free_mon_options.h" +#include #include "mongo/base/error_codes.h" +#include "mongo/base/initializer.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/db/free_mon/free_mon_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kFTDC @@ -46,11 +49,6 @@ namespace mongo { FreeMonParams globalFreeMonParams; -namespace optionenvironment { -class OptionSection; -class Environment; -} // namespace optionenvironment - namespace moe = mongo::optionenvironment; namespace { diff --git a/src/mongo/db/free_mon/free_mon_options.h b/src/mongo/db/free_mon/free_mon_options.h index 19f707e8b6571..79f458fb28f8c 100644 --- a/src/mongo/db/free_mon/free_mon_options.h +++ b/src/mongo/db/free_mon/free_mon_options.h @@ -29,6 +29,7 @@ #pragma once +#include #include #include diff --git a/src/mongo/db/free_mon/free_mon_processor.cpp b/src/mongo/db/free_mon/free_mon_processor.cpp index d42dc473d146d..72554ac5ef0ca 100644 --- a/src/mongo/db/free_mon/free_mon_processor.cpp +++ b/src/mongo/db/free_mon/free_mon_processor.cpp @@ -28,25 +28,38 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/free_mon/free_mon_processor.h" - +#include +#include +#include +#include +#include #include -#include +#include #include #include #include +#include +#include +#include + +#include "mongo/base/checked_cast.h" #include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/free_mon/free_mon_processor.h" #include "mongo/db/free_mon/free_mon_storage.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -165,6 +178,12 @@ void FreeMonProcessor::run() { Client::initThread("FreeMonProcessor"); Client* client = &cc(); + // TODO(SERVER-74659): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*client); + client->setSystemOperationUnkillableByStepdown(lk); + } + while (true) { auto item = _queue.dequeue(client->getServiceContext()->getPreciseClockSource()); if (!item.has_value()) { diff --git a/src/mongo/db/free_mon/free_mon_processor.h b/src/mongo/db/free_mon/free_mon_processor.h index ec7c63fef4511..a31f1fbb0a327 100644 --- a/src/mongo/db/free_mon/free_mon_processor.h +++ b/src/mongo/db/free_mon/free_mon_processor.h @@ -28,14 +28,23 @@ */ #pragma once +#include #include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include #include #include +#include #include #include #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/free_mon/free_mon_message.h" #include "mongo/db/free_mon/free_mon_network.h" @@ -45,6 +54,10 @@ #include "mongo/db/free_mon/free_mon_storage_gen.h" #include "mongo/db/ftdc/collector.h" #include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/clock_source.h" #include "mongo/util/duration.h" #include "mongo/util/future.h" diff --git a/src/mongo/db/free_mon/free_mon_queue.cpp b/src/mongo/db/free_mon/free_mon_queue.cpp index 2e3582f663e93..322257bf8d3fd 100644 --- a/src/mongo/db/free_mon/free_mon_queue.cpp +++ b/src/mongo/db/free_mon/free_mon_queue.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/db/free_mon/free_mon_queue.h" - -#include - +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/duration.h" diff --git a/src/mongo/db/free_mon/free_mon_queue.h b/src/mongo/db/free_mon/free_mon_queue.h index de401e6884110..0311d5f79b85a 100644 --- a/src/mongo/db/free_mon/free_mon_queue.h +++ b/src/mongo/db/free_mon/free_mon_queue.h @@ -30,12 +30,17 @@ #pragma once #include +#include +#include +#include #include #include #include #include #include "mongo/db/free_mon/free_mon_message.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/util/clock_source.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/free_mon/free_mon_queue_test.cpp b/src/mongo/db/free_mon/free_mon_queue_test.cpp index 275743a0fda9a..7903a94dc822d 100644 --- a/src/mongo/db/free_mon/free_mon_queue_test.cpp +++ b/src/mongo/db/free_mon/free_mon_queue_test.cpp @@ -27,17 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/client.h" #include "mongo/db/free_mon/free_mon_message.h" #include "mongo/db/free_mon/free_mon_queue.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/free_mon/free_mon_status.cpp b/src/mongo/db/free_mon/free_mon_status.cpp index 5d69473ea5e56..138b581e1eade 100644 --- a/src/mongo/db/free_mon/free_mon_status.cpp +++ b/src/mongo/db/free_mon/free_mon_status.cpp @@ -27,12 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/free_mon/free_mon_controller.h" #include "mongo/db/free_mon/free_mon_options.h" +#include "mongo/db/operation_context.h" namespace mongo { namespace { @@ -47,8 +57,9 @@ class FreeMonServerStatus : public ServerStatusSection { Status checkAuthForOperation(OperationContext* opCtx) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::checkFreeMonitoringStatus)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(as->getUserTenantId()), + ActionType::checkFreeMonitoringStatus)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/db/free_mon/free_mon_storage.cpp b/src/mongo/db/free_mon/free_mon_storage.cpp index 7c1fcd7858d9f..9f11973c8d399 100644 --- a/src/mongo/db/free_mon/free_mon_storage.cpp +++ b/src/mongo/db/free_mon/free_mon_storage.cpp @@ -29,15 +29,27 @@ #include "mongo/db/free_mon/free_mon_storage.h" +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/free_mon/free_mon_storage.h b/src/mongo/db/free_mon/free_mon_storage.h index 8dffcc44068c6..0e251d50768d1 100644 --- a/src/mongo/db/free_mon/free_mon_storage.h +++ 
b/src/mongo/db/free_mon/free_mon_storage.h @@ -30,7 +30,9 @@ #pragma once #include +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/free_mon/free_mon_storage_gen.h" #include "mongo/db/operation_context.h" diff --git a/src/mongo/db/free_mon/free_mon_storage_test.cpp b/src/mongo/db/free_mon/free_mon_storage_test.cpp index e3040b3373e0d..8a6cc2c145162 100644 --- a/src/mongo/db/free_mon/free_mon_storage_test.cpp +++ b/src/mongo/db/free_mon/free_mon_storage_test.cpp @@ -27,16 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/free_mon/free_mon_storage.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" @@ -44,8 +54,9 @@ #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/executor/network_interface_mock.h" #include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/ftdc/SConscript b/src/mongo/db/ftdc/SConscript index bf58e691ea695..856a44e419c32 100644 --- a/src/mongo/db/ftdc/SConscript +++ b/src/mongo/db/ftdc/SConscript @@ -109,6 +109,7 @@ env.CppUnitTest( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/util/clock_source_mock', 'ftdc', diff --git a/src/mongo/db/ftdc/block_compressor.cpp b/src/mongo/db/ftdc/block_compressor.cpp index 969cc5d1c774a..afaa8315717f7 100644 --- a/src/mongo/db/ftdc/block_compressor.cpp +++ b/src/mongo/db/ftdc/block_compressor.cpp @@ -27,12 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/block_compressor.h" - #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/ftdc/block_compressor.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/ftdc/block_compressor.h b/src/mongo/db/ftdc/block_compressor.h index 2b2173d09b7d2..7f44dfd1fab68 100644 --- a/src/mongo/db/ftdc/block_compressor.h +++ b/src/mongo/db/ftdc/block_compressor.h @@ -29,6 +29,7 @@ #pragma once +#include #include #include diff --git a/src/mongo/db/ftdc/collector.cpp b/src/mongo/db/ftdc/collector.cpp index 8f8af095e7dd5..06c5c832d8daf 100644 --- a/src/mongo/db/ftdc/collector.cpp +++ b/src/mongo/db/ftdc/collector.cpp @@ -27,18 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/ftdc/collector.h" +#include -#include "mongo/base/string_data.h" -#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/constants.h" -#include "mongo/db/ftdc/util.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/ftdc/compressor.cpp b/src/mongo/db/ftdc/compressor.cpp index 6c6d6688572c4..e007ddf5106dd 100644 --- a/src/mongo/db/ftdc/compressor.cpp +++ b/src/mongo/db/ftdc/compressor.cpp @@ -27,16 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/ftdc/compressor.h" +#include #include "mongo/base/data_builder.h" +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/status.h" +#include "mongo/db/ftdc/compressor.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/util.h" #include "mongo/db/ftdc/varint.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/service_context.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/ftdc/compressor.h b/src/mongo/db/ftdc/compressor.h index 6af25dc20c19a..d22482028e034 100644 --- a/src/mongo/db/ftdc/compressor.h +++ b/src/mongo/db/ftdc/compressor.h @@ -30,16 +30,20 @@ #pragma once #include +#include #include #include #include #include +#include "mongo/base/data_range.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/util/builder.h" #include "mongo/db/ftdc/block_compressor.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/jsobj.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/ftdc/compressor_test.cpp b/src/mongo/db/ftdc/compressor_test.cpp index 5009f22760f3d..85942efa60e87 100644 --- a/src/mongo/db/ftdc/compressor_test.cpp +++ b/src/mongo/db/ftdc/compressor_test.cpp @@ -27,20 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include #include +#include + +#include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/ftdc/compressor.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/decompressor.h" #include "mongo/db/ftdc/ftdc_test.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/ftdc/controller.cpp b/src/mongo/db/ftdc/controller.cpp index c2c409be1d805..467540920e39c 100644 --- a/src/mongo/db/ftdc/controller.cpp +++ b/src/mongo/db/ftdc/controller.cpp @@ -28,22 +28,30 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/controller.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/client.h" #include "mongo/db/ftdc/collector.h" +#include "mongo/db/ftdc/controller.h" #include "mongo/db/ftdc/util.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/idle_thread_block.h" -#include "mongo/util/exit.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kFTDC @@ -196,6 +204,12 @@ void FTDCController::doLoop() noexcept { Client::initThread(kFTDCThreadName); Client* client = &cc(); + // TODO(SERVER-74659): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client); + client->setSystemOperationUnkillableByStepdown(lk); + } + // Update config { stdx::lock_guard lock(_mutex); diff --git a/src/mongo/db/ftdc/controller.h b/src/mongo/db/ftdc/controller.h index f7b244e5fcc34..710fa9e6093f0 100644 --- a/src/mongo/db/ftdc/controller.h +++ b/src/mongo/db/ftdc/controller.h @@ -30,9 +30,13 @@ #pragma once #include +#include #include #include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/file_manager.h" @@ -40,6 +44,7 @@ #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/db/ftdc/controller_test.cpp b/src/mongo/db/ftdc/controller_test.cpp index b821d96065a82..d98a9da364539 100644 --- a/src/mongo/db/ftdc/controller_test.cpp +++ b/src/mongo/db/ftdc/controller_test.cpp @@ -27,27 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include -#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include #include +#include +#include +#include -#include "mongo/base/data_type_validated.h" -#include "mongo/base/init.h" -#include "mongo/bson/bson_validate.h" -#include "mongo/bson/bsonmisc.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/client.h" #include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/constants.h" #include "mongo/db/ftdc/controller.h" #include "mongo/db/ftdc/ftdc_test.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/clock_source.h" namespace mongo { diff --git a/src/mongo/db/ftdc/decompressor.cpp b/src/mongo/db/ftdc/decompressor.cpp index d8aa96b80a175..9b80e68716f77 100644 --- a/src/mongo/db/ftdc/decompressor.cpp +++ b/src/mongo/db/ftdc/decompressor.cpp @@ -27,18 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/ftdc/decompressor.h" +#include #include "mongo/base/data_range_cursor.h" +#include "mongo/base/data_type_endian.h" #include "mongo/base/data_type_validated.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/ftdc/compressor.h" +#include "mongo/db/ftdc/decompressor.h" #include "mongo/db/ftdc/util.h" #include "mongo/db/ftdc/varint.h" -#include "mongo/db/jsobj.h" -#include "mongo/rpc/object_check.h" -#include "mongo/util/assert_util.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/db/ftdc/decompressor.h b/src/mongo/db/ftdc/decompressor.h index 7481cf2b4e937..ff12df7e7479d 100644 --- a/src/mongo/db/ftdc/decompressor.h +++ b/src/mongo/db/ftdc/decompressor.h @@ -33,6 +33,7 @@ #include "mongo/base/data_range.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/ftdc/block_compressor.h" #include "mongo/db/jsobj.h" diff --git a/src/mongo/db/ftdc/file_manager.cpp b/src/mongo/db/ftdc/file_manager.cpp index 4b1286327be88..f000c7ecbfc3d 100644 --- a/src/mongo/db/ftdc/file_manager.cpp +++ b/src/mongo/db/ftdc/file_manager.cpp @@ -28,21 +28,31 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/file_manager.h" - -#include +#include +#include +#include #include #include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" #include "mongo/db/client.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/constants.h" +#include "mongo/db/ftdc/file_manager.h" #include "mongo/db/ftdc/file_reader.h" -#include "mongo/db/jsobj.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/ftdc/file_manager.h b/src/mongo/db/ftdc/file_manager.h index b7275c63ecf01..12fd68c8255f6 100644 --- a/src/mongo/db/ftdc/file_manager.h +++ b/src/mongo/db/ftdc/file_manager.h @@ -30,17 +30,22 @@ #pragma once #include +#include #include +#include #include #include #include "mongo/base/status.h" +#include 
"mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/file_writer.h" #include "mongo/db/ftdc/util.h" #include "mongo/db/jsobj.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/ftdc/file_manager_test.cpp b/src/mongo/db/ftdc/file_manager_test.cpp index 9d944e9a0a42b..c441c2369d2f4 100644 --- a/src/mongo/db/ftdc/file_manager_test.cpp +++ b/src/mongo/db/ftdc/file_manager_test.cpp @@ -28,16 +28,19 @@ */ -#include "mongo/platform/basic.h" - #include -#include +#include #include #include #include +#include + +#include +#include -#include "mongo/base/init.h" -#include "mongo/bson/bson_validate.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" @@ -46,10 +49,13 @@ #include "mongo/db/ftdc/file_manager.h" #include "mongo/db/ftdc/file_writer.h" #include "mongo/db/ftdc/ftdc_test.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/ftdc/file_reader.cpp b/src/mongo/db/ftdc/file_reader.cpp index b71257e427851..34b6bdb5465b3 100644 --- a/src/mongo/db/ftdc/file_reader.cpp +++ b/src/mongo/db/ftdc/file_reader.cpp @@ -27,20 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/file_reader.h" - -#include -#include - -#include "mongo/base/data_range_cursor.h" +#include +#include +#include // IWYU pragma: keep +#include + +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include "mongo/base/data_range.h" +#include "mongo/base/data_type_endian.h" #include "mongo/base/data_type_validated.h" -#include "mongo/bson/bsonmisc.h" -#include "mongo/db/ftdc/config.h" +#include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/ftdc/file_reader.h" #include "mongo/db/ftdc/util.h" -#include "mongo/db/jsobj.h" -#include "mongo/rpc/object_check.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/ftdc/file_reader.h b/src/mongo/db/ftdc/file_reader.h index 4ebf95de5e1f1..95d1b35b91316 100644 --- a/src/mongo/db/ftdc/file_reader.h +++ b/src/mongo/db/ftdc/file_reader.h @@ -32,14 +32,17 @@ #include #include #include -#include +#include // IWYU pragma: keep +#include #include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/ftdc/decompressor.h" #include "mongo/db/ftdc/util.h" #include "mongo/db/jsobj.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/ftdc/file_writer.cpp b/src/mongo/db/ftdc/file_writer.cpp index b0c0f4c000777..a73370ef3ddbd 100644 --- a/src/mongo/db/ftdc/file_writer.cpp +++ b/src/mongo/db/ftdc/file_writer.cpp @@ -27,20 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/file_writer.h" - -#include -#include +#include +#include +#include +#include // IWYU pragma: keep #include +#include -#include "mongo/base/string_data.h" +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/db/ftdc/compressor.h" #include "mongo/db/ftdc/config.h" +#include "mongo/db/ftdc/file_writer.h" #include "mongo/db/ftdc/util.h" -#include "mongo/db/jsobj.h" -#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/ftdc/file_writer.h b/src/mongo/db/ftdc/file_writer.h index 9a7a8fa5e7d35..fbea9c9e2b59c 100644 --- a/src/mongo/db/ftdc/file_writer.h +++ b/src/mongo/db/ftdc/file_writer.h @@ -30,14 +30,19 @@ #pragma once #include +#include #include #include -#include +#include // IWYU pragma: keep #include +#include "mongo/base/data_range.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/ftdc/compressor.h" +#include "mongo/db/ftdc/config.h" #include "mongo/db/jsobj.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/ftdc/file_writer_test.cpp b/src/mongo/db/ftdc/file_writer_test.cpp index 16118eec6ce11..9cea37d3d4349 100644 --- a/src/mongo/db/ftdc/file_writer_test.cpp +++ b/src/mongo/db/ftdc/file_writer_test.cpp @@ -27,21 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include -#include +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/file_reader.h" #include "mongo/db/ftdc/file_writer.h" #include "mongo/db/ftdc/ftdc_test.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" namespace mongo { diff --git a/src/mongo/db/ftdc/ftdc_commands.cpp b/src/mongo/db/ftdc/ftdc_commands.cpp index d211c41a888b7..4a28b97553663 100644 --- a/src/mongo/db/ftdc/ftdc_commands.cpp +++ b/src/mongo/db/ftdc/ftdc_commands.cpp @@ -27,16 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/base/init.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/ftdc/controller.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { namespace { @@ -72,12 +80,8 @@ class GetDiagnosticDataCommand final : public BasicCommand { auto* client = opCtx->getClient(); if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::serverStatus)) { - return Status(ErrorCodes::Unauthorized, "Unauthorized"); - } - - if (!AuthorizationSession::get(client)->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::replSetGetStatus)) { + ResourcePattern::forClusterResource(dbName.tenantId()), + {ActionType::serverStatus, ActionType::replSetGetStatus})) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } diff --git a/src/mongo/db/ftdc/ftdc_mongod.cpp b/src/mongo/db/ftdc/ftdc_mongod.cpp index 7632f8bb5ddf6..3a6ecc2a6fc0a 100644 --- a/src/mongo/db/ftdc/ftdc_mongod.cpp +++ b/src/mongo/db/ftdc/ftdc_mongod.cpp @@ -27,22 +27,36 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/ftdc/ftdc_mongod.h" - -#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands.h" +#include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/constants.h" #include "mongo/db/ftdc/controller.h" +#include "mongo/db/ftdc/ftdc_mongod.h" #include "mongo/db/ftdc/ftdc_mongod_gen.h" #include "mongo/db/ftdc/ftdc_server.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/storage_options.h" +#include "mongo/rpc/op_msg.h" #include "mongo/transport/transport_layer_ftdc_collector.h" #include "mongo/util/assert_util.h" +#include "mongo/util/synchronized_value.h" namespace mongo { @@ -124,7 +138,7 @@ void registerMongoDCollectors(FTDCController* controller) { << BSON_ARRAY(BSON("$collStats" << BSON( "storageStats" << BSON( "waitForLock" << false << "numericOnly" << true))))))); - if (!serverGlobalParams.clusterRole.exclusivelyHasShardRole()) { + if (!serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer)) { // GetDefaultRWConcern controller->addOnRotateCollector(std::make_unique( "getDefaultRWConcern", diff --git a/src/mongo/db/ftdc/ftdc_mongod.h b/src/mongo/db/ftdc/ftdc_mongod.h index 9108c39ee57a1..def13381dfce5 100644 --- a/src/mongo/db/ftdc/ftdc_mongod.h +++ b/src/mongo/db/ftdc/ftdc_mongod.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include #include diff --git a/src/mongo/db/ftdc/ftdc_mongos.cpp 
b/src/mongo/db/ftdc/ftdc_mongos.cpp index aeb457f38d73e..3d1d3037af327 100644 --- a/src/mongo/db/ftdc/ftdc_mongos.cpp +++ b/src/mongo/db/ftdc/ftdc_mongos.cpp @@ -27,26 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/ftdc/ftdc_mongos.h" - -#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/connpool.h" #include "mongo/client/dbclient_connection.h" #include "mongo/client/global_conn_pool.h" #include "mongo/client/replica_set_monitor_manager.h" +#include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/controller.h" +#include "mongo/db/ftdc/ftdc_mongos.h" #include "mongo/db/ftdc/ftdc_server.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/ftdc/util.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" #include "mongo/executor/connection_pool_stats.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/grid.h" -#include "mongo/stdx/thread.h" #include "mongo/transport/transport_layer_ftdc_collector.h" -#include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kFTDC diff --git a/src/mongo/db/ftdc/ftdc_server.cpp b/src/mongo/db/ftdc/ftdc_server.cpp index e5b605b0d51a4..e06ce9dfc3cd4 100644 --- a/src/mongo/db/ftdc/ftdc_server.cpp +++ b/src/mongo/db/ftdc/ftdc_server.cpp @@ -27,26 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/ftdc_server.h" - -#include -#include +#include +#include +#include // IWYU pragma: keep #include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" #include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/controller.h" +#include "mongo/db/ftdc/ftdc_server.h" #include "mongo/db/ftdc/ftdc_server_gen.h" #include "mongo/db/ftdc/ftdc_system_stats.h" -#include "mongo/db/jsobj.h" #include "mongo/db/mirror_maestro.h" #include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #include "mongo/util/synchronized_value.h" namespace mongo { diff --git a/src/mongo/db/ftdc/ftdc_server.h b/src/mongo/db/ftdc/ftdc_server.h index 161a93412eede..fe3af879ad75a 100644 --- a/src/mongo/db/ftdc/ftdc_server.h +++ b/src/mongo/db/ftdc/ftdc_server.h @@ -29,9 +29,14 @@ #pragma once +#include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" #include "mongo/db/ftdc/collector.h" @@ -39,6 +44,8 @@ #include "mongo/db/ftdc/controller.h" #include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" namespace mongo { diff --git a/src/mongo/db/ftdc/ftdc_system_stats.cpp b/src/mongo/db/ftdc/ftdc_system_stats.cpp index 15fd4e4571eed..89adbb705ee71 100644 --- a/src/mongo/db/ftdc/ftdc_system_stats.cpp +++ 
b/src/mongo/db/ftdc/ftdc_system_stats.cpp @@ -27,16 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/ftdc_system_stats.h" - #include #include "mongo/base/status.h" -#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/ftdc/collector.h" +#include "mongo/db/ftdc/ftdc_system_stats.h" namespace mongo { diff --git a/src/mongo/db/ftdc/ftdc_system_stats.h b/src/mongo/db/ftdc/ftdc_system_stats.h index c4265b061bfcc..945e3637aec1d 100644 --- a/src/mongo/db/ftdc/ftdc_system_stats.h +++ b/src/mongo/db/ftdc/ftdc_system_stats.h @@ -32,6 +32,7 @@ #include "mongo/base/status.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/controller.h" namespace mongo { diff --git a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp index 42327c7005368..a9ece9505251b 100644 --- a/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp +++ b/src/mongo/db/ftdc/ftdc_system_stats_linux.cpp @@ -27,13 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/ftdc_system_stats.h" - +#include #include #include #include +#include #include #include "mongo/base/status.h" @@ -41,6 +39,7 @@ #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/ftdc/collector.h" #include "mongo/db/ftdc/controller.h" +#include "mongo/db/ftdc/ftdc_system_stats.h" #include "mongo/util/errno_util.h" #include "mongo/util/functional.h" #include "mongo/util/processinfo.h" @@ -168,6 +167,22 @@ class LinuxSystemMetricsCollector final : public SystemMetricsCollector { &subObjBuilder); subObjBuilder.doneFast(); } + + { + BSONObjBuilder subObjBuilder(builder.subobjStart("pressure"_sd)); + processStatusErrors( + procparser::parseProcPressureFile("cpu", "/proc/pressure/cpu"_sd, &subObjBuilder), + &subObjBuilder); + + processStatusErrors(procparser::parseProcPressureFile( + "memory", "/proc/pressure/memory"_sd, &subObjBuilder), + &subObjBuilder); + + processStatusErrors( + procparser::parseProcPressureFile("io", "/proc/pressure/io"_sd, &subObjBuilder), + &subObjBuilder); + subObjBuilder.doneFast(); + } } private: diff --git a/src/mongo/db/ftdc/ftdc_test.cpp b/src/mongo/db/ftdc/ftdc_test.cpp index 43ce53e9c673f..d2dc673f6594f 100644 --- a/src/mongo/db/ftdc/ftdc_test.cpp +++ b/src/mongo/db/ftdc/ftdc_test.cpp @@ -27,23 +27,30 @@ * it in the license file. 
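Editorial aside on the ftdc_system_stats_linux.cpp hunk above: it adds collection of Linux pressure stall information (PSI) from /proc/pressure/cpu, /proc/pressure/memory, and /proc/pressure/io via procparser::parseProcPressureFile, so FTDC can correlate CPU, memory, and I/O stalls with the rest of the diagnostic data. As a minimal sketch of what those files contain, the snippet below parses one PSI line of the documented kernel format ("some avg10=0.12 avg60=0.08 avg300=0.05 total=123456"); the names PsiSample and parsePsiLine are hypothetical and this is not the procparser implementation.

```cpp
// Editorial sketch: parse one line of a Linux PSI file such as
//   "some avg10=0.12 avg60=0.08 avg300=0.05 total=123456"
// PsiSample and parsePsiLine are illustrative names only; MongoDB's real
// parsing lives in procparser::parseProcPressureFile.
#include <cstdint>
#include <map>
#include <sstream>
#include <string>

struct PsiSample {
    std::string kind;                        // "some" or "full"
    std::map<std::string, double> averages;  // avg10 / avg60 / avg300 percentages
    uint64_t totalMicros = 0;                // cumulative stall time in microseconds
};

inline PsiSample parsePsiLine(const std::string& line) {
    PsiSample sample;
    std::istringstream in(line);
    in >> sample.kind;  // leading "some" or "full"
    std::string field;
    while (in >> field) {
        auto eq = field.find('=');
        if (eq == std::string::npos)
            continue;
        std::string key = field.substr(0, eq);
        std::string value = field.substr(eq + 1);
        if (key == "total")
            sample.totalMicros = std::stoull(value);
        else
            sample.averages[key] = std::stod(value);  // avg10, avg60, avg300
    }
    return sample;
}
```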
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/ftdc_test.h" - -#include +#include +#include +#include +#include +#include #include +#include -#include "mongo/base/data_type_validated.h" -#include "mongo/base/init.h" -#include "mongo/bson/bson_validate.h" -#include "mongo/db/client.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/ftdc/file_reader.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/ftdc/ftdc_test.h" +#include "mongo/db/ftdc/util.h" #include "mongo/db/service_context.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/tick_source.h" #include "mongo/util/tick_source_mock.h" namespace mongo { diff --git a/src/mongo/db/ftdc/ftdc_test.h b/src/mongo/db/ftdc/ftdc_test.h index 902b91049875d..c99a1e4d8887f 100644 --- a/src/mongo/db/ftdc/ftdc_test.h +++ b/src/mongo/db/ftdc/ftdc_test.h @@ -28,14 +28,16 @@ */ #include +#include #include -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" +#include "mongo/db/service_context_test_fixture.h" namespace mongo { -class FTDCTest : public LockerNoopServiceContextTest { +class FTDCTest : public ServiceContextTest { public: FTDCTest(); }; diff --git a/src/mongo/db/ftdc/ftdc_util_test.cpp b/src/mongo/db/ftdc/ftdc_util_test.cpp index b50d7fa8dc1fb..568e7455dfb52 100644 --- a/src/mongo/db/ftdc/ftdc_util_test.cpp +++ b/src/mongo/db/ftdc/ftdc_util_test.cpp @@ -27,10 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/string_data.h" #include "mongo/db/ftdc/util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/ftdc/util.cpp b/src/mongo/db/ftdc/util.cpp index 811bec1c8dc36..bb84135809014 100644 --- a/src/mongo/db/ftdc/util.cpp +++ b/src/mongo/db/ftdc/util.cpp @@ -28,21 +28,31 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ftdc/util.h" - -#include - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/ftdc/config.h" #include "mongo/db/ftdc/constants.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/service_context.h" +#include "mongo/db/ftdc/util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/ftdc/util.h b/src/mongo/db/ftdc/util.h index 428a1c48a272f..eaba06321128d 100644 --- a/src/mongo/db/ftdc/util.h +++ b/src/mongo/db/ftdc/util.h @@ -30,12 +30,21 @@ #pragma once #include +#include +#include #include +#include "mongo/base/data_range.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/ftdc/decompressor.h" #include "mongo/db/jsobj.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/ftdc/varint.cpp b/src/mongo/db/ftdc/varint.cpp index 2d3cd837788f7..3afc8bdf8a64d 100644 --- a/src/mongo/db/ftdc/varint.cpp +++ b/src/mongo/db/ftdc/varint.cpp @@ -31,6 +31,9 @@ #include +#include +#include + #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/ftdc/varint_test.cpp b/src/mongo/db/ftdc/varint_test.cpp index 21b6e9548a615..6afcc78490961 100644 --- a/src/mongo/db/ftdc/varint_test.cpp +++ b/src/mongo/db/ftdc/varint_test.cpp @@ -27,17 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/base/data_builder.h" -#include "mongo/base/data_type_validated.h" -#include "mongo/base/init.h" -#include "mongo/db/client.h" -#include "mongo/db/ftdc/collector.h" -#include "mongo/db/ftdc/config.h" -#include "mongo/db/ftdc/controller.h" +#include "mongo/base/data_view.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" #include "mongo/db/ftdc/varint.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_basic_phrase_matcher.cpp b/src/mongo/db/fts/fts_basic_phrase_matcher.cpp index 8b4c373e04840..3fef8919e222f 100644 --- a/src/mongo/db/fts/fts_basic_phrase_matcher.cpp +++ b/src/mongo/db/fts/fts_basic_phrase_matcher.cpp @@ -29,6 +29,8 @@ #include "mongo/db/fts/fts_basic_phrase_matcher.h" +#include + #include "mongo/platform/strcasestr.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_basic_phrase_matcher.h b/src/mongo/db/fts/fts_basic_phrase_matcher.h index dfab41a61197a..cadcf5ce8fe64 100644 --- a/src/mongo/db/fts/fts_basic_phrase_matcher.h +++ b/src/mongo/db/fts/fts_basic_phrase_matcher.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/fts/fts_phrase_matcher.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_basic_phrase_matcher_test.cpp b/src/mongo/db/fts/fts_basic_phrase_matcher_test.cpp index 02fba7d3f25cd..bbbc35ffa6889 100644 --- a/src/mongo/db/fts/fts_basic_phrase_matcher_test.cpp +++ b/src/mongo/db/fts/fts_basic_phrase_matcher_test.cpp @@ -29,7 +29,9 @@ #include "mongo/db/fts/fts_basic_phrase_matcher.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_basic_tokenizer.cpp b/src/mongo/db/fts/fts_basic_tokenizer.cpp index 25b8c0afad1d4..2fa1b6ed49947 100644 --- a/src/mongo/db/fts/fts_basic_tokenizer.cpp +++ b/src/mongo/db/fts/fts_basic_tokenizer.cpp @@ -27,14 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/fts/fts_basic_tokenizer.h" - #include -#include "mongo/db/fts/fts_query_impl.h" -#include "mongo/db/fts/fts_spec.h" +#include "mongo/db/fts/fts_basic_tokenizer.h" #include "mongo/db/fts/stemmer.h" #include "mongo/db/fts/stop_words.h" #include "mongo/db/fts/tokenizer.h" diff --git a/src/mongo/db/fts/fts_basic_tokenizer.h b/src/mongo/db/fts/fts_basic_tokenizer.h index d3f7851034e8d..f5d773b642d5b 100644 --- a/src/mongo/db/fts/fts_basic_tokenizer.h +++ b/src/mongo/db/fts/fts_basic_tokenizer.h @@ -29,7 +29,11 @@ #pragma once +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/db/fts/fts_language.h" #include "mongo/db/fts/fts_tokenizer.h" #include "mongo/db/fts/stemmer.h" #include "mongo/db/fts/tokenizer.h" diff --git a/src/mongo/db/fts/fts_basic_tokenizer_test.cpp b/src/mongo/db/fts/fts_basic_tokenizer_test.cpp index 29ce8e9f7eedf..7602a9cc8bcce 100644 --- a/src/mongo/db/fts/fts_basic_tokenizer_test.cpp +++ b/src/mongo/db/fts/fts_basic_tokenizer_test.cpp @@ -27,9 +27,16 @@ * it in the license file. 
*/ -#include "mongo/db/fts/fts_spec.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/fts/fts_language.h" #include "mongo/db/fts/fts_tokenizer.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/fts/fts_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_element_iterator.cpp b/src/mongo/db/fts/fts_element_iterator.cpp index aa44fe8c9f423..6273c1eb8138b 100644 --- a/src/mongo/db/fts/fts_element_iterator.cpp +++ b/src/mongo/db/fts/fts_element_iterator.cpp @@ -28,11 +28,17 @@ */ #include "mongo/db/fts/fts_element_iterator.h" -#include "mongo/db/fts/fts_spec.h" -#include "mongo/db/fts/fts_util.h" -#include "mongo/util/str.h" +#include +#include #include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/fts/fts_spec.h" +#include "mongo/util/str.h" namespace mongo { @@ -109,6 +115,11 @@ FTSIteratorValue FTSElementIterator::advance() { continue; } + // SERVER-78238: fields whose name contains a dot or starts with a '$' are not indexable. + if (fieldName.find_first_of('.') != string::npos || fieldName.starts_with('$')) { + continue; + } + // Compose the dotted name of the current field: // 1. parent path empty (top level): use the current field name // 2. parent path non-empty and obj is an array: use the parent path diff --git a/src/mongo/db/fts/fts_element_iterator.h b/src/mongo/db/fts/fts_element_iterator.h index 611cf33c7fb01..e4e26ebbe8590 100644 --- a/src/mongo/db/fts/fts_element_iterator.h +++ b/src/mongo/db/fts/fts_element_iterator.h @@ -29,14 +29,15 @@ #pragma once -#include "mongo/bson/bsonobj.h" -#include "mongo/db/fts/fts_language.h" -#include "mongo/db/fts/fts_spec.h" - +#include #include #include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/fts/fts_language.h" +#include "mongo/db/fts/fts_spec.h" + namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_element_iterator_test.cpp b/src/mongo/db/fts/fts_element_iterator_test.cpp index cdb0641f5b554..0a3fc5ff76e96 100644 --- a/src/mongo/db/fts/fts_element_iterator_test.cpp +++ b/src/mongo/db/fts/fts_element_iterator_test.cpp @@ -27,11 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/db/fts/fts_element_iterator.h" -#include "mongo/db/json.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/fts/fts_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_index_format.cpp b/src/mongo/db/fts/fts_index_format.cpp index 4a1215a3c7c3a..755d5ddd1f3d8 100644 --- a/src/mongo/db/fts/fts_index_format.cpp +++ b/src/mongo/db/fts/fts_index_format.cpp @@ -27,17 +27,32 @@ * it in the license file. 
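Editorial aside on the SERVER-78238 guard added to fts_element_iterator.cpp above: field names containing a dot or starting with '$' are now skipped during text-index key generation because they are not indexable paths. A tiny self-contained illustration of the same predicate (the helper name isIndexableFieldName is hypothetical; the actual change is the inline check shown in the hunk):

```cpp
// Editorial illustration of the SERVER-78238 field-name filter (requires C++20
// for std::string::starts_with). Names with an embedded dot or a leading '$'
// are skipped by the FTS element iterator.
#include <cassert>
#include <string>

inline bool isIndexableFieldName(const std::string& fieldName) {
    return fieldName.find_first_of('.') == std::string::npos && !fieldName.starts_with('$');
}

int main() {
    assert(isIndexableFieldName("title"));   // indexed as usual
    assert(!isIndexableFieldName("a.b"));    // embedded dot: skipped
    assert(!isIndexableFieldName("$meta"));  // leading '$': skipped
    return 0;
}
```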
*/ -#include "mongo/platform/basic.h" - -#include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonelement_comparator_interface.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/fts/fts_index_format.h" #include "mongo/db/fts/fts_spec.h" -#include "mongo/db/server_options.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/util/assert_util.h" #include "mongo/util/hex.h" #include "mongo/util/md5.hpp" +#include "mongo/util/murmur3.h" #include "mongo/util/str.h" namespace mongo { @@ -53,24 +68,18 @@ namespace { BSONObj nullObj; BSONElement nullElt; -// New in textIndexVersion 2. -// If the term is longer than 32 characters, it may -// result in the generated key being too large -// for the index. In that case, we generate a 64-character key -// from the concatenation of the first 32 characters -// and the hex string of the murmur3 hash value of the entire -// term value. +// New in textIndexVersion 2. If the term is longer than 32 characters, it may result in the +// generated key being too large for the index. In that case, we generate a 64-character key from +// the concatenation of the first 32 characters and the hex string of the murmur3 hash value of the +// entire term value. const size_t termKeyPrefixLengthV2 = 32U; // 128-bit hash value expressed in hex = 32 characters const size_t termKeySuffixLengthV2 = 32U; const size_t termKeyLengthV2 = termKeyPrefixLengthV2 + termKeySuffixLengthV2; -// TextIndexVersion 3. -// If the term is longer than 256 characters, it may -// result in the generated key being too large -// for the index. In that case, we generate a 256-character key -// from the concatenation of the first 224 characters -// and the hex string of the md5 hash value of the entire +// TextIndexVersion 3. If the term is longer than 256 characters, it may result in the generated key +// being too large for the index. In that case, we generate a 256-character key from the +// concatenation of the first 224 characters and the hex string of the md5 hash value of the entire // term value. 
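Editorial aside on the rewrapped comments above: FTSIndexFormat bounds index key size by keying long terms as a fixed-length prefix plus a fixed-width hex digest of the whole term, so terms sharing a long prefix still map to distinct keys. For textIndexVersion 2 that is the first 32 characters plus the 32-hex-character murmur3-128 digest (64 characters total); version 3 uses a 224-character prefix plus a 32-character md5 digest (256 total). A hedged sketch of the general scheme follows; hash128Hex is a stand-in placeholder, not mongo's murmur3/hexblob helpers.

```cpp
// Editorial sketch of the "prefix + fixed-width hash suffix" keying scheme the
// comments above describe. hash128Hex() is a placeholder for a real 128-bit
// hash (murmur3 for v2 keys, md5 for v3 keys); it only illustrates the shape.
#include <cstdint>
#include <functional>
#include <iomanip>
#include <sstream>
#include <string>

inline std::string hash128Hex(const std::string& term) {
    // Placeholder: derive 16 bytes from std::hash and render them as 32 hex chars.
    std::ostringstream out;
    for (int i = 0; i < 2; ++i) {
        uint64_t h = std::hash<std::string>{}(term + char('0' + i));
        out << std::hex << std::setw(16) << std::setfill('0') << h;
    }
    return out.str();  // always 32 characters
}

inline std::string makeTermKey(const std::string& term, size_t prefixLen) {
    if (term.size() <= prefixLen)
        return term;  // short terms are stored verbatim
    // Long terms: bounded-length key that still distinguishes terms sharing a prefix.
    return term.substr(0, prefixLen) + hash128Hex(term);
}

// makeTermKey(term, 32) mirrors the v2 layout (32 + 32 = 64 chars);
// makeTermKey(term, 224) mirrors the v3 layout (224 + 32 = 256 chars).
```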
const size_t termKeyPrefixLengthV3 = 224U; // 128-bit hash value expressed in hex = 32 characters @@ -113,7 +122,7 @@ void FTSIndexFormat::getKeys(SharedBufferFragmentBuilder& pooledBufferBuilder, const FTSSpec& spec, const BSONObj& obj, KeyStringSet* keys, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, const boost::optional& id) { vector extrasBefore; @@ -139,7 +148,7 @@ void FTSIndexFormat::getKeys(SharedBufferFragmentBuilder& pooledBufferBuilder, const string& term = i->first; double weight = i->second; - KeyString::PooledBuilder keyString(pooledBufferBuilder, keyStringVersion, ordering); + key_string::PooledBuilder keyString(pooledBufferBuilder, keyStringVersion, ordering); for (const auto& elem : extrasBefore) { keyString.appendBSONElement(elem); } @@ -168,9 +177,9 @@ BSONObj FTSIndexFormat::getIndexKey(double weight, b.appendAs(i.next(), ""); } - KeyString::Builder keyString(KeyString::Version::kLatestVersion, KeyString::ALL_ASCENDING); + key_string::Builder keyString(key_string::Version::kLatestVersion, key_string::ALL_ASCENDING); _appendIndexKey(keyString, weight, term, textIndexVersion); - auto key = KeyString::toBson(keyString, KeyString::ALL_ASCENDING); + auto key = key_string::toBson(keyString, key_string::ALL_ASCENDING); return b.appendElements(key).obj(); } @@ -191,13 +200,10 @@ void FTSIndexFormat::_appendIndexKey(KeyStringBuilder& keyString, if (term.size() <= termKeyPrefixLengthV2) { keyString.appendString(term); } else { - union { - uint64_t hash[2]; - char data[16]; - } t; + std::array hash; uint32_t seed = 0; - MurmurHash3_x64_128(term.data(), term.size(), seed, t.hash); - string keySuffix = hexblob::encodeLower(t.data, sizeof(t.data)); + murmur3(StringData{term}, seed, hash); + string keySuffix = hexblob::encodeLower(hash.data(), hash.size()); invariant(termKeySuffixLengthV2 == keySuffix.size()); keyString.appendString(term.substr(0, termKeyPrefixLengthV2) + keySuffix); } diff --git a/src/mongo/db/fts/fts_index_format.h b/src/mongo/db/fts/fts_index_format.h index 522fd577619f0..720ae19589f97 100644 --- a/src/mongo/db/fts/fts_index_format.h +++ b/src/mongo/db/fts/fts_index_format.h @@ -29,13 +29,20 @@ #pragma once +#include +#include #include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/ordering.h" #include "mongo/db/fts/fts_util.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -49,7 +56,7 @@ class FTSIndexFormat { const FTSSpec& spec, const BSONObj& document, KeyStringSet* keys, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, const boost::optional& id = boost::none); diff --git a/src/mongo/db/fts/fts_index_format_test.cpp b/src/mongo/db/fts/fts_index_format_test.cpp index 34e180bbc3067..cf3cc458e159a 100644 --- a/src/mongo/db/fts/fts_index_format_test.cpp +++ b/src/mongo/db/fts/fts_index_format_test.cpp @@ -28,15 +28,29 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include 
"mongo/bson/bsontypes.h" #include "mongo/bson/json.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/fts/fts_index_format.h" #include "mongo/db/fts/fts_spec.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -59,12 +73,12 @@ TEST(FTSIndexFormat, Simple1) { BSON("data" << "cat sat"), &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); ASSERT_EQUALS(2U, keys.size()); for (auto& keyString : keys) { - auto key = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto key = key_string::toBson(keyString, Ordering::make(BSONObj())); ASSERT_EQUALS(2, key.nFields()); ASSERT_EQUALS(String, key.firstElement().type()); } @@ -82,11 +96,11 @@ TEST(FTSIndexFormat, ExtraBack1) { << "cat" << "x" << 5), &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); ASSERT_EQUALS(1U, keys.size()); - auto key = KeyString::toBson(*keys.begin(), Ordering::make(BSONObj())); + auto key = key_string::toBson(*keys.begin(), Ordering::make(BSONObj())); ASSERT_EQUALS(3, key.nFields()); BSONObjIterator i(key); ASSERT_EQUALS(StringData("cat"), i.next().valueStringDataSafe()); @@ -105,11 +119,11 @@ TEST(FTSIndexFormat, ExtraFront1) { << "cat" << "x" << 5), &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); ASSERT_EQUALS(1U, keys.size()); - auto key = KeyString::toBson(*keys.begin(), Ordering::make(BSONObj())); + auto key = key_string::toBson(*keys.begin(), Ordering::make(BSONObj())); ASSERT_EQUALS(3, key.nFields()); BSONObjIterator i(key); ASSERT_EQUALS(5, i.next().numberInt()); @@ -127,7 +141,7 @@ TEST(FTSIndexFormat, StopWords1) { BSON("data" << "computer"), &keys1, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); ASSERT_EQUALS(1U, keys1.size()); @@ -137,7 +151,7 @@ TEST(FTSIndexFormat, StopWords1) { BSON("data" << "any computer"), &keys2, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); ASSERT_EQUALS(1U, keys2.size()); } @@ -149,7 +163,7 @@ TEST(FTSIndexFormat, StopWords1) { void assertEqualsIndexKeys(std::set& expectedKeys, const KeyStringSet& keys) { ASSERT_EQUALS(expectedKeys.size(), keys.size()); for (auto& keyString : keys) { - auto key = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto key = key_string::toBson(keyString, Ordering::make(BSONObj())); ASSERT_EQUALS(2, key.nFields()); ASSERT_EQUALS(String, key.firstElement().type()); string s = key.firstElement().String(); @@ -187,7 +201,7 @@ TEST(FTSIndexFormat, LongWordsTextIndexVersion1) { spec, BSON("data" << text), &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); // Hard-coded expected computed keys for future-proofing. @@ -224,7 +238,7 @@ TEST(FTSIndexFormat, LongWordTextIndexVersion2) { spec, BSON("data" << text), &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); // Hard-coded expected computed keys for future-proofing. 
@@ -261,7 +275,7 @@ TEST(FTSIndexFormat, LongWordTextIndexVersion3) { spec, BSON("data" << text), &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); // Hard-coded expected computed keys for future-proofing. @@ -290,7 +304,7 @@ TEST(FTSIndexFormat, GetKeysWithLeadingEmptyArrayThrows) { spec, objToIndex, &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())), AssertionException, ErrorCodes::CannotBuildIndexKeys); @@ -306,7 +320,7 @@ TEST(FTSIndexFormat, GetKeysWithTrailingEmptyArrayThrows) { spec, objToIndex, &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())), AssertionException, ErrorCodes::CannotBuildIndexKeys); @@ -322,7 +336,7 @@ TEST(FTSIndexFormat, GetKeysWithLeadingSingleElementArrayThrows) { spec, objToIndex, &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())), AssertionException, ErrorCodes::CannotBuildIndexKeys); @@ -338,7 +352,7 @@ TEST(FTSIndexFormat, GetKeysWithTrailingSingleElementArrayThrows) { spec, objToIndex, &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())), AssertionException, ErrorCodes::CannotBuildIndexKeys); @@ -354,7 +368,7 @@ TEST(FTSIndexFormat, GetKeysWithMultiElementArrayThrows) { spec, objToIndex, &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())), AssertionException, ErrorCodes::CannotBuildIndexKeys); @@ -370,12 +384,12 @@ TEST(FTSIndexFormat, GetKeysWithPositionalPathAllowed) { spec, objToIndex, &keys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); ASSERT_EQ(2U, keys.size()); { - auto key = KeyString::toBson(*keys.begin(), Ordering::make(BSONObj())); + auto key = key_string::toBson(*keys.begin(), Ordering::make(BSONObj())); ASSERT_EQ(3, key.nFields()); BSONObjIterator it{key}; ASSERT_BSONELT_EQ(it.next(), fromjson("{'': {b: 'foo'}}").firstElement()); @@ -384,7 +398,7 @@ TEST(FTSIndexFormat, GetKeysWithPositionalPathAllowed) { { auto next = ++keys.begin(); - auto key = KeyString::toBson(*next, Ordering::make(BSONObj())); + auto key = key_string::toBson(*next, Ordering::make(BSONObj())); ASSERT_EQ(3, key.nFields()); BSONObjIterator it{key}; ASSERT_BSONELT_EQ(it.next(), fromjson("{'': {b: 'foo'}}").firstElement()); diff --git a/src/mongo/db/fts/fts_language.cpp b/src/mongo/db/fts/fts_language.cpp index 654817cd26134..ea17340bba2a1 100644 --- a/src/mongo/db/fts/fts_language.cpp +++ b/src/mongo/db/fts/fts_language.cpp @@ -31,17 +31,16 @@ #include #include +#include #include #include #include #include -#include #include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" -#include "mongo/db/fts/fts_basic_phrase_matcher.h" #include "mongo/db/fts/fts_basic_tokenizer.h" -#include "mongo/db/fts/fts_unicode_phrase_matcher.h" +#include "mongo/db/fts/fts_tokenizer.h" #include "mongo/db/fts/fts_unicode_tokenizer.h" #include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" diff --git a/src/mongo/db/fts/fts_language.h b/src/mongo/db/fts/fts_language.h index 74c2b2a8cb5ad..465b4e9c1fe52 100644 --- a/src/mongo/db/fts/fts_language.h +++ b/src/mongo/db/fts/fts_language.h @@ -34,6 +34,7 @@ #include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/fts/fts_basic_phrase_matcher.h" #include 
"mongo/db/fts/fts_phrase_matcher.h" #include "mongo/db/fts/fts_unicode_phrase_matcher.h" diff --git a/src/mongo/db/fts/fts_language_test.cpp b/src/mongo/db/fts/fts_language_test.cpp index a9e8fdd3a245c..412578e5557ce 100644 --- a/src/mongo/db/fts/fts_language_test.cpp +++ b/src/mongo/db/fts/fts_language_test.cpp @@ -28,9 +28,12 @@ */ #include "mongo/db/fts/fts_language.h" -#include "mongo/db/fts/fts_spec.h" -#include "mongo/platform/basic.h" -#include "mongo/unittest/unittest.h" + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_matcher.cpp b/src/mongo/db/fts/fts_matcher.cpp index a248d9600d84e..1839df4b16c0a 100644 --- a/src/mongo/db/fts/fts_matcher.cpp +++ b/src/mongo/db/fts/fts_matcher.cpp @@ -27,12 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/fts/fts_element_iterator.h" #include "mongo/db/fts/fts_matcher.h" #include "mongo/db/fts/fts_phrase_matcher.h" #include "mongo/db/fts/fts_tokenizer.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_matcher.h b/src/mongo/db/fts/fts_matcher.h index 8698ba0648d59..b942b23b75e8a 100644 --- a/src/mongo/db/fts/fts_matcher.h +++ b/src/mongo/db/fts/fts_matcher.h @@ -29,6 +29,11 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/fts/fts_language.h" #include "mongo/db/fts/fts_query_impl.h" #include "mongo/db/fts/fts_spec.h" #include "mongo/db/fts/fts_tokenizer.h" diff --git a/src/mongo/db/fts/fts_matcher_test.cpp b/src/mongo/db/fts/fts_matcher_test.cpp index 46c292ce55a75..40fd2a96946e6 100644 --- a/src/mongo/db/fts/fts_matcher_test.cpp +++ b/src/mongo/db/fts/fts_matcher_test.cpp @@ -27,10 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/fts/fts_matcher.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/fts/fts_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_query_impl.cpp b/src/mongo/db/fts/fts_query_impl.cpp index c5b7ed85c0f59..6c081d145e4ed 100644 --- a/src/mongo/db/fts/fts_query_impl.cpp +++ b/src/mongo/db/fts/fts_query_impl.cpp @@ -27,16 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/fts/fts_query_impl.h" - +#include #include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/fts/fts_language.h" +#include "mongo/db/fts/fts_query_impl.h" #include "mongo/db/fts/fts_query_parser.h" -#include "mongo/db/fts/fts_spec.h" #include "mongo/db/fts/fts_tokenizer.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_query_impl.h b/src/mongo/db/fts/fts_query_impl.h index 06d400b5a1990..dac355f3c4b18 100644 --- a/src/mongo/db/fts/fts_query_impl.h +++ b/src/mongo/db/fts/fts_query_impl.h @@ -29,11 +29,16 @@ #pragma once +#include +#include #include #include #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/fts/fts_query.h" +#include "mongo/db/fts/fts_util.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_query_impl_test.cpp b/src/mongo/db/fts/fts_query_impl_test.cpp index b3b4cad71f1b2..13c6687dddf8c 100644 --- a/src/mongo/db/fts/fts_query_impl_test.cpp +++ b/src/mongo/db/fts/fts_query_impl_test.cpp @@ -27,11 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" #include "mongo/bson/json.h" #include "mongo/db/fts/fts_query_impl.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_query_noop.cpp b/src/mongo/db/fts/fts_query_noop.cpp index c11fdecdab07c..d68aa0eb84728 100644 --- a/src/mongo/db/fts/fts_query_noop.cpp +++ b/src/mongo/db/fts/fts_query_noop.cpp @@ -27,12 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/db/fts/fts_query_noop.h" -#include - namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_query_noop.h b/src/mongo/db/fts/fts_query_noop.h index 58b9411593374..62e0eccad2390 100644 --- a/src/mongo/db/fts/fts_query_noop.h +++ b/src/mongo/db/fts/fts_query_noop.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/base/status.h" #include "mongo/db/fts/fts_query.h" +#include "mongo/db/fts/fts_util.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_query_noop_test.cpp b/src/mongo/db/fts/fts_query_noop_test.cpp index 376d532051fbf..14dabeb6f31f5 100644 --- a/src/mongo/db/fts/fts_query_noop_test.cpp +++ b/src/mongo/db/fts/fts_query_noop_test.cpp @@ -27,10 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" #include "mongo/db/fts/fts_query_noop.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_query_parser.cpp b/src/mongo/db/fts/fts_query_parser.cpp index c6038be457543..db34056ecd2ba 100644 --- a/src/mongo/db/fts/fts_query_parser.cpp +++ b/src/mongo/db/fts/fts_query_parser.cpp @@ -27,10 +27,8 @@ * it in the license file. */ -#include - #include "mongo/db/fts/fts_query_parser.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_spec.cpp b/src/mongo/db/fts/fts_spec.cpp index 64b333d5c258d..2ea96a4983960 100644 --- a/src/mongo/db/fts/fts_spec.cpp +++ b/src/mongo/db/fts/fts_spec.cpp @@ -27,16 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/fts/fts_spec.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/field_ref.h" #include "mongo/db/fts/fts_element_iterator.h" +#include "mongo/db/fts/fts_spec.h" #include "mongo/db/fts/fts_tokenizer.h" #include "mongo/db/fts/fts_util.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -118,23 +127,23 @@ FTSSpec::FTSSpec(const BSONObj& indexInfo) { BSONObjIterator i(indexInfo["weights"].Obj()); while (i.more()) { BSONElement e = i.next(); - verify(e.isNumber()); + MONGO_verify(e.isNumber()); if (WILDCARD == e.fieldName()) { _wildcard = true; } else { double num = e.number(); _weights[e.fieldName()] = num; - verify(num > 0 && num < MAX_WORD_WEIGHT); + MONGO_verify(num > 0 && num < MAX_WORD_WEIGHT); } } - verify(_wildcard || _weights.size()); + MONGO_verify(_wildcard || _weights.size()); } // extra information { BSONObj keyPattern = indexInfo["key"].Obj(); - verify(keyPattern.nFields() >= 2); + MONGO_verify(keyPattern.nFields() >= 2); BSONObjIterator i(keyPattern); bool passedFTS = false; @@ -228,7 +237,7 @@ void FTSSpec::_scoreStringV2(FTSTokenizer* tokenizer, double& score = (*docScores)[term]; score += (weight * data.freq * coeff * adjustment); - verify(score <= MAX_WEIGHT); + MONGO_verify(score <= MAX_WEIGHT); } } @@ -316,7 +325,7 @@ StatusWith FTSSpec::fixSpec(const BSONObj& spec) { b.append(e); } } - verify(addedFtsStuff); + MONGO_verify(addedFtsStuff); } keyPattern = b.obj(); @@ -324,7 +333,7 @@ StatusWith FTSSpec::fixSpec(const BSONObj& spec) { // fields, then extraAfter fields. { BSONObjIterator i(spec["key"].Obj()); - verify(i.more()); + MONGO_verify(i.more()); BSONElement e = i.next(); // extraBefore fields diff --git a/src/mongo/db/fts/fts_spec.h b/src/mongo/db/fts/fts_spec.h index dbc67857b82a8..b809822ec59ab 100644 --- a/src/mongo/db/fts/fts_spec.h +++ b/src/mongo/db/fts/fts_spec.h @@ -29,11 +29,15 @@ #pragma once +#include #include #include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/fts/fts_language.h" #include "mongo/db/fts/fts_util.h" #include "mongo/db/fts/stemmer.h" diff --git a/src/mongo/db/fts/fts_spec_legacy.cpp b/src/mongo/db/fts/fts_spec_legacy.cpp index 8d88f9a772ab7..e4b478d139b3c 100644 --- a/src/mongo/db/fts/fts_spec_legacy.cpp +++ b/src/mongo/db/fts/fts_spec_legacy.cpp @@ -27,9 +27,28 @@ * it in the license file. 
*/ -#include "mongo/db/fts/fts_spec.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/fts/fts_language.h" +#include "mongo/db/fts/fts_spec.h" +#include "mongo/db/fts/fts_util.h" +#include "mongo/db/fts/stemmer.h" +#include "mongo/db/fts/stop_words.h" +#include "mongo/db/fts/tokenizer.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -112,7 +131,7 @@ void FTSSpec::_scoreStringV1(const Tools& tools, double& score = (*docScores)[term]; score += (weight * data.freq * coeff * adjustment); - verify(score <= MAX_WEIGHT); + MONGO_verify(score <= MAX_WEIGHT); } } diff --git a/src/mongo/db/fts/fts_spec_test.cpp b/src/mongo/db/fts/fts_spec_test.cpp index 047968f254144..98afde000e484 100644 --- a/src/mongo/db/fts/fts_spec_test.cpp +++ b/src/mongo/db/fts/fts_spec_test.cpp @@ -27,11 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/fts/fts_spec.h" -#include "mongo/db/json.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_unicode_phrase_matcher.cpp b/src/mongo/db/fts/fts_unicode_phrase_matcher.cpp index 97e80ba4f1621..921116b98510c 100644 --- a/src/mongo/db/fts/fts_unicode_phrase_matcher.cpp +++ b/src/mongo/db/fts/fts_unicode_phrase_matcher.cpp @@ -29,7 +29,6 @@ #include "mongo/db/fts/fts_unicode_phrase_matcher.h" -#include "mongo/db/fts/fts_language.h" #include "mongo/db/fts/unicode/string.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_unicode_phrase_matcher.h b/src/mongo/db/fts/fts_unicode_phrase_matcher.h index de3ad3c11c8d8..f226e33eb9047 100644 --- a/src/mongo/db/fts/fts_unicode_phrase_matcher.h +++ b/src/mongo/db/fts/fts_unicode_phrase_matcher.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/fts/fts_phrase_matcher.h" #include "mongo/db/fts/unicode/codepoints.h" diff --git a/src/mongo/db/fts/fts_unicode_phrase_matcher_test.cpp b/src/mongo/db/fts/fts_unicode_phrase_matcher_test.cpp index 186395f677e05..93f6b2d20482e 100644 --- a/src/mongo/db/fts/fts_unicode_phrase_matcher_test.cpp +++ b/src/mongo/db/fts/fts_unicode_phrase_matcher_test.cpp @@ -28,7 +28,10 @@ */ #include "mongo/db/fts/fts_unicode_phrase_matcher.h" -#include "mongo/unittest/unittest.h" + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_unicode_tokenizer.cpp b/src/mongo/db/fts/fts_unicode_tokenizer.cpp index 5254e063c83c2..a626f72649e70 100644 --- a/src/mongo/db/fts/fts_unicode_tokenizer.cpp +++ b/src/mongo/db/fts/fts_unicode_tokenizer.cpp @@ -27,18 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/db/fts/fts_unicode_tokenizer.h" - -#include - -#include "mongo/db/fts/fts_query_impl.h" -#include "mongo/db/fts/fts_spec.h" #include "mongo/db/fts/stemmer.h" #include "mongo/db/fts/stop_words.h" -#include "mongo/db/fts/tokenizer.h" -#include "mongo/util/str.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/fts_unicode_tokenizer.h b/src/mongo/db/fts/fts_unicode_tokenizer.h index 0745e11ad072d..1f57cdf15a0d9 100644 --- a/src/mongo/db/fts/fts_unicode_tokenizer.h +++ b/src/mongo/db/fts/fts_unicode_tokenizer.h @@ -29,10 +29,15 @@ #pragma once +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/fts/fts_language.h" #include "mongo/db/fts/fts_tokenizer.h" #include "mongo/db/fts/stemmer.h" #include "mongo/db/fts/tokenizer.h" +#include "mongo/db/fts/unicode/codepoints.h" #include "mongo/db/fts/unicode/string.h" namespace mongo { diff --git a/src/mongo/db/fts/fts_unicode_tokenizer_test.cpp b/src/mongo/db/fts/fts_unicode_tokenizer_test.cpp index 9499149ad19d5..0538f3e972d21 100644 --- a/src/mongo/db/fts/fts_unicode_tokenizer_test.cpp +++ b/src/mongo/db/fts/fts_unicode_tokenizer_test.cpp @@ -27,11 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/db/fts/fts_language.h" #include "mongo/db/fts/fts_unicode_tokenizer.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/fts/fts_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/stemmer.cpp b/src/mongo/db/fts/stemmer.cpp index 9940b838ba768..d02f71e9dd264 100644 --- a/src/mongo/db/fts/stemmer.cpp +++ b/src/mongo/db/fts/stemmer.cpp @@ -33,6 +33,8 @@ #include #include +#include + #include "mongo/util/assert_util.h" namespace mongo::fts { diff --git a/src/mongo/db/fts/stemmer_test.cpp b/src/mongo/db/fts/stemmer_test.cpp index b95e0949f1fce..de2271ba5e3e0 100644 --- a/src/mongo/db/fts/stemmer_test.cpp +++ b/src/mongo/db/fts/stemmer_test.cpp @@ -28,10 +28,10 @@ */ -#include "mongo/unittest/unittest.h" - -#include "mongo/db/fts/fts_spec.h" +#include "mongo/db/fts/fts_util.h" #include "mongo/db/fts/stemmer.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/stop_words.cpp b/src/mongo/db/fts/stop_words.cpp index 9c415a2d2628e..b41db38fb25da 100644 --- a/src/mongo/db/fts/stop_words.cpp +++ b/src/mongo/db/fts/stop_words.cpp @@ -27,12 +27,17 @@ * it in the license file. 
*/ +#include +#include #include #include +#include -#include "mongo/db/fts/stop_words.h" +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/db/fts/stop_words.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/fts/stop_words.h b/src/mongo/db/fts/stop_words.h index 6c1c1cc07e1e2..83e577b672ce5 100644 --- a/src/mongo/db/fts/stop_words.h +++ b/src/mongo/db/fts/stop_words.h @@ -30,9 +30,13 @@ #pragma once +#include #include #include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/fts/fts_language.h" #include "mongo/util/string_map.h" diff --git a/src/mongo/db/fts/stop_words_test.cpp b/src/mongo/db/fts/stop_words_test.cpp index f35f350af3575..fcccbf4422468 100644 --- a/src/mongo/db/fts/stop_words_test.cpp +++ b/src/mongo/db/fts/stop_words_test.cpp @@ -27,11 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/fts/fts_spec.h" +#include "mongo/db/fts/fts_util.h" #include "mongo/db/fts/stop_words.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/tokenizer.cpp b/src/mongo/db/fts/tokenizer.cpp index 1463dc212bf04..28cea0ae7b994 100644 --- a/src/mongo/db/fts/tokenizer.cpp +++ b/src/mongo/db/fts/tokenizer.cpp @@ -30,7 +30,7 @@ #include #include "mongo/db/fts/tokenizer.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/fts/tokenizer_test.cpp b/src/mongo/db/fts/tokenizer_test.cpp index db0a1c272afc0..38bb3abd692b9 100644 --- a/src/mongo/db/fts/tokenizer_test.cpp +++ b/src/mongo/db/fts/tokenizer_test.cpp @@ -27,11 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/fts/fts_spec.h" +#include "mongo/db/fts/fts_util.h" #include "mongo/db/fts/tokenizer.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace fts { diff --git a/src/mongo/db/fts/unicode/byte_vector.h b/src/mongo/db/fts/unicode/byte_vector.h index 13886d16ff33a..62dc7e61ba72f 100644 --- a/src/mongo/db/fts/unicode/byte_vector.h +++ b/src/mongo/db/fts/unicode/byte_vector.h @@ -33,11 +33,11 @@ // TODO replace this with #if BOOST_HW_SIMD_X86 >= BOOST_HW_SIMD_X86_SSE2_VERSION in boost 1.60 #if defined(_M_AMD64) || defined(__amd64__) -#include "mongo/db/fts/unicode/byte_vector_sse2.h" +#include "mongo/db/fts/unicode/byte_vector_sse2.h" // IWYU pragma: export #elif defined(__powerpc64__) -#include "mongo/db/fts/unicode/byte_vector_altivec.h" +#include "mongo/db/fts/unicode/byte_vector_altivec.h" // IWYU pragma: export #elif defined(__aarch64__) -#include "mongo/db/fts/unicode/byte_vector_neon.h" -#else // Other platforms go above here. +#include "mongo/db/fts/unicode/byte_vector_neon.h" // IWYU pragma: export +#else // Other platforms go above here. #undef MONGO_HAVE_FAST_BYTE_VECTOR #endif diff --git a/src/mongo/db/fts/unicode/byte_vector_test.cpp b/src/mongo/db/fts/unicode/byte_vector_test.cpp index 0670da2ac33aa..44ecc04001005 100644 --- a/src/mongo/db/fts/unicode/byte_vector_test.cpp +++ b/src/mongo/db/fts/unicode/byte_vector_test.cpp @@ -27,12 +27,15 @@ * it in the license file. 
*/ +#include #include #include #include +#include "mongo/base/string_data.h" #include "mongo/db/fts/unicode/byte_vector.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #ifdef MONGO_HAVE_FAST_BYTE_VECTOR namespace mongo { diff --git a/src/mongo/db/fts/unicode/codepoints_test.cpp b/src/mongo/db/fts/unicode/codepoints_test.cpp index 7706963317d92..36ab23994a1a5 100644 --- a/src/mongo/db/fts/unicode/codepoints_test.cpp +++ b/src/mongo/db/fts/unicode/codepoints_test.cpp @@ -28,7 +28,13 @@ */ #include "mongo/db/fts/unicode/codepoints.h" -#include "mongo/unittest/unittest.h" + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace unicode { diff --git a/src/mongo/db/fts/unicode/string.cpp b/src/mongo/db/fts/unicode/string.cpp index b94d9fcef13f1..f23c2ba642333 100644 --- a/src/mongo/db/fts/unicode/string.cpp +++ b/src/mongo/db/fts/unicode/string.cpp @@ -32,7 +32,11 @@ #include #include #include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/fts/unicode/byte_vector.h" #include "mongo/platform/bits.h" #include "mongo/shell/linenoise_utf8.h" diff --git a/src/mongo/db/fts/unicode/string.h b/src/mongo/db/fts/unicode/string.h index 5bf98e2bf8db3..43ed31793c02c 100644 --- a/src/mongo/db/fts/unicode/string.h +++ b/src/mongo/db/fts/unicode/string.h @@ -29,12 +29,14 @@ #pragma once +#include #include #include #include #include "mongo/base/string_data.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/fts/unicode/codepoints.h" namespace mongo { diff --git a/src/mongo/db/fts/unicode/string_test.cpp b/src/mongo/db/fts/unicode/string_test.cpp index 00931a22a1032..59f26ad494086 100644 --- a/src/mongo/db/fts/unicode/string_test.cpp +++ b/src/mongo/db/fts/unicode/string_test.cpp @@ -27,13 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/db/fts/unicode/string.h" #include "mongo/shell/linenoise_utf8.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep #ifdef MSC_VER // Microsoft VS 2013 does not handle UTF-8 strings in char literal strings, error C4566 diff --git a/src/mongo/db/geo/SConscript b/src/mongo/db/geo/SConscript index e43299f0d2a85..9e46510b3347d 100644 --- a/src/mongo/db/geo/SConscript +++ b/src/mongo/db/geo/SConscript @@ -40,3 +40,11 @@ env.CppUnitTest( "geoparser", ], ) + +env.Benchmark( + target='hash_bm', + source=[ + 'hash_bm.cpp', + ], + LIBDEPS=['geometry'], +) diff --git a/src/mongo/db/geo/big_polygon.cpp b/src/mongo/db/geo/big_polygon.cpp index c4cffc35cea4a..f57fd567c3593 100644 --- a/src/mongo/db/geo/big_polygon.cpp +++ b/src/mongo/db/geo/big_polygon.cpp @@ -29,8 +29,18 @@ #include "mongo/db/geo/big_polygon.h" -#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include "mongo/util/assert_util.h" #include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" diff --git a/src/mongo/db/geo/big_polygon.h b/src/mongo/db/geo/big_polygon.h index 03b30ef831985..f83c6242a6d6c 100644 --- a/src/mongo/db/geo/big_polygon.h +++ b/src/mongo/db/geo/big_polygon.h @@ -29,16 +29,17 @@ #pragma once +#include +#include #include #include +#include #include #include #include #include #include -#include "mongo/db/geo/s2.h" - namespace mongo { // Simple GeoJSON polygon with a custom CRS identifier as having a strict winding order. diff --git a/src/mongo/db/geo/big_polygon_test.cpp b/src/mongo/db/geo/big_polygon_test.cpp index d02de8846fb9f..9889ddca06a33 100644 --- a/src/mongo/db/geo/big_polygon_test.cpp +++ b/src/mongo/db/geo/big_polygon_test.cpp @@ -29,9 +29,21 @@ #include "mongo/db/geo/big_polygon.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/util/builder.h" -#include "mongo/unittest/unittest.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/geo/geometry_container.cpp b/src/mongo/db/geo/geometry_container.cpp index 21bbad45e4132..3827a1546a99d 100644 --- a/src/mongo/db/geo/geometry_container.cpp +++ b/src/mongo/db/geo/geometry_container.cpp @@ -30,9 +30,38 @@ #include "mongo/db/geo/geometry_container.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement_comparator_interface.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/geo/big_polygon.h" #include "mongo/db/geo/geoconstants.h" #include "mongo/db/geo/geoparser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" @@ -165,7 +194,7 @@ static Point 
toLngLatPoint(const S2Point& s2Point) { static void lineR2Bounds(const S2Polyline& flatLine, Box* flatBounds) { int numVertices = flatLine.num_vertices(); - verify(flatLine.num_vertices() > 0); + MONGO_verify(flatLine.num_vertices() > 0); flatBounds->init(toLngLatPoint(flatLine.vertex(0)), toLngLatPoint(flatLine.vertex(0))); @@ -180,7 +209,7 @@ static void circleR2Bounds(const Circle& circle, Box* flatBounds) { } static void multiPointR2Bounds(const vector& points, Box* flatBounds) { - verify(!points.empty()); + MONGO_verify(!points.empty()); flatBounds->init(toLngLatPoint(points.front()), toLngLatPoint(points.front())); @@ -217,15 +246,15 @@ Box GeometryContainer::R2BoxRegion::buildBounds(const GeometryContainer& geometr } else if (geometry._multiPoint && FLAT == geometry._multiPoint->crs) { multiPointR2Bounds(geometry._multiPoint->points, &bounds); } else if (geometry._multiLine && FLAT == geometry._multiLine->crs) { - verify(false); + MONGO_verify(false); } else if (geometry._multiPolygon && FLAT == geometry._multiPolygon->crs) { - verify(false); + MONGO_verify(false); } else if (geometry._geometryCollection) { - verify(false); + MONGO_verify(false); } else if (geometry.hasS2Region()) { // For now, just support spherical cap for $centerSphere and GeoJSON points - verify((geometry._cap && FLAT != geometry._cap->crs) || - (geometry._point && FLAT != geometry._point->crs)); + MONGO_verify((geometry._cap && FLAT != geometry._cap->crs) || + (geometry._point && FLAT != geometry._point->crs)); s2RegionR2Bounds(geometry.getS2Region(), &bounds); } @@ -291,7 +320,7 @@ bool GeometryContainer::contains(const GeometryContainer& otherContainer) const } if (nullptr != _box) { - verify(FLAT == _box->crs); + MONGO_verify(FLAT == _box->crs); if (nullptr == otherContainer._point) { return false; } diff --git a/src/mongo/db/geo/geometry_container.h b/src/mongo/db/geo/geometry_container.h index b785504cdc35f..17fbc58f0e2d8 100644 --- a/src/mongo/db/geo/geometry_container.h +++ b/src/mongo/db/geo/geometry_container.h @@ -29,10 +29,21 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include #include #include +#include #include "mongo/base/clonable_ptr.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/geo/shapes.h" diff --git a/src/mongo/db/geo/geoparser.cpp b/src/mongo/db/geo/geoparser.cpp index f1cabdf481561..26c892d579738 100644 --- a/src/mongo/db/geo/geoparser.cpp +++ b/src/mongo/db/geo/geoparser.cpp @@ -30,15 +30,34 @@ #include "mongo/db/geo/geoparser.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include #include -#include +#include #include #include +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/geo/big_polygon.h" #include "mongo/db/geo/shapes.h" -#include "mongo/db/jsobj.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" diff --git a/src/mongo/db/geo/geoparser.h b/src/mongo/db/geo/geoparser.h index 4d4d11852358b..fb1bcd3e520e7 100644 --- a/src/mongo/db/geo/geoparser.h +++ b/src/mongo/db/geo/geoparser.h @@ -29,6 +29,12 @@ #pragma once +#include +#include + +#include 
"mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/geo/shapes.h" #include "mongo/db/jsobj.h" diff --git a/src/mongo/db/geo/geoparser_test.cpp b/src/mongo/db/geo/geoparser_test.cpp index 01eba23667a13..9f88ae4afee81 100644 --- a/src/mongo/db/geo/geoparser_test.cpp +++ b/src/mongo/db/geo/geoparser_test.cpp @@ -31,15 +31,23 @@ * This file contains tests for mongo/db/geo/geoparser.cpp. */ -#include +#include +#include +#include #include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/geo/geoparser.h" #include "mongo/db/geo/shapes.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" // Wrap a BSON object to a BSON element. #define BSON_ELT(bson) BSON("" << (bson)).firstElement() diff --git a/src/mongo/db/geo/hash.cpp b/src/mongo/db/geo/hash.cpp index 86f47847b2efb..589561ec06b0e 100644 --- a/src/mongo/db/geo/hash.cpp +++ b/src/mongo/db/geo/hash.cpp @@ -28,14 +28,32 @@ */ #include "mongo/db/geo/hash.h" -#include "mongo/config.h" -#include "mongo/db/field_parser.h" -#include "mongo/db/geo/shapes.h" -#include "mongo/db/jsobj.h" -#include "mongo/util/str.h" #include // for max() +#include +#include +#include #include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/field_parser.h" +#include "mongo/db/geo/shapes.h" +#include "mongo/platform/endian.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -173,10 +191,31 @@ std::uint64_t interleaveWithZeros(std::uint32_t input) { word = (word ^ (word << 1)) & 0x5555555555555555; return word; } + +std::uint32_t deinterleaveZeros(std::uint64_t input) { + // The following example is an extension to 32-bits of the following bit manipulation for + // 16-bit numbers. Note that the following operations are a result of applying the inverse of + // interleaveWithZeros operations. 
+ // + // 0a0b 0c0d 0e0f 0g0h 0i0j 0k0l 0m0n 0o0p + // -> 00ab 00cd 00ef 00gh 00ij 00kl 00mn 00op + // -> 0000 abcd 0000 efgh 0000 ijkl 0000 mnop + // -> 0000 0000 abcd efgh 0000 0000 ijkl mnop + // -> 0000 0000 0000 0000 abcd efgh ijkl mnop + + uint64_t word = input; + word &= 0x5555555555555555; + word = (word ^ (word >> 1)) & 0x3333333333333333; + word = (word ^ (word >> 2)) & 0x0f0f0f0f0f0f0f0f; + word = (word ^ (word >> 4)) & 0x00ff00ff00ff00ff; + word = (word ^ (word >> 8)) & 0x0000ffff0000ffff; + word = (word ^ (word >> 16)) & 0x00000000ffffffff; + return word; +} } // namespace GeoHash::GeoHash(unsigned x, unsigned y, unsigned bits) { - verify(bits <= 32); + MONGO_verify(bits <= 32); _bits = bits; auto interleavedX = interleaveWithZeros(x); auto interleavedY = interleaveWithZeros(y); @@ -196,95 +235,10 @@ GeoHash::GeoHash(long long hash, unsigned bits) : _hash(hash), _bits(bits) { clearUnusedBits(); } -/** - * Explanation & Example: - * bitset<64>(_hash) = "00000001 00000010 00000100 00001000 00010000 00100000 01000000 10000000"; - * - * the reinterpret_cast() of _hash results in: - * c[0] = 10000000 (the last 8 bits of _hash) - * c[1] = 01000000 (the second to last 8 bits of _hash) - * ... - * c[6] = 00000010 (the second 8 bits of _hash) - * c[7] = 00000001 (the first 8 bits of _hash) - * - * Calculating the Value of Y: - * in the for loop, - * t is c[i] but with all the even bits turned off: - * t = 00000000 (when i is even) - * t = 01000000 (i = 1) - * t = 00010000 (i = 3) - * t = 00000100 (i = 5) - * t = 00000001 (i = 7) - * - * then for each t, - * get the hashedToNormal(t): - * hashedToNormal(t) = 0 = 00000000 (when i is even) - * hashedToNormal(t) = 8 = 00001000 (i = 1) - * hashedToNormal(t) = 4 = 00000100 (i = 3) - * hashedToNormal(t) = 2 = 00000010 (i = 5) - * hashedToNormal(t) = 1 = 00000001 (i = 7) - * then shift it by (4 * i) (Little Endian) then - * bitwise OR it with y - * - * visually, all together it looks like: - * y = 00000000000000000000000000000000 (32 bits) - * y |= 00000000 (hashedToNormal(t) when i = 0) - * y |= 00001000 (hashedToNormal(t) when i = 1) - * y |= 00000000 (hashedToNormal(t) when i = 2) - * y |= 00000100 (hashedToNormal(t) when i = 3) - * y |= 00000000 (hashedToNormal(t) when i = 4) - * y |= 00000010 (hashedToNormal(t) when i = 5) - * y |= 00000000 (hashedToNormal(t) when i = 6) - * y |= 00000001 (hashedToNormal(t) when i = 7) - * --------------------------------------------- - * y = 00010000001000000100000010000000 - * - * Calculating the Value of X: - * in the for loop, - * t is c[i] right shifted by 1 with all the even bits turned off: - * t = 00000000 (when i is odd) - * t = 01000000 (i = 0) - * t = 00010000 (i = 2) - * t = 00000100 (i = 4) - * t = 00000001 (i = 6) - * - * then for each t, - * get the hashedToNormal(t) and shift it by (4 * i) (Little Endian) then - * bitwise OR it with x - */ -void GeoHash::unhash_fast(unsigned* x, unsigned* y) const { - *x = 0; - *y = 0; - const char* c = reinterpret_cast(&_hash); - for (int i = 0; i < 8; i++) { - // 0x55 in binary is "01010101", - // it's an odd bitmask that we use to turn off all the even bits - unsigned t = (unsigned)(c[i]) & 0x55; - const int leftShift = 4 * (kNativeLittle ? 
i : (7 - i)); - *y |= geoBitSets.hashedToNormal[t] << leftShift; - - t = ((unsigned)(c[i]) >> 1) & 0x55; - *x |= geoBitSets.hashedToNormal[t] << leftShift; - } -} - -void GeoHash::unhash_slow(unsigned* x, unsigned* y) const { - *x = 0; - *y = 0; - for (unsigned i = 0; i < _bits; i++) { - if (getBitX(i)) - *x |= mask32For(i); - if (getBitY(i)) - *y |= mask32For(i); - } -} - void GeoHash::unhash(unsigned* x, unsigned* y) const { - if constexpr (kNativeLittle) { - unhash_fast(x, y); - } else { - unhash_slow(x, y); - } + // Order goes XYXYXY... Shift Xs to Y position as that's how the algorithm expects the input. + *x = deinterleaveZeros(_hash >> 1); + *y = deinterleaveZeros(_hash); } /** Is the 'bit'-th most significant bit set? (NOT the least significant) */ @@ -298,7 +252,7 @@ GeoHash GeoHash::up() const { } bool GeoHash::hasPrefix(const GeoHash& other) const { - verify(other._bits <= _bits); + MONGO_verify(other._bits <= _bits); if (other._bits == 0) return true; @@ -323,7 +277,7 @@ string GeoHash::toStringHex1() const { } void GeoHash::setBit(unsigned pos, bool value) { - verify(pos < _bits * 2); + MONGO_verify(pos < _bits * 2); const long long mask = mask64For(pos); if (value) _hash |= mask; @@ -336,12 +290,12 @@ bool GeoHash::getBit(unsigned pos) const { } bool GeoHash::getBitX(unsigned pos) const { - verify(pos < 32); + MONGO_verify(pos < 32); return getBit(pos * 2); } bool GeoHash::getBitY(unsigned pos) const { - verify(pos < 32); + MONGO_verify(pos < 32); return getBit((pos * 2) + 1); } @@ -351,7 +305,7 @@ BSONObj GeoHash::wrap(const char* name) const { appendHashMin(&b, name); BSONObj o = b.obj(); if ('\0' == name[0]) - verify(o.objsize() == 20); + MONGO_verify(o.objsize() == 20); return o; } @@ -396,7 +350,7 @@ bool GeoHash::atMaxY() const { // TODO(hk): comment better void GeoHash::move(int x, int y) { - verify(_bits); + MONGO_verify(_bits); _move(0, x); _move(1, y); } @@ -405,7 +359,7 @@ void GeoHash::move(int x, int y) { void GeoHash::_move(unsigned offset, int d) { if (d == 0) return; - verify(d <= 1 && d >= -1); // TEMP + MONGO_verify(d <= 1 && d >= -1); // TEMP bool from, to; if (d > 0) { @@ -437,7 +391,7 @@ void GeoHash::_move(unsigned offset, int d) { pos -= 2; } - verify(0); + MONGO_verify(0); } GeoHash& GeoHash::operator=(const GeoHash& h) { @@ -467,7 +421,7 @@ bool GeoHash::operator<(const GeoHash& h) const { GeoHash& GeoHash::operator+=(const char* s) { unsigned pos = _bits * 2; _bits += strlen(s) / 2; - verify(_bits <= 32); + MONGO_verify(_bits <= 32); while ('\0' != s[0]) { if (s[0] == '1') setBit(pos, 1); @@ -539,12 +493,12 @@ void GeoHash::appendHashMin(BSONObjBuilder* builder, const char* fieldName) cons appendHashToBuilder(_hash, builder, fieldName); } -void GeoHash::appendHashMin(KeyString::Builder* ks) const { +void GeoHash::appendHashMin(key_string::Builder* ks) const { // The min bound of a GeoHash region has all the unused suffix bits set to 0 appendHashToKeyString(_hash, ks); } -void GeoHash::appendHashMin(KeyString::PooledBuilder* ks) const { +void GeoHash::appendHashMin(key_string::PooledBuilder* ks) const { // The min bound of a GeoHash region has all the unused suffix bits set to 0 appendHashToKeyString(_hash, ks); } @@ -600,7 +554,7 @@ GeoHash GeoHash::parent(unsigned int level) const { } GeoHash GeoHash::parent() const { - verify(_bits > 0); + MONGO_verify(_bits > 0); return GeoHash(_hash, _bits - 1); } @@ -929,7 +883,7 @@ double GeoHashConverter::convertFromHashScale(unsigned in) const { // Convert from a double that is [min, max] to a double in [0, 
(max-min)*scaling] double GeoHashConverter::convertToDoubleHashScale(double in) const { - verify(in <= _params.max && in >= _params.min); + MONGO_verify(in <= _params.max && in >= _params.min); if (in == _params.max) { // prevent aliasing with _min by moving inside the "box" @@ -938,7 +892,7 @@ double GeoHashConverter::convertToDoubleHashScale(double in) const { } in -= _params.min; - verify(in >= 0); + MONGO_verify(in >= 0); return in * _params.scaling; } diff --git a/src/mongo/db/geo/hash.h b/src/mongo/db/geo/hash.h index 6342c384a8555..9d131f3f6ba54 100644 --- a/src/mongo/db/geo/hash.h +++ b/src/mongo/db/geo/hash.h @@ -29,6 +29,15 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/jsobj.h" #include "mongo/db/storage/key_string.h" #include "mongo/platform/basic.h" @@ -113,8 +122,8 @@ class GeoHash { // Append the minimum range of the hash to the builder provided (inclusive) void appendHashMin(BSONObjBuilder* builder, const char* fieldName) const; // Append the minimum range of the hash to the KeyString provided (inclusive) - void appendHashMin(KeyString::Builder* ks) const; - void appendHashMin(KeyString::PooledBuilder* ks) const; + void appendHashMin(key_string::Builder* ks) const; + void appendHashMin(key_string::PooledBuilder* ks) const; // Append the maximum range of the hash to the builder provided (inclusive) void appendHashMax(BSONObjBuilder* builder, const char* fieldName) const; @@ -141,10 +150,6 @@ class GeoHash { // closest (in particular, level == kMaxBits is not allowed). void appendVertexNeighbors(unsigned level, std::vector* output) const; - // public but only for the purpose of testing - void unhash_fast(unsigned* x, unsigned* y) const; - void unhash_slow(unsigned* x, unsigned* y) const; - private: // Create a hash from the provided string. Used by the std::string and char* cons. void initFromString(const char* s); diff --git a/src/mongo/db/geo/hash_bm.cpp b/src/mongo/db/geo/hash_bm.cpp new file mode 100644 index 0000000000000..310b184c0ba1d --- /dev/null +++ b/src/mongo/db/geo/hash_bm.cpp @@ -0,0 +1,80 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. 
If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include +#include + +#include "mongo/db/geo/hash.h" + +namespace mongo { +namespace { + +void BM_hashSpeed(benchmark::State& state) { + auto length = state.range(); + std::vector hashes(length); + + std::random_device device{}; + uint32_t valueX = device(); + uint32_t valueY = device(); + for (auto _ : state) { + for (auto& geohash : hashes) { + auto newHash = GeoHash(valueX, valueY, 32); + benchmark::DoNotOptimize(newHash); + geohash = std::move(newHash); + benchmark::ClobberMemory(); + } + } +} + +void BM_unhashSpeed(benchmark::State& state) { + auto length = state.range(); + std::vector hashes(length); + + std::random_device device{}; + for (auto& geohash : hashes) { + geohash = GeoHash(device(), device(), 32); + } + + for (auto _ : state) { + for (auto& geohash : hashes) { + uint32_t x, y; + geohash.unhash(&x, &y); + benchmark::DoNotOptimize(x); + benchmark::DoNotOptimize(y); + } + } +} + +BENCHMARK(BM_hashSpeed)->Range(1, 1 << 10); +BENCHMARK(BM_unhashSpeed)->Range(1, 1 << 10); +} // namespace +} // namespace mongo diff --git a/src/mongo/db/geo/hash_test.cpp b/src/mongo/db/geo/hash_test.cpp index e9c9ed83cab2b..93096a66de301 100644 --- a/src/mongo/db/geo/hash_test.cpp +++ b/src/mongo/db/geo/hash_test.cpp @@ -32,16 +32,20 @@ */ #include // For max() -#include #include -#include -#include +#include +#include #include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/geo/hash.h" #include "mongo/db/geo/shapes.h" #include "mongo/platform/random.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -81,30 +85,6 @@ string splitBinStr(string bin) { return split.substr(0, split.size() - 1); } -bool unhash_fast_and_slow_match(string hash) { - GeoHash geoHash = GeoHash(hash); - unsigned fastX, fastY, slowX, slowY, x, y; - - geoHash.unhash_fast(&x, &y); - fastX = x; - fastY = y; - - geoHash.unhash_slow(&x, &y); - slowX = x; - slowY = y; - - bool match = (fastX == slowX && fastY == slowY); - if (!match) { - std::bitset<32> fastXBits(fastX), fastYBits(fastY), slowXBits(slowX), slowYBits(slowY); - cout << "unhash_fast's x: " << splitBinStr(fastXBits.to_string()) << endl; - cout << "unhash_slow's x: " << splitBinStr(slowXBits.to_string()) << endl; - cout << "unhash_fast's y: " << splitBinStr(fastYBits.to_string()) << endl; - cout << "unhash_slow's y: " << splitBinStr(slowYBits.to_string()) << endl; - } - - return match; -} - TEST(GeoHash, MakeRandomValidHashes) { int maxStringLength = 64; for (int i = 0; i < maxStringLength; i += 2) { @@ -130,24 +110,6 @@ TEST(GeoHash, MakeOddHash) { ASSERT_THROWS(makeHash(a), mongo::AssertionException); } -TEST(GeoHash, UnhashFastMatchesUnhashSlow) { - string hashes[12] = {"0000000000000000000000000000000000000000000000000000000000000000", - "0101010110100011011100110101000000000101001101000011001011111001", - "1010000000110010100110000111001111010011010100001000011110101100", - "0101010110100011011101011010001111000110111011111011001010110100", - "1010000000110010100111101000000000010000100010110000011111100001", - "0101010100100100001011111110011110010001111100011011011110110111", - "1010000010110101110001001100010001000111100101010000001011100010", - "0101010100100100001010010001010001010010001010100011011111111010", - 
"1010000010110101110000100011011110000100010011101000001010101111", - "0101010110100011011100110101000000000000100111110001101101001011", - "1010000000110010100110000111001111010110111110111010111000011110", - "1111111111111111111111111111111111111111111111111111111111111111"}; - for (int i = 0; i < 12; i++) { - ASSERT_TRUE(unhash_fast_and_slow_match(hashes[i])); - } -} - TEST(GeoHash, HashAndUnhash) { PseudoRandom random(12345); for (int i = 0; i < 1'000; i++) { diff --git a/src/mongo/db/geo/r2_region_coverer.cpp b/src/mongo/db/geo/r2_region_coverer.cpp index 9a0364495e016..961142a27f9dc 100644 --- a/src/mongo/db/geo/r2_region_coverer.cpp +++ b/src/mongo/db/geo/r2_region_coverer.cpp @@ -28,13 +28,28 @@ */ +#include +// IWYU pragma: no_include "boost/intrusive/detail/std_fwd.hpp" #include - -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/geo/r2_region_coverer.h" #include "mongo/db/geo/shapes.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -97,7 +112,7 @@ void R2RegionCoverer::getCovering(const R2Region& region, vector* cover // children they have (fewest children first), and then by the number of // fully contained children (fewest children first). - verify(_minLevel <= _maxLevel); + MONGO_verify(_minLevel <= _maxLevel); dassert(_candidateQueue->empty()); dassert(_results->empty()); _region = ®ion; @@ -171,7 +186,7 @@ void R2RegionCoverer::addCandidate(Candidate* candidate) { return; } - verify(candidate->numChildren == 0); + MONGO_verify(candidate->numChildren == 0); // Expand children int numTerminals = expandChildren(candidate); diff --git a/src/mongo/db/geo/r2_region_coverer.h b/src/mongo/db/geo/r2_region_coverer.h index 208322ff68b7b..c7fb2789933c5 100644 --- a/src/mongo/db/geo/r2_region_coverer.h +++ b/src/mongo/db/geo/r2_region_coverer.h @@ -29,9 +29,14 @@ #pragma once +#include #include #include +#include +#include #include +#include +#include #include #include "mongo/db/geo/hash.h" diff --git a/src/mongo/db/geo/r2_region_coverer_test.cpp b/src/mongo/db/geo/r2_region_coverer_test.cpp index eff96a0c5f798..3988216b7048b 100644 --- a/src/mongo/db/geo/r2_region_coverer_test.cpp +++ b/src/mongo/db/geo/r2_region_coverer_test.cpp @@ -28,18 +28,35 @@ */ +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include +#include +#include +#include #include #include -#include "mongo/db/geo/r2_region_coverer.h" - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/geo/geometry_container.h" +#include "mongo/db/geo/r2_region_coverer.h" +#include "mongo/db/geo/shapes.h" #include "mongo/logv2/log.h" -#include "mongo/platform/random.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kTest @@ -202,7 +219,7 @@ TEST(R2RegionCoverer, RandomCells) { } double randDouble(double lowerBound, double upperBound) { - verify(lowerBound <= upperBound); + MONGO_verify(lowerBound <= upperBound); const int NUMBITS = 53; // Random double in [0, 1) long long randLong = diff --git a/src/mongo/db/geo/s2.h b/src/mongo/db/geo/s2.h deleted file mode 100644 index f86087538e3d9..0000000000000 --- a/src/mongo/db/geo/s2.h +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -/* - * This file's purpose is to confine the suppression of the Clang warning for - * mismatched-tags (struct vs class) in only the s2.h file - */ - -#ifdef __clang__ -#pragma GCC diagnostic ignored "-Wmismatched-tags" -#endif - -#include - -#ifdef __clang__ -#pragma GCC diagnostic pop -#endif diff --git a/src/mongo/db/geo/shapes.cpp b/src/mongo/db/geo/shapes.cpp index 147147833df35..d7f5122051156 100644 --- a/src/mongo/db/geo/shapes.cpp +++ b/src/mongo/db/geo/shapes.cpp @@ -28,7 +28,27 @@ */ #include "mongo/db/geo/shapes.h" -#include "mongo/db/jsobj.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" using std::abs; @@ -105,8 +125,8 @@ bool Box::onBoundary(double bound, double val, double fudge) const { } bool Box::mid(double amin, double amax, double bmin, double bmax, bool min, double* res) const { - verify(amin <= amax); - verify(bmin <= bmax); + MONGO_verify(amin <= amax); + MONGO_verify(bmin <= bmax); if (amin < bmin) { if (amax < bmin) @@ -629,7 +649,7 @@ double spheredist_rad(const Point& p1, const Point& p2) { if (cross_prod >= 1 || cross_prod <= -1) { // fun with floats - verify(fabs(cross_prod) - 1 < 1e-6); + MONGO_verify(fabs(cross_prod) - 1 < 1e-6); return cross_prod > 0 ? 
0 : M_PI; } diff --git a/src/mongo/db/geo/shapes.h b/src/mongo/db/geo/shapes.h index 96c645900ca4a..de3765c843f9a 100644 --- a/src/mongo/db/geo/shapes.h +++ b/src/mongo/db/geo/shapes.h @@ -29,18 +29,24 @@ #pragma once +#include #include +#include +#include #include #include +#include #include #include #include #include +#include #include #include "mongo/base/clonable_ptr.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/geo/big_polygon.h" -#include "mongo/db/geo/s2.h" #include "mongo/db/jsobj.h" #ifndef M_PI diff --git a/src/mongo/db/global_index.cpp b/src/mongo/db/global_index.cpp index b8c6d605476ba..15bedb478ada3 100644 --- a/src/mongo/db/global_index.cpp +++ b/src/mongo/db/global_index.cpp @@ -29,22 +29,54 @@ #include "mongo/db/global_index.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/dbhelpers.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/delete_stage.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_role.h" #include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/retryable_writes_stats.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -60,8 +92,8 @@ BSONObj buildIndexEntry(const BSONObj& key, const BSONObj& docKey) { // - We assume unique: true, and there's no support for other index options. // - No support for multikey indexes. - KeyString::Builder ks(KeyString::Version::V1); - ks.resetToKey(BSONObj::stripFieldNames(key), KeyString::ALL_ASCENDING); + key_string::Builder ks(key_string::Version::V1); + ks.resetToKey(BSONObj::stripFieldNames(key), key_string::ALL_ASCENDING); const auto& indexTB = ks.getTypeBits(); // Build the index entry, consisting of: @@ -84,7 +116,7 @@ BSONObj buildIndexEntry(const BSONObj& key, const BSONObj& docKey) { RecordIdBound docKeyToRecordIdBound(const BSONObj& docKey) { // Build RecordIdBound corresponding to docKey. 
- KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion); + key_string::Builder keyBuilder(key_string::Version::kLatestVersion); keyBuilder.appendObject(docKey); return RecordIdBound(RecordId(keyBuilder.getBuffer(), keyBuilder.getSize())); } @@ -119,7 +151,7 @@ void createContainer(OperationContext* opCtx, const UUID& indexUUID) { LOGV2(6789200, "Create global index container", "indexUUID"_attr = indexUUID); // Create the container. - return writeConflictRetry(opCtx, "createGlobalIndexContainer", nss.ns(), [&]() { + return writeConflictRetry(opCtx, "createGlobalIndexContainer", nss, [&]() { const auto indexKeySpec = BSON("v" << 2 << "name" << kContainerIndexKeyFieldName.toString() + "_1" << "key" << BSON(kContainerIndexKeyFieldName << 1) << "unique" << true); @@ -164,7 +196,7 @@ void createContainer(OperationContext* opCtx, const UUID& indexUUID) { indexKeySpec, IndexCatalog::InclusionPolicy::kReady)); tassert(6789206, - str::stream() << "Collection with namespace " << nss.ns() + str::stream() << "Collection with namespace " << nss.toStringForErrorMsg() << " already exists but it has inconsistent UUID " << autoColl->uuid().toString() << ".", autoColl->uuid() == indexUUID); @@ -187,7 +219,7 @@ void dropContainer(OperationContext* opCtx, const UUID& indexUUID) { LOGV2(6789300, "Drop global index container", "indexUUID"_attr = indexUUID); // Drop the container. - return writeConflictRetry(opCtx, "dropGlobalIndexContainer", nss.ns(), [&]() { + return writeConflictRetry(opCtx, "dropGlobalIndexContainer", nss, [&]() { AutoGetCollection autoColl(opCtx, nss, MODE_X); if (!autoColl) { // Idempotent command, return OK if the collection is non-existing. @@ -211,14 +243,13 @@ void dropContainer(OperationContext* opCtx, const UUID& indexUUID) { } void insertKey(OperationContext* opCtx, - const CollectionPtr& container, + const CollectionAcquisition& container, const BSONObj& key, const BSONObj& docKey) { const auto indexEntry = buildIndexEntry(key, docKey); invariant(!opCtx->writesAreReplicated()); - uassertStatusOK(collection_internal::insertDocument( - opCtx, container, InsertStatement(indexEntry), nullptr)); + uassertStatusOK(Helpers::insert(opCtx, container, indexEntry)); } void insertKey(OperationContext* opCtx, @@ -229,19 +260,21 @@ void insertKey(OperationContext* opCtx, const auto indexEntry = buildIndexEntry(key, docKey); // Insert the index entry. - writeConflictRetry(opCtx, "insertGlobalIndexKey", ns.toString(), [&] { + writeConflictRetry(opCtx, "insertGlobalIndexKey", ns, [&] { WriteUnitOfWork wuow(opCtx); - AutoGetCollection autoColl(opCtx, ns, MODE_IX); - auto& container = autoColl.getCollection(); + const auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, ns, AcquisitionPrerequisites::kWrite), + MODE_IX); uassert(6789402, str::stream() << "Global index container with UUID " << indexUUID << " does not exist.", - container); + coll.exists()); { repl::UnreplicatedWritesBlock unreplicatedWrites(opCtx); - insertKey(opCtx, container, key, docKey); + insertKey(opCtx, coll, key, docKey); } opCtx->getServiceContext()->getOpObserver()->onInsertGlobalIndexKey( @@ -252,7 +285,7 @@ void insertKey(OperationContext* opCtx, } void deleteKey(OperationContext* opCtx, - const CollectionPtr& container, + const CollectionAcquisition& container, const BSONObj& key, const BSONObj& docKey) { const auto indexEntry = buildIndexEntry(key, docKey); @@ -268,7 +301,7 @@ void deleteKey(OperationContext* opCtx, // is why we delete using a collection scan. 
auto planExecutor = InternalPlanner::deleteWithCollectionScan( opCtx, - &container, + container, std::move(deleteStageParams), PlanYieldPolicy::YieldPolicy::NO_YIELD, InternalPlanner::FORWARD, @@ -283,7 +316,7 @@ void deleteKey(OperationContext* opCtx, // Return error if no document has been found or if the associated "key" does not match the key // provided as parameter. uassert(ErrorCodes::KeyNotFound, - str::stream() << "Global index container with UUID " << container->uuid() + str::stream() << "Global index container with UUID " << container.uuid() << " does not contain specified entry. key:" << key << ", docKey:" << docKey, execState == PlanExecutor::ExecState::ADVANCED && @@ -299,19 +332,21 @@ void deleteKey(OperationContext* opCtx, const auto ns = NamespaceString::makeGlobalIndexNSS(indexUUID); // Find and delete the index entry. - writeConflictRetry(opCtx, "deleteGlobalIndexKey", ns.toString(), [&] { + writeConflictRetry(opCtx, "deleteGlobalIndexKey", ns, [&] { WriteUnitOfWork wuow(opCtx); - AutoGetCollection autoColl(opCtx, ns, MODE_IX); - auto& container = autoColl.getCollection(); + const auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, ns, AcquisitionPrerequisites::kWrite), + MODE_IX); uassert(6924201, str::stream() << "Global index container with UUID " << indexUUID << " does not exist.", - container); + coll.exists()); { repl::UnreplicatedWritesBlock unreplicatedWrites(opCtx); - deleteKey(opCtx, container, key, docKey); + deleteKey(opCtx, coll, key, docKey); } opCtx->getServiceContext()->getOpObserver()->onDeleteGlobalIndexKey( diff --git a/src/mongo/db/global_index.h b/src/mongo/db/global_index.h index c4a2718c8edb7..4175e2f01a4a4 100644 --- a/src/mongo/db/global_index.h +++ b/src/mongo/db/global_index.h @@ -30,9 +30,15 @@ #pragma once +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/operation_context.h" #include "mongo/util/uuid.h" +namespace mongo { +class CollectionAcquisition; +} + namespace mongo::global_index { // The container (collection) fields of an index key. The document key is stored as a BSON object. @@ -75,7 +81,7 @@ void insertKey(OperationContext* opCtx, * the above, this variant requires the call to be wrapped inside a writeConflictRetry. */ void insertKey(OperationContext* opCtx, - const CollectionPtr& container, + const CollectionAcquisition& container, const BSONObj& key, const BSONObj& docKey); @@ -94,7 +100,7 @@ void deleteKey(OperationContext* opCtx, * the above, this variant requires the call to be wrapped inside a writeConflictRetry. 
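(For reference, "wrapped inside a writeConflictRetry" for these CollectionAcquisition overloads means the same pattern this patch applies in global_index.cpp; the sketch below is illustrative only, and assumes indexUUID, key and docKey are already in scope.)

    const auto nss = NamespaceString::makeGlobalIndexNSS(indexUUID);
    writeConflictRetry(opCtx, "deleteGlobalIndexKey", nss, [&] {
        WriteUnitOfWork wuow(opCtx);
        // Acquire the container collection for write before calling the overload.
        const auto coll = acquireCollection(
            opCtx,
            CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
            MODE_IX);
        global_index::deleteKey(opCtx, coll, key, docKey);
        wuow.commit();
    });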
*/ void deleteKey(OperationContext* opCtx, - const CollectionPtr& container, + const CollectionAcquisition& container, const BSONObj& key, const BSONObj& docKey); diff --git a/src/mongo/db/global_index_test.cpp b/src/mongo/db/global_index_test.cpp index 1730d272b4393..452d341cbf0c7 100644 --- a/src/mongo/db/global_index_test.cpp +++ b/src/mongo/db/global_index_test.cpp @@ -29,17 +29,42 @@ #include "mongo/db/global_index.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -106,7 +131,7 @@ void verifyStoredKeyMatchesIndexKey(const BSONObj& key, indexEntry.hasElement(global_index::kContainerIndexKeyTypeBitsFieldName); ASSERT_EQ(expectTypeBits, hasTypeBits); - auto tb = KeyString::TypeBits(KeyString::Version::V1); + auto tb = key_string::TypeBits(key_string::Version::V1); if (hasTypeBits) { auto entryTypeBitsSize = indexEntry[global_index::kContainerIndexKeyTypeBitsFieldName].size(); @@ -114,12 +139,12 @@ void verifyStoredKeyMatchesIndexKey(const BSONObj& key, indexEntry[global_index::kContainerIndexKeyTypeBitsFieldName].binData( entryTypeBitsSize); auto entryTypeBitsReader = BufReader(entryTypeBitsBinData, entryTypeBitsSize); - tb = KeyString::TypeBits::fromBuffer(KeyString::Version::V1, &entryTypeBitsReader); + tb = key_string::TypeBits::fromBuffer(key_string::Version::V1, &entryTypeBitsReader); ASSERT(!tb.isAllZeros()); } const auto rehydratedKey = - KeyString::toBson(entryIndexKeyBinData, entryIndexKeySize, KeyString::ALL_ASCENDING, tb); + key_string::toBson(entryIndexKeyBinData, entryIndexKeySize, key_string::ALL_ASCENDING, tb); ASSERT_BSONOBJ_EQ(rehydratedKey, key); LOGV2(6789401, diff --git a/src/mongo/db/global_settings.cpp b/src/mongo/db/global_settings.cpp index 0d31094d87d1d..c71fa89f525c8 100644 --- a/src/mongo/db/global_settings.cpp +++ b/src/mongo/db/global_settings.cpp @@ -29,9 +29,25 @@ #include "mongo/db/global_settings.h" +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/client.h" #include "mongo/db/mongod_options_general_gen.h" 
+#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/global_settings.h b/src/mongo/db/global_settings.h index 50b4d82a74175..9e8f1e4134d10 100644 --- a/src/mongo/db/global_settings.h +++ b/src/mongo/db/global_settings.h @@ -29,7 +29,12 @@ #pragma once +#include +#include +#include + #include "mongo/db/repl/repl_settings.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/hasher.cpp b/src/mongo/db/hasher.cpp index e8da6919bf48c..5a5851a349433 100644 --- a/src/mongo/db/hasher.cpp +++ b/src/mongo/db/hasher.cpp @@ -33,9 +33,15 @@ #include "mongo/db/hasher.h" - -#include "mongo/db/jsobj.h" -#include "mongo/util/md5.hpp" +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/platform/endian.h" +#include "mongo/util/md5.h" namespace mongo { diff --git a/src/mongo/db/hasher.h b/src/mongo/db/hasher.h index a4e86a1b5aa1e..46d59fbfb4535 100644 --- a/src/mongo/db/hasher.h +++ b/src/mongo/db/hasher.h @@ -34,6 +34,8 @@ */ +#include + #include "mongo/bson/bsonelement.h" namespace mongo { diff --git a/src/mongo/db/hasher_test.cpp b/src/mongo/db/hasher_test.cpp index 869453f21dd39..a5e3a0e27fed0 100644 --- a/src/mongo/db/hasher_test.cpp +++ b/src/mongo/db/hasher_test.cpp @@ -29,14 +29,22 @@ /** Unit tests for BSONElementHasher. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/hasher.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/index/2d_access_method.cpp b/src/mongo/db/index/2d_access_method.cpp index d5af3a86a97cf..486cdb6e717f9 100644 --- a/src/mongo/db/index/2d_access_method.cpp +++ b/src/mongo/db/index/2d_access_method.cpp @@ -29,15 +29,14 @@ #include "mongo/db/index/2d_access_method.h" -#include -#include +#include + +#include #include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/index/2d_common.h" #include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/expression_params.h" -#include "mongo/db/index_names.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/index/index_descriptor.h" namespace mongo { @@ -58,6 +57,7 @@ void TwoDAccessMethod::validateDocument(const CollectionPtr& collection, /** Finds the key objects to put in an index */ void TwoDAccessMethod::doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/2d_access_method.h b/src/mongo/db/index/2d_access_method.h index a7ae364b99410..4125372b67a72 100644 --- a/src/mongo/db/index/2d_access_method.h +++ b/src/mongo/db/index/2d_access_method.h @@ -29,10 +29,21 @@ #pragma once +#include +#include + #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" 
+#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/2d_common.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -45,9 +56,6 @@ class TwoDAccessMethod : public SortedDataIndexAccessMethod { TwoDAccessMethod(IndexCatalogEntry* btreeState, std::unique_ptr btree); private: - const IndexDescriptor* getDescriptor() { - return _descriptor; - } TwoDIndexingParams& getParams() { return _params; } @@ -64,6 +72,7 @@ class TwoDAccessMethod : public SortedDataIndexAccessMethod { */ void doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/2d_key_generator_test.cpp b/src/mongo/db/index/2d_key_generator_test.cpp index 1f12859b4156b..778b1219f4ca0 100644 --- a/src/mongo/db/index/2d_key_generator_test.cpp +++ b/src/mongo/db/index/2d_key_generator_test.cpp @@ -28,19 +28,32 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index/expression_keys_private.h" - #include - +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/json.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/geo/hash.h" #include "mongo/db/index/2d_common.h" +#include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/expression_params.h" -#include "mongo/db/json.h" +#include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/shared_buffer_fragment.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -53,7 +66,7 @@ std::string dumpKeyset(const KeyStringSet& keyStrings) { std::stringstream ss; ss << "[ "; for (auto& keyString : keyStrings) { - auto key = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto key = key_string::toBson(keyString, Ordering::make(BSONObj())); ss << key.toString() << " "; } ss << "]"; @@ -81,21 +94,21 @@ bool assertKeysetsEqual(const KeyStringSet& expectedKeys, const KeyStringSet& ac return true; } -KeyString::Value make2DKey(const TwoDIndexingParams& params, - int x, - int y, - BSONElement trailingFields) { +key_string::Value make2DKey(const TwoDIndexingParams& params, + int x, + int y, + BSONElement trailingFields) { BSONObjBuilder bob; BSONObj locObj = BSON_ARRAY(x << y); params.geoHashConverter->hash(locObj, nullptr).appendHashMin(&bob, ""); bob.append(trailingFields); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, bob.obj(), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, bob.obj(), Ordering::make(BSONObj())); return keyString.release(); } struct TwoDKeyGeneratorTest : public unittest::Test { - SharedBufferFragmentBuilder 
allocator{KeyString::HeapBuilder::kHeapAllocatorDefaultBytes}; + SharedBufferFragmentBuilder allocator{key_string::HeapBuilder::kHeapAllocatorDefaultBytes}; }; TEST_F(TwoDKeyGeneratorTest, TrailingField) { @@ -108,7 +121,7 @@ TEST_F(TwoDKeyGeneratorTest, TrailingField) { obj, params, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); KeyStringSet expectedKeys; @@ -128,7 +141,7 @@ TEST_F(TwoDKeyGeneratorTest, ArrayTrailingField) { obj, params, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); KeyStringSet expectedKeys; @@ -148,7 +161,7 @@ TEST_F(TwoDKeyGeneratorTest, ArrayOfObjectsTrailingField) { obj, params, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj())); KeyStringSet expectedKeys; diff --git a/src/mongo/db/index/SConscript b/src/mongo/db/index/SConscript index c3cba76f3ce8e..c26571bde0fcd 100644 --- a/src/mongo/db/index/SConscript +++ b/src/mongo/db/index/SConscript @@ -70,16 +70,15 @@ iamEnv.Library( '$BUILD_DIR/mongo/db/multi_key_path_tracker', '$BUILD_DIR/mongo/db/pipeline/document_path_support', '$BUILD_DIR/mongo/db/query/collation/collator_factory_interface', - '$BUILD_DIR/mongo/db/query/collation/collator_interface', '$BUILD_DIR/mongo/db/query/op_metrics', '$BUILD_DIR/mongo/db/query/projection_ast', '$BUILD_DIR/mongo/db/query/sort_pattern', '$BUILD_DIR/mongo/db/query_expressions', '$BUILD_DIR/mongo/db/record_id_helpers', - '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/resumable_index_builds_idl', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/db/shard_role_api_stor_ex', '$BUILD_DIR/mongo/db/sorter/sorter_idl', '$BUILD_DIR/mongo/db/sorter/sorter_stats', '$BUILD_DIR/mongo/db/storage/encryption_hooks', diff --git a/src/mongo/db/index/btree_access_method.cpp b/src/mongo/db/index/btree_access_method.cpp index 71c9bf86fecac..f3986f9ed73c3 100644 --- a/src/mongo/db/index/btree_access_method.cpp +++ b/src/mongo/db/index/btree_access_method.cpp @@ -29,14 +29,15 @@ #include "mongo/db/index/btree_access_method.h" +#include #include -#include "mongo/base/status.h" -#include "mongo/base/status_with.h" +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/expression_keys_private.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/keypattern.h" +#include "mongo/db/index/index_descriptor.h" namespace mongo { @@ -50,7 +51,7 @@ BtreeAccessMethod::BtreeAccessMethod(IndexCatalogEntry* btreeState, vector fieldNames; vector fixed; - BSONObjIterator it(_descriptor->keyPattern()); + BSONObjIterator it(btreeState->descriptor()->keyPattern()); while (it.more()) { BSONElement elt = it.next(); fieldNames.push_back(elt.fieldName()); @@ -60,7 +61,7 @@ BtreeAccessMethod::BtreeAccessMethod(IndexCatalogEntry* btreeState, _keyGenerator = std::make_unique(fieldNames, fixed, - _descriptor->isSparse(), + btreeState->descriptor()->isSparse(), getSortedDataInterface()->getKeyStringVersion(), getSortedDataInterface()->getOrdering()); } @@ -73,6 +74,7 @@ void BtreeAccessMethod::validateDocument(const CollectionPtr& collection, void BtreeAccessMethod::doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, @@ -80,15 +82,10 @@ void 
BtreeAccessMethod::doGetKeys(OperationContext* opCtx, KeyStringSet* multikeyMetadataKeys, MultikeyPaths* multikeyPaths, const boost::optional& id) const { - const auto skipMultikey = context == GetKeysContext::kValidatingKeys && - !_descriptor->getEntry()->isMultikey(opCtx, collection); - _keyGenerator->getKeys(pooledBufferBuilder, - obj, - skipMultikey, - keys, - multikeyPaths, - _indexCatalogEntry->getCollator(), - id); + const auto skipMultikey = + context == GetKeysContext::kValidatingKeys && !entry->isMultikey(opCtx, collection); + _keyGenerator->getKeys( + pooledBufferBuilder, obj, skipMultikey, keys, multikeyPaths, entry->getCollator(), id); } } // namespace mongo diff --git a/src/mongo/db/index/btree_access_method.h b/src/mongo/db/index/btree_access_method.h index 5621c99933fb5..e834634bf5ef5 100644 --- a/src/mongo/db/index/btree_access_method.h +++ b/src/mongo/db/index/btree_access_method.h @@ -30,10 +30,21 @@ #pragma once +#include +#include + #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/btree_key_generator.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -54,6 +65,7 @@ class BtreeAccessMethod : public SortedDataIndexAccessMethod { void doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/btree_key_generator.cpp b/src/mongo/db/index/btree_key_generator.cpp index 56c2696ecda4d..1644c425ebd86 100644 --- a/src/mongo/db/index/btree_key_generator.cpp +++ b/src/mongo/db/index/btree_key_generator.cpp @@ -29,10 +29,28 @@ #include "mongo/db/index/btree_key_generator.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/field_ref.h" -#include "mongo/db/query/collation/collation_index_key.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -89,7 +107,7 @@ std::pair extractNonArrayElementAtPath(const BSONObj& obj, St BtreeKeyGenerator::BtreeKeyGenerator(std::vector fieldNames, std::vector fixed, bool isSparse, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering) : _keyStringVersion(keyStringVersion), _isIdIndex(fieldNames.size() == 1 && std::string("_id") == fieldNames[0]), @@ -217,7 +235,7 @@ void BtreeKeyGenerator::getKeys(SharedBufferFragmentBuilder& pooledBufferBuilder if (e.eoo()) { keys->insert(_nullKeyString); } else { - KeyString::PooledBuilder keyString(pooledBufferBuilder, _keyStringVersion, _ordering); + key_string::PooledBuilder keyString(pooledBufferBuilder, _keyStringVersion, _ordering); if (collator) { keyString.appendBSONElement(e, [&](StringData stringData) { @@ -314,7 +332,7 @@ 
void BtreeKeyGenerator::_getKeysWithoutArray(SharedBufferFragmentBuilder& pooled const boost::optional& id, KeyStringSet* keys) const { - KeyString::PooledBuilder keyString{pooledBufferBuilder, _keyStringVersion, _ordering}; + key_string::PooledBuilder keyString{pooledBufferBuilder, _keyStringVersion, _ordering}; size_t numNotFound{0}; for (auto&& fieldName : _fieldNames) { @@ -432,7 +450,7 @@ void BtreeKeyGenerator::_getKeysWithArray(std::vector* fieldNames, if (_isSparse && numNotFound == fieldNames->size()) { return; } - KeyString::PooledBuilder keyString(pooledBufferBuilder, _keyStringVersion, _ordering); + key_string::PooledBuilder keyString(pooledBufferBuilder, _keyStringVersion, _ordering); for (const auto& elem : *fixed) { if (collator) { keyString.appendBSONElement(elem, [&](StringData stringData) { @@ -583,12 +601,12 @@ void BtreeKeyGenerator::_getKeysWithArray(std::vector* fieldNames, } } -KeyString::Value BtreeKeyGenerator::_buildNullKeyString() const { +key_string::Value BtreeKeyGenerator::_buildNullKeyString() const { BSONObjBuilder nullKeyBuilder; for (size_t i = 0; i < _fieldNames.size(); ++i) { nullKeyBuilder.appendNull(""); } - KeyString::HeapBuilder nullKeyString(_keyStringVersion, nullKeyBuilder.obj(), _ordering); + key_string::HeapBuilder nullKeyString(_keyStringVersion, nullKeyBuilder.obj(), _ordering); return nullKeyString.release(); } diff --git a/src/mongo/db/index/btree_key_generator.h b/src/mongo/db/index/btree_key_generator.h index 7a1e5639c26d0..a0689a207dc8a 100644 --- a/src/mongo/db/index/btree_key_generator.h +++ b/src/mongo/db/index/btree_key_generator.h @@ -30,15 +30,25 @@ #pragma once #include +#include +#include +#include +#include #include #include #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/ordering.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/key_string.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -57,7 +67,7 @@ class BtreeKeyGenerator { BtreeKeyGenerator(std::vector fieldNames, std::vector fixed, bool isSparse, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering); /** @@ -241,9 +251,9 @@ class BtreeKeyGenerator { const CollatorInterface* collator, const boost::optional& id) const; - KeyString::Value _buildNullKeyString() const; + key_string::Value _buildNullKeyString() const; - const KeyString::Version _keyStringVersion; + const key_string::Version _keyStringVersion; const bool _isIdIndex; const bool _isSparse; @@ -255,7 +265,7 @@ class BtreeKeyGenerator { // These are used by getKeys below. const std::vector _fieldNames; - const KeyString::Value _nullKeyString; // A full key with all fields null. + const key_string::Value _nullKeyString; // A full key with all fields null. 
std::vector _fixed; diff --git a/src/mongo/db/index/btree_key_generator_test.cpp b/src/mongo/db/index/btree_key_generator_test.cpp index 312fad5ee3cc2..9406c9e498905 100644 --- a/src/mongo/db/index/btree_key_generator_test.cpp +++ b/src/mongo/db/index/btree_key_generator_test.cpp @@ -28,18 +28,27 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index/btree_key_generator.h" - #include +#include +#include +#include +#include #include +#include +#include -#include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/db/json.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/index/btree_key_generator.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -60,7 +69,7 @@ std::string dumpKeyset(const KeyStringSet& keyStrings) { std::stringstream ss; ss << "[ "; for (auto& keyString : keyStrings) { - auto key = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto key = key_string::toBson(keyString, Ordering::make(BSONObj())); ss << key.toString() << " "; } ss << "]"; @@ -119,7 +128,7 @@ bool testKeygen(const BSONObj& kp, } auto keyGen = std::make_unique( - fieldNames, fixed, sparse, KeyString::Version::kLatestVersion, Ordering::make(BSONObj())); + fieldNames, fixed, sparse, key_string::Version::kLatestVersion, Ordering::make(BSONObj())); auto runTest = [&](bool skipMultikey) { // @@ -182,8 +191,8 @@ bool testKeygen(const BSONObj& kp, TEST(BtreeKeyGeneratorTest, GetIdKeyFromObject) { BSONObj keyPattern = fromjson("{_id: 1}"); BSONObj genKeysFrom = fromjson("{_id: 'foo', b: 4}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 'foo'}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 'foo'}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -192,8 +201,8 @@ TEST(BtreeKeyGeneratorTest, GetIdKeyFromObject) { TEST(BtreeKeyGeneratorTest, GetKeysFromObjectSimple) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{b: 4, a: 5}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -202,8 +211,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromObjectSimple) { TEST(BtreeKeyGeneratorTest, GetKeysFromObjectDotted) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: {b: 4}, c: 'foo'}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 4}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 4}"), 
Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -212,12 +221,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromObjectDotted) { TEST(BtreeKeyGeneratorTest, GetKeysFromArraySimple) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{a: [1, 2, 3]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -226,8 +235,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArraySimple) { TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithIdenticalValues) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{a: [0, 0, 0]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 0}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 0}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -236,8 +245,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithIdenticalValues) { TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithEquivalentValues) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{a: [0, NumberInt(0), NumberLong(0)]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 0}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 0}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -246,12 +255,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithEquivalentValues) { TEST(BtreeKeyGeneratorTest, GetKeysFromArrayFirstElement) { BSONObj keyPattern = fromjson("{a: 1, b: 1}"); BSONObj genKeysFrom = fromjson("{a: [1, 2, 3], b: 2}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1, '': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2, '': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3, '': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1, '': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder 
keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2, '': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3, '': 2}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -260,12 +269,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArrayFirstElement) { TEST(BtreeKeyGeneratorTest, GetKeysFromArraySecondElement) { BSONObj keyPattern = fromjson("{first: 1, a: 1}"); BSONObj genKeysFrom = fromjson("{first: 5, a: [1, 2, 3]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 5, '': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 5, '': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 5, '': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 5, '': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 5, '': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 5, '': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -274,12 +283,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArraySecondElement) { TEST(BtreeKeyGeneratorTest, GetKeysFromSecondLevelArray) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: {b: [1, 2, 3]}}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -303,12 +312,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysBasic) { TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectBasic) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - 
KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -317,10 +326,10 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectBasic) { TEST(BtreeKeyGeneratorTest, GetKeysFromSubobjectWithArrayOfSubobjects) { BSONObj keyPattern = fromjson("{'a.b.c': 1}"); BSONObj genKeysFrom = fromjson("{a: {b: [{c: 1}, {c: 2}]}}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release()}; MultikeyPaths expectedMultikeyPaths{{1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -329,12 +338,15 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromSubobjectWithArrayOfSubobjects) { TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectCompoundIndex) { BSONObj keyPattern = fromjson("{'a.b': 1, d: 99}"); BSONObj genKeysFrom = fromjson("{a: [{b:1,c:4}, {b:2,c:4}, {b:3,c:4}], d: 99}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1, '': 99}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2, '': 99}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3, '': 99}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + fromjson("{'': 1, '': 99}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + fromjson("{'': 2, '': 99}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': 3, '': 99}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -343,14 +355,14 @@ TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectCompoundIndex) { TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectSingleMissing) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {b:1,c:4}, {b:2,c:4}, {b:3,c:4}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - 
KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString4( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString4( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{ keyString1.release(), keyString2.release(), keyString3.release(), keyString4.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; @@ -360,8 +372,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysArraySubobjectSingleMissing) { TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectMissing) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [{foo: 41}, {foo: 41}, {foo: 41}]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -370,8 +382,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubobjectMissing) { TEST(BtreeKeyGeneratorTest, GetKeysMissingField) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{b: 1}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -380,8 +392,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysMissingField) { TEST(BtreeKeyGeneratorTest, GetKeysSubobjectMissing) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [1, 2]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -390,9 +402,9 @@ TEST(BtreeKeyGeneratorTest, GetKeysSubobjectMissing) { TEST(BtreeKeyGeneratorTest, GetKeysFromCompound) { BSONObj keyPattern = fromjson("{x: 1, y: 1}"); BSONObj genKeysFrom = fromjson("{x: 'a', y: 'b'}"); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - fromjson("{'': 'a', '': 'b'}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': 'a', '': 'b'}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}, MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, 
expectedKeys, expectedMultikeyPaths)); @@ -401,9 +413,9 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromCompound) { TEST(BtreeKeyGeneratorTest, GetKeysFromCompoundMissing) { BSONObj keyPattern = fromjson("{x: 1, y: 1}"); BSONObj genKeysFrom = fromjson("{x: 'a'}"); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - fromjson("{'': 'a', '': null}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': 'a', '': null}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}, MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -412,8 +424,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromCompoundMissing) { TEST(BtreeKeyGeneratorTest, GetKeysFromArraySubelementComplex) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a:[{b:[2]}]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; // Both the 'a' and 'a.b' arrays contain a single element, so they are considered multikey. MultikeyPaths expectedMultikeyPaths{{0U, 1U}}; @@ -438,12 +450,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromParallelArraysComplex) { TEST(BtreeKeyGeneratorTest, GetKeysAlternateMissing) { BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}"); BSONObj genKeysFrom = fromjson("{a:[{b:1},{c:2}]}"); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - fromjson("{'': null, '': 2}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - fromjson("{'': 1, '': null}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + fromjson("{'': null, '': 2}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + fromjson("{'': 1, '': null}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -452,12 +464,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysAlternateMissing) { TEST(BtreeKeyGeneratorTest, GetKeysFromMultiComplex) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a:[{b:1},{b:[1,2,3]}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U, 1U}}; 
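For reference, every hunk in this test file applies the same mechanical rename: the KeyString namespace becomes key_string, while the builder arguments (version, key BSON, ordering) are left untouched. A minimal sketch of the post-rename call shape, assuming the fromjson(), Ordering::make(), and KeyStringSet helpers already used throughout this file:

    // Illustrative sketch only; mirrors the pattern of the surrounding hunks.
    // Before the rename this read: KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, ...);
    key_string::HeapBuilder keyString(
        key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj()));
    KeyStringSet expectedKeys{keyString.release()};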
ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -466,12 +478,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromMultiComplex) { TEST(BtreeKeyGeneratorTest, GetKeysFromArrayOfSubobjectsWithArrayValues) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [{b: [1, 2]}, {b: [2, 3]}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U, 1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -480,12 +492,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArrayOfSubobjectsWithArrayValues) { TEST(BtreeKeyGeneratorTest, GetKeysFromArrayOfSubobjectsWithNonDistinctArrayValues) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [{b: [1, 2, 3]}, {b: [2]}, {b: [3, 1]}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U, 1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -494,14 +506,15 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromArrayOfSubobjectsWithNonDistinctArrayValu TEST(BtreeKeyGeneratorTest, GetKeysArrayEmpty) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{a:[1,2]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': undefined}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString4( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, 
fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': undefined}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString4( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.getValueCopy(), keyString2.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -526,10 +539,10 @@ TEST(BtreeKeyGeneratorTest, GetKeysArrayEmpty) { TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleArray) { BSONObj keyPattern = fromjson("{a: 1, a: 1}"); BSONObj genKeysFrom = fromjson("{a:[1,2]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1, '': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2, '': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1, '': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2, '': 2}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -538,9 +551,9 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleArray) { TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleEmptyArray) { BSONObj keyPattern = fromjson("{a: 1, a: 1}"); BSONObj genKeysFrom = fromjson("{a:[]}"); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - fromjson("{'': undefined, '': undefined}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': undefined, '': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -549,13 +562,13 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleEmptyArray) { TEST(BtreeKeyGeneratorTest, GetKeysFromMultiEmptyArray) { BSONObj keyPattern = fromjson("{a: 1, b: 1}"); BSONObj genKeysFrom = fromjson("{a: 1, b: [1, 2]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1, '': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 1, '': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - fromjson("{'': 1, '': undefined}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1, '': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 1, '': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': 1, '': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.getValueCopy(), keyString2.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -574,8 +587,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromMultiEmptyArray) { TEST(BtreeKeyGeneratorTest, 
GetKeysFromNestedEmptyArray) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a:[]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -584,9 +597,9 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromNestedEmptyArray) { TEST(BtreeKeyGeneratorTest, GetKeysFromMultiNestedEmptyArray) { BSONObj keyPattern = fromjson("{'a.b': 1, 'a.c': 1}"); BSONObj genKeysFrom = fromjson("{a:[]}"); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - fromjson("{'': null, '': null}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': null, '': null}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -595,15 +608,15 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromMultiNestedEmptyArray) { TEST(BtreeKeyGeneratorTest, GetKeysFromUnevenNestedEmptyArray) { BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a:[]}"); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - fromjson("{'': undefined, '': null}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - fromjson("{'': {b:1}, '': 1}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - fromjson("{'': {b:[]}, '': undefined}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + fromjson("{'': undefined, '': null}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + fromjson("{'': {b:1}, '': 1}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': {b:[]}, '': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -623,9 +636,9 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromUnevenNestedEmptyArray) { TEST(BtreeKeyGeneratorTest, GetKeysFromReverseUnevenNestedEmptyArray) { BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}"); BSONObj genKeysFrom = fromjson("{a:[]}"); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - fromjson("{'': null, '': undefined}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': null, '': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -635,9 +648,9 @@ TEST(BtreeKeyGeneratorTest, SparseReverseUnevenNestedEmptyArray) { const bool sparse = true; BSONObj keyPattern = fromjson("{'a.b': 1, 'a': 1}"); BSONObj genKeysFrom = fromjson("{a:[]}"); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - fromjson("{'': null, '': 
undefined}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': null, '': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths, sparse)); @@ -679,8 +692,8 @@ TEST(BtreeKeyGeneratorTest, SparseNonObjectMissingNestedField) { const bool sparse = true; BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a:[]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths, sparse)); @@ -697,12 +710,13 @@ TEST(BtreeKeyGeneratorTest, SparseNonObjectMissingNestedField) { TEST(BtreeKeyGeneratorTest, GetKeysFromIndexedArrayIndex) { BSONObj keyPattern = fromjson("{'a.0': 1}"); BSONObj genKeysFrom = fromjson("{a:[1]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': [1]}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': undefined}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': [1]}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.getValueCopy()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -740,12 +754,13 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromIndexedArrayIndex) { TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleIndexedArrayIndex) { BSONObj keyPattern = fromjson("{'a.0.0': 1}"); BSONObj genKeysFrom = fromjson("{a:[[1]]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': undefined}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -774,12 +789,13 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromDoubleIndexedArrayIndex) { TEST(BtreeKeyGeneratorTest, GetKeysFromObjectWithinArray) { BSONObj keyPattern = 
fromjson("{'a.0.b': 1}"); BSONObj genKeysFrom = fromjson("{a:[{b:1}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': [1]}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': undefined}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': [1]}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.getValueCopy()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -822,10 +838,10 @@ TEST(BtreeKeyGeneratorTest, GetKeysFromObjectWithinArray) { TEST(BtreeKeyGeneratorTest, GetKeysPositionalElementIsExpandedArray) { BSONObj keyPattern = fromjson("{'a.0.b': 1}"); BSONObj genKeysFrom = fromjson("{a:[[{b:1}, {b:2}]]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release()}; MultikeyPaths expectedMultikeyPaths{{1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -834,8 +850,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysPositionalElementIsExpandedArray) { TEST(BtreeKeyGeneratorTest, GetKeysTrailingPositionalElementIsSingletonArray) { BSONObj keyPattern = fromjson("{'a.b.c.3': 1}"); BSONObj genKeysFrom = fromjson("{a:{b:{c:[0,1,2,[3]]}}}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': [3]}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': [3]}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -844,8 +860,9 @@ TEST(BtreeKeyGeneratorTest, GetKeysTrailingPositionalElementIsSingletonArray) { TEST(BtreeKeyGeneratorTest, GetKeysTrailingPositionalElementIsEmptyArray) { BSONObj keyPattern = fromjson("{'a.b.c.3': 1}"); BSONObj genKeysFrom = fromjson("{a:{b:{c:[0,1,2,[]]}}}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': undefined}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -854,8 +871,9 @@ TEST(BtreeKeyGeneratorTest, GetKeysTrailingPositionalElementIsEmptyArray) { 
TEST(BtreeKeyGeneratorTest, GetKeysManyPositionalElementsComplex) { BSONObj keyPattern = fromjson("{'a.0.1.2.b.0': 1}"); BSONObj genKeysFrom = fromjson("{a:[[1, [1, 2, [{b: [[], 2]}]]], 1]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': undefined}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': undefined}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{3U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -864,8 +882,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysManyPositionalElementsComplex) { TEST(BtreeKeyGeneratorTest, GetKeysFromArrayWithinObjectWithinArray) { BSONObj keyPattern = fromjson("{'a.0.b.0': 1}"); BSONObj genKeysFrom = fromjson("{a:[{b:[1]}]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -904,12 +922,12 @@ TEST(BtreeKeyGeneratorTest, ParallelArraysUneven) { TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallel) { BSONObj keyPattern = fromjson("{'a.b.c': 1}"); BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4]}}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 4}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 4}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U, 2U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -918,13 +936,13 @@ TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallel) { TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallelCompound) { BSONObj keyPattern = fromjson("{'a.b.c': 1, 'a.b.d': 1}"); BSONObj genKeysFrom = fromjson("{a: [1, 2, {b: {c: [3, 4], d: 5}}]}"); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - fromjson("{'': null, '': null}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 3, '': 5}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 4, '': 5}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + fromjson("{'': null, '': null}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 3, '': 5}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + 
key_string::Version::kLatestVersion, fromjson("{'': 4, '': 5}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U, 2U}, {0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -933,21 +951,21 @@ TEST(BtreeKeyGeneratorTest, MultipleArraysNotParallelCompound) { TEST(BtreeKeyGeneratorTest, GetKeysComplexNestedArrays) { BSONObj keyPattern = fromjson("{'a.b.c.d': 1, 'a.g': 1, 'a.b.f': 1, 'a.b.c': 1, 'a.b.e': 1}"); BSONObj genKeysFrom = fromjson("{a: [1, {b: [2, {c: [3, {d: 1}], e: 4}, 5, {f: 6}], g: 7}]}"); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - fromjson("{'':null, '':null, '':null, '':null, '':null}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - fromjson("{'':null, '':7, '':null, '':null, '':null}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - fromjson("{'':null, '':7, '':null, '':3, '':4}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString4(KeyString::Version::kLatestVersion, - fromjson("{'':null, '':7, '':6, '':null, '':null}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString5(KeyString::Version::kLatestVersion, - fromjson("{'':1, '':7, '':null, '':{d: 1}, '':4}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + fromjson("{'':null, '':null, '':null, '':null, '':null}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + fromjson("{'':null, '':7, '':null, '':null, '':null}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'':null, '':7, '':null, '':3, '':4}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString4(key_string::Version::kLatestVersion, + fromjson("{'':null, '':7, '':6, '':null, '':null}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString5(key_string::Version::kLatestVersion, + fromjson("{'':1, '':7, '':null, '':{d: 1}, '':4}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release(), @@ -961,8 +979,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysComplexNestedArrays) { TEST(BtreeKeyGeneratorTest, GetKeys2DArray) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{a: [[2]]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': [2]}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': [2]}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1019,8 +1037,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysParallelArraysOneArrayEmptyNested) { TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternMissingElement) { BSONObj keyPattern = fromjson("{'a.2': 1}"); BSONObj genKeysFrom = fromjson("{a: [{'2': 5}]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths 
expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1030,8 +1048,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternMissingElement) { TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray) { BSONObj keyPattern = fromjson("{'a.2': 1}"); BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1041,8 +1059,9 @@ TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray) { TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray2) { BSONObj keyPattern = fromjson("{'a.2': 1}"); BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], [3, 4, 6], [0, 1, 2]]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': [0, 1, 2]}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': [0, 1, 2]}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1052,8 +1071,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray2) { TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray3) { BSONObj keyPattern = fromjson("{'a.2': 1}"); BSONObj genKeysFrom = fromjson("{a: [{'0': 1, '1': 2, '2': 5}]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1063,8 +1082,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray3) { TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray4) { BSONObj keyPattern = fromjson("{'a.b.2': 1}"); BSONObj genKeysFrom = fromjson("{a: [{b: [[1, 2, 5]]}]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U, 1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1074,10 +1093,10 @@ TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray4) { TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray5) { BSONObj keyPattern = fromjson("{'a.2': 1}"); BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5], {'2': 6}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 6}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 
null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 6}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1086,8 +1105,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysPositionalKeyPatternNestedArray5) { TEST(BtreeKeyGeneratorTest, GetNullKeyNestedArray) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [[1, 2, 5]]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1096,18 +1115,18 @@ TEST(BtreeKeyGeneratorTest, GetNullKeyNestedArray) { TEST(BtreeKeyGeneratorTest, GetKeysUnevenNestedArrays) { BSONObj keyPattern = fromjson("{a: 1, 'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [1, {b: [2, 3, 4]}]}"); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - fromjson("{'': 1, '': null}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - fromjson("{'': {b:[2,3,4]}, '': 2}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - fromjson("{'': {b:[2,3,4]}, '': 3}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString4(KeyString::Version::kLatestVersion, - fromjson("{'': {b:[2,3,4]}, '': 4}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + fromjson("{'': 1, '': null}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + fromjson("{'': {b:[2,3,4]}, '': 2}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': {b:[2,3,4]}, '': 3}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString4(key_string::Version::kLatestVersion, + fromjson("{'': {b:[2,3,4]}, '': 4}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{ keyString1.release(), keyString2.release(), keyString3.release(), keyString4.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U, 1U}}; @@ -1119,8 +1138,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysUnevenNestedArrays) { TEST(BtreeKeyGeneratorTest, GetKeysRepeatedFieldName) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{a: 2, a: 3}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1131,10 +1150,10 @@ TEST(BtreeKeyGeneratorTest, GetKeysRepeatedFieldName) { TEST(BtreeKeyGeneratorTest, GetKeysEmptyPathPiece) { BSONObj keyPattern = fromjson("{'a..c': 1}"); BSONObj genKeysFrom = fromjson("{a: {'': [{c: 1}, {c: 2}]}}"); - KeyString::HeapBuilder keyString1( - 
KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release()}; MultikeyPaths expectedMultikeyPaths{{1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1145,10 +1164,10 @@ TEST(BtreeKeyGeneratorTest, GetKeysEmptyPathPiece) { TEST(BtreeKeyGeneratorTest, GetKeysLastPathPieceEmpty) { BSONObj keyPattern = fromjson("{'a.': 1}"); BSONObj genKeysFrom = fromjson("{a: 2}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': {'': 2}}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': {'': 2}}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1162,8 +1181,8 @@ TEST(BtreeKeyGeneratorTest, GetKeysLastPathPieceEmpty) { TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty) { BSONObj keyPattern = fromjson("{'.a': 1}"); BSONObj genKeysFrom = fromjson("{a: 2}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1172,12 +1191,12 @@ TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty) { TEST(BtreeKeyGeneratorTest, GetKeysFirstPathPieceEmpty2) { BSONObj keyPattern = fromjson("{'.a': 1}"); BSONObj genKeysFrom = fromjson("{'': [{a: [1, 2, 3]}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{0U, 1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1195,8 +1214,8 @@ TEST(BtreeKeyGeneratorTest, PositionalKeyPatternParallelArrays) { 
TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_b_Extracts_b_ElementInsideSingleton2DArray) { BSONObj keyPattern = fromjson("{'a.0.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{1U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1205,8 +1224,8 @@ TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_b_Extracts_b_ElementInsideSingleton2D TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_0_b_Extracts_b_ElementInsideSingleton2DArray) { BSONObj keyPattern = fromjson("{'a.0.0.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [[{b: 1}]]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1216,12 +1235,12 @@ TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_0_b_ExtractsEachValueFrom_b_ArrayInsideSingleton2DArray) { BSONObj keyPattern = fromjson("{'a.0.0.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [[{b: [1, 2, 3]}]]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{3U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1230,8 +1249,8 @@ TEST(BtreeKeyGeneratorTest, TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_0_b_Extracts_b_ElementInsideSingleton3DArray) { BSONObj keyPattern = fromjson("{'a.0.0.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [[[ {b: 1} ]]]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{2U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1240,12 +1259,12 @@ TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_0_b_Extracts_b_ElementInsideSingleton TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_0_b_ExtractsEach_b_ElementInside3DArray) { BSONObj keyPattern = fromjson("{'a.0.0.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [[[{b: 1}, {b: 2}, {b: 3}]]]}"); - KeyString::HeapBuilder keyString1( - 
KeyString::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 1}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 3}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; MultikeyPaths expectedMultikeyPaths{{2U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1254,8 +1273,8 @@ TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_0_b_ExtractsEach_b_ElementInside3DArr TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_0_b_ExtractsNullFrom4DArray) { BSONObj keyPattern = fromjson("{'a.0.0.b': 1}"); BSONObj genKeysFrom = fromjson("{a: [[[[ {b: 1} ]]]]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': null}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{2U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1264,8 +1283,8 @@ TEST(BtreeKeyGeneratorTest, KeyPattern_a_0_0_b_ExtractsNullFrom4DArray) { TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays5) { BSONObj keyPattern = fromjson("{'a.b.1': 1}"); BSONObj genKeysFrom = fromjson("{a: [{b: [1, 2]}]}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 2}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; MultikeyPaths expectedMultikeyPaths{{0U}}; ASSERT(testKeygen(keyPattern, genKeysFrom, expectedKeys, expectedMultikeyPaths)); @@ -1275,18 +1294,18 @@ TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays5) { TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays6) { BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}"); BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: 3}]}"); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - fromjson("{'': {b:3}, '': 3, '': 1, '': null, '': 1}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - fromjson("{'': {b:3}, '': 3, '': 2, '': null, '': 1}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString4(KeyString::Version::kLatestVersion, - fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + fromjson("{'': {b:3}, '': 3, '': 1, '': null, '': 1}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + fromjson("{'': {b:3}, '': 3, '': 2, '': null, 
'': 1}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString4(key_string::Version::kLatestVersion, + fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{ keyString1.release(), keyString2.release(), keyString3.release(), keyString4.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U, 1U}, {2U}, {0U}, MultikeyComponents{}}; @@ -1297,20 +1316,20 @@ TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays6) { TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays7) { BSONObj keyPattern = fromjson("{'a': 1, 'a.b': 1, 'a.0.b':1, 'a.b.0': 1, 'a.0.b.0': 1}"); BSONObj genKeysFrom = fromjson("{a: [{b: [1,2]}, {b: {'0': 3}}]}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 1, '': 3, '': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': {b:{'0':3}}, '': {'0':3}, '': 2, '': 3, '': 1}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString4(KeyString::Version::kLatestVersion, - fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + fromjson("{'': {b:[1,2]}, '': 1, '': 1, '': 1, '': 1}"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString4(key_string::Version::kLatestVersion, + fromjson("{'': {b:[1,2]}, '': 2, '': 2, '': 1, '': 1}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{ keyString1.release(), keyString2.release(), keyString3.release(), keyString4.release()}; MultikeyPaths expectedMultikeyPaths{{0U}, {0U, 1U}, {2U}, {0U}, MultikeyComponents{}}; @@ -1320,8 +1339,8 @@ TEST(BtreeKeyGeneratorTest, PositionalKeyPatternNestedArrays7) { TEST(BtreeKeyGeneratorTest, GetCollationAwareIdKeyFromObject) { BSONObj keyPattern = fromjson("{_id: 1}"); BSONObj genKeysFrom = fromjson("{_id: 'foo', b: 4}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; @@ -1332,8 +1351,8 @@ TEST(BtreeKeyGeneratorTest, GetCollationAwareIdKeyFromObject) { TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromObjectSimple) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{b: 4, a: 'foo'}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); 
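The collation-aware hunks around this point follow the same shape: with the reverse-string mock collator, an indexed string value such as 'foo' is expected to produce the reversed key 'oof'. A minimal sketch of that expectation, under the same assumptions as the sketch above:

    // Illustrative sketch only; mirrors the collation-aware tests in this file.
    CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString);
    key_string::HeapBuilder keyString(
        key_string::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj()));
    KeyStringSet expectedKeys{keyString.release()};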
MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; @@ -1344,8 +1363,8 @@ TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromObjectSimple) { TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromObjectDotted) { BSONObj keyPattern = fromjson("{'a.b': 1}"); BSONObj genKeysFrom = fromjson("{a: {b: 'foo'}, c: 4}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; @@ -1356,12 +1375,12 @@ TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromObjectDotted) { TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromArraySimple) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{a: ['foo', 'bar', 'baz']}"); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, fromjson("{'': 'rab'}"), Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3( - KeyString::Version::kLatestVersion, fromjson("{'': 'zab'}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, fromjson("{'': 'oof'}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, fromjson("{'': 'rab'}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3( + key_string::Version::kLatestVersion, fromjson("{'': 'zab'}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); MultikeyPaths expectedMultikeyPaths{{0U}}; @@ -1372,8 +1391,8 @@ TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromArraySimple) { TEST(BtreeKeyGeneratorTest, CollatorDoesNotAffectNonStringIdKey) { BSONObj keyPattern = fromjson("{_id: 1}"); BSONObj genKeysFrom = fromjson("{_id: 5, b: 4}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; @@ -1384,8 +1403,8 @@ TEST(BtreeKeyGeneratorTest, CollatorDoesNotAffectNonStringIdKey) { TEST(BtreeKeyGeneratorTest, CollatorDoesNotAffectNonStringKeys) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{b: 4, a: 5}"); - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, fromjson("{'': 5}"), Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; @@ -1396,9 +1415,9 @@ TEST(BtreeKeyGeneratorTest, CollatorDoesNotAffectNonStringKeys) { TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromNestedObject) { BSONObj 
keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{b: 4, a: {c: 'foo'}}"); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - fromjson("{'': {c: 'oof'}}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': {c: 'oof'}}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; @@ -1409,9 +1428,9 @@ TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromNestedObject) { TEST(BtreeKeyGeneratorTest, GetCollationAwareKeysFromNestedArray) { BSONObj keyPattern = fromjson("{a: 1}"); BSONObj genKeysFrom = fromjson("{b: 4, a: {c: ['foo', 'bar', 'baz']}}"); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - fromjson("{'': {c: ['oof', 'rab', 'zab']}}"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + fromjson("{'': {c: ['oof', 'rab', 'zab']}}"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); MultikeyPaths expectedMultikeyPaths{MultikeyComponents{}}; diff --git a/src/mongo/db/index/bulk_builder_common.h b/src/mongo/db/index/bulk_builder_common.h index 925fff50b0af8..0ae2a5c5b1620 100644 --- a/src/mongo/db/index/bulk_builder_common.h +++ b/src/mongo/db/index/bulk_builder_common.h @@ -69,13 +69,14 @@ template class BulkBuilderCommon : public IndexAccessMethod::BulkBuilder { public: - using KeyHandlerFn = std::function; + using KeyHandlerFn = std::function; using RecordIdHandlerFn = std::function; BulkBuilderCommon(int64_t numKeys, std::string message, std::string indexName) : _keysInserted(numKeys), _progressMessage(message), _indexName(indexName){}; Status commit(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, bool dupsAllowed, int32_t yieldIterations, const KeyHandlerFn& onDuplicateKeyInserted, @@ -83,7 +84,7 @@ class BulkBuilderCommon : public IndexAccessMethod::BulkBuilder { Timer timer; - auto builder = static_cast(this)->setUpBulkInserter(opCtx, dupsAllowed); + auto builder = static_cast(this)->setUpBulkInserter(opCtx, entry, dupsAllowed); auto it = static_cast(this)->finalizeSort(); ProgressMeterHolder pm; @@ -131,7 +132,7 @@ class BulkBuilderCommon : public IndexAccessMethod::BulkBuilder { bool isDup; try { isDup = static_cast(this)->duplicateCheck( - opCtx, data, dupsAllowed, onDuplicateRecord); + opCtx, entry, data, dupsAllowed, onDuplicateRecord); } catch (DBException& e) { return e.toStatus(); } @@ -142,7 +143,7 @@ class BulkBuilderCommon : public IndexAccessMethod::BulkBuilder { try { - writeConflictRetry(opCtx, "addingKey", _ns.ns(), [&] { + writeConflictRetry(opCtx, "addingKey", _ns, [&] { WriteUnitOfWork wunit(opCtx); static_cast(this)->insertKey(builder, data); wunit.commit(); @@ -161,7 +162,7 @@ class BulkBuilderCommon : public IndexAccessMethod::BulkBuilder { // Yield locks every 'yieldIterations' key insertions. 
if (yieldIterations > 0 && (++iterations % yieldIterations == 0)) { - yield(opCtx, &collection, _ns); + entry = yield(opCtx, collection, _ns, entry); } { diff --git a/src/mongo/db/index/column_cell.cpp b/src/mongo/db/index/column_cell.cpp index 86f11a0f25cae..1bd85f7d50f13 100644 --- a/src/mongo/db/index/column_cell.cpp +++ b/src/mongo/db/index/column_cell.cpp @@ -30,7 +30,24 @@ #include "mongo/db/index/column_cell.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" #include "mongo/db/storage/column_store.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/index/column_cell_test.cpp b/src/mongo/db/index/column_cell_test.cpp index 0b998247b72ba..c488d5af4a0be 100644 --- a/src/mongo/db/index/column_cell_test.cpp +++ b/src/mongo/db/index/column_cell_test.cpp @@ -28,10 +28,22 @@ */ #include "mongo/db/index/column_cell.h" + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/storage/column_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/hex.h" -#include "mongo/util/str.h" +#include "mongo/util/shared_buffer.h" namespace mongo { TEST(ColumnCell, AppendElementToCellTest) { diff --git a/src/mongo/db/index/column_key_generator.cpp b/src/mongo/db/index/column_key_generator.cpp index 863fbe17d3dfe..c6a77ab8926e8 100644 --- a/src/mongo/db/index/column_key_generator.cpp +++ b/src/mongo/db/index/column_key_generator.cpp @@ -28,16 +28,39 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index/column_key_generator.h" - -#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/projection_executor.h" +#include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/index/column_key_generator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/query/projection_ast.h" +#include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/storage/column_store.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decimal_counter.h" #include "mongo/util/functional.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/itoa.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex diff --git a/src/mongo/db/index/column_key_generator.h b/src/mongo/db/index/column_key_generator.h index 5ffaa944b6326..f8957fddd53be 100644 --- a/src/mongo/db/index/column_key_generator.h +++ b/src/mongo/db/index/column_key_generator.h @@ -29,14 +29,25 @@ #pragma once +#include +#include #include +#include #include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include 
"mongo/db/catalog/index_catalog.h" #include "mongo/db/exec/index_path_projection.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/column_store.h" #include "mongo/util/functional.h" #include "mongo/util/string_map.h" diff --git a/src/mongo/db/index/column_key_generator_test.cpp b/src/mongo/db/index/column_key_generator_test.cpp index 6f424411058c8..7b5501db24d19 100644 --- a/src/mongo/db/index/column_key_generator_test.cpp +++ b/src/mongo/db/index/column_key_generator_test.cpp @@ -27,14 +27,28 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include + #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/index/column_cell.h" #include "mongo/db/index/column_key_generator.h" -#include "mongo/platform/basic.h" -#include "mongo/unittest/unittest.h" -#include +#include "mongo/stdx/unordered_set.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" namespace mongo::column_keygen { namespace { diff --git a/src/mongo/db/index/column_store_sorter.cpp b/src/mongo/db/index/column_store_sorter.cpp index bab951ee77284..a56feccedfde7 100644 --- a/src/mongo/db/index/column_store_sorter.cpp +++ b/src/mongo/db/index/column_store_sorter.cpp @@ -31,7 +31,25 @@ #include "mongo/db/index/column_store_sorter.h" +#include #include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/random.h" +#include "mongo/util/str.h" namespace mongo { struct ComparisonForPathAndRid { @@ -270,7 +288,7 @@ ColumnStoreSorter::persistDataForShutdown() { std::back_inserter(ranges), [](const auto it) { return it->getRange(); }); - return {_spillFile->path().filename().string(), ranges}; + return {_spillFile->path().filename().string(), std::move(ranges)}; } /** @@ -316,9 +334,17 @@ class ColumnStoreSorter::InMemoryIterator final : public ColumnStoreSorter::Iter return {key, contents}; } - const std::pair& current() final { + Key nextWithDeferredValue() override { + MONGO_UNREACHABLE; + } + + Value getDeferredValue() override { + MONGO_UNREACHABLE; + } + + const Key& current() final { tasserted(ErrorCodes::NotImplemented, - "current() not implemented for ColumnStoreSorter::Iterator"); + "current() not implemented for ColumnStoreSorter::InMemoryIterator"); } void openSource() final {} @@ -349,6 +375,7 @@ std::string nextFileName() { #undef MONGO_LOGV2_DEFAULT_COMPONENT #include "mongo/db/sorter/sorter.cpp" + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex MONGO_CREATE_SORTER(mongo::ColumnStoreSorter::Key, mongo::ColumnStoreSorter::Value, diff --git a/src/mongo/db/index/column_store_sorter.h b/src/mongo/db/index/column_store_sorter.h index 61c11ae37cd8c..c1855be1aed9e 100644 --- a/src/mongo/db/index/column_store_sorter.h +++ b/src/mongo/db/index/column_store_sorter.h @@ -29,9 +29,25 @@ #pragma once +#include +#include 
+#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/record_id.h" #include "mongo/db/sorter/sorter.h" +#include "mongo/db/sorter/sorter_gen.h" +#include "mongo/db/sorter/sorter_stats.h" #include "mongo/db/storage/column_store.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/string_map.h" namespace mongo { /** diff --git a/src/mongo/db/index/column_store_sorter_test.cpp b/src/mongo/db/index/column_store_sorter_test.cpp index 5725302328707..c15cd5660b135 100644 --- a/src/mongo/db/index/column_store_sorter_test.cpp +++ b/src/mongo/db/index/column_store_sorter_test.cpp @@ -28,8 +28,17 @@ */ #include "mongo/db/index/column_store_sorter.h" + +// IWYU pragma: no_include "ext/alloc_traits.h" +#include + +#include "mongo/db/storage/storage_options.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" namespace mongo { TEST(ColumnStoreSorter, SortTest) { diff --git a/src/mongo/db/index/columns_access_method.cpp b/src/mongo/db/index/columns_access_method.cpp index b7c7a825a8fc8..0333ae2f0b7da 100644 --- a/src/mongo/db/index/columns_access_method.cpp +++ b/src/mongo/db/index/columns_access_method.cpp @@ -30,22 +30,41 @@ #include "mongo/db/index/columns_access_method.h" +#include +#include +#include +#include +#include +#include +#include #include +#include + #include "mongo/base/status.h" -#include "mongo/base/status_with.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" #include "mongo/db/index/bulk_builder_common.h" #include "mongo/db/index/column_cell.h" #include "mongo/db/index/column_key_generator.h" #include "mongo/db/index/column_store_sorter.h" #include "mongo/db/index/index_build_interceptor.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/sorter/sorter.h" +#include "mongo/db/sorter/sorter_gen.h" #include "mongo/db/storage/execution_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" -#include "mongo/util/progress_meter.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -66,19 +85,21 @@ inline void dec(int64_t* counter) { ColumnStoreAccessMethod::ColumnStoreAccessMethod(IndexCatalogEntry* ice, std::unique_ptr store) : _store(std::move(store)), - _indexCatalogEntry(ice), - _descriptor(ice->descriptor()), - _keyGen(_descriptor->keyPattern(), _descriptor->pathProjection()) {} + _keyGen(ice->descriptor()->keyPattern(), ice->descriptor()->pathProjection()) {} class ColumnStoreAccessMethod::BulkBuilder final : public BulkBuilderCommon { public: - BulkBuilder(ColumnStoreAccessMethod* index, size_t maxMemoryUsageBytes, StringData dbName); + BulkBuilder(ColumnStoreAccessMethod* index, + const IndexCatalogEntry* entry, + size_t maxMemoryUsageBytes, + const DatabaseName& dbName); 
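
A recurring theme in this and the following hunks is that access methods stop caching `IndexCatalogEntry*` / `IndexDescriptor*` members and instead accept the entry on each call. A minimal, self-contained sketch of the before/after shape, using stand-in types rather than the real catalog classes:

```cpp
// Sketch only: MockDescriptor / MockEntry stand in for IndexDescriptor / IndexCatalogEntry.
#include <string>

struct MockDescriptor {
    std::string name;
    const std::string& indexName() const { return name; }
};
struct MockEntry {
    MockDescriptor desc;
    const MockDescriptor* descriptor() const { return &desc; }
};

// Before: the entry is a member, pinned at construction time.
class OldStyleAccessMethod {
    const MockEntry* const _entry;
public:
    explicit OldStyleAccessMethod(const MockEntry* e) : _entry(e) {}
    const std::string& indexName() const { return _entry->descriptor()->indexName(); }
};

// After: the caller resolves the entry for the current operation and passes it in,
// mirroring accessors such as indexName(const IndexCatalogEntry* entry) in this patch.
class NewStyleAccessMethod {
public:
    const std::string& indexName(const MockEntry* entry) const {
        return entry->descriptor()->indexName();
    }
};
```
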
BulkBuilder(ColumnStoreAccessMethod* index, + const IndexCatalogEntry* entry, size_t maxMemoryUsageBytes, const IndexStateInfo& stateInfo, - StringData dbName); + const DatabaseName& dbName); // // Generic APIs @@ -86,6 +107,7 @@ class ColumnStoreAccessMethod::BulkBuilder final Status insert(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& rid, const InsertDeleteOptions& options, @@ -102,10 +124,12 @@ class ColumnStoreAccessMethod::BulkBuilder final std::unique_ptr finalizeSort(); std::unique_ptr setUpBulkInserter(OperationContext* opCtx, + const IndexCatalogEntry* entry, bool dupsAllowed); void debugEnsureSorted(const std::pair& data); bool duplicateCheck(OperationContext* opCtx, + const IndexCatalogEntry* entry, const std::pair& data, bool dupsAllowed, const RecordIdHandlerFn& onDuplicateRecord); @@ -127,26 +151,31 @@ class ColumnStoreAccessMethod::BulkBuilder final }; ColumnStoreAccessMethod::BulkBuilder::BulkBuilder(ColumnStoreAccessMethod* index, + const IndexCatalogEntry* entry, size_t maxMemoryUsageBytes, - StringData dbName) + const DatabaseName& dbName) : BulkBuilderCommon(0, "Index Build: inserting keys from external sorter into columnstore index", - index->_descriptor->indexName()), + entry->descriptor()->indexName()), _columnsAccess(index), - _sorter(maxMemoryUsageBytes, dbName, bulkBuilderFileStats(), bulkBuilderTracker()) { + _sorter(maxMemoryUsageBytes, + DatabaseNameUtil::serializeForCatalog(dbName), + bulkBuilderFileStats(), + bulkBuilderTracker()) { countNewBuildInStats(); } ColumnStoreAccessMethod::BulkBuilder::BulkBuilder(ColumnStoreAccessMethod* index, + const IndexCatalogEntry* entry, size_t maxMemoryUsageBytes, const IndexStateInfo& stateInfo, - StringData dbName) + const DatabaseName& dbName) : BulkBuilderCommon(stateInfo.getNumKeys().value_or(0), "Index Build: inserting keys from external sorter into columnstore index", - index->_descriptor->indexName()), + entry->descriptor()->indexName()), _columnsAccess(index), _sorter(maxMemoryUsageBytes, - dbName, + DatabaseNameUtil::serializeForCatalog(dbName), bulkBuilderFileStats(), stateInfo.getFileName()->toString(), *stateInfo.getRanges(), @@ -157,6 +186,7 @@ ColumnStoreAccessMethod::BulkBuilder::BulkBuilder(ColumnStoreAccessMethod* index Status ColumnStoreAccessMethod::BulkBuilder::insert( OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& rid, const InsertDeleteOptions& options, @@ -206,8 +236,8 @@ std::unique_ptr ColumnStoreAccessMethod::BulkBuilde } std::unique_ptr ColumnStoreAccessMethod::BulkBuilder::setUpBulkInserter( - OperationContext* opCtx, bool dupsAllowed) { - _ns = _columnsAccess->_indexCatalogEntry->getNSSFromCatalog(opCtx); + OperationContext* opCtx, const IndexCatalogEntry* entry, bool dupsAllowed) { + _ns = entry->getNSSFromCatalog(opCtx); return _columnsAccess->_store->makeBulkBuilder(opCtx); } @@ -234,6 +264,7 @@ void ColumnStoreAccessMethod::BulkBuilder::debugEnsureSorted( bool ColumnStoreAccessMethod::BulkBuilder::duplicateCheck( OperationContext* opCtx, + const IndexCatalogEntry* entry, const std::pair& data, bool dupsAllowed, const RecordIdHandlerFn& onDuplicateRecord) { @@ -279,6 +310,7 @@ void ColumnStoreAccessMethod::_visitCellsForIndexInsert( Status ColumnStoreAccessMethod::insert(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const std::vector& 
bsonRecords, const InsertDeleteOptions& options, int64_t* keysInsertedOut) { @@ -286,7 +318,7 @@ Status ColumnStoreAccessMethod::insert(OperationContext* opCtx, PooledFragmentBuilder buf(pooledBufferBuilder); // We cannot write to the index during its initial build phase, so we defer this insert as a // "side write" to be applied after the build completes. - if (_indexCatalogEntry->isHybridBuilding()) { + if (entry->isHybridBuilding()) { auto columnChanges = StorageExecutionContext::get(opCtx).columnChanges(); _visitCellsForIndexInsert( opCtx, buf, bsonRecords, [&](StringData path, const BsonRecord& rec) { @@ -304,8 +336,8 @@ Status ColumnStoreAccessMethod::insert(OperationContext* opCtx, } invariant(deleted == 0); }); - uassertStatusOK(_indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, *columnChanges, &inserted, &deleted)); + uassertStatusOK(entry->indexBuildInterceptor()->sideWrite( + opCtx, entry, *columnChanges, &inserted, &deleted)); return Status::OK(); } else { auto cursor = _store->newWriteCursor(opCtx); @@ -324,13 +356,14 @@ Status ColumnStoreAccessMethod::insert(OperationContext* opCtx, void ColumnStoreAccessMethod::remove(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& rid, bool logIfError, const InsertDeleteOptions& options, int64_t* keysDeletedOut, CheckRecordId checkRecordId) { - if (_indexCatalogEntry->isHybridBuilding()) { + if (entry->isHybridBuilding()) { auto columnChanges = StorageExecutionContext::get(opCtx).columnChanges(); _keyGen.visitPathsForDelete(obj, [&](StringData path) { columnChanges->emplace_back(path.toString(), @@ -341,8 +374,8 @@ void ColumnStoreAccessMethod::remove(OperationContext* opCtx, int64_t inserted = 0; int64_t removed = 0; fassert(6597801, - _indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, *columnChanges, &inserted, &removed)); + entry->indexBuildInterceptor()->sideWrite( + opCtx, entry, *columnChanges, &inserted, &removed)); if (keysDeletedOut) { *keysDeletedOut += removed; } @@ -363,12 +396,13 @@ Status ColumnStoreAccessMethod::update(OperationContext* opCtx, const BSONObj& newDoc, const RecordId& rid, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const InsertDeleteOptions& options, int64_t* keysInsertedOut, int64_t* keysDeletedOut) { PooledFragmentBuilder buf(pooledBufferBuilder); - if (_indexCatalogEntry->isHybridBuilding()) { + if (entry->isHybridBuilding()) { auto columnChanges = StorageExecutionContext::get(opCtx).columnChanges(); _keyGen.visitDiffForUpdate( oldDoc, @@ -405,8 +439,8 @@ Status ColumnStoreAccessMethod::update(OperationContext* opCtx, int64_t inserted = 0; int64_t deleted = 0; if (columnChanges->size() > 0) { - uassertStatusOK(_indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, *columnChanges, &inserted, &deleted)); + uassertStatusOK(entry->indexBuildInterceptor()->sideWrite( + opCtx, entry, *columnChanges, &inserted, &deleted)); } if (keysInsertedOut) { *keysInsertedOut += inserted; @@ -484,12 +518,13 @@ Status ColumnStoreAccessMethod::compact(OperationContext* opCtx) { std::unique_ptr ColumnStoreAccessMethod::initiateBulk( + const IndexCatalogEntry* entry, size_t maxMemoryUsageBytes, const boost::optional& stateInfo, - StringData dbName) { + const DatabaseName& dbName) { return (stateInfo && stateInfo->getFileName()) - ? std::make_unique(this, maxMemoryUsageBytes, *stateInfo, dbName) - : std::make_unique(this, maxMemoryUsageBytes, dbName); + ? 
std::make_unique(this, entry, maxMemoryUsageBytes, *stateInfo, dbName) + : std::make_unique(this, entry, maxMemoryUsageBytes, dbName); } std::shared_ptr ColumnStoreAccessMethod::getSharedIdent() const { @@ -502,6 +537,7 @@ void ColumnStoreAccessMethod::setIdent(std::shared_ptr ident) { Status ColumnStoreAccessMethod::applyIndexBuildSideWrite(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& operation, const InsertDeleteOptions& unusedOptions, KeyHandlerFn&& unusedFn, diff --git a/src/mongo/db/index/columns_access_method.h b/src/mongo/db/index/columns_access_method.h index 91004d3c0636c..9ca30b7245fec 100644 --- a/src/mongo/db/index/columns_access_method.h +++ b/src/mongo/db/index/columns_access_method.h @@ -29,10 +29,35 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog/validate_results.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/index/column_cell.h" #include "mongo/db/index/column_key_generator.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/resumable_index_builds_gen.h" #include "mongo/db/storage/column_store.h" +#include "mongo/db/storage/ident.h" +#include "mongo/util/functional.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -66,6 +91,7 @@ class ColumnStoreAccessMethod : public IndexAccessMethod { Status insert(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const std::vector& bsonRecords, const InsertDeleteOptions& options, int64_t* keysInsertedOut) final; @@ -73,6 +99,7 @@ class ColumnStoreAccessMethod : public IndexAccessMethod { void remove(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& rid, bool logIfError, @@ -86,12 +113,14 @@ class ColumnStoreAccessMethod : public IndexAccessMethod { const BSONObj& newDoc, const RecordId& rid, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const InsertDeleteOptions& options, int64_t* keysInsertedOut, int64_t* keysDeletedOut) final; Status applyIndexBuildSideWrite(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& operation, const InsertDeleteOptions& unusedOptions, KeyHandlerFn&& unusedFn, @@ -115,9 +144,10 @@ class ColumnStoreAccessMethod : public IndexAccessMethod { Status compact(OperationContext* opCtx) final; std::unique_ptr initiateBulk( + const IndexCatalogEntry* entry, size_t maxMemoryUsageBytes, const boost::optional& stateInfo, - StringData dbName) final; + const DatabaseName& dbName) final; std::shared_ptr getSharedIdent() const final; @@ -133,8 +163,8 @@ class ColumnStoreAccessMethod : public IndexAccessMethod { class BulkBuilder; - const std::string& indexName() const { - return _descriptor->indexName(); + const std::string& indexName(const IndexCatalogEntry* entry) const { + return entry->descriptor()->indexName(); } /** @@ 
-153,8 +183,6 @@ class ColumnStoreAccessMethod : public IndexAccessMethod { function_ref cb) const; const std::unique_ptr _store; - IndexCatalogEntry* const _indexCatalogEntry; // owned by IndexCatalog - const IndexDescriptor* const _descriptor; const column_keygen::ColumnKeyGenerator _keyGen; }; } // namespace mongo diff --git a/src/mongo/db/index/columns_access_method_test.cpp b/src/mongo/db/index/columns_access_method_test.cpp index 756f497caa407..53d456b124ed5 100644 --- a/src/mongo/db/index/columns_access_method_test.cpp +++ b/src/mongo/db/index/columns_access_method_test.cpp @@ -27,12 +27,35 @@ * it in the license file. */ +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/index/column_cell.h" #include "mongo/db/index/columns_access_method.h" -#include "mongo/db/index/index_build_interceptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/hex.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -47,7 +70,7 @@ class ColumnsAccessMethodTest : public CatalogTestFixture { public: protected: ColumnsAccessMethodTest() - : _pooledBuilder(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes) {} + : _pooledBuilder(key_string::HeapBuilder::kHeapAllocatorDefaultBytes) {} const CollectionPtr& collection() const { return _coll->getCollection(); @@ -58,10 +81,13 @@ class ColumnsAccessMethodTest : public CatalogTestFixture { int64_t rowId = ++_lastRowId; WriteUnitOfWork wuow(operationContext()); + const auto desc = + collection()->getIndexCatalog()->findIndexByName(operationContext(), "columnstore"); Status status = _accessMethod->insert( operationContext(), _pooledBuilder, collection(), + desc->getEntry(), std::vector{BsonRecord{RecordId(rowId), Timestamp(1, rowId), &obj}}, {}, &keysInserted); diff --git a/src/mongo/db/index/duplicate_key_tracker.cpp b/src/mongo/db/index/duplicate_key_tracker.cpp index 95c603397ea3b..413809ccb92bc 100644 --- a/src/mongo/db/index/duplicate_key_tracker.cpp +++ b/src/mongo/db/index/duplicate_key_tracker.cpp @@ -28,17 +28,35 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/index/duplicate_key_tracker.h" +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/index/duplicate_key_tracker.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/db/keypattern.h" -#include "mongo/db/storage/execution_context.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/service_context.h" 
+#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/progress_meter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -50,45 +68,44 @@ static constexpr StringData kKeyField = "key"_sd; } DuplicateKeyTracker::DuplicateKeyTracker(OperationContext* opCtx, const IndexCatalogEntry* entry) - : _indexCatalogEntry(entry), - _keyConstraintsTable(opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStore( + : _keyConstraintsTable(opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStore( opCtx, KeyFormat::Long)) { - invariant(_indexCatalogEntry->descriptor()->unique()); + invariant(entry->descriptor()->unique()); } DuplicateKeyTracker::DuplicateKeyTracker(OperationContext* opCtx, const IndexCatalogEntry* entry, - StringData ident) - : _indexCatalogEntry(entry) { + StringData ident) { _keyConstraintsTable = opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStoreFromExistingIdent( opCtx, ident); - invariant(_indexCatalogEntry->descriptor()->unique(), + invariant(entry->descriptor()->unique(), str::stream() << "Duplicate key tracker table exists on disk with ident: " << ident - << " but the index is not unique: " - << _indexCatalogEntry->descriptor()); + << " but the index is not unique: " << entry->descriptor()); } void DuplicateKeyTracker::keepTemporaryTable() { _keyConstraintsTable->keep(); } -Status DuplicateKeyTracker::recordKey(OperationContext* opCtx, const KeyString::Value& key) { +Status DuplicateKeyTracker::recordKey(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, + const key_string::Value& key) { invariant(opCtx->lockState()->inAWriteUnitOfWork()); LOGV2_DEBUG(20676, 1, "Index build: recording duplicate key conflict on unique index", - "index"_attr = _indexCatalogEntry->descriptor()->indexName()); + "index"_attr = indexCatalogEntry->descriptor()->indexName()); - // The KeyString::Value will be serialized in the format [KeyString][TypeBits]. We need to + // The key_string::Value will be serialized in the format [KeyString][TypeBits]. We need to // store the TypeBits for error reporting later on. The RecordId does not need to be stored, so // we exclude it from the serialization. 
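
The comment above spells out the on-disk format for recorded duplicates: keystring bytes plus TypeBits, with the RecordId stripped. A simplified, hedged sketch of how such a record is later rebuilt and rechecked, limited to calls that appear verbatim in this hunk (record-store cursor iteration, `key_string::Value::deserialize`, `dupKeyCheck`); progress reporting and logging are omitted, and `keyConstraintsRs` stands in for the tracker's temporary record store.

```cpp
// Hedged sketch of the recheck loop; not the implementation in the patch.
Status checkConstraintsSketch(OperationContext* opCtx,
                              const IndexCatalogEntry* indexCatalogEntry,
                              RecordStore* keyConstraintsRs) {
    auto index = indexCatalogEntry->accessMethod()->asSortedData()->getSortedDataInterface();
    auto cursor = keyConstraintsRs->getCursor(opCtx);
    for (auto record = cursor->next(); record; record = cursor->next()) {
        // Each record holds [KeyString][TypeBits] with no RecordId, so the key can be
        // rebuilt with the index's keystring version and rechecked for duplicates.
        BufReader reader(record->data.data(), record->data.size());
        auto key = key_string::Value::deserialize(reader, index->getKeyStringVersion());
        auto status = index->dupKeyCheck(opCtx, key);
        if (!status.isOK())
            return status;
    }
    return Status::OK();
}
```
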
BufBuilder builder; if (KeyFormat::Long == - _indexCatalogEntry->accessMethod() + indexCatalogEntry->accessMethod() ->asSortedData() ->getSortedDataInterface() ->rsKeyFormat()) { @@ -109,20 +126,21 @@ Status DuplicateKeyTracker::recordKey(OperationContext* opCtx, const KeyString:: if (numDuplicates % 1000 == 0) { LOGV2_INFO(4806700, "Index build: high number of duplicate keys on unique index", - "index"_attr = _indexCatalogEntry->descriptor()->indexName(), + "index"_attr = indexCatalogEntry->descriptor()->indexName(), "numDuplicateKeys"_attr = numDuplicates); } return Status::OK(); } -Status DuplicateKeyTracker::checkConstraints(OperationContext* opCtx) const { +Status DuplicateKeyTracker::checkConstraints(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry) const { invariant(!opCtx->lockState()->inAWriteUnitOfWork()); auto constraintsCursor = _keyConstraintsTable->rs()->getCursor(opCtx); auto record = constraintsCursor->next(); - auto index = _indexCatalogEntry->accessMethod()->asSortedData()->getSortedDataInterface(); + auto index = indexCatalogEntry->accessMethod()->asSortedData()->getSortedDataInterface(); static const char* curopMessage = "Index Build: checking for duplicate keys"; ProgressMeterHolder progress; @@ -139,7 +157,7 @@ Status DuplicateKeyTracker::checkConstraints(OperationContext* opCtx) const { resolved++; BufReader reader(record->data.data(), record->data.size()); - auto key = KeyString::Value::deserialize(reader, index->getKeyStringVersion()); + auto key = key_string::Value::deserialize(reader, index->getKeyStringVersion()); auto status = index->dupKeyCheck(opCtx, key); if (!status.isOK()) @@ -171,7 +189,7 @@ Status DuplicateKeyTracker::checkConstraints(OperationContext* opCtx) const { logLevel, "index build: resolved duplicate key conflicts for unique index", "numResolved"_attr = resolved, - "indexName"_attr = _indexCatalogEntry->descriptor()->indexName()); + "indexName"_attr = indexCatalogEntry->descriptor()->indexName()); return Status::OK(); } diff --git a/src/mongo/db/index/duplicate_key_tracker.h b/src/mongo/db/index/duplicate_key_tracker.h index ff19c357983cd..422b2c1461fb9 100644 --- a/src/mongo/db/index/duplicate_key_tracker.h +++ b/src/mongo/db/index/duplicate_key_tracker.h @@ -30,13 +30,19 @@ #pragma once #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/operation_context.h" #include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/temporary_record_store.h" +#include "mongo/platform/atomic_word.h" namespace mongo { @@ -73,7 +79,9 @@ class DuplicateKeyTracker { /** * Given a duplicate key, insert it into the key constraint table. */ - Status recordKey(OperationContext* opCtx, const KeyString::Value& key); + Status recordKey(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, + const key_string::Value& key); /** * Returns Status::OK if all previously recorded duplicate key constraint violations have been @@ -82,15 +90,14 @@ class DuplicateKeyTracker { * * Must not be in a WriteUnitOfWork. 
*/ - Status checkConstraints(OperationContext* opCtx) const; + Status checkConstraints(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry) const; std::string getTableIdent() const { return _keyConstraintsTable->rs()->getIdent(); } private: - const IndexCatalogEntry* _indexCatalogEntry; - AtomicWord _duplicateCounter{0}; // This temporary record store is owned by the duplicate key tracker and dropped along with it. diff --git a/src/mongo/db/index/expression_keys_private.cpp b/src/mongo/db/index/expression_keys_private.cpp index 1d574121050d4..2503593c4172f 100644 --- a/src/mongo/db/index/expression_keys_private.cpp +++ b/src/mongo/db/index/expression_keys_private.cpp @@ -30,30 +30,59 @@ #include "mongo/db/index/expression_keys_private.h" -#include +#include #include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement_comparator_interface.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/field_ref.h" #include "mongo/db/fts/fts_index_format.h" -#include "mongo/db/geo/geoconstants.h" +#include "mongo/db/fts/fts_spec.h" #include "mongo/db/geo/geometry_container.h" -#include "mongo/db/geo/geoparser.h" -#include "mongo/db/geo/s2.h" +#include "mongo/db/geo/hash.h" +#include "mongo/db/geo/shapes.h" #include "mongo/db/index/2d_common.h" #include "mongo/db/index/s2_common.h" #include "mongo/db/index_names.h" #include "mongo/db/query/collation/collation_index_key.h" +#include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_dotted_path_support.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/redaction.h" +#include "mongo/stdx/type_traits.h" #include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -120,13 +149,13 @@ Status S2GetKeysForElement(const BSONElement& element, /* * We take the cartesian product of all keys when appending. 
*/ -void appendToS2Keys(const std::vector& existingKeys, - std::vector* out, - KeyString::Version keyStringVersion, +void appendToS2Keys(const std::vector& existingKeys, + std::vector* out, + key_string::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, size_t maxKeys, - const std::function& fn) { + const std::function& fn) { if (context == SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys && existingKeys.size() + out->size() > maxKeys) { if (!relaxIndexMaxNumGeneratedKeysPerDocument.shouldFail()) { @@ -158,9 +187,9 @@ void appendToS2Keys(const std::vector& existingKeys, bool getS2GeoKeys(const BSONObj& document, const BSONElementSet& elements, const S2IndexingParams& params, - const std::vector& keysToAdd, - std::vector* out, - KeyString::Version keyStringVersion, + const std::vector& keysToAdd, + std::vector* out, + key_string::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, size_t maxKeys) { @@ -200,7 +229,7 @@ bool getS2GeoKeys(const BSONObj& document, context, ordering, maxKeys, - [](KeyString::HeapBuilder& ks) { ks.appendNull(); }); + [](key_string::HeapBuilder& ks) { ks.appendNull(); }); } return everGeneratedMultipleCells; } @@ -214,9 +243,9 @@ bool getS2GeoKeys(const BSONObj& document, bool getS2BucketGeoKeys(const BSONObj& document, const BSONElementSet& elements, const S2IndexingParams& params, - const std::vector& keysToAdd, - std::vector* out, - KeyString::Version keyStringVersion, + const std::vector& keysToAdd, + std::vector* out, + key_string::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, size_t maxKeys) { @@ -294,7 +323,7 @@ bool getS2BucketGeoKeys(const BSONObj& document, context, ordering, maxKeys, - [](KeyString::HeapBuilder& ks) { ks.appendNull(); }); + [](key_string::HeapBuilder& ks) { ks.appendNull(); }); } return generatedMultipleCells; } @@ -305,9 +334,9 @@ bool getS2BucketGeoKeys(const BSONObj& document, */ void getS2LiteralKeysArray(const BSONObj& obj, const CollatorInterface* collator, - const std::vector& keysToAdd, - std::vector* out, - KeyString::Version keyStringVersion, + const std::vector& keysToAdd, + std::vector* out, + key_string::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, size_t maxKeys) { @@ -320,7 +349,7 @@ void getS2LiteralKeysArray(const BSONObj& obj, context, ordering, maxKeys, - [](KeyString::HeapBuilder& ks) { ks.appendUndefined(); }); + [](key_string::HeapBuilder& ks) { ks.appendUndefined(); }); } else { // Non-empty arrays are exploded. 
while (objIt.more()) { @@ -331,7 +360,7 @@ void getS2LiteralKeysArray(const BSONObj& obj, context, ordering, maxKeys, - [&](KeyString::HeapBuilder& ks) { + [&](key_string::HeapBuilder& ks) { if (collator) { ks.appendBSONElement(elem, [&](StringData stringData) { return collator->getComparisonString(stringData); @@ -352,9 +381,9 @@ void getS2LiteralKeysArray(const BSONObj& obj, */ bool getS2OneLiteralKey(const BSONElement& elt, const CollatorInterface* collator, - const std::vector& keysToAdd, - std::vector* out, - KeyString::Version keyStringVersion, + const std::vector& keysToAdd, + std::vector* out, + key_string::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, size_t maxKeys) { @@ -370,7 +399,7 @@ bool getS2OneLiteralKey(const BSONElement& elt, context, ordering, maxKeys, - [&](KeyString::HeapBuilder& ks) { + [&](key_string::HeapBuilder& ks) { if (collator) { ks.appendBSONElement(elt, [&](StringData stringData) { return collator->getComparisonString(stringData); @@ -392,9 +421,9 @@ bool getS2OneLiteralKey(const BSONElement& elt, */ bool getS2LiteralKeys(const BSONElementSet& elements, const CollatorInterface* collator, - const std::vector& keysToAdd, - std::vector* out, - KeyString::Version keyStringVersion, + const std::vector& keysToAdd, + std::vector* out, + key_string::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, size_t maxKeys) { @@ -407,7 +436,7 @@ bool getS2LiteralKeys(const BSONElementSet& elements, context, ordering, maxKeys, - [](KeyString::HeapBuilder& ks) { ks.appendNull(); }); + [](key_string::HeapBuilder& ks) { ks.appendNull(); }); } else { for (BSONElementSet::iterator i = elements.begin(); i != elements.end(); ++i) { const bool thisElemIsArray = getS2OneLiteralKey( @@ -486,7 +515,7 @@ void ExpressionKeysPrivate::get2DKeys(SharedBufferFragmentBuilder& pooledBufferB const BSONObj& obj, const TwoDIndexingParams& params, KeyStringSet* keys, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, const boost::optional& id) { BSONElementMultiSet bSet; @@ -541,7 +570,7 @@ void ExpressionKeysPrivate::get2DKeys(SharedBufferFragmentBuilder& pooledBufferB continue; } - KeyString::PooledBuilder keyString(pooledBufferBuilder, keyStringVersion, ordering); + key_string::PooledBuilder keyString(pooledBufferBuilder, keyStringVersion, ordering); params.geoHashConverter->hash(locObj, &obj).appendHashMin(&keyString); // Go through all the other index keys @@ -578,7 +607,7 @@ void ExpressionKeysPrivate::getFTSKeys(SharedBufferFragmentBuilder& pooledBuffer const BSONObj& obj, const fts::FTSSpec& ftsSpec, KeyStringSet* keys, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, const boost::optional& id) { fts::FTSIndexFormat::getKeys( @@ -594,13 +623,13 @@ void ExpressionKeysPrivate::getHashKeys(SharedBufferFragmentBuilder& pooledBuffe bool isSparse, const CollatorInterface* collator, KeyStringSet* keys, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, bool ignoreArraysAlongPath, const boost::optional& id) { static const BSONObj nullObj = BSON("" << BSONNULL); auto hasFieldValue = false; - KeyString::PooledBuilder keyString(pooledBufferBuilder, keyStringVersion, ordering); + key_string::PooledBuilder keyString(pooledBufferBuilder, keyStringVersion, ordering); for (auto&& indexEntry : keyPattern) { auto indexPath = indexEntry.fieldNameStringData(); auto* cstr = 
indexPath.rawData(); @@ -668,11 +697,11 @@ void ExpressionKeysPrivate::getS2Keys(SharedBufferFragmentBuilder& pooledBufferB const S2IndexingParams& params, KeyStringSet* keys, MultikeyPaths* multikeyPaths, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, const boost::optional& id) { - std::vector keysToAdd; + std::vector keysToAdd; // Does one of our documents have a geo field? bool haveGeoField = false; @@ -705,7 +734,7 @@ void ExpressionKeysPrivate::getS2Keys(SharedBufferFragmentBuilder& pooledBufferB // requires // multiple cells for its covering. bool lastPathComponentCausesIndexToBeMultikey; - std::vector updatedKeysToAdd; + std::vector updatedKeysToAdd; if (IndexNames::GEO_2DSPHERE_BUCKET == keyElem.str()) { auto elementStorage = diff --git a/src/mongo/db/index/expression_keys_private.h b/src/mongo/db/index/expression_keys_private.h index c97d48913c910..1cd6bf9cf820b 100644 --- a/src/mongo/db/index/expression_keys_private.h +++ b/src/mongo/db/index/expression_keys_private.h @@ -29,19 +29,28 @@ #pragma once +#include +#include #include +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/hasher.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/multikey_paths.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/key_string.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { class CollectionPtr; class CollatorInterface; + struct TwoDIndexingParams; struct S2IndexingParams; @@ -74,7 +83,7 @@ class ExpressionKeysPrivate { const BSONObj& obj, const TwoDIndexingParams& params, KeyStringSet* keys, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, const boost::optional& id = boost::none); @@ -86,7 +95,7 @@ class ExpressionKeysPrivate { const BSONObj& obj, const fts::FTSSpec& ftsSpec, KeyStringSet* keys, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, const boost::optional& id = boost::none); @@ -105,7 +114,7 @@ class ExpressionKeysPrivate { bool isSparse, const CollatorInterface* collator, KeyStringSet* keys, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, bool ignoreArraysAlongPath, const boost::optional& id = boost::none); @@ -130,7 +139,7 @@ class ExpressionKeysPrivate { const S2IndexingParams& params, KeyStringSet* keys, MultikeyPaths* multikeyPaths, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, SortedDataIndexAccessMethod::GetKeysContext context, Ordering ordering, const boost::optional& id = boost::none); diff --git a/src/mongo/db/index/expression_params.cpp b/src/mongo/db/index/expression_params.cpp index 77996c85f4698..8e4291b05cdb4 100644 --- a/src/mongo/db/index/expression_params.cpp +++ b/src/mongo/db/index/expression_params.cpp @@ -29,14 +29,26 @@ #include "mongo/db/index/expression_params.h" +#include #include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/geo/geoconstants.h" +#include "mongo/db/geo/hash.h" #include 
"mongo/db/hasher.h" #include "mongo/db/index/2d_common.h" #include "mongo/db/index/s2_common.h" #include "mongo/db/index_names.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/index/expression_params.h b/src/mongo/db/index/expression_params.h index 157a933c0b333..e23d8d0fbedef 100644 --- a/src/mongo/db/index/expression_params.h +++ b/src/mongo/db/index/expression_params.h @@ -32,6 +32,7 @@ #include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/hasher.h" #include "mongo/db/jsobj.h" diff --git a/src/mongo/db/index/fts_access_method.cpp b/src/mongo/db/index/fts_access_method.cpp index 78f2d68b21eec..baf1aa230a462 100644 --- a/src/mongo/db/index/fts_access_method.cpp +++ b/src/mongo/db/index/fts_access_method.cpp @@ -28,6 +28,11 @@ */ #include "mongo/db/index/fts_access_method.h" + +#include + +#include + #include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/index_descriptor.h" @@ -41,6 +46,7 @@ FTSAccessMethod::FTSAccessMethod(IndexCatalogEntry* btreeState, void FTSAccessMethod::doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/fts_access_method.h b/src/mongo/db/index/fts_access_method.h index 966b959af6a74..4e2021978fb26 100644 --- a/src/mongo/db/index/fts_access_method.h +++ b/src/mongo/db/index/fts_access_method.h @@ -29,11 +29,22 @@ #pragma once +#include +#include + #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/fts/fts_spec.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -54,6 +65,7 @@ class FTSAccessMethod : public SortedDataIndexAccessMethod { */ void doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/hash_access_method.cpp b/src/mongo/db/index/hash_access_method.cpp index 7e7e7862fd312..e203c4c9010e1 100644 --- a/src/mongo/db/index/hash_access_method.cpp +++ b/src/mongo/db/index/hash_access_method.cpp @@ -29,10 +29,15 @@ #include "mongo/db/index/hash_access_method.h" +#include + +#include + #include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/hasher.h" #include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/expression_params.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -57,6 +62,7 @@ void HashAccessMethod::validateDocument(const CollectionPtr& collection, void HashAccessMethod::doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, @@ -69,7 +75,7 @@ void HashAccessMethod::doGetKeys(OperationContext* opCtx, _keyPattern, _seed, _hashVersion, - 
_descriptor->isSparse(), + entry->descriptor()->isSparse(), _collator, keys, getSortedDataInterface()->getKeyStringVersion(), diff --git a/src/mongo/db/index/hash_access_method.h b/src/mongo/db/index/hash_access_method.h index 6dc7df9e48d06..a7c188ad11f01 100644 --- a/src/mongo/db/index/hash_access_method.h +++ b/src/mongo/db/index/hash_access_method.h @@ -29,13 +29,24 @@ #pragma once +#include +#include #include #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/hasher.h" // For HashSeed. #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -61,6 +72,7 @@ class HashAccessMethod : public SortedDataIndexAccessMethod { */ void doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/hash_key_generator_test.cpp b/src/mongo/db/index/hash_key_generator_test.cpp index 3bdada24c8ab3..adc7739515e3f 100644 --- a/src/mongo/db/index/hash_key_generator_test.cpp +++ b/src/mongo/db/index/hash_key_generator_test.cpp @@ -28,19 +28,34 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index/expression_keys_private.h" - #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/json.h" +#include "mongo/bson/ordering.h" #include "mongo/db/hasher.h" -#include "mongo/db/json.h" +#include "mongo/db/index/expression_keys_private.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/shared_buffer_fragment.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -56,7 +71,7 @@ std::string dumpKeyset(const KeyStringSet& keyStrings) { std::stringstream ss; ss << "[ "; for (auto& keyString : keyStrings) { - auto key = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto key = key_string::toBson(keyString, Ordering::make(BSONObj())); ss << key.toString() << " "; } ss << "]"; @@ -84,15 +99,15 @@ bool assertKeysetsEqual(const KeyStringSet& expectedKeys, const KeyStringSet& ac return true; } -KeyString::Value makeHashKey(BSONElement elt) { - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << BSONElementHasher::hash64(elt, kHashSeed)), - Ordering::make(BSONObj())); +key_string::Value makeHashKey(BSONElement elt) { + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << BSONElementHasher::hash64(elt, kHashSeed)), + 
Ordering::make(BSONObj())); return keyString.release(); } struct HashKeyGeneratorTest : public unittest::Test { - SharedBufferFragmentBuilder allocator{KeyString::HeapBuilder::kHeapAllocatorDefaultBytes}; + SharedBufferFragmentBuilder allocator{key_string::HeapBuilder::kHeapAllocatorDefaultBytes}; }; TEST_F(HashKeyGeneratorTest, CollationAppliedBeforeHashing) { @@ -108,7 +123,7 @@ TEST_F(HashKeyGeneratorTest, CollationAppliedBeforeHashing) { false, &collator, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false); @@ -133,7 +148,7 @@ TEST_F(HashKeyGeneratorTest, CollationDoesNotAffectNonStringFields) { false, &collator, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false); @@ -158,7 +173,7 @@ TEST_F(HashKeyGeneratorTest, CollatorAppliedBeforeHashingNestedObject) { false, &collator, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false); @@ -183,12 +198,13 @@ TEST_F(HashKeyGeneratorTest, CollationAppliedforAllIndexFields) { true, &collator, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false); KeyStringSet expectedKeys; - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + Ordering::make(BSONObj())); keyString.appendBSONElement(backwardsObj["a"]["c"]); keyString.appendNumberLong(BSONElementHasher::hash64(backwardsObj["a"], kHashSeed)); expectedKeys.insert(keyString.release()); @@ -207,7 +223,7 @@ TEST_F(HashKeyGeneratorTest, NoCollation) { false, nullptr, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false); @@ -229,13 +245,14 @@ TEST_F(HashKeyGeneratorTest, CompoundIndexEmptyObject) { false, nullptr, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false); // Verify that we inserted null indexes for empty input object. KeyStringSet expectedKeys; - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + Ordering::make(BSONObj())); auto nullBSON = BSON("" << BSONNULL); auto nullElement = nullBSON.firstElement(); keyString.appendNumberLong(BSONElementHasher::hash64(nullElement, kHashSeed)); @@ -257,7 +274,7 @@ TEST_F(HashKeyGeneratorTest, SparseIndex) { true, // isSparse nullptr, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false); // Verify that no index entries were added to the sparse index. @@ -276,13 +293,14 @@ TEST_F(HashKeyGeneratorTest, SparseIndexWithAFieldPresent) { true, // isSparse nullptr, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false); // Verify that we inserted null entries for the misssing fields. 
KeyStringSet expectedKeys; - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + Ordering::make(BSONObj())); auto nullBSON = BSON("" << BSONNULL); auto nullElement = nullBSON.firstElement(); keyString.appendNumberLong(BSONElementHasher::hash64(obj["a"], kHashSeed)); @@ -304,7 +322,7 @@ TEST_F(HashKeyGeneratorTest, ArrayAlongIndexFieldPathFails) { false, nullptr, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), false), DBException, @@ -323,7 +341,7 @@ TEST_F(HashKeyGeneratorTest, ArrayAlongIndexFieldPathDoesNotFailWhenIgnoreFlagIs false, nullptr, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), true // ignoreArraysAlongPath ); @@ -345,7 +363,7 @@ TEST_F(HashKeyGeneratorTest, ArrayAtTerminalPathAlwaysFails) { true, // isSparse nullptr, &actualKeys, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), true, // ignoreArraysAlongPath boost::none), diff --git a/src/mongo/db/index/index_access_method.cpp b/src/mongo/db/index/index_access_method.cpp index 6adebaa0b64db..a4df78a8262f3 100644 --- a/src/mongo/db/index/index_access_method.cpp +++ b/src/mongo/db/index/index_access_method.cpp @@ -27,20 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index/index_access_method.h" - +#include +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +// IWYU pragma: no_include "boost/move/algo/detail/set_difference.hpp" +#include +#include +#include +#include +#include +#include +#include +#include #include #include +#include + #include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/catalog/index_consistency.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/validate_results.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" #include "mongo/db/index/2d_access_method.h" #include "mongo/db/index/btree_access_method.h" @@ -48,23 +71,39 @@ #include "mongo/db/index/columns_access_method.h" #include "mongo/db/index/fts_access_method.h" #include "mongo/db/index/hash_access_method.h" +#include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_build_interceptor.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/s2_access_method.h" #include "mongo/db/index/s2_bucket_access_method.h" #include "mongo/db/index/wildcard_access_method.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/keypattern.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/timestamp_block.h" +#include "mongo/db/service_context.h" +#include "mongo/db/sorter/sorter.h" +#include "mongo/db/sorter/sorter_gen.h" #include "mongo/db/storage/execution_context.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" 
#include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/platform/atomic_word.h" -#include "mongo/util/progress_meter.h" -#include "mongo/util/scopeguard.h" +#include "mongo/platform/random.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/stacktrace.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -188,7 +227,9 @@ bool isMultikeyFromPaths(const MultikeyPaths& multikeyPaths) { [](const MultikeyComponents& components) { return !components.empty(); }); } -SortOptions makeSortOptions(size_t maxMemoryUsageBytes, StringData dbName, SorterFileStats* stats) { +SortOptions makeSortOptions(size_t maxMemoryUsageBytes, + const DatabaseName& dbName, + SorterFileStats* stats) { return SortOptions() .TempDir(storageGlobalParams.dbpath + "/_tmp") .ExtSortAllowed() @@ -196,7 +237,7 @@ SortOptions makeSortOptions(size_t maxMemoryUsageBytes, StringData dbName, Sorte .UseMemoryPool(true) .FileStats(stats) .Tracker(&indexBulkBuilderSSS.sorterTracker) - .DBName(dbName.toString()); + .DBName(DatabaseNameUtil::serializeForCatalog(dbName)); } MultikeyPaths createMultikeyPaths(const std::vector& multikeyPathsVec) { @@ -213,22 +254,21 @@ MultikeyPaths createMultikeyPaths(const std::vector& multikeyPaths } // namespace struct BtreeExternalSortComparison { - int operator()(const KeyString::Value& l, const KeyString::Value& r) const { + int operator()(const key_string::Value& l, const key_string::Value& r) const { return l.compare(r); } }; SortedDataIndexAccessMethod::SortedDataIndexAccessMethod(const IndexCatalogEntry* btreeState, std::unique_ptr btree) - : _indexCatalogEntry(btreeState), - _descriptor(btreeState->descriptor()), - _newInterface(std::move(btree)) { - verify(IndexDescriptor::isIndexVersionSupported(_descriptor->version())); + : _newInterface(std::move(btree)) { + MONGO_verify(IndexDescriptor::isIndexVersionSupported(btreeState->descriptor()->version())); } Status SortedDataIndexAccessMethod::insert(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const std::vector& bsonRecords, const InsertDeleteOptions& options, int64_t* numInserted) { @@ -248,6 +288,7 @@ Status SortedDataIndexAccessMethod::insert(OperationContext* opCtx, getKeys(opCtx, coll, + entry, pooledBuilder, *bsonRecord.docPtr, options.getKeysMode, @@ -259,6 +300,7 @@ Status SortedDataIndexAccessMethod::insert(OperationContext* opCtx, Status status = _indexKeysOrWriteToSideTable(opCtx, coll, + entry, *keys, *multikeyMetadataKeys, *multikeyPaths, @@ -276,6 +318,7 @@ Status SortedDataIndexAccessMethod::insert(OperationContext* opCtx, void SortedDataIndexAccessMethod::remove(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& loc, bool logIfError, @@ -290,6 +333,7 @@ void SortedDataIndexAccessMethod::remove(OperationContext* opCtx, auto keys = executionCtx.keys(); getKeys(opCtx, coll, + entry, pooledBuilder, obj, 
InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -300,7 +344,7 @@ void SortedDataIndexAccessMethod::remove(OperationContext* opCtx, loc); _unindexKeysOrWriteToSideTable( - opCtx, coll->ns(), *keys, obj, logIfError, numDeleted, options, checkRecordId); + opCtx, coll->ns(), entry, *keys, obj, logIfError, numDeleted, options, checkRecordId); } Status SortedDataIndexAccessMethod::update(OperationContext* opCtx, @@ -309,17 +353,19 @@ Status SortedDataIndexAccessMethod::update(OperationContext* opCtx, const BSONObj& newDoc, const RecordId& loc, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const InsertDeleteOptions& options, int64_t* numInserted, int64_t* numDeleted) { UpdateTicket updateTicket; - prepareUpdate(opCtx, coll, oldDoc, newDoc, loc, options, &updateTicket); + prepareUpdate(opCtx, coll, entry, oldDoc, newDoc, loc, options, &updateTicket); auto status = Status::OK(); - if (_indexCatalogEntry->isHybridBuilding() || !_indexCatalogEntry->isReady(opCtx)) { + if (entry->isHybridBuilding() || !entry->isReady()) { bool logIfError = false; _unindexKeysOrWriteToSideTable(opCtx, coll->ns(), + entry, updateTicket.removed, oldDoc, logIfError, @@ -328,6 +374,7 @@ Status SortedDataIndexAccessMethod::update(OperationContext* opCtx, CheckRecordId::Off); return _indexKeysOrWriteToSideTable(opCtx, coll, + entry, updateTicket.added, updateTicket.newMultikeyMetadataKeys, updateTicket.newMultikeyPaths, @@ -335,13 +382,14 @@ Status SortedDataIndexAccessMethod::update(OperationContext* opCtx, options, numInserted); } else { - return doUpdate(opCtx, coll, updateTicket, numInserted, numDeleted); + return doUpdate(opCtx, coll, entry, updateTicket, numInserted, numDeleted); } } Status SortedDataIndexAccessMethod::insertKeysAndUpdateMultikeyPaths( OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const KeyStringSet& multikeyMetadataKeys, const MultikeyPaths& multikeyPaths, @@ -352,6 +400,7 @@ Status SortedDataIndexAccessMethod::insertKeysAndUpdateMultikeyPaths( // Insert the specified data keys into the index. auto status = insertKeys(opCtx, coll, + entry, keys, options, std::move(onDuplicateKey), @@ -362,7 +411,7 @@ Status SortedDataIndexAccessMethod::insertKeysAndUpdateMultikeyPaths( } // If these keys should cause the index to become multikey, pass them into the catalog. if (shouldMarkIndexAsMultikey(keys.size(), multikeyMetadataKeys, multikeyPaths)) { - _indexCatalogEntry->setMultikey(opCtx, coll, multikeyMetadataKeys, multikeyPaths); + entry->setMultikey(opCtx, coll, multikeyMetadataKeys, multikeyPaths); } // If we have some multikey metadata keys, they should have been added while marking the index // as multikey in the catalog. Add them to the count of keys inserted for completeness. 
@@ -374,6 +423,7 @@ Status SortedDataIndexAccessMethod::insertKeysAndUpdateMultikeyPaths( Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const InsertDeleteOptions& options, KeyHandlerFn&& onDuplicateKey, @@ -383,11 +433,11 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx, if (numInserted) { *numInserted = 0; } - bool unique = _descriptor->unique(); - bool prepareUnique = _descriptor->prepareUnique(); + bool unique = entry->descriptor()->unique(); + bool prepareUnique = entry->descriptor()->prepareUnique(); bool dupsAllowed; - if (!_descriptor->isIdIndex() && !opCtx->isEnforcingConstraints() && - coll->isIndexReady(_descriptor->indexName())) { + if (!entry->descriptor()->isIdIndex() && !opCtx->isEnforcingConstraints() && + coll->isIndexReady(entry->descriptor()->indexName())) { // Oplog application should avoid checking for duplicates on unique indexes except when: // 1. Building an index. We have to use the duplicate key error to record possible // conflicts. @@ -399,7 +449,7 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx, } else if (prepareUnique) { // Before the index build commits, duplicate keys are allowed to exist with the // 'prepareUnique' option. After that, duplicates are not allowed. - dupsAllowed = !coll->isIndexReady(_descriptor->indexName()); + dupsAllowed = !coll->isIndexReady(entry->descriptor()->indexName()); } else { dupsAllowed = !unique; } @@ -430,13 +480,21 @@ Status SortedDataIndexAccessMethod::insertKeys(OperationContext* opCtx, } void SortedDataIndexAccessMethod::removeOneKey(OperationContext* opCtx, - const KeyString::Value& keyString, - bool dupsAllowed) { + const IndexCatalogEntry* entry, + const key_string::Value& keyString, + bool dupsAllowed) const { try { _newInterface->unindex(opCtx, keyString, dupsAllowed); } catch (AssertionException& e) { - NamespaceString ns = _indexCatalogEntry->getNSSFromCatalog(opCtx); + if (e.code() == ErrorCodes::DataCorruptionDetected) { + // DataCorruptionDetected errors are expected to have logged an error and added an entry + // to the health log with the stack trace at the location where the error was initially + // thrown. No need to do so again. + throw; + } + + NamespaceString ns = entry->getNSSFromCatalog(opCtx); LOGV2(20683, "Assertion failure: _unindex failed on: {namespace} for index: {indexName}. 
" "{error} KeyString:{keyString}", @@ -444,7 +502,7 @@ void SortedDataIndexAccessMethod::removeOneKey(OperationContext* opCtx, "error"_attr = redact(e), "keyString"_attr = keyString, logAttrs(ns), - "indexName"_attr = _descriptor->indexName()); + "indexName"_attr = entry->descriptor()->indexName()); printStackTrace(); } } @@ -455,12 +513,13 @@ std::unique_ptr SortedDataIndexAccessMethod::newCur } Status SortedDataIndexAccessMethod::removeKeys(OperationContext* opCtx, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const InsertDeleteOptions& options, - int64_t* numDeleted) { + int64_t* numDeleted) const { for (const auto& key : keys) { - removeOneKey(opCtx, key, options.dupsAllowed); + removeOneKey(opCtx, entry, key, options.dupsAllowed); } *numDeleted = keys.size(); @@ -473,13 +532,14 @@ Status SortedDataIndexAccessMethod::initializeAsEmpty(OperationContext* opCtx) { RecordId SortedDataIndexAccessMethod::findSingle(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& requestedKey) const { // Generate the key for this index. - KeyString::Value actualKey = [&]() { - if (_indexCatalogEntry->getCollator()) { + key_string::Value actualKey = [&]() { + if (entry->getCollator()) { // For performance, call get keys only if there is a non-simple collation. SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); auto& executionCtx = StorageExecutionContext::get(opCtx); auto keys = executionCtx.keys(); KeyStringSet* multikeyMetadataKeys = nullptr; @@ -487,6 +547,7 @@ RecordId SortedDataIndexAccessMethod::findSingle(OperationContext* opCtx, getKeys(opCtx, collection, + entry, pooledBuilder, requestedKey, InsertDeleteOptions::ConstraintEnforcementMode::kEnforceConstraints, @@ -498,7 +559,7 @@ RecordId SortedDataIndexAccessMethod::findSingle(OperationContext* opCtx, invariant(keys->size() == 1); return *keys->begin(); } else { - KeyString::HeapBuilder requestedKeyString( + key_string::HeapBuilder requestedKeyString( getSortedDataInterface()->getKeyStringVersion(), BSONObj::stripFieldNames(requestedKey), getSortedDataInterface()->getOrdering()); @@ -578,17 +639,18 @@ pair SortedDataIndexAccessMethod::setDifference( void SortedDataIndexAccessMethod::prepareUpdate(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& from, const BSONObj& to, const RecordId& record, const InsertDeleteOptions& options, UpdateTicket* ticket) const { - SharedBufferFragmentBuilder pooledBuilder(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); - const MatchExpression* indexFilter = _indexCatalogEntry->getFilterExpression(); + SharedBufferFragmentBuilder pooledBuilder(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); + const MatchExpression* indexFilter = entry->getFilterExpression(); if (!indexFilter || indexFilter->matchesBSON(from)) { // Override key constraints when generating keys for removal. This only applies to keys // that do not apply to a partial filter expression. - const auto getKeysMode = _indexCatalogEntry->isHybridBuilding() + const auto getKeysMode = entry->isHybridBuilding() ? InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered : options.getKeysMode; @@ -597,6 +659,7 @@ void SortedDataIndexAccessMethod::prepareUpdate(OperationContext* opCtx, // metadata isn't updated when keys are deleted. 
getKeys(opCtx, collection, + entry, pooledBuilder, from, getKeysMode, @@ -610,6 +673,7 @@ void SortedDataIndexAccessMethod::prepareUpdate(OperationContext* opCtx, if (!indexFilter || indexFilter->matchesBSON(to)) { getKeys(opCtx, collection, + entry, pooledBuilder, to, options.getKeysMode, @@ -630,10 +694,11 @@ void SortedDataIndexAccessMethod::prepareUpdate(OperationContext* opCtx, Status SortedDataIndexAccessMethod::doUpdate(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const UpdateTicket& ticket, int64_t* numInserted, int64_t* numDeleted) { - invariant(!_indexCatalogEntry->isHybridBuilding()); + invariant(!entry->isHybridBuilding()); invariant(ticket.newKeys.size() == ticket.oldKeys.size() + ticket.added.size() - ticket.removed.size()); invariant(numInserted); @@ -652,7 +717,7 @@ Status SortedDataIndexAccessMethod::doUpdate(OperationContext* opCtx, // Add all new data keys into the index. for (const auto& keyString : ticket.added) { - bool dupsAllowed = !_descriptor->prepareUnique() && ticket.dupsAllowed; + bool dupsAllowed = !entry->descriptor()->prepareUnique() && ticket.dupsAllowed; auto status = _newInterface->insert(opCtx, keyString, dupsAllowed); if (!status.isOK()) return status; @@ -661,8 +726,7 @@ Status SortedDataIndexAccessMethod::doUpdate(OperationContext* opCtx, // If these keys should cause the index to become multikey, pass them into the catalog. if (shouldMarkIndexAsMultikey( ticket.newKeys.size(), ticket.newMultikeyMetadataKeys, ticket.newMultikeyPaths)) { - _indexCatalogEntry->setMultikey( - opCtx, coll, ticket.newMultikeyMetadataKeys, ticket.newMultikeyPaths); + entry->setMultikey(opCtx, coll, ticket.newMultikeyMetadataKeys, ticket.newMultikeyPaths); } // If we have some multikey metadata keys, they should have been added while marking the index @@ -687,6 +751,7 @@ void SortedDataIndexAccessMethod::setIdent(std::shared_ptr newIdent) { Status SortedDataIndexAccessMethod::applyIndexBuildSideWrite(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& operation, const InsertDeleteOptions& options, KeyHandlerFn&& onDuplicateKey, @@ -705,18 +770,19 @@ Status SortedDataIndexAccessMethod::applyIndexBuildSideWrite(OperationContext* o } }(); - // Deserialize the encoded KeyString::Value. + // Deserialize the encoded key_string::Value. 
int keyLen; const char* binKey = operation["key"].binData(keyLen); BufReader reader(binKey, keyLen); - const KeyString::Value keyString = - KeyString::Value::deserialize(reader, getSortedDataInterface()->getKeyStringVersion()); + const key_string::Value keyString = + key_string::Value::deserialize(reader, getSortedDataInterface()->getKeyStringVersion()); const KeyStringSet keySet{keyString}; if (opType == IndexBuildInterceptor::Op::kInsert) { int64_t numInserted; auto status = insertKeysAndUpdateMultikeyPaths(opCtx, coll, + entry, {keySet.begin(), keySet.end()}, {}, MultikeyPaths{}, @@ -733,7 +799,7 @@ Status SortedDataIndexAccessMethod::applyIndexBuildSideWrite(OperationContext* o } else { invariant(opType == IndexBuildInterceptor::Op::kDelete); int64_t numDeleted; - Status s = removeKeys(opCtx, {keySet.begin(), keySet.end()}, options, &numDeleted); + Status s = removeKeys(opCtx, entry, {keySet.begin(), keySet.end()}, options, &numDeleted); if (!s.isOK()) { return s; } @@ -762,54 +828,70 @@ SorterTracker* IndexAccessMethod::BulkBuilder::bulkBuilderTracker() { return &indexBulkBuilderSSS.sorterTracker; } -void IndexAccessMethod::BulkBuilder::yield(OperationContext* opCtx, - const Yieldable* yieldable, - const NamespaceString& ns) { +const IndexCatalogEntry* IndexAccessMethod::BulkBuilder::yield(OperationContext* opCtx, + const CollectionPtr& collection, + const NamespaceString& ns, + const IndexCatalogEntry* entry) { + const std::string indexIdent = entry->getIdent(); + // Releasing locks means a new snapshot should be acquired when restored. opCtx->recoveryUnit()->abandonSnapshot(); - yieldable->yield(); + collection.yield(); auto locker = opCtx->lockState(); Locker::LockSnapshot snapshot; - if (locker->saveLockStateAndUnlock(&snapshot)) { - - // Track the number of yields in CurOp. - CurOp::get(opCtx)->yielded(); - - auto failPointHang = [opCtx, &ns](FailPoint* fp) { - fp->executeIf( - [fp](auto&&) { - LOGV2(5180600, "Hanging index build during bulk load yield"); - fp->pauseWhileSet(); - }, - [opCtx, &ns](auto&& config) { - return config.getStringField("namespace") == ns.ns(); - }); - }; - failPointHang(&hangDuringIndexBuildBulkLoadYield); - failPointHang(&hangDuringIndexBuildBulkLoadYieldSecond); - - locker->restoreLockState(opCtx, snapshot); + locker->saveLockStateAndUnlock(&snapshot); + + // Track the number of yields in CurOp. + CurOp::get(opCtx)->yielded(); + + auto failPointHang = [opCtx, &ns](FailPoint* fp) { + fp->executeIf( + [fp](auto&&) { + LOGV2(5180600, "Hanging index build during bulk load yield"); + fp->pauseWhileSet(); + }, + [opCtx, &ns](auto&& config) { + return NamespaceStringUtil::parseFailPointData(config, "namespace") == ns; + }); + }; + failPointHang(&hangDuringIndexBuildBulkLoadYield); + failPointHang(&hangDuringIndexBuildBulkLoadYieldSecond); + + locker->restoreLockState(opCtx, snapshot); + collection.restore(); + + // After yielding, the latest instance of the collection is fetched and can be + // different from the collection instance prior to yielding. For this reason we need + // to refresh the index entry pointer. 
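Note: the sketch below is not part of the patch. It restates, under the names used in these hunks (getIdent, CollectionPtr::yield/restore, findIndexByIdent, getEntry), the yield-and-refetch pattern that the comment above motivates: because yielding can swap out the Collection instance, the IndexCatalogEntry pointer must be re-resolved by ident afterwards. Lock save/restore details are elided.

    // Minimal sketch of the pattern; assumes the MongoDB-internal headers already
    // included by this file. Returns nullptr if the collection disappeared while yielded.
    const IndexCatalogEntry* yieldAndRefetchEntry(OperationContext* opCtx,
                                                  const CollectionPtr& collection,
                                                  const IndexCatalogEntry* entry) {
        // Capture a stable identifier before yielding; 'entry' may be invalidated.
        const std::string indexIdent = entry->getIdent();

        collection.yield();
        // ... snapshot abandoned, locks released and re-acquired (see the hunk above) ...
        collection.restore();

        if (!collection) {
            return nullptr;  // Collection was dropped or renamed during the yield.
        }
        // Re-resolve the entry against the latest collection instance.
        return collection->getIndexCatalog()
            ->findIndexByIdent(opCtx, indexIdent, IndexCatalog::InclusionPolicy::kUnfinished)
            ->getEntry();
    }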
+ if (!collection) { + return nullptr; } - yieldable->restore(); + + return collection->getIndexCatalog() + ->findIndexByIdent(opCtx, indexIdent, IndexCatalog::InclusionPolicy::kUnfinished) + ->getEntry(); } class SortedDataIndexAccessMethod::BulkBuilderImpl final : public BulkBuilderCommon { public: - using Sorter = mongo::Sorter; + using Sorter = mongo::Sorter; - BulkBuilderImpl(SortedDataIndexAccessMethod* iam, + BulkBuilderImpl(const IndexCatalogEntry* entry, + SortedDataIndexAccessMethod* iam, size_t maxMemoryUsageBytes, - StringData dbName); + const DatabaseName& dbName); - BulkBuilderImpl(SortedDataIndexAccessMethod* iam, + BulkBuilderImpl(const IndexCatalogEntry* entry, + SortedDataIndexAccessMethod* iam, size_t maxMemoryUsageBytes, const IndexStateInfo& stateInfo, - StringData dbName); + const DatabaseName& dbName); Status insert(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& loc, const InsertDeleteOptions& options, @@ -825,11 +907,13 @@ class SortedDataIndexAccessMethod::BulkBuilderImpl final std::unique_ptr finalizeSort(); std::unique_ptr setUpBulkInserter(OperationContext* opCtx, + const IndexCatalogEntry* entry, bool dupsAllowed); void debugEnsureSorted(const Sorter::Data& data); bool duplicateCheck(OperationContext* opCtx, + const IndexCatalogEntry* entry, const Sorter::Data& data, bool dupsAllowed, const RecordIdHandlerFn& onDuplicateRecord); @@ -845,7 +929,7 @@ class SortedDataIndexAccessMethod::BulkBuilderImpl final Sorter* _makeSorter( size_t maxMemoryUsageBytes, - StringData dbName, + const DatabaseName& dbName, boost::optional fileName = boost::none, const boost::optional>& ranges = boost::none) const; @@ -854,7 +938,7 @@ class SortedDataIndexAccessMethod::BulkBuilderImpl final SortedDataIndexAccessMethod* _iam; std::unique_ptr _sorter; - KeyString::Value _previousKey; + key_string::Value _previousKey; // Set to true if any document added to the BulkBuilder causes the index to become multikey. bool _isMultiKey = false; @@ -870,32 +954,35 @@ class SortedDataIndexAccessMethod::BulkBuilderImpl final }; std::unique_ptr SortedDataIndexAccessMethod::initiateBulk( + const IndexCatalogEntry* entry, size_t maxMemoryUsageBytes, const boost::optional& stateInfo, - StringData dbName) { + const DatabaseName& dbName) { return stateInfo - ? std::make_unique(this, maxMemoryUsageBytes, *stateInfo, dbName) - : std::make_unique(this, maxMemoryUsageBytes, dbName); + ? 
std::make_unique(entry, this, maxMemoryUsageBytes, *stateInfo, dbName) + : std::make_unique(entry, this, maxMemoryUsageBytes, dbName); } -SortedDataIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(SortedDataIndexAccessMethod* iam, +SortedDataIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(const IndexCatalogEntry* entry, + SortedDataIndexAccessMethod* iam, size_t maxMemoryUsageBytes, - StringData dbName) + const DatabaseName& dbName) : BulkBuilderCommon(0, "Index Build: inserting keys from external sorter into index", - iam->_descriptor->indexName()), + entry->descriptor()->indexName()), _iam(iam), _sorter(_makeSorter(maxMemoryUsageBytes, dbName)) { countNewBuildInStats(); } -SortedDataIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(SortedDataIndexAccessMethod* iam, +SortedDataIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(const IndexCatalogEntry* entry, + SortedDataIndexAccessMethod* iam, size_t maxMemoryUsageBytes, const IndexStateInfo& stateInfo, - StringData dbName) + const DatabaseName& dbName) : BulkBuilderCommon(stateInfo.getNumKeys().value_or(0), "Index Build: inserting keys from external sorter into index", - iam->_descriptor->indexName()), + entry->descriptor()->indexName()), _iam(iam), _sorter( _makeSorter(maxMemoryUsageBytes, dbName, stateInfo.getFileName(), stateInfo.getRanges())), @@ -907,6 +994,7 @@ SortedDataIndexAccessMethod::BulkBuilderImpl::BulkBuilderImpl(SortedDataIndexAcc Status SortedDataIndexAccessMethod::BulkBuilderImpl::insert( OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& loc, const InsertDeleteOptions& options, @@ -920,6 +1008,7 @@ Status SortedDataIndexAccessMethod::BulkBuilderImpl::insert( try { _iam->getKeys(opCtx, collection, + entry, _sorter->memPool(), obj, options.getKeysMode, @@ -991,7 +1080,7 @@ void SortedDataIndexAccessMethod::BulkBuilderImpl::_insertMultikeyMetadataKeysIn SortedDataIndexAccessMethod::BulkBuilderImpl::Sorter::Settings SortedDataIndexAccessMethod::BulkBuilderImpl::_makeSorterSettings() const { - return std::pair( {_iam->getSortedDataInterface()->getKeyStringVersion()}, {}); } @@ -999,7 +1088,7 @@ SortedDataIndexAccessMethod::BulkBuilderImpl::_makeSorterSettings() const { SortedDataIndexAccessMethod::BulkBuilderImpl::Sorter* SortedDataIndexAccessMethod::BulkBuilderImpl::_makeSorter( size_t maxMemoryUsageBytes, - StringData dbName, + const DatabaseName& dbName, boost::optional fileName, const boost::optional>& ranges) const { return fileName @@ -1014,7 +1103,7 @@ SortedDataIndexAccessMethod::BulkBuilderImpl::_makeSorter( _makeSorterSettings()); } -std::unique_ptr::Iterator> +std::unique_ptr::Iterator> SortedDataIndexAccessMethod::BulkBuilderImpl::finalizeSort() { _insertMultikeyMetadataKeysIntoSorter(); return std::unique_ptr(_sorter->done()); @@ -1022,8 +1111,9 @@ SortedDataIndexAccessMethod::BulkBuilderImpl::finalizeSort() { std::unique_ptr SortedDataIndexAccessMethod::BulkBuilderImpl::setUpBulkInserter(OperationContext* opCtx, + const IndexCatalogEntry* entry, bool dupsAllowed) { - _ns = _iam->_indexCatalogEntry->getNSSFromCatalog(opCtx); + _ns = entry->getNSSFromCatalog(opCtx); return _iam->getSortedDataInterface()->makeBulkBuilder(opCtx, dupsAllowed); } @@ -1040,11 +1130,12 @@ void SortedDataIndexAccessMethod::BulkBuilderImpl::debugEnsureSorted(const Sorte bool SortedDataIndexAccessMethod::BulkBuilderImpl::duplicateCheck( OperationContext* opCtx, + const IndexCatalogEntry* entry, const Sorter::Data& data, bool dupsAllowed, const 
RecordIdHandlerFn& onDuplicateRecord) { - auto descriptor = _iam->_descriptor; + auto descriptor = entry->descriptor(); bool isDup = false; if (descriptor->unique()) { @@ -1056,7 +1147,7 @@ bool SortedDataIndexAccessMethod::BulkBuilderImpl::duplicateCheck( // Before attempting to insert, perform a duplicate key check. if (isDup && !dupsAllowed) { - uassertStatusOK(_iam->_handleDuplicateKey(opCtx, data.first, onDuplicateRecord)); + uassertStatusOK(_iam->_handleDuplicateKey(opCtx, entry, data.first, onDuplicateRecord)); } return isDup; } @@ -1079,6 +1170,7 @@ Status SortedDataIndexAccessMethod::BulkBuilderImpl::keyCommitted( void SortedDataIndexAccessMethod::getKeys( OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, InsertDeleteOptions::ConstraintEnforcementMode mode, @@ -1097,11 +1189,12 @@ void SortedDataIndexAccessMethod::getKeys( id->toString())); try { - if (_indexCatalogEntry->shouldValidateDocument()) { - validateDocument(collection, obj, _descriptor->keyPattern()); + if (entry->shouldValidateDocument()) { + validateDocument(collection, obj, entry->descriptor()->keyPattern()); } doGetKeys(opCtx, collection, + entry, pooledBufferBuilder, obj, context, @@ -1126,7 +1219,7 @@ void SortedDataIndexAccessMethod::getKeys( // If the document applies to the filter (which means that it should have never been // indexed), do not suppress the error. - const MatchExpression* filter = _indexCatalogEntry->getFilterExpression(); + const MatchExpression* filter = entry->getFilterExpression(); if (mode == InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered && filter && filter->matchesBSON(obj)) { throw; @@ -1140,7 +1233,7 @@ void SortedDataIndexAccessMethod::getKeys( } if (onSuppressedErrorFn) { - onSuppressedErrorFn(opCtx, _indexCatalogEntry, ex.toStatus(), obj, id); + onSuppressedErrorFn(opCtx, entry, ex.toStatus(), obj, id); } else { LOGV2_DEBUG(20686, 1, @@ -1182,26 +1275,28 @@ std::string nextFileName() { Status SortedDataIndexAccessMethod::_handleDuplicateKey( OperationContext* opCtx, - const KeyString::Value& dataKey, + const IndexCatalogEntry* entry, + const key_string::Value& dataKey, const RecordIdHandlerFn& onDuplicateRecord) { RecordId recordId = (KeyFormat::Long == _newInterface->rsKeyFormat()) - ? KeyString::decodeRecordIdLongAtEnd(dataKey.getBuffer(), dataKey.getSize()) - : KeyString::decodeRecordIdStrAtEnd(dataKey.getBuffer(), dataKey.getSize()); + ? 
key_string::decodeRecordIdLongAtEnd(dataKey.getBuffer(), dataKey.getSize()) + : key_string::decodeRecordIdStrAtEnd(dataKey.getBuffer(), dataKey.getSize()); if (onDuplicateRecord) { return onDuplicateRecord(recordId); } - BSONObj dupKey = KeyString::toBson(dataKey, getSortedDataInterface()->getOrdering()); + BSONObj dupKey = key_string::toBson(dataKey, getSortedDataInterface()->getOrdering()); return buildDupKeyErrorStatus(dupKey.getOwned(), - _indexCatalogEntry->getNSSFromCatalog(opCtx), - _descriptor->indexName(), - _descriptor->keyPattern(), - _descriptor->collation()); + entry->getNSSFromCatalog(opCtx), + entry->descriptor()->indexName(), + entry->descriptor()->keyPattern(), + entry->descriptor()->collation()); } Status SortedDataIndexAccessMethod::_indexKeysOrWriteToSideTable( OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const KeyStringSet& multikeyMetadataKeys, const MultikeyPaths& multikeyPaths, @@ -1209,46 +1304,33 @@ Status SortedDataIndexAccessMethod::_indexKeysOrWriteToSideTable( const InsertDeleteOptions& options, int64_t* keysInsertedOut) { Status status = Status::OK(); - if (_indexCatalogEntry->isHybridBuilding()) { + if (entry->isHybridBuilding()) { // The side table interface accepts only records that meet the criteria for this partial // index. // See SERVER-28975 and SERVER-39705 for details. - if (auto filter = _indexCatalogEntry->getFilterExpression()) { + if (auto filter = entry->getFilterExpression()) { if (!filter->matchesBSON(obj)) { return Status::OK(); } } int64_t inserted = 0; - status = _indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, - keys, - multikeyMetadataKeys, - multikeyPaths, - IndexBuildInterceptor::Op::kInsert, - &inserted); + status = entry->indexBuildInterceptor()->sideWrite(opCtx, + entry, + keys, + multikeyMetadataKeys, + multikeyPaths, + IndexBuildInterceptor::Op::kInsert, + &inserted); if (keysInsertedOut) { *keysInsertedOut += inserted; } } else { - // Ensure that our snapshot is compatible with the index's minimum visibile snapshot. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - const auto minVisibleTimestamp = _indexCatalogEntry->getMinimumVisibleSnapshot(); - const auto readTimestamp = - opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx).value_or( - opCtx->recoveryUnit()->getCatalogConflictingTimestamp()); - if (minVisibleTimestamp && !readTimestamp.isNull() && - readTimestamp < *minVisibleTimestamp) { - throwWriteConflictException( - "Unable to read from a snapshot due to pending catalog changes."); - } - } - int64_t numInserted = 0; status = insertKeysAndUpdateMultikeyPaths( opCtx, coll, + entry, keys, {multikeyMetadataKeys.begin(), multikeyMetadataKeys.end()}, multikeyPaths, @@ -1266,6 +1348,7 @@ Status SortedDataIndexAccessMethod::_indexKeysOrWriteToSideTable( void SortedDataIndexAccessMethod::_unindexKeysOrWriteToSideTable( OperationContext* opCtx, const NamespaceString& ns, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const BSONObj& obj, bool logIfError, @@ -1273,11 +1356,11 @@ void SortedDataIndexAccessMethod::_unindexKeysOrWriteToSideTable( InsertDeleteOptions options, // copy! CheckRecordId checkRecordId) { - if (_indexCatalogEntry->isHybridBuilding()) { + if (entry->isHybridBuilding()) { // The side table interface accepts only records that meet the criteria for this partial // index. 
// See SERVER-28975 and SERVER-39705 for details. - if (auto filter = _indexCatalogEntry->getFilterExpression()) { + if (auto filter = entry->getFilterExpression()) { if (!filter->matchesBSON(obj)) { return; } @@ -1285,8 +1368,8 @@ void SortedDataIndexAccessMethod::_unindexKeysOrWriteToSideTable( int64_t removed = 0; fassert(31155, - _indexCatalogEntry->indexBuildInterceptor()->sideWrite( - opCtx, keys, {}, {}, IndexBuildInterceptor::Op::kDelete, &removed)); + entry->indexBuildInterceptor()->sideWrite( + opCtx, entry, keys, {}, {}, IndexBuildInterceptor::Op::kDelete, &removed)); if (keysDeletedOut) { *keysDeletedOut += removed; } @@ -1301,22 +1384,8 @@ void SortedDataIndexAccessMethod::_unindexKeysOrWriteToSideTable( // We need to disable blind-deletes if 'checkRecordId' is explicitly set 'On'. options.dupsAllowed = options.dupsAllowed || checkRecordId == CheckRecordId::On; - // Ensure that our snapshot is compatible with the index's minimum visibile snapshot. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe()) { - const auto minVisibleTimestamp = _indexCatalogEntry->getMinimumVisibleSnapshot(); - const auto readTimestamp = - opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx).value_or( - opCtx->recoveryUnit()->getCatalogConflictingTimestamp()); - if (minVisibleTimestamp && !readTimestamp.isNull() && - readTimestamp < *minVisibleTimestamp) { - throwWriteConflictException( - "Unable to read from a snapshot due to pending catalog changes."); - } - } - int64_t removed = 0; - Status status = removeKeys(opCtx, keys, options, &removed); + Status status = removeKeys(opCtx, entry, keys, options, &removed); if (!status.isOK()) { LOGV2(20362, @@ -1335,5 +1404,6 @@ void SortedDataIndexAccessMethod::_unindexKeysOrWriteToSideTable( #undef MONGO_LOGV2_DEFAULT_COMPONENT #include "mongo/db/sorter/sorter.cpp" + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand -MONGO_CREATE_SORTER(mongo::KeyString::Value, mongo::NullValue, mongo::BtreeExternalSortComparison); +MONGO_CREATE_SORTER(mongo::key_string::Value, mongo::NullValue, mongo::BtreeExternalSortComparison); diff --git a/src/mongo/db/index/index_access_method.h b/src/mongo/db/index/index_access_method.h index 2803af40272c8..869990170bde8 100644 --- a/src/mongo/db/index/index_access_method.h +++ b/src/mongo/db/index/index_access_method.h @@ -30,19 +30,40 @@ #pragma once #include +#include +#include +#include +#include #include #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/database_name.h" #include "mongo/db/field_ref.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/multikey_metadata_access_stats.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" +#include "mongo/db/resumable_index_builds_gen.h" #include "mongo/db/sorter/sorter.h" +#include "mongo/db/sorter/sorter_stats.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" #include 
"mongo/db/yieldable.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -75,7 +96,7 @@ class IndexAccessMethod { Status status, const BSONObj& obj, const boost::optional& loc)>; - using KeyHandlerFn = std::function; + using KeyHandlerFn = std::function; using RecordIdHandlerFn = std::function; IndexAccessMethod() = default; @@ -107,6 +128,7 @@ class IndexAccessMethod { virtual Status insert(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const std::vector& bsonRecords, const InsertDeleteOptions& options, int64_t* numInserted) = 0; @@ -114,6 +136,7 @@ class IndexAccessMethod { virtual void remove(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& loc, bool logIfError, @@ -127,6 +150,7 @@ class IndexAccessMethod { const BSONObj& newDoc, const RecordId& loc, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const InsertDeleteOptions& options, int64_t* numInserted, int64_t* numDeleted) = 0; @@ -193,6 +217,7 @@ class IndexAccessMethod { virtual Status applyIndexBuildSideWrite(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& operation, const InsertDeleteOptions& options, KeyHandlerFn&& onDuplicateKey, @@ -212,6 +237,7 @@ class IndexAccessMethod { */ virtual Status insert(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& loc, const InsertDeleteOptions& options, @@ -231,6 +257,7 @@ class IndexAccessMethod { */ virtual Status commit(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, bool dupsAllowed, int32_t yieldIterations, const KeyHandlerFn& onDuplicateKeyInserted, @@ -255,9 +282,10 @@ class IndexAccessMethod { * Abandon the current snapshot and release then reacquire locks. Tests that target the * behavior of bulk index builds that yield can use failpoints to stall this yield. */ - static void yield(OperationContext* opCtx, - const Yieldable* yieldable, - const NamespaceString& ns); + [[nodiscard]] static const IndexCatalogEntry* yield(OperationContext* opCtx, + const CollectionPtr& collection, + const NamespaceString& ns, + const IndexCatalogEntry* entry); }; /** @@ -274,9 +302,10 @@ class IndexAccessMethod { * new index build. 
*/ virtual std::unique_ptr initiateBulk( + const IndexCatalogEntry* entry, size_t maxMemoryUsageBytes, const boost::optional& stateInfo, - StringData dbName) = 0; + const DatabaseName& dbName) = 0; }; /** @@ -390,6 +419,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { */ void getKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, InsertDeleteOptions::ConstraintEnforcementMode mode, @@ -410,6 +440,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { Status insertKeys( OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const InsertDeleteOptions& options, KeyHandlerFn&& onDuplicateKey, @@ -424,6 +455,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { Status insertKeysAndUpdateMultikeyPaths( OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const KeyStringSet& multikeyMetadataKeys, const MultikeyPaths& multikeyPaths, @@ -437,9 +469,10 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { * 'numDeleted' will be set to the number of keys removed from the index for the provided keys. */ Status removeKeys(OperationContext* opCtx, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const InsertDeleteOptions& options, - int64_t* numDeleted); + int64_t* numDeleted) const; /** * Gets the keys of the documents 'from' and 'to' and prepares them for the update. @@ -447,6 +480,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { */ void prepareUpdate(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& from, const BSONObj& to, const RecordId& loc, @@ -466,12 +500,14 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { */ Status doUpdate(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const UpdateTicket& ticket, int64_t* numInserted, int64_t* numDeleted); RecordId findSingle(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, const BSONObj& key) const; /** @@ -512,6 +548,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { Status insert(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const std::vector& bsonRecords, const InsertDeleteOptions& options, int64_t* numInserted) final; @@ -519,6 +556,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { void remove(OperationContext* opCtx, SharedBufferFragmentBuilder& pooledBufferBuilder, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& obj, const RecordId& loc, bool logIfError, @@ -532,6 +570,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { const BSONObj& newDoc, const RecordId& loc, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const InsertDeleteOptions& options, int64_t* numInserted, int64_t* numDeleted) final; @@ -558,15 +597,17 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { Status applyIndexBuildSideWrite(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const BSONObj& operation, const InsertDeleteOptions& options, KeyHandlerFn&& onDuplicateKey, int64_t* keysInserted, int64_t* keysDeleted) final; - std::unique_ptr initiateBulk(size_t maxMemoryUsageBytes, + 
std::unique_ptr initiateBulk(const IndexCatalogEntry* entry, + size_t maxMemoryUsageBytes, const boost::optional& stateInfo, - StringData dbName) final; + const DatabaseName& dbName) final; protected: /** @@ -593,6 +634,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { */ virtual void doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, @@ -601,9 +643,6 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { MultikeyPaths* multikeyPaths, const boost::optional& id) const = 0; - const IndexCatalogEntry* const _indexCatalogEntry; // owned by IndexCatalog - const IndexDescriptor* const _descriptor; - private: class BulkBuilderImpl; @@ -612,7 +651,10 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { * * Used by remove() only. */ - void removeOneKey(OperationContext* opCtx, const KeyString::Value& keyString, bool dupsAllowed); + void removeOneKey(OperationContext* opCtx, + const IndexCatalogEntry* entry, + const key_string::Value& keyString, + bool dupsAllowed) const; /** * While inserting keys into index (from external sorter), if a duplicate key is detected @@ -620,11 +662,13 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { * DuplicateKey error will be returned. */ Status _handleDuplicateKey(OperationContext* opCtx, - const KeyString::Value& dataKey, + const IndexCatalogEntry* entry, + const key_string::Value& dataKey, const RecordIdHandlerFn& onDuplicateRecord); Status _indexKeysOrWriteToSideTable(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const KeyStringSet& multikeyMetadataKeys, const MultikeyPaths& multikeyPaths, @@ -634,6 +678,7 @@ class SortedDataIndexAccessMethod : public IndexAccessMethod { void _unindexKeysOrWriteToSideTable(OperationContext* opCtx, const NamespaceString& ns, + const IndexCatalogEntry* entry, const KeyStringSet& keys, const BSONObj& obj, bool logIfError, diff --git a/src/mongo/db/index/index_build_interceptor.cpp b/src/mongo/db/index/index_build_interceptor.cpp index 888224b8789ad..cd135505d8348 100644 --- a/src/mongo/db/index/index_build_interceptor.cpp +++ b/src/mongo/db/index/index_build_interceptor.cpp @@ -28,27 +28,58 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index/index_build_interceptor.h" - +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/index/columns_access_method.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_build_interceptor.h" #include "mongo/db/index/index_build_interceptor_gen.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/multi_key_path_tracker.h" +#include 
"mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/progress_meter.h" -#include "mongo/util/uuid.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -62,10 +93,9 @@ MONGO_FAIL_POINT_DEFINE(hangIndexBuildDuringDrainWritesPhaseSecond); IndexBuildInterceptor::IndexBuildInterceptor(OperationContext* opCtx, const IndexCatalogEntry* entry) - : _indexCatalogEntry(entry), - _sideWritesTable(opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStore( + : _sideWritesTable(opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStore( opCtx, KeyFormat::Long)), - _skippedRecordTracker(opCtx, entry, boost::none) { + _skippedRecordTracker(opCtx, boost::none) { if (entry->descriptor()->unique()) { _duplicateKeyTracker = std::make_unique(opCtx, entry); @@ -77,11 +107,10 @@ IndexBuildInterceptor::IndexBuildInterceptor(OperationContext* opCtx, StringData sideWritesIdent, boost::optional duplicateKeyTrackerIdent, boost::optional skippedRecordTrackerIdent) - : _indexCatalogEntry(entry), - _sideWritesTable( + : _sideWritesTable( opCtx->getServiceContext()->getStorageEngine()->makeTemporaryRecordStoreFromExistingIdent( opCtx, sideWritesIdent)), - _skippedRecordTracker(opCtx, entry, skippedRecordTrackerIdent), + _skippedRecordTracker(opCtx, skippedRecordTrackerIdent), _skipNumAppliedCheck(true) { auto dupKeyTrackerIdentExists = duplicateKeyTrackerIdent ? 
true : false; @@ -105,20 +134,23 @@ void IndexBuildInterceptor::keepTemporaryTables() { } Status IndexBuildInterceptor::recordDuplicateKey(OperationContext* opCtx, - const KeyString::Value& key) const { - invariant(_indexCatalogEntry->descriptor()->unique()); - return _duplicateKeyTracker->recordKey(opCtx, key); + const IndexCatalogEntry* indexCatalogEntry, + const key_string::Value& key) const { + invariant(indexCatalogEntry->descriptor()->unique()); + return _duplicateKeyTracker->recordKey(opCtx, indexCatalogEntry, key); } -Status IndexBuildInterceptor::checkDuplicateKeyConstraints(OperationContext* opCtx) const { +Status IndexBuildInterceptor::checkDuplicateKeyConstraints( + OperationContext* opCtx, const IndexCatalogEntry* indexCatalogEntry) const { if (!_duplicateKeyTracker) { return Status::OK(); } - return _duplicateKeyTracker->checkConstraints(opCtx); + return _duplicateKeyTracker->checkConstraints(opCtx, indexCatalogEntry); } Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* indexCatalogEntry, const InsertDeleteOptions& options, TrackDuplicates trackDuplicates, DrainYieldPolicy drainYieldPolicy) { @@ -201,15 +233,17 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx, } const long long iteration = _numApplied + batchSize; - _checkDrainPhaseFailPoint(opCtx, &hangIndexBuildDuringDrainWritesPhase, iteration); _checkDrainPhaseFailPoint( - opCtx, &hangIndexBuildDuringDrainWritesPhaseSecond, iteration); + opCtx, indexCatalogEntry, &hangIndexBuildDuringDrainWritesPhase, iteration); + _checkDrainPhaseFailPoint( + opCtx, indexCatalogEntry, &hangIndexBuildDuringDrainWritesPhaseSecond, iteration); batchSize += 1; batchSizeBytes += objSize; if (auto status = _applyWrite(opCtx, coll, + indexCatalogEntry, unownedDoc, options, trackDuplicates, @@ -253,7 +287,17 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx, // Lock yielding will be directed by the yield policy provided. // We will typically yield locks during the draining phase if we are holding intent locks. if (DrainYieldPolicy::kYield == drainYieldPolicy) { - _yield(opCtx, &coll); + const std::string indexIdent = indexCatalogEntry->getIdent(); + _yield(opCtx, indexCatalogEntry, &coll); + + // After yielding, the latest instance of the collection is fetched and can be different + // from the collection instance prior to yielding. For this reason we need to refresh + // the index entry pointer. + indexCatalogEntry = coll->getIndexCatalog() + ->findIndexByIdent(opCtx, + indexIdent, + IndexCatalog::InclusionPolicy::kUnfinished) + ->getEntry(); } { @@ -270,8 +314,7 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx, // Apply batches of side writes until the last record in the table is seen. 
while (!atEof) { - auto swAtEof = - writeConflictRetry(opCtx, "index build drain", coll->ns().ns(), applySingleBatch); + auto swAtEof = writeConflictRetry(opCtx, "index build drain", coll->ns(), applySingleBatch); if (!swAtEof.isOK()) { return swAtEof.getStatus(); } @@ -287,7 +330,7 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx, LOGV2_DEBUG(20689, logLevel, "Index build: drained side writes", - "index"_attr = _indexCatalogEntry->descriptor()->indexName(), + "index"_attr = indexCatalogEntry->descriptor()->indexName(), "collectionUUID"_attr = coll->uuid(), logAttrs(coll->ns()), "numApplied"_attr = (_numApplied - appliedAtStart), @@ -300,6 +343,7 @@ Status IndexBuildInterceptor::drainWritesIntoIndex(OperationContext* opCtx, Status IndexBuildInterceptor::_applyWrite(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* indexCatalogEntry, const BSONObj& operation, const InsertDeleteOptions& options, TrackDuplicates trackDups, @@ -308,37 +352,47 @@ Status IndexBuildInterceptor::_applyWrite(OperationContext* opCtx, // Sorted index types may choose to disallow duplicates (enforcing an unique index). Columnar // indexes are not sorted and therefore cannot enforce uniqueness constraints. Only sorted // indexes will use this lambda passed through the IndexAccessMethod interface. - IndexAccessMethod::KeyHandlerFn onDuplicateKeyFn = [=](const KeyString::Value& duplicateKey) { - return trackDups == TrackDuplicates::kTrack ? recordDuplicateKey(opCtx, duplicateKey) - : Status::OK(); - }; - - return _indexCatalogEntry->accessMethod()->applyIndexBuildSideWrite( - opCtx, coll, operation, options, std::move(onDuplicateKeyFn), keysInserted, keysDeleted); + IndexAccessMethod::KeyHandlerFn onDuplicateKeyFn = + [=, this](const key_string::Value& duplicateKey) { + return trackDups == TrackDuplicates::kTrack + ? recordDuplicateKey(opCtx, indexCatalogEntry, duplicateKey) + : Status::OK(); + }; + + return indexCatalogEntry->accessMethod()->applyIndexBuildSideWrite(opCtx, + coll, + indexCatalogEntry, + operation, + options, + std::move(onDuplicateKeyFn), + keysInserted, + keysDeleted); } -void IndexBuildInterceptor::_yield(OperationContext* opCtx, const Yieldable* yieldable) { +void IndexBuildInterceptor::_yield(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, + const Yieldable* yieldable) { // Releasing locks means a new snapshot should be acquired when restored. opCtx->recoveryUnit()->abandonSnapshot(); yieldable->yield(); auto locker = opCtx->lockState(); Locker::LockSnapshot snapshot; - invariant(locker->saveLockStateAndUnlock(&snapshot)); + locker->saveLockStateAndUnlock(&snapshot); // Track the number of yields in CurOp. 
CurOp::get(opCtx)->yielded(); - auto failPointHang = [opCtx, indexCatalogEntry = _indexCatalogEntry](FailPoint* fp) { + auto failPointHang = [opCtx, indexCatalogEntry](FailPoint* fp) { fp->executeIf( [fp](auto&&) { LOGV2(20690, "Hanging index build during drain yield"); fp->pauseWhileSet(); }, [opCtx, indexCatalogEntry](auto&& config) { - return config.getStringField("namespace") == - indexCatalogEntry->getNSSFromCatalog(opCtx).ns(); + return NamespaceStringUtil::parseFailPointData(config, "namespace") == + indexCatalogEntry->getNSSFromCatalog(opCtx); }); }; failPointHang(&hangDuringIndexBuildDrainYield); @@ -397,6 +451,7 @@ boost::optional IndexBuildInterceptor::getMultikeyPaths() const { } Status IndexBuildInterceptor::_finishSideWrite(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, const std::vector& toInsert) { _sideWritesCounter->fetchAndAdd(toInsert.size()); // This insert may roll back, but not necessarily from inserting into this table. If other write @@ -418,7 +473,7 @@ Status IndexBuildInterceptor::_finishSideWrite(OperationContext* opCtx, 2, "Recording side write keys on index", "numRecords"_attr = records.size(), - "index"_attr = _indexCatalogEntry->descriptor()->indexName()); + "index"_attr = indexCatalogEntry->descriptor()->indexName()); // By passing a vector of null timestamps, these inserts are not timestamped individually, but // rather with the timestamp of the owning operation. @@ -427,6 +482,7 @@ Status IndexBuildInterceptor::_finishSideWrite(OperationContext* opCtx, } Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, const KeyStringSet& keys, const KeyStringSet& multikeyMetadataKeys, const MultikeyPaths& multikeyPaths, @@ -441,7 +497,7 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, // Maintain parity with IndexAccessMethod's handling of whether keys could change the multikey // state on the index. - bool isMultikey = _indexCatalogEntry->accessMethod()->asSortedData()->shouldMarkIndexAsMultikey( + bool isMultikey = indexCatalogEntry->accessMethod()->asSortedData()->shouldMarkIndexAsMultikey( keys.size(), multikeyMetadataKeys, multikeyPaths); // No need to take the multikeyPaths mutex if this would not change any multikey state. @@ -473,8 +529,8 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, // replication rollbacks, side table writes associated with a CUD operation should // remain/rollback along with the corresponding oplog entry. - // Serialize the KeyString::Value into a binary format for storage. Since the - // KeyString::Value also contains TypeBits information, it is not sufficient to just read + // Serialize the key_string::Value into a binary format for storage. Since the + // key_string::Value also contains TypeBits information, it is not sufficient to just read // from getBuffer(). 
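Note: the fragment below is not part of the patch; it is a minimal sketch of the round trip the comment above refers to, using only calls that appear in these hunks (key_string::HeapBuilder, Value::serialize, BufReader, Value::deserialize). The point is that serialize() carries the TypeBits along with the key bytes, so copying getBuffer() alone would not be enough to reconstruct the value.

    // Build a key string value, serialize it for side-table storage, and read it back.
    key_string::HeapBuilder ksBuilder(key_string::Version::kLatestVersion);
    ksBuilder.appendNumberLong(10);
    const key_string::Value original(ksBuilder.release());

    BufBuilder builder;           // a fresh builder is assumed; the patch reuses one per batch
    original.serialize(builder);  // key bytes plus TypeBits

    BufReader reader(builder.buf(), builder.len());
    const key_string::Value restored =
        key_string::Value::deserialize(reader, key_string::Version::kLatestVersion);
    // 'restored' is equivalent to 'original', including its TypeBits.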
builder.reset(); keyString.serialize(builder); @@ -496,10 +552,11 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, } } - return _finishSideWrite(opCtx, std::move(toInsert)); + return _finishSideWrite(opCtx, indexCatalogEntry, std::move(toInsert)); } Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, const std::vector& keys, int64_t* const numKeysWrittenOut, int64_t* const numKeysDeletedOut) { @@ -537,29 +594,30 @@ Status IndexBuildInterceptor::sideWrite(OperationContext* opCtx, *numKeysWrittenOut = numKeysWritten; *numKeysDeletedOut = numKeysDeleted; - return _finishSideWrite(opCtx, std::move(toInsert)); + return _finishSideWrite(opCtx, indexCatalogEntry, std::move(toInsert)); } Status IndexBuildInterceptor::retrySkippedRecords(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* indexCatalogEntry, RetrySkippedRecordMode mode) { - return _skippedRecordTracker.retrySkippedRecords(opCtx, collection, mode); + return _skippedRecordTracker.retrySkippedRecords(opCtx, collection, indexCatalogEntry, mode); } void IndexBuildInterceptor::_checkDrainPhaseFailPoint(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, FailPoint* fp, long long iteration) const { fp->executeIf( - [=](const BSONObj& data) { + [=, this, indexName = indexCatalogEntry->descriptor()->indexName()](const BSONObj& data) { LOGV2(4841800, "Hanging index build during drain writes phase", "iteration"_attr = iteration, - "index"_attr = _indexCatalogEntry->descriptor()->indexName()); + "index"_attr = indexName); fp->pauseWhileSet(opCtx); }, - [iteration, - &indexName = _indexCatalogEntry->descriptor()->indexName()](const BSONObj& data) { + [iteration, indexName = indexCatalogEntry->descriptor()->indexName()](const BSONObj& data) { auto indexNames = data.getObjectField("indexNames"); return iteration == data["iteration"].numberLong() && std::any_of(indexNames.begin(), indexNames.end(), [&indexName](const auto& elem) { diff --git a/src/mongo/db/index/index_build_interceptor.h b/src/mongo/db/index/index_build_interceptor.h index e502bacd4f3e8..8f03d8268ea69 100644 --- a/src/mongo/db/index/index_build_interceptor.h +++ b/src/mongo/db/index/index_build_interceptor.h @@ -29,8 +29,22 @@ #pragma once +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/column_key_generator.h" #include "mongo/db/index/columns_access_method.h" #include "mongo/db/index/duplicate_key_tracker.h" @@ -38,9 +52,16 @@ #include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/skipped_record_tracker.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/temporary_record_store.h" #include "mongo/db/yieldable.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/fail_point.h" namespace mongo { @@ -98,6 +119,7 @@ class IndexBuildInterceptor { * On success, `numKeysOut` if non-null will contain the number of keys added or removed. 
*/ Status sideWrite(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, const KeyStringSet& keys, const KeyStringSet& multikeyMetadataKeys, const MultikeyPaths& multikeyPaths, @@ -114,6 +136,7 @@ class IndexBuildInterceptor { * that will be removed. */ Status sideWrite(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, const std::vector& keys, int64_t* numKeysWrittenOut, int64_t* numKeysDeletedOut); @@ -122,14 +145,17 @@ class IndexBuildInterceptor { * Given a duplicate key, record the key for later verification by a call to * checkDuplicateKeyConstraints(); */ - Status recordDuplicateKey(OperationContext* opCtx, const KeyString::Value& key) const; + Status recordDuplicateKey(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, + const key_string::Value& key) const; /** * Returns Status::OK if all previously recorded duplicate key constraint violations have been * resolved for the index. Returns a DuplicateKey error if there are still duplicate key * constraint violations on the index. */ - Status checkDuplicateKeyConstraints(OperationContext* opCtx) const; + Status checkDuplicateKeyConstraints(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry) const; /** @@ -142,6 +168,7 @@ class IndexBuildInterceptor { */ Status drainWritesIntoIndex(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* indexCatalogEntry, const InsertDeleteOptions& options, TrackDuplicates trackDups, DrainYieldPolicy drainYieldPolicy); @@ -164,6 +191,7 @@ class IndexBuildInterceptor { Status retrySkippedRecords( OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* indexCatalogEntry, RetrySkippedRecordMode mode = RetrySkippedRecordMode::kKeyGenerationAndInsertion); /** @@ -198,6 +226,7 @@ class IndexBuildInterceptor { Status _applyWrite(OperationContext* opCtx, const CollectionPtr& coll, + const IndexCatalogEntry* indexCatalogEntry, const BSONObj& doc, const InsertDeleteOptions& options, TrackDuplicates trackDups, @@ -209,16 +238,18 @@ class IndexBuildInterceptor { /** * Yield lock manager locks and abandon the current storage engine snapshot. */ - void _yield(OperationContext* opCtx, const Yieldable* yieldable); + void _yield(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, + const Yieldable* yieldable); void _checkDrainPhaseFailPoint(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, FailPoint* fp, long long iteration) const; - Status _finishSideWrite(OperationContext* opCtx, const std::vector& toInsert); - - // The entry for the index that is being built. - const IndexCatalogEntry* _indexCatalogEntry; + Status _finishSideWrite(OperationContext* opCtx, + const IndexCatalogEntry* indexCatalogEntry, + const std::vector& toInsert); // This temporary record store records intercepted keys that will be written into the index by // calling drainWritesIntoIndex(). It is owned by the interceptor and dropped along with it. diff --git a/src/mongo/db/index/index_build_interceptor_test.cpp b/src/mongo/db/index/index_build_interceptor_test.cpp index b9129e16bdeaf..a67bd3a36df15 100644 --- a/src/mongo/db/index/index_build_interceptor_test.cpp +++ b/src/mongo/db/index/index_build_interceptor_test.cpp @@ -27,10 +27,38 @@ * it in the license file. 
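The header changes above remove the interceptor's cached _indexCatalogEntry member; every method now takes the entry from its caller. A simplified before/after sketch of the pattern, with illustrative types rather than MongoDB's: the stateless version never holds a pointer that could go stale if the catalog entry is replaced.

```cpp
#include <string>

struct Entry {  // stand-in for IndexCatalogEntry
    std::string indexName;
};

// Before: the interceptor pins one Entry for its whole lifetime, so it can end up
// pointing at an entry the catalog has since rebuilt.
class StatefulInterceptor {
public:
    explicit StatefulInterceptor(const Entry* entry) : _entry(entry) {}
    std::string sideWrite() const {
        return _entry->indexName;
    }

private:
    const Entry* _entry;
};

// After: callers pass the current entry on every call, so the interceptor holds
// no catalog state of its own and always sees the caller's up-to-date entry.
class StatelessInterceptor {
public:
    std::string sideWrite(const Entry& entry) const {
        return entry.indexName;
    }
};

int main() {
    Entry e{"a_1"};
    StatelessInterceptor interceptor;
    return interceptor.sideWrite(e) == "a_1" ? 0 : 1;
}
```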
*/ +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/bson/util/builder.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/index/index_build_interceptor.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -78,6 +106,11 @@ class IndexBuilderInterceptorTest : public CatalogTestFixture { return contents; } + const IndexDescriptor* getIndexDescriptor(const std::string& indexName) { + return _coll->getCollection()->getIndexCatalog()->findIndexByName(operationContext(), + indexName); + } + void setUp() override { CatalogTestFixture::setUp(); ASSERT_OK(storageInterface()->createCollection(operationContext(), _nss, {})); @@ -96,15 +129,21 @@ class IndexBuilderInterceptorTest : public CatalogTestFixture { TEST_F(IndexBuilderInterceptorTest, SingleInsertIsSavedToSideWritesTable) { auto interceptor = createIndexBuildInterceptor(fromjson("{v: 2, name: 'a_1', key: {a: 1}}")); + const IndexDescriptor* desc = getIndexDescriptor("a_1"); - KeyString::HeapBuilder ksBuilder(KeyString::Version::kLatestVersion); + key_string::HeapBuilder ksBuilder(key_string::Version::kLatestVersion); ksBuilder.appendNumberLong(10); - KeyString::Value keyString(ksBuilder.release()); + key_string::Value keyString(ksBuilder.release()); WriteUnitOfWork wuow(operationContext()); int64_t numKeys = 0; - ASSERT_OK(interceptor->sideWrite( - operationContext(), {keyString}, {}, {}, IndexBuildInterceptor::Op::kInsert, &numKeys)); + ASSERT_OK(interceptor->sideWrite(operationContext(), + desc->getEntry(), + {keyString}, + {}, + {}, + IndexBuildInterceptor::Op::kInsert, + &numKeys)); ASSERT_EQ(1, numKeys); wuow.commit(); @@ -125,6 +164,7 @@ TEST_F(IndexBuilderInterceptorTest, SingleColumnInsertIsSavedToSideWritesTable) RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); auto interceptor = createIndexBuildInterceptor( fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); + const IndexDescriptor* desc = getIndexDescriptor("columnstore"); std::vector columnChanges; columnChanges.emplace_back( @@ -134,7 +174,7 @@ TEST_F(IndexBuilderInterceptorTest, SingleColumnInsertIsSavedToSideWritesTable) int64_t numKeysInserted = 0; int64_t numKeysDeleted = 0; ASSERT_OK(interceptor->sideWrite( - operationContext(), columnChanges, &numKeysInserted, &numKeysDeleted)); + operationContext(), desc->getEntry(), columnChanges, &numKeysInserted, &numKeysDeleted)); ASSERT_EQ(1, numKeysInserted); ASSERT_EQ(0, numKeysDeleted); wuow.commit(); 
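The tests above assert how many keys each sideWrite call records as inserted or deleted in the temporary side-writes table. A toy model of that bookkeeping, using plain containers instead of the real temporary record store:

```cpp
#include <cassert>
#include <string>
#include <vector>

enum class Op { kInsert, kDelete };

struct SideWriteRecord {
    std::string key;
    Op op;
};

// Minimal stand-in for the interceptor's side-writes table: every intercepted
// write is appended, and the caller gets back how many were inserts vs. deletes.
class SideWritesTable {
public:
    void sideWrite(const std::vector<SideWriteRecord>& writes,
                   long long* numInserted,
                   long long* numDeleted) {
        *numInserted = 0;
        *numDeleted = 0;
        for (const auto& w : writes) {
            _records.push_back(w);
            (w.op == Op::kInsert ? *numInserted : *numDeleted) += 1;
        }
    }

    std::size_t size() const {
        return _records.size();
    }

private:
    std::vector<SideWriteRecord> _records;
};

int main() {
    SideWritesTable table;
    long long inserted = 0, deleted = 0;
    table.sideWrite({{"k1", Op::kInsert}, {"k2", Op::kDelete}}, &inserted, &deleted);
    assert(inserted == 1 && deleted == 1 && table.size() == 2);
    return 0;
}
```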
@@ -159,6 +199,7 @@ TEST_F(IndexBuilderInterceptorTest, SingleColumnDeleteIsSavedToSideWritesTable) RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); auto interceptor = createIndexBuildInterceptor( fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); + const IndexDescriptor* desc = getIndexDescriptor("columnstore"); std::vector columnChanges; columnChanges.emplace_back( @@ -168,7 +209,7 @@ TEST_F(IndexBuilderInterceptorTest, SingleColumnDeleteIsSavedToSideWritesTable) int64_t numKeysInserted = 0; int64_t numKeysDeleted = 0; ASSERT_OK(interceptor->sideWrite( - operationContext(), columnChanges, &numKeysInserted, &numKeysDeleted)); + operationContext(), desc->getEntry(), columnChanges, &numKeysInserted, &numKeysDeleted)); ASSERT_EQ(0, numKeysInserted); ASSERT_EQ(1, numKeysDeleted); wuow.commit(); @@ -193,6 +234,7 @@ TEST_F(IndexBuilderInterceptorTest, SingleColumnUpdateIsSavedToSideWritesTable) RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); auto interceptor = createIndexBuildInterceptor( fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); + const IndexDescriptor* desc = getIndexDescriptor("columnstore"); // create path + cell + rid std::vector columnChanges; @@ -203,7 +245,7 @@ TEST_F(IndexBuilderInterceptorTest, SingleColumnUpdateIsSavedToSideWritesTable) int64_t numKeysInserted = 0; int64_t numKeysDeleted = 0; ASSERT_OK(interceptor->sideWrite( - operationContext(), columnChanges, &numKeysInserted, &numKeysDeleted)); + operationContext(), desc->getEntry(), columnChanges, &numKeysInserted, &numKeysDeleted)); ASSERT_EQ(1, numKeysInserted); ASSERT_EQ(0, numKeysDeleted); wuow.commit(); @@ -228,6 +270,7 @@ TEST_F(IndexBuilderInterceptorTest, MultipleColumnInsertsAreSavedToSideWritesTab RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); auto interceptor = createIndexBuildInterceptor( fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); + const IndexDescriptor* desc = getIndexDescriptor("columnstore"); std::vector columnChanges; columnChanges.emplace_back("changedPath1", @@ -252,7 +295,7 @@ TEST_F(IndexBuilderInterceptorTest, MultipleColumnInsertsAreSavedToSideWritesTab int64_t numKeysDeleted = 0; ASSERT_OK(interceptor->sideWrite( - operationContext(), columnChanges, &numKeysInserted, &numKeysDeleted)); + operationContext(), desc->getEntry(), columnChanges, &numKeysInserted, &numKeysDeleted)); ASSERT_EQ(4, numKeysInserted); ASSERT_EQ(0, numKeysDeleted); wuow.commit(); @@ -303,6 +346,7 @@ TEST_F(IndexBuilderInterceptorTest, MultipleColumnSideWritesAreSavedToSideWrites RAIIServerParameterControllerForTest controller("featureFlagColumnstoreIndexes", true); auto interceptor = createIndexBuildInterceptor( fromjson("{v: 2, name: 'columnstore', key: {'$**': 'columnstore'}}")); + const IndexDescriptor* desc = getIndexDescriptor("columnstore"); WriteUnitOfWork wuow(operationContext()); int64_t numKeysInserted = 0; @@ -314,7 +358,7 @@ TEST_F(IndexBuilderInterceptorTest, MultipleColumnSideWritesAreSavedToSideWrites RecordId(1), column_keygen::ColumnKeyGenerator::DiffAction::kInsert); ASSERT_OK(interceptor->sideWrite( - operationContext(), columnChanges, &numKeysInserted, &numKeysDeleted)); + operationContext(), desc->getEntry(), columnChanges, &numKeysInserted, &numKeysDeleted)); ASSERT_EQ(1, numKeysInserted); ASSERT_EQ(0, numKeysDeleted); @@ -322,7 +366,7 @@ TEST_F(IndexBuilderInterceptorTest, MultipleColumnSideWritesAreSavedToSideWrites 
columnChanges2.emplace_back( "changedPath1", "", RecordId(1), column_keygen::ColumnKeyGenerator::DiffAction::kDelete); ASSERT_OK(interceptor->sideWrite( - operationContext(), columnChanges2, &numKeysInserted, &numKeysDeleted)); + operationContext(), desc->getEntry(), columnChanges2, &numKeysInserted, &numKeysDeleted)); ASSERT_EQ(0, numKeysInserted); ASSERT_EQ(1, numKeysDeleted); @@ -334,7 +378,7 @@ TEST_F(IndexBuilderInterceptorTest, MultipleColumnSideWritesAreSavedToSideWrites columnChanges3.emplace_back( "changedPath3", "", RecordId(2), column_keygen::ColumnKeyGenerator::DiffAction::kDelete); ASSERT_OK(interceptor->sideWrite( - operationContext(), columnChanges3, &numKeysInserted, &numKeysDeleted)); + operationContext(), desc->getEntry(), columnChanges3, &numKeysInserted, &numKeysDeleted)); ASSERT_EQ(1, numKeysInserted); ASSERT_EQ(1, numKeysDeleted); @@ -344,7 +388,7 @@ TEST_F(IndexBuilderInterceptorTest, MultipleColumnSideWritesAreSavedToSideWrites RecordId(2), column_keygen::ColumnKeyGenerator::DiffAction::kInsert); ASSERT_OK(interceptor->sideWrite( - operationContext(), columnChanges4, &numKeysInserted, &numKeysDeleted)); + operationContext(), desc->getEntry(), columnChanges4, &numKeysInserted, &numKeysDeleted)); ASSERT_EQ(1, numKeysInserted); ASSERT_EQ(0, numKeysDeleted); wuow.commit(); diff --git a/src/mongo/db/index/index_descriptor.cpp b/src/mongo/db/index/index_descriptor.cpp index 1426207c61083..9e4df7fd3ddfe 100644 --- a/src/mongo/db/index/index_descriptor.cpp +++ b/src/mongo/db/index/index_descriptor.cpp @@ -27,29 +27,69 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index/index_descriptor.h" - +#include #include - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/index_path_projection.h" +#include "mongo/db/exec/projection_executor.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index/column_key_generator.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/wildcard_access_method.h" #include "mongo/db/index/wildcard_key_generator.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex namespace mongo { +namespace { + +/** + * Returns wildcardProjection or columnstoreProjection projection + */ +BSONObj createPathProjection(const BSONObj& infoObj) { + if (const auto wildcardProjection = infoObj[IndexDescriptor::kWildcardProjectionFieldName]) { + return wildcardProjection.Obj().getOwned(); + } else if (const auto columnStoreProjection = + infoObj[IndexDescriptor::kColumnStoreProjectionFieldName]) { + return columnStoreProjection.Obj().getOwned(); + } else { + 
return BSONObj(); + } +} + +} // namespace + using IndexVersion = IndexDescriptor::IndexVersion; namespace { @@ -124,6 +164,9 @@ constexpr StringData IndexDescriptor::kColumnStoreCompressorFieldName; * infoObj - options information */ IndexDescriptor::IndexDescriptor(const std::string& accessMethodName, BSONObj infoObj) + : _shared(make_intrusive(accessMethodName, infoObj)) {} + +IndexDescriptor::SharedState::SharedState(const std::string& accessMethodName, BSONObj infoObj) : _accessMethodName(accessMethodName), _indexType(IndexNames::nameToType(accessMethodName)), _infoObj(infoObj.getOwned()), @@ -222,8 +265,8 @@ IndexDescriptor::Comparison IndexDescriptor::compareIndexOptions( // the original and normalized projections will be empty BSON objects, so we can still do the // comparison based on the normalized projection. static const UnorderedFieldsBSONObjComparator kUnorderedBSONCmp; - if (kUnorderedBSONCmp.evaluate(_normalizedProjection != - existingIndexDesc->_normalizedProjection)) { + if (kUnorderedBSONCmp.evaluate(_shared->_normalizedProjection != + existingIndexDesc->_shared->_normalizedProjection)) { return Comparison::kDifferent; } @@ -309,14 +352,14 @@ std::vector IndexDescriptor::getFieldNames() const { }; // Iterate over the key pattern and add the field names to the 'fieldNames' vector. - BSONObjIterator keyPatternIter(_keyPattern); + BSONObjIterator keyPatternIter(_shared->_keyPattern); while (keyPatternIter.more()) { BSONElement KeyPatternElem = keyPatternIter.next(); auto fieldName = KeyPatternElem.fieldNameStringData(); // If the index type is text and the field name is either '_fts' or '_ftsx', then append the // index fields to the field names, otherwise add the field name from the key pattern. - if ((_indexType == IndexType::INDEX_TEXT) && + if ((_shared->_indexType == IndexType::INDEX_TEXT) && (fieldName == kFTSFieldName || fieldName == kFTSXFieldName)) { maybeAppendFtsIndexField(); } else { @@ -326,7 +369,7 @@ std::vector IndexDescriptor::getFieldNames() const { // If the index type is text and the 'hasSeenFtsOrFtsxFields' is set to false, then append the // index fields. 
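createPathProjection above becomes a file-local helper in an anonymous namespace: it returns whichever of the two projection fields is present, or an empty object. The selection logic in isolation, with a std::map standing in for BSONObj and illustrative field-name strings:

```cpp
#include <cassert>
#include <map>
#include <string>

using FakeObj = std::map<std::string, std::string>;  // stand-in for BSONObj

namespace {

// Return the wildcard projection if present, otherwise the columnstore
// projection, otherwise an empty object (mirrors createPathProjection above).
std::string createPathProjection(const FakeObj& infoObj) {
    if (auto it = infoObj.find("wildcardProjection"); it != infoObj.end()) {
        return it->second;
    }
    if (auto it = infoObj.find("columnstoreProjection"); it != infoObj.end()) {
        return it->second;
    }
    return {};
}

}  // namespace

int main() {
    assert(createPathProjection({{"wildcardProjection", "{a: 1}"}}) == "{a: 1}");
    assert(createPathProjection({{"columnstoreProjection", "{b: 1}"}}) == "{b: 1}");
    assert(createPathProjection({}).empty());
    return 0;
}
```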
- if (_indexType == IndexType::INDEX_TEXT) { + if (_shared->_indexType == IndexType::INDEX_TEXT) { maybeAppendFtsIndexField(); } diff --git a/src/mongo/db/index/index_descriptor.h b/src/mongo/db/index/index_descriptor.h index 26657bf810f6d..b9908a54b90ff 100644 --- a/src/mongo/db/index/index_descriptor.h +++ b/src/mongo/db/index/index_descriptor.h @@ -29,16 +29,30 @@ #pragma once -#include "mongo/db/index/index_descriptor_fwd.h" - +#include +#include +#include +#include +#include +#include +#include #include #include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/ordering.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/index/index_descriptor_fwd.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/index_names.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -119,7 +133,7 @@ class IndexDescriptor { * Example: {foo: 1, bar: -1} */ const BSONObj& keyPattern() const { - return _keyPattern; + return _shared->_keyPattern; } /** @@ -138,7 +152,7 @@ class IndexDescriptor { * return (unnormalized) object: {"a.b":{"$numberDouble":"1"}} */ const BSONObj& pathProjection() const { - return _projection; + return _shared->_projection; } /** @@ -159,12 +173,12 @@ class IndexDescriptor { * return (normalized) object: {"a":{"b":true},"_id":false} */ const BSONObj& normalizedPathProjection() const { - return _normalizedProjection; + return _shared->_normalizedProjection; } // How many fields do we index / are in the key pattern? int getNumFields() const { - return _numFields; + return _shared->_numFields; } // @@ -173,75 +187,79 @@ class IndexDescriptor { // Return the name of the index. const std::string& indexName() const { - return _indexName; + return _shared->_indexName; } // Return the name of the access method we must use to access this index's data. const std::string& getAccessMethodName() const { - return _accessMethodName; + return _shared->_accessMethodName; } // Returns the type of the index associated with this descriptor. IndexType getIndexType() const { - return _indexType; + return _shared->_indexType; } /** * Return a pointer to the IndexCatalogEntry that owns this descriptor, or null if orphaned. */ - IndexCatalogEntry* getEntry() const { + const IndexCatalogEntry* getEntry() const { return _entry; } + void setEntry(IndexCatalogEntry* entry) { + _entry = entry; + } + // // Properties every index has // // Return what version of index this is. IndexVersion version() const { - return _version; + return _shared->_version; } // Return the 'Ordering' of the index keys. const Ordering& ordering() const { - return _ordering; + return _shared->_ordering; } // May each key only occur once? bool unique() const { - return _unique; + return _shared->_unique; } bool hidden() const { - return _hidden; + return _shared->_hidden; } // Is this index sparse? bool isSparse() const { - return _sparse; + return _shared->_sparse; } // Is this a partial index? bool isPartial() const { - return _partial; + return _shared->_partial; } bool isIdIndex() const { - return _isIdIndex; + return _shared->_isIdIndex; } // Return a (rather compact) std::string representation. std::string toString() const { - return _infoObj.toString(); + return _shared->_infoObj.toString(); } // Return the info object. 
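getFieldNames above walks the key pattern and, for text indexes, substitutes the '_fts'/'_ftsx' placeholders with the actual indexed fields, appending them at most once. A rough standalone model of that behaviour; where the text field names come from is not shown in the hunk, so they are taken as a parameter here:

```cpp
#include <cassert>
#include <string>
#include <vector>

// Rough model of the getFieldNames() logic described by the comments above.
std::vector<std::string> getFieldNames(const std::vector<std::string>& keyPatternFields,
                                       bool isTextIndex,
                                       const std::vector<std::string>& textIndexFields) {
    std::vector<std::string> fieldNames;
    bool appendedTextFields = false;
    auto maybeAppendFtsIndexFields = [&] {
        if (!appendedTextFields) {
            fieldNames.insert(fieldNames.end(), textIndexFields.begin(), textIndexFields.end());
            appendedTextFields = true;
        }
    };
    for (const auto& field : keyPatternFields) {
        if (isTextIndex && (field == "_fts" || field == "_ftsx")) {
            maybeAppendFtsIndexFields();
        } else {
            fieldNames.push_back(field);
        }
    }
    // Text indexes still report their indexed fields even if no placeholder was seen.
    if (isTextIndex) {
        maybeAppendFtsIndexFields();
    }
    return fieldNames;
}

int main() {
    auto names = getFieldNames({"a", "_fts", "_ftsx"}, /*isTextIndex=*/true, {"title", "body"});
    assert((names == std::vector<std::string>{"a", "title", "body"}));
    return 0;
}
```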
const BSONObj& infoObj() const { - return _infoObj; + return _shared->_infoObj; } BSONObj toBSON() const { - return _infoObj; + return _shared->_infoObj; } /** @@ -254,19 +272,20 @@ class IndexDescriptor { const IndexCatalogEntry* existingIndex) const; const BSONObj& collation() const { - return _collation; + return _shared->_collation; } const BSONObj& partialFilterExpression() const { - return _partialFilterExpression; + return _shared->_partialFilterExpression; } bool prepareUnique() const { - return _prepareUnique; + return _shared->_prepareUnique; } boost::optional compressor() const { - return _compressor ? boost::make_optional(*_compressor) : boost::none; + return _shared->_compressor ? boost::make_optional(*_shared->_compressor) + : boost::none; } /** @@ -302,53 +321,48 @@ class IndexDescriptor { } private: - /** - * Returns wildcardProjection or columnstoreProjection projection + /* + * Holder of shared state between IndexDescriptor clones. */ - BSONObj createPathProjection(const BSONObj& infoObj) const { - if (const auto wildcardProjection = - infoObj[IndexDescriptor::kWildcardProjectionFieldName]) { - return wildcardProjection.Obj().getOwned(); - } else if (const auto columnStoreProjection = - infoObj[IndexDescriptor::kColumnStoreProjectionFieldName]) { - return columnStoreProjection.Obj().getOwned(); - } else { - return BSONObj(); - } - } + struct SharedState : public RefCountable { + SharedState(const std::string& accessMethodName, BSONObj infoObj); + + // What access method should we use for this index? + std::string _accessMethodName; + + IndexType _indexType; + + // The BSONObj describing the index. Accessed through the various members above. + BSONObj _infoObj; + + // --- cached data from _infoObj + + int64_t _numFields; // How many fields are indexed? + BSONObj _keyPattern; + BSONObj _projection; // for wildcardProjection / columnstoreProjection; never changes + BSONObj + _normalizedProjection; // for wildcardProjection / columnstoreProjection; never changes + std::string _indexName; + bool _isIdIndex; + bool _sparse; + bool _unique; + bool _hidden; + bool _partial; + IndexVersion _version; + // '_ordering' should be initialized after '_indexType' because different index types may + // require different handling of the Ordering. + Ordering _ordering; + BSONObj _collation; + BSONObj _partialFilterExpression; + bool _prepareUnique = false; + boost::optional _compressor; + }; - // What access method should we use for this index? - std::string _accessMethodName; - - IndexType _indexType; - - // The BSONObj describing the index. Accessed through the various members above. - BSONObj _infoObj; - - // --- cached data from _infoObj - - int64_t _numFields; // How many fields are indexed? - BSONObj _keyPattern; - BSONObj _projection; // for wildcardProjection / columnstoreProjection; never changes - BSONObj _normalizedProjection; // for wildcardProjection / columnstoreProjection; never changes - std::string _indexName; - bool _isIdIndex; - bool _sparse; - bool _unique; - bool _hidden; - bool _partial; - IndexVersion _version; - // '_ordering' should be initialized after '_indexType' because different index types may - // require different handling of the Ordering. - Ordering _ordering; - BSONObj _collation; - BSONObj _partialFilterExpression; - bool _prepareUnique = false; - boost::optional _compressor; + boost::intrusive_ptr _shared; // Many query stages require going from an IndexDescriptor to its IndexCatalogEntry, so for // now we need this. 
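The hunk above moves the descriptor's immutable, cached fields into a ref-counted SharedState held through boost::intrusive_ptr, so clones of an IndexDescriptor share one parsed copy of the options instead of duplicating them. A simplified sketch of the same idea using std::shared_ptr (the real code uses an intrusive pointer and many more fields):

```cpp
#include <cassert>
#include <memory>
#include <string>
#include <utility>

class Descriptor {
public:
    explicit Descriptor(std::string infoObj)
        : _shared(std::make_shared<const SharedState>(std::move(infoObj))) {}

    const std::string& infoObj() const {
        return _shared->infoObj;
    }

private:
    // Immutable state shared between copies of the descriptor; copying the
    // descriptor only bumps a reference count.
    struct SharedState {
        explicit SharedState(std::string obj) : infoObj(std::move(obj)) {}
        std::string infoObj;
    };

    std::shared_ptr<const SharedState> _shared;
};

int main() {
    Descriptor a("{v: 2, key: {a: 1}}");
    Descriptor b = a;  // cheap copy: both descriptors point at the same SharedState
    assert(&a.infoObj() == &b.infoObj());
    return 0;
}
```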
- IndexCatalogEntry* _entry = nullptr; + const IndexCatalogEntry* _entry = nullptr; friend class IndexCatalog; friend class IndexCatalogEntryImpl; diff --git a/src/mongo/db/index/key_gen_bm.cpp b/src/mongo/db/index/key_gen_bm.cpp index 84c7681149703..7c9c33d30dd3d 100644 --- a/src/mongo/db/index/key_gen_bm.cpp +++ b/src/mongo/db/index/key_gen_bm.cpp @@ -27,13 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/ordering.h" #include "mongo/db/index/btree_key_generator.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { namespace { @@ -54,7 +61,7 @@ void BM_KeyGenBasic(benchmark::State& state, bool skipMultikey) { BtreeKeyGenerator generator({kFieldName}, {BSONElement{}}, false, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, makeOrdering(kFieldName)); SharedBufferFragmentBuilder allocator(kMemBlockSize, @@ -84,7 +91,7 @@ void BM_KeyGenArray(benchmark::State& state, int32_t elements) { BtreeKeyGenerator generator({kFieldName}, {BSONElement{}}, false, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, makeOrdering(kFieldName)); SharedBufferFragmentBuilder allocator(kMemBlockSize, @@ -112,7 +119,7 @@ void BM_KeyGenArrayZero(benchmark::State& state, int32_t elements) { BtreeKeyGenerator generator({kFieldName}, {BSONElement{}}, false, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, makeOrdering(kFieldName)); SharedBufferFragmentBuilder allocator(kMemBlockSize, @@ -145,7 +152,7 @@ void BM_KeyGenArrayOfArray(benchmark::State& state, int32_t elements) { BtreeKeyGenerator generator({kFieldName}, {BSONElement{}}, false, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, makeOrdering(kFieldName)); SharedBufferFragmentBuilder allocator(kMemBlockSize, diff --git a/src/mongo/db/index/s2_access_method.cpp b/src/mongo/db/index/s2_access_method.cpp index eaba79ce7ebdb..e199bab98d966 100644 --- a/src/mongo/db/index/s2_access_method.cpp +++ b/src/mongo/db/index/s2_access_method.cpp @@ -30,17 +30,28 @@ #include "mongo/db/index/s2_access_method.h" -#include - -#include "mongo/base/status.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/geo/geoconstants.h" -#include "mongo/db/geo/geoparser.h" #include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/expression_params.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_names.h" -#include "mongo/db/jsobj.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -137,6 +148,7 @@ void S2AccessMethod::validateDocument(const CollectionPtr& collection, void S2AccessMethod::doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, @@ -146,7 
+158,7 @@ void S2AccessMethod::doGetKeys(OperationContext* opCtx, const boost::optional& id) const { ExpressionKeysPrivate::getS2Keys(pooledBufferBuilder, obj, - _descriptor->keyPattern(), + entry->descriptor()->keyPattern(), _params, keys, multikeyPaths, diff --git a/src/mongo/db/index/s2_access_method.h b/src/mongo/db/index/s2_access_method.h index 38999d95379bc..ba3c96dbd0998 100644 --- a/src/mongo/db/index/s2_access_method.h +++ b/src/mongo/db/index/s2_access_method.h @@ -29,12 +29,24 @@ #pragma once +#include +#include + #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/s2_common.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -70,6 +82,7 @@ class S2AccessMethod : public SortedDataIndexAccessMethod { */ void doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/s2_bucket_access_method.cpp b/src/mongo/db/index/s2_bucket_access_method.cpp index b9e926a753b06..4e3afde98ec84 100644 --- a/src/mongo/db/index/s2_bucket_access_method.cpp +++ b/src/mongo/db/index/s2_bucket_access_method.cpp @@ -30,17 +30,28 @@ #include "mongo/db/index/s2_bucket_access_method.h" -#include - -#include "mongo/base/status.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/geo/geoconstants.h" -#include "mongo/db/geo/geoparser.h" #include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/expression_params.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_names.h" -#include "mongo/db/jsobj.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -119,6 +130,7 @@ void S2BucketAccessMethod::validateDocument(const CollectionPtr& collection, void S2BucketAccessMethod::doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, @@ -128,7 +140,7 @@ void S2BucketAccessMethod::doGetKeys(OperationContext* opCtx, const boost::optional& id) const { ExpressionKeysPrivate::getS2Keys(pooledBufferBuilder, obj, - _descriptor->keyPattern(), + entry->descriptor()->keyPattern(), _params, keys, multikeyPaths, diff --git a/src/mongo/db/index/s2_bucket_access_method.h b/src/mongo/db/index/s2_bucket_access_method.h index c2d7186881d35..1d7ca5e2547d7 100644 --- a/src/mongo/db/index/s2_bucket_access_method.h +++ b/src/mongo/db/index/s2_bucket_access_method.h @@ -29,12 +29,24 @@ #pragma once +#include +#include + #include "mongo/base/status.h" 
#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/s2_common.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -70,6 +82,7 @@ class S2BucketAccessMethod : public SortedDataIndexAccessMethod { */ void doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/s2_bucket_key_generator_test.cpp b/src/mongo/db/index/s2_bucket_key_generator_test.cpp index 7ef1c4b21d35e..799d559190df1 100644 --- a/src/mongo/db/index/s2_bucket_key_generator_test.cpp +++ b/src/mongo/db/index/s2_bucket_key_generator_test.cpp @@ -29,18 +29,38 @@ #include +#include #include #include - -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/bson/ordering.h" #include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/expression_params.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/s2_common.h" -#include "mongo/db/json.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/shared_buffer_fragment.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -54,7 +74,7 @@ std::string dumpKeyset(const KeyStringSet& keyStrings) { std::stringstream ss; ss << "[ "; for (auto& keyString : keyStrings) { - auto key = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto key = key_string::toBson(keyString, Ordering::make(BSONObj())); ss << key.toString() << " "; } ss << "]"; @@ -108,12 +128,12 @@ void assertMultikeyPathsEqual(const MultikeyPaths& expectedMultikeyPaths, struct S2BucketKeyGeneratorTest : public unittest::Test { using PointSet = std::set>; - SharedBufferFragmentBuilder allocator{KeyString::HeapBuilder::kHeapAllocatorDefaultBytes}; + SharedBufferFragmentBuilder allocator{key_string::HeapBuilder::kHeapAllocatorDefaultBytes}; void verifySetIsCoveredByKeys(const KeyStringSet& keys, const PointSet& points) const { std::vector cells; for (const auto& key : keys) { - auto obj = KeyString::toBson(key, Ordering::make(BSONObj())); + auto obj = key_string::toBson(key, Ordering::make(BSONObj())); cells.emplace_back(S2CellId(obj.firstElement().Long())); } @@ -152,7 +172,7 @@ TEST_F(S2BucketKeyGeneratorTest, GetS2BucketKeys) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + 
key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); @@ -181,7 +201,7 @@ TEST_F(S2BucketKeyGeneratorTest, GetS2BucketKeysSubField) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); @@ -210,7 +230,7 @@ TEST_F(S2BucketKeyGeneratorTest, GetS2BucketKeysDeepSubField) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); @@ -244,7 +264,7 @@ TEST_F(S2BucketKeyGeneratorTest, GetS2BucketKeysSubFieldSomeMissing) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); diff --git a/src/mongo/db/index/s2_common.cpp b/src/mongo/db/index/s2_common.cpp index 3dc9dcf8f2c12..e6d89754f9468 100644 --- a/src/mongo/db/index/s2_common.cpp +++ b/src/mongo/db/index/s2_common.cpp @@ -29,11 +29,16 @@ #include "mongo/db/index/s2_common.h" -#include +#include #include #include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/geo/geometry_container.h" #include "mongo/db/query/collation/collator_interface.h" @@ -110,9 +115,9 @@ BSONObj S2CellIdToIndexKey(const S2CellId& cellId, S2IndexVersion indexVersion) void S2CellIdToIndexKeyStringAppend(const S2CellId& cellId, S2IndexVersion indexVersion, - const std::vector& keysToAdd, - std::vector* out, - KeyString::Version keyStringVersion, + const std::vector& keysToAdd, + std::vector* out, + key_string::Version keyStringVersion, Ordering ordering) { // The range of an unsigned long long is // |-----------------|------------------| diff --git a/src/mongo/db/index/s2_common.h b/src/mongo/db/index/s2_common.h index e9dd8b4a9c7f6..75bbb180cf51f 100644 --- a/src/mongo/db/index/s2_common.h +++ b/src/mongo/db/index/s2_common.h @@ -29,8 +29,12 @@ #pragma once +#include #include +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/ordering.h" #include "mongo/db/jsobj.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/storage/key_string.h" @@ -86,9 +90,9 @@ struct S2IndexingParams { BSONObj S2CellIdToIndexKey(const S2CellId& cellId, S2IndexVersion indexVersion); void S2CellIdToIndexKeyStringAppend(const S2CellId& cellId, S2IndexVersion indexVersion, - const std::vector& keysToAdd, - std::vector* out, - KeyString::Version keyStringVersion, + const std::vector& keysToAdd, + std::vector* out, + key_string::Version keyStringVersion, Ordering ordering); } // namespace mongo diff --git a/src/mongo/db/index/s2_key_generator_test.cpp b/src/mongo/db/index/s2_key_generator_test.cpp index 23a321ddc6819..dc2779c2f9c2d 100644 --- a/src/mongo/db/index/s2_key_generator_test.cpp +++ b/src/mongo/db/index/s2_key_generator_test.cpp @@ -27,21 +27,36 @@ * it in the license file. 
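The key generator tests above compare whole key sets and, via dumpKeyset, print each key decoded back to BSON when a comparison fails. The shape of those helpers, with plain strings standing in for decoded keys:

```cpp
#include <iostream>
#include <set>
#include <sstream>
#include <string>

// Render a key set in a readable "[ k1 k2 ... ]" form, as the test helper above does.
std::string dumpKeyset(const std::set<std::string>& keys) {
    std::stringstream ss;
    ss << "[ ";
    for (const auto& key : keys) {
        ss << key << " ";
    }
    ss << "]";
    return ss.str();
}

// Compare expected vs. actual keys as sets and dump both on mismatch.
bool areKeysetsEqual(const std::set<std::string>& expected, const std::set<std::string>& actual) {
    if (expected == actual) {
        return true;
    }
    std::cout << "expected: " << dumpKeyset(expected) << "\nactual:   " << dumpKeyset(actual)
              << "\n";
    return false;
}

int main() {
    return areKeysetsEqual({"{a: 1}", "{a: 2}"}, {"{a: 1}", "{a: 2}"}) ? 0 : 1;
}
```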
*/ - -#include "mongo/platform/basic.h" - -#include "mongo/db/index/expression_keys_private.h" - #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/json.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/expression_params.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/s2_common.h" -#include "mongo/db/json.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/shared_buffer_fragment.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -55,7 +70,7 @@ std::string dumpKeyset(const KeyStringSet& keyStrings) { std::stringstream ss; ss << "[ "; for (auto& keyString : keyStrings) { - auto key = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto key = key_string::toBson(keyString, Ordering::make(BSONObj())); ss << key.toString() << " "; } ss << "]"; @@ -108,7 +123,7 @@ void assertMultikeyPathsEqual(const MultikeyPaths& expectedMultikeyPaths, } struct S2KeyGeneratorTest : public unittest::Test { - SharedBufferFragmentBuilder allocator{KeyString::HeapBuilder::kHeapAllocatorDefaultBytes}; + SharedBufferFragmentBuilder allocator{key_string::HeapBuilder::kHeapAllocatorDefaultBytes}; long long getCellID(int x, int y, bool multiPoint = false) { BSONObj obj; @@ -137,12 +152,12 @@ struct S2KeyGeneratorTest : public unittest::Test { params, &keys, multikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); ASSERT_EQUALS(1U, keys.size()); - auto key = KeyString::toBson(*keys.begin(), Ordering::make(BSONObj())); + auto key = key_string::toBson(*keys.begin(), Ordering::make(BSONObj())); return key.firstElement().Long(); } }; @@ -166,22 +181,22 @@ TEST_F(S2KeyGeneratorTest, GetS2KeysFromSubobjectWithArrayOfGeoAndNonGeoSubobjec params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - BSON("" << 1 << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - BSON("" << 1 << "" << getCellID(3, 3)), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - BSON("" << 2 << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString4(KeyString::Version::kLatestVersion, - BSON("" << 2 << "" << getCellID(3, 3)), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + BSON("" << 1 << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); + 
key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + BSON("" << 1 << "" << getCellID(3, 3)), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + BSON("" << 2 << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString4(key_string::Version::kLatestVersion, + BSON("" << 2 << "" << getCellID(3, 3)), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{ keyString1.release(), keyString2.release(), keyString3.release(), keyString4.release()}; @@ -208,19 +223,19 @@ TEST_F(S2KeyGeneratorTest, GetS2KeysFromArrayOfNonGeoSubobjectsWithArrayValues) params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - BSON("" << 1 << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - BSON("" << 2 << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - BSON("" << 3 << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + BSON("" << 1 << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + BSON("" << 2 << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + BSON("" << 3 << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -244,20 +259,20 @@ TEST_F(S2KeyGeneratorTest, GetS2KeysFromMultiPointInGeoField) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); const bool multiPoint = true; - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - BSON("" << 1 << "" << getCellID(0, 0, multiPoint)), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - BSON("" << 1 << "" << getCellID(1, 0, multiPoint)), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - BSON("" << 1 << "" << getCellID(1, 1, multiPoint)), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + BSON("" << 1 << "" << getCellID(0, 0, multiPoint)), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + BSON("" << 1 << "" << getCellID(1, 0, multiPoint)), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + BSON("" << 1 << "" << getCellID(1, 1, multiPoint)), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release(), keyString3.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -280,14 +295,14 @@ TEST_F(S2KeyGeneratorTest, CollationAppliedToNonGeoStringFieldAfterGeoField) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, 
SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "gnirts"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << "gnirts"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -311,15 +326,15 @@ TEST_F(S2KeyGeneratorTest, CollationAppliedToNonGeoStringFieldBeforeGeoField) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" - << "gnirts" - << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" + << "gnirts" + << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -343,16 +358,16 @@ TEST_F(S2KeyGeneratorTest, CollationAppliedToAllNonGeoStringFields) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" - << "gnirts" - << "" << getCellID(0, 0) << "" - << "2gnirts"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" + << "gnirts" + << "" << getCellID(0, 0) << "" + << "2gnirts"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -377,14 +392,14 @@ TEST_F(S2KeyGeneratorTest, CollationAppliedToNonGeoStringFieldWithMultiplePathCo params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "gnirts"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << "gnirts"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -408,18 +423,18 @@ TEST_F(S2KeyGeneratorTest, CollationAppliedToStringsInArray) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "gnirts"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "2gnirts"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << "gnirts"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + 
<< "2gnirts"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString1.release(), keyString2.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -443,34 +458,34 @@ TEST_F(S2KeyGeneratorTest, CollationAppliedToStringsInAllArrays) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "gnirts" - << "" - << "cba"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "gnirts" - << "" - << "fed"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString3(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "2gnirts" - << "" - << "cba"), - Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString4(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "2gnirts" - << "" - << "fed"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << "gnirts" + << "" + << "cba"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << "gnirts" + << "" + << "fed"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString3(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << "2gnirts" + << "" + << "cba"), + Ordering::make(BSONObj())); + key_string::HeapBuilder keyString4(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << "2gnirts" + << "" + << "fed"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{ keyString1.release(), keyString2.release(), keyString3.release(), keyString4.release()}; @@ -494,13 +509,13 @@ TEST_F(S2KeyGeneratorTest, CollationDoesNotAffectNonStringFields) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" << 5), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" << 5), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -524,15 +539,15 @@ TEST_F(S2KeyGeneratorTest, CollationAppliedToStringsInNestedObjects) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << BSON("c" - << "gnirts")), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << BSON("c" + << "gnirts")), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -556,14 +571,14 @@ TEST_F(S2KeyGeneratorTest, NoCollation) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + 
key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" - << "string"), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" + << "string"), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -587,13 +602,13 @@ TEST_F(S2KeyGeneratorTest, EmptyArrayForLeadingFieldIsConsideredMultikey) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << BSONUndefined << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << BSONUndefined << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -616,13 +631,13 @@ TEST_F(S2KeyGeneratorTest, EmptyArrayForTrailingFieldIsConsideredMultikey) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << getCellID(0, 0) << "" << BSONUndefined), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << getCellID(0, 0) << "" << BSONUndefined), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -645,13 +660,13 @@ TEST_F(S2KeyGeneratorTest, SingleElementTrailingArrayIsConsideredMultikey) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << 99 << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << 99 << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); @@ -674,17 +689,40 @@ TEST_F(S2KeyGeneratorTest, MidPathSingleElementArrayIsConsideredMultikey) { params, &actualKeys, &actualMultikeyPaths, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, SortedDataIndexAccessMethod::GetKeysContext::kAddingKeys, Ordering::make(BSONObj())); - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, - BSON("" << 99 << "" << getCellID(0, 0)), - Ordering::make(BSONObj())); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, + BSON("" << 99 << "" << getCellID(0, 0)), + Ordering::make(BSONObj())); KeyStringSet expectedKeys{keyString.release()}; ASSERT_TRUE(areKeysetsEqual(expectedKeys, actualKeys)); assertMultikeyPathsEqual(MultikeyPaths{{0U}, MultikeyComponents{}}, actualMultikeyPaths); } +// Test which verifies that the rounding functions used by s2 follow 'round to even' 
rounding +// behavior. +TEST_F(S2KeyGeneratorTest, VerifyS2RoundingBehavior) { + const double roundDownToEven = 2.5; + ASSERT_EQ(2, MathUtil::FastIntRound(roundDownToEven)); + ASSERT_EQ(2LL, MathUtil::FastInt64Round(roundDownToEven)); + + const double roundUpToEven = 3.5; + ASSERT_EQ(4, MathUtil::FastIntRound(roundUpToEven)); + ASSERT_EQ(4LL, MathUtil::FastInt64Round(roundUpToEven)); + + const double roundDownToEvenNegative = -3.5; + ASSERT_EQ(-4, MathUtil::FastIntRound(roundDownToEvenNegative)); + ASSERT_EQ(-4LL, MathUtil::FastInt64Round(roundDownToEvenNegative)); + + const double roundUpToEvenNegative = -2.5; + ASSERT_EQ(-2, MathUtil::FastIntRound(roundUpToEvenNegative)); + ASSERT_EQ(-2LL, MathUtil::FastInt64Round(roundUpToEvenNegative)); + + const double point = 944920918.5; + ASSERT_EQ(944920918, MathUtil::FastIntRound(point)); + ASSERT_EQ(944920918LL, MathUtil::FastInt64Round(point)); +} } // namespace diff --git a/src/mongo/db/index/skipped_record_tracker.cpp b/src/mongo/db/index/skipped_record_tracker.cpp index e2ab8991a5178..917dbff52a018 100644 --- a/src/mongo/db/index/skipped_record_tracker.cpp +++ b/src/mongo/db/index/skipped_record_tracker.cpp @@ -30,13 +30,43 @@ #include "mongo/db/index/skipped_record_tracker.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/execution_context.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/progress_meter.h" +#include "mongo/util/shared_buffer_fragment.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -46,13 +76,8 @@ namespace { static constexpr StringData kRecordIdField = "recordId"_sd; } -SkippedRecordTracker::SkippedRecordTracker(const IndexCatalogEntry* indexCatalogEntry) - : SkippedRecordTracker(nullptr, indexCatalogEntry, boost::none) {} - SkippedRecordTracker::SkippedRecordTracker(OperationContext* opCtx, - const IndexCatalogEntry* indexCatalogEntry, - boost::optional ident) - : _indexCatalogEntry(indexCatalogEntry) { + boost::optional ident) { if (!ident) { return; } @@ -83,10 +108,7 @@ void SkippedRecordTracker::record(OperationContext* opCtx, const RecordId& recor } writeConflictRetry( - opCtx, - "recordSkippedRecordTracker", - NamespaceString::kIndexBuildEntryNamespace.ns(), - [&]() { + opCtx, "recordSkippedRecordTracker", NamespaceString::kIndexBuildEntryNamespace, [&]() { WriteUnitOfWork wuow(opCtx); uassertStatusOK( _skippedRecordsTable->rs() @@ -112,6 +134,7 @@ bool SkippedRecordTracker::areAllRecordsApplied(OperationContext* opCtx) const { Status 
SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* indexCatalogEntry, RetrySkippedRecordMode mode) { const bool keyGenerationOnly = mode == RetrySkippedRecordMode::kKeyGeneration; @@ -125,8 +148,8 @@ Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx, InsertDeleteOptions options; collection->getIndexCatalog()->prepareInsertDeleteOptions( opCtx, - _indexCatalogEntry->getNSSFromCatalog(opCtx), - _indexCatalogEntry->descriptor(), + indexCatalogEntry->getNSSFromCatalog(opCtx), + indexCatalogEntry->descriptor(), &options); // This should only be called when constraints are being enforced, on a primary. It does not @@ -152,7 +175,7 @@ Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx, progress.get(lk)->hit(); }; - SharedBufferFragmentBuilder pooledBuilder(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder pooledBuilder(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); auto& executionCtx = StorageExecutionContext::get(opCtx); auto recordStore = _skippedRecordsTable->rs(); @@ -181,7 +204,7 @@ Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx, auto keys = executionCtx.keys(); auto multikeyMetadataKeys = executionCtx.multikeyMetadataKeys(); auto multikeyPaths = executionCtx.multikeyPaths(); - auto iam = _indexCatalogEntry->accessMethod()->asSortedData(); + auto iam = indexCatalogEntry->accessMethod()->asSortedData(); try { // Because constraint enforcement is set, this will throw if there are any indexing @@ -189,6 +212,7 @@ Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx, // normally happen if constraints were relaxed. iam->getKeys(opCtx, collection, + indexCatalogEntry, pooledBuilder, skippedDoc, options.getKeysMode, @@ -203,13 +227,19 @@ Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx, onResolved(); continue; } - auto status = iam->insertKeys(opCtx, collection, *keys, options, nullptr, nullptr); + auto status = iam->insertKeys( + opCtx, collection, indexCatalogEntry, *keys, options, nullptr, nullptr); if (!status.isOK()) { return status; } - status = iam->insertKeys( - opCtx, collection, *multikeyMetadataKeys, options, nullptr, nullptr); + status = iam->insertKeys(opCtx, + collection, + indexCatalogEntry, + *multikeyMetadataKeys, + options, + nullptr, + nullptr); if (!status.isOK()) { return status; } @@ -247,13 +277,13 @@ Status SkippedRecordTracker::retrySkippedRecords(OperationContext* opCtx, LOGV2_DEBUG(7333101, logLevel, "Index build: verified key generation for skipped records", - "index"_attr = _indexCatalogEntry->descriptor()->indexName(), + "index"_attr = indexCatalogEntry->descriptor()->indexName(), "numResolved"_attr = resolved); } else { LOGV2_DEBUG(23883, logLevel, "Index build: reapplied skipped records", - "index"_attr = _indexCatalogEntry->descriptor()->indexName(), + "index"_attr = indexCatalogEntry->descriptor()->indexName(), "numResolved"_attr = resolved); } return Status::OK(); diff --git a/src/mongo/db/index/skipped_record_tracker.h b/src/mongo/db/index/skipped_record_tracker.h index 8ad04b04caa42..1f45ce4bc9f8b 100644 --- a/src/mongo/db/index/skipped_record_tracker.h +++ b/src/mongo/db/index/skipped_record_tracker.h @@ -29,8 +29,23 @@ #pragma once +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include 
"mongo/base/string_data.h" #include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/temporary_record_store.h" #include "mongo/platform/atomic_word.h" @@ -54,10 +69,7 @@ class SkippedRecordTracker { kKeyGenerationAndInsertion }; - explicit SkippedRecordTracker(const IndexCatalogEntry* indexCatalogEntry); - SkippedRecordTracker(OperationContext* opCtx, - const IndexCatalogEntry* indexCatalogEntry, - boost::optional ident); + SkippedRecordTracker(OperationContext* opCtx, boost::optional ident); /** * Records a RecordId that was unable to be indexed due to a key generation error. At the @@ -86,6 +98,7 @@ class SkippedRecordTracker { Status retrySkippedRecords( OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* indexCatalogEntry, RetrySkippedRecordMode mode = RetrySkippedRecordMode::kKeyGenerationAndInsertion); boost::optional getTableIdent() const { @@ -98,8 +111,6 @@ class SkippedRecordTracker { } private: - const IndexCatalogEntry* _indexCatalogEntry; - // This temporary record store is owned by the duplicate key tracker. std::unique_ptr _skippedRecordsTable; diff --git a/src/mongo/db/index/sort_key_generator.cpp b/src/mongo/db/index/sort_key_generator.cpp index f9c411beeb77e..079c53192e3d4 100644 --- a/src/mongo/db/index/sort_key_generator.cpp +++ b/src/mongo/db/index/sort_key_generator.cpp @@ -27,13 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/sort_key_generator.h" - -#include "mongo/bson/bsonobj_comparator.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/query/collation/collation_index_key.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { namespace { @@ -75,18 +94,9 @@ SortKeyGenerator::SortKeyGenerator(SortPattern sortPattern, const CollatorInterf constexpr bool isSparse = false; _indexKeyGen = std::make_unique( - fieldNames, fixed, isSparse, KeyString::Version::kLatestVersion, _ordering); + fieldNames, fixed, isSparse, key_string::Version::kLatestVersion, _ordering); - { - // TODO SERVER-74725: Remove this. 
- std::set fieldNameSet; - for (auto& fn : fieldNames) { - fieldNameSet.insert(fn); - } - _sortHasRepeatKey = (fieldNameSet.size() != fieldNames.size()); - } - - if (!_sortHasMeta && !_sortHasRepeatKey) { + if (!_sortHasMeta) { size_t i = 0; for (auto&& keyPart : _sortPattern) { _sortKeyTreeRoot.addSortPatternPart(&keyPart, 0, i++); @@ -103,19 +113,25 @@ Value SortKeyGenerator::computeSortKey(const WorkingSetMember& wsm) const { return computeSortKeyFromIndexKey(wsm); } -KeyString::Value SortKeyGenerator::computeSortKeyString(const BSONObj& obj) { +key_string::Value SortKeyGenerator::computeSortKeyString(const BSONObj& obj) { const bool fastPathSucceeded = fastFillOutSortKeyParts(obj, &_localEltStorage); if (fastPathSucceeded) { - KeyString::HeapBuilder builder(KeyString::Version::kLatestVersion, _ordering); + key_string::HeapBuilder builder(key_string::Version::kLatestVersion, _ordering); for (auto elt : _localEltStorage) { - builder.appendBSONElement(elt); + if (_collator) { + builder.appendBSONElement(elt, [&](StringData stringData) { + return _collator->getComparisonString(stringData); + }); + } else { + builder.appendBSONElement(elt); + } } return builder.release(); } KeyStringSet keySet; - SharedBufferFragmentBuilder allocator(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder allocator(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); const bool skipMultikey = false; MultikeyPaths* multikeyPaths = nullptr; _indexKeyGen->getKeys(allocator, obj, skipMultikey, &keySet, multikeyPaths, _collator); @@ -202,7 +218,7 @@ StatusWith SortKeyGenerator::computeSortKeyFromDocumentWithoutMetadata( // corresponding collation keys. Therefore, we use the simple string comparator when comparing // the keys themselves. KeyStringSet keys; - SharedBufferFragmentBuilder allocator(KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + SharedBufferFragmentBuilder allocator(key_string::HeapBuilder::kHeapAllocatorDefaultBytes); try { // There's no need to compute the prefixes of the indexed fields that cause the index to be @@ -225,7 +241,7 @@ StatusWith SortKeyGenerator::computeSortKeyFromDocumentWithoutMetadata( invariant(!keys.empty()); // The sort key is the first index key, ordered according to the pattern '_sortSpecWithoutMeta'. 
- return KeyString::toBson(*keys.begin(), Ordering::make(_sortSpecWithoutMeta)); + return key_string::toBson(*keys.begin(), Ordering::make(_sortSpecWithoutMeta)); } Value SortKeyGenerator::getCollationComparisonKey(const Value& val) const { @@ -416,7 +432,6 @@ bool SortKeyGenerator::fastFillOutSortKeyPartsHelper(const BSONObj& bson, void SortKeyGenerator::generateSortKeyComponentVector(const BSONObj& bson, std::vector* eltsOut) { tassert(7103704, "Sort cannot have meta", !_sortHasMeta); - tassert(7103701, "Sort cannot have repeat keys", !_sortHasRepeatKey); tassert(7103702, "Cannot pass null as eltsOut", eltsOut); const bool fastPathSucceeded = fastFillOutSortKeyParts(bson, eltsOut); diff --git a/src/mongo/db/index/sort_key_generator.h b/src/mongo/db/index/sort_key_generator.h index c34763195c204..9b6f96877eecc 100644 --- a/src/mongo/db/index/sort_key_generator.h +++ b/src/mongo/db/index/sort_key_generator.h @@ -29,11 +29,26 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/field_name_bloom_filter.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/index/btree_key_generator.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/sort_pattern.h" #include "mongo/db/storage/key_string.h" @@ -63,7 +78,7 @@ class SortKeyGenerator { /** * Computes a KeyString that can be used as the sort key for this object. */ - KeyString::Value computeSortKeyString(const BSONObj& bson); + key_string::Value computeSortKeyString(const BSONObj& bson); /** * Determines all of the portions of the sort key for the given document and populates the @@ -231,8 +246,6 @@ class SortKeyGenerator { // If we're not sorting with a $meta value we can short-cut some work. bool _sortHasMeta = false; - bool _sortHasRepeatKey = false; - std::unique_ptr _indexKeyGen; // Used for fastFillOutSortKeyParts()/extractSortKeyParts(). diff --git a/src/mongo/db/index/sort_key_generator_test.cpp b/src/mongo/db/index/sort_key_generator_test.cpp index bf5899ead53a6..d35b7d59ae8d3 100644 --- a/src/mongo/db/index/sort_key_generator_test.cpp +++ b/src/mongo/db/index/sort_key_generator_test.cpp @@ -27,17 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include + +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/index/sort_key_generator.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/index/wildcard_access_method.cpp b/src/mongo/db/index/wildcard_access_method.cpp index 08d58cdf5b258..734ba975631c3 100644 --- a/src/mongo/db/index/wildcard_access_method.cpp +++ b/src/mongo/db/index/wildcard_access_method.cpp @@ -27,23 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/wildcard_access_method.h" #include "mongo/db/index_names.h" - -#include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/storage/key_format.h" namespace mongo { WildcardAccessMethod::WildcardAccessMethod(IndexCatalogEntry* wildcardState, std::unique_ptr btree) : SortedDataIndexAccessMethod(wildcardState, std::move(btree)), - _keyGen(_descriptor->keyPattern(), - _descriptor->pathProjection(), - _indexCatalogEntry->getCollator(), + _keyGen(wildcardState->descriptor()->keyPattern(), + wildcardState->descriptor()->pathProjection(), + wildcardState->getCollator(), getSortedDataInterface()->getKeyStringVersion(), getSortedDataInterface()->getOrdering(), getSortedDataInterface()->rsKeyFormat()) {} @@ -56,6 +60,7 @@ bool WildcardAccessMethod::shouldMarkIndexAsMultikey(size_t numberOfKeys, void WildcardAccessMethod::doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/wildcard_access_method.h b/src/mongo/db/index/wildcard_access_method.h index 3b5e55e3cfab2..9a7963b1f70e7 100644 --- a/src/mongo/db/index/wildcard_access_method.h +++ b/src/mongo/db/index/wildcard_access_method.h @@ -29,10 +29,24 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/wildcard_key_generator.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -66,13 +80,6 @@ class WildcardAccessMethod final : public SortedDataIndexAccessMethod { return _keyGen.getWildcardProjection(); } - /** - * Returns the Wildcard Index's key pattern. 
- */ - const BSONObj& getKeyPattern() const { - return _descriptor->keyPattern(); - } - /* * We should make a new Ordering for wildcard key generator because the index keys generated for * wildcard indexes include a "$_path" field prior to the wildcard field and the Ordering passed @@ -83,6 +90,7 @@ class WildcardAccessMethod final : public SortedDataIndexAccessMethod { private: void doGetKeys(OperationContext* opCtx, const CollectionPtr& collection, + const IndexCatalogEntry* entry, SharedBufferFragmentBuilder& pooledBufferBuilder, const BSONObj& obj, GetKeysContext context, diff --git a/src/mongo/db/index/wildcard_key_generator.cpp b/src/mongo/db/index/wildcard_key_generator.cpp index 7299ee4d253c4..2534314d01971 100644 --- a/src/mongo/db/index/wildcard_key_generator.cpp +++ b/src/mongo/db/index/wildcard_key_generator.cpp @@ -27,16 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index/wildcard_key_generator.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/query/collation/collation_index_key.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/wildcard_key_generator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -67,7 +88,7 @@ void popPathComponent(BSONElement elem, bool enclosingObjIsArray, FieldRef* path // keys generation. void appendToKeyString(const std::vector& elems, const CollatorInterface* collator, - KeyString::PooledBuilder* keyString) { + key_string::PooledBuilder* keyString) { for (const auto& elem : elems) { if (collator) { keyString->appendBSONElement(elem, [&](StringData stringData) { @@ -81,7 +102,7 @@ void appendToKeyString(const std::vector& elems, // Append 'MinKey' to 'keyString'. Multikey path keys use 'MinKey' for non-wildcard fields. 
void appendToMultiKeyString(const std::vector& elems, - KeyString::PooledBuilder* keyString) { + key_string::PooledBuilder* keyString) { for (size_t i = 0; i < elems.size(); i++) { keyString->appendBSONElement(kMinBSONKey.firstElement()); } @@ -95,7 +116,7 @@ void appendToMultiKeyString(const std::vector& elems, */ class SingleDocumentKeyEncoder { public: - SingleDocumentKeyEncoder(const KeyString::Version& keyStringVersion, + SingleDocumentKeyEncoder(const key_string::Version& keyStringVersion, const Ordering& ordering, const CollatorInterface* collator, const boost::optional& id, @@ -132,7 +153,7 @@ class SingleDocumentKeyEncoder { bool _addKeyForEmptyLeaf(BSONElement elem, const FieldRef& fullPath); - const KeyString::Version& _keyStringVersion; + const key_string::Version& _keyStringVersion; const Ordering& _ordering; const CollatorInterface* _collator; const boost::optional& _id; @@ -183,7 +204,7 @@ void SingleDocumentKeyEncoder::traverseWildcard(BSONObj obj, bool objIsArray, Fi void SingleDocumentKeyEncoder::_addKey(BSONElement elem, const FieldRef& fullPath) { // Wildcard keys are of the form { "": "path.to.field", "": }. - KeyString::PooledBuilder keyString(_pooledBufferBuilder, _keyStringVersion, _ordering); + key_string::PooledBuilder keyString(_pooledBufferBuilder, _keyStringVersion, _ordering); if (!_preElems.empty()) { appendToKeyString(_preElems, _collator, &keyString); @@ -215,7 +236,7 @@ void SingleDocumentKeyEncoder::_addMultiKey(const FieldRef& fullPath) { // 'multikeyPaths' may be nullptr if the access method is being used in an operation which does // not require multikey path generation. if (_multikeyPaths) { - KeyString::PooledBuilder keyString(_pooledBufferBuilder, _keyStringVersion, _ordering); + key_string::PooledBuilder keyString(_pooledBufferBuilder, _keyStringVersion, _ordering); if (!_preElems.empty()) { appendToMultiKeyString(_preElems, &keyString); @@ -307,7 +328,7 @@ WildcardProjection WildcardKeyGenerator::createProjectionExecutor(BSONObj keyPat WildcardKeyGenerator::WildcardKeyGenerator(BSONObj keyPattern, BSONObj pathProjection, const CollatorInterface* collator, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, boost::optional rsKeyFormat) : _proj(createProjectionExecutor(keyPattern, pathProjection)), @@ -403,7 +424,7 @@ void WildcardKeyGenerator::generateKeys(SharedBufferFragmentBuilder& pooledBuffe // a document {a: 1} should still be indexed by this compound wildcard index {a:1, "b.$**": 1}. // In this case, we generate an index key {'': 1, '': MinKey, '': MinKey} for this document. 
if (keysSequence.size() == sequenceSize && (!preElems.empty() || !postElems.empty())) { - KeyString::PooledBuilder keyString(pooledBufferBuilder, _keyStringVersion, _ordering); + key_string::PooledBuilder keyString(pooledBufferBuilder, _keyStringVersion, _ordering); if (preElemsExist.any() || postElemsExist.any()) { if (!preElems.empty()) { diff --git a/src/mongo/db/index/wildcard_key_generator.h b/src/mongo/db/index/wildcard_key_generator.h index afe948c52ad7e..9ab8cbfed71e3 100644 --- a/src/mongo/db/index/wildcard_key_generator.h +++ b/src/mongo/db/index/wildcard_key_generator.h @@ -29,13 +29,22 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/ordering.h" #include "mongo/db/exec/index_path_projection.h" #include "mongo/db/field_ref.h" #include "mongo/db/index/btree_key_generator.h" #include "mongo/db/index_names.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/shared_buffer_fragment.h" namespace mongo { @@ -63,7 +72,7 @@ class WildcardKeyGenerator { WildcardKeyGenerator(BSONObj keyPattern, BSONObj pathProjection, const CollatorInterface* collator, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, boost::optional rsKeyFormat = boost::none); @@ -92,7 +101,7 @@ class WildcardKeyGenerator { WildcardProjection _proj; const CollatorInterface* _collator; const BSONObj _keyPattern; - const KeyString::Version _keyStringVersion; + const key_string::Version _keyStringVersion; const Ordering _ordering; const boost::optional _rsKeyFormat; boost::optional _preBtreeGenerator = boost::none; diff --git a/src/mongo/db/index/wildcard_key_generator_test.cpp b/src/mongo/db/index/wildcard_key_generator_test.cpp index ed6407eabe2f3..2554b47930138 100644 --- a/src/mongo/db/index/wildcard_key_generator_test.cpp +++ b/src/mongo/db/index/wildcard_key_generator_test.cpp @@ -28,15 +28,28 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/index/wildcard_key_generator.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/record_id_helpers.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -48,7 +61,7 @@ KeyStringSet makeKeySet(std::initializer_list init = {}, RecordId id = KeyStringSet keys; Ordering ordering = Ordering::make(BSONObj()); for (const auto& key : init) { - KeyString::HeapBuilder keyString(KeyString::Version::kLatestVersion, key, ordering); + key_string::HeapBuilder keyString(key_string::Version::kLatestVersion, key, ordering); if (!id.isNull()) { keyString.appendRecordId(id); } @@ -61,7 +74,7 @@ std::string dumpKeyset(const KeyStringSet& keyStrings) { std::stringstream ss; ss << "[ "; for (auto& keyString : keyStrings) { - auto key = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto key = key_string::toBson(keyString, 
Ordering::make(BSONObj())); ss << key.toString() << " "; } ss << "]"; @@ -90,7 +103,7 @@ bool assertKeysetsEqual(const KeyStringSet& expectedKeys, const KeyStringSet& ac } struct WildcardKeyGeneratorTest : public unittest::Test { - SharedBufferFragmentBuilder allocator{KeyString::HeapBuilder::kHeapAllocatorDefaultBytes}; + SharedBufferFragmentBuilder allocator{key_string::HeapBuilder::kHeapAllocatorDefaultBytes}; KeyFormat rsKeyFormat = KeyFormat::Long; }; @@ -101,7 +114,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractTopLevelKey) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: 1}"); @@ -121,7 +134,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractKeysFromNestedObject) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: {b: 'one', c: 2}}"); @@ -143,7 +156,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ShouldIndexEmptyObject) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: 1, b: {}}"); @@ -163,7 +176,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ShouldIndexNonNestedEmptyArrayAsUnd WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{ a: [], b: {c: []}, d: [[], {e: []}]}"); @@ -192,7 +205,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMultikeyPath) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: [1, 2, {b: 'one', c: 2}, {d: 3}]}"); @@ -220,7 +233,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMultikeyPathsKeyFormatString WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), KeyFormat::String}; auto inputDoc = fromjson("{a: [1, 2, {b: 'one', c: 2}, {d: 3}]}"); @@ -248,7 +261,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractMultikeyPathAndDedupKeys) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: [1, 2, {b: 'one', c: 2}, {c: 2, d: 3}, {d: 3}]}"); @@ -276,7 +289,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractZeroElementMultikeyPath) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: [1, 2, {b: 'one', c: 2}, {c: 2, d: 3}, {d: 3}], e: []}"); @@ -305,7 +318,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, ExtractNestedMultikeyPaths) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -343,7 +356,7 @@ TEST_F(WildcardKeyGeneratorFullDocumentTest, 
ExtractMixedPathTypesAndAllSubpaths WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -391,7 +404,7 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractSubtreeWithSinglePathCompon WildcardKeyGenerator keyGen{fromjson("{'g.$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -422,7 +435,7 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractSubtreeWithMultiplePathComp WildcardKeyGenerator keyGen{fromjson("{'g.h.$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -453,7 +466,7 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractMultikeySubtree) { WildcardKeyGenerator keyGen{fromjson("{'g.h.j.$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -482,7 +495,7 @@ TEST_F(WildcardKeyGeneratorSingleSubtreeTest, ExtractNestedMultikeySubtree) { WildcardKeyGenerator keyGen{fromjson("{'a.e.$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -516,7 +529,7 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionSingleSubtree) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{g: 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -547,7 +560,7 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionNestedSubtree) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'g.h': 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -578,7 +591,7 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionMultikeySubtree) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'g.h.j': 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -607,7 +620,7 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionNestedMultikeySubtr WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'a.e': 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -636,7 +649,7 @@ TEST_F(WildcardKeyGeneratorInclusionTest, InclusionProjectionMultipleSubtrees) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'a.b': 1, 'a.c': 1, 'a.e': 1, 'g.h.i': 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -671,7 +684,7 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionSingleSubtree) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{g: 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -707,7 +720,7 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionNestedSubtree) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'g.h': 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -744,7 +757,7 @@ 
TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionMultikeySubtree) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'g.h.j': 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -782,7 +795,7 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionNestedMultikeySubtr WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'a.e': 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -823,7 +836,7 @@ TEST_F(WildcardKeyGeneratorExclusionTest, ExclusionProjectionMultipleSubtrees) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'a.b': 0, 'a.c': 0, 'a.e': 0, 'g.h.i': 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -864,7 +877,7 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldIfProjectionIsEmpty) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -895,7 +908,7 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldForSingleSubtreeKeyPattern) { WildcardKeyGenerator keyGen{fromjson("{'a.$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -924,7 +937,7 @@ TEST_F(WildcardKeyGeneratorIdTest, PermitIdFieldAsSingleSubtreeKeyPattern) { WildcardKeyGenerator keyGen{fromjson("{'_id.$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -948,7 +961,7 @@ TEST_F(WildcardKeyGeneratorIdTest, PermitIdSubfieldAsSingleSubtreeKeyPattern) { WildcardKeyGenerator keyGen{fromjson("{'_id.id1.$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -971,7 +984,7 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldByDefaultForInclusionProjection WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{a: 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1000,7 +1013,7 @@ TEST_F(WildcardKeyGeneratorIdTest, PermitIdSubfieldInclusionInExplicitProjection WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'_id.id1': 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1023,7 +1036,7 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldByDefaultForExclusionProjection WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{a: 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1047,7 +1060,7 @@ TEST_F(WildcardKeyGeneratorIdTest, PermitIdSubfieldExclusionInExplicitProjection WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{'_id.id1': 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1079,7 +1092,7 @@ TEST_F(WildcardKeyGeneratorIdTest, IncludeIdFieldIfExplicitlySpecifiedInProjecti WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{_id: 1, a: 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, 
Ordering::make(BSONObj()), rsKeyFormat}; @@ -1110,7 +1123,7 @@ TEST_F(WildcardKeyGeneratorIdTest, ExcludeIdFieldIfExplicitlySpecifiedInProjecti WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{_id: 0, a: 1}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1139,7 +1152,7 @@ TEST_F(WildcardKeyGeneratorIdTest, IncludeIdFieldIfExplicitlySpecifiedInExclusio WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), fromjson("{_id: 1, a: 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1169,7 +1182,7 @@ TEST_F(WildcardKeyGeneratorCollationTest, CollationMixedPathAndKeyTypes) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, &collator, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1223,7 +1236,7 @@ TEST_F(WildcardKeyGeneratorDottedFieldsTest, DoNotIndexDottedFields) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1}"), {}, {}, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1253,7 +1266,7 @@ TEST_F(WildcardKeyGeneratorDottedFieldsTest, DoNotIndexDottedFieldsWithSimilarSu WildcardKeyGenerator keyGen{fromjson("{'a.b.$**': 1}"), {}, {}, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; @@ -1284,7 +1297,7 @@ TEST_F(WildcardKeyGeneratorCompoundTest, ExtractTopLevelKeyCompound) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1, a: 1}"), fromjson("{a: 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: 1, b: 1}"); @@ -1304,7 +1317,7 @@ TEST_F(WildcardKeyGeneratorCompoundTest, ExtractKeysFromNestedObjectCompound) { WildcardKeyGenerator keyGen{fromjson("{c: 1, '$**': 1}"), fromjson("{c: 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: {b: 'one', c: 2}}"); @@ -1326,7 +1339,7 @@ TEST_F(WildcardKeyGeneratorCompoundTest, MiddleWildcardComponentCompound) { WildcardKeyGenerator keyGen{fromjson("{a: 1, '$**': 1, c: 1}"), fromjson("{a: 0, c: 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: 1, b: 2}"); @@ -1347,7 +1360,7 @@ TEST_F(WildcardKeyGeneratorCompoundTest, IndexSubTreeCompound) { WildcardKeyGenerator keyGen{fromjson("{a: 1, 'sub.$**': 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{a: 1, sub: {a: 1, b: 2}}"); @@ -1369,7 +1382,7 @@ TEST_F(WildcardKeyGeneratorCompoundTest, CompoundWildcardIndexShouldBeSparse) { WildcardKeyGenerator keyGen{fromjson("{'$**': 1, c: 1}"), fromjson("{c: 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{}"); @@ -1390,7 +1403,7 @@ TEST_F(WildcardKeyGeneratorCompoundTest, CanGenerateKeysForMultikeyFieldCompound WildcardKeyGenerator keyGen{fromjson("{a: 1, '$**': 1, c: 1}"), fromjson("{a: 0, c: 0}"), nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto 
inputDoc = fromjson("{a: 1, b: [1, {c: [3]}]}"); @@ -1419,7 +1432,7 @@ TEST_F(WildcardKeyGeneratorCompoundTest, CannotCompoundWithMultikeyField) { WildcardKeyGenerator keyGen{fromjson("{'sub.$**': 1, arr: 1}"), {}, nullptr, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), rsKeyFormat}; auto inputDoc = fromjson("{sub: {a: 1}, arr: [1, 2]}"); diff --git a/src/mongo/db/index/wildcard_validation.cpp b/src/mongo/db/index/wildcard_validation.cpp index 57c17ef7680ba..f1ea75c9ce254 100644 --- a/src/mongo/db/index/wildcard_validation.cpp +++ b/src/mongo/db/index/wildcard_validation.cpp @@ -29,8 +29,21 @@ #include "mongo/db/index/wildcard_validation.h" +#include +#include +#include +#include +#include +#include + +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/field_ref.h" #include "mongo/db/index_names.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/index/wildcard_validation.h b/src/mongo/db/index/wildcard_validation.h index 3144e0e1fbb8a..101a1638f6db3 100644 --- a/src/mongo/db/index/wildcard_validation.h +++ b/src/mongo/db/index/wildcard_validation.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" diff --git a/src/mongo/db/index/wildcard_validation_test.cpp b/src/mongo/db/index/wildcard_validation_test.cpp index bac71b0309e9d..af613349a5f2d 100644 --- a/src/mongo/db/index/wildcard_validation_test.cpp +++ b/src/mongo/db/index/wildcard_validation_test.cpp @@ -28,7 +28,12 @@ */ #include "mongo/db/index/wildcard_validation.h" -#include "mongo/unittest/unittest.h" + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/index_build_entry_helpers.cpp b/src/mongo/db/index_build_entry_helpers.cpp index c462bf91a92d0..28ffcff8cfec0 100644 --- a/src/mongo/db/index_build_entry_helpers.cpp +++ b/src/mongo/db/index_build_entry_helpers.cpp @@ -29,20 +29,56 @@ #include "mongo/db/index_build_entry_helpers.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/commit_quorum_options.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_build_entry_gen.h" #include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_role.h" +#include 
"mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -54,14 +90,19 @@ namespace { MONGO_FAIL_POINT_DEFINE(hangBeforeGettingIndexBuildEntry); Status upsert(OperationContext* opCtx, const IndexBuildEntry& indexBuildEntry) { - return writeConflictRetry(opCtx, "upsertIndexBuildEntry", - NamespaceString::kIndexBuildEntryNamespace.ns(), + NamespaceString::kIndexBuildEntryNamespace, [&]() -> Status { - AutoGetCollection collection( - opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX); - if (!collection) { + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest( + NamespaceString::kIndexBuildEntryNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + if (!collection.exists()) { str::stream ss; ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns(); @@ -70,7 +111,7 @@ Status upsert(OperationContext* opCtx, const IndexBuildEntry& indexBuildEntry) { WriteUnitOfWork wuow(opCtx); Helpers::upsert(opCtx, - NamespaceString::kIndexBuildEntryNamespace, + collection, indexBuildEntry.toBSON(), /*fromMigrate=*/false); wuow.commit(); @@ -112,11 +153,18 @@ std::pair buildIndexBuildEntryFilterAndUpdate( Status upsert(OperationContext* opCtx, const BSONObj& filter, const BSONObj& updateMod) { return writeConflictRetry(opCtx, "upsertIndexBuildEntry", - NamespaceString::kIndexBuildEntryNamespace.ns(), + NamespaceString::kIndexBuildEntryNamespace, [&]() -> Status { - AutoGetCollection collection( - opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX); - if (!collection) { + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest( + NamespaceString::kIndexBuildEntryNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + if (!collection.exists()) { str::stream ss; ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns(); @@ -125,7 +173,7 @@ Status upsert(OperationContext* opCtx, const BSONObj& filter, const BSONObj& upd WriteUnitOfWork wuow(opCtx); Helpers::upsert(opCtx, - NamespaceString::kIndexBuildEntryNamespace, + collection, filter, updateMod, /*fromMigrate=*/false); @@ -137,11 +185,19 @@ Status upsert(OperationContext* opCtx, const BSONObj& filter, const BSONObj& upd Status update(OperationContext* opCtx, const BSONObj& filter, const BSONObj& updateMod) { return writeConflictRetry(opCtx, "updateIndexBuildEntry", - NamespaceString::kIndexBuildEntryNamespace.ns(), + NamespaceString::kIndexBuildEntryNamespace, [&]() -> Status { - AutoGetCollection collection( - opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX); - if (!collection) { + ; + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest( + NamespaceString::kIndexBuildEntryNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + if 
(!collection.exists()) { str::stream ss; ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns(); @@ -150,7 +206,7 @@ Status update(OperationContext* opCtx, const BSONObj& filter, const BSONObj& upd WriteUnitOfWork wuow(opCtx); Helpers::update(opCtx, - NamespaceString::kIndexBuildEntryNamespace, + collection, filter, updateMod, /*fromMigrate=*/false); @@ -167,7 +223,7 @@ void ensureIndexBuildEntriesNamespaceExists(OperationContext* opCtx) { writeConflictRetry( opCtx, "createIndexBuildCollection", - NamespaceString::kIndexBuildEntryNamespace.ns(), + NamespaceString::kIndexBuildEntryNamespace, [&]() -> void { AutoGetDb autoDb(opCtx, NamespaceString::kIndexBuildEntryNamespace.dbName(), MODE_IX); auto db = autoDb.ensureDbExists(opCtx); @@ -215,13 +271,17 @@ Status persistIndexCommitQuorum(OperationContext* opCtx, const IndexBuildEntry& Status addIndexBuildEntry(OperationContext* opCtx, const IndexBuildEntry& indexBuildEntry) { return writeConflictRetry( - opCtx, - "addIndexBuildEntry", - NamespaceString::kIndexBuildEntryNamespace.ns(), - [&]() -> Status { - AutoGetCollection collection( - opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX); - if (!collection) { + opCtx, "addIndexBuildEntry", NamespaceString::kIndexBuildEntryNamespace, [&]() -> Status { + const auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + NamespaceString::kIndexBuildEntryNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + if (!collection.exists()) { str::stream ss; ss << "Collection not found: " << NamespaceString::kIndexBuildEntryNamespace.ns(); return Status(ErrorCodes::NamespaceNotFound, ss); @@ -235,7 +295,7 @@ Status addIndexBuildEntry(OperationContext* opCtx, const IndexBuildEntry& indexB auto oplogSlot = oplogInfo->getNextOpTimes(opCtx, 1U)[0]; Status status = collection_internal::insertDocument( opCtx, - *collection, + collection.getCollectionPtr(), InsertStatement(kUninitializedStmtId, indexBuildEntry.toBSON(), oplogSlot), nullptr); @@ -253,7 +313,7 @@ Status removeIndexBuildEntry(OperationContext* opCtx, return writeConflictRetry( opCtx, "removeIndexBuildEntry", - NamespaceString::kIndexBuildEntryNamespace.ns(), + NamespaceString::kIndexBuildEntryNamespace, [&]() -> Status { if (!collection) { str::stream ss; @@ -298,7 +358,7 @@ StatusWith getIndexBuildEntry(OperationContext* opCtx, UUID ind // This operation does not perform any writes, but the index building code is sensitive to // exceptions and we must protect it from unanticipated write conflicts from reads. bool foundObj = writeConflictRetry( - opCtx, "getIndexBuildEntry", NamespaceString::kIndexBuildEntryNamespace.ns(), [&]() { + opCtx, "getIndexBuildEntry", NamespaceString::kIndexBuildEntryNamespace, [&]() { return Helpers::findOne( opCtx, collection.getCollection(), BSON("_id" << indexBuildUUID), obj); }); diff --git a/src/mongo/db/index_build_entry_helpers_test.cpp b/src/mongo/db/index_build_entry_helpers_test.cpp index 9c14ee9ebaafa..983aecacaadef 100644 --- a/src/mongo/db/index_build_entry_helpers_test.cpp +++ b/src/mongo/db/index_build_entry_helpers_test.cpp @@ -27,23 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include #include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/bsontypes.h" #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog/commit_quorum_options.h" #include "mongo/db/catalog/index_build_entry_gen.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/index_build_entry_helpers.h" -#include "mongo/db/service_context.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/db/index_builds_coordinator.cpp b/src/mongo/db/index_builds_coordinator.cpp index fc2b76c5153eb..170808b3072fa 100644 --- a/src/mongo/db/index_builds_coordinator.cpp +++ b/src/mongo/db/index_builds_coordinator.cpp @@ -29,48 +29,87 @@ #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/catalog/index_builds_manager.h" -#include "mongo/util/future.h" +#include +#include #include #include +#include +#include +#include +#include #include +#include +#include + +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" -#include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/base/error_codes.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_yield_restore.h" #include "mongo/db/catalog/commit_quorum_options.h" -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_build_entry_gen.h" +#include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/index/wildcard_key_generator.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/index/index_build_interceptor.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_build_entry_helpers.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/cloner_utils.h" +#include "mongo/db/repl/member_config.h" #include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/timestamp_block.h" #include "mongo/db/s/collection_sharding_state.h" -#include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/server_options.h" #include "mongo/db/server_recovery.h" #include "mongo/db/service_context.h" +#include 
"mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/storage/disk_space_util.h" #include "mongo/db/storage/durable_catalog.h" -#include "mongo/db/storage/encryption_hooks.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/db/storage/storage_util.h" #include "mongo/db/storage/two_phase_index_build_knobs_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/message.h" #include "mongo/s/shard_key_pattern.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/assert_util.h" -#include "mongo/util/scoped_counter.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #include "mongo/util/testing_proctor.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -93,7 +132,11 @@ MONGO_FAIL_POINT_DEFINE(hangIndexBuildBeforeWaitingUntilMajorityOpTime); MONGO_FAIL_POINT_DEFINE(hangBeforeUnregisteringAfterCommit); MONGO_FAIL_POINT_DEFINE(failSetUpResumeIndexBuild); MONGO_FAIL_POINT_DEFINE(failIndexBuildWithError); +MONGO_FAIL_POINT_DEFINE(failIndexBuildWithErrorInSecondDrain); MONGO_FAIL_POINT_DEFINE(hangInRemoveIndexBuildEntryAfterCommitOrAbort); +MONGO_FAIL_POINT_DEFINE(hangIndexBuildOnSetupBeforeTakingLocks); +MONGO_FAIL_POINT_DEFINE(hangAbortIndexBuildByBuildUUIDAfterLocks); +MONGO_FAIL_POINT_DEFINE(hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum); IndexBuildsCoordinator::IndexBuildsSSS::IndexBuildsSSS() : ServerStatusSection("indexBuilds"), @@ -144,7 +187,8 @@ bool shouldBuildIndexesOnEmptyCollectionSinglePhased(OperationContext* opCtx, const CollectionPtr& collection, IndexBuildProtocol protocol) { const auto& nss = collection->ns(); - invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X), str::stream() << nss); + invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X), + str::stream() << nss.toStringForErrorMsg()); auto replCoord = repl::ReplicationCoordinator::get(opCtx); @@ -193,8 +237,6 @@ void removeIndexBuildEntryAfterCommitOrAbort(OperationContext* opCtx, return; } - hangInRemoveIndexBuildEntryAfterCommitOrAbort.pauseWhileSet(); - auto replCoord = repl::ReplicationCoordinator::get(opCtx); if (!replCoord->canAcceptWritesFor(opCtx, dbAndUUID)) { return; @@ -269,8 +311,7 @@ void onCommitIndexBuild(OperationContext* opCtx, */ void onAbortIndexBuild(OperationContext* opCtx, const NamespaceString& nss, - ReplIndexBuildState& replState, - const Status& cause) { + ReplIndexBuildState& replState) { if (IndexBuildProtocol::kTwoPhase != replState.protocol) { return; } @@ -280,8 +321,13 @@ void onAbortIndexBuild(OperationContext* opCtx, auto opObserver = opCtx->getServiceContext()->getOpObserver(); auto collUUID = replState.collectionUUID; auto fromMigrate = false; - opObserver->onAbortIndexBuild( - opCtx, nss, collUUID, replState.buildUUID, replState.indexSpecs, cause, fromMigrate); + opObserver->onAbortIndexBuild(opCtx, + nss, + collUUID, + 
replState.buildUUID, + replState.indexSpecs, + replState.getAbortStatus(), + fromMigrate); } /** @@ -367,10 +413,9 @@ repl::OpTime getLatestOplogOpTime(OperationContext* opCtx) { BSONObj oplogEntryBSON; // This operation does not perform any writes, but the index building code is sensitive to // exceptions and we must protect it from unanticipated write conflicts from reads. - writeConflictRetry( - opCtx, "getLatestOplogOpTime", NamespaceString::kRsOplogNamespace.ns(), [&]() { - invariant(Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntryBSON)); - }); + writeConflictRetry(opCtx, "getLatestOplogOpTime", NamespaceString::kRsOplogNamespace, [&]() { + invariant(Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntryBSON)); + }); auto optime = repl::OpTime::parseFromOplogEntry(oplogEntryBSON); invariant(optime.isOK(), @@ -500,6 +545,30 @@ IndexBuildsCoordinator* IndexBuildsCoordinator::get(OperationContext* OperationC return get(OperationContext->getServiceContext()); } +Status IndexBuildsCoordinator::checkDiskSpaceSufficientToStartIndexBuild(OperationContext* opCtx) { + auto storageEngine = opCtx->getServiceContext()->getStorageEngine(); + const bool filesNotAllInSameDirectory = + storageEngine->isUsingDirectoryPerDb() || storageEngine->isUsingDirectoryForIndexes(); + if (filesNotAllInSameDirectory) { + LOGV2(7333300, + "Index build: skipping available disk space check before starting index build as " + "storage engine stores data files in different directories"); + return Status::OK(); + } + + // Must hold the global lock to ensure safe access to storageGlobalParams.dbpath. + dassert(opCtx->lockState()->isLocked()); + const auto availableBytes = getAvailableDiskSpaceBytesInDbPath(storageGlobalParams.dbpath); + const int64_t requiredBytes = gIndexBuildMinAvailableDiskSpaceMB.load() * 1024 * 1024; + if (availableBytes <= requiredBytes) { + return Status( + ErrorCodes::OutOfDiskSpace, + fmt::format("available disk space of {} bytes is less than required minimum of {}", + availableBytes, + requiredBytes)); + } + return Status::OK(); +} std::unique_ptr IndexBuildsCoordinator::makeKillIndexBuildOnLowDiskSpaceAction() { @@ -513,6 +582,14 @@ IndexBuildsCoordinator::makeKillIndexBuildOnLowDiskSpaceAction() { } void act(OperationContext* opCtx, int64_t availableBytes) noexcept final { + if (!feature_flags::gIndexBuildGracefulErrorHandling.isEnabled( + serverGlobalParams.featureCompatibility)) { + LOGV2(6826200, + "Index build: disk space monitor detected we're low on storage space but " + "'featureFlagIndexBuildGracefulErrorHandling' is disabled. Ignoring it"); + return; + } + if (_coord->noIndexBuildInProgress()) { // Avoid excessive logging when no index builds are in progress. Nothing prevents an // index build from starting after this check. Subsequent calls will see any @@ -619,13 +696,13 @@ Status IndexBuildsCoordinator::_startIndexBuildForRecovery(OperationContext* opC // using the same ident to avoid doing untimestamped writes to the catalog. 
for (const auto& indexName : indexNames) { auto indexCatalog = collection.getWritableCollection(opCtx)->getIndexCatalog(); - auto desc = - indexCatalog->findIndexByName(opCtx, - indexName, - IndexCatalog::InclusionPolicy::kUnfinished | - IndexCatalog::InclusionPolicy::kFrozen); + auto writableEntry = indexCatalog->getWritableEntryByName( + opCtx, + indexName, + IndexCatalog::InclusionPolicy::kUnfinished | + IndexCatalog::InclusionPolicy::kFrozen); Status status = indexCatalog->resetUnfinishedIndexForRecovery( - opCtx, collection.getWritableCollection(opCtx), desc); + opCtx, collection.getWritableCollection(opCtx), writableEntry); if (!status.isOK()) { return status; } @@ -678,11 +755,11 @@ Status IndexBuildsCoordinator::_dropIndexesForRepair(OperationContext* opCtx, invariant(collection->isInitialized()); for (const auto& indexName : indexNames) { auto indexCatalog = collection.getWritableCollection(opCtx)->getIndexCatalog(); - auto descriptor = - indexCatalog->findIndexByName(opCtx, indexName, IndexCatalog::InclusionPolicy::kReady); - if (descriptor) { - Status s = - indexCatalog->dropIndex(opCtx, collection.getWritableCollection(opCtx), descriptor); + auto writableEntry = indexCatalog->getWritableEntryByName( + opCtx, indexName, IndexCatalog::InclusionPolicy::kReady); + if (writableEntry->descriptor()) { + Status s = indexCatalog->dropIndexEntry( + opCtx, collection.getWritableCollection(opCtx), writableEntry); if (!s.isOK()) { return s; } @@ -690,13 +767,13 @@ Status IndexBuildsCoordinator::_dropIndexesForRepair(OperationContext* opCtx, } // The index must be unfinished or frozen if it isn't ready. - descriptor = indexCatalog->findIndexByName(opCtx, - indexName, - IndexCatalog::InclusionPolicy::kUnfinished | - IndexCatalog::InclusionPolicy::kFrozen); - invariant(descriptor); + writableEntry = indexCatalog->getWritableEntryByName( + opCtx, + indexName, + IndexCatalog::InclusionPolicy::kUnfinished | IndexCatalog::InclusionPolicy::kFrozen); + invariant(writableEntry); Status s = indexCatalog->dropUnfinishedIndex( - opCtx, collection.getWritableCollection(opCtx), descriptor); + opCtx, collection.getWritableCollection(opCtx), writableEntry); if (!s.isOK()) { return s; } @@ -786,8 +863,8 @@ Status IndexBuildsCoordinator::_setUpResumeIndexBuild(OperationContext* opCtx, return status; } -void IndexBuildsCoordinator::waitForAllIndexBuildsToStopForShutdown(OperationContext* opCtx) { - activeIndexBuilds.waitForAllIndexBuildsToStopForShutdown(opCtx); +void IndexBuildsCoordinator::waitForAllIndexBuildsToStop(OperationContext* opCtx) { + activeIndexBuilds.waitForAllIndexBuildsToStop(opCtx); } std::vector IndexBuildsCoordinator::abortCollectionIndexBuilds( @@ -907,25 +984,7 @@ void IndexBuildsCoordinator::abortTenantIndexBuilds(OperationContext* opCtx, void IndexBuildsCoordinator::abortAllIndexBuildsForInitialSync(OperationContext* opCtx, const std::string& reason) { - LOGV2(4833200, "About to abort all index builders running", "reason"_attr = reason); - - auto builds = [&]() -> std::vector> { - auto indexBuildFilter = [](const auto& replState) { - return true; - }; - return activeIndexBuilds.filterIndexBuilds(indexBuildFilter); - }(); - for (const auto& replState : builds) { - if (!abortIndexBuildByBuildUUID( - opCtx, replState->buildUUID, IndexBuildAction::kInitialSyncAbort, reason)) { - // The index build may already be in the midst of tearing down. 
- LOGV2(5010503, - "Index build: failed to abort index build for initial sync", - "buildUUID"_attr = replState->buildUUID, - "database"_attr = replState->dbName, - "collectionUUID"_attr = replState->collectionUUID); - } - } + _abortAllIndexBuildsWithReason(opCtx, IndexBuildAction::kInitialSyncAbort, reason); } namespace { @@ -955,12 +1014,7 @@ bool forceSelfAbortIndexBuild(OperationContext* opCtx, void IndexBuildsCoordinator::abortAllIndexBuildsDueToDiskSpace(OperationContext* opCtx, std::int64_t availableBytes, std::int64_t requiredBytes) { - auto builds = [&]() -> std::vector> { - auto indexBuildFilter = [](const auto& replState) { - return true; - }; - return activeIndexBuilds.filterIndexBuilds(indexBuildFilter); - }(); + auto builds = activeIndexBuilds.getAllIndexBuilds(); auto abortStatus = Status(ErrorCodes::OutOfDiskSpace, fmt::format("available disk space of {} bytes is less than required minimum of {}", @@ -971,6 +1025,9 @@ void IndexBuildsCoordinator::abortAllIndexBuildsDueToDiskSpace(OperationContext* if (forceSelfAbortIndexBuild(opCtx, replState, abortStatus)) { // Increase metrics only if the build was actually aborted by the above call. indexBuildsSSS.killedDueToInsufficientDiskSpace.addAndFetch(1); + LOGV2(7333601, + "Index build: aborted due to insufficient disk space", + "buildUUID"_attr = replState->buildUUID); } } } @@ -1041,7 +1098,7 @@ void IndexBuildsCoordinator::applyStartIndexBuild(OperationContext* opCtx, // proceeding with building them. if (indexBuildOptions.applicationMode == ApplicationMode::kInitialSync) { auto dbAndUUID = NamespaceStringOrUUID(nss.db().toString(), collUUID); - writeConflictRetry(opCtx, "IndexBuildsCoordinator::applyStartIndexBuild", nss.ns(), [&] { + writeConflictRetry(opCtx, "IndexBuildsCoordinator::applyStartIndexBuild", nss, [&] { WriteUnitOfWork wuow(opCtx); AutoGetCollection coll(opCtx, dbAndUUID, MODE_X); @@ -1057,20 +1114,20 @@ void IndexBuildsCoordinator::applyStartIndexBuild(OperationContext* opCtx, str::stream() << "Index spec is missing the 'name' field " << spec, !name.empty()); - if (auto desc = indexCatalog->findIndexByName( + if (auto writableEntry = indexCatalog->getWritableEntryByName( opCtx, name, IndexCatalog::InclusionPolicy::kReady)) { - uassertStatusOK( - indexCatalog->dropIndex(opCtx, coll.getWritableCollection(opCtx), desc)); + uassertStatusOK(indexCatalog->dropIndexEntry( + opCtx, coll.getWritableCollection(opCtx), writableEntry)); } - const IndexDescriptor* desc = indexCatalog->findIndexByKeyPatternAndOptions( + auto writableEntry = indexCatalog->getWritableEntryByKeyPatternAndOptions( opCtx, spec.getObjectField(IndexDescriptor::kKeyPatternFieldName), spec, IndexCatalog::InclusionPolicy::kReady); - if (desc) { - uassertStatusOK( - indexCatalog->dropIndex(opCtx, coll.getWritableCollection(opCtx), desc)); + if (writableEntry) { + uassertStatusOK(indexCatalog->dropIndexEntry( + opCtx, coll.getWritableCollection(opCtx), writableEntry)); } } @@ -1252,7 +1309,7 @@ void IndexBuildsCoordinator::applyAbortIndexBuild(OperationContext* opCtx, auto indexCatalog = autoColl.getWritableCollection(opCtx)->getIndexCatalog(); for (const auto& indexSpec : oplogEntry.indexSpecs) { - const IndexDescriptor* desc = indexCatalog->findIndexByName( + auto writableEntry = indexCatalog->getWritableEntryByName( opCtx, indexSpec.getStringField(IndexDescriptor::kIndexNameFieldName), IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished | @@ -1262,9 +1319,9 @@ void 
IndexBuildsCoordinator::applyAbortIndexBuild(OperationContext* opCtx, "Dropping unfinished index during oplog recovery as standalone", "spec"_attr = indexSpec); - invariant(desc && desc->getEntry()->isFrozen()); + invariant(writableEntry && writableEntry->isFrozen()); invariant(indexCatalog->dropUnfinishedIndex( - opCtx, autoColl.getWritableCollection(opCtx), desc)); + opCtx, autoColl.getWritableCollection(opCtx), writableEntry)); } wuow.commit(); @@ -1278,7 +1335,7 @@ boost::optional IndexBuildsCoordinator::abortIndexBuildByIndexNames( const std::vector& indexNames, std::string reason) { boost::optional buildUUID; - auto indexBuilds = _getIndexBuilds(); + auto indexBuilds = activeIndexBuilds.getAllIndexBuilds(); auto onIndexBuild = [&](const std::shared_ptr& replState) { if (replState->collectionUUID != collectionUUID) { return; @@ -1308,12 +1365,53 @@ boost::optional IndexBuildsCoordinator::abortIndexBuildByIndexNames( return buildUUID; } +void IndexBuildsCoordinator::_abortAllIndexBuildsWithReason(OperationContext* opCtx, + IndexBuildAction action, + const std::string& reason) { + LOGV2(7738702, + "About to abort all running index builders", + "reason"_attr = reason, + "action"_attr = indexBuildActionToString(action)); + + auto builds = activeIndexBuilds.getAllIndexBuilds(); + for (const auto& replState : builds) { + if (!abortIndexBuildByBuildUUID(opCtx, replState->buildUUID, action, reason)) { + // The index build may already be in the midst of tearing down. + LOGV2(7738703, + "Index build: failed to abort index build, this is expected if the build is " + "already being committed or in the process of tearing down.", + "buildUUID"_attr = replState->buildUUID, + "database"_attr = replState->dbName, + "collectionUUID"_attr = replState->collectionUUID); + } + } +} + +void IndexBuildsCoordinator::abortAllIndexBuildsWithReason(OperationContext* opCtx, + const std::string& reason) { + _abortAllIndexBuildsWithReason(opCtx, IndexBuildAction::kPrimaryAbort, reason); +} + +void IndexBuildsCoordinator::setNewIndexBuildsBlocked(const bool newValue, + boost::optional reason) { + stdx::unique_lock lk(_newIndexBuildsBlockedMutex); + invariant(newValue != _newIndexBuildsBlocked); + invariant((newValue && reason) || (!newValue && !reason)); + + _newIndexBuildsBlocked = newValue; + _blockReason = reason; + + if (!_newIndexBuildsBlocked) { + _newIndexBuildsBlockedCV.notify_all(); + } +} + bool IndexBuildsCoordinator::hasIndexBuilder(OperationContext* opCtx, const UUID& collectionUUID, const std::vector& indexNames) const { bool foundIndexBuilder = false; boost::optional buildUUID; - auto indexBuilds = _getIndexBuilds(); + auto indexBuilds = activeIndexBuilds.getAllIndexBuilds(); auto onIndexBuild = [&](const std::shared_ptr& replState) { if (replState->collectionUUID != collectionUUID) { return; @@ -1374,6 +1472,8 @@ bool IndexBuildsCoordinator::abortIndexBuildByBuildUUID(OperationContext* opCtx, AutoGetCollection indexBuildEntryColl( opCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX); + hangAbortIndexBuildByBuildUUIDAfterLocks.pauseWhileSet(); + // If we are using two-phase index builds and are no longer primary after receiving an // abort, we cannot replicate an abortIndexBuild oplog entry. Continue holding the RSTL to // check the replication state and to prevent any state transitions from happening while @@ -1433,44 +1533,7 @@ bool IndexBuildsCoordinator::abortIndexBuildByBuildUUID(OperationContext* opCtx, } // At this point we must continue aborting the index build. 
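`setNewIndexBuildsBlocked` above is a small gate: a boolean and an optional reason guarded by `_newIndexBuildsBlockedMutex`, with all waiters released via `notify_all` when the gate reopens. A distilled, standalone version of the same pattern follows; the class and member names are illustrative rather than the coordinator's API:

```cpp
#include <condition_variable>
#include <mutex>
#include <optional>
#include <string>

class NewBuildsGate {
public:
    // Block (with a reason) or unblock new builds; mirrors the invariant in the
    // diff that a reason is supplied exactly when blocking.
    void setBlocked(bool blocked, std::optional<std::string> reason = std::nullopt) {
        std::lock_guard<std::mutex> lk(_mutex);
        _blocked = blocked;
        _reason = std::move(reason);
        if (!_blocked) {
            _cv.notify_all();  // wake every thread parked in waitUntilUnblocked()
        }
    }

    // Called on the registration path before any build state is created.
    void waitUntilUnblocked() {
        std::unique_lock<std::mutex> lk(_mutex);
        _cv.wait(lk, [&] { return !_blocked; });
    }

private:
    std::mutex _mutex;
    std::condition_variable _cv;
    bool _blocked = false;
    std::optional<std::string> _reason;
};
```

The coordinator's waiter side, added further down as `_waitIfNewIndexBuildsBlocked`, additionally logs once when it starts waiting and uses an interruptible wait, so a blocked `createIndexes` can still be killed.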
- try { - _completeAbort(opCtx, - replState, - *indexBuildEntryColl, - signalAction, - {ErrorCodes::IndexBuildAborted, reason}); - } catch (const DBException& e) { - LOGV2_FATAL( - 4656011, - "Failed to abort index build after partially tearing-down index build state", - "buildUUID"_attr = replState->buildUUID, - "error"_attr = e); - } - - // Wait for the builder thread to receive the signal before unregistering. Don't release the - // Collection lock until this happens, guaranteeing the thread has stopped making progress - // and has exited. - auto fut = replState->sharedPromise.getFuture(); - auto waitStatus = fut.waitNoThrow(); // Result from waiting on future. - auto buildStatus = fut.getNoThrow().getStatus(); // Result from _runIndexBuildInner(). - LOGV2(20655, - "Index build: joined after abort", - "buildUUID"_attr = buildUUID, - "waitResult"_attr = waitStatus, - "status"_attr = buildStatus); - - if (IndexBuildAction::kRollbackAbort == signalAction) { - // Index builds interrupted for rollback may be resumed during recovery. We wait for the - // builder thread to complete before persisting the in-memory state that will be used - // to resume the index build. - // No locks are required when aborting due to rollback. This performs no storage engine - // writes, only cleans up the remaining in-memory state. - CollectionWriter coll(opCtx, replState->collectionUUID); - _indexBuildsManager.abortIndexBuildWithoutCleanup( - opCtx, coll.get(), replState->buildUUID, replState->isResumable()); - } - - activeIndexBuilds.unregisterIndexBuild(&_indexBuildsManager, replState); + _completeExternalAbort(opCtx, replState, *indexBuildEntryColl, signalAction); break; } @@ -1480,8 +1543,7 @@ bool IndexBuildsCoordinator::abortIndexBuildByBuildUUID(OperationContext* opCtx, void IndexBuildsCoordinator::_completeAbort(OperationContext* opCtx, std::shared_ptr replState, const CollectionPtr& indexBuildEntryCollection, - IndexBuildAction signalAction, - Status reason) { + IndexBuildAction signalAction) { if (!replState->isAbortCleanUpRequired()) { LOGV2(7329402, "Index build: abort cleanup not required", @@ -1509,7 +1571,7 @@ void IndexBuildsCoordinator::_completeAbort(OperationContext* opCtx, str::stream() << "singlePhase: " << (IndexBuildProtocol::kSinglePhase == replState->protocol)); auto onCleanUpFn = [&] { - onAbortIndexBuild(opCtx, coll->ns(), *replState, reason); + onAbortIndexBuild(opCtx, coll->ns(), *replState); }; _indexBuildsManager.abortIndexBuild(opCtx, coll, replState->buildUUID, onCleanUpFn); removeIndexBuildEntryAfterCommitOrAbort( @@ -1561,13 +1623,53 @@ void IndexBuildsCoordinator::_completeAbort(OperationContext* opCtx, LOGV2(465611, "Cleaned up index build after abort. ", "buildUUID"_attr = replState->buildUUID); } +void IndexBuildsCoordinator::_completeExternalAbort(OperationContext* opCtx, + std::shared_ptr replState, + const CollectionPtr& indexBuildEntryColl, + IndexBuildAction signalAction) { + + const auto status = replState->getAbortStatus(); + try { + _completeAbort(opCtx, replState, indexBuildEntryColl, signalAction); + } catch (const DBException& e) { + LOGV2_FATAL(4656011, + "Failed to abort index build after partially tearing-down index build state", + "buildUUID"_attr = replState->buildUUID, + "error"_attr = e); + } + + // Wait for the builder thread to receive the signal before unregistering. Don't release the + // Collection lock until this happens, guaranteeing the thread has stopped making progress + // and has exited. 
+ auto fut = replState->sharedPromise.getFuture(); + auto waitStatus = fut.waitNoThrow(); // Result from waiting on future. + auto buildStatus = fut.getNoThrow().getStatus(); // Result from _runIndexBuildInner(). + LOGV2(20655, + "Index build: joined after abort", + "buildUUID"_attr = replState->buildUUID, + "waitResult"_attr = waitStatus, + "status"_attr = buildStatus); + + if (IndexBuildAction::kRollbackAbort == signalAction) { + // Index builds interrupted for rollback may be resumed during recovery. We wait for the + // builder thread to complete before persisting the in-memory state that will be used + // to resume the index build. + // No locks are required when aborting due to rollback. This performs no storage engine + // writes, only cleans up the remaining in-memory state. + CollectionWriter coll(opCtx, replState->collectionUUID); + _indexBuildsManager.abortIndexBuildWithoutCleanup( + opCtx, coll.get(), replState->buildUUID, replState->isResumable()); + } + + replState->completeAbort(opCtx); + activeIndexBuilds.unregisterIndexBuild(&_indexBuildsManager, replState); +} + void IndexBuildsCoordinator::_completeSelfAbort(OperationContext* opCtx, std::shared_ptr replState, - const CollectionPtr& indexBuildEntryCollection, - Status reason) { - _completeAbort( - opCtx, replState, indexBuildEntryCollection, IndexBuildAction::kPrimaryAbort, reason); - replState->abortSelf(opCtx); + const CollectionPtr& indexBuildEntryCollection) { + _completeAbort(opCtx, replState, indexBuildEntryCollection, IndexBuildAction::kPrimaryAbort); + replState->completeAbort(opCtx); activeIndexBuilds.unregisterIndexBuild(&_indexBuildsManager, replState); } @@ -1586,7 +1688,7 @@ void IndexBuildsCoordinator::_completeAbortForShutdown( } std::size_t IndexBuildsCoordinator::getActiveIndexBuildCount(OperationContext* opCtx) { - auto indexBuilds = _getIndexBuilds(); + auto indexBuilds = activeIndexBuilds.getAllIndexBuilds(); // We use forEachIndexBuild() to log basic details on the current index builds and don't intend // to modify any of the index builds, hence the no-op. forEachIndexBuild(indexBuilds, "IndexBuildsCoordinator::getActiveIndexBuildCount"_sd, nullptr); @@ -1617,7 +1719,6 @@ void IndexBuildsCoordinator::onStepUp(OperationContext* opCtx) { PromiseAndFuture promiseAndFuture; _stepUpThread = stdx::thread([this, &promiseAndFuture] { Client::initThread("IndexBuildsCoordinator-StepUp"); - auto threadCtx = Client::getCurrent()->makeOperationContext(); threadCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); promiseAndFuture.promise.emplaceValue(); @@ -1632,7 +1733,7 @@ void IndexBuildsCoordinator::onStepUp(OperationContext* opCtx) { } void IndexBuildsCoordinator::_onStepUpAsyncTaskFn(OperationContext* opCtx) { - auto indexBuilds = _getIndexBuilds(); + auto indexBuilds = activeIndexBuilds.getAllIndexBuilds(); const auto signalCommitQuorumAndRetrySkippedRecords = [this, opCtx](const std::shared_ptr& replState) { if (replState->protocol != IndexBuildProtocol::kTwoPhase) { @@ -1647,6 +1748,12 @@ void IndexBuildsCoordinator::_onStepUpAsyncTaskFn(OperationContext* opCtx) { const NamespaceStringOrUUID dbAndUUID(replState->dbName, replState->collectionUUID); AutoGetCollection autoColl(opCtx, dbAndUUID, MODE_IX); + // The index build hasn't yet completed its initial setup, and persisted state like + // commit quorum information is absent. There's nothing to do here. + if (replState->isSettingUp()) { + return; + } + // The index build might have committed or aborted while looping and not holding the // collection lock. 
Re-checking if it is still active after taking locks would not solve // the issue, as build can still be registered as active, even if it is in an aborted or @@ -1659,6 +1766,8 @@ void IndexBuildsCoordinator::_onStepUpAsyncTaskFn(OperationContext* opCtx) { // This reads from system.indexBuilds collection to see if commit quorum got // satisfied. try { + hangOnStepUpAsyncTaskBeforeCheckingCommitQuorum.pauseWhileSet(); + if (_signalIfCommitQuorumIsSatisfied(opCtx, replState)) { // The index build has been signalled to commit. As retrying skipped records // during step-up is done to prevent waiting until commit time, if the build @@ -1667,12 +1776,20 @@ void IndexBuildsCoordinator::_onStepUpAsyncTaskFn(OperationContext* opCtx) { return; } } catch (DBException& ex) { + // If the operation context is interrupted (shutdown, stepdown, killOp), stop + // the verification process and exit. + opCtx->checkForInterrupt(); + fassert(31440, ex.toStatus()); } } try { - // Only checks if key generation is valid, does not actually insert. + // Unlike the primary, secondaries cannot fail immediately when detecting key + // generation errors; they instead temporarily store them in the 'skipped records' + // table, to validate them on commit. As an optimisation to potentially detect + // errors earlier, check the table on step-up. Unlike during commit, we only check + // key generation here, we do not actually insert the keys. uassertStatusOK(_indexBuildsManager.retrySkippedRecords( opCtx, replState->buildUUID, @@ -1680,13 +1797,12 @@ void IndexBuildsCoordinator::_onStepUpAsyncTaskFn(OperationContext* opCtx) { IndexBuildsManager::RetrySkippedRecordMode::kKeyGeneration)); } catch (const DBException& ex) { - // Shutdown or replication state change might happen while iterating the index - // builds. In both cases, the opCtx is interrupted, in which case we want to stop - // the verification process and exit. This might also be the case for a killOp. + // If the operation context is interrupted (shutdown, stepdown, killOp), stop the + // verification process and exit. opCtx->checkForInterrupt(); - // All other errors must be due to key generation. We can abort the build early as - // it would eventually fail anyways during the commit phase retry. + // All other errors must be due to key generation. Abort the build now, instead of + // failing later during the commit phase retry. 
auto status = ex.toStatus().withContext("Skipped records retry failed on step-up"); abortIndexBuildByBuildUUID( opCtx, replState->buildUUID, IndexBuildAction::kPrimaryAbort, status.reason()); @@ -1711,7 +1827,7 @@ IndexBuilds IndexBuildsCoordinator::stopIndexBuildsForRollback(OperationContext* IndexBuilds buildsStopped; - auto indexBuilds = _getIndexBuilds(); + auto indexBuilds = activeIndexBuilds.getAllIndexBuilds(); auto onIndexBuild = [&](const std::shared_ptr& replState) { if (IndexBuildProtocol::kSinglePhase == replState->protocol) { LOGV2(20659, @@ -1857,7 +1973,7 @@ void IndexBuildsCoordinator::restartIndexBuildsForRecovery( } bool IndexBuildsCoordinator::noIndexBuildInProgress() const { - return activeIndexBuilds.getActiveIndexBuilds() == 0; + return activeIndexBuilds.getActiveIndexBuildsCount() == 0; } int IndexBuildsCoordinator::numInProgForDb(const DatabaseName& dbName) const { @@ -1923,7 +2039,7 @@ void IndexBuildsCoordinator::assertNoBgOpInProgForDb(const DatabaseName& dbName) uassert(ErrorCodes::BackgroundOperationInProgressForDatabase, fmt::format("cannot perform operation: an index build is currently running for " "database {}. Found index build: {}", - dbName.toString(), + dbName.toStringForErrorMsg(), firstIndexBuildUUID->toString()), indexBuilds.empty()); } @@ -2112,6 +2228,34 @@ Status IndexBuildsCoordinator::_setUpIndexBuildForTwoPhaseRecovery( return _startIndexBuildForRecovery(opCtx, nss, specs, buildUUID, protocol); } +void IndexBuildsCoordinator::_waitIfNewIndexBuildsBlocked(OperationContext* opCtx, + const UUID& collectionUUID, + const std::vector& specs, + const UUID& buildUUID) { + stdx::unique_lock lk(_newIndexBuildsBlockedMutex); + bool messageLogged = false; + + opCtx->waitForConditionOrInterrupt(_newIndexBuildsBlockedCV, lk, [&] { + if (_newIndexBuildsBlocked && !messageLogged) { + LOGV2(7738700, + "Index build: new index builds are blocked, waiting", + "reason"_attr = *_blockReason, + "indexSpecs"_attr = specs, + "buildUUID"_attr = buildUUID, + "collectionUUID"_attr = collectionUUID); + messageLogged = true; + } + return !_newIndexBuildsBlocked; + }); + if (messageLogged) { + LOGV2(7738701, + "Index build: new index builds unblocked, continuing", + "indexSpecs"_attr = specs, + "buildUUID"_attr = buildUUID, + "collectionUUID"_attr = collectionUUID); + } +} + StatusWith> @@ -2127,7 +2271,6 @@ IndexBuildsCoordinator::_acquireExclusiveLockWithRSTLRetry(OperationContext* opC Lock::DBLockSkipOptions lockOptions{/*.skipFlowControlTicket=*/false, /*.skipRSTLLock=*/true}; Lock::DBLock dbLock{opCtx, replState->dbName, MODE_IX, Date_t::max(), lockOptions}; - CollectionNamespaceOrUUIDLock collLock{ opCtx, {replState->dbName, replState->collectionUUID}, MODE_X}; @@ -2186,7 +2329,7 @@ IndexBuildsCoordinator::_filterSpecsAndRegisterBuild(OperationContext* opCtx, if (replCoord->getSettings().usingReplSets() && replCoord->canAcceptWritesFor(opCtx, nssOrUuid)) { uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "drop-pending collection: " << nss, + str::stream() << "drop-pending collection: " << nss.toStringForErrorMsg(), !nss.isDropPendingNamespace()); } @@ -2226,7 +2369,7 @@ IndexBuildsCoordinator::_filterSpecsAndRegisterBuild(OperationContext* opCtx, // the catalog update when it uses the timestamp from the startIndexBuild, rather than // the commitIndexBuild, oplog entry. 
writeConflictRetry( - opCtx, "IndexBuildsCoordinator::_filterSpecsAndRegisterBuild", nss.ns(), [&] { + opCtx, "IndexBuildsCoordinator::_filterSpecsAndRegisterBuild", nss, [&] { WriteUnitOfWork wuow(opCtx); createIndexesOnEmptyCollection(opCtx, collection, filteredSpecs, false); wuow.commit(); @@ -2261,6 +2404,9 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild std::shared_ptr replState, Timestamp startTimestamp, const IndexBuildOptions& indexBuildOptions) { + + hangIndexBuildOnSetupBeforeTakingLocks.pauseWhileSet(opCtx); + auto [dbLock, collLock, rstl] = std::move(_acquireExclusiveLockWithRSTLRetry(opCtx, replState.get()).getValue()); @@ -2364,8 +2510,6 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild uassertStatusOK(_indexBuildsManager.setUpIndexBuild( opCtx, collection, replState->indexSpecs, replState->buildUUID, onInitFn, options)); } - // Mark the index build setup as complete, from now on cleanup is required on failure/abort. - replState->completeSetup(); } catch (DBException& ex) { _indexBuildsManager.abortIndexBuild( opCtx, collection, replState->buildUUID, MultiIndexBlock::kNoopOnCleanUpFn); @@ -2384,14 +2528,39 @@ IndexBuildsCoordinator::PostSetupAction IndexBuildsCoordinator::_setUpIndexBuild throw; } - if (isIndexBuildResumable(opCtx, *replState, indexBuildOptions)) { - // We should only set this value if this is a hybrid index build. - invariant(_indexBuildsManager.isBackgroundBuilding(replState->buildUUID)); + // Mark the index build setup as complete, from now on cleanup is required on failure/abort. + // _setUpIndexBuildInner must not throw after this point, or risk secondaries getting stuck + // applying the 'startIndexBuild' oplog entry, because throwing here would cause the node to + // vote for abort and subsequently await the 'abortIndexBuild' entry before fulfilling the start + // promise, while the oplog applier is waiting for the start promise. + replState->completeSetup(); + + // Failing to establish lastOpTime before interceptors is not fatal, the index build will + // continue as non-resumable. The build can continue as non-resumable even if this step + // succeeds, if it timeouts during the wait for majority read concern on the timestamp + // established here. + try { + if (isIndexBuildResumable(opCtx, *replState, indexBuildOptions)) { + // We should only set this value if this is a hybrid index build. + invariant(_indexBuildsManager.isBackgroundBuilding(replState->buildUUID)); - // After the interceptors are set, get the latest optime in the oplog that could have - // contained a write to this collection. We need to be holding the collection lock in X mode - // so that we ensure that there are not any uncommitted transactions on this collection. - replState->setLastOpTimeBeforeInterceptors(getLatestOplogOpTime(opCtx)); + // After the interceptors are set, get the latest optime in the oplog that could have + // contained a write to this collection. We need to be holding the collection lock in X + // mode so that we ensure that there are not any uncommitted transactions on this + // collection. + replState->setLastOpTimeBeforeInterceptors(getLatestOplogOpTime(opCtx)); + } + } catch (DBException& ex) { + // It is fine to let the build continue even if we are interrupted, interrupt check before + // actually starting the build will trigger the abort, after having signalled the start + // promise. 
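The reordering above moves `replState->completeSetup()` after the last setup step that may throw, and downgrades the resumability bookkeeping to best effort: if recording the last optime before the interceptors fails, the build simply continues as non-resumable. A small standalone sketch of that "mandatory step, then optional step that only logs on failure" shape; names and the error text are illustrative:

```cpp
#include <iostream>
#include <stdexcept>

struct BuildState {
    bool setupComplete = false;
    bool resumable = false;
};

void finishSetup(BuildState& state, bool resumabilityThrows) {
    // From here on, failures must not surface as setup errors (the real code must
    // not throw once completeSetup() has run, or secondaries could get stuck).
    state.setupComplete = true;

    try {
        if (resumabilityThrows) {
            throw std::runtime_error("could not read latest oplog optime");
        }
        state.resumable = true;
    } catch (const std::exception& ex) {
        // Not fatal: the build only loses the ability to resume across restarts.
        std::cerr << "continuing as non-resumable: " << ex.what() << "\n";
    }
}

int main() {
    BuildState ok, degraded;
    finishSetup(ok, false);
    finishSetup(degraded, true);
    std::cout << ok.resumable << " " << degraded.resumable << "\n";  // 1 0
    return 0;
}
```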
+ LOGV2(7484300, + "Index build: failed to setup index build resumability, will continue as " + "non-resumable.", + "buildUUID"_attr = replState->buildUUID, + logAttrs(replState->dbName), + "collectionUUID"_attr = replState->collectionUUID, + "reason"_attr = ex.toStatus()); } return PostSetupAction::kContinueIndexBuild; @@ -2408,7 +2577,9 @@ Status IndexBuildsCoordinator::_setUpIndexBuild(OperationContext* opCtx, postSetupAction = _setUpIndexBuildInner(opCtx, replState, startTimestamp, indexBuildOptions); } catch (const DBException& ex) { - auto status = ex.toStatus(); + // After this point, concurrent aborts are not allowed, with the exception of a loopback + // voteAbortIndexBuild. + replState->setPostFailureState(ex.toStatus()); // Hold reference to the catalog for collection lookup without locks to be safe. auto catalog = CollectionCatalog::get(opCtx); CollectionPtr collection(catalog->lookupCollectionByUUID(opCtx, replState->collectionUUID)); @@ -2416,13 +2587,12 @@ Status IndexBuildsCoordinator::_setUpIndexBuild(OperationContext* opCtx, str::stream() << "Collection with UUID " << replState->collectionUUID << " should exist because an index build is in progress: " << replState->buildUUID); - _cleanUpAfterFailure(opCtx, collection, replState, indexBuildOptions, status); - + _cleanUpAfterFailure(opCtx, collection, replState, indexBuildOptions); // Setup is done within the index builder thread, signal to any waiters that an error // occurred. - replState->sharedPromise.setError(status); - return status; + replState->sharedPromise.setError(replState->getAbortStatus()); + return replState->getAbortStatus(); } // The indexes are in the durable catalog in an unfinished state. Return an OK status so @@ -2463,14 +2633,6 @@ void IndexBuildsCoordinator::_runIndexBuild( } auto replState = invariant(swReplState); - // Try to set index build state to in-progress, if it has been aborted or interrupted then - // signal any waiters and return early. - auto tryStartStatus = replState->tryStart(opCtx); - if (!tryStartStatus.isOK()) { - replState->sharedPromise.setError(tryStartStatus); - return; - } - // Add build UUID to lock manager diagnostic output. auto locker = opCtx->lockState(); auto oldLockerDebugInfo = locker->getDebugInfo(); @@ -2514,6 +2676,13 @@ namespace { template void runOnAlternateContext(OperationContext* opCtx, std::string name, Func func) { auto newClient = opCtx->getServiceContext()->makeClient(name); + + // TODO(SERVER-74657): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*newClient.get()); + newClient.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(newClient); const auto newCtx = cc().makeOperationContext(); func(newCtx.get()); @@ -2523,8 +2692,24 @@ void runOnAlternateContext(OperationContext* opCtx, std::string name, Func func) void IndexBuildsCoordinator::_cleanUpAfterFailure(OperationContext* opCtx, const CollectionPtr& collection, std::shared_ptr replState, - const IndexBuildOptions& indexBuildOptions, - const Status& status) { + const IndexBuildOptions& indexBuildOptions) { + + const auto status = replState->getAbortStatus(); + + if (!replState->isAbortCleanUpRequired()) { + // The index build aborted at an early stage before the 'startIndexBuild' oplog entry is + // replicated: members replicating from this sync source are not aware of this index + // build, nor has any build state been persisted locally. Unregister the index build + // locally. 
In two phase index builds, any conditions causing secondaries to fail setting up + // an index build (which must have succeeded in the primary) are assumed to eventually cause + // the node to crash, so we do not attempt to verify this is a primary. + LOGV2(7564400, + "Index build: unregistering without cleanup", + "buildUUD"_attr = replState->buildUUID, + "error"_attr = status); + activeIndexBuilds.unregisterIndexBuild(&_indexBuildsManager, replState); + return; + } if (!status.isA()) { try { @@ -2533,12 +2718,22 @@ void IndexBuildsCoordinator::_cleanUpAfterFailure(OperationContext* opCtx, // waiting on index builds to finish because the index build state has not been updated // properly. + if (status.code() == ErrorCodes::DataCorruptionDetected) { + indexBuildsSSS.failedDueToDataCorruption.addAndFetch(1); + LOGV2(7333600, + "Index build: data corruption detected", + "buildUUID"_attr = replState->buildUUID, + logAttrs(replState->dbName), + "collectionUUID"_attr = replState->collectionUUID, + "error"_attr = status); + } + if (IndexBuildProtocol::kSinglePhase == replState->protocol) { _cleanUpSinglePhaseAfterNonShutdownFailure( - opCtx, collection, replState, indexBuildOptions, status); + opCtx, collection, replState, indexBuildOptions); } else { _cleanUpTwoPhaseAfterNonShutdownFailure( - opCtx, collection, replState, indexBuildOptions, status); + opCtx, collection, replState, indexBuildOptions); } return; } catch (const DBException& ex) { @@ -2557,50 +2752,36 @@ void IndexBuildsCoordinator::_cleanUpSinglePhaseAfterNonShutdownFailure( OperationContext* opCtx, const CollectionPtr& collection, std::shared_ptr replState, - const IndexBuildOptions& indexBuildOptions, - const Status& status) { + const IndexBuildOptions& indexBuildOptions) { + + invariant(replState->isAbortCleanUpRequired()); // The index builder thread can abort on its own if it is interrupted by a user killop. This // would prevent us from taking locks. Use a new OperationContext to abort the index build. - runOnAlternateContext( - opCtx, "self-abort", [this, replState, status](OperationContext* abortCtx) { - ShouldNotConflictWithSecondaryBatchApplicationBlock noConflict(abortCtx->lockState()); - // Skip RSTL to avoid deadlocks with prepare conflicts and state transitions caused by - // taking a strong collection lock. See SERVER-42621. - Lock::DBLockSkipOptions lockOptions{/*.skipFlowControlTicket=*/false, - /*.skipRSTLLock=*/true}; - Lock::DBLock dbLock(abortCtx, replState->dbName, MODE_IX, Date_t::max(), lockOptions); + runOnAlternateContext(opCtx, "self-abort", [this, replState](OperationContext* abortCtx) { + ShouldNotConflictWithSecondaryBatchApplicationBlock noConflict(abortCtx->lockState()); + // Skip RSTL to avoid deadlocks with prepare conflicts and state transitions caused by + // taking a strong collection lock. See SERVER-42621. 
+ Lock::DBLockSkipOptions lockOptions{/*.skipFlowControlTicket=*/false, + /*.skipRSTLLock=*/true}; + Lock::DBLock dbLock(abortCtx, replState->dbName, MODE_IX, Date_t::max(), lockOptions); - const NamespaceStringOrUUID dbAndUUID(replState->dbName, replState->collectionUUID); - CollectionNamespaceOrUUIDLock collLock(abortCtx, dbAndUUID, MODE_X); - AutoGetCollection indexBuildEntryColl( - abortCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX); - _completeSelfAbort(abortCtx, replState, *indexBuildEntryColl, status); - }); + const NamespaceStringOrUUID dbAndUUID(replState->dbName, replState->collectionUUID); + CollectionNamespaceOrUUIDLock collLock(abortCtx, dbAndUUID, MODE_X); + AutoGetCollection indexBuildEntryColl( + abortCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX); + _completeSelfAbort(abortCtx, replState, *indexBuildEntryColl); + }); } void IndexBuildsCoordinator::_cleanUpTwoPhaseAfterNonShutdownFailure( OperationContext* opCtx, const CollectionPtr& collection, std::shared_ptr replState, - const IndexBuildOptions& indexBuildOptions, - const Status& status) { - - // We can only get here when there is no external abort, after a failure. If the operation has - // been killed, it must have been from a killop. In which case we cannot continue and try to - // vote, because we want the voting itself to be killable. Continue and try to abort as primary - // or crash. - if (!opCtx->isKillPending() && - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - feature_flags::gIndexBuildGracefulErrorHandling.isEnabledAndIgnoreFCVUnsafe()) { - if (ErrorCodes::NotWritablePrimary == status && !replState->isAbortCleanUpRequired()) { - // Clean up if the error happens due to stepdown before 'startIndexBuild' oplog entry is - // replicated. Other nodes will not be aware of this index build, so trying to signal - // for abort to the new primary cannot succeed. - activeIndexBuilds.unregisterIndexBuild(&_indexBuildsManager, replState); - return; - } - } + const IndexBuildOptions& indexBuildOptions) { + + invariant(replState->isAbortCleanUpRequired()); + const auto status = replState->getAbortStatus(); // Use a new OperationContext to abort the index build since our current opCtx may be // interrupted. This is still susceptible to shutdown interrupts, but in that case, on server @@ -2610,19 +2791,17 @@ void IndexBuildsCoordinator::_cleanUpTwoPhaseAfterNonShutdownFailure( opCtx, "self-abort", [this, replState, status](OperationContext* abortCtx) { // The index builder thread will need to reach out to the current primary to abort on // its own. This can happen if an error is thrown, it is interrupted by a user killop, - // or is killed internally by something like the DiskSpaceMonitor. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (feature_flags::gIndexBuildGracefulErrorHandling.isEnabledAndIgnoreFCVUnsafe()) { - // If we were interrupted by a caller internally who set a status, use that - // status instead of the generic interruption error status. - auto abortStatus = - !replState->getAbortStatus().isOK() ? replState->getAbortStatus() : status; + // or is killed internally by something like the DiskSpaceMonitor. Voting for abort is + // only allowed if the node did not previously attempt to vote for commit. + // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
+ if (feature_flags::gIndexBuildGracefulErrorHandling.isEnabled( + serverGlobalParams.featureCompatibility) && + replState->canVoteForAbort()) { // Always request an abort to the primary node, even if we are primary. If // primary, the signal will loop back and cause an asynchronous external // index build abort. - _signalPrimaryForAbortAndWaitForExternalAbort( - abortCtx, replState.get(), abortStatus); + _signalPrimaryForAbortAndWaitForExternalAbort(abortCtx, replState.get()); // The abort, and state clean-up, is done externally by the async // 'voteAbortIndexBuild' command if the node is primary itself, or by the @@ -2633,33 +2812,28 @@ void IndexBuildsCoordinator::_cleanUpTwoPhaseAfterNonShutdownFailure( ShouldNotConflictWithSecondaryBatchApplicationBlock noConflict( abortCtx->lockState()); - // Take RSTL (implicitly by DBLock) to observe and prevent replication state - // from changing. - Lock::DBLock dbLock(abortCtx, replState->dbName, MODE_IX); + // Take RSTL to observe and prevent replication state from changing. This is + // done with the release/reacquire strategy to avoid deadlock with prepared + // txns. + auto [dbLock, collLock, rstl] = std::move( + _acquireExclusiveLockWithRSTLRetry(abortCtx, replState.get()).getValue()); const NamespaceStringOrUUID dbAndUUID(replState->dbName, replState->collectionUUID); auto replCoord = repl::ReplicationCoordinator::get(abortCtx); if (!replCoord->canAcceptWritesFor(abortCtx, dbAndUUID)) { - if (replState->isSettingUp()) { - // Clean up if the error happens before StartIndexBuild oplog entry - // is replicated during startup or stepdown. - activeIndexBuilds.unregisterIndexBuild(&_indexBuildsManager, replState); - return; - } else { - // Index builds may not fail on secondaries. If a primary replicated - // an abortIndexBuild oplog entry, then this index build would have - // received an IndexBuildAborted error code. - fassert(51101, - status.withContext(str::stream() - << "Index build: " << replState->buildUUID - << "; Database: " << replState->dbName)); - } + // Index builds may not fail on secondaries. If a primary replicated an + // abortIndexBuild oplog entry, then this index build would have been externally + // aborted. + fassert(51101, + status.withContext(str::stream() + << "Index build: " << replState->buildUUID + << "; Database: " + << replState->dbName.toStringForErrorMsg())); } - CollectionNamespaceOrUUIDLock collLock(abortCtx, dbAndUUID, MODE_X); AutoGetCollection indexBuildEntryColl( abortCtx, NamespaceString::kIndexBuildEntryNamespace, MODE_IX); - _completeSelfAbort(abortCtx, replState, *indexBuildEntryColl, status); + _completeSelfAbort(abortCtx, replState, *indexBuildEntryColl); } }); } @@ -2669,9 +2843,10 @@ void IndexBuildsCoordinator::_runIndexBuildInner( std::shared_ptr replState, const IndexBuildOptions& indexBuildOptions, const boost::optional& resumeInfo) { - // This Status stays unchanged unless we catch an exception in the following try-catch block. - auto status = Status::OK(); try { + // Try to set index build state to in-progress, if it has been aborted or interrupted the + // attempt will fail. + replState->setInProgress(opCtx); hangAfterInitializingIndexBuild.pauseWhileSet(opCtx); failIndexBuildWithError.executeIf( @@ -2695,14 +2870,30 @@ void IndexBuildsCoordinator::_runIndexBuildInner( } } catch (const DBException& ex) { - status = ex.toStatus(); - } - + // After this point, concurrent aborts are not allowed, with the exception of a loopback + // voteAbortIndexBuild. 
External aborters will retry until the build is actually aborted by + // the builder, or until the builder goes into kAwaitPrimaryAbort state, in which case an + // external abort is allowed. + + // Merge exception status with replication index build state status. When there was an + // external abort, the index build state already contains the abort reason as specified by + // the external aborter and this call does not override the status. In that case, the fact + // that this opCtx was interrupted (due to killOp) is irrelevant, as it is the means by + // which the builder is stopped, not the actual root cause. This returns a meaningful error + // message to the createIndexes caller in case of an external abort, e.g. a secondary voting + // to abort the index build. Not doing so would return a generic, not too helpful "operation + // was interrupted" error message, because the 'voteAbortIndexBuild' command kills the index + // build's operation context. + replState->setPostFailureState(ex.toStatus()); + } + + const auto status = replState->getAbortStatus(); + // No abort detected, index build returned normally. if (status.isOK()) { return; } - if (status.code() == ErrorCodes::IndexBuildAborted) { + if (replState->isExternalAbort()) { auto replCoord = repl::ReplicationCoordinator::get(opCtx); auto& collector = ResourceConsumption::MetricsCollector::get(opCtx); @@ -2720,14 +2911,14 @@ void IndexBuildsCoordinator::_runIndexBuildInner( // If the index build has already been cleaned-up because it encountered an error, there is no // work to do. If feature flag IndexBuildGracefulErrorHandling is not enabled, the most routine // case is for this to be due to a self-abort caused by constraint checking during the commit - // phase. When the flag is enabled, constraint violations cause the index build to abort - // immediately on primaries, and an async external abort is requested. - if (replState->isAborted()) { - if (ErrorCodes::isTenantMigrationError(replState->getAbortStatus())) - uassertStatusOK(replState->getAbortStatus()); + // phase. If an external abort was requested, cleanup is handled by the requester, and there is + // nothing to do. + if (replState->isAborted() || replState->isExternalAbort()) { uassertStatusOK(status); } + invariant(replState->isFailureCleanUp()); + // We do not hold a collection lock here, but we are protected against the collection being // dropped while the index build is still registered for the collection -- until abortIndexBuild // is called. The collection can be renamed, but it is OK for the name to be stale just for @@ -2741,9 +2932,6 @@ void IndexBuildsCoordinator::_runIndexBuildInner( NamespaceString nss = collection->ns(); logFailure(status, nss, replState); - // If we received an external abort, the caller should have already set our state to kAborted. - invariant(status.code() != ErrorCodes::IndexBuildAborted); - if (MONGO_unlikely(hangIndexBuildBeforeAbortCleanUp.shouldFail())) { LOGV2(4753601, "Hanging due to hangIndexBuildBeforeAbortCleanUp fail point"); hangIndexBuildBeforeAbortCleanUp.pauseWhileSet(); @@ -2753,8 +2941,8 @@ void IndexBuildsCoordinator::_runIndexBuildInner( // feature flag is enabled, two-phase builds can handle unexpected errors by requesting an abort // to the primary node. Single-phase builds can also abort immediately, as the primary or // standalone is the only node aware of the build. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
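As the comment above spells out, `setPostFailureState` must not clobber an abort reason that an external aborter has already recorded; the interruption of the builder's opCtx is only the delivery mechanism, not the root cause. A minimal sketch of that merge rule with plain types (this is not the real `ReplIndexBuildState` interface):

```cpp
#include <optional>
#include <string>

struct BuildFailureState {
    std::optional<std::string> abortReason;  // set earlier by an external abort, if any

    void setPostFailureState(const std::string& exceptionReason) {
        if (!abortReason) {
            abortReason = exceptionReason;  // self-detected failure: adopt the exception
        }
        // Otherwise keep the external aborter's reason, e.g. a secondary's
        // voteAbortIndexBuild message, so the createIndexes caller sees that
        // instead of a generic "operation was interrupted" error.
    }
};
```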
- if (!feature_flags::gIndexBuildGracefulErrorHandling.isEnabledAndIgnoreFCVUnsafe()) { + if (!feature_flags::gIndexBuildGracefulErrorHandling.isEnabled( + serverGlobalParams.featureCompatibility)) { // Index builds only check index constraints when committing. If an error occurs at that // point, then the build is cleaned up while still holding the appropriate locks. The only // errors that we cannot anticipate are user interrupts and shutdown errors. @@ -2805,7 +2993,7 @@ void IndexBuildsCoordinator::_runIndexBuildInner( } } - _cleanUpAfterFailure(opCtx, collection, replState, indexBuildOptions, status); + _cleanUpAfterFailure(opCtx, collection, replState, indexBuildOptions); // Any error that escapes at this point is not fatal and can be handled by the caller. uassertStatusOK(status); @@ -3063,6 +3251,16 @@ void IndexBuildsCoordinator::_insertKeysFromSideTablesBlockingWrites( const IndexBuildOptions& indexBuildOptions) { indexBuildsSSS.drainSideWritesTablePreCommit.addAndFetch(1); const NamespaceStringOrUUID dbAndUUID(replState->dbName, replState->collectionUUID); + + failIndexBuildWithErrorInSecondDrain.executeIf( + [](const BSONObj& data) { + uasserted(data["error"].safeNumberInt(), + "failIndexBuildWithErrorInSecondDrain failpoint triggered"); + }, + [&](const BSONObj& data) { + return UUID::parse(data["buildUUID"]) == replState->buildUUID; + }); + // Perform the second drain while stopping writes on the collection. { // Skip RSTL to avoid deadlocks with prepare conflicts and state transitions. See @@ -3224,19 +3422,21 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide _completeAbortForShutdown(opCtx, replState, collection.get()); throw; } catch (const DBException& e) { + // There already is clean-up handling code up the stack, but this redundancy is introduced + // to make sure we abort the index build as primary, by doing so while we still have the + // locks. The caller's handling code will detect this condition and do nothing. auto status = e.toStatus(); logFailure(status, collection->ns(), replState); // It is illegal to abort the index build at this point. Note that Interruption exceptions // are allowed because we cannot control them as they bypass the routine abort machinery. - invariant(e.code() != ErrorCodes::IndexBuildAborted); + invariant(!replState->isExternalAbort()); // Index build commit may not fail on secondaries because it implies diverenge with data on // the primary. The only exception is single-phase builds started on primaries, which may // fail after a state transition. In this case, we have not replicated anything to // roll-back. With two-phase index builds, if a primary replicated an abortIndexBuild oplog - // entry, then this index build should have been interrupted before committing with an - // IndexBuildAborted error code. + // entry, then this index build should have been interrupted before committing. const bool twoPhaseAndNotPrimary = IndexBuildProtocol::kTwoPhase == replState->protocol && !isPrimary; if (twoPhaseAndNotPrimary) { @@ -3248,9 +3448,10 @@ IndexBuildsCoordinator::CommitResult IndexBuildsCoordinator::_insertKeysFromSide "error"_attr = status); } + replState->setPostFailureState(status); // This index build failed due to an indexing error in normal circumstances. Abort while // still holding the RSTL and collection locks. 
- _completeSelfAbort(opCtx, replState, *indexBuildEntryColl, status); + _completeSelfAbort(opCtx, replState, *indexBuildEntryColl); throw; } @@ -3359,23 +3560,18 @@ StatusWith> IndexBuildsCoordinator::_getInd return activeIndexBuilds.getIndexBuild(buildUUID); } -std::vector> IndexBuildsCoordinator::_getIndexBuilds() const { - auto filter = [](const auto& replState) { - return true; - }; - return activeIndexBuilds.filterIndexBuilds(filter); -} - int IndexBuildsCoordinator::getNumIndexesTotal(OperationContext* opCtx, const CollectionPtr& collection) { invariant(collection); const auto& nss = collection->ns(); invariant(opCtx->lockState()->isLocked(), str::stream() << "Unable to get index count because collection was not locked" - << nss); + << nss.toStringForErrorMsg()); auto indexCatalog = collection->getIndexCatalog(); - invariant(indexCatalog, str::stream() << "Collection is missing index catalog: " << nss); + invariant(indexCatalog, + str::stream() << "Collection is missing index catalog: " + << nss.toStringForErrorMsg()); return indexCatalog->numIndexesTotal(); } diff --git a/src/mongo/db/index_builds_coordinator.h b/src/mongo/db/index_builds_coordinator.h index 6580c4deb88c4..0224e7341b2ca 100644 --- a/src/mongo/db/index_builds_coordinator.h +++ b/src/mongo/db/index_builds_coordinator.h @@ -30,32 +30,56 @@ #pragma once #include +#include +#include +#include +#include +#include #include #include +#include #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/active_index_builds.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/commit_quorum_options.h" #include "mongo/db/catalog/index_build_oplog_entry.h" #include "mongo/db/catalog/index_builds.h" #include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" +#include "mongo/db/database_name.h" #include "mongo/db/index/column_key_generator.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/rebuild_indexes.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl_index_build_state.h" #include "mongo/db/resumable_index_builds_gen.h" #include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/disk_space_monitor.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" #include "mongo/util/concurrency/with_lock.h" #include "mongo/util/fail_point.h" #include "mongo/util/future.h" @@ -114,6 +138,12 @@ class IndexBuildsCoordinator { static IndexBuildsCoordinator* get(ServiceContext* serviceContext); static IndexBuildsCoordinator* get(OperationContext* operationContext); + /** + * Returns Status::OK if there is enough available disk space to start an index build. Will + * return OutOfDiskSpace otherwise, with the context string providing the details. 
+ */ + static Status checkDiskSpaceSufficientToStartIndexBuild(OperationContext* opCtx); + /** * Updates CurOp's 'op' type to 'command', the 'nss' field, and the 'opDescription' field with * 'createIndexes' command and index specs. Also ensures the timer is started. If provided, @@ -226,14 +256,14 @@ class IndexBuildsCoordinator { void applyAbortIndexBuild(OperationContext* opCtx, const IndexBuildOplogEntry& entry); /** - * Waits for all index builds to stop after they have been interrupted during shutdown. - * Leaves the index builds in a recoverable state. + * Waits for all index builds to stop. * * This should only be called when certain the server will not start any new index builds -- - * i.e. when the server is not accepting user requests and no internal operations are - * concurrently starting new index builds. + * i.e. after a call to setNewIndexBuildsBlocked -- and potentially after aborting all index + * builds that can be aborted -- i.e. using abortAllIndexBuildsWithReason -- to avoid an + * excesively long wait. */ - void waitForAllIndexBuildsToStopForShutdown(OperationContext* opCtx); + void waitForAllIndexBuildsToStop(OperationContext* opCtx); /** * Signals all of the index builds on the specified collection to abort and then waits until the @@ -329,6 +359,25 @@ class IndexBuildsCoordinator { const std::vector& indexNames, std::string reason); + /** + * Signals all of the index builds to abort and then waits until the index builds are no longer + * running. The provided 'reason' will be used in the error message that the index builders + * return to their callers. + * + * Does not require holding locks. + * + * Does not stop new index builds from starting. If required, caller must make that guarantee + * with a call to setNewIndexBuildsBlocked. + */ + void abortAllIndexBuildsWithReason(OperationContext* opCtx, const std::string& reason); + + /** + * Blocks or unblocks new index builds from starting. When blocking is enabled, new index builds + * will not immediately start and instead wait until a call to unblock is made. Concurrent calls + * to this function are not supported. + */ + void setNewIndexBuildsBlocked(bool newValue, boost::optional reason = boost::none); + /** * Returns true if there is an index builder building the given index names on a collection. */ @@ -540,6 +589,8 @@ class IndexBuildsCoordinator { indexBuilds.append("total", registered.loadRelaxed()); indexBuilds.append("killedDueToInsufficientDiskSpace", killedDueToInsufficientDiskSpace.loadRelaxed()); + indexBuilds.append("failedDueToDataCorruption", + failedDueToDataCorruption.loadRelaxed()); BSONObjBuilder phases; phases.append("scanCollection", scanCollection.loadRelaxed()); @@ -559,6 +610,7 @@ class IndexBuildsCoordinator { AtomicWord registered; AtomicWord killedDueToInsufficientDiskSpace; + AtomicWord failedDueToDataCorruption; AtomicWord scanCollection; AtomicWord drainSideWritesTable; AtomicWord drainSideWritesTablePreCommit; @@ -596,7 +648,16 @@ class IndexBuildsCoordinator { MigrationProtocolEnum protocol, const std::string& reason); + void _abortAllIndexBuildsWithReason(OperationContext* opCtx, + IndexBuildAction signalAction, + const std::string& reason); + protected: + void _waitIfNewIndexBuildsBlocked(OperationContext* opCtx, + const UUID& collectionUUID, + const std::vector& specs, + const UUID& buildUUID); + /** * Acquire the collection MODE_X lock (and other locks up the hierarchy) as usual, with the * exception of the RSTL. The RSTL will be acquired last, with a timeout. 
On timeout, all locks @@ -616,7 +677,6 @@ class IndexBuildsCoordinator { ReplIndexBuildState* replState, bool retry = true); - /** * Sets up the in-memory state of the index build. Validates index specs and filters out * existing indexes from the list of specs. @@ -712,8 +772,7 @@ class IndexBuildsCoordinator { void _cleanUpAfterFailure(OperationContext* opCtx, const CollectionPtr& collection, std::shared_ptr replState, - const IndexBuildOptions& indexBuildOptions, - const Status& status); + const IndexBuildOptions& indexBuildOptions); /** * Cleans up a single-phase index build after a failure, only if non-shutdown related. This @@ -722,8 +781,7 @@ class IndexBuildsCoordinator { void _cleanUpSinglePhaseAfterNonShutdownFailure(OperationContext* opCtx, const CollectionPtr& collection, std::shared_ptr replState, - const IndexBuildOptions& indexBuildOptions, - const Status& status); + const IndexBuildOptions& indexBuildOptions); /** * Cleans up a two-phase index build after a failure, only if non-shutdown related. This allows @@ -732,8 +790,7 @@ class IndexBuildsCoordinator { void _cleanUpTwoPhaseAfterNonShutdownFailure(OperationContext* opCtx, const CollectionPtr& collection, std::shared_ptr replState, - const IndexBuildOptions& indexBuildOptions, - const Status& status); + const IndexBuildOptions& indexBuildOptions); /** * Performs last steps of aborting an index build. @@ -741,12 +798,14 @@ class IndexBuildsCoordinator { void _completeAbort(OperationContext* opCtx, std::shared_ptr replState, const CollectionPtr& indexBuildEntryCollection, - IndexBuildAction signalAction, - Status reason); + IndexBuildAction signalAction); + void _completeExternalAbort(OperationContext* opCtx, + std::shared_ptr replState, + const CollectionPtr& indexBuildEntryCollection, + IndexBuildAction signalAction); void _completeSelfAbort(OperationContext* opCtx, std::shared_ptr replState, - const CollectionPtr& indexBuildEntryCollection, - Status reason); + const CollectionPtr& indexBuildEntryCollection); void _completeAbortForShutdown(OperationContext* opCtx, std::shared_ptr replState, const CollectionPtr& collection); @@ -823,8 +882,7 @@ class IndexBuildsCoordinator { * the index build to be externally aborted. */ virtual void _signalPrimaryForAbortAndWaitForExternalAbort(OperationContext* opCtx, - ReplIndexBuildState* replState, - const Status& abortStatus) = 0; + ReplIndexBuildState* replState) = 0; /** * Signals the primary to commit the index build by sending "voteCommitIndexBuild" command @@ -913,12 +971,6 @@ class IndexBuildsCoordinator { */ StatusWith> _getIndexBuild(const UUID& buildUUID) const; - /** - * Returns a snapshot of active index builds. Since each index build state is reference counted, - * it is fine to examine the returned index builds without re-locking 'mutex'. - */ - std::vector> _getIndexBuilds() const; - /** * Returns a list of index builds matching the criteria 'indexBuildFilter'. * Requires caller to lock '_mutex'. @@ -934,6 +986,16 @@ class IndexBuildsCoordinator { // The thread spawned during step-up to verify the builds. stdx::thread _stepUpThread; + + // Manages _newIndexBuildsBlocked. + mutable Mutex _newIndexBuildsBlockedMutex = + MONGO_MAKE_LATCH("IndexBuildsCoordinator::_newIndexBuildsBlocked"); + // Condition signalled to indicate new index builds are unblocked. + stdx::condition_variable _newIndexBuildsBlockedCV; + // Protected by _newIndexBuildsBlockedMutex. + bool _newIndexBuildsBlocked = false; + // Reason for blocking new index builds. 
+ boost::optional _blockReason; }; // These fail points are used to control index build progress. Declared here to be shared diff --git a/src/mongo/db/index_builds_coordinator_mongod.cpp b/src/mongo/db/index_builds_coordinator_mongod.cpp index 77a2bad5375dd..0fef41ac810ac 100644 --- a/src/mongo/db/index_builds_coordinator_mongod.cpp +++ b/src/mongo/db/index_builds_coordinator_mongod.cpp @@ -28,35 +28,68 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index_builds_coordinator_mongod.h" - #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/active_index_builds.h" #include "mongo/db/audit.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/index_build_entry_gen.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" #include "mongo/db/index_build_entry_helpers.h" +#include "mongo/db/index_builds_coordinator_mongod.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/global_user_write_block_state.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/service_context.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" #include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/two_phase_index_build_knobs_gen.h" #include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scoped_counter.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -93,6 +126,9 @@ ThreadPool::Options makeDefaultThreadPoolOptions() { // Ensure all threads have a client. options.onCreateThread = [](const std::string& threadName) { Client::initThread(threadName.c_str()); + + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); }; return options; @@ -192,12 +228,12 @@ IndexBuildsCoordinatorMongod::IndexBuildsCoordinatorMongod() }); } -void IndexBuildsCoordinatorMongod::shutdown(OperationContext* opCtx) { +void IndexBuildsCoordinatorMongod::shutdown(OperationContext*) { // Stop new scheduling. _threadPool.shutdown(); // Wait for all active builds to stop. 
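The members added at the end of the coordinator header above (a latch, a condition variable, the `_newIndexBuildsBlocked` flag and a block reason) describe a simple gate, but the method bodies are not part of these hunks. Below is a minimal sketch of how those pieces plausibly fit together; it assumes the optional reason holds a string and omits the UUID/spec parameters of `_waitIfNewIndexBuildsBlocked`, so treat it as an illustration rather than the patch's actual implementation.

```cpp
// Assumed shape of the gate, based only on the members and signatures declared above.
void IndexBuildsCoordinator::setNewIndexBuildsBlocked(bool newValue,
                                                      boost::optional<std::string> reason) {
    stdx::lock_guard<Latch> lk(_newIndexBuildsBlockedMutex);
    _newIndexBuildsBlocked = newValue;
    _blockReason = std::move(reason);
    if (!newValue) {
        // Wake every build currently parked in _waitIfNewIndexBuildsBlocked().
        _newIndexBuildsBlockedCV.notify_all();
    }
}

// Collection UUID, spec and build UUID parameters (used for logging) are omitted here.
void IndexBuildsCoordinator::_waitIfNewIndexBuildsBlocked(OperationContext* opCtx) {
    stdx::unique_lock<Latch> lk(_newIndexBuildsBlockedMutex);
    // Interruptible wait: the build only proceeds once the block has been lifted.
    opCtx->waitForConditionOrInterrupt(
        _newIndexBuildsBlockedCV, lk, [&] { return !_newIndexBuildsBlocked; });
}
```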
- waitForAllIndexBuildsToStopForShutdown(opCtx); + activeIndexBuilds.waitForAllIndexBuildsToStopForShutdown(); // Wait for active threads to finish. _threadPool.join(); @@ -243,6 +279,8 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx, IndexBuildProtocol protocol, IndexBuildOptions indexBuildOptions, const boost::optional& resumeInfo) { + _waitIfNewIndexBuildsBlocked(opCtx, collectionUUID, specs, buildUUID); + const NamespaceStringOrUUID nssOrUuid{dbName, collectionUUID}; auto writeBlockState = GlobalUserWriteBlockState::get(opCtx); @@ -418,7 +456,7 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx, startPromise = std::move(startPromise), startTimestamp, shardVersion = oss.getShardVersion(nss), - dbVersion = oss.getDbVersion(dbName.toStringWithTenantId()), + dbVersion = oss.getDbVersion(dbName), resumeInfo, impersonatedClientAttrs = std::move(impersonatedClientAttrs), forwardableOpMetadata = @@ -461,12 +499,12 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx, sleepmillis(100); } - // Start collecting metrics for the index build. The metrics for this operation will only be - // aggregated globally if the node commits or aborts while it is primary. + // Start collecting metrics for the index build. The metrics for this operation will + // only be aggregated globally if the node commits or aborts while it is primary. auto& metricsCollector = ResourceConsumption::MetricsCollector::get(opCtx.get()); - if (ResourceConsumption::shouldCollectMetricsForDatabase(dbName.toStringWithTenantId()) && + if (ResourceConsumption::shouldCollectMetricsForDatabase(dbName) && ResourceConsumption::isMetricsCollectionEnabled()) { - metricsCollector.beginScopedCollecting(opCtx.get(), dbName.toStringWithTenantId()); + metricsCollector.beginScopedCollecting(opCtx.get(), dbName); } // Index builds should never take the PBWM lock, even on a primary. This allows the @@ -490,16 +528,16 @@ IndexBuildsCoordinatorMongod::_startIndexBuild(OperationContext* opCtx, hangBeforeRunningIndexBuild.pauseWhileSet(); - // Runs the remainder of the index build. Sets the promise result and cleans up the index - // build. + // Runs the remainder of the index build. Sets the promise result and cleans up the + // index build. _runIndexBuild(opCtx.get(), buildUUID, indexBuildOptions, resumeInfo); // Do not exit with an incomplete future. invariant(replState->sharedPromise.getFuture().isReady()); try { - // Logs the index build statistics if it took longer than the server parameter `slowMs` - // to complete. + // Logs the index build statistics if it took longer than the server parameter + // `slowMs` to complete. 
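Taken together, the coordinator methods documented earlier in this diff (`setNewIndexBuildsBlocked`, `abortAllIndexBuildsWithReason`, `waitForAllIndexBuildsToStop`) are meant to be sequenced by the caller. A hedged caller-side sketch follows; the surrounding function and the reason string are hypothetical, only the coordinator calls come from this patch:

```cpp
// Illustrative sequencing of the new public API; not code added by this patch.
void quiesceIndexBuilds(OperationContext* opCtx) {
    auto coordinator = IndexBuildsCoordinator::get(opCtx);

    // 1. Keep new builds from starting; they queue in _waitIfNewIndexBuildsBlocked().
    coordinator->setNewIndexBuildsBlocked(true);

    // 2. Ask every registered build to abort; builders surface this reason to their callers.
    coordinator->abortAllIndexBuildsWithReason(opCtx, "example: quiescing index builds");

    // 3. The wait cannot be excessively long now: nothing new can start and the abortable
    //    builds are already tearing down.
    coordinator->waitForAllIndexBuildsToStop(opCtx);

    // 4. Lift the block once the maintenance work is done.
    coordinator->setNewIndexBuildsBlocked(false);
}
```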
CurOp::get(opCtx.get()) ->completeAndLogOperation(MONGO_LOGV2_DEFAULT_COMPONENT, CollectionCatalog::get(opCtx.get()) @@ -678,29 +716,17 @@ bool IndexBuildsCoordinatorMongod::_signalIfCommitQuorumNotEnabled( } void IndexBuildsCoordinatorMongod::_signalPrimaryForAbortAndWaitForExternalAbort( - OperationContext* opCtx, ReplIndexBuildState* replState, const Status& abortStatus) { - + OperationContext* opCtx, ReplIndexBuildState* replState) { hangIndexBuildBeforeTransitioningReplStateTokAwaitPrimaryAbort.pauseWhileSet(opCtx); + const auto abortStatus = replState->getAbortStatus(); LOGV2(7419402, "Index build: signaling primary to abort index build", "buildUUID"_attr = replState->buildUUID, logAttrs(replState->dbName), "collectionUUID"_attr = replState->collectionUUID, "reason"_attr = abortStatus); - const auto transitionedToWaitForAbort = replState->requestAbortFromPrimary(abortStatus); - - if (!transitionedToWaitForAbort) { - // The index build has likely been aborted externally (e.g. its underlying collection was - // dropped), and it's in the midst of tearing down. There's nothing else to do here. - LOGV2(7530800, - "Index build: the build is already in aborted state; not signaling primary to abort", - "buildUUID"_attr = replState->buildUUID, - "db"_attr = replState->dbName, - "collectionUUID"_attr = replState->collectionUUID, - "reason"_attr = abortStatus); - return; - } + replState->requestAbortFromPrimary(); hangIndexBuildBeforeSignalingPrimaryForAbort.pauseWhileSet(opCtx); @@ -766,8 +792,10 @@ void IndexBuildsCoordinatorMongod::_signalPrimaryForAbortAndWaitForExternalAbort } // The promise was fullfilled before waiting. return; - } catch (const DBException& ex) { - if (ex.code() == ErrorCodes::IndexBuildAborted) { + } catch (const DBException&) { + // External aborts must wait for the builder thread, so we cannot be in an already aborted + // state. + if (replState->isExternalAbort()) { // The build was aborted, and the opCtx interrupted, before the thread checked the // future. return; @@ -795,6 +823,14 @@ void IndexBuildsCoordinatorMongod::_signalPrimaryForCommitReadiness( logAttrs(replState->dbName), "collectionUUID"_attr = replState->collectionUUID); + // Indicate that the index build in this node has already tried to vote for commit readiness. + // We do not try to determine whether the vote has actually succeeded or not, as it is + // challenging due to the asynchronous request and potential concurrent interrupts. After this + // point, the node cannot vote to abort this index build, and if it needs to abort the index + // build it must try to do so independently. Meaning, as a primary it will succeed, but as a + // secondary it will fassert. 
+ replState->setVotedForCommitReadiness(opCtx); + const auto generateCmd = [](const UUID& uuid, const std::string& address) { return BSON("voteCommitIndexBuild" << uuid << "hostAndPort" << address << "writeConcern" << BSON("w" @@ -964,7 +1000,7 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx, return Status(ErrorCodes::IndexNotFound, str::stream() << "Cannot set a new commit quorum on an index build in collection '" - << nss << "' without providing any indexes."); + << nss.toStringForErrorMsg() << "' without providing any indexes."); } // Take the MODE_IX lock now, so that when we actually persist the value later, we don't need to @@ -972,7 +1008,8 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx, AutoGetCollection collection(opCtx, nss, MODE_IX); if (!collection) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection '" << nss << "' was not found."); + str::stream() + << "Collection '" << nss.toStringForErrorMsg() << "' was not found."); } UUID collectionUUID = collection->uuid(); @@ -992,13 +1029,14 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx, auto collIndexBuilds = activeIndexBuilds.filterIndexBuilds(pred); if (collIndexBuilds.empty()) { return Status(ErrorCodes::IndexNotFound, - str::stream() << "Cannot find an index build on collection '" << nss - << "' with the provided index names"); + str::stream() + << "Cannot find an index build on collection '" + << nss.toStringForErrorMsg() << "' with the provided index names"); } invariant( 1U == collIndexBuilds.size(), str::stream() << "Found multiple index builds with the same index names on collection " - << nss << " (" << collectionUUID + << nss.toStringForErrorMsg() << " (" << collectionUUID << "): first index name: " << indexNames.front()); replState = collIndexBuilds.front(); @@ -1018,18 +1056,19 @@ Status IndexBuildsCoordinatorMongod::setCommitQuorum(OperationContext* opCtx, return Status(ErrorCodes::IndexNotFound, str::stream() << "Index build not yet started for the provided indexes in collection '" - << nss << "'."); + << nss.toStringForErrorMsg() << "'."); } auto currentCommitQuorum = invariantStatusOK(swOnDiskCommitQuorum); if (currentCommitQuorum.numNodes == CommitQuorumOptions::kDisabled || newCommitQuorum.numNodes == CommitQuorumOptions::kDisabled) { return Status(ErrorCodes::BadValue, - str::stream() << "Commit quorum value can be changed only for index builds " - << "with commit quorum enabled, nss: '" << nss - << "' first index name: '" << indexNames.front() - << "' currentCommitQuorum: " << currentCommitQuorum.toBSON() - << " providedCommitQuorum: " << newCommitQuorum.toBSON()); + str::stream() + << "Commit quorum value can be changed only for index builds " + << "with commit quorum enabled, nss: '" << nss.toStringForErrorMsg() + << "' first index name: '" << indexNames.front() + << "' currentCommitQuorum: " << currentCommitQuorum.toBSON() + << " providedCommitQuorum: " << newCommitQuorum.toBSON()); } invariant(opCtx->lockState()->isRSTLLocked()); diff --git a/src/mongo/db/index_builds_coordinator_mongod.h b/src/mongo/db/index_builds_coordinator_mongod.h index a56900578c53d..2fd03aaafddad 100644 --- a/src/mongo/db/index_builds_coordinator_mongod.h +++ b/src/mongo/db/index_builds_coordinator_mongod.h @@ -29,8 +29,27 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" 
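The comment above states a rule rather than a mechanism: once a node has voted for commit readiness it may no longer delegate an abort to the primary and has to handle failure by itself. A purely illustrative sketch of that rule follows; every name below is hypothetical and the assertion id is a placeholder, nothing here is added by this patch:

```cpp
// Restates the voting rule from the comment above in code form (hypothetical helpers).
void onIndexBuildFailure(OperationContext* opCtx,
                         ReplIndexBuildState* replState,
                         const Status& failure,
                         bool isPrimary) {
    if (!replState->hasVotedForCommitReadiness()) {
        // Still allowed to ask the primary to abort on this node's behalf.
        signalPrimaryForAbortAndWait(opCtx, replState);
    } else if (isPrimary) {
        // Already voted to commit, but a primary may still abort the build independently.
        selfAbort(opCtx, replState, failure);
    } else {
        // A secondary that has voted to commit has no safe way to back out.
        fassertFailedWithStatus(0 /* placeholder id */, failure);
    }
}
```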
+#include "mongo/db/catalog/commit_quorum_options.h" +#include "mongo/db/database_name.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl_index_build_state.h" +#include "mongo/db/resumable_index_builds_gen.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -158,8 +177,7 @@ class IndexBuildsCoordinatorMongod : public IndexBuildsCoordinator { std::shared_ptr replState) override; void _signalPrimaryForAbortAndWaitForExternalAbort(OperationContext* opCtx, - ReplIndexBuildState* replState, - const Status& abortStatus) override; + ReplIndexBuildState* replState) override; void _signalPrimaryForCommitReadiness(OperationContext* opCtx, std::shared_ptr replState) override; diff --git a/src/mongo/db/index_builds_coordinator_mongod_test.cpp b/src/mongo/db/index_builds_coordinator_mongod_test.cpp index 3e21ecf6f3312..055a0c34ff603 100644 --- a/src/mongo/db/index_builds_coordinator_mongod_test.cpp +++ b/src/mongo/db/index_builds_coordinator_mongod_test.cpp @@ -27,17 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/index_builds_coordinator_mongod.h" - +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/commit_quorum_options.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/index_builds_coordinator_mongod.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/index_builds_coordinator_test.cpp b/src/mongo/db/index_builds_coordinator_test.cpp index d97002eb602a6..3bf209eefcb0d 100644 --- a/src/mongo/db/index_builds_coordinator_test.cpp +++ b/src/mongo/db/index_builds_coordinator_test.cpp @@ -27,9 +27,25 @@ * it in the license file. 
*/ +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/index_names.cpp b/src/mongo/db/index_names.cpp index bc9e01f1d5770..ce8855d1d3488 100644 --- a/src/mongo/db/index_names.cpp +++ b/src/mongo/db/index_names.cpp @@ -29,7 +29,13 @@ #include "mongo/db/index_names.h" -#include "mongo/db/jsobj.h" +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/initialize_api_parameters.cpp b/src/mongo/db/initialize_api_parameters.cpp index c0200c25d8b50..382f9617c739f 100644 --- a/src/mongo/db/initialize_api_parameters.cpp +++ b/src/mongo/db/initialize_api_parameters.cpp @@ -27,16 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/initialize_api_parameters.h" - +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/initialize_api_parameters.h" #include "mongo/db/operation_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/atomic_word.h" #include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/initialize_api_parameters.h b/src/mongo/db/initialize_api_parameters.h index e360fd1d24574..e47b358c51f28 100644 --- a/src/mongo/db/initialize_api_parameters.h +++ b/src/mongo/db/initialize_api_parameters.h @@ -31,6 +31,10 @@ #include "api_parameters.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/api_parameters_gen.h" +#include "mongo/db/operation_context.h" + namespace mongo { class BSONObj; diff --git a/src/mongo/db/session/initialize_operation_session_info.cpp b/src/mongo/db/initialize_operation_session_info.cpp similarity index 86% rename from src/mongo/db/session/initialize_operation_session_info.cpp rename to src/mongo/db/initialize_operation_session_info.cpp index 523534b0bc68f..03a5ab2405bde 100644 --- a/src/mongo/db/session/initialize_operation_session_info.cpp +++ b/src/mongo/db/initialize_operation_session_info.cpp @@ -27,28 +27,45 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include "mongo/db/initialize_operation_session_info.h" -#include "mongo/db/session/initialize_operation_session_info.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext* opCtx, - const BSONObj& requestBody, + const OpMsgRequest& opMsgRequest, bool requiresAuth, bool attachToOpCtx, bool isReplSetMemberOrMongos) { auto osi = OperationSessionInfoFromClient::parse(IDLParserContext{"OperationSessionInfo"}, - requestBody); + opMsgRequest.body); auto isAuthorizedForInternalClusterAction = AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(opMsgRequest.getValidatedTenantId()), + ActionType::internal); if (opCtx->getClient()->isInDirectClient()) { uassert(50891, @@ -70,14 +87,14 @@ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext* // logical sessions are disabled. A client may authenticate as the __sytem user, // or as an externally authorized user. if (authSession->isUsingLocalhostBypass() && !authSession->isAuthenticated()) { - return {}; + return OperationSessionInfoFromClient(); } // Do not initialize lsid when auth is enabled and no user is logged in since // there is no sensible uid that can be assigned to it. if (AuthorizationManager::get(opCtx->getServiceContext())->isAuthEnabled() && !authSession->isAuthenticated() && !requiresAuth) { - return {}; + return OperationSessionInfoFromClient(); } } @@ -88,7 +105,7 @@ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext* if (!lsc) { // Ignore session information if the logical session cache has not been set up, e.g. on // the embedded version of mongod. 
- return {}; + return OperationSessionInfoFromClient(); } // If osi lsid includes the uid, makeLogicalSessionId will also verify that the hash @@ -96,7 +113,7 @@ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext* auto lsid = makeLogicalSessionId(osi.getSessionId().value(), opCtx); if (!attachToOpCtx) { - return {}; + return OperationSessionInfoFromClient(); } if (isChildSession(lsid)) { @@ -166,7 +183,7 @@ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext* osi.getStartTransaction().value()); } - return osi; + return OperationSessionInfoFromClient(std::move(osi)); } } // namespace mongo diff --git a/src/mongo/db/session/initialize_operation_session_info.h b/src/mongo/db/initialize_operation_session_info.h similarity index 92% rename from src/mongo/db/session/initialize_operation_session_info.h rename to src/mongo/db/initialize_operation_session_info.h index b2c1fcde67d14..7fecc29d14c99 100644 --- a/src/mongo/db/session/initialize_operation_session_info.h +++ b/src/mongo/db/initialize_operation_session_info.h @@ -29,12 +29,15 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/rpc/op_msg.h" namespace mongo { /** - * Parses the session information from the body of a request and stores the sessionId and txnNumber + * Parses the session information from a request and stores the sessionId and txnNumber * on the current operation context. Must only be called once per operation and should be done right * in the beginning. Note that the session info will be stored in the operation context and returned * only if the current request supports it. For example, if attachToOpCtx is false or this is called @@ -50,7 +53,7 @@ namespace mongo { * this function will throw. 
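The change running through this file is that `initializeOperationSessionInfo` now receives the whole `OpMsgRequest` rather than just the request body, so it can consult the validated tenant id when checking cluster-internal privileges. A hedged caller-side sketch; the surrounding dispatch function is hypothetical, only the helper's new signature comes from this patch:

```cpp
// Hypothetical command-dispatch call site for the updated helper.
void beginOperation(OperationContext* opCtx, const OpMsgRequest& request) {
    auto sessionInfo = initializeOperationSessionInfo(opCtx,
                                                      request,  // whole request, not request.body
                                                      true /* requiresAuth */,
                                                      true /* attachToOpCtx */,
                                                      true /* isReplSetMemberOrMongos */);
    // ... run the command with the parsed lsid/txnNumber attached to opCtx ...
}
```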
*/ OperationSessionInfoFromClient initializeOperationSessionInfo(OperationContext* opCtx, - const BSONObj& requestBody, + const OpMsgRequest& opMsgRequest, bool requiresAuth, bool attachToOpCtx, bool isReplSetMemberOrMongos); diff --git a/src/mongo/db/initialize_server_global_state.cpp b/src/mongo/db/initialize_server_global_state.cpp index 02a30aac334c6..97cbd9fa962cf 100644 --- a/src/mongo/db/initialize_server_global_state.cpp +++ b/src/mongo/db/initialize_server_global_state.cpp @@ -28,37 +28,59 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/initialize_server_global_state.h" -#include "mongo/db/initialize_server_global_state_gen.h" - #include +#include +#include +#include +#include #include #include -#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" #ifndef _WIN32 #include #include -#include -#include #endif -#include "mongo/base/init.h" -#include "mongo/config.h" +#if defined(__APPLE__) +#include +#endif + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/initialize_server_global_state.h" +#include "mongo/db/initialize_server_global_state_gen.h" #include "mongo/db/server_options.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/log_domain_global.h" -#include "mongo/platform/process_id.h" +#include "mongo/logv2/log_manager.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/errno_util.h" #include "mongo/util/exit_code.h" #include "mongo/util/processinfo.h" #include "mongo/util/quick_exit.h" #include "mongo/util/str.h" #include "mongo/util/testing_proctor.h" +#include "mongo/util/time_support.h" +#include +#include +#include +#include -#if defined(__APPLE__) -#include +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include #endif #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/internal_transactions_feature_flag.idl b/src/mongo/db/internal_transactions_feature_flag.idl index 97b0bcd927abe..e4d973183f58b 100644 --- a/src/mongo/db/internal_transactions_feature_flag.idl +++ b/src/mongo/db/internal_transactions_feature_flag.idl @@ -39,13 +39,17 @@ feature_flags: description: Feature flag to enable always creating the config.transactions partial index on step up to primary even if the collection is not empty. cpp_varname: gFeatureFlagAlwaysCreateConfigTransactionsPartialIndexOnStepUp default: false + shouldBeFCVGated: true featureFlagUpdateDocumentShardKeyUsingTransactionApi: description: Feature flag to enable usage of the transaction api for update findAndModify and update commands that change a document's shard key. cpp_varname: gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi default: false + shouldBeFCVGated: true featureFlagUpdateOneWithoutShardKey: description: Feature flag to enable updateOne, deleteOne, and findAndModify without a shard key or _id equality in their filter against a sharded collection. 
cpp_varname: gFeatureFlagUpdateOneWithoutShardKey - default: false + default: true + version: 7.1 + shouldBeFCVGated: true diff --git a/src/mongo/db/introspect.cpp b/src/mongo/db/introspect.cpp index 27f265fe1503c..8ea4b38e41e7a 100644 --- a/src/mongo/db/introspect.cpp +++ b/src/mongo/db/introspect.cpp @@ -29,20 +29,55 @@ #include "mongo/db/introspect.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" -#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" #include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -95,6 +130,13 @@ void profile(OperationContext* opCtx, NetworkOp op) { // killed or timed out. Those are the case we want to have profiling data. auto newClient = opCtx->getServiceContext()->makeClient("profiling"); auto newCtx = newClient->makeOperationContext(); + + // TODO(SERVER-74657): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*newClient.get()); + newClient.get()->setSystemOperationUnkillableByStepdown(lk); + } + // We swap the lockers as that way we preserve locks held in transactions and any other // options set for the locker like maxLockTimeout. auto oldLocker = opCtx->getClient()->swapLockState( @@ -106,25 +148,46 @@ void profile(OperationContext* opCtx, NetworkOp op) { }); AlternativeClientRegion acr(newClient); const auto dbProfilingNS = NamespaceString::makeSystemDotProfileNamespace(ns.dbName()); - AutoGetCollection autoColl(newCtx.get(), dbProfilingNS, MODE_IX); - Database* const db = autoColl.getDb(); - if (!db) { - // Database disappeared. 
- LOGV2(20700, - "note: not profiling because db went away for {namespace}", - "note: not profiling because db went away for namespace", - logAttrs(ns)); - return; + + boost::optional profileCollection; + while (true) { + profileCollection.emplace( + acquireCollection(newCtx.get(), + CollectionAcquisitionRequest( + dbProfilingNS, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(newCtx.get()), + AcquisitionPrerequisites::kWrite), + MODE_IX)); + + Database* const db = + DatabaseHolder::get(newCtx.get())->getDb(newCtx.get(), dbProfilingNS.dbName()); + if (!db) { + // Database disappeared. + LOGV2(20700, + "note: not profiling because db went away for {namespace}", + "note: not profiling because db went away for namespace", + logAttrs(ns)); + return; + } + + if (profileCollection->exists()) { + break; + } + + uassertStatusOK(createProfileCollection(newCtx.get(), db)); + profileCollection.reset(); } - uassertStatusOK(createProfileCollection(newCtx.get(), db)); - CollectionPtr coll(CollectionCatalog::get(newCtx.get()) - ->lookupCollectionByNamespace(newCtx.get(), dbProfilingNS)); + invariant(profileCollection && profileCollection->exists()); WriteUnitOfWork wuow(newCtx.get()); OpDebug* const nullOpDebug = nullptr; - uassertStatusOK(collection_internal::insertDocument( - newCtx.get(), coll, InsertStatement(p), nullOpDebug, false)); + uassertStatusOK(collection_internal::insertDocument(newCtx.get(), + profileCollection->getCollectionPtr(), + InsertStatement(p), + nullOpDebug, + false)); wuow.commit(); } catch (const AssertionException& assertionEx) { LOGV2_WARNING(20703, @@ -137,7 +200,6 @@ void profile(OperationContext* opCtx, NetworkOp op) { } } - Status createProfileCollection(OperationContext* opCtx, Database* db) { invariant(opCtx->lockState()->isDbLockedForMode(db->name(), MODE_IX)); @@ -146,13 +208,14 @@ Status createProfileCollection(OperationContext* opCtx, Database* db) { // Checking the collection exists must also be done in the WCE retry loop. Only retrying // collection creation would endlessly throw errors because the collection exists: must check // and see the collection exists in order to break free. - return writeConflictRetry(opCtx, "createProfileCollection", dbProfilingNS.ns(), [&] { + return writeConflictRetry(opCtx, "createProfileCollection", dbProfilingNS, [&] { const Collection* collection = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, dbProfilingNS); if (collection) { if (!collection->isCapped()) { return Status(ErrorCodes::NamespaceExists, - str::stream() << dbProfilingNS << " exists but isn't capped"); + str::stream() << dbProfilingNS.toStringForErrorMsg() + << " exists but isn't capped"); } return Status::OK(); diff --git a/src/mongo/db/key_generator.cpp b/src/mongo/db/key_generator.cpp index 4527ba7dd110c..9b393c6699dcc 100644 --- a/src/mongo/db/key_generator.cpp +++ b/src/mongo/db/key_generator.cpp @@ -27,16 +27,20 @@ * it in the license file. 
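The rewritten `profile()` above switches from `AutoGetCollection` to the shard-role acquisition API and loops until the `system.profile` collection exists, re-acquiring after creating it. The acquisition call itself is worth isolating; the snippet below restates the pattern used in the hunk above with a generic `nss`, so the argument order is grounded in this patch while the surrounding context is illustrative:

```cpp
// Shard-role acquisition as used by the new profile() code path above.
auto acquisition = acquireCollection(
    opCtx,
    CollectionAcquisitionRequest(nss,
                                 PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
                                 repl::ReadConcernArgs::get(opCtx),
                                 AcquisitionPrerequisites::kWrite),
    MODE_IX);

if (acquisition.exists()) {
    // getCollectionPtr() feeds the same write helpers (collection_internal::insertDocument)
    // that previously took the CollectionPtr from a catalog lookup.
    const CollectionPtr& coll = acquisition.getCollectionPtr();
}
```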
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/key_generator.h" - -#include "mongo/client/read_preference.h" #include "mongo/db/keys_collection_client.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/db/time_proof_service.h" #include "mongo/db/vector_clock.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/platform/compiler.h" #include "mongo/util/fail_point.h" namespace mongo { diff --git a/src/mongo/db/key_generator.h b/src/mongo/db/key_generator.h index b134ab9b2de1b..3c94fcc9a61b5 100644 --- a/src/mongo/db/key_generator.h +++ b/src/mongo/db/key_generator.h @@ -31,6 +31,7 @@ #include +#include "mongo/base/status.h" #include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/db/key_generator_update_test.cpp b/src/mongo/db/key_generator_update_test.cpp index 70464ca01ffae..0559271f740a8 100644 --- a/src/mongo/db/key_generator_update_test.cpp +++ b/src/mongo/db/key_generator_update_test.cpp @@ -27,21 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include #include #include +#include +#include -#include "mongo/db/jsobj.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/sha1_block.h" #include "mongo/db/key_generator.h" +#include "mongo/db/keys_collection_client.h" #include "mongo/db/keys_collection_client_sharded.h" #include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/config/config_server_test_fixture.h" +#include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/time_proof_service.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/s/grid.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" namespace mongo { diff --git a/src/mongo/db/keypattern.cpp b/src/mongo/db/keypattern.cpp index 5a64ad8a27a2e..49db9b88e92d6 100644 --- a/src/mongo/db/keypattern.cpp +++ b/src/mongo/db/keypattern.cpp @@ -29,7 +29,12 @@ #include "mongo/db/keypattern.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/index_names.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/keypattern.h b/src/mongo/db/keypattern.h index 986d259eebdb4..75e2716c9c6a0 100644 --- a/src/mongo/db/keypattern.h +++ b/src/mongo/db/keypattern.h @@ -29,10 +29,18 @@ #pragma once +#include +#include + #include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/jsobj.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/util/str.h" namespace mongo { @@ -84,6 +92,14 @@ class KeyPattern { return _pattern; } + BSONObj serializeForIDL(const SerializationOptions& options = {}) const { + BSONObjBuilder bob; + for (const auto& e : _pattern) { + bob.appendAs(e, 
options.serializeIdentifier(e.fieldNameStringData())); + } + return bob.obj(); + } + /** * Returns a string representation of this KeyPattern. */ diff --git a/src/mongo/db/keypattern.idl b/src/mongo/db/keypattern.idl index 158c742faab33..cd67f8e54286d 100644 --- a/src/mongo/db/keypattern.idl +++ b/src/mongo/db/keypattern.idl @@ -38,5 +38,5 @@ types: bson_serialization_type: object description: An expression describing a transformation of a document into a document key. cpp_type: KeyPattern - serializer: KeyPattern::toBSON + serializer: KeyPattern::serializeForIDL deserializer: KeyPattern::fromBSON diff --git a/src/mongo/db/keypattern_test.cpp b/src/mongo/db/keypattern_test.cpp index fbb7c4e7af6a6..8c9d05ce0bca4 100644 --- a/src/mongo/db/keypattern_test.cpp +++ b/src/mongo/db/keypattern_test.cpp @@ -29,7 +29,13 @@ #include "mongo/db/keypattern.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace { diff --git a/src/mongo/db/keys_collection_cache.cpp b/src/mongo/db/keys_collection_cache.cpp index d5ba9274c76a2..b958f769047af 100644 --- a/src/mongo/db/keys_collection_cache.cpp +++ b/src/mongo/db/keys_collection_cache.cpp @@ -27,11 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/keys_collection_cache.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/keys_collection_cache.h" #include "mongo/db/keys_collection_client.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/keys_collection_cache.h b/src/mongo/db/keys_collection_cache.h index 31103e670ee22..ec0d1836cf954 100644 --- a/src/mongo/db/keys_collection_cache.h +++ b/src/mongo/db/keys_collection_cache.h @@ -30,9 +30,13 @@ #pragma once #include +#include +#include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/keys_collection_cache_test.cpp b/src/mongo/db/keys_collection_cache_test.cpp index b260dbb82e995..a8e00c3027798 100644 --- a/src/mongo/db/keys_collection_cache_test.cpp +++ b/src/mongo/db/keys_collection_cache_test.cpp @@ -27,23 +27,46 @@ * it in the license file. 
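The new `KeyPattern::serializeForIDL` (split across the two lines above) and the `serializer:` switch in `keypattern.idl` mean that key patterns emitted through IDL types now run each field name through `SerializationOptions::serializeIdentifier` instead of being dumped verbatim by `toBSON`. A small illustrative example; the redaction behaviour sketched in the comments is an assumption about how callers configure the options, not something shown in this hunk:

```cpp
// With default-constructed options the output matches toBSON().
KeyPattern pattern(BSON("a" << 1 << "b.c" << 1));
BSONObj plain = pattern.serializeForIDL();  // {a: 1, "b.c": 1}

// With options whose serializeIdentifier() transforms names (e.g. for log redaction),
// only the field names change; the values pass through appendAs() untouched:
// {a: 1, "b.c": 1}  ->  {"HASH(a)": 1, "HASH(b.c)": 1}
```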
*/ -#include "mongo/platform/basic.h" - +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/jsobj.h" #include "mongo/db/keys_collection_cache.h" +#include "mongo/db/keys_collection_client.h" #include "mongo/db/keys_collection_client_direct.h" #include "mongo/db/keys_collection_client_sharded.h" #include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/s/config/config_server_test_fixture.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/time_proof_service.h" +#include "mongo/db/transaction_resources.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/grid.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -72,8 +95,11 @@ class CacheTest : public ConfigServerTestFixture { } void insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) { - AutoGetCollection coll(opCtx, nss, MODE_IX); - auto updateResult = Helpers::upsert(opCtx, nss, doc); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + auto updateResult = Helpers::upsert(opCtx, collection, doc); ASSERT_EQ(0, updateResult.numDocsModified); } @@ -118,6 +144,27 @@ class CacheTest : public ConfigServerTestFixture { ASSERT_OK(getStatusFromWriteCommandReply(result)); } + void testRefreshDoesNotErrorIfExternalKeysCacheIsEmpty(KeysCollectionClient* client); + + void testGetKeyShouldReturnCorrectKeysAfterRefresh(KeysCollectionClient* client); + + void testGetInternalKeyShouldReturnErrorIfNoKeyIsValidForGivenTime( + KeysCollectionClient* client); + + void testGetInternalKeyShouldReturnOldestKeyPossible(KeysCollectionClient* client); + + void testRefreshShouldNotGetInternalKeysForOtherPurpose(KeysCollectionClient* client); + + void testRefreshShouldNotGetExternalKeysForOtherPurpose(KeysCollectionClient* client); + + void testGetRefreshCanIncrementallyGetNewKeys(KeysCollectionClient* client); + + void testCacheExternalKeyBasic(KeysCollectionClient* client); + + void testRefreshClearsRemovedExternalKeys(KeysCollectionClient* client); + + void testRefreshHandlesKeysReceivingTTLValue(KeysCollectionClient* client); + private: std::unique_ptr _catalogClient; std::unique_ptr _directClient; @@ -151,8 +198,8 @@ TEST_F(CacheTest, RefreshErrorsIfInternalCacheIsEmpty) { ASSERT_FALSE(status.reason().empty()); } -TEST_F(CacheTest, RefreshDoesNotErrorIfExternalKeysCacheIsEmpty) { - KeysCollectionCache cache("test", catalogClient()); +void CacheTest::testRefreshDoesNotErrorIfExternalKeysCacheIsEmpty(KeysCollectionClient* 
client) { + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey1(1); origKey1.setKeysCollectionDocumentBase( @@ -164,56 +211,16 @@ TEST_F(CacheTest, RefreshDoesNotErrorIfExternalKeysCacheIsEmpty) { ASSERT_OK(status); } - -TEST_F(CacheTest, GetKeyShouldReturnCorrectKeyAfterRefreshSharded) { - KeysCollectionCache cache("test", catalogClient()); - - KeysCollectionDocument origKey1(1); - origKey1.setKeysCollectionDocumentBase( - {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0))}); - ASSERT_OK(insertToConfigCollection( - operationContext(), NamespaceString::kKeysCollectionNamespace, origKey1.toBSON())); - - auto refreshStatus = cache.refresh(operationContext()); - ASSERT_OK(refreshStatus.getStatus()); - - { - auto key = refreshStatus.getValue(); - ASSERT_EQ(1, key.getKeyId()); - ASSERT_EQ(origKey1.getKey(), key.getKey()); - ASSERT_EQ("test", key.getPurpose()); - ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp()); - } - - auto swInternalKey = cache.getInternalKey(LogicalTime(Timestamp(1, 0))); - ASSERT_OK(swInternalKey.getStatus()); - - { - auto key = swInternalKey.getValue(); - ASSERT_EQ(1, key.getKeyId()); - ASSERT_EQ(origKey1.getKey(), key.getKey()); - ASSERT_EQ("test", key.getPurpose()); - ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp()); - } - - swInternalKey = cache.getInternalKeyById(1, LogicalTime(Timestamp(1, 0))); - ASSERT_OK(swInternalKey.getStatus()); - - { - auto key = swInternalKey.getValue(); - ASSERT_EQ(1, key.getKeyId()); - ASSERT_EQ(origKey1.getKey(), key.getKey()); - ASSERT_EQ("test", key.getPurpose()); - ASSERT_EQ(Timestamp(105, 0), key.getExpiresAt().asTimestamp()); - } - - auto swExternalKeys = cache.getExternalKeysById(1, LogicalTime(Timestamp(1, 0))); - ASSERT_EQ(ErrorCodes::KeyNotFound, swExternalKeys.getStatus()); +TEST_F(CacheTest, RefreshDoesNotErrorIfExternalKeysCacheIsEmptyShardedClient) { + testRefreshDoesNotErrorIfExternalKeysCacheIsEmpty(catalogClient()); } -TEST_F(CacheTest, GetKeyShouldReturnCorrectKeysAfterRefreshDirectClient) { - KeysCollectionCache cache("test", directClient()); +TEST_F(CacheTest, RefreshDoesNotErrorIfExternalKeysCacheIsEmptyDirectClient) { + testRefreshDoesNotErrorIfExternalKeysCacheIsEmpty(directClient()); +} +void CacheTest::testGetKeyShouldReturnCorrectKeysAfterRefresh(KeysCollectionClient* client) { + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey0(1); origKey0.setKeysCollectionDocumentBase( {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0))}); @@ -222,14 +229,16 @@ TEST_F(CacheTest, GetKeyShouldReturnCorrectKeysAfterRefreshDirectClient) { // Use external keys with the same keyId and expiresAt as the internal key to test that the // cache correctly tackles key collisions. 
- ExternalKeysCollectionDocument origKey1(OID::gen(), 1, kMigrationId1); + ExternalKeysCollectionDocument origKey1(OID::gen(), 1); + origKey1.setMigrationId(kMigrationId1); origKey1.setKeysCollectionDocumentBase( {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0))}); origKey1.setTTLExpiresAt(getServiceContext()->getFastClockSource()->now() + Seconds(30)); insertDocument( operationContext(), NamespaceString::kExternalKeysCollectionNamespace, origKey1.toBSON()); - ExternalKeysCollectionDocument origKey2(OID::gen(), 1, kMigrationId2); + ExternalKeysCollectionDocument origKey2(OID::gen(), 1); + origKey2.setMigrationId(kMigrationId2); origKey2.setKeysCollectionDocumentBase( {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(205, 0))}); insertDocument( @@ -316,8 +325,17 @@ TEST_F(CacheTest, GetKeyShouldReturnCorrectKeysAfterRefreshDirectClient) { ASSERT_EQ(ErrorCodes::KeyNotFound, swExternalKeys.getStatus()); } -TEST_F(CacheTest, GetInternalKeyShouldReturnErrorIfNoKeyIsValidForGivenTime) { - KeysCollectionCache cache("test", catalogClient()); +TEST_F(CacheTest, GetKeyShouldReturnCorrectKeyAfterRefreshShardedClient) { + testGetKeyShouldReturnCorrectKeysAfterRefresh(catalogClient()); +} + +TEST_F(CacheTest, GetKeyShouldReturnCorrectKeysAfterRefreshDirectClient) { + testGetKeyShouldReturnCorrectKeysAfterRefresh(directClient()); +} + +void CacheTest::testGetInternalKeyShouldReturnErrorIfNoKeyIsValidForGivenTime( + KeysCollectionClient* client) { + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey1(1); origKey1.setKeysCollectionDocumentBase( @@ -340,8 +358,16 @@ TEST_F(CacheTest, GetInternalKeyShouldReturnErrorIfNoKeyIsValidForGivenTime) { ASSERT_EQ(ErrorCodes::KeyNotFound, swKey.getStatus()); } -TEST_F(CacheTest, GetInternalKeyShouldReturnOldestKeyPossible) { - KeysCollectionCache cache("test", catalogClient()); +TEST_F(CacheTest, GetInternalKeyShouldReturnErrorIfNoKeyIsValidForGivenTimeShardedClient) { + testGetInternalKeyShouldReturnErrorIfNoKeyIsValidForGivenTime(catalogClient()); +} + +TEST_F(CacheTest, GetInternalKeyShouldReturnErrorIfNoKeyIsValidForGivenTimeDirectClient) { + testGetInternalKeyShouldReturnErrorIfNoKeyIsValidForGivenTime(directClient()); +} + +void CacheTest::testGetInternalKeyShouldReturnOldestKeyPossible(KeysCollectionClient* client) { + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey0(0); origKey0.setKeysCollectionDocumentBase( @@ -384,8 +410,16 @@ TEST_F(CacheTest, GetInternalKeyShouldReturnOldestKeyPossible) { } } -TEST_F(CacheTest, RefreshShouldNotGetInternalKeysForOtherPurpose) { - KeysCollectionCache cache("test", catalogClient()); +TEST_F(CacheTest, GetInternalKeyShouldReturnOldestKeyPossibleShardedClient) { + testGetInternalKeyShouldReturnOldestKeyPossible(catalogClient()); +} + +TEST_F(CacheTest, GetInternalKeyShouldReturnOldestKeyPossibleDirectClient) { + testGetInternalKeyShouldReturnOldestKeyPossible(directClient()); +} + +void CacheTest::testRefreshShouldNotGetInternalKeysForOtherPurpose(KeysCollectionClient* client) { + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey0(0); origKey0.setKeysCollectionDocumentBase( @@ -430,8 +464,16 @@ TEST_F(CacheTest, RefreshShouldNotGetInternalKeysForOtherPurpose) { } } -TEST_F(CacheTest, RefreshShouldNotGetExternalKeysForOtherPurpose) { - KeysCollectionCache cache("test", directClient()); +TEST_F(CacheTest, RefreshShouldNotGetInternalKeysForOtherPurposeShardedClient) { + 
testRefreshShouldNotGetInternalKeysForOtherPurpose(catalogClient()); +} + +TEST_F(CacheTest, RefreshShouldNotGetInternalKeysForOtherPurposeDirectClient) { + testRefreshShouldNotGetInternalKeysForOtherPurpose(directClient()); +} + +void CacheTest::testRefreshShouldNotGetExternalKeysForOtherPurpose(KeysCollectionClient* client) { + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey0(0); origKey0.setKeysCollectionDocumentBase( @@ -439,7 +481,8 @@ TEST_F(CacheTest, RefreshShouldNotGetExternalKeysForOtherPurpose) { insertDocument( operationContext(), NamespaceString::kKeysCollectionNamespace, origKey0.toBSON()); - ExternalKeysCollectionDocument origKey1(OID::gen(), 1, kMigrationId1); + ExternalKeysCollectionDocument origKey1(OID::gen(), 1); + origKey1.setMigrationId(kMigrationId1); origKey1.setKeysCollectionDocumentBase( {"dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0))}); insertDocument( @@ -453,7 +496,8 @@ TEST_F(CacheTest, RefreshShouldNotGetExternalKeysForOtherPurpose) { ASSERT_EQ(ErrorCodes::KeyNotFound, swKey.getStatus()); } - ExternalKeysCollectionDocument origKey2(OID::gen(), 2, kMigrationId1); + ExternalKeysCollectionDocument origKey2(OID::gen(), 2); + origKey2.setMigrationId(kMigrationId2); origKey2.setKeysCollectionDocumentBase( {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(110, 0))}); insertDocument( @@ -478,8 +522,16 @@ TEST_F(CacheTest, RefreshShouldNotGetExternalKeysForOtherPurpose) { } } -TEST_F(CacheTest, RefreshCanIncrementallyGetNewKeys) { - KeysCollectionCache cache("test", catalogClient()); +TEST_F(CacheTest, RefreshShouldNotGetExternalKeysForOtherPurposeShardedClient) { + testRefreshShouldNotGetExternalKeysForOtherPurpose(catalogClient()); +} + +TEST_F(CacheTest, RefreshShouldNotGetExternalKeysForOtherPurposeDirectClient) { + testRefreshShouldNotGetExternalKeysForOtherPurpose(directClient()); +} + +void CacheTest::testGetRefreshCanIncrementallyGetNewKeys(KeysCollectionClient* client) { + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey0(0); origKey0.setKeysCollectionDocumentBase( @@ -537,13 +589,22 @@ TEST_F(CacheTest, RefreshCanIncrementallyGetNewKeys) { } } -TEST_F(CacheTest, CacheExternalKeyBasic) { - KeysCollectionCache cache("test", catalogClient()); +TEST_F(CacheTest, RefreshCanIncrementallyGetNewKeysShardedClient) { + testGetRefreshCanIncrementallyGetNewKeys(catalogClient()); +} + +TEST_F(CacheTest, RefreshCanIncrementallyGetNewKeysDirectClient) { + testGetRefreshCanIncrementallyGetNewKeys(directClient()); +} + +void CacheTest::testCacheExternalKeyBasic(KeysCollectionClient* client) { + KeysCollectionCache cache("test", client); auto swExternalKeys = cache.getExternalKeysById(5, LogicalTime(Timestamp(10, 1))); ASSERT_EQ(ErrorCodes::KeyNotFound, swExternalKeys.getStatus()); - ExternalKeysCollectionDocument externalKey(OID::gen(), 5, kMigrationId1); + ExternalKeysCollectionDocument externalKey(OID::gen(), 5); + externalKey.setMigrationId(kMigrationId1); externalKey.setTTLExpiresAt(getServiceContext()->getFastClockSource()->now() + Seconds(30)); externalKey.setKeysCollectionDocumentBase( {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0))}); @@ -561,7 +622,15 @@ TEST_F(CacheTest, CacheExternalKeyBasic) { ASSERT_EQ(*externalKey.getTTLExpiresAt(), *cachedKey.getTTLExpiresAt()); } -TEST_F(CacheTest, RefreshClearsRemovedExternalKeys) { +TEST_F(CacheTest, CacheExternalKeyBasicShardedClient) { + testCacheExternalKeyBasic(catalogClient()); +} + 
+TEST_F(CacheTest, CacheExternalKeyBasicDirectClient) { + testCacheExternalKeyBasic(directClient()); +} + +void CacheTest::testRefreshClearsRemovedExternalKeys(KeysCollectionClient* client) { - KeysCollectionCache cache("test", directClient()); + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey0(1); @@ -570,14 +639,16 @@ void CacheTest::testRefreshClearsRemovedExternalKeys(KeysCollectionClient* client) { insertDocument( operationContext(), NamespaceString::kKeysCollectionNamespace, origKey0.toBSON()); - ExternalKeysCollectionDocument origKey1(OID::gen(), 1, kMigrationId1); + ExternalKeysCollectionDocument origKey1(OID::gen(), 1); + origKey1.setMigrationId(kMigrationId1); origKey1.setKeysCollectionDocumentBase( {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0))}); origKey1.setTTLExpiresAt(getServiceContext()->getFastClockSource()->now() + Seconds(30)); insertDocument( operationContext(), NamespaceString::kExternalKeysCollectionNamespace, origKey1.toBSON()); - ExternalKeysCollectionDocument origKey2(OID::gen(), 1, kMigrationId2); + ExternalKeysCollectionDocument origKey2(OID::gen(), 1); + origKey2.setMigrationId(kMigrationId2); origKey2.setKeysCollectionDocumentBase( {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(205, 0))}); insertDocument( @@ -634,8 +705,16 @@ } } -TEST_F(CacheTest, RefreshHandlesKeysReceivingTTLValue) { - KeysCollectionCache cache("test", directClient()); +TEST_F(CacheTest, RefreshClearsRemovedExternalKeysShardedClient) { + testRefreshClearsRemovedExternalKeys(catalogClient()); +} + +TEST_F(CacheTest, RefreshClearsRemovedExternalKeysDirectClient) { + testRefreshClearsRemovedExternalKeys(directClient()); +} + +void CacheTest::testRefreshHandlesKeysReceivingTTLValue(KeysCollectionClient* client) { + KeysCollectionCache cache("test", client); KeysCollectionDocument origKey0(1); origKey0.setKeysCollectionDocumentBase( @@ -643,7 +722,8 @@ void CacheTest::testRefreshHandlesKeysReceivingTTLValue(KeysCollectionClient* client) { insertDocument( operationContext(), NamespaceString::kKeysCollectionNamespace, origKey0.toBSON()); - ExternalKeysCollectionDocument origKey1(OID::gen(), 1, kMigrationId1); + ExternalKeysCollectionDocument origKey1(OID::gen(), 1); + origKey1.setMigrationId(kMigrationId1); origKey1.setKeysCollectionDocumentBase( {"test", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0))}); insertDocument( @@ -689,6 +769,14 @@ } } +TEST_F(CacheTest, RefreshHandlesKeysReceivingTTLValueShardedClient) { + testRefreshHandlesKeysReceivingTTLValue(catalogClient()); +} + +TEST_F(CacheTest, RefreshHandlesKeysReceivingTTLValueDirectClient) { + testRefreshHandlesKeysReceivingTTLValue(directClient()); +} + TEST_F(CacheTest, ResetCacheShouldNotClearKeysIfMajorityReadsAreSupported) { auto directClient = std::make_unique(false /* mustUseLocalReads */); KeysCollectionCache cache("test", directClient.get()); diff --git a/src/mongo/db/keys_collection_client_direct.cpp b/src/mongo/db/keys_collection_client_direct.cpp index 26acbe48d5306..4077f58028056 100644 --- a/src/mongo/db/keys_collection_client_direct.cpp +++ b/src/mongo/db/keys_collection_client_direct.cpp @@ -28,26 +28,33 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/keys_collection_client_direct.h" - -#include +#include +#include +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include
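The long run of hunks above applies one mechanical pattern to `keys_collection_cache_test.cpp`: each former `TEST_F` body becomes a fixture helper parameterised on the `KeysCollectionClient`, plus two thin wrappers that run it against the sharded (catalog) client and the direct client. Sketched below with hypothetical names; the helper must construct its cache from the `client` argument so that both wrappers actually exercise different clients:

```cpp
// Generic shape of the refactor applied throughout the test file above.
void CacheTest::testSomeCacheBehaviour(KeysCollectionClient* client) {
    KeysCollectionCache cache("test", client);  // always the injected client, never directClient()
    // ... insert key documents and assert on cache.refresh()/getInternalKey() ...
}

TEST_F(CacheTest, SomeCacheBehaviourShardedClient) {
    testSomeCacheBehaviour(catalogClient());
}

TEST_F(CacheTest, SomeCacheBehaviourDirectClient) {
    testSomeCacheBehaviour(directClient());
}
```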
"mongo/bson/util/bson_extract.h" #include "mongo/client/read_preference.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/keys_collection_client_direct.h" #include "mongo/db/keys_collection_document_gen.h" #include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" -#include "mongo/db/service_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/keys_collection_client_direct.h b/src/mongo/db/keys_collection_client_direct.h index 9457788d827ee..a619fc53ac438 100644 --- a/src/mongo/db/keys_collection_client_direct.h +++ b/src/mongo/db/keys_collection_client_direct.h @@ -29,11 +29,26 @@ #pragma once +#include #include #include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" #include "mongo/db/keys_collection_client.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/rs_local_client.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/s/client/shard.h" namespace mongo { @@ -46,8 +61,8 @@ class KeysCollectionClientDirect : public KeysCollectionClient { KeysCollectionClientDirect(bool mustUseLocalReads); /** - * Returns keys in admin.system.keys that match the given purpose and have an expiresAt value - * greater than newerThanThis. Uses readConcern level majority if possible. + * Returns internal keys for the given purpose and have an expiresAt value greater than + * newerThanThis. Uses readConcern level majority if possible. */ StatusWith> getNewInternalKeys( OperationContext* opCtx, @@ -56,7 +71,7 @@ class KeysCollectionClientDirect : public KeysCollectionClient { bool tryUseMajority) override; /** - * Returns all keys in config.external_validation_keys that match the given purpose. + * Returns all external (i.e. validation-only) keys for the given purpose. */ StatusWith> getAllExternalKeys( OperationContext* opCtx, StringData purpose) override; @@ -76,7 +91,7 @@ class KeysCollectionClientDirect : public KeysCollectionClient { private: /** - * Returns keys in the given collection that match the given purpose and have an expiresAt value + * Returns keys in the given collection for the given purpose and have an expiresAt value * greater than newerThanThis, using readConcern level majority if possible. */ template diff --git a/src/mongo/db/keys_collection_client_sharded.cpp b/src/mongo/db/keys_collection_client_sharded.cpp index c6af8f729194d..e2f7334ab7bfd 100644 --- a/src/mongo/db/keys_collection_client_sharded.cpp +++ b/src/mongo/db/keys_collection_client_sharded.cpp @@ -27,10 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/keys_collection_client_sharded.h" - +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/s/catalog/sharding_catalog_client.h" namespace mongo { @@ -38,20 +38,19 @@ namespace mongo { KeysCollectionClientSharded::KeysCollectionClientSharded(ShardingCatalogClient* client) : _catalogClient(client) {} - StatusWith> KeysCollectionClientSharded::getNewInternalKeys( OperationContext* opCtx, StringData purpose, const LogicalTime& newerThanThis, bool tryUseMajority) { - - return _catalogClient->getNewKeys( + return _catalogClient->getNewInternalKeys( opCtx, purpose, newerThanThis, repl::ReadConcernLevel::kMajorityReadConcern); } StatusWith> KeysCollectionClientSharded::getAllExternalKeys(OperationContext* opCtx, StringData purpose) { - return std::vector{}; + return _catalogClient->getAllExternalKeys( + opCtx, purpose, repl::ReadConcernLevel::kMajorityReadConcern); } Status KeysCollectionClientSharded::insertNewKey(OperationContext* opCtx, const BSONObj& doc) { diff --git a/src/mongo/db/keys_collection_client_sharded.h b/src/mongo/db/keys_collection_client_sharded.h index 67f0f9e7109ea..377d9e1884077 100644 --- a/src/mongo/db/keys_collection_client_sharded.h +++ b/src/mongo/db/keys_collection_client_sharded.h @@ -29,7 +29,15 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/keys_collection_client.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" namespace mongo { @@ -40,8 +48,8 @@ class KeysCollectionClientSharded : public KeysCollectionClient { KeysCollectionClientSharded(ShardingCatalogClient*); /** - * Returns keys in the config server's admin.system.keys that match the given purpose and have - * an expiresAt value greater than newerThanThis. Uses readConcern level majority if possible. + * Returns internal keys for the given purpose that have an expiresAt value greater than + * newerThanThis on the config server. Uses readConcern level majority if possible. */ StatusWith> getNewInternalKeys( OperationContext* opCtx, @@ -50,8 +58,8 @@ class KeysCollectionClientSharded : public KeysCollectionClient { bool tryUseMajority) override; /** - * Returns validation-only keys copied from other clusters that match the given purpose. - * Currently, a sharded cluster never copies cluster time keys from other clusters. + * Returns all external (i.e. validation-only) keys for the given purpose on the config server. */ StatusWith> getAllExternalKeys( OperationContext* opCtx, StringData purpose) override; diff --git a/src/mongo/db/keys_collection_document.idl b/src/mongo/db/keys_collection_document.idl index 5dc937dc81eca..ab41d710a98a7 100644 --- a/src/mongo/db/keys_collection_document.idl +++ b/src/mongo/db/keys_collection_document.idl @@ -84,6 +84,7 @@ structs: migrationId: type: uuid description: "The id of the tenant migration that inserted this key." + optional: true ttlExpiresAt: type: date description: >- diff --git a/src/mongo/db/keys_collection_document_test.cpp b/src/mongo/db/keys_collection_document_test.cpp index 84e0386858409..dd690cfc4dcfe 100644 --- a/src/mongo/db/keys_collection_document_test.cpp +++ b/src/mongo/db/keys_collection_document_test.cpp @@ -27,12 +27,23 @@ * it in the license file.
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/jsobj.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/hash_block.h" +#include "mongo/crypto/sha1_block.h" #include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/time_proof_service.h" -#include "mongo/unittest/unittest.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/keys_collection_manager.cpp b/src/mongo/db/keys_collection_manager.cpp index 31e972919489e..307f060d8ceb9 100644 --- a/src/mongo/db/keys_collection_manager.cpp +++ b/src/mongo/db/keys_collection_manager.cpp @@ -27,24 +27,38 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/keys_collection_manager.h" - +#include +#include #include - +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" #include "mongo/db/key_generator.h" #include "mongo/db/keys_collection_cache.h" #include "mongo/db/keys_collection_client.h" +#include "mongo/db/keys_collection_manager.h" #include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/vector_clock.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" -#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -240,12 +254,6 @@ void KeysCollectionManager::PeriodicRunner::_doPeriodicRefresh(ServiceContext* s std::string threadName, Milliseconds refreshInterval) { ThreadClient tc(threadName, service); - - { - stdx::lock_guard lk(*tc.get()); - tc.get()->setSystemOperationKillableByStepdown(lk); - } - ON_BLOCK_EXIT([this]() mutable { _hasSeenKeys.store(false); }); unsigned errorCount = 0; diff --git a/src/mongo/db/keys_collection_manager.h b/src/mongo/db/keys_collection_manager.h index 50bfe04abfe49..0ed89fe3d55bc 100644 --- a/src/mongo/db/keys_collection_manager.h +++ b/src/mongo/db/keys_collection_manager.h @@ -29,16 +29,23 @@ #pragma once +#include #include #include +#include +#include #include "mongo/base/status_with.h" #include "mongo/db/key_generator.h" #include "mongo/db/keys_collection_cache.h" #include "mongo/db/keys_collection_document_gen.h" #include "mongo/db/keys_collection_manager_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" #include "mongo/util/concurrency/notification.h" #include "mongo/util/duration.h" diff --git a/src/mongo/db/keys_collection_manager_sharding_test.cpp b/src/mongo/db/keys_collection_manager_sharding_test.cpp index 
ca7877ef5759a..b7a8f10d77f10 100644 --- a/src/mongo/db/keys_collection_manager_sharding_test.cpp +++ b/src/mongo/db/keys_collection_manager_sharding_test.cpp @@ -27,19 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/jsobj.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/keys_collection_client_direct.h" #include "mongo/db/keys_collection_client_sharded.h" #include "mongo/db/keys_collection_document_gen.h" #include "mongo/db/keys_collection_manager.h" +#include "mongo/db/keys_collection_manager_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/config/config_server_test_fixture.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/time_proof_service.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/grid.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -416,7 +440,8 @@ TEST_F(KeysManagerDirectTest, CacheExternalKeyBasic) { // Refresh immediately to prevent a refresh from discovering the inserted keys. keyManager()->refreshNow(operationContext()); - ExternalKeysCollectionDocument externalKey1(OID::gen(), 1, kMigrationId1); + ExternalKeysCollectionDocument externalKey1(OID::gen(), 1); + externalKey1.setMigrationId(kMigrationId1); externalKey1.setKeysCollectionDocumentBase( {"dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0))}); ASSERT_OK(insertToConfigCollection(operationContext(), @@ -453,7 +478,8 @@ TEST_F(KeysManagerDirectTest, WillNotCacheExternalKeyWhenMonitoringIsStopped) { ASSERT_OK(insertToConfigCollection( operationContext(), NamespaceString::kKeysCollectionNamespace, internalKey.toBSON())); - ExternalKeysCollectionDocument externalKey1(OID::gen(), 1, kMigrationId1); + ExternalKeysCollectionDocument externalKey1(OID::gen(), 1); + externalKey1.setMigrationId(kMigrationId1); externalKey1.setKeysCollectionDocumentBase( {"dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0))}); ASSERT_OK(insertToConfigCollection(operationContext(), @@ -471,7 +497,8 @@ TEST_F(KeysManagerDirectTest, WillNotCacheExternalKeyWhenMonitoringIsStopped) { keyManager()->stopMonitoring(); - ExternalKeysCollectionDocument externalKey2(OID::gen(), 1, kMigrationId2); + ExternalKeysCollectionDocument externalKey2(OID::gen(), 1); + externalKey2.setMigrationId(kMigrationId2); externalKey2.setKeysCollectionDocumentBase( {"dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(100, 0))}); diff --git a/src/mongo/db/keys_collection_util.cpp b/src/mongo/db/keys_collection_util.cpp new file mode 100644 index 0000000000000..db2adac4f3850 --- /dev/null +++ b/src/mongo/db/keys_collection_util.cpp @@ -0,0 +1,103 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include "mongo/db/keys_collection_util.h" + +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/dbhelpers.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" + +namespace mongo { +namespace keys_collection_util { + +ExternalKeysCollectionDocument makeExternalClusterTimeKeyDoc(BSONObj keyDoc, + boost::optional migrationId, + boost::optional expireAt) { + auto originalKeyDoc = KeysCollectionDocument::parse(IDLParserContext("keyDoc"), keyDoc); + + ExternalKeysCollectionDocument externalKeyDoc(OID::gen(), originalKeyDoc.getKeyId()); + externalKeyDoc.setMigrationId(migrationId); + externalKeyDoc.setKeysCollectionDocumentBase(originalKeyDoc.getKeysCollectionDocumentBase()); + externalKeyDoc.setTTLExpiresAt(expireAt); + + return externalKeyDoc; +} + +repl::OpTime storeExternalClusterTimeKeyDocs(OperationContext* opCtx, + std::vector keyDocs) { + const auto& nss = NamespaceString::kExternalKeysCollectionNamespace; + + for (auto& keyDoc : keyDocs) { + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + writeConflictRetry(opCtx, "storeExternalClusterTimeKeyDocs", nss, [&] { + const auto filter = + BSON(ExternalKeysCollectionDocument::kIdFieldName << keyDoc.getId()); + const auto updateMod = keyDoc.toBSON(); + + Helpers::upsert(opCtx, + collection, + filter, + updateMod, + /*fromMigrate=*/false); + }); + } + + return 
repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); +} + +} // namespace keys_collection_util +} // namespace mongo diff --git a/src/mongo/db/keys_collection_util.h b/src/mongo/db/keys_collection_util.h new file mode 100644 index 0000000000000..e47867564cfd9 --- /dev/null +++ b/src/mongo/db/keys_collection_util.h @@ -0,0 +1,62 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" + +namespace mongo { +namespace keys_collection_util { + +/* + * Creates an ExternalKeysCollectionDocument representing a config.external_validation_keys + * document created based on the given admin.system.keys document BSONObj. + */ +ExternalKeysCollectionDocument makeExternalClusterTimeKeyDoc(BSONObj keyDoc, + boost::optional migrationId, + boost::optional expireAt); + +/* + * Upserts the given ExternalKeysCollectionDocuments into the + * config.external_validation_keys collection, and returns the optime for the upserts.
+ */ +repl::OpTime storeExternalClusterTimeKeyDocs(OperationContext* opCtx, + std::vector keyDocs); + +} // namespace keys_collection_util +} // namespace mongo diff --git a/src/mongo/db/log_process_details.cpp b/src/mongo/db/log_process_details.cpp index c35b6ae33d601..5e6e6348c8811 100644 --- a/src/mongo/db/log_process_details.cpp +++ b/src/mongo/db/log_process_details.cpp @@ -28,21 +28,24 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/log_process_details.h" - +#include #include +#include -#include "mongo/bson/bsonobj.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/bson/oid.h" +#include "mongo/db/log_process_details.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_options.h" #include "mongo/db/server_options_server_helpers.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/process_id.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/processinfo.h" #include "mongo/util/version.h" diff --git a/src/mongo/db/logical_session_cache_factory_mongod.cpp b/src/mongo/db/logical_session_cache_factory_mongod.cpp index 8e161b1fa458f..2598d3d26601b 100644 --- a/src/mongo/db/logical_session_cache_factory_mongod.cpp +++ b/src/mongo/db/logical_session_cache_factory_mongod.cpp @@ -28,19 +28,21 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/logical_session_cache_factory_mongod.h" - #include +#include +#include "mongo/db/logical_session_cache_factory_mongod.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sessions_collection_config_server.h" #include "mongo/db/service_liaison_mongod.h" #include "mongo/db/session/logical_session_cache_impl.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/sessions_collection.h" #include "mongo/db/session/sessions_collection_rs.h" #include "mongo/db/session/sessions_collection_standalone.h" #include "mongo/s/sessions_collection_sharded.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/logical_time.cpp b/src/mongo/db/logical_time.cpp index 86dea2106926f..04dbecaac1884 100644 --- a/src/mongo/db/logical_time.cpp +++ b/src/mongo/db/logical_time.cpp @@ -31,8 +31,13 @@ #include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/logical_time.h b/src/mongo/db/logical_time.h index 68b5c14331617..78efdf29cbf8a 100644 --- a/src/mongo/db/logical_time.h +++ b/src/mongo/db/logical_time.h @@ -29,7 +29,15 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/timestamp.h" +#include "mongo/platform/mutex.h" namespace mongo { diff --git a/src/mongo/db/logical_time_test.cpp b/src/mongo/db/logical_time_test.cpp index 19c3d5832b557..0764f4e0343ba 100644 --- a/src/mongo/db/logical_time_test.cpp +++ b/src/mongo/db/logical_time_test.cpp @@ -28,12 +28,24 @@ 
*/ +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" +#include "mongo/crypto/hash_block.h" #include "mongo/db/logical_time.h" #include "mongo/db/signed_logical_time.h" #include "mongo/db/time_proof_service.h" -#include "mongo/platform/basic.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/logical_time_validator.cpp b/src/mongo/db/logical_time_validator.cpp index 24e04282e8be9..1868b5d244cb2 100644 --- a/src/mongo/db/logical_time_validator.cpp +++ b/src/mongo/db/logical_time_validator.cpp @@ -28,22 +28,35 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/logical_time_validator.h" - -#include "mongo/base/init.h" -#include "mongo/db/auth/action_set.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status_with.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/client.h" #include "mongo/db/keys_collection_manager.h" +#include "mongo/db/logical_time_validator.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/vector_clock.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -57,28 +70,20 @@ MONGO_FAIL_POINT_DEFINE(externalClientsNeverAuthorizedToAdvanceLogicalClock); MONGO_FAIL_POINT_DEFINE(throwClientDisconnectInSignLogicalTimeForExternalClients); const auto getLogicalTimeValidator = - ServiceContext::declareDecoration>(); + ServiceContext::declareDecoration>(); Mutex validatorMutex; // protects access to decoration instance of LogicalTimeValidator. 
-std::vector advanceClusterTimePrivilege; - -MONGO_INITIALIZER(InitializeAdvanceClusterTimePrivilegeVector)(InitializerContext* const) { - ActionSet actions; - actions.addAction(ActionType::advanceClusterTime); - advanceClusterTimePrivilege.emplace_back(ResourcePattern::forClusterResource(), actions); -} - Milliseconds kRefreshIntervalIfErrored(200); } // unnamed namespace -LogicalTimeValidator* LogicalTimeValidator::get(ServiceContext* service) { +std::shared_ptr LogicalTimeValidator::get(ServiceContext* service) { stdx::lock_guard lk(validatorMutex); - return getLogicalTimeValidator(service).get(); + return getLogicalTimeValidator(service); } -LogicalTimeValidator* LogicalTimeValidator::get(OperationContext* ctx) { +std::shared_ptr LogicalTimeValidator::get(OperationContext* ctx) { return get(ctx->getClient()->getServiceContext()); } @@ -218,11 +223,12 @@ bool LogicalTimeValidator::isAuthorizedToAdvanceClock(OperationContext* opCtx) { return isInternalClient; } - auto client = opCtx->getClient(); + auto as = AuthorizationSession::get(opCtx->getClient()); // Note: returns true if auth is off, courtesy of // AuthzSessionExternalStateServerCommon::shouldIgnoreAuthChecks. - return AuthorizationSession::get(client)->isAuthorizedForPrivileges( - advanceClusterTimePrivilege); + return as->isAuthorizedForPrivilege( + Privilege(ResourcePattern::forClusterResource(as->getUserTenantId()), + ActionType::advanceClusterTime)); } bool LogicalTimeValidator::shouldGossipLogicalTime() { diff --git a/src/mongo/db/logical_time_validator.h b/src/mongo/db/logical_time_validator.h index 404d02310a165..5cc188cfc1368 100644 --- a/src/mongo/db/logical_time_validator.h +++ b/src/mongo/db/logical_time_validator.h @@ -31,7 +31,11 @@ #include +#include "mongo/base/status.h" +#include "mongo/db/auth/cluster_auth_mode.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/signed_logical_time.h" #include "mongo/db/time_proof_service.h" #include "mongo/platform/mutex.h" @@ -50,8 +54,8 @@ class KeysCollectionManager; class LogicalTimeValidator { public: // Decorate ServiceContext with LogicalTimeValidator instance. - static LogicalTimeValidator* get(ServiceContext* service); - static LogicalTimeValidator* get(OperationContext* ctx); + static std::shared_ptr get(ServiceContext* service); + static std::shared_ptr get(OperationContext* ctx); static void set(ServiceContext* service, std::unique_ptr validator); /** diff --git a/src/mongo/db/logical_time_validator_test.cpp b/src/mongo/db/logical_time_validator_test.cpp index 96a942ccda5ce..1be2986b496a1 100644 --- a/src/mongo/db/logical_time_validator_test.cpp +++ b/src/mongo/db/logical_time_validator_test.cpp @@ -27,22 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include + +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/timestamp.h" #include "mongo/db/keys_collection_client_sharded.h" #include "mongo/db/keys_collection_manager.h" #include "mongo/db/logical_time.h" #include "mongo/db/logical_time_validator.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/config/config_server_test_fixture.h" -#include "mongo/db/server_options.h" +#include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/signed_logical_time.h" #include "mongo/db/time_proof_service.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/s/grid.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/SConscript b/src/mongo/db/matcher/SConscript index 6b3578b2d9fc1..4671764173390 100644 --- a/src/mongo/db/matcher/SConscript +++ b/src/mongo/db/matcher/SConscript @@ -96,6 +96,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/query/query_planner', '$BUILD_DIR/mongo/db/query/query_test_service_context', '$BUILD_DIR/mongo/db/query_expressions', + '$BUILD_DIR/mongo/db/service_context_non_d', 'path', ], ) diff --git a/src/mongo/db/matcher/debug_string_test.cpp b/src/mongo/db/matcher/debug_string_test.cpp index 3211dacd89557..b3b4b685a6e0c 100644 --- a/src/mongo/db/matcher/debug_string_test.cpp +++ b/src/mongo/db/matcher/debug_string_test.cpp @@ -27,34 +27,50 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_array.h" -#include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_geo.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/expression_text.h" -#include "mongo/db/matcher/expression_where.h" -#include "mongo/db/matcher/expression_where_base.h" -#include "mongo/db/matcher/matcher.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_type.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/expression_internal_schema_eq.h" -#include "mongo/db/matcher/schema/expression_internal_schema_fmod.h" -#include "mongo/db/matcher/schema/expression_internal_schema_match_array_index.h" #include "mongo/db/matcher/schema/expression_internal_schema_max_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_max_length.h" #include "mongo/db/matcher/schema/expression_internal_schema_max_properties.h" #include "mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h" #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" -#include "mongo/db/matcher/schema/expression_internal_schema_xor.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include 
"mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/index_tag.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/golden_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/golden_test_base.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/doc_validation_error.cpp b/src/mongo/db/matcher/doc_validation_error.cpp index fe301c9aa4301..faea73802db66 100644 --- a/src/mongo/db/matcher/doc_validation_error.cpp +++ b/src/mongo/db/matcher/doc_validation_error.cpp @@ -27,26 +27,46 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/doc_validation_error.h" - +#include +#include #include - -#include "mongo/base/init.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/geo/geoparser.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/geo/geometry_container.h" +#include "mongo/db/matcher/doc_validation_error.h" #include "mongo/db/matcher/doc_validation_util.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_geo.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_path.h" #include "mongo/db/matcher/expression_tree.h" #include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/expression_with_placeholder.h" #include "mongo/db/matcher/match_expression_util.h" #include "mongo/db/matcher/match_expression_walker.h" +#include "mongo/db/matcher/matchable.h" +#include "mongo/db/matcher/path.h" #include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h" #include "mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" #include "mongo/db/matcher/schema/expression_internal_schema_cond.h" @@ -58,11 +78,20 @@ #include "mongo/db/matcher/schema/expression_internal_schema_min_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_length.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_properties.h" +#include "mongo/db/matcher/schema/expression_internal_schema_num_array_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" #include "mongo/db/matcher/schema/expression_internal_schema_str_length.h" #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_xor.h" #include "mongo/db/matcher/schema/json_schema_parser.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/pcre.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo::doc_validation_error { namespace { diff --git a/src/mongo/db/matcher/doc_validation_error.h b/src/mongo/db/matcher/doc_validation_error.h index f85ad12c77343..bbacc79efab6a 100644 --- 
a/src/mongo/db/matcher/doc_validation_error.h +++ b/src/mongo/db/matcher/doc_validation_error.h @@ -29,9 +29,18 @@ #pragma once +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util_core.h" namespace mongo::doc_validation_error { // The default maximum allowed size for a single doc validation error. diff --git a/src/mongo/db/matcher/doc_validation_error_json_schema_test.cpp b/src/mongo/db/matcher/doc_validation_error_json_schema_test.cpp index a77c76ab386df..e1cbab7d1239d 100644 --- a/src/mongo/db/matcher/doc_validation_error_json_schema_test.cpp +++ b/src/mongo/db/matcher/doc_validation_error_json_schema_test.cpp @@ -27,9 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/matcher/doc_validation_error_test.h" +#include "mongo/db/matcher/expression_type.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/doc_validation_error_test.cpp b/src/mongo/db/matcher/doc_validation_error_test.cpp index 44eef6ac54f8e..bf401d30d4717 100644 --- a/src/mongo/db/matcher/doc_validation_error_test.cpp +++ b/src/mongo/db/matcher/doc_validation_error_test.cpp @@ -27,10 +27,36 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/db/matcher/doc_validation_error.h" #include "mongo/db/matcher/doc_validation_error_test.h" #include "mongo/db/matcher/doc_validation_util.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::doc_validation_error { namespace { diff --git a/src/mongo/db/matcher/doc_validation_error_test.h b/src/mongo/db/matcher/doc_validation_error_test.h index 3248a2ae0689b..0e9aa72bc1b98 100644 --- a/src/mongo/db/matcher/doc_validation_error_test.h +++ b/src/mongo/db/matcher/doc_validation_error_test.h @@ -30,6 +30,8 @@ #pragma once #include "mongo/db/matcher/doc_validation_error.h" + +#include "mongo/bson/bsonobj.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/unittest/unittest.h" diff --git a/src/mongo/db/matcher/doc_validation_util.cpp b/src/mongo/db/matcher/doc_validation_util.cpp index 4689ce0e5767e..7bd59f63fd820 100644 --- a/src/mongo/db/matcher/doc_validation_util.cpp +++ b/src/mongo/db/matcher/doc_validation_util.cpp @@ -29,6 +29,14 @@ #include "mongo/db/matcher/doc_validation_util.h" +#include +#include +#include + +#include + +#include "mongo/bson/bson_depth.h" + namespace mongo::doc_validation_error { std::unique_ptr createAnnotation( const boost::intrusive_ptr& expCtx, diff --git a/src/mongo/db/matcher/doc_validation_util.h b/src/mongo/db/matcher/doc_validation_util.h index 687a21a506596..667789bc15494 100644 --- a/src/mongo/db/matcher/doc_validation_util.h +++ b/src/mongo/db/matcher/doc_validation_util.h @@ -29,6 +29,11 @@ #pragma once +#include +#include + +#include + #include "mongo/bson/bsonobj.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/pipeline/expression_context.h" diff --git a/src/mongo/db/matcher/expression.cpp b/src/mongo/db/matcher/expression.cpp index e9aee89cd3a9c..7d750d53d9e17 100644 --- a/src/mongo/db/matcher/expression.cpp +++ b/src/mongo/db/matcher/expression.cpp @@ -29,10 +29,17 @@ #include "mongo/db/matcher/expression.h" +#include + +#include +#include + #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/matcher/expression_parameterization.h" #include "mongo/db/matcher/schema/json_schema_parser.h" +#include "mongo/db/query/tree_walker.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression.h b/src/mongo/db/matcher/expression.h index 0976404646d27..35bd34bb47ea6 100644 --- a/src/mongo/db/matcher/expression.h +++ b/src/mongo/db/matcher/expression.h @@ -29,19 +29,37 @@ #pragma once +#include +#include #include +#include +#include +#include +#include #include #include +#include +#include +#include +#include #include 
"mongo/base/clonable_ptr.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/field_ref.h" #include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/match_details.h" #include "mongo/db/matcher/matchable.h" #include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/serialization_options.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" namespace mongo { @@ -53,6 +71,7 @@ namespace mongo { extern FailPoint disableMatchExpressionOptimization; class CollatorInterface; + class MatchExpression; class TreeMatchExpression; @@ -488,9 +507,9 @@ class MatchExpression { * parsed, produces a logically equivalent MatchExpression. However, if special options are set, * this no longer holds. * - * If 'options.replacementForLiteralArgs' is set, the result is no longer expected to re-parse, - * since we will put strings in places where strings may not be accpeted syntactically (e.g. a - * number is always expected, as in with the $mod expression). + * If 'options.literalPolicy' is set to 'kToDebugTypeString', the result is no longer expected + * to re-parse, since we will put strings in places where strings may not be accpeted + * syntactically (e.g. a number is always expected, as in with the $mod expression). */ virtual void serialize(BSONObjBuilder* out, SerializationOptions options) const = 0; diff --git a/src/mongo/db/matcher/expression_algo.cpp b/src/mongo/db/matcher/expression_algo.cpp index d44c8b88b58be..12eca1494087b 100644 --- a/src/mongo/db/matcher/expression_algo.cpp +++ b/src/mongo/db/matcher/expression_algo.cpp @@ -27,25 +27,45 @@ * it in the license file. 
*/ +#include "mongo/db/matcher/expression_algo.h" + +#include +#include +#include +#include +#include +#include -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include #include "mongo/base/checked_cast.h" -#include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/geo/geometry_container.h" #include "mongo/db/matcher/expression.h" -#include "mongo/db/matcher/expression_algo.h" -#include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_geo.h" #include "mongo/db/matcher/expression_internal_bucket_geo_within.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_path.h" #include "mongo/db/matcher/expression_tree.h" #include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/match_expression_dependencies.h" -#include "mongo/db/matcher/schema/expression_internal_schema_xor.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -377,12 +397,20 @@ unique_ptr createNorOfNodes(std::vector, unique_ptr> splitMatchExpressionByFunction( unique_ptr expr, const OrderedPathSet& fields, + const StringMap& renames, + expression::Renameables& renameables, expression::ShouldSplitExprFunc shouldSplitOut) { - if (shouldSplitOut(*expr, fields)) { + if (shouldSplitOut(*expr, fields, renames, renameables)) { // 'expr' satisfies our split condition and can be completely split out. return {std::move(expr), nullptr}; } + // At this point, the content of 'renameables' is no longer applicable because we chose not to + // proceed with the wholesale extraction of 'expr', or we try to find portion of 'expr' that can + // be split out by recursing down. In either case, we want to restart our renamable analysis and + // reset the state. + renameables.clear(); + if (expr->getCategory() != MatchExpression::MatchCategory::kLogical) { // 'expr' is a leaf and cannot be split out. return {nullptr, std::move(expr)}; @@ -395,13 +423,17 @@ std::pair, unique_ptr> splitMatchEx case MatchExpression::AND: { auto andExpr = checked_cast(expr.get()); for (size_t i = 0; i < andExpr->numChildren(); i++) { + expression::Renameables childRenameables; auto children = splitMatchExpressionByFunction( - andExpr->releaseChild(i), fields, shouldSplitOut); + andExpr->releaseChild(i), fields, renames, childRenameables, shouldSplitOut); invariant(children.first || children.second); if (children.first) { splitOut.push_back(std::move(children.first)); + // Accumulate the renameable expressions from the children. + renameables.insert( + renameables.end(), childRenameables.begin(), childRenameables.end()); } if (children.second) { remaining.push_back(std::move(children.second)); @@ -420,9 +452,13 @@ std::pair, unique_ptr> splitMatchEx // is equal to 1. 
auto norExpr = checked_cast(expr.get()); for (size_t i = 0; i < norExpr->numChildren(); i++) { + expression::Renameables childRenameables; auto child = norExpr->releaseChild(i); - if (shouldSplitOut(*child, fields)) { + if (shouldSplitOut(*child, fields, renames, childRenameables)) { splitOut.push_back(std::move(child)); + // Accumulate the renameable expressions from the children. + renameables.insert( + renameables.end(), childRenameables.begin(), childRenameables.end()); } else { remaining.push_back(std::move(child)); } @@ -776,21 +812,115 @@ bool isSubsetOf(const MatchExpression* lhs, const MatchExpression* rhs) { return false; } -bool hasOnlyRenameableMatchExpressionChildren(const MatchExpression& expr) { +// Type requirements for the hasOnlyRenameableMatchExpressionChildrenImpl() & isIndependentOfImpl() +// & isOnlyDependentOnImpl() functions +template +using MaybeMutablePtr = typename std::conditional::type; + +// const MatchExpression& should be passed with no 'renameables' argument to traverse the expression +// tree in read-only mode. +template +concept ConstTraverseMatchExpression = requires(E&& expr, Args&&... args) { + sizeof...(Args) == 0 && std::is_same_v; }; + +// MatchExpression& should be passed with a single 'renameables' argument to traverse the expression +// tree in read-write mode. +template +constexpr bool shouldCollectRenameables = std::is_same_v && + sizeof...(Args) == 1 && (std::is_same_v && ...); + +// Traversing the expression tree in read-write mode is the same as the 'shouldCollectRenameables'. +template +concept MutableTraverseMatchExpression = shouldCollectRenameables; + +// We traverse the expression tree in either read-only mode or read-write mode. +template +requires ConstTraverseMatchExpression || MutableTraverseMatchExpression +bool hasOnlyRenameableMatchExpressionChildrenImpl(E&& expr, + const StringMap& renames, + Args&&... renameables) { + constexpr bool mutating = shouldCollectRenameables; + if (expr.matchType() == MatchExpression::MatchType::EXPRESSION) { + if constexpr (mutating) { + auto exprExpr = checked_cast>(&expr); + if (renames.size() > 0 && exprExpr->hasRenameablePath(renames)) { + // The second element is ignored for $expr. + (renameables.emplace_back(exprExpr, ""_sd), ...); + } + } + return true; - } else if (expr.getCategory() == MatchExpression::MatchCategory::kOther) { + } + + if (expr.getCategory() == MatchExpression::MatchCategory::kOther) { + if constexpr (mutating) { + (renameables.clear(), ...); + } return false; - } else if (expr.getCategory() == MatchExpression::MatchCategory::kLogical) { - for (size_t i = 0; i < expr.numChildren(); i++) { - if (!hasOnlyRenameableMatchExpressionChildren(*expr.getChild(i))) { - return false; + } + + if (expr.getCategory() == MatchExpression::MatchCategory::kArrayMatching || + expr.getCategory() == MatchExpression::MatchCategory::kLeaf) { + auto pathExpr = checked_cast>(&expr); + if (renames.size() == 0 || !pathExpr->optPath()) { + return true; + } + + // Cannot proceed to dependency or independence checks if any attempted rename would fail.
+ auto&& [wouldSucceed, optNewPath] = pathExpr->wouldRenameSucceed(renames); + if (!wouldSucceed) { + if constexpr (mutating) { + (renameables.clear(), ...); } + return false; + } + + if constexpr (mutating) { + if (optNewPath) { + (renameables.emplace_back(pathExpr, *optNewPath), ...); + } + } + + return true; + } + + tassert(7585300, + "Expression category must be logical at this point", + expr.getCategory() == MatchExpression::MatchCategory::kLogical); + for (size_t i = 0; i < expr.numChildren(); ++i) { + bool hasOnlyRenameables = [&] { + if constexpr (mutating) { + return (hasOnlyRenameableMatchExpressionChildrenImpl( + *(expr.getChild(i)), renames, std::forward(renameables)), + ...); + } else { + return hasOnlyRenameableMatchExpressionChildrenImpl(*(expr.getChild(i)), renames); + } + }(); + if (!hasOnlyRenameables) { + if constexpr (mutating) { + (renameables.clear(), ...); + } + return false; } } + return true; } +bool hasOnlyRenameableMatchExpressionChildren(MatchExpression& expr, + const StringMap& renames, + Renameables& renameables) { + return hasOnlyRenameableMatchExpressionChildrenImpl(expr, renames, renameables); +} + +bool hasOnlyRenameableMatchExpressionChildren(const MatchExpression& expr, + const StringMap& renames) { + return hasOnlyRenameableMatchExpressionChildrenImpl(expr, renames); +} + bool containsDependency(const OrderedPathSet& testSet, const OrderedPathSet& prefixCandidates) { if (testSet.empty()) { return false; @@ -853,10 +983,27 @@ bool areIndependent(const OrderedPathSet& pathSet1, const OrderedPathSet& pathSe return !containsDependency(pathSet1, pathSet2) && !containsDependency(pathSet2, pathSet1); } -bool isIndependentOf(const MatchExpression& expr, const OrderedPathSet& pathSet) { +template +requires ConstTraverseMatchExpression || MutableTraverseMatchExpression +bool isIndependentOfImpl(E&& expr, + const OrderedPathSet& pathSet, + const StringMap& renames, + Args&&... renameables) { + constexpr bool mutating = shouldCollectRenameables; + // Any expression types that do not have renaming implemented cannot have their independence // evaluated here. See applyRenamesToExpression(). - if (!hasOnlyRenameableMatchExpressionChildren(expr)) { + bool hasOnlyRenameables = [&] { + if constexpr (mutating) { + return (hasOnlyRenameableMatchExpressionChildrenImpl( + expr, renames, std::forward(renameables)), + ...); + } else { + return hasOnlyRenameableMatchExpressionChildrenImpl(expr, renames); + } + }(); + + if (!hasOnlyRenameables) { return false; } @@ -869,10 +1016,42 @@ bool isIndependentOf(const MatchExpression& expr, const OrderedPathSet& pathSet) return areIndependent(pathSet, depsTracker.fields); } -bool isOnlyDependentOn(const MatchExpression& expr, const OrderedPathSet& pathSet) { +bool isIndependentOf(MatchExpression& expr, + const OrderedPathSet& pathSet, + const StringMap& renames, + Renameables& renameables) { + return isIndependentOfImpl(expr, pathSet, renames, renameables); +} + +bool isIndependentOfConst(const MatchExpression& expr, + const OrderedPathSet& pathSet, + const StringMap& renames) { + return isIndependentOfImpl(expr, pathSet, renames); +} + +template +requires ConstTraverseMatchExpression || MutableTraverseMatchExpression +bool isOnlyDependentOnImpl(E&& expr, + const OrderedPathSet& pathSet, + const StringMap& renames, + Args&&... renameables) { + constexpr bool mutating = shouldCollectRenameables; + // Any expression types that do not have renaming implemented cannot have their independence // evaluated here. 
See applyRenamesToExpression(). - if (!hasOnlyRenameableMatchExpressionChildren(expr)) { + bool hasOnlyRenameables = [&] { + if constexpr (mutating) { + return (hasOnlyRenameableMatchExpressionChildrenImpl( + expr, renames, std::forward(renameables)), + ...); + } else { + return hasOnlyRenameableMatchExpressionChildrenImpl(expr, renames); + } + }(); + + // Any expression types that do not have renaming implemented cannot have their independence + // evaluated here. See applyRenamesToExpression(). + if (!hasOnlyRenameables) { return false; } @@ -893,7 +1072,21 @@ bool isOnlyDependentOn(const MatchExpression& expr, const OrderedPathSet& pathSe pathsDepsCopy.insert(exprDepsTracker.fields.begin(), exprDepsTracker.fields.end()); return pathsDeps == - DepsTracker::simplifyDependencies(pathsDepsCopy, DepsTracker::TruncateToRootLevel::no); + DepsTracker::simplifyDependencies(std::move(pathsDepsCopy), + DepsTracker::TruncateToRootLevel::no); +} + +bool isOnlyDependentOn(MatchExpression& expr, + const OrderedPathSet& pathSet, + const StringMap& renames, + Renameables& renameables) { + return isOnlyDependentOnImpl(expr, pathSet, renames, renameables); +} + +bool isOnlyDependentOnConst(const MatchExpression& expr, + const OrderedPathSet& pathSet, + const StringMap& renames) { + return isOnlyDependentOnImpl(expr, pathSet, renames); } std::pair, unique_ptr> splitMatchExpressionBy( @@ -901,52 +1094,39 @@ std::pair, unique_ptr> splitMatchEx const OrderedPathSet& fields, const StringMap& renames, ShouldSplitExprFunc func /*= isIndependentOf */) { - auto splitExpr = splitMatchExpressionByFunction(expr->clone(), fields, func); - if (splitExpr.first) { - // If we get attemptedButFailedRenames == true, then it means we could not apply renames - // though there's sub-path match. In such a case, returns the original expression as the - // residual expression so that the match is not mistakenly swapped with the previous stage. - // Otherwise, the unrenamed $match would be swapped with the previous stage. - // - // TODO SERVER-74298 Remove if clause and just call applyRenamesToExpression(). - if (auto attemptedButFailedRenames = - applyRenamesToExpression(splitExpr.first.get(), renames); - attemptedButFailedRenames) { - return {nullptr, std::move(expr)}; - } + Renameables renameables; + auto splitExpr = + splitMatchExpressionByFunction(std::move(expr), fields, renames, renameables, func); + if (splitExpr.first && !renames.empty()) { + applyRenamesToExpression(renames, &renameables); } return splitExpr; } -// TODO SERVER-74298 Remove the return value. -// As soon as we find the first attempted but failed rename, we cancel the match expression tree -// traversal because the caller would return the original match expression. 
-bool applyRenamesToExpression(MatchExpression* expr, const StringMap& renames) { - if (expr->matchType() == MatchExpression::MatchType::EXPRESSION) { - ExprMatchExpression* exprExpr = checked_cast(expr); - exprExpr->applyRename(renames); - return false; - } - - if (expr->getCategory() == MatchExpression::MatchCategory::kOther) { - return false; - } - - if (expr->getCategory() == MatchExpression::MatchCategory::kArrayMatching || - expr->getCategory() == MatchExpression::MatchCategory::kLeaf) { - auto* pathExpr = checked_cast(expr); - if (pathExpr->applyRename(renames)) { - return true; +void applyRenamesToExpression(const StringMap& renames, + const Renameables* renameables) { + tassert(7585301, "Invalid argument", renameables); + for (auto&& [matchExpr, newPath] : *renameables) { + if (stdx::holds_alternative(matchExpr)) { + // PathMatchExpression. + stdx::get(matchExpr)->setPath(newPath); + } else { + // ExprMatchExpression. + stdx::get(matchExpr)->applyRename(renames); } } +} - for (size_t i = 0; i < expr->numChildren(); ++i) { - if (applyRenamesToExpression(expr->getChild(i), renames)) { - return true; - } +std::unique_ptr copyExpressionAndApplyRenames( + const MatchExpression* expr, const StringMap& renames) { + Renameables renameables; + if (auto exprCopy = expr->clone(); + hasOnlyRenameableMatchExpressionChildren(*exprCopy, renames, renameables)) { + applyRenamesToExpression(renames, &renameables); + return exprCopy; + } else { + return nullptr; } - - return false; } void mapOver(MatchExpression* expr, NodeTraversalFunc func, std::string path) { diff --git a/src/mongo/db/matcher/expression_algo.h b/src/mongo/db/matcher/expression_algo.h index 2d2b483c24472..5434c4ac7ac9d 100644 --- a/src/mongo/db/matcher/expression_algo.h +++ b/src/mongo/db/matcher/expression_algo.h @@ -32,6 +32,10 @@ #include #include #include +#include +#include +#include +#include #include "mongo/base/string_data.h" #include "mongo/db/pipeline/dependencies.h" @@ -39,7 +43,10 @@ namespace mongo { +class ExprMatchExpression; class MatchExpression; + +class PathMatchExpression; struct DepsTracker; namespace expression { @@ -53,10 +60,33 @@ using NodeTraversalFunc = std::function; */ bool hasExistencePredicateOnPath(const MatchExpression& expr, StringData path); +using PathOrExprMatchExpression = stdx::variant; +using Renameables = std::vector>; + /** - * Checks if 'expr' has any children which do not have renaming implemented. + * Checks if 'expr' has any subexpression which does not have renaming implemented or has renaming + * implemented but may fail to rename for any one of 'renames'. If there's any such subexpression, + * we should not proceed with renaming. */ -bool hasOnlyRenameableMatchExpressionChildren(const MatchExpression& expr); +bool hasOnlyRenameableMatchExpressionChildren(const MatchExpression& expr, + const StringMap& renames); + +/** + * Checks if 'expr' has any subexpression which does not have renaming implemented or has renaming + * implemented but may fail to rename for any one of 'renames'. If there's any such subexpression, + * we should not proceed with renaming. + * + * This function also fills out 'renameables' with the renameable subexpressions. For + * PathMatchExpression, the new path is returned in the second element of the pair. For + * ExprMatchExpression, the second element should be ignored and renames must be applied based on + * the full 'renames' map. 
+ * + * Note: The 'renameables' is filled out while traversing the tree and so, designated as an output + * parameter as an optimization to avoid traversing the tree again unnecessarily. + */ +bool hasOnlyRenameableMatchExpressionChildren(MatchExpression& expr, + const StringMap& renames, + Renameables& renameables); /** * Returns true if the documents matched by 'lhs' are a subset of the documents matched by @@ -122,14 +152,40 @@ bool containsOverlappingPaths(const OrderedPathSet& testSet); bool containsEmptyPaths(const OrderedPathSet& testSet); /** - * Determine if 'expr' is reliant upon any path from 'pathSet'. + * Determines if 'expr' is reliant upon any path from 'pathSet' and can be renamed by 'renames'. + */ +bool isIndependentOfConst(const MatchExpression& expr, + const OrderedPathSet& pathSet, + const StringMap& renames = {}); + +/** + * Determines if 'expr' is reliant upon any path from 'pathSet' and can be renamed by 'renames'. + * + * Note: For a description of the expected value returned in the 'renameables' output parameter, see + * the documentation for the 'hasOnlyRenameableMatchExpressionChildren()' function. + */ +bool isIndependentOf(MatchExpression& expr, + const OrderedPathSet& pathSet, + const StringMap& renames, + Renameables& renameables); + +/** + * Determines if 'expr' is reliant only upon paths from 'pathSet' and can be renamed by 'renames'. */ -bool isIndependentOf(const MatchExpression& expr, const OrderedPathSet& pathSet); +bool isOnlyDependentOnConst(const MatchExpression& expr, + const OrderedPathSet& pathSet, + const StringMap& renames = {}); /** - * Determine if 'expr' is reliant only upon paths from 'pathSet'. + * Determines if 'expr' is reliant only upon paths from 'pathSet' and can be renamed by 'renames'. + * + * Note: For a description of the expected value returned in the 'renameables' output parameter, see + * the documentation for the 'hasOnlyRenameableMatchExpressionChildren()' function. */ -bool isOnlyDependentOn(const MatchExpression& expr, const OrderedPathSet& pathSet); +bool isOnlyDependentOn(MatchExpression& expr, + const OrderedPathSet& pathSet, + const StringMap& renames, + Renameables& renameables); /** * Returns whether the path represented by 'first' is an prefix of the path represented by 'second'. @@ -156,7 +212,8 @@ bool bidirectionalPathPrefixOf(StringData first, StringData second); */ void mapOver(MatchExpression* expr, NodeTraversalFunc func, std::string path = ""); -using ShouldSplitExprFunc = std::function; +using ShouldSplitExprFunc = std::function&, Renameables&)>; /** * Attempt to split 'expr' into two MatchExpressions according to 'func'. 'func' describes the @@ -176,12 +233,6 @@ using ShouldSplitExprFunc = std::function "new". * The returned exprLeft value will be {new: {$gt: 3}}, provided that "old" is not in 'fields'. * - * If the previous stage is a simple rename, 'fields' should be empty and 'renames' are attempted - * but due to the limitation of renaming algorithm, we may fail to rename, when we return the - * original expression as residualExpr. - * - * TODO SERVER-74298 Remove the above comment after the ticket is done. - * * Never returns {nullptr, nullptr}. */ std::pair, std::unique_ptr> @@ -191,21 +242,32 @@ splitMatchExpressionBy(std::unique_ptr expr, ShouldSplitExprFunc func = isIndependentOf); /** - * Applies the renames specified in 'renames' to 'expr'. 'renames' maps from path names in 'expr' - * to the new values of those paths. 
For example, suppose the original match expression is + * Applies the renames specified in 'renames' & 'renameables'. 'renames' maps from path names in + * 'expr' to the new values of those paths. For example, suppose the original match expression is * {old: {$gt: 3}} and 'renames' contains the mapping "old" => "new". At the end, 'expr' will be * {new: {$gt: 3}}. * - * The caller should make sure that `expr` is renamable as a whole. - * - * Returns whether there's any attempted but failed to rename. This case can happen when path - * component is part of sub-fields. For example, expr = {x: {$eq: {y: 3}}} and renames = {{"x.y", - * "a.b"}}. We should be able to rename 'x' and 'y' to 'a' and 'b' respectively but due to the - * current limitation of renaming algorithm, we cannot rename such match expressions. + * In order to do an in-place renaming of the match expression tree, the caller should first call + * 'hasOnlyRenameableMatchExpressionChildren()' and then call this function if it returns true, + * passing through the resulting 'renameables'. * - * TODO SERVER-74298 The return value might be necessary any more after the ticket is done. + * Note: To enforce the above precondition, the caller should pass in the output of the call to + * hasOnlyRenameableMatchExpressionChildren() as the 'renameables' argument. To avoid passing empty + * vector for 'renameables' like applyRenamesToExpression(expr, {}, {}), the parameter is defined as + * a pointer. + */ +void applyRenamesToExpression(const StringMap& renames, + const Renameables* renameables); + +/** + * Copies the 'expr' and applies the renames specified in 'renames' to the copy of 'expr' and + * returns the renamed copy of 'expr' if renaming is successful. Otherwise, returns nullptr. + * 'renames' maps from path names in 'expr' to the new values of those paths. For example, suppose + * the original match expression is {old: {$gt: 3}} and 'renames' contains the mapping "old" => + * "new". The returned expression will be {new: {$gt: 3}}. */ -bool applyRenamesToExpression(MatchExpression* expr, const StringMap& renames); +std::unique_ptr copyExpressionAndApplyRenames( + const MatchExpression* expr, const StringMap& renames); /** * Split a MatchExpression into two parts: diff --git a/src/mongo/db/matcher/expression_algo_test.cpp b/src/mongo/db/matcher/expression_algo_test.cpp index f2032fcdfd8c8..d699a2f1ed531 100644 --- a/src/mongo/db/matcher/expression_algo_test.cpp +++ b/src/mongo/db/matcher/expression_algo_test.cpp @@ -27,19 +27,33 @@ * it in the license file. 
*/ -#include "mongo/unittest/unittest.h" - +#include +#include #include - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/parsed_match_expression_for_test.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/platform/decimal128.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -826,8 +840,8 @@ TEST(IsIndependent, AndIsIndependentOnlyIfChildrenAre) { ASSERT_OK(status.getStatus()); unique_ptr expr = std::move(status.getValue()); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"b"})); - ASSERT_TRUE(expression::isIndependentOf(*expr.get(), {"c"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"b"})); + ASSERT_TRUE(expression::isIndependentOfConst(*expr.get(), {"c"})); } TEST(IsIndependent, ElemMatchIsIndependent) { @@ -838,9 +852,9 @@ TEST(IsIndependent, ElemMatchIsIndependent) { ASSERT_OK(status.getStatus()); unique_ptr expr = std::move(status.getValue()); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"x"})); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"x.y"})); - ASSERT_TRUE(expression::isIndependentOf(*expr.get(), {"y"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"x"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"x.y"})); + ASSERT_TRUE(expression::isIndependentOfConst(*expr.get(), {"y"})); } TEST(IsIndependent, NorIsIndependentOnlyIfChildrenAre) { @@ -851,8 +865,8 @@ TEST(IsIndependent, NorIsIndependentOnlyIfChildrenAre) { ASSERT_OK(status.getStatus()); unique_ptr expr = std::move(status.getValue()); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"b"})); - ASSERT_TRUE(expression::isIndependentOf(*expr.get(), {"c"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"b"})); + ASSERT_TRUE(expression::isIndependentOfConst(*expr.get(), {"c"})); } TEST(IsIndependent, NotIsIndependentOnlyIfChildrenAre) { @@ -863,8 +877,8 @@ TEST(IsIndependent, NotIsIndependentOnlyIfChildrenAre) { ASSERT_OK(status.getStatus()); unique_ptr expr = std::move(status.getValue()); - ASSERT_TRUE(expression::isIndependentOf(*expr.get(), {"b"})); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"a"})); + ASSERT_TRUE(expression::isIndependentOfConst(*expr.get(), {"b"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"a"})); } TEST(IsIndependent, OrIsIndependentOnlyIfChildrenAre) { @@ -875,8 +889,8 @@ TEST(IsIndependent, OrIsIndependentOnlyIfChildrenAre) { ASSERT_OK(status.getStatus()); unique_ptr expr = std::move(status.getValue()); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"a"})); - ASSERT_TRUE(expression::isIndependentOf(*expr.get(), {"c"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"a"})); + ASSERT_TRUE(expression::isIndependentOfConst(*expr.get(), {"c"})); } TEST(IsIndependent, AndWithDottedFieldPathsIsNotIndependent) { @@ 
-887,8 +901,8 @@ TEST(IsIndependent, AndWithDottedFieldPathsIsNotIndependent) { ASSERT_OK(status.getStatus()); unique_ptr expr = std::move(status.getValue()); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"a.b.c"})); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"a.b"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"a.b.c"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"a.b"})); } TEST(IsIndependent, BallIsIndependentOfBalloon) { @@ -899,9 +913,9 @@ TEST(IsIndependent, BallIsIndependentOfBalloon) { ASSERT_OK(status.getStatus()); unique_ptr expr = std::move(status.getValue()); - ASSERT_TRUE(expression::isIndependentOf(*expr.get(), {"a.balloon"})); - ASSERT_TRUE(expression::isIndependentOf(*expr.get(), {"a.b"})); - ASSERT_FALSE(expression::isIndependentOf(*expr.get(), {"a.ball.c"})); + ASSERT_TRUE(expression::isIndependentOfConst(*expr.get(), {"a.balloon"})); + ASSERT_TRUE(expression::isIndependentOfConst(*expr.get(), {"a.b"})); + ASSERT_FALSE(expression::isIndependentOfConst(*expr.get(), {"a.ball.c"})); } // This is a descriptive test to ensure that until renames are implemented for these expressions, @@ -921,8 +935,8 @@ TEST(IsIndependent, NonRenameableExpressionIsNotIndependent) { auto matchExpression = std::move(swMatchExpression.getValue()); // Both of these should be true once renames are implemented. - ASSERT_FALSE(expression::isIndependentOf(*matchExpression.get(), {"c"})); - ASSERT_FALSE(expression::isOnlyDependentOn(*matchExpression.get(), {"a", "b"})); + ASSERT_FALSE(expression::isIndependentOfConst(*matchExpression.get(), {"c"})); + ASSERT_FALSE(expression::isOnlyDependentOnConst(*matchExpression.get(), {"a", "b"})); } } @@ -932,7 +946,7 @@ TEST(IsIndependent, EmptyDependencySetsPassIsOnlyDependentOn) { auto swMatchExpression = MatchExpressionParser::parse(matchPredicate, std::move(expCtx)); ASSERT_OK(swMatchExpression.getStatus()); auto matchExpression = std::move(swMatchExpression.getValue()); - ASSERT_TRUE(expression::isOnlyDependentOn(*matchExpression.get(), {})); + ASSERT_TRUE(expression::isOnlyDependentOnConst(*matchExpression.get(), {})); } TEST(ContainsOverlappingPaths, Basics) { @@ -1830,7 +1844,10 @@ TEST(SplitMatchExpression, ShouldSplitOutAndRenameJsonSchemaPatternByIsOnlyDepen ASSERT_TRUE(splitOutExpr.get()); // 'splitOutExpr' must be same as the expression after renaming 'a' to 'meta'. 
- expression::applyRenamesToExpression(originalExprCopy.get(), {{"a", "meta"}}); + expression::Renameables renameables; + ASSERT_TRUE( + expression::isOnlyDependentOn(*originalExprCopy, {"a"}, {{"a", "meta"}}, renameables)); + expression::applyRenamesToExpression({{"a", "meta"}}, &renameables); ASSERT_BSONOBJ_EQ(splitOutExpr->serialize(), originalExprCopy->serialize()); ASSERT_FALSE(residualExpr.get()); @@ -1918,9 +1935,10 @@ TEST(ApplyRenamesToExpression, ShouldApplyBasicRenamesForAMatchWithExpr) { ASSERT_OK(matcher.getStatus()); StringMap renames{{"a", "d"}, {"c", "e"}, {"x", "y"}}; - expression::applyRenamesToExpression(matcher.getValue().get(), renames); + auto renamedExpr = expression::copyExpressionAndApplyRenames(matcher.getValue().get(), renames); + ASSERT_TRUE(renamedExpr); - ASSERT_BSONOBJ_EQ(matcher.getValue()->serialize(), fromjson("{$expr: {$eq: ['$d.b', '$e']}}")); + ASSERT_BSONOBJ_EQ(renamedExpr->serialize(), fromjson("{$expr: {$eq: ['$d.b', '$e']}}")); } TEST(ApplyRenamesToExpression, ShouldApplyDottedRenamesForAMatchWithExpr) { @@ -1930,9 +1948,10 @@ TEST(ApplyRenamesToExpression, ShouldApplyDottedRenamesForAMatchWithExpr) { ASSERT_OK(matcher.getStatus()); StringMap renames{{"a.b.c", "x"}, {"d.e", "y"}}; - expression::applyRenamesToExpression(matcher.getValue().get(), renames); + auto renamedExpr = expression::copyExpressionAndApplyRenames(matcher.getValue().get(), renames); + ASSERT_TRUE(renamedExpr); - ASSERT_BSONOBJ_EQ(matcher.getValue()->serialize(), fromjson("{$expr: {$lt: ['$x', '$y.f']}}")); + ASSERT_BSONOBJ_EQ(renamedExpr->serialize(), fromjson("{$expr: {$lt: ['$x', '$y.f']}}")); } TEST(ApplyRenamesToExpression, ShouldApplyDottedRenamesForAMatchWithNestedExpr) { @@ -1943,10 +1962,11 @@ TEST(ApplyRenamesToExpression, ShouldApplyDottedRenamesForAMatchWithNestedExpr) ASSERT_OK(matcher.getStatus()); StringMap renames{{"a", "x.y"}, {"d.e", "y"}, {"c", "q.r"}}; - expression::applyRenamesToExpression(matcher.getValue().get(), renames); + auto renamedExpr = expression::copyExpressionAndApplyRenames(matcher.getValue().get(), renames); + ASSERT_TRUE(renamedExpr); ASSERT_BSONOBJ_EQ( - matcher.getValue()->serialize(), + renamedExpr->serialize(), fromjson( "{$and: [{$expr: {$eq: ['$x.y.b.c', '$q.r']}}, {$expr: {$lt: ['$y.f', '$x.y']}}]}")); } @@ -1958,10 +1978,11 @@ TEST(ApplyRenamesToExpression, ShouldNotApplyRenamesForAMatchWithExprWithNoField ASSERT_OK(matcher.getStatus()); StringMap renames{{"a", "x.y"}, {"d.e", "y"}, {"c", "q.r"}}; - expression::applyRenamesToExpression(matcher.getValue().get(), renames); + auto renamedExpr = expression::copyExpressionAndApplyRenames(matcher.getValue().get(), renames); + ASSERT_TRUE(renamedExpr); ASSERT_BSONOBJ_EQ( - matcher.getValue()->serialize(), + renamedExpr->serialize(), fromjson("{$expr: {$concat: [{$const: 'a'}, {$const: 'b'}, {$const: 'c'}]}}")); } diff --git a/src/mongo/db/matcher/expression_always_boolean.h b/src/mongo/db/matcher/expression_always_boolean.h index b82d582a68250..b11b3faaaf5d8 100644 --- a/src/mongo/db/matcher/expression_always_boolean.h +++ b/src/mongo/db/matcher/expression_always_boolean.h @@ -30,8 +30,23 @@ #pragma once #include - +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" +#include 
"mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -64,11 +79,7 @@ class AlwaysBooleanMatchExpression : public MatchExpression { } void serialize(BSONObjBuilder* out, SerializationOptions opts) const final { - if (opts.replacementForLiteralArgs) { - out->append(name(), *opts.replacementForLiteralArgs); - } else { - out->append(name(), 1); - } + opts.appendLiteral(out, name(), 1); } bool equivalent(const MatchExpression* other) const final { diff --git a/src/mongo/db/matcher/expression_always_boolean_test.cpp b/src/mongo/db/matcher/expression_always_boolean_test.cpp index 899c6fe1b12b7..517ef49db70d2 100644 --- a/src/mongo/db/matcher/expression_always_boolean_test.cpp +++ b/src/mongo/db/matcher/expression_always_boolean_test.cpp @@ -26,11 +26,14 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_array.cpp b/src/mongo/db/matcher/expression_array.cpp index b0fd8d05a717f..0014da48ece22 100644 --- a/src/mongo/db/matcher/expression_array.cpp +++ b/src/mongo/db/matcher/expression_array.cpp @@ -28,8 +28,15 @@ */ #include "mongo/db/matcher/expression_array.h" -#include "mongo/db/field_ref.h" -#include "mongo/db/jsobj.h" + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/query/util/make_data_structure.h" namespace mongo { @@ -97,9 +104,11 @@ void ElemMatchObjectMatchExpression::debugString(StringBuilder& debug, int inden _sub->debugString(debug, indentationLevel + 1); } -BSONObj ElemMatchObjectMatchExpression::getSerializedRightHandSide( - SerializationOptions opts) const { - return BSON("$elemMatch" << _sub->serialize(opts)); +void ElemMatchObjectMatchExpression::appendSerializedRightHandSide( + BSONObjBuilder* bob, SerializationOptions opts) const { + BSONObjBuilder elemMatchBob = bob->subobjStart("$elemMatch"); + _sub->serialize(&elemMatchBob, opts); + elemMatchBob.doneFast(); } MatchExpression::ExpressionOptimizerFunc ElemMatchObjectMatchExpression::getOptimizer() const { @@ -163,15 +172,14 @@ void ElemMatchValueMatchExpression::debugString(StringBuilder& debug, int indent } } -BSONObj ElemMatchValueMatchExpression::getSerializedRightHandSide(SerializationOptions opts) const { - BSONObjBuilder emBob; - +void ElemMatchValueMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + BSONObjBuilder emBob = bob->subobjStart("$elemMatch"); opts.includePath = false; for (auto&& child : _subs) { child->serialize(&emBob, opts); } - - return BSON("$elemMatch" << emBob.obj()); + emBob.doneFast(); } MatchExpression::ExpressionOptimizerFunc ElemMatchValueMatchExpression::getOptimizer() const { @@ -205,12 +213,9 @@ void SizeMatchExpression::debugString(StringBuilder& debug, int indentationLevel _debugStringAttachTagInfo(&debug); } -BSONObj SizeMatchExpression::getSerializedRightHandSide(SerializationOptions opts) 
const { - const char* opName = "$size"; - if (opts.replacementForLiteralArgs) { - return BSON(opName << *opts.replacementForLiteralArgs); - } - return BSON(opName << _size); +void SizeMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + opts.appendLiteral(bob, "$size", _size); } bool SizeMatchExpression::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/expression_array.h b/src/mongo/db/matcher/expression_array.h index 7fa9c493b5229..465e9923ba336 100644 --- a/src/mongo/db/matcher/expression_array.h +++ b/src/mongo/db/matcher/expression_array.h @@ -29,13 +29,30 @@ #pragma once +#include #include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include "mongo/base/clonable_ptr.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_path.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/path.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -90,7 +107,7 @@ class ElemMatchObjectMatchExpression final : public ArrayMatchingMatchExpression virtual void debugString(StringBuilder& debug, int indentationLevel) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; std::vector>* getChildVector() final { return nullptr; @@ -158,7 +175,7 @@ class ElemMatchValueMatchExpression final : public ArrayMatchingMatchExpression virtual void debugString(StringBuilder& debug, int indentationLevel) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; std::vector>* getChildVector() final { return &_subs; @@ -233,7 +250,7 @@ class SizeMatchExpression : public ArrayMatchingMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; virtual bool equivalent(const MatchExpression* other) const; diff --git a/src/mongo/db/matcher/expression_array_test.cpp b/src/mongo/db/matcher/expression_array_test.cpp index 1823518f9643e..77278da845166 100644 --- a/src/mongo/db/matcher/expression_array_test.cpp +++ b/src/mongo/db/matcher/expression_array_test.cpp @@ -27,17 +27,21 @@ * it in the license file. 
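A sketch of the new serialization hook for a hypothetical leaf operator, mirroring the SizeMatchExpression change above; MyExampleMatchExpression, $myOp and _value are illustrative names only. The right-hand side is appended into the caller's builder, and literals go through opts.appendLiteral() so the active LiteralSerializationPolicy is honored in one place:

    void MyExampleMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob,
                                                                 SerializationOptions opts) const {
        // Appends {$myOp: <the real value>} under kUnchanged and {$myOp: "?number"}
        // under kToDebugTypeString, just as the $size serializer above does.
        opts.appendLiteral(bob, "$myOp", _value);
    }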
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_tree.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_expr.cpp b/src/mongo/db/matcher/expression_expr.cpp index 9a8ad5b18e9e2..07345abe58d9c 100644 --- a/src/mongo/db/matcher/expression_expr.cpp +++ b/src/mongo/db/matcher/expression_expr.cpp @@ -27,13 +27,25 @@ * it in the license file. */ -#include "mongo/db/matcher/expression_visitor.h" -#include "mongo/db/pipeline/expression.h" -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_internal_eq_hashed_key.h" - +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/platform/compiler.h" #include "mongo/util/fail_point.h" namespace mongo { @@ -148,7 +160,7 @@ std::unique_ptr attemptToRewriteEqHash(ExprMatchExpression& exp // Where "a" can be any field path and ? can be any number. 
if (auto eq = dynamic_cast(childExpr.get()); eq && eq->getOp() == ExpressionCompare::CmpOp::EQ) { - auto children = eq->getChildren(); + const auto& children = eq->getChildren(); tassert(7281406, "should have 2 $eq children", children.size() == 2ul); auto eqFirst = children[0].get(); diff --git a/src/mongo/db/matcher/expression_expr.h b/src/mongo/db/matcher/expression_expr.h index 0f21242b31619..93954b8594ded 100644 --- a/src/mongo/db/matcher/expression_expr.h +++ b/src/mongo/db/matcher/expression_expr.h @@ -29,16 +29,36 @@ #pragma once +#include #include +#include +#include +#include +#include +#include #include +#include "mongo/base/clonable_ptr.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" #include "mongo/db/matcher/rewrite_expr.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_walker.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/expression_walker.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -136,6 +156,18 @@ class ExprMatchExpression final : public MatchExpression { expression_walker::walk(_expression.get(), &substituteWalker); } + bool hasRenameablePath(const StringMap& renameList) const { + bool hasRenameablePath = false; + FieldPathVisitor visitor([&](const ExpressionFieldPath* expr) { + hasRenameablePath = + hasRenameablePath || expr->isRenameableByAnyPrefixNameIn(renameList); + }); + stage_builder::ExpressionWalker walker( + &visitor, nullptr /*inVisitor*/, nullptr /*postVisitor*/); + expression_walker::walk(_expression.get(), &walker); + return hasRenameablePath; + } + private: ExpressionOptimizerFunc getOptimizer() const final; diff --git a/src/mongo/db/matcher/expression_expr_test.cpp b/src/mongo/db/matcher/expression_expr_test.cpp index bb008a536c19d..287b19620b334 100644 --- a/src/mongo/db/matcher/expression_expr_test.cpp +++ b/src/mongo/db/matcher/expression_expr_test.cpp @@ -27,19 +27,33 @@ * it in the license file. 
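The ExprMatchTest updates that follow switch from redactIdentifiers/replacementForLiteralArgs to the renamed query-shape knobs; a sketch of those options as the tests exercise them, where the HASH<...> wrapper is purely a test helper:

    SerializationOptions opts;
    opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString;
    opts.transformIdentifiers = true;
    opts.transformIdentifiersCallback = [](StringData s) -> std::string {
        return str::stream() << "HASH<" << s << ">";
    };
    // With these options, {$expr: {$sum: ["$a", "b"]}} serializes as
    // {$expr: {$sum: ["$HASH<a>", "?string"]}}.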
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/matcher.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/inline_auto_update.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" namespace mongo { @@ -803,7 +817,7 @@ DEATH_TEST_REGEX(ExprMatchTest, GetChildFailsIndexGreaterThanZero, "Tripwire ass /** * A default redaction strategy that generates easy to check results for testing purposes. */ -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } @@ -812,9 +826,9 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { createMatcher(fromjson("{$expr: {$sum: [\"$a\", \"$b\"]}}")); SerializationOptions opts; - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.redactIdentifiers = true; - opts.replacementForLiteralArgs = "?"; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiersCallback = applyHmacForTest; + opts.transformIdentifiers = true; ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({"$expr":{"$sum":["$HASH","$HASH"]}})", @@ -822,7 +836,7 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { createMatcher(fromjson("{$expr: {$sum: [\"$a\", \"b\"]}}")); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"$expr":{"$sum":["$HASH",{"$const":"?"}]}})", + R"({"$expr":{"$sum":["$HASH","?string"]}})", serialize(opts)); createMatcher(fromjson("{$expr: {$sum: [\"$a.b\", \"$b\"]}}")); @@ -842,7 +856,7 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { createMatcher(fromjson("{$expr: {$getField: {field: \"b\", input: {a: 1, b: 2}}}}")); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"$expr":{"$getField":{"field":"HASH","input":{"$const":"?"}}}})", + R"({"$expr":{"$getField":{"field":"HASH","input":"?object"}}})", serialize(opts)); createMatcher(fromjson("{$expr: {$getField: {field: \"b\", input: \"$a\"}}}")); @@ -857,9 +871,7 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { "$getField": { "field": "HASH", "input": { - "HASH": { - "$const": "?" - }, + "HASH": "?number", "HASH": "$HASH" } } @@ -874,9 +886,7 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { "$getField": { "field": "HASH.HASH", "input": { - "HASH": { - "$const": "?" - }, + "HASH": "?number", "HASH": "$HASH" } } @@ -892,14 +902,10 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { "$setField": { "field": "HASH", "input": { - "HASH": { - "$const": "?" - }, + "HASH": "?number", "HASH": "$HASH" }, - "value": { - "$const": "?" - } + "value": "?number" } } })", @@ -913,9 +919,7 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { "$setField": { "field": "HASH.HASH", "input": { - "HASH": { - "$const": "?" 
- }, + "HASH": "?number", "HASH": "$HASH" }, "value": "$HASH" @@ -932,9 +936,7 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { "$setField": { "field": "HASH.HASH", "input": { - "HASH": { - "$const": "?" - }, + "HASH": "?number", "HASH": "$HASH" }, "value": "$HASH.HASH" @@ -952,14 +954,10 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { "$setField": { "field": "HASH", "input": { - "HASH": { - "$const": "?" - }, + "HASH": "?number", "HASH": "$HASH" }, - "value": { - "$const": "?" - } + "value": "?object" } } })", @@ -974,18 +972,12 @@ TEST_F(ExprMatchTest, ExprRedactsCorrectly) { "$setField": { "field": "HASH", "input": { - "HASH": { - "$const": "?" - }, + "HASH": "?number", "HASH": "$HASH" }, "value": { - "HASH": { - "$const": "?" - }, - "HASH": { - "$const": "?" - }, + "HASH": "?number", + "HASH": "?number", "HASH": "$HASH" } } diff --git a/src/mongo/db/matcher/expression_geo.cpp b/src/mongo/db/matcher/expression_geo.cpp index 5c496c2dca5cc..3d5fbbbd4e093 100644 --- a/src/mongo/db/matcher/expression_geo.cpp +++ b/src/mongo/db/matcher/expression_geo.cpp @@ -30,11 +30,25 @@ #include "mongo/db/matcher/expression_geo.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/geo/geoparser.h" +#include "mongo/db/matcher/expression_geo_serializer.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -104,63 +118,6 @@ Status GeoExpression::parseQuery(const BSONObj& obj) { return Status::OK(); } -BSONObj redactGeoExpression(const BSONObj& obj, - boost::optional literalArgsReplacement) { - - // Ideally each sub operator ($minDistance, $maxDistance, $geometry, $box) would serialize - // itself, rather than GeoExpression reparse the query during serialization. However GeoMatch - // and GeoNearMatch don't capture the nesting of the various sub-operators. Moreover, the data - // members representing the suboperators do not have serialize functions. As re-parsing is - // therefore required to serialize GeoNear and GeoNearMatch, the compromise is to have the same - // class responsible for parsing (GeoExpression) also responsible for serializing. - - BSONElement outerElem = obj.firstElement(); - BSONObjBuilder bob; - - // Legacy GeoNear query. - if (outerElem.type() == mongo::Array) { - BSONObjIterator it(obj); - while (it.more()) { - // In a legacy GeoNear query, the value associated with the first field ($near or - // $geoNear) is an array where the first two array elements represent the x and y - // coordinates respectively. An optional third array element denotes the $maxDistance. - // Alternatively, a legacy query can have a $maxDistance suboperator to make it more - // explicit. None of these values are enums so it is fine to treat them as literals - // during redaction. - outerElem = it.next(); - bob.append(outerElem.fieldNameStringData(), *literalArgsReplacement); - } - return bob.obj(); - } - // Non-legacy geo expressions have embedded objects that have to be traversed. 
- else { - BSONObjIterator embedded_it(outerElem.embeddedObject()); - StringData fieldName = outerElem.fieldNameStringData(); - BSONObjBuilder subObj = BSONObjBuilder(bob.subobjStart(fieldName)); - - while (embedded_it.more()) { - BSONElement argElem = embedded_it.next(); - fieldName = argElem.fieldNameStringData(); - if (fieldName == "$geometry") { - BSONObjBuilder nestedSubObj = BSONObjBuilder(subObj.subobjStart(fieldName)); - BSONElement typeElt = argElem.Obj().getField("type"); - if (!typeElt.eoo()) { - nestedSubObj.append(typeElt); - } - nestedSubObj.append("coordinates", *literalArgsReplacement); - nestedSubObj.doneFast(); - } - if (fieldName == "$maxDistance" || fieldName == "$box" || fieldName == "$nearSphere" || - fieldName == "$minDistance") { - subObj.append(fieldName, *literalArgsReplacement); - } - } - subObj.doneFast(); - } - - return bob.obj(); -} - Status GeoExpression::parseFrom(const BSONObj& obj) { // Initialize geoContainer and parse BSON object Status status = parseQuery(obj); @@ -466,7 +423,7 @@ bool GeoMatchExpression::contains(const GeometryContainer& queryGeom, if (GeoExpression::WITHIN == queryPredicate) { return queryGeom.contains(*geometry); } else { - verify(GeoExpression::INTERSECT == queryPredicate); + MONGO_verify(GeoExpression::INTERSECT == queryPredicate); return queryGeom.intersects(*geometry); } } @@ -498,13 +455,13 @@ void GeoMatchExpression::debugString(StringBuilder& debug, int indentationLevel) _debugStringAttachTagInfo(&debug); } -BSONObj GeoMatchExpression::getSerializedRightHandSide(SerializationOptions opts) const { - if (opts.replacementForLiteralArgs) { - return redactGeoExpression(_rawObj, opts.replacementForLiteralArgs); +void GeoMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + if (opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) { + geoCustomSerialization(bob, _rawObj, opts); + return; } - BSONObjBuilder subobj; - subobj.appendElements(_rawObj); - return subobj.obj(); + bob->appendElements(_rawObj); } bool GeoMatchExpression::equivalent(const MatchExpression* other) const { @@ -554,13 +511,13 @@ void GeoNearMatchExpression::debugString(StringBuilder& debug, int indentationLe _debugStringAttachTagInfo(&debug); } -BSONObj GeoNearMatchExpression::getSerializedRightHandSide(SerializationOptions opts) const { - if (opts.replacementForLiteralArgs) { - return redactGeoExpression(_rawObj, opts.replacementForLiteralArgs); +void GeoNearMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + if (opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) { + geoCustomSerialization(bob, _rawObj, opts); + return; } - BSONObjBuilder objBuilder; - objBuilder.appendElements(_rawObj); - return objBuilder.obj(); + bob->appendElements(_rawObj); } bool GeoNearMatchExpression::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/expression_geo.h b/src/mongo/db/matcher/expression_geo.h index be79428d0a507..42a5a6c7461b9 100644 --- a/src/mongo/db/matcher/expression_geo.h +++ b/src/mongo/db/matcher/expression_geo.h @@ -31,10 +31,29 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/geo/geometry_container.h" 
#include "mongo/db/geo/geoparser.h" +#include "mongo/db/geo/shapes.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -107,7 +126,7 @@ class GeoMatchExpression : public LeafMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel = 0) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; virtual bool equivalent(const MatchExpression* other) const; @@ -211,7 +230,7 @@ class GeoNearMatchExpression : public LeafMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel = 0) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; virtual bool equivalent(const MatchExpression* other) const; @@ -272,7 +291,7 @@ class TwoDPtInAnnulusExpression : public LeafMatchExpression { // These won't be called. // - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final { + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final { MONGO_UNREACHABLE; } diff --git a/src/mongo/db/matcher/expression_geo_serializer.cpp b/src/mongo/db/matcher/expression_geo_serializer.cpp new file mode 100644 index 0000000000000..e9fed54bb6964 --- /dev/null +++ b/src/mongo/db/matcher/expression_geo_serializer.cpp @@ -0,0 +1,247 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/matcher/expression_geo_serializer.h" + +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +namespace mongo { +namespace { +void appendLegacyGeoLiteral(BSONObjBuilder* bob, const BSONElement& e, SerializationOptions opts) { + if (opts.literalPolicy != LiteralSerializationPolicy::kToRepresentativeParseableValue) { + opts.appendLiteral(bob, e); + return; + } + + StringData fieldName = e.fieldNameStringData(); + if (fieldName == "$nearSphere"_sd || fieldName == "$near") { + // Legacy $nearSphere and $near requires at minimum 2 coordinates to be + // re-parseable, so the representative value is [1, 1]. + bob->appendArray(fieldName, BSON_ARRAY(1 << 1)); + } else if (fieldName == "$center"_sd || fieldName == "$centerSphere"_sd) { + // $center and $centerSphere requires a pair of coordinates and a radius to be + // re-parseable, so the representative value is [[1, 1],1]. + bob->appendArray(fieldName, BSON_ARRAY(BSON_ARRAY(1 << 1) << 1)); + } else if (fieldName == "$box"_sd) { + // $box requires two pairs of coordinates to be re-parseable, so the + // representative value is [[1, 1],[1,1]]. + bob->appendArray(fieldName, BSON_ARRAY(BSON_ARRAY(1 << 1) << BSON_ARRAY(1 << 1))); + } else if (fieldName == "$polygon"_sd) { + // $polygon requires three pairs of coordinates to be re-parseable, so the representative + // value is [[0,0],[0,1],[1,1]]. + bob->appendArray( + fieldName, BSON_ARRAY(BSON_ARRAY(0 << 0) << BSON_ARRAY(0 << 1) << BSON_ARRAY(1 << 1))); + } else { + opts.appendLiteral(bob, e); + } +} + +void appendGeoJSONCoordinatesLiteral(BSONObjBuilder* bob, + const BSONElement& e, + StringData geoJSONType, + SerializationOptions opts) { + if (opts.literalPolicy != LiteralSerializationPolicy::kToRepresentativeParseableValue) { + opts.appendLiteral(bob, e); + return; + } + + StringData fieldName = e.fieldNameStringData(); + if (geoJSONType == "Polygon"_sd) { + // Polygon requires four pairs of coordinates in a closed loop wrapped in an array to be + // re-parseable, so the representative value is [[[0,0],[0,1],[1,1],[0,0]]]. + bob->appendArray( + fieldName, + BSON_ARRAY(BSON_ARRAY(BSON_ARRAY(0 << 0) << BSON_ARRAY(0 << 1) << BSON_ARRAY(1 << 1) + << BSON_ARRAY(0 << 0)))); + } else if (geoJSONType == "MultiPolygon"_sd) { + // MultiPolygon requires four pairs of coordinates in a closed loop wrapped in 2 arrays to + // be re-parseable, so the representative value is [[[[0,0],[0,1],[1,1],[0,0]]]]. + bob->appendArray(fieldName, + BSON_ARRAY(BSON_ARRAY(BSON_ARRAY( + BSON_ARRAY(0 << 0) + << BSON_ARRAY(0 << 1) << BSON_ARRAY(1 << 1) << BSON_ARRAY(0 << 0))))); + } else if (geoJSONType == "Point"_sd) { + // Point requires a pair of coordinates to be re-parseable, so the representative + // value is [1,1]. + bob->appendArray(fieldName, BSON_ARRAY(1 << 1)); + } else if (geoJSONType == "MultiPoint"_sd) { + // MultiPoint requires a pair of coordinates wrapped in an array to be re-parseable, so the + // representative value is [[1,1]]. + bob->appendArray(fieldName, BSON_ARRAY(BSON_ARRAY(1 << 1))); + } else if (geoJSONType == "LineString"_sd) { + // LineString requires two pairs of coordinates to be re-parseable, so the representative + // value is [[0,0],[1,1]]. 
+ bob->appendArray(fieldName, BSON_ARRAY(BSON_ARRAY(0 << 0) << BSON_ARRAY(1 << 1))); + } else { + opts.appendLiteral(bob, e); + } +} + +void appendCRSObject(BSONObjBuilder* bob, const BSONElement& crsObj, SerializationOptions opts) { + // 'crs' is always an object. + tassert(7559700, "Expected 'crs' to be an object", crsObj.type() == BSONType::Object); + // 'crs' is required to have a 'type' field with the value 'name'. + // Additionally, it is required to have an object properties field + // with a single 'name' field. + tassert(7559701, + str::stream() << "Expected 'crs' to contain a string 'type' field, got " << crsObj, + crsObj["type"] && crsObj["type"].type() == BSONType::String); + tassert(7559702, + str::stream() << "Expected 'crs' to contain a 'properties' object, got , " << crsObj, + crsObj["properties"] && crsObj["properties"].type() == BSONType::Object); + tassert(7559703, + str::stream() << "Expected 'crs.properties' to contain a 'name' " + "string field, got " + << crsObj["properties"], + crsObj["properties"].Obj()["name"] && + crsObj["properties"].Obj()["name"].type() == BSONType::String); + + // The CRS "type" and "properties.name" fields must be preserved for + // kToRepresentativeParseableValue serialization policy so the query + // shape can be re-parsed (and will be preserved for kUnchanged policy + // as well). + BSONObjBuilder crsObjBuilder(bob->subobjStart("crs")); + if (opts.literalPolicy == LiteralSerializationPolicy::kToDebugTypeString) { + opts.appendLiteral(&crsObjBuilder, crsObj["type"]); + } else { + crsObjBuilder.append(crsObj["type"]); + } + BSONObjBuilder crsPropBuilder(crsObjBuilder.subobjStart("properties")); + if (opts.literalPolicy == LiteralSerializationPolicy::kToDebugTypeString) { + opts.appendLiteral(&crsPropBuilder, crsObj["properties"].Obj()["name"]); + } else { + crsPropBuilder.append(crsObj["properties"].Obj()["name"]); + } + crsPropBuilder.doneFast(); + crsObjBuilder.doneFast(); +} + +void appendGeometrySubObject(BSONObjBuilder* bob, + const BSONObj& geometryObj, + SerializationOptions opts) { + auto typeElem = geometryObj["type"]; + if (typeElem) { + bob->append(typeElem); + } + if (auto coordinatesElem = geometryObj["coordinates"]) { + appendGeoJSONCoordinatesLiteral(bob, coordinatesElem, typeElem.valueStringData(), opts); + } + + // 'crs' can be present if users want to use STRICT_SPHERE coordinate + // system. + if (auto crsElt = geometryObj["crs"]) { + appendCRSObject(bob, crsElt, opts); + } +} +} // namespace + +void geoCustomSerialization(BSONObjBuilder* bob, const BSONObj& obj, SerializationOptions opts) { + BSONElement outerElem = obj.firstElement(); + + // Legacy GeoNear query. + if (outerElem.type() == mongo::Array) { + BSONObjIterator it(obj); + while (it.more()) { + // In a legacy GeoNear query, the value associated with the first field ($near or + // $geoNear) is an array where the first two array elements represent the x and y + // coordinates respectively. An optional third array element denotes the $maxDistance. + // Alternatively, a legacy query can have a $maxDistance suboperator to make it more + // explicit. None of these values are enums so it is fine to treat them as literals + // during redaction. + appendLegacyGeoLiteral(bob, it.next(), opts); + } + return; + } + + // Non-legacy geo expressions have embedded objects that have to be traversed. 
+ BSONObjIterator outer_it(obj); + while (outer_it.more()) { + auto elem = outer_it.next(); + + if (!elem.isABSONObj()) { + // Typically, geo expressions have a single embedded object under the top-level geo + // operator, but there is an exception for syntax that allows geoJSON coordinates + // without specifying $geometry (e.g., {$nearSphere: {type: 'Point', coordinates: + // [1,2]}}). We're iterating outer_it and appending extra literals to handle + // $minDistance and $maxDistance fields that could be included outside the primary geo + // object in those edge cases (e.g., {$nearSphere: {type: 'Point', coordinates: [1,2]}, + // $minDistance: 10}). + opts.appendLiteral(bob, elem); + } else { + StringData fieldName = elem.fieldNameStringData(); + BSONObjBuilder subObj = BSONObjBuilder(bob->subobjStart(fieldName)); + BSONObjIterator embedded_it(elem.embeddedObject()); + + while (embedded_it.more()) { + BSONElement argElem = embedded_it.next(); + fieldName = argElem.fieldNameStringData(); + if (fieldName == "$geometry"_sd) { + if (argElem.type() == BSONType::Array) { + // This would be like {$geometry: [0, 0]} which must be a point. + auto asArray = argElem.Array(); + tassert(7539807, + "Expected the point to have exactly 2 elements: an x and y.", + asArray.size() == 2UL); + subObj.appendArray(fieldName, + BSON_ARRAY(opts.serializeLiteral(asArray[0]) + << opts.serializeLiteral(asArray[1]))); + } else { + BSONObjBuilder nestedSubObj = bob->subobjStart("$geometry"); + appendGeometrySubObject(&nestedSubObj, argElem.Obj(), opts); + nestedSubObj.doneFast(); + } + } else if (fieldName == "type"_sd) { + // This handles an edge-case where syntax allows geoJSON coordinates without + // specifying $geometry; e.g., {$nearSphere: {type: 'Point', coordinates: + // [1,2]}}. + appendGeometrySubObject(&subObj, elem.Obj(), opts); + + // appendGeometrySubObj handles all fields in this subObj, so we break out of + // the inner loop to avoid duplicating fields. + break; + } else { + appendLegacyGeoLiteral(&subObj, argElem, opts); + } + } + subObj.doneFast(); + } + } +} +} // namespace mongo diff --git a/src/mongo/db/matcher/expression_geo_serializer.h b/src/mongo/db/matcher/expression_geo_serializer.h new file mode 100644 index 0000000000000..1906e0f0933fa --- /dev/null +++ b/src/mongo/db/matcher/expression_geo_serializer.h @@ -0,0 +1,58 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. 
If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/query/serialization_options.h" + +namespace mongo { + +/** + * We rely on this custom serializer for geo expressions to handle serialization with + * kToRepresentativeParseableValue and kToDebugTypeString policies since the original raw query + * needs to be re-parsed in order to properly serialize. + * + * Ideally each sub operator ($minDistance, $maxDistance, $geometry, $box) would serialize itself, + * rather than GeoExpression reparse the query during serialization. However, GeoExpression and + * GeoNearExpression don't capture the nesting of the various sub-operators. Re-parsing is therefore + * required to serialize GeoMatchExpression and GeoNearMatchExpression into BSON representative of + * the correct original query. + * + * To further complicate the serialization, serializing with policy + * kToRepresentativeParseableValue requires output that can again be + * re-parsed, and the geoparser performs validation checking to make sure input coordinates apply to + * the correct geo type. For example, a GeoJSON Polygon must have minimum four pairs of coordinates + * in a closed loop. The default representative parseable array value used in SerializationOptions + * (an empty array) is not useful here since it won't pass geo validation checks. As a workaround, + * this custom serializer determines a parseable value for each shape or point type. + */ +void geoCustomSerialization(BSONObjBuilder* bob, const BSONObj& obj, SerializationOptions opts); +} // namespace mongo diff --git a/src/mongo/db/matcher/expression_geo_test.cpp b/src/mongo/db/matcher/expression_geo_test.cpp index 92a06bf8b3bcc..1c6a685c5762c 100644 --- a/src/mongo/db/matcher/expression_geo_test.cpp +++ b/src/mongo/db/matcher/expression_geo_test.cpp @@ -29,15 +29,15 @@ /** Unit tests for MatchExpression operator implementations in match_operators.{h,cpp}. 
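A sketch of what the serializer described above produces under the representative-value policy, using the same Polygon predicate as the CRS test later in this file; the replacement coordinates come straight from appendGeoJSONCoordinatesLiteral():

    SerializationOptions opts;
    opts.literalPolicy = LiteralSerializationPolicy::kToRepresentativeParseableValue;
    // Input:  {$within: {$geometry: {type: 'Polygon', coordinates: [[[0,0],[3,6],[6,1],[0,0]]]}}}
    // Output: {$within: {$geometry: {type: 'Polygon', coordinates: [[[0,0],[0,1],[1,1],[0,0]]]}}}
    // i.e. the coordinates become the smallest closed loop that still passes
    // Polygon validation, so the query shape remains re-parseable.
    BSONObj representative = geoExpr->getSerializedRightHandSide(opts);  // 'geoExpr': a GeoMatchExpression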
*/ -#include "mongo/unittest/unittest.h" - #include -#include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/db/matcher/expression.h" +#include +#include + +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression_geo.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -158,15 +158,14 @@ TEST(ExpressionGeoTest, GeoNearEquivalent) { TEST(ExpressionGeoTest, SerializeGeoExpressions) { SerializationOptions opts = {}; - opts.redactIdentifiers = true; - opts.replacementForLiteralArgs = "?"; + opts.transformIdentifiers = true; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; { BSONObj query = fromjson("{$within: {$box: [{x: 4, y: 4}, [6, 6]]}}"); std::unique_ptr ge(makeGeoMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $within: { $box: \"?\" } }", // NOLINT (test - // auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$within":{"$box":"?array<>"}})", ge->getSerializedRightHandSide(opts)); } { @@ -174,32 +173,56 @@ TEST(ExpressionGeoTest, SerializeGeoExpressions) { "{$geoWithin: {$geometry: {type: \"MultiPolygon\", coordinates: [[[[20.0, 70.0],[30.0, " "70.0],[30.0, 50.0],[20.0, 50.0],[20.0, 70.0]]]]}}}"); std::unique_ptr ge(makeGeoMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $geoWithin: { $geometry: { type: \"MultiPolygon\", coordinates: \"?\" } } }", // NOLINT - // (test - // auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$geoWithin": { + "$geometry": { + "type": "MultiPolygon", + "coordinates": "?array" + } + } + })", ge->getSerializedRightHandSide(opts)); } { BSONObj query = fromjson( - "{$geoIntersects: {$geometry: {type: \"MultiPolygon\",coordinates: [[[[-20.0, " - "-70.0],[-30.0, -70.0],[-30.0, -50.0],[-20.0, -50.0],[-20.0, -70.0]]]]}}}"); + R"({ + "$geoIntersects": { + "$geometry": { + "type": "MultiPolygon", + "coordinates": [[[ + [-20.0, -70.0], + [-30.0, -70.0], + [-30.0, -50.0], + [-20.0, -50.0], + [-20.0, -70.0] + ]]] + } + } + })"); std::unique_ptr ge(makeGeoMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $geoIntersects: { $geometry: { type: \"MultiPolygon\", coordinates: \"?\" } } }", // NOLINT - // (test - // auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$geoIntersects": { + "$geometry": { + "type": "MultiPolygon", + "coordinates": "?array" + } + } + })", ge->getSerializedRightHandSide(opts)); } { BSONObj query1 = fromjson( - "{$within: {$geometry: {type: 'Polygon'," - "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]}}}"); + R"({$within: { + $geometry: { + type: 'Polygon', + coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]] + } + }})"); std::unique_ptr ge(makeGeoMatchExpression(query1)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $within: { $geometry: { type: \"Polygon\", coordinates: \"?\" } } }", // NOLINT - // (test - // auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$within":{"$geometry":{"type":"Polygon","coordinates":"?array"}}})", ge->getSerializedRightHandSide(opts)); } { @@ -207,48 +230,66 @@ TEST(ExpressionGeoTest, SerializeGeoExpressions) { "{$near: {$maxDistance: 100, " "$geometry: {type: 'Point', coordinates: [0, 0]}}}"); std::unique_ptr gne(makeGeoNearMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $near: { $maxDistance: \"?\", $geometry: { type: \"Point\", coordinates: \"?\" } } " - "}", // NOLINT (test auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$near": { + "$maxDistance": "?number", + 
"$geometry": { + "type": "Point", + "coordinates": "?array" + } + } + })", gne->getSerializedRightHandSide(opts)); } { BSONObj query = fromjson("{ $nearSphere: [0,0], $minDistance: 1, $maxDistance: 3 }"); std::unique_ptr gne(makeGeoNearMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $nearSphere: \"?\", $minDistance: \"?\", $maxDistance: \"?\" }", // NOLINT (test - // auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$nearSphere": "?array", + "$minDistance": "?number", + "$maxDistance": "?number" + })", gne->getSerializedRightHandSide(opts)); } { - BSONObj query = fromjson("{$near : [0, 0, 1] } }"); + BSONObj query = fromjson("{$near : [0, 0, 1] }"); std::unique_ptr gne(makeGeoNearMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $near: \"?\" }", // NOLINT (test auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$near":"?array"})", gne->getSerializedRightHandSide(opts)); } { BSONObj query = fromjson("{$geoNear: [0, 0, 100]}"); std::unique_ptr gne(makeGeoNearMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $geoNear: \"?\" }", // NOLINT (test auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$geoNear":"?array"})", gne->getSerializedRightHandSide(opts)); } { BSONObj query = fromjson("{$geoNear: [0, 10], $maxDistance: 80 }"); std::unique_ptr gne(makeGeoNearMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $geoNear: \"?\", $maxDistance: \"?\" }", // NOLINT (test auto-update) + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$geoNear":"?array","$maxDistance":"?number"})", gne->getSerializedRightHandSide(opts)); } { BSONObj query = fromjson("{$geoIntersects: {$geometry: [0, 0]}}"); std::unique_ptr ge(makeGeoMatchExpression(query)); - ASSERT_VALUE_EQ_AUTO( // NOLINT - "{ $geoIntersects: { $geometry: { coordinates: \"?\" } } }", + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$geoIntersects":{"$geometry":["?number","?number"]}})", ge->getSerializedRightHandSide(opts)); } + { + // Make sure we reject arrays with <2 or >2 elements. 
+ BSONObj query = fromjson("{$geoIntersects: {$geometry: [0, 0, 1]}}"); + std::unique_ptr gq(new GeoExpression); + ASSERT_NOT_OK(gq->parseFrom(query)); + query = fromjson("{$geoIntersects: {$geometry: [0]}}"); + ASSERT_NOT_OK(gq->parseFrom(query)); + } } /** @@ -280,4 +321,89 @@ TEST(ExpressionGeoTest, GeoNearNotEquivalent) { gne2(makeGeoNearMatchExpression(query2)); ASSERT(!gne1->equivalent(gne2.get())); } + +TEST(ExpressionGeoTest, SerializeWithCRSIFSpecifiedWithChangedOptions) { + BSONObj query1 = fromjson( + "{$within: {$geometry: {type: 'Polygon'," + "coordinates: [[[0, 0], [3, 6], [6, 1], [0, 0]]]," + "crs: {" + "type: 'name'," + "properties: { name: 'urn:x-mongodb:crs:strictwinding:EPSG:4326' }" + "}}}}"); + std::unique_ptr ge1(makeGeoMatchExpression(query1)); + SerializationOptions opts; + opts.literalPolicy = LiteralSerializationPolicy::kToRepresentativeParseableValue; + auto serialized = ge1->getSerializedRightHandSide(opts); + ASSERT_BSONOBJ_EQ_AUTO( + R"({ + "$within": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 0, + 1 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ], + "crs": { + "type": "name", + "properties": { + "name": "urn:x-mongodb:crs:strictwinding:EPSG:4326" + } + } + } + } + })", + serialized); + serialized = ge1->getSerializedRightHandSide(opts); + ASSERT_BSONOBJ_EQ_AUTO( + R"({ + "$within": { + "$geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 0, + 0 + ], + [ + 0, + 1 + ], + [ + 1, + 1 + ], + [ + 0, + 0 + ] + ] + ], + "crs": { + "type": "name", + "properties": { + "name": "urn:x-mongodb:crs:strictwinding:EPSG:4326" + } + } + } + } + })", + serialized); +} } // namespace mongo diff --git a/src/mongo/db/matcher/expression_internal_bucket_geo_within.cpp b/src/mongo/db/matcher/expression_internal_bucket_geo_within.cpp index bb1edfde18b63..95c0de2ea6748 100644 --- a/src/mongo/db/matcher/expression_internal_bucket_geo_within.cpp +++ b/src/mongo/db/matcher/expression_internal_bucket_geo_within.cpp @@ -28,14 +28,22 @@ */ -#include #include #include #include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/bson/dotted_path_support.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/geo/geoparser.h" +#include "mongo/db/geo/shapes.h" #include "mongo/db/matcher/expression_internal_bucket_geo_within.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/timeseries/timeseries_constants.h" @@ -190,11 +198,7 @@ void InternalBucketGeoWithinMatchExpression::serialize(BSONObjBuilder* builder, // Serialize the geometry shape. BSONObjBuilder withinRegionBob( bob.subobjStart(InternalBucketGeoWithinMatchExpression::kWithinRegion)); - if (opts.replacementForLiteralArgs) { - bob.append(_geoContainer->getGeoElement().fieldName(), *opts.replacementForLiteralArgs); - } else { - withinRegionBob.append(_geoContainer->getGeoElement()); - } + opts.appendLiteral(&withinRegionBob, _geoContainer->getGeoElement()); withinRegionBob.doneFast(); // Serialize the field which is being searched over. 
bob.append(InternalBucketGeoWithinMatchExpression::kField, diff --git a/src/mongo/db/matcher/expression_internal_bucket_geo_within.h b/src/mongo/db/matcher/expression_internal_bucket_geo_within.h index b7e4af99eb831..d24b0f67cb96f 100644 --- a/src/mongo/db/matcher/expression_internal_bucket_geo_within.h +++ b/src/mongo/db/matcher/expression_internal_bucket_geo_within.h @@ -29,9 +29,28 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/field_ref.h" #include "mongo/db/geo/geometry_container.h" #include "mongo/db/geo/geoparser.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -70,6 +89,7 @@ class InternalBucketGeoWithinMatchExpression final : public MatchExpression { : MatchExpression(MatchExpression::INTERNAL_BUCKET_GEO_WITHIN, std::move(annotation)), _geoContainer(container), _indexField("data." + field), + _fieldRef(_indexField), _field(std::move(field)) {} void debugString(StringBuilder& debug, int indentationLevel) const final; @@ -127,8 +147,7 @@ class InternalBucketGeoWithinMatchExpression final : public MatchExpression { } const FieldRef* fieldRef() const final { - MONGO_UNREACHABLE_TASSERT(5837104); - return nullptr; + return &_fieldRef; } void acceptVisitor(MatchExpressionMutableVisitor* visitor) final { @@ -153,6 +172,7 @@ class InternalBucketGeoWithinMatchExpression final : public MatchExpression { std::shared_ptr _geoContainer; std::string _indexField; + FieldRef _fieldRef; std::string _field; }; diff --git a/src/mongo/db/matcher/expression_internal_bucket_geo_within_test.cpp b/src/mongo/db/matcher/expression_internal_bucket_geo_within_test.cpp index ad66bda471d9d..c842691675ed7 100644 --- a/src/mongo/db/matcher/expression_internal_bucket_geo_within_test.cpp +++ b/src/mongo/db/matcher/expression_internal_bucket_geo_within_test.cpp @@ -27,15 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/expression_internal_bucket_geo_within.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_internal_expr_comparison.h b/src/mongo/db/matcher/expression_internal_expr_comparison.h index 336d8a1fd8bd8..8f08eddd2b777 100644 --- a/src/mongo/db/matcher/expression_internal_expr_comparison.h +++ b/src/mongo/db/matcher/expression_internal_expr_comparison.h @@ -29,7 +29,22 @@ #pragma once +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/path.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_internal_expr_comparison_test.cpp b/src/mongo/db/matcher/expression_internal_expr_comparison_test.cpp index 767ecc4df2a71..8b7034dc9b647 100644 --- a/src/mongo/db/matcher/expression_internal_expr_comparison_test.cpp +++ b/src/mongo/db/matcher/expression_internal_expr_comparison_test.cpp @@ -27,15 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/matcher/expression_internal_expr_comparison.h" -#include "mongo/db/matcher/matcher.h" -#include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/index_tag.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/expression_internal_expr_eq_test.cpp b/src/mongo/db/matcher/expression_internal_expr_eq_test.cpp index f4621a6cb2363..52b9f13ba67d4 100644 --- a/src/mongo/db/matcher/expression_internal_expr_eq_test.cpp +++ b/src/mongo/db/matcher/expression_internal_expr_eq_test.cpp @@ -27,16 +27,35 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_internal_expr_comparison.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/matcher.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/index_tag.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/expression_leaf.cpp b/src/mongo/db/matcher/expression_leaf.cpp index faa1fde055579..b3f5550e7ad4c 100644 --- a/src/mongo/db/matcher/expression_leaf.cpp +++ b/src/mongo/db/matcher/expression_leaf.cpp @@ -27,22 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/expression_leaf.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include +#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonelement_comparator.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/config.h" -#include "mongo/db/field_ref.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/matcher/expression_parser.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/util/builder.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/path.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/platform/decimal128.h" +#include "mongo/platform/overflow_arithmetic.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/errno_util.h" #include "mongo/util/pcre.h" #include "mongo/util/pcre_util.h" @@ -51,10 +60,11 @@ namespace mongo { +template ComparisonMatchExpressionBase::ComparisonMatchExpressionBase( MatchType type, boost::optional path, - Value rhs, + T&& rhs, ElementPath::LeafArrayBehavior leafArrBehavior, ElementPath::NonLeafArrayBehavior nonLeafArrBehavior, clonable_ptr annotation, @@ -66,6 +76,24 @@ ComparisonMatchExpressionBase::ComparisonMatchExpressionBase( invariant(_rhs.type() != BSONType::EOO); } +// Instantiate above constructor for 'Value&&' and 'const BSONElement&' types. 
+template ComparisonMatchExpressionBase::ComparisonMatchExpressionBase( + MatchType, + boost::optional, + Value&&, + ElementPath::LeafArrayBehavior, + ElementPath::NonLeafArrayBehavior, + clonable_ptr, + const CollatorInterface*); +template ComparisonMatchExpressionBase::ComparisonMatchExpressionBase( + MatchType, + boost::optional, + const BSONElement&, + ElementPath::LeafArrayBehavior, + ElementPath::NonLeafArrayBehavior, + clonable_ptr, + const CollatorInterface*); + bool ComparisonMatchExpressionBase::equivalent(const MatchExpression* other) const { if (other->matchType() != matchType()) return false; @@ -87,22 +115,20 @@ void ComparisonMatchExpressionBase::debugString(StringBuilder& debug, int indent _debugStringAttachTagInfo(&debug); } -BSONObj ComparisonMatchExpressionBase::getSerializedRightHandSide(SerializationOptions opts) const { - if (opts.replacementForLiteralArgs) { - return BSON(name() << *opts.replacementForLiteralArgs); - } else { - return BSON(name() << _rhs); - } +void ComparisonMatchExpressionBase::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + opts.appendLiteral(bob, name(), _rhs); } +template ComparisonMatchExpression::ComparisonMatchExpression(MatchType type, boost::optional path, - Value rhs, + T&& rhs, clonable_ptr annotation, const CollatorInterface* collator) : ComparisonMatchExpressionBase(type, path, - std::move(rhs), + std::forward(rhs), ElementPath::LeafArrayBehavior::kTraverse, ElementPath::NonLeafArrayBehavior::kTraverse, std::move(annotation), @@ -122,6 +148,18 @@ ComparisonMatchExpression::ComparisonMatchExpression(MatchType type, } } +// Instantiate above constructor for 'Value&&' and 'const BSONElement&' types. +template ComparisonMatchExpression::ComparisonMatchExpression(MatchType, + boost::optional, + Value&&, + clonable_ptr, + const CollatorInterface*); +template ComparisonMatchExpression::ComparisonMatchExpression(MatchType, + boost::optional, + const BSONElement&, + clonable_ptr, + const CollatorInterface*); + bool ComparisonMatchExpression::matchesSingleElement(const BSONElement& e, MatchDetails* details) const { if (e.type() != _rhs.type()) { @@ -276,15 +314,21 @@ void RegexMatchExpression::debugString(StringBuilder& debug, int indentationLeve _debugStringAttachTagInfo(&debug); } -BSONObj RegexMatchExpression::getSerializedRightHandSide(SerializationOptions opts) const { - BSONObjBuilder regexBuilder; - regexBuilder.append("$regex", opts.replacementForLiteralArgs.value_or(_regex)); +void RegexMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + // Sadly we cannot use the fast/short syntax to append this, we need to be careful to generate a + // valid regex, and the default string "?" is not valid. + if (opts.literalPolicy == LiteralSerializationPolicy::kToRepresentativeParseableValue) { + bob->append("$regex", "\\?"); + } else { + // May generate {$regex: "?string"} - invalid regex but we don't care since it's not + // parseable it's just saying "there was a string here." 
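The constructor hunks above template the comparison base classes on `T&& rhs` so callers can hand over either an owning `Value` or a borrowed `BSONElement`, and then explicitly instantiate the two forms in the .cpp. A standalone sketch of that pattern, with placeholder `Value`/`BSONElement` types (simplified stand-ins, not the mongo classes):

```cpp
#include <string>
#include <utility>

struct Value {
    std::string v;  // stand-in for mongo::Value
};
struct BSONElement {
    std::string v;  // stand-in for mongo::BSONElement
};

class ComparisonBase {
public:
    // Declared in the "header"; defined below as it would be in the .cpp.
    template <typename T>
    explicit ComparisonBase(T&& rhs);

private:
    Value _rhs;
};

template <typename T>
ComparisonBase::ComparisonBase(T&& rhs) : _rhs{std::forward<T>(rhs).v} {}

// Explicit instantiations for the only two argument types callers use, which
// keeps the template definition out of the header (mirroring the hunk above).
template ComparisonBase::ComparisonBase(Value&&);
template ComparisonBase::ComparisonBase(const BSONElement&);

int main() {
    ComparisonBase fromValue(Value{"1"});  // rvalue Value: contents are moved in
    const BSONElement elem{"2"};
    ComparisonBase fromElement(elem);      // borrowed element: contents are copied
    (void)fromValue;
    (void)fromElement;
    return 0;
}
```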
+ opts.appendLiteral(bob, "$regex", _regex); + } if (!_flags.empty()) { - regexBuilder.append("$options", opts.replacementForLiteralArgs.value_or(_flags)); + opts.appendLiteral(bob, "$options", _flags); } - - return regexBuilder.obj(); } void RegexMatchExpression::serializeToBSONTypeRegex(BSONObjBuilder* out) const { @@ -352,12 +396,10 @@ void ModMatchExpression::debugString(StringBuilder& debug, int indentationLevel) _debugStringAttachTagInfo(&debug); } -BSONObj ModMatchExpression::getSerializedRightHandSide(SerializationOptions opts) const { - if (auto str = opts.replacementForLiteralArgs) { - return BSON("$mod" << *str); - } else { - return BSON("$mod" << BSON_ARRAY(_divisor << _remainder)); - } +void ModMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + bob->append("$mod", + BSON_ARRAY(opts.serializeLiteral(_divisor) << opts.serializeLiteral(_remainder))); } bool ModMatchExpression::equivalent(const MatchExpression* other) const { @@ -387,12 +429,9 @@ void ExistsMatchExpression::debugString(StringBuilder& debug, int indentationLev _debugStringAttachTagInfo(&debug); } -BSONObj ExistsMatchExpression::getSerializedRightHandSide(SerializationOptions opts) const { - if (opts.replacementForLiteralArgs) { - return BSON("$exists" << *opts.replacementForLiteralArgs); - } else { - return BSON("$exists" << true); - } +void ExistsMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + opts.appendLiteral(bob, "$exists", true); } bool ExistsMatchExpression::equivalent(const MatchExpression* other) const { @@ -471,14 +510,43 @@ void InMatchExpression::debugString(StringBuilder& debug, int indentationLevel) _debugStringAttachTagInfo(&debug); } -BSONObj InMatchExpression::getSerializedRightHandSide(SerializationOptions opts) const { - if (opts.replacementForLiteralArgs) { - // In this case, treat an '$in' with any number of arguments as equivalent. - return BSON("$in" << BSON_ARRAY(*opts.replacementForLiteralArgs)); +namespace { +/** + * Reduces the potentially large vector of elements to just the first of each "canonical" type. + * Different types of numbers are not considered distinct. + * + * For example, collapses [2, 4, NumberInt(3), "string", "another", 3, 5] into just [2, "string"]. + */ +std::vector justFirstOfEachType(std::vector elems) { + stdx::unordered_set seenTypes; + std::vector result; + for (auto&& elem : elems) { + bool inserted = seenTypes.insert(canonicalizeBSONType(elem.type())).second; + if (inserted) { + // A new type. 
+ result.emplace_back(elem); + } + } + return result; +} +} // namespace + +void InMatchExpression::serializeToShape(BSONObjBuilder* bob, SerializationOptions opts) const { + std::vector firstOfEachType = justFirstOfEachType(_equalitySet); + if (hasRegex()) { + firstOfEachType.emplace_back(BSONRegEx()); + } + opts.appendLiteral(bob, "$in", std::move(firstOfEachType)); +} + +void InMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { + if (opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) { + serializeToShape(bob, opts); + return; } - BSONObjBuilder inBob; - BSONArrayBuilder arrBob(inBob.subarrayStart("$in")); + BSONArrayBuilder arrBob(bob->subarrayStart("$in")); for (auto&& _equality : _equalitySet) { arrBob.append(_equality); } @@ -488,7 +556,6 @@ BSONObj InMatchExpression::getSerializedRightHandSide(SerializationOptions opts) arrBob.append(regexBob.obj().firstElement()); } arrBob.doneFast(); - return inBob.obj(); } bool InMatchExpression::equivalent(const MatchExpression* other) const { @@ -826,7 +893,8 @@ void BitTestMatchExpression::debugString(StringBuilder& debug, int indentationLe _debugStringAttachTagInfo(&debug); } -BSONObj BitTestMatchExpression::getSerializedRightHandSide(SerializationOptions opts) const { +void BitTestMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const { std::string opString = ""; switch (matchType()) { @@ -846,17 +914,15 @@ BSONObj BitTestMatchExpression::getSerializedRightHandSide(SerializationOptions MONGO_UNREACHABLE; } - if (opts.replacementForLiteralArgs) { - return BSON(opString << *opts.replacementForLiteralArgs); - } - BSONArrayBuilder arrBob; for (auto bitPosition : _bitPositions) { arrBob.append(static_cast(bitPosition)); } arrBob.doneFast(); - - return BSON(opString << arrBob.arr()); + // Unfortunately this cannot be done without copying the array into the BSONObjBuilder, since + // `opts.appendLiteral` may choose to append this actual array, a representative empty array, or + // a debug string. 
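The `justFirstOfEachType()` helper above shrinks a potentially huge `$in` list down to one element per canonical BSON type before shapifying, so `[2, 4, NumberInt(3), "string", "another", 3, 5]` collapses to one number and one string. A toy standalone version of the same reduction, with `std::variant` standing in for `BSONElement` and all numeric alternatives sharing one canonical type (an assumed simplification, not the mongo code):

```cpp
#include <iostream>
#include <string>
#include <unordered_set>
#include <variant>
#include <vector>

using Elem = std::variant<int, double, std::string>;

// Treat every numeric alternative as the same canonical "number" type.
static int canonicalType(const Elem& e) {
    return std::holds_alternative<std::string>(e) ? 1 : 0;
}

std::vector<Elem> justFirstOfEachType(const std::vector<Elem>& elems) {
    std::unordered_set<int> seenTypes;
    std::vector<Elem> result;
    for (const auto& elem : elems) {
        if (seenTypes.insert(canonicalType(elem)).second) {
            result.push_back(elem);  // first element of a type we have not seen yet
        }
    }
    return result;
}

int main() {
    std::vector<Elem> in{2, 4.0, 3, std::string("string"), std::string("another"), 5};
    auto reduced = justFirstOfEachType(in);
    std::cout << reduced.size() << '\n';  // 2: one number, one string
}
```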
+ opts.appendLiteral(bob, opString, arrBob.arr()); } bool BitTestMatchExpression::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/expression_leaf.h b/src/mongo/db/matcher/expression_leaf.h index 86e736379280e..9ce2be54feb98 100644 --- a/src/mongo/db/matcher/expression_leaf.h +++ b/src/mongo/db/matcher/expression_leaf.h @@ -29,17 +29,36 @@ #pragma once +#include #include +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement_comparator.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_path.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/path.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/query/util/make_data_structure.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" @@ -149,9 +168,10 @@ class ComparisonMatchExpressionBase : public LeafMatchExpression { } } + template ComparisonMatchExpressionBase(MatchType type, boost::optional path, - Value rhs, + T&& rhs, ElementPath::LeafArrayBehavior, ElementPath::NonLeafArrayBehavior, clonable_ptr annotation = nullptr, @@ -161,7 +181,8 @@ class ComparisonMatchExpressionBase : public LeafMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel = 0) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + virtual void appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts) const final; virtual bool equivalent(const MatchExpression* other) const; @@ -248,9 +269,10 @@ class ComparisonMatchExpression : public ComparisonMatchExpressionBase { return isComparisonMatchExpression(expr->matchType()); } + template ComparisonMatchExpression(MatchType type, boost::optional path, - Value rhs, + T&& rhs, clonable_ptr annotation = nullptr, const CollatorInterface* collator = nullptr); @@ -272,7 +294,9 @@ class EqualityMatchExpression final : public ComparisonMatchExpression { const BSONElement& rhs, clonable_ptr annotation = nullptr, const CollatorInterface* collator = nullptr) - : ComparisonMatchExpression(EQ, path, Value(rhs), std::move(annotation), collator) {} + : ComparisonMatchExpression(EQ, path, rhs, std::move(annotation), collator) { + invariant(!rhs.eoo()); + } StringData name() const final { return kName; @@ -280,7 +304,7 @@ class EqualityMatchExpression final : public ComparisonMatchExpression { std::unique_ptr clone() const final { std::unique_ptr e = - std::make_unique(path(), Value(getData()), _errorAnnotation); + std::make_unique(path(), getData(), _errorAnnotation); if (getTag()) { e->setTag(getTag()->clone()); } @@ -311,7 +335,9 @@ class LTEMatchExpression final : public ComparisonMatchExpression { LTEMatchExpression(boost::optional path, const BSONElement& rhs, clonable_ptr annotation = nullptr) - : ComparisonMatchExpression(LTE, path, Value(rhs), std::move(annotation)) {} + : ComparisonMatchExpression(LTE, path, rhs, std::move(annotation)) { + invariant(!rhs.eoo()); + } StringData 
name() const final { return kName; @@ -350,7 +376,9 @@ class LTMatchExpression final : public ComparisonMatchExpression { LTMatchExpression(boost::optional path, const BSONElement& rhs, clonable_ptr annotation = nullptr) - : ComparisonMatchExpression(LT, path, Value(rhs), std::move(annotation)) {} + : ComparisonMatchExpression(LT, path, rhs, std::move(annotation)) { + invariant(!rhs.eoo()); + } StringData name() const final { return kName; @@ -394,7 +422,9 @@ class GTMatchExpression final : public ComparisonMatchExpression { GTMatchExpression(boost::optional path, const BSONElement& rhs, clonable_ptr annotation = nullptr) - : ComparisonMatchExpression(GT, path, Value(rhs), std::move(annotation)) {} + : ComparisonMatchExpression(GT, path, rhs, std::move(annotation)) { + invariant(!rhs.eoo()); + } StringData name() const final { return kName; @@ -437,7 +467,9 @@ class GTEMatchExpression final : public ComparisonMatchExpression { GTEMatchExpression(boost::optional path, const BSONElement& rhs, clonable_ptr annotation = nullptr) - : ComparisonMatchExpression(GTE, path, Value(rhs), std::move(annotation)) {} + : ComparisonMatchExpression(GTE, path, rhs, std::move(annotation)) { + invariant(!rhs.eoo()); + } StringData name() const final { return kName; @@ -508,7 +540,7 @@ class RegexMatchExpression : public LeafMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; void serializeToBSONTypeRegex(BSONObjBuilder* out) const; @@ -590,7 +622,7 @@ class ModMatchExpression : public LeafMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; virtual bool equivalent(const MatchExpression* other) const; @@ -657,7 +689,7 @@ class ExistsMatchExpression : public LeafMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; virtual bool equivalent(const MatchExpression* other) const; @@ -691,7 +723,7 @@ class InMatchExpression : public LeafMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; virtual bool equivalent(const MatchExpression* other) const; @@ -763,6 +795,12 @@ class InMatchExpression : public LeafMatchExpression { private: ExpressionOptimizerFunc getOptimizer() const final; + /** + * A helper to serialize to something like {$in: "?array"} or similar, depending on + * 'opts' and whether we have a mixed-type $in or not. + */ + void serializeToShape(BSONObjBuilder* bob, SerializationOptions opts) const; + // Whether or not '_equalities' has a jstNULL element in it. 
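Throughout these headers the serialization hook changes from returning a `BSONObj` (`getSerializedRightHandSide`) to appending into a caller-provided builder (`appendSerializedRightHandSide`), with a thin non-virtual wrapper preserving the old convenience entry point. A minimal standalone sketch of that shape, using a string-based `Builder` as a stand-in for `BSONObjBuilder` (assumed simplification):

```cpp
#include <iostream>
#include <sstream>
#include <string>

struct Builder {  // stand-in for BSONObjBuilder
    std::ostringstream out;
    void append(const std::string& k, const std::string& v) {
        out << k << ": " << v << " ";
    }
};

class PathMatchExpression {
public:
    virtual ~PathMatchExpression() = default;

    // Subclasses append their right-hand side into 'bob'; no temporary object.
    virtual void appendSerializedRightHandSide(Builder* bob) const = 0;

    // Non-virtual wrapper kept for callers that still want a standalone object.
    std::string getSerializedRightHandSide() const {
        Builder bob;
        appendSerializedRightHandSide(&bob);
        return bob.out.str();
    }
};

class ExistsMatchExpression : public PathMatchExpression {
public:
    void appendSerializedRightHandSide(Builder* bob) const override {
        bob->append("$exists", "true");
    }
};

int main() {
    ExistsMatchExpression e;
    std::cout << e.getSerializedRightHandSide() << '\n';  // $exists: true
}
```

Appending into an outer builder avoids materializing an intermediate object for every nested expression while still letting tests grab a whole object when they need one.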
bool _hasNull = false; @@ -834,7 +872,7 @@ class BitTestMatchExpression : public LeafMatchExpression { virtual void debugString(StringBuilder& debug, int indentationLevel) const; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; virtual bool equivalent(const MatchExpression* other) const; @@ -914,7 +952,8 @@ class BitsAllSetMatchExpression : public BitTestMatchExpression { BitsAllSetMatchExpression(boost::optional path, std::vector bitPositions, clonable_ptr annotation = nullptr) - : BitTestMatchExpression(BITS_ALL_SET, path, bitPositions, std::move(annotation)) {} + : BitTestMatchExpression( + BITS_ALL_SET, path, std::move(bitPositions), std::move(annotation)) {} BitsAllSetMatchExpression(boost::optional path, uint64_t bitMask, @@ -958,7 +997,8 @@ class BitsAllClearMatchExpression : public BitTestMatchExpression { BitsAllClearMatchExpression(boost::optional path, std::vector bitPositions, clonable_ptr annotation = nullptr) - : BitTestMatchExpression(BITS_ALL_CLEAR, path, bitPositions, std::move(annotation)) {} + : BitTestMatchExpression( + BITS_ALL_CLEAR, path, std::move(bitPositions), std::move(annotation)) {} BitsAllClearMatchExpression(boost::optional path, uint64_t bitMask, @@ -1002,7 +1042,8 @@ class BitsAnySetMatchExpression : public BitTestMatchExpression { BitsAnySetMatchExpression(boost::optional path, std::vector bitPositions, clonable_ptr annotation = nullptr) - : BitTestMatchExpression(BITS_ANY_SET, path, bitPositions, std::move(annotation)) {} + : BitTestMatchExpression( + BITS_ANY_SET, path, std::move(bitPositions), std::move(annotation)) {} BitsAnySetMatchExpression(boost::optional path, uint64_t bitMask, @@ -1046,7 +1087,8 @@ class BitsAnyClearMatchExpression : public BitTestMatchExpression { BitsAnyClearMatchExpression(boost::optional path, std::vector bitPositions, clonable_ptr annotation = nullptr) - : BitTestMatchExpression(BITS_ANY_CLEAR, path, bitPositions, std::move(annotation)) {} + : BitTestMatchExpression( + BITS_ANY_CLEAR, path, std::move(bitPositions), std::move(annotation)) {} BitsAnyClearMatchExpression(boost::optional path, uint64_t bitMask, diff --git a/src/mongo/db/matcher/expression_leaf_test.cpp b/src/mongo/db/matcher/expression_leaf_test.cpp index e99605bdc2283..e0f21606a6fbd 100644 --- a/src/mongo/db/matcher/expression_leaf_test.cpp +++ b/src/mongo/db/matcher/expression_leaf_test.cpp @@ -29,21 +29,24 @@ /** Unit tests for MatchMatchExpression operator implementations in match_operators.{h,cpp}. 
*/ +#include +// IWYU pragma: no_include "ext/type_traits.h" #include -#include "mongo/unittest/unittest.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/db/matcher/expression.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression_leaf.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/expression_tree.h" -#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -171,9 +174,13 @@ TEST(EqOp, MatchesElement) { ASSERT(eq.equivalent(&eq)); } -DEATH_TEST_REGEX(EqOp, InvalidEooOperand, "Invariant failure.*_rhs") { - BSONObj operand; - EqualityMatchExpression eq(""_sd, operand.firstElement()); +DEATH_TEST_REGEX(EqOp, InvalidEooOperand, "failure.*eoo") { + try { + BSONObj operand; + EqualityMatchExpression eq(""_sd, operand.firstElement()); + } catch (...) { + invariant(false, "Threw when trying to construct obj from eoo element"); + } } TEST(EqOp, MatchesScalar) { @@ -342,9 +349,13 @@ TEST(LtOp, MatchesElement) { ASSERT(!lt.matchesSingleElement(notMatchWrongType.firstElement())); } -DEATH_TEST_REGEX(LtOp, InvalidEooOperand, "Invariant failure.*_rhs") { - BSONObj operand; - LTMatchExpression lt(""_sd, operand.firstElement()); +DEATH_TEST_REGEX(LtOp, InvalidEooOperand, "failure.*eoo") { + try { + BSONObj operand; + LTMatchExpression lt(""_sd, operand.firstElement()); + } catch (...) { + invariant(false, "Threw when trying to construct obj from eoo element"); + } } TEST(LtOp, MatchesScalar) { @@ -463,9 +474,13 @@ TEST(LteOp, MatchesElement) { ASSERT(!lte.matchesSingleElement(notMatchWrongType.firstElement())); } -DEATH_TEST_REGEX(LteOp, InvalidEooOperand, "Invariant failure.*_rhs") { - BSONObj operand; - LTEMatchExpression lte(""_sd, operand.firstElement()); +DEATH_TEST_REGEX(LteOp, InvalidEooOperand, "failure.*eoo") { + try { + BSONObj operand; + LTEMatchExpression lte(""_sd, operand.firstElement()); + } catch (...) { + invariant(false, "Threw when trying to construct obj from eoo element"); + } } TEST(LteOp, MatchesScalar) { @@ -564,9 +579,13 @@ TEST(LteOp, ElemMatchKey) { ASSERT_EQUALS("1", details.elemMatchKey()); } -DEATH_TEST_REGEX(GtOp, InvalidEooOperand, "Invariant failure.*_rhs") { - BSONObj operand; - GTMatchExpression gt(""_sd, operand.firstElement()); +DEATH_TEST_REGEX(GtOp, InvalidEooOperand, "failure.*eoo") { + try { + BSONObj operand; + GTMatchExpression gt(""_sd, operand.firstElement()); + } catch (...) { + invariant(false, "Threw when trying to construct obj from eoo element"); + } } TEST(GtOp, MatchesScalar) { @@ -680,9 +699,13 @@ TEST(GteOp, MatchesElement) { ASSERT(!gte.matchesSingleElement(notMatchWrongType.firstElement())); } -DEATH_TEST_REGEX(GteOp, InvalidEooOperand, "Invariant failure.*_rhs") { - BSONObj operand; - GTEMatchExpression gte(""_sd, operand.firstElement()); +DEATH_TEST_REGEX(GteOp, InvalidEooOperand, "failure.*eoo") { + try { + BSONObj operand; + GTEMatchExpression gte(""_sd, operand.firstElement()); + } catch (...) 
{ + invariant(false, "Threw when trying to construct obj from eoo element"); + } } TEST(GteOp, MatchesScalar) { diff --git a/src/mongo/db/matcher/expression_optimize_test.cpp b/src/mongo/db/matcher/expression_optimize_test.cpp index 1cc91d127e4d3..c8807aa4cbe30 100644 --- a/src/mongo/db/matcher/expression_optimize_test.cpp +++ b/src/mongo/db/matcher/expression_optimize_test.cpp @@ -27,16 +27,41 @@ * it in the license file. */ -#include "mongo/db/pipeline/expression.h" - +#include +#include +#include #include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/index_tag.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/parsed_find_command.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -76,7 +101,8 @@ Status isValid(const std::string& queryStr, const FindCommandRequest& findComman BSONObj queryObj = fromjson(queryStr); std::unique_ptr me(parseMatchExpression(queryObj)); me = MatchExpression::optimize(std::move(me)); - if (auto status = CanonicalQuery::isValid(me.get(), findCommand).getStatus(); !status.isOK()) { + if (auto status = parsed_find_command::isValid(me.get(), findCommand).getStatus(); + !status.isOK()) { return status; } return CanonicalQuery::isValidNormalized(me.get()); @@ -365,7 +391,6 @@ TEST(ExpressionOptimizeTest, AndWithSingleChildAlwaysTrueOptimizesToEmptyAnd) { BSONObj obj = fromjson("{$and: [{$alwaysTrue: 1}]}"); std::unique_ptr matchExpression(parseMatchExpression(obj)); auto optimizedMatchExpression = MatchExpression::optimize(std::move(matchExpression)); - // TODO SERVER-34759 We want this to optimize to an AlwaysTrueMatchExpression. ASSERT_TRUE(dynamic_cast(optimizedMatchExpression.get())); ASSERT_BSONOBJ_EQ(optimizedMatchExpression->serialize(), fromjson("{}")); } @@ -374,7 +399,6 @@ TEST(ExpressionOptimizeTest, AndWithEachChildAlwaysTrueOptimizesToEmptyAnd) { BSONObj obj = fromjson("{$and: [{$alwaysTrue: 1}, {$alwaysTrue: 1}]}"); std::unique_ptr matchExpression(parseMatchExpression(obj)); auto optimizedMatchExpression = MatchExpression::optimize(std::move(matchExpression)); - // TODO SERVER-34759 We want this to optimize to an AlwaysTrueMatchExpression. 
ASSERT_TRUE(dynamic_cast(optimizedMatchExpression.get())); ASSERT_BSONOBJ_EQ(optimizedMatchExpression->serialize(), fromjson("{}")); } @@ -390,7 +414,7 @@ TEST(ExpressionOptimizeTest, OrWithAlwaysTrueOptimizesToAlwaysTrue) { BSONObj obj = fromjson("{$or: [{a: 1}, {$alwaysTrue: 1}]}"); std::unique_ptr matchExpression(parseMatchExpression(obj)); auto optimizedMatchExpression = MatchExpression::optimize(std::move(matchExpression)); - ASSERT_BSONOBJ_EQ(optimizedMatchExpression->serialize(), fromjson("{$alwaysTrue: 1}")); + ASSERT_BSONOBJ_EQ(optimizedMatchExpression->serialize(), fromjson("{}")); } TEST(ExpressionOptimizeTest, OrRemovesAlwaysFalseChildren) { @@ -429,7 +453,7 @@ TEST(ExpressionOptimizeTest, NestedOrWithAlwaysTrueOptimizesToAlwaysTrue) { BSONObj obj = fromjson("{$or: [{$or: [{$alwaysTrue: 1}, {a: 1}]}, {b: 1}]}"); std::unique_ptr matchExpression(parseMatchExpression(obj)); auto optimizedMatchExpression = MatchExpression::optimize(std::move(matchExpression)); - ASSERT_BSONOBJ_EQ(optimizedMatchExpression->serialize(), fromjson("{$alwaysTrue: 1}")); + ASSERT_BSONOBJ_EQ(optimizedMatchExpression->serialize(), fromjson("{}")); } TEST(ExpressionOptimizeTest, OrRewrittenToIn) { diff --git a/src/mongo/db/matcher/expression_parameterization.cpp b/src/mongo/db/matcher/expression_parameterization.cpp index 72781001a6e17..05a9e886095eb 100644 --- a/src/mongo/db/matcher/expression_parameterization.cpp +++ b/src/mongo/db/matcher/expression_parameterization.cpp @@ -31,6 +31,11 @@ #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" + namespace mongo { void MatchExpressionParameterizationVisitor::visitBitTestExpression(BitTestMatchExpression* expr) { expr->setBitPositionsParamId(_context->nextInputParamId(expr)); diff --git a/src/mongo/db/matcher/expression_parameterization.h b/src/mongo/db/matcher/expression_parameterization.h index 79a12362c9cd4..bff90b6155888 100644 --- a/src/mongo/db/matcher/expression_parameterization.h +++ b/src/mongo/db/matcher/expression_parameterization.h @@ -29,14 +29,21 @@ #pragma once +#include +#include +#include +#include #include +#include +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/expression_where.h" +#include "mongo/util/assert_util_core.h" namespace mongo { /** diff --git a/src/mongo/db/matcher/expression_parameterization_test.cpp b/src/mongo/db/matcher/expression_parameterization_test.cpp index 8f5a64ccddc24..a6c4b1689da64 100644 --- a/src/mongo/db/matcher/expression_parameterization_test.cpp +++ b/src/mongo/db/matcher/expression_parameterization_test.cpp @@ -28,10 +28,37 @@ */ #include "mongo/db/matcher/expression_parameterization.h" -#include "mongo/db/operation_context.h" + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/matcher_type_set.h" +#include 
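The updated optimizer expectations above reflect that an `$and`/`$or` containing an `$alwaysTrue` child now collapses to an empty (trivially true) predicate rather than re-serializing `$alwaysTrue`. A toy standalone sketch of that kind of simplification, not the mongo optimizer:

```cpp
#include <iostream>
#include <memory>
#include <utility>
#include <vector>

struct Expr {
    enum Kind { kAlwaysTrue, kAlwaysFalse, kLeaf, kOr } kind;
    std::vector<std::unique_ptr<Expr>> children;
};

std::unique_ptr<Expr> makeExpr(Expr::Kind k) {
    return std::make_unique<Expr>(Expr{k, {}});
}

std::unique_ptr<Expr> optimize(std::unique_ptr<Expr> e) {
    if (e->kind != Expr::kOr)
        return e;
    std::vector<std::unique_ptr<Expr>> kept;
    for (auto& child : e->children) {
        auto c = optimize(std::move(child));
        if (c->kind == Expr::kAlwaysTrue)
            return makeExpr(Expr::kAlwaysTrue);  // any always-true child wins
        if (c->kind != Expr::kAlwaysFalse)
            kept.push_back(std::move(c));        // always-false children are dropped
    }
    e->children = std::move(kept);
    return e;
}

int main() {
    auto orExpr = makeExpr(Expr::kOr);
    orExpr->children.push_back(makeExpr(Expr::kLeaf));
    orExpr->children.push_back(makeExpr(Expr::kAlwaysTrue));
    std::cout << (optimize(std::move(orExpr))->kind == Expr::kAlwaysTrue) << '\n';  // 1
}
```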
"mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/query_planner_params.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/expression_parser.cpp b/src/mongo/db/matcher/expression_parser.cpp index cca4c984f79e5..4868881d09a7f 100644 --- a/src/mongo/db/matcher/expression_parser.cpp +++ b/src/mongo/db/matcher/expression_parser.cpp @@ -27,17 +27,42 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/expression_parser.h" - +#include +#include +#include +#include +#include +#include #include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_depth.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/geo/geometry_container.h" #include "mongo/db/matcher/doc_validation_util.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_array.h" @@ -47,9 +72,11 @@ #include "mongo/db/matcher/expression_internal_eq_hashed_key.h" #include "mongo/db/matcher/expression_internal_expr_comparison.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/expression_tree.h" #include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h" #include "mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" #include "mongo/db/matcher/schema/expression_internal_schema_cond.h" @@ -67,10 +94,12 @@ #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_xor.h" #include "mongo/db/matcher/schema/json_schema_parser.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/query/dbref.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/stats/counters.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #include "mongo/util/string_map.h" @@ -190,8 +219,8 @@ StatusWithMatchExpression parseComparison( // (e.g. {a: {$gt: /b/}} is illegal). 
if (MatchExpression::EQ != cmp->matchType() && BSONType::RegEx == e.type()) { return {ErrorCodes::BadValue, - str::stream() << "Can't have RegEx as arg to predicate over field '" << name - << "'."}; + str::stream() << "Can't have RegEx as arg to non-equality predicate over field '" + << name << "'."}; } cmp->setCollator(expCtx->getCollator()); diff --git a/src/mongo/db/matcher/expression_parser.h b/src/mongo/db/matcher/expression_parser.h index 038e9eb12ec63..4707822e5a588 100644 --- a/src/mongo/db/matcher/expression_parser.h +++ b/src/mongo/db/matcher/expression_parser.h @@ -29,10 +29,17 @@ #pragma once +#include +#include +#include #include +#include +#include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_tree.h" diff --git a/src/mongo/db/matcher/expression_parser_array_test.cpp b/src/mongo/db/matcher/expression_parser_array_test.cpp index 243bed553f0c5..f56f09b9b5da7 100644 --- a/src/mongo/db/matcher/expression_parser_array_test.cpp +++ b/src/mongo/db/matcher/expression_parser_array_test.cpp @@ -28,17 +28,30 @@ */ #include - -#include "mongo/unittest/unittest.h" - -#include "mongo/db/matcher/expression_parser.h" - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/db/matcher/expression.h" -#include "mongo/db/matcher/expression_array.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_parser_geo_test.cpp b/src/mongo/db/matcher/expression_parser_geo_test.cpp index dcd3ba6e9363c..05d31b9d00d8e 100644 --- a/src/mongo/db/matcher/expression_parser_geo_test.cpp +++ b/src/mongo/db/matcher/expression_parser_geo_test.cpp @@ -27,16 +27,22 @@ * it in the license file. 
*/ -#include "mongo/unittest/unittest.h" +#include -#include "mongo/db/matcher/expression_parser.h" +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_geo.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_parser_leaf_test.cpp b/src/mongo/db/matcher/expression_parser_leaf_test.cpp index fb96325ada91f..25050997aacdd 100644 --- a/src/mongo/db/matcher/expression_parser_leaf_test.cpp +++ b/src/mongo/db/matcher/expression_parser_leaf_test.cpp @@ -27,19 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/matcher/expression_type.h" +#include "mongo/db/matcher/matcher_type_set.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/platform/decimal128.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_parser_test.cpp b/src/mongo/db/matcher/expression_parser_test.cpp index 4e2a02ab6d923..6ffa4b79e9ae9 100644 --- a/src/mongo/db/matcher/expression_parser_test.cpp +++ b/src/mongo/db/matcher/expression_parser_test.cpp @@ -27,19 +27,36 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/unittest/unittest.h" - -#include "mongo/db/matcher/expression_parser.h" - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" -#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_parser_tree_test.cpp b/src/mongo/db/matcher/expression_parser_tree_test.cpp index 9aa066b7cca6c..b2691b2b14ec4 100644 --- a/src/mongo/db/matcher/expression_parser_tree_test.cpp +++ b/src/mongo/db/matcher/expression_parser_tree_test.cpp @@ -27,15 +27,21 @@ * it in the license file. */ -#include "mongo/unittest/unittest.h" +#include -#include "mongo/db/matcher/expression_parser.h" +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/expression.h" -#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_path.h b/src/mongo/db/matcher/expression_path.h index dcab87590f603..5eccd1912cbac 100644 --- a/src/mongo/db/matcher/expression_path.h +++ b/src/mongo/db/matcher/expression_path.h @@ -105,20 +105,52 @@ class PathMatchExpression : public MatchExpression { * element) and the path components that should replace the renamed prefix (as the second * element). * - * Returns whether there is any attempted but failed to rename. This case can happen when any - * renamed path component is part of sub-fields. For example, expr = {x: {$eq: {y: 3}}} and - * renames = {{"x.y", "a.b"}}. We should be able to rename 'x' and 'y' to 'a' and 'b' - * respectively but due to the current limitation of the algorithm, we cannot rename such match - * expressions. + * Returns whether renames will always succeed if any rename is applicable. See + * wouldRenameSucceed() for more details. * * TODO SERVER-74298 As soon as we implement SERVER-74298, the return value might not be * necessary any more. 
*/ - bool applyRename(const StringMap& renameList) { - if (!_elementPath) { + [[nodiscard]] bool applyRename(const StringMap& renameList) { + if (!_elementPath || renameList.size() == 0) { + return true; + } + + if (auto&& [isRenamable, optRewrittenPath] = wouldRenameSucceed(renameList); !isRenamable) { return false; + } else if (optRewrittenPath) { + setPath(*optRewrittenPath); } + return true; + } + + /** + * Returns a pair of bool and boost::optional. + * + * - The bool indicates whether renames will always succeed if any rename is applicable. No + * applicable renames is considered as a successful rename and returns true with the second + * element of the pair is boost::none. This function can return false when a renamed path + * component descends into an $elemMatch or an object literal. For examples, + * + * expr = {x: {$eq: {y: 3}}} and renames = {{"x.y", "a.b"}}. We should be able to rename 'x' + * and 'y' to 'a' and 'b' respectively but due to the current limitation of the algorithm, we + * cannot rename such match expressions. + * + * Another similar example is expr = {x: {$elemMatch: {$eq: {y: 3}}}} and renames = {{"x.y", + * "a.b"}}. + + * - The boost::optional is the rewritten path iff one rename is applicable. The + * rewritten path is the path after applying the only applicable rename in 'renameList'. If no + * rename is applicable, the rewritten path is boost::none. + * + * TODO SERVER-74298 As soon as we implement SERVER-74298, this separate function may not be + * necessary any more and can be combined into applyRenames(). + */ + std::pair> wouldRenameSucceed( + const StringMap& renameList) const { + invariant(_elementPath); + size_t renamesFound = 0u; std::string rewrittenPath; for (const auto& rename : renameList) { @@ -141,9 +173,14 @@ class PathMatchExpression : public MatchExpression { ++renamesFound; } else if (pathFieldRef.isPrefixOf(prefixToRename)) { - // TODO SERVER-74298 Implement renaming by each path component instead of - // incrementing 'attemptedButFailedRenames'. - return true; + // TODO SERVER-74298 Implement renaming by each path component instead of returning + // the pair of 'false' and boost::none. We can traverse subexpressions with the + // remaining path suffix of 'prefixToRename' to see if we can rename each path + // component. Any subexpression would succeed with 'rewrittenPath' then this path + // component can be renamed. For example, assuming that 'pathFieldRef' == "a.b" and + // 'prefixToRename' == "a.b.c", we can recurse down to the subexpression with path + // "c" to see if we can rename it. If we can, we can rename this path too. + return {false, boost::none}; } } @@ -152,10 +189,10 @@ class PathMatchExpression : public MatchExpression { if (renamesFound == 1u) { // There is an applicable rename. Modify the path of this expression to use the new // name. - setPath(rewrittenPath); + return {true, rewrittenPath}; } - return false; + return {true, boost::none}; } void serialize(BSONObjBuilder* out, SerializationOptions opts) const override { @@ -168,17 +205,24 @@ class PathMatchExpression : public MatchExpression { } /** - * Returns a BSONObj that represents the right-hand-side of a PathMatchExpression. Used for + * Constructs a BSONObj that represents the right-hand-side of a PathMatchExpression. Used for * serialization of PathMatchExpression in cases where we do not want to serialize the path in * line with the expression. For example {x: {$not: {$eq: 1}}}, where $eq is the * PathMatchExpression. 
* * Serialization options should be respected for any descendent expressions. Eg, if the - * 'replacementForLiteralArgs' option is set, then any literal argument (like the number 1 in - * the example above), should be replaced with this string. 'literal' here is in contrast to - * another expression, if that is possible syntactically. + * 'literalPolicy' option is 'kToDebugTypeString', then any literal argument (like the number 1 + * in the example above), should be "shapified" (e.g. "?number"). 'literal' here is in contrast + * to another expression, if that is possible syntactically. */ - virtual BSONObj getSerializedRightHandSide(SerializationOptions opts = {}) const = 0; + virtual void appendSerializedRightHandSide(BSONObjBuilder* bob, + SerializationOptions opts = {}) const = 0; + + BSONObj getSerializedRightHandSide(SerializationOptions opts = {}) const { + BSONObjBuilder bob; + appendSerializedRightHandSide(&bob, opts); + return bob.obj(); + } private: // ElementPath holds a FieldRef, which owns the underlying path string. diff --git a/src/mongo/db/matcher/expression_serialization_test.cpp b/src/mongo/db/matcher/expression_serialization_test.cpp index d0bc980175300..3562422ddb8a7 100644 --- a/src/mongo/db/matcher/expression_serialization_test.cpp +++ b/src/mongo/db/matcher/expression_serialization_test.cpp @@ -29,14 +29,39 @@ // Unit tests for MatchExpression::serialize serialization. -#include "mongo/platform/basic.h" - -#include "mongo/db/json.h" +#include +#include +#include +#include +#include +#include +#include +#include + +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/matcher/matcher.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h" #include "mongo/db/matcher/schema/expression_internal_schema_cond.h" #include "mongo/db/matcher/schema/expression_internal_schema_eq.h" @@ -47,8 +72,14 @@ #include "mongo/db/matcher/schema/expression_internal_schema_min_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_length.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_properties.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -362,7 +393,6 @@ TEST(SerializeBasic, ExpressionElemMatchValueWithTripleNotSerializesCorrectly) { ASSERT_EQ(original.matches(obj), reserialized.matches(obj)); } - TEST(SerializeBasic, ExpressionSizeSerializesCorrectly) { 
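With the expression_path.h change above, `applyRename()` is now a thin wrapper over `wouldRenameSucceed()`, which reports both whether every applicable rename can be applied and, if exactly one applies, the rewritten path. A standalone approximation of the prefix logic, with dotted strings standing in for `FieldRef` (an assumed simplification of the real algorithm):

```cpp
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <utility>

static bool isPrefixOf(const std::string& prefix, const std::string& path) {
    return path.size() > prefix.size() &&
        path.compare(0, prefix.size(), prefix) == 0 && path[prefix.size()] == '.';
}

// Returns {renameIsPossible, rewrittenPathIfAnyRenameApplies}.
std::pair<bool, std::optional<std::string>> wouldRenameSucceed(
    const std::string& path, const std::map<std::string, std::string>& renames) {
    std::optional<std::string> rewritten;
    for (const auto& [from, to] : renames) {
        if (path == from) {
            rewritten = to;                             // whole path renamed
        } else if (isPrefixOf(from, path)) {
            rewritten = to + path.substr(from.size());  // prefix renamed, suffix kept
        } else if (isPrefixOf(path, from)) {
            return {false, std::nullopt};               // rename descends below this path
        }
    }
    return {true, rewritten};
}

int main() {
    auto [ok, newPath] = wouldRenameSucceed("a.b.c", {{"a.b", "x.y"}});
    std::cout << ok << ' ' << newPath.value_or("<none>") << '\n';    // 1 x.y.c
    auto [ok2, newPath2] = wouldRenameSucceed("x", {{"x.y", "a.b"}});
    std::cout << ok2 << ' ' << newPath2.value_or("<none>") << '\n';  // 0 <none>
}
```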
boost::intrusive_ptr expCtx(new ExpressionContextForTest()); Matcher original(fromjson("{x: {$size: 2}}"), @@ -1100,7 +1130,7 @@ TEST(SerializeBasic, ExpressionNotWithDirectPathExpSerializesCorrectly) { // direct path expression child, instead creating a NOT -> AND -> path expression. This test // manually constructs such an expression in case it ever turns up, since that should still be // able to serialize. - auto originalBSON = fromjson("{a: {$not: {$eq: 2}}}}"); + auto originalBSON = fromjson("{a: {$not: {$eq: 2}}}"); auto equalityRHSElem = originalBSON["a"]["$not"]["$eq"]; auto equalityExpression = std::make_unique("a"_sd, equalityRHSElem); @@ -1686,7 +1716,7 @@ TEST(SerializeInternalSchema, ExpressionInternalSchemaMaxLengthSerializesCorrect TEST(SerializeInternalSchema, ExpressionInternalSchemaCondSerializesCorrectly) { boost::intrusive_ptr expCtx(new ExpressionContextForTest()); - Matcher original(fromjson("{$_internalSchemaCond: [{a: 1}, {b: 2}, {c: 3}]}}"), + Matcher original(fromjson("{$_internalSchemaCond: [{a: 1}, {b: 2}, {c: 3}]}"), expCtx, ExtensionsCallbackNoop(), MatchExpressionParser::kAllowAllSpecialFeatures); @@ -1697,7 +1727,7 @@ TEST(SerializeInternalSchema, ExpressionInternalSchemaCondSerializesCorrectly) { BSONObjBuilder builder; ASSERT_BSONOBJ_EQ( *reserialized.getQuery(), - fromjson("{$_internalSchemaCond: [{a: {$eq: 1}}, {b: {$eq: 2}}, {c: {$eq: 3}}]}}")); + fromjson("{$_internalSchemaCond: [{a: {$eq: 1}}, {b: {$eq: 2}}, {c: {$eq: 3}}]}")); ASSERT_BSONOBJ_EQ(*reserialized.getQuery(), serialize(reserialized.getMatchExpression())); } @@ -1855,8 +1885,10 @@ TEST(SerializeInternalBinDataSubType, ExpressionBinDataSubTypeSerializesCorrectl ASSERT_TRUE(original.matches(obj)); } -std::string redactFieldNameForTest(StringData s) { - return str::stream() << "HASH(" << s << ")"; +std::string applyHmacForTest(StringData s) { + // Avoid ending in a parenthesis since the results will occur in a raw string where the )" + // sequence will accidentally terminate the string. 
+ return str::stream() << "HASH<" << s << ">"; } TEST(SerializeInternalSchema, AllowedPropertiesRedactsCorrectly) { @@ -1868,14 +1900,23 @@ TEST(SerializeInternalSchema, AllowedPropertiesRedactsCorrectly) { ASSERT_OK(objMatch.getStatus()); SerializationOptions opts; - opts.redactIdentifiers = true; - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.replacementForLiteralArgs = "?"; - - ASSERT_BSONOBJ_EQ( - fromjson( - "{ $_internalSchemaAllowedProperties: { properties: \"?\", namePlaceholder: \"?\", " - "patternProperties: [], otherwise: { \"HASH(i)\": { $eq: \"?\" } } } }"), + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$_internalSchemaAllowedProperties": { + "properties": "?array", + "namePlaceholder": "?string", + "patternProperties": [], + "otherwise": { + "HASH": { + "$eq": "?number" + } + } + } + })", objMatch.getValue()->serialize(opts)); } @@ -1904,9 +1945,9 @@ std::unique_ptr createCondMatchExpression(BSO TEST(SerializeInternalSchema, CondMatchRedactsCorrectly) { SerializationOptions opts; - opts.redactIdentifiers = true; - opts.replacementForLiteralArgs = "?"; - opts.identifierRedactionPolicy = redactFieldNameForTest; + opts.transformIdentifiers = true; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiersCallback = applyHmacForTest; auto conditionQuery = BSON("age" << BSON("$lt" << 18)); auto thenQuery = BSON("job" << "student"); @@ -1915,25 +1956,38 @@ TEST(SerializeInternalSchema, CondMatchRedactsCorrectly) { auto cond = createCondMatchExpression(conditionQuery, thenQuery, elseQuery); BSONObjBuilder bob; cond->serialize(&bob, opts); - auto expectedResult = - BSON("$_internalSchemaCond" << BSON_ARRAY(BSON("HASH(age)" << BSON("$lt" - << "?")) - << BSON("HASH(job)" << BSON("$eq" - << "?")) - << BSON("HASH(job)" << BSON("$eq" - << "?")))); - ASSERT_BSONOBJ_EQ(expectedResult, bob.done()); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$_internalSchemaCond": [ + { + "HASH": { + "$lt": "?number" + } + }, + { + "HASH": { + "$eq": "?string" + } + }, + { + "HASH": { + "$eq": "?string" + } + } + ] + })", + bob.done()); } TEST(SerializeInternalSchema, FmodMatchRedactsCorrectly) { InternalSchemaFmodMatchExpression m("a"_sd, Decimal128(1.7), Decimal128(2)); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; BSONObjBuilder bob; m.serialize(&bob, opts); - ASSERT_BSONOBJ_EQ(BSON("a" << BSON("$_internalSchemaFmod" << BSON_ARRAY("?" - << "?"))), - bob.done()); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"a":{"$_internalSchemaFmod":["?number","?number"]}})", + bob.done()); } TEST(SerializeInternalSchema, MatchArrayIndexRedactsCorrectly) { @@ -1946,81 +2000,87 @@ TEST(SerializeInternalSchema, MatchArrayIndexRedactsCorrectly) { BSONObjBuilder bob; SerializationOptions opts; - opts.redactIdentifiers = true; - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.replacementForLiteralArgs = "?"; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; objMatch.getValue()->serialize(&bob, opts); - - ASSERT_BSONOBJ_EQ(bob.done(), - BSON("HASH(foo)" << BSON("$_internalSchemaMatchArrayIndex" - << BSON("index" - << "?" 
- << "namePlaceholder" - << "HASH(i)" - << "expression" - << BSON("HASH(i)" << BSON("$type" - << "?")))))); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "HASH": { + "$_internalSchemaMatchArrayIndex": { + "index": "?number", + "namePlaceholder": "HASH", + "expression": { + "HASH": { + "$type": "?array" + } + } + } + } + })", + bob.done()); } TEST(SerializeInternalSchema, MaxItemsRedactsCorrectly) { InternalSchemaMaxItemsMatchExpression maxItems("a.b"_sd, 2); SerializationOptions opts; - opts.redactIdentifiers = true; - opts.replacementForLiteralArgs = "?"; - opts.identifierRedactionPolicy = redactFieldNameForTest; - - ASSERT_BSONOBJ_EQ(maxItems.getSerializedRightHandSide(opts), - BSON("$_internalSchemaMaxItems" - << "?")); + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaMaxItems":"?number"})", + maxItems.getSerializedRightHandSide(opts)); } TEST(SerializeInternalSchema, MaxLengthRedactsCorrectly) { InternalSchemaMaxLengthMatchExpression maxLength("a"_sd, 2); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - ASSERT_BSONOBJ_EQ(maxLength.getSerializedRightHandSide(opts), - BSON("$_internalSchemaMaxLength" - << "?")); + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaMaxLength":"?number"})", + maxLength.getSerializedRightHandSide(opts)); } TEST(SerializeInternalSchema, MinItemsRedactsCorrectly) { InternalSchemaMinItemsMatchExpression minItems("a.b"_sd, 2); SerializationOptions opts; - opts.redactIdentifiers = true; - opts.replacementForLiteralArgs = "?"; - opts.identifierRedactionPolicy = redactFieldNameForTest; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; - ASSERT_BSONOBJ_EQ(minItems.getSerializedRightHandSide(opts), - BSON("$_internalSchemaMinItems" - << "?")); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaMinItems":"?number"})", + minItems.getSerializedRightHandSide(opts)); } TEST(SerializeInternalSchema, MinLengthRedactsCorrectly) { InternalSchemaMinLengthMatchExpression minLength("a"_sd, 2); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - ASSERT_BSONOBJ_EQ(minLength.getSerializedRightHandSide(opts), - BSON("$_internalSchemaMinLength" - << "?")); + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaMinLength":"?number"})", + minLength.getSerializedRightHandSide(opts)); } TEST(SerializeInternalSchema, MinPropertiesRedactsCorrectly) { InternalSchemaMinPropertiesMatchExpression minProperties(5); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; BSONObjBuilder bob; minProperties.serialize(&bob, opts); - ASSERT_BSONOBJ_EQ(bob.done(), - BSON("$_internalSchemaMinProperties" - << "?")); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaMinProperties":"?number"})", + bob.done()); } TEST(SerializeInternalSchema, ObjectMatchRedactsCorrectly) { SerializationOptions opts; - opts.redactIdentifiers = true; - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.replacementForLiteralArgs = "?"; + opts.literalPolicy = 
LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; auto query = fromjson( " {a: {$_internalSchemaObjectMatch: {" " c: {$eq: 3}" @@ -2029,30 +2089,32 @@ TEST(SerializeInternalSchema, ObjectMatchRedactsCorrectly) { auto objMatch = MatchExpressionParser::parse(query, expCtx); ASSERT_OK(objMatch.getStatus()); - ASSERT_BSONOBJ_EQ( - objMatch.getValue()->serialize(opts), - BSON("HASH(a)" << BSON("$_internalSchemaObjectMatch" << BSON("HASH(c)" << BSON("$eq" - << "?"))))); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"HASH":{"$_internalSchemaObjectMatch":{"HASH":{"$eq":"?number"}}}})", + objMatch.getValue()->serialize(opts)); } TEST(SerializeInternalSchema, RootDocEqRedactsCorrectly) { auto query = fromjson("{$_internalSchemaRootDocEq: {a:1, b: {c: 1, d: [1]}}}"); boost::intrusive_ptr expCtx(new ExpressionContextForTest()); SerializationOptions opts; - opts.redactIdentifiers = true; - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.replacementForLiteralArgs = "?"; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; auto objMatch = MatchExpressionParser::parse(query, expCtx); - ASSERT_OK(objMatch.getStatus()); - - ASSERT_BSONOBJ_EQ( - objMatch.getValue()->serialize(opts), - BSON("$_internalSchemaRootDocEq" << BSON("HASH(a)" - << "?" - << "HASH(b)" - << BSON("HASH(c)" - << "?" - << "HASH(d)" << BSON_ARRAY("?"))))); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$_internalSchemaRootDocEq": { + "HASH": "?number", + "HASH": { + "HASH": "?number", + "HASH": [ + "?number" + ] + } + } + })", + objMatch.getValue()->serialize(opts)); } TEST(SerializeInternalSchema, BinDataEncryptedTypeRedactsCorrectly) { @@ -2061,50 +2123,57 @@ TEST(SerializeInternalSchema, BinDataEncryptedTypeRedactsCorrectly) { typeSet.bsonTypes.insert(BSONType::Date); InternalSchemaBinDataEncryptedTypeExpression e("a"_sd, std::move(typeSet)); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - ASSERT_BSONOBJ_EQ(BSON("$_internalSchemaBinDataEncryptedType" - << "?"), - e.getSerializedRightHandSide(opts)); + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaBinDataEncryptedType":"?array"})", + e.getSerializedRightHandSide(opts)); } TEST(SerializeInternalSchema, BinDataFLE2EncryptedTypeRedactsCorrectly) { InternalSchemaBinDataFLE2EncryptedTypeExpression e("ssn"_sd, BSONType::String); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - ASSERT_BSONOBJ_EQ(BSON("$_internalSchemaBinDataFLE2EncryptedType" - << "?"), - e.getSerializedRightHandSide(opts)); + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaBinDataFLE2EncryptedType":"?array"})", + e.getSerializedRightHandSide(opts)); } TEST(SerializesInternalSchema, MaxPropertiesRedactsCorrectly) { InternalSchemaMaxPropertiesMatchExpression maxProperties(5); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; BSONObjBuilder bob; maxProperties.serialize(&bob, opts); - ASSERT_BSONOBJ_EQ(bob.done(), - BSON("$_internalSchemaMaxProperties" - << "?")); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaMaxProperties":"?number"})", + bob.done()); } TEST(SerializesInternalSchema, EqRedactsCorrectly) { 
SerializationOptions opts; - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.redactIdentifiers = true; - opts.replacementForLiteralArgs = "?"; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiersCallback = applyHmacForTest; + opts.transformIdentifiers = true; auto query = fromjson("{$_internalSchemaEq: {a:1, b: {c: 1, d: [1]}}}"); BSONObjBuilder bob; InternalSchemaEqMatchExpression e("a"_sd, query.firstElement()); e.serialize(&bob, opts); - ASSERT_BSONOBJ_EQ(bob.done(), - BSON("HASH(a)" << BSON("$_internalSchemaEq" - << BSON("HASH(a)" - << "?" - << "HASH(b)" - << BSON("HASH(c)" - << "?" - << "HASH(d)" << BSON_ARRAY("?")))))); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "HASH": { + "$_internalSchemaEq": { + "HASH": "?number", + "HASH": { + "HASH": "?number", + "HASH": [ + "?number" + ] + } + } + } + })", + bob.done()); } TEST(InternalSchemaAllElemMatchFromIndexMatchExpression, RedactsExpressionCorrectly) { @@ -2116,14 +2185,22 @@ TEST(InternalSchemaAllElemMatchFromIndexMatchExpression, RedactsExpressionCorrec expr.getValue().get()); SerializationOptions opts; - opts.redactIdentifiers = true; - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.replacementForLiteralArgs = "?"; - - ASSERT_BSONOBJ_EQ(BSON("$_internalSchemaAllElemMatchFromIndex" - << BSON_ARRAY("?" << BSON("HASH(a)" << BSON("$lt" - << "?")))), - elemMatchExpr->getSerializedRightHandSide(opts)); + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$_internalSchemaAllElemMatchFromIndex": [ + "?number", + { + "HASH": { + "$lt": "?number" + } + } + ] + })", + elemMatchExpr->getSerializedRightHandSide(opts)); } } // namespace } // namespace mongo diff --git a/src/mongo/db/matcher/expression_text.cpp b/src/mongo/db/matcher/expression_text.cpp index f16258a035c16..9480bd7c86bd7 100644 --- a/src/mongo/db/matcher/expression_text.cpp +++ b/src/mongo/db/matcher/expression_text.cpp @@ -27,18 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/expression_text.h" - #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/fts/fts_language.h" #include "mongo/db/fts/fts_spec.h" +#include "mongo/db/fts/fts_util.h" #include "mongo/db/index/fts_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression_text.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -63,7 +78,7 @@ TextMatchExpression::TextMatchExpression(OperationContext* opCtx, uassert(ErrorCodes::IndexNotFound, str::stream() << "text index required for $text query (no such collection '" - << nss.ns() << "')", + << nss.toStringForErrorMsg() << "')", db); CollectionPtr collection( @@ -71,7 +86,7 @@ TextMatchExpression::TextMatchExpression(OperationContext* opCtx, uassert(ErrorCodes::IndexNotFound, str::stream() << "text index required for $text query (no such collection '" - << nss.ns() << "')", + << nss.toStringForErrorMsg() << "')", collection); std::vector idxMatches; diff --git a/src/mongo/db/matcher/expression_text.h b/src/mongo/db/matcher/expression_text.h index bdaf88a1e3386..be227a0e75b10 100644 --- a/src/mongo/db/matcher/expression_text.h +++ b/src/mongo/db/matcher/expression_text.h @@ -29,9 +29,17 @@ #pragma once +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/db/fts/fts_query.h" #include "mongo/db/fts/fts_query_impl.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_text_base.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_text_base.cpp b/src/mongo/db/matcher/expression_text_base.cpp index 8cdb360bf3ad1..484217b1c574d 100644 --- a/src/mongo/db/matcher/expression_text_base.cpp +++ b/src/mongo/db/matcher/expression_text_base.cpp @@ -27,11 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/expression_text_base.h" +#include +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/fts/fts_query.h" +#include "mongo/db/matcher/expression_text_base.h" namespace mongo { @@ -52,18 +55,12 @@ void TextMatchExpressionBase::debugString(StringBuilder& debug, int indentationL void TextMatchExpressionBase::serialize(BSONObjBuilder* out, SerializationOptions opts) const { const fts::FTSQuery& ftsQuery = getFTSQuery(); - if (opts.replacementForLiteralArgs) { - out->append("$text", - BSON("$search" << *opts.replacementForLiteralArgs << "$language" - << *opts.replacementForLiteralArgs << "$caseSensitive" - << *opts.replacementForLiteralArgs << "$diacriticSensitive" - << *opts.replacementForLiteralArgs)); - } else { - out->append("$text", - BSON("$search" << ftsQuery.getQuery() << "$language" << ftsQuery.getLanguage() - << "$caseSensitive" << ftsQuery.getCaseSensitive() - << "$diacriticSensitive" << ftsQuery.getDiacriticSensitive())); - } + out->append("$text", + BSON("$search" << opts.serializeLiteral(ftsQuery.getQuery()) << "$language" + << opts.serializeLiteral(ftsQuery.getLanguage()) << "$caseSensitive" + << opts.serializeLiteral(ftsQuery.getCaseSensitive()) + << "$diacriticSensitive" + << opts.serializeLiteral(ftsQuery.getDiacriticSensitive()))); } bool TextMatchExpressionBase::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/expression_text_base.h b/src/mongo/db/matcher/expression_text_base.h index 92b7b5a8c9447..418fc279c094a 100644 --- a/src/mongo/db/matcher/expression_text_base.h +++ b/src/mongo/db/matcher/expression_text_base.h @@ -29,7 +29,17 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -60,7 +70,7 @@ class TextMatchExpressionBase : public LeafMatchExpression { */ virtual const fts::FTSQuery& getFTSQuery() const = 0; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final { + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final { MONGO_UNREACHABLE; } diff --git a/src/mongo/db/matcher/expression_text_noop.cpp b/src/mongo/db/matcher/expression_text_noop.cpp index 46e8551c61069..2bf8820a8f012 100644 --- a/src/mongo/db/matcher/expression_text_noop.cpp +++ b/src/mongo/db/matcher/expression_text_noop.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/matcher/expression_text_noop.h" +#include -#include +#include "mongo/base/string_data.h" +#include "mongo/db/fts/fts_util.h" +#include "mongo/db/matcher/expression_text_noop.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_text_noop.h b/src/mongo/db/matcher/expression_text_noop.h index 7e261dab2c3e5..488f46c00eaf5 100644 --- a/src/mongo/db/matcher/expression_text_noop.h +++ b/src/mongo/db/matcher/expression_text_noop.h @@ -29,8 +29,16 @@ #pragma once +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/db/fts/fts_query.h" #include "mongo/db/fts/fts_query_noop.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_text_base.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_tree.cpp b/src/mongo/db/matcher/expression_tree.cpp index 5dae4cb0f6b85..f3b10a2857cee 100644 --- a/src/mongo/db/matcher/expression_tree.cpp +++ b/src/mongo/db/matcher/expression_tree.cpp @@ -27,19 +27,115 @@ * it in the license file. */ +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include #include +#include +#include -#include "mongo/db/matcher/expression_tree.h" - -#include "mongo/bson/bsonmisc.h" +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_path.h" -#include "mongo/db/matcher/expression_text_base.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/query/collation/collator_interface.h" namespace mongo { +namespace { + +PathMatchExpression* getEligiblePathMatchForNotSerialization(MatchExpression* expr) { + // Returns a pointer to a PathMatchExpression if 'expr' is such a pointer, otherwise returns + // nullptr. + // + // One exception: while TextMatchExpressionBase derives from PathMatchExpression, text match + // expressions cannot be serialized in the same manner as other PathMatchExpression derivatives. + // This is because the path for a TextMatchExpression is embedded within the $text object, + // whereas for other PathMatchExpressions it is on the left-hand-side, for example {x: {$eq: + // 1}}. + // + // Rather than the following dynamic_cast, we'll do a more performant, but also more verbose + // check. + // dynamic_cast(expr) && !dynamic_cast(expr) + // + // This version below is less obviously exhaustive, but because this is just a legibility + // optimization, and this function also gets called on the query shape stats recording hot path, + // we think it is worth it. 
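For illustration only, a minimal standalone sketch of the pattern the comment above describes; the Expr/PathExpr/TextExpr names are hypothetical stand-ins for the real MatchExpression classes. The point is that an exhaustive switch over a type tag can replace the pair of dynamic_casts, applying static_cast only in cases known to be path expressions, while compiler warnings on non-exhaustive switches keep the mapping in sync with the enum.

#include <iostream>

// Hypothetical stand-ins for MatchExpression / PathMatchExpression / TextMatchExpressionBase.
enum class Tag { Eq, Size, Text, And };

struct Expr {
    virtual ~Expr() = default;
    virtual Tag tag() const = 0;
};
struct PathExpr : Expr {};  // analogous to a path-accepting expression
struct EqExpr : PathExpr { Tag tag() const override { return Tag::Eq; } };
struct SizeExpr : PathExpr { Tag tag() const override { return Tag::Size; } };
struct TextExpr : PathExpr { Tag tag() const override { return Tag::Text; } };  // path-like, but excluded
struct AndExpr : Expr { Tag tag() const override { return Tag::And; } };

// Instead of:
//   dynamic_cast<PathExpr*>(e) && !dynamic_cast<TextExpr*>(e)
// switch on the tag and static_cast only for the cases known to be path expressions.
PathExpr* eligiblePath(Expr* e) {
    switch (e->tag()) {
        case Tag::Eq:
        case Tag::Size:
            return static_cast<PathExpr*>(e);
        case Tag::Text:  // deliberately excluded, like $text in the patch above
        case Tag::And:
            return nullptr;
    }
    return nullptr;  // unreachable while the switch stays exhaustive
}

int main() {
    EqExpr eq;
    TextExpr text;
    std::cout << (eligiblePath(&eq) != nullptr) << ' '
              << (eligiblePath(&text) != nullptr) << '\n';  // prints: 1 0
}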
+ switch (expr->matchType()) { + // leaf types + case MatchExpression::EQ: + case MatchExpression::LTE: + case MatchExpression::LT: + case MatchExpression::GT: + case MatchExpression::GTE: + case MatchExpression::REGEX: + case MatchExpression::MOD: + case MatchExpression::EXISTS: + case MatchExpression::MATCH_IN: + case MatchExpression::BITS_ALL_SET: + case MatchExpression::BITS_ALL_CLEAR: + case MatchExpression::BITS_ANY_SET: + case MatchExpression::BITS_ANY_CLEAR: + // array types + case MatchExpression::ELEM_MATCH_OBJECT: + case MatchExpression::ELEM_MATCH_VALUE: + case MatchExpression::SIZE: + // special types + case MatchExpression::TYPE_OPERATOR: + case MatchExpression::GEO: + case MatchExpression::GEO_NEAR: + // Internal subclasses of PathMatchExpression: + case MatchExpression::INTERNAL_SCHEMA_ALL_ELEM_MATCH_FROM_INDEX: + case MatchExpression::INTERNAL_SCHEMA_BIN_DATA_ENCRYPTED_TYPE: + case MatchExpression::INTERNAL_SCHEMA_BIN_DATA_FLE2_ENCRYPTED_TYPE: + case MatchExpression::INTERNAL_SCHEMA_BIN_DATA_SUBTYPE: + case MatchExpression::INTERNAL_SCHEMA_MATCH_ARRAY_INDEX: + case MatchExpression::INTERNAL_SCHEMA_MAX_ITEMS: + case MatchExpression::INTERNAL_SCHEMA_MAX_LENGTH: + case MatchExpression::INTERNAL_SCHEMA_MAX_PROPERTIES: + case MatchExpression::INTERNAL_SCHEMA_MIN_ITEMS: + case MatchExpression::INTERNAL_SCHEMA_MIN_LENGTH: + case MatchExpression::INTERNAL_SCHEMA_TYPE: + case MatchExpression::INTERNAL_SCHEMA_UNIQUE_ITEMS: + return static_cast(expr); + // purposefully skip TEXT: + case MatchExpression::TEXT: + // Any other type is not considered a PathMatchExpression. + case MatchExpression::AND: + case MatchExpression::OR: + case MatchExpression::NOT: + case MatchExpression::NOR: + case MatchExpression::WHERE: + case MatchExpression::EXPRESSION: + case MatchExpression::ALWAYS_FALSE: + case MatchExpression::ALWAYS_TRUE: + case MatchExpression::INTERNAL_2D_POINT_IN_ANNULUS: + case MatchExpression::INTERNAL_BUCKET_GEO_WITHIN: + case MatchExpression::INTERNAL_EXPR_EQ: + case MatchExpression::INTERNAL_EXPR_GT: + case MatchExpression::INTERNAL_EXPR_GTE: + case MatchExpression::INTERNAL_EXPR_LT: + case MatchExpression::INTERNAL_EXPR_LTE: + case MatchExpression::INTERNAL_EQ_HASHED_KEY: + case MatchExpression::INTERNAL_SCHEMA_ALLOWED_PROPERTIES: + case MatchExpression::INTERNAL_SCHEMA_COND: + case MatchExpression::INTERNAL_SCHEMA_EQ: + case MatchExpression::INTERNAL_SCHEMA_FMOD: + case MatchExpression::INTERNAL_SCHEMA_MIN_PROPERTIES: + case MatchExpression::INTERNAL_SCHEMA_OBJECT_MATCH: + case MatchExpression::INTERNAL_SCHEMA_ROOT_DOC_EQ: + case MatchExpression::INTERNAL_SCHEMA_XOR: + return nullptr; + default: + MONGO_UNREACHABLE_TASSERT(7800300); + } +}; +} // namespace void ListOfMatchExpression::_debugList(StringBuilder& debug, int indentationLevel) const { for (unsigned i = 0; i < _expressions.size(); i++) @@ -111,14 +207,15 @@ MatchExpression::ExpressionOptimizerFunc ListOfMatchExpression::getOptimizer() c } // Check if the above optimizations eliminated all children. An OR with no children is - // always false. - // TODO SERVER-34759 It is correct to replace this empty AND with an $alwaysTrue, but we - // need to make enhancements to the planner to make it understand an $alwaysTrue and an - // empty AND as the same thing. The planner can create inferior plans for $alwaysTrue which - // it would not produce for an AND with no children. + // always false. 
An AND with no children is always true and we need to return an + // EmptyExpression if (children.empty() && matchType == MatchExpression::OR) { return std::make_unique(); } + // This ensures that the empty $and[] will be returned that serializes to {} (SERVER-34759) + if (children.empty() && matchType == MatchExpression::AND) { + return std::make_unique(); + } if (children.size() == 1) { if ((matchType == AND || matchType == OR || matchType == INTERNAL_SCHEMA_XOR)) { @@ -143,11 +240,12 @@ MatchExpression::ExpressionOptimizerFunc ListOfMatchExpression::getOptimizer() c if (childExpression->isTriviallyFalse() && matchType == MatchExpression::AND) { return std::make_unique(); } - // Likewise, an OR containing an expression that always evaluates to true can be - // optimized to a single $alwaysTrue expression. + // optimized to a single $and[] expression that is trivially true and serializes to + // {}. This "normalizes" the behaviour of true statements with $and and $or + // (SERVER-34759). if (childExpression->isTriviallyTrue() && matchType == MatchExpression::OR) { - return std::make_unique(); + return std::make_unique(); } } } @@ -455,11 +553,7 @@ void NotMatchExpression::serializeNotExpressionToNor(MatchExpression* exp, void NotMatchExpression::serialize(BSONObjBuilder* out, SerializationOptions opts) const { if (_exp->matchType() == MatchType::AND && _exp->numChildren() == 0) { - if (opts.replacementForLiteralArgs) { - out->append("$alwaysFalse", *opts.replacementForLiteralArgs); - } else { - out->append("$alwaysFalse", 1); - } + opts.appendLiteral(out, "$alwaysFalse", 1); return; } @@ -486,14 +580,7 @@ void NotMatchExpression::serialize(BSONObjBuilder* out, SerializationOptions opt // It is generally easier to be correct if we just always serialize to a $nor, since this will // delegate the path serialization to lower in the tree where we have the information on-hand. // However, for legibility we preserve a $not with a single path-accepting child as a $not. - // - // One exception: while TextMatchExpressionBase derives from PathMatchExpression, text match - // expressions cannot be serialized in the same manner as other PathMatchExpression derivatives. - // This is because the path for a TextMatchExpression is embedded within the $text object, - // whereas for other PathMatchExpressions it is on the left-hand-side, for example {x: {$eq: - // 1}}. 
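Looping back to the getOptimizer() change above: a toy model of the SERVER-34759 normalization it describes, using hypothetical Node/And/Or types rather than the real MatchExpression hierarchy. A trivially true child of $or collapses the tree to an empty $and, which serializes to {} instead of {$alwaysTrue: 1}.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy model only: an empty AND is trivially true and serializes to "{}".
struct Node {
    virtual ~Node() = default;
    virtual bool triviallyTrue() const { return false; }
    virtual std::string serialize() const = 0;
};
struct AlwaysTrue : Node {
    bool triviallyTrue() const override { return true; }
    std::string serialize() const override { return "{$alwaysTrue: 1}"; }
};
struct And : Node {
    std::vector<std::unique_ptr<Node>> children;
    bool triviallyTrue() const override { return children.empty(); }
    std::string serialize() const override { return children.empty() ? "{}" : "{$and: [...]}"; }
};
struct Or : Node {
    std::vector<std::unique_ptr<Node>> children;
    std::string serialize() const override { return "{$or: [...]}"; }
};

// A trivially true child makes the whole $or trivially true, so normalize to an empty $and.
std::unique_ptr<Node> optimize(std::unique_ptr<Or> node) {
    for (auto& child : node->children)
        if (child->triviallyTrue())
            return std::make_unique<And>();
    return node;
}

int main() {
    auto query = std::make_unique<Or>();
    query->children.push_back(std::make_unique<AlwaysTrue>());
    std::cout << optimize(std::move(query))->serialize() << '\n';  // prints {}
}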
- if (auto pathMatch = dynamic_cast(expressionToNegate); - pathMatch && !dynamic_cast(expressionToNegate)) { + if (auto pathMatch = getEligiblePathMatchForNotSerialization(expressionToNegate)) { auto append = [&](StringData path) { BSONObjBuilder pathBob(out->subobjStart(path)); pathBob.append("$not", pathMatch->getSerializedRightHandSide(opts)); diff --git a/src/mongo/db/matcher/expression_tree.h b/src/mongo/db/matcher/expression_tree.h index ce6fdb95bb18f..5e372d07e2eab 100644 --- a/src/mongo/db/matcher/expression_tree.h +++ b/src/mongo/db/matcher/expression_tree.h @@ -30,10 +30,24 @@ #pragma once #include +#include +#include #include - +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/util/assert_util.h" /** * this contains all Expessions that define the structure of the tree @@ -52,6 +66,10 @@ class ListOfMatchExpression : public MatchExpression { _expressions.push_back(std::move(e)); } + void reserve(size_t n) { + _expressions.reserve(n); + } + void clear() { _expressions.clear(); } @@ -127,6 +145,7 @@ class AndMatchExpression : public ListOfMatchExpression { virtual std::unique_ptr clone() const { std::unique_ptr self = std::make_unique(_errorAnnotation); + self->reserve(numChildren()); for (size_t i = 0; i < numChildren(); ++i) { self->add(getChild(i)->clone()); } @@ -171,6 +190,7 @@ class OrMatchExpression : public ListOfMatchExpression { virtual std::unique_ptr clone() const { std::unique_ptr self = std::make_unique(_errorAnnotation); + self->reserve(numChildren()); for (size_t i = 0; i < numChildren(); ++i) { self->add(getChild(i)->clone()); } @@ -215,6 +235,7 @@ class NorMatchExpression : public ListOfMatchExpression { virtual std::unique_ptr clone() const { std::unique_ptr self = std::make_unique(_errorAnnotation); + self->reserve(numChildren()); for (size_t i = 0; i < numChildren(); ++i) { self->add(getChild(i)->clone()); } diff --git a/src/mongo/db/matcher/expression_tree_test.cpp b/src/mongo/db/matcher/expression_tree_test.cpp index 101b0ded6f93b..53c6519f35cb3 100644 --- a/src/mongo/db/matcher/expression_tree_test.cpp +++ b/src/mongo/db/matcher/expression_tree_test.cpp @@ -29,15 +29,21 @@ /** Unit tests for MatchMatchExpression operator implementations in match_operators.{h,cpp}. 
*/ -#include "mongo/unittest/unittest.h" +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_tree.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_type.h b/src/mongo/db/matcher/expression_type.h index bc2a758caf732..0baee729fb6f6 100644 --- a/src/mongo/db/matcher/expression_type.h +++ b/src/mongo/db/matcher/expression_type.h @@ -29,9 +29,34 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" #include "mongo/db/matcher/matcher_type_set.h" +#include "mongo/db/matcher/path.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/idl/idl_parser.h" namespace mongo { @@ -76,16 +101,8 @@ class TypeMatchExpressionBase : public LeafMatchExpression { _debugStringAttachTagInfo(&debug); } - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final { - BSONObjBuilder subBuilder; - if (opts.replacementForLiteralArgs) { - subBuilder.append(name(), opts.replacementForLiteralArgs.get()); - return subBuilder.obj(); - } - BSONArrayBuilder arrBuilder(subBuilder.subarrayStart(name())); - _typeSet.toBSONArray(&arrBuilder); - arrBuilder.doneFast(); - return subBuilder.obj(); + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final { + opts.appendLiteral(bob, name(), _typeSet.toBSONArray()); } bool equivalent(const MatchExpression* other) const final { @@ -246,14 +263,17 @@ class InternalSchemaBinDataSubTypeExpression final : public LeafMatchExpression _debugStringAttachTagInfo(&debug); } - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final { - BSONObjBuilder bob; - if (opts.replacementForLiteralArgs) { - bob.append(name(), opts.replacementForLiteralArgs.get()); + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final { + if (opts.literalPolicy == LiteralSerializationPolicy::kUnchanged) { + bob->append(name(), _binDataSubType); } else { - bob.append(name(), _binDataSubType); + // There is some fancy serialization logic to get the above BSONObjBuilder append to + // work. We just want to make sure we're doing the same thing here. 
+ static_assert(BSONObjAppendFormat::value == NumberInt, + "Expecting that the BinData sub type should be specified and serialized " + "as an int."); + opts.appendLiteral(bob, name(), static_cast(_binDataSubType)); } - return bob.obj(); } bool equivalent(const MatchExpression* other) const final { diff --git a/src/mongo/db/matcher/expression_type_test.cpp b/src/mongo/db/matcher/expression_type_test.cpp index d934f8a402ce3..959cf437bdf58 100644 --- a/src/mongo/db/matcher/expression_type_test.cpp +++ b/src/mongo/db/matcher/expression_type_test.cpp @@ -27,9 +27,18 @@ * it in the license file. */ +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/expression_type.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -220,10 +229,10 @@ TEST(ExpressionTypeTest, InternalSchemaTypeExprWithMultipleTypesMatchesAllSuchTy TEST(ExpressionTypeTest, RedactsTypesCorrectly) { TypeMatchExpression type(""_sd, String); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - ASSERT_BSONOBJ_EQ(BSON("$type" - << "?"), - type.getSerializedRightHandSide(opts)); + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$type":"?array"})", + type.getSerializedRightHandSide(opts)); } TEST(ExpressionBinDataSubTypeTest, MatchesBinDataGeneral) { @@ -312,10 +321,10 @@ TEST(ExpressionBinDataSubTypeTest, Equivalent) { TEST(ExpressionBinDataSubTypeTest, RedactsCorrectly) { InternalSchemaBinDataSubTypeExpression e("b"_sd, BinDataType::newUUID); SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - ASSERT_BSONOBJ_EQ(BSON("$_internalSchemaBinDataSubType" - << "?"), - e.getSerializedRightHandSide(opts)); + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$_internalSchemaBinDataSubType":"?number"})", + e.getSerializedRightHandSide(opts)); } TEST(InternalSchemaBinDataEncryptedTypeTest, DoesNotTraverseLeafArrays) { diff --git a/src/mongo/db/matcher/expression_where.cpp b/src/mongo/db/matcher/expression_where.cpp index 95ca4aa2537ff..fb958944d0689 100644 --- a/src/mongo/db/matcher/expression_where.cpp +++ b/src/mongo/db/matcher/expression_where.cpp @@ -27,22 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/expression_where.h" - #include +#include -#include "mongo/base/init.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" -#include "mongo/db/jsobj.h" +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/matcher/expression.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/scripting/engine.h" -#include "mongo/util/scopeguard.h" +#include "mongo/db/matcher/expression_where.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_where.h b/src/mongo/db/matcher/expression_where.h index f3a7de2291655..612238bdfb1f8 100644 --- a/src/mongo/db/matcher/expression_where.h +++ b/src/mongo/db/matcher/expression_where.h @@ -29,8 +29,19 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/js_function.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/expression_where_base.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_where_base.cpp b/src/mongo/db/matcher/expression_where_base.cpp index 2d1bec727de2c..0316f5a31aa9a 100644 --- a/src/mongo/db/matcher/expression_where_base.cpp +++ b/src/mongo/db/matcher/expression_where_base.cpp @@ -27,12 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/matcher/expression_where_base.h" -#include "mongo/bson/simple_bsonobj_comparator.h" - namespace mongo { WhereMatchExpressionBase::WhereMatchExpressionBase(WhereParams params) @@ -48,11 +48,7 @@ void WhereMatchExpressionBase::debugString(StringBuilder& debug, int indentation } void WhereMatchExpressionBase::serialize(BSONObjBuilder* out, SerializationOptions opts) const { - if (opts.replacementForLiteralArgs) { - out->append("$where", *opts.replacementForLiteralArgs); - } else { - out->appendCode("$where", getCode()); - } + opts.appendLiteral(out, "$where", BSONCode(getCode())); } bool WhereMatchExpressionBase::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/expression_where_base.h b/src/mongo/db/matcher/expression_where_base.h index 0a510740b454f..5ab9531f369f4 100644 --- a/src/mongo/db/matcher/expression_where_base.h +++ b/src/mongo/db/matcher/expression_where_base.h @@ -30,8 +30,19 @@ #pragma once #include - +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_where_noop.cpp b/src/mongo/db/matcher/expression_where_noop.cpp index a72d89da59399..ddba5d70770d4 100644 --- a/src/mongo/db/matcher/expression_where_noop.cpp +++ b/src/mongo/db/matcher/expression_where_noop.cpp @@ -27,11 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include #include "mongo/db/matcher/expression_where_noop.h" - -#include +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_where_noop.h b/src/mongo/db/matcher/expression_where_noop.h index 94807ee5646c1..651bb4666bd6f 100644 --- a/src/mongo/db/matcher/expression_where_noop.h +++ b/src/mongo/db/matcher/expression_where_noop.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/expression_where_base.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_with_placeholder.cpp b/src/mongo/db/matcher/expression_with_placeholder.cpp index c4bc9d05dca03..5609e2f21e6c1 100644 --- a/src/mongo/db/matcher/expression_with_placeholder.cpp +++ b/src/mongo/db/matcher/expression_with_placeholder.cpp @@ -27,14 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/matcher/expression_with_placeholder.h" +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" -#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_path.h" +#include "mongo/db/matcher/expression_with_placeholder.h" #include "mongo/util/pcre.h" #include "mongo/util/static_immortal.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_with_placeholder.h b/src/mongo/db/matcher/expression_with_placeholder.h index 9a6629bc9fb0f..f6db54e6f251a 100644 --- a/src/mongo/db/matcher/expression_with_placeholder.h +++ b/src/mongo/db/matcher/expression_with_placeholder.h @@ -29,11 +29,22 @@ #pragma once +#include +#include #include +#include +#include +#include +#include +#include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/match_details.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/matcher/expression_with_placeholder_test.cpp b/src/mongo/db/matcher/expression_with_placeholder_test.cpp index fcf6d86e5d795..73f7fbabae503 100644 --- a/src/mongo/db/matcher/expression_with_placeholder_test.cpp +++ b/src/mongo/db/matcher/expression_with_placeholder_test.cpp @@ -27,15 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/json.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/extensions_callback.cpp b/src/mongo/db/matcher/extensions_callback.cpp index 4080df517dee5..ff6e7559b12c1 100644 --- a/src/mongo/db/matcher/extensions_callback.cpp +++ b/src/mongo/db/matcher/extensions_callback.cpp @@ -27,11 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/matcher/extensions_callback.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/extensions_callback.h b/src/mongo/db/matcher/extensions_callback.h index b9787cb4c2556..74e8d50978cc6 100644 --- a/src/mongo/db/matcher/extensions_callback.h +++ b/src/mongo/db/matcher/extensions_callback.h @@ -29,6 +29,12 @@ #pragma once +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_text_base.h" #include "mongo/db/matcher/expression_where_base.h" diff --git a/src/mongo/db/matcher/extensions_callback_noop.cpp b/src/mongo/db/matcher/extensions_callback_noop.cpp index 53918fe661cdd..b0eeb2f92bd1f 100644 --- a/src/mongo/db/matcher/extensions_callback_noop.cpp +++ b/src/mongo/db/matcher/extensions_callback_noop.cpp @@ -27,12 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/matcher/extensions_callback_noop.h" +#include #include "mongo/db/matcher/expression_text_noop.h" #include "mongo/db/matcher/expression_where_noop.h" +#include "mongo/db/matcher/extensions_callback_noop.h" namespace mongo { diff --git a/src/mongo/db/matcher/extensions_callback_noop.h b/src/mongo/db/matcher/extensions_callback_noop.h index 29ae9f2be4e20..2fb2bb635736c 100644 --- a/src/mongo/db/matcher/extensions_callback_noop.h +++ b/src/mongo/db/matcher/extensions_callback_noop.h @@ -29,7 +29,15 @@ #pragma once +#include + +#include + +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_text_base.h" +#include "mongo/db/matcher/expression_where_base.h" #include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo { diff --git a/src/mongo/db/matcher/extensions_callback_real.cpp b/src/mongo/db/matcher/extensions_callback_real.cpp index 87e0e175d6224..bcd80808c5159 100644 --- a/src/mongo/db/matcher/extensions_callback_real.cpp +++ b/src/mongo/db/matcher/extensions_callback_real.cpp @@ -27,19 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/matcher/extensions_callback_real.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_text.h" #include "mongo/db/matcher/expression_where.h" +#include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_function.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/util/make_data_structure.h" -#include "mongo/scripting/engine.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/extensions_callback_real.h b/src/mongo/db/matcher/extensions_callback_real.h index b78c46f3c1354..f01e1ed36ac2e 100644 --- a/src/mongo/db/matcher/extensions_callback_real.h +++ b/src/mongo/db/matcher/extensions_callback_real.h @@ -29,7 +29,16 @@ #pragma once +#include +#include + +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_text_base.h" +#include "mongo/db/matcher/expression_where_base.h" #include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo { diff --git a/src/mongo/db/matcher/implicit_validator.cpp b/src/mongo/db/matcher/implicit_validator.cpp index 5de5f276d49bc..3cc37f868cca7 100644 --- a/src/mongo/db/matcher/implicit_validator.cpp +++ b/src/mongo/db/matcher/implicit_validator.cpp @@ -29,18 +29,33 @@ #include "mongo/db/matcher/implicit_validator.h" -#include "mongo/db/query/stage_types.h" #include - +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/field_ref.h" #include "mongo/db/matcher/doc_validation_util.h" #include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_tree.h" 
#include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/matcher_type_set.h" -#include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h" #include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/implicit_validator.h b/src/mongo/db/matcher/implicit_validator.h index c4bdd6e2fe319..33d6b49209447 100644 --- a/src/mongo/db/matcher/implicit_validator.h +++ b/src/mongo/db/matcher/implicit_validator.h @@ -28,11 +28,15 @@ */ #pragma once +#include +#include + #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsontypes.h" #include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/variables.h" namespace mongo { diff --git a/src/mongo/db/matcher/implicit_validator_test.cpp b/src/mongo/db/matcher/implicit_validator_test.cpp index 73985eba0fc49..bbf9857489a68 100644 --- a/src/mongo/db/matcher/implicit_validator_test.cpp +++ b/src/mongo/db/matcher/implicit_validator_test.cpp @@ -27,13 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/json.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/implicit_validator.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/matcher/match_details.cpp b/src/mongo/db/matcher/match_details.cpp index be9c657c3d993..5bfeee73cd7dc 100644 --- a/src/mongo/db/matcher/match_details.cpp +++ b/src/mongo/db/matcher/match_details.cpp @@ -51,7 +51,7 @@ bool MatchDetails::hasElemMatchKey() const { } std::string MatchDetails::elemMatchKey() const { - verify(hasElemMatchKey()); + MONGO_verify(hasElemMatchKey()); return *(_elemMatchKey.get()); } diff --git a/src/mongo/db/matcher/match_expression_dependencies.cpp b/src/mongo/db/matcher/match_expression_dependencies.cpp index cd28008f3df30..a7d047334f66b 100644 --- a/src/mongo/db/matcher/match_expression_dependencies.cpp +++ b/src/mongo/db/matcher/match_expression_dependencies.cpp @@ -29,6 +29,14 @@ #include "mongo/db/matcher/match_expression_dependencies.h" +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/field_ref.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_expr.h" @@ -37,6 +45,7 @@ #include "mongo/db/matcher/expression_internal_eq_hashed_key.h" #include "mongo/db/matcher/expression_internal_expr_comparison.h" #include 
"mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_path.h" #include "mongo/db/matcher/expression_text.h" #include "mongo/db/matcher/expression_text_noop.h" #include "mongo/db/matcher/expression_tree.h" @@ -62,6 +71,7 @@ #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_xor.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/query/tree_walker.h" namespace mongo::match_expression { diff --git a/src/mongo/db/matcher/match_expression_dependencies.h b/src/mongo/db/matcher/match_expression_dependencies.h index a04175f443c54..15cf0a3dfa02e 100644 --- a/src/mongo/db/matcher/match_expression_dependencies.h +++ b/src/mongo/db/matcher/match_expression_dependencies.h @@ -29,8 +29,11 @@ #pragma once +#include + #include "mongo/db/matcher/expression.h" #include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/variables.h" namespace mongo::match_expression { diff --git a/src/mongo/db/matcher/match_expression_util.cpp b/src/mongo/db/matcher/match_expression_util.cpp index 31a2d937477bb..319f67b86cdaa 100644 --- a/src/mongo/db/matcher/match_expression_util.cpp +++ b/src/mongo/db/matcher/match_expression_util.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/matcher/match_expression_util.h" namespace mongo::match_expression_util { diff --git a/src/mongo/db/matcher/match_expression_util.h b/src/mongo/db/matcher/match_expression_util.h index 191c4152d4ec8..f8a5d664dd5bf 100644 --- a/src/mongo/db/matcher/match_expression_util.h +++ b/src/mongo/db/matcher/match_expression_util.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/bson/bsonobj.h" namespace mongo::match_expression_util { diff --git a/src/mongo/db/matcher/match_expression_util_test.cpp b/src/mongo/db/matcher/match_expression_util_test.cpp index 3ff67328c32c8..dce65554b4975 100644 --- a/src/mongo/db/matcher/match_expression_util_test.cpp +++ b/src/mongo/db/matcher/match_expression_util_test.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/match_expression_util.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::match_expression_util { namespace { diff --git a/src/mongo/db/matcher/matchable.cpp b/src/mongo/db/matcher/matchable.cpp index 5c5bfa55fd36a..c56c07897946c 100644 --- a/src/mongo/db/matcher/matchable.cpp +++ b/src/mongo/db/matcher/matchable.cpp @@ -28,8 +28,6 @@ */ #include "mongo/db/matcher/matchable.h" -#include "mongo/db/jsobj.h" -#include "mongo/platform/basic.h" namespace mongo { diff --git a/src/mongo/db/matcher/matchable.h b/src/mongo/db/matcher/matchable.h index 062a3f2882636..f83006ef7570a 100644 --- a/src/mongo/db/matcher/matchable.h +++ b/src/mongo/db/matcher/matchable.h @@ -29,6 +29,11 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/field_ref.h" diff --git a/src/mongo/db/matcher/matcher.cpp b/src/mongo/db/matcher/matcher.cpp index 4faf2fe44bfd4..3f3c8275e2886 100644 --- a/src/mongo/db/matcher/matcher.cpp +++ b/src/mongo/db/matcher/matcher.cpp @@ -27,16 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include "mongo/db/matcher/matcher.h" + +#include -#include "mongo/base/init.h" -#include "mongo/db/exec/working_set.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/matcher.h" -#include "mongo/db/matcher/path.h" -#include "mongo/util/stacktrace.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/matcher.h b/src/mongo/db/matcher/matcher.h index 8410cd74fb96b..24a4343d538e0 100644 --- a/src/mongo/db/matcher/matcher.h +++ b/src/mongo/db/matcher/matcher.h @@ -30,12 +30,18 @@ #pragma once +#include +#include +#include + #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback.h" #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/matcher/match_details.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo { diff --git a/src/mongo/db/matcher/matcher_type_set.cpp b/src/mongo/db/matcher/matcher_type_set.cpp index 1c48d32ec25ba..bbf906b3086d4 100644 --- a/src/mongo/db/matcher/matcher_type_set.cpp +++ b/src/mongo/db/matcher/matcher_type_set.cpp @@ -27,13 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/matcher/matcher_type_set.h" +#include +#include +#include +#include +#include -#include "mongo/db/matcher/expression_parser.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/json_schema_parser.h" #include "mongo/db/query/bson_typemask.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/matcher_type_set.h b/src/mongo/db/matcher/matcher_type_set.h index 7903d03f44121..a3d00e26f04dc 100644 --- a/src/mongo/db/matcher/matcher_type_set.h +++ b/src/mongo/db/matcher/matcher_type_set.h @@ -29,15 +29,22 @@ #pragma once +#include +#include +#include #include #include #include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/matcher/matcher_type_set_test.cpp b/src/mongo/db/matcher/matcher_type_set_test.cpp index c8f06fdab701a..79e6f7fafefdf 100644 --- a/src/mongo/db/matcher/matcher_type_set_test.cpp +++ b/src/mongo/db/matcher/matcher_type_set_test.cpp @@ -27,14 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/matcher_type_set.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/path.cpp b/src/mongo/db/matcher/path.cpp index 5edcb17291b54..90d34053046bb 100644 --- a/src/mongo/db/matcher/path.cpp +++ b/src/mongo/db/matcher/path.cpp @@ -28,9 +28,15 @@ */ #include "mongo/db/matcher/path.h" -#include "mongo/db/jsobj.h" + +#include +#include + +#include + +#include "mongo/bson/bsontypes.h" #include "mongo/db/matcher/path_internal.h" -#include "mongo/platform/basic.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/matcher/path.h b/src/mongo/db/matcher/path.h index 8cd09c072e161..11379d9608d76 100644 --- a/src/mongo/db/matcher/path.h +++ b/src/mongo/db/matcher/path.h @@ -30,8 +30,14 @@ #pragma once +#include +#include +#include +#include + #include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/field_ref.h" diff --git a/src/mongo/db/matcher/path_accepting_keyword_test.cpp b/src/mongo/db/matcher/path_accepting_keyword_test.cpp index 95a260fea4a52..cb013d40d6012 100644 --- a/src/mongo/db/matcher/path_accepting_keyword_test.cpp +++ b/src/mongo/db/matcher/path_accepting_keyword_test.cpp @@ -27,11 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/jsobj.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/path_internal.cpp b/src/mongo/db/matcher/path_internal.cpp index 78d025445a688..9b52f6b7ac8f3 100644 --- a/src/mongo/db/matcher/path_internal.cpp +++ b/src/mongo/db/matcher/path_internal.cpp @@ -31,6 +31,7 @@ #include +#include "mongo/bson/bsontypes.h" #include "mongo/util/ctype.h" namespace mongo { diff --git a/src/mongo/db/matcher/path_internal.h b/src/mongo/db/matcher/path_internal.h index a21a034ba931b..e8d084f6c5d94 100644 --- a/src/mongo/db/matcher/path_internal.h +++ b/src/mongo/db/matcher/path_internal.h @@ -29,9 +29,12 @@ #pragma once +#include #include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/field_ref.h" #include "mongo/db/jsobj.h" diff --git a/src/mongo/db/matcher/path_test.cpp b/src/mongo/db/matcher/path_test.cpp index 58fe96df1ad9b..7f0a4c16a88ab 100644 --- a/src/mongo/db/matcher/path_test.cpp +++ b/src/mongo/db/matcher/path_test.cpp @@ -27,11 +27,16 @@ * it in the license file. */ -#include "mongo/unittest/unittest.h" +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/path.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/matcher/rewrite_expr.cpp b/src/mongo/db/matcher/rewrite_expr.cpp index fb0a8cfdfe260..9c62571d084f1 100644 --- a/src/mongo/db/matcher/rewrite_expr.cpp +++ b/src/mongo/db/matcher/rewrite_expr.cpp @@ -28,14 +28,23 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/rewrite_expr.h" - +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/expression_internal_expr_comparison.h" -#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/rewrite_expr.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/matcher/rewrite_expr.h b/src/mongo/db/matcher/rewrite_expr.h index 0cdef20af6a95..784efa7eab915 100644 --- a/src/mongo/db/matcher/rewrite_expr.h +++ b/src/mongo/db/matcher/rewrite_expr.h @@ -29,13 +29,19 @@ #pragma once +#include +#include #include +#include #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_tree.h" #include "mongo/db/pipeline/expression.h" +#include 
"mongo/db/query/collation/collator_interface.h" namespace mongo { diff --git a/src/mongo/db/matcher/rewrite_expr_test.cpp b/src/mongo/db/matcher/rewrite_expr_test.cpp index c4f1a4b761941..62d813e8f5ead 100644 --- a/src/mongo/db/matcher/rewrite_expr_test.cpp +++ b/src/mongo/db/matcher/rewrite_expr_test.cpp @@ -27,12 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/rewrite_expr.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/array_keywords_test.cpp b/src/mongo/db/matcher/schema/array_keywords_test.cpp index c652ea9b6185e..35e2776e7c0cf 100644 --- a/src/mongo/db/matcher/schema/array_keywords_test.cpp +++ b/src/mongo/db/matcher/schema/array_keywords_test.cpp @@ -27,14 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/schema/assert_serializes_to.h" #include "mongo/db/matcher/schema/json_schema_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/encrypt_keyword_test.cpp b/src/mongo/db/matcher/schema/encrypt_keyword_test.cpp index aaac0d897bc67..d056ef2966c28 100644 --- a/src/mongo/db/matcher/schema/encrypt_keyword_test.cpp +++ b/src/mongo/db/matcher/schema/encrypt_keyword_test.cpp @@ -27,15 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/schema/assert_serializes_to.h" #include "mongo/db/matcher/schema/json_schema_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/encrypt_schema_types.cpp b/src/mongo/db/matcher/schema/encrypt_schema_types.cpp index ef0a99dd76423..f4703cf2f1260 100644 --- a/src/mongo/db/matcher/schema/encrypt_schema_types.cpp +++ b/src/mongo/db/matcher/schema/encrypt_schema_types.cpp @@ -27,9 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/matcher/schema/encrypt_schema_types.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/encrypt_schema_types.h b/src/mongo/db/matcher/schema/encrypt_schema_types.h index ae785d796ebf5..94dccb7c45ba0 100644 --- a/src/mongo/db/matcher/schema/encrypt_schema_types.h +++ b/src/mongo/db/matcher/schema/encrypt_schema_types.h @@ -29,13 +29,17 @@ #pragma once +#include #include +#include #include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/json_pointer.h" +#include "mongo/util/assert_util.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp b/src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp index 6e7b84ffe0b2a..9950385b7b55e 100644 --- a/src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp +++ b/src/mongo/db/matcher/schema/encrypt_schema_types_test.cpp @@ -27,15 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/encrypt_schema_gen.h" #include "mongo/db/matcher/schema/encrypt_schema_types.h" -#include "mongo/unittest/bson_test_util.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.cpp index 97ceacf1e0148..1aa353215a4bc 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.cpp @@ -26,12 +26,16 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
 */
-#include "mongo/platform/basic.h"
+#include

-#include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h"
+#include
+#include

+#include "mongo/bson/bsonmisc.h"
 #include "mongo/bson/bsonobj.h"
 #include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/util/builder.h"
+#include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h"

 namespace mongo {

@@ -76,22 +80,11 @@ void InternalSchemaAllElemMatchFromIndexMatchExpression::debugString(StringBuild
     _expression->getFilter()->debugString(debug, indentationLevel + 1);
 }

-BSONObj InternalSchemaAllElemMatchFromIndexMatchExpression::getSerializedRightHandSide(
-    SerializationOptions opts) const {
-    BSONObjBuilder allElemMatchBob;
-    BSONArrayBuilder subArray(allElemMatchBob.subarrayStart(kName));
-    if (opts.replacementForLiteralArgs) {
-        subArray.append(opts.replacementForLiteralArgs.get());
-    } else {
-        subArray.append(_index);
-    }
-    {
-        BSONObjBuilder eBuilder(subArray.subobjStart());
-        _expression->getFilter()->serialize(&eBuilder, opts);
-        eBuilder.doneFast();
-    }
-    subArray.doneFast();
-    return allElemMatchBob.obj();
+void InternalSchemaAllElemMatchFromIndexMatchExpression::appendSerializedRightHandSide(
+    BSONObjBuilder* bob, SerializationOptions opts) const {
+    bob->append(
+        kName,
+        BSON_ARRAY(opts.serializeLiteral(_index) << _expression->getFilter()->serialize(opts)));
 }

 MatchExpression::ExpressionOptimizerFunc
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h b/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h
index 007ac6bdf590b..1b4eb5db130cb 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h
@@ -29,12 +29,25 @@
 #pragma once

 #include
+#include
+#include
+#include
+#include
+#include

+#include "mongo/base/clonable_ptr.h"
 #include "mongo/base/string_data.h"
+#include "mongo/bson/bsonelement.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/util/builder_fwd.h"
 #include "mongo/db/matcher/expression.h"
 #include "mongo/db/matcher/expression_array.h"
+#include "mongo/db/matcher/expression_visitor.h"
 #include "mongo/db/matcher/expression_with_placeholder.h"
+#include "mongo/db/matcher/match_details.h"
 #include "mongo/db/matcher/match_expression_util.h"
+#include "mongo/db/query/serialization_options.h"
+#include "mongo/util/assert_util.h"

 namespace mongo {
 /**
@@ -78,7 +91,7 @@ class InternalSchemaAllElemMatchFromIndexMatchExpression final

     void debugString(StringBuilder& debug, int indentationLevel) const final;

-    BSONObj getSerializedRightHandSide(SerializationOptions opts) const final;
+    void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final;

     bool equivalent(const MatchExpression* other) const final;

diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index_test.cpp
index 46e498fb67ca4..51307dd7e1d99 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index_test.cpp
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index_test.cpp
@@ -27,15 +27,22 @@
 * it in the license file.
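
The $_internalSchemaAllElemMatchFromIndex hunks above are representative of the serialization change made throughout this patch: getSerializedRightHandSide(), which built and returned a temporary BSONObj, becomes appendSerializedRightHandSide(), which writes into the caller's builder, and literal operands are routed through SerializationOptions. A minimal sketch of the new contract using only the signatures visible in this patch; the keyword string, class, and members below are hypothetical:

// Sketch only -- hypothetical expression type, not part of the patch.
void MyKeywordMatchExpression::appendSerializedRightHandSide(BSONObjBuilder* bob,
                                                             SerializationOptions opts) const {
    // serializeLiteral() lets the options decide whether the literal operand is emitted
    // verbatim or replaced by a representative value; sub-expressions serialize recursively.
    bob->append("$_myKeyword",
                BSON_ARRAY(opts.serializeLiteral(_operand) << _subFilter->serialize(opts)));
}
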
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties.cpp index c9a76a5d61f38..20cacc19f8bde 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties.cpp @@ -27,10 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" #include "mongo/util/errno_util.h" +#include "mongo/util/str.h" namespace mongo { constexpr StringData InternalSchemaAllowedPropertiesMatchExpression::kName; @@ -128,26 +137,14 @@ void InternalSchemaAllowedPropertiesMatchExpression::serialize(BSONObjBuilder* b std::vector sortedProperties(_properties.begin(), _properties.end()); std::sort(sortedProperties.begin(), sortedProperties.end()); - if (opts.replacementForLiteralArgs) { - expressionBuilder.append("properties", opts.replacementForLiteralArgs.get()); - expressionBuilder.append("namePlaceholder", opts.replacementForLiteralArgs.get()); - } else { - expressionBuilder.append("properties", sortedProperties); - expressionBuilder.append("namePlaceholder", _namePlaceholder); - } + opts.appendLiteral(&expressionBuilder, "properties", sortedProperties); + opts.appendLiteral(&expressionBuilder, "namePlaceholder", _namePlaceholder); BSONArrayBuilder patternPropertiesBuilder(expressionBuilder.subarrayStart("patternProperties")); - for (auto&& item : _patternProperties) { - BSONObjBuilder itemBuilder(patternPropertiesBuilder.subobjStart()); - if (opts.replacementForLiteralArgs) { - itemBuilder.appendRegex("regex", opts.replacementForLiteralArgs.get()); - } else { - itemBuilder.appendRegex("regex", item.first.rawRegex); - } - - BSONObjBuilder subexpressionBuilder(itemBuilder.subobjStart("expression")); - item.second->getFilter()->serialize(&subexpressionBuilder, opts); - subexpressionBuilder.doneFast(); + for (auto&& [pattern, expression] : _patternProperties) { + patternPropertiesBuilder << BSON( + "regex" << opts.serializeLiteral(BSONRegEx(pattern.rawRegex)) << "expression" + << expression->getFilter()->serialize(opts)); } patternPropertiesBuilder.doneFast(); diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h b/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h index 823cfb8c42981..548ee7b660564 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h +++ 
b/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h @@ -30,13 +30,28 @@ #pragma once #include +#include +#include #include +#include #include #include +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" #include "mongo/util/pcre.h" +#include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties_test.cpp index e7e9243384c8f..b10b889df1cc4 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_allowed_properties_test.cpp @@ -27,14 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/status_with.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_cond.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_cond.cpp index b026baa7684df..c0f7b7e105026 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_cond.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_cond.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/matcher/schema/expression_internal_schema_cond.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_cond.h b/src/mongo/db/matcher/schema/expression_internal_schema_cond.h index 76d4ce070e4ad..a89dd2f1e42c0 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_cond.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_cond.h @@ -29,8 +29,20 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_arity.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_cond_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_cond_test.cpp index 58214ae137ee1..a1cd47e558bc5 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_cond_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_cond_test.cpp @@ -27,14 +27,23 @@ * it in the license file. 
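
The InternalSchemaAllowedPropertiesMatchExpression::serialize() hunk a little further up replaces the per-branch replacementForLiteralArgs handling with appendLiteral()/serializeLiteral() and walks the pattern/expression pairs with a structured binding. A sketch of the patternProperties loop under those assumptions; the builder and container names are illustrative:

// Sketch only.
BSONArrayBuilder patternProperties(expressionBuilder.subarrayStart("patternProperties"));
for (auto&& [pattern, expression] : patternPropertiesEntries) {
    // Each entry serializes its regex as a literal (so it can be redacted or shape-ified)
    // and its placeholder filter recursively.
    patternProperties << BSON("regex" << opts.serializeLiteral(BSONRegEx(pattern.rawRegex))
                                      << "expression"
                                      << expression->getFilter()->serialize(opts));
}
patternProperties.doneFast();
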
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/schema/expression_internal_schema_cond.h" #include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_eq.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_eq.cpp index 43c0777f587f2..8f2078172698c 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_eq.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_eq.cpp @@ -27,13 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/matcher/schema/expression_internal_schema_eq.h" +#include +#include +#include #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/path.h" +#include "mongo/db/matcher/schema/expression_internal_schema_eq.h" namespace mongo { @@ -62,22 +68,16 @@ void InternalSchemaEqMatchExpression::debugString(StringBuilder& debug, _debugStringAttachTagInfo(&debug); } -BSONObj InternalSchemaEqMatchExpression::getSerializedRightHandSide( - SerializationOptions opts) const { - BSONObjBuilder eqObj; - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - if (_rhsElem.isABSONObj()) { - BSONObjBuilder exprSpec(eqObj.subobjStart(kName)); - opts.redactObjToBuilder(&exprSpec, _rhsElem.Obj()); - exprSpec.done(); - return eqObj.obj(); - } else if (opts.replacementForLiteralArgs) { - // If the element is not an object it must be a literal. - return BSON(kName << opts.replacementForLiteralArgs.get()); - } +void InternalSchemaEqMatchExpression::appendSerializedRightHandSide( + BSONObjBuilder* bob, SerializationOptions opts) const { + if (opts.literalPolicy != LiteralSerializationPolicy::kUnchanged && _rhsElem.isABSONObj()) { + BSONObjBuilder exprSpec(bob->subobjStart(kName)); + opts.addHmacedObjToBuilder(&exprSpec, _rhsElem.Obj()); + exprSpec.doneFast(); + return; } - eqObj.appendAs(_rhsElem, kName); - return eqObj.obj(); + // If the element is not an object it must be a literal. 
+ opts.appendLiteral(bob, kName, _rhsElem); } bool InternalSchemaEqMatchExpression::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_eq.h b/src/mongo/db/matcher/schema/expression_internal_schema_eq.h index 5099119f3bef6..430ed510e1df5 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_eq.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_eq.h @@ -30,9 +30,21 @@ #pragma once #include - +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/unordered_fields_bsonelement_comparator.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -59,7 +71,7 @@ class InternalSchemaEqMatchExpression final : public LeafMatchExpression { void debugString(StringBuilder& debug, int indentationLevel) const final; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; bool equivalent(const MatchExpression* other) const final; diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_eq_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_eq_test.cpp index 7f3d436273326..7bf33f0a6ddbb 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_eq_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_eq_test.cpp @@ -27,14 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/matcher.h" #include "mongo/db/matcher/schema/expression_internal_schema_eq.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_fmod.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_fmod.cpp index 93280d7cb5a52..834959a911ba2 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_fmod.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_fmod.cpp @@ -27,13 +27,20 @@ * it in the license file. 
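
The InternalSchemaEqMatchExpression change above also shows the second half of the options API migration: the old redactIdentifiers / replacementForLiteralArgs checks become a literalPolicy test plus addHmacedObjToBuilder(). A condensed restatement of that decision as a free function over the calls that appear in the hunk; the helper itself is hypothetical:

// Sketch only. kUnchanged is the one LiteralSerializationPolicy value visible in this patch;
// any other policy means user data must not be emitted as-is.
void appendEqualityRhs(BSONObjBuilder* bob,
                       StringData name,
                       BSONElement rhs,
                       SerializationOptions opts) {
    if (opts.literalPolicy != LiteralSerializationPolicy::kUnchanged && rhs.isABSONObj()) {
        // Object operands are rewritten field by field so their field names can be HMAC-ed.
        BSONObjBuilder sub(bob->subobjStart(name));
        opts.addHmacedObjToBuilder(&sub, rhs.Obj());
        sub.doneFast();
        return;
    }
    // Anything else is a plain literal; appendLiteral() applies the policy for us.
    opts.appendLiteral(bob, name, rhs);
}
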
 */
-#include "mongo/platform/basic.h"
+#include
+#include

-#include "mongo/db/matcher/schema/expression_internal_schema_fmod.h"
+#include
+#include

+#include "mongo/base/error_codes.h"
 #include "mongo/bson/bsonmisc.h"
 #include "mongo/bson/bsonobj.h"
 #include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/util/builder.h"
+#include "mongo/db/exec/document_value/value.h"
+#include "mongo/db/matcher/schema/expression_internal_schema_fmod.h"
+#include "mongo/util/assert_util.h"

 namespace mongo {

@@ -71,20 +78,10 @@ void InternalSchemaFmodMatchExpression::debugString(StringBuilder& debug,
     _debugStringAttachTagInfo(&debug);
 }

-BSONObj InternalSchemaFmodMatchExpression::getSerializedRightHandSide(
-    SerializationOptions opts) const {
-    BSONObjBuilder objMatchBob;
-    BSONArrayBuilder arrBuilder(objMatchBob.subarrayStart("$_internalSchemaFmod"));
-    // Divisor and Remainder are always literals.
-    if (opts.replacementForLiteralArgs) {
-        arrBuilder.append(opts.replacementForLiteralArgs.get());
-        arrBuilder.append(opts.replacementForLiteralArgs.get());
-    } else {
-        arrBuilder.append(_divisor);
-        arrBuilder.append(_remainder);
-    }
-    arrBuilder.doneFast();
-    return objMatchBob.obj();
+void InternalSchemaFmodMatchExpression::appendSerializedRightHandSide(
+    BSONObjBuilder* bob, SerializationOptions opts) const {
+    bob->append("$_internalSchemaFmod"_sd,
+                BSON_ARRAY(opts.serializeLiteral(_divisor) << opts.serializeLiteral(_remainder)));
 }

 bool InternalSchemaFmodMatchExpression::equivalent(const MatchExpression* other) const {
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_fmod.h b/src/mongo/db/matcher/schema/expression_internal_schema_fmod.h
index 27caa7dc27240..2f043b284d688 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_fmod.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_fmod.h
@@ -29,7 +29,22 @@
 #pragma once

+#include
+
+#include
+#include
+
+#include "mongo/base/clonable_ptr.h"
+#include "mongo/base/string_data.h"
+#include "mongo/bson/bsonelement.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/util/builder_fwd.h"
+#include "mongo/db/matcher/expression.h"
 #include "mongo/db/matcher/expression_leaf.h"
+#include "mongo/db/matcher/expression_visitor.h"
+#include "mongo/db/matcher/match_details.h"
+#include "mongo/db/query/serialization_options.h"
+#include "mongo/platform/decimal128.h"

 namespace mongo {

@@ -58,7 +73,7 @@ class InternalSchemaFmodMatchExpression final : public LeafMatchExpression {

     void debugString(StringBuilder& debug, int indentationLevel) const final;

-    BSONObj getSerializedRightHandSide(SerializationOptions opts) const final;
+    void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final;

     bool equivalent(const MatchExpression* other) const final;

diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_fmod_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_fmod_test.cpp
index d20860612e43f..1bc013f07ed8b 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_fmod_test.cpp
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_fmod_test.cpp
@@ -27,10 +27,16 @@
 * it in the license file.
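
The $_internalSchemaFmod rewrite above illustrates the division of labour between the two literal helpers used throughout this patch: appendLiteral() writes a named field straight into a builder, while serializeLiteral() yields a value that can be composed inside BSON()/BSON_ARRAY(). A small sketch under those assumptions; the enclosing function, the "$_myCountKeyword" name, and the numeric values are made up:

// Sketch only.
void appendLiteralExamples(BSONObjBuilder* bob, SerializationOptions opts) {
    // Single literal operand: name and value in one call.
    opts.appendLiteral(bob, "$_myCountKeyword", 5LL);

    // Fixed-arity, all-literal operand: compose the pieces into an array, as
    // $_internalSchemaFmod now does above with its divisor and remainder.
    bob->append("$_internalSchemaFmod",
                BSON_ARRAY(opts.serializeLiteral(Decimal128(3))
                           << opts.serializeLiteral(Decimal128(1))));
}
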
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/expression_internal_schema_fmod.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index.cpp index fd1ce5f670e2f..3332bd1f2c53f 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index.cpp @@ -27,8 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/schema/expression_internal_schema_match_array_index.h" namespace mongo { @@ -66,30 +73,13 @@ bool InternalSchemaMatchArrayIndexMatchExpression::equivalent(const MatchExpress _expression->equivalent(other->_expression.get()); } -BSONObj InternalSchemaMatchArrayIndexMatchExpression::getSerializedRightHandSide( - SerializationOptions opts) const { - BSONObjBuilder objBuilder; - { - BSONObjBuilder matchArrayElemSubobj(objBuilder.subobjStart(kName)); - if (opts.replacementForLiteralArgs) { - matchArrayElemSubobj.append("index", opts.replacementForLiteralArgs.get()); - } else { - matchArrayElemSubobj.append("index", _index); - } - if (auto placeHolder = _expression->getPlaceholder()) { - matchArrayElemSubobj.append("namePlaceholder", - opts.serializeFieldPathFromString(placeHolder.get())); - } else { - matchArrayElemSubobj.append("namePlaceholder", ""); - } - { - BSONObjBuilder subexprSubObj(matchArrayElemSubobj.subobjStart("expression")); - _expression->getFilter()->serialize(&subexprSubObj, opts); - subexprSubObj.doneFast(); - } - matchArrayElemSubobj.doneFast(); - } - return objBuilder.obj(); +void InternalSchemaMatchArrayIndexMatchExpression::appendSerializedRightHandSide( + BSONObjBuilder* bob, SerializationOptions opts) const { + bob->append(kName, + BSON("index" << opts.serializeLiteral(_index) << "namePlaceholder" + << opts.serializeFieldPathFromString( + _expression->getPlaceholder().value_or("")) + << "expression" << _expression->getFilter()->serialize(opts))); } std::unique_ptr InternalSchemaMatchArrayIndexMatchExpression::clone() const { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index.h b/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index.h index 96fea55511dce..ff49cec454ee8 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index.h @@ -30,10 +30,24 @@ #pragma once #include +#include +#include +#include #include - +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_array.h" +#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/matcher/match_details.h" +#include 
"mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -74,7 +88,7 @@ class InternalSchemaMatchArrayIndexMatchExpression final : public ArrayMatchingM return _expression->matchesBSONElement(element, details); } - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; std::unique_ptr clone() const final; diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index_test.cpp index e1938d1506376..9e7d403c001dd 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_match_array_index_test.cpp @@ -27,16 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/status_with.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/expression_with_placeholder.h" #include "mongo/db/matcher/schema/expression_internal_schema_match_array_index.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_items.h b/src/mongo/db/matcher/schema/expression_internal_schema_max_items.h index bab4075f09c56..2eb3904eac0ea 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_max_items.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_items.h @@ -29,6 +29,18 @@ #pragma once +#include +#include + +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" #include "mongo/db/matcher/schema/expression_internal_schema_num_array_items.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_items_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_max_items_test.cpp index 232de14bba42b..5e08667e5608a 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_max_items_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_items_test.cpp @@ -27,12 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/matcher/expression.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/expression_internal_schema_max_items.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h index 9918531d1bff3..971807509491b 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_length.h @@ -29,7 +29,16 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/schema/expression_internal_schema_str_length.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_length_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_max_length_test.cpp index fec18b87c9012..41faa43980d13 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_max_length_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_length_test.cpp @@ -27,12 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/matcher/expression.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/expression_internal_schema_max_length.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_length.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_properties.h b/src/mongo/db/matcher/schema/expression_internal_schema_max_properties.h index 90f41810f7d69..55be477166ba9 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_max_properties.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_properties.h @@ -29,6 +29,18 @@ #pragma once +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" #include "mongo/db/matcher/schema/expression_internal_schema_num_properties.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_max_properties_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_max_properties_test.cpp index 587243cb0ad73..5aacd6f6e2e2f 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_max_properties_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_max_properties_test.cpp @@ -27,13 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/expression.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/expression_internal_schema_max_properties.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_properties.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_items.h b/src/mongo/db/matcher/schema/expression_internal_schema_min_items.h index 0a54ba120ecb3..8f1f80d7657cf 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_min_items.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_items.h @@ -29,6 +29,18 @@ #pragma once +#include +#include + +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" #include "mongo/db/matcher/schema/expression_internal_schema_num_array_items.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_items_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_min_items_test.cpp index e71a003abd83b..c57af4a05e257 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_min_items_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_items_test.cpp @@ -26,12 +26,18 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/matcher/expression.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_items.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h index 8034bd555489b..536b5ee01d7d3 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_length.h @@ -29,7 +29,16 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/schema/expression_internal_schema_str_length.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_length_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_min_length_test.cpp index b0f916cfdab64..8789a4be86b94 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_min_length_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_length_test.cpp @@ -27,11 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/matcher/expression.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_length.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_properties.h b/src/mongo/db/matcher/schema/expression_internal_schema_min_properties.h index 56ae586a401e2..4ac09c55ce1b7 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_min_properties.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_properties.h @@ -29,6 +29,18 @@ #pragma once +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" #include "mongo/db/matcher/schema/expression_internal_schema_num_properties.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_min_properties_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_min_properties_test.cpp index c7abaa0c4e128..78f2eceaa6e9f 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_min_properties_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_min_properties_test.cpp @@ -27,13 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/expression.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/expression_internal_schema_max_properties.h" #include "mongo/db/matcher/schema/expression_internal_schema_min_properties.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_num_array_items.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_num_array_items.cpp index 3ef7921faf34e..ce4db93c9652e 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_num_array_items.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_num_array_items.cpp @@ -27,8 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/schema/expression_internal_schema_num_array_items.h" namespace mongo { @@ -50,15 +56,9 @@ void InternalSchemaNumArrayItemsMatchExpression::debugString(StringBuilder& debu _debugStringAttachTagInfo(&debug); } -BSONObj InternalSchemaNumArrayItemsMatchExpression::getSerializedRightHandSide( - SerializationOptions opts) const { - BSONObjBuilder objBuilder; - if (opts.replacementForLiteralArgs) { - objBuilder.append(_name, opts.replacementForLiteralArgs.get()); - } else { - objBuilder.append(_name, _numItems); - } - return objBuilder.obj(); +void InternalSchemaNumArrayItemsMatchExpression::appendSerializedRightHandSide( + BSONObjBuilder* bob, SerializationOptions opts) const { + opts.appendLiteral(bob, _name, _numItems); } bool InternalSchemaNumArrayItemsMatchExpression::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_num_array_items.h b/src/mongo/db/matcher/schema/expression_internal_schema_num_array_items.h index 24eb787df5582..5d1482270fc63 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_num_array_items.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_num_array_items.h @@ -30,9 +30,18 @@ #pragma once #include +#include +#include +#include +#include +#include "mongo/base/clonable_ptr.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_array.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -52,7 +61,7 @@ class InternalSchemaNumArrayItemsMatchExpression : public ArrayMatchingMatchExpr void debugString(StringBuilder& debug, int indentationLevel) const final; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; bool equivalent(const MatchExpression* other) const final; diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_num_properties.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_num_properties.cpp index 94fa820661634..7e124be6f0766 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_num_properties.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_num_properties.cpp @@ -27,9 +27,8 @@ * it in the license file. 
 */
-#include "mongo/platform/basic.h"
-
 #include "mongo/db/matcher/schema/expression_internal_schema_num_properties.h"
+#include "mongo/bson/bsonobj.h"

 namespace mongo {

@@ -44,11 +43,7 @@ void InternalSchemaNumPropertiesMatchExpression::debugString(StringBuilder& debu
 void InternalSchemaNumPropertiesMatchExpression::serialize(BSONObjBuilder* out,
                                                            SerializationOptions opts) const {
-    if (opts.replacementForLiteralArgs) {
-        out->append(_name, opts.replacementForLiteralArgs.get());
-    } else {
-        out->append(_name, _numProperties);
-    }
+    opts.appendLiteral(out, _name, _numProperties);
 }

 bool InternalSchemaNumPropertiesMatchExpression::equivalent(const MatchExpression* other) const {
diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_num_properties.h b/src/mongo/db/matcher/schema/expression_internal_schema_num_properties.h
index 9b26f0c5b99a9..5b3a8fcc09f84 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_num_properties.h
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_num_properties.h
@@ -30,9 +30,18 @@
 #pragma once

 #include
+#include
+#include
+#include
+#include
+#include

+#include "mongo/base/clonable_ptr.h"
 #include "mongo/base/string_data.h"
+#include "mongo/bson/bsonobjbuilder.h"
+#include "mongo/bson/util/builder_fwd.h"
 #include "mongo/db/matcher/expression.h"
+#include "mongo/db/query/serialization_options.h"
 #include "mongo/util/assert_util.h"

 namespace mongo {

diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_object_match.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_object_match.cpp
index 8e8bf6c6f7ad4..cb5e41b03b471 100644
--- a/src/mongo/db/matcher/schema/expression_internal_schema_object_match.cpp
+++ b/src/mongo/db/matcher/schema/expression_internal_schema_object_match.cpp
@@ -27,8 +27,15 @@
 * it in the license file.
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/matcher/path.h" #include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" namespace mongo { @@ -62,13 +69,9 @@ void InternalSchemaObjectMatchExpression::debugString(StringBuilder& debug, _sub->debugString(debug, indentationLevel + 1); } -BSONObj InternalSchemaObjectMatchExpression::getSerializedRightHandSide( - SerializationOptions opts) const { - BSONObjBuilder objMatchBob; - BSONObjBuilder subBob(objMatchBob.subobjStart(kName)); - _sub->serialize(&subBob, opts); - subBob.doneFast(); - return objMatchBob.obj(); +void InternalSchemaObjectMatchExpression::appendSerializedRightHandSide( + BSONObjBuilder* bob, SerializationOptions opts) const { + bob->append(kName, _sub->serialize(opts)); } bool InternalSchemaObjectMatchExpression::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_object_match.h b/src/mongo/db/matcher/schema/expression_internal_schema_object_match.h index 0d9d5f299424d..278c768e896d5 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_object_match.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_object_match.h @@ -30,8 +30,23 @@ #pragma once #include - +#include +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_path.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -50,7 +65,7 @@ class InternalSchemaObjectMatchExpression final : public PathMatchExpression { void debugString(StringBuilder& debug, int indentationLevel = 0) const final; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; bool equivalent(const MatchExpression* other) const final; diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp index e3b67bd4828c6..fcd214a762c39 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_object_match_test.cpp @@ -26,16 +26,27 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
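
InternalSchemaObjectMatchExpression above collapses to a single append because MatchExpression exposes both serialize() flavours used in this patch: one that streams into an existing builder and one that returns a BSONObj. A short sketch of the distinction; 'sub' stands for any match expression and the keyword strings are illustrative:

// Sketch only.
void appendTwoWays(BSONObjBuilder* bob, const MatchExpression* sub, SerializationOptions opts) {
    // Value form: materialize the sub-expression and hang it under one key, as the
    // $_internalSchemaObjectMatch hunk above now does.
    bob->append("$_internalSchemaObjectMatch", sub->serialize(opts));

    // Builder form: stream the sub-expression's fields into an open subobject, as the
    // removed code did explicitly with subobjStart()/doneFast().
    BSONObjBuilder subBob(bob->subobjStart("$_someOtherKeyword"));
    sub->serialize(&subBob, opts);
    subBob.doneFast();
}
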
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/json.h" +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/matcher.h" #include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.cpp index f9e40a3856dea..909793362fb11 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.cpp @@ -27,9 +27,8 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h" +#include "mongo/bson/util/builder.h" namespace mongo { @@ -50,7 +49,7 @@ void InternalSchemaRootDocEqMatchExpression::debugString(StringBuilder& debug, void InternalSchemaRootDocEqMatchExpression::serialize(BSONObjBuilder* out, SerializationOptions opts) const { BSONObjBuilder subObj(out->subobjStart(kName)); - opts.redactObjToBuilder(&subObj, _rhsObj); + opts.addHmacedObjToBuilder(&subObj, _rhsObj); subObj.doneFast(); } diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h b/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h index ea1fc6e8f8f8e..f1bbed4f468be 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h @@ -30,9 +30,23 @@ #pragma once #include - +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq_test.cpp index 978ab0c247d87..ce18556417400 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_root_doc_eq_test.cpp @@ -27,14 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/matcher.h" #include "mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_str_length.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_str_length.cpp index 114e24e3ca94c..52796ae4a74cb 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_str_length.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_str_length.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/matcher/schema/expression_internal_schema_str_length.h" +#include -#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/schema/expression_internal_schema_str_length.h" namespace mongo { @@ -50,15 +54,9 @@ void InternalSchemaStrLengthMatchExpression::debugString(StringBuilder& debug, _debugStringAttachTagInfo(&debug); } -BSONObj InternalSchemaStrLengthMatchExpression::getSerializedRightHandSide( - SerializationOptions opts) const { - BSONObjBuilder objBuilder; - if (opts.replacementForLiteralArgs) { - objBuilder.append(_name, opts.replacementForLiteralArgs.get()); - } else { - objBuilder.append(_name, _strLen); - } - return objBuilder.obj(); +void InternalSchemaStrLengthMatchExpression::appendSerializedRightHandSide( + BSONObjBuilder* bob, SerializationOptions opts) const { + opts.appendLiteral(bob, _name, _strLen); } bool InternalSchemaStrLengthMatchExpression::equivalent(const MatchExpression* other) const { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_str_length.h b/src/mongo/db/matcher/schema/expression_internal_schema_str_length.h index 6292f567cccab..99f0c85a04689 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_str_length.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_str_length.h @@ -30,9 +30,21 @@ #pragma once #include +#include +#include + +#include "mongo/base/clonable_ptr.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/str.h" namespace mongo { @@ -62,7 +74,7 @@ class InternalSchemaStrLengthMatchExpression : public LeafMatchExpression { void debugString(StringBuilder& debug, int indentationLevel) const final; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; bool equivalent(const MatchExpression* other) const final; diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_unique_items.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_unique_items.cpp index 4915d1ba2ab4f..cf6e972547e00 
100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_unique_items.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_unique_items.cpp @@ -27,8 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" namespace mongo { @@ -53,11 +54,9 @@ bool InternalSchemaUniqueItemsMatchExpression::equivalent(const MatchExpression* return path() == other->path(); } -BSONObj InternalSchemaUniqueItemsMatchExpression::getSerializedRightHandSide( - SerializationOptions opts) const { - BSONObjBuilder bob; - bob.append(kName, true); - return bob.obj(); +void InternalSchemaUniqueItemsMatchExpression::appendSerializedRightHandSide( + BSONObjBuilder* bob, SerializationOptions opts) const { + bob->append(kName, true); } std::unique_ptr InternalSchemaUniqueItemsMatchExpression::clone() const { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_unique_items.h b/src/mongo/db/matcher/schema/expression_internal_schema_unique_items.h index a789cc044d8b2..608be447f78ec 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_unique_items.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_unique_items.h @@ -29,11 +29,27 @@ #pragma once +#include #include +#include +#include +#include +#include #include +#include +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/unordered_fields_bsonelement_comparator.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_array.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -86,7 +102,7 @@ class InternalSchemaUniqueItemsMatchExpression final : public ArrayMatchingMatch bool equivalent(const MatchExpression* other) const final; - BSONObj getSerializedRightHandSide(SerializationOptions opts) const final; + void appendSerializedRightHandSide(BSONObjBuilder* bob, SerializationOptions opts) const final; std::unique_ptr clone() const final; diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_unique_items_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_unique_items_test.cpp index 0051387fcd13b..c63a2645e4ca4 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_unique_items_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_unique_items_test.cpp @@ -27,13 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_xor.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_xor.cpp index e4125ab81ed96..692688a20f276 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_xor.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_xor.cpp @@ -27,12 +27,7 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/matcher/schema/expression_internal_schema_xor.h" - -#include "mongo/bson/bsonmisc.h" -#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_xor.h b/src/mongo/db/matcher/schema/expression_internal_schema_xor.h index 903a8044aef83..6fd72785c285b 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_xor.h +++ b/src/mongo/db/matcher/schema/expression_internal_schema_xor.h @@ -29,7 +29,23 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/matcher/match_details.h" +#include "mongo/db/matcher/matchable.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/query/util/make_data_structure.h" namespace mongo { @@ -58,6 +74,7 @@ class InternalSchemaXorMatchExpression final : public ListOfMatchExpression { virtual std::unique_ptr clone() const { auto xorCopy = std::make_unique(_errorAnnotation); + xorCopy->reserve(numChildren()); for (size_t i = 0; i < numChildren(); ++i) { xorCopy->add(getChild(i)->clone()); } diff --git a/src/mongo/db/matcher/schema/expression_internal_schema_xor_test.cpp b/src/mongo/db/matcher/schema/expression_internal_schema_xor_test.cpp index cda74422ecc37..a5d840e15f08a 100644 --- a/src/mongo/db/matcher/schema/expression_internal_schema_xor_test.cpp +++ b/src/mongo/db/matcher/schema/expression_internal_schema_xor_test.cpp @@ -27,16 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/schema/expression_internal_schema_xor.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/expression_parser_schema_test.cpp b/src/mongo/db/matcher/schema/expression_parser_schema_test.cpp index 9ee366d5053ce..0e0369c12519f 100644 --- a/src/mongo/db/matcher/schema/expression_parser_schema_test.cpp +++ b/src/mongo/db/matcher/schema/expression_parser_schema_test.cpp @@ -27,20 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/json.h" +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/expression_type.h" -#include "mongo/db/matcher/schema/expression_internal_schema_max_items.h" -#include "mongo/db/matcher/schema/expression_internal_schema_max_length.h" -#include "mongo/db/matcher/schema/expression_internal_schema_min_items.h" -#include "mongo/db/matcher/schema/expression_internal_schema_min_length.h" -#include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" -#include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" +#include "mongo/db/matcher/matcher_type_set.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/matcher/schema/json_pointer.cpp b/src/mongo/db/matcher/schema/json_pointer.cpp index d23c0f7f231e5..ee61255f7a63b 100644 --- a/src/mongo/db/matcher/schema/json_pointer.cpp +++ b/src/mongo/db/matcher/schema/json_pointer.cpp @@ -27,11 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/db/matcher/schema/json_pointer.h" - -#include "mongo/bson/bsontypes.h" #include "mongo/util/assert_util.h" namespace { diff --git a/src/mongo/db/matcher/schema/json_pointer_test.cpp b/src/mongo/db/matcher/schema/json_pointer_test.cpp index f91d888871918..8b464300d1390 100644 --- a/src/mongo/db/matcher/schema/json_pointer_test.cpp +++ b/src/mongo/db/matcher/schema/json_pointer_test.cpp @@ -27,12 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/matcher/schema/json_pointer.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/json_schema_parser.cpp b/src/mongo/db/matcher/schema/json_schema_parser.cpp index b946bd547066a..7649bfa21d6a8 100644 --- a/src/mongo/db/matcher/schema/json_schema_parser.cpp +++ b/src/mongo/db/matcher/schema/json_schema_parser.cpp @@ -28,18 +28,40 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/matcher/schema/json_schema_parser.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement_comparator_interface.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" #include "mongo/bson/unordered_fields_bsonelement_comparator.h" -#include "mongo/db/feature_compatibility_version_documentation.h" #include "mongo/db/matcher/doc_validation_util.h" #include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_type.h" +#include "mongo/db/matcher/expression_with_placeholder.h" #include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/encrypt_schema_gen.h" #include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h" @@ -58,9 +80,16 @@ #include "mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h" #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_xor.h" -#include "mongo/db/matcher/schema/json_pointer.h" +#include "mongo/db/matcher/schema/json_schema_parser.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/pcre.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/matcher/schema/json_schema_parser.h b/src/mongo/db/matcher/schema/json_schema_parser.h index 62bc46a7343d8..ad5726b74e476 100644 --- a/src/mongo/db/matcher/schema/json_schema_parser.h +++ b/src/mongo/db/matcher/schema/json_schema_parser.h @@ -29,10 +29,18 @@ #pragma once +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/expression_tree.h" #include "mongo/db/matcher/expression_type.h" +#include "mongo/db/matcher/matcher_type_set.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo { diff --git 
a/src/mongo/db/matcher/schema/logical_keywords_test.cpp b/src/mongo/db/matcher/schema/logical_keywords_test.cpp index e4c8c73d4354d..ba2aee972c54c 100644 --- a/src/mongo/db/matcher/schema/logical_keywords_test.cpp +++ b/src/mongo/db/matcher/schema/logical_keywords_test.cpp @@ -27,14 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/schema/assert_serializes_to.h" #include "mongo/db/matcher/schema/json_schema_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/object_keywords_test.cpp b/src/mongo/db/matcher/schema/object_keywords_test.cpp index 468c0bb8cc90c..5fdfc1b25b134 100644 --- a/src/mongo/db/matcher/schema/object_keywords_test.cpp +++ b/src/mongo/db/matcher/schema/object_keywords_test.cpp @@ -27,15 +27,33 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/assert_serializes_to.h" #include "mongo/db/matcher/schema/json_schema_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" namespace mongo { namespace { diff --git a/src/mongo/db/matcher/schema/scalar_keywords_test.cpp b/src/mongo/db/matcher/schema/scalar_keywords_test.cpp index 04beec7b8536b..61d877cb2fbfe 100644 --- a/src/mongo/db/matcher/schema/scalar_keywords_test.cpp +++ b/src/mongo/db/matcher/schema/scalar_keywords_test.cpp @@ -27,15 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/matcher/expression_always_boolean.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/schema/assert_serializes_to.h" #include "mongo/db/matcher/schema/json_schema_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/metadata_consistency_types.idl b/src/mongo/db/metadata_consistency_types.idl index b571af9a0b80b..a7eb00e28bad3 100644 --- a/src/mongo/db/metadata_consistency_types.idl +++ b/src/mongo/db/metadata_consistency_types.idl @@ -40,6 +40,7 @@ enums: values: kCollectionUUIDMismatch: "CollectionUUIDMismatch" kCorruptedChunkShardKey: "CorruptedChunkShardKey" + kCorruptedZoneShardKey: "CorruptedZoneShardKey" kHiddenShardedCollection: "HiddenShardedCollection" kInconsistentIndex: "InconsistentIndex" kMisplacedCollection: "MisplacedCollection" @@ -49,6 +50,8 @@ enums: kRoutingTableMissingMinKey: "RoutingTableMissingMinKey" kRoutingTableRangeGap: "RoutingTableRangeGap" kRoutingTableRangeOverlap: "RoutingTableRangeOverlap" + kShardThinksCollectionIsUnsharded: "ShardThinksCollectionIsUnsharded" + kZonesRangeOverlap: "ZonesRangeOverlap" MetadataInconsistencyDescription: description: "Description of each metadata inconsistency." @@ -56,6 +59,7 @@ enums: values: kCollectionUUIDMismatch: "Found collection on non primary shard with mismatching UUID" kCorruptedChunkShardKey: "Found chunk with a shard key pattern violation" + kCorruptedZoneShardKey: "Found zone with a shard key pattern violation" kHiddenShardedCollection: "Found sharded collection but relative database does not exist" kInconsistentIndex: "Found an index of a sharded collection that is inconsistent between different shards" kMisplacedCollection: "Unsharded collection found on shard different from database primary shard" @@ -65,6 +69,8 @@ enums: kRoutingTableMissingMinKey: "Routing table has a gap because first chunk does not start from MinKey" kRoutingTableRangeGap: "Found a gap between two consecutive chunks" kRoutingTableRangeOverlap: "Found two overlapping consecutive chunks" + kShardThinksCollectionIsUnsharded: "Shard thinks collection is unsharded while instead is currently sharded" + kZonesRangeOverlap: "Found two overlapping zones" MetadataConsistencyCommandLevel: description: "Level mode of the metadata consistency command." @@ -79,7 +85,8 @@ structs: description: "Details about a misplaced collection inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection that is misplaced." shard: @@ -93,7 +100,8 @@ structs: description: "Details about a UUID mismatch inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection that has a UUID mismatch." shard: @@ -110,7 +118,8 @@ structs: description: "Details about a missing shard key index inconsistency." 
strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection that is missing a shard key index." shard: @@ -124,7 +133,8 @@ structs: description: "Details about a routing table range overlap inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection." collectionUUID: @@ -141,7 +151,8 @@ structs: description: "Details about a routing table range gap inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection." collectionUUID: @@ -158,7 +169,8 @@ structs: description: "Details about a routing table missing min key inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection." collectionUUID: @@ -175,7 +187,8 @@ structs: description: "Details about a routing table missing max key inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection." collectionUUID: @@ -192,7 +205,8 @@ structs: description: "Details about a corrupted chunk shard key inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection." collectionUUID: @@ -205,11 +219,30 @@ structs: type: object description: "The shard key pattern of the collection." + CorruptedZoneShardKeyDetails: + description: "Details about a corrupted zone shard key inconsistency." + strict: false + fields: + namespace: + cpp_name: nss + type: namespacestring + description: "The namespace of the collection." + collectionUUID: + type: uuid + description: "The UUID of the collection." + zone: + type: object + description: "The zone with a corrupted shard key." + shardKeyPattern: + type: object + description: "The shard key pattern of the collection." + HiddenShardedCollectionDetails: description: "Details about a hidden sharded collection inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection that is hidden." collection: @@ -220,7 +253,8 @@ structs: description: "Details about index inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the affected collection." info: @@ -231,12 +265,46 @@ structs: description: "Details about a missing routing table inconsistency." strict: false fields: - ns: + namespace: + cpp_name: nss type: namespacestring description: "The namespace of the collection that has no routing table." collectionUUID: type: uuid description: "The UUID of the collection." + + ShardThinksCollectionIsUnshardedDetails: + description: "Details about shard that thinks collection is unsharded." + strict: false + fields: + namespace: + cpp_name: nss + type: namespacestring + description: "The namespace of the collection." + collectionUUID: + type: uuid + description: "The UUID of the collection." + shard: + type: shard_id + description: "The shard with wrong collection timestamp." + + ZonesRangeOverlapDetails: + description: "Details about a zones range overlap inconsistency." + strict: false + fields: + namespace: + cpp_name: nss + type: namespacestring + description: "The namespace of the collection." + collectionUUID: + type: uuid + description: "The UUID of the collection." 
+ zoneA: + type: object + description: "The first zone that overlaps with the second zone." + zoneB: + type: object + description: "The second zone that overlaps with the first zone." MetadataInconsistencyItem: description: "Object representing a single metadata inconsistency found in a specific shard" diff --git a/src/mongo/db/mirror_maestro.cpp b/src/mongo/db/mirror_maestro.cpp index aa167ea89b1fa..654a39d514a9e 100644 --- a/src/mongo/db/mirror_maestro.cpp +++ b/src/mongo/db/mirror_maestro.cpp @@ -28,34 +28,61 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/mirror_maestro.h" - -#include "mongo/rpc/get_status_from_command_result.h" +#include +#include +#include +#include +#include #include -#include +#include +#include +#include #include +#include -#include - +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/client.h" #include "mongo/db/client_out_of_line_executor.h" #include "mongo/db/commands.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/mirror_maestro.h" #include "mongo/db/mirror_maestro_gen.h" #include "mongo/db/mirroring_sampler.h" -#include "mongo/db/repl/hello_response.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/topology_version_observer.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/connection_pool.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -399,6 +426,10 @@ void MirrorMaestroImpl::_mirror(const std::vector& hosts, // Limit the maxTimeMS bob.append("maxTimeMS", params.getMaxTimeMS()); + if (invocation->ns().tenantId()) { + invocation->ns().tenantId()->serializeToBSON("$tenant", &bob); + } + // Indicate that this is a mirrored read. 
bob.append("mirrored", true); @@ -422,7 +453,8 @@ void MirrorMaestroImpl::_mirror(const std::vector& hosts, for (auto i = 0; i < mirroringFactor; i++) { auto& host = hosts[(startIndex + i) % hosts.size()]; - auto mirrorResponseCallback = [host](auto& args) { + std::weak_ptr wExec(_executor); + auto mirrorResponseCallback = [host, wExec = std::move(wExec)](auto& args) { if (MONGO_likely(!mirrorMaestroExpectsResponse.shouldFail())) { // If we don't expect responses, then there is nothing to do here return; @@ -445,9 +477,23 @@ void MirrorMaestroImpl::_mirror(const std::vector& hosts, "error"_attr = args.response); return; } else if (!args.response.isOK()) { + if (args.response.status == ErrorCodes::CallbackCanceled) { + if (auto exec = wExec.lock(); exec && exec->isShuttingDown()) { + // The mirroring command was canceled as part of the executor being + // shutdown. We avoid crashing here since it's possible that node shutdown + // was triggered unexpectedly as part of our test infrastructure. + LOGV2_INFO( + 7558901, + "Mirroring command callback was canceled due to maestro shutdown", + "error"_attr = args.response, + "host"_attr = host.toString()); + return; + } + } LOGV2_FATAL(4717301, "Received mirroring response with a non-okay status", - "error"_attr = args.response); + "error"_attr = args.response, + "host"_attr = host.toString()); } }; @@ -548,6 +594,7 @@ void MirrorMaestroImpl::shutdown() noexcept { if (_executor) { _executor->shutdown(); + _executor->join(); } // Set _initGuard.liveness to kShutdown diff --git a/src/mongo/db/mirror_maestro.h b/src/mongo/db/mirror_maestro.h index df6c9c8ce440f..df422654ce02a 100644 --- a/src/mongo/db/mirror_maestro.h +++ b/src/mongo/db/mirror_maestro.h @@ -32,6 +32,7 @@ #include #include +#include "mongo/base/string_data.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" diff --git a/src/mongo/db/mirroring_sampler.cpp b/src/mongo/db/mirroring_sampler.cpp index 48aa4930676e7..179aa75c44628 100644 --- a/src/mongo/db/mirroring_sampler.cpp +++ b/src/mongo/db/mirroring_sampler.cpp @@ -28,9 +28,12 @@ */ #include -#include +#include + +#include #include "mongo/db/mirroring_sampler.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/mirroring_sampler.h b/src/mongo/db/mirroring_sampler.h index 4df8e5f03b1e9..7858b50da9b76 100644 --- a/src/mongo/db/mirroring_sampler.h +++ b/src/mongo/db/mirroring_sampler.h @@ -29,8 +29,10 @@ #pragma once +#include #include #include +#include #include #include "mongo/db/repl/hello_response.h" @@ -100,7 +102,7 @@ class MirroringSampler final { * Return all eligible hosts from a HelloResponse that we should mirror to. */ std::vector getRawMirroringTargets( - const std::shared_ptr& isMaster) noexcept; + const std::shared_ptr& helloResponse) noexcept; /** * Approximate use of the MirroringSampler for testing. @@ -108,7 +110,7 @@ class MirroringSampler final { * In practice, we call constituent functions in sequence to pessimistically spare work. */ static std::vector getMirroringTargets( - const std::shared_ptr& isMaster, + const std::shared_ptr& helloResponse, double ratio, RandomFunc rnd = defaultRandomFunc(), int rndMax = defaultRandomMax()) noexcept; diff --git a/src/mongo/db/mirroring_sampler_test.cpp b/src/mongo/db/mirroring_sampler_test.cpp index 21d6fc8ef9a78..39b28bc2416f4 100644 --- a/src/mongo/db/mirroring_sampler_test.cpp +++ b/src/mongo/db/mirroring_sampler_test.cpp @@ -27,11 +27,22 @@ * it in the license file. 
*/ -#include "mongo/bson/util/bson_extract.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/mirroring_sampler.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/mongod.cpp b/src/mongo/db/mongod.cpp index d16cb1954f311..40287e877374e 100644 --- a/src/mongo/db/mongod.cpp +++ b/src/mongo/db/mongod.cpp @@ -27,11 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/mongod_main.h" #include "mongo/util/quick_exit.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep #if defined(_WIN32) // In Windows, wmain() is an alternate entry point for main(), and receives the same parameters diff --git a/src/mongo/db/mongod_main.cpp b/src/mongo/db/mongod_main.cpp index be305bd586fab..db714790ac02e 100644 --- a/src/mongo/db/mongod_main.cpp +++ b/src/mongo/db/mongod_main.cpp @@ -29,33 +29,57 @@ #include "mongo/db/mongod_main.h" +#include #include -#include #include -#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include #include -#include #include +#include +#include +#include #include +#include +#include -#include "mongo/base/init.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connpool.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/global_conn_pool.h" #include "mongo/client/replica_set_monitor.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/audit.h" #include "mongo/db/auth/auth_op_observer.h" #include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/auth/sasl_options.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_impl.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_impl.h" #include "mongo/db/catalog/health_log.h" -#include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/catalog/index_key_validate.h" +#include "mongo/db/catalog/health_log_interface.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" #include "mongo/db/change_collection_expired_documents_remover.h" #include "mongo/db/change_stream_change_collection_manager.h" @@ -64,40 +88,43 @@ #include "mongo/db/client.h" #include "mongo/db/client_metadata_propagation_egress_hook.h" #include "mongo/db/clientcursor.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/commands/feature_compatibility_version_gen.h" +#include "mongo/db/commands/fsync.h" #include "mongo/db/commands/shutdown.h" #include "mongo/db/commands/test_commands.h" 
#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/flow_control_ticketholder.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker_impl.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/dbhelpers.h" -#include "mongo/db/dbmessage.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/fle_crud.h" #include "mongo/db/free_mon/free_mon_mongod.h" #include "mongo/db/ftdc/ftdc_mongod.h" #include "mongo/db/ftdc/util.h" #include "mongo/db/global_settings.h" +#include "mongo/db/index_builds_coordinator.h" #include "mongo/db/index_builds_coordinator_mongod.h" -#include "mongo/db/index_names.h" #include "mongo/db/initialize_server_global_state.h" -#include "mongo/db/introspect.h" -#include "mongo/db/json.h" #include "mongo/db/keys_collection_client_direct.h" -#include "mongo/db/keys_collection_client_sharded.h" #include "mongo/db/keys_collection_manager.h" +#include "mongo/db/keys_collection_manager_gen.h" #include "mongo/db/log_process_details.h" #include "mongo/db/logical_session_cache_factory_mongod.h" #include "mongo/db/logical_time_validator.h" #include "mongo/db/mirror_maestro.h" #include "mongo/db/mongod_options.h" +#include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/fallback_op_observer.h" #include "mongo/db/op_observer/fcv_op_observer.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" @@ -107,17 +134,20 @@ #include "mongo/db/periodic_runner_job_abort_expired_transactions.h" #include "mongo/db/pipeline/change_stream_expired_pre_image_remover.h" #include "mongo/db/pipeline/process_interface/replica_set_node_process_interface.h" -#include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/query_settings_manager.h" #include "mongo/db/query/stats/stats_cache_loader_impl.h" #include "mongo/db/query/stats/stats_catalog.h" +#include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mongod.h" +#include "mongo/db/repl/base_cloner.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/initial_syncer_factory.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/primary_only_service_op_observer.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/repl_settings.h" -#include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_external_state_impl.h" @@ -127,25 +157,29 @@ #include "mongo/db/repl/replication_recovery.h" #include "mongo/db/repl/shard_merge_recipient_op_observer.h" #include "mongo/db/repl/shard_merge_recipient_service.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_donor_op_observer.h" #include 
"mongo/db/repl/tenant_migration_donor_service.h" #include "mongo/db/repl/tenant_migration_recipient_op_observer.h" #include "mongo/db/repl/tenant_migration_recipient_service.h" +#include "mongo/db/repl/tenant_migration_util.h" #include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/repl_set_member_in_standalone_mode.h" +#include "mongo/db/request_execution_context.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/collection_sharding_state_factory_shard.h" #include "mongo/db/s/collection_sharding_state_factory_standalone.h" #include "mongo/db/s/config/configsvr_coordinator_service.h" -#include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/config_server_op_observer.h" +#include "mongo/db/s/migration_chunk_cloner_source_op_observer.h" #include "mongo/db/s/migration_util.h" -#include "mongo/db/s/move_primary/move_primary_donor_service.h" -#include "mongo/db/s/move_primary/move_primary_recipient_service.h" -#include "mongo/db/s/op_observer_sharding_impl.h" #include "mongo/db/s/periodic_sharded_index_consistency_checker.h" -#include "mongo/db/s/query_analysis_op_observer.h" +#include "mongo/db/s/query_analysis_op_observer_configsvr.h" +#include "mongo/db/s/query_analysis_op_observer_rs.h" +#include "mongo/db/s/query_analysis_op_observer_shardsvr.h" #include "mongo/db/s/rename_collection_participant_service.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" #include "mongo/db/s/resharding/resharding_donor_service.h" @@ -154,23 +188,21 @@ #include "mongo/db/s/shard_server_op_observer.h" #include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_initialization_mongod.h" -#include "mongo/db/s/sharding_state_recovery.h" +#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/transaction_coordinator_service.h" #include "mongo/db/server_options.h" +#include "mongo/db/serverless/multitenancy_check.h" #include "mongo/db/serverless/shard_split_donor_op_observer.h" #include "mongo/db/serverless/shard_split_donor_service.h" #include "mongo/db/service_context.h" #include "mongo/db/service_entry_point_mongod.h" -#include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/kill_sessions_local.h" #include "mongo/db/session/logical_session_cache.h" -#include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_killer.h" #include "mongo/db/set_change_stream_state_coordinator.h" #include "mongo/db/startup_recovery.h" #include "mongo/db/startup_warnings_mongod.h" -#include "mongo/db/stats/counters.h" #include "mongo/db/storage/backup_cursor_hooks.h" #include "mongo/db/storage/control/storage_control.h" #include "mongo/db/storage/disk_space_monitor.h" @@ -184,8 +216,9 @@ #include "mongo/db/storage/storage_engine_lock_file.h" #include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/system_index.h" -#include "mongo/db/transaction/internal_transactions_reap_service.h" +#include "mongo/db/timeseries/timeseries_op_observer.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/ttl.h" @@ -193,32 +226,51 @@ #include "mongo/db/wire_version.h" #include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" -#include 
"mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/idl/cluster_server_parameter_gen.h" #include "mongo/idl/cluster_server_parameter_initializer.h" #include "mongo/idl/cluster_server_parameter_op_observer.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" #include "mongo/platform/process_id.h" #include "mongo/platform/random.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/s/catalog/sharding_catalog_client_impl.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/analyze_shard_key_role.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/query_analysis_client.h" #include "mongo/s/query_analysis_sampler.h" #include "mongo/scripting/dbdirectclient_factory.h" #include "mongo/scripting/engine.h" -#include "mongo/stdx/future.h" -#include "mongo/stdx/thread.h" #include "mongo/transport/ingress_handshake_metrics.h" +#include "mongo/transport/service_entry_point.h" +#include "mongo/transport/transport_layer.h" #include "mongo/transport/transport_layer_manager.h" #include "mongo/util/assert_util.h" #include "mongo/util/background.h" +#include "mongo/util/clock_source.h" #include "mongo/util/cmdline_utils/censor_cmdline.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/concurrency/thread_name.h" -#include "mongo/util/exception_filter_win32.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/debugger.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/errno_util.h" #include "mongo/util/exit.h" #include "mongo/util/exit_code.h" #include "mongo/util/fail_point.h" @@ -228,28 +280,20 @@ #include "mongo/util/net/private/ssl_expiration.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/net/ssl_manager.h" -#include "mongo/util/ntservice.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/periodic_runner.h" #include "mongo/util/periodic_runner_factory.h" #include "mongo/util/quick_exit.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/sequence_util.h" #include "mongo/util/signal_handlers.h" -#include "mongo/util/stacktrace.h" -#include "mongo/util/text.h" +#include "mongo/util/str.h" +#include "mongo/util/text.h" // IWYU pragma: keep +#include "mongo/util/thread_safety_context.h" #include "mongo/util/time_support.h" #include "mongo/util/version.h" #include "mongo/watchdog/watchdog_mongod.h" -#ifdef MONGO_CONFIG_SSL -#include "mongo/util/net/ssl_options.h" -#endif - -#if !defined(_WIN32) -#include -#endif - #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -381,19 +425,19 @@ void registerPrimaryOnlyServices(ServiceContext* serviceContext) { services.push_back(std::make_unique(serviceContext)); 
services.push_back(std::make_unique(serviceContext)); services.push_back(std::make_unique(serviceContext)); - services.push_back(std::make_unique(serviceContext)); - services.push_back(std::make_unique(serviceContext)); - services.push_back(std::make_unique(serviceContext)); - services.push_back(std::make_unique(serviceContext)); if (getGlobalReplSettings().isServerless()) { + services.push_back(std::make_unique(serviceContext)); + services.push_back( + std::make_unique(serviceContext)); services.push_back(std::make_unique(serviceContext)); } } if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { - services.push_back(std::make_unique(serviceContext)); - services.push_back(std::make_unique(serviceContext)); if (getGlobalReplSettings().isServerless()) { + services.push_back(std::make_unique(serviceContext)); + services.push_back( + std::make_unique(serviceContext)); services.push_back(std::make_unique(serviceContext)); services.push_back(std::make_unique(serviceContext)); } @@ -419,6 +463,12 @@ MONGO_FAIL_POINT_DEFINE(shutdownAtStartup); ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { Client::initThread("initandlisten"); + // TODO(SERVER-74659): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + serviceContext->setFastClockSource(FastClockSourceFactory::create(Milliseconds(10))); DBDirectClientFactory::get(serviceContext).registerImplementation([](OperationContext* opCtx) { @@ -702,7 +752,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { WaitForMajorityService::get(serviceContext).startup(serviceContext); if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { - // A catalog shard initializes sharding awareness after setting up its config server state. + // A config shard initializes sharding awareness after setting up its config server state. // This function may take the global lock. initializeShardingAwarenessIfNeededAndLoadGlobalSettings(startupOpCtx.get()); @@ -754,15 +804,6 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { startFreeMonitoring(serviceContext); - if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { - // Note: For replica sets, ShardingStateRecovery happens on transition to primary. - if (!replCoord->isReplEnabled()) { - if (ShardingState::get(startupOpCtx.get())->enabled()) { - uassertStatusOK(ShardingStateRecovery_DEPRECATED::recover(startupOpCtx.get())); - } - } - } - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { initializeGlobalShardingStateForConfigServerIfNeeded(startupOpCtx.get()); @@ -770,10 +811,16 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { initializeShardingAwarenessIfNeededAndLoadGlobalSettings(startupOpCtx.get()); } - if (serverGlobalParams.clusterRole.has(ClusterRole::None) && - replSettings.usingReplSets()) { // standalone replica set - // The keys client must use local read concern if the storage engine can't support - // majority read concern. + if (replSettings.usingReplSets() && + (serverGlobalParams.clusterRole.has(ClusterRole::None) || + !Grid::get(startupOpCtx.get())->isShardingInitialized())) { + // If this is a mongod in a standalone replica set or a shardsvr replica set that has + // not initialized its sharding identity, start up the cluster time keys manager with a + // local/direct keys client. 
The keys client must use local read concern if the storage + // engine can't support majority read concern. If this is a mongod in a configsvr or + // shardsvr replica set that has initialized its sharding identity, the keys manager is + // by design initialized separately with a sharded keys client when the sharding state + // is initialized. auto keysClientMustUseLocalReads = !serviceContext->getStorageEngine()->supportsReadConcernMajority(); auto keysCollectionClient = @@ -786,7 +833,9 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { LogicalTimeValidator::set(startupOpCtx->getServiceContext(), std::make_unique(keyManager)); + } + if (replSettings.usingReplSets() && serverGlobalParams.clusterRole.has(ClusterRole::None)) { ReplicaSetNodeProcessInterface::getReplicaSetNodeExecutor(serviceContext)->startup(); } @@ -812,6 +861,17 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { "maintenance and no other clients are connected. The TTL collection monitor will " "not start because of this. For more info see " "http://dochub.mongodb.org/core/ttlcollections"); + + if (gAllowUnsafeUntimestampedWrites && + !repl::ReplSettings::shouldRecoverFromOplogAsStandalone()) { + LOGV2_WARNING_OPTIONS( + 7692300, + {logv2::LogTag::kStartupWarnings}, + "Replica set member is in standalone mode. Performing any writes will result " + "in them being untimestamped. If a write is to an existing document, the " + "document's history will be overwritten with the new value since the beginning " + "of time. This can break snapshot isolation within the storage engine."); + } } else { startTTLMonitor(serviceContext); } @@ -909,7 +969,7 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { LogicalSessionCache::set(serviceContext, makeLogicalSessionCacheD(kind)); - if (analyze_shard_key::supportsSamplingQueries(serviceContext, true /* ignoreFCV */) && + if (analyze_shard_key::supportsSamplingQueries(serviceContext) && serverGlobalParams.clusterRole.has(ClusterRole::None)) { analyze_shard_key::QueryAnalysisSampler::get(serviceContext).onStartup(); } @@ -918,6 +978,10 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { auto catalog = std::make_unique(serviceContext, std::move(cacheLoader)); stats::StatsCatalog::set(serviceContext, std::move(catalog)); + // Startup options are written to the audit log at the end of startup so that cluster server + // parameters are guaranteed to have been initialized from disk at this point. + audit::logStartupOptions(Client::getCurrent(), serverGlobalParams.parsedOpts); + // MessageServer::run will return when exit code closes its socket and we don't need the // operation context anymore startupOpCtx.reset(); @@ -946,10 +1010,6 @@ ExitCode _initAndListen(ServiceContext* serviceContext, int listenPort) { quickExit(ExitCode::fail); } - // Startup options are written to the audit log at the end of startup so that cluster server - // parameters are guaranteed to have been initialized from disk at this point. 
- audit::logStartupOptions(Client::getCurrent(), serverGlobalParams.parsedOpts); - serviceContext->notifyStartupComplete(); #ifndef _WIN32 @@ -1157,6 +1217,9 @@ auto makeReplicaSetNodeExecutor(ServiceContext* serviceContext) { tpOptions.maxThreads = ThreadPool::Options::kUnlimited; tpOptions.onCreateThread = [](const std::string& threadName) { Client::initThread(threadName.c_str()); + + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); }; auto hookList = std::make_unique(); hookList->addHook(std::make_unique(serviceContext)); @@ -1173,6 +1236,9 @@ auto makeReplicationExecutor(ServiceContext* serviceContext) { tpOptions.maxThreads = 50; tpOptions.onCreateThread = [](const std::string& threadName) { Client::initThread(threadName.c_str()); + + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); }; auto hookList = std::make_unique(); hookList->addHook(std::make_unique(serviceContext)); @@ -1219,14 +1285,10 @@ void setUpReplication(ServiceContext* serviceContext) { ReplicaSetNodeProcessInterface::setReplicaSetNodeExecutor( serviceContext, makeReplicaSetNodeExecutor(serviceContext)); - // The check below ignores the FCV because FCV is not initialized until after the replica - // set is initiated. - if (analyze_shard_key::isFeatureFlagEnabled(true /* ignoreFCV */)) { - analyze_shard_key::QueryAnalysisClient::get(serviceContext) - .setTaskExecutor( - serviceContext, - ReplicaSetNodeProcessInterface::getReplicaSetNodeExecutor(serviceContext)); - } + analyze_shard_key::QueryAnalysisClient::get(serviceContext) + .setTaskExecutor( + serviceContext, + ReplicaSetNodeProcessInterface::getReplicaSetNodeExecutor(serviceContext)); } repl::ReplicationCoordinator::set(serviceContext, std::move(replCoord)); @@ -1248,51 +1310,64 @@ void setUpObservers(ServiceContext* serviceContext) { if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { DurableHistoryRegistry::get(serviceContext) ->registerPin(std::make_unique()); - opObserverRegistry->addObserver(std::make_unique( + opObserverRegistry->addObserver(std::make_unique( std::make_unique(std::make_unique()))); + opObserverRegistry->addObserver(std::make_unique()); opObserverRegistry->addObserver(std::make_unique()); opObserverRegistry->addObserver(std::make_unique()); - opObserverRegistry->addObserver(std::make_unique()); - opObserverRegistry->addObserver( - std::make_unique()); opObserverRegistry->addObserver(std::make_unique()); if (getGlobalReplSettings().isServerless()) { + opObserverRegistry->addObserver( + std::make_unique()); + opObserverRegistry->addObserver( + std::make_unique()); opObserverRegistry->addObserver(std::make_unique()); opObserverRegistry->addObserver( std::make_unique()); } - } - - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { - if (!gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafeAtStartup()) { + if (!gMultitenancySupport) { opObserverRegistry->addObserver( - std::make_unique(std::make_unique())); + std::make_unique()); } + } + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { opObserverRegistry->addObserver(std::make_unique()); opObserverRegistry->addObserver(std::make_unique()); + if (!gMultitenancySupport) { + opObserverRegistry->addObserver( + std::make_unique()); + } } if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { opObserverRegistry->addObserver( std::make_unique(std::make_unique())); - opObserverRegistry->addObserver(std::make_unique()); - opObserverRegistry->addObserver( - std::make_unique()); 
opObserverRegistry->addObserver(std::make_unique()); if (getGlobalReplSettings().isServerless()) { + opObserverRegistry->addObserver( + std::make_unique()); + opObserverRegistry->addObserver( + std::make_unique()); opObserverRegistry->addObserver(std::make_unique()); opObserverRegistry->addObserver( std::make_unique()); } + + auto replCoord = repl::ReplicationCoordinator::get(serviceContext); + if (!gMultitenancySupport && replCoord && replCoord->isReplEnabled()) { + opObserverRegistry->addObserver( + std::make_unique()); + } } + opObserverRegistry->addObserver(std::make_unique()); + opObserverRegistry->addObserver(std::make_unique()); opObserverRegistry->addObserver(std::make_unique()); opObserverRegistry->addObserver( std::make_unique(serviceContext)); opObserverRegistry->addObserver(std::make_unique()); opObserverRegistry->addObserver(std::make_unique()); - opObserverRegistry->addObserver(std::make_unique()); setupFreeMonitoringOpObserver(opObserverRegistry.get()); @@ -1310,18 +1385,18 @@ MONGO_INITIALIZER_GENERAL(setSSLManagerType, (), ("SSLManager")) } #endif -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - // NOTE: This function may be called at any time after registerShutdownTask is called below. It // must not depend on the prior execution of mongo initializers or the existence of threads. void shutdownTask(const ShutdownTaskArgs& shutdownArgs) { // This client initiation pattern is only to be used here, with plans to eliminate this pattern // down the line. - if (!haveClient()) + if (!haveClient()) { Client::initThread(getThreadName()); + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + auto const client = Client::getCurrent(); auto const serviceContext = client->getServiceContext(); @@ -1338,6 +1413,14 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) { hangBeforeShutdown.pauseWhileSet(); } + // Before doing anything else, ensure fsync is inactive or make it release its GlobalRead lock. + { + stdx::unique_lock stateLock(fsyncStateMutex); + if (globalFsyncLockThread) { + globalFsyncLockThread->shutdown(stateLock); + } + } + // If we don't have shutdownArgs, we're shutting down from a signal, or other clean shutdown // path. // @@ -1405,7 +1488,7 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) { lsc->joinOnShutDown(); } - if (analyze_shard_key::supportsSamplingQueries(serviceContext, true /* ignoreFCV */)) { + if (analyze_shard_key::supportsSamplingQueries(serviceContext)) { LOGV2_OPTIONS(7350601, {LogComponent::kDefault}, "Shutting down the QueryAnalysisSampler"); analyze_shard_key::QueryAnalysisSampler::get(serviceContext).onShutdown(); } @@ -1524,7 +1607,10 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) { LOGV2_OPTIONS(4784918, {LogComponent::kNetwork}, "Shutting down the ReplicaSetMonitor"); ReplicaSetMonitor::shutdown(); - if (auto sr = Grid::get(serviceContext)->shardRegistry()) { + auto sr = Grid::get(serviceContext)->isInitialized() + ? Grid::get(serviceContext)->shardRegistry() + : nullptr; + if (sr) { LOGV2_OPTIONS(4784919, {LogComponent::kSharding}, "Shutting down the shard registry"); sr->shutdown(); } @@ -1541,7 +1627,10 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) { validator->shutDown(); } - if (auto pool = Grid::get(serviceContext)->getExecutorPool()) { + auto pool = Grid::get(serviceContext)->isInitialized() + ? 
Grid::get(serviceContext)->getExecutorPool() + : nullptr; + if (pool) { LOGV2_OPTIONS(6773200, {LogComponent::kSharding}, "Shutting down the ExecutorPool"); pool->shutdownAndJoin(); } @@ -1553,7 +1642,7 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) { migrationUtilExecutor->shutdown(); migrationUtilExecutor->join(); - if (ShardingState::get(serviceContext)->enabled()) { + if (Grid::get(serviceContext)->isShardingInitialized()) { // The CatalogCache must be shuted down before shutting down the CatalogCacheLoader as the // CatalogCache may try to schedule work on CatalogCacheLoader and fail. LOGV2_OPTIONS(6773201, {LogComponent::kSharding}, "Shutting down the CatalogCache"); @@ -1645,6 +1734,8 @@ void shutdownTask(const ShutdownTaskArgs& shutdownArgs) { int mongod_main(int argc, char* argv[]) { ThreadSafetyContext::getThreadSafetyContext()->forbidMultiThreading(); + waitForDebugger(); + registerShutdownTask(shutdownTask); setupSignalHandlers(); @@ -1707,9 +1798,8 @@ int mongod_main(int argc, char* argv[]) { setUpCatalog(service); setUpReplication(service); setUpObservers(service); + setUpMultitenancyCheck(service, gMultitenancySupport); service->setServiceEntryPoint(std::make_unique(service)); - SessionCatalog::get(service)->setOnEagerlyReapedSessionsFn( - InternalTransactionsReapService::onEagerlyReapedSessions); ErrorExtraInfo::invariantHaveAllParsers(); @@ -1734,6 +1824,8 @@ int mongod_main(int argc, char* argv[]) { ChangeStreamChangeCollectionManager::create(service); } + query_settings::QuerySettingsManager::create(service); + #if defined(_WIN32) if (ntservice::shouldStartService()) { ntservice::startService(); diff --git a/src/mongo/db/mongod_options.cpp b/src/mongo/db/mongod_options.cpp index 8d2755795974b..df0ac878dfca8 100644 --- a/src/mongo/db/mongod_options.cpp +++ b/src/mongo/db/mongod_options.cpp @@ -30,18 +30,35 @@ #include "mongo/db/mongod_options.h" -#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/bson/util/builder.h" -#include "mongo/config.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/cluster_auth_mode_option_gen.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/global_settings.h" #include "mongo/db/keyfile_option_gen.h" #include "mongo/db/mongod_options_general_gen.h" @@ -49,20 +66,29 @@ #include "mongo/db/mongod_options_replication_gen.h" #include "mongo/db/mongod_options_sharding_gen.h" #include "mongo/db/mongod_options_storage_gen.h" +#include "mongo/db/repl/repl_set_config_params_gen.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/server_options.h" #include "mongo/db/server_options_base.h" #include "mongo/db/server_options_nongeneral_gen.h" #include "mongo/db/server_options_server_helpers.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" -#include "mongo/logv2/log_domain_global.h" -#include "mongo/logv2/log_manager.h" -#include "mongo/util/net/ssl_options.h" 
+#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #include "mongo/util/options_parser/startup_options.h" #include "mongo/util/str.h" #include "mongo/util/version.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -120,6 +146,47 @@ void appendSysInfo(BSONObjBuilder* obj) { #endif } +StatusWith populateReplSettings(const moe::Environment& params) { + repl::ReplSettings replSettings; + + if (params.count("replication.serverless")) { + if (params.count("replication.replSet") || params.count("replication.replSetName")) { + return Status(ErrorCodes::BadValue, + "serverless cannot be used with replSet or replSetName options"); + } + // Starting a node in "serverless" mode implies it uses a replSet. + replSettings.setServerlessMode(); + } else if (params.count("replication.replSet")) { + /* seed list of hosts for the repl set */ + replSettings.setReplSetString(params["replication.replSet"].as().c_str()); + } else if (params.count("replication.replSetName")) { + // "replSetName" is previously removed if "replSet" and "replSetName" are both found to be + // set by the user. Therefore, we only need to check for it if "replSet" in not found. + replSettings.setReplSetString(params["replication.replSetName"].as().c_str()); + } + + if (params.count("replication.oplogSizeMB")) { + long long x = params["replication.oplogSizeMB"].as(); + if (x <= 0) { + return Status(ErrorCodes::BadValue, + str::stream() << "bad --oplogSize, arg must be greater than 0," + "found: " + << x); + } + // note a small size such as x==1 is ok for an arbiter. + if (x > 1000 && sizeof(void*) == 4) { + StringBuilder sb; + sb << "--oplogSize of " << x + << "MB is too big for 32 bit version. Use 64 bit build instead."; + return Status(ErrorCodes::BadValue, sb.str()); + } + replSettings.setOplogSizeBytes(x * 1024 * 1024); + invariant(replSettings.getOplogSizeBytes() > 0); + } + + return replSettings; +} + } // namespace bool handlePreValidationMongodOptions(const moe::Environment& params, @@ -221,31 +288,6 @@ Status canonicalizeMongodOptions(moe::Environment* params) { } } - // "sharding.archiveMovedChunks" comes from the config file, so override it if - // "noMoveParanoia" or "moveParanoia" are set since those come from the command line. - if (params->count("noMoveParanoia")) { - Status ret = params->set("sharding.archiveMovedChunks", - moe::Value(!(*params)["noMoveParanoia"].as())); - if (!ret.isOK()) { - return ret; - } - ret = params->remove("noMoveParanoia"); - if (!ret.isOK()) { - return ret; - } - } - if (params->count("moveParanoia")) { - Status ret = params->set("sharding.archiveMovedChunks", - moe::Value((*params)["moveParanoia"].as())); - if (!ret.isOK()) { - return ret; - } - ret = params->remove("moveParanoia"); - if (!ret.isOK()) { - return ret; - } - } - // "sharding.clusterRole" comes from the config file, so override it if "configsvr" or // "shardsvr" are set since those come from the command line. 
if (params->count("configsvr")) { @@ -344,6 +386,23 @@ Status storeMongodOptions(const moe::Environment& params) { return ret; } + boost::optional> setParameterMap; + if (params.count("setParameter")) { + setParameterMap.emplace(params["setParameter"].as>()); + } + + auto checkConflictWithSetParameter = [&setParameterMap](const std::string& configName, + const std::string& parameterName) { + if (setParameterMap && setParameterMap->find(parameterName) != setParameterMap->end()) { + return Status(ErrorCodes::BadValue, + fmt::format("Conflicting server setting and setParameter, only one of " + "the two should be used: config={}, setParameter={}", + configName, + parameterName)); + } + return Status::OK(); + }; + // TODO: Integrate these options with their setParameter counterparts if (params.count("security.authSchemaVersion")) { return Status(ErrorCodes::BadValue, @@ -406,6 +465,11 @@ Status storeMongodOptions(const moe::Environment& params) { if (params.count("storage.syncPeriodSecs")) { storageGlobalParams.syncdelay = params["storage.syncPeriodSecs"].as(); + Status conflictStatus = + checkConflictWithSetParameter("storage.syncPeriodSecs", "syncdelay"); + if (!conflictStatus.isOK()) { + return conflictStatus; + } } if (params.count("storage.directoryPerDB")) { @@ -424,12 +488,10 @@ Status storeMongodOptions(const moe::Environment& params) { if (params.count("storage.journal.commitIntervalMs")) { auto journalCommitIntervalMs = params["storage.journal.commitIntervalMs"].as(); storageGlobalParams.journalCommitIntervalMs.store(journalCommitIntervalMs); - if (journalCommitIntervalMs < 1 || - journalCommitIntervalMs > StorageGlobalParams::kMaxJournalCommitIntervalMs) { - return Status(ErrorCodes::BadValue, - str::stream() - << "--journalCommitInterval out of allowed range (1-" - << StorageGlobalParams::kMaxJournalCommitIntervalMs << "ms)"); + Status conflictStatus = checkConflictWithSetParameter("storage.journal.commitIntervalMs", + "journalCommitInterval"); + if (!conflictStatus.isOK()) { + return conflictStatus; } } @@ -472,25 +534,22 @@ Status storeMongodOptions(const moe::Environment& params) { storageGlobalParams.magicRestore = 1; } - repl::ReplSettings replSettings; - if (params.count("replication.serverless")) { - if (params.count("replication.replSet") || params.count("replication.replSetName")) { + const auto replSettingsWithStatus = populateReplSettings(params); + if (!replSettingsWithStatus.isOK()) + return replSettingsWithStatus.getStatus(); + const repl::ReplSettings& replSettings(replSettingsWithStatus.getValue()); + + if (replSettings.usingReplSets()) { + if ((params.count("security.authorization") && + params["security.authorization"].as() == "enabled") && + !serverGlobalParams.startupClusterAuthMode.x509Only() && + serverGlobalParams.keyFile.empty()) { return Status(ErrorCodes::BadValue, - "serverless cannot be used with replSet or replSetName options"); + str::stream() << "security.keyFile is required when authorization is " + "enabled with replica sets"); } - // Starting a node in "serverless" mode implies it uses a replSet. - replSettings.setServerlessMode(); - } - if (params.count("replication.replSet")) { - /* seed list of hosts for the repl set */ - replSettings.setReplSetString(params["replication.replSet"].as().c_str()); - } else if (params.count("replication.replSetName")) { - // "replSetName" is previously removed if "replSet" and "replSetName" are both found to be - // set by the user. Therefore, we only need to check for it if "replSet" in not found. 
- replSettings.setReplSetString(params["replication.replSetName"].as().c_str()); } else { - // If neither "replication.replSet" nor "replication.replSetName" is set, then we are in - // standalone mode. + // If we are not using a replica set, then we are in standalone mode. // // A standalone node does not use the oplog collection, so special truncation handling for // the capped collection is unnecessary. @@ -506,17 +565,6 @@ Status storeMongodOptions(const moe::Environment& params) { storageGlobalParams.allowOplogTruncation = false; } - if (replSettings.usingReplSets() && - (params.count("security.authorization") && - params["security.authorization"].as() == "enabled") && - !serverGlobalParams.startupClusterAuthMode.x509Only() && - serverGlobalParams.keyFile.empty()) { - return Status( - ErrorCodes::BadValue, - str::stream() - << "security.keyFile is required when authorization is enabled with replica sets"); - } - serverGlobalParams.enableMajorityReadConcern = true; if (storageGlobalParams.engineSetByUser && (storageGlobalParams.engine == "devnull")) { @@ -537,25 +585,6 @@ Status storeMongodOptions(const moe::Environment& params) { } } - if (params.count("replication.oplogSizeMB")) { - long long x = params["replication.oplogSizeMB"].as(); - if (x <= 0) { - return Status(ErrorCodes::BadValue, - str::stream() << "bad --oplogSize, arg must be greater than 0," - "found: " - << x); - } - // note a small size such as x==1 is ok for an arbiter. - if (x > 1000 && sizeof(void*) == 4) { - StringBuilder sb; - sb << "--oplogSize of " << x - << "MB is too big for 32 bit version. Use 64 bit build instead."; - return Status(ErrorCodes::BadValue, sb.str()); - } - replSettings.setOplogSizeBytes(x * 1024 * 1024); - invariant(replSettings.getOplogSizeBytes() > 0); - } - if (params.count("storage.oplogMinRetentionHours")) { storageGlobalParams.oplogMinRetentionHours.store( params["storage.oplogMinRetentionHours"].as()); @@ -587,26 +616,20 @@ Status storeMongodOptions(const moe::Environment& params) { return Status(ErrorCodes::BadValue, sb.str()); } } - } else { - if (serverGlobalParams.port < 0 || serverGlobalParams.port > 65535) { - return Status(ErrorCodes::BadValue, "bad --port number"); - } } if (params.count("sharding.clusterRole")) { auto clusterRoleParam = params["sharding.clusterRole"].as(); - const bool replicationEnabled = params.count("replication.replSet") || - params.count("replication.replSetName") || params.count("replication.serverless"); // Force to set up the node as a replica set, unless we're a shard and we're using queryable // backup mode. if ((clusterRoleParam == "configsvr" || !params.count("storage.queryableBackupMode")) && - !replicationEnabled) { + !replSettings.usingReplSets()) { return Status(ErrorCodes::BadValue, str::stream() << "Cannot start a " << clusterRoleParam << " as a standalone server. Please use the option " "--replSet to start the node as a replica set."); } if (clusterRoleParam == "configsvr") { - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; // Config server requires majority read concern. 
uassert(5324702, str::stream() << "Cannot initialize config server with " @@ -621,10 +644,6 @@ Status storeMongodOptions(const moe::Environment& params) { } } - if (params.count("sharding.archiveMovedChunks")) { - serverGlobalParams.moveParanoia = params["sharding.archiveMovedChunks"].as(); - } - if (params.count("sharding._overrideShardIdentity")) { auto docAsString = params["sharding._overrideShardIdentity"].as(); diff --git a/src/mongo/db/mongod_options.h b/src/mongo/db/mongod_options.h index c1a918af908d5..b5194acd6266e 100644 --- a/src/mongo/db/mongod_options.h +++ b/src/mongo/db/mongod_options.h @@ -30,12 +30,18 @@ #pragma once #include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/server_options.h" #include "mongo/db/storage/storage_options.h" +#include "mongo/util/exit_code.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" +#include "mongo/util/options_parser/value.h" namespace mongo { diff --git a/src/mongo/db/mongod_options_init.cpp b/src/mongo/db/mongod_options_init.cpp index 8c749dff58abb..166eef4b19be2 100644 --- a/src/mongo/db/mongod_options_init.cpp +++ b/src/mongo/db/mongod_options_init.cpp @@ -27,11 +27,17 @@ * it in the license file. */ -#include "mongo/db/mongod_options.h" - #include +#include +#include +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/db/mongod_options.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" #include "mongo/util/quick_exit.h" diff --git a/src/mongo/db/mongod_options_sharding.idl b/src/mongo/db/mongod_options_sharding.idl index 88213fe5498fc..19874f10653d9 100644 --- a/src/mongo/db/mongod_options_sharding.idl +++ b/src/mongo/db/mongod_options_sharding.idl @@ -63,24 +63,3 @@ configs: source: yaml conflicts: configsvr requires: 'storage.queryableBackupMode' - noMoveParanoia: - description: 'Turn off paranoid saving of data for the moveChunk command; default' - arg_vartype: Switch - source: [ cli, ini ] - conflicts: moveParanoia - hidden: true - moveParanoia: - description: >- - Turn on paranoid saving of data during the moveChunk command - (used for internal system diagnostics) - arg_vartype: Switch - source: [ cli, ini ] - conflicts: noMoveParanoia - hidden: true - 'sharding.archiveMovedChunks': - description: >- - Config file option to turn on paranoid saving of data during the - moveChunk command (used for internal system diagnostics) - arg_vartype: Bool - source: yaml - hidden: true diff --git a/src/mongo/db/mongod_options_storage.idl b/src/mongo/db/mongod_options_storage.idl index b1c3e041ad47c..32c6ba289e2f1 100644 --- a/src/mongo/db/mongod_options_storage.idl +++ b/src/mongo/db/mongod_options_storage.idl @@ -83,20 +83,22 @@ configs: arg_vartype: Switch hidden: true + # This config has a setParameter alias in storage_paramaters.idl. To resolve the conflict, the + # default is encoded the StorageGlobalParams. 'storage.syncPeriodSecs': description: 'Seconds between disk syncs' short_name: syncdelay arg_vartype: Double - default: 60.0 validator: gte: 0.0 lte: { expr: 'StorageGlobalParams::kMaxSyncdelaySecs' } + # This config has a setParameter alias in storage_paramaters.idl. 
To resolve the conflict, the + # default is encoded the StorageGlobalParams. 'storage.journal.commitIntervalMs': description: 'how often to group/batch commit (ms)' short_name: 'journalCommitInterval' arg_vartype: Int - default: 100 validator: gte: 1 lte: { expr: 'StorageGlobalParams::kMaxJournalCommitIntervalMs' } diff --git a/src/mongo/db/multi_key_path_tracker.cpp b/src/mongo/db/multi_key_path_tracker.cpp index 6c1f52ddbe130..20dc49c8d48b1 100644 --- a/src/mongo/db/multi_key_path_tracker.cpp +++ b/src/mongo/db/multi_key_path_tracker.cpp @@ -27,13 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +// IWYU pragma: no_include "boost/move/algo/detail/set_difference.hpp" +// IWYU pragma: no_include "boost/move/detail/iterator_to_raw_pointer.hpp" +#include +#include +#include +#include +#include #include +#include -#include "mongo/db/multi_key_path_tracker.h" +#include +#include "mongo/db/multi_key_path_tracker.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/multi_key_path_tracker.h b/src/mongo/db/multi_key_path_tracker.h index eeecf43a43780..73e45a1f215d7 100644 --- a/src/mongo/db/multi_key_path_tracker.h +++ b/src/mongo/db/multi_key_path_tracker.h @@ -29,11 +29,13 @@ #pragma once -#include - #include +#include +#include +#include #include "mongo/db/index/multikey_paths.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/storage/key_string.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/db/multi_key_path_tracker_test.cpp b/src/mongo/db/multi_key_path_tracker_test.cpp index 9203ff5ff4a8a..b3170a0212230 100644 --- a/src/mongo/db/multi_key_path_tracker_test.cpp +++ b/src/mongo/db/multi_key_path_tracker_test.cpp @@ -31,12 +31,12 @@ * Unittest for MultikeyPathTracker operations. */ -#include "mongo/platform/basic.h" - -#include +#include +#include "mongo/base/string_data.h" #include "mongo/db/multi_key_path_tracker.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/multitenancy.cpp b/src/mongo/db/multitenancy.cpp index f12a4f7b55d4a..2d9da274c4be4 100644 --- a/src/mongo/db/multitenancy.cpp +++ b/src/mongo/db/multitenancy.cpp @@ -29,6 +29,11 @@ #include "mongo/db/multitenancy.h" +#include +#include + +#include + #include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/tenant_id.h" diff --git a/src/mongo/db/multitenancy.h b/src/mongo/db/multitenancy.h index b028286659d1d..179ef08d7fb34 100644 --- a/src/mongo/db/multitenancy.h +++ b/src/mongo/db/multitenancy.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/db/operation_context.h" #include "mongo/db/tenant_id.h" diff --git a/src/mongo/db/namespace_string.cpp b/src/mongo/db/namespace_string.cpp index 0a58d8c33213d..58424dd4fb95b 100644 --- a/src/mongo/db/namespace_string.cpp +++ b/src/mongo/db/namespace_string.cpp @@ -27,17 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/namespace_string.h" -#include +#include +#include +#include #include "mongo/base/parse_number.h" #include "mongo/base/status.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/server_options.h" -#include "mongo/util/str.h" +// IWYU pragma: no_include "mongo/db/namespace_string_reserved.def.h" +#include "mongo/util/duration.h" namespace mongo { namespace { @@ -54,32 +57,6 @@ constexpr auto fle2EcocSuffix = ".ecoc"_sd; } // namespace - -NamespaceString NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(StringData ns) { - if (!gMultitenancySupport) { - return NamespaceString(boost::none, ns); - } - - auto tenantDelim = ns.find('_'); - auto collDelim = ns.find('.'); - // If the first '_' is after the '.' that separates the db and coll names, the '_' is part - // of the coll name and is not a db prefix. - if (tenantDelim == std::string::npos || collDelim < tenantDelim) { - return NamespaceString(boost::none, ns); - } - - auto swOID = OID::parse(ns.substr(0, tenantDelim)); - if (swOID.getStatus() == ErrorCodes::BadValue) { - // If we fail to parse an OID, either the size of the substring is incorrect, or there is an - // invalid character. This indicates that the db has the "_" character, but it does not act - // as a delimeter for a tenantId prefix. - return NamespaceString(boost::none, ns); - } - - const TenantId tenantId(swOID.getValue()); - return NamespaceString(tenantId, ns.substr(tenantDelim + 1, ns.size() - 1 - tenantDelim)); -} - bool NamespaceString::isListCollectionsCursorNS() const { return coll() == listCollectionsCursorCol; } @@ -90,48 +67,47 @@ bool NamespaceString::isCollectionlessAggregateNS() const { bool NamespaceString::isLegalClientSystemNS( const ServerGlobalParams::FeatureCompatibility& currentFCV) const { - auto dbname = dbName().db(); - - if (dbname == DatabaseName::kAdmin.db()) { - if (coll() == "system.roles") + auto collectionName = coll(); + if (isAdminDB()) { + if (collectionName == "system.roles") return true; - if (coll() == kServerConfigurationNamespace.coll()) + if (collectionName == kServerConfigurationNamespace.coll()) return true; - if (coll() == kKeysCollectionNamespace.coll()) + if (collectionName == kKeysCollectionNamespace.coll()) return true; - if (coll() == "system.backup_users") + if (collectionName == "system.backup_users") return true; - } else if (dbname == DatabaseName::kConfig.db()) { - if (coll() == "system.sessions") + } else if (isConfigDB()) { + if (collectionName == "system.sessions") return true; - if (coll() == kIndexBuildEntryNamespace.coll()) + if (collectionName == kIndexBuildEntryNamespace.coll()) return true; - if (coll().find(".system.resharding.") != std::string::npos) + if (collectionName.find(".system.resharding.") != std::string::npos) return true; - if (coll() == kShardingDDLCoordinatorsNamespace.coll()) + if (collectionName == kShardingDDLCoordinatorsNamespace.coll()) return true; - if (coll() == kConfigsvrCoordinatorsNamespace.coll()) + if (collectionName == kConfigsvrCoordinatorsNamespace.coll()) return true; - } else if (dbname == DatabaseName::kLocal.db()) { - if (coll() == kSystemReplSetNamespace.coll()) + } else if (isLocalDB()) { + if (collectionName == kSystemReplSetNamespace.coll()) return true; - if (coll() == kLocalHealthLogNamespace.coll()) + if (collectionName == kLocalHealthLogNamespace.coll()) return true; - if (coll() 
== kConfigsvrRestoreNamespace.coll()) + if (collectionName == kConfigsvrRestoreNamespace.coll()) return true; } - if (coll() == "system.users") + if (collectionName == "system.users") return true; - if (coll() == "system.js") + if (collectionName == "system.js") return true; - if (coll() == kSystemDotViewsCollectionName) + if (collectionName == kSystemDotViewsCollectionName) return true; if (isTemporaryReshardingCollection()) { return true; } if (isTimeseriesBucketsCollection() && - validCollectionName(coll().substr(kTimeseriesBucketsCollectionPrefix.size()))) { + validCollectionName(collectionName.substr(kTimeseriesBucketsCollectionPrefix.size()))) { return true; } if (isChangeStreamPreImagesCollection()) { @@ -167,16 +143,17 @@ bool NamespaceString::isLegalClientSystemNS( * with creating tenant access blockers on secondaries. */ bool NamespaceString::mustBeAppliedInOwnOplogBatch() const { + auto ns = this->ns(); return isSystemDotViews() || isServerConfigurationCollection() || isPrivilegeCollection() || - _ns == kDonorReshardingOperationsNamespace.ns() || - _ns == kForceOplogBatchBoundaryNamespace.ns() || - _ns == kTenantMigrationDonorsNamespace.ns() || _ns == kShardMergeRecipientsNamespace.ns() || - _ns == kTenantMigrationRecipientsNamespace.ns() || _ns == kShardSplitDonorsNamespace.ns() || - _ns == kConfigsvrShardsNamespace.ns(); + ns == kDonorReshardingOperationsNamespace.ns() || + ns == kForceOplogBatchBoundaryNamespace.ns() || + ns == kTenantMigrationDonorsNamespace.ns() || ns == kShardMergeRecipientsNamespace.ns() || + ns == kTenantMigrationRecipientsNamespace.ns() || ns == kShardSplitDonorsNamespace.ns() || + ns == kConfigsvrShardsNamespace.ns(); } -NamespaceString NamespaceString::makeBulkWriteNSS() { - return NamespaceString(DatabaseName::kAdmin, bulkWriteCursorCol); +NamespaceString NamespaceString::makeBulkWriteNSS(const boost::optional& tenantId) { + return NamespaceString(DatabaseName::kAdmin.db(), bulkWriteCursorCol, tenantId); } NamespaceString NamespaceString::makeClusterParametersNSS( @@ -235,6 +212,11 @@ NamespaceString NamespaceString::makeMovePrimaryCollectionsToCloneNSS(const UUID "movePrimaryCollectionsToClone." + migrationId.toString()); } +NamespaceString NamespaceString::makeMovePrimaryTempCollectionsPrefix(const UUID& migrationId) { + return NamespaceString(DatabaseName::kConfig, + "movePrimaryRecipient." 
+ migrationId.toString() + ".willBeDeleted."); +} + NamespaceString NamespaceString::makePreImageCollectionNSS( const boost::optional& tenantId) { return NamespaceString{tenantId, DatabaseName::kConfig.db(), kPreImagesCollectionName}; @@ -273,7 +255,7 @@ NamespaceString NamespaceString::makeDummyNamespace(const boost::optional NamespaceString::getDropPendingNamespaceOpTime() const { if (!isDropPendingNamespace()) { - return Status(ErrorCodes::BadValue, - str::stream() << "Not a drop-pending namespace: " << _ns); + return Status(ErrorCodes::BadValue, fmt::format("Not a drop-pending namespace: {}", ns())); } auto collectionName = coll(); @@ -313,20 +294,20 @@ StatusWith NamespaceString::getDropPendingNamespaceOpTime() const auto incrementSeparatorIndex = opTimeStr.find('i'); if (std::string::npos == incrementSeparatorIndex) { return Status(ErrorCodes::FailedToParse, - str::stream() << "Missing 'i' separator in drop-pending namespace: " << _ns); + fmt::format("Missing 'i' separator in drop-pending namespace: {}", ns())); } auto termSeparatorIndex = opTimeStr.find('t', incrementSeparatorIndex); if (std::string::npos == termSeparatorIndex) { return Status(ErrorCodes::FailedToParse, - str::stream() << "Missing 't' separator in drop-pending namespace: " << _ns); + fmt::format("Missing 't' separator in drop-pending namespace: {}", ns())); } long long seconds; auto status = NumberParser{}(opTimeStr.substr(0, incrementSeparatorIndex), &seconds); if (!status.isOK()) { return status.withContext( - str::stream() << "Invalid timestamp seconds in drop-pending namespace: " << _ns); + fmt::format("Invalid timestamp seconds in drop-pending namespace: {}", ns())); } unsigned int increment; @@ -335,14 +316,13 @@ StatusWith NamespaceString::getDropPendingNamespaceOpTime() const &increment); if (!status.isOK()) { return status.withContext( - str::stream() << "Invalid timestamp increment in drop-pending namespace: " << _ns); + fmt::format("Invalid timestamp increment in drop-pending namespace: {}", ns())); } long long term; status = mongo::NumberParser{}(opTimeStr.substr(termSeparatorIndex + 1), &term); if (!status.isOK()) { - return status.withContext(str::stream() - << "Invalid term in drop-pending namespace: " << _ns); + return status.withContext(fmt::format("Invalid term in drop-pending namespace: {}", ns())); } return repl::OpTime(Timestamp(Seconds(seconds), increment), term); @@ -350,18 +330,19 @@ StatusWith NamespaceString::getDropPendingNamespaceOpTime() const bool NamespaceString::isNamespaceAlwaysUnsharded() const { // Local and admin never have sharded collections - if (db() == DatabaseName::kLocal.db() || db() == DatabaseName::kAdmin.db()) + if (isLocalDB() || isAdminDB()) return true; // Config can only have the system.sessions as sharded - if (db() == DatabaseName::kConfig.db()) + if (isConfigDB()) return *this != NamespaceString::kLogicalSessionsNamespace; - if (isSystemDotProfile()) - return true; - - if (isSystemDotViews()) - return true; + if (isSystem()) { + // Only some system collections (.system.) can be sharded, + // all the others are always unsharded. 
+ // This list does not contain 'config.system.sessions' because we already check it above + return !isTemporaryReshardingCollection() && !isTimeseriesBucketsCollection(); + } return false; } @@ -387,11 +368,11 @@ bool NamespaceString::isTimeseriesBucketsCollection() const { } bool NamespaceString::isChangeStreamPreImagesCollection() const { - return _dbName.db() == DatabaseName::kConfig.db() && coll() == kPreImagesCollectionName; + return isConfigDB() && coll() == kPreImagesCollectionName; } bool NamespaceString::isChangeCollection() const { - return _dbName.db() == DatabaseName::kConfig.db() && coll() == kChangeCollectionName; + return isConfigDB() && coll() == kChangeCollectionName; } bool NamespaceString::isConfigImagesCollection() const { @@ -416,6 +397,11 @@ bool NamespaceString::isSystemStatsCollection() const { return coll().startsWith(kStatisticsCollectionPrefix); } +bool NamespaceString::isOutTmpBucketsCollection() const { + return isTimeseriesBucketsCollection() && + getTimeseriesViewNamespace().coll().startsWith(kOutTmpCollectionPrefix); +} + NamespaceString NamespaceString::makeTimeseriesBucketsNamespace() const { return {dbName(), kTimeseriesBucketsCollectionPrefix.toString() + coll()}; } @@ -426,18 +412,21 @@ NamespaceString NamespaceString::getTimeseriesViewNamespace() const { } bool NamespaceString::isImplicitlyReplicated() const { - if (isChangeStreamPreImagesCollection() || isConfigImagesCollection() || isChangeCollection()) { - // Implicitly replicated namespaces are replicated, although they only replicate a subset of - // writes. - invariant(isReplicated()); - return true; + if (db() == DatabaseName::kConfig.db()) { + if (isChangeStreamPreImagesCollection() || isConfigImagesCollection() || + isChangeCollection()) { + // Implicitly replicated namespaces are replicated, although they only replicate a + // subset of writes. + invariant(isReplicated()); + return true; + } } return false; } bool NamespaceString::isReplicated() const { - if (isLocal()) { + if (isLocalDB()) { return false; } @@ -455,37 +444,27 @@ bool NamespaceString::isReplicated() const { return true; } -Status NamespaceStringOrUUID::isNssValid() const { - if (!_nss || _nss->isValid()) { - return Status::OK(); +std::string NamespaceStringOrUUID::toStringForErrorMsg() const { + if (isNamespaceString()) { + return nss().toStringForErrorMsg(); } - // _nss is set and not valid. 
- return {ErrorCodes::InvalidNamespace, - str::stream() << "Namespace " << _nss << " is not a valid collection name"}; + return uuid().toString(); } -std::string NamespaceStringOrUUID::toString() const { - if (_nss) - return _nss->toString(); - else - return _uuid->toString(); +std::string toStringForLogging(const NamespaceStringOrUUID& nssOrUUID) { + if (nssOrUUID.isNamespaceString()) { + return toStringForLogging(nssOrUUID.nss()); + } + + return nssOrUUID.uuid().toString(); } void NamespaceStringOrUUID::serialize(BSONObjBuilder* builder, StringData fieldName) const { - invariant(_uuid || _nss); - if (_preferNssForSerialization) { - if (_nss) { - builder->append(fieldName, _nss->coll()); - } else { - _uuid->appendToBuilder(builder, fieldName); - } + if (const NamespaceString* nss = get_if(&_nssOrUUID)) { + builder->append(fieldName, nss->coll()); } else { - if (_uuid) { - _uuid->appendToBuilder(builder, fieldName); - } else { - builder->append(fieldName, _nss->coll()); - } + get<1>(get(_nssOrUUID)).appendToBuilder(builder, fieldName); } } diff --git a/src/mongo/db/namespace_string.h b/src/mongo/db/namespace_string.h index 4a41986b56c68..fb6e916f664f5 100644 --- a/src/mongo/db/namespace_string.h +++ b/src/mongo/db/namespace_string.h @@ -30,25 +30,40 @@ #pragma once #include +#include +#include #include +#include +#include +#include +#include +#include #include #include #include +#include +#include +#include +#include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/util/builder.h" #include "mongo/db/database_name.h" #include "mongo/db/repl/optime.h" #include "mongo/db/server_options.h" #include "mongo/db/tenant_id.h" #include "mongo/logv2/log_attr.h" +#include "mongo/stdx/variant.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #include "mongo/util/uuid.h" namespace mongo { class NamespaceStringUtil; -class IDLParserContext; class NamespaceString { public: @@ -108,15 +123,8 @@ class NamespaceString { decltype(auto) dbName() const { return _get().dbName(); } - decltype(auto) toString() const { - return _get().toString(); - } - - friend std::ostream& operator<<(std::ostream& stream, const ConstantProxy& nss) { - return stream << nss.toString(); - } - friend StringBuilder& operator<<(StringBuilder& builder, const ConstantProxy& nss) { - return builder << nss.toString(); + decltype(auto) toStringForErrorMsg() const { + return _get().toStringForErrorMsg(); } private: @@ -127,8 +135,6 @@ class NamespaceString { const SharedState* _sharedState; }; - constexpr static size_t MaxDatabaseNameLen = - 128; // max str len for the db name, including null char constexpr static size_t MaxNSCollectionLenFCV42 = 120U; constexpr static size_t MaxNsCollectionLen = 255; @@ -139,9 +145,6 @@ class NamespaceString { // Reserved system namespaces - // The $external database used by X.509, LDAP, etc... - static constexpr StringData kExternalDb = "$external"_sd; - // Name for the system views collection static constexpr StringData kSystemDotViewsCollectionName = "system.views"_sd; @@ -166,7 +169,6 @@ class NamespaceString { // Prefix for orphan collections static constexpr StringData kOrphanCollectionPrefix = "orphan."_sd; - static constexpr StringData kOrphanCollectionDb = "local"_sd; // Prefix for collections that store the local resharding oplog buffer. 
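
The serialize() and toStringForErrorMsg() changes above reflect NamespaceStringOrUUID now holding a single variant rather than a pair of optionals. A minimal stand-alone model of that shape, using std:: types in place of the server's stdx wrappers and placeholder types for DatabaseName and UUID, is:

```cpp
#include <cassert>
#include <string>
#include <tuple>
#include <variant>

struct NssOrUuidModel {
    // Stand-ins: std::string models NamespaceString; the tuple models (DatabaseName, UUID).
    using UuidWithDb = std::tuple<std::string, int>;
    std::variant<std::string, UuidWithDb> value;

    bool isNamespaceString() const {
        return std::holds_alternative<std::string>(value);
    }
    const std::string& nss() const {
        return std::get<std::string>(value);
    }
    int uuid() const {
        return std::get<1>(std::get<UuidWithDb>(value));
    }
};

int main() {
    NssOrUuidModel byName{std::string{"test.coll"}};
    NssOrUuidModel byUuid{NssOrUuidModel::UuidWithDb{"test", 42}};
    assert(byName.isNamespaceString() && byName.nss() == "test.coll");
    assert(!byUuid.isNamespaceString() && byUuid.uuid() == 42);
}
```

Callers first ask which alternative is active and only then read it, which is what the rewritten serialize() above does via get_if on the variant.
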
static constexpr StringData kReshardingLocalOplogBufferPrefix = @@ -186,6 +188,9 @@ class NamespaceString { static constexpr StringData kGlobalIndexCollectionPrefix = "globalIndex."_sd; + // Prefix for the temporary collection used by the $out stage. + static constexpr StringData kOutTmpCollectionPrefix = "tmp.agg_out."_sd; + // Maintainers Note: The large set of `NamespaceString`-typed static data // members of the `NamespaceString` class representing system-reserved // collections is now generated from "namespace_string_reserved.def.h". @@ -196,7 +201,8 @@ class NamespaceString { // type is incomplete, they can't be _declared_ fully constexpr (a constexpr // limitation). #define NSS_CONSTANT(id, db, coll) static const ConstantProxy id; -#include "namespace_string_reserved.def.h" +#include "namespace_string_reserved.def.h" // IWYU pragma: keep + #undef NSS_CONSTANT /** @@ -207,7 +213,7 @@ class NamespaceString { /** * Constructs a NamespaceString for the given database. */ - explicit NamespaceString(DatabaseName dbName) : _dbName(std::move(dbName)), _ns(_dbName.db()) {} + explicit NamespaceString(DatabaseName dbName) : _data(std::move(dbName._data)) {} // TODO SERVER-65920 Remove this constructor once all constructor call sites have been updated // to pass tenantId explicitly @@ -219,15 +225,7 @@ class NamespaceString { NamespaceString(StringData db, StringData collectionName, boost::optional tenantId = boost::none) - : NamespaceString(DatabaseName(std::move(tenantId), db), collectionName) {} - - /** - * Constructs a NamespaceString from the string 'ns'. Should only be used when reading a - * namespace from disk. 'ns' is expected to contain a tenantId when running in Serverless mode. - */ - // TODO SERVER-70013 Move this function into NamespaceStringUtil, and delegate overlapping - // functionality to DatabaseNameUtil::parseDbNameFromStringExpectTenantIdInMultitenancyMode. - static NamespaceString parseFromStringExpectTenantIdInMultitenancyMode(StringData ns); + : NamespaceString(std::move(tenantId), db, collectionName) {} /** * Constructs a NamespaceString in the global config db, "config.". @@ -338,7 +336,7 @@ class NamespaceString { * Constructs a NamespaceString representing a BulkWrite namespace. The format for this * namespace is admin.$cmd.bulkWrite". */ - static NamespaceString makeBulkWriteNSS(); + static NamespaceString makeBulkWriteNSS(const boost::optional& tenantId); /** * Constructs the oplog buffer NamespaceString for the given migration id for movePrimary op. @@ -350,6 +348,11 @@ class NamespaceString { */ static NamespaceString makeMovePrimaryCollectionsToCloneNSS(const UUID& migrationId); + /** + * Constructs the NamespaceString prefix for temporary movePrimary recipient collections. + */ + static NamespaceString makeMovePrimaryTempCollectionsPrefix(const UUID& migrationId); + /** * Constructs the oplog buffer NamespaceString for the given UUID and donor shardId. */ @@ -397,48 +400,91 @@ class NamespaceString { Allow, // Deprecated }; - const boost::optional& tenantId() const { - return _dbName.tenantId(); + boost::optional tenantId() const { + if (!_hasTenantId()) { + return boost::none; + } + + return TenantId{OID::from(&_data[kDataOffset])}; } StringData db() const { // TODO SERVER-65456 Remove this function. - return _dbName.db(); + auto offset = _hasTenantId() ? 
kDataOffset + OID::kOIDSize : kDataOffset; + return StringData{_data.data() + offset, _dbNameOffsetEnd()}; } - const DatabaseName& dbName() const { - return _dbName; + /** + * This function must only be used in unit tests. + */ + StringData db_forTest() const { + auto offset = _hasTenantId() ? kDataOffset + OID::kOIDSize : kDataOffset; + return StringData{_data.data() + offset, _dbNameOffsetEnd()}; + } + + DatabaseName dbName() const { + auto offset = _hasTenantId() ? kDataOffset + OID::kOIDSize : kDataOffset; + return DatabaseName{_data.substr(0, offset + _dbNameOffsetEnd()), + DatabaseName::TrustedInitTag{}}; } StringData coll() const { - return _dotIndex == std::string::npos - ? StringData() - : StringData(_ns.c_str() + _dotIndex + 1, _ns.size() - 1 - _dotIndex); + const auto offset = + kDataOffset + _dbNameOffsetEnd() + 1 + (_hasTenantId() ? OID::kOIDSize : 0); + if (offset > _data.size()) { + return {}; + } + + return StringData{_data.data() + offset, _data.size() - offset}; } - const std::string& ns() const { - return _ns; + StringData ns() const { + auto offset = _hasTenantId() ? kDataOffset + OID::kOIDSize : kDataOffset; + return StringData{_data.data() + offset, _data.size() - offset}; } - const std::string& toString() const { + StringData ns_forTest() const { return ns(); } - std::string toStringWithTenantId() const { - if (auto tenantId = _dbName.tenantId()) - return str::stream() << *tenantId << '_' << ns(); + /** + * Gets a namespace string without tenant id. + * + * MUST only be used for tests. + */ + std::string toString_forTest() const { + return toString(); + } - return ns(); + /** + * Returns a namespace string without tenant id. Only to be used when a tenant id cannot be + * tolerated in the serialized output, and should otherwise be avoided whenever possible. + */ + std::string serializeWithoutTenantPrefix_UNSAFE() const { + return toString(); + } + + /** + * Gets a namespace string with tenant id. + * + * MUST only be used for tests. + */ + std::string toStringWithTenantId_forTest() const { + return toStringWithTenantId(); + } + + /** + * This function should only be used when creating a resouce id for nss. + */ + std::string toStringForResourceId() const { + return toStringWithTenantId(); } /** * This function should only be used when logging a NamespaceString in an error message. */ std::string toStringForErrorMsg() const { - if (auto tenantId = _dbName.tenantId()) - return str::stream() << *tenantId << '_' << ns(); - - return ns(); + return toStringWithTenantId(); } /** @@ -446,18 +492,16 @@ class NamespaceString { * It is called anytime a NamespaceString is logged by logAttrs or otherwise. */ friend std::string toStringForLogging(const NamespaceString& nss) { - if (auto tenantId = nss.tenantId()) - return str::stream() << *tenantId << '_' << nss.ns(); - - return nss.ns(); + return nss.toStringWithTenantId(); } size_t size() const { - return _ns.size(); + auto offset = _hasTenantId() ? 
kDataOffset + OID::kOIDSize : kDataOffset; + return _data.size() - offset; } bool isEmpty() const { - return _ns.empty(); + return _data.size() == kDataOffset; } // @@ -465,13 +509,13 @@ class NamespaceString { // bool isHealthlog() const { - return isLocal() && coll() == "system.healthlog"; + return isLocalDB() && coll() == "system.healthlog"; } bool isSystem() const { return coll().startsWith("system."); } bool isNormalCollection() const { - return !isSystem() && !(isLocal() && coll().startsWith("replset.")); + return !isSystem() && !(isLocalDB() && coll().startsWith("replset.")); } bool isGlobalIndex() const { return coll().startsWith(kGlobalIndexCollectionPrefix); @@ -479,7 +523,7 @@ class NamespaceString { bool isAdminDB() const { return db() == DatabaseName::kAdmin.db(); } - bool isLocal() const { + bool isLocalDB() const { return db() == DatabaseName::kLocal.db(); } bool isSystemDotProfile() const { @@ -496,7 +540,7 @@ class NamespaceString { return coll() == kSystemDotJavascriptCollectionName; } bool isServerConfigurationCollection() const { - return (db() == DatabaseName::kAdmin.db()) && (coll() == "system.version"); + return isAdminDB() && (coll() == "system.version"); } bool isPrivilegeCollection() const { if (!isAdminDB()) { @@ -511,20 +555,14 @@ class NamespaceString { return coll() == "$cmd"; } bool isOplog() const { - return oplog(_ns); + return oplog(ns()); } bool isOnInternalDb() const { - if (db() == DatabaseName::kAdmin.db()) - return true; - if (db() == DatabaseName::kLocal.db()) - return true; - if (db() == DatabaseName::kConfig.db()) - return true; - return false; + return isAdminDB() || isLocalDB() || isConfigDB(); } bool isOrphanCollection() const { - return db() == kOrphanCollectionDb && coll().startsWith(kOrphanCollectionPrefix); + return isLocalDB() && coll().startsWith(kOrphanCollectionPrefix); } /** @@ -593,6 +631,12 @@ class NamespaceString { */ bool isSystemStatsCollection() const; + /** + * Returns true if the collection starts with "system.buckets.tmp.agg_out". Used for $out to + * time-series collections. + */ + bool isOutTmpBucketsCollection() const; + /** * Returns the time-series buckets namespace for this view. */ @@ -675,7 +719,7 @@ class NamespaceString { * valid. */ bool isValid(DollarInDbNameBehavior behavior = DollarInDbNameBehavior::Allow) const { - return validDBName(db(), behavior) && !coll().empty(); + return validDBName(dbName(), behavior) && !coll().empty(); } /** @@ -717,7 +761,7 @@ class NamespaceString { static bool validDBName(const DatabaseName& dbName, DollarInDbNameBehavior behavior = DollarInDbNameBehavior::Disallow) { - return validDBName(dbName.db(), behavior); + return validDBName(dbName.toStringWithTenantId(), behavior); } /** @@ -732,7 +776,7 @@ class NamespaceString { * @param ns - a full namespace (a.b) * @return if db.coll is an allowed collection name */ - static bool validCollectionComponent(StringData ns); + static bool validCollectionComponent(const NamespaceString& ns); /** * Takes a collection name and returns true if it is a valid collection name. 
@@ -747,44 +791,46 @@ class NamespaceString { */ static bool validCollectionName(StringData coll); - friend std::ostream& operator<<(std::ostream& stream, const NamespaceString& nss) { - return stream << nss.toString(); - } + int compare(const NamespaceString& other) const { + if (_hasTenantId() && !other._hasTenantId()) { + return 1; + } + + if (other._hasTenantId() && !_hasTenantId()) { + return -1; + } - friend StringBuilder& operator<<(StringBuilder& builder, const NamespaceString& nss) { - return builder << nss.toString(); + return StringData{_data.data() + kDataOffset, _data.size() - kDataOffset}.compare( + StringData{other._data.data() + kDataOffset, other._data.size() - kDataOffset}); } - friend bool operator==(const NamespaceString& a, const NamespaceString& b) { - return a._lens() == b._lens(); + friend bool operator==(const NamespaceString& lhs, const NamespaceString& rhs) { + return lhs._data == rhs._data; } - friend bool operator!=(const NamespaceString& a, const NamespaceString& b) { - return a._lens() != b._lens(); + friend bool operator!=(const NamespaceString& lhs, const NamespaceString& rhs) { + return lhs._data != rhs._data; } - friend bool operator<(const NamespaceString& a, const NamespaceString& b) { - return a._lens() < b._lens(); + friend bool operator<(const NamespaceString& lhs, const NamespaceString& rhs) { + return lhs.compare(rhs) < 0; } - friend bool operator>(const NamespaceString& a, const NamespaceString& b) { - return a._lens() > b._lens(); + friend bool operator<=(const NamespaceString& lhs, const NamespaceString& rhs) { + return lhs.compare(rhs) <= 0; } - friend bool operator<=(const NamespaceString& a, const NamespaceString& b) { - return a._lens() <= b._lens(); + friend bool operator>(const NamespaceString& lhs, const NamespaceString& rhs) { + return lhs.compare(rhs) > 0; } - friend bool operator>=(const NamespaceString& a, const NamespaceString& b) { - return a._lens() >= b._lens(); + friend bool operator>=(const NamespaceString& lhs, const NamespaceString& rhs) { + return lhs.compare(rhs) >= 0; } template friend H AbslHashValue(H h, const NamespaceString& nss) { - if (nss.tenantId()) { - return H::combine(std::move(h), nss._dbName.tenantId().get(), nss._ns); - } - return H::combine(std::move(h), nss._ns); + return H::combine(std::move(h), nss._data); } friend auto logAttrs(const NamespaceString& nss) { @@ -793,9 +839,6 @@ class NamespaceString { private: friend NamespaceStringUtil; - // TODO SERVER-74897 IDLParserContext should no longer be a friend once IDL generated commands - // call into NamespaceStringUtil directly to construct NamespaceStrings. - friend IDLParserContext; /** * In order to construct NamespaceString objects, use NamespaceStringUtil. The functions @@ -806,39 +849,29 @@ class NamespaceString { * Constructs a NamespaceString from the fully qualified namespace named in "ns" and the * tenantId. "ns" is NOT expected to contain the tenantId. */ - explicit NamespaceString(boost::optional tenantId, StringData ns) { - _dotIndex = ns.find("."); - - uassert(ErrorCodes::InvalidNamespace, - "namespaces cannot have embedded null characters", - ns.find('\0') == std::string::npos); - - StringData db = ns.substr(0, _dotIndex); - _dbName = DatabaseName(std::move(tenantId), db); - _ns = ns.toString(); - } + explicit NamespaceString(boost::optional tenantId, StringData ns) + : _data(makeData(tenantId, ns)) {} /** * Constructs a NamespaceString for the given database and collection names. 
* "dbName" must not contain a ".", and "collectionName" must not start with one. */ - NamespaceString(DatabaseName dbName, StringData collectionName) - : _dbName(std::move(dbName)), _ns(str::stream() << _dbName.db() << '.' << collectionName) { - const auto& db = _dbName.db(); - - uassert(ErrorCodes::InvalidNamespace, - "'.' is an invalid character in the database name: " + db, - db.find('.') == std::string::npos); + NamespaceString(DatabaseName dbName, StringData collectionName) { uassert(ErrorCodes::InvalidNamespace, "Collection names cannot start with '.': " + collectionName, collectionName.empty() || collectionName[0] != '.'); - - _dotIndex = db.size(); - dassert(_ns[_dotIndex] == '.'); - uassert(ErrorCodes::InvalidNamespace, "namespaces cannot have embedded null characters", - _ns.find('\0') == std::string::npos); + collectionName.find('\0') == std::string::npos); + + _data.resize(dbName._data.size() + 1 + collectionName.size()); + std::memcpy(_data.data(), dbName._data.data(), dbName._data.size()); + *reinterpret_cast(_data.data() + dbName._data.size()) = '.'; + if (!collectionName.empty()) { + std::memcpy(_data.data() + dbName._data.size() + 1, + collectionName.rawData(), + collectionName.size()); + } } /** @@ -847,16 +880,100 @@ class NamespaceString { * NOT expected to contain a tenantId. */ NamespaceString(boost::optional tenantId, StringData db, StringData collectionName) - : NamespaceString(DatabaseName(std::move(tenantId), db), collectionName) {} + : _data(makeData(tenantId, db, collectionName)) {} + + std::string toString() const { + return ns().toString(); + } + std::string toStringWithTenantId() const { + if (_hasTenantId()) { + return str::stream() << TenantId{OID::from(&_data[kDataOffset])} << "_" << ns(); + } + + return ns().toString(); + } + + static constexpr size_t kDataOffset = sizeof(uint8_t); + static constexpr uint8_t kTenantIdMask = 0x80; + static constexpr uint8_t kDatabaseNameOffsetEndMask = 0x7F; + + inline bool _hasTenantId() const { + return static_cast(_data.front()) & kTenantIdMask; + } - std::tuple&, const std::string&> _lens() const { - return std::tie(tenantId(), ns()); + inline size_t _dbNameOffsetEnd() const { + return static_cast(_data.front()) & kDatabaseNameOffsetEndMask; } - DatabaseName _dbName; - std::string _ns; - size_t _dotIndex = std::string::npos; + std::string makeData(boost::optional tenantId, + StringData db, + StringData collectionName) { + uassert(ErrorCodes::InvalidNamespace, + "namespaces cannot have embedded null characters", + db.find('\0') == std::string::npos && + collectionName.find('\0') == std::string::npos); + uassert(ErrorCodes::InvalidNamespace, + fmt::format("Collection names cannot start with '.': {}", collectionName), + collectionName.empty() || collectionName[0] != '.'); + uassert(ErrorCodes::InvalidNamespace, + fmt::format("db name must be at most {} characters, found: {}", + DatabaseName::kMaxDatabaseNameLength, + db.size()), + db.size() <= DatabaseName::kMaxDatabaseNameLength); + + uint8_t details = db.size() & kDatabaseNameOffsetEndMask; + size_t dbStartIndex = kDataOffset; + if (tenantId) { + dbStartIndex += OID::kOIDSize; + details |= kTenantIdMask; + } + + std::string data; + data.resize(collectionName.empty() ? 
dbStartIndex + db.size() + : dbStartIndex + db.size() + 1 + collectionName.size()); + *reinterpret_cast(data.data()) = details; + if (tenantId) { + std::memcpy(data.data() + kDataOffset, tenantId->_oid.view().view(), OID::kOIDSize); + } + + if (!db.empty()) { + std::memcpy(data.data() + dbStartIndex, db.rawData(), db.size()); + } + + if (!collectionName.empty()) { + *reinterpret_cast(data.data() + dbStartIndex + db.size()) = '.'; + std::memcpy(data.data() + dbStartIndex + db.size() + 1, + collectionName.rawData(), + collectionName.size()); + } + + return data; + } + + std::string makeData(boost::optional tenantId, StringData ns) { + auto dotIndex = ns.find('.'); + if (dotIndex == std::string::npos) { + return makeData(tenantId, ns, {}); + } + + return makeData(tenantId, ns.substr(0, dotIndex), ns.substr(dotIndex + 1, ns.size())); + } + + // In order to reduce the size of a NamespaceString, we pack all possible namespace data + // into a single std::string with the following in-memory layout: + // + // 1 byte 12 byte optional tenant id remaining bytes + // discriminator (see more below) namespace + // |<------------->|<--------------------------->|<-------------------------------------->| + // [---------------|----|----|----|----|----|----|----|----|----|----|----|----|----|----|] + // 0 1 12 ?? + // + // The MSB of the discriminator tells us whether a tenant id is present, and the remaining + // bits store the offset of end of the databaes component of the namespace. Database names + // must be 64 characters or shorter, so we can be confident the length will fit in three bits. + + std::string _data{'\0'}; }; /** @@ -866,85 +983,71 @@ class NamespaceString { class NamespaceStringOrUUID { public: NamespaceStringOrUUID() = delete; - NamespaceStringOrUUID(NamespaceString nss) : _nss(std::move(nss)) {} + NamespaceStringOrUUID(NamespaceString nss) : _nssOrUUID(std::move(nss)) {} NamespaceStringOrUUID(const NamespaceString::ConstantProxy& nss) : NamespaceStringOrUUID{static_cast(nss)} {} NamespaceStringOrUUID(DatabaseName dbname, UUID uuid) - : _uuid(std::move(uuid)), _dbname(std::move(dbname)) {} - NamespaceStringOrUUID(boost::optional tenantId, std::string db, UUID uuid) - : _uuid(std::move(uuid)), _dbname(DatabaseName(std::move(tenantId), std::move(db))) {} + : _nssOrUUID(UUIDWithDbName{std::move(dbname), std::move(uuid)}) {} // TODO SERVER-65920 Remove once all call sites have been changed to take tenantId explicitly NamespaceStringOrUUID(std::string db, UUID uuid, boost::optional tenantId = boost::none) - : _uuid(std::move(uuid)), _dbname(DatabaseName(std::move(tenantId), std::move(db))) {} + : _nssOrUUID( + UUIDWithDbName{DatabaseName{std::move(tenantId), std::move(db)}, std::move(uuid)}) {} + + bool isNamespaceString() const { + return stdx::holds_alternative(_nssOrUUID); + } - const boost::optional& nss() const { - return _nss; + const NamespaceString& nss() const { + invariant(stdx::holds_alternative(_nssOrUUID)); + return get(_nssOrUUID); } - void setNss(const NamespaceString& nss) { - _nss = nss; + bool isUUID() const { + return stdx::holds_alternative(_nssOrUUID); } - const boost::optional& uuid() const { - return _uuid; + const UUID& uuid() const { + invariant(stdx::holds_alternative(_nssOrUUID)); + return get<1>(get(_nssOrUUID)); } /** - * Returns database name if this object was initialized with a UUID. + * Returns database name as a string. 
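
The makeData() implementation and the layout comment above describe how every NamespaceString is now packed into a single string: a discriminator byte whose most significant bit flags the presence of a tenant id and whose low seven bits (mask 0x7F) hold the length of the database name, then an optional 12-byte object id, then "db.coll". A self-contained sketch of that scheme, with standard-library stand-ins for OID and TenantId and the same offset arithmetic as the db() accessor and _dbNameOffsetEnd():

```cpp
#include <array>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <optional>
#include <string>

namespace sketch {
constexpr std::size_t kDataOffset = 1;           // one leading discriminator byte
constexpr std::size_t kOIDSize = 12;             // stand-in for OID::kOIDSize
constexpr std::uint8_t kTenantIdMask = 0x80;     // MSB: tenant id present
constexpr std::uint8_t kDbOffsetEndMask = 0x7F;  // low 7 bits: db-name length

using TenantOid = std::array<std::uint8_t, kOIDSize>;

std::string pack(const std::optional<TenantOid>& tenant,
                 const std::string& db,
                 const std::string& coll) {
    std::uint8_t details = static_cast<std::uint8_t>(db.size()) & kDbOffsetEndMask;
    std::size_t dbStart = kDataOffset;
    if (tenant) {
        dbStart += kOIDSize;
        details |= kTenantIdMask;
    }
    std::string data(dbStart + db.size() + (coll.empty() ? 0 : 1 + coll.size()), '\0');
    data[0] = static_cast<char>(details);
    if (tenant)
        std::memcpy(&data[kDataOffset], tenant->data(), kOIDSize);
    std::memcpy(&data[dbStart], db.data(), db.size());
    if (!coll.empty()) {
        data[dbStart + db.size()] = '.';
        std::memcpy(&data[dbStart + db.size() + 1], coll.data(), coll.size());
    }
    return data;
}

// Accessors mirror the offset arithmetic used by db() and coll() above.
bool hasTenant(const std::string& d) {
    return d[0] & kTenantIdMask;
}
std::string db(const std::string& d) {
    std::size_t off = kDataOffset + (hasTenant(d) ? kOIDSize : 0);
    return d.substr(off, static_cast<std::uint8_t>(d[0]) & kDbOffsetEndMask);
}
}  // namespace sketch

int main() {
    auto packed = sketch::pack(std::nullopt, "test", "coll");
    assert(packed.size() == 1 + 4 + 1 + 4);  // discriminator + "test" + '.' + "coll"
    assert(sketch::db(packed) == "test");
    assert(!sketch::hasTenant(packed));
}
```
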
* * TODO SERVER-66887 remove this function for better clarity once call sites have been changed */ std::string dbname() const { - return _dbname ? _dbname->db() : ""; + return dbName().db().toString(); } - const boost::optional& dbName() const { - return _dbname; - } + /** + * Returns the database name. + */ + DatabaseName dbName() const { + if (stdx::holds_alternative(_nssOrUUID)) { + return get(_nssOrUUID).dbName(); + } - void preferNssForSerialization() { - _preferNssForSerialization = true; + return get<0>(get(_nssOrUUID)); } /** - * Returns database name derived from either '_nss' or '_dbname'. + * This function should only be used when logging a NamespaceStringOrUUID in an error message. */ - StringData db() const { - return _nss ? _nss->db() : StringData(_dbname->db()); - } + std::string toStringForErrorMsg() const; /** - * Returns OK if either the nss is not set or is a valid nss. Otherwise returns an - * InvalidNamespace error. + * Method to be used only when logging a NamespaceStringOrUUID in a log message. */ - Status isNssValid() const; - - std::string toString() const; + friend std::string toStringForLogging(const NamespaceStringOrUUID& nssOrUUID); void serialize(BSONObjBuilder* builder, StringData fieldName) const; - friend std::ostream& operator<<(std::ostream& stream, const NamespaceStringOrUUID& o) { - return stream << o.toString(); - } - - friend StringBuilder& operator<<(StringBuilder& builder, const NamespaceStringOrUUID& o) { - return builder << o.toString(); - } - private: - // At any given time exactly one of these optionals will be initialized. - boost::optional _nss; - boost::optional _uuid; - - // When seralizing, if both '_nss' and '_uuid' are present, use '_nss'. - bool _preferNssForSerialization = false; - - // Empty when '_nss' is non-none, and contains the database name when '_uuid' is - // non-none. Although the UUID specifies a collection uniquely, we must later verify that the - // collection belongs to the database named here. 
- boost::optional _dbname; + using UUIDWithDbName = std::tuple; + stdx::variant _nssOrUUID; }; /** @@ -954,12 +1057,10 @@ inline StringData nsToDatabaseSubstring(StringData ns) { size_t i = ns.find('.'); if (i == std::string::npos) { massert( - 10078, "nsToDatabase: db too long", ns.size() < NamespaceString::MaxDatabaseNameLen); + 10078, "nsToDatabase: db too long", ns.size() <= DatabaseName::kMaxDatabaseNameLength); return ns; } - massert(10088, - "nsToDatabase: db too long", - i < static_cast(NamespaceString::MaxDatabaseNameLen)); + massert(10088, "nsToDatabase: db too long", i <= DatabaseName::kMaxDatabaseNameLength); return ns.substr(0, i); } @@ -1008,7 +1109,7 @@ inline bool nsIsDbOnly(StringData ns) { } inline bool NamespaceString::validDBName(StringData db, DollarInDbNameBehavior behavior) { - if (db.size() == 0 || db.size() >= 64) + if (db.size() == 0 || db.size() > DatabaseName::kMaxDatabaseNameLength) return false; for (StringData::const_iterator iter = db.begin(), end = db.end(); iter != end; ++iter) { @@ -1041,12 +1142,13 @@ inline bool NamespaceString::validDBName(StringData db, DollarInDbNameBehavior b return true; } -inline bool NamespaceString::validCollectionComponent(StringData ns) { - size_t idx = ns.find('.'); +inline bool NamespaceString::validCollectionComponent(const NamespaceString& ns) { + const auto nsStr = ns.ns(); + size_t idx = nsStr.find('.'); if (idx == std::string::npos) return false; - return validCollectionName(ns.substr(idx + 1)) || oplog(ns); + return validCollectionName(nsStr.substr(idx + 1)) || oplog(nsStr); } inline bool NamespaceString::validCollectionName(StringData coll) { @@ -1069,6 +1171,10 @@ inline bool NamespaceString::validCollectionName(StringData coll) { return true; } +inline std::string stringifyForAssert(const NamespaceString& nss) { + return toStringForLogging(nss); +} + // Here are the `constexpr` definitions for the `NamespaceString::ConstantProxy` // constant static data members of `NamespaceString`. They cannot be defined // `constexpr` inside the class definition, but they can be upgraded to @@ -1078,14 +1184,16 @@ inline bool NamespaceString::validCollectionName(StringData coll) { namespace nss_detail::const_proxy_shared_states { #define NSS_CONSTANT(id, db, coll) \ constexpr inline NamespaceString::ConstantProxy::SharedState id{db, coll}; -#include "namespace_string_reserved.def.h" +#include "namespace_string_reserved.def.h" // IWYU pragma: keep + #undef NSS_CONSTANT } // namespace nss_detail::const_proxy_shared_states #define NSS_CONSTANT(id, db, coll) \ constexpr inline NamespaceString::ConstantProxy NamespaceString::id{ \ &nss_detail::const_proxy_shared_states::id}; -#include "namespace_string_reserved.def.h" +#include "namespace_string_reserved.def.h" // IWYU pragma: keep + #undef NSS_CONSTANT } // namespace mongo diff --git a/src/mongo/db/namespace_string_reserved.def.h b/src/mongo/db/namespace_string_reserved.def.h index 2667967d44029..aba4146ea3d66 100644 --- a/src/mongo/db/namespace_string_reserved.def.h +++ b/src/mongo/db/namespace_string_reserved.def.h @@ -84,17 +84,6 @@ NSS_CONSTANT(kMigrationCoordinatorsNamespace, DatabaseName::kConfig, "migrationC // Namespace for storing the persisted state of migration recipients. NSS_CONSTANT(kMigrationRecipientsNamespace, DatabaseName::kConfig, "migrationRecipients"_sd) -// Namespace for storing the persisted state of movePrimary operation recipients. 
-NSS_CONSTANT(kMovePrimaryRecipientNamespace, DatabaseName::kConfig, "movePrimaryRecipients"_sd) - -// Namespace for storing the oplog applier progress of movePrimary operations at recipient. -NSS_CONSTANT(kMovePrimaryApplierProgressNamespace, - DatabaseName::kConfig, - "movePrimaryRecipients.applierProgress"_sd) - -// Namespace for storing the persisted state of movePrimary operation donors. -NSS_CONSTANT(kMovePrimaryDonorNamespace, DatabaseName::kConfig, "movePrimaryDonors"_sd) - // Namespace for storing the persisted state of tenant migration donors. NSS_CONSTANT(kTenantMigrationDonorsNamespace, DatabaseName::kConfig, "tenantMigrationDonors"_sd) @@ -218,10 +207,6 @@ NSS_CONSTANT(kShardCollectionCatalogNamespace, DatabaseName::kConfig, "shard.col // Namespace used for storing NamespacePlacementType docs on the CSRS. NSS_CONSTANT(kConfigsvrPlacementHistoryNamespace, DatabaseName::kConfig, "placementHistory"_sd) -// Identifier for the "initialization metadata descriptors" contained by -// kConfigsvrPlacementHistoryNamespace -NSS_CONSTANT(kConfigsvrPlacementHistoryFcvMarkerNamespace, DatabaseName::kEmpty, StringData{}) - // TODO SERVER-68551: remove once 7.0 becomes last-lts NSS_CONSTANT(kLockpingsNamespace, DatabaseName::kConfig, "lockpings"_sd) @@ -289,3 +274,8 @@ NSS_CONSTANT(kConfigVersionNamespace, DatabaseName::kConfig, "version"_sd) // Namespace used for storing mongos info on the CSRS. NSS_CONSTANT(kConfigMongosNamespace, DatabaseName::kConfig, "mongos"_sd) + +// Namespace used for oplog truncate after point. +NSS_CONSTANT(kDefaultOplogTruncateAfterPointNamespace, + DatabaseName::kLocal, + "replset.oplogTruncateAfterPoint"_sd) diff --git a/src/mongo/db/namespace_string_test.cpp b/src/mongo/db/namespace_string_test.cpp index 69e4b6348ac1d..a8465ded6e9eb 100644 --- a/src/mongo/db/namespace_string_test.cpp +++ b/src/mongo/db/namespace_string_test.cpp @@ -27,17 +27,23 @@ * it in the license file. 
*/ -#include +#include -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/multitenancy_gen.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/optime.h" -#include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/namespace_string_util.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -48,13 +54,13 @@ using namespace fmt::literals; TEST(NamespaceStringTest, CheckNamespaceStringLogAttrs) { TenantId tenantId(OID::gen()); - DatabaseName dbName(tenantId, "foo"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(tenantId, "foo"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(dbName, "bar"); startCapturingLogMessages(); LOGV2(7311500, "Msg nss:", logAttrs(nss)); - std::string nssAsString = str::stream() << *(nss.tenantId()) << '_' << nss.ns(); + std::string nssAsString = str::stream() << *(nss.tenantId()) << '_' << nss.ns_forTest(); ASSERT_EQUALS( 1, countBSONFormatLogLinesIsSubset(BSON("attr" << BSON("namespace" << nssAsString)))); @@ -122,6 +128,15 @@ TEST(NamespaceStringTest, DatabaseValidNames) { "ThisIsADatabaseNameThatBrokeAllRecordsForValidLengthForDBName63")); ASSERT(!NamespaceString::validDBName( "WhileThisDatabaseNameExceedsTheMaximumLengthForDatabaseNamesof63")); + + ASSERT_THROWS_CODE( + NamespaceString{"WhileThisDatabaseNameExceedsTheMaximumLengthForDatabaseNamesof63"}, + AssertionException, + ErrorCodes::InvalidNamespace); + + const TenantId tenantId(OID::gen()); + ASSERT(!NamespaceString::validDBName(DatabaseName::createDatabaseName_forTest( + tenantId, "ATenantDBNameWithValidLength38ButHasA$"))); } TEST(NamespaceStringTest, ListCollectionsCursorNS) { @@ -242,12 +257,17 @@ TEST(NamespaceStringTest, GetDropPendingNamespaceOpTime) { NamespaceString{"test.system.drop.1234i111taaa.foo"}.getDropPendingNamespaceOpTime()); } -TEST(NamespaceStringTest, CollectionComponentValidNames) { - ASSERT(NamespaceString::validCollectionComponent("a.b")); - ASSERT(NamespaceString::validCollectionComponent("a.b")); - ASSERT(!NamespaceString::validCollectionComponent("a.")); - ASSERT(!NamespaceString::validCollectionComponent("a..foo")); - ASSERT(NamespaceString::validCollectionComponent("a.b.")); // TODO: should this change? 
+TEST(NamespaceStringTest, CollectionComponentValidNamesWithNamespaceString) { + ASSERT(NamespaceString::validCollectionComponent( + NamespaceString::createNamespaceString_forTest("a.b"))); + ASSERT(!NamespaceString::validCollectionComponent( + NamespaceString::createNamespaceString_forTest("a."))); + ASSERT_THROWS_CODE(NamespaceString::validCollectionComponent( + NamespaceString::createNamespaceString_forTest("a..foo")), + AssertionException, + ErrorCodes::InvalidNamespace); + ASSERT(NamespaceString::validCollectionComponent( + NamespaceString::createNamespaceString_forTest("a.b."))); } TEST(NamespaceStringTest, CollectionValidNames) { @@ -273,37 +293,38 @@ TEST(NamespaceStringTest, nsToDatabase1) { TEST(NamespaceStringTest, NamespaceStringParse1) { NamespaceString ns = NamespaceString::createNamespaceString_forTest("a.b"); - ASSERT_EQUALS(std::string("a"), ns.db()); + ASSERT_EQUALS(std::string("a"), ns.db_forTest()); ASSERT_EQUALS(std::string("b"), ns.coll()); } TEST(NamespaceStringTest, NamespaceStringParse2) { NamespaceString ns = NamespaceString::createNamespaceString_forTest("a.b.c"); - ASSERT_EQUALS(std::string("a"), ns.db()); + ASSERT_EQUALS(std::string("a"), ns.db_forTest()); ASSERT_EQUALS(std::string("b.c"), ns.coll()); } TEST(NamespaceStringTest, NamespaceStringParse3) { NamespaceString ns = NamespaceString::createNamespaceString_forTest("abc"); - ASSERT_EQUALS(std::string("abc"), ns.db()); + ASSERT_EQUALS(std::string("abc"), ns.db_forTest()); ASSERT_EQUALS(std::string(""), ns.coll()); } TEST(NamespaceStringTest, NamespaceStringParse4) { NamespaceString ns = NamespaceString::createNamespaceString_forTest("abc."); - ASSERT_EQUALS(std::string("abc"), ns.db()); + ASSERT_EQUALS(std::string("abc"), ns.db_forTest()); ASSERT(ns.coll().empty()); } TEST(NamespaceStringTest, NamespaceStringParse5) { NamespaceString ns = NamespaceString::createNamespaceString_forTest("abc", ""); - ASSERT_EQUALS(std::string("abc"), ns.db()); + ASSERT_EQUALS(std::string("abc"), ns.db_forTest()); ASSERT(ns.coll().empty()); } TEST(NamespaceStringTest, makeListCollectionsNSIsCorrect) { - NamespaceString ns = NamespaceString::makeListCollectionsNSS(DatabaseName(boost::none, "DB")); - ASSERT_EQUALS("DB", ns.db()); + NamespaceString ns = NamespaceString::makeListCollectionsNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "DB")); + ASSERT_EQUALS("DB", ns.db_forTest()); ASSERT_EQUALS("$cmd.listCollections", ns.coll()); ASSERT(ns.isValid()); ASSERT(ns.isListCollectionsCursorNS()); @@ -318,41 +339,177 @@ TEST(NamespaceStringTest, EmptyNSStringReturnsEmptyColl) { TEST(NamespaceStringTest, EmptyNSStringReturnsEmptyDb) { NamespaceString nss{}; ASSERT_TRUE(nss.isEmpty()); - ASSERT_EQ(nss.db(), StringData{}); + ASSERT_EQ(nss.db_forTest(), StringData{}); +} + +TEST(NamespaceStringTest, EmptyDbWithColl) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("", "coll"); + ASSERT_EQ(nss.db_forTest(), StringData{}); + ASSERT_EQ(nss.coll(), "coll"); } TEST(NamespaceStringTest, NSSWithTenantId) { TenantId tenantId(OID::gen()); - std::string tenantNsStr = str::stream() << tenantId.toString() << "_foo.bar"; - - NamespaceString nss = NamespaceString::createNamespaceString_forTest(tenantId, "foo.bar"); - ASSERT_EQ(nss.ns(), "foo.bar"); - ASSERT_EQ(nss.toString(), "foo.bar"); - ASSERT_EQ(nss.toStringWithTenantId(), tenantNsStr); - ASSERT(nss.tenantId()); - ASSERT_EQ(*nss.tenantId(), tenantId); - - DatabaseName dbName(tenantId, "foo"); - NamespaceString nss2 = 
NamespaceString::createNamespaceString_forTest(dbName, "bar"); - ASSERT_EQ(nss2.ns(), "foo.bar"); - ASSERT_EQ(nss2.toString(), "foo.bar"); - ASSERT_EQ(nss2.toStringWithTenantId(), tenantNsStr); - ASSERT(nss2.tenantId()); - ASSERT_EQ(*nss2.tenantId(), tenantId); - NamespaceString nss3("foo", "bar", tenantId); - ASSERT_EQ(nss3.ns(), "foo.bar"); - ASSERT_EQ(nss3.toString(), "foo.bar"); - ASSERT_EQ(nss3.toStringWithTenantId(), tenantNsStr); - ASSERT(nss3.tenantId()); - ASSERT_EQ(*nss3.tenantId(), tenantId); - - NamespaceString nss4(dbName); - ASSERT_EQ(nss4.ns(), "foo"); - ASSERT_EQ(nss4.toString(), "foo"); - ASSERT_EQ(nss4.toStringWithTenantId(), "{}_foo"_format(tenantId.toString())); - ASSERT(nss4.tenantId()); - ASSERT_EQ(*nss4.tenantId(), tenantId); + { + std::string tenantNsStr = str::stream() << tenantId.toString() << "_foo.bar"; + NamespaceString nss = NamespaceString::createNamespaceString_forTest(tenantId, "foo.bar"); + ASSERT_EQ(nss.size(), 7); + ASSERT_EQ(nss.ns_forTest(), "foo.bar"); + ASSERT_EQ(nss.toString_forTest(), "foo.bar"); + ASSERT_EQ(nss.toStringWithTenantId_forTest(), tenantNsStr); + ASSERT_EQ(nss.db_forTest(), "foo"); + ASSERT_EQ(nss.coll(), "bar"); + ASSERT_EQ(nss.dbName().toString_forTest(), "foo"); + ASSERT_EQ(nss.size(), 7); + ASSERT(nss.tenantId()); + ASSERT(nss.dbName().tenantId()); + ASSERT_EQ(*nss.tenantId(), tenantId); + ASSERT_EQ(*nss.dbName().tenantId(), tenantId); + } + + { + std::string tenantNsStr = str::stream() << tenantId.toString() << "_foo"; + NamespaceString nss = NamespaceString::createNamespaceString_forTest(tenantId, "foo"); + ASSERT_EQ(nss.size(), 3); + ASSERT_EQ(nss.ns_forTest(), "foo"); + ASSERT_EQ(nss.toString_forTest(), "foo"); + ASSERT_EQ(nss.toStringWithTenantId_forTest(), tenantNsStr); + ASSERT_EQ(nss.db_forTest(), "foo"); + ASSERT_EQ(nss.coll(), ""); + ASSERT_EQ(nss.dbName().toString_forTest(), "foo"); + ASSERT_EQ(nss.size(), 3); + ASSERT(nss.tenantId()); + ASSERT(nss.dbName().tenantId()); + ASSERT_EQ(*nss.tenantId(), tenantId); + ASSERT_EQ(*nss.dbName().tenantId(), tenantId); + } + + { + std::string tenantNsStr = str::stream() << tenantId.toString() << "_foo.bar"; + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(tenantId, "foo"); + NamespaceString nss2 = NamespaceString::createNamespaceString_forTest(dbName, "bar"); + ASSERT_EQ(nss2.size(), 7); + ASSERT_EQ(nss2.ns_forTest(), "foo.bar"); + ASSERT_EQ(nss2.toString_forTest(), "foo.bar"); + ASSERT_EQ(nss2.toStringWithTenantId_forTest(), tenantNsStr); + ASSERT_EQ(nss2.db_forTest(), "foo"); + ASSERT_EQ(nss2.coll(), "bar"); + ASSERT_EQ(nss2.dbName().toString_forTest(), "foo"); + ASSERT(nss2.tenantId()); + ASSERT(nss2.dbName().tenantId()); + ASSERT_EQ(*nss2.tenantId(), tenantId); + ASSERT_EQ(*nss2.dbName().tenantId(), tenantId); + } + + { + std::string tenantNsStr = str::stream() << tenantId.toString() << "_foo.bar"; + NamespaceString nss3 = + NamespaceString::createNamespaceString_forTest(tenantId, "foo", "bar"); + ASSERT_EQ(nss3.size(), 7); + ASSERT_EQ(nss3.ns_forTest(), "foo.bar"); + ASSERT_EQ(nss3.toString_forTest(), "foo.bar"); + ASSERT_EQ(nss3.toStringWithTenantId_forTest(), tenantNsStr); + ASSERT_EQ(nss3.db_forTest(), "foo"); + ASSERT_EQ(nss3.coll(), "bar"); + ASSERT_EQ(nss3.dbName().toString_forTest(), "foo"); + ASSERT(nss3.tenantId()); + ASSERT(nss3.dbName().tenantId()); + ASSERT_EQ(*nss3.tenantId(), tenantId); + ASSERT_EQ(*nss3.dbName().tenantId(), tenantId); + } + + { + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(tenantId, "foo"); + NamespaceString 
nss4(dbName); + ASSERT_EQ(nss4.size(), 3); + ASSERT_EQ(nss4.ns_forTest(), "foo"); + ASSERT_EQ(nss4.toString_forTest(), "foo"); + ASSERT_EQ(nss4.toStringWithTenantId_forTest(), "{}_foo"_format(tenantId.toString())); + ASSERT_EQ(nss4.db_forTest(), "foo"); + ASSERT_EQ(nss4.coll(), ""); + ASSERT_EQ(nss4.dbName().toString_forTest(), "foo"); + ASSERT(nss4.tenantId()); + ASSERT(nss4.dbName().tenantId()); + ASSERT_EQ(*nss4.tenantId(), tenantId); + ASSERT_EQ(*nss4.dbName().tenantId(), tenantId); + } + + { + NamespaceString multiNss = NamespaceString::createNamespaceString_forTest( + tenantId, "config.system.change_collection"); + ASSERT(multiNss.isConfigDB()); + ASSERT_EQ(multiNss.size(), 31); + ASSERT_EQ(multiNss.ns_forTest(), "config.system.change_collection"); + ASSERT_EQ(multiNss.toString_forTest(), "config.system.change_collection"); + ASSERT_EQ(multiNss.toStringWithTenantId_forTest(), + "{}_config.system.change_collection"_format(tenantId.toString())); + ASSERT_EQ(multiNss.db_forTest(), "config"); + ASSERT_EQ(multiNss.coll(), "system.change_collection"); + ASSERT_EQ(multiNss.dbName().toString_forTest(), "config"); + ASSERT(multiNss.tenantId()); + ASSERT(multiNss.dbName().tenantId()); + ASSERT_EQ(*multiNss.tenantId(), tenantId); + ASSERT_EQ(*multiNss.dbName().tenantId(), tenantId); + } + + { + NamespaceString empty{}; + ASSERT_EQ(empty.size(), 0); + ASSERT_EQ(empty.coll(), ""); + ASSERT_EQ(empty.tenantId(), boost::none); + ASSERT_EQ(empty.toString_forTest(), ""); + ASSERT_EQ(empty.toStringWithTenantId_forTest(), ""); + ASSERT_EQ(empty.dbName().tenantId(), boost::none); + ASSERT_EQ(empty.dbName().toString_forTest(), ""); + ASSERT_EQ(empty.dbName().toStringWithTenantId_forTest(), ""); + } + + { + NamespaceString emptyWithTenant = + NamespaceString::createNamespaceString_forTest(tenantId, ""); + ASSERT_EQ(emptyWithTenant.size(), 0); + ASSERT_EQ(emptyWithTenant.coll(), ""); + ASSERT(emptyWithTenant.tenantId()); + ASSERT_EQ(*emptyWithTenant.tenantId(), tenantId); + ASSERT_EQ(emptyWithTenant.toString_forTest(), ""); + ASSERT_EQ(emptyWithTenant.toStringWithTenantId_forTest(), + "{}_"_format(tenantId.toString())); + ASSERT(emptyWithTenant.dbName().tenantId()); + ASSERT_EQ(emptyWithTenant.dbName().tenantId(), tenantId); + ASSERT_EQ(emptyWithTenant.dbName().toString_forTest(), ""); + ASSERT_EQ(emptyWithTenant.dbName().toStringWithTenantId_forTest(), + "{}_"_format(tenantId.toString())); + } + + { + NamespaceString dbWithoutColl = NamespaceString::createNamespaceString_forTest("foo"); + ASSERT_EQ(dbWithoutColl.size(), 3); + ASSERT_EQ(dbWithoutColl.coll(), ""); + ASSERT_FALSE(dbWithoutColl.tenantId()); + ASSERT_EQ(dbWithoutColl.toString_forTest(), "foo"); + ASSERT_EQ(dbWithoutColl.toStringWithTenantId_forTest(), "foo"); + ASSERT_FALSE(dbWithoutColl.dbName().tenantId()); + ASSERT_EQ(dbWithoutColl.dbName().toString_forTest(), "foo"); + ASSERT_EQ(dbWithoutColl.dbName().toStringWithTenantId_forTest(), "foo"); + } + + { + NamespaceString dbWithoutCollWithTenant = + NamespaceString::createNamespaceString_forTest(tenantId, "foo"); + ASSERT_EQ(dbWithoutCollWithTenant.size(), 3); + ASSERT_EQ(dbWithoutCollWithTenant.coll(), ""); + ASSERT(dbWithoutCollWithTenant.tenantId()); + ASSERT_EQ(*dbWithoutCollWithTenant.tenantId(), tenantId); + ASSERT_EQ(dbWithoutCollWithTenant.toString_forTest(), "foo"); + ASSERT_EQ(dbWithoutCollWithTenant.toStringWithTenantId_forTest(), + fmt::format("{}_foo", tenantId.toString())); + ASSERT(dbWithoutCollWithTenant.dbName().tenantId()); + 
ASSERT_EQ(dbWithoutCollWithTenant.dbName().tenantId(), tenantId); + ASSERT_EQ(dbWithoutCollWithTenant.dbName().toString_forTest(), "foo"); + ASSERT_EQ(dbWithoutCollWithTenant.dbName().toStringWithTenantId_forTest(), + fmt::format("{}_foo", tenantId.toString())); + } } TEST(NamespaceStringTest, NSSNoCollectionWithTenantId) { @@ -360,36 +517,23 @@ TEST(NamespaceStringTest, NSSNoCollectionWithTenantId) { std::string tenantNsStr = str::stream() << tenantId.toString() << "_foo"; NamespaceString nss = NamespaceString::createNamespaceString_forTest(tenantId, "foo"); - ASSERT_EQ(nss.ns(), "foo"); - ASSERT_EQ(nss.toString(), "foo"); - ASSERT_EQ(nss.toStringWithTenantId(), tenantNsStr); + + ASSERT_EQ(nss.ns_forTest(), "foo"); + ASSERT_EQ(nss.toString_forTest(), "foo"); + ASSERT_EQ(nss.toStringWithTenantId_forTest(), tenantNsStr); ASSERT(nss.tenantId()); ASSERT_EQ(*nss.tenantId(), tenantId); - DatabaseName dbName(tenantId, "foo"); + DatabaseName dbName = DatabaseName::createDatabaseName_forTest(tenantId, "foo"); NamespaceString nss2 = NamespaceString::createNamespaceString_forTest(dbName, ""); ASSERT(nss2.tenantId()); ASSERT_EQ(*nss2.tenantId(), tenantId); - NamespaceString nss3("foo", "", tenantId); + NamespaceString nss3 = NamespaceString::createNamespaceString_forTest(tenantId, "foo", ""); ASSERT(nss3.tenantId()); ASSERT_EQ(*nss3.tenantId(), tenantId); } -TEST(NamespaceStringTest, ParseNSSWithTenantId) { - RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); - - TenantId tenantId(OID::gen()); - std::string tenantNsStr = str::stream() << tenantId.toString() << "_foo.bar"; - - NamespaceString nss = - NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(tenantNsStr); - ASSERT_EQ(nss.ns(), "foo.bar"); - ASSERT_EQ(nss.toStringWithTenantId(), tenantNsStr); - ASSERT(nss.tenantId()); - ASSERT_EQ(*nss.tenantId(), tenantId); -} - TEST(NamespaceStringTest, CompareNSSWithTenantId) { TenantId tenantIdMin(OID("000000000000000000000000")); TenantId tenantIdMax(OID::max()); @@ -420,6 +564,16 @@ TEST(NamespaceStringTest, CompareNSSWithTenantId) { NamespaceString::createNamespaceString_forTest(tenantIdMin, "foo.bar")); ASSERT(NamespaceString::createNamespaceString_forTest(tenantIdMin, "foo.bar") >= NamespaceString::createNamespaceString_forTest(tenantIdMin, "foo.bar")); + + + TenantId tenantId1(OID::gen()); + TenantId tenantId2(OID::gen()); + auto ns1 = NamespaceString::createNamespaceString_forTest(boost::none, "foo.bar"); + auto ns2 = NamespaceString::createNamespaceString_forTest(tenantId1, "foo.bar"); + auto ns3 = NamespaceString::createNamespaceString_forTest(tenantId2, "foo.bar"); + ASSERT_LT(ns1, ns2); + ASSERT_LT(ns1, ns3); + ASSERT_GT(ns3, ns2); } } // namespace diff --git a/src/mongo/db/nesting_depth_test.cpp b/src/mongo/db/nesting_depth_test.cpp index f60522f682bb6..592e1f32ec33d 100644 --- a/src/mongo/db/nesting_depth_test.cpp +++ b/src/mongo/db/nesting_depth_test.cpp @@ -27,16 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" -#include "mongo/bson/bson_validate.h" -#include "mongo/bson/json.h" -#include "mongo/client/connection_string.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/executor/network_interface_integration_fixture.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace executor { diff --git a/src/mongo/db/not_primary_error_tracker.cpp b/src/mongo/db/not_primary_error_tracker.cpp index c9aaca76bd53b..2418f55c9c61b 100644 --- a/src/mongo/db/not_primary_error_tracker.cpp +++ b/src/mongo/db/not_primary_error_tracker.cpp @@ -27,9 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/not_primary_error_tracker.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/op_msg_fuzzer.cpp b/src/mongo/db/op_msg_fuzzer.cpp index 4a14144e5fa50..1174382be7c43 100644 --- a/src/mongo/db/op_msg_fuzzer.cpp +++ b/src/mongo/db/op_msg_fuzzer.cpp @@ -27,9 +27,15 @@ * it in the license file. */ +#include + #include "mongo/db/op_msg_fuzzer_fixture.h" extern "C" int LLVMFuzzerTestOneInput(const char* Data, size_t Size) { - static auto fixture = mongo::OpMsgFuzzerFixture(); + static auto fixture = []() { + auto core = boost::log::core::get(); + core->set_logging_enabled(false); + return mongo::OpMsgFuzzerFixture(); + }(); return fixture.testOneInput(Data, Size); } diff --git a/src/mongo/db/op_msg_fuzzer_fixture.cpp b/src/mongo/db/op_msg_fuzzer_fixture.cpp index 158b8f293b30e..00ef4d3a41df2 100644 --- a/src/mongo/db/op_msg_fuzzer_fixture.cpp +++ b/src/mongo/db/op_msg_fuzzer_fixture.cpp @@ -27,29 +27,46 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/op_msg_fuzzer_fixture.h" - -#include "mongo/db/auth/authorization_session_for_test.h" -#include "mongo/db/auth/authz_manager_external_state_local.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_impl.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_impl.h" -#include "mongo/db/client.h" -#include "mongo/db/index/index_access_method.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/op_msg_fuzzer_fixture.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/collection_sharding_state_factory_standalone.h" -#include "mongo/db/service_entry_point_common.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_entry_point_mongod.h" #include "mongo/db/storage/control/storage_control.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/storage/storage_options.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/transport/service_entry_point_impl.h" +#include "mongo/rpc/message.h" +#include "mongo/transport/service_entry_point.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #include "mongo/util/periodic_runner_factory.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/version/releases.h" namespace mongo { diff --git a/src/mongo/db/op_msg_fuzzer_fixture.h b/src/mongo/db/op_msg_fuzzer_fixture.h index 883e417520132..c17445a90a1b3 100644 --- a/src/mongo/db/op_msg_fuzzer_fixture.h +++ b/src/mongo/db/op_msg_fuzzer_fixture.h @@ -27,6 +27,9 @@ * it in the license file. 
*/ +#include +#include + #include "mongo/bson/timestamp.h" #include "mongo/db/auth/authorization_manager_impl.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" diff --git a/src/mongo/db/op_msg_fuzzer_fixture_test.cpp b/src/mongo/db/op_msg_fuzzer_fixture_test.cpp index 28e0a705231e2..ec0b0372aa653 100644 --- a/src/mongo/db/op_msg_fuzzer_fixture_test.cpp +++ b/src/mongo/db/op_msg_fuzzer_fixture_test.cpp @@ -28,12 +28,13 @@ */ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" #include "mongo/bson/json.h" #include "mongo/db/op_msg_fuzzer_fixture.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/op_observer/SConscript b/src/mongo/db/op_observer/SConscript index cd35f4fead5fd..f9c8c8b572d93 100644 --- a/src/mongo/db/op_observer/SConscript +++ b/src/mongo/db/op_observer/SConscript @@ -8,6 +8,7 @@ env.Library( target='op_observer', source=[ 'op_observer.cpp', + 'op_observer_registry.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/base', @@ -18,12 +19,14 @@ env.Library( target='op_observer_util', source=[ 'op_observer_util.cpp', + 'batched_write_context.cpp', ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/bson/dotted_path_support', '$BUILD_DIR/mongo/db/catalog/collection_options', '$BUILD_DIR/mongo/db/shard_role_api', + '$BUILD_DIR/mongo/db/transaction/transaction_operations', ], ) @@ -52,7 +55,6 @@ env.Library( target='op_observer_impl', source=[ 'op_observer_impl.cpp', - 'batched_write_context.cpp', ], LIBDEPS=[ 'op_observer', @@ -65,6 +67,7 @@ env.Library( '$BUILD_DIR/mongo/db/catalog/database_holder', '$BUILD_DIR/mongo/db/catalog/import_collection_oplog_entry', '$BUILD_DIR/mongo/db/change_stream_pre_images_collection_manager', + '$BUILD_DIR/mongo/db/change_stream_serverless_helpers', '$BUILD_DIR/mongo/db/commands/txn_cmd_request', '$BUILD_DIR/mongo/db/concurrency/exception_util', '$BUILD_DIR/mongo/db/dbhelpers', @@ -76,14 +79,9 @@ env.Library( '$BUILD_DIR/mongo/db/repl/repl_server_parameters', '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', '$BUILD_DIR/mongo/db/server_feature_flags', - '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', - '$BUILD_DIR/mongo/db/timeseries/bucket_catalog/bucket_catalog', - '$BUILD_DIR/mongo/db/timeseries/timeseries_extended_range', '$BUILD_DIR/mongo/db/transaction/transaction', '$BUILD_DIR/mongo/db/transaction/transaction_operations', - '$BUILD_DIR/mongo/db/views/util', - '$BUILD_DIR/mongo/db/views/view_catalog_helpers', '$BUILD_DIR/mongo/s/coreshard', '$BUILD_DIR/mongo/s/grid', 'op_observer_util', @@ -149,8 +147,6 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/repl/storage_interface_impl', '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', '$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/db/service_context_test_fixture', - '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/storage/recovery_unit_base', @@ -163,3 +159,21 @@ env.CppUnitTest( 'user_write_block_mode_op_observer', ], ) + +env.Library( + target='fallback_op_observer', + source=[ + 'fallback_op_observer.cpp', + ], + LIBDEPS=[ + 'op_observer', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/catalog/collection_catalog', + 
'$BUILD_DIR/mongo/db/read_write_concern_defaults', + '$BUILD_DIR/mongo/db/session/session_catalog_mongod', + '$BUILD_DIR/mongo/db/transaction/transaction', + '$BUILD_DIR/mongo/db/views/view_catalog_helpers', + 'op_observer_util', + ], +) diff --git a/src/mongo/db/op_observer/batched_write_context.cpp b/src/mongo/db/op_observer/batched_write_context.cpp index 24a31e7086005..c365a46816084 100644 --- a/src/mongo/db/op_observer/batched_write_context.cpp +++ b/src/mongo/db/op_observer/batched_write_context.cpp @@ -28,7 +28,17 @@ */ #include "mongo/db/op_observer/batched_write_context.h" + +#include + +#include +#include + +#include "mongo/db/concurrency/locker.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { const OperationContext::Decoration BatchedWriteContext::get = diff --git a/src/mongo/db/op_observer/batched_write_context.h b/src/mongo/db/op_observer/batched_write_context.h index d1972e69880bf..def884044e68d 100644 --- a/src/mongo/db/op_observer/batched_write_context.h +++ b/src/mongo/db/op_observer/batched_write_context.h @@ -30,7 +30,6 @@ #pragma once #include "mongo/db/operation_context.h" -#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/transaction/transaction_operations.h" namespace mongo { diff --git a/src/mongo/db/op_observer/batched_write_context_test.cpp b/src/mongo/db/op_observer/batched_write_context_test.cpp index 88ed3d45f91ab..3d968befb57d7 100644 --- a/src/mongo/db/op_observer/batched_write_context_test.cpp +++ b/src/mongo/db/op_observer/batched_write_context_test.cpp @@ -27,22 +27,37 @@ * it in the license file. */ +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/batched_write_context.h" #include "mongo/db/repl/oplog_entry.h" -#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/decorable.h" +#include "mongo/util/uuid.h" namespace mongo { - namespace { // This test fixture provides access to a properly initialized global service context to test the // BatchedWriteContext class and its interaction with WriteUnitOfWork. For batched write // interactions with the oplog, see BatchedWriteOutputsTest. -class BatchedWriteContextTest : public ServiceContextTest {}; +class BatchedWriteContextTest : public ServiceContextMongoDTest {}; TEST_F(BatchedWriteContextTest, TestBatchingCondition) { auto opCtxRaii = makeOperationContext(); diff --git a/src/mongo/db/op_observer/batched_write_policy.h b/src/mongo/db/op_observer/batched_write_policy.h index b71f9b1f5eddd..164b73dec7fda 100644 --- a/src/mongo/db/op_observer/batched_write_policy.h +++ b/src/mongo/db/op_observer/batched_write_policy.h @@ -27,13 +27,17 @@ * it in the license file. 
*/ +#include #include +#include #include #include #include +#include "mongo/bson/bsonobj.h" #include "mongo/bson/util/builder.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" namespace mongo { diff --git a/src/mongo/db/op_observer/batched_write_policy_test.cpp b/src/mongo/db/op_observer/batched_write_policy_test.cpp index d676233f7f675..580a43fe1e81e 100644 --- a/src/mongo/db/op_observer/batched_write_policy_test.cpp +++ b/src/mongo/db/op_observer/batched_write_policy_test.cpp @@ -27,15 +27,25 @@ * it in the license file. */ -#include +#include #include +#include +#include #include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/op_observer/batched_write_policy.h" #include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -62,7 +72,7 @@ void _generateRecords(size_t numRecords, std::deque& records) { } -class CursorMock : public SeekableRecordCursor { +class CursorMock : public RecordCursor { public: CursorMock(std::deque* records) : _records(records) {} @@ -77,12 +87,6 @@ class CursorMock : public SeekableRecordCursor { return next; } - boost::optional seekExact(const RecordId& id) override { - return Record{}; - } - boost::optional seekNear(const RecordId& id) override { - return boost::none; - } void save() override {} bool restore(bool tolerateCappedRepositioning) override { return true; diff --git a/src/mongo/db/op_observer/fallback_op_observer.cpp b/src/mongo/db/op_observer/fallback_op_observer.cpp new file mode 100644 index 0000000000000..fe50878a264b9 --- /dev/null +++ b/src/mongo/db/op_observer/fallback_op_observer.cpp @@ -0,0 +1,229 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/op_observer/fallback_op_observer.h" + +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/views_for_database.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time_validator.h" +#include "mongo/db/op_observer/batched_write_context.h" +#include "mongo/db/op_observer/op_observer_util.h" +#include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/session_catalog.h" +#include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/session_killer.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/views/util.h" +#include "mongo/db/views/view_catalog_helpers.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/scripting/engine.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" + +namespace mongo { + +void FallbackOpObserver::onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { + auto txnParticipant = TransactionParticipant::get(opCtx); + const bool inMultiDocumentTransaction = + txnParticipant && opCtx->writesAreReplicated() && txnParticipant.transactionIsOpen(); + if (inMultiDocumentTransaction && !opCtx->getWriteUnitOfWork()) { + return; + } + + const auto& nss = coll->ns(); + + if (nss.isSystemDotJavascript()) { + Scope::storedFuncMod(opCtx); + } else if (nss.isSystemDotViews()) { + try { + for (auto it = first; it != last; it++) { + view_util::validateViewDefinitionBSON(opCtx, it->doc, nss.dbName()); + + uassertStatusOK(CollectionCatalog::get(opCtx)->createView( + opCtx, + NamespaceStringUtil::deserialize(nss.dbName().tenantId(), + it->doc.getStringField("_id")), + NamespaceStringUtil::parseNamespaceFromDoc(nss.dbName(), + it->doc.getStringField("viewOn")), + BSONArray{it->doc.getObjectField("pipeline")}, + view_catalog_helpers::validatePipeline, + it->doc.getObjectField("collation"), + ViewsForDatabase::Durability::kAlreadyDurable)); + } + } catch (const DBException&) { + // If a previous operation left the view catalog in an invalid state, our inserts can + // fail even if all the definitions are valid. Reloading may help us reset the state. 
+ CollectionCatalog::get(opCtx)->reloadViews(opCtx, nss.dbName()); + } + } else if (nss == NamespaceString::kSessionTransactionsTableNamespace) { + if (opAccumulator) { + auto& opTimeList = opAccumulator->insertOpTimes; + if (!opTimeList.empty() && !opTimeList.back().isNull()) { + for (auto it = first; it != last; it++) { + auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); + mongoDSessionCatalog->observeDirectWriteToConfigTransactions(opCtx, it->doc); + } + } + } + } else if (nss == NamespaceString::kConfigSettingsNamespace) { + for (auto it = first; it != last; it++) { + ReadWriteConcernDefaults::get(opCtx).observeDirectWriteToConfigSettings( + opCtx, it->doc["_id"], it->doc); + } + } else if (nss == NamespaceString::kExternalKeysCollectionNamespace) { + for (auto it = first; it != last; it++) { + auto externalKey = + ExternalKeysCollectionDocument::parse(IDLParserContext("externalKey"), it->doc); + opCtx->recoveryUnit()->onCommit( + [this, externalKey = std::move(externalKey)](OperationContext* opCtx, + boost::optional) mutable { + auto validator = LogicalTimeValidator::get(opCtx); + if (validator) { + validator->cacheExternalKey(externalKey); + } + }); + } + } +} + +void FallbackOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { + if (args.updateArgs->update.isEmpty()) { + return; + } + + const auto& nss = args.coll->ns(); + + if (nss.isSystemDotJavascript()) { + Scope::storedFuncMod(opCtx); + } else if (nss.isSystemDotViews()) { + CollectionCatalog::get(opCtx)->reloadViews(opCtx, nss.dbName()); + } else if (nss == NamespaceString::kSessionTransactionsTableNamespace && + !opAccumulator->opTime.writeOpTime.isNull()) { + auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); + mongoDSessionCatalog->observeDirectWriteToConfigTransactions(opCtx, + args.updateArgs->updatedDoc); + } else if (nss == NamespaceString::kConfigSettingsNamespace) { + ReadWriteConcernDefaults::get(opCtx).observeDirectWriteToConfigSettings( + opCtx, args.updateArgs->updatedDoc["_id"], args.updateArgs->updatedDoc); + } +} + +void FallbackOpObserver::onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { + const auto& nss = coll->ns(); + const bool inBatchedWrite = BatchedWriteContext::get(opCtx).writesAreBatched(); + + auto optDocKey = documentKeyDecoration(args); + invariant(optDocKey, nss.toStringForErrorMsg()); + auto& documentKey = optDocKey.value(); + + if (nss.isSystemDotJavascript()) { + Scope::storedFuncMod(opCtx); + } else if (nss.isSystemDotViews()) { + CollectionCatalog::get(opCtx)->reloadViews(opCtx, nss.dbName()); + } else if (nss == NamespaceString::kSessionTransactionsTableNamespace && + (inBatchedWrite || !opAccumulator->opTime.writeOpTime.isNull())) { + auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); + mongoDSessionCatalog->observeDirectWriteToConfigTransactions(opCtx, documentKey.getId()); + } else if (nss == NamespaceString::kConfigSettingsNamespace) { + ReadWriteConcernDefaults::get(opCtx).observeDirectWriteToConfigSettings( + opCtx, documentKey.getId().firstElement(), boost::none); + } +} + +void FallbackOpObserver::onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) { + if (dbName.db() == NamespaceString::kSessionTransactionsTableNamespace.db()) { + auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); + mongoDSessionCatalog->invalidateAllSessions(opCtx); + } +} + 
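// --- Editor's illustrative sketch (not part of the patch) ---------------------
// The FallbackOpObserver hooks above all follow the same shape: inspect the
// namespace that was written and refresh whichever in-memory state mirrors it
// (stored JS functions, the view catalog, the session catalog, read/write
// concern defaults). A minimal, self-contained C++ sketch of that dispatch
// pattern follows; every name in it (SpecialNamespaceObserver, subscribe,
// onWrite) is hypothetical and only illustrates the idea, not MongoDB's API.
#include <functional>
#include <iostream>
#include <map>
#include <string>

class SpecialNamespaceObserver {
public:
    // Register a refresh action to run whenever the given namespace is written.
    void subscribe(std::string ns, std::function<void()> refresh) {
        _actions.emplace(std::move(ns), std::move(refresh));
    }

    // Called after a write commits; only namespaces with a registered action react.
    void onWrite(const std::string& ns) const {
        auto it = _actions.find(ns);
        if (it != _actions.end())
            it->second();
    }

private:
    std::map<std::string, std::function<void()>> _actions;
};

int main() {
    SpecialNamespaceObserver observer;
    observer.subscribe("config.settings",
                       [] { std::cout << "invalidate read/write concern defaults\n"; });
    observer.subscribe("foo.system.views",
                       [] { std::cout << "reload view catalog for 'foo'\n"; });

    observer.onWrite("foo.system.views");  // triggers the view catalog reload
    observer.onWrite("foo.bar");           // ordinary user collection: no-op
    return 0;
}
// ------------------------------------------------------------------------------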
+repl::OpTime FallbackOpObserver::onDropCollection(OperationContext* opCtx, + const NamespaceString& collectionName, + const UUID& uuid, + std::uint64_t numRecords, + CollectionDropType dropType, + bool markFromMigrate) { + if (collectionName.isSystemDotJavascript()) { + Scope::storedFuncMod(opCtx); + } else if (collectionName.isSystemDotViews()) { + CollectionCatalog::get(opCtx)->clearViews(opCtx, collectionName.dbName()); + } else if (collectionName == NamespaceString::kSessionTransactionsTableNamespace) { + // Disallow this drop if there are currently prepared transactions. + const auto sessionCatalog = SessionCatalog::get(opCtx); + SessionKiller::Matcher matcherAllSessions( + KillAllSessionsByPatternSet{makeKillAllSessionsByPattern(opCtx)}); + bool noPreparedTxns = true; + sessionCatalog->scanSessions(matcherAllSessions, [&](const ObservableSession& session) { + auto txnParticipant = TransactionParticipant::get(session); + if (txnParticipant.transactionIsPrepared()) { + noPreparedTxns = false; + } + }); + uassert(4852500, + "Unable to drop transactions table (config.transactions) while prepared " + "transactions are present.", + noPreparedTxns); + + auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); + mongoDSessionCatalog->invalidateAllSessions(opCtx); + } else if (collectionName == NamespaceString::kConfigSettingsNamespace) { + ReadWriteConcernDefaults::get(opCtx).invalidate(); + } + + return {}; +} + +} // namespace mongo diff --git a/src/mongo/db/op_observer/fallback_op_observer.h b/src/mongo/db/op_observer/fallback_op_observer.h new file mode 100644 index 0000000000000..1c1fbbf192534 --- /dev/null +++ b/src/mongo/db/op_observer/fallback_op_observer.h @@ -0,0 +1,92 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include +#include + +#include "mongo/db/catalog/collection.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" + +namespace mongo { + +/** + * This OpObserver contains notifications to miscellaneous entities that were sitting in + * OpObserverImpl. + */ +class FallbackOpObserver final : public OpObserverNoop { + FallbackOpObserver(const FallbackOpObserver&) = delete; + FallbackOpObserver& operator=(const FallbackOpObserver&) = delete; + +public: + FallbackOpObserver() = default; + ~FallbackOpObserver() = default; + + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfigAndSystem, NamespaceFilter::kConfigAndSystem}; + } + + void onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final; + + repl::OpTime onDropCollection(OperationContext* opCtx, + const NamespaceString& collectionName, + const UUID& uuid, + std::uint64_t numRecords, + CollectionDropType dropType, + bool markFromMigrate) final; +}; + +} // namespace mongo diff --git a/src/mongo/db/op_observer/fcv_op_observer.cpp b/src/mongo/db/op_observer/fcv_op_observer.cpp index afe149ed92a92..d37cd000f4ffd 100644 --- a/src/mongo/db/op_observer/fcv_op_observer.cpp +++ b/src/mongo/db/op_observer/fcv_op_observer.cpp @@ -30,18 +30,41 @@ #include "mongo/db/op_observer/fcv_op_observer.h" +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/feature_compatibility_version_parser.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer_util.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/kill_sessions_local.h" +#include "mongo/db/session/session_killer.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/executor/egress_tag_closer_manager.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/transport/service_entry_point.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -166,7 +189,8 @@ void FcvOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (coll->ns().isServerConfigurationCollection()) { for (auto it = first; it != last; it++) { _onInsertOrUpdate(opCtx, it->doc); @@ -174,7 +198,9 @@ void FcvOpObserver::onInserts(OperationContext* opCtx, } } -void FcvOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) { +void FcvOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.updateArgs->update.isEmpty()) { return; } @@ -186,12 +212,13 @@ void FcvOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs void FcvOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { const auto& nss = coll->ns(); // documentKeyDecoration is set in OpObserverImpl::aboutToDelete. So the FcvOpObserver // relies on the OpObserverImpl also being in the opObserverRegistry. - auto optDocKey = repl::documentKeyDecoration(opCtx); - invariant(optDocKey, nss.ns()); + auto optDocKey = documentKeyDecoration(args); + invariant(optDocKey, nss.toStringForErrorMsg()); if (nss.isServerConfigurationCollection()) { auto id = optDocKey.value().getId().firstElement(); if (id.type() == BSONType::String && id.String() == multiversion::kParameterName) { @@ -200,8 +227,8 @@ void FcvOpObserver::onDelete(OperationContext* opCtx, } } -void FcvOpObserver::_onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) { +void FcvOpObserver::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { // Ensures the in-memory and on-disk FCV states are consistent after a rollback. const auto query = BSON("_id" << multiversion::kParameterName); const auto swFcv = repl::StorageInterface::get(opCtx)->findById( diff --git a/src/mongo/db/op_observer/fcv_op_observer.h b/src/mongo/db/op_observer/fcv_op_observer.h index 152b5b8466f5b..66a7da7dedb84 100644 --- a/src/mongo/db/op_observer/fcv_op_observer.h +++ b/src/mongo/db/op_observer/fcv_op_observer.h @@ -29,7 +29,21 @@ #pragma once +#include + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" #include "mongo/util/version/releases.h" namespace mongo { @@ -39,7 +53,7 @@ namespace mongo { * Observes all writes to the FCV document under admin.system.version and sets the in-memory FCV * value. */ -class FcvOpObserver final : public OpObserver { +class FcvOpObserver final : public OpObserverNoop { FcvOpObserver(const FcvOpObserver&) = delete; FcvOpObserver& operator=(const FcvOpObserver&) = delete; @@ -49,12 +63,17 @@ class FcvOpObserver final : public OpObserver { // FcvOpObserver overrides. 
+ NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kSystem, NamespaceFilter::kSystem}; + } + void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) final; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; void onInsertGlobalIndexKey(OperationContext* opCtx, const NamespaceString& globalIndexNss, @@ -68,178 +87,17 @@ class FcvOpObserver final : public OpObserver { const BSONObj& key, const BSONObj& docKey) final {} - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - // Noop overrides. - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} - - void aboutToDelete(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& doc) final {} - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) final {} - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final {} - using OpObserver::onDropCollection; - repl::OpTime onDropCollection(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid, - std::uint64_t numRecords, - const 
CollectionDropType dropType) final { - return {}; - } - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& idxDescriptor) final {} - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return {}; - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final{}; - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final{}; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final{}; - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) final {} + void onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; private: /** @@ -257,8 +115,6 @@ class FcvOpObserver final : public OpObserver { * document and on commit, updates the server parameter. 
*/ static void _onInsertOrUpdate(OperationContext* opCtx, const BSONObj& doc); - - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; }; } // namespace mongo diff --git a/src/mongo/db/op_observer/op_observer.cpp b/src/mongo/db/op_observer/op_observer.cpp index eb9b8dce99533..e47137182987f 100644 --- a/src/mongo/db/op_observer/op_observer.cpp +++ b/src/mongo/db/op_observer/op_observer.cpp @@ -29,7 +29,11 @@ #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/operation_context.h" +#include + +#include + +#include "mongo/util/assert_util_core.h" namespace mongo { namespace { diff --git a/src/mongo/db/op_observer/op_observer.h b/src/mongo/db/op_observer/op_observer.h index 4aa505712ecac..feb0f77e65b2c 100644 --- a/src/mongo/db/op_observer/op_observer.h +++ b/src/mongo/db/op_observer/op_observer.h @@ -29,23 +29,70 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/commit_quorum_options.h" +#include "mongo/db/database_name.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/rollback.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/transaction/transaction_operations.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { struct InsertStatement; -class OperationContext; -namespace repl { -class OpTime; -} // namespace repl +struct OpTimeBundle { + repl::OpTime writeOpTime; + Date_t wallClockTime; +}; + +/** + * The generic container for onUpdate/onDelete/onUnpreparedTransactionCommit state-passing between + * OpObservers. Despite the naming, some OpObserver's don't strictly observe. This struct is written + * by OpObserverImpl and useful for later observers to inspect state they need. + * + * These structs are decorable to support the sharing of critical resources between OpObserverImpl + * and MigrationChunkClonerSourceOpObserver. No other decorations should be added to these structs. + */ +struct OpStateAccumulator : Decorable { + OpStateAccumulator() = default; + + // Use either 'opTime' for non-insert operations or 'insertOpTimes', but not both. + OpTimeBundle opTime; + std::vector insertOpTimes; + +private: + OpStateAccumulator(const OpStateAccumulator&) = delete; + OpStateAccumulator& operator=(const OpStateAccumulator&) = delete; +}; enum class RetryableFindAndModifyLocation { // The operation is not retryable, or not a "findAndModify" command. Do not record a @@ -72,7 +119,15 @@ struct OplogUpdateEntryArgs { : updateArgs(updateArgs), coll(coll) {} }; -struct OplogDeleteEntryArgs { +/** + * Holds supplementary information required for OpObserver::onDelete() to write out an + * oplog entry for deleting a single document from a collection. 
+ * + * This struct is also passed to OpObserver::aboutToDelete() so that OpObserver + * implementations may include additional information (via decorations) to be shared with + * the onDelete() method within the same implementation. + */ +struct OplogDeleteEntryArgs : Decorable { const BSONObj* deletedDoc = nullptr; // "fromMigrate" indicates whether the delete was induced by a chunk migration, and so @@ -113,6 +168,24 @@ class OpObserver { public: using ApplyOpsOplogSlotAndOperationAssignment = TransactionOperations::ApplyOpsInfo; + /** + * Used by CRUD ops: onInserts, onUpdate, aboutToDelete, and onDelete. + */ + enum class NamespaceFilter { + kConfig, // config database (i.e. config.*) + kSystem, // system collection (i.e. *.system.*) + kConfigAndSystem, // run the observer on config and system, but not user collections + kAll, // run the observer on all collections/databases + kNone, // never run the observer for this CRUD event + }; + + // Controls the OpObserverRegistry's filtering of CRUD events. + // Each OpObserver declares which events it cares about with this. + struct NamespaceFilters { + NamespaceFilter updateFilter; // onInserts, onUpdate + NamespaceFilter deleteFilter; // aboutToDelete, onDelete + }; + enum class CollectionDropType { // The collection is being dropped immediately, in one step. kOnePhase, @@ -124,6 +197,12 @@ class OpObserver { virtual ~OpObserver() = default; + // Used by the OpObserverRegistry to filter out CRUD operations. + // With this method, each OpObserver should declare if it wants to subscribe + // to a subset of operations to special internal collections. This helps + // improve performance. Avoid using 'kAll' as much as possible. + virtual NamespaceFilters getNamespaceFilters() const = 0; + virtual void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, const NamespaceString& nss, const UUID& uuid, @@ -183,14 +262,15 @@ class OpObserver { * and is intended to be forwarded to downstream subsystems that expect a single * 'fromMigrate' to describe the entire set of inserts. * Examples: ShardServerOpObserver, UserWriteBlockModeOpObserver, and - * OpObserverShardingImpl::shardObserveInsertsOp(). + * MigrationChunkClonerSourceOpObserver::onInserts(). */ virtual void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) = 0; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) = 0; virtual void onInsertGlobalIndexKey(OperationContext* opCtx, const NamespaceString& globalIndexNss, @@ -204,11 +284,15 @@ class OpObserver { const BSONObj& key, const BSONObj& docKey) = 0; - virtual void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) = 0; + virtual void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) = 0; virtual void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) = 0; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) = 0; /** * Handles logging before document is deleted. @@ -222,7 +306,8 @@ class OpObserver { virtual void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) = 0; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) = 0; /** * Logs a no-op with "msgObj" in the o field into oplog. 
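The `NamespaceFilters`/`getNamespaceFilters()` declaration and the `OpStateAccumulator*` out-parameter added to the CRUD hooks above are easiest to read from the perspective of a downstream observer. The sketch below is illustrative only and is not part of this patch: `ExampleOpObserver` is a hypothetical class, the remaining pure-virtual overrides are elided, and the reaction to the observed opTime is left as a comment.

```cpp
// Hypothetical downstream observer (not part of this change). It subscribes
// its insert/update hooks only for config and system collections, opts out of
// delete events entirely, and reads the opTime that OpObserverImpl recorded
// into the shared OpStateAccumulator.
class ExampleOpObserver : public OpObserver {
public:
    NamespaceFilters getNamespaceFilters() const override {
        // updateFilter covers onInserts/onUpdate; deleteFilter covers
        // aboutToDelete/onDelete. The registry uses this to skip the observer
        // for ordinary user-collection CRUD.
        return {NamespaceFilter::kConfigAndSystem, NamespaceFilter::kNone};
    }

    void onUpdate(OperationContext* opCtx,
                  const OplogUpdateEntryArgs& args,
                  OpStateAccumulator* opAccumulator) override {
        // OpObserverImpl runs earlier in the registry and fills in the
        // accumulator; later observers can consume the recorded opTime
        // instead of re-deriving it.
        if (opAccumulator && !opAccumulator->opTime.writeOpTime.isNull()) {
            auto observedOpTime = opAccumulator->opTime.writeOpTime;
            // ... audit/metrics/etc. based on observedOpTime ...
            (void)observedOpTime;
        }
    }

    // ... other OpObserver overrides elided in this sketch ...
};
```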
@@ -308,19 +393,12 @@ class OpObserver { * * 'dropType' describes whether the collection drop is one-phase or two-phase. */ - virtual repl::OpTime onDropCollection(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid, - std::uint64_t numRecords, - CollectionDropType dropType) = 0; virtual repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, CollectionDropType dropType, - bool markFromMigrate) { - return onDropCollection(opCtx, collectionName, uuid, numRecords, dropType); - } + bool markFromMigrate) = 0; /** @@ -346,13 +424,6 @@ class OpObserver { * Returns the optime of the oplog entry successfully written to the oplog. * Returns a null optime if an oplog entry was not written for this operation. */ - virtual repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) = 0; virtual repl::OpTime preRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, @@ -360,10 +431,8 @@ class OpObserver { const boost::optional& dropTargetUUID, std::uint64_t numRecords, bool stayTemp, - bool markFromMigrate) { - return preRenameCollection( - opCtx, fromCollection, toCollection, uuid, dropTargetUUID, numRecords, stayTemp); - } + bool markFromMigrate) = 0; + /** * This function performs all op observer handling for a 'renameCollection' command except for * logging the oplog entry. It should be used specifically in instances where the optime is @@ -381,13 +450,6 @@ class OpObserver { * executed. It calls preRenameCollection to log the entry and postRenameCollection to do all * other handling. */ - virtual void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) = 0; virtual void onRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, @@ -395,10 +457,7 @@ class OpObserver { const boost::optional& dropTargetUUID, std::uint64_t numRecords, bool stayTemp, - bool markFromMigrate) { - onRenameCollection( - opCtx, fromCollection, toCollection, uuid, dropTargetUUID, numRecords, stayTemp); - } + bool markFromMigrate) = 0; virtual void onImportCollection(OperationContext* opCtx, const UUID& importUUID, @@ -427,11 +486,21 @@ class OpObserver { * transaction, before the RecoveryUnit onCommit() is called. It must not be called when no * transaction is active. * + * 'reservedSlots' is a list of oplog slots reserved for the oplog entries in a transaction. + * * The 'transactionOperations' contains the list of CRUD operations (formerly 'statements') to * be applied in this transaction. + * + * The 'applyOpsOperationAssignment' contains a representation of "applyOps" entries and oplog + * slots to be used for writing pre- and post- image oplog entries for a transaction. 
*/ virtual void onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) = 0; + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator = nullptr) = 0; + /** * The onPreparedTransactionCommit method is called on the commit of a prepared transaction, * after the RecoveryUnit onCommit() is called. It must not be called when no transaction is @@ -473,25 +542,20 @@ class OpObserver { * This method is called before an atomic transaction is prepared. It must be called when a * transaction is active. * - * Optionally returns a representation of "applyOps" entries to be written and oplog slots to be - * used for writing pre- and post- image oplog entries for a transaction. Only one OpObserver in - * the system should return the representation of "applyOps" entries. The returned value is - * passed to 'onTransactionPrepare()'. + * The 'transactionOperations' contains the list of CRUD operations to be applied in this + * transaction. The operations may be modified by setting pre-image and post-image oplog entry + * timestamps. * - * The 'reservedSlots' is a list of oplog slots reserved for the oplog entries in a transaction. - * The last reserved slot represents the prepareOpTime used for the prepare oplog entry. + * The 'applyOpsOperationAssignment' contains a representation of "applyOps" entries and oplog + * slots to be used for writing pre- and post- image oplog entries for a transaction. * * The 'wallClockTime' is the time to record as wall clock time on oplog entries resulting from * transaction preparation. - * - * The 'transactionOperations' contains the list of CRUD operations to be applied in this - * transaction. The operations may be modified by setting pre-image and post-image oplog entry - * timestamps. */ - virtual std::unique_ptr preTransactionPrepare( + virtual void preTransactionPrepare( OperationContext* opCtx, - const std::vector& reservedSlots, const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, Date_t wallClockTime) = 0; /** @@ -505,8 +569,8 @@ class OpObserver { * this transaction. * * The 'applyOpsOperationAssignment' contains a representation of "applyOps" entries and oplog - * slots to be used for writing pre- and post- image oplog entries for a transaction. A value - * returned by 'preTransactionPrepare()' should be passed as 'applyOpsOperationAssignment'. + * slots to be used for writing pre- and post- image oplog entries for a transaction. + * The same "applyOps" information should be passed to 'preTransactionPrepare()'. * * The 'numberOfPrePostImagesToWrite' is the number of CRUD operations that have a pre-image * to write as a noop oplog entry. The op observer will reserve oplog slots for these @@ -524,11 +588,20 @@ class OpObserver { Date_t wallClockTime) = 0; /** - * This is called when a transaction transitions into prepare while it is not primary. Example - * case can include secondary oplog application or when node was restared and tries to - * recover prepared transactions from the oplog. + * This method is called when a transaction transitions into prepare while it is not primary, + * e.g. during secondary oplog application or when recovering prepared transactions from the
does not use the + * session id attached to the opCtx) because transaction oplog application currently applies the + * oplog entries for each prepared transaction in multiple internal sessions acquired from the + * InternalSessionPool. Currently, those internal sessions are completely unrelated to the + * session for the transaction itself. For a non-retryable internal transaction, not using the + * transaction session id in the codepath here can cause the opTime for the transaction to + * show up in the chunk migration opTime buffer although the writes they correspond to are not + * retryable and therefore are discarded anyway. + * */ virtual void onTransactionPrepareNonPrimary(OperationContext* opCtx, + const LogicalSessionId& lsid, const std::vector& statements, const repl::OpTime& prepareOpTime) = 0; @@ -587,19 +660,9 @@ class OpObserver { * * This method is only applicable to the "rollback to a stable timestamp" algorithm, and is not * called when using any other rollback algorithm i.e "rollback via refetch". - * - * This function will call the private virtual '_onReplicationRollback' method. Any exceptions - * thrown indicates rollback failure that may have led us to some inconsistent on-disk or memory - * state, so we crash instead. */ - void onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) noexcept { - try { - _onReplicationRollback(opCtx, rbInfo); - } catch (const DBException& ex) { - fassert(6050902, ex.toStatus()); - } - }; + virtual void onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) = 0; /** * Called when the majority commit point is updated by replication. @@ -613,10 +676,6 @@ class OpObserver { struct Times; -private: - virtual void _onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) = 0; - protected: class ReservedTimes; }; diff --git a/src/mongo/db/op_observer/op_observer_impl.cpp b/src/mongo/db/op_observer/op_observer_impl.cpp index e2d257b29e27f..4102bd1180936 100644 --- a/src/mongo/db/op_observer/op_observer_impl.cpp +++ b/src/mongo/db/op_observer/op_observer_impl.cpp @@ -28,62 +28,87 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/op_observer/op_observer_impl.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include #include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/catalog/collection_options.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/import_collection_oplog_entry_gen.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/change_stream_pre_images_collection_manager.h" +#include "mongo/db/change_stream_serverless_helpers.h" +#include "mongo/db/client.h" #include "mongo/db/commands/txn_cmds_gen.h" -#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/create_indexes_gen.h" +#include "mongo/db/curop.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/index/index_descriptor.h" -#include 
"mongo/db/keys_collection_document_gen.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/logical_time_validator.h" -#include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/batched_write_context.h" +#include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_util.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/update_result.h" #include "mongo/db/pipeline/change_stream_preimage_gen.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/image_collection_entry_gen.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog_entry_gen.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_decoration.h" -#include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/s/sharding_write_router.h" -#include "mongo/db/server_feature_flags_gen.h" #include "mongo/db/server_options.h" -#include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" -#include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" -#include "mongo/db/timeseries/timeseries_extended_range.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/db/transaction/transaction_participant_gen.h" -#include "mongo/db/views/util.h" -#include "mongo/db/views/view_catalog_helpers.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/grid.h" -#include "mongo/scripting/engine.h" -#include "mongo/stdx/mutex.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -93,8 +118,8 @@ using repl::DurableOplogEntry; using repl::MutableOplogEntry; using ChangeStreamPreImageRecordingMode = repl::ReplOperation::ChangeStreamPreImageRecordingMode; -const OperationContext::Decoration> destinedRecipientDecoration = - OperationContext::declareDecoration>(); +const auto destinedRecipientDecoration = + OplogDeleteEntryArgs::declareDecoration>(); namespace { @@ -123,6 +148,19 @@ repl::OpTime logOperation(OperationContext* opCtx, return opTime; } +void writeChangeStreamPreImageEntry( + OperationContext* opCtx, + // Skip the pre-image insert if we are in the middle of a tenant migration. 
Pre-image inserts + // for writes during the oplog catchup phase are handled in the oplog application code. + boost::optional tenantId, + const ChangeStreamPreImage& preImage) { + if (repl::tenantMigrationInfo(opCtx)) { + return; + } + + ChangeStreamPreImagesCollectionManager::get(opCtx).insertPreImage(opCtx, tenantId, preImage); +} + /** * Generic function that logs an operation. * Intended to reduce branching at call-sites by accepting the least common denominator @@ -207,12 +245,6 @@ BSONObj makeObject2ForDropOrRename(uint64_t numRecords) { return obj; } -struct OpTimeBundle { - repl::OpTime writeOpTime; - repl::OpTime prePostImageOpTime; - Date_t wallClockTime; -}; - /** * Write oplog entry(ies) for the update operation. */ @@ -250,18 +282,20 @@ OpTimeBundle replLogDelete(OperationContext* opCtx, const boost::optional& uuid, StmtId stmtId, bool fromMigrate, + const DocumentKey& documentKey, + const boost::optional& destinedRecipient, OplogWriter* oplogWriter) { oplogEntry->setTid(nss.tenantId()); oplogEntry->setNss(nss); oplogEntry->setUuid(uuid); - oplogEntry->setDestinedRecipient(destinedRecipientDecoration(opCtx)); + oplogEntry->setDestinedRecipient(destinedRecipient); repl::OplogLink oplogLink; oplogWriter->appendOplogEntryChainInfo(opCtx, oplogEntry, &oplogLink, {stmtId}); OpTimeBundle opTimes; oplogEntry->setOpType(repl::OpTypeEnum::kDelete); - oplogEntry->setObject(repl::documentKeyDecoration(opCtx).value().getShardKeyAndId()); + oplogEntry->setObject(documentKey.getShardKeyAndId()); oplogEntry->setFromMigrateIfTrue(fromMigrate); opTimes.writeOpTime = logOperation(opCtx, oplogEntry, true /*assignWallClockTime*/, oplogWriter); @@ -286,12 +320,16 @@ void writeToImageCollection(OperationContext* opCtx, // stronger lock acquisition is taken on this namespace is during step up to create the // collection. AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); - AutoGetCollection imageCollectionRaii( - opCtx, NamespaceString::kConfigImagesNamespace, LockMode::MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::kConfigImagesNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); auto curOp = CurOp::get(opCtx); const auto existingNs = curOp->getNSS(); - UpdateResult res = - Helpers::upsert(opCtx, NamespaceString::kConfigImagesNamespace, imageEntry.toBSON()); + UpdateResult res = Helpers::upsert(opCtx, collection, imageEntry.toBSON()); { stdx::lock_guard clientLock(*opCtx->getClient()); curOp->setNS_inlock(existingNs); @@ -316,7 +354,7 @@ bool shouldTimestampIndexBuildSinglePhase(OperationContext* opCtx, const Namespa return false; // 3. If the index build is on the local database, do not timestamp. - if (nss.isLocal()) + if (nss.isLocalDB()) return false; // 4. All other cases, we generate a timestamp by writing a no-op oplog entry. This is @@ -419,7 +457,7 @@ void OpObserverImpl::onCreateIndex(OperationContext* opCtx, auto opTime = logMutableOplogEntry(opCtx, &oplogEntry, _oplogWriter.get()); - if (opCtx->writesAreReplicated()) { + if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(opCtx, nss)) { if (opTime.isNull()) { LOGV2(7360100, "Added oplog entry for createIndexes to transaction", @@ -476,7 +514,8 @@ void OpObserverImpl::onStartIndexBuildSinglePhase(OperationContext* opCtx, opCtx, {}, boost::none, - BSON("msg" << std::string(str::stream() << "Creating indexes. 
Coll: " << nss)), + BSON("msg" << std::string(str::stream() << "Creating indexes. Coll: " + << NamespaceStringUtil::serialize(nss))), boost::none, boost::none, boost::none, @@ -494,7 +533,8 @@ void OpObserverImpl::onAbortIndexBuildSinglePhase(OperationContext* opCtx, opCtx, {}, boost::none, - BSON("msg" << std::string(str::stream() << "Aborting indexes. Coll: " << nss)), + BSON("msg" << std::string(str::stream() << "Aborting indexes. Coll: " + << NamespaceStringUtil::serialize(nss))), boost::none, boost::none, boost::none, @@ -566,12 +606,136 @@ void OpObserverImpl::onAbortIndexBuild(OperationContext* opCtx, logOperation(opCtx, &oplogEntry, true /*assignWallClockTime*/, _oplogWriter.get()); } +namespace { + +std::vector _logInsertOps(OperationContext* opCtx, + MutableOplogEntry* oplogEntryTemplate, + std::vector::const_iterator begin, + std::vector::const_iterator end, + const std::vector& fromMigrate, + const ShardingWriteRouter& shardingWriteRouter, + const CollectionPtr& collectionPtr, + OplogWriter* oplogWriter) { + invariant(begin != end); + + auto nss = oplogEntryTemplate->getNss(); + auto replCoord = repl::ReplicationCoordinator::get(opCtx); + if (replCoord->isOplogDisabledFor(opCtx, nss)) { + invariant(!begin->stmtIds.empty()); + uassert(ErrorCodes::IllegalOperation, + str::stream() << "retryable writes is not supported for unreplicated ns: " + << nss.toStringForErrorMsg(), + begin->stmtIds.front() == kUninitializedStmtId); + return {}; + } + + // The number of entries in 'fromMigrate' should be consistent with the number of insert + // operations in [begin, end). Also, 'fromMigrate' is a sharding concept, so there is no + // need to check 'fromMigrate' for inserts that are not replicated. See SERVER-75829. + invariant(std::distance(fromMigrate.begin(), fromMigrate.end()) == std::distance(begin, end), + oplogEntryTemplate->toReplOperation().toBSON().toString()); + + // If this oplog entry is from a tenant migration, include the tenant migration + // UUID and optional donor timeline metadata. + if (const auto& recipientInfo = repl::tenantMigrationInfo(opCtx)) { + oplogEntryTemplate->setFromTenantMigration(recipientInfo->uuid); + if (oplogEntryTemplate->getTid() && + change_stream_serverless_helpers::isChangeStreamEnabled( + opCtx, *oplogEntryTemplate->getTid()) && + recipientInfo->donorOplogEntryData) { + oplogEntryTemplate->setDonorOpTime(recipientInfo->donorOplogEntryData->donorOpTime); + oplogEntryTemplate->setDonorApplyOpsIndex( + recipientInfo->donorOplogEntryData->applyOpsIndex); + } + } + + const size_t count = end - begin; + + // Use OplogAccessMode::kLogOp to avoid recursive locking. + AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kLogOp); + + WriteUnitOfWork wuow(opCtx); + + std::vector opTimes(count); + std::vector timestamps(count); + std::vector bsonOplogEntries(count); + std::vector records(count); + for (size_t i = 0; i < count; i++) { + // Make a copy from the template for each insert oplog entry. + MutableOplogEntry oplogEntry = *oplogEntryTemplate; + // Make a mutable copy. + auto insertStatementOplogSlot = begin[i].oplogSlot; + // Fetch optime now, if not already fetched. 
+ if (insertStatementOplogSlot.isNull()) { + insertStatementOplogSlot = oplogWriter->getNextOpTimes(opCtx, 1U)[0]; + } + const auto docKey = getDocumentKey(collectionPtr, begin[i].doc).getShardKeyAndId(); + oplogEntry.setObject(begin[i].doc); + oplogEntry.setObject2(docKey); + oplogEntry.setOpTime(insertStatementOplogSlot); + oplogEntry.setDestinedRecipient( + shardingWriteRouter.getReshardingDestinedRecipient(begin[i].doc)); + addDestinedRecipient.execute([&](const BSONObj& data) { + auto recipient = data["destinedRecipient"].String(); + oplogEntry.setDestinedRecipient(boost::make_optional({recipient})); + }); + + repl::OplogLink oplogLink; + if (i > 0) + oplogLink.prevOpTime = opTimes[i - 1]; + + oplogEntry.setFromMigrateIfTrue(fromMigrate[i]); + + oplogWriter->appendOplogEntryChainInfo(opCtx, &oplogEntry, &oplogLink, begin[i].stmtIds); + + opTimes[i] = insertStatementOplogSlot; + timestamps[i] = insertStatementOplogSlot.getTimestamp(); + bsonOplogEntries[i] = oplogEntry.toBSON(); + // The storage engine will assign the RecordId based on the "ts" field of the oplog entry, + // see record_id_helpers::extractKey. + records[i] = Record{ + RecordId(), RecordData(bsonOplogEntries[i].objdata(), bsonOplogEntries[i].objsize())}; + } + + sleepBetweenInsertOpTimeGenerationAndLogOp.execute([&](const BSONObj& data) { + auto numMillis = data["waitForMillis"].numberInt(); + LOGV2(7456300, + "Sleeping for {sleepMillis}ms after receiving {numOpTimesReceived} optimes from " + "{firstOpTime} to " + "{lastOpTime}", + "Sleeping due to sleepBetweenInsertOpTimeGenerationAndLogOp failpoint", + "sleepMillis"_attr = numMillis, + "numOpTimesReceived"_attr = count, + "firstOpTime"_attr = opTimes.front(), + "lastOpTime"_attr = opTimes.back()); + sleepmillis(numMillis); + }); + + invariant(!opTimes.empty()); + auto lastOpTime = opTimes.back(); + invariant(!lastOpTime.isNull()); + auto wallClockTime = oplogEntryTemplate->getWallClockTime(); + oplogWriter->logOplogRecords(opCtx, + nss, + &records, + timestamps, + oplogWrite.getCollection(), + lastOpTime, + wallClockTime, + /*isAbortIndexBuild=*/false); + wuow.commit(); + return opTimes; +} + +} // namespace + void OpObserverImpl::onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { auto txnParticipant = TransactionParticipant::get(opCtx); const bool inMultiDocumentTransaction = txnParticipant && opCtx->writesAreReplicated() && txnParticipant.transactionIsOpen(); @@ -582,7 +746,7 @@ void OpObserverImpl::onInserts(OperationContext* opCtx, std::vector opTimeList; repl::OpTime lastOpTime; - ShardingWriteRouter shardingWriteRouter(opCtx, nss, Grid::get(opCtx)->catalogCache()); + auto shardingWriteRouter = std::make_unique(opCtx, nss); auto& batchedWriteContext = BatchedWriteContext::get(opCtx); const bool inBatchedWrite = batchedWriteContext.writesAreBatched(); @@ -591,10 +755,10 @@ void OpObserverImpl::onInserts(OperationContext* opCtx, invariant(!defaultFromMigrate); for (auto iter = first; iter != last; iter++) { - const auto docKey = repl::getDocumentKey(opCtx, coll, iter->doc).getShardKeyAndId(); + const auto docKey = getDocumentKey(coll, iter->doc).getShardKeyAndId(); auto operation = MutableOplogEntry::makeInsertOperation(nss, uuid, iter->doc, docKey); operation.setDestinedRecipient( - shardingWriteRouter.getReshardingDestinedRecipient(iter->doc)); + 
shardingWriteRouter->getReshardingDestinedRecipient(iter->doc)); operation.setFromMigrateIfTrue(fromMigrate[std::distance(first, iter)]); @@ -615,24 +779,19 @@ void OpObserverImpl::onInserts(OperationContext* opCtx, isInternalSessionForRetryableWrite(*opCtx->getLogicalSessionId()); for (auto iter = first; iter != last; iter++) { - const auto docKey = repl::getDocumentKey(opCtx, coll, iter->doc).getShardKeyAndId(); + const auto docKey = getDocumentKey(coll, iter->doc).getShardKeyAndId(); auto operation = MutableOplogEntry::makeInsertOperation(nss, uuid, iter->doc, docKey); if (inRetryableInternalTransaction) { operation.setInitializedStatementIds(iter->stmtIds); } operation.setDestinedRecipient( - shardingWriteRouter.getReshardingDestinedRecipient(iter->doc)); + shardingWriteRouter->getReshardingDestinedRecipient(iter->doc)); operation.setFromMigrateIfTrue(fromMigrate[std::distance(first, iter)]); txnParticipant.addTransactionOperation(opCtx, operation); } } else { - std::function(const BSONObj& doc)> getDestinedRecipientFn = - [&shardingWriteRouter](const BSONObj& doc) { - return shardingWriteRouter.getReshardingDestinedRecipient(doc); - }; - // Ensure well-formed embedded ReplOperation for logging. // This means setting optype, nss, and object at the minimum. MutableOplogEntry oplogEntryTemplate; @@ -645,13 +804,14 @@ void OpObserverImpl::onInserts(OperationContext* opCtx, Date_t lastWriteDate = getWallClockTimeForOpLog(opCtx); oplogEntryTemplate.setWallClockTime(lastWriteDate); - opTimeList = _oplogWriter->logInsertOps(opCtx, - &oplogEntryTemplate, - first, - last, - std::move(fromMigrate), - getDestinedRecipientFn, - coll); + opTimeList = _logInsertOps(opCtx, + &oplogEntryTemplate, + first, + last, + std::move(fromMigrate), + *shardingWriteRouter, + coll, + _oplogWriter.get()); if (!opTimeList.empty()) lastOpTime = opTimeList.back(); @@ -671,83 +831,10 @@ void OpObserverImpl::onInserts(OperationContext* opCtx, onWriteOpCompleted(opCtx, stmtIdsWritten, sessionTxnRecord); } - shardObserveInsertsOp(opCtx, - nss, - first, - last, - opTimeList, - shardingWriteRouter, - defaultFromMigrate, - inMultiDocumentTransaction); - - if (nss.coll() == "system.js") { - Scope::storedFuncMod(opCtx); - } else if (nss.isSystemDotViews()) { - try { - for (auto it = first; it != last; it++) { - view_util::validateViewDefinitionBSON(opCtx, it->doc, nss.dbName()); - - uassertStatusOK(CollectionCatalog::get(opCtx)->createView( - opCtx, - NamespaceStringUtil::deserialize(nss.dbName().tenantId(), - it->doc.getStringField("_id")), - NamespaceStringUtil::parseNamespaceFromDoc(nss.dbName(), - it->doc.getStringField("viewOn")), - BSONArray{it->doc.getObjectField("pipeline")}, - view_catalog_helpers::validatePipeline, - it->doc.getObjectField("collation"), - ViewsForDatabase::Durability::kAlreadyDurable)); - } - } catch (const DBException&) { - // If a previous operation left the view catalog in an invalid state, our inserts can - // fail even if all the definitions are valid. Reloading may help us reset the state. 
- CollectionCatalog::get(opCtx)->reloadViews(opCtx, nss.dbName()); - } - } else if (nss == NamespaceString::kSessionTransactionsTableNamespace && !lastOpTime.isNull()) { - for (auto it = first; it != last; it++) { - auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); - mongoDSessionCatalog->observeDirectWriteToConfigTransactions(opCtx, it->doc); - } - } else if (nss == NamespaceString::kConfigSettingsNamespace) { - for (auto it = first; it != last; it++) { - ReadWriteConcernDefaults::get(opCtx).observeDirectWriteToConfigSettings( - opCtx, it->doc["_id"], it->doc); - } - } else if (nss == NamespaceString::kExternalKeysCollectionNamespace) { - for (auto it = first; it != last; it++) { - auto externalKey = - ExternalKeysCollectionDocument::parse(IDLParserContext("externalKey"), it->doc); - opCtx->recoveryUnit()->onCommit( - [this, externalKey = std::move(externalKey)](OperationContext* opCtx, - boost::optional) mutable { - auto validator = LogicalTimeValidator::get(opCtx); - if (validator) { - validator->cacheExternalKey(externalKey); - } - }); - } - } else if (nss.isTimeseriesBucketsCollection()) { - // Check if the bucket _id is sourced from a date outside the standard range. If our writes - // end up erroring out or getting rolled back, then this flag will stay set. This is okay - // though, as it only disables some query optimizations and won't result in any correctness - // issues if the flag is set when it doesn't need to be (as opposed to NOT being set when it - // DOES need to be -- that will cause correctness issues). Additionally, if the user tried - // to insert measurements with dates outside the standard range, chances are they will do so - // again, and we will have only set the flag a little early. - invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX)); - // Hold reference to the catalog for collection lookup without locks to be safe. 
- auto catalog = CollectionCatalog::get(opCtx); - auto bucketsColl = catalog->lookupCollectionByNamespace(opCtx, nss); - tassert(6905201, "Could not find collection for write", bucketsColl); - auto timeSeriesOptions = bucketsColl->getTimeseriesOptions(); - if (timeSeriesOptions.has_value()) { - if (auto currentSetting = bucketsColl->getRequiresTimeseriesExtendedRangeSupport(); - !currentSetting && - timeseries::bucketsHaveDateOutsideStandardRange( - timeSeriesOptions.value(), first, last)) { - bucketsColl->setRequiresTimeseriesExtendedRangeSupport(opCtx); - } - } + if (opAccumulator) { + opAccumulator->insertOpTimes = std::move(opTimeList); + shardingWriteRouterOpStateAccumulatorDecoration(opAccumulator) = + std::move(shardingWriteRouter); } } @@ -791,19 +878,21 @@ void OpObserverImpl::onDeleteGlobalIndexKey(OperationContext* opCtx, opCtx, &oplogEntry, _oplogWriter.get(), isRequiredInMultiDocumentTransaction); } -void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) { +void OpObserverImpl::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { failCollectionUpdates.executeIf( [&](const BSONObj&) { uasserted(40654, - str::stream() - << "failCollectionUpdates failpoint enabled, namespace: " - << args.coll->ns().ns() << ", update: " << args.updateArgs->update - << " on document with " << args.updateArgs->criteria); + str::stream() << "failCollectionUpdates failpoint enabled, namespace: " + << args.coll->ns().toStringForErrorMsg() + << ", update: " << args.updateArgs->update + << " on document with " << args.updateArgs->criteria); }, [&](const BSONObj& data) { // If the failpoint specifies no collection or matches the existing one, fail. - auto collElem = data["collectionNS"]; - return !collElem || args.coll->ns().ns() == collElem.String(); + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "collectionNS"); + return fpNss.isEmpty() || args.coll->ns() == fpNss; }); // Do not log a no-op operation; see SERVER-21738 @@ -815,8 +904,7 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg const bool inMultiDocumentTransaction = txnParticipant && opCtx->writesAreReplicated() && txnParticipant.transactionIsOpen(); - ShardingWriteRouter shardingWriteRouter( - opCtx, args.coll->ns(), Grid::get(opCtx)->catalogCache()); + auto shardingWriteRouter = std::make_unique(opCtx, args.coll->ns()); OpTimeBundle opTime; auto& batchedWriteContext = BatchedWriteContext::get(opCtx); @@ -826,20 +914,28 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg auto operation = MutableOplogEntry::makeUpdateOperation( args.coll->ns(), args.coll->uuid(), args.updateArgs->update, args.updateArgs->criteria); operation.setDestinedRecipient( - shardingWriteRouter.getReshardingDestinedRecipient(args.updateArgs->updatedDoc)); + shardingWriteRouter->getReshardingDestinedRecipient(args.updateArgs->updatedDoc)); operation.setFromMigrateIfTrue(args.updateArgs->source == OperationSource::kFromMigrate); batchedWriteContext.addBatchedOperation(opCtx, operation); } else if (inMultiDocumentTransaction) { const bool inRetryableInternalTransaction = isInternalSessionForRetryableWrite(*opCtx->getLogicalSessionId()); + invariant( + inRetryableInternalTransaction || + args.retryableFindAndModifyLocation == RetryableFindAndModifyLocation::kNone, + str::stream() + << "Attempted a retryable write within a non-retryable multi-document transaction"); + auto operation = 
MutableOplogEntry::makeUpdateOperation( args.coll->ns(), args.coll->uuid(), args.updateArgs->update, args.updateArgs->criteria); if (inRetryableInternalTransaction) { operation.setInitializedStatementIds(args.updateArgs->stmtIds); if (args.updateArgs->storeDocOption == CollectionUpdateArgs::StoreDocOption::PreImage) { - invariant(!args.updateArgs->preImageDoc.isEmpty()); + invariant(!args.updateArgs->preImageDoc.isEmpty(), + str::stream() + << "Pre-image document must be present for pre-image recording"); operation.setPreImage(args.updateArgs->preImageDoc.getOwned()); operation.setPreImageRecordedForRetryableInternalTransaction(); if (args.retryableFindAndModifyLocation == @@ -849,7 +945,9 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg } if (args.updateArgs->storeDocOption == CollectionUpdateArgs::StoreDocOption::PostImage) { - invariant(!args.updateArgs->updatedDoc.isEmpty()); + invariant(!args.updateArgs->updatedDoc.isEmpty(), + str::stream() + << "Updated document must be present for post-image recording"); operation.setPostImage(args.updateArgs->updatedDoc.getOwned()); if (args.retryableFindAndModifyLocation == RetryableFindAndModifyLocation::kSideCollection) { @@ -859,13 +957,15 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg } if (args.updateArgs->changeStreamPreAndPostImagesEnabledForCollection) { - invariant(!args.updateArgs->preImageDoc.isEmpty()); + invariant(!args.updateArgs->preImageDoc.isEmpty(), + str::stream() + << "Pre-image document must be present for pre-image recording"); operation.setPreImage(args.updateArgs->preImageDoc.getOwned()); operation.setChangeStreamPreImageRecordingMode( ChangeStreamPreImageRecordingMode::kPreImagesCollection); } - const auto& scopedCollectionDescription = shardingWriteRouter.getCollDesc(); + const auto& scopedCollectionDescription = shardingWriteRouter->getCollDesc(); // ShardingWriteRouter only has boost::none scopedCollectionDescription when not in a // sharded cluster. if (scopedCollectionDescription && scopedCollectionDescription->isSharded()) { @@ -875,13 +975,13 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg } operation.setDestinedRecipient(
@@ -913,6 +1017,11 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg writeToImageCollection(opCtx, *opCtx->getLogicalSessionId(), imageToWrite); } + SessionTxnRecord sessionTxnRecord; + sessionTxnRecord.setLastWriteOpTime(opTime.writeOpTime); + sessionTxnRecord.setLastWriteDate(opTime.wallClockTime); + onWriteOpCompleted(opCtx, args.updateArgs->stmtIds, sessionTxnRecord); + // Write a pre-image to the change streams pre-images collection when following conditions // are met: // 1. The collection has 'changeStreamPreAndPostImages' enabled. @@ -930,81 +1039,43 @@ void OpObserverImpl::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArg args.updateArgs->source != OperationSource::kFromMigrate && !args.coll->ns().isTemporaryReshardingCollection()) { const auto& preImageDoc = args.updateArgs->preImageDoc; - tassert(5868600, "PreImage must be set", !preImageDoc.isEmpty()); + invariant(!preImageDoc.isEmpty(), str::stream() << "PreImage must be set"); ChangeStreamPreImageId id(args.coll->uuid(), opTime.writeOpTime.getTimestamp(), 0); ChangeStreamPreImage preImage(id, opTime.wallClockTime, preImageDoc); - ChangeStreamPreImagesCollectionManager::insertPreImage( - opCtx, args.coll->ns().tenantId(), preImage); + writeChangeStreamPreImageEntry(opCtx, args.coll->ns().tenantId(), preImage); } - - SessionTxnRecord sessionTxnRecord; - sessionTxnRecord.setLastWriteOpTime(opTime.writeOpTime); - sessionTxnRecord.setLastWriteDate(opTime.wallClockTime); - onWriteOpCompleted(opCtx, args.updateArgs->stmtIds, sessionTxnRecord); } - if (args.coll->ns() != NamespaceString::kSessionTransactionsTableNamespace) { - if (args.updateArgs->source != OperationSource::kFromMigrate) { - shardObserveUpdateOp(opCtx, - args.coll->ns(), - args.updateArgs->preImageDoc, - args.updateArgs->updatedDoc, - opTime.writeOpTime, - shardingWriteRouter, - opTime.prePostImageOpTime, - inMultiDocumentTransaction); - } - } - - if (args.coll->ns().coll() == "system.js") { - Scope::storedFuncMod(opCtx); - } else if (args.coll->ns().isSystemDotViews()) { - CollectionCatalog::get(opCtx)->reloadViews(opCtx, args.coll->ns().dbName()); - } else if (args.coll->ns() == NamespaceString::kSessionTransactionsTableNamespace && - !opTime.writeOpTime.isNull()) { - auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); - mongoDSessionCatalog->observeDirectWriteToConfigTransactions(opCtx, - args.updateArgs->updatedDoc); - } else if (args.coll->ns() == NamespaceString::kConfigSettingsNamespace) { - ReadWriteConcernDefaults::get(opCtx).observeDirectWriteToConfigSettings( - opCtx, args.updateArgs->updatedDoc["_id"], args.updateArgs->updatedDoc); - } else if (args.coll->ns().isTimeseriesBucketsCollection()) { - if (args.updateArgs->source != OperationSource::kTimeseriesInsert) { - OID bucketId = args.updateArgs->updatedDoc["_id"].OID(); - timeseries::bucket_catalog::handleDirectWrite(opCtx, args.coll->ns(), bucketId); - } + if (opAccumulator) { + shardingWriteRouterOpStateAccumulatorDecoration(opAccumulator) = + std::move(shardingWriteRouter); } } void OpObserverImpl::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { - repl::documentKeyDecoration(opCtx).emplace(repl::getDocumentKey(opCtx, coll, doc)); - - ShardingWriteRouter shardingWriteRouter(opCtx, coll->ns(), Grid::get(opCtx)->catalogCache()); - - repl::DurableReplOperation op; - op.setDestinedRecipient(shardingWriteRouter.getReshardingDestinedRecipient(doc)); - destinedRecipientDecoration(opCtx) = 
op.getDestinedRecipient(); + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { + documentKeyDecoration(args).emplace(getDocumentKey(coll, doc)); - shardObserveAboutToDelete(opCtx, coll->ns(), doc); - - if (coll->ns().isTimeseriesBucketsCollection()) { - OID bucketId = doc["_id"].OID(); - timeseries::bucket_catalog::handleDirectWrite(opCtx, coll->ns(), bucketId); + { + ShardingWriteRouter shardingWriteRouter(opCtx, coll->ns()); + destinedRecipientDecoration(args) = shardingWriteRouter.getReshardingDestinedRecipient(doc); } } void OpObserverImpl::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { const auto& nss = coll->ns(); const auto uuid = coll->uuid(); - auto optDocKey = repl::documentKeyDecoration(opCtx); - invariant(optDocKey, nss.ns()); + auto optDocKey = documentKeyDecoration(args); + invariant(optDocKey, nss.toStringForErrorMsg()); auto& documentKey = optDocKey.value(); auto txnParticipant = TransactionParticipant::get(opCtx); @@ -1018,17 +1089,18 @@ void OpObserverImpl::onDelete(OperationContext* opCtx, if (inBatchedWrite) { auto operation = MutableOplogEntry::makeDeleteOperation(nss, uuid, documentKey.getShardKeyAndId()); - operation.setDestinedRecipient(destinedRecipientDecoration(opCtx)); + operation.setDestinedRecipient(destinedRecipientDecoration(args)); operation.setFromMigrateIfTrue(args.fromMigrate); batchedWriteContext.addBatchedOperation(opCtx, operation); } else if (inMultiDocumentTransaction) { const bool inRetryableInternalTransaction = isInternalSessionForRetryableWrite(*opCtx->getLogicalSessionId()); - tassert(5868700, - "Attempted a retryable write within a non-retryable multi-document transaction", - inRetryableInternalTransaction || - args.retryableFindAndModifyLocation == RetryableFindAndModifyLocation::kNone); + invariant( + inRetryableInternalTransaction || + args.retryableFindAndModifyLocation == RetryableFindAndModifyLocation::kNone, + str::stream() + << "Attempted a retryable write within a non-retryable multi-document transaction"); auto operation = MutableOplogEntry::makeDeleteOperation(nss, uuid, documentKey.getShardKeyAndId()); @@ -1037,9 +1109,9 @@ void OpObserverImpl::onDelete(OperationContext* opCtx, operation.setInitializedStatementIds({stmtId}); if (args.retryableFindAndModifyLocation == RetryableFindAndModifyLocation::kSideCollection) { - tassert(6054000, - "Deleted document must be present for pre-image recording", - args.deletedDoc); + invariant(!args.deletedDoc->isEmpty(), + str::stream() + << "Deleted document must be present for pre-image recording"); operation.setPreImage(args.deletedDoc->getOwned()); operation.setPreImageRecordedForRetryableInternalTransaction(); operation.setNeedsRetryImage(repl::RetryImageEnum::kPreImage); @@ -1047,26 +1119,23 @@ void OpObserverImpl::onDelete(OperationContext* opCtx, } if (args.changeStreamPreAndPostImagesEnabledForCollection) { - tassert(5869400, - "Deleted document must be present for pre-image recording", - args.deletedDoc); + invariant(!args.deletedDoc->isEmpty(), + str::stream() << "Deleted document must be present for pre-image recording"); operation.setPreImage(args.deletedDoc->getOwned()); operation.setChangeStreamPreImageRecordingMode( ChangeStreamPreImageRecordingMode::kPreImagesCollection); } - operation.setDestinedRecipient(destinedRecipientDecoration(opCtx)); + 
operation.setDestinedRecipient(destinedRecipientDecoration(args)); operation.setFromMigrateIfTrue(args.fromMigrate); txnParticipant.addTransactionOperation(opCtx, operation); } else { MutableOplogEntry oplogEntry; - boost::optional deletedDocForOplog = boost::none; if (args.retryableFindAndModifyLocation == RetryableFindAndModifyLocation::kSideCollection) { - tassert(5868703, - "Deleted document must be present for pre-image recording", - args.deletedDoc); + invariant(!args.deletedDoc->isEmpty(), + str::stream() << "Deleted document must be present for pre-image recording"); invariant(opCtx->getTxnNumber()); oplogEntry.setNeedsRetryImage({repl::RetryImageEnum::kPreImage}); @@ -1074,8 +1143,20 @@ void OpObserverImpl::onDelete(OperationContext* opCtx, oplogEntry.setOpTime(args.oplogSlots.back()); } } - opTime = replLogDelete( - opCtx, nss, &oplogEntry, uuid, stmtId, args.fromMigrate, _oplogWriter.get()); + + opTime = replLogDelete(opCtx, + nss, + &oplogEntry, + uuid, + stmtId, + args.fromMigrate, + documentKey, + destinedRecipientDecoration(args), + _oplogWriter.get()); + if (opAccumulator) { + opAccumulator->opTime.writeOpTime = opTime.writeOpTime; + opAccumulator->opTime.wallClockTime = opTime.wallClockTime; + } if (oplogEntry.getNeedsRetryImage()) { auto imageDoc = *(args.deletedDoc); @@ -1084,6 +1165,11 @@ void OpObserverImpl::onDelete(OperationContext* opCtx, writeToImageCollection(opCtx, *opCtx->getLogicalSessionId(), imageToWrite); } + SessionTxnRecord sessionTxnRecord; + sessionTxnRecord.setLastWriteOpTime(opTime.writeOpTime); + sessionTxnRecord.setLastWriteDate(opTime.wallClockTime); + onWriteOpCompleted(opCtx, std::vector{stmtId}, sessionTxnRecord); + // Write a pre-image to the change streams pre-images collection when following conditions // are met: // 1. The collection has 'changeStreamPreAndPostImages' enabled. @@ -1098,44 +1184,13 @@ void OpObserverImpl::onDelete(OperationContext* opCtx, // sync mode application). 
if (args.changeStreamPreAndPostImagesEnabledForCollection && !opTime.writeOpTime.isNull() && !args.fromMigrate && !nss.isTemporaryReshardingCollection()) { - tassert(5868704, "Deleted document must be set", args.deletedDoc); + invariant(!args.deletedDoc->isEmpty(), str::stream() << "Deleted document must be set"); ChangeStreamPreImageId id(uuid, opTime.writeOpTime.getTimestamp(), 0); ChangeStreamPreImage preImage(id, opTime.wallClockTime, *args.deletedDoc); - ChangeStreamPreImagesCollectionManager::insertPreImage(opCtx, nss.tenantId(), preImage); + writeChangeStreamPreImageEntry(opCtx, nss.tenantId(), preImage); } - - SessionTxnRecord sessionTxnRecord; - sessionTxnRecord.setLastWriteOpTime(opTime.writeOpTime); - sessionTxnRecord.setLastWriteDate(opTime.wallClockTime); - onWriteOpCompleted(opCtx, std::vector{stmtId}, sessionTxnRecord); - } - - if (nss != NamespaceString::kSessionTransactionsTableNamespace) { - if (!args.fromMigrate) { - ShardingWriteRouter shardingWriteRouter(opCtx, nss, Grid::get(opCtx)->catalogCache()); - shardObserveDeleteOp(opCtx, - nss, - documentKey.getShardKeyAndId(), - opTime.writeOpTime, - shardingWriteRouter, - opTime.prePostImageOpTime, - inMultiDocumentTransaction); - } - } - - if (nss.coll() == "system.js") { - Scope::storedFuncMod(opCtx); - } else if (nss.isSystemDotViews()) { - CollectionCatalog::get(opCtx)->reloadViews(opCtx, nss.dbName()); - } else if (nss == NamespaceString::kSessionTransactionsTableNamespace && - (inBatchedWrite || !opTime.writeOpTime.isNull())) { - auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); - mongoDSessionCatalog->observeDirectWriteToConfigTransactions(opCtx, documentKey.getId()); - } else if (nss == NamespaceString::kConfigSettingsNamespace) { - ReadWriteConcernDefaults::get(opCtx).observeDirectWriteToConfigSettings( - opCtx, documentKey.getId().firstElement(), boost::none); } } @@ -1190,7 +1245,7 @@ void OpObserverImpl::onCreateCollection(OperationContext* opCtx, oplogEntry.setOpTime(createOpTime); } auto opTime = logMutableOplogEntry(opCtx, &oplogEntry, _oplogWriter.get()); - if (opCtx->writesAreReplicated()) { + if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(opCtx, collectionName)) { if (opTime.isNull()) { LOGV2(7360102, "Added oplog entry for create to transaction", @@ -1245,7 +1300,7 @@ void OpObserverImpl::onCollMod(OperationContext* opCtx, oplogEntry.setTid(nss.tenantId()); oplogEntry.setNss(nss.getCommandNS()); oplogEntry.setUuid(uuid); - oplogEntry.setObject(repl::makeCollModCmdObj(collModCmd, oldCollOptions, indexInfo)); + oplogEntry.setObject(makeCollModCmdObj(collModCmd, oldCollOptions, indexInfo)); oplogEntry.setObject2(o2Builder.done()); auto opTime = logOperation(opCtx, &oplogEntry, true /*assignWallClockTime*/, _oplogWriter.get()); @@ -1290,26 +1345,7 @@ void OpObserverImpl::onDropDatabase(OperationContext* opCtx, const DatabaseName& "object"_attr = oplogEntry.getObject()); } - uassert(50714, - "dropping the admin database is not allowed.", - dbName.db() != DatabaseName::kAdmin.db()); - - if (dbName.db() == NamespaceString::kSessionTransactionsTableNamespace.db()) { - auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); - mongoDSessionCatalog->invalidateAllSessions(opCtx); - } - - auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); - clear(bucketCatalog, dbName.db()); -} - -repl::OpTime OpObserverImpl::onDropCollection(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid, - std::uint64_t numRecords, - 
CollectionDropType dropType) { - return onDropCollection( - opCtx, collectionName, uuid, numRecords, dropType, false /* markFromMigrate */); + uassert(50714, "dropping the admin database is not allowed.", !dbName.isAdminDB()); } repl::OpTime OpObserverImpl::onDropCollection(OperationContext* opCtx, @@ -1343,37 +1379,6 @@ repl::OpTime OpObserverImpl::onDropCollection(OperationContext* opCtx, "dropping the server configuration collection (admin.system.version) is not allowed.", collectionName != NamespaceString::kServerConfigurationNamespace); - if (collectionName.isSystemDotViews()) { - CollectionCatalog::get(opCtx)->clearViews(opCtx, collectionName.dbName()); - } else if (collectionName == NamespaceString::kSessionTransactionsTableNamespace) { - // Disallow this drop if there are currently prepared transactions. - const auto sessionCatalog = SessionCatalog::get(opCtx); - SessionKiller::Matcher matcherAllSessions( - KillAllSessionsByPatternSet{makeKillAllSessionsByPattern(opCtx)}); - bool noPreparedTxns = true; - sessionCatalog->scanSessions(matcherAllSessions, [&](const ObservableSession& session) { - auto txnParticipant = TransactionParticipant::get(session); - if (txnParticipant.transactionIsPrepared()) { - noPreparedTxns = false; - } - }); - uassert(4852500, - "Unable to drop transactions table (config.transactions) while prepared " - "transactions are present.", - noPreparedTxns); - - auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx); - mongoDSessionCatalog->invalidateAllSessions(opCtx); - } else if (collectionName == NamespaceString::kConfigSettingsNamespace) { - ReadWriteConcernDefaults::get(opCtx).invalidate(); - } else if (collectionName.isTimeseriesBucketsCollection()) { - auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); - clear(bucketCatalog, collectionName.getTimeseriesViewNamespace()); - } else if (collectionName.isSystemDotJavascript()) { - // Inform the JavaScript engine of the change to system.js. 
- Scope::storedFuncMod(opCtx); - } - return {}; } @@ -1402,23 +1407,6 @@ void OpObserverImpl::onDropIndex(OperationContext* opCtx, } } -repl::OpTime OpObserverImpl::preRenameCollection(OperationContext* const opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) { - return preRenameCollection(opCtx, - fromCollection, - toCollection, - uuid, - dropTargetUUID, - numRecords, - stayTemp, - false /* markFromMigrate */); -} - repl::OpTime OpObserverImpl::preRenameCollection(OperationContext* const opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, @@ -1471,23 +1459,6 @@ void OpObserverImpl::postRenameCollection(OperationContext* const opCtx, CollectionCatalog::get(opCtx)->reloadViews(opCtx, toCollection.dbName()); } -void OpObserverImpl::onRenameCollection(OperationContext* const opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) { - onRenameCollection(opCtx, - fromCollection, - toCollection, - uuid, - dropTargetUUID, - numRecords, - stayTemp, - false /* markFromMigrate */); -} - void OpObserverImpl::onRenameCollection(OperationContext* const opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, @@ -1578,7 +1549,7 @@ void writeChangeStreamPreImagesForApplyOpsEntries( invariant(operation.getUuid()); invariant(!operation.getPreImage().isEmpty()); - ChangeStreamPreImagesCollectionManager::insertPreImage( + writeChangeStreamPreImageEntry( opCtx, operation.getTid(), ChangeStreamPreImage{ @@ -1590,36 +1561,6 @@ void writeChangeStreamPreImagesForApplyOpsEntries( } } -/** - * Returns maximum number of operations to pack into a single oplog entry, - * when multi-oplog format for transactions is in use. - * - * Stop packing when either number of transaction operations is reached, or when the - * next one would make the total size of operations larger than the maximum BSON Object - * User Size. We rely on the headroom between BSONObjMaxUserSize and - * BSONObjMaxInternalSize to cover the BSON overhead and the other "applyOps" entry - * fields. But if a single operation in the set exceeds BSONObjMaxUserSize, we still fit - * it, as a single max-length operation should be able to be packed into an "applyOps" - * entry. - */ -std::size_t getMaxNumberOfTransactionOperationsInSingleOplogEntry() { - tassert(6278503, - "gMaxNumberOfTransactionOperationsInSingleOplogEntry should be positive number", - gMaxNumberOfTransactionOperationsInSingleOplogEntry > 0); - return static_cast(gMaxNumberOfTransactionOperationsInSingleOplogEntry); -} - -/** - * Returns maximum size (bytes) of operations to pack into a single oplog entry, - * when multi-oplog format for transactions is in use. - * - * Refer to getMaxNumberOfTransactionOperationsInSingleOplogEntry() comments for a - * description on packing transaction operations into "applyOps" entries. - */ -std::size_t getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes() { - return static_cast(BSONObjMaxUserSize); -} - /** * Returns maximum number of operations to pack into a single oplog entry, * when multi-oplog format for batched writes is in use. 
@@ -1754,7 +1695,7 @@ void logCommitOrAbortForPreparedTransaction(OperationContext* opCtx, invariant(!opCtx->lockState()->hasMaxLockTimeout()); writeConflictRetry( - opCtx, "onPreparedTransactionCommitOrAbort", NamespaceString::kRsOplogNamespace.ns(), [&] { + opCtx, "onPreparedTransactionCommitOrAbort", NamespaceString::kRsOplogNamespace, [&] { // Writes to the oplog only require a Global intent lock. Guaranteed by // OplogSlotReserver. invariant(opCtx->lockState()->isWriteLocked()); @@ -1781,9 +1722,12 @@ void logCommitOrAbortForPreparedTransaction(OperationContext* opCtx, void OpObserverImpl::onTransactionStart(OperationContext* opCtx) {} void OpObserverImpl::onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) { + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator) { const auto& statements = transactionOperations.getOperationsForOpObserver(); - auto numberOfPrePostImagesToWrite = transactionOperations.getNumberOfPrePostImagesToWrite(); invariant(opCtx->getTxnNumber()); @@ -1796,11 +1740,8 @@ void OpObserverImpl::onUnpreparedTransactionCommit( if (statements.empty()) return; - repl::OpTime commitOpTime; - // Reserve all the optimes in advance, so we only need to get the optime mutex once. We - // reserve enough entries for all statements in the transaction. - auto oplogSlots = - _oplogWriter->getNextOpTimes(opCtx, statements.size() + numberOfPrePostImagesToWrite); + const auto& oplogSlots = reservedSlots; + const auto& applyOpsOplogSlotAndOperationAssignment = applyOpsOperationAssignment; // Throw TenantMigrationConflict error if the database for the transaction statements is being // migrated. We only need check the namespace of the first statement since a transaction's @@ -1813,13 +1754,6 @@ void OpObserverImpl::onUnpreparedTransactionCommit( uasserted(51268, "hangAndFailUnpreparedCommitAfterReservingOplogSlot fail point enabled"); } - // Serialize transaction statements to BSON and determine their assignment to "applyOps" - // entries. - const auto applyOpsOplogSlotAndOperationAssignment = transactionOperations.getApplyOpsInfo( - oplogSlots, - getMaxNumberOfTransactionOperationsInSingleOplogEntry(), - getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes(), - /*prepare=*/false); invariant(!applyOpsOplogSlotAndOperationAssignment.prepare); const auto wallClockTime = getWallClockTimeForOpLog(opCtx); @@ -1870,19 +1804,22 @@ void OpObserverImpl::onUnpreparedTransactionCommit( &imageToWrite); invariant(numOplogEntries > 0); - // Write change stream pre-images. At this point the pre-images will be written at the - // transaction commit timestamp as driven (implicitly) by the last written "applyOps" oplog - // entry. 
- writeChangeStreamPreImagesForTransaction( - opCtx, statements, applyOpsOplogSlotAndOperationAssignment, wallClockTime); + repl::OpTime commitOpTime = oplogSlots[numOplogEntries - 1]; + invariant(!commitOpTime.isNull()); + if (opAccumulator) { + opAccumulator->opTime.writeOpTime = commitOpTime; + opAccumulator->opTime.wallClockTime = wallClockTime; + } if (imageToWrite) { writeToImageCollection(opCtx, *opCtx->getLogicalSessionId(), *imageToWrite); } - commitOpTime = oplogSlots[numOplogEntries - 1]; - invariant(!commitOpTime.isNull()); - shardObserveTransactionPrepareOrUnpreparedCommit(opCtx, statements, commitOpTime); + // Write change stream pre-images. At this point the pre-images will be written at the + // transaction commit timestamp as driven (implicitly) by the last written "applyOps" oplog + // entry. + writeChangeStreamPreImagesForTransaction( + opCtx, statements, applyOpsOplogSlotAndOperationAssignment, wallClockTime); } void OpObserverImpl::onBatchedWriteStart(OperationContext* opCtx) { @@ -1928,8 +1865,7 @@ void OpObserverImpl::onBatchedWriteCommit(OperationContext* opCtx) { getMaxSizeOfBatchedOperationsInSingleOplogEntryBytes(), /*prepare=*/false); - if (!gFeatureFlagInternalWritesAreReplicatedTransactionally.isEnabled( - serverGlobalParams.featureCompatibility)) { + if (!gFeatureFlagLargeBatchedOperations.isEnabled(serverGlobalParams.featureCompatibility)) { // Before SERVER-70765, we relied on packTransactionStatementsForApplyOps() to check if the // batch of operations could fit in a single applyOps entry. Now, we pass the size limit to // TransactionOperations::getApplyOpsInfo() and are now able to return an error earlier. @@ -2031,21 +1967,14 @@ void OpObserverImpl::onPreparedTransactionCommit( opCtx, &oplogEntry, DurableTxnStateEnum::kCommitted, _oplogWriter.get()); } -std::unique_ptr -OpObserverImpl::preTransactionPrepare(OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) { - auto applyOpsOplogSlotAndOperationAssignment = transactionOperations.getApplyOpsInfo( - reservedSlots, - getMaxNumberOfTransactionOperationsInSingleOplogEntry(), - getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes(), - /*prepare=*/true); +void OpObserverImpl::preTransactionPrepare( + OperationContext* opCtx, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + Date_t wallClockTime) { const auto& statements = transactionOperations.getOperationsForOpObserver(); writeChangeStreamPreImagesForTransaction( - opCtx, statements, applyOpsOplogSlotAndOperationAssignment, wallClockTime); - return std::make_unique( - std::move(applyOpsOplogSlotAndOperationAssignment)); + opCtx, statements, applyOpsOperationAssignment, wallClockTime); } void OpObserverImpl::onTransactionPrepare( @@ -2072,117 +2001,113 @@ void OpObserverImpl::onTransactionPrepare( invariant(reservedSlots.size() >= statements.size()); TransactionParticipant::SideTransactionBlock sideTxn(opCtx); - writeConflictRetry( - opCtx, "onTransactionPrepare", NamespaceString::kRsOplogNamespace.ns(), [&] { + writeConflictRetry(opCtx, "onTransactionPrepare", NamespaceString::kRsOplogNamespace, [&] { + // Writes to the oplog only require a Global intent lock. Guaranteed by + // OplogSlotReserver. 
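By the end of this hunk, OpObserverImpl::onUnpreparedTransactionCommit() no longer reserves oplog slots or computes the "applyOps" assignment itself; the caller supplies both and can read the commit OpTime back through the optional OpStateAccumulator. A hedged sketch of the new calling sequence, paraphrased from the commitUnpreparedTransaction() test helper further down in this diff; it relies on server-internal types (OplogSlot, TransactionOperations, OpStateAccumulator, repl::getNextOpTimes), so it is illustrative rather than standalone:

```cpp
// Sketch only: 'opCtx', 'txnOps', and 'opObserver' are assumed to be in scope.
std::vector<OplogSlot> reservedSlots;
if (!txnOps.isEmpty()) {
    // One slot per statement plus one per pre/post image, now reserved by the caller.
    reservedSlots = repl::getNextOpTimes(
        opCtx, txnOps.numOperations() + txnOps.getNumberOfPrePostImagesToWrite());
}

// The caller also decides how the operations are split across "applyOps" entries.
auto assignment = txnOps.getApplyOpsInfo(
    reservedSlots,
    getMaxNumberOfTransactionOperationsInSingleOplogEntry(),
    getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes(),
    /*prepare=*/false);

OpStateAccumulator accumulator;
opObserver.onUnpreparedTransactionCommit(opCtx, reservedSlots, txnOps, assignment, &accumulator);
// accumulator.opTime.writeOpTime now carries the OpTime of the last "applyOps" entry.
```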
+ invariant(opCtx->lockState()->isWriteLocked()); + + WriteUnitOfWork wuow(opCtx); + // It is possible that the transaction resulted in no changes, In that case, we + // should not write any operations other than the prepare oplog entry. + if (!statements.empty()) { + // Storage transaction commit is the last place inside a transaction that can + // throw an exception. In order to safely allow exceptions to be thrown at that + // point, this function must be called from an outer WriteUnitOfWork in order to + // be rolled back upon reaching the exception. + invariant(opCtx->lockState()->inAWriteUnitOfWork()); + // Writes to the oplog only require a Global intent lock. Guaranteed by // OplogSlotReserver. invariant(opCtx->lockState()->isWriteLocked()); - WriteUnitOfWork wuow(opCtx); - // It is possible that the transaction resulted in no changes, In that case, we - // should not write any operations other than the prepare oplog entry. - if (!statements.empty()) { - // Storage transaction commit is the last place inside a transaction that can - // throw an exception. In order to safely allow exceptions to be thrown at that - // point, this function must be called from an outer WriteUnitOfWork in order to - // be rolled back upon reaching the exception. - invariant(opCtx->lockState()->inAWriteUnitOfWork()); - - // Writes to the oplog only require a Global intent lock. Guaranteed by - // OplogSlotReserver. - invariant(opCtx->lockState()->isWriteLocked()); - - if (applyOpsOperationAssignment.applyOpsEntries.size() > 1U) { - // Partial transactions create/reserve multiple oplog entries in the same - // WriteUnitOfWork. Because of this, such transactions will set multiple - // timestamps, violating the multi timestamp constraint. It's safe to ignore - // the multi timestamp constraints here as additional rollback logic is in - // place for this case. See SERVER-48771. - opCtx->recoveryUnit()->ignoreAllMultiTimestampConstraints(); - } - - // This is set for every oplog entry, except for the last one, in the applyOps - // chain of an unprepared multi-doc transaction. - // For a single prepare oplog entry, choose the last oplog slot for the first - // optime of the transaction. The first optime corresponds to the 'startOpTime' - // field in SessionTxnRecord that is persisted in config.transactions. - // See SERVER-40678. - auto startOpTime = applyOpsOperationAssignment.applyOpsEntries.size() == 1U - ? reservedSlots.back() - : reservedSlots.front(); - - auto logApplyOpsForPreparedTransaction = - [opCtx, oplogWriter = _oplogWriter.get(), startOpTime]( - repl::MutableOplogEntry* oplogEntry, - bool firstOp, - bool lastOp, - std::vector stmtIdsWritten) { - return logApplyOps(opCtx, - oplogEntry, - /*txnState=*/ - (lastOp ? DurableTxnStateEnum::kPrepared - : DurableTxnStateEnum::kInProgress), - startOpTime, - std::move(stmtIdsWritten), - /*updateTxnTable=*/(firstOp || lastOp), - oplogWriter); - }; - - // We had reserved enough oplog slots for the worst case where each operation - // produced one oplog entry. When operations are smaller and can be packed, we - // will waste the extra slots. The implicit prepare oplog entry will still use - // the last reserved slot, because the transaction participant has already used - // that as the prepare time. 
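The invariants in the hunk above hinge on one property of WriteUnitOfWork: work performed inside it is rolled back unless commit() is reached, which is what makes it safe for storage-transaction commit to throw at the very end. A self-contained toy illustrating that RAII behaviour; WriteUnitOfWorkSketch is a stand-in, not the server class:

```cpp
#include <functional>
#include <utility>

// Toy stand-in for the RAII guarantee the invariants above rely on: changes made
// inside the unit of work are undone unless commit() runs before destruction.
class WriteUnitOfWorkSketch {
public:
    explicit WriteUnitOfWorkSketch(std::function<void()> rollback)
        : _rollback(std::move(rollback)) {}
    ~WriteUnitOfWorkSketch() {
        if (!_committed)
            _rollback();  // an exception thrown before commit() unwinds through here
    }
    void commit() {
        _committed = true;
    }

private:
    std::function<void()> _rollback;
    bool _committed = false;
};
```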
- boost::optional imageToWrite; - invariant(applyOpsOperationAssignment.prepare); - (void)transactionOperations.logOplogEntries(reservedSlots, - applyOpsOperationAssignment, - wallClockTime, - logApplyOpsForPreparedTransaction, - &imageToWrite); - if (imageToWrite) { - writeToImageCollection(opCtx, *opCtx->getLogicalSessionId(), *imageToWrite); - } - } else { - // Log an empty 'prepare' oplog entry. - // We need to have at least one reserved slot. - invariant(reservedSlots.size() > 0); - BSONObjBuilder applyOpsBuilder; - BSONArrayBuilder opsArray(applyOpsBuilder.subarrayStart("applyOps"_sd)); - opsArray.done(); - applyOpsBuilder.append("prepare", true); - - auto oplogSlot = reservedSlots.front(); - MutableOplogEntry oplogEntry; - oplogEntry.setOpType(repl::OpTypeEnum::kCommand); - oplogEntry.setNss(NamespaceString::kAdminCommandNamespace); - oplogEntry.setOpTime(oplogSlot); - oplogEntry.setPrevWriteOpTimeInTransaction(repl::OpTime()); - oplogEntry.setObject(applyOpsBuilder.done()); - oplogEntry.setWallClockTime(wallClockTime); - - // TODO SERVER-69286: set the top-level tenantId here - - logApplyOps(opCtx, - &oplogEntry, - DurableTxnStateEnum::kPrepared, - /*startOpTime=*/oplogSlot, - /*stmtIdsWritten=*/{}, - /*updateTxnTable=*/true, - _oplogWriter.get()); + if (applyOpsOperationAssignment.applyOpsEntries.size() > 1U) { + // Partial transactions create/reserve multiple oplog entries in the same + // WriteUnitOfWork. Because of this, such transactions will set multiple + // timestamps, violating the multi timestamp constraint. It's safe to ignore + // the multi timestamp constraints here as additional rollback logic is in + // place for this case. See SERVER-48771. + opCtx->recoveryUnit()->ignoreAllMultiTimestampConstraints(); } - wuow.commit(); - }); - } - shardObserveTransactionPrepareOrUnpreparedCommit(opCtx, statements, prepareOpTime); + // This is set for every oplog entry, except for the last one, in the applyOps + // chain of an unprepared multi-doc transaction. + // For a single prepare oplog entry, choose the last oplog slot for the first + // optime of the transaction. The first optime corresponds to the 'startOpTime' + // field in SessionTxnRecord that is persisted in config.transactions. + // See SERVER-40678. + auto startOpTime = applyOpsOperationAssignment.applyOpsEntries.size() == 1U + ? reservedSlots.back() + : reservedSlots.front(); + + auto logApplyOpsForPreparedTransaction = + [opCtx, oplogWriter = _oplogWriter.get(), startOpTime]( + repl::MutableOplogEntry* oplogEntry, + bool firstOp, + bool lastOp, + std::vector stmtIdsWritten) { + return logApplyOps(opCtx, + oplogEntry, + /*txnState=*/ + (lastOp ? DurableTxnStateEnum::kPrepared + : DurableTxnStateEnum::kInProgress), + startOpTime, + std::move(stmtIdsWritten), + /*updateTxnTable=*/(firstOp || lastOp), + oplogWriter); + }; + + // We had reserved enough oplog slots for the worst case where each operation + // produced one oplog entry. When operations are smaller and can be packed, we + // will waste the extra slots. The implicit prepare oplog entry will still use + // the last reserved slot, because the transaction participant has already used + // that as the prepare time. 
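The comment block above pins down the 'startOpTime' recorded in config.transactions: when the prepared transaction fits in a single oplog entry, use the last reserved slot (the transaction participant already used it as the prepare time); when it spans multiple "applyOps" entries, use the first reserved slot. A minimal sketch of that rule with a stand-in OpTime type:

```cpp
#include <cstddef>
#include <stdexcept>
#include <vector>

// Illustrative OpTime stand-in.
struct OpTime {
    long long ts = 0;
};

// Single prepare entry: start at the last reserved slot (the prepare time).
// Multiple "applyOps" entries: start at the first reserved slot.
OpTime chooseStartOpTime(const std::vector<OpTime>& reservedSlots, std::size_t numApplyOpsEntries) {
    if (reservedSlots.empty())
        throw std::invalid_argument("at least one oplog slot must be reserved");
    return numApplyOpsEntries == 1 ? reservedSlots.back() : reservedSlots.front();
}
```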
+ boost::optional imageToWrite; + invariant(applyOpsOperationAssignment.prepare); + (void)transactionOperations.logOplogEntries(reservedSlots, + applyOpsOperationAssignment, + wallClockTime, + logApplyOpsForPreparedTransaction, + &imageToWrite); + if (imageToWrite) { + writeToImageCollection(opCtx, *opCtx->getLogicalSessionId(), *imageToWrite); + } + } else { + // Log an empty 'prepare' oplog entry. + // We need to have at least one reserved slot. + invariant(reservedSlots.size() > 0); + BSONObjBuilder applyOpsBuilder; + BSONArrayBuilder opsArray(applyOpsBuilder.subarrayStart("applyOps"_sd)); + opsArray.done(); + applyOpsBuilder.append("prepare", true); + + auto oplogSlot = reservedSlots.front(); + MutableOplogEntry oplogEntry; + oplogEntry.setOpType(repl::OpTypeEnum::kCommand); + oplogEntry.setNss(NamespaceString::kAdminCommandNamespace); + oplogEntry.setOpTime(oplogSlot); + oplogEntry.setPrevWriteOpTimeInTransaction(repl::OpTime()); + oplogEntry.setObject(applyOpsBuilder.done()); + oplogEntry.setWallClockTime(wallClockTime); + + // TODO SERVER-69286: set the top-level tenantId here + + logApplyOps(opCtx, + &oplogEntry, + DurableTxnStateEnum::kPrepared, + /*startOpTime=*/oplogSlot, + /*stmtIdsWritten=*/{}, + /*updateTxnTable=*/true, + _oplogWriter.get()); + } + wuow.commit(); + }); + } } void OpObserverImpl::onTransactionPrepareNonPrimary(OperationContext* opCtx, + const LogicalSessionId& lsid, const std::vector& statements, - const repl::OpTime& prepareOpTime) { - shardObserveNonPrimaryTransactionPrepare(opCtx, statements, prepareOpTime); -} + const repl::OpTime& prepareOpTime) {} void OpObserverImpl::onTransactionAbort(OperationContext* opCtx, boost::optional abortOplogEntryOpTime) { @@ -2215,7 +2140,8 @@ void OpObserverImpl::onModifyCollectionShardingIndexCatalog(OperationContext* op const UUID& uuid, BSONObj opDoc) { repl::MutableOplogEntry oplogEntry; - auto obj = BSON(kShardingIndexCatalogOplogEntryName << nss.toString()).addFields(opDoc); + auto obj = BSON(kShardingIndexCatalogOplogEntryName << NamespaceStringUtil::serialize(nss)) + .addFields(opDoc); oplogEntry.setOpType(repl::OpTypeEnum::kCommand); oplogEntry.setNss(nss); oplogEntry.setUuid(uuid); @@ -2224,8 +2150,8 @@ void OpObserverImpl::onModifyCollectionShardingIndexCatalog(OperationContext* op logOperation(opCtx, &oplogEntry, true, _oplogWriter.get()); } -void OpObserverImpl::_onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) { +void OpObserverImpl::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { // Reset the key manager cache. auto validator = LogicalTimeValidator::get(opCtx); if (validator) { @@ -2240,17 +2166,6 @@ void OpObserverImpl::_onReplicationRollback(OperationContext* opCtx, // Force the default read/write concern cache to reload on next access in case the defaults // document was rolled back. 
ReadWriteConcernDefaults::get(opCtx).invalidate(); - - stdx::unordered_set timeseriesNamespaces; - for (const auto& ns : rbInfo.rollbackNamespaces) { - if (ns.isTimeseriesBucketsCollection()) { - timeseriesNamespaces.insert(ns.getTimeseriesViewNamespace()); - } - } - auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); - clear(bucketCatalog, - [timeseriesNamespaces = std::move(timeseriesNamespaces)]( - const NamespaceString& bucketNs) { return timeseriesNamespaces.contains(bucketNs); }); } } // namespace mongo diff --git a/src/mongo/db/op_observer/op_observer_impl.h b/src/mongo/db/op_observer/op_observer_impl.h index cb31794a88c36..bfe1f1064a0c4 100644 --- a/src/mongo/db/op_observer/op_observer_impl.h +++ b/src/mongo/db/op_observer/op_observer_impl.h @@ -29,12 +29,32 @@ #pragma once +#include +#include +#include #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/op_observer/op_observer_util.h" #include "mongo/db/op_observer/oplog_writer.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/transaction/transaction_operations.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -53,6 +73,10 @@ class OpObserverImpl : public OpObserver { OpObserverImpl(std::unique_ptr oplogWriter); virtual ~OpObserverImpl() = default; + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kAll, NamespaceFilter::kAll}; + } + void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, const NamespaceString& nss, const UUID& uuid, @@ -102,7 +126,8 @@ class OpObserverImpl : public OpObserver { std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) final; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; void onInsertGlobalIndexKey(OperationContext* opCtx, const NamespaceString& globalIndexNss, @@ -116,14 +141,19 @@ class OpObserverImpl : public OpObserver { const BSONObj& key, const BSONObj& docKey) final; - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void onInternalOpMessage(OperationContext* opCtx, const NamespaceString& nss, const boost::optional& uuid, @@ -147,11 +177,6 @@ class OpObserverImpl : public OpObserver { const CollectionOptions& oldCollOptions, boost::optional indexInfo) final; void 
onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final; - repl::OpTime onDropCollection(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid, - std::uint64_t numRecords, - CollectionDropType dropType) final; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, @@ -163,13 +188,6 @@ class OpObserverImpl : public OpObserver { const UUID& uuid, const std::string& indexName, const BSONObj& indexInfo) final; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final; repl::OpTime preRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, @@ -184,13 +202,6 @@ class OpObserverImpl : public OpObserver { const UUID& uuid, const boost::optional& dropTargetUUID, bool stayTemp) final; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final; void onRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, @@ -214,8 +225,12 @@ class OpObserverImpl : public OpObserver { const NamespaceString& collectionName, const UUID& uuid) final; void onTransactionStart(OperationContext* opCtx) final; - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final; + void onUnpreparedTransactionCommit( + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator = nullptr) final; void onBatchedWriteStart(OperationContext* opCtx) final; void onBatchedWriteCommit(OperationContext* opCtx) final; void onBatchedWriteAbort(OperationContext* opCtx) final; @@ -225,10 +240,10 @@ class OpObserverImpl : public OpObserver { Timestamp commitTimestamp, const std::vector& statements) noexcept final; - std::unique_ptr preTransactionPrepare( + void preTransactionPrepare( OperationContext* opCtx, - const std::vector& reservedSlots, const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, Date_t wallClockTime) final; void onTransactionPrepare( @@ -240,51 +255,17 @@ class OpObserverImpl : public OpObserver { Date_t wallClockTime) final; void onTransactionPrepareNonPrimary(OperationContext* opCtx, + const LogicalSessionId& lsid, const std::vector& statements, const repl::OpTime& prepareOpTime) final; void onTransactionAbort(OperationContext* opCtx, boost::optional abortOplogEntryOpTime) final; + void onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; void onMajorityCommitPointUpdate(ServiceContext* service, const repl::OpTime& newCommitPoint) final {} private: - virtual void shardObserveAboutToDelete(OperationContext* opCtx, - NamespaceString const& nss, - BSONObj const& doc) {} - virtual void shardObserveInsertsOp(OperationContext* opCtx, - const NamespaceString& nss, - std::vector::const_iterator first, - std::vector::const_iterator last, - const std::vector& opTimeList, - const ShardingWriteRouter& 
shardingWriteRouter, - bool fromMigrate, - bool inMultiDocumentTransaction){}; - virtual void shardObserveUpdateOp(OperationContext* opCtx, - const NamespaceString& nss, - boost::optional preImageDoc, - const BSONObj& postImageDoc, - const repl::OpTime& opTime, - const ShardingWriteRouter& shardingWriteRouter, - const repl::OpTime& prePostImageOpTime, - const bool inMultiDocumentTransaction) {} - virtual void shardObserveDeleteOp(OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& documentKey, - const repl::OpTime& opTime, - const ShardingWriteRouter& shardingWriteRouter, - const repl::OpTime& preImageOpTime, - const bool inMultiDocumentTransaction) {} - virtual void shardObserveTransactionPrepareOrUnpreparedCommit( - OperationContext* opCtx, - const std::vector& stmts, - const repl::OpTime& prepareOrCommitOptime) {} - virtual void shardObserveNonPrimaryTransactionPrepare( - OperationContext* opCtx, - const std::vector& stmts, - const repl::OpTime& prepareOrCommitOptime) {} - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; - std::unique_ptr _oplogWriter; }; diff --git a/src/mongo/db/op_observer/op_observer_impl_test.cpp b/src/mongo/db/op_observer/op_observer_impl_test.cpp index f9d12984f34f3..abe0bcdd8b580 100644 --- a/src/mongo/db/op_observer/op_observer_impl_test.cpp +++ b/src/mongo/db/op_observer/op_observer_impl_test.cpp @@ -27,17 +27,44 @@ * it in the license file. */ +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection_options_gen.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/import_collection_oplog_entry_gen.h" +#include "mongo/db/catalog/local_oplog_info.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/concurrency/locker_noop.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/keys_collection_client_sharded.h" -#include "mongo/db/keys_collection_manager.h" -#include "mongo/db/logical_time_validator.h" -#include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/batched_write_context.h" #include "mongo/db/op_observer/op_observer_impl.h" @@ -45,28 +72,52 @@ #include "mongo/db/op_observer/op_observer_util.h" #include "mongo/db/op_observer/oplog_writer_impl.h" #include "mongo/db/pipeline/change_stream_preimage_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" -#include "mongo/db/repl/apply_ops.h" +#include "mongo/db/read_write_concern_defaults_gen.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/apply_ops_command_info.h" #include 
"mongo/db/repl/image_collection_entry_gen.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/oplog_interface.h" #include "mongo/db/repl/oplog_interface_local.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" +#include "mongo/db/repl/tenant_migration_donor_access_blocker.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/session.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction/transaction_participant_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -136,7 +187,24 @@ void commitUnpreparedTransaction(OperationContext* opCtx, OpObserverType& opObse auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), txnParticipant.getNumberOfPrePostImagesToWriteForTest()); - opObserver.onUnpreparedTransactionCommit(opCtx, *txnOps); + + // OpObserverImpl no longer reserves optimes when committing unprepared transactions. + // This is now done in TransactionParticipant::Participant::commitUnpreparedTransaction() + // prior to notifying the OpObserver. For test coverage specific to empty unprepared + // multi-document transactions, see TxnParticipantTest::EmptyUnpreparedTransactionCommit. 
+ std::vector reservedSlots; + if (!txnOps->isEmpty()) { + reservedSlots = repl::getNextOpTimes( + opCtx, txnOps->numOperations() + txnOps->getNumberOfPrePostImagesToWrite()); + } + + auto applyOpsOplogSlotAndOperationAssignment = + txnOps->getApplyOpsInfo(reservedSlots, + getMaxNumberOfTransactionOperationsInSingleOplogEntry(), + getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes(), + /*prepare=*/false); + opObserver.onUnpreparedTransactionCommit( + opCtx, reservedSlots, *txnOps, applyOpsOplogSlotAndOperationAssignment); } std::vector reserveOpTimesInSideTransaction(OperationContext* opCtx, size_t count) { @@ -203,7 +271,7 @@ class OpObserverTest : public ServiceContextMongoDTest { void reset(OperationContext* opCtx, NamespaceString nss, boost::optional uuid = boost::none) const { - writeConflictRetry(opCtx, "deleteAll", nss.ns(), [&] { + writeConflictRetry(opCtx, "deleteAll", nss, [&] { opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp); opCtx->recoveryUnit()->abandonSnapshot(); @@ -283,8 +351,10 @@ class OpObserverTest : public ServiceContextMongoDTest { bool didWriteImageEntryToSideCollection(OperationContext* opCtx, const LogicalSessionId& sessionId) { AutoGetCollection sideCollection(opCtx, NamespaceString::kConfigImagesNamespace, MODE_IS); - const auto imageEntry = Helpers::findOneForTesting( - opCtx, sideCollection.getCollection(), BSON("_id" << sessionId.toBSON()), false); + const auto imageEntry = Helpers::findOneForTesting(opCtx, + sideCollection.getCollection(), + BSON("_id" << sessionId.toBSON()), + /*invariantOnError=*/false); return !imageEntry.isEmpty(); } @@ -292,28 +362,39 @@ class OpObserverTest : public ServiceContextMongoDTest { const ChangeStreamPreImageId preImageId) { AutoGetCollection preImagesCollection( opCtx, NamespaceString::makePreImageCollectionNSS(boost::none), LockMode::MODE_IS); - const auto preImage = Helpers::findOneForTesting( - opCtx, preImagesCollection.getCollection(), BSON("_id" << preImageId.toBSON()), false); + const auto preImage = Helpers::findOneForTesting(opCtx, + preImagesCollection.getCollection(), + BSON("_id" << preImageId.toBSON()), + /*invariantOnError=*/false); return !preImage.isEmpty(); } repl::ImageEntry getImageEntryFromSideCollection(OperationContext* opCtx, const LogicalSessionId& sessionId) { AutoGetCollection sideCollection(opCtx, NamespaceString::kConfigImagesNamespace, MODE_IS); - return repl::ImageEntry::parse( - IDLParserContext("image entry"), - Helpers::findOneForTesting( - opCtx, sideCollection.getCollection(), BSON("_id" << sessionId.toBSON()))); + auto doc = Helpers::findOneForTesting(opCtx, + sideCollection.getCollection(), + BSON("_id" << sessionId.toBSON()), + /*invariantOnError=*/false); + ASSERT_FALSE(doc.isEmpty()) + << "Change stream pre-image not found: " << sessionId.toBSON() + << " (pre-images collection: " << sideCollection->ns().toStringForErrorMsg() << ")"; + return repl::ImageEntry::parse(IDLParserContext("image entry"), doc); } SessionTxnRecord getTxnRecord(OperationContext* opCtx, const LogicalSessionId& sessionId) { AutoGetCollection configTransactions( opCtx, NamespaceString::kSessionTransactionsTableNamespace, MODE_IS); - return SessionTxnRecord::parse( - IDLParserContext("txn record"), - Helpers::findOneForTesting( - opCtx, configTransactions.getCollection(), BSON("_id" << sessionId.toBSON()))); + auto doc = Helpers::findOneForTesting(opCtx, + configTransactions.getCollection(), + BSON("_id" << sessionId.toBSON()), + /*invariantOnError=*/false); + 
ASSERT_FALSE(doc.isEmpty()) + << "Transaction not found for session: " << sessionId.toBSON() + << "(transactions collection: " << configTransactions->ns().toStringForErrorMsg() + << ")"; + return SessionTxnRecord::parse(IDLParserContext("txn record"), doc); } /** @@ -329,8 +410,12 @@ class OpObserverTest : public ServiceContextMongoDTest { opCtx, NamespaceString::makePreImageCollectionNSS(boost::none), LockMode::MODE_IS); *container = Helpers::findOneForTesting(opCtx, preImagesCollection.getCollection(), - BSON("_id" << preImageId.toBSON())) - .getOwned(); + BSON("_id" << preImageId.toBSON()), + /*invariantOnError=*/false); + ASSERT_FALSE(container->isEmpty()) + << "Change stream pre-image not found: " << preImageId.toBSON() + << " (pre-images collection: " << preImagesCollection->ns().toStringForErrorMsg() + << ")"; return ChangeStreamPreImage::parse(IDLParserContext("pre-image"), *container); } @@ -610,8 +695,12 @@ TEST_F(OpObserverTest, OnDropCollectionReturnsDropOpTime) { { AutoGetDb autoDb(opCtx.get(), nss.dbName(), MODE_X); WriteUnitOfWork wunit(opCtx.get()); - opObserver.onDropCollection( - opCtx.get(), nss, uuid, 0U, OpObserver::CollectionDropType::kTwoPhase); + opObserver.onDropCollection(opCtx.get(), + nss, + uuid, + 0U, + OpObserver::CollectionDropType::kTwoPhase, + /*markFromMigrate=*/false); dropOpTime = OpObserver::Times::get(opCtx.get()).reservedOpTimes.front(); wunit.commit(); } @@ -641,10 +730,14 @@ TEST_F(OpObserverTest, OnDropCollectionInlcudesTenantId) { // Write to the oplog. { - AutoGetDb autoDb(opCtx.get(), nss.db(), MODE_X); + AutoGetDb autoDb(opCtx.get(), nss.dbName(), MODE_X); WriteUnitOfWork wunit(opCtx.get()); - opObserver.onDropCollection( - opCtx.get(), nss, uuid, 0U, OpObserver::CollectionDropType::kTwoPhase); + opObserver.onDropCollection(opCtx.get(), + nss, + uuid, + 0U, + OpObserver::CollectionDropType::kTwoPhase, + /*markFromMigrate=*/false); wunit.commit(); } @@ -670,8 +763,14 @@ TEST_F(OpObserverTest, OnRenameCollectionReturnsRenameOpTime) { { AutoGetDb autoDb(opCtx.get(), sourceNss.dbName(), MODE_X); WriteUnitOfWork wunit(opCtx.get()); - opObserver.onRenameCollection( - opCtx.get(), sourceNss, targetNss, uuid, dropTargetUuid, 0U, stayTemp); + opObserver.onRenameCollection(opCtx.get(), + sourceNss, + targetNss, + uuid, + dropTargetUuid, + 0U, + stayTemp, + /*markFromMigrate=*/false); renameOpTime = OpObserver::Times::get(opCtx.get()).reservedOpTimes.front(); wunit.commit(); } @@ -682,8 +781,8 @@ TEST_F(OpObserverTest, OnRenameCollectionReturnsRenameOpTime) { ASSERT_EQUALS(uuid, unittest::assertGet(UUID::parse(oplogEntry["ui"]))); auto o = oplogEntry.getObjectField("o"); auto oExpected = - BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns() << "stayTemp" - << stayTemp << "dropTarget" << dropTargetUuid); + BSON("renameCollection" << sourceNss.ns_forTest() << "to" << targetNss.ns_forTest() + << "stayTemp" << stayTemp << "dropTarget" << dropTargetUuid); ASSERT_BSONOBJ_EQ(oExpected, o); // Ensure that the rename optime returned is the same as the last optime in the ReplClientInfo. @@ -705,10 +804,16 @@ TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOff) { // Write to the oplog. 
{ - AutoGetDb autoDb(opCtx.get(), sourceNss.db(), MODE_X); + AutoGetDb autoDb(opCtx.get(), sourceNss.dbName(), MODE_X); WriteUnitOfWork wunit(opCtx.get()); - opObserver.onRenameCollection( - opCtx.get(), sourceNss, targetNss, uuid, dropTargetUuid, 0U, stayTemp); + opObserver.onRenameCollection(opCtx.get(), + sourceNss, + targetNss, + uuid, + dropTargetUuid, + 0U, + stayTemp, + /*markFromMigrate=*/false); wunit.commit(); } @@ -720,9 +825,10 @@ TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOff) { ASSERT_FALSE(oplogEntry.getTid()); ASSERT_EQUALS(sourceNss.getCommandNS(), oplogEntry.getNss()); - auto oExpected = BSON("renameCollection" << sourceNss.toStringWithTenantId() << "to" - << targetNss.toStringWithTenantId() << "stayTemp" - << stayTemp << "dropTarget" << dropTargetUuid); + auto oExpected = + BSON("renameCollection" << sourceNss.toStringWithTenantId_forTest() << "to" + << targetNss.toStringWithTenantId_forTest() << "stayTemp" + << stayTemp << "dropTarget" << dropTargetUuid); ASSERT_BSONOBJ_EQ(oExpected, oplogEntry.getObject()); } @@ -741,10 +847,16 @@ TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOn) { // Write to the oplog. { - AutoGetDb autoDb(opCtx.get(), sourceNss.db(), MODE_X); + AutoGetDb autoDb(opCtx.get(), sourceNss.dbName(), MODE_X); WriteUnitOfWork wunit(opCtx.get()); - opObserver.onRenameCollection( - opCtx.get(), sourceNss, targetNss, uuid, dropTargetUuid, 0U, stayTemp); + opObserver.onRenameCollection(opCtx.get(), + sourceNss, + targetNss, + uuid, + dropTargetUuid, + 0U, + stayTemp, + /*markFromMigrate=*/false); wunit.commit(); } @@ -756,9 +868,9 @@ TEST_F(OpObserverTest, OnRenameCollectionIncludesTenantIdFeatureFlagOn) { ASSERT_EQUALS(tid, *oplogEntry.getTid()); ASSERT_EQUALS(sourceNss.getCommandNS(), oplogEntry.getNss()); - auto oExpected = - BSON("renameCollection" << sourceNss.toString() << "to" << targetNss.toString() - << "stayTemp" << stayTemp << "dropTarget" << dropTargetUuid); + auto oExpected = BSON("renameCollection" << sourceNss.toString_forTest() << "to" + << targetNss.toString_forTest() << "stayTemp" + << stayTemp << "dropTarget" << dropTargetUuid); ASSERT_BSONOBJ_EQ(oExpected, oplogEntry.getObject()); } @@ -777,7 +889,8 @@ TEST_F(OpObserverTest, OnRenameCollectionOmitsDropTargetFieldIfDropTargetUuidIsN { AutoGetDb autoDb(opCtx.get(), sourceNss.dbName(), MODE_X); WriteUnitOfWork wunit(opCtx.get()); - opObserver.onRenameCollection(opCtx.get(), sourceNss, targetNss, uuid, {}, 0U, stayTemp); + opObserver.onRenameCollection( + opCtx.get(), sourceNss, targetNss, uuid, {}, 0U, stayTemp, /*markFromMigrate=*/false); wunit.commit(); } @@ -786,8 +899,8 @@ TEST_F(OpObserverTest, OnRenameCollectionOmitsDropTargetFieldIfDropTargetUuidIsN // Ensure that renameCollection fields were properly added to oplog entry. ASSERT_EQUALS(uuid, unittest::assertGet(UUID::parse(oplogEntry["ui"]))); auto o = oplogEntry.getObjectField("o"); - auto oExpected = BSON("renameCollection" << sourceNss.ns() << "to" << targetNss.ns() - << "stayTemp" << stayTemp); + auto oExpected = BSON("renameCollection" << sourceNss.ns_forTest() << "to" + << targetNss.ns_forTest() << "stayTemp" << stayTemp); ASSERT_BSONOBJ_EQ(oExpected, o); } @@ -815,7 +928,7 @@ TEST_F(OpObserverTest, ImportCollectionOplogEntry) { long long numRecords = 1; long long dataSize = 2; // A dummy invalid catalog entry. We do not need a valid catalog entry for this test. 
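The pair of tenant-id rename tests above capture the serialization rule the expected oplog entries follow: when the tenant id is not carried in a separate "tid" field, it is folded into the namespace string as a prefix; when it is, the plain db.coll form is used. A simplified stand-in of that choice; the underscore-prefixed form is an assumption for illustration, not the server's NamespaceStringUtil implementation:

```cpp
#include <optional>
#include <string>

// Illustrative only: renders a namespace with or without a tenant prefix
// depending on whether the tenant id travels in its own "tid" field.
std::string renderNamespace(const std::optional<std::string>& tenantId,
                            const std::string& db,
                            const std::string& coll,
                            bool tenantIdInSeparateField) {
    const std::string ns = db + "." + coll;
    if (tenantId && !tenantIdInSeparateField)
        return *tenantId + "_" + ns;  // assumed prefix form, for illustration
    return ns;
}
```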
- auto catalogEntry = BSON("ns" << nss.ns() << "ident" + auto catalogEntry = BSON("ns" << nss.ns_forTest() << "ident" << "collection-7-1792004489479993697"); auto storageMetadata = BSON("storage" << "metadata"); @@ -858,7 +971,7 @@ TEST_F(OpObserverTest, ImportCollectionOplogEntryIncludesTenantId) { long long numRecords = 1; long long dataSize = 2; // A dummy invalid catalog entry. We do not need a valid catalog entry for this test. - auto catalogEntry = BSON("ns" << nss.ns() << "ident" + auto catalogEntry = BSON("ns" << nss.ns_forTest() << "ident" << "collection-7-1792004489479993697"); auto storageMetadata = BSON("storage" << "metadata"); @@ -866,7 +979,7 @@ TEST_F(OpObserverTest, ImportCollectionOplogEntryIncludesTenantId) { // Write to the oplog. { - AutoGetDb autoDb(opCtx.get(), nss.db(), MODE_X); + AutoGetDb autoDb(opCtx.get(), nss.dbName(), MODE_X); WriteUnitOfWork wunit(opCtx.get()); opObserver.onImportCollection(opCtx.get(), importUUID, @@ -970,8 +1083,8 @@ TEST_F(OpObserverTest, SingleStatementDeleteTestIncludesTenantId) { // This test does not call `OpObserver::aboutToDelete`. That method has the side-effect // of setting of `documentKey` on the delete for sharding purposes. // `OpObserverImpl::onDelete` asserts its existence. - repl::documentKeyDecoration(opCtx.get()).emplace(BSON("_id" << 0), boost::none); OplogDeleteEntryArgs deleteEntryArgs; + documentKeyDecoration(deleteEntryArgs).emplace(BSON("_id" << 0), boost::none); opObserver.onDelete(opCtx.get(), *locks, kUninitializedStmtId, deleteEntryArgs); wuow.commit(); @@ -1071,10 +1184,11 @@ TEST_F(OpObserverTest, MultipleAboutToDeleteAndOnDelete) { auto opCtx = cc().makeOperationContext(); AutoGetCollection autoColl(opCtx.get(), nss3, MODE_X); WriteUnitOfWork wunit(opCtx.get()); - opObserver.aboutToDelete(opCtx.get(), *autoColl, BSON("_id" << 1)); - opObserver.onDelete(opCtx.get(), *autoColl, kUninitializedStmtId, {}); - opObserver.aboutToDelete(opCtx.get(), *autoColl, BSON("_id" << 1)); - opObserver.onDelete(opCtx.get(), *autoColl, kUninitializedStmtId, {}); + OplogDeleteEntryArgs args; + opObserver.aboutToDelete(opCtx.get(), *autoColl, BSON("_id" << 1), &args); + opObserver.onDelete(opCtx.get(), *autoColl, kUninitializedStmtId, args); + opObserver.aboutToDelete(opCtx.get(), *autoColl, BSON("_id" << 1), &args); + opObserver.onDelete(opCtx.get(), *autoColl, kUninitializedStmtId, args); } DEATH_TEST_REGEX_F(OpObserverTest, @@ -1083,7 +1197,8 @@ DEATH_TEST_REGEX_F(OpObserverTest, OpObserverImpl opObserver(std::make_unique()); auto opCtx = cc().makeOperationContext(); AutoGetCollection autoColl(opCtx.get(), nss3, MODE_IX); - opObserver.onDelete(opCtx.get(), *autoColl, kUninitializedStmtId, {}); + OplogDeleteEntryArgs args; + opObserver.onDelete(opCtx.get(), *autoColl, kUninitializedStmtId, args); } DEATH_TEST_REGEX_F(OpObserverTest, @@ -1092,7 +1207,8 @@ DEATH_TEST_REGEX_F(OpObserverTest, OpObserverImpl opObserver(std::make_unique()); auto opCtx = cc().makeOperationContext(); AutoGetCollection autoColl(opCtx.get(), nss3, MODE_IX); - opObserver.aboutToDelete(opCtx.get(), *autoColl, {}); + OplogDeleteEntryArgs args; + opObserver.aboutToDelete(opCtx.get(), *autoColl, /*doc=*/{}, &args); } DEATH_TEST_REGEX_F(OpObserverTest, @@ -1164,13 +1280,16 @@ class OpObserverTxnParticipantTest : public OpObserverTest { auto txnOps = txnParticipant().retrieveCompletedTransactionOperations(opCtx()); auto currentTime = Date_t::now(); auto applyOpsAssignment = - opObserver().preTransactionPrepare(opCtx(), reservedSlots, *txnOps, currentTime); + 
txnOps->getApplyOpsInfo(reservedSlots, + getMaxNumberOfTransactionOperationsInSingleOplogEntry(), + getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes(), + /*prepare=*/true); + opObserver().preTransactionPrepare(opCtx(), *txnOps, applyOpsAssignment, currentTime); opCtx()->recoveryUnit()->setPrepareTimestamp(prepareOpTime.getTimestamp()); - ASSERT(applyOpsAssignment); opObserver().onTransactionPrepare(opCtx(), reservedSlots, *txnOps, - *applyOpsAssignment, + applyOpsAssignment, numberOfPrePostImagesToWrite, currentTime); } @@ -1307,11 +1426,13 @@ TEST_F(OpObserverTransactionTest, TransactionalPrepareTest) { OplogUpdateEntryArgs update2(&updateArgs2, *autoColl2); opObserver().onUpdate(opCtx(), update2); + OplogDeleteEntryArgs args; opObserver().aboutToDelete(opCtx(), *autoColl1, BSON("_id" << 0 << "data" - << "x")); - opObserver().onDelete(opCtx(), *autoColl1, 0, {}); + << "x"), + &args); + opObserver().onDelete(opCtx(), *autoColl1, 0, args); // One reserved slot for each statement, plus the prepare. auto reservedSlots = reserveOpTimesInSideTransaction(opCtx(), 5); @@ -1326,30 +1447,30 @@ TEST_F(OpObserverTransactionTest, TransactionalPrepareTest) { checkCommonFields(oplogEntryObj); OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj)); auto o = oplogEntry.getObject(); - auto oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 0 << "data" - << "x") - << "o2" << BSON("_id" << 0)) - << BSON("op" - << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 1 << "data" - << "y") - << "o2" << BSON("_id" << 1)) - << BSON("op" - << "u" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("$set" << BSON("data" - << "y")) - << "o2" << BSON("_id" << 0)) - << BSON("op" - << "d" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 0))) - << "prepare" << true); + auto oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "i" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 0 << "data" + << "x") + << "o2" << BSON("_id" << 0)) + << BSON("op" + << "i" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 1 << "data" + << "y") + << "o2" << BSON("_id" << 1)) + << BSON("op" + << "u" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("$set" << BSON("data" + << "y")) + << "o2" << BSON("_id" << 0)) + << BSON("op" + << "d" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 0))) + << "prepare" << true); ASSERT_BSONOBJ_EQ(oExpected, o); ASSERT(oplogEntry.shouldPrepare()); ASSERT_EQ(oplogEntry.getTimestamp(), prepareOpTime.getTimestamp()); @@ -1420,8 +1541,8 @@ TEST_F(OpObserverTransactionTest, TransactionalPreparedCommitTest) { auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss.toString() << "ui" << uuid << "o" - << doc << "o2" << docKey)) + << "ns" << nss.toString_forTest() << "ui" << uuid + << "o" << doc << "o2" << docKey)) << "prepare" << true); ASSERT_BSONOBJ_EQ(oExpected, o); ASSERT(oplogEntry.shouldPrepare()); @@ -1486,8 +1607,8 @@ TEST_F(OpObserverTransactionTest, TransactionalPreparedAbortTest) { auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss.toString() << "ui" << uuid << "o" - << doc << "o2" << docKey)) + << "ns" << nss.toString_forTest() << "ui" << uuid + << "o" << doc << "o2" << docKey)) << "prepare" << true); ASSERT_BSONOBJ_EQ(oExpected, o); ASSERT(oplogEntry.shouldPrepare()); @@ 
-1640,7 +1761,7 @@ TEST_F(OpObserverTransactionTest, CommittingUnpreparedNonEmptyTransactionWritesT auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); opCtx()->getWriteUnitOfWork()->commit(); assertTxnRecord(txnNum(), {}, DurableTxnStateEnum::kCommitted); @@ -1653,7 +1774,7 @@ TEST_F(OpObserverTransactionTest, auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); txnParticipant.stashTransactionResources(opCtx()); @@ -1734,36 +1855,36 @@ TEST_F(OpObserverTransactionTest, TransactionalInsertTest) { /*defaultFromMigrate=*/false); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObj = getSingleOplogEntry(opCtx()); checkCommonFields(oplogEntryObj); OplogEntry oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj)); auto o = oplogEntry.getObject(); - auto oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 0 << "data" - << "x") - << "o2" << BSON("_id" << 0)) - << BSON("op" - << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 1 << "data" - << "y") - << "o2" << BSON("_id" << 1)) - << BSON("op" - << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 2 << "data" - << "z") - << "o2" << BSON("_id" << 2)) - << BSON("op" - << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 3 << "data" - << "w") - << "o2" << BSON("_id" << 3)))); + auto oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "i" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 0 << "data" + << "x") + << "o2" << BSON("_id" << 0)) + << BSON("op" + << "i" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 1 << "data" + << "y") + << "o2" << BSON("_id" << 1)) + << BSON("op" + << "i" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("_id" << 2 << "data" + << "z") + << "o2" << BSON("_id" << 2)) + << BSON("op" + << "i" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("_id" << 3 << "data" + << "w") + << "o2" << BSON("_id" << 3)))); ASSERT_BSONOBJ_EQ(oExpected, o); ASSERT(!oplogEntry.shouldPrepare()); ASSERT_FALSE(oplogEntryObj.hasField("prepare")); @@ -1809,7 +1930,7 @@ TEST_F(OpObserverTransactionTest, TransactionalInsertTestIncludesTenantId) { auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObj = getSingleOplogEntry(opCtx()); checkCommonFields(oplogEntryObj); @@ -1821,28 +1942,28 @@ TEST_F(OpObserverTransactionTest, TransactionalInsertTestIncludesTenantId) { BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" << "tid" << nss1.tenantId().value() << "ns" - << nss1.toString() << "ui" << uuid1 << "o" + << nss1.toString_forTest() << "ui" << uuid1 << 
"o" << BSON("_id" << 0 << "data" << "x") << "o2" << BSON("_id" << 0)) << BSON("op" << "i" << "tid" << nss1.tenantId().value() << "ns" - << nss1.toString() << "ui" << uuid1 << "o" + << nss1.toString_forTest() << "ui" << uuid1 << "o" << BSON("_id" << 1 << "data" << "y") << "o2" << BSON("_id" << 1)) << BSON("op" << "i" << "tid" << nss2.tenantId().value() << "ns" - << nss2.toString() << "ui" << uuid2 << "o" + << nss2.toString_forTest() << "ui" << uuid2 << "o" << BSON("_id" << 2 << "data" << "z") << "o2" << BSON("_id" << 2)) << BSON("op" << "i" << "tid" << nss2.tenantId().value() << "ns" - << nss2.toString() << "ui" << uuid2 << "o" + << nss2.toString_forTest() << "ui" << uuid2 << "o" << BSON("_id" << 3 << "data" << "w") << "o2" << BSON("_id" << 3)))); @@ -1888,23 +2009,23 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTest) { opObserver().onUpdate(opCtx(), update2); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntry = getSingleOplogEntry(opCtx()); checkCommonFields(oplogEntry); auto o = oplogEntry.getObjectField("o"); - auto oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "u" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("$set" << BSON("data" - << "x")) - << "o2" << BSON("_id" << 0)) - << BSON("op" - << "u" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("$set" << BSON("data" - << "y")) - << "o2" << BSON("_id" << 1)))); + auto oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "u" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("$set" << BSON("data" + << "x")) + << "o2" << BSON("_id" << 0)) + << BSON("op" + << "u" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("$set" << BSON("data" + << "y")) + << "o2" << BSON("_id" << 1)))); ASSERT_BSONOBJ_EQ(oExpected, o); ASSERT_FALSE(oplogEntry.hasField("prepare")); ASSERT_FALSE(oplogEntry.getBoolField("prepare")); @@ -1948,7 +2069,7 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTestIncludesTenantId) { auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObj = getSingleOplogEntry(opCtx()); checkCommonFields(oplogEntryObj); @@ -1960,14 +2081,14 @@ TEST_F(OpObserverTransactionTest, TransactionalUpdateTestIncludesTenantId) { BSON("applyOps" << BSON_ARRAY(BSON("op" << "u" << "tid" << nss1.tenantId().value() << "ns" - << nss1.toString() << "ui" << uuid1 << "o" + << nss1.toString_forTest() << "ui" << uuid1 << "o" << BSON("$set" << BSON("data" << "x")) << "o2" << BSON("_id" << 0)) << BSON("op" << "u" << "tid" << nss2.tenantId().value() << "ns" - << nss2.toString() << "ui" << uuid2 << "o" + << nss2.toString_forTest() << "ui" << uuid2 << "o" << BSON("$set" << BSON("data" << "y")) << "o2" << BSON("_id" << 1)))); @@ -1986,30 +2107,34 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTest) { WriteUnitOfWork wuow(opCtx()); AutoGetCollection autoColl1(opCtx(), nss1, MODE_IX); AutoGetCollection autoColl2(opCtx(), nss2, MODE_IX); + OplogDeleteEntryArgs args; opObserver().aboutToDelete(opCtx(), *autoColl1, BSON("_id" << 0 << "data" - << "x")); - opObserver().onDelete(opCtx(), *autoColl1, 0, {}); + << "x"), + &args); + 
opObserver().onDelete(opCtx(), *autoColl1, 0, args); opObserver().aboutToDelete(opCtx(), *autoColl2, BSON("_id" << 1 << "data" - << "y")); - opObserver().onDelete(opCtx(), *autoColl2, 0, {}); + << "y"), + &args); + opObserver().onDelete(opCtx(), *autoColl2, 0, args); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntry = getSingleOplogEntry(opCtx()); checkCommonFields(oplogEntry); auto o = oplogEntry.getObjectField("o"); - auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" - << "d" - << "ns" << nss1.toString() << "ui" << uuid1 - << "o" << BSON("_id" << 0)) - << BSON("op" - << "d" - << "ns" << nss2.toString() << "ui" - << uuid2 << "o" << BSON("_id" << 1)))); + auto oExpected = + BSON("applyOps" << BSON_ARRAY(BSON("op" + << "d" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 + << "o" << BSON("_id" << 0)) + << BSON("op" + << "d" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 + << "o" << BSON("_id" << 1)))); ASSERT_BSONOBJ_EQ(oExpected, o); ASSERT_FALSE(oplogEntry.hasField("prepare")); ASSERT_FALSE(oplogEntry.getBoolField("prepare")); @@ -2025,20 +2150,23 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTestIncludesTenantId) { WriteUnitOfWork wuow(opCtx()); AutoGetCollection autoColl1(opCtx(), nss1, MODE_IX); AutoGetCollection autoColl2(opCtx(), nss2, MODE_IX); + OplogDeleteEntryArgs args; opObserver().aboutToDelete(opCtx(), *autoColl1, BSON("_id" << 0 << "data" - << "x")); - opObserver().onDelete(opCtx(), *autoColl1, 0, {}); + << "x"), + &args); + opObserver().onDelete(opCtx(), *autoColl1, 0, args); opObserver().aboutToDelete(opCtx(), *autoColl2, BSON("_id" << 1 << "data" - << "y")); - opObserver().onDelete(opCtx(), *autoColl2, 0, {}); + << "y"), + &args); + opObserver().onDelete(opCtx(), *autoColl2, 0, args); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObj = getSingleOplogEntry(opCtx()); checkCommonFields(oplogEntryObj); @@ -2046,15 +2174,17 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTestIncludesTenantId) { auto o = oplogEntry.getObject(); // TODO SERVER-69288: disallow more than one tenant on a single transaction - auto oExpected = BSON("applyOps" << BSON_ARRAY( - BSON("op" - << "d" - << "tid" << nss1.tenantId().value() << "ns" << nss1.toString() - << "ui" << uuid1 << "o" << BSON("_id" << 0)) - << BSON("op" - << "d" - << "tid" << nss2.tenantId().value() << "ns" << nss2.toString() - << "ui" << uuid2 << "o" << BSON("_id" << 1)))); + auto oExpected = + BSON("applyOps" << BSON_ARRAY(BSON("op" + << "d" + << "tid" << nss1.tenantId().value() << "ns" + << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 0)) + << BSON("op" + << "d" + << "tid" << nss2.tenantId().value() << "ns" + << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("_id" << 1)))); ASSERT_BSONOBJ_EQ(oExpected, o); // This test assumes that the top level tenantId matches the tenantId in the first entry @@ -2063,7 +2193,15 @@ TEST_F(OpObserverTransactionTest, TransactionalDeleteTestIncludesTenantId) { ASSERT_FALSE(oplogEntryObj.getBoolField("prepare")); } -TEST_F(OpObserverTransactionTest, +class OpObserverServerlessTransactionTest : 
public OpObserverTransactionTest { +private: + // Needs to override to set serverless mode. + repl::ReplSettings createReplSettings() override { + return repl::createServerlessReplSettings(); + } +}; + +TEST_F(OpObserverServerlessTransactionTest, OnUnpreparedTransactionCommitChecksIfTenantMigrationIsBlockingWrites) { // Add a tenant migration access blocker on donor for blocking writes. auto donorMtab = std::make_shared(getServiceContext(), uuid); @@ -2091,7 +2229,7 @@ TEST_F(OpObserverTransactionTest, auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - ASSERT_THROWS_CODE(opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps), + ASSERT_THROWS_CODE(commitUnpreparedTransaction(opCtx(), opObserver()), DBException, ErrorCodes::TenantMigrationConflict); @@ -2185,10 +2323,11 @@ class OpObserverRetryableFindAndModifyTest : public OpObserverTxnParticipantTest AutoGetCollection autoColl(opCtx(), nss, MODE_IX); const auto deletedDoc = BSON("_id" << 0 << "data" << "x"); - opObserver().aboutToDelete(opCtx(), *autoColl, deletedDoc); OplogDeleteEntryArgs args; args.retryableFindAndModifyLocation = RetryableFindAndModifyLocation::kSideCollection; args.deletedDoc = &deletedDoc; + + opObserver().aboutToDelete(opCtx(), *autoColl, deletedDoc, &args); opObserver().onDelete(opCtx(), *autoColl, 0, args); commit(); @@ -2807,9 +2946,8 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsGrouping) { // This test does not call `OpObserver::aboutToDelete`. That method has the // side-effect of setting of `documentKey` on the delete for sharding purposes. // `OpObserverImpl::onDelete` asserts its existence. - repl::documentKeyDecoration(opCtx).emplace(docsToDelete[doc]["_id"].wrap(), - boost::none); - const OplogDeleteEntryArgs args; + OplogDeleteEntryArgs args; + documentKeyDecoration(args).emplace(docsToDelete[doc]["_id"].wrap(), boost::none); opCtx->getServiceContext()->getOpObserver()->onDelete( opCtx, *autoColl, kUninitializedStmtId, args); } @@ -2875,8 +3013,8 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdate) { } // (1) Delete { - repl::documentKeyDecoration(opCtx).emplace(BSON("_id" << 1), boost::none); - const OplogDeleteEntryArgs args; + OplogDeleteEntryArgs args; + documentKeyDecoration(args).emplace(BSON("_id" << 1), boost::none); opCtx->getServiceContext()->getOpObserver()->onDelete( opCtx, *autoColl, kUninitializedStmtId, args); } @@ -2969,8 +3107,8 @@ TEST_F(BatchedWriteOutputsTest, TestApplyOpsInsertDeleteUpdateIncludesTenantId) } // (1) Delete { - repl::documentKeyDecoration(opCtx).emplace(BSON("_id" << 1), boost::none); - const OplogDeleteEntryArgs args; + OplogDeleteEntryArgs args; + documentKeyDecoration(args).emplace(BSON("_id" << 1), boost::none); opCtx->getServiceContext()->getOpObserver()->onDelete( opCtx, *autoColl, kUninitializedStmtId, args); } @@ -3075,8 +3213,8 @@ TEST_F(BatchedWriteOutputsTest, testWUOWLarge) { // This test does not call `OpObserver::aboutToDelete`. That method has the side-effect // of setting of `documentKey` on the delete for sharding purposes. // `OpObserverImpl::onDelete` asserts its existence. 
- repl::documentKeyDecoration(opCtx).emplace(BSON("_id" << docId), boost::none); - const OplogDeleteEntryArgs args; + OplogDeleteEntryArgs args; + documentKeyDecoration(args).emplace(BSON("_id" << docId), boost::none); opCtx->getServiceContext()->getOpObserver()->onDelete( opCtx, *autoColl, kUninitializedStmtId, args); } @@ -3107,6 +3245,9 @@ TEST_F(BatchedWriteOutputsTest, testWUOWLarge) { // Verifies a WUOW that would result in a an oplog entry >16MB fails with TransactionTooLarge. TEST_F(BatchedWriteOutputsTest, testWUOWTooLarge) { + RAIIServerParameterControllerForTest featureFlagController("featureFlagLargeBatchedOperations", + false); + // Setup. auto opCtxRaii = cc().makeOperationContext(); OperationContext* opCtx = opCtxRaii.get(); @@ -3125,8 +3266,8 @@ TEST_F(BatchedWriteOutputsTest, testWUOWTooLarge) { // This test does not call `OpObserver::aboutToDelete`. That method has the side-effect // of setting of `documentKey` on the delete for sharding purposes. // `OpObserverImpl::onDelete` asserts its existence. - repl::documentKeyDecoration(opCtx).emplace(BSON("_id" << docId), boost::none); - const OplogDeleteEntryArgs args; + OplogDeleteEntryArgs args; + documentKeyDecoration(args).emplace(BSON("_id" << docId), boost::none); opCtx->getServiceContext()->getOpObserver()->onDelete( opCtx, *autoColl, kUninitializedStmtId, args); } @@ -3273,7 +3414,7 @@ TEST_F(OnDeleteOutputsTest, TestNonTransactionFundamentalOnDeleteOutputs) { // This test does not call `OpObserver::aboutToDelete`. That method has the side-effect // of setting of `documentKey` on the delete for sharding purposes. // `OpObserverImpl::onDelete` asserts its existence. - repl::documentKeyDecoration(opCtx).emplace(_deletedDoc["_id"].wrap(), boost::none); + documentKeyDecoration(deleteEntryArgs).emplace(_deletedDoc["_id"].wrap(), boost::none); opObserver.onDelete( opCtx, *locks, testCase.isRetryable() ? 1 : kUninitializedStmtId, deleteEntryArgs); wuow.commit(); @@ -3323,7 +3464,7 @@ TEST_F(OnDeleteOutputsTest, TestTransactionFundamentalOnDeleteOutputs) { // This test does not call `OpObserver::aboutToDelete`. That method has the side-effect // of setting of `documentKey` on the delete for sharding purposes. // `OpObserverImpl::onDelete` asserts its existence. 
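These delete hunks all make the same two-part change: an OplogDeleteEntryArgs is constructed up front and threaded through both observer calls, and the document-key decoration now hangs off those args rather than off the OperationContext. A minimal sketch of the two idioms, assembled from the surrounding fixtures (names such as autoColl1, autoColl, docId and kUninitializedStmtId come from those fixtures and are assumed to be in scope):

    // Normal path (transaction tests): aboutToDelete() populates the args, including the
    // document key, and the same args object is then passed to onDelete().
    OplogDeleteEntryArgs args;
    opObserver().aboutToDelete(opCtx(), *autoColl1, BSON("_id" << 0 << "data" << "x"), &args);
    opObserver().onDelete(opCtx(), *autoColl1, 0, args);

    // Shortcut (batched-write tests): skip aboutToDelete() and set the document-key
    // decoration on the args directly, since OpObserverImpl::onDelete() asserts it exists.
    OplogDeleteEntryArgs batchedArgs;
    documentKeyDecoration(batchedArgs).emplace(BSON("_id" << docId), boost::none);
    opCtx->getServiceContext()->getOpObserver()->onDelete(
        opCtx, *autoColl, kUninitializedStmtId, batchedArgs);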
- repl::documentKeyDecoration(opCtx).emplace(_deletedDoc["_id"].wrap(), boost::none); + documentKeyDecoration(deleteEntryArgs).emplace(_deletedDoc["_id"].wrap(), boost::none); opObserver.onDelete(opCtx, *locks, stmtId, deleteEntryArgs); commitUnpreparedTransaction(opCtx, opObserver); wuow.commit(); @@ -3372,7 +3513,7 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionSingleStatementTest) { /*defaultFromMigrate=*/false); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObj = getNOplogEntries(opCtx(), 1)[0]; checkSessionAndTransactionFields(oplogEntryObj); auto oplogEntry = assertGet(OplogEntry::parse(oplogEntryObj)); @@ -3384,7 +3525,7 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionSingleStatementTest) { auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON( "op" << "i" - << "ns" << nss.toString() << "ui" << uuid << "o" + << "ns" << nss.toString_forTest() << "ui" << uuid << "o" << BSON("_id" << 0 << "a" << std::string(BSONObjMaxUserSize, 'a')) << "o2" << BSON("_id" << 0)))); ASSERT_BSONOBJ_EQ(oExpected, oplogEntry.getObject()); @@ -3416,7 +3557,7 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertTest) { /*defaultFromMigrate=*/false); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObjs = getNOplogEntries(opCtx(), 4); std::vector oplogEntries; mongo::repl::OpTime expectedPrevWriteOpTime; @@ -3433,24 +3574,24 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertTest) { auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 0) << "o2" << BSON("_id" << 0))) + << "ns" << nss1.toString_forTest() << "ui" << uuid1 + << "o" << BSON("_id" << 0) << "o2" << BSON("_id" << 0))) << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject()); oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 1) << "o2" << BSON("_id" << 1))) + << "ns" << nss1.toString_forTest() << "ui" << uuid1 + << "o" << BSON("_id" << 1) << "o2" << BSON("_id" << 1))) << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject()); oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 2) << "o2" << BSON("_id" << 2))) + << "ns" << nss2.toString_forTest() << "ui" << uuid2 + << "o" << BSON("_id" << 2) << "o2" << BSON("_id" << 2))) << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject()); @@ -3459,8 +3600,8 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertTest) { oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 3) << "o2" << BSON("_id" << 3))) + << "ns" << nss2.toString_forTest() << "ui" << uuid2 + << "o" << BSON("_id" << 3) << "o2" << BSON("_id" << 3))) << "count" << 4); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[3].getObject()); } @@ -3499,7 +3640,7 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdateTest) { opObserver().onUpdate(opCtx(), update2); 
auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObjs = getNOplogEntries(opCtx(), 2); std::vector oplogEntries; mongo::repl::OpTime expectedPrevWriteOpTime; @@ -3514,26 +3655,26 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdateTest) { expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()}; } - auto oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "u" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("$set" << BSON("data" - << "x")) - << "o2" << BSON("_id" << 0))) - << "partialTxn" << true); + auto oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "u" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("$set" << BSON("data" + << "x")) + << "o2" << BSON("_id" << 0))) + << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject()); // This should be the implicit commit oplog entry, indicated by the absence of the // 'partialTxn' field. - oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "u" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("$set" << BSON("data" - << "y")) - << "o2" << BSON("_id" << 1))) - << "count" << 2); + oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "u" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("$set" << BSON("data" + << "y")) + << "o2" << BSON("_id" << 1))) + << "count" << 2); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject()); } @@ -3544,19 +3685,22 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeleteTest) { WriteUnitOfWork wuow(opCtx()); AutoGetCollection autoColl1(opCtx(), nss1, MODE_IX); AutoGetCollection autoColl2(opCtx(), nss2, MODE_IX); + OplogDeleteEntryArgs args; opObserver().aboutToDelete(opCtx(), *autoColl1, BSON("_id" << 0 << "data" - << "x")); - opObserver().onDelete(opCtx(), *autoColl1, 0, {}); + << "x"), + &args); + opObserver().onDelete(opCtx(), *autoColl1, 0, args); opObserver().aboutToDelete(opCtx(), *autoColl2, BSON("_id" << 1 << "data" - << "y")); - opObserver().onDelete(opCtx(), *autoColl2, 0, {}); + << "y"), + &args); + opObserver().onDelete(opCtx(), *autoColl2, 0, args); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObjs = getNOplogEntries(opCtx(), 2); std::vector oplogEntries; mongo::repl::OpTime expectedPrevWriteOpTime; @@ -3573,18 +3717,19 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeleteTest) { auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "d" - << "ns" << nss1.toString() << "ui" << uuid1 - << "o" << BSON("_id" << 0))) + << "ns" << nss1.toString_forTest() << "ui" + << uuid1 << "o" << BSON("_id" << 0))) << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject()); // This should be the implicit commit oplog entry, indicated by the absence of the // 'partialTxn' field. 
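As a reading aid for the expected-object assertions in these multi-entry tests: every chained applyOps entry has the same basic shape, and only the trailing fields distinguish an intermediate entry from the implicit-commit entry. A hedged sketch, where opEntry stands in for one packed operation document (the "i"/"u"/"d" objects asserted above):

    // Intermediate entry of the transaction: flagged with "partialTxn".
    auto partialEntry = BSON("applyOps" << BSON_ARRAY(opEntry) << "partialTxn" << true);
    // Final entry (implicit commit): no "partialTxn"; it carries the total operation count
    // instead, plus "prepare": true in the prepared-transaction variants further down.
    auto commitEntry = BSON("applyOps" << BSON_ARRAY(opEntry) << "count" << 2);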
- oExpected = oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" - << "d" - << "ns" << nss2.toString() << "ui" - << uuid2 << "o" << BSON("_id" << 1))) - << "count" << 2); + oExpected = oExpected = + BSON("applyOps" << BSON_ARRAY(BSON("op" + << "d" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 + << "o" << BSON("_id" << 1))) + << "count" << 2); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject()); } @@ -3640,32 +3785,32 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalInsertPrepareTest) { auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 0) << "o2" << BSON("_id" << 0))) + << "ns" << nss1.toString_forTest() << "ui" << uuid1 + << "o" << BSON("_id" << 0) << "o2" << BSON("_id" << 0))) << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject()); oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 1) << "o2" << BSON("_id" << 1))) + << "ns" << nss1.toString_forTest() << "ui" << uuid1 + << "o" << BSON("_id" << 1) << "o2" << BSON("_id" << 1))) << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject()); oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 2) << "o2" << BSON("_id" << 2))) + << "ns" << nss2.toString_forTest() << "ui" << uuid2 + << "o" << BSON("_id" << 2) << "o2" << BSON("_id" << 2))) << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[2].getObject()); oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 3) << "o2" << BSON("_id" << 3))) + << "ns" << nss2.toString_forTest() << "ui" << uuid2 + << "o" << BSON("_id" << 3) << "o2" << BSON("_id" << 3))) << "prepare" << true << "count" << 4); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[3].getObject()); @@ -3727,24 +3872,24 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalUpdatePrepareTest) { expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()}; } - auto oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "u" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("$set" << BSON("data" - << "x")) - << "o2" << BSON("_id" << 0))) - << "partialTxn" << true); + auto oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "u" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("$set" << BSON("data" + << "x")) + << "o2" << BSON("_id" << 0))) + << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject()); - oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "u" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("$set" << BSON("data" - << "y")) - << "o2" << BSON("_id" << 1))) - << "prepare" << true << "count" << 2); + oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "u" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("$set" << BSON("data" + << "y")) + << "o2" << BSON("_id" << 1))) + << "prepare" << true << "count" << 2); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject()); assertTxnRecord(txnNum(), prepareOpTime, DurableTxnStateEnum::kPrepared); @@ -3757,16 +3902,19 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeletePrepareTest) { AutoGetCollection autoColl1(opCtx(), nss1, MODE_IX); AutoGetCollection autoColl2(opCtx(), nss2, MODE_IX); + OplogDeleteEntryArgs args; 
opObserver().aboutToDelete(opCtx(), *autoColl1, BSON("_id" << 0 << "data" - << "x")); - opObserver().onDelete(opCtx(), *autoColl1, 0, {}); + << "x"), + &args); + opObserver().onDelete(opCtx(), *autoColl1, 0, args); opObserver().aboutToDelete(opCtx(), *autoColl2, BSON("_id" << 1 << "data" - << "y")); - opObserver().onDelete(opCtx(), *autoColl2, 0, {}); + << "y"), + &args); + opObserver().onDelete(opCtx(), *autoColl2, 0, args); auto reservedSlots = reserveOpTimesInSideTransaction(opCtx(), 2); auto prepareOpTime = reservedSlots.back(); @@ -3792,15 +3940,15 @@ TEST_F(OpObserverMultiEntryTransactionTest, TransactionalDeletePrepareTest) { auto oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "d" - << "ns" << nss1.toString() << "ui" << uuid1 - << "o" << BSON("_id" << 0))) + << "ns" << nss1.toString_forTest() << "ui" + << uuid1 << "o" << BSON("_id" << 0))) << "partialTxn" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject()); oExpected = BSON("applyOps" << BSON_ARRAY(BSON("op" << "d" - << "ns" << nss2.toString() << "ui" << uuid2 - << "o" << BSON("_id" << 1))) + << "ns" << nss2.toString_forTest() << "ui" + << uuid2 << "o" << BSON("_id" << 1))) << "prepare" << true << "count" << 2); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[1].getObject()); @@ -3991,7 +4139,7 @@ TEST_F(OpObserverMultiEntryTransactionTest, UnpreparedTransactionPackingTest) { /*defaultFromMigrate=*/false); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObjs = getNOplogEntries(opCtx(), 1); std::vector oplogEntries; mongo::repl::OpTime expectedPrevWriteOpTime; @@ -4005,23 +4153,23 @@ TEST_F(OpObserverMultiEntryTransactionTest, UnpreparedTransactionPackingTest) { ASSERT_LT(expectedPrevWriteOpTime.getTimestamp(), oplogEntry.getTimestamp()); expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()}; } - auto oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 0) << "o2" << BSON("_id" << 0)) - << BSON("op" - << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 1) << "o2" << BSON("_id" << 1)) - << BSON("op" - << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 2) << "o2" << BSON("_id" << 2)) - << BSON("op" - << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 3) << "o2" << BSON("_id" << 3)))); + auto oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "i" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 0) << "o2" << BSON("_id" << 0)) + << BSON("op" + << "i" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 1) << "o2" << BSON("_id" << 1)) + << BSON("op" + << "i" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("_id" << 2) << "o2" << BSON("_id" << 2)) + << BSON("op" + << "i" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("_id" << 3) << "o2" << BSON("_id" << 3)))); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject()); } @@ -4069,24 +4217,24 @@ TEST_F(OpObserverMultiEntryTransactionTest, PreparedTransactionPackingTest) { ASSERT_LT(expectedPrevWriteOpTime.getTimestamp(), oplogEntry.getTimestamp()); expectedPrevWriteOpTime = repl::OpTime{oplogEntry.getTimestamp(), *oplogEntry.getTerm()}; - auto 
oExpected = - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 0) << "o2" << BSON("_id" << 0)) - << BSON("op" - << "i" - << "ns" << nss1.toString() << "ui" << uuid1 << "o" - << BSON("_id" << 1) << "o2" << BSON("_id" << 1)) - << BSON("op" - << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 2) << "o2" << BSON("_id" << 2)) - << BSON("op" - << "i" - << "ns" << nss2.toString() << "ui" << uuid2 << "o" - << BSON("_id" << 3) << "o2" << BSON("_id" << 3))) - << "prepare" << true); + auto oExpected = BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "i" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 0) << "o2" << BSON("_id" << 0)) + << BSON("op" + << "i" + << "ns" << nss1.toString_forTest() << "ui" << uuid1 << "o" + << BSON("_id" << 1) << "o2" << BSON("_id" << 1)) + << BSON("op" + << "i" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("_id" << 2) << "o2" << BSON("_id" << 2)) + << BSON("op" + << "i" + << "ns" << nss2.toString_forTest() << "ui" << uuid2 << "o" + << BSON("_id" << 3) << "o2" << BSON("_id" << 3))) + << "prepare" << true); ASSERT_BSONOBJ_EQ(oExpected, oplogEntries[0].getObject()); txnParticipant.unstashTransactionResources(opCtx(), "abortTransaction"); @@ -4217,7 +4365,7 @@ TEST_F(OpObserverLargeTransactionTest, LargeTransactionCreatesMultipleOplogEntri txnParticipant.addTransactionOperation(opCtx(), operation2); auto txnOps = txnParticipant.retrieveCompletedTransactionOperations(opCtx()); ASSERT_EQUALS(txnOps->getNumberOfPrePostImagesToWrite(), 0); - opObserver().onUnpreparedTransactionCommit(opCtx(), *txnOps); + commitUnpreparedTransaction(opCtx(), opObserver()); auto oplogEntryObjs = getNOplogEntries(opCtx(), 2); std::vector oplogEntries; mongo::repl::OpTime expectedPrevWriteOpTime; @@ -4279,7 +4427,15 @@ TEST_F(OpObserverTest, OnRollbackInvalidatesDefaultRWConcernCache) { ASSERT_EQ(Date_t::fromMillisSinceEpoch(5678), *newCachedDefaults.getUpdateWallClockTime()); } -TEST_F(OpObserverTest, OnInsertChecksIfTenantMigrationIsBlockingWrites) { +class OpObserverServerlessTest : public OpObserverTest { +private: + // Need to set serverless. + repl::ReplSettings createReplSettings() override { + return repl::createServerlessReplSettings(); + } +}; + +TEST_F(OpObserverServerlessTest, OnInsertChecksIfTenantMigrationIsBlockingWrites) { auto opCtx = cc().makeOperationContext(); // Add a tenant migration access blocker on donor for blocking writes. diff --git a/src/mongo/db/op_observer/op_observer_noop.h b/src/mongo/db/op_observer/op_observer_noop.h index 3ba66e80f79cc..8d94a6ad4445c 100644 --- a/src/mongo/db/op_observer/op_observer_noop.h +++ b/src/mongo/db/op_observer/op_observer_noop.h @@ -33,8 +33,18 @@ namespace mongo { +/** + * No-op implementation of OpObserver interface. + * + * Suitable base class of OpObserver implementations that do not need to implement most of the + * OpObserver interface. 
+ */ class OpObserverNoop : public OpObserver { public: + NamespaceFilters getNamespaceFilters() const override { + return {NamespaceFilter::kAll, NamespaceFilter::kAll}; + } + void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, const NamespaceString& nss, const UUID& uuid, @@ -42,12 +52,12 @@ class OpObserverNoop : public OpObserver { void onCreateGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; + const UUID& globalIndexUUID) override {} void onDropGlobalIndex(OperationContext* opCtx, const NamespaceString& globalIndexNss, const UUID& globalIndexUUID, - long long numKeys) final{}; + long long numKeys) override {} void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -88,25 +98,37 @@ class OpObserverNoop : public OpObserver { std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override {} + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override {} + void onInsertGlobalIndexKey(OperationContext* opCtx, const NamespaceString& globalIndexNss, const UUID& globalIndexUuid, const BSONObj& key, - const BSONObj& docKey) final{}; + const BSONObj& docKey) override {} + void onDeleteGlobalIndexKey(OperationContext* opCtx, const NamespaceString& globalIndexNss, const UUID& globalIndexUuid, const BSONObj& key, - const BSONObj& docKey) final {} - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override{}; + const BSONObj& docKey) override {} + + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override {} + void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) override {} + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) override {} + void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) override {} + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override {} + void onInternalOpMessage(OperationContext* opCtx, const NamespaceString& nss, const boost::optional& uuid, @@ -116,6 +138,7 @@ class OpObserverNoop : public OpObserver { const boost::optional postImageOpTime, const boost::optional prevWriteOpTimeInTransaction, const boost::optional slot) override {} + void onCreateCollection(OperationContext* opCtx, const CollectionPtr& coll, const NamespaceString& collectionName, @@ -123,34 +146,40 @@ class OpObserverNoop : public OpObserver { const BSONObj& idIndex, const OplogSlot& createOpTime, bool fromMigrate) override {} + void onCollMod(OperationContext* opCtx, const NamespaceString& nss, const UUID& uuid, const BSONObj& collModCmd, const CollectionOptions& oldCollOptions, boost::optional indexInfo) override {} + void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) override {} - using OpObserver::onDropCollection; + repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) override { + const CollectionDropType dropType, + bool markFromMigrate) override { return {}; } + void onDropIndex(OperationContext* opCtx, const NamespaceString& nss, const UUID& uuid, const std::string& indexName, const BSONObj& idxDescriptor) override {} - using OpObserver::onRenameCollection; + void 
onRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) override {} + bool stayTemp, + bool markFromMigrate) override {} + void onImportCollection(OperationContext* opCtx, const UUID& importUUID, const NamespaceString& nss, @@ -159,64 +188,81 @@ class OpObserverNoop : public OpObserver { const BSONObj& catalogEntry, const BSONObj& storageMetadata, bool isDryRun) override {} - using OpObserver::preRenameCollection; + repl::OpTime preRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) override { + bool stayTemp, + bool markFromMigrate) override { return {}; } + void postRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, bool stayTemp) override {} + void onApplyOps(OperationContext* opCtx, const DatabaseName& dbName, const BSONObj& applyOpCmd) override {} + void onEmptyCapped(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid) override {} + void onTransactionStart(OperationContext* opCtx) override {} + void onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) override {} - void onBatchedWriteStart(OperationContext* opCtx) final {} - void onBatchedWriteCommit(OperationContext* opCtx) final {} - void onBatchedWriteAbort(OperationContext* opCtx) final {} + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator = nullptr) override {} + + void onBatchedWriteStart(OperationContext* opCtx) override {} + + void onBatchedWriteCommit(OperationContext* opCtx) override {} + + void onBatchedWriteAbort(OperationContext* opCtx) override {} + void onPreparedTransactionCommit( OperationContext* opCtx, OplogSlot commitOplogEntryOpTime, Timestamp commitTimestamp, - const std::vector& statements) noexcept override{}; - std::unique_ptr preTransactionPrepare( + const std::vector& statements) noexcept override {} + + void preTransactionPrepare( OperationContext* opCtx, - const std::vector& reservedSlots, const TransactionOperations& transactionOperations, - Date_t wallClockTime) override { - return nullptr; - } + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + Date_t wallClockTime) override {} + void onTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, const TransactionOperations& transactionOperations, const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) override{}; + Date_t wallClockTime) override {} + void onTransactionPrepareNonPrimary(OperationContext* opCtx, + const LogicalSessionId& lsid, const std::vector& statements, const repl::OpTime& prepareOpTime) override {} + void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) override{}; + boost::optional abortOplogEntryOpTime) override {} + + void onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) override {} + void 
onMajorityCommitPointUpdate(ServiceContext* service, const repl::OpTime& newCommitPoint) override {} - -private: - void _onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) override {} }; } // namespace mongo diff --git a/src/mongo/db/op_observer/op_observer_registry.cpp b/src/mongo/db/op_observer/op_observer_registry.cpp new file mode 100644 index 0000000000000..7a61fcb42d6c1 --- /dev/null +++ b/src/mongo/db/op_observer/op_observer_registry.cpp @@ -0,0 +1,38 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/op_observer/op_observer_registry.h" + +namespace mongo { + +OpObserverRegistry::OpObserverRegistry() = default; + +OpObserverRegistry::~OpObserverRegistry() = default; + +} // namespace mongo diff --git a/src/mongo/db/op_observer/op_observer_registry.h b/src/mongo/db/op_observer/op_observer_registry.h index afdedf8b58294..a5c8fb50665a0 100644 --- a/src/mongo/db/op_observer/op_observer_registry.h +++ b/src/mongo/db/op_observer/op_observer_registry.h @@ -30,10 +30,35 @@ #pragma once #include +#include +#include +#include +#include +#include #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/transaction/transaction_operations.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -49,13 +74,61 @@ class OpObserverRegistry final : public OpObserver { OpObserverRegistry& operator=(const OpObserverRegistry&) = delete; public: - OpObserverRegistry() = default; - virtual ~OpObserverRegistry() = default; + OpObserverRegistry(); + virtual ~OpObserverRegistry(); + + // This implementation is unused, but needs to be implemented to conform to the OpObserver + // interface. + NamespaceFilters getNamespaceFilters() const { + return {NamespaceFilter::kAll, NamespaceFilter::kAll}; + } // Add 'observer' to the list of observers to call. Observers are called in registration order. // Registration must be done while no calls to observers are made.
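The addObserver() body that follows routes each observer into per-namespace CRUD queues based on its getNamespaceFilters() result. For illustration, a hypothetical observer that only cares about config-collection writes could opt out of the user and system queues as sketched here (the class is invented for this example, and the brace-initialized field order {insert/update filter, delete filter} is inferred from the OpObserverNoop override earlier in this patch):

    // Hypothetical observer restricted to config.* namespaces (illustrative only).
    class ConfigOnlyObserver : public OpObserverNoop {
    public:
        NamespaceFilters getNamespaceFilters() const override {
            // Assumed field order: {insert/update filter, delete filter}.
            return {NamespaceFilter::kConfig, NamespaceFilter::kConfig};
        }
        // ... onInserts()/onUpdate()/onDelete() overrides as needed ...
    };

    // Registered like any other observer; the registry will then skip it for inserts,
    // updates and deletes against user and *.system.* collections.
    registry.addObserver(std::make_unique<ConfigOnlyObserver>());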
void addObserver(std::unique_ptr observer) { + const auto& nsFilters = observer->getNamespaceFilters(); _observers.push_back(std::move(observer)); + + OpObserver* observerPtr = _observers.back().get(); + switch (nsFilters.updateFilter) { + case OpObserver::NamespaceFilter::kConfig: + _insertAndUpdateConfigObservers.push_back(observerPtr); + break; + case OpObserver::NamespaceFilter::kSystem: + _insertAndUpdateSystemObservers.push_back(observerPtr); + break; + case OpObserver::NamespaceFilter::kConfigAndSystem: + _insertAndUpdateConfigObservers.push_back(observerPtr); + _insertAndUpdateSystemObservers.push_back(observerPtr); + break; + case OpObserver::NamespaceFilter::kAll: + _insertAndUpdateConfigObservers.push_back(observerPtr); + _insertAndUpdateSystemObservers.push_back(observerPtr); + _insertAndUpdateUserObservers.push_back(observerPtr); + break; + default: + break; + } + + switch (nsFilters.deleteFilter) { + case OpObserver::NamespaceFilter::kConfig: + _onDeleteConfigObservers.push_back(observerPtr); + break; + case OpObserver::NamespaceFilter::kSystem: + _onDeleteSystemObservers.push_back(observerPtr); + break; + case OpObserver::NamespaceFilter::kConfigAndSystem: + _onDeleteConfigObservers.push_back(observerPtr); + _onDeleteSystemObservers.push_back(observerPtr); + break; + case OpObserver::NamespaceFilter::kAll: + _onDeleteConfigObservers.push_back(observerPtr); + _onDeleteSystemObservers.push_back(observerPtr); + _onDeleteUserObservers.push_back(observerPtr); + break; + default: + break; + } } void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, @@ -152,10 +225,24 @@ class OpObserverRegistry final : public OpObserver { std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override { - ReservedTimes times{opCtx}; - for (auto& o : _observers) - o->onInserts(opCtx, coll, begin, end, fromMigrate, defaultFromMigrate); + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override { + ReservedTimes times{opCtx}; + OpStateAccumulator opStateAccumulator; + + const auto& nss = coll->ns(); + std::vector* observerQueue; + if (nss.isConfigDB()) { + observerQueue = &_insertAndUpdateConfigObservers; + } else if (nss.isSystem()) { + observerQueue = &_insertAndUpdateSystemObservers; + } else { + observerQueue = &_insertAndUpdateUserObservers; + } + + for (auto& o : *observerQueue) + o->onInserts( + opCtx, coll, begin, end, fromMigrate, defaultFromMigrate, &opStateAccumulator); } void onInsertGlobalIndexKey(OperationContext* opCtx, @@ -179,27 +266,68 @@ class OpObserverRegistry final : public OpObserver { o->onDeleteGlobalIndexKey(opCtx, globalIndexNss, globalIndexUuid, key, docKey); } - void onUpdate(OperationContext* const opCtx, const OplogUpdateEntryArgs& args) override { + void onUpdate(OperationContext* const opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override { ReservedTimes times{opCtx}; - for (auto& o : _observers) - o->onUpdate(opCtx, args); + OpStateAccumulator opStateAccumulator; + + const auto& nss = args.coll->ns(); + std::vector* observerQueue; + if (nss.isConfigDB()) { + observerQueue = &_insertAndUpdateConfigObservers; + } else if (nss.isSystem()) { + observerQueue = &_insertAndUpdateSystemObservers; + } else { + observerQueue = &_insertAndUpdateUserObservers; + } + + for (auto& o : *observerQueue) + o->onUpdate(opCtx, args, &opStateAccumulator); } void aboutToDelete(OperationContext* const opCtx, const CollectionPtr& coll, - const 
BSONObj& doc) override { - ReservedTimes times{opCtx}; - for (auto& o : _observers) - o->aboutToDelete(opCtx, coll, doc); + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) override { + ReservedTimes times{opCtx}; + OpStateAccumulator opStateAccumulator; + + const auto& nss = coll->ns(); + std::vector* observerQueue; + if (nss.isConfigDB()) { + observerQueue = &_onDeleteConfigObservers; + } else if (nss.isSystem()) { + observerQueue = &_onDeleteSystemObservers; + } else { + observerQueue = &_onDeleteUserObservers; + } + + for (auto& o : *observerQueue) + o->aboutToDelete(opCtx, coll, doc, args, &opStateAccumulator); } void onDelete(OperationContext* const opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) override { - ReservedTimes times{opCtx}; - for (auto& o : _observers) - o->onDelete(opCtx, coll, stmtId, args); + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override { + ReservedTimes times{opCtx}; + OpStateAccumulator opStateAccumulator; + + const auto& nss = coll->ns(); + std::vector* observerQueue; + if (nss.isConfigDB()) { + observerQueue = &_onDeleteConfigObservers; + } else if (nss.isSystem()) { + observerQueue = &_onDeleteSystemObservers; + } else { + observerQueue = &_onDeleteUserObservers; + } + + for (auto& o : *observerQueue) + o->onDelete(opCtx, coll, stmtId, args, &opStateAccumulator); } void onInternalOpMessage(OperationContext* const opCtx, @@ -254,15 +382,6 @@ class OpObserverRegistry final : public OpObserver { o->onDropDatabase(opCtx, dbName); } - repl::OpTime onDropCollection(OperationContext* const opCtx, - const NamespaceString& collectionName, - const UUID& uuid, - std::uint64_t numRecords, - const CollectionDropType dropType) override { - return onDropCollection( - opCtx, collectionName, uuid, numRecords, dropType, false /* markFromMigrate*/); - } - repl::OpTime onDropCollection(OperationContext* const opCtx, const NamespaceString& collectionName, const UUID& uuid, @@ -288,23 +407,6 @@ class OpObserverRegistry final : public OpObserver { o->onDropIndex(opCtx, nss, uuid, indexName, idxDescriptor); } - void onRenameCollection(OperationContext* const opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override { - onRenameCollection(opCtx, - fromCollection, - toCollection, - uuid, - dropTargetUUID, - numRecords, - stayTemp, - false /* markFromMigrate */); - } - void onRenameCollection(OperationContext* const opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, @@ -345,23 +447,6 @@ class OpObserverRegistry final : public OpObserver { isDryRun); } - repl::OpTime preRenameCollection(OperationContext* const opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override { - return preRenameCollection(opCtx, - fromCollection, - toCollection, - uuid, - dropTargetUUID, - numRecords, - stayTemp, - false /* markFromMigrate */); - } - repl::OpTime preRenameCollection(OperationContext* const opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, @@ -420,10 +505,19 @@ class OpObserverRegistry final : public OpObserver { } void onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& 
transactionOperations) override { + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator = nullptr) override { ReservedTimes times{opCtx}; + OpStateAccumulator opStateAccumulator; for (auto& o : _observers) - o->onUnpreparedTransactionCommit(opCtx, transactionOperations); + o->onUnpreparedTransactionCommit(opCtx, + reservedSlots, + transactionOperations, + applyOpsOperationAssignment, + &opStateAccumulator); } void onPreparedTransactionCommit( @@ -437,24 +531,15 @@ class OpObserverRegistry final : public OpObserver { opCtx, commitOplogEntryOpTime, commitTimestamp, statements); } - std::unique_ptr preTransactionPrepare( + void preTransactionPrepare( OperationContext* opCtx, - const std::vector& reservedSlots, const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, Date_t wallClockTime) override { - std::unique_ptr - applyOpsOplogSlotAndOperationAssignment; for (auto&& observer : _observers) { - auto applyOpsAssignment = observer->preTransactionPrepare( - opCtx, reservedSlots, transactionOperations, wallClockTime); - tassert(6278501, - "More than one OpObserver returned operation to \"applyOps\" assignment", - !(applyOpsAssignment && applyOpsOplogSlotAndOperationAssignment)); - if (applyOpsAssignment) { - applyOpsOplogSlotAndOperationAssignment = std::move(applyOpsAssignment); - } + observer->preTransactionPrepare( + opCtx, transactionOperations, applyOpsOperationAssignment, wallClockTime); } - return applyOpsOplogSlotAndOperationAssignment; } void onTransactionPrepare( @@ -476,11 +561,12 @@ class OpObserverRegistry final : public OpObserver { } void onTransactionPrepareNonPrimary(OperationContext* opCtx, + const LogicalSessionId& lsid, const std::vector& statements, const repl::OpTime& prepareOpTime) override { ReservedTimes times{opCtx}; for (auto& observer : _observers) { - observer->onTransactionPrepareNonPrimary(opCtx, statements, prepareOpTime); + observer->onTransactionPrepareNonPrimary(opCtx, lsid, statements, prepareOpTime); } } @@ -512,6 +598,12 @@ class OpObserverRegistry final : public OpObserver { } } + void onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) override { + for (auto& o : _observers) + o->onReplicationRollback(opCtx, rbInfo); + } + void onMajorityCommitPointUpdate(ServiceContext* service, const repl::OpTime& newCommitPoint) override { for (auto& o : _observers) @@ -519,12 +611,6 @@ class OpObserverRegistry final : public OpObserver { } private: - void _onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) override { - for (auto& o : _observers) - o->onReplicationRollback(opCtx, rbInfo); - } - static repl::OpTime _getOpTimeToReturn(const std::vector& times) { if (times.empty()) { return repl::OpTime{}; @@ -533,6 +619,26 @@ class OpObserverRegistry final : public OpObserver { return times.front(); } + // For use by the long tail of non-performance-critical operations: non-CRUD. + // CRUD operations have the most observers and are worth optimizing. For non-CRUD operations, + // there are few implemented observers and as little as one that implement the interface. std::vector> _observers; + + // For performance reasons, store separate but still ordered queues for CRUD ops. 
+ // Each CRUD operation will iterate through one of these queues based on the nss + // of the target document of the operation. + std::vector _insertAndUpdateConfigObservers; // config.* + std::vector _insertAndUpdateSystemObservers; // *.system.* + std::vector + _insertAndUpdateUserObservers; // not config nor system. + // Will impact writes to all user collections. + + // Having separate queues for delete operations allows observers like + // PrimaryOnlyServiceOpObserver to use the filtering differently between insert/update, and + // delete. + std::vector _onDeleteConfigObservers; // config.* + std::vector _onDeleteSystemObservers; // *.system.* + std::vector _onDeleteUserObservers; // not config nor system + // Will impact writes to all user collections. }; } // namespace mongo diff --git a/src/mongo/db/op_observer/op_observer_registry_test.cpp b/src/mongo/db/op_observer/op_observer_registry_test.cpp index 3f81f4219ff06..9a65cbad78fde 100644 --- a/src/mongo/db/op_observer/op_observer_registry_test.cpp +++ b/src/mongo/db/op_observer/op_observer_registry_test.cpp @@ -27,15 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/op_observer/op_observer_registry.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/op_observer/op_observer_noop.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -53,37 +60,43 @@ struct TestObserver : public OpObserverNoop { void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) { drops++; } - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) override { + const CollectionDropType dropType, + bool markFromMigrate) override { drops++; OpObserver::Times::get(opCtx).reservedOpTimes.push_back(opTime); return {}; } - using OpObserver::onRenameCollection; void onRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) { - preRenameCollection( - opCtx, fromCollection, toCollection, uuid, dropTargetUUID, numRecords, stayTemp); + bool stayTemp, + bool markFromMigrate) { + preRenameCollection(opCtx, + fromCollection, + toCollection, + uuid, + dropTargetUUID, + numRecords, + stayTemp, + markFromMigrate); postRenameCollection(opCtx, fromCollection, toCollection, uuid, dropTargetUUID, stayTemp); } - using OpObserver::preRenameCollection; repl::OpTime preRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) override { + bool stayTemp, + bool markFromMigrate) override { OpObserver::Times::get(opCtx).reservedOpTimes.push_back(opTime); return {}; } @@ -102,13 +115,17 @@ struct ThrowingObserver : public TestObserver { } }; -struct OpObserverRegistryTest : public unittest::Test { +struct OpObserverRegistryTest : 
public ServiceContextTest { NamespaceString testNss = NamespaceString::createNamespaceString_forTest("test", "coll"); std::unique_ptr unique1 = std::make_unique(); std::unique_ptr unique2 = std::make_unique(); TestObserver* observer1 = unique1.get(); TestObserver* observer2 = unique2.get(); OpObserverRegistry registry; + + ServiceContext::UniqueOperationContext opCtxHolder{makeOperationContext()}; + OperationContext* opCtx{opCtxHolder.get()}; + /** * The 'op' function calls an observer method on the registry that returns an OpTime. * The method checks that the registry correctly returns only the first observer's `OpTime`. @@ -136,88 +153,92 @@ struct OpObserverRegistryTest : public unittest::Test { }; TEST_F(OpObserverRegistryTest, NoObservers) { - OperationContextNoop opCtx; // Check that it's OK to call observer methods with no observers registered. - registry.onDropDatabase(&opCtx, DatabaseName(boost::none, "test")); + registry.onDropDatabase(opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "test")); } TEST_F(OpObserverRegistryTest, TwoObservers) { - OperationContextNoop opCtx; ASSERT_EQUALS(testObservers, 2); registry.addObserver(std::move(unique1)); registry.addObserver(std::move(unique2)); - registry.onDropDatabase(&opCtx, DatabaseName(boost::none, "test")); + registry.onDropDatabase(opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "test")); ASSERT_EQUALS(observer1->drops, 1); ASSERT_EQUALS(observer2->drops, 1); } TEST_F(OpObserverRegistryTest, ThrowingObserver1) { - OperationContextNoop opCtx; unique1 = std::make_unique(); observer1 = unique1.get(); registry.addObserver(std::move(unique1)); registry.addObserver(std::move(unique2)); - ASSERT_THROWS(registry.onDropDatabase(&opCtx, DatabaseName(boost::none, "test")), + ASSERT_THROWS(registry.onDropDatabase( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "test")), AssertionException); ASSERT_EQUALS(observer1->drops, 1); ASSERT_EQUALS(observer2->drops, 0); } TEST_F(OpObserverRegistryTest, ThrowingObserver2) { - OperationContextNoop opCtx; unique2 = std::make_unique(); observer2 = unique1.get(); registry.addObserver(std::move(unique1)); registry.addObserver(std::move(unique2)); - ASSERT_THROWS(registry.onDropDatabase(&opCtx, DatabaseName(boost::none, "test")), + ASSERT_THROWS(registry.onDropDatabase( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "test")), AssertionException); ASSERT_EQUALS(observer1->drops, 1); ASSERT_EQUALS(observer2->drops, 1); } TEST_F(OpObserverRegistryTest, OnDropCollectionObserverResultReturnsRightTime) { - OperationContextNoop opCtx; registry.addObserver(std::move(unique1)); registry.addObserver(std::make_unique()); auto op = [&]() -> repl::OpTime { - return registry.onDropCollection( - &opCtx, testNss, UUID::gen(), 0U, OpObserver::CollectionDropType::kOnePhase); + return registry.onDropCollection(opCtx, + testNss, + UUID::gen(), + 0U, + OpObserver::CollectionDropType::kOnePhase, + /*markFromMigrate=*/false); }; checkConsistentOpTime(op); } TEST_F(OpObserverRegistryTest, PreRenameCollectionObserverResultReturnsRightTime) { - OperationContextNoop opCtx; registry.addObserver(std::move(unique1)); registry.addObserver(std::make_unique()); auto op = [&]() -> repl::OpTime { UUID uuid = UUID::gen(); - auto opTime = registry.preRenameCollection(&opCtx, testNss, testNss, uuid, {}, 0U, false); - registry.postRenameCollection(&opCtx, testNss, testNss, uuid, {}, false); + auto opTime = registry.preRenameCollection( + opCtx, testNss, testNss, uuid, {}, 0U, 
/*stayTemp=*/false, /*markFromMigrate=*/false); + registry.postRenameCollection(opCtx, testNss, testNss, uuid, {}, false); return opTime; }; checkConsistentOpTime(op); } DEATH_TEST_F(OpObserverRegistryTest, OnDropCollectionReturnsInconsistentTime, "invariant") { - OperationContextNoop opCtx; registry.addObserver(std::move(unique1)); registry.addObserver(std::move(unique2)); auto op = [&]() -> repl::OpTime { - return registry.onDropCollection( - &opCtx, testNss, UUID::gen(), 0U, OpObserver::CollectionDropType::kOnePhase); + return registry.onDropCollection(opCtx, + testNss, + UUID::gen(), + 0U, + OpObserver::CollectionDropType::kOnePhase, + /*markFromMigrate=*/false); }; checkInconsistentOpTime(op); } DEATH_TEST_F(OpObserverRegistryTest, PreRenameCollectionReturnsInconsistentTime, "invariant") { - OperationContextNoop opCtx; registry.addObserver(std::move(unique1)); registry.addObserver(std::move(unique2)); auto op = [&]() -> repl::OpTime { UUID uuid = UUID::gen(); - auto opTime = registry.preRenameCollection(&opCtx, testNss, testNss, uuid, {}, 0U, false); - registry.postRenameCollection(&opCtx, testNss, testNss, uuid, {}, false); + auto opTime = registry.preRenameCollection( + opCtx, testNss, testNss, uuid, {}, 0U, /*stayTemp=*/false, /*markFromMigrate=*/false); + registry.postRenameCollection(opCtx, testNss, testNss, uuid, {}, false); return opTime; }; checkInconsistentOpTime(op); diff --git a/src/mongo/db/op_observer/op_observer_util.cpp b/src/mongo/db/op_observer/op_observer_util.cpp index cf7571768a043..e26ba78ad72fd 100644 --- a/src/mongo/db/op_observer/op_observer_util.cpp +++ b/src/mongo/db/op_observer/op_observer_util.cpp @@ -29,14 +29,30 @@ #include "mongo/db/op_observer/op_observer_util.h" +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/bson/dotted_path_support.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" + +namespace mongo { + +const OplogDeleteEntryArgs::Decoration> documentKeyDecoration = + OplogDeleteEntryArgs::declareDecoration>(); -namespace mongo::repl { +const OpStateAccumulator::Decoration> + shardingWriteRouterOpStateAccumulatorDecoration = + OpStateAccumulator::declareDecoration>(); -const OperationContext::Decoration> documentKeyDecoration = - OperationContext::declareDecoration>(); +MONGO_FAIL_POINT_DEFINE(addDestinedRecipient); +MONGO_FAIL_POINT_DEFINE(sleepBetweenInsertOpTimeGenerationAndLogOp); /** * Given a raw collMod command object and associated collection metadata, create and return the @@ -96,7 +112,7 @@ BSONObj DocumentKey::getShardKeyAndId() const { return getId(); } -DocumentKey getDocumentKey(OperationContext* opCtx, const CollectionPtr& coll, BSONObj const& doc) { +DocumentKey getDocumentKey(const CollectionPtr& coll, BSONObj const& doc) { auto idField = doc["_id"]; BSONObj id = idField ? 
idField.wrap() : doc; boost::optional shardKey; @@ -110,4 +126,4 @@ DocumentKey getDocumentKey(OperationContext* opCtx, const CollectionPtr& coll, B return {std::move(id), std::move(shardKey)}; } -} // namespace mongo::repl +} // namespace mongo diff --git a/src/mongo/db/op_observer/op_observer_util.h b/src/mongo/db/op_observer/op_observer_util.h index 149223aebdd8c..7082b8b06ac82 100644 --- a/src/mongo/db/op_observer/op_observer_util.h +++ b/src/mongo/db/op_observer/op_observer_util.h @@ -29,11 +29,28 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/s/sharding_write_router.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" + +namespace mongo { + +// Common fail points for logOp() and logInsertOps(). +extern FailPoint addDestinedRecipient; +extern FailPoint sleepBetweenInsertOpTimeGenerationAndLogOp; -namespace mongo::repl { BSONObj makeCollModCmdObj(const BSONObj& collModCmd, const CollectionOptions& oldCollOptions, boost::optional indexInfo); @@ -58,11 +75,20 @@ class DocumentKey { * Returns a DocumentKey constructed from the shard key fields, if the collection is sharded, * and the _id field, of the given document. */ -DocumentKey getDocumentKey(OperationContext* opCtx, const CollectionPtr& coll, BSONObj const& doc); +DocumentKey getDocumentKey(const CollectionPtr& coll, BSONObj const& doc); /** * Provides access to the DocumentKey attached to this OperationContext. */ -extern const OperationContext::Decoration> documentKeyDecoration; +extern const OplogDeleteEntryArgs::Decoration> documentKeyDecoration; + +/** + * Provides access to the ShardingWriteRouter attached to the op accumulator. + * The ShardingWriteRouter instance is created in OpObserverImpl and subsequently + * destroyed in MigrationChunkClonerSourceOpObserver. + * + */ +extern const OpStateAccumulator::Decoration> + shardingWriteRouterOpStateAccumulatorDecoration; -} // namespace mongo::repl +} // namespace mongo diff --git a/src/mongo/db/op_observer/oplog_writer.h b/src/mongo/db/op_observer/oplog_writer.h index d038c0fa8f452..dc2a5cf9bf9bc 100644 --- a/src/mongo/db/op_observer/oplog_writer.h +++ b/src/mongo/db/op_observer/oplog_writer.h @@ -29,17 +29,16 @@ #pragma once -#include #include // for std::size_t -#include #include -#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" // for CollectionPtr #include "mongo/db/operation_context.h" -#include "mongo/db/repl/oplog.h" // for InsertStatement and OplogLink +#include "mongo/db/repl/oplog.h" // for OplogLink #include "mongo/db/repl/oplog_entry.h" // for MutableOplogEntry #include "mongo/db/session/logical_session_id.h" // for StmtId -#include "mongo/db/shard_id.h" +#include "mongo/db/storage/record_store.h" // for Record +#include "mongo/util/time_support.h" // for Date_t namespace mongo { @@ -71,25 +70,36 @@ class OplogWriter { repl::OplogLink* oplogLink, const std::vector& stmtIds) = 0; - /** - * Log insert(s) to the local oplog. Returns the OpTime of every insert. - * Refer to repl::logInsertOps() in repl/oplog.h. 
- */ - virtual std::vector logInsertOps( - OperationContext* opCtx, - repl::MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - std::vector fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr) = 0; - /** * Returns the optime of the oplog entry written to the oplog. * Returns a null optime if oplog was not modified. */ virtual repl::OpTime logOp(OperationContext* opCtx, repl::MutableOplogEntry* oplogEntry) = 0; + /** + * Low level oplog function used by logOp() and similar functions to append + * storage engine records to the oplog collection. + * + * This function has to be called within the scope of a WriteUnitOfWork with + * a valid CollectionPtr reference to the oplog. + * + * @param records a vector of oplog records to be written. Records hold references + * to unowned BSONObj data. + * @param timestamps a vector of respective Timestamp objects for each oplog record. + * @param oplogCollection collection to be written to. + * @param finalOpTime the OpTime of the last oplog record. + * @param wallTime the wall clock time of the last oplog record. + * @param isAbortIndexBuild for tenant migration use only. + */ + virtual void logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + repl::OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild) = 0; + /** * Allocates optimes for new entries in the oplog. Returns a vector of OplogSlots, which * contain the new optimes along with their terms and newly calculated hash fields. diff --git a/src/mongo/db/op_observer/oplog_writer_impl.cpp b/src/mongo/db/op_observer/oplog_writer_impl.cpp index 3bb4e13512e3b..6a43b3732e479 100644 --- a/src/mongo/db/op_observer/oplog_writer_impl.cpp +++ b/src/mongo/db/op_observer/oplog_writer_impl.cpp @@ -40,22 +40,22 @@ void OplogWriterImpl::appendOplogEntryChainInfo(OperationContext* opCtx, return repl::appendOplogEntryChainInfo(opCtx, oplogEntry, oplogLink, stmtIds); } -std::vector OplogWriterImpl::logInsertOps( - OperationContext* opCtx, - repl::MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - std::vector fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr) { - return repl::logInsertOps( - opCtx, oplogEntryTemplate, begin, end, fromMigrate, getDestinedRecipientFn, collectionPtr); -} - repl::OpTime OplogWriterImpl::logOp(OperationContext* opCtx, repl::MutableOplogEntry* oplogEntry) { return repl::logOp(opCtx, oplogEntry); } +void OplogWriterImpl::logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + repl::OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild) { + repl::logOplogRecords( + opCtx, nss, records, timestamps, oplogCollection, finalOpTime, wallTime, isAbortIndexBuild); +} + std::vector OplogWriterImpl::getNextOpTimes(OperationContext* opCtx, std::size_t count) { return repl::getNextOpTimes(opCtx, count); } diff --git a/src/mongo/db/op_observer/oplog_writer_impl.h b/src/mongo/db/op_observer/oplog_writer_impl.h index 24071c4e01531..cf32a8e89b8ef 100644 --- a/src/mongo/db/op_observer/oplog_writer_impl.h +++ b/src/mongo/db/op_observer/oplog_writer_impl.h @@ -29,7 +29,20 @@ #pragma once +#include +#include + +#include 
"mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/oplog_writer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -46,17 +59,17 @@ class OplogWriterImpl : public OplogWriter { repl::OplogLink* oplogLink, const std::vector& stmtIds) override; - std::vector logInsertOps( - OperationContext* opCtx, - repl::MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - std::vector fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr) override; - repl::OpTime logOp(OperationContext* opCtx, repl::MutableOplogEntry* oplogEntry) override; + void logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + repl::OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild) override; + std::vector getNextOpTimes(OperationContext* opCtx, std::size_t count) override; }; diff --git a/src/mongo/db/op_observer/oplog_writer_mock.h b/src/mongo/db/op_observer/oplog_writer_mock.h index 5acdab4490716..225c4bdcdbb9c 100644 --- a/src/mongo/db/op_observer/oplog_writer_mock.h +++ b/src/mongo/db/op_observer/oplog_writer_mock.h @@ -46,23 +46,28 @@ class OplogWriterMock : public OplogWriter { repl::OplogLink* oplogLink, const std::vector& stmtIds) override {} - std::vector logInsertOps( - OperationContext* opCtx, - repl::MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - std::vector fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr) override { - return {}; - } - repl::OpTime logOp(OperationContext* opCtx, repl::MutableOplogEntry* oplogEntry) override { return {}; } + void logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + repl::OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild) override {} + + /** + * Returns a vector of 'count' non-null OpTimes. + * Some tests have to populate test collections, which may require OpObserverImpl::onInserts() + * to be able to acquire non-null optimes for insert operations even though no oplog entries + * are appended to the oplog. + * If the test requires actual OpTimes to work, use OplogWriterImpl instead. 
+ */ std::vector getNextOpTimes(OperationContext* opCtx, std::size_t count) override { - return {}; + return std::vector{count, OplogSlot(Timestamp(1, 1), /*term=*/1LL)}; } }; diff --git a/src/mongo/db/op_observer/oplog_writer_transaction_proxy.cpp b/src/mongo/db/op_observer/oplog_writer_transaction_proxy.cpp index 677d7c34caef5..231fe5bb760b8 100644 --- a/src/mongo/db/op_observer/oplog_writer_transaction_proxy.cpp +++ b/src/mongo/db/op_observer/oplog_writer_transaction_proxy.cpp @@ -29,6 +29,8 @@ #include "mongo/db/op_observer/oplog_writer_transaction_proxy.h" +#include + namespace mongo { OplogWriterTransactionProxy::OplogWriterTransactionProxy( @@ -42,28 +44,23 @@ void OplogWriterTransactionProxy::appendOplogEntryChainInfo(OperationContext* op return _targetOplogWriter->appendOplogEntryChainInfo(opCtx, oplogEntry, oplogLink, stmtIds); } -std::vector OplogWriterTransactionProxy::logInsertOps( - OperationContext* opCtx, - repl::MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - std::vector fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr) { - return _targetOplogWriter->logInsertOps(opCtx, - oplogEntryTemplate, - begin, - end, - std::move(fromMigrate), - getDestinedRecipientFn, - collectionPtr); -} - repl::OpTime OplogWriterTransactionProxy::logOp(OperationContext* opCtx, repl::MutableOplogEntry* oplogEntry) { return _targetOplogWriter->logOp(opCtx, oplogEntry); } +void OplogWriterTransactionProxy::logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + repl::OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild) { + _targetOplogWriter->logOplogRecords( + opCtx, nss, records, timestamps, oplogCollection, finalOpTime, wallTime, isAbortIndexBuild); +} + std::vector OplogWriterTransactionProxy::getNextOpTimes(OperationContext* opCtx, std::size_t count) { return _targetOplogWriter->getNextOpTimes(opCtx, count); diff --git a/src/mongo/db/op_observer/oplog_writer_transaction_proxy.h b/src/mongo/db/op_observer/oplog_writer_transaction_proxy.h index b6a2cee4c8b1c..131ccda5bf596 100644 --- a/src/mongo/db/op_observer/oplog_writer_transaction_proxy.h +++ b/src/mongo/db/op_observer/oplog_writer_transaction_proxy.h @@ -29,9 +29,21 @@ #pragma once +#include #include +#include +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/oplog_writer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -53,17 +65,17 @@ class OplogWriterTransactionProxy : public OplogWriter { repl::OplogLink* oplogLink, const std::vector& stmtIds) override; - std::vector logInsertOps( - OperationContext* opCtx, - repl::MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - std::vector fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr) override; - repl::OpTime logOp(OperationContext* opCtx, repl::MutableOplogEntry* oplogEntry) override; + void logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* 
records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + repl::OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild) override; + std::vector getNextOpTimes(OperationContext* opCtx, std::size_t count) override; private: diff --git a/src/mongo/db/op_observer/user_write_block_mode_op_observer.cpp b/src/mongo/db/op_observer/user_write_block_mode_op_observer.cpp index d9158099dca1b..e2dd1525f53ae 100644 --- a/src/mongo/db/op_observer/user_write_block_mode_op_observer.cpp +++ b/src/mongo/db/op_observer/user_write_block_mode_op_observer.cpp @@ -27,18 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_operation_source.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/op_observer/user_write_block_mode_op_observer.h" - +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/global_user_write_block_state.h" #include "mongo/db/s/user_writes_critical_section_document_gen.h" #include "mongo/db/s/user_writes_recoverable_critical_section_service.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { -const auto documentIdDecoration = OperationContext::declareDecoration(); +const auto documentIdDecoration = OplogDeleteEntryArgs::declareDecoration(); bool isStandaloneOrPrimary(OperationContext* opCtx, const NamespaceString& nss) { auto replCoord = repl::ReplicationCoordinator::get(opCtx); @@ -52,7 +64,8 @@ void UserWriteBlockModeOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { const auto& nss = coll->ns(); if (!defaultFromMigrate) { @@ -91,7 +104,8 @@ void UserWriteBlockModeOpObserver::onInserts(OperationContext* opCtx, } void UserWriteBlockModeOpObserver::onUpdate(OperationContext* opCtx, - const OplogUpdateEntryArgs& args) { + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { const auto& nss = args.coll->ns(); if (args.updateArgs->source != OperationSource::kFromMigrate) { @@ -132,16 +146,19 @@ void UserWriteBlockModeOpObserver::onUpdate(OperationContext* opCtx, void UserWriteBlockModeOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kUserWritesCriticalSectionsNamespace) { - documentIdDecoration(opCtx) = doc; + documentIdDecoration(args) = doc; } } void UserWriteBlockModeOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { const auto& nss = coll->ns(); if (!args.fromMigrate) { _checkWriteAllowed(opCtx, nss); @@ -149,7 +166,7 @@ void UserWriteBlockModeOpObserver::onDelete(OperationContext* opCtx, if (nss == NamespaceString::kUserWritesCriticalSectionsNamespace && !user_writes_recoverable_critical_section_util::inRecoveryMode(opCtx)) { - auto& documentId = documentIdDecoration(opCtx); + auto& documentId = documentIdDecoration(args); invariant(!documentId.isEmpty()); const 
auto& deletedDoc = documentId; @@ -171,8 +188,8 @@ void UserWriteBlockModeOpObserver::onDelete(OperationContext* opCtx, } } -void UserWriteBlockModeOpObserver::_onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) { +void UserWriteBlockModeOpObserver::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { if (rbInfo.rollbackNamespaces.find(NamespaceString::kUserWritesCriticalSectionsNamespace) != rbInfo.rollbackNamespaces.end()) { UserWritesRecoverableCriticalSectionService::get(opCtx)->recoverRecoverableCriticalSections( @@ -230,7 +247,8 @@ repl::OpTime UserWriteBlockModeOpObserver::onDropCollection(OperationContext* op const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) { + CollectionDropType dropType, + bool markFromMigrate) { _checkWriteAllowed(opCtx, collectionName); return repl::OpTime(); } @@ -250,7 +268,8 @@ repl::OpTime UserWriteBlockModeOpObserver::preRenameCollection( const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) { + bool stayTemp, + bool markFromMigrate) { _checkWriteAllowed(opCtx, fromCollection); _checkWriteAllowed(opCtx, toCollection); return repl::OpTime(); @@ -262,7 +281,8 @@ void UserWriteBlockModeOpObserver::onRenameCollection(OperationContext* opCtx, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) { + bool stayTemp, + bool markFromMigrate) { _checkWriteAllowed(opCtx, fromCollection); _checkWriteAllowed(opCtx, toCollection); } diff --git a/src/mongo/db/op_observer/user_write_block_mode_op_observer.h b/src/mongo/db/op_observer/user_write_block_mode_op_observer.h index e4cc611da70b5..9652a72ddb32d 100644 --- a/src/mongo/db/op_observer/user_write_block_mode_op_observer.h +++ b/src/mongo/db/op_observer/user_write_block_mode_op_observer.h @@ -29,7 +29,24 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -37,7 +54,7 @@ namespace mongo { * OpObserver for user write blocking. On write operations, checks whether the current global user * write blocking state allows the write, and uasserts if not. 
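// A condensed sketch of the checking pattern this observer applies on the write path (see
// the onInserts()/onUpdate()/onDelete() hunks above): writes tagged fromMigrate bypass the
// check, and user writes funnel into a helper that uasserts when blocking is enabled. The
// GlobalUserWriteBlockState::checkUserWritesAllowed() call is assumed from the included
// global_user_write_block_state.h and is illustrative, not a copy of _checkWriteAllowed().
namespace {
void checkWriteAllowedSketch(OperationContext* opCtx, const NamespaceString& nss) {
    // uasserts (throws a DBException) when user writes to nss are currently blocked
    GlobalUserWriteBlockState::get(opCtx)->checkUserWritesAllowed(opCtx, nss);
}
}  // namespace

void onDeleteSketch(OperationContext* opCtx,
                    const CollectionPtr& coll,
                    const OplogDeleteEntryArgs& args) {
    if (!args.fromMigrate) {
        checkWriteAllowedSketch(opCtx, coll->ns());
    }
}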
*/ -class UserWriteBlockModeOpObserver final : public OpObserver { +class UserWriteBlockModeOpObserver final : public OpObserverNoop { UserWriteBlockModeOpObserver(const UserWriteBlockModeOpObserver&) = delete; UserWriteBlockModeOpObserver& operator=(const UserWriteBlockModeOpObserver&) = delete; @@ -54,7 +71,8 @@ class UserWriteBlockModeOpObserver final : public OpObserver { std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) final; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; void onInsertGlobalIndexKey(OperationContext* opCtx, const NamespaceString& globalIndexNss, @@ -68,12 +86,15 @@ class UserWriteBlockModeOpObserver final : public OpObserver { const BSONObj& key, const BSONObj& docKey) final {} - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; // DDL operations void onCreateIndex(OperationContext* opCtx, @@ -110,12 +131,12 @@ class UserWriteBlockModeOpObserver final : public OpObserver { void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final; - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; + CollectionDropType dropType, + bool markFromMigrate) final; void onDropIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -125,23 +146,23 @@ class UserWriteBlockModeOpObserver final : public OpObserver { // onRenameCollection is only for renaming to a nonexistent target NS, so we need // preRenameCollection too. - using OpObserver::preRenameCollection; repl::OpTime preRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) final; + bool stayTemp, + bool markFromMigrate) final; - using OpObserver::onRenameCollection; void onRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) final; + bool stayTemp, + bool markFromMigrate) final; void onImportCollection(OperationContext* opCtx, const UUID& importUUID, @@ -155,124 +176,35 @@ class UserWriteBlockModeOpObserver final : public OpObserver { // Note aboutToDelete is unchecked, but defined. void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; - - // Noop operations (don't perform any check). - - // Unchecked because sharded collection indexes catalog are modified from internal commands. - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - // Unchecked because global indexes are created from internal commands. 
- void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - // Index builds committing can be left unchecked since we kill any active index builds before - // enabling write blocking. This means any index build which gets to the commit phase while - // write blocking is active was started and hit the onStartIndexBuild hook with write blocking - // active, and thus must be allowed under user write blocking. - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; - // At the moment we are leaving the onAbortIndexBuilds as unchecked. This is because they can be - // called from both user and internal codepaths, and we don't want to risk throwing an assert - // for the internal paths. - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} + void onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} - - // We don't need to check this and preRenameCollection (they are in the same WUOW). - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - - // The transaction commit related hooks don't need to be checked, because all of the operations - // inside the transaction are checked and they all execute in one WUOW. 
- void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) final {} + // Noop operations below with explanations (don't perform any check). -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; + // onModifyCollectionShardingIndexCatalog() is unchecked because sharded collection indexes + // catalog are modified from internal commands. + + // onCreateGlobalIndex() and onDropGlobalIndex() are unchecked because global indexes are + // created from internal commands. + + // Index builds committing (onCommitIndexBuild()) can be left unchecked since we kill any active + // index builds before enabling write blocking. This means any index build which gets to the + // commit phase while write blocking is active was started and hit the onStartIndexBuild hook + // with write blocking active, and thus must be allowed under user write blocking. + // At the moment we are leaving the onAbortIndexBuildSinglePhase() and onAbortIndexBuild() as + // unchecked. This is because they can be called from both user and internal codepaths, and + // we don't want to risk throwing an assert for the internal paths. + + // We don't need to check postRenameCollection() and preRenameCollection (they are in the same + // WUOW). + + // The on*Transaction*() transaction commit related hooks don't need to be checked, because all + // of the operations inside the transaction are checked and they all execute in one WUOW. +private: // uasserts that a write to the given namespace is allowed under the current user write blocking // setting. 
void _checkWriteAllowed(OperationContext* opCtx, const NamespaceString& nss); diff --git a/src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp b/src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp index e435f52319831..c73820ca9df8d 100644 --- a/src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp +++ b/src/mongo/db/op_observer/user_write_block_mode_op_observer_test.cpp @@ -27,15 +27,40 @@ * it in the license file. */ +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/op_observer/user_write_block_mode_op_observer.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/s/global_user_write_block_state.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/write_block_bypass.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -91,7 +116,7 @@ class UserWriteBlockModeOpObserverTest : public ServiceContextMongoDTest { AutoGetCollection autoColl(opCtx, nss, MODE_IX); if (!autoColl) - FAIL(str::stream() << "Collection " << nss << " doesn't exist"); + FAIL(str::stream() << "Collection " << nss.toStringForErrorMsg() << " doesn't exist"); UserWriteBlockModeOpObserver opObserver; std::vector inserts; @@ -153,19 +178,48 @@ class UserWriteBlockModeOpObserverTest : public ServiceContextMongoDTest { opObserver.onCreateCollection( opCtx, CollectionPtr(), nss, {}, BSONObj(), OplogSlot(), false); opObserver.onCollMod(opCtx, nss, uuid, BSONObj(), {}, boost::none); - opObserver.onDropDatabase(opCtx, DatabaseName(boost::none, nss.db())); + opObserver.onDropDatabase(opCtx, nss.dbName()); opObserver.onDropCollection( opCtx, nss, uuid, 0, - UserWriteBlockModeOpObserver::CollectionDropType::kOnePhase); + UserWriteBlockModeOpObserver::CollectionDropType::kOnePhase, + /*markFromMigrate=*/false); opObserver.onDropIndex(opCtx, nss, uuid, "", BSONObj()); // For renames, make sure we check both from and to for the given namespace - opObserver.preRenameCollection(opCtx, nss, adminNss, uuid, boost::none, 0, false); - opObserver.preRenameCollection(opCtx, adminNss, nss, uuid, boost::none, 0, false); - opObserver.onRenameCollection(opCtx, nss, adminNss, uuid, boost::none, 0, false); - opObserver.onRenameCollection(opCtx, adminNss, nss, uuid, boost::none, 0, false); + opObserver.preRenameCollection(opCtx, + nss, + adminNss, + uuid, + boost::none, + 0, + /*stayTemp=*/false, + /*markFromMigrate=*/false); + opObserver.preRenameCollection(opCtx, + adminNss, + nss, + uuid, + boost::none, + 0, + /*stayTemp=*/false, + 
/*markFromMigrate=*/false); + opObserver.onRenameCollection(opCtx, + nss, + adminNss, + uuid, + boost::none, + 0, + /*stayTemp=*/false, + /*markFromMigrate=*/false); + opObserver.onRenameCollection(opCtx, + adminNss, + nss, + uuid, + boost::none, + 0, + /*stayTemp=*/false, + /*markFromMigrate=*/false); opObserver.onImportCollection(opCtx, uuid, nss, 0, 0, BSONObj(), BSONObj(), false); } catch (...) { // Make it easier to see that this is where we failed. @@ -182,29 +236,53 @@ class UserWriteBlockModeOpObserverTest : public ServiceContextMongoDTest { AssertionException); ASSERT_THROWS(opObserver.onCollMod(opCtx, nss, uuid, BSONObj(), {}, boost::none), AssertionException); - ASSERT_THROWS(opObserver.onDropDatabase(opCtx, DatabaseName(boost::none, nss.db())), - AssertionException); + ASSERT_THROWS(opObserver.onDropDatabase(opCtx, nss.dbName()), AssertionException); ASSERT_THROWS(opObserver.onDropCollection( opCtx, nss, uuid, 0, - UserWriteBlockModeOpObserver::CollectionDropType::kOnePhase), + UserWriteBlockModeOpObserver::CollectionDropType::kOnePhase, + /*markFromMigrate=*/false), AssertionException); ASSERT_THROWS(opObserver.onDropIndex(opCtx, nss, uuid, "", BSONObj()), AssertionException); - ASSERT_THROWS( - opObserver.preRenameCollection(opCtx, nss, adminNss, uuid, boost::none, 0, false), - AssertionException); - ASSERT_THROWS( - opObserver.preRenameCollection(opCtx, adminNss, nss, uuid, boost::none, 0, false), - AssertionException); - ASSERT_THROWS( - opObserver.onRenameCollection(opCtx, nss, adminNss, uuid, boost::none, 0, false), - AssertionException); - ASSERT_THROWS( - opObserver.onRenameCollection(opCtx, adminNss, nss, uuid, boost::none, 0, false), - AssertionException); + ASSERT_THROWS(opObserver.preRenameCollection(opCtx, + nss, + adminNss, + uuid, + boost::none, + 0, + /*stayTemp=*/false, + /*markFromMigrate=*/false), + AssertionException); + ASSERT_THROWS(opObserver.preRenameCollection(opCtx, + adminNss, + nss, + uuid, + boost::none, + 0, + /*stayTemp=*/false, + /*markFromMigrate=*/false), + AssertionException); + ASSERT_THROWS(opObserver.onRenameCollection(opCtx, + nss, + adminNss, + uuid, + boost::none, + 0, + /*stayTemp=*/false, + /*markFromMigrate=*/false), + AssertionException); + ASSERT_THROWS(opObserver.onRenameCollection(opCtx, + adminNss, + nss, + uuid, + boost::none, + 0, + /*stayTemp=*/false, + /*markFromMigrate=*/false), + AssertionException); ASSERT_THROWS( opObserver.onImportCollection(opCtx, uuid, nss, 0, 0, BSONObj(), BSONObj(), false), AssertionException); diff --git a/src/mongo/db/operation_context.cpp b/src/mongo/db/operation_context.cpp index 2614555388bf9..171ccce31bf76 100644 --- a/src/mongo/db/operation_context.cpp +++ b/src/mongo/db/operation_context.cpp @@ -28,22 +28,35 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/operation_context.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include + +#include "mongo/base/error_extra_info.h" +#include "mongo/base/string_data.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/operation_key_manager.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/random.h" -#include "mongo/transport/baton.h" +#include "mongo/stdx/thread.h" 
+#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" #include "mongo/util/clock_source.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/system_tick_source.h" +#include "mongo/util/waitable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -147,12 +160,6 @@ bool OperationContext::hasDeadlineExpired() const { return true; } - // TODO: Remove once all OperationContexts are properly connected to Clients and ServiceContexts - // in tests. - if (MONGO_unlikely(!getClient() || !getServiceContext())) { - return false; - } - const auto now = getServiceContext()->getFastClockSource()->now(); return now >= getDeadline(); } @@ -220,17 +227,11 @@ bool opShouldFail(Client* client, const BSONObj& failPointInfo) { } // namespace Status OperationContext::checkForInterruptNoAssert() noexcept { - // TODO: Remove the MONGO_likely(hasClientAndServiceContext) once all operation contexts are - // constructed with clients. - const auto hasClientAndServiceContext = getClient() && getServiceContext(); - - if (MONGO_likely(hasClientAndServiceContext) && getClient()->getKilled() && - !_isExecutingShutdown) { + if (getClient()->getKilled() && !_isExecutingShutdown) { return Status(ErrorCodes::ClientMarkedKilled, "client has been killed"); } - if (MONGO_likely(hasClientAndServiceContext) && getServiceContext()->getKillAllOperations() && - !_isExecutingShutdown) { + if (getServiceContext()->getKillAllOperations() && !_isExecutingShutdown) { return Status(ErrorCodes::InterruptedAtShutdown, "interrupted at shutdown"); } diff --git a/src/mongo/db/operation_context.h b/src/mongo/db/operation_context.h index 0027cb683b116..570035a22d2d8 100644 --- a/src/mongo/db/operation_context.h +++ b/src/mongo/db/operation_context.h @@ -29,17 +29,29 @@ #pragma once -#include "mongo/util/assert_util.h" +#include +#include +#include #include -#include +#include +#include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/baton.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_id.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_options.h" @@ -49,9 +61,11 @@ #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" #include "mongo/util/concurrency/with_lock.h" #include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/interruptible.h" #include "mongo/util/lockable_adapter.h" @@ -61,9 +75,7 @@ namespace mongo { class CurOp; -class ProgressMeter; class ServiceContext; -class StringData; namespace repl { class UnreplicatedWritesBlock; @@ -93,7 +105,7 @@ extern FailPoint maxTimeNeverTimeOut; * (RecoveryUnitState) to reduce complexity and duplication in the storage-engine specific * RecoveryUnit and to allow better invariant checking. 
*/ -class OperationContext : public Interruptible, public Decorable { +class OperationContext final : public Interruptible, public Decorable { OperationContext(const OperationContext&) = delete; OperationContext& operator=(const OperationContext&) = delete; @@ -214,6 +226,9 @@ class OperationContext : public Interruptible, public Decorable(_comment->firstElement()) : boost::none; } + boost::optional getCommentOwnedCopy() const { + return _comment.has_value() ? boost::optional{_comment->copy()} : boost::none; + } + /** * Sets whether this operation is an exhaust command. */ @@ -894,12 +916,25 @@ class LockFreeReadsBlock { LockFreeReadsBlock& operator=(const LockFreeReadsBlock&) = delete; public: + // Allow move operators. + LockFreeReadsBlock(LockFreeReadsBlock&& rhs) : _opCtx(rhs._opCtx) { + rhs._opCtx = nullptr; + }; + LockFreeReadsBlock& operator=(LockFreeReadsBlock&& rhs) { + _opCtx = rhs._opCtx; + rhs._opCtx = nullptr; + + return *this; + }; + LockFreeReadsBlock(OperationContext* opCtx) : _opCtx(opCtx) { _opCtx->incrementLockFreeReadOpCount(); } ~LockFreeReadsBlock() { - _opCtx->decrementLockFreeReadOpCount(); + if (_opCtx) { + _opCtx->decrementLockFreeReadOpCount(); + } } private: diff --git a/src/mongo/db/operation_context_group.cpp b/src/mongo/db/operation_context_group.cpp index bb215d2109566..fa34abc9a1429 100644 --- a/src/mongo/db/operation_context_group.cpp +++ b/src/mongo/db/operation_context_group.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/operation_context_group.h" +#include #include "mongo/db/operation_context.h" +#include "mongo/db/operation_context_group.h" #include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/operation_context_group.h b/src/mongo/db/operation_context_group.h index 0de0792e2691f..690305264c097 100644 --- a/src/mongo/db/operation_context_group.h +++ b/src/mongo/db/operation_context_group.h @@ -29,10 +29,16 @@ #pragma once +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/operation_context_noop.h b/src/mongo/db/operation_context_noop.h deleted file mode 100644 index ca47eefb03a3c..0000000000000 --- a/src/mongo/db/operation_context_noop.h +++ /dev/null @@ -1,64 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. 
If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ -#pragma once - -#include - -#include "mongo/db/concurrency/locker_noop.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/storage/recovery_unit_noop.h" -#include "mongo/db/storage/write_unit_of_work.h" - -namespace mongo { - -class Client; - -class OperationContextNoop : public OperationContext { -public: - /** - * These constructors are for use in legacy tests that do not need operation contexts that are - * properly connected to clients. - */ - OperationContextNoop() : OperationContextNoop(nullptr, 0) {} - OperationContextNoop(RecoveryUnit* ru) : OperationContextNoop(nullptr, 0) { - setRecoveryUnit(std::unique_ptr(ru), - WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); - } - - /** - * This constructor is for use by ServiceContexts, and should not be called directly. - */ - OperationContextNoop(Client* client, unsigned int opId) : OperationContext(client, opId) { - setRecoveryUnit(std::make_unique(), - WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); - setLockState(std::make_unique()); - } -}; - -} // namespace mongo diff --git a/src/mongo/db/operation_context_test.cpp b/src/mongo/db/operation_context_test.cpp index 5eb9140159843..c32e9025cf0e4 100644 --- a/src/mongo/db/operation_context_test.cpp +++ b/src/mongo/db/operation_context_test.cpp @@ -28,29 +28,51 @@ */ -#include "mongo/platform/basic.h" - -#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/curop.h" -#include "mongo/db/json.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/operation_context_group.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/logv2/log.h" -#include "mongo/logv2/log_debug.h" -#include "mongo/stdx/future.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/future.h" // IWYU pragma: keep #include "mongo/stdx/thread.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/future.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/tick_source.h" #include "mongo/util/tick_source_mock.h" #include "mongo/util/time_support.h" @@ -748,7 +770,7 @@ class ThreadedOperationDeadlineTests : public OperationDeadlineTests { boost::optional maxTime, WaitFn waitFn) { auto barrier = std::make_shared(2); - task = stdx::packaged_task([=] { + task = stdx::packaged_task([=, this] { if (maxTime) 
opCtx->setDeadlineByDate(*maxTime, ErrorCodes::ExceededTimeLimit); stdx::unique_lock lk(mutex); @@ -1116,6 +1138,9 @@ TEST_F(OperationContextTest, CurrentOpExcludesKilledOperations) { auto client = makeClient("MainClient"); auto opCtx = client->makeOperationContext(); + const boost::intrusive_ptr expCtx(new ExpressionContext( + opCtx.get(), nullptr, NamespaceString::createNamespaceString_forTest("foo.bar"_sd))); + for (auto truncateOps : {true, false}) { for (auto backtraceMode : {true, false}) { BSONObjBuilder bobNoOpCtx, bobKilledOpCtx; @@ -1129,14 +1154,14 @@ TEST_F(OperationContextTest, CurrentOpExcludesKilledOperations) { // Generate report in absence of any opCtx CurOp::reportCurrentOpForClient( - opCtx.get(), threadClient.get(), truncateOps, backtraceMode, &bobNoOpCtx); + expCtx, threadClient.get(), truncateOps, backtraceMode, &bobNoOpCtx); auto threadOpCtx = threadClient->makeOperationContext(); getServiceContext()->killAndDelistOperation(threadOpCtx.get()); // Generate report in presence of a killed opCtx CurOp::reportCurrentOpForClient( - opCtx.get(), threadClient.get(), truncateOps, backtraceMode, &bobKilledOpCtx); + expCtx, threadClient.get(), truncateOps, backtraceMode, &bobKilledOpCtx); }); thread.join(); diff --git a/src/mongo/db/operation_cpu_timer.cpp b/src/mongo/db/operation_cpu_timer.cpp index 88f275dd13756..8b6009e38e6f2 100644 --- a/src/mongo/db/operation_cpu_timer.cpp +++ b/src/mongo/db/operation_cpu_timer.cpp @@ -28,21 +28,30 @@ */ -#include +// IWYU pragma: no_include +#include +#include +#include #include +#include +#include +#include +#include #if defined(__linux__) #include #endif // defined(__linux__) -#include "mongo/db/operation_cpu_timer.h" - #include "mongo/base/error_codes.h" -#include "mongo/db/client.h" +#include "mongo/base/status.h" #include "mongo/db/operation_context.h" +#include "mongo/db/operation_cpu_timer.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/stdx/thread.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/errno_util.h" #include "mongo/util/fail_point.h" diff --git a/src/mongo/db/operation_cpu_timer.h b/src/mongo/db/operation_cpu_timer.h index 1ac0f5c93cd29..b60b01473264d 100644 --- a/src/mongo/db/operation_cpu_timer.h +++ b/src/mongo/db/operation_cpu_timer.h @@ -29,13 +29,16 @@ #pragma once +#include #include +#include #include "mongo/util/duration.h" namespace mongo { class OperationContext; + class OperationCPUTimer; /** diff --git a/src/mongo/db/operation_cpu_timer_test.cpp b/src/mongo/db/operation_cpu_timer_test.cpp index 54f2785a6ff18..8fe5bd17592cf 100644 --- a/src/mongo/db/operation_cpu_timer_test.cpp +++ b/src/mongo/db/operation_cpu_timer_test.cpp @@ -29,17 +29,29 @@ #include "mongo/db/operation_cpu_timer.h" + +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/atomic_word.h" -#include "mongo/stdx/chrono.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" -#include "mongo/stdx/mutex.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include 
"mongo/unittest/framework.h" #include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -105,10 +117,14 @@ TEST_F(OperationCPUTimerTest, TestReset) { auto timer = makeTimer(); timer->start(); - busyWait(Milliseconds(1)); // Introducing some delay for the timer to measure. + busyWait(Milliseconds(2)); // Introducing some delay for the timer to measure. timer->stop(); auto elapsedAfterStop = timer->getElapsed(); - ASSERT_GTE(elapsedAfterStop, Milliseconds(1)); + // Due to inconsistencies between the CPU time-based clock used in the timer and the + // clock used in busyWait, the elapsed CPU time is sometimes observed as being less than the + // time spent busy waiting. To account for that, only assert that any amount of CPU + // time has elapsed, even though the thread was supposed to have busy-waited for 2ms. + ASSERT_GT(elapsedAfterStop, Nanoseconds(0)); timer->start(); auto elapsedAfterReset = timer->getElapsed(); diff --git a/src/mongo/db/operation_id.cpp b/src/mongo/db/operation_id.cpp index 3184fc44d2ee4..653241b0eb279 100644 --- a/src/mongo/db/operation_id.cpp +++ b/src/mongo/db/operation_id.cpp @@ -29,6 +29,10 @@ #include "mongo/db/operation_id.h" +#include + +#include + #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/operation_id.h b/src/mongo/db/operation_id.h index 44c2a28663da2..32612b50bfbe4 100644 --- a/src/mongo/db/operation_id.h +++ b/src/mongo/db/operation_id.h @@ -29,6 +29,12 @@ #pragma once +#include +#include +#include + +#include + #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_set.h" diff --git a/src/mongo/db/operation_id_test.cpp b/src/mongo/db/operation_id_test.cpp index ff27050e26ede..1a2dd17ff94df 100644 --- a/src/mongo/db/operation_id_test.cpp +++ b/src/mongo/db/operation_id_test.cpp @@ -28,13 +28,14 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" #include "mongo/db/operation_id.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/operation_key_manager.cpp b/src/mongo/db/operation_key_manager.cpp index 57bdde47ae93f..ee8941942e1da 100644 --- a/src/mongo/db/operation_key_manager.cpp +++ b/src/mongo/db/operation_key_manager.cpp @@ -28,15 +28,21 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/operation_key_manager.h" - +#include +#include +#include #include +#include + +#include #include "mongo/base/error_codes.h" +#include "mongo/db/operation_key_manager.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/operation_key_manager.h b/src/mongo/db/operation_key_manager.h index 865b618f5ed5f..b2870e8909b00 100644 --- a/src/mongo/db/operation_key_manager.h +++ b/src/mongo/db/operation_key_manager.h @@ -29,8 +29,13 @@ #pragma once +#include + +#include + #include "mongo/db/client.h" #include "mongo/db/operation_context.h" +#include "mongo/db/operation_id.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" #include 
"mongo/stdx/unordered_map.h" diff --git a/src/mongo/db/operation_killer.cpp b/src/mongo/db/operation_killer.cpp index a49359a341710..ab90923a90e24 100644 --- a/src/mongo/db/operation_killer.cpp +++ b/src/mongo/db/operation_killer.cpp @@ -28,15 +28,18 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/operation_killer.h" - -#include "mongo/db/audit.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/client.h" #include "mongo/db/operation_key_manager.h" +#include "mongo/db/operation_killer.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -50,12 +53,8 @@ OperationKiller::OperationKiller(Client* myClient) : _myClient(myClient) { bool OperationKiller::isGenerallyAuthorizedToKill() const { AuthorizationSession* authzSession = AuthorizationSession::get(_myClient); - if (authzSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::killop)) { - return true; - } - - return false; + return authzSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(authzSession->getUserTenantId()), ActionType::killop); } bool OperationKiller::isAuthorizedToKill(const LockedClient& target) const { diff --git a/src/mongo/db/operation_killer.h b/src/mongo/db/operation_killer.h index d9856a8bcea44..829a5219f4b12 100644 --- a/src/mongo/db/operation_killer.h +++ b/src/mongo/db/operation_killer.h @@ -31,6 +31,7 @@ #include "mongo/db/client.h" #include "mongo/db/operation_context.h" +#include "mongo/db/operation_id.h" #include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/operation_time_tracker.cpp b/src/mongo/db/operation_time_tracker.cpp index 2d45b49747c43..dfadea6b0fa81 100644 --- a/src/mongo/db/operation_time_tracker.cpp +++ b/src/mongo/db/operation_time_tracker.cpp @@ -27,10 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/db/operation_time_tracker.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/operation_time_tracker.h b/src/mongo/db/operation_time_tracker.h index a259ee22d3766..02fd648a7f958 100644 --- a/src/mongo/db/operation_time_tracker.h +++ b/src/mongo/db/operation_time_tracker.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/operation_time_tracker_test.cpp b/src/mongo/db/operation_time_tracker_test.cpp index 6ed16feba58f6..1ec6320a8d709 100644 --- a/src/mongo/db/operation_time_tracker_test.cpp +++ b/src/mongo/db/operation_time_tracker_test.cpp @@ -27,11 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/logical_time.h" #include "mongo/db/operation_time_tracker.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/ops/SConscript b/src/mongo/db/ops/SConscript index cc820e1d6d504..a3aa548850f6d 100644 --- a/src/mongo/db/ops/SConscript +++ b/src/mongo/db/ops/SConscript @@ -76,6 +76,8 @@ env.Library( '$BUILD_DIR/mongo/db/timeseries/timeseries_stats', '$BUILD_DIR/mongo/db/timeseries/timeseries_write_util', '$BUILD_DIR/mongo/db/transaction/transaction', + '$BUILD_DIR/mongo/db/transaction/transaction_api', + '$BUILD_DIR/mongo/executor/inline_executor', '$BUILD_DIR/mongo/s/query_analysis_sampler', '$BUILD_DIR/mongo/util/fail_point', '$BUILD_DIR/mongo/util/log_and_backoff', @@ -128,8 +130,11 @@ env.Library( env.Library( target='parsed_update', - source='parsed_update.cpp', + source='parsed_update_base.cpp', LIBDEPS=[ + '$BUILD_DIR/mongo/db/disk_use_options_idl', + '$BUILD_DIR/mongo/db/shard_role_api', + '$BUILD_DIR/mongo/db/timeseries/timeseries_conversion_util', '$BUILD_DIR/mongo/db/update/update_driver', 'parsed_update_array_filters', ], @@ -162,7 +167,9 @@ env.CppUnitTest( env.CppIntegrationTest( target='db_ops_integration_test', - source='write_ops_document_stream_integration_test.cpp', + source=[ + 'write_ops_document_stream_integration_test.cpp', + ], LIBDEPS=[ '$BUILD_DIR/mongo/client/clientdriver_network', '$BUILD_DIR/mongo/transport/transport_layer_egress_init', diff --git a/src/mongo/db/ops/delete.cpp b/src/mongo/db/ops/delete.cpp index e7dffaeaa52f2..d250c00464e5e 100644 --- a/src/mongo/db/ops/delete.cpp +++ b/src/mongo/db/ops/delete.cpp @@ -27,48 +27,52 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/ops/delete.h" +#include +#include -#include "mongo/db/catalog/database.h" +#include "mongo/db/curop.h" +#include "mongo/db/ops/delete.h" #include "mongo/db/ops/parsed_delete.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/shard_role.h" +#include "mongo/util/assert_util.h" namespace mongo { long long deleteObjects(OperationContext* opCtx, - const CollectionPtr& collection, - const NamespaceString& ns, + const CollectionAcquisition& collection, BSONObj pattern, bool justOne, bool god, bool fromMigrate) { auto request = DeleteRequest{}; - request.setNsString(ns); + request.setNsString(collection.nss()); request.setQuery(pattern); request.setMulti(!justOne); request.setGod(god); request.setFromMigrate(fromMigrate); - ParsedDelete parsedDelete(opCtx, &request); + ParsedDelete parsedDelete(opCtx, &request, collection.getCollectionPtr()); uassertStatusOK(parsedDelete.parseRequest()); auto exec = uassertStatusOK(getExecutorDelete( - &CurOp::get(opCtx)->debug(), &collection, &parsedDelete, boost::none /* verbosity */)); + &CurOp::get(opCtx)->debug(), collection, &parsedDelete, boost::none /* verbosity */)); return exec->executeDelete(); } DeleteResult deleteObject(OperationContext* opCtx, - const CollectionPtr& collection, + const CollectionAcquisition& collection, const DeleteRequest& request) { - ParsedDelete parsedDelete(opCtx, &request); + ParsedDelete parsedDelete(opCtx, &request, collection.getCollectionPtr()); uassertStatusOK(parsedDelete.parseRequest()); auto exec = uassertStatusOK(getExecutorDelete( - &CurOp::get(opCtx)->debug(), &collection, &parsedDelete, boost::none /* verbosity */)); + &CurOp::get(opCtx)->debug(), collection, &parsedDelete, boost::none /* verbosity */)); if (!request.getReturnDeleted()) { return {exec->executeDelete(), boost::none}; diff --git a/src/mongo/db/ops/delete.h b/src/mongo/db/ops/delete.h index 79a35a3879b8d..513c1976e0688 100644 --- a/src/mongo/db/ops/delete.h +++ b/src/mongo/db/ops/delete.h @@ -29,7 +29,12 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/delete_request_gen.h" #include "mongo/db/query/plan_executor.h" @@ -37,6 +42,7 @@ namespace mongo { class Database; class OperationContext; +class CollectionAcquisition; /** * Deletes objects from 'collection' that match the query predicate given by 'pattern'. If 'justOne' @@ -44,8 +50,7 @@ class OperationContext; * not yield. If 'god' is true, deletes are allowed on system namespaces. 
*/ long long deleteObjects(OperationContext* opCtx, - const CollectionPtr& collection, - const NamespaceString& ns, + const CollectionAcquisition& collection, BSONObj pattern, bool justOne, bool god = false, @@ -57,7 +62,7 @@ struct DeleteResult { }; DeleteResult deleteObject(OperationContext* opCtx, - const CollectionPtr& collection, + const CollectionAcquisition& collection, const DeleteRequest& request); } // namespace mongo diff --git a/src/mongo/db/ops/insert.cpp b/src/mongo/db/ops/insert.cpp index 3caaf3516688b..0d1705a6c3ba1 100644 --- a/src/mongo/db/ops/insert.cpp +++ b/src/mongo/db/ops/insert.cpp @@ -29,17 +29,30 @@ #include "mongo/db/ops/insert.h" +#include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/query/dbref.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/update/storage_validation.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/util/fail_point.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" namespace mongo { @@ -185,7 +198,8 @@ Status userAllowedWriteNS(OperationContext* opCtx, const NamespaceString& ns) { if (ns.isSystemDotProfile() || ns.isSystemDotViews() || (ns.isOplog() && repl::ReplicationCoordinator::get(getGlobalServiceContext())->isReplEnabled())) { - return Status(ErrorCodes::InvalidNamespace, str::stream() << "cannot write to " << ns); + return Status(ErrorCodes::InvalidNamespace, + str::stream() << "cannot write to " << ns.toStringForErrorMsg()); } return userAllowedCreateNS(opCtx, ns); } @@ -198,7 +212,8 @@ Status userAllowedCreateNS(OperationContext* opCtx, const NamespaceString& ns) { } if (!ns.isValid(NamespaceString::DollarInDbNameBehavior::Disallow)) { - return Status(ErrorCodes::InvalidNamespace, str::stream() << "Invalid namespace: " << ns); + return Status(ErrorCodes::InvalidNamespace, + str::stream() << "Invalid namespace: " << ns.toStringForErrorMsg()); } if (!NamespaceString::validCollectionName(ns.coll())) { @@ -206,25 +221,19 @@ Status userAllowedCreateNS(OperationContext* opCtx, const NamespaceString& ns) { str::stream() << "Invalid collection name: " << ns.coll()); } - if (serverGlobalParams.clusterRole.exclusivelyHasConfigRole() && !ns.isOnInternalDb()) { - return Status(ErrorCodes::InvalidNamespace, - str::stream() - << "Can't create user databases on a dedicated --configsvr instance " - << ns); - } - if (ns.isSystemDotProfile()) { return Status::OK(); } if (ns.isSystem() && !ns.isLegalClientSystemNS(serverGlobalParams.featureCompatibility)) { return Status(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid system namespace: " << ns); + str::stream() << "Invalid system namespace: " << ns.toStringForErrorMsg()); } if (ns.isNormalCollection() && ns.size() > NamespaceString::MaxNsCollectionLen) { return Status(ErrorCodes::InvalidNamespace, - str::stream() << "Fully qualified namespace is too long. Namespace: " << ns + str::stream() << "Fully qualified namespace is too long. 
Namespace: " + << ns.toStringForErrorMsg() << " Max: " << NamespaceString::MaxNsCollectionLen); } @@ -243,7 +252,8 @@ Status userAllowedCreateNS(OperationContext* opCtx, const NamespaceString& ns) { return Status::OK(); } - return Status(ErrorCodes::BadValue, str::stream() << "Invalid namespace: " << ns); + return Status(ErrorCodes::BadValue, + str::stream() << "Invalid namespace: " << ns.toStringForErrorMsg()); } return Status::OK(); diff --git a/src/mongo/db/ops/insert.h b/src/mongo/db/ops/insert.h index faed6de589022..5e775cfa51cdc 100644 --- a/src/mongo/db/ops/insert.h +++ b/src/mongo/db/ops/insert.h @@ -27,6 +27,9 @@ * it in the license file. */ +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" diff --git a/src/mongo/db/ops/parsed_delete.cpp b/src/mongo/db/ops/parsed_delete.cpp index 0cd67bbedec8a..490c176c6c1f5 100644 --- a/src/mongo/db/ops/parsed_delete.cpp +++ b/src/mongo/db/ops/parsed_delete.cpp @@ -27,43 +27,49 @@ * it in the license file. */ +#include "mongo/db/ops/parsed_delete.h" -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/ops/parsed_delete.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/collection_operation_source.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/matcher/expression_algo.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/ops/delete_request_gen.h" +#include "mongo/db/ops/parsed_writes_common.h" #include "mongo/db/query/canonical_query.h" -#include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/get_executor.h" -#include "mongo/db/query/query_planner_common.h" -#include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_update_delete_util.h" #include "mongo/util/assert_util.h" -#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite namespace mongo { +// Note: The caller should hold a lock on the 'collection' if it really exists so that it can stay +// alive until the end of the ParsedDelete's lifetime. ParsedDelete::ParsedDelete(OperationContext* opCtx, const DeleteRequest* request, - boost::optional timeseriesOptions) - : _opCtx(opCtx), _request(request) { - if (feature_flags::gTimeseriesDeletesSupport.isEnabled( - serverGlobalParams.featureCompatibility) && - timeseriesOptions) { - _timeseriesDeleteDetails = std::make_unique(*timeseriesOptions); - } -} + const CollectionPtr& collection, + bool isTimeseriesDelete) + : _opCtx(opCtx), + _request(request), + _collection(collection), + _timeseriesDeleteQueryExprs(isTimeseriesDelete + ? createTimeseriesWritesQueryExprsIfNecessary( + feature_flags::gTimeseriesDeletesSupport.isEnabled( + serverGlobalParams.featureCompatibility), + collection) + : nullptr), + _isRequestToTimeseries(isTimeseriesDelete) {} Status ParsedDelete::parseRequest() { dassert(!_canonicalQuery.get()); @@ -75,136 +81,58 @@ Status ParsedDelete::parseRequest() { // DeleteStage would not return the deleted document. 
invariant(_request->getProj().isEmpty() || _request->getReturnDeleted()); - std::unique_ptr collator(nullptr); - if (!_request->getCollation().isEmpty()) { - auto statusWithCollator = CollatorFactoryInterface::get(_opCtx->getServiceContext()) - ->makeFromBSON(_request->getCollation()); - - if (!statusWithCollator.isOK()) { - return statusWithCollator.getStatus(); - } - collator = uassertStatusOK(std::move(statusWithCollator)); - } + auto [collatorToUse, collationMatchesDefault] = + resolveCollator(_opCtx, _request->getCollation(), _collection); _expCtx = make_intrusive(_opCtx, - std::move(collator), + std::move(collatorToUse), _request->getNsString(), _request->getLegacyRuntimeConstants(), _request->getLet()); + _expCtx->collationMatchesDefault = collationMatchesDefault; // The '_id' field of a time-series collection needs to be handled as other fields. - if (CanonicalQuery::isSimpleIdQuery(_request->getQuery()) && !_timeseriesDeleteDetails) { + if (CanonicalQuery::isSimpleIdQuery(_request->getQuery()) && !_timeseriesDeleteQueryExprs) { return Status::OK(); } _expCtx->startExpressionCounters(); - return parseQueryToCQ(); -} -Status ParsedDelete::splitOutBucketMatchExpression(const ExtensionsCallback& extensionsCallback) { - tassert(7307300, - "Can split out the bucket-level match expression only for timeseries deletes", - _timeseriesDeleteDetails); - - auto& details = _timeseriesDeleteDetails; - const auto& timeseriesOptions = details->_timeseriesOptions; - - auto parseDeleteQuery = [&](const BSONObj deleteQuery) { - return MatchExpressionParser::parse(deleteQuery, - _expCtx, - extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures); - }; - - auto swMatchExpr = parseDeleteQuery(_request->getQuery()); - if (!swMatchExpr.isOK()) { - return swMatchExpr.getStatus(); - } - - if (auto optMetaField = timeseriesOptions.getMetaField()) { - auto metaField = optMetaField->toString(); - std::tie(details->_bucketExpr, details->_residualExpr) = expression::splitMatchExpressionBy( - std::move(swMatchExpr.getValue()), - {metaField}, - {{metaField, timeseries::kBucketMetaFieldName.toString()}}, - expression::isOnlyDependentOn); - details->_bucketExpr = - timeseries::getBucketLevelPredicateForWrites(std::move(details->_bucketExpr)); - } else if (_request->getMulti() && _request->getQuery().isEmpty()) { - // Special optimization: if the delete query for multi delete is empty, we don't set - // the residual filter. Otherwise, the non-null empty residual filter leads to the TS_MODIFY - // plan which is ineffective since it would unpack every bucket. Instead, we set the bucket - // filter to be one on "control.closed" so that we don't delete closed buckets. - details->_bucketExpr = timeseries::getBucketLevelPredicateForWrites(); - } else { - // The '_residualExpr' becomes the same as the original query predicate because nothing is - // to be split out if there is no meta field in the timeseries collection. - details->_residualExpr = std::move(swMatchExpr.getValue()); - details->_bucketExpr = timeseries::getBucketLevelPredicateForWrites(); + if (auto&& queryExprs = _timeseriesDeleteQueryExprs) { + // TODO: Due to the complexity which is related to the efficient sort support, we don't + // support yet findAndModify with a query and sort but it should not be impossible. This + // code assumes that in findAndModify code path, the parsed delete constructor should be + // called with isTimeseriesDelete = true for a time-series collection. 
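// (Illustrative, hypothetical example: a single-document time-series delete that supplies a
//  sort, e.g. a findAndModify with { query: { m: "a" }, sort: { t: -1 }, remove: true }, is
//  rejected by the uassert below, while a multi-delete or a sort-less findAndModify passes.)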
+ uassert(ErrorCodes::InvalidOptions, + "Cannot perform a findAndModify with a query and sort on a time-series collection.", + _request->getMulti() || _request->getSort().isEmpty()); + + // If we're deleting documents from a time-series collection, splits the match expression + // into a bucket-level match expression and a residual expression so that we can push down + // the bucket-level match expression to the system bucket collection SCAN or FETCH/IXSCAN. + *_timeseriesDeleteQueryExprs = timeseries::getMatchExprsForWrites( + _expCtx, *_collection->getTimeseriesOptions(), _request->getQuery()); + + // At this point, we parsed user-provided match expression. After this point, the new + // canonical query is internal to the bucket SCAN or FETCH/IXSCAN and will have additional + // internal match expression. We do not need to track the internal match expression counters + // and so we stop the counters. + _expCtx->stopExpressionCounters(); + + // At least, the bucket-level filter must contain the closed bucket filter. + tassert(7542400, "Bucket-level filter must not be null", queryExprs->_bucketExpr); } - return Status::OK(); + return parseQueryToCQ(); } Status ParsedDelete::parseQueryToCQ() { dassert(!_canonicalQuery.get()); - const ExtensionsCallbackReal extensionsCallback(_opCtx, &_request->getNsString()); - - // If we're deleting documents from a time-series collection, splits the match expression into - // a bucket-level match expression and a residual expression so that we can push down the - // bucket-level match expression to the system bucket collection scan or fetch. - if (_timeseriesDeleteDetails) { - if (auto status = splitOutBucketMatchExpression(extensionsCallback); !status.isOK()) { - return status; - } - } - - // The projection needs to be applied after the delete operation, so we do not specify a - // projection during canonicalization. - auto findCommand = std::make_unique(_request->getNsString()); - if (_timeseriesDeleteDetails) { - // Only sets the filter if the query predicate has bucket match components. - if (_timeseriesDeleteDetails->_bucketExpr) { - findCommand->setFilter(_timeseriesDeleteDetails->_bucketExpr->serialize().getOwned()); - } - } else { - findCommand->setFilter(_request->getQuery().getOwned()); - } - findCommand->setSort(_request->getSort().getOwned()); - findCommand->setCollation(_request->getCollation().getOwned()); - findCommand->setHint(_request->getHint()); - - // Limit should only used for the findAndModify command when a sort is specified. If a sort - // is requested, we want to use a top-k sort for efficiency reasons, so should pass the - // limit through. Generally, a delete stage expects to be able to skip documents that were - // deleted out from under it, but a limit could inhibit that and give an EOF when the delete - // has not actually deleted a document. This behavior is fine for findAndModify, but should - // not apply to deletes in general. - if (!_request->getMulti() && !_request->getSort().isEmpty()) { - // TODO: Due to the complexity which is related to the efficient sort support, we don't - // support yet findAndModify with a query and sort but it should not be impossible. - // This code assumes that in findAndModify code path, the parsed delete constructor should - // be called with source == kTimeseriesDelete for a time-series collection. 
- uassert(ErrorCodes::InvalidOptions, - "Cannot perform a findAndModify with a query and sort on a time-series collection.", - !_timeseriesDeleteDetails); - findCommand->setLimit(1); - } - - // If the delete request has runtime constants or let parameters attached to it, pass them to - // the FindCommandRequest. - if (auto& runtimeConstants = _request->getLegacyRuntimeConstants()) - findCommand->setLegacyRuntimeConstants(*runtimeConstants); - if (auto& letParams = _request->getLet()) - findCommand->setLet(*letParams); - - auto statusWithCQ = - CanonicalQuery::canonicalize(_opCtx, - std::move(findCommand), - _request->getIsExplain(), - _expCtx, - extensionsCallback, - MatchExpressionParser::kAllowAllSpecialFeatures); + auto statusWithCQ = mongo::parseWriteQueryToCQ( + _expCtx->opCtx, + _expCtx.get(), + *_request, + _timeseriesDeleteQueryExprs ? _timeseriesDeleteQueryExprs->_bucketExpr.get() : nullptr); if (statusWithCQ.isOK()) { _canonicalQuery = std::move(statusWithCQ.getValue()); @@ -230,19 +158,8 @@ std::unique_ptr ParsedDelete::releaseParsedQuery() { return std::move(_canonicalQuery); } -void ParsedDelete::setCollator(std::unique_ptr collator) { - if (_timeseriesDeleteDetails && _timeseriesDeleteDetails->_residualExpr) { - _timeseriesDeleteDetails->_residualExpr->setCollator(collator.get()); - } - if (_canonicalQuery) { - _canonicalQuery->setCollator(std::move(collator)); - } else { - _expCtx->setCollator(std::move(collator)); - } -} - bool ParsedDelete::isEligibleForArbitraryTimeseriesDelete() const { - return _timeseriesDeleteDetails && (getResidualExpr() || !_request->getMulti()); + return _timeseriesDeleteQueryExprs && (getResidualExpr() || !_request->getMulti()); } } // namespace mongo diff --git a/src/mongo/db/ops/parsed_delete.h b/src/mongo/db/ops/parsed_delete.h index 7d3b3df0d029b..89afc48526db2 100644 --- a/src/mongo/db/ops/parsed_delete.h +++ b/src/mongo/db/ops/parsed_delete.h @@ -29,12 +29,25 @@ #pragma once +#include +#include #include +#include +#include #include "mongo/base/status.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_operation_source.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/parsed_writes_common.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -54,8 +67,6 @@ class OperationContext; * * A delete request is parsed to a CanonicalQuery, so this class is a thin, delete-specific * wrapper around canonicalization. - * - * No locks need to be held during parsing. */ class ParsedDelete { ParsedDelete(const ParsedDelete&) = delete; @@ -63,24 +74,15 @@ class ParsedDelete { public: /** - * Constructs a parsed delete for a regular delete which does not involve a time-series - * collection. - * - * The object pointed to by "request" must stay in scope for the life of the constructed - * ParsedDelete. - */ - ParsedDelete(OperationContext* opCtx, const DeleteRequest* request) - : ParsedDelete(opCtx, request, boost::none) {} - - /** - * Constructs a parsed delete which may involve a time-series collection. + * Constructs a parsed delete for a regular delete or a delete on a timeseries collection. 
* * The object pointed to by "request" must stay in scope for the life of the constructed * ParsedDelete. */ ParsedDelete(OperationContext* opCtx, const DeleteRequest* request, - boost::optional timeseriesOptions); + const CollectionPtr& collection, + bool isTimeseriesDelete = false); /** * Parses the delete request to a canonical query. On success, the parsed delete can be @@ -131,15 +133,14 @@ class ParsedDelete { return _expCtx; } - void setCollator(std::unique_ptr collator); - /** * Returns the non-modifiable residual MatchExpression. * * Note: see _timeseriesDeleteDetails._residualExpr for more details. */ const MatchExpression* getResidualExpr() const { - return _timeseriesDeleteDetails ? _timeseriesDeleteDetails->_residualExpr.get() : nullptr; + return _timeseriesDeleteQueryExprs ? _timeseriesDeleteQueryExprs->_residualExpr.get() + : nullptr; } /** @@ -148,8 +149,8 @@ class ParsedDelete { * Note: see _timeseriesDeleteDetails._bucketMatchExpr for more details. */ std::unique_ptr releaseResidualExpr() { - return _timeseriesDeleteDetails ? std::move(_timeseriesDeleteDetails->_residualExpr) - : nullptr; + return _timeseriesDeleteQueryExprs ? std::move(_timeseriesDeleteQueryExprs->_residualExpr) + : nullptr; } /** @@ -158,6 +159,10 @@ class ParsedDelete { */ bool isEligibleForArbitraryTimeseriesDelete() const; + bool isRequestToTimeseries() const { + return _isRequestToTimeseries; + } + private: // Transactional context. Not owned by us. OperationContext* _opCtx; @@ -170,25 +175,12 @@ class ParsedDelete { boost::intrusive_ptr _expCtx; - Status splitOutBucketMatchExpression(const ExtensionsCallback& extensionsCallback); - - // Time-series deletes take some special handling to make sure we delete the buckets collection, - // but interact with the documents as if they were unpacked. - struct TimeseriesDeleteDetails { - TimeseriesDeleteDetails(const TimeseriesOptions& timeseriesOptions) - : _timeseriesOptions(timeseriesOptions) {} - - TimeseriesOptions _timeseriesOptions; - - // The bucket-level match expressions. - std::unique_ptr _bucketExpr = nullptr; - - // The residual expression after splitting out metaField-dependent splittable match - // expressions. - std::unique_ptr _residualExpr = nullptr; - }; + const CollectionPtr& _collection; + // Contains the bucket-level expression and the residual expression and the bucket-level + // expresion should be pushed down to the bucket collection. + std::unique_ptr _timeseriesDeleteQueryExprs; - std::unique_ptr _timeseriesDeleteDetails = nullptr; + const bool _isRequestToTimeseries; }; } // namespace mongo diff --git a/src/mongo/db/ops/parsed_update.cpp b/src/mongo/db/ops/parsed_update.cpp index b9557e4b2fe5a..f8d59cf4c90a7 100644 --- a/src/mongo/db/ops/parsed_update.cpp +++ b/src/mongo/db/ops/parsed_update.cpp @@ -1,5 +1,5 @@ /** - * Copyright (C) 2018-present MongoDB, Inc. + * Copyright (C) 2023-present MongoDB, Inc. 
* * This program is free software: you can redistribute it and/or modify * it under the terms of the Server Side Public License, version 1, @@ -29,213 +29,21 @@ #include "mongo/db/ops/parsed_update.h" -#include "mongo/db/ops/parsed_update_array_filters.h" #include "mongo/db/ops/update_request.h" -#include "mongo/db/ops/write_ops_gen.h" -#include "mongo/db/query/canonical_query.h" -#include "mongo/db/query/collation/collator_factory_interface.h" namespace mongo { ParsedUpdate::ParsedUpdate(OperationContext* opCtx, const UpdateRequest* request, - const ExtensionsCallback& extensionsCallback, - bool forgoOpCounterIncrements) - : _opCtx(opCtx), - _request(request), - _expCtx(make_intrusive( + const CollectionPtr& collection, + bool forgoOpCounterIncrements, + bool isRequestToTimeseries) + : ParsedUpdateBase( opCtx, - nullptr, - _request->getNamespaceString(), - _request->getLegacyRuntimeConstants(), - _request->getLetParameters(), - true, // mayDbProfile. We pass 'true' here conservatively. In the future we may - // change this. - request->explain())), - _driver(_expCtx), - _canonicalQuery(), - _extensionsCallback(extensionsCallback) { - if (forgoOpCounterIncrements) { - _expCtx->enabledCounters = false; - } -} - -Status ParsedUpdate::parseRequest() { - // It is invalid to request that the UpdateStage return the prior or newly-updated version - // of a document during a multi-update. - invariant(!(_request->shouldReturnAnyDocs() && _request->isMulti())); - - // It is invalid to specify 'upsertSupplied:true' for a non-upsert operation, or if no upsert - // document was supplied with the request. - if (_request->shouldUpsertSuppliedDocument()) { - uassert(ErrorCodes::FailedToParse, - str::stream() << "cannot specify '" - << write_ops::UpdateOpEntry::kUpsertSuppliedFieldName - << ": true' for a non-upsert operation", - _request->isUpsert()); - const auto& constants = _request->getUpdateConstants(); - uassert(ErrorCodes::FailedToParse, - str::stream() << "the parameter '" - << write_ops::UpdateOpEntry::kUpsertSuppliedFieldName - << "' is set to 'true', but no document was supplied", - constants && (*constants)["new"_sd].type() == BSONType::Object); - } - - // It is invalid to request that a ProjectionStage be applied to the UpdateStage if the - // UpdateStage would not return any document. - invariant(_request->getProj().isEmpty() || _request->shouldReturnAnyDocs()); - - if (!_request->getCollation().isEmpty()) { - auto collator = CollatorFactoryInterface::get(_opCtx->getServiceContext()) - ->makeFromBSON(_request->getCollation()); - if (!collator.isOK()) { - return collator.getStatus(); - } - _expCtx->setCollator(std::move(collator.getValue())); - } - - auto statusWithArrayFilters = parsedUpdateArrayFilters( - _expCtx, _request->getArrayFilters(), _request->getNamespaceString()); - if (!statusWithArrayFilters.isOK()) { - return statusWithArrayFilters.getStatus(); - } - _arrayFilters = std::move(statusWithArrayFilters.getValue()); - - // We parse the update portion before the query portion because the dispostion of the update - // may determine whether or not we need to produce a CanonicalQuery at all. For example, if - // the update involves the positional-dollar operator, we must have a CanonicalQuery even if - // it isn't required for query execution. 
- parseUpdate(); - Status status = parseQuery(); - if (!status.isOK()) - return status; - return Status::OK(); -} - -Status ParsedUpdate::parseQuery() { - dassert(!_canonicalQuery.get()); - - if (!_driver.needMatchDetails() && CanonicalQuery::isSimpleIdQuery(_request->getQuery())) { - return Status::OK(); - } - - return parseQueryToCQ(); -} - -Status ParsedUpdate::parseQueryToCQ() { - dassert(!_canonicalQuery.get()); - - // The projection needs to be applied after the update operation, so we do not specify a - // projection during canonicalization. - auto findCommand = std::make_unique(_request->getNamespaceString()); - findCommand->setFilter(_request->getQuery()); - findCommand->setSort(_request->getSort()); - findCommand->setHint(_request->getHint()); - - // We get the collation off the ExpressionContext because it may contain a collection-default - // collator if no collation was included in the user's request. - findCommand->setCollation(_expCtx->getCollatorBSON()); - - // Limit should only used for the findAndModify command when a sort is specified. If a sort - // is requested, we want to use a top-k sort for efficiency reasons, so should pass the - // limit through. Generally, a update stage expects to be able to skip documents that were - // deleted/modified under it, but a limit could inhibit that and give an EOF when the update - // has not actually updated a document. This behavior is fine for findAndModify, but should - // not apply to update in general. - if (!_request->isMulti() && !_request->getSort().isEmpty()) { - findCommand->setLimit(1); - } - - // $expr is not allowed in the query for an upsert, since it is not clear what the equality - // extraction behavior for $expr should be. - MatchExpressionParser::AllowedFeatureSet allowedMatcherFeatures = - MatchExpressionParser::kAllowAllSpecialFeatures; - if (_request->isUpsert()) { - allowedMatcherFeatures &= ~MatchExpressionParser::AllowedFeatures::kExpr; - } - - // If the update request has runtime constants or let parameters attached to it, pass them to - // the FindCommandRequest. - if (auto& runtimeConstants = _request->getLegacyRuntimeConstants()) { - findCommand->setLegacyRuntimeConstants(*runtimeConstants); - } - if (auto& letParams = _request->getLetParameters()) { - findCommand->setLet(*letParams); - } - - _expCtx->startExpressionCounters(); - auto statusWithCQ = CanonicalQuery::canonicalize(_opCtx, - std::move(findCommand), - static_cast(_request->explain()), - _expCtx, - _extensionsCallback, - allowedMatcherFeatures); - if (statusWithCQ.isOK()) { - _canonicalQuery = std::move(statusWithCQ.getValue()); - } - - if (statusWithCQ.getStatus().code() == ErrorCodes::QueryFeatureNotAllowed) { - // The default error message for disallowed $expr is not descriptive enough, so we rewrite - // it here. - return {ErrorCodes::QueryFeatureNotAllowed, - "$expr is not allowed in the query predicate for an upsert"}; - } - - return statusWithCQ.getStatus(); -} - -void ParsedUpdate::parseUpdate() { - _driver.setCollator(_expCtx->getCollator()); - _driver.setLogOp(true); - _driver.setFromOplogApplication(_request->isFromOplogApplication()); - // Time-series operations will not result in any documents with dots or dollars fields. 
- if (auto source = _request->source(); source == OperationSource::kTimeseriesInsert || - source == OperationSource::kTimeseriesUpdate) { - _driver.setSkipDotsDollarsCheck(true); - } - _expCtx->isParsingPipelineUpdate = true; - _driver.parse(_request->getUpdateModification(), - _arrayFilters, - _request->getUpdateConstants(), - _request->isMulti()); - _expCtx->isParsingPipelineUpdate = false; -} - -PlanYieldPolicy::YieldPolicy ParsedUpdate::yieldPolicy() const { - return _request->isGod() ? PlanYieldPolicy::YieldPolicy::NO_YIELD : _request->getYieldPolicy(); -} - -bool ParsedUpdate::hasParsedQuery() const { - return _canonicalQuery.get() != nullptr; -} - -std::unique_ptr ParsedUpdate::releaseParsedQuery() { - invariant(_canonicalQuery.get() != nullptr); - return std::move(_canonicalQuery); -} - -const UpdateRequest* ParsedUpdate::getRequest() const { - return _request; -} - -UpdateDriver* ParsedUpdate::getDriver() { - return &_driver; -} - -void ParsedUpdate::setCollator(std::unique_ptr collator) { - auto* rawCollator = collator.get(); - - if (_canonicalQuery) { - _canonicalQuery->setCollator(std::move(collator)); - } else { - _expCtx->setCollator(std::move(collator)); - } - - _driver.setCollator(rawCollator); - - for (auto&& arrayFilter : _arrayFilters) { - arrayFilter.second->getFilter()->setCollator(rawCollator); - } -} + request, + impl::makeExtensionsCallback(opCtx, &request->getNsString()), + collection, + forgoOpCounterIncrements, + isRequestToTimeseries) {} } // namespace mongo diff --git a/src/mongo/db/ops/parsed_update.h b/src/mongo/db/ops/parsed_update.h index 40e3a279712b0..360c85f813ff0 100644 --- a/src/mongo/db/ops/parsed_update.h +++ b/src/mongo/db/ops/parsed_update.h @@ -29,47 +29,67 @@ #pragma once +#include +#include +#include +#include +#include +#include + #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/parsed_writes_common.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/update/update_driver.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { class CanonicalQuery; +class ExtensionsCallbackNoop; +class ExtensionsCallbackReal; class OperationContext; class UpdateRequest; +namespace impl { + /** - * This class takes a pointer to an UpdateRequest, and converts that request into a parsed form - * via the parseRequest() method. A ParsedUpdate can then be used to retrieve a PlanExecutor - * capable of executing the update. - * - * It is invalid to request that the UpdateStage return the prior or newly-updated version of a - * document during a multi-update. It is also invalid to request that a ProjectionStage be - * applied to the UpdateStage if the UpdateStage would not return any document. + * Note: this class is the base class for ParsedUpdate and ParsedUpdateForMongos. Their only + * difference is that ParsedUpdateForMongos uses the ExtensionsCallbackNoop and on the other hand, + * ParsedUpdate uses ExtensionsCallbackReal. 
The reason for this is that ExtensionsCallbackReal is + * available only on the mongod. This difference does not need to be exposed through the interface + * and can be hidden in the implementation. * - * No locks need to be held during parsing. - * - * The query part of the update is parsed to a CanonicalQuery, and the update part is parsed - * using the UpdateDriver. */ -class ParsedUpdate { - ParsedUpdate(const ParsedUpdate&) = delete; - ParsedUpdate& operator=(const ParsedUpdate&) = delete; +class ParsedUpdateBase { + ParsedUpdateBase(const ParsedUpdateBase&) = delete; + ParsedUpdateBase& operator=(const ParsedUpdateBase&) = delete; public: /** * Constructs a parsed update. * - * The objects pointed to by "request" and "extensionsCallback" must stay in scope for the life - * of the constructed ParsedUpdate. + * The objects pointed to by "request" must stay in scope for the life of the constructed + * ParsedUpdate. */ - ParsedUpdate(OperationContext* opCtx, - const UpdateRequest* request, - const ExtensionsCallback& extensionsCallback, - bool forgoOpCounterIncrements = false); + ParsedUpdateBase(OperationContext* opCtx, + const UpdateRequest* request, + std::unique_ptr extensionsCallback, + const CollectionPtr& collection, + bool forgoOpCounterIncrements = false, + bool isRequestToTimeseries = false); /** * Parses the update request to a canonical query and an update driver. On success, the @@ -120,18 +140,37 @@ class ParsedUpdate { std::unique_ptr releaseParsedQuery(); /** - * Sets this ParsedUpdate's collator. + * Never returns nullptr. + */ + boost::intrusive_ptr expCtx() const { + return _expCtx; + } + + /** + * Releases the ownership of the residual MatchExpression. * - * This setter can be used to override the collator that was created from the update request - * during ParsedUpdate construction. + * Note: see _timeseriesUpdateQueryExprs._bucketMatchExpr for more details. */ - void setCollator(std::unique_ptr collator); + std::unique_ptr releaseResidualExpr() { + return _timeseriesUpdateQueryExprs ? std::move(_timeseriesUpdateQueryExprs->_residualExpr) + : nullptr; + } /** - * Never returns nullptr. + * Releases the ownership of the original MatchExpression. */ - boost::intrusive_ptr expCtx() const { - return _expCtx; + std::unique_ptr releaseOriginalExpr() { + return std::move(_originalExpr); + } + + /** + * Returns true when we are performing multi updates using a residual predicate on a time-series + * collection or when performing singleton updates on a time-series collection. + */ + bool isEligibleForArbitraryTimeseriesUpdate() const; + + bool isRequestToTimeseries() const { + return _isRequestToTimeseries; } private: @@ -145,6 +184,12 @@ class ParsedUpdate { */ void parseUpdate(); + /** + * Handles splitting and/or translating the timeseries query predicate, if applicable. Must be + * called before parsing the query and update. + */ + Status maybeTranslateTimeseriesUpdate(); + // Unowned pointer to the transactional context. OperationContext* _opCtx; @@ -159,11 +204,78 @@ class ParsedUpdate { // Driver for processing updates on matched documents. UpdateDriver _driver; + // Requested update modifications on matched documents. + std::unique_ptr _modification; + // Parsed query object, or NULL if the query proves to be an id hack query. std::unique_ptr _canonicalQuery; // Reference to an extensions callback used when parsing to a canonical query. 
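// (The callback is now owned via unique_ptr rather than referenced: ParsedUpdate installs an
//  ExtensionsCallbackReal and ParsedUpdateForMongos an ExtensionsCallbackNoop, so callers no
//  longer have to keep an ExtensionsCallback alive for the lifetime of the parsed update.)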
- const ExtensionsCallback& _extensionsCallback; + std::unique_ptr _extensionsCallback; + + // Reference to the collection this update is being performed on. + const CollectionPtr& _collection; + + // Contains the residual expression and the bucket-level expression that should be pushed down + // to the bucket collection. + std::unique_ptr _timeseriesUpdateQueryExprs; + + // The original, complete and untranslated write query expression. + std::unique_ptr _originalExpr = nullptr; + + const bool _isRequestToTimeseries; +}; + +template +requires std::is_same_v || std::is_same_v + std::unique_ptr makeExtensionsCallback(Ts&&... args) { + return std::make_unique(std::forward(args)...); +} + +} // namespace impl + +/** + * This class takes a pointer to an UpdateRequest, and converts that request into a parsed form + * via the parseRequest() method. A ParsedUpdate can then be used to get information about the + * update, or to retrieve an upsert document. + * + * No locks need to be held during parsing. + * + * The query part of the update is parsed to a CanonicalQuery, and the update part is parsed + * using the UpdateDriver. + * + * ParsedUpdateForMongos is a ParsedUpdate that can be used in mongos. + */ +class ParsedUpdateForMongos : public impl::ParsedUpdateBase { +public: + ParsedUpdateForMongos(OperationContext* opCtx, const UpdateRequest* request) + : ParsedUpdateBase(opCtx, + request, + impl::makeExtensionsCallback(), + CollectionPtr::null) {} +}; + +/** + * This class takes a pointer to an UpdateRequest, and converts that request into a parsed form + * via the parseRequest() method. A ParsedUpdate can then be used to retrieve a PlanExecutor + * capable of executing the update. + * + * It is invalid to request that the UpdateStage return the prior or newly-updated version of a + * document during a multi-update. It is also invalid to request that a ProjectionStage be + * applied to the UpdateStage if the UpdateStage would not return any document. + * + * The query part of the update is parsed to a CanonicalQuery, and the update part is parsed + * using the UpdateDriver. + * + * ParsedUpdate is a ParsedUpdate that can be used in mongod. 
+ */ +class ParsedUpdate : public impl::ParsedUpdateBase { +public: + ParsedUpdate(OperationContext* opCtx, + const UpdateRequest* request, + const CollectionPtr& collection, + bool forgoOpCounterIncrements = false, + bool isRequestToTimeseries = false); }; } // namespace mongo diff --git a/src/mongo/db/ops/parsed_update_array_filters.cpp b/src/mongo/db/ops/parsed_update_array_filters.cpp index 9282fb8c64e5b..6547aef83f1c9 100644 --- a/src/mongo/db/ops/parsed_update_array_filters.cpp +++ b/src/mongo/db/ops/parsed_update_array_filters.cpp @@ -28,7 +28,19 @@ */ #include "mongo/db/ops/parsed_update_array_filters.h" + +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/ops/parsed_update_array_filters.h b/src/mongo/db/ops/parsed_update_array_filters.h index 2cbb249025d65..197ef8b6eb596 100644 --- a/src/mongo/db/ops/parsed_update_array_filters.h +++ b/src/mongo/db/ops/parsed_update_array_filters.h @@ -29,8 +29,18 @@ #pragma once +#include +#include +#include + +#include + #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo { diff --git a/src/mongo/db/ops/parsed_update_base.cpp b/src/mongo/db/ops/parsed_update_base.cpp new file mode 100644 index 0000000000000..e58bfe9f47fcb --- /dev/null +++ b/src/mongo/db/ops/parsed_update_base.cpp @@ -0,0 +1,319 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_operation_source.h" +#include "mongo/db/exec/disk_use_options_gen.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/parsed_update.h" +#include "mongo/db/ops/parsed_update_array_filters.h" +#include "mongo/db/ops/parsed_writes_common.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/timeseries/timeseries_update_delete_util.h" +#include "mongo/db/update/update_driver.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" + +namespace mongo::impl { + +// Note: The caller should hold a lock on the 'collection' if it really exists so that it can stay +// alive until the end of the ParsedUpdate's lifetime. +ParsedUpdateBase::ParsedUpdateBase(OperationContext* opCtx, + const UpdateRequest* request, + std::unique_ptr extensionsCallback, + const CollectionPtr& collection, + bool forgoOpCounterIncrements, + bool isRequestToTimeseries) + : _opCtx(opCtx), + _request(request), + _expCtx(make_intrusive( + opCtx, + nullptr, + _request->getNamespaceString(), + _request->getLegacyRuntimeConstants(), + _request->getLetParameters(), + allowDiskUseByDefault.load(), // allowDiskUse + true, // mayDbProfile. We pass 'true' here conservatively. In the future we may + // change this. + request->explain())), + _driver(_expCtx), + _modification( + std::make_unique(_request->getUpdateModification())), + _canonicalQuery(), + _extensionsCallback(std::move(extensionsCallback)), + _collection(collection), + _timeseriesUpdateQueryExprs(isRequestToTimeseries + ? createTimeseriesWritesQueryExprsIfNecessary( + feature_flags::gTimeseriesUpdatesSupport.isEnabled( + serverGlobalParams.featureCompatibility), + collection) + : nullptr), + _isRequestToTimeseries(isRequestToTimeseries) { + if (forgoOpCounterIncrements) { + _expCtx->enabledCounters = false; + } + _expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp"; + + tassert( + 7655104, "timeseries collection must already exist", _collection || !isRequestToTimeseries); +} + +Status ParsedUpdateBase::maybeTranslateTimeseriesUpdate() { + if (!_timeseriesUpdateQueryExprs) { + // Not a timeseries update, bail out. 
+ return Status::OK(); + } + + // TODO: Due to the complexity which is related to the efficient sort support, we don't support + // yet findAndModify with a query and sort but it should not be impossible. This code assumes + // that in findAndModify code path, the parsed update constructor should be called with + // isRequestToTimeseries = true for a time-series collection. + uassert(ErrorCodes::InvalidOptions, + "Cannot perform a findAndModify with a query and sort on a time-series collection.", + _request->isMulti() || _request->getSort().isEmpty()); + + // If we're updating documents in a time-series collection, splits the match expression into a + // bucket-level match expression and a residual expression so that we can push down the + // bucket-level match expression to the system bucket collection scan or fetch/ixscan. + *_timeseriesUpdateQueryExprs = timeseries::getMatchExprsForWrites( + _expCtx, *_collection->getTimeseriesOptions(), _request->getQuery()); + + // At this point, we parsed user-provided match expression. After this point, the new canonical + // query is internal to the bucket SCAN or FETCH and will have additional internal match + // expression. We do not need to track the internal match expression counters and so we stop the + // counters because we do not want to count the internal match expression. + _expCtx->stopExpressionCounters(); + + // We also need a copy of the original match expression to use for upserts and positional + // updates. + MatchExpressionParser::AllowedFeatureSet allowedFeatures = + MatchExpressionParser::kAllowAllSpecialFeatures; + if (_request->isUpsert()) { + allowedFeatures &= ~MatchExpressionParser::AllowedFeatures::kExpr; + } + _originalExpr = uassertStatusOK(MatchExpressionParser::parse( + _request->getQuery(), _expCtx, ExtensionsCallbackNoop(), allowedFeatures)); + + if (_request->isMulti() && !_timeseriesUpdateQueryExprs->_residualExpr) { + // If we don't have a residual predicate and this is not a single update, we might be able + // to perform this update directly on the buckets collection. Attempt to translate the + // update modification accordingly, if it succeeds, we can do a direct write. If we can't + // translate it (due to referencing data fields), go ahead with the arbitrary updates path. + const auto& timeseriesOptions = _collection->getTimeseriesOptions(); + auto swModification = + timeseries::translateUpdate(*_modification, timeseriesOptions->getMetaField()); + if (swModification.isOK()) { + _modification = + std::make_unique(swModification.getValue()); + + // We need to capture off the correct translated timeseries filter expressions in the + // canonical query before we clear out the timeseries state (this is kind of a hacky way + // to do it, but this whole fallback optimization is kind of hacky.). + if (auto status = parseQueryToCQ(); !status.isOK()) { + return status; + } + + _timeseriesUpdateQueryExprs = nullptr; + } + } + + return Status::OK(); +} + +Status ParsedUpdateBase::parseRequest() { + // It is invalid to request that the UpdateStage return the prior or newly-updated version + // of a document during a multi-update. + invariant(!(_request->shouldReturnAnyDocs() && _request->isMulti())); + + // It is invalid to specify 'upsertSupplied:true' for a non-upsert operation, or if no upsert + // document was supplied with the request. 
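// (Illustrative, hypothetical example: an update entry such as
//  { q: {...}, u: {...}, upsert: true, upsertSupplied: true, c: { new: { _id: 1 } } } is the
//  accepted shape; omitting upsert: true, or omitting the supplied document from the update
//  constants, trips one of the uasserts below.)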
+ if (_request->shouldUpsertSuppliedDocument()) { + uassert(ErrorCodes::FailedToParse, + str::stream() << "cannot specify '" + << write_ops::UpdateOpEntry::kUpsertSuppliedFieldName + << ": true' for a non-upsert operation", + _request->isUpsert()); + const auto& constants = _request->getUpdateConstants(); + uassert(ErrorCodes::FailedToParse, + str::stream() << "the parameter '" + << write_ops::UpdateOpEntry::kUpsertSuppliedFieldName + << "' is set to 'true', but no document was supplied", + constants && (*constants)["new"_sd].type() == BSONType::Object); + } + + // It is invalid to request that a ProjectionStage be applied to the UpdateStage if the + // UpdateStage would not return any document. + invariant(_request->getProj().isEmpty() || _request->shouldReturnAnyDocs()); + + auto [collatorToUse, collationMatchesDefault] = + resolveCollator(_opCtx, _request->getCollation(), _collection); + _expCtx->setCollator(std::move(collatorToUse)); + _expCtx->collationMatchesDefault = collationMatchesDefault; + + auto statusWithArrayFilters = parsedUpdateArrayFilters( + _expCtx, _request->getArrayFilters(), _request->getNamespaceString()); + if (!statusWithArrayFilters.isOK()) { + return statusWithArrayFilters.getStatus(); + } + _arrayFilters = std::move(statusWithArrayFilters.getValue()); + + _expCtx->startExpressionCounters(); + + if (auto status = maybeTranslateTimeseriesUpdate(); !status.isOK()) { + return status; + } + + // We parse the update portion before the query portion because the dispostion of the update + // may determine whether or not we need to produce a CanonicalQuery at all. For example, if + // the update involves the positional-dollar operator, we must have a CanonicalQuery even if + // it isn't required for query execution. + parseUpdate(); + Status status = parseQuery(); + + // After parsing to detect if $$USER_ROLES is referenced in the query, set the value of + // $$USER_ROLES for the update. + _expCtx->setUserRoles(); + + return status; +} + +Status ParsedUpdateBase::parseQuery() { + if (_canonicalQuery) { + // Query is already parsed. + return Status::OK(); + } + + if (!_timeseriesUpdateQueryExprs && !_driver.needMatchDetails() && + CanonicalQuery::isSimpleIdQuery(_request->getQuery())) { + return Status::OK(); + } + + return parseQueryToCQ(); +} + +Status ParsedUpdateBase::parseQueryToCQ() { + dassert(!_canonicalQuery.get()); + + auto statusWithCQ = impl::parseWriteQueryToCQ( + _expCtx->opCtx, + _expCtx.get(), + *_extensionsCallback, + *_request, + _timeseriesUpdateQueryExprs ? _timeseriesUpdateQueryExprs->_bucketExpr.get() : nullptr); + + if (statusWithCQ.isOK()) { + _canonicalQuery = std::move(statusWithCQ.getValue()); + } + + if (statusWithCQ.getStatus().code() == ErrorCodes::QueryFeatureNotAllowed) { + // The default error message for disallowed $expr is not descriptive enough, so we rewrite + // it here. + return {ErrorCodes::QueryFeatureNotAllowed, + "$expr is not allowed in the query predicate for an upsert"}; + } + + return statusWithCQ.getStatus(); +} + +void ParsedUpdateBase::parseUpdate() { + _driver.setCollator(_expCtx->getCollator()); + _driver.setLogOp(true); + _driver.setFromOplogApplication(_request->isFromOplogApplication()); + // Time-series operations will not result in any documents with dots or dollars fields. 
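// (In other words, for internally generated time-series writes the driver can safely skip the
//  dots-and-dollars field-name validation, which is what the branch below enables.)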
+ if (auto source = _request->source(); source == OperationSource::kTimeseriesInsert || + source == OperationSource::kTimeseriesUpdate) { + _driver.setSkipDotsDollarsCheck(true); + } + _expCtx->isParsingPipelineUpdate = true; + _driver.parse( + *_modification, _arrayFilters, _request->getUpdateConstants(), _request->isMulti()); + _expCtx->isParsingPipelineUpdate = false; +} + +PlanYieldPolicy::YieldPolicy ParsedUpdateBase::yieldPolicy() const { + return _request->isGod() ? PlanYieldPolicy::YieldPolicy::NO_YIELD : _request->getYieldPolicy(); +} + +bool ParsedUpdateBase::hasParsedQuery() const { + return _canonicalQuery.get() != nullptr; +} + +std::unique_ptr ParsedUpdateBase::releaseParsedQuery() { + invariant(_canonicalQuery.get() != nullptr); + return std::move(_canonicalQuery); +} + +const UpdateRequest* ParsedUpdateBase::getRequest() const { + return _request; +} + +UpdateDriver* ParsedUpdateBase::getDriver() { + return &_driver; +} + +bool ParsedUpdateBase::isEligibleForArbitraryTimeseriesUpdate() const { + return _timeseriesUpdateQueryExprs.get() != nullptr; +} + +} // namespace mongo::impl diff --git a/src/mongo/db/ops/parsed_writes_common.h b/src/mongo/db/ops/parsed_writes_common.h new file mode 100644 index 0000000000000..32444ed1b4b60 --- /dev/null +++ b/src/mongo/db/ops/parsed_writes_common.h @@ -0,0 +1,152 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/storage/storage_parameters_gen.h" + +namespace mongo { +class DeleteRequest; +class UpdateRequest; + +/** + * Query for timeseries arbitrary writes should be split into two parts: bucket expression and + * residual expression. The bucket expression is used to find the buckets and the residual + * expression is used to filter the documents in the buckets. + */ +struct TimeseriesWritesQueryExprs { + // The bucket-level match expression. 
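// (Illustrative sketch, assuming a hypothetical time-series collection with metaField "tag":
//  a write filter like { tag: "sensorA", temp: { $gt: 20 } } could be split into a
//  bucket-level expression roughly of the form { meta: "sensorA", <closed-bucket predicate on
//  "control.closed"> }, which is pushed down to the buckets collection scan, and a residual
//  expression { temp: { $gt: 20 } }, which is applied to measurements after unpacking. The
//  exact rewritten shapes are produced by timeseries::getMatchExprsForWrites and are only
//  sketched here.)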
+ std::unique_ptr _bucketExpr = nullptr; + + // The residual expression which is applied to materialized measurements after splitting out + // bucket-level match expressions. + std::unique_ptr _residualExpr = nullptr; +}; + +/** + * Creates a TimeseriesWritesQueryExprs object if the collection is a time-series collection and + * the related feature flag is enabled. + */ +inline std::unique_ptr createTimeseriesWritesQueryExprsIfNecessary( + bool featureEnabled, const CollectionPtr& collection) { + if (featureEnabled && collection && collection->getTimeseriesOptions()) { + return std::make_unique(); + } else { + return nullptr; + } +} + +template +concept IsDeleteOrUpdateRequest = + std::is_same_v || std::is_same_v; + +namespace impl { + /** + * Parses the filter of 'request'or the given filter (if given) to a CanonicalQuery. This does a + * direct transformation and doesn't do any special handling, e.g. for timeseries. + */ + template + requires IsDeleteOrUpdateRequest StatusWith> + parseWriteQueryToCQ(OperationContext * opCtx, + ExpressionContext * expCtx, + const ExtensionsCallback& extensionsCallback, + const T& request, + const MatchExpression* rewrittenFilter = nullptr) { + // The projection needs to be applied after the delete/update operation, so we do not + // specify a projection during canonicalization. + auto findCommand = std::make_unique(request.getNsString()); + + if (rewrittenFilter) { + findCommand->setFilter(rewrittenFilter->serialize()); + } else { + findCommand->setFilter(request.getQuery()); + } + findCommand->setSort(request.getSort()); + findCommand->setHint(request.getHint()); + findCommand->setCollation(request.getCollation().getOwned()); + + // Limit should only used for the findAndModify command when a sort is specified. If a sort + // is requested, we want to use a top-k sort for efficiency reasons, so should pass the + // limit through. Generally, a update stage expects to be able to skip documents that were + // deleted/modified under it, but a limit could inhibit that and give an EOF when the + // delete/update has not actually delete/updated a document. This behavior is fine for + // findAndModify, but should not apply to delete/update in general. + if (!request.getMulti() && !request.getSort().isEmpty()) { + findCommand->setLimit(1); + } + + MatchExpressionParser::AllowedFeatureSet allowedMatcherFeatures = + MatchExpressionParser::kAllowAllSpecialFeatures; + if constexpr (std::is_same_v) { + // $expr is not allowed in the query for an upsert, since it is not clear what the + // equality extraction behavior for $expr should be. + if (request.isUpsert()) { + allowedMatcherFeatures &= ~MatchExpressionParser::AllowedFeatures::kExpr; + } + } + + // If the delete/update request has runtime constants or let parameters attached to it, pass + // them to the FindCommandRequest. 
+ if (auto& runtimeConstants = request.getLegacyRuntimeConstants()) { + findCommand->setLegacyRuntimeConstants(*runtimeConstants); + } + if (auto& letParams = request.getLet()) { + findCommand->setLet(*letParams); + } + + return CanonicalQuery::canonicalize(opCtx, + std::move(findCommand), + request.getIsExplain(), + expCtx, + extensionsCallback, + allowedMatcherFeatures); + } +} // namespace impl + +template +requires IsDeleteOrUpdateRequest StatusWith> parseWriteQueryToCQ( + OperationContext* opCtx, + ExpressionContext* expCtx, + const T& request, + const MatchExpression* rewrittenFilter = nullptr) { + return impl::parseWriteQueryToCQ(opCtx, + expCtx, + ExtensionsCallbackReal(opCtx, &request.getNsString()), + request, + rewrittenFilter); +} +} // namespace mongo diff --git a/src/mongo/db/ops/update.cpp b/src/mongo/db/ops/update.cpp index c1e95dd993a61..ac08ad06d891b 100644 --- a/src/mongo/db/ops/update.cpp +++ b/src/mongo/db/ops/update.cpp @@ -28,78 +28,79 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/ops/update.h" +#include +#include +#include -#include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/collection_yield_restore.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" -#include "mongo/db/client.h" -#include "mongo/db/clientcursor.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/exec/update_stage.h" -#include "mongo/db/matcher/extensions_callback_real.h" -#include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/parsed_update.h" +#include "mongo/db/ops/update.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/query/plan_summary_stats.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/update/update_driver.h" -#include "mongo/db/update_index_data.h" -#include "mongo/util/scopeguard.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite namespace mongo { -UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request) { - invariant(db); - +UpdateResult update(OperationContext* opCtx, + CollectionAcquisition& coll, + const UpdateRequest& request) { // Explain should never use this helper. invariant(!request.explain()); const NamespaceString& nsString = request.getNamespaceString(); invariant(opCtx->lockState()->isCollectionLockedForMode(nsString, MODE_IX)); - CollectionPtr collection; - // The update stage does not create its own collection. As such, if the update is // an upsert, create the collection that the update stage inserts into beforehand. 
- writeConflictRetry(opCtx, "createCollection", nsString.ns(), [&] { - collection = CollectionPtr( - CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nsString)); - if (collection || !request.isUpsert()) { - return; - } - - const bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && - !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nsString); - - if (userInitiatedWritesAndNotPrimary) { - uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown, - str::stream() << "Not primary while creating collection " - << nsString << " during upsert")); + writeConflictRetry(opCtx, "createCollection", nsString, [&] { + if (!coll.exists() && request.isUpsert()) { + const bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && + !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nsString); + + if (userInitiatedWritesAndNotPrimary) { + uassertStatusOK(Status(ErrorCodes::PrimarySteppedDown, + str::stream() + << "Not primary while creating collection " + << nsString.toStringForErrorMsg() << " during upsert")); + } + + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx, &coll); + WriteUnitOfWork wuow(opCtx); + auto db = DatabaseHolder::get(opCtx)->openDb(opCtx, coll.nss().dbName()); + auto newCollectionPtr = db->createCollection(opCtx, nsString, CollectionOptions()); + invariant(newCollectionPtr); + wuow.commit(); } - WriteUnitOfWork wuow(opCtx); - collection = CollectionPtr(db->createCollection(opCtx, nsString, CollectionOptions())); - invariant(collection); - wuow.commit(); }); - collection.makeYieldable(opCtx, LockedCollectionYieldRestore(opCtx, collection)); + // If this is an upsert, at this point the collection must exist. + invariant(coll.exists() || !request.isUpsert()); // Parse the update, get an executor for it, run the executor, get stats out. - const ExtensionsCallbackReal extensionsCallback(opCtx, &request.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback); + ParsedUpdate parsedUpdate(opCtx, &request, coll.getCollectionPtr()); uassertStatusOK(parsedUpdate.parseRequest()); OpDebug* const nullOpDebug = nullptr; auto exec = uassertStatusOK( - getExecutorUpdate(nullOpDebug, &collection, &parsedUpdate, boost::none /* verbosity */)); + getExecutorUpdate(nullOpDebug, coll, &parsedUpdate, boost::none /* verbosity */)); PlanExecutor::ExecState state = PlanExecutor::ADVANCED; BSONObj image; diff --git a/src/mongo/db/ops/update.h b/src/mongo/db/ops/update.h index cc0d1c42027d0..524d13f08a24b 100644 --- a/src/mongo/db/ops/update.h +++ b/src/mongo/db/ops/update.h @@ -32,14 +32,16 @@ #include "mongo/db/curop.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/update_request.h" #include "mongo/db/ops/update_result.h" +#include "mongo/db/shard_role.h" namespace mongo { class CanonicalQuery; -class Database; class OperationContext; +class CollectionAcquisition; class UpdateDriver; /** @@ -47,6 +49,8 @@ class UpdateDriver; * * Caller must hold the appropriate database locks. 
*/ -UpdateResult update(OperationContext* opCtx, Database* db, const UpdateRequest& request); +UpdateResult update(OperationContext* opCtx, + CollectionAcquisition& coll, + const UpdateRequest& request); } // namespace mongo diff --git a/src/mongo/db/ops/update_request.h b/src/mongo/db/ops/update_request.h index a7a3ea9aa2a27..87f4c6e3991aa 100644 --- a/src/mongo/db/ops/update_request.h +++ b/src/mongo/db/ops/update_request.h @@ -89,6 +89,10 @@ class UpdateRequest { return _nsString; } + const NamespaceString& getNsString() const { + return _nsString; + } + void setQuery(const BSONObj& query) { _updateOp.setQ(query); } @@ -153,6 +157,10 @@ class UpdateRequest { return _letParameters; } + const boost::optional& getLet() const { + return _letParameters; + } + void setArrayFilters(const std::vector& arrayFilters) { _updateOp.setArrayFilters(arrayFilters); } @@ -192,6 +200,10 @@ class UpdateRequest { _updateOp.setMulti(value); } + bool getMulti() const { + return _updateOp.getMulti(); + } + bool isMulti() const { return _updateOp.getMulti(); } @@ -220,6 +232,10 @@ class UpdateRequest { return _explain; } + bool getIsExplain() const { + return static_cast(_explain); + } + void setReturnDocs(ReturnDocOption value) { _returnDocs = value; } diff --git a/src/mongo/db/ops/update_result.cpp b/src/mongo/db/ops/update_result.cpp index 3058e59460a88..8118618614565 100644 --- a/src/mongo/db/ops/update_result.cpp +++ b/src/mongo/db/ops/update_result.cpp @@ -28,12 +28,13 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/db/ops/update_result.h" - +#include "mongo/bson/bsonelement.h" #include "mongo/db/not_primary_error_tracker.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite diff --git a/src/mongo/db/ops/update_result.h b/src/mongo/db/ops/update_result.h index a91befe5980fa..034f2dcdecab8 100644 --- a/src/mongo/db/ops/update_result.h +++ b/src/mongo/db/ops/update_result.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/bson/bsonobj.h" namespace mongo { diff --git a/src/mongo/db/ops/write_ops.cpp b/src/mongo/db/ops/write_ops.cpp index c57f65ccdf842..8e6a22a0efa5e 100644 --- a/src/mongo/db/ops/write_ops.cpp +++ b/src/mongo/db/ops/write_ops.cpp @@ -29,14 +29,42 @@ #include "mongo/db/ops/write_ops.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/commands/bulk_write_gen.h" #include "mongo/db/dbmessage.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" #include "mongo/db/update/update_oplog_entry_version.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/stdx/variant.h" 
#include "mongo/util/assert_util.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep #include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -57,11 +85,17 @@ namespace { // each element. static constexpr int kPerElementOverhead = 2; +// This constant accounts for the size of a bool. +static constexpr int kBoolSize = 1; + // This constant tracks the overhead for serializing UUIDs. It includes 1 byte for the // 'BinDataType', 4 bytes for serializing the integer size of the UUID, and finally, 16 bytes // for the UUID itself. static const int kUUIDSize = 21; +// This constant accounts for the size of a 32-bit integer. +static const int kIntSize = 4; + template void checkOpCountForCommand(const T& op, size_t numOps) { uassert(ErrorCodes::InvalidLength, @@ -86,6 +120,60 @@ void checkOpCountForCommand(const T& op, size_t numOps) { } } +// Utility which estimates the size of 'WriteCommandRequestBase' when serialized. +int getWriteCommandRequestBaseSize(const WriteCommandRequestBase& base) { + static const int kSizeOfOrderedField = + write_ops::WriteCommandRequestBase::kOrderedFieldName.size() + kBoolSize + + kPerElementOverhead; + static const int kSizeOfBypassDocumentValidationField = + write_ops::WriteCommandRequestBase::kBypassDocumentValidationFieldName.size() + kBoolSize + + kPerElementOverhead; + + auto estSize = static_cast(BSONObj::kMinBSONLength) + kSizeOfOrderedField + + kSizeOfBypassDocumentValidationField; + + if (auto stmtId = base.getStmtId(); stmtId) { + estSize += write_ops::WriteCommandRequestBase::kStmtIdFieldName.size() + + write_ops::kStmtIdSize + kPerElementOverhead; + } + + if (auto stmtIds = base.getStmtIds(); stmtIds) { + estSize += write_ops::WriteCommandRequestBase::kStmtIdsFieldName.size(); + estSize += static_cast(BSONObj::kMinBSONLength); + estSize += + (write_ops::kStmtIdSize + write_ops::kWriteCommandBSONArrayPerElementOverheadBytes) * + stmtIds->size(); + estSize += kPerElementOverhead; + } + + if (auto isTimeseries = base.getIsTimeseriesNamespace(); isTimeseries.has_value()) { + estSize += write_ops::WriteCommandRequestBase::kIsTimeseriesNamespaceFieldName.size() + + kBoolSize + kPerElementOverhead; + } + + if (auto collUUID = base.getCollectionUUID(); collUUID) { + estSize += write_ops::WriteCommandRequestBase::kCollectionUUIDFieldName.size() + kUUIDSize + + kPerElementOverhead; + } + + if (auto encryptionInfo = base.getEncryptionInformation(); encryptionInfo) { + estSize += write_ops::WriteCommandRequestBase::kEncryptionInformationFieldName.size() + + encryptionInfo->toBSON().objsize() + kPerElementOverhead; + } + + if (auto query = base.getOriginalQuery(); query) { + estSize += write_ops::WriteCommandRequestBase::kOriginalQueryFieldName.size() + + query->objsize() + kPerElementOverhead; + } + + if (auto originalCollation = base.getOriginalCollation(); originalCollation) { + estSize += write_ops::WriteCommandRequestBase::kOriginalCollationFieldName.size() + + originalCollation->objsize() + kPerElementOverhead; + } + + return estSize; +} + } // namespace namespace write_ops { @@ -145,6 +233,50 @@ int32_t getStmtIdForWriteAt(const WriteCommandRequestBase& writeCommandBase, siz return kFirstStmtId + writePos; } +int estimateRuntimeConstantsSize(const mongo::LegacyRuntimeConstants& constants) { + int size = write_ops::UpdateCommandRequest::kLegacyRuntimeConstantsFieldName.size() + + static_cast(BSONObj::kMinBSONLength) + kPerElementOverhead; + + // $$NOW + size += + 
LegacyRuntimeConstants::kLocalNowFieldName.size() + sizeof(Date_t) + kPerElementOverhead; + + // $$CLUSTER_TIME + size += LegacyRuntimeConstants::kClusterTimeFieldName.size() + sizeof(Timestamp) + + kPerElementOverhead; + + // $$JS_SCOPE + if (const auto& scope = constants.getJsScope(); scope.has_value()) { + size += LegacyRuntimeConstants::kJsScopeFieldName.size() + scope->objsize() + + kPerElementOverhead; + } + + // $$IS_MR + if (const auto& isMR = constants.getIsMapReduce(); isMR.has_value()) { + size += + LegacyRuntimeConstants::kIsMapReduceFieldName.size() + kBoolSize + kPerElementOverhead; + } + + // $$USER_ROLES + if (const auto& userRoles = constants.getUserRoles(); userRoles.has_value()) { + size += LegacyRuntimeConstants::kUserRolesFieldName.size() + userRoles->objsize() + + kPerElementOverhead; + } + return size; +} + +int getArrayFiltersFieldSize(const std::vector& arrayFilters, + const StringData arrayFiltersFieldName) { + auto size = BSONObj::kMinBSONLength + arrayFiltersFieldName.size() + kPerElementOverhead; + for (auto&& filter : arrayFilters) { + // For each filter, we not only need to account for the size of the filter itself, + // but also for the per array element overhead. + size += filter.objsize(); + size += write_ops::kWriteCommandBSONArrayPerElementOverheadBytes; + } + return size; +} + int getUpdateSizeEstimate(const BSONObj& q, const write_ops::UpdateModification& u, const boost::optional& c, @@ -155,11 +287,6 @@ int getUpdateSizeEstimate(const BSONObj& q, const boost::optional& sampleId, const bool includeAllowShardKeyUpdatesWithoutFullShardKeyInQuery) { using UpdateOpEntry = write_ops::UpdateOpEntry; - - // This constant accounts for the null terminator in each field name and the BSONType byte for - // each element. - static const int kPerElementOverhead = 2; - static const int kBoolSize = 1; int estSize = static_cast(BSONObj::kMinBSONLength); // Add the sizes of the 'multi' and 'upsert' fields. @@ -188,17 +315,8 @@ int getUpdateSizeEstimate(const BSONObj& q, // Add the size of the 'arrayFilters' field, if present. if (arrayFilters) { - estSize += ([&]() { - auto size = BSONObj::kMinBSONLength + UpdateOpEntry::kArrayFiltersFieldName.size() + - kPerElementOverhead; - for (auto&& filter : *arrayFilters) { - // For each filter, we not only need to account for the size of the filter itself, - // but also for the per array element overhead. - size += filter.objsize(); - size += write_ops::kWriteCommandBSONArrayPerElementOverheadBytes; - } - return size; - })(); + estSize += + getArrayFiltersFieldSize(arrayFilters.get(), UpdateOpEntry::kArrayFiltersFieldName); } // Add the size of the 'hint' field, if present. @@ -220,13 +338,90 @@ int getUpdateSizeEstimate(const BSONObj& q, return estSize; } +// TODO SERVER-77871: Ensure sampleId size is accounted for in this method. +// TODO SERVER-72983: If we need to add a allowShardKeyUpdatesWithoutFullShardKeyInQuery field, +// ensure the size is accounted for in this method. +int getBulkWriteUpdateSizeEstimate(const BSONObj& filter, + const write_ops::UpdateModification& updateMods, + const boost::optional& constants, + const bool includeUpsertSupplied, + const boost::optional& collation, + const boost::optional>& arrayFilters, + const BSONObj& hint, + const boost::optional& sort, + const boost::optional returnValue, + const boost::optional& returnFields) { + int estSize = static_cast(BSONObj::kMinBSONLength); + + // Adds the size of the 'update' field which contains the index of the corresponding namespace. 
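These estimation helpers rely on two fixed costs: every BSON element pays kPerElementOverhead (one type byte plus the field name's NUL terminator) on top of its name and value, and every document or array pays BSONObj::kMinBSONLength, i.e. a 4-byte length prefix plus a trailing zero byte. A small self-contained sketch of that arithmetic, assuming a document shaped like {ordered: true, stmtId: 0}:

```cpp
#include <iostream>
#include <string>

// Fixed costs mirrored from the estimation code: 1 type byte + 1 NUL after the field
// name for each element, and 5 bytes for an empty document (int32 length + 0x00).
constexpr int kPerElementOverhead = 2;
constexpr int kMinBSONLength = 5;
constexpr int kBoolSize = 1;
constexpr int kIntSize = 4;

// Size of one element whose encoded value occupies 'valueSize' bytes.
int elementSize(const std::string& fieldName, int valueSize) {
    return static_cast<int>(fieldName.size()) + valueSize + kPerElementOverhead;
}

int main() {
    // {ordered: true, stmtId: 0} -- one bool element and one int32 element.
    int est = kMinBSONLength;
    est += elementSize("ordered", kBoolSize);  // 7 + 1 + 2 = 10
    est += elementSize("stmtId", kIntSize);    // 6 + 4 + 2 = 12
    std::cout << "estimated size: " << est << " bytes\n";  // 5 + 10 + 12 = 27
}
```

The 27-byte result matches what the BSON wire format actually produces for that document, which is the property the server's estimates aim to preserve (never undercounting).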
+ estSize += BulkWriteUpdateOp::kUpdateFieldName.size() + kIntSize + kPerElementOverhead; + + // Add the sizes of the 'multi' and 'upsert' fields. + estSize += BulkWriteUpdateOp::kUpsertFieldName.size() + kBoolSize + kPerElementOverhead; + estSize += BulkWriteUpdateOp::kMultiFieldName.size() + kBoolSize + kPerElementOverhead; + + // Add the size of 'upsertSupplied' field if present. + if (includeUpsertSupplied) { + estSize += + BulkWriteUpdateOp::kUpsertSuppliedFieldName.size() + kBoolSize + kPerElementOverhead; + } + + // Add the sizes of the 'filter' and 'updateMods' fields. + estSize += (BulkWriteUpdateOp::kFilterFieldName.size() + filter.objsize() + + kPerElementOverhead + BulkWriteUpdateOp::kUpdateModsFieldName.size() + + updateMods.objsize() + kPerElementOverhead); + + // Add the size of the 'constants' field, if present. + if (constants) { + estSize += (BulkWriteUpdateOp::kConstantsFieldName.size() + constants->objsize() + + kPerElementOverhead); + } + + // Add the size of the 'collation' field, if present. + if (collation) { + estSize += (BulkWriteUpdateOp::kCollationFieldName.size() + collation->objsize() + + kPerElementOverhead); + } + + // Add the size of the 'arrayFilters' field, if present. + if (arrayFilters) { + estSize += + getArrayFiltersFieldSize(arrayFilters.get(), BulkWriteUpdateOp::kArrayFiltersFieldName); + } + + // Add the size of the 'hint' field, if present. + if (!hint.isEmpty()) { + estSize += BulkWriteUpdateOp::kHintFieldName.size() + hint.objsize() + kPerElementOverhead; + } + + // Add the size of the 'sort' field, if present. + if (sort) { + estSize += + (BulkWriteUpdateOp::kSortFieldName.size() + sort->objsize() + kPerElementOverhead); + } + + // Add the size of the 'return' field, if present. + if (returnValue) { + estSize += (BulkWriteUpdateOp::kReturnFieldName.size() + + // A string is stored with a leading int32 containing its length and a null byte + // as terminator. + (kIntSize + returnValue->size() + 1) + kPerElementOverhead); + } + + // Add the size of the 'returnFields' field, if present. + if (returnFields) { + estSize += (BulkWriteUpdateOp::kReturnFieldsFieldName.size() + returnFields->objsize() + + kPerElementOverhead); + } + + return estSize; +} + int getDeleteSizeEstimate(const BSONObj& q, const boost::optional& collation, const mongo::BSONObj& hint, const boost::optional& sampleId) { using DeleteOpEntry = write_ops::DeleteOpEntry; - - static const int kIntSize = 4; int estSize = static_cast(BSONObj::kMinBSONLength); // Add the size of the 'q' field. @@ -254,6 +449,69 @@ int getDeleteSizeEstimate(const BSONObj& q, return estSize; } +// TODO SERVER-77871: Ensure sampleId size is accounted for in this method. +int getBulkWriteDeleteSizeEstimate(const BSONObj& filter, + const boost::optional& collation, + const mongo::BSONObj& hint, + const boost::optional& sort, + bool includeReturn, + const boost::optional& returnFields) { + int estSize = static_cast(BSONObj::kMinBSONLength); + + // Adds the size of the 'delete' field which contains the index of the corresponding namespace. + estSize += BulkWriteDeleteOp::kDeleteCommandFieldName.size() + kIntSize + kPerElementOverhead; + + // Add the size of the 'filter' field. + estSize += BulkWriteDeleteOp::kFilterFieldName.size() + filter.objsize() + kPerElementOverhead; + + // Add the size of the 'multi' field. + estSize += BulkWriteDeleteOp::kMultiFieldName.size() + kBoolSize + kPerElementOverhead; + + // Add the size of the 'collation' field, if present. 
+ if (collation) { + estSize += BulkWriteDeleteOp::kCollationFieldName.size() + collation->objsize() + + kPerElementOverhead; + } + + // Add the size of the 'hint' field, if present. + if (!hint.isEmpty()) { + estSize += + (BulkWriteDeleteOp::kHintFieldName.size() + hint.objsize() + kPerElementOverhead); + } + + // Add the size of the 'sort' field, if present. + if (sort) { + estSize += + (BulkWriteDeleteOp::kSortFieldName.size() + sort->objsize() + kPerElementOverhead); + } + + // Add the size of the 'return' field, if present. + if (includeReturn) { + estSize += (BulkWriteDeleteOp::kReturnFieldName.size() + kBoolSize + kPerElementOverhead); + } + + // Add the size of the 'returnFields' field, if present. + if (returnFields) { + estSize += (BulkWriteDeleteOp::kReturnFieldsFieldName.size() + returnFields->objsize() + + kPerElementOverhead); + } + + return estSize; +} + +int getBulkWriteInsertSizeEstimate(const mongo::BSONObj& document) { + int estSize = static_cast(BSONObj::kMinBSONLength); + + // Adds the size of the 'insert' field which contains the index of the corresponding namespace. + estSize += BulkWriteInsertOp::kInsertFieldName.size() + kIntSize + kPerElementOverhead; + + // Add the size of the 'document' field. + estSize += + BulkWriteInsertOp::kDocumentFieldName.size() + document.objsize() + kPerElementOverhead; + + return estSize; +} + bool verifySizeEstimate(const write_ops::UpdateOpEntry& update) { return write_ops::getUpdateSizeEstimate( update.getQ(), @@ -268,6 +526,144 @@ bool verifySizeEstimate(const write_ops::UpdateOpEntry& update) { update.toBSON().objsize(); } +bool verifySizeEstimate(const InsertCommandRequest& insertReq, + const OpMsgRequest* unparsedRequest) { + int size = getInsertHeaderSizeEstimate(insertReq); + for (auto&& docToInsert : insertReq.getDocuments()) { + size += docToInsert.objsize() + kWriteCommandBSONArrayPerElementOverheadBytes; + } + + // Return true if 'insertReq' originated from a document sequence and our size estimate exceeds + // the size limit. + if (unparsedRequest && !unparsedRequest->sequences.empty() && size > BSONObjMaxUserSize) { + return true; + } + return size >= insertReq.toBSON({} /* commandPassthroughFields */).objsize(); +} + +bool verifySizeEstimate(const UpdateCommandRequest& updateReq, + const OpMsgRequest* unparsedRequest) { + int size = getUpdateHeaderSizeEstimate(updateReq); + + for (auto&& update : updateReq.getUpdates()) { + size += getUpdateSizeEstimate( + update.getQ(), + update.getU(), + update.getC(), + update.getUpsertSupplied().has_value(), + update.getCollation(), + update.getArrayFilters(), + update.getHint(), + update.getSampleId(), + update.getAllowShardKeyUpdatesWithoutFullShardKeyInQuery().has_value()) + + kWriteCommandBSONArrayPerElementOverheadBytes; + } + + // Return true if 'updateReq' originated from a document sequence and our size estimate exceeds + // the size limit. 
+ if (unparsedRequest && !unparsedRequest->sequences.empty() && size > BSONObjMaxUserSize) { + return true; + } + return size >= updateReq.toBSON({} /* commandPassthroughFields */).objsize(); +} + +bool verifySizeEstimate(const DeleteCommandRequest& deleteReq, + const OpMsgRequest* unparsedRequest) { + int size = getDeleteHeaderSizeEstimate(deleteReq); + + for (auto&& deleteOp : deleteReq.getDeletes()) { + size += write_ops::getDeleteSizeEstimate(deleteOp.getQ(), + deleteOp.getCollation(), + deleteOp.getHint(), + deleteOp.getSampleId()) + + kWriteCommandBSONArrayPerElementOverheadBytes; + } + + // Return true if 'deleteReq' originated from a document sequence and our size estimate exceeds + // the size limit. + if (unparsedRequest && !unparsedRequest->sequences.empty() && size > BSONObjMaxUserSize) { + return true; + } + return size >= deleteReq.toBSON({} /* commandPassthroughFields */).objsize(); +} + +int getInsertHeaderSizeEstimate(const InsertCommandRequest& insertReq) { + int size = getWriteCommandRequestBaseSize(insertReq.getWriteCommandRequestBase()) + + write_ops::InsertCommandRequest::kDocumentsFieldName.size() + kPerElementOverhead + + static_cast(BSONObj::kMinBSONLength); + + size += InsertCommandRequest::kCommandName.size() + kPerElementOverhead + + insertReq.getNamespace().size() + 1 /* ns string null terminator */; + + // Handle $tenant. Note that $tenant is injected as a hidden field into all IDL commands, unlike + // other passthrough fields. + if (auto tenant = insertReq.getDollarTenant(); tenant.has_value()) { + size += InsertCommandRequest::kDollarTenantFieldName.size() + OID::kOIDSize + + kPerElementOverhead; + } + return size; +} + +int getUpdateHeaderSizeEstimate(const UpdateCommandRequest& updateReq) { + int size = getWriteCommandRequestBaseSize(updateReq.getWriteCommandRequestBase()); + + size += UpdateCommandRequest::kCommandName.size() + kPerElementOverhead + + updateReq.getNamespace().size() + 1 /* ns string null terminator */; + + size += write_ops::UpdateCommandRequest::kUpdatesFieldName.size() + kPerElementOverhead + + static_cast(BSONObj::kMinBSONLength); + + // Handle $tenant. Note that $tenant is injected as a hidden field into all IDL commands, unlike + // other passthrough fields. + if (auto tenant = updateReq.getDollarTenant(); tenant.has_value()) { + size += UpdateCommandRequest::kDollarTenantFieldName.size() + OID::kOIDSize + + kPerElementOverhead; + } + + // Handle legacy runtime constants. + if (auto runtimeConstants = updateReq.getLegacyRuntimeConstants(); + runtimeConstants.has_value()) { + size += estimateRuntimeConstantsSize(*runtimeConstants); + } + + // Handle let parameters. + if (auto let = updateReq.getLet(); let.has_value()) { + size += write_ops::UpdateCommandRequest::kLetFieldName.size() + let->objsize() + + kPerElementOverhead; + } + return size; +} + +int getDeleteHeaderSizeEstimate(const DeleteCommandRequest& deleteReq) { + int size = getWriteCommandRequestBaseSize(deleteReq.getWriteCommandRequestBase()); + + size += DeleteCommandRequest::kCommandName.size() + kPerElementOverhead + + deleteReq.getNamespace().size() + 1 /* ns string null terminator */; + + size += write_ops::DeleteCommandRequest::kDeletesFieldName.size() + kPerElementOverhead + + static_cast(BSONObj::kMinBSONLength); + + // Handle $tenant. Note that $tenant is injected as a hidden field into all IDL commands, unlike + // other passthrough fields. 
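The verifySizeEstimate overloads in this file combine a header estimate with the padded per-statement estimates and require that the sum never undercounts the actual serialized command. A hedged sketch of that upper-bound check; the overhead value and names below are illustrative rather than the server's constants:

```cpp
#include <iostream>
#include <vector>

// Illustrative padding added for every entry of a write command's statement array
// (the server defines its own constant for this in write_ops.h).
constexpr int kArrayPerElementOverhead = 7;

struct Statement {
    int actualSize;     // bytes the statement really serializes to
    int estimatedSize;  // conservative per-statement estimate
};

// Returns true when the header estimate plus padded per-statement estimates is at
// least as large as the real serialized command.
bool verifySizeEstimate(int headerEstimate,
                        int actualCommandSize,
                        const std::vector<Statement>& stmts) {
    int estimate = headerEstimate;
    for (const Statement& s : stmts) {
        estimate += s.estimatedSize + kArrayPerElementOverhead;
    }
    return estimate >= actualCommandSize;
}

int main() {
    std::vector<Statement> stmts{{120, 125}, {80, 84}};
    // Pretend the whole command serializes to 260 bytes with a 40-byte header estimate.
    std::cout << std::boolalpha << verifySizeEstimate(40, 260, stmts) << "\n";  // true
}
```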
+ if (auto tenant = deleteReq.getDollarTenant(); tenant.has_value()) { + size += DeleteCommandRequest::kDollarTenantFieldName.size() + OID::kOIDSize + + kPerElementOverhead; + } + + // Handle legacy runtime constants. + if (auto runtimeConstants = deleteReq.getLegacyRuntimeConstants(); + runtimeConstants.has_value()) { + size += estimateRuntimeConstantsSize(*runtimeConstants); + } + + // Handle let parameters. + if (auto let = deleteReq.getLet(); let.has_value()) { + size += write_ops::UpdateCommandRequest::kLetFieldName.size() + let->objsize() + + kPerElementOverhead; + } + return size; +} + bool verifySizeEstimate(const write_ops::DeleteOpEntry& deleteOp) { return write_ops::getDeleteSizeEstimate(deleteOp.getQ(), deleteOp.getCollation(), diff --git a/src/mongo/db/ops/write_ops.h b/src/mongo/db/ops/write_ops.h index 2545c8eb78efd..35ad9d871ffac 100644 --- a/src/mongo/db/ops/write_ops.h +++ b/src/mongo/db/ops/write_ops.h @@ -29,9 +29,18 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -78,6 +87,9 @@ constexpr size_t kMaxWriteBatchSize = 100'000; // Limit the size that we write without yielding to 16MB / 64 (max expected number of indexes) constexpr size_t insertVectorMaxBytes = 256 * 1024; +// This constant accounts for the size of an individual stmtId, as used for retryable writes. +constexpr size_t kStmtIdSize = 4; + /** * Retrieves the statement id for the write at the specified position in the write batch entries * array. @@ -122,11 +134,52 @@ int getDeleteSizeEstimate(const BSONObj& q, const boost::optional& sampleId); /** - * Set of utilities which return true if the estimated write size is greater than or equal to the - * actual write size, false otherwise. + * Set of utilities which estimate the size, in bytes, of an insert/update/delete op with the given + * parameters, when serialized in the format used for the bulkWrite command. + */ +int getBulkWriteInsertSizeEstimate(const mongo::BSONObj& document); +int getBulkWriteUpdateSizeEstimate(const BSONObj& filter, + const write_ops::UpdateModification& updateMods, + const boost::optional& constants, + bool includeUpsertSupplied, + const boost::optional& collation, + const boost::optional>& arrayFilters, + const BSONObj& hint, + const boost::optional& sort, + boost::optional returnValue, + const boost::optional& returnFields); +int getBulkWriteDeleteSizeEstimate(const BSONObj& filter, + const boost::optional& collation, + const mongo::BSONObj& hint, + const boost::optional& sort, + bool includeReturn, + const boost::optional& returnFields); + +/** + * Set of utilities which return true if the estimated write size is greater than or equal to + * the actual write size, false otherwise. + * + * If the caller specifies 'unparsedRequest', these utilities will also return true if the request + * used document sequences and the size estimate is greater than the maximum size of a BSONObj. This + * indicates that 'unparsedRequest' cannot be serialized to a BSONObj because it exceeds the maximum + * BSONObj size. 
*/ bool verifySizeEstimate(const write_ops::UpdateOpEntry& update); bool verifySizeEstimate(const write_ops::DeleteOpEntry& deleteOp); +bool verifySizeEstimate(const InsertCommandRequest& insertReq, + const OpMsgRequest* unparsedRequest = nullptr); +bool verifySizeEstimate(const UpdateCommandRequest& updateReq, + const OpMsgRequest* unparsedRequest = nullptr); +bool verifySizeEstimate(const DeleteCommandRequest& deleteReq, + const OpMsgRequest* unparsedRequest = nullptr); + +/** + * Set of utilities which estimate the size of the headers (that is, all fields in a write command + * outside of the write statements themselves) of an insert/update/delete command, respectively. + */ +int getInsertHeaderSizeEstimate(const InsertCommandRequest& insertReq); +int getUpdateHeaderSizeEstimate(const UpdateCommandRequest& updateReq); +int getDeleteHeaderSizeEstimate(const DeleteCommandRequest& deleteReq); /** * If the response from a write command contains any write errors, it will throw the first one. All diff --git a/src/mongo/db/ops/write_ops.idl b/src/mongo/db/ops/write_ops.idl index a2d80295d03d5..a02a15ee51a80 100644 --- a/src/mongo/db/ops/write_ops.idl +++ b/src/mongo/db/ops/write_ops.idl @@ -151,6 +151,8 @@ structs: chained_structs: WriteCommandReplyBase: writeCommandReplyBase + # IMPORTANT: If any changes are made to the fields here, please update the corresponding size + # estimation functions in 'write_ops.cpp'. WriteCommandRequestBase: description: "Contains basic information included by all write commands" strict: false @@ -206,18 +208,18 @@ structs: optional: true stability: unstable $_originalQuery: - description: "The original write query. This is used for updateOne/deleteOne - without shard key during the write phase of the two phase protocol in - order to make sure the shard key query analysis stores the correct + description: "The original write query. This is used for updateOne/deleteOne + without shard key during the write phase of the two phase protocol in + order to make sure the shard key query analysis stores the correct client query." type: object optional: true cpp_name: originalQuery stability: internal $_originalCollation: - description: "The original write query. This is used for updateOne/deleteOne - without shard key during the write phase of the two phase protocol in - order to make sure the shard key query analysis stores the correct + description: "The original write query. This is used for updateOne/deleteOne + without shard key during the write phase of the two phase protocol in + order to make sure the shard key query analysis stores the correct client collation." type: object optional: true @@ -283,7 +285,7 @@ structs: optional: true stability: unstable $_allowShardKeyUpdatesWithoutFullShardKeyInQuery: - description: "Set to true if shard key updates are allowed without the full shard + description: "Set to true if shard key updates are allowed without the full shard key in the query." type: optionalBool cpp_name: allowShardKeyUpdatesWithoutFullShardKeyInQuery @@ -363,7 +365,8 @@ structs: stability: unstable commands: - + # IMPORTANT: If any changes are made to the fields here, please update the corresponding insert + # size estimation functions in 'write_ops.cpp'. insert: description: "Parser for the 'insert' command." command_name: insert @@ -386,6 +389,8 @@ commands: supports_doc_sequence: true stability: stable + # IMPORTANT: If any changes are made to the fields here, please update the corresponding update + # size estimation functions in 'write_ops.cpp'. 
update: description: "Parser for the 'update' command." command_name: update @@ -421,6 +426,8 @@ commands: optional: true stability: stable + # IMPORTANT: If any changes are made to the fields here, please update the corresponding delete + # size estimation functions in 'write_ops.cpp'. delete: description: "Parser for the 'delete' command." command_name: delete @@ -575,27 +582,32 @@ commands: optional: true stability: unstable $_originalQuery: - description: "The original write query. This is used for findAndModify without shard - key during the write phase of the two phase protocol in order to make - sure the shard key query analysis stores the correct client + description: "The original write query. This is used for findAndModify without shard + key during the write phase of the two phase protocol in order to make + sure the shard key query analysis stores the correct client query." type: object optional: true cpp_name: originalQuery stability: internal $_originalCollation: - description: "The original collation. This is used for findAndModify without shard - key during the write phase of the two phase protocol in order to make - sure the shard key query analysis stores the correct client + description: "The original collation. This is used for findAndModify without shard + key during the write phase of the two phase protocol in order to make + sure the shard key query analysis stores the correct client collation." type: object optional: true cpp_name: originalCollation stability: internal $_allowShardKeyUpdatesWithoutFullShardKeyInQuery: - description: "Set to true if shard key updates are allowed without the full shard + description: "Set to true if shard key updates are allowed without the full shard key in the query." type: optionalBool cpp_name: allowShardKeyUpdatesWithoutFullShardKeyInQuery stability: internal - + isTimeseriesNamespace: + description: "This flag is set to true when the write command was originally sent + to the time-series view, but got rewritten to target time-series + buckets namespace." + type: optionalBool + stability: internal diff --git a/src/mongo/db/ops/write_ops_document_stream_integration_test.cpp b/src/mongo/db/ops/write_ops_document_stream_integration_test.cpp index dbfb2d3889eae..e0f32f17c9364 100644 --- a/src/mongo/db/ops/write_ops_document_stream_integration_test.cpp +++ b/src/mongo/db/ops/write_ops_document_stream_integration_test.cpp @@ -27,14 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" -#include "mongo/db/commands.h" #include "mongo/db/namespace_string.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/protocol.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -48,7 +63,7 @@ TEST(WriteOpsDocSeq, InsertDocStreamWorks) { ASSERT_EQ(conn->count(ns), 0u); OpMsgRequest request; - request.body = BSON("insert" << ns.coll() << "$db" << ns.db()); + request.body = BSON("insert" << ns.coll() << "$db" << ns.db_forTest()); request.sequences = {{"documents", { BSON("_id" << 1), diff --git a/src/mongo/db/ops/write_ops_exec.cpp b/src/mongo/db/ops/write_ops_exec.cpp index 8fae667184d2e..be0e819a4591e 100644 --- a/src/mongo/db/ops/write_ops_exec.cpp +++ b/src/mongo/db/ops/write_ops_exec.cpp @@ -28,79 +28,152 @@ */ #include "mongo/db/ops/write_ops_exec.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/counter.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonelement_comparator.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_options.h" -#include "mongo/db/catalog/collection_uuid_mismatch.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/collection_yield_restore.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" #include "mongo/db/curop_metrics.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/dbhelpers.h" +#include "mongo/db/database_name.h" +#include "mongo/db/db_raii.h" #include "mongo/db/error_labels.h" -#include "mongo/db/exec/delete_stage.h" -#include "mongo/db/exec/update_stage.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/introspect.h" -#include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/not_primary_error_tracker.h" 
#include "mongo/db/ops/delete_request_gen.h" #include "mongo/db/ops/insert.h" #include "mongo/db/ops/parsed_delete.h" #include "mongo/db/ops/parsed_update.h" +#include "mongo/db/ops/parsed_writes_common.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/ops/write_ops_retryability.h" -#include "mongo/db/pipeline/aggregate_command_gen.h" -#include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collection_query_info.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_summary_stats.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_conflict_info.h" #include "mongo/db/repl/tenant_migration_decoration.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/query_analysis_writer.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_role.h" #include "mongo/db/stats/counters.h" +#include "mongo/db/stats/resource_consumption_metrics.h" #include "mongo/db/stats/server_write_concern_metrics.h" #include "mongo/db/stats/top.h" #include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" -#include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" +#include "mongo/db/timeseries/bucket_catalog/closed_bucket.h" #include "mongo/db/timeseries/bucket_catalog/write_batch.h" #include "mongo/db/timeseries/bucket_compression.h" -#include "mongo/db/timeseries/timeseries_constants.h" -#include "mongo/db/timeseries/timeseries_extended_range.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/db/timeseries/timeseries_options.h" #include "mongo/db/timeseries/timeseries_stats.h" #include "mongo/db/timeseries/timeseries_update_delete_util.h" #include "mongo/db/timeseries/timeseries_write_util.h" #include "mongo/db/transaction/retryable_writes_stats.h" +#include "mongo/db/transaction/transaction_api.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction/transaction_participant_resource_yielder.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update/path_support.h" #include "mongo/db/update/update_oplog_entry_serialization.h" -#include "mongo/db/write_concern.h" +#include "mongo/executor/inline_executor.h" #include "mongo/logv2/log.h" +#include 
"mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/message.h" +#include "mongo/s/analyze_shard_key_common_gen.h" +#include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/query_analysis_sampler_util.h" +#include "mongo/s/type_collection_common_types_gen.h" #include "mongo/s/would_change_owning_shard_exception.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/s/write_ops/batched_upsert_detail.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #include "mongo/util/log_and_backoff.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -113,7 +186,6 @@ template <> struct BSONObjAppendFormat : FormatKind {}; } // namespace mongo - namespace mongo::write_ops_exec { /** @@ -170,11 +242,9 @@ MONGO_FAIL_POINT_DEFINE(hangWithLockDuringBatchUpdate); MONGO_FAIL_POINT_DEFINE(hangWithLockDuringBatchRemove); MONGO_FAIL_POINT_DEFINE(failAtomicTimeseriesWrites); MONGO_FAIL_POINT_DEFINE(hangTimeseriesInsertBeforeCommit); -MONGO_FAIL_POINT_DEFINE(hangTimeseriesInsertBeforeReopeningQuery); MONGO_FAIL_POINT_DEFINE(hangTimeseriesInsertBeforeWrite); MONGO_FAIL_POINT_DEFINE(failUnorderedTimeseriesInsert); - /** * Metrics group for the `updateMany` and `deleteMany` operations. For each * operation, the `duration` and `numDocs` will contribute to aggregated total @@ -227,7 +297,7 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) { recordCurOpMetrics(opCtx); Top::get(opCtx->getServiceContext()) .record(opCtx, - curOp->getNS(), + curOp->getNSS(), curOp->getLogicalOp(), Top::LockType::WriteLocked, durationCount(curOp->elapsedTimeExcludingPauses()), @@ -269,7 +339,7 @@ void finishCurOp(OperationContext* opCtx, CurOp* curOp) { } void makeCollection(OperationContext* opCtx, const NamespaceString& ns) { - writeConflictRetry(opCtx, "implicit collection creation", ns.ns(), [&opCtx, &ns] { + writeConflictRetry(opCtx, "implicit collection creation", ns, [&opCtx, &ns] { AutoGetDb autoDb(opCtx, ns.dbName(), MODE_IX); Lock::CollectionLock collLock(opCtx, ns, MODE_IX); @@ -293,11 +363,11 @@ void makeCollection(OperationContext* opCtx, const NamespaceString& ns) { }); } -void insertDocuments(OperationContext* opCtx, - const CollectionPtr& collection, - std::vector::iterator begin, - std::vector::iterator end, - bool fromMigrate) { +void insertDocumentsAtomically(OperationContext* opCtx, + const CollectionAcquisition& collection, + std::vector::iterator begin, + std::vector::iterator end, + bool fromMigrate) { // Intentionally not using writeConflictRetry. That is handled by the caller so it can react to // oversized batches. WriteUnitOfWork wuow(opCtx); @@ -312,7 +382,7 @@ void insertDocuments(OperationContext* opCtx, auto replCoord = repl::ReplicationCoordinator::get(opCtx); auto inTransaction = opCtx->inMultiDocumentTransaction(); - if (!inTransaction && !replCoord->isOplogDisabledFor(opCtx, collection->ns())) { + if (!inTransaction && !replCoord->isOplogDisabledFor(opCtx, collection.nss())) { // Populate 'slots' with new optimes for each insert. 
// This also notifies the storage engine of each new timestamp. auto oplogSlots = repl::getNextOpTimes(opCtx, batchSize); @@ -333,12 +403,16 @@ void insertDocuments(OperationContext* opCtx, }, [&](const BSONObj& data) { // Check if the failpoint specifies no collection or matches the existing one. - const auto collElem = data["collectionNS"]; - return !collElem || collection->ns().ns() == collElem.str(); + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "collectionNS"); + return fpNss.isEmpty() || collection.nss() == fpNss; }); - uassertStatusOK(collection_internal::insertDocuments( - opCtx, collection, begin, end, &CurOp::get(opCtx)->debug(), fromMigrate)); + uassertStatusOK(collection_internal::insertDocuments(opCtx, + collection.getCollectionPtr(), + begin, + end, + &CurOp::get(opCtx)->debug(), + fromMigrate)); wuow.commit(); } @@ -356,7 +430,7 @@ void insertDocuments(OperationContext* opCtx, Status checkIfTransactionOnCappedColl(OperationContext* opCtx, const CollectionPtr& collection) { if (opCtx->inMultiDocumentTransaction() && collection->isCapped()) { return {ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Collection '" << collection->ns() + str::stream() << "Collection '" << collection->ns().toStringForErrorMsg() << "' is a capped collection. Writes in transactions are not allowed " "on capped collections."}; } @@ -366,7 +440,7 @@ Status checkIfTransactionOnCappedColl(OperationContext* opCtx, const CollectionP void assertTimeseriesBucketsCollectionNotFound(const NamespaceString& ns) { uasserted(ErrorCodes::NamespaceNotFound, str::stream() << "Buckets collection not found for time-series collection " - << ns.getTimeseriesViewNamespace()); + << ns.getTimeseriesViewNamespace().toStringForErrorMsg()); } template @@ -387,9 +461,10 @@ SingleWriteResult makeWriteResultForInsertOrDeleteRetry() { // perform. First item in the tuple determines whether to bypass document validation altogether, // second item determines if _safeContent_ array can be modified in an encrypted collection. 
std::tuple getDocumentValidationFlags(OperationContext* opCtx, - const write_ops::WriteCommandRequestBase& req) { + const write_ops::WriteCommandRequestBase& req, + const boost::optional& tenantId) { auto& encryptionInfo = req.getEncryptionInformation(); - const bool fleCrudProcessed = getFleCrudProcessed(opCtx, encryptionInfo); + const bool fleCrudProcessed = getFleCrudProcessed(opCtx, encryptionInfo, tenantId); return std::make_tuple(req.getBypassDocumentValidation(), fleCrudProcessed); } } // namespace @@ -489,13 +564,14 @@ bool handleError(OperationContext* opCtx, } bool getFleCrudProcessed(OperationContext* opCtx, - const boost::optional& encryptionInfo) { + const boost::optional& encryptionInfo, + const boost::optional& tenantId) { if (encryptionInfo && encryptionInfo->getCrudProcessed().value_or(false)) { uassert(6666201, "External users cannot have crudProcessed enabled", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(tenantId), ActionType::internal)); return true; } @@ -536,14 +612,15 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx, uasserted(ErrorCodes::InternalError, "failAllInserts failpoint active!"); } - boost::optional collection; + boost::optional collection; auto acquireCollection = [&] { while (true) { - collection.emplace(opCtx, - nss, - fixLockModeForSystemDotViewsChanges(nss, MODE_IX), - AutoGetCollection::Options{}.expectedUUID(collectionUUID)); - if (*collection) { + collection.emplace(mongo::acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, nss, AcquisitionPrerequisites::kWrite, collectionUUID), + fixLockModeForSystemDotViewsChanges(nss, MODE_IX))); + if (collection->exists()) { break; } @@ -586,15 +663,15 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx, if (shouldProceedWithBatchInsert) { try { - if (!collection->getCollection()->isCapped() && !inTxn && batch.size() > 1) { + if (!collection->getCollectionPtr()->isCapped() && !inTxn && batch.size() > 1) { // First try doing it all together. If all goes well, this is all we need to do. // See Collection::_insertDocuments for why we do all capped inserts one-at-a-time. lastOpFixer->startingOp(nss); - insertDocuments(opCtx, - collection->getCollection(), - batch.begin(), - batch.end(), - source == OperationSource::kFromMigrate); + insertDocumentsAtomically(opCtx, + *collection, + batch.begin(), + batch.end(), + source == OperationSource::kFromMigrate); lastOpFixer->finishedOpSuccessfully(); globalOpCounters.gotInserts(batch.size()); ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInserts( @@ -622,19 +699,16 @@ bool insertBatchAndHandleErrors(OperationContext* opCtx, ServerWriteConcernMetrics::get(opCtx)->recordWriteConcernForInsert( opCtx->getWriteConcern()); try { - writeConflictRetry(opCtx, "insert", nss.ns(), [&] { + writeConflictRetry(opCtx, "insert", nss, [&] { try { if (!collection) acquireCollection(); // Transactions are not allowed to operate on capped collections. 
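insertBatchAndHandleErrors above first attempts the whole batch in a single unit of work and falls back to inserting one document at a time when that is not possible, so each failure can be reported individually. A standalone sketch of that try-bulk-then-per-item fallback, ignoring ordered-write semantics and MongoDB's real error types:

```cpp
#include <functional>
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

// Try to apply 'insertAll' to the whole batch; on failure, retry item by item so
// each failure can be recorded without aborting the remaining inserts.
std::vector<std::string> insertWithFallback(
    const std::vector<int>& batch,
    const std::function<void(const std::vector<int>&)>& insertAll,
    const std::function<void(int)>& insertOne) {
    std::vector<std::string> errors;
    try {
        insertAll(batch);
        return errors;  // fast path: everything went in atomically
    } catch (const std::exception&) {
        // Fall through to the per-document path.
    }
    for (int doc : batch) {
        try {
            insertOne(doc);
        } catch (const std::exception& e) {
            errors.push_back("doc " + std::to_string(doc) + ": " + e.what());
        }
    }
    return errors;
}

int main() {
    auto errors = insertWithFallback(
        {1, 2, 3},
        [](const std::vector<int>&) { throw std::runtime_error("batch insert failed"); },
        [](int doc) { if (doc == 2) throw std::runtime_error("duplicate key"); });
    for (const auto& e : errors) std::cout << e << "\n";  // only doc 2 fails
}
```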
uassertStatusOK( - checkIfTransactionOnCappedColl(opCtx, collection->getCollection())); + checkIfTransactionOnCappedColl(opCtx, collection->getCollectionPtr())); lastOpFixer->startingOp(nss); - insertDocuments(opCtx, - collection->getCollection(), - it, - it + 1, - source == OperationSource::kFromMigrate); + insertDocumentsAtomically( + opCtx, *collection, it, it + 1, source == OperationSource::kFromMigrate); lastOpFixer->finishedOpSuccessfully(); SingleWriteResult result; result.setN(1); @@ -673,11 +747,13 @@ boost::optional advanceExecutor(OperationContext* opCtx, PlanExecutor::ExecState state; try { state = exec->getNext(&value, nullptr); + } catch (const WriteConflictException&) { + // Propagate the WCE to be retried at a higher-level without logging. + throw; } catch (DBException& exception) { auto&& explainer = exec->getPlanExplainer(); auto&& [stats, _] = explainer.getWinningPlanStats(ExplainOptions::Verbosity::kExecStats); LOGV2_WARNING(7267501, - "Plan executor error during findAndModify: {error}, stats: {stats}", "Plan executor error during findAndModify", "error"_attr = exception.toStatus(), "stats"_attr = redact(stats)); @@ -695,69 +771,75 @@ boost::optional advanceExecutor(OperationContext* opCtx, } UpdateResult writeConflictRetryUpsert(OperationContext* opCtx, - const NamespaceString& nsString, + const NamespaceString& nss, CurOp* curOp, OpDebug* opDebug, bool inTransaction, bool remove, bool upsert, boost::optional& docFound, - ParsedUpdate* parsedUpdate) { - AutoGetCollection autoColl(opCtx, nsString, MODE_IX); - Database* db = autoColl.ensureDbExists(opCtx); + const UpdateRequest& updateRequest) { + auto [isTimeseriesUpdate, nsString] = timeseries::isTimeseries(opCtx, updateRequest); + // TODO SERVER-76583: Remove this check. + uassert(7314600, + "Retryable findAndModify on a timeseries is not supported", + !isTimeseriesUpdate || !opCtx->isRetryableWrite()); + + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nsString, AcquisitionPrerequisites::kWrite), + MODE_IX); + auto dbName = nsString.dbName(); + Database* db = [&]() { + AutoGetDb autoDb(opCtx, dbName, MODE_IX); + return autoDb.ensureDbExists(opCtx); + }(); { stdx::lock_guard lk(*opCtx->getClient()); CurOp::get(opCtx)->enter_inlock( - nsString, CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(nsString.dbName())); + nsString, CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(dbName)); } assertCanWrite_inlock(opCtx, nsString); - CollectionPtr createdCollection; - const CollectionPtr* collectionPtr = &autoColl.getCollection(); - // TODO SERVER-50983: Create abstraction for creating collection when using // AutoGetCollection Create the collection if it does not exist when performing an upsert // because the update stage does not create its own collection - if (!*collectionPtr && upsert) { - assertCanWrite_inlock(opCtx, nsString); - - createdCollection = CollectionPtr( - CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nsString)); - - // If someone else beat us to creating the collection, do nothing - if (!createdCollection) { - uassertStatusOK(userAllowedCreateNS(opCtx, nsString)); - OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE - unsafeCreateCollection(opCtx); - WriteUnitOfWork wuow(opCtx); - CollectionOptions defaultCollectionOptions; - uassertStatusOK(db->userCreateNS(opCtx, nsString, defaultCollectionOptions)); - wuow.commit(); - - createdCollection = CollectionPtr( - 
CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nsString)); - } - - invariant(createdCollection); - createdCollection.makeYieldable(opCtx, - LockedCollectionYieldRestore(opCtx, createdCollection)); - collectionPtr = &createdCollection; - } - const auto& collection = *collectionPtr; - - if (collection && collection->isCapped()) { + if (!collection.exists() && upsert) { + CollectionWriter collectionWriter(opCtx, &collection); + uassertStatusOK(userAllowedCreateNS(opCtx, nsString)); + OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE unsafeCreateCollection( + opCtx); + WriteUnitOfWork wuow(opCtx); + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx, &collection); + CollectionOptions defaultCollectionOptions; + uassertStatusOK(db->userCreateNS(opCtx, nsString, defaultCollectionOptions)); + wuow.commit(); + } + + if (collection.exists() && collection.getCollectionPtr()->isCapped()) { uassert( ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Collection '" << collection->ns() + str::stream() << "Collection '" << collection.nss().toStringForErrorMsg() << "' is a capped collection. Writes in transactions are not allowed on " "capped collections.", !inTransaction); } + if (isTimeseriesUpdate) { + timeseries::assertTimeseriesBucketsCollection(collection.getCollectionPtr().get()); + } + + ParsedUpdate parsedUpdate(opCtx, + &updateRequest, + collection.getCollectionPtr(), + false /*forgoOpCounterIncrements*/, + isTimeseriesUpdate); + uassertStatusOK(parsedUpdate.parseRequest()); + const auto exec = uassertStatusOK( - getExecutorUpdate(opDebug, &collection, parsedUpdate, boost::none /* verbosity */)); + getExecutorUpdate(opDebug, collection, &parsedUpdate, boost::none /* verbosity */)); { stdx::lock_guard lk(*opCtx->getClient()); @@ -772,8 +854,9 @@ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx, PlanSummaryStats summaryStats; auto&& explainer = exec->getPlanExplainer(); explainer.getSummaryStats(&summaryStats); - if (collection) { - CollectionQueryInfo::get(collection).notifyOfQuery(opCtx, collection, summaryStats); + if (collection.exists()) { + CollectionQueryInfo::get(collection.getCollectionPtr()) + .notifyOfQuery(opCtx, collection.getCollectionPtr(), summaryStats); } auto updateResult = exec->getUpdateResult(); write_ops_exec::recordUpdateResultInOpDebug(updateResult, opDebug); @@ -801,20 +884,27 @@ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx, } long long writeConflictRetryRemove(OperationContext* opCtx, - const NamespaceString& nsString, - DeleteRequest* deleteRequest, + const NamespaceString& nss, + const DeleteRequest& deleteRequest, CurOp* curOp, OpDebug* opDebug, bool inTransaction, boost::optional& docFound) { + auto [isTimeseriesDelete, nsString] = timeseries::isTimeseries(opCtx, deleteRequest); + // TODO SERVER-76583: Remove this check. 
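The new writeConflictRetryUpsert initializes its Database pointer with an immediately invoked lambda so the AutoGetDb helper lives only for that one statement. The same idiom is useful whenever a const value needs multi-step setup; a small generic sketch (the file path is illustrative):

```cpp
#include <fstream>
#include <iostream>
#include <string>

int main() {
    // Immediately invoked lambda: the ifstream and any intermediate state live only
    // inside the lambda, and 'firstLine' can be const even though building it takes
    // several statements.
    const std::string firstLine = [&]() -> std::string {
        std::ifstream in("/etc/hostname");  // illustrative path
        std::string line;
        if (!std::getline(in, line)) {
            return "<unavailable>";
        }
        return line;
    }();

    std::cout << firstLine << "\n";
}
```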
+ uassert(7308305, + "Retryable findAndModify on a timeseries is not supported", + !isTimeseriesDelete || !opCtx->isRetryableWrite()); - invariant(deleteRequest); + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nsString, AcquisitionPrerequisites::kWrite), + MODE_IX); + const auto& collectionPtr = collection.getCollectionPtr(); - ParsedDelete parsedDelete(opCtx, deleteRequest); + ParsedDelete parsedDelete(opCtx, &deleteRequest, collectionPtr, isTimeseriesDelete); uassertStatusOK(parsedDelete.parseRequest()); - AutoGetCollection collection(opCtx, nsString, MODE_IX); - { stdx::lock_guard lk(*opCtx->getClient()); CurOp::get(opCtx)->enter_inlock( @@ -823,17 +913,17 @@ long long writeConflictRetryRemove(OperationContext* opCtx, assertCanWrite_inlock(opCtx, nsString); - if (collection && collection->isCapped()) { + if (collectionPtr && collectionPtr->isCapped()) { uassert( ErrorCodes::OperationNotSupportedInTransaction, - str::stream() << "Collection '" << collection->ns() + str::stream() << "Collection '" << collection.nss().toStringForErrorMsg() << "' is a capped collection. Writes in transactions are not allowed on " "capped collections.", !inTransaction); } - const auto exec = uassertStatusOK(getExecutorDelete( - opDebug, &collection.getCollection(), &parsedDelete, boost::none /* verbosity */)); + const auto exec = uassertStatusOK( + getExecutorDelete(opDebug, collection, &parsedDelete, boost::none /* verbosity */)); { stdx::lock_guard lk(*opCtx->getClient()); @@ -847,13 +937,13 @@ long long writeConflictRetryRemove(OperationContext* opCtx, PlanSummaryStats summaryStats; exec->getPlanExplainer().getSummaryStats(&summaryStats); - if (const auto& coll = collection.getCollection()) { + if (const auto& coll = collectionPtr) { CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, summaryStats); } opDebug->setPlanSummaryMetrics(summaryStats); // Fill out OpDebug with the number of deleted docs. 
- auto nDeleted = exec->executeDelete(); + auto nDeleted = exec->getDeleteResult(); opDebug->additiveMetrics.ndeleted = nDeleted; if (curOp->shouldDBProfile()) { @@ -937,7 +1027,7 @@ WriteResult performInserts(OperationContext* opCtx, curOp.done(); Top::get(opCtx->getServiceContext()) .record(opCtx, - wholeOp.getNamespace().ns(), + wholeOp.getNamespace(), LogicalOp::opInsert, Top::LockType::WriteLocked, durationCount(curOp.elapsedTimeExcludingPauses()), @@ -959,8 +1049,8 @@ WriteResult performInserts(OperationContext* opCtx, uassertStatusOK(userAllowedWriteNS(opCtx, wholeOp.getNamespace())); } - const auto [disableDocumentValidation, fleCrudProcessed] = - getDocumentValidationFlags(opCtx, wholeOp.getWriteCommandRequestBase()); + const auto [disableDocumentValidation, fleCrudProcessed] = getDocumentValidationFlags( + opCtx, wholeOp.getWriteCommandRequestBase(), wholeOp.getDbName().tenantId()); DisableDocumentSchemaValidationIfTrue docSchemaValidationDisabler(opCtx, disableDocumentValidation); @@ -1094,78 +1184,78 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx, uasserted(ErrorCodes::InternalError, "failAllUpdates failpoint active!"); } - boost::optional collection; - while (true) { - collection.emplace(opCtx, - ns, - fixLockModeForSystemDotViewsChanges(ns, MODE_IX), - AutoGetCollection::Options{}.expectedUUID(opCollectionUUID)); - if (*collection) { - break; - } - - if (source == OperationSource::kTimeseriesInsert || - source == OperationSource::kTimeseriesUpdate) { - assertTimeseriesBucketsCollectionNotFound(ns); - } - - // If this is an upsert, which is an insert, we must have a collection. - // An update on a non-existent collection is okay and handled later. - if (!updateRequest->isUpsert()) - break; + const CollectionAcquisition collection = [&]() { + const auto acquisitionRequest = CollectionAcquisitionRequest::fromOpCtx( + opCtx, ns, AcquisitionPrerequisites::kWrite, opCollectionUUID); + while (true) { + { + auto acquisition = acquireCollection( + opCtx, acquisitionRequest, fixLockModeForSystemDotViewsChanges(ns, MODE_IX)); + if (acquisition.exists()) { + return acquisition; + } - collection.reset(); // unlock. - makeCollection(opCtx, ns); - } + if (source == OperationSource::kTimeseriesInsert || + source == OperationSource::kTimeseriesUpdate) { + assertTimeseriesBucketsCollectionNotFound(ns); + } - UpdateStageParams::DocumentCounter documentCounter = nullptr; + // If this is an upsert, which is an insert, we must have a collection. + // An update on a non-existent collection is okay and handled later. + if (!updateRequest->isUpsert()) { + // Inexistent collection. 
+ return acquisition; + } + } + makeCollection(opCtx, ns); + } + }(); if (source == OperationSource::kTimeseriesUpdate) { - uassert(ErrorCodes::NamespaceNotFound, - "Could not find time-series buckets collection for update", - collection); - - auto timeseriesOptions = collection->getCollection()->getTimeseriesOptions(); - uassert(ErrorCodes::InvalidOptions, - "Time-series buckets collection is missing time-series options", - timeseriesOptions); - - auto metaField = timeseriesOptions->getMetaField(); - uassert( - ErrorCodes::InvalidOptions, - "Cannot perform an update on a time-series collection that does not have a metaField", - metaField); - - uassert(ErrorCodes::InvalidOptions, - "Cannot perform a non-multi update on a time-series collection", - updateRequest->isMulti()); - - uassert(ErrorCodes::InvalidOptions, - "Cannot perform an upsert on a time-series collection", - !updateRequest->isUpsert()); + timeseries::assertTimeseriesBucketsCollection(collection.getCollectionPtr().get()); // Only translate the hint if it is specified with an index key. + auto timeseriesOptions = collection.getCollectionPtr()->getTimeseriesOptions(); if (timeseries::isHintIndexKey(updateRequest->getHint())) { updateRequest->setHint( uassertStatusOK(timeseries::createBucketsIndexSpecFromTimeseriesIndexSpec( *timeseriesOptions, updateRequest->getHint()))); } - updateRequest->setQuery(timeseries::translateQuery(updateRequest->getQuery(), *metaField)); - updateRequest->setUpdateModification( - timeseries::translateUpdate(updateRequest->getUpdateModification(), *metaField)); + if (!feature_flags::gTimeseriesUpdatesSupport.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassert(ErrorCodes::InvalidOptions, + "Cannot perform a non-multi update on a time-series collection", + updateRequest->isMulti()); + + uassert(ErrorCodes::InvalidOptions, + "Cannot perform an upsert on a time-series collection", + !updateRequest->isUpsert()); + + auto metaField = timeseriesOptions->getMetaField(); + uassert(ErrorCodes::InvalidOptions, + "Cannot perform an update on a time-series collection that does not have a " + "metaField", + timeseriesOptions->getMetaField()); - documentCounter = - timeseries::numMeasurementsForBucketCounter(timeseriesOptions->getTimeField()); + updateRequest->setQuery( + timeseries::translateQuery(updateRequest->getQuery(), *metaField)); + auto modification = uassertStatusOK( + timeseries::translateUpdate(updateRequest->getUpdateModification(), *metaField)); + updateRequest->setUpdateModification(modification); + } } - if (const auto& coll = collection->getCollection()) { + if (const auto& coll = collection.getCollectionPtr()) { // Transactions are not allowed to operate on capped collections. 
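The acquisition lambda just above loops: it returns the acquisition when the collection already exists, returns the empty acquisition for a plain (non-upsert) update on a missing namespace, and otherwise drops the acquisition, creates the collection, and retries. A simplified standalone sketch of that acquire-or-create loop, using illustrative types rather than the real acquisition API:

```cpp
#include <map>
#include <string>

struct Acquisition {
    bool exists;
    std::string name;
};

std::map<std::string, int> g_catalog;  // stand-in for the collection catalog

Acquisition acquire(const std::string& ns) {
    return {g_catalog.count(ns) != 0, ns};
}

void makeCollection(const std::string& ns) {
    g_catalog.emplace(ns, 0);  // create while no acquisition is held
}

// Mirrors the loop above: return an existing collection, return the empty
// acquisition for a non-upsert on a missing namespace, otherwise create and retry.
Acquisition acquireOrCreate(const std::string& ns, bool isUpsert) {
    while (true) {
        auto acq = acquire(ns);
        if (acq.exists || !isUpsert)
            return acq;
        makeCollection(ns);
    }
}

int main() {
    return acquireOrCreate("db.coll", /*isUpsert=*/true).exists ? 0 : 1;
}
```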
uassertStatusOK(checkIfTransactionOnCappedColl(opCtx, coll)); } - const ExtensionsCallbackReal extensionsCallback(opCtx, &updateRequest->getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, updateRequest, extensionsCallback, forgoOpCounterIncrements); + ParsedUpdate parsedUpdate(opCtx, + updateRequest, + collection.getCollectionPtr(), + forgoOpCounterIncrements, + updateRequest->source() == OperationSource::kTimeseriesUpdate); uassertStatusOK(parsedUpdate.parseRequest()); CurOpFailpointHelpers::waitWhileFailPointEnabled( @@ -1173,7 +1263,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx, auto& curOp = *CurOp::get(opCtx); - if (collection->getDb()) { + if (DatabaseHolder::get(opCtx)->getDb(opCtx, ns.dbName())) { curOp.raiseDbProfileLevel( CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(ns.dbName())); } @@ -1181,11 +1271,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx, assertCanWrite_inlock(opCtx, ns); auto exec = uassertStatusOK( - getExecutorUpdate(&curOp.debug(), - collection ? &collection->getCollection() : &CollectionPtr::null, - &parsedUpdate, - boost::none /* verbosity */, - std::move(documentCounter))); + getExecutorUpdate(&curOp.debug(), collection, &parsedUpdate, boost::none /* verbosity */)); { stdx::lock_guard lk(*opCtx->getClient()); @@ -1197,7 +1283,7 @@ static SingleWriteResult performSingleUpdateOp(OperationContext* opCtx, PlanSummaryStats summary; auto&& explainer = exec->getPlanExplainer(); explainer.getSummaryStats(&summary); - if (const auto& coll = collection->getCollection()) { + if (const auto& coll = collection.getCollectionPtr()) { CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, summary); } @@ -1269,9 +1355,7 @@ static SingleWriteResult performSingleUpdateOpWithDupKeyRetry( request.setLetParameters(std::move(letParams)); } request.setStmtIds(stmtIds); - request.setYieldPolicy(opCtx->inMultiDocumentTransaction() - ? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO); + request.setYieldPolicy(PlanYieldPolicy::YieldPolicy::YIELD_AUTO); request.setSource(source); if (sampleId) { request.setSampleId(sampleId); @@ -1283,13 +1367,13 @@ static SingleWriteResult performSingleUpdateOpWithDupKeyRetry( try { bool containsDotsAndDollarsField = false; - const auto ret = performSingleUpdateOp(opCtx, - ns, - opCollectionUUID, - &request, - source, - &containsDotsAndDollarsField, - forgoOpCounterIncrements); + auto ret = performSingleUpdateOp(opCtx, + ns, + opCollectionUUID, + &request, + source, + &containsDotsAndDollarsField, + forgoOpCounterIncrements); if (containsDotsAndDollarsField) { // If it's an upsert, increment 'inserts' metric, otherwise increment 'updates'. 
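The hunks around this point touch performSingleUpdateOpWithDupKeyRetry, which retries the upsert when a DuplicateKey error is deemed eligible by shouldRetryDuplicateKeyException (an upsert with multi:false whose filter is a pure equality match). A standalone sketch of that catch-check-retry shape, with placeholder types standing in for the real exception and query classes:

```cpp
#include <stdexcept>

struct DuplicateKeyError : std::runtime_error {
    using std::runtime_error::runtime_error;
};

struct Request {
    bool upsert = false;
    bool multi = false;
    bool equalityOnlyQuery = false;
};

// Stand-in for shouldRetryDuplicateKeyException(): only a single-document upsert
// whose filter is a pure equality match is safe to simply run again.
bool shouldRetryDuplicateKey(const Request& req) {
    return req.upsert && !req.multi && req.equalityOnlyQuery;
}

template <typename Op>
auto performWithDupKeyRetry(const Request& req, Op op) {
    while (true) {
        try {
            return op();
        } catch (const DuplicateKeyError&) {
            if (!shouldRetryDuplicateKey(req))
                throw;  // not eligible: surface the error to the caller
            // Eligible: another writer raced us on the same key; retrying will
            // match the now-existing document instead of inserting a duplicate.
        }
    }
}

int main() {
    Request req{true, false, true};
    int attempts = 0;
    int n = performWithDupKeyRetry(req, [&] {
        if (++attempts == 1)
            throw DuplicateKeyError("E11000 duplicate key");
        return 1;  // second attempt matches the existing document
    });
    return (n == 1 && attempts == 2) ? 0 : 1;
}
```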
@@ -1298,16 +1382,10 @@ static SingleWriteResult performSingleUpdateOpWithDupKeyRetry( return ret; } catch (ExceptionFor& ex) { - const ExtensionsCallbackReal extensionsCallback(opCtx, &request.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback); - uassertStatusOK(parsedUpdate.parseRequest()); - - if (!parsedUpdate.hasParsedQuery()) { - uassertStatusOK(parsedUpdate.parseQueryToCQ()); - } + auto cq = uassertStatusOK(parseWriteQueryToCQ(opCtx, nullptr /* expCtx */, request)); - if (!shouldRetryDuplicateKeyException(parsedUpdate, - *ex.extraInfo())) { + if (!write_ops_exec::shouldRetryDuplicateKeyException( + request, *cq, *ex.extraInfo())) { throw; } @@ -1323,11 +1401,75 @@ static SingleWriteResult performSingleUpdateOpWithDupKeyRetry( MONGO_UNREACHABLE; } +void runTimeseriesRetryableUpdates(OperationContext* opCtx, + const NamespaceString& bucketNs, + const write_ops::UpdateCommandRequest& wholeOp, + std::shared_ptr executor, + write_ops_exec::WriteResult* reply) { + ON_BLOCK_EXIT([&] { + // Increments the counter if the command contains retries. + if (!reply->retriedStmtIds.empty()) { + RetryableWritesStats::get(opCtx)->incrementRetriedCommandsCount(); + } + }); + size_t nextOpIndex = 0; + for (auto&& singleOp : wholeOp.getUpdates()) { + auto singleUpdateOp = timeseries::buildSingleUpdateOp(wholeOp, nextOpIndex); + const auto stmtId = write_ops::getStmtIdForWriteAt(wholeOp, nextOpIndex++); + + auto inlineExecutor = std::make_shared(); + txn_api::SyncTransactionWithRetries txn( + opCtx, executor, TransactionParticipantResourceYielder::make("update"), inlineExecutor); + + auto swResult = txn.runNoThrow( + opCtx, + [&singleUpdateOp, stmtId, &reply](const txn_api::TransactionClient& txnClient, + ExecutorPtr txnExec) { + auto updateResponse = txnClient.runCRUDOpSync(singleUpdateOp, {stmtId}); + // Propagates the write results from executing the statement to the current + // command's results. + SingleWriteResult singleReply; + singleReply.setN(updateResponse.getN()); + singleReply.setNModified(updateResponse.getNModified()); + if (updateResponse.isUpsertDetailsSet()) { + invariant(updateResponse.sizeUpsertDetails() == 1); + singleReply.setUpsertedId( + updateResponse.getUpsertDetailsAt(0)->getUpsertedID()); + } + if (updateResponse.areRetriedStmtIdsSet()) { + invariant(updateResponse.getRetriedStmtIds().size() == 1); + reply->retriedStmtIds.push_back(updateResponse.getRetriedStmtIds()[0]); + } + reply->results.push_back(singleReply); + return SemiFuture::makeReady(); + }); + try { + // Rethrows the error from the command or the internal transaction api to handle them + // accordingly. 
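runTimeseriesRetryableUpdates, added here, runs each update statement as its own internal transaction and folds every statement's outcome back into the command-level reply, stopping early only when handleError reports that the batch cannot continue. A simplified sketch of that per-statement loop, under the assumption that the transaction runner is just a callable that may throw (names are illustrative):

```cpp
#include <exception>
#include <functional>
#include <string>
#include <vector>

struct SingleResult {
    bool ok = true;
    std::string error;
};

struct Reply {
    std::vector<SingleResult> results;
    bool canContinue = true;
};

// Runs each statement through 'runInTxn' and records one result per statement.
// 'ordered' loosely mirrors wholeOp.getOrdered(): an ordered batch stops at the
// first statement whose error is not recoverable.
Reply runStatements(const std::vector<std::string>& statements,
                    const std::function<void(const std::string&)>& runInTxn,
                    bool ordered) {
    Reply reply;
    for (const auto& stmt : statements) {
        try {
            runInTxn(stmt);
            reply.results.push_back({true, {}});
        } catch (const std::exception& ex) {
            reply.results.push_back({false, ex.what()});
            reply.canContinue = !ordered;
            if (!reply.canContinue)
                break;
        }
    }
    return reply;
}
```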
+ uassertStatusOK(swResult); + uassertStatusOK(swResult.getValue().getEffectiveStatus()); + } catch (const DBException& ex) { + reply->canContinue = handleError(opCtx, + ex, + bucketNs, + wholeOp.getOrdered(), + singleOp.getMulti(), + singleOp.getSampleId(), + reply); + if (!reply->canContinue) { + break; + } + } + } +} + WriteResult performUpdates(OperationContext* opCtx, const write_ops::UpdateCommandRequest& wholeOp, OperationSource source) { auto ns = wholeOp.getNamespace(); + NamespaceString originalNs; if (source == OperationSource::kTimeseriesUpdate && !ns.isTimeseriesBucketsCollection()) { + originalNs = ns; ns = ns.makeTimeseriesBucketsNamespace(); } @@ -1338,8 +1480,8 @@ WriteResult performUpdates(OperationContext* opCtx, (txnParticipant && opCtx->inMultiDocumentTransaction())); uassertStatusOK(userAllowedWriteNS(opCtx, ns)); - const auto [disableDocumentValidation, fleCrudProcessed] = - getDocumentValidationFlags(opCtx, wholeOp.getWriteCommandRequestBase()); + const auto [disableDocumentValidation, fleCrudProcessed] = getDocumentValidationFlags( + opCtx, wholeOp.getWriteCommandRequestBase(), wholeOp.getDbName().tenantId()); DisableDocumentSchemaValidationIfTrue docSchemaValidationDisabler(opCtx, disableDocumentValidation); @@ -1361,15 +1503,28 @@ WriteResult performUpdates(OperationContext* opCtx, const auto& runtimeConstants = wholeOp.getLegacyRuntimeConstants().value_or(Variables::generateRuntimeConstants(opCtx)); - // Increment operator counters only during the fisrt single update operation in a batch of + // Increment operator counters only during the first single update operation in a batch of // updates. bool forgoOpCounterIncrements = false; for (auto&& singleOp : wholeOp.getUpdates()) { + if (source == OperationSource::kTimeseriesUpdate) { + uassert(ErrorCodes::OperationNotSupportedInTransaction, + fmt::format( + "Cannot perform a multi update inside of a multi-document transaction on a " + "time-series collection: {}", + ns.toStringForErrorMsg()), + !opCtx->inMultiDocumentTransaction() || !singleOp.getMulti() || + opCtx->isRetryableWrite()); + } const auto currentOpIndex = nextOpIndex++; const auto stmtId = getStmtIdForWriteOp(opCtx, wholeOp, currentOpIndex); if (opCtx->isRetryableWrite()) { if (auto entry = txnParticipant.checkStatementExecuted(opCtx, stmtId)) { - containsRetry = true; + // For non-sharded user time-series updates, handles the metrics of the command at + // the caller since each statement will run as a command through the internal + // transaction API. + containsRetry = source != OperationSource::kTimeseriesUpdate || + originalNs.isTimeseriesBucketsCollection(); RetryableWritesStats::get(opCtx)->incrementRetriedStatementsCount(); out.results.emplace_back(parseOplogEntryForUpdate(*entry)); out.retriedStmtIds.push_back(stmtId); @@ -1396,7 +1551,7 @@ WriteResult performUpdates(OperationContext* opCtx, opCtx, ns, analyze_shard_key::SampledCommandNameEnum::kUpdate, singleOp); if (sampleId) { analyze_shard_key::QueryAnalysisWriter::get(opCtx) - ->addUpdateQuery(*sampleId, wholeOp, currentOpIndex) + ->addUpdateQuery(opCtx, *sampleId, wholeOp, currentOpIndex) .getAsync([](auto) {}); } @@ -1434,6 +1589,15 @@ WriteResult performUpdates(OperationContext* opCtx, collectMultiUpdateDeleteMetrics(timer->elapsed(), reply.getNModified()); } } catch (const DBException& ex) { + // Do not handle errors for time-series bucket compressions. They need to be transparent + // to users to not interfere with any decisions around operation retry. 
It is OK to + // leave bucket uncompressed in these edge cases. We just record the status to the + // result vector so we can keep track of statistics for failed bucket compressions. + if (source == OperationSource::kTimeseriesBucketCompression) { + out.results.emplace_back(ex.toStatus()); + break; + } + out.canContinue = handleError(opCtx, ex, ns, @@ -1483,9 +1647,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx, request.setQuery(op.getQ()); request.setCollation(write_ops::collationOf(op)); request.setMulti(op.getMulti()); - request.setYieldPolicy(opCtx->inMultiDocumentTransaction() - ? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO); + request.setYieldPolicy(PlanYieldPolicy::YieldPolicy::YIELD_AUTO); request.setStmtId(stmtId); request.setHint(op.getHint()); @@ -1499,23 +1661,16 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx, uasserted(ErrorCodes::InternalError, "failAllRemoves failpoint active!"); } - AutoGetCollection collection(opCtx, - ns, - fixLockModeForSystemDotViewsChanges(ns, MODE_IX), - AutoGetCollection::Options{}.expectedUUID(opCollectionUUID)); - - DeleteStageParams::DocumentCounter documentCounter = nullptr; + auto acquisitionRequest = CollectionAcquisitionRequest::fromOpCtx( + opCtx, ns, AcquisitionPrerequisites::kWrite, opCollectionUUID); + const auto collection = acquireCollection( + opCtx, acquisitionRequest, fixLockModeForSystemDotViewsChanges(ns, MODE_IX)); if (source == OperationSource::kTimeseriesDelete) { - uassert(ErrorCodes::NamespaceNotFound, - "Could not find time-series buckets collection for write", - *collection); - auto timeseriesOptions = collection->getTimeseriesOptions(); - uassert(ErrorCodes::InvalidOptions, - "Time-series buckets collection is missing time-series options", - timeseriesOptions); + timeseries::assertTimeseriesBucketsCollection(collection.getCollectionPtr().get()); // Only translate the hint if it is specified by index key. + auto timeseriesOptions = collection.getCollectionPtr()->getTimeseriesOptions(); if (timeseries::isHintIndexKey(request.getHint())) { request.setHint( uassertStatusOK(timeseries::createBucketsIndexSpecFromTimeseriesIndexSpec( @@ -1537,19 +1692,15 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx, request.setQuery(timeseries::translateQuery(request.getQuery(), *metaField)); } } - - documentCounter = - timeseries::numMeasurementsForBucketCounter(timeseriesOptions->getTimeField()); } ParsedDelete parsedDelete(opCtx, &request, - source == OperationSource::kTimeseriesDelete && collection - ? 
collection->getTimeseriesOptions() - : boost::none); + collection.getCollectionPtr(), + source == OperationSource::kTimeseriesDelete); uassertStatusOK(parsedDelete.parseRequest()); - if (collection.getDb()) { + if (DatabaseHolder::get(opCtx)->getDb(opCtx, ns.dbName())) { curOp.raiseDbProfileLevel( CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(ns.dbName())); } @@ -1559,11 +1710,8 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx, CurOpFailpointHelpers::waitWhileFailPointEnabled( &hangWithLockDuringBatchRemove, opCtx, "hangWithLockDuringBatchRemove"); - auto exec = uassertStatusOK(getExecutorDelete(&curOp.debug(), - &collection.getCollection(), - &parsedDelete, - boost::none /* verbosity */, - std::move(documentCounter))); + auto exec = uassertStatusOK( + getExecutorDelete(&curOp.debug(), collection, &parsedDelete, boost::none /* verbosity */)); { stdx::lock_guard lk(*opCtx->getClient()); @@ -1576,7 +1724,7 @@ static SingleWriteResult performSingleDeleteOp(OperationContext* opCtx, PlanSummaryStats summary; auto&& explainer = exec->getPlanExplainer(); explainer.getSummaryStats(&summary); - if (const auto& coll = collection.getCollection()) { + if (const auto& coll = collection.getCollectionPtr()) { CollectionQueryInfo::get(coll).notifyOfQuery(opCtx, coll, summary); } curOp.debug().setPlanSummaryMetrics(summary); @@ -1606,8 +1754,8 @@ WriteResult performDeletes(OperationContext* opCtx, (txnParticipant && opCtx->inMultiDocumentTransaction())); uassertStatusOK(userAllowedWriteNS(opCtx, ns)); - const auto [disableDocumentValidation, fleCrudProcessed] = - getDocumentValidationFlags(opCtx, wholeOp.getWriteCommandRequestBase()); + const auto [disableDocumentValidation, fleCrudProcessed] = getDocumentValidationFlags( + opCtx, wholeOp.getWriteCommandRequestBase(), wholeOp.getDbName().tenantId()); DisableDocumentSchemaValidationIfTrue docSchemaValidationDisabler(opCtx, disableDocumentValidation); @@ -1634,7 +1782,7 @@ WriteResult performDeletes(OperationContext* opCtx, uassert(ErrorCodes::OperationNotSupportedInTransaction, str::stream() << "Cannot perform a multi delete inside of a multi-document " "transaction on a time-series collection: " - << ns, + << ns.toStringForErrorMsg(), !opCtx->inMultiDocumentTransaction() || !singleOp.getMulti()); } @@ -1670,7 +1818,7 @@ WriteResult performDeletes(OperationContext* opCtx, if (auto sampleId = analyze_shard_key::getOrGenerateSampleId( opCtx, ns, analyze_shard_key::SampledCommandNameEnum::kDelete, singleOp)) { analyze_shard_key::QueryAnalysisWriter::get(opCtx) - ->addDeleteQuery(*sampleId, wholeOp, currentOpIndex) + ->addDeleteQuery(opCtx, *sampleId, wholeOp, currentOpIndex) .getAsync([](auto) {}); } @@ -1734,8 +1882,11 @@ Status performAtomicTimeseriesWrites( LastOpFixer lastOpFixer(opCtx); lastOpFixer.startingOp(ns); - AutoGetCollection coll{opCtx, ns, MODE_IX}; - if (!coll) { + const auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, ns, AcquisitionPrerequisites::kWrite), + MODE_IX); + if (!coll.exists()) { assertTimeseriesBucketsCollectionNotFound(ns); } @@ -1776,7 +1927,7 @@ Status performAtomicTimeseriesWrites( if (!insertOps.empty()) { auto status = collection_internal::insertDocuments( - opCtx, *coll, inserts.begin(), inserts.end(), &curOp->debug()); + opCtx, coll.getCollectionPtr(), inserts.begin(), inserts.end(), &curOp->debug()); if (!status.isOK()) { return status; } @@ -1791,10 +1942,10 @@ Status performAtomicTimeseriesWrites( invariant(op.getUpdates().size() == 1); auto& 
update = op.getUpdates().front(); - invariant(coll->isClustered()); + invariant(coll.getCollectionPtr()->isClustered()); auto recordId = record_id_helpers::keyForOID(update.getQ()["_id"].OID()); - auto original = coll->docFor(opCtx, recordId); + auto original = coll.getCollectionPtr()->docFor(opCtx, recordId); CollectionUpdateArgs args{original.value()}; args.criteria = update.getQ(); @@ -1809,13 +1960,10 @@ Status performAtomicTimeseriesWrites( collection_internal::kUpdateAllIndexes; // Assume all indexes are affected. if (update.getU().type() == write_ops::UpdateModification::Type::kDelta) { diffFromUpdate = update.getU().getDiff(); - auto result = doc_diff::applyDiff(original.value(), - diffFromUpdate, - &CollectionQueryInfo::get(*coll).getIndexKeys(opCtx), - static_cast(repl::tenantMigrationInfo(opCtx))); - updated = result.postImage; - diffOnIndexes = - result.indexesAffected ? &diffFromUpdate : collection_internal::kUpdateNoIndexes; + updated = doc_diff::applyDiff(original.value(), + diffFromUpdate, + static_cast(repl::tenantMigrationInfo(opCtx))); + diffOnIndexes = &diffFromUpdate; args.update = update_oplog_entry::makeDeltaOplogEntry(diffFromUpdate); } else if (update.getU().type() == write_ops::UpdateModification::Type::kTransform) { const auto& transform = update.getU().getTransform(); @@ -1835,8 +1983,15 @@ Status performAtomicTimeseriesWrites( opCtx->recoveryUnit()->setTimestamp(args.oplogSlots[0].getTimestamp())); } - collection_internal::updateDocument( - opCtx, *coll, recordId, original, updated, diffOnIndexes, &curOp->debug(), &args); + collection_internal::updateDocument(opCtx, + coll.getCollectionPtr(), + recordId, + original, + updated, + diffOnIndexes, + nullptr /*indexesAffected*/, + &curOp->debug(), + &args); if (slot) { if (participant) { // Manually sets the timestamp so that the "prevOpTime" field in the oplog entry is @@ -1892,18 +2047,15 @@ bool matchContainsOnlyAndedEqualityNodes(const MatchExpression& root) { } } // namespace -bool shouldRetryDuplicateKeyException(const ParsedUpdate& parsedUpdate, +bool shouldRetryDuplicateKeyException(const UpdateRequest& updateRequest, + const CanonicalQuery& cq, const DuplicateKeyErrorInfo& errorInfo) { - invariant(parsedUpdate.hasParsedQuery()); - - const auto updateRequest = parsedUpdate.getRequest(); - // In order to be retryable, the update must be an upsert with multi:false. - if (!updateRequest->isUpsert() || updateRequest->isMulti()) { + if (!updateRequest.isUpsert() || updateRequest.isMulti()) { return false; } - auto matchExpr = parsedUpdate.getParsedQuery()->root(); + auto matchExpr = cq.root(); invariant(matchExpr); // In order to be retryable, the update query must contain no expressions other than AND and EQ. @@ -1960,11 +2112,6 @@ struct TimeseriesSingleWriteResult { bool canContinue = true; }; -enum struct TimeseriesAtomicWriteResult { - kSuccess, - kContinuableError, - kNonContinuableError, -}; /** * Returns true if the time-series write is retryable. */ @@ -1980,20 +2127,6 @@ bool isTimeseriesWriteRetryable(OperationContext* opCtx) { return true; } -void getOpTimeAndElectionId(OperationContext* opCtx, - boost::optional* opTime, - boost::optional* electionId) { - auto* replCoord = repl::ReplicationCoordinator::get(opCtx->getServiceContext()); - const auto replMode = replCoord->getReplicationMode(); - - *opTime = replMode != repl::ReplicationCoordinator::modeNone - ? 
boost::make_optional(repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp()) - : boost::none; - *electionId = replMode == repl::ReplicationCoordinator::modeReplSet - ? boost::make_optional(replCoord->getElectionId()) - : boost::none; -} - NamespaceString ns(const write_ops::InsertCommandRequest& request) { return request.getNamespace(); } @@ -2002,93 +2135,6 @@ NamespaceString makeTimeseriesBucketsNamespace(const NamespaceString& nss) { return nss.isTimeseriesBucketsCollection() ? nss : nss.makeTimeseriesBucketsNamespace(); } -/** - * Transforms a single time-series insert to an update request on an existing bucket. - */ -write_ops::UpdateOpEntry makeTimeseriesUpdateOpEntry( - OperationContext* opCtx, - std::shared_ptr batch, - const BSONObj& metadata) { - BSONObjBuilder updateBuilder; - { - if (!batch->min.isEmpty() || !batch->max.isEmpty()) { - BSONObjBuilder controlBuilder(updateBuilder.subobjStart( - str::stream() << doc_diff::kSubDiffSectionFieldPrefix << "control")); - if (!batch->min.isEmpty()) { - controlBuilder.append( - str::stream() << doc_diff::kSubDiffSectionFieldPrefix << "min", batch->min); - } - if (!batch->max.isEmpty()) { - controlBuilder.append( - str::stream() << doc_diff::kSubDiffSectionFieldPrefix << "max", batch->max); - } - } - } - { // doc_diff::kSubDiffSectionFieldPrefix + => {: ..., :} - StringDataMap dataFieldBuilders; - auto metadataElem = metadata.firstElement(); - DecimalCounter count(batch->numPreviouslyCommittedMeasurements); - for (const auto& doc : batch->measurements) { - for (const auto& elem : doc) { - auto key = elem.fieldNameStringData(); - if (metadataElem && key == metadataElem.fieldNameStringData()) { - continue; - } - auto& builder = dataFieldBuilders[key]; - builder.appendAs(elem, count); - } - ++count; - } - - // doc_diff::kSubDiffSectionFieldPrefix + - BSONObjBuilder dataBuilder(updateBuilder.subobjStart("sdata")); - BSONObjBuilder newDataFieldsBuilder; - for (auto& pair : dataFieldBuilders) { - // Existing 'data' fields with measurements require different treatment from fields - // not observed before (missing from control.min and control.max). - if (batch->newFieldNamesToBeInserted.count(pair.first)) { - newDataFieldsBuilder.append(pair.first, pair.second.obj()); - } - } - auto newDataFields = newDataFieldsBuilder.obj(); - if (!newDataFields.isEmpty()) { - dataBuilder.append(doc_diff::kInsertSectionFieldName, newDataFields); - } - for (auto& pair : dataFieldBuilders) { - // Existing 'data' fields with measurements require different treatment from fields - // not observed before (missing from control.min and control.max). - if (!batch->newFieldNamesToBeInserted.count(pair.first)) { - dataBuilder.append(doc_diff::kSubDiffSectionFieldPrefix + pair.first.toString(), - BSON(doc_diff::kInsertSectionFieldName << pair.second.obj())); - } - } - } - write_ops::UpdateModification::DiffOptions options; - options.mustCheckExistenceForInsertOperations = - static_cast(repl::tenantMigrationInfo(opCtx)); - write_ops::UpdateModification u( - updateBuilder.obj(), write_ops::UpdateModification::DeltaTag{}, options); - auto oid = batch->bucketHandle.bucketId.oid; - write_ops::UpdateOpEntry update(BSON("_id" << oid), std::move(u)); - invariant(!update.getMulti(), oid.toString()); - invariant(!update.getUpsert(), oid.toString()); - return update; -} - -/** - * Transforms a single time-series insert to an update request on an existing bucket. 
- */ -write_ops::UpdateOpEntry makeTimeseriesTransformationOpEntry( - OperationContext* opCtx, - const OID& bucketId, - write_ops::UpdateModification::TransformFunc transformationFunc) { - write_ops::UpdateModification u(std::move(transformationFunc)); - write_ops::UpdateOpEntry update(BSON("_id" << bucketId), std::move(u)); - invariant(!update.getMulti(), bucketId.toString()); - invariant(!update.getUpsert(), bucketId.toString()); - return update; -} - boost::optional> checkFailUnorderedTimeseriesInsertFailPoint( const BSONObj& metadata) { bool canContinue = true; @@ -2120,56 +2166,20 @@ TimeseriesSingleWriteResult getTimeseriesSingleWriteResult( write_ops_exec::WriteResult&& reply, const write_ops::InsertCommandRequest& request) { invariant(reply.results.size() == 1, str::stream() << "Unexpected number of results (" << reply.results.size() - << ") for insert on time-series collection " << ns(request)); + << ") for insert on time-series collection " + << ns(request).toStringForErrorMsg()); return {std::move(reply.results[0]), reply.canContinue}; } -write_ops::WriteCommandRequestBase makeTimeseriesWriteOpBase(std::vector&& stmtIds) { - write_ops::WriteCommandRequestBase base; - - // The schema validation configured in the bucket collection is intended for direct - // operations by end users and is not applicable here. - base.setBypassDocumentValidation(true); - - if (!stmtIds.empty()) { - base.setStmtIds(std::move(stmtIds)); - } - - return base; -} - -write_ops::InsertCommandRequest makeTimeseriesInsertOp( - std::shared_ptr batch, - const BSONObj& metadata, - std::vector&& stmtIds, - const write_ops::InsertCommandRequest& request) { - write_ops::InsertCommandRequest op{makeTimeseriesBucketsNamespace(ns(request)), - {timeseries::makeNewDocumentForWrite(batch, metadata)}}; - op.setWriteCommandRequestBase(makeTimeseriesWriteOpBase(std::move(stmtIds))); - return op; -} - -write_ops::UpdateCommandRequest makeTimeseriesUpdateOp( - OperationContext* opCtx, - std::shared_ptr batch, - const BSONObj& metadata, - std::vector&& stmtIds, - const write_ops::InsertCommandRequest& request) { - write_ops::UpdateCommandRequest op(makeTimeseriesBucketsNamespace(ns(request)), - {makeTimeseriesUpdateOpEntry(opCtx, batch, metadata)}); - op.setWriteCommandRequestBase(makeTimeseriesWriteOpBase(std::move(stmtIds))); - return op; -} - write_ops::UpdateCommandRequest makeTimeseriesTransformationOp( OperationContext* opCtx, const OID& bucketId, write_ops::UpdateModification::TransformFunc transformationFunc, const write_ops::InsertCommandRequest& request) { - write_ops::UpdateCommandRequest op( - makeTimeseriesBucketsNamespace(ns(request)), - {makeTimeseriesTransformationOpEntry(opCtx, bucketId, std::move(transformationFunc))}); + write_ops::UpdateCommandRequest op(makeTimeseriesBucketsNamespace(ns(request)), + {timeseries::makeTimeseriesTransformationOpEntry( + opCtx, bucketId, std::move(transformationFunc))}); write_ops::WriteCommandRequestBase base; // The schema validation configured in the bucket collection is intended for direct @@ -2199,7 +2209,8 @@ TimeseriesSingleWriteResult performTimeseriesInsert( return getTimeseriesSingleWriteResult( write_ops_exec::performInserts( opCtx, - makeTimeseriesInsertOp(batch, metadata, std::move(stmtIds), request), + timeseries::makeTimeseriesInsertOp( + batch, makeTimeseriesBucketsNamespace(ns(request)), metadata, std::move(stmtIds)), OperationSource::kTimeseriesInsert), request); } @@ -2220,13 +2231,21 @@ TimeseriesSingleWriteResult performTimeseriesUpdate( 
write_ops_exec::performUpdates(opCtx, op, OperationSource::kTimeseriesInsert), request); } -TimeseriesSingleWriteResult performTimeseriesBucketCompression( +/** + * Attempts to perform bucket compression on time-series bucket. It will surpress any error caused + * by the write and silently leave the bucket uncompressed when any type of error is encountered. + */ +void tryPerformTimeseriesBucketCompression( OperationContext* opCtx, const timeseries::bucket_catalog::ClosedBucket& closedBucket, const write_ops::InsertCommandRequest& request) { + // When enabled, we skip constructing ClosedBuckets which results in skipping compression. + invariant(!feature_flags::gTimeseriesAlwaysUseCompressedBuckets.isEnabled( + serverGlobalParams.featureCompatibility)); + // Buckets with just a single measurement is not worth compressing. if (closedBucket.numMeasurements.has_value() && closedBucket.numMeasurements.value() <= 1) { - return {SingleWriteResult(), true}; + return; } bool validateCompression = gValidateTimeseriesCompression.load(); @@ -2265,7 +2284,9 @@ TimeseriesSingleWriteResult performTimeseriesBucketCompression( auto compressionOp = makeTimeseriesTransformationOp( opCtx, closedBucket.bucketId.oid, bucketCompressionFunc, request); auto result = getTimeseriesSingleWriteResult( - write_ops_exec::performUpdates(opCtx, compressionOp, OperationSource::kStandard), request); + write_ops_exec::performUpdates( + opCtx, compressionOp, OperationSource::kTimeseriesBucketCompression), + request); // Report stats, if we fail before running the transform function then just skip // reporting. @@ -2281,43 +2302,6 @@ TimeseriesSingleWriteResult performTimeseriesBucketCompression( stats.onBucketClosed(*beforeSize, compressionStats); } } - - return result; -} - -write_ops::UpdateCommandRequest makeTimeseriesDecompressAndUpdateOp( - OperationContext* opCtx, - std::shared_ptr batch, - const BSONObj& metadata, - std::vector&& stmtIds, - const write_ops::InsertCommandRequest& request) { - // Generate the diff and apply it against the previously decrompressed bucket document. - const bool mustCheckExistenceForInsertOperations = - static_cast(repl::tenantMigrationInfo(opCtx)); - auto diff = makeTimeseriesUpdateOpEntry(opCtx, batch, metadata).getU().getDiff(); - auto after = - doc_diff::applyDiff( - batch->decompressed.value().after, diff, nullptr, mustCheckExistenceForInsertOperations) - .postImage; - - auto bucketDecompressionFunc = - [before = std::move(batch->decompressed.value().before), - after = std::move(after)](const BSONObj& bucketDoc) -> boost::optional { - // Make sure the document hasn't changed since we read it into the BucketCatalog. - // This should not happen, but since we can double-check it here, we can guard - // against the missed update that would result from simply replacing with 'after'. - if (!bucketDoc.binaryEqual(before)) { - throwWriteConflictException("Bucket document changed between initial read and update"); - } - return after; - }; - - write_ops::UpdateCommandRequest op( - makeTimeseriesBucketsNamespace(ns(request)), - {makeTimeseriesTransformationOpEntry( - opCtx, batch->bucketHandle.bucketId.oid, std::move(bucketDecompressionFunc))}); - op.setWriteCommandRequestBase(makeTimeseriesWriteOpBase(std::move(stmtIds))); - return op; } /** @@ -2362,9 +2346,17 @@ bool commitTimeseriesBucket(OperationContext* opCtx, << "', but found " << output.result.getValue().getN() << "."); } else { auto op = batch->decompressed.has_value() - ? 
makeTimeseriesDecompressAndUpdateOp( - opCtx, batch, metadata, std::move(stmtIds), request) - : makeTimeseriesUpdateOp(opCtx, batch, metadata, std::move(stmtIds), request); + ? timeseries::makeTimeseriesDecompressAndUpdateOp( + opCtx, + batch, + makeTimeseriesBucketsNamespace(ns(request)), + metadata, + std::move(stmtIds)) + : timeseries::makeTimeseriesUpdateOp(opCtx, + batch, + makeTimeseriesBucketsNamespace(ns(request)), + metadata, + std::move(stmtIds)); auto const output = performTimeseriesUpdate(opCtx, batch, metadata, op, request); if ((output.result.isOK() && output.result.getValue().getNModified() != 1) || @@ -2385,19 +2377,14 @@ bool commitTimeseriesBucket(OperationContext* opCtx, } } - getOpTimeAndElectionId(opCtx, opTime, electionId); + timeseries::getOpTimeAndElectionId(opCtx, opTime, electionId); auto closedBucket = finish(bucketCatalog, batch, timeseries::bucket_catalog::CommitInfo{*opTime, *electionId}); if (closedBucket) { // If this write closed a bucket, compress the bucket - auto output = performTimeseriesBucketCompression(opCtx, *closedBucket, request); - if (auto error = write_ops_exec::generateError( - opCtx, output.result.getStatus(), start + index, errors->size())) { - errors->emplace_back(std::move(*error)); - return output.canContinue; - } + tryPerformTimeseriesBucketCompression(opCtx, *closedBucket, request); } return true; } catch (const DBException& ex) { @@ -2406,34 +2393,24 @@ bool commitTimeseriesBucket(OperationContext* opCtx, throw; } -TimeseriesAtomicWriteResult commitTimeseriesBucketsAtomically( - OperationContext* opCtx, - TimeseriesBatches* batches, - TimeseriesStmtIds&& stmtIds, - std::vector* errors, - boost::optional* opTime, - boost::optional* electionId, - const write_ops::InsertCommandRequest& request) { - auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); - - std::vector>> - batchesToCommit; +std::shared_ptr& extractFromPair( + std::pair, size_t>& pair) { + return pair.first; +} - for (auto& [batch, _] : *batches) { - if (timeseries::bucket_catalog::claimWriteBatchCommitRights(*batch)) { - batchesToCommit.push_back(batch); - } - } +bool commitTimeseriesBucketsAtomically(OperationContext* opCtx, + TimeseriesBatches& batches, + TimeseriesStmtIds&& stmtIds, + boost::optional* opTime, + boost::optional* electionId, + const write_ops::InsertCommandRequest& request) { + auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); + auto batchesToCommit = timeseries::determineBatchesToCommit(batches, extractFromPair); if (batchesToCommit.empty()) { - return TimeseriesAtomicWriteResult::kSuccess; + return true; } - // Sort by bucket so that preparing the commit for each batch cannot deadlock. 
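The removed comment above documents why the batches were sorted by bucket id before preparing commits: two writers preparing overlapping batches in different orders could deadlock. The diff moves that ordering into timeseries::determineBatchesToCommit, but the underlying idiom is the usual one of acquiring resources in a single agreed-upon order; a minimal standalone sketch:

```cpp
#include <algorithm>
#include <mutex>
#include <vector>

struct Bucket {
    int id;            // stable key that every writer agrees on
    std::mutex mutex;  // stands in for the per-batch commit rights / prepare step
};

// Lock all buckets in ascending id order. Because every caller uses the same
// order, two callers with overlapping bucket sets can never deadlock.
std::vector<std::unique_lock<std::mutex>> lockInOrder(std::vector<Bucket*> buckets) {
    std::sort(buckets.begin(), buckets.end(), [](const Bucket* a, const Bucket* b) {
        return a->id < b->id;
    });
    std::vector<std::unique_lock<std::mutex>> locks;
    locks.reserve(buckets.size());
    for (Bucket* bucket : buckets)
        locks.emplace_back(bucket->mutex);
    return locks;
}
```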
- std::sort(batchesToCommit.begin(), batchesToCommit.end(), [](auto left, auto right) { - return left.get()->bucketHandle.bucketId.oid < right.get()->bucketHandle.bucketId.oid; - }); - Status abortStatus = Status::OK(); ScopeGuard batchGuard{[&] { for (auto batch : batchesToCommit) { @@ -2452,32 +2429,16 @@ TimeseriesAtomicWriteResult commitTimeseriesBucketsAtomically( auto prepareCommitStatus = prepareCommit(bucketCatalog, batch); if (!prepareCommitStatus.isOK()) { abortStatus = prepareCommitStatus; - return TimeseriesAtomicWriteResult::kContinuableError; + return false; } - if (batch.get()->numPreviouslyCommittedMeasurements == 0) { - insertOps.push_back(makeTimeseriesInsertOp( - batch, - metadata, - std::move(stmtIds[batch.get()->bucketHandle.bucketId.oid]), - request)); - } else { - if (batch.get()->decompressed.has_value()) { - updateOps.push_back(makeTimeseriesDecompressAndUpdateOp( - opCtx, - batch, - metadata, - std::move(stmtIds[batch.get()->bucketHandle.bucketId.oid]), - request)); - } else { - updateOps.push_back(makeTimeseriesUpdateOp( - opCtx, - batch, - metadata, - std::move(stmtIds[batch.get()->bucketHandle.bucketId.oid]), - request)); - } - } + timeseries::makeWriteRequest(opCtx, + batch, + metadata, + stmtIds, + makeTimeseriesBucketsNamespace(ns(request)), + &insertOps, + &updateOps); } hangTimeseriesInsertBeforeWrite.pauseWhileSet(); @@ -2485,32 +2446,21 @@ TimeseriesAtomicWriteResult commitTimeseriesBucketsAtomically( auto result = write_ops_exec::performAtomicTimeseriesWrites(opCtx, insertOps, updateOps); if (!result.isOK()) { abortStatus = result; - return TimeseriesAtomicWriteResult::kContinuableError; + return false; } - getOpTimeAndElectionId(opCtx, opTime, electionId); + timeseries::getOpTimeAndElectionId(opCtx, opTime, electionId); - bool compressClosedBuckets = true; for (auto batch : batchesToCommit) { auto closedBucket = finish( bucketCatalog, batch, timeseries::bucket_catalog::CommitInfo{*opTime, *electionId}); batch.get().reset(); - if (!closedBucket || !compressClosedBuckets) { + if (!closedBucket) { continue; } - // If this write closed a bucket, compress the bucket - auto ret = performTimeseriesBucketCompression(opCtx, *closedBucket, request); - if (!ret.result.isOK()) { - // Don't try to compress any other buckets if we fail. We're not allowed to - // do more write operations. 
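These hunks drop the bookkeeping that used to abort the batch when compressing a closed bucket failed; compression now goes through tryPerformTimeseriesBucketCompression as a purely best-effort step that leaves the bucket uncompressed on any error without affecting the caller's write result. A standalone sketch of that best-effort shape, with a hypothetical stats sink standing in for the real statistics:

```cpp
#include <exception>
#include <string>
#include <vector>

struct CompressionStats {
    int attempts = 0;
    int failures = 0;
    std::vector<std::string> errors;  // kept only for diagnostics
};

// Best-effort helper: any failure is recorded and swallowed, never propagated,
// so the surrounding write keeps whatever result it already produced.
template <typename CompressFn>
void tryCompress(CompressFn compress, CompressionStats& stats) {
    ++stats.attempts;
    try {
        compress();
    } catch (const std::exception& ex) {
        ++stats.failures;
        stats.errors.push_back(ex.what());
    }
}
```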
- compressClosedBuckets = false; - } - if (!ret.canContinue) { - abortStatus = ret.result.getStatus(); - return TimeseriesAtomicWriteResult::kNonContinuableError; - } + tryPerformTimeseriesBucketCompression(opCtx, *closedBucket, request); } } catch (const DBException& ex) { abortStatus = ex.toStatus(); @@ -2518,7 +2468,7 @@ TimeseriesAtomicWriteResult commitTimeseriesBucketsAtomically( } batchGuard.dismiss(); - return TimeseriesAtomicWriteResult::kSuccess; + return true; } // For sharded time-series collections, we need to use the granularity from the config @@ -2571,14 +2521,14 @@ void rebuildOptionsWithGranularityFromConfigServer(OperationContext* opCtx, } } -std::tuple -insertIntoBucketCatalog(OperationContext* opCtx, - size_t start, - size_t numDocs, - const std::vector& indices, - std::vector* errors, - bool* containsRetry, - const write_ops::InsertCommandRequest& request) { +std::tuple insertIntoBucketCatalog( + OperationContext* opCtx, + size_t start, + size_t numDocs, + const std::vector& indices, + std::vector* errors, + bool* containsRetry, + const write_ops::InsertCommandRequest& request) { auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); auto bucketsNs = makeTimeseriesBucketsNamespace(ns(request)); @@ -2586,12 +2536,7 @@ insertIntoBucketCatalog(OperationContext* opCtx, // invalidated. auto catalog = CollectionCatalog::get(opCtx); auto bucketsColl = catalog->lookupCollectionByNamespace(opCtx, bucketsNs); - uassert(ErrorCodes::NamespaceNotFound, - "Could not find time-series buckets collection for write", - bucketsColl); - uassert(ErrorCodes::InvalidOptions, - "Time-series buckets collection is missing time-series options", - bucketsColl->getTimeseriesOptions()); + timeseries::assertTimeseriesBucketsCollection(bucketsColl); auto timeSeriesOptions = *bucketsColl->getTimeseriesOptions(); @@ -2612,7 +2557,6 @@ insertIntoBucketCatalog(OperationContext* opCtx, TimeseriesBatches batches; TimeseriesStmtIds stmtIds; - bool canContinue = true; auto insert = [&](size_t index) { invariant(start + index < request.getDocuments().size()); @@ -2640,105 +2584,14 @@ insertIntoBucketCatalog(OperationContext* opCtx, : ns(request); auto& measurementDoc = request.getDocuments()[start + index]; - StatusWith swResult = - Status{ErrorCodes::BadValue, "Uninitialized InsertResult"}; - do { - if (feature_flags::gTimeseriesScalabilityImprovements.isEnabled( - serverGlobalParams.featureCompatibility)) { - swResult = timeseries::bucket_catalog::tryInsert( - opCtx, - bucketCatalog, - viewNs, - bucketsColl->getDefaultCollator(), - timeSeriesOptions, - measurementDoc, - canCombineTimeseriesInsertWithOtherClients(opCtx, request)); - - if (swResult.isOK()) { - const auto& insertResult = swResult.getValue(); - - // If the InsertResult doesn't contain a batch, we failed to insert the - // measurement into an open bucket and need to create/reopen a bucket. - if (!insertResult.batch) { - timeseries::bucket_catalog::BucketFindResult bucketFindResult; - BSONObj suitableBucket; - - if (auto* bucketId = stdx::get_if(&insertResult.candidate)) { - DBDirectClient client{opCtx}; - hangTimeseriesInsertBeforeReopeningQuery.pauseWhileSet(); - suitableBucket = - client.findOne(bucketsColl->ns(), BSON("_id" << *bucketId)); - bucketFindResult.fetchedBucket = true; - } else if (auto* pipeline = stdx::get_if>( - &insertResult.candidate)) { - // Resort to Query-Based reopening approach. 
- DBDirectClient client{opCtx}; - - // Ensure we have a index on meta and time for the time-series - // collection before performing the query. Without the index we - // will perform a full collection scan which could cause us to - // take a performance hit. - if (timeseries::collectionHasIndexSupportingReopeningQuery( - opCtx, bucketsColl->getIndexCatalog(), timeSeriesOptions)) { - hangTimeseriesInsertBeforeReopeningQuery.pauseWhileSet(); - - // Run an aggregation to find a suitable bucket to reopen. - AggregateCommandRequest aggRequest(bucketsColl->ns(), *pipeline); - - auto cursor = uassertStatusOK( - DBClientCursor::fromAggregationRequest(&client, - aggRequest, - false /* secondaryOk - */, false /* - useExhaust*/)); - - if (cursor->more()) { - suitableBucket = cursor->next(); - } - bucketFindResult.queriedBucket = true; - } - } - - boost::optional bucketToReopen = - boost::none; - if (!suitableBucket.isEmpty()) { - auto validator = [&](OperationContext * opCtx, - const BSONObj& bucketDoc) -> auto { - return bucketsColl->checkValidation(opCtx, bucketDoc); - }; - auto bucketToReopen = timeseries::bucket_catalog::BucketToReopen{ - suitableBucket, validator, insertResult.catalogEra}; - bucketFindResult.bucketToReopen = std::move(bucketToReopen); - } - - swResult = timeseries::bucket_catalog::insert( - opCtx, - bucketCatalog, - viewNs, - bucketsColl->getDefaultCollator(), - timeSeriesOptions, - measurementDoc, - canCombineTimeseriesInsertWithOtherClients(opCtx, request), - std::move(bucketFindResult)); - } - } - } else { - timeseries::bucket_catalog::BucketFindResult bucketFindResult; - swResult = timeseries::bucket_catalog::insert( - opCtx, - bucketCatalog, - viewNs, - bucketsColl->getDefaultCollator(), - timeSeriesOptions, - measurementDoc, - canCombineTimeseriesInsertWithOtherClients(opCtx, request), - bucketFindResult); - } - - // If there is an era offset (between the bucket we want to reopen and the - // catalog's current era), we could hit a WriteConflict error indicating we will - // need to refetch a bucket document as it is potentially stale. - } while (!swResult.isOK() && (swResult.getStatus().code() == ErrorCodes::WriteConflict)); + auto swResult = timeseries::attemptInsertIntoBucket( + opCtx, + bucketCatalog, + viewNs, + bucketsColl, + timeSeriesOptions, + measurementDoc, + canCombineTimeseriesInsertWithOtherClients(opCtx, request)); if (auto error = write_ops_exec::generateError( opCtx, swResult.getStatus(), start + index, errors->size())) { @@ -2757,22 +2610,7 @@ insertIntoBucketCatalog(OperationContext* opCtx, // If this insert closed buckets, rewrite to be a compressed column. If we cannot // perform write operations at this point the bucket will be left uncompressed. for (const auto& closedBucket : insertResult.closedBuckets) { - if (!canContinue) { - break; - } - - // If this write closed a bucket, compress the bucket - auto ret = performTimeseriesBucketCompression(opCtx, closedBucket, request); - if (auto error = write_ops_exec::generateError( - opCtx, ret.result.getStatus(), start + index, errors->size())) { - // Bucket compression only fail when we may not try to perform any other - // write operation. When handleError() inside write_ops_exec.cpp return - // false. 
- errors->emplace_back(std::move(*error)); - canContinue = false; - return false; - } - canContinue = ret.canContinue; + tryPerformTimeseriesBucketCompression(opCtx, closedBucket, request); } return true; @@ -2783,12 +2621,12 @@ insertIntoBucketCatalog(OperationContext* opCtx, } else { for (size_t i = 0; i < numDocs; i++) { if (!insert(i) && request.getOrdered()) { - return {std::move(batches), std::move(stmtIds), i, canContinue}; + return {std::move(batches), std::move(stmtIds), i}; } } } - return {std::move(batches), std::move(stmtIds), request.getDocuments().size(), canContinue}; + return {std::move(batches), std::move(stmtIds), request.getDocuments().size()}; } void getTimeseriesBatchResults(OperationContext* opCtx, @@ -2857,30 +2695,25 @@ void getTimeseriesBatchResults(OperationContext* opCtx, } } -TimeseriesAtomicWriteResult performOrderedTimeseriesWritesAtomically( - OperationContext* opCtx, - std::vector* errors, - boost::optional* opTime, - boost::optional* electionId, - bool* containsRetry, - const write_ops::InsertCommandRequest& request) { - auto [batches, stmtIds, numInserted, canContinue] = insertIntoBucketCatalog( +bool performOrderedTimeseriesWritesAtomically(OperationContext* opCtx, + std::vector* errors, + boost::optional* opTime, + boost::optional* electionId, + bool* containsRetry, + const write_ops::InsertCommandRequest& request) { + auto [batches, stmtIds, numInserted] = insertIntoBucketCatalog( opCtx, 0, request.getDocuments().size(), {}, errors, containsRetry, request); - if (!canContinue) { - return TimeseriesAtomicWriteResult::kNonContinuableError; - } hangTimeseriesInsertBeforeCommit.pauseWhileSet(); - auto result = commitTimeseriesBucketsAtomically( - opCtx, &batches, std::move(stmtIds), errors, opTime, electionId, request); - if (result != TimeseriesAtomicWriteResult::kSuccess) { - return result; + if (!commitTimeseriesBucketsAtomically( + opCtx, batches, std::move(stmtIds), opTime, electionId, request)) { + return false; } getTimeseriesBatchResults(opCtx, batches, 0, batches.size(), true, errors, opTime, electionId); - return TimeseriesAtomicWriteResult::kSuccess; + return true; } /** @@ -2902,17 +2735,14 @@ std::vector performUnorderedTimeseriesWrites( bool* containsRetry, const write_ops::InsertCommandRequest& request, absl::flat_hash_map& retryAttemptsForDup) { - auto [batches, bucketStmtIds, _, canContinue] = + auto [batches, bucketStmtIds, _] = insertIntoBucketCatalog(opCtx, start, numDocs, indices, errors, containsRetry, request); hangTimeseriesInsertBeforeCommit.pauseWhileSet(); + bool canContinue = true; std::vector docsToRetry; - if (!canContinue) { - return docsToRetry; - } - size_t itr = 0; for (; itr < batches.size(); ++itr) { auto& [batch, index] = batches[itr]; @@ -2987,19 +2817,9 @@ size_t performOrderedTimeseriesWrites(OperationContext* opCtx, boost::optional* electionId, bool* containsRetry, const write_ops::InsertCommandRequest& request) { - auto result = performOrderedTimeseriesWritesAtomically( - opCtx, errors, opTime, electionId, containsRetry, request); - switch (result) { - case TimeseriesAtomicWriteResult::kSuccess: - return request.getDocuments().size(); - case TimeseriesAtomicWriteResult::kNonContinuableError: - // If we can't continue, we know that 0 were inserted since this function should - // guarantee that the inserts are atomic. 
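performOrderedTimeseriesWrites, simplified by the surrounding hunks, first attempts the all-or-nothing atomic path and falls back to writing documents one at a time only when that attempt reports a recoverable failure. A compact sketch of that try-atomic-then-fallback control flow, with illustrative signatures only:

```cpp
#include <cstddef>
#include <functional>
#include <vector>

// Returns how many documents were durably written. If the atomic attempt
// succeeds, everything went in at once; otherwise each document is written
// individually and an ordered batch stops at the first failure.
size_t performOrderedWrites(const std::vector<int>& docs,
                            const std::function<bool()>& tryAtomically,
                            const std::function<bool(int)>& writeOne) {
    if (tryAtomically())
        return docs.size();
    size_t written = 0;
    for (int doc : docs) {
        if (!writeOne(doc))
            break;  // ordered semantics: later documents are not attempted
        ++written;
    }
    return written;
}
```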
- return 0; - case TimeseriesAtomicWriteResult::kContinuableError: - break; - default: - MONGO_UNREACHABLE; + if (performOrderedTimeseriesWritesAtomically( + opCtx, errors, opTime, electionId, containsRetry, request)) { + return request.getDocuments().size(); } for (size_t i = 0; i < request.getDocuments().size(); ++i) { @@ -3024,7 +2844,7 @@ write_ops::InsertCommandReply performTimeseriesWrites( curOp.done(); Top::get(opCtx->getServiceContext()) .record(opCtx, - ns(request).ns(), + ns(request), LogicalOp::opInsert, Top::LockType::WriteLocked, durationCount(curOp.elapsedTimeExcludingPauses()), @@ -3035,7 +2855,7 @@ write_ops::InsertCommandReply performTimeseriesWrites( uassert(ErrorCodes::OperationNotSupportedInTransaction, str::stream() << "Cannot insert into a time-series collection in a multi-document " "transaction: " - << ns(request), + << ns(request).toStringForErrorMsg(), !opCtx->inMultiDocumentTransaction()); { diff --git a/src/mongo/db/ops/write_ops_exec.h b/src/mongo/db/ops/write_ops_exec.h index 5203cb1fa2f8c..fd0cabff598a0 100644 --- a/src/mongo/db/ops/write_ops_exec.h +++ b/src/mongo/db/ops/write_ops_exec.h @@ -29,26 +29,43 @@ #pragma once +#include #include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/catalog/collection_operation_source.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/single_write_result_gen.h" #include "mongo/db/ops/update_result.h" #include "mongo/db/ops/write_ops.h" #include "mongo/db/ops/write_ops_exec_util.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { class DeleteRequest; class OpDebug; -class ParsedUpdate; class PlanExecutor; +class UpdateRequest; +class CanonicalQuery; namespace write_ops_exec { @@ -81,7 +98,8 @@ bool handleError(OperationContext* opCtx, WriteResult* out); bool getFleCrudProcessed(OperationContext* opCtx, - const boost::optional& encryptionInfo); + const boost::optional& encryptionInfo, + const boost::optional& tenantId); /** * Returns true if caller should try to insert more documents. Does nothing else if batch is empty. @@ -108,22 +126,22 @@ boost::optional advanceExecutor(OperationContext* opCtx, * applicable). Should be called in a writeConflictRetry loop. */ UpdateResult writeConflictRetryUpsert(OperationContext* opCtx, - const NamespaceString& nsString, + const NamespaceString& nss, CurOp* curOp, OpDebug* opDebug, bool inTransaction, bool remove, bool upsert, boost::optional& docFound, - ParsedUpdate* parsedUpdate); + const UpdateRequest& updateRequest); /** * Executes a findAndModify with remove:true, the returned document is placed into docFound (if * applicable). Should be called in a writeConflictRetry loop. 
*/ long long writeConflictRetryRemove(OperationContext* opCtx, - const NamespaceString& nsString, - DeleteRequest* deleteRequest, + const NamespaceString& nss, + const DeleteRequest& deleteRequest, CurOp* curOp, OpDebug* opDebug, bool inTransaction, @@ -169,6 +187,18 @@ Status performAtomicTimeseriesWrites(OperationContext* opCtx, const std::vector& insertOps, const std::vector& updateOps); +/** + * Runs a time-series update command in a transaction and collects the write result from each + * statement. + * + * Assumes the update command is a retryable write and targeted on the time-series view namespace. + */ +void runTimeseriesRetryableUpdates(OperationContext* opCtx, + const NamespaceString& bucketNs, + const write_ops::UpdateCommandRequest& wholeOp, + std::shared_ptr executor, + write_ops_exec::WriteResult* reply); + /** * Populate 'opDebug' with stats describing the execution of an update operation. Illegal to call * with a null OpDebug pointer. @@ -177,9 +207,9 @@ void recordUpdateResultInOpDebug(const UpdateResult& updateResult, OpDebug* opDe /** * Returns true if an update failure due to a given DuplicateKey error is eligible for retry. - * Requires that parsedUpdate.hasParsedQuery() is true. */ -bool shouldRetryDuplicateKeyException(const ParsedUpdate& parsedUpdate, +bool shouldRetryDuplicateKeyException(const UpdateRequest& updateRequest, + const CanonicalQuery& cq, const DuplicateKeyErrorInfo& errorInfo); /** diff --git a/src/mongo/db/ops/write_ops_exec_test.cpp b/src/mongo/db/ops/write_ops_exec_test.cpp index f977d7878dc72..87a0ed4269b96 100644 --- a/src/mongo/db/ops/write_ops_exec_test.cpp +++ b/src/mongo/db/ops/write_ops_exec_test.cpp @@ -27,14 +27,40 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/db/basic_types.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/record_id_helpers.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/timeseries/bucket_compression.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -122,6 +148,200 @@ TEST_F(WriteOpsExecTest, TestDeleteSizeEstimationLogic) { ASSERT(write_ops::verifySizeEstimate(deleteOpEntry)); } +TEST_F(WriteOpsExecTest, TestInsertRequestSizeEstimationLogic) { + NamespaceString ns = + NamespaceString::createNamespaceString_forTest("db_write_ops_exec_test", "insert_test"); + write_ops::InsertCommandRequest insert(ns); + BSONObj docToInsert(fromjson("{_id: 1, foo: 1}")); + insert.setDocuments({docToInsert}); + ASSERT(write_ops::verifySizeEstimate(insert)); + + // Configure $tenant. 
+ insert.setDollarTenant(mongo::TenantId(mongo::OID::gen())); + ASSERT(write_ops::verifySizeEstimate(insert)); + + // Configure different fields for 'wcb'. + write_ops::WriteCommandRequestBase wcb; + + // stmtId + wcb.setStmtId(2); + insert.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(insert)); + + // stmtIds + wcb.setStmtIds(std::vector{2, 3}); + insert.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(insert)); + + // isTimeseries + wcb.setIsTimeseriesNamespace(true); + insert.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(insert)); + + // collUUID + wcb.setCollectionUUID(UUID::gen()); + insert.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(insert)); + + // encryptionInfo + wcb.setEncryptionInformation( + EncryptionInformation(fromjson("{schema: 'I love encrypting and protecting my data'}"))); + insert.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(insert)); + + // originalQuery + wcb.setOriginalQuery(fromjson("{field: 'value'}")); + insert.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(insert)); + + // originalCollation + wcb.setOriginalCollation(fromjson("{locale: 'fr'}")); + insert.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(insert)); +} + +TEST_F(WriteOpsExecTest, TestUpdateRequestSizeEstimationLogic) { + NamespaceString ns = + NamespaceString::createNamespaceString_forTest("db_write_ops_exec_test", "update_test"); + write_ops::UpdateCommandRequest update(ns); + + const BSONObj updateStmt = fromjson("{$set: {a: 5}}"); + auto mod = write_ops::UpdateModification::parseFromClassicUpdate(updateStmt); + write_ops::UpdateOpEntry updateOpEntry(BSON("_id" << 1), std::move(mod)); + update.setUpdates({updateOpEntry}); + + ASSERT(write_ops::verifySizeEstimate(update)); + + // Configure $tenant. + update.setDollarTenant(mongo::TenantId(mongo::OID::gen())); + ASSERT(write_ops::verifySizeEstimate(update)); + + // Configure different fields for 'wcb'. + write_ops::WriteCommandRequestBase wcb; + + // stmtId + wcb.setStmtId(2); + update.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(update)); + + // stmtIds + wcb.setStmtIds(std::vector{2, 3}); + update.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(update)); + + // isTimeseries + wcb.setIsTimeseriesNamespace(true); + update.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(update)); + + // collUUID + wcb.setCollectionUUID(UUID::gen()); + update.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(update)); + + // encryptionInfo + wcb.setEncryptionInformation( + EncryptionInformation(fromjson("{schema: 'I love encrypting and protecting my data'}"))); + update.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(update)); + + // originalQuery + wcb.setOriginalQuery(fromjson("{field: 'value'}")); + update.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(update)); + + // originalCollation + wcb.setOriginalCollation(fromjson("{locale: 'fr'}")); + update.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(update)); + + // Configure different fields specific to 'UpdateStatementRequest'. + LegacyRuntimeConstants legacyRuntimeConstants; + const auto now = Date_t::now(); + + // At a minimum, $$NOW and $$CLUSTER_TIME must be set. 
+ legacyRuntimeConstants.setLocalNow(now); + legacyRuntimeConstants.setClusterTime(Timestamp(now)); + update.setLegacyRuntimeConstants(legacyRuntimeConstants); + ASSERT(write_ops::verifySizeEstimate(update)); + + // $$JS_SCOPE + BSONObj jsScope = fromjson("{constant: 'I love mapReduce and javascript :D'}"); + legacyRuntimeConstants.setJsScope(jsScope); + update.setLegacyRuntimeConstants(legacyRuntimeConstants); + ASSERT(write_ops::verifySizeEstimate(update)); + + // $$IS_MR + legacyRuntimeConstants.setIsMapReduce(true); + update.setLegacyRuntimeConstants(legacyRuntimeConstants); + ASSERT(write_ops::verifySizeEstimate(update)); + + // $$USER_ROLES + BSONArray arr = BSON_ARRAY(fromjson("{role: 'readWriteAnyDatabase', db: 'admin'}")); + legacyRuntimeConstants.setUserRoles(arr); + update.setLegacyRuntimeConstants(legacyRuntimeConstants); + ASSERT(write_ops::verifySizeEstimate(update)); + + const std::string kLargeString(100 * 1024, 'b'); + BSONObj letParams = BSON("largeStrParam" << kLargeString); + update.setLet(letParams); + ASSERT(write_ops::verifySizeEstimate(update)); +} + +TEST_F(WriteOpsExecTest, TestDeleteRequestSizeEstimationLogic) { + NamespaceString ns = + NamespaceString::createNamespaceString_forTest("db_write_ops_exec_test", "delete_test"); + write_ops::DeleteCommandRequest deleteReq(ns); + // Basic test case. + write_ops::DeleteOpEntry deleteOpEntry(BSON("_id" << 1), false /* multi */); + deleteReq.setDeletes({deleteOpEntry}); + + ASSERT(write_ops::verifySizeEstimate(deleteReq)); + + // Configure $tenant. + deleteReq.setDollarTenant(mongo::TenantId(mongo::OID::gen())); + ASSERT(write_ops::verifySizeEstimate(deleteReq)); + + // Configure different fields for 'wcb'. + write_ops::WriteCommandRequestBase wcb; + + // stmtId + wcb.setStmtId(2); + deleteReq.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(deleteReq)); + + // stmtIds + wcb.setStmtIds(std::vector{2, 3}); + deleteReq.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(deleteReq)); + + // isTimeseries + wcb.setIsTimeseriesNamespace(true); + deleteReq.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(deleteReq)); + + // collUUID + wcb.setCollectionUUID(UUID::gen()); + deleteReq.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(deleteReq)); + + // encryptionInfo + wcb.setEncryptionInformation( + EncryptionInformation(fromjson("{schema: 'I love encrypting and protecting my data'}"))); + deleteReq.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(deleteReq)); + + // originalQuery + wcb.setOriginalQuery(fromjson("{field: 'value'}")); + deleteReq.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(deleteReq)); + + // originalCollation + wcb.setOriginalCollation(fromjson("{locale: 'fr'}")); + deleteReq.setWriteCommandRequestBase(wcb); + ASSERT(write_ops::verifySizeEstimate(deleteReq)); +} + TEST_F(WriteOpsExecTest, PerformAtomicTimeseriesWritesWithTransform) { NamespaceString ns = NamespaceString::createNamespaceString_forTest("db_write_ops_exec_test", "ts"); diff --git a/src/mongo/db/ops/write_ops_exec_util.cpp b/src/mongo/db/ops/write_ops_exec_util.cpp index c5d121ef454b8..9f2b9e15a8efe 100644 --- a/src/mongo/db/ops/write_ops_exec_util.cpp +++ b/src/mongo/db/ops/write_ops_exec_util.cpp @@ -29,7 +29,14 @@ #include "mongo/db/ops/write_ops_exec_util.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" +#include 
"mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -56,7 +63,7 @@ LastOpFixer::~LastOpFixer() { void LastOpFixer::startingOp(const NamespaceString& ns) { // Operations on the local DB aren't replicated, so they don't need to bump the lastOp. - _needToFixLastOp = !ns.isLocal(); + _needToFixLastOp = !ns.isLocalDB(); _opTimeAtLastOpStart = replClientInfo().getLastOp(); } @@ -69,7 +76,7 @@ void LastOpFixer::finishedOpSuccessfully() { void assertCanWrite_inlock(OperationContext* opCtx, const NamespaceString& nss) { uassert(ErrorCodes::PrimarySteppedDown, - str::stream() << "Not primary while writing to " << nss.ns(), + str::stream() << "Not primary while writing to " << nss.toStringForErrorMsg(), repl::ReplicationCoordinator::get(opCtx->getServiceContext()) ->canAcceptWritesFor(opCtx, nss)); diff --git a/src/mongo/db/ops/write_ops_exec_util.h b/src/mongo/db/ops/write_ops_exec_util.h index cf9d80a2fd68b..e618ddf842cbc 100644 --- a/src/mongo/db/ops/write_ops_exec_util.h +++ b/src/mongo/db/ops/write_ops_exec_util.h @@ -29,10 +29,13 @@ #pragma once +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/logv2/log.h" +#include "mongo/util/decorable.h" namespace mongo::write_ops_exec { diff --git a/src/mongo/db/ops/write_ops_parsers.h b/src/mongo/db/ops/write_ops_parsers.h index e3fc36cf7f61c..410ae9a286be6 100644 --- a/src/mongo/db/ops/write_ops_parsers.h +++ b/src/mongo/db/ops/write_ops_parsers.h @@ -29,14 +29,29 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/repl/optime.h" #include "mongo/db/update/document_diff_serialization.h" #include "mongo/stdx/variant.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo { namespace write_ops { diff --git a/src/mongo/db/ops/write_ops_parsers_test.cpp b/src/mongo/db/ops/write_ops_parsers_test.cpp index 5b6777208195f..5e133fddf2621 100644 --- a/src/mongo/db/ops/write_ops_parsers_test.cpp +++ b/src/mongo/db/ops/write_ops_parsers_test.cpp @@ -27,14 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/dbmessage.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/ops/write_ops_parsers_test_helpers.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/repl/optime.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -210,9 +229,9 @@ TEST(CommandWriteOpsParsers, SingleInsert) { const BSONObj obj = BSON("x" << 1); auto cmd = BSON("insert" << ns.coll() << "documents" << BSON_ARRAY(obj)); for (bool seq : {false, true}) { - auto request = toOpMsg(ns.db(), cmd, seq); + auto request = toOpMsg(ns.db_forTest(), cmd, seq); const auto op = InsertOp::parse(request); - ASSERT_EQ(op.getNamespace().ns(), ns.ns()); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns.ns_forTest()); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT(op.getWriteCommandRequestBase().getOrdered()); ASSERT_EQ(op.getDocuments().size(), 1u); @@ -224,7 +243,7 @@ TEST(CommandWriteOpsParsers, EmptyMultiInsertFails) { const auto ns = NamespaceString::createNamespaceString_forTest("test", "foo"); auto cmd = BSON("insert" << ns.coll() << "documents" << BSONArray()); for (bool seq : {false, true}) { - auto request = toOpMsg(ns.db(), cmd, seq); + auto request = toOpMsg(ns.db_forTest(), cmd, seq); ASSERT_THROWS_CODE(InsertOp::parse(request), AssertionException, ErrorCodes::InvalidLength); } } @@ -235,9 +254,9 @@ TEST(CommandWriteOpsParsers, RealMultiInsert) { const BSONObj obj1 = BSON("x" << 1); auto cmd = BSON("insert" << ns.coll() << "documents" << BSON_ARRAY(obj0 << obj1)); for (bool seq : {false, true}) { - auto request = toOpMsg(ns.db(), cmd, seq); + auto request = toOpMsg(ns.db_forTest(), cmd, seq); const auto op = InsertOp::parse(request); - ASSERT_EQ(op.getNamespace().ns(), ns.ns()); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns.ns_forTest()); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT(op.getWriteCommandRequestBase().getOrdered()); ASSERT_EQ(op.getDocuments().size(), 2u); @@ -255,9 +274,9 @@ TEST(CommandWriteOpsParsers, MultiInsertWithStmtId) { auto cmd = BSON("insert" << ns.coll() << "documents" << BSON_ARRAY(obj0 << obj1) << "stmtId" << 10); for (bool seq : {false, true}) { - auto request = toOpMsg(ns.db(), cmd, seq); + auto request = toOpMsg(ns.db_forTest(), cmd, seq); const auto op = InsertOp::parse(request); - ASSERT_EQ(op.getNamespace().ns(), ns.ns()); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns.ns_forTest()); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT(op.getWriteCommandRequestBase().getOrdered()); ASSERT_EQ(op.getDocuments().size(), 2u); @@ -275,9 +294,9 @@ TEST(CommandWriteOpsParsers, MultiInsertWithStmtIdsArray) { auto cmd = BSON("insert" << ns.coll() << "documents" << BSON_ARRAY(obj0 << obj1) 
<< "stmtIds" << BSON_ARRAY(15 << 17)); for (bool seq : {false, true}) { - auto request = toOpMsg(ns.db(), cmd, seq); + auto request = toOpMsg(ns.db_forTest(), cmd, seq); const auto op = InsertOp::parse(request); - ASSERT_EQ(op.getNamespace().ns(), ns.ns()); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns.ns_forTest()); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT(op.getWriteCommandRequestBase().getOrdered()); ASSERT_EQ(op.getDocuments().size(), 2u); @@ -302,9 +321,9 @@ TEST(CommandWriteOpsParsers, UpdateCommandRequest) { << "multi" << multi << "upsert" << upsert << "collation" << collation); auto cmd = BSON("update" << ns.coll() << "updates" << BSON_ARRAY(rawUpdate)); for (bool seq : {false, true}) { - auto request = toOpMsg(ns.db(), cmd, seq); + auto request = toOpMsg(ns.db_forTest(), cmd, seq); auto op = UpdateOp::parse(request); - ASSERT_EQ(op.getNamespace().ns(), ns.ns()); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns.ns_forTest()); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT_EQ(op.getWriteCommandRequestBase().getOrdered(), true); ASSERT_EQ(op.getUpdates().size(), 1u); @@ -339,9 +358,9 @@ TEST(CommandWriteOpsParsers, UpdateWithPipeline) { << "upsert" << upsert << "collation" << collation); auto cmd = BSON("update" << ns.coll() << "updates" << BSON_ARRAY(rawUpdate)); for (bool seq : {false, true}) { - auto request = toOpMsg(ns.db(), cmd, seq); + auto request = toOpMsg(ns.db_forTest(), cmd, seq); auto op = UpdateOp::parse(request); - ASSERT_EQ(op.getNamespace().ns(), ns.ns()); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns.ns_forTest()); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT_EQ(op.getWriteCommandRequestBase().getOrdered(), true); ASSERT_EQ(op.getUpdates().size(), 1u); @@ -372,9 +391,9 @@ TEST(CommandWriteOpsParsers, Remove) { BSON("q" << query << "limit" << (multi ? 0 : 1) << "collation" << collation); auto cmd = BSON("delete" << ns.coll() << "deletes" << BSON_ARRAY(rawDelete)); for (bool seq : {false, true}) { - auto request = toOpMsg(ns.db(), cmd, seq); + auto request = toOpMsg(ns.db_forTest(), cmd, seq); auto op = DeleteOp::parse(request); - ASSERT_EQ(op.getNamespace().ns(), ns.ns()); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns.ns_forTest()); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT_EQ(op.getWriteCommandRequestBase().getOrdered(), true); ASSERT_EQ(op.getDeletes().size(), 1u); @@ -407,7 +426,7 @@ TEST(LegacyWriteOpsParsers, SingleInsert) { auto message = makeUnsupportedOpInsertMessage( ns, &obj, 1, continueOnError ? InsertOption_ContinueOnError : 0); const auto op = InsertOp::parseLegacy(message); - ASSERT_EQ(op.getNamespace().ns(), ns); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT_EQ(!op.getWriteCommandRequestBase().getOrdered(), continueOnError); ASSERT_EQ(op.getDocuments().size(), 1u); @@ -435,7 +454,7 @@ TEST(LegacyWriteOpsParsers, RealMultiInsert) { auto message = makeUnsupportedOpInsertMessage( ns, objs.data(), objs.size(), continueOnError ? 
InsertOption_ContinueOnError : 0); const auto op = InsertOp::parseLegacy(message); - ASSERT_EQ(op.getNamespace().ns(), ns); + ASSERT_EQ(op.getNamespace().ns_forTest(), ns); ASSERT(!op.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT_EQ(!op.getWriteCommandRequestBase().getOrdered(), continueOnError); ASSERT_EQ(op.getDocuments().size(), 2u); diff --git a/src/mongo/db/ops/write_ops_parsers_test_helpers.cpp b/src/mongo/db/ops/write_ops_parsers_test_helpers.cpp index 9118bf1fef48a..1ae042942f5a5 100644 --- a/src/mongo/db/ops/write_ops_parsers_test_helpers.cpp +++ b/src/mongo/db/ops/write_ops_parsers_test_helpers.cpp @@ -27,12 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/ops/write_ops_parsers_test_helpers.h" -#include - namespace mongo { namespace { std::set sequenceFields{"documents", "updates", "deletes", "GARBAGE"}; diff --git a/src/mongo/db/ops/write_ops_parsers_test_helpers.h b/src/mongo/db/ops/write_ops_parsers_test_helpers.h index 9b99406b574ba..753d3a77eaf35 100644 --- a/src/mongo/db/ops/write_ops_parsers_test_helpers.h +++ b/src/mongo/db/ops/write_ops_parsers_test_helpers.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/rpc/op_msg.h" diff --git a/src/mongo/db/ops/write_ops_retryability.cpp b/src/mongo/db/ops/write_ops_retryability.cpp index 3ca6efeb126ea..4b1d408e4bb59 100644 --- a/src/mongo/db/ops/write_ops_retryability.cpp +++ b/src/mongo/db/ops/write_ops_retryability.cpp @@ -28,19 +28,37 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ops/write_ops_retryability.h" - -#include "mongo/bson/util/bson_extract.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/client.h" #include "mongo/db/curop.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_retryability.h" #include "mongo/db/repl/image_collection_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/redaction.h" -#include "mongo/stdx/mutex.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -262,7 +280,10 @@ SingleWriteResult parseOplogEntryForUpdate(const repl::OplogEntry& entry) { BSONObjBuilder upserted; upserted.append(entry.getObject()["_id"]); res.setUpsertedId(upserted.obj()); - } else if (entry.getOpType() == repl::OpTypeEnum::kUpdate) { + } else if (entry.getOpType() == repl::OpTypeEnum::kUpdate || + entry.getOpType() == repl::OpTypeEnum::kDelete) { + // Time-series updates could generate an oplog of type "kDelete". It also implies one + // user-level measurement is modified. 
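+        // The reply therefore reports n=1 and nModified=1 with no upsertedId (covered by the
+        // ParsingDeleteOplogForUpdate case in write_ops_retryability_test.cpp).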
res.setN(1); res.setNModified(1); } else if (entry.getOpType() == repl::OpTypeEnum::kNoop) { diff --git a/src/mongo/db/ops/write_ops_retryability.h b/src/mongo/db/ops/write_ops_retryability.h index 525847f89f4b0..e607787035626 100644 --- a/src/mongo/db/ops/write_ops_retryability.h +++ b/src/mongo/db/ops/write_ops_retryability.h @@ -29,9 +29,16 @@ #pragma once +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/single_write_result_gen.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" namespace mongo { diff --git a/src/mongo/db/ops/write_ops_retryability_test.cpp b/src/mongo/db/ops/write_ops_retryability_test.cpp index 1750a5126f383..9e1d2d147eee3 100644 --- a/src/mongo/db/ops/write_ops_retryability_test.cpp +++ b/src/mongo/db/ops/write_ops_retryability_test.cpp @@ -27,26 +27,49 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/ops/write_ops.h" #include "mongo/db/ops/write_ops_exec.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/ops/write_ops_retryability.h" #include "mongo/db/repl/mock_repl_coord_server_fixture.h" #include "mongo/db/repl/oplog_entry.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/session_catalog.h" +#include "mongo/db/shard_id.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -192,14 +215,18 @@ TEST_F(WriteOpsRetryability, ParseOplogEntryForNestedUpsert) { ASSERT_BSONOBJ_EQ(res.getUpsertedId(), BSON("_id" << 2)); } -TEST_F(WriteOpsRetryability, ShouldFailIfParsingDeleteOplogForUpdate) { +TEST_F(WriteOpsRetryability, ParsingDeleteOplogForUpdate) { auto deleteOplog = makeOplogEntry(repl::OpTime(Timestamp(50, 10), 1), // optime repl::OpTypeEnum::kDelete, // op type NamespaceString::createNamespaceString_forTest("a.b"), // namespace BSON("_id" << 2)); // o - ASSERT_THROWS(parseOplogEntryForUpdate(deleteOplog), AssertionException); + auto res = parseOplogEntryForUpdate(deleteOplog); + + ASSERT_EQ(res.getN(), 1); + ASSERT_EQ(res.getNModified(), 1); + ASSERT_BSONOBJ_EQ(res.getUpsertedId(), BSONObj()); } TEST_F(WriteOpsRetryability, PerformInsertsSuccess) { diff 
--git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp index 09467c988a5e8..755682d9b83de 100644 --- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp +++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.cpp @@ -28,16 +28,28 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/periodic_runner_job_abort_expired_transactions.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/periodic_runner_job_abort_expired_transactions.h" #include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions_local.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction/transaction_participant_gen.h" +#include "mongo/idl/mutable_observer_registry.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/periodic_runner.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -63,6 +75,10 @@ Milliseconds getPeriod(const Argument& transactionLifetimeLimitSeconds) { } // namespace +// Tracks the number of passes the "abortExpiredTransactions" thread makes to abort expired +// transactions. +CounterMetric abortExpiredTransactionsPasses("abortExpiredTransactions.passes"); + auto PeriodicThreadToAbortExpiredTransactions::get(ServiceContext* serviceContext) -> PeriodicThreadToAbortExpiredTransactions& { auto& jobContainer = _serviceDecoration(serviceContext); @@ -111,11 +127,14 @@ void PeriodicThreadToAbortExpiredTransactions::_init(ServiceContext* serviceCont try { killAllExpiredTransactions(opCtx.get()); + abortExpiredTransactionsPasses.increment(1); } catch (ExceptionForCat& ex) { LOGV2_DEBUG(4684101, 2, "Periodic job canceled", "{reason}"_attr = ex.reason()); } }, - getPeriod(gTransactionLifetimeLimitSeconds.load())); + getPeriod(gTransactionLifetimeLimitSeconds.load()), + // TODO(SERVER-74656): Please revisit if this periodic job could be made killable. 
+ false /*isKillableByStepdown*/); _anchor = std::make_shared(periodicRunner->makeJob(std::move(job))); diff --git a/src/mongo/db/periodic_runner_job_abort_expired_transactions.h b/src/mongo/db/periodic_runner_job_abort_expired_transactions.h index b2831c19130df..3914badbdc824 100644 --- a/src/mongo/db/periodic_runner_job_abort_expired_transactions.h +++ b/src/mongo/db/periodic_runner_job_abort_expired_transactions.h @@ -30,6 +30,9 @@ #pragma once #include +#include + +#include #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/persistent_task_store.h b/src/mongo/db/persistent_task_store.h index 8b21e9b9c5bf8..04560e791e95f 100644 --- a/src/mongo/db/persistent_task_store.h +++ b/src/mongo/db/persistent_task_store.h @@ -29,15 +29,34 @@ #pragma once +#include #include - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" namespace mongo { @@ -158,8 +177,8 @@ class PersistentTaskStore { while (cursor->more()) { auto bson = cursor->next(); - auto t = - T::parse(IDLParserContext("PersistentTaskStore:" + _storageNss.toString()), bson); + auto t = T::parse( + IDLParserContext("PersistentTaskStore:" + _storageNss.toStringForErrorMsg()), bson); if (bool shouldContinue = handler(t); !shouldContinue) return; @@ -201,7 +220,7 @@ class PersistentTaskStore { uassert(ErrorCodes::NoMatchingDocument, "No matching document found for query {} on namespace {}"_format( - filter.toString(), _storageNss.toString()), + filter.toString(), _storageNss.toStringForErrorMsg()), upsert || commandResponse.getN() > 0); WriteConcernResult ignoreResult; diff --git a/src/mongo/db/persistent_task_store_test.cpp b/src/mongo/db/persistent_task_store_test.cpp index e463b9773d4e9..9ac8fc6287689 100644 --- a/src/mongo/db/persistent_task_store_test.cpp +++ b/src/mongo/db/persistent_task_store_test.cpp @@ -27,12 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/db/catalog/catalog_test_fixture.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/persistent_task_store.h" -#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/SConscript b/src/mongo/db/pipeline/SConscript index 51191dfcc8752..85249b95859a0 100644 --- a/src/mongo/db/pipeline/SConscript +++ b/src/mongo/db/pipeline/SConscript @@ -205,7 +205,8 @@ env.Library( 'document_path_support.cpp', ], LIBDEPS=[ - "$BUILD_DIR/mongo/db/exec/document_value/document_value", + '$BUILD_DIR/mongo/db/common', + '$BUILD_DIR/mongo/db/exec/document_value/document_value', ], ) @@ -310,6 +311,7 @@ pipelineEnv.Library( 'document_source_out.cpp', 'document_source_plan_cache_stats.cpp', 'document_source_project.cpp', + 'document_source_query_stats.cpp', 'document_source_queue.cpp', 'document_source_redact.cpp', 'document_source_replace_root.cpp', @@ -325,7 +327,6 @@ pipelineEnv.Library( 'document_source_sort_by_count.cpp', 'document_source_streaming_group.cpp', 'document_source_tee_consumer.cpp', - 'document_source_telemetry.cpp', 'document_source_union_with.cpp', 'document_source_unwind.cpp', 'group_from_first_document_transformation.cpp', @@ -362,7 +363,6 @@ pipelineEnv.Library( '$BUILD_DIR/mongo/db/query/collation/collator_interface', '$BUILD_DIR/mongo/db/query/cursor_response_idl', '$BUILD_DIR/mongo/db/query/datetime/date_time_support', - '$BUILD_DIR/mongo/db/query/op_metrics', '$BUILD_DIR/mongo/db/query/query_knobs', '$BUILD_DIR/mongo/db/query/sort_pattern', '$BUILD_DIR/mongo/db/query/stats/stats_gen', @@ -380,6 +380,7 @@ pipelineEnv.Library( '$BUILD_DIR/mongo/db/storage/encryption_hooks', '$BUILD_DIR/mongo/db/storage/index_entry_comparison', '$BUILD_DIR/mongo/db/storage/storage_options', + '$BUILD_DIR/mongo/db/timeseries/catalog_helper', '$BUILD_DIR/mongo/db/update/update_document_diff', '$BUILD_DIR/mongo/db/views/resolved_view', '$BUILD_DIR/mongo/s/is_mongos', @@ -393,6 +394,7 @@ pipelineEnv.Library( 'granularity_rounder', ], LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/catalog/collection_catalog', '$BUILD_DIR/mongo/db/commands/test_commands_enabled', '$BUILD_DIR/mongo/db/dbdirectclient', '$BUILD_DIR/mongo/db/fts/base_fts', @@ -484,7 +486,10 @@ env.Library( 'document_source_merge.idl', 'document_source_merge_modes.idl', 'document_source_merge_spec.cpp', + 'document_source_out.idl', 'document_source_parsing_validators.cpp', + 'document_source_query_stats.idl', + 'document_source_query_stats_validators.cpp', 'document_source_replace_root.idl', 'document_source_set_window_fields.idl', 'document_source_union_with.idl', @@ -496,7 +501,10 @@ env.Library( LIBDEPS=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/exec/document_value/document_value', + '$BUILD_DIR/mongo/db/query/query_stats_parse', + '$BUILD_DIR/mongo/db/serialization_options', '$BUILD_DIR/mongo/db/storage/key_string', + '$BUILD_DIR/mongo/db/timeseries/timeseries_options', '$BUILD_DIR/mongo/idl/idl_parser', '$BUILD_DIR/mongo/s/common_s', 'runtime_constants_idl', @@ -581,7 +589,6 @@ env.CppUnitTest( 'aggregation_request_test.cpp', 'change_stream_document_diff_parser_test.cpp', 'change_stream_event_transform_test.cpp', - 'change_stream_expired_pre_image_remover_test.cpp', 
'change_stream_rewrites_test.cpp', 'change_stream_split_event_helpers_test.cpp', 'dependencies_test.cpp', @@ -593,9 +600,11 @@ env.CppUnitTest( 'document_source_change_stream_add_post_image_test.cpp', 'document_source_change_stream_test.cpp', 'document_source_check_resume_token_test.cpp', + 'document_source_coll_stats_test.cpp', 'document_source_count_test.cpp', 'document_source_current_op_test.cpp', 'document_source_densify_test.cpp', + 'document_source_documents_test.cpp', 'document_source_exchange_test.cpp', 'document_source_facet_test.cpp', 'document_source_find_and_modify_image_lookup_test.cpp', @@ -624,7 +633,7 @@ env.CppUnitTest( 'document_source_skip_test.cpp', 'document_source_sort_by_count_test.cpp', 'document_source_sort_test.cpp', - 'document_source_telemetry_test.cpp', + 'document_source_query_stats_test.cpp', 'document_source_union_with_test.cpp', 'document_source_internal_compute_geo_near_distance_test.cpp', 'document_source_internal_convert_bucket_index_stats_test.cpp', @@ -638,6 +647,7 @@ env.CppUnitTest( 'document_source_internal_unpack_bucket_test/pushdown_computed_meta_projections_test.cpp', 'document_source_internal_unpack_bucket_test/sample_reorder_test.cpp', 'document_source_internal_unpack_bucket_test/sort_reorder_test.cpp', + 'document_source_internal_unpack_bucket_test/limit_reorder_test.cpp', 'document_source_internal_unpack_bucket_test/split_match_on_meta_and_rename_test.cpp', 'document_source_internal_unpack_bucket_test/unpack_bucket_exec_test.cpp', 'document_source_unwind_test.cpp', @@ -698,6 +708,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/change_stream_options', '$BUILD_DIR/mongo/db/change_stream_options_manager', + '$BUILD_DIR/mongo/db/change_stream_pre_image_util', '$BUILD_DIR/mongo/db/change_stream_pre_images_collection_manager', '$BUILD_DIR/mongo/db/change_streams_cluster_parameter', '$BUILD_DIR/mongo/db/cst/cst', @@ -717,9 +728,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/repl/replmocks', '$BUILD_DIR/mongo/db/repl/storage_interface_impl', '$BUILD_DIR/mongo/db/s/sharding_runtime_d', - '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/db/storage/devnull/storage_devnull_core', '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture', '$BUILD_DIR/mongo/s/is_mongos', @@ -768,6 +777,16 @@ env.Benchmark( ], ) +env.Benchmark( + target='window_function_percentile_bm', + source=['window_function/window_function_percentile_bm_fixture.cpp'], + LIBDEPS=[ + '$BUILD_DIR/mongo/db/query/query_test_service_context', + '$BUILD_DIR/mongo/db/service_context_non_d', + 'accumulator', + ], +) + bmEnv = env.Clone() bmEnv.InjectThirdParty(libraries=["benchmark"]) diff --git a/src/mongo/db/pipeline/abt/abt_fallback_mechanism_cq_bm.cpp b/src/mongo/db/pipeline/abt/abt_fallback_mechanism_cq_bm.cpp index 20043272bf069..53142c490831f 100644 --- a/src/mongo/db/pipeline/abt/abt_fallback_mechanism_cq_bm.cpp +++ b/src/mongo/db/pipeline/abt/abt_fallback_mechanism_cq_bm.cpp @@ -28,11 +28,17 @@ */ #include +#include +#include +#include -#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/abt/abt_translate_bm_fixture.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/cqf_command_utils.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_test_service_context.h" namespace 
mongo::optimizer { @@ -56,7 +62,7 @@ class FallBackMechanismCQBenchmark : public ABTTranslateBenchmarkFixture { BSONObj projectSpec) override final { QueryTestServiceContext testServiceContext; auto opCtx = testServiceContext.makeOperationContext(); - auto nss = NamespaceString("test.bm"); + auto nss = NamespaceString::createNamespaceString_forTest("test.bm"); auto findCommand = std::make_unique(nss); findCommand->setFilter(matchSpec); diff --git a/src/mongo/db/pipeline/abt/abt_fallback_mechanism_pipeline_bm.cpp b/src/mongo/db/pipeline/abt/abt_fallback_mechanism_pipeline_bm.cpp index 6de2ee842e023..f7e330d40d068 100644 --- a/src/mongo/db/pipeline/abt/abt_fallback_mechanism_pipeline_bm.cpp +++ b/src/mongo/db/pipeline/abt/abt_fallback_mechanism_pipeline_bm.cpp @@ -28,12 +28,21 @@ */ #include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/abt/abt_translate_bm_fixture.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/cqf_command_utils.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::optimizer { namespace { @@ -49,7 +58,7 @@ class FallBackMechanismPipelineBenchmark : public ABTTranslateBenchmarkFixture { QueryTestServiceContext testServiceContext; auto opCtx = testServiceContext.makeOperationContext(); - auto nss = NamespaceString("test.bm"); + auto nss = NamespaceString::createNamespaceString_forTest("test.bm"); auto expCtx = make_intrusive(opCtx.get(), nss); std::unique_ptr parsedPipeline = diff --git a/src/mongo/db/pipeline/abt/abt_optimization_test.cpp b/src/mongo/db/pipeline/abt/abt_optimization_test.cpp index de36df51e8b8b..193aa502c7a63 100644 --- a/src/mongo/db/pipeline/abt/abt_optimization_test.cpp +++ b/src/mongo/db/pipeline/abt/abt_optimization_test.cpp @@ -28,11 +28,31 @@ */ +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/abt/utils.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/metadata_factory.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h" -#include "mongo/unittest/golden_test.h" +#include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/abt_translate_bm_fixture.cpp b/src/mongo/db/pipeline/abt/abt_translate_bm_fixture.cpp index e7fefdd0857d6..c721953fc86c9 100644 --- a/src/mongo/db/pipeline/abt/abt_translate_bm_fixture.cpp +++ b/src/mongo/db/pipeline/abt/abt_translate_bm_fixture.cpp @@ -29,8 +29,17 @@ #include "mongo/db/pipeline/abt/abt_translate_bm_fixture.h" +#include +#include + +#include +#include + 
+#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/json.h" +#include "mongo/bson/json.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/abt/abt_translate_bm_fixture.h b/src/mongo/db/pipeline/abt/abt_translate_bm_fixture.h index 7e53b94016dd4..cbcfd5a685acf 100644 --- a/src/mongo/db/pipeline/abt/abt_translate_bm_fixture.h +++ b/src/mongo/db/pipeline/abt/abt_translate_bm_fixture.h @@ -29,11 +29,11 @@ #pragma once -#include "mongo/platform/basic.h" - #include +#include #include "mongo/bson/bsonobj.h" +#include "mongo/platform/basic.h" namespace mongo { diff --git a/src/mongo/db/pipeline/abt/abt_translate_cq_bm.cpp b/src/mongo/db/pipeline/abt/abt_translate_cq_bm.cpp index 9816bbb5f64bd..135b5ebc3561d 100644 --- a/src/mongo/db/pipeline/abt/abt_translate_cq_bm.cpp +++ b/src/mongo/db/pipeline/abt/abt_translate_cq_bm.cpp @@ -28,12 +28,25 @@ */ #include +#include +#include +#include -#include "mongo/bson/bsonobjbuilder.h" +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/abt/abt_translate_bm_fixture.h" #include "mongo/db/pipeline/abt/canonical_query_translation.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/cqf_command_utils.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/query/query_test_service_context.h" namespace mongo::optimizer { @@ -56,7 +69,7 @@ class CanonicalQueryABTTranslate : public ABTTranslateBenchmarkFixture { BSONObj projectSpec) override final { QueryTestServiceContext testServiceContext; auto opCtx = testServiceContext.makeOperationContext(); - auto nss = NamespaceString("test.bm"); + auto nss = NamespaceString::createNamespaceString_forTest("test.bm"); Metadata metadata{{}}; auto prefixId = PrefixId::createForTests(); diff --git a/src/mongo/db/pipeline/abt/abt_translate_pipeline_bm.cpp b/src/mongo/db/pipeline/abt/abt_translate_pipeline_bm.cpp index 684d676af0dcd..7ea6d73d74cbc 100644 --- a/src/mongo/db/pipeline/abt/abt_translate_pipeline_bm.cpp +++ b/src/mongo/db/pipeline/abt/abt_translate_pipeline_bm.cpp @@ -28,13 +28,29 @@ */ #include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/abt/abt_translate_bm_fixture.h" #include "mongo/db/pipeline/abt/document_source_visitor.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/cqf_command_utils.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::optimizer { namespace { @@ -62,8 +78,8 @@ class PipelineABTTranslateBenchmark : public ABTTranslateBenchmarkFixture { const 
std::vector& pipeline) override final { QueryTestServiceContext testServiceContext; auto opCtx = testServiceContext.makeOperationContext(); - auto expCtx = - make_intrusive(opCtx.get(), NamespaceString("test.bm")); + auto expCtx = make_intrusive( + opCtx.get(), NamespaceString::createNamespaceString_forTest("test.bm")); Metadata metadata{{}}; auto prefixId = PrefixId::createForTests(); diff --git a/src/mongo/db/pipeline/abt/abt_translation_test.cpp b/src/mongo/db/pipeline/abt/abt_translation_test.cpp index ef3fdf34cff86..cee6579376cc1 100644 --- a/src/mongo/db/pipeline/abt/abt_translation_test.cpp +++ b/src/mongo/db/pipeline/abt/abt_translation_test.cpp @@ -27,15 +27,34 @@ * it in the license file. */ +#include +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/abt/canonical_query_translation.h" -#include "mongo/db/pipeline/abt/utils.h" -#include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/metadata_factory.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/query/query_request_helper.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/golden_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/agg_expression_visitor.cpp b/src/mongo/db/pipeline/abt/agg_expression_visitor.cpp index 731192fe20e65..7825e6c2e0df8 100644 --- a/src/mongo/db/pipeline/abt/agg_expression_visitor.cpp +++ b/src/mongo/db/pipeline/abt/agg_expression_visitor.cpp @@ -27,18 +27,38 @@ * it in the license file. 
*/ -#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/docval_to_sbeval.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/pipeline/abt/agg_expression_visitor.h" #include "mongo/db/pipeline/abt/expr_algebrizer_context.h" #include "mongo/db/pipeline/abt/utils.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_multi.h" #include "mongo/db/pipeline/accumulator_percentile.h" +#include "mongo/db/pipeline/expression_visitor.h" #include "mongo/db/pipeline/expression_walker.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/agg_expression_visitor.h b/src/mongo/db/pipeline/abt/agg_expression_visitor.h index b3798bee4dea4..b5534e196cff9 100644 --- a/src/mongo/db/pipeline/abt/agg_expression_visitor.h +++ b/src/mongo/db/pipeline/abt/agg_expression_visitor.h @@ -31,7 +31,9 @@ #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/expression.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" diff --git a/src/mongo/db/pipeline/abt/canonical_query_translation.cpp b/src/mongo/db/pipeline/abt/canonical_query_translation.cpp index 7f0be84bd6d80..8f881c60c1747 100644 --- a/src/mongo/db/pipeline/abt/canonical_query_translation.cpp +++ b/src/mongo/db/pipeline/abt/canonical_query_translation.cpp @@ -29,11 +29,23 @@ #include "mongo/db/pipeline/abt/canonical_query_translation.h" +#include + +#include +#include +#include + #include "mongo/db/pipeline/abt/algebrizer_context.h" #include "mongo/db/pipeline/abt/collation_translation.h" #include "mongo/db/pipeline/abt/match_expression_visitor.h" #include "mongo/db/pipeline/abt/transformer_visitor.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/collation_translation.cpp b/src/mongo/db/pipeline/abt/collation_translation.cpp index 7a98ccd57656f..3d68f69781df5 100644 --- a/src/mongo/db/pipeline/abt/collation_translation.cpp +++ b/src/mongo/db/pipeline/abt/collation_translation.cpp @@ -28,7 +28,21 @@ */ #include "mongo/db/pipeline/abt/collation_translation.h" + +#include +#include + +#include +#include + #include "mongo/db/pipeline/abt/utils.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include 
"mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/document_source_visitor.cpp b/src/mongo/db/pipeline/abt/document_source_visitor.cpp index 2170ab14407aa..6de27e92196d4 100644 --- a/src/mongo/db/pipeline/abt/document_source_visitor.cpp +++ b/src/mongo/db/pipeline/abt/document_source_visitor.cpp @@ -29,51 +29,59 @@ #include "mongo/db/pipeline/abt/document_source_visitor.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/abt/agg_expression_visitor.h" #include "mongo/db/pipeline/abt/algebrizer_context.h" #include "mongo/db/pipeline/abt/collation_translation.h" +#include "mongo/db/pipeline/abt/field_map_builder.h" #include "mongo/db/pipeline/abt/match_expression_visitor.h" #include "mongo/db/pipeline/abt/transformer_visitor.h" #include "mongo/db/pipeline/abt/utils.h" -#include "mongo/db/pipeline/document_source_bucket_auto.h" -#include "mongo/db/pipeline/document_source_coll_stats.h" -#include "mongo/db/pipeline/document_source_current_op.h" -#include "mongo/db/pipeline/document_source_exchange.h" -#include "mongo/db/pipeline/document_source_facet.h" -#include "mongo/db/pipeline/document_source_geo_near.h" -#include "mongo/db/pipeline/document_source_graph_lookup.h" +#include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/document_source_group.h" -#include "mongo/db/pipeline/document_source_index_stats.h" -#include "mongo/db/pipeline/document_source_internal_inhibit_optimization.h" -#include "mongo/db/pipeline/document_source_internal_shard_filter.h" -#include "mongo/db/pipeline/document_source_internal_split_pipeline.h" -#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" #include "mongo/db/pipeline/document_source_limit.h" -#include "mongo/db/pipeline/document_source_list_cached_and_active_users.h" -#include "mongo/db/pipeline/document_source_list_local_sessions.h" -#include "mongo/db/pipeline/document_source_list_sessions.h" #include "mongo/db/pipeline/document_source_lookup.h" #include "mongo/db/pipeline/document_source_match.h" -#include "mongo/db/pipeline/document_source_merge.h" -#include "mongo/db/pipeline/document_source_operation_metrics.h" -#include "mongo/db/pipeline/document_source_out.h" -#include "mongo/db/pipeline/document_source_plan_cache_stats.h" -#include "mongo/db/pipeline/document_source_queue.h" -#include "mongo/db/pipeline/document_source_redact.h" -#include "mongo/db/pipeline/document_source_sample.h" -#include "mongo/db/pipeline/document_source_sample_from_random_cursor.h" -#include "mongo/db/pipeline/document_source_sequential_document_cache.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/document_source_skip.h" #include "mongo/db/pipeline/document_source_sort.h" -#include "mongo/db/pipeline/document_source_tee_consumer.h" -#include "mongo/db/pipeline/document_source_telemetry.h" #include "mongo/db/pipeline/document_source_union_with.h" #include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include 
"mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h" #include "mongo/db/pipeline/visitors/document_source_walker.h" #include "mongo/db/pipeline/visitors/transformer_interface_walker.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/document_source_visitor.h b/src/mongo/db/pipeline/abt/document_source_visitor.h index a1bdfcc1484ee..c522bd5cd932d 100644 --- a/src/mongo/db/pipeline/abt/document_source_visitor.h +++ b/src/mongo/db/pipeline/abt/document_source_visitor.h @@ -29,10 +29,15 @@ #pragma once +#include + #include "mongo/db/pipeline/abt/algebrizer_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/visitors/document_source_visitor_registry.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/expr_algebrizer_context.cpp b/src/mongo/db/pipeline/abt/expr_algebrizer_context.cpp index 516a3d6d2dc77..fe89defa73975 100644 --- a/src/mongo/db/pipeline/abt/expr_algebrizer_context.cpp +++ b/src/mongo/db/pipeline/abt/expr_algebrizer_context.cpp @@ -29,6 +29,10 @@ #include "mongo/db/pipeline/abt/expr_algebrizer_context.h" +#include + +#include "mongo/db/query/optimizer/syntax/expr.h" + namespace mongo::optimizer { ExpressionAlgebrizerContext::ExpressionAlgebrizerContext(const bool assertExprSort, @@ -54,7 +58,7 @@ void ExpressionAlgebrizerContext::push(ABT node) { ABT ExpressionAlgebrizerContext::pop() { uassert(6624428, "Arity violation", !_stack.empty()); - ABT node = _stack.top(); + ABT node = std::move(_stack.top()); _stack.pop(); return node; } diff --git a/src/mongo/db/pipeline/abt/expr_algebrizer_context.h b/src/mongo/db/pipeline/abt/expr_algebrizer_context.h index 0c8d82ce0c8e8..6a41f900c4951 100644 --- a/src/mongo/db/pipeline/abt/expr_algebrizer_context.h +++ b/src/mongo/db/pipeline/abt/expr_algebrizer_context.h @@ -29,11 +29,20 @@ #pragma once +#include +#include +#include #include +#include +#include +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_path.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/field_map_builder.cpp b/src/mongo/db/pipeline/abt/field_map_builder.cpp index 55085817c9236..ef280fa84c0f0 100644 --- a/src/mongo/db/pipeline/abt/field_map_builder.cpp +++ b/src/mongo/db/pipeline/abt/field_map_builder.cpp @@ -28,6 +28,18 @@ */ #include 
"mongo/db/pipeline/abt/field_map_builder.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/path_utils.h" diff --git a/src/mongo/db/pipeline/abt/field_map_builder.h b/src/mongo/db/pipeline/abt/field_map_builder.h index ba21193520edc..b9d3958501073 100644 --- a/src/mongo/db/pipeline/abt/field_map_builder.h +++ b/src/mongo/db/pipeline/abt/field_map_builder.h @@ -29,8 +29,20 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/field_path.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/match_expression_visitor.cpp b/src/mongo/db/pipeline/abt/match_expression_visitor.cpp index 4449cdb5ad4f6..919dd7e31d7b5 100644 --- a/src/mongo/db/pipeline/abt/match_expression_visitor.cpp +++ b/src/mongo/db/pipeline/abt/match_expression_visitor.cpp @@ -28,7 +28,22 @@ */ #include "mongo/db/pipeline/abt/match_expression_visitor.h" + +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/docval_to_sbeval.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/field_ref.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_expr.h" @@ -37,6 +52,7 @@ #include "mongo/db/matcher/expression_internal_eq_hashed_key.h" #include "mongo/db/matcher/expression_internal_expr_comparison.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_path.h" #include "mongo/db/matcher/expression_text.h" #include "mongo/db/matcher/expression_text_noop.h" #include "mongo/db/matcher/expression_tree.h" @@ -45,6 +61,7 @@ #include "mongo/db/matcher/expression_where.h" #include "mongo/db/matcher/expression_where_noop.h" #include "mongo/db/matcher/match_expression_walker.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h" #include "mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" #include "mongo/db/matcher/schema/expression_internal_schema_cond.h" @@ -64,7 +81,14 @@ #include "mongo/db/pipeline/abt/agg_expression_visitor.h" #include "mongo/db/pipeline/abt/expr_algebrizer_context.h" #include "mongo/db/pipeline/abt/utils.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/match_expression_visitor.h b/src/mongo/db/pipeline/abt/match_expression_visitor.h index 5228cc2675786..2fe9f16692a88 100644 --- a/src/mongo/db/pipeline/abt/match_expression_visitor.h +++ 
b/src/mongo/db/pipeline/abt/match_expression_visitor.h @@ -30,7 +30,9 @@ #pragma once #include "mongo/db/matcher/expression.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" diff --git a/src/mongo/db/pipeline/abt/projection_ast_visitor.cpp b/src/mongo/db/pipeline/abt/projection_ast_visitor.cpp deleted file mode 100644 index 04538efc87595..0000000000000 --- a/src/mongo/db/pipeline/abt/projection_ast_visitor.cpp +++ /dev/null @@ -1,146 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/pipeline/abt/projection_ast_visitor.h" - -#include "mongo/db/pipeline/abt/agg_expression_visitor.h" -#include "mongo/db/pipeline/abt/utils.h" -#include "mongo/db/query/projection_ast_path_tracking_visitor.h" - -namespace mongo::optimizer { - -class ProjectionPreVisitor final : public projection_ast::ProjectionASTConstVisitor { -public: - ProjectionPreVisitor(projection_ast::PathTrackingVisitorContext<>* context, - bool isInclusion, - const ProjectionName& rootProjName, - const ProjectionName& scanProjName) - : _context{context}, - _builder(rootProjName, rootProjName == scanProjName), - _isInclusion(isInclusion) { - invariant(_context); - } - - void visit(const projection_ast::ProjectionPositionalASTNode* node) final { - unsupportedProjectionType("ProjectionPositionalASTNode"); - } - - void visit(const projection_ast::ProjectionSliceASTNode* node) final { - unsupportedProjectionType("ProjectionSliceASTNode"); - } - - void visit(const projection_ast::ProjectionElemMatchASTNode* node) final { - unsupportedProjectionType("ProjectionElemMatchASTNode"); - } - - void visit(const projection_ast::ExpressionASTNode* node) final { - unsupportedProjectionType("ExpressionASTNode"); - } - - void visit(const projection_ast::BooleanConstantASTNode* node) final { - const auto& path = _context->fullPath(); - if (_isInclusion) { - const auto isIdField = path == "_id"; - // If current field is _id and _id : 0, then don't include it. 
- if (isIdField && !node->value()) { - return; - } - // In inclusion projection only _id field can be excluded, make sure this is the case. - tassert( - 6684601, "In inclusion projection only _id field can be excluded", node->value()); - builderIntegrateInclusion(path.fullPath()); - } else { - builderIntegrateExclusion(path.fullPath()); - } - } - - void visit(const projection_ast::ProjectionPathASTNode* node) final {} - void visit(const projection_ast::MatchExpressionASTNode* node) final { - unsupportedProjectionType("MatchExpressionASTNode"); - } - - auto generateABT() { - return _builder.generateABT(); - } - -private: - projection_ast::PathTrackingVisitorContext<>* _context; - FieldMapBuilder _builder; - bool _isInclusion; - - void assertSupportedPath(const std::string& path) { - uassert(ErrorCodes::InternalErrorNotSupported, - "Projection contains unsupported numeric path component", - !FieldRef(path).hasNumericPathComponents()); - } - - void builderIntegrateInclusion(const std::string& fullPath) { - assertSupportedPath(fullPath); - _builder.integrateFieldPath(FieldPath(fullPath), - [](const bool isLastElement, FieldMapEntry& entry) { - entry._hasLeadingObj = true; - entry._hasKeep = true; - }); - } - - void builderIntegrateExclusion(const std::string& fullPath) { - assertSupportedPath(fullPath); - _builder.integrateFieldPath(FieldPath(fullPath), - [](const bool isLastElement, FieldMapEntry& entry) { - if (isLastElement) { - entry._hasDrop = true; - } - }); - } - - void unsupportedProjectionType(const std::string& unsupportedNode) const { - uasserted(ErrorCodes::InternalErrorNotSupported, - str::stream() << "Projection node is not supported (type: " << unsupportedNode - << ")"); - } -}; - -void translateProjection(AlgebrizerContext& ctx, const projection_ast::Projection& proj) { - projection_ast::PathTrackingVisitorContext context{}; - const bool isInclusion = proj.type() == projection_ast::ProjectType::kInclusion; - const ProjectionName& rootProjName = ctx.getNode()._rootProjection; - - ProjectionPreVisitor astVisitor{&context, isInclusion, rootProjName, ctx.getScanProjName()}; - projection_ast::PathTrackingWalker walker{&context, {&astVisitor}, {}}; - tree_walker::walk(proj.root(), &walker); - - auto result = astVisitor.generateABT(); - tassert(7021702, "Failed to generate ABT for projection", result); - - auto entry = ctx.getNode(); - const ProjectionName projName = ctx.getNextId("combinedProjection"); - ctx.setNode(projName, projName, std::move(*result), std::move(entry._node)); -} - -} // namespace mongo::optimizer diff --git a/src/mongo/db/pipeline/abt/projection_ast_visitor.h b/src/mongo/db/pipeline/abt/projection_ast_visitor.h deleted file mode 100644 index f0a737bd42c60..0000000000000 --- a/src/mongo/db/pipeline/abt/projection_ast_visitor.h +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
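// The "// IWYU pragma: keep" annotations added to headers throughout this patch mark includes
// that include-what-you-use must not remove even though the including file names no symbol from
// them directly (for example, the include is still needed for definitions used transitively).
// A minimal illustration of the convention, using one of the lines added above:

#include "mongo/db/query/optimizer/node.h"  // IWYU pragma: keep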
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/pipeline/abt/algebrizer_context.h" -#include "mongo/db/pipeline/abt/field_map_builder.h" -#include "mongo/db/query/projection.h" - -namespace mongo::optimizer { -/** - * Populate's ctx's root node with an EvaluationNode encoding the projection. - */ -void translateProjection(AlgebrizerContext& ctx, const projection_ast::Projection& proj); - -} // namespace mongo::optimizer diff --git a/src/mongo/db/pipeline/abt/transformer_visitor.cpp b/src/mongo/db/pipeline/abt/transformer_visitor.cpp index 364dc1172466d..63af94676fc86 100644 --- a/src/mongo/db/pipeline/abt/transformer_visitor.cpp +++ b/src/mongo/db/pipeline/abt/transformer_visitor.cpp @@ -29,11 +29,36 @@ #include "mongo/db/pipeline/abt/transformer_visitor.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/exec/projection_executor.h" +#include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/field_ref.h" #include "mongo/db/pipeline/abt/agg_expression_visitor.h" #include "mongo/db/pipeline/abt/utils.h" -#include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source_replace_root.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/group_from_first_document_transformation.h" #include "mongo/db/pipeline/visitors/transformer_interface_walker.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/transformer_visitor.h b/src/mongo/db/pipeline/abt/transformer_visitor.h index 46c0405fe55da..7b2cf655bfb05 100644 --- a/src/mongo/db/pipeline/abt/transformer_visitor.h +++ b/src/mongo/db/pipeline/abt/transformer_visitor.h @@ -29,13 +29,20 @@ #pragma once +#include +#include + #include "mongo/db/exec/add_fields_projection_executor.h" #include "mongo/db/exec/exclusion_projection_executor.h" #include "mongo/db/exec/inclusion_projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" #include "mongo/db/pipeline/abt/algebrizer_context.h" #include "mongo/db/pipeline/abt/field_map_builder.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/transformer_interface.h" #include 
"mongo/db/pipeline/visitors/transformer_interface_visitor.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/projection.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/abt/utils.cpp b/src/mongo/db/pipeline/abt/utils.cpp index a22424d3580f8..3ddd0cce11d46 100644 --- a/src/mongo/db/pipeline/abt/utils.cpp +++ b/src/mongo/db/pipeline/abt/utils.cpp @@ -29,9 +29,30 @@ #include "mongo/db/pipeline/abt/utils.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/docval_to_sbeval.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo::optimizer { @@ -74,7 +95,7 @@ ABT translateFieldRef(const FieldRef& fieldRef, ABT initial) { if (trailingEmptyPath) { auto arrCase = make(); maybeComposePath(arrCase, result.cast()->getPath()); - maybeComposePath(result, arrCase); + maybeComposePath(result, std::move(arrCase)); } else { result = make(PathTraverse::kSingleLevel, std::move(result)); } diff --git a/src/mongo/db/pipeline/abt/utils.h b/src/mongo/db/pipeline/abt/utils.h index 43bd557ac22d2..729def8770624 100644 --- a/src/mongo/db/pipeline/abt/utils.h +++ b/src/mongo/db/pipeline/abt/utils.h @@ -29,9 +29,19 @@ #pragma once +#include +#include +#include +#include + #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/field_ref.h" #include "mongo/db/pipeline/field_path.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer { diff --git a/src/mongo/db/pipeline/accumulation_statement.cpp b/src/mongo/db/pipeline/accumulation_statement.cpp index d503e143af0eb..3c2c15d87f727 100644 --- a/src/mongo/db/pipeline/accumulation_statement.cpp +++ b/src/mongo/db/pipeline/accumulation_statement.cpp @@ -27,15 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include #include -#include "mongo/db/pipeline/accumulation_statement.h" +#include +#include +#include -#include "mongo/db/exec/document_value/value.h" #include "mongo/db/feature_compatibility_version_documentation.h" +#include "mongo/db/pipeline/accumulation_statement.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/query/allowed_contexts.h" #include "mongo/db/stats/counters.h" diff --git a/src/mongo/db/pipeline/accumulation_statement.h b/src/mongo/db/pipeline/accumulation_statement.h index 8b661cdc67a70..4f6aee94e6590 100644 --- a/src/mongo/db/pipeline/accumulation_statement.h +++ b/src/mongo/db/pipeline/accumulation_statement.h @@ -29,9 +29,31 @@ #pragma once +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator.h b/src/mongo/db/pipeline/accumulator.h index 52e28579ac527..1a4a8de72adbb 100644 --- a/src/mongo/db/pipeline/accumulator.h +++ b/src/mongo/db/pipeline/accumulator.h @@ -29,23 +29,31 @@ #pragma once -#include "mongo/base/string_data.h" -#include "mongo/platform/basic.h" - #include +#include #include +#include +#include +#include #include #include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" #include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/query/stats/stats_gen.h" #include "mongo/db/query/stats/value_utils.h" +#include "mongo/platform/basic.h" +#include "mongo/platform/decimal128.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/summation.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_add_to_set.cpp b/src/mongo/db/pipeline/accumulator_add_to_set.cpp index fce5d90a2b06e..7fe6d53b8989f 100644 --- a/src/mongo/db/pipeline/accumulator_add_to_set.cpp +++ b/src/mongo/db/pipeline/accumulator_add_to_set.cpp @@ -27,16 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/accumulator.h" +#include +#include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_add_to_set.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_avg.cpp b/src/mongo/db/pipeline/accumulator_avg.cpp index 1d1f22ad46aa4..969ebc01b5b13 100644 --- a/src/mongo/db/pipeline/accumulator_avg.cpp +++ b/src/mongo/db/pipeline/accumulator_avg.cpp @@ -27,19 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/exec/sbe/accumulator_sum_value_enum.h" -#include "mongo/db/pipeline/accumulator.h" +#include +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/sbe/accumulator_sum_value_enum.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_avg.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/summation.h" namespace mongo { @@ -63,7 +71,7 @@ Value serializePartialSum(BSONType nonDecimalTotalType, void AccumulatorAvg::processInternal(const Value& input, bool merging) { if (merging) { // We expect an object that contains both a partial sum and a count. - verify(input.getType() == Object); + MONGO_verify(input.getType() == Object); auto partialSumVal = input[stage_builder::partialSumName]; tassert(6422700, "'ps' field must be present", !partialSumVal.missing()); diff --git a/src/mongo/db/pipeline/accumulator_covariance.cpp b/src/mongo/db/pipeline/accumulator_covariance.cpp index 82223ce95e8ab..cb334ee0283d3 100644 --- a/src/mongo/db/pipeline/accumulator_covariance.cpp +++ b/src/mongo/db/pipeline/accumulator_covariance.cpp @@ -27,16 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include - -#include "mongo/db/pipeline/accumulator_for_window_functions.h" +#include +#include #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/pipeline/accumulation_statement.h" -#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/accumulator_for_window_functions.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/window_function/window_function_covariance.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_exp_moving_avg.cpp b/src/mongo/db/pipeline/accumulator_exp_moving_avg.cpp index f2d7cb1bb0f11..8220c49dff73d 100644 --- a/src/mongo/db/pipeline/accumulator_exp_moving_avg.cpp +++ b/src/mongo/db/pipeline/accumulator_exp_moving_avg.cpp @@ -27,17 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include -#include - -#include "mongo/db/pipeline/accumulator.h" +#include +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/pipeline/accumulation_statement.h" -#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_first.cpp b/src/mongo/db/pipeline/accumulator_first.cpp index 673546faee357..55fb7050f3b36 100644 --- a/src/mongo/db/pipeline/accumulator_first.cpp +++ b/src/mongo/db/pipeline/accumulator_first.cpp @@ -27,12 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/accumulator.h" +#include #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_integral.cpp b/src/mongo/db/pipeline/accumulator_integral.cpp index c49b019815cf4..44dd6f921ddd0 100644 --- a/src/mongo/db/pipeline/accumulator_integral.cpp +++ b/src/mongo/db/pipeline/accumulator_integral.cpp @@ -27,14 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/accumulator_for_window_functions.h" +#include +#include +#include #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/pipeline/accumulation_statement.h" -#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/accumulator_for_window_functions.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" +#include "mongo/db/pipeline/window_function/window_function_integral.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_internal_construct_stats.cpp b/src/mongo/db/pipeline/accumulator_internal_construct_stats.cpp index 9bbc1cf520c53..3e6690b32ed2f 100644 --- a/src/mongo/db/pipeline/accumulator_internal_construct_stats.cpp +++ b/src/mongo/db/pipeline/accumulator_internal_construct_stats.cpp @@ -27,17 +27,34 @@ * it in the license file. */ -#include "mongo/bson/bsonobjbuilder.h" +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/docval_to_sbeval.h" #include "mongo/db/pipeline/accumulation_statement.h" #include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/max_diff.h" #include "mongo/db/query/stats/stats_gen.h" #include "mongo/db/query/stats/value_utils.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/pipeline/accumulator_js_reduce.cpp b/src/mongo/db/pipeline/accumulator_js_reduce.cpp index 4ea8186297e2e..d2954b81cdf80 100644 --- a/src/mongo/db/pipeline/accumulator_js_reduce.cpp +++ b/src/mongo/db/pipeline/accumulator_js_reduce.cpp @@ -27,12 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/pipeline/accumulator_js_reduce.h" +#include "mongo/db/pipeline/javascript_execution.h" #include "mongo/db/pipeline/make_js_function.h" #include "mongo/db/pipeline/map_reduce_options_gen.h" +#include "mongo/scripting/engine.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { @@ -197,11 +213,8 @@ void AccumulatorInternalJsReduce::reset() { Document AccumulatorInternalJsReduce::serialize(boost::intrusive_ptr initializer, boost::intrusive_ptr argument, SerializationOptions options) const { - if (options.replacementForLiteralArgs) { - return DOC(kName << DOC("data" << argument->serialize(options) << "eval" - << *options.replacementForLiteralArgs)); - } - return DOC(kName << DOC("data" << argument->serialize(options) << "eval" << _funcSource)); + return DOC(kName << DOC("data" << argument->serialize(options) << "eval" + << options.serializeLiteral(_funcSource))); } REGISTER_ACCUMULATOR(accumulator, AccumulatorJs::parse); @@ -242,21 +255,13 @@ Document AccumulatorJs::serialize(boost::intrusive_ptr initializer, SerializationOptions options) const { MutableDocument args; - args.addField("init", - options.replacementForLiteralArgs ? Value(*options.replacementForLiteralArgs) - : Value(_init)); - args.addField("initArgs", Value(initializer->serialize(options))); - args.addField("accumulate", - options.replacementForLiteralArgs ? Value(*options.replacementForLiteralArgs) - : Value(_accumulate)); - args.addField("accumulateArgs", Value(argument->serialize(options))); - args.addField("merge", - options.replacementForLiteralArgs ? Value(*options.replacementForLiteralArgs) - : Value(_merge)); + args.addField("init", options.serializeLiteral(_init)); + args.addField("initArgs", initializer->serialize(options)); + args.addField("accumulate", options.serializeLiteral(_accumulate)); + args.addField("accumulateArgs", argument->serialize(options)); + args.addField("merge", options.serializeLiteral(_merge)); if (_finalize) { - args.addField("finalize", - options.replacementForLiteralArgs ? 
Value(*options.replacementForLiteralArgs) - : Value(*_finalize)); + args.addField("finalize", options.serializeLiteral(*_finalize)); } args.addField("lang", Value("js"_sd)); return DOC(kName << args.freeze()); diff --git a/src/mongo/db/pipeline/accumulator_js_reduce.h b/src/mongo/db/pipeline/accumulator_js_reduce.h index 8970223c24bb7..e47e46283f590 100644 --- a/src/mongo/db/pipeline/accumulator_js_reduce.h +++ b/src/mongo/db/pipeline/accumulator_js_reduce.h @@ -30,11 +30,24 @@ #pragma once #include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulation_statement.h" #include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_js_test.cpp b/src/mongo/db/pipeline/accumulator_js_test.cpp index 2a44e8e967c25..08f22a4b24ad5 100644 --- a/src/mongo/db/pipeline/accumulator_js_test.cpp +++ b/src/mongo/db/pipeline/accumulator_js_test.cpp @@ -27,19 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_js_reduce.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/process_interface/standalone_process_interface.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/idl/server_parameter_test_util.h" #include "mongo/scripting/engine.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/accumulator_last.cpp b/src/mongo/db/pipeline/accumulator_last.cpp index dc5bd49b859fc..ad69fcd716afe 100644 --- a/src/mongo/db/pipeline/accumulator_last.cpp +++ b/src/mongo/db/pipeline/accumulator_last.cpp @@ -27,12 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/accumulator.h" +#include #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_locf.cpp b/src/mongo/db/pipeline/accumulator_locf.cpp index 1a5d6a4ec7465..f4a601e3cfc86 100644 --- a/src/mongo/db/pipeline/accumulator_locf.cpp +++ b/src/mongo/db/pipeline/accumulator_locf.cpp @@ -27,14 +27,21 @@ * it in the license file. 
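// The serialize() rewrites in the accumulator_js_reduce.cpp hunk above replace the manual
// "options.replacementForLiteralArgs ? ... : ..." branching with
// SerializationOptions::serializeLiteral(), which applies whatever literal policy the options
// carry (see the redaction tests near the end of this diff). The resulting shape of
// AccumulatorJs::serialize, with surrounding code elided:

MutableDocument args;
args.addField("init", options.serializeLiteral(_init));
args.addField("initArgs", initializer->serialize(options));
args.addField("accumulate", options.serializeLiteral(_accumulate));
args.addField("accumulateArgs", argument->serialize(options));
args.addField("merge", options.serializeLiteral(_merge));
if (_finalize) {
    args.addField("finalize", options.serializeLiteral(*_finalize));
}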
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/accumulator_for_window_functions.h" +#include +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/pipeline/accumulation_statement.h" -#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/accumulator_for_window_functions.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_merge_objects.cpp b/src/mongo/db/pipeline/accumulator_merge_objects.cpp index 43e2979e3abf0..cf1ce23c8214c 100644 --- a/src/mongo/db/pipeline/accumulator_merge_objects.cpp +++ b/src/mongo/db/pipeline/accumulator_merge_objects.cpp @@ -27,13 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/pipeline/accumulator.h" +#include +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_min_max.cpp b/src/mongo/db/pipeline/accumulator_min_max.cpp index 21e43263befef..92db4220d67a3 100644 --- a/src/mongo/db/pipeline/accumulator_min_max.cpp +++ b/src/mongo/db/pipeline/accumulator_min_max.cpp @@ -27,15 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/accumulator.h" +#include +#include "mongo/bson/bsonmisc.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/db/pipeline/window_function/window_function_min_max.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_multi.cpp b/src/mongo/db/pipeline/accumulator_multi.cpp index a8dedfcc086b6..bc1895e59a631 100644 --- a/src/mongo/db/pipeline/accumulator_multi.cpp +++ b/src/mongo/db/pipeline/accumulator_multi.cpp @@ -28,8 +28,31 @@ */ #include "mongo/db/pipeline/accumulator_multi.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/sort_pattern.h" -#include "mongo/util/version/releases.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { using FirstLastSense = AccumulatorFirstLastN::Sense; @@ -80,7 +103,7 @@ void AccumulatorN::processInternal(const Value& input, bool merging) { if (merging) { tassert(5787803, "input must be an array when 'merging' is true", input.isArray()); - auto array = input.getArray(); + const auto& array = input.getArray(); for (auto&& val : array) { _processValue(val); } @@ -168,7 +191,6 @@ template AccumulationExpression AccumulatorMinMaxN::parseMinMaxN(ExpressionContext* const expCtx, BSONElement elem, VariablesParseState vps) { - expCtx->sbeGroupCompatibility = SbeCompatibility::notCompatible; auto name = [] { if constexpr (s == MinMaxSense::kMin) { return AccumulatorMinN::getName(); @@ -255,7 +277,6 @@ template AccumulationExpression AccumulatorFirstLastN::parseFirstLastN(ExpressionContext* const expCtx, BSONElement elem, VariablesParseState vps) { - expCtx->sbeGroupCompatibility = SbeCompatibility::notCompatible; auto name = [] { if constexpr (v == Sense::kFirst) { return AccumulatorFirstN::getName(); @@ -466,17 +487,30 @@ Document AccumulatorTopBottomN::serialize( if constexpr (!single) { args.addField(kFieldNameN, Value(initializer->serialize(options))); } - auto serializedArg = argument->serialize(options); - - // If 'argument' contains a field named 'output', this means that we are serializing the - // accumulator's original output expression under the field name 'output'. Otherwise, we are - // serializing a custom argument under the field name 'output'. For instance, a merging $group - // will provide an argument that merges multiple partial groups. - if (auto output = serializedArg[kFieldNameOutput]; !output.missing()) { - args.addField(kFieldNameOutput, Value(output)); + + // If 'argument' is either an ExpressionObject or an ExpressionConstant of object type, then + // we are serializing the original expression under the 'output' field of the object. 
Otherwise, + // we're serializing a custom expression for merging group. + if (auto argObj = dynamic_cast(argument.get())) { + bool foundOutputField = false; + for (auto& child : argObj->getChildExpressions()) { + if (child.first == kFieldNameOutput) { + auto output = child.second->serialize(options); + args.addField(kFieldNameOutput, output); + foundOutputField = true; + break; + } + } + tassert(7773700, "'output' field should be present.", foundOutputField); + } else if (auto argConst = dynamic_cast(argument.get())) { + auto output = argConst->getValue().getDocument()[kFieldNameOutput]; + tassert(7773701, "'output' field should be present.", !output.missing()); + args.addField(kFieldNameOutput, output); } else { + auto serializedArg = argument->serialize(options); args.addField(kFieldNameOutput, serializedArg); } + args.addField(kFieldNameSortBy, Value(_sortPattern.serialize( SortPattern::SortKeySerialization::kForPipelineSerialization, options))); @@ -484,12 +518,13 @@ Document AccumulatorTopBottomN::serialize( } template -std::pair parseAccumulatorTopBottomNSortBy(ExpressionContext* const expCtx, - BSONObj sortBy) { +std::tuple parseAccumulatorTopBottomNSortBy( + ExpressionContext* const expCtx, BSONObj sortBy) { SortPattern sortPattern(sortBy, expCtx); BSONArrayBuilder sortFieldsExpBab; BSONObjIterator sortByBoi(sortBy); + bool hasMeta = false; for (const auto& part : sortPattern) { const auto fieldName = sortByBoi.next().fieldNameStringData(); if (part.expression) { @@ -500,11 +535,12 @@ std::pair parseAccumulatorTopBottomNSortBy(ExpressionCon // sortFields array contains the data we need for sorting. const auto serialized = part.expression->serialize(false); sortFieldsExpBab.append(serialized.getDocument().toBson()); + hasMeta = true; } else { sortFieldsExpBab.append((StringBuilder() << "$" << fieldName).str()); } } - return {sortPattern, sortFieldsExpBab.arr()}; + return {sortPattern, sortFieldsExpBab.arr(), hasMeta}; } template @@ -513,7 +549,12 @@ AccumulationExpression AccumulatorTopBottomN::parseTopBottomN( auto name = AccumulatorTopBottomN::getName(); const auto [n, output, sortBy] = accumulatorNParseArgs(expCtx, elem, name.rawData(), true, vps); - auto [sortPattern, sortFieldsExp] = parseAccumulatorTopBottomNSortBy(expCtx, *sortBy); + auto [sortPattern, sortFieldsExp, hasMeta] = + parseAccumulatorTopBottomNSortBy(expCtx, *sortBy); + + if (hasMeta) { + expCtx->sbeGroupCompatibility = SbeCompatibility::notCompatible; + } // Construct argument expression. If given sortBy: {field1: 1, field2: 1} it will be shaped like // {output: , sortFields: ["$field1", "$field2"]}. This projects out only the @@ -533,7 +574,7 @@ template boost::intrusive_ptr AccumulatorTopBottomN::create( ExpressionContext* expCtx, BSONObj sortBy, bool isRemovable) { return make_intrusive>( - expCtx, parseAccumulatorTopBottomNSortBy(expCtx, sortBy).first, isRemovable); + expCtx, std::get<0>(parseAccumulatorTopBottomNSortBy(expCtx, sortBy)), isRemovable); } template @@ -680,17 +721,17 @@ Value AccumulatorTopBottomN::getValueConst(bool toBeMerged) const }; if constexpr (!single) { - return Value(result); + return Value(std::move(result)); } else { if (toBeMerged) { - return Value(result); + return Value(std::move(result)); } else { if (result.empty()) { // This only occurs in a window function scenario, an accumulator will always have // at least one value processed. 
return Value(BSONNULL); } - return Value(result[0]); + return Value(std::move(result[0])); } } } diff --git a/src/mongo/db/pipeline/accumulator_multi.h b/src/mongo/db/pipeline/accumulator_multi.h index ac1d70aae0c95..a708e45a6a17d 100644 --- a/src/mongo/db/pipeline/accumulator_multi.h +++ b/src/mongo/db/pipeline/accumulator_multi.h @@ -29,11 +29,32 @@ #pragma once +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/exec/sort_key_comparator.h" #include "mongo/db/index/sort_key_generator.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/query/sort_pattern.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_percentile.cpp b/src/mongo/db/pipeline/accumulator_percentile.cpp index 1c8fe9ede6269..4e12e9a885692 100644 --- a/src/mongo/db/pipeline/accumulator_percentile.cpp +++ b/src/mongo/db/pipeline/accumulator_percentile.cpp @@ -28,48 +28,99 @@ */ #include "mongo/db/pipeline/accumulator_percentile.h" + +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/accumulator_percentile_gen.h" #include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { using boost::intrusive_ptr; -REGISTER_ACCUMULATOR_WITH_FEATURE_FLAG(percentile, - AccumulatorPercentile::parseArgs, - feature_flags::gFeatureFlagApproxPercentiles); +REGISTER_ACCUMULATOR(percentile, AccumulatorPercentile::parseArgs); +REGISTER_STABLE_EXPRESSION(percentile, AccumulatorPercentile::parseExpression); -REGISTER_EXPRESSION_WITH_FEATURE_FLAG(percentile, - AccumulatorPercentile::parseExpression, - AllowedWithApiStrict::kNeverInVersion1, - AllowedWithClientType::kAny, - feature_flags::gFeatureFlagApproxPercentiles); +REGISTER_ACCUMULATOR(median, AccumulatorMedian::parseArgs); +REGISTER_STABLE_EXPRESSION(median, AccumulatorMedian::parseExpression); + +Status AccumulatorPercentile::validatePercentileMethod(StringData method) { + if (method != kApproximate) { + return {ErrorCodes::BadValue, + "Currently only 'approximate' can be used as percentile 'method'."}; + } + return Status::OK(); +} +namespace { +PercentileMethod methodNameToEnum(StringData method) { + if (method == AccumulatorPercentile::kApproximate) { + return PercentileMethod::Approximate; + } -REGISTER_ACCUMULATOR_WITH_FEATURE_FLAG(median, - AccumulatorMedian::parseArgs, - feature_flags::gFeatureFlagApproxPercentiles); + // The idl should have validated the input string (see 'validatePercentileMethod()'). 
+ uasserted(7766600, "Currently only approximate percentiles are supported"); +} -REGISTER_EXPRESSION_WITH_FEATURE_FLAG(median, - AccumulatorMedian::parseExpression, - AllowedWithApiStrict::kNeverInVersion1, - AllowedWithClientType::kAny, - feature_flags::gFeatureFlagApproxPercentiles); +StringData percentileMethodEnumToString(PercentileMethod method) { + switch (method) { + case PercentileMethod::Approximate: + return AccumulatorPercentile::kApproximate; + case PercentileMethod::Discrete: + return AccumulatorPercentile::kDiscrete; + case PercentileMethod::Continuous: + return AccumulatorPercentile::kContinuous; + } + MONGO_UNREACHABLE; +} -Status AccumulatorPercentile::validatePercentileArg(const std::vector& pv) { - if (pv.empty()) { - return {ErrorCodes::BadValue, "'p' cannot be an empty array"}; +// Deal with the 'p' field. It's allowed to use constant expressions and variables as long as it +// evaluates to an array of doubles from the range [0.0, 1.0]. +std::vector parseP(ExpressionContext* const expCtx, + BSONElement elem, + VariablesParseState vps) { + auto expr = Expression::parseOperand(expCtx, elem, vps)->optimize(); + ExpressionConstant* constExpr = dynamic_cast(expr.get()); + uassert(7750300, + str::stream() << "The $percentile 'p' field must be an array of " + "constant values, but found value: " + << elem.toString(false, false) << ".", + constExpr); + Value pVals = constExpr->getValue(); + + auto msg = + "The $percentile 'p' field must be an array of numbers from [0.0, 1.0], but found: "_sd; + if (!pVals.isArray() || pVals.getArrayLength() == 0) { + uasserted(7750301, str::stream() << msg << pVals.toString()); } - for (const double& p : pv) { + + std::vector ps; + ps.reserve(pVals.getArrayLength()); + for (const Value& pVal : pVals.getArray()) { + if (!pVal.numeric()) { + uasserted(7750302, str::stream() << msg << pVal.toString()); + } + double p = pVal.coerceToDouble(); if (p < 0 || p > 1) { - return {ErrorCodes::BadValue, - str::stream() << "'p' must be an array of numeric values from [0.0, 1.0] " - "range, but received incorrect value: " - << p}; + uasserted(7750303, str::stream() << msg << p); } + ps.push_back(p); } - return Status::OK(); + return ps; } +} // namespace AccumulationExpression AccumulatorPercentile::parseArgs(ExpressionContext* const expCtx, BSONElement elem, @@ -81,14 +132,16 @@ AccumulationExpression AccumulatorPercentile::parseArgs(ExpressionContext* const elem.type() == BSONType::Object); auto spec = AccumulatorPercentileSpec::parse(IDLParserContext(kName), elem.Obj()); + boost::intrusive_ptr input = Expression::parseOperand(expCtx, spec.getInput().getElement(), vps); - std::vector ps = spec.getP(); - PercentileMethodEnum method = spec.getMethod(); + std::vector ps = parseP(expCtx, spec.getP().getElement(), vps); + + const PercentileMethod method = methodNameToEnum(spec.getMethod()); auto factory = [expCtx, ps, method] { - return AccumulatorPercentile::create(expCtx, ps, static_cast(method)); + return AccumulatorPercentile::create(expCtx, ps, method); }; return {ExpressionConstant::create(expCtx, Value(BSONNULL)) /*initializer*/, @@ -97,11 +150,13 @@ AccumulationExpression AccumulatorPercentile::parseArgs(ExpressionContext* const "$percentile"_sd /*name*/}; } -std::pair /*ps*/, int32_t /*method*/> -AccumulatorPercentile::parsePercentileAndMethod(BSONElement elem) { +std::pair /*ps*/, PercentileMethod> +AccumulatorPercentile::parsePercentileAndMethod(ExpressionContext* expCtx, + BSONElement elem, + VariablesParseState vps) { auto spec = 
AccumulatorPercentileSpec::parse(IDLParserContext(kName), elem.Obj()); - return std::pair, int32_t>(spec.getP(), - static_cast(spec.getMethod())); + return std::make_pair(parseP(expCtx, spec.getP().getElement(), vps), + methodNameToEnum(spec.getMethod())); } boost::intrusive_ptr AccumulatorPercentile::parseExpression( @@ -115,11 +170,11 @@ boost::intrusive_ptr AccumulatorPercentile::parseExpression( boost::intrusive_ptr input = Expression::parseOperand(expCtx, spec.getInput().getElement(), vps); - std::vector ps = spec.getP(); - PercentileMethodEnum method = spec.getMethod(); + std::vector ps = parseP(expCtx, spec.getP().getElement(), vps); + const PercentileMethod method = methodNameToEnum(spec.getMethod()); return make_intrusive>( - expCtx, ps, input, static_cast(method)); + expCtx, ps, input, method); } void AccumulatorPercentile::processInternal(const Value& input, bool merging) { @@ -153,12 +208,12 @@ Value AccumulatorPercentile::getValue(bool toBeMerged) { } namespace { -std::unique_ptr createPercentileAlgorithm(int32_t method) { - switch (static_cast(method)) { - case PercentileMethodEnum::Approximate: +std::unique_ptr createPercentileAlgorithm(PercentileMethod method) { + switch (method) { + case PercentileMethod::Approximate: return createTDigestDistributedClassic(); default: - tasserted(7435800, + uasserted(7435800, str::stream() << "Currently only approximate percentiles are supported"); } return nullptr; @@ -167,7 +222,7 @@ std::unique_ptr createPercentileAlgorithm(int32_t method) { AccumulatorPercentile::AccumulatorPercentile(ExpressionContext* const expCtx, const std::vector& ps, - int32_t method) + PercentileMethod method) : AccumulatorState(expCtx), _percentiles(ps), _algo(createPercentileAlgorithm(method)), @@ -188,8 +243,7 @@ Document AccumulatorPercentile::serialize(boost::intrusive_ptr initi invariant(ec->getValue().nullish()); MutableDocument md; - AccumulatorPercentile::serializeHelper( - argument, options, _percentiles, static_cast(_method), md); + AccumulatorPercentile::serializeHelper(argument, options, _percentiles, _method, md); return DOC(getOpName() << md.freeze()); } @@ -197,18 +251,18 @@ Document AccumulatorPercentile::serialize(boost::intrusive_ptr initi void AccumulatorPercentile::serializeHelper(const boost::intrusive_ptr& argument, SerializationOptions options, std::vector percentiles, - int32_t method, + PercentileMethod method, MutableDocument& md) { md.addField(AccumulatorPercentileSpec::kInputFieldName, Value(argument->serialize(options))); md.addField(AccumulatorPercentileSpec::kPFieldName, Value(std::vector(percentiles.begin(), percentiles.end()))); md.addField(AccumulatorPercentileSpec::kMethodFieldName, - Value(PercentileMethod_serializer(static_cast(method)))); + Value(percentileMethodEnumToString(method))); } intrusive_ptr AccumulatorPercentile::create(ExpressionContext* const expCtx, const std::vector& ps, - int32_t method) { + PercentileMethod method) { return new AccumulatorPercentile(expCtx, ps, method); } @@ -225,10 +279,10 @@ AccumulationExpression AccumulatorMedian::parseArgs(ExpressionContext* const exp boost::intrusive_ptr input = Expression::parseOperand(expCtx, spec.getInput().getElement(), vps); - PercentileMethodEnum method = spec.getMethod(); + const PercentileMethod method = methodNameToEnum(spec.getMethod()); auto factory = [expCtx, method] { - return AccumulatorMedian::create(expCtx, {} /* unused */, static_cast(method)); + return AccumulatorMedian::create(expCtx, {} /* unused */, method); }; return 
{ExpressionConstant::create(expCtx, Value(BSONNULL)) /*initializer*/, @@ -237,10 +291,10 @@ AccumulationExpression AccumulatorMedian::parseArgs(ExpressionContext* const exp "$ median"_sd /*name*/}; } -std::pair /*ps*/, int32_t /*method*/> -AccumulatorMedian::parsePercentileAndMethod(BSONElement elem) { +std::pair /*ps*/, PercentileMethod> AccumulatorMedian::parsePercentileAndMethod( + ExpressionContext* /*expCtx*/, BSONElement elem, VariablesParseState /*vps*/) { auto spec = AccumulatorMedianSpec::parse(IDLParserContext(kName), elem.Obj()); - return std::pair, int32_t>({0.5}, static_cast(spec.getMethod())); + return std::make_pair(std::vector({0.5}), methodNameToEnum(spec.getMethod())); } boost::intrusive_ptr AccumulatorMedian::parseExpression(ExpressionContext* const expCtx, @@ -255,21 +309,23 @@ boost::intrusive_ptr AccumulatorMedian::parseExpression(ExpressionCo boost::intrusive_ptr input = Expression::parseOperand(expCtx, spec.getInput().getElement(), vps); + std::vector p = {0.5}; - PercentileMethodEnum method = spec.getMethod(); + + const PercentileMethod method = methodNameToEnum(spec.getMethod()); return make_intrusive>( - expCtx, p, input, static_cast(method)); + expCtx, p, input, method); } AccumulatorMedian::AccumulatorMedian(ExpressionContext* expCtx, const std::vector& /* unused */, - int32_t method) + PercentileMethod method) : AccumulatorPercentile(expCtx, {0.5} /* ps */, method){}; intrusive_ptr AccumulatorMedian::create(ExpressionContext* expCtx, const std::vector& /* unused */, - int32_t method) { + PercentileMethod method) { return new AccumulatorMedian(expCtx, {} /* unused */, method); } @@ -303,8 +359,7 @@ Document AccumulatorMedian::serialize(boost::intrusive_ptr initializ invariant(ec->getValue().nullish()); MutableDocument md; - AccumulatorMedian::serializeHelper( - argument, options, _percentiles, static_cast(_method), md); + AccumulatorMedian::serializeHelper(argument, options, _percentiles, _method, md); return DOC(getOpName() << md.freeze()); } @@ -312,10 +367,10 @@ Document AccumulatorMedian::serialize(boost::intrusive_ptr initializ void AccumulatorMedian::serializeHelper(const boost::intrusive_ptr& argument, SerializationOptions options, std::vector percentiles, - int32_t method, + PercentileMethod method, MutableDocument& md) { md.addField(AccumulatorPercentileSpec::kInputFieldName, Value(argument->serialize(options))); md.addField(AccumulatorPercentileSpec::kMethodFieldName, - Value(PercentileMethod_serializer(static_cast(method)))); + Value(percentileMethodEnumToString(method))); } } // namespace mongo diff --git a/src/mongo/db/pipeline/accumulator_percentile.h b/src/mongo/db/pipeline/accumulator_percentile.h index 4670f0ca4460e..6ac1acfd8b936 100644 --- a/src/mongo/db/pipeline/accumulator_percentile.h +++ b/src/mongo/db/pipeline/accumulator_percentile.h @@ -29,9 +29,24 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_percentile_gen.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/percentile_algo.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { /** @@ -39,15 +54,19 @@ 
namespace mongo { */ class AccumulatorPercentile : public AccumulatorState { public: + static constexpr auto kApproximate = "approximate"_sd; + static constexpr auto kContinuous = "continuous"_sd; + static constexpr auto kDiscrete = "discrete"_sd; + static constexpr auto kName = "$percentile"_sd; const char* getOpName() const { return kName.rawData(); } /** - * Checks that 'pv' is an array of valid percentile specifications. Called by the IDL file. + * Blocks the percentile methods that aren't supported yet. */ - static Status validatePercentileArg(const std::vector& pv); + static Status validatePercentileMethod(StringData method); /** * Parsing and creating the accumulator. A separate accumulator object is created per group. @@ -62,9 +81,17 @@ class AccumulatorPercentile : public AccumulatorState { static boost::intrusive_ptr create(ExpressionContext* expCtx, const std::vector& ps, - int32_t method); + PercentileMethod method); - AccumulatorPercentile(ExpressionContext* expCtx, const std::vector& ps, int32_t method); + /** + * Necessary for supporting $percentile as window functions and/or as expression. + */ + static std::pair /*ps*/, PercentileMethod> parsePercentileAndMethod( + ExpressionContext* expCtx, BSONElement elem, VariablesParseState vps); + static Value formatFinalValue(int nPercentiles, const std::vector& pctls); + AccumulatorPercentile(ExpressionContext* expCtx, + const std::vector& ps, + PercentileMethod method); /** * Ingressing values and computing the requested percentiles. @@ -77,12 +104,6 @@ class AccumulatorPercentile : public AccumulatorState { */ void reset() final; - /** - * Necessary for supporting $percentile as window functions and/or as expression. - */ - static std::pair /*ps*/, int32_t /*method*/> parsePercentileAndMethod( - BSONElement elem); - static Value formatFinalValue(int nPercentiles, const std::vector& pctls); /** * Serializes this accumulator to a valid MQL accumulation statement that would be legal @@ -105,16 +126,13 @@ class AccumulatorPercentile : public AccumulatorState { static void serializeHelper(const boost::intrusive_ptr& argument, SerializationOptions options, std::vector percentiles, - int32_t method, + PercentileMethod method, MutableDocument& md); protected: std::vector _percentiles; std::unique_ptr _algo; - - // TODO SERVER-74894: This should have been 'PercentileMethodEnum' but the generated - // header from the IDL includes this header, creating a dependency. - const int32_t _method; + const PercentileMethod _method; }; /* @@ -141,20 +159,22 @@ class AccumulatorMedian : public AccumulatorPercentile { static boost::intrusive_ptr create(ExpressionContext* expCtx, const std::vector& unused, - int32_t method); + PercentileMethod method); /** * We are matching the signature of the AccumulatorPercentile for the purpose of using * ExpressionFromAccumulatorQuantile as a template for both $median and $percentile. This is the * reason for passing in `unused` and it will not be referenced. */ - AccumulatorMedian(ExpressionContext* expCtx, const std::vector& unused, int32_t method); + AccumulatorMedian(ExpressionContext* expCtx, + const std::vector& unused, + PercentileMethod method); /** * Necessary for supporting $median as window functions and/or as expression. 
*/ - static std::pair /*ps*/, int32_t /*method*/> parsePercentileAndMethod( - BSONElement elem); + static std::pair /*ps*/, PercentileMethod> parsePercentileAndMethod( + ExpressionContext* expCtx, BSONElement elem, VariablesParseState vps); static Value formatFinalValue(int nPercentiles, const std::vector& pctls); /** @@ -174,7 +194,7 @@ class AccumulatorMedian : public AccumulatorPercentile { static void serializeHelper(const boost::intrusive_ptr& argument, SerializationOptions options, std::vector percentiles, - int32_t method, + PercentileMethod method, MutableDocument& md); }; } // namespace mongo diff --git a/src/mongo/db/pipeline/accumulator_percentile.idl b/src/mongo/db/pipeline/accumulator_percentile.idl index c9e2745bc9ce8..2310432ca2941 100644 --- a/src/mongo/db/pipeline/accumulator_percentile.idl +++ b/src/mongo/db/pipeline/accumulator_percentile.idl @@ -31,15 +31,9 @@ global: - "mongo/db/pipeline/accumulator_percentile.h" imports: - "mongo/db/basic_types.idl" -enums: - PercentileMethod: - description: "The method used to calculate the percentile." - type: string - values: - Approximate: "approximate" - Discrete: "discrete" - Continuous: "continuous" structs: + # $percentile allows expressions for 'input' and 'p' fields, which we cannot evaluate through + # the 'validator' callback and have to accept IDLAnyType. AccumulatorPercentileSpec: description: "Specification for the $percentile accumulator." strict: true @@ -50,14 +44,14 @@ structs: optional: false p: description: "The array listing the percentiles we will calculate." - type: array + type: IDLAnyType optional: false - validator: - {callback: "AccumulatorPercentile::validatePercentileArg"} method: description: "The method we will use to find the percentile." - type: PercentileMethod + type: string optional: false + validator: + {callback: "AccumulatorPercentile::validatePercentileMethod"} AccumulatorMedianSpec: description: "Specification for the $median accumulator." strict: true @@ -68,5 +62,7 @@ structs: optional: false method: description: "The method we will use to find the percentile." - type: PercentileMethod + type: string optional: false + validator: + {callback: "AccumulatorPercentile::validatePercentileMethod"} diff --git a/src/mongo/db/pipeline/accumulator_push.cpp b/src/mongo/db/pipeline/accumulator_push.cpp index 2adb4febbe08b..f9ac4568ac1b4 100644 --- a/src/mongo/db/pipeline/accumulator_push.cpp +++ b/src/mongo/db/pipeline/accumulator_push.cpp @@ -27,16 +27,27 @@ * it in the license file. 
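// Net effect of the $percentile/$median parsing changes above: 'p' may now be any constant
// expression, as long as it folds to a non-empty array of numbers in [0.0, 1.0] (error codes
// 7750300-7750303 cover the failure cases), and 'method' is now a plain string in the IDL that
// validatePercentileMethod() restricts to "approximate" before methodNameToEnum() maps it to
// PercentileMethod::Approximate. A hypothetical spec of the shape the new parser accepts,
// written with the BSON builder macros used in the tests below ("$x" is an arbitrary input
// path chosen for illustration):

auto spec = BSON("$percentile" << BSON("p" << BSON_ARRAY(0.5 << 0.9) << "input"
                                           << "$x"
                                           << "method"
                                           << "approximate"));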
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/accumulator.h" +#include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/db/pipeline/window_function/window_function_push.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_rank.cpp b/src/mongo/db/pipeline/accumulator_rank.cpp index 7df6895a2ed7d..27f373af3c0c2 100644 --- a/src/mongo/db/pipeline/accumulator_rank.cpp +++ b/src/mongo/db/pipeline/accumulator_rank.cpp @@ -27,18 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include -#include - -#include "mongo/db/pipeline/accumulator_for_window_functions.h" +#include +#include +#include +#include +#include #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/pipeline/accumulation_statement.h" -#include "mongo/db/pipeline/expression.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/accumulator_for_window_functions.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" -#include "mongo/util/summation.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_std_dev.cpp b/src/mongo/db/pipeline/accumulator_std_dev.cpp index bcc43d7b66f99..9e22e69fce6a5 100644 --- a/src/mongo/db/pipeline/accumulator_std_dev.cpp +++ b/src/mongo/db/pipeline/accumulator_std_dev.cpp @@ -27,17 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/pipeline/accumulator.h" +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/db/pipeline/window_function/window_function_stddev.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { using boost::intrusive_ptr; @@ -69,7 +75,7 @@ void AccumulatorStdDev::processInternal(const Value& input, bool merging) { } } else { // This is what getValue(true) produced below. - verify(input.getType() == Object); + MONGO_verify(input.getType() == Object); const double m2 = input["m2"].getDouble(); const double mean = input["mean"].getDouble(); const long long count = input["count"].getLong(); diff --git a/src/mongo/db/pipeline/accumulator_sum.cpp b/src/mongo/db/pipeline/accumulator_sum.cpp index 819f16bbf3545..f4c5e693b2888 100644 --- a/src/mongo/db/pipeline/accumulator_sum.cpp +++ b/src/mongo/db/pipeline/accumulator_sum.cpp @@ -27,20 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include -#include +#include +#include -#include "mongo/db/exec/sbe/accumulator_sum_value_enum.h" -#include "mongo/db/pipeline/accumulator.h" +#include +#include +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/sbe/accumulator_sum_value_enum.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_count.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/db/pipeline/window_function/window_function_sum.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/summation.h" namespace mongo { diff --git a/src/mongo/db/pipeline/accumulator_test.cpp b/src/mongo/db/pipeline/accumulator_test.cpp index 5a38b558fb7ae..28f6932607c67 100644 --- a/src/mongo/db/pipeline/accumulator_test.cpp +++ b/src/mongo/db/pipeline/accumulator_test.cpp @@ -28,11 +28,27 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include +#include +#include #include - +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/accumulation_statement.h" @@ -42,10 +58,18 @@ #include "mongo/db/pipeline/accumulator_multi.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -1736,7 +1760,7 @@ TEST(Accumulators, CovarianceWithRandomVariables) { assertCovariance(&expCtx, randomVariables, boost::none); } // Test serialization with redaction -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } @@ -1745,10 +1769,9 @@ Value parseAndSerializeAccumExpr( std::function( ExpressionContext* expCtx, BSONElement, const VariablesParseState&)> func) { SerializationOptions options; - std::string replacementChar = "?"; - options.replacementForLiteralArgs = replacementChar; - options.redactIdentifiers = true; - options.identifierRedactionPolicy = redactFieldNameForTest; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiers = true; + options.transformIdentifiersCallback = applyHmacForTest; auto expCtx = make_intrusive(); auto expr = func(expCtx.get(), obj.firstElement(), 
expCtx->variablesParseState); return expr->serialize(options); @@ -1759,10 +1782,9 @@ Document parseAndSerializeAccum( std::function func) { SerializationOptions options; - std::string replacementChar = "?"; - options.replacementForLiteralArgs = replacementChar; - options.redactIdentifiers = true; - options.identifierRedactionPolicy = redactFieldNameForTest; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiers = true; + options.transformIdentifiersCallback = applyHmacForTest; auto expCtx = make_intrusive(); VariablesParseState vps = expCtx->variablesParseState; @@ -1788,16 +1810,14 @@ TEST(Accumulators, SerializeWithRedaction) { ASSERT_DOCUMENT_EQ_AUTO( // NOLINT R"({ "$accumulator": { - "init": "?", - "initArgs": { - "$const": "?" - }, - "accumulate": "?", + "init": "?string", + "initArgs": "[]", + "accumulate": "?string", "accumulateArgs": [ "$HASH", "$HASH" ], - "merge": "?", + "merge": "?string", "lang": "js" } })", @@ -1811,15 +1831,8 @@ TEST(Accumulators, SerializeWithRedaction) { ASSERT_DOCUMENT_EQ_AUTO( // NOLINT R"({ "$topN": { - "n": { - "$const": "?" - }, - "output": { - "HASH": "$HASH", - "HASH": [ - "$HASH" - ] - }, + "n": "?number", + "output": "$HASH", "sortBy": { "HASH": 1 } @@ -1831,34 +1844,21 @@ TEST(Accumulators, SerializeWithRedaction) { actual = parseAndSerializeAccum(addToSet.firstElement(), &genericParseSingleExpressionAccumulator); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"$addToSet":{"$const":"?"}})", + R"({"$addToSet":"?object"})", actual); - auto sum = BSON("$sum" << BSON_ARRAY("$a" << 5 << 3 << BSON("$sum" << BSON_ARRAY(4 << 6)))); + auto sum = BSON("$sum" << BSON_ARRAY(4 << 6)); actual = parseAndSerializeAccum(sum.firstElement(), &genericParseSingleExpressionAccumulator); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({ - "$sum": [ - "$HASH", - { - "$const": "?" - }, - { - "$const": "?" - }, - { - "$sum": [ - { - "$const": "?" - }, - { - "$const": "?" - } - ] - } - ] - })", + R"({"$sum": "?array"})", + actual); + + sum = BSON("$sum" << BSON_ARRAY("$a" << 5 << 3 << BSON("$sum" << BSON_ARRAY(4 << 6)))); + actual = parseAndSerializeAccum(sum.firstElement(), + &genericParseSingleExpressionAccumulator); + ASSERT_DOCUMENT_EQ_AUTO( // NOLINT + R"({"$sum":["$HASH","?number","?number",{"$sum":"?array"}]})", actual); auto mergeObjs = BSON("$mergeObjects" << BSON_ARRAY("$a" << BSON("b" @@ -1867,7 +1867,7 @@ TEST(Accumulators, SerializeWithRedaction) { parseAndSerializeAccum(mergeObjs.firstElement(), &genericParseSingleExpressionAccumulator); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"$mergeObjects":["$HASH",{"$const":"?"}]})", + R"({"$mergeObjects":["$HASH","?object"]})", actual); auto push = BSON("$push" << BSON("$eq" << BSON_ARRAY("$str" @@ -1875,7 +1875,7 @@ TEST(Accumulators, SerializeWithRedaction) { actual = parseAndSerializeAccum(push.firstElement(), &genericParseSingleExpressionAccumulator); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"$push":{"$eq":["$HASH",{"$const":"?"}]}})", + R"({"$push":{"$eq":["$HASH","?string"]}})", actual); auto top = BSON("$top" << BSON("output" @@ -1886,12 +1886,7 @@ TEST(Accumulators, SerializeWithRedaction) { ASSERT_DOCUMENT_EQ_AUTO( // NOLINT R"({ "$top": { - "output": { - "HASH": "$HASH", - "HASH": [ - "$HASH" - ] - }, + "output": "$HASH", "sortBy": { "HASH": 1 } @@ -1907,22 +1902,14 @@ TEST(Accumulators, SerializeWithRedaction) { R"({ "$max": [ "$HASH", - { - "$const": "?" - }, - { - "$const": "?" - }, + "?number", + "?number", { "$max": [ [ "$HASH", - { - "$const": "?" 
- }, - { - "$const": "?" - } + "?number", + "?number" ] ] } @@ -1938,7 +1925,7 @@ TEST(Accumulators, SerializeWithRedaction) { actual = parseAndSerializeAccum(internalJsReduce.firstElement(), &AccumulatorInternalJsReduce::parseInternalJsReduce); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"$_internalJsReduce":{"data":"$HASH","eval":"?"}})", + R"({"$_internalJsReduce":{"data":"$HASH","eval":"?string"}})", actual); } @@ -1948,7 +1935,7 @@ TEST(AccumulatorsToExpression, SerializeWithRedaction) { auto actual = parseAndSerializeAccumExpr(maxN, &AccumulatorMinMaxN::parseExpression); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"$maxN":{"n":{"$const":"?"},"input":{"$const":"?"}}})", + R"({"$maxN":{"n":"?number","input":"?array"}})", actual.getDocument()); auto firstN = BSON("$firstN" << BSON("input" @@ -1959,7 +1946,7 @@ TEST(AccumulatorsToExpression, SerializeWithRedaction) { actual = parseAndSerializeAccumExpr( firstN, &AccumulatorFirstLastN::parseExpression); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"$firstN":{"n":{"$const":"?"},"input":"$HASH"}})", + R"({"$firstN":{"n":"?string","input":"$HASH"}})", actual.getDocument()); } diff --git a/src/mongo/db/pipeline/aggregate_command.idl b/src/mongo/db/pipeline/aggregate_command.idl index f825b983444ca..ad49775c48843 100644 --- a/src/mongo/db/pipeline/aggregate_command.idl +++ b/src/mongo/db/pipeline/aggregate_command.idl @@ -96,10 +96,10 @@ commands: - privilege: # $planCacheStats resource_pattern: exact_namespace action_type: planCacheRead - - privilege: # $telemetry - agg_stage: telemetry + - privilege: # $queryStats + agg_stage: queryStats resource_pattern: cluster - action_type: telemetryRead + action_type: queryStatsRead - privilege: # $changeStream resource_pattern: exact_namespace action_type: changeStream @@ -283,12 +283,6 @@ commands: cpp_name: passthroughToShard optional: true stability: unstable - # TODO SERVER-65369: $_generateV2ResumeTokens can be removed after 7.0. - $_generateV2ResumeTokens: - description: "Internal parameter to signal whether v2 resume tokens should be generated." - type: optionalBool - cpp_name: generateV2ResumeTokens - stability: internal encryptionInformation: description: "Encryption Information schema and other tokens for CRUD commands" type: EncryptionInformation @@ -317,4 +311,17 @@ commands: type: optionalBool cpp_name: isClusterQueryWithoutShardKeyCmd stability: internal + $_requestResumeToken: + description: "Internal usage only. This flag is used in natural order collection scan for the DocumentSourceCursor to start scan after the given token." + cpp_name: requestResumeToken + type: optionalBool + stability: unstable + $_resumeAfter: + description: "Internal usage only. If present, the collection scan will seek to the + exact RecordId, or return KeyNotFound if it does not exist. Must only be set on + forward collection scans. This field cannot be used in conjunction with 'min' or 'max'." 
+ cpp_name: resumeAfter + type: object_owned + optional: true + stability: unstable diff --git a/src/mongo/db/pipeline/aggregation_context_fixture.h b/src/mongo/db/pipeline/aggregation_context_fixture.h index a4da2c2953d42..6688c3d420c40 100644 --- a/src/mongo/db/pipeline/aggregation_context_fixture.h +++ b/src/mongo/db/pipeline/aggregation_context_fixture.h @@ -33,7 +33,6 @@ #include #include -#include "mongo/db/concurrency/locker_noop_client_observer.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/service_context_test_fixture.h" @@ -52,9 +51,6 @@ class AggregationContextFixture : public ServiceContextTest { boost::none, "unittests", "pipeline_test")) {} AggregationContextFixture(NamespaceString nss) { - auto service = getServiceContext(); - service->registerClientObserver( - std::make_unique()); _opCtx = makeOperationContext(); _expCtx = make_intrusive(_opCtx.get(), nss); unittest::TempDir tempDir("AggregationContextFixture"); @@ -83,11 +79,11 @@ class AggregationContextFixture : public ServiceContextTest { SerializationOptions options; options.verbosity = verbosity; if (performRedaction) { - options.replacementForLiteralArgs = "?"; - options.identifierRedactionPolicy = [](StringData s) -> std::string { + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiersCallback = [](StringData s) -> std::string { return str::stream() << "HASH<" << s << ">"; }; - options.redactIdentifiers = true; + options.transformIdentifiers = true; } std::vector serialized; docSource.serializeToArray(serialized, options); @@ -95,6 +91,21 @@ class AggregationContextFixture : public ServiceContextTest { return serialized[0].getDocument().toBson().getOwned(); } + std::vector redactToArray(const DocumentSource& docSource, + bool performRedaction = true) { + SerializationOptions options; + if (performRedaction) { + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiersCallback = [](StringData s) -> std::string { + return str::stream() << "HASH<" << s << ">"; + }; + options.transformIdentifiers = true; + } + std::vector serialized; + docSource.serializeToArray(serialized, options); + return serialized; + } + private: ServiceContext::UniqueOperationContext _opCtx; boost::intrusive_ptr _expCtx; diff --git a/src/mongo/db/pipeline/aggregation_request_helper.cpp b/src/mongo/db/pipeline/aggregation_request_helper.cpp index 43d0c0491a734..4f3ca1413005a 100644 --- a/src/mongo/db/pipeline/aggregation_request_helper.cpp +++ b/src/mongo/db/pipeline/aggregation_request_helper.cpp @@ -28,20 +28,38 @@ */ #include "mongo/db/pipeline/aggregation_request_helper.h" + +#include +#include +#include +#include +#include +#include + +#include +#include + #include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/commands.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/client.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/document_value/value.h" -#include "mongo/db/query/cursor_request.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/query/query_request_helper.h" -#include 
"mongo/db/repl/read_concern_args.h" -#include "mongo/db/storage/storage_options.h" -#include "mongo/idl/command_generic_argument.h" -#include "mongo/platform/basic.h" +#include "mongo/db/server_options.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/transport/session.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { namespace aggregation_request_helper { @@ -58,8 +76,10 @@ AggregateCommandRequest parseFromBSON(OperationContext* opCtx, const DatabaseName& dbName, const BSONObj& cmdObj, boost::optional explainVerbosity, - bool apiStrict) { - return parseFromBSON(opCtx, parseNs(dbName, cmdObj), cmdObj, explainVerbosity, apiStrict); + bool apiStrict, + const SerializationContext& serializationContext) { + return parseFromBSON( + opCtx, parseNs(dbName, cmdObj), cmdObj, explainVerbosity, apiStrict, serializationContext); } StatusWith parseFromBSONForTests( @@ -68,7 +88,8 @@ StatusWith parseFromBSONForTests( boost::optional explainVerbosity, bool apiStrict) { try { - return parseFromBSON(/*opCtx=*/nullptr, nss, cmdObj, explainVerbosity, apiStrict); + return parseFromBSON( + /*opCtx=*/nullptr, nss, cmdObj, explainVerbosity, apiStrict, SerializationContext()); } catch (const AssertionException&) { return exceptionToStatus(); } @@ -80,8 +101,9 @@ StatusWith parseFromBSONForTests( boost::optional explainVerbosity, bool apiStrict) { try { + // TODO SERVER-75930: pass serializationContext in return parseFromBSON( - /*opCtx=*/nullptr, dbName, cmdObj, explainVerbosity, apiStrict); + /*opCtx=*/nullptr, dbName, cmdObj, explainVerbosity, apiStrict, SerializationContext()); } catch (const AssertionException&) { return exceptionToStatus(); } @@ -91,7 +113,8 @@ AggregateCommandRequest parseFromBSON(OperationContext* opCtx, NamespaceString nss, const BSONObj& cmdObj, boost::optional explainVerbosity, - bool apiStrict) { + bool apiStrict, + const SerializationContext& serializationContext) { // if the command object lacks field 'aggregate' or '$db', we will use the namespace in 'nss'. bool cmdObjChanged = false; @@ -104,9 +127,11 @@ AggregateCommandRequest parseFromBSON(OperationContext* opCtx, } AggregateCommandRequest request(nss); - request = - AggregateCommandRequest::parse(IDLParserContext("aggregate", apiStrict, nss.tenantId()), - cmdObjChanged ? cmdObjBob.obj() : cmdObj); + // TODO SERVER-75930: tenantId in VTS isn't properly detected by call to parse(IDLParseContext&, + // BSONObj&) + request = AggregateCommandRequest::parse( + IDLParserContext("aggregate", apiStrict, nss.tenantId(), serializationContext), + cmdObjChanged ? 
cmdObjBob.obj() : cmdObj); if (explainVerbosity) { uassert(ErrorCodes::FailedToParse, @@ -136,11 +161,12 @@ NamespaceString parseNs(const DatabaseName& dbName, const BSONObj& cmdObj) { << typeName(firstElement.type()), firstElement.type() == BSONType::String); - const NamespaceString nss( + NamespaceString nss( NamespaceStringUtil::parseNamespaceFromRequest(dbName, firstElement.valueStringData())); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace specified '" << nss.ns() << "'", + str::stream() << "Invalid namespace specified '" << nss.toStringForErrorMsg() + << "'", nss.isValid() && !nss.isCollectionlessAggregateNS()); return nss; @@ -193,8 +219,33 @@ void validate(OperationContext* opCtx, requestReshardingResumeTokenElem && requestReshardingResumeTokenElem.boolean(); uassert(ErrorCodes::FailedToParse, str::stream() << AggregateCommandRequest::kRequestReshardingResumeTokenFieldName - << " must only be set for the oplog namespace, not " << nss, + << " must only be set for the oplog namespace, not " + << nss.toStringForErrorMsg(), !hasRequestReshardingResumeToken || nss.isOplog()); + + auto requestResumeTokenElem = cmdObj[AggregateCommandRequest::kRequestResumeTokenFieldName]; + uassert(ErrorCodes::InvalidOptions, + "$_requestResumeToken is not supported without Resharding Improvements", + !requestResumeTokenElem || + resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)); + uassert(ErrorCodes::FailedToParse, + str::stream() << AggregateCommandRequest::kRequestResumeTokenFieldName + << " must be a boolean type", + !requestResumeTokenElem || requestResumeTokenElem.isBoolean()); + bool hasRequestResumeToken = requestResumeTokenElem && requestResumeTokenElem.boolean(); + uassert(ErrorCodes::FailedToParse, + str::stream() << AggregateCommandRequest::kRequestResumeTokenFieldName + << " must be set for non-oplog namespace", + !hasRequestResumeToken || !nss.isOplog()); + if (hasRequestResumeToken) { + auto hintElem = cmdObj[AggregateCommandRequest::kHintFieldName]; + uassert(ErrorCodes::BadValue, + "hint must be {$natural:1} if 'requestResumeToken' is enabled", + hintElem && hintElem.isABSONObj() && + SimpleBSONObjComparator::kInstance.evaluate( + hintElem.Obj() == BSON(query_request_helper::kNaturalSortField << 1))); + } } void validateRequestForAPIVersion(const OperationContext* opCtx, diff --git a/src/mongo/db/pipeline/aggregation_request_helper.h b/src/mongo/db/pipeline/aggregation_request_helper.h index 63d77287db409..42348cd1ff05b 100644 --- a/src/mongo/db/pipeline/aggregation_request_helper.h +++ b/src/mongo/db/pipeline/aggregation_request_helper.h @@ -29,18 +29,31 @@ #pragma once +#include #include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/exchange_spec_gen.h" #include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/db/pipeline/plan_executor_pipeline.h" #include "mongo/db/query/explain_options.h" #include "mongo/db/write_concern_options.h" +#include "mongo/util/assert_util.h" +#include 
"mongo/util/serialization_context.h" namespace mongo { @@ -67,11 +80,13 @@ static constexpr long long kDefaultBatchSize = 101; * then 'explainVerbosity' contains this information. In this case, 'cmdObj' may not itself * contain the explain specifier. Otherwise, 'explainVerbosity' should be boost::none. */ -AggregateCommandRequest parseFromBSON(OperationContext* opCtx, - NamespaceString nss, - const BSONObj& cmdObj, - boost::optional explainVerbosity, - bool apiStrict); +AggregateCommandRequest parseFromBSON( + OperationContext* opCtx, + NamespaceString nss, + const BSONObj& cmdObj, + boost::optional explainVerbosity, + bool apiStrict, + const SerializationContext& serializationContext = SerializationContext()); StatusWith parseFromBSONForTests( NamespaceString nss, @@ -83,11 +98,13 @@ StatusWith parseFromBSONForTests( * Convenience overload which constructs the request's NamespaceString from the given database * name and command object. */ -AggregateCommandRequest parseFromBSON(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& cmdObj, - boost::optional explainVerbosity, - bool apiStrict); +AggregateCommandRequest parseFromBSON( + OperationContext* opCtx, + const DatabaseName& dbName, + const BSONObj& cmdObj, + boost::optional explainVerbosity, + bool apiStrict, + const SerializationContext& serializationContext = SerializationContext()); StatusWith parseFromBSONForTests( const DatabaseName& dbName, diff --git a/src/mongo/db/pipeline/aggregation_request_test.cpp b/src/mongo/db/pipeline/aggregation_request_test.cpp index 04c8aef0dc87d..5b5e92057028d 100644 --- a/src/mongo/db/pipeline/aggregation_request_test.cpp +++ b/src/mongo/db/pipeline/aggregation_request_test.cpp @@ -27,22 +27,46 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/aggregation_request_helper.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/catalog/document_validation.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/query_request_helper.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -256,8 +280,8 @@ TEST(AggregationRequestTest, ShouldSerializeBatchSizeIfSetAndExplainFalse) { } TEST(AggregationRequestTest, ShouldSerialiseAggregateFieldToOneIfCollectionIsAggregateOneNSS) { - NamespaceString nss = - 
NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "a")); + NamespaceString nss = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "a")); AggregateCommandRequest request(nss, std::vector()); auto expectedSerialization = @@ -384,14 +408,15 @@ void parseNSHelper(const std::string& dbName, const BSONObj& invalidFields, ErrorCodes::Error expectedCode) { // Verify that 'validRequest' parses correctly. - auto shouldNotThrow = - aggregation_request_helper::parseNs(DatabaseName(boost::none, dbName), validRequest); + auto shouldNotThrow = aggregation_request_helper::parseNs( + DatabaseName::createDatabaseName_forTest(boost::none, dbName), validRequest); auto invalidRequest = constructInvalidRequest(validRequest, invalidFields); // Verify that the constructed invalid request fails to parse with 'expectedCode'. ASSERT_THROWS_CODE( - aggregation_request_helper::parseNs(DatabaseName(boost::none, "a"), invalidRequest), + aggregation_request_helper::parseNs( + DatabaseName::createDatabaseName_forTest(boost::none, "a"), invalidRequest), AssertionException, expectedCode); } @@ -667,7 +692,8 @@ TEST(AggregationRequestTest, ParseNSShouldReturnAggregateOneNSIfAggregateFieldIs for (auto& one : ones) { const BSONObj inputBSON = fromjson(str::stream() << "{aggregate: " << one << ", pipeline: [], $db: 'a'}"); - ASSERT(aggregation_request_helper::parseNs(DatabaseName(boost::none, "a"), inputBSON) + ASSERT(aggregation_request_helper::parseNs( + DatabaseName::createDatabaseName_forTest(boost::none, "a"), inputBSON) .isCollectionlessAggregateNS()); } } @@ -788,5 +814,43 @@ TEST(AggregationRequestTest, ShouldIgnoreQueryOptions) { ASSERT_OK(aggregation_request_helper::parseFromBSONForTests(nss, inputBson).getStatus()); } +TEST(AggregationRequestTest, ShouldRejectRequestResumeTokenIfNonBooleanType) { + RAIIServerParameterControllerForTest featureFlagController("featureFlagReshardingImprovements", + true); + NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.collection"); + const BSONObj validRequest = fromjson( + "{aggregate: 'collection'," + "pipeline: []," + "$_requestResumeToken: true," + "hint: {$natural: 1}," + "$db: 'a', " + "cursor: {}}"); + const BSONObj nonBoolReshardingResumeToken = fromjson("{$_requestResumeToken: 'yes'}"); + aggregationRequestParseFailureHelper( + nss, validRequest, nonBoolReshardingResumeToken, ErrorCodes::TypeMismatch); +} + +TEST(AggregationRequestTest, ShouldRejectRequestResumeTokenIfOplogNss) { + RAIIServerParameterControllerForTest featureFlagController("featureFlagReshardingImprovements", + true); + NamespaceString nonOplogNss = NamespaceString::createNamespaceString_forTest("a.collection"); + const BSONObj validRequest = fromjson( + "{aggregate: 'collection'," + "pipeline: []," + "$_requestResumeToken: true," + "hint: {$natural: 1}," + "$db: 'a', " + "cursor: {}}"); + ASSERT_OK( + aggregation_request_helper::parseFromBSONForTests(nonOplogNss, validRequest).getStatus()); + + NamespaceString oplogNss = NamespaceString::createNamespaceString_forTest("local.oplog.rs"); + auto status = + aggregation_request_helper::parseFromBSONForTests(oplogNss, validRequest).getStatus(); + ASSERT_NOT_OK(status); + ASSERT_EQ(status, ErrorCodes::FailedToParse); +} + + } // namespace } // namespace mongo diff --git a/src/mongo/db/pipeline/change_stream_document_diff_parser.cpp b/src/mongo/db/pipeline/change_stream_document_diff_parser.cpp index aa5e6cbabe646..74400d509d4ac 100644 --- 
a/src/mongo/db/pipeline/change_stream_document_diff_parser.cpp +++ b/src/mongo/db/pipeline/change_stream_document_diff_parser.cpp @@ -29,7 +29,24 @@ #include "mongo/db/pipeline/change_stream_document_diff_parser.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/field_ref.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" namespace mongo { using doc_diff::ArrayDiffReader; diff --git a/src/mongo/db/pipeline/change_stream_document_diff_parser.h b/src/mongo/db/pipeline/change_stream_document_diff_parser.h index 5e871c177e668..620d59dc8547c 100644 --- a/src/mongo/db/pipeline/change_stream_document_diff_parser.h +++ b/src/mongo/db/pipeline/change_stream_document_diff_parser.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/update/document_diff_serialization.h" diff --git a/src/mongo/db/pipeline/change_stream_document_diff_parser_test.cpp b/src/mongo/db/pipeline/change_stream_document_diff_parser_test.cpp index fa2bae02a6f56..51ebd9eb1c9bd 100644 --- a/src/mongo/db/pipeline/change_stream_document_diff_parser_test.cpp +++ b/src/mongo/db/pipeline/change_stream_document_diff_parser_test.cpp @@ -30,12 +30,14 @@ #include "mongo/db/pipeline/change_stream_document_diff_parser.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/change_stream_event_transform.cpp b/src/mongo/db/pipeline/change_stream_event_transform.cpp index df965830b9d72..d3a4f54b7aa9e 100644 --- a/src/mongo/db/pipeline/change_stream_event_transform.cpp +++ b/src/mongo/db/pipeline/change_stream_event_transform.cpp @@ -29,19 +29,38 @@ #include "mongo/db/pipeline/change_stream_event_transform.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_document_diff_parser.h" -#include "mongo/db/pipeline/change_stream_filter_helpers.h" #include "mongo/db/pipeline/change_stream_helpers.h" #include "mongo/db/pipeline/change_stream_helpers_legacy.h" #include "mongo/db/pipeline/change_stream_preimage_gen.h" -#include "mongo/db/pipeline/document_path_support.h" -#include "mongo/db/pipeline/document_source_change_stream_add_post_image.h" +#include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/pipeline/resume_token.h" -#include "mongo/db/repl/bson_extract_optime.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/oplog_entry_gen.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/db/update/update_oplog_entry_version.h" +#include 
"mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -66,7 +85,7 @@ repl::OpTypeEnum getOplogOpType(const Document& oplog) { Value makeChangeStreamNsField(const NamespaceString& nss) { // For certain types, such as dropDatabase, the collection name may be empty and should be // omitted. We never report the NamespaceString's tenantId in change stream events. - return Value(Document{{"db", nss.dbName().db()}, + return Value(Document{{"db", nss.dbName().serializeWithoutTenantPrefix()}, {"coll", (nss.coll().empty() ? Value() : Value(nss.coll()))}}); } @@ -206,18 +225,17 @@ Document ChangeStreamDefaultEventTransformation::applyTransformation(const Docum if (_changeStreamSpec.getShowRawUpdateDescription()) { updateDescription = input[repl::OplogEntry::kObjectFieldName]; } else { - const auto showDisambiguatedPaths = _changeStreamSpec.getShowExpandedEvents() && - feature_flags::gFeatureFlagChangeStreamsFurtherEnrichedEvents.isEnabled( - serverGlobalParams.featureCompatibility); const auto& deltaDesc = change_stream_document_diff_parser::parseDiff( diffObj.getDocument().toBson()); - updateDescription = Value(Document{ - {"updatedFields", deltaDesc.updatedFields}, - {"removedFields", std::move(deltaDesc.removedFields)}, - {"truncatedArrays", std::move(deltaDesc.truncatedArrays)}, - {"disambiguatedPaths", - showDisambiguatedPaths ? Value(deltaDesc.disambiguatedPaths) : Value()}}); + updateDescription = + Value(Document{{"updatedFields", deltaDesc.updatedFields}, + {"removedFields", std::move(deltaDesc.removedFields)}, + {"truncatedArrays", std::move(deltaDesc.truncatedArrays)}, + {"disambiguatedPaths", + _changeStreamSpec.getShowExpandedEvents() + ? Value(deltaDesc.disambiguatedPaths) + : Value()}}); } } else if (!oplogVersion.missing() || id.missing()) { // This is not a replacement op, and we did not see a valid update version number. diff --git a/src/mongo/db/pipeline/change_stream_event_transform.h b/src/mongo/db/pipeline/change_stream_event_transform.h index eda26b75b03a9..d0931d3339ca2 100644 --- a/src/mongo/db/pipeline/change_stream_event_transform.h +++ b/src/mongo/db/pipeline/change_stream_event_transform.h @@ -29,9 +29,19 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source_change_stream_gen.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/resume_token.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { /** diff --git a/src/mongo/db/pipeline/change_stream_event_transform_test.cpp b/src/mongo/db/pipeline/change_stream_event_transform_test.cpp index 8f09356c895a5..cd534b22342d7 100644 --- a/src/mongo/db/pipeline/change_stream_event_transform_test.cpp +++ b/src/mongo/db/pipeline/change_stream_event_transform_test.cpp @@ -27,25 +27,35 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" -#include "mongo/db/multitenancy_gen.h" -#include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_event_transform.h" -#include "mongo/db/pipeline/change_stream_rewrite_helpers.h" #include "mongo/db/pipeline/change_stream_test_helpers.h" -#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { @@ -85,7 +95,7 @@ TEST(ChangeStreamEventTransformTest, TestDefaultUpdateTransform) { {DocumentSourceChangeStream::kCollectionUuidField, testUuid()}, {DocumentSourceChangeStream::kWallTimeField, Date_t()}, {DocumentSourceChangeStream::kNamespaceField, - Document{{"db", nss.db()}, {"coll", nss.coll()}}}, + Document{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DocumentSourceChangeStream::kDocumentKeyField, documentKey}, { "updateDescription", @@ -100,8 +110,8 @@ TEST(ChangeStreamEventTransformTest, TestDefaultUpdateTransform) { } TEST(ChangeStreamEventTransformTest, TestCreateViewTransform) { - const NamespaceString systemViewNss = - NamespaceString::makeSystemDotViewsNamespace({boost::none, "viewDB"}); + const NamespaceString systemViewNss = NamespaceString::makeSystemDotViewsNamespace( + DatabaseName::createDatabaseName_forTest(boost::none, "viewDB")); const NamespaceString viewNss = NamespaceString::createNamespaceString_forTest(boost::none, "viewDB.view.name"); const auto viewPipeline = @@ -109,7 +119,7 @@ TEST(ChangeStreamEventTransformTest, TestCreateViewTransform) { const auto opDescription = Document{{"viewOn", "baseColl"_sd}, {"pipeline", viewPipeline}}; auto oplogEntry = makeOplogEntry(repl::OpTypeEnum::kInsert, // op type systemViewNss, // namespace - BSON("_id" << viewNss.toString() << "viewOn" + BSON("_id" << viewNss.toString_forTest() << "viewOn" << "baseColl" << "pipeline" << viewPipeline), // o testUuid(), // uuid @@ -125,23 +135,24 @@ TEST(ChangeStreamEventTransformTest, TestCreateViewTransform) { {DocumentSourceChangeStream::kClusterTimeField, kDefaultTs}, {DocumentSourceChangeStream::kWallTimeField, Date_t()}, {DocumentSourceChangeStream::kNamespaceField, - Document{{"db", viewNss.db()}, {"coll", viewNss.coll()}}}, + Document{{"db", viewNss.db_forTest()}, {"coll", viewNss.coll()}}}, {DocumentSourceChangeStream::kOperationDescriptionField, opDescription}}; - ASSERT_DOCUMENT_EQ(applyTransformation(oplogEntry, - NamespaceString::makeCollectionlessAggregateNSS( - 
DatabaseName(boost::none, "viewDB"))), - expectedDoc); + ASSERT_DOCUMENT_EQ( + applyTransformation(oplogEntry, + NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "viewDB"))), + expectedDoc); } TEST(ChangeStreamEventTransformTest, TestCreateViewOnSingleCollection) { - const NamespaceString systemViewNss = - NamespaceString::makeSystemDotViewsNamespace({boost::none, "viewDB"}); + const NamespaceString systemViewNss = NamespaceString::makeSystemDotViewsNamespace( + DatabaseName::createDatabaseName_forTest(boost::none, "viewDB")); const NamespaceString viewNss = NamespaceString::createNamespaceString_forTest(boost::none, "viewDB.view.name"); const auto viewPipeline = Value(fromjson("[{$match: {field: 'value'}}, {$project: {field: 1}}]")); - const auto document = BSON("_id" << viewNss.toString() << "viewOn" + const auto document = BSON("_id" << viewNss.toString_forTest() << "viewOn" << "baseColl" << "pipeline" << viewPipeline); const auto documentKey = Value(Document{{"_id", document["_id"]}}); @@ -163,7 +174,7 @@ TEST(ChangeStreamEventTransformTest, TestCreateViewOnSingleCollection) { {DocumentSourceChangeStream::kWallTimeField, Date_t()}, {DocumentSourceChangeStream::kFullDocumentField, Document(document)}, {DocumentSourceChangeStream::kNamespaceField, - Document{{"db", systemViewNss.db()}, {"coll", systemViewNss.coll()}}}, + Document{{"db", systemViewNss.db_forTest()}, {"coll", systemViewNss.coll()}}}, {DocumentSourceChangeStream::kDocumentKeyField, documentKey}}; ASSERT_DOCUMENT_EQ(applyTransformation(oplogEntry), expectedDoc); @@ -190,7 +201,7 @@ TEST(ChangeStreamEventTransformTest, TestUpdateTransformWithTenantId) { ); Document expectedNamespace = - Document{{"db", nssWithTenant.dbName().db()}, {"coll", nssWithTenant.coll()}}; + Document{{"db", nssWithTenant.dbName().toString_forTest()}, {"coll", nssWithTenant.coll()}}; auto changeStreamDoc = applyTransformation(updateField, nssWithTenant); auto outputNs = changeStreamDoc[DocumentSourceChangeStream::kNamespaceField].getDocument(); @@ -227,20 +238,22 @@ TEST(ChangeStreamEventTransformTest, TestRenameTransformWithTenantId) { NamespaceString renameTo = NamespaceString::createNamespaceString_forTest(tenantId, "unittests.rename_coll"); - auto renameField = makeOplogEntry( - repl::OpTypeEnum::kCommand, // op type - renameFrom.getCommandNS(), // namespace - BSON("renameCollection" << renameFrom.toString() << "to" << renameTo.toString()), // o - testUuid() // uuid - ); + auto renameField = + makeOplogEntry(repl::OpTypeEnum::kCommand, // op type + renameFrom.getCommandNS(), // namespace + BSON("renameCollection" << renameFrom.toString_forTest() << "to" + << renameTo.toString_forTest()), // o + testUuid() // uuid + ); - Document expectedDoc{{DocumentSourceChangeStream::kNamespaceField, - Document{{"db", renameFrom.dbName().db()}, {"coll", renameFrom.coll()}}}, - {DocumentSourceChangeStream::kRenameTargetNssField, - Document{{"db", renameTo.dbName().db()}, {"coll", renameTo.coll()}}}, - {DocumentSourceChangeStream::kOperationDescriptionField, - Document{BSON("to" << BSON("db" << renameTo.dbName().db() << "coll" - << renameTo.coll()))}}}; + Document expectedDoc{ + {DocumentSourceChangeStream::kNamespaceField, + Document{{"db", renameFrom.dbName().toString_forTest()}, {"coll", renameFrom.coll()}}}, + {DocumentSourceChangeStream::kRenameTargetNssField, + Document{{"db", renameTo.dbName().toString_forTest()}, {"coll", renameTo.coll()}}}, + {DocumentSourceChangeStream::kOperationDescriptionField, + 
Document{BSON("to" << BSON("db" << renameTo.dbName().toString_forTest() << "coll" + << renameTo.coll()))}}}; auto changeStreamDoc = applyTransformation(renameField, renameFrom); auto renameDoc = Document{ @@ -257,12 +270,13 @@ TEST(ChangeStreamEventTransformTest, TestRenameTransformWithTenantId) { // in the oplog entry. It should still not be a part of the db name in the change event. RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); - auto oplogEntry = makeOplogEntry( - repl::OpTypeEnum::kCommand, // op type - renameFrom.getCommandNS(), // namespace - BSON("renameCollection" << renameFrom.toString() << "to" << renameTo.toString()), // o - testUuid() // uuid - ); + auto oplogEntry = + makeOplogEntry(repl::OpTypeEnum::kCommand, // op type + renameFrom.getCommandNS(), // namespace + BSON("renameCollection" << renameFrom.toString_forTest() << "to" + << renameTo.toString_forTest()), // o + testUuid() // uuid + ); changeStreamDoc = applyTransformation(oplogEntry, renameFrom); renameDoc = Document{ @@ -292,7 +306,7 @@ TEST(ChangeStreamEventTransformTest, TestDropDatabaseTransformWithTenantId) { testUuid() // uuid ); - Document expectedNamespace = Document{{"db", dbToDrop.dbName().db()}}; + Document expectedNamespace = Document{{"db", dbToDrop.dbName().toString_forTest()}}; auto changeStreamDoc = applyTransformation(dropDbField, dbToDrop); auto outputNs = changeStreamDoc[DocumentSourceChangeStream::kNamespaceField].getDocument(); @@ -332,7 +346,7 @@ TEST(ChangeStreamEventTransformTest, TestCreateTransformWithTenantId) { ); Document expectedNamespace = - Document{{"db", nssWithTenant.dbName().db()}, {"coll", nssWithTenant.coll()}}; + Document{{"db", nssWithTenant.dbName().toString_forTest()}, {"coll", nssWithTenant.coll()}}; auto changeStreamDoc = applyTransformation(createField, nssWithTenant); auto outputNs = changeStreamDoc[DocumentSourceChangeStream::kNamespaceField].getDocument(); @@ -363,8 +377,8 @@ TEST(ChangeStreamEventTransformTest, TestCreateViewTransformWithTenantId) { const auto tenantId = TenantId(OID::gen()); - const NamespaceString systemViewNss = - NamespaceString::makeSystemDotViewsNamespace({tenantId, "viewDB"}); + const NamespaceString systemViewNss = NamespaceString::makeSystemDotViewsNamespace( + DatabaseName::createDatabaseName_forTest(tenantId, "viewDB")); const NamespaceString viewNss = NamespaceString::createNamespaceString_forTest(tenantId, "viewDB.view.name"); const auto viewPipeline = @@ -372,12 +386,13 @@ TEST(ChangeStreamEventTransformTest, TestCreateViewTransformWithTenantId) { const auto opDescription = Document{{"viewOn", "baseColl"_sd}, {"pipeline", viewPipeline}}; auto createView = makeOplogEntry(repl::OpTypeEnum::kInsert, // op type systemViewNss, // namespace - BSON("_id" << viewNss.toString() << "viewOn" + BSON("_id" << viewNss.toString_forTest() << "viewOn" << "baseColl" << "pipeline" << viewPipeline), // o testUuid()); // uuid - Document expectedNamespace = Document{{"db", viewNss.dbName().db()}, {"coll", viewNss.coll()}}; + Document expectedNamespace = + Document{{"db", viewNss.dbName().toString_forTest()}, {"coll", viewNss.coll()}}; auto changeStreamDoc = applyTransformation( createView, NamespaceString::makeCollectionlessAggregateNSS(viewNss.dbName())); @@ -391,7 +406,7 @@ TEST(ChangeStreamEventTransformTest, TestCreateViewTransformWithTenantId) { auto oplogEntry = makeOplogEntry(repl::OpTypeEnum::kInsert, // op type systemViewNss, // namespace - BSON("_id" << viewNss.toString() << "viewOn" + BSON("_id" << 
viewNss.toString_forTest() << "viewOn" << "baseColl" << "pipeline" << viewPipeline), // o testUuid()); diff --git a/src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp b/src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp index cb2afcf06e23f..0853b24597606 100644 --- a/src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp +++ b/src/mongo/db/pipeline/change_stream_expired_pre_image_remover.cpp @@ -28,15 +28,31 @@ */ -#include "mongo/platform/basic.h" +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include #include "change_stream_expired_pre_image_remover.h" +#include "mongo/base/string_data.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/change_stream_pre_images_collection_manager.h" +#include "mongo/db/client.h" +#include "mongo/db/pipeline/change_stream_preimage_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/background.h" #include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -88,18 +104,16 @@ class ChangeStreamExpiredPreImagesRemover : public BackgroundJob { void run() { LOGV2(7080100, "Starting Change Stream Expired Pre-images Remover thread"); ThreadClient tc(name(), getGlobalServiceContext()); - AuthorizationSession::get(cc())->grantInternalAuthorization(&cc()); - { - stdx::lock_guard lk(*tc.get()); - tc.get()->setSystemOperationKillableByStepdown(lk); - } + AuthorizationSession::get(cc())->grantInternalAuthorization(&cc()); while (true) { LOGV2_DEBUG(6278517, 3, "Thread awake"); + auto iterationStartTime = Date_t::now(); - ChangeStreamPreImagesCollectionManager::performExpiredChangeStreamPreImagesRemovalPass( - tc.get()); + ChangeStreamPreImagesCollectionManager::get(tc->getServiceContext()) + .performExpiredChangeStreamPreImagesRemovalPass(tc.get()); + { // Wait until either gExpiredChangeStreamPreImageRemovalJobSleepSecs passes or a // shutdown is requested. diff --git a/src/mongo/db/pipeline/change_stream_expired_pre_image_remover_test.cpp b/src/mongo/db/pipeline/change_stream_expired_pre_image_remover_test.cpp deleted file mode 100644 index fb8eeedb2117e..0000000000000 --- a/src/mongo/db/pipeline/change_stream_expired_pre_image_remover_test.cpp +++ /dev/null @@ -1,385 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. 
You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/platform/basic.h" - -#include "mongo/db/change_stream_options_manager.h" - -#include "mongo/db/catalog_raii.h" -#include "mongo/db/change_stream_pre_images_collection_manager.h" -#include "mongo/db/change_stream_pre_images_truncate_markers.h" -#include "mongo/db/change_stream_serverless_helpers.h" -#include "mongo/db/change_streams_cluster_parameter_gen.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/op_observer/op_observer_impl.h" -#include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/db/op_observer/oplog_writer_impl.h" -#include "mongo/db/pipeline/change_stream_expired_pre_image_remover.h" -#include "mongo/db/pipeline/change_stream_preimage_gen.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" - -namespace mongo { - -namespace { -std::unique_ptr populateChangeStreamPreImageOptions( - stdx::variant expireAfterSeconds) { - PreAndPostImagesOptions preAndPostImagesOptions; - preAndPostImagesOptions.setExpireAfterSeconds(expireAfterSeconds); - - auto changeStreamOptions = std::make_unique(); - changeStreamOptions->setPreAndPostImages(std::move(preAndPostImagesOptions)); - - return changeStreamOptions; -} - -void setChangeStreamOptionsToManager(OperationContext* opCtx, - ChangeStreamOptions& changeStreamOptions) { - auto& changeStreamOptionsManager = ChangeStreamOptionsManager::get(opCtx); - ASSERT_EQ(changeStreamOptionsManager.setOptions(opCtx, changeStreamOptions).getStatus(), - ErrorCodes::OK); -} - -class ChangeStreamPreImageExpirationPolicyTest : public ServiceContextTest { -public: - ChangeStreamPreImageExpirationPolicyTest() { - ChangeStreamOptionsManager::create(getServiceContext()); - } -}; - -TEST_F(ChangeStreamPreImageExpirationPolicyTest, getPreImageExpirationTimeWithValidIntegralValue) { - auto opCtx = cc().makeOperationContext(); - const int64_t expireAfterSeconds = 10; - - auto changeStreamOptions = populateChangeStreamPreImageOptions(expireAfterSeconds); - setChangeStreamOptionsToManager(opCtx.get(), *changeStreamOptions.get()); - - auto currentTime = Date_t::now(); - auto receivedExpireAfterSeconds = - change_stream_pre_image_helpers::getPreImageExpirationTime(opCtx.get(), currentTime); - ASSERT(receivedExpireAfterSeconds); - ASSERT_EQ(*receivedExpireAfterSeconds, currentTime - Seconds(expireAfterSeconds)); -} - -TEST_F(ChangeStreamPreImageExpirationPolicyTest, getPreImageExpirationTimeWithUnsetValue) { - auto opCtx = cc().makeOperationContext(); - - auto currentTime = Date_t::now(); - auto receivedExpireAfterSeconds = - change_stream_pre_image_helpers::getPreImageExpirationTime(opCtx.get(), currentTime); - ASSERT_FALSE(receivedExpireAfterSeconds); -} - -TEST_F(ChangeStreamPreImageExpirationPolicyTest, getPreImageExpirationTimeWithOffValue) { - auto opCtx = cc().makeOperationContext(); - - 
auto changeStreamOptions = populateChangeStreamPreImageOptions("off"); - setChangeStreamOptionsToManager(opCtx.get(), *changeStreamOptions.get()); - - auto currentTime = Date_t::now(); - auto receivedExpireAfterSeconds = - change_stream_pre_image_helpers::getPreImageExpirationTime(opCtx.get(), currentTime); - ASSERT_FALSE(receivedExpireAfterSeconds); -} -} // namespace - -class PreImagesTruncateMarkersTest : public ServiceContextMongoDTest { -protected: - explicit PreImagesTruncateMarkersTest() : ServiceContextMongoDTest() { - ChangeStreamOptionsManager::create(getServiceContext()); - } - - virtual void setUp() override { - ServiceContextMongoDTest::setUp(); - - auto service = getServiceContext(); - auto opCtx = cc().makeOperationContext(); - - // Use the full StorageInterfaceImpl so the earliest oplog entry Timestamp is not the - // minimum Timestamp. - repl::StorageInterface::set(service, std::make_unique()); - - // Set up ReplicationCoordinator and create oplog. The earliest oplog entry Timestamp is - // required for computing whether a truncate marker is expired. - repl::ReplicationCoordinator::set( - service, std::make_unique(service)); - repl::createOplog(opCtx.get()); - - // Ensure that we are primary. - auto replCoord = repl::ReplicationCoordinator::get(opCtx.get()); - ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY)); - } - - void tearDown() override { - serverGlobalParams.clusterRole = ClusterRole::None; - } - - void serverlessSetExpireAfterSeconds(const TenantId& tenantId, int64_t expireAfterSeconds) { - auto* clusterParameters = ServerParameterSet::getClusterParameterSet(); - auto* changeStreamsParam = - clusterParameters - ->get>( - "changeStreams"); - - auto oldSettings = changeStreamsParam->getValue(tenantId); - oldSettings.setExpireAfterSeconds(expireAfterSeconds); - changeStreamsParam->setValue(oldSettings, tenantId).ignore(); - } - - RecordId generatePreImageRecordId(Timestamp timestamp) { - const UUID uuid{UUID::gen()}; - ChangeStreamPreImageId preImageId(uuid, timestamp, 0); - return change_stream_pre_image_helpers::toRecordId(preImageId); - } - - - RecordId generatePreImageRecordId(Date_t wallTime) { - const UUID uuid{UUID::gen()}; - Timestamp timestamp{wallTime}; - ChangeStreamPreImageId preImageId(uuid, timestamp, 0); - return change_stream_pre_image_helpers::toRecordId(preImageId); - } - - bool hasExcessMarkers(OperationContext* opCtx, PreImagesTruncateMarkers& markers) { - return markers._hasExcessMarkers(opCtx); - } - - // The oplog must be populated in order to produce an earliest Timestamp. Creates then - // performs an insert on an arbitrary collection in order to populate the oplog. 
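The deleted ChangeStreamPreImageExpirationPolicyTest cases above pin down the expiration cutoff: a numeric 'expireAfterSeconds' yields a cutoff of now minus that many seconds, while an unset or "off" value yields no time-based cutoff at all. A minimal sketch of that rule, assuming std::chrono in place of Date_t/Seconds:

```cpp
#include <chrono>
#include <optional>

using Clock = std::chrono::system_clock;

// Returns the wall-time cutoff before which pre-images are considered expired, or
// std::nullopt when 'expireAfterSeconds' is unset or "off" (no time-based expiry).
std::optional<Clock::time_point> preImageExpirationCutoff(
    std::optional<long long> expireAfterSeconds, Clock::time_point now) {
    if (!expireAfterSeconds) {
        return std::nullopt;
    }
    return now - std::chrono::seconds(*expireAfterSeconds);
}
```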
- void initEarliestOplogTSWithInsert(OperationContext* opCtx) { - NamespaceString arbitraryNss = - NamespaceString::createNamespaceString_forTest("test", "coll"); - - writeConflictRetry(opCtx, "createCollection", arbitraryNss.ns(), [&] { - WriteUnitOfWork wunit(opCtx); - AutoGetCollection collRaii(opCtx, arbitraryNss, MODE_X); - invariant(!collRaii); - auto db = collRaii.ensureDbExists(opCtx); - invariant(db->createCollection(opCtx, arbitraryNss, {})); - wunit.commit(); - }); - - std::vector insert; - insert.emplace_back(BSON("_id" << 0 << "data" - << "x")); - WriteUnitOfWork wuow(opCtx); - AutoGetCollection autoColl(opCtx, arbitraryNss, MODE_IX); - OpObserverRegistry opObserver; - opObserver.addObserver( - std::make_unique(std::make_unique())); - opObserver.onInserts(opCtx, - *autoColl, - insert.begin(), - insert.end(), - /*fromMigrate=*/std::vector(insert.size(), false), - /*defaultFromMigrate=*/false); - wuow.commit(); - } -}; - -// When 'expireAfterSeconds' is off, defaults to comparing the 'lastRecord's Timestamp of oldest -// marker with the Timestamp of the ealiest oplog entry. -// -// When 'expireAfterSeconds' is on, defaults to comparing the 'lastRecord's wallTime with -// the current time - 'expireAfterSeconds', which is already tested as a part of the -// ChangeStreamPreImageExpirationPolicyTest. -TEST_F(PreImagesTruncateMarkersTest, hasExcessMarkersExpiredAfterSecondsOff) { - auto opCtxPtr = cc().makeOperationContext(); - auto opCtx = opCtxPtr.get(); - - // With no explicit 'expireAfterSeconds', excess markers are determined by whether the Timestamp - // of the 'lastRecord' in the oldest marker is greater than the Timestamp of the earliest oplog - // entry. - auto changeStreamOptions = populateChangeStreamPreImageOptions("off"); - setChangeStreamOptionsToManager(opCtx, *changeStreamOptions.get()); - - initEarliestOplogTSWithInsert(opCtx); - const auto currentEarliestOplogEntryTs = - repl::StorageInterface::get(opCtx->getServiceContext())->getEarliestOplogTimestamp(opCtx); - - // Ensure that the generated Timestamp associated with the lastRecord of the marker is less than - // the earliest oplog entry Timestamp. - auto ts = currentEarliestOplogEntryTs - 1; - ASSERT_GT(currentEarliestOplogEntryTs, ts); - auto wallTime = Date_t::fromMillisSinceEpoch(ts.asInt64()); - auto lastRecordId = generatePreImageRecordId(wallTime); - - auto numRecords = 1; - auto numBytes = 100; - std::deque initialMarkers{ - {numRecords, numBytes, lastRecordId, wallTime}}; - - PreImagesTruncateMarkers markers( - boost::none /* tenantId */, std::move(initialMarkers), 0, 0, 100); - bool excessMarkers = hasExcessMarkers(opCtx, markers); - ASSERT_TRUE(excessMarkers); -} - -TEST_F(PreImagesTruncateMarkersTest, hasNoExcessMarkersExpiredAfterSecondsOff) { - auto opCtxPtr = cc().makeOperationContext(); - auto opCtx = opCtxPtr.get(); - - // With no explicit 'expireAfterSeconds', excess markers are determined by whether the Timestamp - // of the 'lastRecord' in the oldest marker is greater than the Timestamp of the earliest oplog - // entry. - auto changeStreamOptions = populateChangeStreamPreImageOptions("off"); - setChangeStreamOptionsToManager(opCtx, *changeStreamOptions.get()); - - initEarliestOplogTSWithInsert(opCtx); - const auto currentEarliestOplogEntryTs = - repl::StorageInterface::get(opCtx->getServiceContext())->getEarliestOplogTimestamp(opCtx); - - // Ensure that the generated Timestamp associated with the lastRecord of the marker is less than - // the earliest oplog entry Timestamp. 
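The deleted hasExcessMarkersExpiredAfterSecondsOff and hasNoExcessMarkersExpiredAfterSecondsOff tests exercise the fallback rule when 'expireAfterSeconds' is off: the oldest truncate marker counts as excess only when its last record's timestamp falls before the earliest oplog entry timestamp. A standalone sketch of that comparison, assuming the front of the deque is the oldest marker and letting a plain integer stand in for Timestamp:

```cpp
#include <cstdint>
#include <deque>

struct TruncateMarker {
    int64_t records = 0;
    int64_t bytes = 0;
    uint64_t lastRecordTs = 0;  // timestamp of the newest pre-image covered by this marker
};

// With no 'expireAfterSeconds', the oldest marker (assumed to sit at the front) is only
// reclaimable once the oplog's earliest entry has moved past it.
bool hasExcessMarkers(const std::deque<TruncateMarker>& markers, uint64_t earliestOplogTs) {
    if (markers.empty()) {
        return false;
    }
    return markers.front().lastRecordTs < earliestOplogTs;
}
```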
- auto ts = currentEarliestOplogEntryTs + 1; - ASSERT_LT(currentEarliestOplogEntryTs, ts); - auto wallTime = Date_t::fromMillisSinceEpoch(ts.asInt64()); - auto lastRecordId = generatePreImageRecordId(wallTime); - - auto numRecords = 1; - auto numBytes = 100; - std::deque initialMarkers{ - {numRecords, numBytes, lastRecordId, wallTime}}; - - PreImagesTruncateMarkers markers( - boost::none /* tenantId */, std::move(initialMarkers), 0, 0, 100); - bool excessMarkers = hasExcessMarkers(opCtx, markers); - ASSERT_FALSE(excessMarkers); -} - -TEST_F(PreImagesTruncateMarkersTest, serverlessHasNoExcessMarkers) { - int64_t expireAfterSeconds = 1000; - auto tenantId = change_stream_serverless_helpers::getTenantIdForTesting(); - serverlessSetExpireAfterSeconds(tenantId, expireAfterSeconds); - - auto opCtxPtr = cc().makeOperationContext(); - auto opCtx = opCtxPtr.get(); - auto wallTime = opCtx->getServiceContext()->getFastClockSource()->now() + Minutes(120); - auto lastRecordId = generatePreImageRecordId(wallTime); - auto numRecords = 1; - auto numBytes = 100; - std::deque initialMarkers{ - {numRecords, numBytes, lastRecordId, wallTime}}; - - PreImagesTruncateMarkers markers(tenantId, std::move(initialMarkers), 0, 0, 100); - bool excessMarkers = hasExcessMarkers(opCtx, markers); - ASSERT_FALSE(excessMarkers); -} - -TEST_F(PreImagesTruncateMarkersTest, serverlessHasExcessMarkers) { - int64_t expireAfterSeconds = 1; - auto tenantId = change_stream_serverless_helpers::getTenantIdForTesting(); - serverlessSetExpireAfterSeconds(tenantId, expireAfterSeconds); - - auto opCtxPtr = cc().makeOperationContext(); - auto opCtx = opCtxPtr.get(); - auto wallTime = opCtx->getServiceContext()->getFastClockSource()->now() - Minutes(120); - auto lastRecordId = generatePreImageRecordId(wallTime); - auto numRecords = 1; - auto numBytes = 100; - std::deque initialMarkers{ - {numRecords, numBytes, lastRecordId, wallTime}}; - - PreImagesTruncateMarkers markers(tenantId, std::move(initialMarkers), 0, 0, 100); - bool excessMarkers = hasExcessMarkers(opCtx, markers); - ASSERT_TRUE(excessMarkers); -} - -TEST_F(PreImagesTruncateMarkersTest, RecordIdToPreImageTimstampRetrieval) { - // Basic case. - { - Timestamp ts0(Date_t::now()); - int64_t applyOpsIndex = 0; - - ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); - auto preImageRecordId = change_stream_pre_image_helpers::toRecordId(preImageId); - - auto ts1 = change_stream_pre_image_helpers::getPreImageTimestamp(preImageRecordId); - ASSERT_EQ(ts0, ts1); - } - - // Min Timestamp. - { - Timestamp ts0 = Timestamp::min(); - int64_t applyOpsIndex = 0; - - ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); - auto preImageRecordId = change_stream_pre_image_helpers::toRecordId(preImageId); - - auto ts1 = change_stream_pre_image_helpers::getPreImageTimestamp(preImageRecordId); - ASSERT_EQ(ts0, ts1); - } - - // Max Timestamp - { - Timestamp ts0 = Timestamp::max(); - int64_t applyOpsIndex = 0; - - ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); - auto preImageRecordId = change_stream_pre_image_helpers::toRecordId(preImageId); - - auto ts1 = change_stream_pre_image_helpers::getPreImageTimestamp(preImageRecordId); - ASSERT_EQ(ts0, ts1); - } - - // Extra large 'applyOpsIndex'. - // - // Parsing a RecordId with an underlying KeyString representation into BSON discards type bits. 
- // Since the 'applyOpsIndex' is the only field in 'ChangeStreamPreImageId' that requires type - // bits to generate the original value from KeyString, ensure different numeric values of - // 'applyOpsIndex' don't impact the Timestamp retrieval. - { - Timestamp ts0(Date_t::now()); - int64_t applyOpsIndex = std::numeric_limits::max(); - - ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); - auto preImageRecordId = change_stream_pre_image_helpers::toRecordId(preImageId); - - auto ts1 = change_stream_pre_image_helpers::getPreImageTimestamp(preImageRecordId); - ASSERT_EQ(ts0, ts1); - } - - // Extra large 'applyOpsIndex' with Timestamp::max(). - { - Timestamp ts0 = Timestamp::max(); - int64_t applyOpsIndex = std::numeric_limits::max(); - - ChangeStreamPreImageId preImageId(UUID::gen(), ts0, applyOpsIndex); - auto preImageRecordId = change_stream_pre_image_helpers::toRecordId(preImageId); - - auto ts1 = change_stream_pre_image_helpers::getPreImageTimestamp(preImageRecordId); - ASSERT_EQ(ts0, ts1); - } -} - -} // namespace mongo diff --git a/src/mongo/db/pipeline/change_stream_filter_helpers.cpp b/src/mongo/db/pipeline/change_stream_filter_helpers.cpp index 5f552921008a5..0c1199638734e 100644 --- a/src/mongo/db/pipeline/change_stream_filter_helpers.cpp +++ b/src/mongo/db/pipeline/change_stream_filter_helpers.cpp @@ -29,13 +29,31 @@ #include "mongo/db/pipeline/change_stream_filter_helpers.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/db/basic_types.h" #include "mongo/db/bson/bson_helper.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_helpers_legacy.h" #include "mongo/db/pipeline/change_stream_rewrite_helpers.h" #include "mongo/db/pipeline/document_source_change_stream.h" -#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" namespace mongo { namespace change_stream_filter { @@ -309,11 +327,8 @@ std::unique_ptr buildInternalOpFilter( internalOpTypes.push_back("migrateLastChunkFromShard"_sd); } - if (feature_flags::gFeatureFlagChangeStreamsFurtherEnrichedEvents.isEnabled( - serverGlobalParams.featureCompatibility)) { - internalOpTypes.push_back("refineCollectionShardKey"_sd); - internalOpTypes.push_back("reshardCollection"_sd); - } + internalOpTypes.push_back("refineCollectionShardKey"_sd); + internalOpTypes.push_back("reshardCollection"_sd); // Build the oplog filter to match the required internal op types. 
BSONArrayBuilder internalOpTypeOrBuilder; diff --git a/src/mongo/db/pipeline/change_stream_filter_helpers.h b/src/mongo/db/pipeline/change_stream_filter_helpers.h index e718f122520c0..459279d27c65a 100644 --- a/src/mongo/db/pipeline/change_stream_filter_helpers.h +++ b/src/mongo/db/pipeline/change_stream_filter_helpers.h @@ -29,7 +29,14 @@ #pragma once +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/variables.h" namespace mongo { namespace change_stream_filter { diff --git a/src/mongo/db/pipeline/change_stream_helpers.cpp b/src/mongo/db/pipeline/change_stream_helpers.cpp index 720646d32eab5..614b16a656293 100644 --- a/src/mongo/db/pipeline/change_stream_helpers.cpp +++ b/src/mongo/db/pipeline/change_stream_helpers.cpp @@ -29,7 +29,12 @@ #include "mongo/db/pipeline/change_stream_helpers.h" +#include + +#include + #include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace change_stream { diff --git a/src/mongo/db/pipeline/change_stream_helpers.h b/src/mongo/db/pipeline/change_stream_helpers.h index 7adc4f92f8fc0..cd676addec841 100644 --- a/src/mongo/db/pipeline/change_stream_helpers.h +++ b/src/mongo/db/pipeline/change_stream_helpers.h @@ -29,8 +29,11 @@ #pragma once +#include + #include "mongo/db/pipeline/document_source_change_stream_gen.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/resume_token.h" namespace mongo { diff --git a/src/mongo/db/pipeline/change_stream_helpers_legacy.cpp b/src/mongo/db/pipeline/change_stream_helpers_legacy.cpp index 4bc23d7920deb..834c03e8ec3d0 100644 --- a/src/mongo/db/pipeline/change_stream_helpers_legacy.cpp +++ b/src/mongo/db/pipeline/change_stream_helpers_legacy.cpp @@ -30,18 +30,14 @@ #include "mongo/db/pipeline/change_stream_helpers_legacy.h" -#include "mongo/db/pipeline/change_stream_filter_helpers.h" -#include "mongo/db/pipeline/document_source_change_stream_add_post_image.h" -#include "mongo/db/pipeline/document_source_change_stream_add_pre_image.h" -#include "mongo/db/pipeline/document_source_change_stream_check_invalidate.h" -#include "mongo/db/pipeline/document_source_change_stream_check_resumability.h" -#include "mongo/db/pipeline/document_source_change_stream_check_topology_change.h" -#include "mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h" -#include "mongo/db/pipeline/document_source_change_stream_handle_topology_change.h" -#include "mongo/db/pipeline/document_source_change_stream_oplog_match.h" -#include "mongo/db/pipeline/document_source_change_stream_transform.h" -#include "mongo/db/pipeline/document_source_change_stream_unwind_transaction.h" -#include "mongo/db/pipeline/expression.h" +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/resume_token.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -78,7 +74,7 @@ Document convertFromLegacyOplogFormat(const Document& o2Entry, const NamespaceSt // This field would be the first field in the new format, but the current change stream code // does not depend on the field order. 
- doc.addField(type.getString(), Value(nss.toString())); + doc.addField(type.getString(), Value(NamespaceStringUtil::serialize(nss))); return doc.freeze(); } diff --git a/src/mongo/db/pipeline/change_stream_helpers_legacy.h b/src/mongo/db/pipeline/change_stream_helpers_legacy.h index 1c207767300e5..62009ba2a62e5 100644 --- a/src/mongo/db/pipeline/change_stream_helpers_legacy.h +++ b/src/mongo/db/pipeline/change_stream_helpers_legacy.h @@ -29,8 +29,17 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo::change_stream_legacy { diff --git a/src/mongo/db/pipeline/change_stream_invalidation_info.cpp b/src/mongo/db/pipeline/change_stream_invalidation_info.cpp index 7be3bf8e61452..036f323d73608 100644 --- a/src/mongo/db/pipeline/change_stream_invalidation_info.cpp +++ b/src/mongo/db/pipeline/change_stream_invalidation_info.cpp @@ -29,7 +29,9 @@ #include "mongo/db/pipeline/change_stream_invalidation_info.h" -#include "mongo/base/init.h" + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/db/pipeline/change_stream_invalidation_info.h b/src/mongo/db/pipeline/change_stream_invalidation_info.h index 5f9620093085c..c8fb8b7ed1489 100644 --- a/src/mongo/db/pipeline/change_stream_invalidation_info.h +++ b/src/mongo/db/pipeline/change_stream_invalidation_info.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/pipeline/change_stream_preimage.idl b/src/mongo/db/pipeline/change_stream_preimage.idl index 486fab96fdb4a..2fc5954155d83 100644 --- a/src/mongo/db/pipeline/change_stream_preimage.idl +++ b/src/mongo/db/pipeline/change_stream_preimage.idl @@ -45,6 +45,18 @@ server_parameters: gte: 1 default: 10 + preImagesCollectionTruncateMarkersMinBytes: + description: >- + Server parameter that specifies the minimum number of bytes contained in each + truncate marker. This is only of use if featureFlagUseUnreplicatedTruncatesForDeletions is + enabled + set_at: startup + cpp_varname: gPreImagesCollectionTruncateMarkersMinBytes + cpp_vartype: int32_t + default: 33_554_432 # 32 MiB + validator: + gt: 0 + structs: ChangeStreamPreImageId: description: Uniquely identifies a pre-image for a given node or replica set. 
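The truncate-marker tests removed above spell out the expiry rule for pre-image truncate markers: when no explicit 'expireAfterSeconds' is configured, the oldest marker counts as expired once the Timestamp of its 'lastRecord' falls behind the earliest oplog entry; when 'expireAfterSeconds' is set (as in the serverless tests), expiry instead compares the marker's wall time against the current time minus 'expireAfterSeconds'. A minimal sketch of that decision is below; the `Marker` struct and `isOldestMarkerExpired` helper are hypothetical illustrations of the rule, not the actual `PreImagesTruncateMarkers` implementation.

```cpp
// Hedged sketch of the truncate-marker expiry rule described in the removed tests above.
// All names here (Marker, isOldestMarkerExpired, field names) are hypothetical.
#include <chrono>
#include <cstdint>
#include <optional>

struct Marker {
    int64_t lastRecordTimestampSecs;                     // Timestamp of the marker's last record.
    std::chrono::system_clock::time_point lastWallTime;  // Wall-clock time of the last record.
};

bool isOldestMarkerExpired(const Marker& oldest,
                           int64_t earliestOplogTimestampSecs,
                           std::optional<std::chrono::seconds> expireAfterSeconds) {
    if (expireAfterSeconds) {
        // Explicit policy: expire by wall-clock age ("now" minus expireAfterSeconds).
        return oldest.lastWallTime < std::chrono::system_clock::now() - *expireAfterSeconds;
    }
    // No explicit policy: expire once the marker falls behind the earliest oplog entry.
    return oldest.lastRecordTimestampSecs < earliestOplogTimestampSecs;
}
```

The design point the tests rely on is that, absent an explicit policy, pre-image retention is tied to oplog truncation: a marker only becomes removable after the oplog no longer covers its last record.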
diff --git a/src/mongo/db/pipeline/change_stream_rewrite_helpers.cpp b/src/mongo/db/pipeline/change_stream_rewrite_helpers.cpp index 1f13e8ed87ff1..19bf578b12673 100644 --- a/src/mongo/db/pipeline/change_stream_rewrite_helpers.cpp +++ b/src/mongo/db/pipeline/change_stream_rewrite_helpers.cpp @@ -29,13 +29,46 @@ #include "mongo/db/pipeline/change_stream_rewrite_helpers.h" +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/field_ref.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_expr.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_path.h" +#include "mongo/db/matcher/expression_tree.h" #include "mongo/db/pipeline/document_source_change_stream.h" -#include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { namespace change_stream_rewrite { @@ -57,7 +90,7 @@ std::unique_ptr cloneWithSubstitution( const PathMatchExpression* predicate, const StringMap& renameList) { auto clonedPred = std::unique_ptr( static_cast(predicate->clone().release())); - clonedPred->applyRename(renameList); + tassert(7585302, "Failed to rename", clonedPred->applyRename(renameList)); return clonedPred; } boost::intrusive_ptr cloneWithSubstitution( diff --git a/src/mongo/db/pipeline/change_stream_rewrite_helpers.h b/src/mongo/db/pipeline/change_stream_rewrite_helpers.h index 9df83fde24fdd..2cd11c94ce14c 100644 --- a/src/mongo/db/pipeline/change_stream_rewrite_helpers.h +++ b/src/mongo/db/pipeline/change_stream_rewrite_helpers.h @@ -29,7 +29,14 @@ #pragma once +#include +#include +#include + +#include + #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/variables.h" namespace mongo { namespace change_stream_rewrite { diff --git a/src/mongo/db/pipeline/change_stream_rewrites_test.cpp b/src/mongo/db/pipeline/change_stream_rewrites_test.cpp index 60eb51d0738fb..171715480115a 100644 --- a/src/mongo/db/pipeline/change_stream_rewrites_test.cpp +++ b/src/mongo/db/pipeline/change_stream_rewrites_test.cpp @@ -28,14 +28,32 @@ */ #include +#include +#include #include -#include "mongo/bson/bsonobj.h" +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/json.h" #include "mongo/db/bson/bson_helper.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/change_stream_rewrite_helpers.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/expression_context.h" +#include 
"mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -1292,7 +1310,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteNonNullPredicateOnFieldFullDocumentBef TEST_F(ChangeStreamRewriteTest, CanRewriteFullNamespaceObject) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = MatchExpressionParser::parse( - BSON("ns" << BSON("db" << expCtx->ns.db() << "coll" << expCtx->ns.coll())), expCtx); + BSON("ns" << BSON("db" << expCtx->ns.db_forTest() << "coll" << expCtx->ns.coll())), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -1300,8 +1318,8 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteFullNamespaceObject) { ASSERT(rewrittenMatchExpression); auto rewrittenPredicate = rewrittenMatchExpression->serialize(); - const std::string ns = expCtx->ns.db().toString() + "." + expCtx->ns.coll().toString(); - const std::string cmdNs = expCtx->ns.db().toString() + ".$cmd"; + const std::string ns = expCtx->ns.db_forTest().toString() + "." + expCtx->ns.coll().toString(); + const std::string cmdNs = expCtx->ns.db_forTest().toString() + ".$cmd"; ASSERT_BSONOBJ_EQ( rewrittenPredicate, @@ -1329,7 +1347,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteFullNamespaceObject) { TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceObjectWithSwappedField) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = MatchExpressionParser::parse( - BSON("ns" << BSON("coll" << expCtx->ns.coll() << "db" << expCtx->ns.db())), expCtx); + BSON("ns" << BSON("coll" << expCtx->ns.coll() << "db" << expCtx->ns.db_forTest())), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -1355,14 +1373,14 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceObjectWithSwappedField) { TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceObjectWithOnlyDbField) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = - MatchExpressionParser::parse(BSON("ns" << BSON("db" << expCtx->ns.db())), expCtx); + MatchExpressionParser::parse(BSON("ns" << BSON("db" << expCtx->ns.db_forTest())), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( expCtx, statusWithMatchExpression.getValue().get(), {"ns"}); ASSERT(rewrittenMatchExpression); - const std::string cmdNs = expCtx->ns.db().toString() + ".$cmd"; + const std::string cmdNs = expCtx->ns.db_forTest().toString() + ".$cmd"; auto rewrittenPredicate = rewrittenMatchExpression->serialize(); ASSERT_BSONOBJ_EQ( @@ -1435,7 +1453,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceObjectWithInvalidDbField) { TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceObjectWithInvalidCollField) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = MatchExpressionParser::parse( - BSON("ns" << BSON("db" << expCtx->ns.db() << "coll" << 1)), expCtx); + BSON("ns" << BSON("db" << expCtx->ns.db_forTest() << "coll" << 1)), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -1487,7 +1505,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceObjectWithExtraField) { TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceWithStringDbFieldPath) { auto 
expCtx = getExpCtx(); auto statusWithMatchExpression = - MatchExpressionParser::parse(BSON("ns.db" << expCtx->ns.db()), expCtx); + MatchExpressionParser::parse(BSON("ns.db" << expCtx->ns.db_forTest()), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -1495,9 +1513,9 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceWithStringDbFieldPath) { ASSERT(rewrittenMatchExpression); auto rewrittenPredicate = rewrittenMatchExpression->serialize(); - const std::string regexNs = "^" + expCtx->ns.db().toString() + "\\." + + const std::string regexNs = "^" + expCtx->ns.db_forTest().toString() + "\\." + DocumentSourceChangeStream::kRegexAllCollections.toString(); - const std::string cmdNs = expCtx->ns.db().toString() + ".$cmd"; + const std::string cmdNs = expCtx->ns.db_forTest().toString() + ".$cmd"; ASSERT_BSONOBJ_EQ( rewrittenPredicate, @@ -2357,7 +2375,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteNamespaceWithEmptyNinExpression) { TEST_F(ChangeStreamRewriteTest, CanRewriteNsWithExprOnFullObject) { auto expCtx = getExpCtx(); - auto expr = fromjson("{$expr: {$eq: ['$ns', {db: '" + expCtx->ns.db() + "', coll: '" + + auto expr = fromjson("{$expr: {$eq: ['$ns', {db: '" + expCtx->ns.db_forTest() + "', coll: '" + expCtx->ns.coll() + "'}]}}"); auto statusWithMatchExpression = MatchExpressionParser::parse(expr, expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); @@ -2423,7 +2441,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteNsWithExprOnFullObject) { TEST_F(ChangeStreamRewriteTest, CanRewriteNsWithExprOnFullObjectWithOnlyDb) { auto expCtx = getExpCtx(); - auto expr = fromjson("{$expr: {$eq: ['$ns', {db: '" + expCtx->ns.db() + "'}]}}"); + auto expr = fromjson("{$expr: {$eq: ['$ns', {db: '" + expCtx->ns.db_forTest() + "'}]}}"); auto statusWithMatchExpression = MatchExpressionParser::parse(expr, expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); @@ -2598,7 +2616,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteNsWithExprOnInvalidFieldPath) { TEST_F(ChangeStreamRewriteTest, CanRewriteFullToObject) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = MatchExpressionParser::parse( - BSON("to" << BSON("db" << expCtx->ns.db() << "coll" << expCtx->ns.coll())), expCtx); + BSON("to" << BSON("db" << expCtx->ns.db_forTest() << "coll" << expCtx->ns.coll())), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -2606,7 +2624,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteFullToObject) { ASSERT(rewrittenMatchExpression); auto rewrittenPredicate = rewrittenMatchExpression->serialize(); - const std::string ns = expCtx->ns.db().toString() + "." + expCtx->ns.coll().toString(); + const std::string ns = expCtx->ns.db_forTest().toString() + "." 
+ expCtx->ns.coll().toString(); ASSERT_BSONOBJ_EQ(rewrittenPredicate, BSON(AND(fromjson("{op: {$eq: 'c'}}"), BSON("o.to" << BSON("$eq" << ns))))); @@ -2615,7 +2633,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteFullToObject) { TEST_F(ChangeStreamRewriteTest, CanRewriteToObjectWithSwappedField) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = MatchExpressionParser::parse( - BSON("to" << BSON("coll" << expCtx->ns.coll() << "db" << expCtx->ns.db())), expCtx); + BSON("to" << BSON("coll" << expCtx->ns.coll() << "db" << expCtx->ns.db_forTest())), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -2630,7 +2648,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteToObjectWithSwappedField) { TEST_F(ChangeStreamRewriteTest, CanRewriteToObjectWithOnlyDbField) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = - MatchExpressionParser::parse(BSON("to" << BSON("db" << expCtx->ns.db())), expCtx); + MatchExpressionParser::parse(BSON("to" << BSON("db" << expCtx->ns.db_forTest())), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -2675,7 +2693,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteToObjectWithInvalidDbField) { TEST_F(ChangeStreamRewriteTest, CanRewriteToObjectWithInvalidCollField) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = MatchExpressionParser::parse( - BSON("to" << BSON("db" << expCtx->ns.db() << "coll" << 1)), expCtx); + BSON("to" << BSON("db" << expCtx->ns.db_forTest() << "coll" << 1)), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -2705,7 +2723,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteToObjectWithExtraField) { TEST_F(ChangeStreamRewriteTest, CanRewriteToWithStringDbFieldPath) { auto expCtx = getExpCtx(); auto statusWithMatchExpression = - MatchExpressionParser::parse(BSON("to.db" << expCtx->ns.db()), expCtx); + MatchExpressionParser::parse(BSON("to.db" << expCtx->ns.db_forTest()), expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); auto rewrittenMatchExpression = change_stream_rewrite::rewriteFilterForFields( @@ -2713,7 +2731,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteToWithStringDbFieldPath) { ASSERT(rewrittenMatchExpression); auto rewrittenPredicate = rewrittenMatchExpression->serialize(); - const std::string regexNs = "^" + expCtx->ns.db().toString() + "\\." + + const std::string regexNs = "^" + expCtx->ns.db_forTest().toString() + "\\." 
+ DocumentSourceChangeStream::kRegexAllCollections.toString(); ASSERT_BSONOBJ_EQ( @@ -3162,7 +3180,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteToWithEmptyNinExpression) { TEST_F(ChangeStreamRewriteTest, CanRewriteToWithExprOnFullObject) { auto expCtx = getExpCtx(); - auto expr = fromjson("{$expr: {$eq: ['$to', {db: '" + expCtx->ns.db() + "', coll: '" + + auto expr = fromjson("{$expr: {$eq: ['$to', {db: '" + expCtx->ns.db_forTest() + "', coll: '" + expCtx->ns.coll() + "'}]}}"); auto statusWithMatchExpression = MatchExpressionParser::parse(expr, expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); @@ -3200,7 +3218,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteToWithExprOnFullObject) { TEST_F(ChangeStreamRewriteTest, CanRewriteToWithExprOnDbFieldPath) { auto expCtx = getExpCtx(); - auto expr = fromjson("{$expr: {$eq: ['$to.db', '" + expCtx->ns.db() + "']}}"); + auto expr = fromjson("{$expr: {$eq: ['$to.db', '" + expCtx->ns.db_forTest() + "']}}"); auto statusWithMatchExpression = MatchExpressionParser::parse(expr, expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); @@ -3281,7 +3299,7 @@ TEST_F(ChangeStreamRewriteTest, CanRewriteToWithExprOnInvalidFieldPath) { TEST_F(ChangeStreamRewriteTest, CanRewriteToWithExprOnInvalidDbSubFieldPath) { auto expCtx = getExpCtx(); - auto expr = fromjson("{$expr: {$eq: ['$to.db.test', '" + expCtx->ns.db() + "']}}"); + auto expr = fromjson("{$expr: {$eq: ['$to.db.test', '" + expCtx->ns.db_forTest() + "']}}"); auto statusWithMatchExpression = MatchExpressionParser::parse(expr, expCtx); ASSERT_OK(statusWithMatchExpression.getStatus()); diff --git a/src/mongo/db/pipeline/change_stream_split_event_helpers.cpp b/src/mongo/db/pipeline/change_stream_split_event_helpers.cpp index 19fba5afea510..25af770ddde0a 100644 --- a/src/mongo/db/pipeline/change_stream_split_event_helpers.cpp +++ b/src/mongo/db/pipeline/change_stream_split_event_helpers.cpp @@ -29,8 +29,21 @@ #include "mongo/db/pipeline/change_stream_split_event_helpers.h" +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/resume_token.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace change_stream_split_event { diff --git a/src/mongo/db/pipeline/change_stream_split_event_helpers.h b/src/mongo/db/pipeline/change_stream_split_event_helpers.h index ee39b52dead0a..240632416ecf2 100644 --- a/src/mongo/db/pipeline/change_stream_split_event_helpers.h +++ b/src/mongo/db/pipeline/change_stream_split_event_helpers.h @@ -29,8 +29,11 @@ #pragma once +#include #include +#include +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document.h" namespace mongo { diff --git a/src/mongo/db/pipeline/change_stream_split_event_helpers_test.cpp b/src/mongo/db/pipeline/change_stream_split_event_helpers_test.cpp index aea1ca6fce83b..c1a07d8d3256e 100644 --- a/src/mongo/db/pipeline/change_stream_split_event_helpers_test.cpp +++ b/src/mongo/db/pipeline/change_stream_split_event_helpers_test.cpp @@ -28,9 +28,23 @@ */ #include "mongo/db/pipeline/change_stream_split_event_helpers.h" + +#include + +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include 
"mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/resume_token.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/change_stream_start_after_invalidate_info.cpp b/src/mongo/db/pipeline/change_stream_start_after_invalidate_info.cpp index bfbf71aefff0a..f9b546a698894 100644 --- a/src/mongo/db/pipeline/change_stream_start_after_invalidate_info.cpp +++ b/src/mongo/db/pipeline/change_stream_start_after_invalidate_info.cpp @@ -29,7 +29,8 @@ #include "mongo/db/pipeline/change_stream_start_after_invalidate_info.h" -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/db/pipeline/change_stream_start_after_invalidate_info.h b/src/mongo/db/pipeline/change_stream_start_after_invalidate_info.h index 84afc41689ae8..7d0112b88000f 100644 --- a/src/mongo/db/pipeline/change_stream_start_after_invalidate_info.h +++ b/src/mongo/db/pipeline/change_stream_start_after_invalidate_info.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/pipeline/change_stream_test_helpers.cpp b/src/mongo/db/pipeline/change_stream_test_helpers.cpp index 1f6b5a4a19694..9ca1468caf838 100644 --- a/src/mongo/db/pipeline/change_stream_test_helpers.cpp +++ b/src/mongo/db/pipeline/change_stream_test_helpers.cpp @@ -29,16 +29,18 @@ #include "mongo/db/pipeline/change_stream_test_helpers.h" +#include + +#include +#include +#include + #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" -#include "mongo/db/pipeline/aggregation_context_fixture.h" -#include "mongo/db/pipeline/change_stream_rewrite_helpers.h" -#include "mongo/db/pipeline/document_source.h" -#include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo::change_stream_test_helper { diff --git a/src/mongo/db/pipeline/change_stream_test_helpers.h b/src/mongo/db/pipeline/change_stream_test_helpers.h index a1bc916d2a085..45eaec6400f7b 100644 --- a/src/mongo/db/pipeline/change_stream_test_helpers.h +++ b/src/mongo/db/pipeline/change_stream_test_helpers.h @@ -29,14 +29,26 @@ #pragma once +#include +#include +#include +#include #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/resume_token.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/uuid.h" namespace 
mongo::change_stream_test_helper { diff --git a/src/mongo/db/pipeline/change_stream_topology_change_info.cpp b/src/mongo/db/pipeline/change_stream_topology_change_info.cpp index e64743b671e57..a0069207a649e 100644 --- a/src/mongo/db/pipeline/change_stream_topology_change_info.cpp +++ b/src/mongo/db/pipeline/change_stream_topology_change_info.cpp @@ -29,7 +29,8 @@ #include "mongo/db/pipeline/change_stream_topology_change_info.h" -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/db/pipeline/change_stream_topology_change_info.h b/src/mongo/db/pipeline/change_stream_topology_change_info.h index 18fd9b24838a5..2385686cd395d 100644 --- a/src/mongo/db/pipeline/change_stream_topology_change_info.h +++ b/src/mongo/db/pipeline/change_stream_topology_change_info.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/pipeline/dependencies.cpp b/src/mongo/db/pipeline/dependencies.cpp index e96d65d93708e..486f07c802507 100644 --- a/src/mongo/db/pipeline/dependencies.cpp +++ b/src/mongo/db/pipeline/dependencies.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document_metadata_fields.h" -#include "mongo/db/jsobj.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/field_path.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/dependencies.h b/src/mongo/db/pipeline/dependencies.h index 9ee8746387d4d..9ab80e879d260 100644 --- a/src/mongo/db/pipeline/dependencies.h +++ b/src/mongo/db/pipeline/dependencies.h @@ -30,10 +30,13 @@ #pragma once #include +#include #include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/pipeline/variables.h" namespace mongo { diff --git a/src/mongo/db/pipeline/dependencies_test.cpp b/src/mongo/db/pipeline/dependencies_test.cpp index 75451b258b8ab..fb66528b75bff 100644 --- a/src/mongo/db/pipeline/dependencies_test.cpp +++ b/src/mongo/db/pipeline/dependencies_test.cpp @@ -27,17 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include #include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/dependencies.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp index 1f39789aa9a3c..9fc7211f0d394 100644 --- a/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp +++ b/src/mongo/db/pipeline/dispatch_shard_pipeline_test.cpp @@ -27,11 +27,54 @@ * it in the license file. 
*/ +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/query/sharded_agg_test_fixture.h" #include "mongo/s/router_role.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/stale_exception.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_path_support.cpp b/src/mongo/db/pipeline/document_path_support.cpp index eeb0831b55dcb..6ea0ad64e6929 100644 --- a/src/mongo/db/pipeline/document_path_support.cpp +++ b/src/mongo/db/pipeline/document_path_support.cpp @@ -27,17 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include -#include "mongo/db/pipeline/document_path_support.h" +#include +#include +#include -#include "mongo/base/parse_number.h" +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/document_path_support.h" #include "mongo/db/pipeline/field_path.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/str.h" namespace mongo { @@ -80,7 +86,11 @@ void visitAllValuesAtPathHelper(Document doc, // positional specifications, if applicable. For example, it will consume "0" and "1" from the // path "a.0.1.b" if the value at "a" is an array with arrays inside it. while (fieldPathIndex < path.getPathLength() && nextValue.isArray()) { - if (auto index = str::parseUnsignedBase10Integer(path.getFieldName(fieldPathIndex))) { + const StringData field = path.getFieldName(fieldPathIndex); + // Check for a numeric component that is not prefixed by 0 (for example "1" rather than + // "01"). These should act as field names, not as an index into an array. 
+ if (auto index = str::parseUnsignedBase10Integer(field); + index && FieldRef::isNumericPathComponentStrict(field)) { nextValue = nextValue[*index]; ++fieldPathIndex; } else { diff --git a/src/mongo/db/pipeline/document_path_support.h b/src/mongo/db/pipeline/document_path_support.h index b1c127af0e403..db3bf461a50d3 100644 --- a/src/mongo/db/pipeline/document_path_support.h +++ b/src/mongo/db/pipeline/document_path_support.h @@ -32,9 +32,13 @@ #include #include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" diff --git a/src/mongo/db/pipeline/document_path_support_test.cpp b/src/mongo/db/pipeline/document_path_support_test.cpp index a33b1ae5d487d..390099c69aed4 100644 --- a/src/mongo/db/pipeline/document_path_support_test.cpp +++ b/src/mongo/db/pipeline/document_path_support_test.cpp @@ -27,20 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/json.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/document_value/document_comparator.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/document_path_support.h" #include "mongo/db/pipeline/field_path.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace document_path_support { @@ -310,6 +317,27 @@ TEST(VisitAllValuesAtPathTest, DoesNotAddMissingValueWithinArrayToResults) { ASSERT_EQ(values.count(Value(2)), 1UL); } +TEST(VisitAllValuesAtPathTest, StrictNumericFields) { + auto values = kDefaultValueComparator.makeUnorderedValueSet(); + auto callback = [&values](const Value& val) { + values.insert(val); + }; + { + Document doc(fromjson("{a: [[], [{b: [3]}, {b: {\"00\": 2}}]]}")); + visitAllValuesAtPath(doc, FieldPath("a.1.b.00"), callback); + // We only find 2. + ASSERT_EQ(values.size(), 1UL); + ASSERT_EQ(values.count(Value(2)), 1UL); + } + { + // Test a 0-prefixed case other than "00". 
+ Document doc(fromjson("{a: [{b: [0, 1]}, {b: {\"01\": 2}}]}")); + visitAllValuesAtPath(doc, FieldPath("a.b.01"), callback); + ASSERT_EQ(values.size(), 1UL); + ASSERT_EQ(values.count(Value(2)), 1UL); + } +} + TEST(ExtractElementAlongNonArrayPathTest, ReturnsMissingIfPathDoesNotExist) { Document doc{{"a", 1}, {"b", 2}}; auto result = extractElementAlongNonArrayPath(doc, FieldPath{"c.d"}); diff --git a/src/mongo/db/pipeline/document_source.cpp b/src/mongo/db/pipeline/document_source.cpp index 28dc3b4ab5c05..a6d1482c4c4e8 100644 --- a/src/mongo/db/pipeline/document_source.cpp +++ b/src/mongo/db/pipeline/document_source.cpp @@ -28,25 +28,34 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/document_source.h" +#include +#include +#include +#include +#include +#include +#include "mongo/base/initializer.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/feature_compatibility_version_documentation.h" #include "mongo/db/matcher/expression_algo.h" -#include "mongo/db/pipeline/document_source_add_fields.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group.h" -#include "mongo/db/pipeline/document_source_internal_shard_filter.h" #include "mongo/db/pipeline/document_source_match.h" -#include "mongo/db/pipeline/document_source_project.h" -#include "mongo/db/pipeline/document_source_replace_root.h" #include "mongo/db/pipeline/document_source_sample.h" -#include "mongo/db/pipeline/document_source_sequential_document_cache.h" +#include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/plan_summary_stats_visitor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/duration.h" #include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/pipeline/document_source.h b/src/mongo/db/pipeline/document_source.h index 491b2f23ba47b..3318bd0e17ee4 100644 --- a/src/mongo/db/pipeline/document_source.h +++ b/src/mongo/db/pipeline/document_source.h @@ -29,17 +29,31 @@ #pragma once -#include "mongo/platform/basic.h" - +#include #include +#include +#include #include +#include +#include +#include +#include +#include #include +#include #include #include +#include #include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/collection_index_usage_tracker.h" #include "mongo/db/commands.h" @@ -47,20 +61,31 @@ #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/scoped_timer.h" -#include "mongo/db/exec/scoped_timer_factory.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/generic_cursor.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/dependencies.h" #include 
"mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/allowed_contexts.h" #include "mongo/db/query/explain_options.h" +#include "mongo/db/query/plan_summary_stats.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/basic.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -363,10 +388,9 @@ class DocumentSource : public RefCountable { } auto serviceCtx = pExpCtx->opCtx->getServiceContext(); - invariant(serviceCtx); + dassert(serviceCtx); - auto timer = scoped_timer_factory::make( - serviceCtx, QueryExecTimerPrecision::kMillis, _commonStats.executionTime.get_ptr()); + ScopedTimer timer(_commonStats.executionTime.get_ptr(), serviceCtx->getFastClockSource()); ++_commonStats.works; diff --git a/src/mongo/db/pipeline/document_source_add_fields.cpp b/src/mongo/db/pipeline/document_source_add_fields.cpp index 448715a96eaf6..e2600374d75c2 100644 --- a/src/mongo/db/pipeline/document_source_add_fields.cpp +++ b/src/mongo/db/pipeline/document_source_add_fields.cpp @@ -27,15 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_add_fields.h" - -#include #include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/add_fields_projection_executor.h" +#include "mongo/db/pipeline/document_source_add_fields.h" +#include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { using boost::intrusive_ptr; diff --git a/src/mongo/db/pipeline/document_source_add_fields.h b/src/mongo/db/pipeline/document_source_add_fields.h index 49cb228c73b0e..f692fc6a8edfb 100644 --- a/src/mongo/db/pipeline/document_source_add_fields.h +++ b/src/mongo/db/pipeline/document_source_add_fields.h @@ -29,7 +29,16 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_add_fields_test.cpp b/src/mongo/db/pipeline/document_source_add_fields_test.cpp index 9d3227f84196f..be38b0ae92e64 100644 --- a/src/mongo/db/pipeline/document_source_add_fields_test.cpp +++ b/src/mongo/db/pipeline/document_source_add_fields_test.cpp @@ -27,19 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_add_fields.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_bucket.cpp b/src/mongo/db/pipeline/document_source_bucket.cpp index 4de8daee49187..a27ca050573d9 100644 --- a/src/mongo/db/pipeline/document_source_bucket.cpp +++ b/src/mongo/db/pipeline/document_source_bucket.cpp @@ -29,10 +29,27 @@ #include "mongo/db/pipeline/document_source_bucket.h" +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_bucket.h b/src/mongo/db/pipeline/document_source_bucket.h index 7ac16dbdd7aa7..b82b51fbc7a92 100644 --- a/src/mongo/db/pipeline/document_source_bucket.h +++ b/src/mongo/db/pipeline/document_source_bucket.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_bucket_auto.cpp b/src/mongo/db/pipeline/document_source_bucket_auto.cpp index 599d1d54a36b8..ab706e3f850f5 100644 --- a/src/mongo/db/pipeline/document_source_bucket_auto.cpp +++ b/src/mongo/db/pipeline/document_source_bucket_auto.cpp @@ -27,14 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_bucket_auto.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/document_source_bucket_auto.h" #include "mongo/db/pipeline/expression_dependencies.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/sorter/sorter_stats.h" #include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -92,7 +110,7 @@ const char* DocumentSourceBucketAuto::getSourceName() const { DocumentSource::GetNextResult DocumentSourceBucketAuto::doGetNext() { if (!_populated) { - const auto populationResult = populateSorter(); + auto populationResult = populateSorter(); if (populationResult.isPaused()) { return populationResult; } @@ -384,10 +402,10 @@ Value DocumentSourceBucketAuto::serialize(SerializationOptions opts) const { MutableDocument insides; insides["groupBy"] = _groupByExpression->serialize(opts); - insides["buckets"] = opts.serializeLiteralValue(_nBuckets); + insides["buckets"] = opts.serializeLiteral(_nBuckets); if (_granularityRounder) { - insides["granularity"] = opts.serializeLiteralValue(_granularityRounder->getName()); + insides["granularity"] = opts.serializeLiteral(_granularityRounder->getName()); } MutableDocument outputSpec(_accumulatedFields.size()); @@ -426,7 +444,7 @@ intrusive_ptr DocumentSourceBucketAuto::create( return new DocumentSourceBucketAuto(pExpCtx, groupByExpression, numBuckets, - accumulationStatements, + std::move(accumulationStatements), granularityRounder, maxMemoryUsageBytes); } @@ -539,8 +557,11 @@ intrusive_ptr DocumentSourceBucketAuto::createFromBson( "$bucketAuto requires 'groupBy' and 'buckets' to be specified", groupByExpression && numBuckets); - return DocumentSourceBucketAuto::create( - pExpCtx, groupByExpression, numBuckets.value(), accumulationStatements, granularityRounder); + return DocumentSourceBucketAuto::create(pExpCtx, + groupByExpression, + numBuckets.value(), + std::move(accumulationStatements), + granularityRounder); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_bucket_auto.h b/src/mongo/db/pipeline/document_source_bucket_auto.h index 9d576eb9a16b0..55dcc0795311c 100644 --- a/src/mongo/db/pipeline/document_source_bucket_auto.h +++ b/src/mongo/db/pipeline/document_source_bucket_auto.h @@ -29,12 +29,37 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulation_statement.h" #include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/granularity_rounder.h" +#include 
"mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/sorter/sorter.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp index 20d3ff3bb6cba..5dc72b0e4382e 100644 --- a/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp +++ b/src/mongo/db/pipeline/document_source_bucket_auto_test.cpp @@ -27,18 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include #include #include #include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" @@ -46,8 +52,12 @@ #include "mongo/db/pipeline/document_source_bucket_auto.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -868,13 +878,11 @@ TEST_F(BucketAutoTests, RedactionWithoutOutputField) { R"({ "$bucketAuto": { "groupBy": "$HASH<_id>", - "buckets": "?", - "granularity": "?", + "buckets": "?number", + "granularity": "?string", "output": { "HASH": { - "$sum": { - "$const": "?" - } + "$sum": "?number" } } } @@ -891,20 +899,21 @@ TEST_F(BucketAutoTests, RedactionWithOutputField) { count: { $sum: 1 }, years: { $push: '$year' } } - } - })"); + }})"); auto docSource = DocumentSourceBucketAuto::createFromBson(spec.firstElement(), getExpCtx()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({ "$bucketAuto": { "groupBy": "$HASH", - "buckets": "?", + "buckets": "?number", "output": { "HASH": { - $sum: { "$const": "?" } + "$sum": "?number" }, - "HASH": { $push: "$HASH" } + "HASH": { + "$push": "$HASH" + } } } })", diff --git a/src/mongo/db/pipeline/document_source_bucket_test.cpp b/src/mongo/db/pipeline/document_source_bucket_test.cpp index 1036c456ef36a..e407242c929cb 100644 --- a/src/mongo/db/pipeline/document_source_bucket_test.cpp +++ b/src/mongo/db/pipeline/document_source_bucket_test.cpp @@ -27,23 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include #include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_bucket.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/document_source_sort.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_change_stream.cpp b/src/mongo/db/pipeline/document_source_change_stream.cpp index 271ed1f4b7f07..aca138fba45c4 100644 --- a/src/mongo/db/pipeline/document_source_change_stream.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream.cpp @@ -30,15 +30,19 @@ #include "mongo/db/pipeline/document_source_change_stream.h" -#include "mongo/bson/simple_bsonelement_comparator.h" -#include "mongo/db/bson/bson_helper.h" -#include "mongo/db/feature_compatibility_version_documentation.h" -#include "mongo/db/pipeline/aggregate_command_gen.h" -#include "mongo/db/pipeline/change_stream_constants.h" +#include +#include + +#include +#include +#include + +#include "mongo/db/basic_types.h" +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" #include "mongo/db/pipeline/change_stream_filter_helpers.h" #include "mongo/db/pipeline/change_stream_helpers.h" -#include "mongo/db/pipeline/change_stream_helpers_legacy.h" -#include "mongo/db/pipeline/document_path_support.h" #include "mongo/db/pipeline/document_source_change_stream_add_post_image.h" #include "mongo/db/pipeline/document_source_change_stream_add_pre_image.h" #include "mongo/db/pipeline/document_source_change_stream_check_invalidate.h" @@ -47,19 +51,16 @@ #include "mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h" #include "mongo/db/pipeline/document_source_change_stream_handle_topology_change.h" #include "mongo/db/pipeline/document_source_change_stream_oplog_match.h" -#include "mongo/db/pipeline/document_source_change_stream_split_large_event.h" #include "mongo/db/pipeline/document_source_change_stream_transform.h" #include "mongo/db/pipeline/document_source_change_stream_unwind_transaction.h" -#include "mongo/db/pipeline/document_source_limit.h" -#include "mongo/db/pipeline/document_source_sort.h" -#include "mongo/db/pipeline/expression.h" -#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/resume_token.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/repl/oplog_entry.h" -#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/vector_clock.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -72,6 +73,10 @@ using 
std::list; using std::string; using std::vector; +namespace { +CounterMetric changeStreamsShowExpandedEvents("changeStreams.showExpandedEvents"); +} + // The $changeStream stage is an alias for many stages. REGISTER_DOCUMENT_SOURCE(changeStream, DocumentSourceChangeStream::LiteParsed::parse, @@ -340,6 +345,7 @@ std::list> DocumentSourceChangeStream::_bui stages.push_back(DocumentSourceMatch::create( change_stream_filter::getMatchFilterForClassicOperationTypes(), expCtx)); } + changeStreamsShowExpandedEvents.increment(spec.getShowExpandedEvents()); return stages; } @@ -359,7 +365,7 @@ void DocumentSourceChangeStream::assertIsLegalSpecification( uassert(ErrorCodes::InvalidOptions, str::stream() << "A $changeStream with 'allChangesForCluster:true' may only be opened " "on the 'admin' database, and with no collection name; found " - << expCtx->ns.ns(), + << expCtx->ns.toStringForErrorMsg(), !spec.getAllChangesForCluster() || (expCtx->ns.isAdminDB() && expCtx->ns.isCollectionlessAggregateNS())); @@ -367,10 +373,10 @@ void DocumentSourceChangeStream::assertIsLegalSpecification( // 'admin' database iff 'allChangesForCluster' is true. A stream may run against the 'config' // database iff 'allowToRunOnConfigDB' is true. const bool isNotBannedInternalDB = - !expCtx->ns.isLocal() && (!expCtx->ns.isConfigDB() || spec.getAllowToRunOnConfigDB()); + !expCtx->ns.isLocalDB() && (!expCtx->ns.isConfigDB() || spec.getAllowToRunOnConfigDB()); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "$changeStream may not be opened on the internal " << expCtx->ns.db() - << " database", + str::stream() << "$changeStream may not be opened on the internal " + << expCtx->ns.dbName().toStringForErrorMsg() << " database", expCtx->ns.isAdminDB() ? static_cast(spec.getAllChangesForCluster()) : isNotBannedInternalDB); @@ -378,8 +384,8 @@ void DocumentSourceChangeStream::assertIsLegalSpecification( // against the internal collections iff 'allowToRunOnSystemNS' is true and the stream is not // opened through a mongos process. uassert(ErrorCodes::InvalidNamespace, - str::stream() << "$changeStream may not be opened on the internal " << expCtx->ns.ns() - << " collection" + str::stream() << "$changeStream may not be opened on the internal " + << expCtx->ns.toStringForErrorMsg() << " collection" << (spec.getAllowToRunOnSystemNS() ? 
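The `changeStreamsShowExpandedEvents` counter introduced above is bumped each time a `$changeStream` pipeline is assembled; because it is incremented by the boolean `showExpandedEvents` flag, it effectively counts only the streams that opted into expanded events. A stand-alone sketch of that usage-tracking pattern, with a plain `std::atomic` standing in for the server's `CounterMetric`:

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

// Simplified stand-in for a server metrics counter.
class UsageCounter {
public:
    // Incrementing by a bool adds 1 only when the feature was actually used,
    // mirroring counter.increment(spec.getShowExpandedEvents()).
    void increment(std::uint64_t by = 1) {
        _value.fetch_add(by, std::memory_order_relaxed);
    }
    std::uint64_t get() const {
        return _value.load(std::memory_order_relaxed);
    }

private:
    std::atomic<std::uint64_t> _value{0};
};

UsageCounter changeStreamsShowExpandedEvents;

void buildChangeStream(bool showExpandedEvents) {
    // ... assemble the stages ...
    changeStreamsShowExpandedEvents.increment(showExpandedEvents);
}

int main() {
    buildChangeStream(true);
    buildChangeStream(false);
    std::cout << changeStreamsShowExpandedEvents.get() << "\n";  // prints 1
}
```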
" through mongos" : ""), !expCtx->ns.isSystem() || (spec.getAllowToRunOnSystemNS() && !expCtx->inMongos)); diff --git a/src/mongo/db/pipeline/document_source_change_stream.h b/src/mongo/db/pipeline/document_source_change_stream.h index 236be5e3994c0..925b4b3721261 100644 --- a/src/mongo/db/pipeline/document_source_change_stream.h +++ b/src/mongo/db/pipeline/document_source_change_stream.h @@ -29,17 +29,44 @@ #pragma once +#include +#include +#include +#include +#include +#include #include - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_constants.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream_gen.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/resume_token.h" #include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { @@ -81,10 +108,10 @@ class DocumentSourceChangeStream final { bool bypassDocumentValidation) const override { if (_nss.isAdminDB() && _nss.isCollectionlessAggregateNS()) { // Watching a whole cluster. - return {Privilege(ResourcePattern::forAnyNormalResource(), actions)}; + return {Privilege(ResourcePattern::forAnyNormalResource(_nss.tenantId()), actions)}; } else if (_nss.isCollectionlessAggregateNS()) { // Watching a whole database. - return {Privilege(ResourcePattern::forDatabaseName(_nss.db()), actions)}; + return {Privilege(ResourcePattern::forDatabaseName(_nss.dbName()), actions)}; } else { // Watching a single collection. Note if this is in the admin database it will fail // at parse time. 
@@ -130,8 +157,10 @@ class DocumentSourceChangeStream final { } } - private: + protected: const NamespaceString _nss; + + private: BSONElement _spec; }; @@ -334,12 +363,17 @@ class LiteParsedDocumentSourceChangeStreamInternal final LiteParsedDocumentSourceChangeStreamInternal(std::string parseTimeName, NamespaceString nss, const BSONElement& spec) - : DocumentSourceChangeStream::LiteParsed(std::move(parseTimeName), std::move(nss), spec) {} + : DocumentSourceChangeStream::LiteParsed(std::move(parseTimeName), std::move(nss), spec), + _privileges({Privilege(ResourcePattern::forClusterResource(_nss.tenantId()), + ActionType::internal)}) {} PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const override final { - return {Privilege(ResourcePattern::forClusterResource(), ActionType::internal)}; + return _privileges; } + +private: + const PrivilegeVector _privileges; }; } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_change_stream.idl b/src/mongo/db/pipeline/document_source_change_stream.idl index 1ed1f0c0dfafe..3dcafc5f9a3b7 100644 --- a/src/mongo/db/pipeline/document_source_change_stream.idl +++ b/src/mongo/db/pipeline/document_source_change_stream.idl @@ -86,6 +86,7 @@ structs: description: A document used to specify the $changeStream stage of an aggregation pipeline. The same spec is also used by the $_internalChangeStreamTransform stage. + query_shape_component: true fields: resumeAfter: cpp_name: resumeAfter @@ -94,6 +95,7 @@ structs: description: An object representing the point at which we should resume reporting changes from. Only one of resumeAfter, startAfter, and startAtOperationTime should be specified. + query_shape: literal startAfter: cpp_name: startAfter @@ -103,6 +105,7 @@ structs: changes from. This is allowed to be a token from an invalidating command. Only one of resumeAfter, startAfter, and startAtOperationTime should be specified. + query_shape: literal startAtOperationTime: cpp_name: startAtOperationTime @@ -111,6 +114,7 @@ structs: description: The operation time after which we should start reporting changes. Only one of resumeAfter, startAfter, and startAtOperationTime should be specified. + query_shape: literal fullDocument: cpp_name: fullDocument @@ -118,6 +122,7 @@ structs: default: kDefault description: A string '"updateLookup"' or '"default"', indicating whether or not we should return a full document or just changes for an update. + query_shape: parameter fullDocumentBeforeChange: cpp_name: fullDocumentBeforeChange @@ -132,6 +137,7 @@ structs: set to "required", then the "fullDocumentBeforeChange" field is always populated and an exception is thrown if the pre-image is not available. + query_shape: parameter allChangesForCluster: cpp_name: allChangesForCluster @@ -139,6 +145,7 @@ structs: description: A flag indicating whether the stream should report all changes that occur on the deployment, aside from those on internal databases or collections. + query_shape: literal showMigrationEvents: cpp_name: showMigrationEvents @@ -149,12 +156,14 @@ structs: deletes may appear that do not reflect actual deletions or insertions of data. Instead they reflect this data moving from one shard to another. + query_shape: literal showSystemEvents: cpp_name: showSystemEvents type: optionalBool description: A flag indicating whether the stream should report events on system collections. 
+ query_shape: literal allowToRunOnConfigDB: cpp_name: allowToRunOnConfigDB @@ -163,17 +172,20 @@ structs: 'config' database, which is usually banned. This flag is used internally to allow mongoS to open a stream on 'config.shards', in order to monitor for the addition of new shards to the cluster. + query_shape: literal allowToRunOnSystemNS: cpp_name: allowToRunOnSystemNS type: optionalBool description: An internal flag indicating whether the change stream may be opened on a system collection. + query_shape: literal showExpandedEvents: type: optionalBool description: A flag indicating whether the change stream output should include the expanded events and parameters that may not be part of the stable API. + query_shape: literal showRawUpdateDescription: cpp_name: showRawUpdateDescription @@ -183,6 +195,7 @@ structs: raw update description from the oplog entry for the corresponding update command. When this flag is set to true, the standard "updateDescription" field in update events will be omitted. + query_shape: literal DocumentSourceChangeStreamOplogMatchSpec: strict: true @@ -202,32 +215,40 @@ structs: strict: true description: A document used to specify the $_internalChangeStreamCheckInvalidate stage of an aggregation pipeline. + query_shape_component: true fields: startAfterInvalidate: type: resumeToken optional: true - + query_shape: literal + DocumentSourceChangeStreamCheckResumabilitySpec: strict: true description: A document used to specify the $_internalChangeStreamCheckResumability stage of an aggregation pipeline. + query_shape_component: true fields: resumeToken: type: resumeToken optional: false + query_shape: literal DocumentSourceChangeStreamAddPreImageSpec: strict: true description: A document used to specify the $_internalChangeStreamAddPreImage stage of an aggregation pipeline. + query_shape_component: true fields: fullDocumentBeforeChange: type: FullDocumentBeforeChangeMode + query_shape: parameter DocumentSourceChangeStreamAddPostImageSpec: strict: true description: A document used to specify the $_internalChangeStreamAddPostImage stage of an aggregation pipeline. + query_shape_component: true fields: fullDocument: type: FullDocumentMode + query_shape: parameter diff --git a/src/mongo/db/pipeline/document_source_change_stream_add_post_image.cpp b/src/mongo/db/pipeline/document_source_change_stream_add_post_image.cpp index 217518bc20ef3..40fed33724c7b 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_add_post_image.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_add_post_image.cpp @@ -27,15 +27,32 @@ * it in the license file. 
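The IDL changes above mark the change-stream specs as `query_shape_component` and tag each field as either `query_shape: literal` or `query_shape: parameter`. My reading is that literal fields are anonymized when a query shape is computed, while parameter fields (such as `fullDocument`) are kept because their value changes what the stage does rather than just its constants. A small sketch of that distinction, with invented types and an illustrative placeholder:

```cpp
#include <iostream>
#include <string>

// Invented stand-in for a stage spec with one field of each kind.
struct SpecLike {
    std::string fullDocument = "updateLookup";   // query_shape: parameter -> kept verbatim
    long long startAtOperationTime = 1700000000; // query_shape: literal   -> anonymized
};

// Serialize for query-shape purposes: parameters survive, literals collapse
// to a placeholder so differing constants yield the same shape.
std::string serializeForQueryShape(const SpecLike& spec) {
    (void)spec.startAtOperationTime;  // the literal's value is intentionally not emitted
    return "{ fullDocument: \"" + spec.fullDocument +
        "\", startAtOperationTime: \"?number\" }";
}

int main() {
    std::cout << serializeForQueryShape(SpecLike{}) << "\n";
}
```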
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_change_stream_add_post_image.h" - -#include "mongo/bson/simple_bsonelement_comparator.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/field_ref_set.h" #include "mongo/db/ops/write_ops_parsers.h" -#include "mongo/db/pipeline/change_stream_helpers_legacy.h" +#include "mongo/db/pipeline/document_source_change_stream_add_post_image.h" #include "mongo/db/pipeline/document_source_change_stream_add_pre_image.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/resume_token.h" #include "mongo/db/update/update_driver.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -119,8 +136,9 @@ NamespaceString DocumentSourceChangeStreamAddPostImage::assertValidNamespace( // the database is 'admin', then this is a cluster-wide $changeStream and we are permitted to // lookup into any namespace. uassert(40579, - str::stream() << "unexpected namespace during post image lookup: " << nss.ns() - << ", expected " << pExpCtx->ns.ns(), + str::stream() << "unexpected namespace during post image lookup: " + << nss.toStringForErrorMsg() << ", expected " + << pExpCtx->ns.toStringForErrorMsg(), nss == pExpCtx->ns || (pExpCtx->isClusterAggregation() || pExpCtx->isDBAggregation(nss.db()))); @@ -210,23 +228,13 @@ boost::optional DocumentSourceChangeStreamAddPostImage::lookupLatestPo } Value DocumentSourceChangeStreamAddPostImage::serialize(SerializationOptions opts) const { - BSONObjBuilder builder; - if (opts.verbosity) { - BSONObjBuilder sub(builder.subobjStart(DocumentSourceChangeStream::kStageName)); - sub.append("stage"_sd, kStageName); - opts.serializeLiteralValue(FullDocumentMode_serializer(_fullDocumentMode)) - .addToBsonObj(&sub, kFullDocumentFieldName); - sub.done(); - } else { - BSONObjBuilder sub(builder.subobjStart(kStageName)); - if (opts.replacementForLiteralArgs) { - sub.append(DocumentSourceChangeStreamAddPostImageSpec::kFullDocumentFieldName, - *opts.replacementForLiteralArgs); - } else { - DocumentSourceChangeStreamAddPostImageSpec(_fullDocumentMode).serialize(&sub); - } - sub.done(); - } - return Value(builder.obj()); + return opts.verbosity + ? 
Value(Document{ + {DocumentSourceChangeStream::kStageName, + Document{{"stage"_sd, kStageName}, + {kFullDocumentFieldName, FullDocumentMode_serializer(_fullDocumentMode)}}}}) + : Value(Document{ + {kStageName, + DocumentSourceChangeStreamAddPostImageSpec(_fullDocumentMode).toBSON(opts)}}); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_change_stream_add_post_image.h b/src/mongo/db/pipeline/document_source_change_stream_add_post_image.h index 15bd2d365c321..4c833c62b4112 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_add_post_image.h +++ b/src/mongo/db/pipeline/document_source_change_stream_add_post_image.h @@ -29,8 +29,29 @@ #pragma once +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_change_stream_add_post_image_test.cpp b/src/mongo/db/pipeline/document_source_change_stream_add_post_image_test.cpp index ba6b8e751a8f3..a61509161c4aa 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_add_post_image_test.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_add_post_image_test.cpp @@ -27,15 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include #include +#include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" @@ -43,9 +50,14 @@ #include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/pipeline/document_source_change_stream_add_post_image.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/pipeline/resume_token.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -137,7 +149,7 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldErrorIfMissingDocumentK Document{{"_id", makeResumeToken(0)}, {"operationType", "update"_sd}, {"fullDocument", Document{{"_id", 0}}}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", expCtx->ns.coll()}}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", expCtx->ns.coll()}}}}, expCtx); lookupChangeStage->setSource(mockLocalSource.get()); @@ -160,7 +172,7 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldErrorIfMissingOperation Document{{"_id", makeResumeToken(0)}, {"documentKey", Document{{"_id", 0}}}, {"fullDocument", Document{{"_id", 0}}}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", expCtx->ns.coll()}}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", expCtx->ns.coll()}}}}, expCtx); lookupChangeStage->setSource(mockLocalSource.get()); @@ -246,7 +258,8 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldErrorIfDatabaseMismatchOnCollectionlessNss) { auto expCtx = getExpCtx(); - expCtx->ns = NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "test")); + expCtx->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "test")); // Set up the lookup change post image stage. auto lookupChangeStage = DocumentSourceChangeStreamAddPostImage::create(expCtx, getSpec()); @@ -271,7 +284,8 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldPassIfDatabaseMatchesOnCollectionlessNss) { auto expCtx = getExpCtx(); - expCtx->ns = NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "test")); + expCtx->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "test")); // Set up the lookup change post image stage. 
auto lookupChangeStage = DocumentSourceChangeStreamAddPostImage::create(expCtx, getSpec()); @@ -284,7 +298,7 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldPassIfDatabaseMatchesOn Document{{"_id", makeResumeToken(0)}, {"documentKey", Document{{"_id", 0}}}, {"operationType", "update"_sd}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", "irrelevant"_sd}}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", "irrelevant"_sd}}}}, expCtx); lookupChangeStage->setSource(mockLocalSource.get()); @@ -296,7 +310,7 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldPassIfDatabaseMatchesOn (Document{{"_id", makeResumeToken(0)}, {"documentKey", Document{{"_id", 0}}}, {"operationType", "update"_sd}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", "irrelevant"_sd}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", "irrelevant"_sd}}}, {"fullDocument", Document{{"_id", 0}}}})); } @@ -311,7 +325,7 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldErrorIfDocumentKeyIsNot Document{{"_id", makeResumeToken(0)}, {"documentKey", Document{{"_id", 0}}}, {"operationType", "update"_sd}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", expCtx->ns.coll()}}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", expCtx->ns.coll()}}}}, expCtx); lookupChangeStage->setSource(mockLocalSource.get()); @@ -337,13 +351,13 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldPropagatePauses) { {Document{{"_id", makeResumeToken(0)}, {"documentKey", Document{{"_id", 0}}}, {"operationType", "insert"_sd}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", expCtx->ns.coll()}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", expCtx->ns.coll()}}}, {"fullDocument", Document{{"_id", 0}}}}, DocumentSource::GetNextResult::makePauseExecution(), Document{{"_id", makeResumeToken(1)}, {"documentKey", Document{{"_id", 1}}}, {"operationType", "update"_sd}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", expCtx->ns.coll()}}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", expCtx->ns.coll()}}}}, DocumentSource::GetNextResult::makePauseExecution()}, expCtx); @@ -362,7 +376,7 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldPropagatePauses) { (Document{{"_id", makeResumeToken(0)}, {"documentKey", Document{{"_id", 0}}}, {"operationType", "insert"_sd}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", expCtx->ns.coll()}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", expCtx->ns.coll()}}}, {"fullDocument", Document{{"_id", 0}}}})); ASSERT_TRUE(lookupChangeStage->getNext().isPaused()); @@ -374,7 +388,7 @@ TEST_F(DocumentSourceChangeStreamAddPostImageTest, ShouldPropagatePauses) { (Document{{"_id", makeResumeToken(1)}, {"documentKey", Document{{"_id", 1}}}, {"operationType", "update"_sd}, - {"ns", Document{{"db", expCtx->ns.db()}, {"coll", expCtx->ns.coll()}}}, + {"ns", Document{{"db", expCtx->ns.db_forTest()}, {"coll", expCtx->ns.coll()}}}, {"fullDocument", Document{{"_id", 1}}}})); ASSERT_TRUE(lookupChangeStage->getNext().isPaused()); diff --git a/src/mongo/db/pipeline/document_source_change_stream_add_pre_image.cpp b/src/mongo/db/pipeline/document_source_change_stream_add_pre_image.cpp index 1ec7179e4e666..2bbd70b45987f 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_add_pre_image.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_add_pre_image.cpp @@ -27,18 +27,27 @@ * it in the license file. 
*/ -#include "mongo/util/assert_util.h" - -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_change_stream_add_pre_image.h" - -#include "mongo/bson/simple_bsonelement_comparator.h" +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/change_stream_serverless_helpers.h" -#include "mongo/db/pipeline/change_stream_helpers_legacy.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_preimage_gen.h" -#include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/db/pipeline/document_source_change_stream_add_pre_image.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -140,27 +149,15 @@ boost::optional DocumentSourceChangeStreamAddPreImage::lookupPreImage( } Value DocumentSourceChangeStreamAddPreImage::serialize(SerializationOptions opts) const { - BSONObjBuilder builder; - if (opts.verbosity) { - BSONObjBuilder sub(builder.subobjStart(DocumentSourceChangeStream::kStageName)); - sub.append("stage"_sd, kStageName); - opts.serializeLiteralValue( - FullDocumentBeforeChangeMode_serializer(_fullDocumentBeforeChangeMode)) - .addToBsonObj(&sub, kFullDocumentBeforeChangeFieldName); - sub.done(); - } else { - BSONObjBuilder sub(builder.subobjStart(kStageName)); - if (opts.replacementForLiteralArgs) { - sub.append( - DocumentSourceChangeStreamAddPreImageSpec::kFullDocumentBeforeChangeFieldName, - *opts.replacementForLiteralArgs); - } else { - DocumentSourceChangeStreamAddPreImageSpec(_fullDocumentBeforeChangeMode) - .serialize(&sub); - } - sub.done(); - } - return Value(builder.obj()); + return opts.verbosity + ? 
Value(Document{ + {DocumentSourceChangeStream::kStageName, + Document{{"stage"_sd, "internalAddPreImage"_sd}, + {"fullDocumentBeforeChange"_sd, + FullDocumentBeforeChangeMode_serializer(_fullDocumentBeforeChangeMode)}}}}) + : Value(Document{{kStageName, + DocumentSourceChangeStreamAddPreImageSpec(_fullDocumentBeforeChangeMode) + .toBSON(opts)}}); } std::string DocumentSourceChangeStreamAddPreImage::makePreImageNotFoundErrorMsg( diff --git a/src/mongo/db/pipeline/document_source_change_stream_add_pre_image.h b/src/mongo/db/pipeline/document_source_change_stream_add_pre_image.h index bad3ccd51b011..6d082d49c3054 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_add_pre_image.h +++ b/src/mongo/db/pipeline/document_source_change_stream_add_pre_image.h @@ -29,8 +29,28 @@ #pragma once +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp b/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp index 3b7ae113496e8..49d08960030ba 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.cpp @@ -28,14 +28,25 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/pipeline/change_stream_helpers.h" #include "mongo/db/pipeline/change_stream_start_after_invalidate_info.h" #include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/pipeline/document_source_change_stream_check_invalidate.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -99,7 +110,7 @@ DocumentSource::GetNextResult DocumentSourceChangeStreamCheckInvalidate::doGetNe // then throws a 'ChangeStreamInvalidated' exception on the next call to this method. 
if (_queuedInvalidate) { - const auto res = DocumentSource::GetNextResult(std::move(_queuedInvalidate.value())); + auto res = DocumentSource::GetNextResult(std::move(_queuedInvalidate.value())); _queuedInvalidate.reset(); return res; } @@ -187,22 +198,13 @@ Value DocumentSourceChangeStreamCheckInvalidate::serialize(SerializationOptions BSONObjBuilder sub(builder.subobjStart(DocumentSourceChangeStream::kStageName)); sub.append("stage"_sd, kStageName); sub.done(); - } else { - BSONObjBuilder sub(builder.subobjStart(kStageName)); - if (_startAfterInvalidate) { - if (opts.replacementForLiteralArgs) { - sub.append( - DocumentSourceChangeStreamCheckInvalidateSpec::kStartAfterInvalidateFieldName, - *opts.replacementForLiteralArgs); - } else { - DocumentSourceChangeStreamCheckInvalidateSpec spec; - spec.setStartAfterInvalidate(ResumeToken(*_startAfterInvalidate)); - spec.serialize(&sub); - } - } - sub.done(); } - return Value(builder.obj()); + DocumentSourceChangeStreamCheckInvalidateSpec spec; + if (_startAfterInvalidate) { + spec.setStartAfterInvalidate(ResumeToken(*_startAfterInvalidate)); + } + return Value( + Document{{DocumentSourceChangeStreamCheckInvalidate::kStageName, spec.toBSON(opts)}}); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.h b/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.h index a8fa30cdac51c..afca2d693ca14 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.h +++ b/src/mongo/db/pipeline/document_source_change_stream_check_invalidate.h @@ -29,8 +29,29 @@ #pragma once +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/change_stream_invalidation_info.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/resume_token.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_change_stream_check_resumability.cpp b/src/mongo/db/pipeline/document_source_change_stream_check_resumability.cpp index f0170cb9f8b92..edbd9d5abde3f 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_check_resumability.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_check_resumability.cpp @@ -27,14 +27,31 @@ * it in the license file. 
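Several hunks in this patch (the `populationResult` local in `$bucketAuto`, the queued-invalidate `res` just above, and `nextInput` later on) drop `const` from locals that are subsequently returned. A `const` local can never be moved from, so when the return is not elided it must be copied; making the local non-const restores the implicit move. A small, self-contained demonstration:

```cpp
#include <iostream>
#include <utility>
#include <vector>

struct Result {
    std::vector<int> payload;
    explicit Result(std::vector<int> p) : payload(std::move(p)) {}
    Result(const Result& o) : payload(o.payload) { std::cout << "copy\n"; }
    Result(Result&& o) noexcept : payload(std::move(o.payload)) { std::cout << "move\n"; }
};

Result makeConst() {
    const Result res(std::vector<int>(1000, 7));
    // 'res' is const, so if the return is not elided it must be copied;
    // moving from a const object is impossible.
    return res;
}

Result makeNonConst() {
    Result res(std::vector<int>(1000, 7));
    // Non-const: if the return is not elided, 'res' is implicitly moved.
    return res;
}

int main() {
    // With NRVO you may see no output at all; build with
    // -fno-elide-constructors (GCC/Clang) to observe "copy" then "move".
    Result a = makeConst();
    Result b = makeNonConst();
    (void)a;
    (void)b;
}
```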
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/curop.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/change_stream_helpers.h" +#include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/pipeline/document_source_change_stream_check_resumability.h" -#include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/repl/oplog_entry.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" using boost::intrusive_ptr; @@ -216,19 +233,14 @@ Value DocumentSourceChangeStreamCheckResumability::serialize(SerializationOption if (opts.verbosity) { BSONObjBuilder sub(builder.subobjStart(DocumentSourceChangeStream::kStageName)); sub.append("stage"_sd, kStageName); - opts.serializeLiteralValue(ResumeToken(_tokenFromClient).toDocument().toBson()) + opts.serializeLiteral(ResumeToken(_tokenFromClient).toDocument().toBson()) .addToBsonObj(&sub, "resumeToken"_sd); sub.done(); } else { - BSONObjBuilder sub(builder.subobjStart(kStageName)); - if (opts.replacementForLiteralArgs) { - sub.append(DocumentSourceChangeStreamCheckResumabilitySpec::kResumeTokenFieldName, - *opts.replacementForLiteralArgs); - } else { + builder.append( + kStageName, DocumentSourceChangeStreamCheckResumabilitySpec(ResumeToken(_tokenFromClient)) - .serialize(&sub); - } - sub.done(); + .toBSON(opts)); } return Value(builder.obj()); } diff --git a/src/mongo/db/pipeline/document_source_change_stream_check_resumability.h b/src/mongo/db/pipeline/document_source_change_stream_check_resumability.h index 3693ab378f75d..e026eb99c3e8f 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_check_resumability.h +++ b/src/mongo/db/pipeline/document_source_change_stream_check_resumability.h @@ -29,13 +29,28 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/change_stream_constants.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/pipeline/document_source_change_stream_gen.h" #include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/resume_token.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { /** diff --git a/src/mongo/db/pipeline/document_source_change_stream_check_topology_change.cpp b/src/mongo/db/pipeline/document_source_change_stream_check_topology_change.cpp index a8953afe8b268..a5bfb56bf5014 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_check_topology_change.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_check_topology_change.cpp @@ -27,12 
+27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/document_source_change_stream_check_topology_change.h" +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/change_stream_helpers_legacy.h" #include "mongo/db/pipeline/change_stream_topology_change_info.h" +#include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_change_stream_check_topology_change.h b/src/mongo/db/pipeline/document_source_change_stream_check_topology_change.h index c4ada63a032eb..48e6e9783fcba 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_check_topology_change.h +++ b/src/mongo/db/pipeline/document_source_change_stream_check_topology_change.h @@ -29,8 +29,22 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.cpp b/src/mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.cpp index 651e249346e7f..c8806a8a63565 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.cpp @@ -27,13 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/change_stream_helpers.h" #include "mongo/db/pipeline/change_stream_start_after_invalidate_info.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { @@ -136,7 +146,7 @@ DocumentSource::GetNextResult DocumentSourceChangeStreamEnsureResumeTokenPresent const auto extraInfo = ex.extraInfo(); tassert(5779200, "Missing ChangeStreamStartAfterInvalidationInfo on exception", extraInfo); - const DocumentSource::GetNextResult nextInput = + DocumentSource::GetNextResult nextInput = Document::fromBsonWithMetaData(extraInfo->getStartAfterInvalidateEvent()); _resumeStatus = @@ -158,12 +168,12 @@ Value DocumentSourceChangeStreamEnsureResumeTokenPresent::serialize( if (opts.verbosity) { BSONObjBuilder sub(builder.subobjStart(DocumentSourceChangeStream::kStageName)); sub.append("stage"_sd, kStageName); - opts.serializeLiteralValue(ResumeToken(_tokenFromClient).toDocument()) + opts.serializeLiteral(ResumeToken(_tokenFromClient).toDocument()) .addToBsonObj(&sub, "resumeToken"_sd); sub.done(); } else { BSONObjBuilder sub(builder.subobjStart(kStageName)); - opts.serializeLiteralValue(ResumeToken(_tokenFromClient).toDocument()) + opts.serializeLiteral(ResumeToken(_tokenFromClient).toDocument()) .addToBsonObj(&sub, "resumeToken"_sd); sub.done(); } diff --git a/src/mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h b/src/mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h index dfdbff78379b1..728167bb7760f 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h +++ b/src/mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h @@ -29,7 +29,19 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream_check_resumability.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/resume_token.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { /** diff --git a/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp b/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp index 28b695c89be06..887dbdc13a239 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.cpp @@ -29,17 +29,41 @@ #include "mongo/db/pipeline/document_source_change_stream_handle_topology_change.h" -#include - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include 
"mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/change_stream_topology_change_info.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/resume_token.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/compiler.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/query/establish_cursors.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -210,9 +234,14 @@ BSONObj DocumentSourceChangeStreamHandleTopologyChange::createUpdatedCommandForN auto* opCtx = pExpCtx->opCtx; bool apiStrict = APIParameters::get(opCtx).getAPIStrict().value_or(false); + tassert(7663502, + str::stream() << "SerializationContext on the expCtx should not be empty, with ns: " + << pExpCtx->ns.ns(), + pExpCtx->serializationCtxt != SerializationContext::stateDefault()); + // Create the 'AggregateCommandRequest' object which will help in creating the parsed pipeline. auto aggCmdRequest = aggregation_request_helper::parseFromBSON( - opCtx, pExpCtx->ns, shardCommand, boost::none, apiStrict); + opCtx, pExpCtx->ns, shardCommand, boost::none, apiStrict, pExpCtx->serializationCtxt); // Parse and optimize the pipeline. 
auto pipeline = Pipeline::parse(aggCmdRequest.getPipeline(), pExpCtx); @@ -251,7 +280,7 @@ BSONObj DocumentSourceChangeStreamHandleTopologyChange::replaceResumeTokenInComm pipeline[0] = Value(Document{{DocumentSourceChangeStream::kStageName, changeStreamStage.freeze()}}); MutableDocument newCmd(std::move(originalCmd)); - newCmd[AggregateCommandRequest::kPipelineFieldName] = Value(pipeline); + newCmd[AggregateCommandRequest::kPipelineFieldName] = Value(std::move(pipeline)); return newCmd.freeze().toBson(); } diff --git a/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.h b/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.h index afa6e16a704d2..ec0c0c1858fae 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.h +++ b/src/mongo/db/pipeline/document_source_change_stream_handle_topology_change.h @@ -29,13 +29,31 @@ #pragma once +#include +#include +#include #include - +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/change_stream_constants.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/shard_id.h" +#include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/s/query/document_source_merge_cursors.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_change_stream_oplog_match.cpp b/src/mongo/db/pipeline/document_source_change_stream_oplog_match.cpp index c824afbed4df8..74d424be72e4f 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_oplog_match.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_oplog_match.cpp @@ -29,10 +29,29 @@ #include "mongo/db/pipeline/document_source_change_stream_oplog_match.h" -#include "mongo/bson/bsonmisc.h" +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_filter_helpers.h" #include "mongo/db/pipeline/change_stream_helpers.h" -#include "mongo/db/pipeline/document_source_change_stream_unwind_transaction.h" +#include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/resume_token.h" +#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/idl/idl_parser.h" namespace mongo { @@ -215,7 +234,8 @@ Value DocumentSourceChangeStreamOplogMatch::serialize(SerializationOptions opts) sub.done(); } else { BSONObjBuilder sub(builder.subobjStart(kStageName)); - if (opts.replacementForLiteralArgs || opts.redactIdentifiers) { + if (opts.literalPolicy != LiteralSerializationPolicy::kUnchanged || + opts.transformIdentifiers) { sub.append(DocumentSourceChangeStreamOplogMatchSpec::kFilterFieldName, getMatchExpression()->serialize(opts)); } else { diff --git 
a/src/mongo/db/pipeline/document_source_change_stream_oplog_match.h b/src/mongo/db/pipeline/document_source_change_stream_oplog_match.h index a5a65d4cb25e5..c2e904bbb4f86 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_oplog_match.h +++ b/src/mongo/db/pipeline/document_source_change_stream_oplog_match.h @@ -29,7 +29,27 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { /** diff --git a/src/mongo/db/pipeline/document_source_change_stream_split_large_event.cpp b/src/mongo/db/pipeline/document_source_change_stream_split_large_event.cpp index 7c997eae6083b..4e75987080b85 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_split_large_event.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_split_large_event.cpp @@ -29,10 +29,32 @@ #include "mongo/db/pipeline/document_source_change_stream_split_large_event.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/pipeline/change_stream_helpers.h" #include "mongo/db/pipeline/change_stream_split_event_helpers.h" +#include "mongo/db/pipeline/document_source_change_stream_check_resumability.h" #include "mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h" #include "mongo/db/pipeline/document_source_change_stream_handle_topology_change.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_change_stream_split_large_event.h b/src/mongo/db/pipeline/document_source_change_stream_split_large_event.h index 2cae822038af4..66ff28d8386a6 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_split_large_event.h +++ b/src/mongo/db/pipeline/document_source_change_stream_split_large_event.h @@ -29,9 +29,28 @@ #pragma once +#include #include - +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/resume_token.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include 
"mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_change_stream_test.cpp b/src/mongo/db/pipeline/document_source_change_stream_test.cpp index 278d8760d9a8e..4feaa8d756081 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_test.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_test.cpp @@ -27,25 +27,41 @@ * it in the license file. */ -#include "mongo/bson/bsontypes.h" -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include +#include #include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" -#include "mongo/db/bson/bson_helper.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_mock.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/matcher/schema/expression_internal_schema_object_match.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/matcher/matcher.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" -#include "mongo/db/pipeline/change_stream_rewrite_helpers.h" #include "mongo/db/pipeline/change_stream_test_helpers.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" @@ -60,20 +76,32 @@ #include "mongo/db/pipeline/document_source_change_stream_split_large_event.h" #include "mongo/db/pipeline/document_source_change_stream_transform.h" #include "mongo/db/pipeline/document_source_change_stream_unwind_transaction.h" -#include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/pipeline/resume_token.h" #include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/s/resharding/resharding_change_event_o2_field_gen.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_history_iterator.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" 
+#include "mongo/util/intrusive_counter.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { @@ -372,7 +400,7 @@ class ChangeStreamStageTest : public ChangeStreamStageTestNoSetup { {DSChangeStream::kClusterTimeField, ts}, {DSChangeStream::kCollectionUuidField, expandedEvents ? V{testUuid()} : Value()}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, V{documentKey}}, {"updateDescription", upateMod}, }; @@ -496,8 +524,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndResumeAfter Lock::GlobalWrite lk(expCtx->opCtx); std::shared_ptr collection = std::make_shared(nss); CollectionCatalog::write(expCtx->opCtx, [&](CollectionCatalog& catalog) { - catalog.registerCollection( - expCtx->opCtx, testUuid(), std::move(collection), /*ts=*/boost::none); + catalog.registerCollection(expCtx->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -523,8 +550,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAfterAndResumeAfterOptions) { Lock::GlobalWrite lk(opCtx); std::shared_ptr collection = std::make_shared(nss); CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { - catalog.registerCollection( - opCtx, testUuid(), std::move(collection), /*ts=*/boost::none); + catalog.registerCollection(opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -555,8 +581,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectBothStartAtOperationTimeAndStartAfterO Lock::GlobalWrite lk(opCtx); std::shared_ptr collection = std::make_shared(nss); CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { - catalog.registerCollection( - opCtx, testUuid(), std::move(collection), /*ts=*/boost::none); + catalog.registerCollection(opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -582,8 +607,7 @@ TEST_F(ChangeStreamStageTest, ShouldRejectResumeAfterWithResumeTokenMissingUUID) Lock::GlobalWrite lk(opCtx); std::shared_ptr collection = std::make_shared(nss); CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { - catalog.registerCollection( - opCtx, testUuid(), std::move(collection), /*ts=*/boost::none); + catalog.registerCollection(opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -667,7 +691,7 @@ TEST_F(ChangeStreamStageTest, TransformInsertDocKeyXAndId) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}, {"x", 2}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"x", 2}, {"_id", 1}}}, // Note _id <-> x reversal. 
}; checkTransformation(insert, expectedInsert); @@ -697,7 +721,7 @@ TEST_F(ChangeStreamStageTest, TransformInsertDocKeyIdAndX) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"x", 2}, {"_id", 1}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, // _id first }; checkTransformation(insert, expectedInsert); @@ -718,7 +742,7 @@ TEST_F(ChangeStreamStageTest, TransformInsertDocKeyJustId) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}, {"x", 2}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}}}, }; checkTransformation(insert, expectedInsert); @@ -754,7 +778,7 @@ TEST_F(ChangeStreamStageTest, TransformInsertFromMigrateShowMigrations) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"x", 2}, {"_id", 1}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, // _id first }; checkTransformation(insert, expectedInsert, spec); @@ -988,7 +1012,7 @@ TEST_F(ChangeStreamStageTest, TransformReplace) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}, {"x", 2}, {"y", 1}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, }; checkTransformation(replace, expectedReplace); @@ -1013,7 +1037,7 @@ TEST_F(ChangeStreamStageTest, TransformReplaceShowExpandedEvents) { {DSChangeStream::kCollectionUuidField, testUuid()}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}, {"x", 2}, {"y", 1}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, }; checkTransformation(replace, expectedReplace, kShowExpandedEventsSpec); @@ -1035,7 +1059,7 @@ TEST_F(ChangeStreamStageTest, TransformDelete) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDeleteOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, }; checkTransformation(deleteEntry, expectedDelete); @@ -1068,7 +1092,7 @@ TEST_F(ChangeStreamStageTest, TransformDeleteShowExpandedEvents) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kCollectionUuidField, testUuid()}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + 
{DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, }; checkTransformation(deleteEntry, expectedDelete, kShowExpandedEventsSpec); @@ -1113,7 +1137,7 @@ TEST_F(ChangeStreamStageTest, TransformDeleteFromMigrateShowMigrations) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDeleteOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}}}, }; @@ -1129,7 +1153,7 @@ TEST_F(ChangeStreamStageTest, TransformDrop) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDropCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; Document expectedInvalidate{ {DSChangeStream::kIdField, @@ -1156,7 +1180,7 @@ TEST_F(ChangeStreamStageTest, TransformDropShowExpandedEvents) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kCollectionUuidField, testUuid()}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; Document expectedInvalidate{ @@ -1190,7 +1214,7 @@ TEST_F(ChangeStreamStageTest, TransformCreate) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kCollectionUuidField, testUuid()}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kOperationDescriptionField, Value(expectedOpDescription)}, }; @@ -1199,19 +1223,20 @@ TEST_F(ChangeStreamStageTest, TransformCreate) { TEST_F(ChangeStreamStageTest, TransformRename) { NamespaceString otherColl = NamespaceString::createNamespaceString_forTest("test.bar"); - OplogEntry rename = - createCommand(BSON("renameCollection" << nss.ns() << "to" << otherColl.ns()), testUuid()); + OplogEntry rename = createCommand( + BSON("renameCollection" << nss.ns_forTest() << "to" << otherColl.ns_forTest()), testUuid()); - const auto opDesc = Value(D{{"to", D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}}); + const auto opDesc = + Value(D{{"to", D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}}); Document expectedRename{ {DSChangeStream::kRenameTargetNssField, - D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}, + D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(kDefaultTs, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; Document expectedInvalidate{ {DSChangeStream::kIdField, @@ -1231,24 +1256,25 @@ TEST_F(ChangeStreamStageTest, TransformRename) { TEST_F(ChangeStreamStageTest, 
TransformRenameShowExpandedEvents) { NamespaceString otherColl = NamespaceString::createNamespaceString_forTest("test.bar"); auto dropTarget = UUID::gen(); - OplogEntry rename = createCommand(BSON("renameCollection" << nss.ns() << "to" << otherColl.ns() - << "dropTarget" << dropTarget), - testUuid()); + OplogEntry rename = + createCommand(BSON("renameCollection" << nss.ns_forTest() << "to" << otherColl.ns_forTest() + << "dropTarget" << dropTarget), + testUuid()); const auto opDesc = V{ - D{{"to", D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}, + D{{"to", D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}, {"dropTarget", dropTarget}}, }; Document expectedRename{ {DSChangeStream::kRenameTargetNssField, - D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}, + D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(kDefaultTs, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kCollectionUuidField, testUuid()}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kOperationDescriptionField, opDesc}, }; Document expectedInvalidate{ @@ -1275,10 +1301,10 @@ TEST_F(ChangeStreamStageTest, TransformInvalidateFromMigrate) { bool dropDBFromMigrate = true; OplogEntry dropDB = createCommand(BSON("dropDatabase" << 1), boost::none, dropDBFromMigrate); bool renameFromMigrate = true; - OplogEntry rename = - createCommand(BSON("renameCollection" << nss.ns() << "to" << otherColl.ns()), - boost::none, - renameFromMigrate); + OplogEntry rename = createCommand( + BSON("renameCollection" << nss.ns_forTest() << "to" << otherColl.ns_forTest()), + boost::none, + renameFromMigrate); for (auto& entry : {dropColl, dropDB, rename}) { checkTransformation(entry, boost::none); @@ -1287,18 +1313,19 @@ TEST_F(ChangeStreamStageTest, TransformInvalidateFromMigrate) { TEST_F(ChangeStreamStageTest, TransformRenameTarget) { NamespaceString otherColl = NamespaceString::createNamespaceString_forTest("test.bar"); - OplogEntry rename = - createCommand(BSON("renameCollection" << otherColl.ns() << "to" << nss.ns()), testUuid()); + OplogEntry rename = createCommand( + BSON("renameCollection" << otherColl.ns_forTest() << "to" << nss.ns_forTest()), testUuid()); - const auto opDesc = Value(D{{"to", D{{"db", nss.db()}, {"coll", nss.coll()}}}}); + const auto opDesc = Value(D{{"to", D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}}); Document expectedRename{ - {DSChangeStream::kRenameTargetNssField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kRenameTargetNssField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(kDefaultTs, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}, + {DSChangeStream::kNamespaceField, + D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}, }; Document expectedInvalidate{ {DSChangeStream::kIdField, @@ -1335,7 +1362,7 @@ TEST_F(ChangeStreamStageTest, 
TransformNewShardDetectedLegacyFormat) { {DSChangeStream::kOperationTypeField, DSChangeStream::kNewShardDetectedOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; getExpCtx()->needsMerge = true; @@ -1344,7 +1371,7 @@ TEST_F(ChangeStreamStageTest, TransformNewShardDetectedLegacyFormat) { } TEST_F(ChangeStreamStageTest, TransformNewShardDetected) { - auto o2Field = D{{"migrateChunkToNewShard", nss.toString()}, + auto o2Field = D{{"migrateChunkToNewShard", nss.toString_forTest()}, {"fromShardId", "fromShard"_sd}, {"toShardId", "toShard"_sd}}; auto newShardDetected = makeOplogEntry(OpTypeEnum::kNoop, @@ -1362,7 +1389,7 @@ TEST_F(ChangeStreamStageTest, TransformNewShardDetected) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kCollectionUuidField, testUuid()}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kOperationDescriptionField, opDesc}, }; @@ -1396,7 +1423,7 @@ TEST_F(ChangeStreamStageTest, TransformReshardBeginLegacyFormat) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kCollectionUuidField, uuid}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kOperationDescriptionField, opDesc}, }; checkTransformation(reshardingBegin, expectedReshardingBegin, spec); @@ -1425,7 +1452,7 @@ TEST_F(ChangeStreamStageTest, TransformReshardBegin) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kCollectionUuidField, uuid}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kOperationDescriptionField, opDesc}, }; checkTransformation(reshardingBegin, expectedReshardingBegin, spec); @@ -1434,7 +1461,7 @@ TEST_F(ChangeStreamStageTest, TransformReshardBegin) { TEST_F(ChangeStreamStageTest, TransformReshardDoneCatchUpLegacyFormat) { auto existingUuid = UUID::gen(); auto reshardingUuid = UUID::gen(); - auto temporaryNs = resharding::constructTemporaryReshardingNss(nss.db(), existingUuid); + auto temporaryNs = resharding::constructTemporaryReshardingNss(nss.db_forTest(), existingUuid); const auto o2FieldInLegacyFormat = BSON("type" << "reshardDoneCatchUp" @@ -1463,7 +1490,7 @@ TEST_F(ChangeStreamStageTest, TransformReshardDoneCatchUpLegacyFormat) { {DSChangeStream::kCollectionUuidField, reshardingUuid}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kNamespaceField, - D{{"db", temporaryNs.db()}, {"coll", temporaryNs.coll()}}}, + D{{"db", temporaryNs.db_forTest()}, {"coll", temporaryNs.coll()}}}, {DSChangeStream::kOperationDescriptionField, opDesc}, }; @@ -1473,7 +1500,7 @@ TEST_F(ChangeStreamStageTest, TransformReshardDoneCatchUpLegacyFormat) { TEST_F(ChangeStreamStageTest, TransformReshardDoneCatchUp) { auto existingUuid = UUID::gen(); auto reshardingUuid = UUID::gen(); - auto temporaryNs = resharding::constructTemporaryReshardingNss(nss.db(), existingUuid); + auto temporaryNs = 
resharding::constructTemporaryReshardingNss(nss.db_forTest(), existingUuid); ReshardDoneCatchUpChangeEventO2Field o2Field{temporaryNs, reshardingUuid}; auto reshardDoneCatchUp = makeOplogEntry(OpTypeEnum::kNoop, @@ -1500,7 +1527,7 @@ TEST_F(ChangeStreamStageTest, TransformReshardDoneCatchUp) { {DSChangeStream::kCollectionUuidField, reshardingUuid}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kNamespaceField, - D{{"db", temporaryNs.db()}, {"coll", temporaryNs.coll()}}}, + D{{"db", temporaryNs.db_forTest()}, {"coll", temporaryNs.coll()}}}, {DSChangeStream::kOperationDescriptionField, opDesc}, }; @@ -1522,7 +1549,7 @@ DEATH_TEST_F(ChangeStreamStageTest, ShouldCrashWithNoopInsideApplyOps, "Unexpect Document{{"applyOps", Value{std::vector{ Document{{"op", "n"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 123}, {"x", "hallo"_sd}}}}}}}}}; LogicalSessionFromClient lsid = testLsid(); @@ -1535,7 +1562,7 @@ DEATH_TEST_F(ChangeStreamStageTest, Document applyOpsDoc = Document{{"applyOps", Value{std::vector{ - Document{{"ns", nss.ns()}, + Document{{"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 123}, {"x", "hallo"_sd}}}}}}}}}; LogicalSessionFromClient lsid = testLsid(); @@ -1549,7 +1576,7 @@ DEATH_TEST_F(ChangeStreamStageTest, Document{{"applyOps", Value{std::vector{ Document{{"op", 2}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 123}, {"x", "hallo"_sd}}}}}}}}}; LogicalSessionFromClient lsid = testLsid(); @@ -1561,7 +1588,7 @@ TEST_F(ChangeStreamStageTest, TransformNonTxnNumberApplyOps) { Document{{"applyOps", Value{std::vector{ Document{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 123}, {"x", "hallo"_sd}}}}}}}}}; @@ -1586,15 +1613,15 @@ TEST_F(ChangeStreamStageTest, TransformNonTxnNumberBatchedDeleteApplyOps) { {"applyOps", Value{std::vector{ Document{{"op", "d"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 10}}}}}, Document{{"op", "d"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 11}}}}}, Document{{"op", "d"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 12}}}}}, }}}, @@ -1646,7 +1673,7 @@ TEST_F(ChangeStreamStageTest, PreparedTransactionApplyOpsEntriesAreIgnored) { Document{{"applyOps", Value{std::vector{ Document{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 123}, {"x", "hallo"_sd}}}}}}}}, {"prepare", true}}; @@ -1664,7 +1691,7 @@ TEST_F(ChangeStreamStageTest, CommitCommandReturnsOperationsFromPreparedTransact {"applyOps", Value{std::vector{ D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{D{{"_id", 123}}}}, {"o2", V{D{}}}}, @@ -1718,7 +1745,7 @@ TEST_F(ChangeStreamStageTest, CommitCommandReturnsOperationsFromPreparedTransact {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 123}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{}}, }; @@ -1736,12 +1763,12 @@ TEST_F(ChangeStreamStageTest, TransactionWithMultipleOplogEntries) { {"applyOps", V{std::vector{ D{{"op", 
"i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{Document{{"_id", 123}}}}, {"o2", V{Document{{"_id", 123}}}}}, D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{Document{{"_id", 456}}}}, {"o2", V{Document{{"_id", 456}}}}}, @@ -1764,7 +1791,7 @@ TEST_F(ChangeStreamStageTest, TransactionWithMultipleOplogEntries) { {"applyOps", V{std::vector{ D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{D{{"_id", 789}}}}, {"o2", V{D{{"_id", 789}}}}}, @@ -1881,7 +1908,7 @@ TEST_F(ChangeStreamStageTest, TransactionWithEmptyOplogEntries) { {"applyOps", V{std::vector{ D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{Document{{"_id", 123}}}}, {"o2", V{Document{{"_id", 123}}}}}, @@ -1919,7 +1946,7 @@ TEST_F(ChangeStreamStageTest, TransactionWithEmptyOplogEntries) { Document applyOps4{ {"applyOps", V{std::vector{D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{Document{{"_id", 456}}}}, {"o2", V{Document{{"_id", 456}}}}}}}}, @@ -2073,12 +2100,12 @@ TEST_F(ChangeStreamStageTest, PreparedTransactionWithMultipleOplogEntries) { {"applyOps", V{std::vector{ D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{D{{"_id", 123}}}}, {"o2", V{D{{"_id", 123}}}}}, D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{D{{"_id", 456}}}}, {"o2", V{D{{"_id", 456}}}}}, @@ -2101,7 +2128,7 @@ TEST_F(ChangeStreamStageTest, PreparedTransactionWithMultipleOplogEntries) { {"applyOps", V{std::vector{ D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{D{{"_id", 789}}}}, {"o2", V{D{{"_id", 789}}}}}, @@ -2224,12 +2251,12 @@ TEST_F(ChangeStreamStageTest, PreparedTransactionEndingWithEmptyApplyOps) { {"applyOps", V{std::vector{ D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{D{{"_id", 123}}}}, {"o2", V{D{{"_id", 123}}}}}, D{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", V{D{{"_id", 456}}}}, {"o2", V{D{{"_id", 456}}}}}, @@ -2346,11 +2373,11 @@ TEST_F(ChangeStreamStageTest, TransformApplyOps) { {"applyOps", Value{std::vector{ Document{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 123}, {"x", "hallo"_sd}}}}}, Document{{"op", "u"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{ @@ -2401,17 +2428,17 @@ TEST_F(ChangeStreamStageTest, TransformApplyOpsWithCreateOperation) { {"applyOps", Value{std::vector{ Document{{"op", "c"_sd}, - {"ns", nss.db() + ".$cmd"}, + {"ns", nss.db_forTest() + ".$cmd"}, {"ui", testUuid()}, {"o", Value{Document{{"create", nss.coll()}, {"idIndex", idIndexDef}}}}, {"ts", Timestamp(0, 1)}}, Document{{"op", "i"_sd}, - {"ns", nss.ns()}, + {"ns", nss.ns_forTest()}, {"ui", testUuid()}, {"o", Value{Document{{"_id", 123}, {"x", "hallo"_sd}}}}}, Document{ {"op", "c"_sd}, - {"ns", nss.db() + ".$cmd"}, + {"ns", nss.db_forTest() + ".$cmd"}, {"ui", UUID::gen()}, // Operation on another collection which should be skipped. 
{"o", Value{Document{{"create", "otherCollection"_sd}, {"idIndex", idIndexDef}}}}}, @@ -2479,28 +2506,29 @@ TEST_F(ChangeStreamStageTest, ClusterTimeMatchesOplogEntry) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDropCollectionOpType}, {DSChangeStream::kClusterTimeField, ts}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; checkTransformation(dropColl, expectedDrop); // Test the 'clusterTime' field is copied from the oplog entry for a collection rename. NamespaceString otherColl = NamespaceString::createNamespaceString_forTest("test.bar"); - OplogEntry rename = - createCommand(BSON("renameCollection" << nss.ns() << "to" << otherColl.ns()), - testUuid(), - boost::none, - opTime); + OplogEntry rename = createCommand( + BSON("renameCollection" << nss.ns_forTest() << "to" << otherColl.ns_forTest()), + testUuid(), + boost::none, + opTime); - const auto opDesc = Value(D{{"to", D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}}); + const auto opDesc = + Value(D{{"to", D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}}); Document expectedRename{ {DSChangeStream::kRenameTargetNssField, - D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}, + D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(ts, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, ts}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; checkTransformation(rename, expectedRename); } @@ -2722,7 +2750,7 @@ TEST_F(ChangeStreamStageTest, CloseCursorOnInvalidateEntries) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDropCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; Document expectedInvalidate{ {DSChangeStream::kIdField, @@ -2767,10 +2795,10 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldNotIncludeShardKeyWhenNoO2FieldIn { Lock::GlobalWrite lk(getExpCtx()->opCtx); - std::shared_ptr collection = std::make_shared(nss); + std::shared_ptr collection = std::make_shared(uuid, nss); CollectionCatalog::write(getExpCtx()->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, uuid, std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -2795,7 +2823,7 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldNotIncludeShardKeyWhenNoO2FieldIn {DSChangeStream::kClusterTimeField, ts}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 2}, {"shardKey", 3}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 2}}}, }; checkTransformation( @@ -2814,10 +2842,10 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldUseO2FieldInOplog) { { Lock::GlobalWrite lk(getExpCtx()->opCtx); - 
std::shared_ptr collection = std::make_shared(nss); + std::shared_ptr collection = std::make_shared(uuid, nss); CollectionCatalog::write(getExpCtx()->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, uuid, std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -2842,7 +2870,7 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldUseO2FieldInOplog) { {DSChangeStream::kClusterTimeField, ts}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 2}, {"shardKey", 3}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 2}, {"shardKey", 3}}}, }; // When o2 is present in the oplog entry, we should use its value for the document key, even if @@ -2857,14 +2885,13 @@ TEST_F(ChangeStreamStageTest, DocumentKeyShouldUseO2FieldInOplog) { TEST_F(ChangeStreamStageTest, ResumeAfterFailsIfResumeTokenDoesNotContainUUID) { const Timestamp ts(3, 45); - const auto uuid = testUuid(); { Lock::GlobalWrite lk(getExpCtx()->opCtx); std::shared_ptr collection = std::make_shared(nss); CollectionCatalog::write(getExpCtx()->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, uuid, std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -2883,19 +2910,21 @@ TEST_F(ChangeStreamStageTest, RenameFromSystemToUserCollectionShouldIncludeNotif // Renaming to a non-system collection will include a notification in the stream. NamespaceString systemColl = NamespaceString::createNamespaceString_forTest(nss.dbName(), "system.users"); - OplogEntry rename = - createCommand(BSON("renameCollection" << systemColl.ns() << "to" << nss.ns()), testUuid()); + OplogEntry rename = createCommand( + BSON("renameCollection" << systemColl.ns_forTest() << "to" << nss.ns_forTest()), + testUuid()); // Note that the collection rename does *not* have the queued invalidated field. - const auto opDesc = Value(D{{"to", D{{"db", nss.db()}, {"coll", nss.coll()}}}}); + const auto opDesc = Value(D{{"to", D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}}); Document expectedRename{ - {DSChangeStream::kRenameTargetNssField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kRenameTargetNssField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(kDefaultTs, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", systemColl.db()}, {"coll", systemColl.coll()}}}, + {DSChangeStream::kNamespaceField, + D{{"db", systemColl.db_forTest()}, {"coll", systemColl.coll()}}}, }; checkTransformation(rename, expectedRename); } @@ -2904,20 +2933,22 @@ TEST_F(ChangeStreamStageTest, RenameFromUserToSystemCollectionShouldIncludeNotif // Renaming to a system collection will include a notification in the stream. 
NamespaceString systemColl = NamespaceString::createNamespaceString_forTest(nss.dbName(), "system.users"); - OplogEntry rename = - createCommand(BSON("renameCollection" << nss.ns() << "to" << systemColl.ns()), testUuid()); + OplogEntry rename = createCommand( + BSON("renameCollection" << nss.ns_forTest() << "to" << systemColl.ns_forTest()), + testUuid()); // Note that the collection rename does *not* have the queued invalidated field. - const auto opDesc = Value(D{{"to", D{{"db", systemColl.db()}, {"coll", systemColl.coll()}}}}); + const auto opDesc = + Value(D{{"to", D{{"db", systemColl.db_forTest()}, {"coll", systemColl.coll()}}}}); Document expectedRename{ {DSChangeStream::kRenameTargetNssField, - D{{"db", systemColl.db()}, {"coll", systemColl.coll()}}}, + D{{"db", systemColl.db_forTest()}, {"coll", systemColl.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(kDefaultTs, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; checkTransformation(rename, expectedRename); } @@ -2931,7 +2962,7 @@ TEST_F(ChangeStreamStageTest, ResumeAfterWithTokenFromInvalidateShouldFail) { std::shared_ptr collection = std::make_shared(nss); CollectionCatalog::write(expCtx->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, testUuid(), std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -2980,8 +3011,7 @@ TEST_F(ChangeStreamStageTest, UsesResumeTokenAsSortKeyIfNeedsMergeIsFalse) { class ChangeStreamStageDBTest : public ChangeStreamStageTest { public: ChangeStreamStageDBTest() - : ChangeStreamStageTest(NamespaceString::makeCollectionlessAggregateNSS( - DatabaseName(boost::none, nss.db()))) {} + : ChangeStreamStageTest(NamespaceString::makeCollectionlessAggregateNSS(nss.dbName())) {} }; TEST_F(ChangeStreamStageDBTest, TransformInsert) { @@ -3000,7 +3030,7 @@ TEST_F(ChangeStreamStageDBTest, TransformInsert) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}, {"x", 2}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"x", 2}, {"_id", 1}}}, // Note _id <-> x reversal. }; checkTransformation(insert, expectedInsert); @@ -3023,7 +3053,7 @@ TEST_F(ChangeStreamStageDBTest, TransformInsertShowExpandedEvents) { {DSChangeStream::kCollectionUuidField, testUuid()}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}, {"x", 2}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"x", 2}, {"_id", 1}}}, // Note _id <-> x reversal. 
}; checkTransformation(insert, expectedInsert, kShowExpandedEventsSpec); @@ -3048,7 +3078,8 @@ TEST_F(ChangeStreamStageDBTest, InsertOnOtherCollections) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}, {"x", 2}}}, - {DSChangeStream::kNamespaceField, D{{"db", otherNss.db()}, {"coll", otherNss.coll()}}}, + {DSChangeStream::kNamespaceField, + D{{"db", otherNss.db_forTest()}, {"coll", otherNss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"x", 2}, {"_id", 1}}}, // Note _id <-> x reversal. }; checkTransformation(insertOtherColl, expectedInsert); @@ -3109,7 +3140,7 @@ TEST_F(ChangeStreamStageDBTest, TransformsEntriesForLegalClientCollectionsWithSy {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}}}, - {DSChangeStream::kNamespaceField, D{{"db", ns.db()}, {"coll", ns.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", ns.db_forTest()}, {"coll", ns.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}}}, }; checkTransformation(insert, expectedInsert); @@ -3184,7 +3215,7 @@ TEST_F(ChangeStreamStageDBTest, TransformReplace) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 1}, {"x", 2}, {"y", 1}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, }; checkTransformation(replace, expectedReplace); @@ -3206,7 +3237,7 @@ TEST_F(ChangeStreamStageDBTest, TransformDelete) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDeleteOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, }; checkTransformation(deleteEntry, expectedDelete); @@ -3252,7 +3283,7 @@ TEST_F(ChangeStreamStageDBTest, TransformDeleteFromMigrateShowMigrations) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDeleteOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 1}, {"x", 2}}}, }; @@ -3267,26 +3298,27 @@ TEST_F(ChangeStreamStageDBTest, TransformDrop) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDropCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; checkTransformation(dropColl, expectedDrop); } TEST_F(ChangeStreamStageDBTest, TransformRename) { NamespaceString otherColl = NamespaceString::createNamespaceString_forTest("test.bar"); - OplogEntry rename = - createCommand(BSON("renameCollection" << nss.ns() << "to" << otherColl.ns()), testUuid()); + OplogEntry rename = createCommand( + BSON("renameCollection" << nss.ns_forTest() << "to" << otherColl.ns_forTest()), testUuid()); - const auto 
opDesc = Value(D{{"to", D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}}); + const auto opDesc = + Value(D{{"to", D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}}); Document expectedRename{ {DSChangeStream::kRenameTargetNssField, - D{{"db", otherColl.db()}, {"coll", otherColl.coll()}}}, + D{{"db", otherColl.db_forTest()}, {"coll", otherColl.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(kDefaultTs, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; checkTransformation(rename, expectedRename); } @@ -3301,7 +3333,7 @@ TEST_F(ChangeStreamStageDBTest, TransformDropDatabase) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDropDatabaseOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}}}, }; Document expectedInvalidate{ {DSChangeStream::kIdField, @@ -3328,7 +3360,7 @@ TEST_F(ChangeStreamStageDBTest, TransformDropDatabaseShowExpandedEvents) { {DSChangeStream::kOperationTypeField, DSChangeStream::kDropDatabaseOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}}}, }; Document expectedInvalidate{ {DSChangeStream::kIdField, @@ -3357,8 +3389,9 @@ TEST_F(ChangeStreamStageDBTest, MatchFiltersOperationsOnSystemCollections) { // Rename from a 'system' collection to another 'system' collection should not include a // notification. NamespaceString renamedSystemColl(NamespaceString::makeSystemDotViewsNamespace(nss.dbName())); - OplogEntry rename = createCommand( - BSON("renameCollection" << systemColl.ns() << "to" << renamedSystemColl.ns()), testUuid()); + OplogEntry rename = createCommand(BSON("renameCollection" << systemColl.ns_forTest() << "to" + << renamedSystemColl.ns_forTest()), + testUuid()); checkTransformation(rename, boost::none); } @@ -3369,19 +3402,22 @@ TEST_F(ChangeStreamStageDBTest, RenameFromSystemToUserCollectionShouldIncludeNot NamespaceString renamedColl = NamespaceString::createNamespaceString_forTest(nss.dbName(), "non_system_coll"); OplogEntry rename = createCommand( - BSON("renameCollection" << systemColl.ns() << "to" << renamedColl.ns()), testUuid()); + BSON("renameCollection" << systemColl.ns_forTest() << "to" << renamedColl.ns_forTest()), + testUuid()); // Note that the collection rename does *not* have the queued invalidated field. 
- const auto opDesc = Value(D{{"to", D{{"db", renamedColl.db()}, {"coll", renamedColl.coll()}}}}); + const auto opDesc = + Value(D{{"to", D{{"db", renamedColl.db_forTest()}, {"coll", renamedColl.coll()}}}}); Document expectedRename{ {DSChangeStream::kRenameTargetNssField, - D{{"db", renamedColl.db()}, {"coll", renamedColl.coll()}}}, + D{{"db", renamedColl.db_forTest()}, {"coll", renamedColl.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(kDefaultTs, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", systemColl.db()}, {"coll", systemColl.coll()}}}, + {DSChangeStream::kNamespaceField, + D{{"db", systemColl.db_forTest()}, {"coll", systemColl.coll()}}}, }; checkTransformation(rename, expectedRename); } @@ -3390,20 +3426,22 @@ TEST_F(ChangeStreamStageDBTest, RenameFromUserToSystemCollectionShouldIncludeNot // Renaming to a system collection will include a notification in the stream. NamespaceString systemColl = NamespaceString::createNamespaceString_forTest(nss.dbName(), "system.users"); - OplogEntry rename = - createCommand(BSON("renameCollection" << nss.ns() << "to" << systemColl.ns()), testUuid()); + OplogEntry rename = createCommand( + BSON("renameCollection" << nss.ns_forTest() << "to" << systemColl.ns_forTest()), + testUuid()); // Note that the collection rename does *not* have the queued invalidated field. - const auto opDesc = Value(D{{"to", D{{"db", systemColl.db()}, {"coll", systemColl.coll()}}}}); + const auto opDesc = + Value(D{{"to", D{{"db", systemColl.db_forTest()}, {"coll", systemColl.coll()}}}}); Document expectedRename{ {DSChangeStream::kRenameTargetNssField, - D{{"db", systemColl.db()}, {"coll", systemColl.coll()}}}, + D{{"db", systemColl.db_forTest()}, {"coll", systemColl.coll()}}}, {DSChangeStream::kIdField, makeResumeToken(kDefaultTs, testUuid(), opDesc, DSChangeStream::kRenameCollectionOpType)}, {DSChangeStream::kOperationTypeField, DSChangeStream::kRenameCollectionOpType}, {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, }; checkTransformation(rename, expectedRename); } @@ -3424,10 +3462,10 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldNotIncludeShardKeyWhenNoO2Field { Lock::GlobalWrite lk(getExpCtx()->opCtx); - std::shared_ptr collection = std::make_shared(nss); + std::shared_ptr collection = std::make_shared(uuid, nss); CollectionCatalog::write(getExpCtx()->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, uuid, std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -3451,7 +3489,7 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldNotIncludeShardKeyWhenNoO2Field {DSChangeStream::kClusterTimeField, ts}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 2}, {"shardKey", 3}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 2}}}, }; checkTransformation( @@ -3466,10 +3504,10 @@ TEST_F(ChangeStreamStageDBTest, 
DocumentKeyShouldUseO2FieldInOplog) { { Lock::GlobalWrite lk(getExpCtx()->opCtx); - std::shared_ptr collection = std::make_shared(nss); + std::shared_ptr collection = std::make_shared(uuid, nss); CollectionCatalog::write(getExpCtx()->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, uuid, std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -3493,7 +3531,7 @@ TEST_F(ChangeStreamStageDBTest, DocumentKeyShouldUseO2FieldInOplog) { {DSChangeStream::kClusterTimeField, ts}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 2}, {"shardKey", 3}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 2}, {"shardKey", 3}}}, }; // When o2 is present in the oplog entry, we should use its value for the document key, even if @@ -3511,7 +3549,7 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromInvalidateShouldFail) { std::shared_ptr collection = std::make_shared(nss); CollectionCatalog::write(expCtx->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, testUuid(), std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -3536,10 +3574,10 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromDropDatabase) { { Lock::GlobalWrite lk(getExpCtx()->opCtx); - std::shared_ptr collection = std::make_shared(nss); + std::shared_ptr collection = std::make_shared(uuid, nss); CollectionCatalog::write(getExpCtx()->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, uuid, std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -3561,7 +3599,7 @@ TEST_F(ChangeStreamStageDBTest, ResumeAfterWithTokenFromDropDatabase) { {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 2}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 2}}}, }; checkTransformation( @@ -3574,10 +3612,10 @@ TEST_F(ChangeStreamStageDBTest, StartAfterSucceedsEvenIfResumeTokenDoesNotContai { Lock::GlobalWrite lk(getExpCtx()->opCtx); - std::shared_ptr collection = std::make_shared(nss); + std::shared_ptr collection = std::make_shared(uuid, nss); CollectionCatalog::write(getExpCtx()->opCtx, [&](CollectionCatalog& catalog) { catalog.registerCollection( - getExpCtx()->opCtx, uuid, std::move(collection), /*ts=*/boost::none); + getExpCtx()->opCtx, std::move(collection), /*ts=*/boost::none); }); } @@ -3596,7 +3634,7 @@ TEST_F(ChangeStreamStageDBTest, StartAfterSucceedsEvenIfResumeTokenDoesNotContai {DSChangeStream::kClusterTimeField, kDefaultTs}, {DSChangeStream::kWallTimeField, Date_t()}, {DSChangeStream::kFullDocumentField, D{{"_id", 2}}}, - {DSChangeStream::kNamespaceField, D{{"db", nss.db()}, {"coll", nss.coll()}}}, + {DSChangeStream::kNamespaceField, D{{"db", nss.db_forTest()}, {"coll", nss.coll()}}}, {DSChangeStream::kDocumentKeyField, D{{"_id", 2}}}, }; checkTransformation( @@ -4424,8 +4462,8 @@ TEST_F(MultiTokenFormatVersionTest, CanResumeFromV1HighWaterMark) { ResumeTokenData resumeToken = 
ResumeToken::makeHighWaterMarkToken(resumeTs, 2).getData(); resumeToken.version = 1; auto expCtx = getExpCtxRaw(); - expCtx->ns = - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "unittests")); + expCtx->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "unittests")); // Create a change stream spec that resumes after 'resumeToken'. const auto spec = @@ -4484,7 +4522,7 @@ TEST_F(ChangeStreamStageTestNoSetup, RedactDocumentSourceChangeStreamAddPostImag R"({"$_internalChangeStreamAddPostImage":{"fullDocument":"updateLookup"}})", docSource->serialize(SerializationOptions{}).getDocument().toBson()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"$_internalChangeStreamAddPostImage":{"fullDocument":"?"}})", + R"({"$_internalChangeStreamAddPostImage":{"fullDocument":"updateLookup"}})", redact(*docSource)); } @@ -4500,7 +4538,7 @@ TEST_F(ChangeStreamStageTestNoSetup, RedactDocumentSourceChangeStreamAddPreImage })", docSource.serialize(SerializationOptions{}).getDocument().toBson()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"$_internalChangeStreamAddPreImage":{"fullDocumentBeforeChange":"?"}})", + R"({"$_internalChangeStreamAddPreImage":{"fullDocumentBeforeChange":"whenAvailable"}})", redact(docSource)); } @@ -4525,7 +4563,7 @@ TEST_F(ChangeStreamStageTestNoSetup, RedactDocumentSourceChangeStreamCheckInvali })", docSource->serialize(SerializationOptions{}).getDocument().toBson()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"$_internalChangeStreamCheckInvalidate":{"startAfterInvalidate":"?"}})", + R"({"$_internalChangeStreamCheckInvalidate":{"startAfterInvalidate":"?object"}})", redact(*docSource)); } @@ -4549,7 +4587,7 @@ TEST_F(ChangeStreamStageTestNoSetup, RedactDocumentSourceChangeStreamCheckResuma })", docSource->serialize(SerializationOptions{}).getDocument().toBson()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"$_internalChangeStreamCheckResumability":{"resumeToken":"?"}})", + R"({"$_internalChangeStreamCheckResumability":{"resumeToken":"?object"}})", redact(*docSource)); } @@ -4584,7 +4622,11 @@ TEST_F(ChangeStreamStageTestNoSetup, RedactDocumentSourceChangeStreamEnsureResum })", docSource->serialize(SerializationOptions{}).getDocument().toBson()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"$_internalChangeStreamEnsureResumeTokenPresent":{"resumeToken":"?"}})", + R"({ + "$_internalChangeStreamEnsureResumeTokenPresent": { + "resumeToken": "?object" + } + })", redact(*docSource)); } @@ -4641,9 +4683,9 @@ TEST_F(ChangeStreamStageTestNoSetup, RedactDocumentSourceChangeStreamTransform) ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({ "$_internalChangeStreamTransform": { - "resumeAfter": "?", - "fullDocument": "?", - "fullDocumentBeforeChange": "?" 
+ "resumeAfter": "?object", + "fullDocument": "default", + "fullDocumentBeforeChange": "off" } })", redact(*docSource)); diff --git a/src/mongo/db/pipeline/document_source_change_stream_transform.cpp b/src/mongo/db/pipeline/document_source_change_stream_transform.cpp index 805d95262b397..ca56f7be10b03 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_transform.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_transform.cpp @@ -28,16 +28,25 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_change_stream_transform.h" - +#include +#include + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_helpers.h" -#include "mongo/db/pipeline/expression.h" -#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/document_source_change_stream_transform.h" #include "mongo/db/pipeline/resume_token.h" -#include "mongo/db/repl/bson_extract_optime.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -113,7 +122,7 @@ void serializeSpecField(BSONObjBuilder* builder, const StringData& fieldName, const boost::optional& value) { if (value) { - opts.serializeLiteralValue((*value).toBSON()).addToBsonObj(builder, fieldName); + opts.serializeLiteral((*value).toBSON()).addToBsonObj(builder, fieldName); } } @@ -123,7 +132,7 @@ void serializeSpecField(BSONObjBuilder* builder, const StringData& fieldName, const boost::optional& value) { if (value) { - opts.serializeLiteralValue(*value).addToBsonObj(builder, fieldName); + opts.serializeLiteral(*value).addToBsonObj(builder, fieldName); } } @@ -132,7 +141,7 @@ void serializeSpecField(BSONObjBuilder* builder, SerializationOptions opts, const StringData& fieldName, const T& value) { - opts.serializeLiteralValue(value).addToBsonObj(builder, fieldName); + opts.appendLiteral(builder, fieldName, value); } template <> @@ -141,11 +150,7 @@ void serializeSpecField(BSONObjBuilder* builder, const StringData& fieldName, const mongo::OptionalBool& value) { if (value.has_value()) { - if (opts.replacementForLiteralArgs) { - builder->append(fieldName, *opts.replacementForLiteralArgs); - } else { - value.serializeToBSON(fieldName, builder); - } + opts.appendLiteral(builder, fieldName, value.value_or(true)); } } @@ -204,20 +209,14 @@ void serializeSpec(const DocumentSourceChangeStreamSpec& spec, } // namespace Value DocumentSourceChangeStreamTransform::serialize(SerializationOptions opts) const { - BSONObjBuilder builder; if (opts.verbosity) { - BSONObjBuilder sub(builder.subobjStart(DocumentSourceChangeStream::kStageName)); - sub.append("stage"_sd, kStageName); - BSONObjBuilder options(sub.subobjStart("options"_sd)); - serializeSpec(_changeStreamSpec, opts, &options); - options.done(); - sub.done(); - } else { - BSONObjBuilder sub(builder.subobjStart(kStageName)); - serializeSpec(_changeStreamSpec, opts, &sub); - sub.done(); + return Value(Document{{DocumentSourceChangeStream::kStageName, + Document{{"stage"_sd, "internalTransform"_sd}, + {"options"_sd, _changeStreamSpec.toBSON(opts)}}}}); } - return Value(builder.obj()); + + return Value(Document{ + 
{DocumentSourceChangeStreamTransform::kStageName, _changeStreamSpec.toBSON(opts)}}); } DepsTracker::State DocumentSourceChangeStreamTransform::getDependencies(DepsTracker* deps) const { diff --git a/src/mongo/db/pipeline/document_source_change_stream_transform.h b/src/mongo/db/pipeline/document_source_change_stream_transform.h index ee433d6cd50f9..0d9ae2d6fce50 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_transform.h +++ b/src/mongo/db/pipeline/document_source_change_stream_transform.h @@ -29,8 +29,25 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/change_stream_event_transform.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_change_stream_unwind_transaction.cpp b/src/mongo/db/pipeline/document_source_change_stream_unwind_transaction.cpp index 6ed09402bdf46..00a0fac2fab77 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_unwind_transaction.cpp +++ b/src/mongo/db/pipeline/document_source_change_stream_unwind_transaction.cpp @@ -28,14 +28,40 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_change_stream_unwind_transaction.h" - +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" #include "mongo/db/pipeline/change_stream_filter_helpers.h" #include "mongo/db/pipeline/change_stream_rewrite_helpers.h" +#include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" +#include "mongo/db/pipeline/document_source_change_stream_unwind_transaction.h" +#include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -141,7 +167,7 @@ Value DocumentSourceChangeStreamUnwindTransaction::serialize(SerializationOption } Value spec; - if (opts.replacementForLiteralArgs || opts.redactIdentifiers) { + if (opts.literalPolicy != LiteralSerializationPolicy::kUnchanged || opts.transformIdentifiers) { spec = Value(DOC(DocumentSourceChangeStreamUnwindTransactionSpec::kFilterFieldName << _expression->serialize(opts))); } else { diff --git a/src/mongo/db/pipeline/document_source_change_stream_unwind_transaction.h 
b/src/mongo/db/pipeline/document_source_change_stream_unwind_transaction.h index 18fd795c1f4a8..18cb74736ecb9 100644 --- a/src/mongo/db/pipeline/document_source_change_stream_unwind_transaction.h +++ b/src/mongo/db/pipeline/document_source_change_stream_unwind_transaction.h @@ -29,7 +29,39 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp b/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp index 0283aeb68f991..c477abc0c7704 100644 --- a/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp +++ b/src/mongo/db/pipeline/document_source_check_resume_token_test.cpp @@ -27,30 +27,69 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include #include - +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_mock.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream_check_resumability.h" #include "mongo/db/pipeline/document_source_change_stream_ensure_resume_token_present.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/resume_token.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/service_context.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/devnull/devnull_kv_engine.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" using boost::intrusive_ptr; @@ -76,7 +115,9 @@ class ChangeStreamOplogCursorMock : public SeekableRecordCursor { } boost::optional seekExact(const RecordId& id) override { - return Record{}; + boost::optional result; + result.emplace(); + return result; } boost::optional seekNear(const RecordId& id) override { return boost::none; @@ -205,7 +246,7 @@ class DocumentSourceChangeStreamMock : public DocumentSourceMock { // If this is the first call to doGetNext, we must create the COLLSCAN. if (!_collScan) { _collScan = std::make_unique( - pExpCtx.get(), _collectionPtr, _params, &_ws, _filter.get()); + pExpCtx.get(), &_collectionPtr, _params, &_ws, _filter.get()); } while (true) { // If the next result is a pause, return it and don't collscan. 
diff --git a/src/mongo/db/pipeline/document_source_coll_stats.cpp b/src/mongo/db/pipeline/document_source_coll_stats.cpp index 6e8efcca9aa27..cf3498841bec8 100644 --- a/src/mongo/db/pipeline/document_source_coll_stats.cpp +++ b/src/mongo/db/pipeline/document_source_coll_stats.cpp @@ -27,14 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/document_source_coll_stats.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/pipeline/lite_parsed_document_source.h" -#include "mongo/db/stats/top.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/serialization_context.h" #include "mongo/util/time_support.h" using boost::intrusive_ptr; @@ -65,8 +74,15 @@ intrusive_ptr DocumentSourceCollStats::createFromBson( uassert(40166, str::stream() << "$collStats must take a nested object but found: " << specElem, specElem.type() == BSONType::Object); - auto spec = - DocumentSourceCollStatsSpec::parse(IDLParserContext(kStageName), specElem.embeddedObject()); + + // TODO SERVER-77056: add assertion to validate pExpCtx->serializationCtxt != stateDefault() + + auto spec = DocumentSourceCollStatsSpec::parse( + IDLParserContext(kStageName, + false /* apiStrict */, + pExpCtx->ns.tenantId(), + SerializationContext::stateCommandReply(pExpCtx->serializationCtxt)), + specElem.embeddedObject()); return make_intrusive(pExpCtx, std::move(spec)); } @@ -78,7 +94,12 @@ BSONObj DocumentSourceCollStats::makeStatsForNs( const boost::optional& filterObj) { BSONObjBuilder builder; - builder.append("ns", NamespaceStringUtil::serialize(nss)); + // We need to use the serialization context from the request when calling + // NamespaceStringUtil to build the reply. + builder.append( + "ns", + NamespaceStringUtil::serialize( + nss, SerializationContext::stateCommandReply(spec.getSerializationContext()))); auto shardName = expCtx->mongoProcessInterface->getShardName(expCtx->opCtx); @@ -90,17 +111,19 @@ BSONObj DocumentSourceCollStats::makeStatsForNs( builder.appendDate("localTime", jsTime()); if (auto latencyStatsSpec = spec.getLatencyStats()) { + // getRequestOnTimeseriesView is set to true if collstats is called on the view. + auto resolvedNss = + spec.getRequestOnTimeseriesView() ? nss.getTimeseriesViewNamespace() : nss; expCtx->mongoProcessInterface->appendLatencyStats( - expCtx->opCtx, nss, latencyStatsSpec->getHistograms(), &builder); + expCtx->opCtx, resolvedNss, latencyStatsSpec->getHistograms(), &builder); } if (auto storageStats = spec.getStorageStats()) { // If the storageStats field exists, it must have been validated as an object when parsing. 
BSONObjBuilder storageBuilder(builder.subobjStart("storageStats")); - uassertStatusOKWithContext( - expCtx->mongoProcessInterface->appendStorageStats( - expCtx->opCtx, nss, *storageStats, &storageBuilder, filterObj), - "Unable to retrieve storageStats in $collStats stage"); + uassertStatusOKWithContext(expCtx->mongoProcessInterface->appendStorageStats( + expCtx, nss, *storageStats, &storageBuilder, filterObj), + "Unable to retrieve storageStats in $collStats stage"); storageBuilder.doneFast(); } @@ -129,10 +152,7 @@ DocumentSource::GetNextResult DocumentSourceCollStats::doGetNext() { } Value DocumentSourceCollStats::serialize(SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484352); - } - return Value(Document{{getSourceName(), _collStatsSpec.toBSON()}}); + return Value(Document{{getSourceName(), _collStatsSpec.toBSON(opts)}}); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_coll_stats.h b/src/mongo/db/pipeline/document_source_coll_stats.h index 6c29644fb8756..e6933075ec811 100644 --- a/src/mongo/db/pipeline/document_source_coll_stats.h +++ b/src/mongo/db/pipeline/document_source_coll_stats.h @@ -29,8 +29,38 @@ #pragma once +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_coll_stats_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_coll_stats.idl b/src/mongo/db/pipeline/document_source_coll_stats.idl index e0babb11ef610..09da6d6506507 100644 --- a/src/mongo/db/pipeline/document_source_coll_stats.idl +++ b/src/mongo/db/pipeline/document_source_coll_stats.idl @@ -39,31 +39,46 @@ structs: LatencyStatsSpec: description: Represents the 'latencyStats' argument to the $collStats stage. strict: true + query_shape_component: true fields: histograms: description: Adds latency histogram information to the embedded documents in latencyStats if true. type: optionalBool + # Do not abstract this literal, since it is parameterizing the stage like an enum rather than representing + # real user input. + query_shape: parameter DocumentSourceCollStatsSpec: description: Specification for a $collStats stage. strict: true + query_shape_component: true fields: latencyStats: description: A request to include latency stats in the $collStats output. type: LatencyStatsSpec optional: true + query_shape: literal storageStats: description: Adds storage statistics to the return document. type: StorageStatsSpec optional: true + query_shape: literal count: description: Adds the total number of documents in the collection to the return document. 
type: object validator: callback: validateObjectIsEmpty optional: true + query_shape: literal queryExecStats: description: Adds query execution statistics to the return document. type: object validator: callback: validateObjectIsEmpty optional: true + query_shape: literal + $_requestOnTimeseriesView: + description: When set to true, $collStats stage requests statistics from the view namespace. + When set to false, $collStats stage requests statistics from the underlying collection. + cpp_name: requestOnTimeseriesView + type: optionalBool + query_shape: parameter diff --git a/src/mongo/db/pipeline/document_source_coll_stats_test.cpp b/src/mongo/db/pipeline/document_source_coll_stats_test.cpp new file mode 100644 index 0000000000000..1be8cf2b259a4 --- /dev/null +++ b/src/mongo/db/pipeline/document_source_coll_stats_test.cpp @@ -0,0 +1,124 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include + +#include +#include + +#include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source_coll_stats.h" +#include "mongo/db/pipeline/document_source_coll_stats_gen.h" +#include "mongo/db/pipeline/storage_stats_spec_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" + +namespace mongo { +namespace { +using DocumentSourceCollStatsTest = AggregationContextFixture; +TEST_F(DocumentSourceCollStatsTest, QueryShape) { + auto spec = DocumentSourceCollStatsSpec(); + + auto stage = make_intrusive(getExpCtx(), spec); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$collStats":{}})", + redact(*stage)); + + spec.setCount(BSONObj()); + spec.setQueryExecStats(BSONObj()); + stage = make_intrusive(getExpCtx(), spec); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$collStats":{"count":"?object","queryExecStats":"?object"}})", + redact(*stage)); + + auto latencyStats = LatencyStatsSpec(); + latencyStats.setHistograms(true); + spec.setLatencyStats(latencyStats); + stage = make_intrusive(getExpCtx(), spec); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$collStats": { + "latencyStats": { + "histograms": true + }, + "count": "?object", + "queryExecStats": "?object" + } + })", + redact(*stage)); + + auto storageStats = StorageStatsSpec(); + storageStats.setScale(2); + storageStats.setVerbose(true); + spec.setStorageStats(storageStats); + stage = make_intrusive(getExpCtx(), spec); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$collStats": { + "latencyStats": { + "histograms": true + }, + "storageStats": { + "scale": "?number", + "verbose": true, + "waitForLock": true, + "numericOnly": false + }, + "count": "?object", + "queryExecStats": "?object" + } + })", + redact(*stage)); + + storageStats.setWaitForLock(false); + storageStats.setNumericOnly(false); + spec.setStorageStats(storageStats); + stage = make_intrusive(getExpCtx(), spec); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$collStats": { + "latencyStats": { + "histograms": true + }, + "storageStats": { + "scale": "?number", + "verbose": true, + "waitForLock": false, + "numericOnly": false + }, + "count": "?object", + "queryExecStats": "?object" + } + })", + redact(*stage)); +} +} // namespace +} // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_count.cpp b/src/mongo/db/pipeline/document_source_count.cpp index bef22eef1785f..bcb9a7827dff9 100644 --- a/src/mongo/db/pipeline/document_source_count.cpp +++ b/src/mongo/db/pipeline/document_source_count.cpp @@ -27,15 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/document_source_count.h" +#include -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/pipeline/document_source_count.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_count.h b/src/mongo/db/pipeline/document_source_count.h index 5abc4bfca69d6..b7a16b44533fd 100644 --- a/src/mongo/db/pipeline/document_source_count.h +++ b/src/mongo/db/pipeline/document_source_count.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_count_test.cpp b/src/mongo/db/pipeline/document_source_count_test.cpp index 9ffd8a6d120ed..5cf3f12aa170c 100644 --- a/src/mongo/db/pipeline/document_source_count_test.cpp +++ b/src/mongo/db/pipeline/document_source_count_test.cpp @@ -27,11 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include #include +#include + #include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" @@ -43,6 +43,11 @@ #include "mongo/db/pipeline/document_source_count.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_current_op.cpp b/src/mongo/db/pipeline/document_source_current_op.cpp index d31ead33731e6..145cb18bb7a5c 100644 --- a/src/mongo/db/pipeline/document_source_current_op.cpp +++ b/src/mongo/db/pipeline/document_source_current_op.cpp @@ -27,11 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/document_source_current_op.h" +#include +#include +#include -#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/document_source_current_op.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { @@ -43,6 +52,7 @@ const StringData kLocalOpsFieldName = "localOps"_sd; const StringData kTruncateOpsFieldName = "truncateOps"_sd; const StringData kIdleCursorsFieldName = "idleCursors"_sd; const StringData kBacktraceFieldName = "backtrace"_sd; +const StringData kTargetAllNodesFieldName = "targetAllNodes"_sd; const StringData kOpIdFieldName = "opid"_sd; const StringData kClientFieldName = "client"_sd; @@ -102,7 +112,7 @@ std::unique_ptr DocumentSourceCurrentOp::Li } return std::make_unique( - spec.fieldName(), allUsers, localOps); + spec.fieldName(), nss.tenantId(), allUsers, localOps); } const char* DocumentSourceCurrentOp::getSourceName() const { @@ -184,7 +194,7 @@ intrusive_ptr DocumentSourceCurrentOp::createFromBson( uassert(ErrorCodes::InvalidNamespace, "$currentOp must be run against the 'admin' database with {aggregate: 1}", - nss.db() == DatabaseName::kAdmin.db() && nss.isCollectionlessAggregateNS()); + nss.isAdminDB() && nss.isCollectionlessAggregateNS()); boost::optional includeIdleConnections; boost::optional includeIdleSessions; @@ -193,6 +203,7 @@ intrusive_ptr DocumentSourceCurrentOp::createFromBson( boost::optional truncateOps; boost::optional idleCursors; boost::optional backtrace; + boost::optional targetAllNodes; for (auto&& elem : spec.embeddedObject()) { const auto fieldName = elem.fieldNameStringData(); @@ -228,6 +239,10 @@ intrusive_ptr DocumentSourceCurrentOp::createFromBson( "a boolean value, but found: " << typeName(elem.type()), elem.type() == BSONType::Bool); + uassert(ErrorCodes::FailedToParse, + str::stream() << "The 'localOps' parameter of the $currentOp stage cannot be " + "true when 'targetAllNodes' is also true", + !(targetAllNodes.value_or(false) && elem.boolean())); showLocalOpsOnMongoS = (elem.boolean() ? LocalOpsMode::kLocalMongosOps : LocalOpsMode::kRemoteShardOps); } else if (fieldName == kTruncateOpsFieldName) { @@ -254,6 +269,24 @@ intrusive_ptr DocumentSourceCurrentOp::createFromBson( elem.type() == BSONType::Bool); backtrace = (elem.boolean() ? 
BacktraceMode::kIncludeBacktrace : BacktraceMode::kExcludeBacktrace); + } else if (fieldName == kTargetAllNodesFieldName) { + uassert(ErrorCodes::FailedToParse, + str::stream() << "The 'targetAllNodes' parameter of the $currentOp stage must " + "be a boolean value, but found: " + << typeName(elem.type()), + elem.type() == BSONType::Bool); + uassert(ErrorCodes::FailedToParse, + "The 'localOps' parameter of the $currentOp stage cannot be " + "true when 'targetAllNodes' is also true", + !((showLocalOpsOnMongoS && + showLocalOpsOnMongoS.value() == LocalOpsMode::kLocalMongosOps) && + elem.boolean())); + targetAllNodes = elem.boolean(); + if (targetAllNodes.value_or(false)) { + uassert(ErrorCodes::FailedToParse, + "$currentOp supports targetAllNodes parameter only for sharded clusters", + pExpCtx->fromMongos || pExpCtx->inMongos); + } } else { uasserted(ErrorCodes::FailedToParse, str::stream() @@ -268,7 +301,8 @@ intrusive_ptr DocumentSourceCurrentOp::createFromBson( showLocalOpsOnMongoS, truncateOps, idleCursors, - backtrace); + backtrace, + targetAllNodes); } intrusive_ptr DocumentSourceCurrentOp::create( @@ -279,7 +313,8 @@ intrusive_ptr DocumentSourceCurrentOp::create( boost::optional showLocalOpsOnMongoS, boost::optional truncateOps, boost::optional idleCursors, - boost::optional backtrace) { + boost::optional backtrace, + boost::optional targetAllNodes) { return new DocumentSourceCurrentOp(pExpCtx, includeIdleConnections, includeIdleSessions, @@ -287,7 +322,8 @@ intrusive_ptr DocumentSourceCurrentOp::create( showLocalOpsOnMongoS, truncateOps, idleCursors, - backtrace); + backtrace, + targetAllNodes); } Value DocumentSourceCurrentOp::serialize(SerializationOptions opts) const { @@ -296,35 +332,35 @@ Value DocumentSourceCurrentOp::serialize(SerializationOptions opts) const { Document{ {kIdleConnectionsFieldName, _includeIdleConnections.has_value() - ? opts.serializeLiteralValue(_includeIdleConnections.value() == - ConnMode::kIncludeIdle) + ? opts.serializeLiteral(_includeIdleConnections.value() == ConnMode::kIncludeIdle) : Value()}, {kIdleSessionsFieldName, _includeIdleSessions.has_value() - ? opts.serializeLiteralValue(_includeIdleSessions.value() == - SessionMode::kIncludeIdle) + ? opts.serializeLiteral(_includeIdleSessions.value() == SessionMode::kIncludeIdle) : Value()}, {kAllUsersFieldName, _includeOpsFromAllUsers.has_value() - ? opts.serializeLiteralValue(_includeOpsFromAllUsers.value() == - UserMode::kIncludeAll) + ? opts.serializeLiteral(_includeOpsFromAllUsers.value() == UserMode::kIncludeAll) : Value()}, {kLocalOpsFieldName, _showLocalOpsOnMongoS.has_value() - ? opts.serializeLiteralValue(_showLocalOpsOnMongoS.value() == - LocalOpsMode::kLocalMongosOps) + ? opts.serializeLiteral(_showLocalOpsOnMongoS.value() == + LocalOpsMode::kLocalMongosOps) : Value()}, {kTruncateOpsFieldName, _truncateOps.has_value() - ? opts.serializeLiteralValue(_truncateOps.value() == TruncationMode::kTruncateOps) + ? opts.serializeLiteral(_truncateOps.value() == TruncationMode::kTruncateOps) : Value()}, {kIdleCursorsFieldName, _idleCursors.has_value() - ? opts.serializeLiteralValue(_idleCursors.value() == CursorMode::kIncludeCursors) + ? opts.serializeLiteral(_idleCursors.value() == CursorMode::kIncludeCursors) : Value()}, {kBacktraceFieldName, - _backtrace.has_value() ? opts.serializeLiteralValue(_backtrace.value() == - BacktraceMode::kIncludeBacktrace) - : Value()}}}}); + _backtrace.has_value() + ? 
opts.serializeLiteral(_backtrace.value() == BacktraceMode::kIncludeBacktrace) + : Value()}, + {kTargetAllNodesFieldName, + _targetAllNodes.has_value() ? opts.serializeLiteral(_targetAllNodes.value()) + : Value()}}}}); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_current_op.h b/src/mongo/db/pipeline/document_source_current_op.h index 548ac5362da87..eaa88813d9ece 100644 --- a/src/mongo/db/pipeline/document_source_current_op.h +++ b/src/mongo/db/pipeline/document_source_current_op.h @@ -29,7 +29,36 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { @@ -58,10 +87,15 @@ class DocumentSourceCurrentOp final : public DocumentSource { static std::unique_ptr parse(const NamespaceString& nss, const BSONElement& spec); - LiteParsed(std::string parseTimeName, UserMode allUsers, LocalOpsMode localOps) + LiteParsed(std::string parseTimeName, + const boost::optional& tenantId, + UserMode allUsers, + LocalOpsMode localOps) : LiteParsedDocumentSource(std::move(parseTimeName)), _allUsers(allUsers), - _localOps(localOps) {} + _localOps(localOps), + _privileges( + {Privilege(ResourcePattern::forClusterResource(tenantId), ActionType::inprog)}) {} stdx::unordered_set getInvolvedNamespaces() const final { return stdx::unordered_set(); @@ -69,17 +103,15 @@ class DocumentSourceCurrentOp final : public DocumentSource { PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const final { - PrivilegeVector privileges; - // In a sharded cluster, we always need the inprog privilege to run $currentOp on the // shards. If we are only looking up local mongoS operations, we do not need inprog to // view our own ops but *do* require it to view other users' ops. 
if (_allUsers == UserMode::kIncludeAll || (isMongos && _localOps == LocalOpsMode::kRemoteShardOps)) { - privileges.push_back({ResourcePattern::forClusterResource(), ActionType::inprog}); + return _privileges; } - return privileges; + return PrivilegeVector(); } bool allowedToPassthroughFromMongos() const final { @@ -102,6 +134,7 @@ class DocumentSourceCurrentOp final : public DocumentSource { private: const UserMode _allUsers; const LocalOpsMode _localOps; + const PrivilegeVector _privileges; }; static boost::intrusive_ptr create( @@ -112,17 +145,26 @@ class DocumentSourceCurrentOp final : public DocumentSource { boost::optional showLocalOpsOnMongoS = boost::none, boost::optional truncateOps = boost::none, boost::optional idleCursors = boost::none, - boost::optional backtrace = boost::none); + boost::optional backtrace = boost::none, + boost::optional targetAllNodes = boost::none); const char* getSourceName() const final; StageConstraints constraints(Pipeline::SplitState pipeState) const final { bool showLocalOps = _showLocalOpsOnMongoS.value_or(kDefaultLocalOpsMode) == LocalOpsMode::kLocalMongosOps; + HostTypeRequirement hostTypeRequirement; + if (showLocalOps) { + hostTypeRequirement = HostTypeRequirement::kLocalOnly; + } else if (_targetAllNodes.value_or(false)) { + hostTypeRequirement = HostTypeRequirement::kAllShardServers; + } else { + hostTypeRequirement = HostTypeRequirement::kAnyShard; + } StageConstraints constraints( StreamType::kStreaming, PositionRequirement::kFirst, - (showLocalOps ? HostTypeRequirement::kLocalOnly : HostTypeRequirement::kAnyShard), + hostTypeRequirement, DiskUseRequirement::kNoDiskUse, FacetRequirement::kNotAllowed, TransactionRequirement::kNotAllowed, @@ -153,7 +195,8 @@ class DocumentSourceCurrentOp final : public DocumentSource { boost::optional showLocalOpsOnMongoS, boost::optional truncateOps, boost::optional idleCursors, - boost::optional backtrace) + boost::optional backtrace, + boost::optional targetAllNodes) : DocumentSource(kStageName, pExpCtx), _includeIdleConnections(includeIdleConnections), _includeIdleSessions(includeIdleSessions), @@ -161,7 +204,8 @@ class DocumentSourceCurrentOp final : public DocumentSource { _showLocalOpsOnMongoS(showLocalOpsOnMongoS), _truncateOps(truncateOps), _idleCursors(idleCursors), - _backtrace(backtrace) {} + _backtrace(backtrace), + _targetAllNodes(targetAllNodes) {} GetNextResult doGetNext() final; @@ -173,6 +217,7 @@ class DocumentSourceCurrentOp final : public DocumentSource { boost::optional _idleCursors; boost::optional _backtrace; + boost::optional _targetAllNodes; std::string _shardName; std::vector _ops; diff --git a/src/mongo/db/pipeline/document_source_current_op_test.cpp b/src/mongo/db/pipeline/document_source_current_op_test.cpp index 01596237e6fc7..5037fc0a3ce2a 100644 --- a/src/mongo/db/pipeline/document_source_current_op_test.cpp +++ b/src/mongo/db/pipeline/document_source_current_op_test.cpp @@ -27,15 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/json.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_current_op.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/str.h" namespace mongo { @@ -54,8 +65,7 @@ class DocumentSourceCurrentOpTest : public AggregationContextFixture { public: DocumentSourceCurrentOpTest() : AggregationContextFixture( - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "admin"))) { - } + NamespaceString::makeCollectionlessAggregateNSS(DatabaseName::kAdmin)) {} }; /** @@ -100,8 +110,8 @@ TEST_F(DocumentSourceCurrentOpTest, ShouldFailToParseIfSpecIsNotObject) { TEST_F(DocumentSourceCurrentOpTest, ShouldFailToParseIfNotRunOnAdmin) { const auto specObj = fromjson("{$currentOp:{}}"); - getExpCtx()->ns = - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "foo")); + getExpCtx()->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "foo")); ASSERT_THROWS_CODE(DocumentSourceCurrentOp::createFromBson(specObj.firstElement(), getExpCtx()), AssertionException, ErrorCodes::InvalidNamespace); @@ -150,6 +160,42 @@ TEST_F(DocumentSourceCurrentOpTest, ShouldFailToParseTruncateOpsIfNotBoolean) { ErrorCodes::FailedToParse); } +TEST_F(DocumentSourceCurrentOpTest, ShouldFailToParseTargetAllNodesIfNotBoolean) { + const auto specObj = fromjson("{$currentOp:{targetAllNodes:1}}"); + ASSERT_THROWS_CODE(DocumentSourceCurrentOp::createFromBson(specObj.firstElement(), getExpCtx()), + AssertionException, + ErrorCodes::FailedToParse); +} + +TEST_F(DocumentSourceCurrentOpTest, ShouldFailToParseTrueTargetAllNodesIfTrueLocalOps) { + const auto specObj = fromjson("{$currentOp:{targetAllNodes:true, localOps:true}}"); + ASSERT_THROWS_CODE(DocumentSourceCurrentOp::createFromBson(specObj.firstElement(), getExpCtx()), + AssertionException, + ErrorCodes::FailedToParse); +} + +TEST_F(DocumentSourceCurrentOpTest, ShouldFailToParseTrueTargetAllNodesIfUnsharded) { + const auto specObj = fromjson("{$currentOp:{targetAllNodes:true}}"); + ASSERT_THROWS_CODE(DocumentSourceCurrentOp::createFromBson(specObj.firstElement(), getExpCtx()), + AssertionException, + ErrorCodes::FailedToParse); +} + +TEST_F(DocumentSourceCurrentOpTest, ShouldParseAndSerializeTargetAllNodesIfSharded) { + const auto specObj = fromjson("{$currentOp:{targetAllNodes:true}}"); + + getExpCtx()->fromMongos = true; + + const auto parsed = + DocumentSourceCurrentOp::createFromBson(specObj.firstElement(), getExpCtx()); + + const auto currentOp = static_cast(parsed.get()); + + const auto expectedOutput = Document{{"$currentOp", Document{{"targetAllNodes", true}}}}; + + ASSERT_DOCUMENT_EQ(currentOp->serialize().getDocument(), expectedOutput); +} + TEST_F(DocumentSourceCurrentOpTest, ShouldFailToParseIfUnrecognisedParameterSpecified) { const auto 
specObj = fromjson("{$currentOp:{foo:true}}"); ASSERT_THROWS_CODE(DocumentSourceCurrentOp::createFromBson(specObj.firstElement(), getExpCtx()), @@ -160,7 +206,7 @@ TEST_F(DocumentSourceCurrentOpTest, ShouldFailToParseIfUnrecognisedParameterSpec TEST_F(DocumentSourceCurrentOpTest, ShouldParseAndSerializeAllExplicitlySpecifiedArguments) { const auto specObj = fromjson( "{$currentOp:{idleConnections:false, idleSessions:false, allUsers:true, localOps:true, " - "truncateOps:false}}"); + "truncateOps:false, targetAllNodes:false}}"); const auto parsed = DocumentSourceCurrentOp::createFromBson(specObj.firstElement(), getExpCtx()); @@ -172,7 +218,8 @@ TEST_F(DocumentSourceCurrentOpTest, ShouldParseAndSerializeAllExplicitlySpecifie {"idleSessions", false}, {"allUsers", true}, {"localOps", true}, - {"truncateOps", false}}}}; + {"truncateOps", false}, + {"targetAllNodes", false}}}}; ASSERT_DOCUMENT_EQ(currentOp->serialize().getDocument(), expectedOutput); } @@ -185,7 +232,8 @@ TEST_F(DocumentSourceCurrentOpTest, idleConnections: true, allUsers: false, idleSessions: false, - localOps: true + localOps: true, + targetAllNodes: false } })"); auto docSource = DocumentSourceCurrentOp::createFromBson(spec.firstElement(), getExpCtx()); @@ -193,10 +241,11 @@ TEST_F(DocumentSourceCurrentOpTest, ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({ "$currentOp": { - "idleConnections": "?", - "idleSessions": "?", - "allUsers": "?", - "localOps": "?" + "idleConnections": "?bool", + "idleSessions": "?bool", + "allUsers": "?bool", + "localOps": "?bool", + "targetAllNodes": "?bool" } })", redact(*docSource)); @@ -222,9 +271,7 @@ TEST_F(DocumentSourceCurrentOpTest, ShouldNotSerializeOmittedOptionalArgumentsWi DocumentSourceCurrentOp::createFromBson(specObj.firstElement(), getExpCtx()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "$currentOp": {} - })", + R"({"$currentOp": {}})", redact(*docSource)); } diff --git a/src/mongo/db/pipeline/document_source_cursor.cpp b/src/mongo/db/pipeline/document_source_cursor.cpp index ae35d219af5ff..90d03c2d220cd 100644 --- a/src/mongo/db/pipeline/document_source_cursor.cpp +++ b/src/mongo/db/pipeline/document_source_cursor.cpp @@ -28,23 +28,39 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/pipeline/document_source_cursor.h" +#include +#include +#include +#include -#include "mongo/db/catalog/collection.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/curop_failpoint_helpers.h" #include "mongo/db/db_raii.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/document_source_cursor.h" #include "mongo/db/query/collection_query_info.h" #include "mongo/db/query/explain.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/find_common.h" -#include "mongo/db/storage/storage_options.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/s/resharding/resume_token_gen.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -215,11 +231,10 @@ 
void DocumentSourceCursor::recordPlanSummaryStats() { Value DocumentSourceCursor::serialize(SerializationOptions opts) const { auto verbosity = opts.verbosity; - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484350); - } - // We never parse a DocumentSourceCursor, so we only serialize for explain. - if (!verbosity) + // We never parse a DocumentSourceCursor, so we only serialize for explain. Since it's never + // part of user input, there's no need to compute its query shape. + if (!verbosity || opts.transformIdentifiers || + opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) return Value(); invariant(_exec); @@ -251,6 +266,7 @@ Value DocumentSourceCursor::serialize(SerializationOptions opts) const { _execStatus, _winningPlanTrialStats, BSONObj(), + SerializationContext::stateCommandReply(pExpCtx->serializationCtxt), BSONObj(), &explainStatsBuilder); } @@ -349,7 +365,7 @@ DocumentSourceCursor::DocumentSourceCursor( for (auto& [nss, coll] : collections.getSecondaryCollections()) { if (coll) { PlanSummaryStats stats; - explainer.getSecondarySummaryStats(nss.toString(), &stats); + explainer.getSecondarySummaryStats(nss, &stats); CollectionQueryInfo::get(coll).notifyOfQuery(pExpCtx->opCtx, coll, stats); } } diff --git a/src/mongo/db/pipeline/document_source_cursor.h b/src/mongo/db/pipeline/document_source_cursor.h index ab1af3feb31fb..5f99850ce5bab 100644 --- a/src/mongo/db/pipeline/document_source_cursor.h +++ b/src/mongo/db/pipeline/document_source_cursor.h @@ -29,15 +29,42 @@ #pragma once +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/db_raii.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/explain_options.h" #include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_summary_stats.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_densify.cpp b/src/mongo/db/pipeline/document_source_densify.cpp index f035cff11f7eb..a585c20fc5834 100644 --- a/src/mongo/db/pipeline/document_source_densify.cpp +++ b/src/mongo/db/pipeline/document_source_densify.cpp @@ -28,13 +28,30 @@ */ #include "mongo/db/pipeline/document_source_densify.h" -#include "mongo/base/exact_cast.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/basic_types.h" #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/query/sort_pattern.h" +#include 
"mongo/idl/idl_parser.h" #include "mongo/stdx/variant.h" #include "mongo/util/assert_util.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep using boost::intrusive_ptr; using boost::optional; @@ -190,11 +207,17 @@ SortPattern getSortPatternForDensify(RangeStatement rangeStatement, } } - // Add field path to sort spec. - SortPatternPart part; - part.fieldPath = field.fullPath(); - sortParts.push_back(std::move(part)); - return SortPattern{sortParts}; + // Add field path to sort spec if it is not yet in the sort spec. + const auto inserted = std::find_if( + sortParts.begin(), sortParts.end(), [&field](const SortPatternPart& s) -> bool { + return s.fieldPath->fullPath().compare(field.fullPath()) == 0; + }); + if (inserted == sortParts.end()) { + SortPatternPart part; + part.fieldPath = field.fullPath(); + sortParts.push_back(std::move(part)); + } + return SortPattern{std::move(sortParts)}; } list> create(const intrusive_ptr& expCtx, @@ -331,10 +354,19 @@ DocumentSource::GetNextResult DocumentSourceInternalDensify::densifyExplicitRang RangeStatement(_range.getStep(), ExplicitBounds(bounds.first, bounds.second), _range.getUnit())); + } else if (_current < bounds.first) { + // All the documents we saw were below the explicit range, so _current is below the range. + // Densification starts at the first bounds, so _current is no longer relevant. + createDocGenerator(bounds.first, + RangeStatement(_range.getStep(), + ExplicitBounds(bounds.first, bounds.second), + _range.getUnit())); + } else if (_current->increment(_range) >= bounds.second) { _densifyState = DensifyState::kDensifyDone; return DocumentSource::GetNextResult::makeEOF(); } else { + // _current is somewhere in the middle of the range. auto lowerBound = _current->increment(_range); createDocGenerator(lowerBound, RangeStatement(_range.getStep(), @@ -641,7 +673,7 @@ Value DocumentSourceInternalDensify::serialize(SerializationOptions opts) const _partitions.end(), serializedPartitionByFields.begin(), [&](FieldPath field) -> Value { return Value(opts.serializeFieldPath(field)); }); - spec[kPartitionByFieldsFieldName] = Value(serializedPartitionByFields); + spec[kPartitionByFieldsFieldName] = Value(std::move(serializedPartitionByFields)); spec[kRangeFieldName] = _range.serialize(opts); MutableDocument out; out[getSourceName()] = Value(spec.freeze()); diff --git a/src/mongo/db/pipeline/document_source_densify.h b/src/mongo/db/pipeline/document_source_densify.h index b6a2ddf35281b..b94df91b5972d 100644 --- a/src/mongo/db/pipeline/document_source_densify.h +++ b/src/mongo/db/pipeline/document_source_densify.h @@ -29,16 +29,50 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_densify_gen.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/memory_usage_tracker.h" +#include "mongo/db/pipeline/pipeline.h" +#include 
"mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/datetime/date_time_support.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" #include "mongo/util/time_support.h" @@ -246,18 +280,18 @@ class RangeStatement { Value serialize(SerializationOptions opts) const { MutableDocument spec; - spec[kArgStep] = opts.serializeLiteralValue(_step); + spec[kArgStep] = opts.serializeLiteral(_step); spec[kArgBounds] = stdx::visit( OverloadedVisitor{[&](Full) { return Value(kValFull); }, [&](Partition) { return Value(kValPartition); }, [&](ExplicitBounds bounds) { return Value(std::vector( - {opts.serializeLiteralValue(bounds.first.toValue()), - opts.serializeLiteralValue(bounds.second.toValue())})); + {opts.serializeLiteral(bounds.first.toValue()), + opts.serializeLiteral(bounds.second.toValue())})); }}, _bounds); if (_unit) - spec[kArgUnit] = opts.serializeLiteralValue(serializeTimeUnit(*_unit)); + spec[kArgUnit] = opts.serializeLiteral(serializeTimeUnit(*_unit)); return spec.freezeToValue(); } @@ -307,7 +341,7 @@ class DocumentSourceInternalDensify final : public DocumentSource { const RangeStatement& range) : DocumentSource(kStageName, pExpCtx), _field(std::move(field)), - _partitions(std::move(partitions)), + _partitions(partitions), _range(std::move(range)), _partitionTable(pExpCtx->getValueComparator().makeUnorderedValueMap()), _memTracker( diff --git a/src/mongo/db/pipeline/document_source_densify_test.cpp b/src/mongo/db/pipeline/document_source_densify_test.cpp index 38290d12504f4..ca32f21be3f80 100644 --- a/src/mongo/db/pipeline/document_source_densify_test.cpp +++ b/src/mongo/db/pipeline/document_source_densify_test.cpp @@ -27,16 +27,24 @@ * it in the license file. */ -#include "mongo/bson/bsonmisc.h" +#include + +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_densify.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" -#include +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -1299,6 +1307,7 @@ TEST_F(DensifyRedactionTest, RedactionDateBounds) { } } })"); + auto docSource = DocumentSourceInternalDensify::createFromBson(spec.firstElement(), getExpCtx()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT @@ -1307,9 +1316,12 @@ TEST_F(DensifyRedactionTest, RedactionDateBounds) { "field": "HASH", "partitionByFields": [], "range": { - "step": "?", - "bounds": ["?", "?"], - "unit": "?" 
+ "step": "?number", + "bounds": [ + "?date", + "?date" + ], + "unit": "?string" } } })", @@ -1339,7 +1351,7 @@ TEST_F(DensifyRedactionTest, RedactionFullBoundsWithPartitionFields) { "HASH.HASH" ], "range": { - "step": "?", + "step": "?number", "bounds": "full" } } @@ -1369,9 +1381,9 @@ TEST_F(DensifyRedactionTest, RedactionPartitionBounds) { "HASH" ], "range": { - "step": "?", + "step": "?number", "bounds": "partition", - "unit": "?" + "unit": "?string" } } })", diff --git a/src/mongo/db/pipeline/document_source_documents.cpp b/src/mongo/db/pipeline/document_source_documents.cpp index 934116a7c2709..a57106f087724 100644 --- a/src/mongo/db/pipeline/document_source_documents.cpp +++ b/src/mongo/db/pipeline/document_source_documents.cpp @@ -28,16 +28,22 @@ */ #include "mongo/db/pipeline/document_source_documents.h" -#include "mongo/db/exec/projection_executor.h" -#include "mongo/db/exec/projection_executor_builder.h" + +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_queue.h" #include "mongo/db/pipeline/document_source_replace_root.h" #include "mongo/db/pipeline/document_source_unwind.h" -#include "mongo/db/pipeline/lite_parsed_document_source.h" - -#include +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_documents.h b/src/mongo/db/pipeline/document_source_documents.h index 6048ed551bb62..c0c661feff027 100644 --- a/src/mongo/db/pipeline/document_source_documents.h +++ b/src/mongo/db/pipeline/document_source_documents.h @@ -29,8 +29,22 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { @@ -62,7 +76,7 @@ class LiteParsed : public LiteParsedDocumentSource { static constexpr StringData kStageName = "$documents"_sd; -static std::list> createFromBson( +std::list> createFromBson( BSONElement elem, const boost::intrusive_ptr& pExpCtx); }; // namespace DocumentSourceDocuments diff --git a/src/mongo/db/pipeline/document_source_documents_test.cpp b/src/mongo/db/pipeline/document_source_documents_test.cpp new file mode 100644 index 0000000000000..a5c90d0b81584 --- /dev/null +++ b/src/mongo/db/pipeline/document_source_documents_test.cpp @@ -0,0 +1,89 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. 
+ * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source_documents.h" +#include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" + +namespace mongo { + +using DocumentSourceDocumentsTest = AggregationContextFixture; + +TEST_F(DocumentSourceDocumentsTest, DocumentsStageRedactsCorrectly) { + auto spec = fromjson(R"({ + $documents: [ + { x: 10 }, { x: 2 }, { x: 5 } + ] + })"); + auto docSourcesList = DocumentSourceDocuments::createFromBson(spec.firstElement(), getExpCtx()); + ASSERT_EQ(4, docSourcesList.size()); + + // We must retrieve the internally-generated field name shared across these stages in order to + // make sure they're serialized properly. 
+ std::vector> docSourcesVec(docSourcesList.begin(), + docSourcesList.end()); + auto unwindStage = static_cast(docSourcesVec[2].get()); + ASSERT(unwindStage); + auto generatedField = unwindStage->getUnwindPath(); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$queue":"?array"})", + redact(*docSourcesVec[0])); + + + ASSERT_BSONOBJ_EQ( // NOLINT + fromjson(R"({ + "$project": { + "HASH<_id>": true, + "HASH<)" + + generatedField + + R"(>": "?array" + } + })"), + redact(*docSourcesVec[1])); + + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + "{'$unwind': {'path' : '$HASH<" + generatedField + ">' } }", + redact(*docSourcesVec[2])); + + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + "{$replaceRoot: {newRoot: '$HASH<" + generatedField + ">'}}", + redact(*docSourcesVec[3])); +} + +} // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_exchange.cpp b/src/mongo/db/pipeline/document_source_exchange.cpp index 2d6999f4a7d7e..b77c15d32a472 100644 --- a/src/mongo/db/pipeline/document_source_exchange.cpp +++ b/src/mongo/db/pipeline/document_source_exchange.cpp @@ -28,17 +28,32 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include +#include #include +#include #include +#include -#include "mongo/db/curop.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/hasher.h" #include "mongo/db/pipeline/document_source_exchange.h" #include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -94,10 +109,7 @@ const char* DocumentSourceExchange::getSourceName() const { } Value DocumentSourceExchange::serialize(SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484348); - } - return Value(DOC(getSourceName() << _exchange->getSpec().toBSON())); + return Value(DOC(getSourceName() << _exchange->getSpec().toBSON(opts))); } DocumentSourceExchange::DocumentSourceExchange( @@ -167,7 +179,7 @@ std::vector Exchange::extractBoundaries( kb << "" << elem; } - KeyString::Builder key{KeyString::Version::V1, kb.obj(), ordering}; + key_string::Builder key{key_string::Version::V1, kb.obj(), ordering}; std::string keyStr{key.getBuffer(), key.getSize()}; ret.emplace_back(std::move(keyStr)); @@ -188,8 +200,8 @@ std::vector Exchange::extractBoundaries( kbMax << "" << MAXKEY; } - KeyString::Builder minKey{KeyString::Version::V1, kbMin.obj(), ordering}; - KeyString::Builder maxKey{KeyString::Version::V1, kbMax.obj(), ordering}; + key_string::Builder minKey{key_string::Version::V1, kbMin.obj(), ordering}; + key_string::Builder maxKey{key_string::Version::V1, kbMax.obj(), ordering}; StringData minKeyStr{minKey.getBuffer(), minKey.getSize()}; StringData maxKeyStr{maxKey.getBuffer(), maxKey.getSize()}; @@ -427,7 +439,7 @@ size_t Exchange::getTargetConsumer(const Document& input) { ++counter; } - KeyString::Builder key{KeyString::Version::V1, kb.obj(), _ordering}; + key_string::Builder key{key_string::Version::V1, kb.obj(), _ordering}; std::string keyStr{key.getBuffer(), key.getSize()}; // Binary search for the consumer id. 
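Editor's note on `Exchange::getTargetConsumer()` in the exchange.cpp hunks above: for the range policy, the routing key built with `key_string::Builder` is binary-searched against the precomputed boundary keys, and the document is routed to the consumer that owns the half-open range the key falls into. The sketch below illustrates that idea with plain ordered strings; the function and variable names are invented for the sketch, and it omits the real KeyString encoding, consumer-id indirection, and the hash/round-robin policies.

```cpp
// Simplified illustration of boundary-based routing (assumed names, not MongoDB source).
#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Boundaries partition the key space into half-open ranges:
// [b[0], b[1]), [b[1], b[2]), ... ; a key in range i goes to consumer i.
size_t routeToConsumer(const std::vector<std::string>& boundaries, const std::string& key) {
    assert(boundaries.size() >= 2);
    assert(key >= boundaries.front() && key < boundaries.back());
    // upper_bound returns the first boundary strictly greater than the key;
    // the owning range is the one immediately before it.
    auto it = std::upper_bound(boundaries.begin(), boundaries.end(), key);
    return static_cast<size_t>(std::distance(boundaries.begin(), it)) - 1;
}

int main() {
    // Three consumers: ["", "g"), ["g", "p"), ["p", "\x7f")
    std::vector<std::string> boundaries{"", "g", "p", "\x7f"};
    assert(routeToConsumer(boundaries, "apple") == 0);
    assert(routeToConsumer(boundaries, "mango") == 1);
    assert(routeToConsumer(boundaries, "zebra") == 2);
    return 0;
}
```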
diff --git a/src/mongo/db/pipeline/document_source_exchange.h b/src/mongo/db/pipeline/document_source_exchange.h index 190c6152c63be..57bd4c805115b 100644 --- a/src/mongo/db/pipeline/document_source_exchange.h +++ b/src/mongo/db/pipeline/document_source_exchange.h @@ -29,15 +29,40 @@ #pragma once +#include +#include #include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/ordering.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/exchange_spec_gen.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/resource_yielder.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_exchange_test.cpp b/src/mongo/db/pipeline/document_source_exchange_test.cpp index 6063c252dbde5..2d3b6108f9472 100644 --- a/src/mongo/db/pipeline/document_source_exchange_test.cpp +++ b/src/mongo/db/pipeline/document_source_exchange_test.cpp @@ -28,22 +28,38 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/hasher.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_exchange.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/db/storage/key_string.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/service_context.h" #include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/random.h" -#include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/stdx/mutex.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/concurrency/thread_pool.h" -#include "mongo/util/system_clock_source.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -749,4 +765,30 @@ TEST_F(DocumentSourceExchangeTest, RejectInvalidMissingKeys) { Exchange(parseSpec(spec), Pipeline::create({}, getExpCtx())), AssertionException, 50967); } +TEST_F(DocumentSourceExchangeTest, QueryShape) { + const size_t nDocs = 500; + + auto source = getMockSource(nDocs); + + ExchangeSpec spec; + spec.setPolicy(ExchangePolicyEnum::kRoundRobin); + spec.setConsumers(1); + spec.setBufferSize(1024); + boost::intrusive_ptr ex = 
new Exchange(spec, Pipeline::create({source}, getExpCtx())); + boost::intrusive_ptr stage = + new DocumentSourceExchange(getExpCtx(), ex, 0, nullptr); + + ASSERT_BSONOBJ_EQ_AUTO( // + R"({ + "$_internalExchange": { + "policy": "roundrobin", + "consumers": "?number", + "orderPreserving": false, + "bufferSize": "?number", + "key": "?object" + } + })", + redact(*stage)); +} + } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_facet.cpp b/src/mongo/db/pipeline/document_source_facet.cpp index be3dfc3b646f4..747b3513c07be 100644 --- a/src/mongo/db/pipeline/document_source_facet.cpp +++ b/src/mongo/db/pipeline/document_source_facet.cpp @@ -27,24 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_facet.h" - +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include #include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/document_source_facet.h" #include "mongo/db/pipeline/document_source_tee_consumer.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/tee_buffer.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/plan_summary_stats.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -186,15 +194,11 @@ DocumentSource::GetNextResult DocumentSourceFacet::doGetNext() { } Value DocumentSourceFacet::serialize(SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484347); - } - MutableDocument serialized; for (auto&& facet : _facets) { - serialized[facet.name] = - Value(opts.verbosity ? facet.pipeline->writeExplainOps(*opts.verbosity) - : facet.pipeline->serialize()); + serialized[opts.serializeFieldPathFromString(facet.name)] = + Value(opts.verbosity ? 
facet.pipeline->writeExplainOps(opts) + : facet.pipeline->serialize(opts)); } return Value(Document{{"$facet", serialized.freezeToValue()}}); } diff --git a/src/mongo/db/pipeline/document_source_facet.h b/src/mongo/db/pipeline/document_source_facet.h index 344b3448ac9c3..0375e1aedfd51 100644 --- a/src/mongo/db/pipeline/document_source_facet.h +++ b/src/mongo/db/pipeline/document_source_facet.h @@ -30,14 +30,40 @@ #pragma once #include +#include +#include #include +#include +#include +#include #include +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/tee_buffer.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_facet_test.cpp b/src/mongo/db/pipeline/document_source_facet_test.cpp index 3b278cdb53dd3..22a338c299330 100644 --- a/src/mongo/db/pipeline/document_source_facet_test.cpp +++ b/src/mongo/db/pipeline/document_source_facet_test.cpp @@ -27,25 +27,36 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_facet.h" - +#include #include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source_facet.h" #include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/document_source_skip.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { using std::deque; @@ -116,8 +127,8 @@ TEST_F(DocumentSourceFacetTest, ShouldSucceedWhenNamespaceIsCollectionless) { auto ctx = getExpCtx(); auto spec = fromjson("{$facet: {a: [{$match: {}}]}}"); - ctx->ns = - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "unittests")); + ctx->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "unittests")); ASSERT_TRUE(DocumentSourceFacet::createFromBson(spec.firstElement(), ctx).get()); } @@ -905,5 +916,165 @@ TEST_F(DocumentSourceFacetTest, ShouldSurfaceStrictestRequirementsOfEachConstrai ASSERT_FALSE( facetStage->constraints(Pipeline::SplitState::kUnsplit).isAllowedInLookupPipeline()); } + +TEST_F(DocumentSourceFacetTest, RedactsCorrectly) { + auto spec = fromjson(R"({ + $facet: { + a: [ + { $unwind: "$foo" }, + { $sortByCount: "$foo" } + ], + b: [ + { + $match: { + bar: { $exists: 1 } + } + }, + { + $bucket: { + groupBy: "$bar.foo", + boundaries: [0, 50, 100, 200], + output: { + z: { $sum : 1 } + } + } + } + ], + c: [ + { + $bucketAuto: { + groupBy: "$bar.baz", + buckets: 4 + } + } + ] + } + })"); + auto docSource = DocumentSourceFacet::createFromBson(spec.firstElement(), getExpCtx()); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$facet": { + "HASH": [ + { + "$unwind": { + "path": "$HASH" + } + }, + { + "$group": { + "_id": "$HASH", + "HASH": { + "$sum": "?number" + } + } + }, + { + "$sort": { + "HASH": -1 + } + } + ], + "HASH": [ + { + "$match": { + "HASH": { + "$exists": "?bool" + } + } + }, + { + "$group": { + "_id": { + "$switch": { + "branches": [ + { + "case": { + "$and": [ + { + "$gte": [ + "$HASH.HASH", + "?number" + ] + }, + { + "$lt": [ + "$HASH.HASH", + "?number" + ] + } + ] + }, + "then": "?number" + }, + { + "case": { + "$and": [ + { + "$gte": [ + "$HASH.HASH", + "?number" + ] + }, + { + "$lt": [ + "$HASH.HASH", + "?number" + ] + } + ] + }, + "then": "?number" + }, + { + "case": { + "$and": [ + { + "$gte": [ + "$HASH.HASH", + "?number" + ] + }, + { + "$lt": [ + "$HASH.HASH", + "?number" + ] + } + ] + }, + "then": "?number" + } + ] + } + }, + "HASH": { + "$sum": "?number" + } + } + }, + { + "$sort": { + "HASH<_id>": 1 + } + } + ], + "HASH": [ + { + "$bucketAuto": { + "groupBy": "$HASH.HASH", + "buckets": "?number", + "output": { + "HASH": { + 
"$sum": "?number" + } + } + } + } + ] + } + })", + redact(*docSource)); +} } // namespace } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_fill.cpp b/src/mongo/db/pipeline/document_source_fill.cpp index 768dfcae877c4..2f645f46f5ba1 100644 --- a/src/mongo/db/pipeline/document_source_fill.cpp +++ b/src/mongo/db/pipeline/document_source_fill.cpp @@ -29,16 +29,36 @@ #include "mongo/db/pipeline/document_source_fill.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/pipeline/document_source_add_fields.h" +#include "mongo/db/pipeline/document_source_fill_gen.h" #include "mongo/db/pipeline/document_source_set_window_fields.h" -#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/idl/idl_parser.h" #include "mongo/stdx/variant.h" #include "mongo/util/assert_util.h" -#include "mongo/util/overloaded_visitor.h" -#include +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_fill.h b/src/mongo/db/pipeline/document_source_fill.h index 4808c2d15ff8c..20ff8550fdb7f 100644 --- a/src/mongo/db/pipeline/document_source_fill.h +++ b/src/mongo/db/pipeline/document_source_fill.h @@ -29,6 +29,11 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_fill_gen.h" #include "mongo/db/pipeline/expression_context.h" diff --git a/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.cpp b/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.cpp index bdf7ab5a6df3d..d23ecc805ece0 100644 --- a/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.cpp +++ b/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.cpp @@ -28,14 +28,51 @@ */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source_find_and_modify_image_lookup.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/apply_ops_gen.h" #include "mongo/db/repl/image_collection_entry_gen.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include 
"mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -179,11 +216,11 @@ StageConstraints DocumentSourceFindAndModifyImageLookup::constraints( } Value DocumentSourceFindAndModifyImageLookup::serialize(SerializationOptions opts) const { - return Value(Document{ - {kStageName, - Value(Document{ - {kIncludeCommitTransactionTimestampFieldName, - _includeCommitTransactionTimestamp ? opts.serializeLiteralValue(true) : Value()}})}}); + return Value( + Document{{kStageName, + Value(Document{{kIncludeCommitTransactionTimestampFieldName, + _includeCommitTransactionTimestamp ? opts.serializeLiteral(true) + : Value()}})}}); } DepsTracker::State DocumentSourceFindAndModifyImageLookup::getDependencies( diff --git a/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.h b/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.h index 25837196b45e0..5d1206c73967f 100644 --- a/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.h +++ b/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.h @@ -29,7 +29,23 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp b/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp index df5624353a696..b5aac4dd98f42 100644 --- a/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp +++ b/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup_test.cpp @@ -27,26 +27,53 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/matcher/matcher.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_find_and_modify_image_lookup.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" #include "mongo/db/repl/apply_ops_command_info.h" #include "mongo/db/repl/image_collection_entry_gen.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/pipeline/document_source_geo_near.cpp b/src/mongo/db/pipeline/document_source_geo_near.cpp index 5e966e09cd557..aa03fe8569948 100644 --- a/src/mongo/db/pipeline/document_source_geo_near.cpp +++ b/src/mongo/db/pipeline/document_source_geo_near.cpp @@ -28,23 +28,46 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/geo/geoconstants.h" -#include "mongo/db/pipeline/document_source_add_fields.h" +#include "mongo/db/geo/geoparser.h" +#include "mongo/db/geo/shapes.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_geo.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/pipeline/document_source_geo_near.h" #include "mongo/db/pipeline/document_source_internal_compute_geo_near_distance.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" #include "mongo/db/pipeline/document_source_match.h" - -#include "mongo/db/exec/document_value/document.h" -#include "mongo/db/matcher/expression_geo.h" #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_dependencies.h" #include 
"mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/sort_pattern.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -70,7 +93,7 @@ Value DocumentSourceGeoNear::serialize(SerializationOptions opts) const { auto nearValue = [&]() -> Value { if (auto constGeometry = dynamic_cast(_nearGeometry.get()); constGeometry) { - return opts.serializeLiteralValue(constGeometry->getValue()); + return opts.serializeLiteral(constGeometry->getValue()); } else { return _nearGeometry->serialize(opts); } @@ -79,22 +102,22 @@ Value DocumentSourceGeoNear::serialize(SerializationOptions opts) const { result.setField("distanceField", Value(opts.serializeFieldPath(*distanceField))); if (maxDistance) { - result.setField("maxDistance", opts.serializeLiteralValue(*maxDistance)); + result.setField("maxDistance", opts.serializeLiteral(*maxDistance)); } if (minDistance) { - result.setField("minDistance", opts.serializeLiteralValue(*minDistance)); + result.setField("minDistance", opts.serializeLiteral(*minDistance)); } - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { + if (opts.transformIdentifiers || opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) { auto matchExpr = uassertStatusOK(MatchExpressionParser::parse(query, pExpCtx)); result.setField("query", Value(matchExpr->serialize(opts))); } else { result.setField("query", Value(query)); } - result.setField("spherical", opts.serializeLiteralValue(spherical)); + result.setField("spherical", opts.serializeLiteral(spherical)); if (distanceMultiplier) { - result.setField("distanceMultiplier", opts.serializeLiteralValue(*distanceMultiplier)); + result.setField("distanceMultiplier", opts.serializeLiteral(*distanceMultiplier)); } if (includeLocs) @@ -333,6 +356,9 @@ bool DocumentSourceGeoNear::hasQuery() const { void DocumentSourceGeoNear::parseOptions(BSONObj options, const boost::intrusive_ptr& pCtx) { + + const std::string nearStr = "near"; + const std::string distanceFieldStr = "distanceField"; // First, check for explicitly-disallowed fields. // The old geoNear command used to accept a collation. We explicitly ban it here, since the @@ -352,73 +378,71 @@ void DocumentSourceGeoNear::parseOptions(BSONObj options, uassert(50856, "$geoNear no longer supports the 'start' argument.", !options["start"]); // The "near" and "distanceField" parameters are required. - uassert(5860400, "$geoNear requires a 'near' argument", options["near"]); - _nearGeometry = - Expression::parseOperand(pCtx.get(), options["near"], pCtx->variablesParseState); - - uassert(16606, - "$geoNear requires a 'distanceField' option as a String", - options["distanceField"].type() == String); - distanceField.reset(new FieldPath(options["distanceField"].str())); - - // The remaining fields are optional. 
- if (auto maxDistElem = options["maxDistance"]) { - uassert(ErrorCodes::TypeMismatch, - "maxDistance must be a number", - isNumericBSONType(maxDistElem.type())); - maxDistance = options["maxDistance"].numberDouble(); - uassert(ErrorCodes::BadValue, "maxDistance must be nonnegative", *maxDistance >= 0); - } - - if (auto minDistElem = options["minDistance"]) { - uassert(ErrorCodes::TypeMismatch, - "minDistance must be a number", - isNumericBSONType(minDistElem.type())); - minDistance = options["minDistance"].numberDouble(); - uassert(ErrorCodes::BadValue, "minDistance must be nonnegative", *minDistance >= 0); - } - - if (auto distMultElem = options["distanceMultiplier"]) { - uassert(ErrorCodes::TypeMismatch, - "distanceMultiplier must be a number", - isNumericBSONType(distMultElem.type())); - distanceMultiplier = options["distanceMultiplier"].numberDouble(); - uassert(ErrorCodes::BadValue, - "distanceMultiplier must be nonnegative", - *distanceMultiplier >= 0); - } - - if (auto queryElem = options["query"]) { - uassert(ErrorCodes::TypeMismatch, - "query must be an object", - queryElem.type() == BSONType::Object); - query = queryElem.embeddedObject().getOwned(); - } - - spherical = options["spherical"].trueValue(); - - if (options.hasField("includeLocs")) { - uassert(16607, - "$geoNear requires that 'includeLocs' option is a String", - options["includeLocs"].type() == String); - includeLocs = FieldPath(options["includeLocs"].str()); - } - - if (options.hasField("uniqueDocs")) - LOGV2_WARNING(23758, "ignoring deprecated uniqueDocs option in $geoNear aggregation stage"); - - if (auto keyElt = options[kKeyFieldName]) { - uassert(ErrorCodes::TypeMismatch, - str::stream() << "$geoNear parameter '" << DocumentSourceGeoNear::kKeyFieldName - << "' must be of type string but found type: " - << typeName(keyElt.type()), - keyElt.type() == BSONType::String); - const auto keyFieldStr = keyElt.valueStringData(); - uassert(ErrorCodes::BadValue, - str::stream() << "$geoNear parameter '" << DocumentSourceGeoNear::kKeyFieldName - << "' cannot be the empty string", - !keyFieldStr.empty()); - keyFieldPath = FieldPath(keyFieldStr); + uassert(5860400, "$geoNear requires a 'near' argument", options[nearStr]); + uassert(25278, "$geoNear requires a 'distanceField' argument", options[distanceFieldStr]); + + // go through all the fields + for (auto&& argument : options) { + const auto argName = argument.fieldNameStringData(); + if (argName == nearStr) { + _nearGeometry = + Expression::parseOperand(pCtx.get(), argument, pCtx->variablesParseState); + } else if (argName == distanceFieldStr) { + uassert(16606, + "$geoNear requires a 'distanceField' option as a String", + argument.type() == String); + distanceField.reset(new FieldPath(argument.str())); + } else if (argName == "maxDistance") { + uassert(ErrorCodes::TypeMismatch, + "maxDistance must be a number", + isNumericBSONType(argument.type())); + maxDistance = argument.numberDouble(); + uassert(ErrorCodes::BadValue, "maxDistance must be nonnegative", *maxDistance >= 0); + } else if (argName == "minDistance") { + uassert(ErrorCodes::TypeMismatch, + "minDistance must be a number", + isNumericBSONType(argument.type())); + minDistance = argument.numberDouble(); + uassert(ErrorCodes::BadValue, "minDistance must be nonnegative", *minDistance >= 0); + } else if (argName == "distanceMultiplier") { + uassert(ErrorCodes::TypeMismatch, + "distanceMultiplier must be a number", + isNumericBSONType(argument.type())); + distanceMultiplier = argument.numberDouble(); + 
uassert(ErrorCodes::BadValue, + "distanceMultiplier must be nonnegative", + *distanceMultiplier >= 0); + } else if (argName == "query") { + uassert(ErrorCodes::TypeMismatch, + "query must be an object", + argument.type() == BSONType::Object); + query = argument.embeddedObject().getOwned(); + } else if (argName == "spherical") { + spherical = argument.trueValue(); + } else if (argName == "includeLocs") { + uassert(16607, + "$geoNear requires that 'includeLocs' option is a String", + argument.type() == String); + includeLocs = FieldPath(argument.str()); + } else if (argName == "uniqueDocs") { + LOGV2_WARNING(23758, + "ignoring deprecated uniqueDocs option in $geoNear aggregation stage"); + } else if (argName == kKeyFieldName) { + uassert(ErrorCodes::TypeMismatch, + str::stream() << "$geoNear parameter '" << DocumentSourceGeoNear::kKeyFieldName + << "' must be of type string but found type: " + << typeName(argument.type()), + argument.type() == BSONType::String); + const auto keyFieldStr = argument.valueStringData(); + uassert(ErrorCodes::BadValue, + str::stream() << "$geoNear parameter '" << DocumentSourceGeoNear::kKeyFieldName + << "' cannot be the empty string", + !keyFieldStr.empty()); + keyFieldPath = FieldPath(keyFieldStr); + } else { + uasserted(ErrorCodes::BadValue, + str::stream() << "Unknown argument to $geoNear: " << argument.fieldName()); + } } } diff --git a/src/mongo/db/pipeline/document_source_geo_near.h b/src/mongo/db/pipeline/document_source_geo_near.h index 44ba7ccd635cf..2fbccce9766c2 100644 --- a/src/mongo/db/pipeline/document_source_geo_near.h +++ b/src/mongo/db/pipeline/document_source_geo_near.h @@ -29,8 +29,30 @@ #pragma once +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_geo_near_cursor.cpp b/src/mongo/db/pipeline/document_source_geo_near_cursor.cpp index 8de9f300ab927..4bb3df0639105 100644 --- a/src/mongo/db/pipeline/document_source_geo_near_cursor.cpp +++ b/src/mongo/db/pipeline/document_source_geo_near_cursor.cpp @@ -27,26 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_geo_near_cursor.h" - -#include -#include -#include +#include #include +#include + +#include +#include +#include #include "mongo/base/string_data.h" -#include "mongo/bson/bsonelement.h" -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/db/catalog/collection.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source_cursor.h" -#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/document_source_geo_near_cursor.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_geo_near_cursor.h b/src/mongo/db/pipeline/document_source_geo_near_cursor.h index c071cf9dd4c09..1c9e2b79ae5ac 100644 --- a/src/mongo/db/pipeline/document_source_geo_near_cursor.h +++ b/src/mongo/db/pipeline/document_source_geo_near_cursor.h @@ -30,7 +30,11 @@ #pragma once #include +#include +#include #include +#include +#include #include #include diff --git a/src/mongo/db/pipeline/document_source_geo_near_test.cpp b/src/mongo/db/pipeline/document_source_geo_near_test.cpp index 714f98c190868..1f8b8d506374c 100644 --- a/src/mongo/db/pipeline/document_source_geo_near_test.cpp +++ b/src/mongo/db/pipeline/document_source_geo_near_test.cpp @@ -27,17 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/bson/bsonmisc.h" +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_geo_near.h" -#include "mongo/db/pipeline/document_source_limit.h" -#include "mongo/db/pipeline/pipeline.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -130,18 +132,16 @@ TEST_F(DocumentSourceGeoNearTest, RedactionWithGeoJSONPoint) { ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({ "$geoNear": { - "near": { - "$const": "?" - }, + "near": "?object", "distanceField": "HASH", - "maxDistance": "?", - "minDistance": "?", + "maxDistance": "?number", + "minDistance": "?number", "query": { "HASH": { - "$eq": "?" + "$eq": "?string" } }, - "spherical": "?" + "spherical": "?bool" } })", redact(*docSource)); @@ -162,13 +162,11 @@ TEST_F(DocumentSourceGeoNearTest, RedactionWithGeoJSONLineString) { ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({ "$geoNear": { - "near": { - "$const": "?" - }, + "near": "?object", "distanceField": "HASH", - "minDistance": "?", + "minDistance": "?number", "query": {}, - "spherical": "?" + "spherical": "?bool" } })", redact(*docSource)); @@ -193,22 +191,30 @@ TEST_F(DocumentSourceGeoNearTest, RedactionWithLegacyCoordinates) { R"({ "$geoNear": { "key": "HASH", - "near": { - "$const": "?" - }, + "near": "?array", "distanceField": "HASH", "query": { "HASH": { - "$gt": "?" 
+ "$gt": "?number" } }, - "spherical": "?", - "distanceMultiplier": "?", + "spherical": "?bool", + "distanceMultiplier": "?number", "includeLocs": "HASH.HASH" } })", redact(*docSource)); } +TEST_F(DocumentSourceGeoNearTest, FailToParseIfUnkownArg) { + auto stageObj = fromjson( + "{$geoNear: {near: {type: 'Point', coordinates: [0, 0]}, distanceField: 'distanceField', " + "spherical: true, blah: 'blaarghhh'}}"); + ASSERT_THROWS_CODE(DocumentSourceGeoNear::createFromBson(stageObj.firstElement(), getExpCtx()), + AssertionException, + ErrorCodes::BadValue); +} + + } // namespace } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.cpp b/src/mongo/db/pipeline/document_source_graph_lookup.cpp index 3ca22997078e4..5fd599983ed7a 100644 --- a/src/mongo/db/pipeline/document_source_graph_lookup.cpp +++ b/src/mongo/db/pipeline/document_source_graph_lookup.cpp @@ -27,28 +27,54 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_graph_lookup.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_comparator.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/pipeline/document_path_support.h" +#include "mongo/db/pipeline/document_source_graph_lookup.h" #include "mongo/db/pipeline/document_source_merge_gen.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" #include "mongo/db/pipeline/sort_reorder_helpers.h" -#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/stats/counters.h" #include "mongo/db/views/resolved_view.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -74,7 +100,8 @@ NamespaceString parseGraphLookupFromAndResolveNamespace(const BSONElement& elem, NamespaceString fromNss( NamespaceStringUtil::parseNamespaceFromRequest(defaultDb, elem.valueStringData())); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "invalid $graphLookup namespace: " << fromNss.ns(), + str::stream() << "invalid $graphLookup namespace: " + << fromNss.toStringForErrorMsg(), fromNss.isValid()); return fromNss; } @@ -323,7 +350,7 @@ void DocumentSourceGraphLookUp::doBreadthFirstSearch() { while (auto next = pipeline->getNext()) { uassert(40271, str::stream() - << "Documents in the '" << _from.ns() + << "Documents in the '" << _from.toStringForErrorMsg() << "' namespace 
must contain an _id for de-duplication in $graphLookup", !(*next)["_id"].missing()); @@ -583,50 +610,56 @@ void DocumentSourceGraphLookUp::checkMemoryUsage() { void DocumentSourceGraphLookUp::serializeToArray(std::vector& array, SerializationOptions opts) const { - auto explain = opts.verbosity; - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484344); - } - // Do not include tenantId in serialized 'from' namespace. auto fromValue = (pExpCtx->ns.db() == _from.db()) - ? Value(_from.coll()) - : Value(Document{{"db", _from.dbName().db()}, {"coll", _from.coll()}}); + ? Value(opts.serializeIdentifier(_from.coll())) + : Value(Document{ + {"db", opts.serializeIdentifier(_from.dbName().serializeWithoutTenantPrefix())}, + {"coll", opts.serializeIdentifier(_from.coll())}}); // Serialize default options. - MutableDocument spec(DOC("from" << fromValue << "as" << _as.fullPath() << "connectToField" - << _connectToField.fullPath() << "connectFromField" - << _connectFromField.fullPath() << "startWith" - << _startWith->serialize(false))); + MutableDocument spec(DOC("from" << fromValue << "as" << opts.serializeFieldPath(_as) + << "connectToField" << opts.serializeFieldPath(_connectToField) + << "connectFromField" + << opts.serializeFieldPath(_connectFromField) << "startWith" + << _startWith->serialize(opts))); // depthField is optional; serialize it if it was specified. if (_depthField) { - spec["depthField"] = Value(_depthField->fullPath()); + spec["depthField"] = Value(opts.serializeFieldPath(*_depthField)); } if (_maxDepth) { - spec["maxDepth"] = Value(*_maxDepth); + spec["maxDepth"] = Value(opts.serializeLiteral(*_maxDepth)); } if (_additionalFilter) { - spec["restrictSearchWithMatch"] = Value(*_additionalFilter); + if (opts.transformIdentifiers || + opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) { + auto matchExpr = + uassertStatusOK(MatchExpressionParser::parse(*_additionalFilter, pExpCtx)); + spec["restrictSearchWithMatch"] = Value(matchExpr->serialize(opts)); + } else { + spec["restrictSearchWithMatch"] = Value(*_additionalFilter); + } } // If we are explaining, include an absorbed $unwind inside the $graphLookup specification. - if (_unwind && explain) { + if (_unwind && opts.verbosity) { const boost::optional indexPath = (*_unwind)->indexPath(); spec["unwinding"] = Value(DOC("preserveNullAndEmptyArrays" - << (*_unwind)->preserveNullAndEmptyArrays() << "includeArrayIndex" - << (indexPath ? Value((*indexPath).fullPath()) : Value()))); + << opts.serializeLiteral((*_unwind)->preserveNullAndEmptyArrays()) + << "includeArrayIndex" + << (indexPath ? Value(opts.serializeFieldPath(*indexPath)) : Value()))); } array.push_back(Value(DOC(getSourceName() << spec.freeze()))); // If we are not explaining, the output of this method must be parseable, so serialize our // $unwind into a separate stage. 
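The $graphLookup serialization hunk here routes every identifier and literal through SerializationOptions helpers instead of emitting raw values, which is the recurring pattern across this change set. A condensed sketch of that pattern, under hypothetical names (DocumentSourceMyStage, _as, _limit); only helpers visible in this diff are used:

// Field paths/identifiers go through serializeFieldPath()/serializeIdentifier(),
// user-supplied constants through serializeLiteral(), so redaction and
// query-shape modes can substitute "HASH" / "?number"-style tokens.
Value DocumentSourceMyStage::serialize(SerializationOptions opts) const {
    MutableDocument spec;
    spec["as"] = Value(opts.serializeFieldPath(_as));      // identifier: hashed when redacting
    spec["limit"] = Value(opts.serializeLiteral(_limit));   // literal: becomes "?number"
    return Value(Document{{getSourceName(), spec.freezeToValue()}});
}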
- if (_unwind && !explain) { - (*_unwind)->serializeToArray(array); + if (_unwind && !opts.verbosity) { + (*_unwind)->serializeToArray(array, opts); } } diff --git a/src/mongo/db/pipeline/document_source_graph_lookup.h b/src/mongo/db/pipeline/document_source_graph_lookup.h index 64165cad00844..5c338ad154469 100644 --- a/src/mongo/db/pipeline/document_source_graph_lookup.h +++ b/src/mongo/db/pipeline/document_source_graph_lookup.h @@ -29,12 +29,49 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_comparator.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/match_expression_dependencies.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_unwind.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/lookup_set_cache.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -52,9 +89,15 @@ class DocumentSourceGraphLookUp final : public DocumentSource { const BSONElement& spec); - bool allowShardedForeignCollection(NamespaceString nss, - bool inMultiDocumentTransaction) const override { - return !inMultiDocumentTransaction || _foreignNss != nss; + Status checkShardedForeignCollAllowed(NamespaceString nss, + bool inMultiDocumentTransaction) const override { + if (!inMultiDocumentTransaction || _foreignNss != nss) { + return Status::OK(); + } + + return Status( + ErrorCodes::NamespaceCannotBeSharded, + "Sharded $graphLookup is not allowed within a multi-document transaction"); } PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const { diff --git a/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp b/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp index 39a826a978adf..d36df9b69b1bf 100644 --- a/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp +++ b/src/mongo/db/pipeline/document_source_graph_lookup_test.cpp @@ -27,23 +27,40 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include +#include #include +#include + +#include +#include +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_graph_lookup.h" #include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" #include "mongo/db/stats/counters.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -782,6 +799,129 @@ TEST_F(DocumentSourceGraphLookUpTest, IncrementNestedAggregateOpCounterOnCreateB testOpCounter(NamespaceString::createNamespaceString_forTest("local", "testColl"), 0); } +TEST_F(DocumentSourceGraphLookUpTest, RedactionStartWithSingleField) { + NamespaceString graphLookupNs( + NamespaceString::createNamespaceString_forTest(getExpCtx()->ns.dbName(), "coll")); + getExpCtx()->setResolvedNamespaces(StringMap{ + {graphLookupNs.coll().toString(), {graphLookupNs, std::vector()}}}); + + auto spec = fromjson(R"({ + "$graphLookup": { + "from": "coll", + "startWith": "$a.b", + "connectFromField": "c.d", + "connectToField": "e.f", + "as": "x", + "depthField": "y", + "maxDepth": 5, + "restrictSearchWithMatch": { + "foo": "abc", + "bar.baz": { "$gt": 5 } + } + } + })"); + auto docSource = DocumentSourceGraphLookUp::createFromBson(spec.firstElement(), getExpCtx()); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$graphLookup": { + "from": "HASH", + "as": "HASH", + "connectToField": "HASH.HASH", + "connectFromField": "HASH.HASH", + "startWith": "$HASH.HASH", + "depthField": "HASH", + "maxDepth": "?number", + "restrictSearchWithMatch": { + "$and": [ + { + "HASH": { + "$eq": "?string" + } + }, + { + "HASH.HASH": { + "$gt": "?number" + } + } + ] + } + } + })", + redact(*docSource)); +} + +TEST_F(DocumentSourceGraphLookUpTest, RedactionStartWithArrayOfFields) { + NamespaceString graphLookupNs( + NamespaceString::createNamespaceString_forTest(getExpCtx()->ns.dbName(), "coll")); + getExpCtx()->setResolvedNamespaces(StringMap{ + {graphLookupNs.coll().toString(), {graphLookupNs, std::vector()}}}); + + auto spec = fromjson(R"({ + $graphLookup: { + from: "coll", + startWith: ["$a.b", "$bar.baz"], + connectFromField: "x", + connectToField: "y", + as: "z" + } + })"); + auto docSource = DocumentSourceGraphLookUp::createFromBson(spec.firstElement(), getExpCtx()); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$graphLookup": { + "from": "HASH", + "as": "HASH", + "connectToField": "HASH", + "connectFromField": "HASH", + "startWith": ["$HASH.HASH", "$HASH.HASH"] + } + })", + redact(*docSource)); +} + +TEST_F(DocumentSourceGraphLookUpTest, RedactionWithAbsorbedUnwind) { + auto expCtx = getExpCtx(); + + NamespaceString graphLookupNs( 
+ NamespaceString::createNamespaceString_forTest(expCtx->ns.dbName(), "coll")); + expCtx->setResolvedNamespaces(StringMap{ + {graphLookupNs.coll().toString(), {graphLookupNs, std::vector()}}}); + + auto unwindStage = DocumentSourceUnwind::create(expCtx, "results", false, boost::none); + auto graphLookupStage = DocumentSourceGraphLookUp::create( + getExpCtx(), + graphLookupNs, + "results", + "from", + "to", + ExpressionFieldPath::deprecatedCreate(expCtx.get(), "startPoint"), + boost::none, + boost::none, + boost::none, + unwindStage); + + auto serialized = redactToArray(*graphLookupStage); + ASSERT_EQ(2, serialized.size()); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$graphLookup": { + "from": "HASH", + "as": "HASH", + "connectToField": "HASH", + "connectFromField": "HASH", + "startWith": "$HASH" + } + })", + serialized[0].getDocument().toBson()); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$unwind": { + path: "$HASH" + } + })", + serialized[1].getDocument().toBson()); +} using DocumentSourceGraphLookupServerlessTest = ServerlessAggregationContextFixture; @@ -872,7 +1012,7 @@ TEST_F(DocumentSourceGraphLookupServerlessTest, flagStatus); NamespaceString nss = NamespaceString::createNamespaceString_forTest( - boost::none, expCtx->ns.dbName().toString(), _targetColl); + boost::none, expCtx->ns.dbName().toString_forTest(), _targetColl); auto liteParsedLookup = DocumentSourceGraphLookUp::LiteParsed::parse(nss, originalBSON.firstElement()); auto namespaceSet = liteParsedLookup->getInvolvedNamespaces(); diff --git a/src/mongo/db/pipeline/document_source_group.cpp b/src/mongo/db/pipeline/document_source_group.cpp index e5ad992f8c7a4..41c9d093086eb 100644 --- a/src/mongo/db/pipeline/document_source_group.cpp +++ b/src/mongo/db/pipeline/document_source_group.cpp @@ -27,22 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include +#include +#include +#include -#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/accumulation_statement.h" -#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/expression_dependencies.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" -#include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/util/destructor_guard.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -93,7 +94,7 @@ boost::intrusive_ptr DocumentSourceGroup::createFromBsonWithMaxM DocumentSource::GetNextResult DocumentSourceGroup::doGetNext() { if (!_groupsReady) { - const auto initializationResult = performBlockingGroup(); + auto initializationResult = performBlockingGroup(); if (initializationResult.isPaused()) { return initializationResult; } diff --git a/src/mongo/db/pipeline/document_source_group.h b/src/mongo/db/pipeline/document_source_group.h index a1f8e9b2b9a5c..fc713df1bc0e7 100644 --- a/src/mongo/db/pipeline/document_source_group.h +++ b/src/mongo/db/pipeline/document_source_group.h @@ -29,10 +29,21 @@ #pragma once +#include +#include +#include +#include #include #include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group_base.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_group_base.cpp b/src/mongo/db/pipeline/document_source_group_base.cpp index 71c75eb00ac10..2392edd53c460 100644 --- a/src/mongo/db/pipeline/document_source_group_base.cpp +++ b/src/mongo/db/pipeline/document_source_group_base.cpp @@ -27,13 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/accumulation_statement.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/document_source_group.h" @@ -41,10 +56,15 @@ #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_dependencies.h" -#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/stats/counters.h" #include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/util/destructor_guard.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -114,7 +134,7 @@ Value DocumentSourceGroupBase::serialize(SerializationOptions opts) const { } if (_doingMerge) { - insides["$doingMerge"] = opts.serializeLiteralValue(true); + insides["$doingMerge"] = opts.serializeLiteral(true); } serializeAdditionalFields(insides, opts); @@ -127,21 +147,21 @@ Value DocumentSourceGroupBase::serialize(SerializationOptions opts) const { for (size_t i = 0; i < _accumulatedFields.size(); i++) { md[opts.serializeFieldPathFromString(_accumulatedFields[i].fieldName)] = - opts.serializeLiteralValue(static_cast( + opts.serializeLiteral(static_cast( _memoryTracker[_accumulatedFields[i].fieldName].maxMemoryBytes())); } out["maxAccumulatorMemoryUsageBytes"] = Value(md.freezeToValue()); out["totalOutputDataSizeBytes"] = - opts.serializeLiteralValue(static_cast(_stats.totalOutputDataSizeBytes)); - out["usedDisk"] = opts.serializeLiteralValue(_stats.spills > 0); - out["spills"] = opts.serializeLiteralValue(static_cast(_stats.spills)); + opts.serializeLiteral(static_cast(_stats.totalOutputDataSizeBytes)); + out["usedDisk"] = opts.serializeLiteral(_stats.spills > 0); + out["spills"] = opts.serializeLiteral(static_cast(_stats.spills)); out["spilledDataStorageSize"] = - opts.serializeLiteralValue(static_cast(_stats.spilledDataStorageSize)); + opts.serializeLiteral(static_cast(_stats.spilledDataStorageSize)); out["numBytesSpilledEstimate"] = - opts.serializeLiteralValue(static_cast(_stats.numBytesSpilledEstimate)); + opts.serializeLiteral(static_cast(_stats.numBytesSpilledEstimate)); out["spilledRecords"] = - opts.serializeLiteralValue(static_cast(_stats.spilledRecords)); + opts.serializeLiteral(static_cast(_stats.spilledRecords)); } return out.freezeToValue(); @@ -512,7 +532,6 @@ void DocumentSourceGroupBase::processDocument(const Value& id, const Document& r vector>& group = (*_groups)[id]; const bool inserted = _groups->size() != oldSize; - vector oldAccumMemUsage(numAccumulators, 0); if (inserted) { _memoryTracker.set(_memoryTracker.currentMemoryBytes() + id.getApproximateSize()); @@ -548,10 +567,10 @@ void DocumentSourceGroupBase::processDocument(const Value& id, const 
Document& r if (kDebugBuild && !pExpCtx->opCtx->readOnly()) { // In debug mode, spill every time we have a duplicate id to stress merge logic. - if (!inserted && // is a dup - !pExpCtx->inMongos && // can't spill to disk in mongos - !_memoryTracker._allowDiskUse && // don't change behavior when testing external sort - _sortedFiles.size() < 20) { // don't open too many FDs + if (!inserted && // is a dup + !pExpCtx->inMongos && // can't spill to disk in mongos + _memoryTracker._allowDiskUse && // never spill when disk use is explicitly prohibited + _sortedFiles.size() < 20) { // don't open too many FDs spill(); } } @@ -575,7 +594,7 @@ void DocumentSourceGroupBase::readyGroups() { _currentAccumulators.push_back(accumulatedField.makeAccumulator()); } - verify(_sorterIterator->more()); // we put data in, we should get something out. + MONGO_verify(_sorterIterator->more()); // we put data in, we should get something out. _firstPartOfNextGroup = _sorterIterator->next(); } else { // start the group iterator diff --git a/src/mongo/db/pipeline/document_source_group_base.h b/src/mongo/db/pipeline/document_source_group_base.h index c32009178cb15..1fc34a2c06408 100644 --- a/src/mongo/db/pipeline/document_source_group_base.h +++ b/src/mongo/db/pipeline/document_source_group_base.h @@ -29,15 +29,41 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include - +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/pipeline/accumulation_statement.h" #include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/group_from_first_document_transformation.h" #include "mongo/db/pipeline/memory_usage_tracker.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/sorter/sorter.h" +#include "mongo/db/sorter/sorter_stats.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_group_test.cpp b/src/mongo/db/pipeline/document_source_group_test.cpp index 889e68e9911d0..2ebd66abe300c 100644 --- a/src/mongo/db/pipeline/document_source_group_test.cpp +++ b/src/mongo/db/pipeline/document_source_group_test.cpp @@ -27,34 +27,56 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include #include +#include +#include #include #include #include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" -#include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/document_source_streaming_group.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/stdx/unordered_set.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -254,18 +276,7 @@ TEST_F(DocumentSourceGroupTest, GroupRedactsCorrectWithIdNull) { })"); auto docSource = DocumentSourceGroup::createFromBson(spec.firstElement(), getExpCtx()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "$group": { - "_id": { - "$const": "?" - }, - "HASH": { - "$sum": { - "$const": "?" - } - } - } - })", + R"({"$group":{"_id":"?null","HASH":{"$sum":"?number"}}})", redact(*docSource)); } @@ -1146,11 +1157,17 @@ class StreamingCanSpill final : public CheckResultsAndSpills { class StreamingAlternatingSpillAndNoSpillBatches : public CheckResultsAndSpills { public: StreamingAlternatingSpillAndNoSpillBatches() - : CheckResultsAndSpills(GroupStageType::Streaming, 3 /*expectedSpills*/) {} + : CheckResultsAndSpills(GroupStageType::Streaming, expectedSpills()) {} private: static constexpr int kCount = 12; + int expectedSpills() const { + // 'DocumentSourceGroup' has test-only behavior where it will spill more aggressively in + // debug builds. + return kDebugBuild ? kCount : 3; + } + deque inputData() final { deque queue; for (int i = 0; i < kCount; ++i) { diff --git a/src/mongo/db/pipeline/document_source_index_stats.cpp b/src/mongo/db/pipeline/document_source_index_stats.cpp index f730ff9eb0218..b29ef40e64edb 100644 --- a/src/mongo/db/pipeline/document_source_index_stats.cpp +++ b/src/mongo/db/pipeline/document_source_index_stats.cpp @@ -27,12 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/document_source_index_stats.h" -#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/server_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/net/socket_utils.h" namespace mongo { @@ -79,9 +85,6 @@ intrusive_ptr DocumentSourceIndexStats::createFromBson( } Value DocumentSourceIndexStats::serialize(SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484342); - } return Value(DOC(getSourceName() << Document())); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_index_stats.h b/src/mongo/db/pipeline/document_source_index_stats.h index 1c7f7525317be..1ca2ba9e916e0 100644 --- a/src/mongo/db/pipeline/document_source_index_stats.h +++ b/src/mongo/db/pipeline/document_source_index_stats.h @@ -29,8 +29,32 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/collection_index_usage_tracker.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_all_collection_stats.cpp b/src/mongo/db/pipeline/document_source_internal_all_collection_stats.cpp index 5eb4e7625aae4..527a91bc84be7 100644 --- a/src/mongo/db/pipeline/document_source_internal_all_collection_stats.cpp +++ b/src/mongo/db/pipeline/document_source_internal_all_collection_stats.cpp @@ -28,7 +28,31 @@ */ #include "mongo/db/pipeline/document_source_internal_all_collection_stats.h" -#include "mongo/db/pipeline/document_source_project.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/document_source_coll_stats.h" +#include "mongo/db/pipeline/document_source_single_document_transformation.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -129,12 +153,9 @@ Pipeline::SourceContainer::iterator DocumentSourceInternalAllCollectionStats::do void DocumentSourceInternalAllCollectionStats::serializeToArray(std::vector& array, SerializationOptions opts) const { 
auto explain = opts.verbosity; - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484341); - } if (explain) { BSONObjBuilder bob; - _internalAllCollectionStatsSpec.serialize(&bob); + _internalAllCollectionStatsSpec.serialize(&bob, opts); if (_absorbedMatch) { bob.append("match", _absorbedMatch->getQuery()); } @@ -170,9 +191,6 @@ const char* DocumentSourceInternalAllCollectionStats::getSourceName() const { } Value DocumentSourceInternalAllCollectionStats::serialize(SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484340); - } - return Value(Document{{getSourceName(), _internalAllCollectionStatsSpec.toBSON()}}); + return Value(Document{{getSourceName(), _internalAllCollectionStatsSpec.toBSON(opts)}}); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_internal_all_collection_stats.h b/src/mongo/db/pipeline/document_source_internal_all_collection_stats.h index 15d378b376c7d..57d1b60a64d6b 100644 --- a/src/mongo/db/pipeline/document_source_internal_all_collection_stats.h +++ b/src/mongo/db/pipeline/document_source_internal_all_collection_stats.h @@ -29,10 +29,38 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_coll_stats.h" #include "mongo/db/pipeline/document_source_internal_all_collection_stats_gen.h" #include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -58,11 +86,13 @@ class DocumentSourceInternalAllCollectionStats final : public DocumentSource { public: static std::unique_ptr parse(const NamespaceString& nss, const BSONElement& spec) { - return std::make_unique(spec.fieldName()); + return std::make_unique(nss.tenantId(), spec.fieldName()); } - explicit LiteParsed(std::string parseTimeName) - : LiteParsedDocumentSource(std::move(parseTimeName)) {} + explicit LiteParsed(const boost::optional& tenantId, std::string parseTimeName) + : LiteParsedDocumentSource(std::move(parseTimeName)), + _privileges({Privilege(ResourcePattern::forClusterResource(tenantId), + ActionType::allCollectionStats)}) {} stdx::unordered_set getInvolvedNamespaces() const final { return stdx::unordered_set(); @@ -70,13 +100,15 @@ class DocumentSourceInternalAllCollectionStats final : public DocumentSource { PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const final { - return { - Privilege(ResourcePattern::forClusterResource(), ActionType::allCollectionStats)}; + return _privileges; } bool isInitialSource() const final { return true; } + + private: + const PrivilegeVector _privileges; }; const char* getSourceName() const final; diff --git 
a/src/mongo/db/pipeline/document_source_internal_all_collection_stats.idl b/src/mongo/db/pipeline/document_source_internal_all_collection_stats.idl index bb02e8b195408..247992a02ee6a 100644 --- a/src/mongo/db/pipeline/document_source_internal_all_collection_stats.idl +++ b/src/mongo/db/pipeline/document_source_internal_all_collection_stats.idl @@ -40,8 +40,10 @@ structs: DocumentSourceInternalAllCollectionStatsSpec: description: Specification for an $_internalAllCollectionStats stage. strict: true + query_shape_component: true fields: stats: description: Specification for a $collStats stage. type: DocumentSourceCollStatsSpec optional: true + query_shape: literal diff --git a/src/mongo/db/pipeline/document_source_internal_apply_oplog_update.cpp b/src/mongo/db/pipeline/document_source_internal_apply_oplog_update.cpp index 7dcec348219d2..3fd8d6c73894f 100644 --- a/src/mongo/db/pipeline/document_source_internal_apply_oplog_update.cpp +++ b/src/mongo/db/pipeline/document_source_internal_apply_oplog_update.cpp @@ -28,19 +28,24 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_internal_apply_oplog_update.h" - -#include #include +#include +#include -#include "mongo/db/exec/add_fields_projection_executor.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/field_ref_set.h" #include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/document_source_internal_apply_oplog_update.h" #include "mongo/db/pipeline/document_source_internal_apply_oplog_update_gen.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/update/update_driver.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -99,7 +104,7 @@ DocumentSource::GetNextResult DocumentSourceInternalApplyOplogUpdate::doGetNext( Value DocumentSourceInternalApplyOplogUpdate::serialize(SerializationOptions opts) const { return Value(Document{ - {kStageName, Document{{kOplogUpdateFieldName, opts.serializeLiteralValue(_oplogUpdate)}}}}); + {kStageName, Document{{kOplogUpdateFieldName, opts.serializeLiteral(_oplogUpdate)}}}}); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_internal_apply_oplog_update.h b/src/mongo/db/pipeline/document_source_internal_apply_oplog_update.h index b4b2d370e5836..ee50141cd19b4 100644 --- a/src/mongo/db/pipeline/document_source_internal_apply_oplog_update.h +++ b/src/mongo/db/pipeline/document_source_internal_apply_oplog_update.h @@ -29,8 +29,22 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/update/update_driver.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_apply_oplog_update_test.cpp 
b/src/mongo/db/pipeline/document_source_internal_apply_oplog_update_test.cpp index ef733d1f530c3..3a1c4803189eb 100644 --- a/src/mongo/db/pipeline/document_source_internal_apply_oplog_update_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_apply_oplog_update_test.cpp @@ -27,19 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_internal_apply_oplog_update.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -254,7 +260,7 @@ TEST_F(DocumentSourceInternalApplyOplogUpdateTest, RedactsCorrectly) { ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({ "$_internalApplyOplogUpdate": { - "oplogUpdate": "?" + "oplogUpdate":"?object" } })", redact(*docSource)); diff --git a/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance.cpp b/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance.cpp index 65681cdca94f7..1eba7d2fb9a22 100644 --- a/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance.cpp +++ b/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance.cpp @@ -28,11 +28,22 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/geo/geometry_container.h" #include "mongo/db/geo/geoparser.h" #include "mongo/db/pipeline/document_source_internal_compute_geo_near_distance.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -144,13 +155,13 @@ DocumentSource::GetNextResult DocumentSourceInternalGeoNearDistance::doGetNext() Value DocumentSourceInternalGeoNearDistance::serialize(SerializationOptions opts) const { MutableDocument out; out.setField(DocumentSourceInternalGeoNearDistance::kNearFieldName, - opts.serializeLiteralValue(_coords)); + opts.serializeLiteral(_coords)); out.setField(DocumentSourceInternalGeoNearDistance::kKeyFieldName, Value(opts.serializeFieldPathFromString(_key))); out.setField(DocumentSourceInternalGeoNearDistance::kDistanceFieldFieldName, Value(opts.serializeFieldPath(_distanceField))); out.setField(DocumentSourceInternalGeoNearDistance::kDistanceMultiplierFieldName, - opts.serializeLiteralValue(_distanceMultiplier)); + opts.serializeLiteral(_distanceMultiplier)); return Value(DOC(getSourceName() << out.freeze())); } diff --git a/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance.h b/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance.h index 4514d54c24956..3e7fd179f6a00 100644 --- a/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance.h +++ 
b/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance.h @@ -29,8 +29,28 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/geo/geometry_container.h" +#include "mongo/db/geo/shapes.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance_test.cpp b/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance_test.cpp index 1e2273acb4364..21699b634dad0 100644 --- a/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_compute_geo_near_distance_test.cpp @@ -27,16 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_internal_compute_geo_near_distance.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -161,7 +163,14 @@ TEST_F(DocumentSourceInternalGeoNearDistanceTest, RedactsCorrectly) { computeGeoSpec.firstElement(), getExpCtx()); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({$_internalComputeGeoNearDistance: {near: "?", key: "HASH", distanceField: "HASH", distanceMultiplier: "?"}})", + R"({ + "$_internalComputeGeoNearDistance": { + "near": "?object", + "key": "HASH", + "distanceField": "HASH", + "distanceMultiplier": "?number" + } + })", redact(*geoDist, true)); } diff --git a/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.cpp b/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.cpp index 64c7913d81180..46745b4654aca 100644 --- a/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.cpp +++ b/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.cpp @@ -27,18 +27,28 @@ * it in the license file. 
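The redaction expectations in the oplog-update and geo-near tests above change from a bare "?" placeholder to type-tagged placeholders such as "?object" and "?number", while user field paths are reduced to a hashed token ("HASH"). A minimal standalone sketch of that idea, using simplified stand-in types rather than the real SerializationOptions API:

    // Illustrative sketch only; not the real mongo::SerializationOptions interface.
    #include <functional>
    #include <iostream>
    #include <string>

    // Stand-in for a literal whose concrete value must be hidden from the query shape,
    // but whose BSON type is still worth reporting.
    struct Literal {
        std::string typeName;  // e.g. "object", "number", "string"
    };

    std::string serializeLiteral(const Literal& lit) {
        // Replace the concrete value with a type-tagged placeholder such as "?object".
        return "?" + lit.typeName;
    }

    std::string serializeFieldPath(const std::string& path) {
        // Identifiers are replaced with a stable token; the tests above simply expect "HASH".
        return "HASH(" + std::to_string(std::hash<std::string>{}(path)) + ")";
    }

    int main() {
        std::cout << serializeLiteral({"object"}) << "\n";   // ?object
        std::cout << serializeLiteral({"number"}) << "\n";   // ?number
        std::cout << serializeFieldPath("meta1.a") << "\n";  // stable hashed token
        return 0;
    }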
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h" +#include +#include +#include #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/list_indexes_gen.h" +#include "mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h b/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h index a8e60cd0ec99d..d1bef72069a5a 100644 --- a/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h +++ b/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h @@ -29,7 +29,23 @@ #pragma once +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats_test.cpp b/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats_test.cpp index d0b1bddbe0b0f..97b4db69f1cd0 100644 --- a/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_convert_bucket_index_stats_test.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/db/exec/document_value/document_value_test_util.h" +#include + +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp index 2e5f5093e3a0d..a1a417689bdad 100644 --- a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp +++ b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.cpp @@ -27,10 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/document_source_internal_inhibit_optimization.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" + namespace mongo { REGISTER_DOCUMENT_SOURCE(_internalInhibitOptimization, diff --git a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h index 4454f929a829e..65f64ead634dc 100644 --- a/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h +++ b/src/mongo/db/pipeline/document_source_internal_inhibit_optimization.h @@ -29,7 +29,21 @@ #pragma once +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_shard_filter.cpp b/src/mongo/db/pipeline/document_source_internal_shard_filter.cpp index 574d7365b25da..3884c91f4f391 100644 --- a/src/mongo/db/pipeline/document_source_internal_shard_filter.cpp +++ b/src/mongo/db/pipeline/document_source_internal_shard_filter.cpp @@ -28,12 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/pipeline/document_source_internal_shard_filter.h" +#include #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/document_source_internal_shard_filter.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/pipeline/document_source_internal_shard_filter.h b/src/mongo/db/pipeline/document_source_internal_shard_filter.h index d05a486cbd003..20b4525aee6a9 100644 --- a/src/mongo/db/pipeline/document_source_internal_shard_filter.h +++ b/src/mongo/db/pipeline/document_source_internal_shard_filter.h @@ -29,8 +29,26 @@ #pragma once +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_shard_filter_test.cpp b/src/mongo/db/pipeline/document_source_internal_shard_filter_test.cpp index 6678f43347006..4b1ba6acec46b 100644 --- a/src/mongo/db/pipeline/document_source_internal_shard_filter_test.cpp 
+++ b/src/mongo/db/pipeline/document_source_internal_shard_filter_test.cpp @@ -27,17 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/shard_filterer.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_internal_shard_filter.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/pipeline.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_shardserver_info.cpp b/src/mongo/db/pipeline/document_source_internal_shardserver_info.cpp index d74eea0860319..2202cd9fb4bbe 100644 --- a/src/mongo/db/pipeline/document_source_internal_shardserver_info.cpp +++ b/src/mongo/db/pipeline/document_source_internal_shardserver_info.cpp @@ -27,10 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/document_source_internal_shardserver_info.h" -#include "mongo/db/pipeline/pipeline.h" + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_shardserver_info.h b/src/mongo/db/pipeline/document_source_internal_shardserver_info.h index 1e2d480a7fe46..8420774738e59 100644 --- a/src/mongo/db/pipeline/document_source_internal_shardserver_info.h +++ b/src/mongo/db/pipeline/document_source_internal_shardserver_info.h @@ -29,7 +29,29 @@ #pragma once +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp b/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp index 36ccc35bcc4aa..47a463475d29a 100644 --- a/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp +++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline.cpp @@ -27,9 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/document_source_internal_split_pipeline.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline.h b/src/mongo/db/pipeline/document_source_internal_split_pipeline.h index 53bf8885a8b81..0136c50b973ce 100644 --- a/src/mongo/db/pipeline/document_source_internal_split_pipeline.h +++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline.h @@ -29,7 +29,24 @@ #pragma once +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_internal_split_pipeline_test.cpp b/src/mongo/db/pipeline/document_source_internal_split_pipeline_test.cpp index 4b52c29e052dc..9c506fe52ff72 100644 --- a/src/mongo/db/pipeline/document_source_internal_split_pipeline_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_split_pipeline_test.cpp @@ -27,16 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include -#include -#include - -#include "mongo/db/operation_context_noop.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_internal_split_pipeline.h" #include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp index d338b88bedbaf..362f012e17be3 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket.cpp @@ -29,25 +29,43 @@ #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include #include +#include +#include #include -#include +#include +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/matcher/expression.h" -#include "mongo/db/matcher/expression_algo.h" -#include "mongo/db/matcher/expression_expr.h" -#include "mongo/db/matcher/expression_geo.h" -#include "mongo/db/matcher/expression_internal_bucket_geo_within.h" -#include "mongo/db/matcher/expression_internal_expr_comparison.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include 
"mongo/db/matcher/match_expression_dependencies.h" +#include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_multi.h" +#include "mongo/db/pipeline/document_path_support.h" #include "mongo/db/pipeline/document_source_add_fields.h" #include "mongo/db/pipeline/document_source_geo_near.h" #include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_sample.h" @@ -55,15 +73,21 @@ #include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/document_source_streaming_group.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/monotonic_expression.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/query/query_planner_common.h" -#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/db/query/sort_pattern.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_options.h" -#include "mongo/logv2/log.h" -#include "mongo/util/duration.h" -#include "mongo/util/time_support.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -113,6 +137,18 @@ auto getIncludeExcludeProjectAndType(DocumentSource* src) { return std::pair{BSONObj{}, false}; } +/** + * Creates a new DocumentSourceSort by pulling out the logic for getting maxMemoryUsageBytes. + */ +boost::intrusive_ptr createNewSortWithMemoryUsage( + const DocumentSourceSort& sort, const SortPattern& pattern, long long limit) { + boost::optional maxMemoryUsageBytes; + if (auto sortStatsPtr = dynamic_cast(sort.getSpecificStats())) { + maxMemoryUsageBytes = sortStatsPtr->maxMemoryUsageBytes; + } + return DocumentSourceSort::create(sort.getContext(), pattern, limit, maxMemoryUsageBytes); +} + /** * Checks if a sort stage's pattern following our internal unpack bucket is suitable to be reordered * before us. The sort stage must refer exclusively to the meta field or any subfields. @@ -168,18 +204,8 @@ boost::intrusive_ptr createMetadataSortForReorder( : sort.getSortKeyPattern(); std::vector updatedPattern; - if (groupIdField) { - auto groupId = FieldPath(groupIdField.value()); - SortPattern::SortPatternPart patternPart; - patternPart.isAscending = !flipSort; - patternPart.fieldPath = groupId; - updatedPattern.push_back(patternPart); - } - - for (const auto& entry : sortPattern) { updatedPattern.push_back(entry); - if (lastpointTimeField && entry.fieldPath->fullPath() == lastpointTimeField.value()) { updatedPattern.back().fieldPath = FieldPath((entry.isAscending ? 
timeseries::kControlMinFieldNamePrefix @@ -199,14 +225,35 @@ boost::intrusive_ptr createMetadataSortForReorder( updatedPattern.back().fieldPath = updated; } } - - boost::optional maxMemoryUsageBytes; - if (auto sortStatsPtr = dynamic_cast(sort.getSpecificStats())) { - maxMemoryUsageBytes = sortStatsPtr->maxMemoryUsageBytes; + // After the modifications of the sortPattern are completed, for the lastPoint + // optimizations, the group field needs to be added to the beginning of the sort pattern. + // Do note that the modified sort pattern is for sorting within a group (within the bucket) + // and the plan is to do the grouping and sort in one go. + // If the group field is already in the sortPattern then it needs to be moved to the first + // position. A flip in the latter case is not necessary anymore as the sort order was + // already flipped. + // Example 1: $group: {a:1}, $sort{b: 1, a: -1} --> modifiedPattern: {a: -1, b: 1} + // Example 2: $group: {c:1}, $sort{d: -1, e: 1} --> modifiedPattern: {c: 1, d: -1, e: 1} + if (groupIdField) { + const auto groupId = FieldPath(groupIdField.value()); + SortPattern::SortPatternPart patternPart; + patternPart.fieldPath = groupId; + const auto pattern = + std::find_if(updatedPattern.begin(), + updatedPattern.end(), + [&groupId](const SortPattern::SortPatternPart& s) -> bool { + return s.fieldPath->fullPath().compare(groupId.fullPath()) == 0; + }); + if (pattern != updatedPattern.end()) { + patternPart.isAscending = pattern->isAscending; + updatedPattern.erase(pattern); + } else { + patternPart.isAscending = !flipSort; + } + updatedPattern.insert(updatedPattern.begin(), patternPart); } - return DocumentSourceSort::create( - sort.getContext(), SortPattern{updatedPattern}, 0, maxMemoryUsageBytes); + return createNewSortWithMemoryUsage(sort, SortPattern{std::move(updatedPattern)}, 0); } /** @@ -226,7 +273,7 @@ boost::intrusive_ptr createBucketGroupForReorder( expCtx.get(), field.firstElement(), expCtx->variablesParseState)); }; - return DocumentSourceGroup::create(expCtx, groupByExpr, accumulators); + return DocumentSourceGroup::create(expCtx, groupByExpr, std::move(accumulators)); } // Optimize the section of the pipeline before the $_internalUnpackBucket stage. @@ -507,17 +554,17 @@ void DocumentSourceInternalUnpackBucket::serializeToArray(std::vector& ar out.addField(timeseries::kMetaFieldName, Value{opts.serializeFieldPathFromString(*spec.metaField())}); } - out.addField(kBucketMaxSpanSeconds, opts.serializeLiteralValue(Value{_bucketMaxSpanSeconds})); + out.addField(kBucketMaxSpanSeconds, opts.serializeLiteral(Value{_bucketMaxSpanSeconds})); if (_assumeNoMixedSchemaData) out.addField(kAssumeNoMixedSchemaData, - opts.serializeLiteralValue(Value(_assumeNoMixedSchemaData))); + opts.serializeLiteral(Value(_assumeNoMixedSchemaData))); if (spec.usesExtendedRange()) { // Include this flag so that 'explain' is more helpful. // But this is not so useful for communicating from one process to another, // because mongos and/or the primary shard don't know whether any other shard // has extended-range data.
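A minimal standalone sketch of the reordering rule described in the lastpoint comment above: the group-by field moves to the front of the rewritten sort pattern, keeping its direction if it already appears in the pattern and otherwise taking the flipped direction. SortPart and moveGroupFieldToFront are illustrative names, not the real SortPattern API:

    // Illustrative sketch only; the real code operates on SortPattern::SortPatternPart.
    #include <algorithm>
    #include <iostream>
    #include <string>
    #include <vector>

    struct SortPart {
        std::string field;
        bool ascending;
    };

    std::vector<SortPart> moveGroupFieldToFront(std::vector<SortPart> pattern,
                                                const std::string& groupField,
                                                bool flipSort) {
        SortPart front{groupField, !flipSort};  // default direction if the field is absent
        auto it = std::find_if(pattern.begin(), pattern.end(), [&](const SortPart& p) {
            return p.field == groupField;
        });
        if (it != pattern.end()) {
            front.ascending = it->ascending;  // keep the direction that was already flipped
            pattern.erase(it);
        }
        pattern.insert(pattern.begin(), front);
        return pattern;
    }

    int main() {
        // Example 1 from the comment: $group on "a", $sort {b: 1, a: -1} -> {a: -1, b: 1}.
        for (const auto& part : moveGroupFieldToFront({{"b", true}, {"a", false}}, "a", false))
            std::cout << part.field << (part.ascending ? ": 1 " : ": -1 ");
        std::cout << "\n";
        return 0;
    }

Example 2 behaves the same way: the group field is absent from the sort pattern, so it is inserted at the front with the non-flipped direction.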
- out.addField(kUsesExtendedRange, opts.serializeLiteralValue(Value{true})); + out.addField(kUsesExtendedRange, opts.serializeLiteral(Value{true})); } if (!spec.computedMetaProjFields().empty()) @@ -535,11 +582,11 @@ void DocumentSourceInternalUnpackBucket::serializeToArray(std::vector& ar if (_bucketUnpacker.includeMinTimeAsMetadata()) { out.addField(kIncludeMinTimeAsMetadata, - opts.serializeLiteralValue(Value{_bucketUnpacker.includeMinTimeAsMetadata()})); + opts.serializeLiteral(Value{_bucketUnpacker.includeMinTimeAsMetadata()})); } if (_bucketUnpacker.includeMaxTimeAsMetadata()) { out.addField(kIncludeMaxTimeAsMetadata, - opts.serializeLiteralValue(Value{_bucketUnpacker.includeMaxTimeAsMetadata()})); + opts.serializeLiteral(Value{_bucketUnpacker.includeMaxTimeAsMetadata()})); } if (_wholeBucketFilter) { @@ -558,8 +605,8 @@ void DocumentSourceInternalUnpackBucket::serializeToArray(std::vector& ar } else { if (_sampleSize) { out.addField("sample", - opts.serializeLiteralValue(Value{static_cast(*_sampleSize)})); - out.addField("bucketMaxCount", opts.serializeLiteralValue(Value{_bucketMaxCount})); + opts.serializeLiteral(Value{static_cast(*_sampleSize)})); + out.addField("bucketMaxCount", opts.serializeLiteral(Value{_bucketMaxCount})); } array.push_back(Value(DOC(getSourceName() << out.freeze()))); } @@ -724,7 +771,6 @@ BucketSpec::BucketPredicate DocumentSourceInternalUnpackBucket::createPredicates matchExpr, _bucketUnpacker.bucketSpec(), _bucketMaxSpanSeconds, - pExpCtx->collationMatchesDefault, pExpCtx, haveComputedMetaField(), _bucketUnpacker.includeMetaField(), @@ -747,6 +793,15 @@ std::pair DocumentSourceInternalUnpackBucket::extractProjectForPu std::pair DocumentSourceInternalUnpackBucket::rewriteGroupByMinMax(Pipeline::SourceContainer::iterator itr, Pipeline::SourceContainer* container) { + // The computed min/max for each bucket uses the default collation. If the collation of the + // query doesn't match the default we cannot rely on the computed values as they might differ + // (e.g. numeric and lexicographic collations compare "5" and "10" in opposite order). + // NB: Unfortunately, this means we have to forgo the optimization even if the source field is + // numeric and not affected by the collation as we cannot know the data type until runtime. + if (pExpCtx->collationMatchesDefault == ExpressionContext::CollationMatchesDefault::kNo) { + return {}; + } + const auto* groupPtr = dynamic_cast(std::next(itr)->get()); if (groupPtr == nullptr) { return {}; @@ -1208,9 +1263,10 @@ Pipeline::SourceContainer::iterator DocumentSourceInternalUnpackBucket::doOptimi // and return a pointer to the preceding stage. auto sortForReorder = createMetadataSortForReorder(*sortPtr); - // If the original sort had a limit, we will not preserve that in the swapped sort. - // Instead we will add a $limit to the end of the pipeline to keep the number of - // expected results. + // If the original sort had a limit that did not come from the limit value that we + // just added above, we will not preserve that limit in the swapped sort. Instead we + // will add a $limit to the end of the pipeline to keep the number of expected + // results. if (auto limit = sortPtr->getLimit(); limit && *limit != 0) { container->push_back(DocumentSourceLimit::create(pExpCtx, *limit)); } @@ -1302,6 +1358,21 @@ Pipeline::SourceContainer::iterator DocumentSourceInternalUnpackBucket::doOptimi return itr; } } + + // If the next stage is a limit, then push the limit above to avoid fetching more buckets than + // necessary.
+ // If _eventFilter is true, a match was present which may impact the number of + // documents we return from limit, hence we don't want to push limit. + // If _triedLimitPushDown is true, we have already done a limit push down and don't want to + // push again to avoid an infinite loop. + if (!_eventFilter && !_triedLimitPushDown) { + if (auto limitPtr = dynamic_cast(std::next(itr)->get()); limitPtr) { + _triedLimitPushDown = true; + container->insert(itr, DocumentSourceLimit::create(getContext(), limitPtr->getLimit())); + return container->begin(); + } + } + { // Check if we can avoid unpacking if we have a group stage with min/max aggregates. auto [success, result] = rewriteGroupByMinMax(itr, container); diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket.h b/src/mongo/db/pipeline/document_source_internal_unpack_bucket.h index 03627b3f7354a..a5b081c50bfd5 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket.h +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket.h @@ -29,15 +29,36 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include #include -#include "mongo/db/exec/bucket_unpacker.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/timeseries/bucket_spec.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" namespace mongo { class DocumentSourceInternalUnpackBucket : public DocumentSource { @@ -297,5 +318,6 @@ class DocumentSourceInternalUnpackBucket : public DocumentSource { bool _optimizedEndOfPipeline = false; bool _triedInternalizeProject = false; bool _triedLastpointRewrite = false; + bool _triedLimitPushDown = false; }; } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/create_predicates_on_bucket_level_field_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/create_predicates_on_bucket_level_field_test.cpp index a054bee04dcf4..d177014ba4e16 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/create_predicates_on_bucket_level_field_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/create_predicates_on_bucket_level_field_test.cpp @@ -27,13 +27,38 @@ * it in the license file. 
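The $limit push-down added in the hunk above is safe because every bucket unpacks into at least one event, so reading the first N buckets always yields at least the first N events; the original $limit still runs after unpacking to trim the result to N. A small self-contained sketch of that reasoning (illustrative only, not MongoDB code):

    #include <algorithm>
    #include <iostream>
    #include <vector>

    int main() {
        // Number of events stored in each bucket, in scan order; every bucket holds >= 1 event.
        const std::vector<int> eventsPerBucket{3, 1, 4, 2};
        const int limit = 5;

        // "Pushed down" limit: stop after 'limit' buckets. That is always enough input,
        // because even in the worst case (one event per bucket) it covers 'limit' events.
        int unpackedEvents = 0;
        for (int i = 0; i < static_cast<int>(eventsPerBucket.size()) && i < limit; ++i)
            unpackedEvents += eventsPerBucket[i];

        // The original $limit still applies to the unpacked events.
        std::cout << "unpacked " << unpackedEvents << " events, returning "
                  << std::min(unpackedEvents, limit) << "\n";
        return 0;
    }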
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/timeseries/bucket_spec.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_tree.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" #include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/extract_or_build_project_to_internalize_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/extract_or_build_project_to_internalize_test.cpp index 3bee8e7b5b0b2..f0f31c2fedd62 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/extract_or_build_project_to_internalize_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/extract_or_build_project_to_internalize_test.cpp @@ -27,14 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/extract_project_for_pushdown_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/extract_project_for_pushdown_test.cpp index 1a82b1eb31306..ca203bc30696a 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/extract_project_for_pushdown_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/extract_project_for_pushdown_test.cpp @@ -27,16 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" #include "mongo/db/pipeline/document_source_project.h" -#include "mongo/db/pipeline/pipeline.h" -#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/group_reorder_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/group_reorder_test.cpp index 749c8c33a0242..b6ccd1ff67f5e 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/group_reorder_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/group_reorder_test.cpp @@ -27,10 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/internalize_project_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/internalize_project_test.cpp index c86ef45e44783..64ffae708d5fa 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/internalize_project_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/internalize_project_test.cpp @@ -27,13 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/limit_reorder_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/limit_reorder_test.cpp new file mode 100644 index 0000000000000..78ed3ccecbede --- /dev/null +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/limit_reorder_test.cpp @@ -0,0 +1,192 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" + +namespace mongo { +namespace { + +using InternalUnpackBucketLimitReorderTest = AggregationContextFixture; + +auto unpackSpecObj = fromjson(R"({ + $_internalUnpackBucket: { + exclude: [], timeField: 'foo', + metaField: 'meta1', + bucketMaxSpanSeconds: 3600 + } + })"); +auto limitObj2 = fromjson("{$limit: 2}"); +auto limitObj10 = fromjson("{$limit: 10}"); +auto limitObj5 = fromjson("{$limit: 5}"); +auto matchObj = fromjson("{$match: {'_id': 2}}"); +auto sortObj = fromjson("{$sort: {'meta1.a': 1, 'meta1.b': -1}}"); + +// Simple test to push limit down. +TEST_F(InternalUnpackBucketLimitReorderTest, OptimizeForOnlyLimit) { + auto pipeline = Pipeline::parse(makeVector(unpackSpecObj, limitObj2), getExpCtx()); + pipeline->optimizePipeline(); + + auto serialized = pipeline->serializeToBson(); + + // $limit is now before unpack bucket. + ASSERT_EQ(3, serialized.size()); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[0]); + ASSERT_BSONOBJ_EQ(unpackSpecObj, serialized[1]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[2]); +} + +// Test that when there are multiple limits in a row, they are merged into one taking the smallest +// limit value ({$limit: 2} in this case) and pushed down. +TEST_F(InternalUnpackBucketLimitReorderTest, OptimizeForMultipleLimits) { + auto pipeline = + Pipeline::parse(makeVector(unpackSpecObj, limitObj10, limitObj2, limitObj5), getExpCtx()); + pipeline->optimizePipeline(); + + auto serialized = pipeline->serializeToBson(); + + ASSERT_EQ(3, serialized.size()); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[0]); + ASSERT_BSONOBJ_EQ(unpackSpecObj, serialized[1]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[2]); +} + +// Test that the stages after $limit are also preserved. +TEST_F(InternalUnpackBucketLimitReorderTest, OptimizeForLimitWithMatch) { + auto pipeline = Pipeline::parse(makeVector(unpackSpecObj, limitObj2, matchObj), getExpCtx()); + pipeline->optimizePipeline(); + + auto serialized = pipeline->serializeToBson(); + + // $limit is before unpack bucket stage. + ASSERT_EQ(4, serialized.size()); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[0]); + ASSERT_BSONOBJ_EQ(unpackSpecObj, serialized[1]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[2]); + ASSERT_BSONOBJ_EQ(fromjson("{$match: {'_id': 2}}"), serialized[3]); +} + +// Test that limit is not pushed down if it comes after match. +TEST_F(InternalUnpackBucketLimitReorderTest, NoOptimizeForMatchBeforeLimit) { + auto pipeline = Pipeline::parse(makeVector(unpackSpecObj, matchObj, limitObj2), getExpCtx()); + pipeline->optimizePipeline(); + + auto serialized = pipeline->serializeToBson(); + + // Using hasField rather than matching whole json to check that the stages are what we expect + // because the match push down changes the shape of the original $match and + // $_internalUnpackBucket. 
+ ASSERT_EQ(3, serialized.size()); + ASSERT(serialized[0].hasField("$match")); + ASSERT(serialized[1].hasField("$_internalUnpackBucket")); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[2]); +} + +// Test that the sort that was pushed up absorbs the limit, while preserving the original limit. +TEST_F(InternalUnpackBucketLimitReorderTest, OptimizeForLimitWithSort) { + + auto pipeline = Pipeline::parse(makeVector(unpackSpecObj, sortObj, limitObj2), getExpCtx()); + pipeline->optimizePipeline(); + + auto serialized = pipeline->serializeToBson(); + auto container = pipeline->getSources(); + + // The following assertions ensure that the first limit is absorbed by the sort. The serialized + // array has 4 stages even though the first limit is absorbed by the sort, because + // serializeToArray adds a limit stage when the $sort has a $limit. + ASSERT_EQ(4, serialized.size()); + ASSERT_BSONOBJ_EQ(fromjson("{$sort: {'meta.a': 1, 'meta.b': -1}}"), serialized[0]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[1]); + ASSERT_BSONOBJ_EQ(unpackSpecObj, serialized[2]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[3]); + + ASSERT_EQ(3, container.size()); + auto firstSort = dynamic_cast(container.begin()->get()); + ASSERT(firstSort->hasLimit()); + ASSERT_EQ(2, *firstSort->getLimit()); +} + +// Test for sort with multiple limits in increasing limit values. +TEST_F(InternalUnpackBucketLimitReorderTest, OptimizeForLimitWithSortAndTwoLimitsIncreasing) { + auto pipeline = + Pipeline::parse(makeVector(unpackSpecObj, sortObj, limitObj5, limitObj10), getExpCtx()); + pipeline->optimizePipeline(); + + auto serialized = pipeline->serializeToBson(); + auto container = pipeline->getSources(); + + ASSERT_EQ(4, serialized.size()); + ASSERT_BSONOBJ_EQ(fromjson("{$sort: {'meta.a': 1, 'meta.b': -1}}"), serialized[0]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 5}"), serialized[1]); + ASSERT_BSONOBJ_EQ(unpackSpecObj, serialized[2]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 5}"), serialized[3]); + + ASSERT_EQ(3, container.size()); + auto firstSort = dynamic_cast(container.begin()->get()); + ASSERT(firstSort->hasLimit()); + ASSERT_EQ(5, *firstSort->getLimit()); +} + +// Test for sort with multiple limits in decreasing limit values. In this case, the last limit +// {$limit: 2} would eventually replace the {$limit: 10} after {$limit: 10} is pushed up. 
+TEST_F(InternalUnpackBucketLimitReorderTest, OptimizeForLimitWithSortAndTwoLimitsDecreasing) { + auto pipeline = + Pipeline::parse(makeVector(unpackSpecObj, sortObj, limitObj10, limitObj2), getExpCtx()); + pipeline->optimizePipeline(); + + auto serialized = pipeline->serializeToBson(); + auto container = pipeline->getSources(); + + ASSERT_EQ(4, serialized.size()); + ASSERT_BSONOBJ_EQ(fromjson("{$sort: {'meta.a': 1, 'meta.b': -1}}"), serialized[0]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[1]); + ASSERT_BSONOBJ_EQ(unpackSpecObj, serialized[2]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[3]); + + ASSERT_EQ(3, container.size()); + auto firstSort = dynamic_cast(container.begin()->get()); + ASSERT(firstSort->hasLimit()); + ASSERT_EQ(2, *firstSort->getLimit()); +} + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/optimize_lastpoint_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/optimize_lastpoint_test.cpp index 77cfaadabde88..2c04a4da6e0c2 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/optimize_lastpoint_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/optimize_lastpoint_test.cpp @@ -27,12 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" -#include "mongo/db/query/util/make_data_structure.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/optimize_pipeline_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/optimize_pipeline_test.cpp index b008a8089d51d..530aaefb584ea 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/optimize_pipeline_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/optimize_pipeline_test.cpp @@ -27,14 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" -#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/pushdown_computed_meta_projections_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/pushdown_computed_meta_projections_test.cpp index 3f34f8cc3450f..c5b7a6bae8004 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/pushdown_computed_meta_projections_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/pushdown_computed_meta_projections_test.cpp @@ -27,15 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/bson/unordered_fields_bsonobj_comparator.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" -#include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/sample_reorder_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/sample_reorder_test.cpp index 0b26fce8604bb..d22333b018624 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/sample_reorder_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/sample_reorder_test.cpp @@ -27,11 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/sort_reorder_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/sort_reorder_test.cpp index 819edc3029ef3..f9d6a9cd96b8e 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/sort_reorder_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/sort_reorder_test.cpp @@ -27,10 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -85,14 +95,22 @@ TEST_F(InternalUnpackBucketSortReorderTest, OptimizeForMetaSortLimit) { pipeline->optimizePipeline(); auto serialized = pipeline->serializeToBson(); + auto container = pipeline->getSources(); - // $match and $sort are now before $_internalUnpackBucket, with a new $limit added after the - // stage. - ASSERT_EQ(4, serialized.size()); + // $match and $sort are now before $_internalUnpackBucket, with a new $limit added before and + // after the stage. + ASSERT_EQ(5, serialized.size()); ASSERT_BSONOBJ_EQ(fromjson("{$match: {meta: {$gt: 2}}}"), serialized[0]); ASSERT_BSONOBJ_EQ(fromjson("{$sort: {'meta.a': 1, 'meta.b': -1}}"), serialized[1]); - ASSERT_BSONOBJ_EQ(unpackSpecObj, serialized[2]); - ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[3]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[2]); + ASSERT_BSONOBJ_EQ(unpackSpecObj, serialized[3]); + ASSERT_BSONOBJ_EQ(fromjson("{$limit: 2}"), serialized[4]); + + // The following assertions ensure that the first limit is absorbed by the sort. When we call + // serializeToArray on DocumentSourceSort, it tries to pull the limit out of sort as its own + // additional stage. The container from pipeline->getSources(), on the other hand, preserves the + // original pipeline with limit absorbed into sort. 
Therefore, there should only be 4 stages + ASSERT_EQ(4, container.size()); } } // namespace diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/split_match_on_meta_and_rename_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/split_match_on_meta_and_rename_test.cpp index 4ce5d558ac488..030f000ba58dc 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/split_match_on_meta_and_rename_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/split_match_on_meta_and_rename_test.cpp @@ -27,14 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" -#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" -#include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/unpack_bucket_exec_test.cpp b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/unpack_bucket_exec_test.cpp index 8ca1f6ec75d3c..fbacee8252fef 100644 --- a/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/unpack_bucket_exec_test.cpp +++ b/src/mongo/db/pipeline/document_source_internal_unpack_bucket_test/unpack_bucket_exec_test.cpp @@ -27,11 +27,29 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" #include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -926,26 +944,17 @@ TEST_F(InternalUnpackBucketExecTest, ParserRoundtripsComputedMetaProjFieldOverri ASSERT_BSONOBJ_EQ(array[0].getDocument().toBson(), bson); } -std::string redactFieldNameForTest(StringData s) { - return str::stream() << "HASH<" << s << ">"; -} - TEST_F(InternalUnpackBucketExecTest, RedactsCorrectly) { auto bson = fromjson( "{$_internalUnpackBucket: {include: ['a', 'b', 'c'], timeField: 'time', metaField: 'meta', " "bucketMaxSpanSeconds: 3600, computedMetaProjFields: ['a', 'b', 'c']}}"); - auto array = std::vector{}; - SerializationOptions opts; - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.redactIdentifiers = true; - opts.replacementForLiteralArgs = "?"; - DocumentSourceInternalUnpackBucket::createFromBsonInternal(bson.firstElement(), getExpCtx()) - ->serializeToArray(array, opts); - ASSERT_VALUE_EQ_AUTO( // NOLINT + auto docSource = DocumentSourceInternalUnpackBucket::createFromBsonInternal(bson.firstElement(), + getExpCtx()); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT "{$_internalUnpackBucket: {include: [\"HASH\", \"HASH\", \"HASH\"], timeField: " - "\"HASH\", \"HASH\", \"HASH\"]}}", - array[0]); + redact(*docSource)); } } // namespace } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_limit.cpp b/src/mongo/db/pipeline/document_source_limit.cpp index bb3302b68d3a4..f3516be3d4141 100644 --- a/src/mongo/db/pipeline/document_source_limit.cpp +++ b/src/mongo/db/pipeline/document_source_limit.cpp @@ -27,16 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/pipeline/document_source_limit.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -89,7 +95,7 @@ DocumentSource::GetNextResult DocumentSourceLimit::doGetNext() { } Value DocumentSourceLimit::serialize(SerializationOptions opts) const { - return Value(Document{{getSourceName(), opts.serializeLiteralValue(_limit)}}); + return Value(Document{{getSourceName(), opts.serializeLiteral(_limit)}}); } intrusive_ptr DocumentSourceLimit::create( diff --git a/src/mongo/db/pipeline/document_source_limit.h b/src/mongo/db/pipeline/document_source_limit.h index a080b832ee372..5a207f27db44c 100644 --- a/src/mongo/db/pipeline/document_source_limit.h +++ b/src/mongo/db/pipeline/document_source_limit.h @@ -29,7 +29,25 @@ #pragma once +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_limit_test.cpp b/src/mongo/db/pipeline/document_source_limit_test.cpp index e35ec2fd5ed78..807c6ccd13bce 100644 --- a/src/mongo/db/pipeline/document_source_limit_test.cpp +++ b/src/mongo/db/pipeline/document_source_limit_test.cpp @@ -27,10 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/dependencies.h" @@ -38,8 +44,11 @@ #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/document_source_project.h" +#include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/pipeline.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -163,13 +172,9 @@ TEST_F(DocumentSourceLimitTest, ShouldPropagatePauses) { TEST_F(DocumentSourceLimitTest, RedactsCorrectly) { auto limit = DocumentSourceLimit::create(getExpCtx(), 2); - SerializationOptions opts; - opts.replacementForLiteralArgs = "?"_sd; - std::vector vec; - limit->serializeToArray(vec, opts); ASSERT_VALUE_EQ_AUTO( // NOLINT - "{$limit: \"?\"}", - vec[0]); + "{ $limit: \"?number\" }", + redact(*limit)); } } // namespace diff --git a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp index 835a359809950..c12669a9f7c1e 100644 --- a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp +++ b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.cpp @@ -27,14 +27,19 @@ * it in the license file. 
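Reviewer aside: the `DocumentSourceLimit` hunks above are representative of the patch-wide move from the ad-hoc `redactIdentifiers`/`replacementForLiteralArgs` flags to the `SerializationOptions` helpers (`serializeLiteral`, `serializeIdentifier`, `serializeFieldPath`) plus the `redact()` test helper. Below is a minimal sketch of a stage serializer under the new scheme; the free function and its name are illustrative only, not part of the patch.

```cpp
// Sketch: serialize a $limit-like stage through SerializationOptions. With default options
// this yields {$limit: 2}; with a redacting literal policy it yields {$limit: "?number"},
// matching the expectation in the updated DocumentSourceLimitTest above.
Value serializeLimitStage(long long limit, const SerializationOptions& opts) {
    return Value(Document{{"$limit", opts.serializeLiteral(limit)}});
}
```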
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/document_source_list_cached_and_active_users.h" -#include "mongo/db/auth/authorization_session.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/user_name.h" -#include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h index d86611ac8745a..4f9c726094bbb 100644 --- a/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h +++ b/src/mongo/db/pipeline/document_source_list_cached_and_active_users.h @@ -29,11 +29,37 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { @@ -49,11 +75,13 @@ class DocumentSourceListCachedAndActiveUsers final : public DocumentSource { public: static std::unique_ptr parse(const NamespaceString& nss, const BSONElement& spec) { - return std::make_unique(spec.fieldName()); + return std::make_unique(spec.fieldName(), nss.tenantId()); } - explicit LiteParsed(std::string parseTimeName) - : LiteParsedDocumentSource(std::move(parseTimeName)) {} + explicit LiteParsed(std::string parseTimeName, const boost::optional& tenantId) + : LiteParsedDocumentSource(std::move(parseTimeName)), + _requiredPrivilege(Privilege(ResourcePattern::forAnyNormalResource(tenantId), + ActionType::listCachedAndActiveUsers)) {} stdx::unordered_set getInvolvedNamespaces() const final { return stdx::unordered_set(); @@ -61,8 +89,7 @@ class DocumentSourceListCachedAndActiveUsers final : public DocumentSource { PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const final { - return {Privilege(ResourcePattern::forAnyNormalResource(), - ActionType::listCachedAndActiveUsers)}; + return {_requiredPrivilege}; } bool isInitialSource() const final { @@ -81,6 +108,9 @@ class DocumentSourceListCachedAndActiveUsers final : public DocumentSource { void assertSupportsMultiDocumentTransaction() const { transactionNotSupported(kStageName); } + + private: + const Privilege _requiredPrivilege; }; const char* getSourceName() const final { @@ -88,9 +118,6 @@ class DocumentSourceListCachedAndActiveUsers final : 
public DocumentSource { } Value serialize(SerializationOptions opts = SerializationOptions()) const final override { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484330); - } return Value(Document{{getSourceName(), Document{}}}); } diff --git a/src/mongo/db/pipeline/document_source_list_catalog.cpp b/src/mongo/db/pipeline/document_source_list_catalog.cpp index 64ca0219e765e..0d9798e33814e 100644 --- a/src/mongo/db/pipeline/document_source_list_catalog.cpp +++ b/src/mongo/db/pipeline/document_source_list_catalog.cpp @@ -31,13 +31,25 @@ #include -#include "mongo/db/feature_compatibility_version_documentation.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/util/version/releases.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -59,17 +71,19 @@ PrivilegeVector DocumentSourceListCatalog::LiteParsed::requiredPrivileges( // See builtin_roles.cpp. ActionSet listCollectionsAndIndexesActions{ActionType::listCollections, ActionType::listIndexes}; - return _ns.isCollectionlessAggregateNS() - ? PrivilegeVector{Privilege(ResourcePattern::forClusterResource(), - ActionType::listDatabases), - Privilege(ResourcePattern::forAnyNormalResource(), - listCollectionsAndIndexesActions), - Privilege(ResourcePattern::forCollectionName("system.js"), - listCollectionsAndIndexesActions), - Privilege(ResourcePattern::forAnySystemBuckets(), - listCollectionsAndIndexesActions)} - : PrivilegeVector{ - Privilege(ResourcePattern::forExactNamespace(_ns), listCollectionsAndIndexesActions)}; + if (_ns.isCollectionlessAggregateNS()) { + const auto& tenantId = _ns.tenantId(); + return {Privilege(ResourcePattern::forClusterResource(tenantId), ActionType::listDatabases), + Privilege(ResourcePattern::forAnyNormalResource(tenantId), + listCollectionsAndIndexesActions), + Privilege(ResourcePattern::forCollectionName(tenantId, "system.js"_sd), + listCollectionsAndIndexesActions), + Privilege(ResourcePattern::forAnySystemBuckets(tenantId), + listCollectionsAndIndexesActions)}; + } else { + return { + Privilege(ResourcePattern::forExactNamespace(_ns), listCollectionsAndIndexesActions)}; + } } DocumentSource::GetNextResult DocumentSourceListCatalog::doGetNext() { @@ -108,7 +122,7 @@ intrusive_ptr DocumentSourceListCatalog::createFromBson( uassert( ErrorCodes::InvalidNamespace, "Collectionless $listCatalog must be run against the 'admin' database with {aggregate: 1}", - nss.db() == DatabaseName::kAdmin.db() || !nss.isCollectionlessAggregateNS()); + nss.isAdminDB() || !nss.isCollectionlessAggregateNS()); uassert(ErrorCodes::QueryFeatureNotAllowed, fmt::format("The {} aggregation stage is not enabled", kStageName), diff --git a/src/mongo/db/pipeline/document_source_list_catalog.h b/src/mongo/db/pipeline/document_source_list_catalog.h index f42bce56a049a..74d8f0e6cd1ff 100644 --- a/src/mongo/db/pipeline/document_source_list_catalog.h +++ 
b/src/mongo/db/pipeline/document_source_list_catalog.h @@ -30,10 +30,30 @@ #pragma once #include - +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_list_local_sessions.cpp b/src/mongo/db/pipeline/document_source_list_local_sessions.cpp index 637171d2e7ef4..89657b13b48e5 100644 --- a/src/mongo/db/pipeline/document_source_list_local_sessions.cpp +++ b/src/mongo/db/pipeline/document_source_list_local_sessions.cpp @@ -27,13 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source_list_local_sessions.h" #include "mongo/db/pipeline/document_source_list_sessions_gen.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { @@ -112,7 +130,8 @@ std::vector mongo::listSessionsUsersToDigests( return ret; } -mongo::PrivilegeVector mongo::listSessionsRequiredPrivileges(const ListSessionsSpec& spec) { +mongo::PrivilegeVector mongo::listSessionsRequiredPrivileges( + const ListSessionsSpec& spec, const boost::optional& tenantId) { const auto needsPrivs = ([spec]() { if (spec.getAllUsers()) { return true; @@ -128,7 +147,7 @@ mongo::PrivilegeVector mongo::listSessionsRequiredPrivileges(const ListSessionsS })(); if (needsPrivs) { - return {Privilege(ResourcePattern::forClusterResource(), ActionType::listSessions)}; + return {Privilege(ResourcePattern::forClusterResource(tenantId), ActionType::listSessions)}; } else { return PrivilegeVector(); } diff --git a/src/mongo/db/pipeline/document_source_list_local_sessions.h b/src/mongo/db/pipeline/document_source_list_local_sessions.h index 1244c2fa0ffa4..1ebfe947db822 100644 --- a/src/mongo/db/pipeline/document_source_list_local_sessions.h +++ b/src/mongo/db/pipeline/document_source_list_local_sessions.h @@ -29,20 +29,44 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/crypto/sha256_block.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include 
"mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_list_sessions_gen.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { ListSessionsSpec listSessionsParseSpec(StringData stageName, const BSONElement& spec); -PrivilegeVector listSessionsRequiredPrivileges(const ListSessionsSpec& spec); +PrivilegeVector listSessionsRequiredPrivileges(const ListSessionsSpec& spec, + const boost::optional& tenantId); std::vector listSessionsUsersToDigests(const std::vector& users); /** @@ -61,11 +85,16 @@ class DocumentSourceListLocalSessions final : public DocumentSource { return std::make_unique( spec.fieldName(), + nss.tenantId(), listSessionsParseSpec(DocumentSourceListLocalSessions::kStageName, spec)); } - explicit LiteParsed(std::string parseTimeName, const ListSessionsSpec& spec) - : LiteParsedDocumentSource(std::move(parseTimeName)), _spec(spec) {} + explicit LiteParsed(std::string parseTimeName, + const boost::optional& tenantId, + const ListSessionsSpec& spec) + : LiteParsedDocumentSource(std::move(parseTimeName)), + _spec(spec), + _privileges(listSessionsRequiredPrivileges(_spec, tenantId)) {} stdx::unordered_set getInvolvedNamespaces() const final { return stdx::unordered_set(); @@ -73,7 +102,7 @@ class DocumentSourceListLocalSessions final : public DocumentSource { PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const final { - return listSessionsRequiredPrivileges(_spec); + return _privileges; } bool isInitialSource() const final { @@ -95,6 +124,7 @@ class DocumentSourceListLocalSessions final : public DocumentSource { private: const ListSessionsSpec _spec; + const PrivilegeVector _privileges; }; const char* getSourceName() const final { @@ -102,10 +132,7 @@ class DocumentSourceListLocalSessions final : public DocumentSource { } Value serialize(SerializationOptions opts = SerializationOptions()) const final override { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484328); - } - return Value(Document{{getSourceName(), _spec.toBSON()}}); + return Value(Document{{getSourceName(), _spec.toBSON(opts)}}); } StageConstraints constraints(Pipeline::SplitState pipeState) const final { diff --git a/src/mongo/db/pipeline/document_source_list_sampled_queries.cpp b/src/mongo/db/pipeline/document_source_list_sampled_queries.cpp index d361eeddfe915..0d7fa60f3cff4 100644 --- a/src/mongo/db/pipeline/document_source_list_sampled_queries.cpp +++ b/src/mongo/db/pipeline/document_source_list_sampled_queries.cpp @@ -29,28 +29,41 @@ #include "mongo/db/pipeline/document_source_list_sampled_queries.h" -#include "mongo/db/dbdirectclient.h" +#include + +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/db/query/allowed_contexts.h" #include 
"mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/analyze_shard_key_documents_gen.h" -#include "mongo/s/analyze_shard_key_feature_flag_gen.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { namespace analyze_shard_key { -REGISTER_DOCUMENT_SOURCE_WITH_FEATURE_FLAG(listSampledQueries, - DocumentSourceListSampledQueries::LiteParsed::parse, - DocumentSourceListSampledQueries::createFromBson, - AllowedWithApiStrict::kNeverInVersion1, - analyze_shard_key::gFeatureFlagAnalyzeShardKey); +REGISTER_DOCUMENT_SOURCE(listSampledQueries, + DocumentSourceListSampledQueries::LiteParsed::parse, + DocumentSourceListSampledQueries::createFromBson, + AllowedWithApiStrict::kNeverInVersion1); boost::intrusive_ptr DocumentSourceListSampledQueries::createFromBson( BSONElement specElem, const boost::intrusive_ptr& pExpCtx) { const NamespaceString& nss = pExpCtx->ns; uassert(ErrorCodes::InvalidNamespace, "$listSampledQueries must be run against the 'admin' database with {aggregate: 1}", - nss.db() == DatabaseName::kAdmin.db() && nss.isCollectionlessAggregateNS()); + nss.isAdminDB() && nss.isCollectionlessAggregateNS()); uassert(6876001, str::stream() << kStageName << " must take a nested object but found: " << specElem, specElem.type() == BSONType::Object); @@ -61,40 +74,54 @@ boost::intrusive_ptr DocumentSourceListSampledQueries::createFro } Value DocumentSourceListSampledQueries::serialize(SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(6876002); - } - - return Value(Document{{getSourceName(), _spec.toBSON()}}); + return Value(Document{{getSourceName(), _spec.toBSON(opts)}}); } DocumentSource::GetNextResult DocumentSourceListSampledQueries::doGetNext() { - if (_finished) { - return GetNextResult::makeEOF(); - } + if (_pipeline == nullptr) { + auto foreignExpCtx = pExpCtx->copyWith(NamespaceString::kConfigSampledQueriesNamespace); + MakePipelineOptions opts; + // For a sharded cluster, disallow shard targeting since we want to fetch the + // config.sampledQueries documents on this replica set not the ones on the config server. 
+ opts.shardTargetingPolicy = ShardTargetingPolicy::kNotAllowed; - auto ns = _spec.getNamespace(); - if (_cursor == nullptr) { - FindCommandRequest findRequest{NamespaceString::kConfigSampledQueriesNamespace}; - if (ns) { - findRequest.setFilter(BSON(SampledQueryDocument::kNsFieldName << ns->toString())); + std::vector stages; + if (auto& nss = _spec.getNamespace()) { + stages.push_back(BSON("$match" << BSON(SampledQueryDocument::kNsFieldName + << NamespaceStringUtil::serialize(*nss)))); + } + try { + _pipeline = Pipeline::makePipeline(std::move(stages), foreignExpCtx, opts); + } catch (ExceptionFor& ex) { + LOGV2(7807800, + "Failed to create aggregation pipeline to list sampled queries", + "error"_attr = redact(ex.toStatus())); + return GetNextResult::makeEOF(); } - - DBDirectClient client(pExpCtx->opCtx); - _cursor = client.find(std::move(findRequest)); } - if (_cursor->more()) { - const auto obj = _cursor->next().getOwned(); - const auto doc = SampledQueryDocument::parse( - IDLParserContext(DocumentSourceListSampledQueries::kStageName), obj); + if (auto doc = _pipeline->getNext()) { + const auto queryDoc = SampledQueryDocument::parse( + IDLParserContext(DocumentSourceListSampledQueries::kStageName), doc->toBson()); DocumentSourceListSampledQueriesResponse response; - response.setSampledQueryDocument(doc); + response.setSampledQueryDocument(std::move(queryDoc)); return {Document(response.toBSON())}; } - _finished = true; + return GetNextResult::makeEOF(); } +void DocumentSourceListSampledQueries::detachFromOperationContext() { + if (_pipeline) { + _pipeline->detachFromOperationContext(); + } +} + +void DocumentSourceListSampledQueries::reattachToOperationContext(OperationContext* opCtx) { + if (_pipeline) { + _pipeline->reattachToOperationContext(opCtx); + } +} + } // namespace analyze_shard_key } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_list_sampled_queries.h b/src/mongo/db/pipeline/document_source_list_sampled_queries.h index 6dbdcf95d4635..617bd6a7c0b8c 100644 --- a/src/mongo/db/pipeline/document_source_list_sampled_queries.h +++ b/src/mongo/db/pipeline/document_source_list_sampled_queries.h @@ -29,13 +29,47 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_list_sampled_queries_gen.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/analyze_shard_key_util.h" #include "mongo/s/is_mongos.h" +#include 
"mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -61,9 +95,6 @@ class DocumentSourceListSampledQueries final : public DocumentSource { uassert(ErrorCodes::IllegalOperation, str::stream() << kStageName << " is not supported on a multitenant replica set", !gMultitenancySupport); - uassert(ErrorCodes::IllegalOperation, - str::stream() << kStageName << " is not supported on a configsvr mongod", - !serverGlobalParams.clusterRole.exclusivelyHasConfigRole()); auto spec = DocumentSourceListSampledQueriesSpec::parse(IDLParserContext(kStageName), specElem.embeddedObject()); @@ -76,12 +107,14 @@ class DocumentSourceListSampledQueries final : public DocumentSource { explicit LiteParsed(std::string parseTimeName, NamespaceString nss, DocumentSourceListSampledQueriesSpec spec) - : LiteParsedDocumentSource(std::move(parseTimeName)), _nss(std::move(nss)) {} + : LiteParsedDocumentSource(std::move(parseTimeName)), + _nss(std::move(nss)), + _privileges({Privilege(ResourcePattern::forClusterResource(_nss.tenantId()), + ActionType::listSampledQueries)}) {} PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const override { - return { - Privilege(ResourcePattern::forClusterResource(), ActionType::listSampledQueries)}; + return _privileges; } stdx::unordered_set getInvolvedNamespaces() const override { @@ -98,6 +131,7 @@ class DocumentSourceListSampledQueries final : public DocumentSource { private: const NamespaceString _nss; + const PrivilegeVector _privileges; }; DocumentSourceListSampledQueries(const boost::intrusive_ptr& pExpCtx, @@ -137,6 +171,9 @@ class DocumentSourceListSampledQueries final : public DocumentSource { static boost::intrusive_ptr createFromBson( BSONElement elem, const boost::intrusive_ptr& pExpCtx); + void detachFromOperationContext() final; + void reattachToOperationContext(OperationContext* opCtx) final; + private: DocumentSourceListSampledQueries(const boost::intrusive_ptr& expCtx) : DocumentSource(kStageName, expCtx) {} @@ -144,8 +181,7 @@ class DocumentSourceListSampledQueries final : public DocumentSource { GetNextResult doGetNext() final; DocumentSourceListSampledQueriesSpec _spec; - bool _finished = false; - std::unique_ptr _cursor; + std::unique_ptr _pipeline; }; } // namespace analyze_shard_key diff --git a/src/mongo/db/pipeline/document_source_list_sampled_queries.idl b/src/mongo/db/pipeline/document_source_list_sampled_queries.idl index 2f9a6caf78df2..5f14e15927d0c 100644 --- a/src/mongo/db/pipeline/document_source_list_sampled_queries.idl +++ b/src/mongo/db/pipeline/document_source_list_sampled_queries.idl @@ -28,7 +28,7 @@ global: cpp_namespace: mongo::analyze_shard_key - + imports: - "mongo/db/basic_types.idl" - "mongo/s/analyze_shard_key_documents.idl" @@ -36,13 +36,15 @@ imports: structs: DocumentSourceListSampledQueriesSpec: description: Specification for a $listSampledQueries stage. - strict: false + strict: false + query_shape_component: true fields: namespace: description: The namespace to return sampled queries for. If unspecified, all sampled queries will be returned. type: namespacestring optional: true + query_shape: custom DocumentSourceListSampledQueriesResponse: description: The document that a $listSampledQueries stage outputs. 
diff --git a/src/mongo/db/pipeline/document_source_list_sessions.cpp b/src/mongo/db/pipeline/document_source_list_sessions.cpp index 2370d5d324a7b..ed3a3fc504818 100644 --- a/src/mongo/db/pipeline/document_source_list_sessions.cpp +++ b/src/mongo/db/pipeline/document_source_list_sessions.cpp @@ -27,14 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/crypto/hash_block.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/document_source_list_sessions.h" #include "mongo/db/pipeline/document_source_list_sessions_gen.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -49,7 +61,7 @@ boost::intrusive_ptr DocumentSourceListSessions::createFromBson( uassert(ErrorCodes::InvalidNamespace, str::stream() << kStageName << " may only be run against " - << NamespaceString::kLogicalSessionsNamespace.ns(), + << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg(), nss == NamespaceString::kLogicalSessionsNamespace); const auto& spec = listSessionsParseSpec(kStageName, elem); @@ -75,14 +87,11 @@ boost::intrusive_ptr DocumentSourceListSessions::createFromBson( } Value DocumentSourceListSessions::serialize(SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484327); - } ListSessionsSpec spec; spec.setAllUsers(_allUsers); spec.setUsers(_users); spec.setPredicate(_predicate); - return Value(Document{{getSourceName(), spec.toBSON()}}); + return Value(Document{{getSourceName(), spec.toBSON(opts)}}); } } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_list_sessions.h b/src/mongo/db/pipeline/document_source_list_sessions.h index cc19525e9f202..05bc6dd34ce5b 100644 --- a/src/mongo/db/pipeline/document_source_list_sessions.h +++ b/src/mongo/db/pipeline/document_source_list_sessions.h @@ -29,12 +29,32 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_list_local_sessions.h" +#include "mongo/db/pipeline/document_source_list_sessions_gen.h" #include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/intrusive_counter.h" namespace mongo { @@ -67,11 +87,16 @@ class DocumentSourceListSessions final : public DocumentSourceMatch { const BSONElement& spec) { return std::make_unique( spec.fieldName(), + nss.tenantId(), 
listSessionsParseSpec(DocumentSourceListSessions::kStageName, spec)); } - explicit LiteParsed(std::string parseTimeName, const ListSessionsSpec& spec) - : LiteParsedDocumentSource(std::move(parseTimeName)), _spec(spec) {} + explicit LiteParsed(std::string parseTimeName, + const boost::optional& tenantId, + const ListSessionsSpec& spec) + : LiteParsedDocumentSource(std::move(parseTimeName)), + _spec(spec), + _privileges(listSessionsRequiredPrivileges(_spec, tenantId)) {} stdx::unordered_set getInvolvedNamespaces() const final { return stdx::unordered_set(); @@ -79,7 +104,7 @@ class DocumentSourceListSessions final : public DocumentSourceMatch { PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const final { - return listSessionsRequiredPrivileges(_spec); + return _privileges; } bool isInitialSource() const final { @@ -92,6 +117,7 @@ class DocumentSourceListSessions final : public DocumentSourceMatch { private: const ListSessionsSpec _spec; + const PrivilegeVector _privileges; }; const char* getSourceName() const final { diff --git a/src/mongo/db/pipeline/document_source_list_sessions.idl b/src/mongo/db/pipeline/document_source_list_sessions.idl index e919fbda2bd92..85e32a3764172 100644 --- a/src/mongo/db/pipeline/document_source_list_sessions.idl +++ b/src/mongo/db/pipeline/document_source_list_sessions.idl @@ -40,21 +40,31 @@ structs: description: "A struct representing a $listSessions/$listLocalSessions User" strict: true generate_comparison_operators: true + query_shape_component: true fields: - user: string - db: string + user: + type: string + query_shape: anonymize + db: + type: string + query_shape: anonymize ListSessionsSpec: description: "$listSessions and $listLocalSessions pipeline spec" strict: true + query_shape_component: true fields: allUsers: type: bool default: false + # This boolean parameterizes the stage rather than representing user input, so do not abstract the literal. + query_shape: parameter users: type: array optional: true + query_shape: literal $_internalPredicate: cpp_name: predicate type: object optional: true + query_shape: literal # This is a MatchExpression predicate and could be shape-ified rather than completely abstracted. 
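Stepping back from the individual hunks: the change repeated across `$listCachedAndActiveUsers`, `$listLocalSessions`, `$listSessions`, and `$listSampledQueries` is that required privileges are now computed once, at `LiteParsed` construction time, from the request's `tenantId`, instead of building a tenant-agnostic `ResourcePattern` on every `requiredPrivileges()` call. A minimal sketch of the pattern follows; the class name and the `listSessions` action are placeholders rather than any one stage from the patch.

```cpp
// Sketch of the tenant-aware LiteParsed pattern introduced by this patch.
class LiteParsedExample : public LiteParsedDocumentSource {
public:
    LiteParsedExample(std::string parseTimeName, const boost::optional<TenantId>& tenantId)
        : LiteParsedDocumentSource(std::move(parseTimeName)),
          // Bind the privilege to the tenant's resources up front, at parse time...
          _privileges({Privilege(ResourcePattern::forClusterResource(tenantId),
                                 ActionType::listSessions)}) {}

    // ...so later privilege checks just return the precomputed vector.
    PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const final {
        return _privileges;
    }

    stdx::unordered_set<NamespaceString> getInvolvedNamespaces() const final {
        return {};
    }

private:
    const PrivilegeVector _privileges;
};
```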
diff --git a/src/mongo/db/pipeline/document_source_lookup.cpp b/src/mongo/db/pipeline/document_source_lookup.cpp index b34b88368e661..5e653ae52c861 100644 --- a/src/mongo/db/pipeline/document_source_lookup.cpp +++ b/src/mongo/db/pipeline/document_source_lookup.cpp @@ -29,30 +29,68 @@ #include "mongo/db/pipeline/document_source_lookup.h" -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_algo.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/document_path_support.h" #include "mongo/db/pipeline/document_source_documents.h" #include "mongo/db/pipeline/document_source_merge_gen.h" -#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/document_source_sequential_document_cache.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" #include "mongo/db/pipeline/sort_reorder_helpers.h" #include "mongo/db/pipeline/variable_validation.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/stats/counters.h" #include "mongo/db/views/resolved_view.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/overflow_arithmetic.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -361,7 +399,7 @@ std::unique_ptr DocumentSourceLookUp::LitePars fromNss = parseLookupFromAndResolveNamespace(fromElement, nss.dbName()); } uassert(ErrorCodes::InvalidNamespace, - str::stream() << "invalid $lookup namespace: " << fromNss.ns(), + str::stream() << "invalid $lookup namespace: " << fromNss.toStringForErrorMsg(), fromNss.isValid()); // Recursively lite parse the nested pipeline, if one exists. @@ -449,16 +487,6 @@ StageConstraints DocumentSourceLookUp::constraints(Pipeline::SplitState pipeStat // This stage will only be on the shards pipeline if $lookup on sharded foreign collections // is allowed. 
hostRequirement = HostTypeRequirement::kAnyShard; - } else if (_fromNs == NamespaceString::kConfigsvrCollectionsNamespace && - // (Ignore FCV check): If the catalog shard feature flag is enabled, the config - // server should have the components necessary to handle a merge. Config servers are - // upgraded first and downgraded last, so if any server is running the latest binary, - // we can assume the conifg servers are too. - !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()) { - // This is an unsharded collection, but the primary shard would be the config server, and - // the config servers are not prepared to take queries. Instead, we'll merge on any of the - // other shards. - hostRequirement = HostTypeRequirement::kAnyShard; } else { // If the pipeline is unsplit or this stage is on the merging part of the pipeline, // when $lookup on sharded foreign collections is allowed, the foreign collection is @@ -571,7 +599,7 @@ std::unique_ptr DocumentSourceLookUp::buildPipelineFr // Resolve the view definition. auto pipeline = Pipeline::makePipelineFromViewDefinition( - _fromExpCtx, resolvedNamespace, serializedPipeline, opts); + _fromExpCtx, resolvedNamespace, std::move(serializedPipeline), opts); // Store the pipeline with resolved namespaces so that we only trigger this exception on the // first input document. @@ -671,7 +699,7 @@ std::unique_ptr DocumentSourceLookUp::buildPipeline( // This exception returns the information we need to resolve a sharded view. Update the // pipeline with the resolved view definition. pipeline = buildPipelineFromViewDefinition( - serializedPipeline, + std::move(serializedPipeline), ExpressionContext::ResolvedNamespace{e->getNamespace(), e->getPipeline()}); // The serialized pipeline does not have a cache stage, so we will add it back to the @@ -1024,47 +1052,62 @@ void DocumentSourceLookUp::appendSpecificExecStats(MutableDocument& doc) const { void DocumentSourceLookUp::serializeToArray(std::vector& array, SerializationOptions opts) const { - auto explain = opts.verbosity; - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484326); - } - // Support alternative $lookup from config.cache.chunks* namespaces. // // Do not include the tenantId in serialized 'from' namespace. auto fromValue = (pExpCtx->ns.db() == _fromNs.db()) - ? Value(_fromNs.coll()) - : Value(Document{{"db", _fromNs.dbName().db()}, {"coll", _fromNs.coll()}}); + ? Value(opts.serializeIdentifier(_fromNs.coll())) + : Value(Document{ + {"db", opts.serializeIdentifier(_fromNs.dbName().serializeWithoutTenantPrefix())}, + {"coll", opts.serializeIdentifier(_fromNs.coll())}}); - MutableDocument output( - Document{{getSourceName(), Document{{"from", fromValue}, {"as", _as.fullPath()}}}}); + MutableDocument output(Document{ + {getSourceName(), Document{{"from", fromValue}, {"as", opts.serializeFieldPath(_as)}}}}); if (hasLocalFieldForeignFieldJoin()) { - output[getSourceName()]["localField"] = Value(_localField->fullPath()); - output[getSourceName()]["foreignField"] = Value(_foreignField->fullPath()); + output[getSourceName()]["localField"] = Value(opts.serializeFieldPath(_localField.value())); + output[getSourceName()]["foreignField"] = + Value(opts.serializeFieldPath(_foreignField.value())); } // Add a pipeline field if only-pipeline syntax was used (to ensure the output is valid $lookup // syntax) or if a $match was absorbed. 
- auto pipeline = _userPipeline.get_value_or(std::vector()); + auto serializedPipeline = [&]() -> std::vector { + auto pipeline = _userPipeline.get_value_or(std::vector()); + if (opts.transformIdentifiers || + opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) { + return Pipeline::parse(pipeline, _fromExpCtx)->serializeToBson(opts); + } + return pipeline; + }(); if (_additionalFilter) { - pipeline.emplace_back(BSON("$match" << *_additionalFilter)); + auto serializedFilter = [&]() -> BSONObj { + if (opts.transformIdentifiers || + opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) { + auto filter = + uassertStatusOK(MatchExpressionParser::parse(*_additionalFilter, pExpCtx)); + return filter->serialize(opts); + } + return *_additionalFilter; + }(); + serializedPipeline.emplace_back(BSON("$match" << serializedFilter)); } - if (!hasLocalFieldForeignFieldJoin() || pipeline.size() > 0) { + if (!hasLocalFieldForeignFieldJoin() || serializedPipeline.size() > 0) { MutableDocument exprList; for (const auto& letVar : _letVariables) { - exprList.addField(letVar.name, letVar.expression->serialize(explain)); + exprList.addField(opts.serializeFieldPathFromString(letVar.name), + letVar.expression->serialize(opts)); } output[getSourceName()]["let"] = Value(exprList.freeze()); - output[getSourceName()]["pipeline"] = Value(pipeline); + output[getSourceName()]["pipeline"] = Value(serializedPipeline); } if (_hasExplicitCollation) { output[getSourceName()]["_internalCollation"] = Value(_fromExpCtx->getCollatorBSON()); } - if (explain) { + if (opts.verbosity) { if (_unwindSrc) { const boost::optional indexPath = _unwindSrc->indexPath(); output[getSourceName()]["unwinding"] = @@ -1073,7 +1116,7 @@ void DocumentSourceLookUp::serializeToArray(std::vector& array, << (indexPath ? Value(indexPath->fullPath()) : Value()))); } - if (explain.value() >= ExplainOptions::Verbosity::kExecStats) { + if (opts.verbosity.value() >= ExplainOptions::Verbosity::kExecStats) { appendSpecificExecStats(output); } @@ -1112,7 +1155,19 @@ DepsTracker::State DocumentSourceLookUp::getDependencies(DepsTracker* deps) cons } if (hasLocalFieldForeignFieldJoin()) { - deps->fields.insert(_localField->fullPath()); + const FieldRef ref(_localField->fullPath()); + // We need everything up until the first numeric component. Otherwise, a projection could + // treat the numeric component as a field name rather than an index into an array. + size_t firstNumericIx; + for (firstNumericIx = 0; firstNumericIx < ref.numParts(); firstNumericIx++) { + // We are lenient with the component, because classic $lookup treats 0-prefixed numeric + // fields like "00" as both an index and a field name. Allowing it in a dependency would + // restrict the usage to only a field name. 
+ if (ref.isNumericPathComponentLenient(firstNumericIx)) { + break; + } + } + deps->fields.insert(ref.dottedSubstring(0, firstNumericIx).toString()); } // Purposely ignore '_matchSrc' and '_unwindSrc', since those should only be absorbed if we know diff --git a/src/mongo/db/pipeline/document_source_lookup.h b/src/mongo/db/pipeline/document_source_lookup.h index 8da14b230178a..be33614b767e0 100644 --- a/src/mongo/db/pipeline/document_source_lookup.h +++ b/src/mongo/db/pipeline/document_source_lookup.h @@ -29,16 +29,54 @@ #pragma once +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_sequential_document_cache.h" #include "mongo/db/pipeline/document_source_unwind.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" #include "mongo/db/pipeline/lookup_set_cache.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/sequential_document_cache.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -80,13 +118,18 @@ class DocumentSourceLookUp final : public DocumentSource { /** * Lookup from a sharded collection may not be allowed. */ - bool allowShardedForeignCollection(NamespaceString nss, - bool inMultiDocumentTransaction) const override final { + Status checkShardedForeignCollAllowed( + NamespaceString nss, bool inMultiDocumentTransaction) const override final { if (!inMultiDocumentTransaction) { - return true; + return Status::OK(); } auto involvedNss = getInvolvedNamespaces(); - return (involvedNss.find(nss) == involvedNss.end()); + if (involvedNss.find(nss) == involvedNss.end()) { + return Status::OK(); + } + + return Status(ErrorCodes::NamespaceCannotBeSharded, + "Sharded $lookup is not allowed within a multi-document transaction"); } void assertPermittedInAPIVersion(const APIParameters& apiParameters) const final { diff --git a/src/mongo/db/pipeline/document_source_lookup_test.cpp b/src/mongo/db/pipeline/document_source_lookup_test.cpp index 23704cfdb1e14..a5d7493881e01 100644 --- a/src/mongo/db/pipeline/document_source_lookup_test.cpp +++ b/src/mongo/db/pipeline/document_source_lookup_test.cpp @@ -27,30 +27,54 @@ * it in the license file. 
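To make the dependency-tracking change in `DocumentSourceLookUp::getDependencies()` above concrete: for a `localField` of `"a.0.b"` the new code records a dependency on `"a"` only, so an upstream projection cannot misread `0` as a field name and drop the array element. A standalone sketch of the truncation logic, using the same `FieldRef` helpers as the hunk; the wrapper function itself is illustrative.

```cpp
// Sketch: truncate a dotted path at its first numeric component.
//   "a.0.b" -> "a"     (depend on the whole array)
//   "x.y"   -> "x.y"   (no numeric component; keep the full path)
std::string dependencyForLocalField(StringData localField) {
    const FieldRef ref(localField);
    size_t firstNumericIx = 0;
    while (firstNumericIx < ref.numParts() &&
           !ref.isNumericPathComponentLenient(firstNumericIx)) {
        ++firstNumericIx;
    }
    return ref.dottedSubstring(0, firstNumericIx).toString();
}
```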
*/ -#include "mongo/platform/basic.h" - -#include #include +#include +#include #include +#include +#include +#include +#include +#include + #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_lookup.h" #include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/server_options.h" #include "mongo/db/stats/counters.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { namespace { @@ -1504,6 +1528,58 @@ TEST_F(DocumentSourceLookUpTest, IncrementNestedAggregateOpCounterOnCreateButNot testOpCounter(NamespaceString::createNamespaceString_forTest("local", "testColl"), 0); } +TEST_F(DocumentSourceLookUpTest, RedactsCorrectlyWithPipeline) { + auto expCtx = getExpCtx(); + auto fromNs = NamespaceString::createNamespaceString_forTest(expCtx->ns.dbName(), "coll"); + expCtx->setResolvedNamespaces(StringMap<ExpressionContext::ResolvedNamespace>{ + {fromNs.coll().toString(), {fromNs, std::vector<BSONObj>()}}}); + + BSONArrayBuilder pipeline; + pipeline << BSON("$match" << BSON("a" + << "myStr")); + pipeline << BSON("$project" << BSON("_id" << 0 << "a" << 1)); + auto docSource = DocumentSourceLookUp::createFromBson( + BSON("$lookup" << BSON("from" << fromNs.coll() << "localField" + << "foo" + << "foreignField" + << "bar" + << "let" + << BSON("var1" + << "$x") + << "pipeline" << pipeline.arr() << "as" + << "out")) + .firstElement(), + expCtx); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$lookup": { + "from": "HASH<coll>", + "as": "HASH<out>", + "localField": "HASH<foo>", + "foreignField": "HASH<bar>", + "let": { + "HASH<var1>": "$HASH<x>" + }, + "pipeline": [ + { + "$match": { + "HASH<a>": { + "$eq": "?string" + } + } + }, + { + "$project": { + "HASH<a>": true, + "HASH<_id>": false + } + } + ] + } + })", + redact(*docSource)); +} + using DocumentSourceLookUpServerlessTest = ServerlessAggregationContextFixture; TEST_F(DocumentSourceLookUpServerlessTest, @@ -1609,7 +1685,7 @@ TEST_F(DocumentSourceLookUpServerlessTest, << "lookup1")); NamespaceString nss = NamespaceString::createNamespaceString_forTest( - boost::none, expCtx->ns.dbName().toString(), _targetColl); + boost::none, expCtx->ns.dbName().toString_forTest(),
_targetColl); for (bool flagStatus : {false, true}) { RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", diff --git a/src/mongo/db/pipeline/document_source_match.cpp b/src/mongo/db/pipeline/document_source_match.cpp index 83079a6629db7..a73a38beb6af9 100644 --- a/src/mongo/db/pipeline/document_source_match.cpp +++ b/src/mongo/db/pipeline/document_source_match.cpp @@ -27,17 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_match.h" - +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" #include +#include +#include +#include +#include #include +#include +#include + +#include +#include #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_leaf.h" @@ -45,11 +53,12 @@ #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/matcher/match_expression_dependencies.h" #include "mongo/db/pipeline/document_path_support.h" -#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/semantic_analysis.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" -#include "mongo/util/str.h" namespace mongo { @@ -69,7 +78,8 @@ const char* DocumentSourceMatch::getSourceName() const { } Value DocumentSourceMatch::serialize(SerializationOptions opts) const { - if (opts.verbosity || opts.redactIdentifiers || opts.replacementForLiteralArgs) { + if (opts.verbosity || opts.transformIdentifiers || + opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) { return Value(DOC(getSourceName() << Document(_expression->serialize(opts)))); } return Value(DOC(getSourceName() << Document(getQuery()))); @@ -503,7 +513,7 @@ DocumentSourceMatch::splitMatchByModifiedFields( // This stage modifies all paths, so cannot be swapped with a $match at all. 
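`DocumentSourceMatch::serialize` above now keys off `opts.transformIdentifiers` and a `literalPolicy` enum rather than the old `redactIdentifiers`/`replacementForLiteralArgs` flags. A compilable toy sketch of the same gating pattern (the option names mirror the diff, but the types here are simplified stand-ins, not the real `SerializationOptions`):

```cpp
#include <iostream>
#include <string>

enum class LiteralSerializationPolicy { kUnchanged, kToDebugTypeString };

// Simplified stand-in for SerializationOptions.
struct SerializationOptions {
    bool verbosity = false;             // explain-style output requested
    bool transformIdentifiers = false;  // field names should be hashed/anonymized
    LiteralSerializationPolicy literalPolicy = LiteralSerializationPolicy::kUnchanged;
};

std::string serializeMatch(const SerializationOptions& opts) {
    // Any option that changes the output shape forces the slow path that
    // re-serializes the parsed expression with the options applied.
    if (opts.verbosity || opts.transformIdentifiers ||
        opts.literalPolicy != LiteralSerializationPolicy::kUnchanged) {
        return "{HASH<a>: {$eq: \"?string\"}}";  // query-shape style output
    }
    // Otherwise return the original user predicate verbatim.
    return "{a: {$eq: \"myStr\"}}";
}

int main() {
    SerializationOptions shape;
    shape.transformIdentifiers = true;
    shape.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString;
    std::cout << serializeMatch({}) << '\n' << serializeMatch(shape) << '\n';
}
```

The `"?string"` / `"?number"` expectations in the updated redaction tests follow from the same policy: literals are replaced by a placeholder that records only their type.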
return {nullptr, match}; case DocumentSource::GetModPathsReturn::Type::kFiniteSet: - modifiedPaths = std::move(modifiedPathsRet.paths); + modifiedPaths = modifiedPathsRet.paths; break; case DocumentSource::GetModPathsReturn::Type::kAllExcept: { DepsTracker depsTracker; diff --git a/src/mongo/db/pipeline/document_source_match.h b/src/mongo/db/pipeline/document_source_match.h index 15682350f3646..dcaa0b002456a 100644 --- a/src/mongo/db/pipeline/document_source_match.h +++ b/src/mongo/db/pipeline/document_source_match.h @@ -29,15 +29,35 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/connpool.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/matcher.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_match_test.cpp b/src/mongo/db/pipeline/document_source_match_test.cpp index e699537521dae..340680d2797fe 100644 --- a/src/mongo/db/pipeline/document_source_match_test.cpp +++ b/src/mongo/db/pipeline/document_source_match_test.cpp @@ -28,23 +28,37 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include + +#include +#include "mongo/base/status.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/document_source_project.h" +#include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/explain_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -724,12 +738,12 @@ TEST_F(DocumentSourceMatchTest, RedactionWithAnd) { "$and": [ { "HASH.HASH": { - "$eq": "?" + "$eq": "?string" } }, { "HASH": { - "$gt": "?" 
+ "$gt": "?number" } } ] diff --git a/src/mongo/db/pipeline/document_source_merge.cpp b/src/mongo/db/pipeline/document_source_merge.cpp index 485ad58300fa8..1df110172f314 100644 --- a/src/mongo/db/pipeline/document_source_merge.cpp +++ b/src/mongo/db/pipeline/document_source_merge.cpp @@ -28,21 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_merge.h" - +#include +#include +#include +#include #include #include +#include #include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/curop_failpoint_helpers.h" -#include "mongo/db/ops/write_ops.h" -#include "mongo/db/pipeline/document_path_support.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/document_source_merge.h" +#include "mongo/db/pipeline/document_source_merge_gen.h" +#include "mongo/db/pipeline/document_source_merge_spec.h" #include "mongo/db/pipeline/variable_validation.h" #include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -60,6 +82,7 @@ namespace { using MergeStrategyDescriptor = DocumentSourceMerge::MergeStrategyDescriptor; using MergeMode = MergeStrategyDescriptor::MergeMode; using MergeStrategy = MergeStrategyDescriptor::MergeStrategy; +using BatchedCommandGenerator = MergeStrategyDescriptor::BatchedCommandGenerator; using MergeStrategyDescriptorsMap = std::map; using WhenMatched = MergeStrategyDescriptor::WhenMatched; using WhenNotMatched = MergeStrategyDescriptor::WhenNotMatched; @@ -86,6 +109,55 @@ constexpr auto kPipelineDiscardMode = MergeMode{WhenMatched::kPipeline, WhenNotM const auto kDefaultPipelineLet = BSON("new" << "$$ROOT"); +BatchedCommandGenerator makeInsertCommandGenerator() { + return [](const auto& expCtx, const auto& ns) -> BatchedCommandRequest { + return DocumentSourceMerge::DocumentSourceWriter::makeInsertCommand( + ns, expCtx->bypassDocumentValidation); + }; +} + +BatchedCommandGenerator makeUpdateCommandGenerator() { + return [](const auto& expCtx, const auto& ns) -> BatchedCommandRequest { + write_ops::UpdateCommandRequest updateOp(ns); + updateOp.setWriteCommandRequestBase([&] { + write_ops::WriteCommandRequestBase wcb; + wcb.setOrdered(false); + wcb.setBypassDocumentValidation(expCtx->bypassDocumentValidation); + return wcb; + }()); + auto [constants, letParams] = + expCtx->variablesParseState.transitionalCompatibilitySerialize(expCtx->variables); + updateOp.setLegacyRuntimeConstants(std::move(constants)); + if (!letParams.isEmpty()) { + updateOp.setLet(std::move(letParams)); + } + return BatchedCommandRequest(std::move(updateOp)); + }; +} + +/** + * Converts 'batch' into a vector of UpdateOpEntries. 
+ */ +std::vector constructUpdateEntries( + DocumentSourceMerge::DocumentSourceWriter::BatchedObjects&& batch, + UpsertType upsert, + bool multi) { + std::vector updateEntries; + for (auto&& obj : batch) { + write_ops::UpdateOpEntry entry; + auto&& [q, u, c] = obj; + entry.setQ(std::move(q)); + entry.setU(std::move(u)); + entry.setC(std::move(c)); + entry.setUpsert(upsert != UpsertType::kNone); + entry.setUpsertSupplied({{entry.getUpsert(), upsert == UpsertType::kInsertSuppliedDoc}}); + entry.setMulti(multi); + + updateEntries.push_back(std::move(entry)); + } + return updateEntries; +} + /** * Creates a merge strategy which uses update semantics to perform a merge operation. */ @@ -95,10 +167,13 @@ MergeStrategy makeUpdateStrategy() { const auto& wc, auto epoch, auto&& batch, + auto&& bcr, UpsertType upsert) { constexpr auto multi = false; + auto updateCommand = bcr.extractUpdateRequest(); + updateCommand->setUpdates(constructUpdateEntries(std::move(batch), upsert, multi)); uassertStatusOK(expCtx->mongoProcessInterface->update( - expCtx, ns, std::move(batch), wc, upsert, multi, epoch)); + expCtx, ns, std::move(updateCommand), wc, upsert, multi, epoch)); }; } @@ -115,11 +190,14 @@ MergeStrategy makeStrictUpdateStrategy() { const auto& wc, auto epoch, auto&& batch, + auto&& bcr, UpsertType upsert) { const int64_t batchSize = batch.size(); constexpr auto multi = false; + auto updateCommand = bcr.extractUpdateRequest(); + updateCommand->setUpdates(constructUpdateEntries(std::move(batch), upsert, multi)); auto updateResult = uassertStatusOK(expCtx->mongoProcessInterface->update( - expCtx, ns, std::move(batch), wc, upsert, multi, epoch)); + expCtx, ns, std::move(updateCommand), wc, upsert, multi, epoch)); uassert(ErrorCodes::MergeStageNoMatchingDocument, "{} could not find a matching document in the target collection " "for at least one document in the source collection"_format(kStageName), @@ -136,6 +214,7 @@ MergeStrategy makeInsertStrategy() { const auto& wc, auto epoch, auto&& batch, + auto&& bcr, UpsertType upsertType) { std::vector objectsToInsert(batch.size()); // The batch stores replacement style updates, but for this "insert" style of $merge we'd @@ -143,8 +222,10 @@ MergeStrategy makeInsertStrategy() { std::transform(batch.begin(), batch.end(), objectsToInsert.begin(), [](const auto& obj) { return std::get(obj).getUpdateReplacement(); }); - uassertStatusOK(expCtx->mongoProcessInterface->insert( - expCtx, ns, std::move(objectsToInsert), wc, epoch)); + auto insertCommand = bcr.extractInsertRequest(); + insertCommand->setDocuments(std::move(objectsToInsert)); + uassertStatusOK( + expCtx->mongoProcessInterface->insert(expCtx, ns, std::move(insertCommand), wc, epoch)); }; } @@ -174,72 +255,95 @@ const MergeStrategyDescriptorsMap& getDescriptors() { // be initialized first. By wrapping the map into a function we can guarantee that it won't be // initialized until the first use, which is when the program already started and all global // variables had been initialized. 
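The comment above explains why the descriptors map is wrapped in a function: a function-local static is initialized on first use, so it cannot race the global initializers that the merge-mode constants depend on. A generic, self-contained illustration of that pattern (unrelated to the real descriptor types):

```cpp
#include <iostream>
#include <map>
#include <string>

// A function-local static is constructed the first time control passes through
// its definition, after main() has started, so it can safely read other
// globals. A namespace-scope map would instead be subject to the unspecified
// initialization order across translation units.
const std::map<std::string, int>& getDescriptors() {
    static const std::map<std::string, int> descriptors{
        {"replaceInsert", 0},
        {"mergeInsert", 1},
        {"failInsert", 2},
    };
    return descriptors;
}

int main() {
    std::cout << getDescriptors().at("mergeInsert") << '\n';
}
```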
- static const auto mergeStrategyDescriptors = MergeStrategyDescriptorsMap{ - // whenMatched: replace, whenNotMatched: insert - {kReplaceInsertMode, - {kReplaceInsertMode, - {ActionType::insert, ActionType::update}, - makeUpdateStrategy(), - {}, - UpsertType::kGenerateNewDoc}}, - // whenMatched: replace, whenNotMatched: fail - {kReplaceFailMode, - {kReplaceFailMode, - {ActionType::update}, - makeStrictUpdateStrategy(), - {}, - UpsertType::kNone}}, - // whenMatched: replace, whenNotMatched: discard - {kReplaceDiscardMode, - {kReplaceDiscardMode, {ActionType::update}, makeUpdateStrategy(), {}, UpsertType::kNone}}, - // whenMatched: merge, whenNotMatched: insert - {kMergeInsertMode, - {kMergeInsertMode, - {ActionType::insert, ActionType::update}, - makeUpdateStrategy(), - makeUpdateTransform("$set"), - UpsertType::kGenerateNewDoc}}, - // whenMatched: merge, whenNotMatched: fail - {kMergeFailMode, - {kMergeFailMode, - {ActionType::update}, - makeStrictUpdateStrategy(), - makeUpdateTransform("$set"), - UpsertType::kNone}}, - // whenMatched: merge, whenNotMatched: discard - {kMergeDiscardMode, - {kMergeDiscardMode, - {ActionType::update}, - makeUpdateStrategy(), - makeUpdateTransform("$set"), - UpsertType::kNone}}, - // whenMatched: keepExisting, whenNotMatched: insert - {kKeepExistingInsertMode, - {kKeepExistingInsertMode, - {ActionType::insert, ActionType::update}, - makeUpdateStrategy(), - makeUpdateTransform("$setOnInsert"), - UpsertType::kGenerateNewDoc}}, - // whenMatched: [pipeline], whenNotMatched: insert - {kPipelineInsertMode, - {kPipelineInsertMode, - {ActionType::insert, ActionType::update}, - makeUpdateStrategy(), - {}, - UpsertType::kInsertSuppliedDoc}}, - // whenMatched: [pipeline], whenNotMatched: fail - {kPipelineFailMode, - {kPipelineFailMode, - {ActionType::update}, - makeStrictUpdateStrategy(), - {}, - UpsertType::kNone}}, - // whenMatched: [pipeline], whenNotMatched: discard - {kPipelineDiscardMode, - {kPipelineDiscardMode, {ActionType::update}, makeUpdateStrategy(), {}, UpsertType::kNone}}, - // whenMatched: fail, whenNotMatched: insert - {kFailInsertMode, - {kFailInsertMode, {ActionType::insert}, makeInsertStrategy(), {}, UpsertType::kNone}}}; + static const auto mergeStrategyDescriptors = + MergeStrategyDescriptorsMap{// whenMatched: replace, whenNotMatched: insert + {kReplaceInsertMode, + {kReplaceInsertMode, + {ActionType::insert, ActionType::update}, + makeUpdateStrategy(), + {}, + UpsertType::kGenerateNewDoc, + makeUpdateCommandGenerator()}}, + // whenMatched: replace, whenNotMatched: fail + {kReplaceFailMode, + {kReplaceFailMode, + {ActionType::update}, + makeStrictUpdateStrategy(), + {}, + UpsertType::kNone, + makeUpdateCommandGenerator()}}, + // whenMatched: replace, whenNotMatched: discard + {kReplaceDiscardMode, + {kReplaceDiscardMode, + {ActionType::update}, + makeUpdateStrategy(), + {}, + UpsertType::kNone, + makeUpdateCommandGenerator()}}, + // whenMatched: merge, whenNotMatched: insert + {kMergeInsertMode, + {kMergeInsertMode, + {ActionType::insert, ActionType::update}, + makeUpdateStrategy(), + makeUpdateTransform("$set"), + UpsertType::kGenerateNewDoc, + makeUpdateCommandGenerator()}}, + // whenMatched: merge, whenNotMatched: fail + {kMergeFailMode, + {kMergeFailMode, + {ActionType::update}, + makeStrictUpdateStrategy(), + makeUpdateTransform("$set"), + UpsertType::kNone, + makeUpdateCommandGenerator()}}, + // whenMatched: merge, whenNotMatched: discard + {kMergeDiscardMode, + {kMergeDiscardMode, + {ActionType::update}, + makeUpdateStrategy(), + 
makeUpdateTransform("$set"), + UpsertType::kNone, + makeUpdateCommandGenerator()}}, + // whenMatched: keepExisting, whenNotMatched: insert + {kKeepExistingInsertMode, + {kKeepExistingInsertMode, + {ActionType::insert, ActionType::update}, + makeUpdateStrategy(), + makeUpdateTransform("$setOnInsert"), + UpsertType::kGenerateNewDoc, + makeUpdateCommandGenerator()}}, + // whenMatched: [pipeline], whenNotMatched: insert + {kPipelineInsertMode, + {kPipelineInsertMode, + {ActionType::insert, ActionType::update}, + makeUpdateStrategy(), + {}, + UpsertType::kInsertSuppliedDoc, + makeUpdateCommandGenerator()}}, + // whenMatched: [pipeline], whenNotMatched: fail + {kPipelineFailMode, + {kPipelineFailMode, + {ActionType::update}, + makeStrictUpdateStrategy(), + {}, + UpsertType::kNone, + makeUpdateCommandGenerator()}}, + // whenMatched: [pipeline], whenNotMatched: discard + {kPipelineDiscardMode, + {kPipelineDiscardMode, + {ActionType::update}, + makeUpdateStrategy(), + {}, + UpsertType::kNone, + makeUpdateCommandGenerator()}}, + // whenMatched: fail, whenNotMatched: insert + {kFailInsertMode, + {kFailInsertMode, + {ActionType::insert}, + makeInsertStrategy(), + {}, + UpsertType::kNone, + makeInsertCommandGenerator()}}}; return mergeStrategyDescriptors; } @@ -343,7 +447,7 @@ std::unique_ptr DocumentSourceMerge::LiteParsed auto targetNss = mergeSpec.getTargetNss(); uassert(ErrorCodes::InvalidNamespace, - "Invalid {} target namespace: '{}'"_format(kStageName, targetNss.ns()), + "Invalid {} target namespace: '{}'"_format(kStageName, targetNss.toStringForErrorMsg()), targetNss.isValid()); auto whenMatched = @@ -425,7 +529,7 @@ boost::intrusive_ptr DocumentSourceMerge::create( isSupportedMergeMode(whenMatched, whenNotMatched)); uassert(ErrorCodes::InvalidNamespace, - "Invalid {} target namespace: '{}'"_format(kStageName, outputNs.ns()), + "Invalid {} target namespace: '{}'"_format(kStageName, outputNs.toStringForErrorMsg()), outputNs.isValid()); uassert(ErrorCodes::OperationNotSupportedInTransaction, @@ -439,7 +543,8 @@ boost::intrusive_ptr DocumentSourceMerge::create( (outputNs.isSystemStatsCollection() && isInternalClient(expCtx->opCtx->getClient()))); uassert(31320, - "Cannot {} to internal database: {}"_format(kStageName, outputNs.db()), + "Cannot {} to internal database: {}"_format(kStageName, + outputNs.dbName().toStringForErrorMsg()), !outputNs.isOnInternalDb() || isInternalClient(expCtx->opCtx->getClient())); if (whenMatched == WhenMatched::kPipeline) { @@ -534,10 +639,6 @@ boost::optional DocumentSourceMerge::distr Value DocumentSourceMerge::serialize(SerializationOptions opts) const { auto explain = opts.verbosity; - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484324); - } - DocumentSourceMergeSpec spec; spec.setTargetNss(_outputNs); spec.setLet([&]() -> boost::optional { @@ -551,7 +652,23 @@ Value DocumentSourceMerge::serialize(SerializationOptions opts) const { } return bob.obj(); }()); - spec.setWhenMatched(MergeWhenMatchedPolicy{_descriptor.mode.first, _pipeline}); + spec.setWhenMatched(MergeWhenMatchedPolicy{ + _descriptor.mode.first, [&]() -> boost::optional> { + if (!_pipeline.has_value()) { + return boost::none; + } + auto expCtxWithLetVariables = pExpCtx->copyWith(pExpCtx->ns); + if (spec.getLet()) { + BSONObjBuilder cleanLetSpecBuilder; + for (auto& elt : spec.getLet().value()) { + cleanLetSpecBuilder.append(elt.fieldNameStringData(), BSONObj{}); + } + expCtxWithLetVariables->variables.seedVariablesWithLetParameters( + 
expCtxWithLetVariables.get(), cleanLetSpecBuilder.obj()); + } + return Pipeline::parse(_pipeline.value(), expCtxWithLetVariables) + ->serializeToBson(opts); + }()}); spec.setWhenNotMatched(_descriptor.mode.second); spec.setOn([&]() { std::vector mergeOnFields; @@ -586,14 +703,19 @@ std::pair DocumentSourceMerge::makeBatchO _writeSizeEstimator->estimateUpdateSizeBytes(batchObject, _descriptor.upsertType)}; } -void DocumentSourceMerge::spill(BatchedObjects&& batch) try { +void DocumentSourceMerge::spill(BatchedCommandRequest&& bcr, BatchedObjects&& batch) try { DocumentSourceWriteBlock writeBlock(pExpCtx->opCtx); auto targetEpoch = _targetCollectionPlacementVersion ? boost::optional(_targetCollectionPlacementVersion->epoch()) : boost::none; - _descriptor.strategy( - pExpCtx, _outputNs, _writeConcern, targetEpoch, std::move(batch), _descriptor.upsertType); + _descriptor.strategy(pExpCtx, + _outputNs, + _writeConcern, + targetEpoch, + std::move(batch), + std::move(bcr), + _descriptor.upsertType); } catch (const ExceptionFor& ex) { uassertStatusOKWithContext(ex.toStatus(), "$merge failed to update the matching document, did you " @@ -617,6 +739,10 @@ void DocumentSourceMerge::spill(BatchedObjects&& batch) try { } } +BatchedCommandRequest DocumentSourceMerge::initializeBatchedWriteRequest() const { + return _descriptor.batchedCommandGenerator(pExpCtx, _outputNs); +} + void DocumentSourceMerge::waitWhileFailPointEnabled() { CurOpFailpointHelpers::waitWhileFailPointEnabled( &hangWhileBuildingDocumentSourceMergeBatch, diff --git a/src/mongo/db/pipeline/document_source_merge.h b/src/mongo/db/pipeline/document_source_merge.h index b0eed7a6df982..ccf5deb8d3aa1 100644 --- a/src/mongo/db/pipeline/document_source_merge.h +++ b/src/mongo/db/pipeline/document_source_merge.h @@ -29,9 +29,56 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_merge_gen.h" +#include "mongo/db/pipeline/document_source_merge_modes_gen.h" #include "mongo/db/pipeline/document_source_writer.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/stdx/unordered_map.h" 
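The header changes that follow extend `MergeStrategyDescriptor` so each merge mode carries both its write strategy and a generator for the batched command it writes through; `spill()` then hands the pre-built request to the strategy. A reduced sketch of that pairing with plain `std::function` members, the MongoDB-specific types replaced by toy ones:

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

// Toy stand-ins for BatchedCommandRequest and a batch of documents.
struct BatchedCommandRequest { std::string kind; };
using Batch = std::vector<std::string>;

struct MergeStrategyDescriptor {
    // Builds the empty command (insert vs. update) for this merge mode.
    std::function<BatchedCommandRequest()> batchedCommandGenerator;
    // Consumes the batch together with the pre-built command.
    std::function<void(BatchedCommandRequest&&, Batch&&)> strategy;
};

int main() {
    MergeStrategyDescriptor insertMode{
        [] { return BatchedCommandRequest{"insert"}; },
        [](BatchedCommandRequest&& cmd, Batch&& batch) {
            std::cout << cmd.kind << " of " << batch.size() << " documents\n";
        }};

    // Mirrors spill(): generate the request, then let the strategy fill and send it.
    auto cmd = insertMode.batchedCommandGenerator();
    insertMode.strategy(std::move(cmd), Batch{"{_id: 1}", "{_id: 2}"});
}
```

In the real code the generator is `makeInsertCommandGenerator()` or `makeUpdateCommandGenerator()`, and the strategy calls `extractInsertRequest()` or `extractUpdateRequest()` on the request it receives.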
+#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -48,8 +95,9 @@ class DocumentSourceMerge final : public DocumentSourceWriter, BatchedObjects&&, + BatchedCommandRequest&&, UpsertType upsert)>; + // A function object that will be invoked to generate a BatchedCommandRequest. + using BatchedCommandGenerator = std::function&, const NamespaceString&)>; + MergeMode mode; ActionSet actions; MergeStrategy strategy; BatchTransform transform; UpsertType upsertType; + BatchedCommandGenerator batchedCommandGenerator; }; /** @@ -195,7 +249,8 @@ class DocumentSourceMerge final : public DocumentSourceWriter +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/oid.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/curop.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" -#include "mongo/db/pipeline/document_source_limit.h" -#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/cursor_response.h" -#include "mongo/db/query/getmore_command_gen.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/query_request_helper.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/executor/thread_pool_mock.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/query/async_results_merger_params_gen.h" +#include "mongo/s/query/document_source_merge_cursors.h" #include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/stdx/thread.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -154,8 +187,9 @@ TEST_F(DocumentSourceMergeCursorsTest, ShouldRejectEmptyArray) { TEST_F(DocumentSourceMergeCursorsTest, 
ShouldRejectLegacySerializationFormats) { // Formats like this were used in old versions of the server but are no longer supported. - auto spec = BSON("$mergeCursors" << BSON_ARRAY(BSON("ns" << getTenantIdNss().ns() << "id" << 0LL - << "host" << kTestHost.toString()))); + auto spec = + BSON("$mergeCursors" << BSON_ARRAY(BSON("ns" << getTenantIdNss().ns_forTest() << "id" << 0LL + << "host" << kTestHost.toString()))); ASSERT_THROWS_CODE(DocumentSourceMergeCursors::createFromBson(spec.firstElement(), getExpCtx()), AssertionException, 17026); @@ -531,4 +565,41 @@ TEST_F(DocumentSourceMergeCursorsMultiTenancyAndFeatureFlagTest, // AsyncResultsMergerParams. ASSERT(DocumentSourceMergeCursors::createFromBson(newSpec.firstElement(), getExpCtx())); } +using DocumentSourceMergeCursorsShapeTest = AggregationContextFixture; +TEST_F(DocumentSourceMergeCursorsShapeTest, QueryShape) { + auto expCtx = getExpCtx(); + AsyncResultsMergerParams armParams; + armParams.setNss( + NamespaceString::createNamespaceString_forTest(boost::none, kMergeCursorNsStr)); + std::vector cursors; + cursors.emplace_back( + makeRemoteCursor(kTestShardIds[0], kTestShardHosts[0], CursorResponse(expCtx->ns, 1, {}))); + cursors.emplace_back( + makeRemoteCursor(kTestShardIds[1], kTestShardHosts[1], CursorResponse(expCtx->ns, 2, {}))); + armParams.setRemotes(std::move(cursors)); + auto stage = DocumentSourceMergeCursors::create(expCtx, std::move(armParams)); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$mergeCursors": { + "compareWholeSortKey": "?bool", + "remotes": [ + { + "shardId": "HASH", + "hostAndPort": "HASH", + "cursorResponse": "?object" + }, + { + "shardId": "HASH", + "hostAndPort": "HASH", + "cursorResponse": "?object" + } + ], + "nss": "HASH", + "allowPartialResults": false, + "recordRemoteOpWaitTime": false + } + })", + redact(*stage)); +} } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_merge_spec.cpp b/src/mongo/db/pipeline/document_source_merge_spec.cpp index d98b567f5c59f..80502389d4a55 100644 --- a/src/mongo/db/pipeline/document_source_merge_spec.cpp +++ b/src/mongo/db/pipeline/document_source_merge_spec.cpp @@ -27,16 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_merge_spec.h" - +#include #include +#include +#include + +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/database_name.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/document_source_merge.h" #include "mongo/db/pipeline/document_source_merge_gen.h" +#include "mongo/db/pipeline/document_source_merge_spec.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { using namespace fmt::literals; @@ -66,7 +74,7 @@ NamespaceString mergeTargetNssParseFromBSON(boost::optional tenantId, coll && !coll->empty()); return NamespaceStringUtil::parseNamespaceFromRequest( - spec.getDb().value_or(DatabaseName(tenantId, "")), *coll); + spec.getDb().value_or(DatabaseNameUtil::deserialize(tenantId, "")), *coll); } void mergeTargetNssSerializeToBSON(const NamespaceString& targetNss, diff --git a/src/mongo/db/pipeline/document_source_merge_spec.h b/src/mongo/db/pipeline/document_source_merge_spec.h index 274326c817048..94735d4d85aa2 100644 --- a/src/mongo/db/pipeline/document_source_merge_spec.h +++ b/src/mongo/db/pipeline/document_source_merge_spec.h @@ -29,13 +29,20 @@ #pragma once +#include #include +#include #include #include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source_merge_modes_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/serialization_context.h" namespace mongo { class BSONObjBuilder; diff --git a/src/mongo/db/pipeline/document_source_merge_test.cpp b/src/mongo/db/pipeline/document_source_merge_test.cpp index 1cd0aa0dcd660..4b69352550bfe 100644 --- a/src/mongo/db/pipeline/document_source_merge_test.cpp +++ b/src/mongo/db/pipeline/document_source_merge_test.cpp @@ -27,17 +27,31 @@ * it in the license file. 
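The `$merge` spec parsing change here swaps direct `DatabaseName` construction for `DatabaseNameUtil::deserialize`, and the updated tests that follow keep asserting the same resolution rule: `into.coll` is required, and when `into.db` is omitted the target collection lands in the database the aggregation runs against. A small standalone sketch of that defaulting rule (toy types, not the real parse path):

```cpp
#include <iostream>
#include <optional>
#include <stdexcept>
#include <string>

// Resolves the $merge "into" target: coll is mandatory, db defaults to the
// database the aggregation is running against.
std::string resolveMergeTarget(const std::string& aggregationDb,
                               const std::optional<std::string>& db,
                               const std::optional<std::string>& coll) {
    if (!coll || coll->empty()) {
        throw std::invalid_argument("$merge 'into' requires a collection name");
    }
    return db.value_or(aggregationDb) + "." + *coll;
}

int main() {
    // {$merge: {into: "target_collection"}} run against db "unittests".
    std::cout << resolveMergeTarget("unittests", std::nullopt, "target_collection") << '\n';
    // {$merge: {into: {db: "target_db", coll: "target_collection"}}}.
    std::cout << resolveMergeTarget("unittests", "target_db", "target_collection") << '\n';
}
```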
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include +#include +#include +#include +#include "mongo/bson/json.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_merge.h" -#include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/db/pipeline/process_interface/non_shardsvr_process_interface.h" +#include "mongo/db/pipeline/document_source_merge_gen.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -97,39 +111,39 @@ class DocumentSourceMergeTest : public AggregationContextFixture { }; TEST_F(DocumentSourceMergeTest, CorrectlyParsesIfMergeSpecIsString) { - const auto& defaultDb = getExpCtx()->ns.db(); + const auto& defaultDb = getExpCtx()->ns.db_forTest(); const std::string targetColl = "target_collection"; auto spec = BSON("$merge" << targetColl); auto mergeStage = createMergeStage(spec); ASSERT(mergeStage); - ASSERT_EQ(mergeStage->getOutputNs().db(), defaultDb); + ASSERT_EQ(mergeStage->getOutputNs().db_forTest(), defaultDb); ASSERT_EQ(mergeStage->getOutputNs().coll(), targetColl); } TEST_F(DocumentSourceMergeTest, CorrectlyParsesIfIntoIsString) { - const auto& defaultDb = getExpCtx()->ns.db(); + const auto& defaultDb = getExpCtx()->ns.db_forTest(); const std::string targetColl = "target_collection"; auto spec = BSON("$merge" << BSON("into" << targetColl)); auto mergeStage = createMergeStage(spec); ASSERT(mergeStage); - ASSERT_EQ(mergeStage->getOutputNs().db(), defaultDb); + ASSERT_EQ(mergeStage->getOutputNs().db_forTest(), defaultDb); ASSERT_EQ(mergeStage->getOutputNs().coll(), targetColl); } TEST_F(DocumentSourceMergeTest, CorrectlyParsesIfIntoIsObject) { - const auto& defaultDb = getExpCtx()->ns.db(); + const auto& defaultDb = getExpCtx()->ns.db_forTest(); const std::string targetDb = "target_db"; const std::string targetColl = "target_collection"; auto spec = BSON("$merge" << BSON("into" << BSON("coll" << targetColl))); auto mergeStage = createMergeStage(spec); ASSERT(mergeStage); - ASSERT_EQ(mergeStage->getOutputNs().db(), defaultDb); + ASSERT_EQ(mergeStage->getOutputNs().db_forTest(), defaultDb); ASSERT_EQ(mergeStage->getOutputNs().coll(), targetColl); spec = BSON("$merge" << BSON("into" << BSON("db" << targetDb << "coll" << targetColl))); mergeStage = createMergeStage(spec); ASSERT(mergeStage); - ASSERT_EQ(mergeStage->getOutputNs().db(), targetDb); + ASSERT_EQ(mergeStage->getOutputNs().db_forTest(), targetDb); ASSERT_EQ(mergeStage->getOutputNs().coll(), targetColl); } @@ -149,7 +163,7 @@ TEST_F(DocumentSourceMergeTest, CorrectlyParsesIfWhenMatchedIsStringOrArray) { TEST_F(DocumentSourceMergeTest, CorrectlyParsesIfTargetAndAggregationNamespacesAreSame) { const auto targetNsSameAsAggregationNs = getExpCtx()->ns; const auto targetColl = targetNsSameAsAggregationNs.coll(); - const auto targetDb = targetNsSameAsAggregationNs.db(); + const auto targetDb = targetNsSameAsAggregationNs.db_forTest(); auto spec = 
BSON("$merge" << BSON("into" << BSON("coll" << targetColl << "db" << targetDb))); ASSERT(createMergeStage(spec)); @@ -397,13 +411,13 @@ TEST_F(DocumentSourceMergeTest, FailsToParseIfOnFieldIsNotStringOrArrayOfStrings } TEST_F(DocumentSourceMergeTest, CorrectlyUsesTargetDbThatMatchesAggregationDb) { - const auto targetDbSameAsAggregationDb = getExpCtx()->ns.db(); + const auto targetDbSameAsAggregationDb = getExpCtx()->ns.db_forTest(); const auto targetColl = "target_collection"; auto spec = BSON("$merge" << BSON("into" << BSON("coll" << targetColl << "db" << targetDbSameAsAggregationDb))); auto mergeStage = createMergeStage(spec); - ASSERT_EQ(mergeStage->getOutputNs().db(), targetDbSameAsAggregationDb); + ASSERT_EQ(mergeStage->getOutputNs().db_forTest(), targetDbSameAsAggregationDb); ASSERT_EQ(mergeStage->getOutputNs().coll(), targetColl); } @@ -490,7 +504,7 @@ TEST_F(DocumentSourceMergeTest, SerializeDottedPathOnFieldsSharedPrefix) { } TEST_F(DocumentSourceMergeTest, SerializeIntoWhenMergeSpecIsStringNotDotted) { - const auto aggregationDb = getExpCtx()->ns.db(); + const auto aggregationDb = getExpCtx()->ns.db_forTest(); auto spec = BSON("$merge" << "target_collection"); auto mergeStage = createMergeStage(spec); @@ -500,7 +514,7 @@ TEST_F(DocumentSourceMergeTest, SerializeIntoWhenMergeSpecIsStringNotDotted) { } TEST_F(DocumentSourceMergeTest, SerializeIntoWhenMergeSpecIsStringDotted) { - const auto aggregationDb = getExpCtx()->ns.db(); + const auto aggregationDb = getExpCtx()->ns.db_forTest(); auto spec = BSON("$merge" << "my.target_collection"); auto mergeStage = createMergeStage(spec); @@ -510,7 +524,7 @@ TEST_F(DocumentSourceMergeTest, SerializeIntoWhenMergeSpecIsStringDotted) { } TEST_F(DocumentSourceMergeTest, SerializeIntoWhenIntoIsStringNotDotted) { - const auto aggregationDb = getExpCtx()->ns.db(); + const auto aggregationDb = getExpCtx()->ns.db_forTest(); auto spec = BSON("$merge" << BSON("into" << "target_collection")); auto mergeStage = createMergeStage(spec); @@ -520,7 +534,7 @@ TEST_F(DocumentSourceMergeTest, SerializeIntoWhenIntoIsStringNotDotted) { } TEST_F(DocumentSourceMergeTest, SerializeIntoWhenIntoIsStringDotted) { - const auto aggregationDb = getExpCtx()->ns.db(); + const auto aggregationDb = getExpCtx()->ns.db_forTest(); auto spec = BSON("$merge" << BSON("into" << "my.target_collection")); auto mergeStage = createMergeStage(spec); @@ -530,7 +544,7 @@ TEST_F(DocumentSourceMergeTest, SerializeIntoWhenIntoIsStringDotted) { } TEST_F(DocumentSourceMergeTest, SerializeIntoWhenIntoIsObjectWithCollNotDotted) { - const auto aggregationDb = getExpCtx()->ns.db(); + const auto aggregationDb = getExpCtx()->ns.db_forTest(); auto spec = BSON("$merge" << BSON("into" << BSON("coll" << "target_collection"))); auto mergeStage = createMergeStage(spec); @@ -540,7 +554,7 @@ TEST_F(DocumentSourceMergeTest, SerializeIntoWhenIntoIsObjectWithCollNotDotted) } TEST_F(DocumentSourceMergeTest, SerializeIntoWhenIntoIsObjectWithCollDotted) { - const auto aggregationDb = getExpCtx()->ns.db(); + const auto aggregationDb = getExpCtx()->ns.db_forTest(); auto spec = BSON("$merge" << BSON("into" << BSON("coll" << "my.target_collection"))); auto mergeStage = createMergeStage(spec); @@ -794,12 +808,12 @@ TEST_F(DocumentSourceMergeTest, SerializeDefaultLetVariable) { // Test the behaviour of 'let' serialization for each whenNotMatched mode. 
TEST_F(DocumentSourceMergeTest, SerializeLetVariables) { - auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x" - << "$$v1" - << "y" - << "$$v2" - << "z" - << "$$v3"))); + auto pipeline = BSON_ARRAY(BSON("$project" << BSON("_id" << true << "x" + << "$$v1" + << "y" + << "$$v2" + << "z" + << "$$v3"))); const auto createAndSerializeMergeStage = [this, &pipeline](StringData whenNotMatched) { auto spec = BSON("$merge" << BSON("into" @@ -845,8 +859,8 @@ TEST_F(DocumentSourceMergeTest, SerializeLetVariables) { TEST_F(DocumentSourceMergeTest, SerializeLetArrayVariable) { for (auto&& whenNotMatched : {"insert", "fail", "discard"}) { - auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x" - << "$$v1"))); + auto pipeline = BSON_ARRAY(BSON("$project" << BSON("_id" << true << "x" + << "$$v1"))); auto spec = BSON( "$merge" << BSON("into" << "target_collection" @@ -874,8 +888,9 @@ TEST_F(DocumentSourceMergeTest, SerializeLetArrayVariable) { // SERVER-41272, this test should be updated to accordingly. TEST_F(DocumentSourceMergeTest, SerializeNullLetVariablesAsDefault) { for (auto&& whenNotMatched : {"insert", "fail", "discard"}) { - auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x" - << "1"))); + auto pipeline = BSON_ARRAY(BSON("$project" << BSON("_id" << true << "x" + << BSON("$const" + << "1")))); auto spec = BSON("$merge" << BSON("into" << "target_collection" << "let" << BSONNULL << "whenMatched" << pipeline @@ -892,8 +907,9 @@ TEST_F(DocumentSourceMergeTest, SerializeNullLetVariablesAsDefault) { TEST_F(DocumentSourceMergeTest, SerializeEmptyLetVariables) { for (auto&& whenNotMatched : {"insert", "fail", "discard"}) { - auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x" - << "1"))); + auto pipeline = BSON_ARRAY(BSON("$project" << BSON("_id" << true << "x" + << BSON("$const" + << "1")))); auto spec = BSON("$merge" << BSON("into" << "target_collection" << "let" << BSONObj() << "whenMatched" << pipeline @@ -909,6 +925,41 @@ TEST_F(DocumentSourceMergeTest, SerializeEmptyLetVariables) { } } +TEST_F(DocumentSourceMergeTest, SerializeEmptyLetVariableMentionNew) { + auto pipeline = BSON_ARRAY(fromjson("{$project: {_id: true, x: '$$new'}}")); + auto spec = + BSON("$merge" << BSON("into" + << "target_collection" + << "let" << BSONObj() << "whenMatched" << pipeline << "whenNotMatched" + << "insert")); + auto mergeStage = createMergeStage(spec); + ASSERT(mergeStage); + auto serialized = mergeStage->serialize().getDocument(); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$merge": { + "into": { + "db": "unittests", + "coll": "target_collection" + }, + "on": "_id", + "let": { + "new": "$$ROOT" + }, + "whenMatched": [ + { + "$project": { + "_id": true, + "x": "$$new" + } + } + ], + "whenNotMatched": "insert" + } + })", + serialized.toBson()); +} + TEST_F(DocumentSourceMergeTest, OnlyObjectCanBeUsedAsLetVariables) { for (auto&& whenNotMatched : {"insert", "fail", "discard"}) { auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x" @@ -1015,8 +1066,8 @@ TEST_F(DocumentSourceMergeServerlessTest, // Pass collection name as a db + coll object. 
auto stageSpec = - BSON("$merge" << BSON("into" << BSON("db" << nss.dbName().toStringWithTenantId() << "coll" - << _targetColl))); + BSON("$merge" << BSON("into" << BSON("db" << nss.dbName().toStringWithTenantId_forTest() + << "coll" << _targetColl))); auto liteParsedLookup = DocumentSourceMerge::LiteParsed::parse(nss, stageSpec.firstElement()); auto namespaceSet = liteParsedLookup->getInvolvedNamespaces(); ASSERT_EQ(1, namespaceSet.size()); @@ -1045,8 +1096,8 @@ TEST_F(DocumentSourceMergeServerlessTest, ASSERT_EQ(*mergeSource->getOutputNs().tenantId(), *expCtx->ns.tenantId()); // Assert the tenantId is not included in the serialized namespace. - auto dbField = flagStatus ? expCtx->ns.dbName().toString() - : expCtx->ns.dbName().toStringWithTenantId(); + auto dbField = flagStatus ? expCtx->ns.dbName().toString_forTest() + : expCtx->ns.dbName().toStringWithTenantId_forTest(); auto expectedDoc = Document{{"db", dbField}, {"coll", _targetColl}}; auto serialized = mergeSource->serialize().getDocument(); @@ -1073,8 +1124,8 @@ TEST_F(DocumentSourceMergeServerlessTest, ASSERT(mergeSource->getOutputNs().tenantId()); ASSERT_EQ(*mergeSource->getOutputNs().tenantId(), *expCtx->ns.tenantId()); - auto dbField = flagStatus ? expCtx->ns.dbName().toString() - : expCtx->ns.dbName().toStringWithTenantId(); + auto dbField = flagStatus ? expCtx->ns.dbName().toString_forTest() + : expCtx->ns.dbName().toStringWithTenantId_forTest(); auto expectedDoc = Document{{"db", dbField}, {"coll", _targetColl}}; auto serialized = mergeSource->serialize().getDocument(); @@ -1138,6 +1189,41 @@ TEST_F(DocumentSourceMergeServerlessTest, ASSERT_DOCUMENT_EQ(serialized["$merge"][kIntoFieldName].getDocument(), expectedDoc); } +TEST_F(DocumentSourceMergeTest, QueryShape) { + auto pipeline = BSON_ARRAY(BSON("$project" << BSON("x" + << "1"))); + auto spec = + BSON("$merge" << BSON("into" + << "target_collection" + << "let" << BSONObj() << "whenMatched" << pipeline << "whenNotMatched" + << "insert")); + auto mergeStage = createMergeStage(spec); + ASSERT(mergeStage); + auto serialized = mergeStage->serialize().getDocument(); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$merge": { + "into": { + "db": "unittests", + "coll": "target_collection" + }, + "on": "_id", + "let": { + "new": "$$ROOT" + }, + "whenMatched": [ + { + "$project": { + "HASH<_id>": true, + "HASH": "?string" + } + } + ], + "whenNotMatched": "insert" + } + })", + redact(*mergeStage)); +} } // namespace } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_mock.cpp b/src/mongo/db/pipeline/document_source_mock.cpp index 4567f5e217859..5f226707ec94d 100644 --- a/src/mongo/db/pipeline/document_source_mock.cpp +++ b/src/mongo/db/pipeline/document_source_mock.cpp @@ -27,13 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/document_source_mock.h" +#include + +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/expression_context_for_test.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_mock.h b/src/mongo/db/pipeline/document_source_mock.h index e8c86ee404b30..27e76b90c2297 100644 --- a/src/mongo/db/pipeline/document_source_mock.h +++ b/src/mongo/db/pipeline/document_source_mock.h @@ -29,11 +29,29 @@ #pragma once +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_queue.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_mock_test.cpp b/src/mongo/db/pipeline/document_source_mock_test.cpp index 374c860173862..1c5e756c06be3 100644 --- a/src/mongo/db/pipeline/document_source_mock_test.cpp +++ b/src/mongo/db/pipeline/document_source_mock_test.cpp @@ -27,14 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/document_source_operation_metrics.cpp b/src/mongo/db/pipeline/document_source_operation_metrics.cpp index a2a420eabe077..3a41dcaee6109 100644 --- a/src/mongo/db/pipeline/document_source_operation_metrics.cpp +++ b/src/mongo/db/pipeline/document_source_operation_metrics.cpp @@ -27,15 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/document_source_operation_metrics.h" +#include -#include "mongo/db/pipeline/lite_parsed_document_source.h" -#include "mongo/db/server_options.h" -#include "mongo/db/stats/operation_resource_consumption_gen.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/document_source_operation_metrics.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/util/net/socket_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -95,7 +100,7 @@ intrusive_ptr DocumentSourceOperationMetrics::createFromBson( const NamespaceString& nss = pExpCtx->ns; uassert(ErrorCodes::InvalidNamespace, "$operationMetrics must be run against the 'admin' database with {aggregate: 1}", - nss.db() == DatabaseName::kAdmin.db() && nss.isCollectionlessAggregateNS()); + nss.isAdminDB() && nss.isCollectionlessAggregateNS()); uassert(ErrorCodes::BadValue, "The $operationMetrics stage specification must be an object", diff --git a/src/mongo/db/pipeline/document_source_operation_metrics.h b/src/mongo/db/pipeline/document_source_operation_metrics.h index 2a1e9ecfec2e5..ca48ed23a376a 100644 --- a/src/mongo/db/pipeline/document_source_operation_metrics.h +++ b/src/mongo/db/pipeline/document_source_operation_metrics.h @@ -29,7 +29,32 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/tenant_id.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { @@ -44,15 +69,17 @@ class DocumentSourceOperationMetrics : public DocumentSource { public: static std::unique_ptr parse(const NamespaceString& nss, const BSONElement& spec) { - return std::make_unique(spec.fieldName()); + return std::make_unique(spec.fieldName(), nss.tenantId()); } - explicit LiteParsed(std::string parseTimeName) - : LiteParsedDocumentSource(std::move(parseTimeName)) {} + explicit LiteParsed(std::string parseTimeName, const boost::optional& tenantId) + : LiteParsedDocumentSource(std::move(parseTimeName)), + _privileges({Privilege(ResourcePattern::forClusterResource(tenantId), + ActionType::operationMetrics)}) {} PrivilegeVector requiredPrivileges(bool isMongos, bool bypassDocumentValidation) const final { - return {Privilege(ResourcePattern::forClusterResource(), ActionType::operationMetrics)}; + return _privileges; } stdx::unordered_set getInvolvedNamespaces() const final { @@ -62,6 +89,9 @@ class DocumentSourceOperationMetrics : public DocumentSource { bool isInitialSource() const final { return true; } + + private: + const PrivilegeVector _privileges; }; 
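For `$operationMetrics`, the `LiteParsed` class above now captures the request's tenant id at parse time and builds the privilege vector once in its constructor, so `requiredPrivileges()` just returns the cached member. A generic sketch of the same construct-once, return-cached pattern with simplified types (these are not the real `Privilege`/`ResourcePattern` classes):

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct Privilege { std::string resource; std::string action; };
using PrivilegeVector = std::vector<Privilege>;

class LiteParsedOperationMetrics {
public:
    // The tenant id is only known while parsing the request, so the privilege
    // vector is computed here and kept for later permission checks.
    explicit LiteParsedOperationMetrics(std::optional<std::string> tenantId)
        : _privileges{{tenantId ? "cluster@" + *tenantId : std::string("cluster"),
                       "operationMetrics"}} {}

    const PrivilegeVector& requiredPrivileges() const {
        return _privileges;  // no rebuilding per call
    }

private:
    const PrivilegeVector _privileges;
};

int main() {
    LiteParsedOperationMetrics lp{std::string{"tenantA"}};
    std::cout << lp.requiredPrivileges().front().resource << '\n';
}
```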
DocumentSourceOperationMetrics(const boost::intrusive_ptr& pExpCtx, diff --git a/src/mongo/db/pipeline/document_source_out.cpp b/src/mongo/db/pipeline/document_source_out.cpp index 113ce0b77cb64..5ba89230a37d6 100644 --- a/src/mongo/db/pipeline/document_source_out.cpp +++ b/src/mongo/db/pipeline/document_source_out.cpp @@ -28,19 +28,38 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_out.h" - #include - +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/curop_failpoint_helpers.h" -#include "mongo/db/ops/write_ops.h" -#include "mongo/db/pipeline/document_path_support.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/document_source_out.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/timeseries/catalog_helper.h" +#include "mongo/db/timeseries/timeseries_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/destructor_guard.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -61,9 +80,22 @@ DocumentSourceOut::~DocumentSourceOut() { // Make sure we drop the temp collection if anything goes wrong. Errors are ignored // here because nothing can be done about them. Additionally, if this fails and the // collection is left behind, it will be cleaned up next time the server is started. - if (_tempNs.size()) { + + // If creating a time-series collection, we must drop the "real" buckets collection, if + // anything goes wrong creating the view. + + // If creating a time-series collection, '_tempNs' is translated to include the + // "system.buckets" prefix. + if (_tempNs.size() || (_timeseries && !_timeseriesStateConsistent)) { auto cleanupClient = pExpCtx->opCtx->getServiceContext()->makeClient("$out_replace_coll_cleanup"); + + // TODO(SERVER-74662): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*cleanupClient.get()); + cleanupClient.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(cleanupClient); // Create a new operation context so that any interrupts on the current operation will // not affect the dropCollection operation below. @@ -71,51 +103,104 @@ DocumentSourceOut::~DocumentSourceOut() { DocumentSourceWriteBlock writeBlock(cleanupOpCtx.get()); - pExpCtx->mongoProcessInterface->dropCollection(cleanupOpCtx.get(), _tempNs); + auto deleteNs = _tempNs.size() ? 
_tempNs : makeBucketNsIfTimeseries(getOutputNs()); + pExpCtx->mongoProcessInterface->dropCollection(cleanupOpCtx.get(), deleteNs); }); } -NamespaceString DocumentSourceOut::parseNsFromElem(const BSONElement& spec, - const DatabaseName& defaultDB) { +DocumentSourceOutSpec DocumentSourceOut::parseOutSpecAndResolveTargetNamespace( + const BSONElement& spec, const DatabaseName& defaultDB) { + DocumentSourceOutSpec outSpec; if (spec.type() == BSONType::String) { - return NamespaceStringUtil::parseNamespaceFromRequest(defaultDB, spec.valueStringData()); + outSpec.setColl(spec.valueStringData()); + // TODO SERVER-77000: access a SerializationContext object to serialize properly + outSpec.setDb(defaultDB.db()); } else if (spec.type() == BSONType::Object) { - auto nsObj = spec.Obj(); - uassert(16994, - str::stream() << "If an object is passed to " << kStageName - << " it must have exactly 2 fields: 'db' and 'coll'", - nsObj.nFields() == 2 && nsObj.hasField("coll") && nsObj.hasField("db")); - return NamespaceStringUtil::parseNamespaceFromRequest( - defaultDB.tenantId(), nsObj["db"].String(), nsObj["coll"].String()); + // TODO SERVER-77000: access a SerializationContext object to pass into the IDLParserContext + outSpec = mongo::DocumentSourceOutSpec::parse(IDLParserContext(kStageName), + spec.embeddedObject()); } else { uassert(16990, "{} only supports a string or object argument, but found {}"_format( kStageName, typeName(spec.type())), spec.type() == BSONType::String); } - MONGO_UNREACHABLE; + + return outSpec; +} + +NamespaceString DocumentSourceOut::makeBucketNsIfTimeseries(const NamespaceString& ns) { + return _timeseries ? ns.makeTimeseriesBucketsNamespace() : ns; } std::unique_ptr DocumentSourceOut::LiteParsed::parse( const NamespaceString& nss, const BSONElement& spec) { - NamespaceString targetNss = parseNsFromElem(spec, nss.dbName()); + auto outSpec = parseOutSpecAndResolveTargetNamespace(spec, nss.dbName()); + NamespaceString targetNss = NamespaceStringUtil::parseNamespaceFromRequest( + nss.dbName().tenantId(), outSpec.getDb(), outSpec.getColl()); + uassert(ErrorCodes::InvalidNamespace, - "Invalid {} target namespace, {}"_format(kStageName, targetNss.ns()), + "Invalid {} target namespace, {}"_format(kStageName, targetNss.toStringForErrorMsg()), targetNss.isValid()); return std::make_unique(spec.fieldName(), std::move(targetNss)); } +boost::optional DocumentSourceOut::validateTimeseries() { + const NamespaceString& outNs = getOutputNs(); + auto existingOpts = mongo::timeseries::getTimeseriesOptions(pExpCtx->opCtx, outNs, true); + + // If the user did not specify the 'timeseries' option in the input, but the target namespace is + // a time-series collection, then we can fetch the time-series options from the + // CollectionCatalog and treat this operation as a write to time-series collection. If the user + // did specify 'timeseries' options and the target namespace exists, then the options should + // match. + if (!_timeseries) { + return existingOpts; + } + + if (existingOpts) { + uassert(7406103, + str::stream() << "Time-series options inputted must match the existing time-series " + "collection. 
Received: " + << _timeseries->toBSON().toString() + << "Found: " << existingOpts->toBSON().toString(), + timeseries::optionsAreEqual(_timeseries.value(), existingOpts.value())); + } else { + auto collection = CollectionCatalog::get(pExpCtx->opCtx) + ->lookupCollectionByNamespace(pExpCtx->opCtx, outNs); + uassert(7268700, + "Cannot create a time-series collection from a non time-series collection.", + !collection); + auto view = CollectionCatalog::get(pExpCtx->opCtx)->lookupView(pExpCtx->opCtx, outNs); + uassert( + 7268703, "Cannot create a time-series collection from a non time-series view.", !view); + } + return _timeseries; +} + void DocumentSourceOut::initialize() { DocumentSourceWriteBlock writeBlock(pExpCtx->opCtx); - const auto& outputNs = getOutputNs(); - // We will write all results into a temporary collection, then rename the temporary collection - // to be the target collection once we are done. - // Note that this temporary collection name is used by MongoMirror and thus should not be - // changed without consultation. + // Must be called before all other functions, since sets the value of '_timeseries', which the + // rest of the function heavily relies on. + _timeseries = validateTimeseries(); + + uassert(7406100, + "$out to time-series collections is only supported on FCV greater than or equal to 7.1", + feature_flags::gFeatureFlagAggOutTimeseries.isEnabled( + serverGlobalParams.featureCompatibility) || + !_timeseries); + + const NamespaceString& outputNs = makeBucketNsIfTimeseries(getOutputNs()); + + // We will write all results into a temporary collection, then rename the temporary + // collection to be the target collection once we are done. Note that this temporary + // collection name is used by MongoMirror and thus should not be changed without + // consultation. _tempNs = NamespaceStringUtil::parseNamespaceFromRequest( - outputNs.tenantId(), - str::stream() << outputNs.dbName().toString() << ".tmp.agg_out." << UUID::gen()); + getOutputNs().tenantId(), + str::stream() << getOutputNs().dbName().db() << "." + << NamespaceString::kOutTmpCollectionPrefix << UUID::gen()); // Save the original collection options and index specs so we can check they didn't change // during computation. @@ -130,19 +215,33 @@ void DocumentSourceOut::initialize() { // If the collection becomes capped during processing, the collection options will have changed, // and the $out will fail. uassert(17152, - "namespace '{}' is capped so it can't be used for {}"_format(outputNs.ns(), kStageName), + "namespace '{}' is capped so it can't be used for {}"_format( + outputNs.toStringForErrorMsg(), kStageName), _originalOutOptions["capped"].eoo()); { BSONObjBuilder cmd; cmd << "create" << _tempNs.coll(); cmd << "temp" << true; - cmd.appendElementsUnique(_originalOutOptions); - + if (_timeseries) { + // Append the original collection options without the 'validator' and 'clusteredIndex' + // fields since these fields are invalid with the 'timeseries' field and will be + // recreated when the buckets collection is created. + _originalOutOptions.isEmpty() + ? 
cmd << DocumentSourceOutSpec::kTimeseriesFieldName << _timeseries->toBSON() + : cmd.appendElementsUnique(_originalOutOptions.removeFields( + StringDataSet{"clusteredIndex", "validator"})); + } else { + cmd.appendElementsUnique(_originalOutOptions); + } pExpCtx->mongoProcessInterface->createCollection( pExpCtx->opCtx, _tempNs.dbName(), cmd.done()); } + // After creating the tmp collection we should update '_tempNs' to represent the buckets + // collection if the collection is time-series. + _tempNs = makeBucketNsIfTimeseries(_tempNs); + CurOpFailpointHelpers::waitWhileFailPointEnabled( &outWaitAfterTempCollectionCreation, pExpCtx->opCtx, @@ -170,7 +269,15 @@ void DocumentSourceOut::initialize() { void DocumentSourceOut::finalize() { DocumentSourceWriteBlock writeBlock(pExpCtx->opCtx); - const auto& outputNs = getOutputNs(); + uassert(7406101, + "$out to time-series collections is only supported on FCV greater than or equal to 7.1", + feature_flags::gFeatureFlagAggOutTimeseries.isEnabled( + serverGlobalParams.featureCompatibility) || + !_timeseries); + + // If the collection is time-series, we must rename to the "real" buckets collection. + const NamespaceString& outputNs = makeBucketNsIfTimeseries(getOutputNs()); + pExpCtx->mongoProcessInterface->renameIfOptionsAndIndexesHaveNotChanged(pExpCtx->opCtx, _tempNs, outputNs, @@ -181,17 +288,38 @@ void DocumentSourceOut::finalize() { // The rename succeeded, so the temp collection no longer exists. _tempNs = {}; + + _timeseriesStateConsistent = false; + // If the collection is time-series, try to create the view. + if (_timeseries) { + BSONObjBuilder cmd; + cmd << "create" << getOutputNs().coll(); + cmd << DocumentSourceOutSpec::kTimeseriesFieldName << _timeseries->toBSON(); + pExpCtx->mongoProcessInterface->createTimeseriesView( + pExpCtx->opCtx, getOutputNs(), cmd.done(), _timeseries.value()); + } + + // Creating the view succeeded, so the boolean should be set to true. + _timeseriesStateConsistent = true; } -boost::intrusive_ptr DocumentSourceOut::create( - NamespaceString outputNs, const boost::intrusive_ptr& expCtx) { +BatchedCommandRequest DocumentSourceOut::initializeBatchedWriteRequest() const { + // Note that our insert targets '_tempNs' (or the associated timeseries view) since we will + // never write to 'outputNs' directly. + const auto& targetNss = _timeseries ? 
_tempNs.getTimeseriesViewNamespace() : _tempNs; + return DocumentSourceWriter::makeInsertCommand(targetNss, pExpCtx->bypassDocumentValidation); +} +boost::intrusive_ptr DocumentSourceOut::create( + NamespaceString outputNs, + const boost::intrusive_ptr& expCtx, + boost::optional timeseries) { uassert(ErrorCodes::OperationNotSupportedInTransaction, "{} cannot be used in a transaction"_format(kStageName), !expCtx->opCtx->inMultiDocumentTransaction()); uassert(ErrorCodes::InvalidNamespace, - "Invalid {} target namespace, {}"_format(kStageName, outputNs.ns()), + "Invalid {} target namespace, {}"_format(kStageName, outputNs.toStringForErrorMsg()), outputNs.isValid()); uassert(17385, @@ -199,24 +327,32 @@ boost::intrusive_ptr DocumentSourceOut::create( !outputNs.isSystem()); uassert(31321, - "Can't {} to internal database: {}"_format(kStageName, outputNs.db()), + "Can't {} to internal database: {}"_format(kStageName, + outputNs.dbName().toStringForErrorMsg()), !outputNs.isOnInternalDb()); - - return new DocumentSourceOut(std::move(outputNs), expCtx); + return new DocumentSourceOut(std::move(outputNs), std::move(timeseries), expCtx); } boost::intrusive_ptr DocumentSourceOut::createFromBson( BSONElement elem, const boost::intrusive_ptr& expCtx) { - auto targetNS = parseNsFromElem(elem, expCtx->ns.dbName()); - return create(targetNS, expCtx); + auto outSpec = parseOutSpecAndResolveTargetNamespace(elem, expCtx->ns.dbName()); + NamespaceString targetNss = NamespaceStringUtil::parseNamespaceFromRequest( + expCtx->ns.dbName().tenantId(), outSpec.getDb(), outSpec.getColl()); + return create(std::move(targetNss), expCtx, std::move(outSpec.getTimeseries())); } Value DocumentSourceOut::serialize(SerializationOptions opts) const { - MutableDocument spec; - // Do not include the tenantId in the serialized 'outputNs'. 
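Stepping back, the rewritten `initialize()`/`finalize()` pair above implements a write-then-swap pattern: results are written into a uniquely named temporary collection (for a time-series target, its buckets namespace), which is renamed over the real target only after every batch has been written, with the time-series view created last. A rough sketch of that ordering, with hypothetical hook functions standing in for the process-interface calls and illustrative namespace naming:

```cpp
#include <functional>
#include <random>
#include <sstream>
#include <string>

// Illustrative only: builds a temp name in the spirit of "<db>.tmp.agg_out.<unique suffix>".
std::string makeTempNamespace(const std::string& db) {
    static std::mt19937_64 rng{std::random_device{}()};
    std::ostringstream ns;
    ns << db << ".tmp.agg_out." << std::hex << rng();
    return ns.str();
}

// Hypothetical hooks standing in for the process-interface calls (create, insert, rename, view).
struct OutHooks {
    std::function<void(const std::string& ns, bool asTimeseriesBuckets)> createTemp;
    std::function<void(const std::string& ns)> writeAllBatches;
    std::function<void(const std::string& from, const std::string& to)> renameOver;
    std::function<void(const std::string& viewNs)> createView;
};

// Sketch of the write-then-swap ordering: nothing touches the real target until the rename,
// and for a time-series target the view is only created after the rename has succeeded.
void runOutLikeWrite(const std::string& db, const std::string& coll, bool timeseries,
                     const OutHooks& hooks) {
    const std::string tempNs = makeTempNamespace(db);
    hooks.createTemp(tempNs, timeseries);   // temp collection carries the target's options
    hooks.writeAllBatches(tempNs);          // all batched inserts go to the temp namespace
    const std::string target =
        timeseries ? db + ".system.buckets." + coll : db + "." + coll;  // illustrative naming
    hooks.renameOver(tempNs, target);       // swap the finished data into place
    if (timeseries)
        hooks.createView(db + "." + coll);  // finally expose the time-series view
}
```

The ordering is the point of the sketch: because the rename is the only step that affects the target namespace, a failure at any earlier step leaves only the temporary collection to clean up.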
- spec["db"] = Value(opts.serializeIdentifier(_outputNs.dbName().db())); - spec["coll"] = Value(opts.serializeIdentifier(_outputNs.coll())); - return Value(Document{{kStageName, spec.freezeToValue()}}); + BSONObjBuilder bob; + DocumentSourceOutSpec spec; + // TODO SERVER-77000: use SerializatonContext from expCtx and DatabaseNameUtil to serialize + // spec.setDb(DatabaseNameUtil::serialize( + // _outputNs.dbName(), + // SerializationContext::stateCommandReply(pExpCtx->serializationCtxt))); + spec.setDb(_outputNs.dbName().db()); + spec.setColl(_outputNs.coll()); + spec.setTimeseries(_timeseries); + spec.serialize(&bob, opts); + return Value(Document{{kStageName, bob.done()}}); } void DocumentSourceOut::waitWhileFailPointEnabled() { @@ -225,8 +361,9 @@ void DocumentSourceOut::waitWhileFailPointEnabled() { pExpCtx->opCtx, "hangWhileBuildingDocumentSourceOutBatch", []() { - LOGV2(20902, - "Hanging aggregation due to 'hangWhileBuildingDocumentSourceOutBatch' failpoint"); + LOGV2( + 20902, + "Hanging aggregation due to 'hangWhileBuildingDocumentSourceOutBatch' failpoint"); }); } diff --git a/src/mongo/db/pipeline/document_source_out.h b/src/mongo/db/pipeline/document_source_out.h index 94192fb18d466..58f7d7510b20e 100644 --- a/src/mongo/db/pipeline/document_source_out.h +++ b/src/mongo/db/pipeline/document_source_out.h @@ -29,7 +29,49 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/document_source_out_gen.h" #include "mongo/db/pipeline/document_source_writer.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { /** @@ -50,9 +92,14 @@ class DocumentSourceOut final : public DocumentSourceWriter { static std::unique_ptr parse(const NamespaceString& nss, const BSONElement& spec); - bool allowShardedForeignCollection(NamespaceString nss, - bool inMultiDocumentTransaction) const final { - return _foreignNss != nss; + Status checkShardedForeignCollAllowed(NamespaceString nss, + bool inMultiDocumentTransaction) const final { + if (_foreignNss != nss) { + return Status::OK(); + } + + return Status(ErrorCodes::NamespaceCannotBeSharded, + "$out to a sharded collection is not allowed"); } bool allowedToPassthroughFromMongos() const final { @@ -97,7 +144,9 @@ class 
DocumentSourceOut final : public DocumentSourceWriter { * Creates a new $out stage from the given arguments. */ static boost::intrusive_ptr create( - NamespaceString outputNs, const boost::intrusive_ptr& expCtx); + NamespaceString outputNs, + const boost::intrusive_ptr& expCtx, + boost::optional timeseries = boost::none); /** * Parses a $out stage from the user-supplied BSON. @@ -113,21 +162,35 @@ class DocumentSourceOut final : public DocumentSourceWriter { private: DocumentSourceOut(NamespaceString outputNs, + boost::optional timeseries, const boost::intrusive_ptr& expCtx) - : DocumentSourceWriter(kStageName.rawData(), std::move(outputNs), expCtx) {} - - static NamespaceString parseNsFromElem(const BSONElement& spec, const DatabaseName& defaultDB); + : DocumentSourceWriter(kStageName.rawData(), std::move(outputNs), expCtx), + _timeseries(std::move(timeseries)) {} + static DocumentSourceOutSpec parseOutSpecAndResolveTargetNamespace( + const BSONElement& spec, const DatabaseName& defaultDB); void initialize() override; void finalize() override; - void spill(BatchedObjects&& batch) override { + void spill(BatchedCommandRequest&& bcr, BatchedObjects&& batch) override { DocumentSourceWriteBlock writeBlock(pExpCtx->opCtx); + auto insertCommand = bcr.extractInsertRequest(); + insertCommand->setDocuments(std::move(batch)); auto targetEpoch = boost::none; - uassertStatusOK(pExpCtx->mongoProcessInterface->insert( - pExpCtx, _tempNs, std::move(batch), _writeConcern, targetEpoch)); + + if (_timeseries) { + uassertStatusOK(pExpCtx->mongoProcessInterface->insertTimeseries( + pExpCtx, + _tempNs.getTimeseriesViewNamespace(), + std::move(insertCommand), + _writeConcern, + targetEpoch)); + } else { + uassertStatusOK(pExpCtx->mongoProcessInterface->insert( + pExpCtx, _tempNs, std::move(insertCommand), _writeConcern, targetEpoch)); + } } std::pair makeBatchObject(Document&& doc) const override { @@ -136,8 +199,22 @@ class DocumentSourceOut final : public DocumentSourceWriter { return {obj, _writeSizeEstimator->estimateInsertSizeBytes(obj)}; } + BatchedCommandRequest initializeBatchedWriteRequest() const override; + void waitWhileFailPointEnabled() override; + /** + * Determines if an error exists with the user input and existing collections. + * The function will error if: + * 1. The user provides the 'timeseries' field, but a non time-series collection or view exists + * in that namespace. + * 2. The user provides the 'timeseries' field with a specification that does not match an + * existing time-series collection. The function will replace the value of '_timeseries' if the + * user does not provide the 'timeseries' field, but a time-series collection exists. + */ + boost::optional validateTimeseries(); + + NamespaceString makeBucketNsIfTimeseries(const NamespaceString& ns); // Holds on to the original collection options and index specs so we can check they didn't // change during computation. BSONObj _originalOutOptions; @@ -145,6 +222,14 @@ class DocumentSourceOut final : public DocumentSourceWriter { // The temporary namespace for the $out writes. NamespaceString _tempNs; + + // Set if $out is writing to a time-series collection. This is how $out determines if it is + // writing to a time-series collection or not. + boost::optional _timeseries; + + // Set to true if the stage has not initialized or the view was successfully created. + // Used by the destructor to determine if the "real" buckets collection should be destroyed. 
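The overridden `spill()` shown just above routes each completed batch either through the time-series insert path (addressed at the view namespace, so the documents are bucketed on the way in) or through the ordinary insert path against the temporary collection. A small sketch of that dispatch, with hypothetical free functions in place of the process-interface methods:

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct Doc { std::string json; };  // minimal stand-in for a document

// Hypothetical write paths; in the real stage these are process-interface calls.
void insertPlain(const std::string& ns, std::vector<Doc> batch) {
    std::cout << "insert " << batch.size() << " docs into " << ns << "\n";
}
void insertTimeseries(const std::string& viewNs, std::vector<Doc> batch) {
    std::cout << "insert " << batch.size() << " measurements via view " << viewNs << "\n";
}

// Sketch of the spill dispatch: time-series batches go through the view namespace so the
// measurements are bucketed on insert; everything else goes straight to the temp collection.
void spillBatch(bool isTimeseries, const std::string& tempNs, const std::string& tempViewNs,
                std::vector<Doc>&& batch) {
    if (isTimeseries)
        insertTimeseries(tempViewNs, std::move(batch));
    else
        insertPlain(tempNs, std::move(batch));
}
```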
+ bool _timeseriesStateConsistent = true; }; } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_out.idl b/src/mongo/db/pipeline/document_source_out.idl new file mode 100644 index 0000000000000..9581c3de80013 --- /dev/null +++ b/src/mongo/db/pipeline/document_source_out.idl @@ -0,0 +1,62 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +# Document source out stage IDL file + +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + - "mongo/db/timeseries/timeseries.idl" + +structs: + + DocumentSourceOutSpec: + description: "$out pipeline spec" + strict: true + query_shape_component: true + fields: + coll: + description: "Target collection name to write documents from $out to." + type: string + optional: false + query_shape: anonymize + db: + description: "Target database name to write documents from $out to." + type: string + optional: false + query_shape: anonymize + timeseries: + cpp_name: timeseries + description: "If set, the aggregation stage will use these options to create or + replace a time-series collection in the given namespace." + type: TimeseriesOptions + optional: true + query_shape: literal + diff --git a/src/mongo/db/pipeline/document_source_out_test.cpp b/src/mongo/db/pipeline/document_source_out_test.cpp index b31d6964cf430..27521678d2918 100644 --- a/src/mongo/db/pipeline/document_source_out_test.cpp +++ b/src/mongo/db/pipeline/document_source_out_test.cpp @@ -27,15 +27,31 @@ * it in the license file. 
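The new `document_source_out.idl` above declares the `$out` argument as a strict struct with required `coll`/`db` fields and an optional `timeseries` block; the IDL compiler turns that declaration into the generated parser and serializer used by the stage. A hand-written approximation of the shape such a type provides (purely illustrative, not the generated code):

```cpp
#include <optional>
#include <stdexcept>
#include <string>
#include <utility>

// Stand-in for the imported time-series options type (illustrative only).
struct TimeseriesSpec {
    std::string timeField;
    std::optional<std::string> metaField;
};

// Roughly what the IDL-declared struct provides: required fields must be present, optional
// fields may be absent, and (because the struct is 'strict') unknown fields are rejected by
// the generated parser.
struct OutSpec {
    std::string coll;                          // required
    std::string db;                            // required
    std::optional<TimeseriesSpec> timeseries;  // optional

    static OutSpec parse(std::string coll, std::string db, std::optional<TimeseriesSpec> ts) {
        if (coll.empty() || db.empty())
            throw std::invalid_argument("'coll' and 'db' are required");
        return OutSpec{std::move(coll), std::move(db), std::move(ts)};
    }
};
```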
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include +#include +#include +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_out.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -92,7 +108,7 @@ TEST_F(DocumentSourceOutTest, FailsToParseIncorrectType) { ASSERT_THROWS_CODE(createOutStage(spec), AssertionException, 16990); spec = BSON("$out" << BSONObj()); - ASSERT_THROWS_CODE(createOutStage(spec), AssertionException, 16994); + ASSERT_THROWS_CODE(createOutStage(spec), AssertionException, 40414); } TEST_F(DocumentSourceOutTest, AcceptsStringArgument) { @@ -115,8 +131,7 @@ TEST_F(DocumentSourceOutTest, SerializeToString) { ASSERT_EQ(reSerialized["$out"]["coll"].getStringData(), "some_collection"); } -TEST_F(DocumentSourceOutTest, Redaction) { - // TODO SERVER-75110 test support for redaction with timeseries options +TEST_F(DocumentSourceOutTest, RedactionNoTimeseries) { auto spec = fromjson(R"({ $out: { db: "foo", @@ -128,8 +143,40 @@ TEST_F(DocumentSourceOutTest, Redaction) { ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({ $out: { - db: "HASH", - coll: "HASH" + coll: "HASH", + db: "HASH" + } + })", + redact(*docSource)); +} + +TEST_F(DocumentSourceOutTest, RedactionTimeseries) { + auto spec = fromjson(R"({ + $out: { + db: "foo", + coll: "bar", + timeseries: { + timeField: "time", + metaField: "meta", + granularity: "minutes", + bucketRoundingSeconds: 300, + bucketMaxSpanSeconds: 300 + } + } + })"); + auto docSource = DocumentSourceOut::createFromBson(spec.firstElement(), getExpCtx()); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$out": { + "coll": "HASH", + "db": "HASH", + "timeseries": { + "timeField": "HASH": 1 } }, - "totalDataSizeSortedBytesEstimate": "?", - "usedDisk": "?", - "spills": "?", - "spilledDataStorageSize": "?" + "totalDataSizeSortedBytesEstimate": "?number", + "usedDisk": "?bool", + "spills": "?number", + "spilledDataStorageSize": "?number" })", redact(*sort(), true, ExplainOptions::Verbosity::kExecStats)); @@ -605,14 +616,14 @@ TEST_F(DocumentSourceSortTest, Redaction) { }, "bound": { "base": "min", - "offsetSeconds": "?" + "offsetSeconds": "?number" }, - "limit": "?" + "limit": "?number" }, - "totalDataSizeSortedBytesEstimate": "?", - "usedDisk": "?", - "spills": "?", - "spilledDataStorageSize": "?" + "totalDataSizeSortedBytesEstimate": "?number", + "usedDisk": "?bool", + "spills": "?number", + "spilledDataStorageSize": "?number" })", redact(*boundedSort, true, ExplainOptions::Verbosity::kExecStats)); } diff --git a/src/mongo/db/pipeline/document_source_streaming_group.cpp b/src/mongo/db/pipeline/document_source_streaming_group.cpp index 4b6dea86d2232..cfe02b82cf098 100644 --- a/src/mongo/db/pipeline/document_source_streaming_group.cpp +++ b/src/mongo/db/pipeline/document_source_streaming_group.cpp @@ -27,23 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include #include +#include +#include + +#include +#include +#include +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/accumulation_statement.h" -#include "mongo/db/pipeline/accumulator.h" -#include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_streaming_group.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/expression_dependencies.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" -#include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/util/destructor_guard.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -95,10 +104,10 @@ boost::intrusive_ptr DocumentSourceStreamingGroup: for (auto&& statement : accumulationStatements) { groupStage->addAccumulator(statement); } - tassert(7026709, + uassert(7026709, "streaming group must have at least one monotonic id expression", !monotonicExpressionIndexes.empty()); - tassert(7026710, + uassert(7026710, "streaming group monotonic expression indexes must correspond to id expressions", std::all_of(monotonicExpressionIndexes.begin(), monotonicExpressionIndexes.end(), @@ -250,7 +259,7 @@ bool DocumentSourceStreamingGroup::checkForBatchEndAndUpdateLastIdValues( // of the exact same array could appear in the input sequence, but with a different array in // the middle of them, and that would still be considered sorted. That would break our // batching group logic. - tassert(7026708, + uassert(7026708, "Monotonic value should not be missing, null or an array", !value.nullish() && !value.isArray()); return value; diff --git a/src/mongo/db/pipeline/document_source_streaming_group.h b/src/mongo/db/pipeline/document_source_streaming_group.h index d48c87fbee1b4..153ad08a7214c 100644 --- a/src/mongo/db/pipeline/document_source_streaming_group.h +++ b/src/mongo/db/pipeline/document_source_streaming_group.h @@ -29,10 +29,25 @@ #pragma once +#include +#include +#include +#include +#include #include #include - +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group_base.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_tee_consumer.cpp b/src/mongo/db/pipeline/document_source_tee_consumer.cpp index ae1260b17e6d5..a6bb376e87ed8 100644 --- a/src/mongo/db/pipeline/document_source_tee_consumer.cpp +++ b/src/mongo/db/pipeline/document_source_tee_consumer.cpp @@ -27,13 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/document_source_tee_consumer.h" -#include -#include -#include +#include +#include #include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/expression_context.h" diff --git a/src/mongo/db/pipeline/document_source_tee_consumer.h b/src/mongo/db/pipeline/document_source_tee_consumer.h index eaac8906326ac..4345e300b2c5d 100644 --- a/src/mongo/db/pipeline/document_source_tee_consumer.h +++ b/src/mongo/db/pipeline/document_source_tee_consumer.h @@ -30,11 +30,25 @@ #pragma once #include +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" #include "mongo/db/pipeline/tee_buffer.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_telemetry.cpp b/src/mongo/db/pipeline/document_source_telemetry.cpp deleted file mode 100644 index 4d81065855fdb..0000000000000 --- a/src/mongo/db/pipeline/document_source_telemetry.cpp +++ /dev/null @@ -1,167 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/pipeline/document_source_telemetry.h" - -#include "mongo/bson/timestamp.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/debug_util.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery - -namespace mongo { - -REGISTER_DOCUMENT_SOURCE_WITH_FEATURE_FLAG(telemetry, - DocumentSourceTelemetry::LiteParsed::parse, - DocumentSourceTelemetry::createFromBson, - AllowedWithApiStrict::kNeverInVersion1, - feature_flags::gFeatureFlagTelemetry); - -bool parseTelemetryEmbeddedObject(BSONObj embeddedObj) { - auto fieldNameRedaction = false; - if (!embeddedObj.isEmpty()) { - uassert(ErrorCodes::FailedToParse, - str::stream() - << DocumentSourceTelemetry::kStageName - << " parameters object may only contain one field, 'redactIdentifiers'. Found: " - << embeddedObj.toString(), - embeddedObj.nFields() == 1); - - uassert(ErrorCodes::FailedToParse, - str::stream() - << DocumentSourceTelemetry::kStageName - << " parameters object may only contain 'redactIdentifiers' option. Found: " - << embeddedObj.firstElementFieldName(), - embeddedObj.hasField("redactIdentifiers")); - - uassert(ErrorCodes::FailedToParse, - str::stream() << DocumentSourceTelemetry::kStageName - << " redactIdentifiers parameter must be boolean. Found type: " - << typeName(embeddedObj.firstElementType()), - embeddedObj.firstElementType() == BSONType::Bool); - fieldNameRedaction = embeddedObj["redactIdentifiers"].trueValue(); - } - return fieldNameRedaction; -} - -std::unique_ptr DocumentSourceTelemetry::LiteParsed::parse( - const NamespaceString& nss, const BSONElement& spec) { - uassert(ErrorCodes::FailedToParse, - str::stream() << kStageName - << " value must be an object. Found: " << typeName(spec.type()), - spec.type() == BSONType::Object); - - return std::make_unique( - spec.fieldName(), parseTelemetryEmbeddedObject(spec.embeddedObject())); -} - -boost::intrusive_ptr DocumentSourceTelemetry::createFromBson( - BSONElement spec, const boost::intrusive_ptr& pExpCtx) { - uassert(ErrorCodes::FailedToParse, - str::stream() << kStageName - << " value must be an object. Found: " << typeName(spec.type()), - spec.type() == BSONType::Object); - - const NamespaceString& nss = pExpCtx->ns; - - uassert(ErrorCodes::InvalidNamespace, - "$telemetry must be run against the 'admin' database with {aggregate: 1}", - nss.db() == DatabaseName::kAdmin.db() && nss.isCollectionlessAggregateNS()); - - return new DocumentSourceTelemetry(pExpCtx, - parseTelemetryEmbeddedObject(spec.embeddedObject())); -} - -Value DocumentSourceTelemetry::serialize(SerializationOptions opts) const { - // This document source never contains any user information, so no need for any work when - // redacting. - return Value{Document{{kStageName, Document{}}}}; -} - -DocumentSource::GetNextResult DocumentSourceTelemetry::doGetNext() { - /** - * We maintain nested iterators: - * - Outer one over the set of partitions. - * - Inner one over the set of entries in a "materialized" partition. - * - * When an inner iterator is present and contains more elements, we can return the next element. - * When the inner iterator is exhausted, we move to the next element in the outer iterator and - * create a new inner iterator. When the outer iterator is exhausted, we have finished iterating - * over the telemetry store entries. - * - * The inner iterator iterates over a materialized container of all entries in the partition. - * This is done to reduce the time under which the partition lock is held. 
- */ - while (true) { - // First, attempt to exhaust all elements in the materialized partition. - if (!_materializedPartition.empty()) { - // Move out of the container reference. - auto doc = std::move(_materializedPartition.front()); - _materializedPartition.pop_front(); - return {std::move(doc)}; - } - - TelemetryStore& _telemetryStore = getTelemetryStore(getContext()->opCtx); - - // Materialized partition is exhausted, move to the next. - _currentPartition++; - if (_currentPartition >= _telemetryStore.numPartitions()) { - return DocumentSource::GetNextResult::makeEOF(); - } - - // We only keep the partition (which holds a lock) for the time needed to materialize it to - // a set of Document instances. - auto&& partition = _telemetryStore.getPartition(_currentPartition); - - // Capture the time at which reading the partition begins to indicate to the caller - // when the snapshot began. - const auto partitionReadTime = - Timestamp{Timestamp(Date_t::now().toMillisSinceEpoch() / 1000, 0)}; - for (auto&& [key, metrics] : *partition) { - auto swKey = metrics->redactKey(key, _redactIdentifiers, pExpCtx->opCtx); - if (!swKey.isOK()) { - LOGV2_DEBUG(7349403, - 3, - "Error encountered when redacting query shape, will not publish " - "telemetry for this entry.", - "status"_attr = swKey.getStatus()); - if (kDebugBuild) { - tasserted(7349401, - "Was not able to re-parse telemetry key when reading telemetry."); - } - continue; - } - _materializedPartition.push_back({{"key", std::move(swKey.getValue())}, - {"metrics", metrics->toBSON()}, - {"asOf", partitionReadTime}}); - } - } -} - -} // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_telemetry.h b/src/mongo/db/pipeline/document_source_telemetry.h deleted file mode 100644 index fff298025f04a..0000000000000 --- a/src/mongo/db/pipeline/document_source_telemetry.h +++ /dev/null @@ -1,136 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include "mongo/db/pipeline/document_source.h" -#include "mongo/db/pipeline/lite_parsed_document_source.h" -#include "mongo/db/query/telemetry.h" -#include "mongo/util/producer_consumer_queue.h" - -namespace mongo { - -using namespace telemetry; - -class DocumentSourceTelemetry final : public DocumentSource { -public: - static constexpr StringData kStageName = "$telemetry"_sd; - - class LiteParsed final : public LiteParsedDocumentSource { - public: - static std::unique_ptr parse(const NamespaceString& nss, - const BSONElement& spec); - - LiteParsed(std::string parseTimeName, bool redactIdentifiers) - : LiteParsedDocumentSource(std::move(parseTimeName)), - _redactIdentifiers(redactIdentifiers) {} - - stdx::unordered_set getInvolvedNamespaces() const override { - return stdx::unordered_set(); - } - - PrivilegeVector requiredPrivileges(bool isMongos, - bool bypassDocumentValidation) const override { - return {Privilege(ResourcePattern::forClusterResource(), ActionType::telemetryRead)}; - ; - } - - bool allowedToPassthroughFromMongos() const final { - // $telemetry must be run locally on a mongod. - return false; - } - - bool isInitialSource() const final { - return true; - } - - void assertSupportsMultiDocumentTransaction() const { - transactionNotSupported(kStageName); - } - - bool _redactIdentifiers; - }; - - static boost::intrusive_ptr createFromBson( - BSONElement elem, const boost::intrusive_ptr& pExpCtx); - - virtual ~DocumentSourceTelemetry() = default; - - StageConstraints constraints( - Pipeline::SplitState = Pipeline::SplitState::kUnsplit) const override { - StageConstraints constraints{StreamType::kStreaming, - PositionRequirement::kFirst, - HostTypeRequirement::kLocalOnly, - DiskUseRequirement::kNoDiskUse, - FacetRequirement::kNotAllowed, - TransactionRequirement::kNotAllowed, - LookupRequirement::kNotAllowed, - UnionRequirement::kNotAllowed}; - - constraints.requiresInputDocSource = false; - constraints.isIndependentOfAnyCollection = true; - return constraints; - } - - boost::optional distributedPlanLogic() final { - return boost::none; - } - - const char* getSourceName() const override { - return kStageName.rawData(); - } - - Value serialize(SerializationOptions opts = SerializationOptions()) const final override; - - void addVariableRefs(std::set* refs) const final {} - -private: - DocumentSourceTelemetry(const boost::intrusive_ptr& expCtx, - bool redactIdentifiers = false) - : DocumentSource(kStageName, expCtx), _redactIdentifiers(redactIdentifiers) {} - - GetNextResult doGetNext() final; - - /** - * The current partition materialized as a set of Document instances. We pop from the queue and - * return DocumentSource results. - */ - std::deque _materializedPartition; - - /** - * Iterator over all telemetry partitions. This is incremented when we exhaust the current - * _materializedPartition. - */ - TelemetryStore::PartitionId _currentPartition = -1; - - // When true, redact field names from returned query shapes. - bool _redactIdentifiers; -}; - -} // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_telemetry_test.cpp b/src/mongo/db/pipeline/document_source_telemetry_test.cpp deleted file mode 100644 index 25e2416450729..0000000000000 --- a/src/mongo/db/pipeline/document_source_telemetry_test.cpp +++ /dev/null @@ -1,96 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. 
- * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/pipeline/aggregation_context_fixture.h" -#include "mongo/db/pipeline/document_source_telemetry.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/str.h" - -namespace mongo { -namespace { - -/** - * Subclass AggregationContextFixture to set the ExpressionContext's namespace to 'admin' with - * {aggregate: 1} by default, so that parsing tests other than those which validate the namespace do - * not need to explicitly set it. 
- */ -class DocumentSourceTelemetryTest : public AggregationContextFixture { -public: - DocumentSourceTelemetryTest() - : AggregationContextFixture( - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "admin"))) { - } -}; - -TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfSpecIsNotObject) { - ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson( - fromjson("{$telemetry: 1}").firstElement(), getExpCtx()), - AssertionException, - ErrorCodes::FailedToParse); -} - -TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfNotRunOnAdmin) { - getExpCtx()->ns = - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "foo")); - ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson( - fromjson("{$telemetry: {}}").firstElement(), getExpCtx()), - AssertionException, - ErrorCodes::InvalidNamespace); -} - -TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfNotRunWithAggregateOne) { - getExpCtx()->ns = NamespaceString::createNamespaceString_forTest("admin.foo"); - ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson( - fromjson("{$telemetry: {}}").firstElement(), getExpCtx()), - AssertionException, - ErrorCodes::InvalidNamespace); -} - -TEST_F(DocumentSourceTelemetryTest, ShouldFailToParseIfUnrecognisedParameterSpecified) { - ASSERT_THROWS_CODE(DocumentSourceTelemetry::createFromBson( - fromjson("{$telemetry: {foo: true}}").firstElement(), getExpCtx()), - AssertionException, - ErrorCodes::FailedToParse); -} - -TEST_F(DocumentSourceTelemetryTest, ParseAndSerialize) { - auto obj = fromjson("{$telemetry: {}}"); - auto doc = DocumentSourceTelemetry::createFromBson(obj.firstElement(), getExpCtx()); - auto telemetryOp = static_cast(doc.get()); - auto expected = Document{{"$telemetry", Document{}}}; - ASSERT_DOCUMENT_EQ(telemetryOp->serialize().getDocument(), expected); -} - -} // namespace -} // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_union_with.cpp b/src/mongo/db/pipeline/document_source_union_with.cpp index fd745b37e0ca6..56c4673d7bd31 100644 --- a/src/mongo/db/pipeline/document_source_union_with.cpp +++ b/src/mongo/db/pipeline/document_source_union_with.cpp @@ -28,19 +28,37 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include -#include "mongo/db/commands/test_commands_enabled.h" +#include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/document_source_documents.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_queue.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/document_source_union_with.h" #include "mongo/db/pipeline/document_source_union_with_gen.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/views/resolved_view.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -75,7 +93,10 @@ std::unique_ptr 
buildPipelineFromViewDefinition( opts.validator = validatorCallback; return Pipeline::makePipelineFromViewDefinition( - expCtx->copyForSubPipeline(expCtx->ns, resolvedNs.uuid), resolvedNs, currentPipeline, opts); + expCtx->copyForSubPipeline(expCtx->ns, resolvedNs.uuid), + resolvedNs, + std::move(currentPipeline), + opts); } } // namespace @@ -235,7 +256,7 @@ DocumentSource::GetNextResult DocumentSourceUnionWith::doGetNext() { _pipeline = buildPipelineFromViewDefinition( pExpCtx, ExpressionContext::ResolvedNamespace{e->getNamespace(), e->getPipeline()}, - serializedPipe); + std::move(serializedPipe)); logShardedViewFound(e); return doGetNext(); } @@ -327,13 +348,8 @@ void DocumentSourceUnionWith::doDispose() { } Value DocumentSourceUnionWith::serialize(SerializationOptions opts) const { - auto explain = opts.verbosity; - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484307); - } - auto collectionless = _pipeline->getContext()->ns.isCollectionlessAggregateNS(); - if (explain) { + if (opts.verbosity) { // There are several different possible states depending on the explain verbosity as well as // the other stages in the pipeline: // * If verbosity is queryPlanner, then the sub-pipeline should be untouched and we can @@ -343,9 +359,9 @@ Value DocumentSourceUnionWith::serialize(SerializationOptions opts) const { // $limit stage after the $unionWith which results in only reading from the base collection // branch and not the sub-pipeline. Pipeline* pipeCopy = nullptr; - if (*explain == ExplainOptions::Verbosity::kQueryPlanner) { + if (*opts.verbosity == ExplainOptions::Verbosity::kQueryPlanner) { pipeCopy = Pipeline::create(_pipeline->getSources(), _pipeline->getContext()).release(); - } else if (*explain >= ExplainOptions::Verbosity::kExecStats && + } else if (*opts.verbosity >= ExplainOptions::Verbosity::kExecStats && _executionState > ExecutionProgress::kIteratingSource) { // We've either exhausted the sub-pipeline or at least started iterating it. Use the // cached pipeline to get the explain output since the '_pipeline' may have been @@ -355,32 +371,33 @@ Value DocumentSourceUnionWith::serialize(SerializationOptions opts) const { // The plan does not require reading from the sub-pipeline, so just include the // serialization in the explain output. BSONArrayBuilder bab; - for (auto&& stage : _pipeline->serialize(explain)) + for (auto&& stage : _pipeline->serialize(opts)) bab << stage; auto spec = collectionless ? DOC("pipeline" << bab.arr()) - : DOC("coll" << _pipeline->getContext()->ns.coll() << "pipeline" << bab.arr()); + : DOC("coll" << opts.serializeIdentifier(_pipeline->getContext()->ns.coll()) + << "pipeline" << bab.arr()); return Value(DOC(getSourceName() << spec)); } invariant(pipeCopy); BSONObj explainLocal = - pExpCtx->mongoProcessInterface->preparePipelineAndExplain(pipeCopy, *explain); + pExpCtx->mongoProcessInterface->preparePipelineAndExplain(pipeCopy, *opts.verbosity); LOGV2_DEBUG(4553501, 3, "$unionWith attached cursor to pipeline for explain"); // We expect this to be an explanation of a pipeline -- there should only be one field. invariant(explainLocal.nFields() == 1); - auto spec = collectionless ? DOC("pipeline" << explainLocal.firstElement()) - : DOC("coll" << _pipeline->getContext()->ns.coll() << "pipeline" - << explainLocal.firstElement()); + auto spec = collectionless + ? 
DOC("pipeline" << explainLocal.firstElement()) + : DOC("coll" << opts.serializeIdentifier(_pipeline->getContext()->ns.coll()) + << "pipeline" << explainLocal.firstElement()); return Value(DOC(getSourceName() << spec)); } else { - BSONArrayBuilder bab; - for (auto&& stage : _pipeline->serialize()) - bab << stage; + auto serializedPipeline = _pipeline->serializeToBson(opts); auto spec = collectionless - ? DOC("pipeline" << bab.arr()) - : DOC("coll" << _pipeline->getContext()->ns.coll() << "pipeline" << bab.arr()); + ? DOC("pipeline" << serializedPipeline) + : DOC("coll" << opts.serializeIdentifier(_pipeline->getContext()->ns.coll()) + << "pipeline" << serializedPipeline); return Value(DOC(getSourceName() << spec)); } } diff --git a/src/mongo/db/pipeline/document_source_union_with.h b/src/mongo/db/pipeline/document_source_union_with.h index 3b1dbb194e560..80c6ac6a9f332 100644 --- a/src/mongo/db/pipeline/document_source_union_with.h +++ b/src/mongo/db/pipeline/document_source_union_with.h @@ -29,13 +29,43 @@ #pragma once +#include #include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/stats/counters.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/document_source_union_with_test.cpp b/src/mongo/db/pipeline/document_source_union_with_test.cpp index ece28cc0d6007..4c6553612bbae 100644 --- a/src/mongo/db/pipeline/document_source_union_with_test.cpp +++ b/src/mongo/db/pipeline/document_source_union_with_test.cpp @@ -27,16 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include #include #include -#include #include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_comparator.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source.h" @@ -46,12 +54,17 @@ #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/document_source_replace_root.h" #include "mongo/db/pipeline/document_source_union_with.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" namespace mongo { namespace { @@ -609,6 +622,64 @@ TEST_F(DocumentSourceUnionWithTest, IncrementNestedAggregateOpCounterOnCreateBut testOpCounter(NamespaceString::createNamespaceString_forTest("local", "testColl"), 0); } +TEST_F(DocumentSourceUnionWithTest, RedactsCorrectlyBasic) { + auto expCtx = getExpCtx(); + auto nsToUnionWith = + NamespaceString::createNamespaceString_forTest(expCtx->ns.dbName(), "coll"); + expCtx->setResolvedNamespaces(StringMap{ + {nsToUnionWith.coll().toString(), {nsToUnionWith, std::vector()}}}); + + auto docSource = DocumentSourceUnionWith::createFromBson( + BSON("$unionWith" << nsToUnionWith.coll()).firstElement(), expCtx); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$unionWith": { + "coll": "HASH", + "pipeline": [] + } + })", + redact(*docSource)); +} + +TEST_F(DocumentSourceUnionWithTest, RedactsCorrectlyWithPipeline) { + auto expCtx = getExpCtx(); + auto nsToUnionWith = + NamespaceString::createNamespaceString_forTest(expCtx->ns.dbName(), "coll"); + expCtx->setResolvedNamespaces(StringMap{ + {nsToUnionWith.coll().toString(), {nsToUnionWith, std::vector()}}}); + + BSONArrayBuilder pipeline; + pipeline << BSON("$match" << BSON("a" << 15)); + pipeline << BSON("$project" << BSON("a" << 1 << "b" << 1)); + auto docSource = DocumentSourceUnionWith::createFromBson( + BSON("$unionWith" << BSON("coll" << nsToUnionWith.coll() << "pipeline" << pipeline.arr())) + .firstElement(), + expCtx); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "$unionWith": { + "coll": "HASH", + "pipeline": [ + { + "$match": { + "HASH": { + "$eq": "?number" + } + } + }, + { + "$project": { + "HASH<_id>": true, + "HASH": true, + "HASH": true + } + } + ] + } + })", + redact(*docSource)); +} + using DocumentSourceUnionWithServerlessTest = ServerlessAggregationContextFixture; TEST_F(DocumentSourceUnionWithServerlessTest, diff --git a/src/mongo/db/pipeline/document_source_unwind.cpp b/src/mongo/db/pipeline/document_source_unwind.cpp index b5ae2df48654f..0d588ce57df62 100644 --- a/src/mongo/db/pipeline/document_source_unwind.cpp +++ b/src/mongo/db/pipeline/document_source_unwind.cpp @@ -27,18 +27,32 @@ * it in the license file. 
*/ -#include "mongo/db/pipeline/document_source_limit.h" -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/document_source_unwind.h" - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression_algo.h" +#include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/document_source_unwind.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { @@ -46,53 +60,6 @@ using boost::intrusive_ptr; using std::string; using std::vector; -/** Helper class to unwind array from a single document. */ -class DocumentSourceUnwind::Unwinder { -public: - Unwinder(const FieldPath& unwindPath, - bool preserveNullAndEmptyArrays, - const boost::optional& indexPath, - bool strict); - /** Reset the unwinder to unwind a new document. */ - void resetDocument(const Document& document); - - /** - * @return the next document unwound from the document provided to resetDocument(), using - * the current value in the array located at the provided unwindPath. - * - * Returns boost::none if the array is exhausted. - */ - DocumentSource::GetNextResult getNext(); - -private: - // Tracks whether or not we can possibly return any more documents. Note we may return - // boost::none even if this is true. - bool _haveNext = false; - - // Path to the array to unwind. - const FieldPath _unwindPath; - - // Documents that have a nullish value, or an empty array for the field '_unwindPath', will pass - // through the $unwind stage unmodified if '_preserveNullAndEmptyArrays' is true. - const bool _preserveNullAndEmptyArrays; - - // If set, the $unwind stage will include the array index in the specified path, overwriting any - // existing value, setting to null when the value was a non-array or empty array. - const boost::optional _indexPath; - // Specifies if input to $unwind is required to be an array. - const bool _strict; - - Value _inputArray; - - MutableDocument _output; - - // Document indexes of the field path components. - vector _unwindPathFieldIndexes; - - // Index into the _inputArray to return next. - size_t _index = 0; -}; - DocumentSourceUnwind::Unwinder::Unwinder(const FieldPath& unwindPath, bool preserveNullAndEmptyArrays, const boost::optional& indexPath, @@ -297,7 +264,7 @@ Value DocumentSourceUnwind::serialize(SerializationOptions opts) const { return Value(DOC( getSourceName() << DOC( "path" << opts.serializeFieldPathWithPrefix(_unwindPath) << "preserveNullAndEmptyArrays" - << (_preserveNullAndEmptyArrays ? opts.serializeLiteralValue(true) : Value()) + << (_preserveNullAndEmptyArrays ? opts.serializeLiteral(true) : Value()) << "includeArrayIndex" << (_indexPath ? 
Value(opts.serializeFieldPath(*_indexPath)) : Value())))); } diff --git a/src/mongo/db/pipeline/document_source_unwind.h b/src/mongo/db/pipeline/document_source_unwind.h index f2b97085d334a..1f59c98add8f2 100644 --- a/src/mongo/db/pipeline/document_source_unwind.h +++ b/src/mongo/db/pipeline/document_source_unwind.h @@ -29,10 +29,32 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_internal.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { @@ -138,4 +160,51 @@ class DocumentSourceUnwind final : public DocumentSource { boost::optional _smallestLimitPushedDown; }; +/** Helper class to unwind array from a single document. */ +class DocumentSourceUnwind::Unwinder { +public: + Unwinder(const FieldPath& unwindPath, + bool preserveNullAndEmptyArrays, + const boost::optional& indexPath, + bool strict); + /** Reset the unwinder to unwind a new document. */ + void resetDocument(const Document& document); + + /** + * @return the next document unwound from the document provided to resetDocument(), using + * the current value in the array located at the provided unwindPath. + * + * Returns boost::none if the array is exhausted. + */ + DocumentSource::GetNextResult getNext(); + +private: + // Tracks whether or not we can possibly return any more documents. Note we may return + // boost::none even if this is true. + bool _haveNext = false; + + // Path to the array to unwind. + const FieldPath _unwindPath; + + // Documents that have a nullish value, or an empty array for the field '_unwindPath', will pass + // through the $unwind stage unmodified if '_preserveNullAndEmptyArrays' is true. + const bool _preserveNullAndEmptyArrays; + + // If set, the $unwind stage will include the array index in the specified path, overwriting any + // existing value, setting to null when the value was a non-array or empty array. + const boost::optional _indexPath; + // Specifies if input to $unwind is required to be an array. + const bool _strict; + + Value _inputArray; + + MutableDocument _output; + + // Document indexes of the field path components. + std::vector _unwindPathFieldIndexes; + + // Index into the _inputArray to return next. + size_t _index = 0; +}; + } // namespace mongo diff --git a/src/mongo/db/pipeline/document_source_unwind_test.cpp b/src/mongo/db/pipeline/document_source_unwind_test.cpp index d477873c98a09..4129ca91d6578 100644 --- a/src/mongo/db/pipeline/document_source_unwind_test.cpp +++ b/src/mongo/db/pipeline/document_source_unwind_test.cpp @@ -27,19 +27,22 @@ * it in the license file. 
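The `Unwinder` helper moved into the header above walks a single document's array field and emits one output per element, optionally writing the element's index and optionally passing null or empty values through unchanged. A minimal array-unwind loop in the same spirit, using plain standard types rather than the server's Document/Value machinery:

```cpp
#include <cstddef>
#include <optional>
#include <string>
#include <vector>

struct Row {
    std::string value;            // the unwound element
    std::optional<size_t> index;  // set when an "includeArrayIndex"-style path was requested
};

// Unwinds 'elems' into one Row per element. With preserveNullAndEmptyArrays set, an empty
// input still yields a single row (with no element value and no index), loosely matching how
// $unwind keeps a document whose array field is empty when that option is enabled.
std::vector<Row> unwind(const std::vector<std::string>& elems,
                        bool preserveNullAndEmptyArrays,
                        bool includeArrayIndex) {
    std::vector<Row> out;
    if (elems.empty()) {
        if (preserveNullAndEmptyArrays)
            out.push_back(Row{std::string{}, std::nullopt});
        return out;
    }
    for (size_t i = 0; i < elems.size(); ++i)
        out.push_back(Row{elems[i],
                          includeArrayIndex ? std::optional<size_t>(i) : std::nullopt});
    return out;
}
```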
*/ -#include "mongo/platform/basic.h" - -#include #include #include #include #include +#include +#include +#include +#include + #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/dependencies.h" @@ -48,7 +51,11 @@ #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/query_test_service_context.h" #include "mongo/db/service_context.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -830,7 +837,7 @@ TEST_F(UnwindStageTest, Redaction) { R"({ "$unwind": { "path": "$HASH.HASH", - "preserveNullAndEmptyArrays": "?", + "preserveNullAndEmptyArrays": "?bool", "includeArrayIndex": "HASH.HASH" } })", diff --git a/src/mongo/db/pipeline/document_source_writer.h b/src/mongo/db/pipeline/document_source_writer.h index 96958928d70cf..752eb49288f9f 100644 --- a/src/mongo/db/pipeline/document_source_writer.h +++ b/src/mongo/db/pipeline/document_source_writer.h @@ -36,8 +36,11 @@ #include "mongo/db/db_raii.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/read_concern.h" #include "mongo/db/storage/recovery_unit.h" +#include "mongo/rpc/metadata/impersonated_user_metadata.h" +#include "mongo/s/write_ops/batched_command_request.h" namespace mongo { using namespace fmt::literals; @@ -83,11 +86,14 @@ class DocumentSourceWriteBlock { /** * This is a base abstract class for all stages performing a write operation into an output * collection. The writes are organized in batches in which elements are objects of the templated - * type 'B'. A subclass must override two methods to be able to write into the output collection: + * type 'B'. A subclass must override the following methods to be able to write into the output + * collection: * - * 1. 'makeBatchObject()' - to create an object of type 'B' from the given 'Document', which is, + * - 'makeBatchObject()' - creates an object of type 'B' from the given 'Document', which is, * essentially, a result of the input source's 'getNext()' . - * 2. 'spill()' - to write the batch into the output collection. + * - 'spill()' - writes the batch into the output collection. + * - 'initializeBatchedWriteRequest()' - initializes the request object for writing a batch to + * the output collection. 
* * Two other virtual methods exist which a subclass may override: 'initialize()' and 'finalize()', * which are called before the first element is read from the input source, and after the last one @@ -99,6 +105,18 @@ class DocumentSourceWriter : public DocumentSource { using BatchObject = B; using BatchedObjects = std::vector; + static BatchedCommandRequest makeInsertCommand(const NamespaceString& outputNs, + bool bypassDocumentValidation) { + write_ops::InsertCommandRequest insertOp(outputNs); + insertOp.setWriteCommandRequestBase([&] { + write_ops::WriteCommandRequestBase wcb; + wcb.setOrdered(false); + wcb.setBypassDocumentValidation(bypassDocumentValidation); + return wcb; + }()); + return BatchedCommandRequest(std::move(insertOp)); + } + DocumentSourceWriter(const char* stageName, NamespaceString outputNs, const boost::intrusive_ptr& expCtx) @@ -145,9 +163,31 @@ class DocumentSourceWriter : public DocumentSource { virtual void finalize() {} /** - * Writes the documents in 'batch' to the output namespace. + * Writes the documents in 'batch' to the output namespace via 'bcr'. + */ + virtual void spill(BatchedCommandRequest&& bcr, BatchedObjects&& batch) = 0; + + /** + * Estimates the size of the header of a batch write (that is, the size of the write command + * minus the size of write statements themselves). */ - virtual void spill(BatchedObjects&& batch) = 0; + int estimateWriteHeaderSize(const BatchedCommandRequest& bcr) const { + using BatchType = BatchedCommandRequest::BatchType; + switch (bcr.getBatchType()) { + case BatchType::BatchType_Insert: + return _writeSizeEstimator->estimateInsertHeaderSize(bcr.getInsertRequest()); + case BatchType::BatchType_Update: + return _writeSizeEstimator->estimateUpdateHeaderSize(bcr.getUpdateRequest()); + case BatchType::BatchType_Delete: + break; + } + MONGO_UNREACHABLE; + } + + /** + * Constructs and configures a BatchedCommandRequest for performing a batch write. + */ + virtual BatchedCommandRequest initializeBatchedWriteRequest() const = 0; /** * Creates a batch object from the given document and returns it to the caller along with the @@ -203,9 +243,30 @@ DocumentSource::GetNextResult DocumentSourceWriter::doGetNext() { _initialized = true; } - BatchedObjects batch; - int bufferedBytes = 0; + // While most metadata attached to a command is limited to less than a KB, Impersonation + // metadata may grow to an arbitrary size. + // + // Ask the active Client how much impersonation metadata we'll use for it, add in our own + // estimate of write header size, and assume that the rest can fit in the space reserved by + // BSONObjMaxUserSize's overhead plus the value from the server parameter: + // internalQueryDocumentSourceWriterBatchExtraReservedBytes. 
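Note: the budget described in the comment above is plain arithmetic: the maximum user object size minus the fixed per-request overhead (impersonation metadata, write command header, and an extra reserved amount). A hedged sketch of that arithmetic, with made-up constants standing in for `BSONObjMaxUserSize` and the `internalQueryDocumentSourceWriterBatchExtraReservedBytes` knob:

```cpp
#include <cstdint>
#include <iostream>
#include <stdexcept>

// Illustrative constants only; the real values come from BSONObjMaxUserSize and the
// internalQueryDocumentSourceWriterBatchExtraReservedBytes server parameter.
constexpr int64_t kMaxUserObjectBytes = 16 * 1024 * 1024;  // assumed 16MB cap
constexpr int64_t kExtraReservedBytes = 4096;              // assumed knob value

// Compute how many bytes of documents a single batched write may carry once the
// per-request overhead (metadata + write command header) is set aside.
int64_t computeBatchBudget(int64_t metadataBytes, int64_t writeHeaderBytes) {
    const int64_t initialRequestSize = metadataBytes + writeHeaderBytes + kExtraReservedBytes;
    if (initialRequestSize > kMaxUserObjectBytes)
        throw std::runtime_error("metadata alone exceeds the maximum request size");
    return kMaxUserObjectBytes - initialRequestSize;
}

int main() {
    std::cout << computeBatchBudget(/*metadataBytes=*/2048, /*writeHeaderBytes=*/512) << "\n";
}
```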
+ const auto estimatedMetadataSizeBytes = + rpc::estimateImpersonatedUserMetadataSize(pExpCtx->opCtx); + + BatchedCommandRequest batchWrite = initializeBatchedWriteRequest(); + const auto writeHeaderSize = estimateWriteHeaderSize(batchWrite); + const auto initialRequestSize = estimatedMetadataSizeBytes + writeHeaderSize + + internalQueryDocumentSourceWriterBatchExtraReservedBytes.load(); + uassert(7637800, + "Unable to proceed with write while metadata size ({}KB) exceeds {}KB"_format( + initialRequestSize / 1024, BSONObjMaxUserSize / 1024), + initialRequestSize <= BSONObjMaxUserSize); + + const auto maxBatchSizeBytes = BSONObjMaxUserSize - initialRequestSize; + + BatchedObjects batch; + size_t bufferedBytes = 0; auto nextInput = pSource->getNext(); for (; nextInput.isAdvanced(); nextInput = pSource->getNext()) { waitWhileFailPointEnabled(); @@ -215,16 +276,17 @@ DocumentSource::GetNextResult DocumentSourceWriter::doGetNext() { bufferedBytes += objSize; if (!batch.empty() && - (bufferedBytes > BSONObjMaxUserSize || + (bufferedBytes > maxBatchSizeBytes || batch.size() >= write_ops::kMaxWriteBatchSize)) { - spill(std::move(batch)); + spill(std::move(batchWrite), std::move(batch)); batch.clear(); + batchWrite = initializeBatchedWriteRequest(); bufferedBytes = objSize; } batch.push_back(obj); } if (!batch.empty()) { - spill(std::move(batch)); + spill(std::move(batchWrite), std::move(batch)); batch.clear(); } diff --git a/src/mongo/db/pipeline/exchange_spec.idl b/src/mongo/db/pipeline/exchange_spec.idl index fd411056767e9..a765f159e1b77 100644 --- a/src/mongo/db/pipeline/exchange_spec.idl +++ b/src/mongo/db/pipeline/exchange_spec.idl @@ -46,25 +46,30 @@ enums: structs: ExchangeSpec: description: "exchange aggregation request specification" + query_shape_component: true fields: policy: type: ExchangePolicy description: A string indicating a policy of how documents are distributed to consumers. stability: stable + query_shape: parameter consumers: type: int description: Number of consumers. stability: stable + query_shape: literal orderPreserving: type: bool default: false description: A flag indicating documents are merged while preserving the order. stability: stable + query_shape: parameter bufferSize: type: int default: 16777216 description: The size of exchange buffers. stability: stable + query_shape: literal key: type: object default: "BSONObj()" @@ -74,14 +79,17 @@ structs: field listed here, or if any prefix of any path is multikey (i.e. an array is encountered while traversing a path listed here), then it is by definition sent to consumer 0. + query_shape: literal boundaries: type: array optional: true description: Range/hash split points. stability: stable + query_shape: literal consumerIds: type: array optional: true description: Mapping from a range index to a consumer id. stability: stable + query_shape: literal diff --git a/src/mongo/db/pipeline/expression.cpp b/src/mongo/db/pipeline/expression.cpp index 90d0ba1038a7e..5516b0921ded4 100644 --- a/src/mongo/db/pipeline/expression.cpp +++ b/src/mongo/db/pipeline/expression.cpp @@ -27,41 +27,79 @@ * it in the license file. 
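Note: the rewritten `doGetNext()` loop above buffers documents until either the byte budget or the statement-count cap would be exceeded, spills the batch, and then starts a fresh batch with the document that did not fit. A simplified sketch of that flush policy; `kMaxBatchBytes`, `kMaxBatchDocs`, and the `std::string` "documents" are illustrative assumptions, not the real types or limits.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Illustrative limits; the real code uses the budget derived from BSONObjMaxUserSize
// and write_ops::kMaxWriteBatchSize.
constexpr std::size_t kMaxBatchBytes = 64;  // assumed tiny budget for the demo
constexpr std::size_t kMaxBatchDocs = 3;    // assumed statement-count cap

void spill(std::vector<std::string>&& batch) {
    std::cout << "spilling " << batch.size() << " documents\n";
}

// Buffer incoming documents and flush whenever either the byte budget or the
// statement-count limit would be exceeded, mirroring the shape of the loop above.
void writeAll(const std::vector<std::string>& docs) {
    std::vector<std::string> batch;
    std::size_t bufferedBytes = 0;
    for (const auto& doc : docs) {
        const std::size_t objSize = doc.size();
        bufferedBytes += objSize;
        if (!batch.empty() &&
            (bufferedBytes > kMaxBatchBytes || batch.size() >= kMaxBatchDocs)) {
            spill(std::move(batch));
            batch.clear();
            bufferedBytes = objSize;  // the current document starts the next batch
        }
        batch.push_back(doc);
    }
    if (!batch.empty())
        spill(std::move(batch));
}

int main() {
    writeAll({"a", std::string(40, 'x'), std::string(40, 'y'), "b", "c", "d"});
}
```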
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/expression.h" +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include #include #include +// IWYU pragma: no_include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include "mongo/base/data_range.h" +#include "mongo/base/parse_number.h" +#include "mongo/bson/bsonelement_comparator_interface.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/basic_types.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/feature_compatibility_version_documentation.h" +#include "mongo/db/field_ref.h" #include "mongo/db/hasher.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_parser_gen.h" #include "mongo/db/pipeline/variable_validation.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/sort_pattern.h" +#include "mongo/db/query/str_trim_utils.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/db/record_id.h" #include "mongo/db/stats/counters.h" -#include "mongo/platform/bits.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/decimal128.h" +#include "mongo/platform/overflow_arithmetic.h" +#include "mongo/platform/random.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/errno_util.h" #include "mongo/util/pcre.h" #include "mongo/util/pcre_util.h" #include "mongo/util/str.h" #include "mongo/util/string_map.h" -#include "mongo/util/summation.h" namespace mongo { @@ -74,12 +112,18 @@ using std::string; using std::vector; /// Helper function to easily wrap constants with $const. -static Value serializeConstant(Value val) { +Value ExpressionConstant::serializeConstant(const SerializationOptions& opts, Value val) { if (val.missing()) { return Value("$$REMOVE"_sd); } + if (opts.literalPolicy == LiteralSerializationPolicy::kToDebugTypeString) { + return opts.serializeLiteral(val); + } - return Value(DOC("$const" << val)); + // Other serialization policies need to include this $const in order to be unambiguous for + // re-parsing this output later. If for example the constant was '$cashMoney' - we don't want to + // misinterpret it as a field path when parsing. 
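Note: the comment above is the key motivation for keeping the `$const` wrapper: a string literal such as `"$cashMoney"` must not be re-parsed as a field path, while the debug-shape policy replaces the literal with a type marker instead. A toy sketch of those two behaviors; the `serializeConstant()` helper and the JSON-ish output strings here are illustrative only.

```cpp
#include <iostream>
#include <string>

enum class LiteralPolicy { kUnchanged, kToDebugTypeString };

// Sketch of the two policies: debug shapes substitute a type marker, while
// re-parseable output wraps the literal in {$const: ...} so a value like
// "$cashMoney" is never mistaken for a field path.
std::string serializeConstant(LiteralPolicy policy, const std::string& literal) {
    if (policy == LiteralPolicy::kToDebugTypeString)
        return "\"?string\"";
    return "{\"$const\": \"" + literal + "\"}";
}

int main() {
    std::cout << serializeConstant(LiteralPolicy::kUnchanged, "$cashMoney") << "\n";
    std::cout << serializeConstant(LiteralPolicy::kToDebugTypeString, "$cashMoney") << "\n";
}
```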
+ return Value(DOC("$const" << opts.serializeLiteral(val))); } /* --------------------------- Expression ------------------------------ */ @@ -423,21 +467,21 @@ class AddState { case NumberInt: case NumberLong: if (overflow::add(longTotal, valToAdd.coerceToLong(), &longTotal)) { - uasserted(ErrorCodes::Overflow, "date overflow in $add"); + uasserted(ErrorCodes::Overflow, "date overflow"); } break; case NumberDouble: { using limits = std::numeric_limits; double doubleToAdd = valToAdd.coerceToDouble(); uassert(ErrorCodes::Overflow, - "date overflow in $add", + "date overflow", // The upper bound is exclusive because it rounds up when it is cast to // a double. doubleToAdd >= static_cast(limits::min()) && doubleToAdd < static_cast(limits::max())); if (overflow::add(longTotal, llround(doubleToAdd), &longTotal)) { - uasserted(ErrorCodes::Overflow, "date overflow in $add"); + uasserted(ErrorCodes::Overflow, "date overflow"); } break; } @@ -448,7 +492,7 @@ class AddState { std::int64_t longToAdd = decimalToAdd.toLong(&signalingFlags); if (signalingFlags != Decimal128::SignalingFlag::kNoFlag || overflow::add(longTotal, longToAdd, &longTotal)) { - uasserted(ErrorCodes::Overflow, "date overflow in $add"); + uasserted(ErrorCodes::Overflow, "date overflow"); } break; } @@ -548,7 +592,7 @@ intrusive_ptr ExpressionAnd::optimize() { */ const size_t n = pAnd->_children.size(); // ExpressionNary::optimize() generates an ExpressionConstant for {$and:[]}. - verify(n > 0); + MONGO_verify(n > 0); intrusive_ptr pLast(pAnd->_children[n - 1]); const ExpressionConstant* pConst = dynamic_cast(pLast.get()); if (!pConst) @@ -639,8 +683,10 @@ Value ExpressionArray::evaluate(const Document& root, Variables* variables) cons } Value ExpressionArray::serialize(SerializationOptions options) const { - if (options.replacementForLiteralArgs && selfAndChildrenAreConstant()) { - return serializeConstant(Value(options.replacementForLiteralArgs.get())); + if (options.literalPolicy != LiteralSerializationPolicy::kUnchanged && + selfAndChildrenAreConstant()) { + return ExpressionConstant::serializeConstant( + options, evaluate(Document{}, &(getExpressionContext()->variables))); } vector expressions; expressions.reserve(_children.size()); @@ -780,7 +826,7 @@ Value ExpressionObjectToArray::evaluate(const Document& root, Variables* variabl output.push_back(keyvalue.freezeToValue()); } - return Value(output); + return Value(std::move(output)); } REGISTER_STABLE_EXPRESSION(objectToArray, ExpressionObjectToArray::parse); @@ -1009,7 +1055,7 @@ intrusive_ptr ExpressionCompare::parse(ExpressionContext* const expC intrusive_ptr expr = new ExpressionCompare(expCtx, op); ExpressionVector args = parseArguments(expCtx, bsonExpr, vps); expr->validateArguments(args); - expr->_children = args; + expr->_children = std::move(args); return expr; } @@ -1163,7 +1209,7 @@ intrusive_ptr ExpressionCond::parse(ExpressionContext* const expCtx, if (expr.type() != Object) { return Base::parse(expCtx, expr, vps); } - verify(expr.fieldNameStringData() == "$cond"); + MONGO_verify(expr.fieldNameStringData() == "$cond"); intrusive_ptr ret = new ExpressionCond(expCtx); ret->_children.resize(3); @@ -1223,10 +1269,7 @@ Value ExpressionConstant::evaluate(const Document& root, Variables* variables) c } Value ExpressionConstant::serialize(SerializationOptions options) const { - if (options.replacementForLiteralArgs) { - return serializeConstant(Value(options.replacementForLiteralArgs.get())); - } - return serializeConstant(_value); + return 
ExpressionConstant::serializeConstant(options, _value); } REGISTER_STABLE_EXPRESSION(const, ExpressionConstant::parse); @@ -1913,7 +1956,7 @@ REGISTER_STABLE_EXPRESSION(dateToString, ExpressionDateToString::parse); intrusive_ptr ExpressionDateToString::parse(ExpressionContext* const expCtx, BSONElement expr, const VariablesParseState& vps) { - verify(expr.fieldNameStringData() == "$dateToString"); + MONGO_verify(expr.fieldNameStringData() == "$dateToString"); uassert(18629, "$dateToString only supports an object as its argument", @@ -2043,7 +2086,8 @@ Value ExpressionDateToString::evaluate(const Document& root, Variables* variable timeZone->formatDate(formatValue.getStringData(), date.coerceToDate()))); } - return Value(uassertStatusOK(timeZone->formatDate(kISOFormatString, date.coerceToDate()))); + return Value(uassertStatusOK(timeZone->formatDate( + timeZone->isUtcZone() ? kIsoFormatStringZ : kIsoFormatStringNonZ, date.coerceToDate()))); } /* ----------------------- ExpressionDateDiff ---------------------------- */ @@ -2385,8 +2429,9 @@ bool ExpressionObject::selfAndChildrenAreConstant() const { } Value ExpressionObject::serialize(SerializationOptions options) const { - if (options.replacementForLiteralArgs && selfAndChildrenAreConstant()) { - return serializeConstant(Value(options.replacementForLiteralArgs.get())); + if (options.literalPolicy != LiteralSerializationPolicy::kUnchanged && + selfAndChildrenAreConstant()) { + return ExpressionConstant::serializeConstant(options, Value(Document{})); } MutableDocument outputDoc; for (auto&& pair : _expressions) { @@ -2440,22 +2485,10 @@ intrusive_ptr ExpressionFieldPath::parse(ExpressionContext* variableValidation::validateNameForUserRead(varName); auto varId = vps.getVariable(varName); - bool queryFeatureAllowedUserRoles = varId == Variables::kUserRolesId - ? (!expCtx->maxFeatureCompatibilityVersion || - feature_flags::gFeatureFlagUserRoles.isEnabledOnVersion( - *expCtx->maxFeatureCompatibilityVersion)) - : true; - - uassert( - ErrorCodes::QueryFeatureNotAllowed, - // We would like to include the current version and the required minimum version in this - // error message, but using FeatureCompatibilityVersion::toString() would introduce a - // dependency cycle (see SERVER-31968). - str::stream() - << "$$USER_ROLES is not allowed in the current feature compatibility version. See " - << feature_compatibility_version_documentation::kCompatibilityLink - << " for more information.", - queryFeatureAllowedUserRoles); + // If the variable we are parsing is a system variable, then indicate that we have seen it. + if (!Variables::isUserDefinedVariable(varId)) { + expCtx->setSystemVarReferencedInQuery(varId); + } return new ExpressionFieldPath(expCtx, fieldPath.toString(), varId); } else { @@ -2597,7 +2630,7 @@ Value ExpressionFieldPath::serialize(SerializationOptions options) const { auto [prefix, path] = getPrefixAndPath(_fieldPath); // First handles special cases for redaction of system variables. User variables will fall // through to the default full redaction case. - if (options.redactIdentifiers && prefix.length() == 2) { + if (options.transformIdentifiers && prefix.length() == 2) { if (path.getPathLength() == 1 && Variables::isBuiltin(_variable)) { // Nothing to redact for builtin variables. 
return Value(prefix + path.fullPath()); @@ -2655,6 +2688,21 @@ std::unique_ptr ExpressionFieldPath::copyWithSubstitution( return nullptr; } +bool ExpressionFieldPath::isRenameableByAnyPrefixNameIn( + const StringMap& renameList) const { + if (_variable != Variables::kRootId || _fieldPath.getPathLength() == 1) { + return false; + } + + FieldRef path(getFieldPathWithoutCurrentPrefix().fullPath()); + for (const auto& rename : renameList) { + if (FieldRef oldName(rename.first); oldName.isPrefixOfOrEqualTo(path)) { + return true; + } + } + return false; +} + monotonic::State ExpressionFieldPath::getMonotonicState(const FieldPath& sortedFieldPath) const { return getFieldPathWithoutCurrentPrefix() == sortedFieldPath ? monotonic::State::Increasing : monotonic::State::NonMonotonic; @@ -2666,7 +2714,7 @@ REGISTER_STABLE_EXPRESSION(filter, ExpressionFilter::parse); intrusive_ptr ExpressionFilter::parse(ExpressionContext* const expCtx, BSONElement expr, const VariablesParseState& vpsIn) { - verify(expr.fieldNameStringData() == "$filter"); + MONGO_verify(expr.fieldNameStringData() == "$filter"); uassert(28646, "$filter only supports an object as its argument", expr.type() == Object); @@ -2761,7 +2809,7 @@ Value ExpressionFilter::serialize(SerializationOptions options) const { Value ExpressionFilter::evaluate(const Document& root, Variables* variables) const { // We are guaranteed at parse time that this isn't using our _varId. - const Value inputVal = _children[_kInput]->evaluate(root, variables); + Value inputVal = _children[_kInput]->evaluate(root, variables); if (inputVal.nullish()) return Value(BSONNULL); @@ -2863,7 +2911,7 @@ REGISTER_STABLE_EXPRESSION(let, ExpressionLet::parse); intrusive_ptr ExpressionLet::parse(ExpressionContext* const expCtx, BSONElement expr, const VariablesParseState& vpsIn) { - verify(expr.fieldNameStringData() == "$let"); + MONGO_verify(expr.fieldNameStringData() == "$let"); uassert(16874, "$let only supports an object as its argument", expr.type() == Object); const BSONObj args = expr.embeddedObject(); @@ -2945,8 +2993,8 @@ Value ExpressionLet::serialize(SerializationOptions options) const { for (VariableMap::const_iterator it = _variables.begin(), end = _variables.end(); it != end; ++it) { auto key = it->second.name; - if (options.redactIdentifiers) { - key = options.identifierRedactionPolicy(key); + if (options.transformIdentifiers) { + key = options.transformIdentifiersCallback(key); } vars[key] = it->second.expression->serialize(options); } @@ -2971,7 +3019,7 @@ REGISTER_STABLE_EXPRESSION(map, ExpressionMap::parse); intrusive_ptr ExpressionMap::parse(ExpressionContext* const expCtx, BSONElement expr, const VariablesParseState& vpsIn) { - verify(expr.fieldNameStringData() == "$map"); + MONGO_verify(expr.fieldNameStringData() == "$map"); uassert(16878, "$map only supports an object as its argument", expr.type() == Object); @@ -3040,7 +3088,7 @@ Value ExpressionMap::serialize(SerializationOptions options) const { Value ExpressionMap::evaluate(const Document& root, Variables* variables) const { // guaranteed at parse time that this isn't using our _varId - const Value inputVal = _children[_kInput]->evaluate(root, variables); + Value inputVal = _children[_kInput]->evaluate(root, variables); if (inputVal.nullish()) return Value(BSONNULL); @@ -3526,7 +3574,7 @@ Value ExpressionIndexOfArray::evaluate(const Document& root, Variables* variable << typeName(arrayArg.getType()), arrayArg.isArray()); - std::vector array = arrayArg.getArray(); + const std::vector& array = 
arrayArg.getArray(); auto args = evaluateAndValidateArguments(root, _children, array.size(), variables); for (int i = args.startIndex; i < args.endIndex; i++) { if (getExpressionContext()->getValueComparator().evaluate(array[i] == @@ -3621,7 +3669,7 @@ intrusive_ptr ExpressionIndexOfArray::optimize() { << "argument is of type: " << typeName(valueArray.getType()), valueArray.isArray()); - auto arr = valueArray.getArray(); + const auto& arr = valueArray.getArray(); // To handle the case of duplicate values the values need to map to a vector of indecies. auto indexMap = @@ -3998,7 +4046,7 @@ Value ExpressionInternalFLEBetween::serialize(SerializationOptions options) cons } return Value(Document{{kInternalFleBetween, Document{{"field", _children[0]->serialize(options)}, - {"server", Value(serverDerivedValues)}}}}); + {"server", Value(std::move(serverDerivedValues))}}}}); } Value ExpressionInternalFLEBetween::evaluate(const Document& root, Variables* variables) const { @@ -4199,7 +4247,7 @@ intrusive_ptr ExpressionOr::optimize() { */ const size_t n = pOr->_children.size(); // ExpressionNary::optimize() generates an ExpressionConstant for {$or:[]}. - verify(n > 0); + MONGO_verify(n > 0); intrusive_ptr pLast(pOr->_children[n - 1]); const ExpressionConstant* pConst = dynamic_cast(pLast.get()); if (!pConst) @@ -4799,7 +4847,7 @@ Value ExpressionReverseArray::evaluate(const Document& root, Variables* variable std::vector array = input.getArray(); std::reverse(array.begin(), array.end()); - return Value(array); + return Value(std::move(array)); } REGISTER_STABLE_EXPRESSION(reverseArray, ExpressionReverseArray::parse); @@ -4895,7 +4943,7 @@ Value ExpressionSortArray::evaluate(const Document& root, Variables* variables) std::vector array = input.getArray(); std::sort(array.begin(), array.end(), _sortBy); - return Value(array); + return Value(std::move(array)); } REGISTER_STABLE_EXPRESSION(sortArray, ExpressionSortArray::parse); @@ -5229,7 +5277,7 @@ Value ExpressionInternalFindAllValuesAtPath::evaluate(const Document& root, outputVals.push_back(Value(elt)); } - return Value(outputVals); + return Value(std::move(outputVals)); } // This expression is not part of the stable API, but can always be used. It is // an internal expression used only for distinct. 
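Note: the `isRenameableByAnyPrefixNameIn()` helper added earlier in this file asks whether any old name in a rename map is a path prefix of (or equal to) the expression's field path. A minimal sketch of dotted-path prefix matching, using plain strings rather than `FieldRef`; the helper names below are assumptions for illustration.

```cpp
#include <iostream>
#include <map>
#include <string>

// Returns true when 'candidatePrefix' equals 'path' or names one of its parent
// components, comparing whole dotted components (so "a.b" is a prefix of
// "a.b.c" but not of "a.bc").
bool isDottedPrefixOfOrEqualTo(const std::string& candidatePrefix, const std::string& path) {
    if (candidatePrefix == path)
        return true;
    return path.size() > candidatePrefix.size() &&
        path.compare(0, candidatePrefix.size(), candidatePrefix) == 0 &&
        path[candidatePrefix.size()] == '.';
}

// Sketch of the prefix scan over a rename map: the path is affected by the
// rename if any old name is a prefix of (or equal to) it.
bool isRenameableByAnyPrefixNameIn(const std::string& fieldPath,
                                   const std::map<std::string, std::string>& renames) {
    for (const auto& rename : renames)
        if (isDottedPrefixOfOrEqualTo(rename.first, fieldPath))
            return true;
    return false;
}

int main() {
    const std::map<std::string, std::string> renames{{"a.b", "x.y"}};
    std::cout << isRenameableByAnyPrefixNameIn("a.b.c", renames) << "\n";  // 1
    std::cout << isRenameableByAnyPrefixNameIn("a.bc", renames) << "\n";   // 0
}
```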
@@ -5682,14 +5730,45 @@ StatusWith ExpressionSubtract::apply(Value lhs, Value rhs) { } else if (lhs.nullish() || rhs.nullish()) { return Value(BSONNULL); } else if (lhs.getType() == Date) { - if (rhs.getType() == Date) { - return Value(durationCount(lhs.getDate() - rhs.getDate())); - } else if (rhs.numeric()) { - return Value(lhs.getDate() - Milliseconds(rhs.coerceToLong())); - } else { - return Status(ErrorCodes::TypeMismatch, - str::stream() - << "can't $subtract " << typeName(rhs.getType()) << " from Date"); + BSONType rhsType = rhs.getType(); + switch (rhsType) { + case Date: + return Value(durationCount(lhs.getDate() - rhs.getDate())); + case NumberInt: + case NumberLong: { + long long longDiff = lhs.getDate().toMillisSinceEpoch(); + if (overflow::sub(longDiff, rhs.coerceToLong(), &longDiff)) { + return Status(ErrorCodes::Overflow, str::stream() << "date overflow"); + } + return Value(Date_t::fromMillisSinceEpoch(longDiff)); + } + case NumberDouble: { + using limits = std::numeric_limits; + long long longDiff = lhs.getDate().toMillisSinceEpoch(); + double doubleRhs = rhs.coerceToDouble(); + // check the doubleRhs should not exceed int64 limit and result will not overflow + if (doubleRhs >= static_cast(limits::min()) && + doubleRhs < static_cast(limits::max()) && + !overflow::sub(longDiff, llround(doubleRhs), &longDiff)) { + return Value(Date_t::fromMillisSinceEpoch(longDiff)); + } + return Status(ErrorCodes::Overflow, str::stream() << "date overflow"); + } + case NumberDecimal: { + long long longDiff = lhs.getDate().toMillisSinceEpoch(); + Decimal128 decimalRhs = rhs.coerceToDecimal(); + std::uint32_t signalingFlags = Decimal128::SignalingFlag::kNoFlag; + std::int64_t longRhs = decimalRhs.toLong(&signalingFlags); + if (signalingFlags != Decimal128::SignalingFlag::kNoFlag || + overflow::sub(longDiff, longRhs, &longDiff)) { + return Status(ErrorCodes::Overflow, str::stream() << "date overflow"); + } + return Value(Date_t::fromMillisSinceEpoch(longDiff)); + } + default: + return Status(ErrorCodes::TypeMismatch, + str::stream() + << "can't $subtract " << typeName(rhs.getType()) << " from Date"); } } else { return Status(ErrorCodes::TypeMismatch, @@ -5868,11 +5947,12 @@ Value ExpressionSwitch::serialize(SerializationOptions options) const { if (defaultExpr()) { return Value(Document{{"$switch", - Document{{"branches", Value(serializedBranches)}, + Document{{"branches", Value(std::move(serializedBranches))}, {"default", defaultExpr()->serialize(options)}}}}); } - return Value(Document{{"$switch", Document{{"branches", Value(serializedBranches)}}}}); + return Value( + Document{{"$switch", Document{{"branches", Value(std::move(serializedBranches))}}}}); } /* ------------------------- ExpressionToLower ----------------------------- */ @@ -5944,74 +6024,6 @@ intrusive_ptr ExpressionTrim::parse(ExpressionContext* const expCtx, return new ExpressionTrim(expCtx, trimType, name, input, characters); } -namespace { -const std::vector kDefaultTrimWhitespaceChars = { - "\0"_sd, // Null character. Avoid using "\u0000" syntax to work around a gcc bug: - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53690. 
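Note: the new Date-minus-number branches above all follow the same recipe: validate that the right-hand side fits in a 64-bit integer, then perform an overflow-checked subtraction on milliseconds since the epoch. A hedged sketch of that recipe; `checkedSub()` stands in for the overflow-checked subtraction helper and is not MongoDB code.

```cpp
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>

// Checked 64-bit subtraction; plays the role of an overflow::sub-style helper.
// Returns nullopt if a - b would overflow.
std::optional<int64_t> checkedSub(int64_t a, int64_t b) {
    if ((b > 0 && a < std::numeric_limits<int64_t>::min() + b) ||
        (b < 0 && a > std::numeric_limits<int64_t>::max() + b))
        return std::nullopt;
    return a - b;
}

// Sketch of Date - double: reject doubles outside the 64-bit range *before*
// rounding, then do a checked subtraction of milliseconds-since-epoch.
std::optional<int64_t> subtractMillisFromDate(int64_t dateMillis, double rhs) {
    using limits = std::numeric_limits<long long>;
    if (!(rhs >= static_cast<double>(limits::min()) &&
          rhs < static_cast<double>(limits::max())))
        return std::nullopt;  // would overflow when converted to a 64-bit integer
    return checkedSub(dateMillis, std::llround(rhs));
}

int main() {
    if (auto result = subtractMillisFromDate(1'000'000, 2.5e3))
        std::cout << *result << "\n";  // 997500
    if (!subtractMillisFromDate(0, 1e300))
        std::cout << "overflow rejected\n";
}
```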
- "\u0020"_sd, // Space - "\u0009"_sd, // Horizontal tab - "\u000A"_sd, // Line feed/new line - "\u000B"_sd, // Vertical tab - "\u000C"_sd, // Form feed - "\u000D"_sd, // Horizontal tab - "\u00A0"_sd, // Non-breaking space - "\u1680"_sd, // Ogham space mark - "\u2000"_sd, // En quad - "\u2001"_sd, // Em quad - "\u2002"_sd, // En space - "\u2003"_sd, // Em space - "\u2004"_sd, // Three-per-em space - "\u2005"_sd, // Four-per-em space - "\u2006"_sd, // Six-per-em space - "\u2007"_sd, // Figure space - "\u2008"_sd, // Punctuation space - "\u2009"_sd, // Thin space - "\u200A"_sd // Hair space -}; - -/** - * Assuming 'charByte' is the beginning of a UTF-8 code point, returns the number of bytes that - * should be used to represent the code point. Said another way, computes how many continuation - * bytes are expected to be present after 'charByte' in a UTF-8 encoded string. - */ -inline size_t numberOfBytesForCodePoint(char charByte) { - if ((charByte & 0b11111000) == 0b11110000) { - return 4; - } else if ((charByte & 0b11110000) == 0b11100000) { - return 3; - } else if ((charByte & 0b11100000) == 0b11000000) { - return 2; - } else { - return 1; - } -} - -/** - * Returns a vector with one entry per code point to trim, or throws an exception if 'utf8String' - * contains invalid UTF-8. - */ -std::vector extractCodePointsFromChars(StringData utf8String, - StringData expressionName) { - std::vector codePoints; - std::size_t i = 0; - while (i < utf8String.size()) { - uassert(50698, - str::stream() << "Failed to parse \"chars\" argument to " << expressionName - << ": Detected invalid UTF-8. Got continuation byte when expecting " - "the start of a new code point.", - !str::isUTF8ContinuationByte(utf8String[i])); - codePoints.push_back(utf8String.substr(i, numberOfBytesForCodePoint(utf8String[i]))); - i += numberOfBytesForCodePoint(utf8String[i]); - } - uassert(50697, - str::stream() - << "Failed to parse \"chars\" argument to " << expressionName - << ": Detected invalid UTF-8. 
Missing expected continuation byte at end of string.", - i <= utf8String.size()); - return codePoints; -} -} // namespace - Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const { auto unvalidatedInput = _children[_kInput]->evaluate(root, variables); if (unvalidatedInput.nullish()) { @@ -6025,7 +6037,11 @@ Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const const StringData input(unvalidatedInput.getStringData()); if (!_children[_kCharacters]) { - return Value(doTrim(input, kDefaultTrimWhitespaceChars)); + return Value( + str_trim_utils::doTrim(input, + str_trim_utils::kDefaultTrimWhitespaceChars, + _trimType == TrimType::kBoth || _trimType == TrimType::kLeft, + _trimType == TrimType::kBoth || _trimType == TrimType::kRight)); } auto unvalidatedUserChars = _children[_kCharacters]->evaluate(root, variables); if (unvalidatedUserChars.nullish()) { @@ -6037,65 +6053,11 @@ Value ExpressionTrim::evaluate(const Document& root, Variables* variables) const << typeName(unvalidatedUserChars.getType()) << ") instead.", unvalidatedUserChars.getType() == BSONType::String); - return Value( - doTrim(input, extractCodePointsFromChars(unvalidatedUserChars.getStringData(), _name))); -} - -bool ExpressionTrim::codePointMatchesAtIndex(const StringData& input, - std::size_t indexOfInput, - const StringData& testCP) { - for (size_t i = 0; i < testCP.size(); ++i) { - if (indexOfInput + i >= input.size() || input[indexOfInput + i] != testCP[i]) { - return false; - } - } - return true; -}; - -StringData ExpressionTrim::trimFromLeft(StringData input, const std::vector& trimCPs) { - std::size_t bytesTrimmedFromLeft = 0u; - while (bytesTrimmedFromLeft < input.size()) { - // Look for any matching code point to trim. - auto matchingCP = std::find_if(trimCPs.begin(), trimCPs.end(), [&](auto& testCP) { - return codePointMatchesAtIndex(input, bytesTrimmedFromLeft, testCP); - }); - if (matchingCP == trimCPs.end()) { - // Nothing to trim, stop here. - break; - } - bytesTrimmedFromLeft += matchingCP->size(); - } - return input.substr(bytesTrimmedFromLeft); -} - -StringData ExpressionTrim::trimFromRight(StringData input, const std::vector& trimCPs) { - std::size_t bytesTrimmedFromRight = 0u; - while (bytesTrimmedFromRight < input.size()) { - std::size_t indexToTrimFrom = input.size() - bytesTrimmedFromRight; - auto matchingCP = std::find_if(trimCPs.begin(), trimCPs.end(), [&](auto& testCP) { - if (indexToTrimFrom < testCP.size()) { - // We've gone off the left of the string. - return false; - } - return codePointMatchesAtIndex(input, indexToTrimFrom - testCP.size(), testCP); - }); - if (matchingCP == trimCPs.end()) { - // Nothing to trim, stop here. 
- break; - } - bytesTrimmedFromRight += matchingCP->size(); - } - return input.substr(0, input.size() - bytesTrimmedFromRight); -} - -StringData ExpressionTrim::doTrim(StringData input, const std::vector& trimCPs) const { - if (_trimType == TrimType::kBoth || _trimType == TrimType::kLeft) { - input = trimFromLeft(input, trimCPs); - } - if (_trimType == TrimType::kBoth || _trimType == TrimType::kRight) { - input = trimFromRight(input, trimCPs); - } - return input; + return Value(str_trim_utils::doTrim( + input, + str_trim_utils::extractCodePointsFromChars(unvalidatedUserChars.getStringData()), + _trimType == TrimType::kBoth || _trimType == TrimType::kLeft, + _trimType == TrimType::kBoth || _trimType == TrimType::kRight)); } boost::intrusive_ptr ExpressionTrim::optimize() { @@ -6222,12 +6184,6 @@ Value ExpressionTrunc::evaluate(const Document& root, Variables* variables) cons root, _children, getOpName(), Decimal128::kRoundTowardZero, &std::trunc, variables); } -intrusive_ptr ExpressionTrunc::parse(ExpressionContext* const expCtx, - BSONElement elem, - const VariablesParseState& vps) { - return ExpressionRangedArity::parse(expCtx, elem, vps); -} - REGISTER_STABLE_EXPRESSION(trunc, ExpressionTrunc::parse); const char* ExpressionTrunc::getOpName() const { return "$trunc"; @@ -6397,7 +6353,7 @@ Value ExpressionZip::evaluate(const Document& root, Variables* variables) const output.push_back(Value(outputChild)); } - return Value(output); + return Value(std::move(output)); } boost::intrusive_ptr ExpressionZip::optimize() { @@ -6521,7 +6477,7 @@ class ConversionTable { table[BSONType::Date][BSONType::String] = [](ExpressionContext* const expCtx, Value inputValue) { auto dateString = uassertStatusOK( - TimeZoneDatabase::utcZone().formatDate(kISOFormatString, inputValue.getDate())); + TimeZoneDatabase::utcZone().formatDate(kIsoFormatStringZ, inputValue.getDate())); return Value(dateString); }; table[BSONType::Date][BSONType::Bool] = [](ExpressionContext* const expCtx, @@ -7142,7 +7098,7 @@ Value ExpressionRegex::nextMatch(RegexExecutionState* regexState) const { MutableDocument match; match.addField("match", Value(m[0])); match.addField("idx", Value(regexState->startCodePointPos)); - match.addField("captures", Value(captures)); + match.addField("captures", Value(std::move(captures))); return match.freezeToValue(); } @@ -7341,7 +7297,7 @@ Value ExpressionRegexFindAll::evaluate(const Document& root, Variables* variable std::vector output; auto executionState = buildInitialState(root, variables); if (executionState.nullish()) { - return Value(output); + return Value(std::move(output)); } StringData input = *(executionState.input); size_t totalDocSize = 0; @@ -7383,7 +7339,7 @@ Value ExpressionRegexFindAll::evaluate(const Document& root, Variables* variable invariant(executionState.startCodePointPos > 0); invariant(executionState.startCodePointPos <= executionState.startBytePos); } while (static_cast(executionState.startBytePos) < input.size()); - return Value(output); + return Value(std::move(output)); } /* -------------------------- ExpressionRegexMatch ------------------------------ */ @@ -7989,7 +7945,6 @@ Value ExpressionGetField::evaluate(const Document& root, Variables* variables) c return Value(); } - return inputValue.getDocument().getField(fieldValue.getString()); } @@ -7998,19 +7953,23 @@ intrusive_ptr ExpressionGetField::optimize() { } Value ExpressionGetField::serialize(SerializationOptions options) const { - MutableDocument argDoc; - if (options.redactIdentifiers) { - // The parser 
guarantees that the '_children[_kField]' expression evaluates to a constant - // string. - auto strPath = - static_cast(_children[_kField].get())->getValue().getString(); - argDoc.addField("field"_sd, Value(options.serializeFieldPathFromString(strPath))); - } else { - argDoc.addField("field"_sd, _children[_kField]->serialize(options)); + // The parser guarantees that the '_children[_kField]' expression evaluates to a constant + // string. + auto strPath = + static_cast(_children[_kField].get())->getValue().getString(); + + Value maybeRedactedPath{options.serializeFieldPathFromString(strPath)}; + // This is a pretty unique option to serialize. It is both a constant and a field path, which + // means that it: + // - should be redacted (if that option is set). + // - should *not* be wrapped in $const iff we are serializing for a debug string + if (options.literalPolicy != LiteralSerializationPolicy::kToDebugTypeString) { + maybeRedactedPath = Value(Document{{"$const"_sd, maybeRedactedPath}}); } - argDoc.addField("input"_sd, _children[_kInput]->serialize(options)); - return Value(Document{{"$getField"_sd, argDoc.freezeToValue()}}); + return Value(Document{{"$getField"_sd, + Document{{"field"_sd, std::move(maybeRedactedPath)}, + {"input"_sd, _children[_kInput]->serialize(options)}}}}); } /* -------------------------- ExpressionSetField ------------------------------ */ @@ -8121,20 +8080,24 @@ intrusive_ptr ExpressionSetField::optimize() { } Value ExpressionSetField::serialize(SerializationOptions options) const { - MutableDocument argDoc; - if (options.redactIdentifiers) { - // The parser guarantees that the '_children[_kField]' expression evaluates to a constant - // string. - auto strPath = - static_cast(_children[_kField].get())->getValue().getString(); - argDoc.addField("field"_sd, Value(options.serializeFieldPathFromString(strPath))); - } else { - argDoc.addField("field"_sd, _children[_kField]->serialize(options)); + // The parser guarantees that the '_children[_kField]' expression evaluates to a constant + // string. + auto strPath = + static_cast(_children[_kField].get())->getValue().getString(); + + Value maybeRedactedPath{options.serializeFieldPathFromString(strPath)}; + // This is a pretty unique option to serialize. It is both a constant and a field path, which + // means that it: + // - should be redacted (if that option is set). 
+ // - should *not* be wrapped in $const iff we are serializing for a debug string + if (options.literalPolicy != LiteralSerializationPolicy::kToDebugTypeString) { + maybeRedactedPath = Value(Document{{"$const"_sd, maybeRedactedPath}}); } - argDoc.addField("input"_sd, _children[_kInput]->serialize(options)); - argDoc.addField("value"_sd, _children[_kValue]->serialize(options)); - return Value(Document{{"$setField"_sd, argDoc.freezeToValue()}}); + return Value(Document{{"$setField"_sd, + Document{{"field"_sd, std::move(maybeRedactedPath)}, + {"input"_sd, _children[_kInput]->serialize(options)}, + {"value"_sd, _children[_kValue]->serialize(options)}}}}); } /* ------------------------- ExpressionTsSecond ----------------------------- */ @@ -8191,11 +8154,7 @@ Value ExpressionBitNot::evaluateNumericArg(const Value& numericArg) const { } } -REGISTER_EXPRESSION_WITH_FEATURE_FLAG(bitNot, - ExpressionBitNot::parse, - AllowedWithApiStrict::kNeverInVersion1, - AllowedWithClientType::kAny, - feature_flags::gFeatureFlagBitwise); +REGISTER_STABLE_EXPRESSION(bitNot, ExpressionBitNot::parse); const char* ExpressionBitNot::getOpName() const { return "$bitNot"; @@ -8203,21 +8162,9 @@ const char* ExpressionBitNot::getOpName() const { /* ------------------------- $bitAnd, $bitOr, and $bitXor ------------------------ */ -REGISTER_EXPRESSION_WITH_FEATURE_FLAG(bitAnd, - ExpressionBitAnd::parse, - AllowedWithApiStrict::kNeverInVersion1, - AllowedWithClientType::kAny, - feature_flags::gFeatureFlagBitwise); -REGISTER_EXPRESSION_WITH_FEATURE_FLAG(bitOr, - ExpressionBitOr::parse, - AllowedWithApiStrict::kNeverInVersion1, - AllowedWithClientType::kAny, - feature_flags::gFeatureFlagBitwise); -REGISTER_EXPRESSION_WITH_FEATURE_FLAG(bitXor, - ExpressionBitXor::parse, - AllowedWithApiStrict::kNeverInVersion1, - AllowedWithClientType::kAny, - feature_flags::gFeatureFlagBitwise); +REGISTER_STABLE_EXPRESSION(bitAnd, ExpressionBitAnd::parse); +REGISTER_STABLE_EXPRESSION(bitOr, ExpressionBitOr::parse); +REGISTER_STABLE_EXPRESSION(bitXor, ExpressionBitXor::parse); MONGO_INITIALIZER_GROUP(BeginExpressionRegistration, ("default"), ("EndExpressionRegistration")) MONGO_INITIALIZER_GROUP(EndExpressionRegistration, ("BeginExpressionRegistration"), ()) diff --git a/src/mongo/db/pipeline/expression.h b/src/mongo/db/pipeline/expression.h index 91341d79546b2..d5a1d88e62913 100644 --- a/src/mongo/db/pipeline/expression.h +++ b/src/mongo/db/pipeline/expression.h @@ -29,27 +29,54 @@ #pragma once -#include "mongo/base/data_range.h" -#include "mongo/platform/basic.h" - -#include #include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include +#include +#include #include +#include +#include #include #include #include -#include "mongo/base/init.h" +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/crypto/fle_crypto_predicate.h" +#include "mongo/crypto/fle_crypto_types.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include 
"mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_visitor.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/monotonic_expression.h" +#include "mongo/db/pipeline/percentile_algo.h" #include "mongo/db/pipeline/percentile_algo_discrete.h" #include "mongo/db/pipeline/variables.h" #include "mongo/db/query/allowed_contexts.h" @@ -59,9 +86,14 @@ #include "mongo/db/query/sort_pattern.h" #include "mongo/db/server_options.h" #include "mongo/db/update/pattern_cmp.h" +#include "mongo/platform/basic.h" +#include "mongo/util/assert_util.h" #include "mongo/util/intrusive_counter.h" #include "mongo/util/pcre.h" +#include "mongo/util/safe_num.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -377,6 +409,93 @@ class Expression : public RefCountable { ExpressionContext* const _expCtx; }; +/** + * A constant expression. Repeated calls to evaluate() will always return the same thing. + */ +class ExpressionConstant final : public Expression { +public: + ExpressionConstant(ExpressionContext* expCtx, const Value& value); + + boost::intrusive_ptr optimize() final; + Value evaluate(const Document& root, Variables* variables) const final; + Value serialize(SerializationOptions options) const final; + + const char* getOpName() const; + + /** + * Creates a new ExpressionConstant with value 'value'. + */ + static boost::intrusive_ptr create(ExpressionContext* expCtx, + const Value& value); + + static boost::intrusive_ptr parse(ExpressionContext* expCtx, + BSONElement bsonExpr, + const VariablesParseState& vps); + + /** + * Returns true if 'expression' is nullptr or if 'expression' is an instance of an + * ExpressionConstant. + */ + static bool isNullOrConstant(boost::intrusive_ptr expression) { + return !expression || dynamic_cast(expression.get()); + } + + /** + * Returns true if 'expression' is an instance of an ExpressionConstant. + */ + static bool isConstant(boost::intrusive_ptr expression) { + return dynamic_cast(expression.get()); + } + static Value serializeConstant(const SerializationOptions& opts, Value val); + + bool selfAndChildrenAreConstant() const override final { + return true; + } + + /** + * Returns true if every expression in 'expressions' is either a nullptr or an instance of an + * ExpressionConstant. + */ + static bool allNullOrConstant( + const std::initializer_list>& expressions) { + return std::all_of(expressions.begin(), expressions.end(), [](auto exp) { + return ExpressionConstant::isNullOrConstant(exp); + }); + } + template + static bool allConstant(const ExpressionContainer& expressions) { + return std::all_of(expressions.begin(), expressions.end(), [](auto exp) { + return ExpressionConstant::isConstant(exp); + }); + } + + /** + * Returns the constant value represented by this Expression. 
+ */ + Value getValue() const { + return _value; + } + + void setValue(const Value& value) { + _value = value; + }; + + void acceptVisitor(ExpressionMutableVisitor* visitor) final { + return visitor->visit(this); + } + + void acceptVisitor(ExpressionConstVisitor* visitor) const final { + return visitor->visit(this); + } + +private: + monotonic::State getMonotonicState(const FieldPath& sortedFieldPath) const final { + return monotonic::State::Constant; + } + + Value _value; +}; + /** * Inherit from ExpressionVariadic or ExpressionFixedArity instead of directly from this class. */ @@ -448,6 +567,33 @@ class ExpressionVariadic : public ExpressionNaryBase { : ExpressionNaryBase(expCtx) {} ExpressionVariadic(ExpressionContext* const expCtx, Expression::ExpressionVector&& children) : ExpressionNaryBase(expCtx, std::move(children)) {} + + Value serialize(SerializationOptions options) const { + // As a special case, we would like to serialize a variadic number of children as + // "?array" if they are all constant. Check for that here, otherwise default to + // the normal one-by-one serialization of the children. + if (options.literalPolicy == LiteralSerializationPolicy::kToDebugTypeString && + ExpressionConstant::allConstant(this->_children)) { + // We could evaluate the expression right here and now and end up with just the one + // constant answer, but this is not an optimization funciton, it is meant to just + // serialize what we have, so let's preserve the array of constants. + auto args = [&]() { + std::vector values; + const auto& constants = this->_children; + values.reserve(constants.size()); + std::transform(constants.begin(), + constants.end(), + std::back_inserter(values), + [](const auto& exp) { + return static_cast(exp.get())->getValue(); + }); + return values; + }(); + return Value(Document{ + {this->getOpName(), ExpressionConstant::serializeConstant(options, Value(args))}}); + } + return ExpressionNary::serialize(options); + } }; /** @@ -604,7 +750,7 @@ class ExpressionFromAccumulatorQuantile : public Expression { explicit ExpressionFromAccumulatorQuantile(ExpressionContext* const expCtx, std::vector& ps, boost::intrusive_ptr input, - int32_t method) + PercentileMethod method) : Expression(expCtx, {input}), _ps(ps), _input(input), _method(method) { expCtx->sbeCompatibility = SbeCompatibility::notCompatible; } @@ -627,12 +773,11 @@ class ExpressionFromAccumulatorQuantile : public Expression { _ps.size(), std::vector(_ps.size(), input.coerceToDouble())); } - if (input.isArray()) { - uassert(7436202, - "Input to $percentile or $median cannot be an empty array.", - input.getArray().size() > 0); - - if (_method != 2 /*continuous*/) { + if (input.isArray() && input.getArrayLength() > 0) { + if (_method != PercentileMethod::Continuous) { + // On small datasets, which are likely to be the inputs for the expression, creating + // t-digests is inefficient, so instead we use DiscretePercentile algo directly for + // both "discrete" and "approximate" methods. std::vector samples; samples.reserve(input.getArrayLength()); for (const auto& item : input.getArray()) { @@ -647,7 +792,7 @@ class ExpressionFromAccumulatorQuantile : public Expression { // Delegate to the accumulator. Note: it would be more efficient to use the // percentile algorithms directly rather than an accumulator, as it would reduce // heap alloc, virtual calls and avoid unnecessary for expressions memory tracking. - // However, on large datasets these overheads are less noticeable. 
+ // This path currently cannot be executed as we only support continuous percentiles. TAccumulator accum(this->getExpressionContext(), _ps, _method); for (const auto& item : input.getArray()) { accum.process(item, false /* merging */); @@ -672,8 +817,7 @@ class ExpressionFromAccumulatorQuantile : public Expression { private: std::vector _ps; boost::intrusive_ptr _input; - // TODO SERVER-74894: This should be 'PercentileMethodEnum', not 'int32_t'. - int32_t _method; + PercentileMethod _method; }; /** @@ -752,85 +896,6 @@ class ExpressionTwoNumericArgs : public ExpressionFixedArity { virtual Value evaluateNumericArgs(const Value& numericArg1, const Value& numericArg2) const = 0; }; -/** - * A constant expression. Repeated calls to evaluate() will always return the same thing. - */ -class ExpressionConstant final : public Expression { -public: - ExpressionConstant(ExpressionContext* expCtx, const Value& value); - - boost::intrusive_ptr optimize() final; - Value evaluate(const Document& root, Variables* variables) const final; - Value serialize(SerializationOptions options) const final; - - const char* getOpName() const; - - /** - * Creates a new ExpressionConstant with value 'value'. - */ - static boost::intrusive_ptr create(ExpressionContext* expCtx, - const Value& value); - - static boost::intrusive_ptr parse(ExpressionContext* expCtx, - BSONElement bsonExpr, - const VariablesParseState& vps); - - /** - * Returns true if 'expression' is nullptr or if 'expression' is an instance of an - * ExpressionConstant. - */ - static bool isNullOrConstant(boost::intrusive_ptr expression) { - return !expression || dynamic_cast(expression.get()); - } - - /** - * Returns true if 'expression' is an instance of an ExpressionConstant. - */ - static bool isConstant(boost::intrusive_ptr expression) { - return dynamic_cast(expression.get()); - } - bool selfAndChildrenAreConstant() const override final { - return true; - } - - /** - * Returns true if every expression in 'expressions' is either a nullptr or an instance of an - * ExpressionConstant. - */ - static bool allNullOrConstant( - const std::initializer_list>& expressions) { - return std::all_of(expressions.begin(), expressions.end(), [](auto exp) { - return ExpressionConstant::isNullOrConstant(exp); - }); - } - - /** - * Returns the constant value represented by this Expression. - */ - Value getValue() const { - return _value; - } - - void setValue(const Value& value) { - _value = value; - }; - - void acceptVisitor(ExpressionMutableVisitor* visitor) final { - return visitor->visit(this); - } - - void acceptVisitor(ExpressionConstVisitor* visitor) const final { - return visitor->visit(this); - } - -private: - monotonic::State getMonotonicState(const FieldPath& sortedFieldPath) const final { - return monotonic::State::Constant; - } - - Value _value; -}; - /** * Inherit from this class if your expression works with date types, and accepts either a single * argument which is a date, or an object {date: , timezone: }. 
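Note: the $percentile/$median expression change above switches `_method` to the `PercentileMethod` enum and takes a discrete-style path for small, non-continuous inputs. As a rough illustration of the discrete idea only (rank selection on sorted samples; this is not MongoDB's DiscretePercentile implementation):

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <vector>

// Minimal sketch of a discrete percentile: sort the samples and pick the
// element at rank ceil(p * n), clamped to at least 1.
double discretePercentile(std::vector<double> samples, double p) {
    std::sort(samples.begin(), samples.end());
    const std::size_t n = samples.size();
    const std::size_t rank =
        static_cast<std::size_t>(std::max(1.0, std::ceil(p * static_cast<double>(n))));
    return samples[rank - 1];
}

int main() {
    const std::vector<double> data{15.0, 20.0, 35.0, 40.0, 50.0};
    std::cout << discretePercentile(data, 0.5) << "\n";  // 35 (median)
    std::cout << discretePercentile(data, 0.9) << "\n";  // 50
}
```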
@@ -1127,9 +1192,7 @@ class ExpressionAnyElementTrue final : public ExpressionFixedArity { public: explicit ExpressionArray(ExpressionContext* const expCtx) - : ExpressionVariadic(expCtx) { - expCtx->sbeCompatibility = SbeCompatibility::notCompatible; - } + : ExpressionVariadic(expCtx) {} ExpressionArray(ExpressionContext* const expCtx, std::vector>&& children) @@ -1230,10 +1293,7 @@ class ExpressionLast final : public ExpressionFixedArity { class ExpressionObjectToArray final : public ExpressionFixedArity { public: explicit ExpressionObjectToArray(ExpressionContext* const expCtx) - : ExpressionFixedArity(expCtx) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + : ExpressionFixedArity(expCtx) {} Value evaluate(const Document& root, Variables* variables) const final; const char* getOpName() const final; @@ -1250,10 +1310,7 @@ class ExpressionObjectToArray final : public ExpressionFixedArity { public: explicit ExpressionArrayToObject(ExpressionContext* const expCtx) - : ExpressionFixedArity(expCtx) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + : ExpressionFixedArity(expCtx) {} ExpressionArrayToObject(ExpressionContext* const expCtx, ExpressionVector&& children) : ExpressionFixedArity(expCtx, std::move(children)) {} @@ -1994,6 +2051,12 @@ class ExpressionFieldPath : public Expression { std::unique_ptr copyWithSubstitution( const StringMap& renameList) const; + /** + * Checks if any key of 'renameList' map is a prefix of this ExpressionFieldPath's path. It + * would mean that this ExpressionFieldPath is renameable by 'renameList' if so. + */ + bool isRenameableByAnyPrefixNameIn(const StringMap& renameList) const; + void acceptVisitor(ExpressionMutableVisitor* visitor) final { return visitor->visit(this); } @@ -2114,10 +2177,7 @@ class ExpressionHour final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$hour", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$hour", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.dateParts(date).hour); @@ -2500,10 +2560,7 @@ class ExpressionMillisecond final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$millisecond", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$millisecond", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.dateParts(date).millisecond); @@ -2525,10 +2582,7 @@ class ExpressionMinute final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$minute", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$minute", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.dateParts(date).minute); @@ -2612,10 +2666,7 @@ class ExpressionMonth final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = 
nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$month", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$month", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.dateParts(date).month); @@ -2910,10 +2961,7 @@ class ExpressionSecond final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$second", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$second", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.dateParts(date).second); @@ -3357,9 +3405,7 @@ class ExpressionSubstrCP final : public ExpressionFixedArity { public: explicit ExpressionStrLenBytes(ExpressionContext* const expCtx) - : ExpressionFixedArity(expCtx) { - expCtx->sbeCompatibility = SbeCompatibility::notCompatible; - } + : ExpressionFixedArity(expCtx) {} ExpressionStrLenBytes(ExpressionContext* const expCtx, ExpressionVector&& children) : ExpressionFixedArity(expCtx, std::move(children)) {} @@ -3592,33 +3638,16 @@ class ExpressionTrim final : public Expression { return visitor->visit(this); } -private: - /** - * Returns true if the unicode character found at index 'indexIntoInput' of 'input' is equal to - * 'testCP'. - */ - static bool codePointMatchesAtIndex(const StringData& input, - std::size_t indexIntoInput, - const StringData& testCP); - - /** - * Given the input string and the code points to trim from that string, returns a substring of - * 'input' with any code point from 'trimCPs' trimmed from the left. - */ - static StringData trimFromLeft(StringData input, const std::vector& trimCPs); - - /** - * Given the input string and the code points to trim from that string, returns a substring of - * 'input' with any code point from 'trimCPs' trimmed from the right. - */ - static StringData trimFromRight(StringData input, const std::vector& trimCPs); + /* Returns "trim"/"ltrim"/"rtrim" based on the expression name without the $ sign. */ + std::string getTrimTypeString() const { + return _name.substr(1, _name.size()); + } - /** - * Returns the trimmed version of 'input', with all code points in 'trimCPs' removed from the - * front, back, or both - depending on _trimType. - */ - StringData doTrim(StringData input, const std::vector& trimCPs) const; + bool hasCharactersExpr() const { + return _children[_kCharacters] != nullptr; + } +private: static constexpr size_t _kInput = 0; static constexpr size_t _kCharacters = 1; // Optional, null if not specified. 
@@ -3630,15 +3659,10 @@ class ExpressionTrim final : public Expression { class ExpressionTrunc final : public ExpressionRangedArity { public: explicit ExpressionTrunc(ExpressionContext* const expCtx) - : ExpressionRangedArity(expCtx) { - expCtx->sbeCompatibility = SbeCompatibility::notCompatible; - } + : ExpressionRangedArity(expCtx) {} ExpressionTrunc(ExpressionContext* const expCtx, ExpressionVector&& children) : ExpressionRangedArity(expCtx, std::move(children)) {} - static boost::intrusive_ptr parse(ExpressionContext* expCtx, - BSONElement elem, - const VariablesParseState& vps); Value evaluate(const Document& root, Variables* variables) const final; const char* getOpName() const final; @@ -3697,10 +3721,7 @@ class ExpressionWeek final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$week", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$week", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.week(date)); @@ -3722,10 +3743,7 @@ class ExpressionIsoWeekYear final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$isoWeekYear", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$isoWeekYear", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.isoYear(date)); @@ -3748,10 +3766,7 @@ class ExpressionIsoDayOfWeek final boost::intrusive_ptr date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$isoDayOfWeek", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$isoDayOfWeek", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.isoDayOfWeek(date)); @@ -3773,10 +3788,7 @@ class ExpressionIsoWeek final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$isoWeek", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$isoWeek", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.isoWeek(date)); @@ -3798,10 +3810,7 @@ class ExpressionYear final : public DateExpressionAcceptingTimeZone date, boost::intrusive_ptr timeZone = nullptr) : DateExpressionAcceptingTimeZone( - expCtx, "$year", std::move(date), std::move(timeZone)) { - expCtx->sbeCompatibility = - std::min(expCtx->sbeCompatibility, SbeCompatibility::flagGuarded); - } + expCtx, "$year", std::move(date), std::move(timeZone)) {} Value evaluateDate(Date_t date, const TimeZone& timeZone) const final { return Value(timeZone.dateParts(date).year); @@ -4253,6 +4262,30 @@ struct SubstituteFieldPathWalker { const StringMap& renameList; }; +/** + * This visitor is used to visit only ExpressionFieldPath nodes in an expression tree and call 'fn' + * on them. 
+ * + * Usage example: + * bool isFoo = false; + * FieldPathVisitor visitor([&](const ExpressionFieldPath* expr) { + * isFoo = isFoo || expr->isFoo(); + * }); + */ +template +struct FieldPathVisitor : public SelectiveConstExpressionVisitorBase { + // To avoid overloaded-virtual warnings. + using SelectiveConstExpressionVisitorBase::visit; + + explicit FieldPathVisitor(const F& fn) : _fn(fn) {} + + void visit(const ExpressionFieldPath* expr) final { + _fn(expr); + } + + F _fn; +}; + /** * $dateTrunc expression that maps a date to a lower bound of a bin of a certain size that the date * belongs to. It uses 2000-01-01T00:00:00.000 as a reference point. diff --git a/src/mongo/db/pipeline/expression_and_test.cpp b/src/mongo/db/pipeline/expression_and_test.cpp index 46bdd07dd9025..2fde410f2bd03 100644 --- a/src/mongo/db/pipeline/expression_and_test.cpp +++ b/src/mongo/db/pipeline/expression_and_test.cpp @@ -27,17 +27,25 @@ * it in the license file. */ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/config.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace ExpressionTests { diff --git a/src/mongo/db/pipeline/expression_bm.cpp b/src/mongo/db/pipeline/expression_bm.cpp index 001d0d9b6f854..3ab1a1289f8e8 100644 --- a/src/mongo/db/pipeline/expression_bm.cpp +++ b/src/mongo/db/pipeline/expression_bm.cpp @@ -27,15 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include + +#include -#include "mongo/db/matcher/expression_parser.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_bm_fixture.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -46,7 +51,7 @@ class ClassicExpressionBenchmarkFixture : public ExpressionBenchmarkFixture { const std::vector& documents) override final { QueryTestServiceContext testServiceContext; auto opContext = testServiceContext.makeOperationContext(); - NamespaceString nss("test.bm"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.bm"); auto exprContext = make_intrusive(opContext.get(), nss); // Build an expression. diff --git a/src/mongo/db/pipeline/expression_bm_fixture.cpp b/src/mongo/db/pipeline/expression_bm_fixture.cpp index 00b656f8733e0..cd19cc09adcf6 100644 --- a/src/mongo/db/pipeline/expression_bm_fixture.cpp +++ b/src/mongo/db/pipeline/expression_bm_fixture.cpp @@ -27,13 +27,25 @@ * it in the license file. 
*/ +#include +#include +#include #include +#include +#include +#include #include +#include +#include -#include "mongo/db/pipeline/expression_bm_fixture.h" +#include -#include "mongo/db/json.h" -#include "mongo/db/matcher/expression_geo.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/pipeline/expression_bm_fixture.h" +#include "mongo/platform/decimal128.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/pipeline/expression_bm_fixture.h b/src/mongo/db/pipeline/expression_bm_fixture.h index 9b0a182aae368..5e7e87a25262d 100644 --- a/src/mongo/db/pipeline/expression_bm_fixture.h +++ b/src/mongo/db/pipeline/expression_bm_fixture.h @@ -29,13 +29,17 @@ #pragma once -#include "mongo/platform/basic.h" - #include +#include +#include +#include +#include #include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/platform/basic.h" #include "mongo/platform/random.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/pipeline/expression_compare_test.cpp b/src/mongo/db/pipeline/expression_compare_test.cpp index a2ee85a1a65fc..1dfb0e4432a97 100644 --- a/src/mongo/db/pipeline/expression_compare_test.cpp +++ b/src/mongo/db/pipeline/expression_compare_test.cpp @@ -27,16 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace ExpressionTests { diff --git a/src/mongo/db/pipeline/expression_context.cpp b/src/mongo/db/pipeline/expression_context.cpp index e5b9d16f97919..3e68452d5c1d9 100644 --- a/src/mongo/db/pipeline/expression_context.cpp +++ b/src/mongo/db/pipeline/expression_context.cpp @@ -29,10 +29,18 @@ #include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/basic_types.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" -#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/stats/counters.h" #include "mongo/util/intrusive_counter.h" @@ -43,6 +51,50 @@ ExpressionContext::ResolvedNamespace::ResolvedNamespace(NamespaceString ns, boost::optional collUUID) : ns(std::move(ns)), pipeline(std::move(pipeline)), uuid(collUUID) {} +ExpressionContext::ExpressionContext(OperationContext* opCtx, + const FindCommandRequest& findCmd, + std::unique_ptr collator, + bool mayDbProfile, + boost::optional verbosity, + bool allowDiskUseDefault) + // Although both 'find' and 'aggregate' 
commands have an ExpressionContext, some of the data + // members in the ExpressionContext are used exclusively by the aggregation subsystem. This + // includes the following fields which here we simply initialize to some meaningless default + // value: + // - explain + // - fromMongos + // - needsMerge + // - bypassDocumentValidation + // - mongoProcessInterface + // - resolvedNamespaces + // - uuid + // + // As we change the code to make the find and agg systems more tightly coupled, it would make + // sense to start initializing these fields for find operations as well. + : ExpressionContext( + opCtx, + verbosity, + false, // fromMongos + false, // needsMerge + findCmd.getAllowDiskUse().value_or(allowDiskUseDefault), + false, // bypassDocumentValidation + false, // isMapReduceCommand + findCmd.getNamespaceOrUUID().isNamespaceString() ? findCmd.getNamespaceOrUUID().nss() + : NamespaceString{}, + findCmd.getLegacyRuntimeConstants(), + std::move(collator), + nullptr, // mongoProcessInterface + {}, // resolvedNamespaces + [findCmd]() -> boost::optional { + if (findCmd.getNamespaceOrUUID().isUUID()) { + return findCmd.getNamespaceOrUUID().uuid(); + } + return boost::none; + }(), + findCmd.getLet(), + mayDbProfile, + findCmd.getSerializationContext()) {} + ExpressionContext::ExpressionContext(OperationContext* opCtx, const AggregateCommandRequest& request, std::unique_ptr collator, @@ -65,7 +117,8 @@ ExpressionContext::ExpressionContext(OperationContext* opCtx, std::move(resolvedNamespaces), std::move(collUUID), request.getLet(), - mayDbProfile) { + mayDbProfile, + request.getSerializationContext()) { if (request.getIsMapReduceCommand()) { // mapReduce command JavaScript invocation is only subject to the server global @@ -90,7 +143,8 @@ ExpressionContext::ExpressionContext( StringMap resolvedNamespaces, boost::optional collUUID, const boost::optional& letParameters, - bool mayDbProfile) + bool mayDbProfile, + const SerializationContext& serializationCtx) : explain(explain), fromMongos(fromMongos), needsMerge(needsMerge), @@ -98,6 +152,7 @@ ExpressionContext::ExpressionContext( !(opCtx && opCtx->readOnly())), // Disallow disk use if in read-only mode. bypassDocumentValidation(bypassDocumentValidation), ns(ns), + serializationCtxt(serializationCtx), uuid(std::move(collUUID)), opCtx(opCtx), mongoProcessInterface(mongoProcessInterface), @@ -135,9 +190,11 @@ ExpressionContext::ExpressionContext( const NamespaceString& nss, const boost::optional& runtimeConstants, const boost::optional& letParameters, + bool allowDiskUse, bool mayDbProfile, boost::optional explain) : explain(explain), + allowDiskUse(allowDiskUse), ns(nss), opCtx(opCtx), mongoProcessInterface(std::make_shared()), @@ -204,7 +261,8 @@ boost::intrusive_ptr ExpressionContext::copyWith( _resolvedNamespaces, uuid, boost::none /* letParameters */, - mayDbProfile); + mayDbProfile, + SerializationContext()); expCtx->inMongos = inMongos; expCtx->maxFeatureCompatibilityVersion = maxFeatureCompatibilityVersion; @@ -225,6 +283,7 @@ boost::intrusive_ptr ExpressionContext::copyWith( expCtx->originalAggregateCommand = originalAggregateCommand.getOwned(); expCtx->inLookup = inLookup; + expCtx->serializationCtxt = serializationCtxt; // Note that we intentionally skip copying the value of '_interruptCounter' because 'expCtx' is // intended to be used for executing a separate aggregation pipeline. 
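From a caller's perspective, the find-specific ExpressionContext constructor defined at the top of this hunk can be used roughly as in the sketch below. The operation context, the parsed FindCommandRequest, the null (simple) collator, and the choice of mayDbProfile are assumptions made for illustration; the remaining parameters are left at their declared defaults.

```cpp
#include "mongo/db/pipeline/expression_context.h"
#include "mongo/util/intrusive_counter.h"

namespace mongo {
// Sketch only: build an ExpressionContext for a find command via the new constructor.
boost::intrusive_ptr<ExpressionContext> makeFindExpCtx(OperationContext* opCtx,
                                                       const FindCommandRequest& findCmd) {
    // A null collator selects the simple collation. Aggregation-only fields
    // (explain, fromMongos, needsMerge, ...) keep the defaults described in the
    // comment above.
    return make_intrusive<ExpressionContext>(
        opCtx, findCmd, nullptr /* collator */, true /* mayDbProfile */);
}
}  // namespace mongo
```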
@@ -234,57 +293,49 @@ boost::intrusive_ptr ExpressionContext::copyWith( void ExpressionContext::startExpressionCounters() { if (enabledCounters && !_expressionCounters) { - _expressionCounters = boost::make_optional({}); + _expressionCounters = std::make_unique(); } } void ExpressionContext::incrementMatchExprCounter(StringData name) { if (enabledCounters && _expressionCounters) { - ++_expressionCounters.value().matchExprCountersMap[name]; + ++_expressionCounters->matchExprCountersMap[name]; } } void ExpressionContext::incrementAggExprCounter(StringData name) { if (enabledCounters && _expressionCounters) { - ++_expressionCounters.value().aggExprCountersMap[name]; + ++_expressionCounters->aggExprCountersMap[name]; } } void ExpressionContext::incrementGroupAccumulatorExprCounter(StringData name) { if (enabledCounters && _expressionCounters) { - ++_expressionCounters.value().groupAccumulatorExprCountersMap[name]; + ++_expressionCounters->groupAccumulatorExprCountersMap[name]; } } void ExpressionContext::incrementWindowAccumulatorExprCounter(StringData name) { if (enabledCounters && _expressionCounters) { - ++_expressionCounters.value().windowAccumulatorExprCountersMap[name]; + ++_expressionCounters->windowAccumulatorExprCountersMap[name]; } } void ExpressionContext::stopExpressionCounters() { if (enabledCounters && _expressionCounters) { - operatorCountersMatchExpressions.mergeCounters( - _expressionCounters.value().matchExprCountersMap); - operatorCountersAggExpressions.mergeCounters( - _expressionCounters.value().aggExprCountersMap); + operatorCountersMatchExpressions.mergeCounters(_expressionCounters->matchExprCountersMap); + operatorCountersAggExpressions.mergeCounters(_expressionCounters->aggExprCountersMap); operatorCountersGroupAccumulatorExpressions.mergeCounters( - _expressionCounters.value().groupAccumulatorExprCountersMap); + _expressionCounters->groupAccumulatorExprCountersMap); operatorCountersWindowAccumulatorExpressions.mergeCounters( - _expressionCounters.value().windowAccumulatorExprCountersMap); + _expressionCounters->windowAccumulatorExprCountersMap); } - _expressionCounters = boost::none; + _expressionCounters.reset(); } void ExpressionContext::setUserRoles() { - // We need to check the FCV here because the $$USER_ROLES variable will always appear in the - // serialized command when one shard is sending a sub-query to another shard. The query will - // fail in the case where the shards are running different binVersions and one of them does not - // have a notion of this variable. This FCV check prevents this from happening, as the value of - // the variable is not set (and therefore not serialized) if the FCV is too old. - if (serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gFeatureFlagUserRoles.isEnabled(serverGlobalParams.featureCompatibility) && - enableAccessToUserRoles.load()) { + // Only set the value of $$USER_ROLES if it is referenced in the query. 
+ if (isSystemVarReferencedInQuery(Variables::kUserRolesId) && enableAccessToUserRoles.load()) { variables.defineUserRoles(opCtx); } } diff --git a/src/mongo/db/pipeline/expression_context.h b/src/mongo/db/pipeline/expression_context.h index dcbe7016b56b2..5cf5814594bc6 100644 --- a/src/mongo/db/pipeline/expression_context.h +++ b/src/mongo/db/pipeline/expression_context.h @@ -29,15 +29,29 @@ #pragma once +#include #include +#include +#include #include +#include +#include +#include +#include #include #include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_comparator.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" @@ -45,16 +59,27 @@ #include "mongo/db/pipeline/javascript_execution.h" #include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/resume_token.h" #include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/tailable_mode.h" +#include "mongo/db/query/tailable_mode_gen.h" #include "mongo/db/server_options.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/scripting/engine.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" #include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { @@ -112,7 +137,7 @@ class ExpressionContext : public RefCountable { boost::intrusive_ptr _expCtx; - std::unique_ptr _originalCollator; + std::shared_ptr _originalCollator; }; /** @@ -126,6 +151,16 @@ class ExpressionContext : public RefCountable { StringMap windowAccumulatorExprCountersMap; }; + /** + * Constructs an ExpressionContext to be used for find command parsing and evaluation. + */ + ExpressionContext(OperationContext* opCtx, + const FindCommandRequest& findCmd, + std::unique_ptr collator, + bool mayDbProfile, + boost::optional verbosity = boost::none, + bool allowDiskUseByDefault = false); + /** * Constructs an ExpressionContext to be used for Pipeline parsing and evaluation. * 'resolvedNamespaces' maps collection names (not full namespaces) to ResolvedNamespaces. 
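The change to ExpressionContext::setUserRoles() shown above makes $$USER_ROLES lazy: the roles are only materialized when the query actually references the variable, which the context learns through the reference-tracking helpers (setSystemVarReferencedInQuery / isSystemVarReferencedInQuery) declared a little further below. A sketch of the intended sequence, with the wrapper function and its call sites assumed purely for illustration:

```cpp
#include "mongo/db/pipeline/expression_context.h"
#include "mongo/db/pipeline/variables.h"

namespace mongo {
// Sketch only: the expected interplay between parsing and setUserRoles().
void defineUserRolesIfReferenced(ExpressionContext* expCtx, bool queryMentionsUserRoles) {
    if (queryMentionsUserRoles) {
        // Normally recorded by the parser when it encounters $$USER_ROLES.
        expCtx->setSystemVarReferencedInQuery(Variables::kUserRolesId);
    }
    // Defines the roles only when the reference was recorded (and the
    // enableAccessToUserRoles knob permits it).
    expCtx->setUserRoles();
}
}  // namespace mongo
```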
@@ -158,7 +193,8 @@ class ExpressionContext : public RefCountable { StringMap resolvedNamespaces, boost::optional collUUID, const boost::optional& letParameters = boost::none, - bool mayDbProfile = true); + bool mayDbProfile = true, + const SerializationContext& serializationCtx = SerializationContext()); /** * Constructs an ExpressionContext suitable for use outside of the aggregation system, including @@ -171,6 +207,7 @@ class ExpressionContext : public RefCountable { const NamespaceString& ns, const boost::optional& runtimeConstants = boost::none, const boost::optional& letParameters = boost::none, + bool allowDiskUse = false, bool mayDbProfile = true, boost::optional explain = boost::none); @@ -209,6 +246,10 @@ class ExpressionContext : public RefCountable { return _collator.get(); } + std::shared_ptr getCollatorShared() const { + return _collator; + } + /** * Whether to track timing information and "work" counts in the agg layer. */ @@ -235,7 +276,7 @@ class ExpressionContext : public RefCountable { * Use with caution - '_collator' is used in the context of a Pipeline, and it is illegal * to change the collation once a Pipeline has been parsed with this ExpressionContext. */ - void setCollator(std::unique_ptr collator) { + void setCollator(std::shared_ptr collator) { _collator = std::move(collator); // Document/Value comparisons must be aware of the collation. @@ -411,7 +452,7 @@ class ExpressionContext : public RefCountable { void stopExpressionCounters(); bool expressionCountersAreActive() { - return _expressionCounters.is_initialized(); + return static_cast(_expressionCounters); } /** @@ -419,6 +460,27 @@ class ExpressionContext : public RefCountable { */ void setUserRoles(); + /** + * Record that we have seen the given system variable in the query. + */ + void setSystemVarReferencedInQuery(Variables::Id var) { + tassert(7612600, + "Cannot track references to user-defined variables.", + !Variables::isUserDefinedVariable(var)); + _varsReferencedInQuery.insert(var); + } + + /** + * Returns true if the given system variable is referenced in the query and false otherwise. + */ + bool isSystemVarReferencedInQuery(Variables::Id var) const { + tassert( + 7612601, + "Cannot access whether a variable is referenced to or not for a user-defined variable.", + !Variables::isUserDefinedVariable(var)); + return _varsReferencedInQuery.count(var); + } + // The explain verbosity requested by the user, or boost::none if no explain was requested. boost::optional explain; @@ -432,7 +494,10 @@ class ExpressionContext : public RefCountable { NamespaceString ns; + SerializationContext serializationCtxt; + // If known, the UUID of the execution namespace for this aggregation command. + // TODO(SERVER-78226): Replace `ns` and `uuid` with a type which can express "nss and uuid". boost::optional uuid; std::string tempDir; // Defaults to empty to prevent external sorting in mongos. @@ -500,9 +565,9 @@ class ExpressionContext : public RefCountable { bool exprDeprectedForApiV1 = false; // Tracks whether the collator to use for the aggregation matches the default collation of the - // collection or view. For collectionless aggregates this is set to 'kNoDefaultCollation'. - enum class CollationMatchesDefault { kNoDefault, kYes, kNo }; - CollationMatchesDefault collationMatchesDefault = CollationMatchesDefault::kNoDefault; + // collection or view. 
+ enum class CollationMatchesDefault { kYes, kNo }; + CollationMatchesDefault collationMatchesDefault = CollationMatchesDefault::kYes; // When non-empty, contains the unmodified user provided aggregation command. BSONObj originalAggregateCommand; @@ -537,15 +602,43 @@ class ExpressionContext : public RefCountable { return _requiresTimeseriesExtendedRangeSupport; } - // Returns true if the resolved collation of the context is simple. - bool isResolvedCollationSimple() const { - return getCollatorBSON().woCompare(CollationSpec::kSimpleSpec) == 0; - } - // Forces the plan cache to be used even if there's only one solution available. Queries that // are ineligible will still not be cached. bool forcePlanCache = false; + // This is state that is to be shared between the DocumentInternalSearchMongotRemote and + // DocumentInternalSearchIdLookup stages (these stages are the result of desugaring $search) + // during runtime. + class SharedSearchState { + public: + SharedSearchState() {} + + long long getDocsReturnedByIdLookup() const { + return _docsReturnedByIdLookup; + } + + /** + * Sets the value of _docsReturnedByIdLookup to 0. + */ + void resetDocsReturnedByIdLookup() { + _docsReturnedByIdLookup = 0; + } + + /** + * Increments the value of _docsReturnedByIdLookup by 1. + */ + void incrementDocsReturnedByIdLookup() { + _docsReturnedByIdLookup++; + } + + private: + // When there is an extractable limit in the query, DocumentInternalSearchMongotRemote sends + // a getMore to mongot that specifies how many more documents it needs to fulfill that + // limit, and it incorporates the amount of documents returned by the + // DocumentInternalSearchIdLookup stage into that value. + long long _docsReturnedByIdLookup = 0; + } sharedSearchState; + protected: static const int kInterruptCheckPeriod = 128; @@ -556,7 +649,7 @@ class ExpressionContext : public RefCountable { void checkForInterruptSlow(); // Collator used for comparisons. - std::unique_ptr _collator; + std::shared_ptr _collator; // Used for all comparisons of Document/Value during execution of the aggregation operation. // Must not be changed after parsing a Pipeline with this ExpressionContext. @@ -573,8 +666,12 @@ class ExpressionContext : public RefCountable { bool _requiresTimeseriesExtendedRangeSupport = false; private: - boost::optional _expressionCounters = boost::none; + std::unique_ptr _expressionCounters; bool _gotTemporarilyUnavailableException = false; + + // We use this set to indicate whether or not a system variable was referenced in the query that + // is being executed (if the variable was referenced, it is an element of this set). + stdx::unordered_set _varsReferencedInQuery; }; } // namespace mongo diff --git a/src/mongo/db/pipeline/expression_context_for_test.h b/src/mongo/db/pipeline/expression_context_for_test.h index 6cd98445d8fc4..2a740fbf3a2cb 100644 --- a/src/mongo/db/pipeline/expression_context_for_test.h +++ b/src/mongo/db/pipeline/expression_context_for_test.h @@ -57,7 +57,8 @@ class ExpressionContextForTest : public ExpressionContext { * Defaults to using a namespace of "test.namespace". */ ExpressionContextForTest() - : ExpressionContextForTest(NamespaceString{"test"_sd, "namespace"_sd}) {} + : ExpressionContextForTest( + NamespaceString::createNamespaceString_forTest("test"_sd, "namespace"_sd)) {} /** * If there is a global ServiceContext available, this constructor will adopt it. Otherwise, it * will internally create an owned QueryTestServiceContext. 
Similarly, if an OperationContext @@ -76,11 +77,11 @@ class ExpressionContextForTest : public ExpressionContext { LegacyRuntimeConstants(Date_t::now(), Timestamp(1, 0)), {}, // collator std::make_shared(), - {}, // resolvedNamespaces - {}, // collUUID - {}, // let - false // mayDbProfile - ) { + {}, // resolvedNamespaces + {}, // collUUID + {}, // let + false, // mayDbProfile + SerializationContext()) { // If there is an existing global ServiceContext, adopt it. Otherwise, create a new context. // Similarly, we create a new OperationContext or adopt an existing context as appropriate. if (hasGlobalServiceContext()) { @@ -109,7 +110,8 @@ class ExpressionContextForTest : public ExpressionContext { * Defaults to using a namespace of "test.namespace". */ ExpressionContextForTest(OperationContext* opCtx) - : ExpressionContextForTest(opCtx, NamespaceString{"test"_sd, "namespace"_sd}) {} + : ExpressionContextForTest( + opCtx, NamespaceString::createNamespaceString_forTest("test"_sd, "namespace"_sd)) {} /** * Constructor which sets the given OperationContext on the ExpressionContextForTest. This will @@ -127,11 +129,11 @@ class ExpressionContextForTest : public ExpressionContext { LegacyRuntimeConstants(Date_t::now(), Timestamp(1, 0)), {}, // collator std::make_shared(), - {}, // resolvedNamespaces - {}, // collUUID - {}, // let - false // mayDbProfile - ), + {}, // resolvedNamespaces + {}, // collUUID + {}, // let + false, // mayDbProfile + SerializationContext()), _serviceContext(opCtx->getServiceContext()) { // Resolve the TimeZoneDatabase to be used by this ExpressionContextForTest. _setTimeZoneDatabase(); @@ -172,8 +174,8 @@ class ExpressionContextForTest : public ExpressionContext { {}, // resolvedNamespaces {}, // collUUID letParameters, - false // mayDbProfile - ), + false, // mayDbProfile + SerializationContext()), _serviceContext(opCtx->getServiceContext()) { // Resolve the TimeZoneDatabase to be used by this ExpressionContextForTest. _setTimeZoneDatabase(); @@ -202,15 +204,11 @@ class ExpressionContextForTest : public ExpressionContext { // In cases when there is a ServiceContext, if there already exists a TimeZoneDatabase // associated with the ServiceContext, adopt it. Otherwise, create a new one. void _setTimeZoneDatabase() { - // In some cases, e.g. the user uses an OperationContextNoop which does _not_ provide a - // ServiceContext to create this ExpressionContextForTest, then it shouldn't resolve any - // timeZoneDatabase. - if (auto* serviceContext = getServiceContext()) { - if (!TimeZoneDatabase::get(serviceContext)) { - TimeZoneDatabase::set(serviceContext, std::make_unique()); - } - timeZoneDatabase = TimeZoneDatabase::get(serviceContext); + auto* serviceContext = getServiceContext(); + if (!TimeZoneDatabase::get(serviceContext)) { + TimeZoneDatabase::set(serviceContext, std::make_unique()); } + timeZoneDatabase = TimeZoneDatabase::get(serviceContext); } stdx::variant> _serviceContext; diff --git a/src/mongo/db/pipeline/expression_context_test.cpp b/src/mongo/db/pipeline/expression_context_test.cpp index a8e93bb290d1c..73fa64479b26c 100644 --- a/src/mongo/db/pipeline/expression_context_test.cpp +++ b/src/mongo/db/pipeline/expression_context_test.cpp @@ -27,17 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" -#include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/expression_convert_test.cpp b/src/mongo/db/pipeline/expression_convert_test.cpp index 4bf42b9b8a5c2..2414fc6dbf4bb 100644 --- a/src/mongo/db/pipeline/expression_convert_test.cpp +++ b/src/mongo/db/pipeline/expression_convert_test.cpp @@ -27,13 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" #include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/pipeline/expression_date_test.cpp b/src/mongo/db/pipeline/expression_date_test.cpp index 419028b9f06c7..8f470ed3c122e 100644 --- a/src/mongo/db/pipeline/expression_date_test.cpp +++ b/src/mongo/db/pipeline/expression_date_test.cpp @@ -27,15 +27,37 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include #include #include - +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_dependencies.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/pipeline/expression_dependencies.cpp b/src/mongo/db/pipeline/expression_dependencies.cpp index b561d9785fe0b..c89922538817a 100644 --- a/src/mongo/db/pipeline/expression_dependencies.cpp +++ b/src/mongo/db/pipeline/expression_dependencies.cpp @@ -29,10 +29,22 @@ #include "mongo/db/pipeline/expression_dependencies.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/matcher/copyable_match_expression.h" #include "mongo/db/matcher/match_expression_dependencies.h" #include "mongo/db/pipeline/expression_find_internal.h" #include "mongo/db/pipeline/expression_visitor.h" #include "mongo/db/pipeline/expression_walker.h" +#include "mongo/db/pipeline/field_path.h" namespace mongo::expression { diff --git a/src/mongo/db/pipeline/expression_dependencies.h b/src/mongo/db/pipeline/expression_dependencies.h index 8c5cb030cd22b..db07677ef11ee 100644 --- a/src/mongo/db/pipeline/expression_dependencies.h +++ b/src/mongo/db/pipeline/expression_dependencies.h @@ -29,9 +29,12 @@ #pragma once +#include + #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/variables.h" namespace mongo::expression { diff --git a/src/mongo/db/pipeline/expression_field_path_test.cpp b/src/mongo/db/pipeline/expression_field_path_test.cpp index 4b332d9e4f45a..34fcc4fd1e05e 100644 --- a/src/mongo/db/pipeline/expression_field_path_test.cpp +++ b/src/mongo/db/pipeline/expression_field_path_test.cpp @@ -27,17 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_dependencies.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace ExpressionTests { @@ -61,7 +76,7 @@ Document fromBson(BSONObj obj) { return Document(obj); } -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } @@ -233,8 +248,8 @@ TEST(FieldPath, ScalarVariableWithDottedFieldPathOptimizesToConstantMissingValue TEST(FieldPath, SerializeWithRedaction) { SerializationOptions options; - options.identifierRedactionPolicy = redactFieldNameForTest; - options.redactIdentifiers = true; + options.transformIdentifiersCallback = applyHmacForTest; + options.transformIdentifiers = true; auto expCtx = ExpressionContextForTest{}; intrusive_ptr expression = @@ -280,12 +295,11 @@ TEST(FieldPath, SerializeWithRedaction) { expression->serialize(options).getDocument()); // Test that a variable followed by user fields is properly hashed. 
- std::string replacementChar = "?"; - options.replacementForLiteralArgs = replacementChar; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; expression = expr(R"({$gt: ["$$ROOT.a.b", 5]})"); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"$gt":["$$ROOT.HASH.HASH",{"$const":"?"}]})", + R"({"$gt":["$$ROOT.HASH.HASH","?number"]})", expression->serialize(options).getDocument()); expression = expr(R"({$gt: ["$foo", "$$NOW"]})"); diff --git a/src/mongo/db/pipeline/expression_find_internal.h b/src/mongo/db/pipeline/expression_find_internal.h index 9baf15b395861..c52c75c916db5 100644 --- a/src/mongo/db/pipeline/expression_find_internal.h +++ b/src/mongo/db/pipeline/expression_find_internal.h @@ -29,11 +29,29 @@ #pragma once +#include +#include +#include #include - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/projection_executor_utils.h" #include "mongo/db/matcher/copyable_match_expression.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_visitor.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { /** diff --git a/src/mongo/db/pipeline/expression_find_internal_test.cpp b/src/mongo/db/pipeline/expression_find_internal_test.cpp index 183e009cead6d..ae3b55c5e437e 100644 --- a/src/mongo/db/pipeline/expression_find_internal_test.cpp +++ b/src/mongo/db/pipeline/expression_find_internal_test.cpp @@ -27,14 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/projection_executor.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_dependencies.h" #include "mongo/db/pipeline/expression_find_internal.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/string_map.h" namespace mongo::expression_internal_tests { constexpr auto kProjectionPostImageVarName = diff --git a/src/mongo/db/pipeline/expression_function.cpp b/src/mongo/db/pipeline/expression_function.cpp index 4e895b3e1c5c5..86a46c4cbbb44 100644 --- a/src/mongo/db/pipeline/expression_function.cpp +++ b/src/mongo/db/pipeline/expression_function.cpp @@ -29,6 +29,19 @@ #include "mongo/db/pipeline/expression_function.h" +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/pipeline/javascript_execution.h" +#include "mongo/scripting/engine.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + namespace mongo { REGISTER_STABLE_EXPRESSION(function, ExpressionFunction::parse); @@ -47,17 +60,17 @@ ExpressionFunction::ExpressionFunction(ExpressionContext* const expCtx, } Value ExpressionFunction::serialize(SerializationOptions options) const { - MutableDocument d; - d["body"] = options.replacementForLiteralArgs ? 
Value(*options.replacementForLiteralArgs) - : Value(_funcSource); - d["args"] = Value(_passedArgs->serialize(options)); - d["lang"] = Value(_lang); + MutableDocument innerOpts(Document{{"body"_sd, options.serializeLiteral(_funcSource)}, + {"args"_sd, _passedArgs->serialize(options)}, + // "lang" is purposefully not treated as a literal since it + // is more of a selection of an enum + {"lang"_sd, _lang}}); // This field will only be seralized when desugaring $where in $expr + $_internalJs if (_assignFirstArgToThis) { - d["_internalSetObjToThis"] = Value(_assignFirstArgToThis); + innerOpts["_internalSetObjToThis"] = options.serializeLiteral(_assignFirstArgToThis); } - return Value(Document{{kExpressionName, d.freezeToValue()}}); + return Value(Document{{kExpressionName, innerOpts.freezeToValue()}}); } boost::intrusive_ptr ExpressionFunction::parse(ExpressionContext* const expCtx, diff --git a/src/mongo/db/pipeline/expression_function.h b/src/mongo/db/pipeline/expression_function.h index 29111b21f6435..d44c90eb409e4 100644 --- a/src/mongo/db/pipeline/expression_function.h +++ b/src/mongo/db/pipeline/expression_function.h @@ -29,8 +29,21 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_visitor.h" #include "mongo/db/pipeline/javascript_execution.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { /** diff --git a/src/mongo/db/pipeline/expression_function_test.cpp b/src/mongo/db/pipeline/expression_function_test.cpp index 640804b55b4f0..7b66fbbea1801 100644 --- a/src/mongo/db/pipeline/expression_function_test.cpp +++ b/src/mongo/db/pipeline/expression_function_test.cpp @@ -27,12 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_function.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" + namespace mongo { namespace { @@ -40,18 +48,16 @@ namespace { /** * A default redaction strategy that generates easy to check results for testing purposes. 
*/ -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } TEST(ExpressionFunction, SerializeAndRedactArgs) { - SerializationOptions options; - std::string replacementChar = "?"; - options.replacementForLiteralArgs = replacementChar; - options.redactIdentifiers = true; - options.identifierRedactionPolicy = redactFieldNameForTest; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiers = true; + options.transformIdentifiersCallback = applyHmacForTest; auto expCtx = ExpressionContextForTest(); auto expr = BSON("$function" << BSON("body" @@ -61,7 +67,7 @@ TEST(ExpressionFunction, SerializeAndRedactArgs) { VariablesParseState vps = expCtx.variablesParseState; auto exprFunc = ExpressionFunction::parse(&expCtx, expr.firstElement(), vps); ASSERT_DOCUMENT_EQ_AUTO( // NOLINT - R"({"$function":{"body":"?","args":["$HASH"],"lang":"js"}})", + R"({"$function":{"body":"?string","args":["$HASH"],"lang":"js"}})", exprFunc->serialize(options).getDocument()); } } // namespace diff --git a/src/mongo/db/pipeline/expression_javascript_test.cpp b/src/mongo/db/pipeline/expression_javascript_test.cpp index dc0df7cb51de6..51a8be82a200f 100644 --- a/src/mongo/db/pipeline/expression_javascript_test.cpp +++ b/src/mongo/db/pipeline/expression_javascript_test.cpp @@ -27,18 +27,32 @@ * it in the license file. */ -#include "mongo/db/pipeline/expression_js_emit.h" +#include + +#include -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_function.h" +#include "mongo/db/pipeline/expression_js_emit.h" #include "mongo/db/pipeline/process_interface/standalone_process_interface.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/platform/atomic_word.h" #include "mongo/scripting/engine.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/expression_js_emit.cpp b/src/mongo/db/pipeline/expression_js_emit.cpp index dd044ae474b95..b812d8ce3eeec 100644 --- a/src/mongo/db/pipeline/expression_js_emit.cpp +++ b/src/mongo/db/pipeline/expression_js_emit.cpp @@ -27,12 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/auth/authorization_session.h" +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/pipeline/expression_js_emit.h" +#include "mongo/db/pipeline/javascript_execution.h" #include "mongo/db/pipeline/make_js_function.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/db/pipeline/expression_js_emit.h b/src/mongo/db/pipeline/expression_js_emit.h index 185408660014c..a64140bef9a25 100644 --- a/src/mongo/db/pipeline/expression_js_emit.h +++ b/src/mongo/db/pipeline/expression_js_emit.h @@ -29,8 +29,24 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_visitor.h" #include "mongo/db/pipeline/javascript_execution.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/expression_let_test.cpp b/src/mongo/db/pipeline/expression_let_test.cpp index fe4e1291fc5f3..e9840ff949ea9 100644 --- a/src/mongo/db/pipeline/expression_let_test.cpp +++ b/src/mongo/db/pipeline/expression_let_test.cpp @@ -27,25 +27,35 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo { namespace ExpressionTests { namespace { -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } TEST(RedactionTest, ExpressionLet) { SerializationOptions options; - options.identifierRedactionPolicy = redactFieldNameForTest; - options.redactIdentifiers = true; + options.transformIdentifiersCallback = applyHmacForTest; + options.transformIdentifiers = true; auto expCtx = ExpressionContextForTest{}; diff --git a/src/mongo/db/pipeline/expression_nary_test.cpp b/src/mongo/db/pipeline/expression_nary_test.cpp index 6058802bcd9c3..bbc868876062a 100644 --- a/src/mongo/db/pipeline/expression_nary_test.cpp +++ b/src/mongo/db/pipeline/expression_nary_test.cpp @@ -27,20 +27,41 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/config.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_dependencies.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/pipeline/expression_visitor.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace ExpressionTests { using boost::intrusive_ptr; diff --git a/src/mongo/db/pipeline/expression_object_test.cpp b/src/mongo/db/pipeline/expression_object_test.cpp index 36a57d03ab39c..864f38058df6b 100644 --- a/src/mongo/db/pipeline/expression_object_test.cpp +++ b/src/mongo/db/pipeline/expression_object_test.cpp @@ -27,20 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/config.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_dependencies.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace ExpressionTests { diff --git a/src/mongo/db/pipeline/expression_or_test.cpp b/src/mongo/db/pipeline/expression_or_test.cpp index ca4f5fdf843fb..09e89f3f18345 100644 --- a/src/mongo/db/pipeline/expression_or_test.cpp +++ b/src/mongo/db/pipeline/expression_or_test.cpp @@ -27,20 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/config.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace ExpressionTests { diff --git a/src/mongo/db/pipeline/expression_replace_test.cpp b/src/mongo/db/pipeline/expression_replace_test.cpp index df5e19e784a9c..f30d243b3b02f 100644 --- a/src/mongo/db/pipeline/expression_replace_test.cpp +++ b/src/mongo/db/pipeline/expression_replace_test.cpp @@ -27,15 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace { using namespace mongo; diff --git a/src/mongo/db/pipeline/expression_test.cpp b/src/mongo/db/pipeline/expression_test.cpp index 0b1631d6bff24..c843c074a8008 100644 --- a/src/mongo/db/pipeline/expression_test.cpp +++ b/src/mongo/db/pipeline/expression_test.cpp @@ -27,31 +27,42 @@ * it in the license file. 
*/ +#include +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include + #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" -#include "mongo/config.h" -#include "mongo/crypto/fle_crypto.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/api_parameters.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/hasher.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_multi.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/record_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/decimal128.h" #include "mongo/unittest/assert.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/summation.h" #include "mongo/util/time_support.h" -#include #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -179,7 +190,7 @@ void parseAndVerifyResults( /** * A default redaction strategy that generates easy to check results for testing purposes. */ -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } @@ -822,42 +833,20 @@ TEST(ExpressionConstantTest, ConstantOfValueMissingSerializesToRemoveSystemVar) TEST(ExpressionConstantTest, ConstantRedaction) { SerializationOptions options; - std::string replacementChar = "?"; - options.replacementForLiteralArgs = replacementChar; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; // Test that a constant is replaced. auto expCtx = ExpressionContextForTest{}; intrusive_ptr expression = ExpressionConstant::create(&expCtx, Value("my_ssn"_sd)); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"field":{"$const":"?"}})", + R"({"field":"?string"})", BSON("field" << expression->serialize(options))); auto expressionBSON = BSON("$and" << BSON_ARRAY(BSON("$gt" << BSON_ARRAY("$foo" << 5)) << BSON("$lt" << BSON_ARRAY("$foo" << 10)))); expression = Expression::parseExpression(&expCtx, expressionBSON, expCtx.variablesParseState); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "field": { - "$and": [ - { - "$gt": [ - "$foo", - { - "$const": "?" - } - ] - }, - { - "$lt": [ - "$foo", - { - "$const": "?" 
- } - ] - } - ] - } - })", + R"({"field":{"$and":[{"$gt":["$foo","?number"]},{"$lt":["$foo","?number"]}]}})", BSON("field" << expression->serialize(options))); } @@ -3707,11 +3696,18 @@ TEST(ExpressionGetFieldTest, GetFieldSerializesStringArgumentCorrectly) { VariablesParseState vps = expCtx.variablesParseState; BSONObj expr = fromjson("{$meta: \"foo\"}"); auto expression = ExpressionGetField::parse(&expCtx, expr.firstElement(), vps); - ASSERT_BSONOBJ_EQ(BSON("ignoredField" << BSON("$getField" << BSON("field" << BSON("$const" - << "foo") - << "input" - << "$$CURRENT"))), - BSON("ignoredField" << expression->serialize(false))); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "ignoredField": { + "$getField": { + "field": { + "$const": "foo" + }, + "input": "$$CURRENT" + } + } + })", + BSON("ignoredField" << expression->serialize(false))); } TEST(ExpressionGetFieldTest, GetFieldSerializesCorrectly) { @@ -3719,20 +3715,29 @@ TEST(ExpressionGetFieldTest, GetFieldSerializesCorrectly) { VariablesParseState vps = expCtx.variablesParseState; BSONObj expr = fromjson("{$meta: {\"field\": \"foo\", \"input\": {a: 1}}}"); auto expression = ExpressionGetField::parse(&expCtx, expr.firstElement(), vps); - ASSERT_BSONOBJ_EQ( - BSON("ignoredField" << BSON( - "$getField" << BSON("field" << BSON("$const" - << "foo") - << "input" << BSON("a" << BSON("$const" << 1))))), + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "ignoredField": { + "$getField": { + "field": { + "$const": "foo" + }, + "input": { + "a": { + "$const": 1 + } + } + } + } + })", BSON("ignoredField" << expression->serialize(false))); } TEST(ExpressionGetFieldTest, GetFieldSerializesAndRedactsCorrectly) { SerializationOptions options; - std::string replacementChar = "?"; - options.replacementForLiteralArgs = replacementChar; - options.identifierRedactionPolicy = redactFieldNameForTest; - options.redactIdentifiers = true; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiers = true; + options.transformIdentifiersCallback = applyHmacForTest; auto expCtx = ExpressionContextForTest{}; VariablesParseState vps = expCtx.variablesParseState; @@ -3770,14 +3775,45 @@ TEST(ExpressionGetFieldTest, GetFieldSerializesAndRedactsCorrectly) { } })", BSON("field" << expression->serialize(options))); + + // Test a field with a '$' character. + expressionBSON = BSON("$getField" + << "a.$b.c"); + + expression = ExpressionGetField::parse(&expCtx, expressionBSON.firstElement(), vps); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "field": { + "$getField": { + "field": "HASH.HASH<$b>.HASH", + "input": "$$CURRENT" + } + } + })", + BSON("field" << expression->serialize(options))); + + // Test a field with a trailing '.' character (invalid FieldPath). 
+ expressionBSON = BSON("$getField" + << "a.b.c."); + + expression = ExpressionGetField::parse(&expCtx, expressionBSON.firstElement(), vps); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "field": { + "$getField": { + "field": "HASH", + "input": "$$CURRENT" + } + } + })", + BSON("field" << expression->serialize(options))); } TEST(ExpressionSetFieldTest, SetFieldRedactsCorrectly) { SerializationOptions options; - std::string replacementChar = "?"; - options.replacementForLiteralArgs = replacementChar; - options.identifierRedactionPolicy = redactFieldNameForTest; - options.redactIdentifiers = true; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiersCallback = applyHmacForTest; + options.transformIdentifiers = true; auto expCtx = ExpressionContextForTest{}; VariablesParseState vps = expCtx.variablesParseState; @@ -3811,12 +3847,8 @@ TEST(ExpressionSetFieldTest, SetFieldRedactsCorrectly) { "field": { "$setField": { "field": "HASH", - "input": { - "$const": "?" - }, - "value": { - "$const": "?" - } + "input": "?object", + "value": "?number" } } })", @@ -3833,12 +3865,8 @@ TEST(ExpressionSetFieldTest, SetFieldRedactsCorrectly) { "field": { "$setField": { "field": "HASH", - "input": { - "$const": "?" - }, - "value": { - "$const": "?" - } + "input": "?object", + "value": "?number" } } })", @@ -3860,9 +3888,7 @@ TEST(ExpressionSetFieldTest, SetFieldRedactsCorrectly) { "input": { "HASH": "$HASH" }, - "value": { - "$const": "?" - } + "value": "?number" } } })", @@ -3883,9 +3909,7 @@ TEST(ExpressionSetFieldTest, SetFieldRedactsCorrectly) { "field": { "$setField": { "field": "HASH", - "input": { - "$const": "?" - }, + "input": "?object", "value": { "HASH": "$HASH" } @@ -3905,12 +3929,8 @@ TEST(ExpressionSetFieldTest, SetFieldRedactsCorrectly) { "field": { "$setField": { "field": "HASH", - "input": { - "$const": "?" - }, - "value": { - "$const": "?" - } + "input": "?object", + "value": "?number" } } })", diff --git a/src/mongo/db/pipeline/expression_test_api_version.cpp b/src/mongo/db/pipeline/expression_test_api_version.cpp index 6a4c46b73c411..fc2a4177f4b5d 100644 --- a/src/mongo/db/pipeline/expression_test_api_version.cpp +++ b/src/mongo/db/pipeline/expression_test_api_version.cpp @@ -26,10 +26,21 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/api_parameters.h" #include "mongo/db/pipeline/expression_test_api_version.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/expression_test_api_version.h b/src/mongo/db/pipeline/expression_test_api_version.h index 19f2dfc891edd..b042c8a73ae45 100644 --- a/src/mongo/db/pipeline/expression_test_api_version.h +++ b/src/mongo/db/pipeline/expression_test_api_version.h @@ -29,7 +29,16 @@ #pragma once +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_visitor.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { /** diff --git a/src/mongo/db/pipeline/expression_test_api_version_test.cpp b/src/mongo/db/pipeline/expression_test_api_version_test.cpp index 05c9156ddf017..40a5aef7dc0e2 100644 --- a/src/mongo/db/pipeline/expression_test_api_version_test.cpp +++ b/src/mongo/db/pipeline/expression_test_api_version_test.cpp @@ -27,13 +27,21 @@ * it in the license file. */ +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/expression_test_api_version.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/expression_trigonometric.cpp b/src/mongo/db/pipeline/expression_trigonometric.cpp index 06b63e16bcbff..1bebc91743690 100644 --- a/src/mongo/db/pipeline/expression_trigonometric.cpp +++ b/src/mongo/db/pipeline/expression_trigonometric.cpp @@ -27,10 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "expression_trigonometric.h" +#include "mongo/db/pipeline/expression.h" + namespace mongo { // The parse methods of the expressions are registered here in the .cpp file to prevent multiple diff --git a/src/mongo/db/pipeline/expression_trigonometric.h b/src/mongo/db/pipeline/expression_trigonometric.h index 72affdd77ee60..e5c7ea101a7ad 100644 --- a/src/mongo/db/pipeline/expression_trigonometric.h +++ b/src/mongo/db/pipeline/expression_trigonometric.h @@ -29,8 +29,21 @@ #pragma once +#include +#include +#include +#include + #include "expression.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_visitor.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + namespace mongo { /** diff --git a/src/mongo/db/pipeline/expression_trigonometric_test.cpp b/src/mongo/db/pipeline/expression_trigonometric_test.cpp index 5a37a78a21a65..f5a4cc7032c46 100644 --- a/src/mongo/db/pipeline/expression_trigonometric_test.cpp +++ b/src/mongo/db/pipeline/expression_trigonometric_test.cpp @@ -27,13 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/expression_trigonometric.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/pipeline/expression_trigonometric.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace expression_tests { diff --git a/src/mongo/db/pipeline/expression_trim_test.cpp b/src/mongo/db/pipeline/expression_trim_test.cpp index e338dfe6fc866..6299462c73ae9 100644 --- a/src/mongo/db/pipeline/expression_trim_test.cpp +++ b/src/mongo/db/pipeline/expression_trim_test.cpp @@ -27,21 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/config.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace ExpressionTests { diff --git a/src/mongo/db/pipeline/expression_walker_test.cpp b/src/mongo/db/pipeline/expression_walker_test.cpp index adc341415540d..d2ff1c2f99152 100644 --- a/src/mongo/db/pipeline/expression_walker_test.cpp +++ b/src/mongo/db/pipeline/expression_walker_test.cpp @@ -27,22 +27,36 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include #include #include +#include #include +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/expression_walker.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/field_path.cpp b/src/mongo/db/pipeline/field_path.cpp index 9a0a53f61fcd1..bf75fe8a80013 100644 --- a/src/mongo/db/pipeline/field_path.cpp +++ b/src/mongo/db/pipeline/field_path.cpp @@ -27,14 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include + +#include #include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" #include "mongo/db/pipeline/field_path.h" -#include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/server_options.h" #include "mongo/util/str.h" #include "mongo/util/string_map.h" @@ -74,7 +73,7 @@ string FieldPath::getFullyQualifiedPath(StringData prefix, StringData suffix) { return str::stream() << prefix << "." 
<< suffix; } -FieldPath::FieldPath(std::string inputPath, bool precomputeHashes) +FieldPath::FieldPath(std::string inputPath, bool precomputeHashes, bool validateFieldNames) : _fieldPath(std::move(inputPath)), _fieldPathDotPosition{string::npos} { uassert(40352, "FieldPath cannot be constructed with empty string", !_fieldPath.empty()); uassert(40353, "FieldPath must not end with a '.'.", _fieldPath[_fieldPath.size() - 1] != '.'); @@ -97,7 +96,9 @@ FieldPath::FieldPath(std::string inputPath, bool precomputeHashes) _fieldHash.reserve(pathLength); for (size_t i = 0; i < pathLength; ++i) { const auto& fieldName = getFieldName(i); - uassertValidFieldName(fieldName); + if (validateFieldNames) { + uassertValidFieldName(fieldName); + } _fieldHash.push_back(precomputeHashes ? FieldNameHasher()(fieldName) : kHashUninitialized); } } @@ -106,7 +107,6 @@ void FieldPath::uassertValidFieldName(StringData fieldName) { uassert(15998, "FieldPath field names may not be empty strings.", !fieldName.empty()); const auto dotsAndDollarsHint = " Consider using $getField or $setField."; - if (fieldName[0] == '$' && !kAllowedDollarPrefixedFields.count(fieldName)) { uasserted(16410, str::stream() << "FieldPath field names may not start with '$'." diff --git a/src/mongo/db/pipeline/field_path.h b/src/mongo/db/pipeline/field_path.h index 60792a5e2d20d..ffe3b2d4a462b 100644 --- a/src/mongo/db/pipeline/field_path.h +++ b/src/mongo/db/pipeline/field_path.h @@ -29,10 +29,16 @@ #pragma once +#include +#include #include #include +#include #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" #include "mongo/db/exec/document_value/document_internal.h" @@ -69,11 +75,17 @@ class FieldPath { * * Field names are validated using uassertValidFieldName(). */ - /* implicit */ FieldPath(std::string inputPath, bool precomputeHashes = false); - /* implicit */ FieldPath(StringData inputPath, bool precomputeHashes = false) - : FieldPath(inputPath.toString(), precomputeHashes) {} - /* implicit */ FieldPath(const char* inputPath, bool precomputeHashes = false) - : FieldPath(std::string(inputPath), precomputeHashes) {} + /* implicit */ FieldPath(std::string inputPath, + bool precomputeHashes = false, + bool validateFieldNames = true); + /* implicit */ FieldPath(StringData inputPath, + bool precomputeHashes = false, + bool validateFieldNames = true) + : FieldPath(inputPath.toString(), precomputeHashes, validateFieldNames) {} + /* implicit */ FieldPath(const char* inputPath, + bool precomputeHashes = false, + bool validateFieldNames = true) + : FieldPath(std::string(inputPath), precomputeHashes, validateFieldNames) {} /** * Returns the number of path elements in the field path. diff --git a/src/mongo/db/pipeline/field_path_test.cpp b/src/mongo/db/pipeline/field_path_test.cpp index 208c3bd2d4749..be561ad63a696 100644 --- a/src/mongo/db/pipeline/field_path_test.cpp +++ b/src/mongo/db/pipeline/field_path_test.cpp @@ -27,11 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/bson/bson_depth.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/pipeline/field_path.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/granularity_rounder.cpp b/src/mongo/db/pipeline/granularity_rounder.cpp index a54728ff91af1..aa0feb6ec6d9f 100644 --- a/src/mongo/db/pipeline/granularity_rounder.cpp +++ b/src/mongo/db/pipeline/granularity_rounder.cpp @@ -27,10 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/pipeline/granularity_rounder.h" +#include +#include "mongo/db/pipeline/granularity_rounder.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/pipeline/granularity_rounder.h b/src/mongo/db/pipeline/granularity_rounder.h index c0e9ac74b5971..d9b53abfe5aad 100644 --- a/src/mongo/db/pipeline/granularity_rounder.h +++ b/src/mongo/db/pipeline/granularity_rounder.h @@ -29,10 +29,13 @@ #pragma once +#include #include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/jsobj.h" #include "mongo/db/pipeline/expression_context.h" diff --git a/src/mongo/db/pipeline/granularity_rounder_powers_of_two.cpp b/src/mongo/db/pipeline/granularity_rounder_powers_of_two.cpp index aa622f9b0141b..4640c791ad187 100644 --- a/src/mongo/db/pipeline/granularity_rounder_powers_of_two.cpp +++ b/src/mongo/db/pipeline/granularity_rounder_powers_of_two.cpp @@ -27,12 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/pipeline/granularity_rounder.h" +#include +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/granularity_rounder.h" #include "mongo/platform/bits.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/granularity_rounder_powers_of_two_test.cpp b/src/mongo/db/pipeline/granularity_rounder_powers_of_two_test.cpp index 34645752e3f0f..7a997504b5743 100644 --- a/src/mongo/db/pipeline/granularity_rounder_powers_of_two_test.cpp +++ b/src/mongo/db/pipeline/granularity_rounder_powers_of_two_test.cpp @@ -27,14 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/pipeline/granularity_rounder.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/pipeline/granularity_rounder.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers.cpp b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers.cpp index f60924b7a3fe2..c2c79c594d8a2 100644 --- a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers.cpp +++ b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers.cpp @@ -27,9 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/granularity_rounder.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp index cbbc48766f7cf..c758e2b6bdb8d 100644 --- a/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp +++ b/src/mongo/db/pipeline/granularity_rounder_preferred_numbers_test.cpp @@ -27,14 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/pipeline/granularity_rounder.h" +#include -#include "mongo/db/exec/document_value/document.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/granularity_rounder.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/group_from_first_document_transformation.cpp b/src/mongo/db/pipeline/group_from_first_document_transformation.cpp index ebf7ca8fd940d..88339cf306bf8 100644 --- a/src/mongo/db/pipeline/group_from_first_document_transformation.cpp +++ b/src/mongo/db/pipeline/group_from_first_document_transformation.cpp @@ -29,8 +29,15 @@ #include "mongo/db/pipeline/group_from_first_document_transformation.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression_dependencies.h" + namespace mongo { -Document GroupFromFirstDocumentTransformation::applyTransformation(const Document& input) { +Document GroupFromFirstDocumentTransformation::applyTransformation(const Document& input) const { MutableDocument output(_accumulatorExprs.size()); for (auto&& expr : _accumulatorExprs) { diff --git a/src/mongo/db/pipeline/group_from_first_document_transformation.h b/src/mongo/db/pipeline/group_from_first_document_transformation.h index c698b95283beb..ff088501bc879 100644 --- a/src/mongo/db/pipeline/group_from_first_document_transformation.h +++ b/src/mongo/db/pipeline/group_from_first_document_transformation.h @@ -29,8 +29,26 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -74,7 +92,7 @@ class GroupFromFirstDocumentTransformation final : public TransformerInterface { return _expectedInput; } - Document applyTransformation(const Document& input) final; + Document applyTransformation(const Document& input) const final; void optimize() final; diff --git a/src/mongo/db/pipeline/inner_pipeline_stage_impl.cpp b/src/mongo/db/pipeline/inner_pipeline_stage_impl.cpp index 73269c6e9f28e..063377ec9fa54 100644 --- a/src/mongo/db/pipeline/inner_pipeline_stage_impl.cpp +++ b/src/mongo/db/pipeline/inner_pipeline_stage_impl.cpp @@ -27,10 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include "mongo/db/pipeline/inner_pipeline_stage_impl.h" + +#include #include "mongo/db/pipeline/document_source.h" -#include "mongo/db/pipeline/inner_pipeline_stage_impl.h" namespace mongo { InnerPipelineStageImpl::InnerPipelineStageImpl(const boost::intrusive_ptr& src, diff --git a/src/mongo/db/pipeline/inner_pipeline_stage_impl.h b/src/mongo/db/pipeline/inner_pipeline_stage_impl.h index 4f5cafcddd6e8..f9c4c2c03a922 100644 --- a/src/mongo/db/pipeline/inner_pipeline_stage_impl.h +++ b/src/mongo/db/pipeline/inner_pipeline_stage_impl.h @@ -29,12 +29,14 @@ #pragma once -#include "mongo/platform/basic.h" - #include #include +#include +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/inner_pipeline_stage_interface.h" +#include "mongo/platform/basic.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { class InnerPipelineStageImpl : public InnerPipelineStageInterface { diff --git a/src/mongo/db/pipeline/javascript_execution.cpp b/src/mongo/db/pipeline/javascript_execution.cpp index f9ce429ba76ed..614b4a8d6619c 100644 --- a/src/mongo/db/pipeline/javascript_execution.cpp +++ b/src/mongo/db/pipeline/javascript_execution.cpp @@ -27,13 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/javascript_execution.h" - -#include +#include -#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/pipeline/javascript_execution.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/javascript_execution.h b/src/mongo/db/pipeline/javascript_execution.h index d95e90f6eea26..32047df4f1b24 100644 --- a/src/mongo/db/pipeline/javascript_execution.h +++ b/src/mongo/db/pipeline/javascript_execution.h @@ -29,11 +29,18 @@ #pragma once +#include +#include +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/client.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" #include "mongo/scripting/engine.h" namespace mongo { diff --git a/src/mongo/db/pipeline/lite_parsed_document_source.cpp b/src/mongo/db/pipeline/lite_parsed_document_source.cpp index 7c96d56865c61..d6c2545f827e5 100644 --- a/src/mongo/db/pipeline/lite_parsed_document_source.cpp +++ b/src/mongo/db/pipeline/lite_parsed_document_source.cpp @@ -27,11 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include #include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" #include "mongo/db/stats/counters.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -122,9 +129,10 @@ void LiteParsedDocumentSourceNestedPipelines::getForeignExecutionNamespaces( for (auto&& pipeline : _pipelines) { auto nssVector = pipeline.getForeignExecutionNamespaces(); for (const auto& nssOrUUID : nssVector) { - auto nss = nssOrUUID.nss(); - tassert(6458500, "nss expected to contain a NamespaceString", nss != boost::none); - nssSet.insert(*nss); + tassert(6458500, + "nss expected to contain a NamespaceString", + nssOrUUID.isNamespaceString()); + nssSet.insert(nssOrUUID.nss()); } } } @@ -136,12 +144,15 @@ bool LiteParsedDocumentSourceNestedPipelines::allowedToPassthroughFromMongos() c }); } -bool LiteParsedDocumentSourceNestedPipelines::allowShardedForeignCollection( +Status LiteParsedDocumentSourceNestedPipelines::checkShardedForeignCollAllowed( NamespaceString nss, bool inMultiDocumentTransaction) const { - return std::all_of( - _pipelines.begin(), _pipelines.end(), [&nss, inMultiDocumentTransaction](auto&& pipeline) { - return pipeline.allowShardedForeignCollection(nss, inMultiDocumentTransaction); - }); + for (auto&& pipeline : _pipelines) { + if (auto status = pipeline.checkShardedForeignCollAllowed(nss, inMultiDocumentTransaction); + !status.isOK()) { + return status; + } + } + return Status::OK(); } ReadConcernSupportResult LiteParsedDocumentSourceNestedPipelines::supportsReadConcern( diff --git a/src/mongo/db/pipeline/lite_parsed_document_source.h b/src/mongo/db/pipeline/lite_parsed_document_source.h index 3e1dc6ea6c65e..def9b72aebdeb 100644 --- a/src/mongo/db/pipeline/lite_parsed_document_source.h +++ b/src/mongo/db/pipeline/lite_parsed_document_source.h @@ -29,11 +29,20 @@ #pragma once +#include #include +#include #include #include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/api_parameters.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/commands/server_status_metric.h" @@ -41,7 +50,10 @@ #include "mongo/db/query/allowed_contexts.h" #include "mongo/db/read_concern_support_result.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -192,13 +204,13 @@ class LiteParsedDocumentSource { } /** - * Returns true if the involved namespace 'nss' is allowed to be sharded. The behavior is to - * allow by default and stages should opt-out if foreign collections are not allowed to be - * sharded. + * Returns Status::OK() if the involved namespace 'nss' is allowed to be sharded. The behavior + * is to allow by default. Stages should opt-out if foreign collections are not allowed to be + * sharded by returning a Status with a message explaining why. 
*/ - virtual bool allowShardedForeignCollection(NamespaceString nss, - bool inMultiDocumentTransaction) const { - return true; + virtual Status checkShardedForeignCollAllowed(NamespaceString nss, + bool inMultiDocumentTransaction) const { + return Status::OK(); } /** @@ -326,8 +338,8 @@ class LiteParsedDocumentSourceNestedPipelines : public LiteParsedDocumentSource stdx::unordered_set& nssSet) const override; bool allowedToPassthroughFromMongos() const override; - bool allowShardedForeignCollection(NamespaceString nss, - bool inMultiDocumentTransaction) const override; + Status checkShardedForeignCollAllowed(NamespaceString nss, + bool inMultiDocumentTransaction) const override; const std::vector& getSubPipelines() const override { return _pipelines; diff --git a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp index f5760429206ce..49e4cac100a06 100644 --- a/src/mongo/db/pipeline/lite_parsed_pipeline.cpp +++ b/src/mongo/db/pipeline/lite_parsed_pipeline.cpp @@ -27,13 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/operation_context.h" -#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" +#include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/server_options.h" #include "mongo/db/stats/counters.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -134,10 +146,11 @@ void LiteParsedPipeline::verifyIsSupported( } // Verify that no involved namespace is sharded unless allowed by the pipeline. for (const auto& nss : getInvolvedNamespaces()) { - uassert(28769, - str::stream() << nss.ns() << " cannot be sharded", - allowShardedForeignCollection(nss, inMultiDocumentTransaction) || - !isSharded(opCtx, nss)); + const auto status = checkShardedForeignCollAllowed(nss, inMultiDocumentTransaction); + uassert(status.code(), + str::stream() << nss.toStringForErrorMsg() + << " cannot be sharded: " << status.reason(), + status.isOK() || !isSharded(opCtx, nss)); } } diff --git a/src/mongo/db/pipeline/lite_parsed_pipeline.h b/src/mongo/db/pipeline/lite_parsed_pipeline.h index edd3716cf8682..f5aee6cff286d 100644 --- a/src/mongo/db/pipeline/lite_parsed_pipeline.h +++ b/src/mongo/db/pipeline/lite_parsed_pipeline.h @@ -29,15 +29,25 @@ #pragma once +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include #include #include +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/privilege.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { @@ -148,16 +158,18 @@ class LiteParsedPipeline { } /** - * Returns false if at least one of the stages does not allow the involved namespace 'nss' to be - * sharded. 
+ * Returns an error Status if at least one of the stages does not allow the involved namespace + * 'nss' to be sharded, otherwise returns Status::OK(). */ - bool allowShardedForeignCollection(NamespaceString nss, bool isMultiDocumentTransaction) const { - return std::all_of(_stageSpecs.begin(), - _stageSpecs.end(), - [&nss, isMultiDocumentTransaction](auto&& spec) { - return spec->allowShardedForeignCollection( - nss, isMultiDocumentTransaction); - }); + Status checkShardedForeignCollAllowed(NamespaceString nss, + bool isMultiDocumentTransaction) const { + for (auto&& spec : _stageSpecs) { + if (auto status = spec->checkShardedForeignCollAllowed(nss, isMultiDocumentTransaction); + !status.isOK()) { + return status; + } + } + return Status::OK(); } /** diff --git a/src/mongo/db/pipeline/lookup_set_cache.h b/src/mongo/db/pipeline/lookup_set_cache.h index c2d6869003b0b..970bcc7822c57 100644 --- a/src/mongo/db/pipeline/lookup_set_cache.h +++ b/src/mongo/db/pipeline/lookup_set_cache.h @@ -28,21 +28,34 @@ */ #pragma once -#include "mongo/platform/basic.h" - #include #include #include #include #include #include +#include +// IWYU pragma: no_include "boost/multi_index/detail/bidir_node_iterator.hpp" +// IWYU pragma: no_include "boost/multi_index/detail/hash_index_iterator.hpp" +// IWYU pragma: no_include "boost/multi_index/detail/index_node_base.hpp" +#include +#include +#include +#include +#include +#include #include +#include +#include #include #include "mongo/base/string_data_comparator_interface.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/platform/basic.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/lookup_set_cache_test.cpp b/src/mongo/db/pipeline/lookup_set_cache_test.cpp index ec78b0b05725f..b64dba611e3cd 100644 --- a/src/mongo/db/pipeline/lookup_set_cache_test.cpp +++ b/src/mongo/db/pipeline/lookup_set_cache_test.cpp @@ -27,18 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include -#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/exec/document_value/document_comparator.h" #include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/lookup_set_cache.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/pipeline/make_js_function.cpp b/src/mongo/db/pipeline/make_js_function.cpp index 8cb1e0d32aafb..72715aef0563d 100644 --- a/src/mongo/db/pipeline/make_js_function.cpp +++ b/src/mongo/db/pipeline/make_js_function.cpp @@ -27,9 +27,9 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/make_js_function.h" +#include "mongo/db/pipeline/javascript_execution.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/make_js_function.h b/src/mongo/db/pipeline/make_js_function.h index aa619cff7f566..6a6edbd74eafc 100644 --- a/src/mongo/db/pipeline/make_js_function.h +++ b/src/mongo/db/pipeline/make_js_function.h @@ -29,8 +29,11 @@ #pragma once +#include + #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/javascript_execution.h" +#include "mongo/scripting/engine.h" namespace mongo { diff --git a/src/mongo/db/pipeline/memory_usage_tracker.h b/src/mongo/db/pipeline/memory_usage_tracker.h index 0984b32054323..84a7190237ce5 100644 --- a/src/mongo/db/pipeline/memory_usage_tracker.h +++ b/src/mongo/db/pipeline/memory_usage_tracker.h @@ -29,11 +29,19 @@ #pragma once +#include +#include +#include #include +#include +#include #include +#include #include +#include "mongo/base/string_data.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/memory_usage_tracker_test.cpp b/src/mongo/db/pipeline/memory_usage_tracker_test.cpp index f60933a4655e7..e502fa00beee5 100644 --- a/src/mongo/db/pipeline/memory_usage_tracker_test.cpp +++ b/src/mongo/db/pipeline/memory_usage_tracker_test.cpp @@ -28,8 +28,9 @@ */ #include "mongo/db/pipeline/memory_usage_tracker.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" + +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/monotonic_expression.cpp b/src/mongo/db/pipeline/monotonic_expression.cpp index bc34b756fece1..c593b48aa419c 100644 --- a/src/mongo/db/pipeline/monotonic_expression.cpp +++ b/src/mongo/db/pipeline/monotonic_expression.cpp @@ -29,6 +29,8 @@ #include "mongo/db/pipeline/monotonic_expression.h" +#include "mongo/util/assert_util.h" + namespace mongo::monotonic { State opposite(State state) { diff --git a/src/mongo/db/pipeline/monotonic_expression.h b/src/mongo/db/pipeline/monotonic_expression.h index 6f1664cf19962..11a7861bd1135 100644 --- a/src/mongo/db/pipeline/monotonic_expression.h +++ b/src/mongo/db/pipeline/monotonic_expression.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/pipeline/field_path.h" namespace mongo::monotonic { diff --git a/src/mongo/db/pipeline/monotonic_expression_test.cpp b/src/mongo/db/pipeline/monotonic_expression_test.cpp index 0c6671082018c..1c08cddcd4330 100644 --- a/src/mongo/db/pipeline/monotonic_expression_test.cpp +++ b/src/mongo/db/pipeline/monotonic_expression_test.cpp @@ -27,9 +27,21 @@ * it in the license file. 
*/ +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/monotonic_expression.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/partition_key_comparator.h b/src/mongo/db/pipeline/partition_key_comparator.h index befc9839a55b2..7dc81658f33eb 100644 --- a/src/mongo/db/pipeline/partition_key_comparator.h +++ b/src/mongo/db/pipeline/partition_key_comparator.h @@ -26,9 +26,23 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ +#pragma once +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/memory_usage_tracker.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" + namespace mongo { /** diff --git a/src/mongo/db/pipeline/partition_key_comparator_test.cpp b/src/mongo/db/pipeline/partition_key_comparator_test.cpp index 00d45f010d064..2dee26edf3a95 100644 --- a/src/mongo/db/pipeline/partition_key_comparator_test.cpp +++ b/src/mongo/db/pipeline/partition_key_comparator_test.cpp @@ -27,12 +27,19 @@ * it in the license file. */ +#include +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/partition_key_comparator.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/percentile_algo.h b/src/mongo/db/pipeline/percentile_algo.h index a75ef16ff33d5..9d210c52c75d5 100644 --- a/src/mongo/db/pipeline/percentile_algo.h +++ b/src/mongo/db/pipeline/percentile_algo.h @@ -29,13 +29,18 @@ #pragma once -#include - #include +#include #include namespace mongo { +enum class PercentileMethod : int8_t { + Approximate = 0, + Discrete, + Continuous, +}; + /** * Eventually we'll be supporting multiple types of percentiles (discrete, continuous, approximate) * and potentially multiple different algorithms for computing the approximate ones. 
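For context, the percentile_algo.h hunk above introduces the PercentileMethod enum. A minimal, self-contained sketch of how a caller might map a user-facing method name onto that enum follows; the helper name and the string spellings are illustrative assumptions, not part of this change:

    // Illustrative sketch only: maps a method name to the new PercentileMethod enum.
    // The enum is restated here so the snippet compiles on its own; the helper and the
    // string spellings are hypothetical, not definitions from this patch.
    #include <cstdint>
    #include <stdexcept>
    #include <string>

    enum class PercentileMethod : int8_t { Approximate = 0, Discrete, Continuous };

    PercentileMethod parsePercentileMethod(const std::string& name) {
        if (name == "approximate")
            return PercentileMethod::Approximate;
        if (name == "discrete")
            return PercentileMethod::Discrete;
        if (name == "continuous")
            return PercentileMethod::Continuous;
        throw std::invalid_argument("unknown percentile method: " + name);
    }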
diff --git a/src/mongo/db/pipeline/percentile_algo_bm_fixture.cpp b/src/mongo/db/pipeline/percentile_algo_bm_fixture.cpp index 3a6c2938805f9..737784af4f4e5 100644 --- a/src/mongo/db/pipeline/percentile_algo_bm_fixture.cpp +++ b/src/mongo/db/pipeline/percentile_algo_bm_fixture.cpp @@ -29,11 +29,15 @@ #include #include +#include +#include +#include #include -#include "mongo/db/pipeline/percentile_algo_bm_fixture.h" +#include #include "mongo/db/pipeline/percentile_algo.h" +#include "mongo/db/pipeline/percentile_algo_bm_fixture.h" #include "mongo/db/pipeline/percentile_algo_tdigest.h" namespace mongo { diff --git a/src/mongo/db/pipeline/percentile_algo_bm_fixture.h b/src/mongo/db/pipeline/percentile_algo_bm_fixture.h index af2fe989c64f9..d4509caa59a67 100644 --- a/src/mongo/db/pipeline/percentile_algo_bm_fixture.h +++ b/src/mongo/db/pipeline/percentile_algo_bm_fixture.h @@ -28,12 +28,12 @@ */ #pragma once -#include "mongo/platform/basic.h" -#include - #include +#include +#include #include "mongo/db/pipeline/percentile_algo_tdigest.h" +#include "mongo/platform/basic.h" namespace mongo { diff --git a/src/mongo/db/pipeline/percentile_algo_discrete.cpp b/src/mongo/db/pipeline/percentile_algo_discrete.cpp index 42a0013843787..787e77ad95dd7 100644 --- a/src/mongo/db/pipeline/percentile_algo_discrete.cpp +++ b/src/mongo/db/pipeline/percentile_algo_discrete.cpp @@ -28,11 +28,17 @@ */ #include +#include +#include #include +#include +#include -#include "mongo/db/pipeline/percentile_algo_discrete.h" +#include +#include "mongo/db/pipeline/percentile_algo_discrete.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" namespace mongo { using std::vector; diff --git a/src/mongo/db/pipeline/percentile_algo_discrete.h b/src/mongo/db/pipeline/percentile_algo_discrete.h index a6e174d1b5106..65ab2ae9c63f3 100644 --- a/src/mongo/db/pipeline/percentile_algo_discrete.h +++ b/src/mongo/db/pipeline/percentile_algo_discrete.h @@ -29,6 +29,8 @@ #include +#include + #include "mongo/db/pipeline/percentile_algo.h" namespace mongo { diff --git a/src/mongo/db/pipeline/percentile_algo_discrete_test.cpp b/src/mongo/db/pipeline/percentile_algo_discrete_test.cpp index 503363fa7f4ed..9b96b2a6c51c9 100644 --- a/src/mongo/db/pipeline/percentile_algo_discrete_test.cpp +++ b/src/mongo/db/pipeline/percentile_algo_discrete_test.cpp @@ -28,15 +28,24 @@ */ #include +#include #include #include +#include +#include +#include +#include #include -#include "mongo/db/pipeline/percentile_algo_discrete.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/db/pipeline/percentile_algo_discrete.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/unittest/assert.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/pipeline/percentile_algo_tdigest.cpp b/src/mongo/db/pipeline/percentile_algo_tdigest.cpp index acee12d99e3cf..37468fc8371ee 100644 --- a/src/mongo/db/pipeline/percentile_algo_tdigest.cpp +++ b/src/mongo/db/pipeline/percentile_algo_tdigest.cpp @@ -28,11 +28,17 @@ */ #include +#include +#include +#include #include +#include -#include "mongo/db/pipeline/percentile_algo_tdigest.h" +#include +#include "mongo/db/pipeline/percentile_algo_tdigest.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -117,10 +123,9 
@@ void TDigest::flushBuffer() { return; } - // TODO SERVER-74359: 'boost::sort::spreadsort::spreadsort' shows an observable perf improvement - // over std::sort and potentially might provide even more benefits if we separate accumulated - // data by type, as it can do radix sort on integers. However, we don't currently include - // boost::sort into out third_party libs. + // TODO SERVER-75565: 'boost::sort::spreadsort::spreadsort' shows an observable perf improvement + // over std::sort on large datasets. Switching to boost's spreadsort would require re-tuning + // the default delta setting and the size of the buffer. std::sort(_buffer.begin(), _buffer.end()); merge(_buffer); _buffer.clear(); @@ -157,9 +162,11 @@ boost::optional TDigest::computePercentile(double p) { // contributed to. size_t i = 0; // index of the target centroid double r = 0; // cumulative weight of all centroids up to, and including, i_th one - // TODO SERVER-74359 (tune t-digest): is it worth optimizing traversing the set of centroids - // backwards for p > 0.5? This likely doesn't matter when TDigest is used by accumulator but - // might become noticeable in expressions. + + // We are not optimizing traversing the set of centroids for higher percentiles or when + // multiple percentiles have been requested because our benchmarks don't show this to be a + // problem in the accumulator context, and for expressions, where it might matter, we are not + // using t-digest. for (; i < _centroids.size(); i++) { r += _centroids[i].weight; if (r > rank) { diff --git a/src/mongo/db/pipeline/percentile_algo_tdigest.h b/src/mongo/db/pipeline/percentile_algo_tdigest.h index d62de70c19d86..5c07544df3b6a 100644 --- a/src/mongo/db/pipeline/percentile_algo_tdigest.h +++ b/src/mongo/db/pipeline/percentile_algo_tdigest.h @@ -30,9 +30,14 @@ #pragma once #include +#include +#include +#include #include #include +#include + #include "mongo/db/pipeline/percentile_algo.h" namespace mongo { @@ -259,8 +264,8 @@ class TDigest : public PercentileAlgorithm { // Buffer for the incoming inputs. When the buffer is full, the inputs are sorted and merged // into '_centroids'. The max size is set in constructors to bufferCoeff * delta. The - // coefficient has been determined empirically from micro-benchmarks. - static constexpr int bufferCoeff = 5; + // coefficient has been determined empirically from benchmarks. + static constexpr int bufferCoeff = 3; const size_t _maxBufferSize; std::vector _buffer; diff --git a/src/mongo/db/pipeline/percentile_algo_tdigest_distributed.cpp b/src/mongo/db/pipeline/percentile_algo_tdigest_distributed.cpp index 42849e937633d..ddf8cfff4257b 100644 --- a/src/mongo/db/pipeline/percentile_algo_tdigest_distributed.cpp +++ b/src/mongo/db/pipeline/percentile_algo_tdigest_distributed.cpp @@ -27,13 +27,18 @@ * it in the license file.
*/ -#include -#include +#include +#include +#include +#include -#include "mongo/db/pipeline/percentile_algo_tdigest.h" +#include #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/percentile_algo.h" +#include "mongo/db/pipeline/percentile_algo_tdigest.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/percentile_algo_tdigest_distributed_test.cpp b/src/mongo/db/pipeline/percentile_algo_tdigest_distributed_test.cpp index a5cf8fe3252f2..d0a7622183515 100644 --- a/src/mongo/db/pipeline/percentile_algo_tdigest_distributed_test.cpp +++ b/src/mongo/db/pipeline/percentile_algo_tdigest_distributed_test.cpp @@ -29,16 +29,25 @@ #include #include +#include +#include #include +#include +#include #include +#include -#include "mongo/db/pipeline/percentile_algo.h" -#include "mongo/db/pipeline/percentile_algo_tdigest.h" +#include +#include +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/percentile_algo.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/unittest/assert.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/pipeline/percentile_algo_tdigest_test.cpp b/src/mongo/db/pipeline/percentile_algo_tdigest_test.cpp index 85a06b731550e..0f845a6d53816 100644 --- a/src/mongo/db/pipeline/percentile_algo_tdigest_test.cpp +++ b/src/mongo/db/pipeline/percentile_algo_tdigest_test.cpp @@ -27,18 +27,33 @@ * it in the license file. */ -#include +#include +#include #include #include #include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include +#include +#include #include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/db/pipeline/percentile_algo.h" #include "mongo/db/pipeline/percentile_algo_tdigest.h" - #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/unittest/assert.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -774,11 +789,11 @@ TEST(TDigestTest, PreciseOnSmallDataset_k2) { // and then sorts a buffer so spreading the duplicates within distances comparable to the // buffer-size doesn't serve any purpose. template -vector generateData(TDist& dist, - size_t n, - size_t dupes = 1, - bool keepDupesTogether = true) { - auto seed = 1680027861; // arbitrary +vector generateData( + TDist& dist, size_t n, size_t dupes, bool keepDupesTogether, long seed) { + if (seed == 0) { + seed = time(nullptr); + } LOGV2(7429513, "{seed}", "generateData", "seed"_attr = seed); std::mt19937 generator(seed); @@ -806,36 +821,39 @@ vector generateData(TDist& dist, return inputs; } -typedef vector (*DataGenerator)(size_t, size_t, bool); +typedef vector (*DataGenerator)(size_t, size_t, bool, long); // Generates 'n' values in [0, 100] range with uniform distribution. 
-vector generateUniform(size_t n, size_t dupes, bool keepDupesTogether) { +vector generateUniform(size_t n, size_t dupes, bool keepDupesTogether, long seed) { boost::random::uniform_real_distribution dist(0 /* min */, 100 /* max */); - return generateData(dist, n, dupes, keepDupesTogether); + return generateData(dist, n, dupes, keepDupesTogether, seed); } // Generates 'n' values from normal distribution with mean = 0.0 and sigma = 0.5. -vector generateNormal(size_t n, size_t dupes, bool keepDupesTogether) { +vector generateNormal(size_t n, size_t dupes, bool keepDupesTogether, long seed) { boost::random::normal_distribution dist(0.0 /* mean */, 0.5 /* sigma */); - return generateData(dist, n, dupes, keepDupesTogether); + return generateData(dist, n, dupes, keepDupesTogether, seed); } // Generates 'n' values from exponential distribution with lambda = 1.0: p(x)=lambda*e^(-lambda*x). -vector generateExponential(size_t n, size_t dupes, bool keepDupesTogether) { +vector generateExponential(size_t n, size_t dupes, bool keepDupesTogether, long seed) { boost::random::exponential_distribution dist(1.0 /* lambda */); - return generateData(dist, n, dupes, keepDupesTogether); + return generateData(dist, n, dupes, keepDupesTogether, seed); +} + +// Generates 'n' values from a Weibull distribution with a = 1.0, b = 0.5 to produce a heavy tail. +vector generateWeibull(size_t n, size_t dupes, bool keepDupesTogether, long seed) { + boost::random::weibull_distribution dist(1.0 /* a */, 0.5 /* b */); + return generateData(dist, n, dupes, keepDupesTogether, seed); } /* - * The following tests generate datasets with 10,000 values. The accuracy 0.00ab means that the - * rank of the computed percentile cannot differ from the true rank by more than |ab|. T-digest does - * not guarantee error bounds, the accuracy numbers here are empirical. + * The following tests generate datasets with 100,000 values. T-digest does not guarantee error + * bounds, but on well-behaved datasets it should be within 0.5% for the middle percentiles and even + * better for the extreme ones. * * These tests also indirectly validate the merging and compacting with a more complex scaling * function. - * - * We run two separate tests for each distribution because t-digest with k2 scaling function should - * be more accurate for the extreme percentiles.
*/ void runTestWithDataGenerator(TDigest::ScalingFunction k_limit, @@ -844,9 +862,10 @@ void runTestWithDataGenerator(TDigest::ScalingFunction k_limit, const vector& percentiles, double accuracy, const char* msg) { - vector inputs = dg(10'000 /* nUnique */, 1 /* dupes */, true /* keepDupesTogether*/); + vector inputs = + dg(100'000 /* nUnique */, 1 /* dupes */, true /* keepDupesTogether*/, 0 /*seed*/); - const int delta = 100; + const int delta = 500; TDigest digest(k_limit, delta); for (auto val : inputs) { digest.incorporate(val); @@ -868,7 +887,7 @@ TEST(TDigestTest, UniformDistribution_Mid) { TDigest::k2, generateUniform, percentiles, - 0.0050 /* accuracy */, + 0.005 /* accuracy */, "Uniform distribution mid"); } TEST(TDigestTest, UniformDistribution_Extr) { @@ -877,7 +896,7 @@ TEST(TDigestTest, UniformDistribution_Extr) { TDigest::k2, generateUniform, percentiles, - 0.0010 /* accuracy */, + 0.0005 /* accuracy */, "Uniform distribution extr"); } @@ -887,7 +906,7 @@ TEST(TDigestTest, NormalDistribution_Mid) { TDigest::k2, generateNormal, percentiles, - 0.0050 /* accuracy */, + 0.005 /* accuracy */, "Normal distribution mid"); } TEST(TDigestTest, NormalDistribution_Extr) { @@ -896,7 +915,7 @@ TEST(TDigestTest, NormalDistribution_Extr) { TDigest::k2, generateNormal, percentiles, - 0.0010 /* accuracy */, + 0.0005 /* accuracy */, "Normal distribution extr"); } @@ -906,7 +925,7 @@ TEST(TDigestTest, ExponentialDistribution_Mid) { TDigest::k2, generateExponential, percentiles, - 0.0050 /* accuracy */, + 0.005 /* accuracy */, "Exponential distribution mid"); } TEST(TDigestTest, ExponentialDistribution_Extr) { @@ -915,38 +934,32 @@ TEST(TDigestTest, ExponentialDistribution_Extr) { TDigest::k2, generateExponential, percentiles, - 0.0010 /* accuracy */, + 0.0005 /* accuracy */, "Exponential distribution extr"); } -/** - * For the median, k0 should yield as good accuracy as k2. Unfortunately, on any given dataset a - * particular digest might get (un)lucky so we cannot directly compare the accuracy of k0 vs k2. But - * we can check that the accuracy error is similar to the tests above that use k2. - */ -TEST(TDigestTest, Median_k0) { - runTestWithDataGenerator(TDigest::k0_limit, - TDigest::k0, - generateUniform, - {0.5}, - 0.0050 /* accuracy */, - "Uniform distribution median with k0"); - runTestWithDataGenerator(TDigest::k0_limit, - TDigest::k0, - generateNormal, - {0.5}, - 0.0050 /* accuracy */, - "Normal distribution median with k0"); - runTestWithDataGenerator(TDigest::k0_limit, - TDigest::k0, - generateExponential, - {0.5}, - 0.0050 /* accuracy */, - "Exponential distribution median with k0"); +TEST(TDigestTest, WeibullDistribution_Mid) { + const vector percentiles = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9}; + runTestWithDataGenerator(TDigest::k2_limit, + TDigest::k2, + generateWeibull, + percentiles, + 0.005 /* accuracy */, + "Weibull distribution mid"); +} +TEST(TDigestTest, WeibullDistribution_Extr) { + const vector percentiles = {0.0001, 0.001, 0.01, 0.99, 0.999, 0.9999}; + runTestWithDataGenerator(TDigest::k2_limit, + TDigest::k2, + generateWeibull, + percentiles, + 0.0005 /* accuracy */, + "Weibull distribution extr"); } /** - * Tests distributions with duplicated data. + * Tests distributions with duplicated data. Notice that we tend to get lower accuracy in the presence + * of many duplicates.
*/ void runTestWithDuplicatesInData(DataGenerator dg, size_t n, @@ -955,8 +968,8 @@ void runTestWithDuplicatesInData(DataGenerator dg, const vector& percentiles, double accuracy, const char* msg) { - const int delta = 100; - vector inputs = dg(n, dupes, keepDupesTogether); + const int delta = 500; + vector inputs = dg(n, dupes, keepDupesTogether, 0 /*seed*/); TDigest digest(TDigest::k2_limit, delta); for (auto val : inputs) { @@ -972,72 +985,75 @@ void runTestWithDuplicatesInData(DataGenerator dg, std::sort(inputs.begin(), inputs.end()); assertExpectedAccuracy(inputs, digest, percentiles, accuracy, msg); } + TEST(TDigestTest, Duplicates_uniform_mid) { const vector percentiles = {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9}; runTestWithDuplicatesInData(generateUniform, - 1000 /* nUnique*/, + 10'000 /* nUnique*/, 10 /* dupes */, false /* keepDupesTogether*/, percentiles, - 0.0100 /* accuracy */, - "Uniform distribution with shuffled dupes mid (1000x10)"); + 0.005 /* accuracy */, + "Uniform distribution with shuffled dupes mid (10'000x10)"); runTestWithDuplicatesInData(generateUniform, - 1000 /* nUnique*/, + 10'000 /* nUnique*/, 10 /* dupes */, true /* keepDupesTogether*/, percentiles, - 0.0100 /* accuracy */, - "Uniform distribution with clustered dupes mid (1000x10)"); + 0.005 /* accuracy */, + "Uniform distribution with clustered dupes mid (10'000x10)"); runTestWithDuplicatesInData(generateUniform, - 100 /* nUnique*/, + 1000 /* nUnique*/, 100 /* dupes */, false /* keepDupesTogether*/, percentiles, - 0.0100 /* accuracy */, - "Uniform distribution with shuffled dupes mid (100x100)"); + 0.01 /* accuracy */, + "Uniform distribution with shuffled dupes mid (1000x100)"); runTestWithDuplicatesInData(generateUniform, 1000 /* nUnique*/, - 10 /* dupes */, + 100 /* dupes */, true /* keepDupesTogether*/, percentiles, - 0.0100 /* accuracy */, - "Uniform distribution with clustered dupes mid (100x100)"); + 0.01 /* accuracy */, + "Uniform distribution with clustered dupes mid (1000x100)"); } + TEST(TDigestTest, Duplicates_uniform_extr) { const vector percentiles = {0.0001, 0.001, 0.01, 0.99, 0.999, 0.9999}; runTestWithDuplicatesInData(generateUniform, - 1000 /* nUnique*/, + 10'000 /* nUnique*/, 10 /* dupes */, false /* keepDupesTogether*/, percentiles, - 0.0050 /* accuracy */, - "Uniform distribution with shuffled dupes extr (1000x10)"); + 0.0005 /* accuracy */, + "Uniform distribution with shuffled dupes extr (10'000x10)"); runTestWithDuplicatesInData(generateUniform, - 1000 /* nUnique*/, + 10'000 /* nUnique*/, 10 /* dupes */, true /* keepDupesTogether*/, percentiles, - 0.0050 /* accuracy */, - "Uniform distribution with clustered dupes extr (1000x10)"); + 0.0005 /* accuracy */, + "Uniform distribution with clustered dupes extr (10'000x10)"); runTestWithDuplicatesInData(generateUniform, - 100 /* nUnique*/, + 1000 /* nUnique*/, 100 /* dupes */, false /* keepDupesTogether*/, percentiles, - 0.0050 /* accuracy */, - "Uniform distribution with shuffled dupes extr (100x100)"); + 0.005 /* accuracy */, + "Uniform distribution with shuffled dupes extr (1000x100)"); runTestWithDuplicatesInData(generateUniform, 1000 /* nUnique*/, - 10 /* dupes */, + 100 /* dupes */, true /* keepDupesTogether*/, percentiles, - 0.0050 /* accuracy */, - "Uniform distribution with clustered dupes extr (100x100)"); + 0.005 /* accuracy */, + "Uniform distribution with clustered dupes extr (1000x100)"); } + TEST(TDigestTest, Duplicates_all) { const int delta = 100; const vector inputs(10'000, 42); @@ -1048,17 +1064,18 @@ 
TEST(TDigestTest, Duplicates_all) { } digest.flushBuffer(); assertIsValid(digest, TDigest::k2, delta, "All duplicates"); - assertExpectedAccuracy(inputs, - digest, - {0.0001, 0.001, 0.01, 0.99, 0.999, 0.9999} /* percentiles */, - 0.0001 /* accuracy */, - "All duplicates extr"); - assertExpectedAccuracy(inputs, - digest, - {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9} /* percentiles */, - 0.0001 /* accuracy */, - "All duplicates mid"); + + // The t-digest of all duplicates will still contain multiple centroids but because all of them + // have the same mean that is equal to min and max, the interpolation should always return that + // mean as a result and, thus, would produce accurate percentile. + assertExpectedAccuracy( + inputs, + digest, + {0.0001, 0.001, 0.01, 0.1, 0.2, 0.5, 0.8, 0.9, 0.99, 0.999, 0.9999} /* percentiles */, + 0.0 /* accuracy */, + "All duplicates mid"); } + TEST(TDigestTest, Duplicates_two_clusters) { const int delta = 100; vector sorted(10'000, 0); @@ -1079,37 +1096,38 @@ TEST(TDigestTest, Duplicates_two_clusters) { } digest.flushBuffer(); assertIsValid(digest, TDigest::k2, delta, "Duplicates_two_clusters"); - assertExpectedAccuracy(sorted, - digest, - {0.0001, 0.001, 0.01, 0.99, 0.999, 0.9999} /* percentiles */, - 0.0010 /* accuracy */, - "Duplicates two clusters extr"); assertExpectedAccuracy(sorted, digest, {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9} /* percentiles */, - 0.0050 /* accuracy */, + 0.01 /* accuracy */, "Duplicates two clusters mid"); + assertExpectedAccuracy(sorted, + digest, + {0.0001, 0.001, 0.01, 0.99, 0.999, 0.9999} /* percentiles */, + 0.005 /* accuracy */, + "Duplicates two clusters extr"); } + TEST(TDigestTest, Frankenstein_distribution) { vector> chunks = { - generateNormal(1000, 1, true), // 1000 - generateNormal(200, 5, true), // 1000 - generateNormal(100, 10, true), // 1000 - generateNormal(50, 20, true), // 1000 - generateNormal(25, 40, true), // 1000 - generateUniform(2000, 1, true), // 3000 - generateUniform(10, 100, true), // 1000 - generateUniform(5, 200, true), // 1000 - generateExponential(1000, 1, true), // 1000 + generateNormal(10000, 1, true, 0), // 10000 + generateNormal(2000, 5, true, 0), // 10000 + generateNormal(1000, 10, true, 0), // 10000 + generateNormal(500, 20, true, 0), // 10000 + generateNormal(250, 40, true, 0), // 10000 + generateUniform(20000, 1, true, 0), // 30000 + generateUniform(100, 100, true, 0), // 10000 + generateUniform(50, 200, true, 0), // 10000 + generateExponential(10000, 1, true, 0), // 10000 }; vector inputs; - inputs.reserve(10'000); + inputs.reserve(100'000); for (const auto& chunk : chunks) { inputs.insert(inputs.end(), chunk.begin(), chunk.end()); } std::shuffle(inputs.begin(), inputs.end(), std::mt19937(2023 /*seed*/)); - const int delta = 100; + const int delta = 500; TDigest digest(TDigest::k2_limit, delta); for (auto val : inputs) { digest.incorporate(val); @@ -1123,23 +1141,23 @@ TEST(TDigestTest, Frankenstein_distribution) { ASSERT_LTE(digest.centroids().size(), 2 * delta) << "Upper bound on the number of centroids"; std::sort(inputs.begin(), inputs.end()); - assertExpectedAccuracy(inputs, - digest, - {0.0001, 0.001, 0.01, 0.99, 0.999, 0.9999}, - 0.0050, - "Frankenstein distribution extr"); assertExpectedAccuracy(inputs, digest, {0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9}, - 0.0100, + 0.01, "Frankenstein distribution mid"); + assertExpectedAccuracy(inputs, + digest, + {0.0001, 0.001, 0.01, 0.99, 0.999, 0.9999}, + 0.005, + "Frankenstein distribution extr"); } /** - * The tests below were 
used to assess accuracy of t-digest on datasets of size 1e6 and 1e7 over - * multiple iterations. We'd like to keep the tests alive to be able to repeat the experiments in - * the future if we decide to tune t-digest further. However, for running as part of unit tests the - * number of iterations and the dataset size have been set to lower values. + * The tests below were used to assess accuracy of t-digest on datasets of large size (>=1e7) over + * multiple iterations. They never fail but we'd like to keep the code alive to be able to repeat + * the experiments in the future if we decide to tune t-digest further. However, for running as part + * of unit tests the number of iterations and the dataset size have been set to lower values. */ vector computeError(vector sorted, TDigest& digest, @@ -1164,9 +1182,10 @@ std::pair> /*errors*/, vector /*# centroids*/> generateA vector> errors(deltas.size(), vector(percentiles.size(), 0)); vector n_centroids(deltas.size(), 0); + long seed = time(nullptr); for (int i = 0; i < nIterations; ++i) { std::cout << "*** iteration " << i << std::endl; - vector data = dg(nUnique, 1 /* dupes */, false /* keepDupesTogether */); + vector data = dg(nUnique, 1 /* dupes */, false /* keepDupesTogether */, ++seed); vector sorted = data; std::sort(sorted.begin(), sorted.end()); @@ -1215,14 +1234,17 @@ void runAccuracyTest(DataGenerator dg) { } } -TEST(TDigestTest, AccuracyStats_uniform) { - runAccuracyTest(generateUniform); -} -TEST(TDigestTest, AccuracyStats_normal) { - runAccuracyTest(generateNormal); -} -TEST(TDigestTest, AccuracyStats_exp) { - runAccuracyTest(generateExponential); -} +// TEST(TDigestTest, AccuracyStats_uniform) { +// runAccuracyTest(generateUniform); +// } +// TEST(TDigestTest, AccuracyStats_normal) { +// runAccuracyTest(generateNormal); +// } +// TEST(TDigestTest, AccuracyStats_exp) { +// runAccuracyTest(generateExponential); +// } +// TEST(TDigestTest, AccuracyStats_weibull) { +// runAccuracyTest(generateWeibull); +// } } // namespace } // namespace mongo diff --git a/src/mongo/db/pipeline/pipeline.cpp b/src/mongo/db/pipeline/pipeline.cpp index db9f6923c4e02..1848ed4737913 100644 --- a/src/mongo/db/pipeline/pipeline.cpp +++ b/src/mongo/db/pipeline/pipeline.cpp @@ -29,28 +29,51 @@ #include "mongo/db/pipeline/pipeline.h" -#include "mongo/logv2/log.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" #include "mongo/base/exact_cast.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/matcher/expression_algo.h" #include "mongo/db/operation_context.h" -#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/change_stream_helpers.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_merge.h" #include "mongo/db/pipeline/document_source_out.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include 
"mongo/db/pipeline/resume_token.h" #include "mongo/db/pipeline/search_helper.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/storage/storage_options.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/str.h" @@ -83,7 +106,7 @@ void validateTopLevelPipeline(const Pipeline& pipeline) { // Verify that the specified namespace is valid for the initial stage of this pipeline. const NamespaceString& nss = pipeline.getContext()->ns; - auto sources = pipeline.getSources(); + const auto& sources = pipeline.getSources(); if (sources.empty()) { uassert(ErrorCodes::InvalidNamespace, @@ -184,7 +207,7 @@ std::unique_ptr Pipeline::clone( for (auto&& stage : _sources) { clonedStages.push_back(stage->clone(expCtx)); } - return create(clonedStages, expCtx); + return create(std::move(clonedStages), expCtx); } template @@ -194,8 +217,14 @@ std::unique_ptr Pipeline::parseCommon( PipelineValidatorCallback validator, std::function getElemFunc) { - SourceContainer stages; + // Before parsing the pipeline, make sure it's not so long that it will make us run out of + // memory. + uassert(7749501, + str::stream() << "Pipeline length must be no longer than " + << internalPipelineLengthLimit << " stages.", + static_cast(rawPipeline.size()) <= internalPipelineLengthLimit); + SourceContainer stages; for (auto&& stageElem : rawPipeline) { auto parsedSources = DocumentSource::parse(expCtx, getElemFunc(stageElem)); stages.insert(stages.end(), parsedSources.begin(), parsedSources.end()); @@ -256,7 +285,7 @@ std::unique_ptr Pipeline::create( } void Pipeline::validateCommon(bool alreadyOptimized) const { - uassert(ErrorCodes::FailedToParse, + uassert(5054701, str::stream() << "Pipeline length must be no longer than " << internalPipelineLengthLimit << " stages", static_cast(_sources.size()) <= internalPipelineLengthLimit); @@ -524,39 +553,11 @@ vector Pipeline::serializeContainer(const SourceContainer& container, return serializedSources; } -vector Pipeline::serializeContainer(const SourceContainer& container, - boost::optional explain) { - // TODO SERVER-75139 Remove this function once all calls have been removed. - vector serializedSources; - for (auto&& source : container) { - source->serializeToArray(serializedSources, explain); - } - return serializedSources; -} - -vector Pipeline::serialize(boost::optional explain) const { - // TODO SERVER-75139 Remove this function once all calls have been removed. - return serializeContainer(_sources, {explain}); -} - vector Pipeline::serialize(boost::optional opts) const { return serializeContainer(_sources, opts); } -vector Pipeline::serializeToBson( - boost::optional explain) const { - const auto serialized = serialize(explain); - std::vector asBson; - asBson.reserve(serialized.size()); - for (auto&& stage : serialized) { - invariant(stage.getType() == BSONType::Object); - asBson.push_back(stage.getDocument().toBson()); - } - return asBson; -} - vector Pipeline::serializeToBson(boost::optional opts) const { - // TODO SERVER-75139 Remove this function once all calls have been removed. 
const auto serialized = serialize(opts); std::vector asBson; asBson.reserve(serialized.size()); @@ -598,16 +599,16 @@ boost::optional Pipeline::getNext() { : boost::optional{nextResult.releaseDocument()}; } -vector Pipeline::writeExplainOps(ExplainOptions::Verbosity verbosity) const { +vector Pipeline::writeExplainOps(SerializationOptions opts) const { vector array; for (auto&& stage : _sources) { auto beforeSize = array.size(); - stage->serializeToArray(array, verbosity); + stage->serializeToArray(array, opts); auto afterSize = array.size(); // Append execution stats to the serialized stage if the specified verbosity is // 'executionStats' or 'allPlansExecution'. invariant(afterSize - beforeSize == 1u); - if (verbosity >= ExplainOptions::Verbosity::kExecStats) { + if (*opts.verbosity >= ExplainOptions::Verbosity::kExecStats) { auto serializedStage = array.back(); array.back() = appendCommonExecStats(serializedStage, stage->getCommonStats()); } diff --git a/src/mongo/db/pipeline/pipeline.h b/src/mongo/db/pipeline/pipeline.h index 3fc97596fb3e0..3ac1386f12518 100644 --- a/src/mongo/db/pipeline/pipeline.h +++ b/src/mongo/db/pipeline/pipeline.h @@ -29,25 +29,48 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include +#include +#include #include -#include - +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/cursor_response_gen.h" #include "mongo/db/query/explain_options.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" -#include "mongo/s/query/async_results_merger_params_gen.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" #include "mongo/util/intrusive_counter.h" #include "mongo/util/timer.h" +#include "mongo/util/uuid.h" namespace mongo { class BSONObj; @@ -318,13 +341,9 @@ class Pipeline { /** * Helpers to serialize a pipeline. */ - std::vector serialize(boost::optional explain) const; std::vector serialize(boost::optional opts = boost::none) const; - std::vector serializeToBson(boost::optional explain) const; std::vector serializeToBson( boost::optional opts = boost::none) const; - static std::vector serializeContainer( - const SourceContainer& container, boost::optional explain); static std::vector serializeContainer( const SourceContainer& container, boost::optional opts = boost::none); @@ -342,7 +361,7 @@ class Pipeline { * Write the pipeline's operators to a std::vector, providing the level of detail * specified by 'verbosity'. 
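 *
 * With this change the verbosity travels inside 'opts': the implementation above dereferences
 * opts.verbosity when deciding whether to append execution stats. A rough usage sketch for a
 * caller that previously passed a verbosity directly, assuming SerializationOptions exposes an
 * optional 'verbosity' member as implied by that dereference (illustrative, not taken from the
 * codebase):
 *
 *     SerializationOptions opts;
 *     opts.verbosity = ExplainOptions::Verbosity::kExecStats;
 *     std::vector<Value> explained = pipeline->writeExplainOps(opts);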
*/ - std::vector writeExplainOps(ExplainOptions::Verbosity verbosity) const; + std::vector writeExplainOps(SerializationOptions opts = SerializationOptions()) const; /** * Returns the dependencies needed by this pipeline. 'unavailableMetadata' should reflect what diff --git a/src/mongo/db/pipeline/pipeline_d.cpp b/src/mongo/db/pipeline/pipeline_d.cpp index c1747819a15df..b07a3ad85e4ca 100644 --- a/src/mongo/db/pipeline/pipeline_d.cpp +++ b/src/mongo/db/pipeline/pipeline_d.cpp @@ -29,39 +29,69 @@ #include "mongo/db/pipeline/pipeline_d.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/exact_cast.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/exec/cached_plan.h" #include "mongo/db/exec/collection_scan.h" -#include "mongo/db/exec/fetch.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/multi_iterator.h" #include "mongo/db/exec/multi_plan.h" -#include "mongo/db/exec/projection.h" -#include "mongo/db/exec/queued_data_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sample_from_timeseries_bucket.h" #include "mongo/db/exec/shard_filter.h" +#include "mongo/db/exec/shard_filterer.h" #include "mongo/db/exec/shard_filterer_impl.h" -#include "mongo/db/exec/subplan.h" +#include "mongo/db/exec/timeseries/bucket_spec.h" #include "mongo/db/exec/trial_stage.h" #include "mongo/db/exec/unpack_timeseries_bucket.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/index/index_access_method.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" -#include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/pipeline/document_source_cursor.h" #include "mongo/db/pipeline/document_source_geo_near.h" #include "mongo/db/pipeline/document_source_geo_near_cursor.h" #include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/pipeline/document_source_group_base.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" #include "mongo/db/pipeline/document_source_lookup.h" #include "mongo/db/pipeline/document_source_match.h" @@ -69,31 +99,49 @@ #include "mongo/db/pipeline/document_source_sample_from_random_cursor.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/expression.h" 
#include "mongo/db/pipeline/inner_pipeline_stage_impl.h" +#include "mongo/db/pipeline/inner_pipeline_stage_interface.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/skip_and_limit.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/parsed_distinct.h" #include "mongo/db/query/plan_executor_factory.h" #include "mongo/db/query/plan_executor_impl.h" -#include "mongo/db/query/plan_summary_stats.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/record_id_bound.h" #include "mongo/db/query/sort_pattern.h" #include "mongo/db/query/stage_types.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/db/record_id.h" #include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/server_options.h" -#include "mongo/db/service_context.h" -#include "mongo/db/stats/top.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" #include "mongo/db/storage/record_store.h" -#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_gen.h" -#include "mongo/rpc/metadata/client_metadata.h" -#include "mongo/s/query/document_source_merge_cursors.h" -#include "mongo/util/time_support.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -236,6 +284,10 @@ std::unique_ptr createFindCommand( if (aggRequest) { findCommand->setAllowDiskUse(aggRequest->getAllowDiskUse()); findCommand->setHint(aggRequest->getHint().value_or(BSONObj()).getOwned()); + findCommand->setRequestResumeToken(aggRequest->getRequestResumeToken()); + if (aggRequest->getResumeAfter()) { + findCommand->setResumeAfter(*aggRequest->getResumeAfter()); + } } // The collation on the ExpressionContext has been resolved to either the user-specified @@ -367,7 +419,8 @@ StringData extractGeoNearFieldFromIndexes(OperationContext* opCtx, std::vector idxs; collection->getIndexCatalog()->findIndexByType(opCtx, IndexNames::GEO_2D, idxs); uassert(ErrorCodes::IndexNotFound, - str::stream() << "There is more than one 2d index on " << collection->ns().ns() + str::stream() << "There is more than one 2d index on " + << collection->ns().toStringForErrorMsg() << "; unsure which to use for $geoNear", idxs.size() <= 1U); if (idxs.size() == 1U) { @@ -386,7 +439,8 @@ StringData extractGeoNearFieldFromIndexes(OperationContext* opCtx, "$geoNear requires a 2d or 2dsphere index, but none were found", !idxs.empty()); uassert(ErrorCodes::IndexNotFound, - str::stream() << "There 
is more than one 2dsphere index on " << collection->ns().ns() + str::stream() << "There is more than one 2dsphere index on " + << collection->ns().toStringForErrorMsg() << "; unsure which to use for $geoNear", idxs.size() <= 1U); @@ -558,13 +612,21 @@ StatusWith> PipelineD::createRan // Build a MultiIteratorStage and pass it the random-sampling RecordCursor. auto ws = std::make_unique(); std::unique_ptr root = - std::make_unique(expCtx.get(), ws.get(), coll); + std::make_unique(expCtx.get(), ws.get(), &coll); static_cast(root.get())->addIterator(std::move(rsRandCursor)); TrialStage* trialStage = nullptr; - auto scopedCss = CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, coll->ns()); - const bool isSharded = scopedCss->getCollectionDescription(opCtx).isSharded(); + const auto [isSharded, optOwnershipFilter] = [&]() { + auto scopedCss = + CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, coll->ns()); + const bool isSharded = scopedCss->getCollectionDescription(opCtx).isSharded(); + boost::optional optFilter = isSharded + ? boost::optional(scopedCss->getOwnershipFilter( + opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup)) + : boost::none; + return std::pair(isSharded, std::move(optFilter)); + }(); // Because 'numRecords' includes orphan documents, our initial decision to optimize the $sample // cursor may have been mistaken. For sharded collections, build a TRIAL plan that will switch @@ -618,8 +680,8 @@ StatusWith> PipelineD::createRan if (isSharded) { // In the sharded case, we need to use a ShardFilterer within the ARHASH plan to // eliminate orphans from the working set, since the stage owns the cursor. - maybeShardFilter = std::make_unique(scopedCss->getOwnershipFilter( - opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup)); + invariant(optOwnershipFilter); + maybeShardFilter = std::make_unique(*optOwnershipFilter); } auto arhashPlan = std::make_unique( @@ -636,15 +698,14 @@ StatusWith> PipelineD::createRan gTimeseriesBucketMaxCount); std::unique_ptr collScanPlan = std::make_unique( - expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr); + expCtx.get(), &coll, CollectionScanParams{}, ws.get(), nullptr); if (isSharded) { // In the sharded case, we need to add a shard-filterer stage to the backup plan to // eliminate orphans. The trial plan is thus SHARDING_FILTER-COLLSCAN. - auto collectionFilter = scopedCss->getOwnershipFilter( - opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup); + invariant(optOwnershipFilter); collScanPlan = std::make_unique( - expCtx.get(), std::move(collectionFilter), ws.get(), std::move(collScanPlan)); + expCtx.get(), *optOwnershipFilter, ws.get(), std::move(collScanPlan)); } auto topkSortPlan = std::make_unique( @@ -683,16 +744,15 @@ StatusWith> PipelineD::createRan // Since the incoming operation is sharded, use the CSS to infer the filtering metadata for // the collection. We get the shard ownership filter after checking to see if the collection // is sharded to avoid an invariant from being fired in this call. - auto collectionFilter = scopedCss->getOwnershipFilter( - opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup); + invariant(optOwnershipFilter); // The trial plan is SHARDING_FILTER-MULTI_ITERATOR. auto randomCursorPlan = std::make_unique( - expCtx.get(), collectionFilter, ws.get(), std::move(root)); + expCtx.get(), *optOwnershipFilter, ws.get(), std::move(root)); // The backup plan is SHARDING_FILTER-COLLSCAN. 
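/*
 * In the hunks above, the sharded-ness check and the ownership filter are computed once in an
 * immediately-invoked lambda and captured as a (bool, optional filter) pair, so the ARHASH plan
 * and both SHARDING_FILTER stages reuse the same filter instead of asking the
 * CollectionShardingState again. A generic sketch of that idiom with illustrative names (the
 * helpers below are hypothetical):
 *
 *     const auto [isSharded, optFilter] = [&]() {
 *         auto css = acquireShardingState();  // hypothetical accessor
 *         const bool sharded = css->isSharded();
 *         boost::optional<Filter> filter =
 *             sharded ? boost::optional<Filter>(css->ownershipFilter()) : boost::none;
 *         return std::pair(sharded, std::move(filter));
 *     }();
 */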
std::unique_ptr collScanPlan = std::make_unique( - expCtx.get(), coll, CollectionScanParams{}, ws.get(), nullptr); + expCtx.get(), &coll, CollectionScanParams{}, ws.get(), nullptr); collScanPlan = std::make_unique( - expCtx.get(), collectionFilter, ws.get(), std::move(collScanPlan)); + expCtx.get(), *optOwnershipFilter, ws.get(), std::move(collScanPlan)); // Place a TRIAL stage at the root of the plan tree, and pass it the trial and backup plans. root = std::make_unique(expCtx.get(), ws.get(), @@ -707,9 +767,7 @@ StatusWith> PipelineD::createRan std::move(ws), std::move(root), &coll, - opCtx->inMultiDocumentTransaction() - ? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO, + PlanYieldPolicy::YieldPolicy::YIELD_AUTO, QueryPlannerParams::RETURN_OWNED_DATA); if (!execStatus.isOK()) { return execStatus.getStatus(); @@ -1307,11 +1365,7 @@ PipelineD::buildInnerQueryExecutorGeneric(const MultipleCollectionAccessor& coll // If this is a query on a time-series collection then it may be eligible for a post-planning // sort optimization. We check eligibility and perform the rewrite here. auto [unpack, sort] = findUnpackThenSort(pipeline->_sources); - const bool timeseriesBoundedSortOptimization = - serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gFeatureFlagBucketUnpackWithSort.isEnabled( - serverGlobalParams.featureCompatibility) && - unpack && sort; + const bool timeseriesBoundedSortOptimization = unpack && sort; QueryPlannerParams plannerOpts; if (timeseriesBoundedSortOptimization) { plannerOpts.traversalPreference = createTimeSeriesTraversalPreference(unpack, sort); @@ -1557,8 +1611,8 @@ PipelineD::buildInnerQueryExecutorGeoNear(const MultipleCollectionAccessor& coll // $geoNear can only run over the main collection. 
const auto& collection = collections.getMainCollection(); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "$geoNear requires a geo index to run, but " << nss.ns() - << " does not exist", + str::stream() << "$geoNear requires a geo index to run, but " + << nss.toStringForErrorMsg() << " does not exist", collection); Pipeline::SourceContainer& sources = pipeline->_sources; diff --git a/src/mongo/db/pipeline/pipeline_d.h b/src/mongo/db/pipeline/pipeline_d.h index c9171748b1c54..8b58466af67ac 100644 --- a/src/mongo/db/pipeline/pipeline_d.h +++ b/src/mongo/db/pipeline/pipeline_d.h @@ -29,12 +29,22 @@ #pragma once -#include "mongo/db/exec/bucket_unpacker.h" -#include "mongo/db/query/query_planner_params.h" #include +#include +#include +#include #include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/dependencies.h" @@ -42,10 +52,16 @@ #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" #include "mongo/db/pipeline/document_source_sample.h" -#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/group_from_first_document_transformation.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/plan_executor.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/sort_pattern.h" namespace mongo { class Collection; @@ -54,6 +70,7 @@ class DocumentSourceCursor; class DocumentSourceMatch; class DocumentSourceSort; class ExpressionContext; + class SkipThenLimit; class OperationContext; class Pipeline; @@ -137,34 +154,6 @@ class PipelineD { */ static BSONObj getPostBatchResumeToken(const Pipeline* pipeline); - /** - * Resolves the collator to either the user-specified collation or, if none was specified, to - * the collection-default collation. - */ - static std::pair, ExpressionContext::CollationMatchesDefault> - resolveCollator(OperationContext* opCtx, - BSONObj userCollation, - const CollectionPtr& collection) { - if (!collection || !collection->getDefaultCollator()) { - return {userCollation.isEmpty() - ? nullptr - : uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext()) - ->makeFromBSON(userCollation)), - ExpressionContext::CollationMatchesDefault::kNoDefault}; - } - if (userCollation.isEmpty()) { - return {collection->getDefaultCollator()->clone(), - ExpressionContext::CollationMatchesDefault::kYes}; - } - auto userCollator = uassertStatusOK( - CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(userCollation)); - return { - std::move(userCollator), - CollatorInterface::collatorsMatch(collection->getDefaultCollator(), userCollator.get()) - ? 
ExpressionContext::CollationMatchesDefault::kYes - : ExpressionContext::CollationMatchesDefault::kNo}; - } - private: PipelineD(); // does not exist: prevent instantiation @@ -262,9 +251,8 @@ class PipelineD { /* * Takes a leaf plan stage and a sort pattern and returns a pair if they support the Bucket -Unpacking with Sort Optimization. - * The pair includes whether the index order and sort order agree with each other as its first - * member and the order of the index as the second parameter. + * Unpacking with Sort Optimization. The pair includes whether the index order and sort order + * agree with each other as its first member and the order of the index as the second parameter. * * Note that the index scan order is different from the index order. */ diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree.h b/src/mongo/db/pipeline/pipeline_metadata_tree.h index e53baaf600478..001582edb7b06 100644 --- a/src/mongo/db/pipeline/pipeline_metadata_tree.h +++ b/src/mongo/db/pipeline/pipeline_metadata_tree.h @@ -29,19 +29,29 @@ #pragma once -#include +#include #include +#include +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include #include +#include +#include #include #include #include #include #include +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_facet.h" #include "mongo/db/pipeline/document_source_lookup.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" /** * A simple representation of an Aggregation Pipeline and functions for building it. diff --git a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp index c521c4325972d..85271569645fa 100644 --- a/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp +++ b/src/mongo/db/pipeline/pipeline_metadata_tree_test.cpp @@ -27,9 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include #include #include #include @@ -37,9 +37,16 @@ #include #include +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/document_source_bucket_auto.h" @@ -53,10 +60,13 @@ #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/document_source_tee_consumer.h" #include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/pipeline_metadata_tree.h" -#include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/pipeline_test.cpp b/src/mongo/db/pipeline/pipeline_test.cpp index 9278b81455db9..3993fc164d624 100644 --- a/src/mongo/db/pipeline/pipeline_test.cpp +++ b/src/mongo/db/pipeline/pipeline_test.cpp @@ -27,21 +27,36 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include #include +#include #include -#include "mongo/db/exec/document_value/document.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" #include "mongo/db/pipeline/document_source_change_stream_add_post_image.h" #include "mongo/db/pipeline/document_source_change_stream_add_pre_image.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" #include "mongo/db/pipeline/document_source_facet.h" #include "mongo/db/pipeline/document_source_graph_lookup.h" #include "mongo/db/pipeline/document_source_internal_split_pipeline.h" @@ -53,27 +68,28 @@ #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/document_source_test_optimizations.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" #include "mongo/db/pipeline/semantic_analysis.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { -using boost::intrusive_ptr; -using std::string; -using std::vector; - const NamespaceString kTestNss = NamespaceString::createNamespaceString_forTest("a.collection"); const NamespaceString kAdminCollectionlessNss = NamespaceString::createNamespaceString_forTest("admin.$cmd.aggregate"); @@ -126,13 +142,13 @@ void assertPipelineOptimizesAndSerializesTo(std::string inputPipeJson, const BSONObj serializePipeExpected = pipelineFromJsonArray(serializedPipeJson); ASSERT_EQUALS(inputBson["pipeline"].type(), BSONType::Array); - vector rawPipeline; + std::vector rawPipeline; for (auto&& stageElem : inputBson["pipeline"].Array()) { ASSERT_EQUALS(stageElem.type(), BSONType::Object); rawPipeline.push_back(stageElem.embeddedObject()); } AggregateCommandRequest request(aggNss, rawPipeline); - intrusive_ptr ctx = + boost::intrusive_ptr ctx = new ExpressionContextForTest(opCtx.get(), request); ctx->mongoProcessInterface = std::make_shared(); TempDir tempDir("PipelineTest"); @@ -169,13 
+185,13 @@ TEST(PipelineOptimizationTest, LimitDoesNotMoveBeforeProject) { } TEST(PipelineOptimizationTest, SampleLegallyPushedBefore) { - string inputPipe = + std::string inputPipe = "[{$replaceRoot: { newRoot: \"$a\" }}, " "{$project: { b: 1 }}, " "{$addFields: { c: 1 }}, " "{$sample: { size: 4 }}]"; - string outputPipe = + std::string outputPipe = "[{$sample: {size: 4}}, " "{$replaceRoot: {newRoot: \"$a\"}}, " "{$project: {_id: true, b : true}}, " @@ -185,17 +201,17 @@ TEST(PipelineOptimizationTest, SampleLegallyPushedBefore) { } TEST(PipelineOptimizationTest, SampleNotIllegallyPushedBefore) { - string inputPipe = + std::string inputPipe = "[{$project: { a : 1 }}, " "{$match: { a: 1 }}, " "{$sample: { size: 4 }}]"; - string outputPipe = + std::string outputPipe = "[{$match: {a: {$eq: 1}}}, " "{$sample : {size: 4}}, " "{$project: {_id: true, a : true}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {a: 1}}, " "{$sample : {size: 4}}, " "{$project: {_id: true, a : true}}]"; @@ -204,24 +220,24 @@ TEST(PipelineOptimizationTest, SampleNotIllegallyPushedBefore) { } TEST(PipelineOptimizationTest, MoveMatchBeforeAddFieldsIfInvolvedFieldsNotRelated) { - string inputPipe = "[{$addFields : {a : 1}}, {$match : {b : 1}}]"; + std::string inputPipe = "[{$addFields : {a : 1}}, {$match : {b : 1}}]"; - string outputPipe = "[{$match : {b : {$eq : 1}}}, {$addFields : {a : {$const : 1}}}]"; + std::string outputPipe = "[{$match : {b : {$eq : 1}}}, {$addFields : {a : {$const : 1}}}]"; - string serializedPipe = "[{$match: {b : 1}}, {$addFields: {a : {$const : 1}}}]"; + std::string serializedPipe = "[{$match: {b : 1}}, {$addFields: {a : {$const : 1}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MoveMatchWithExprBeforeAddFieldsIfInvolvedFieldsNotRelated) { - string inputPipe = "[{$addFields : {a : 1}}, {$match : {$expr: {$eq: ['$b', 1]}}}]"; + std::string inputPipe = "[{$addFields : {a : 1}}, {$match : {$expr: {$eq: ['$b', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{b: {$_internalExprEq: 1}}," " {$expr: {$eq: ['$b', {$const: 1}]}}]}}," " {$addFields : {a : {$const : 1}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match : {$expr: {$eq: ['$b', 1]}}}," " {$addFields : {a : {$const : 1}}}]"; @@ -229,24 +245,24 @@ TEST(PipelineOptimizationTest, MoveMatchWithExprBeforeAddFieldsIfInvolvedFieldsN } TEST(PipelineOptimizationTest, MatchDoesNotMoveBeforeAddFieldsIfInvolvedFieldsAreRelated) { - string inputPipe = "[{$addFields : {a : 1}}, {$match : {a : 1}}]"; + std::string inputPipe = "[{$addFields : {a : 1}}, {$match : {a : 1}}]"; - string outputPipe = "[{$addFields : {a : {$const : 1}}}, {$match : {a : {$eq : 1}}}]"; + std::string outputPipe = "[{$addFields : {a : {$const : 1}}}, {$match : {a : {$eq : 1}}}]"; - string serializedPipe = "[{$addFields : {a : {$const : 1}}}, {$match: {a : 1}}]"; + std::string serializedPipe = "[{$addFields : {a : {$const : 1}}}, {$match: {a : 1}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchWithExprDoesNotMoveBeforeAddFieldsIfInvolvedFieldsAreRelated) { - string inputPipe = "[{$addFields : {a : 1}}, {$match : {$expr: {$eq: ['$a', 1]}}}]"; + std::string inputPipe = "[{$addFields : {a : 1}}, {$match : {$expr: {$eq: ['$a', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$addFields : {a : {$const : 1}}}," " {$match: {$and: [{a: {$_internalExprEq: 1}}," " {$expr: 
{$eq: ['$a', {$const: 1}]}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$addFields : {a : {$const : 1}}}," " {$match : {$expr: {$eq: ['$a', 1]}}}]"; @@ -254,24 +270,25 @@ TEST(PipelineOptimizationTest, MatchWithExprDoesNotMoveBeforeAddFieldsIfInvolved } TEST(PipelineOptimizationTest, MatchOnTopLevelFieldDoesNotMoveBeforeAddFieldsOfNestedPath) { - string inputPipe = "[{$addFields : {'a.b' : 1}}, {$match : {a : 1}}]"; + std::string inputPipe = "[{$addFields : {'a.b' : 1}}, {$match : {a : 1}}]"; - string outputPipe = "[{$addFields : {a : {b : {$const : 1}}}}, {$match : {a : {$eq : 1}}}]"; + std::string outputPipe = + "[{$addFields : {a : {b : {$const : 1}}}}, {$match : {a : {$eq : 1}}}]"; - string serializedPipe = "[{$addFields: {a: {b: {$const: 1}}}}, {$match: {a: 1}}]"; + std::string serializedPipe = "[{$addFields: {a: {b: {$const: 1}}}}, {$match: {a: 1}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchWithExprOnTopLevelFieldDoesNotMoveBeforeAddFieldsOfNestedPath) { - string inputPipe = "[{$addFields : {'a.b' : 1}}, {$match : {$expr: {$eq: ['$a', 1]}}}]"; + std::string inputPipe = "[{$addFields : {'a.b' : 1}}, {$match : {$expr: {$eq: ['$a', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$addFields : {a : {b : {$const : 1}}}}," " {$match: {$and: [{a: {$_internalExprEq: 1}}," " {$expr: {$eq: ['$a', {$const: 1}]}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$addFields: {a: {b: {$const: 1}}}}," " {$match : {$expr: {$eq: ['$a', 1]}}}]"; @@ -279,24 +296,24 @@ TEST(PipelineOptimizationTest, MatchWithExprOnTopLevelFieldDoesNotMoveBeforeAddF } TEST(PipelineOptimizationTest, MatchOnNestedFieldDoesNotMoveBeforeAddFieldsOfPrefixOfPath) { - string inputPipe = "[{$addFields : {a : 1}}, {$match : {'a.b' : 1}}]"; + std::string inputPipe = "[{$addFields : {a : 1}}, {$match : {'a.b' : 1}}]"; - string outputPipe = "[{$addFields : {a : {$const : 1}}}, {$match : {'a.b' : {$eq : 1}}}]"; + std::string outputPipe = "[{$addFields : {a : {$const : 1}}}, {$match : {'a.b' : {$eq : 1}}}]"; - string serializedPipe = "[{$addFields : {a : {$const : 1}}}, {$match : {'a.b' : 1}}]"; + std::string serializedPipe = "[{$addFields : {a : {$const : 1}}}, {$match : {'a.b' : 1}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchWithExprOnNestedFieldDoesNotMoveBeforeAddFieldsOfPrefixOfPath) { - string inputPipe = "[{$addFields : {a : 1}}, {$match : {$expr: {$eq: ['$a.b', 1]}}}]"; + std::string inputPipe = "[{$addFields : {a : 1}}, {$match : {$expr: {$eq: ['$a.b', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$addFields : {a : {$const : 1}}}," " {$match: {$and: [{'a.b': {$_internalExprEq: 1}}," " {$expr: {$eq: ['$a.b', {$const: 1}]}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$addFields : {a : {$const : 1}}}," " {$match : {$expr: {$eq: ['$a.b', 1]}}}]"; @@ -304,25 +321,26 @@ TEST(PipelineOptimizationTest, MatchWithExprOnNestedFieldDoesNotMoveBeforeAddFie } TEST(PipelineOptimizationTest, MoveMatchOnNestedFieldBeforeAddFieldsOfDifferentNestedField) { - string inputPipe = "[{$addFields : {'a.b' : 1}}, {$match : {'a.c' : 1}}]"; + std::string inputPipe = "[{$addFields : {'a.b' : 1}}, {$match : {'a.c' : 1}}]"; - string outputPipe = "[{$match : {'a.c' : {$eq : 1}}}, {$addFields : {a : {b : {$const : 1}}}}]"; + std::string outputPipe = + "[{$match : {'a.c' : {$eq : 1}}}, {$addFields : {a : {b : {$const : 
1}}}}]"; - string serializedPipe = "[{$match : {'a.c' : 1}}, {$addFields : {a : {b: {$const : 1}}}}]"; + std::string serializedPipe = "[{$match : {'a.c' : 1}}, {$addFields : {a : {b: {$const : 1}}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MoveMatchWithExprOnNestedFieldBeforeAddFieldsOfDifferentNestedField) { - string inputPipe = "[{$addFields : {'a.b' : 1}}, {$match : {$expr: {$eq: ['$a.c', 1]}}}]"; + std::string inputPipe = "[{$addFields : {'a.b' : 1}}, {$match : {$expr: {$eq: ['$a.c', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{'a.c': {$_internalExprEq: 1}}," " {$expr: {$eq: ['$a.c', {$const: 1}]}}]}}," " {$addFields : {a : {b : {$const : 1}}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match : {$expr: {$eq: ['$a.c', 1]}}}," " {$addFields : {a : {b: {$const : 1}}}}]"; @@ -330,25 +348,25 @@ TEST(PipelineOptimizationTest, } TEST(PipelineOptimizationTest, MoveMatchBeforeAddFieldsWhenMatchedFieldIsPrefixOfAddedFieldName) { - string inputPipe = "[{$addFields : {abcd : 1}}, {$match : {abc : 1}}]"; + std::string inputPipe = "[{$addFields : {abcd : 1}}, {$match : {abc : 1}}]"; - string outputPipe = "[{$match : {abc : {$eq : 1}}}, {$addFields : {abcd: {$const: 1}}}]"; + std::string outputPipe = "[{$match : {abc : {$eq : 1}}}, {$addFields : {abcd: {$const: 1}}}]"; - string serializedPipe = "[{$match : {abc : 1}}, {$addFields : {abcd : {$const : 1}}}]"; + std::string serializedPipe = "[{$match : {abc : 1}}, {$addFields : {abcd : {$const : 1}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MoveMatchWithExprBeforeAddFieldsWhenMatchedFieldIsPrefixOfAddedFieldName) { - string inputPipe = "[{$addFields : {abcd : 1}}, {$match : {$expr: {$eq: ['$abc', 1]}}}]"; + std::string inputPipe = "[{$addFields : {abcd : 1}}, {$match : {$expr: {$eq: ['$abc', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{abc: {$_internalExprEq: 1}}," " {$expr: {$eq: ['$abc', {$const: 1}]}}]}}," " {$addFields : {abcd: {$const: 1}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match : {$expr: {$eq: ['$abc', 1]}}}," " {$addFields : {abcd : {$const : 1}}}]"; @@ -498,17 +516,17 @@ TEST(PipelineOptimizationTest, SortDoesNotSwapBeforeUnwindBecauseUnwindPathEqual } TEST(PipelineOptimizationTest, LookupShouldCoalesceWithUnwindOnAsSortDoesNotInterfere) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same'}}" ",{$sort : {'a.b': 1}}" "]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right', unwinding: {preserveNullAndEmptyArrays: false}}}" ",{$sort : {sortKey: {'a.b': 1}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same'}}" @@ -995,19 +1013,19 @@ TEST(PipelineOptimizationTest, RemoveEmptyMatch) { } TEST(PipelineOptimizationTest, RemoveMultipleEmptyMatches) { - string inputPipe = "[{$match: {}}, {$match: {}}]"; + std::string inputPipe = "[{$match: {}}, {$match: {}}]"; - string outputPipe = "[{$match: {}}]"; + std::string outputPipe = "[{$match: {}}]"; - string serializedPipe = "[{$match: {$and: [{}, {}]}}]"; + std::string serializedPipe = "[{$match: {$and: [{}, {}]}}]"; 
assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, RemoveEmptyMatchesAndKeepNonEmptyMatches) { - string inputPipe = "[{$match: {}}, {$match: {}}, {$match: {a: 1}}]"; - string outputPipe = "[{$match: {a: {$eq: 1}}}]"; - string serializedPipe = "[{$match: {$and: [{}, {}, {a: 1}]}}]"; + std::string inputPipe = "[{$match: {}}, {$match: {}}, {$match: {a: 1}}]"; + std::string outputPipe = "[{$match: {a: {$eq: 1}}}]"; + std::string serializedPipe = "[{$match: {$and: [{}, {}, {a: 1}]}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } @@ -1016,40 +1034,40 @@ TEST(PipelineOptimizationTest, RemoveEmptyMatchesAndKeepOtherStages) { } TEST(PipelineOptimizationTest, KeepEmptyMatchWithComment) { - string inputPipe = "[{$match: {$comment: 'foo'}}]"; - string outputPipe = "[{$match: {}}]"; - string serializedPipe = "[{$match: {$comment: 'foo'}}]"; + std::string inputPipe = "[{$match: {$comment: 'foo'}}]"; + std::string outputPipe = "[{$match: {}}]"; + std::string serializedPipe = "[{$match: {$comment: 'foo'}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, DoNotRemoveNonEmptyMatch) { - string inputPipe = "[{$match: {_id: 1}}]"; + std::string inputPipe = "[{$match: {_id: 1}}]"; - string outputPipe = "[{$match: {_id: {$eq : 1}}}]"; + std::string outputPipe = "[{$match: {_id: {$eq : 1}}}]"; - string serializedPipe = "[{$match: {_id: 1}}]"; + std::string serializedPipe = "[{$match: {_id: 1}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, RemoveMatchWithTrueConstExpr) { - string inputPipe = "[{$match: {$expr: true}}]"; - string outputPipe = "[{$match: {}}]"; - string serializedPipe = "[{$match: {$expr: true}}]"; + std::string inputPipe = "[{$match: {$expr: true}}]"; + std::string outputPipe = "[{$match: {}}]"; + std::string serializedPipe = "[{$match: {$expr: true}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, RemoveMultipleMatchesWithTrueConstExpr) { - string inputPipe = "[{$match: {$expr: true}}, {$match: {$expr: true}}]"; - string outputPipe = "[{$match: {}}]"; - string serializedPipe = "[{$match: {$and: [{$expr: true}, {$expr: true}]}}]"; + std::string inputPipe = "[{$match: {$expr: true}}, {$match: {$expr: true}}]"; + std::string outputPipe = "[{$match: {}}]"; + std::string serializedPipe = "[{$match: {$and: [{$expr: true}, {$expr: true}]}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, RemoveMatchWithTruthyConstExpr) { - string inputPipe = "[{$match: {$expr: {$concat: ['a', 'b']}}}]"; - string outputPipe = "[{$match: {}}]"; - string serializedPipe = "[{$match: {$expr: {$concat: ['a', 'b']}}}]"; + std::string inputPipe = "[{$match: {$expr: {$concat: ['a', 'b']}}}]"; + std::string outputPipe = "[{$match: {}}]"; + std::string serializedPipe = "[{$match: {$expr: {$concat: ['a', 'b']}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } @@ -1066,17 +1084,17 @@ TEST(PipelineOptimizationTest, MoveMatchBeforeSort) { } TEST(PipelineOptimizationTest, LookupMoveSortNotOnAsBefore) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'new', localField: 'left', foreignField: " "'right'}}" ",{$sort: {left: 1}}" "]"; - string outputPipe = + std::string outputPipe = "[{$sort: {sortKey: {left: 
1}}}" ",{$lookup: {from : 'lookupColl', as : 'new', localField: 'left', foreignField: " "'right'}}" "]"; - string serializedPipe = + std::string serializedPipe = "[{$sort: {left: 1}}" ",{$lookup: {from : 'lookupColl', as : 'new', localField: 'left', foreignField: " "'right'}}" @@ -1085,17 +1103,17 @@ TEST(PipelineOptimizationTest, LookupMoveSortNotOnAsBefore) { } TEST(PipelineOptimizationTest, LookupMoveSortOnPrefixStringOfAsBefore) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'leftNew', localField: 'left', foreignField: " "'right'}}" ",{$sort: {left: 1}}" "]"; - string outputPipe = + std::string outputPipe = "[{$sort: {sortKey: {left: 1}}}" ",{$lookup: {from : 'lookupColl', as : 'leftNew', localField: 'left', foreignField: " "'right'}}" "]"; - string serializedPipe = + std::string serializedPipe = "[{$sort: {left: 1}}" ",{$lookup: {from : 'lookupColl', as : 'leftNew', localField: 'left', foreignField: " "'right'}}" @@ -1104,17 +1122,17 @@ TEST(PipelineOptimizationTest, LookupMoveSortOnPrefixStringOfAsBefore) { } TEST(PipelineOptimizationTest, LookupShouldNotMoveSortOnAsBefore) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$sort: {same: 1, left: 1}}" "]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$sort: {sortKey: {same: 1, left: 1}}}" "]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$sort: {same: 1, left: 1}}" @@ -1123,17 +1141,17 @@ TEST(PipelineOptimizationTest, LookupShouldNotMoveSortOnAsBefore) { } TEST(PipelineOptimizationTest, LookupShouldNotMoveSortOnPathPrefixOfAsBefore) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same.new', localField: 'left', foreignField: " "'right'}}" ",{$sort: {same: 1}}" "]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same.new', localField: 'left', foreignField: " "'right'}}" ",{$sort: {sortKey: {same: 1}}}" "]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from : 'lookupColl', as : 'same.new', localField: 'left', foreignField: " "'right'}}" ",{$sort: {same: 1}}" @@ -1142,18 +1160,18 @@ TEST(PipelineOptimizationTest, LookupShouldNotMoveSortOnPathPrefixOfAsBefore) { } TEST(PipelineOptimizationTest, LookupUnwindShouldNotMoveSortBefore) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same'}}" ",{$sort: {left: 1}}" "]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right', unwinding: {preserveNullAndEmptyArrays: false}}}" ",{$sort: {sortKey: {left: 1}}}" "]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same'}}" @@ -1173,15 +1191,15 @@ TEST(PipelineOptimizationTest, MoveMatchOnExprBeforeSort) { } TEST(PipelineOptimizationTest, LookupShouldCoalesceWithUnwindOnAs) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same'}}" "]"; - string outputPipe = + 
std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right', unwinding: {preserveNullAndEmptyArrays: false}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same'}}" @@ -1190,14 +1208,14 @@ TEST(PipelineOptimizationTest, LookupShouldCoalesceWithUnwindOnAs) { } TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldCoalesceWithUnwindOnAs) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', let: {}, pipeline: []}}" ",{$unwind: {path: '$same'}}" "]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', let: {}, pipeline: [], " "unwinding: {preserveNullAndEmptyArrays: false}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from : 'lookupColl', as : 'same', let: {}, pipeline: []}}" ",{$unwind: {path: '$same'}}" "]"; @@ -1205,15 +1223,15 @@ TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldCoalesceWithUnwindO } TEST(PipelineOptimizationTest, LookupShouldCoalesceWithUnwindOnAsWithPreserveEmpty) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same', preserveNullAndEmptyArrays: true}}" "]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right', unwinding: {preserveNullAndEmptyArrays: true}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same', preserveNullAndEmptyArrays: true}}" @@ -1222,16 +1240,16 @@ TEST(PipelineOptimizationTest, LookupShouldCoalesceWithUnwindOnAsWithPreserveEmp } TEST(PipelineOptimizationTest, LookupShouldCoalesceWithUnwindOnAsWithIncludeArrayIndex) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same', includeArrayIndex: 'index'}}" "]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right', unwinding: {preserveNullAndEmptyArrays: false, includeArrayIndex: " "'index'}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same', includeArrayIndex: 'index'}}" @@ -1240,12 +1258,12 @@ TEST(PipelineOptimizationTest, LookupShouldCoalesceWithUnwindOnAsWithIncludeArra } TEST(PipelineOptimizationTest, LookupShouldNotCoalesceWithUnwindNotOnAs) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$from'}}" "]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$from'}}" @@ -1254,11 +1272,11 @@ TEST(PipelineOptimizationTest, LookupShouldNotCoalesceWithUnwindNotOnAs) { } TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldNotCoalesceWithUnwindNotOnAs) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', pipeline: []}}" ",{$unwind: {path: '$from'}}" "]"; - 
string outputPipe = + std::string outputPipe = "[{$lookup: {from : 'lookupColl', as : 'same', let: {}, pipeline: []}}" ",{$unwind: {path: '$from'}}" "]"; @@ -1266,15 +1284,15 @@ TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldNotCoalesceWithUnwi } TEST(PipelineOptimizationTest, LookupShouldSwapWithMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$match: {'independent': 0}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {independent: {$eq : 0}}}, " " {$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {independent: 0}}, " "{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z'}}]"; @@ -1282,15 +1300,15 @@ TEST(PipelineOptimizationTest, LookupShouldSwapWithMatch) { } TEST(PipelineOptimizationTest, LookupShouldSwapWithMatchOnExpr) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$match: {$expr: {$eq: ['$independent', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{independent: {$_internalExprEq: 1}}," " {$expr: {$eq: ['$independent', {$const: 1}]}}]}}," " {$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z'}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {$expr: {$eq: ['$independent', 1]}}}, " "{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z'}}]"; @@ -1298,13 +1316,13 @@ TEST(PipelineOptimizationTest, LookupShouldSwapWithMatchOnExpr) { } TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldSwapWithMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', pipeline: []}}, " " {$match: {'independent': 0}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {independent: {$eq : 0}}}, " " {$lookup: {from: 'lookupColl', as: 'asField', let: {}, pipeline: []}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {independent: 0}}, " "{$lookup: {from: 'lookupColl', as: 'asField', let: {}, pipeline: []}}]"; @@ -1312,14 +1330,14 @@ TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldSwapWithMatch) { } TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldSwapWithMatchOnExpr) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', pipeline: []}}, " " {$match: {$expr: {$eq: ['$independent', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{independent: {$_internalExprEq: 1}}," " {$expr: {$eq: ['$independent', {$const: 1}]}}]}}," " {$lookup: {from: 'lookupColl', as: 'asField', let: {}, pipeline: []}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {$expr: {$eq: ['$independent', 1]}}}, " "{$lookup: {from: 'lookupColl', as: 'asField', let: {}, pipeline: []}}]"; @@ -1327,11 +1345,11 @@ TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldSwapWithMatchOnExpr } TEST(PipelineOptimizationTest, LookupShouldSplitMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$match: {'independent': 0, asField: {$eq: 3}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {independent: {$eq: 0}}}, " " {$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', 
foreignField: " "'z'}}, " @@ -1340,15 +1358,15 @@ TEST(PipelineOptimizationTest, LookupShouldSplitMatch) { } TEST(PipelineOptimizationTest, LookupShouldNotAbsorbMatchOnAs) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$match: {'asField.subfield': 0}}]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$match: {'asField.subfield': {$eq : 0}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$match: {'asField.subfield': 0}}]"; @@ -1357,14 +1375,14 @@ TEST(PipelineOptimizationTest, LookupShouldNotAbsorbMatchOnAs) { } TEST(PipelineOptimizationTest, LookupShouldNotAbsorbMatchWithExprOnAs) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z'}}," " {$match: {$expr: {$eq: ['$asField.subfield', 0]}}}]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z'}}," "{$match: {$and: [{'asField.subfield': {$_internalExprEq: 0}}," " {$expr: {$eq: ['$asField.subfield', {$const: 0}]}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z'}}," " {$match: {$expr: {$eq: ['$asField.subfield', 0]}}}]"; @@ -1372,16 +1390,16 @@ TEST(PipelineOptimizationTest, LookupShouldNotAbsorbMatchWithExprOnAs) { } TEST(PipelineOptimizationTest, LookupShouldAbsorbUnwindMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " "{$unwind: '$asField'}, " "{$match: {'asField.subfield': {$eq: 1}}}]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z', " " let: {}, pipeline: [{$match: {subfield: {$eq: 1}}}]," " unwinding: {preserveNullAndEmptyArrays: false}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z', let: {}, pipeline: [{$match: {subfield: {$eq: 1}}}]}}," "{$unwind: {path: '$asField'}}]"; @@ -1389,16 +1407,16 @@ TEST(PipelineOptimizationTest, LookupShouldAbsorbUnwindMatch) { } TEST(PipelineOptimizationTest, LookupShouldAbsorbUnwindAndTypeMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " "{$unwind: '$asField'}, " "{$match: {'asField.subfield': {$type: [2]}}}]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z', " " let: {}, pipeline: [{$match: {subfield: {$type: [2]}}}]," " unwinding: {preserveNullAndEmptyArrays: false}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z', let: {}, pipeline: [{$match: {subfield: {$type: [2]}}}]}}," "{$unwind: {path: '$asField'}}]"; @@ -1406,15 +1424,15 @@ TEST(PipelineOptimizationTest, LookupShouldAbsorbUnwindAndTypeMatch) { } TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldAbsorbUnwindMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', pipeline: []}}, " "{$unwind: 
'$asField'}, " "{$match: {'asField.subfield': {$eq: 1}}}]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', let: {}, " "pipeline: [{$match: {subfield: {$eq: 1}}}], " "unwinding: {preserveNullAndEmptyArrays: false} } } ]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', let: {}, " "pipeline: [{$match: {subfield: {$eq: 1}}}]}}, " "{$unwind: {path: '$asField'}}]"; @@ -1422,12 +1440,12 @@ TEST(PipelineOptimizationTest, LookupWithPipelineSyntaxShouldAbsorbUnwindMatch) } TEST(PipelineOptimizationTest, LookupShouldAbsorbUnwindAndSplitAndAbsorbMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$unwind: '$asField'}, " " {$match: {'asField.subfield': {$eq: 1}, independentField: {$gt: 2}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {independentField: {$gt: 2}}}, " " {$lookup: { " " from: 'lookupColl', " @@ -1440,7 +1458,7 @@ TEST(PipelineOptimizationTest, LookupShouldAbsorbUnwindAndSplitAndAbsorbMatch) { " preserveNullAndEmptyArrays: false" " } " " }}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {independentField: {$gt: 2}}}, " " {$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z', let: {}, pipeline: [{$match: {subfield: {$eq: 1}}}]}}, " @@ -1452,18 +1470,18 @@ TEST(PipelineOptimizationTest, LookupShouldNotSplitIndependentAndDependentOrClau // If any child of the $or is dependent on the 'asField', then the $match cannot be moved above // the $lookup, and if any child of the $or is independent of the 'asField', then the $match // cannot be absorbed by the $lookup. - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$unwind: '$asField'}, " " {$match: {$or: [{'independent': {$gt: 4}}, " " {'asField.dependent': {$elemMatch: {a: {$eq: 1}}}}]}}]"; - string outputPipe = + std::string outputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: 'z', " " unwinding: {preserveNullAndEmptyArrays: false}}}, " " {$match: {$or: [{'independent': {$gt: 4}}, " " {'asField.dependent': {$elemMatch: {a: {$eq: 1}}}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$unwind: {path: '$asField'}}, " @@ -1473,12 +1491,12 @@ TEST(PipelineOptimizationTest, LookupShouldNotSplitIndependentAndDependentOrClau } TEST(PipelineOptimizationTest, LookupWithMatchOnArrayIndexFieldShouldNotCoalesce) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$unwind: {path: '$asField', includeArrayIndex: 'index'}}, " " {$match: {index: 0, 'asField.value': {$gt: 0}, independent: 1}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {independent: {$eq: 1}}}, " " {$lookup: { " " from: 'lookupColl', " @@ -1491,7 +1509,7 @@ TEST(PipelineOptimizationTest, LookupWithMatchOnArrayIndexFieldShouldNotCoalesce " } " " }}, " " {$match: {$and: [{index: {$eq: 0}}, {'asField.value': {$gt: 0}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {independent: {$eq: 1}}}, " " {$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " @@ -1501,12 +1519,12 @@ TEST(PipelineOptimizationTest, 
LookupWithMatchOnArrayIndexFieldShouldNotCoalesce } TEST(PipelineOptimizationTest, LookupWithUnwindPreservingNullAndEmptyArraysShouldNotCoalesce) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " " {$unwind: {path: '$asField', preserveNullAndEmptyArrays: true}}, " " {$match: {'asField.value': {$gt: 0}, independent: 1}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {independent: {$eq: 1}}}, " " {$lookup: { " " from: 'lookupColl', " @@ -1518,7 +1536,7 @@ TEST(PipelineOptimizationTest, LookupWithUnwindPreservingNullAndEmptyArraysShoul " } " " }}, " " {$match: {'asField.value': {$gt: 0}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {independent: {$eq: 1}}}, " " {$lookup: {from: 'lookupColl', as: 'asField', localField: 'y', foreignField: " "'z'}}, " @@ -1528,11 +1546,11 @@ TEST(PipelineOptimizationTest, LookupWithUnwindPreservingNullAndEmptyArraysShoul } TEST(PipelineOptimizationTest, LookupDoesNotAbsorbElemMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'x', localField: 'y', foreignField: 'z'}}, " " {$unwind: '$x'}, " " {$match: {x: {$elemMatch: {a: 1}}}}]"; - string outputPipe = + std::string outputPipe = "[{$lookup: { " " from: 'lookupColl', " " as: 'x', " @@ -1544,7 +1562,7 @@ TEST(PipelineOptimizationTest, LookupDoesNotAbsorbElemMatch) { " } " " }, " " {$match: {x: {$elemMatch: {a: {$eq: 1}}}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$lookup: {from: 'lookupColl', as: 'x', localField: 'y', foreignField: 'z'}}, " " {$unwind: {path: '$x'}}, " " {$match: {x: {$elemMatch: {a: 1}}}}]"; @@ -1552,31 +1570,31 @@ TEST(PipelineOptimizationTest, LookupDoesNotAbsorbElemMatch) { } TEST(PipelineOptimizationTest, LookupDoesSwapWithMatchOnLocalField) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'x', localField: 'y', foreignField: 'z'}}, " " {$match: {y: {$eq: 3}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {y: {$eq: 3}}}, " " {$lookup: {from: 'lookupColl', as: 'x', localField: 'y', foreignField: 'z'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, LookupDoesSwapWithMatchOnFieldWithSameNameAsForeignField) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'x', localField: 'y', foreignField: 'z'}}, " " {$match: {z: {$eq: 3}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {z: {$eq: 3}}}, " " {$lookup: {from: 'lookupColl', as: 'x', localField: 'y', foreignField: 'z'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, LookupDoesNotAbsorbUnwindOnSubfieldOfAsButStillMovesMatch) { - string inputPipe = + std::string inputPipe = "[{$lookup: {from: 'lookupColl', as: 'x', localField: 'y', foreignField: 'z'}}, " " {$unwind: {path: '$x.subfield'}}, " " {$match: {'independent': 2, 'x.dependent': 2}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {'independent': {$eq: 2}}}, " " {$lookup: {from: 'lookupColl', as: 'x', localField: 'y', foreignField: 'z'}}, " " {$match: {'x.dependent': {$eq: 2}}}, " @@ -1585,13 +1603,13 @@ TEST(PipelineOptimizationTest, LookupDoesNotAbsorbUnwindOnSubfieldOfAsButStillMo } TEST(PipelineOptimizationTest, GroupShouldSwapWithMatchIfFilteringOnID) { - string inputPipe = + std::string inputPipe = "[{$group : {_id:'$a'}}, " " {$match: {_id : 4}}]"; - string outputPipe = + std::string outputPipe = 
"[{$match: {a:{$eq : 4}}}, " " {$group:{_id:'$a'}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {a:{$eq :4}}}, " " {$group:{_id:'$a'}}]"; @@ -1599,13 +1617,13 @@ TEST(PipelineOptimizationTest, GroupShouldSwapWithMatchIfFilteringOnID) { } TEST(PipelineOptimizationTest, GroupShouldSwapWithMatchOnExprIfFilteringOnID) { - string inputPipe = + std::string inputPipe = "[{$group: {_id: '$a'}}, " " {$match: {$expr: {$eq: ['$_id', 4]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{a: {$_internalExprEq: 4}}, {$expr: {$eq: ['$a', {$const: 4}]}}]}}," " {$group: {_id: '$a'}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {$expr: {$eq: ['$a', {$const: 4}]}}}, " " {$group: {_id: '$a'}}]"; @@ -1613,13 +1631,13 @@ TEST(PipelineOptimizationTest, GroupShouldSwapWithMatchOnExprIfFilteringOnID) { } TEST(PipelineOptimizationTest, GroupShouldNotSwapWithMatchOnExprIfNotFilteringOnID) { - string inputPipe = + std::string inputPipe = "[{$group : {_id:'$a'}}, " " {$match: {$expr: {$eq: ['$b', 4]}}}]"; - string outputPipe = + std::string outputPipe = "[{$group : {_id:'$a'}}, " " {$match: {$and: [{b: {$_internalExprEq: 4}}, {$expr: {$eq: ['$b', {$const: 4}]}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$group : {_id:'$a'}}, " " {$match: {$expr: {$eq: ['$b', 4]}}}]"; @@ -1627,13 +1645,13 @@ TEST(PipelineOptimizationTest, GroupShouldNotSwapWithMatchOnExprIfNotFilteringOn } TEST(PipelineOptimizationTest, GroupShouldNotSwapWithMatchIfNotFilteringOnID) { - string inputPipe = + std::string inputPipe = "[{$group : {_id:'$a'}}, " " {$match: {b : 4}}]"; - string outputPipe = + std::string outputPipe = "[{$group : {_id:'$a'}}, " " {$match: {b : {$eq: 4}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$group : {_id:'$a'}}, " " {$match: {b : 4}}]"; @@ -1641,13 +1659,13 @@ TEST(PipelineOptimizationTest, GroupShouldNotSwapWithMatchIfNotFilteringOnID) { } TEST(PipelineOptimizationTest, GroupShouldNotSwapWithMatchIfExistsPredicateOnID) { - string inputPipe = + std::string inputPipe = "[{$group : {_id:'$x'}}, " " {$match: {_id : {$exists: true}}}]"; - string outputPipe = + std::string outputPipe = "[{$group : {_id:'$x'}}, " " {$match: {_id : {$exists: true}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$group : {_id:'$x'}}, " " {$match: {_id : {$exists: true}}}]"; @@ -1655,13 +1673,13 @@ TEST(PipelineOptimizationTest, GroupShouldNotSwapWithMatchIfExistsPredicateOnID) } TEST(PipelineOptimizationTest, GroupShouldNotSwapWithCompoundMatchIfExistsPredicateOnID) { - string inputPipe = + std::string inputPipe = "[{$group : {_id:'$x'}}, " " {$match: {$or : [ {_id : {$exists: true}}, {_id : {$gt : 70}}]}}]"; - string outputPipe = + std::string outputPipe = "[{$group : {_id:'$x'}}, " " {$match: {$or : [ {_id : {$exists: true}}, {_id : {$gt : 70}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$group : {_id:'$x'}}, " " {$match: {$or : [ {_id : {$exists: true}}, {_id : {$gt : 70}}]}}]"; @@ -1669,13 +1687,13 @@ TEST(PipelineOptimizationTest, GroupShouldNotSwapWithCompoundMatchIfExistsPredic } TEST(PipelineOptimizationTest, GroupShouldSwapWithCompoundMatchIfFilteringOnID) { - string inputPipe = + std::string inputPipe = "[{$group : {_id:'$x'}}, " " {$match: {$or : [ {_id : {$lte : 50}}, {_id : {$gt : 70}}]}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$or : [ {x : {$lte : 50}}, {x : {$gt : 70}}]}}," "{$group : {_id:'$x'}}]"; - string serializedPipe = + std::string 
serializedPipe = "[{$match: {$or : [ {x : {$lte : 50}}, {x : {$gt : 70}}]}}," "{$group : {_id:'$x'}}]"; @@ -1683,54 +1701,55 @@ TEST(PipelineOptimizationTest, GroupShouldSwapWithCompoundMatchIfFilteringOnID) } TEST(PipelineOptimizationTest, MatchShouldDuplicateItselfBeforeRedact) { - string inputPipe = "[{$redact: '$$PRUNE'}, {$match: {a: 1, b:12}}]"; - string outputPipe = + std::string inputPipe = "[{$redact: '$$PRUNE'}, {$match: {a: 1, b:12}}]"; + std::string outputPipe = "[{$match: {$and: [{a: {$eq: 1}}, {b: {$eq: 12}}]}}, {$redact: '$$PRUNE'}, " "{$match: {$and: [{a: {$eq: 1}}, {b: {$eq: 12}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {a: 1, b: 12}}, {$redact: '$$PRUNE'}, {$match: {a: 1, b: 12}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchShouldSwapWithUnwind) { - string inputPipe = + std::string inputPipe = "[{$unwind: '$a.b.c'}, " "{$match: {'b': 1}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {'b': {$eq : 1}}}, " "{$unwind: {path: '$a.b.c'}}]"; - string serializedPipe = "[{$match: {b: 1}}, {$unwind: {path: '$a.b.c'}}]"; + std::string serializedPipe = "[{$match: {b: 1}}, {$unwind: {path: '$a.b.c'}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchOnExprShouldSwapWithUnwind) { - string inputPipe = + std::string inputPipe = "[{$unwind: '$a.b.c'}, " "{$match: {$expr: {$eq: ['$b', 1]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{b: {$_internalExprEq: 1}}, {$expr: {$eq: ['$b', {$const: 1}]}}]}}, " "{$unwind: {path: '$a.b.c'}}]"; - string serializedPipe = "[{$match: {$expr: {$eq: ['$b', 1]}}}, {$unwind: {path: '$a.b.c'}}]"; + std::string serializedPipe = + "[{$match: {$expr: {$eq: ['$b', 1]}}}, {$unwind: {path: '$a.b.c'}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchOnPrefixShouldNotSwapOnUnwind) { - string inputPipe = + std::string inputPipe = "[{$unwind: {path: '$a.b.c'}}, " "{$match: {'a.b': 1}}]"; - string outputPipe = + std::string outputPipe = "[{$unwind: {path: '$a.b.c'}}, " "{$match: {'a.b': {$eq : 1}}}]"; - string serializedPipe = "[{$unwind: {path: '$a.b.c'}}, {$match: {'a.b': 1}}]"; + std::string serializedPipe = "[{$unwind: {path: '$a.b.c'}}, {$match: {'a.b': 1}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchShouldSplitOnUnwind) { - string inputPipe = + std::string inputPipe = "[{$unwind: '$a.b'}, " "{$match: {$and: [{f: {$eq: 5}}, " " {$nor: [{'a.d': 1, c: 5}, {'a.b': 3, c: 5}]}]}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{f: {$eq: 5}}," " {$nor: [{$and: [{'a.d': {$eq: 1}}, {c: {$eq: 5}}]}]}]}}," "{$unwind: {path: '$a.b'}}, " @@ -1741,13 +1760,13 @@ TEST(PipelineOptimizationTest, MatchShouldSplitOnUnwind) { // The 'a.b' path is a modified one by $unwind and $elemMatch is dependent on it and so we can't // swap $elemMatch in this case. 
TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWithElemMatchOnModifiedPathByUnwind) { - string inputPipe = + std::string inputPipe = "[{$unwind: {path: '$a.b'}}, " "{$match: {a: {$elemMatch: {b: {d: 1}}}}}]"; - string outputPipe = + std::string outputPipe = "[{$unwind: {path: '$a.b'}}, " "{$match: {a: {$elemMatch: {b: {$eq : {d: 1}}}}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$unwind : {path : '$a.b'}}, {$match : {a : {$elemMatch : {b : {d : 1}}}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } @@ -1755,13 +1774,13 @@ TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWithElemMatchOnModifiedPath // The 'a.b' path is a modified one by $project and $elemMatch is dependent on it and so we can't // swap $elemMatch in this case. TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWithElemMatchOnModifiedPathByProject1) { - string inputPipe = + std::string inputPipe = "[{$project: {x: '$a.b', _id: false}}, " "{$match: {x: {$elemMatch: {d: 1}}}}]"; - string outputPipe = + std::string outputPipe = "[{$project: {x: '$a.b', _id: false}}, " "{$match: {x: {$elemMatch: {d: {$eq: 1}}}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$project: {x: '$a.b', _id: false}}, " "{$match: {x: {$elemMatch: {d: 1}}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); @@ -1770,13 +1789,13 @@ TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWithElemMatchOnModifiedPath // The 'a.b' path is a modified one by $project and $elemMatch is dependent on it and so we can't // swap $elemMatch in this case. TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWithElemMatchOnModifiedPathByProject2) { - string inputPipe = + std::string inputPipe = "[{$project: {x: {y: '$a.b'}, _id: false}}, " "{$match: {'x.y': {$elemMatch: {d: 1}}}}]"; - string outputPipe = + std::string outputPipe = "[{$project: {x: {y: '$a.b'}, _id: false}}, " "{$match: {'x.y': {$elemMatch: {d: {$eq: 1}}}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$project: {x: {y: '$a.b'}, _id: false}}, " "{$match: {'x.y': {$elemMatch: {d: 1}}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); @@ -1785,23 +1804,23 @@ TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWithElemMatchOnModifiedPath // The 'a.b' path is a modified one by $project and $elemMatch is dependent on it and so we can't // swap $elemMatch in this case. 
TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWithElemMatchOnModifiedPathByProject3) { - string inputPipe = + std::string inputPipe = "[{$project: {x: {y: {z: '$a.b'}}, _id: false}}, " "{$match: {'x.y.z': {$elemMatch: {d: 1}}}}]"; - string outputPipe = + std::string outputPipe = "[{$project: {x: {y: {z: '$a.b'}}, _id: false}}, " "{$match: {'x.y.z': {$elemMatch: {d: {$eq: 1}}}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$project: {x: {y: {z: '$a.b'}}, _id: false}}, " "{$match: {'x.y.z': {$elemMatch: {d: 1}}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWhenMatchingOnIndexField) { - string inputPipe = + std::string inputPipe = "[{$unwind: {path: '$a', includeArrayIndex: 'foo'}}, " " {$match: {foo: 0, b: 1}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$eq: 1}}}, " " {$unwind: {path: '$a', includeArrayIndex: 'foo'}}, " " {$match: {foo: {$eq: 0}}}]"; @@ -1809,15 +1828,15 @@ TEST(PipelineOptimizationTest, MatchShouldNotOptimizeWhenMatchingOnIndexField) { } TEST(PipelineOptimizationTest, MatchWithNorOnlySplitsIndependentChildren) { - string inputPipe = + std::string inputPipe = "[{$unwind: {path: '$a'}}, " "{$match: {$nor: [{$and: [{a: {$eq: 1}}, {b: {$eq: 1}}]}, {b: {$eq: 2}} ]}}]"; - string outputPipe = + std::string outputPipe = R"( [{$match: {b: {$not: {$eq: 2}}}}, {$unwind: {path: '$a'}}, {$match: {$nor: [{$and: [{a: {$eq: 1}}, {b: {$eq: 1}}]}]}}])"; - string serializedPipe = R"( + std::string serializedPipe = R"( [{$match: {$nor: [{b: {$eq: 2}}]}}, {$unwind: {path: '$a'}}, {$match: {$nor: [{$and: [{a: {$eq: 1}}, {b: {$eq: 1}}]}]}}])"; @@ -1825,36 +1844,36 @@ TEST(PipelineOptimizationTest, MatchWithNorOnlySplitsIndependentChildren) { } TEST(PipelineOptimizationTest, MatchWithOrDoesNotSplit) { - string inputPipe = + std::string inputPipe = "[{$unwind: {path: '$a'}}, " "{$match: {$or: [{a: {$eq: 'dependent'}}, {b: {$eq: 'independent'}}]}}]"; - string outputPipe = + std::string outputPipe = "[{$unwind: {path: '$a'}}, " "{$match: {$or: [{a: {$eq: 'dependent'}}, {b: {$eq: 'independent'}}]}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, MatchOnExprWithOrDoesNotSplit) { - string inputPipe = + std::string inputPipe = "[{$unwind: {path: '$a'}}, " " {$match: {$or: [{$expr: {$eq: ['$a', 'dependent']}}, {b: {$eq: 'independent'}}]}}]"; - string outputPipe = + std::string outputPipe = "[{$unwind: {path: '$a'}}, " " {$match: {$or: [{$and: [{a: {$_internalExprEq: 'dependent'}}," " {$expr: {$eq: ['$a', {$const: 'dependent'}]}}]}," " {b: {$eq: 'independent'}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$unwind: {path: '$a'}}, " " {$match: {$or: [{$expr: {$eq: ['$a', 'dependent']}}, {b: {$eq: 'independent'}}]}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, UnwindBeforeDoubleMatchShouldRepeatedlyOptimize) { - string inputPipe = + std::string inputPipe = "[{$unwind: '$a'}, " "{$match: {b: {$gt: 0}}}, " "{$match: {a: 1, c: 1}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{b: {$gt: 0}}, {c: {$eq: 1}}]}}," "{$unwind: {path: '$a'}}, " "{$match: {a: {$eq: 1}}}]"; @@ -1862,17 +1881,17 @@ TEST(PipelineOptimizationTest, UnwindBeforeDoubleMatchShouldRepeatedlyOptimize) } TEST(PipelineOptimizationTest, GraphLookupShouldCoalesceWithUnwindOnAs) { - string inputPipe = + std::string inputPipe = "[{$graphLookup: {from: 
'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d'}}, " " {$unwind: '$out'}]"; - string outputPipe = + std::string outputPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d', " " unwinding: {preserveNullAndEmptyArrays: false}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d'}}, " " {$unwind: {path: '$out'}}]"; @@ -1880,17 +1899,17 @@ TEST(PipelineOptimizationTest, GraphLookupShouldCoalesceWithUnwindOnAs) { } TEST(PipelineOptimizationTest, GraphLookupShouldCoalesceWithUnwindOnAsWithPreserveEmpty) { - string inputPipe = + std::string inputPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d'}}, " " {$unwind: {path: '$out', preserveNullAndEmptyArrays: true}}]"; - string outputPipe = + std::string outputPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d', " " unwinding: {preserveNullAndEmptyArrays: true}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d'}}, " " {$unwind: {path: '$out', preserveNullAndEmptyArrays: true}}]"; @@ -1898,18 +1917,18 @@ TEST(PipelineOptimizationTest, GraphLookupShouldCoalesceWithUnwindOnAsWithPreser } TEST(PipelineOptimizationTest, GraphLookupShouldCoalesceWithUnwindOnAsWithIncludeArrayIndex) { - string inputPipe = + std::string inputPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d'}}, " " {$unwind: {path: '$out', includeArrayIndex: 'index'}}]"; - string outputPipe = + std::string outputPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d', " " unwinding: {preserveNullAndEmptyArrays: false, " " includeArrayIndex: 'index'}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', " " startWith: '$d'}}, " @@ -1918,12 +1937,12 @@ TEST(PipelineOptimizationTest, GraphLookupShouldCoalesceWithUnwindOnAsWithInclud } TEST(PipelineOptimizationTest, GraphLookupShouldNotCoalesceWithUnwindNotOnAs) { - string inputPipe = + std::string inputPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d'}}, " " {$unwind: '$nottherightthing'}]"; - string outputPipe = + std::string outputPipe = "[{$graphLookup: {from: 'lookupColl', as: 'out', connectToField: 'b', " " connectFromField: 'c', startWith: '$d'}}, " " {$unwind: {path: '$nottherightthing'}}]"; @@ -1931,7 +1950,7 @@ TEST(PipelineOptimizationTest, GraphLookupShouldNotCoalesceWithUnwindNotOnAs) { } TEST(PipelineOptimizationTest, GraphLookupShouldSwapWithMatch) { - string inputPipe = + std::string inputPipe = "[{$graphLookup: {" " from: 'lookupColl'," " as: 'results'," @@ -1941,7 +1960,7 @@ TEST(PipelineOptimizationTest, GraphLookupShouldSwapWithMatch) { " }}," " {$match: {independent: 'x'}}" "]"; - string outputPipe = + std::string outputPipe = "[{$match: {independent: {$eq : 'x'}}}," " {$graphLookup: {" " from: 'lookupColl'," @@ -1950,7 +1969,7 @@ TEST(PipelineOptimizationTest, GraphLookupShouldSwapWithMatch) { " connectFromField: 'from'," " startWith: 
'$startVal'" " }}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {independent: 'x'}}, " " {$graphLookup: {" " from: 'lookupColl'," @@ -1963,7 +1982,7 @@ TEST(PipelineOptimizationTest, GraphLookupShouldSwapWithMatch) { } TEST(PipelineOptimizationTest, GraphLookupShouldSwapWithSortNotOnAs) { - string inputPipe = + std::string inputPipe = "[" " {$graphLookup: {" " from: 'lookupColl'," @@ -1974,7 +1993,7 @@ TEST(PipelineOptimizationTest, GraphLookupShouldSwapWithSortNotOnAs) { " }}," " {$sort: {from: 1}}" "]"; - string outputPipe = + std::string outputPipe = "[" " {$sort: {sortKey: {from: 1}}}," " {$graphLookup: {" @@ -1985,7 +2004,7 @@ TEST(PipelineOptimizationTest, GraphLookupShouldSwapWithSortNotOnAs) { " startWith: '$start'" " }}" "]"; - string serializedPipe = + std::string serializedPipe = "[" " {$sort: {from: 1}}," " {$graphLookup: {" @@ -2000,7 +2019,7 @@ TEST(PipelineOptimizationTest, GraphLookupShouldSwapWithSortNotOnAs) { } TEST(PipelineOptimizationTest, GraphLookupWithInternalUnwindShouldNotSwapWithSortNotOnAs) { - string inputPipe = + std::string inputPipe = "[" " {$graphLookup: {" " from: 'lookupColl'," @@ -2012,7 +2031,7 @@ TEST(PipelineOptimizationTest, GraphLookupWithInternalUnwindShouldNotSwapWithSor " {$unwind: {path: '$out', includeArrayIndex: 'index'}}," " {$sort: {from: 1}}" "]"; - string outputPipe = + std::string outputPipe = "[" " {$graphLookup: {" " from: 'lookupColl'," @@ -2028,7 +2047,7 @@ TEST(PipelineOptimizationTest, GraphLookupWithInternalUnwindShouldNotSwapWithSor } TEST(PipelineOptimizationTest, GraphLookupShouldNotSwapWithSortOnAs) { - string inputPipe = + std::string inputPipe = "[" " {$graphLookup: {" " from: 'lookupColl'," @@ -2039,7 +2058,7 @@ TEST(PipelineOptimizationTest, GraphLookupShouldNotSwapWithSortOnAs) { " }}," " {$sort: {out: 1}}" "]"; - string outputPipe = + std::string outputPipe = "[" " {$graphLookup: {" " from: 'lookupColl'," @@ -2054,10 +2073,10 @@ TEST(PipelineOptimizationTest, GraphLookupShouldNotSwapWithSortOnAs) { } TEST(PipelineOptimizationTest, ExclusionProjectShouldSwapWithIndependentMatch) { - string inputPipe = "[{$project: {redacted: 0}}, {$match: {unrelated: 4}}]"; - string outputPipe = + std::string inputPipe = "[{$project: {redacted: 0}}, {$match: {unrelated: 4}}]"; + std::string outputPipe = "[{$match: {unrelated: {$eq : 4}}}, {$project: {redacted: false, _id: true}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match : {unrelated : 4}}, {$project : {redacted : false, _id: true}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } @@ -2070,10 +2089,10 @@ TEST(PipelineOptimizationTest, ExclusionProjectShouldNotSwapWithMatchOnExcludedF } TEST(PipelineOptimizationTest, MatchShouldSplitIfPartIsIndependentOfExclusionProjection) { - string inputPipe = + std::string inputPipe = "[{$project: {redacted: 0}}," " {$match: {redacted: 'x', unrelated: 4}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {unrelated: {$eq: 4}}}," " {$project: {redacted: false, _id: true}}," " {$match: {redacted: {$eq: 'x'}}}]"; @@ -2081,17 +2100,17 @@ TEST(PipelineOptimizationTest, MatchShouldSplitIfPartIsIndependentOfExclusionPro } TEST(PipelineOptimizationTest, MatchOnExprShouldSplitIfPartIsIndependentOfExclusionProjection) { - string inputPipe = + std::string inputPipe = "[{$project: {redacted: 0}}," " {$match: {$and: [{$expr: {$eq: ['$redacted', 'x']}}," " {$expr: {$eq: ['$unrelated', 4]}}]}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: 
[{unrelated: {$_internalExprEq: 4}}," " {$expr: {$eq: ['$unrelated', {$const: 4}]}}]}}," " {$project: {redacted: false, _id: true}}," " {$match: {$and: [{redacted: {$_internalExprEq: 'x'}}," " {$expr: {$eq: ['$redacted', {$const: 'x'}]}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {$expr: {$eq: ['$unrelated', {$const: 4}]}}}," " {$project: {redacted: false, _id: true}}," " {$match: {$expr: {$eq: ['$redacted', {$const: 'x'}]}}}]"; @@ -2099,32 +2118,32 @@ TEST(PipelineOptimizationTest, MatchOnExprShouldSplitIfPartIsIndependentOfExclus } TEST(PipelineOptimizationTest, InclusionProjectShouldSwapWithIndependentMatch) { - string inputPipe = "[{$project: {included: 1}}, {$match: {included: 4}}]"; - string outputPipe = + std::string inputPipe = "[{$project: {included: 1}}, {$match: {included: 4}}]"; + std::string outputPipe = "[{$match: {included: {$eq : 4}}}, {$project: {_id: true, included: true}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match : {included : 4}}, {$project : {_id: true, included : true}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, InclusionProjectShouldNotSwapWithMatchOnFieldsNotIncluded) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, included: true, subdoc: {included: true}}}," " {$match: {notIncluded: 'x', unrelated: 4}}]"; - string outputPipe = + std::string outputPipe = "[{$project: {_id: true, included: true, subdoc: {included: true}}}," " {$match: {$and: [{notIncluded: {$eq: 'x'}}, {unrelated: {$eq: 4}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$project: {_id: true, included: true, subdoc: {included: true}}}," " {$match: {notIncluded: 'x', unrelated: 4}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchShouldSplitIfPartIsIndependentOfInclusionProjection) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, included: true}}," " {$match: {included: 'x', unrelated: 4}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {included: {$eq: 'x'}}}," " {$project: {_id: true, included: true}}," " {$match: {unrelated: {$eq: 4}}}]"; @@ -2132,25 +2151,25 @@ TEST(PipelineOptimizationTest, MatchShouldSplitIfPartIsIndependentOfInclusionPro } TEST(PipelineOptimizationTest, MatchOnExprShouldNotSplitIfDependentOnInclusionProjection) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, included: true}}," " {$match: {$expr: {$eq: ['$redacted', 'x']}}}]"; - string outputPipe = + std::string outputPipe = "[{$project: {_id: true, included: true}}," " {$match: {$and: [{redacted: {$_internalExprEq: 'x'}}," " {$expr: {$eq: ['$redacted', {$const: 'x'}]}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$project: {_id: true, included: true}}," " {$match: {$expr: {$eq: ['$redacted', 'x']}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, TwoMatchStagesShouldBothPushIndependentPartsBeforeProjection) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, included: true}}," " {$match: {included: 'x', unrelated: 4}}," " {$match: {included: 'y', unrelated: 5}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$and: [{included: {$eq: 'x'}}, {included: {$eq: 'y'}}]}}," " {$project: {_id: true, included: true}}," " {$match: {$and: [{unrelated: {$eq: 4}}, {unrelated: {$eq: 5}}]}}]"; @@ -2158,75 +2177,75 
@@ TEST(PipelineOptimizationTest, TwoMatchStagesShouldBothPushIndependentPartsBefor } TEST(PipelineOptimizationTest, NeighboringMatchesShouldCoalesce) { - string inputPipe = + std::string inputPipe = "[{$match: {x: 'x'}}," " {$match: {y: 'y'}}]"; - string outputPipe = "[{$match: {$and: [{x: {$eq: 'x'}}, {y: {$eq : 'y'}}]}}]"; - string serializedPipe = "[{$match: {$and: [{x: 'x'}, {y: 'y'}]}}]"; + std::string outputPipe = "[{$match: {$and: [{x: {$eq: 'x'}}, {y: {$eq : 'y'}}]}}]"; + std::string serializedPipe = "[{$match: {$and: [{x: 'x'}, {y: 'y'}]}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchShouldNotSwapBeforeLimit) { - string inputPipe = "[{$limit: 3}, {$match: {y: 'y'}}]"; - string outputPipe = "[{$limit: 3}, {$match: {y: {$eq : 'y'}}}]"; - string serializedPipe = "[{$limit: 3}, {$match: {y: 'y'}}]"; + std::string inputPipe = "[{$limit: 3}, {$match: {y: 'y'}}]"; + std::string outputPipe = "[{$limit: 3}, {$match: {y: {$eq : 'y'}}}]"; + std::string serializedPipe = "[{$limit: 3}, {$match: {y: 'y'}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchOnExprShouldNotSwapBeforeLimit) { - string inputPipe = "[{$limit: 3}, {$match : {$expr: {$eq: ['$y', 'y']}}}]"; - string outputPipe = + std::string inputPipe = "[{$limit: 3}, {$match : {$expr: {$eq: ['$y', 'y']}}}]"; + std::string outputPipe = "[{$limit: 3}, {$match: {$and: [{y: {$_internalExprEq: 'y'}}," " {$expr: {$eq: ['$y', {$const: 'y'}]}}]}}]"; - string serializedPipe = "[{$limit: 3}, {$match : {$expr: {$eq: ['$y', 'y']}}}]"; + std::string serializedPipe = "[{$limit: 3}, {$match : {$expr: {$eq: ['$y', 'y']}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchShouldNotSwapBeforeSkip) { - string inputPipe = "[{$skip: 3}, {$match: {y: 'y'}}]"; - string outputPipe = "[{$skip: 3}, {$match: {y: {$eq : 'y'}}}]"; - string serializedPipe = "[{$skip: 3}, {$match: {y: 'y'}}]"; + std::string inputPipe = "[{$skip: 3}, {$match: {y: 'y'}}]"; + std::string outputPipe = "[{$skip: 3}, {$match: {y: {$eq : 'y'}}}]"; + std::string serializedPipe = "[{$skip: 3}, {$match: {y: 'y'}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchOnExprShouldNotSwapBeforeSkip) { - string inputPipe = "[{$skip: 3}, {$match : {$expr: {$eq: ['$y', 'y']}}}]"; - string outputPipe = + std::string inputPipe = "[{$skip: 3}, {$match : {$expr: {$eq: ['$y', 'y']}}}]"; + std::string outputPipe = "[{$skip: 3}, {$match: {$and: [{y: {$_internalExprEq: 'y'}}," " {$expr: {$eq: ['$y', {$const: 'y'}]}}]}}]"; - string serializedPipe = "[{$skip: 3}, {$match : {$expr: {$eq: ['$y', 'y']}}}]"; + std::string serializedPipe = "[{$skip: 3}, {$match : {$expr: {$eq: ['$y', 'y']}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); } TEST(PipelineOptimizationTest, MatchShouldMoveAcrossProjectRename) { - string inputPipe = "[{$project: {_id: true, a: '$b'}}, {$match: {a: {$eq: 1}}}]"; - string outputPipe = "[{$match: {b: {$eq: 1}}}, {$project: {_id: true, a: '$b'}}]"; + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, {$match: {a: {$eq: 1}}}]"; + std::string outputPipe = "[{$match: {b: {$eq: 1}}}, {$project: {_id: true, a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, MatchShouldMoveAcrossAddFieldsRename) { - string inputPipe = 
"[{$addFields: {a: '$b'}}, {$match: {a: {$eq: 1}}}]"; - string outputPipe = "[{$match: {b: {$eq: 1}}}, {$addFields: {a: '$b'}}]"; + std::string inputPipe = "[{$addFields: {a: '$b'}}, {$match: {a: {$eq: 1}}}]"; + std::string outputPipe = "[{$match: {b: {$eq: 1}}}, {$addFields: {a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, MatchShouldMoveAcrossProjectRenameWithExplicitROOT) { - string inputPipe = "[{$project: {_id: true, a: '$$ROOT.b'}}, {$match: {a: {$eq: 1}}}]"; - string outputPipe = "[{$match: {b: {$eq: 1}}}, {$project: {_id: true, a: '$$ROOT.b'}}]"; + std::string inputPipe = "[{$project: {_id: true, a: '$$ROOT.b'}}, {$match: {a: {$eq: 1}}}]"; + std::string outputPipe = "[{$match: {b: {$eq: 1}}}, {$project: {_id: true, a: '$$ROOT.b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, MatchShouldMoveAcrossAddFieldsRenameWithExplicitCURRENT) { - string inputPipe = "[{$addFields: {a: '$$CURRENT.b'}}, {$match: {a: {$eq: 1}}}]"; - string outputPipe = "[{$match: {b: {$eq: 1}}}, {$addFields: {a: '$b'}}]"; + std::string inputPipe = "[{$addFields: {a: '$$CURRENT.b'}}, {$match: {a: {$eq: 1}}}]"; + std::string outputPipe = "[{$match: {b: {$eq: 1}}}, {$addFields: {a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, PartiallyDependentMatchWithRenameShouldSplitAcrossAddFields) { - string inputPipe = + std::string inputPipe = "[{$addFields: {'a.b': '$c', d: {$add: ['$e', '$f']}}}," "{$match: {$and: [{$or: [{'a.b': 1}, {x: 2}]}, {d: 3}]}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$or: [{c: {$eq: 1}}, {x: {$eq: 2}}]}}," "{$addFields: {a: {b: '$c'}, d: {$add: ['$e', '$f']}}}," "{$match: {d: {$eq: 3}}}]"; @@ -2234,14 +2253,14 @@ TEST(PipelineOptimizationTest, PartiallyDependentMatchWithRenameShouldSplitAcros } TEST(PipelineOptimizationTest, NorCanSplitAcrossProjectWithRename) { - string inputPipe = + std::string inputPipe = "[{$project: {x: true, y: '$z', _id: false}}," "{$match: {$nor: [{w: {$eq: 1}}, {y: {$eq: 1}}]}}]"; - string outputPipe = + std::string outputPipe = R"([{$match: {z : {$not: {$eq: 1}}}}, {$project: {x: true, y: "$z", _id: false}}, {$match: {w: {$not: {$eq: 1}}}}])"; - string serializedPipe = R"( + std::string serializedPipe = R"( [{$match: {$nor: [ {z : {$eq: 1}}]}}, {$project: {x: true, y: "$z", _id: false}}, {$match: {$nor: [ {w: {$eq: 1}}]}}] @@ -2250,18 +2269,18 @@ TEST(PipelineOptimizationTest, NorCanSplitAcrossProjectWithRename) { } TEST(PipelineOptimizationTest, MatchCanMoveAcrossSeveralRenames) { - string inputPipe = + std::string inputPipe = "[{$project: {c: '$d', _id: false}}," "{$addFields: {b: '$c'}}," "{$project: {a: '$b', z: 1}}," "{$match: {a: 1, z: 2}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {d: {$eq: 1}}}," "{$project: {c: '$d', _id: false}}," "{$match: {z: {$eq: 2}}}," "{$addFields: {b: '$c'}}," "{$project: {_id: true, z: true, a: '$b'}}]"; - string serializedPipe = R"( + std::string serializedPipe = R"( [{$match: {d : {$eq: 1}}}, {$project: {c: "$d", _id: false}}, {$match: {z : {$eq: 2}}}, @@ -2271,34 +2290,35 @@ TEST(PipelineOptimizationTest, MatchCanMoveAcrossSeveralRenames) { } TEST(PipelineOptimizationTest, RenameShouldNotBeAppliedToDependentMatch) { - string pipeline = + std::string pipeline = "[{$project: {x: {$add: ['$foo', '$bar']}, y: '$z', _id: false}}," "{$match: {$or: [{x: {$eq: 1}}, {y: {$eq: 1}}]}}]"; assertPipelineOptimizesTo(pipeline, pipeline); } TEST(PipelineOptimizationTest, 
MatchCannotMoveAcrossAddFieldsRenameOfDottedPath) { - string pipeline = "[{$addFields: {a: '$b.c'}}, {$match: {a: {$eq: 1}}}]"; + std::string pipeline = "[{$addFields: {a: '$b.c'}}, {$match: {a: {$eq: 1}}}]"; assertPipelineOptimizesTo(pipeline, pipeline); } TEST(PipelineOptimizationTest, MatchCannotMoveAcrossProjectRenameOfDottedPath) { - string inputPipe = "[{$project: {a: '$$CURRENT.b.c', _id: false}}, {$match: {a: {$eq: 1}}}]"; - string outputPipe = "[{$project: {a: '$b.c', _id: false}}, {$match: {a: {$eq: 1}}}]"; + std::string inputPipe = + "[{$project: {a: '$$CURRENT.b.c', _id: false}}, {$match: {a: {$eq: 1}}}]"; + std::string outputPipe = "[{$project: {a: '$b.c', _id: false}}, {$match: {a: {$eq: 1}}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, MatchWithTypeShouldMoveAcrossRename) { - string inputPipe = "[{$addFields: {a: '$b'}}, {$match: {a: {$type: 4}}}]"; - string outputPipe = "[{$match: {b: {$type: [4]}}}, {$addFields: {a: '$b'}}]"; + std::string inputPipe = "[{$addFields: {a: '$b'}}, {$match: {a: {$type: 4}}}]"; + std::string outputPipe = "[{$match: {b: {$type: [4]}}}, {$addFields: {a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, MatchOnArrayFieldCanSplitAcrossRenameWithMapAndProject) { - string inputPipe = + std::string inputPipe = "[{$project: {d: {$map: {input: '$a', as: 'iter', in: {e: '$$iter.b', f: {$add: " "['$$iter.c', 1]}}}}}}, {$match: {'d.e': 1, 'd.f': 1}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {'a.b': {$eq: 1}}}, {$project: {_id: true, d: {$map: {input: '$a', as: 'iter', " "in: {e: '$$iter.b', f: {$add: ['$$iter.c', {$const: 1}]}}}}}}, {$match: {'d.f': {$eq: " "1}}}]"; @@ -2309,7 +2329,7 @@ TEST(PipelineOptimizationTest, MatchElemMatchValueOnArrayFieldCanSplitAcrossRenameWithMapAndProject) { // The $project simply renames 'a.b' & 'a.c' to 'd.e' & 'd.f' and the $match with $elemMatch on // the leaf value can be swapped with $project. - string inputPipe = R"( + std::string inputPipe = R"( [ { $project: { @@ -2321,7 +2341,7 @@ TEST(PipelineOptimizationTest, {$match: {"d.e": {$elemMatch: {$eq: 1}}, "d.f": {$elemMatch: {$eq: 1}}}} ] )"; - string outputPipe = R"( + std::string outputPipe = R"( [ { $match: {$and: [{"a.b": {$elemMatch: {$eq: 1}}}, {"a.c": {$elemMatch: {$eq: 1}}}]} @@ -2346,7 +2366,7 @@ TEST(PipelineOptimizationTest, // The $project simply renames 'a.b' & 'a.c' to 'd.e' & 'd.f' but the dependency tracker reports // the 'd' for $elemMatch as a modified dependency and so $match cannot be swapped with // $project. - string inputPipe = R"( + std::string inputPipe = R"( [ { $project: { @@ -2358,7 +2378,7 @@ TEST(PipelineOptimizationTest, {$match: {d: {$elemMatch: {e: 1, f: 1}}}} ] )"; - string outputPipe = R"( + std::string outputPipe = R"( [ { $project: { @@ -2371,7 +2391,7 @@ TEST(PipelineOptimizationTest, {$match: {d: {$elemMatch: {$and: [{e: {$eq: 1}}, {f: {$eq: 1}}]}}}} ] )"; - string serializedPipe = R"( + std::string serializedPipe = R"( [ { $project: { @@ -2392,7 +2412,7 @@ TEST(PipelineOptimizationTest, TEST(PipelineOptimizationTest, MatchEqObjectCanNotSplitAcrossRenameWithMapAndProject) { // The $project simply renames 'a.b' & 'a.c' to 'd.e' & 'd.f' but the dependency tracker reports // the 'd' for $eq as a modified dependency and so $match cannot be swapped with $project. 
- string inputPipe = R"( + std::string inputPipe = R"( [ { $project: { @@ -2404,7 +2424,7 @@ TEST(PipelineOptimizationTest, MatchEqObjectCanNotSplitAcrossRenameWithMapAndPro {$match: {d: {$eq: {e: 1, f: 1}}}} ] )"; - string outputPipe = R"( + std::string outputPipe = R"( [ { $project: { @@ -2417,7 +2437,7 @@ TEST(PipelineOptimizationTest, MatchEqObjectCanNotSplitAcrossRenameWithMapAndPro {$match: {d: {$eq: {e: 1, f: 1}}}} ] )"; - string serializedPipe = R"( + std::string serializedPipe = R"( [ { $project: { @@ -2435,31 +2455,31 @@ TEST(PipelineOptimizationTest, MatchEqObjectCanNotSplitAcrossRenameWithMapAndPro } TEST(PipelineOptimizationTest, MatchOnArrayFieldCanSplitAcrossRenameWithMapAndAddFields) { - string inputPipe = + std::string inputPipe = "[{$addFields: {d: {$map: {input: '$a', as: 'iter', in: {e: '$$iter.b', f: {$add: " "['$$iter.c', 1]}}}}}}, {$match: {'d.e': 1, 'd.f': 1}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {'a.b': {$eq: 1}}}, {$addFields: {d: {$map: {input: '$a', as: 'iter', in: {e: " "'$$iter.b', f: {$add: ['$$iter.c', {$const: 1}]}}}}}}, {$match: {'d.f': {$eq: 1}}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); } TEST(PipelineOptimizationTest, MatchCannotSwapWithLimit) { - string pipeline = "[{$limit: 3}, {$match: {x: {$gt: 0}}}]"; + std::string pipeline = "[{$limit: 3}, {$match: {x: {$gt: 0}}}]"; assertPipelineOptimizesTo(pipeline, pipeline); } TEST(PipelineOptimizationTest, MatchCannotSwapWithSortLimit) { - string inputPipe = "[{$sort: {x: -1}}, {$limit: 3}, {$match: {x: {$gt: 0}}}]"; - string outputPipe = "[{$sort: {sortKey: {x: -1}, limit: 3}}, {$match: {x: {$gt: 0}}}]"; + std::string inputPipe = "[{$sort: {x: -1}}, {$limit: 3}, {$match: {x: {$gt: 0}}}]"; + std::string outputPipe = "[{$sort: {sortKey: {x: -1}, limit: 3}}, {$match: {x: {$gt: 0}}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, inputPipe); } TEST(PipelineOptimizationTest, MatchOnMinItemsShouldSwapSinceCategoryIsArrayMatching) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaMinItems: 1}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$_internalSchemaMinItems: 1}}}, " "{$project: {_id: true, a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); @@ -2482,10 +2502,10 @@ TEST(PipelineOptimizationTest, MatchOnMinItemsShouldSwapSinceCategoryIsArrayMatc } TEST(PipelineOptimizationTest, MatchOnMaxItemsShouldSwapSinceCategoryIsArrayMatching) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaMaxItems: 1}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$_internalSchemaMaxItems: 1}}}, " "{$project: {_id: true, a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); @@ -2508,7 +2528,7 @@ TEST(PipelineOptimizationTest, MatchOnMaxItemsShouldSwapSinceCategoryIsArrayMatc } TEST(PipelineOptimizationTest, MatchOnAllElemMatchFromIndexShouldNotSwapBecauseOfNamePlaceHolder) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaAllElemMatchFromIndex: [1, {b: {$gt: 0}}]}}}]"; assertPipelineOptimizesTo(inputPipe, inputPipe); @@ -2516,7 +2536,7 @@ TEST(PipelineOptimizationTest, MatchOnAllElemMatchFromIndexShouldNotSwapBecauseO inputPipe = "[{$project: {redacted: false, _id: true}}, " "{$match: {a: {$_internalSchemaAllElemMatchFromIndex: [1, {b: {$gt: 0}}]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {a: 
{$_internalSchemaAllElemMatchFromIndex: [1, {b: {$gt: 0}}]}}}, " "{$project: {redacted: false, _id: true}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); @@ -2531,7 +2551,7 @@ TEST(PipelineOptimizationTest, MatchOnAllElemMatchFromIndexShouldNotSwapBecauseO } TEST(PipelineOptimizationTest, MatchOnArrayIndexShouldNotSwapBecauseOfNamePlaceHolder) { - string inputPipe = R"( + std::string inputPipe = R"( [{$project: {_id: true, a: '$b'}}, {$match: {a: {$_internalSchemaMatchArrayIndex: {index: 0, namePlaceholder: 'i', expression: {i: {$lt: 0}}}}}}])"; @@ -2541,7 +2561,7 @@ TEST(PipelineOptimizationTest, MatchOnArrayIndexShouldNotSwapBecauseOfNamePlaceH [{$project: {redacted: false, _id: true}}, {$match: {a: {$_internalSchemaMatchArrayIndex: {index: 0, namePlaceholder: 'i', expression: {i: {$lt: 0}}}}}}])"; - string outputPipe = R"( + std::string outputPipe = R"( [{$match: {a: {$_internalSchemaMatchArrayIndex: {index: 0, namePlaceholder: 'i', expression: {i: {$lt: 0}}}}}}, {$project: {redacted: false, _id: true}}])"; @@ -2559,10 +2579,10 @@ TEST(PipelineOptimizationTest, MatchOnArrayIndexShouldNotSwapBecauseOfNamePlaceH } TEST(PipelineOptimizationTest, MatchOnUniqueItemsShouldSwapSinceCategoryIsArrayMatching) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaUniqueItems: true}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$_internalSchemaUniqueItems: true}}}, " "{$project: {_id: true, a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); @@ -2587,13 +2607,13 @@ TEST(PipelineOptimizationTest, MatchOnUniqueItemsShouldSwapSinceCategoryIsArrayM // Descriptive test. The following internal match expression *could* participate in pipeline // optimizations, but it currently does not. TEST(PipelineOptimizationTest, MatchOnObjectMatchShouldNotSwapSinceCategoryIsOther) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaObjectMatch: {b: 1}}}}]"; - string outputPipe = + std::string outputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaObjectMatch: {b: {$eq: 1}}}}}]"; - string serializedPipe = + std::string serializedPipe = "[{$project: {_id: true, a: '$b'}}," "{$match: {a: {$_internalSchemaObjectMatch: {b: 1}}}}]"; @@ -2627,7 +2647,7 @@ TEST(PipelineOptimizationTest, MatchOnObjectMatchShouldNotSwapSinceCategoryIsOth // Descriptive test. The following internal match expression *could* participate in pipeline // optimizations, but it currently does not. TEST(PipelineOptimizationTest, MatchOnMinPropertiesShouldNotSwapSinceCategoryIsOther) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {$_internalSchemaMinProperties: 2}}]"; assertPipelineOptimizesTo(inputPipe, inputPipe); @@ -2646,7 +2666,7 @@ TEST(PipelineOptimizationTest, MatchOnMinPropertiesShouldNotSwapSinceCategoryIsO // Descriptive test. The following internal match expression *could* participate in pipeline // optimizations, but it currently does not. TEST(PipelineOptimizationTest, MatchOnMaxPropertiesShouldNotSwapSinceCategoryIsOther) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {$_internalSchemaMaxProperties: 2}}]"; assertPipelineOptimizesTo(inputPipe, inputPipe); @@ -2665,7 +2685,7 @@ TEST(PipelineOptimizationTest, MatchOnMaxPropertiesShouldNotSwapSinceCategoryIsO // Descriptive test. 
The following internal match expression *could* participate in pipeline // optimizations, but it currently does not. TEST(PipelineOptimizationTest, MatchOnAllowedPropertiesShouldNotSwapSinceCategoryIsOther) { - string inputPipe = R"( + std::string inputPipe = R"( [{$project: {_id: true, a: '$b'}}, {$match: {$_internalSchemaAllowedProperties: { properties: ['b'], @@ -2673,7 +2693,7 @@ TEST(PipelineOptimizationTest, MatchOnAllowedPropertiesShouldNotSwapSinceCategor patternProperties: [], otherwise: {i: 1} }}}])"; - string outputPipe = R"( + std::string outputPipe = R"( [{$project: {_id: true, a: '$b'}}, {$match: {$_internalSchemaAllowedProperties: { properties: ['b'], @@ -2681,7 +2701,7 @@ TEST(PipelineOptimizationTest, MatchOnAllowedPropertiesShouldNotSwapSinceCategor patternProperties: [], otherwise: {i: {$eq : 1}} }}}])"; - string serializedPipe = R"( + std::string serializedPipe = R"( [{$project: {_id: true, a: '$b'}}, {$match: {$_internalSchemaAllowedProperties: { properties: ['b'], @@ -2747,13 +2767,13 @@ TEST(PipelineOptimizationTest, MatchOnAllowedPropertiesShouldNotSwapSinceCategor // Descriptive test. The following internal match expression *could* participate in pipeline // optimizations, but it currently does not. TEST(PipelineOptimizationTest, MatchOnCondShouldNotSwapSinceCategoryIsOther) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {$_internalSchemaCond: [{a: 1}, {b: 1}, {c: 1}]}}]"; - string outputPipe = + std::string outputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {$_internalSchemaCond: [{a: {$eq : 1}}, {b: {$eq : 1}}, {c: {$eq : 1}}]}}]"; - string serializedPipe = + std::string serializedPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {$_internalSchemaCond: [{a: 1}, {b: 1}, {c: 1}]}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); @@ -2784,7 +2804,7 @@ TEST(PipelineOptimizationTest, MatchOnCondShouldNotSwapSinceCategoryIsOther) { // Descriptive test. The following internal match expression *could* participate in pipeline // optimizations, but it currently does not. TEST(PipelineOptimizationTest, MatchOnRootDocEqShouldNotSwapSinceCategoryIsOther) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {$_internalSchemaRootDocEq: {a: 1}}}]"; assertPipelineOptimizesTo(inputPipe, inputPipe); @@ -2803,13 +2823,13 @@ TEST(PipelineOptimizationTest, MatchOnRootDocEqShouldNotSwapSinceCategoryIsOther // Descriptive test. The following internal match expression can participate in pipeline // optimizations. 
TEST(PipelineOptimizationTest, MatchOnInternalSchemaTypeShouldSwap) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaType: 1}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$_internalSchemaType: [1]}}}, " "{$project: {_id: true, a: '$b'}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {b: {$_internalSchemaType: [1]}}}, " "{$project: {_id: true, a: '$b'}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); @@ -2838,10 +2858,10 @@ TEST(PipelineOptimizationTest, MatchOnInternalSchemaTypeShouldSwap) { } TEST(PipelineOptimizationTest, MatchOnMinLengthShouldSwapWithAdjacentStage) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaMinLength: 1}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$_internalSchemaMinLength: 1}}}," "{$project: {_id: true, a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); @@ -2864,10 +2884,10 @@ TEST(PipelineOptimizationTest, MatchOnMinLengthShouldSwapWithAdjacentStage) { } TEST(PipelineOptimizationTest, MatchOnMaxLengthShouldSwapWithAdjacentStage) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaMaxLength: 1}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$_internalSchemaMaxLength: 1}}}," "{$project: {_id: true, a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); @@ -2890,10 +2910,10 @@ TEST(PipelineOptimizationTest, MatchOnMaxLengthShouldSwapWithAdjacentStage) { } TEST(PipelineOptimizationTest, MatchOnInternalEqShouldSwapWithAdjacentStage) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaEq: {c: 1}}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$_internalSchemaEq: {c: 1}}}}, " "{$project: {_id: true, a: '$b'}}]"; assertPipelineOptimizesTo(inputPipe, outputPipe); @@ -2916,10 +2936,10 @@ TEST(PipelineOptimizationTest, MatchOnInternalEqShouldSwapWithAdjacentStage) { } TEST(PipelineOptimizationTest, MatchOnXorShouldSwapIfEverySubExpressionIsEligible) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b', c: '$d'}}, " "{$match: {$_internalSchemaXor: [{a: 1}, {c: 1}]}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {$_internalSchemaXor: [{b: {$eq: 1}}, {d: {$eq: 1}}]}}, " "{$project: {_id: true, a: '$b', c: '$d'}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, outputPipe); @@ -2930,7 +2950,7 @@ TEST(PipelineOptimizationTest, MatchOnXorShouldSwapIfEverySubExpressionIsEligibl outputPipe = "[{$match: {$_internalSchemaXor: [{a: {$eq : 1}}, {b: {$eq : 1}}]}}, " "{$project: {redacted: false, _id: true}}]"; - string serializedPipe = + std::string serializedPipe = "[{$match: {$_internalSchemaXor: [{a: 1}, {b: 1}]}}, " " {$project: {redacted: false, _id: true}}]"; assertPipelineOptimizesAndSerializesTo(inputPipe, outputPipe, serializedPipe); @@ -2959,10 +2979,10 @@ TEST(PipelineOptimizationTest, MatchOnXorShouldSwapIfEverySubExpressionIsEligibl } TEST(PipelineOptimizationTest, MatchOnFmodShouldSwapWithAdjacentStage) { - string inputPipe = + std::string inputPipe = "[{$project: {_id: true, a: '$b'}}, " "{$match: {a: {$_internalSchemaFmod: [5, 0]}}}]"; - string outputPipe = + std::string outputPipe = "[{$match: {b: {$_internalSchemaFmod: [5, 0]}}}, " "{$project: {_id: true, a: '$b'}}]"; 
assertPipelineOptimizesTo(inputPipe, outputPipe); @@ -2988,7 +3008,7 @@ TEST(PipelineOptimizationTest, ChangeStreamLookupSwapsWithIndependentMatch) { QueryTestServiceContext testServiceContext; auto opCtx = testServiceContext.makeOperationContext(); - intrusive_ptr expCtx(new ExpressionContextForTest(kTestNss)); + boost::intrusive_ptr expCtx(new ExpressionContextForTest(kTestNss)); expCtx->opCtx = opCtx.get(); expCtx->uuid = UUID::gen(); setMockReplicationCoordinatorOnOpCtx(expCtx->opCtx); @@ -3019,7 +3039,7 @@ TEST(PipelineOptimizationTest, ChangeStreamLookupDoesNotSwapWithMatchOnPostImage QueryTestServiceContext testServiceContext; auto opCtx = testServiceContext.makeOperationContext(); - intrusive_ptr expCtx(new ExpressionContextForTest(kTestNss)); + boost::intrusive_ptr expCtx(new ExpressionContextForTest(kTestNss)); expCtx->opCtx = opCtx.get(); expCtx->uuid = UUID::gen(); setMockReplicationCoordinatorOnOpCtx(expCtx->opCtx); @@ -3048,7 +3068,7 @@ TEST(PipelineOptimizationTest, FullDocumentBeforeChangeLookupSwapsWithIndependen QueryTestServiceContext testServiceContext; auto opCtx = testServiceContext.makeOperationContext(); - intrusive_ptr expCtx(new ExpressionContextForTest(kTestNss)); + boost::intrusive_ptr expCtx(new ExpressionContextForTest(kTestNss)); expCtx->opCtx = opCtx.get(); expCtx->uuid = UUID::gen(); setMockReplicationCoordinatorOnOpCtx(expCtx->opCtx); @@ -3079,7 +3099,7 @@ TEST(PipelineOptimizationTest, FullDocumentBeforeChangeDoesNotSwapWithMatchOnPre QueryTestServiceContext testServiceContext; auto opCtx = testServiceContext.makeOperationContext(); - intrusive_ptr expCtx(new ExpressionContextForTest(kTestNss)); + boost::intrusive_ptr expCtx(new ExpressionContextForTest(kTestNss)); expCtx->opCtx = opCtx.get(); expCtx->uuid = UUID::gen(); setMockReplicationCoordinatorOnOpCtx(expCtx->opCtx); @@ -3472,13 +3492,13 @@ std::unique_ptr getOptimizedPipeline(const BSONObj in auto opCtx = testServiceContext.makeOperationContext(); ASSERT_EQUALS(inputBson["pipeline"].type(), BSONType::Array); - vector rawPipeline; + std::vector rawPipeline; for (auto&& stageElem : inputBson["pipeline"].Array()) { ASSERT_EQUALS(stageElem.type(), BSONType::Object); rawPipeline.push_back(stageElem.embeddedObject()); } AggregateCommandRequest request(kTestNss, rawPipeline); - intrusive_ptr ctx = + boost::intrusive_ptr ctx = new ExpressionContextForTest(opCtx.get(), request); ctx->mongoProcessInterface = std::make_shared(); TempDir tempDir("PipelineTest"); @@ -3582,19 +3602,19 @@ TEST(PipelineOptimizationTest, MergeUnwindPipelineWithSortLimitPipelinePlacesLim namespace Sharded { -class Base { +class Base : public ScopedGlobalServiceContextForTest { public: // These all return json arrays of pipeline operators - virtual string inputPipeJson() = 0; - virtual string shardPipeJson() = 0; - virtual string mergePipeJson() = 0; + virtual std::string inputPipeJson() = 0; + virtual std::string shardPipeJson() = 0; + virtual std::string mergePipeJson() = 0; // Allows tests to override the default resolvedNamespaces. 
virtual NamespaceString getLookupCollNs() {
        return NamespaceString::createNamespaceString_forTest("a", "lookupColl");
    }
-    BSONObj pipelineFromJsonArray(const string& array) {
+    BSONObj pipelineFromJsonArray(const std::string& array) {
        return fromjson("{pipeline: " + array + "}");
    }
    virtual void run() {
@@ -3603,13 +3623,13 @@ class Base {
        const BSONObj mergePipeExpected = pipelineFromJsonArray(mergePipeJson());
        ASSERT_EQUALS(inputBson["pipeline"].type(), BSONType::Array);
-        vector rawPipeline;
+        std::vector rawPipeline;
        for (auto&& stageElem : inputBson["pipeline"].Array()) {
            ASSERT_EQUALS(stageElem.type(), BSONType::Object);
            rawPipeline.push_back(stageElem.embeddedObject());
        }
        AggregateCommandRequest request(kTestNss, rawPipeline);
-        intrusive_ptr ctx = createExpressionContext(request);
+        boost::intrusive_ptr ctx = createExpressionContext(request);
        TempDir tempDir("PipelineTest");
        ctx->tempDir = tempDir.path();
@@ -3637,9 +3657,9 @@ class Base {
    virtual ~Base() {}
-    virtual intrusive_ptr createExpressionContext(
+    virtual boost::intrusive_ptr createExpressionContext(
        const AggregateCommandRequest& request) {
-        return new ExpressionContextForTest(&_opCtx, request);
+        return new ExpressionContextForTest(_opCtx.get(), request);
    }
protected:
@@ -3647,18 +3667,19 @@ class Base {
    std::unique_ptr shardPipe;
private:
-    OperationContextNoop _opCtx;
+    ThreadClient _threadClient{getServiceContext()};
+    ServiceContext::UniqueOperationContext _opCtx{_threadClient->makeOperationContext()};
};
// General test to make sure all optimizations support empty pipelines
class Empty : public Base {
-    string inputPipeJson() {
+    std::string inputPipeJson() {
        return "[]";
    }
-    string shardPipeJson() {
+    std::string shardPipeJson() {
        return "[]";
    }
-    string mergePipeJson() {
+    std::string mergePipeJson() {
        return "[]";
    }
};
@@ -3667,43 +3688,43 @@ class Empty : public Base {
// config.cache.chunks.* should run on each shard in parallel.
namespace lookupFromShardsInParallel { class LookupWithDBAndColl : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$lookup: {from: {db: 'config', coll: 'cache.chunks.test.foo'}, as: 'results', " "localField: 'x', foreignField: '_id'}}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return inputPipeJson(); } - string mergePipeJson() { + std::string mergePipeJson() { return "[]"; } NamespaceString getLookupCollNs() override { - return {"config", "cache.chunks.test.foo"}; + return NamespaceString::createNamespaceString_forTest("config", "cache.chunks.test.foo"); } }; class LookupWithLetWithDBAndColl : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$lookup: {from: {db: 'config', coll: 'cache.chunks.test.foo'}, as: 'results', " "let: {x_field: '$x'}, pipeline: []}}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return inputPipeJson(); } - string mergePipeJson() { + std::string mergePipeJson() { return "[]"; } NamespaceString getLookupCollNs() override { - return {"config", "cache.chunks.test.foo"}; + return NamespaceString::createNamespaceString_forTest("config", "cache.chunks.test.foo"); } }; class CollectionCloningPipeline : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {$expr: {$gte: ['$_id', {$literal: 1}]}}}" ",{$sort: {_id: 1}}" ",{$replaceWith: {original: '$$ROOT'}}" @@ -3714,7 +3735,7 @@ class CollectionCloningPipeline : public Base { "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {$and: [{_id: {$_internalExprGte: 1}}, {$expr: {$gte: ['$_id', " "{$const: 1}]}}]}}" ", {$sort: {sortKey: {_id: 1}}}" @@ -3726,12 +3747,12 @@ class CollectionCloningPipeline : public Base { "]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[]"; } NamespaceString getLookupCollNs() override { - return {"config", "cache.chunks.test"}; + return NamespaceString::createNamespaceString_forTest("config", "cache.chunks.test"); } }; @@ -3740,49 +3761,49 @@ class CollectionCloningPipeline : public Base { namespace moveFinalUnwindFromShardsToMerger { class OneUnwind : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$unwind: {path: '$a'}}]}"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]}"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$unwind: {path: '$a'}}]}"; } }; class TwoUnwind : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$unwind: {path: '$a'}}, {$unwind: {path: '$b'}}]}"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]}"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$unwind: {path: '$a'}}, {$unwind: {path: '$b'}}]}"; } }; class UnwindNotFinal : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$unwind: {path: '$a'}}, {$match: {a:1}}]}"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$unwind: {path: '$a'}}, {$match: {a:{$eq:1}}}]}"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[]}"; } }; class UnwindWithOther : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {a:1}}, {$unwind: {path: '$a'}}]}"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {a:{$eq:1}}}]}"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$unwind: {path: '$a'}}]}"; } }; @@ -3798,13 +3819,13 @@ 
namespace propagateDocLimitToShards { * sharded_agg_helpers.cpp and the explanation in SERVER-36881. */ class MatchWithSkipAndLimit : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {x: 4}}, {$skip: 10}, {$limit: 5}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {x: {$eq: 4}}}, {$limit: 15}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$skip: 10}, {$limit: 5}]"; } }; @@ -3814,13 +3835,13 @@ class MatchWithSkipAndLimit : public Base { * all $skip stages in any pipeline that has more than one. */ class MatchWithMultipleSkipsAndLimit : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {x: 4}}, {$skip: 7}, {$skip: 3}, {$limit: 5}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {x: {$eq: 4}}}, {$limit: 15}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$skip: 10}, {$limit: 5}]"; } }; @@ -3831,13 +3852,13 @@ class MatchWithMultipleSkipsAndLimit : public Base { * $limit to the shard pipeline. */ class MatchWithLimitAndSkip : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {x: 4}}, {$limit: 10}, {$skip: 5}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {x: {$eq: 4}}}, {$limit: 10}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit: 10}, {$skip: 5}]"; } }; @@ -3848,13 +3869,13 @@ class MatchWithLimitAndSkip : public Base { * propagating the limit to the shards. */ class MatchWithSkipAddFieldsAndLimit : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {x: 4}}, {$skip: 10}, {$addFields: {y: 1}}, {$limit: 5}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {x: {$eq: 4}}}, {$limit: 15}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$skip: 10}, {$addFields: {y: {$const: 1}}}, {$limit: 5}]"; } }; @@ -3865,13 +3886,13 @@ class MatchWithSkipAddFieldsAndLimit : public Base { * shard before it can apply the $limit. */ class MatchWithSkipGroupAndLimit : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {x: 4}}, {$skip: 10}, {$group: {_id: '$y'}}, {$limit: 5}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {x: {$eq: 4}}}, {$project: {y: true, _id: false}}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$skip: 10}, {$group: {_id: '$y'}}, {$limit: 5}]"; } }; @@ -3882,13 +3903,13 @@ class MatchWithSkipGroupAndLimit : public Base { * filter in the second $match, so we also don't know how many documents we'll need from the shards. */ class MatchWithSkipSecondMatchAndLimit : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {x: 4}}, {$skip: 10}, {$match: {y: {$gt: 10}}}, {$limit: 5}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {x: {$eq: 4}}}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$skip: 10}, {$match: {y: {$gt: 10}}}, {$limit: 5}]"; } }; @@ -3899,53 +3920,53 @@ namespace limitFieldsSentFromShardsToMerger { // always a split point and neutral in terms of needed fields. 
class NeedWholeDoc : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$limit:1}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$limit:1}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit:1}]"; } }; class JustNeedsId : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$limit:1}, {$group: {_id: '$_id'}}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$limit:1}, {$project: {_id:true}}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit:1}, {$group: {_id: '$_id'}}]"; } }; class JustNeedsNonId : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$limit:1}, {$group: {_id: '$a.b'}}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$limit:1}, {$project: {a: {b: true}, _id: false}}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit:1}, {$group: {_id: '$a.b'}}]"; } }; class NothingNeeded : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$limit:1}" ",{$group: {_id: {$const: null}, count: {$sum: {$const: 1}}}}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$limit:1}" ",{$project: {_id: true}}" "]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit:1}" ",{$group: {_id: {$const: null}, count: {$sum: {$const: 1}}}}" "]"; @@ -3957,24 +3978,24 @@ class ShardAlreadyExhaustive : public Base { // 'a' field is still sent because it is explicitly asked for, even though it // isn't actually needed. If this changes in the future, this test will need to // change. - string inputPipeJson() { + std::string inputPipeJson() { return "[{$project: {_id:true, a:true}}" ",{$group: {_id: '$_id'}}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$project: {_id:true, a:true}}" ",{$group: {_id: '$_id'}}" "]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$group: {_id: '$$ROOT._id', $doingMerge: true}}" "]"; } }; class ShardedSortMatchProjSkipLimBecomesMatchTopKSortSkipProj : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$sort: {a : 1}}" ",{$match: {a: 1}}" ",{$project : {a: 1}}" @@ -3982,13 +4003,13 @@ class ShardedSortMatchProjSkipLimBecomesMatchTopKSortSkipProj : public Base { ",{$limit: 5}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {a: {$eq : 1}}}" ",{$sort: {sortKey: {a: 1}, limit: 8}}" ",{$project: {_id: true, a: true}}" "]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit: 8}" ",{$skip: 3}" ",{$project: {_id: true, a: true}}" @@ -3997,36 +4018,36 @@ class ShardedSortMatchProjSkipLimBecomesMatchTopKSortSkipProj : public Base { }; class ShardedMatchProjLimDoesNotBecomeMatchLimProj : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {a: 1}}" ",{$project : {a: 1}}" ",{$limit: 5}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {a: {$eq : 1}}}" ",{$project: {_id: true, a: true}}" ",{$limit: 5}" "]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit: 5}]"; } }; class ShardedSortProjLimBecomesTopKSortProj : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$sort: {a : 1}}" ",{$project : {a: 1}}" ",{$limit: 5}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { 
return "[{$sort: {sortKey: {a: 1}, limit: 5}}" ",{$project: {_id: true, a: true}}" "]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit: 5}" ",{$project: {_id: true, a: true}}" "]"; @@ -4034,19 +4055,19 @@ class ShardedSortProjLimBecomesTopKSortProj : public Base { }; class ShardedSortGroupProjLimDoesNotBecomeTopKSortProjGroup : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$sort: {a : 1}}" ",{$group : {_id: {a: '$a'}}}" ",{$project : {a: 1}}" ",{$limit: 5}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$sort: {sortKey: {a: 1}}}" ",{$project : {a: true, _id: false}}" "]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$group : {_id: {a: '$a'}}}" ",{$project: {_id: true, a: true}}" ",{$limit: 5}" @@ -4055,20 +4076,20 @@ class ShardedSortGroupProjLimDoesNotBecomeTopKSortProjGroup : public Base { }; class ShardedMatchSortProjLimBecomesMatchTopKSortProj : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$match: {a: {$eq : 1}}}" ",{$sort: {a: -1}}" ",{$project : {a: 1}}" ",{$limit: 6}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$match: {a: {$eq : 1}}}" ",{$sort: {sortKey: {a: -1}, limit: 6}}" ",{$project: {_id: true, a: true}}" "]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$limit: 6}" ",{$project: {_id: true, a: true}}" "]"; @@ -4080,48 +4101,48 @@ class ShardedMatchSortProjLimBecomesMatchTopKSortProj : public Base { namespace coalesceLookUpAndUnwind { class ShouldCoalesceUnwindOnAs : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same'}}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right', unwinding: {preserveNullAndEmptyArrays: false}}}]"; } }; class ShouldCoalesceUnwindOnAsWithPreserveEmpty : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same', preserveNullAndEmptyArrays: true}}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right', unwinding: {preserveNullAndEmptyArrays: true}}}]"; } }; class ShouldCoalesceUnwindOnAsWithIncludeArrayIndex : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$same', includeArrayIndex: 'index'}}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right', unwinding: {preserveNullAndEmptyArrays: false, includeArrayIndex: " "'index'}}}]"; @@ -4129,16 +4150,16 @@ class ShouldCoalesceUnwindOnAsWithIncludeArrayIndex : public Base { }; class ShouldNotCoalesceUnwindNotOnAs : public Base { - string inputPipeJson() { + std::string inputPipeJson() { return "[{$lookup: {from : 
'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$from'}}" "]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}" ",{$unwind: {path: '$from'}}" @@ -4164,14 +4185,14 @@ class Out : public ShardMergerBase { bool needsPrimaryShardMerger() { return true; } - string inputPipeJson() { + std::string inputPipeJson() { return "[{$out: 'outColl'}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]"; } - string mergePipeJson() { - return "[{$out: {db: 'a', coll: 'outColl'}}]"; + std::string mergePipeJson() { + return "[{$out: {coll: 'outColl', db: 'a'}}]"; } }; @@ -4179,20 +4200,20 @@ class MergeWithUnshardedCollection : public ShardMergerBase { bool needsPrimaryShardMerger() { return true; } - string inputPipeJson() { + std::string inputPipeJson() { return "[{$merge: 'outColl'}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$merge: {into: {db: 'a', coll: 'outColl'}, on: '_id', " "whenMatched: 'merge', whenNotMatched: 'insert'}}]"; } }; class MergeWithShardedCollection : public ShardMergerBase { - intrusive_ptr createExpressionContext( + boost::intrusive_ptr createExpressionContext( const AggregateCommandRequest& request) override { class ProcessInterface : public StubMongoProcessInterface { bool isSharded(OperationContext* opCtx, const NamespaceString& ns) override { @@ -4209,14 +4230,14 @@ class MergeWithShardedCollection : public ShardMergerBase { bool needsPrimaryShardMerger() { return false; } - string inputPipeJson() { + std::string inputPipeJson() { return "[{$merge: 'outColl'}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$merge: {into: {db: 'a', coll: 'outColl'}, on: '_id', " "whenMatched: 'merge', whenNotMatched: 'insert'}}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[]"; } }; @@ -4225,13 +4246,13 @@ class Project : public ShardMergerBase { bool needsPrimaryShardMerger() { return false; } - string inputPipeJson() { + std::string inputPipeJson() { return "[{$project: {a : 1}}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[{$project: {_id: true, a: true}}]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[]"; } }; @@ -4240,14 +4261,14 @@ class LookUp : public ShardMergerBase { bool needsPrimaryShardMerger() { return true; } - string inputPipeJson() { + std::string inputPipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}]"; } - string shardPipeJson() { + std::string shardPipeJson() { return "[]"; } - string mergePipeJson() { + std::string mergePipeJson() { return "[{$lookup: {from : 'lookupColl', as : 'same', localField: 'left', foreignField: " "'right'}}]"; } @@ -4562,23 +4583,25 @@ TEST_F(PipelineDeferredMergeSortTest, StageThatCantSwapGoesToMergingHalf) { } // namespace Sharded } // namespace Optimizations -TEST(PipelineInitialSource, GeoNearInitialQuery) { - OperationContextNoop _opCtx; +class PipelineInitialSource : public ServiceContextTest {}; + +TEST_F(PipelineInitialSource, GeoNearInitialQuery) { + auto opCtx = makeOperationContext(); const std::vector rawPipeline = { fromjson("{$geoNear: {distanceField: 'd', near: [0, 0], query: {a: 1}}}")}; - intrusive_ptr ctx = new 
ExpressionContextForTest( - &_opCtx, + boost::intrusive_ptr ctx = new ExpressionContextForTest( + opCtx.get(), AggregateCommandRequest(NamespaceString::createNamespaceString_forTest("a.collection"), rawPipeline)); auto pipe = Pipeline::parse(rawPipeline, ctx); ASSERT_BSONOBJ_EQ(pipe->getInitialQuery(), BSON("a" << 1)); } -TEST(PipelineInitialSource, MatchInitialQuery) { - OperationContextNoop _opCtx; +TEST_F(PipelineInitialSource, MatchInitialQuery) { + auto opCtx = makeOperationContext(); const std::vector rawPipeline = {fromjson("{$match: {'a': 4}}")}; - intrusive_ptr ctx = new ExpressionContextForTest( - &_opCtx, + boost::intrusive_ptr ctx = new ExpressionContextForTest( + opCtx.get(), AggregateCommandRequest(NamespaceString::createNamespaceString_forTest("a.collection"), rawPipeline)); @@ -4620,7 +4643,8 @@ TEST_F(PipelineValidateTest, AggregateOneNSNotValidForEmptyPipeline) { const std::vector rawPipeline = {}; auto ctx = getExpCtx(); - ctx->ns = NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "a")); + ctx->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "a")); ASSERT_THROWS_CODE( Pipeline::parse(rawPipeline, ctx), AssertionException, ErrorCodes::InvalidNamespace); @@ -4630,7 +4654,8 @@ TEST_F(PipelineValidateTest, AggregateOneNSNotValidIfInitialStageRequiresCollect const std::vector rawPipeline = {fromjson("{$match: {}}")}; auto ctx = getExpCtx(); - ctx->ns = NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "a")); + ctx->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "a")); ASSERT_THROWS_CODE( Pipeline::parse(rawPipeline, ctx), AssertionException, ErrorCodes::InvalidNamespace); @@ -4640,7 +4665,8 @@ TEST_F(PipelineValidateTest, AggregateOneNSValidIfInitialStageIsCollectionless) auto ctx = getExpCtx(); auto collectionlessSource = DocumentSourceCollectionlessMock::create(ctx); - ctx->ns = NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "a")); + ctx->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "a")); Pipeline::create({collectionlessSource}, ctx); } @@ -4661,8 +4687,8 @@ TEST_F(PipelineValidateTest, AggregateOneNSValidForFacetPipelineRegardlessOfInit const std::vector rawPipeline = {fromjson("{$facet: {subPipe: [{$match: {}}]}}")}; auto ctx = getExpCtx(); - ctx->ns = - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName(boost::none, "unittests")); + ctx->ns = NamespaceString::makeCollectionlessAggregateNSS( + DatabaseName::createDatabaseName_forTest(boost::none, "unittests")); ASSERT_THROWS_CODE( Pipeline::parse(rawPipeline, ctx), AssertionException, ErrorCodes::InvalidNamespace); @@ -5059,8 +5085,6 @@ TEST_F(PipelineDependenciesTest, PathModifiedWithoutNameChangeShouldStillBeADepe } } // namespace Dependencies -namespace { - using PipelineRenameTracking = AggregationContextFixture; TEST_F(PipelineRenameTracking, ReportsIdentityMapWhenEmpty) { @@ -5436,7 +5460,7 @@ TEST_F(InvolvedNamespacesTest, IncludesLookupNamespace) { const NamespaceString lookupNss = NamespaceString::createNamespaceString_forTest("test", "foo"); const NamespaceString resolvedNss = NamespaceString::createNamespaceString_forTest("test", "bar"); - expCtx->setResolvedNamespace(lookupNss, {resolvedNss, vector{}}); + expCtx->setResolvedNamespace(lookupNss, {resolvedNss, std::vector{}}); auto lookupSpec = fromjson("{$lookup: {from: 'foo', as: 'x', 
localField: 'foo_id', foreignField: '_id'}}"); auto pipeline = @@ -5454,7 +5478,7 @@ TEST_F(InvolvedNamespacesTest, IncludesGraphLookupNamespace) { const NamespaceString lookupNss = NamespaceString::createNamespaceString_forTest("test", "foo"); const NamespaceString resolvedNss = NamespaceString::createNamespaceString_forTest("test", "bar"); - expCtx->setResolvedNamespace(lookupNss, {resolvedNss, vector{}}); + expCtx->setResolvedNamespace(lookupNss, {resolvedNss, std::vector{}}); auto graphLookupSpec = fromjson( "{$graphLookup: {" " from: 'foo'," @@ -5483,8 +5507,8 @@ TEST_F(InvolvedNamespacesTest, IncludesLookupSubpipelineNamespaces) { NamespaceString::createNamespaceString_forTest("test", "foo_inner"); const NamespaceString innerResolvedNss = NamespaceString::createNamespaceString_forTest("test", "bar_inner"); - expCtx->setResolvedNamespace(outerLookupNss, {outerResolvedNss, vector{}}); - expCtx->setResolvedNamespace(innerLookupNss, {innerResolvedNss, vector{}}); + expCtx->setResolvedNamespace(outerLookupNss, {outerResolvedNss, std::vector{}}); + expCtx->setResolvedNamespace(innerLookupNss, {innerResolvedNss, std::vector{}}); auto lookupSpec = fromjson( "{$lookup: {" " from: 'foo_outer', " @@ -5512,12 +5536,12 @@ TEST_F(InvolvedNamespacesTest, IncludesGraphLookupSubPipeline) { NamespaceString::createNamespaceString_forTest("test", "foo_inner"); const NamespaceString innerResolvedNss = NamespaceString::createNamespaceString_forTest("test", "bar_inner"); - expCtx->setResolvedNamespace(outerLookupNss, {outerResolvedNss, vector{}}); + expCtx->setResolvedNamespace(outerLookupNss, {outerResolvedNss, std::vector{}}); expCtx->setResolvedNamespace( outerLookupNss, {outerResolvedNss, - vector{fromjson("{$lookup: {from: 'foo_inner', as: 'x', pipeline: []}}")}}); - expCtx->setResolvedNamespace(innerLookupNss, {innerResolvedNss, vector{}}); + std::vector{fromjson("{$lookup: {from: 'foo_inner', as: 'x', pipeline: []}}")}}); + expCtx->setResolvedNamespace(innerLookupNss, {innerResolvedNss, std::vector{}}); auto graphLookupSpec = fromjson( "{$graphLookup: {" " from: 'foo_outer', " @@ -5549,11 +5573,12 @@ TEST_F(InvolvedNamespacesTest, IncludesAllCollectionsWhenResolvingViews) { expCtx->setResolvedNamespace( lookupNss, {resolvedNss, - vector{ + std::vector{ fromjson("{$lookup: {from: 'extra_backer_of_bar', as: 'x', pipeline: []}}")}}); expCtx->setResolvedNamespace(nssIncludedInResolvedView, - {nssIncludedInResolvedView, vector{}}); - expCtx->setResolvedNamespace(normalCollectionNss, {normalCollectionNss, vector{}}); + {nssIncludedInResolvedView, std::vector{}}); + expCtx->setResolvedNamespace(normalCollectionNss, + {normalCollectionNss, std::vector{}}); auto facetSpec = fromjson( "{$facet: {" " pipe_1: [" @@ -5584,8 +5609,6 @@ TEST_F(InvolvedNamespacesTest, IncludesAllCollectionsWhenResolvingViews) { ASSERT(involvedNssSet.find(normalCollectionNss) != involvedNssSet.end()); } -} // namespace - class All : public OldStyleSuiteSpecification { public: All() : OldStyleSuiteSpecification("PipelineOptimizations") {} diff --git a/src/mongo/db/pipeline/plan_executor_pipeline.cpp b/src/mongo/db/pipeline/plan_executor_pipeline.cpp index 75c8753446d5f..8dcbcbb294e3c 100644 --- a/src/mongo/db/pipeline/plan_executor_pipeline.cpp +++ b/src/mongo/db/pipeline/plan_executor_pipeline.cpp @@ -27,17 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/plan_executor_pipeline.h" +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/change_stream_start_after_invalidate_info.h" #include "mongo/db/pipeline/change_stream_topology_change_info.h" +#include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/document_source_cursor.h" #include "mongo/db/pipeline/pipeline_d.h" +#include "mongo/db/pipeline/plan_executor_pipeline.h" #include "mongo/db/pipeline/plan_explainer_pipeline.h" #include "mongo/db/pipeline/resume_token.h" #include "mongo/db/repl/speculative_majority_read_info.h" -#include "mongo/util/duration.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/plan_executor_pipeline.h b/src/mongo/db/pipeline/plan_executor_pipeline.h index 824c425632732..df70deb5f86b2 100644 --- a/src/mongo/db/pipeline/plan_executor_pipeline.h +++ b/src/mongo/db/pipeline/plan_executor_pipeline.h @@ -29,13 +29,36 @@ #pragma once -#include "mongo/util/duration.h" +#include +#include +#include +#include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/plan_explainer_pipeline.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/restore_context.h" +#include "mongo/db/record_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -111,6 +134,9 @@ class PlanExecutorPipeline final : public PlanExecutor { long long executeDelete() override { MONGO_UNREACHABLE; } + long long getDeleteResult() const override { + MONGO_UNREACHABLE; + } BatchedDeleteStats getBatchedDeleteStats() override { MONGO_UNREACHABLE; } diff --git a/src/mongo/db/pipeline/plan_explainer_pipeline.cpp b/src/mongo/db/pipeline/plan_explainer_pipeline.cpp index cb26f9822dd67..6f965041579b9 100644 --- a/src/mongo/db/pipeline/plan_explainer_pipeline.cpp +++ b/src/mongo/db/pipeline/plan_explainer_pipeline.cpp @@ -27,14 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/pipeline/plan_explainer_pipeline.h" +#include +#include +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_cursor.h" -#include "mongo/db/pipeline/plan_executor_pipeline.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/pipeline/plan_explainer_pipeline.h" #include "mongo/db/query/plan_summary_stats_visitor.h" +#include "mongo/util/assert_util.h" namespace mongo { const PlanExplainer::ExplainVersion& PlanExplainerPipeline::getVersion() const { @@ -94,10 +98,4 @@ std::vector PlanExplainerPipeline::getRejectedP // Multi-planning is not supported for aggregation pipelines. return {}; } - -std::vector PlanExplainerPipeline::getCachedPlanStats( - const plan_cache_debug_info::DebugInfo&, ExplainOptions::Verbosity) const { - // Pipelines are not cached, so we should never try to rebuild the stats from a cached entry. - MONGO_UNREACHABLE; -} } // namespace mongo diff --git a/src/mongo/db/pipeline/plan_explainer_pipeline.h b/src/mongo/db/pipeline/plan_explainer_pipeline.h index e6c1a689a16c9..6d3e7af6e384e 100644 --- a/src/mongo/db/pipeline/plan_explainer_pipeline.h +++ b/src/mongo/db/pipeline/plan_explainer_pipeline.h @@ -29,8 +29,14 @@ #pragma once +#include +#include +#include + #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_summary_stats.h" #include "mongo/util/duration.h" namespace mongo { @@ -52,8 +58,6 @@ class PlanExplainerPipeline final : public PlanExplainer { PlanStatsDetails getWinningPlanTrialStats() const final; std::vector getRejectedPlansStats( ExplainOptions::Verbosity verbosity) const final; - std::vector getCachedPlanStats(const plan_cache_debug_info::DebugInfo&, - ExplainOptions::Verbosity) const final; void incrementNReturned() { ++_nReturned; diff --git a/src/mongo/db/pipeline/process_interface/SConscript b/src/mongo/db/pipeline/process_interface/SConscript index 4fc32ca2bad49..b3b465c90e0c9 100644 --- a/src/mongo/db/pipeline/process_interface/SConscript +++ b/src/mongo/db/pipeline/process_interface/SConscript @@ -1,6 +1,7 @@ # -*- mode: python -*- Import('env') +Import('get_option') env = env.Clone() @@ -25,6 +26,8 @@ env.Library( '$BUILD_DIR/mongo/db/operation_time_tracker', '$BUILD_DIR/mongo/db/pipeline/field_path', '$BUILD_DIR/mongo/s/sharding_router_api', + '$BUILD_DIR/mongo/util/diagnostic_info' + if get_option('use-diagnostic-latches') == 'on' else [], '$BUILD_DIR/mongo/util/namespace_string_database_name_util', ], ) @@ -130,7 +133,9 @@ env.CppUnitTest( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', + '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/query/query_test_service_context', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/db/vector_clock_mongod', '$BUILD_DIR/mongo/s/sharding_router_test_fixture', diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp index d30f7c5c49d30..6e34161948407 100644 --- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.cpp @@ -30,55 +30,91 @@ #include "mongo/db/pipeline/process_interface/common_mongod_process_interface.h" +#include #include +#include +#include +#include +#include 
+#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/catalog/drop_collection.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/catalog/list_indexes.h" -#include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/collection_index_usage_tracker.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/flow_control_ticketholder.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_cursor.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" #include "mongo/db/pipeline/pipeline_d.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/collection_index_usage_tracker_decoration.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/explain.h" +#include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_cache.h" #include "mongo/db/query/sbe_plan_cache.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/query_analysis_writer.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/transaction_coordinator_curop.h" #include "mongo/db/s/transaction_coordinator_worker_curop_repository.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/session_catalog.h" +#include "mongo/db/shard_id.h" #include "mongo/db/stats/fill_locker_info.h" #include "mongo/db/stats/storage_stats.h" +#include "mongo/db/stats/top.h" #include "mongo/db/storage/backup_cursor_hooks.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/catalog_helper.h" +#include "mongo/db/timeseries/timeseries_options.h" #include "mongo/db/transaction/transaction_history_iterator.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction/transaction_participant_resource_yielder.h" +#include "mongo/db/views/view.h" #include "mongo/logv2/log.h" -#include "mongo/s/cluster_commands_helpers.h" -#include "mongo/s/query/document_source_merge_cursors.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/analyze_shard_key_common_gen.h" +#include 
"mongo/s/analyze_shard_key_role.h" #include "mongo/s/query_analysis_sample_tracker.h" #include "mongo/s/query_analysis_sampler_util.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/future.h" #include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -148,7 +184,7 @@ void listDurableCatalog(OperationContext* opCtx, continue; } - NamespaceString ns(NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode( + NamespaceString ns(NamespaceStringUtil::parseFromStringExpectTenantIdInMultitenancyMode( obj.getStringField("ns"))); if (ns.isSystemDotViews()) { systemViewsNamespaces->push_back(ns); @@ -216,14 +252,7 @@ std::vector CommonMongodProcessInterface::getIndexStats(OperationConte auto entry = idxCatalog->getEntry(idx); doc["spec"] = Value(idx->infoObj()); - // Not all indexes in the CollectionIndexUsageTracker may be visible or consistent with our - // snapshot. For this reason, it is unsafe to check `isReady` on the entry, which - // asserts that the index's in-memory state is consistent with our snapshot. - if (!entry->isPresentInMySnapshot(opCtx)) { - continue; - } - - if (!entry->isReadyInMySnapshot(opCtx)) { + if (!entry->isReady()) { doc["building"] = Value(true); } @@ -272,18 +301,17 @@ std::deque CommonMongodProcessInterface::listCatalog(OperationContext* std::vector systemViewsNamespacesFromSecondCatalogRead; listDurableCatalog( opCtx, getShardName(opCtx), &docs, &systemViewsNamespacesFromSecondCatalogRead); - if (!std::equal( - systemViewsNamespaces.cbegin(), - systemViewsNamespaces.cend(), - systemViewsNamespacesFromSecondCatalogRead.cbegin(), - [](const auto& lhs, const auto& rhs) { return *lhs.nss() == *rhs.nss(); })) { + if (!std::equal(systemViewsNamespaces.cbegin(), + systemViewsNamespaces.cend(), + systemViewsNamespacesFromSecondCatalogRead.cbegin(), + [](const auto& lhs, const auto& rhs) { return lhs.nss() == rhs.nss(); })) { continue; } for (const auto& svns : systemViewsNamespaces) { // Hold reference to the catalog for collection lookup without locks to be safe. 
auto catalog = CollectionCatalog::get(opCtx); - auto collection = catalog->lookupCollectionByNamespace(opCtx, *svns.nss()); + auto collection = catalog->lookupCollectionByNamespace(opCtx, svns.nss()); if (!collection) { continue; } @@ -292,7 +320,7 @@ std::deque CommonMongodProcessInterface::listCatalog(OperationContext* while (auto record = cursor->next()) { BSONObj obj = record->data.releaseToBson(); - NamespaceString ns(NamespaceStringUtil::deserialize((*svns.nss()).tenantId(), + NamespaceString ns(NamespaceStringUtil::deserialize(svns.nss().tenantId(), obj.getStringField("_id"))); NamespaceString viewOnNs(NamespaceStringUtil::parseNamespaceFromDoc( ns.dbName(), obj.getStringField("viewOn"))); @@ -330,7 +358,7 @@ boost::optional CommonMongodProcessInterface::getCatalogEntry( auto cursor = rs->getCursor(opCtx); while (auto record = cursor->next()) { auto obj = record->data.toBson(); - if (NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode( + if (NamespaceStringUtil::parseFromStringExpectTenantIdInMultitenancyMode( obj.getStringField("ns")) != ns) { continue; } @@ -371,12 +399,13 @@ void CommonMongodProcessInterface::appendLatencyStats(OperationContext* opCtx, } Status CommonMongodProcessInterface::appendStorageStats( - OperationContext* opCtx, + const boost::intrusive_ptr& expCtx, const NamespaceString& nss, const StorageStatsSpec& spec, BSONObjBuilder* builder, const boost::optional& filterObj) const { - return appendCollectionStorageStats(opCtx, nss, spec, builder, filterObj); + return appendCollectionStorageStats( + expCtx->opCtx, nss, spec, expCtx->serializationCtxt, builder, filterObj); } Status CommonMongodProcessInterface::appendRecordCount(OperationContext* opCtx, @@ -391,7 +420,7 @@ Status CommonMongodProcessInterface::appendQueryExecStats(OperationContext* opCt AutoGetCollectionForReadCommand collection(opCtx, nss); if (!collection) { return {ErrorCodes::NamespaceNotFound, - str::stream() << "Collection [" << nss.toString() << "] not found."}; + str::stream() << "Collection [" << nss.toStringForErrorMsg() << "] not found."}; } bool redactForQE = @@ -588,8 +617,9 @@ std::vector CommonMongodProcessInterface::getMatchingPlanCacheEntryStat }; AutoGetCollection collection(opCtx, nss, MODE_IS); - uassert( - 50933, str::stream() << "collection '" << nss.toString() << "' does not exist", collection); + uassert(50933, + str::stream() << "collection '" << nss.toStringForErrorMsg() << "' does not exist", + collection); const auto& collQueryInfo = CollectionQueryInfo::get(collection.getCollection()); const auto planCache = collQueryInfo.getPlanCache(); @@ -648,13 +678,13 @@ bool CommonMongodProcessInterface::fieldsHaveSupportingUniqueIndex( } BSONObj CommonMongodProcessInterface::_reportCurrentOpForClient( - OperationContext* opCtx, + const boost::intrusive_ptr& expCtx, Client* client, CurrentOpTruncateMode truncateOps, CurrentOpBacktraceMode backtraceMode) const { BSONObjBuilder builder; - CurOp::reportCurrentOpForClient(opCtx, + CurOp::reportCurrentOpForClient(expCtx, client, (truncateOps == CurrentOpTruncateMode::kTruncateOps), (backtraceMode == CurrentOpBacktraceMode::kIncludeBacktrace), @@ -791,59 +821,6 @@ CommonMongodProcessInterface::ensureFieldsUniqueOrResolveDocumentKey( return {*fieldPaths, targetCollectionPlacementVersion}; } -write_ops::InsertCommandRequest CommonMongodProcessInterface::buildInsertOp( - const NamespaceString& nss, std::vector&& objs, bool bypassDocValidation) { - write_ops::InsertCommandRequest insertOp(nss); - 
insertOp.setDocuments(std::move(objs));
-    insertOp.setWriteCommandRequestBase([&] {
-        write_ops::WriteCommandRequestBase wcb;
-        wcb.setOrdered(false);
-        wcb.setBypassDocumentValidation(bypassDocValidation);
-        return wcb;
-    }());
-    return insertOp;
-}
-
-write_ops::UpdateCommandRequest CommonMongodProcessInterface::buildUpdateOp(
-    const boost::intrusive_ptr& expCtx,
-    const NamespaceString& nss,
-    BatchedObjects&& batch,
-    UpsertType upsert,
-    bool multi) {
-    write_ops::UpdateCommandRequest updateOp(nss);
-    updateOp.setUpdates([&] {
-        std::vector updateEntries;
-        for (auto&& obj : batch) {
-            updateEntries.push_back([&] {
-                write_ops::UpdateOpEntry entry;
-                auto&& [q, u, c] = obj;
-                entry.setQ(std::move(q));
-                entry.setU(std::move(u));
-                entry.setC(std::move(c));
-                entry.setUpsert(upsert != UpsertType::kNone);
-                entry.setUpsertSupplied(
-                    {{entry.getUpsert(), upsert == UpsertType::kInsertSuppliedDoc}});
-                entry.setMulti(multi);
-                return entry;
-            }());
-        }
-        return updateEntries;
-    }());
-    updateOp.setWriteCommandRequestBase([&] {
-        write_ops::WriteCommandRequestBase wcb;
-        wcb.setOrdered(false);
-        wcb.setBypassDocumentValidation(expCtx->bypassDocumentValidation);
-        return wcb;
-    }());
-    auto [constants, letParams] =
-        expCtx->variablesParseState.transitionalCompatibilitySerialize(expCtx->variables);
-    updateOp.setLegacyRuntimeConstants(std::move(constants));
-    if (!letParams.isEmpty()) {
-        updateOp.setLet(std::move(letParams));
-    }
-    return updateOp;
-}
-
BSONObj CommonMongodProcessInterface::_convertRenameToInternalRename(
    OperationContext* opCtx,
    const NamespaceString& sourceNs,
@@ -864,6 +841,27 @@ BSONObj CommonMongodProcessInterface::_convertRenameToInternalRename(
    return newCmd.obj();
}
+void CommonMongodProcessInterface::_handleTimeseriesCreateError(const DBException& ex,
+                                                                OperationContext* opCtx,
+                                                                const NamespaceString& ns,
+                                                                TimeseriesOptions userOpts) {
+    // If we receive a NamespaceExists error for a time-series view that has the same
+    // specification as the time-series view we wanted to create, we should not throw an
+    // error. The user is allowed to overwrite an existing time-series collection when
+    // entering this function.
+    auto view = CollectionCatalog::get(opCtx)->lookupView(opCtx, ns);
+    // Confirming the error is NamespaceExists and that there is a time-series view in that
+    // namespace.
+    if (ex.code() != ErrorCodes::NamespaceExists || !view || !view->timeseries()) {
+        throw;
+    }
+    // Confirming the time-series options of the existing view are the same as expected.
+ auto timeseriesOpts = mongo::timeseries::getTimeseriesOptions(opCtx, ns, true); + if (!timeseriesOpts || !mongo::timeseries::optionsAreEqual(timeseriesOpts.value(), userOpts)) { + throw; + } +} + void CommonMongodProcessInterface::writeRecordsToRecordStore( const boost::intrusive_ptr& expCtx, RecordStore* rs, @@ -871,7 +869,7 @@ void CommonMongodProcessInterface::writeRecordsToRecordStore( const std::vector& ts) const { tassert(5643012, "Attempted to write to record store with nullptr", records); assertIgnorePrepareConflictsBehavior(expCtx); - writeConflictRetry(expCtx->opCtx, "MPI::writeRecordsToRecordStore", expCtx->ns.ns(), [&] { + writeConflictRetry(expCtx->opCtx, "MPI::writeRecordsToRecordStore", expCtx->ns, [&] { Lock::GlobalLock lk(expCtx->opCtx, MODE_IS); WriteUnitOfWork wuow(expCtx->opCtx); auto writeResult = rs->insertRecords(expCtx->opCtx, records, ts); @@ -901,7 +899,7 @@ Document CommonMongodProcessInterface::readRecordFromRecordStore( void CommonMongodProcessInterface::deleteRecordFromRecordStore( const boost::intrusive_ptr& expCtx, RecordStore* rs, RecordId rID) const { assertIgnorePrepareConflictsBehavior(expCtx); - writeConflictRetry(expCtx->opCtx, "MPI::deleteFromRecordStore", expCtx->ns.ns(), [&] { + writeConflictRetry(expCtx->opCtx, "MPI::deleteFromRecordStore", expCtx->ns, [&] { Lock::GlobalLock lk(expCtx->opCtx, MODE_IS); WriteUnitOfWork wuow(expCtx->opCtx); rs->deleteRecord(expCtx->opCtx, rID); @@ -912,7 +910,7 @@ void CommonMongodProcessInterface::deleteRecordFromRecordStore( void CommonMongodProcessInterface::truncateRecordStore( const boost::intrusive_ptr& expCtx, RecordStore* rs) const { assertIgnorePrepareConflictsBehavior(expCtx); - writeConflictRetry(expCtx->opCtx, "MPI::truncateRecordStore", expCtx->ns.ns(), [&] { + writeConflictRetry(expCtx->opCtx, "MPI::truncateRecordStore", expCtx->ns, [&] { Lock::GlobalLock lk(expCtx->opCtx, MODE_IS); WriteUnitOfWork wuow(expCtx->opCtx); auto status = rs->truncate(expCtx->opCtx); diff --git a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.h b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.h index 3258b4f07d3b6..a9ec9634a970a 100644 --- a/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/common_mongod_process_interface.h @@ -29,13 +29,54 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/generic_cursor_gen.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_exec.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/javascript_execution.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/common_process_interface.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/storage_stats_spec_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include 
"mongo/db/record_id.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/storage/backup_cursor_state.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/temporary_record_store.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -66,7 +107,7 @@ class CommonMongodProcessInterface : public CommonProcessInterface { const NamespaceString& nss, bool includeHistograms, BSONObjBuilder* builder) const final; - Status appendStorageStats(OperationContext* opCtx, + Status appendStorageStats(const boost::intrusive_ptr& expCtx, const NamespaceString& nss, const StorageStatsSpec& spec, BSONObjBuilder* builder, @@ -145,24 +186,7 @@ class CommonMongodProcessInterface : public CommonProcessInterface { const Document& documentKey, MakePipelineOptions opts); - /** - * Builds an ordered insert op on namespace 'nss' and documents to be written 'objs'. - */ - write_ops::InsertCommandRequest buildInsertOp(const NamespaceString& nss, - std::vector&& objs, - bool bypassDocValidation); - - /** - * Builds an ordered update op on namespace 'nss' with update entries contained in 'batch'. - */ - write_ops::UpdateCommandRequest buildUpdateOp( - const boost::intrusive_ptr& expCtx, - const NamespaceString& nss, - BatchedObjects&& batch, - UpsertType upsert, - bool multi); - - BSONObj _reportCurrentOpForClient(OperationContext* opCtx, + BSONObj _reportCurrentOpForClient(const boost::intrusive_ptr& expCtx, Client* client, CurrentOpTruncateMode truncateOps, CurrentOpBacktraceMode backtraceMode) const final; @@ -192,6 +216,11 @@ class CommonMongodProcessInterface : public CommonProcessInterface { const BSONObj& originalCollectionOptions, const std::list& originalIndexes); + void _handleTimeseriesCreateError(const DBException& ex, + OperationContext* opCtx, + const NamespaceString& ns, + TimeseriesOptions userOpts); + private: /** * Looks up the collection default collator for the collection given by 'collectionUUID'. 
A diff --git a/src/mongo/db/pipeline/process_interface/common_process_interface.cpp b/src/mongo/db/pipeline/process_interface/common_process_interface.cpp index c3a978ca76638..13cf67000bc7e 100644 --- a/src/mongo/db/pipeline/process_interface/common_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/common_process_interface.cpp @@ -28,27 +28,45 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/process_interface/common_process_interface.h" - -#include "mongo/bson/mutable/document.h" -#include "mongo/config.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/client.h" #include "mongo/db/curop.h" +#include "mongo/db/generic_cursor_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/db/operation_time_tracker.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/process_interface/common_process_interface.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" -#include "mongo/platform/atomic_word.h" -#include "mongo/platform/mutex.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/namespace_string_util.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" #ifndef MONGO_CONFIG_USE_RAW_LATCHES #include "mongo/util/diagnostic_info.h" @@ -109,7 +127,7 @@ std::vector CommonProcessInterface::getCurrentOps( } // Delegate to the mongoD- or mongoS-specific implementation of _reportCurrentOpForClient. - ops.emplace_back(_reportCurrentOpForClient(opCtx, client, truncateMode, backtraceMode)); + ops.emplace_back(_reportCurrentOpForClient(expCtx, client, truncateMode, backtraceMode)); } // If 'cursorMode' is set to include idle cursors, retrieve them and add them to ops. @@ -120,8 +138,17 @@ std::vector CommonProcessInterface::getCurrentOps( cursorObj.append("type", "idleCursor"); cursorObj.append("host", getHostNameCachedAndPort()); // First, extract fields which need to go at the top level out of the GenericCursor. - auto ns = cursor.getNs(); - cursorObj.append("ns", ns ? 
NamespaceStringUtil::serialize(*ns) : ""); + if (auto ns = cursor.getNs()) { + tassert(7663401, + str::stream() + << "SerializationContext on the expCtx should not be empty, with ns: " + << ns->ns(), + expCtx->serializationCtxt != SerializationContext::stateDefault()); + cursorObj.append("ns", + NamespaceStringUtil::serialize(*ns, expCtx->serializationCtxt)); + } else + cursorObj.append("ns", ""); + if (auto lsid = cursor.getLsid()) { cursorObj.append("lsid", lsid->toBSON()); } @@ -168,12 +195,6 @@ std::vector CommonProcessInterface::collectDocumentKeyFieldsActingAsR return {"_id"}; } -std::unique_ptr -CommonProcessInterface::getWriteSizeEstimator(OperationContext* opCtx, - const NamespaceString& ns) const { - return std::make_unique(); -} - void CommonProcessInterface::updateClientOperationTime(OperationContext* opCtx) const { // In order to support causal consistency in a replica set or a sharded cluster when reading // with secondary read preference, the secondary must propagate the primary's operation time diff --git a/src/mongo/db/pipeline/process_interface/common_process_interface.h b/src/mongo/db/pipeline/process_interface/common_process_interface.h index b3b5f26468f2e..b8df28b82f5b3 100644 --- a/src/mongo/db/pipeline/process_interface/common_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/common_process_interface.h @@ -29,10 +29,31 @@ #pragma once +#include +#include +#include +#include #include +#include +#include +#include +#include +#include + #include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -47,10 +68,22 @@ class CommonProcessInterface : public MongoProcessInterface { /** * Estimates the size of writes that will be executed on the current node. Note that this - * does not account for the full size of an update statement. + * does not account for the full size of an update statement because in the case of local + * writes, we will not have to serialize to BSON and are therefore not subject to the 16MB + * BSONObj size limit. 
*/ class LocalWriteSizeEstimator final : public WriteSizeEstimator { public: + int estimateInsertHeaderSize( + const write_ops::InsertCommandRequest& insertReq) const override { + return 0; + } + + int estimateUpdateHeaderSize( + const write_ops::UpdateCommandRequest& insertReq) const override { + return 0; + } + int estimateInsertSizeBytes(const BSONObj& insert) const override { return insert.objsize(); } @@ -70,6 +103,16 @@ class CommonProcessInterface : public MongoProcessInterface { */ class TargetPrimaryWriteSizeEstimator final : public WriteSizeEstimator { public: + int estimateInsertHeaderSize( + const write_ops::InsertCommandRequest& insertReq) const override { + return write_ops::getInsertHeaderSizeEstimate(insertReq); + } + + int estimateUpdateHeaderSize( + const write_ops::UpdateCommandRequest& updateReq) const override { + return write_ops::getUpdateHeaderSizeEstimate(updateReq); + } + int estimateInsertSizeBytes(const BSONObj& insert) const override { return insert.objsize() + write_ops::kWriteCommandBSONArrayPerElementOverheadBytes; } @@ -109,8 +152,6 @@ class CommonProcessInterface : public MongoProcessInterface { virtual std::vector collectDocumentKeyFieldsActingAsRouter( OperationContext*, const NamespaceString&) const override; - std::unique_ptr getWriteSizeEstimator( - OperationContext* opCtx, const NamespaceString& ns) const override; virtual void updateClientOperationTime(OperationContext* opCtx) const final; @@ -133,7 +174,7 @@ class CommonProcessInterface : public MongoProcessInterface { * executed by the supplied client. This method is called by the getCurrentOps method of * CommonProcessInterface to delegate to the mongoS- or mongoD- specific implementation. */ - virtual BSONObj _reportCurrentOpForClient(OperationContext* opCtx, + virtual BSONObj _reportCurrentOpForClient(const boost::intrusive_ptr& expCtx, Client* client, CurrentOpTruncateMode truncateOps, CurrentOpBacktraceMode backtraceMode) const = 0; diff --git a/src/mongo/db/pipeline/process_interface/mongo_process_interface.cpp b/src/mongo/db/pipeline/process_interface/mongo_process_interface.cpp index 46101c78629e8..fd5b3e0572aa7 100644 --- a/src/mongo/db/pipeline/process_interface/mongo_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/mongo_process_interface.cpp @@ -27,10 +27,7 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" - #include "mongo/base/shim.h" namespace mongo { diff --git a/src/mongo/db/pipeline/process_interface/mongo_process_interface.h b/src/mongo/db/pipeline/process_interface/mongo_process_interface.h index 5b61f3a279c34..2f81331066ee9 100644 --- a/src/mongo/db/pipeline/process_interface/mongo_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/mongo_process_interface.h @@ -30,40 +30,71 @@ #pragma once #include +#include #include +#include +#include +#include #include #include #include +#include #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/dbclient_base.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/collection_index_usage_tracker.h" +#include "mongo/db/database_name.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/generic_cursor.h" +#include "mongo/db/generic_cursor_gen.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" #include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" #include "mongo/db/pipeline/storage_stats_spec_gen.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/explain_options.h" #include "mongo/db/record_id.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/resource_yielder.h" #include "mongo/db/storage/backup_cursor_hooks.h" #include "mongo/db/storage/backup_cursor_state.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/temporary_record_store.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/task_executor.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" #include "mongo/s/shard_version.h" +#include "mongo/util/uuid.h" namespace mongo { class ShardFilterer; class ExpressionContext; class JsExecution; + class Pipeline; class PipelineDeleter; class TransactionHistoryIteratorBase; @@ -84,7 +115,7 @@ class MongoProcessInterface { * 2. write_ops::UpdateModification - either the new document we want to upsert or insert into * the collection (i.e. a 'classic' replacement update), or the pipeline to run to compute * the new document. - * 3. boost::optional - for pipeline-style updated, specifies variables that can be + * 3. boost::optional - for pipeline-style updates, specifies variables that can be * referred to in the pipeline performing the custom update. */ using BatchObject = @@ -112,8 +143,19 @@ class MongoProcessInterface { public: virtual ~WriteSizeEstimator() = default; + /** + * Set of functions which estimate the entire size of a write command except for the array + * of write statements themselves. 
+ */ + virtual int estimateInsertHeaderSize( + const write_ops::InsertCommandRequest& insertReq) const = 0; + virtual int estimateUpdateHeaderSize( + const write_ops::UpdateCommandRequest& updateReq) const = 0; + + /** + * Set of functions which estimate the size of a single write statement. + */ virtual int estimateInsertSizeBytes(const BSONObj& insert) const = 0; - virtual int estimateUpdateSizeBytes(const BatchObject& batchObject, UpsertType type) const = 0; }; @@ -168,29 +210,35 @@ class MongoProcessInterface { virtual void updateClientOperationTime(OperationContext* opCtx) const = 0; /** - * Inserts 'objs' into 'ns' and returns an error Status if the insert fails. If 'targetEpoch' is - * set, throws ErrorCodes::StaleEpoch if the targeted collection does not have the same epoch or - * the epoch changes during the course of the insert. + * Executes 'insertCommand' against 'ns' and returns an error Status if the insert fails. If + * 'targetEpoch' is set, throws ErrorCodes::StaleEpoch if the targeted collection does not have + * the same epoch or the epoch changes during the course of the insert. */ virtual Status insert(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - std::vector&& objs, + std::unique_ptr insertCommand, const WriteConcernOptions& wc, boost::optional targetEpoch) = 0; + virtual Status insertTimeseries(const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) = 0; /** - * Updates the documents matching 'queries' with the objects 'updates'. Returns an error Status - * if any of the updates fail, otherwise returns an 'UpdateResult' objects with the details of - * the update operation. If 'targetEpoch' is set, throws ErrorCodes::StaleEpoch if the targeted - * collection does not have the same epoch, or if the epoch changes during the update. + * Executes the updates described by 'updateCommand'. Returns an error Status if any of the + * updates fail, otherwise returns an 'UpdateResult' objects with the details of the update + * operation. If 'targetEpoch' is set, throws ErrorCodes::StaleEpoch if the targeted collection + * does not have the same epoch, or if the epoch changes during the update. */ - virtual StatusWith update(const boost::intrusive_ptr& expCtx, - const NamespaceString& ns, - BatchedObjects&& batch, - const WriteConcernOptions& wc, - UpsertType upsert, - bool multi, - boost::optional targetEpoch) = 0; + virtual StatusWith update( + const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr updateCommand, + const WriteConcernOptions& wc, + UpsertType upsert, + bool multi, + boost::optional targetEpoch) = 0; /** * Returns index usage statistics for each index on collection 'ns' along with additional @@ -235,7 +283,7 @@ class MongoProcessInterface { * specific stats to be appended to parameter 'builder'. By passing 'boost::none' to * 'filterObj', the caller is requesting to append all possible storage stats. */ - virtual Status appendStorageStats(OperationContext* opCtx, + virtual Status appendStorageStats(const boost::intrusive_ptr& expCtx, const NamespaceString& nss, const StorageStatsSpec& spec, BSONObjBuilder* builder, @@ -284,6 +332,13 @@ class MongoProcessInterface { virtual void createCollection(OperationContext* opCtx, const DatabaseName& dbName, const BSONObj& cmdObj) = 0; + /** + * Creates the view backing a time-series collection. 
+ */ + virtual void createTimeseriesView(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts) = 0; /** * Runs createIndexes on the given database for the given index specs. If running on a shardsvr diff --git a/src/mongo/db/pipeline/process_interface/mongod_process_interface_factory.cpp b/src/mongo/db/pipeline/process_interface/mongod_process_interface_factory.cpp index 0405df29bed0b..1fa1ad1e407cc 100644 --- a/src/mongo/db/pipeline/process_interface/mongod_process_interface_factory.cpp +++ b/src/mongo/db/pipeline/process_interface/mongod_process_interface_factory.cpp @@ -27,16 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include #include "mongo/base/shim.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/process_interface/replica_set_node_process_interface.h" #include "mongo/db/pipeline/process_interface/shardsvr_process_interface.h" #include "mongo/db/pipeline/process_interface/standalone_process_interface.h" #include "mongo/db/s/sharding_state.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/s/grid.h" +#include "mongo/transport/session.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp b/src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp index 18e2f11ee1b60..0b7fe12265153 100644 --- a/src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/mongos_process_interface.cpp @@ -29,29 +29,57 @@ #include "mongo/db/pipeline/process_interface/mongos_process_interface.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/curop.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source_merge.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/redaction.h" #include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/s/query/cluster_cursor_manager.h" #include "mongo/s/query/document_source_merge_cursors.h" #include "mongo/s/query/establish_cursors.h" -#include "mongo/s/query/router_exec_stage.h" #include 
"mongo/s/query_analysis_sample_tracker.h" +#include "mongo/s/shard_version.h" #include "mongo/s/stale_shard_version_helpers.h" #include "mongo/s/transaction_router.h" -#include "mongo/util/fail_point.h" +#include "mongo/util/decorable.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -70,7 +98,7 @@ StatusWith getCollectionRoutingInfo( if (swRoutingInfo.isOK() && expCtx->uuid && swRoutingInfo.getValue().cm.isSharded()) { if (!swRoutingInfo.getValue().cm.uuidMatches(*expCtx->uuid)) { return {ErrorCodes::NamespaceNotFound, - str::stream() << "The UUID of collection " << expCtx->ns.ns() + str::stream() << "The UUID of collection " << expCtx->ns.toStringForErrorMsg() << " changed; it may have been dropped and re-created."}; } } @@ -98,7 +126,7 @@ bool supportsUniqueKey(const boost::intrusive_ptr& expCtx, } // namespace -std::unique_ptr +std::unique_ptr MongosProcessInterface::getWriteSizeEstimator(OperationContext* opCtx, const NamespaceString& ns) const { return std::make_unique(); @@ -201,8 +229,15 @@ boost::optional MongosProcessInterface::lookupSingleDocument( // single shard will be targeted here; however, in certain cases where only the _id // is present, we may need to scatter-gather the query to all shards in order to // find the document. - auto requests = getVersionedRequestsForTargetedShards( - expCtx->opCtx, nss, cri, findCmd, filterObj, CollationSpec::kSimpleSpec); + auto requests = + getVersionedRequestsForTargetedShards(expCtx->opCtx, + nss, + cri, + findCmd, + filterObj, + CollationSpec::kSimpleSpec, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); // Dispatch the requests. The 'establishCursors' method conveniently prepares the // result into a vector of cursor responses for us. 
@@ -252,13 +287,13 @@ boost::optional MongosProcessInterface::lookupSingleDocumentLocally( } BSONObj MongosProcessInterface::_reportCurrentOpForClient( - OperationContext* opCtx, + const boost::intrusive_ptr& expCtx, Client* client, CurrentOpTruncateMode truncateOps, CurrentOpBacktraceMode backtraceMode) const { BSONObjBuilder builder; - CurOp::reportCurrentOpForClient(opCtx, + CurOp::reportCurrentOpForClient(expCtx, client, (truncateOps == CurrentOpTruncateMode::kTruncateOps), (backtraceMode == CurrentOpBacktraceMode::kIncludeBacktrace), diff --git a/src/mongo/db/pipeline/process_interface/mongos_process_interface.h b/src/mongo/db/pipeline/process_interface/mongos_process_interface.h index 205d299e6eb19..e4d36a69bf963 100644 --- a/src/mongo/db/pipeline/process_interface/mongos_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/mongos_process_interface.h @@ -29,9 +29,58 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/generic_cursor_gen.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/common_process_interface.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/db/pipeline/storage_stats_spec_gen.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/storage/backup_cursor_state.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/temporary_record_store.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -72,15 +121,23 @@ class MongosProcessInterface : public CommonProcessInterface { Status insert(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - std::vector&& objs, + std::unique_ptr insertCommand, const WriteConcernOptions& wc, boost::optional) final { MONGO_UNREACHABLE; } + Status insertTimeseries(const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) final { + MONGO_UNREACHABLE; + } + StatusWith update(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - BatchedObjects&& batch, + std::unique_ptr updateCommand, const WriteConcernOptions& wc, UpsertType upsert, bool multi, @@ -117,7 +174,7 @@ class MongosProcessInterface : public CommonProcessInterface { 
MONGO_UNREACHABLE; } - Status appendStorageStats(OperationContext* opCtx, + Status appendStorageStats(const boost::intrusive_ptr& expCtx, const NamespaceString& nss, const StorageStatsSpec& spec, BSONObjBuilder* builder, @@ -157,6 +214,14 @@ class MongosProcessInterface : public CommonProcessInterface { MONGO_UNREACHABLE; } + + void createTimeseriesView(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts) final { + MONGO_UNREACHABLE; + } + void createIndexesOnEmptyCollection(OperationContext* opCtx, const NamespaceString& ns, const std::vector& indexSpecs) final { @@ -294,7 +359,7 @@ class MongosProcessInterface : public CommonProcessInterface { } protected: - BSONObj _reportCurrentOpForClient(OperationContext* opCtx, + BSONObj _reportCurrentOpForClient(const boost::intrusive_ptr& expCtx, Client* client, CurrentOpTruncateMode truncateOps, CurrentOpBacktraceMode backtraceMode) const final; diff --git a/src/mongo/db/pipeline/process_interface/mongos_process_interface_factory.cpp b/src/mongo/db/pipeline/process_interface/mongos_process_interface_factory.cpp index 88db7a878ae0b..77094c0d80592 100644 --- a/src/mongo/db/pipeline/process_interface/mongos_process_interface_factory.cpp +++ b/src/mongo/db/pipeline/process_interface/mongos_process_interface_factory.cpp @@ -27,9 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/base/shim.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/process_interface/mongos_process_interface.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/s/grid.h" diff --git a/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp index a6fe00a49c71e..3ab3c3661ca37 100644 --- a/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp +++ b/src/mongo/db/pipeline/process_interface/mongos_process_interface_test.cpp @@ -27,9 +27,19 @@ * it in the license file. 
*/ +#include + +#include +#include +#include +#include + #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/process_interface/mongos_process_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp index cad249ff22782..d1730709fc87a 100644 --- a/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.cpp @@ -29,17 +29,40 @@ #include "mongo/db/pipeline/process_interface/non_shardsvr_process_interface.h" +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/drop_collection.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/list_indexes.h" #include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/ops/single_write_result_gen.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_exec.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source_cursor.h" #include "mongo/db/repl/speculative_majority_read_info.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/util/str.h" namespace mongo { @@ -106,13 +129,13 @@ boost::optional NonShardServerProcessInterface::lookupSingleDocument( return lookedUpDocument; } -Status NonShardServerProcessInterface::insert(const boost::intrusive_ptr& expCtx, - const NamespaceString& ns, - std::vector&& objs, - const WriteConcernOptions& wc, - boost::optional targetEpoch) { - auto writeResults = write_ops_exec::performInserts( - expCtx->opCtx, buildInsertOp(ns, std::move(objs), expCtx->bypassDocumentValidation)); +Status NonShardServerProcessInterface::insert( + const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) { + auto writeResults = write_ops_exec::performInserts(expCtx->opCtx, *insertCommand); // Need to check each result in the batch since the writes are unordered. 
for (const auto& result : writeResults.results) { @@ -123,16 +146,32 @@ Status NonShardServerProcessInterface::insert(const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) { + try { + auto insertReply = write_ops_exec::performTimeseriesWrites(expCtx->opCtx, *insertCommand); + + checkWriteErrors(insertReply.getWriteCommandReplyBase()); + } catch (DBException& ex) { + ex.addContext(str::stream() << "time-series insert failed: " << ns.ns()); + throw; + } + return Status::OK(); +} + StatusWith NonShardServerProcessInterface::update( const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - BatchedObjects&& batch, + std::unique_ptr updateCommand, const WriteConcernOptions& wc, UpsertType upsert, bool multi, boost::optional targetEpoch) { - auto writeResults = write_ops_exec::performUpdates( - expCtx->opCtx, buildUpdateOp(expCtx, ns, std::move(batch), upsert, multi)); + auto writeResults = write_ops_exec::performUpdates(expCtx->opCtx, *updateCommand); // Need to check each result in the batch since the writes are unordered. UpdateResult updateResult; @@ -152,7 +191,7 @@ void NonShardServerProcessInterface::createIndexesOnEmptyCollection( AutoGetCollection autoColl(opCtx, ns, MODE_X); CollectionWriter collection(opCtx, autoColl); writeConflictRetry( - opCtx, "CommonMongodProcessInterface::createIndexesOnEmptyCollection", ns.ns(), [&] { + opCtx, "CommonMongodProcessInterface::createIndexesOnEmptyCollection", ns, [&] { uassert(ErrorCodes::DatabaseDropPending, str::stream() << "The database is in the process of being dropped " << ns.dbName().toStringForErrorMsg(), @@ -161,11 +200,13 @@ void NonShardServerProcessInterface::createIndexesOnEmptyCollection( uassert(ErrorCodes::NamespaceNotFound, str::stream() << "Failed to create indexes for aggregation because collection " "does not exist: " - << ns << ": " << BSON("indexes" << indexSpecs), + << ns.toStringForErrorMsg() << ": " + << BSON("indexes" << indexSpecs), collection.get()); invariant(collection->isEmpty(opCtx), - str::stream() << "Expected empty collection for index creation: " << ns + str::stream() << "Expected empty collection for index creation: " + << ns.toStringForErrorMsg() << ": numRecords: " << collection->numRecords(opCtx) << ": " << BSON("indexes" << indexSpecs)); @@ -201,6 +242,17 @@ void NonShardServerProcessInterface::renameIfOptionsAndIndexesHaveNotChanged( opCtx, sourceNs, targetNs, options, originalIndexes, originalCollectionOptions); } +void NonShardServerProcessInterface::createTimeseriesView(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts) { + try { + uassertStatusOK(mongo::createTimeseries(opCtx, ns, cmdObj)); + } catch (DBException& ex) { + _handleTimeseriesCreateError(ex, opCtx, ns, userOpts); + } +} + void NonShardServerProcessInterface::createCollection(OperationContext* opCtx, const DatabaseName& dbName, const BSONObj& cmdObj) { diff --git a/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.h b/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.h index 53de646093a91..26ef3e13f2b5e 100644 --- a/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/non_shardsvr_process_interface.h @@ -29,11 +29,41 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include 
"mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/common_mongod_process_interface.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -91,13 +121,19 @@ class NonShardServerProcessInterface : public CommonMongodProcessInterface { Status insert(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - std::vector&& objs, + std::unique_ptr insertCommand, const WriteConcernOptions& wc, boost::optional targetEpoch) override; + Status insertTimeseries(const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) override; + StatusWith update(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - BatchedObjects&& batch, + std::unique_ptr updateCommand, const WriteConcernOptions& wc, UpsertType upsert, bool multi, @@ -116,6 +152,11 @@ class NonShardServerProcessInterface : public CommonMongodProcessInterface { const DatabaseName& dbName, const BSONObj& cmdObj) override; + void createTimeseriesView(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts) override; + void dropCollection(OperationContext* opCtx, const NamespaceString& collection) override; void createIndexesOnEmptyCollection(OperationContext* opCtx, diff --git a/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp b/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp index 16473b19bda16..2180040abaa8a 100644 --- a/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.cpp @@ -27,23 +27,38 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/process_interface/replica_set_node_process_interface.h" - -#include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/drop_collection.h" -#include "mongo/db/catalog/rename_collection.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/commands.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_time_tracker.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/pipeline/process_interface/common_mongod_process_interface.h" +#include "mongo/db/pipeline/process_interface/replica_set_node_process_interface.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" namespace mongo { @@ -69,26 +84,27 @@ void ReplicaSetNodeProcessInterface::setReplicaSetNodeExecutor( replicaSetNodeExecutor(service) = std::move(executor); } -Status ReplicaSetNodeProcessInterface::insert(const boost::intrusive_ptr& expCtx, - const NamespaceString& ns, - std::vector&& objs, - const WriteConcernOptions& wc, - boost::optional targetEpoch) { +Status ReplicaSetNodeProcessInterface::insert( + const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) { auto&& opCtx = expCtx->opCtx; if (_canWriteLocally(opCtx, ns)) { - return NonShardServerProcessInterface::insert(expCtx, ns, std::move(objs), wc, targetEpoch); + return NonShardServerProcessInterface::insert( + expCtx, ns, std::move(insertCommand), wc, targetEpoch); } - BatchedCommandRequest insertCommand( - buildInsertOp(ns, std::move(objs), expCtx->bypassDocumentValidation)); + BatchedCommandRequest batchInsertCommand(std::move(insertCommand)); - return _executeCommandOnPrimary(opCtx, ns, std::move(insertCommand.toBSON())).getStatus(); + return _executeCommandOnPrimary(opCtx, ns, batchInsertCommand.toBSON()).getStatus(); } StatusWith ReplicaSetNodeProcessInterface::update( const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - BatchedObjects&& batch, + std::unique_ptr updateCommand, const WriteConcernOptions& wc, UpsertType upsert, bool multi, @@ -96,11 +112,11 @@ StatusWith ReplicaSetNodeProcessInterface:: auto&& opCtx = expCtx->opCtx; if (_canWriteLocally(opCtx, ns)) { return NonShardServerProcessInterface::update( - expCtx, ns, std::move(batch), wc, upsert, multi, targetEpoch); + expCtx, ns, std::move(updateCommand), wc, upsert, multi, targetEpoch); } + BatchedCommandRequest batchUpdateCommand(std::move(updateCommand)); - BatchedCommandRequest updateCommand(buildUpdateOp(expCtx, ns, std::move(batch), upsert, multi)); - auto result = _executeCommandOnPrimary(opCtx, 
ns, std::move(updateCommand.toBSON())); + auto result = _executeCommandOnPrimary(opCtx, ns, batchUpdateCommand.toBSON()); if (!result.isOK()) { return result.getStatus(); } @@ -124,6 +140,36 @@ void ReplicaSetNodeProcessInterface::createIndexesOnEmptyCollection( uassertStatusOK(_executeCommandOnPrimary(opCtx, ns, cmd.obj())); } +void ReplicaSetNodeProcessInterface::createTimeseriesView(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts) { + if (_canWriteLocally(opCtx, ns)) { + return NonShardServerProcessInterface::createTimeseriesView(opCtx, ns, cmdObj, userOpts); + } + + try { + uassertStatusOK(_executeCommandOnPrimary(opCtx, ns, cmdObj)); + } catch (const DBException& ex) { + _handleTimeseriesCreateError(ex, opCtx, ns, userOpts); + } +} + +Status ReplicaSetNodeProcessInterface::insertTimeseries( + const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) { + if (_canWriteLocally(expCtx->opCtx, ns)) { + return NonShardServerProcessInterface::insertTimeseries( + expCtx, ns, std::move(insertCommand), wc, targetEpoch); + } else { + return ReplicaSetNodeProcessInterface::insert( + expCtx, ns, std::move(insertCommand), wc, targetEpoch); + } +} + void ReplicaSetNodeProcessInterface::renameIfOptionsAndIndexesHaveNotChanged( OperationContext* opCtx, const NamespaceString& sourceNs, diff --git a/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.h b/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.h index 02c183d43a066..aaaa0e68d8167 100644 --- a/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/replica_set_node_process_interface.h @@ -29,9 +29,31 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/common_process_interface.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/process_interface/non_shardsvr_process_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" namespace mongo { @@ -68,12 +90,13 @@ class ReplicaSetNodeProcessInterface final : public NonShardServerProcessInterfa Status insert(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - std::vector&& objs, + std::unique_ptr insertCommand, const WriteConcernOptions& wc, boost::optional targetEpoch) final; + StatusWith update(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - BatchedObjects&& batch, + std::unique_ptr updateCommand, const WriteConcernOptions& wc, UpsertType upsert, bool multi, @@ -93,6 +116,16 @@ class ReplicaSetNodeProcessInterface final : public NonShardServerProcessInterfa void createIndexesOnEmptyCollection(OperationContext* opCtx, const NamespaceString& ns, const std::vector& indexSpecs); + void createTimeseriesView(OperationContext* 
opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts); + + Status insertTimeseries(const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch); private: /** diff --git a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp index d177df795cf16..40f8f1530facd 100644 --- a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.cpp @@ -29,27 +29,57 @@ #include "mongo/db/pipeline/process_interface/shardsvr_process_interface.h" +#include +#include +#include #include - -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/exec/shard_filterer_impl.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source_cursor.h" -#include "mongo/db/pipeline/document_source_internal_shard_filter.h" #include "mongo/db/pipeline/document_source_merge.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/cluster_write.h" +#include "mongo/s/grid.h" +#include "mongo/s/index_version.h" #include "mongo/s/query/document_source_merge_cursors.h" #include "mongo/s/router_role.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" -#include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/stale_exception.h" +#include "mongo/s/write_ops/batch_write_exec.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -102,8 +132,8 @@ void ShardServerProcessInterface::checkRoutingInfoEpochOrThrow( }(); uassert(StaleEpochInfo(nss, receivedVersion, wantedVersion), - str::stream() << "Could not act as router for " << nss.ns() << ", received " - << receivedVersion.toString() << ", but found " + str::stream() << "Could not act as router for " << nss.toStringForErrorMsg() + << ", received " << receivedVersion.toString() << ", but found " << wantedVersion.toString(), wantedVersion.placementVersion().isSameCollection(receivedVersion.placementVersion())); } @@ -123,20 +153,21 @@ boost::optional 
ShardServerProcessInterface::lookupSingleDocument( return doLookupSingleDocument(expCtx, nss, collectionUUID, documentKey, std::move(opts)); } -Status ShardServerProcessInterface::insert(const boost::intrusive_ptr& expCtx, - const NamespaceString& ns, - std::vector&& objs, - const WriteConcernOptions& wc, - boost::optional targetEpoch) { +Status ShardServerProcessInterface::insert( + const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) { BatchedCommandResponse response; BatchWriteExecStats stats; - BatchedCommandRequest insertCommand( - buildInsertOp(ns, std::move(objs), expCtx->bypassDocumentValidation)); + BatchedCommandRequest batchInsertCommand(std::move(insertCommand)); - insertCommand.setWriteConcern(wc.toBSON()); + batchInsertCommand.setWriteConcern(wc.toBSON()); - cluster::write(expCtx->opCtx, insertCommand, &stats, &response, targetEpoch); + cluster::write( + expCtx->opCtx, batchInsertCommand, nullptr /* nss */, &stats, &response, targetEpoch); return response.toStatus(); } @@ -144,7 +175,7 @@ Status ShardServerProcessInterface::insert(const boost::intrusive_ptr ShardServerProcessInterface::update( const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - BatchedObjects&& batch, + std::unique_ptr updateCommand, const WriteConcernOptions& wc, UpsertType upsert, bool multi, @@ -152,11 +183,11 @@ StatusWith ShardServerProcessInterface::upd BatchedCommandResponse response; BatchWriteExecStats stats; - BatchedCommandRequest updateCommand(buildUpdateOp(expCtx, ns, std::move(batch), upsert, multi)); - - updateCommand.setWriteConcern(wc.toBSON()); + BatchedCommandRequest batchUpdateCommand(std::move(updateCommand)); + batchUpdateCommand.setWriteConcern(wc.toBSON()); - cluster::write(expCtx->opCtx, updateCommand, &stats, &response, targetEpoch); + cluster::write( + expCtx->opCtx, batchUpdateCommand, nullptr /* nss */, &stats, &response, targetEpoch); if (auto status = response.toStatus(); status != Status::OK()) { return status; @@ -262,8 +293,8 @@ BSONObj ShardServerProcessInterface::getCollectionOptions(OperationContext* opCt } tassert(5983900, - str::stream() << "Expected at most one collection with the name " << nss << ": " - << resultCollections.docs.size(), + str::stream() << "Expected at most one collection with the name " + << nss.toStringForErrorMsg() << ": " << resultCollections.docs.size(), resultCollections.docs.size() <= 1); } @@ -297,8 +328,8 @@ std::list ShardServerProcessInterface::getIndexSpecs(OperationContext* void ShardServerProcessInterface::createCollection(OperationContext* opCtx, const DatabaseName& dbName, const BSONObj& cmdObj) { - auto cachedDbInfo = uassertStatusOK( - Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName.toStringWithTenantId())); + auto cachedDbInfo = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase( + opCtx, DatabaseNameUtil::serializeForCatalog(dbName))); BSONObjBuilder finalCmdBuilder(cmdObj); finalCmdBuilder.append(WriteConcernOptions::kWriteConcernField, opCtx->getWriteConcern().toBSON()); @@ -306,7 +337,7 @@ void ShardServerProcessInterface::createCollection(OperationContext* opCtx, // TODO SERVER-67411 change executeCommandAgainstDatabasePrimary to take in DatabaseName auto response = executeCommandAgainstDatabasePrimary(opCtx, - dbName.toStringWithTenantId(), + DatabaseNameUtil::serialize(dbName), std::move(cachedDbInfo), finalCmdObj, ReadPreferenceSetting(ReadPreference::PrimaryOnly), @@ -388,6 +419,27 
@@ void ShardServerProcessInterface::dropCollection(OperationContext* opCtx, << "write concern failed while running command " << cmdObj); } +void ShardServerProcessInterface::createTimeseriesView(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts) { + try { + ShardServerProcessInterface::createCollection(opCtx, ns.dbName(), cmdObj); + } catch (const DBException& ex) { + _handleTimeseriesCreateError(ex, opCtx, ns, userOpts); + } +} + +Status ShardServerProcessInterface::insertTimeseries( + const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) { + return ShardServerProcessInterface::insert( + expCtx, ns, std::move(insertCommand), wc, targetEpoch); +} + std::unique_ptr ShardServerProcessInterface::attachCursorSourceToPipeline(Pipeline* ownedPipeline, ShardTargetingPolicy shardTargetingPolicy, @@ -435,7 +487,7 @@ ShardServerProcessInterface::expectUnshardedCollectionInScope( void ShardServerProcessInterface::checkOnPrimaryShardForDb(OperationContext* opCtx, const NamespaceString& nss) { - DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, nss.db()); + DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, nss.dbName()); } } // namespace mongo diff --git a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.h b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.h index 94535d9222970..ce3250e13cd80 100644 --- a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface.h @@ -29,8 +29,37 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/common_mongod_process_interface.h" +#include "mongo/db/pipeline/process_interface/common_process_interface.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -53,6 +82,11 @@ class ShardServerProcessInterface final : public CommonMongodProcessInterface { const NamespaceString& nss, ChunkVersion targetCollectionPlacementVersion) const final; + std::unique_ptr getWriteSizeEstimator( + OperationContext* opCtx, const NamespaceString& ns) const final { + return std::make_unique(); + } + std::vector collectDocumentKeyFieldsActingAsRouter( OperationContext*, const NamespaceString&) const final { // We don't expect anyone to use this method on the shard itself (yet). 
This is currently @@ -71,23 +105,15 @@ class ShardServerProcessInterface final : public CommonMongodProcessInterface { const Document& documentKey, boost::optional readConcern) final; - /** - * Inserts the documents 'objs' into the namespace 'ns' using the ClusterWriter for locking, - * routing, stale config handling, etc. - */ Status insert(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - std::vector&& objs, + std::unique_ptr insertCommand, const WriteConcernOptions& wc, boost::optional targetEpoch) final; - /** - * Replaces the documents matching 'queries' with 'updates' using the ClusterWriter for locking, - * routing, stale config handling, etc. - */ StatusWith update(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - BatchedObjects&& batch, + std::unique_ptr updateCommand, const WriteConcernOptions& wc, UpsertType upsert, bool multi, @@ -141,6 +167,17 @@ class ShardServerProcessInterface final : public CommonMongodProcessInterface { const boost::optional& dbVersion) override; void checkOnPrimaryShardForDb(OperationContext* opCtx, const NamespaceString& nss) final; + + void createTimeseriesView(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts) final; + + Status insertTimeseries(const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) final; }; } // namespace mongo diff --git a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface_test.cpp index 3a457d8112de9..73bbb59ccfbe9 100644 --- a/src/mongo/db/pipeline/process_interface/shardsvr_process_interface_test.cpp +++ b/src/mongo/db/pipeline/process_interface/shardsvr_process_interface_test.cpp @@ -27,12 +27,30 @@ * it in the license file. 
*/ -#include "mongo/db/concurrency/lock_state.h" +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_out.h" #include "mongo/db/pipeline/document_source_queue.h" #include "mongo/db/pipeline/process_interface/shardsvr_process_interface.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/query/sharded_agg_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -81,7 +99,7 @@ TEST_F(ShardedProcessInterfaceTest, TestInsert) { const BSONObj indexBSON = BSON("_id" << 1); const BSONObj listIndexesResponse = BSON("v" << 1 << "key" << indexBSON << "name" << "_id_" - << "ns" << kOutNss.toString()); + << "ns" << kOutNss.toString_forTest()); onCommand([&](const executor::RemoteCommandRequest& request) { return CursorResponse(kTestAggregateNss, CursorId{0}, {listIndexesResponse}) .toBSON(CursorResponse::ResponseType::InitialResponse); diff --git a/src/mongo/db/pipeline/process_interface/standalone_process_interface.h b/src/mongo/db/pipeline/process_interface/standalone_process_interface.h index aceff8e692846..0f8b86bc1830d 100644 --- a/src/mongo/db/pipeline/process_interface/standalone_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/standalone_process_interface.h @@ -29,7 +29,15 @@ #pragma once +#include +#include + +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/common_process_interface.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/process_interface/non_shardsvr_process_interface.h" +#include "mongo/executor/task_executor.h" namespace mongo { @@ -41,6 +49,11 @@ class StandaloneProcessInterface : public NonShardServerProcessInterface { StandaloneProcessInterface(std::shared_ptr exec) : NonShardServerProcessInterface(std::move(exec)) {} + std::unique_ptr getWriteSizeEstimator( + OperationContext* opCtx, const NamespaceString& ns) const final { + return std::make_unique(); + } + virtual ~StandaloneProcessInterface() = default; }; diff --git a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp index 51e84b57ecfb4..d88b51ec32e43 100644 --- a/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp +++ b/src/mongo/db/pipeline/process_interface/standalone_process_interface_test.cpp @@ -27,9 +27,27 @@ * it in the license file. 
*/ +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/process_interface/standalone_process_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.cpp b/src/mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.cpp index 6ee4e6ef393a6..4abf6fcf8ed75 100644 --- a/src/mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.cpp +++ b/src/mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.cpp @@ -27,15 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h b/src/mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h index 4afb6724fba01..19525fc12d407 100644 --- a/src/mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/stub_lookup_single_document_process_interface.h @@ -30,14 +30,29 @@ #pragma once #include +#include +#include +#include +#include +#include #include +#include +#include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/pipeline/process_interface/stub_mongo_process_interface.h b/src/mongo/db/pipeline/process_interface/stub_mongo_process_interface.h index 7c11f25e699a1..453be0b672661 100644 --- a/src/mongo/db/pipeline/process_interface/stub_mongo_process_interface.h +++ b/src/mongo/db/pipeline/process_interface/stub_mongo_process_interface.h @@ -55,6 +55,16 @@ class StubMongoProcessInterface : public 
MongoProcessInterface { class StubWriteSizeEstimator final : public WriteSizeEstimator { public: + int estimateInsertHeaderSize( + const write_ops::InsertCommandRequest& insertReq) const override { + return 0; + } + + int estimateUpdateHeaderSize( + const write_ops::UpdateCommandRequest& insertReq) const override { + return 0; + } + int estimateInsertSizeBytes(const BSONObj& insert) const override { MONGO_UNREACHABLE; } @@ -64,6 +74,7 @@ class StubMongoProcessInterface : public MongoProcessInterface { MONGO_UNREACHABLE; } }; + std::unique_ptr getWriteSizeEstimator( OperationContext* opCtx, const NamespaceString& ns) const override { return std::make_unique(); @@ -77,15 +88,23 @@ class StubMongoProcessInterface : public MongoProcessInterface { Status insert(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - std::vector&& objs, + std::unique_ptr insertCommand, const WriteConcernOptions& wc, boost::optional) override { MONGO_UNREACHABLE; } + Status insertTimeseries(const boost::intrusive_ptr& expCtx, + const NamespaceString& ns, + std::unique_ptr insertCommand, + const WriteConcernOptions& wc, + boost::optional targetEpoch) override { + MONGO_UNREACHABLE; + } + StatusWith update(const boost::intrusive_ptr& expCtx, const NamespaceString& ns, - BatchedObjects&& batch, + std::unique_ptr updateCommand, const WriteConcernOptions& wc, UpsertType upsert, bool multi, @@ -110,6 +129,13 @@ class StubMongoProcessInterface : public MongoProcessInterface { MONGO_UNREACHABLE; } + void createTimeseriesView(OperationContext* opCtx, + const NamespaceString& ns, + const BSONObj& cmdObj, + const TimeseriesOptions& userOpts) final { + MONGO_UNREACHABLE; + } + boost::optional getCatalogEntry(OperationContext* opCtx, const NamespaceString& ns) const override { MONGO_UNREACHABLE; @@ -122,7 +148,7 @@ class StubMongoProcessInterface : public MongoProcessInterface { MONGO_UNREACHABLE; } - Status appendStorageStats(OperationContext* opCtx, + Status appendStorageStats(const boost::intrusive_ptr& expCtx, const NamespaceString& nss, const StorageStatsSpec& spec, BSONObjBuilder* builder, diff --git a/src/mongo/db/pipeline/resume_token.cpp b/src/mongo/db/pipeline/resume_token.cpp index aa4c8aa130be2..850f8c1bf07db 100644 --- a/src/mongo/db/pipeline/resume_token.cpp +++ b/src/mongo/db/pipeline/resume_token.cpp @@ -27,19 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/pipeline/resume_token.h" - -#include +#include +#include -#include "mongo/bson/bsonmisc.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/pipeline/change_stream_helpers_legacy.h" +#include "mongo/db/pipeline/resume_token.h" #include "mongo/db/storage/key_string.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" #include "mongo/util/hex.h" #include "mongo/util/optional_util.h" +#include "mongo/util/str.h" namespace mongo { constexpr StringData ResumeToken::kDataFieldName; @@ -165,7 +175,7 @@ ResumeToken::ResumeToken(const ResumeTokenData& data) { } auto keyObj = builder.obj(); - KeyString::Builder encodedToken(KeyString::Version::V1, keyObj, Ordering::make(BSONObj())); + key_string::Builder encodedToken(key_string::Version::V1, keyObj, Ordering::make(BSONObj())); _hexKeyString = hexblob::encode(encodedToken.getBuffer(), encodedToken.getSize()); const auto& typeBits = encodedToken.getTypeBits(); if (!typeBits.isAllZeros()) @@ -185,7 +195,7 @@ bool ResumeToken::operator==(const ResumeToken& other) const { } ResumeTokenData ResumeToken::getData() const { - KeyString::TypeBits typeBits(KeyString::Version::V1); + key_string::TypeBits typeBits(key_string::Version::V1); if (!_typeBits.missing()) { BSONBinData typeBitsBinData = _typeBits.getBinData(); BufReader typeBitsReader(typeBitsBinData.data, typeBitsBinData.length); @@ -200,10 +210,10 @@ ResumeTokenData ResumeToken::getData() const { hexblob::decode(_hexKeyString, &hexDecodeBuf); BSONBinData keyStringBinData = BSONBinData(hexDecodeBuf.buf(), hexDecodeBuf.len(), BinDataType::BinDataGeneral); - auto internalBson = KeyString::toBsonSafe(static_cast(keyStringBinData.data), - keyStringBinData.length, - Ordering::make(BSONObj()), - typeBits); + auto internalBson = key_string::toBsonSafe(static_cast(keyStringBinData.data), + keyStringBinData.length, + Ordering::make(BSONObj()), + typeBits); BSONObjIterator i(internalBson); ResumeTokenData result; diff --git a/src/mongo/db/pipeline/resume_token.h b/src/mongo/db/pipeline/resume_token.h index 1c3d88e67ed0f..e763d519d53ea 100644 --- a/src/mongo/db/pipeline/resume_token.h +++ b/src/mongo/db/pipeline/resume_token.h @@ -29,7 +29,13 @@ #pragma once +#include #include +#include +#include +#include +#include +#include #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/pipeline/resume_token_test.cpp b/src/mongo/db/pipeline/resume_token_test.cpp index 744e9223d7428..9c21fbcf27b3e 100644 --- a/src/mongo/db/pipeline/resume_token_test.cpp +++ b/src/mongo/db/pipeline/resume_token_test.cpp @@ -30,12 +30,21 @@ #include "mongo/db/pipeline/resume_token.h" -#include -#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/pipeline/document_source_change_stream.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include 
"mongo/util/hex.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/pipeline/sampling_based_initial_split_policy_test.cpp b/src/mongo/db/pipeline/sampling_based_initial_split_policy_test.cpp index b1d2c223caf2a..90676a3e6a8eb 100644 --- a/src/mongo/db/pipeline/sampling_based_initial_split_policy_test.cpp +++ b/src/mongo/db/pipeline/sampling_based_initial_split_policy_test.cpp @@ -27,14 +27,36 @@ * it in the license file. */ -#include "mongo/db/catalog/collection_catalog.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/db/pipeline/sharded_agg_helpers.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/s/config/initial_split_policy.h" +#include "mongo/db/shard_id.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" #include "mongo/s/query/sharded_agg_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/pipeline/search_helper.cpp b/src/mongo/db/pipeline/search_helper.cpp index e5bfb6764bc28..9409b23fb4ce7 100644 --- a/src/mongo/db/pipeline/search_helper.cpp +++ b/src/mongo/db/pipeline/search_helper.cpp @@ -28,9 +28,18 @@ */ #include "mongo/db/pipeline/search_helper.h" -#include "mongo/db/pipeline/dependencies.h" + +#include +#include +#include +#include +#include + +#include + #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/variables.h" +#include "mongo/util/assert_util.h" namespace mongo { ServiceContext::Decoration> getSearchHelpers = diff --git a/src/mongo/db/pipeline/search_helper.h b/src/mongo/db/pipeline/search_helper.h index 62d0ecb6c152f..1653f36aaaa03 100644 --- a/src/mongo/db/pipeline/search_helper.h +++ b/src/mongo/db/pipeline/search_helper.h @@ -29,7 +29,19 @@ #pragma once +#include + +#include +#include +#include + +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/pipeline/semantic_analysis.cpp b/src/mongo/db/pipeline/semantic_analysis.cpp index 04aa2de4541a6..033539fd996d5 100644 --- a/src/mongo/db/pipeline/semantic_analysis.cpp +++ b/src/mongo/db/pipeline/semantic_analysis.cpp @@ -27,13 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/pipeline/document_source_replace_root.h" +#include "mongo/db/pipeline/document_source_single_document_transformation.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/semantic_analysis.h" +#include "mongo/db/pipeline/transformer_interface.h" +#include "mongo/util/assert_util.h" namespace mongo::semantic_analysis { diff --git a/src/mongo/db/pipeline/semantic_analysis.h b/src/mongo/db/pipeline/semantic_analysis.h index befbddac92344..cd6ce98853b68 100644 --- a/src/mongo/db/pipeline/semantic_analysis.h +++ b/src/mongo/db/pipeline/semantic_analysis.h @@ -29,12 +29,18 @@ #pragma once +#include #include +#include +#include #include #include +#include +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/util/string_map.h" namespace mongo::semantic_analysis { diff --git a/src/mongo/db/pipeline/semantic_analysis_test.cpp b/src/mongo/db/pipeline/semantic_analysis_test.cpp index e86ad2cf2a809..72f0966029b0f 100644 --- a/src/mongo/db/pipeline/semantic_analysis_test.cpp +++ b/src/mongo/db/pipeline/semantic_analysis_test.cpp @@ -27,17 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/bson/bsonmisc.h" +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_test_optimizations.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/semantic_analysis.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/sequential_document_cache.cpp b/src/mongo/db/pipeline/sequential_document_cache.cpp index 691d3c54ef0a1..2aefe6f83f65a 100644 --- a/src/mongo/db/pipeline/sequential_document_cache.cpp +++ b/src/mongo/db/pipeline/sequential_document_cache.cpp @@ -27,12 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include #include "mongo/db/pipeline/sequential_document_cache.h" - -#include "mongo/base/error_codes.h" -#include "mongo/base/status.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/pipeline/sequential_document_cache.h b/src/mongo/db/pipeline/sequential_document_cache.h index 99d0f97694f13..8705d78b8f312 100644 --- a/src/mongo/db/pipeline/sequential_document_cache.h +++ b/src/mongo/db/pipeline/sequential_document_cache.h @@ -31,11 +31,11 @@ #include #include +#include #include -#include "mongo/db/exec/document_value/document.h" - #include "mongo/base/status.h" +#include "mongo/db/exec/document_value/document.h" namespace mongo { diff --git a/src/mongo/db/pipeline/sequential_document_cache_test.cpp b/src/mongo/db/pipeline/sequential_document_cache_test.cpp index 2fa47818b0f10..c3f4f8d3ad370 100644 --- a/src/mongo/db/pipeline/sequential_document_cache_test.cpp +++ b/src/mongo/db/pipeline/sequential_document_cache_test.cpp @@ -27,13 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/sequential_document_cache.h" +#include +#include +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/pipeline/sequential_document_cache.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.cpp b/src/mongo/db/pipeline/sharded_agg_helpers.cpp index 8e5fb369e796f..5344a404dbeb8 100644 --- a/src/mongo/db/pipeline/sharded_agg_helpers.cpp +++ b/src/mongo/db/pipeline/sharded_agg_helpers.cpp @@ -27,21 +27,54 @@ * it in the license file. 
*/ - #include "mongo/db/pipeline/sharded_agg_helpers.h" -#include "mongo/db/catalog_shard_feature_flag_gen.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/read_preference.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/commands.h" #include "mongo/db/curop.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_change_stream.h" -#include "mongo/db/pipeline/document_source_change_stream_handle_topology_change.h" +#include "mongo/db/pipeline/document_source_change_stream_gen.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_limit.h" -#include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_merge.h" -#include "mongo/db/pipeline/document_source_out.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_sequential_document_cache.h" #include "mongo/db/pipeline/document_source_set_variable_from_subpipeline.h" @@ -49,23 +82,57 @@ #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/document_source_unwind.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" -#include "mongo/db/pipeline/search_helper.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/semantic_analysis.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/vector_clock.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/analyze_shard_key_common_gen.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/collection_uuid_mismatch.h" -#include "mongo/s/is_mongos.h" +#include "mongo/s/database_version.h" +#include "mongo/s/grid.h" +#include "mongo/s/query/async_results_merger_params_gen.h" #include 
"mongo/s/query/cluster_query_knobs_gen.h" #include "mongo/s/query/document_source_merge_cursors.h" #include "mongo/s/query/establish_cursors.h" #include "mongo/s/query_analysis_sampler_util.h" #include "mongo/s/router_role.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -118,13 +185,6 @@ RemoteCursor openChangeStreamNewShardMonitor(const boost::intrusive_ptrchangeStreamTokenVersion == 1) { - // A request for v1 resume tokens on mongos should only be allowed in test mode. - tassert(6497000, "Invalid request for v1 resume tokens", getTestCommandsEnabled()); - aggReq.setGenerateV2ResumeTokens(false); - } - SimpleCursorOptions cursor; cursor.setBatchSize(0); aggReq.setCursor(cursor); @@ -164,11 +224,12 @@ BSONObj genericTransformForShards(MutableDocument&& cmdForShards, } if (expCtx->opCtx->getTxnNumber()) { - invariant(cmdForShards.peek()[OperationSessionInfo::kTxnNumberFieldName].missing(), - str::stream() << "Command for shards unexpectedly had the " - << OperationSessionInfo::kTxnNumberFieldName - << " field set: " << cmdForShards.peek().toString()); - cmdForShards[OperationSessionInfo::kTxnNumberFieldName] = + invariant( + cmdForShards.peek()[OperationSessionInfoFromClient::kTxnNumberFieldName].missing(), + str::stream() << "Command for shards unexpectedly had the " + << OperationSessionInfoFromClient::kTxnNumberFieldName + << " field set: " << cmdForShards.peek().toString()); + cmdForShards[OperationSessionInfoFromClient::kTxnNumberFieldName] = Value(static_cast(*expCtx->opCtx->getTxnNumber())); } @@ -189,6 +250,7 @@ std::vector establishShardCursors( const BSONObj& cmdObj, const boost::optional& sampleId, const ReadPreferenceSetting& readPref, + AsyncRequestsSender::ShardHostMap designatedHostsMap, bool targetEveryShardServer) { LOGV2_DEBUG(20904, 1, @@ -201,9 +263,8 @@ std::vector establishShardCursors( invariant(cri || mustRunOnAllShards); if (targetEveryShardServer) { - uassert(7355703, - "Cannot target all hosts if the pipeline is not run on all shards.", - mustRunOnAllShards); + // If we are running on all shard servers we should never designate a particular server. 
+ invariant(designatedHostsMap.empty()); if (MONGO_unlikely(shardedAggregateHangBeforeEstablishingShardCursors.shouldFail())) { LOGV2( 7355704, @@ -268,7 +329,9 @@ std::vector establishShardCursors( readPref, requests, false /* do not allow partial results */, - getDesiredRetryPolicy(opCtx)); + getDesiredRetryPolicy(opCtx), + {} /* providedOpKeys */, + designatedHostsMap); } std::set getTargetedShards(boost::intrusive_ptr expCtx, @@ -1045,7 +1108,8 @@ DispatchShardPipelineResults dispatchShardPipeline( std::unique_ptr pipeline, boost::optional explain, ShardTargetingPolicy shardTargetingPolicy, - boost::optional readConcern) { + boost::optional readConcern, + AsyncRequestsSender::ShardHostMap designatedHostsMap) { auto expCtx = pipeline->getContext(); // The process is as follows: @@ -1192,7 +1256,7 @@ DispatchShardPipelineResults dispatchShardPipeline( // shards, and should participate in the shard version protocol. invariant(executionNsRoutingInfo); shardResults = - scatterGatherVersionedTargetByRoutingTable(opCtx, + scatterGatherVersionedTargetByRoutingTable(expCtx, expCtx->ns.db(), expCtx->ns, *executionNsRoutingInfo, @@ -1213,6 +1277,7 @@ DispatchShardPipelineResults dispatchShardPipeline( targetedCommand, targetedSampleId, ReadPreferenceSetting::get(opCtx), + designatedHostsMap, targetEveryShardServer); } catch (const ExceptionFor& e) { @@ -1272,24 +1337,21 @@ AsyncResultsMergerParams buildArmParams(boost::intrusive_ptr armParams.setTailableMode(expCtx->tailableMode); armParams.setNss(expCtx->ns); - OperationSessionInfoFromClient sessionInfo; - boost::optional lsidFromClient; + if (auto lsid = expCtx->opCtx->getLogicalSessionId()) { + OperationSessionInfoFromClient sessionInfo([&] { + LogicalSessionFromClient lsidFromClient(lsid->getId()); + lsidFromClient.setUid(lsid->getUid()); + return lsidFromClient; + }()); + sessionInfo.setTxnNumber(expCtx->opCtx->getTxnNumber()); - auto lsid = expCtx->opCtx->getLogicalSessionId(); - if (lsid) { - lsidFromClient.emplace(lsid->getId()); - lsidFromClient->setUid(lsid->getUid()); - } - - sessionInfo.setSessionId(lsidFromClient); - sessionInfo.setTxnNumber(expCtx->opCtx->getTxnNumber()); + if (TransactionRouter::get(expCtx->opCtx)) { + sessionInfo.setAutocommit(false); + } - if (TransactionRouter::get(expCtx->opCtx)) { - sessionInfo.setAutocommit(false); + armParams.setOperationSessionInfo(sessionInfo); } - armParams.setOperationSessionInfo(sessionInfo); - // Convert owned cursors into a vector of remote cursors to be transferred to the merge // pipeline. std::vector remoteCursors; @@ -1597,7 +1659,7 @@ std::unique_ptr attachCursorToPipeline( // these namespaces, a local cursor should always be used. // TODO SERVER-59957: use NamespaceString::isPerShardNamespace instead. auto shouldAlwaysAttachLocalCursorForNamespace = [](const NamespaceString& ns) { - return (ns.isLocal() || ns.isConfigDotCacheDotChunks() || + return (ns.isLocalDB() || ns.isConfigDotCacheDotChunks() || ns.isReshardingLocalOplogBufferCollection() || ns == NamespaceString::kConfigImagesNamespace || ns.isChangeStreamPreImagesCollection()); @@ -1619,21 +1681,11 @@ std::unique_ptr attachCursorToPipeline( const auto& cm = cri.cm; auto pipelineToTarget = pipeline->clone(); - if (!cm.isSharded() && - // TODO SERVER-75391: Remove this condition. 
- (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || - expCtx->ns != NamespaceString::kConfigsvrCollectionsNamespace)) { + if (!cm.isSharded()) { // If the collection is unsharded and we are on the primary, we should be able to // do a local read. The primary may be moved right after the primary shard check, // but the local read path will do a db version check before it establishes a cursor // to catch this case and ensure we fail to read locally. - // - // There is the case where we are in config.collections (collection unsharded) and - // we want to broadcast to all shards for the $shardedDataDistribution pipeline. In - // this case we don't want to do a local read and we must target the config servers. - // In 7.0, only the config server will be targeted for this collection, but in a - // mixed version cluster, an older binary mongos may still target a shard, so if the - // current node is not the config server, we force remote targeting. try { auto expectUnshardedCollection( expCtx->mongoProcessInterface->expectUnshardedCollectionInScope( diff --git a/src/mongo/db/pipeline/sharded_agg_helpers.h b/src/mongo/db/pipeline/sharded_agg_helpers.h index 2460cfc874fbf..c08bcc7be3038 100644 --- a/src/mongo/db/pipeline/sharded_agg_helpers.h +++ b/src/mongo/db/pipeline/sharded_agg_helpers.h @@ -29,9 +29,33 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/exchange_spec_gen.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/shard_id.h" #include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/query/owned_remote_cursor.h" #include "mongo/s/stale_shard_version_helpers.h" #include "mongo/stdx/variant.h" @@ -145,7 +169,8 @@ DispatchShardPipelineResults dispatchShardPipeline( std::unique_ptr pipeline, boost::optional explain, ShardTargetingPolicy shardTargetingPolicy = ShardTargetingPolicy::kAllowed, - boost::optional readConcern = boost::none); + boost::optional readConcern = boost::none, + AsyncRequestsSender::ShardHostMap designatedHostsMap = {}); BSONObj createPassthroughCommandForShard( const boost::intrusive_ptr& expCtx, diff --git a/src/mongo/db/pipeline/sharded_union_test.cpp b/src/mongo/db/pipeline/sharded_union_test.cpp index e29c8d1960479..10e9be845ef6f 100644 --- a/src/mongo/db/pipeline/sharded_union_test.cpp +++ b/src/mongo/db/pipeline/sharded_union_test.cpp @@ -27,18 +27,71 @@ * it in the license file. 
*/ +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_queue.h" #include "mongo/db/pipeline/document_source_union_with.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/shardsvr_process_interface.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/shard_id.h" #include "mongo/db/views/resolved_view.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/query/sharded_agg_test_fixture.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/stale_exception.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -406,7 +459,7 @@ TEST_F(ShardedUnionTest, IncorporatesViewDefinitionAndRetriesWhenViewErrorReceiv auto cm = loadRoutingTableWithTwoChunksAndTwoShards(kTestAggregateNss); NamespaceString nsToUnionWith = - NamespaceString::createNamespaceString_forTest(expCtx()->ns.db(), "view"); + NamespaceString::createNamespaceString_forTest(expCtx()->ns.db_forTest(), "view"); // Mock out the view namespace as emtpy for now - this is what it would be when parsing in a // sharded cluster - only later would we learn the actual view definition. expCtx()->setResolvedNamespaces(StringMap{ diff --git a/src/mongo/db/pipeline/skip_and_limit.cpp b/src/mongo/db/pipeline/skip_and_limit.cpp index e9e7e9772ce3f..d8b6aaa9a75fa 100644 --- a/src/mongo/db/pipeline/skip_and_limit.cpp +++ b/src/mongo/db/pipeline/skip_and_limit.cpp @@ -27,12 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include "mongo/base/exact_cast.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_skip.h" #include "mongo/db/pipeline/skip_and_limit.h" +#include "mongo/db/pipeline/stage_constraints.h" #include "mongo/platform/overflow_arithmetic.h" namespace mongo { @@ -82,8 +93,21 @@ Pipeline::SourceContainer::iterator eraseAndStich(Pipeline::SourceContainer::ite } // namespace -boost::optional extractLimitForPushdown(Pipeline::SourceContainer::iterator itr, - Pipeline::SourceContainer* container) { +/** + * If there are any $limit stages that could be logically swapped forward to the position of the + * pipeline pointed to by 'itr' without changing the meaning of the query, removes these $limit + * stages from the Pipeline and returns the resulting limit. A single limit value is computed by + * taking the minimum after swapping each individual $limit stage forward. + * + * This method also implements the ability to swap a $limit before a $skip, by adding the value of + * the $skip to the value of the $limit. + * + * If shouldModifyPipeline is false, this method does not swap any stages but rather just returns + * the single limit value described above. + */ +boost::optional extractLimitForPushdownHelper(Pipeline::SourceContainer::iterator itr, + Pipeline::SourceContainer* container, + bool shouldModifyPipeline) { int64_t skipSum = 0; boost::optional minLimit; while (itr != container->end()) { @@ -104,7 +128,11 @@ boost::optional extractLimitForPushdown(Pipeline::SourceContainer::it minLimit = std::min(static_cast(safeSum), *minLimit); } - itr = eraseAndStich(itr, container); + if (shouldModifyPipeline) { + itr = eraseAndStich(itr, container); + } else { + ++itr; + } } else if (!nextStage->constraints().canSwapWithSkippingOrLimitingStage) { break; } else { @@ -115,6 +143,16 @@ boost::optional extractLimitForPushdown(Pipeline::SourceContainer::it return minLimit; } +boost::optional extractLimitForPushdown(Pipeline::SourceContainer::iterator itr, + Pipeline::SourceContainer* container) { + return extractLimitForPushdownHelper(itr, container, true /* shouldModifyPipeline */); +} + +boost::optional getUserLimit(Pipeline::SourceContainer::iterator itr, + Pipeline::SourceContainer* container) { + return extractLimitForPushdownHelper(itr, container, false /* shouldModifyPipeline */); +} + boost::optional extractSkipForPushdown(Pipeline::SourceContainer::iterator itr, Pipeline::SourceContainer* container) { boost::optional skipSum; diff --git a/src/mongo/db/pipeline/skip_and_limit.h b/src/mongo/db/pipeline/skip_and_limit.h index 2a3ecec598272..86c8a553ebfca 100644 --- a/src/mongo/db/pipeline/skip_and_limit.h +++ b/src/mongo/db/pipeline/skip_and_limit.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/db/pipeline/pipeline.h" @@ -88,6 +89,13 @@ class LimitThenSkip final : public SkipAndLimit { boost::optional extractLimitForPushdown(Pipeline::SourceContainer::iterator itr, Pipeline::SourceContainer* container); +/** + * This is similar to extractLimitForPushdown, except that it should be used when the caller does + * not want to modify the pipeline but still obtain the calculated limit value of the query. 
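(Editorial aside, not part of the patch.) A small worked example of the skip/limit arithmetic described in the doc comment above, using hypothetical stage values:

    // Pipeline tail: {$skip: 5}, {$limit: 10}, {$skip: 3}, {$limit: 20}
    // Each $limit swapped forward absorbs the skips it crosses:
    //   10 + 5 = 15 and 20 + (5 + 3) = 28, so the pushed-down limit is min(15, 28) = 15.
    // extractLimitForPushdown() removes the two $limit stages and returns 15, while
    // getUserLimit() returns the same 15 but leaves the pipeline unmodified.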
+ */ +boost::optional getUserLimit(Pipeline::SourceContainer::iterator itr, + Pipeline::SourceContainer* container); + /** * If there are any $skip stages that could be logically swapped forward to the position of the * pipeline pointed to by 'itr' without changing the meaning of the query, removes these $skip diff --git a/src/mongo/db/pipeline/skip_and_limit_test.cpp b/src/mongo/db/pipeline/skip_and_limit_test.cpp index 1d4798f3e2ddb..a15060de26ac8 100644 --- a/src/mongo/db/pipeline/skip_and_limit_test.cpp +++ b/src/mongo/db/pipeline/skip_and_limit_test.cpp @@ -27,12 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include +#include +#include "mongo/base/string_data.h" #include "mongo/db/pipeline/skip_and_limit.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/pipeline/sort_reorder_helpers.cpp b/src/mongo/db/pipeline/sort_reorder_helpers.cpp index f8ff49f6eabcd..9ba31f87f1068 100644 --- a/src/mongo/db/pipeline/sort_reorder_helpers.cpp +++ b/src/mongo/db/pipeline/sort_reorder_helpers.cpp @@ -29,6 +29,27 @@ #include "mongo/db/pipeline/sort_reorder_helpers.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/matcher/expression_algo.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/document_source_graph_lookup.h" +#include "mongo/db/pipeline/document_source_lookup.h" +#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" + namespace mongo { bool checkModifiedPathsSortReorder(const SortPattern& sortPattern, diff --git a/src/mongo/db/pipeline/sort_reorder_helpers.h b/src/mongo/db/pipeline/sort_reorder_helpers.h index c5d3b6ca53a3f..54024761a0d87 100644 --- a/src/mongo/db/pipeline/sort_reorder_helpers.h +++ b/src/mongo/db/pipeline/sort_reorder_helpers.h @@ -31,6 +31,7 @@ #include "mongo/db/pipeline/document_source_graph_lookup.h" #include "mongo/db/pipeline/document_source_lookup.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/sort_pattern.h" namespace mongo { diff --git a/src/mongo/db/pipeline/stage_constraints.h b/src/mongo/db/pipeline/stage_constraints.h index 511a2c6bf0bac..b1fc57def20dd 100644 --- a/src/mongo/db/pipeline/stage_constraints.h +++ b/src/mongo/db/pipeline/stage_constraints.h @@ -77,7 +77,7 @@ struct StageConstraints { kAnyShard, // Indicates that the stage can only run on mongoS. kMongoS, - // Indicates that the stage should run on all data-bearing nodes, primary and seconday, for + // Indicates that the stage should run on all data-bearing nodes, primary and secondary, for // the participating shards. This is useful for stages like $currentOp which generate // node-specific metadata. kAllShardServers, diff --git a/src/mongo/db/pipeline/storage_stats_spec.idl b/src/mongo/db/pipeline/storage_stats_spec.idl index a241dfb2b84cd..59bfe5c5230ad 100644 --- a/src/mongo/db/pipeline/storage_stats_spec.idl +++ b/src/mongo/db/pipeline/storage_stats_spec.idl @@ -36,18 +36,23 @@ structs: StorageStatsSpec: description: Represents the 'storageStats' argument to the $collStats stage. strict: false + query_shape_component: true fields: scale: description: A number to use as a scaling factor applied to reported metrics. 
type: safeInt optional: true validator: { gte: 1 } + query_shape: literal verbose: type: optionalBool default: false + query_shape: parameter waitForLock: type: optionalBool default: true + query_shape: parameter numericOnly: type: optionalBool default: false + query_shape: parameter diff --git a/src/mongo/db/pipeline/tee_buffer.cpp b/src/mongo/db/pipeline/tee_buffer.cpp index a7e5019f3063e..d1088c130a6c2 100644 --- a/src/mongo/db/pipeline/tee_buffer.cpp +++ b/src/mongo/db/pipeline/tee_buffer.cpp @@ -27,13 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/tee_buffer.h" - #include +#include +#include + +#include #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/tee_buffer.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/tee_buffer.h b/src/mongo/db/pipeline/tee_buffer.h index 55d5aea136bf7..fd36eb41d8b9d 100644 --- a/src/mongo/db/pipeline/tee_buffer.h +++ b/src/mongo/db/pipeline/tee_buffer.h @@ -31,11 +31,15 @@ #include #include +#include +#include +#include #include #include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/tee_buffer_test.cpp b/src/mongo/db/pipeline/tee_buffer_test.cpp index 42fae3730b26a..b8967389424b2 100644 --- a/src/mongo/db/pipeline/tee_buffer_test.cpp +++ b/src/mongo/db/pipeline/tee_buffer_test.cpp @@ -27,15 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/tee_buffer.h" +#include +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/tee_buffer.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/transformer_interface.h b/src/mongo/db/pipeline/transformer_interface.h index f9fefccf9ab9e..95fef596b8aac 100644 --- a/src/mongo/db/pipeline/transformer_interface.h +++ b/src/mongo/db/pipeline/transformer_interface.h @@ -55,7 +55,7 @@ class TransformerInterface { kGroupFromFirstDocument, }; virtual ~TransformerInterface() = default; - virtual Document applyTransformation(const Document& input) = 0; + virtual Document applyTransformation(const Document& input) const = 0; virtual TransformerType getType() const = 0; virtual void optimize() = 0; virtual DepsTracker::State addDependencies(DepsTracker* deps) const = 0; diff --git a/src/mongo/db/pipeline/variable_validation.cpp b/src/mongo/db/pipeline/variable_validation.cpp index a6eb26ac07026..d850a2aa251f2 100644 --- a/src/mongo/db/pipeline/variable_validation.cpp +++ b/src/mongo/db/pipeline/variable_validation.cpp @@ -27,8 +27,13 @@ * it in the license file. 
*/ +#include +#include + #include "mongo/base/error_codes.h" -#include "mongo/util/stacktrace.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo::variableValidation { diff --git a/src/mongo/db/pipeline/variables.cpp b/src/mongo/db/pipeline/variables.cpp index 9e2a5b60b64e1..e9ad5b93fa873 100644 --- a/src/mongo/db/pipeline/variables.cpp +++ b/src/mongo/db/pipeline/variables.cpp @@ -28,15 +28,35 @@ */ #include "mongo/db/pipeline/variables.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/client.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_dependencies.h" #include "mongo/db/pipeline/variable_validation.h" #include "mongo/db/vector_clock.h" -#include "mongo/platform/basic.h" -#include "mongo/platform/random.h" +#include "mongo/transport/session.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/pipeline/variables.h b/src/mongo/db/pipeline/variables.h index efc089d7184d6..48be20b32321c 100644 --- a/src/mongo/db/pipeline/variables.h +++ b/src/mongo/db/pipeline/variables.h @@ -29,12 +29,28 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h b/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h index 24d11c814be4f..b2ce89d66e9bc 100644 --- a/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h +++ b/src/mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h @@ -63,6 +63,7 @@ #include "mongo/db/pipeline/document_source_list_cached_and_active_users.h" #include "mongo/db/pipeline/document_source_list_catalog.h" #include "mongo/db/pipeline/document_source_list_local_sessions.h" +#include "mongo/db/pipeline/document_source_list_sampled_queries.h" #include "mongo/db/pipeline/document_source_list_sessions.h" #include "mongo/db/pipeline/document_source_lookup.h" #include "mongo/db/pipeline/document_source_match.h" @@ -70,6 +71,7 @@ #include "mongo/db/pipeline/document_source_operation_metrics.h" #include "mongo/db/pipeline/document_source_out.h" #include "mongo/db/pipeline/document_source_plan_cache_stats.h" +#include "mongo/db/pipeline/document_source_query_stats.h" #include "mongo/db/pipeline/document_source_queue.h" #include "mongo/db/pipeline/document_source_redact.h" #include "mongo/db/pipeline/document_source_replace_root.h" @@ 
-83,10 +85,13 @@ #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/document_source_streaming_group.h" #include "mongo/db/pipeline/document_source_tee_consumer.h" -#include "mongo/db/pipeline/document_source_telemetry.h" #include "mongo/db/pipeline/document_source_union_with.h" #include "mongo/db/pipeline/document_source_unwind.h" #include "mongo/db/pipeline/visitors/document_source_visitor_registry.h" +#include "mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h" +#include "mongo/db/s/resharding/document_source_resharding_add_resume_id.h" +#include "mongo/db/s/resharding/document_source_resharding_iterate_transaction.h" +#include "mongo/db/s/resharding/document_source_resharding_ownership_match.h" namespace mongo { @@ -151,6 +156,7 @@ void registerMongodVisitor(ServiceContext* service) { DocumentSourceListCachedAndActiveUsers, DocumentSourceListCatalog, DocumentSourceListLocalSessions, + analyze_shard_key::DocumentSourceListSampledQueries, DocumentSourceListSessions, DocumentSourceLookUp, DocumentSourceMatch, @@ -169,9 +175,23 @@ void registerMongodVisitor(ServiceContext* service) { DocumentSourceSort, DocumentSourceStreamingGroup, DocumentSourceTeeConsumer, - DocumentSourceTelemetry, + DocumentSourceQueryStats, DocumentSourceUnionWith, DocumentSourceUnwind>(®istry); } +/** + * See 'registerMongodVisitor'. This function has the same semantics except for the DocumentSources + * defined in the 's/sharding_runtime_d' module. + */ +template +void registerShardingRuntimeDVisitor(ServiceContext* service) { + auto& registry = getDocumentSourceVisitorRegistry(service); + registerVisitFuncs(®istry); +} + } // namespace mongo diff --git a/src/mongo/db/pipeline/visitors/document_source_walker.cpp b/src/mongo/db/pipeline/visitors/document_source_walker.cpp index 6076aaf82e50f..e820d513450b4 100644 --- a/src/mongo/db/pipeline/visitors/document_source_walker.cpp +++ b/src/mongo/db/pipeline/visitors/document_source_walker.cpp @@ -29,6 +29,10 @@ #include "mongo/db/pipeline/visitors/document_source_walker.h" +#include + +#include + namespace mongo { void DocumentSourceWalker::walk(const Pipeline& pipeline) { diff --git a/src/mongo/db/pipeline/visitors/document_source_walker_test.cpp b/src/mongo/db/pipeline/visitors/document_source_walker_test.cpp index 20dfc567d2beb..4673df3341b02 100644 --- a/src/mongo/db/pipeline/visitors/document_source_walker_test.cpp +++ b/src/mongo/db/pipeline/visitors/document_source_walker_test.cpp @@ -27,15 +27,26 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/visitors/document_source_visitor_registry.h" #include "mongo/db/pipeline/visitors/document_source_walker.h" +#include "mongo/db/query/util/make_data_structure.h" #include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/visitors/transformer_interface_walker.cpp b/src/mongo/db/pipeline/visitors/transformer_interface_walker.cpp index 11555f764b981..976ef5312c66d 100644 --- a/src/mongo/db/pipeline/visitors/transformer_interface_walker.cpp +++ b/src/mongo/db/pipeline/visitors/transformer_interface_walker.cpp @@ -28,11 +28,13 @@ */ #include "mongo/db/pipeline/visitors/transformer_interface_walker.h" + #include "mongo/db/exec/add_fields_projection_executor.h" #include "mongo/db/exec/exclusion_projection_executor.h" #include "mongo/db/exec/inclusion_projection_executor.h" -#include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_replace_root.h" +#include "mongo/db/pipeline/group_from_first_document_transformation.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/partition_iterator.cpp b/src/mongo/db/pipeline/window_function/partition_iterator.cpp index 9acee8e118ba6..af31553bd87bf 100644 --- a/src/mongo/db/pipeline/window_function/partition_iterator.cpp +++ b/src/mongo/db/pipeline/window_function/partition_iterator.cpp @@ -27,10 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include + +#include +#include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/platform/decimal128.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" using boost::optional; diff --git a/src/mongo/db/pipeline/window_function/partition_iterator.h b/src/mongo/db/pipeline/window_function/partition_iterator.h index 28e0e6a6242ff..0a5c2d02e78a7 100644 --- a/src/mongo/db/pipeline/window_function/partition_iterator.h +++ b/src/mongo/db/pipeline/window_function/partition_iterator.h @@ -29,14 +29,30 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/partition_key_comparator.h" #include "mongo/db/pipeline/window_function/spillable_cache.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/sort_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/partition_iterator_test.cpp b/src/mongo/db/pipeline/window_function/partition_iterator_test.cpp index 16be89ef9c171..d989ef734b7e4 100644 --- a/src/mongo/db/pipeline/window_function/partition_iterator_test.cpp +++ b/src/mongo/db/pipeline/window_function/partition_iterator_test.cpp @@ -27,15 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/spillable_cache.cpp b/src/mongo/db/pipeline/window_function/spillable_cache.cpp index 307cc106755b3..fb0db8474377d 100644 --- a/src/mongo/db/pipeline/window_function/spillable_cache.cpp +++ b/src/mongo/db/pipeline/window_function/spillable_cache.cpp @@ -27,12 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/pipeline/window_function/spillable_cache.h" +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/window_function/spillable_cache.h" #include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/record_data.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/spillable_cache.h b/src/mongo/db/pipeline/window_function/spillable_cache.h index 4969b01d85411..7447a5586668c 100644 --- a/src/mongo/db/pipeline/window_function/spillable_cache.h +++ b/src/mongo/db/pipeline/window_function/spillable_cache.h @@ -29,9 +29,15 @@ #pragma once +#include +#include +#include +#include + #include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/memory_usage_tracker.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/temporary_record_store.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/spillable_cache_test.cpp b/src/mongo/db/pipeline/window_function/spillable_cache_test.cpp index 7599842de575e..b388ded724e9c 100644 --- a/src/mongo/db/pipeline/window_function/spillable_cache_test.cpp +++ b/src/mongo/db/pipeline/window_function/spillable_cache_test.cpp @@ -27,15 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregation_mongod_context_fixture.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" #include "mongo/db/pipeline/window_function/spillable_cache.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -56,7 +81,7 @@ class MongoProcessInterfaceForTest : public StubMongoProcessInterface { std::vector* records, const std::vector& ts) const override { - writeConflictRetry(expCtx->opCtx, "MPI::writeRecordsToRecordStore", expCtx->ns.ns(), [&] { + writeConflictRetry(expCtx->opCtx, "MPI::writeRecordsToRecordStore", expCtx->ns, [&] { AutoGetCollection autoColl(expCtx->opCtx, expCtx->ns, MODE_IX); WriteUnitOfWork wuow(expCtx->opCtx); auto writeResult = 
rs->insertRecords(expCtx->opCtx, records, ts); diff --git a/src/mongo/db/pipeline/window_function/window_bounds.cpp b/src/mongo/db/pipeline/window_function/window_bounds.cpp index 136e8f0c6c48d..74f2f3a5da2c0 100644 --- a/src/mongo/db/pipeline/window_function/window_bounds.cpp +++ b/src/mongo/db/pipeline/window_function/window_bounds.cpp @@ -27,9 +27,33 @@ * it in the license file. */ -#include "mongo/platform/basic.h" -#include "mongo/db/pipeline/window_function/window_function_expression.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/window_function/window_bounds.h" +#include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" using boost::intrusive_ptr; using boost::optional; @@ -70,7 +94,7 @@ Value serializeBound(const WindowBounds::Bound& bound, SerializationOptions o [&](const WindowBounds::Current&) { return Value(WindowBounds::kValCurrent); }, [&](const T& n) { // If not "unbounded" or "current", n must be a literal constant - return opts.serializeLiteralValue(n); + return opts.serializeLiteral(n); }, }, bound); @@ -239,8 +263,8 @@ void WindowBounds::serialize(MutableDocument& args, SerializationOptions opts) c serializeBound(rangeBounds.upper, opts), }}; if (rangeBounds.unit) { - args[kArgUnit] = Value{ - opts.serializeLiteralValue(serializeTimeUnit(*rangeBounds.unit))}; + args[kArgUnit] = + opts.serializeLiteral(serializeTimeUnit(*rangeBounds.unit)); } }, }, diff --git a/src/mongo/db/pipeline/window_function/window_function_add_to_set.h b/src/mongo/db/pipeline/window_function/window_function_add_to_set.h index f9433ca6aa867..c89424f844103 100644 --- a/src/mongo/db/pipeline/window_function/window_function_add_to_set.h +++ b/src/mongo/db/pipeline/window_function/window_function_add_to_set.h @@ -29,7 +29,18 @@ #pragma once +#include +#include +#include +#include + +#include + +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -76,7 +87,7 @@ class WindowFunctionAddToSet final : public WindowFunctionState { output.push_back(*it); } - return Value(output); + return Value(std::move(output)); } private: diff --git a/src/mongo/db/pipeline/window_function/window_function_add_to_set_test.cpp b/src/mongo/db/pipeline/window_function/window_function_add_to_set_test.cpp index 25e444826a62e..682566f283a49 100644 --- a/src/mongo/db/pipeline/window_function/window_function_add_to_set_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_add_to_set_test.cpp @@ -27,12 +27,18 @@ * it in the license file. 
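The serializeBound() hunk above keeps the existing visitation pattern: a WindowBounds::Bound is a variant of Unbounded, Current, or a literal offset, and serialization dispatches on whichever alternative is held, now routing literal constants through opts.serializeLiteral(). Below is a minimal standalone sketch of that idiom, assuming plain std::variant and the generic "overloaded" helper in place of the server's stdx::variant and OverloadedVisitor; the type names and the string output are illustrative only, not the server's API.

```cpp
// Illustrative sketch only: serialize a window bound by visiting a variant,
// mirroring the serializeBound() structure shown in the patch above.
#include <iostream>
#include <string>
#include <variant>

struct Unbounded {};
struct Current {};
using Bound = std::variant<Unbounded, Current, long long>;

// The classic "overloaded" visitor: inherit the call operator of each lambda.
template <class... Ts>
struct overloaded : Ts... {
    using Ts::operator()...;
};
template <class... Ts>
overloaded(Ts...) -> overloaded<Ts...>;

std::string serializeBound(const Bound& bound) {
    return std::visit(overloaded{
                          [](const Unbounded&) { return std::string{"\"unbounded\""}; },
                          [](const Current&) { return std::string{"\"current\""}; },
                          // If the bound is neither "unbounded" nor "current", it is a
                          // literal constant; the patch hands this case to
                          // opts.serializeLiteral() instead of formatting it directly.
                          [](long long n) { return std::to_string(n); },
                      },
                      bound);
}

int main() {
    std::cout << serializeBound(Bound{Unbounded{}}) << "\n";  // "unbounded"
    std::cout << serializeBound(Bound{2LL}) << "\n";          // 2
}
```

The only behavioral change in the hunk itself is the rename from serializeLiteralValue() to serializeLiteral(); the visitation structure is unchanged.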
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/window_function/window_function_add_to_set.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_avg.h b/src/mongo/db/pipeline/window_function/window_function_avg.h index 2dc0d09e4bea1..fd6e434fe9ec6 100644 --- a/src/mongo/db/pipeline/window_function/window_function_avg.h +++ b/src/mongo/db/pipeline/window_function/window_function_avg.h @@ -29,10 +29,22 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_sum.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_avg_test.cpp b/src/mongo/db/pipeline/window_function/window_function_avg_test.cpp index b909b55acf221..0df6e8a9c920e 100644 --- a/src/mongo/db/pipeline/window_function/window_function_avg_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_avg_test.cpp @@ -27,12 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/window_function/window_function_avg.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_count.cpp b/src/mongo/db/pipeline/window_function/window_function_count.cpp index 459b1badee9d8..a1ffe6c13ee62 100644 --- a/src/mongo/db/pipeline/window_function/window_function_count.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_count.cpp @@ -28,7 +28,27 @@ */ #include "mongo/db/pipeline/window_function/window_function_count.h" + +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/window_function/window_bounds.h" +#include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/db/pipeline/window_function/window_function_sum.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo::window_function { diff --git a/src/mongo/db/pipeline/window_function/window_function_count.h b/src/mongo/db/pipeline/window_function/window_function_count.h index 70c3db784bb74..d35874a76ee97 100644 --- a/src/mongo/db/pipeline/window_function/window_function_count.h +++ b/src/mongo/db/pipeline/window_function/window_function_count.h @@ -29,6 +29,12 @@ #pragma once #include "window_function_expression.h" +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/sort_pattern.h" namespace mongo::window_function { diff --git a/src/mongo/db/pipeline/window_function/window_function_covariance.cpp b/src/mongo/db/pipeline/window_function/window_function_covariance.cpp index c9b56a7d1d762..ef6d3480d2f2b 100644 --- a/src/mongo/db/pipeline/window_function/window_function_covariance.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_covariance.cpp @@ -28,10 +28,17 @@ */ #include "mongo/db/pipeline/window_function/window_function_covariance.h" -#include "mongo/db/pipeline/window_function/window_function_sum.h" -#include "mongo/db/pipeline/document_source.h" +#include +#include + +#include + +#include "mongo/bson/bsontypes.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/window_function/window_function_sum.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_covariance.h b/src/mongo/db/pipeline/window_function/window_function_covariance.h index a907f791b31e5..a2918836fe6d7 100644 --- a/src/mongo/db/pipeline/window_function/window_function_covariance.h +++ b/src/mongo/db/pipeline/window_function/window_function_covariance.h @@ -29,10 +29,16 @@ #pragma once +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function.h" #include 
"mongo/db/pipeline/window_function/window_function_avg.h" #include "mongo/db/pipeline/window_function/window_function_sum.h" #include "mongo/platform/decimal128.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_covariance_test.cpp b/src/mongo/db/pipeline/window_function/window_function_covariance_test.cpp index a170bb97be816..b16241656030b 100644 --- a/src/mongo/db/pipeline/window_function/window_function_covariance_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_covariance_test.cpp @@ -27,12 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_covariance.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec.cpp b/src/mongo/db/pipeline/window_function/window_function_exec.cpp index 5afcdfb319763..1f42264951d6b 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec.cpp @@ -28,6 +28,20 @@ */ #include "mongo/db/pipeline/window_function/window_function_exec.h" + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/db/pipeline/document_source_set_window_fields.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/pipeline/window_function/window_function_exec_derivative.h" #include "mongo/db/pipeline/window_function/window_function_exec_first_last.h" #include "mongo/db/pipeline/window_function/window_function_exec_linear_fill.h" @@ -35,7 +49,9 @@ #include "mongo/db/pipeline/window_function/window_function_exec_non_removable_range.h" #include "mongo/db/pipeline/window_function/window_function_exec_removable_document.h" #include "mongo/db/pipeline/window_function/window_function_exec_removable_range.h" +#include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/db/pipeline/window_function/window_function_shift.h" +#include "mongo/stdx/variant.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec.h b/src/mongo/db/pipeline/window_function/window_function_exec.h index 1fae842490bd0..651ca74ed9e9c 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec.h +++ b/src/mongo/db/pipeline/window_function/window_function_exec.h @@ -29,14 +29,25 @@ #pragma once +#include +#include +#include +#include #include +#include +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_set_window_fields.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/pipeline/window_function/window_function.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/util/assert_util.h" +#include 
"mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_derivative.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_derivative.cpp index ec1b1300d43d3..edb9ff0b09332 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_derivative.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec_derivative.cpp @@ -29,6 +29,18 @@ #include "mongo/db/pipeline/window_function/window_function_exec_derivative.h" +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/expression_context.h" + namespace mongo { Value WindowFunctionExecDerivative::getNext() { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_derivative.h b/src/mongo/db/pipeline/window_function/window_function_exec_derivative.h index c9de780d44370..d28018be0f38b 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_derivative.h +++ b/src/mongo/db/pipeline/window_function/window_function_exec_derivative.h @@ -29,12 +29,24 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/pipeline/window_function/window_function_exec.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_derivative_test.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_derivative_test.cpp index bd8fe4f36cd70..656f6d94dd9e4 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_derivative_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec_derivative_test.cpp @@ -27,18 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/pipeline/window_function/window_function_exec_derivative.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_first_last.h b/src/mongo/db/pipeline/window_function/window_function_exec_first_last.h index f64b77c28f8dc..c356115fe5790 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_first_last.h +++ b/src/mongo/db/pipeline/window_function/window_function_exec_first_last.h @@ -29,10 +29,23 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/pipeline/window_function/window_function_exec.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_first_last_test.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_first_last_test.cpp index 0650321c464db..719dec4aef020 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_first_last_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec_first_last_test.cpp @@ -27,18 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/pipeline/window_function/window_function_exec_first_last.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_linear_fill.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_linear_fill.cpp index aaa85be194947..35ebaf4848a48 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_linear_fill.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec_linear_fill.cpp @@ -29,6 +29,17 @@ #include "mongo/db/pipeline/window_function/window_function_exec_linear_fill.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + namespace mongo { namespace { namespace value_arithmetic_operators { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_linear_fill.h b/src/mongo/db/pipeline/window_function/window_function_exec_linear_fill.h index 80f310c0d93a9..329606d64c82a 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_linear_fill.h +++ b/src/mongo/db/pipeline/window_function/window_function_exec_linear_fill.h @@ -29,12 +29,23 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/pipeline/window_function/window_function_exec.h" #include "mongo/db/query/datetime/date_time_support.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_non_removable.h b/src/mongo/db/pipeline/window_function/window_function_exec_non_removable.h index 4d3c8a44c0851..80fb9831b9877 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_non_removable.h +++ b/src/mongo/db/pipeline/window_function/window_function_exec_non_removable.h @@ -29,11 +29,22 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include 
"mongo/db/pipeline/window_function/window_function_exec.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_non_removable_test.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_non_removable_test.cpp index 960d21877cf0a..2d79ec58543a0 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_non_removable_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec_non_removable_test.cpp @@ -27,8 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_for_window_functions.h" @@ -36,14 +48,12 @@ #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" -#include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_exec_non_removable.h" -#include "mongo/db/pipeline/window_function/window_function_exec_removable_document.h" -#include "mongo/db/pipeline/window_function/window_function_min_max.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp index c06f868df3725..826021812d120 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.cpp @@ -27,9 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_exec_removable_document.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.h b/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.h index 0c716f47c5d8d..e8c97d8653224 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.h +++ b/src/mongo/db/pipeline/window_function/window_function_exec_removable_document.h @@ -29,10 +29,18 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" +#include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_exec.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_removable_range.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_removable_range.cpp index 8518e4a864aef..d4237300197c7 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_removable_range.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec_removable_range.cpp @@ -27,9 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include +#include + +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_exec_removable_range.h" +#include "mongo/util/assert_util.h" using boost::optional; using std::pair; diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_removable_range.h b/src/mongo/db/pipeline/window_function/window_function_exec_removable_range.h index d3c295faaeb10..b04533738c452 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_removable_range.h +++ b/src/mongo/db/pipeline/window_function/window_function_exec_removable_range.h @@ -29,11 +29,21 @@ #pragma once +#include +#include +#include +#include +#include +#include + #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" +#include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_exec.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_exec_removable_test.cpp b/src/mongo/db/pipeline/window_function/window_function_exec_removable_test.cpp index b681dea292565..f07ac87f4cb7b 100644 --- a/src/mongo/db/pipeline/window_function/window_function_exec_removable_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_exec_removable_test.cpp @@ -27,22 +27,41 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/memory_usage_tracker.h" #include "mongo/db/pipeline/window_function/partition_iterator.h" #include "mongo/db/pipeline/window_function/window_bounds.h" -#include "mongo/db/pipeline/window_function/window_function_exec_non_removable.h" +#include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_exec_removable_document.h" #include "mongo/db/pipeline/window_function/window_function_integral.h" #include "mongo/db/pipeline/window_function/window_function_min_max.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_expression.cpp b/src/mongo/db/pipeline/window_function/window_function_expression.cpp index 957ab26d713fe..6dd914111167b 100644 --- a/src/mongo/db/pipeline/window_function/window_function_expression.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_expression.cpp @@ -27,28 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/feature_compatibility_version_documentation.h" -#include "mongo/db/pipeline/accumulation_statement.h" -#include "mongo/db/pipeline/document_source_add_fields.h" -#include "mongo/db/pipeline/document_source_project.h" -#include "mongo/db/pipeline/document_source_set_window_fields.h" -#include "mongo/db/pipeline/document_source_set_window_fields_gen.h" -#include "mongo/db/pipeline/lite_parsed_document_source.h" -#include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/stats/counters.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/pipeline/window_function/partition_iterator.h" -#include "mongo/db/pipeline/window_function/window_function_exec.h" -#include "mongo/db/pipeline/window_function/window_function_exec_derivative.h" -#include "mongo/db/pipeline/window_function/window_function_exec_first_last.h" +#include "mongo/db/feature_compatibility_version_documentation.h" +#include "mongo/db/pipeline/accumulator_percentile.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" #include "mongo/db/pipeline/window_function/window_function_first_last_n.h" #include "mongo/db/pipeline/window_function/window_function_min_max.h" #include "mongo/db/pipeline/window_function/window_function_n_traits.h" #include "mongo/db/pipeline/window_function/window_function_percentile.h" #include "mongo/db/pipeline/window_function/window_function_top_bottom_n.h" +#include "mongo/db/stats/counters.h" using boost::intrusive_ptr; using boost::optional; @@ -80,17 +78,11 @@ REGISTER_STABLE_WINDOW_FUNCTION( (ExpressionN>::parse)); -REGISTER_WINDOW_FUNCTION_WITH_FEATURE_FLAG( - percentile, - (window_function::ExpressionQuantile::parse), - feature_flags::gFeatureFlagApproxPercentiles, - AllowedWithApiStrict::kNeverInVersion1); - -REGISTER_WINDOW_FUNCTION_WITH_FEATURE_FLAG( - median, - (window_function::ExpressionQuantile::parse), - feature_flags::gFeatureFlagApproxPercentiles, - AllowedWithApiStrict::kNeverInVersion1); +REGISTER_STABLE_WINDOW_FUNCTION( + percentile, (window_function::ExpressionQuantile::parse)); + +REGISTER_STABLE_WINDOW_FUNCTION(median, + (window_function::ExpressionQuantile::parse)); StringMap Expression::parserMap; intrusive_ptr Expression::parse(BSONObj obj, @@ -404,11 +396,12 @@ boost::intrusive_ptr ExpressionQuantile::parse( BSONObj obj, const boost::optional& sortBy, ExpressionContext* expCtx) { std::vector ps; - int32_t method = -1; + PercentileMethod method = PercentileMethod::Approximate; boost::intrusive_ptr<::mongo::Expression> outputExpr; boost::intrusive_ptr<::mongo::Expression> initializeExpr; // need for serializer. boost::optional bounds = WindowBounds::defaultBounds(); auto name = AccumulatorTType::kName; + for (auto&& elem : obj) { auto fieldName = elem.fieldNameStringData(); if (fieldName == name) { @@ -420,7 +413,9 @@ boost::intrusive_ptr ExpressionQuantile::parse( initializeExpr = std::move(accExpr.initializer); // Retrieve the values of 'ps' and 'method' from the accumulator's IDL parser. 
- std::tie(ps, method) = AccumulatorTType::parsePercentileAndMethod(elem); + std::tie(ps, method) = AccumulatorTType::parsePercentileAndMethod( + expCtx, elem, expCtx->variablesParseState); + } else if (fieldName == kWindowArg) { bounds = WindowBounds::parse(elem, sortBy, expCtx); } else { @@ -429,9 +424,9 @@ boost::intrusive_ptr ExpressionQuantile::parse( } } - tassert(7455900, - str::stream() << "missing accumulator specification for " << name, - initializeExpr && outputExpr && !ps.empty() && method != -1); + uassert(7455900, + str::stream() << "Missing or incomplete accumulator specification for " << name, + initializeExpr && outputExpr && !ps.empty()); return make_intrusive( expCtx, std::string(name), std::move(outputExpr), initializeExpr, *bounds, ps, method); diff --git a/src/mongo/db/pipeline/window_function/window_function_expression.h b/src/mongo/db/pipeline/window_function/window_function_expression.h index 03d3075910fcd..ef50f2b14c7c3 100644 --- a/src/mongo/db/pipeline/window_function/window_function_expression.h +++ b/src/mongo/db/pipeline/window_function/window_function_expression.h @@ -29,19 +29,59 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_for_window_functions.h" #include "mongo/db/pipeline/accumulator_multi.h" #include "mongo/db/pipeline/accumulator_percentile.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_set_window_fields_gen.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/percentile_algo.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/pipeline/window_function/window_bounds.h" #include "mongo/db/pipeline/window_function/window_function.h" +#include "mongo/db/pipeline/window_function/window_function_integral.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/query/sort_pattern.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { class WindowFunctionExec; @@ -502,9 +542,9 @@ class ExpressionExpMovingAvg : public Expression { MutableDocument subObj; tassert(5433604, "ExpMovingAvg neither N nor alpha was set", _N || _alpha); if (_N) { - subObj[kNArg] = opts.serializeLiteralValue(_N.get()); + subObj[kNArg] = opts.serializeLiteral(_N.get()); } else { - subObj[kAlphaArg] = opts.serializeLiteralValue(_alpha.get()); + subObj[kAlphaArg] = opts.serializeLiteral(_alpha.get()); } subObj[kInputArg] = _input->serialize(opts); MutableDocument outerObj; 
@@ -946,7 +986,7 @@ class ExpressionQuantile : public Expression { boost::intrusive_ptr<::mongo::Expression> initializeExpr, WindowBounds bounds, std::vector ps, - int32_t method) + PercentileMethod method) : Expression(expCtx, std::move(accumulatorName), std::move(input), std::move(bounds)), _ps(std::move(ps)), _method(method), @@ -960,7 +1000,7 @@ class ExpressionQuantile : public Expression { private: std::vector _ps; - int32_t _method; + PercentileMethod _method; boost::intrusive_ptr<::mongo::Expression> _intializeExpr; }; diff --git a/src/mongo/db/pipeline/window_function/window_function_integral.cpp b/src/mongo/db/pipeline/window_function/window_function_integral.cpp index 2ffe63d5998ac..3575c158574e4 100644 --- a/src/mongo/db/pipeline/window_function/window_function_integral.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_integral.cpp @@ -28,14 +28,22 @@ */ #include "mongo/db/pipeline/window_function/window_function_integral.h" -#include "mongo/db/pipeline/accumulator.h" + +#include +#include +#include + +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value_comparator.h" namespace mongo { Value WindowFunctionIntegral::integralOfTwoPointsByTrapezoidalRule(const Value& preValue, const Value& newValue) { - auto preArr = preValue.getArray(); - auto newArr = newValue.getArray(); + const auto& preArr = preValue.getArray(); + const auto& newArr = newValue.getArray(); if (preArr[0].isNaN() || preArr[1].isNaN() || newArr[0].isNaN() || newArr[1].isNaN()) return Value(0); @@ -62,7 +70,7 @@ void WindowFunctionIntegral::assertValueType(const Value& value) { value.isArray() && value.getArray().size() == 2 && value.getArray()[1].numeric() && (value.getArray()[0].numeric() || value.getArray()[0].getType() == BSONType::Date)); - auto arr = value.getArray(); + const auto& arr = value.getArray(); if (_unitMillis) { uassert(5423901, "$integral with 'unit' expects the sortBy field to be a Date", @@ -77,7 +85,7 @@ void WindowFunctionIntegral::assertValueType(const Value& value) { void WindowFunctionIntegral::add(Value value) { assertValueType(value); - auto arr = value.getArray(); + const auto& arr = value.getArray(); if (arr[0].isNaN() || arr[1].isNaN()) _nanCount++; @@ -105,7 +113,7 @@ void WindowFunctionIntegral::remove(Value value) { "Attempted to remove an element other than the first element from WindowFunctionIntegral", _expCtx->getValueComparator().evaluate(_values.front() == value)); - auto arr = value.getArray(); + const auto& arr = value.getArray(); if (arr[0].isNaN() || arr[1].isNaN()) _nanCount--; diff --git a/src/mongo/db/pipeline/window_function/window_function_integral.h b/src/mongo/db/pipeline/window_function/window_function_integral.h index 919c9030f2c0a..f50fd315bb056 100644 --- a/src/mongo/db/pipeline/window_function/window_function_integral.h +++ b/src/mongo/db/pipeline/window_function/window_function_integral.h @@ -29,8 +29,20 @@ #pragma once +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_sum.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_integral_test.cpp b/src/mongo/db/pipeline/window_function/window_function_integral_test.cpp index 
6e6db7086c49b..7170317f362e4 100644 --- a/src/mongo/db/pipeline/window_function/window_function_integral_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_integral_test.cpp @@ -27,12 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/window_function/window_function_integral.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_min_max.h b/src/mongo/db/pipeline/window_function/window_function_min_max.h index f0765abbd3be8..36c20890c2945 100644 --- a/src/mongo/db/pipeline/window_function/window_function_min_max.h +++ b/src/mongo/db/pipeline/window_function/window_function_min_max.h @@ -29,9 +29,25 @@ #pragma once +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/accumulation_statement.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_multi.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/pipeline/window_function/window_function.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_min_max_test.cpp b/src/mongo/db/pipeline/window_function/window_function_min_max_test.cpp index aaf913e578637..cb7a5e07abdf7 100644 --- a/src/mongo/db/pipeline/window_function/window_function_min_max_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_min_max_test.cpp @@ -27,13 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/window_function/window_function_min_max.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_n_test.cpp b/src/mongo/db/pipeline/window_function/window_function_n_test.cpp index 8c65ca650aa68..97a5cff1e0bdb 100644 --- a/src/mongo/db/pipeline/window_function/window_function_n_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_n_test.cpp @@ -27,12 +27,27 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function_first_last_n.h" #include "mongo/db/pipeline/window_function/window_function_min_max.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_percentile.h b/src/mongo/db/pipeline/window_function/window_function_percentile.h index 8cef109b9859b..81815755f2c53 100644 --- a/src/mongo/db/pipeline/window_function/window_function_percentile.h +++ b/src/mongo/db/pipeline/window_function/window_function_percentile.h @@ -30,7 +30,7 @@ #pragma once -#include "mongo/db/pipeline/accumulator_percentile.h" +#include "mongo/db/pipeline/percentile_algo.h" #include "mongo/db/pipeline/window_function/window_function.h" namespace mongo { @@ -70,22 +70,28 @@ class WindowFunctionPercentileCommon : public WindowFunctionState { protected: explicit WindowFunctionPercentileCommon(ExpressionContext* const expCtx) - : WindowFunctionState(expCtx), _values(std::multiset()) {} + : WindowFunctionState(expCtx), _values(boost::container::flat_multiset()) {} Value computePercentile(double p) const { // Calculate the rank. const double n = _values.size(); - const double rank = std::max(0, std::ceil(p * n) - 1); + const double rank = PercentileAlgorithm::computeTrueRank(n, p); - // std::multiset stores the values in ascending order, so we don't need to sort them before - // finding the value at index 'rank'. + // boost::container::flat_multiset stores the values in ascending order, so we don't need to + // sort them before finding the value at index 'rank'. + // boost::container::flat_multiset has random-access iterators, so std::advance has an + // expected runtime of O(1). auto it = _values.begin(); std::advance(it, rank); return Value(*it); } // Holds all the values in the window in ascending order. - std::multiset _values; + // A boost::container::flat_multiset stores elements in a contiguous array, so iterating through + // the set is faster than iterating through a std::multiset which stores its elements typically + // as a binary search tree. Thus, using a boost::container::flat_multiset significantly improved + // performance. 
+ boost::container::flat_multiset _values; }; class WindowFunctionPercentile : public WindowFunctionPercentileCommon { @@ -105,7 +111,7 @@ class WindowFunctionPercentile : public WindowFunctionPercentileCommon { if (_values.empty()) { std::vector nulls; nulls.insert(nulls.end(), _ps.size(), Value(BSONNULL)); - return Value(nulls); + return Value(std::move(nulls)); } std::vector pctls; pctls.reserve(_ps.size()); @@ -114,7 +120,7 @@ class WindowFunctionPercentile : public WindowFunctionPercentileCommon { pctls.push_back(result); } - return Value(pctls); + return Value(std::move(pctls)); }; void reset() final { diff --git a/src/mongo/db/pipeline/window_function/window_function_percentile_bm_fixture.cpp b/src/mongo/db/pipeline/window_function/window_function_percentile_bm_fixture.cpp new file mode 100644 index 0000000000000..1085c32c537cc --- /dev/null +++ b/src/mongo/db/pipeline/window_function/window_function_percentile_bm_fixture.cpp @@ -0,0 +1,135 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/window_function/window_function.h" +#include "mongo/db/pipeline/window_function/window_function_percentile.h" +#include "mongo/db/pipeline/window_function/window_function_percentile_bm_fixture.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" + +namespace mongo { +using std::vector; +vector generateNormalData(size_t n) { + std::mt19937 generator(curTimeMillis64()); + boost::random::normal_distribution dist(0.0 /* mean */, 1.0 /* sigma */); + + vector inputs; + inputs.reserve(n); + for (size_t i = 0; i < n; i++) { + inputs.push_back(dist(generator)); + } + + return inputs; +} + +// This benchmark is mimicking the behavior of computing $percentile for a [0, unbounded] +// window. In a [0, unbounded] window the first window will add all of the inputs in the window +// function. Then for each following window, the element before the current element will be removed +// and the percentile will be recalculated. 
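The new WindowFunctionPercentileCommon keeps the window's values ordered in a boost::container::flat_multiset and answers a percentile query by advancing a random-access iterator to the computed rank, and the benchmark fixture introduced here drives that add/remove/getValue cycle over sliding windows. Below is a minimal standalone sketch of the same idea, assuming the simple ceil(p * n) - 1 discrete rank that the patch replaces with PercentileAlgorithm::computeTrueRank; the class and function names are illustrative only, not the server's API.

```cpp
// Illustrative sketch only: a removable percentile state backed by a sorted,
// contiguous multiset, as described in the patch above.
#include <boost/container/flat_set.hpp>
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>

class RemovablePercentileSketch {
public:
    void add(double v) {
        _values.insert(v);
    }

    void remove(double v) {
        // Erase a single occurrence; erase(key) on a multiset would drop all of them.
        auto it = _values.find(v);
        if (it != _values.end())
            _values.erase(it);
    }

    double percentile(double p) const {
        const double n = static_cast<double>(_values.size());
        // Simple discrete rank; the server now delegates this to computeTrueRank.
        const auto rank = static_cast<std::size_t>(std::max(0.0, std::ceil(p * n) - 1));
        // flat_multiset keeps its elements sorted in a contiguous array, so stepping
        // to 'rank' is a constant-time random-access jump rather than a tree walk.
        return *(_values.begin() + rank);
    }

private:
    boost::container::flat_multiset<double> _values;
};

int main() {
    RemovablePercentileSketch w;
    for (double v : {5.0, 1.0, 9.0, 3.0, 7.0})
        w.add(v);
    std::cout << w.percentile(0.5) << "\n";  // 5
    w.remove(1.0);                           // slide the window: drop the oldest value
    std::cout << w.percentile(0.5) << "\n";  // 5 (median of {3, 5, 7, 9} by this rank rule)
}
```

Because the flat_multiset stores its elements contiguously, both the rank lookup and full iteration stay cache-friendly, which is the performance argument the comment in the header makes for replacing std::multiset.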
+void WindowFunctionPercentileBenchmarkFixture::removable_unbounded_percentile( + benchmark::State& state, std::vector ps) { + // Generate the data. + const vector inputs = generateNormalData(dataSizeLarge); + auto expCtx = make_intrusive(); + + // Run the test. + for (auto keepRunning : state) { + auto w = WindowFunctionPercentile::create(expCtx.get(), ps); + + // Calculate the percentile for a [0, unbounded] window for each input. + for (size_t i = 0; i < dataSizeLarge; i++) { + // All of the values are in the first window. + if (i == 0) { + for (double input : inputs) { + w->add(Value(input)); + } + benchmark::DoNotOptimize(w->getValue()); + } else { + // Remove the previous value for the next window. + double valToRemove = inputs[i - 1]; + w->remove(Value(valToRemove)); + benchmark::DoNotOptimize(w->getValue()); + } + } + benchmark::ClobberMemory(); + } +} + +// This benchmark is mimicking the behavior of computing $percentile for a ["current", 100] +// window. In a ["current", 100] window, the first window will add itself and the next 100 elements +// in 'inputs' to the window function. Then for each following window, the previous current element +// will be removed, and a new element (100 indexes away from the new current element) will be added. +// Then the percentile will be recalculated. We will not add any elements if the index is out of +// bounds, resulting in smaller windows towards the end of 'inputs'. +void WindowFunctionPercentileBenchmarkFixture::removable_bounded_percentile( + benchmark::State& state, std::vector ps) { + // Generate the data. + const vector inputs = generateNormalData(dataSizeLarge); + auto expCtx = make_intrusive(); + + // Run the test. + for (auto keepRunning : state) { + auto w = WindowFunctionPercentile::create(expCtx.get(), ps); + + // Calculate the percentile for a ["current", 100] window for each input. + for (size_t i = 0; i < dataSizeLarge; i++) { + // Add the first value and the next 100 to the window. + if (i == 0) { + for (size_t j = 0; j < 101; j++) { + w->add(Value(inputs[j])); + } + benchmark::DoNotOptimize(w->getValue()); + } else { + // Remove the previous current value. + double valToRemove = inputs[i - 1]; + w->remove(Value(valToRemove)); + // If possible, add the new value. + if (i + 100 < dataSizeLarge - 1) { + w->add(Value(inputs[i + 100])); + } + benchmark::DoNotOptimize(w->getValue()); + } + } + benchmark::ClobberMemory(); + } +} + +BENCHMARK_WINDOW_PERCENTILE(WindowFunctionPercentileBenchmarkFixture); +} // namespace mongo diff --git a/src/mongo/db/pipeline/window_function/window_function_percentile_bm_fixture.h b/src/mongo/db/pipeline/window_function/window_function_percentile_bm_fixture.h new file mode 100644 index 0000000000000..0ffd0abba3cb6 --- /dev/null +++ b/src/mongo/db/pipeline/window_function/window_function_percentile_bm_fixture.h @@ -0,0 +1,73 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once +#include +#include + +#include "mongo/db/pipeline/window_function/window_function_percentile.h" +#include "mongo/platform/basic.h" + +namespace mongo { + +class WindowFunctionPercentileBenchmarkFixture : public benchmark::Fixture { +public: + void removable_unbounded_percentile(benchmark::State& state, std::vector ps); + void removable_bounded_percentile(benchmark::State& state, std::vector ps); + + static constexpr int dataSizeLarge = 100'000; +}; + +#define BENCHMARK_WINDOW_PERCENTILE(Fixture) \ + \ + BENCHMARK_F(Fixture, percentile_unbounded_low_p)(benchmark::State & state) { \ + removable_unbounded_percentile(state, {0.001}); \ + } \ + BENCHMARK_F(Fixture, percentile_unbounded_high_p)(benchmark::State & state) { \ + removable_unbounded_percentile(state, {.999}); \ + } \ + BENCHMARK_F(Fixture, percentile_unbounded_mid_p)(benchmark::State & state) { \ + removable_unbounded_percentile(state, {.55}); \ + } \ + BENCHMARK_F(Fixture, percentile_unbounded_multi_p)(benchmark::State & state) { \ + removable_unbounded_percentile(state, {.1, .47, .88, .05, .33, .999, .2, .59, .9, .7}); \ + } \ + BENCHMARK_F(Fixture, percentile_bounded_low_p)(benchmark::State & state) { \ + removable_bounded_percentile(state, {.001}); \ + } \ + BENCHMARK_F(Fixture, percentile_bounded_high_p)(benchmark::State & state) { \ + removable_bounded_percentile(state, {.999}); \ + } \ + BENCHMARK_F(Fixture, percentile_bounded_mid_p)(benchmark::State & state) { \ + removable_bounded_percentile(state, {.55}); \ + } \ + BENCHMARK_F(Fixture, percentile_bounded_multi_p)(benchmark::State & state) { \ + removable_bounded_percentile(state, {.1, .47, .88, .05, .33, .999, .2, .59, .9, .7}); \ + } + +} // namespace mongo diff --git a/src/mongo/db/pipeline/window_function/window_function_push.h b/src/mongo/db/pipeline/window_function/window_function_push.h index 4ed5b0839965c..f529f3ee0c5ae 100644 --- a/src/mongo/db/pipeline/window_function/window_function_push.h +++ b/src/mongo/db/pipeline/window_function/window_function_push.h @@ -29,7 +29,19 @@ #pragma once +#include +#include +#include +#include +#include + +#include + +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_push_test.cpp b/src/mongo/db/pipeline/window_function/window_function_push_test.cpp index ac41abe6a671b..fad76490becb1 100644 --- a/src/mongo/db/pipeline/window_function/window_function_push_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_push_test.cpp @@ 
-27,12 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" #include "mongo/db/pipeline/window_function/window_function_push.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_shift.cpp b/src/mongo/db/pipeline/window_function/window_function_shift.cpp index eef18c819a481..e7bea87e8da19 100644 --- a/src/mongo/db/pipeline/window_function/window_function_shift.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_shift.cpp @@ -28,8 +28,21 @@ */ #include "window_function_shift.h" -#include "partition_iterator.h" -#include "window_function_exec_first_last.h" + +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo::window_function { REGISTER_STABLE_WINDOW_FUNCTION(shift, ExpressionShift::parse); @@ -121,10 +134,10 @@ boost::intrusive_ptr ExpressionShift::parse(BSONObj obj, Value ExpressionShift::serialize(SerializationOptions opts) const { MutableDocument args; - args.addField(kByArg, opts.serializeLiteralValue(_offset)); + args.addField(kByArg, opts.serializeLiteral(_offset)); args.addField(kOutputArg, _input->serialize(opts)); args.addField(kDefaultArg, - opts.serializeLiteralValue(_defaultVal.get_value_or(mongo::Value(BSONNULL)))); + opts.serializeLiteral(_defaultVal.get_value_or(mongo::Value(BSONNULL)))); MutableDocument windowFun; windowFun.addField(_accumulatorName, args.freezeToValue()); return windowFun.freezeToValue(); diff --git a/src/mongo/db/pipeline/window_function/window_function_shift.h b/src/mongo/db/pipeline/window_function/window_function_shift.h index ce120ac2829d6..46bebc6816059 100644 --- a/src/mongo/db/pipeline/window_function/window_function_shift.h +++ b/src/mongo/db/pipeline/window_function/window_function_shift.h @@ -29,7 +29,27 @@ #pragma once +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/window_function/window_bounds.h" +#include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_expression.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::window_function { diff --git a/src/mongo/db/pipeline/window_function/window_function_std_dev_test.cpp b/src/mongo/db/pipeline/window_function/window_function_std_dev_test.cpp index fed505e7c62e6..79494e81c7199 100644 --- a/src/mongo/db/pipeline/window_function/window_function_std_dev_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_std_dev_test.cpp @@ 
-27,12 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/window_function/window_function_stddev.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/platform/random.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/pipeline/window_function/window_function_sum.cpp b/src/mongo/db/pipeline/window_function/window_function_sum.cpp index 08b71cc4d8b24..9075d8053c104 100644 --- a/src/mongo/db/pipeline/window_function/window_function_sum.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_sum.cpp @@ -29,9 +29,11 @@ #include "mongo/db/pipeline/window_function/window_function_sum.h" +#include + +#include + #include "mongo/db/pipeline/accumulator.h" -#include "mongo/db/pipeline/document_source.h" -#include "mongo/db/pipeline/expression.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_sum.h b/src/mongo/db/pipeline/window_function/window_function_sum.h index 6f4b6bfaca98a..9b408e175aa70 100644 --- a/src/mongo/db/pipeline/window_function/window_function_sum.h +++ b/src/mongo/db/pipeline/window_function/window_function_sum.h @@ -29,10 +29,22 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/window_function/window_function.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/pipeline/window_function/window_function_sum_test.cpp b/src/mongo/db/pipeline/window_function/window_function_sum_test.cpp index de4b1b7bb7746..6c2529705964f 100644 --- a/src/mongo/db/pipeline/window_function/window_function_sum_test.cpp +++ b/src/mongo/db/pipeline/window_function/window_function_sum_test.cpp @@ -27,13 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/pipeline/window_function/window_function.h" #include "mongo/db/pipeline/window_function/window_function_sum.h" -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/prepare_conflict_tracker.cpp b/src/mongo/db/prepare_conflict_tracker.cpp index ac9c1b495fc28..06119b4981d19 100644 --- a/src/mongo/db/prepare_conflict_tracker.cpp +++ b/src/mongo/db/prepare_conflict_tracker.cpp @@ -28,7 +28,14 @@ */ #include "mongo/db/prepare_conflict_tracker.h" -#include "mongo/platform/basic.h" + +#include + +#include + +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/prepare_conflict_tracker.h b/src/mongo/db/prepare_conflict_tracker.h index 2d03fbce2cad3..6b9806cee8235 100644 --- a/src/mongo/db/prepare_conflict_tracker.h +++ b/src/mongo/db/prepare_conflict_tracker.h @@ -31,6 +31,8 @@ #include "mongo/db/operation_context.h" #include "mongo/platform/atomic_word.h" +#include "mongo/util/duration.h" +#include "mongo/util/tick_source.h" namespace mongo { diff --git a/src/mongo/db/process_health/SConscript b/src/mongo/db/process_health/SConscript index d30b66e619b5d..5f0ae5b89c7c6 100644 --- a/src/mongo/db/process_health/SConscript +++ b/src/mongo/db/process_health/SConscript @@ -65,6 +65,7 @@ env.CppUnitTest( ], LIBDEPS=[ '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/executor/network_interface_mock', '$BUILD_DIR/mongo/executor/task_executor_test_fixture', '$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture', diff --git a/src/mongo/db/process_health/config_server_health_observer.cpp b/src/mongo/db/process_health/config_server_health_observer.cpp index bf011d2847224..377d7f3cfc440 100644 --- a/src/mongo/db/process_health/config_server_health_observer.cpp +++ b/src/mongo/db/process_health/config_server_health_observer.cpp @@ -27,17 +27,61 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" +#include "mongo/client/remote_command_targeter.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/process_health/fault_manager_config.h" +#include "mongo/db/process_health/health_check_status.h" +#include "mongo/db/process_health/health_monitoring_server_parameters_gen.h" +#include "mongo/db/process_health/health_observer.h" #include "mongo/db/process_health/health_observer_base.h" - -#include "mongo/db/catalog_raii.h" -#include "mongo/db/dbdirectclient.h" #include "mongo/db/process_health/health_observer_registration.h" -#include "mongo/executor/remote_command_request.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/util/future_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kProcessHealth @@ -173,6 +217,12 @@ Future ConfigServerHealthObserver::_che checkCtx->opCtx = checkCtx->client->makeOperationContext(); checkCtx->opCtx->setDeadlineAfterNowBy(kObserverTimeout, ErrorCodes::ExceededTimeLimit); + // TODO(SERVER-74659): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*checkCtx->client.get()); + checkCtx->client.get()->setSystemOperationUnkillableByStepdown(lk); + } + LOGV2_DEBUG(5939001, 3, "Checking Config server health"); _runSmokeReadShardsCommand(checkCtx); diff --git a/src/mongo/db/process_health/dns_health_observer.cpp b/src/mongo/db/process_health/dns_health_observer.cpp index b4a8bf0bf2b2d..f4594848d2d31 100644 --- a/src/mongo/db/process_health/dns_health_observer.cpp +++ b/src/mongo/db/process_health/dns_health_observer.cpp @@ -29,14 +29,39 @@ #include "mongo/db/process_health/dns_health_observer.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/client.h" +#include "mongo/db/process_health/health_observer.h" #include "mongo/db/process_health/health_observer_registration.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/util/dns_name.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/net/hostname_canonicalization.h" -#include -#include #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kProcessHealth @@ -71,6 +96,13 @@ Future DnsHealthObserver::periodicCheckImpl( if (!isFailPointActive) { auto client = _svcCtx->makeClient("DNSHealthObserver"); + + // TODO(SERVER-74659): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = client->makeOperationContext(); auto const shardRegistry = Grid::get(_svcCtx)->shardRegistry(); auto shardIds = shardRegistry->getAllShardIds(opCtx.get()); diff --git a/src/mongo/db/process_health/dns_health_observer.h b/src/mongo/db/process_health/dns_health_observer.h index 11f54ad01bddd..d2852b3de789d 100644 --- a/src/mongo/db/process_health/dns_health_observer.h +++ b/src/mongo/db/process_health/dns_health_observer.h @@ -28,8 +28,13 @@ */ #pragma once +#include "mongo/db/process_health/fault_manager_config.h" +#include "mongo/db/process_health/health_check_status.h" #include "mongo/db/process_health/health_observer_base.h" +#include "mongo/db/service_context.h" #include "mongo/platform/random.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/fault.cpp b/src/mongo/db/process_health/fault.cpp index a98b73bd55113..fb2c1df4cc7a2 100644 --- a/src/mongo/db/process_health/fault.cpp +++ b/src/mongo/db/process_health/fault.cpp @@ -29,6 +29,16 @@ #include "mongo/db/process_health/fault.h" +#include +#include +#include + +#include + +#include "mongo/db/process_health/health_check_status.h" +#include "mongo/db/process_health/health_monitoring_server_parameters_gen.h" +#include "mongo/util/assert_util_core.h" + namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/fault.h b/src/mongo/db/process_health/fault.h index dbc9a0317b340..f79d3563b04b9 100644 --- a/src/mongo/db/process_health/fault.h +++ b/src/mongo/db/process_health/fault.h @@ -28,11 +28,23 @@ */ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/process_health/fault_facet.h" +#include "mongo/db/process_health/fault_manager_config.h" #include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/util/clock_source.h" #include "mongo/util/duration.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/time_support.h" #include "mongo/util/timer.h" +#include "mongo/util/uuid.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/fault_facet.h b/src/mongo/db/process_health/fault_facet.h index 1927921b2540f..2f74eb1747bc2 100644 --- a/src/mongo/db/process_health/fault_facet.h +++ b/src/mongo/db/process_health/fault_facet.h @@ -28,8 +28,13 @@ */ #pragma once +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/process_health/fault_manager_config.h" #include "mongo/db/process_health/health_check_status.h" +#include "mongo/util/duration.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/fault_facet_impl.cpp b/src/mongo/db/process_health/fault_facet_impl.cpp index 62d6c4585baab..c1c407b7b5936 100644 --- a/src/mongo/db/process_health/fault_facet_impl.cpp +++ b/src/mongo/db/process_health/fault_facet_impl.cpp @@ -29,6 +29,12 @@ #include "mongo/db/process_health/fault_facet_impl.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" + namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/fault_facet_impl.h b/src/mongo/db/process_health/fault_facet_impl.h index e956a573ce271..7e37dd12ef40b 100644 --- a/src/mongo/db/process_health/fault_facet_impl.h +++ 
b/src/mongo/db/process_health/fault_facet_impl.h @@ -28,11 +28,19 @@ */ #pragma once -#include "mongo/db/process_health/fault_facet.h" +#include +#include +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/process_health/fault_facet.h" +#include "mongo/db/process_health/fault_manager_config.h" #include "mongo/db/process_health/health_check_status.h" #include "mongo/db/process_health/health_observer.h" +#include "mongo/platform/mutex.h" #include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/time_support.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/fault_facet_test.cpp b/src/mongo/db/process_health/fault_facet_test.cpp index 33ad76a522619..02f20f8d501fb 100644 --- a/src/mongo/db/process_health/fault_facet_test.cpp +++ b/src/mongo/db/process_health/fault_facet_test.cpp @@ -28,9 +28,15 @@ */ #include "mongo/db/process_health/fault_facet.h" + +#include "mongo/base/string_data.h" #include "mongo/db/process_health/fault_facet_impl.h" #include "mongo/db/process_health/fault_facet_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/clock_source_mock.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/fault_manager.cpp b/src/mongo/db/process_health/fault_manager.cpp index 26872eed400d7..90c06a5ecb1f8 100644 --- a/src/mongo/db/process_health/fault_manager.cpp +++ b/src/mongo/db/process_health/fault_manager.cpp @@ -28,24 +28,50 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/process_health/fault_manager.h" - +#include +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/process_health/fault.h" #include "mongo/db/process_health/fault_facet_impl.h" +#include "mongo/db/process_health/fault_manager.h" #include "mongo/db/process_health/fault_manager_config.h" #include "mongo/db/process_health/health_monitoring_gen.h" #include "mongo/db/process_health/health_observer_registration.h" +#include "mongo/db/server_options.h" +#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/task_executor.h" -#include "mongo/executor/task_executor_pool.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" #include "mongo/util/exit_code.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kProcessHealth @@ -223,7 +249,7 @@ void FaultManager::setupStateMachine() { }); auto bindThis = [&](auto&& pmf) { - return [=](auto&&... a) { + return [=, this](auto&&... 
a) { return (this->*pmf)(a...); }; }; diff --git a/src/mongo/db/process_health/fault_manager.h b/src/mongo/db/process_health/fault_manager.h index 4e23c629a096b..1f6c072d5255b 100644 --- a/src/mongo/db/process_health/fault_manager.h +++ b/src/mongo/db/process_health/fault_manager.h @@ -28,11 +28,19 @@ */ #pragma once +#include +#include +#include #include +#include +#include +#include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/process_health/fault.h" #include "mongo/db/process_health/fault_facet.h" #include "mongo/db/process_health/fault_manager_config.h" +#include "mongo/db/process_health/health_check_status.h" #include "mongo/db/process_health/health_monitoring_server_parameters_gen.h" #include "mongo/db/process_health/health_observer.h" #include "mongo/db/process_health/progress_monitor.h" @@ -41,6 +49,14 @@ #include "mongo/executor/task_executor.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/time_support.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/fault_manager_config.cpp b/src/mongo/db/process_health/fault_manager_config.cpp index 3f097ceac5c26..aa9a9bbd5895f 100644 --- a/src/mongo/db/process_health/fault_manager_config.cpp +++ b/src/mongo/db/process_health/fault_manager_config.cpp @@ -28,8 +28,6 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/db/process_health/fault_manager_config.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kProcessHealth diff --git a/src/mongo/db/process_health/fault_manager_config.h b/src/mongo/db/process_health/fault_manager_config.h index f4d63721053b3..1edada86873a5 100644 --- a/src/mongo/db/process_health/fault_manager_config.h +++ b/src/mongo/db/process_health/fault_manager_config.h @@ -28,11 +28,27 @@ */ #pragma once +#include +#include +#include +#include +#include +#include #include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/process_health/health_monitoring_server_parameters_gen.h" +#include "mongo/db/server_parameter.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/basic.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/synchronized_value.h" namespace mongo { diff --git a/src/mongo/db/process_health/fault_manager_test.cpp b/src/mongo/db/process_health/fault_manager_test.cpp index f57f6b869b71f..6ee2ca5846741 100644 --- a/src/mongo/db/process_health/fault_manager_test.cpp +++ b/src/mongo/db/process_health/fault_manager_test.cpp @@ -29,8 +29,10 @@ #include "mongo/db/process_health/fault_manager.h" +#include "mongo/base/string_data.h" #include "mongo/db/process_health/fault_manager_test_suite.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/process_health/fault_manager_test_suite.h b/src/mongo/db/process_health/fault_manager_test_suite.h index 224a14a35533a..c656e75e344bf 100644 --- a/src/mongo/db/process_health/fault_manager_test_suite.h +++ 
b/src/mongo/db/process_health/fault_manager_test_suite.h @@ -26,11 +26,11 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ + #pragma once #include -#include "mongo/db/concurrency/locker_noop_client_observer.h" #include "mongo/db/process_health/fault_manager.h" #include "mongo/db/process_health/health_observer_mock.h" #include "mongo/db/process_health/health_observer_registration.h" @@ -144,8 +144,6 @@ class FaultManagerTest : public unittest::Test { _svcCtx->setFastClockSource(std::make_unique()); _svcCtx->setPreciseClockSource(std::make_unique()); _svcCtx->setTickSource(std::make_unique>()); - _svcCtx->registerClientObserver( - std::make_unique()); advanceTime(Seconds(100)); } } @@ -283,7 +281,7 @@ class FaultManagerTest : public unittest::Test { } void waitForTransitionIntoState(FaultState state) { - assertSoon([=]() { return manager().getFaultState() == state; }); + assertSoon([=, this]() { return manager().getFaultState() == state; }); } private: diff --git a/src/mongo/db/process_health/fault_state_machine_test.cpp b/src/mongo/db/process_health/fault_state_machine_test.cpp index 918d8427211e1..bdeed55ad603e 100644 --- a/src/mongo/db/process_health/fault_state_machine_test.cpp +++ b/src/mongo/db/process_health/fault_state_machine_test.cpp @@ -27,14 +27,37 @@ * it in the license file. */ -#include "mongo/db/process_health/fault_manager.h" - +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/process_health/dns_health_observer.h" +#include "mongo/db/process_health/fault.h" +#include "mongo/db/process_health/fault_facet.h" +#include "mongo/db/process_health/fault_manager_config.h" #include "mongo/db/process_health/fault_manager_test_suite.h" #include "mongo/db/process_health/health_check_status.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/db/process_health/health_monitoring_server_parameters_gen.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/process_health/fault_test.cpp b/src/mongo/db/process_health/fault_test.cpp index c894ec6af6403..852601328fdfe 100644 --- a/src/mongo/db/process_health/fault_test.cpp +++ b/src/mongo/db/process_health/fault_test.cpp @@ -29,9 +29,16 @@ #include "mongo/db/process_health/fault.h" +#include + +#include "mongo/base/string_data.h" #include "mongo/db/process_health/fault_facet_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/process_health/health_check_status.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/synchronized_value.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/health_monitoring.idl b/src/mongo/db/process_health/health_monitoring.idl index 88837981d8cbe..5c9a23e5bc59c 100644 --- 
a/src/mongo/db/process_health/health_monitoring.idl +++ b/src/mongo/db/process_health/health_monitoring.idl @@ -40,3 +40,4 @@ feature_flags: cpp_varname: gFeatureFlagHealthMonitoring default: true version: 5.3 + shouldBeFCVGated: true diff --git a/src/mongo/db/process_health/health_monitoring_server_parameters.cpp b/src/mongo/db/process_health/health_monitoring_server_parameters.cpp index 57c41b475ca4f..a1a2690c027ab 100644 --- a/src/mongo/db/process_health/health_monitoring_server_parameters.cpp +++ b/src/mongo/db/process_health/health_monitoring_server_parameters.cpp @@ -27,11 +27,26 @@ * it in the license file. */ #include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/operation_context.h" #include "mongo/db/process_health/fault_manager.h" #include "mongo/db/process_health/health_monitoring_server_parameters_gen.h" -#include "mongo/db/process_health/health_observer.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/synchronized_value.h" namespace mongo { diff --git a/src/mongo/db/process_health/health_monitoring_server_status_section.cpp b/src/mongo/db/process_health/health_monitoring_server_status_section.cpp index 3a7713af2857f..ba58ca519e07e 100644 --- a/src/mongo/db/process_health/health_monitoring_server_status_section.cpp +++ b/src/mongo/db/process_health/health_monitoring_server_status_section.cpp @@ -27,10 +27,14 @@ * it in the license file. */ +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/util/builder_fwd.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/process_health/fault_manager.h" #include "mongo/db/service_context.h" diff --git a/src/mongo/db/process_health/health_observer.h b/src/mongo/db/process_health/health_observer.h index f85ff1c91f135..a2839ef221cd9 100644 --- a/src/mongo/db/process_health/health_observer.h +++ b/src/mongo/db/process_health/health_observer.h @@ -28,10 +28,18 @@ */ #pragma once +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/process_health/fault_facet.h" #include "mongo/db/process_health/fault_manager_config.h" +#include "mongo/db/process_health/health_check_status.h" #include "mongo/executor/task_executor.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" +#include "mongo/util/time_support.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/health_observer_base.cpp b/src/mongo/db/process_health/health_observer_base.cpp index 01d90bf3db6b2..00970faf46aef 100644 --- a/src/mongo/db/process_health/health_observer_base.cpp +++ b/src/mongo/db/process_health/health_observer_base.cpp @@ -30,9 +30,23 @@ #include "mongo/db/process_health/health_observer_base.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/process_health/deadline_future.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include 
"mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kProcessHealth diff --git a/src/mongo/db/process_health/health_observer_base.h b/src/mongo/db/process_health/health_observer_base.h index ef7900f640f7b..74f2db3512ecf 100644 --- a/src/mongo/db/process_health/health_observer_base.h +++ b/src/mongo/db/process_health/health_observer_base.h @@ -28,10 +28,28 @@ */ #pragma once -#include "mongo/db/process_health/health_observer.h" +#include +#include + +#include +#include "mongo/base/status.h" #include "mongo/db/process_health/deadline_future.h" +#include "mongo/db/process_health/fault_manager_config.h" +#include "mongo/db/process_health/health_check_status.h" +#include "mongo/db/process_health/health_observer.h" #include "mongo/db/service_context.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/tick_source.h" +#include "mongo/util/time_support.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/health_observer_registration.cpp b/src/mongo/db/process_health/health_observer_registration.cpp index 12b0ff028a780..07efb9ecbd062 100644 --- a/src/mongo/db/process_health/health_observer_registration.cpp +++ b/src/mongo/db/process_health/health_observer_registration.cpp @@ -29,6 +29,8 @@ #include "mongo/db/process_health/health_observer_registration.h" +#include + namespace mongo { namespace process_health { diff --git a/src/mongo/db/process_health/health_observer_registration.h b/src/mongo/db/process_health/health_observer_registration.h index 6606e13f05a6a..16d6588a8af32 100644 --- a/src/mongo/db/process_health/health_observer_registration.h +++ b/src/mongo/db/process_health/health_observer_registration.h @@ -29,10 +29,10 @@ #pragma once #include +#include #include #include "mongo/db/process_health/health_observer.h" - #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/process_health/health_observer_test.cpp b/src/mongo/db/process_health/health_observer_test.cpp index 665c076b9de78..19fe46652d05e 100644 --- a/src/mongo/db/process_health/health_observer_test.cpp +++ b/src/mongo/db/process_health/health_observer_test.cpp @@ -29,13 +29,34 @@ #include "mongo/db/process_health/health_observer.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/process_health/fault.h" #include "mongo/db/process_health/fault_manager_test_suite.h" +#include "mongo/db/process_health/health_observer_base.h" #include "mongo/db/process_health/health_observer_mock.h" #include "mongo/db/process_health/health_observer_registration.h" #include "mongo/db/service_context.h" +#include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/timer.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source_mock.h" +#include 
"mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/process_health/progress_monitor.cpp b/src/mongo/db/process_health/progress_monitor.cpp index d940a06c49877..9883622335392 100644 --- a/src/mongo/db/process_health/progress_monitor.cpp +++ b/src/mongo/db/process_health/progress_monitor.cpp @@ -30,10 +30,24 @@ #include "mongo/db/process_health/progress_monitor.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/client.h" #include "mongo/db/process_health/fault_manager.h" +#include "mongo/db/process_health/fault_manager_config.h" #include "mongo/db/process_health/health_monitoring_server_parameters_gen.h" #include "mongo/db/process_health/health_observer.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kProcessHealth @@ -141,6 +155,12 @@ void ProgressMonitor::_progressMonitorLoop() { Client::initThread("FaultManagerProgressMonitor"_sd, _svcCtx, nullptr); static const int kSleepsPerInterval = 10; + // TODO(SERVER-74659): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + while (!_terminate.load()) { progressMonitorCheck(_crashCb); diff --git a/src/mongo/db/process_health/progress_monitor.h b/src/mongo/db/process_health/progress_monitor.h index 9b7e1f79b4ecd..c9ac82e502490 100644 --- a/src/mongo/db/process_health/progress_monitor.h +++ b/src/mongo/db/process_health/progress_monitor.h @@ -33,6 +33,7 @@ #include #include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" #include "mongo/stdx/thread.h" namespace mongo { diff --git a/src/mongo/db/process_health/state_machine.h b/src/mongo/db/process_health/state_machine.h index a0ad7c715e9d4..12b1b0d508c86 100644 --- a/src/mongo/db/process_health/state_machine.h +++ b/src/mongo/db/process_health/state_machine.h @@ -28,11 +28,19 @@ */ #pragma once +#include +#include +#include +#include +#include +#include +#include #include #include "mongo/stdx/mutex.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/functional.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/process_health/state_machine_test.cpp b/src/mongo/db/process_health/state_machine_test.cpp index d1bb807b1d71d..637f9e701b51e 100644 --- a/src/mongo/db/process_health/state_machine_test.cpp +++ b/src/mongo/db/process_health/state_machine_test.cpp @@ -28,8 +28,19 @@ */ #include "mongo/db/process_health/state_machine.h" + +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/process_health/test_health_observer.cpp b/src/mongo/db/process_health/test_health_observer.cpp index 01224117baa7d..fcd1f42527f25 100644 --- a/src/mongo/db/process_health/test_health_observer.cpp +++ b/src/mongo/db/process_health/test_health_observer.cpp @@ -29,9 +29,26 @@ #include "mongo/db/process_health/test_health_observer.h" +#include +#include +#include + +#include + +#include 
"mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/process_health/health_observer.h" #include "mongo/db/process_health/health_observer_registration.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kProcessHealth diff --git a/src/mongo/db/process_health/test_health_observer.h b/src/mongo/db/process_health/test_health_observer.h index 0c23df7fb4271..840f4370fa2f3 100644 --- a/src/mongo/db/process_health/test_health_observer.h +++ b/src/mongo/db/process_health/test_health_observer.h @@ -28,7 +28,12 @@ */ #pragma once +#include "mongo/db/process_health/fault_manager_config.h" +#include "mongo/db/process_health/health_check_status.h" #include "mongo/db/process_health/health_observer_base.h" +#include "mongo/db/service_context.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" namespace mongo { namespace process_health { diff --git a/src/mongo/db/profile_filter.cpp b/src/mongo/db/profile_filter.cpp index d26086caa20c9..31fc1cea01e2c 100644 --- a/src/mongo/db/profile_filter.cpp +++ b/src/mongo/db/profile_filter.cpp @@ -29,6 +29,11 @@ #include "mongo/db/profile_filter.h" +#include +#include + +#include "mongo/platform/mutex.h" + namespace mongo { static std::shared_ptr defaultProfileFilter; diff --git a/src/mongo/db/profile_filter.h b/src/mongo/db/profile_filter.h index 209637cf4d6a7..33baa2eb227b1 100644 --- a/src/mongo/db/profile_filter.h +++ b/src/mongo/db/profile_filter.h @@ -30,6 +30,9 @@ #pragma once +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" namespace mongo { @@ -49,9 +52,16 @@ class ProfileFilter { const CurOp& curop; }; + virtual ~ProfileFilter() = default; + virtual bool matches(OperationContext*, const OpDebug&, const CurOp&) const = 0; virtual BSONObj serialize() const = 0; - virtual ~ProfileFilter() = default; + + /** + * Returns true if the profile filter depends on the given top-level field name and false + * otherwise. + */ + virtual bool dependsOn(StringData topLevelField) const = 0; /** * Thread-safe getter for the global 'ProfileFilter' default. 
diff --git a/src/mongo/db/profile_filter_impl.cpp b/src/mongo/db/profile_filter_impl.cpp index 2ac93a006ff3c..2af7cd93eb937 100644 --- a/src/mongo/db/profile_filter_impl.cpp +++ b/src/mongo/db/profile_filter_impl.cpp @@ -28,12 +28,31 @@ */ -#include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/db/matcher/match_expression_dependencies.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/profile_filter_impl.h" +#include "mongo/db/server_options.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -56,14 +75,14 @@ ProfileFilterImpl::ProfileFilterImpl(BSONObj expr) : _matcher(expr.getOwned(), m "Profile filter is not allowed to depend on metadata", !deps.getNeedsAnyMetadata()); - // Reduce the DepsTracker down to a set of top-level fields. - StringSet toplevelFields; + // We only bother tracking top-level fields as dependencies. for (auto&& field : deps.fields) { - toplevelFields.emplace(FieldPath(std::move(field)).front()); + _dependencies.emplace(FieldPath(std::move(field)).front()); } + _needWholeDocument = deps.needWholeDocument; // Remember a list of functions we'll call whenever we need to build BSON from CurOp. - _makeBSON = OpDebug::appendStaged(toplevelFields, deps.needWholeDocument); + _makeBSON = OpDebug::appendStaged(_dependencies, _needWholeDocument); } bool ProfileFilterImpl::matches(OperationContext* opCtx, diff --git a/src/mongo/db/profile_filter_impl.h b/src/mongo/db/profile_filter_impl.h index b37108af155ca..d3663d6fe8d39 100644 --- a/src/mongo/db/profile_filter_impl.h +++ b/src/mongo/db/profile_filter_impl.h @@ -29,10 +29,18 @@ #pragma once -#include "mongo/db/profile_filter.h" +#include + +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/curop.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/matcher.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/profile_filter.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -44,7 +52,14 @@ class ProfileFilterImpl final : public ProfileFilter { return _matcher.getMatchExpression()->serialize(); } + bool dependsOn(StringData topLevelField) const override { + return _needWholeDocument || _dependencies.count(topLevelField) > 0; + } + private: + StringSet _dependencies; + bool _needWholeDocument = false; + Matcher _matcher; std::function _makeBSON; }; diff --git a/src/mongo/db/query/SConscript b/src/mongo/db/query/SConscript index 4366724ac7707..bd9ae87658ea0 100644 --- a/src/mongo/db/query/SConscript +++ b/src/mongo/db/query/SConscript @@ -9,6 +9,7 @@ env = env.Clone() env.SConscript( dirs=[ + 'boolean_simplification', 'ce', 'collation', 'cost_model', @@ -23,7 +24,11 @@ env.SConscript( env.Library( target='canonical_query', - source=['canonical_query.cpp', 'canonical_query_encoder.cpp', 'query_shape.cpp'], + source=[ + 'canonical_query.cpp', + 'canonical_query_encoder.cpp', + 'parsed_find_command.cpp', + ], LIBDEPS=[ 
'$BUILD_DIR/mongo/crypto/encrypted_field_config', '$BUILD_DIR/mongo/db/cst/cst', @@ -131,6 +136,59 @@ env.Library( ], ) +env.Library( + target='index_hint', + source=[ + 'index_hint.idl', + 'index_hint.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/db/server_base', + ], +) + +env.Library( + target='query_shape_hash', + source=[ + 'query_shape_hash.idl', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/crypto/sha256_block', + ], +) + +env.Library( + target='query_settings', + source=[ + 'query_settings.idl', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/db/server_base', + 'index_hint', + 'query_shape_hash', + ], +) + +env.Library( + target='query_settings_manager', + source=[ + 'query_settings_cluster_parameter.idl', + 'query_settings_manager.cpp', + ], + LIBDEPS=[ + 'query_settings', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/crypto/sha256_block', + '$BUILD_DIR/mongo/db/concurrency/exception_util', + '$BUILD_DIR/mongo/db/concurrency/lock_manager', + '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/idl/cluster_server_parameter', + 'query_knobs', + ], +) + # Shared mongod/mongos query code. env.Library( target="query_common", @@ -241,6 +299,7 @@ env.Library( '$BUILD_DIR/mongo/db/commands/test_commands_enabled', '$BUILD_DIR/mongo/db/pipeline/runtime_constants_idl', '$BUILD_DIR/mongo/db/repl/read_concern_args', + '$BUILD_DIR/mongo/s/common_s', '$BUILD_DIR/mongo/util/namespace_string_database_name_util', 'hint_parser', ], @@ -260,7 +319,7 @@ env.Library( 'query_feature_flags.idl', 'query_knobs.idl', 'sbe_plan_cache_on_parameter_change.cpp', - 'telemetry_util.cpp', + 'query_stats_util.cpp', ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/commands/test_commands_enabled', @@ -273,16 +332,18 @@ env.Library( ) env.Library( - target="query_test_service_context", + target='query_test_service_context', source=[ - "query_test_service_context.cpp", + 'query_test_service_context.cpp', ], LIBDEPS=[ - "$BUILD_DIR/mongo/db/service_context", - "$BUILD_DIR/mongo/db/session/logical_session_id", - "collation/collator_factory_mock", + '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/db/session/logical_session_id', + 'collation/collator_factory_mock', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/concurrency/lock_manager', ], - LIBDEPS_PRIVATE=[], ) env.Library( @@ -331,6 +392,16 @@ env.Library( ], ) +env.Library( + target='str_trim_utils', + source=[ + 'str_trim_utils.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/base', + ], +) + env.Library( target='plan_yield_policy', source=[ @@ -342,7 +413,9 @@ env.Library( '$BUILD_DIR/mongo/util/fail_point', ], LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/catalog/collection_uuid_mismatch_info', '$BUILD_DIR/mongo/db/concurrency/exception_util', + '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/storage/recovery_unit_base', ], ) @@ -359,48 +432,56 @@ env.Library( ) env.Library( - target='op_metrics', + target='query_shape', source=[ - 'telemetry.cpp', - '$BUILD_DIR/mongo/db/curop.cpp', + 'query_shape.cpp', + 'query_shape.idl', + 'query_stats_find_key_generator.cpp', + 'query_stats_aggregate_key_generator.cpp', + ], + LIBDEPS=[ + '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/catalog/collection_catalog', + '$BUILD_DIR/mongo/db/pipeline/pipeline', + "$BUILD_DIR/mongo/idl/idl_parser", + 'canonical_query', ], + LIBDEPS_PRIVATE=[ + 'projection_ast', + 'sort_pattern', + ], +) + +env.Library(target='query_stats_parse', source=['query_stats_transform_algorithm.idl'], LIBDEPS=[ + '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/idl/idl_parser', +]) + 
+env.Library( + target='op_metrics', + source=['query_stats.cpp'], LIBDEPS=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/bson/mutable/mutable_bson', '$BUILD_DIR/mongo/db/commands', - '$BUILD_DIR/mongo/db/concurrency/lock_manager', - '$BUILD_DIR/mongo/db/generic_cursor', '$BUILD_DIR/mongo/db/profile_filter', '$BUILD_DIR/mongo/db/server_options', '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/stats/counters', - '$BUILD_DIR/mongo/db/stats/timer_stats', '$BUILD_DIR/mongo/db/storage/storage_engine_parameters', '$BUILD_DIR/mongo/rpc/client_metadata', - '$BUILD_DIR/mongo/transport/service_executor', - '$BUILD_DIR/mongo/util/diagnostic_info' - if get_option('use-diagnostic-latches') == 'on' else [], '$BUILD_DIR/mongo/util/fail_point', '$BUILD_DIR/mongo/util/net/network', '$BUILD_DIR/mongo/util/processinfo', - '$BUILD_DIR/mongo/util/progress_meter', 'command_request_response', 'memory_util', 'query_knobs', + 'query_stats_parse', 'rate_limiting', ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/auth/auth', - '$BUILD_DIR/mongo/db/auth/user_acquisition_stats', - '$BUILD_DIR/mongo/db/exec/projection_executor', - '$BUILD_DIR/mongo/db/prepare_conflict_tracker', - '$BUILD_DIR/mongo/db/stats/resource_consumption_metrics', - # TODO (SERVER-66896): Remove this dependency. - '$BUILD_DIR/mongo/db/storage/storage_engine_feature_flags', - '$BUILD_DIR/mongo/util/concurrency/admission_context', '$BUILD_DIR/mongo/util/namespace_string_database_name_util', - 'projection_ast', - 'sort_pattern', ], ) @@ -426,6 +507,7 @@ env.CppUnitTest( "index_bounds_builder_type_test.cpp", "index_bounds_test.cpp", "index_entry_test.cpp", + "index_hint_test.cpp", "interval_evaluation_tree_test.cpp", "interval_test.cpp", "killcursors_request_test.cpp", @@ -455,8 +537,11 @@ env.CppUnitTest( "query_planner_tree_test.cpp", "query_planner_wildcard_index_test.cpp", "query_request_test.cpp", + "query_settings_manager_test.cpp", "query_settings_test.cpp", "query_shape_test.cpp", + "query_shape_test.idl", + "query_stats_store_test.cpp", "query_solution_test.cpp", "rate_limiting_test.cpp", "sbe_and_hash_test.cpp", @@ -470,7 +555,6 @@ env.CppUnitTest( "sbe_stage_builder_type_checker_test.cpp", "shard_filterer_factory_mock.cpp", "sort_pattern_test.cpp", - "telemetry_store_test.cpp", "util/memory_util_test.cpp", "view_response_formatter_test.cpp", 'map_reduce_output_format_test.cpp', @@ -489,17 +573,21 @@ env.CppUnitTest( "$BUILD_DIR/mongo/db/repl/storage_interface_impl", "$BUILD_DIR/mongo/db/service_context_d_test_fixture", "$BUILD_DIR/mongo/dbtests/mocklib", + "$BUILD_DIR/mongo/idl/idl_parser", "$BUILD_DIR/mongo/rpc/rpc", "$BUILD_DIR/mongo/util/clock_source_mock", "collation/collator_factory_mock", "collation/collator_interface_mock", "common_query_enums_and_helpers", "hint_parser", + "index_hint", "map_reduce_output_format", "query_common", "query_planner", "query_planner_test_fixture", "query_request", + "query_settings_manager", + "query_shape", "query_test_service_context", "rate_limiting", ], diff --git a/src/mongo/db/query/all_indices_required_checker.cpp b/src/mongo/db/query/all_indices_required_checker.cpp index f83c90d090cec..821adf2995e8e 100644 --- a/src/mongo/db/query/all_indices_required_checker.cpp +++ b/src/mongo/db/query/all_indices_required_checker.cpp @@ -29,6 +29,22 @@ #include "mongo/db/query/all_indices_required_checker.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" 
+#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + namespace mongo { AllIndicesRequiredChecker::AllIndicesRequiredChecker( @@ -62,7 +78,7 @@ void AllIndicesRequiredChecker::checkIndicesForCollection(OperationContext* opCt auto indexDesc = collection->getIndexCatalog()->findIndexByIdent(opCtx, ident); uassert(ErrorCodes::QueryPlanKilled, str::stream() << "query plan killed :: index '" << nameRef << "' for collection '" - << collection->ns() << "' dropped", + << collection->ns().toStringForErrorMsg() << "' dropped", indexDesc && !indexDesc->getEntry()->isDropped()); } } diff --git a/src/mongo/db/query/all_indices_required_checker.h b/src/mongo/db/query/all_indices_required_checker.h index d637fd63c1f91..0ef32049cd586 100644 --- a/src/mongo/db/query/all_indices_required_checker.h +++ b/src/mongo/db/query/all_indices_required_checker.h @@ -29,12 +29,17 @@ #pragma once +#include #include +#include #include #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/query/allowed_contexts.cpp b/src/mongo/db/query/allowed_contexts.cpp index 2d18368e87e26..cce78af0a3b95 100644 --- a/src/mongo/db/query/allowed_contexts.cpp +++ b/src/mongo/db/query/allowed_contexts.cpp @@ -27,11 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/query/allowed_contexts.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/query/allowed_contexts.h" #include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/query/allowed_contexts.h b/src/mongo/db/query/allowed_contexts.h index 6cfb72884390d..8cb2ea4aa2d39 100644 --- a/src/mongo/db/query/allowed_contexts.h +++ b/src/mongo/db/query/allowed_contexts.h @@ -29,8 +29,15 @@ #pragma once +#include +#include +#include + #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/api_parameters.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/query/analyze_regex.cpp b/src/mongo/db/query/analyze_regex.cpp index fc1296670ccf9..5d29e2590091b 100644 --- a/src/mongo/db/query/analyze_regex.cpp +++ b/src/mongo/db/query/analyze_regex.cpp @@ -31,6 +31,7 @@ #include +#include "mongo/base/string_data.h" #include "mongo/util/ctype.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/query/bind_input_params.cpp b/src/mongo/db/query/bind_input_params.cpp index b93698b8585ce..007dd0cedb33b 100644 --- a/src/mongo/db/query/bind_input_params.cpp +++ b/src/mongo/db/query/bind_input_params.cpp @@ -29,29 +29,57 @@ #include "mongo/db/query/bind_input_params.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/js_function.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/matcher/expression.h" #include 
"mongo/db/matcher/expression_array.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/expression_where.h" +#include "mongo/db/matcher/matcher_type_set.h" +#include "mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/planner_access.h" +#include "mongo/db/query/record_id_bound.h" #include "mongo/db/query/sbe_stage_builder_filter.h" #include "mongo/db/query/sbe_stage_builder_index_scan.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" namespace mongo::input_params { namespace { class MatchExpressionParameterBindingVisitor final : public MatchExpressionConstVisitor { public: - MatchExpressionParameterBindingVisitor( - const stage_builder::InputParamToSlotMap& inputParamToSlotMap, - sbe::RuntimeEnvironment* runtimeEnvironment, - bool bindingCachedPlan) - : _inputParamToSlotMap(inputParamToSlotMap), - _runtimeEnvironment(runtimeEnvironment), - _bindingCachedPlan(bindingCachedPlan) { - invariant(_runtimeEnvironment); - } + MatchExpressionParameterBindingVisitor(stage_builder::PlanStageData& data, + bool bindingCachedPlan) + : _data(data), _bindingCachedPlan(bindingCachedPlan) {} void visit(const BitsAllClearMatchExpression* expr) final { visitBitTestExpression(expr); @@ -92,8 +120,9 @@ class MatchExpressionParameterBindingVisitor final : public MatchExpressionConst // contains any regexes. tassert(6279503, "Unexpected parameter marker for $in with regexes", !expr->hasRegex()); + auto coll = _data.staticData->queryCollator.get(); auto&& [arrSetTag, arrSetVal, hasArray, hasObject, hasNull] = - stage_builder::convertInExpressionEqualities(expr); + stage_builder::convertInExpressionEqualities(expr, coll); bindParam(*slotId, true /*owned*/, arrSetTag, arrSetVal); // Auto-parameterization should not kick in if the $in's list of equalities includes any @@ -291,7 +320,7 @@ class MatchExpressionParameterBindingVisitor final : public MatchExpressionConst if (owned) { guard.emplace(typeTag, value); } - auto accessor = _runtimeEnvironment->getAccessor(slotId); + auto accessor = _data.env->getAccessor(slotId); if (owned) { guard->reset(); } @@ -304,17 +333,14 @@ class MatchExpressionParameterBindingVisitor final : public MatchExpressionConst } boost::optional getSlotId(MatchExpression::InputParamId paramId) const { - auto it = _inputParamToSlotMap.find(paramId); - if (it != _inputParamToSlotMap.end()) { + auto it = _data.staticData->inputParamToSlotMap.find(paramId); + if (it != _data.staticData->inputParamToSlotMap.end()) { return it->second; } return boost::none; } - const stage_builder::InputParamToSlotMap& _inputParamToSlotMap; - - sbe::RuntimeEnvironment* const _runtimeEnvironment; - + stage_builder::PlanStageData& _data; // True if the plan for which we are binding parameter values is being recovered from the SBE // plan cache. 
const bool _bindingCachedPlan; @@ -392,12 +418,12 @@ void bindSingleIntervalPlanSlots(const stage_builder::IndexBoundsEvaluationInfo& indexBoundsInfo.slots.slots); runtimeEnvironment->resetSlot(singleInterval.lowKey, sbe::value::TypeTags::ksValue, - sbe::value::bitcastFrom(lowKey.release()), + sbe::value::bitcastFrom(lowKey.release()), /* owned */ true); runtimeEnvironment->resetSlot(singleInterval.highKey, sbe::value::TypeTags::ksValue, - sbe::value::bitcastFrom(highKey.release()), + sbe::value::bitcastFrom(highKey.release()), /* owned */ true); } @@ -430,11 +456,9 @@ void bindGenericPlanSlots(const stage_builder::IndexBoundsEvaluationInfo& indexB } // namespace void bind(const CanonicalQuery& canonicalQuery, - const stage_builder::InputParamToSlotMap& inputParamToSlotMap, - sbe::RuntimeEnvironment* runtimeEnvironment, + stage_builder::PlanStageData& data, const bool bindingCachedPlan) { - MatchExpressionParameterBindingVisitor visitor{ - inputParamToSlotMap, runtimeEnvironment, bindingCachedPlan}; + MatchExpressionParameterBindingVisitor visitor{data, bindingCachedPlan}; MatchExpressionParameterBindingWalker walker{&visitor}; tree_walker::walk(canonicalQuery.root(), &walker); } @@ -444,11 +468,13 @@ void bindIndexBounds( const stage_builder::IndexBoundsEvaluationInfo& indexBoundsInfo, sbe::RuntimeEnvironment* runtimeEnvironment, interval_evaluation_tree::IndexBoundsEvaluationCache* indexBoundsEvaluationCache) { - auto bounds = makeIndexBounds(indexBoundsInfo, cq, indexBoundsEvaluationCache); - auto intervals = stage_builder::makeIntervalsFromIndexBounds(*bounds, - indexBoundsInfo.direction == 1, - indexBoundsInfo.keyStringVersion, - indexBoundsInfo.ordering); + std::unique_ptr bounds = + makeIndexBounds(indexBoundsInfo, cq, indexBoundsEvaluationCache); + stage_builder::IndexIntervals intervals = + stage_builder::makeIntervalsFromIndexBounds(*bounds, + indexBoundsInfo.direction == 1, + indexBoundsInfo.keyStringVersion, + indexBoundsInfo.ordering); const bool isSingleIntervalSolution = stdx::holds_alternative< mongo::stage_builder::ParameterizedIndexScanSlots::SingleIntervalPlan>( indexBoundsInfo.slots.slots); @@ -459,4 +485,72 @@ void bindIndexBounds( indexBoundsInfo, std::move(intervals), std::move(bounds), runtimeEnvironment); } } + +void bindClusteredCollectionBounds(const CanonicalQuery& cq, + const sbe::PlanStage* root, + const stage_builder::PlanStageData* data, + sbe::RuntimeEnvironment* runtimeEnvironment) { + // Arguments needed to mimic the original build-time bounds setting from the current query. + auto clusteredBoundInfos = data->staticData->clusteredCollBoundsInfos; + const MatchExpression* conjunct = cq.root(); // this is csn->filter + bool minAndMaxEmpty = cq.getFindCommandRequest().getMin().isEmpty() && + cq.getFindCommandRequest().getMax().isEmpty(); + + // Caching OR queries with collection scans is restricted, since it is challenging to determine + // which match expressions from the input query require a clustered collection scan. Therefore, + // we cannot correctly calculate the correct bounds for the query using the cached plan. 
+ tassert(6125900, + "OR queries with clustered collection scans are not supported by the SBE cache.", + cq.root()->matchType() != MatchExpression::OR || !minAndMaxEmpty); + + tassert(7228000, + "We only expect to cache plans with one clustered collection scan.", + 1 == clusteredBoundInfos.size()); + + const CollatorInterface* queryCollator = cq.getCollator(); // current query's desired collator + + for (size_t i = 0; i < clusteredBoundInfos.size(); ++i) { + // The outputs produced by the QueryPlannerAccess APIs below (passed by reference). + boost::optional minRecord; // scan start bound + boost::optional maxRecord; // scan end bound + + // 'boundInclusion' is needed for handleRIDRangeMinMax, but we don't need to bind it to a + // slot because it is always the same as the original in a plan matched from cache since + // only the "max" keyword can change it from its default, and plans using "max" are not + // cached. + CollectionScanParams::ScanBoundInclusion boundInclusion; // whether end bound is inclusive + + // Cast the return value to void since we are not building a CollectionScanNode here so do + // not need to set it in its 'hasCompatibleCollation' member. + static_cast( + QueryPlannerAccess::handleRIDRangeScan(conjunct, + queryCollator, + data->staticData->ccCollator.get(), + data->staticData->clusterKeyFieldName, + minRecord, + maxRecord)); + QueryPlannerAccess::handleRIDRangeMinMax(cq, + data->staticData->direction, + queryCollator, + data->staticData->ccCollator.get(), + minRecord, + maxRecord, + boundInclusion); + // Bind the scan bounds to input slots. + if (minRecord) { + boost::optional minRecordId = + data->staticData->clusteredCollBoundsInfos[i].minRecord; + tassert(7571500, "minRecordId slot missing", minRecordId.has_value()); + auto [tag, val] = sbe::value::makeCopyRecordId(minRecord->recordId()); + runtimeEnvironment->resetSlot(minRecordId.value(), tag, val, true); + } + if (maxRecord) { + boost::optional maxRecordId = + data->staticData->clusteredCollBoundsInfos[i].maxRecord; + tassert(7571501, "maxRecordId slot missing", maxRecordId.has_value()); + auto [tag, val] = sbe::value::makeCopyRecordId(maxRecord->recordId()); + runtimeEnvironment->resetSlot(maxRecordId.value(), tag, val, true); + } + } +} // bindClusteredCollectionBounds } // namespace mongo::input_params diff --git a/src/mongo/db/query/bind_input_params.h b/src/mongo/db/query/bind_input_params.h index 69c6cea1e9c3c..981c151d739ee 100644 --- a/src/mongo/db/query/bind_input_params.h +++ b/src/mongo/db/query/bind_input_params.h @@ -30,8 +30,11 @@ #pragma once #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/interval_evaluation_tree.h" #include "mongo/db/query/sbe_stage_builder.h" namespace mongo::input_params { @@ -44,17 +47,14 @@ namespace mongo::input_params { * The caller should pass true for 'bindingCachedPlan' if we are binding-in new parameter values for * a plan that was recovered from the SBE plan cache. */ -void bind(const CanonicalQuery&, - const stage_builder::InputParamToSlotMap&, - sbe::RuntimeEnvironment*, - bool bindingCachedPlan); +void bind(const CanonicalQuery&, stage_builder::PlanStageData&, bool bindingCachedPlan); /** * Binds index bounds evaluated from IETs to index bounds slots for the given query. 
* * - 'cq' is the query * - 'indexBoundsInfo' contains the IETs and the slots - * - runtimeEnvironment SBE runtime environment + * - 'runtimeEnvironment' is the SBE runtime environment * - 'indexBoundsEvaluationCache' is the evaluation cache used by the explode nodes to keep the * common IET evaluation results. */ @@ -63,4 +63,19 @@ void bindIndexBounds( const stage_builder::IndexBoundsEvaluationInfo& indexBoundsInfo, sbe::RuntimeEnvironment* runtimeEnvironment, interval_evaluation_tree::IndexBoundsEvaluationCache* indexBoundsEvaluationCache = nullptr); + +/** + * If the execution tree ('root'), which was cloned from the SBE plan cache, contains an SBE + * clustered collection scan stage, this method is called to bind the current query ('cq')'s scan + * bounds into its minRecord and maxRecord slots. + * + * - 'cq' is the query + * - 'root' is the root node of the SBE execution plan from the plan cache + * - 'data' contains cached info to be substituted into the plan + * - 'runtimeEnvironment' is the SBE runtime environment + */ +void bindClusteredCollectionBounds(const CanonicalQuery& cq, + const sbe::PlanStage* root, + const stage_builder::PlanStageData* data, + sbe::RuntimeEnvironment* runtimeEnvironment); } // namespace mongo::input_params diff --git a/src/mongo/db/query/boolean_simplification/SConscript b/src/mongo/db/query/boolean_simplification/SConscript new file mode 100644 index 0000000000000..dbfc50f762e65 --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/SConscript @@ -0,0 +1,38 @@ +# -*- mode: python -*- + +Import("env") + +env.Library( + target="boolean_simplification", + source=[ + "bitset_algebra.cpp", + "petrick.cpp", + "quine_mccluskey.cpp", + ], + LIBDEPS=[ + "$BUILD_DIR/mongo/db/mongohasher", + ], +) + +env.CppUnitTest( + target="boolean_simplification_test", + source=[ + "bitset_algebra_test.cpp", + "petrick_test.cpp", + "quine_mccluskey_test.cpp", + ], + LIBDEPS=[ + "$BUILD_DIR/mongo/db/query_expressions", + "boolean_simplification", + ], +) + +env.Benchmark( + target="quine_mccluskey_bm", + source=[ + "quine_mccluskey_bm.cpp", + ], + LIBDEPS=[ + "boolean_simplification", + ], +) diff --git a/src/mongo/db/query/boolean_simplification/bitset_algebra.cpp b/src/mongo/db/query/boolean_simplification/bitset_algebra.cpp new file mode 100644 index 0000000000000..72fb0df5a334a --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/bitset_algebra.cpp @@ -0,0 +1,152 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. 
If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/query/boolean_simplification/bitset_algebra.h" + +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/stream_utils.h" + +namespace mongo::boolean_simplification { +Maxterm::Maxterm(size_t size) : _numberOfBits(size) {} + +Maxterm::Maxterm(std::initializer_list init) + : minterms(std::move(init)), _numberOfBits(0) { + tassert(7507918, "Maxterm cannot be initilized with empty list of minterms", !minterms.empty()); + for (auto& minterm : minterms) { + _numberOfBits = std::max(minterm.size(), _numberOfBits); + } + + for (auto& minterm : minterms) { + if (_numberOfBits > minterm.size()) { + minterm.resize(_numberOfBits); + } + } +} + +std::string Maxterm::toString() const { + std::ostringstream oss{}; + oss << *this; + return oss.str(); +} + +Maxterm& Maxterm::operator|=(const Minterm& rhs) { + minterms.emplace_back(rhs); + return *this; +} + +Maxterm Maxterm::operator~() const { + if (minterms.empty()) { + return {Minterm{numberOfBits()}}; + } + + Maxterm result = ~minterms.front(); + for (size_t i = 1; i < minterms.size(); ++i) { + result &= ~minterms[i]; + } + + return result; +} + +void Maxterm::removeRedundancies() { + stdx::unordered_set seen{}; + std::vector newMinterms{}; + for (const auto& minterm : minterms) { + const bool isAlwaysTrue = minterm.mask.none(); + if (isAlwaysTrue) { + newMinterms.clear(); + newMinterms.emplace_back(minterm); + break; + } + auto [it, isInserted] = seen.insert(minterm); + if (isInserted) { + newMinterms.push_back(minterm); + } + } + + minterms.swap(newMinterms); +} + +void Maxterm::append(size_t bitIndex, bool val) { + minterms.emplace_back(_numberOfBits, bitIndex, val); +} + +void Maxterm::appendEmpty() { + minterms.emplace_back(_numberOfBits); +} + +Maxterm Minterm::operator~() const { + Maxterm result{size()}; + for (size_t i = 0; i < mask.size(); ++i) { + if (mask[i]) { + result |= Minterm(mask.size(), i, !predicates[i]); + } + } + return result; +} + +bool operator==(const Minterm& lhs, const Minterm& rhs) { + return lhs.predicates == rhs.predicates && lhs.mask == rhs.mask; +} + +std::ostream& operator<<(std::ostream& os, const Minterm& minterm) { + os << '(' << minterm.predicates << ", " << minterm.mask << ")"; + return os; +} + +Maxterm& Maxterm::operator|=(const Maxterm& rhs) { + for (auto& right : rhs.minterms) { + *this |= right; + } + return *this; +} + +Maxterm& Maxterm::operator&=(const Maxterm& rhs) { + Maxterm result = *this & rhs; + minterms.swap(result.minterms); + return *this; +} + +bool operator==(const Maxterm& lhs, const Maxterm& rhs) { + return lhs.minterms == rhs.minterms; +} + +std::ostream& operator<<(std::ostream& os, const Maxterm& maxterm) { + using mongo::operator<<; + return os << maxterm.minterms; +} +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/boolean_simplification/bitset_algebra.h b/src/mongo/db/query/boolean_simplification/bitset_algebra.h new file mode 100644 index 0000000000000..6ec29f555b022 --- /dev/null +++ 
b/src/mongo/db/query/boolean_simplification/bitset_algebra.h @@ -0,0 +1,205 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" + +namespace mongo::boolean_simplification { + +/** + * This file defines Maxterm and Minterm classes and operations over them. Maxterm/Minterms are used + * to represent a boolean expression in a canonical form. For example, for Disjunctive Normal Form, + * a Maxterm is used to represent the top disjunctive term and minterms are used to represent the + * children conjunctive terms. + */ + +using Bitset = boost::dynamic_bitset; + +inline Bitset operator""_b(const char* bits, size_t len) { + return Bitset{std::string{bits, len}}; +} + +struct Minterm; + +/** + * Maxterm represents top disjunction of an expression in Disjunctive Normal Form and consists of a + * list of children conjunctions. Each child conjunction is represented as a Minterm. + */ +struct Maxterm { + explicit Maxterm(size_t size); + Maxterm(std::initializer_list init); + + Maxterm& operator|=(const Minterm& rhs); + Maxterm& operator|=(const Maxterm& rhs); + Maxterm& operator&=(const Maxterm& rhs); + Maxterm operator~() const; + + /** + * Removes redundant minterms from the maxterm. A minterm might be redundant if it is a + * duplicate of another or the maxterm contains an empty minterm which means that it is always + * true and any other minters are not needed. + */ + void removeRedundancies(); + + /** + * Appends a new minterm with the bit at 'bitIndex' set to 'val' and all other bits unset. + */ + void append(size_t bitIndex, bool val); + + /** + * Appends empty minterm. + */ + void appendEmpty(); + + /** + * Returns the number of bits that each individual minterm in the maxterm contains. 
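For readers new to this representation, here is a short usage sketch of the (predicates, mask) encoding documented above, using only constructors and methods declared in this header. It assumes the bit-string constructor reads characters most-significant-bit first (the boost::dynamic_bitset convention); the bit assignment A = bit 0, B = bit 1, C = bit 2 is illustrative.

```cpp
#include "mongo/db/query/boolean_simplification/bitset_algebra.h"

using namespace mongo::boolean_simplification;

void representationExample() {
    // (A AND NOT B) OR C as a maxterm of two minterms.
    Maxterm dnf{
        Minterm{"001", "011"},  // A & ~B: A and B participate (mask 011); A = 1, B = 0
        Minterm{"100", "100"},  // C: only the C bit participates
    };
    // dnf.numberOfBits() == 3. A further disjunct consisting of the single predicate B:
    dnf.append(/*bitIndex=*/1, /*val=*/true);
}
```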
+ */ + size_t numberOfBits() const { + return _numberOfBits; + } + + std::string toString() const; + + std::vector minterms; + +private: + size_t _numberOfBits; + + friend Maxterm operator&(const Maxterm& lhs, const Maxterm& rhs); +}; + +/** + * Minterms represent a conjunction of an expression in Disjunctive Normal Form and consists of + * predicates which can be in true (for a predicate A, true form is just A) of false forms (for a + * predicate A the false form is the negation of A: ~A). Every predicate is represented by a bit in + * the predicates bitset. + */ +struct Minterm { + explicit Minterm(size_t nbits) : predicates(nbits, 0), mask(nbits, 0) {} + Minterm(StringData bits, StringData mask) + : predicates{bits.toString()}, mask{mask.toString()} {} + Minterm(size_t nbits, size_t bitIndex, bool val) : predicates(nbits, 0), mask(nbits, 0) { + predicates.set(bitIndex, val); + mask.set(bitIndex, true); + } + Minterm(Bitset bitset, Bitset mask) : predicates(bitset), mask(mask) {} + + void set(size_t bitIndex, bool value) { + if (mask.size() <= bitIndex) { + constexpr size_t blockSize = sizeof(Bitset::block_type); + const size_t newSize = (1 + bitIndex / blockSize) * blockSize; + resize(newSize); + } + mask.set(bitIndex); + predicates.set(bitIndex, value); + } + + /** + * Returns the set of bits in which the conflicting bits of the minterms are set. The bits of + * two minterms are conflicting if in one minterm the bit is set to 1 and in another to 0. + */ + inline Bitset getConflicts(const Minterm& other) const { + return (predicates ^ other.predicates) & (mask & other.mask); + } + + Maxterm operator~() const; + + size_t size() const { + return mask.size(); + } + + void resize(size_t newSize) { + predicates.resize(newSize); + mask.resize(newSize); + } + + /** + * Predicates bitset, if a predicate takes part in the conjunction its corresponding bit in the + * predicates bitset set to 1 if the predicate in true form or to 0 otherwise. + */ + Bitset predicates; + + /** + * Predicates mask, if a predicate takes part in the conjunction its corresponding bit set to 1. + */ + Bitset mask; +}; + +inline Maxterm operator&(const Minterm& lhs, const Minterm& rhs) { + if (lhs.getConflicts(rhs).any()) { + return Maxterm{lhs.size()}; + } + return {{Minterm(lhs.predicates | rhs.predicates, lhs.mask | rhs.mask)}}; +} + +inline Maxterm operator&(const Maxterm& lhs, const Maxterm& rhs) { + Maxterm result{lhs.numberOfBits()}; + result.minterms.reserve(lhs.minterms.size() * rhs.minterms.size()); + for (const auto& left : lhs.minterms) { + for (const auto& right : rhs.minterms) { + result |= left & right; + } + } + return result; +} + +bool operator==(const Minterm& lhs, const Minterm& rhs); +std::ostream& operator<<(std::ostream& os, const Minterm& minterm); +bool operator==(const Maxterm& lhs, const Maxterm& rhs); +std::ostream& operator<<(std::ostream& os, const Maxterm& maxterm); + +} // namespace mongo::boolean_simplification + +namespace std { +template <> +struct hash { + using argument_type = mongo::boolean_simplification::Minterm; + using result_type = size_t; + + result_type operator()(const argument_type& mt) const { + // Just some random value for the seed. 
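A companion sketch for getConflicts() and the minterm conjunction operator declared above: two minterms that disagree on a bit set in both masks produce the always-false (empty) maxterm, while compatible minterms merge their predicates and masks. The bit assignment (A = bit 0, B = bit 1) is again illustrative.

```cpp
#include "mongo/db/query/boolean_simplification/bitset_algebra.h"

using namespace mongo::boolean_simplification;

void conjunctionExample() {
    Minterm aAndB{"11", "11"};     // A & B
    Minterm aAndNotB{"01", "11"};  // A & ~B
    // The minterms disagree on the masked B bit, so getConflicts() has that bit set and
    // the conjunction is the always-false maxterm (it contains no minterms).
    Maxterm alwaysFalse = aAndB & aAndNotB;

    Minterm justA{"01", "01"};  // A, with B unconstrained
    // No conflicting bits: predicates and masks are OR-ed together, yielding A & B.
    Maxterm ab = aAndB & justA;
}
```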
+ result_type seed{3037}; + boost::hash_combine(seed, mt.predicates); + boost::hash_combine(seed, mt.mask); + return seed; + } +}; +} // namespace std diff --git a/src/mongo/db/query/boolean_simplification/bitset_algebra_test.cpp b/src/mongo/db/query/boolean_simplification/bitset_algebra_test.cpp new file mode 100644 index 0000000000000..d870f26d8e8ea --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/bitset_algebra_test.cpp @@ -0,0 +1,230 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/query/boolean_simplification/bitset_algebra.h" + +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + +namespace mongo::boolean_simplification { +constexpr size_t nbits = 64; + +TEST(MintermOperationsTest, AAndB) { + Minterm a{"01", "01"}; + Minterm b{"10", "10"}; + Maxterm expectedResult{{"11", "11"}}; + + auto result = a & b; + ASSERT_EQ(expectedResult, result); +} + +TEST(MintermOperationsTest, AAndNotB) { + Minterm a{"01", "01"}; + Minterm b{"00", "10"}; + Maxterm expectedResult{{"01", "11"}}; + + auto result = a & b; + ASSERT_EQ(expectedResult, result); +} + +TEST(MintermOperationsTest, AAndNotA) { + Minterm a{"1", "1"}; + Minterm na{"0", "1"}; + Maxterm expectedResult{a.size()}; + + auto result = a & na; + ASSERT_EQ(expectedResult, result); +} + +TEST(MintermOperationsTest, AAndA) { + Minterm a1{"1", "1"}; + Minterm a2{"1", "1"}; + Maxterm expectedResult{{"1", "1"}}; + + auto result = a1 & a2; + ASSERT_EQ(expectedResult, result); +} + +TEST(MintermOperationsTest, ACDAndB) { + Minterm acd{"1101", "1101"}; + Minterm b{"0010", "0010"}; + Maxterm expectedResult{{"1111", "1111"}}; + + auto result = acd & b; + ASSERT_EQ(expectedResult, result); +} + +TEST(MintermOperationsTest, ComplexExpr) { + Minterm acnbd{"1101", "1111"}; + Minterm b{"0010", "0010"}; + Maxterm expectedResult{b.size()}; + + auto result = acnbd & b; + ASSERT_EQ(expectedResult, result); +} + +TEST(MintermOperationsTest, Not) { + Minterm a{"00010001", "00110011"}; + Maxterm expectedResult({ + {"00000000", "00000001"}, + {"00000010", "00000010"}, + {"00000000", "00010000"}, + {"00100000", "00100000"}, + }); + + auto result = ~a; + ASSERT_EQ(expectedResult, result); +} + +TEST(MaxtermOperationsTest, ABOrC) { + Maxterm ab{{"011", "011"}}; + Maxterm c{{"100", "100"}}; + Maxterm expectedResult{ + {"011", "011"}, + {"100", "100"}, + }; + + ab |= c; + ASSERT_EQ(ab, expectedResult); +} + +TEST(MaxtermOperationsTest, ABOrA) { + Maxterm ab{{"11", "11"}}; + Maxterm a{{"01", "01"}}; + Maxterm expectedResult{ + {"11", "11"}, + {"01", "01"}, + }; + + ab |= a; + ASSERT_EQ(ab, expectedResult); +} + +// (AB | A ) |= (~AC | BD) +TEST(MaxtermOperationsTest, ComplexOr) { + Maxterm abOrA{ + {"0011", "0011"}, + {"0001", "0001"}, + }; + Maxterm nacOrBd{ + {"0100", "0101"}, + {"1010", "1010"}, + }; + Maxterm expectedResult{ + {"0011", "0011"}, // A & B + {"0001", "0001"}, // A + {"0100", "0101"}, // ~A & C + {"1010", "1010"}, // B & D + }; + + abOrA |= nacOrBd; + ASSERT_EQ(abOrA, expectedResult); +} + +// (A | B) & C +TEST(MaxtermOperationsTest, ComplexAnd) { + Maxterm aOrB{ + {"001", "001"}, + {"010", "010"}, + }; + + Maxterm c{ + {"100", "100"}, + }; + + Maxterm expectedResult{ + {"101", "101"}, + {"110", "110"}, + }; + + auto result = aOrB & c; + ASSERT_EQ(expectedResult, result); +} + +// "(A | B) &= C" +TEST(MaxtermOperationsTest, ComplexUsingAndAssignmentOperator) { + Maxterm aOrB{ + {"001", "001"}, + {"010", "010"}, + }; + + Maxterm c{ + {"100", "100"}, + }; + + Maxterm expectedResult{ + {"101", "101"}, + {"110", "110"}, + }; + + aOrB &= c; + ASSERT_EQ(expectedResult, aOrB); +} + +// (A | B) & (C | ~D) +TEST(MaxtermOperationsTest, ComplexAnd2) { + Maxterm aOrB{ + {"0001", "0001"}, + {"0010", "0010"}, + }; + + Maxterm cOrNd{ + {"0100", "0100"}, + {"0000", "1000"}, + }; + + Maxterm expectedResult{ + {"0101", "0101"}, // A & C + {"0001", "1001"}, // A & ~D + {"0110", "0110"}, // B & C + {"0010", "1010"}, // B & ~D + }; + + auto result = aOrB & cOrNd; + ASSERT_EQ(expectedResult, result); +} + 
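The MintermOperationsTest::Not case above is De Morgan's law applied to the bitset encoding; the derivation below spells it out (bit 0 is the rightmost character of the bit strings).

```latex
\text{predicates} = 00010001,\;\; \text{mask} = 00110011
\;\;\equiv\;\; x_0 \land \lnot x_1 \land x_4 \land \lnot x_5,
\qquad
\lnot(x_0 \land \lnot x_1 \land x_4 \land \lnot x_5)
\;=\; \lnot x_0 \;\lor\; x_1 \;\lor\; \lnot x_4 \;\lor\; x_5 .
```

Each disjunct becomes a one-bit minterm, giving exactly the four expected (predicates, mask) pairs: (00000000, 00000001), (00000010, 00000010), (00000000, 00010000) and (00100000, 00100000).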
+// not (BC | A~D) +TEST(MaxtermOperationsTest, ComplexNot) { + Maxterm bcOrAnd{ + {"0110", "0110"}, + {"0001", "1001"}, + }; + + Maxterm expectedResult{ + {"0000", "0011"}, // ~A & ~B + {"1000", "1010"}, // ~B & D + {"0000", "0101"}, // ~A & ~C + {"1000", "1100"}, // ~C & D + }; + + auto result = ~bcOrAnd; + ASSERT_EQ(expectedResult, result); +} +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/boolean_simplification/petrick.cpp b/src/mongo/db/query/boolean_simplification/petrick.cpp new file mode 100644 index 0000000000000..26d995d0de14a --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/petrick.cpp @@ -0,0 +1,219 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/query/boolean_simplification/petrick.h" + +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +namespace mongo::boolean_simplification { +namespace { + +class PrimeImplicant { +public: + PrimeImplicant() {} + + PrimeImplicant(size_t numberOfBits, size_t implicantIndex) : _implicant(numberOfBits) { + _implicant.set(implicantIndex); + } + + explicit PrimeImplicant(boost::dynamic_bitset bitset) : _implicant(std::move(bitset)) {} + + /** + * Returns true if 'this' is a non-strict subset of 'other'. + */ + bool isSubset(const PrimeImplicant& other) const { + return (_implicant & other._implicant) == _implicant; + } + + std::vector getListOfSetBits() const { + std::vector result{}; + for (uint32_t i = _implicant.find_first(); i < _implicant.size(); + i = _implicant.find_next(i)) { + result.emplace_back(i); + } + return result; + } + + size_t numberOfSetBits() const { + return _implicant.count(); + } + + friend PrimeImplicant operator|(const PrimeImplicant& lhs, const PrimeImplicant& rhs); + +private: + boost::dynamic_bitset _implicant; +}; + +PrimeImplicant operator|(const PrimeImplicant& lhs, const PrimeImplicant& rhs) { + return PrimeImplicant(lhs._implicant | rhs._implicant); +} + +/** + * Sum (union) of prime implicants. 
+ */ +class ImplicantSum { +public: + void appendNewImplicant(size_t numberOfBits, size_t implicantIndex) { + _implicants.emplace_back(numberOfBits, implicantIndex); + } + + /** + * Inserts the 'implicant'. Uses the absorption law to minimize the number of + * implicants. Three outcomes are possible: 1. The implicant is inserted. 2. The implicant is + * inserted and some existing implicants are removed due to being absorbed by the new one. 3. + * The implicant is not inserted because it was absorbed by one of the existing implicants. + */ + void insert(PrimeImplicant implicant) { + size_t size = _implicants.size(); + size_t pos = 0; + const int32_t implicantNumberOfSetBits = static_cast(implicant.numberOfSetBits()); + + while (pos < size) { + auto& current = _implicants[pos]; + const int32_t diffInNumberOfSetBits = + static_cast(current.numberOfSetBits()) - implicantNumberOfSetBits; + // Here we apply the absorption law: X + XY = X. + if (diffInNumberOfSetBits <= 0 && current.isSubset(implicant)) { + // Current is a non-strict subset of the new implicant, we don't need to add + // implicant. + return; + } else if (diffInNumberOfSetBits > 0 && implicant.isSubset(current)) { + // New implicant is a subset of the current, it means we remove the current, by + // swapping the current element with the last element. The last elements will be + // deleted in the end of the function by calling resize(). + --size; + std::swap(current, _implicants[size]); + --pos; + } + ++pos; + } + + // Erase removed elements and allocate memory for the new one if required. + _implicants.resize(size + 1); + // Insert new implicant. + _implicants[_implicants.size() - 1] = std::move(implicant); + } + + /** + * Finds the product of two implicant sums using De Morgan's laws. + */ + ImplicantSum product(const ImplicantSum& other) const { + // E.g., one implicant sum covers minterms with indices 0 and 1, and another with 0 and 2. + // (I0 + I1) * (I0 + I2) = I0 + I0*I2 + I0*I1 + I1*I2. + ImplicantSum result{}; + for (const auto& l : _implicants) { + for (const auto& r : other._implicants) { + auto implicant = l | r; + // Trying to add to the result every applicant we got here. In the example above it + // would be I0, I0*I2, I0*I1, I1*I2. 'insertImplicant()' applies the absorption + // law (X + XY = X) to minimize number of implicants. In the example only I0 + // and I1*I2 would be added, because I0 "absorbs" I0*I2 and I0*I1. + result.insert(std::move(implicant)); + } + } + + return result; + } + + void swap(ImplicantSum& other) { + _implicants.swap(other._implicants); + } + + /** + * Expands a bitset representation of each prime implicant into a vector of minterm indexes and + * returns the resulting vector. + */ + std::vector> getCoverages() const { + std::vector> result{}; + result.reserve(_implicants.size()); + + for (const auto& implicant : _implicants) { + result.emplace_back(implicant.getListOfSetBits()); + } + + return result; + } + +private: + std::vector _implicants; +}; + +/** + * The Petrick's method implementation using tabular approach. 
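ImplicantSum::insert() and product() above implement sum-of-products multiplication together with the absorption law X + XY = X. The following standalone sketch (a hypothetical helper written for this note, not the internal ImplicantSum class) reproduces that step on small fixed-size bitsets and compiles on its own.

```cpp
#include <algorithm>
#include <bitset>
#include <iostream>
#include <vector>

using Product = std::bitset<8>;  // bit i set => prime implicant i is part of the product

// Insert 'p' into the sum of products, applying the absorption law X + XY = X.
void insertWithAbsorption(std::vector<Product>& sum, Product p) {
    // If an existing product q is a subset of p, then q absorbs p: nothing to add.
    for (const Product& q : sum) {
        if ((q & p) == q)
            return;
    }
    // Otherwise p absorbs any existing product that is a superset of p.
    sum.erase(std::remove_if(sum.begin(),
                             sum.end(),
                             [&](const Product& q) { return (p & q) == p; }),
              sum.end());
    sum.push_back(p);
}

int main() {
    // (P0 + P1) * (P0 + P2) = P0 + P0*P2 + P0*P1 + P1*P2, which absorption reduces to P0 + P1*P2.
    std::vector<Product> lhs{Product{0b001}, Product{0b010}};  // P0, P1
    std::vector<Product> rhs{Product{0b001}, Product{0b100}};  // P0, P2
    std::vector<Product> result;
    for (const Product& l : lhs)
        for (const Product& r : rhs)
            insertWithAbsorption(result, l | r);
    for (const Product& p : result)
        std::cout << p << '\n';  // prints 00000001 (P0) and 00000110 (P1*P2)
}
```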
+ */ +class TabularPetrick { +public: + explicit TabularPetrick(const std::vector>& data) + : _numberOfBits(data.size()) { + for (size_t implicantIndex = 0; implicantIndex < data.size(); ++implicantIndex) { + for (auto mintermIndex : data[implicantIndex]) { + insert(mintermIndex, implicantIndex); + } + } + } + + std::vector> getMinimalCoverages() { + while (_table.size() > 1) { + const size_t size = _table.size(); + auto productResult = _table[size - 1].product(_table[size - 2]); + _table.pop_back(); + _table[_table.size() - 1].swap(productResult); + } + + return _table.front().getCoverages(); + } + +private: + void insert(size_t mintermIndex, size_t implicantIndex) { + if (_table.size() <= mintermIndex) { + _table.resize(mintermIndex + 1); + } + + _table[mintermIndex].appendNewImplicant(_numberOfBits, implicantIndex); + }; + + const size_t _numberOfBits; + std::vector _table; +}; +} // namespace + +std::vector petricksMethod( + const std::vector& data) { + if (data.empty()) { + return {}; + } + TabularPetrick table{data}; + return table.getMinimalCoverages(); +} +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/boolean_simplification/petrick.h b/src/mongo/db/query/boolean_simplification/petrick.h new file mode 100644 index 0000000000000..7de158d63e3fc --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/petrick.h @@ -0,0 +1,65 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +namespace mongo::boolean_simplification { +/** + * The list of original minterms covered by a derived minterm (a.k.a. prime implicant). + */ +using CoveredOriginalMinterms = std::vector; + +/** + * Represents a list of prime implicants, identified by their indices. + */ +using PrimeImplicantIndices = std::vector; + + +/** + * An implementation of Petrick's Method: https://en.wikipedia.org/wiki/Petrick%27s_method. This is + * an algorithm for finding a minimum sum-of-products expression given as input a list of prime + * implicants found by the Quine-McCluskey algorithm. The outer list has an element for each minterm + * found by the Quine-McCluskey algorithm. 
The inner list describes which of the original minterms + * are covered by that prime implicant minterm, referring to the indexes of the original minterms. + * Returns a list of a minimal sets of the indices of the original minterms that covers all of the + * original minterms. The caller is expected to choose a coverage which has the fewest number of + * minterms, and if there is still a tie to choose the coverage with the fewest number of literals. + * For example, consider the following input [[1, 2, 3], [3, 4], [0, 4, 5]]. We can see that the + * original maxterm given to the Quine-McCluskey algorithm has 6 minterms, indexed from 0 to 5. The + * Quine-McCluskey algorithm managed to simplify the original maxterm to the new one with just 3 + * minterms, indexed from 0 to 2. For this example, we expect the output to be [[0, 2]], because + * only 2 prime implicants with indices 0 and 2 are enough to cover all 6 original minterms. It is + * possible that we can get more than one coverage as output. For the given input: [[0, 1, 2], [2, + * 3], [0, 3]] two coverages are possible: [[0, 1], [0, 2]]. + */ +std::vector petricksMethod(const std::vector& data); +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/boolean_simplification/petrick_test.cpp b/src/mongo/db/query/boolean_simplification/petrick_test.cpp new file mode 100644 index 0000000000000..f8ef6e9802d9a --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/petrick_test.cpp @@ -0,0 +1,134 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/boolean_simplification/petrick.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + +namespace mongo::boolean_simplification { +/** + * The classic demonstration of how the Petric's method works. This example is available at the + * Wikipedia page at the moment + * (https://en.wikipedia.org/w/index.php?title=Petrick%27s_method&oldid=1142937196) and can be found + * in a number of publications as well. 
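A brief usage sketch matching the worked example in the header comment above; it assumes the element type behind CoveredOriginalMinterms is an unsigned integer index, as the unit tests below suggest.

```cpp
#include <vector>

#include "mongo/db/query/boolean_simplification/petrick.h"

using namespace mongo::boolean_simplification;

void petrickExample() {
    // Prime implicant 0 covers original minterms {1, 2, 3}, implicant 1 covers {3, 4},
    // and implicant 2 covers {0, 4, 5}.
    std::vector<CoveredOriginalMinterms> coverages{{1, 2, 3}, {3, 4}, {0, 4, 5}};
    auto minimalCoverages = petricksMethod(coverages);
    // Per the example above, the expected result is a single coverage {0, 2}: implicants
    // 0 and 2 together cover all six original minterms.
}
```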
+ */ +TEST(PetrictTest, ClaassicExample) { + std::vector> data{ + {0, 1}, + {0, 3}, + {1, 2}, + {3, 4}, + {2, 5}, + {4, 5}, + }; + + std::vector> expectedResult{ + {0, 3, 4}, + {1, 2, 3, 4}, + {1, 2, 5}, + {0, 1, 4, 5}, + {0, 2, 3, 5}, + }; + + const auto result = petricksMethod(data); + ASSERT_EQ(expectedResult, result); +} + +TEST(PetrictTest, OneCoverage) { + std::vector> data{ + {1, 2, 3}, + {3, 4}, + {0, 4, 5}, + }; + + std::vector> expectedResult{ + {0, 2}, + }; + + const auto result = petricksMethod(data); + ASSERT_EQ(expectedResult, result); +} + +TEST(PetrictTest, TwoCoverages) { + std::vector> data{ + {0, 1, 2}, + {2, 3}, + {0, 3}, + }; + + std::vector> expectedResult{ + {0, 1}, + {0, 2}, + }; + + const auto result = petricksMethod(data); + ASSERT_EQ(expectedResult, result); +} + +TEST(PetrictTest, NoSimplifications) { + std::vector> data{ + {0}, + {1}, + {2}, + {3}, + }; + + std::vector> expectedResult{ + {0, 1, 2, 3}, + }; + + const auto result = petricksMethod(data); + ASSERT_EQ(expectedResult, result); +} + +TEST(PetrictTest, OneMinterm) { + std::vector> data{ + {0}, + }; + + std::vector> expectedResult{ + {0}, + }; + + const auto result = petricksMethod(data); + ASSERT_EQ(expectedResult, result); +} + +TEST(PetrictTest, NoMinterms) { + std::vector> data{}; + + std::vector> expectedResult{}; + + const auto result = petricksMethod(data); + ASSERT_EQ(expectedResult, result); +} +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/boolean_simplification/quine_mccluskey.cpp b/src/mongo/db/query/boolean_simplification/quine_mccluskey.cpp new file mode 100644 index 0000000000000..2a303162a5d03 --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/quine_mccluskey.cpp @@ -0,0 +1,224 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/query/boolean_simplification/quine_mccluskey.h" + +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/stdx/unordered_set.h" + +namespace mongo::boolean_simplification { +namespace { + +struct MintermData { + MintermData(Minterm minterm, CoveredOriginalMinterms coveredMinterms) + : minterm(std::move(minterm)), + coveredMinterms(std::move(coveredMinterms)), + combined(false) {} + + Minterm minterm; + + // List of indices of original input minterms which are "covered" by the current derived + // minterm. The original minterm is covered by all minterms which are produced + // by combinations of the original minterm. + CoveredOriginalMinterms coveredMinterms; + + // Set to true for minterms which are combination of at least two other minterms. + bool combined; +}; + +/** + * A utility class that helps to organise minterms by the number of bits set. This is the main + * internal data structure of the Quine–McCluskey algorithm. It contains minterms organized by + * number of bits set to 1 in predicates list. The QMC algorithm can combine minterms which have the + * same mask and the number of bits in the predicates differ by 1. + */ +struct QmcTable { + explicit QmcTable(size_t maximumSize) { + table.reserve(maximumSize); + } + + QmcTable(std::vector minterms) { + size_t size = 0; + for (const auto& minterm : minterms) { + size = std::max(minterm.predicates.count(), size); + } + table.resize(size); + + for (uint32_t i = 0; i < static_cast(minterms.size()); ++i) { + insert(std::move(minterms[i]), std::vector{i}); + } + } + + void insert(Minterm minterm, std::vector coveredMinterms) { + const auto count = minterm.predicates.count(); + if (table.size() <= count) { + table.resize(count + 1); + } + table[count].emplace_back(std::move(minterm), std::move(coveredMinterms)); + } + + bool empty() const { + return table.empty(); + } + + size_t size() const { + return table.size(); + } + + // List of minterms origanized by number of true predicates. + std::vector> table; +}; + +/** + * Main step of the Quine-McCluskey method. It combines minterms that differ by only one bit and + * builds a new QMC table to be used for the next iteration. + */ +QmcTable combine(QmcTable& qmc) { + QmcTable result{qmc.size()}; + + for (size_t i = 0; i < qmc.table.size() - 1; ++i) { + // QmcTable organizes minterms by number of true predicates in them. Therefore, here we + // always try to combine minterms where the number of true predicates differ by 1. + for (auto& lhs : qmc.table[i]) { + for (auto& rhs : qmc.table[i + 1]) { + // We combine two minterms if and only if: + // 1. They have the same mask. + if (lhs.minterm.mask != rhs.minterm.mask) { + continue; + } + const auto differentBits = lhs.minterm.predicates ^ rhs.minterm.predicates; + // 2. The number of true predicates differs by 1. + if (differentBits.count() == 1) { + lhs.combined = true; + rhs.combined = true; + + CoveredOriginalMinterms coveredMinterms{}; + coveredMinterms.reserve(lhs.coveredMinterms.size() + + rhs.coveredMinterms.size()); + std::merge(begin(lhs.coveredMinterms), + end(lhs.coveredMinterms), + begin(rhs.coveredMinterms), + end(rhs.coveredMinterms), + back_inserter(coveredMinterms)); + // Main QMC step: Adding the new combined minterm which is a combination of two + // minterms which have the same masks and the number of set bits in the + // predicates differs by 1. 
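The combining rule implemented in combine() above (same mask, predicates differing in exactly one bit, the differing bit dropped from the mask) can be seen on a single pair of minterms. Reading the bit strings left to right as A, B, C:

```latex
\underbrace{(111,\;111)}_{A \land B \land C}
\;\text{ combines with }\;
\underbrace{(101,\;111)}_{A \land \lnot B \land C}
\;\longrightarrow\;
\bigl(111 \,\&\, 101,\;\; 111 \,\&\, {\sim}010\bigr)
\;=\; (101,\;101) \;=\; A \land C .
```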
Now we can use this minterm only instead of the two + // originals. It unsets the differing bit from the mask. + result.insert(Minterm{lhs.minterm.predicates & rhs.minterm.predicates, + lhs.minterm.mask & ~differentBits}, + std::move(coveredMinterms)); + } + } + } + } + return result; +} + +size_t getCoverageCost(const std::vector& coverage, const Maxterm& maxterm) { + size_t cost = coverage.size() * maxterm.numberOfBits(); + for (const auto& mintermIndex : coverage) { + cost += maxterm.minterms[mintermIndex].mask.count(); + } + return cost; +} + +/** + * Choose a coverage which has the fewest number of minterms, and if there is still a tie to + * choose the coverage with the fewest number of literals. + */ +const std::vector& findOptimalCoverage( + const std::vector>& coverages, const Maxterm& maxterm) { + return *std::min_element( + begin(coverages), end(coverages), [&maxterm](const auto& lhs, const auto& rhs) { + return getCoverageCost(lhs, maxterm) < getCoverageCost(rhs, maxterm); + }); +} +} // namespace + +std::pair> findPrimeImplicants(Maxterm maxterm) { + std::pair> result{Maxterm{maxterm.numberOfBits()}, + {}}; + QmcTable qmc{std::move(maxterm.minterms)}; + stdx::unordered_set seenMinterms{}; + + while (!qmc.empty()) { + auto combinedTable = combine(qmc); + + for (auto&& mintermDataRow : qmc.table) { + for (auto&& mintermData : mintermDataRow) { + Minterm minterm{std::move(mintermData.minterm)}; + // If the minterm was not combined during this step we need to preserve it in the + // result. + if (!mintermData.combined && seenMinterms.insert(minterm).second) { + result.first.minterms.emplace_back(std::move(minterm)); + result.second.emplace_back(std::move(mintermData.coveredMinterms)); + } + } + } + + std::swap(qmc, combinedTable); + } + + return result; +} + +Maxterm quineMcCluskey(Maxterm inputMaxterm) { + auto [maxterm, maxtermCoverage] = findPrimeImplicants(std::move(inputMaxterm)); + const auto& primeImplicantCoverages = petricksMethod(maxtermCoverage); + if (primeImplicantCoverages.size() < 2) { + return maxterm; + } + + const auto& minCoverage = findOptimalCoverage(primeImplicantCoverages, maxterm); + + // All minterms are included into the minumal coverage. + if (minCoverage.size() == maxterm.minterms.size()) { + return maxterm; + } + + std::vector selectedMinterms{}; + selectedMinterms.reserve(minCoverage.size()); + for (const auto& mintermIndex : minCoverage) { + selectedMinterms.emplace_back(std::move(maxterm.minterms[mintermIndex])); + } + + maxterm.minterms.swap(selectedMinterms); + return maxterm; +} + +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/boolean_simplification/quine_mccluskey.h b/src/mongo/db/query/boolean_simplification/quine_mccluskey.h new file mode 100644 index 0000000000000..c49ec44951b9b --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/quine_mccluskey.h @@ -0,0 +1,72 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
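getCoverageCost() above charges each candidate coverage numberOfBits per minterm plus one per masked literal, so coverages with fewer minterms, and then fewer literals, tend to win. A small worked example with hypothetical masks over a 4-bit maxterm:

```latex
\operatorname{cost}(C) \;=\; |C|\cdot \text{numberOfBits} \;+\; \sum_{m \in C}\operatorname{popcount}(m.\text{mask})
```

With numberOfBits = 4, a coverage of two minterms with masks 1010 and 0101 costs 2·4 + (2 + 2) = 12, while a coverage of three minterms with masks 1110, 1101 and 0011 costs 3·4 + (3 + 3 + 2) = 20, so findOptimalCoverage() keeps the first one.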
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +#include "mongo/db/query/boolean_simplification/bitset_algebra.h" +#include "mongo/db/query/boolean_simplification/petrick.h" +#include "mongo/stdx/unordered_set.h" + +namespace mongo::boolean_simplification { +/** + * This is an implementation of the finding prime implicants step of the Quine–McCluskey algorithm. + * Quine-McCluskey is a method used for minimizing boolean expressions. The function takes a maxterm + * to simplify and returns the simplified maxterm (aka prime implicants) and a vector of indices of + * covered input minterms by the corresponding derived minterms. The vector of covered input + * minterms is supposed to be used in the next optimization step that calculates a minimal coverage. + * See 'petricksMethod()' for details. For example, consider the following input ~A~B~C~D | ~A~B~CD + * | ~AB~C~D | ~AB~CD | ~ABCD | A~BCD, which can be represented as the maxterm: [(0001, 1111), + * (0100, 1111), (0101, 1111), (0111, 1111), (1011, 1111)], where the first bitset in every pair + * represents predicates and the second one prepresents mask (see Maxtern and Minterm documentation + * for details). The prime implicants for the expression will be A~BCD, ~ABD, ~A~C or represented as + * minterms: [(1011, 1111), (0101, 1101), (0000, 1010)] and the input minterms coverage will be: + * [5], [3, 4], [0, 1, 2, 3], which means that the first prime implicant A~BCD covers only the input + * minterms with index 5, which has the same value A~BCD. And the second prime implicant ~ABD covers + * input minterms with indexes 3 and 4 which are ~AB~CD, ~ABCD. + * Indeed ~AB~CD | ~ABCD == ~ABD & (~C | C) == ~ABD. + */ +std::pair> findPrimeImplicants(Maxterm maxterm); + +/** + * The Quine–McCluskey algorithm is a method used for minimizing boolean expressions. It works by + * comparing pairs of minterms and merging them into derived minterms. This process continues until + * no further simplification is possible. The function takes a maxterm to simplify and returns the + * simplified maxterm. For example, consider the following input ~A~B~C~D | ~A~B~CD | ~AB~C~D | + * ~AB~CD |~ABCD | A~BCD, which can be represented as the maxterm: [(0001, 1111), (0100, 1111), + * (0101, 1111), (0111, 1111), (1011, 1111)], where the first bitset in every pair represents + * predicates and the second one prepresents mask (see Maxtern and Minterm documentation for + * details). This expression can be simplified to A~BCD | ~ABD | ~A~C, or [(1011, 1111), (0101, + * 1101), (0000, 1010)] in maxterm/minterm representation. 
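The 4-predicate example in the comment above can be traced through two combining rounds; minterms that never combine become prime implicants.

```latex
\text{Round 1: }
\lnot A\lnot B\lnot C\lnot D + \lnot A\lnot B\lnot C D = \lnot A\lnot B\lnot C,\quad
\lnot A B\lnot C\lnot D + \lnot A B\lnot C D = \lnot A B\lnot C,
```
```latex
\lnot A\lnot B\lnot C\lnot D + \lnot A B\lnot C\lnot D = \lnot A\lnot C\lnot D,\quad
\lnot A\lnot B\lnot C D + \lnot A B\lnot C D = \lnot A\lnot C D,\quad
\lnot A B\lnot C D + \lnot A B C D = \lnot A B D,
```
```latex
\text{Round 2: }
\lnot A\lnot B\lnot C + \lnot A B\lnot C = \lnot A\lnot C,\quad
\lnot A\lnot C\lnot D + \lnot A\lnot C D = \lnot A\lnot C .
```

A¬BCD never combines (it differs from every other minterm in more than one bit), and ¬ABD does not combine further, so the prime implicants are A¬BCD, ¬ABD and ¬A¬C, as stated above.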
+ */ +Maxterm quineMcCluskey(Maxterm maxterm); + +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/boolean_simplification/quine_mccluskey_bm.cpp b/src/mongo/db/query/boolean_simplification/quine_mccluskey_bm.cpp new file mode 100644 index 0000000000000..d09845b31d933 --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/quine_mccluskey_bm.cpp @@ -0,0 +1,189 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include + +#include "mongo/db/query/boolean_simplification/bitset_algebra.h" +#include "mongo/db/query/boolean_simplification/petrick.h" +#include "mongo/db/query/boolean_simplification/quine_mccluskey.h" + +namespace mongo::boolean_simplification { + +/** + * Benchmarks maxterm 'A' which simplifies to 'A'. + */ +void quineMcCluskey_1predicate(benchmark::State& state) { + Bitset mask{"1"_b}; + Maxterm maxterm{ + Minterm{"1"_b, mask}, + }; + + for (auto _ : state) { + benchmark::DoNotOptimize(findPrimeImplicants(maxterm)); + } +} + +/** + * Benchmarks maxterm 'AB | A~B' which simplifies to 'A'. + */ +void quineMcCluskey_2predicates(benchmark::State& state) { + Bitset mask{"11"_b}; + Maxterm maxterm{ + Minterm{"10"_b, mask}, + Minterm{"11"_b, mask}, + }; + + for (auto _ : state) { + benchmark::DoNotOptimize(findPrimeImplicants(maxterm)); + } +} + +/** + * Benchmarks maxterm '~A~B~C~D | ~A~B~CD | ~AB~C~D | ~AB~CD' which simplifies to '~A~C'. + */ +void quineMcCluskey_3predicates(benchmark::State& state) { + // "ABC | A~BC = AC" + Bitset mask{"111"_b}; + Maxterm maxterm{ + Minterm{"111"_b, mask}, + Minterm{"101"_b, mask}, + }; + + for (auto _ : state) { + benchmark::DoNotOptimize(findPrimeImplicants(maxterm)); + } +} + +/** + * Benchmarks maxterm '~A~B~C | ~AB~C | A~B~C | ~ABC | A~BC | ABC' which simplifies to '~A~C | ~B~C + * | ~AB | A~B | BC | AC'. 
+ */ +void quineMcCluskey_3predicates_complex(benchmark::State& state) { + Bitset mask{"111"_b}; + Maxterm maxterm{ + Minterm{"000"_b, mask}, + Minterm{"010"_b, mask}, + Minterm{"100"_b, mask}, + Minterm{"011"_b, mask}, + Minterm{"101"_b, mask}, + Minterm{"111"_b, mask}, + }; + + for (auto _ : state) { + benchmark::DoNotOptimize(findPrimeImplicants(maxterm)); + } +} + +/** + * Benchmarks maxterm '~A~B~C~D | ~A~B~CD | ~AB~C~D | ~AB~CD' which simplifies to '~A~C'. + */ +void quineMcCluskey_4predicates_complex(benchmark::State& state) { + Bitset mask{"1111"_b}; + Maxterm maxterm{ + Minterm{"0000"_b, mask}, + Minterm{"0001"_b, mask}, + Minterm{"0100"_b, mask}, + Minterm{"0101"_b, mask}, + }; + + for (auto _ : state) { + benchmark::DoNotOptimize(findPrimeImplicants(maxterm)); + } +} + +/** + * Benchmarks the case of N minterms of N predicates, every minterm has exactly 1 true + * predicate so no simplifications is possible. + */ +void quineMcCluskey_noSimplifications(benchmark::State& state) { + const auto numPredicates = static_cast(state.range()); + Maxterm maxterm{numPredicates}; + for (size_t predicateIndex = 0; predicateIndex < numPredicates; ++predicateIndex) { + maxterm.append(predicateIndex, true); + } + for (auto _ : state) { + benchmark::DoNotOptimize(findPrimeImplicants(maxterm)); + } +} + +/** + * Benchmarks the case of N minterms of N predicates, every minterm is in form of 'AB' or 'A~B', so + * the pair of such minterm can be simplified to just one minterm 'A'. + */ +void quineMcCluskey_someSimplifications(benchmark::State& state) { + const auto numPredicates = static_cast(state.range()); + Maxterm maxterm{numPredicates}; + for (size_t predicateIndex = 0; predicateIndex < numPredicates - 1; predicateIndex += 2) { + maxterm.append(predicateIndex, true); + maxterm.minterms.back().set(predicateIndex + 1, true); + + maxterm.append(predicateIndex, true); + maxterm.minterms.back().set(predicateIndex + 1, false); + } + for (auto _ : state) { + benchmark::DoNotOptimize(findPrimeImplicants(maxterm)); + } +} + +/** + * Benchmarks a degenerate test case in which all minterms have the same number of true predicates + * which equals to #predicates / 10. 
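As a concrete illustration of the 'someSimplifications' input shape described above, for N = 4 the generated maxterm pairs up as follows (a small N chosen for illustration; the benchmark itself runs with much larger N):

```latex
(x_0 \land x_1) \lor (x_0 \land \lnot x_1) \lor (x_2 \land x_3) \lor (x_2 \land \lnot x_3)
\;\xrightarrow{\;\text{prime implicants}\;}\; x_0 \lor x_2 .
```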
+ */ +void quineMcCluskey_degenerate(benchmark::State& state) { + const auto numPredicates = static_cast(state.range()); + const size_t step = numPredicates / 10; + Maxterm maxterm{numPredicates}; + for (size_t predicateIndex = 0; predicateIndex < numPredicates - step + 1; + predicateIndex += step) { + maxterm.append(predicateIndex, true); + for (size_t i = 1; i < step; ++i) { + maxterm.minterms.back().set(predicateIndex + i, true); + } + } + + for (auto _ : state) { + benchmark::DoNotOptimize(findPrimeImplicants(maxterm)); + } +} + +BENCHMARK(quineMcCluskey_1predicate); +BENCHMARK(quineMcCluskey_2predicates); +BENCHMARK(quineMcCluskey_3predicates); +BENCHMARK(quineMcCluskey_3predicates_complex); +BENCHMARK(quineMcCluskey_4predicates_complex); +BENCHMARK(quineMcCluskey_noSimplifications)->DenseRange(5, 50, 5); +BENCHMARK(quineMcCluskey_noSimplifications)->DenseRange(100, 1000, 100); +BENCHMARK(quineMcCluskey_someSimplifications)->DenseRange(5, 50, 5); +BENCHMARK(quineMcCluskey_someSimplifications)->DenseRange(100, 1000, 100); +BENCHMARK(quineMcCluskey_degenerate)->DenseRange(1000, 10000, 1000); + +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/boolean_simplification/quine_mccluskey_test.cpp b/src/mongo/db/query/boolean_simplification/quine_mccluskey_test.cpp new file mode 100644 index 0000000000000..060d232d09fc8 --- /dev/null +++ b/src/mongo/db/query/boolean_simplification/quine_mccluskey_test.cpp @@ -0,0 +1,225 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/query/boolean_simplification/quine_mccluskey.h" + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + +namespace mongo::boolean_simplification { +TEST(FindPrimeImplicantsTest, Test1) { + // AB | ~AB = B + Bitset mask{"11"_b}; + Maxterm maxterm{{ + Minterm{"10"_b, mask}, + Minterm{"11"_b, mask}, + }}; + Maxterm expectedMaxterm{Minterm{"10", "10"}}; + std::vector> expectedCoveredMinterms{{0, 1}}; + + auto [actualMaxterm, actualCoveredMinterms] = findPrimeImplicants(maxterm); + ASSERT_EQ(expectedMaxterm, actualMaxterm); + ASSERT_EQ(expectedCoveredMinterms, actualCoveredMinterms); +} + +TEST(FindPrimeImplicantsTest, Test2) { + // "ABC | A~BC = AC" + Bitset mask{"111"_b}; + Maxterm maxterm{ + Minterm{"111"_b, mask}, + Minterm{"101"_b, mask}, + }; + Maxterm expectedMaxterm{Minterm{"101", "101"}}; + std::vector> expectedCoveredMinterms{{0, 1}}; + + auto [actualMaxterm, actualCoveredMinterms] = findPrimeImplicants(maxterm); + ASSERT_EQ(expectedMaxterm, actualMaxterm); + ASSERT_EQ(expectedCoveredMinterms, actualCoveredMinterms); +} + +TEST(FindPrimeImplicantsTest, Test3) { + // ABC | A~BC | AB~C = AC | AB + Bitset mask{"111"_b}; + Maxterm maxterm{ + Minterm{"111"_b, mask}, + Minterm{"101"_b, mask}, + Minterm{"110"_b, mask}, + }; + + Maxterm expectedMaxterm{ + Minterm{"101", "101"}, + Minterm{"110", "110"}, + }; + + std::vector> expectedCoveredMinterms{ + {0, 1}, + {0, 2}, + }; + + auto [actualMaxterm, actualCoveredMinterms] = findPrimeImplicants(maxterm); + ASSERT_EQ(expectedMaxterm, actualMaxterm); + ASSERT_EQ(expectedCoveredMinterms, actualCoveredMinterms); +} + +TEST(FindPrimeImplicantsTest, Test4) { + // ~A~B~C~D | ~A~B~CD | ~AB~C~D | ~AB~CD = ~A~C + Bitset mask{"1111"_b}; + Maxterm maxterm{ + Minterm{"0000"_b, mask}, + Minterm{"0001"_b, mask}, + Minterm{"0100"_b, mask}, + Minterm{"0101"_b, mask}, + }; + + Maxterm expectedMaxterm{ + Minterm{"0000", "1010"}, + }; + + std::vector> expectedCoveredMinterms{ + {0, 1, 2, 3}, + }; + + auto [actualMaxterm, actualCoveredMinterms] = findPrimeImplicants(maxterm); + ASSERT_EQ(expectedMaxterm, actualMaxterm); + ASSERT_EQ(expectedCoveredMinterms, actualCoveredMinterms); +} + +TEST(FindPrimeImplicantsTest, Test5) { + // ~A~B~C~D | ~A~B~CD | ~AB~C~D | ~AB~CD |~ABCD | A~BCD = A~BCD | ~ABD | ~A~C + Bitset mask{"1111"_b}; + Maxterm maxterm{{"0000"_b, mask}, + Minterm{"0001"_b, mask}, + Minterm{"0100"_b, mask}, + Minterm{"0101"_b, mask}, + Minterm{"0111"_b, mask}, + Minterm{"1011"_b, mask}}; + + Maxterm expectedMaxterm{ + Minterm{"1011", "1111"}, + Minterm{"0101", "1101"}, + Minterm{"0000", "1010"}, + }; + + std::vector> expectedCoveredMinterms{ + {5}, + {3, 4}, + {0, 1, 2, 3}, + }; + + auto [actualMaxterm, actualCoveredMinterms] = findPrimeImplicants(maxterm); + ASSERT_EQ(expectedMaxterm, actualMaxterm); + ASSERT_EQ(expectedCoveredMinterms, actualCoveredMinterms); +} + +TEST(FindPrimeImplicantsTest, Test6) { + // ~A~B~C | ~AB~C | A~B~C | ~ABC | A~BC | ABC = ~A~C | ~B~C | ~AB | A~B | BC | AC + Bitset mask{"111"_b}; + Maxterm maxterm{ + Minterm{"000"_b, mask}, + Minterm{"010"_b, mask}, + Minterm{"100"_b, mask}, + Minterm{"011"_b, mask}, + Minterm{"101"_b, mask}, + Minterm{"111"_b, mask}, + }; + + Maxterm expectedMaxterm{ + Minterm{"000", "101"}, + Minterm{"000", "011"}, + Minterm{"010", "110"}, + Minterm{"100", "110"}, + Minterm{"011", "011"}, + Minterm{"101", "101"}, + }; + + std::vector> expectedCoveredMinterms{ + {0, 1}, + {0, 2}, + {1, 3}, + {2, 4}, + {3, 5}, + {4, 5}, + }; 
+ + auto [actualMaxterm, actualCoveredMinterms] = findPrimeImplicants(maxterm); + ASSERT_EQ(expectedMaxterm, actualMaxterm); + ASSERT_EQ(expectedCoveredMinterms, actualCoveredMinterms); +} + +TEST(QuineMcCluskeyTest, Test5) { + // ~A~B~C~D | ~A~B~CD | ~AB~C~D | ~AB~CD |~ABCD | A~BCD = A~BCD | ~ABD | ~A~C + Bitset mask{"1111"_b}; + Maxterm maxterm{{"0000"_b, mask}, + Minterm{"0001"_b, mask}, + Minterm{"0100"_b, mask}, + Minterm{"0101"_b, mask}, + Minterm{"0111"_b, mask}, + Minterm{"1011"_b, mask}}; + + Maxterm expectedMaxterm{ + Minterm{"1011", "1111"}, + Minterm{"0101", "1101"}, + Minterm{"0000", "1010"}, + }; + + auto actualMaxterm = quineMcCluskey(maxterm); + ASSERT_EQ(expectedMaxterm, actualMaxterm); +} + +/** + * This test simplifies the same expression as FindPrimeImplicantsTest::Test6 but because it employs + * Petricks's method for further optimization the resulting expression is much smaller. + */ +TEST(QuineMcCluskeyTest, Test6) { + // ~A~B~C | ~AB~C | A~B~C | ~ABC | A~BC | ABC = ~A~C | A~B | BC + Bitset mask{"111"_b}; + Maxterm maxterm{ + Minterm{"000"_b, mask}, + Minterm{"010"_b, mask}, + Minterm{"100"_b, mask}, + Minterm{"011"_b, mask}, + Minterm{"101"_b, mask}, + Minterm{"111"_b, mask}, + }; + + Maxterm expectedMaxterm{ + Minterm{"000", "101"}, + Minterm{"100", "110"}, + Minterm{"011", "011"}, + }; + + auto actualMaxterm = quineMcCluskey(maxterm); + // This test asserts on one possible output: ~A~C | A~B | BC, another possible output is ~A~C | + // ~AB | AC. See the coverage output in FindPrimeImplicantsTest::Test6, the last uncovered + // minterm #5 can be covered by BC or AC. It just happens that we select the first optimal + // coverage, if we change quineMcCluskey the second one can be picked up. + ASSERT_EQ(expectedMaxterm, actualMaxterm); +} +} // namespace mongo::boolean_simplification diff --git a/src/mongo/db/query/canonical_query.cpp b/src/mongo/db/query/canonical_query.cpp index 59216530b83ef..dec75dfbd3da9 100644 --- a/src/mongo/db/query/canonical_query.cpp +++ b/src/mongo/db/query/canonical_query.cpp @@ -28,37 +28,42 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/canonical_query.h" - -#include "mongo/crypto/encryption_fields_gen.h" -#include "mongo/db/catalog/collection.h" -#include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/cst/cst_parser.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/matcher/expression_array.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query_encoder.h" -#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/indexability.h" +#include "mongo/db/query/parsed_find_command.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner_common.h" +#include "mongo/db/server_parameter.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/str.h" +#include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery namespace mongo { namespace { -bool parsingCanProduceNoopMatchNodes(const ExtensionsCallback& extensionsCallback, - 
MatchExpressionParser::AllowedFeatureSet allowedFeatures) { - return extensionsCallback.hasNoopExtensions() && - (allowedFeatures & MatchExpressionParser::AllowedFeatures::kText || - allowedFeatures & MatchExpressionParser::AllowedFeatures::kJavascript); -} - boost::optional loadMaxParameterCount() { auto value = internalQueryAutoParameterizationMaxParameterCount.load(); if (value > 0) { @@ -75,87 +80,57 @@ StatusWith> CanonicalQuery::canonicalize( OperationContext* opCtx, std::unique_ptr findCommand, bool explain, - const boost::intrusive_ptr& expCtx, + const boost::intrusive_ptr& givenExpCtx, const ExtensionsCallback& extensionsCallback, MatchExpressionParser::AllowedFeatureSet allowedFeatures, const ProjectionPolicies& projectionPolicies, std::vector> pipeline, bool isCountLike) { - auto status = query_request_helper::validateFindCommandRequest(*findCommand); - if (!status.isOK()) { - return status; - } - - std::unique_ptr collator; - if (!findCommand->getCollation().isEmpty()) { - auto statusWithCollator = CollatorFactoryInterface::get(opCtx->getServiceContext()) - ->makeFromBSON(findCommand->getCollation()); - if (!statusWithCollator.isOK()) { - return statusWithCollator.getStatus(); + if (givenExpCtx) { + // Caller provided an ExpressionContext, let's go ahead and use that. + auto swParsedFind = parsed_find_command::parse(givenExpCtx, + std::move(findCommand), + extensionsCallback, + allowedFeatures, + projectionPolicies); + if (!swParsedFind.isOK()) { + return swParsedFind.getStatus(); } - collator = std::move(statusWithCollator.getValue()); - } - - // Make MatchExpression. - boost::intrusive_ptr newExpCtx; - if (!expCtx.get()) { - invariant(findCommand->getNamespaceOrUUID().nss()); - newExpCtx = make_intrusive(opCtx, - std::move(collator), - *findCommand->getNamespaceOrUUID().nss(), - findCommand->getLegacyRuntimeConstants(), - findCommand->getLet()); + return canonicalize(std::move(givenExpCtx), + std::move(swParsedFind.getValue()), + explain, + std::move(pipeline), + isCountLike); } else { - newExpCtx = expCtx; - // A collator can enter through both the FindCommandRequest and ExpressionContext arguments. - // This invariant ensures that both collators are the same because downstream we - // pull the collator from only one of the ExpressionContext carrier. - if (collator.get() && expCtx->getCollator()) { - invariant(CollatorInterface::collatorsMatch(collator.get(), expCtx->getCollator())); + // No ExpressionContext provided, let's call the override that makes one for us. + auto swResults = parsed_find_command::parse( + opCtx, std::move(findCommand), extensionsCallback, allowedFeatures, projectionPolicies); + if (!swResults.isOK()) { + return swResults.getStatus(); } + auto&& [expCtx, parsedFind] = std::move(swResults.getValue()); + return canonicalize( + std::move(expCtx), std::move(parsedFind), explain, std::move(pipeline), isCountLike); } +} + +// static +StatusWith> CanonicalQuery::canonicalize( + boost::intrusive_ptr expCtx, + std::unique_ptr parsedFind, + bool explain, + std::vector> pipeline, + bool isCountLike) { // Make the CQ we'll hopefully return. 
- std::unique_ptr cq(new CanonicalQuery()); + auto cq = std::make_unique(); cq->setExplain(explain); - - StatusWithMatchExpression statusWithMatcher = [&]() -> StatusWithMatchExpression { - if (getTestCommandsEnabled() && internalQueryEnableCSTParser.load()) { - try { - return cst::parseToMatchExpression( - findCommand->getFilter(), newExpCtx, extensionsCallback); - } catch (const DBException& ex) { - return ex.toStatus(); - } - } else { - return MatchExpressionParser::parse( - findCommand->getFilter(), newExpCtx, extensionsCallback, allowedFeatures); - } - }(); - if (!statusWithMatcher.isOK()) { - return statusWithMatcher.getStatus(); - } - - // Stop counting expressions after they have been parsed to exclude expressions created - // during optimization and other processing steps. - newExpCtx->stopExpressionCounters(); - - std::unique_ptr me = std::move(statusWithMatcher.getValue()); - - Status initStatus = - cq->init(opCtx, - std::move(newExpCtx), - std::move(findCommand), - parsingCanProduceNoopMatchNodes(extensionsCallback, allowedFeatures), - std::move(me), - projectionPolicies, - std::move(pipeline), - isCountLike); - - if (!initStatus.isOK()) { + if (auto initStatus = + cq->init(std::move(expCtx), std::move(parsedFind), std::move(pipeline), isCountLike); + !initStatus.isOK()) { return initStatus; } - return std::move(cq); + return {std::move(cq)}; } // static @@ -166,53 +141,63 @@ StatusWith> CanonicalQuery::canonicalize( findCommand->setProjection(baseQuery.getFindCommandRequest().getProjection().getOwned()); findCommand->setSort(baseQuery.getFindCommandRequest().getSort().getOwned()); findCommand->setCollation(baseQuery.getFindCommandRequest().getCollation().getOwned()); - auto status = query_request_helper::validateFindCommandRequest(*findCommand); - if (!status.isOK()) { - return status; - } // Make the CQ we'll hopefully return. - std::unique_ptr cq(new CanonicalQuery()); + auto cq = std::make_unique(); cq->setExplain(baseQuery.getExplain()); - Status initStatus = cq->init(opCtx, - baseQuery.getExpCtx(), - std::move(findCommand), - baseQuery.canHaveNoopMatchNodes(), - root->clone(), - ProjectionPolicies::findProjectionPolicies(), - {} /* an empty pipeline */, - baseQuery.isCountLike()); - - if (!initStatus.isOK()) { - return initStatus; - } - return std::move(cq); + auto swParsedFind = ParsedFindCommand::withExistingFilter( + baseQuery.getExpCtx(), + baseQuery.getCollator() ? 
baseQuery.getCollator()->clone() : nullptr, + root->clone(), + std::move(findCommand)); + if (!swParsedFind.isOK()) { + return swParsedFind.getStatus(); + } + auto initStatus = cq->init(baseQuery.getExpCtx(), + std::move(swParsedFind.getValue()), + {} /* an empty pipeline */, + baseQuery.isCountLike()); + invariant(initStatus.isOK()); + return {std::move(cq)}; } -Status CanonicalQuery::init(OperationContext* opCtx, - boost::intrusive_ptr expCtx, - std::unique_ptr findCommand, - bool canHaveNoopMatchNodes, - std::unique_ptr root, - const ProjectionPolicies& projectionPolicies, +Status CanonicalQuery::init(boost::intrusive_ptr expCtx, + std::unique_ptr parsedFind, std::vector> pipeline, bool isCountLike) { _expCtx = expCtx; - _findCommand = std::move(findCommand); + _findCommand = std::move(parsedFind->findCommandRequest); - _canHaveNoopMatchNodes = canHaveNoopMatchNodes; _forceClassicEngine = ServerParameterSet::getNodeParameterSet() ->get("internalQueryFrameworkControl") ->_data.get() == QueryFrameworkControlEnum::kForceClassicEngine; + _root = MatchExpression::normalize(std::move(parsedFind->filter)); + if (parsedFind->proj) { + if (parsedFind->proj->requiresMatchDetails()) { + // Sadly, in some cases the match details cannot be generated from the unoptimized + // MatchExpression. For example, a rooted-$or of equalities won't work to produce the + // details, but if you optimize that query to an $in, it will work. If we were starting + // from scratch, we may disallow this. But it has already been released as working so we + // will keep it so, and here have to re-parse the projection using the new, normalized + // MatchExpression, before we save this projection for later execution. + _proj.emplace(projection_ast::parseAndAnalyze(expCtx, + _findCommand->getProjection(), + _root.get(), + _findCommand->getFilter(), + *parsedFind->savedProjectionPolicies, + true /* optimize */)); + } else { + _proj.emplace(std::move(*parsedFind->proj)); + _proj->optimize(); + } + } + if (parsedFind->sort) { + _sortPattern = std::move(parsedFind->sort); + } + _pipeline = std::move(pipeline); _isCountLike = isCountLike; - auto validStatus = isValid(root.get(), *_findCommand); - if (!validStatus.isOK()) { - return validStatus.getStatus(); - } - auto unavailableMetadata = validStatus.getValue(); - _root = MatchExpression::normalize(std::move(root)); // If caching is disabled, do not perform any autoparameterization. if (!internalQueryDisablePlanCache.load()) { @@ -232,82 +217,38 @@ Status CanonicalQuery::init(OperationContext* opCtx, } } // The tree must always be valid after normalization. - dassert(isValid(_root.get(), *_findCommand).isOK()); + dassert(parsed_find_command::isValid(_root.get(), *_findCommand).isOK()); if (auto status = isValidNormalized(_root.get()); !status.isOK()) { return status; } - // Validate the projection if there is one. - if (!_findCommand->getProjection().isEmpty()) { - try { - _proj.emplace(projection_ast::parseAndAnalyze(expCtx, - _findCommand->getProjection(), - _root.get(), - _findCommand->getFilter(), - projectionPolicies, - true /* Should optimize? */)); + if (_proj) { + _metadataDeps = _proj->metadataDeps(); - // Fail if any of the projection's dependencies are unavailable. 
- DepsTracker{unavailableMetadata}.requestMetadata(_proj->metadataDeps()); - } catch (const DBException& e) { - return e.toStatus(); + if (_proj->metadataDeps()[DocumentMetadataFields::kSortKey] && + _findCommand->getSort().isEmpty()) { + return {ErrorCodes::BadValue, "cannot use sortKey $meta projection without a sort"}; } - - _metadataDeps = _proj->metadataDeps(); } - _pipeline = std::move(pipeline); - - if (_proj && _proj->metadataDeps()[DocumentMetadataFields::kSortKey] && - _findCommand->getSort().isEmpty()) { - return Status(ErrorCodes::BadValue, "cannot use sortKey $meta projection without a sort"); - } + if (_sortPattern) { + // Be sure to track and add any metadata dependencies from the sort (e.g. text score). + _metadataDeps |= _sortPattern->metadataDeps(parsedFind->unavailableMetadata); - // If there is a sort, parse it and add any metadata dependencies it induces. - try { - initSortPattern(unavailableMetadata); - } catch (const DBException& ex) { - return ex.toStatus(); + // If the results of this query might have to be merged on a remote node, then that node + // might need the sort key metadata. Request that the plan generates this metadata. + if (_expCtx->needsMerge) { + _metadataDeps.set(DocumentMetadataFields::kSortKey); + } } // If the 'returnKey' option is set, then the plan should produce index key metadata. if (_findCommand->getReturnKey()) { _metadataDeps.set(DocumentMetadataFields::kIndexKey); } - return Status::OK(); } -void CanonicalQuery::initSortPattern(QueryMetadataBitSet unavailableMetadata) { - if (_findCommand->getSort().isEmpty()) { - return; - } - - // A $natural sort is really a hint, and should be handled as such. Furthermore, the downstream - // sort handling code may not expect a $natural sort. - // - // We have already validated that if there is a $natural sort and a hint, that the hint - // also specifies $natural with the same direction. Therefore, it is safe to clear the $natural - // sort and rewrite it as a $natural hint. - if (_findCommand->getSort()[query_request_helper::kNaturalSortField]) { - _findCommand->setHint(_findCommand->getSort().getOwned()); - _findCommand->setSort(BSONObj{}); - } - - if (getTestCommandsEnabled() && internalQueryEnableCSTParser.load()) { - _sortPattern = cst::parseToSortPattern(_findCommand->getSort(), _expCtx); - } else { - _sortPattern = SortPattern{_findCommand->getSort(), _expCtx}; - } - _metadataDeps |= _sortPattern->metadataDeps(unavailableMetadata); - - // If the results of this query might have to be merged on a remote node, then that node might - // need the sort key metadata. Request that the plan generates this metadata. - if (_expCtx->needsMerge) { - _metadataDeps.set(DocumentMetadataFields::kSortKey); - } -} - void CanonicalQuery::setCollator(std::unique_ptr collator) { auto collatorRaw = collator.get(); // We must give the ExpressionContext the same collator. @@ -348,138 +289,9 @@ bool CanonicalQuery::isSimpleIdQuery(const BSONObj& query) { return hasID; } -size_t CanonicalQuery::countNodes(const MatchExpression* root, MatchExpression::MatchType type) { - size_t sum = 0; - if (type == root->matchType()) { - sum = 1; - } - for (size_t i = 0; i < root->numChildren(); ++i) { - sum += countNodes(root->getChild(i), type); - } - return sum; -} - -/** - * Does 'root' have a subtree of type 'subtreeType' with a node of type 'childType' inside? 
- */ -bool hasNodeInSubtree(const MatchExpression* root, - MatchExpression::MatchType childType, - MatchExpression::MatchType subtreeType) { - if (subtreeType == root->matchType()) { - return QueryPlannerCommon::hasNode(root, childType); - } - for (size_t i = 0; i < root->numChildren(); ++i) { - if (hasNodeInSubtree(root->getChild(i), childType, subtreeType)) { - return true; - } - } - return false; -} - -StatusWith CanonicalQuery::isValid(const MatchExpression* root, - const FindCommandRequest& findCommand) { - QueryMetadataBitSet unavailableMetadata{}; - - // There can only be one TEXT. If there is a TEXT, it cannot appear inside a NOR. - // - // Note that the query grammar (as enforced by the MatchExpression parser) forbids TEXT - // inside of value-expression clauses like NOT, so we don't check those here. - size_t numText = countNodes(root, MatchExpression::TEXT); - if (numText > 1) { - return Status(ErrorCodes::BadValue, "Too many text expressions"); - } else if (1 == numText) { - if (hasNodeInSubtree(root, MatchExpression::TEXT, MatchExpression::NOR)) { - return Status(ErrorCodes::BadValue, "text expression not allowed in nor"); - } - } else { - // Text metadata is not available. - unavailableMetadata.set(DocumentMetadataFields::kTextScore); - } - - // There can only be one NEAR. If there is a NEAR, it must be either the root or the root - // must be an AND and its child must be a NEAR. - size_t numGeoNear = countNodes(root, MatchExpression::GEO_NEAR); - if (numGeoNear > 1) { - return Status(ErrorCodes::BadValue, "Too many geoNear expressions"); - } else if (1 == numGeoNear) { - // Do nothing, we will perform extra checks in CanonicalQuery::isValidNormalized. - } else { - // Geo distance and geo point metadata are unavailable. - unavailableMetadata |= DepsTracker::kAllGeoNearData; - } - - const BSONObj& sortObj = findCommand.getSort(); - BSONElement sortNaturalElt = sortObj["$natural"]; - const BSONObj& hintObj = findCommand.getHint(); - BSONElement hintNaturalElt = hintObj["$natural"]; - - if (sortNaturalElt && sortObj.nFields() != 1) { - return Status(ErrorCodes::BadValue, - str::stream() << "Cannot include '$natural' in compound sort: " << sortObj); - } - - if (hintNaturalElt && hintObj.nFields() != 1) { - return Status(ErrorCodes::BadValue, - str::stream() << "Cannot include '$natural' in compound hint: " << hintObj); - } - - // NEAR cannot have a $natural sort or $natural hint. - if (numGeoNear > 0) { - if (sortNaturalElt) { - return Status(ErrorCodes::BadValue, - "geoNear expression not allowed with $natural sort order"); - } - - if (hintNaturalElt) { - return Status(ErrorCodes::BadValue, - "geoNear expression not allowed with $natural hint"); - } - } - - // TEXT and NEAR cannot both be in the query. - if (numText > 0 && numGeoNear > 0) { - return Status(ErrorCodes::BadValue, "text and geoNear not allowed in same query"); - } - - // TEXT and {$natural: ...} sort order cannot both be in the query. - if (numText > 0 && sortNaturalElt) { - return Status(ErrorCodes::BadValue, "text expression not allowed with $natural sort order"); - } - - // TEXT and hint cannot both be in the query. - if (numText > 0 && !hintObj.isEmpty()) { - return Status(ErrorCodes::BadValue, "text and hint not allowed in same query"); - } - - // TEXT and tailable are incompatible. - if (numText > 0 && findCommand.getTailable()) { - return Status(ErrorCodes::BadValue, "text and tailable cursor not allowed in same query"); - } - - // NEAR and tailable are incompatible. 
- if (numGeoNear > 0 && findCommand.getTailable()) { - return Status(ErrorCodes::BadValue, - "Tailable cursors and geo $near cannot be used together"); - } - - // $natural sort order must agree with hint. - if (sortNaturalElt) { - if (!hintObj.isEmpty() && !hintNaturalElt) { - return Status(ErrorCodes::BadValue, "index hint not allowed with $natural sort order"); - } - if (hintNaturalElt) { - if (hintNaturalElt.numberInt() != sortNaturalElt.numberInt()) { - return Status(ErrorCodes::BadValue, - "$natural hint must be in the same direction as $natural sort order"); - } - } - } - - return unavailableMetadata; -} - Status CanonicalQuery::isValidNormalized(const MatchExpression* root) { - if (auto numGeoNear = countNodes(root, MatchExpression::GEO_NEAR); numGeoNear > 0) { + if (auto numGeoNear = QueryPlannerCommon::countNodes(root, MatchExpression::GEO_NEAR); + numGeoNear > 0) { tassert(5705300, "Only one geo $near expression is expected", numGeoNear == 1); auto topLevel = false; @@ -502,9 +314,13 @@ Status CanonicalQuery::isValidNormalized(const MatchExpression* root) { return Status::OK(); } -std::string CanonicalQuery::toString() const { +std::string CanonicalQuery::toString(bool forErrMsg) const { str::stream ss; - ss << "ns=" << _findCommand->getNamespaceOrUUID().nss().value_or(NamespaceString()).ns(); + if (forErrMsg) { + ss << "ns=" << _findCommand->getNamespaceOrUUID().toStringForErrorMsg(); + } else { + ss << "ns=" << toStringForLogging(_findCommand->getNamespaceOrUUID()); + } if (_findCommand->getBatchSize()) { ss << " batchSize=" << *_findCommand->getBatchSize(); @@ -528,10 +344,15 @@ std::string CanonicalQuery::toString() const { return ss; } -std::string CanonicalQuery::toStringShort() const { +std::string CanonicalQuery::toStringShort(bool forErrMsg) const { str::stream ss; - ss << "ns: " << _findCommand->getNamespaceOrUUID().nss().value_or(NamespaceString()).ns() - << " query: " << _findCommand->getFilter().toString() + if (forErrMsg) { + ss << "ns: " << _findCommand->getNamespaceOrUUID().toStringForErrorMsg(); + } else { + ss << "ns: " << toStringForLogging(_findCommand->getNamespaceOrUUID()); + } + + ss << " query: " << _findCommand->getFilter().toString() << " sort: " << _findCommand->getSort().toString() << " projection: " << _findCommand->getProjection().toString(); diff --git a/src/mongo/db/query/canonical_query.h b/src/mongo/db/query/canonical_query.h index 17e837e932155..cd95fae5ee962 100644 --- a/src/mongo/db/query/canonical_query.h +++ b/src/mongo/db/query/canonical_query.h @@ -30,17 +30,39 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/cst/c_node.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback.h" #include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/inner_pipeline_stage_interface.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/parsed_find_command.h" #include "mongo/db/query/projection.h" #include "mongo/db/query/projection_policies.h" 
#include "mongo/db/query/query_request_helper.h" #include "mongo/db/query/sort_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -77,6 +99,16 @@ class CanonicalQuery { std::vector> pipeline = {}, bool isCountLike = false); + /** + * Creates a CanonicalQuery from a ParsedFindCommand. Uses 'expCtx->opCtx', which must be valid. + */ + static StatusWith> canonicalize( + boost::intrusive_ptr expCtx, + std::unique_ptr parsedFind, + bool explain = false, + std::vector> pipeline = {}, + bool isCountLike = false); + /** * For testing or for internal clients to use. */ @@ -98,33 +130,22 @@ class CanonicalQuery { static bool isSimpleIdQuery(const BSONObj& query); /** - * Validates the match expression 'root' as well as the query specified by 'request', checking - * for illegal combinations of operators. Returns a non-OK status if any such illegal - * combination is found. - * - * This method can be called both on normalized and non-normalized 'root'. However, some checks - * can only be performed once the match expressions is normalized. To perform these checks one - * can call 'isValidNormalized()'. - * - * On success, returns a bitset indicating which types of metadata are *unavailable*. For - * example, if 'root' does not contain a $text predicate, then the returned metadata bitset will - * indicate that text score metadata is unavailable. This means that if subsequent - * $meta:"textScore" expressions are found during analysis of the query, we should raise in an - * error. + * Perform validation checks on the normalized 'root' which could not be checked before + * normalization - those should happen in parsed_find_command::isValid(). */ - static StatusWith isValid(const MatchExpression* root, - const FindCommandRequest& findCommand); + static Status isValidNormalized(const MatchExpression* root); /** - * Perform additional validation checks on the normalized 'root'. + * For internal use only - but public for accessibility for make_unique(). You must go through + * canonicalize to create a CanonicalQuery. */ - static Status isValidNormalized(const MatchExpression* root); + CanonicalQuery() {} NamespaceString nss() const { - invariant(_findCommand->getNamespaceOrUUID().nss()); - return *_findCommand->getNamespaceOrUUID().nss(); + invariant(_findCommand->getNamespaceOrUUID().isNamespaceString()); + return _findCommand->getNamespaceOrUUID().nss(); } - std::string ns() const { + StringData ns() const { return nss().ns(); } @@ -160,6 +181,10 @@ class CanonicalQuery { return _expCtx->getCollator(); } + std::shared_ptr getCollatorShared() const { + return _expCtx->getCollatorShared(); + } + /** * Returns a bitset indicating what metadata has been requested in the query. */ @@ -197,25 +222,14 @@ class CanonicalQuery { void setCollator(std::unique_ptr collator); // Debugging - std::string toString() const; - std::string toStringShort() const; - - /** - * Returns a count of 'type' nodes in expression tree. - */ - static size_t countNodes(const MatchExpression* root, MatchExpression::MatchType type); + std::string toString(bool forErrMsg = false) const; + std::string toStringShort(bool forErrMsg = false) const; - /** - * Returns true if this canonical query may have converted extensions such as $where and $text - * into no-ops during parsing. This will be the case if it allowed $where and $text in parsing, - * but parsed using an ExtensionsCallbackNoop. This does not guarantee that a $where or $text - * existed in the query. 
- * - * Queries with a no-op extension context are special because they can be parsed and planned, - * but they cannot be executed. - */ - bool canHaveNoopMatchNodes() const { - return _canHaveNoopMatchNodes; + std::string toStringForErrorMsg() const { + return toString(true); + } + std::string toStringShortForErrorMsg() const { + return toStringShort(true); } bool getExplain() const { @@ -298,24 +312,11 @@ class CanonicalQuery { } private: - // You must go through canonicalize to create a CanonicalQuery. - CanonicalQuery() {} - - Status init(OperationContext* opCtx, - boost::intrusive_ptr expCtx, - std::unique_ptr findCommand, - bool canHaveNoopMatchNodes, - std::unique_ptr root, - const ProjectionPolicies& projectionPolicies, + Status init(boost::intrusive_ptr expCtx, + std::unique_ptr parsedFind, std::vector> pipeline, bool isCountLike); - // Initializes '_sortPattern', adding any metadata dependencies implied by the sort. - // - // Throws a UserException if the sort is illegal, or if any metadata type in - // 'unavailableMetadata' is required. - void initSortPattern(QueryMetadataBitSet unavailableMetadata); - boost::intrusive_ptr _expCtx; std::unique_ptr _findCommand; @@ -333,8 +334,6 @@ class CanonicalQuery { // Keeps track of what metadata has been explicitly requested. QueryMetadataBitSet _metadataDeps; - bool _canHaveNoopMatchNodes = false; - bool _explain = false; // Determines whether the classic engine must be used. diff --git a/src/mongo/db/query/canonical_query_encoder.cpp b/src/mongo/db/query/canonical_query_encoder.cpp index 949501604e262..8d3769df5dd84 100644 --- a/src/mongo/db/query/canonical_query_encoder.cpp +++ b/src/mongo/db/query/canonical_query_encoder.cpp @@ -30,24 +30,69 @@ #include "mongo/db/query/canonical_query_encoder.h" +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include #include "mongo/base/simple_string_data_comparator.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/geo/geometry_container.h" +#include "mongo/db/geo/shapes.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_geo.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_path.h" #include "mongo/db/matcher/expression_text.h" #include "mongo/db/matcher/expression_text_noop.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_type.h" +#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/matcher/expression_where.h" #include "mongo/db/matcher/expression_where_noop.h" +#include "mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_lookup.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/inner_pipeline_stage_interface.h" #include "mongo/db/query/analyze_regex.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" #include 
"mongo/db/query/projection.h" -#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/query_request_helper.h" #include "mongo/db/query/tree_walker.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -587,7 +632,7 @@ void encodeKeyForProj(const projection_ast::Projection* proj, StringBuilder* key return; } - auto requiredFields = proj->getRequiredFields(); + const auto& requiredFields = proj->getRequiredFields(); // If the only requirement is that $sortKey be included with some value, we just act as if the // entire document is needed. @@ -669,10 +714,6 @@ CanonicalQuery::QueryShapeString encodeClassic(const CanonicalQuery& cq) { encodeKeyForProj(cq.getProj(), &keyBuilder); encodeCollation(cq.getCollator(), &keyBuilder); - // This encoding can be removed once the classic query engine reaches EOL and SBE is used - // exclusively for all query execution. - keyBuilder << kEncodeSectionDelimiter << (cq.getForceClassicEngine() ? "f" : "t"); - // The apiStrict flag can cause the query to see different set of indexes. For example, all // sparse indexes will be ignored with apiStrict is used. const bool apiStrict = diff --git a/src/mongo/db/query/canonical_query_encoder.h b/src/mongo/db/query/canonical_query_encoder.h index 29620e47afafc..739979d729342 100644 --- a/src/mongo/db/query/canonical_query_encoder.h +++ b/src/mongo/db/query/canonical_query_encoder.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/query/canonical_query.h" namespace mongo { diff --git a/src/mongo/db/query/canonical_query_encoder_test.cpp b/src/mongo/db/query/canonical_query_encoder_test.cpp index 865445b0e3095..fb6ddc066155f 100644 --- a/src/mongo/db/query/canonical_query_encoder_test.cpp +++ b/src/mongo/db/query/canonical_query_encoder_test.cpp @@ -29,19 +29,48 @@ #include "mongo/db/query/canonical_query_encoder.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/db/pipeline/document_source.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/inner_pipeline_stage_impl.h" +#include "mongo/db/pipeline/inner_pipeline_stage_interface.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query_test_util.h" -#include "mongo/db/query/plan_cache_key_factory.h" -#include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" 
+#include "mongo/db/query/projection_policies.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/golden_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/golden_test_base.h" +#include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/canonical_query_test.cpp b/src/mongo/db/query/canonical_query_test.cpp index be8b5eb6fc765..1de24e192b36d 100644 --- a/src/mongo/db/query/canonical_query_test.cpp +++ b/src/mongo/db/query/canonical_query_test.cpp @@ -29,13 +29,23 @@ #include "mongo/db/query/canonical_query.h" -#include "mongo/db/json.h" +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/query_request_helper.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -291,6 +301,49 @@ TEST(CanonicalQueryTest, CanonicalizeFromBaseQuery) { ASSERT_TRUE(childCq->getExplain()); } +TEST(CanonicalQueryTest, CanonicalizeFromBaseQueryWithSpecialFeature) { + // Like the above test, but use $text which is a 'special feature' not always allowed. This is + // meant to reproduce SERVER-XYZ. + QueryTestServiceContext serviceContext; + auto opCtx = serviceContext.makeOperationContext(); + + const bool isExplain = true; + const std::string cmdStr = R"({ + find:'bogusns', + filter: { + $or:[ + {a: 'foo'}, + {$text: {$search: 'bar'}} + ] + }, + projection: {a:1}, + sort: {b:1}, + $db: 'test' + })"; + auto findCommand = query_request_helper::makeFromFindCommandForTests(fromjson(cmdStr)); + auto baseCq = + assertGet(CanonicalQuery::canonicalize(opCtx.get(), + std::move(findCommand), + isExplain, + nullptr, + ExtensionsCallbackNoop(), + MatchExpressionParser::kAllowAllSpecialFeatures)); + + // Note: be sure to use the second child to get $text, since we 'normalize' and sort the + // MatchExpression tree as part of canonicalization. This will put the text search clause + // second. 
+ MatchExpression* secondClauseExpr = baseCq->root()->getChild(1); + auto childCq = assertGet(CanonicalQuery::canonicalize(opCtx.get(), *baseCq, secondClauseExpr)); + + ASSERT_BSONOBJ_EQ(childCq->getFindCommandRequest().getFilter(), secondClauseExpr->serialize()); + + ASSERT_BSONOBJ_EQ(childCq->getFindCommandRequest().getProjection(), + baseCq->getFindCommandRequest().getProjection()); + ASSERT_BSONOBJ_EQ(childCq->getFindCommandRequest().getSort(), + baseCq->getFindCommandRequest().getSort()); + ASSERT_TRUE(childCq->getExplain()); +} + TEST(CanonicalQueryTest, CanonicalQueryFromQRWithNoCollation) { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); diff --git a/src/mongo/db/query/canonical_query_test_util.cpp b/src/mongo/db/query/canonical_query_test_util.cpp index ff83ae0df1436..72caf0aa20b88 100644 --- a/src/mongo/db/query/canonical_query_test_util.cpp +++ b/src/mongo/db/query/canonical_query_test_util.cpp @@ -29,8 +29,25 @@ #include "mongo/db/query/canonical_query_test_util.h" +#include +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/json.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/intrusive_counter.h" + namespace mongo { -const NamespaceString CanonicalQueryTest::nss("test.collection"); +const NamespaceString CanonicalQueryTest::nss = + NamespaceString::createNamespaceString_forTest("test.collection"); /** * Utility functions to create a CanonicalQuery diff --git a/src/mongo/db/query/canonical_query_test_util.h b/src/mongo/db/query/canonical_query_test_util.h index bb69978711cfd..8e9cbfcad5182 100644 --- a/src/mongo/db/query/canonical_query_test_util.h +++ b/src/mongo/db/query/canonical_query_test_util.h @@ -27,8 +27,16 @@ * it in the license file. 
*/ +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/unittest.h" namespace mongo { diff --git a/src/mongo/db/query/ce/SConscript b/src/mongo/db/query/ce/SConscript index 97c0cf3f4e71a..a2204ad1fb6ec 100644 --- a/src/mongo/db/query/ce/SConscript +++ b/src/mongo/db/query/ce/SConscript @@ -8,6 +8,7 @@ env.Library( target="ce_utils", source=[ 'bound_utils.cpp', + 'sel_tree_utils.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/db/pipeline/abt_utils', @@ -45,6 +46,7 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/query/optimizer/optimizer_memo', 'ce_heuristic_estimation', + 'ce_utils', ], ) @@ -55,6 +57,7 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/query/optimizer/optimizer_memo', + 'ce_utils', 'query_ce_heuristic', ], ) @@ -70,6 +73,7 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/query/optimizer/optimizer_memo', 'ce_heuristic_estimation', + 'ce_utils', ], ) @@ -81,20 +85,20 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/exec/sbe/query_sbe_abt', '$BUILD_DIR/mongo/db/query/optimizer/optimizer', + 'ce_utils', ], ) env.Library( - target="test_utils", + target='test_utils', source=[ 'test_utils.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/exec/sbe/query_sbe_values', '$BUILD_DIR/mongo/db/exec/sbe/sbe_abt_test_util', '$BUILD_DIR/mongo/db/query/optimizer/unit_test_pipeline_utils', - "$BUILD_DIR/mongo/unittest/unittest", + '$BUILD_DIR/mongo/db/server_base', 'query_ce_heuristic', 'query_ce_histogram', 'query_ce_sampling', @@ -102,9 +106,9 @@ env.Library( ) env.CppUnitTest( - target="histogram_estimator_test", + target='histogram_estimator_test', source=[ - "histogram_estimator_test.cpp", + 'histogram_estimator_test.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/db/query/stats/stats_test_utils', @@ -155,9 +159,9 @@ env.CppUnitTest( ) env.CppUnitTest( - target="heuristic_dataflow_nodes_test", + target='heuristic_dataflow_nodes_test', source=[ - "heuristic_dataflow_nodes_test.cpp", + 'heuristic_dataflow_nodes_test.cpp', ], LIBDEPS=[ 'test_utils', @@ -175,7 +179,7 @@ env.CppUnitTest( ) env.CppUnitTest( - target="maxdiff_histogram_test", + target='maxdiff_histogram_test', source=[ 'maxdiff_histogram_test.cpp', ], diff --git a/src/mongo/db/query/ce/benchmark_test.cpp b/src/mongo/db/query/ce/benchmark_test.cpp index 14fe88a0ce7e0..2d4c0c25a5393 100644 --- a/src/mongo/db/query/ce/benchmark_test.cpp +++ b/src/mongo/db/query/ce/benchmark_test.cpp @@ -27,12 +27,36 @@ * it in the license file. 
*/ -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/query/ce/benchmark_utils.h" -#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/stats/array_histogram.h" +#include "mongo/db/query/stats/collection_statistics.h" +#include "mongo/db/query/stats/collection_statistics_mock.h" #include "mongo/db/query/stats/max_diff.h" #include "mongo/db/query/stats/rand_utils.h" +#include "mongo/db/query/stats/value_utils.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/test_info.h" +#include "mongo/util/str.h" namespace mongo::optimizer::ce { namespace { @@ -146,7 +170,7 @@ BenchmarkRuntimeParameters generateHistorgrams( * A test fixture for CE benchmaarks. It provides a common 'setUp' hook to be invoked before each * benchmark, as well as a 'runBenchmarks' driver to be used in each TEST_F. */ -class CEBenchmarkTest : public LockerNoopServiceContextTest { +class CEBenchmarkTest : public ServiceContextTest { protected: /** * Given a 'benchmarkName' name (which corresponds to a test name specified in a TEST_F diff --git a/src/mongo/db/query/ce/benchmark_utils.cpp b/src/mongo/db/query/ce/benchmark_utils.cpp index 7a42bb637e157..07d464ddf398c 100644 --- a/src/mongo/db/query/ce/benchmark_utils.cpp +++ b/src/mongo/db/query/ce/benchmark_utils.cpp @@ -29,6 +29,18 @@ #include "mongo/db/query/ce/benchmark_utils.h" +#include +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/query/optimizer/metadata_factory.h" +#include "mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" + namespace mongo::optimizer::ce { BSONObj BenchmarkResults::toBSON() const { BSONObjBuilder bob; diff --git a/src/mongo/db/query/ce/benchmark_utils.h b/src/mongo/db/query/ce/benchmark_utils.h index 3417ab17dcb52..b86a82b44d4f3 100644 --- a/src/mongo/db/query/ce/benchmark_utils.h +++ b/src/mongo/db/query/ce/benchmark_utils.h @@ -27,16 +27,46 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/exec/scoped_timer.h" #include "mongo/db/query/ce/heuristic_estimator.h" #include "mongo/db/query/ce/histogram_estimator.h" #include "mongo/db/query/ce/test_utils.h" +#include "mongo/db/query/optimizer/cascades/interfaces.h" +#include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h" +#include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/stats/collection_statistics.h" #include "mongo/db/query/stats/collection_statistics_mock.h" #include "mongo/db/query/util/named_enum.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #include "mongo/util/system_tick_source.h" +#include "mongo/util/tick_source.h" namespace mongo::optimizer::ce { /** diff --git a/src/mongo/db/query/ce/bound_utils.cpp b/src/mongo/db/query/ce/bound_utils.cpp index 3374a550c74c0..bbbd6a73d3d7d 100644 --- a/src/mongo/db/query/ce/bound_utils.cpp +++ b/src/mongo/db/query/ce/bound_utils.cpp @@ -29,13 +29,23 @@ #include "mongo/db/query/ce/bound_utils.h" +#include +#include +#include +#include + +#include + #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/pipeline/abt/utils.h" +#include "mongo/db/query/optimizer/bool_expression.h" #include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" #include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/interval_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::optimizer::ce { @@ -81,8 +91,8 @@ IntervalRequirement getMinMaxIntervalForType(sbe::value::TypeTags type) { auto&& [max, maxInclusive] = getMinMaxBoundForType(false /*isMin*/, type); tassert(7051104, str::stream() << "Type " << type << " has no maximum", max); - return IntervalRequirement{BoundRequirement(minInclusive, *min), - BoundRequirement(maxInclusive, *max)}; + return IntervalRequirement{BoundRequirement(minInclusive, std::move(*min)), + BoundRequirement(maxInclusive, std::move(*max))}; } bool isIntervalSubsetOfType(const IntervalRequirement& interval, sbe::value::TypeTags type) { diff --git a/src/mongo/db/query/ce/bound_utils.h b/src/mongo/db/query/ce/bound_utils.h index b6ec0d31bc3eb..84e9c543170ad 100644 --- a/src/mongo/db/query/ce/bound_utils.h +++ b/src/mongo/db/query/ce/bound_utils.h @@ -27,8 +27,13 @@ * it in the license file. 
*/ +#include + +#include + #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer::ce { diff --git a/src/mongo/db/query/ce/generated_histograms_test.cpp b/src/mongo/db/query/ce/generated_histograms_test.cpp index cab2ce5b83e74..0ac00a1317c36 100644 --- a/src/mongo/db/query/ce/generated_histograms_test.cpp +++ b/src/mongo/db/query/ce/generated_histograms_test.cpp @@ -27,14 +27,23 @@ * it in the license file. */ -#include +#include +#include +#include +#include +#include #include +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" #include "mongo/db/query/ce/test_utils.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/stats/array_histogram.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/stats/scalar_histogram.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::ce { namespace { diff --git a/src/mongo/db/query/ce/heuristic_dataflow_nodes_test.cpp b/src/mongo/db/query/ce/heuristic_dataflow_nodes_test.cpp index ed8c40d5640fc..0efc0588f5705 100644 --- a/src/mongo/db/query/ce/heuristic_dataflow_nodes_test.cpp +++ b/src/mongo/db/query/ce/heuristic_dataflow_nodes_test.cpp @@ -27,13 +27,29 @@ * it in the license file. */ -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/query/ce/heuristic_estimator.h" #include "mongo/db/query/ce/test_utils.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/cascades/interfaces.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/db/query/optimizer/utils/utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::ce { namespace { @@ -61,7 +77,7 @@ bool isRootNodeFn(const ABT& node) { return node.is(); } -class CEDataflowTest : public LockerNoopServiceContextTest {}; +class CEDataflowTest : public ServiceContextTest {}; TEST_F(CEDataflowTest, EstimateTrivialNodes) { DataflowCETester t; diff --git a/src/mongo/db/query/ce/heuristic_estimator.cpp b/src/mongo/db/query/ce/heuristic_estimator.cpp index 3e3854be225d6..30f054c9c01bc 100644 --- a/src/mongo/db/query/ce/heuristic_estimator.cpp +++ b/src/mongo/db/query/ce/heuristic_estimator.cpp @@ -29,11 +29,27 @@ #include "mongo/db/query/ce/heuristic_estimator.h" -#include "mongo/db/query/ce/heuristic_predicate_estimation.h" +#include +#include +#include + +#include +#include +#include "mongo/db/query/ce/heuristic_predicate_estimation.h" +#include "mongo/db/query/ce/sel_tree_utils.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/cascades/memo.h" 
+#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/ce_math.h" - +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/util/assert_util.h" namespace mongo::optimizer::ce { @@ -137,12 +153,16 @@ class EvalFilterSelectivityTransport { EvalFilterSelectivityResult transport(const PathDefault& node, CEType inputCard, EvalFilterSelectivityResult childResult) { - if (node.getDefault() == Constant::boolean(false)) { - // We have a {$exists: true} predicate on this path if we have a Constant[false] child - // here. Note that ${exists: false} is handled by the presence of a negation expression - // higher in the ABT. - childResult.selectivity = kDefaultExistsSel; + if (const auto* constPtr = node.getDefault().cast(); + constPtr && constPtr->isValueBool()) { + // We have a $exists predicate on this path. Constant[false] represents {$exists: + // true} whereas Constant[true] represents {$exists: false} here. Note that + // Constant[true] usually comes from NotPushdown which push down a higher negation + // through PathDefault. + const bool exists = !constPtr->getValueBool(); + childResult.selectivity = exists ? kDefaultExistsSel : negateSel(kDefaultExistsSel); } + return childResult; } @@ -165,7 +185,7 @@ class EvalFilterSelectivityTransport { SelectivityType disjunctionSel(const SelectivityType left, const SelectivityType right) { // We sum the selectivities and subtract the overlapping part so that it's only counted // once. - return left + right - left * right; + return negateSel(negateSel(left) * negateSel(right)); } }; @@ -173,8 +193,9 @@ class HeuristicTransport { public: CEType transport(const ScanNode& node, CEType /*bindResult*/) { // Default cardinality estimate. - const CEType metadataCE = _metadata._scanDefs.at(node.getScanDefName()).getCE(); - return (metadataCE < 0.0) ? kDefaultCard : metadataCE; + const boost::optional& metadataCE = + _metadata._scanDefs.at(node.getScanDefName()).getCE(); + return metadataCE.get_value_or(kDefaultCard); } CEType transport(const ValueScanNode& node, CEType /*bindResult*/) { @@ -221,44 +242,22 @@ class HeuristicTransport { return {0.0}; } - SelectivityType topLevelSel{1.0}; - std::vector topLevelSelectivities; + EstimateIntervalSelFn estimateIntervalFn = [&](SelectivityTreeBuilder& selTreeBuilder, + const IntervalRequirement& interval) { + selTreeBuilder.atom(heuristicIntervalSel(interval, childResult)); + }; - // TODO SERVER-74540: Handle top-level disjunction. - PSRExpr::visitDNF(node.getReqMap().getRoot(), [&](const PartialSchemaEntry& e) { + EstimatePartialSchemaEntrySelFn estimateFn = [&](SelectivityTreeBuilder& selTreeBuilder, + const PartialSchemaEntry& e) { const auto& [key, req] = e; - if (req.getIsPerfOnly()) { - // Ignore perf-only requirements. - return; - } - - SelectivityType disjSel{1.0}; - std::vector disjSelectivities; - // Intervals are in DNF. 
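[Editor's note on the `disjunctionSel` rewrite above: expressing the disjunction selectivity as `negateSel(negateSel(left) * negateSel(right))` is algebraically the same inclusion-exclusion value as the previous `left + right - left * right` form, i.e. 1 - (1 - l)(1 - r). A plain-double sketch checking the equivalence follows; `negateSel`/`disjunctionSel` here are free functions over `double`, not the optimizer's `SelectivityType`.]

```cpp
#include <cassert>
#include <cmath>
#include <initializer_list>

double negateSel(double s) {
    return 1.0 - s;
}

// Rewritten form: complement of "both predicates fail".
double disjunctionSel(double left, double right) {
    return negateSel(negateSel(left) * negateSel(right));
}

int main() {
    for (double l : {0.1, 0.33, 0.9}) {
        for (double r : {0.05, 0.5, 0.77}) {
            // Equivalent to the old inclusion-exclusion expression l + r - l*r.
            assert(std::fabs(disjunctionSel(l, r) - (l + r - l * r)) < 1e-12);
        }
    }
}
```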
- const auto intervalDNF = req.getIntervals(); - const auto disjuncts = intervalDNF.cast()->nodes(); - for (const auto& disjunct : disjuncts) { - const auto& conjuncts = disjunct.cast()->nodes(); - SelectivityType conjSel{1.0}; - std::vector conjSelectivities; - for (const auto& conjunct : conjuncts) { - const auto& interval = conjunct.cast()->getExpr(); - const SelectivityType sel = heuristicIntervalSel(interval, childResult); - conjSelectivities.push_back(sel); - } - conjSel = conjExponentialBackoff(std::move(conjSelectivities)); - disjSelectivities.push_back(conjSel); - } - disjSel = disjExponentialBackoff(std::move(disjSelectivities)); - topLevelSelectivities.push_back(disjSel); - }); - - if (topLevelSelectivities.empty()) { - return childResult; - } - // The elements of the PartialSchemaRequirements map represent an implicit conjunction. - topLevelSel = conjExponentialBackoff(std::move(topLevelSelectivities)); - CEType card = std::max(topLevelSel * childResult, kMinCard); + IntervalSelectivityTreeBuilder intEstimator{selTreeBuilder, estimateIntervalFn}; + intEstimator.build(req.getIntervals()); + }; + + PartialSchemaRequirementsCardinalityEstimator estimator(estimateFn, childResult); + const CEType estimate = estimator.estimateCE(node.getReqMap().getRoot()); + + const CEType card = std::max(estimate, kMinCard); uassert(6716602, "Invalid cardinality.", validCardinality(card)); return card; } diff --git a/src/mongo/db/query/ce/heuristic_estimator.h b/src/mongo/db/query/ce/heuristic_estimator.h index 0cfef17d6c2dd..359a5bef0c655 100644 --- a/src/mongo/db/query/ce/heuristic_estimator.h +++ b/src/mongo/db/query/ce/heuristic_estimator.h @@ -30,6 +30,10 @@ #pragma once #include "mongo/db/query/optimizer/cascades/interfaces.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer::ce { diff --git a/src/mongo/db/query/ce/heuristic_estimator_test.cpp b/src/mongo/db/query/ce/heuristic_estimator_test.cpp index faada8790272e..9f8f6651ed9c4 100644 --- a/src/mongo/db/query/ce/heuristic_estimator_test.cpp +++ b/src/mongo/db/query/ce/heuristic_estimator_test.cpp @@ -27,21 +27,27 @@ * it in the license file. 
*/ +#include "mongo/db/query/ce/heuristic_estimator.h" + +#include #include +#include -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" -#include "mongo/db/query/ce/heuristic_estimator.h" +#include + +#include "mongo/base/string_data.h" #include "mongo/db/query/ce/test_utils.h" -#include "mongo/db/query/optimizer/cascades/logical_props_derivation.h" -#include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/comparison_op.h" #include "mongo/db/query/optimizer/defs.h" -#include "mongo/db/query/optimizer/explain.h" -#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/opt_phase_manager.h" #include "mongo/db/query/optimizer/props.h" -#include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/db/query/optimizer/utils/utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::ce { namespace { @@ -61,7 +67,7 @@ class HeuristicCETester : public CETester { } }; -class CEHeuristicTest : public LockerNoopServiceContextTest {}; +class CEHeuristicTest : public ServiceContextTest {}; TEST_F(CEHeuristicTest, CEWithoutOptimizationGtLtNum) { std::string query = "{a0 : {$gt : 14, $lt : 21}}"; @@ -688,11 +694,18 @@ TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_OR1path) { TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_OR2paths) { std::string query = "{$or: [{a0: {$gt:44}}, {b0: {$lt: 9}}]}"; - HeuristicCETester ht(collName, kOnlySubPhaseSet); - // Disjunctions on different paths are not SARGable. - ASSERT_MATCH_CE_CARD(ht, query, 8.19, 9.0); - ASSERT_MATCH_CE_CARD(ht, query, 69.0525, 99.0); - ASSERT_MATCH_CE_CARD(ht, query, 551.1, 1000.0); + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 7.52115, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 58.6188, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 451.581, 1000.0); +} + +TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_OR3paths) { + std::string query = "{$or: [{a0: {$gt:44}}, {b0: {$lt: 9}}, {c0: {$eq: 5}}]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 7.66374, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 59.6741, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 455.969, 1000.0); } TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_DNF1pathSimple) { @@ -709,7 +722,7 @@ TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_DNF1pathSimple) { TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_DNF1pathComplex) { - HeuristicCETester ht(collName, kOnlySubPhaseSet); + HeuristicCETester ht(collName); // Each disjunct has different number of conjuncts, // so that its selectivity is different. We need 5 disjuncts to test exponential backoff which // cuts off at the first 4. The conjuncts are in selectivity order. @@ -745,11 +758,22 @@ TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_DNF2paths) { "{$and: [{a0: {$gt: 9}}, {a0: {$lt: 12}}]}," "{$and: [{b0: {$gt:40}}, {b0: {$lt: 44}}]}" "]}"; - HeuristicCETester ht(collName, kOnlySubPhaseSet); - // Disjunctions on different paths are not SARGable. 
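[Editor's note on the "exponential backoff which cuts off at the first 4" mentioned in the DNF1pathComplex comment above: the idea is to apply the strongest (smallest) selectivity at full weight and dampen each additional one, considering at most four predicates so long conjunctions do not collapse the estimate to zero. The sketch below illustrates that idea only; the real `conjExponentialBackoff`/`disjExponentialBackoff` live in the optimizer's ce_math utilities and their exact ordering and constants may differ.]

```cpp
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative conjunction backoff: multiply the sorted selectivities with
// exponents 1, 1/2, 1/4, 1/8 and ignore anything past the fourth predicate.
double conjBackoffSketch(std::vector<double> sels) {
    std::sort(sels.begin(), sels.end());  // most selective (smallest) first
    double result = 1.0;
    double exponent = 1.0;
    const std::size_t n = std::min<std::size_t>(sels.size(), 4);
    for (std::size_t i = 0; i < n; ++i, exponent /= 2.0) {
        result *= std::pow(sels[i], exponent);
    }
    return result;
}

int main() {
    // Two predicates of selectivity 0.5 back off to 0.5 * sqrt(0.5), not 0.25.
    assert(std::fabs(conjBackoffSketch({0.5, 0.5}) - 0.5 * std::sqrt(0.5)) < 1e-9);
}
```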
- ASSERT_MATCH_CE_CARD(ht, query, 6.6591, 9.0); - ASSERT_MATCH_CE_CARD(ht, query, 36.0354, 99.0); - ASSERT_MATCH_CE_CARD(ht, query, 205.941, 1000.0); + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 6.59965, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 41.2515, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 270.42, 1000.0); +} + +TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_DNF4paths) { + std::string query = + "{$or: [" + "{$and: [{a0: {$gt: 9}}, {b0: {$lt: 12}}]}," + "{$and: [{c0: {$gt:40}}, {d0: {$lt: 44}}]}" + "]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 6.59965, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 41.2515, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 270.42, 1000.0); } TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_CNF1path) { @@ -776,6 +800,103 @@ TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_CNF2paths) { ASSERT_MATCH_CE_CARD(ht, query, 192.613, 1000.0); } +TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_CNF4paths) { + std::string query = + "{$and : [" + "{$or : [ {a0 : {$gt : 11}}, {a1 : {$lt : 44}} ]}," + "{$or : [ {b0 : {$gt : 77}}, {b1 : {$lt : 51}} ]}" + "]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 6.2853, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 34.7087, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 203.926, 1000.0); +} + +TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_AsymmetricDisj) { + std::string query = + "{$or: [" + "{a: {$lt: 3}}," + "{$and: [{b: {$gt:5}}, {c: {$lt: 10}}]}" + "]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 7.26203, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 53.5047, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 396.84, 1000.0); +} + +TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_AsymmetricConj) { + std::string query = + "{$and: [" + "{a: {$lt: 3}}," + "{$or: [{b: {$gt:5}}, {c: {$lt: 10}}]}" + "]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 5.2648, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 26.3785, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 149.022, 1000.0); +} + +TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_SymmetricDisjNested) { + std::string query = + "{$or: [" + "{$and: [{$or: [{a: {$gt:5}}, {b: {$lt: 10}}]}, {$or: [{c: {$gt:5}}, {d: {$lt: 10}}]}]}," + "{$and: [{$or: [{e: {$gt:5}}, {f: {$lt: 10}}]}, {$or: [{g: {$gt:5}}, {h: {$lt: 10}}]}]}" + "]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 8.73405, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 72.8961, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 515.182, 1000.0); +} + +TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_SymmetricConjNested) { + std::string query = + "{$and: [" + "{$or: [{$and: [{a: {$gt:5}}, {b: {$lt: 10}}]}, {$and: [{c: {$gt:5}}, {d: {$lt: 10}}]}]}," + "{$or: [{$and: [{e: {$gt:5}}, {f: {$lt: 10}}]}, {$and: [{g: {$gt:5}}, {h: {$lt: 10}}]}]}" + "]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 4.83949, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 17.1888, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 73.1271, 1000.0); +} + +TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionPhase_AsymmetricDisjNested) { + std::string query = + "{$or: [" + "{a: {$gt: 4}}," + "{$and: [" + "{b: {$lt : 3}}," + "{$or: [" + "{c: {$gt: 2}}," + "{$and: [{d: {$lt: 1}}, {e: {$gt: 0}}]}" + "]}" + "]}" + "]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 7.90083, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 58.3051, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 419.095, 1000.0); +} + +TEST_F(CEHeuristicTest, 
CEAfterMemoSubstitutionPhase_AsymmetricConjNested) { + std::string query = + "{$and: [" + "{a: {$gt: 4}}," + "{$or: [" + "{b: {$lt : 3}}," + "{$and: [" + "{c: {$gt: 2}}," + "{$or: [{d: {$lt: 1}}, {e: {$gt: 0}}]}" + "]}" + "]}" + "]}"; + HeuristicCETester ht(collName); + ASSERT_MATCH_CE_CARD(ht, query, 5.61393, 9.0); + ASSERT_MATCH_CE_CARD(ht, query, 27.7382, 99.0); + ASSERT_MATCH_CE_CARD(ht, query, 149.11, 1000.0); +} + + TEST_F(CEHeuristicTest, CEAfterMemoSubstitutionExplorationPhases) { HeuristicCETester ht(collName); ASSERT_MATCH_CE(ht, "{a : 13, b : 42}", 10.0); @@ -811,6 +932,13 @@ TEST_F(CEHeuristicTest, CENotEquality) { ASSERT_MATCH_CE(opt, "{$and: [{f1: {$ne: 7}}, {f2: {$ne: 'abc'}}]}", neNeCE); ASSERT_MATCH_CE(opt, "{$and: [{f1: {$ne: 7}}, {f2: {$eq: 'abc'}}]}", neEqCE); + neNeCE = {9999}; + neEqCE = {9901}; + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f1: {$ne: 'abc'}}]}", neNeCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f1: {$eq: 'abc'}}]}", neEqCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f2: {$ne: 'abc'}}]}", neNeCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f2: {$eq: 'abc'}}]}", neEqCE); + // Update cardinality to 25. collCard = {25}; opt.setCollCard(collCard); @@ -835,6 +963,13 @@ TEST_F(CEHeuristicTest, CENotEquality) { ASSERT_MATCH_CE(opt, "{$and: [{f1: {$ne: 7}}, {f2: {$ne: 'abc'}}]}", neNeCE); ASSERT_MATCH_CE(opt, "{$and: [{f1: {$ne: 7}}, {f2: {$eq: 'abc'}}]}", neEqCE); + neNeCE = {24}; + neEqCE = {21}; + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f1: {$ne: 'abc'}}]}", neNeCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f1: {$eq: 'abc'}}]}", neEqCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f2: {$ne: 'abc'}}]}", neNeCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f2: {$eq: 'abc'}}]}", neEqCE); + // Update cardinality to 9. 
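The `$or` expectations added to `CENotEquality` above (9999/9901 at collection cardinality 10000, 24/21 at cardinality 25) and the cardinality-9 values added just below (8/7) are all consistent with one simple model, stated here purely as a back-of-the-envelope check and not as the estimator's actual code path: treat the heuristic equality selectivity as `sqrt(card)/card` and estimate a disjunction involving `$ne` by negating the complementary conjunction under independence.

```cpp
#include <cmath>
#include <initializer_list>
#include <iostream>

// Assumption (not shown in this diff): equality selectivity ~ sqrt(card)/card,
// and {$or: [{x: {$ne: a}}, {y: {$ne|$eq: b}}]} is estimated as
// card - CE of the negated, independent conjunction.
int main() {
    for (const double card : {10000.0, 25.0, 9.0}) {
        const double sEq = std::sqrt(card) / card;
        const double orNeNe = card - card * sEq * sEq;          // 9999, 24, 8
        const double orNeEq = card - card * sEq * (1.0 - sEq);  // 9901, 21, 7
        std::cout << orNeNe << " " << orNeEq << "\n";
    }
    return 0;
}
```

This reproduces every asserted `$or`-with-`$ne` value in the test exactly, which is why the expectations scale so cleanly with collection cardinality.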
collCard = {9}; opt.setCollCard(collCard); @@ -858,6 +993,13 @@ TEST_F(CEHeuristicTest, CENotEquality) { ASSERT_MATCH_CE(opt, "{$and: [{f1: {$ne: 7}}, {f1: {$eq: 'abc'}}]}", neEqCE); ASSERT_MATCH_CE(opt, "{$and: [{f1: {$ne: 7}}, {f2: {$ne: 'abc'}}]}", neNeCE); ASSERT_MATCH_CE(opt, "{$and: [{f1: {$ne: 7}}, {f2: {$eq: 'abc'}}]}", neEqCE); + + neNeCE = {8}; + neEqCE = {7}; + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f1: {$ne: 'abc'}}]}", neNeCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f1: {$eq: 'abc'}}]}", neEqCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f2: {$ne: 'abc'}}]}", neNeCE); + ASSERT_MATCH_CE(opt, "{$or: [{f1: {$ne: 7}}, {f2: {$eq: 'abc'}}]}", neEqCE); } TEST_F(CEHeuristicTest, CENotOpenRange) { diff --git a/src/mongo/db/query/ce/heuristic_predicate_estimation.cpp b/src/mongo/db/query/ce/heuristic_predicate_estimation.cpp index b3a8b755244c0..8cbed895b3c1f 100644 --- a/src/mongo/db/query/ce/heuristic_predicate_estimation.cpp +++ b/src/mongo/db/query/ce/heuristic_predicate_estimation.cpp @@ -29,8 +29,20 @@ #include "mongo/db/query/ce/heuristic_predicate_estimation.h" +#include +#include +#include + +#include +#include +#include + #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/ce/bound_utils.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/ce_math.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer::ce { @@ -46,35 +58,28 @@ SelectivityType heuristicEqualitySel(const CEType inputCard) { } SelectivityType heuristicClosedRangeSel(const CEType inputCard) { - SelectivityType sel = kInvalidSel; if (inputCard < kSmallLimit) { - sel = kSmallCardClosedRangeSel; + return kSmallCardClosedRangeSel; } else if (inputCard < kMediumLimit) { - sel = kMediumCardClosedRangeSel; - } else { - sel = kLargeCardClosedRangeSel; + return kMediumCardClosedRangeSel; } - return sel; + return kLargeCardClosedRangeSel; } SelectivityType heuristicOpenRangeSel(const CEType inputCard) { - SelectivityType sel = kInvalidSel; if (inputCard < kSmallLimit) { - sel = kSmallCardOpenRangeSel; + return kSmallCardOpenRangeSel; } else if (inputCard < kMediumLimit) { - sel = kMediumCardOpenRangeSel; - } else { - sel = kLargeCardOpenRangeSel; + return kMediumCardOpenRangeSel; } - return sel; + return kLargeCardOpenRangeSel; } SelectivityType heuristicIntervalSel(const IntervalRequirement& interval, const CEType inputCard) { - SelectivityType sel = kInvalidSel; if (interval.isFullyOpen()) { - sel = {1.0}; + return 1.0; } else if (interval.isEquality()) { - sel = heuristicEqualitySel(inputCard); + return heuristicEqualitySel(inputCard); } else if (interval.getHighBound().isPlusInf() || interval.getLowBound().isMinusInf() || getBoundReqTypeTag(interval.getLowBound()) != getBoundReqTypeTag(interval.getHighBound())) { @@ -84,12 +89,9 @@ SelectivityType heuristicIntervalSel(const IntervalRequirement& interval, const // one of the bounds is the lowest/highest value of the previous/next type. // TODO: Notice that sometimes type bracketing uses a min/max value from the same type, // so sometimes we may not detect an open-ended interval. 
- sel = heuristicOpenRangeSel(inputCard); - } else { - sel = heuristicClosedRangeSel(inputCard); + return heuristicOpenRangeSel(inputCard); } - uassert(6716603, "Invalid selectivity.", validSelectivity(sel)); - return sel; + return heuristicClosedRangeSel(inputCard); } CEType heuristicIntervalCard(const IntervalRequirement& interval, const CEType inputCard) { diff --git a/src/mongo/db/query/ce/heuristic_predicate_estimation.h b/src/mongo/db/query/ce/heuristic_predicate_estimation.h index 46b09c7f0b9d3..701201404e655 100644 --- a/src/mongo/db/query/ce/heuristic_predicate_estimation.h +++ b/src/mongo/db/query/ce/heuristic_predicate_estimation.h @@ -27,14 +27,14 @@ * it in the license file. */ +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/ce_math.h" #include "mongo/db/query/optimizer/utils/memo_utils.h" namespace mongo::optimizer::ce { -// Invalid estimate - an arbitrary negative value used for initialization. -constexpr SelectivityType kInvalidSel{-1.0}; -constexpr CEType kInvalidEstimate{-1.0}; - constexpr SelectivityType kDefaultFilterSel{0.1}; constexpr SelectivityType kDefaultExistsSel{0.70}; diff --git a/src/mongo/db/query/ce/hinted_estimator.cpp b/src/mongo/db/query/ce/hinted_estimator.cpp index 1c1b15114331d..fb41ee565877e 100644 --- a/src/mongo/db/query/ce/hinted_estimator.cpp +++ b/src/mongo/db/query/ce/hinted_estimator.cpp @@ -29,7 +29,16 @@ #include "mongo/db/query/ce/hinted_estimator.h" +#include +#include + +#include + #include "mongo/db/query/ce/heuristic_estimator.h" +#include "mongo/db/query/ce/sel_tree_utils.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" namespace mongo::optimizer::ce { class HintedTransport { @@ -39,20 +48,19 @@ class HintedTransport { CEType childResult, CEType /*bindsResult*/, CEType /*refsResult*/) { - CEType result = childResult; - // TODO SERVER-74540: Handle top-level disjunction. - PSRExpr::visitDNF(node.getReqMap().getRoot(), [&](const PartialSchemaEntry& e) { + EstimatePartialSchemaEntrySelFn entrySelFn = [&](SelectivityTreeBuilder& selTreeBuilder, + const PartialSchemaEntry& e) { const auto& [key, req] = e; if (!isIntervalReqFullyOpenDNF(req.getIntervals())) { auto it = _hints.find(key); if (it != _hints.cend()) { - // Assume independence. - result *= it->second; + selTreeBuilder.atom(it->second); } } - }); + }; - return result; + PartialSchemaRequirementsCardinalityEstimator estimator(entrySelFn, childResult); + return estimator.estimateCE(node.getReqMap().getRoot()); } template diff --git a/src/mongo/db/query/ce/hinted_estimator.h b/src/mongo/db/query/ce/hinted_estimator.h index 766a1a1f03c71..d4716b5612b05 100644 --- a/src/mongo/db/query/ce/hinted_estimator.h +++ b/src/mongo/db/query/ce/hinted_estimator.h @@ -29,12 +29,20 @@ #pragma once +#include +#include + #include "mongo/db/query/optimizer/cascades/interfaces.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer::ce { using PartialSchemaSelHints = - std::map; + std::map; /** * Estimation based on hints. 
The hints are organized in a PartialSchemaSelHints structure. diff --git a/src/mongo/db/query/ce/histogram_array_data_test.cpp b/src/mongo/db/query/ce/histogram_array_data_test.cpp index 10cf1685957db..5f57726679652 100644 --- a/src/mongo/db/query/ce/histogram_array_data_test.cpp +++ b/src/mongo/db/query/ce/histogram_array_data_test.cpp @@ -27,14 +27,25 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" #include "mongo/db/query/ce/test_utils.h" -#include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/stats/array_histogram.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/stats/scalar_histogram.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::ce { namespace { diff --git a/src/mongo/db/query/ce/histogram_edge_cases_test.cpp b/src/mongo/db/query/ce/histogram_edge_cases_test.cpp index 79aa3963772bf..699b2941240e3 100644 --- a/src/mongo/db/query/ce/histogram_edge_cases_test.cpp +++ b/src/mongo/db/query/ce/histogram_edge_cases_test.cpp @@ -27,15 +27,41 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/pipeline/abt/utils.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" #include "mongo/db/query/ce/test_utils.h" -#include "mongo/db/query/optimizer/utils/ce_math.h" -#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/maxdiff_test_utils.h" +#include "mongo/db/query/stats/scalar_histogram.h" #include "mongo/db/query/stats/value_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo::optimizer::ce { namespace { diff --git a/src/mongo/db/query/ce/histogram_estimator.cpp b/src/mongo/db/query/ce/histogram_estimator.cpp index 4319c97f785a2..c88087c82ce61 100644 --- a/src/mongo/db/query/ce/histogram_estimator.cpp +++ b/src/mongo/db/query/ce/histogram_estimator.cpp @@ -29,21 +29,41 @@ #include "mongo/db/query/ce/histogram_estimator.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/pipeline/abt/utils.h" - #include "mongo/db/query/ce/bound_utils.h" #include "mongo/db/query/ce/heuristic_predicate_estimation.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" - +#include "mongo/db/query/ce/sel_tree_utils.h" #include "mongo/db/query/cqf_command_utils.h" - +#include "mongo/db/query/optimizer/algebra/operator.h" +#include 
"mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/cascades/memo.h" #include "mongo/db/query/optimizer/explain.h" -#include "mongo/db/query/optimizer/utils/abt_hash.h" -#include "mongo/db/query/optimizer/utils/ce_math.h" -#include "mongo/db/query/optimizer/utils/memo_utils.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/path_utils.h" - +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/db/query/stats/value_utils.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -217,7 +237,6 @@ std::string serializePath(const ABT& path) { auto str = algebra::transport(path, pdt); return str; } - } // namespace IntervalEstimation analyzeIntervalEstimationMode(const stats::ArrayHistogram* histogram, @@ -278,22 +297,6 @@ class HistogramTransport { return {_stats->getCardinality()}; } - /** - * This struct is used to track an intermediate representation of the intervals in the - * requirements map. In particular, grouping intervals along each path in the map allows us to - * determine which paths should be estimated as $elemMatches without relying on a particular - * order of entries in the requirements map. - */ - struct SargableConjunct { - bool includeScalar; - const stats::ArrayHistogram* histogram; - std::vector> intervals; - - bool isPathArr() const { - return histogram && !includeScalar && intervals.empty(); - } - }; - CEType transport(const ABT& n, const SargableNode& node, const Metadata& metadata, @@ -307,103 +310,18 @@ class HistogramTransport { return {0.0}; } - // Initial first pass through the requirements map to extract information about each path. - // TODO SERVER-74540: Handle top-level disjunction. - std::map conjunctRequirements; - PSRExpr::visitDNF(node.getReqMap().getRoot(), [&](const PartialSchemaEntry& e) { - const auto& [key, req] = e; - if (req.getIsPerfOnly()) { - // Ignore perf-only requirements. - return; - } - - const auto serializedPath = serializePath(key._path.ref()); - const auto& interval = req.getIntervals(); - const bool isPathArrInterval = - (_arrayOnlyInterval == interval) && !pathEndsInTraverse(key._path.ref()); - - // Check if we have already seen this path. - if (auto conjunctIt = conjunctRequirements.find({serializedPath}); - conjunctIt != conjunctRequirements.end()) { - auto& conjunctReq = conjunctIt->second; - if (isPathArrInterval) { - // We should estimate this path's intervals using $elemMatch semantics. - // Don't push back the interval for estimation; instead, we use it to change how - // we estimate other intervals along this path. - conjunctReq.includeScalar = false; - } else { - // We will need to estimate this interval. - conjunctReq.intervals.push_back(interval); - } - return; - } - - // Get histogram from statistics if it exists, or null if not. - const auto* histogram = _stats->getHistogram(serializedPath); - - // Add this path to the map. If this is not a 'PathArr' interval, add it to the vector - // of intervals we will be estimating. 
- SargableConjunct sc{!isPathArrInterval, histogram, {}}; - if (sc.includeScalar) { - sc.intervals.push_back(interval); - } - conjunctRequirements.emplace(serializedPath, std::move(sc)); - }); - - std::vector topLevelSelectivities; - for (const auto& [serializedPath, conjunctReq] : conjunctRequirements) { - if (conjunctReq.isPathArr()) { - // If there is a single 'PathArr' interval for this field, we should estimate this - // as the selectivity of array values. - topLevelSelectivities.push_back(getArraySelectivity(*conjunctReq.histogram)); - } - - // Intervals are in DNF. - for (const IntervalReqExpr::Node& intervalDNF : conjunctReq.intervals) { - std::vector disjSelectivities; - - const auto disjuncts = intervalDNF.cast()->nodes(); - for (const auto& disjunct : disjuncts) { - const auto& conjuncts = disjunct.cast()->nodes(); - - std::vector conjSelectivities; - for (const auto& conjunct : conjuncts) { - const auto& interval = conjunct.cast()->getExpr(); - const auto selectivity = estimateInterval(conjunctReq.histogram, - interval, - conjunctReq.includeScalar, - childResult); - OPTIMIZER_DEBUG_LOG(7151301, - 5, - "Estimated path and interval as:", - "path"_attr = serializedPath, - "interval"_attr = - ExplainGenerator::explainInterval(interval), - "selectivity"_attr = selectivity._value); - conjSelectivities.push_back(selectivity); - } - - const auto backoff = conjExponentialBackoff(std::move(conjSelectivities)); - disjSelectivities.push_back(backoff); - } + SelectivityTreeBuilder selTreeBuilder; + selTreeBuilder.pushDisj(); + PSRExpr::visitDisjuncts(node.getReqMap().getRoot(), + [&](const PSRExpr::Node& n, const PSRExpr::VisitorContext&) { + estimateConjunct(n, selTreeBuilder, childResult); + }); - const auto backoff = disjExponentialBackoff(std::move(disjSelectivities)); - OPTIMIZER_DEBUG_LOG(7151303, - 5, - "Estimating disjunction on path using histograms", - "path"_attr = serializedPath, - "intervalDNF"_attr = - ExplainGenerator::explainIntervalExpr(intervalDNF), - "selectivity"_attr = backoff._value); - topLevelSelectivities.push_back(backoff); - } + if (auto selTree = selTreeBuilder.finish()) { + const SelectivityType topLevelSel = estimateSelectivityTree(*selTree); + childResult *= topLevelSel; } - // The elements of the PartialSchemaRequirements map represent an implicit conjunction. - if (!topLevelSelectivities.empty()) { - const auto backoff = conjExponentialBackoff(std::move(topLevelSelectivities)); - childResult *= backoff; - } OPTIMIZER_DEBUG_LOG(7151304, 5, "Final estimate for SargableNode using histograms.", @@ -440,6 +358,109 @@ class HistogramTransport { } private: + /** + * This struct is used to track an intermediate representation of the intervals in the + * requirements map. In particular, grouping intervals along each path in the map allows us to + * determine which paths should be estimated as $elemMatches without relying on a particular + * order of entries in the requirements map. + */ + struct SargableConjunct { + bool includeScalar; + const stats::ArrayHistogram* histogram; + std::vector> intervals; + + bool isPathArr() const { + return histogram && !includeScalar && intervals.empty(); + } + }; + + /** + * Estimate the selectivities of a PartialSchemaRequirements conjunction. It is assumed that the + * conjuncts are all PartialSchemaEntries. The entire conjunction must be estimated at the same + * time because some paths may have multiple requirements which should be considered together. 
+ */ + void estimateConjunct(const PSRExpr::Node& conj, + SelectivityTreeBuilder& selTreeBuilder, + const CEType& childResult) { + // Initial first pass through the requirements map to extract information about each path. + std::map conjunctRequirements; + PSRExpr::visitConjuncts( + conj, [&](const PSRExpr::Node& atom, const PSRExpr::VisitorContext&) { + PSRExpr::visitAtom( + atom, [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + const auto& [key, req] = e; + if (req.getIsPerfOnly()) { + // Ignore perf-only requirements. + return; + } + + const auto serializedPath = serializePath(key._path.ref()); + const auto& interval = req.getIntervals(); + const bool isPathArrInterval = (_arrayOnlyInterval == interval) && + !pathEndsInTraverse(key._path.ref()); + + // Check if we have already seen this path. + if (auto conjunctIt = conjunctRequirements.find({serializedPath}); + conjunctIt != conjunctRequirements.end()) { + auto& conjunctReq = conjunctIt->second; + if (isPathArrInterval) { + // We should estimate this path's intervals using $elemMatch + // semantics. Don't push back the interval for estimation; instead, + // we use it to change how we estimate other intervals along this + // path. + conjunctReq.includeScalar = false; + } else { + // We will need to estimate this interval. + conjunctReq.intervals.push_back(interval); + } + return; + } + + // Get histogram from statistics if it exists, or null if not. + const auto* histogram = _stats->getHistogram(serializedPath); + + // Add this path to the map. If this is not a 'PathArr' interval, add it to + // the vector of intervals we will be estimating. + SargableConjunct sc{!isPathArrInterval, histogram, {}}; + if (sc.includeScalar) { + sc.intervals.push_back(interval); + } + conjunctRequirements.emplace(serializedPath, std::move(sc)); + }); + }); + + selTreeBuilder.pushConj(); + for (const auto& conjunctRequirement : conjunctRequirements) { + const auto& serializedPath = conjunctRequirement.first; + const auto& conjunctReq = conjunctRequirement.second; + + if (conjunctReq.isPathArr()) { + // If there is a single 'PathArr' interval for this field, we should estimate this + // as the selectivity of array values. 
+ selTreeBuilder.atom(getArraySelectivity(*conjunctReq.histogram)); + } + + EstimateIntervalSelFn estimateIntervalFn = [&](SelectivityTreeBuilder& b, + const IntervalRequirement& interval) { + const auto selectivity = estimateInterval( + conjunctReq.histogram, interval, conjunctReq.includeScalar, childResult); + selTreeBuilder.atom(selectivity); + OPTIMIZER_DEBUG_LOG(7151301, + 5, + "Estimated path and interval as:", + "path"_attr = serializedPath, + "interval"_attr = ExplainGenerator::explainInterval(interval), + "selectivity"_attr = selectivity._value); + }; + IntervalSelectivityTreeBuilder intervalSelBuilder{selTreeBuilder, estimateIntervalFn}; + + for (const IntervalReqExpr::Node& intervalDNF : conjunctReq.intervals) { + intervalSelBuilder.build(intervalDNF); + } + } + selTreeBuilder.pop(); + } + std::shared_ptr _stats; std::unique_ptr _fallbackCE; diff --git a/src/mongo/db/query/ce/histogram_estimator.h b/src/mongo/db/query/ce/histogram_estimator.h index 74c0f822bb1e9..2060d511df7ef 100644 --- a/src/mongo/db/query/ce/histogram_estimator.h +++ b/src/mongo/db/query/ce/histogram_estimator.h @@ -29,7 +29,19 @@ #pragma once +#include +#include +#include + +#include + #include "mongo/db/query/optimizer/cascades/interfaces.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/collection_statistics.h" namespace mongo::optimizer::ce { diff --git a/src/mongo/db/query/ce/histogram_estimator_test.cpp b/src/mongo/db/query/ce/histogram_estimator_test.cpp index dccb8cefbcf78..18b42a888794f 100644 --- a/src/mongo/db/query/ce/histogram_estimator_test.cpp +++ b/src/mongo/db/query/ce/histogram_estimator_test.cpp @@ -27,15 +27,37 @@ * it in the license file. 
*/ -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/docval_to_sbeval.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/ce/histogram_estimator.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" #include "mongo/db/query/ce/test_utils.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" #include "mongo/db/query/stats/collection_statistics_mock.h" #include "mongo/db/query/stats/max_diff.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/stats/scalar_histogram.h" +#include "mongo/db/query/stats/value_utils.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::ce { namespace { @@ -190,7 +212,7 @@ void addHistogramFromValues(CEHistogramTester& t, } } -class CEHistogramTest : public LockerNoopServiceContextTest {}; +class CEHistogramTest : public ServiceContextTest {}; TEST_F(CEHistogramTest, AssertSmallMaxDiffHistogramEstimatesAtomicPredicates) { constexpr CEType kCollCard{8.0}; @@ -295,6 +317,114 @@ TEST_F(CEHistogramTest, AssertSmallHistogramEstimatesComplexPredicates) { // heuristics if no predicates have a histogram. ASSERT_MATCH_CE(t, "{a: {$eq: 2}, c: {$eq: 1}}", 2.23607); ASSERT_MATCH_CE(t, "{c: {$eq: 2}, d: {$eq: 22}}", 1.73205); + + // Test disjunction where together the predicates include the entire range. + ASSERT_MATCH_CE(t, "{$or: [{a: {$gt: 2}}, {a: {$lte: 2}}]}", 9.0); + ASSERT_MATCH_CE(t, "{$or: [{a: {$gte: 2}}, {a: {$lte: 2}}]}", 9.0); + ASSERT_MATCH_CE(t, "{$or: [{a: {$gt: 2}}, {a: {$lte: 3}}]}", 9.0); + + // Test disjunction with overlapping, redundant ranges. + ASSERT_MATCH_CE(t, "{$or: [{a: {$gte: 2}}, {a: {$gte: 3}}]}", 6.0); + + // Test disjunction with disjoint ranges. We again expect exponential backoff here. + ASSERT_MATCH_CE(t, "{$or: [{a: {$lte: 1}}, {a: {$gte: 3}}]}", 3.34315); + + // Test disjunctions over multiple fields for which we have histograms. We again expect + // exponential backoff here. + ASSERT_MATCH_CE(t, "{$or: [{a: {$eq: 2}}, {b: {$eq: 22}}]}", 5.73401); + ASSERT_MATCH_CE(t, "{$or: [{a: {$eq: 2}}, {b: {$eq: 25}}]}", 5.0); + ASSERT_MATCH_CE(t, "{$or: [{a: {$eq: 11}}, {b: {$eq: 25}}]}", 0.0); + ASSERT_MATCH_CE(t, "{$or: [{a: {$gt: 11}}, {a: {$lte: 100}}, {b: {$eq: 22}}]}", 9.0); + ASSERT_MATCH_CE(t, "{$or: [{a: {$lt: 3}}, {a: {$gte: 1}}, {b: {$gt: 30}}]}", 9.0); + ASSERT_MATCH_CE(t, "{$or: [{a: {$lte: 1}}, {a: {$gte: 3}}, {b: {$gt: 30}}]}", 6.62159); + + // Test conjunctions over multiple fields for which we may not have histograms. This is + // expected to fall back as described above. 
+ ASSERT_MATCH_CE(t, "{$or: [{a: {$eq: 2}}, {c: {$eq: 1}}]}", 5.73401); + ASSERT_MATCH_CE(t, "{$or: [{c: {$eq: 2}}, {d: {$eq: 22}}]}", 4.1010); +} + +TEST_F(CEHistogramTest, AssertSmallHistogramEstimatesAsymmetricAndNestedPredicates) { + constexpr CEType kCollCard{26.0}; + CEHistogramTester t(collName, kCollCard); + + // Basic histogram, which we will use for all fields: a scalar histogram with values in the + // range [1,11], most of which are in the middle bucket. + auto hist = getArrayHistogramFromData({ + {Value(1), 2 /* frequency */}, + {Value(2), 2 /* frequency */}, + {Value(3), 4 /* frequency */, 10 /* range frequency */, 5 /* ndv */}, + {Value(8), 2 /* frequency */, 5 /* range frequency */, 3 /* ndv */}, + {Value(11), 1 /* frequency */}, + }); + t.addHistogram("a", hist); + t.addHistogram("b", hist); + t.addHistogram("c", hist); + t.addHistogram("d", hist); + t.addHistogram("e", hist); + t.addHistogram("f", hist); + t.addHistogram("g", hist); + t.addHistogram("h", hist); + + // Asymmetric disjunction. + std::string query = + "{$or: [" + "{a: {$lt: 3}}," + "{$and: [{b: {$gt:5}}, {c: {$lt: 9}}]}" + "]}"; + ASSERT_MATCH_CE(t, query, 15.4447); + + // Asymmetric conjunction. + query = + "{$and: [" + "{a: {$lt: 3}}," + "{$or: [{b: {$gt:5}}, {c: {$lt: 9}}]}" + "]}"; + ASSERT_MATCH_CE(t, query, 13.5277); + + // Symmetric disjunction nested (3 levels). + query = + "{$or: [" + "{$and: [{$or: [{a: {$gt:5}}, {b: {$lt: 9}}]}, {$or: [{c: {$gt:5}}, {d: {$lt: 9}}]}]}," + "{$and: [{$or: [{e: {$gt:5}}, {f: {$lt: 9}}]}, {$or: [{g: {$gt:5}}, {h: {$lt: 9}}]}]}" + "]}"; + ASSERT_MATCH_CE(t, query, 19.1444); + + // Symmetric conjunction nested (3 levels). + query = + "{$and: [" + "{$or: [{$and: [{a: {$gt:5}}, {b: {$lt: 9}}]}, {$and: [{c: {$gt:5}}, {d: {$lt: 9}}]}]}," + "{$or: [{$and: [{e: {$gt:5}}, {f: {$lt: 9}}]}, {$and: [{g: {$gt:5}}, {h: {$lt: 9}}]}]}" + "]}"; + ASSERT_MATCH_CE(t, query, 2.65302); + + // Asymmetric disjunction nested (4 levels). + query = + "{$or: [" + "{a: {$gt: 6}}," + "{$and: [" + "{b: {$lt : 5}}," + "{$or: [" + "{c: {$gt: 4}}," + "{$and: [{d: {$lt: 3}}, {e: {$gt: 2}}]}" + "]}" + "]}" + "]}"; + ASSERT_MATCH_CE(t, query, 15.3124); + + // Asymmetric conjunction nested (4 levels). + query = + "{$and: [" + "{a: {$gt: 6}}," + "{$or: [" + "{b: {$lt : 5}}," + "{$and: [" + "{c: {$gt: 4}}," + "{$or: [{d: {$lt: 3}}, {e: {$gt: 2}}]}" + "]}" + "]}" + "]}"; + ASSERT_MATCH_CE(t, query, 4.4555); } TEST_F(CEHistogramTest, SanityTestEmptyHistogram) { @@ -306,6 +436,7 @@ TEST_F(CEHistogramTest, SanityTestEmptyHistogram) { ASSERT_MATCH_CE(t, "{empty: {$lt: 1.0}, empty: {$gt: 0.0}}", 0.0); ASSERT_MATCH_CE(t, "{empty: {$eq: 1.0}, other: {$eq: \"anything\"}}", 0.0); ASSERT_MATCH_CE(t, "{other: {$eq: \"anything\"}, empty: {$eq: 1.0}}", 0.0); + ASSERT_MATCH_CE(t, "{$or: [{empty: {$lt: 1.0}}, {empty: {$gt: 0.0}}]}", 0.0); } TEST_F(CEHistogramTest, TestOneBucketOneIntHistogram) { @@ -690,11 +821,19 @@ TEST_F(CEHistogramTest, TestArrayHistogramOnCompositePredicates) { ASSERT_MATCH_CE(t, "{scalar: {$eq: 5}, array: {$elemMatch: {$eq: 5}}}", 2.236); ASSERT_MATCH_CE(t, "{scalar: {$elemMatch: {$eq: 5}}, array: {$eq: 5}}", 0.0); + ASSERT_MATCH_CE(t, "{$or: [{scalar: {$eq: 5}}, {array: {$eq: 5}}]}", 37.0145); + ASSERT_MATCH_CE(t, "{$or: [{scalar: {$eq: 5}}, {array: {$elemMatch: {$eq: 5}}}]}", 37.0145); + ASSERT_MATCH_CE(t, "{$or: [{scalar: {$elemMatch: {$eq: 5}}}, {array: {$eq: 5}}]}", 35.0); + // Composite predicate on 'mixed' and 'array' fields. 
ASSERT_MATCH_CE(t, "{mixed: {$eq: 5}, array: {$eq: 5}}", 8.721); ASSERT_MATCH_CE(t, "{mixed: {$eq: 5}, array: {$elemMatch: {$eq: 5}}}", 8.721); ASSERT_MATCH_CE(t, "{mixed: {$elemMatch: {$eq: 5}}, array: {$eq: 5}}", 7.603); + ASSERT_MATCH_CE(t, "{$or: [{mixed: {$eq: 5}}, {array: {$eq: 5}}]}", 43.0303); + ASSERT_MATCH_CE(t, "{$or: [{mixed: {$eq: 5}}, {array: {$elemMatch: {$eq: 5}}}]}", 43.0303); + ASSERT_MATCH_CE(t, "{$or: [{mixed: {$elemMatch: {$eq: 5}}}, {array: {$eq: 5}}]}", 41.9737); + // Composite predicate on 'scalar' and 'mixed' fields. ASSERT_MATCH_CE(t, "{scalar: {$eq: 5}, mixed: {$eq: 5}}", 1.669); ASSERT_MATCH_CE(t, "{scalar: {$eq: 5}, mixed: {$elemMatch: {$eq: 5}}}", 1.559); @@ -714,6 +853,32 @@ TEST_F(CEHistogramTest, TestArrayHistogramOnCompositePredicates) { ASSERT_MATCH_CE(t, "{mixed: {$elemMatch: {$eq: 5}}, mixed: {$eq: 5}}", 17.0); ASSERT_MATCH_CE(t, "{array: {$elemMatch: {$eq: 5}}, array: {$eq: 5}}", 35.0); + // Test case where the same path has both a $match and $elemMatch, but where the top-level + // query is a disjunction. + ASSERT_MATCH_CE(t, "{mixed: {$eq: 5}}", 19.5); + ASSERT_MATCH_CE(t, "{scalar: {$eq: 5}}", 5.0); + ASSERT_MATCH_CE(t, "{$or: [{scalar: {$elemMatch: {$eq: 5}}}, {scalar: {$eq: 5}}]}", 5.0); + ASSERT_MATCH_CE(t, + "{$or: [" + "{$and: [{scalar: {$elemMatch: {$eq: 5}}}, {scalar: {$eq: 5}}]}," + "{mixed: {$eq: 5}}]}", + 19.5); + ASSERT_MATCH_CE(t, + "{$or: [" + "{$and: [{mixed: {$elemMatch: {$eq: 5}}}, {mixed: {$eq: 5}}]}," + "{scalar: {$eq: 5}}]}", + 19.2735); + ASSERT_MATCH_CE(t, + "{$or: [" + "{$and: [{array: {$elemMatch: {$eq: 5}}}, {array: {$eq: 5}}]}," + "{scalar: {$eq: 5}}]}", + 37.0145); + ASSERT_MATCH_CE(t, + "{$or: [" + "{$and: [{scalar: {$elemMatch: {$eq: 5}}}, {scalar: {$eq: 5}}]}," + "{scalar: {$eq: 5}}]}", + 5.0); + // Test case with multiple predicates and ranges. ASSERT_MATCH_CE(t, "{array: {$elemMatch: {$lt: 5}}, mixed: {$lt: 5}}", 67.1508); ASSERT_MATCH_CE(t, "{array: {$elemMatch: {$lt: 5}}, mixed: {$gt: 5}}", 27.8562); @@ -743,6 +908,8 @@ TEST_F(CEHistogramTest, TestArrayHistogramOnCompositePredicates) { makeIndexDefinition("scalar", CollationOp::Ascending, /* isMultiKey */ false)}}); ASSERT_MATCH_CE(t, "{scalar: {$elemMatch: {$eq: 5}}}", 0.0); ASSERT_MATCH_CE(t, "{scalar: {$elemMatch: {$gt: 1, $lt: 10}}}", 0.0); + ASSERT_MATCH_CE( + t, "{$or: [{scalar: {$elemMatch: {$gt: 1, $lt: 10}}}, {mixed: {$eq: 5}}]}", 19.5); // Test how we estimate singular PathArr sargable predicate. ASSERT_MATCH_CE_NODE(t, "{array: {$elemMatch: {}}}", 175.0, isSargable); @@ -1322,7 +1489,7 @@ TEST_F(CEHistogramTest, TestHistogramNeq) { ASSERT_EQ_ELEMMATCH_CE(t, eqHeu, eqHeuElem, "b", "{$ne: 'charB'}"); // Test conjunctions where both fields have histograms. Note that when both ops are $ne, we - // never use histogram estimation because the optimizer only generates filetr nodes (no sargable + // never use histogram estimation because the optimizer only generates filter nodes (no sargable // nodes). CEType neNeCE{11.5873}; CEType neEqCE{0.585786}; @@ -1339,9 +1506,26 @@ TEST_F(CEHistogramTest, TestHistogramNeq) { eqEqCE = {0.945742}; ASSERT_MATCH_CE(t, "{$and: [{a: {$ne: 7}}, {noHist: {$eq: 'charB'}}]}", neEqCE); ASSERT_MATCH_CE(t, "{$and: [{a: {$eq: 7}}, {noHist: {$eq: 'charB'}}]}", eqEqCE); + + // Same as above, but testing disjunction. In this case, if either op is $ne, we never use + // histogram estimation because the optimizer only generates filter nodes. 
+ neNeCE = {19}; + neEqCE = {16.5279}; + eqEqCE = {2.92370}; + ASSERT_MATCH_CE(t, "{$or: [{a: {$ne: 7}}, {b: {$ne: 'charB'}}]}", neNeCE); + ASSERT_MATCH_CE(t, "{$or: [{a: {$ne: 7}}, {b: {$eq: 'charB'}}]}", neEqCE); + ASSERT_MATCH_CE(t, "{$or: [{a: {$eq: 7}}, {b: {$ne: 'charB'}}]}", neEqCE); + ASSERT_MATCH_CE(t, "{$or: [{a: {$eq: 7}}, {b: {$eq: 'charB'}}]}", eqEqCE); + + // Where only one fields has a histogram. + eqEqCE = {5.26899}; + ASSERT_MATCH_CE(t, "{$or: [{a: {$ne: 7}}, {noHist: {$ne: 'charB'}}]}", neNeCE); + ASSERT_MATCH_CE(t, "{$or: [{a: {$ne: 7}}, {noHist: {$eq: 'charB'}}]}", neEqCE); + ASSERT_MATCH_CE(t, "{$or: [{a: {$eq: 7}}, {noHist: {$ne: 'charB'}}]}", neEqCE); + ASSERT_MATCH_CE(t, "{$or: [{a: {$eq: 7}}, {noHist: {$eq: 'charB'}}]}", eqEqCE); } -TEST_F(CEHistogramTest, TestHistogramConjTypeCount) { +TEST_F(CEHistogramTest, TestHistogramConjAndDisjTypeCount) { constexpr double kCollCard = 40.0; CEHistogramTester t("test", {kCollCard}); { @@ -1382,6 +1566,12 @@ TEST_F(CEHistogramTest, TestHistogramConjTypeCount) { ASSERT_MATCH_CE(t, "{$and: [{i: {$lt: 8}}, {tc: {$eq: true}}]}", 4.0); // CE = 8/40*sqrt(20/40)*40 ASSERT_MATCH_CE(t, "{$and: [{i: {$lt: 8}}, {tc: {$eq: false}}]}", 5.65685); + + // Same tests, but with a disjunction of predicates. + // CE = (1 - (1 - 10/40) * sqrt(1 - 8/40)) * 40 + ASSERT_MATCH_CE(t, "{$or: [{i: {$lt: 8}}, {tc: {$eq: true}}]}", 13.16718); + // CE = (1 - (1 - 20/40) * sqrt(1 - 8.40)) * 40 + ASSERT_MATCH_CE(t, "{$or: [{i: {$lt: 8}}, {tc: {$eq: false}}]}", 22.11146); } TEST(CEHistogramTest, RoundUpNegativeEstimate) { diff --git a/src/mongo/db/query/ce/histogram_interpolation_test.cpp b/src/mongo/db/query/ce/histogram_interpolation_test.cpp index aab6a1ce48015..0cd2f19064ae3 100644 --- a/src/mongo/db/query/ce/histogram_interpolation_test.cpp +++ b/src/mongo/db/query/ce/histogram_interpolation_test.cpp @@ -27,11 +27,20 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" #include "mongo/db/query/ce/test_utils.h" -#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/stats/array_histogram.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/stats/scalar_histogram.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::ce { namespace { diff --git a/src/mongo/db/query/ce/histogram_predicate_estimation.cpp b/src/mongo/db/query/ce/histogram_predicate_estimation.cpp index 3671cd4ea6a50..805e8d1935419 100644 --- a/src/mongo/db/query/ce/histogram_predicate_estimation.cpp +++ b/src/mongo/db/query/ce/histogram_predicate_estimation.cpp @@ -29,13 +29,20 @@ #include "mongo/db/query/ce/histogram_predicate_estimation.h" -#include "mongo/db/exec/sbe/abt/abt_lower.h" -#include "mongo/db/pipeline/abt/utils.h" +#include +#include +#include +#include +#include + +#include +#include +#include "mongo/db/pipeline/abt/utils.h" #include "mongo/db/query/ce/bound_utils.h" -#include "mongo/db/query/optimizer/syntax/expr.h" -#include "mongo/db/query/optimizer/utils/ce_math.h" #include "mongo/db/query/stats/value_utils.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer::ce { namespace value = sbe::value; diff --git a/src/mongo/db/query/ce/histogram_predicate_estimation.h b/src/mongo/db/query/ce/histogram_predicate_estimation.h index 07799510a9605..60ff64f64d450 100644 --- a/src/mongo/db/query/ce/histogram_predicate_estimation.h +++ b/src/mongo/db/query/ce/histogram_predicate_estimation.h @@ -29,9 +29,14 @@ #pragma once +#include + +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/stats/array_histogram.h" +#include "mongo/db/query/stats/scalar_histogram.h" +#include "mongo/stdx/unordered_map.h" namespace mongo::optimizer::ce { diff --git a/src/mongo/db/query/ce/maxdiff_histogram_test.cpp b/src/mongo/db/query/ce/maxdiff_histogram_test.cpp index 7225c1a877440..28a696a342e98 100644 --- a/src/mongo/db/query/ce/maxdiff_histogram_test.cpp +++ b/src/mongo/db/query/ce/maxdiff_histogram_test.cpp @@ -27,22 +27,27 @@ * it in the license file. 
*/ -#include "mongo/db/concurrency/lock_state.h" -#include "mongo/db/exec/sbe/abt/sbe_abt_test_util.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" #include "mongo/db/query/ce/test_utils.h" -#include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/max_diff.h" #include "mongo/db/query/stats/maxdiff_test_utils.h" #include "mongo/db/query/stats/rand_utils.h" -#include "mongo/db/query/stats/rand_utils_new.h" #include "mongo/db/query/stats/scalar_histogram.h" -#include "mongo/logv2/log_component.h" -#include "mongo/logv2/log_component_settings.h" -#include "mongo/logv2/log_severity.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/stats/value_utils.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::ce { namespace { @@ -60,31 +65,6 @@ using stats::ScalarHistogram; const double kTolerance = 0.001; class HistogramTest : public ServiceContextTest {}; -class HistogramTestLarge : public ServiceContextTest {}; - -class TestObserver : public ServiceContext::ClientObserver { -public: - TestObserver() = default; - ~TestObserver() = default; - - void onCreateClient(Client* client) final {} - - void onDestroyClient(Client* client) final {} - - void onCreateOperationContext(OperationContext* opCtx) override { - opCtx->setLockState(std::make_unique(opCtx->getServiceContext())); - } - - void onDestroyOperationContext(OperationContext* opCtx) final {} -}; - -const ServiceContext::ConstructorActionRegisterer clientObserverRegisterer{ - "TestObserver", - [](ServiceContext* service) { - service->registerClientObserver(std::make_unique()); - }, - [](ServiceContext* serviceContext) { - }}; static double estimateCard(const ScalarHistogram& hist, const int v, const EstimationType type) { const auto [tag, val] = makeInt64Value(v); diff --git a/src/mongo/db/query/ce/sampling_estimator.cpp b/src/mongo/db/query/ce/sampling_estimator.cpp index f07e27280f0e2..2f736b1c7d4be 100644 --- a/src/mongo/db/query/ce/sampling_estimator.cpp +++ b/src/mongo/db/query/ce/sampling_estimator.cpp @@ -29,16 +29,49 @@ #include "mongo/db/query/ce/sampling_estimator.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/db/exec/sbe/abt/abt_lower.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/ce/sel_tree_utils.h" #include "mongo/db/query/cqf_command_utils.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/explain.h" #include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/partial_schema_requirements.h" #include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/reference_tracker.h" +#include 
"mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/utils/abt_hash.h" -#include "mongo/db/query/optimizer/utils/memo_utils.h" +#include "mongo/db/query/optimizer/utils/physical_plan_builder.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -79,19 +112,20 @@ class SamplingPlanExtractor { PhysPlanBuilder result{childResult}; // Retain only output bindings without applying filters. - // TODO SERVER-74540: Handle top-level disjunction. - PSRExpr::visitDNF(node.getReqMap().getRoot(), [&](const PartialSchemaEntry& e) { - const auto& [key, req] = e; - if (const auto& boundProjName = req.getBoundProjectionName()) { - lowerPartialSchemaRequirement( - key, - PartialSchemaRequirement{ - boundProjName, IntervalReqExpr::makeSingularDNF(), req.getIsPerfOnly()}, - _phaseManager.getPathToInterval(), - boost::none /*residualCE*/, - result); - } - }); + PSRExpr::visitAnyShape( + node.getReqMap().getRoot(), + [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext& ctx) { + const auto& [key, req] = e; + if (const auto& boundProjName = req.getBoundProjectionName()) { + lowerPartialSchemaRequirement( + key, + PartialSchemaRequirement{ + boundProjName, IntervalReqExpr::makeSingularDNF(), req.getIsPerfOnly()}, + _phaseManager.getPathToInterval(), + boost::none /*residualCE*/, + result); + } + }); std::swap(n, result._node); } @@ -165,16 +199,10 @@ class SamplingTransport { ABT extracted = planExtractor.extract(n); // Estimate individual requirements separately by potentially re-using cached results. - // Here we assume that each requirement is independent. // TODO: consider estimating together the entire set of requirements (but caching!) - // TODO SERVER-74540: Handle top-level disjunction. - CEType result = childResult; - PSRExpr::visitDNF(node.getReqMap().getRoot(), [&](const PartialSchemaEntry& e) { + EstimatePartialSchemaEntrySelFn estimateFn = [&](SelectivityTreeBuilder& selTreeBuilder, + const PartialSchemaEntry& e) { const auto& [key, req] = e; - if (req.getIsPerfOnly()) { - // Ignore perf-only requirements. - return; - } if (!isIntervalReqFullyOpenDNF(req.getIntervals())) { PhysPlanBuilder lowered{extracted}; @@ -188,12 +216,16 @@ class SamplingTransport { boost::none /*residualCE*/, lowered); uassert(6624243, "Expected a filter node", lowered._node.is()); - result = estimateFilterCE( - metadata, memo, logicalProps, n, std::move(lowered._node), result); + const CEType filterCE = estimateFilterCE( + metadata, memo, logicalProps, n, std::move(lowered._node), childResult); + const SelectivityType sel = + childResult > 0.0 ? (filterCE / childResult) : SelectivityType{0.0}; + selTreeBuilder.atom(sel); } - }); + }; - return result; + PartialSchemaRequirementsCardinalityEstimator estimator(estimateFn, childResult); + return estimator.estimateCE(node.getReqMap().getRoot()); } /** @@ -275,7 +307,9 @@ class SamplingTransport { ids, _phaseManager.getMetadata(), planAndProps._map, - ScanOrder::Random}; + internalCascadesOptimizerSamplingCEScanStartOfColl.load() + ? 
ScanOrder::Forward + : ScanOrder::Random}; auto sbePlan = g.optimize(planAndProps._node, slotMap, ridSlot); tassert(6624261, "Unexpected rid slot", !ridSlot); diff --git a/src/mongo/db/query/ce/sampling_estimator.h b/src/mongo/db/query/ce/sampling_estimator.h index cf9d0973a39c2..a6a9ca649409d 100644 --- a/src/mongo/db/query/ce/sampling_estimator.h +++ b/src/mongo/db/query/ce/sampling_estimator.h @@ -29,8 +29,17 @@ #pragma once +#include +#include + +#include "mongo/db/operation_context.h" #include "mongo/db/query/optimizer/cascades/interfaces.h" +#include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer::ce { diff --git a/src/mongo/db/query/ce/sel_tree_utils.cpp b/src/mongo/db/query/ce/sel_tree_utils.cpp new file mode 100644 index 0000000000000..86201ad10f717 --- /dev/null +++ b/src/mongo/db/query/ce/sel_tree_utils.cpp @@ -0,0 +1,147 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/query/ce/sel_tree_utils.h" + +#include +#include + +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/utils/ce_math.h" +#include "mongo/util/assert_util.h" + +namespace mongo::optimizer::ce { + +namespace { + +class SelectivityTreeEstimatorTransport { +public: + SelectivityType transport(const SelectivityTree::Atom& node) { + SelectivityType sel = node.getExpr(); + tassert(7454000, "Leaf nodes must have computed CE.", validSelectivity(sel)); + return sel; + } + + SelectivityType transport(const SelectivityTree::Conjunction& node, + std::vector children) { + SelectivityType conjSel = conjExponentialBackoff(std::move(children)); + tassert(7454001, "Failed to estimate conjunction.", validSelectivity(conjSel)); + return conjSel; + } + + SelectivityType transport(const SelectivityTree::Disjunction& node, + std::vector children) { + SelectivityType disjSel = disjExponentialBackoff(std::move(children)); + tassert(7454002, "Failed to estimate disjunction.", validSelectivity(disjSel)); + return disjSel; + } + + SelectivityType estimate(const SelectivityTree::Node& selTree) { + SelectivityType sel = algebra::transport(selTree, *this); + tassert(7454003, "Invalid selectivity.", validSelectivity(sel)); + return sel; + } +}; +} // namespace + +SelectivityType estimateSelectivityTree(const SelectivityTree::Node& selTree) { + return SelectivityTreeEstimatorTransport{}.estimate(selTree); +} + +PartialSchemaRequirementsCardinalityEstimator::PartialSchemaRequirementsCardinalityEstimator( + const EstimatePartialSchemaEntrySelFn& estimatePartialSchemEntryFn, CEType inputCE) + : _estimatePartialSchemEntryFn(estimatePartialSchemEntryFn), _inputCE(inputCE) {} + +void PartialSchemaRequirementsCardinalityEstimator::transport(const PSRExpr::Atom& atom) { + const auto& entry = atom.getExpr(); + + // Ignore perf-only requirements. 
+ if (!entry.second.getIsPerfOnly()) { + _estimatePartialSchemEntryFn(_selTreeBuilder, entry); + } +} + +void PartialSchemaRequirementsCardinalityEstimator::prepare(const PSRExpr::Conjunction& node) { + _selTreeBuilder.pushConj(); +} +void PartialSchemaRequirementsCardinalityEstimator::transport( + const PSRExpr::Conjunction& node, const PSRExpr::NodeVector& /* children */) { + _selTreeBuilder.pop(); +} + +void PartialSchemaRequirementsCardinalityEstimator::prepare(const PSRExpr::Disjunction& node) { + _selTreeBuilder.pushDisj(); +} +void PartialSchemaRequirementsCardinalityEstimator::transport( + const PSRExpr::Disjunction& node, const PSRExpr::NodeVector& /* children */) { + _selTreeBuilder.pop(); +} + +CEType PartialSchemaRequirementsCardinalityEstimator::estimateCE(const PSRExpr::Node& n) { + algebra::transport(n, *this); + if (auto selTree = _selTreeBuilder.finish()) { + return _inputCE * estimateSelectivityTree(*selTree); + } + + return _inputCE; +} + + +IntervalSelectivityTreeBuilder::IntervalSelectivityTreeBuilder( + SelectivityTreeBuilder& selTreeBuilder, const EstimateIntervalSelFn& estimateIntervalSelFn) + : _estimateIntervalSelFn(estimateIntervalSelFn), _selTreeBuilder(selTreeBuilder) {} + +void IntervalSelectivityTreeBuilder::transport(const IntervalReqExpr::Atom& node) { + _estimateIntervalSelFn(_selTreeBuilder, node.getExpr()); +} + +void IntervalSelectivityTreeBuilder::prepare(const IntervalReqExpr::Conjunction& node) { + _selTreeBuilder.pushConj(); +} +void IntervalSelectivityTreeBuilder::transport(const IntervalReqExpr::Conjunction& node, + const IntervalReqExpr::NodeVector& /* children */) { + _selTreeBuilder.pop(); +} + +void IntervalSelectivityTreeBuilder::prepare(const IntervalReqExpr::Disjunction& node) { + _selTreeBuilder.pushDisj(); +} +void IntervalSelectivityTreeBuilder::transport(const IntervalReqExpr::Disjunction& node, + const IntervalReqExpr::NodeVector& /* children */) { + _selTreeBuilder.pop(); +} + +void IntervalSelectivityTreeBuilder::build(const IntervalReqExpr::Node& intervalTree) { + algebra::transport(intervalTree, *this); +} +} // namespace mongo::optimizer::ce diff --git a/src/mongo/db/query/ce/sel_tree_utils.h b/src/mongo/db/query/ce/sel_tree_utils.h new file mode 100644 index 0000000000000..13d26517cc046 --- /dev/null +++ b/src/mongo/db/query/ce/sel_tree_utils.h @@ -0,0 +1,123 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. 
If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include + +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" + +namespace mongo::optimizer::ce { +/** + * A tree of selectivity estimates for a certain boolean expression. The leaves of the tree contain + * selectivity estimates of some nodes. The internal nodes encode the Boolean structure of the + * expression being estimated but they do not contain the internal node estimates themselves. + * This tree is passed to a Boolean selectivity estimator that knows how to combine child estimates. + */ +using SelectivityTree = BoolExpr; +using SelectivityTreeBuilder = + SelectivityTree::Builder; + +SelectivityType estimateSelectivityTree(const SelectivityTree::Node& selTree); + +/** + * Function to estimate one PartialSchemaEntry within a PartialSchemaRequirements expression. The + * estimate should be incorporated into the builder. + */ +using EstimatePartialSchemaEntrySelFn = + std::function; + +/** + * Given a Boolean tree of PartialSchemaEntries, build a SelectivityTree with the same structure, + * such that the leaf nodes of this tree contain selectivity estimates of the corresponding entries, + * produce a selectivity estimate from the SelectivityTree, and use that to estimate the cardinality + * of the requirements, given 'inputCE'. Leaf node selectivities are estimated via the provided + * EstimatePartialSchemaEntrySelFn. Perf-only partial schema entries are not included in the + * estimate. Return 'inputCE' if there are no entries to estimate. + */ +class PartialSchemaRequirementsCardinalityEstimator { +public: + PartialSchemaRequirementsCardinalityEstimator( + const EstimatePartialSchemaEntrySelFn& estimatePartialSchemEntryFn, CEType inputCE); + + void transport(const PSRExpr::Atom& atom); + + void prepare(const PSRExpr::Conjunction& node); + void transport(const PSRExpr::Conjunction& node, const PSRExpr::NodeVector&); + + void prepare(const PSRExpr::Disjunction& node); + void transport(const PSRExpr::Disjunction& node, const PSRExpr::NodeVector&); + + CEType estimateCE(const PSRExpr::Node& n); + +private: + const EstimatePartialSchemaEntrySelFn& _estimatePartialSchemEntryFn; + const CEType _inputCE; + SelectivityTreeBuilder _selTreeBuilder; +}; + +/** + * Function to estimate one IntervalRequirement within a IntervalReqExpr. The estimate should be + * incorporated into the builder. + */ +using EstimateIntervalSelFn = + std::function; + +/** + * Given a Boolean tree of intervals build a SelectivityTree with the same structure, such that the + * leaf nodes of this tree contain selectivity estimates of the corresponding intervals. Leaf node + * selectivities are estimated via the provided EstimateIntervalSelFn. 
+ */ +class IntervalSelectivityTreeBuilder { +public: + IntervalSelectivityTreeBuilder(SelectivityTreeBuilder& selTreeBuilder, + const EstimateIntervalSelFn& estimateIntervalSelFn); + + void transport(const IntervalReqExpr::Atom& node); + + void prepare(const IntervalReqExpr::Conjunction& node); + + void transport(const IntervalReqExpr::Conjunction& node, + const IntervalReqExpr::NodeVector& /* children */); + + void prepare(const IntervalReqExpr::Disjunction& node); + void transport(const IntervalReqExpr::Disjunction& node, + const IntervalReqExpr::NodeVector& /* children */); + + void build(const IntervalReqExpr::Node& intervalTree); + +private: + const EstimateIntervalSelFn& _estimateIntervalSelFn; + SelectivityTreeBuilder& _selTreeBuilder; +}; +} // namespace mongo::optimizer::ce diff --git a/src/mongo/db/query/ce/test_utils.cpp b/src/mongo/db/query/ce/test_utils.cpp index cd1e9939a5475..bd4161054f437 100644 --- a/src/mongo/db/query/ce/test_utils.cpp +++ b/src/mongo/db/query/ce/test_utils.cpp @@ -29,15 +29,33 @@ #include "mongo/db/query/ce/test_utils.h" +#include +#include +#include +#include +#include +#include + +#include +#include + #include "mongo/db/exec/docval_to_sbeval.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/pipeline/abt/utils.h" +#include "mongo/db/query/optimizer/cascades/memo.h" #include "mongo/db/query/optimizer/explain.h" #include "mongo/db/query/optimizer/metadata_factory.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/utils/const_fold_interface.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/stats/value_utils.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util_core.h" namespace mongo::optimizer::ce { namespace value = sbe::value; @@ -111,7 +129,7 @@ CEType CETester::getCE(ABT& abt, std::function nodePredicate) return card; } - CEType outCard = kInvalidCardinality; + boost::optional outCard; for (size_t groupId = 0; groupId < memo.getGroupCount(); groupId++) { // We only want to return the cardinality for the memo group matching the 'nodePredicate'. 
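Taken together, the two estimators added above follow one pattern: algebra::transport() walks the requirement (or interval) tree, prepare() mirrors each conjunction/disjunction into the SelectivityTreeBuilder via pushConj()/pushDisj(), transport() pops it, atoms contribute per-leaf selectivities, and estimateCE() finally scales _inputCE by estimateSelectivityTree() over the finished tree. The combination policy inside estimateSelectivityTree() is not part of this hunk; the self-contained sketch below assumes a plain independence model (multiply selectivities under AND, combine complements under OR) purely to illustrate the shape of that final collapse. It is not MongoDB code.

// Sketch only; assumes independent predicates.
#include <iostream>
#include <memory>
#include <vector>

struct SelNode {
    enum class Kind { Atom, Conj, Disj } kind = Kind::Atom;
    double sel = 1.0;                                // leaf selectivity (Atom)
    std::vector<std::unique_ptr<SelNode>> children;  // Conj/Disj operands
};

// Collapse the Boolean tree into a single selectivity.
double combine(const SelNode& n) {
    switch (n.kind) {
        case SelNode::Kind::Atom:
            return n.sel;
        case SelNode::Kind::Conj: {
            double s = 1.0;  // AND of independent predicates: product
            for (const auto& c : n.children) {
                s *= combine(*c);
            }
            return s;
        }
        case SelNode::Kind::Disj: {
            double noneMatch = 1.0;  // OR: 1 - P(no disjunct matches)
            for (const auto& c : n.children) {
                noneMatch *= 1.0 - combine(*c);
            }
            return 1.0 - noneMatch;
        }
    }
    return 1.0;
}

int main() {
    auto atom = [](double s) {
        auto n = std::make_unique<SelNode>();
        n->sel = s;
        return n;
    };

    // (a AND (b OR c)) with leaf selectivities 0.5, 0.1 and 0.2.
    auto orNode = std::make_unique<SelNode>();
    orNode->kind = SelNode::Kind::Disj;
    orNode->children.push_back(atom(0.1));
    orNode->children.push_back(atom(0.2));

    SelNode root;
    root.kind = SelNode::Kind::Conj;
    root.children.push_back(atom(0.5));
    root.children.push_back(std::move(orNode));

    const double inputCE = 1000.0;
    std::cout << inputCE * combine(root) << "\n";  // 1000 * 0.5 * (1 - 0.9 * 0.8) = 140
    return 0;
}

Perf-only partial schema entries, which the real transport() skips via getIsPerfOnly(), would simply never add a leaf in this picture.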
if (const auto& node = memo.getLogicalNodes(groupId).front(); nodePredicate(node)) { @@ -121,13 +139,13 @@ CEType CETester::getCE(ABT& abt, std::function nodePredicate) } } - ASSERT_NOT_EQUALS(outCard, kInvalidCardinality); + ASSERT_TRUE(outCard.has_value()); if constexpr (kCETestLogOnly) { - std::cout << "CE: " << outCard << std::endl; + std::cout << "CE: " << *outCard << std::endl; } - return outCard; + return *outCard; } void CETester::optimize(OptPhaseManager& phaseManager, ABT& abt) const { @@ -152,7 +170,7 @@ void CETester::setIndexes(opt::unordered_map index } void CETester::addCollection(std::string collName, - CEType numRecords, + boost::optional numRecords, opt::unordered_map indexes) { _metadata._scanDefs.insert_or_assign(collName, createScanDef({}, diff --git a/src/mongo/db/query/ce/test_utils.h b/src/mongo/db/query/ce/test_utils.h index 3a9c7f9c78e50..1c99b324b6cef 100644 --- a/src/mongo/db/query/ce/test_utils.h +++ b/src/mongo/db/query/ce/test_utils.h @@ -29,9 +29,26 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" #include "mongo/db/query/optimizer/cascades/interfaces.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/query/stats/scalar_histogram.h" namespace mongo::optimizer::ce { @@ -39,7 +56,6 @@ namespace mongo::optimizer::ce { constexpr bool kCETestLogOnly = false; const double kMaxCEError = 0.01; -const CEType kInvalidCardinality{-1.0}; const OptPhaseManager::PhaseSet kDefaultCETestPhaseSet{OptPhase::MemoSubstitutionPhase, OptPhase::MemoExplorationPhase, @@ -182,7 +198,7 @@ class CETester { * Adds a ScanDefinition for an additional collection for the test. 
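A small but recurring cleanup in the test harness above: the kInvalidCardinality{-1.0} sentinel is removed, getCE() accumulates into a boost::optional and asserts has_value() instead of comparing against the magic value, and addCollection() now takes an optional record count. A minimal sketch of the same sentinel-to-optional move, with a hypothetical lookup standing in for the memo-group search:

// Sketch only: boost::optional instead of a -1.0 "invalid" sentinel.
#include <boost/optional.hpp>
#include <iostream>
#include <map>
#include <string>

// Hypothetical lookup; returns boost::none rather than -1.0 when absent.
boost::optional<double> findCardinality(const std::map<std::string, double>& groups,
                                        const std::string& name) {
    const auto it = groups.find(name);
    if (it == groups.end()) {
        return boost::none;
    }
    return it->second;
}

int main() {
    const std::map<std::string, double> groups{{"root", 140.0}};

    if (const auto card = findCardinality(groups, "root")) {
        std::cout << "CE: " << *card << "\n";  // prints "CE: 140"
    }

    // "Missing" is a distinct state, not a value that could collide with real data.
    std::cout << std::boolalpha << findCardinality(groups, "other").has_value() << "\n";  // false
    return 0;
}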
*/ void addCollection(std::string collName, - CEType numRecords, + boost::optional numRecords, opt::unordered_map indexes = {}); /** diff --git a/src/mongo/db/query/ce_mode_parameter.cpp b/src/mongo/db/query/ce_mode_parameter.cpp index f54831bf626d8..61c857cb87820 100644 --- a/src/mongo/db/query/ce_mode_parameter.cpp +++ b/src/mongo/db/query/ce_mode_parameter.cpp @@ -28,7 +28,10 @@ */ #include "mongo/db/query/ce_mode_parameter.h" -#include "mongo/db/query/query_knobs_gen.h" + +#include + +#include "mongo/base/error_codes.h" namespace mongo::optimizer::ce { Status validateCEMode(const std::string& value, const boost::optional&) { diff --git a/src/mongo/db/query/ce_mode_parameter.h b/src/mongo/db/query/ce_mode_parameter.h index f253c60e5fd6a..a7a93e5b50b83 100644 --- a/src/mongo/db/query/ce_mode_parameter.h +++ b/src/mongo/db/query/ce_mode_parameter.h @@ -31,6 +31,8 @@ #include +#include + #include "mongo/base/status.h" #include "mongo/db/tenant_id.h" diff --git a/src/mongo/db/query/ce_mode_parameter_test.cpp b/src/mongo/db/query/ce_mode_parameter_test.cpp index 011c5f210d3d2..c2e062828cd03 100644 --- a/src/mongo/db/query/ce_mode_parameter_test.cpp +++ b/src/mongo/db/query/ce_mode_parameter_test.cpp @@ -29,7 +29,11 @@ #include "mongo/db/query/ce_mode_parameter.h" -#include "mongo/unittest/unittest.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::ce { diff --git a/src/mongo/db/query/classic_plan_cache.cpp b/src/mongo/db/query/classic_plan_cache.cpp index 00f189587f95c..2d23de4f5fea2 100644 --- a/src/mongo/db/query/classic_plan_cache.cpp +++ b/src/mongo/db/query/classic_plan_cache.cpp @@ -29,7 +29,21 @@ #include "mongo/db/query/classic_plan_cache.h" +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/basic_types.h" #include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/str.h" namespace mongo { CounterMetric planCacheTotalSizeEstimateBytes("query.planCacheTotalSizeEstimateBytes"); @@ -107,14 +121,14 @@ std::unique_ptr SolutionCacheData::clone() const { std::string SolutionCacheData::toString() const { switch (this->solnType) { case WHOLE_IXSCAN_SOLN: - verify(this->tree.get()); + MONGO_verify(this->tree.get()); return str::stream() << "(whole index scan solution: " << "dir=" << this->wholeIXSolnDir << "; " << "tree=" << this->tree->toString() << ")"; case COLLSCAN_SOLN: return "(collection scan)"; case USE_INDEX_TAGS_SOLN: - verify(this->tree.get()); + MONGO_verify(this->tree.get()); return str::stream() << "(index-tagged expression tree: " << "tree=" << this->tree->toString() << ")"; } diff --git a/src/mongo/db/query/classic_plan_cache.h b/src/mongo/db/query/classic_plan_cache.h index e510b48dd6d63..95670935cb584 100644 --- a/src/mongo/db/query/classic_plan_cache.h +++ b/src/mongo/db/query/classic_plan_cache.h @@ -29,12 +29,24 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include + +#include #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/index_entry.h" #include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_cache_key_info.h" 
+#include "mongo/util/assert_util.h" +#include "mongo/util/container_size_helper.h" namespace mongo { diff --git a/src/mongo/db/query/classic_stage_builder.cpp b/src/mongo/db/query/classic_stage_builder.cpp index f5e0324d89056..0a533ab24bf60 100644 --- a/src/mongo/db/query/classic_stage_builder.cpp +++ b/src/mongo/db/query/classic_stage_builder.cpp @@ -28,21 +28,31 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/classic_stage_builder.h" - #include - +#include +#include + +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +// IWYU pragma: no_include "boost/move/detail/iterator_to_raw_pointer.hpp" +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/exec/and_hash.h" #include "mongo/db/exec/and_sorted.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/count_scan.h" #include "mongo/db/exec/distinct_scan.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/eof.h" #include "mongo/db/exec/fetch.h" #include "mongo/db/exec/geo_near.h" @@ -59,10 +69,27 @@ #include "mongo/db/exec/sort_key_generator.h" #include "mongo/db/exec/text_match.h" #include "mongo/db/exec/text_or.h" +#include "mongo/db/fts/fts_query_impl.h" +#include "mongo/db/fts/fts_spec.h" #include "mongo/db/index/fts_access_method.h" -#include "mongo/db/matcher/extensions_callback_real.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/classic_stage_builder.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -73,6 +100,8 @@ namespace mongo::stage_builder { std::unique_ptr ClassicStageBuilder::build(const QuerySolutionNode* root) { auto* const expCtx = _cq.getExpCtxRaw(); + const auto& collectionPtr = _collection.getCollectionPtr(); + switch (root->getType()) { case STAGE_COLLSCAN: { const CollectionScanNode* csn = static_cast(root); @@ -96,12 +125,12 @@ std::unique_ptr ClassicStageBuilder::build(const QuerySolutionNode* r case STAGE_IXSCAN: { const IndexScanNode* ixn = static_cast(root); - invariant(_collection); - auto descriptor = _collection->getIndexCatalog()->findIndexByName( + invariant(collectionPtr); + auto descriptor = collectionPtr->getIndexCatalog()->findIndexByName( _opCtx, ixn->index.identifier.catalogName); invariant(descriptor, - str::stream() << "Namespace: " << _collection->ns() - << ", CanonicalQuery: " << _cq.toStringShort() + str::stream() << "Namespace: " << collectionPtr->ns().toStringForErrorMsg() + << ", CanonicalQuery: " << 
_cq.toStringShortForErrorMsg() << ", IndexEntry: " << ixn->index.toString()); // We use the node's internal name, keyPattern and multikey details here. For $** @@ -252,8 +281,8 @@ std::unique_ptr ClassicStageBuilder::build(const QuerySolutionNode* r params.addPointMeta = node->addPointMeta; params.addDistMeta = node->addDistMeta; - invariant(_collection); - const IndexDescriptor* twoDIndex = _collection->getIndexCatalog()->findIndexByName( + invariant(collectionPtr); + const IndexDescriptor* twoDIndex = collectionPtr->getIndexCatalog()->findIndexByName( _opCtx, node->index.identifier.catalogName); invariant(twoDIndex); @@ -269,8 +298,8 @@ std::unique_ptr ClassicStageBuilder::build(const QuerySolutionNode* r params.addPointMeta = node->addPointMeta; params.addDistMeta = node->addDistMeta; - invariant(_collection); - const IndexDescriptor* s2Index = _collection->getIndexCatalog()->findIndexByName( + invariant(collectionPtr); + const IndexDescriptor* s2Index = collectionPtr->getIndexCatalog()->findIndexByName( _opCtx, node->index.identifier.catalogName); invariant(s2Index); @@ -292,8 +321,8 @@ std::unique_ptr ClassicStageBuilder::build(const QuerySolutionNode* r } case STAGE_TEXT_MATCH: { auto node = static_cast(root); - tassert(5432200, "collection object is not provided", _collection); - auto catalog = _collection->getIndexCatalog(); + tassert(5432200, "collection object is not provided", collectionPtr); + auto catalog = collectionPtr->getIndexCatalog(); tassert(5432201, "index catalog is unavailable", catalog); auto desc = catalog->findIndexByName(_opCtx, node->index.identifier.catalogName); tassert(5432202, @@ -324,20 +353,19 @@ std::unique_ptr ClassicStageBuilder::build(const QuerySolutionNode* r const ShardingFilterNode* fn = static_cast(root); auto childStage = build(fn->children[0].get()); - auto scopedCss = CollectionShardingState::assertCollectionLockedAndAcquire( - _opCtx, _collection->ns()); + auto shardFilterer = _collection.getShardingFilter(_opCtx); + invariant(shardFilterer, + "Attempting to use shard filter when there's no shard filter available for " + "the collection"); + return std::make_unique( - expCtx, - scopedCss->getOwnershipFilter( - _opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup), - _ws, - std::move(childStage)); + expCtx, std::move(*shardFilterer), _ws, std::move(childStage)); } case STAGE_DISTINCT_SCAN: { const DistinctNode* dn = static_cast(root); - invariant(_collection); - auto descriptor = _collection->getIndexCatalog()->findIndexByName( + invariant(collectionPtr); + auto descriptor = collectionPtr->getIndexCatalog()->findIndexByName( _opCtx, dn->index.identifier.catalogName); invariant(descriptor); @@ -357,8 +385,8 @@ std::unique_ptr ClassicStageBuilder::build(const QuerySolutionNode* r case STAGE_COUNT_SCAN: { const CountScanNode* csn = static_cast(root); - invariant(_collection); - auto descriptor = _collection->getIndexCatalog()->findIndexByName( + invariant(collectionPtr); + auto descriptor = collectionPtr->getIndexCatalog()->findIndexByName( _opCtx, csn->index.identifier.catalogName); invariant(descriptor); @@ -427,7 +455,8 @@ std::unique_ptr ClassicStageBuilder::build(const QuerySolutionNode* r case STAGE_SPOOL: case STAGE_SENTINEL: case STAGE_COLUMN_SCAN: - case STAGE_UPDATE: { + case STAGE_UPDATE: + case STAGE_SEARCH: { LOGV2_WARNING(4615604, "Can't build exec tree for node", "node"_attr = *root); } } diff --git a/src/mongo/db/query/classic_stage_builder.h b/src/mongo/db/query/classic_stage_builder.h index 
d761cedfca5af..68a7becfe2bb4 100644 --- a/src/mongo/db/query/classic_stage_builder.h +++ b/src/mongo/db/query/classic_stage_builder.h @@ -29,26 +29,38 @@ #pragma once +#include +#include + +#include + #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/stage_builder.h" namespace mongo::stage_builder { /** * A stage builder which builds an executable tree using classic PlanStages. */ -class ClassicStageBuilder : public StageBuilder { +class ClassicStageBuilder : public StageBuilder> { public: + using PlanType = std::unique_ptr; + ClassicStageBuilder(OperationContext* opCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const CanonicalQuery& cq, const QuerySolution& solution, WorkingSet* ws) - : StageBuilder{opCtx, cq, solution}, _collection(collection), _ws{ws} {} + : StageBuilder{opCtx, cq, solution}, _collection(collection), _ws{ws} {} - std::unique_ptr build(const QuerySolutionNode* root) final; + PlanType build(const QuerySolutionNode* root) final; private: - const CollectionPtr& _collection; + VariantCollectionPtrOrAcquisition _collection; WorkingSet* _ws; boost::optional _ftsKeyPrefixSize; diff --git a/src/mongo/db/query/classic_stage_builder_test.cpp b/src/mongo/db/query/classic_stage_builder_test.cpp index 0df26b56b0240..7485a07b94241 100644 --- a/src/mongo/db/query/classic_stage_builder_test.cpp +++ b/src/mongo/db/query/classic_stage_builder_test.cpp @@ -27,16 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/classic_stage_builder.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -76,7 +91,7 @@ class ClassicStageBuilderTest : public ServiceContextMongoDTest { ASSERT_OK(statusWithCQ.getStatus()); stage_builder::ClassicStageBuilder builder{ - opCtx(), CollectionPtr::null, *statusWithCQ.getValue(), *querySolution, workingSet()}; + opCtx(), &CollectionPtr::null, *statusWithCQ.getValue(), *querySolution, workingSet()}; return builder.build(querySolution->root()); } diff --git a/src/mongo/db/query/collation/SConscript b/src/mongo/db/query/collation/SConscript index 98acc2f2a5728..f3746bceb0dce 100644 --- a/src/mongo/db/query/collation/SConscript +++ b/src/mongo/db/query/collation/SConscript @@ -27,13 +27,13 @@ env.Library( ) env.Library( - target="collator_factory_interface", + target='collator_factory_interface', source=[ - 
"collator_factory_interface.cpp", + 'collator_factory_interface.cpp', ], LIBDEPS=[ - "$BUILD_DIR/mongo/db/service_context", - "collator_interface", + '$BUILD_DIR/mongo/db/service_context', + 'collator_interface', ], ) diff --git a/src/mongo/db/query/collation/collation_bson_comparison_test.cpp b/src/mongo/db/query/collation/collation_bson_comparison_test.cpp index 8fe135a1a0cc6..bc221b534f648 100644 --- a/src/mongo/db/query/collation/collation_bson_comparison_test.cpp +++ b/src/mongo/db/query/collation/collation_bson_comparison_test.cpp @@ -27,16 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement_comparator.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/simple_bsonelement_comparator.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/collation/collation_index_key.cpp b/src/mongo/db/query/collation/collation_index_key.cpp index 44b647044ca0a..de3ca3e7e88e3 100644 --- a/src/mongo/db/query/collation/collation_index_key.cpp +++ b/src/mongo/db/query/collation/collation_index_key.cpp @@ -27,18 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collation_index_key.h" - -#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/query/collation/collation_index_key.h b/src/mongo/db/query/collation/collation_index_key.h index e9c1dc73121aa..8493c60b182e9 100644 --- a/src/mongo/db/query/collation/collation_index_key.h +++ b/src/mongo/db/query/collation/collation_index_key.h @@ -35,6 +35,7 @@ namespace mongo { class BSONElement; class BSONObjBuilder; + class CollatorInterface; /** diff --git a/src/mongo/db/query/collation/collation_index_key_test.cpp b/src/mongo/db/query/collation/collation_index_key_test.cpp index 1f184778fe2f9..a73a35a30ee09 100644 --- a/src/mongo/db/query/collation/collation_index_key_test.cpp +++ b/src/mongo/db/query/collation/collation_index_key_test.cpp @@ -27,15 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collation_index_key.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/json.h" +#include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/storage/key_string.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace { @@ -44,18 +53,18 @@ using namespace mongo; void assertKeyStringCollatorOutput(const CollatorInterfaceMock& collator, const BSONObj& dataObj, const BSONObj& expected) { - KeyString::Builder ks(KeyString::Version::kLatestVersion, KeyString::ALL_ASCENDING); + key_string::Builder ks(key_string::Version::kLatestVersion, key_string::ALL_ASCENDING); ks.appendBSONElement(dataObj.firstElement(), [&](StringData stringData) { return collator.getComparisonString(stringData); }); - ASSERT_EQ( - ks.getValueCopy(), - KeyString::Builder(KeyString::Version::kLatestVersion, expected, KeyString::ALL_ASCENDING)); + ASSERT_EQ(ks.getValueCopy(), + key_string::Builder( + key_string::Version::kLatestVersion, expected, key_string::ALL_ASCENDING)); } void assertKeyStringCollatorThrows(const CollatorInterfaceMock& collator, const BSONObj& dataObj) { - KeyString::Builder ks(KeyString::Version::kLatestVersion, KeyString::ALL_ASCENDING); + key_string::Builder ks(key_string::Version::kLatestVersion, key_string::ALL_ASCENDING); ASSERT_THROWS_CODE(ks.appendBSONElement(dataObj.firstElement(), [&](StringData stringData) { return collator.getComparisonString(stringData); diff --git a/src/mongo/db/query/collation/collator_factory_icu.cpp b/src/mongo/db/query/collation/collator_factory_icu.cpp index c0451909db044..102bf58f63de2 100644 --- a/src/mongo/db/query/collation/collator_factory_icu.cpp +++ b/src/mongo/db/query/collation/collator_factory_icu.cpp @@ -27,20 +27,32 @@ * it in the license file. 
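The collation_index_key_test changes above (beyond the KeyString -> key_string rename) keep exercising the central trick of collation-aware index keys: each string is passed through CollatorInterface::getComparisonString() before it is appended to the key_string::Builder, so that plain byte-wise comparison of the encoded keys reproduces the collation order. Below is a self-contained sketch of that transform, using a lowercasing function purely as a stand-in for a case-insensitive collation; real collators derive comparison strings from ICU sort keys, not tolower.

// Sketch only: sort by a precomputed "comparison string" so that ordinary
// byte-wise comparison matches the desired collation.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <string>
#include <vector>

std::string getComparisonString(const std::string& s) {
    std::string out(s);
    std::transform(out.begin(), out.end(), out.begin(), [](unsigned char c) {
        return static_cast<char>(std::tolower(c));
    });
    return out;
}

int main() {
    std::vector<std::string> values{"banana", "APPLE", "Cherry"};

    // Raw byte order would give: APPLE, Cherry, banana (uppercase sorts first).
    // Comparing the transformed keys gives the case-insensitive order instead.
    std::sort(values.begin(), values.end(), [](const std::string& a, const std::string& b) {
        return getComparisonString(a) < getComparisonString(b);
    });

    for (const auto& v : values) {
        std::cout << v << "\n";  // APPLE, banana, Cherry
    }
    return 0;
}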
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_factory_icu.h" - #include - +#include #include #include #include #include - +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/query/collation/collator_factory_icu.h" #include "mongo/db/query/collation/collator_interface_icu.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/query/collation/collator_factory_icu.h b/src/mongo/db/query/collation/collator_factory_icu.h index 2f392c3dd1a9c..4288d82a84933 100644 --- a/src/mongo/db/query/collation/collator_factory_icu.h +++ b/src/mongo/db/query/collation/collator_factory_icu.h @@ -29,7 +29,12 @@ #pragma once +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" namespace mongo { diff --git a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp index 6ca6ae52ee6d0..77f4a1a660c22 100644 --- a/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp +++ b/src/mongo/db/query/collation/collator_factory_icu_decoration.cpp @@ -27,12 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/query/collation/collator_factory_icu.h" +#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/query/collation/collator_factory_icu_locales_test.cpp b/src/mongo/db/query/collation/collator_factory_icu_locales_test.cpp index 86a36f1dc8c56..e60f2f5ec6912 100644 --- a/src/mongo/db/query/collation/collator_factory_icu_locales_test.cpp +++ b/src/mongo/db/query/collation/collator_factory_icu_locales_test.cpp @@ -27,12 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_factory_icu.h" - +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/collation/collator_factory_icu.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/collation/collator_factory_icu_test.cpp b/src/mongo/db/query/collation/collator_factory_icu_test.cpp index c736c49a2ff41..1cd796c632016 100644 --- a/src/mongo/db/query/collation/collator_factory_icu_test.cpp +++ b/src/mongo/db/query/collation/collator_factory_icu_test.cpp @@ -27,15 +27,24 @@ * it in the license file. 
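collator_factory_icu.cpp, whose includes are reshuffled above, is where a BSON collation spec gets turned into a CollatorInterfaceICU wrapping an icu::Collator. For readers who have not used ICU directly, here is a minimal standalone sketch of the underlying ICU C++ API (link with -licui18n -licuuc); it is independent of the server code and only illustrates the library the factory builds on.

// Sketch only: create a locale-aware ICU collator and compare two strings.
#include <iostream>
#include <memory>

#include <unicode/coll.h>
#include <unicode/locid.h>
#include <unicode/unistr.h>

int main() {
    UErrorCode status = U_ZERO_ERROR;
    std::unique_ptr<icu::Collator> coll(
        icu::Collator::createInstance(icu::Locale("en", "US"), status));
    if (U_FAILURE(status)) {
        std::cerr << "failed to create collator: " << u_errorName(status) << "\n";
        return 1;
    }

    // Secondary strength ignores case differences but keeps accent differences.
    coll->setStrength(icu::Collator::SECONDARY);

    UErrorCode cmpStatus = U_ZERO_ERROR;
    const UCollationResult r =
        coll->compare(icu::UnicodeString("resume"), icu::UnicodeString("RESUME"), cmpStatus);
    std::cout << (r == UCOL_EQUAL ? "equal under this collation" : "different") << "\n";
    return 0;
}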
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_factory_icu.h" - #include -#include "mongo/base/init.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/query/collation/collator_factory_icu.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace { diff --git a/src/mongo/db/query/collation/collator_factory_interface.cpp b/src/mongo/db/query/collation/collator_factory_interface.cpp index c5026619b679b..c78469bc75294 100644 --- a/src/mongo/db/query/collation/collator_factory_interface.cpp +++ b/src/mongo/db/query/collation/collator_factory_interface.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/query/collation/collator_factory_interface.h" +#include + +#include + #include "mongo/db/service_context.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/query/collation/collator_factory_interface.h b/src/mongo/db/query/collation/collator_factory_interface.h index c13b8c8fbe5eb..5fb2f96746542 100644 --- a/src/mongo/db/query/collation/collator_factory_interface.h +++ b/src/mongo/db/query/collation/collator_factory_interface.h @@ -29,12 +29,17 @@ #pragma once +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/query/collation/collator_interface.h" namespace mongo { class BSONObj; class ServiceContext; + template class StatusWith; diff --git a/src/mongo/db/query/collation/collator_factory_mock.cpp b/src/mongo/db/query/collation/collator_factory_mock.cpp index cc82e36522a2a..a9acbbfeed48d 100644 --- a/src/mongo/db/query/collation/collator_factory_mock.cpp +++ b/src/mongo/db/query/collation/collator_factory_mock.cpp @@ -27,16 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_factory_mock.h" - #include +#include + +#include #include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/query/collation/collator_factory_mock.h" #include "mongo/db/query/collation/collator_interface_mock.h" namespace mongo { diff --git a/src/mongo/db/query/collation/collator_factory_mock.h b/src/mongo/db/query/collation/collator_factory_mock.h index d44124f74566a..6717d53ba9e45 100644 --- a/src/mongo/db/query/collation/collator_factory_mock.h +++ b/src/mongo/db/query/collation/collator_factory_mock.h @@ -29,7 +29,12 @@ #pragma once +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" namespace mongo { diff --git a/src/mongo/db/query/collation/collator_factory_mock_test.cpp b/src/mongo/db/query/collation/collator_factory_mock_test.cpp index 5c64a87dbed34..07f42da9fa7c0 100644 --- a/src/mongo/db/query/collation/collator_factory_mock_test.cpp +++ b/src/mongo/db/query/collation/collator_factory_mock_test.cpp @@ -27,12 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/query/collation/collator_factory_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/query/collation/collator_interface.cpp b/src/mongo/db/query/collation/collator_interface.cpp index ae2dedbcde9f2..0b685c84f72ae 100644 --- a/src/mongo/db/query/collation/collator_interface.cpp +++ b/src/mongo/db/query/collation/collator_interface.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/query/collation/collator_interface.h" #include "mongo/base/simple_string_data_comparator.h" diff --git a/src/mongo/db/query/collation/collator_interface.h b/src/mongo/db/query/collation/collator_interface.h index f5aae85e1471a..3180e10ceb2ca 100644 --- a/src/mongo/db/query/collation/collator_interface.h +++ b/src/mongo/db/query/collation/collator_interface.h @@ -29,11 +29,15 @@ #pragma once +#include +#include #include +#include #include "mongo/base/string_data.h" #include "mongo/base/string_data_comparator_interface.h" #include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/query/collation/collation_spec.h" namespace mongo { @@ -93,6 +97,7 @@ class CollatorInterface : public StringData::ComparatorInterface { virtual ~CollatorInterface() {} virtual std::unique_ptr clone() const = 0; + virtual std::shared_ptr cloneShared() const = 0; /** * Returns a number < 0 if 'left' is less than 'right' with respect to the collation, a number > diff --git a/src/mongo/db/query/collation/collator_interface_icu.cpp b/src/mongo/db/query/collation/collator_interface_icu.cpp index 6dceb89b1401b..551bfc2805acb 100644 --- a/src/mongo/db/query/collation/collator_interface_icu.cpp +++ b/src/mongo/db/query/collation/collator_interface_icu.cpp @@ -27,15 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_interface_icu.h" - +#include #include - +#include #include #include +#include + +#include +#include +#include +#include +#include +#include "mongo/db/query/collation/collator_interface_icu.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -44,9 +49,13 @@ CollatorInterfaceICU::CollatorInterfaceICU(Collation spec, std::unique_ptr CollatorInterfaceICU::clone() const { - auto clone = std::make_unique( + return std::make_unique( + getSpec(), std::unique_ptr(_collator->clone())); +} + +std::shared_ptr CollatorInterfaceICU::cloneShared() const { + return std::make_shared( getSpec(), std::unique_ptr(_collator->clone())); - return {std::move(clone)}; } int CollatorInterfaceICU::compare(StringData left, StringData right) const { diff --git a/src/mongo/db/query/collation/collator_interface_icu.h b/src/mongo/db/query/collation/collator_interface_icu.h index d33a0b35392b0..bb9f32e97a1c7 100644 --- a/src/mongo/db/query/collation/collator_interface_icu.h +++ b/src/mongo/db/query/collation/collator_interface_icu.h @@ -29,10 +29,12 @@ #pragma once -#include "mongo/db/query/collation/collator_interface.h" - #include +#include "mongo/base/string_data.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/query/collation/collator_interface.h" + namespace icu { class Collator; } // namespace icu @@ -48,6 +50,7 @@ class CollatorInterfaceICU final : public CollatorInterface { CollatorInterfaceICU(Collation spec, std::unique_ptr collator); std::unique_ptr clone() const final; + std::shared_ptr cloneShared() const final; int compare(StringData left, StringData right) const final; diff --git a/src/mongo/db/query/collation/collator_interface_icu_test.cpp b/src/mongo/db/query/collation/collator_interface_icu_test.cpp index c6bb0f4ad9950..41fc046a61a37 100644 --- a/src/mongo/db/query/collation/collator_interface_icu_test.cpp +++ b/src/mongo/db/query/collation/collator_interface_icu_test.cpp @@ -27,15 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_interface_icu.h" - -#include -#include +#include #include +#include -#include "mongo/unittest/unittest.h" +#include +#include + +#include "mongo/db/query/collation/collator_interface_icu.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace { diff --git a/src/mongo/db/query/collation/collator_interface_mock.cpp b/src/mongo/db/query/collation/collator_interface_mock.cpp index 3da2637c9272b..2d328efb0109f 100644 --- a/src/mongo/db/query/collation/collator_interface_mock.cpp +++ b/src/mongo/db/query/collation/collator_interface_mock.cpp @@ -27,14 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_interface_mock.h" - #include #include #include +#include + +#include +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -70,8 +71,11 @@ CollatorInterfaceMock::CollatorInterfaceMock(MockType mockType) _mockType(mockType) {} std::unique_ptr CollatorInterfaceMock::clone() const { - auto clone = std::make_unique(_mockType); - return {std::move(clone)}; + return std::make_unique(_mockType); +} + +std::shared_ptr CollatorInterfaceMock::cloneShared() const { + return std::make_shared(_mockType); } int CollatorInterfaceMock::compare(StringData left, StringData right) const { diff --git a/src/mongo/db/query/collation/collator_interface_mock.h b/src/mongo/db/query/collation/collator_interface_mock.h index c09c054185ebd..852dcb6a6782c 100644 --- a/src/mongo/db/query/collation/collator_interface_mock.h +++ b/src/mongo/db/query/collation/collator_interface_mock.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/string_data.h" #include "mongo/db/query/collation/collator_interface.h" namespace mongo { @@ -63,6 +66,7 @@ class CollatorInterfaceMock final : public CollatorInterface { CollatorInterfaceMock(MockType mockType); std::unique_ptr clone() const final; + std::shared_ptr cloneShared() const final; int compare(StringData left, StringData right) const final; diff --git a/src/mongo/db/query/collation/collator_interface_mock_test.cpp b/src/mongo/db/query/collation/collator_interface_mock_test.cpp index 9e5994f4f3069..88a089332b142 100644 --- a/src/mongo/db/query/collation/collator_interface_mock_test.cpp +++ b/src/mongo/db/query/collation/collator_interface_mock_test.cpp @@ -27,17 +27,25 @@ * it in the license file. 
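The collator changes above add cloneShared() next to the existing clone(): CollatorInterface declares both, and the ICU and mock implementations return the copy via std::make_shared rather than forcing callers to wrap a unique_ptr after the fact (make_shared performs a single allocation for the object plus its control block). A minimal sketch of the same two-flavour clone pattern, outside the collator hierarchy:

// Sketch only: a polymorphic type that offers both unique and shared clones.
#include <iostream>
#include <memory>

class Shape {
public:
    virtual ~Shape() = default;
    virtual std::unique_ptr<Shape> clone() const = 0;
    virtual std::shared_ptr<Shape> cloneShared() const = 0;
    virtual double area() const = 0;
};

class Circle final : public Shape {
public:
    explicit Circle(double r) : _r(r) {}

    std::unique_ptr<Shape> clone() const final {
        return std::make_unique<Circle>(_r);
    }

    // Object and control block in one allocation, unlike
    // std::shared_ptr<Shape>(clone()), which needs two.
    std::shared_ptr<Shape> cloneShared() const final {
        return std::make_shared<Circle>(_r);
    }

    double area() const final {
        return 3.14159265358979 * _r * _r;
    }

private:
    double _r;
};

int main() {
    const Circle c{2.0};
    const std::unique_ptr<Shape> owned = c.clone();         // exclusive ownership
    const std::shared_ptr<Shape> shared = c.cloneShared();  // shareable ownership
    std::cout << owned->area() << " " << shared->area() << "\n";
    return 0;
}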
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_interface_mock.h" +#include +#include +#include +#include +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement_comparator.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/bson/simple_bsonelement_comparator.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/query/collection_index_usage_tracker_decoration.cpp b/src/mongo/db/query/collection_index_usage_tracker_decoration.cpp index ec32e742bbd9e..acbdaead0254b 100644 --- a/src/mongo/db/query/collection_index_usage_tracker_decoration.cpp +++ b/src/mongo/db/query/collection_index_usage_tracker_decoration.cpp @@ -28,12 +28,13 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collection_index_usage_tracker_decoration.h" +#include +#include "mongo/db/aggregated_index_usage_tracker.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/query/collection_index_usage_tracker_decoration.h" #include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/collection_query_info.cpp b/src/mongo/db/query/collection_query_info.cpp index d3af887f9298a..514ced818a0a1 100644 --- a/src/mongo/db/query/collection_query_info.cpp +++ b/src/mongo/db/query/collection_query_info.cpp @@ -27,37 +27,52 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - #include "mongo/db/query/collection_query_info.h" -#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/aggregated_index_usage_tracker.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/collection_index_usage_tracker.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/curop_metrics.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/exec/projection_executor.h" -#include "mongo/db/exec/projection_executor_utils.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" #include "mongo/db/fts/fts_spec.h" #include "mongo/db/index/columns_access_method.h" +#include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/wildcard_access_method.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/transformer_interface.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_index_usage_tracker_decoration.h" -#include "mongo/db/query/get_executor.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/planner_ixselect.h" -#include "mongo/db/service_context.h" +#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/logv2/log.h" -#include "mongo/util/clock_source.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - namespace mongo { - namespace { CoreIndexInfo indexInfoFromIndexCatalogEntry(const IndexCatalogEntry& ice) { diff --git a/src/mongo/db/query/collection_query_info.h b/src/mongo/db/query/collection_query_info.h index 2b621d69a2ae4..5b270de251d5d 100644 --- a/src/mongo/db/query/collection_query_info.h +++ b/src/mongo/db/query/collection_query_info.h @@ -29,12 +29,22 @@ #pragma once +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include + #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/plan_cache_indexability.h" #include "mongo/db/query/plan_cache_invalidator.h" #include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/update_index_data.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/query/collection_query_info_test.cpp b/src/mongo/db/query/collection_query_info_test.cpp index 0bf1bef1093b4..4d319dda48e0d 100644 --- a/src/mongo/db/query/collection_query_info_test.cpp +++ b/src/mongo/db/query/collection_query_info_test.cpp @@ -27,11 +27,38 @@ * it in the license file. 
*/ +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/ordering.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/wildcard_access_method.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collection_query_info.h" #include "mongo/db/storage/devnull/devnull_kv_engine.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -130,15 +157,7 @@ class IndexCatalogEntryMock : public IndexCatalogEntry { MONGO_UNIMPLEMENTED; } - bool isReady(OperationContext* opCtx) const override { - MONGO_UNIMPLEMENTED; - } - - bool isPresentInMySnapshot(OperationContext* opCtx) const override { - MONGO_UNIMPLEMENTED; - } - - bool isReadyInMySnapshot(OperationContext* opCtx) const override { + bool isReady() const override { MONGO_UNIMPLEMENTED; } @@ -150,14 +169,6 @@ class IndexCatalogEntryMock : public IndexCatalogEntry { MONGO_UNIMPLEMENTED; } - boost::optional getMinimumVisibleSnapshot() const override { - MONGO_UNIMPLEMENTED; - } - - void setMinimumVisibleSnapshot(Timestamp name) override { - MONGO_UNIMPLEMENTED; - } - const UpdateIndexData& getIndexedPaths() const override { MONGO_UNIMPLEMENTED; } diff --git a/src/mongo/db/query/cost_model/cost_estimator_impl.cpp b/src/mongo/db/query/cost_model/cost_estimator_impl.cpp index 6daf1d972d7f4..e9999af452a8e 100644 --- a/src/mongo/db/query/cost_model/cost_estimator_impl.cpp +++ b/src/mongo/db/query/cost_model/cost_estimator_impl.cpp @@ -29,8 +29,20 @@ #include "mongo/db/query/cost_model/cost_estimator_impl.h" +#include +#include +#include + +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" namespace mongo::cost_model { @@ -46,7 +58,6 @@ struct CostAndCEInternal { uassert(7034001, "Invalid cardinality", std::isfinite(ce._value) && ce >= 0.0); } - // TODO: SERVER-71799: Cost model manager to deliver costs via a strong type. 
double _cost; CEType _ce; }; diff --git a/src/mongo/db/query/cost_model/cost_estimator_impl.h b/src/mongo/db/query/cost_model/cost_estimator_impl.h index 0ed094c02a9ca..2bc05377fe625 100644 --- a/src/mongo/db/query/cost_model/cost_estimator_impl.h +++ b/src/mongo/db/query/cost_model/cost_estimator_impl.h @@ -29,9 +29,16 @@ #pragma once +#include + #include "mongo/db/query/cost_model/cost_model_gen.h" #include "mongo/db/query/optimizer/cascades/interfaces.h" #include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::cost_model { /** diff --git a/src/mongo/db/query/cost_model/cost_estimator_test.cpp b/src/mongo/db/query/cost_model/cost_estimator_test.cpp index 4ea8331075ae5..f11c5ca69fe96 100644 --- a/src/mongo/db/query/cost_model/cost_estimator_test.cpp +++ b/src/mongo/db/query/cost_model/cost_estimator_test.cpp @@ -27,11 +27,30 @@ * it in the license file. */ +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/query/cost_model/cost_estimator_impl.h" #include "mongo/db/query/cost_model/cost_model_gen.h" #include "mongo/db/query/cost_model/cost_model_utils.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/cascades/memo.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::cost_model { diff --git a/src/mongo/db/query/cost_model/cost_model_manager.cpp b/src/mongo/db/query/cost_model/cost_model_manager.cpp index 55cda3bc20602..9b1caf53b4b0b 100644 --- a/src/mongo/db/query/cost_model/cost_model_manager.cpp +++ b/src/mongo/db/query/cost_model/cost_model_manager.cpp @@ -29,7 +29,9 @@ #include "mongo/db/query/cost_model/cost_model_manager.h" -#include "mongo/db/query/cost_model/cost_model_on_update.h" +#include + +#include "mongo/idl/idl_parser.h" namespace mongo::cost_model { namespace { diff --git a/src/mongo/db/query/cost_model/cost_model_manager.h b/src/mongo/db/query/cost_model/cost_model_manager.h index bed1d11922b53..52f56ebd6f399 100644 --- a/src/mongo/db/query/cost_model/cost_model_manager.h +++ b/src/mongo/db/query/cost_model/cost_model_manager.h @@ -31,6 +31,7 @@ #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/query/cost_model/cost_model_gen.h" #include "mongo/stdx/mutex.h" diff --git a/src/mongo/db/query/cost_model/cost_model_on_update.cpp b/src/mongo/db/query/cost_model/cost_model_on_update.cpp index c64a9385b8a19..2f21f9af3f81c 100644 --- a/src/mongo/db/query/cost_model/cost_model_on_update.cpp +++ b/src/mongo/db/query/cost_model/cost_model_on_update.cpp @@ -29,11 +29,18 @@ #include "mongo/db/query/cost_model/cost_model_on_update.h" +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/json.h" #include 
"mongo/db/client.h" -#include "mongo/db/query/cost_model/cost_model_manager.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/cost_model/cost_model_on_update.h b/src/mongo/db/query/cost_model/cost_model_on_update.h index 89cf95443abe1..b3188f221cccf 100644 --- a/src/mongo/db/query/cost_model/cost_model_on_update.h +++ b/src/mongo/db/query/cost_model/cost_model_on_update.h @@ -29,12 +29,14 @@ #pragma once +#include #include #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/query/sbe_plan_cache_on_parameter_change.h" #include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo::cost_model { diff --git a/src/mongo/db/query/cost_model/cost_model_test.cpp b/src/mongo/db/query/cost_model/cost_model_test.cpp index b8fd107fa56c3..86fe3fddfd4e2 100644 --- a/src/mongo/db/query/cost_model/cost_model_test.cpp +++ b/src/mongo/db/query/cost_model/cost_model_test.cpp @@ -27,14 +27,30 @@ * it in the license file. */ -#include "mongo/db/query/cost_model/cost_model_manager.h" +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" #include "mongo/db/query/cost_model/cost_model_utils.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/printable_enum.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::cost_model { diff --git a/src/mongo/db/query/cost_model/on_coefficients_change_updater_impl.cpp b/src/mongo/db/query/cost_model/on_coefficients_change_updater_impl.cpp index aa8b430fc78c7..c70184106a457 100644 --- a/src/mongo/db/query/cost_model/on_coefficients_change_updater_impl.cpp +++ b/src/mongo/db/query/cost_model/on_coefficients_change_updater_impl.cpp @@ -29,6 +29,10 @@ #include "mongo/db/query/cost_model/on_coefficients_change_updater_impl.h" +#include +#include +#include + #include "mongo/bson/json.h" #include "mongo/db/query/query_knobs_gen.h" diff --git a/src/mongo/db/query/cost_model/on_coefficients_change_updater_impl.h b/src/mongo/db/query/cost_model/on_coefficients_change_updater_impl.h index f49a5c73bc74d..f3737144f2216 100644 --- a/src/mongo/db/query/cost_model/on_coefficients_change_updater_impl.h +++ b/src/mongo/db/query/cost_model/on_coefficients_change_updater_impl.h @@ -29,8 +29,11 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/query/cost_model/cost_model_manager.h" #include "mongo/db/query/cost_model/cost_model_on_update.h" +#include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo::cost_model { diff --git 
a/src/mongo/db/query/count_command_as_aggregation_command.cpp b/src/mongo/db/query/count_command_as_aggregation_command.cpp index aa3d761626b31..96ece4e365572 100644 --- a/src/mongo/db/query/count_command_as_aggregation_command.cpp +++ b/src/mongo/db/query/count_command_as_aggregation_command.cpp @@ -27,12 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/query/count_command_as_aggregation_command.h" - #include "mongo/db/query/query_request_helper.h" -#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/count_command_as_aggregation_command.h b/src/mongo/db/query/count_command_as_aggregation_command.h index 052332787655e..37508840766ad 100644 --- a/src/mongo/db/query/count_command_as_aggregation_command.h +++ b/src/mongo/db/query/count_command_as_aggregation_command.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/count_command_gen.h" diff --git a/src/mongo/db/query/count_command_test.cpp b/src/mongo/db/query/count_command_test.cpp index 2414e1330cee6..7970855660e41 100644 --- a/src/mongo/db/query/count_command_test.cpp +++ b/src/mongo/db/query/count_command_test.cpp @@ -27,21 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/query/count_command_as_aggregation_command.h" #include "mongo/db/query/count_command_gen.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/str.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { -static const NamespaceString testns("TestDB.TestColl"); +static const NamespaceString testns = + NamespaceString::createNamespaceString_forTest("TestDB.TestColl"); const IDLParserContext ctxt("count"); @@ -175,7 +194,7 @@ TEST(CountCommandTest, ConvertToAggregationWithHint) { << "hint" << BSON("x" << 1)); auto countCmd = CountCommandRequest::parse(ctxt, commandObj); auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns)); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg).body; auto ar = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests(testns, cmdObj)); ASSERT_BSONOBJ_EQ(ar.getHint().value_or(BSONObj()), BSON("x" << 1)); @@ -197,7 +216,7 @@ TEST(CountCommandTest, ConvertToAggregationWithQueryAndFilterAndLimit) { << "limit" << 200 << "skip" << 300 << "query" << BSON("x" << 7)); auto countCmd = CountCommandRequest::parse(ctxt, commandObj); auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns)); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg).body; + auto cmdObj = 
OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg).body; auto ar = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests(testns, cmdObj)); ASSERT_EQ(ar.getCursor().getBatchSize().value_or(aggregation_request_helper::kDefaultBatchSize), @@ -223,7 +242,7 @@ TEST(CountCommandTest, ConvertToAggregationWithMaxTimeMS) { << "maxTimeMS" << 100 << "$db" << "TestDB")); auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns)); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg).body; auto ar = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests(testns, cmdObj)); ASSERT_EQ(ar.getMaxTimeMS().value_or(0), 100u); @@ -246,7 +265,7 @@ TEST(CountCommandTest, ConvertToAggregationWithQueryOptions) { countCmd.setQueryOptions(BSON("readPreference" << "secondary")); auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns)); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg).body; auto ar = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests(testns, cmdObj)); ASSERT_BSONOBJ_EQ(ar.getUnwrappedReadPref().value_or(BSONObj()), @@ -271,7 +290,7 @@ TEST(CountCommandTest, ConvertToAggregationWithReadConcern) { countCmd.setReadConcern(BSON("level" << "linearizable")); auto agg = uassertStatusOK(countCommandAsAggregationCommand(countCmd, testns)); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg).body; auto ar = uassertStatusOK(aggregation_request_helper::parseFromBSONForTests(testns, cmdObj)); ASSERT_BSONOBJ_EQ(ar.getReadConcern().value_or(BSONObj()), diff --git a/src/mongo/db/query/count_request.cpp b/src/mongo/db/query/count_request.cpp index 50788947ec947..d5d60162046df 100644 --- a/src/mongo/db/query/count_request.cpp +++ b/src/mongo/db/query/count_request.cpp @@ -27,13 +27,11 @@ * it in the license file. 
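The count_command_test churn above (test-only namespace construction via NamespaceString::createNamespaceString_forTest and db_forTest()) still revolves around countCommandAsAggregationCommand(), which rewrites a count request into an aggregation that the tests then re-parse with aggregation_request_helper. The exact pipeline the helper emits is not shown in this hunk; the snippet below only illustrates, with the same BSON-building macros the tests use, one aggregation shape that is semantically equivalent to a count with query, skip and limit.

// Illustration only: an aggregation equivalent to
// { count: "TestColl", query: {x: 7}, skip: 300, limit: 200 }.
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {
BSONObj equivalentCountPipeline() {
    BSONArrayBuilder stages;
    stages.append(BSON("$match" << BSON("x" << 7)));  // apply the count's filter
    stages.append(BSON("$skip" << 300));
    stages.append(BSON("$limit" << 200));
    stages.append(BSON("$count"
                       << "count"));  // emit {count: <n>} for whatever remains
    return BSON("pipeline" << stages.arr());
}
}  // namespace mongo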
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/db/query/count_request.h" - -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/query/query_request_helper.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace count_request { diff --git a/src/mongo/db/query/cqf_command_utils.cpp b/src/mongo/db/query/cqf_command_utils.cpp index a6279c4340090..47fb70d53293b 100644 --- a/src/mongo/db/query/cqf_command_utils.cpp +++ b/src/mongo/db/query/cqf_command_utils.cpp @@ -29,11 +29,30 @@ #include "mongo/db/query/cqf_command_utils.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/exec/add_fields_projection_executor.h" #include "mongo/db/exec/exclusion_projection_executor.h" #include "mongo/db/exec/inclusion_projection_executor.h" +#include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" -#include "mongo/db/exec/sbe/abt/abt_lower.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_expr.h" @@ -42,6 +61,7 @@ #include "mongo/db/matcher/expression_internal_eq_hashed_key.h" #include "mongo/db/matcher/expression_internal_expr_comparison.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_path.h" #include "mongo/db/matcher/expression_text.h" #include "mongo/db/matcher/expression_text_noop.h" #include "mongo/db/matcher/expression_tree.h" @@ -66,76 +86,41 @@ #include "mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h" #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_xor.h" -#include "mongo/db/pipeline/abt/agg_expression_visitor.h" -#include "mongo/db/pipeline/abt/document_source_visitor.h" -#include "mongo/db/pipeline/abt/match_expression_visitor.h" -#include "mongo/db/pipeline/abt/utils.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/accumulator_multi.h" #include "mongo/db/pipeline/accumulator_percentile.h" -#include "mongo/db/pipeline/document_source_bucket_auto.h" -#include "mongo/db/pipeline/document_source_change_stream_add_post_image.h" -#include "mongo/db/pipeline/document_source_change_stream_add_pre_image.h" -#include "mongo/db/pipeline/document_source_change_stream_check_invalidate.h" -#include "mongo/db/pipeline/document_source_change_stream_check_resumability.h" -#include "mongo/db/pipeline/document_source_change_stream_check_topology_change.h" -#include "mongo/db/pipeline/document_source_change_stream_handle_topology_change.h" -#include "mongo/db/pipeline/document_source_change_stream_transform.h" -#include "mongo/db/pipeline/document_source_change_stream_unwind_transaction.h" -#include "mongo/db/pipeline/document_source_coll_stats.h" -#include "mongo/db/pipeline/document_source_current_op.h" -#include "mongo/db/pipeline/document_source_cursor.h" -#include "mongo/db/pipeline/document_source_densify.h" -#include 
"mongo/db/pipeline/document_source_exchange.h" -#include "mongo/db/pipeline/document_source_facet.h" -#include "mongo/db/pipeline/document_source_find_and_modify_image_lookup.h" -#include "mongo/db/pipeline/document_source_geo_near.h" -#include "mongo/db/pipeline/document_source_geo_near_cursor.h" -#include "mongo/db/pipeline/document_source_graph_lookup.h" -#include "mongo/db/pipeline/document_source_group.h" -#include "mongo/db/pipeline/document_source_index_stats.h" -#include "mongo/db/pipeline/document_source_internal_all_collection_stats.h" -#include "mongo/db/pipeline/document_source_internal_apply_oplog_update.h" -#include "mongo/db/pipeline/document_source_internal_compute_geo_near_distance.h" -#include "mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h" -#include "mongo/db/pipeline/document_source_internal_inhibit_optimization.h" -#include "mongo/db/pipeline/document_source_internal_shard_filter.h" -#include "mongo/db/pipeline/document_source_internal_split_pipeline.h" -#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" -#include "mongo/db/pipeline/document_source_limit.h" -#include "mongo/db/pipeline/document_source_list_cached_and_active_users.h" -#include "mongo/db/pipeline/document_source_list_catalog.h" -#include "mongo/db/pipeline/document_source_list_local_sessions.h" -#include "mongo/db/pipeline/document_source_list_sessions.h" -#include "mongo/db/pipeline/document_source_lookup.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source_match.h" -#include "mongo/db/pipeline/document_source_merge.h" -#include "mongo/db/pipeline/document_source_operation_metrics.h" -#include "mongo/db/pipeline/document_source_out.h" -#include "mongo/db/pipeline/document_source_plan_cache_stats.h" -#include "mongo/db/pipeline/document_source_queue.h" -#include "mongo/db/pipeline/document_source_redact.h" #include "mongo/db/pipeline/document_source_replace_root.h" -#include "mongo/db/pipeline/document_source_sample.h" -#include "mongo/db/pipeline/document_source_sample_from_random_cursor.h" -#include "mongo/db/pipeline/document_source_sequential_document_cache.h" -#include "mongo/db/pipeline/document_source_set_variable_from_subpipeline.h" -#include "mongo/db/pipeline/document_source_set_window_fields.h" #include "mongo/db/pipeline/document_source_single_document_transformation.h" -#include "mongo/db/pipeline/document_source_skip.h" -#include "mongo/db/pipeline/document_source_sort.h" -#include "mongo/db/pipeline/document_source_streaming_group.h" -#include "mongo/db/pipeline/document_source_tee_consumer.h" -#include "mongo/db/pipeline/document_source_telemetry.h" -#include "mongo/db/pipeline/document_source_union_with.h" -#include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_visitor.h" +#include "mongo/db/pipeline/expression_walker.h" +#include "mongo/db/pipeline/group_from_first_document_transformation.h" +#include "mongo/db/pipeline/transformer_interface.h" #include "mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h" #include "mongo/db/pipeline/visitors/document_source_walker.h" +#include "mongo/db/pipeline/visitors/transformer_interface_visitor.h" #include "mongo/db/pipeline/visitors/transformer_interface_walker.h" +#include "mongo/db/query/ce_mode_parameter.h" #include "mongo/db/query/expression_walker.h" -#include "mongo/db/query/query_feature_flags_gen.h" 
+#include "mongo/db/query/find_command.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" -#include "mongo/s/query/document_source_merge_cursors.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" +#include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery namespace mongo { @@ -151,7 +136,8 @@ namespace { */ class ABTMatchExpressionVisitor : public MatchExpressionConstVisitor { public: - ABTMatchExpressionVisitor(bool& eligible) : _eligible(eligible) {} + ABTMatchExpressionVisitor(bool& eligible, QueryFrameworkControlEnum frameworkControl) + : _eligible(eligible), _frameworkControl(frameworkControl) {} void visit(const LTEMatchExpression* expr) override { assertSupportedPathExpression(expr); @@ -380,11 +366,20 @@ class ABTMatchExpressionVisitor : public MatchExpressionConstVisitor { } void assertSupportedPathExpression(const PathMatchExpression* expr) { - if (FieldRef(expr->path()).hasNumericPathComponents()) + const auto fieldRef = FieldRef(expr->path()); + if (fieldRef.hasNumericPathComponents()) _eligible = false; + + // In M2, match expressions which compare against _id should fall back because they could + // use the _id index. + if (!fieldRef.empty() && fieldRef.getPart(0) == "_id" && + _frameworkControl == QueryFrameworkControlEnum::kTryBonsai) { + _eligible = false; + } } bool& _eligible; + const QueryFrameworkControlEnum _frameworkControl; }; class ABTUnsupportedAggExpressionVisitor : public ExpressionConstVisitor { @@ -1053,77 +1048,100 @@ class ABTTransformerVisitor : public TransformerInterfaceConstVisitor { template bool isEligibleCommon(const RequestType& request, OperationContext* opCtx, - const CollectionPtr& collection) { + const CollectionPtr& collection, + QueryFrameworkControlEnum frameworkControl) { + // + // Check unsupported command options. + // + // The FindCommandRequest defaults some parameters to BSONObj() instead of boost::none. - auto noneOrDefaultEmpty = [&](auto param) { + auto hasParam = [&](auto param) { if constexpr (std::is_same_v>) { return param && !param->isEmpty(); } else { return !param.isEmpty(); } }; - const bool unsupportedCmdOption = noneOrDefaultEmpty(request.getCollation()) || - noneOrDefaultEmpty(request.getLet()) || request.getLegacyRuntimeConstants(); + if (hasParam(request.getCollation()) || hasParam(request.getLet()) || + hasParam(request.getResumeAfter()) || request.getRequestResumeToken() || + request.getLegacyRuntimeConstants()) { + return false; + } - bool unsupportedIndexType = [&]() { - if (!collection) - return false; + // In M2, we should fall back on any index hint that is not a $natural hint. 
+ auto hasIndexHint = [&](auto param) { + if constexpr (std::is_same_v>) { + return param && !param->isEmpty() && + param->firstElementFieldNameStringData() != "$natural"_sd; + } else { + return !param.isEmpty() && param.firstElementFieldNameStringData() != "$natural"_sd; + } + }; + if (frameworkControl == QueryFrameworkControlEnum::kTryBonsai && + hasIndexHint(request.getHint())) { + return false; + } - const IndexCatalog& indexCatalog = *collection->getIndexCatalog(); - auto indexIterator = - indexCatalog.getIndexIterator(opCtx, IndexCatalog::InclusionPolicy::kReady); + // + // Check unsupported index types. + // - while (indexIterator->more()) { - const IndexDescriptor& descriptor = *indexIterator->next()->descriptor(); - if (descriptor.hidden()) { - // An index that is hidden will not be considered by the optimizer, so we don't need - // to check its eligibility further. - continue; - } + if (!collection) + return true; - if (descriptor.infoObj().hasField(IndexDescriptor::kExpireAfterSecondsFieldName) || - descriptor.isPartial() || descriptor.isSparse() || - descriptor.getIndexType() != IndexType::INDEX_BTREE || - !descriptor.collation().isEmpty()) { - return true; - } + const IndexCatalog& indexCatalog = *collection->getIndexCatalog(); + auto indexIterator = + indexCatalog.getIndexIterator(opCtx, IndexCatalog::InclusionPolicy::kReady); + + while (indexIterator->more()) { + const IndexDescriptor& descriptor = *indexIterator->next()->descriptor(); + if (descriptor.hidden()) { + // An index that is hidden will not be considered by the optimizer, so we don't need + // to check its eligibility further. + continue; } - return false; - }(); - bool unsupportedCollectionType = [&]() { - if (!collection) + // In M2, we should fall back on any non-hidden, non-_id index. + if (!descriptor.isIdIndex() && frameworkControl == QueryFrameworkControlEnum::kTryBonsai) { return false; + } - if (collection->isClustered() || !collection->getCollectionOptions().collation.isEmpty() || - collection->getTimeseriesOptions() || collection->isCapped()) { - return true; + if (descriptor.infoObj().hasField(IndexDescriptor::kExpireAfterSecondsFieldName) || + descriptor.isPartial() || descriptor.isSparse() || + descriptor.getIndexType() != IndexType::INDEX_BTREE || + !descriptor.collation().isEmpty()) { + return false; } + } + // + // Check unsupported collection types. + // + + if (collection->isClustered() || !collection->getCollectionOptions().collation.isEmpty() || + collection->getTimeseriesOptions() || collection->isCapped()) { return false; - }(); + } - return !unsupportedCmdOption && !unsupportedIndexType && !unsupportedCollectionType && - !storageGlobalParams.noTableScan.load(); + // Check notablescan. + return !storageGlobalParams.noTableScan.load(); } -boost::optional shouldForceEligibility() { +boost::optional shouldForceEligibility(QueryFrameworkControlEnum frameworkControl) { // We don't need to consult the feature flag here, since the framework control knob can only // be set to enable bonsai if featureFlagCommonQueryFramework is enabled. 
- auto queryControl = ServerParameterSet::getNodeParameterSet()->get( - "internalQueryFrameworkControl"); - LOGV2_DEBUG(7325101, 4, "internalQueryFrameworkControl={knob}", "logging internalQueryFrameworkControl", - "knob"_attr = QueryFrameworkControl_serializer(queryControl->_data.get())); + "knob"_attr = QueryFrameworkControl_serializer(frameworkControl)); - switch (queryControl->_data.get()) { + switch (frameworkControl) { case QueryFrameworkControlEnum::kForceClassicEngine: case QueryFrameworkControlEnum::kTrySbeEngine: return false; case QueryFrameworkControlEnum::kTryBonsai: + case QueryFrameworkControlEnum::kTryBonsaiExperimental: // Return boost::none to indicate that we should not force eligibility of bonsai nor the // classic engine. return boost::none; @@ -1134,18 +1152,20 @@ boost::optional shouldForceEligibility() { MONGO_UNREACHABLE; } -bool isEligibleForBonsai(ServiceContext* serviceCtx, const Pipeline& pipeline) { - ABTUnsupportedDocumentSourceVisitorContext visitorCtx; +bool isEligibleForBonsai(ServiceContext* serviceCtx, + const Pipeline& pipeline, + QueryFrameworkControlEnum frameworkControl) { + ABTUnsupportedDocumentSourceVisitorContext visitorCtx{frameworkControl}; auto& reg = getDocumentSourceVisitorRegistry(serviceCtx); DocumentSourceWalker walker(reg, &visitorCtx); walker.walk(pipeline); return visitorCtx.eligible; } -bool isEligibleForBonsai(const CanonicalQuery& cq) { +bool isEligibleForBonsai(const CanonicalQuery& cq, QueryFrameworkControlEnum frameworkControl) { auto expression = cq.root(); bool eligible = true; - ABTMatchExpressionVisitor visitor(eligible); + ABTMatchExpressionVisitor visitor(eligible, frameworkControl); MatchExpressionWalker walker(nullptr /*preVisitor*/, nullptr /*inVisitor*/, &visitor); tree_walker::walk(expression, &walker); @@ -1170,17 +1190,23 @@ bool isEligibleForBonsai(const AggregateCommandRequest& request, const Pipeline& pipeline, OperationContext* opCtx, const CollectionPtr& collection) { - if (auto forceBonsai = shouldForceEligibility(); forceBonsai.has_value()) { + auto frameworkControl = ServerParameterSet::getNodeParameterSet() + ->get("internalQueryFrameworkControl") + ->_data.get(); + + if (auto forceBonsai = shouldForceEligibility(frameworkControl); forceBonsai.has_value()) { return *forceBonsai; } // Explain is not currently supported but is allowed if the failpoint is set // for testing purposes. + // TODO SERVER-77719: eventually explain should be permitted by default with tryBonsai, but we + // will still want to fall back on explain commands with tryBonsaiExperimental. if (!MONGO_unlikely(enableExplainInBonsai.shouldFail()) && request.getExplain()) { return false; } - bool commandOptionsEligible = isEligibleCommon(request, opCtx, collection) && + bool commandOptionsEligible = isEligibleCommon(request, opCtx, collection, frameworkControl) && !request.getRequestReshardingResumeToken().has_value() && !request.getExchange(); // Early return to avoid unnecessary work of walking the input pipeline. 
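The `shouldForceEligibility` helper shown above returns a tri-state value: force the decision one way, force it the other way, or defer to the per-query eligibility checks. A sketch of that pattern follows, assuming a simplified, hypothetical control enum (the real knob has values such as kForceClassicEngine, kTrySbeEngine, kTryBonsai, and kTryBonsaiExperimental).

```cpp
#include <iostream>
#include <optional>

// Hypothetical stand-in for the internalQueryFrameworkControl knob values.
enum class Control { forceClassic, trySbe, tryBonsai, forceBonsai };

// Tri-state result: true/false force the decision, std::nullopt defers to the
// per-query eligibility checks.
std::optional<bool> shouldForce(Control c) {
    switch (c) {
        case Control::forceClassic:
        case Control::trySbe:
            return false;         // never route to the new engine
        case Control::tryBonsai:
            return std::nullopt;  // fall through to eligibility checks
        case Control::forceBonsai:
            return true;          // always route to the new engine
    }
    return std::nullopt;
}

bool isEligible(Control c, bool queryIsEligible) {
    if (auto forced = shouldForce(c); forced.has_value()) {
        return *forced;
    }
    return queryIsEligible;
}

int main() {
    std::cout << isEligible(Control::tryBonsai, true)     // 1: deferred, query is eligible
              << isEligible(Control::tryBonsai, false)    // 0: deferred, query not eligible
              << isEligible(Control::forceBonsai, false)  // 1: forced on
              << '\n';
}
```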
@@ -1188,45 +1214,60 @@ bool isEligibleForBonsai(const AggregateCommandRequest& request, return false; } - return isEligibleForBonsai(opCtx->getServiceContext(), pipeline); + return isEligibleForBonsai(opCtx->getServiceContext(), pipeline, frameworkControl); } bool isEligibleForBonsai(const CanonicalQuery& cq, OperationContext* opCtx, const CollectionPtr& collection) { - if (auto forceBonsai = shouldForceEligibility(); forceBonsai.has_value()) { + auto frameworkControl = ServerParameterSet::getNodeParameterSet() + ->get("internalQueryFrameworkControl") + ->_data.get(); + if (auto forceBonsai = shouldForceEligibility(frameworkControl); forceBonsai.has_value()) { return *forceBonsai; } + if (!cq.useCqfIfEligible()) { + return false; + } + // Explain is not currently supported but is allowed if the failpoint is set // for testing purposes. + // TODO SERVER-77719: eventually explain should be permitted by default with tryBonsai, but we + // will still want to fall back on explain commands with tryBonsaiExperimental. if (!MONGO_unlikely(enableExplainInBonsai.shouldFail()) && cq.getExplain()) { return false; } auto request = cq.getFindCommandRequest(); - bool commandOptionsEligible = isEligibleCommon(request, opCtx, collection) && + bool commandOptionsEligible = isEligibleCommon(request, opCtx, collection, frameworkControl) && request.getSort().isEmpty() && request.getMin().isEmpty() && request.getMax().isEmpty() && !request.getReturnKey() && !request.getSingleBatch() && !request.getTailable() && !request.getSkip() && !request.getLimit() && !request.getNoCursorTimeout() && - !request.getRequestResumeToken() && !request.getAllowPartialResults() && - !request.getAllowSpeculativeMajorityRead() && !request.getAwaitData() && - !request.getReadOnce() && !request.getShowRecordId() && !request.getTerm(); + !request.getAllowPartialResults() && !request.getAllowSpeculativeMajorityRead() && + !request.getAwaitData() && !request.getReadOnce() && !request.getShowRecordId() && + !request.getTerm(); // Early return to avoid unnecessary work of walking the input expression. 
- if (!commandOptionsEligible || !cq.useCqfIfEligible()) { + if (!commandOptionsEligible) { return false; } - return isEligibleForBonsai(cq); + return isEligibleForBonsai(cq, frameworkControl); } bool isEligibleForBonsai_forTesting(const CanonicalQuery& cq) { - return isEligibleForBonsai(cq); + auto frameworkControl = ServerParameterSet::getNodeParameterSet() + ->get("internalQueryFrameworkControl") + ->_data.get(); + return isEligibleForBonsai(cq, frameworkControl); } bool isEligibleForBonsai_forTesting(ServiceContext* serviceCtx, const Pipeline& pipeline) { - return isEligibleForBonsai(serviceCtx, pipeline); + auto frameworkControl = ServerParameterSet::getNodeParameterSet() + ->get("internalQueryFrameworkControl") + ->_data.get(); + return isEligibleForBonsai(serviceCtx, pipeline, frameworkControl); } } // namespace mongo @@ -1239,7 +1280,7 @@ void visit(ABTUnsupportedDocumentSourceVisitorContext* ctx, const T&) { } void visit(ABTUnsupportedDocumentSourceVisitorContext* ctx, const DocumentSourceMatch& source) { - ABTMatchExpressionVisitor visitor(ctx->eligible); + ABTMatchExpressionVisitor visitor(ctx->eligible, ctx->frameworkControl); MatchExpressionWalker walker(nullptr, nullptr, &visitor); tree_walker::walk(source.getMatchExpression(), &walker); } diff --git a/src/mongo/db/query/cqf_command_utils.h b/src/mongo/db/query/cqf_command_utils.h index c6948c9a18edc..0043407c2ad03 100644 --- a/src/mongo/db/query/cqf_command_utils.h +++ b/src/mongo/db/query/cqf_command_utils.h @@ -29,10 +29,20 @@ #pragma once +#include +#include + #include "mongo/db/catalog/collection.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source_internal_inhibit_optimization.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/visitors/document_source_visitor_registry.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -46,7 +56,10 @@ namespace optimizer { * should set 'eligible' to false. 
*/ struct ABTUnsupportedDocumentSourceVisitorContext : public DocumentSourceVisitorContextBase { + ABTUnsupportedDocumentSourceVisitorContext(QueryFrameworkControlEnum controlEnum) + : frameworkControl(controlEnum) {} bool eligible{true}; + const QueryFrameworkControlEnum frameworkControl; }; } // namespace optimizer diff --git a/src/mongo/db/query/cqf_get_executor.cpp b/src/mongo/db/query/cqf_get_executor.cpp index e278a4d3617ff..a368e7c2dd25c 100644 --- a/src/mongo/db/query/cqf_get_executor.cpp +++ b/src/mongo/db/query/cqf_get_executor.cpp @@ -29,37 +29,100 @@ #include "mongo/db/query/cqf_get_executor.h" +#include +#include +#include +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/curop.h" +#include "mongo/db/db_raii.h" #include "mongo/db/exec/sbe/abt/abt_lower.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/pipeline/abt/canonical_query_translation.h" #include "mongo/db/pipeline/abt/document_source_visitor.h" #include "mongo/db/pipeline/abt/match_expression_visitor.h" #include "mongo/db/pipeline/abt/utils.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/ce/heuristic_estimator.h" #include "mongo/db/query/ce/histogram_estimator.h" #include "mongo/db/query/ce/sampling_estimator.h" #include "mongo/db/query/ce_mode_parameter.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/query/cost_model/cost_estimator_impl.h" #include "mongo/db/query/cost_model/cost_model_gen.h" #include "mongo/db/query/cost_model/cost_model_manager.h" #include "mongo/db/query/cost_model/on_coefficients_change_updater_impl.h" #include "mongo/db/query/cqf_command_utils.h" -#include "mongo/db/query/explain_version_validator.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/optimizer/cascades/interfaces.h" +#include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/node_defs.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/reference_tracker.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include 
"mongo/db/query/optimizer/utils/const_fold_interface.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_request_helper.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/stats/collection_statistics_impl.h" #include "mongo/db/query/yield_policy_callbacks_impl.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" #include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/synchronized_value.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -73,7 +136,7 @@ using ce::SamplingEstimator; using cost_model::CostEstimatorImpl; using cost_model::CostModelManager; -static opt::unordered_map buildIndexSpecsOptimizer( +static std::pair buildIndexSpecsOptimizer( boost::intrusive_ptr expCtx, OperationContext* opCtx, const CollectionPtr& collection, @@ -88,28 +151,31 @@ static opt::unordered_map buildIndexSpe return {}; } + std::pair result; std::string indexHintName; + bool skipAllIndexes = false; if (indexHint) { const BSONElement element = indexHint->firstElement(); const StringData fieldName = element.fieldNameStringData(); if (fieldName == "$natural"_sd) { // Do not add indexes. - return {}; + skipAllIndexes = true; } else if (fieldName == "$hint"_sd && element.type() == BSONType::String) { indexHintName = element.valueStringData().toString(); } - disableScan = true; + disableScan = !skipAllIndexes; } const IndexCatalog& indexCatalog = *collection->getIndexCatalog(); - opt::unordered_map result; + auto indexIterator = indexCatalog.getIndexIterator(opCtx, IndexCatalog::InclusionPolicy::kReady); while (indexIterator->more()) { const IndexCatalogEntry& catalogEntry = *indexIterator->next(); const IndexDescriptor& descriptor = *catalogEntry.descriptor(); + bool skipIndex = false; if (descriptor.hidden()) { // Index is hidden; don't consider it. @@ -127,11 +193,11 @@ static opt::unordered_map buildIndexSpe if (!SimpleBSONObjComparator::kInstance.evaluate(descriptor.keyPattern() == *indexHint)) { // Index key pattern does not match hint. - continue; + skipIndex = true; } } else if (indexHintName != descriptor.indexName()) { // Index name does not match hint. - continue; + skipIndex = true; } } @@ -227,16 +293,28 @@ static opt::unordered_map buildIndexSpe partialIndexReqMap = std::move(conversion->_reqMap); } + IndexDefinition indexDef(std::move(indexCollationSpec), + version, + orderingBits, + isMultiKey, + DistributionType::Centralized, + std::move(partialIndexReqMap)); + // Skip partial indexes. A path could be non-multikey on a partial index (subset of the + // collection), but still be multikey on the overall collection. + if (indexDef.getPartialReqMap().isNoop()) { + for (const auto& component : indexDef.getCollationSpec()) { + result.second.add(component._path.ref()); + } + } // For now we assume distribution is Centralized. 
- result.emplace(descriptor.indexName(), - IndexDefinition(std::move(indexCollationSpec), - version, - orderingBits, - isMultiKey, - DistributionType::Centralized, - std::move(partialIndexReqMap))); + if (!skipIndex && !skipAllIndexes) { + result.first.emplace(descriptor.indexName(), std::move(indexDef)); + } } + // The empty path refers to the whole document, which can't be an array. + result.second.isMultiKey = false; + return result; } @@ -291,19 +369,26 @@ static ExecParams createExecutor(OptPhaseManager phaseManager, OPTIMIZER_DEBUG_LOG(6264802, 5, "Lowered SBE plan", "plan"_attr = p.print(*sbePlan.get())); } - stage_builder::PlanStageData data{std::move(runtimeEnvironment)}; - data.outputs.set(stage_builder::PlanStageSlots::kResult, slotMap.begin()->second); + stage_builder::PlanStageSlots outputs; + outputs.set(stage_builder::PlanStageSlots::kResult, slotMap.begin()->second); if (requireRID) { - data.outputs.set(stage_builder::PlanStageSlots::kRecordId, *ridSlot); + outputs.set(stage_builder::PlanStageSlots::kRecordId, *ridSlot); } + auto staticData = std::make_unique(); + staticData->outputs = std::move(outputs); + + stage_builder::PlanStageData data( + stage_builder::PlanStageEnvironment(std::move(runtimeEnvironment)), std::move(staticData)); + sbePlan->attachToOperationContext(opCtx); if (needsExplain || expCtx->mayDbProfile) { sbePlan->markShouldCollectTimingInfo(); } auto yieldPolicy = - std::make_unique(PlanYieldPolicy::YieldPolicy::YIELD_AUTO, + std::make_unique(opCtx, + PlanYieldPolicy::YieldPolicy::YIELD_AUTO, opCtx->getServiceContext()->getFastClockSource(), internalQueryExecYieldIterations.load(), Milliseconds{internalQueryExecYieldPeriodMS.load()}, @@ -337,7 +422,7 @@ static ExecParams createExecutor(OptPhaseManager phaseManager, abtPrinter = std::make_unique(std::move(toExplain), explainVersion); } - sbePlan->prepare(data.ctx); + sbePlan->prepare(data.env.ctx); CurOp::get(opCtx)->stopQueryPlanningTimer(); return {opCtx, @@ -377,17 +462,18 @@ static void populateAdditionalScanDefs( // access to the metadata so it generates a scan over just the collection name. const std::string scanDefName = collNameStr; - opt::unordered_map indexDefs; + IndexDefinitions indexDefs; + MultikeynessTrie multikeynessTrie; const ProjectionName& scanProjName = prefixId.getNextId("scan"); if (collectionExists) { - indexDefs = buildIndexSpecsOptimizer(expCtx, - opCtx, - collection, - indexHint, - scanProjName, - prefixId, - disableIndexOptions, - disableScan); + tie(indexDefs, multikeynessTrie) = buildIndexSpecsOptimizer(expCtx, + opCtx, + collection, + indexHint, + scanProjName, + prefixId, + disableIndexOptions, + disableScan); } // For now handle only local parallelism (no over-the-network exchanges). @@ -395,13 +481,17 @@ static void populateAdditionalScanDefs( ? DistributionType::Centralized : DistributionType::UnknownPartitioning}; - const CEType collectionCE{collectionExists ? collection->numRecords(opCtx) : -1.0}; + boost::optional collectionCE; + if (collectionExists) { + collectionCE = collection->numRecords(opCtx); + } scanDefs.emplace(scanDefName, createScanDef({{"type", "mongod"}, {"database", involvedNss.db().toString()}, {"uuid", uuidStr}, {ScanNode::kDefaultCollectionNameSpec, collNameStr}}, std::move(indexDefs), + std::move(multikeynessTrie), constFold, std::move(distribution), collectionExists, @@ -503,15 +593,16 @@ Metadata populateMetadata(boost::intrusive_ptr expCtx, // Add the base collection metadata. 
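Two small C++ idioms recur in the hunks above: unpacking the (index definitions, multikeyness trie) pair with `std::tie` into pre-declared locals, and replacing the old `-1` record-count sentinel with an optional that is only set when the collection exists. A self-contained sketch with hypothetical stand-in types:

```cpp
#include <iostream>
#include <map>
#include <optional>
#include <string>
#include <tuple>
#include <utility>

// Hypothetical stand-ins for the two outputs built while scanning the index catalog.
using IndexDefs = std::map<std::string, int>;
struct Trie { bool isMultiKey = false; };

// One pass over the catalog produces both results as a pair.
std::pair<IndexDefs, Trie> buildIndexInfo() {
    return {{{"a_1", 1}}, Trie{false}};
}

int main() {
    IndexDefs defs;
    Trie trie;
    std::tie(defs, trie) = buildIndexInfo();  // unpack into pre-declared locals, as in the patch

    // Replacing a -1 sentinel with an optional: absent means "collection does not exist".
    bool collectionExists = true;
    std::optional<double> collectionCE;
    if (collectionExists) {
        collectionCE = 42.0;  // e.g. the collection's record count
    }

    std::cout << defs.size() << ' ' << collectionCE.value_or(-1.0) << '\n';  // "1 42"
}
```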
opt::unordered_map indexDefs; + MultikeynessTrie multikeynessTrie; if (collectionExists) { - indexDefs = buildIndexSpecsOptimizer(expCtx, - opCtx, - collection, - indexHint, - scanProjName, - prefixId, - queryHints._disableIndexes, - queryHints._disableScan); + tie(indexDefs, multikeynessTrie) = buildIndexSpecsOptimizer(expCtx, + opCtx, + collection, + indexHint, + scanProjName, + prefixId, + queryHints._disableIndexes, + queryHints._disableScan); } const size_t numberOfPartitions = internalQueryDefaultDOP.load(); @@ -521,17 +612,21 @@ Metadata populateMetadata(boost::intrusive_ptr expCtx, : DistributionType::UnknownPartitioning}; opt::unordered_map scanDefs; - const int64_t numRecords = collectionExists ? collection->numRecords(opCtx) : -1; + boost::optional numRecords; + if (collectionExists) { + numRecords = static_cast(collection->numRecords(opCtx)); + } scanDefs.emplace(scanDefName, createScanDef({{"type", "mongod"}, {"database", nss.db().toString()}, {"uuid", uuidStr}, {ScanNode::kDefaultCollectionNameSpec, nss.coll().toString()}}, std::move(indexDefs), + std::move(multikeynessTrie), constFold, std::move(distribution), collectionExists, - {static_cast(numRecords)})); + numRecords)); // Add a scan definition for all involved collections. Note that the base namespace has already // been accounted for above and isn't included here. diff --git a/src/mongo/db/query/cqf_get_executor.h b/src/mongo/db/query/cqf_get_executor.h index 2a59017f10d40..0d142084c9e5f 100644 --- a/src/mongo/db/query/cqf_get_executor.h +++ b/src/mongo/db/query/cqf_get_executor.h @@ -29,15 +29,29 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/explain_interface.h" #include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy_sbe.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_stage_builder.h" namespace mongo { diff --git a/src/mongo/db/query/cursor_idl_validator.cpp b/src/mongo/db/query/cursor_idl_validator.cpp index c66964a7e7eec..f7f27f3877749 100644 --- a/src/mongo/db/query/cursor_idl_validator.cpp +++ b/src/mongo/db/query/cursor_idl_validator.cpp @@ -29,6 +29,14 @@ #include "mongo/db/query/cursor_idl_validator.h" +#include + +#include +#include + +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/util/assert_util.h" + namespace mongo { /** diff --git a/src/mongo/db/query/cursor_idl_validator.h b/src/mongo/db/query/cursor_idl_validator.h index 31617d7a4287e..faf4b06906cb7 100644 --- a/src/mongo/db/query/cursor_idl_validator.h +++ b/src/mongo/db/query/cursor_idl_validator.h @@ -33,6 +33,7 @@ namespace mongo { class CursorInitialReply; + /** * Function used by the IDL parser to validate that a response has exactly one cursor type field. 
*/ diff --git a/src/mongo/db/query/cursor_request.cpp b/src/mongo/db/query/cursor_request.cpp index aaa7c76e95069..6ed28c930ebcb 100644 --- a/src/mongo/db/query/cursor_request.cpp +++ b/src/mongo/db/query/cursor_request.cpp @@ -29,9 +29,20 @@ #include "mongo/db/query/cursor_request.h" +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/basic_types_gen.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/query/cursor_request.h b/src/mongo/db/query/cursor_request.h index 3d92f42842f35..eb45eaf31b813 100644 --- a/src/mongo/db/query/cursor_request.h +++ b/src/mongo/db/query/cursor_request.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/error_extra_info.h" #include "mongo/base/status.h" namespace mongo { diff --git a/src/mongo/db/query/cursor_response.cpp b/src/mongo/db/query/cursor_response.cpp index 6d71206bdf73e..8fc4f617c634b 100644 --- a/src/mongo/db/query/cursor_response.cpp +++ b/src/mongo/db/query/cursor_response.cpp @@ -28,13 +28,21 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/query/cursor_response.h" +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsontypes.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -70,7 +78,9 @@ CursorResponseBuilder::CursorResponseBuilder(rpc::ReplyBuilderInterface* replyBu : kBatchField)); } -void CursorResponseBuilder::done(CursorId cursorId, const NamespaceString& cursorNamespace) { +void CursorResponseBuilder::done(CursorId cursorId, + const NamespaceString& cursorNamespace, + const SerializationContext& serializationContext) { invariant(_active); _batch.reset(); @@ -90,7 +100,8 @@ void CursorResponseBuilder::done(CursorId cursorId, const NamespaceString& curso } _cursorObject->append(kIdField, cursorId); - _cursorObject->append(kNsField, NamespaceStringUtil::serialize(cursorNamespace)); + _cursorObject->append(kNsField, + NamespaceStringUtil::serialize(cursorNamespace, serializationContext)); if (_options.atClusterTime) { _cursorObject->append(kAtClusterTimeField, _options.atClusterTime->asTimestamp()); } @@ -114,10 +125,12 @@ void appendCursorResponseObject(long long cursorId, const NamespaceString& cursorNamespace, BSONArray firstBatch, boost::optional cursorType, - BSONObjBuilder* builder) { + BSONObjBuilder* builder, + const SerializationContext& serializationContext) { BSONObjBuilder cursorObj(builder->subobjStart(kCursorField)); cursorObj.append(kIdField, cursorId); - cursorObj.append(kNsField, NamespaceStringUtil::serialize(cursorNamespace)); + cursorObj.append(kNsField, + NamespaceStringUtil::serialize(cursorNamespace, serializationContext)); cursorObj.append(kBatchFieldInitial, firstBatch); if (cursorType) { cursorObj.append(kTypeField, cursorType.value()); diff --git a/src/mongo/db/query/cursor_response.h b/src/mongo/db/query/cursor_response.h index e994cd1467f64..d492a66cebb9d 100644 --- a/src/mongo/db/query/cursor_response.h +++ b/src/mongo/db/query/cursor_response.h @@ -29,14 +29,29 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include 
#include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/clientcursor.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/tenant_id.h" #include "mongo/rpc/op_msg.h" #include "mongo/rpc/reply_builder_interface.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/serialization_context.h" namespace mongo { @@ -117,7 +132,10 @@ class CursorResponseBuilder { * Call this after successfully appending all fields that will be part of this response. * After calling, you may not call any more methods on this object. */ - void done(CursorId cursorId, const NamespaceString& cursorNamespace); + void done(CursorId cursorId, + const NamespaceString& cursorNamespace, + const SerializationContext& serializationContext = + SerializationContext::stateCommandReply()); /** * Call this if the response should not contain cursor information. It will completely remove @@ -154,11 +172,13 @@ class CursorResponseBuilder { * * This function is deprecated. Prefer CursorResponseBuilder or CursorResponse::toBSON() instead. */ -void appendCursorResponseObject(long long cursorId, - const NamespaceString& cursorNamespace, - BSONArray firstBatch, - boost::optional cursorType, - BSONObjBuilder* builder); +void appendCursorResponseObject( + long long cursorId, + const NamespaceString& cursorNamespace, + BSONArray firstBatch, + boost::optional cursorType, + BSONObjBuilder* builder, + const SerializationContext& serializationContext = SerializationContext::stateCommandReply()); /** * Builds a getMore response object from the provided cursor identifiers and "nextBatch", diff --git a/src/mongo/db/query/cursor_response_test.cpp b/src/mongo/db/query/cursor_response_test.cpp index 8d76e0d86d4d9..cedab57846916 100644 --- a/src/mongo/db/query/cursor_response_test.cpp +++ b/src/mongo/db/query/cursor_response_test.cpp @@ -27,15 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/query/cursor_response.h" - -#include "mongo/rpc/op_msg_rpc_impls.h" +#include +#include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/oid.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/resume_token.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { @@ -51,7 +62,7 @@ TEST(CursorResponseTest, parseFromBSONFirstBatch) { CursorResponse response = std::move(result.getValue()); ASSERT_EQ(response.getCursorId(), CursorId(123)); - ASSERT_EQ(response.getNSS().ns(), "db.coll"); + ASSERT_EQ(response.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(response.getBatch().size(), 2U); ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1)); ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2)); @@ -67,7 +78,7 @@ TEST(CursorResponseTest, parseFromBSONNextBatch) { CursorResponse response = std::move(result.getValue()); ASSERT_EQ(response.getCursorId(), CursorId(123)); - ASSERT_EQ(response.getNSS().ns(), "db.coll"); + ASSERT_EQ(response.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(response.getBatch().size(), 2U); ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1)); ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2)); @@ -83,7 +94,7 @@ TEST(CursorResponseTest, parseFromBSONCursorIdZero) { CursorResponse response = std::move(result.getValue()); ASSERT_EQ(response.getCursorId(), CursorId(0)); - ASSERT_EQ(response.getNSS().ns(), "db.coll"); + ASSERT_EQ(response.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(response.getBatch().size(), 2U); ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1)); ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2)); @@ -99,7 +110,7 @@ TEST(CursorResponseTest, parseFromBSONEmptyBatch) { CursorResponse response = std::move(result.getValue()); ASSERT_EQ(response.getCursorId(), CursorId(123)); - ASSERT_EQ(response.getNSS().ns(), "db.coll"); + ASSERT_EQ(response.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(response.getBatch().size(), 0U); } @@ -195,7 +206,7 @@ TEST(CursorResponseTest, parseFromBSONPartialResultsReturnedField) { CursorResponse response = std::move(result.getValue()); ASSERT_EQ(response.getCursorId(), CursorId(123)); - ASSERT_EQ(response.getNSS().ns(), "db.coll"); + ASSERT_EQ(response.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(response.getBatch().size(), 2U); ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1)); ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2)); @@ -223,7 +234,7 @@ TEST(CursorResponseTest, parseFromBSONVarsFieldCorrect) { CursorResponse response = std::move(result.getValue()); ASSERT_EQ(response.getCursorId(), CursorId(123)); - ASSERT_EQ(response.getNSS().ns(), "db.coll"); + ASSERT_EQ(response.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(response.getBatch().size(), 2U); ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1)); ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2)); @@ -251,7 +262,7 @@ TEST(CursorResponseTest, parseFromBSONMultipleVars) { CursorResponse response = std::move(result.getValue()); ASSERT_EQ(response.getCursorId(), CursorId(123)); - 
ASSERT_EQ(response.getNSS().ns(), "db.coll"); + ASSERT_EQ(response.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(response.getBatch().size(), 2U); ASSERT_BSONOBJ_EQ(response.getBatch()[0], BSON("_id" << 1)); ASSERT_BSONOBJ_EQ(response.getBatch()[1], BSON("_id" << 2)); @@ -274,7 +285,9 @@ TEST(CursorResponseTest, roundTripThroughCursorResponseBuilderWithPartialResults CursorResponseBuilder crb(&builder, options); crb.append(testDoc); crb.setPartialResultsReturned(true); - crb.done(CursorId(123), NamespaceString::createNamespaceString_forTest(boost::none, "db.coll")); + crb.done(CursorId(123), + NamespaceString::createNamespaceString_forTest(boost::none, "db.coll"), + SerializationContext::stateCommandReply()); // Confirm that the resulting BSONObj response matches the expected body. auto msg = builder.done(); @@ -288,7 +301,7 @@ TEST(CursorResponseTest, roundTripThroughCursorResponseBuilderWithPartialResults // Confirm the CursorReponse parsed from CursorResponseBuilder output has the correct content. CursorResponse response = std::move(swCursorResponse.getValue()); ASSERT_EQ(response.getCursorId(), CursorId(123)); - ASSERT_EQ(response.getNSS().ns(), "db.coll"); + ASSERT_EQ(response.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(response.getBatch().size(), 1U); ASSERT_BSONOBJ_EQ(response.getBatch()[0], testDoc); ASSERT_EQ(response.getPartialResultsReturned(), true); @@ -317,22 +330,22 @@ TEST(CursorResponseTest, RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); for (bool flagStatus : {false, true}) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", - flagStatus); - rpc::OpMsgReplyBuilder builder; BSONObj okStatus = BSON("ok" << 1); BSONObj testDoc = BSON("_id" << 1); + auto scReply = SerializationContext::stateCommandReply(); + scReply.setTenantIdSource(flagStatus /*nonPrefixedTenantId*/); + BSONObj expectedBody = BSON("cursor" << BSON("firstBatch" << BSON_ARRAY(testDoc) << "partialResultsReturned" << true << "id" << CursorId(123) << "ns" - << NamespaceStringUtil::serialize(nss))); + << NamespaceStringUtil::serialize(nss, scReply))); // Use CursorResponseBuilder to serialize the cursor response to OpMsgReplyBuilder. CursorResponseBuilder crb(&builder, options); crb.append(testDoc); crb.setPartialResultsReturned(true); - crb.done(CursorId(123), nss); + crb.done(CursorId(123), nss, scReply); // Confirm that the resulting BSONObj response matches the expected body. auto msg = builder.done(); @@ -355,7 +368,7 @@ TEST(CursorResponseTest, ASSERT_EQ(response.getPartialResultsReturned(), true); // Re-serialize a BSONObj response from the CursorResponse. - auto cursorResBSON = response.toBSONAsInitialResponse(); + auto cursorResBSON = response.toBSONAsInitialResponse(scReply); // Confirm that the BSON serialized by the CursorResponse is the same as that serialized by // the CursorResponseBuilder. 
Field ordering differs between the two, so compare @@ -510,7 +523,7 @@ TEST(CursorResponseTest, serializePostBatchResumeToken) { ASSERT_OK(reparsed.getStatus()); CursorResponse reparsedResponse = std::move(reparsed.getValue()); ASSERT_EQ(reparsedResponse.getCursorId(), CursorId(123)); - ASSERT_EQ(reparsedResponse.getNSS().ns(), "db.coll"); + ASSERT_EQ(reparsedResponse.getNSS().ns_forTest(), "db.coll"); ASSERT_EQ(reparsedResponse.getBatch().size(), 2U); ASSERT_BSONOBJ_EQ(*reparsedResponse.getPostBatchResumeToken(), postBatchResumeToken); } diff --git a/src/mongo/db/query/datetime/date_time_support.cpp b/src/mongo/db/query/datetime/date_time_support.cpp index 97d6ab3b7100f..149ede5fbeff8 100644 --- a/src/mongo/db/query/datetime/date_time_support.cpp +++ b/src/mongo/db/query/datetime/date_time_support.cpp @@ -28,21 +28,31 @@ */ -#include "mongo/platform/basic.h" - #include +#include +#include +#include #include #include +#include +#include #include +#include -#include "mongo/db/query/datetime/date_time_support.h" +#include +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/parse_number.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/service_context.h" #include "mongo/platform/overflow_arithmetic.h" #include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/query/datetime/date_time_support.h b/src/mongo/db/query/datetime/date_time_support.h index 627b02c04fac9..1129b38202390 100644 --- a/src/mongo/db/query/datetime/date_time_support.h +++ b/src/mongo/db/query/datetime/date_time_support.h @@ -29,12 +29,24 @@ #pragma once +#include +#include +#include +#include +#include #include +#include #include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/string_map.h" #include "mongo/util/time_support.h" @@ -47,7 +59,12 @@ struct _timelib_tzinfo; namespace mongo { using namespace std::string_literals; -static constexpr StringData kISOFormatString = "%Y-%m-%dT%H:%M:%S.%LZ"_sd; + +/** + * Default format values for date-times, e.g. for $dateToString aggregations. + */ +static constexpr StringData kIsoFormatStringZ = "%Y-%m-%dT%H:%M:%S.%LZ"_sd; +static constexpr StringData kIsoFormatStringNonZ = "%Y-%m-%dT%H:%M:%S.%L"_sd; /** * A set of standard measures of time used to express a length of time interval. diff --git a/src/mongo/db/query/datetime/date_time_support_test.cpp b/src/mongo/db/query/datetime/date_time_support_test.cpp index eb8794d91c351..63fab59b2a63f 100644 --- a/src/mongo/db/query/datetime/date_time_support_test.cpp +++ b/src/mongo/db/query/datetime/date_time_support_test.cpp @@ -27,14 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include -#include +#include #include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include "mongo/db/query/datetime/date_time_support.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/datetime/init_timezone_data.cpp b/src/mongo/db/query/datetime/init_timezone_data.cpp index a9f8fe97ec806..143138f15fbd1 100644 --- a/src/mongo/db/query/datetime/init_timezone_data.cpp +++ b/src/mongo/db/query/datetime/init_timezone_data.cpp @@ -27,16 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include - +#include #include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/query/distinct_command.idl b/src/mongo/db/query/distinct_command.idl index 7dbb262412585..8d898d08ac87f 100644 --- a/src/mongo/db/query/distinct_command.idl +++ b/src/mongo/db/query/distinct_command.idl @@ -31,6 +31,7 @@ global: imports: - "mongo/db/basic_types.idl" + - "mongo/db/query/hint.idl" commands: distinct: @@ -60,3 +61,7 @@ commands: description: "The unique sample id for the operation if it has been chosen for sampling." type: uuid optional: true + hint: + description: "The index name to use or the index specification document." + type: indexHint + default: BSONObj() diff --git a/src/mongo/db/query/explain.cpp b/src/mongo/db/query/explain.cpp index 7d6a4a30b2cdc..fa7f2d5c3edbd 100644 --- a/src/mongo/db/query/explain.cpp +++ b/src/mongo/db/query/explain.cpp @@ -27,45 +27,50 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/explain.h" - -#include "mongo/bson/util/builder.h" -#include "mongo/db/exec/cached_plan.h" -#include "mongo/db/exec/collection_scan.h" -#include "mongo/db/exec/count_scan.h" -#include "mongo/db/exec/distinct_scan.h" -#include "mongo/db/exec/idhack.h" -#include "mongo/db/exec/index_scan.h" -#include "mongo/db/exec/multi_plan.h" -#include "mongo/db/exec/near.h" -#include "mongo/db/exec/sort.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/keypattern.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/curop.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/pipeline/plan_executor_pipeline.h" -#include "mongo/db/query/canonical_query_encoder.h" -#include "mongo/db/query/collection_query_info.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/explain.h" #include "mongo/db/query/explain_common.h" -#include "mongo/db/query/get_executor.h" #include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_cache_key_factory.h" +#include "mongo/db/query/plan_enumerator_explain_info.h" #include "mongo/db/query/plan_executor.h" -#include "mongo/db/query/plan_executor_impl.h" -#include "mongo/db/query/plan_executor_sbe.h" +#include "mongo/db/query/plan_explainer_impl.h" +#include "mongo/db/query/plan_ranking_decision.h" #include "mongo/db/query/plan_summary_stats.h" -#include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_settings.h" #include "mongo/db/query/query_settings_decoration.h" -#include "mongo/db/query/stage_builder.h" -#include "mongo/db/server_options.h" +#include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/hex.h" -#include "mongo/util/net/socket_utils.h" -#include "mongo/util/overloaded_visitor.h" -#include "mongo/util/str.h" -#include "mongo/util/version.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo { namespace { @@ -84,10 +89,12 @@ namespace { void generatePlannerInfo(PlanExecutor* exec, const MultipleCollectionAccessor& collections, BSONObj extraInfo, + const SerializationContext& serializationContext, BSONObjBuilder* out) { BSONObjBuilder plannerBob(out->subobjStart("queryPlanner")); - plannerBob.append("namespace", NamespaceStringUtil::serialize(exec->nss())); + plannerBob.append("namespace", + NamespaceStringUtil::serialize(exec->nss(), serializationContext)); // Find whether there is an index filter set for the query shape. The 'indexFilterSet' field // will always be false in the case of EOF or idhack plans. 
@@ -345,6 +352,7 @@ void Explain::explainStages(PlanExecutor* exec, Status executePlanStatus, boost::optional winningPlanTrialStats, BSONObj extraInfo, + const SerializationContext& serializationContext, const BSONObj& command, BSONObjBuilder* out) { // @@ -355,7 +363,7 @@ void Explain::explainStages(PlanExecutor* exec, out->appendElements(explainVersionToBson(explainer.getVersion())); if (verbosity >= ExplainOptions::Verbosity::kQueryPlanner) { - generatePlannerInfo(exec, collections, extraInfo, out); + generatePlannerInfo(exec, collections, extraInfo, serializationContext, out); } if (verbosity >= ExplainOptions::Verbosity::kExecStats) { @@ -397,6 +405,7 @@ void Explain::explainStages(PlanExecutor* exec, const MultipleCollectionAccessor& collections, ExplainOptions::Verbosity verbosity, BSONObj extraInfo, + const SerializationContext& serializationContext, const BSONObj& command, BSONObjBuilder* out) { auto&& explainer = exec->getPlanExplainer(); @@ -427,6 +436,7 @@ void Explain::explainStages(PlanExecutor* exec, executePlanStatus, winningPlanTrialStats, extraInfo, + serializationContext, command, out); @@ -438,9 +448,16 @@ void Explain::explainStages(PlanExecutor* exec, const CollectionPtr& collection, ExplainOptions::Verbosity verbosity, BSONObj extraInfo, + const SerializationContext& serializationContext, const BSONObj& command, BSONObjBuilder* out) { - explainStages(exec, MultipleCollectionAccessor(collection), verbosity, extraInfo, command, out); + explainStages(exec, + MultipleCollectionAccessor(collection), + verbosity, + extraInfo, + serializationContext, + command, + out); } void Explain::planCacheEntryToBSON(const PlanCacheEntry& entry, BSONObjBuilder* out) { @@ -464,18 +481,8 @@ void Explain::planCacheEntryToBSON(const PlanCacheEntry& entry, BSONObjBuilder* } } - auto explainer = stdx::visit( - OverloadedVisitor{[](const plan_ranker::StatsDetails&) { - return plan_explainer_factory::make(nullptr); - }, - [](const plan_ranker::SBEStatsDetails&) { - return plan_explainer_factory::make(nullptr, nullptr, nullptr); - }}, - debugInfo.decision->stats); - auto plannerStats = - explainer->getCachedPlanStats(debugInfo, ExplainOptions::Verbosity::kQueryPlanner); - auto execStats = - explainer->getCachedPlanStats(debugInfo, ExplainOptions::Verbosity::kExecStats); + auto plannerStats = getCachedPlanStats(debugInfo, ExplainOptions::Verbosity::kQueryPlanner); + auto execStats = getCachedPlanStats(debugInfo, ExplainOptions::Verbosity::kExecStats); invariant(plannerStats.size() > 0); out->append("cachedPlan", plannerStats[0].first); diff --git a/src/mongo/db/query/explain.h b/src/mongo/db/query/explain.h index 6892f3982ba8f..5460748cbc287 100644 --- a/src/mongo/db/query/explain.h +++ b/src/mongo/db/query/explain.h @@ -29,11 +29,20 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/explain_options.h" +#include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/plan_cache.h" #include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/sbe_plan_cache.h" +#include "mongo/util/serialization_context.h" namespace mongo { @@ -76,6 +85,7 @@ class Explain { const CollectionPtr& collection, ExplainOptions::Verbosity verbosity, BSONObj extraInfo, + const SerializationContext& serializationContext, const BSONObj& command, BSONObjBuilder* 
out); @@ -87,6 +97,7 @@ class Explain { const MultipleCollectionAccessor& collections, ExplainOptions::Verbosity verbosity, BSONObj extraInfo, + const SerializationContext& serializationContext, const BSONObj& command, BSONObjBuilder* out); @@ -114,6 +125,7 @@ class Explain { Status executePlanStatus, boost::optional winningPlanTrialStats, BSONObj extraInfo, + const SerializationContext& serializationContext, const BSONObj& command, BSONObjBuilder* out); diff --git a/src/mongo/db/query/explain_common.cpp b/src/mongo/db/query/explain_common.cpp index 74c1eab1ab759..11d9df80e2b8b 100644 --- a/src/mongo/db/query/explain_common.cpp +++ b/src/mongo/db/query/explain_common.cpp @@ -27,13 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/query/explain_common.h" - +#include "mongo/bson/util/builder.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/str.h" +#include "mongo/util/synchronized_value.h" #include "mongo/util/version.h" namespace mongo::explain_common { diff --git a/src/mongo/db/query/explain_common.h b/src/mongo/db/query/explain_common.h index 93b735b7997df..146e9a1aff7d0 100644 --- a/src/mongo/db/query/explain_common.h +++ b/src/mongo/db/query/explain_common.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" /** diff --git a/src/mongo/db/query/explain_options.cpp b/src/mongo/db/query/explain_options.cpp index dc8a8c1525709..bb006d2360ab3 100644 --- a/src/mongo/db/query/explain_options.cpp +++ b/src/mongo/db/query/explain_options.cpp @@ -27,13 +27,8 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/query/explain_options.h" - #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/idl/command_generic_argument.h" -#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/query/explain_options.h b/src/mongo/db/query/explain_options.h index a2a46ca355ebb..63a29d117dd3a 100644 --- a/src/mongo/db/query/explain_options.h +++ b/src/mongo/db/query/explain_options.h @@ -30,6 +30,7 @@ #pragma once #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/query/explain_verbosity_gen.h" diff --git a/src/mongo/db/query/explain_version_validator.cpp b/src/mongo/db/query/explain_version_validator.cpp index f23c04c7b7d0c..b73a50989dc16 100644 --- a/src/mongo/db/query/explain_version_validator.cpp +++ b/src/mongo/db/query/explain_version_validator.cpp @@ -29,6 +29,11 @@ #include "mongo/db/query/explain_version_validator.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" + namespace mongo::optimizer { Status validateOptimizerExplainVersion(const std::string& value, const boost::optional&) { diff --git a/src/mongo/db/query/explain_version_validator.h b/src/mongo/db/query/explain_version_validator.h index fb56c0d87baa1..f60476d8da8db 100644 --- a/src/mongo/db/query/explain_version_validator.h +++ b/src/mongo/db/query/explain_version_validator.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include + #include "mongo/base/status.h" #include "mongo/db/tenant_id.h" diff --git a/src/mongo/db/query/expression_index.cpp b/src/mongo/db/query/expression_index.cpp index 70e222efa0dc1..0a1097a9fda73 100644 --- a/src/mongo/db/query/expression_index.cpp +++ b/src/mongo/db/query/expression_index.cpp @@ -30,18 +30,30 @@ #include "mongo/db/query/expression_index.h" -#include +#include +#include #include #include #include +#include +#include #include +#include -#include "mongo/db/geo/geoconstants.h" +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/geo/r2_region_coverer.h" #include "mongo/db/hasher.h" -#include "mongo/db/index/expression_params.h" #include "mongo/db/query/expression_index_knobs_gen.h" +#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/interval.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -77,7 +89,7 @@ std::vector ExpressionMapping::get2dCovering(const R2Region& region, const BSONObj& indexInfoObj, int maxCoveringCells) { auto result = GeoHashConverter::createFromDoc(indexInfoObj); - verify(result.isOK()); // We validated the parameters when creating the index. + MONGO_verify(result.isOK()); // We validated the parameters when creating the index. 
const auto bits = result.getValue()->getBits(); R2RegionCoverer coverer(std::move(result.getValue())); diff --git a/src/mongo/db/query/expression_index.h b/src/mongo/db/query/expression_index.h index a07f1b12f713c..3ea8b32ab43d2 100644 --- a/src/mongo/db/query/expression_index.h +++ b/src/mongo/db/query/expression_index.h @@ -29,12 +29,17 @@ #pragma once +#include +#include #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/geo/hash.h" #include "mongo/db/geo/shapes.h" #include "mongo/db/index/s2_common.h" #include "mongo/db/jsobj.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds_builder.h" // For OrderedIntervalList class S2CellId; diff --git a/src/mongo/db/query/expression_walker.h b/src/mongo/db/query/expression_walker.h index 55b4ddf4054ef..67369f7d290ff 100644 --- a/src/mongo/db/query/expression_walker.h +++ b/src/mongo/db/query/expression_walker.h @@ -27,7 +27,7 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#pragma once #include "mongo/db/pipeline/expression_visitor.h" diff --git a/src/mongo/db/query/find.cpp b/src/mongo/db/query/find.cpp index 62d89dd904e50..50c43ac0e2ff5 100644 --- a/src/mongo/db/query/find.cpp +++ b/src/mongo/db/query/find.cpp @@ -28,44 +28,25 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/find.h" - #include +#include +#include -#include "mongo/base/error_codes.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include +#include + +#include "mongo/db/basic_types.h" #include "mongo/db/clientcursor.h" -#include "mongo/db/commands.h" #include "mongo/db/curop.h" -#include "mongo/db/curop_failpoint_helpers.h" -#include "mongo/db/cursor_manager.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/exec/filter.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/keypattern.h" -#include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/explain.h" -#include "mongo/db/query/find_common.h" -#include "mongo/db/query/get_executor.h" -#include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_summary_stats.h" -#include "mongo/db/query/query_planner_params.h" -#include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/s/collection_sharding_state.h" -#include "mongo/db/server_options.h" -#include "mongo/db/service_context.h" -#include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/db/stats/top.h" -#include "mongo/db/storage/storage_options.h" -#include "mongo/logv2/log.h" -#include "mongo/s/stale_exception.h" +#include "mongo/db/query/query_stats_key_generator.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -112,7 +93,7 @@ void endQueryOp(OperationContext* opCtx, auto curOp = CurOp::get(opCtx); // Fill out basic CurOp query exec properties. More metrics (nreturned and executionTime) - // are collected within collectTelemetryMongod. + // are collected within collectQueryStatsMongod. curOp->debug().cursorid = (cursor.has_value() ? 
cursor->getCursor()->cursorid() : -1); curOp->debug().cursorExhausted = !cursor.has_value(); curOp->debug().additiveMetrics.nBatches = 1; @@ -122,11 +103,12 @@ void endQueryOp(OperationContext* opCtx, auto&& explainer = exec.getPlanExplainer(); explainer.getSummaryStats(&summaryStats); curOp->debug().setPlanSummaryMetrics(summaryStats); + curOp->setEndOfOpMetrics(numResults); if (cursor) { - collectTelemetryMongod(opCtx, *cursor, numResults); + collectQueryStatsMongod(opCtx, *cursor); } else { - collectTelemetryMongod(opCtx, cmdObj, numResults); + collectQueryStatsMongod(opCtx, std::move(curOp->debug().queryStatsKeyGenerator)); } if (collection) { diff --git a/src/mongo/db/query/find.h b/src/mongo/db/query/find.h index 235ebf91ed823..2ee67d9e10af5 100644 --- a/src/mongo/db/query/find.h +++ b/src/mongo/db/query/find.h @@ -29,12 +29,16 @@ #pragma once +#include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/clientcursor.h" #include "mongo/db/dbmessage.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/rpc/message.h" namespace mongo { diff --git a/src/mongo/db/query/find_command.h b/src/mongo/db/query/find_command.h new file mode 100644 index 0000000000000..8faf18e92aee4 --- /dev/null +++ b/src/mongo/db/query/find_command.h @@ -0,0 +1,68 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include "mongo/db/query/find_command_gen.h" + +namespace mongo { + +class FindCommandRequest : public FindCommandRequestBase { +public: + explicit FindCommandRequest( + NamespaceStringOrUUID nssOrUUID, + boost::optional serializationContext = boost::none) + : FindCommandRequestBase(std::move(nssOrUUID), std::move(serializationContext)) {} + + const NamespaceStringOrUUID& getNamespaceOrUUID() const { + if (_overrideNssOrUUID) { + return _overrideNssOrUUID.value(); + } + + return FindCommandRequestBase::getNamespaceOrUUID(); + } + + void setNss(const NamespaceString& nss) { + _overrideNssOrUUID = NamespaceStringOrUUID{nss}; + } + + static FindCommandRequest parse(const IDLParserContext& ctxt, const BSONObj& bsonObject) { + NamespaceString localNS; + FindCommandRequest object(localNS); + object.parseProtected(ctxt, bsonObject); + return object; + } + +private: + // This value is never serialized, instead we will serialize out the NamespaceStringOrUUID we + // parsed when building the FindCommandRequest. + boost::optional _overrideNssOrUUID; +}; + +} // namespace mongo diff --git a/src/mongo/db/query/find_command.idl b/src/mongo/db/query/find_command.idl index c7a157dc81053..3ad9137ca6133 100644 --- a/src/mongo/db/query/find_command.idl +++ b/src/mongo/db/query/find_command.idl @@ -68,12 +68,11 @@ types: commands: find: - cpp_name: FindCommandRequest + cpp_name: FindCommandRequestBase command_name: find description: "A struct representing the find command" strict: true namespace: concatenate_with_db_or_uuid - non_const_getter: true api_version: "1" access_check: complex: @@ -188,25 +187,32 @@ commands: optional: true stability: stable term: - description: "Deprecated." + description: "Internal usage only. Currently used by the replication system." type: safeInt64 optional: true stability: unstable readOnce: - description: "Deprecated." + description: "Deprecated. Used to inform the server whether this operation intends to + perform reads that do not need to keep data in the storage engine cache." type: optionalBool stability: unstable allowSpeculativeMajorityRead: - description: "Deprecated." + description: "Internal usage only. This flag currently allows change stream queries to use + speculative majority reads so that they can be used even when + enableMajorityReadConcern:false." type: optionalBool stability: unstable $_requestResumeToken: - description: "Deprecated." + description: "Internal usage only. This flag is currently used by the replication system to + request a resumeToken. If the client requested a resume token and we are scanning the + oplog, return timestamp-based tokens. Otherwise, returns generic RecordId-based tokens." cpp_name: requestResumeToken type: optionalBool stability: unstable $_resumeAfter: - description: "Deprecated." + description: "Internal usage only. If present, the collection scan will seek to the exact + RecordId, or return KeyNotFound if it does not exist. Must only be set on forward collection + scans. This field cannot be used in conjunction with 'min' or 'max'." 
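A minimal usage sketch tying the new FindCommandRequest wrapper above to the internal resume-token fields whose descriptions were just expanded (the field's remaining IDL attributes continue below). The collection, database, and parser-context names are assumptions for illustration only:

    // Illustrative only: parse a find that drives the internal resume-token machinery,
    // then pin a resolved namespace on the request. FindCommandRequest::parse() and
    // setNss() come from the wrapper defined above; the literals are made up.
    auto findRequest = FindCommandRequest::parse(
        IDLParserContext("findCommand"),
        BSON("find" << "oplog.rs"                      // forward collection scan target
                    << "hint" << BSON("$natural" << 1)
                    << "$_requestResumeToken" << true  // ask the server to emit resume tokens
                    << "$db" << "local"));

    // For a request that originally arrived by UUID, the resolved namespace is pinned here;
    // serialization will then emit the namespace rather than the UUID.
    findRequest.setNss(NamespaceString::createNamespaceString_forTest("local", "oplog.rs"));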
cpp_name: resumeAfter type: object_owned_nonempty_serialize default: mongo::BSONObj() diff --git a/src/mongo/db/query/find_common.cpp b/src/mongo/db/query/find_common.cpp index 4597d9a523372..0302e9a6afab3 100644 --- a/src/mongo/db/query/find_common.cpp +++ b/src/mongo/db/query/find_common.cpp @@ -28,16 +28,28 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/query/find_common.h" +#include +#include +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/curop.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/find_common.h" #include "mongo/db/query/query_request_helper.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/find_common.h b/src/mongo/db/query/find_common.h index d38d580bdd22c..27e1ad4f9b66c 100644 --- a/src/mongo/db/query/find_common.h +++ b/src/mongo/db/query/find_common.h @@ -27,9 +27,13 @@ * it in the license file. */ +#include + #include "mongo/bson/bsonobj.h" #include "mongo/db/operation_context.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/query/find_common_test.cpp b/src/mongo/db/query/find_common_test.cpp index d7dfc10d9506d..452dcfb3c9c91 100644 --- a/src/mongo/db/query/find_common_test.cpp +++ b/src/mongo/db/query/find_common_test.cpp @@ -27,15 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/query/find_common.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/query/fle/encrypted_predicate.cpp b/src/mongo/db/query/fle/encrypted_predicate.cpp index 0114b70be84a6..ca7122dab79db 100644 --- a/src/mongo/db/query/fle/encrypted_predicate.cpp +++ b/src/mongo/db/query/fle/encrypted_predicate.cpp @@ -29,11 +29,27 @@ #include "encrypted_predicate.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_crypto_types.h" #include "mongo/db/matcher/expression_array.h" -#include "mongo/db/query/fle/query_rewriter_interface.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/fle/encrypted_predicate.h b/src/mongo/db/query/fle/encrypted_predicate.h index 7e8a5205cc9ba..a1fcad7c5c759 100644 --- a/src/mongo/db/query/fle/encrypted_predicate.h +++ b/src/mongo/db/query/fle/encrypted_predicate.h @@ -29,18 +29,30 @@ #pragma once +#include #include - -#include "mongo/base/init.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" #include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/fle/query_rewriter_interface.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" /** * This file contains an abstract class that describes rewrites on agg Expressions and diff --git a/src/mongo/db/query/fle/encrypted_predicate_test_fixtures.h b/src/mongo/db/query/fle/encrypted_predicate_test_fixtures.h index d83d5056ba3f7..2ca672e1ff1aa 100644 --- a/src/mongo/db/query/fle/encrypted_predicate_test_fixtures.h +++ b/src/mongo/db/query/fle/encrypted_predicate_test_fixtures.h @@ -73,7 +73,7 @@ class MockServerRewrite : public QueryRewriterInterface { private: boost::intrusive_ptr _expCtx; EncryptedCollScanMode _mode{EncryptedCollScanMode::kUseIfNeeded}; - NamespaceString _mockNss{"mock"_sd}; + NamespaceString _mockNss = NamespaceString::createNamespaceString_forTest("mock"_sd); boost::optional _mockOptionalNss; }; diff --git a/src/mongo/db/query/fle/equality_predicate.cpp b/src/mongo/db/query/fle/equality_predicate.cpp index 3da59874b7a59..37b869b90cd95 100644 --- a/src/mongo/db/query/fle/equality_predicate.cpp +++ b/src/mongo/db/query/fle/equality_predicate.cpp @@ -29,14 +29,35 @@ #include "equality_predicate.h" +#include +#include 
+#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_tags.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_tree.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/fle/encrypted_predicate.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo::fle { @@ -192,7 +213,7 @@ std::unique_ptr EqualityPredicate::rewriteToRuntimeComparison( */ boost::optional> EqualityPredicate::extractDetailsFromComparison(ExpressionCompare* expr) const { - auto equalitiesList = expr->getChildren(); + auto& equalitiesList = expr->getChildren(); auto leftConstant = dynamic_cast(equalitiesList[0].get()); auto rightConstant = dynamic_cast(equalitiesList[1].get()); diff --git a/src/mongo/db/query/fle/equality_predicate.h b/src/mongo/db/query/fle/equality_predicate.h index e57fcd7f1a965..944a54c781773 100644 --- a/src/mongo/db/query/fle/equality_predicate.h +++ b/src/mongo/db/query/fle/equality_predicate.h @@ -29,8 +29,18 @@ #pragma once +#include +#include +#include +#include + #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/query/fle/encrypted_predicate.h" +#include "mongo/db/query/fle/query_rewriter_interface.h" namespace mongo::fle { /** diff --git a/src/mongo/db/query/fle/equality_predicate_test.cpp b/src/mongo/db/query/fle/equality_predicate_test.cpp index 34fd05d277c1b..1c0f9860e3cbd 100644 --- a/src/mongo/db/query/fle/equality_predicate_test.cpp +++ b/src/mongo/db/query/fle/equality_predicate_test.cpp @@ -27,19 +27,37 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_leaf.h" -#include "mongo/db/matcher/expression_tree.h" -#include "mongo/db/matcher/expression_visitor.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/fle/encrypted_predicate_test_fixtures.h" #include "mongo/db/query/fle/equality_predicate.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::fle { namespace { diff --git a/src/mongo/db/query/fle/query_rewriter.cpp b/src/mongo/db/query/fle/query_rewriter.cpp index bf7f37f3c50be..4ae94399fcfaa 100644 --- a/src/mongo/db/query/fle/query_rewriter.cpp +++ b/src/mongo/db/query/fle/query_rewriter.cpp @@ -29,9 +29,23 @@ #include "query_rewriter.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_walker.h" #include "mongo/db/query/fle/range_validator.h" +#include "mongo/util/assert_util.h" namespace mongo::fle { @@ -70,10 +84,7 @@ std::unique_ptr QueryRewriter::rewriteExpression(Expression* express boost::optional QueryRewriter::rewriteMatchExpression(const BSONObj& filter) { auto expr = uassertStatusOK(MatchExpressionParser::parse(filter, _expCtx)); - - if (gFeatureFlagFLE2Range.isEnabled(serverGlobalParams.featureCompatibility)) { - validateRanges(*expr.get()); - } + validateRanges(*expr.get()); _rewroteLastExpression = false; if (auto res = _rewrite(expr.get())) { diff --git a/src/mongo/db/query/fle/query_rewriter.h b/src/mongo/db/query/fle/query_rewriter.h index c200df79cede4..87f298d39b41c 100644 --- a/src/mongo/db/query/fle/query_rewriter.h +++ b/src/mongo/db/query/fle/query_rewriter.h @@ -29,12 +29,22 @@ #pragma once +#include +#include +#include + #include "mongo/bson/bsonobj.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/db/fle_crud.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/fle/encrypted_predicate.h" #include "mongo/db/query/fle/query_rewriter_interface.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::fle { /** diff --git a/src/mongo/db/query/fle/query_rewriter_test.cpp b/src/mongo/db/query/fle/query_rewriter_test.cpp index 7c985cc22f653..eeb9e3f566ef1 100644 --- a/src/mongo/db/query/fle/query_rewriter_test.cpp +++ b/src/mongo/db/query/fle/query_rewriter_test.cpp @@ -29,13 +29,24 @@ #include +#include +#include +#include #include "query_rewriter.h" +#include +#include +#include +#include +#include +#include 
"mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" -#include "mongo/bson/bsonmisc.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/expression.h" @@ -43,9 +54,10 @@ #include "mongo/db/query/fle/encrypted_predicate.h" #include "mongo/db/query/fle/encrypted_predicate_test_fixtures.h" #include "mongo/db/query/fle/query_rewriter_interface.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo { @@ -277,7 +289,7 @@ class FLEServerRewriteTest : public unittest::Test { std::unique_ptr _mock; fle::ExpressionToRewriteMap _agg; fle::MatchTypeToRewriteMap _match; - NamespaceString _mockNss{"mock"_sd}; + NamespaceString _mockNss = NamespaceString::createNamespaceString_forTest("mock"_sd); }; #define ASSERT_MATCH_EXPRESSION_REWRITE(input, expected) \ diff --git a/src/mongo/db/query/fle/range_predicate.cpp b/src/mongo/db/query/fle/range_predicate.cpp index ab271cc24b67e..9ad70c9837aa5 100644 --- a/src/mongo/db/query/fle/range_predicate.cpp +++ b/src/mongo/db/query/fle/range_predicate.cpp @@ -29,27 +29,35 @@ #include "range_predicate.h" +#include +#include +#include +#include +#include #include +#include + +#include -#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_tags.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/fle/encrypted_predicate.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::fle { -REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE_WITH_FLAG(GT, RangePredicate, gFeatureFlagFLE2Range); -REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE_WITH_FLAG(GTE, RangePredicate, gFeatureFlagFLE2Range); -REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE_WITH_FLAG(LT, RangePredicate, gFeatureFlagFLE2Range); -REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE_WITH_FLAG(LTE, RangePredicate, gFeatureFlagFLE2Range); +REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE(GT, RangePredicate); +REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE(GTE, RangePredicate); +REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE(LT, RangePredicate); +REGISTER_ENCRYPTED_MATCH_PREDICATE_REWRITE(LTE, RangePredicate); -REGISTER_ENCRYPTED_AGG_PREDICATE_REWRITE_WITH_FLAG(ExpressionCompare, - RangePredicate, - gFeatureFlagFLE2Range); +REGISTER_ENCRYPTED_AGG_PREDICATE_REWRITE(ExpressionCompare, RangePredicate); namespace { // Validate the range operator passed in and return the fieldpath and payload for the rewrite. 
If diff --git a/src/mongo/db/query/fle/range_predicate.h b/src/mongo/db/query/fle/range_predicate.h index 865905c986387..96c2dbc25959f 100644 --- a/src/mongo/db/query/fle/range_predicate.h +++ b/src/mongo/db/query/fle/range_predicate.h @@ -29,8 +29,21 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/query/fle/encrypted_predicate.h" +#include "mongo/db/query/fle/query_rewriter_interface.h" namespace mongo::fle { /** diff --git a/src/mongo/db/query/fle/range_predicate_test.cpp b/src/mongo/db/query/fle/range_predicate_test.cpp index 3495d5b0365d0..0f4d2fe9774b4 100644 --- a/src/mongo/db/query/fle/range_predicate_test.cpp +++ b/src/mongo/db/query/fle/range_predicate_test.cpp @@ -27,19 +27,37 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include +#include + #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/matcher/expression_expr.h" #include "mongo/db/matcher/expression_leaf.h" -#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/fle/encrypted_predicate.h" #include "mongo/db/query/fle/encrypted_predicate_test_fixtures.h" #include "mongo/db/query/fle/range_predicate.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo::fle { namespace { @@ -111,8 +129,6 @@ class RangePredicateRewriteTest : public EncryptedPredicateRewriteTest { }; TEST_F(RangePredicateRewriteTest, MatchRangeRewrite_NoStub) { - RAIIServerParameterControllerForTest controller("featureFlagFLE2Range", true); - std::vector allTags = {{1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}}; auto expCtx = make_intrusive(); @@ -126,8 +142,6 @@ TEST_F(RangePredicateRewriteTest, MatchRangeRewrite_NoStub) { } TEST_F(RangePredicateRewriteTest, MatchRangeRewrite_Stub) { - RAIIServerParameterControllerForTest controller("featureFlagFLE2Range", true); - auto expCtx = make_intrusive(); auto payload = fromjson("{x: [1, 2, 3, 4, 5, 6, 7, 8, 9]}"); @@ -159,8 +173,6 @@ TEST_F(RangePredicateRewriteTest, MatchRangeRewrite_Stub) { } TEST_F(RangePredicateRewriteTest, AggRangeRewrite_Stub) { - RAIIServerParameterControllerForTest controller("featureFlagFLE2Range", true); - auto ops = {"$gt", "$lt", "$gte", "$lte"}; for (auto& op : ops) { auto input = fromjson(str::stream() << "{" << op << ": [\"$age\", {$literal: [1, 2, 3]}]}"); diff --git a/src/mongo/db/query/fle/range_validator.cpp b/src/mongo/db/query/fle/range_validator.cpp index 0151c2e3052b7..c9a2cea245622 100644 --- 
a/src/mongo/db/query/fle/range_validator.cpp +++ b/src/mongo/db/query/fle/range_validator.cpp @@ -28,13 +28,31 @@ */ #include "range_validator.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_tree.h" #include "mongo/db/query/fle/encrypted_predicate.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" -#include +#include "mongo/util/str.h" namespace mongo { namespace fle { diff --git a/src/mongo/db/query/fle/range_validator.h b/src/mongo/db/query/fle/range_validator.h index 7a6c0c0153bfc..fac0d8398b006 100644 --- a/src/mongo/db/query/fle/range_validator.h +++ b/src/mongo/db/query/fle/range_validator.h @@ -28,10 +28,13 @@ */ #pragma once +#include "mongo/bson/util/builder_fwd.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/util/assert_util.h" + namespace mongo { // This overload is used when building validation error messages. inline StringBuilder& operator<<(StringBuilder& os, const Fle2RangeOperator& op) { diff --git a/src/mongo/db/query/fle/range_validator_test.cpp b/src/mongo/db/query/fle/range_validator_test.cpp index ee238b9ee2628..f3d576f86bd83 100644 --- a/src/mongo/db/query/fle/range_validator_test.cpp +++ b/src/mongo/db/query/fle/range_validator_test.cpp @@ -26,16 +26,36 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include + #include "encrypted_predicate_test_fixtures.h" +#include "range_validator.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" #include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/unittest/assert.h" -#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/framework.h" -#include "range_validator.h" -#include +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo::fle { class RangeValidatorTest : public unittest::Test { diff --git a/src/mongo/db/query/fle/server_rewrite.cpp b/src/mongo/db/query/fle/server_rewrite.cpp index 7626e9f4518db..fb02e97857282 100644 --- a/src/mongo/db/query/fle/server_rewrite.cpp +++ b/src/mongo/db/query/fle/server_rewrite.cpp @@ -30,32 +30,44 @@ #include "mongo/db/query/fle/server_rewrite.h" +#include +#include +#include #include - -#include "mongo/bson/bsonmisc.h" +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/bsontypes.h" #include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_field_schema_gen.h" -#include "mongo/crypto/fle_tags.h" #include "mongo/db/fle_crud.h" -#include "mongo/db/matcher/expression_expr.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_geo_near.h" #include "mongo/db/pipeline/document_source_graph_lookup.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/fle/encrypted_predicate.h" #include "mongo/db/query/fle/query_rewriter.h" #include "mongo/db/service_context.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" -#include "mongo/s/transaction_router_resource_yielder.h" +#include "mongo/db/transaction/transaction_api.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/fle/server_rewrite.h b/src/mongo/db/query/fle/server_rewrite.h index 61db414c32fd5..fc36879c444f5 100644 --- a/src/mongo/db/query/fle/server_rewrite.h +++ b/src/mongo/db/query/fle/server_rewrite.h @@ -29,17 +29,23 @@ #pragma once -#include - #include +#include #include "mongo/bson/bsonobj.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/fle_crud.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" +#include 
"mongo/db/operation_context.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/count_command_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/fle/query_rewriter_interface.h" #include "mongo/db/transaction/transaction_api.h" @@ -48,6 +54,7 @@ */ namespace mongo { class FLETagQueryInterface; + namespace fle { diff --git a/src/mongo/db/query/framework_control.cpp b/src/mongo/db/query/framework_control.cpp index 41a74540e85c1..15aa0a2140ec9 100644 --- a/src/mongo/db/query/framework_control.cpp +++ b/src/mongo/db/query/framework_control.cpp @@ -27,11 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/synchronized_value.h" namespace mongo { @@ -49,34 +62,29 @@ Status QueryFrameworkControl::setFromString(StringData value, const boost::optio // To enable Bonsai, the feature flag must be enabled. Here, we return an error to the user if // they try to set the framework control knob to use Bonsai while the feature flag is disabled. // - // Note that we only check if the feature flag is enabled ignoring FCV. If, for example, the FCV - // is not initialized, then we don't want to fail here. - // // The feature flag should be initialized by this point because // server_options_detail::applySetParameterOptions(std::map ...) // handles setParameters in alphabetical order, so "feature" comes before "internal". - // (Ignore FCV check): This is intentional because we always want to use this feature once the - // feature flag is enabled. 
- bool enabledWithoutFCV = - feature_flags::gFeatureFlagCommonQueryFramework.isEnabledAndIgnoreFCVUnsafe(); switch (newVal) { case QueryFrameworkControlEnum::kForceClassicEngine: case QueryFrameworkControlEnum::kTrySbeEngine: break; case QueryFrameworkControlEnum::kTryBonsai: - if (enabledWithoutFCV) { + if (feature_flags::gFeatureFlagCommonQueryFramework.isEnabled( + serverGlobalParams.featureCompatibility)) { break; } return {ErrorCodes::IllegalOperation, "featureFlagCommonQueryFramework must be enabled to run with tryBonsai"}; + case QueryFrameworkControlEnum::kTryBonsaiExperimental: case QueryFrameworkControlEnum::kForceBonsai: - if (enabledWithoutFCV && getTestCommandsEnabled()) { + if (getTestCommandsEnabled()) { break; } - return {ErrorCodes::IllegalOperation, - "featureFlagCommonQueryFramework and testCommands must be enabled to run with " - "forceBonsai"}; + return { + ErrorCodes::IllegalOperation, + "testCommands must be enabled to run with tryBonsaiExperimental or forceBonsai"}; } _data = std::move(newVal); diff --git a/src/mongo/db/query/get_executor.cpp b/src/mongo/db/query/get_executor.cpp index ba526bb6c72ec..b63b33523622a 100644 --- a/src/mongo/db/query/get_executor.cpp +++ b/src/mongo/db/query/get_executor.cpp @@ -29,60 +29,111 @@ #include "mongo/db/query/get_executor.h" +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" -#include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" -#include "mongo/db/exec/bucket_unpacker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/batched_delete_stage.h" #include "mongo/db/exec/cached_plan.h" -#include "mongo/db/exec/collection_scan.h" #include "mongo/db/exec/count.h" +#include "mongo/db/exec/delete_stage.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/eof.h" #include "mongo/db/exec/idhack.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/exec/multi_plan.h" #include "mongo/db/exec/plan_cache_util.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/projection.h" #include "mongo/db/exec/projection_executor_utils.h" #include "mongo/db/exec/record_store_fast_count.h" #include "mongo/db/exec/return_key.h" -#include "mongo/db/exec/sbe/stages/co_scan.h" -#include "mongo/db/exec/sbe/stages/limit_skip.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/shard_filter.h" #include "mongo/db/exec/sort_key_generator.h" +#include "mongo/db/exec/spool.h" #include "mongo/db/exec/subplan.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" #include "mongo/db/exec/timeseries_modify.h" -#include "mongo/db/exec/unpack_timeseries_bucket.h" +#include "mongo/db/exec/timeseries_upsert.h" +#include "mongo/db/exec/update_stage.h" #include "mongo/db/exec/upsert_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" #include "mongo/db/index/columns_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include 
"mongo/db/index/multikey_metadata_access_stats.h" #include "mongo/db/index/wildcard_access_method.h" #include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback.h" #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/ops/delete_request_gen.h" -#include "mongo/db/pipeline/document_source_lookup.h" -#include "mongo/db/query/bind_input_params.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/inner_pipeline_stage_interface.h" #include "mongo/db/query/canonical_query.h" -#include "mongo/db/query/canonical_query_encoder.h" #include "mongo/db/query/classic_plan_cache.h" -#include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collection_query_info.h" #include "mongo/db/query/cqf_command_utils.h" #include "mongo/db/query/cqf_get_executor.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/interval_evaluation_tree.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/explain_interface.h" +#include "mongo/db/query/plan_cache.h" #include "mongo/db/query/plan_cache_key_factory.h" #include "mongo/db/query/plan_executor_factory.h" -#include "mongo/db/query/planner_access.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_explainer_factory.h" +#include "mongo/db/query/plan_yield_policy_sbe.h" #include "mongo/db/query/planner_analysis.h" #include "mongo/db/query/planner_ixselect.h" #include "mongo/db/query/planner_wildcard_helpers.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" @@ -93,28 +144,45 @@ #include "mongo/db/query/query_utils.h" #include "mongo/db/query/sbe_cached_solution_planner.h" #include "mongo/db/query/sbe_multi_planner.h" +#include "mongo/db/query/sbe_plan_cache.h" +#include "mongo/db/query/sbe_runtime_planner.h" +#include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_sub_planner.h" #include "mongo/db/query/stage_builder_util.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/query/util/make_data_structure.h" #include "mongo/db/query/wildcard_multikey_paths.h" #include "mongo/db/query/yield_policy_callbacks_impl.h" -#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/service_context.h" #include "mongo/db/stats/counters.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_options.h" -#include "mongo/db/timeseries/timeseries_options.h" +#include 
"mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/timeseries/timeseries_update_delete_util.h" +#include "mongo/db/update/update_driver.h" +#include "mongo/db/yieldable.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" -#include "mongo/scripting/engine.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/processinfo.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" -#include "mongo/util/tick_source.h" -#include "mongo/util/timer.h" +#include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -224,8 +292,8 @@ IndexEntry indexEntryFromIndexCatalogEntry(OperationContext* opCtx, const WildcardProjection* wildcardProjection = nullptr; std::set multikeyPathSet; if (desc->getIndexType() == IndexType::INDEX_WILDCARD) { - auto wam = static_cast(accessMethod); - wildcardProjection = wam->getWildcardProjection(); + wildcardProjection = + static_cast(accessMethod)->getWildcardProjection(); if (isMultikey) { MultikeyMetadataAccessStats mkAccessStats; @@ -241,9 +309,9 @@ IndexEntry indexEntryFromIndexCatalogEntry(OperationContext* opCtx, } multikeyPathSet = - getWildcardMultikeyPathSet(wam, opCtx, projectedFields, &mkAccessStats); + getWildcardMultikeyPathSet(opCtx, &ice, projectedFields, &mkAccessStats); } else { - multikeyPathSet = getWildcardMultikeyPathSet(wam, opCtx, &mkAccessStats); + multikeyPathSet = getWildcardMultikeyPathSet(opCtx, &ice, &mkAccessStats); } LOGV2_DEBUG(20920, @@ -534,7 +602,8 @@ bool shouldWaitForOplogVisibility(OperationContext* opCtx, // to wait for the oplog visibility timestamp to be updated, it would wait for a replication // batch that would never complete because it couldn't reacquire its own lock, the global lock // held by the waiting reader. - return repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, "admin"); + return repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase( + opCtx, DatabaseName::kAdmin); } namespace { @@ -767,7 +836,7 @@ class PrepareExecutionHelper { // Tailable: If the query requests tailable the collection must be capped. 
if (_cq->getFindCommandRequest().getTailable() && !mainColl->isCapped()) { return Status(ErrorCodes::BadValue, - str::stream() << "error processing query: " << _cq->toString() + str::stream() << "error processing query: " << _cq->toStringForErrorMsg() << " tailable cursor requested on non capped collection"); } @@ -799,7 +868,7 @@ class PrepareExecutionHelper { if (!statusWithMultiPlanSolns.isOK()) { return statusWithMultiPlanSolns.getStatus().withContext( - str::stream() << "error processing query: " << _cq->toString() + str::stream() << "error processing query: " << _cq->toStringForErrorMsg() << " planner returned error"); } @@ -944,7 +1013,7 @@ class ClassicPrepareExecutionHelper final false /* DeferExecutionTreeGeneration */> { public: ClassicPrepareExecutionHelper(OperationContext* opCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, WorkingSet* ws, CanonicalQuery* cq, PlanYieldPolicy* yieldPolicy, @@ -954,7 +1023,7 @@ class ClassicPrepareExecutionHelper final _ws{ws} {} const CollectionPtr& getMainCollection() const override { - return _collection; + return _collection.getCollectionPtr(); } protected: @@ -963,10 +1032,11 @@ class ClassicPrepareExecutionHelper final } std::unique_ptr buildIdHackPlan() { - if (!isIdHackEligibleQuery(_collection, *_cq)) + if (!isIdHackEligibleQuery(getMainCollection(), *_cq)) return nullptr; - const IndexDescriptor* descriptor = _collection->getIndexCatalog()->findIdIndex(_opCtx); + const IndexDescriptor* descriptor = + getMainCollection()->getIndexCatalog()->findIdIndex(_opCtx); if (!descriptor) return nullptr; @@ -981,14 +1051,13 @@ class ClassicPrepareExecutionHelper final // Might have to filter out orphaned docs. if (_plannerParams.options & QueryPlannerParams::INCLUDE_SHARD_FILTER) { + auto shardFilterer = _collection.getShardingFilter(_opCtx); + invariant(shardFilterer, + "Attempting to use shard filter when there's no shard filter available for " + "the collection"); + stage = std::make_unique( - _cq->getExpCtxRaw(), - CollectionShardingState::assertCollectionLockedAndAcquire(_opCtx, _cq->nss()) - ->getOwnershipFilter( - _opCtx, - CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup), - _ws, - std::move(stage)); + _cq->getExpCtxRaw(), std::move(*shardFilterer), _ws, std::move(stage)); } const auto* cqProjection = _cq->getProj(); @@ -1038,7 +1107,7 @@ class ClassicPrepareExecutionHelper final } PlanCacheKey buildPlanCacheKey() const { - return plan_cache_key_factory::make(*_cq, _collection); + return plan_cache_key_factory::make(*_cq, getMainCollection()); } std::unique_ptr buildCachedPlan( @@ -1056,7 +1125,7 @@ class ClassicPrepareExecutionHelper final if (shouldCacheQuery(*_cq)) { // Try to look up a cached solution for the query. 
- if (auto cs = CollectionQueryInfo::get(_collection) + if (auto cs = CollectionQueryInfo::get(getMainCollection()) .getPlanCache() ->getCacheEntryIfActive(planCacheKey)) { planCacheCounters.incrementClassicHitsCounter(); @@ -1127,7 +1196,7 @@ class ClassicPrepareExecutionHelper final } private: - const CollectionPtr& _collection; + VariantCollectionPtrOrAcquisition _collection; WorkingSet* _ws; }; @@ -1224,7 +1293,7 @@ class SlotBasedPrepareExecutionHelper final StatusWith> getClassicExecutor( OperationContext* opCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, std::unique_ptr canonicalQuery, PlanYieldPolicy::YieldPolicy yieldPolicy, const QueryPlannerParams& plannerParams) { @@ -1246,7 +1315,7 @@ StatusWith> getClassicExecu return plan_executor_factory::make(std::move(canonicalQuery), std::move(ws), std::move(root), - &collection, + collection, yieldPolicy, plannerParams.options, {}, @@ -1303,7 +1372,7 @@ std::unique_ptr makeRuntimePlannerIfNeeded( // we will need to do the runtime planning to check if the cached plan still // performs efficiently, or requires re-planning. tassert(6693503, "PlanStageData must be present", planStageData); - const bool hasHashLookup = !planStageData->foreignHashJoinCollections.empty(); + const bool hasHashLookup = !planStageData->staticData->foreignHashJoinCollections.empty(); if (decisionWorks || hasHashLookup) { QueryPlannerParams plannerParams; plannerParams.options = plannerOptions; @@ -1317,10 +1386,11 @@ std::unique_ptr makeRuntimePlannerIfNeeded( std::unique_ptr makeSbeYieldPolicy( OperationContext* opCtx, - PlanYieldPolicy::YieldPolicy requestedYieldPolicy, - const Yieldable* yieldable, + PlanYieldPolicy::YieldPolicy policy, + stdx::variant yieldable, NamespaceString nss) { - return std::make_unique(requestedYieldPolicy, + return std::make_unique(opCtx, + policy, opCtx->getServiceContext()->getFastClockSource(), internalQueryExecYieldIterations.load(), Milliseconds{internalQueryExecYieldPeriodMS.load()}, @@ -1414,34 +1484,7 @@ StatusWith> getSlotBasedExe std::move(yieldPolicy), planningResult->isRecoveredFromPlanCache(), false /* generatedByBonsai */); -} - -/** - * Checks if the result of query planning is SBE compatible. If any of the query solutions in - * 'planningResult' cannot currently be compiled to an SBE plan via the SBE stage builders, then we - * will fall back to the classic engine. - */ -bool shouldPlanningResultUseSbe(const SlotBasedPrepareExecutionResult& planningResult) { - // If we have an entry in the SBE plan cache, then we can use SBE. - if (planningResult.isRecoveredFromPlanCache()) { - return true; - } - - const auto& solutions = planningResult.solutions(); - if (solutions.empty()) { - // Query needs subplanning (plans are generated later, we don't have access yet). We can - // proceed with using SBE in this case. - invariant(planningResult.needsSubplanning()); - return true; - } - - // Check that all query solutions are SBE compatible. - return std::all_of(solutions.begin(), solutions.end(), [](const auto& solution) { - // We must have a solution, otherwise we would have early exited. 
- invariant(solution->root()); - return isQueryPlanSbeCompatible(solution.get()); - }); -} +} // getSlotBasedExecutor /** * Function which returns true if 'cq' uses features that are currently supported in SBE without @@ -1457,7 +1500,21 @@ bool shouldUseRegularSbe(const CanonicalQuery& cq) { // The 'ExpressionContext' may indicate that there are expressions which are only supported in // SBE when 'featureFlagSbeFull' is set, or fully supported regardless of the value of the // feature flag. This function should only return true in the latter case. - return cq.getExpCtx()->sbeCompatibility == SbeCompatibility::fullyCompatible; + if (cq.getExpCtx()->sbeCompatibility != SbeCompatibility::fullyCompatible) { + return false; + } + for (const auto& stage : cq.pipeline()) { + if (auto groupStage = dynamic_cast(stage->documentSource())) { + // Group stage wouldn't be pushed down if it's not supported in SBE. + tassert(7548611, + "Unexpected SBE compatibility value", + groupStage->sbeCompatibility() != SbeCompatibility::notCompatible); + if (groupStage->sbeCompatibility() != SbeCompatibility::fullyCompatible) { + return false; + } + } + } + return true; } /** @@ -1495,11 +1552,14 @@ attemptToGetSlotBasedExecutor( // SBE-compatible query using SBE, even if the query uses features that are not on in SBE by // default. Either way, try to construct an SBE plan executor. if (canUseRegularSbe || sbeFull) { - // Create the SBE prepare execution helper and initialize the params for the planner. If - // planning results in any 'QuerySolution' which cannot be handled by the SBE stage builder, - // then we will fall back to the classic engine. - auto sbeYieldPolicy = makeSbeYieldPolicy( - opCtx, yieldPolicy, &collections.getMainCollection(), canonicalQuery->nss()); + stdx::variant yieldable; + if (collections.isAcquisition()) { + yieldable = PlanYieldPolicy::YieldThroughAcquisitions{}; + } else { + yieldable = &collections.getMainCollection(); + } + auto sbeYieldPolicy = + makeSbeYieldPolicy(opCtx, yieldPolicy, yieldable, canonicalQuery->nss()); SlotBasedPrepareExecutionHelper helper{ opCtx, collections, canonicalQuery.get(), sbeYieldPolicy.get(), plannerParams.options}; auto planningResultWithStatus = helper.prepare(); @@ -1507,24 +1567,22 @@ attemptToGetSlotBasedExecutor( return planningResultWithStatus.getStatus(); } - if (shouldPlanningResultUseSbe(*planningResultWithStatus.getValue())) { - if (extractAndAttachPipelineStages) { - // We know now that we will use SBE, so we need to remove the pushed-down stages - // from the original pipeline object. - extractAndAttachPipelineStages(canonicalQuery.get(), false /* attachOnly */); - } - auto statusWithExecutor = - getSlotBasedExecutor(opCtx, - collections, - std::move(canonicalQuery), - std::move(sbeYieldPolicy), - plannerParams, - std::move(planningResultWithStatus.getValue())); - if (statusWithExecutor.isOK()) { - return std::move(statusWithExecutor.getValue()); - } else { - return statusWithExecutor.getStatus(); - } + if (extractAndAttachPipelineStages) { + // Given that we are using SBE, we need to remove the pushed-down stages + // from the original pipeline object. 
+ extractAndAttachPipelineStages(canonicalQuery.get(), false /* attachOnly */); + } + auto statusWithExecutor = + getSlotBasedExecutor(opCtx, + collections, + std::move(canonicalQuery), + std::move(sbeYieldPolicy), + plannerParams, + std::move(planningResultWithStatus.getValue())); + if (statusWithExecutor.isOK()) { + return std::move(statusWithExecutor.getValue()); + } else { + return statusWithExecutor.getStatus(); } } @@ -1565,8 +1623,8 @@ StatusWith> getExecutor( ServerParameterSet::getNodeParameterSet()->get( "internalQueryFrameworkControl"); tassert(7319400, - "Optimization failed either without tryBonsai set, or without a hint.", - queryControl->_data.get() == QueryFrameworkControlEnum::kTryBonsai && + "Optimization failed either with forceBonsai set, or without a hint.", + queryControl->_data.get() != QueryFrameworkControlEnum::kForceBonsai && !canonicalQuery->getFindCommandRequest().getHint().isEmpty() && !fastIndexNullHandling); } @@ -1603,8 +1661,16 @@ StatusWith> getExecutor( } // Ensure that 'sbeCompatible' is set accordingly. canonicalQuery->setSbeCompatible(false); - return getClassicExecutor( - opCtx, mainColl, std::move(canonicalQuery), yieldPolicy, plannerParams); + if (collections.isAcquisition()) { + return getClassicExecutor(opCtx, + collections.getMainAcquisition(), + std::move(canonicalQuery), + yieldPolicy, + plannerParams); + } else { + return getClassicExecutor( + opCtx, &mainColl, std::move(canonicalQuery), yieldPolicy, plannerParams); + } }(); if (exec.isOK()) { stdx::lock_guard lk(*opCtx->getClient()); @@ -1615,19 +1681,21 @@ StatusWith> getExecutor( StatusWith> getExecutor( OperationContext* opCtx, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition coll, std::unique_ptr canonicalQuery, std::function extractAndAttachPipelineStages, PlanYieldPolicy::YieldPolicy yieldPolicy, size_t plannerOptions) { - MultipleCollectionAccessor multi{collection}; - return getExecutor(opCtx, - multi, - std::move(canonicalQuery), - std::move(extractAndAttachPipelineStages), - yieldPolicy, - QueryPlannerParams{plannerOptions}); + return getExecutor( + opCtx, + stdx::holds_alternative(coll.get()) + ? MultipleCollectionAccessor{stdx::get(coll.get())} + : MultipleCollectionAccessor{coll.getCollectionPtr()}, + std::move(canonicalQuery), + std::move(extractAndAttachPipelineStages), + yieldPolicy, + QueryPlannerParams{plannerOptions}); } // @@ -1642,9 +1710,8 @@ StatusWith> getExecutorFind bool permitYield, QueryPlannerParams plannerParams) { - auto yieldPolicy = (permitYield && !opCtx->inMultiDocumentTransaction()) - ? PlanYieldPolicy::YieldPolicy::YIELD_AUTO - : PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY; + auto yieldPolicy = permitYield ? 
PlanYieldPolicy::YieldPolicy::YIELD_AUTO + : PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY; if (OperationShardingState::isComingFromRouter(opCtx)) { plannerParams.options |= QueryPlannerParams::INCLUDE_SHARD_FILTER; @@ -1725,11 +1792,11 @@ StatusWith> makeProjection(const BSO StatusWith> getExecutorDelete( OpDebug* opDebug, - const CollectionPtr* coll, + CollectionAcquisition coll, ParsedDelete* parsedDelete, - boost::optional verbosity, - DeleteStageParams::DocumentCounter&& documentCounter) { - const auto& collection = *coll; + boost::optional verbosity) { + const auto& collectionPtr = coll.getCollectionPtr(); + auto expCtx = parsedDelete->expCtx(); OperationContext* opCtx = expCtx->opCtx; const DeleteRequest* request = parsedDelete->getRequest(); @@ -1743,18 +1810,11 @@ StatusWith> getExecutorDele } } - if (collection && collection->isCapped()) { + if (collectionPtr && collectionPtr->isCapped()) { expCtx->setIsCappedDelete(); } - // If the parsed delete does not have a user-specified collation, set it from the collection - // default. - if (collection && parsedDelete->getRequest()->getCollation().isEmpty() && - collection->getDefaultCollator()) { - parsedDelete->setCollator(collection->getDefaultCollator()->clone()); - } - - if (collection && collection->isCapped() && opCtx->inMultiDocumentTransaction()) { + if (collectionPtr && collectionPtr->isCapped() && opCtx->inMultiDocumentTransaction()) { // This check is duplicated from collection_internal::deleteDocument() for two reasons: // - Performing a remove on an empty capped collection would not call // collection_internal::deleteDocument(). @@ -1763,7 +1823,7 @@ StatusWith> getExecutorDele ErrorCodes::IllegalOperation, str::stream() << "Cannot remove from a capped collection in a multi-document transaction: " - << nss.ns()); + << nss.toStringForErrorMsg()); } bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && @@ -1771,7 +1831,8 @@ StatusWith> getExecutorDele if (userInitiatedWritesAndNotPrimary) { return Status(ErrorCodes::PrimarySteppedDown, - str::stream() << "Not primary while removing from " << nss.ns()); + str::stream() + << "Not primary while removing from " << nss.toStringForErrorMsg()); } auto deleteStageParams = std::make_unique(); @@ -1782,12 +1843,17 @@ StatusWith> getExecutorDele deleteStageParams->sort = request->getSort(); deleteStageParams->opDebug = opDebug; deleteStageParams->stmtId = request->getStmtId(); - deleteStageParams->numStatsForDoc = std::move(documentCounter); + + if (parsedDelete->isRequestToTimeseries() && + !parsedDelete->isEligibleForArbitraryTimeseriesDelete()) { + deleteStageParams->numStatsForDoc = timeseries::numMeasurementsForBucketCounter( + collectionPtr->getTimeseriesOptions()->getTimeField()); + } std::unique_ptr ws = std::make_unique(); const auto policy = parsedDelete->yieldPolicy(); - if (!collection) { + if (!collectionPtr) { // Treat collections that do not exist as empty collections. Return a PlanExecutor which // contains an EOF stage. LOGV2_DEBUG(20927, @@ -1798,7 +1864,7 @@ StatusWith> getExecutorDele return plan_executor_factory::make(expCtx, std::move(ws), std::make_unique(expCtx.get()), - &CollectionPtr::null, + coll, policy, false, /* whether we must return owned data */ nss); @@ -1812,7 +1878,8 @@ StatusWith> getExecutorDele // create a CanonicalQuery. 
const BSONObj& unparsedQuery = request->getQuery(); - const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx); + const IndexDescriptor* descriptor = + collectionPtr->getIndexCatalog()->findIdIndex(opCtx); // Construct delete request collator. std::unique_ptr collator; @@ -1825,24 +1892,25 @@ StatusWith> getExecutorDele collator = std::move(statusWithCollator.getValue()); } const bool hasCollectionDefaultCollation = request->getCollation().isEmpty() || - CollatorInterface::collatorsMatch(collator.get(), collection->getDefaultCollator()); + CollatorInterface::collatorsMatch(collator.get(), + collectionPtr->getDefaultCollator()); if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) && request->getProj().isEmpty() && hasCollectionDefaultCollation) { LOGV2_DEBUG(20928, 2, "Using idhack", "query"_attr = redact(unparsedQuery)); auto idHackStage = std::make_unique( - expCtx.get(), unparsedQuery["_id"].wrap(), ws.get(), collection, descriptor); + expCtx.get(), unparsedQuery["_id"].wrap(), ws.get(), coll, descriptor); std::unique_ptr root = std::make_unique(expCtx.get(), std::move(deleteStageParams), ws.get(), - collection, + coll, idHackStage.release()); return plan_executor_factory::make(expCtx, std::move(ws), std::move(root), - &collection, + coll, policy, false /* whether owned BSON must be returned */); } @@ -1861,7 +1929,7 @@ StatusWith> getExecutorDele uassert(ErrorCodes::InternalErrorNotSupported, "delete command is not eligible for bonsai", - !isEligibleForBonsai(*cq, opCtx, collection)); + !isEligibleForBonsai(*cq, opCtx, collectionPtr)); // Transfer the explain verbosity level into the expression context. cq->getExpCtx()->explain = verbosity; @@ -1884,7 +1952,7 @@ StatusWith> getExecutorDele const size_t defaultPlannerOptions = QueryPlannerParams::DEFAULT; ClassicPrepareExecutionHelper helper{ - opCtx, collection, ws.get(), cq.get(), nullptr, defaultPlannerOptions}; + opCtx, coll, ws.get(), cq.get(), nullptr, defaultPlannerOptions}; auto executionResult = helper.prepare(); if (!executionResult.isOK()) { @@ -1897,16 +1965,14 @@ StatusWith> getExecutorDele // TODO (SERVER-64506): support change streams' pre- and post-images. // TODO (SERVER-66079): allow batched deletions in the config.* namespace. - const bool batchDelete = - feature_flags::gBatchMultiDeletes.isEnabled(serverGlobalParams.featureCompatibility) && - gBatchUserMultiDeletes.load() && + const bool batchDelete = gBatchUserMultiDeletes.load() && (opCtx->recoveryUnit()->getState() == RecoveryUnit::State::kInactive || opCtx->recoveryUnit()->getState() == RecoveryUnit::State::kActiveNotInUnitOfWork) && !opCtx->inMultiDocumentTransaction() && !opCtx->isRetryableWrite() && - !collection->isChangeStreamPreAndPostImagesEnabled() && !collection->ns().isConfigDB() && - deleteStageParams->isMulti && !deleteStageParams->fromMigrate && - !deleteStageParams->returnDeleted && deleteStageParams->sort.isEmpty() && - !deleteStageParams->numStatsForDoc; + !collectionPtr->isChangeStreamPreAndPostImagesEnabled() && + !collectionPtr->ns().isConfigDB() && deleteStageParams->isMulti && + !deleteStageParams->fromMigrate && !deleteStageParams->returnDeleted && + deleteStageParams->sort.isEmpty() && !deleteStageParams->numStatsForDoc; auto expCtxRaw = cq->getExpCtxRaw(); if (parsedDelete->isEligibleForArbitraryTimeseriesDelete()) { @@ -1914,22 +1980,22 @@ StatusWith> getExecutorDele // directly. 
root = std::make_unique( expCtxRaw, - std::move(deleteStageParams), + TimeseriesModifyParams(deleteStageParams.get()), ws.get(), std::move(root), - collection, - BucketUnpacker(*collection->getTimeseriesOptions()), + coll, + BucketUnpacker(*collectionPtr->getTimeseriesOptions()), parsedDelete->releaseResidualExpr()); } else if (batchDelete) { root = std::make_unique(expCtxRaw, std::move(deleteStageParams), std::make_unique(), ws.get(), - collection, + coll, root.release()); } else { root = std::make_unique( - expCtxRaw, std::move(deleteStageParams), ws.get(), collection, root.release()); + expCtxRaw, std::move(deleteStageParams), ws.get(), coll, root.release()); } if (projection) { @@ -1942,7 +2008,7 @@ StatusWith> getExecutorDele return plan_executor_factory::make(std::move(cq), std::move(ws), std::move(root), - &collection, + coll, policy, defaultPlannerOptions, NamespaceString(), @@ -1955,11 +2021,10 @@ StatusWith> getExecutorDele StatusWith> getExecutorUpdate( OpDebug* opDebug, - const CollectionPtr* coll, + CollectionAcquisition coll, ParsedUpdate* parsedUpdate, - boost::optional verbosity, - UpdateStageParams::DocumentCounter&& documentCounter) { - const auto& collection = *coll; + boost::optional verbosity) { + const auto& collectionPtr = coll.getCollectionPtr(); auto expCtx = parsedUpdate->expCtx(); OperationContext* opCtx = expCtx->opCtx; @@ -1971,24 +2036,17 @@ StatusWith> getExecutorUpda if (nss.isSystem() && opCtx->lockState()->shouldConflictWithSecondaryBatchApplication()) { uassert(10156, - str::stream() << "cannot update a system namespace: " << nss.ns(), + str::stream() << "cannot update a system namespace: " << nss.toStringForErrorMsg(), nss.isLegalClientSystemNS(serverGlobalParams.featureCompatibility)); } // If there is no collection and this is an upsert, callers are supposed to create // the collection prior to calling this method. Explain, however, will never do // collection or database creation. - if (!collection && request->isUpsert()) { + if (!coll.exists() && request->isUpsert()) { invariant(request->explain()); } - // If the parsed update does not have a user-specified collation, set it from the collection - // default. - if (collection && parsedUpdate->getRequest()->getCollation().isEmpty() && - collection->getDefaultCollator()) { - parsedUpdate->setCollator(collection->getDefaultCollator()->clone()); - } - // If this is a user-issued update, then we want to return an error: you cannot perform // writes on a secondary. If this is an update to a secondary from the replication system, // however, then we make an exception and let the write proceed. 
@@ -1997,11 +2055,21 @@ StatusWith> getExecutorUpda if (userInitiatedWritesAndNotPrimary) { return Status(ErrorCodes::PrimarySteppedDown, - str::stream() << "Not primary while performing update on " << nss.ns()); + str::stream() << "Not primary while performing update on " + << nss.toStringForErrorMsg()); } const auto policy = parsedUpdate->yieldPolicy(); + auto documentCounter = [&] { + if (parsedUpdate->isRequestToTimeseries() && + !parsedUpdate->isEligibleForArbitraryTimeseriesUpdate()) { + return timeseries::numMeasurementsForBucketCounter( + collectionPtr->getTimeseriesOptions()->getTimeField()); + } + return UpdateStageParams::DocumentCounter{}; + }(); + std::unique_ptr ws = std::make_unique(); UpdateStageParams updateStageParams(request, driver, opDebug, std::move(documentCounter)); @@ -2009,7 +2077,7 @@ StatusWith> getExecutorUpda // should have already enforced upstream that in this case either the upsert flag is false, or // we are an explain. If the collection doesn't exist, we're not an explain, and the upsert flag // is true, we expect the caller to have created the collection already. - if (!collection) { + if (!coll.exists()) { LOGV2_DEBUG(20929, 2, "Collection does not exist. Using EOF stage", @@ -2024,23 +2092,18 @@ StatusWith> getExecutorUpda nss); } - // Pass index information to the update driver, so that it can determine for us whether the - // update affects indices. - const auto& updateIndexData = CollectionQueryInfo::get(collection).getIndexKeys(opCtx); - driver->refreshIndexKeys(&updateIndexData); - if (!parsedUpdate->hasParsedQuery()) { - // Only consider using the idhack if no hint was provided. if (request->getHint().isEmpty()) { // This is the idhack fast-path for getting a PlanExecutor without doing the work // to create a CanonicalQuery. const BSONObj& unparsedQuery = request->getQuery(); - const IndexDescriptor* descriptor = collection->getIndexCatalog()->findIdIndex(opCtx); + const IndexDescriptor* descriptor = + collectionPtr->getIndexCatalog()->findIdIndex(opCtx); const bool hasCollectionDefaultCollation = CollatorInterface::collatorsMatch( - expCtx->getCollator(), collection->getDefaultCollator()); + expCtx->getCollator(), collectionPtr->getDefaultCollator()); if (descriptor && CanonicalQuery::isSimpleIdQuery(unparsedQuery) && request->getProj().isEmpty() && hasCollectionDefaultCollation) { @@ -2049,7 +2112,7 @@ StatusWith> getExecutorUpda // Working set 'ws' is discarded. InternalPlanner::updateWithIdHack() makes its own // WorkingSet. return InternalPlanner::updateWithIdHack(opCtx, - &collection, + coll, updateStageParams, descriptor, unparsedQuery["_id"].wrap(), @@ -2070,7 +2133,7 @@ StatusWith> getExecutorUpda uassert(ErrorCodes::InternalErrorNotSupported, "update command is not eligible for bonsai", - !isEligibleForBonsai(*cq, opCtx, collection)); + !isEligibleForBonsai(*cq, opCtx, collectionPtr)); std::unique_ptr projection; if (!request->getProj().isEmpty()) { @@ -2093,7 +2156,7 @@ StatusWith> getExecutorUpda const size_t defaultPlannerOptions = QueryPlannerParams::DEFAULT; ClassicPrepareExecutionHelper helper{ - opCtx, collection, ws.get(), cq.get(), nullptr, defaultPlannerOptions}; + opCtx, coll, ws.get(), cq.get(), nullptr, defaultPlannerOptions}; auto executionResult = helper.prepare(); if (!executionResult.isOK()) { @@ -2104,11 +2167,41 @@ StatusWith> getExecutorUpda updateStageParams.canonicalQuery = cq.get(); const bool isUpsert = updateStageParams.request->isUpsert(); - root = (isUpsert - ? 
std::make_unique( - cq->getExpCtxRaw(), updateStageParams, ws.get(), collection, root.release()) - : std::make_unique( - cq->getExpCtxRaw(), updateStageParams, ws.get(), collection, root.release())); + if (parsedUpdate->isEligibleForArbitraryTimeseriesUpdate()) { + if (request->isMulti()) { + // If this is a multi-update, we need to spool the data before beginning to apply + // updates, in order to avoid the Halloween problem. + root = std::make_unique(cq->getExpCtxRaw(), ws.get(), std::move(root)); + } + if (isUpsert) { + root = std::make_unique( + cq->getExpCtxRaw(), + TimeseriesModifyParams(&updateStageParams), + ws.get(), + std::move(root), + coll, + BucketUnpacker(*collectionPtr->getTimeseriesOptions()), + parsedUpdate->releaseResidualExpr(), + parsedUpdate->releaseOriginalExpr(), + *request); + } else { + root = std::make_unique( + cq->getExpCtxRaw(), + TimeseriesModifyParams(&updateStageParams), + ws.get(), + std::move(root), + coll, + BucketUnpacker(*collectionPtr->getTimeseriesOptions()), + parsedUpdate->releaseResidualExpr(), + parsedUpdate->releaseOriginalExpr()); + } + } else if (isUpsert) { + root = std::make_unique( + cq->getExpCtxRaw(), updateStageParams, ws.get(), coll, root.release()); + } else { + root = std::make_unique( + cq->getExpCtxRaw(), updateStageParams, ws.get(), coll, root.release()); + } if (projection) { root = std::make_unique( @@ -2120,7 +2213,7 @@ StatusWith> getExecutorUpda return plan_executor_factory::make(std::move(cq), std::move(ws), std::move(root), - &collection, + coll, policy, defaultPlannerOptions, NamespaceString(), @@ -2203,7 +2296,8 @@ bool turnIxscanIntoCount(QuerySolution* soln) { auto makeCountScan = [&isn](BSONObj& csnStartKey, bool startKeyInclusive, BSONObj& csnEndKey, - bool endKeyInclusive) { + bool endKeyInclusive, + std::vector iets) { // Since count scans return no data, they are always forward scans. Index scans, on the // other hand, may need to scan the index in reverse order in order to obtain a sort. If the // index scan direction is backwards, then we need to swap the start and end of the count @@ -2218,6 +2312,7 @@ bool turnIxscanIntoCount(QuerySolution* soln) { csn->startKeyInclusive = startKeyInclusive; csn->endKey = csnEndKey; csn->endKeyInclusive = endKeyInclusive; + csn->iets = std::move(iets); return csn; }; @@ -2230,13 +2325,14 @@ bool turnIxscanIntoCount(QuerySolution* soln) { // quickly explode to a point where it would just be more efficient to use a single index // scan. Consequently, we draw the line at one such interval. 
if (auto nullFieldNo = boundsHasExactlyOneNullOrNullAndEmptyInterval(isn)) { - OrderedIntervalList undefinedPointOil, nullPointOil; - undefinedPointOil.intervals.push_back(IndexBoundsBuilder::kUndefinedPointInterval); - nullPointOil.intervals.push_back(IndexBoundsBuilder::kNullPointInterval); - tassert(5506501, "The index of the null interval is invalid", *nullFieldNo < isn->bounds.fields.size()); + auto nullFieldName = isn->bounds.fields[*nullFieldNo].name; + OrderedIntervalList undefinedPointOil(nullFieldName), nullPointOil(nullFieldName); + undefinedPointOil.intervals.push_back(IndexBoundsBuilder::kUndefinedPointInterval); + nullPointOil.intervals.push_back(IndexBoundsBuilder::kNullPointInterval); + auto makeNullBoundsCountScan = [&](OrderedIntervalList& oil) -> std::unique_ptr { std::swap(isn->bounds.fields[*nullFieldNo], oil); @@ -2246,7 +2342,17 @@ bool turnIxscanIntoCount(QuerySolution* soln) { bool startKeyInclusive, endKeyInclusive; if (IndexBoundsBuilder::isSingleInterval( isn->bounds, &startKey, &startKeyInclusive, &endKey, &endKeyInclusive)) { - return makeCountScan(startKey, startKeyInclusive, endKey, endKeyInclusive); + // Build a new IET list based on the rewritten index bounds. + std::vector iets = isn->iets; + if (!isn->iets.empty()) { + tassert(8423396, + "IETs and index bounds field must have same size.", + iets.size() == isn->bounds.fields.size()); + iets[*nullFieldNo] = interval_evaluation_tree::IET::make< + interval_evaluation_tree::ConstNode>(isn->bounds.fields[*nullFieldNo]); + } + return makeCountScan( + startKey, startKeyInclusive, endKey, endKeyInclusive, std::move(iets)); } return nullptr; @@ -2266,7 +2372,7 @@ bool turnIxscanIntoCount(QuerySolution* soln) { if (isn->index.multikey) { // For a multikey index, add the third COUNT_SCAN stage for empty array values. - OrderedIntervalList emptyArrayPointOil; + OrderedIntervalList emptyArrayPointOil(nullFieldName); emptyArrayPointOil.intervals.push_back( IndexBoundsBuilder::kEmptyArrayPointInterval); auto emptyArrayCsn = makeNullBoundsCountScan(emptyArrayPointOil); @@ -2287,7 +2393,7 @@ bool turnIxscanIntoCount(QuerySolution* soln) { } // Make the count node that we replace the fetch + ixscan with. - auto csn = makeCountScan(startKey, startKeyInclusive, endKey, endKeyInclusive); + auto csn = makeCountScan(startKey, startKeyInclusive, endKey, endKeyInclusive, isn->iets); // Takes ownership of 'cn' and deletes the old root. soln->setRoot(std::move(csn)); return true; @@ -2394,9 +2500,7 @@ StatusWith> getExecutorCoun } std::unique_ptr cq = std::move(statusWithCQ.getValue()); - const auto yieldPolicy = opCtx->inMultiDocumentTransaction() - ? 
PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO; + const auto yieldPolicy = PlanYieldPolicy::YieldPolicy::YIELD_AUTO; const auto skip = request.getSkip().value_or(0); const auto limit = request.getLimit().value_or(0); @@ -2431,7 +2535,7 @@ StatusWith> getExecutorCoun if (useRecordStoreCount) { std::unique_ptr root = - std::make_unique(expCtx.get(), collection, skip, limit); + std::make_unique(expCtx.get(), &collection, skip, limit); return plan_executor_factory::make(expCtx, std::move(ws), std::move(root), @@ -2447,8 +2551,7 @@ StatusWith> getExecutorCoun plannerOptions |= QueryPlannerParams::INCLUDE_SHARD_FILTER; } - ClassicPrepareExecutionHelper helper{ - opCtx, collection, ws.get(), cq.get(), nullptr, plannerOptions}; + ClassicPrepareExecutionHelper helper{opCtx, coll, ws.get(), cq.get(), nullptr, plannerOptions}; auto executionResult = helper.prepare(); if (!executionResult.isOK()) { return executionResult.getStatus(); @@ -2736,11 +2839,10 @@ QueryPlannerParams fillOutPlannerParamsForDistinct(OperationContext* opCtx, */ StatusWith> getExecutorForSimpleDistinct( OperationContext* opCtx, - const CollectionPtr* coll, + VariantCollectionPtrOrAcquisition coll, const QueryPlannerParams& plannerParams, PlanYieldPolicy::YieldPolicy yieldPolicy, ParsedDistinct* parsedDistinct) { - const auto& collection = *coll; invariant(parsedDistinct->getQuery()); auto collator = parsedDistinct->getQuery()->getCollator(); @@ -2781,7 +2883,7 @@ StatusWith> getExecutorForS std::unique_ptr ws = std::make_unique(); auto&& root = stage_builder::buildClassicExecutableTree( - opCtx, collection, *parsedDistinct->getQuery(), *soln, ws.get()); + opCtx, coll, *parsedDistinct->getQuery(), *soln, ws.get()); auto exec = plan_executor_factory::make(parsedDistinct->releaseQuery(), std::move(ws), @@ -2816,13 +2918,12 @@ StatusWith> getExecutorForS // 'strictDistinctOnly' parameter. 
StatusWith> getExecutorDistinctFromIndexSolutions(OperationContext* opCtx, - const CollectionPtr* coll, + VariantCollectionPtrOrAcquisition coll, std::vector> solutions, PlanYieldPolicy::YieldPolicy yieldPolicy, ParsedDistinct* parsedDistinct, bool flipDistinctScanDirection, size_t plannerOptions) { - const auto& collection = *coll; const bool strictDistinctOnly = (plannerOptions & QueryPlannerParams::STRICT_DISTINCT_ONLY); // We look for a solution that has an ixscan we can turn into a distinctixscan @@ -2835,7 +2936,7 @@ getExecutorDistinctFromIndexSolutions(OperationContext* opCtx, std::unique_ptr ws = std::make_unique(); std::unique_ptr currentSolution = std::move(solutions[i]); auto&& root = stage_builder::buildClassicExecutableTree( - opCtx, collection, *parsedDistinct->getQuery(), *currentSolution, ws.get()); + opCtx, coll, *parsedDistinct->getQuery(), *currentSolution, ws.get()); auto exec = plan_executor_factory::make(parsedDistinct->releaseQuery(), std::move(ws), @@ -2867,17 +2968,17 @@ getExecutorDistinctFromIndexSolutions(OperationContext* opCtx, */ StatusWith> getExecutorWithoutProjection( OperationContext* opCtx, - const CollectionPtr* coll, + VariantCollectionPtrOrAcquisition coll, const CanonicalQuery* cq, PlanYieldPolicy::YieldPolicy yieldPolicy, size_t plannerOptions) { - const auto& collection = *coll; + const auto& collectionPtr = coll.getCollectionPtr(); auto findCommand = std::make_unique(cq->getFindCommandRequest()); findCommand->setProjection(BSONObj()); const boost::intrusive_ptr expCtx; - const ExtensionsCallbackReal extensionsCallback(opCtx, &collection->ns()); + const ExtensionsCallbackReal extensionsCallback(opCtx, &collectionPtr->ns()); auto cqWithoutProjection = uassertStatusOKWithContext( CanonicalQuery::canonicalize(opCtx, @@ -2898,24 +2999,22 @@ StatusWith> getExecutorWith } // namespace StatusWith> getExecutorDistinct( - const CollectionPtr* coll, + VariantCollectionPtrOrAcquisition coll, size_t plannerOptions, ParsedDistinct* parsedDistinct, bool flipDistinctScanDirection) { - const auto& collection = *coll; + const auto& collectionPtr = coll.getCollectionPtr(); auto expCtx = parsedDistinct->getQuery()->getExpCtx(); OperationContext* opCtx = expCtx->opCtx; - const auto yieldPolicy = opCtx->inMultiDocumentTransaction() - ? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO; + const auto yieldPolicy = PlanYieldPolicy::YieldPolicy::YIELD_AUTO; // Assert that not eligible for bonsai uassert(ErrorCodes::InternalErrorNotSupported, "distinct command is not eligible for bonsai", - !isEligibleForBonsai(*parsedDistinct->getQuery(), opCtx, collection)); + !isEligibleForBonsai(*parsedDistinct->getQuery(), opCtx, collectionPtr)); - if (!collection) { + if (!collectionPtr) { // Treat collections that do not exist as empty collections. return plan_executor_factory::make(parsedDistinct->releaseQuery(), std::make_unique(), @@ -2937,7 +3036,7 @@ StatusWith> getExecutorDist // a soln with the above properties. auto plannerParams = fillOutPlannerParamsForDistinct( - opCtx, collection, plannerOptions, *parsedDistinct, flipDistinctScanDirection); + opCtx, collectionPtr, plannerOptions, *parsedDistinct, flipDistinctScanDirection); // If there are no suitable indices for the distinct hack bail out now into regular planning // with no projection. 
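For reference, the recurring change across the get_executor.cpp hunks above (functions now taking a VariantCollectionPtrOrAcquisition instead of a bare const CollectionPtr&) follows the shape sketched below. This is a minimal standalone analogue under assumed names, not server code: CollectionLike, Acquisition, and VariantCollection stand in for CollectionPtr, CollectionAcquisition, and VariantCollectionPtrOrAcquisition.

// Minimal sketch of the variant-based collection access pattern; all names are
// illustrative stand-ins, not the actual server types.
#include <iostream>
#include <string>
#include <type_traits>
#include <utility>
#include <variant>

struct CollectionLike {
    std::string name;
};

struct Acquisition {
    CollectionLike coll;
    const CollectionLike& getCollectionPtr() const {
        return coll;
    }
};

// Analogue of VariantCollectionPtrOrAcquisition: callers pass either a plain
// pointer (legacy lock-based path) or an acquisition (shard-role path), and
// downstream code visits the variant to reach the collection uniformly.
class VariantCollection {
public:
    VariantCollection(const CollectionLike* ptr) : _v(ptr) {}
    VariantCollection(Acquisition acq) : _v(std::move(acq)) {}

    const CollectionLike& get() const {
        return std::visit(
            [](const auto& alt) -> const CollectionLike& {
                using T = std::decay_t<decltype(alt)>;
                if constexpr (std::is_same_v<T, const CollectionLike*>) {
                    return *alt;
                } else {
                    return alt.getCollectionPtr();
                }
            },
            _v);
    }

    bool isAcquisition() const {
        return std::holds_alternative<Acquisition>(_v);
    }

private:
    std::variant<const CollectionLike*, Acquisition> _v;
};

int main() {
    CollectionLike c{"test.coll"};
    VariantCollection fromPtr(&c);
    VariantCollection fromAcq(Acquisition{c});
    std::cout << fromPtr.get().name << " " << fromAcq.isAcquisition() << "\n";
}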
diff --git a/src/mongo/db/query/get_executor.h b/src/mongo/db/query/get_executor.h index 57d795a60050f..9068d52b80cc5 100644 --- a/src/mongo/db/query/get_executor.h +++ b/src/mongo/db/query/get_executor.h @@ -29,23 +29,47 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/curop.h" #include "mongo/db/exec/batched_delete_stage.h" #include "mongo/db/exec/delete_stage.h" #include "mongo/db/exec/update_stage.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/delete_request_gen.h" #include "mongo/db/ops/parsed_delete.h" #include "mongo/db/ops/parsed_update.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/count_command_gen.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/parsed_distinct.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_settings.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/record_id.h" +#include "mongo/db/shard_role.h" #include "mongo/db/update/update_driver.h" namespace mongo { @@ -173,7 +197,7 @@ StatusWith> getExecutor( StatusWith> getExecutor( OperationContext* opCtx, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition coll, std::unique_ptr canonicalQuery, std::function extractAndAttachPipelineStages, PlanYieldPolicy::YieldPolicy yieldPolicy, @@ -270,7 +294,7 @@ bool turnIxscanIntoDistinctIxscan(QuerySolution* soln, * distinct. */ StatusWith> getExecutorDistinct( - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition coll, size_t plannerOptions, ParsedDistinct* parsedDistinct, bool flipDistinctScanDirection = false); @@ -310,10 +334,9 @@ StatusWith> getExecutorCoun */ StatusWith> getExecutorDelete( OpDebug* opDebug, - const CollectionPtr* collection, + CollectionAcquisition coll, ParsedDelete* parsedDelete, - boost::optional verbosity, - DeleteStageParams::DocumentCounter&& documentCounter = nullptr); + boost::optional verbosity); /** * Get a PlanExecutor for an update operation. 'parsedUpdate' describes the query predicate @@ -337,10 +360,9 @@ StatusWith> getExecutorDele */ StatusWith> getExecutorUpdate( OpDebug* opDebug, - const CollectionPtr* collection, + CollectionAcquisition coll, ParsedUpdate* parsedUpdate, - boost::optional verbosity, - UpdateStageParams::DocumentCounter&& documentCounter = nullptr); + boost::optional verbosity); /** * Direction of collection scan plan executor returned by makeCollectionScanPlanExecutor() below. 
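The get_executor.h hunks above drop the caller-supplied DocumentCounter arguments from getExecutorDelete() and getExecutorUpdate(); per the corresponding .cpp hunks, the counter is now derived inside those functions from the parsed write, with time-series buckets reporting their measurement count. The sketch below is a simplified, hypothetical illustration of that shape; the names and the stubbed bucket count are not taken from the server code.

// Hypothetical stand-ins: DocumentCounter mirrors the DocumentCounter typedefs
// in DeleteStageParams/UpdateStageParams, ParsedWrite mirrors the bits of
// ParsedDelete/ParsedUpdate consulted by the new code path.
#include <functional>
#include <iostream>

using DocumentCounter = std::function<long long(const char* /*bucketDoc*/)>;

struct ParsedWrite {
    bool isRequestToTimeseries = false;
    bool isEligibleForArbitraryTimeseriesWrite = false;
};

// Previously every caller passed a counter (usually nullptr); now the executor
// factory derives it locally from the parsed request.
DocumentCounter makeDocumentCounter(const ParsedWrite& parsed) {
    if (parsed.isRequestToTimeseries && !parsed.isEligibleForArbitraryTimeseriesWrite) {
        // Legacy time-series path: each matched "document" is a bucket, so the
        // counter reports how many measurements the bucket holds. The constant
        // below is a stub; the server derives it from the bucket's time field.
        return [](const char*) { return 3LL; };
    }
    return {};  // empty counter: plain documents are counted one-by-one
}

int main() {
    DocumentCounter counter = makeDocumentCounter({true, false});
    std::cout << (counter ? counter("bucket") : 1LL) << "\n";  // prints 3
}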
diff --git a/src/mongo/db/query/get_executor_test.cpp b/src/mongo/db/query/get_executor_test.cpp index f8954cdad4fb4..539bfe447da83 100644 --- a/src/mongo/db/query/get_executor_test.cpp +++ b/src/mongo/db/query/get_executor_test.cpp @@ -33,22 +33,37 @@ #include "mongo/db/query/get_executor.h" -#include +#include +#include #include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/exec/index_path_projection.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" -#include "mongo/db/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/classic_plan_cache.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/projection_parser.h" #include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_settings.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/stdx/type_traits.h" #include "mongo/stdx/unordered_set.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/str.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" using namespace mongo; diff --git a/src/mongo/db/query/getmore_request_test.cpp b/src/mongo/db/query/getmore_request_test.cpp index cf04d5075c89b..3fe56c7ba65f2 100644 --- a/src/mongo/db/query/getmore_request_test.cpp +++ b/src/mongo/db/query/getmore_request_test.cpp @@ -27,16 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/cursor_id.h" -#include "mongo/db/jsobj.h" #include "mongo/db/query/getmore_command_gen.h" #include "mongo/db/repl/optime.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/query/hint_parser.cpp b/src/mongo/db/query/hint_parser.cpp index 1de75b84f8291..d8a80aaeffdd2 100644 --- a/src/mongo/db/query/hint_parser.cpp +++ b/src/mongo/db/query/hint_parser.cpp @@ -29,7 +29,11 @@ #include "mongo/db/query/hint_parser.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/query/hint_parser.h b/src/mongo/db/query/hint_parser.h index aee0e3a4126da..493e6f6a6fcb5 100644 --- a/src/mongo/db/query/hint_parser.h +++ b/src/mongo/db/query/hint_parser.h @@ -30,6 +30,7 @@ #pragma once +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/query/hint_parser_test.cpp b/src/mongo/db/query/hint_parser_test.cpp index 2760a2226e8b5..91a5f2feb0844 100644 --- a/src/mongo/db/query/hint_parser_test.cpp +++ b/src/mongo/db/query/hint_parser_test.cpp @@ -27,13 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/query/hint_parser.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/query/index_bounds.cpp b/src/mongo/db/query/index_bounds.cpp index 8ac73a28b781c..b22b8b901b8ab 100644 --- a/src/mongo/db/query/index_bounds.cpp +++ b/src/mongo/db/query/index_bounds.cpp @@ -29,12 +29,19 @@ #include "mongo/db/query/index_bounds.h" +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include -#include +#include +#include #include -#include "mongo/base/simple_string_data_comparator.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -599,19 +606,19 @@ bool IndexBoundsChecker::isValidKey(const BSONObj& key) { } IndexBoundsChecker::KeyState IndexBoundsChecker::checkKey(const BSONObj& key, IndexSeekPoint* out) { - verify(_curInterval.size() > 0); + MONGO_verify(_curInterval.size() > 0); out->keySuffix.resize(_curInterval.size()); // It's useful later to go from a field number to the value for that field. Store these. size_t i = 0; BSONObjIterator keyIt(key); while (keyIt.more()) { - verify(i < _curInterval.size()); + MONGO_verify(i < _curInterval.size()); _keyValues[i] = keyIt.next(); i++; } - verify(i == _curInterval.size()); + MONGO_verify(i == _curInterval.size()); size_t firstNonContainedField; Location orientation; @@ -655,7 +662,7 @@ IndexBoundsChecker::KeyState IndexBoundsChecker::checkKey(const BSONObj& key, In return MUST_ADVANCE; } - verify(AHEAD == orientation); + MONGO_verify(AHEAD == orientation); // Field number 'firstNonContainedField' of the index key is after interval we think it's // in. Fields 0 through 'firstNonContained-1' are within their current intervals and we can @@ -699,7 +706,7 @@ IndexBoundsChecker::KeyState IndexBoundsChecker::checkKey(const BSONObj& key, In return MUST_ADVANCE; } else { - verify(AHEAD == where); + MONGO_verify(AHEAD == where); // Field number 'firstNonContainedField' cannot possibly be placed into an interval, // as it is already past its last possible interval. The caller must move forward // to a key with a greater value for the previous field. 
@@ -724,7 +731,7 @@ IndexBoundsChecker::KeyState IndexBoundsChecker::checkKey(const BSONObj& key, In } } - verify(firstNonContainedField == _curInterval.size()); + MONGO_verify(firstNonContainedField == _curInterval.size()); return VALID; } diff --git a/src/mongo/db/query/index_bounds.h b/src/mongo/db/query/index_bounds.h index 4ea4de48a9c83..616d575143f37 100644 --- a/src/mongo/db/query/index_bounds.h +++ b/src/mongo/db/query/index_bounds.h @@ -29,9 +29,12 @@ #pragma once +#include #include #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/query/interval.h" #include "mongo/db/storage/index_entry_comparison.h" diff --git a/src/mongo/db/query/index_bounds_builder.cpp b/src/mongo/db/query/index_bounds_builder.cpp index 804bdef84667d..1623089240af7 100644 --- a/src/mongo/db/query/index_bounds_builder.cpp +++ b/src/mongo/db/query/index_bounds_builder.cpp @@ -30,21 +30,33 @@ #include "mongo/db/query/index_bounds_builder.h" +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include #include -#include -#include +#include +#include +#include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsontypes.h" -#include "mongo/db/geo/geoconstants.h" -#include "mongo/db/geo/s2.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/geo/geometry_container.h" +#include "mongo/db/geo/shapes.h" #include "mongo/db/index/expression_params.h" #include "mongo/db/index/s2_common.h" +#include "mongo/db/index_names.h" #include "mongo/db/matcher/expression_geo.h" #include "mongo/db/matcher/expression_internal_bucket_geo_within.h" #include "mongo/db/matcher/expression_internal_eq_hashed_key.h" #include "mongo/db/matcher/expression_internal_expr_comparison.h" +#include "mongo/db/matcher/expression_type.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/query/analyze_regex.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_interface.h" @@ -53,8 +65,15 @@ #include "mongo/db/query/indexability.h" #include "mongo/db/query/planner_ixselect.h" #include "mongo/db/query/planner_wildcard_helpers.h" -#include "mongo/db/query/query_knobs_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -270,9 +289,9 @@ void IndexBoundsBuilder::translateAndUnion(const MatchExpression* expr, bool typeMatch(const BSONObj& obj) { BSONObjIterator it(obj); - verify(it.more()); + MONGO_verify(it.more()); BSONElement first = it.next(); - verify(it.more()); + MONGO_verify(it.more()); BSONElement second = it.next(); return first.canonicalType() == second.canonicalType(); } @@ -693,7 +712,7 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, BSONObjBuilder bob; buildBoundsForQueryElementForLT(dataElt, index.collator, &bob); BSONObj dataObj = bob.done().getOwned(); - verify(dataObj.isOwned()); + MONGO_verify(dataObj.isOwned()); bool inclusiveBounds = dataElt.type() == BSONType::Array; Interval interval = makeRangeInterval(dataObj, @@ -781,7 +800,7 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, BSONObjBuilder bob; 
buildBoundsForQueryElementForLT(dataElt, index.collator, &bob); BSONObj dataObj = bob.done().getOwned(); - verify(dataObj.isOwned()); + MONGO_verify(dataObj.isOwned()); bool inclusiveBounds = dataElt.type() == BSONType::Array || typeMatch(dataObj); const Interval interval = makeRangeInterval( @@ -853,7 +872,7 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, BSONObjBuilder bob; buildBoundsForQueryElementForGT(dataElt, index.collator, &bob); BSONObj dataObj = bob.done().getOwned(); - verify(dataObj.isOwned()); + MONGO_verify(dataObj.isOwned()); bool inclusiveBounds = dataElt.type() == BSONType::Array; Interval interval = makeRangeInterval(dataObj, @@ -942,7 +961,7 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, BSONObjBuilder bob; buildBoundsForQueryElementForGT(dataElt, index.collator, &bob); BSONObj dataObj = bob.done().getOwned(); - verify(dataObj.isOwned()); + MONGO_verify(dataObj.isOwned()); bool inclusiveBounds = dataElt.type() == BSONType::Array || typeMatch(dataObj); const Interval interval = makeRangeInterval( dataObj, IndexBounds::makeBoundInclusionFromBoundBools(true, inclusiveBounds)); @@ -1005,7 +1024,7 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, bob.appendMinForType("", NumberDouble); bob.appendMaxForType("", NumberDouble); BSONObj dataObj = bob.obj(); - verify(dataObj.isOwned()); + MONGO_verify(dataObj.isOwned()); oilOut->intervals.push_back( makeRangeInterval(dataObj, BoundInclusion::kIncludeBothStartAndEndKeys)); *tightnessOut = IndexBoundsBuilder::INEXACT_COVERED; @@ -1109,14 +1128,14 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, } else if (MatchExpression::GEO == expr->matchType()) { const GeoMatchExpression* gme = static_cast(expr); if ("2dsphere" == elt.valueStringDataSafe()) { - verify(gme->getGeoExpression().getGeometry().hasS2Region()); + MONGO_verify(gme->getGeoExpression().getGeometry().hasS2Region()); const S2Region& region = gme->getGeoExpression().getGeometry().getS2Region(); S2IndexingParams indexParams; ExpressionParams::initialize2dsphereParams(index.infoObj, index.collator, &indexParams); ExpressionMapping::cover2dsphere(region, indexParams, oilOut); *tightnessOut = IndexBoundsBuilder::INEXACT_FETCH; } else if ("2d" == elt.valueStringDataSafe()) { - verify(gme->getGeoExpression().getGeometry().hasR2Region()); + MONGO_verify(gme->getGeoExpression().getGeometry().hasR2Region()); const R2Region& region = gme->getGeoExpression().getGeometry().getR2Region(); ExpressionMapping::cover2d( @@ -1127,7 +1146,7 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, LOGV2_WARNING(20934, "Planner error trying to build geo bounds for an index element", "element"_attr = elt.toString()); - verify(0); + MONGO_verify(0); } } else if (MatchExpression::INTERNAL_BUCKET_GEO_WITHIN == expr->matchType()) { const InternalBucketGeoWithinMatchExpression* ibgwme = @@ -1151,7 +1170,7 @@ void IndexBoundsBuilder::_translatePredicate(const MatchExpression* expr, LOGV2_WARNING(20935, "Planner error while trying to build bounds for expression", "expression"_attr = redact(expr->debugString())); - verify(0); + MONGO_verify(0); } } @@ -1162,9 +1181,9 @@ Interval IndexBoundsBuilder::makeRangeInterval(const BSONObj& obj, BoundInclusio ret.startInclusive = IndexBounds::isStartIncludedInBound(boundInclusion); ret.endInclusive = IndexBounds::isEndIncludedInBound(boundInclusion); BSONObjIterator it(obj); - verify(it.more()); + MONGO_verify(it.more()); ret.start 
= it.next(); - verify(it.more()); + MONGO_verify(it.more()); ret.end = it.next(); return ret; } @@ -1190,7 +1209,7 @@ void IndexBoundsBuilder::intersectize(const OrderedIntervalList& oilA, OrderedIn } Interval::IntervalComparison cmp = oilAIntervals[oilAIdx].compare(oilBIntervals[oilBIdx]); - verify(Interval::INTERVAL_UNKNOWN != cmp); + MONGO_verify(Interval::INTERVAL_UNKNOWN != cmp); if (cmp == Interval::INTERVAL_PRECEDES || cmp == Interval::INTERVAL_PRECEDES_COULD_UNION) { // oilAIntervals is before oilBIntervals. move oilAIntervals forward. @@ -1242,7 +1261,7 @@ void IndexBoundsBuilder::unionize(OrderedIntervalList* oilOut) { Interval::IntervalComparison cmp = iv[i].compare(iv[i + 1]); // This means our sort didn't work. - verify(Interval::INTERVAL_SUCCEEDS != cmp); + MONGO_verify(Interval::INTERVAL_SUCCEEDS != cmp); // Intervals are correctly ordered. if (Interval::INTERVAL_PRECEDES == cmp) { @@ -1345,7 +1364,7 @@ void IndexBoundsBuilder::translateRegex(const RegexMatchExpression* rme, bob.appendMinForType("", String); bob.appendMaxForType("", String); BSONObj dataObj = bob.obj(); - verify(dataObj.isOwned()); + MONGO_verify(dataObj.isOwned()); oilOut->intervals.push_back( makeRangeInterval(dataObj, BoundInclusion::kIncludeStartKeyOnly)); } @@ -1376,7 +1395,7 @@ void IndexBoundsBuilder::translateEquality(const BSONElement& data, dataObj = ExpressionMapping::hash(dataObj.firstElement()); } - verify(dataObj.isOwned()); + MONGO_verify(dataObj.isOwned()); oil->intervals.push_back(makePointInterval(dataObj)); if (isHashed) { diff --git a/src/mongo/db/query/index_bounds_builder.h b/src/mongo/db/query/index_bounds_builder.h index d1067caa5617d..54d43c17cc7f0 100644 --- a/src/mongo/db/query/index_bounds_builder.h +++ b/src/mongo/db/query/index_bounds_builder.h @@ -29,11 +29,21 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/hasher.h" #include "mongo/db/jsobj.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" #include "mongo/db/query/interval_evaluation_tree.h" namespace mongo { diff --git a/src/mongo/db/query/index_bounds_builder_collator_test.cpp b/src/mongo/db/query/index_bounds_builder_collator_test.cpp index f57c49cc0599a..d5de4b647cdb8 100644 --- a/src/mongo/db/query/index_bounds_builder_collator_test.cpp +++ b/src/mongo/db/query/index_bounds_builder_collator_test.cpp @@ -27,12 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/index_bounds_builder_test.h" - +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/expression_index.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_bounds_builder_test.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/interval_evaluation_tree.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/index_bounds_builder_eq_null_test.cpp b/src/mongo/db/query/index_bounds_builder_eq_null_test.cpp index f8752b2f757e1..b812d407d5d65 100644 --- a/src/mongo/db/query/index_bounds_builder_eq_null_test.cpp +++ b/src/mongo/db/query/index_bounds_builder_eq_null_test.cpp @@ -27,11 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/index_bounds_builder_test.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/query/expression_index.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_bounds_builder_test.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/interval_evaluation_tree.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/index_bounds_builder_interval_test.cpp b/src/mongo/db/query/index_bounds_builder_interval_test.cpp index 3ccb72a91c29f..944419630ec45 100644 --- a/src/mongo/db/query/index_bounds_builder_interval_test.cpp +++ b/src/mongo/db/query/index_bounds_builder_interval_test.cpp @@ -27,9 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" -#include "mongo/db/query/index_bounds_builder_test.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/interval.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/index_bounds_builder_regex_test.cpp b/src/mongo/db/query/index_bounds_builder_regex_test.cpp index d6dde0c5983cd..4a3a3c0e8f629 100644 --- a/src/mongo/db/query/index_bounds_builder_regex_test.cpp +++ b/src/mongo/db/query/index_bounds_builder_regex_test.cpp @@ -27,11 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/index_bounds_builder_test.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_bounds_builder_test.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/interval_evaluation_tree.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/index_bounds_builder_test.cpp b/src/mongo/db/query/index_bounds_builder_test.cpp index 4a93c705fc071..01c2fa6191241 100644 --- a/src/mongo/db/query/index_bounds_builder_test.cpp +++ b/src/mongo/db/query/index_bounds_builder_test.cpp @@ -27,20 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/index_bounds_builder_test.h" - +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include #include -#include "mongo/db/json.h" -#include "mongo/db/matcher/expression_parser.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/expression_index.h" +#include "mongo/db/query/index_bounds_builder_test.h" +#include "mongo/db/query/interval.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/stdx/type_traits.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/time_support.h" namespace { diff --git a/src/mongo/db/query/index_bounds_builder_test.h b/src/mongo/db/query/index_bounds_builder_test.h index c8c036f2d83e4..07d59f6cce58c 100644 --- a/src/mongo/db/query/index_bounds_builder_test.h +++ b/src/mongo/db/query/index_bounds_builder_test.h @@ -29,11 +29,38 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/interval_evaluation_tree.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/query/index_bounds_builder_type_test.cpp b/src/mongo/db/query/index_bounds_builder_type_test.cpp index 43d057d0511e2..5c4281ac5858b 100644 --- a/src/mongo/db/query/index_bounds_builder_type_test.cpp +++ b/src/mongo/db/query/index_bounds_builder_type_test.cpp @@ -27,11 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/index_bounds_builder_test.h" - #include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_bounds_builder_test.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/interval_evaluation_tree.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/index_bounds_test.cpp b/src/mongo/db/query/index_bounds_test.cpp index c9fb1442e5ecc..61864fac28bb0 100644 --- a/src/mongo/db/query/index_bounds_test.cpp +++ b/src/mongo/db/query/index_bounds_test.cpp @@ -31,15 +31,19 @@ * This file contains tests for mongo/db/query/index_bounds.cpp */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/query/index_bounds.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep using namespace mongo; diff --git a/src/mongo/db/query/index_entry.cpp b/src/mongo/db/query/index_entry.cpp index 74be2c8ab32e0..4f06051fad77b 100644 --- a/src/mongo/db/query/index_entry.cpp +++ b/src/mongo/db/query/index_entry.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/query/index_entry.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/query/index_entry.h b/src/mongo/db/query/index_entry.h index 5416bb274c445..05463f229fb75 100644 --- a/src/mongo/db/query/index_entry.h +++ b/src/mongo/db/query/index_entry.h @@ -29,20 +29,35 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include #include #include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/field_ref.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/index_names.h" #include "mongo/db/jsobj.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/container_size_helper.h" #include "mongo/util/str.h" namespace mongo { class CollatorInterface; class MatchExpression; + class IndexPathProjection; using WildcardProjection = IndexPathProjection; diff --git a/src/mongo/db/query/index_entry_test.cpp b/src/mongo/db/query/index_entry_test.cpp index b52a429833fed..ecc4b43e98edb 100644 --- a/src/mongo/db/query/index_entry_test.cpp +++ b/src/mongo/db/query/index_entry_test.cpp @@ -27,12 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/query/index_entry.h" +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/query/index_hint.cpp b/src/mongo/db/query/index_hint.cpp new file mode 100644 index 0000000000000..91bf78bac866c --- /dev/null +++ b/src/mongo/db/query/index_hint.cpp @@ -0,0 +1,111 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/query/index_hint.h" + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" + +namespace mongo { +namespace { +static constexpr auto kNaturalFieldName = "$natural"_sd; +}; // namespace + +IndexHint IndexHint::parse(const BSONElement& element) { + if (element.type() == BSONType::String) { + return IndexHint(element.String()); + } else if (element.type() == BSONType::Object) { + auto obj = element.Obj(); + if (obj.firstElementFieldName() == kNaturalFieldName) { + switch (obj.firstElement().numberInt()) { + case 1: + return IndexHint(NaturalOrderHint(NaturalOrderHint::Direction::kForward)); + case -1: + return IndexHint(NaturalOrderHint(NaturalOrderHint::Direction::kBackward)); + default: + uasserted(ErrorCodes::FailedToParse, + str::stream() << "$natural hint may only accept 1 or -1, not " + << element.toString()); + } + } + return IndexHint(obj.getOwned()); + } else { + uasserted(ErrorCodes::FailedToParse, "Hint must be a string or an object"); + } +} + +void IndexHint::append(const IndexHint& hint, StringData fieldName, BSONObjBuilder* builder) { + stdx::visit( + OverloadedVisitor{ + [&](const IndexKeyPattern& keyPattern) { builder->append(fieldName, keyPattern); }, + [&](const IndexName& indexName) { builder->append(fieldName, indexName); }, + [&](const NaturalOrderHint& naturalOrderHint) { + builder->append(fieldName, BSON(kNaturalFieldName << naturalOrderHint.direction)); + }}, + hint._hint); +} + +void IndexHint::append(BSONArrayBuilder* builder) const { + stdx::visit(OverloadedVisitor{ + [&](const IndexKeyPattern& keyPattern) { builder->append(keyPattern); }, + [&](const IndexName& indexName) { builder->append(indexName); }, + [&](const NaturalOrderHint& naturalOrderHint) { + builder->append(BSON(kNaturalFieldName << naturalOrderHint.direction)); + }}, + _hint); +} + +boost::optional IndexHint::getIndexKeyPattern() const { + if (!stdx::holds_alternative(_hint)) { + return {}; + } + return stdx::get(_hint); +} + +boost::optional IndexHint::getIndexName() const { + if (!stdx::holds_alternative(_hint)) { + return {}; + } + return stdx::get(_hint); +} + +boost::optional IndexHint::getNaturalHint() const { + if (!stdx::holds_alternative(_hint)) { + return {}; + } + return stdx::get(_hint); +} + +}; // namespace mongo diff --git a/src/mongo/db/query/index_hint.h b/src/mongo/db/query/index_hint.h new file mode 100644 index 0000000000000..6f80c5a07d76d --- /dev/null +++ b/src/mongo/db/query/index_hint.h @@ -0,0 +1,93 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
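The index_hint.cpp hunk above dispatches over the three hint alternatives with `stdx::visit` and an `OverloadedVisitor`. Below is a standalone sketch of that dispatch pattern using plain `std::variant` and a hand-rolled `overloaded` helper, so the mechanism is visible without the server's wrappers; the types and strings are illustrative stand-ins, not MongoDB's.

```cpp
#include <iostream>
#include <string>
#include <variant>

// Stand-ins for the three hint alternatives handled above.
struct KeyPattern { std::string spec; };   // e.g. {x: 1}
using IndexName = std::string;             // e.g. "x_1"
struct NaturalHint { int direction; };     // 1 or -1

using Hint = std::variant<KeyPattern, IndexName, NaturalHint>;

// Minimal "overloaded" helper: inherits the call operator of each lambda.
template <class... Ts>
struct overloaded : Ts... { using Ts::operator()...; };
template <class... Ts>
overloaded(Ts...) -> overloaded<Ts...>;

void describe(const Hint& hint) {
    std::visit(overloaded{
                   [](const KeyPattern& kp) { std::cout << "key pattern " << kp.spec << '\n'; },
                   [](const IndexName& name) { std::cout << "index name " << name << '\n'; },
                   [](const NaturalHint& n) { std::cout << "$natural " << n.direction << '\n'; },
               },
               hint);
}

int main() {
    describe(Hint{KeyPattern{"{x: 1}"}});
    describe(Hint{IndexName{"x_1"}});
    describe(Hint{NaturalHint{-1}});
}
```

The `stdx::` wrappers used in the hunk above play the same role as the standard-library pieces here; only the visitor-over-variant shape is what the example is meant to show.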
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/stdx/variant.h" + +namespace mongo { + +using IndexKeyPattern = BSONObj; +using IndexName = std::string; + +/** + * Struct representing a $natural hint. The natural sort hint $natural is used to: + * - include collection scan into a list of valid plans generated by the optimizer, which will + * return the documents of the collection in natural order. + * - specify a natural sort when running a find operation against a view. + */ +struct NaturalOrderHint { + enum class Direction { + kForward = 1, + kBackward = -1, + }; + + explicit NaturalOrderHint(Direction direction) : direction(direction) {} + + Direction direction; +}; + +/** + * Class represents all possible index hint definitions. Index hint may be specified as: + * - Index key pattern, defined as a BSONObj + * - Index name + * - $natural hint + */ +class IndexHint { +public: + explicit IndexHint(IndexKeyPattern keyPattern) : _hint(keyPattern) {} + explicit IndexHint(IndexName indexName) : _hint(indexName) {} + explicit IndexHint(NaturalOrderHint hint) : _hint(hint) {} + + static IndexHint parse(const BSONElement& element); + static void append(const IndexHint& hint, StringData fieldName, BSONObjBuilder* builder); + + void append(BSONArrayBuilder* builder) const; + + const stdx::variant& getHint() const { + return _hint; + } + boost::optional getIndexKeyPattern() const; + boost::optional getIndexName() const; + boost::optional getNaturalHint() const; + +private: + stdx::variant _hint; +}; // namespace index_hint + + +} // namespace mongo diff --git a/src/mongo/db/query/index_hint.idl b/src/mongo/db/query/index_hint.idl new file mode 100644 index 0000000000000..0bfdd17e7da0e --- /dev/null +++ b/src/mongo/db/query/index_hint.idl @@ -0,0 +1,45 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . 
+# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo" + cpp_includes: + - "mongo/db/query/index_hint.h" + +imports: + - "mongo/db/basic_types.idl" + +types: + IndexHint: + description: >- + Type representing possible index hint definition, including index key + pattern, index name and $natural hint. + cpp_type: IndexHint + bson_serialization_type: any + serializer: "mongo::IndexHint::append" + deserializer: "mongo::IndexHint::parse" diff --git a/src/mongo/db/query/index_hint_test.cpp b/src/mongo/db/query/index_hint_test.cpp new file mode 100644 index 0000000000000..7316457c42591 --- /dev/null +++ b/src/mongo/db/query/index_hint_test.cpp @@ -0,0 +1,81 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/query/index_hint.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" + +namespace mongo { +namespace { + +TEST(IndexHint, ParseKeyPatternHint) { + auto hint = BSON("hint" << BSON("x" << 5)); + ASSERT_BSONOBJ_EQ(*IndexHint::parse(hint.firstElement()).getIndexKeyPattern(), BSON("x" << 5)); +} + +TEST(IndexHint, ParseIndexNameHint) { + auto indexName = "x_1"; + auto hint = BSON("hint" << indexName); + ASSERT_EQ(*IndexHint::parse(hint.firstElement()).getIndexName(), indexName); +} + +TEST(IndexHint, ParseNaturalHint) { + auto hint = BSON("hint" << BSON("$natural" << 1)); + ASSERT_EQ(IndexHint::parse(hint.firstElement()).getNaturalHint()->direction, + NaturalOrderHint::Direction::kForward); +} + +TEST(IndexHint, BadHintType) { + auto hint = BSON("hint" << 1); + ASSERT_THROWS_CODE( + IndexHint::parse(hint.firstElement()), AssertionException, ErrorCodes::FailedToParse); +} + +TEST(IndexHint, ShouldRejectHintAsArray) { + BSONObj arrayHint = BSON("hint" << BSON_ARRAY("invalid" + << "hint")); + ASSERT_THROWS_CODE( + IndexHint::parse(arrayHint.firstElement()), AssertionException, ErrorCodes::FailedToParse); +} + +TEST(IndexHint, SerializeNonEmptyHint) { + auto indexKeyPattern = BSON("x" << 1); + auto hint = IndexHint(indexKeyPattern); + BSONObjBuilder bob; + IndexHint::append(hint, "hint", &bob); + ASSERT_BSONOBJ_EQ(bob.obj(), BSON("hint" << indexKeyPattern)); +} + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/query/index_tag.cpp b/src/mongo/db/query/index_tag.cpp index c71bb810ab910..615907421ae19 100644 --- a/src/mongo/db/query/index_tag.cpp +++ b/src/mongo/db/query/index_tag.cpp @@ -29,13 +29,22 @@ #include "mongo/db/query/index_tag.h" -#include "mongo/db/matcher/expression_array.h" -#include "mongo/db/matcher/expression_tree.h" -#include "mongo/db/query/indexability.h" -#include "mongo/stdx/unordered_map.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include #include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" + +#include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/matcher/expression_path.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -125,21 +134,33 @@ void sortUsingTags(MatchExpression* tree) { }); } -// Attaches 'node' to 'target'. If 'target' is an AND, adds 'node' as a child of 'target'. -// Otherwise, creates an AND that is a child of 'targetParent' at position 'targetPosition', and -// adds 'target' and 'node' as its children. Tags 'node' with 'tagData'. +/** + * Attaches 'node' to 'target'. If 'target' is an AND, adds 'node' as a child of 'target'. + * Otherwise, creates an AND that is a child of 'targetParent' at position 'targetPosition', and + * adds 'target' and 'node' as its children. Tags 'node' with 'tagData'. If 'node' appears as a key + * in 'pathsToUpdate', then we set the new path onto the clone. 
+ */ void attachNode(MatchExpression* node, MatchExpression* target, OrMatchExpression* targetParent, size_t targetPosition, - std::unique_ptr tagData) { + std::unique_ptr tagData, + const stdx::unordered_map& pathsToUpdate) { auto clone = node->clone(); if (clone->matchType() == MatchExpression::NOT) { IndexTag* indexTag = checked_cast(tagData.get()); clone->setTag(new IndexTag(indexTag->index)); clone->getChild(0)->setTag(tagData.release()); + + if (auto it = pathsToUpdate.find(node->getChild(0)); it != pathsToUpdate.end()) { + checked_cast(clone->getChild(0)) + ->setPath(it->second.dottedField()); + } } else { clone->setTag(tagData.release()); + if (auto it = pathsToUpdate.find(node); it != pathsToUpdate.end()) { + checked_cast(clone.get())->setPath(it->second.dottedField()); + } } if (MatchExpression::AND == target->matchType()) { @@ -169,17 +190,24 @@ stdx::unordered_map> partitionCh return childDestinations; } -// Finds the node within 'tree' that is an indexed OR, if one exists. -MatchExpression* getIndexedOr(MatchExpression* tree) { +/** + * Finds the node within 'tree' that is an indexed OR, if one exists. It also returns the subpath in + * which the indexed OR lives. + */ +std::pair getIndexedOr(FieldRef currentPath, MatchExpression* tree) { if (MatchExpression::OR == tree->matchType() && tree->getTag()) { - return tree; + return {tree, std::move(currentPath)}; + } + if (const auto* fieldRef = tree->fieldRef()) { + currentPath = currentPath + *fieldRef; } + for (size_t i = 0; i < tree->numChildren(); ++i) { - if (auto indexedOrChild = getIndexedOr(tree->getChild(i))) { - return indexedOrChild; + if (auto result = getIndexedOr(currentPath, tree->getChild(i)); result.first) { + return result; } } - return nullptr; + return {}; } // Pushes down 'node' along the routes in 'target' specified in 'destinations'. Each value in the @@ -187,7 +215,8 @@ MatchExpression* getIndexedOr(MatchExpression* tree) { // descendant of 'target'. bool pushdownNode(MatchExpression* node, MatchExpression* target, - std::vector destinations) { + std::vector destinations, + const stdx::unordered_map& pathsToUpdate) { if (MatchExpression::OR == target->matchType()) { OrMatchExpression* orNode = static_cast(target); bool moveToAllChildren = true; @@ -211,13 +240,15 @@ bool pushdownNode(MatchExpression* node, orNode->getChild(i), orNode, i, - std::move(childDestinations->second[0].tagData)); + std::move(childDestinations->second[0].tagData), + pathsToUpdate); } else { // This child was specified by a non-trivial route in destinations, so we recur. moveToAllChildren = pushdownNode(node, orNode->getChild(i), - std::move(childDestinations->second)) && + std::move(childDestinations->second), + pathsToUpdate) && moveToAllChildren; } } @@ -226,36 +257,81 @@ bool pushdownNode(MatchExpression* node, } if (MatchExpression::AND == target->matchType()) { - auto indexedOr = getIndexedOr(target); + auto [indexedOr, fieldRef_unused] = getIndexedOr({} /*fieldRef*/, target); invariant(indexedOr); - return pushdownNode(node, indexedOr, std::move(destinations)); + return pushdownNode(node, indexedOr, std::move(destinations), pathsToUpdate); } MONGO_UNREACHABLE_TASSERT(4457014); } -// Populates 'out' with all descendants of 'node' that have OrPushdownTags, assuming the initial -// input is an ELEM_MATCH_OBJECT. -void getElemMatchOrPushdownDescendants(MatchExpression* node, std::vector* out) { +/** + * Populates 'out' with all descendants of 'node' that have OrPushdownTags, assuming the initial + * input is an ELEM_MATCH_OBJECT. 
The "currentPath" argument is the combined path traversed so far. + * Additionally, we populate a map to keep track of paths to update afterward during cloning. + */ +void getElemMatchOrPushdownDescendants( + const FieldRef& indexedOrPath, + FieldRef currentPath, + MatchExpression* node, + std::vector* out, + stdx::unordered_map* pathsToUpdate) { + const bool updatePath = node->fieldRef() != nullptr; + if (updatePath) { + currentPath = currentPath + *node->fieldRef(); + } + + // Do not do extra pushdown of OR inside $elemmatch. if (node->getTag() && node->getTag()->getType() == TagType::OrPushdownTag) { + if (updatePath) { + // Make sure that we remove the common prefix between the "destination" OR and the + // current expression, as it may be contained within the same $elemmatch. + + const auto prefixSize = indexedOrPath.commonPrefixSize(currentPath); + for (auto i = 0; i < prefixSize; i++) { + currentPath.removeFirstPart(); + } + if (currentPath != *node->fieldRef()) { + pathsToUpdate->emplace(node, std::move(currentPath)); + } + } out->push_back(node); } else if (node->matchType() == MatchExpression::ELEM_MATCH_OBJECT || node->matchType() == MatchExpression::AND) { for (size_t i = 0; i < node->numChildren(); ++i) { - getElemMatchOrPushdownDescendants(node->getChild(i), out); + getElemMatchOrPushdownDescendants( + indexedOrPath, currentPath, node->getChild(i), out, pathsToUpdate); } } else if (node->matchType() == MatchExpression::NOT) { // The immediate child of NOT may be tagged, but there should be no tags deeper than this. auto* childNode = node->getChild(0); if (childNode->getTag() && childNode->getTag()->getType() == TagType::OrPushdownTag) { + if (!childNode->path().empty()) { + // Make sure that we remove the common prefix between the "destination" OR and the + // current expression, as it may be contained within the same $elemmatch. + + currentPath = currentPath + *childNode->fieldRef(); + const auto prefixSize = indexedOrPath.commonPrefixSize(currentPath); + for (auto i = 0; i < prefixSize; i++) { + currentPath.removeFirstPart(); + } + if (currentPath != *childNode->fieldRef()) { + pathsToUpdate->emplace(childNode, std::move(currentPath)); + } + } out->push_back(node); } } } -// Attempts to push the given node down into the 'indexedOr' subtree. Returns true if the predicate -// can subsequently be trimmed from the MatchExpression tree, false otherwise. -bool processOrPushdownNode(MatchExpression* node, MatchExpression* indexedOr) { +/** + * Attempts to push the given node down into the 'indexedOr' subtree. Returns true if the predicate + * can subsequently be trimmed from the MatchExpression tree, false otherwise. Also supplied is a + * map to optionally update the path of the 'node' being pushed down. + */ +bool processOrPushdownNode(MatchExpression* node, + MatchExpression* indexedOr, + const stdx::unordered_map& pathsToUpdate) { // If the node is a negation, then its child is the predicate node that may be tagged. auto* predNode = node->matchType() == MatchExpression::NOT ? node->getChild(0) : node; @@ -272,7 +348,7 @@ bool processOrPushdownNode(MatchExpression* node, MatchExpression* indexedOr) { predNode->setTag(nullptr); // Attempt to push the node into the indexedOr, then re-set its tag to the indexTag. - const bool pushedDown = pushdownNode(node, indexedOr, std::move(destinations)); + const bool pushedDown = pushdownNode(node, indexedOr, std::move(destinations), pathsToUpdate); predNode->setTag(indexTag.release()); // Return true if we can trim the predicate. 
We could trim the node even if it had an index tag @@ -289,7 +365,7 @@ void resolveOrPushdowns(MatchExpression* tree) { } if (MatchExpression::AND == tree->matchType()) { AndMatchExpression* andNode = static_cast(tree); - MatchExpression* indexedOr = getIndexedOr(andNode); + auto [indexedOr, indexedOrPath] = getIndexedOr({} /*fieldRef*/, andNode); if (indexedOr) { for (size_t i = 0; i < andNode->numChildren(); ++i) { @@ -300,11 +376,17 @@ void resolveOrPushdowns(MatchExpression* tree) { // entirety. if (child->matchType() == MatchExpression::ELEM_MATCH_OBJECT) { std::vector orPushdownDescendants; - getElemMatchOrPushdownDescendants(child, &orPushdownDescendants); + stdx::unordered_map pathsToUpdate; + getElemMatchOrPushdownDescendants(indexedOrPath, + {} /*currentPath*/, + child, + &orPushdownDescendants, + &pathsToUpdate); for (auto descendant : orPushdownDescendants) { - static_cast(processOrPushdownNode(descendant, indexedOr)); + static_cast( + processOrPushdownNode(descendant, indexedOr, pathsToUpdate)); } - } else if (processOrPushdownNode(child, indexedOr)) { + } else if (processOrPushdownNode(child, indexedOr, {} /*pathsToUpdate*/)) { // The indexed $or can completely satisfy the child predicate, so we trim it. auto ownedChild = andNode->removeChild(i); --i; diff --git a/src/mongo/db/query/index_tag.h b/src/mongo/db/query/index_tag.h index 5f7493d3de60b..fe96c9b017f94 100644 --- a/src/mongo/db/query/index_tag.h +++ b/src/mongo/db/query/index_tag.h @@ -29,10 +29,15 @@ #pragma once +#include #include +#include +#include +#include #include #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/matcher/expression.h" namespace mongo { diff --git a/src/mongo/db/query/internal_plans.cpp b/src/mongo/db/query/internal_plans.cpp index de914d66f2727..85720b18780b9 100644 --- a/src/mongo/db/query/internal_plans.cpp +++ b/src/mongo/db/query/internal_plans.cpp @@ -27,23 +27,38 @@ * it in the license file. 
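The index_tag.cpp changes above re-path predicates cloned out of an `$elemMatch` before attaching them under the indexed OR: the components shared with the indexed OR's own path are dropped via `commonPrefixSize` plus a `removeFirstPart` loop. A standalone sketch of just that prefix-trimming arithmetic, with dotted paths modelled as component vectors rather than the server's `FieldRef`:

```cpp
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

using Path = std::vector<std::string>;  // "a.b.c" -> {"a", "b", "c"}

// Number of leading components two paths share (analogue of commonPrefixSize).
std::size_t commonPrefixSize(const Path& a, const Path& b) {
    std::size_t n = 0;
    while (n < a.size() && n < b.size() && a[n] == b[n]) {
        ++n;
    }
    return n;
}

// Drop the components 'currentPath' shares with 'indexedOrPath', mirroring the
// removeFirstPart loop above: the clone pushed under the indexed OR keeps only
// the path relative to that OR.
Path trimCommonPrefix(const Path& indexedOrPath, Path currentPath) {
    const std::size_t prefix = commonPrefixSize(indexedOrPath, currentPath);
    currentPath.erase(currentPath.begin(), currentPath.begin() + prefix);
    return currentPath;
}

int main() {
    // The indexed OR lives under "a"; the $elemMatch predicate's full path is "a.b.c".
    Path indexedOr = {"a"};
    Path predicate = {"a", "b", "c"};
    Path relative = trimCommonPrefix(indexedOr, predicate);
    assert((relative == Path{"b", "c"}));  // the clone is re-pathed to "b.c"
}
```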
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/internal_plans.h" - +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +// IWYU pragma: no_include "boost/move/detail/iterator_to_raw_pointer.hpp" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" #include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/exec/collection_scan.h" -#include "mongo/db/exec/eof.h" #include "mongo/db/exec/fetch.h" #include "mongo/db/exec/idhack.h" #include "mongo/db/exec/index_scan.h" +#include "mongo/db/exec/limit.h" +#include "mongo/db/exec/multi_iterator.h" #include "mongo/db/exec/update_stage.h" #include "mongo/db/exec/upsert_stage.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/get_executor.h" +#include "mongo/db/query/internal_plans.h" #include "mongo/db/query/plan_executor_factory.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { @@ -145,9 +160,41 @@ CollectionScanParams createCollectionScanParams( } } // namespace +std::unique_ptr InternalPlanner::sampleCollection( + OperationContext* opCtx, + VariantCollectionPtrOrAcquisition collection, + PlanYieldPolicy::YieldPolicy yieldPolicy, + boost::optional numSamples) { + const auto& collectionPtr = collection.getCollectionPtr(); + invariant(collectionPtr); + + std::unique_ptr ws = std::make_unique(); + + auto expCtx = make_intrusive( + opCtx, std::unique_ptr(nullptr), collectionPtr->ns()); + + auto rsRandCursor = collectionPtr->getRecordStore()->getRandomCursor(opCtx); + std::unique_ptr root = + std::make_unique(expCtx.get(), ws.get(), collection); + static_cast(root.get())->addIterator(std::move(rsRandCursor)); + + if (numSamples) { + auto samples = *numSamples; + invariant(samples >= 0, + "Number of samples must be >= 0, otherwise LimitStage it will never end"); + root = std::make_unique(expCtx.get(), samples, ws.get(), std::move(root)); + } + + auto statusWithPlanExecutor = plan_executor_factory::make( + expCtx, std::move(ws), std::move(root), collection, yieldPolicy, false); + + invariant(statusWithPlanExecutor.isOK()); + return std::move(statusWithPlanExecutor.getValue()); +} + std::unique_ptr InternalPlanner::collectionScan( OperationContext* opCtx, - const CollectionPtr* coll, + VariantCollectionPtrOrAcquisition collection, PlanYieldPolicy::YieldPolicy yieldPolicy, const Direction direction, const boost::optional& resumeAfterRecordId, @@ -155,17 +202,17 @@ std::unique_ptr InternalPlanner::collection boost::optional maxRecord, CollectionScanParams::ScanBoundInclusion boundInclusion, bool shouldReturnEofOnFilterMismatch) { - const auto& collection = *coll; - invariant(collection); + const auto& collectionPtr = collection.getCollectionPtr(); + invariant(collectionPtr); std::unique_ptr ws = std::make_unique(); auto expCtx = make_intrusive( - opCtx, std::unique_ptr(nullptr), collection->ns()); + opCtx, std::unique_ptr(nullptr), collectionPtr->ns()); auto collScanParams = createCollectionScanParams(expCtx, ws.get(), - coll, + &collectionPtr, direction, resumeAfterRecordId, minRecord, @@ -173,14 +220,14 @@ std::unique_ptr InternalPlanner::collection boundInclusion, 
shouldReturnEofOnFilterMismatch); - auto cs = _collectionScan(expCtx, ws.get(), &collection, collScanParams); + auto cs = _collectionScan(expCtx, ws.get(), &collectionPtr, collScanParams); // Takes ownership of 'ws' and 'cs'. auto statusWithPlanExecutor = plan_executor_factory::make(expCtx, std::move(ws), std::move(cs), - &collection, + collection, yieldPolicy, false /* whether owned BSON must be returned */); invariant(statusWithPlanExecutor.isOK()); @@ -215,7 +262,7 @@ std::unique_ptr InternalPlanner::collection std::unique_ptr InternalPlanner::deleteWithCollectionScan( OperationContext* opCtx, - const CollectionPtr* coll, + CollectionAcquisition coll, std::unique_ptr params, PlanYieldPolicy::YieldPolicy yieldPolicy, Direction direction, @@ -225,8 +272,8 @@ std::unique_ptr InternalPlanner::deleteWith std::unique_ptr batchedDeleteParams, const MatchExpression* filter, bool shouldReturnEofOnFilterMismatch) { - const auto& collection = *coll; - invariant(collection); + const auto& collectionPtr = coll.getCollectionPtr(); + invariant(collectionPtr); if (shouldReturnEofOnFilterMismatch) { tassert(7010801, "MatchExpression filter must be provided when 'shouldReturnEofOnFilterMismatch' is " @@ -236,15 +283,15 @@ std::unique_ptr InternalPlanner::deleteWith auto ws = std::make_unique(); auto expCtx = make_intrusive( - opCtx, std::unique_ptr(nullptr), collection->ns()); + opCtx, std::unique_ptr(nullptr), collectionPtr->ns()); - if (collection->isCapped()) { + if (collectionPtr->isCapped()) { expCtx->setIsCappedDelete(); } auto collScanParams = createCollectionScanParams(expCtx, ws.get(), - coll, + &collectionPtr, direction, boost::none /* resumeAfterId */, minRecord, @@ -252,24 +299,24 @@ std::unique_ptr InternalPlanner::deleteWith boundInclusion, shouldReturnEofOnFilterMismatch); - auto root = _collectionScan(expCtx, ws.get(), &collection, collScanParams, filter); + auto root = _collectionScan(expCtx, ws.get(), &collectionPtr, collScanParams, filter); if (batchedDeleteParams) { root = std::make_unique(expCtx.get(), std::move(params), std::move(batchedDeleteParams), ws.get(), - collection, + coll, root.release()); } else { root = std::make_unique( - expCtx.get(), std::move(params), ws.get(), collection, root.release()); + expCtx.get(), std::move(params), ws.get(), coll, root.release()); } auto executor = plan_executor_factory::make(expCtx, std::move(ws), std::move(root), - &collection, + coll, yieldPolicy, false /* whether owned BSON must be returned */ ); @@ -316,7 +363,7 @@ std::unique_ptr InternalPlanner::indexScan( std::unique_ptr InternalPlanner::deleteWithIndexScan( OperationContext* opCtx, - const CollectionPtr* coll, + CollectionAcquisition coll, std::unique_ptr params, const IndexDescriptor* descriptor, const BSONObj& startKey, @@ -325,16 +372,16 @@ std::unique_ptr InternalPlanner::deleteWith PlanYieldPolicy::YieldPolicy yieldPolicy, Direction direction, std::unique_ptr batchedDeleteParams) { - const auto& collection = *coll; - invariant(collection); + const auto& collectionPtr = coll.getCollectionPtr(); + invariant(collectionPtr); auto ws = std::make_unique(); auto expCtx = make_intrusive( - opCtx, std::unique_ptr(nullptr), collection->ns()); + opCtx, std::unique_ptr(nullptr), collectionPtr->ns()); std::unique_ptr root = _indexScan(expCtx, ws.get(), - &collection, + &collectionPtr, descriptor, startKey, endKey, @@ -347,17 +394,17 @@ std::unique_ptr InternalPlanner::deleteWith std::move(params), std::move(batchedDeleteParams), ws.get(), - collection, + coll, root.release()); } else { root 
= std::make_unique( - expCtx.get(), std::move(params), ws.get(), collection, root.release()); + expCtx.get(), std::move(params), ws.get(), coll, root.release()); } auto executor = plan_executor_factory::make(expCtx, std::move(ws), std::move(root), - &collection, + coll, yieldPolicy, false /* whether owned BSON must be returned */ ); @@ -394,7 +441,7 @@ std::unique_ptr InternalPlanner::shardKeyIn std::unique_ptr InternalPlanner::deleteWithShardKeyIndexScan( OperationContext* opCtx, - const CollectionPtr* coll, + CollectionAcquisition coll, std::unique_ptr params, const ShardKeyIndex& shardKeyIdx, const BSONObj& startKey, @@ -413,25 +460,30 @@ std::unique_ptr InternalPlanner::deleteWith yieldPolicy, direction); } - auto collectionScanParams = convertIndexScanParamsToCollScanParams( - opCtx, coll, shardKeyIdx.keyPattern(), startKey, endKey, boundInclusion, direction); + auto collectionScanParams = convertIndexScanParamsToCollScanParams(opCtx, + &coll.getCollectionPtr(), + shardKeyIdx.keyPattern(), + startKey, + endKey, + boundInclusion, + direction); - const auto& collection = *coll; - invariant(collection); + const auto& collectionPtr = coll.getCollectionPtr(); + invariant(collectionPtr); std::unique_ptr ws = std::make_unique(); auto expCtx = make_intrusive( - opCtx, std::unique_ptr(nullptr), collection->ns()); + opCtx, std::unique_ptr(nullptr), collectionPtr->ns()); - auto root = _collectionScan(expCtx, ws.get(), &collection, collectionScanParams); + auto root = _collectionScan(expCtx, ws.get(), &collectionPtr, collectionScanParams); root = std::make_unique( - expCtx.get(), std::move(params), ws.get(), collection, root.release()); + expCtx.get(), std::move(params), ws.get(), coll, root.release()); auto executor = plan_executor_factory::make(expCtx, std::move(ws), std::move(root), - &collection, + coll, yieldPolicy, false /* whether owned BSON must be returned */ ); @@ -441,17 +493,17 @@ std::unique_ptr InternalPlanner::deleteWith std::unique_ptr InternalPlanner::updateWithIdHack( OperationContext* opCtx, - const CollectionPtr* coll, + CollectionAcquisition collection, const UpdateStageParams& params, const IndexDescriptor* descriptor, const BSONObj& key, PlanYieldPolicy::YieldPolicy yieldPolicy) { - const auto& collection = *coll; - invariant(collection); + const auto& collectionPtr = collection.getCollectionPtr(); + invariant(collectionPtr); auto ws = std::make_unique(); auto expCtx = make_intrusive( - opCtx, std::unique_ptr(nullptr), collection->ns()); + opCtx, std::unique_ptr(nullptr), collectionPtr->ns()); auto idHackStage = std::make_unique(expCtx.get(), key, ws.get(), collection, descriptor); @@ -465,10 +517,9 @@ std::unique_ptr InternalPlanner::updateWith auto executor = plan_executor_factory::make(expCtx, std::move(ws), std::move(root), - &collection, + collection, yieldPolicy, - false /* whether owned BSON must be returned */ - ); + false /* whether owned BSON must be returned */); invariant(executor.getStatus()); return std::move(executor.getValue()); } @@ -483,7 +534,7 @@ std::unique_ptr InternalPlanner::_collectionScan( const auto& collection = *coll; invariant(collection); - return std::make_unique(expCtx.get(), collection, params, ws, filter); + return std::make_unique(expCtx.get(), coll, params, ws, filter); } std::unique_ptr InternalPlanner::_indexScan( @@ -509,10 +560,10 @@ std::unique_ptr InternalPlanner::_indexScan( params.shouldDedup = descriptor->getEntry()->isMultikey(expCtx->opCtx, collection); std::unique_ptr root = - std::make_unique(expCtx.get(), collection, 
std::move(params), ws, nullptr); + std::make_unique(expCtx.get(), coll, std::move(params), ws, nullptr); if (InternalPlanner::IXSCAN_FETCH & options) { - root = std::make_unique(expCtx.get(), ws, std::move(root), nullptr, collection); + root = std::make_unique(expCtx.get(), ws, std::move(root), nullptr, coll); } return root; diff --git a/src/mongo/db/query/internal_plans.h b/src/mongo/db/query/internal_plans.h index aa1beb31e2132..59aa950980302 100644 --- a/src/mongo/db/query/internal_plans.h +++ b/src/mongo/db/query/internal_plans.h @@ -29,14 +29,32 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/exec/batched_delete_stage.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/delete_stage.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/index_bounds.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" #include "mongo/db/record_id.h" #include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/shard_role.h" namespace mongo { @@ -46,6 +64,7 @@ class CollectionPtr; class IndexDescriptor; class OperationContext; class PlanStage; +class CollectionAcquisition; class WorkingSet; struct UpdateStageParams; @@ -70,13 +89,27 @@ class InternalPlanner { IXSCAN_FETCH = 1, }; + /** + * Returns a sampling of the given collection with up to 'numSamples'. If the caller doesn't + * provide a value for 'numSamples' then the executor will return an infinite stream of random + * documents of the collection. + * + * Note that the set of documents returned can contain duplicates. Sampling is performed + * without memory of the previous results. + */ + static std::unique_ptr sampleCollection( + OperationContext* opCtx, + VariantCollectionPtrOrAcquisition collection, + PlanYieldPolicy::YieldPolicy yieldPolicy, + boost::optional numSamples = boost::none); + /** * Returns a collection scan. Refer to CollectionScanParams for usage of 'minRecord' and * 'maxRecord'. 
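`InternalPlanner::sampleCollection` above wires a random record cursor into a `MultiIteratorStage` and, when `numSamples` is supplied, caps it with a `LimitStage`; because every draw is independent, the stream may repeat documents. A standalone sketch of that shape, an unbounded with-replacement source wrapped by a limit decorator; the class names are illustrative, not the server's execution stages.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <optional>
#include <random>
#include <vector>

// Unbounded source: each call returns an independently chosen element
// (sampling with replacement), so duplicates are expected.
class RandomSource {
public:
    explicit RandomSource(std::vector<int> docs) : _docs(std::move(docs)), _gen(42) {}
    std::optional<int> next() {
        if (_docs.empty())
            return std::nullopt;
        std::uniform_int_distribution<std::size_t> pick(0, _docs.size() - 1);
        return _docs[pick(_gen)];
    }

private:
    std::vector<int> _docs;
    std::mt19937 _gen;
};

// Limit decorator: passes through at most 'limit' results, then reports EOF,
// mirroring how the limit stage caps the otherwise infinite random stream.
class Limit {
public:
    Limit(RandomSource source, int64_t limit) : _source(std::move(source)), _remaining(limit) {}
    std::optional<int> next() {
        if (_remaining <= 0)
            return std::nullopt;
        --_remaining;
        return _source.next();
    }

private:
    RandomSource _source;
    int64_t _remaining;
};

int main() {
    Limit plan(RandomSource({10, 20, 30}), 5 /* numSamples */);
    while (auto doc = plan.next()) {
        std::cout << *doc << '\n';  // five draws, possibly with repeats
    }
}
```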
*/ static std::unique_ptr collectionScan( OperationContext* opCtx, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition collection, PlanYieldPolicy::YieldPolicy yieldPolicy, Direction direction = FORWARD, const boost::optional& resumeAfterRecordId = boost::none, @@ -98,7 +131,7 @@ class InternalPlanner { */ static std::unique_ptr deleteWithCollectionScan( OperationContext* opCtx, - const CollectionPtr* collection, + CollectionAcquisition collection, std::unique_ptr deleteStageParams, PlanYieldPolicy::YieldPolicy yieldPolicy, Direction direction = FORWARD, @@ -130,7 +163,7 @@ class InternalPlanner { */ static std::unique_ptr deleteWithIndexScan( OperationContext* opCtx, - const CollectionPtr* collection, + CollectionAcquisition collection, std::unique_ptr params, const IndexDescriptor* descriptor, const BSONObj& startKey, @@ -164,7 +197,7 @@ class InternalPlanner { */ static std::unique_ptr deleteWithShardKeyIndexScan( OperationContext* opCtx, - const CollectionPtr* collection, + CollectionAcquisition collection, std::unique_ptr params, const ShardKeyIndex& shardKeyIdx, const BSONObj& startKey, @@ -178,7 +211,7 @@ class InternalPlanner { */ static std::unique_ptr updateWithIdHack( OperationContext* opCtx, - const CollectionPtr* collection, + CollectionAcquisition collection, const UpdateStageParams& params, const IndexDescriptor* descriptor, const BSONObj& key, diff --git a/src/mongo/db/query/interval.cpp b/src/mongo/db/query/interval.cpp index 7462d5ee01d6c..f53cfd7ba1097 100644 --- a/src/mongo/db/query/interval.cpp +++ b/src/mongo/db/query/interval.cpp @@ -29,6 +29,14 @@ #include "mongo/db/query/interval.h" +#include + +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" + namespace mongo { using std::string; @@ -45,7 +53,7 @@ Interval::Interval(BSONObj base, bool si, bool ei) { } void Interval::init(BSONObj base, bool si, bool ei) { - verify(base.nFields() >= 2); + MONGO_verify(base.nFields() >= 2); _intervalData = base.getOwned(); BSONObjIterator it(_intervalData); @@ -248,7 +256,7 @@ void Interval::intersect(const Interval& other, IntervalComparison cmp) { break; default: - verify(false); + MONGO_verify(false); } } @@ -284,7 +292,7 @@ void Interval::combine(const Interval& other, IntervalComparison cmp) { break; default: - verify(false); + MONGO_verify(false); } } diff --git a/src/mongo/db/query/interval.h b/src/mongo/db/query/interval.h index 26fadffb11bab..396daf66f34d7 100644 --- a/src/mongo/db/query/interval.h +++ b/src/mongo/db/query/interval.h @@ -29,6 +29,11 @@ #pragma once +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/jsobj.h" #include "mongo/util/hex.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/query/interval_evaluation_tree.cpp b/src/mongo/db/query/interval_evaluation_tree.cpp index 02916d1ef0888..29c497dc07d17 100644 --- a/src/mongo/db/query/interval_evaluation_tree.cpp +++ b/src/mongo/db/query/interval_evaluation_tree.cpp @@ -29,8 +29,23 @@ #include "mongo/db/query/interval_evaluation_tree.h" -#include "mongo/db/matcher/expression_internal_expr_comparison.h" +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_path.h" +#include "mongo/db/matcher/expression_type.h" #include 
"mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/interval.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::interval_evaluation_tree { namespace { diff --git a/src/mongo/db/query/interval_evaluation_tree.h b/src/mongo/db/query/interval_evaluation_tree.h index f9b4b4d2f1c80..b0885c8e91f43 100644 --- a/src/mongo/db/query/interval_evaluation_tree.h +++ b/src/mongo/db/query/interval_evaluation_tree.h @@ -29,8 +29,14 @@ #pragma once +#include +#include #include +#include +#include +#include +#include "mongo/bson/bsonelement.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/query/index_bounds.h" diff --git a/src/mongo/db/query/interval_evaluation_tree_test.cpp b/src/mongo/db/query/interval_evaluation_tree_test.cpp index 4dd5d2627c074..390dacc1c160e 100644 --- a/src/mongo/db/query/interval_evaluation_tree_test.cpp +++ b/src/mongo/db/query/interval_evaluation_tree_test.cpp @@ -27,10 +27,29 @@ * it in the license file. */ +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/interval_evaluation_tree.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { class IntervalEvaluationTreeTest : public unittest::Test { diff --git a/src/mongo/db/query/interval_test.cpp b/src/mongo/db/query/interval_test.cpp index 90402f1bce019..c6353fc1011fc 100644 --- a/src/mongo/db/query/interval_test.cpp +++ b/src/mongo/db/query/interval_test.cpp @@ -29,9 +29,14 @@ #include "mongo/db/query/interval.h" -#include "mongo/bson/bsontypes.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/killcursors_request_test.cpp b/src/mongo/db/query/killcursors_request_test.cpp index c7e63d8eb3cdd..e39b3819f06f6 100644 --- a/src/mongo/db/query/killcursors_request_test.cpp +++ b/src/mongo/db/query/killcursors_request_test.cpp @@ -27,12 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/kill_cursors_gen.h" - -#include "mongo/db/clientcursor.h" -#include "mongo/unittest/unittest.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -46,7 +55,7 @@ TEST(KillCursorsRequestTest, parseSuccess) { << "cursors" << BSON_ARRAY(CursorId(123) << CursorId(456)) << "$db" << "db"); KillCursorsCommandRequest request = KillCursorsCommandRequest::parse(ctxt, bsonObj); - ASSERT_EQ(request.getNamespace().ns(), "db.coll"); + ASSERT_EQ(request.getNamespace().ns_forTest(), "db.coll"); ASSERT_EQ(request.getCursorIds().size(), 2U); ASSERT_EQ(request.getCursorIds()[0], CursorId(123)); ASSERT_EQ(request.getCursorIds()[1], CursorId(456)); diff --git a/src/mongo/db/query/lru_key_value.h b/src/mongo/db/query/lru_key_value.h index 265379d4c1800..b8b1af2d34b79 100644 --- a/src/mongo/db/query/lru_key_value.h +++ b/src/mongo/db/query/lru_key_value.h @@ -28,11 +28,17 @@ */ #pragma once +#include +#include #include +#include #include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" @@ -114,7 +120,7 @@ class LRUKeyValue { clear(); } - typedef std::pair KVListEntry; + typedef std::pair KVListEntry; typedef std::list KVList; typedef typename KVList::iterator KVListIt; @@ -145,8 +151,9 @@ class LRUKeyValue { } _budgetTracker.onAdd(key, entry); - _kvList.push_front(std::make_pair(key, std::move(entry))); + _kvList.push_front(std::make_pair(nullptr, std::move(entry))); _kvMap[key] = _kvList.begin(); + _kvList.begin()->first = &(_kvMap.find(key)->first); return evict(); } @@ -164,10 +171,11 @@ class LRUKeyValue { KVListIt found = i->second; // Promote the kv-store entry to the front of the list. It is now the most recently used. - _kvList.push_front(std::make_pair(key, std::move(found->second))); + _kvList.push_front(std::make_pair(nullptr, std::move(found->second))); _kvMap.erase(i); _kvList.erase(found); _kvMap[key] = _kvList.begin(); + _kvList.begin()->first = &(_kvMap.find(key)->first); return _kvList.begin(); } @@ -196,9 +204,9 @@ class LRUKeyValue { size_t removeIf(KeyValuePredicate predicate) { size_t removed = 0; for (auto it = _kvList.begin(); it != _kvList.end();) { - if (predicate(it->first, *it->second)) { - _budgetTracker.onRemove(it->first, it->second); - _kvMap.erase(it->first); + if (predicate(*it->first, *it->second)) { + _budgetTracker.onRemove(*it->first, it->second); + _kvMap.erase(*it->first); it = _kvList.erase(it); ++removed; } else { @@ -261,8 +269,8 @@ class LRUKeyValue { while (_budgetTracker.isOverBudget()) { invariant(!_kvList.empty()); - _budgetTracker.onRemove(_kvList.back().first, _kvList.back().second); - _kvMap.erase(_kvList.back().first); + _budgetTracker.onRemove(*_kvList.back().first, _kvList.back().second); + _kvMap.erase(*_kvList.back().first); _kvList.pop_back(); ++nEvicted; @@ -278,7 +286,6 @@ class LRUKeyValue { mutable KVList _kvList; // Maps from a key to the corresponding std::list entry. 
- // TODO: SERVER-73659 LRUKeyValue should track and include the size of _kvMap in overall budget. mutable KVMap _kvMap; }; diff --git a/src/mongo/db/query/lru_key_value_test.cpp b/src/mongo/db/query/lru_key_value_test.cpp index a92f68f39fa80..3dac45ed5960b 100644 --- a/src/mongo/db/query/lru_key_value_test.cpp +++ b/src/mongo/db/query/lru_key_value_test.cpp @@ -29,8 +29,16 @@ #include "mongo/db/query/lru_key_value.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using namespace mongo; @@ -295,10 +303,10 @@ TEST(LRUKeyValueTest, IterationTest) { cache.add(2, std::make_shared(2)); auto i = cache.begin(); - ASSERT_EQUALS(i->first, 2); + ASSERT_EQUALS(*i->first, 2); ASSERT_EQUALS(*i->second, 2); ++i; - ASSERT_EQUALS(i->first, 1); + ASSERT_EQUALS(*i->first, 1); ASSERT_EQUALS(*i->second, 1); ++i; ASSERT(i == cache.end()); diff --git a/src/mongo/db/query/map_reduce_output_format.cpp b/src/mongo/db/query/map_reduce_output_format.cpp index 9c8b6da9c9dad..aba089d4b0f65 100644 --- a/src/mongo/db/query/map_reduce_output_format.cpp +++ b/src/mongo/db/query/map_reduce_output_format.cpp @@ -27,10 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/query/map_reduce_output_format.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" + namespace mongo::map_reduce_output_format { void appendInlineResponse(BSONArray&& documents, BSONObjBuilder* resultBuilder) { diff --git a/src/mongo/db/query/map_reduce_output_format.h b/src/mongo/db/query/map_reduce_output_format.h index 72f1dbd2ecf95..d61bf0b76cbc9 100644 --- a/src/mongo/db/query/map_reduce_output_format.h +++ b/src/mongo/db/query/map_reduce_output_format.h @@ -29,6 +29,11 @@ #pragma once +#include + +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" /** diff --git a/src/mongo/db/query/map_reduce_output_format_test.cpp b/src/mongo/db/query/map_reduce_output_format_test.cpp index aabd28181b4b7..1a80771f963ce 100644 --- a/src/mongo/db/query/map_reduce_output_format_test.cpp +++ b/src/mongo/db/query/map_reduce_output_format_test.cpp @@ -27,14 +27,16 @@ * it in the license file. 
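The lru_key_value.h hunk above stops keeping a second copy of the key in the LRU list: each list entry now stores a pointer to the key owned by `_kvMap`, patched in immediately after insertion, and eviction dereferences that pointer. A standalone sketch of the same layout with `std::unordered_map` and `std::list`; it leans on the guarantee that unordered_map elements stay put across rehashes, and the names are illustrative rather than the server's `LRUKeyValue`.

```cpp
#include <cassert>
#include <list>
#include <string>
#include <unordered_map>
#include <utility>

// The LRU list stores a pointer to the key owned by the map instead of a copy.
// std::unordered_map never relocates its elements on rehash, so the pointer
// stays valid for as long as the map entry exists.
class LruCache {
public:
    void add(const std::string& key, int value) {
        if (auto it = _map.find(key); it != _map.end()) {
            _list.erase(it->second);
            _map.erase(it);
        }
        _list.push_front(Entry{nullptr, value});  // key pointer patched just below
        auto mapIt = _map.emplace(key, _list.begin()).first;
        _list.begin()->first = &mapIt->first;  // point back at the map's key
    }

    const int* get(const std::string& key) {
        auto it = _map.find(key);
        if (it == _map.end())
            return nullptr;
        // Promote to most-recently-used by splicing the node to the front.
        _list.splice(_list.begin(), _list, it->second);
        return &it->second->second;
    }

    void evictOldest() {
        if (_list.empty())
            return;
        _map.erase(*_list.back().first);  // dereference the stored key pointer
        _list.pop_back();
    }

private:
    using Entry = std::pair<const std::string*, int>;
    std::list<Entry> _list;
    std::unordered_map<std::string, std::list<Entry>::iterator> _map;
};

int main() {
    LruCache cache;
    cache.add("a", 1);
    cache.add("b", 2);
    assert(*cache.get("a") == 1);  // "a" becomes most recently used
    cache.evictOldest();           // evicts "b", the least recently used entry
    assert(cache.get("b") == nullptr);
}
```

Avoiding two copies of each key (one in the list, one in the map) is presumably the motivation for the pointer indirection in the hunk above; the sketch only shows the ownership shape, not the budget tracking.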
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/bson/bsonobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/json.h" -#include "mongo/db/namespace_string.h" +#include "mongo/bson/json.h" #include "mongo/db/query/map_reduce_output_format.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using namespace std::literals::string_literals; diff --git a/src/mongo/db/query/max_time_ms_parser.cpp b/src/mongo/db/query/max_time_ms_parser.cpp index 38cf4a3d6d7ff..56a5429c5fed2 100644 --- a/src/mongo/db/query/max_time_ms_parser.cpp +++ b/src/mongo/db/query/max_time_ms_parser.cpp @@ -29,9 +29,18 @@ #include "mongo/db/query/max_time_ms_parser.h" +#include +#include #include -#include "mongo/bson/bsonobjbuilder.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/query/max_time_ms_parser.h b/src/mongo/db/query/max_time_ms_parser.h index d111b480599be..9be4a93b55761 100644 --- a/src/mongo/db/query/max_time_ms_parser.h +++ b/src/mongo/db/query/max_time_ms_parser.h @@ -30,6 +30,9 @@ #pragma once +#include + +#include "mongo/base/status_with.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/query/mock_yield_policies.h b/src/mongo/db/query/mock_yield_policies.h index fa2cee9fbba3b..5774c24fd6e55 100644 --- a/src/mongo/db/query/mock_yield_policies.h +++ b/src/mongo/db/query/mock_yield_policies.h @@ -39,8 +39,10 @@ namespace mongo { */ class MockYieldPolicy : public PlanYieldPolicy { public: - MockYieldPolicy(ClockSource* clockSource, PlanYieldPolicy::YieldPolicy policy) - : PlanYieldPolicy(policy, clockSource, 0, Milliseconds{0}, nullptr, nullptr) {} + MockYieldPolicy(OperationContext* opCtx, + ClockSource* clockSource, + PlanYieldPolicy::YieldPolicy policy) + : PlanYieldPolicy(opCtx, policy, clockSource, 0, Milliseconds{0}, nullptr, nullptr) {} private: void saveState(OperationContext* opCtx) override final { @@ -58,8 +60,8 @@ class MockYieldPolicy : public PlanYieldPolicy { */ class AlwaysTimeOutYieldPolicy final : public MockYieldPolicy { public: - AlwaysTimeOutYieldPolicy(ClockSource* cs) - : MockYieldPolicy(cs, PlanYieldPolicy::YieldPolicy::ALWAYS_TIME_OUT) {} + AlwaysTimeOutYieldPolicy(OperationContext* opCtx, ClockSource* cs) + : MockYieldPolicy(opCtx, cs, PlanYieldPolicy::YieldPolicy::ALWAYS_TIME_OUT) {} bool shouldYieldOrInterrupt(OperationContext*) override { return true; @@ -76,8 +78,8 @@ class AlwaysTimeOutYieldPolicy final : public MockYieldPolicy { */ class AlwaysPlanKilledYieldPolicy final : public MockYieldPolicy { public: - AlwaysPlanKilledYieldPolicy(ClockSource* cs) - : MockYieldPolicy(cs, PlanYieldPolicy::YieldPolicy::ALWAYS_MARK_KILLED) {} + AlwaysPlanKilledYieldPolicy(OperationContext* opCtx, ClockSource* cs) + : MockYieldPolicy(opCtx, cs, PlanYieldPolicy::YieldPolicy::ALWAYS_MARK_KILLED) {} bool shouldYieldOrInterrupt(OperationContext*) override { return true; @@ -94,8 +96,8 @@ class AlwaysPlanKilledYieldPolicy final : public MockYieldPolicy { */ class NoopYieldPolicy final : public MockYieldPolicy { public: - NoopYieldPolicy(ClockSource* clockSource) - : MockYieldPolicy(clockSource, 
PlanYieldPolicy::YieldPolicy::NO_YIELD) {} + NoopYieldPolicy(OperationContext* opCtx, ClockSource* clockSource) + : MockYieldPolicy(opCtx, clockSource, PlanYieldPolicy::YieldPolicy::NO_YIELD) {} bool shouldYieldOrInterrupt(OperationContext*) override { return false; diff --git a/src/mongo/db/query/multiple_collection_accessor.h b/src/mongo/db/query/multiple_collection_accessor.h index 04c766770bb9d..6fe6b2a371636 100644 --- a/src/mongo/db/query/multiple_collection_accessor.h +++ b/src/mongo/db/query/multiple_collection_accessor.h @@ -31,6 +31,8 @@ #include "mongo/db/catalog/collection.h" #include "mongo/db/db_raii.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/shard_role.h" namespace mongo { @@ -71,12 +73,14 @@ class MultipleCollectionAccessor final { explicit MultipleCollectionAccessor(const CollectionPtr& mainColl) : MultipleCollectionAccessor(&mainColl) {} + explicit MultipleCollectionAccessor(CollectionAcquisition mainAcq) : _mainAcq(mainAcq) {} + bool hasMainCollection() const { - return _mainColl && _mainColl->get(); + return (_mainColl && _mainColl->get()) || (_mainAcq && _mainAcq->exists()); } const CollectionPtr& getMainCollection() const { - return *_mainColl; + return _mainAcq ? _mainAcq->getCollectionPtr() : *_mainColl; } const std::map& getSecondaryCollections() const { @@ -87,9 +91,19 @@ class MultipleCollectionAccessor final { return _isAnySecondaryNamespaceAViewOrSharded; } + bool isAcquisition() const { + return bool(_mainAcq); + } + + const CollectionAcquisition& getMainAcquisition() const { + return *_mainAcq; + } + const CollectionPtr& lookupCollection(const NamespaceString& nss) const { if (_mainColl && _mainColl->get() && nss == _mainColl->get()->ns()) { return *_mainColl; + } else if (_mainAcq && nss == _mainAcq->getCollectionPtr()->ns()) { + return _mainAcq->getCollectionPtr(); } else if (auto itr = _secondaryColls.find(nss); itr != _secondaryColls.end()) { return itr->second; } @@ -98,6 +112,7 @@ class MultipleCollectionAccessor final { void clear() { _mainColl = &CollectionPtr::null; + _mainAcq.reset(); _secondaryColls.clear(); } @@ -114,6 +129,7 @@ class MultipleCollectionAccessor final { private: const CollectionPtr* _mainColl{&CollectionPtr::null}; + boost::optional _mainAcq; // Tracks whether any secondary namespace is a view or sharded based on information captured // at the time of lock acquisition. This is used to determine if a $lookup is eligible for diff --git a/src/mongo/db/query/optimizer/README.md b/src/mongo/db/query/optimizer/README.md index f72b012692bd3..7e473c72f6c2d 100644 --- a/src/mongo/db/query/optimizer/README.md +++ b/src/mongo/db/query/optimizer/README.md @@ -33,7 +33,6 @@ exercising this codebase: - **cqf_disabled_pipeline_opt**: [buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml](/buildscripts/resmokeconfig/suites/cqf_disabled_pipeline_opt.yml) - **cqf_parallel**: [buildscripts/resmokeconfig/suites/cqf_parallel.yml](/buildscripts/resmokeconfig/suites/cqf_parallel.yml) -- **cqf_passthrough**: [buildscripts/resmokeconfig/suites/cqf_passthrough.yml](/buildscripts/resmokeconfig/suites/cqf_passthrough.yml) - **query_golden_cqf**: [buildscripts/resmokeconfig/suites/query_golden_cqf.yml](/buildscripts/resmokeconfig/suites/query_golden_cqf.yml) Desriptions of these suites can be found in @@ -43,13 +42,7 @@ You may run these like so, adjusting the `-j` flag for the appropriate level of parallel execution for your machine. 
``` ./buildscripts/resmoke.py run -j4 \ - --suites=cqf,cqf_disabled_pipeline_opt,cqf_parallel,cqf_passthrough,query_golden_cqf -``` - -cqf_passthrough takes the longest to run by far, so this command may be more -useful for a quicker signal: -``` -./buildscripts/resmoke.py run --suites=cqf,cqf_disabled_pipeline_opt,cqf_parallel,query_golden_cqf -j4 + --suites=cqf,cqf_disabled_pipeline_opt,cqf_parallel,query_golden_cqf ``` ## Local Testing Recommendation @@ -67,12 +60,7 @@ build/install/bin/sbe_abt_test \ **Note:** You may need to adjust the path to the unit test binary targets if your SCons install directory is something more like `build/opt/install/bin`. -**Note:** The 'cqf_passthrough' suite may also be useful for additional coverage, -but due to the number of tests it takes quite a while to run locally -(~15 minutes depending on parallelization and compile flags) - ## Evergreen Testing Recommendation In addition to the above suites, there is a patch-only variant which enables the CQF feature flag -on a selection of existing suites. The variant, "Query Patch Only (all feature flags and CQF -enabled)", runs the suites listed above and some others including: aggregation, jsCore, -noPassthrough, and a handful of passthroughs (e.g. sharding_jscore_passthrough). +on a selection of existing suites. The variant, "Query (all feature flags and CQF enabled)", runs +all the tasks from the recommended all-feature-flags variants. diff --git a/src/mongo/db/query/optimizer/SConscript b/src/mongo/db/query/optimizer/SConscript index 2abdccf1f044a..fcf38bcb463f6 100644 --- a/src/mongo/db/query/optimizer/SConscript +++ b/src/mongo/db/query/optimizer/SConscript @@ -147,12 +147,13 @@ env.CppUnitTest( env.CppUnitTest( target='interval_simplify_test', source=[ - "interval_simplify_test.cpp", + 'interval_simplify_test.cpp', ], LIBDEPS=[ - "$BUILD_DIR/mongo/db/service_context_test_fixture", - "optimizer", - "unit_test_pipeline_utils", + '$BUILD_DIR/mongo/db/service_context_non_d', + '$BUILD_DIR/mongo/db/service_context_test_fixture', + 'optimizer', + 'unit_test_pipeline_utils', ], ) diff --git a/src/mongo/db/query/optimizer/algebra/algebra_test.cpp b/src/mongo/db/query/optimizer/algebra/algebra_test.cpp index 2ff918ffec088..d8c1ae2afebcb 100644 --- a/src/mongo/db/query/optimizer/algebra/algebra_test.cpp +++ b/src/mongo/db/query/optimizer/algebra/algebra_test.cpp @@ -27,9 +27,18 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/query/optimizer/algebra/operator.h" #include "mongo/db/query/optimizer/algebra/polyvalue.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer::algebra { diff --git a/src/mongo/db/query/optimizer/algebra/operator.h b/src/mongo/db/query/optimizer/algebra/operator.h index 1dd752727474b..dede8effa6cf1 100644 --- a/src/mongo/db/query/optimizer/algebra/operator.h +++ b/src/mongo/db/query/optimizer/algebra/operator.h @@ -33,8 +33,6 @@ #include #include -#include "mongo/util/concepts.h" - namespace mongo::optimizer { namespace algebra { @@ -66,19 +64,17 @@ class OpFixedArity : public OpNodeStorage { using Base = OpNodeStorage; public: - TEMPLATE(typename... Ts) - REQUIRES(sizeof...(Ts) == Arity) - OpFixedArity(Ts&&... vals) : Base({std::forward(vals)...}) {} + template + requires(sizeof...(Ts) == Arity) OpFixedArity(Ts&&... 
vals) + : Base({std::forward(vals)...}) {} - TEMPLATE(int I) - REQUIRES(I >= 0 && I < Arity) - auto& get() noexcept { + template + requires(I >= 0 && I < Arity) auto& get() noexcept { return this->_nodes[I]; } - TEMPLATE(int I) - REQUIRES(I >= 0 && I < Arity) - const auto& get() const noexcept { + template + requires(I >= 0 && I < Arity) const auto& get() const noexcept { return this->_nodes[I]; } }; diff --git a/src/mongo/db/query/optimizer/bool_expression.h b/src/mongo/db/query/optimizer/bool_expression.h index fb5a851781ce3..f529a05a41d44 100644 --- a/src/mongo/db/query/optimizer/bool_expression.h +++ b/src/mongo/db/query/optimizer/bool_expression.h @@ -30,8 +30,15 @@ #pragma once +#include #include +#include +#include +#include +#include #include +#include +#include #include #include "mongo/db/query/optimizer/algebra/operator.h" @@ -42,7 +49,7 @@ namespace mongo::optimizer { template struct TassertNegator { - T operator()(const T v) const { + T operator()(T v) const { tassert(7453909, "No negator specified", false); return v; } @@ -62,7 +69,6 @@ struct BoolExpr { using Node = algebra::PolyValue; using NodeVector = std::vector; - class Atom final : public algebra::OpFixedArity { using Base = algebra::OpFixedArity; @@ -150,113 +156,154 @@ struct BoolExpr { return getSingularDNF(n).has_value(); } - using ChildVisitor = std::function; - using ChildVisitorConst = std::function; - using AtomVisitor = std::function; - using AtomVisitorConst = std::function; - - static size_t visitConjuncts(const Node& node, const ChildVisitorConst& visitor) { - size_t index = 0; - for (const auto& conj : node.template cast()->nodes()) { - visitor(conj, index++); + /** + * Context present during traversal. + */ + struct VisitorContext { + /** + * Get the index of the child element in the conjunction or disjunction being traversed. + */ + size_t getChildIndex() const { + return _childIndex; } - return index; - } - static size_t visitConjuncts(Node& node, const ChildVisitor& visitor) { - size_t index = 0; - for (auto& conj : node.template cast()->nodes()) { - visitor(conj, index++); + /** + * Allow the visitor to signal that traversal should end early. 
+ */ + void returnEarly() const { + _returnEarly = true; } - return index; - } - static size_t visitDisjuncts(const Node& node, const ChildVisitorConst& visitor) { - size_t index = 0; - for (const auto& conj : node.template cast()->nodes()) { - visitor(conj, index++); + private: + size_t _childIndex = 0; + mutable bool _returnEarly = false; + + friend struct BoolExpr; + }; + + using AtomPredConst = std::function; + + template + static size_t visitNodes(NodeType&& node, const Visitor& visitor) { + VisitorContext ctx; + for (auto&& n : node.template cast()->nodes()) { + visitor(n, ctx); + ctx._childIndex++; + if (ctx._returnEarly) { + break; + } } - return index; + return ctx._childIndex; } - static size_t visitDisjuncts(Node& node, const ChildVisitor& visitor) { - size_t index = 0; - for (auto& conj : node.template cast()->nodes()) { - visitor(conj, index++); - } - return index; + template + static size_t visitConjuncts(NodeType&& node, const Visitor& visitor) { + return visitNodes(node, visitor); } - static void visitAtom(const Node& node, const AtomVisitorConst& visitor) { - visitor(node.template cast()->getExpr()); + template + static size_t visitDisjuncts(NodeType&& node, const Visitor& visitor) { + return visitNodes(node, visitor); } - static void visitAtom(Node& node, const AtomVisitor& visitor) { - visitor(node.template cast()->getExpr()); + template + static void visitAtom(NodeType&& node, const Visitor& visitor) { + const VisitorContext ctx; + visitor(node.template cast()->getExpr(), ctx); } - static void visitCNF(const Node& node, const AtomVisitorConst& visitor) { - visitConjuncts(node, [&](const Node& child, const size_t) { - visitDisjuncts(child, [&](const Node& grandChild, const size_t) { - visitAtom(grandChild, visitor); + template + static void visitCNF(NodeType&& node, const Visitor& visitor) { + visitConjuncts(node, [&](const Node& child, const VisitorContext& conjCtx) { + visitDisjuncts(child, [&](const Node& grandChild, const VisitorContext& disjCtx) { + visitor(grandChild.template cast()->getExpr(), disjCtx); + if (disjCtx._returnEarly) { + conjCtx.returnEarly(); + } }); }); } - static void visitDNF(const Node& node, const AtomVisitorConst& visitor) { - visitDisjuncts(node, [&](const Node& child, const size_t) { - visitConjuncts(child, [&](const Node& grandChild, const size_t) { - visitAtom(grandChild, visitor); + template + static void visitDNF(NodeType&& node, const Visitor& visitor) { + visitDisjuncts(node, [&](NodeType&& child, const VisitorContext& disjCtx) { + visitConjuncts(child, [&](NodeType&& grandChild, const VisitorContext& conjCtx) { + visitor(grandChild.template cast()->getExpr(), conjCtx); + if (conjCtx._returnEarly) { + disjCtx.returnEarly(); + } }); }); } - static void visitAnyShape(const Node& node, const AtomVisitorConst& atomVisitor) { + template + static void visitSingletonDNF(NodeType&& node, const Visitor& visitor) { + tassert(7382800, "Expected a singleton disjunction", isSingletonDisjunction(node)); + visitDNF(node, visitor); + } + + template + static void visitAnyShape(NodeType&& node, const Visitor& atomVisitor) { + constexpr bool isConst = std::is_const_v>; + using VectorT = std::conditional_t; struct AtomTransport { - void transport(const Conjunction&, const NodeVector&) {} - void transport(const Disjunction&, const NodeVector&) {} - void transport(const Atom& node) { - atomVisitor(node.getExpr()); + void transport(std::conditional_t, VectorT) { + // noop + } + void transport(std::conditional_t, VectorT) { + // noop } - const 
AtomVisitorConst& atomVisitor; + void transport(std::conditional_t node) { + const VisitorContext ctx; + atomVisitor(node.getExpr(), ctx); + } + const Visitor& atomVisitor; }; AtomTransport impl{atomVisitor}; algebra::transport(node, impl); } - static void visitCNF(Node& node, const AtomVisitor& visitor) { - visitConjuncts(node, [&](Node& child, const size_t) { - visitDisjuncts(child, - [&](Node& grandChild, const size_t) { visitAtom(grandChild, visitor); }); + template + static T& firstDNFLeaf(NodeType&& node) { + T* leaf = nullptr; + visitDNF(node, [&](T& e, const VisitorContext& ctx) { + leaf = &e; + ctx.returnEarly(); }); + tassert(7382801, "Expected a non-empty expression", leaf); + return *leaf; } - static void visitDNF(Node& node, const AtomVisitor& visitor) { - visitDisjuncts(node, [&](Node& child, const size_t) { - visitConjuncts(child, - [&](Node& grandChild, const size_t) { visitAtom(grandChild, visitor); }); + static bool any(const Node& node, const AtomPredConst& atomPred) { + bool result = false; + visitAnyShape(node, [&](const T& atom, const VisitorContext& ctx) { + if (atomPred(atom)) { + result = true; + ctx.returnEarly(); + } }); + return result; } - static void visitAnyShape(Node& node, const AtomVisitor& atomVisitor) { - struct AtomTransport { - void transport(Conjunction&, NodeVector&) {} - void transport(Disjunction&, NodeVector&) {} - void transport(Atom& node) { - atomVisitor(node.getExpr()); + static bool all(const Node& node, const AtomPredConst& atomPred) { + bool result = true; + visitAnyShape(node, [&](const T& atom, const VisitorContext& ctx) { + if (!atomPred(atom)) { + result = false; + ctx.returnEarly(); } - const AtomVisitor& atomVisitor; - }; - AtomTransport impl{atomVisitor}; - algebra::transport(node, impl); + }); + return result; } - static bool isCNF(const Node& n) { if (n.template is()) { bool disjunctions = true; - visitConjuncts(n, [&](const Node& child, const size_t) { - disjunctions &= child.template is(); + visitConjuncts(n, [&](const Node& child, const VisitorContext& ctx) { + if (!child.template is()) { + disjunctions = false; + ctx.returnEarly(); + } }); return disjunctions; } @@ -266,8 +313,11 @@ struct BoolExpr { static bool isDNF(const Node& n) { if (n.template is()) { bool conjunctions = true; - visitDisjuncts(n, [&](const Node& child, const size_t) { - conjunctions &= child.template is(); + visitDisjuncts(n, [&](const Node& child, const VisitorContext& ctx) { + if (!child.template is()) { + conjunctions = false; + ctx.returnEarly(); + } }); return conjunctions; } @@ -331,6 +381,15 @@ struct BoolExpr { return *this; } + Builder& subtree(BoolExpr::Node expr) { + tassert(6902603, + "BoolExpr::Builder::subtree does not support negation", + !isCurrentlyNegated()); + _result = std::move(expr); + maybeAddToParent(); + return *this; + } + Builder& push(const bool isConjunction) { const bool negated = isCurrentlyNegated(); _stack.push_back({(negated == isConjunction) ? 
NodeType::Disj : NodeType::Conj, diff --git a/src/mongo/db/query/optimizer/bool_expression_test.cpp b/src/mongo/db/query/optimizer/bool_expression_test.cpp index 3a0bc516ea649..074460b84d3a1 100644 --- a/src/mongo/db/query/optimizer/bool_expression_test.cpp +++ b/src/mongo/db/query/optimizer/bool_expression_test.cpp @@ -29,13 +29,22 @@ #include "mongo/db/query/optimizer/bool_expression.h" +#include #include +#include +#include +#include -#include "mongo/db/query/optimizer/explain.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/utils/bool_expression_printer.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/inline_auto_update.h" namespace mongo::optimizer { namespace { @@ -243,15 +252,19 @@ TEST(BoolExpr, BoolExprVisitorTest) { ASSERT_FALSE(IntBoolExpr::isDNF(intExprCNF)); int max = -1; - IntBoolExpr::visitConjuncts(intExprCNF, [&](const IntBoolExpr::Node& conjunct, int) { - IntBoolExpr::visitDisjuncts(conjunct, [&](const IntBoolExpr::Node& disjunct, int) { - IntBoolExpr::visitAtom(disjunct, [&](const int& val) { - if (val > max) { - max = val; - } - }); + IntBoolExpr::visitConjuncts( + intExprCNF, [&](const IntBoolExpr::Node& conjunct, const IntBoolExpr::VisitorContext&) { + IntBoolExpr::visitDisjuncts( + conjunct, + [&](const IntBoolExpr::Node& disjunct, const IntBoolExpr::VisitorContext&) { + IntBoolExpr::visitAtom(disjunct, + [&](const int& val, const IntBoolExpr::VisitorContext&) { + if (val > max) { + max = val; + } + }); + }); }); - }); ASSERT_EQ(5, max); // Show non const visitors @@ -261,15 +274,39 @@ TEST(BoolExpr, BoolExprVisitorTest) { ASSERT(IntBoolExpr::isDNF(intExprDNF)); ASSERT_FALSE(IntBoolExpr::isCNF(intExprDNF)); - IntBoolExpr::visitDisjuncts(intExprDNF, [](IntBoolExpr::Node& disjunct, int) { - IntBoolExpr::visitConjuncts(disjunct, [](IntBoolExpr::Node& conjunct, int) { - IntBoolExpr::visitAtom(conjunct, [](int& val) { val = val + 1; }); + IntBoolExpr::visitDisjuncts( + intExprDNF, [](IntBoolExpr::Node& disjunct, const IntBoolExpr::VisitorContext&) { + IntBoolExpr::visitConjuncts( + disjunct, [](IntBoolExpr::Node& conjunct, const IntBoolExpr::VisitorContext&) { + IntBoolExpr::visitAtom( + conjunct, + [](int& val, const IntBoolExpr::VisitorContext&) { val = val + 1; }); + }); }); - }); ASSERT_STR_EQ_AUTO( // NOLINT "((2 ^ 3 ^ 4) U (5 ^ 6))", // NOLINT (test auto-update) BoolExprPrinter().print(intExprDNF)); } + +TEST(BoolExpr, BoolExprVisitorEarlyReturnTest) { + IntBoolExpr::Builder b; + b.pushConj().pushDisj().atom(1).atom(2).atom(3).pop().pushDisj().atom(4).atom(5).pop(); + auto intExprCNF = b.finish().get(); + + int visitedNodes = 0; + IntBoolExpr::visitConjuncts(intExprCNF, + [&](const IntBoolExpr::Node& node, + const IntBoolExpr::VisitorContext& ctx) { visitedNodes++; }); + ASSERT_EQ(2, visitedNodes); + + visitedNodes = 0; + IntBoolExpr::visitConjuncts( + intExprCNF, [&](const IntBoolExpr::Node& node, const IntBoolExpr::VisitorContext& ctx) { + visitedNodes++; + ctx.returnEarly(); + }); + ASSERT_EQ(1, visitedNodes); +} } // namespace } // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/cascades/enforcers.cpp b/src/mongo/db/query/optimizer/cascades/enforcers.cpp index 3d6ccdfb39387..d8205b2df425b 100644 --- 
a/src/mongo/db/query/optimizer/cascades/enforcers.cpp +++ b/src/mongo/db/query/optimizer/cascades/enforcers.cpp @@ -29,8 +29,24 @@ #include "mongo/db/query/optimizer/cascades/enforcers.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/memo_utils.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer::cascades { @@ -69,13 +85,15 @@ class PropEnforcerVisitor { const RIDProjectionsMap& ridProjections, PhysRewriteQueue& queue, const PhysProps& physProps, - const LogicalProps& logicalProps) + const LogicalProps& logicalProps, + PrefixId& prefixId) : _groupId(groupId), _metadata(metadata), _ridProjections(ridProjections), _queue(queue), _physProps(physProps), - _logicalProps(logicalProps) {} + _logicalProps(logicalProps), + _prefixId(prefixId) {} void operator()(const PhysProperty&, const CollationRequirement& prop) { if (hasIncompleteScanIndexingRequirement(_physProps)) { @@ -254,6 +272,73 @@ class PropEnforcerVisitor { // Noop. We do not currently enforce this property. It only affects costing. } + void operator()(const PhysProperty&, const RemoveOrphansRequirement& prop) { + if (!prop.mustRemove()) { + // Nothing to do if we don't need to remove any orphans. + return; + } + + tassert(7829701, + "Enforcer for RemoveOrphansRequirement for a group without IndexingAvailability", + hasProperty(_logicalProps)); + + const auto& scanDefName = + getPropertyConst(_logicalProps).getScanDefName(); + const auto& scanDef = _metadata._scanDefs.at(scanDefName); + + // Construct a plan fragment which enforces the requirement by projecting all fields of the + // shard key and invoking the shardFilter FunctionCall in a filter. + const auto& shardKey = scanDef.getDistributionAndPaths()._paths; + tassert( + 7829702, + "Enforcer for RemoveOrphansRequirement but scan definition doesn't have a shard key.", + !shardKey.empty()); + const auto& scanProj = + getPropertyConst(_logicalProps).getScanProjection(); + ABTVector shardKeyFieldVars; + + PhysPlanBuilder builder{make(_groupId)}; + + // Use the cardinality estimate of the group for costing purposes of the evaluation and + // filter nodes that we are constructing in this plan fragment because in the majority of + // cases, we expect there to be very few orphans and thus we don't adjust CE estimates to + // account for them. + auto ce = getPropertyConst(_logicalProps); + + // Save a pointer to the MemoLogicalDelegatorNode so we can use it in the childPropsMap.
+ ABT* childPtr = nullptr; + + for (auto&& field : shardKey) { + auto projName = _prefixId.getNextId("shardKey"); + builder.make(ce.getEstimate(), + projName, + make(field, make(scanProj)), + std::move(builder._node)); + shardKeyFieldVars.push_back(make(projName)); + if (childPtr == nullptr) { + childPtr = &builder._node.cast()->getChild(); + } + } + tassert(7829703, "Unable to save pointer to MemoLogicalDelagatorNode child.", childPtr); + builder.make(ce.getEstimate(), + make("shardFilter", std::move(shardKeyFieldVars)), + std::move(builder._node)); + + PhysProps childProps = _physProps; + setPropertyOverwrite(childProps, RemoveOrphansRequirement{false}); + addProjectionsToProperties(childProps, ProjectionNameSet{scanProj}); + + ChildPropsType childPropsMap; + childPropsMap.emplace_back(childPtr, std::move(childProps)); + + optimizeChildrenNoAssert(_queue, + kDefaultPriority, + PhysicalRewriteType::EnforceShardFilter, + std::move(builder._node), + std::move(childPropsMap), + std::move(builder._nodeCEMap)); + } + private: const GroupIdType _groupId; @@ -263,6 +348,7 @@ class PropEnforcerVisitor { PhysRewriteQueue& _queue; const PhysProps& _physProps; const LogicalProps& _logicalProps; + PrefixId& _prefixId; }; void addEnforcers(const GroupIdType groupId, @@ -270,8 +356,10 @@ void addEnforcers(const GroupIdType groupId, const RIDProjectionsMap& ridProjections, PhysRewriteQueue& queue, const PhysProps& physProps, - const LogicalProps& logicalProps) { - PropEnforcerVisitor visitor(groupId, metadata, ridProjections, queue, physProps, logicalProps); + const LogicalProps& logicalProps, + PrefixId& prefixId) { + PropEnforcerVisitor visitor( + groupId, metadata, ridProjections, queue, physProps, logicalProps, prefixId); for (const auto& entry : physProps) { entry.second.visit(visitor); } diff --git a/src/mongo/db/query/optimizer/cascades/enforcers.h b/src/mongo/db/query/optimizer/cascades/enforcers.h index 15bd4d139dd07..cb45a62328562 100644 --- a/src/mongo/db/query/optimizer/cascades/enforcers.h +++ b/src/mongo/db/query/optimizer/cascades/enforcers.h @@ -30,6 +30,9 @@ #pragma once #include "mongo/db/query/optimizer/cascades/rewrite_queues.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/utils/utils.h" namespace mongo::optimizer::cascades { @@ -42,6 +45,7 @@ void addEnforcers(GroupIdType groupId, const RIDProjectionsMap& ridProjections, PhysRewriteQueue& queue, const properties::PhysProps& physProps, - const properties::LogicalProps& logicalProps); + const properties::LogicalProps& logicalProps, + PrefixId& prefixId); } // namespace mongo::optimizer::cascades diff --git a/src/mongo/db/query/optimizer/cascades/implementers.cpp b/src/mongo/db/query/optimizer/cascades/implementers.cpp index 834066e60405c..56522074ad36c 100644 --- a/src/mongo/db/query/optimizer/cascades/implementers.cpp +++ b/src/mongo/db/query/optimizer/cascades/implementers.cpp @@ -29,13 +29,41 @@ #include "mongo/db/query/optimizer/cascades/implementers.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/cascades/rewrite_queues.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/containers.h" +#include 
"mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/partial_schema_requirements.h" #include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/ce_math.h" #include "mongo/db/query/optimizer/utils/interval_utils.h" #include "mongo/db/query/optimizer/utils/memo_utils.h" +#include "mongo/db/query/optimizer/utils/physical_plan_builder.h" #include "mongo/db/query/optimizer/utils/reftracker_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer::cascades { @@ -68,23 +96,30 @@ static bool propertyAffectsProjection(const PhysProps& props, // SortedMerge and MergeJoin to produce a stream of sorted RIDs, allowing us to potentially // deduplicate with a streaming Unique. static bool canReturnSortedOutput(const CompoundIntervalReqExpr::Node& intervals) { + using CIQExpr = CompoundIntervalReqExpr; bool canBeSorted = true; - // TODO SERVER-73828 this pattern could use early return. - CompoundIntervalReqExpr::visitDisjuncts( - intervals, [&](const CompoundIntervalReqExpr::Node& conj, size_t) { - CompoundIntervalReqExpr::visitConjuncts( - conj, [&](const CompoundIntervalReqExpr::Node& atom, size_t i) { - if (i > 0) { + CIQExpr::visitDisjuncts( + intervals, [&](const CIQExpr::Node& conj, const CIQExpr::VisitorContext& disjCtx) { + CIQExpr::visitConjuncts( + conj, [&](const CIQExpr::Node& atom, const CIQExpr::VisitorContext& conjCtx) { + if (conjCtx.getChildIndex() > 0) { canBeSorted = false; } else { - CompoundIntervalReqExpr::visitAtom( - atom, [&](const CompoundIntervalRequirement& req) { - if (!req.isEquality()) { - canBeSorted = false; - } - }); + CIQExpr::visitAtom(atom, + [&](const CompoundIntervalRequirement& req, + const CIQExpr::VisitorContext&) { + if (!req.isEquality()) { + canBeSorted = false; + } + }); + } + if (!canBeSorted) { + conjCtx.returnEarly(); } }); + if (!canBeSorted) { + disjCtx.returnEarly(); + } }); // We shouldn't use a SortedMerge for a singleton disjunction, because with one child there is // nothing to sort-merge. @@ -92,7 +127,17 @@ static bool canReturnSortedOutput(const CompoundIntervalReqExpr::Node& intervals } /** - * Implement physical nodes based on existing logical nodes. + * Takes a logical node and required physical properties, and creates zero or more physical subtrees + * that can implement that logical node while satisfying those properties. + * + * The input logical node is expected to come from a Memo, which means its immediate children are + * 'MemoLogicalDelegator' nodes (because each logical node lives in a separate Group). + * + * The physical plans are output by adding 'PhysRewriteEntry' entries to '_queue'. Each entry must + * contain: + * - a tree of physical nodes, with logical(!) delegator nodes as the leaves. + * - a cardinality estimate for each physical node. + * - the physical properties that we require for each logical leaf. */ class ImplementationVisitor { public: @@ -106,6 +151,11 @@ class ImplementationVisitor { // TODO: consider rid? return; } + if (getPropertyConst(_physProps).mustRemove()) { + // Cannot satisfy remove orphans. The enforcer for a group representing a scan will + // produce an alternative which performs shard filtering. 
+ return; + } const auto& indexReq = getPropertyConst(_physProps); const IndexReqTarget indexReqTarget = indexReq.getIndexReqTarget(); @@ -390,6 +440,10 @@ class ImplementationVisitor { // Cannot satisfy limit-skip. return; } + if (getPropertyConst(_physProps).mustRemove()) { + // TODO SERVER-78507: Implement this implementer. + return; + } const IndexingAvailability& indexingAvailability = getPropertyConst(_logicalProps); @@ -416,7 +470,7 @@ class ImplementationVisitor { if (_hints._disableScan) { return; } - if (_hints._forceIndexScanForPredicates && hasProperIntervals(reqMap)) { + if (_hints._forceIndexScanForPredicates && hasProperIntervals(reqMap.getRoot())) { return; } break; @@ -447,16 +501,10 @@ class ImplementationVisitor { const ProjectionName& scanProjectionName = indexingAvailability.getScanProjection(); // We can only satisfy partial schema requirements using our root projection. - { - bool anyNonRoot = false; - PSRExpr::visitAnyShape(reqMap.getRoot(), [&](const PartialSchemaEntry& e) { - if (e.first._projectionName != scanProjectionName) { - anyNonRoot = true; - } - }); - if (anyNonRoot) { - return; - } + if (PSRExpr::any(reqMap.getRoot(), [&](const PartialSchemaEntry& e) { + return e.first._projectionName != scanProjectionName; + })) { + return; } const auto& requiredProjections = @@ -473,7 +521,7 @@ class ImplementationVisitor { requiresRootProjection = projectionsLeftToSatisfy.erase(scanProjectionName); } - for (const auto& [key, boundProjName] : getBoundProjections(reqMap.getRoot())) { + for (const auto& [key, boundProjName] : getBoundProjections(reqMap)) { projectionsLeftToSatisfy.erase(boundProjName); } if (!projectionsLeftToSatisfy.getVector().empty()) { @@ -551,7 +599,9 @@ class ImplementationVisitor { std::set residIndexes; if (residualReqs) { ResidualRequirements::visitDNF( - *residualReqs, [&](const ResidualRequirement& residReq) { + *residualReqs, + [&](const ResidualRequirement& residReq, + const ResidualRequirements::VisitorContext&) { residIndexes.emplace(residReq._entryIndex); }); } @@ -563,13 +613,17 @@ class ImplementationVisitor { std::vector atomSels; std::vector conjuctionSels; PSRExpr::visitDisjuncts( - reqMap.getRoot(), [&](const PSRExpr::Node& child, const size_t) { + reqMap.getRoot(), + [&](const PSRExpr::Node& child, const PSRExpr::VisitorContext&) { atomSels.clear(); PSRExpr::visitConjuncts( - child, [&](const PSRExpr::Node& atom, const size_t) { + child, + [&](const PSRExpr::Node& atom, const PSRExpr::VisitorContext&) { PSRExpr::visitAtom( - atom, [&](const PartialSchemaEntry& entry) { + atom, + [&](const PartialSchemaEntry& entry, + const PSRExpr::VisitorContext&) { if (residIndexes.count(entryIndex) == 0) { const SelectivityType sel = partialSchemaKeyCE.at(entryIndex).second / @@ -662,6 +716,7 @@ class ImplementationVisitor { if (residualReqs) { auto reqsWithCE = createResidualReqsWithCE(*residualReqs, partialSchemaKeyCE); lowerPartialSchemaRequirements(scanGroupCE, + scanGroupCE, std::move(indexPredSels), std::move(reqsWithCE), _pathToInterval, @@ -733,8 +788,12 @@ class ImplementationVisitor { if (residualReqs) { auto reqsWithCE = createResidualReqsWithCE(*residualReqs, partialSchemaKeyCE); - lowerPartialSchemaRequirements( - baseCE, {} /*indexPredSels*/, std::move(reqsWithCE), _pathToInterval, builder); + lowerPartialSchemaRequirements(scanGroupCE, + baseCE, + {} /*indexPredSels*/, + std::move(reqsWithCE), + _pathToInterval, + builder); } optimizeChildrenNoAssert(_queue, @@ -747,6 +806,11 @@ class ImplementationVisitor { } void 
operator()(const ABT& /*n*/, const RIDIntersectNode& node) { + if (getPropertyConst(_physProps).mustRemove()) { + // TODO SERVER-78508: Implement this implementer. + return; + } + const auto& indexingAvailability = getPropertyConst(_logicalProps); const std::string& scanDefName = indexingAvailability.getScanDefName(); { @@ -972,10 +1036,133 @@ class ImplementationVisitor { } } + void operator()(const ABT& /*n*/, const RIDUnionNode& node) { - // TODO SERVER-69026 should implement this. - tasserted(7016300, "RIDUnionNode not implemented yet."); - return; + const auto& indexingAvailability = getPropertyConst(_logicalProps); + const std::string& scanDefName = indexingAvailability.getScanDefName(); + { + const auto& scanDef = _metadata._scanDefs.at(scanDefName); + if (scanDef.getIndexDefs().empty()) { + // Reject if we do not have any indexes. + return; + } + } + const auto& ridProjName = _ridProjections.at(scanDefName); + + if (hasProperty(_physProps)) { + // Cannot satisfy limit-skip. + return; + } + + const IndexingRequirement& requirements = getPropertyConst(_physProps); + const bool dedupRID = requirements.getDedupRID(); + const IndexReqTarget indexReqTarget = requirements.getIndexReqTarget(); + if (indexReqTarget != IndexReqTarget::Index) { + // We only allow index target. + return; + } + + const GroupIdType leftGroupId = + node.getLeftChild().cast()->getGroupId(); + const GroupIdType rightGroupId = + node.getRightChild().cast()->getGroupId(); + + const LogicalProps& leftLogicalProps = _memo.getLogicalProps(leftGroupId); + const LogicalProps& rightLogicalProps = _memo.getLogicalProps(rightGroupId); + + const bool hasProperIntervalLeft = + getPropertyConst(leftLogicalProps).hasProperInterval(); + const bool hasProperIntervalRight = + getPropertyConst(rightLogicalProps).hasProperInterval(); + + if (!hasProperIntervalLeft || !hasProperIntervalRight) { + // We need to have proper intervals on both sides. + return; + } + + const auto& distribRequirement = getPropertyConst(_physProps); + const auto& distrAndProjections = distribRequirement.getDistributionAndProjections(); + if (distrAndProjections._type != DistributionType::Centralized) { + // For now we allow only centralized distribution. + return; + } + + const auto& required = getPropertyConst(_physProps).getProjections(); + if (required.getVector().size() != 1 || !required.find(ridProjName)) { + // For now we can only satisfy requirement for ridProjection. + return; + } + + ProjectionNameOrderPreservingSet leftChildProjections; + leftChildProjections.emplace_back(ridProjName); + + ProjectionNameOrderPreservingSet rightChildProjections; + rightChildProjections.emplace_back(ridProjName); + + if (hasProperty(_physProps)) { + // For now we cannot satisfy collation requirement. + return; + } + + // We are propagating the distribution requirements to both sides. + PhysProps leftPhysProps = _physProps; + PhysProps rightPhysProps = _physProps; + + getProperty(leftPhysProps).setDisableExchanges(false); + getProperty(rightPhysProps).setDisableExchanges(false); + + // Propagate IndexingRequirement, but don't require children to provide deduped RIDs: + // if the consumer needs deduped RIDs, then we construct a Unique node anyway, to handle any + // RIDs that appear in both children. 
+ setPropertyOverwrite( + leftPhysProps, + {IndexReqTarget::Index, + false /*dedupRID*/, + requirements.getSatisfiedPartialIndexesGroupId()}); + setPropertyOverwrite( + rightPhysProps, + {IndexReqTarget::Index, + false /*dedupRID*/, + requirements.getSatisfiedPartialIndexesGroupId()}); + + setPropertyOverwrite(leftPhysProps, std::move(leftChildProjections)); + setPropertyOverwrite(rightPhysProps, + std::move(rightChildProjections)); + + ABT physicalUnion = make(required.getVector(), + makeSeq(node.getLeftChild(), node.getRightChild())); + UnionNode& n = *physicalUnion.cast(); + + ChildPropsType childProps; + childProps.emplace_back(&n.nodes().at(0), std::move(leftPhysProps)); + childProps.emplace_back(&n.nodes().at(1), std::move(rightPhysProps)); + + if (dedupRID) { + ABT unique = + make(ProjectionNameVector{{ridProjName}}, std::move(physicalUnion)); + + // To estimate the physical Union node, we can add the estimates of the left and right + // child. This makes sense because physical Union does not do anything about duplicates: + // it's a bag-union. That's different than the overall CE of the current group, which + // should estimate the cardinality we have after duplicates are removed. + CEType overallCE = getPropertyConst(_logicalProps).getEstimate(); + CEType leftCE = getPropertyConst(leftLogicalProps).getEstimate(); + CEType rightCE = getPropertyConst(rightLogicalProps).getEstimate(); + NodeCEMap ceMap{ + {unique.cast(), overallCE}, + {unique.cast()->getChild().cast(), leftCE + rightCE}, + }; + optimizeChildrenNoAssert(_queue, + kDefaultPriority, + PhysicalRewriteType::RIDUnionUnique, + std::move(unique), + std::move(childProps), + std::move(ceMap)); + } else { + // The consumer doesn't require unique row IDs, so we don't need any Unique stage. 
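The dedup handling above relies on the physical Union being a bag-union: duplicates across the two index sides survive the Union and are removed by the Unique on the rid projection, which is also why the Union is costed as the sum of the children's estimates while the Unique carries the group's overall estimate. A minimal sketch of that reasoning, with plain integer vectors standing in for RID streams and a sort/unique standing in for the Unique stage:

```
#include <algorithm>
#include <cassert>
#include <vector>

int main() {
    std::vector<int> leftRids{1, 3, 5, 7};
    std::vector<int> rightRids{3, 4, 5, 6};

    // Physical Union is a bag-union: duplicates across the children are preserved,
    // so its cardinality estimate is leftCE + rightCE (8 here).
    std::vector<int> bagUnion = leftRids;
    bagUnion.insert(bagUnion.end(), rightRids.begin(), rightRids.end());
    assert(bagUnion.size() == leftRids.size() + rightRids.size());

    // A dedup step on the rid projection restores set semantics; its output matches the
    // group's overall (post-dedup) estimate, 6 distinct RIDs here.
    std::sort(bagUnion.begin(), bagUnion.end());
    bagUnion.erase(std::unique(bagUnion.begin(), bagUnion.end()), bagUnion.end());
    assert((bagUnion == std::vector<int>{1, 3, 4, 5, 6, 7}));
    return 0;
}
```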
+ optimizeChildren( + _queue, kDefaultPriority, std::move(physicalUnion), std::move(childProps)); + } } void operator()(const ABT& n, const BinaryJoinNode& node) { diff --git a/src/mongo/db/query/optimizer/cascades/implementers.h b/src/mongo/db/query/optimizer/cascades/implementers.h index 0e71c2c9d9a0d..e37b1b1305679 100644 --- a/src/mongo/db/query/optimizer/cascades/implementers.h +++ b/src/mongo/db/query/optimizer/cascades/implementers.h @@ -30,7 +30,11 @@ #pragma once #include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/cascades/memo_defs.h" #include "mongo/db/query/optimizer/cascades/rewrite_queues.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/utils/utils.h" namespace mongo::optimizer::cascades { diff --git a/src/mongo/db/query/optimizer/cascades/logical_props_derivation.cpp b/src/mongo/db/query/optimizer/cascades/logical_props_derivation.cpp index 7afbaa04bb5ce..4d8a6a694dcaf 100644 --- a/src/mongo/db/query/optimizer/cascades/logical_props_derivation.cpp +++ b/src/mongo/db/query/optimizer/cascades/logical_props_derivation.cpp @@ -28,7 +28,28 @@ */ #include "mongo/db/query/optimizer/cascades/logical_props_derivation.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer::cascades { @@ -37,6 +58,12 @@ using namespace properties; static void populateInitialDistributions(const DistributionAndPaths& distributionAndPaths, const bool isMultiPartition, DistributionSet& distributions) { + + if (!isMultiPartition) { + distributions.insert({DistributionType::Centralized}); + return; + } + switch (distributionAndPaths._type) { case DistributionType::Centralized: distributions.insert({DistributionType::Centralized}); @@ -103,41 +130,51 @@ static void populateDistributionPaths(const PartialSchemaRequirements& req, */ static bool computeEqPredsOnly(const PartialSchemaRequirements& reqMap) { bool eqPredsOnly = true; - PSRExpr::visitDisjuncts(reqMap.getRoot(), [&](const PSRExpr::Node& child, const size_t) { - PartialSchemaKeySet equalityKeys; - PartialSchemaKeySet fullyOpenKeys; + PSRExpr::visitDisjuncts( + reqMap.getRoot(), [&](const PSRExpr::Node& child, const PSRExpr::VisitorContext& disjCtx) { + PartialSchemaKeySet equalityKeys; + PartialSchemaKeySet fullyOpenKeys; + + PSRExpr::visitConjuncts( + child, [&](const PSRExpr::Node& atom, const PSRExpr::VisitorContext& conjCtx) { + PSRExpr::visitAtom( + atom, [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + const auto& [key, req] = e; + const auto& intervals = req.getIntervals(); + if (auto singularInterval = + IntervalReqExpr::getSingularDNF(intervals)) { + if (singularInterval->isFullyOpen()) { + fullyOpenKeys.insert(key); + } else if (singularInterval->isEquality()) { + equalityKeys.insert(key); + } else { + // Encountered a non-equality and not-fully-open interval. 
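computeEqPredsOnly here (like canReturnSortedOutput in implementers.cpp above) now threads the new BoolExpr VisitorContext through its nested visits so traversal can stop at the first disqualifying interval. A condensed standalone version of that early-return visitor pattern, using a toy context and a flat list of integers rather than the real BoolExpr nodes:

```
#include <cassert>
#include <cstddef>
#include <vector>

// Toy stand-in for the VisitorContext added to BoolExpr: exposes the child index and lets the
// visitor request that traversal stop early.
struct VisitorContext {
    size_t getChildIndex() const { return childIndex; }
    void returnEarly() const { stop = true; }
    size_t childIndex = 0;
    mutable bool stop = false;
};

// Visits children in order; returns how many were actually visited.
template <typename Visitor>
size_t visitChildren(const std::vector<int>& children, const Visitor& visitor) {
    VisitorContext ctx;
    for (int child : children) {
        visitor(child, ctx);
        ctx.childIndex++;
        if (ctx.stop) {
            break;
        }
    }
    return ctx.childIndex;
}

int main() {
    const std::vector<int> vals{2, 4, 5, 6};
    // Bail out at the first odd value, the way computeEqPredsOnly bails out on the first
    // non-equality interval.
    bool allEven = true;
    const size_t visited = visitChildren(vals, [&](int v, const VisitorContext& ctx) {
        if (v % 2 != 0) {
            allEven = false;
            ctx.returnEarly();
        }
    });
    assert(!allEven);
    assert(visited == 3);  // 2, 4 and 5 were visited; 6 was skipped.
    return 0;
}
```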
+ eqPredsOnly = false; + } + } else { + // Encountered a non-trivial interval. + eqPredsOnly = false; + } - PSRExpr::visitConjuncts(child, [&](const PSRExpr::Node& atom, const size_t) { - PSRExpr::visitAtom(atom, [&](const PartialSchemaEntry& e) { - if (!eqPredsOnly) { - return; - } + if (!eqPredsOnly) { + conjCtx.returnEarly(); + } + }); - const auto& [key, req] = e; - const auto& intervals = req.getIntervals(); - if (auto singularInterval = IntervalReqExpr::getSingularDNF(intervals)) { - if (singularInterval->isFullyOpen()) { - fullyOpenKeys.insert(key); - } else if (singularInterval->isEquality()) { - equalityKeys.insert(key); - } else { - // Encountered a non-equality and not-fully-open interval. - eqPredsOnly = false; + if (!eqPredsOnly) { + disjCtx.returnEarly(); } - } else { - // Encountered a non-trivial interval. + }); + + for (const auto& key : fullyOpenKeys) { + if (equalityKeys.count(key) == 0) { + // No possible match for fully open requirement. eqPredsOnly = false; + disjCtx.returnEarly(); + return; } - }); - }); - - for (const auto& key : fullyOpenKeys) { - if (equalityKeys.count(key) == 0) { - // No possible match for fully open requirement. - eqPredsOnly = false; } - } - }); + }); return eqPredsOnly; } @@ -249,7 +286,7 @@ class DeriveLogicalProperties { } } - indexingAvailability.setHasProperInterval(hasProperIntervals(node.getReqMap())); + indexingAvailability.setHasProperInterval(hasProperIntervals(node.getReqMap().getRoot())); return maybeUpdateNodePropsMap(node, std::move(result)); } diff --git a/src/mongo/db/query/optimizer/cascades/logical_props_derivation.h b/src/mongo/db/query/optimizer/cascades/logical_props_derivation.h index d8b4d8ec34970..ca5bd1ab1b1ae 100644 --- a/src/mongo/db/query/optimizer/cascades/logical_props_derivation.h +++ b/src/mongo/db/query/optimizer/cascades/logical_props_derivation.h @@ -31,6 +31,10 @@ #include "mongo/db/query/optimizer/cascades/interfaces.h" #include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer::cascades { diff --git a/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp b/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp index 17052b694c494..845dbbb524ebd 100644 --- a/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp +++ b/src/mongo/db/query/optimizer/cascades/logical_rewriter.cpp @@ -29,11 +29,37 @@ #include "mongo/db/query/optimizer/cascades/logical_rewriter.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/cascades/rewrite_queues.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include 
"mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/db/query/optimizer/utils/physical_plan_builder.h" #include "mongo/db/query/optimizer/utils/reftracker_utils.h" - +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer::cascades { @@ -125,7 +151,7 @@ std::pair LogicalRewriter::addNode(const ABT& node, const GroupIdType targetGroupId, const LogicalRewriteType rule, const bool addExistingNodeWithNewChild) { - NodeIdSet insertNodeIds; + NodeIdSet insertedNodeIds; Memo::NodeTargetGroupMap targetGroupMap; if (targetGroupId >= 0) { @@ -136,15 +162,18 @@ std::pair LogicalRewriter::addNode(const ABT& node, Memo::Context{&_metadata, &_debugInfo, &_logicalPropsDerivation, &_cardinalityEstimator}, node, std::move(targetGroupMap), - insertNodeIds, - rule, - addExistingNodeWithNewChild); + insertedNodeIds, + rule); uassert(6624046, "Result group is not the same as target group", targetGroupId < 0 || targetGroupId == resultGroupId); - for (const MemoLogicalNodeId& nodeMemoId : insertNodeIds) { + // Every memo group that was extended with a new node may have new rewrites that can apply to + // it, so enqueue each of these groups to be visited by a rewrite later. + for (const MemoLogicalNodeId& nodeMemoId : insertedNodeIds) { + // However, if 'addExistingNodeWithNewChild' then don't schedule the 'targetGroupId' for new + // rewrites, to avoid applying the same rewrite forever. if (addExistingNodeWithNewChild && nodeMemoId._groupId == targetGroupId) { continue; } @@ -157,7 +186,7 @@ std::pair LogicalRewriter::addNode(const ABT& node, } } - return {resultGroupId, std::move(insertNodeIds)}; + return {resultGroupId, std::move(insertedNodeIds)}; } void LogicalRewriter::clearGroup(const GroupIdType groupId) { @@ -706,6 +735,10 @@ static void convertFilterToSargableNode(ABT::reference_type node, IndexReqTarget::Complete, filterNode.getChild()); if (conversion->_retainPredicate) { + // '_retainPredicate' means the 'sargableNode' is an over-approximation, so we also have to + // keep the original Filter node. But this means the Filter-to-Sargable rewrite could apply + // again, to avoid rewriting endlessly we need to avoid scheduling this rewrite. So we pass + // 'addExistingNodeWithNewChild = true'. ABT newNode = node; newNode.cast()->getChild() = std::move(sargableNode); ctx.addNode(newNode, true /*substitute*/, true /*addExistingNodeWithNewChild*/); @@ -714,10 +747,29 @@ static void convertFilterToSargableNode(ABT::reference_type node, } } +/** + * Type utilities to negate PathComposeA and PathComposeM. + */ +template +struct negate_compose; + +template <> +struct negate_compose { + using type = PathComposeA; +}; + +template <> +struct negate_compose { + using type = PathComposeM; +}; + +template +using negate_compose_t = typename negate_compose::type; + /** * Takes an expression or path and attempts to remove Not nodes by pushing them - * down toward the leaves. We only remove a Not if we can combine it into a - * PathCompare, or cancel it out with another Not. + * down toward the leaves. We push a Not if we can combine it into a PathCompare, + * push though PathConstant, or cancel it out with another Not. 
* * Caller provides: * - an input ABT @@ -740,6 +792,14 @@ class NotPushdown { }; using Result = boost::optional; + Result operator()(const ABT& /*n*/, const Constant& constant, const bool negate) { + if (negate && constant.isValueBool()) { + return { + {true, make(sbe::value::TypeTags::Boolean, !constant.getValueBool())}}; + } + return {}; + } + Result operator()(const ABT& /*n*/, const PathGet& get, const bool negate) { if (auto simplified = get.getPath().visit(*this, negate)) { return { @@ -760,6 +820,30 @@ class NotPushdown { return {}; } + Result operator()(const ABT& /*n*/, const PathConstant& constant, const bool negate) { + if (auto simplified = constant.getConstant().visit(*this, negate)) { + return {{simplified->negated, make(std::move(simplified->newNode))}}; + } else if (negate) { + // we can still negate the inner expression. + return {{true, + make( + make(Operations::Not, std::move(constant.getConstant())))}}; + } + return {}; + } + + Result operator()(const ABT& /*n*/, const PathDefault& pathDefault, const bool negate) { + if (auto simplified = pathDefault.getDefault().visit(*this, negate)) { + return {{simplified->negated, make(std::move(simplified->newNode))}}; + } else if (negate) { + // We can still negate the inner expression. + return {{true, + make( + make(Operations::Not, std::move(pathDefault.getDefault())))}}; + } + return {}; + } + Result operator()(const ABT& /*n*/, const UnaryOp& unary, const bool negate) { // Only handle Not. if (unary.op() != Operations::Not) { @@ -843,30 +927,31 @@ class NotPushdown { } } - Result operator()(const ABT& /*n*/, const PathComposeM& compose, const bool negate) { - auto simplified1 = compose.getPath1().visit(*this, negate); - auto simplified2 = compose.getPath2().visit(*this, negate); + template + Result handleComposition(const ABT& path1, const ABT& path2, const bool negate) { + auto simplified1 = path1.visit(*this, negate); + auto simplified2 = path2.visit(*this, negate); if (!simplified1 && !simplified2) { // Neither child is simplified. return {}; } + // At least one child is simplified, so we're going to rebuild a node. // If either child was not simplified, we're going to copy the original // unsimplified child. if (!simplified1) { - simplified1 = {{false, compose.getPath1()}}; + simplified1 = {{false, path1}}; } if (!simplified2) { - simplified2 = {{false, compose.getPath2()}}; + simplified2 = {{false, path2}}; } - if (!simplified1->negated && !simplified2->negated) { - // Neither is negated: keep the ComposeM. + // Neither is negated: keep the original composition. return {{false, - make(std::move(simplified1->newNode), - std::move(simplified2->newNode))}}; + make(std::move(simplified1->newNode), + std::move(simplified2->newNode))}}; } - // At least one child is negated, so we're going to rewrite to ComposeA. + // At least one child is negated, so we're going to rewrite to the negated composition. // If either child was not able to aborb a Not, we'll add an explicit Not to its root. 
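The composition handling above is a De Morgan rewrite: a negated conjunctive composition (PathComposeM) becomes a disjunctive one (PathComposeA) over negated children and vice versa, with `negate_compose_t` selecting the opposite node type. A self-contained sketch of the type-level mapping and the boolean identity behind it, using empty tag types rather than the optimizer's path operators:

```
#include <cassert>
#include <initializer_list>
#include <type_traits>

struct PathComposeA;  // toy stand-in for the "or" composition
struct PathComposeM;  // toy stand-in for the "and" composition

template <class T>
struct negate_compose;
template <>
struct negate_compose<PathComposeA> { using type = PathComposeM; };
template <>
struct negate_compose<PathComposeM> { using type = PathComposeA; };
template <class T>
using negate_compose_t = typename negate_compose<T>::type;

// Negating a composition flips its type...
static_assert(std::is_same_v<negate_compose_t<PathComposeM>, PathComposeA>);
static_assert(std::is_same_v<negate_compose_t<PathComposeA>, PathComposeM>);

int main() {
    // ...because of De Morgan's laws: !(a && b) == (!a || !b) and !(a || b) == (!a && !b).
    for (bool a : {false, true}) {
        for (bool b : {false, true}) {
            assert(!(a && b) == (!a || !b));
            assert(!(a || b) == (!a && !b));
        }
    }
    return 0;
}
```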
if (!simplified1->negated) { simplified1 = {{true, negatePath(std::move(simplified1->newNode))}}; @@ -874,9 +959,17 @@ class NotPushdown { if (!simplified2->negated) { simplified2 = {{true, negatePath(std::move(simplified2->newNode))}}; } - return { - {true, - make(std::move(simplified1->newNode), std::move(simplified2->newNode))}}; + return {{true, + make>(std::move(simplified1->newNode), + std::move(simplified2->newNode))}}; + } + + Result operator()(const ABT& /*n*/, const PathComposeA& compose, const bool negate) { + return handleComposition(compose.getPath1(), compose.getPath2(), negate); + } + + Result operator()(const ABT& /*n*/, const PathComposeM& compose, const bool negate) { + return handleComposition(compose.getPath1(), compose.getPath2(), negate); } Result operator()(const ABT& /*n*/, const EvalFilter& evalF, const bool negate) { @@ -1094,18 +1187,21 @@ struct SubstituteConvert { return; } - PSRExpr::visitDNF(conversion->_reqMap.getRoot(), [&](PartialSchemaEntry& entry) { - auto& [key, req] = entry; - req = { - evalNode.getProjectionName(), std::move(req.getIntervals()), req.getIsPerfOnly()}; - - uassert(6624114, - "Eval partial schema requirement must contain a variable name.", - key._projectionName); - uassert(6624115, - "Eval partial schema requirement cannot have a range", - isIntervalReqFullyOpenDNF(req.getIntervals())); - }); + PSRExpr::visitDNF(conversion->_reqMap.getRoot(), + [&](PartialSchemaEntry& entry, const PSRExpr::VisitorContext&) { + auto& [key, req] = entry; + req = {evalNode.getProjectionName(), + std::move(req.getIntervals()), + req.getIsPerfOnly()}; + + uassert( + 6624114, + "Eval partial schema requirement must contain a variable name.", + key._projectionName); + uassert(6624115, + "Eval partial schema requirement cannot have a range", + isIntervalReqFullyOpenDNF(req.getIntervals())); + }); ProjectionRenames projectionRenames_unused; const bool hasEmptyInterval = simplifyPartialSchemaReqPaths(scanProjName, @@ -1139,11 +1235,12 @@ static void lowerSargableNode(const SargableNode& node, RewriteContext& ctx) { PhysPlanBuilder builder{node.getChild()}; const auto& reqMap = node.getReqMap(); lowerPartialSchemaRequirements(boost::none /*scanGroupCE*/, + boost::none /*baseCE*/, {} /*indexPredSels*/, createResidualReqsWithEmptyCE(reqMap.getRoot()), ctx.getPathToInterval(), builder); - ctx.addNode(builder._node, true /*clear*/); + ctx.addNode(builder._node, true /*substitute*/); } template @@ -1155,7 +1252,6 @@ struct ExploreConvert { * Used to pre-compute properties of a PSR. */ struct RequirementProps { - bool _isFullyOpen; bool _mayReturnNull; }; @@ -1171,99 +1267,215 @@ struct SplitRequirementsResult { }; /** - * Used to split requirements into left and right side. If "isIndex" is false, this is a separation - * between "index" and "fetch" predicates, otherwise it is a separation between the two sides of - * index intersection. The separation handles cases where we may have intervals which include Null - * and return the value, in which case instead of moving the requirement on the left, we insert a - * copy on the right side which will fetch the value from the collection. We convert perf-only - * requirements to non-perf when inserting on the left under "isIndex", otherwise we drop them. The - * mask parameter represents a bitmask indicating which requirements go on the left (bit is 1) and - * which go on the right. + * Helper transport for 'splitRequirementsFetch': adds a PSRExpr::Node to a builder. 
The caller can + * specify whether to keep only predicates, only projections, or both. Implicitly it handles + * perfOnly predicates: either dropping them (on the fetch side) or converting them to non-perfOnly + * (on the index side). + */ +struct SplitRequirementsFetchTransport { + enum class Keep { + kBoth, + kPredicateOnly, + kProjectionOnly, + }; + static void addReq(const bool left, + const PSRExpr::Node& expr, + const Keep keep, + const boost::optional& indexFieldPrefixMap, + PSRExprBuilder& leftReqs, + PSRExprBuilder& rightReqs, + bool& hasFieldCoverage) { + auto& builder = left ? leftReqs : rightReqs; + + SplitRequirementsFetchTransport impl{ + left, + keep, + indexFieldPrefixMap, + builder, + hasFieldCoverage, + }; + algebra::transport(expr, impl); + } + + void prepare(const PSRExpr::Conjunction&) { + builder.pushConj(); + } + void transport(const PSRExpr::Conjunction&, const PSRExpr::NodeVector&) { + builder.pop(); + } + void prepare(const PSRExpr::Disjunction&) { + builder.pushDisj(); + } + void transport(const PSRExpr::Disjunction&, const PSRExpr::NodeVector&) { + builder.pop(); + } + void transport(const PSRExpr::Atom& node) { + const bool keepPred = keep != Keep::kProjectionOnly; + const bool keepProj = keep != Keep::kPredicateOnly; + + const auto& [key, req] = node.getExpr(); + const bool perfOnly = req.getIsPerfOnly(); + auto outputBinding = keepProj ? req.getBoundProjectionName() : boost::none; + // perfOnly predicates on the fetch side become trivially true. + auto intervals = ((perfOnly && !left) || !keepPred) ? IntervalReqExpr::makeSingularDNF() + : req.getIntervals(); + + if (outputBinding || !isIntervalReqFullyOpenDNF(intervals)) { + builder.atom(key, + PartialSchemaRequirement{ + std::move(outputBinding), std::move(intervals), false /*isPerfOnly*/}); + + if (left && indexFieldPrefixMap) { + if (auto pathPtr = key._path.cast(); + pathPtr != nullptr && indexFieldPrefixMap->count(pathPtr->name()) == 0) { + // We have found a left requirement which cannot be covered with an + // index. + hasFieldCoverage = false; + } + } + } else { + // The whole predicate/projection is trivial and its indexability doesn't + // matter. + } + } + + const bool left; + const Keep keep; + const boost::optional& indexFieldPrefixMap; + + PSRExprBuilder& builder; + bool& hasFieldCoverage; +}; + +/** + * Takes a vector of PSRExpr, 'conjuncts', and splits them into an index side (on the left) and a + * fetch side (on the right). + * + * The bitfield 'mask' says how to split: each corresponding bit is 1 for left or 0 for right. + * + * 'perfOnly' predicates are preserved and converted to non-perfOnly when they go on the index side. + * On the fetch side they are dropped, by converting them to trivially-true. + * + * If yielding-tolerant plans are requested (by 'hints._disableYieldingTolerantPlans == false') then + * any predicate that should go on the left, we actually put on both sides. + * + * Some special cases apply when we attempt to put a predicate on the index side: + * - If yielding-tolerant plans are requested (by 'hints._disableYieldingTolerantPlans == false') + * then we put the predicate on both sides. + * - If correct null handling is requested (by 'hints._fastIndexNullHandling == false') and the + * predicate may contain null, we satisfy its output projection (if any) on the fetch side + * instead. 
*/ -static SplitRequirementsResult splitRequirements( +static SplitRequirementsResult splitRequirementsFetch( const size_t mask, - const bool isIndex, const QueryHints& hints, const std::vector& reqProps, - const boost::optional& indexFieldPrefixMapForScanDef, - const PartialSchemaRequirements& reqMap) { - SplitRequirementsResult result; - - result._leftReqsBuilder.pushDisj().pushConj(); - result._rightReqsBuilder.pushDisj().pushConj(); - - const auto addRequirement = [&](const bool left, - PartialSchemaKey key, - boost::optional boundProjectionName, - IntervalReqExpr::Node intervals) { - // We always strip out the perf-only flag. - auto& builder = left ? result._leftReqsBuilder : result._rightReqsBuilder; - builder.atom(std::move(key), - PartialSchemaRequirement{std::move(boundProjectionName), - std::move(intervals), - false /*isPerfOnly*/}); - }; + const boost::optional& indexFieldPrefixMap, + const PSRExpr::NodeVector& conjuncts) { + + bool hasFieldCoverage = true; + PSRExprBuilder leftReqs; + PSRExprBuilder rightReqs; + leftReqs.pushConj(); + rightReqs.pushConj(); + + // Adds a PSRExpr 'expr' to the left or right, as specified by 'left'. + // When adding to the right, replaces any 'perfOnly' atoms with trivially-true. + // When adding to the left, keeps 'perfOnly' atoms and marks them non-perfOnly. + // + // 'keep' specifies whether to keep only the predicate, only the projection, or both. + // It defaults to both. + // + // If we end up adding an unindexed path (one we know does not appear in any index), + // set 'hasFieldCoverage' to false as a signal to bail out. + using Keep = SplitRequirementsFetchTransport::Keep; + const auto addReq = + [&](const bool left, const PSRExpr::Node& expr, const Keep keep = Keep::kBoth) { + SplitRequirementsFetchTransport::addReq( + left, expr, keep, indexFieldPrefixMap, leftReqs, rightReqs, hasFieldCoverage); + }; size_t index = 0; - for (const auto& [key, req] : reqMap.conjuncts()) { - const auto& intervals = req.getIntervals(); - const auto& outputBinding = req.getBoundProjectionName(); - const bool perfOnly = req.getIsPerfOnly(); + + for (const auto& conjunct : conjuncts) { const auto& reqProp = reqProps.at(index); + const bool left = ((1ull << index) & mask); - if (((1ull << index) & mask) == 0) { + if (!left) { // Predicate should go on the right side. - if (isIndex || !perfOnly) { - addRequirement(false /*left*/, key, outputBinding, intervals); - } + addReq(false /*left*/, conjunct); index++; continue; } - // Predicate should go on the left side. - bool addedToLeft = false; - if (isIndex || hints._fastIndexNullHandling || !reqProp._mayReturnNull) { + // Predicate should go on the left side. However: + // - Correct null handling requires moving the projection to the fetch side. + // - Yield-safe plans require duplicating the predicate to both sides. + // - Except that 'perfOnly' predicates become true on the fetch side. + + if (hints._fastIndexNullHandling || !reqProp._mayReturnNull) { // We can never return Null values from the requirement. - if (isIndex || hints._disableYieldingTolerantPlans || perfOnly) { + if (hints._disableYieldingTolerantPlans) { // Insert into left side unchanged. - addRequirement(true /*left*/, key, outputBinding, intervals); + addReq(true /*left*/, conjunct); + } else { // Insert a requirement on the right side too, left side is non-binding. 
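A toy walk-through of the fetch-split rules documented above, under the stated assumption that the yield-tolerant duplication and the index-field-coverage check are omitted; `Req` and the string labels are hypothetical stand-ins for partial schema entries, not the real splitRequirementsFetch types:

```
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Toy model of one conjunct of the SargableNode requirements.
struct Req {
    std::string path;
    bool perfOnly = false;       // only useful to the index side
    bool mayReturnNull = false;  // its interval may admit null
};

int main() {
    const std::vector<Req> conjuncts{{"a", false, false}, {"b", true, false}, {"c", false, true}};
    const std::size_t mask = 0b101;  // 'a' and 'c' go to the index side, 'b' to the fetch side
    const bool fastIndexNullHandling = false;

    std::vector<std::string> indexSide;
    std::vector<std::string> fetchSide;
    for (std::size_t i = 0; i < conjuncts.size(); i++) {
        const Req& req = conjuncts[i];
        if (((1ull << i) & mask) == 0) {
            // Fetch side: perfOnly predicates are dropped (they become trivially true).
            if (!req.perfOnly) {
                fetchSide.push_back(req.path + ": predicate + projection");
            }
            continue;
        }
        if (!fastIndexNullHandling && req.mayReturnNull) {
            // The index cannot safely return the value (it may be null), so the index side keeps
            // only the predicate and the projection is satisfied on the fetch side.
            indexSide.push_back(req.path + ": predicate only");
            fetchSide.push_back(req.path + ": projection");
        } else {
            indexSide.push_back(req.path + ": predicate + projection");
        }
    }
    for (const auto& s : indexSide) {
        std::cout << "index: " << s << "\n";
    }
    for (const auto& s : fetchSide) {
        std::cout << "fetch: " << s << "\n";
    }
    return 0;
}
```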
- addRequirement(true /*left*/, key, boost::none /*boundProjectionName*/, intervals); - addRequirement(false /*left*/, key, outputBinding, intervals); + addReq(true /*left*/, conjunct, Keep::kPredicateOnly); + addReq(false /*left*/, conjunct); } - addedToLeft = true; } else { // At this point we should not be seeing perf-only predicates. - invariant(!perfOnly); - // We cannot return index values if our interval can possibly contain Null. Instead, + // We cannot return index values, since the interval can possibly contain Null. Instead, // we remove the output binding for the left side, and return the value from the // right (seek) side. - if (!reqProp._isFullyOpen) { - addRequirement(true /*left*/, key, boost::none /*boundProjectionName*/, intervals); - addedToLeft = true; - } - addRequirement(false /*left*/, - key, - outputBinding, - hints._disableYieldingTolerantPlans ? IntervalReqExpr::makeSingularDNF() - : intervals); + addReq(true /*left*/, conjunct, Keep::kPredicateOnly); + addReq(false /*left*/, + conjunct, + // Yield-safe plans keep both the predicate and projection on the fetch side. + // Yield-unsafe plans only need the projection. + hints._disableYieldingTolerantPlans ? Keep::kProjectionOnly : Keep::kBoth); } - if (addedToLeft && indexFieldPrefixMapForScanDef) { - if (auto pathPtr = key._path.cast(); - pathPtr != nullptr && indexFieldPrefixMapForScanDef->count(pathPtr->name()) == 0) { - // We have found a left requirement which cannot be covered with an - // index. - result._hasFieldCoverage = false; - break; - } + if (!hasFieldCoverage) { + break; } index++; } - return result; + return { + std::move(leftReqs), + std::move(rightReqs), + hasFieldCoverage, + }; +} + +static SplitRequirementsResult splitRequirementsIndex(const size_t mask, + const PSRExpr::NodeVector& reqs, + const bool disjunctive) { + PSRExprBuilder leftReqs; + PSRExprBuilder rightReqs; + if (disjunctive) { + leftReqs.pushDisj(); + rightReqs.pushDisj(); + } else { + leftReqs.pushConj(); + rightReqs.pushConj(); + } + + size_t index = 0; + for (const auto& req : reqs) { + if ((1ull << index) & mask) { + leftReqs.subtree(req); + } else { + rightReqs.subtree(req); + } + + index++; + } + + return {std::move(leftReqs), std::move(rightReqs)}; } template <> @@ -1287,11 +1499,6 @@ struct ExploreConvert { return; } - // SERVER-69026: Support "splitting" a top-level disjunction for index ORing. - if (!PSRExpr::isSingletonDisjunction(sargableNode.getReqMap().getRoot())) { - return; - } - const std::string& scanDefName = indexingAvailability.getScanDefName(); const ScanDefinition& scanDef = ctx.getMetadata()._scanDefs.at(scanDefName); if (scanDef.getIndexDefs().empty()) { @@ -1315,6 +1522,42 @@ struct ExploreConvert { const bool isIndex = target == IndexReqTarget::Index; + // Decide whether to do a conjunctive or disjunctive split. + // Rearrange the predicates so that the top-level node is the one we want to split: + // - DNF if we want a disjunctive split. + // - CNF if we want a conjunctive split. + boost::optional splittable; + { + const auto& reqMap = sargableNode.getReqMap().getRoot(); + if (isIndex) { + // When targeting an index, do a disjunctive split if possible. + if (PSRExpr::isSingletonDisjunction(reqMap)) { + // Trivial disjunction means we can only do a conjunctive split. 
+ splittable = PSRExpr::convertToCNF(reqMap, SargableNode::kMaxPartialSchemaReqs); + tassert(6902602, + "converting DNF with only trivial disjunction to CNF should never fail", + splittable); + } else { + splittable = reqMap; + } + } else { + // When targeting 'Complete', the only split we allow is index/fetch, + // because we want to do all union/intersection of record IDs within the index side, + // to avoid redundant fetching. + + // Index/fetch is a conjunctive split. + splittable = PSRExpr::convertToCNF(reqMap); + } + } + if (!splittable) { + // Conversion between DNF/CNF can fail if the result would be too big. + return; + } + const bool disjunctive = splittable->is(); + const PSRExpr::NodeVector& reqs = disjunctive + ? splittable->cast()->nodes() + : splittable->cast()->nodes(); + const auto& indexFieldPrefixMap = ctx.getIndexFieldPrefixMap(); boost::optional indexFieldPrefixMapForScanDef; if (auto it = indexFieldPrefixMap.find(scanDefName); @@ -1322,21 +1565,27 @@ struct ExploreConvert { indexFieldPrefixMapForScanDef = it->second; } - const auto& reqMap = sargableNode.getReqMap(); const auto& hints = ctx.getHints(); // Pre-computed properties of the requirements. + // We only need these for the index/fetch split. std::vector reqProps; - for (const auto& [key, req] : reqMap.conjuncts()) { - // Pre-compute if a requirement's interval is fully open. - const bool fullyOpen = isIntervalReqFullyOpenDNF(req.getIntervals()); - - // Pre-compute if a requirement's interval may contain nulls, and also has an output - // binding. Do use constant folding if we do not have to. - const bool mayReturnNull = - !hints._fastIndexNullHandling && !isIndex && req.mayReturnNull(ctx.getConstFold()); - - reqProps.push_back({fullyOpen, mayReturnNull}); + if (!isIndex) { + reqProps.reserve(reqs.size()); + for (const auto& conjunct : reqs) { + // Pre-compute if a requirement's interval is fully open. + + // Pre-compute if a requirement's interval may contain nulls, and also has an output + // binding. Do use constant folding if we do not have to. + const bool mayReturnNull = !hints._fastIndexNullHandling && + PSRExpr::any(conjunct, [&](const PartialSchemaEntry& entry) { + return entry.second.mayReturnNull(ctx.getConstFold()); + }); + + reqProps.push_back({ + mayReturnNull, + }); + } } // We iterate over the possible ways to split N predicates into 2^N subsets, one goes to the @@ -1344,23 +1593,93 @@ struct ExploreConvert { // try having at least one predicate on the left (mask = 1), and we try all possible // subsets. For index intersection however (isIndex = true), we try symmetric partitioning // (thus the high bound is 2^(N-1)). - const size_t reqSize = PSRExpr::numLeaves(reqMap.getRoot()); // assumes singular DNF - const size_t highMask = isIndex ? (1ull << (reqSize - 1)) : (1ull << reqSize); + const size_t highMask = isIndex ? (1ull << (reqs.size() - 1)) : (1ull << reqs.size()); for (size_t mask = 1; mask < highMask; mask++) { - SplitRequirementsResult splitResult = splitRequirements( - mask, isIndex, hints, reqProps, indexFieldPrefixMapForScanDef, reqMap); - - auto leftReqs = splitResult._leftReqsBuilder.finish(); - auto rightReqs = splitResult._rightReqsBuilder.finish(); + auto splitResult = isIndex + ? splitRequirementsIndex(mask, reqs, disjunctive) + : splitRequirementsFetch( + mask, hints, reqProps, indexFieldPrefixMapForScanDef, reqs); + if (!splitResult._hasFieldCoverage) { + // Reject rewrite. No suitable indexes. 
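
As an editorial aside, the bitmask enumeration described above ("2^N subsets", with the symmetric bound 2^(N-1) for index intersection) can be sketched on its own. The snippet below is illustrative only; predicate strings stand in for the PSRExpr conjuncts/disjuncts being split.

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

int main() {
    const std::vector<std::string> preds = {"a > 1", "b < 5", "c == 3"};
    const size_t n = preds.size();
    const bool isIndex = true;  // symmetric partitioning for index intersection
    const size_t highMask = isIndex ? (1ull << (n - 1)) : (1ull << n);

    for (size_t mask = 1; mask < highMask; mask++) {
        std::string left, right;
        for (size_t i = 0; i < n; i++) {
            // Bit i set: predicate i goes to the left child, otherwise to the right.
            ((1ull << i) & mask ? left : right) += preds[i] + "; ";
        }
        std::cout << "mask=" << mask << "  left=[" << left << "] right=[" << right << "]\n";
    }
    return 0;
}
```
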
+ continue; + } + auto leftReqExpr = splitResult._leftReqsBuilder.finish(); + auto rightReqExpr = splitResult._rightReqsBuilder.finish(); - if (!leftReqs) { + if (!leftReqExpr) { // Can happen if we have intervals containing null. invariant(!hints._fastIndexNullHandling && !isIndex); continue; } + // Convert everything back to DNF. + if (!PSRExpr::isDNF(*leftReqExpr)) { + leftReqExpr = PSRExpr::convertToDNF(std::move(*leftReqExpr)); + if (!leftReqExpr) { + continue; + } + } + if (rightReqExpr && !PSRExpr::isDNF(*rightReqExpr)) { + rightReqExpr = PSRExpr::convertToDNF(std::move(*rightReqExpr)); + if (!rightReqExpr) { + continue; + } + } + boost::optional leftReqs; + if (leftReqExpr) { + leftReqs.emplace(std::move(*leftReqExpr)); + } + boost::optional rightReqs; + if (rightReqExpr) { + rightReqs.emplace(std::move(*rightReqExpr)); + } + + // DNF / CNF conversions can create redundant predicates; try to simplify. + // If the reqs are too big, even after simplification, creating a SargableNode will + // fail, so bail out. + const auto isTooBig = [&](const PSRExpr::Node& reqs) -> bool { + return PSRExpr::numLeaves(reqs) > SargableNode::kMaxPartialSchemaReqs; + }; + if (leftReqs) { + PartialSchemaRequirements::simplifyRedundantDNF(leftReqs->getRoot()); + ProjectionRenames renames; + const bool hasEmptyInterval = + simplifyPartialSchemaReqPaths(scanProjectionName, + scanDef.getMultikeynessTrie(), + *leftReqs, + renames, + ctx.getConstFold()); + tassert(6902605, + "Did not expect projection renames from CNF -> DNF conversion", + renames.empty()); + if (hasEmptyInterval) { + continue; + } + if (isTooBig(leftReqs->getRoot())) { + continue; + } + } + if (rightReqs) { + PartialSchemaRequirements::simplifyRedundantDNF(rightReqs->getRoot()); + ProjectionRenames renames; + const bool hasEmptyInterval = + simplifyPartialSchemaReqPaths(scanProjectionName, + scanDef.getMultikeynessTrie(), + *rightReqs, + renames, + ctx.getConstFold()); + tassert(6902604, + "Did not expect projection renames from CNF -> DNF conversion", + renames.empty()); + if (hasEmptyInterval) { + continue; + } + if (isTooBig(rightReqs->getRoot())) { + continue; + } + } - const bool hasLeftintervals = hasProperIntervals(*leftReqs); - const bool hasRightIntervals = rightReqs && hasProperIntervals(*rightReqs); + const bool hasLeftintervals = hasProperIntervals(leftReqs->getRoot()); + const bool hasRightIntervals = rightReqs && hasProperIntervals(rightReqs->getRoot()); if (isIndex) { if (!hasLeftintervals || !hasRightIntervals) { // Reject. Must have at least one proper interval on either side. @@ -1371,19 +1690,17 @@ struct ExploreConvert { continue; } - if (!splitResult._hasFieldCoverage) { - // Reject rewrite. No suitable indexes. - continue; - } - auto leftCandidateIndexes = computeCandidateIndexes(ctx.getPrefixId(), scanProjectionName, *leftReqs, scanDef, hints, - ctx.getConstFold()); - if (isIndex && leftCandidateIndexes.empty()) { - // Reject rewrite. + ctx.getConstFold(), + isIndex); + if (isIndex && leftCandidateIndexes.empty() && + PSRExpr::isSingletonDisjunction(leftReqs->getRoot())) { + // Reject rewrite, because further splitting can only be conjunctive, + // which does not increase the set of candidate indexes. continue; } @@ -1394,11 +1711,14 @@ struct ExploreConvert { *rightReqs, scanDef, hints, - ctx.getConstFold()); + ctx.getConstFold(), + isIndex); } - if (isIndex && rightCandidateIndexes.empty()) { - // With empty candidate map, reject only if we cannot implement as Seek. 
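
The size cap mentioned above exists because distributing clauses during a CNF-to-DNF conversion multiplies their sizes: k clauses of m literals each can blow up to m^k conjuncts. A small editorial sketch, using a toy clause representation rather than the optimizer's BoolExpr, and a made-up cap in place of SargableNode::kMaxPartialSchemaReqs:

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

using Clause = std::vector<std::string>;  // a disjunction of literals

int main() {
    // (a1 OR a2) AND (b1 OR b2) AND (c1 OR c2)  -- CNF with 3 clauses.
    const std::vector<Clause> cnf = {{"a1", "a2"}, {"b1", "b2"}, {"c1", "c2"}};
    const size_t maxConjuncts = 10;  // stand-in for a size cap

    std::vector<Clause> dnf = {{}};  // start with one empty conjunct
    for (const Clause& clause : cnf) {
        std::vector<Clause> next;
        for (const Clause& conjunct : dnf) {
            for (const std::string& lit : clause) {
                Clause extended = conjunct;
                extended.push_back(lit);
                next.push_back(std::move(extended));
            }
        }
        if (next.size() > maxConjuncts) {
            std::cout << "too big: bail out of the conversion\n";
            return 0;
        }
        dnf = std::move(next);
    }
    std::cout << "DNF has " << dnf.size() << " conjuncts\n";  // 2*2*2 = 8
    return 0;
}
```
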
+ if (isIndex && rightCandidateIndexes.empty() && + PSRExpr::isSingletonDisjunction(rightReqs->getRoot())) { + // Reject rewrite, because further splitting can only be conjunctive, + // which does not increase the set of candidate indexes. continue; } @@ -1423,8 +1743,11 @@ struct ExploreConvert { scanDelegator) : scanDelegator; - ABT newRoot = make( - scanProjectionName, std::move(leftChild), std::move(rightChild)); + ABT newRoot = disjunctive + ? make( + scanProjectionName, std::move(leftChild), std::move(rightChild)) + : make( + scanProjectionName, std::move(leftChild), std::move(rightChild)); const auto& result = ctx.addNode(newRoot, false /*substitute*/); for (const MemoLogicalNodeId nodeId : result.second) { diff --git a/src/mongo/db/query/optimizer/cascades/logical_rewriter.h b/src/mongo/db/query/optimizer/cascades/logical_rewriter.h index 5ed180a222b1d..6d345664de79f 100644 --- a/src/mongo/db/query/optimizer/cascades/logical_rewriter.h +++ b/src/mongo/db/query/optimizer/cascades/logical_rewriter.h @@ -29,10 +29,21 @@ #pragma once +#include +#include #include +#include +#include +#include +#include "mongo/db/query/optimizer/cascades/interfaces.h" #include "mongo/db/query/optimizer/cascades/memo.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/const_fold_interface.h" #include "mongo/db/query/optimizer/utils/utils.h" namespace mongo::optimizer::cascades { diff --git a/src/mongo/db/query/optimizer/cascades/memo.cpp b/src/mongo/db/query/optimizer/cascades/memo.cpp index 64bdc811291c4..ec8f0007a4fcd 100644 --- a/src/mongo/db/query/optimizer/cascades/memo.cpp +++ b/src/mongo/db/query/optimizer/cascades/memo.cpp @@ -29,12 +29,36 @@ #include "mongo/db/query/optimizer/cascades/memo.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/abt_hash.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer::cascades { @@ -123,7 +147,14 @@ const ExpressionBinder& Group::binder() const { } /** - * TODO SERVER-70407: Improve documentation around the Memo and related classes. + * MemoIntegrator adds a logical plan to a Memo, by putting each node (stage) in the appropriate + * group, and returning the group ID where the root node can be found. + * + * It works recursively: to add a node to a Memo, first add each child, and replace each child with + * a MemoLogicalDelegator that refers to the group where that child was put. 
+ * + * After stubbing out the children it relies on 'Memo::addNode' to ensure that if a syntactically + * equal node is already in some group, we reuse it. */ class MemoIntegrator { public: @@ -131,14 +162,12 @@ class MemoIntegrator { Memo& memo, Memo::NodeTargetGroupMap targetGroupMap, NodeIdSet& insertedNodeIds, - const LogicalRewriteType rule, - const bool addExistingNodeWithNewChild) + const LogicalRewriteType rule) : _ctx(std::move(ctx)), _memo(memo), _insertedNodeIds(insertedNodeIds), _targetGroupMap(std::move(targetGroupMap)), - _rule(rule), - _addExistingNodeWithNewChild(addExistingNodeWithNewChild) {} + _rule(rule) {} // This is a transient structure. We do not allow copying or moving. MemoIntegrator(const MemoIntegrator& /*other*/) = delete; @@ -149,9 +178,6 @@ class MemoIntegrator { /** * Nodes */ - void prepare(const ABT& n, const ScanNode& node, const VariableEnvironment& /*env*/) { - // noop - } GroupIdType transport(const ABT& n, const ScanNode& node, @@ -160,10 +186,6 @@ class MemoIntegrator { return addNodes(n, node, n, env, {}); } - void prepare(const ABT& n, const ValueScanNode& node, const VariableEnvironment& /*env*/) { - // noop - } - GroupIdType transport(const ABT& n, const ValueScanNode& node, const VariableEnvironment& env, @@ -171,12 +193,6 @@ class MemoIntegrator { return addNodes(n, node, n, env, {}); } - void prepare(const ABT& n, - const MemoLogicalDelegatorNode& node, - const VariableEnvironment& /*env*/) { - // noop - } - GroupIdType transport(const ABT& n, const MemoLogicalDelegatorNode& node, const VariableEnvironment& env) { @@ -187,10 +203,6 @@ class MemoIntegrator { return addNodes(n, node, n, env, {}); } - void prepare(const ABT& n, const FilterNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const FilterNode& node, const VariableEnvironment& env, @@ -199,10 +211,6 @@ class MemoIntegrator { return addNode(n, node, env, child); } - void prepare(const ABT& n, const EvaluationNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const EvaluationNode& node, const VariableEnvironment& env, @@ -211,10 +219,6 @@ class MemoIntegrator { return addNode(n, node, env, child); } - void prepare(const ABT& n, const SargableNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const SargableNode& node, const VariableEnvironment& env, @@ -224,14 +228,6 @@ class MemoIntegrator { return addNode(n, node, env, child); } - void prepare(const ABT& n, const RIDIntersectNode& node, const VariableEnvironment& /*env*/) { - // noop. - } - - void prepare(const ABT& n, const RIDUnionNode& node, const VariableEnvironment& /*env*/) { - // noop. 
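
For readers unfamiliar with memo integration, here is a compact editorial sketch of the bottom-up scheme the new MemoIntegrator comment describes: children are integrated first and replaced by delegator references to their groups, and a lookup table makes syntactically equal nodes land in the same group. All types here (Node, Memo) are simplified stand-ins, not the MongoDB classes.

```cpp
#include <map>
#include <memory>
#include <string>
#include <vector>

struct Node {
    std::string op;                               // e.g. "Scan", "Filter"
    std::vector<std::shared_ptr<Node>> children;  // replaced by delegators below
};

struct Memo {
    // Group id -> textual forms of the nodes in that group.
    std::map<int, std::vector<std::string>> groups;
    // Dedupe: syntactically equal nodes map to the same group.
    std::map<std::string, int> nodeToGroup;

    int addNode(const std::string& form) {
        if (auto it = nodeToGroup.find(form); it != nodeToGroup.end()) {
            return it->second;  // reuse the existing group
        }
        const int groupId = static_cast<int>(groups.size());
        groups[groupId].push_back(form);
        nodeToGroup[form] = groupId;
        return groupId;
    }

    // Integrate a plan: children first, then the node with children stubbed out.
    int integrate(const Node& n) {
        std::string form = n.op + "(";
        for (const auto& child : n.children) {
            form += "Delegator[" + std::to_string(integrate(*child)) + "]";
        }
        form += ")";
        return addNode(form);
    }
};

int main() {
    auto scan = std::make_shared<Node>(Node{"Scan", {}});
    Memo memo;
    const int g1 = memo.integrate(Node{"Filter", {scan}});
    const int g2 = memo.integrate(Node{"Filter", {scan}});  // dedupes to the same group
    return g1 == g2 ? 0 : 1;
}
```
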
- } - GroupIdType transport(const ABT& n, const RIDIntersectNode& node, const VariableEnvironment& env, @@ -248,10 +244,6 @@ class MemoIntegrator { return addNodes(n, node, env, leftChild, rightChild); } - void prepare(const ABT& n, const BinaryJoinNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapBinary(n, node); - } - GroupIdType transport(const ABT& n, const BinaryJoinNode& node, const VariableEnvironment& env, @@ -261,10 +253,6 @@ class MemoIntegrator { return addNodes(n, node, env, leftChild, rightChild); } - void prepare(const ABT& n, const UnionNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapNary(n, node); - } - GroupIdType transport(const ABT& n, const UnionNode& node, const VariableEnvironment& env, @@ -274,10 +262,6 @@ class MemoIntegrator { return addNodes(n, node, env, std::move(children)); } - void prepare(const ABT& n, const GroupByNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const GroupByNode& node, const VariableEnvironment& env, @@ -289,10 +273,6 @@ class MemoIntegrator { return addNode(n, node, env, child); } - void prepare(const ABT& n, const UnwindNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const UnwindNode& node, const VariableEnvironment& env, @@ -302,10 +282,6 @@ class MemoIntegrator { return addNode(n, node, env, child); } - void prepare(const ABT& n, const CollationNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const CollationNode& node, const VariableEnvironment& env, @@ -314,10 +290,6 @@ class MemoIntegrator { return addNode(n, node, env, child); } - void prepare(const ABT& n, const LimitSkipNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const LimitSkipNode& node, const VariableEnvironment& env, @@ -325,10 +297,6 @@ class MemoIntegrator { return addNode(n, node, env, child); } - void prepare(const ABT& n, const ExchangeNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const ExchangeNode& node, const VariableEnvironment& env, @@ -337,10 +305,6 @@ class MemoIntegrator { return addNode(n, node, env, child); } - void prepare(const ABT& n, const RootNode& node, const VariableEnvironment& /*env*/) { - updateTargetGroupMapUnary(n, node); - } - GroupIdType transport(const ABT& n, const RootNode& node, const VariableEnvironment& env, @@ -361,11 +325,6 @@ class MemoIntegrator { return -1; } - template - void prepare(const ABT& n, const T& /*node*/, const VariableEnvironment& /*env*/) { - static_assert(!canBeLogicalNode(), "Logical node must implement its prepare."); - } - GroupIdType integrate(const ABT& n) { return algebra::transport(n, *this, VariableEnvironment::build(n, &_memo)); } @@ -430,105 +389,20 @@ class MemoIntegrator { return addNodes(n, node, std::move(forMemo), env, {leftGroupId, rightGroupId}); } - template - ABT::reference_type findExistingNodeFromTargetGroupMap(const ABT& n, const T& node) { - auto it = _targetGroupMap.find(n.ref()); - if (it == _targetGroupMap.cend()) { - return nullptr; - } - if (const auto index = _memo.findNodeInGroup(it->second, n.ref())) { - ABT::reference_type result = _memo.getNode({it->second, *index}); - uassert(6624049, "Node type in memo does not match target type", 
result.is()); - return result; - } - return nullptr; - } - - void updateTargetGroupRefs( - const std::vector>& childGroups) { - for (auto [childRef, targetGroupId] : childGroups) { - auto it = _targetGroupMap.find(childRef); - if (it == _targetGroupMap.cend()) { - _targetGroupMap.emplace(childRef, targetGroupId); - } else if (it->second != targetGroupId) { - uasserted(6624050, "Incompatible target groups for parent and child"); - } - } - } - - template - void updateTargetGroupMapUnary(const ABT& n, const T& node) { - if (_addExistingNodeWithNewChild) { - return; - } - - ABT::reference_type existing = findExistingNodeFromTargetGroupMap(n, node); - if (!existing.empty()) { - const GroupIdType targetGroupId = existing.cast() - ->getChild() - .template cast() - ->getGroupId(); - updateTargetGroupRefs({{node.getChild().ref(), targetGroupId}}); - } - } - - template - void updateTargetGroupMapNary(const ABT& n, const T& node) { - ABT::reference_type existing = findExistingNodeFromTargetGroupMap(n, node); - if (!existing.empty()) { - const ABTVector& existingChildren = existing.cast()->nodes(); - const ABTVector& targetChildren = node.nodes(); - uassert(6624051, - "Different number of children between existing and target node", - existingChildren.size() == targetChildren.size()); - - std::vector> childGroups; - for (size_t i = 0; i < existingChildren.size(); i++) { - const ABT& existingChild = existingChildren.at(i); - const ABT& targetChild = targetChildren.at(i); - childGroups.emplace_back( - targetChild.ref(), - existingChild.cast()->getGroupId()); - } - updateTargetGroupRefs(childGroups); - } - } - - template - void updateTargetGroupMapBinary(const ABT& n, const T& node) { - ABT::reference_type existing = findExistingNodeFromTargetGroupMap(n, node); - if (existing.empty()) { - return; - } - - const T& existingNode = *existing.cast(); - const GroupIdType leftGroupId = - existingNode.getLeftChild().template cast()->getGroupId(); - const GroupIdType rightGroupId = - existingNode.getRightChild().template cast()->getGroupId(); - updateTargetGroupRefs( - {{node.getLeftChild().ref(), leftGroupId}, {node.getRightChild().ref(), rightGroupId}}); - } - - /** - * We do not own any of these. - */ + // Contains several resources required for calling Memo::addNode. Memo::Context _ctx; + // The memo to be updated. Memo& _memo; + // An out param, for reporting nodes we insert to the memo. NodeIdSet& _insertedNodeIds; - /** - * We own this. - */ + // Each [node, groupId] entry means force the given node to go into the given group. + // This is only used for the root node passed in to Memo::integrate(), so there should be + // at most 1 entry. Memo::NodeTargetGroupMap _targetGroupMap; // Rewrite rule that triggered this node to be created. const LogicalRewriteType _rule; - - // If set we enable modification of target group based on existing nodes. In practical terms, we - // would not assume that if F(x) = F(y) then x = y. This is currently used in conjunction with - // $elemMatch rewrite (PathTraverse over PathCompose). 
- bool _addExistingNodeWithNewChild; }; Memo::Context::Context(const Metadata* metadata, @@ -565,30 +439,11 @@ Group& Memo::getGroup(const GroupIdType groupId) { return *_groups.at(groupId); } -boost::optional Memo::findNodeInGroup(GroupIdType groupId, ABT::reference_type node) const { - return getGroup(groupId)._logicalNodes.find(node); -} - GroupIdType Memo::addGroup(ProjectionNameSet projections) { _groups.emplace_back(std::make_unique(std::move(projections))); return _groups.size() - 1; } -std::pair Memo::addNode(GroupIdType groupId, - ABT n, - LogicalRewriteType rule) { - uassert(6624052, "Attempting to insert a physical node", !n.is()); - - Group& group = *_groups.at(groupId); - OrderPreservingABTSet& nodes = group._logicalNodes; - - const auto [index, inserted] = nodes.emplace_back(std::move(n)); - if (inserted) { - group._rules.push_back(rule); - } - return {{groupId, index}, inserted}; -} - ABT::reference_type Memo::getNode(const MemoLogicalNodeId nodeMemoId) const { return getGroup(nodeMemoId._groupId)._logicalNodes.at(nodeMemoId._index); } @@ -613,20 +468,24 @@ static bool isSimpleIdLookup(ABT::reference_type n) { } bool isIdLookup = false; - PSRExpr::visitAnyShape(node->getReqMap().getRoot(), [&](const PartialSchemaEntry& entry) { - if (const auto interval = IntervalReqExpr::getSingularDNF(entry.second.getIntervals()); - !interval || !interval->isEquality()) { - return; - } - if (const PathGet* getPtr = entry.first._path.cast(); - getPtr && getPtr->name() == "_id") { - if (getPtr->getPath().is()) { - isIdLookup = true; - } else if (const PathTraverse* traversePtr = getPtr->getPath().cast()) { - isIdLookup = traversePtr->getPath().is(); + PSRExpr::visitAnyShape( + node->getReqMap().getRoot(), + [&](const PartialSchemaEntry& entry, const PSRExpr::VisitorContext& ctx) { + if (const auto interval = IntervalReqExpr::getSingularDNF(entry.second.getIntervals()); + !interval || !interval->isEquality()) { + return; } - } - }); + if (const PathGet* getPtr = entry.first._path.cast(); + getPtr && getPtr->name() == "_id") { + if (getPtr->getPath().is()) { + isIdLookup = true; + ctx.returnEarly(); + } else if (const PathTraverse* traversePtr = + getPtr->getPath().cast()) { + isIdLookup = traversePtr->getPath().is(); + } + } + }); return isIdLookup; } @@ -653,19 +512,20 @@ void Memo::estimateCE(const Context& ctx, const GroupIdType groupId) { invariant(partialSchemaKeyCE.empty()); // Cache estimation for each individual requirement. - PSRExpr::visitDNF(sargablePtr->getReqMap().getRoot(), [&](const PartialSchemaEntry& e) { - ABT singularReq = - make(PartialSchemaRequirements{PSRExpr::makeSingularDNF(e)}, - CandidateIndexes{}, - ScanParams{}, - sargablePtr->getTarget(), - sargablePtr->getChild()); - const CEType singularEst = simpleIdLookup - ? CEType{1.0} - : ctx._cardinalityEstimator->deriveCE( - *ctx._metadata, *this, props, singularReq.ref()); - partialSchemaKeyCE.emplace_back(e.first, singularEst); - }); + PSRExpr::visitDNF(sargablePtr->getReqMap().getRoot(), + [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + ABT singularReq = make( + PartialSchemaRequirements{PSRExpr::makeSingularDNF(e)}, + CandidateIndexes{}, + ScanParams{}, + sargablePtr->getTarget(), + sargablePtr->getChild()); + const CEType singularEst = simpleIdLookup + ? 
CEType{1.0} + : ctx._cardinalityEstimator->deriveCE( + *ctx._metadata, *this, props, singularReq.ref()); + partialSchemaKeyCE.emplace_back(e.first, singularEst); + }); } properties::setPropertyOverwrite(props, std::move(ceProp)); @@ -682,6 +542,7 @@ MemoLogicalNodeId Memo::addNode(const Context& ctx, NodeIdSet& insertedNodeIds, ABT n, const LogicalRewriteType rule) { + uassert(6624052, "Attempting to insert a physical node", !n.is()); for (const GroupIdType groupId : groupVector) { // Invalid tree: node is its own child. uassert(6624127, "Target group appears inside group vector", groupId != targetGroupId); @@ -703,11 +564,18 @@ MemoLogicalNodeId Memo::addNode(const Context& ctx, // Current node is not in the memo. Insert unchanged. const GroupIdType groupId = noTargetGroup ? addGroup(std::move(projections)) : targetGroupId; - auto [newId, inserted] = addNode(groupId, std::move(n), rule); + Group& group = *_groups.at(groupId); + OrderPreservingABTSet& nodes = group._logicalNodes; + const auto [index, inserted] = nodes.emplace_back(std::move(n)); + if (inserted) { + group._rules.push_back(rule); + } + auto newId = MemoLogicalNodeId{groupId, index}; + if (inserted || noTargetGroup) { insertedNodeIds.insert(newId); _inputGroupsToNodeIdMap[groupVector].insert(newId); - _nodeIdToInputGroupsMap[newId] = groupVector; + _nodeIdToInputGroupsMap[newId] = std::move(groupVector); if (noTargetGroup) { estimateCE(ctx, groupId); @@ -731,11 +599,9 @@ GroupIdType Memo::integrate(const Memo::Context& ctx, const ABT& node, NodeTargetGroupMap targetGroupMap, NodeIdSet& insertedNodeIds, - const LogicalRewriteType rule, - const bool addExistingNodeWithNewChild) { + const LogicalRewriteType rule) { _stats._numIntegrations++; - MemoIntegrator integrator( - ctx, *this, std::move(targetGroupMap), insertedNodeIds, rule, addExistingNodeWithNewChild); + MemoIntegrator integrator(ctx, *this, std::move(targetGroupMap), insertedNodeIds, rule); return integrator.integrate(node); } diff --git a/src/mongo/db/query/optimizer/cascades/memo.h b/src/mongo/db/query/optimizer/cascades/memo.h index 26c6148f1edbd..123095f6dd321 100644 --- a/src/mongo/db/query/optimizer/cascades/memo.h +++ b/src/mongo/db/query/optimizer/cascades/memo.h @@ -29,7 +29,10 @@ #pragma once +#include +#include #include +#include #include #include #include @@ -39,6 +42,12 @@ #include "mongo/db/query/optimizer/cascades/memo_explain_interface.h" #include "mongo/db/query/optimizer/cascades/memo_group_binder_interface.h" #include "mongo/db/query/optimizer/cascades/rewrite_queues.h" +#include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer::cascades { @@ -87,30 +96,87 @@ struct PhysNodes { opt::unordered_map _physPropsToPhysNodeMap; }; +/** + * Represents a set of equivalent query plans. See 'class Memo' for more detail. + */ struct Group { explicit Group(ProjectionNameSet projections); Group(const Group&) = delete; Group(Group&&) = default; + // Returns the set of bindings that all plans in this group are expected to produce. + // (Since all plans in a Group are equivalent, they all must produce the same bindings.) const ExpressionBinder& binder() const; - // Associated logical nodes. + // Contains a set of equivalent logical plans. 
Each element is a LogicalNode, and its immediate + // children are MemoLogicalDelegatorNode. This ensures every logical node has an + // associated group. For example we would never have (Filter B (Filter A (Delegator _))) here + // because 'Filter A' would have no associated group. OrderPreservingABTSet _logicalNodes; - // Rule that triggered each logical node. + // Stores, for each logical node, the rewrite rule that first caused that node to be created. + // '_rules[i]' corresponds to '_logicalNodes[i]'. + // Used only for explain / debugging. std::vector _rules; - // Group logical properties. + // Contains logical properties that are derived bottom-up from the first logical plan in the + // group. Since all plans in the group are expected to be equivalent, the logical properties are + // expected to be true for all plans in the group. properties::LogicalProps _logicalProperties; + + // Same as 'binder()'. ABT _binder; + // A collection of 'LogicalRewriteEntry', indicating which rewrites we will attempt next, and at + // which node. + // + // Each entry represents a specific rewrite rule, and a specific node. Typically there are many + // entries pointing to the same node, but each for a different rewrite rule. In + // 'LogicalRewriter::addNode', for every newly added node we schedule all possible rewrites + // which transform it or reorder it against other nodes. The goal is to try all possible ways to + // generate new plans using this new node. LogicalRewriteQueue _logicalRewriteQueue; // Best physical plan for given physical properties: aka "Winner's circle". + // + // Unlike '_logicalNodes', the immediate children of physical nodes are not required to be + // delegator nodes. Each entry in '_physicalNodes' can be a complex tree of nodes, which may or + // may not end in 'MemoPhysicalDelegatorNode' at the leaves. PhysNodes _physicalNodes; }; /** - * TODO SERVER-70407: Improve documentation around the Memo and related classes. + * A Memo holds all the alternative plans for a query, and for all of its subqueries. + * + * A Memo is made of 'groups': a group is a set of alternative plans that produce the same result + * (the same bag of rows). You can think of a group as representing a question: "what is the best + * plan for this query?". During optimization a group holds several possible answers, and at the end + * we will choose the best answer based on cost estimates. + * + * The logical plans in a group are all interchangeable, since they compute the same bag. Anywhere + * one logical plan can appear, so can an equivalent one: it doesn't change the overall result. + * So, the logical plans in a group are all stored together in one ABTVector. + * + * By contrast, not all physical plans are interchangeable. For example, the MergeJoin algorithm + * requires sorted input. So the physical plans in a group are stored separately, to answer separate + * questions: + * - "What is the best physical plan whose results are sorted by <some field>?" + * - "What is the best physical plan that uses an index?" + * - "What is the best physical plan whose results are sorted by (<field1>, <field2>), and uses an index?" + * - "What is the best physical plan (with no constraints)?" + * etc. Each set of physical properties is a different optimization question. So a group has a + * mapping from set of physical properties, to the best physical plan discovered so far that + * produces the same logical result and satisfies those properties.
For optimization we only need + * the best plan for each set of properties, but if 'keepRejectedPlans' is enabled then we keep the + * non-best plans for debugging. + * + * Typically a Memo is populated by calling 'integrate()' to add the initial logical plan, and then + * letting rewrite rules add more plans. + * - In the substitution phase, 'RewriteContext' uses 'Memo::clearLogicalNodes()' and + * 'Memo::integrate()' to replace a group with a single logical node. + * - In the exploration phase, 'RewriteContext' uses 'Memo::integrate()' to add alternative logical + * plans to a group. + * - In the implementation phase, 'PhysicalRewriter' uses 'PhysNodes::addOptimizationResult()' to + * update the set of physical plans. */ class Memo : public MemoExplainInterface, public MemoGroupBinderInterface { // To be able to access _stats field. @@ -178,8 +244,6 @@ class Memo : public MemoExplainInterface, public MemoGroupBinderInterface { LogicalRewriteQueue& getLogicalRewriteQueue(GroupIdType groupId); - boost::optional findNodeInGroup(GroupIdType groupId, ABT::reference_type node) const; - ABT::reference_type getNode(MemoLogicalNodeId nodeMemoId) const; /** @@ -189,20 +253,20 @@ class Memo : public MemoExplainInterface, public MemoGroupBinderInterface { */ void estimateCE(const Context& ctx, GroupIdType groupId); - MemoLogicalNodeId addNode(const Context& ctx, - GroupIdVector groupVector, - ProjectionNameSet projections, - GroupIdType targetGroupId, - NodeIdSet& insertedNodeIds, - ABT n, - LogicalRewriteType rule); - + /** + * Takes a logical plan, and adds each Node to the appropriate group. + * + * Caller can use 'targetGroupMap' to force a node to go into a desired group. + * The out-param 'insertedNodeIds' tells the caller which nodes were newly inserted. + * Optional 'rule' is used to annotate any newly inserted nodes, for debugging. + * + * See 'class MemoIntegrator' for more details. + */ GroupIdType integrate(const Context& ctx, const ABT& node, NodeTargetGroupMap targetGroupMap, NodeIdSet& insertedNodeIds, - LogicalRewriteType rule = LogicalRewriteType::Root, - bool addExistingNodeWithNewChild = false); + LogicalRewriteType rule = LogicalRewriteType::Root); void clearLogicalNodes(GroupIdType groupId); @@ -215,13 +279,40 @@ class Memo : public MemoExplainInterface, public MemoGroupBinderInterface { size_t getPhysicalNodeCount() const; private: + // MemoIntegrator is a helper / transport for 'Memo::integrate()'. + friend class MemoIntegrator; + + /** + * Ensures the logical node 'n' is present in some Group. + * + * 'groupVector' should be the set of group IDs that contain the immediate children of 'n'. This + * is used to maintain '_inputGroupsToNodeIdMap' and '_nodeIdToInputGroupsMap'. + * + * 'projections' should be the set of output bindings of 'n'. It's used to initialize the + * ProjectionAvailability property in the case where a new Group is created. + * + * Optional 'targetGroupId' means force the node to be added to the given group, + * and raise an error if it's already present in some other group. '-1' means use an existing + * group if possible or create a new one otherwise. + * + * 'rule' is for explain/debugging only: it identifies the rewrite that introduced the node 'n'. + * + * The out-param 'insertedNodeIds' is appended to if a new logical node was added to any group + * (existing or new). 
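
A toy rendering of the "winner's circle" idea from the comment above may help: one group keeps its interchangeable logical plans together, plus a map from required physical properties to the cheapest physical plan found so far. The types, strings, and the addOptimizationResult signature below are hypothetical simplifications, not the real Group/PhysNodes API.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct Group {
    std::vector<std::string> logicalPlans;  // interchangeable alternatives
    // Required physical properties -> (cost, best plan found so far).
    std::map<std::string, std::pair<double, std::string>> bestPhysical;

    void addOptimizationResult(const std::string& props, double cost, std::string plan) {
        auto it = bestPhysical.find(props);
        if (it == bestPhysical.end() || cost < it->second.first) {
            bestPhysical[props] = {cost, std::move(plan)};
        }
    }
};

int main() {
    Group g;
    g.logicalPlans = {"Sargable(a>1)", "Filter(a>1) over Scan"};
    g.addOptimizationResult("sorted on 'a'", 120.0, "IndexScan{a_1}");
    g.addOptimizationResult("no constraints", 90.0, "CollScan + Filter");
    g.addOptimizationResult("no constraints", 60.0, "IndexScan{a_1} + Fetch");  // cheaper: wins
    for (const auto& [props, best] : g.bestPhysical) {
        std::cout << props << " -> " << best.second << " (cost " << best.first << ")\n";
    }
    return 0;
}
```
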
+ */ + MemoLogicalNodeId addNode(const Context& ctx, + GroupIdVector groupVector, + ProjectionNameSet projections, + GroupIdType targetGroupId, + NodeIdSet& insertedNodeIds, + ABT n, + LogicalRewriteType rule); + const Group& getGroup(GroupIdType groupId) const; Group& getGroup(GroupIdType groupId); GroupIdType addGroup(ProjectionNameSet projections); - std::pair addNode(GroupIdType groupId, ABT n, LogicalRewriteType rule); - boost::optional findNode(const GroupIdVector& groups, const ABT& node); std::vector> _groups; diff --git a/src/mongo/db/query/optimizer/cascades/memo_defs.cpp b/src/mongo/db/query/optimizer/cascades/memo_defs.cpp index f9d78ca802021..bab0ebf39f074 100644 --- a/src/mongo/db/query/optimizer/cascades/memo_defs.cpp +++ b/src/mongo/db/query/optimizer/cascades/memo_defs.cpp @@ -29,6 +29,12 @@ #include "mongo/db/query/optimizer/cascades/memo_defs.h" +#include +#include + +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/utils/abt_hash.h" diff --git a/src/mongo/db/query/optimizer/cascades/memo_defs.h b/src/mongo/db/query/optimizer/cascades/memo_defs.h index 456116d05f931..eb2b3302f8b73 100644 --- a/src/mongo/db/query/optimizer/cascades/memo_defs.h +++ b/src/mongo/db/query/optimizer/cascades/memo_defs.h @@ -29,10 +29,20 @@ #pragma once +#include +#include +#include +#include + +#include + #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/defs.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer::cascades { @@ -42,16 +52,24 @@ namespace mongo::optimizer::cascades { * the memo itself. */ +/** + * Deep hashing, compatible with deep equality comparison. + */ struct MemoNodeRefHash { size_t operator()(const ABT::reference_type& nodeRef) const; }; +/** + * Deep equality comparison. + */ struct MemoNodeRefCompare { bool operator()(const ABT::reference_type& left, const ABT::reference_type& right) const; }; /** - * A set of ABT nodes which keeps track of the order in which we inserted them. + * A set of ABT which keeps track of the order in which we inserted them. + * + * Compares ABTs using deep equality. 
*/ class OrderPreservingABTSet { public: diff --git a/src/mongo/db/query/optimizer/cascades/physical_rewriter.cpp b/src/mongo/db/query/optimizer/cascades/physical_rewriter.cpp index 6ddbc35109d89..2ca51a49327a4 100644 --- a/src/mongo/db/query/optimizer/cascades/physical_rewriter.cpp +++ b/src/mongo/db/query/optimizer/cascades/physical_rewriter.cpp @@ -29,10 +29,26 @@ #include "mongo/db/query/optimizer/cascades/physical_rewriter.h" +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/cascades/enforcers.h" #include "mongo/db/query/optimizer/cascades/implementers.h" +#include "mongo/db/query/optimizer/cascades/rewrite_queues.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" namespace mongo::optimizer::cascades { @@ -69,6 +85,10 @@ class PropCompatibleVisitor { return true; } + bool operator()(const PhysProperty&, const DistributionRequirement& requiredProp) { + return getPropertyConst(_availableProps) == requiredProp; + } + bool operator()(const PhysProperty&, const IndexingRequirement& requiredProp) { const auto& available = getPropertyConst(_availableProps); return available.getIndexReqTarget() == requiredProp.getIndexReqTarget() && @@ -77,9 +97,20 @@ class PropCompatibleVisitor { requiredProp.getSatisfiedPartialIndexesGroupId(); } - template - bool operator()(const PhysProperty&, const T& requiredProp) { - return getPropertyConst(_availableProps) == requiredProp; + bool operator()(const PhysProperty&, const RepetitionEstimate& requiredProp) { + return getPropertyConst(_availableProps) == requiredProp; + } + + bool operator()(const PhysProperty&, const LimitEstimate& requiredProp) { + return getPropertyConst(_availableProps) == requiredProp; + } + + bool operator()(const PhysProperty&, const RemoveOrphansRequirement& requiredProp) { + const auto& available = getPropertyConst(_availableProps); + // If the winner's circle contains a plan that removes orphans, then it doesn't matter what + // the required property is. Otherwise, the required property must not require removing + // orphans. + return available.mustRemove() || !requiredProp.mustRemove(); } static bool propertiesCompatible(const PhysProps& requiredProps, @@ -250,11 +281,21 @@ PhysicalRewriter::OptimizeGroupResult PhysicalRewriter::optimizeGroup(const Grou Group& group = _memo.getGroup(groupId); const LogicalProps& logicalProps = group._logicalProperties; - if (hasProperty(logicalProps) && - !hasProperty(physProps)) { - // Re-optimize under complete scan indexing requirements. - setPropertyOverwrite( - physProps, IndexingRequirement{IndexReqTarget::Complete, true /*dedupRID*/, groupId}); + if (hasProperty(logicalProps)) { + if (!hasProperty(physProps)) { + // Re-optimize under complete scan indexing requirements. + setPropertyOverwrite( + physProps, + IndexingRequirement{IndexReqTarget::Complete, true /*dedupRID*/, groupId}); + } + if (!hasProperty(physProps)) { + // Re-optimize with RemoveOrphansRequirement. Only require orphan filtering if the + // metadata for the scan definition indicates that the collection may contain orphans. 
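
The reuse rule for RemoveOrphansRequirement quoted above boils down to a single boolean expression: a cached plan that already removes orphans can satisfy any request, while one that does not can only satisfy requests that do not ask for orphan removal. A minimal sketch, with a hypothetical free function in place of PropCompatibleVisitor:

```cpp
#include <cassert>

static bool removeOrphansCompatible(bool availableMustRemove, bool requiredMustRemove) {
    return availableMustRemove || !requiredMustRemove;
}

int main() {
    assert(removeOrphansCompatible(true, true));    // filtering plan satisfies a filtering request
    assert(removeOrphansCompatible(true, false));   // over-satisfying is fine
    assert(removeOrphansCompatible(false, false));  // neither side cares
    assert(!removeOrphansCompatible(false, true));  // must re-optimize with the requirement
    return 0;
}
```
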
+ auto& scanDef = _metadata._scanDefs.at( + getPropertyConst(logicalProps).getScanDefName()); + setPropertyOverwrite( + physProps, RemoveOrphansRequirement{scanDef.shardingMetadata().mayContainOrphans}); + } } auto& physicalNodes = group._physicalNodes; @@ -348,8 +389,13 @@ PhysicalRewriter::OptimizeGroupResult PhysicalRewriter::optimizeGroup(const Grou // Enforcement rewrites run just once, and are independent of the logical nodes. if (groupId != _rootGroupId) { // Verify properties can be enforced and add enforcers if necessary. - addEnforcers( - groupId, _metadata, _ridProjections, queue._queue, bestResult._physProps, logicalProps); + addEnforcers(groupId, + _metadata, + _ridProjections, + queue._queue, + bestResult._physProps, + logicalProps, + _prefixId); } // Iterate until we perform all logical for the group and physical rewrites for our best plan. diff --git a/src/mongo/db/query/optimizer/cascades/physical_rewriter.h b/src/mongo/db/query/optimizer/cascades/physical_rewriter.h index aa4555d97df9d..4e5730c2f590b 100644 --- a/src/mongo/db/query/optimizer/cascades/physical_rewriter.h +++ b/src/mongo/db/query/optimizer/cascades/physical_rewriter.h @@ -29,8 +29,21 @@ #pragma once +#include +#include + +#include + +#include "mongo/db/query/optimizer/cascades/interfaces.h" #include "mongo/db/query/optimizer/cascades/logical_rewriter.h" #include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/cascades/memo_defs.h" +#include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" namespace mongo::optimizer::cascades { diff --git a/src/mongo/db/query/optimizer/cascades/rewrite_queues.cpp b/src/mongo/db/query/optimizer/cascades/rewrite_queues.cpp index baf67b22ac071..e9ba42ae8b68b 100644 --- a/src/mongo/db/query/optimizer/cascades/rewrite_queues.cpp +++ b/src/mongo/db/query/optimizer/cascades/rewrite_queues.cpp @@ -28,6 +28,9 @@ */ #include "mongo/db/query/optimizer/cascades/rewrite_queues.h" + +#include + #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" #include "mongo/db/query/optimizer/defs.h" diff --git a/src/mongo/db/query/optimizer/cascades/rewrite_queues.h b/src/mongo/db/query/optimizer/cascades/rewrite_queues.h index 732d338a15358..083392be4acae 100644 --- a/src/mongo/db/query/optimizer/cascades/rewrite_queues.h +++ b/src/mongo/db/query/optimizer/cascades/rewrite_queues.h @@ -29,15 +29,24 @@ #pragma once +#include #include +#include +#include + +#include #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer::cascades { /** - * Keeps track of candidate physical rewrites. + * Represents a logical rewrite that will be attempted at a particular node, in a particular group. 
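
As background for the LogicalRewriteEntry comment above, a rewrite queue can be pictured as a priority queue of (priority, rule, node) entries kept per group. The sketch below is illustrative only; the field names and the ordering convention (smaller value runs first) are assumptions of this toy, not a statement about the real queue.

```cpp
#include <iostream>
#include <queue>
#include <string>
#include <vector>

struct RewriteEntry {
    double priority;
    std::string rule;
    int groupId;
    int nodeIndex;
};

struct ByPriority {
    bool operator()(const RewriteEntry& a, const RewriteEntry& b) const {
        return a.priority > b.priority;  // smaller priority value pops first in this toy
    }
};

int main() {
    std::priority_queue<RewriteEntry, std::vector<RewriteEntry>, ByPriority> queue;
    queue.push({10.0, "FilterReorder", 2, 0});
    queue.push({1.0, "SargableSplit", 2, 1});
    while (!queue.empty()) {
        const RewriteEntry e = queue.top();
        queue.pop();
        std::cout << e.rule << " on node {" << e.groupId << ", " << e.nodeIndex << "}\n";
    }
    return 0;
}
```
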
*/ struct LogicalRewriteEntry { LogicalRewriteEntry(double priority, LogicalRewriteType type, MemoLogicalNodeId nodeId); diff --git a/src/mongo/db/query/optimizer/cascades/rewriter_rules.h b/src/mongo/db/query/optimizer/cascades/rewriter_rules.h index 7f6b0cdb07bf5..c07972ad2d918 100644 --- a/src/mongo/db/query/optimizer/cascades/rewriter_rules.h +++ b/src/mongo/db/query/optimizer/cascades/rewriter_rules.h @@ -93,6 +93,7 @@ MAKE_PRINTABLE_ENUM_STRING_ARRAY(LogicalRewriterTypeEnum, F(EnforceCollation) \ F(EnforceLimitSkip) \ F(EnforceDistribution) \ + F(EnforceShardFilter) \ F(AttemptCoveringQuery) \ F(Seek) \ F(PhysicalScan) \ @@ -115,6 +116,8 @@ MAKE_PRINTABLE_ENUM_STRING_ARRAY(LogicalRewriterTypeEnum, F(RIDIntersectMergeJoin) \ F(RIDIntersectHashJoin) \ F(RIDIntersectGroupBy) \ + F(RIDUnion) \ + F(RIDUnionUnique) \ F(IndexFetch) MAKE_PRINTABLE_ENUM(PhysicalRewriteType, PHYSICALREWRITER_NAMES); diff --git a/src/mongo/db/query/optimizer/comparison_op.h b/src/mongo/db/query/optimizer/comparison_op.h index 1023a281a9401..1fb0985980874 100644 --- a/src/mongo/db/query/optimizer/comparison_op.h +++ b/src/mongo/db/query/optimizer/comparison_op.h @@ -30,6 +30,7 @@ #pragma once #include "mongo/db/query/optimizer/utils/printable_enum.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/defs.cpp b/src/mongo/db/query/optimizer/defs.cpp index c2a2c80fa3791..6d59e586c6ed9 100644 --- a/src/mongo/db/query/optimizer/defs.cpp +++ b/src/mongo/db/query/optimizer/defs.cpp @@ -28,6 +28,17 @@ */ #include "mongo/db/query/optimizer/defs.h" + +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + #include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/db/query/optimizer/defs.h b/src/mongo/db/query/optimizer/defs.h index 061dab2ecbfd5..769dbb3db225d 100644 --- a/src/mongo/db/query/optimizer/defs.h +++ b/src/mongo/db/query/optimizer/defs.h @@ -29,10 +29,16 @@ #pragma once +#include #include +#include +#include +#include +#include #include #include #include +#include #include #include "mongo/db/query/optimizer/containers.h" @@ -193,12 +199,16 @@ class DebugInfo { struct SelectivityTag { // Selectivity does not have units, it is a simple ratio. static constexpr bool kUnitless = true; + static constexpr double kMaxValue = 1.0; + static constexpr double kMinValue = 0.0; }; using SelectivityType = StrongDoubleAlias; struct CETag { // Cardinality has units: it is measured in documents. 
static constexpr bool kUnitless = false; + static constexpr double kMaxValue = std::numeric_limits::max(); + static constexpr double kMinValue = 0.0; }; using CEType = StrongDoubleAlias; diff --git a/src/mongo/db/query/optimizer/explain.cpp b/src/mongo/db/query/optimizer/explain.cpp index 4cb4c18abf773..bdb4f4dc9d726 100644 --- a/src/mongo/db/query/optimizer/explain.cpp +++ b/src/mongo/db/query/optimizer/explain.cpp @@ -29,11 +29,44 @@ #include "mongo/db/query/optimizer/explain.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/cascades/memo_defs.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/defs.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/util/assert_util.h" @@ -1045,6 +1078,91 @@ class ExplainGeneratorTransporter { return printer.str(); } + void printCandidateIndexEntry(ExplainPrinter& local, + const CandidateIndexEntry& candidateIndexEntry) { + local.fieldName("indexDefName", ExplainVersion::V3) + .print(candidateIndexEntry._indexDefName) + .separator(", "); + + local.separator("{"); + printFieldProjectionMap(local, candidateIndexEntry._fieldProjectionMap); + local.separator("}, {"); + + { + if constexpr (version < ExplainVersion::V3) { + bool first = true; + for (const auto type : candidateIndexEntry._predTypes) { + if (first) { + first = false; + } else { + local.print(", "); + } + local.print(IndexFieldPredTypeEnum::toString[static_cast(type)]); + } + } else if constexpr (version == ExplainVersion::V3) { + std::vector printers; + for (const auto type : candidateIndexEntry._predTypes) { + ExplainPrinter local1; + local1.print(IndexFieldPredTypeEnum::toString[static_cast(type)]); + printers.push_back(std::move(local1)); + } + local.fieldName("predType").print(printers); + } else { + MONGO_UNREACHABLE; + } + } + + local.separator("}, "); + { + if (candidateIndexEntry._eqPrefixes.size() == 1) { + local.fieldName("intervals", ExplainVersion::V3); + + ExplainPrinter intervals = printIntervalExpr( + candidateIndexEntry._eqPrefixes.front()._interval); + local.printSingleLevel(intervals, "" /*singleLevelSpacer*/); + } else { + std::vector eqPrefixPrinters; + for (const auto& entry : candidateIndexEntry._eqPrefixes) { + ExplainPrinter eqPrefixPrinter; + eqPrefixPrinter.fieldName("startPos", ExplainVersion::V3) + .print(entry._startPos) + .separator(", "); + + ExplainPrinter intervals = + printIntervalExpr(entry._interval); + eqPrefixPrinter.separator("[") + .fieldName("interval", ExplainVersion::V3) + .printSingleLevel(intervals, "" /*singleLevelSpacer*/) + .separator("]"); + + 
eqPrefixPrinters.push_back(std::move(eqPrefixPrinter)); + } + + local.print(eqPrefixPrinters); + } + } + + if (const auto& residualReqs = candidateIndexEntry._residualRequirements) { + local.separator("}, "); + if constexpr (version < ExplainVersion::V3) { + ExplainPrinter residualReqMapPrinter; + printResidualRequirements(residualReqMapPrinter, *residualReqs); + local.print(residualReqMapPrinter); + } else if (version == ExplainVersion::V3) { + printResidualRequirements(local, *residualReqs); + } else { + MONGO_UNREACHABLE; + } + } + } + + + std::string printCandidateIndexEntry(const CandidateIndexEntry& indexEntry) { + ExplainPrinter printer; + printCandidateIndexEntry(printer, indexEntry); + return printer.str(); + } + void printPartialSchemaEntry(ExplainPrinter& printer, const PartialSchemaEntry& entry) { const auto& [key, req] = entry; @@ -1429,83 +1547,8 @@ class ExplainGeneratorTransporter { const CandidateIndexEntry& candidateIndexEntry = candidateIndexes.at(index); ExplainPrinter local; - local.fieldName("candidateId") - .print(index + 1) - .separator(", ") - .fieldName("indexDefName", ExplainVersion::V3) - .print(candidateIndexEntry._indexDefName) - .separator(", "); - - local.separator("{"); - printFieldProjectionMap(local, candidateIndexEntry._fieldProjectionMap); - local.separator("}, {"); - - { - if constexpr (version < ExplainVersion::V3) { - bool first = true; - for (const auto type : candidateIndexEntry._predTypes) { - if (first) { - first = false; - } else { - local.print(", "); - } - local.print(IndexFieldPredTypeEnum::toString[static_cast(type)]); - } - } else if constexpr (version == ExplainVersion::V3) { - std::vector printers; - for (const auto type : candidateIndexEntry._predTypes) { - ExplainPrinter local1; - local1.print(IndexFieldPredTypeEnum::toString[static_cast(type)]); - printers.push_back(std::move(local1)); - } - local.fieldName("predType").print(printers); - } else { - MONGO_UNREACHABLE; - } - } - - local.separator("}, "); - { - if (candidateIndexEntry._eqPrefixes.size() == 1) { - local.fieldName("intervals", ExplainVersion::V3); - - ExplainPrinter intervals = printIntervalExpr( - candidateIndexEntry._eqPrefixes.front()._interval); - local.printSingleLevel(intervals, "" /*singleLevelSpacer*/); - } else { - std::vector eqPrefixPrinters; - for (const auto& entry : candidateIndexEntry._eqPrefixes) { - ExplainPrinter eqPrefixPrinter; - eqPrefixPrinter.fieldName("startPos", ExplainVersion::V3) - .print(entry._startPos) - .separator(", "); - - ExplainPrinter intervals = - printIntervalExpr(entry._interval); - eqPrefixPrinter.separator("[") - .fieldName("interval", ExplainVersion::V3) - .printSingleLevel(intervals, "" /*singleLevelSpacer*/) - .separator("]"); - - eqPrefixPrinters.push_back(std::move(eqPrefixPrinter)); - } - - local.print(eqPrefixPrinters); - } - } - - if (const auto& residualReqs = candidateIndexEntry._residualRequirements) { - if constexpr (version < ExplainVersion::V3) { - ExplainPrinter residualReqMapPrinter; - printResidualRequirements(residualReqMapPrinter, *residualReqs); - local.print(residualReqMapPrinter); - } else if (version == ExplainVersion::V3) { - printResidualRequirements(local, *residualReqs); - } else { - MONGO_UNREACHABLE; - } - } - + local.fieldName("candidateId").print(index + 1).separator(", "); + printCandidateIndexEntry(local, candidateIndexEntry); candidateIndexesPrinters.push_back(std::move(local)); } ExplainPrinter candidateIndexesPrinter; @@ -2319,11 +2362,22 @@ class ExplainGeneratorTransporter { void operator()(const 
properties::PhysProperty&, const properties::RepetitionEstimate& prop) { - _parent.fieldName("repetitionEstimate").print(prop.getEstimate()); + ExplainPrinter printer; + printer.print(prop.getEstimate()); + _parent.fieldName("repetitionEstimate").print(printer); } void operator()(const properties::PhysProperty&, const properties::LimitEstimate& prop) { - _parent.fieldName("limitEstimate").print(prop.getEstimate()); + ExplainPrinter printer; + printer.print(prop.getEstimate()); + _parent.fieldName("limitEstimate").print(printer); + } + + void operator()(const properties::PhysProperty&, + const properties::RemoveOrphansRequirement& prop) { + ExplainPrinter printer; + printer.print(prop.mustRemove() ? "true" : "false"); + _parent.fieldName("removeOrphans").print(printer); } private: @@ -2767,7 +2821,10 @@ class ExplainGeneratorTransporter { .print(nodeInfo._localCost.getCost()) .separator(", ") .fieldName("adjustedCE") - .print(nodeInfo._adjustedCE); + .print(nodeInfo._adjustedCE) + .separator(", ") + .fieldName("rule") + .print(cascades::PhysicalRewriterTypeEnum::toString[static_cast(nodeInfo._rule)]); ExplainGeneratorTransporter subGen( _displayProperties, _memoInterface, _nodeMap, nodeInfo._nodeCEMap); @@ -2823,12 +2880,6 @@ class ExplainGeneratorTransporter { local.print(physOptResult->_costLimit.getCost()); } - if (physOptResult->_nodeInfo) { - const cascades::PhysicalRewriteType rule = physOptResult->_nodeInfo->_rule; - local.separator(", ").fieldName("rule").print( - cascades::PhysicalRewriterTypeEnum::toString[static_cast(rule)]); - } - ExplainPrinter propPrinter = printPhysProps("Physical properties", physOptResult->_physProps); local.fieldName("physicalProperties", ExplainVersion::V3).print(propPrinter); @@ -3057,4 +3108,9 @@ std::string ExplainGenerator::explainIntervalExpr( ExplainGeneratorV2 gen; return gen.printIntervalExpr(intervalExpr).str(); } + +std::string ExplainGenerator::explainCandidateIndex(const CandidateIndexEntry& indexEntry) { + ExplainGeneratorV2 gen; + return gen.printCandidateIndexEntry(indexEntry); +} } // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/explain.h b/src/mongo/db/query/optimizer/explain.h index 20137ac9157e6..558c2fca40c1d 100644 --- a/src/mongo/db/query/optimizer/explain.h +++ b/src/mongo/db/query/optimizer/explain.h @@ -29,11 +29,17 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/optimizer/cascades/memo_explain_interface.h" #include "mongo/db/query/optimizer/explain_interface.h" +#include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/partial_schema_requirements.h" #include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/syntax/syntax.h" @@ -122,6 +128,8 @@ class ExplainGenerator { static std::string explainIntervalExpr(const IntervalReqExpr::Node& intervalExpr); static std::string explainIntervalExpr(const CompoundIntervalReqExpr::Node& intervalExpr); + + static std::string explainCandidateIndex(const CandidateIndexEntry& indexEntry); }; } // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/index_bounds.cpp b/src/mongo/db/query/optimizer/index_bounds.cpp index 72de5613a18b4..7008039483bb2 100644 --- a/src/mongo/db/query/optimizer/index_bounds.cpp +++ b/src/mongo/db/query/optimizer/index_bounds.cpp @@ -29,9 +29,23 @@ #include "mongo/db/query/optimizer/index_bounds.h" 
-#include "mongo/db/query/optimizer/node.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/utils/abt_compare.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -180,27 +194,80 @@ bool PartialSchemaRequirement::mayReturnNull(const ConstFoldFn& constFold) const return _boundProjectionName && checkMaybeHasNull(getIntervals(), constFold); }; -bool IndexPath3WComparator::operator()(const ABT& path1, const ABT& path2) const { +bool IndexPathLessComparator::operator()(const ABT& path1, const ABT& path2) const { return compareExprAndPaths(path1, path2) < 0; } -bool PartialSchemaKeyLessComparator::operator()(const PartialSchemaKey& k1, - const PartialSchemaKey& k2) const { +int PartialSchemaKeyComparator::Cmp3W::operator()(const PartialSchemaKey& k1, + const PartialSchemaKey& k2) const { if (const auto& p1 = k1._projectionName) { if (const auto& p2 = k2._projectionName) { const int projCmp = p1->compare(*p2); if (projCmp != 0) { - return projCmp < 0; + return projCmp; } // Fallthrough to comparison below. } else { - return false; + // p1 > p2 because nonempty > empty. + return 1; } } else if (k2._projectionName) { + // p1 < p2 because empty < nonempty + return -1; + } + // p1 == p2 so compare paths. + return compareExprAndPaths(k1._path, k2._path); +} +bool PartialSchemaKeyComparator::Less::operator()(const PartialSchemaKey& k1, + const PartialSchemaKey& k2) const { + return PartialSchemaKeyComparator::Cmp3W{}(k1, k2) < 0; +} + +int PartialSchemaRequirementComparator::Cmp3W::operator()( + const PartialSchemaRequirement& req1, const PartialSchemaRequirement& req2) const { + + int intervalCmp = compareIntervalExpr(req1.getIntervals(), req2.getIntervals()); + if (intervalCmp != 0) { + return intervalCmp; + } + + // Intervals are equal: compare the output bindings. + auto b1 = req1.getBoundProjectionName(); + auto b2 = req2.getBoundProjectionName(); + if (b1 && b2) { + return b1->compare(*b2); + } else if (b1) { + // b1 > b2 because nonempty > empty. + return 1; + } else if (b2) { + // b1 < b2 because empty < nonempty. + return -1; + } else { + // empty == empty. + return 0; + } +} + +bool PartialSchemaRequirementComparator::Less::operator()( + const PartialSchemaRequirement& req1, const PartialSchemaRequirement& req2) const { + + int intervalCmp = compareIntervalExpr(req1.getIntervals(), req2.getIntervals()); + if (intervalCmp < 0) { + return true; + } else if (intervalCmp > 0) { return false; } - return compareExprAndPaths(k1._path, k2._path) < 0; + // Intervals are equal: compare the output bindings. + auto b1 = req1.getBoundProjectionName(); + auto b2 = req2.getBoundProjectionName(); + if (b1 && b2) { + return b1->compare(*b2) < 0; + } else { + // If either or both optionals are empty, then the only way for + // 'b1 < b2' is '{} < {anything}'. 
+ return !b1 && b2; + } } ResidualRequirement::ResidualRequirement(PartialSchemaKey key, diff --git a/src/mongo/db/query/optimizer/index_bounds.h b/src/mongo/db/query/optimizer/index_bounds.h index b23ae574a4801..b9c738d90b2d0 100644 --- a/src/mongo/db/query/optimizer/index_bounds.h +++ b/src/mongo/db/query/optimizer/index_bounds.h @@ -29,10 +29,22 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include +#include + #include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/const_fold_interface.h" +#include "mongo/db/query/optimizer/utils/printable_enum.h" namespace mongo::optimizer { @@ -186,8 +198,13 @@ class CompoundIntervalRequirement : public Interval { // Unions and conjunctions of individual compound intervals. using CompoundIntervalReqExpr = BoolExpr; +/** + * An input binding and a path to be applied over the input binding. Used in conjunction with a + * PartialSchemaRequirement to indicate which values a requirement should be applied to. The path + * should only contain Get, Traverse, and Id path elements. + */ struct PartialSchemaKey { - // The default construct sets the path to PathIdentity and the projectionName to boost::none. + // The default constructor sets the path to PathIdentity and the projectionName to boost::none. PartialSchemaKey(); PartialSchemaKey(ABT path); @@ -199,13 +216,19 @@ struct PartialSchemaKey { return !(*this == other); } - // Referred, or input projection name. + // Referred, or input projection name. May be boost::none while constructing + // PartialSchemaRequirements, before it is known which projection the path should be applied to. boost::optional _projectionName; // (Partially determined) path. ABT _path; }; +/** + * Represents a constraint on the schema in the collection. Used in conjunction with a + * PartialSchemKey to apply an interval constraint to some value and optionally bind the + * output to a projection. + */ class PartialSchemaRequirement { public: PartialSchemaRequirement(boost::optional boundProjectionName, @@ -237,15 +260,33 @@ class PartialSchemaRequirement { /** * This comparator is used to compare paths with Get, Traverse, and Id. */ -struct IndexPath3WComparator { +struct IndexPathLessComparator { bool operator()(const ABT& path1, const ABT& path2) const; }; -using IndexPathSet = std::set; +using IndexPathSet = std::set; + +struct PartialSchemaKeyComparator { + struct Less { + bool operator()(const PartialSchemaKey& k1, const PartialSchemaKey& k2) const; + }; -struct PartialSchemaKeyLessComparator { - bool operator()(const PartialSchemaKey& k1, const PartialSchemaKey& k2) const; + struct Cmp3W { + int operator()(const PartialSchemaKey& k1, const PartialSchemaKey& k2) const; + }; }; +struct PartialSchemaRequirementComparator { + struct Less { + bool operator()(const PartialSchemaRequirement& req1, + const PartialSchemaRequirement& req2) const; + }; + + struct Cmp3W { + int operator()(const PartialSchemaRequirement& req1, + const PartialSchemaRequirement& req2) const; + }; +}; + /** * Used to track cardinality estimates per predicate inside a PartialSchemaRequirement. 
The order of @@ -253,7 +294,7 @@ struct PartialSchemaKeyLessComparator { */ using PartialSchemaKeyCE = std::vector>; -using PartialSchemaKeySet = std::set; +using PartialSchemaKeySet = std::set; // Requirements which are not satisfied directly by an IndexScan, PhysicalScan or Seek (e.g. using // an index field, or scan field). The index refers to the underlying entry in the diff --git a/src/mongo/db/query/optimizer/index_union_optimizer_test.cpp b/src/mongo/db/query/optimizer/index_union_optimizer_test.cpp index 06592638760d2..f7856dc8d85a4 100644 --- a/src/mongo/db/query/optimizer/index_union_optimizer_test.cpp +++ b/src/mongo/db/query/optimizer/index_union_optimizer_test.cpp @@ -27,17 +27,35 @@ * it in the license file. */ +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/pipeline/abt/utils.h" -#include "mongo/db/query/optimizer/cascades/logical_props_derivation.h" -#include "mongo/db/query/optimizer/cascades/rewriter_rules.h" -#include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/opt_phase_manager.h" -#include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/abt_hash.h" +#include "mongo/db/query/optimizer/utils/physical_plan_builder.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/framework.h" using namespace mongo::optimizer::unit_test_abt_literals; @@ -335,7 +353,6 @@ TEST(LogicalRewriter, DisjunctionConversionDedup) { // We should see everything get reordered and deduped, // so each of the leaf predicates appears once. - // TODO SERVER-73827 We should get 2 leaf predicates instead of 3 here. 
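The PartialSchemaKeySet alias above is an ordered std::set keyed by the renamed comparator's Less functor, so the comparator both defines iteration order and collapses keys that compare equivalent. A small self-contained sketch of the pattern, with an illustrative Key type rather than the real ABT-based key:

```cpp
#include <iostream>
#include <optional>
#include <set>
#include <string>

// Illustrative key: optional input projection + path.
struct Key {
    std::optional<std::string> projection;
    std::string path;
};

// Less comparator in the style of PartialSchemaKeyComparator::Less.
struct KeyLess {
    bool operator()(const Key& a, const Key& b) const {
        if (a.projection != b.projection) {
            return a.projection < b.projection;  // std::nullopt orders before any value
        }
        return a.path < b.path;
    }
};

// Ordered, deduplicating container keyed by the comparator.
using KeySet = std::set<Key, KeyLess>;

int main() {
    KeySet keys;
    keys.insert({std::string{"scan_0"}, "Get [a] Id"});
    keys.insert({std::string{"scan_0"}, "Get [a] Id"});  // equivalent key: collapsed
    keys.insert({std::nullopt, "Get [b] Id"});
    std::cout << keys.size() << "\n";  // prints 2
}
```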
ASSERT_EXPLAIN_V2_AUTO( "Root [{scan_0}]\n" "Sargable [Complete]\n" @@ -343,8 +360,6 @@ TEST(LogicalRewriter, DisjunctionConversionDedup) { "| | {\n" "| | {{scan_0, 'PathGet [a] PathIdentity []', {{{=Const [0]}}}}}\n" "| | U \n" - "| | {{scan_0, 'PathGet [a] PathIdentity []', {{{=Const [0]}}}}}\n" - "| | U \n" "| | {{scan_0, 'PathGet [b] PathIdentity []', {{{=Const [1]}}}}}\n" "| | }\n" "| scanParams: \n" @@ -353,9 +368,7 @@ TEST(LogicalRewriter, DisjunctionConversionDedup) { "| {\n" "| {{evalTemp_0, 'PathIdentity []', {{{=Const [0]}}}, entryIndex: 0}}\n" "| U \n" - "| {{evalTemp_0, 'PathIdentity []', {{{=Const [0]}}}, entryIndex: 1}}\n" - "| U \n" - "| {{evalTemp_1, 'PathIdentity []', {{{=Const [1]}}}, entryIndex: 2}}\n" + "| {{evalTemp_1, 'PathIdentity []', {{{=Const [1]}}}, entryIndex: 1}}\n" "| }\n" "Scan [coll, {scan_0}]\n", optimized); @@ -393,7 +406,7 @@ TEST(PhysRewriter, LowerRequirementsWithTopLevelDisjunction) { .pop(); auto residReqs = residReqsBuilder.finish().get(); lowerPartialSchemaRequirements( - scanGroupCE, indexPredSels, residReqs, defaultConvertPathToInterval, builder); + scanGroupCE, scanGroupCE, indexPredSels, residReqs, defaultConvertPathToInterval, builder); ASSERT_EXPLAIN_V2_AUTO( "Filter []\n" @@ -460,55 +473,37 @@ TEST(PhysRewriter, OptimizeSargableNodeWithTopLevelDisjunction) { builder.pushDisj().pushConj().atom({makeKey("g"), req}).pop(); auto reqs3 = PartialSchemaRequirements(builder.finish().get()); - // During logical optimization, the SargableNodes not directly above the Scan will first be - // lowered to Filter nodes based on their requirements. The SargableNode immediately above the - // Scan will be lowered later based on its residual requirements. - ResidualRequirements::Builder residReqs; - residReqs.pushDisj() - .pushConj() - .atom({makeKey("a"), req, 0}) - .atom({makeKey("b"), req, 1}) - .pop() - .pushConj() - .atom({makeKey("c"), req, 2}) - .atom({makeKey("d"), req, 3}) - .pop(); - ScanParams scanParams; - scanParams._residualRequirements = residReqs.finish(); - ABT scanNode = make("ptest", "test"); ABT sargableNode1 = make( - reqs1, CandidateIndexes(), scanParams, IndexReqTarget::Index, std::move(scanNode)); + reqs1, CandidateIndexes(), boost::none, IndexReqTarget::Complete, std::move(scanNode)); ABT sargableNode2 = make( - reqs2, CandidateIndexes(), boost::none, IndexReqTarget::Index, std::move(sargableNode1)); + reqs2, CandidateIndexes(), boost::none, IndexReqTarget::Complete, std::move(sargableNode1)); ABT sargableNode3 = make( - reqs3, CandidateIndexes(), boost::none, IndexReqTarget::Index, std::move(sargableNode2)); + reqs3, CandidateIndexes(), boost::none, IndexReqTarget::Complete, std::move(sargableNode2)); ABT rootNode = make(properties::ProjectionRequirement{ProjectionNameVector{"ptest"}}, std::move(sargableNode3)); - // Show that the optimization of the SargableNode does not throw, and that all three - // SargableNodes are correctly lowered to FilterNodes. auto prefixId = PrefixId::createForTests(); auto phaseManager = makePhaseManager( - {OptPhase::MemoSubstitutionPhase, - OptPhase::MemoExplorationPhase, - OptPhase::MemoImplementationPhase}, + { + OptPhase::MemoSubstitutionPhase, + OptPhase::MemoExplorationPhase, + OptPhase::MemoImplementationPhase, + }, prefixId, {{{"test", createScanDef( {}, { - // For now, verify that we do not get an indexed plan even when there - // are indexes available on the queried fields. 
{"ab", - IndexDefinition{{{makeIndexPath("a"), CollationOp::Ascending}, - {makeIndexPath("b"), CollationOp::Ascending}}, + IndexDefinition{{{makeNonMultikeyIndexPath("a"), CollationOp::Ascending}, + {makeNonMultikeyIndexPath("b"), CollationOp::Ascending}}, false /*isMultiKey*/, {DistributionType::Centralized}, {}}}, {"cd", - IndexDefinition{{{makeIndexPath("c"), CollationOp::Ascending}, - {makeIndexPath("d"), CollationOp::Ascending}}, + IndexDefinition{{{makeNonMultikeyIndexPath("c"), CollationOp::Ascending}, + {makeNonMultikeyIndexPath("d"), CollationOp::Ascending}}, false /*isMultiKey*/, {DistributionType::Centralized}, {}}}, @@ -517,9 +512,13 @@ TEST(PhysRewriter, OptimizeSargableNodeWithTopLevelDisjunction) { {"g", makeIndexDefinition("g", CollationOp::Ascending, false /*isMultiKey*/)}, })}}}, boost::none /*costModel*/, - DebugInfo::kDefaultForTests); + DebugInfo::kDefaultForTests, + QueryHints{ + ._disableScan = true, + }); phaseManager.optimize(rootNode); + // We should get an index union between 'ab' and 'cd'. ASSERT_EXPLAIN_V2Compact_AUTO( "Root [{ptest}]\n" "Filter []\n" @@ -534,23 +533,103 @@ TEST(PhysRewriter, OptimizeSargableNodeWithTopLevelDisjunction) { "| EvalFilter []\n" "| | Variable [ptest]\n" "| PathGet [e] PathCompare [Eq] Const [1]\n" - "Filter []\n" - "| BinaryOp [Or]\n" - "| | BinaryOp [And]\n" - "| | | EvalFilter []\n" - "| | | | Variable [ptest]\n" - "| | | PathGet [d] PathCompare [Eq] Const [1]\n" - "| | EvalFilter []\n" - "| | | Variable [ptest]\n" - "| | PathGet [c] PathCompare [Eq] Const [1]\n" - "| BinaryOp [And]\n" - "| | EvalFilter []\n" - "| | | Variable [ptest]\n" - "| | PathGet [b] PathCompare [Eq] Const [1]\n" - "| EvalFilter []\n" - "| | Variable [ptest]\n" - "| PathGet [a] PathCompare [Eq] Const [1]\n" - "PhysicalScan [{'': ptest}, test]\n", + "NestedLoopJoin [joinType: Inner, {rid_0}]\n" + "| | Const [true]\n" + "| LimitSkip [limit: 1, skip: 0]\n" + "| Seek [ridProjection: rid_0, {'': ptest}, test]\n" + "Unique [{rid_0}]\n" + "Union [{rid_0}]\n" + "| IndexScan [{'': rid_0}, scanDefName: test, indexDefName: cd, interval: {=Const " + "[1 | 1]}]\n" + "IndexScan [{'': rid_0}, scanDefName: test, indexDefName: ab, interval: {=Const [1 | " + "1]}]\n", + rootNode); +} + +TEST(PhysRewriter, ThreeWayIndexUnion) { + auto req = + PartialSchemaRequirement(boost::none, + _disj(_conj(_interval(_incl("1"_cint32), _incl("1"_cint32)))), + false /*perfOnly*/); + + auto makeKey = [](std::string pathName) { + return PartialSchemaKey("ptest", + make(FieldNameType{pathName}, make())); + }; + + // Create three SargableNodes with a 3-argument disjunction. + PSRExpr::Builder builder; + builder.pushDisj() + .pushConj() + .atom({makeKey("a"), req}) + .pop() + .pushConj() + .atom({makeKey("b"), req}) + .pop() + .pushConj() + .atom({makeKey("c"), req}) + .pop(); + auto reqs = PartialSchemaRequirements(builder.finish().get()); + + ABT scanNode = make("ptest", "test"); + ABT sargableNode = make( + reqs, CandidateIndexes(), boost::none, IndexReqTarget::Complete, std::move(scanNode)); + ABT rootNode = make(properties::ProjectionRequirement{ProjectionNameVector{"ptest"}}, + std::move(sargableNode)); + + // Show that the optimization of the SargableNode does not throw, and that all three + // SargableNodes are correctly lowered to FilterNodes. 
+ auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager( + { + OptPhase::MemoSubstitutionPhase, + OptPhase::MemoExplorationPhase, + OptPhase::MemoImplementationPhase, + }, + prefixId, + {{{"test", + createScanDef( + {}, + { + {"a", + IndexDefinition{{{makeNonMultikeyIndexPath("a"), CollationOp::Ascending}}, + false /*isMultiKey*/, + {DistributionType::Centralized}, + {}}}, + {"b", + IndexDefinition{{{makeNonMultikeyIndexPath("b"), CollationOp::Ascending}}, + false /*isMultiKey*/, + {DistributionType::Centralized}, + {}}}, + {"c", + IndexDefinition{{{makeNonMultikeyIndexPath("c"), CollationOp::Ascending}}, + false /*isMultiKey*/, + {DistributionType::Centralized}, + {}}}, + })}}}, + boost::none /*costModel*/, + DebugInfo::kDefaultForTests, + QueryHints{ + ._disableScan = true, + }); + phaseManager.optimize(rootNode); + + // We should get a union of three index scans. + ASSERT_EXPLAIN_V2Compact_AUTO( + "Root [{ptest}]\n" + "NestedLoopJoin [joinType: Inner, {rid_0}]\n" + "| | Const [true]\n" + "| LimitSkip [limit: 1, skip: 0]\n" + "| Seek [ridProjection: rid_0, {'': ptest}, test]\n" + "Unique [{rid_0}]\n" + "Union [{rid_0}]\n" + "| Union [{rid_0}]\n" + "| | IndexScan [{'': rid_0}, scanDefName: test, indexDefName: c, interval: " + "{=Const [1]}]\n" + "| IndexScan [{'': rid_0}, scanDefName: test, indexDefName: b, interval: {=Const " + "[1]}]\n" + "IndexScan [{'': rid_0}, scanDefName: test, indexDefName: a, interval: {=Const " + "[1]}]\n", rootNode); } diff --git a/src/mongo/db/query/optimizer/interval_simplify_test.cpp b/src/mongo/db/query/optimizer/interval_simplify_test.cpp index d6ccfcd1a55a4..04a35c37e4d28 100644 --- a/src/mongo/db/query/optimizer/interval_simplify_test.cpp +++ b/src/mongo/db/query/optimizer/interval_simplify_test.cpp @@ -27,23 +27,54 @@ * it in the license file. 
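In the expected plans above, a disjunction is answered by unioning the record ids produced by the individual index scans, deduplicating them (the Unique stage over rid_0), and only then seeking each document once. A toy sketch of that shape over integer record ids; the real Unique stage need not sort, this is just one way to express the dedup:

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

using RecordId = long long;

// Stand-ins for the rid streams produced by the individual index scans.
std::vector<RecordId> unionAndDedup(std::vector<std::vector<RecordId>> scans) {
    std::vector<RecordId> rids;
    for (auto& s : scans) {                       // Union [{rid_0}]
        rids.insert(rids.end(), s.begin(), s.end());
    }
    std::sort(rids.begin(), rids.end());          // Unique [{rid_0}]: order, then
    rids.erase(std::unique(rids.begin(), rids.end()), rids.end());  // drop duplicates
    return rids;
}

int main() {
    // Index scans on "a", "b", "c" may return overlapping rids for documents matching
    // more than one disjunct; each document must be fetched only once.
    const auto rids = unionAndDedup({{10, 42}, {42, 7}, {7, 99}});
    for (const RecordId rid : rids) {
        std::cout << "seek rid " << rid << "\n";  // NLJ + Seek per unique rid
    }
}
```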
*/ +#include +#include +#include +#include #include +#include #include -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/bool_expression_printer.h" #include "mongo/db/query/optimizer/utils/ce_math.h" #include "mongo/db/query/optimizer/utils/interval_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h" +#include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/platform/atomic_word.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/random.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/inline_auto_update.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/processinfo.h" +#include "mongo/util/time_support.h" namespace mongo::optimizer { namespace { @@ -71,7 +102,7 @@ ABT optimizedQueryPlan(const std::string& query, return optimized; } -class IntervalIntersection : public LockerNoopServiceContextTest {}; +class IntervalIntersection : public ServiceContextTest {}; TEST_F(IntervalIntersection, SingleFieldIntersection) { opt::unordered_map testIndex = { diff --git a/src/mongo/db/query/optimizer/logical_rewriter_optimizer_test.cpp b/src/mongo/db/query/optimizer/logical_rewriter_optimizer_test.cpp index f84bc76b1b4ab..a6aaad47cac20 100644 --- a/src/mongo/db/query/optimizer/logical_rewriter_optimizer_test.cpp +++ b/src/mongo/db/query/optimizer/logical_rewriter_optimizer_test.cpp @@ -27,16 +27,43 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/cascades/interfaces.h" #include "mongo/db/query/optimizer/cascades/logical_props_derivation.h" -#include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/node_defs.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/inline_auto_update.h" using namespace mongo::optimizer::unit_test_abt_literals; @@ -1329,6 +1356,131 @@ TEST(LogicalRewriter, NotPushdownToplevelFailureMultikey) { latest); } +TEST(LogicalRewriter, NotPushdownComposeA) { + ABT rootNode = + NodeBuilder{} + .root("scan_0") + .filter(_evalf( + _get("top", + _traverse1(_pconst(_unary( + "Not", + _evalf(_composea( + // A ComposeA where both args can be negated. + _composea(_get("a", _cmp("Eq", "2"_cint64)), + _get("b", _cmp("Eq", "3"_cint64))), + // A ComposeA where only one arg can be negated. + _composea(_get("c", _cmp("Eq", "4"_cint64)), + _get("d", _traverse1(_cmp("Eq", "5"_cint64))))), + "scan_0"_var))))), + "scan_0"_var)) + .finish(_scan("scan_0", "coll")); + + auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager({OptPhase::MemoSubstitutionPhase}, + prefixId, + Metadata{{{"coll", createScanDef({}, {})}}}, + boost::none /*costModel*/, + DebugInfo::kDefaultForTests); + ABT latest = std::move(rootNode); + phaseManager.optimize(latest); + + // We should push the Not down as far as possible, so that some leaves become Neq. + // Leaves with a Traverse in the way residualize a Not instead. + + // Note that the top level traverse is to prevent ComposeM from being decomposed into filter + // nodes. 
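The NotPushdown tests that follow exercise the same idea from several angles: a negation is pushed down with De Morgan's laws, Eq flips to Neq where that is safe, a leaf guarded by a Traverse (possibly multikey) keeps a residual Not, and double negation cancels. A compact sketch of that rewrite on a toy predicate tree, with illustrative types rather than ABT paths:

```cpp
#include <string>
#include <utility>
#include <vector>

// Toy predicate tree. Leaves compare a field to a value; 'negatable' models whether
// the leaf may be flipped (false stands in for a Traverse over a possibly-multikey path).
struct Pred {
    enum class Kind { And, Or, Eq, Neq, ResidualNot } kind;
    std::vector<Pred> children;  // for And / Or / ResidualNot
    std::string field;           // for leaves
    int value = 0;               // for leaves
    bool negatable = true;       // for leaves
};

// Push a negation down through the tree: De Morgan on And/Or, Eq <-> Neq on leaves
// that allow it, a residual Not otherwise, and Not(Not(x)) = x.
Pred pushNot(Pred p) {
    switch (p.kind) {
        case Pred::Kind::Or:
        case Pred::Kind::And: {
            // Not(a OR b) = Not(a) AND Not(b); Not(a AND b) = Not(a) OR Not(b).
            p.kind = (p.kind == Pred::Kind::Or) ? Pred::Kind::And : Pred::Kind::Or;
            for (Pred& c : p.children) {
                c = pushNot(std::move(c));
            }
            return p;
        }
        case Pred::Kind::Eq:
        case Pred::Kind::Neq:
            if (p.negatable) {
                p.kind = (p.kind == Pred::Kind::Eq) ? Pred::Kind::Neq : Pred::Kind::Eq;
                return p;
            }
            return Pred{Pred::Kind::ResidualNot, {std::move(p)}};
        case Pred::Kind::ResidualNot:
            // Double negation cancels.
            return std::move(p.children.front());
    }
    return p;
}

int main() {
    // Not( a == 2 OR b == 3 OR (d == 5 under a Traverse) )
    Pred query{Pred::Kind::Or,
               {Pred{Pred::Kind::Eq, {}, "a", 2},
                Pred{Pred::Kind::Eq, {}, "b", 3},
                Pred{Pred::Kind::Eq, {}, "d", 5, /*negatable*/ false}}};
    const Pred rewritten = pushNot(std::move(query));
    // rewritten is AND( a != 2, b != 3, ResidualNot(d == 5) ).
    (void)rewritten;
}
```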
+ ASSERT_EXPLAIN_V2_AUTO( + "Root [{scan_0}]\n" + "Filter []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathGet [top]\n" + "| PathTraverse [1]\n" + "| PathConstant []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathComposeM []\n" + "| | PathComposeM []\n" + "| | | PathLambda []\n" + "| | | LambdaAbstraction [tmp_bool_0]\n" + "| | | UnaryOp [Not]\n" + "| | | EvalFilter []\n" + "| | | | Variable [tmp_bool_0]\n" + "| | | PathGet [d]\n" + "| | | PathTraverse [1]\n" + "| | | PathCompare [Eq]\n" + "| | | Const [5]\n" + "| | PathGet [c]\n" + "| | PathCompare [Neq]\n" + "| | Const [4]\n" + "| PathComposeM []\n" + "| | PathGet [b]\n" + "| | PathCompare [Neq]\n" + "| | Const [3]\n" + "| PathGet [a]\n" + "| PathCompare [Neq]\n" + "| Const [2]\n" + "Scan [coll, {scan_0}]\n", + latest); +} + +TEST(LogicalRewriter, NotPushdownComposeABothPathsCannotBeNegated) { + ABT rootNode = + NodeBuilder{} + .root("scan_0") + .filter(_unary( + "Not", + _evalf(_composea( + // A ComposeA where both args cannot be negated. + _composea(_get("a", _traverse1(_cmp("Eq", "2"_cint64))), + _get("b", _traverse1(_cmp("Eq", "3"_cint64)))), + // A ComposeA where both args cannot be negated but can be simplified + _composea(_get("c", _traverse1(_pconst(_unary("Not", _cbool(true))))), + _get("d", _traverse1(_pconst(_unary("Not", _cbool(false))))))), + "scan_0"_var))) + .finish(_scan("scan_0", "coll")); + + auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager({OptPhase::MemoSubstitutionPhase}, + prefixId, + Metadata{{{"coll", createScanDef({}, {})}}}, + boost::none /*costModel*/, + DebugInfo::kDefaultForTests); + ABT latest = std::move(rootNode); + phaseManager.optimize(latest); + + // We should keep ComposeA if both paths cannot be negated. If a path can be simplified (for + // example, Unary [Not] Constant [true] -> Constant [false]), the simplified path should + // reflected in ComposeA even if it is not negated. + ASSERT_EXPLAIN_V2_AUTO( + "Root [{scan_0}]\n" + "Filter []\n" + "| UnaryOp [Not]\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathComposeA []\n" + "| | PathComposeA []\n" + "| | | PathGet [d]\n" + "| | | PathTraverse [1]\n" + "| | | PathConstant []\n" + "| | | Const [true]\n" + "| | PathGet [c]\n" + "| | PathTraverse [1]\n" + "| | PathConstant []\n" + "| | Const [false]\n" + "| PathComposeA []\n" + "| | PathGet [b]\n" + "| | PathTraverse [1]\n" + "| | PathCompare [Eq]\n" + "| | Const [3]\n" + "| PathGet [a]\n" + "| PathTraverse [1]\n" + "| PathCompare [Eq]\n" + "| Const [2]\n" + "Scan [coll, {scan_0}]\n", + latest); +} + TEST(LogicalRewriter, NotPushdownComposeM) { using namespace unit_test_abt_literals; using namespace properties; @@ -1388,6 +1540,196 @@ TEST(LogicalRewriter, NotPushdownComposeM) { latest); } +TEST(LogicalRewriter, NotPushdownPathConstant) { + ABT rootNode = + NodeBuilder{} + .root("scan_0") + .filter(_unary("Not", + _evalf(_pconst(_binary("Gt", "10"_cint64, "100"_cint64)), "scan_0"_var) + + )) + .finish(_scan("scan_0", "coll")); + + auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager({OptPhase::MemoSubstitutionPhase}, + prefixId, + Metadata{{{"coll", createScanDef({}, {})}}}, + boost::none /*costModel*/, + DebugInfo::kDefaultForTests); + ABT latest = std::move(rootNode); + phaseManager.optimize(latest); + + // We should push the Not down. If the child expression of a PathConstant cannot be further + // simplified, we negate the expression with UnaryOp [Not]. 
+ ASSERT_EXPLAIN_V2_AUTO( + "Root [{scan_0}]\n" + "Filter []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathConstant []\n" + "| UnaryOp [Not]\n" + "| BinaryOp [Gt]\n" + "| | Const [100]\n" + "| Const [10]\n" + "Scan [coll, {scan_0}]\n", + latest); +} + +TEST(LogicalRewriter, NotPushdownPathConstantNested) { + ABT rootNode = + NodeBuilder{} + .root("scan_0") + .filter(_evalf( + _pconst(_unary( + "Not", _evalf(_pconst(_evalp(_get("a", _id()), "scan_0"_var)), "scan_0"_var))), + "scan_0"_var)) + .finish(_scan("scan_0", "coll")); + + auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager({OptPhase::MemoSubstitutionPhase}, + prefixId, + Metadata{{{"coll", createScanDef({}, {})}}}, + boost::none /*costModel*/, + DebugInfo::kDefaultForTests); + ABT latest = std::move(rootNode); + phaseManager.optimize(latest); + + // We should push the Not down through another PathConstant, until EvalPath. + ASSERT_EXPLAIN_V2_AUTO( + "Root [{scan_0}]\n" + "Filter []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathConstant []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathConstant []\n" + "| UnaryOp [Not]\n" + "| EvalPath []\n" + "| | Variable [scan_0]\n" + "| PathGet [a]\n" + "| PathIdentity []\n" + "Scan [coll, {scan_0}]\n", + latest); +} + +TEST(LogicalRewriter, NotPushdownPathConstantNotsAreCancelled) { + ABT rootNode = + NodeBuilder{} + .root("scan_0") + .filter(_evalf( + _pconst(_unary( + "Not", + _evalf(_pconst(_unary("Not", + _evalf(_pconst(_evalp(_get("a", _id()), "scan_0"_var)), + "scan_0"_var))), + "scan_0"_var))), + "scan_0"_var)) + .finish(_scan("scan_0", "coll")); + + auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager({OptPhase::MemoSubstitutionPhase}, + prefixId, + Metadata{{{"coll", createScanDef({}, {})}}}, + boost::none /*costModel*/, + DebugInfo::kDefaultForTests); + ABT latest = std::move(rootNode); + phaseManager.optimize(latest); + + // We should push the Not down and cancel out the Nots inside PathConstant. + ASSERT_EXPLAIN_V2_AUTO( + "Root [{scan_0}]\n" + "Filter []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathConstant []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathConstant []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathConstant []\n" + "| EvalPath []\n" + "| | Variable [scan_0]\n" + "| PathGet [a]\n" + "| PathIdentity []\n" + "Scan [coll, {scan_0}]\n", + latest); +} + +TEST(LogicalRewriter, NotPushdownPathDefault) { + // MQL: aggregate({$match:{ a: {$exists:false}}}) + ABT rootNode = + NodeBuilder{} + .root("scan_0") + .filter(_evalf( + _pconst(_unary("Not", _evalf(_get("a", _default(_cbool(false))), "scan_0"_var))), + "scan_0"_var)) + .finish(_scan("scan_0", "coll")); + + auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager({OptPhase::MemoSubstitutionPhase}, + prefixId, + Metadata{{{"coll", createScanDef({}, {})}}}, + boost::none /*costModel*/, + DebugInfo::kDefaultForTests); + ABT latest = std::move(rootNode); + phaseManager.optimize(latest); + + // We should push the Not down through PathConstant. 
+ ASSERT_EXPLAIN_V2_AUTO( + "Root [{scan_0}]\n" + "Filter []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathConstant []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathGet [a]\n" + "| PathDefault []\n" + "| Const [true]\n" + "Scan [coll, {scan_0}]\n", + latest); +} + +TEST(LogicalRewriter, NotPushdownPathDefaultNested) { + ABT rootNode = + NodeBuilder{} + .root("scan_0") + .filter(_evalf( + _pconst(_unary( + "Not", + _evalf(_default(_evalf(_default(_cbool(false)), "scan_0"_var)), "scan_0"_var))), + "scan_0"_var)) + .finish(_scan("scan_0", "coll")); + + auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager({OptPhase::MemoSubstitutionPhase}, + prefixId, + Metadata{{{"coll", createScanDef({}, {})}}}, + boost::none /*costModel*/, + DebugInfo::kDefaultForTests); + ABT latest = std::move(rootNode); + phaseManager.optimize(latest); + + // We should push the Not down through the nested PathConstant. + ASSERT_EXPLAIN_V2_AUTO( + "Root [{scan_0}]\n" + "Filter []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathConstant []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathDefault []\n" + "| EvalFilter []\n" + "| | Variable [scan_0]\n" + "| PathDefault []\n" + "| Const [true]\n" + "Scan [coll, {scan_0}]\n", + latest); +} + TEST(LogicalRewriter, NotPushdownUnderLambdaSuccess) { // Example translation of {a: {$elemMatch: {b: {$ne: 2}}}} ABT scanNode = make("scan_0", "coll"); @@ -1820,6 +2162,150 @@ TEST(LogicalRewriter, RelaxComposeM) { optimized); } +TEST(LogicalRewriter, UnboundCandidateIndexInSingleIndexScan) { + auto prefixId = PrefixId::createForTests(); + + // Construct a query which tests "b" = 1 and "c" = 2. + ABT rootNode = NodeBuilder{} + .root("root") + .filter(_evalf(_get("b", _traverse1(_cmp("Eq", "1"_cint64))), "root"_var)) + .filter(_evalf(_get("c", _traverse1(_cmp("Eq", "2"_cint64))), "root"_var)) + .finish(_scan("root", "c1")); + + // We have one index with 2 fields: "a", "b" + auto phaseManager = makePhaseManager( + {OptPhase::MemoSubstitutionPhase, OptPhase::MemoExplorationPhase}, + prefixId, + {{{"c1", + createScanDef( + {}, + {{"index1", + IndexDefinition{{{makeNonMultikeyIndexPath("a"), CollationOp::Ascending}, + {makeNonMultikeyIndexPath("b"), CollationOp::Ascending}}, + false /*isMultiKey*/, + {DistributionType::Centralized}, + {}}}})}}}, + boost::none /*costModel*/, + {true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests}); + + ABT optimized = std::move(rootNode); + phaseManager.optimize(optimized); + + const RIDIntersectNode& ridIntersectNode = + *optimized.cast()->getChild().cast(); + + // As opposed to the test 'DiscardUnboundCandidateIndexInMultiIndexScan', the 'indexNode' should + // still keep its unbound candidate indexes as it is not a multi-index plan. 
+ ASSERT_EXPLAIN_V2_AUTO( + "Sargable [Index]\n" + "| | requirements: \n" + "| | {{{root, 'PathGet [b] PathIdentity []', {{{=Const [1]}}}}}}\n" + "| candidateIndexes: \n" + "| candidateId: 1, index1, {' 1': evalTemp_6}, {Unbound, Unbound}, " + "{{{}}}}, \n" + "| residualReqs: \n" + "| {{{evalTemp_6, 'PathIdentity []', {{{=Const [1]}}}, entryIndex: 0}}}\n" + "Scan [c1, {root}]\n", + ridIntersectNode.getLeftChild()); + + ASSERT_EXPLAIN_V2_AUTO( + "Sargable [Seek]\n" + "| | requirements: \n" + "| | {{{root, 'PathGet [c] PathTraverse [1] PathIdentity []', {{{=Const [2]}}}}}}\n" + "| scanParams: \n" + "| {'c': evalTemp_7}\n" + "| residualReqs: \n" + "| {{{evalTemp_7, 'PathTraverse [1] PathIdentity []', {{{=Const [2]}}}, " + "entryIndex: 0}}}\n" + "Scan [c1, {root}]\n", + ridIntersectNode.getRightChild()); +} + +/** + * A walker to check if all the sargable nodes have empty candidate index. + */ +class CheckEmptyCandidateIndexTransport { +public: + bool transport(const SargableNode& node, bool childResult, bool bindResult, bool refResult) { + ++_visitedSargableNodes; + return node.getCandidateIndexes().empty(); + } + + template + bool transport(const T& node, Ts&&... childResults) { + return (all(childResults) && ...); + } + + /** + * Returns true if all the SargableNodes in the ABT 'n' have no candidate index. + */ + bool check(const ABT& n) { + return algebra::transport(n, *this); + } + + size_t visitedSargableNodes() { + return _visitedSargableNodes; + } + +private: + bool all(bool r) { + return r; + } + + bool all(const std::vector& r) { + return std::all_of(r.begin(), r.end(), [](bool e) { return e; }); + } + + size_t _visitedSargableNodes = 0; +}; + +TEST(LogicalRewriter, DiscardUnboundCandidateIndexInMultiIndexScan) { + auto prefixId = PrefixId::createForTests(); + + // Construct a query which tests "b" = 1, "c" = 2, "b1" = 3, "c1" = 4 + ABT rootNode = NodeBuilder{} + .root("root") + .filter(_evalf(_get("b1", _traverse1(_cmp("Eq", "3"_cint64))), "root"_var)) + .filter(_evalf(_get("c1", _traverse1(_cmp("Eq", "4"_cint64))), "root"_var)) + .filter(_evalf(_get("b", _traverse1(_cmp("Eq", "1"_cint64))), "root"_var)) + .filter(_evalf(_get("c", _traverse1(_cmp("Eq", "2"_cint64))), "root"_var)) + .finish(_scan("root", "c1")); + + // We have 2 indexes with 2 fields for each: ("a", "b") and ("a1", "b1") + auto phaseManager = makePhaseManager( + {OptPhase::MemoSubstitutionPhase, OptPhase::MemoExplorationPhase}, + prefixId, + {{{"c1", + createScanDef( + {}, + {{"index1", + IndexDefinition{{{makeNonMultikeyIndexPath("a"), CollationOp::Ascending}, + {makeNonMultikeyIndexPath("b"), CollationOp::Ascending}}, + false /*isMultiKey*/, + {DistributionType::Centralized}, + {}}}, + {"index2", + IndexDefinition{{{makeNonMultikeyIndexPath("a1"), CollationOp::Ascending}, + {makeNonMultikeyIndexPath("b1"), CollationOp::Ascending}}, + false /*isMultiKey*/, + {DistributionType::Centralized}, + {}}}})}}}, + boost::none /*costModel*/, + {true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests}); + + ABT optimized = std::move(rootNode); + phaseManager.getHints()._keepRejectedPlans = true; + const auto& plans = + phaseManager.optimizeNoAssert(std::move(optimized), true /* includeRejected */); + + // Check if all the unbound candidate indexes are discarded during SargableSplit rewrites. 
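The CheckEmptyCandidateIndexTransport walker above folds a boolean up the tree: the node type of interest contributes its own check, every other node just ANDs its children's results, and a counter records how many interesting nodes were seen. A stripped-down sketch of that fold over a generic tree, using a hypothetical node type rather than the ABT transport machinery:

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

// Generic tree node; 'name' marks the node kind we care about.
struct Node {
    std::string name;
    bool flag = false;  // stands in for "has no candidate indexes"
    std::vector<Node> children;
};

// Bottom-up fold: AND the children's results, apply the node-specific check,
// and count how many nodes of the interesting kind were visited.
bool checkAll(const Node& n, std::size_t& visited) {
    bool result = true;
    for (const Node& c : n.children) {
        result = checkAll(c, visited) && result;  // visit every child; no short-circuit skip
    }
    if (n.name == "Sargable") {
        ++visited;
        result = result && n.flag;
    }
    return result;
}

int main() {
    const Node root{"Root", false, {Node{"Sargable", true, {Node{"Scan", false, {}}}}}};
    std::size_t visited = 0;
    const bool ok = checkAll(root, visited);
    std::cout << ok << " " << visited << "\n";  // prints "1 1"
}
```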
+ CheckEmptyCandidateIndexTransport transport; + for (const PlanAndProps& plan : plans) { + ASSERT_TRUE(transport.check(plan._node)); + } + ASSERT_GT(transport.visitedSargableNodes(), 0); +} + TEST(LogicalRewriter, SargableNodeRIN) { using namespace properties; using namespace unit_test_abt_literals; diff --git a/src/mongo/db/query/optimizer/metadata.cpp b/src/mongo/db/query/optimizer/metadata.cpp index 5cabb4826f7ed..14cda86762675 100644 --- a/src/mongo/db/query/optimizer/metadata.cpp +++ b/src/mongo/db/query/optimizer/metadata.cpp @@ -29,7 +29,18 @@ #include "mongo/db/query/optimizer/metadata.h" -#include "mongo/db/query/optimizer/node.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -109,7 +120,7 @@ IndexDefinition::IndexDefinition(IndexCollationSpec collationSpec, DistributionAndPaths distributionAndPaths, PartialSchemaRequirements partialReqMap) : IndexDefinition(std::move(collationSpec), - 2 /*version*/, + 1 /*version*/, 0 /*orderingBits*/, isMultiKey, std::move(distributionAndPaths), @@ -162,20 +173,23 @@ ScanDefinition::ScanDefinition() {} /*nonMultiKeyPathSet*/, {DistributionType::Centralized}, true /*exists*/, - {-1.0} /*ce*/) {} + boost::none /*ce*/, + {} /*shardingMetadata*/) {} ScanDefinition::ScanDefinition(ScanDefOptions options, opt::unordered_map indexDefs, MultikeynessTrie multikeynessTrie, DistributionAndPaths distributionAndPaths, const bool exists, - const CEType ce) + boost::optional ce, + ShardingMetadata shardingMetadata) : _options(std::move(options)), _distributionAndPaths(std::move(distributionAndPaths)), _indexDefs(std::move(indexDefs)), _multikeynessTrie(std::move(multikeynessTrie)), _exists(exists), - _ce(ce) {} + _ce(std::move(ce)), + _shardingMetadata(std::move(shardingMetadata)) {} const ScanDefOptions& ScanDefinition::getOptionsMap() const { return _options; @@ -201,10 +215,14 @@ bool ScanDefinition::exists() const { return _exists; } -CEType ScanDefinition::getCE() const { +const boost::optional& ScanDefinition::getCE() const { return _ce; } +ShardingMetadata ScanDefinition::shardingMetadata() const { + return _shardingMetadata; +} + Metadata::Metadata(opt::unordered_map scanDefs) : Metadata(std::move(scanDefs), 1 /*numberOfPartitions*/) {} diff --git a/src/mongo/db/query/optimizer/metadata.h b/src/mongo/db/query/optimizer/metadata.h index 89e5cb03949cf..b1ff55827c631 100644 --- a/src/mongo/db/query/optimizer/metadata.h +++ b/src/mongo/db/query/optimizer/metadata.h @@ -28,10 +28,19 @@ */ #pragma once - +#include +#include #include +#include +#include + +#include +#include +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer { @@ -65,6 +74,14 @@ struct DistributionAndPaths { }; +/** + * Structure to represent index field component and its associated collation. The _path field + * contains the path to the field component, restricted to Get, Traverse, and Id elements. + * For example, if we have an index on {a.b, c} that contains arrays, the _path for the first entry + * would be Get "a" Traverse Get "b" Traverse Id, and the _path for the second entry would be + * Get "c" Traverse Id. 
+ * Implicitly contains multikey info through Traverse element or lack of Traverse element. + */ struct IndexCollationEntry { IndexCollationEntry(ABT path, CollationOp op); @@ -74,6 +91,7 @@ struct IndexCollationEntry { CollationOp _op; }; +// Full collation specification, using a list of component entries. using IndexCollationSpec = std::vector; /** @@ -108,14 +126,16 @@ struct MultikeynessTrie { void merge(const MultikeynessTrie& other); void add(const ABT& path); - opt::unordered_map children; + std::map children; // An empty trie doesn't know whether anything is multikey. // 'true' means "not sure" while 'false' means "definitely no arrays". bool isMultiKey = true; }; /** - * Defines an available system index. + * Metadata associated with an index. Holds the index specification (index fields and their + * collations), its version (0 or 1), the collations as a bit mask, multikeyness info, and + * distribution info. This is a convenient structure for the query planning process. */ class IndexDefinition { public: @@ -158,20 +178,34 @@ class IndexDefinition { PartialSchemaRequirements _partialReqMap; }; +using IndexDefinitions = opt::unordered_map; using ScanDefOptions = opt::unordered_map; -// Used to specify parameters to scan node, such as collection name, or file where collection is -// read from. +/** + * Metadata associated with the sharding state of a collection. + */ +struct ShardingMetadata { + // Whether the collection may contain orphans. + bool mayContainOrphans{false}; +}; + +/** + * Parameters to a scan node, including distribution information, associated index definitions, + * and multikeyness information. Also includes any ScanDefOptions we might have, such as which + * database the collection is associated with, the origin of the collection (mongod or a BSON file), + * or the UUID of the collection. + */ class ScanDefinition { public: ScanDefinition(); ScanDefinition(ScanDefOptions options, - opt::unordered_map indexDefs, + IndexDefinitions indexDefs, MultikeynessTrie multikeynessTrie, DistributionAndPaths distributionAndPaths, bool exists, - CEType ce); + boost::optional ce, + ShardingMetadata shardingMetadata); const ScanDefOptions& getOptionsMap() const; @@ -184,7 +218,9 @@ class ScanDefinition { bool exists() const; - CEType getCE() const; + const boost::optional& getCE() const; + + ShardingMetadata shardingMetadata() const; private: ScanDefOptions _options; @@ -203,9 +239,19 @@ class ScanDefinition { bool _exists; // If positive, estimated number of docs in the collection. - CEType _ce; + boost::optional _ce; + + ShardingMetadata _shardingMetadata; }; +/** + * Represents the optimizer’s view of the state of the rest of the system in terms of relevant + * resources. Currently we store the set of available collections in the system. In the future, + * when we support distributed planning, this is where we will put information related to the + * physical organization and topology of the machines. + * For each collection, we hold distribution information (fields it may be sharded on), multikeyness + * info, and data related to associated indexes in addition to other relevant metadata. 
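Two of the metadata changes above are worth calling out: the collection cardinality estimate becomes a boost::optional instead of a CEType carrying a -1.0 sentinel, and ShardingMetadata (currently just mayContainOrphans) rides along on the scan definition. A tiny sketch of the sentinel-to-optional move using std::optional, with illustrative names rather than the real ScanDefinition API:

```cpp
#include <iostream>
#include <optional>

// Before: ce == -1.0 meant "unknown", and every consumer had to remember the sentinel.
// After: absence is represented in the type, so callers must decide explicitly how to
// handle a missing estimate.
struct ScanStats {
    std::optional<double> ce;        // estimated number of documents, if known
    bool mayContainOrphans = false;  // cf. ShardingMetadata::mayContainOrphans
};

double ceOrDefault(const ScanStats& s, double fallback) {
    return s.ce.value_or(fallback);  // explicit about what happens when no estimate exists
}

int main() {
    const ScanStats unknown{};      // no estimate yet
    const ScanStats known{1000.0};  // estimate supplied
    std::cout << ceOrDefault(unknown, 100.0) << "\n";  // 100
    std::cout << ceOrDefault(known, 100.0) << "\n";    // 1000
}
```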
+ */ struct Metadata { Metadata(opt::unordered_map scanDefs); Metadata(opt::unordered_map scanDefs, size_t numberOfPartitions); diff --git a/src/mongo/db/query/optimizer/metadata_factory.cpp b/src/mongo/db/query/optimizer/metadata_factory.cpp index b6231d4b24fc5..497d979eec2b7 100644 --- a/src/mongo/db/query/optimizer/metadata_factory.cpp +++ b/src/mongo/db/query/optimizer/metadata_factory.cpp @@ -29,29 +29,27 @@ #include "mongo/db/query/optimizer/metadata_factory.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/db/query/optimizer/partial_schema_requirements.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { -ScanDefinition createScanDef(ScanDefOptions options, - opt::unordered_map indexDefs) { - return createScanDef(std::move(options), - std::move(indexDefs), - ConstEval::constFold, - {DistributionType::Centralized}, - true /*exists*/); -} - -ScanDefinition createScanDef(ScanDefOptions options, - opt::unordered_map indexDefs, - const ConstFoldFn& constFold, - DistributionAndPaths distributionAndPaths, - const bool exists, - const CEType ce) { +MultikeynessTrie createTrie(const IndexDefinitions& indexDefs) { MultikeynessTrie multikeynessTrie; - // Collect non-multiKeyPaths from each index. for (const auto& [indexDefName, indexDef] : indexDefs) { // Skip partial indexes. A path could be non-multikey on a partial index (subset of the @@ -66,6 +64,46 @@ ScanDefinition createScanDef(ScanDefOptions options, } // The empty path refers to the whole document, which can't be an array. multikeynessTrie.isMultiKey = false; + return multikeynessTrie; +} + +ScanDefinition createScanDef(ScanDefOptions options, IndexDefinitions indexDefs) { + + MultikeynessTrie multikeynessTrie = createTrie(indexDefs); + return createScanDef(std::move(options), + std::move(indexDefs), + std::move(multikeynessTrie), + ConstEval::constFold, + {DistributionType::Centralized}, + true); +} + +ScanDefinition createScanDef(ScanDefOptions options, + IndexDefinitions indexDefs, + const ConstFoldFn& constFold, + DistributionAndPaths distributionAndPaths, + const bool exists, + boost::optional ce) { + + MultikeynessTrie multikeynessTrie = createTrie(indexDefs); + + return createScanDef(std::move(options), + std::move(indexDefs), + std::move(multikeynessTrie), + constFold, + std::move(distributionAndPaths), + exists, + std::move(ce)); +} + +ScanDefinition createScanDef(ScanDefOptions options, + IndexDefinitions indexDefs, + MultikeynessTrie multikeynessTrie, + const ConstFoldFn& constFold, + DistributionAndPaths distributionAndPaths, + const bool exists, + boost::optional ce, + ShardingMetadata shardingMetadata) { // Simplify partial filter requirements using the non-multikey paths. 
for (auto& [indexDefName, indexDef] : indexDefs) { @@ -88,7 +126,8 @@ ScanDefinition createScanDef(ScanDefOptions options, std::move(multikeynessTrie), std::move(distributionAndPaths), exists, - ce}; + std::move(ce), + std::move(shardingMetadata)}; } } // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/metadata_factory.h b/src/mongo/db/query/optimizer/metadata_factory.h index 4971a42673b69..75c3746d3e419 100644 --- a/src/mongo/db/query/optimizer/metadata_factory.h +++ b/src/mongo/db/query/optimizer/metadata_factory.h @@ -29,19 +29,34 @@ #pragma once +#include +#include + +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/utils/const_fold_interface.h" namespace mongo::optimizer { +MultikeynessTrie createTrie(const IndexDefinitions& indexDefs); + +ScanDefinition createScanDef(ScanDefOptions options, IndexDefinitions indexDefs); + ScanDefinition createScanDef(ScanDefOptions options, - opt::unordered_map indexDefs); + IndexDefinitions indexDefs, + const ConstFoldFn& constFold, + DistributionAndPaths distributionAndPaths, + bool exists = true, + boost::optional ce = boost::none); ScanDefinition createScanDef(ScanDefOptions options, - opt::unordered_map indexDefs, + IndexDefinitions indexDefs, + MultikeynessTrie multikeynessTrie, const ConstFoldFn& constFold, DistributionAndPaths distributionAndPaths, bool exists = true, - CEType ce = CEType{-1.0}); + boost::optional ce = boost::none, + ShardingMetadata shardingMetadata = {}); } // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/node.cpp b/src/mongo/db/query/optimizer/node.cpp index e4e8892119e42..0467d9e9280b3 100644 --- a/src/mongo/db/query/optimizer/node.cpp +++ b/src/mongo/db/query/optimizer/node.cpp @@ -27,12 +27,24 @@ * it in the license file. 
*/ -#include -#include - -#include "mongo/db/query/optimizer/node.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/str.h" namespace mongo::optimizer { @@ -373,19 +385,21 @@ const ProjectionName& RIDUnionNode::getScanProjectionName() const { static ProjectionNameVector createSargableBindings(const PartialSchemaRequirements& reqMap) { ProjectionNameVector result; - PSRExpr::visitDNF(reqMap.getRoot(), [&](const PartialSchemaEntry& e) { - if (auto binding = e.second.getBoundProjectionName()) { - result.push_back(*binding); - } - }); + PSRExpr::visitDNF(reqMap.getRoot(), + [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + if (auto binding = e.second.getBoundProjectionName()) { + result.push_back(*binding); + } + }); return result; } static ProjectionNameVector createSargableReferences(const PartialSchemaRequirements& reqMap) { ProjectionNameOrderPreservingSet result; - PSRExpr::visitDNF(reqMap.getRoot(), [&](const PartialSchemaEntry& e) { - result.emplace_back(*e.first._projectionName); - }); + PSRExpr::visitDNF(reqMap.getRoot(), + [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + result.emplace_back(*e.first._projectionName); + }); return result.getVector(); } @@ -423,44 +437,49 @@ SargableNode::SargableNode(PartialSchemaRequirements reqMap, // projections, or non-trivial multikey requirements which also bind. Further assert that under // a conjunction 1) non-multikey paths have at most one req and 2) there are no duplicate bound // projection names. 
- PSRExpr::visitDisjuncts(_reqMap.getRoot(), [&](const PSRExpr::Node& disjunct, const size_t) { - PartialSchemaKeySet seenKeys; - ProjectionNameSet seenProjNames; - PSRExpr::visitConjuncts(disjunct, [&](const PSRExpr::Node& conjunct, const size_t) { - PSRExpr::visitAtom(conjunct, [&](const PartialSchemaEntry& e) { - const auto& [key, req] = e; - if (auto projName = req.getBoundProjectionName()) { - tassert( - 6624094, - "SargableNode has a multikey requirement with a non-trivial interval which " - "also binds", - isIntervalReqFullyOpenDNF(req.getIntervals()) || - !checkPathContainsTraverse(key._path)); - tassert(6624095, - "SargableNode has a perf only binding requirement", - !req.getIsPerfOnly()); - - auto insertedBoundProj = seenProjNames.insert(*projName).second; - tassert(6624087, - "PartialSchemaRequirements has duplicate bound projection names in " - "a conjunction", - insertedBoundProj); - } - - tassert(6624088, - "SargableNode cannot reference an internally bound projection", - boundsProjectionNameSet.count(*key._projectionName) == 0); - - if (!checkPathContainsTraverse(key._path)) { - auto insertedKey = seenKeys.insert(key).second; - tassert(7155020, - "PartialSchemaRequirements has two predicates on the same non-multikey " - "path in a conjunction", - insertedKey); - } - }); + PSRExpr::visitDisjuncts( + _reqMap.getRoot(), [&](const PSRExpr::Node& disjunct, const PSRExpr::VisitorContext&) { + PartialSchemaKeySet seenKeys; + ProjectionNameSet seenProjNames; + PSRExpr::visitConjuncts( + disjunct, [&](const PSRExpr::Node& conjunct, const PSRExpr::VisitorContext&) { + PSRExpr::visitAtom( + conjunct, [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + const auto& [key, req] = e; + if (auto projName = req.getBoundProjectionName()) { + tassert(6624094, + "SargableNode has a multikey requirement with a " + "non-trivial interval which " + "also binds", + isIntervalReqFullyOpenDNF(req.getIntervals()) || + !checkPathContainsTraverse(key._path)); + tassert(6624095, + "SargableNode has a perf only binding requirement", + !req.getIsPerfOnly()); + + auto insertedBoundProj = seenProjNames.insert(*projName).second; + tassert(6624087, + "PartialSchemaRequirements has duplicate bound projection " + "names in " + "a conjunction", + insertedBoundProj); + } + + tassert(6624088, + "SargableNode cannot reference an internally bound projection", + boundsProjectionNameSet.count(*key._projectionName) == 0); + + if (!checkPathContainsTraverse(key._path)) { + auto insertedKey = seenKeys.insert(key).second; + tassert(7155020, + "PartialSchemaRequirements has two predicates on the same " + "non-multikey " + "path in a conjunction", + insertedKey); + } + }); + }); }); - }); } bool SargableNode::operator==(const SargableNode& other) const { diff --git a/src/mongo/db/query/optimizer/node.h b/src/mongo/db/query/optimizer/node.h index 8e92546b1381c..98a0527e132f9 100644 --- a/src/mongo/db/query/optimizer/node.h +++ b/src/mongo/db/query/optimizer/node.h @@ -29,6 +29,12 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include #include @@ -38,10 +44,15 @@ #include "mongo/db/query/optimizer/algebra/operator.h" #include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/partial_schema_requirements.h" #include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/syntax/path.h" +#include 
"mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/printable_enum.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -269,9 +280,6 @@ class IndexScanNode final : public ABTOpFixedArity<1>, public ExclusivelyPhysica * 'ridProjectionName' parameter designates the incoming rid which is the starting point of the * seek. 'fieldProjectionMap' may choose to include an outgoing rid which will contain the * successive (if we do not have a following limit) document ids. - * - * TODO SERVER-68936: Can we let it advance with a limit based on upper rid limit in case of primary - * index? */ class SeekNode final : public ABTOpFixedArity<2>, public ExclusivelyPhysicalNode { using Base = ABTOpFixedArity<2>; @@ -458,14 +466,31 @@ class RIDUnionNode final : public ABTOpFixedArity<2>, public ExclusivelyLogicalN * This is a logical node which represents special kinds of (simple) evaluations and filters which * are amenable to being used in indexing or covered scans. * - * It collects a conjunction of predicates in the following form: - * -> - - * For example to encode a conjunction which encodes filtering with array traversal on "a" - ($match(a: {$gt, 1}} combined with a retrieval of the field "b" (without restrictions on its - value). - * PathGet "a" Traverse Id | scan_0 -> [1, +inf], - * PathGet "b" Id | scan_0 -> (-inf, +inf), "pb" + * These evaluations and filters are tracked via PartialSchemaRequirements in DNF. For example, a + * SargableNode which encodes a disjunction of three predicates, {a: {$eq: 1}}, + * {b: {$eq: 2}}, and {c: {$gt: 3}} may have the following PartialSchemaEntries: + * entry1: {, <[1, 1], >} + * entry2: {, <[2, 2], >} + * entry3: {, <[3, +inf], >} + * These entries would then be composed in DNF: OR( AND( entry1 ), AND( entry2 ), AND( entry3 )). + * + * The partial schema requirements should be simplified before constructing a SargableNode. There + * should be at least 1 and at most kMaxPartialSchemaReqs entries in the requirements. Also, within + * a conjunction of PartialSchemaEntries, only one instance of a path without Traverse elements + * (non-multikey) is allowed. By contrast several instances of paths with Traverse elements + * (multikey) are allowed. For example: Get "a" Get "b" Id is allowed just once while Get "a" + * Traverse Get "b" Id is allowed multiple times. + * + * The SargableNode also tracks some precomputed information such as which indexes are suitable + * for satisfying the requirements. + * + * Finally, each SargableNode has an IndexReqTarget used to control SargableNode splitting + * optimizations. During optimization, SargableNodes are first introduced with a Complete target. + * A Complete target indicates that the SargableNode is responsible for satisfying + * the entire set of predicates extracted from the original query (that is, all predicates + * identified pre-splitting). During SargableNode splitting, Index and Seek targets may be + * introduced. An Index target indicates the SargableNode need only produce index keys, whereas a + * Seek target indicates the SargableNode should produce documents given RIDs. 
*/ class SargableNode final : public ABTOpFixedArity<3>, public ExclusivelyLogicalNode { using Base = ABTOpFixedArity<3>; diff --git a/src/mongo/db/query/optimizer/opt_phase_manager.cpp b/src/mongo/db/query/optimizer/opt_phase_manager.cpp index 8b7b218bda0ae..6fa9b19183eca 100644 --- a/src/mongo/db/query/optimizer/opt_phase_manager.cpp +++ b/src/mongo/db/query/optimizer/opt_phase_manager.cpp @@ -29,11 +29,26 @@ #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include +#include +#include +#include +#include + +#include +#include + #include "mongo/db/query/optimizer/cascades/logical_props_derivation.h" +#include "mongo/db/query/optimizer/cascades/physical_rewriter.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" #include "mongo/db/query/optimizer/rewrites/path.h" #include "mongo/db/query/optimizer/rewrites/path_lower.h" #include "mongo/db/query/optimizer/utils/memo_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/opt_phase_manager.h b/src/mongo/db/query/optimizer/opt_phase_manager.h index 5f22ebcbf1830..79f95a1239991 100644 --- a/src/mongo/db/query/optimizer/opt_phase_manager.h +++ b/src/mongo/db/query/optimizer/opt_phase_manager.h @@ -29,29 +29,44 @@ #pragma once +#include +#include +#include +#include #include +#include +#include +#include "mongo/db/query/optimizer/algebra/operator.h" #include "mongo/db/query/optimizer/cascades/interfaces.h" #include "mongo/db/query/optimizer/cascades/logical_rewriter.h" +#include "mongo/db/query/optimizer/cascades/memo.h" #include "mongo/db/query/optimizer/cascades/physical_rewriter.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node_defs.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/const_fold_interface.h" #include "mongo/db/query/optimizer/utils/memo_utils.h" +#include "mongo/db/query/optimizer/utils/printable_enum.h" +#include "mongo/db/query/optimizer/utils/utils.h" namespace mongo::optimizer { using namespace cascades; -/** - * This class wraps together different optimization phases. - * First the transport rewrites are applied such as constant folding and redundant expression - * elimination. Second the logical and physical reordering rewrites are applied using the memo. - * Third the final transport rewritesd are applied. - */ - #define OPT_PHASE(F) \ /* ConstEval performs the following rewrites: constant folding, inlining, and dead code \ - * elimination. */ \ + * elimination. \ + * PathFusion implements path laws, for example shortcutting field assignment and reads, and \ + * other path optimizations. \ + * We switch between applying ConstEval and PathFusion for as long as they change the query, \ + * as they can enable new rewrites in each other. These are both done in-place rather than \ + * creating plan alternatives \ + */ \ F(ConstEvalPre) \ F(PathFuse) \ \ @@ -65,13 +80,23 @@ using namespace cascades; /* Implementation and enforcement rules. */ \ F(MemoImplementationPhase) \ \ + /* Lowers paths to expressions. Not to be confused with SBENodeLowering, which lowers ABT \ + * nodes and expressions to an SBE plan. 
*/ \ F(PathLower) \ + /* Final round of constant folding, identical to the first ConstEval stage. */ \ F(ConstEvalPost) MAKE_PRINTABLE_ENUM(OptPhase, OPT_PHASE); MAKE_PRINTABLE_ENUM_STRING_ARRAY(OptPhaseEnum, OptPhase, OPT_PHASE); #undef OPT_PHASE +/** + * This class drives the optimization process, wrapping together different optimization phases. + * First the transport rewrites are applied such as constant folding and redundant expression + * elimination. Second the logical and physical reordering rewrites are applied using the memo. + * Third the final transport rewrites are applied. + * Phases may be skipped by specifying a subset of the phases to run in the phaseSet argument. + */ class OptPhaseManager { public: using PhaseSet = opt::unordered_set; diff --git a/src/mongo/db/query/optimizer/optimizer_failure_test.cpp b/src/mongo/db/query/optimizer/optimizer_failure_test.cpp index aee3576baff39..6f087b173e3f0 100644 --- a/src/mongo/db/query/optimizer/optimizer_failure_test.cpp +++ b/src/mongo/db/query/optimizer/optimizer_failure_test.cpp @@ -27,16 +27,32 @@ * it in the license file. */ +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/optimizer_test.cpp b/src/mongo/db/query/optimizer/optimizer_test.cpp index 13d34e6d807bc..88a37fc06be61 100644 --- a/src/mongo/db/query/optimizer/optimizer_test.cpp +++ b/src/mongo/db/query/optimizer/optimizer_test.cpp @@ -27,15 +27,32 @@ * it in the license file. 
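The OPT_PHASE list above is an X-macro: MAKE_PRINTABLE_ENUM and MAKE_PRINTABLE_ENUM_STRING_ARRAY expand the same list once into the enum and once into a parallel name table, the same trick behind lookups like PhysicalRewriterTypeEnum::toString earlier in this patch. A minimal self-contained version of the technique, with simplified macros rather than the MongoDB ones:

```cpp
#include <cstddef>
#include <iostream>

// Single source of truth for the phase list.
#define OPT_PHASE(F) \
    F(ConstEvalPre)  \
    F(PathFuse)      \
    F(MemoSubstitutionPhase)

// Expand once into an enum...
#define MAKE_ENUM_MEMBER(name) name,
enum class OptPhase { OPT_PHASE(MAKE_ENUM_MEMBER) };

// ...and once into a parallel array of names, kept in sync automatically.
#define MAKE_ENUM_NAME(name) #name,
inline constexpr const char* kOptPhaseNames[] = {OPT_PHASE(MAKE_ENUM_NAME)};

const char* toString(OptPhase p) {
    return kOptPhaseNames[static_cast<std::size_t>(p)];
}

int main() {
    std::cout << toString(OptPhase::PathFuse) << "\n";  // prints "PathFuse"
}
```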
*/ -#include "mongo/db/query/optimizer/node.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/reference_tracker.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/interval_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" #include "mongo/db/query/optimizer/utils/utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/partial_schema_requirements.cpp b/src/mongo/db/query/optimizer/partial_schema_requirements.cpp index 40b54d4d61dc1..dd9ff9dbc6751 100644 --- a/src/mongo/db/query/optimizer/partial_schema_requirements.cpp +++ b/src/mongo/db/query/optimizer/partial_schema_requirements.cpp @@ -29,9 +29,20 @@ #include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" #include "mongo/db/query/optimizer/index_bounds.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/utils/abt_compare.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { namespace { @@ -42,11 +53,11 @@ class PSRNormalizeTransporter { } void transport(PSRExpr::Conjunction& node, std::vector& children) { - sortChildren(children); + sortAndDedupChildren(children); } void transport(PSRExpr::Disjunction& node, std::vector& children) { - sortChildren(children); + sortAndDedupChildren(children); } void normalize(PSRExpr::Node& node) { @@ -54,13 +65,16 @@ class PSRNormalizeTransporter { } private: - void sortChildren(std::vector& children) { + void sortAndDedupChildren(std::vector& children) { struct Comparator { bool operator()(const PSRExpr::Node& i1, const PSRExpr::Node& i2) const { return comparePartialSchemaRequirementsExpr(i1, i2) < 0; } }; std::sort(children.begin(), children.end(), Comparator{}); + + auto end = std::unique(children.begin(), children.end()); + children.erase(end, children.end()); } }; @@ -74,8 +88,11 @@ PartialSchemaEntry makeNoopPartialSchemaEntry() { } } // namespace +void PartialSchemaRequirements::normalize(PSRExpr::Node& expr) { + PSRNormalizeTransporter{}.normalize(expr); +} void PartialSchemaRequirements::normalize() { - PSRNormalizeTransporter{}.normalize(_expr); + normalize(_expr); } PartialSchemaRequirements::PartialSchemaRequirements(PSRExpr::Node requirements) @@ -106,7 +123,7 @@ bool PartialSchemaRequirements::isNoop() const { // ...or if it has exactly one predicate which is a no-op. 
auto reqIsNoop = false; - auto checkNoop = [&](const Entry& entry) { + auto checkNoop = [&](const Entry& entry, const PSRExpr::VisitorContext&) { reqIsNoop = (entry == makeNoopPartialSchemaEntry()); }; if (PSRExpr::isCNF(_expr)) { @@ -125,7 +142,7 @@ boost::optional PartialSchemaRequirements::findProjection( PSRExpr::isSingletonDisjunction(getRoot())); boost::optional proj; - PSRExpr::visitDNF(_expr, [&](const Entry& entry) { + PSRExpr::visitDNF(_expr, [&](const Entry& entry, const PSRExpr::VisitorContext&) { if (!proj && entry.first == key) { proj = entry.second.getBoundProjectionName(); } @@ -141,28 +158,30 @@ PartialSchemaRequirements::findFirstConjunct(const PartialSchemaKey& key) const size_t i = 0; boost::optional> res; - PSRExpr::visitDNF(_expr, [&](const PartialSchemaEntry& entry) { - if (!res && entry.first == key) { - res = {{i, entry.second}}; - } - ++i; - }); + PSRExpr::visitDNF(_expr, + [&](const PartialSchemaEntry& entry, const PSRExpr::VisitorContext& ctx) { + if (entry.first == key) { + res = {{i, entry.second}}; + ctx.returnEarly(); + return; + } + ++i; + }); return res; } void PartialSchemaRequirements::add(PartialSchemaKey key, PartialSchemaRequirement req) { tassert(7016406, "Expected a PartialSchemaRequirements in DNF form", PSRExpr::isDNF(_expr)); - // TODO SERVER-69026 Consider applying the distributive law. tassert(7453912, "Expected a singleton disjunction", PSRExpr::isSingletonDisjunction(_expr)); // Add an entry to the first conjunction - PSRExpr::visitDisjuncts(_expr, [&](PSRExpr::Node& disjunct, const size_t i) { - if (i == 0) { + PSRExpr::visitDisjuncts( + _expr, [&](PSRExpr::Node& disjunct, const PSRExpr::VisitorContext& ctx) { const auto& conjunction = disjunct.cast(); conjunction->nodes().emplace_back( PSRExpr::make(Entry(std::move(key), std::move(req)))); - } - }); + ctx.returnEarly(); + }); normalize(); } @@ -228,12 +247,82 @@ static bool simplifyExpr( bool PartialSchemaRequirements::simplify( std::function func) { - if (PSRExpr::isCNF(_expr)) { - return simplifyExpr(_expr, func); + return simplify(_expr, func); +} +bool PartialSchemaRequirements::simplify( + PSRExpr::Node& expr, + std::function func) { + if (PSRExpr::isCNF(expr)) { + return simplifyExpr(expr, func); } - return simplifyExpr(_expr, func); + return simplifyExpr(expr, func); } +void PartialSchemaRequirements::simplifyRedundantDNF(PSRExpr::Node& expr) { + tassert(6902601, "simplifyRedundantDNF expects DNF", PSRExpr::isDNF(expr)); + + // Normalizing ensures: + // - Each term has no duplicate atoms. + // - The overall expression has no duplicate terms. + // - The terms are sorted in increasing length. + PSRNormalizeTransporter{}.normalize(expr); + + // Now remove terms that are subsumed by some other term. This means try to remove terms whose + // atoms are a superset of some other term: (a^b) subsumes (a^b^c), so remove (a^b^c). Since + // there are no duplicate atoms, we're looking to remove terms whose 'nodes().size()' is large. + PSRExpr::NodeVector& terms = expr.cast()->nodes(); + + // First give each unique atom a label. + // Store each atom by value because 'remove_if' can move-from a 'PSRExpr::Node', which deletes + // the heap-allocated 'Atom'. 
+ std::vector atoms; + const auto atomLabel = [&](const PSRExpr::Atom& atom) -> size_t { + size_t i = 0; + for (const auto& seen : atoms) { + if (atom == seen) { + return i; + } + ++i; + } + atoms.emplace_back(atom); + return i; + }; + using Mask = size_t; + static constexpr size_t maxAtoms = sizeof(Mask) * CHAR_BIT; + for (const PSRExpr::Node& termNode : terms) { + for (const PSRExpr::Node& atomNode : termNode.cast()->nodes()) { + const PSRExpr::Atom& atom = *atomNode.cast(); + atomLabel(atom); + if (atoms.size() > maxAtoms) { + return; + } + } + } + + std::vector seen; + seen.reserve(terms.size()); + auto last = std::remove_if(terms.begin(), terms.end(), [&](const PSRExpr::Node& term) -> bool { + Mask mask = 0; + for (const PSRExpr::Node& atomNode : term.cast()->nodes()) { + const PSRExpr::Atom& atom = *atomNode.cast(); + mask |= Mask{1} << atomLabel(atom); + } + + // Does any previously-seen mask subsume this one? + for (Mask prev : seen) { + const bool isSuperset = (prev & mask) == prev; + if (isSuperset) { + return true; + } + } + + seen.push_back(mask); + return false; + }); + terms.erase(last, terms.end()); +} + + /** * Returns a vector of ((input binding, path), output binding). The output binding names * are unique and you can think of the vector as a product: every row has all the projections @@ -243,12 +332,13 @@ std::vector> getBoundProjections( const PartialSchemaRequirements& reqs) { // For now we assume no projections inside a nontrivial disjunction. std::vector> result; - PSRExpr::visitAnyShape(reqs.getRoot(), [&](const PartialSchemaEntry& e) { - const auto& [key, req] = e; - if (auto proj = req.getBoundProjectionName()) { - result.emplace_back(key, *proj); - } - }); + PSRExpr::visitAnyShape(reqs.getRoot(), + [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + const auto& [key, req] = e; + if (auto proj = req.getBoundProjectionName()) { + result.emplace_back(key, *proj); + } + }); tassert(7453906, "Expected no bound projections in a nontrivial disjunction", result.empty() || PSRExpr::isSingletonDisjunction(reqs.getRoot())); diff --git a/src/mongo/db/query/optimizer/partial_schema_requirements.h b/src/mongo/db/query/optimizer/partial_schema_requirements.h index 47feba141e82c..6664173944190 100644 --- a/src/mongo/db/query/optimizer/partial_schema_requirements.h +++ b/src/mongo/db/query/optimizer/partial_schema_requirements.h @@ -29,8 +29,23 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -39,12 +54,26 @@ using PSRExpr = BoolExpr; using PSRExprBuilder = PSRExpr::Builder; /** - * Represents a set of predicates and projections. Cannot represent all predicates/projections: - * only those that can typically be answered efficiently with an index. + * Represents a set of predicates and projections composed in a boolean expression in CNF or DNF. + * Cannot represent all predicates/projections, only those that can typically be answered + * efficiently with an index. * - * Only one instance of a path without Traverse elements (non-multikey) is allowed. By contrast - * several instances of paths with Traverse elements (multikey) are allowed. 
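Editor's note on the simplifyRedundantDNF implementation added above: the following standalone sketch illustrates the label-and-bitmask subsumption step, assuming atoms have already been mapped to small integer labels; it is an illustration of the technique, not the MongoDB implementation. Sorting terms by size up front plays the role normalization plays in the real code, so a subsuming (shorter) term is recorded before any term it makes redundant.

    // Remove DNF terms whose atom set is a superset of another term's atom set:
    // in a disjunction, (a ^ b ^ c) is redundant once (a ^ b) is present.
    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    using Term = std::vector<int>;  // atom labels; assumes fewer than 64 distinct atoms

    void removeSubsumedTerms(std::vector<Term>& terms) {
        // Shorter terms first, so a subsuming term is seen before the terms it subsumes.
        std::sort(terms.begin(), terms.end(),
                  [](const Term& a, const Term& b) { return a.size() < b.size(); });

        std::vector<uint64_t> seen;
        auto last = std::remove_if(terms.begin(), terms.end(), [&](const Term& term) {
            uint64_t mask = 0;
            for (int label : term) {
                mask |= uint64_t{1} << label;
            }
            for (uint64_t prev : seen) {
                if ((prev & mask) == prev) {
                    return true;  // some kept term's atoms are a subset: this term is redundant
                }
            }
            seen.push_back(mask);
            return false;
        });
        terms.erase(last, terms.end());
    }

    int main() {
        // (a ^ b) U (z) U (a ^ b ^ c), with labels a=0, b=1, c=2, z=3.
        std::vector<Term> dnf{{0, 1}, {3}, {0, 1, 2}};
        removeSubsumedTerms(dnf);
        std::cout << dnf.size() << "\n";  // 2: the (a ^ b ^ c) term is dropped
    }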
For example: Get "a" - * Get "b" Id is allowed just once while Get "a" Traverse Get "b" Id is allowed multiple times. + * The predicates take the following form, represented by type PartialSchemaEntry: + * {, } + * + * For example, suppose there is a ScanNode which creates a binding 'scan_0' representing the + * documents in a collection. To represent a conjunction which encodes filtering with array + * traversal on "a" {$match: {a: {$gt, 1}} combined with a retrieval of the field "b" (without + * restrictions on its value), the PartialSchemaEntries would look like: + * entry1: {, <[1, +inf], >} + * entry2: {, <(-inf, +inf), "pb">}. + * + * These entries could be composed in DNF: OR( AND( entry1, entry2 )). In this case we have a + * trivial disjunction, where the top-level disjunction only has one child. Or, they could be + * composed in CNF: AND( OR( entry1 ), OR( entry2 )). + * + * When representing a non-trivial disjunction, the PartialSchemaRequirements must not have any + * output bindings. * * The default / empty state represents a conjunction of zero predicates, which means always true. */ @@ -52,76 +81,11 @@ class PartialSchemaRequirements { public: using Entry = std::pair; - // TODO SERVER-69026: Remove these iterator constructs. - using ConstNodeVecIter = std::vector::const_iterator; - using NodeVecIter = std::vector::iterator; - - template - using MaybeConstNodeVecIter = - typename std::conditional::type; - - template - struct PSRIterator { - using iterator_category = std::forward_iterator_tag; - using difference_type = ptrdiff_t; - using value_type = Entry; - using pointer = typename std::conditional::type; - using reference = typename std::conditional::type; - - PSRIterator(MaybeConstNodeVecIter atomsIt) : _atomsIt(atomsIt) {} - - reference operator*() const { - return _atomsIt->template cast()->getExpr(); - } - pointer operator->() { - return &(_atomsIt->template cast()->getExpr()); - } - - PSRIterator& operator++() { - _atomsIt++; - return *this; - } - - PSRIterator operator++(int) { - PSRIterator tmp = *this; - ++(*this); - return tmp; - } - - friend bool operator==(const PSRIterator& a, const PSRIterator& b) { - return a._atomsIt == b._atomsIt; - }; - friend bool operator!=(const PSRIterator& a, const PSRIterator& b) { - return a._atomsIt != b._atomsIt; - }; - - MaybeConstNodeVecIter _atomsIt; - }; - - template - struct Range { - auto begin() const { - return PSRIterator(_begin); - } - auto end() const { - return PSRIterator(_end); - } - auto cbegin() const { - return PSRIterator(_begin); - } - auto cend() const { - return PSRIterator(_end); - } - - MaybeConstNodeVecIter _begin; - MaybeConstNodeVecIter _end; - }; - // Default PartialSchemaRequirements is a singular DNF of an empty PartialSchemaKey and // fully-open PartialSchemaRequirement which does not bind. PartialSchemaRequirements(); - PartialSchemaRequirements(PSRExpr::Node requirements); + explicit PartialSchemaRequirements(PSRExpr::Node requirements); bool operator==(const PartialSchemaRequirements& other) const; @@ -145,40 +109,12 @@ class PartialSchemaRequirements { boost::optional> findFirstConjunct( const PartialSchemaKey&) const; - // TODO SERVER-69026: Remove these methods in favor of visitDis/Conjuncts(). 
- Range conjuncts() const { - tassert(7453905, - "Expected PartialSchemaRequirement to be a singleton disjunction", - PSRExpr::isSingletonDisjunction(_expr)); - const auto& atoms = _expr.cast() - ->nodes() - .begin() - ->cast() - ->nodes(); - return {atoms.begin(), atoms.end()}; - } - - Range conjuncts() { - tassert(7453904, - "Expected PartialSchemaRequirement to be a singleton disjunction", - PSRExpr::isSingletonDisjunction(_expr)); - auto& atoms = _expr.cast() - ->nodes() - .begin() - ->cast() - ->nodes(); - return {atoms.begin(), atoms.end()}; - } - /** * Conjunctively combine 'this' with another PartialSchemaRequirement. * Asserts that 'this' is in DNF. * * For now, we assert that we have only one disjunct. This means we avoid applying * the distributive law, which would duplicate the new requirement into each disjunct. - * - * TODO SERVER-69026 Consider applying the distributive law to allow contained-OR in - * SargableNode. */ void add(PartialSchemaKey, PartialSchemaRequirement); @@ -196,6 +132,19 @@ class PartialSchemaRequirements { * TODO SERVER-73827: Consider applying this simplification during BoolExpr building. */ bool simplify(std::function); + static bool simplify(PSRExpr::Node& expr, + std::function); + static void normalize(PSRExpr::Node& expr); + + /** + * Given a DNF, try to detect and remove redundant terms. + * + * For example, in ((a ^ b) U (z) U (a ^ b ^ c)) the (a ^ b ^ c) is redundant because + * (a ^ b ^ c) implies (a ^ b). + * + * TODO SERVER-73827 Consider doing this simplification as part of BoolExpr::Builder. + */ + static void simplifyRedundantDNF(PSRExpr::Node& expr); const auto& getRoot() const { return _expr; diff --git a/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp b/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp index 54acbe55e0793..3f5635a338dbd 100644 --- a/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp +++ b/src/mongo/db/query/optimizer/physical_rewriter_optimizer_test.cpp @@ -27,16 +27,55 @@ * it in the license file.
*/ -#include "mongo/db/pipeline/abt/utils.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/ce/hinted_estimator.h" +#include "mongo/db/query/ce/test_utils.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/cascades/memo_defs.h" #include "mongo/db/query/optimizer/cascades/rewriter_rules.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/node_defs.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/reference_tracker.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/inline_auto_update.h" +#include "mongo/util/str.h" namespace mongo::optimizer { @@ -119,6 +158,8 @@ TEST(PhysRewriter, PhysicalRewriterBasic) { "| type: Centralized\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "Root [{p2}]\n" "Properties [cost: 0.438321, localCost: 0.00983406, adjustedCE: 10]\n" "| | Logical:\n" @@ -143,6 +184,8 @@ TEST(PhysRewriter, PhysicalRewriterBasic) { "| type: Centralized\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "Filter []\n" "| EvalFilter []\n" "| | Variable [p2]\n" @@ -170,6 +213,8 @@ TEST(PhysRewriter, PhysicalRewriterBasic) { "| type: Centralized, disableExchanges\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "Evaluation [{p2} = Variable [p1]]\n" "Properties [cost: 0.428487, localCost: 0, adjustedCE: 100]\n" "| | Logical:\n" @@ -191,6 +236,8 @@ TEST(PhysRewriter, PhysicalRewriterBasic) { "| type: Centralized, disableExchanges\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "Filter []\n" "| EvalFilter []\n" "| | Variable [p1]\n" @@ -215,6 +262,8 @@ TEST(PhysRewriter, PhysicalRewriterBasic) { "| type: Centralized, disableExchanges\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "PhysicalScan [{'': p1}, test]\n", phaseManager); } @@ -1102,7 +1151,7 @@ TEST(PhysRewriter, FilterIndexing4) { const BSONObj explain = ExplainGenerator::explainMemoBSONObj(phaseManager.getMemo()); for (const auto& pathAndCE : pathAndCEs) { 
BSONElement el = dotted_path_support::extractElementAtPath(explain, pathAndCE.first); - ASSERT_EQ(el.Double(), pathAndCE.second); + ASSERT_CE_APPROX_EQUAL(el.Double(), pathAndCE.second, ce::kMaxCEError); } ASSERT_EXPLAIN_V2_AUTO( @@ -1339,18 +1388,23 @@ TEST(PhysRewriter, FilterIndexingStress) { ASSERT_BSON_PATH("\"Filter\"", explainNLJ, "rightChild.child.child.child.child.nodeType"); ASSERT_BSON_PATH("\"Filter\"", explainNLJ, "rightChild.child.child.child.child.child.nodeType"); ASSERT_BSON_PATH( - "\"LimitSkip\"", explainNLJ, "rightChild.child.child.child.child.child.child.nodeType"); + "\"Filter\"", explainNLJ, "rightChild.child.child.child.child.child.child.nodeType"); ASSERT_BSON_PATH( - "\"Seek\"", explainNLJ, "rightChild.child.child.child.child.child.child.child.nodeType"); - - ASSERT_BSON_PATH("\"MergeJoin\"", explainNLJ, "leftChild.nodeType"); - ASSERT_BSON_PATH("\"IndexScan\"", explainNLJ, "leftChild.leftChild.nodeType"); - ASSERT_BSON_PATH("\"index1\"", explainNLJ, "leftChild.leftChild.indexDefName"); - ASSERT_BSON_PATH("\"Union\"", explainNLJ, "leftChild.rightChild.nodeType"); - ASSERT_BSON_PATH("\"Evaluation\"", explainNLJ, "leftChild.rightChild.children.0.nodeType"); - ASSERT_BSON_PATH("\"IndexScan\"", explainNLJ, "leftChild.rightChild.children.0.child.nodeType"); - ASSERT_BSON_PATH( - "\"index3\"", explainNLJ, "leftChild.rightChild.children.0.child.indexDefName"); + "\"Filter\"", explainNLJ, "rightChild.child.child.child.child.child.child.child.nodeType"); + ASSERT_BSON_PATH("\"LimitSkip\"", + explainNLJ, + "rightChild.child.child.child.child.child.child.child.child.nodeType"); + ASSERT_BSON_PATH("\"Seek\"", + explainNLJ, + "rightChild.child.child.child.child.child.child.child.child.child.nodeType"); + + ASSERT_BSON_PATH("\"IndexScan\"", explainNLJ, "leftChild.nodeType"); + + // With heuristic CE both indexes are equally preferable. + const auto& indexName = + dotted_path_support::extractElementAtPath(explainNLJ, "leftChild.indexDefName") + .toString(false /*includeFieldName*/); + ASSERT_TRUE(indexName == "\"index1\"" || indexName == "\"index3\""); } TEST(PhysRewriter, FilterIndexingVariable) { @@ -1805,14 +1859,14 @@ TEST(PhysRewriter, SargableProjectionRenames) { // projections. ASSERT_EXPLAIN_V2_AUTO( "Root [{root}]\n" - "Evaluation [{pa1} = Variable [pa]]\n" + "Evaluation [{pa} = Variable [pa1]]\n" "Sargable [Complete]\n" "| | requirements: \n" - "| | {{{root, 'PathGet [a] PathIdentity []', pa, {{{=Const [1]}}}}}}\n" + "| | {{{root, 'PathGet [a] PathIdentity []', pa1, {{{=Const [1]}}}}}}\n" "| scanParams: \n" - "| {'a': pa}\n" + "| {'a': pa1}\n" "| residualReqs: \n" - "| {{{pa, 'PathIdentity []', {{{=Const [1]}}}, entryIndex: 0}}}\n" + "| {{{pa1, 'PathIdentity []', {{{=Const [1]}}}, entryIndex: 0}}}\n" "Scan [c1, {root}]\n", optimized); } @@ -1980,7 +2034,10 @@ TEST(PhysRewriter, CoveredScan) { ABT optimized = std::move(rootNode); phaseManager.optimize(optimized); - ASSERT_EQ(5, phaseManager.getMemo().getStats()._physPlanExplorationCount); + ASSERT_BETWEEN_AUTO( // + 3, + 6, + phaseManager.getMemo().getStats()._physPlanExplorationCount); // Since we do not optimize with fast null handling, we need to split the predicate between the // index scan and fetch in order to handle null. 
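Editor's note on the FilterIndexing4 change above: it replaces an exact double comparison with ASSERT_CE_APPROX_EQUAL and ce::kMaxCEError, since cardinality estimates read back from the explain BSON can differ from the expected value by floating-point noise. The snippet below is a hypothetical helper showing the kind of tolerance check such an assertion performs; the function name and bound are assumptions for illustration, not the real macro.

    // Hypothetical absolute-tolerance comparison; the real ce::kMaxCEError value may differ.
    #include <cassert>
    #include <cmath>

    bool approxEqual(double actual, double expected, double maxError) {
        return std::fabs(actual - expected) <= maxError;
    }

    int main() {
        const double kMaxErrorDemo = 0.01;                // assumed bound
        assert(approxEqual(9.995, 10.0, kMaxErrorDemo));  // passes where ASSERT_EQ would fail
        assert(!approxEqual(9.5, 10.0, kMaxErrorDemo));
    }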
@@ -1990,8 +2047,8 @@ TEST(PhysRewriter, CoveredScan) { "| | Const [true]\n" "| LimitSkip [limit: 1, skip: 0]\n" "| Seek [ridProjection: rid_0, {'a': pa}, c1]\n" - "IndexScan [{'': rid_0}, scanDefName: c1, indexDefName: index1, interval: {': rid_0}, scanDefName: c1, indexDefName: index1, interval: {': root, 'a': evalTemp_2}, coll1]\n" - "Filter []\n" - "| EvalFilter []\n" - "| | FunctionCall [getArraySize] Variable [sides_0]\n" - "| PathCompare [Eq] Const [2]\n" - "GroupBy [{rid_0}]\n" - "| aggregations: \n" - "| [sides_0]\n" - "| FunctionCall [$addToSet] Variable [sideId_0]\n" - "Union [{rid_0, sideId_0}]\n" - "| Evaluation [{sideId_0} = Const [1]]\n" - "| IndexScan [{'': rid_0}, scanDefName: coll1, indexDefName: index1, interval: " - "{[Const [[]], Const [BinData(0, )])}]\n" - "Evaluation [{sideId_0} = Const [0]]\n" + "Unique [{rid_0}]\n" "Filter []\n" "| EvalFilter []\n" "| | Variable [evalTemp_3]\n" "| PathTraverse [1] PathCompare [Eq] Const [2]\n" - "IndexScan [{' 0': evalTemp_3, '': rid_0}, scanDefName: coll1, indexDefNam" - "e: index1, interval: {}]\n", + "IndexScan [{' 0': evalTemp_3, '': rid_0}, scanDefName: coll1, " + "indexDefName: index1, interval: {}]\n", optimized); } @@ -4087,6 +4135,8 @@ TEST(PhysRewriter, CollationLimit) { "| type: Centralized\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "Collation [{pa: Ascending}]\n" "Properties [cost: 0.428487, localCost: 0.428487, adjustedCE: 1000]\n" "| | Logical:\n" @@ -4112,6 +4162,8 @@ TEST(PhysRewriter, CollationLimit) { "| type: Centralized\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "PhysicalScan [{'': root, 'a': pa}, c1]\n", phaseManager); } @@ -4817,6 +4869,8 @@ TEST(PhysRewriter, EqMemberSargable) { "| type: Centralized\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "Root [{root}]\n" "Properties [cost: 0.163045, localCost: 0.0180785, adjustedCE: 54.6819]\n" "| | Logical:\n" @@ -4842,6 +4896,8 @@ TEST(PhysRewriter, EqMemberSargable) { "| type: Centralized\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "NestedLoopJoin [joinType: Inner, {rid_0}]\n" "| | Const [true]\n" "| Properties [cost: 0.0757996, localCost: 0.0757996, adjustedCE: 54.6819]\n" @@ -4864,7 +4920,10 @@ TEST(PhysRewriter, EqMemberSargable) { "| | type: Centralized\n" "| | indexingRequirement: \n" "| | Seek, dedupRID\n" - "| | repetitionEstimate: 54.6819\n" + "| | repetitionEstimate: \n" + "| | 54.6819\n" + "| | removeOrphans: \n" + "| | false\n" "| LimitSkip [limit: 1, skip: 0]\n" "| Seek [ridProjection: rid_0, {'': root}, c1]\n" "Properties [cost: 0.0691671, localCost: 0.0691671, adjustedCE: 54.6819]\n" @@ -4891,6 +4950,8 @@ TEST(PhysRewriter, EqMemberSargable) { "| type: Centralized\n" "| indexingRequirement: \n" "| Index, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "Unique [{rid_0}]\n" "SortedMerge []\n" "| | | | collation: \n" @@ -4960,7 +5021,7 @@ TEST(PhysRewriter, IndexSubfieldCovered) { ABT optimized = rootNode; phaseManager.optimize(optimized); - ASSERT_BETWEEN(20, 35, phaseManager.getMemo().getStats()._physPlanExplorationCount); + ASSERT_BETWEEN(13, 22, phaseManager.getMemo().getStats()._physPlanExplorationCount); // Observe we have a covered plan. The filters for subfields "b" and "c" are expressed as // residual predicates. 
Also observe the traverse for "a.c" is removed due to "a" being @@ -5277,6 +5338,210 @@ TEST(PhysRewriter, ConjunctionTraverseMultikey2) { optimized); } +TEST(PhysRewriter, ExplainMemoDisplayRulesForRejectedPlans) { + ABT rootNode = NodeBuilder{} + .root("root") + .filter(_evalf(_get("a", _traverse1(_cmp("Eq", "1"_cint64))), "root"_var)) + .finish(_scan("root", "c1")); + + auto prefixId = PrefixId::createForTests(); + auto phaseManager = makePhaseManager( + {OptPhase::MemoSubstitutionPhase, + OptPhase::MemoExplorationPhase, + OptPhase::MemoImplementationPhase}, + prefixId, + {{{"c1", + createScanDef({}, + {{"index1", + IndexDefinition{{{makeIndexPath("a"), CollationOp::Ascending}}, + true /*isMultiKey*/}}})}}}, + boost::none /*costModel*/, + {true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests}); + + ABT optimized = rootNode; + phaseManager.getHints()._disableBranchAndBound = true; + phaseManager.getHints()._keepRejectedPlans = true; + phaseManager.optimize(optimized); + ASSERT_EQ(4, phaseManager.getMemo().getStats()._physPlanExplorationCount); + ASSERT_EXPLAIN_MEMO_AUTO( // NOLINT + "Memo: \n" + " groupId: 0\n" + " | | Logical properties:\n" + " | | cardinalityEstimate: \n" + " | | ce: 1000\n" + " | | projections: \n" + " | | root\n" + " | | indexingAvailability: \n" + " | | [groupId: 0, scanProjection: root, scanDefName: c1, eqPredsOnly]\n" + " | | collectionAvailability: \n" + " | | c1\n" + " | | distributionAvailability: \n" + " | | distribution: \n" + " | | type: Centralized\n" + " | logicalNodes: \n" + " | logicalNodeId: 0, rule: Root\n" + " | Scan [c1, {root}]\n" + " physicalNodes: \n" + " physicalNodeId: 0, costLimit: {Infinite cost}\n" + " Physical properties:\n" + " projections: \n" + " root\n" + " distribution: \n" + " type: Centralized\n" + " indexingRequirement: \n" + " Seek, dedupRID\n" + " repetitionEstimate: \n" + " 31.6228\n" + " removeOrphans: \n" + " false\n" + " cost: 0.0472695, localCost: 0.0472695, adjustedCE: 31.6228, rule: Seek, " + "node: \n" + " LimitSkip [limit: 1, skip: 0]\n" + " ce: 1\n" + " Seek [ridProjection: rid_0, {'': root}, c1]\n" + " ce: 1\n" + " groupId: 1\n" + " | | Logical properties:\n" + " | | cardinalityEstimate: \n" + " | | ce: 31.6228\n" + " | | requirementCEs: \n" + " | | refProjection: root, path: 'PathGet [a] PathTraverse [1] " + "PathIdentity []', ce: 31.6228\n" + " | | projections: \n" + " | | root\n" + " | | indexingAvailability: \n" + " | | [groupId: 0, scanProjection: root, scanDefName: c1, eqPredsOnly, " + "hasProperInterval]\n" + " | | collectionAvailability: \n" + " | | c1\n" + " | | distributionAvailability: \n" + " | | distribution: \n" + " | | type: Centralized\n" + " | logicalNodes: \n" + " | logicalNodeId: 0, rule: Root\n" + " | Sargable [Complete]\n" + " | | | | requirements: \n" + " | | | | {{{root, 'PathGet [a] PathTraverse [1] PathIdentity []', " + "{{{=Const [1]}}}}}}\n" + " | | | candidateIndexes: \n" + " | | | candidateId: 1, index1, {}, {SimpleEquality}, {{{=Const " + "[1]}}}\n" + " | | scanParams: \n" + " | | {'a': evalTemp_0}\n" + " | | residualReqs: \n" + " | | {{{evalTemp_0, 'PathTraverse [1] PathIdentity []', " + "{{{=Const [1]}}}, entryIndex: 0}}}\n" + " | MemoLogicalDelegator [groupId: 0]\n" + " | logicalNodeId: 1, rule: SargableSplit\n" + " | RIDIntersect [root]\n" + " | | MemoLogicalDelegator [groupId: 0]\n" + " | MemoLogicalDelegator [groupId: 3]\n" + " physicalNodes: \n" + " physicalNodeId: 0, costLimit: {Infinite cost}\n" + " Physical properties:\n" + " projections: \n" + " root\n" + " 
distribution: \n" + " type: Centralized\n" + " indexingRequirement: \n" + " Complete, dedupRID\n" + " removeOrphans: \n" + " false\n" + " cost: 0.0847147, localCost: 0.0106248, adjustedCE: 31.6228, rule: " + "IndexFetch, node: \n" + " NestedLoopJoin [joinType: Inner, {rid_0}]\n" + " ce: 31.6228\n" + " | | Const [true]\n" + " | MemoPhysicalDelegator [groupId: 0, index: 0]\n" + " MemoPhysicalDelegator [groupId: 3, index: 0]\n" + " rejectedPlans: \n" + " cost: 0.513676, localCost: 0.513676, adjustedCE: 1000, rule: " + "SargableToPhysicalScan, node: \n" + " Filter []\n" + " ce: 1000\n" + " | EvalFilter []\n" + " | | Variable [evalTemp_0]\n" + " | PathTraverse [1]\n" + " | PathCompare [Eq]\n" + " | Const [1]\n" + " PhysicalScan [{'': root, 'a': evalTemp_0}, c1]\n" + " ce: 1000\n" + " groupId: 2\n" + " | | Logical properties:\n" + " | | cardinalityEstimate: \n" + " | | ce: 31.6228\n" + " | | projections: \n" + " | | root\n" + " | | indexingAvailability: \n" + " | | [groupId: 0, scanProjection: root, scanDefName: c1, eqPredsOnly, " + "hasProperInterval]\n" + " | | collectionAvailability: \n" + " | | c1\n" + " | | distributionAvailability: \n" + " | | distribution: \n" + " | | type: Centralized\n" + " | logicalNodes: \n" + " | logicalNodeId: 0, rule: Root\n" + " | Root [{root}]\n" + " | MemoLogicalDelegator [groupId: 1]\n" + " physicalNodes: \n" + " physicalNodeId: 0, costLimit: {Infinite cost}\n" + " Physical properties:\n" + " distribution: \n" + " type: Centralized\n" + " indexingRequirement: \n" + " Complete, dedupRID\n" + " removeOrphans: \n" + " false\n" + " cost: 0.0847147, localCost: 0, adjustedCE: 31.6228, rule: Root, node: \n" + " Root [{root}]\n" + " ce: 31.6228\n" + " MemoPhysicalDelegator [groupId: 1, index: 0]\n" + " groupId: 3\n" + " | | Logical properties:\n" + " | | cardinalityEstimate: \n" + " | | ce: 31.6228\n" + " | | requirementCEs: \n" + " | | refProjection: root, path: 'PathGet [a] PathTraverse [1] " + "PathIdentity []', ce: 31.6228\n" + " | | projections: \n" + " | | root\n" + " | | indexingAvailability: \n" + " | | [groupId: 0, scanProjection: root, scanDefName: c1, eqPredsOnly, " + "hasProperInterval]\n" + " | | collectionAvailability: \n" + " | | c1\n" + " | | distributionAvailability: \n" + " | | distribution: \n" + " | | type: Centralized\n" + " | logicalNodes: \n" + " | logicalNodeId: 0, rule: SargableSplit\n" + " | Sargable [Index]\n" + " | | | requirements: \n" + " | | | {{{root, 'PathGet [a] PathTraverse [1] PathIdentity []', " + "{{{=Const [1]}}}}}}\n" + " | | candidateIndexes: \n" + " | | candidateId: 1, index1, {}, {SimpleEquality}, {{{=Const [1]}}}\n" + " | MemoLogicalDelegator [groupId: 0]\n" + " physicalNodes: \n" + " physicalNodeId: 0, costLimit: {Infinite cost}\n" + " Physical properties:\n" + " projections: \n" + " rid_0\n" + " distribution: \n" + " type: Centralized\n" + " indexingRequirement: \n" + " Index, dedupRID\n" + " removeOrphans: \n" + " false\n" + " cost: 0.0268205, localCost: 0.0268205, adjustedCE: 31.6228, rule: " + "SargableToIndex, node: \n" + " IndexScan [{'': rid_0}, scanDefName: c1, indexDefName: index1, " + "interval: {=Const [1]}]\n" + " ce: 31.6228\n", + phaseManager.getMemo()); +} + TEST(PhysRewriter, ExtractAllPlans) { using namespace properties; @@ -5307,7 +5572,7 @@ TEST(PhysRewriter, ExtractAllPlans) { phaseManager.getHints()._disableBranchAndBound = true; phaseManager.getHints()._keepRejectedPlans = true; auto plans = phaseManager.optimizeNoAssert(std::move(optimized), true /*includeRejected*/); - ASSERT_EQ(22, plans.size()); + 
ASSERT_EQ(6, plans.size()); // Sort plans by estimated cost. If costs are equal, sort lexicographically by plan explain. // This allows us to break ties if costs are equal. @@ -5393,5 +5658,140 @@ TEST(PhysRewriter, ExtractAllPlans) { "| minKey], Const [1 | maxKey]]}]\n", getExplainForPlan(2)); } + +TEST(PhysRewriter, RemoveOrphansEnforcer) { + // Hypothetical MQL which could generate this ABT: {$match: {a: 1}} + ABT rootNode = NodeBuilder{} + .root("root") + .filter(_evalf(_get("a", _traverse1(_cmp("Eq", "1"_cint64))), "root"_var)) + .finish(_scan("root", "c1")); + + auto prefixId = PrefixId::createForTests(); + + auto scanDef = + createScanDef(ScanDefOptions{}, + IndexDefinitions{}, + MultikeynessTrie{}, + ConstEval::constFold, + // Sharded on {a: 1, b:1} + DistributionAndPaths{DistributionType::RangePartitioning, + ABTVector{make("a", make()), + make("b", make())}}, + true /*exists*/, + boost::none /*ce*/, + ShardingMetadata{.mayContainOrphans = true}); + + auto phaseManager = makePhaseManager( + {OptPhase::MemoSubstitutionPhase, + OptPhase::MemoExplorationPhase, + OptPhase::MemoImplementationPhase}, + prefixId, + {{{"c1", scanDef}}}, + boost::none /*costModel*/, + {true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests}); + + ABT optimized = rootNode; + phaseManager.optimize(optimized); + + // Note new evaluation nodes for fields of the shard key and the filter node to perform the + // shard filtering. + ASSERT_EXPLAIN_V2_AUTO( + "Root [{root}]\n" + "Filter []\n" + "| FunctionCall [shardFilter]\n" + "| | Variable [shardKey_1]\n" + "| Variable [shardKey_0]\n" + "Evaluation [{shardKey_1}]\n" + "| EvalPath []\n" + "| | Variable [root]\n" + "| PathGet [b]\n" + "| PathIdentity []\n" + "Evaluation [{shardKey_0}]\n" + "| EvalPath []\n" + "| | Variable [root]\n" + "| PathGet [a]\n" + "| PathIdentity []\n" + "Filter []\n" + "| EvalFilter []\n" + "| | Variable [evalTemp_0]\n" + "| PathTraverse [1]\n" + "| PathCompare [Eq]\n" + "| Const [1]\n" + "PhysicalScan [{'': root, 'a': evalTemp_0}, c1]\n", + optimized); +} + +TEST(PhysRewriter, RemoveOrphansEnforcerMultipleCollections) { + // Hypothetical MQL which could generate this ABT: + // db.c1.aggregate([{$unionWith: {coll: "c2", pipeline: [{$match: {}}]}}]) + ABT rootNode = NodeBuilder{} + .root("root") + .un(ProjectionNameVector{"root"}, + {NodeHolder{NodeBuilder{}.finish(_scan("root", "c2"))}}) + .finish(_scan("root", "c1")); + + auto prefixId = PrefixId::createForTests(); + + auto scanDef1 = + createScanDef(ScanDefOptions{}, + IndexDefinitions{}, + MultikeynessTrie{}, + ConstEval::constFold, + // Sharded on {a: 1} + DistributionAndPaths{DistributionType::RangePartitioning, + ABTVector{make("a", make())}}, + true /*exists*/, + boost::none /*ce*/, + ShardingMetadata{.mayContainOrphans = true}); + + auto scanDef2 = + createScanDef(ScanDefOptions{}, + IndexDefinitions{}, + MultikeynessTrie{}, + ConstEval::constFold, + // Sharded on {b: 1} + DistributionAndPaths{DistributionType::RangePartitioning, + ABTVector{make("b", make())}}, + true /*exists*/, + boost::none /*ce*/, + ShardingMetadata{.mayContainOrphans = true}); + + auto phaseManager = makePhaseManager( + {OptPhase::MemoSubstitutionPhase, + OptPhase::MemoExplorationPhase, + OptPhase::MemoImplementationPhase}, + prefixId, + {{{"c1", scanDef1}, {"c2", scanDef2}}}, + boost::none /*costModel*/, + {true /*debugMode*/, 2 /*debugLevel*/, DebugInfo::kIterationLimitForTests}); + + ABT optimized = rootNode; + phaseManager.optimize(optimized); + + // Note the evaluation node to project 
the shard key and filter node to perform shard filtering. + ASSERT_EXPLAIN_V2_AUTO( + "Root [{root}]\n" + "Union [{root}]\n" + "| Filter []\n" + "| | FunctionCall [shardFilter]\n" + "| | Variable [shardKey_1]\n" + "| Evaluation [{shardKey_1}]\n" + "| | EvalPath []\n" + "| | | Variable [root]\n" + "| | PathGet [b]\n" + "| | PathIdentity []\n" + "| PhysicalScan [{'': root}, c2]\n" + "Filter []\n" + "| FunctionCall [shardFilter]\n" + "| Variable [shardKey_0]\n" + "Evaluation [{shardKey_0}]\n" + "| EvalPath []\n" + "| | Variable [root]\n" + "| PathGet [a]\n" + "| PathIdentity []\n" + "PhysicalScan [{'': root}, c1]\n", + optimized); +} + } // namespace } // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/physical_rewriter_parallelism_test.cpp b/src/mongo/db/query/optimizer/physical_rewriter_parallelism_test.cpp index 39490f65e235e..b5d2651501eb8 100644 --- a/src/mongo/db/query/optimizer/physical_rewriter_parallelism_test.cpp +++ b/src/mongo/db/query/optimizer/physical_rewriter_parallelism_test.cpp @@ -27,14 +27,35 @@ * it in the license file. */ +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/query/ce/hinted_estimator.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" +#include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer { @@ -432,6 +453,8 @@ TEST(PhysRewriterParallel, LocalLimitSkip) { "| type: Centralized\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" + "| removeOrphans: \n" + "| false\n" "LimitSkip [limit: 20, skip: 10]\n" "Properties [cost: 0.00676997, localCost: 0.003001, adjustedCE: 30]\n" "| | Logical:\n" @@ -453,7 +476,10 @@ TEST(PhysRewriterParallel, LocalLimitSkip) { "| type: Centralized\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" - "| limitEstimate: 30\n" + "| limitEstimate: \n" + "| 30\n" + "| removeOrphans: \n" + "| false\n" "Exchange []\n" "| | distribution: \n" "| | type: Centralized\n" @@ -477,7 +503,10 @@ TEST(PhysRewriterParallel, LocalLimitSkip) { "| type: UnknownPartitioning, disableExchanges\n" "| indexingRequirement: \n" "| Complete, dedupRID\n" - "| limitEstimate: 30\n" + "| limitEstimate: \n" + "| 30\n" + "| removeOrphans: \n" + "| false\n" "PhysicalScan [{'': root}, c1, parallel]\n", phaseManager); } diff --git a/src/mongo/db/query/optimizer/props.cpp b/src/mongo/db/query/optimizer/props.cpp index d5b6ea479e806..25493b9ab0e75 100644 --- a/src/mongo/db/query/optimizer/props.cpp +++ b/src/mongo/db/query/optimizer/props.cpp @@ -28,6 +28,13 @@ */ #include 
"mongo/db/query/optimizer/props.h" + +#include +#include + +#include + +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/util/assert_util.h" @@ -244,6 +251,20 @@ CEType LimitEstimate::getEstimate() const { return _estimate; } +RemoveOrphansRequirement::RemoveOrphansRequirement(bool mustRemove) : _mustRemove(mustRemove) {} + +bool RemoveOrphansRequirement::operator==(const RemoveOrphansRequirement& other) const { + return _mustRemove == other._mustRemove; +} + +ProjectionNameSet RemoveOrphansRequirement::getAffectedProjectionNames() const { + return {}; +} + +bool RemoveOrphansRequirement::mustRemove() const { + return _mustRemove; +} + ProjectionAvailability::ProjectionAvailability(ProjectionNameSet projections) : _projections(std::move(projections)) {} diff --git a/src/mongo/db/query/optimizer/props.h b/src/mongo/db/query/optimizer/props.h index cdb906084e96a..5c94833709a67 100644 --- a/src/mongo/db/query/optimizer/props.h +++ b/src/mongo/db/query/optimizer/props.h @@ -29,13 +29,21 @@ #pragma once +#include +#include +#include +#include #include #include +#include +#include #include #include "mongo/db/query/optimizer/algebra/operator.h" #include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/metadata.h" #include "mongo/util/assert_util.h" @@ -71,6 +79,7 @@ class DistributionRequirement; class IndexingRequirement; class RepetitionEstimate; class LimitEstimate; +class RemoveOrphansRequirement; using LogicalProperty = algebra::PolyValue; + LimitEstimate, + RemoveOrphansRequirement>; using LogicalProps = opt::unordered_map; using PhysProps = opt::unordered_map; @@ -352,6 +362,29 @@ class LimitEstimate final : public PhysPropertyTag { CEType _estimate; }; +/** + * A physical property that specifies that we must filter out data corresponding to orphaned + * documents from a particular collection. This property is only meaningful for groups which have + * IndexingAvailability. For every group with IndexingAvailability, we add a + * RemoveOrphansRequirement. This allows us to distinguish between RemoveOrphansRequirement{false}, + * which means do not perform shard filtering, and the lack of a RemoveOrphansRequirement, which + * implies that we are in a group without IndexingAvailability and thus don't have access to the + * scan projection (e.g. above a GroupByNode). + */ +class RemoveOrphansRequirement final : public PhysPropertyTag { +public: + RemoveOrphansRequirement(bool mustRemove); + + bool operator==(const RemoveOrphansRequirement& other) const; + + ProjectionNameSet getAffectedProjectionNames() const; + + bool mustRemove() const; + +private: + bool _mustRemove; +}; + /** * A logical property which specifies available projections for a given ABT tree. */ diff --git a/src/mongo/db/query/optimizer/reference_tracker.cpp b/src/mongo/db/query/optimizer/reference_tracker.cpp index 79c2d7bda357a..f9f81b4a956e3 100644 --- a/src/mongo/db/query/optimizer/reference_tracker.cpp +++ b/src/mongo/db/query/optimizer/reference_tracker.cpp @@ -27,9 +27,21 @@ * it in the license file. 
*/ -#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::optimizer { @@ -479,10 +491,10 @@ struct Collector { return result; } - template - CollectedInfo handleRIDNodeReferences(const T& node, - CollectedInfo leftChildResult, - CollectedInfo rightChildResult) { + CollectedInfo transport(const ABT& n, + const RIDIntersectNode& node, + CollectedInfo leftChildResult, + CollectedInfo rightChildResult) { CollectedInfo result{}; // This is a special case where both children of 'node' have a definition for the scan @@ -498,21 +510,29 @@ struct Collector { return result; } - CollectedInfo transport(const ABT& n, - const RIDIntersectNode& node, - CollectedInfo leftChildResult, - CollectedInfo rightChildResult) { - return handleRIDNodeReferences( - node, std::move(leftChildResult), std::move(rightChildResult)); - } - CollectedInfo transport(const ABT& n, const RIDUnionNode& node, CollectedInfo leftChildResult, CollectedInfo rightChildResult) { - // TODO SERVER-69026 should determine how the reference tracker for RIDUnionNode will work. - return handleRIDNodeReferences( - node, std::move(leftChildResult), std::move(rightChildResult)); + CollectedInfo result{}; + + // For simplicity, don't provide any projections. In principle we could preserve any + // projections that are common to both the left and right children, but for now we don't + // need this because we don't support covered index union plans. + // + // The only projection we preserve is the scanDef projection name. 
+ const auto& scanProjName = node.getScanProjectionName(); + auto preservedDef = std::move(leftChildResult.defs.at(scanProjName)); + rightChildResult.defs.clear(); + leftChildResult.defs.clear(); + leftChildResult.defs.emplace(scanProjName, std::move(preservedDef)); + + result.merge(std::move(leftChildResult)); + result.merge(std::move(rightChildResult)); + + result.nodeDefs[&node] = result.defs; + + return result; } template diff --git a/src/mongo/db/query/optimizer/reference_tracker.h b/src/mongo/db/query/optimizer/reference_tracker.h index c11ddce41f484..d917b5841f4c5 100644 --- a/src/mongo/db/query/optimizer/reference_tracker.h +++ b/src/mongo/db/query/optimizer/reference_tracker.h @@ -29,8 +29,16 @@ #pragma once +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/cascades/memo_group_binder_interface.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/reference_tracker_test.cpp b/src/mongo/db/query/optimizer/reference_tracker_test.cpp index 6ce0c38516536..6bcd9882ab2f6 100644 --- a/src/mongo/db/query/optimizer/reference_tracker_test.cpp +++ b/src/mongo/db/query/optimizer/reference_tracker_test.cpp @@ -28,8 +28,23 @@ */ #include "mongo/db/query/optimizer/reference_tracker.h" + +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/db/query/optimizer/rewrites/const_eval.cpp b/src/mongo/db/query/optimizer/rewrites/const_eval.cpp index 2aa4e511a8353..8bdce935d2b33 100644 --- a/src/mongo/db/query/optimizer/rewrites/const_eval.cpp +++ b/src/mongo/db/query/optimizer/rewrites/const_eval.cpp @@ -28,8 +28,22 @@ */ #include "mongo/db/query/optimizer/rewrites/const_eval.h" + +#include +#include +#include +#include +#include +#include + +#include + #include "mongo/db/exec/sbe/values/arith_common.h" -#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { bool ConstEval::optimize(ABT& n) { diff --git a/src/mongo/db/query/optimizer/rewrites/const_eval.h b/src/mongo/db/query/optimizer/rewrites/const_eval.h index ddec3328eb14b..5589fa21cac2e 100644 --- a/src/mongo/db/query/optimizer/rewrites/const_eval.h +++ b/src/mongo/db/query/optimizer/rewrites/const_eval.h @@ -29,7 +29,20 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/reference_tracker.h" +#include 
"mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/abt_hash.h" diff --git a/src/mongo/db/query/optimizer/rewrites/path.cpp b/src/mongo/db/query/optimizer/rewrites/path.cpp index af950a753581c..8661f7259cea8 100644 --- a/src/mongo/db/query/optimizer/rewrites/path.cpp +++ b/src/mongo/db/query/optimizer/rewrites/path.cpp @@ -28,7 +28,24 @@ */ #include "mongo/db/query/optimizer/rewrites/path.h" + +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/utils/path_utils.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/rewrites/path.h b/src/mongo/db/query/optimizer/rewrites/path.h index de1d483535199..2d55658f8a03f 100644 --- a/src/mongo/db/query/optimizer/rewrites/path.h +++ b/src/mongo/db/query/optimizer/rewrites/path.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/rewrites/path_lower.cpp b/src/mongo/db/query/optimizer/rewrites/path_lower.cpp index cf852899335ab..35628e9f1365e 100644 --- a/src/mongo/db/query/optimizer/rewrites/path_lower.cpp +++ b/src/mongo/db/query/optimizer/rewrites/path_lower.cpp @@ -29,6 +29,19 @@ #include "mongo/db/query/optimizer/rewrites/path_lower.h" +#include +#include + +#include + +#include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" + namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/rewrites/path_lower.h b/src/mongo/db/query/optimizer/rewrites/path_lower.h index 6d156f60cbf64..d4c9f19b6a23c 100644 --- a/src/mongo/db/query/optimizer/rewrites/path_lower.h +++ b/src/mongo/db/query/optimizer/rewrites/path_lower.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" diff --git a/src/mongo/db/query/optimizer/rewrites/path_lower_bm.cpp b/src/mongo/db/query/optimizer/rewrites/path_lower_bm.cpp index b752a557dd790..b85d94596dfdb 100644 --- a/src/mongo/db/query/optimizer/rewrites/path_lower_bm.cpp +++ b/src/mongo/db/query/optimizer/rewrites/path_lower_bm.cpp @@ -27,14 +27,23 @@ * it in the license file. 
*/ -#include "mongo/bson/util/builder_fwd.h" -#include "mongo/db/query/optimizer/rewrites/path_lower.h" #include +#include +#include +#include + +#include +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/rewrites/path_lower.h" #include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/syntax/syntax.h" -#include +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/str.h" namespace mongo { namespace optimizer { diff --git a/src/mongo/db/query/optimizer/rewrites/path_optimizer_test.cpp b/src/mongo/db/query/optimizer/rewrites/path_optimizer_test.cpp index 12f5adc93c91c..4b47fe230d107 100644 --- a/src/mongo/db/query/optimizer/rewrites/path_optimizer_test.cpp +++ b/src/mongo/db/query/optimizer/rewrites/path_optimizer_test.cpp @@ -27,13 +27,30 @@ * it in the license file. */ -#include "mongo/db/query/optimizer/explain.h" +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/reference_tracker.h" #include "mongo/db/query/optimizer/rewrites/const_eval.h" #include "mongo/db/query/optimizer/rewrites/path.h" #include "mongo/db/query/optimizer/rewrites/path_lower.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::optimizer { namespace { diff --git a/src/mongo/db/query/optimizer/syntax/expr.cpp b/src/mongo/db/query/optimizer/syntax/expr.cpp index 8eb566a95d455..3ef88b136b8b9 100644 --- a/src/mongo/db/query/optimizer/syntax/expr.cpp +++ b/src/mongo/db/query/optimizer/syntax/expr.cpp @@ -28,8 +28,12 @@ */ #include "mongo/db/query/optimizer/syntax/expr.h" + +#include + #include "mongo/db/exec/sbe/values/value.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/storage/key_string.h" #include "mongo/platform/decimal128.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/syntax/expr.h b/src/mongo/db/query/optimizer/syntax/expr.h index 8d4509308103a..8fa41f0622fef 100644 --- a/src/mongo/db/query/optimizer/syntax/expr.h +++ b/src/mongo/db/query/optimizer/syntax/expr.h @@ -29,11 +29,23 @@ #pragma once -#include "mongo/platform/decimal128.h" +#include +#include +#include #include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" 
+#include "mongo/util/time_support.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/unit_test_infra_test.cpp b/src/mongo/db/query/optimizer/unit_test_infra_test.cpp index de24e04da10ba..6d4a3d568385e 100644 --- a/src/mongo/db/query/optimizer/unit_test_infra_test.cpp +++ b/src/mongo/db/query/optimizer/unit_test_infra_test.cpp @@ -27,9 +27,22 @@ * it in the license file. */ +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/inline_auto_update.h" +#include "mongo/util/str.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/utils/abt_compare.cpp b/src/mongo/db/query/optimizer/utils/abt_compare.cpp index c321add3efa0f..1935da01fd44f 100644 --- a/src/mongo/db/query/optimizer/utils/abt_compare.cpp +++ b/src/mongo/db/query/optimizer/utils/abt_compare.cpp @@ -29,6 +29,21 @@ #include "mongo/db/query/optimizer/utils/abt_compare.h" +#include +#include +#include +#include + +#include + +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" + namespace mongo::optimizer { @@ -411,9 +426,14 @@ class PSRExprComparator { private: int compare(const PSRExpr::Atom& node, const PSRExpr::Atom& other) { - auto isSorted = - PartialSchemaKeyLessComparator{}(node.getExpr().first, other.getExpr().first); - return isSorted ? 
-1 : 1; + const auto& [key1, req1] = node.getExpr(); + const auto& [key2, req2] = other.getExpr(); + + int keyCmp = PartialSchemaKeyComparator::Cmp3W{}(key1, key2); + if (keyCmp != 0) { + return keyCmp; + } + return PartialSchemaRequirementComparator::Cmp3W{}(req1, req2); } int compare(const PSRExpr::Conjunction& node, const PSRExpr::Conjunction& other) { diff --git a/src/mongo/db/query/optimizer/utils/abt_compare.h b/src/mongo/db/query/optimizer/utils/abt_compare.h index 5f65a794a8b19..d5e72f8695cd5 100644 --- a/src/mongo/db/query/optimizer/utils/abt_compare.h +++ b/src/mongo/db/query/optimizer/utils/abt_compare.h @@ -30,7 +30,9 @@ #pragma once #include "mongo/db/query/optimizer/index_bounds.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/utils/abt_hash.cpp b/src/mongo/db/query/optimizer/utils/abt_hash.cpp index 2f00111cf1b2d..99ce88d799fd9 100644 --- a/src/mongo/db/query/optimizer/utils/abt_hash.cpp +++ b/src/mongo/db/query/optimizer/utils/abt_hash.cpp @@ -29,8 +29,30 @@ #include "mongo/db/query/optimizer/utils/abt_hash.h" -#include "mongo/db/query/optimizer/node.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/partial_schema_requirements.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -462,6 +484,11 @@ class PhysPropsHasher { return computeHashSeq<7>(CEType::Hasher()(prop.getEstimate())); } + size_t operator()(const properties::PhysProperty&, + const properties::RemoveOrphansRequirement& prop) { + return computeHashSeq<8>(std::hash()(prop.mustRemove())); + } + static size_t computeHash(const properties::PhysProps& props) { PhysPropsHasher visitor; size_t result = 17; diff --git a/src/mongo/db/query/optimizer/utils/abt_hash.h b/src/mongo/db/query/optimizer/utils/abt_hash.h index 1f097bed3a1a6..e51b99e1b5724 100644 --- a/src/mongo/db/query/optimizer/utils/abt_hash.h +++ b/src/mongo/db/query/optimizer/utils/abt_hash.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/query/optimizer/props.h" #include "mongo/db/query/optimizer/syntax/syntax.h" diff --git a/src/mongo/db/query/optimizer/utils/ce_math.cpp b/src/mongo/db/query/optimizer/utils/ce_math.cpp index 7cdf765684d82..9aa24e6ad0e33 100644 --- a/src/mongo/db/query/optimizer/utils/ce_math.cpp +++ b/src/mongo/db/query/optimizer/utils/ce_math.cpp @@ -28,8 +28,10 @@ */ #include // std::sort -#include // std::pow #include // std::greater +#include +#include +#include #include "mongo/db/query/optimizer/utils/ce_math.h" #include "mongo/util/assert_util.h" @@ -66,6 +68,10 @@ template , std::greater>> SelectivityType expBackoffInternal(std::vector sels) { + if (sels.size() == 1) { + return sels[0]; + } + 
const size_t actualMaxBackoffElements = std::min(sels.size(), kMaxBackoffElements); std::partial_sort( sels.begin(), sels.begin() + actualMaxBackoffElements, sels.end(), Comparator()); diff --git a/src/mongo/db/query/optimizer/utils/ce_math.h b/src/mongo/db/query/optimizer/utils/ce_math.h index a920b22e38c7b..7843a55138025 100644 --- a/src/mongo/db/query/optimizer/utils/ce_math.h +++ b/src/mongo/db/query/optimizer/utils/ce_math.h @@ -29,10 +29,12 @@ #pragma once +#include #include #include #include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" namespace mongo::optimizer::ce { diff --git a/src/mongo/db/query/optimizer/utils/interval_utils.cpp b/src/mongo/db/query/optimizer/utils/interval_utils.cpp index 8dbfa23bbe45c..eb0c5f4500cba 100644 --- a/src/mongo/db/query/optimizer/utils/interval_utils.cpp +++ b/src/mongo/db/query/optimizer/utils/interval_utils.cpp @@ -29,9 +29,27 @@ #include "mongo/db/query/optimizer/utils/interval_utils.h" -#include "mongo/db/exec/sbe/values/value.h" -#include "mongo/db/query/optimizer/node.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/abt_compare.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -57,14 +75,8 @@ ABT maxABT(const ABT& v1, const ABT& v2) { }; void constFoldInterval(IntervalRequirement& interval, const ConstFoldFn& constFold) { - ABT low = interval.getLowBound().getBound(); - ABT high = interval.getHighBound().getBound(); - constFold(low); - constFold(high); - interval = IntervalRequirement{ - BoundRequirement{interval.getLowBound().isInclusive(), std::move(low)}, - BoundRequirement{interval.getHighBound().isInclusive(), std::move(high)}, - }; + constFold(interval.getLowBound().getBound()); + constFold(interval.getHighBound().getBound()); } // Returns true if the interval can be proven to be empty. If no conclusion can be made, or the @@ -776,56 +788,6 @@ void normalizeIntervals(IntervalReqExpr::Node& intervals) { IntervalNormalizer{}.normalize(intervals); } -boost::optional coerceIntervalToPathCompareEqMember(const IntervalReqExpr::Node& interval) { - // Create the array that EqMember will use to hold the members. - auto [eqMembersTag, eqMembersVal] = sbe::value::makeNewArray(); - sbe::value::ValueGuard guard{eqMembersTag, eqMembersVal}; - auto eqMembersArray = sbe::value::getArrayView(eqMembersVal); - - // An EqMember is a disjunction of conjunctions of atoms (point intervals). For example [1, 1] U - // [2, 2] U [3, 3] However each conjunction should only have one atom child, so we can think of - // it as a disjunction of point intervals instead. - if (const auto disj = interval.cast()) { - // We only make an EqMember if we have 2 or more comparisons. - if (disj->nodes().size() < 2) { - return boost::none; - } - - for (const auto& child : disj->nodes()) { - if (!child.is()) { - return boost::none; - } - - // Check that the conjunction has one atom child. 
- const auto conjChild = child.cast(); - if (conjChild->nodes().size() != 1 || - !conjChild->nodes().front().is()) { - return boost::none; - } - - // Check that the atom is a point interval, and the bound is a constant. - const auto atomChild = conjChild->nodes().front().cast(); - if (!atomChild->getExpr().isEquality() || - !atomChild->getExpr().getLowBound().getBound().is()) { - return boost::none; - } - - const auto constAtomChildPair = - atomChild->getExpr().getLowBound().getBound().cast()->get(); - - // Make a copy of the point bound, insert it into our EqMember members. - const auto newEqMember = copyValue(constAtomChildPair.first, constAtomChildPair.second); - eqMembersArray->push_back(newEqMember.first, newEqMember.second); - } - - // If we got to this point, we have successfully coerced the interval into an EqMember! - // Reset the guard so the members array doesn't get deleted. - guard.reset(); - return make(Operations::EqMember, make(eqMembersTag, eqMembersVal)); - } - return boost::none; -} - bool isSimpleRange(const CompoundIntervalReqExpr::Node& interval) { if (const auto singularInterval = CompoundIntervalReqExpr::getSingularDNF(interval); singularInterval && !singularInterval->isEquality()) { diff --git a/src/mongo/db/query/optimizer/utils/interval_utils.h b/src/mongo/db/query/optimizer/utils/interval_utils.h index 79ed0ac218fe3..a2ea679aba7dd 100644 --- a/src/mongo/db/query/optimizer/utils/interval_utils.h +++ b/src/mongo/db/query/optimizer/utils/interval_utils.h @@ -29,8 +29,11 @@ #pragma once -#include "mongo/db/query/optimizer/index_bounds.h" +#include +#include +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/utils/const_fold_interface.h" #include "mongo/db/query/optimizer/utils/utils.h" namespace mongo::optimizer { @@ -123,12 +126,6 @@ void padCompoundIntervalsDNF(CompoundIntervalReqExpr::Node& targetIntervals, boo */ void normalizeIntervals(IntervalReqExpr::Node& intervals); -/** - * Analyze the given interval, and convert it into a PathCompare EqMember if possible. - */ -boost::optional coerceIntervalToPathCompareEqMember(const IntervalReqExpr::Node& interval); - - /** * Returns true if the interval corresponds to a simple range (e.g >10 as opposed to a point * equality or complex boolean expression of intervals). 
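As an aside on the expBackoffInternal() change in ce_math.cpp above: the new guard returns a lone selectivity untouched, skipping the partial sort and pow calls for the common single-predicate case. Below is a minimal sketch of the conjunctive exponential-backoff combination that helper family is built around; the weighting scheme, the four-element cap, and all names here are assumptions for illustration, not the optimizer's API.

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Illustrative sketch: combine conjunctive selectivities so that the most selective
// (smallest) estimates dominate and each further estimate contributes with an
// exponentially decreasing weight.
double conjExponentialBackoffSketch(std::vector<double> sels) {
    if (sels.size() == 1) {
        return sels[0];  // mirrors the early return added in the hunk above
    }
    const std::size_t kMaxBackoffElements = 4;  // assumed cap on contributing estimates
    const std::size_t n = std::min(sels.size(), kMaxBackoffElements);
    std::partial_sort(sels.begin(), sels.begin() + n, sels.end());  // smallest first
    double result = 1.0;
    double exponent = 1.0;
    for (std::size_t i = 0; i < n; ++i, exponent /= 2.0) {
        result *= std::pow(sels[i], exponent);  // s0 * s1^(1/2) * s2^(1/4) * ...
    }
    return result;
}
```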
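The coerceIntervalToPathCompareEqMember() helper deleted from interval_utils above converted a disjunction of constant point intervals such as [1, 1] U [2, 2] U [3, 3] back into a PathCompare [EqMember] bound. The opposite direction survives in the utils.cpp hunk further down, which now assembles that interval DNF directly with a builder instead of repeatedly calling combineIntervalsDNF(); per the comment there, the repeated-union approach re-adds the accumulated disjuncts for every new bound and degenerates to roughly N*(N+1)/2 entries. A toy model of the direct construction, using plain standard-library types rather than the optimizer's ABT classes:

```cpp
#include <utility>
#include <vector>

using PointInterval = std::pair<int, int>;        // [low, high]; low == high for a point
using Conjunction = std::vector<PointInterval>;   // single-predicate conjunct
using Disjunction = std::vector<Conjunction>;     // DNF: disjunction of conjunctions

// Build the DNF in one linear pass: one single-predicate conjunct per EqMember bound.
Disjunction buildEqMemberIntervals(const std::vector<int>& bounds) {
    Disjunction dnf;
    dnf.reserve(bounds.size());
    for (int b : bounds) {
        dnf.push_back(Conjunction{{b, b}});
    }
    return dnf;
}
```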
diff --git a/src/mongo/db/query/optimizer/utils/memo_utils.cpp b/src/mongo/db/query/optimizer/utils/memo_utils.cpp index 1aa3a9603678e..f634c33f45761 100644 --- a/src/mongo/db/query/optimizer/utils/memo_utils.cpp +++ b/src/mongo/db/query/optimizer/utils/memo_utils.cpp @@ -29,7 +29,30 @@ #include "mongo/db/query/optimizer/utils/memo_utils.h" +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/cascades/memo_defs.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -273,6 +296,30 @@ class MemoPhysicalPlanExtractor { return result; } + PlanExtractorResult operator()(const ABT& n, + const EvaluationNode& node, + const bool isGroupRoot, + ProjectionNameOrderPreservingSet required) { + ProjectionNameOrderPreservingSet requiredForChild = required; + requiredForChild.erase(node.getProjectionName()); + auto env = VariableEnvironment::build(node.getProjection()); + for (const auto& proj : env.freeVariableNames()) { + requiredForChild.emplace_back(proj); + } + + auto result = + node.getChild().visit(*this, false /*isGroupRoot*/, std::move(requiredForChild)); + + PlanAndProps initial = createInitial(isGroupRoot, n, std::move(required)); + for (size_t index = 0; index < result.size(); index++) { + auto& entry = result.at(index); + PlanAndProps merged = moveOrCopy(initial, index == result.size() - 1); + mergeNodeAndProps(_includeRejected, merged, entry, true /*canMove*/); + std::swap(entry, merged); + } + return result; + } + template PlanExtractorResult operator()(const ABT& n, const T& node, @@ -381,6 +428,11 @@ class MemoPhysicalPlanExtractor { const auto& nodeInfo = (altIndex == 0) ? *result._nodeInfo : result._rejectedNodeInfo.at(altIndex - 1); const ABT& node = nodeInfo._node; + if (nodeInfo._cost.isInfinite()) { + // Skip the node with infinity cost as that indicates the failure of physical plan + // optimization. 
+ continue; + } MemoPhysicalPlanExtractor instance(memo, metadata, diff --git a/src/mongo/db/query/optimizer/utils/memo_utils.h b/src/mongo/db/query/optimizer/utils/memo_utils.h index d7a942fb776d9..8d800bf8652b9 100644 --- a/src/mongo/db/query/optimizer/utils/memo_utils.h +++ b/src/mongo/db/query/optimizer/utils/memo_utils.h @@ -29,9 +29,14 @@ #pragma once +#include + #include "mongo/db/query/optimizer/cascades/memo.h" +#include "mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" diff --git a/src/mongo/db/query/optimizer/utils/path_utils.cpp b/src/mongo/db/query/optimizer/utils/path_utils.cpp index bc1cfcfd3294d..a0bfda08c1196 100644 --- a/src/mongo/db/query/optimizer/utils/path_utils.cpp +++ b/src/mongo/db/query/optimizer/utils/path_utils.cpp @@ -29,6 +29,20 @@ #include "mongo/db/query/optimizer/utils/path_utils.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" + namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/utils/path_utils.h b/src/mongo/db/query/optimizer/utils/path_utils.h index 17e9eefe05e65..d544a0adebfe8 100644 --- a/src/mongo/db/query/optimizer/utils/path_utils.h +++ b/src/mongo/db/query/optimizer/utils/path_utils.h @@ -29,8 +29,21 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/defs.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer { @@ -79,8 +92,6 @@ std::vector collectComposedBounded(const ABT& n, size_t max * * If the number of compositions exceeds "maxDepth" then we return the a single FilterNode * consisting of an EvalFilter over the original path and input. - * - * TODO: SERVER-73744. Consolidate usages in a new optimizer phase. 
*/ constexpr size_t kMaxPathConjunctionDecomposition = 20; boost::optional decomposeToFilterNodes(const ABT& input, diff --git a/src/mongo/db/query/optimizer/utils/reftracker_utils.cpp b/src/mongo/db/query/optimizer/utils/reftracker_utils.cpp index 436e666d2f423..b9500b6edd68e 100644 --- a/src/mongo/db/query/optimizer/utils/reftracker_utils.cpp +++ b/src/mongo/db/query/optimizer/utils/reftracker_utils.cpp @@ -29,7 +29,13 @@ #include "mongo/db/query/optimizer/utils/reftracker_utils.h" +#include + +#include + +#include "mongo/db/query/optimizer/algebra/operator.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/utils/reftracker_utils.h b/src/mongo/db/query/optimizer/utils/reftracker_utils.h index 6cf7746a99ac6..3522535eeac40 100644 --- a/src/mongo/db/query/optimizer/utils/reftracker_utils.h +++ b/src/mongo/db/query/optimizer/utils/reftracker_utils.h @@ -29,7 +29,9 @@ #pragma once -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/syntax.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/utils/strong_alias.h b/src/mongo/db/query/optimizer/utils/strong_alias.h index a7413754c8883..b083d64556f91 100644 --- a/src/mongo/db/query/optimizer/utils/strong_alias.h +++ b/src/mongo/db/query/optimizer/utils/strong_alias.h @@ -29,8 +29,9 @@ #pragma once -#include "mongo/util/assert_util.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/str.h" +#include namespace mongo::optimizer { @@ -112,14 +113,11 @@ StreamType& operator<<(StreamType& stream, const StrongStringAlias& t) return stream << t.value(); } - /** * Strong double alias. Used for cardinality estimation and selectivity. The tag type is expected to * have a boolean field "kUnitless". It specifies if this entity is unitless (e.g. a simple ratio, a * percent) vs having units (e.g. documents). This effectively enables or disables multiplication * and division by the same alias type. - * - * TODO: SERVER-71801: Validation for strong double alias. 
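+ * For example (illustrative only; the concrete tag types are defined elsewhere), a selectivity
+ * tag would provide kUnitless = true together with kMinValue = 0.0 and kMaxValue = 1.0, which the
+ * assertValid() member added below uses to range-check the wrapped value.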
*/ template struct StrongDoubleAlias { @@ -133,6 +131,17 @@ struct StrongDoubleAlias { return _value; } + constexpr void assertValid() const { + uassert(7180104, "Invalid value", _value >= TagType::kMinValue); + uassert(7180105, "Invalid value", _value <= TagType::kMaxValue); + } + + constexpr StrongDoubleAlias(const double value) : _value(value) { + assertValid(); + } + + constexpr StrongDoubleAlias() = default; + constexpr bool operator==(const StrongDoubleAlias other) const { return _value == other._value; } diff --git a/src/mongo/db/query/optimizer/utils/unit_test_pipeline_utils.cpp b/src/mongo/db/query/optimizer/utils/unit_test_pipeline_utils.cpp index 8f0c9adfff829..ebe8c38b6cd96 100644 --- a/src/mongo/db/query/optimizer/utils/unit_test_pipeline_utils.cpp +++ b/src/mongo/db/query/optimizer/utils/unit_test_pipeline_utils.cpp @@ -29,12 +29,38 @@ #include "mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/abt/document_source_visitor.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/cost_model/cost_model_gen.h" #include "mongo/db/query/optimizer/explain.h" -#include "mongo/db/query/optimizer/rewrites/const_eval.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/utils/strong_alias.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/temp_dir.h" +#include "mongo/util/intrusive_counter.h" namespace mongo::optimizer { @@ -84,7 +110,10 @@ ABT translatePipeline(const Metadata& metadata, const std::vector& involvedNss) { auto opCtx = cc().makeOperationContext(); auto pipeline = - parsePipeline(NamespaceString("a." + scanDefName), pipelineStr, *opCtx, involvedNss); + parsePipeline(NamespaceString::createNamespaceString_forTest("a." 
+ scanDefName), + pipelineStr, + *opCtx, + involvedNss); return translatePipelineToABT(metadata, *pipeline.get(), scanProjName, @@ -186,7 +215,12 @@ void serializeMetadata(std::ostream& stream, Metadata metadata) { } stream << "\t\t\tcollection exists: " << scanDef.exists() << std::endl; - stream << "\t\t\tCE type: " << scanDef.getCE() << std::endl; + stream << "\t\t\tCE type: "; + if (const auto& ce = scanDef.getCE()) { + stream << *ce << std::endl; + } else { + stream << "(empty)" << std::endl; + } } } diff --git a/src/mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h b/src/mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h index 84ac6f5cdc530..9894d5674564d 100644 --- a/src/mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h +++ b/src/mongo/db/query/optimizer/utils/unit_test_pipeline_utils.h @@ -29,14 +29,25 @@ #pragma once -#include "mongo/db/operation_context_noop.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/optimizer/containers.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/metadata_factory.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/unittest/golden_test.h" +#include "mongo/unittest/golden_test_base.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/utils/unit_test_utils.cpp b/src/mongo/db/query/optimizer/utils/unit_test_utils.cpp index 9d8519a021d99..0f2d48306adda 100644 --- a/src/mongo/db/query/optimizer/utils/unit_test_utils.cpp +++ b/src/mongo/db/query/optimizer/utils/unit_test_utils.cpp @@ -29,19 +29,26 @@ #include "mongo/db/query/optimizer/utils/unit_test_utils.h" -#include +#include +#include +#include // IWYU pragma: keep +#include +#include + +#include #include "mongo/db/pipeline/abt/utils.h" #include "mongo/db/query/ce/heuristic_estimator.h" #include "mongo/db/query/ce/hinted_estimator.h" #include "mongo/db/query/cost_model/cost_estimator_impl.h" #include "mongo/db/query/cost_model/cost_model_manager.h" +#include "mongo/db/query/optimizer/cascades/memo.h" #include "mongo/db/query/optimizer/explain.h" #include "mongo/db/query/optimizer/metadata.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/rewrites/const_eval.h" -#include "mongo/unittest/framework.h" -#include "mongo/util/str_escape.h" +#include "mongo/db/query/optimizer/syntax/path.h" +#include "mongo/db/query/optimizer/utils/const_fold_interface.h" namespace mongo::optimizer { diff --git a/src/mongo/db/query/optimizer/utils/unit_test_utils.h b/src/mongo/db/query/optimizer/utils/unit_test_utils.h index 6a2521bca0d91..aecfccb50a957 100644 --- a/src/mongo/db/query/optimizer/utils/unit_test_utils.h +++ b/src/mongo/db/query/optimizer/utils/unit_test_utils.h @@ -29,13 +29,22 @@ #pragma once +#include +#include +#include +#include + #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/query/ce/hinted_estimator.h" #include "mongo/db/query/cost_model/cost_model_gen.h" +#include "mongo/db/query/optimizer/cascades/interfaces.h" #include 
"mongo/db/query/optimizer/defs.h" #include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/metadata.h" #include "mongo/db/query/optimizer/opt_phase_manager.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/inline_auto_update.h" diff --git a/src/mongo/db/query/optimizer/utils/utils.cpp b/src/mongo/db/query/optimizer/utils/utils.cpp index 881cad5c5f4da..9e75855afcf73 100644 --- a/src/mongo/db/query/optimizer/utils/utils.cpp +++ b/src/mongo/db/query/optimizer/utils/utils.cpp @@ -29,15 +29,35 @@ #include "mongo/db/query/optimizer/utils/utils.h" +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/optimizer/index_bounds.h" #include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/syntax/path.h" #include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/ce_math.h" #include "mongo/db/query/optimizer/utils/interval_utils.h" #include "mongo/db/query/optimizer/utils/path_utils.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { @@ -196,13 +216,14 @@ class PartialSchemaReqConverter { // Fill it in with 'inputVar'. const ProjectionName& inputVarName = inputVar->name(); - PSRExpr::visitAnyShape(pathResult->_reqMap.getRoot(), [&](PartialSchemaEntry& entry) { - tassert( - 7453903, - "Expected PartialSchemaReqConversion for a path to have its input left blank", - !entry.first._projectionName); - entry.first._projectionName = inputVarName; - }); + PSRExpr::visitAnyShape(pathResult->_reqMap.getRoot(), + [&](PartialSchemaEntry& entry, const PSRExpr::VisitorContext&) { + tassert(7453903, + "Expected PartialSchemaReqConversion for a path to " + "have its input left blank", + !entry.first._projectionName); + entry.first._projectionName = inputVarName; + }); return pathResult; } @@ -283,17 +304,18 @@ class PartialSchemaReqConverter { // Check if the left and right requirements are all or none perf-only. size_t perfOnlyCount = 0; for (const auto* reqs : {&leftReqMap, &rightReqMap}) { - PSRExpr::visitAnyShape(reqs->getRoot(), [&](const PartialSchemaEntry& e) { - const auto& [key, req] = e; - // Additive composition should only have predicates; no projections. - tassert(7155021, - "Unexpected binding in ComposeA in PartialSchemaReqConverter", - !req.getBoundProjectionName()); - - if (req.getIsPerfOnly()) { - perfOnlyCount++; - } - }); + PSRExpr::visitAnyShape( + reqs->getRoot(), [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + const auto& [key, req] = e; + // Additive composition should only have predicates; no projections. 
+ tassert(7155021, + "Unexpected binding in ComposeA in PartialSchemaReqConverter", + !req.getBoundProjectionName()); + + if (req.getIsPerfOnly()) { + perfOnlyCount++; + } + }); } if (perfOnlyCount != 0 && perfOnlyCount != @@ -335,24 +357,17 @@ class PartialSchemaReqConverter { return {}; } - auto leftEntries = leftReqMap.conjuncts(); - auto rightEntries = rightReqMap.conjuncts(); - - auto leftEntry = leftEntries.begin(); - auto rightEntry = rightEntries.begin(); - auto& [leftKey, leftReq] = *leftEntry; - auto& [rightKey, rightReq] = *rightEntry; + auto& leftPair = PSRExpr::firstDNFLeaf(leftReqMap.getRoot()); + auto& leftKey = leftPair.first; + auto& leftReq = leftPair.second; + auto& [rightKey, rightReq] = PSRExpr::firstDNFLeaf(rightReqMap.getRoot()); // Do all reqs from both sides use the same key? - bool allSameKey = true; - for (const auto* reqs : {&leftReqMap, &rightReqMap}) { - for (auto&& [k, req] : reqs->conjuncts()) { - if (k != leftKey) { - allSameKey = false; - break; - } - } - } + auto hasDiffKey = [&](const PartialSchemaEntry& e) { + return e.first != leftKey; + }; + const bool allSameKey = !PSRExpr::any(leftReqMap.getRoot(), hasDiffKey) && + !PSRExpr::any(rightReqMap.getRoot(), hasDiffKey); if (allSameKey) { // All reqs from both sides use the same key (input binding + path). @@ -361,23 +376,29 @@ class PartialSchemaReqConverter { // (a & b) | (x & y) == (a | x) & (a | y) & (b | x) & (b | y) PSRExpr::Builder resultReqs; resultReqs.pushDisj().pushConj(); - for (const auto& [rightKey1, rightReq1] : rightReqMap.conjuncts()) { - for (const auto& [leftKey1, leftReq1] : leftReqMap.conjuncts()) { - auto combinedIntervals = leftReq1.getIntervals(); - combineIntervalsDNF( - false /*intersect*/, combinedIntervals, rightReq1.getIntervals()); - - PartialSchemaRequirement combinedReq{ - // We already asserted that there are no projections. - boost::none, - std::move(combinedIntervals), - leftReq1.getIsPerfOnly(), - }; - resultReqs.atom(leftKey1, combinedReq); - } - } + PSRExpr::visitDNF( + rightReqMap.getRoot(), + [&](const PartialSchemaEntry& rightEntry, const PSRExpr::VisitorContext&) { + PSRExpr::visitDNF( + leftReqMap.getRoot(), + [&](const PartialSchemaEntry& leftEntry, const PSRExpr::VisitorContext&) { + auto& [leftKey1, leftReq1] = leftEntry; + auto& rightReq1 = rightEntry.second; + auto combinedIntervals = leftReq1.getIntervals(); + combineIntervalsDNF( + false /*intersect*/, combinedIntervals, rightReq1.getIntervals()); + + PartialSchemaRequirement combinedReq{ + // We already asserted that there are no projections. + boost::none, + std::move(combinedIntervals), + leftReq1.getIsPerfOnly(), + }; + resultReqs.atom(leftKey1, combinedReq); + }); + }); - leftReqMap = std::move(*resultReqs.finish()); + leftReqMap = PartialSchemaRequirements{std::move(*resultReqs.finish())}; return leftResult; } // Left and right don't all use the same key. @@ -526,16 +547,18 @@ class PartialSchemaReqConverter { return {}; } - PSRExpr::visitAnyShape(inputResult->_reqMap.getRoot(), [&](PartialSchemaEntry& entry) { - ABT path = entry.first._path; + PSRExpr::visitAnyShape(inputResult->_reqMap.getRoot(), + [&](PartialSchemaEntry& entry, const PSRExpr::VisitorContext&) { + ABT path = entry.first._path; - // Updated key path to be now rooted at n, with existing key path as child. - ABT appendedPath = n; - std::swap(appendedPath.cast()->getPath(), path); - std::swap(path, appendedPath); + // Updated key path to be now rooted at n, with existing key path + // as child. 
+ ABT appendedPath = n; + std::swap(appendedPath.cast()->getPath(), path); + std::swap(path, appendedPath); - entry.first._path = path; - }); + entry.first._path = path; + }); return inputResult; } @@ -584,29 +607,28 @@ class PartialSchemaReqConverter { // Union the single intervals together. If we have PathCompare [EqMember] Const [[1, 2, 3]] // we create [1, 1] U [2, 2] U [3, 3]. - boost::optional unionedInterval; + // The intervals are added so that they form a DNF from single-predicate conjunctions that + // are children of a top-level disjunction. The creation of the interval DNF doesn't reuse + // combineIntervalsDNF() because this function would end up adding its first argument to + // itself for each new bound, thus creating N*(N+1)/2 duplicates. + IntervalReqExpr::Builder builder; + builder.pushDisj(); for (size_t i = 0; i < boundArray->size(); i++) { auto singleBoundLow = Constant::createFromCopy(boundArray->getAt(i).first, boundArray->getAt(i).second); auto singleBoundHigh = singleBoundLow; - - auto singleInterval = IntervalReqExpr::makeSingularDNF( - IntervalRequirement{{true /*inclusive*/, std::move(singleBoundLow)}, - {true /*inclusive*/, std::move(singleBoundHigh)}}); - - if (unionedInterval) { - // Union the singleInterval with the unionedInterval we want to update. - combineIntervalsDNF(false /*intersect*/, *unionedInterval, singleInterval); - } else { - unionedInterval = std::move(singleInterval); - } + builder.pushConj() + .atom({{true /*inclusive*/, std::move(singleBoundLow)}, + {true /*inclusive*/, std::move(singleBoundHigh)}}) + .pop(); } + auto unionedInterval = std::move(*builder.finish()); return {{PartialSchemaRequirements{ PSRExpr::makeSingularDNF(PartialSchemaKey{make()}, PartialSchemaRequirement{boost::none /*boundProjectionName*/, - std::move(*unionedInterval), + std::move(unionedInterval), false /*isPerfOnly*/})}}}; } @@ -722,13 +744,12 @@ boost::optional convertExprToPartialSchemaReq( // We need to determine either path or interval (or both). { - bool trivialAtom = false; - PSRExpr::visitAnyShape(reqMap.getRoot(), [&](PartialSchemaEntry& entry) { - auto&& [key, req] = entry; - if (key._path.is() && isIntervalReqFullyOpenDNF(req.getIntervals())) { - trivialAtom = true; - } - }); + const bool trivialAtom = + PSRExpr::any(reqMap.getRoot(), [](const PartialSchemaEntry& entry) { + auto&& [key, req] = entry; + return key._path.is() && + isIntervalReqFullyOpenDNF(req.getIntervals()); + }); if (trivialAtom) { return {}; } @@ -736,12 +757,13 @@ boost::optional convertExprToPartialSchemaReq( // If we over-approximate, we need to switch all requirements to perf-only. if (result->_retainPredicate) { - PSRExpr::visitDNF(reqMap.getRoot(), [&](PartialSchemaEntry& entry) { - auto& [key, req] = entry; - if (!req.getIsPerfOnly()) { - req = {req.getBoundProjectionName(), req.getIntervals(), true /*isPerfOnly*/}; - } - }); + PSRExpr::visitDNF( + reqMap.getRoot(), [&](PartialSchemaEntry& entry, const PSRExpr::VisitorContext&) { + auto& [key, req] = entry; + if (!req.getIsPerfOnly()) { + req = {req.getBoundProjectionName(), req.getIntervals(), true /*isPerfOnly*/}; + } + }); } return result; } @@ -767,92 +789,96 @@ bool simplifyPartialSchemaReqPaths(const boost::optional& scanPr // If any one conjunction is empty, the overall disjunction is trivially true. 
bool hasEmptyConjunction = false; - PSRExpr::visitDisjuncts(reqMap.getRoot(), [&](const PSRExpr::Node& disjunct, size_t) { - resultReqs.pushConj(); - boost::optional> prevEntry; + PSRExpr::visitDisjuncts( + reqMap.getRoot(), [&](const PSRExpr::Node& disjunct, const PSRExpr::VisitorContext&) { + resultReqs.pushConj(); + boost::optional> prevEntry; - // Track whether this conjunction has any arguments. - bool hasAnyConjunct = false; + // Track whether this conjunction has any arguments. + bool hasAnyConjunct = false; - const auto nextEntryFn = [&](PartialSchemaKey newKey, const PartialSchemaRequirement& req) { - resultReqs.atom(std::move(prevEntry->first), std::move(prevEntry->second)); - prevEntry.reset({std::move(newKey), req}); - hasAnyConjunct = true; - }; + const auto nextEntryFn = [&](PartialSchemaKey newKey, + const PartialSchemaRequirement& req) { + resultReqs.atom(std::move(prevEntry->first), std::move(prevEntry->second)); + prevEntry.reset({std::move(newKey), req}); + hasAnyConjunct = true; + }; - // Simplify paths by eliminating unnecessary Traverse elements. + // Simplify paths by eliminating unnecessary Traverse elements. - PSRExpr::visitConjuncts(disjunct, [&](const PSRExpr::Node conjunct, size_t) { - const auto& [key, req] = conjunct.cast()->getExpr(); + PSRExpr::visitConjuncts( + disjunct, [&](const PSRExpr::Node conjunct, const PSRExpr::VisitorContext&) { + const auto& [key, req] = conjunct.cast()->getExpr(); - PartialSchemaKey newKey = key; + PartialSchemaKey newKey = key; - bool simplified = false; - const bool containedTraverse = checkPathContainsTraverse(newKey._path); - if (key._projectionName == scanProjName && containedTraverse) { - simplified = simplifyTraverseNonArray(newKey._path, multikeynessTrie); - } - // At this point we have simplified the path in newKey. + bool simplified = false; + const bool containedTraverse = checkPathContainsTraverse(newKey._path); + if (key._projectionName == scanProjName && containedTraverse) { + simplified = simplifyTraverseNonArray(newKey._path, multikeynessTrie); + } + // At this point we have simplified the path in newKey. - if (!prevEntry) { - prevEntry.reset({std::move(newKey), req}); - return; - } - if (prevEntry->first != newKey) { - nextEntryFn(std::move(newKey), req); - return; - } + if (!prevEntry) { + prevEntry.reset({std::move(newKey), req}); + return; + } + if (prevEntry->first != newKey) { + nextEntryFn(std::move(newKey), req); + return; + } - auto& prevReq = prevEntry->second; - auto resultIntervals = prevReq.getIntervals(); - combineIntervalsDNF(true /*intersect*/, resultIntervals, req.getIntervals()); - - // Ensure that Traverse-less keys appear only once: we can move the conjunction into the - // intervals and simplify. For traversing keys, check if interval is subsumed in the - // other and if so, then combine. - if (containedTraverse && !simplified && - !(resultIntervals == prevReq.getIntervals() || - resultIntervals == req.getIntervals())) { - // We cannot combine multikey paths where one interval does not subsume the other. - nextEntryFn(std::move(newKey), req); - return; - } + auto& prevReq = prevEntry->second; + auto resultIntervals = prevReq.getIntervals(); + combineIntervalsDNF(true /*intersect*/, resultIntervals, req.getIntervals()); + + // Ensure that Traverse-less keys appear only once: we can move the conjunction + // into the intervals and simplify. For traversing keys, check if interval is + // subsumed in the other and if so, then combine. 
+ if (containedTraverse && !simplified && + !(resultIntervals == prevReq.getIntervals() || + resultIntervals == req.getIntervals())) { + // We cannot combine multikey paths where one interval does not subsume the + // other. + nextEntryFn(std::move(newKey), req); + return; + } - auto resultBoundProjName = prevReq.getBoundProjectionName(); - if (const auto& boundProjName = req.getBoundProjectionName()) { - if (resultBoundProjName) { - // The existing name wins (stays in 'reqMap'). We tell the caller that the name - // "boundProjName" is available under "resultBoundProjName". - projectionRenames.emplace(*boundProjName, *resultBoundProjName); - } else { - resultBoundProjName = boundProjName; - } - } + auto resultBoundProjName = prevReq.getBoundProjectionName(); + if (const auto& boundProjName = req.getBoundProjectionName()) { + if (resultBoundProjName) { + // The existing name wins (stays in 'reqMap'). We tell the caller that + // the name "boundProjName" is available under "resultBoundProjName". + projectionRenames.emplace(*boundProjName, *resultBoundProjName); + } else { + resultBoundProjName = boundProjName; + } + } - if (constFold && !simplifyFn(resultIntervals)) { - // TODO SERVER-73827 Consider having the BoolExpr builder handle simplifying away - // trivial (always-true or always-false) clauses. + if (constFold && !simplifyFn(resultIntervals)) { + // TODO SERVER-73827 Consider having the BoolExpr builder handle simplifying + // away trivial (always-true or always-false) clauses. - // An always-false conjunct means the whole conjunction is always-false. - // However, there can be other disjuncts, so we can't short-circuit the whole tree. - // Create an explicit always-false atom. - resultIntervals = IntervalReqExpr::makeSingularDNF( - BoundRequirement::makePlusInf(), BoundRequirement::makeMinusInf()); + // An always-false conjunct means the whole conjunction is always-false. + // However, there can be other disjuncts, so we can't short-circuit the + // whole tree. Create an explicit always-false atom. + resultIntervals = IntervalReqExpr::makeSingularDNF( + BoundRequirement::makePlusInf(), BoundRequirement::makeMinusInf()); + } + prevReq = {std::move(resultBoundProjName), + std::move(resultIntervals), + req.getIsPerfOnly() && prevReq.getIsPerfOnly()}; + }); + if (prevEntry) { + resultReqs.atom(std::move(prevEntry->first), std::move(prevEntry->second)); + hasAnyConjunct = true; } - prevReq = {std::move(resultBoundProjName), - std::move(resultIntervals), - req.getIsPerfOnly() && prevReq.getIsPerfOnly()}; - }); - if (prevEntry) { - resultReqs.atom(std::move(prevEntry->first), std::move(prevEntry->second)); - hasAnyConjunct = true; - } - resultReqs.pop(); - if (!hasAnyConjunct) { - hasEmptyConjunction = true; - } - }); + resultReqs.pop(); + if (!hasAnyConjunct) { + hasEmptyConjunction = true; + } + }); boost::optional builderResult = resultReqs.finish(); if (!builderResult) { @@ -894,7 +920,7 @@ bool simplifyPartialSchemaReqPaths(const boost::optional& scanPr } /** - * Try to compute the intersection of a an existing PartialSchemaRequirements object and a new + * Try to compute the intersection of an existing PartialSchemaRequirements object and a new * key/requirement pair. * * Returns false on "failure", which means the result was not representable. 
This happens if there @@ -913,30 +939,36 @@ static bool intersectPartialSchemaReq(PartialSchemaRequirements& reqMap, PartialSchemaRequirement req) { for (;;) { bool merged = false; + bool canIntersect = true; const bool reqHasBoundProj = req.getBoundProjectionName().has_value(); - for (const auto& [existingKey, existingReq] : reqMap.conjuncts()) { - uassert(6624150, - "Existing key referring to new requirement.", - !reqHasBoundProj || - existingKey._projectionName != *req.getBoundProjectionName()); - - if (const auto& boundProjName = existingReq.getBoundProjectionName(); - boundProjName && key._projectionName == *boundProjName) { - // The new key is referring to a projection the existing requirement binds. - if (reqHasBoundProj) { - return false; + PSRExpr::visitSingletonDNF( + reqMap.getRoot(), [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext& ctx) { + const auto& [existingKey, existingReq] = e; + + uassert(6624150, + "Existing key referring to new requirement.", + !reqHasBoundProj || + existingKey._projectionName != *req.getBoundProjectionName()); + + if (const auto& boundProjName = existingReq.getBoundProjectionName(); + boundProjName && key._projectionName == *boundProjName) { + // The new key is referring to a projection the existing requirement binds. + if (reqHasBoundProj) { + canIntersect = false; + } else { + key = PartialSchemaKey{ + existingKey._projectionName, + PathAppender::append(existingKey._path, std::move(key._path)), + }; + merged = true; + } + ctx.returnEarly(); } + }); - key = PartialSchemaKey{ - existingKey._projectionName, - PathAppender::append(existingKey._path, std::move(key._path)), - }; - merged = true; - break; - } - } - - if (merged) { + if (!canIntersect) { + return false; + } else if (merged) { // continue around the loop } else { reqMap.add(std::move(key), std::move(req)); @@ -954,11 +986,12 @@ bool isSubsetOfPartialSchemaReq(const PartialSchemaRequirements& lhs, // However, we're assuming that 'rhs' has no projections. // If it did have projections, the result (lhs ^ rhs) would have projections // and wouldn't match 'lhs'. - PSRExpr::visitAnyShape(rhs.getRoot(), [](const PartialSchemaEntry& e) { - tassert(7155010, - "isSubsetOfPartialSchemaReq expects 'rhs' to have no projections", - !e.second.getBoundProjectionName()); - }); + PSRExpr::visitAnyShape( + rhs.getRoot(), [](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + tassert(7155010, + "isSubsetOfPartialSchemaReq expects 'rhs' to have no projections", + !e.second.getBoundProjectionName()); + }); PartialSchemaRequirements intersection = lhs; const bool success = intersectPartialSchemaReq(intersection, rhs); @@ -979,19 +1012,22 @@ bool isSubsetOfPartialSchemaReq(const PartialSchemaRequirements& lhs, bool intersectPartialSchemaReq(PartialSchemaRequirements& target, const PartialSchemaRequirements& source) { - // TODO SERVER-69026 Consider implementing intersect for top-level disjunctions. 
if (!PSRExpr::isSingletonDisjunction(target.getRoot()) || !PSRExpr::isSingletonDisjunction(source.getRoot())) { return false; } - for (const auto& [key, req] : source.conjuncts()) { - if (!intersectPartialSchemaReq(target, key, req)) { - return false; - } - } + bool performedIntersection = true; - return true; + PSRExpr::visitDNF(source.getRoot(), + [&](const PartialSchemaEntry& entry, const PSRExpr::VisitorContext& ctx) { + if (!intersectPartialSchemaReq(target, entry.first, entry.second)) { + performedIntersection = false; + ctx.returnEarly(); + } + }); + + return performedIntersection; } PartialSchemaRequirements unionPartialSchemaReq(PartialSchemaRequirements&& left, @@ -1007,7 +1043,7 @@ PartialSchemaRequirements unionPartialSchemaReq(PartialSchemaRequirements&& left resultNodes.insert(resultNodes.end(), std::make_move_iterator(rightDisj.nodes().begin()), std::make_move_iterator(rightDisj.nodes().end())); - return PSRExpr::make(std::move(resultNodes)); + return PartialSchemaRequirements{PSRExpr::make(std::move(resultNodes))}; } std::string encodeIndexKeyName(const size_t indexField) { @@ -1123,7 +1159,8 @@ static bool computeCandidateIndexEntry(PrefixId& prefixId, const QueryHints& hints, const ConstFoldFn& constFold, const IndexCollationSpec& indexCollationSpec, - CandidateIndexEntry& entry) { + CandidateIndexEntry& entry, + const bool isMultiIndexPlan) { auto& fieldProjMap = entry._fieldProjectionMap; auto& eqPrefixes = entry._eqPrefixes; auto& correlatedProjNames = entry._correlatedProjNames; @@ -1255,9 +1292,13 @@ static bool computeCandidateIndexEntry(PrefixId& prefixId, entry._residualRequirements = residualReqs.finish(); - if (entry._intervalPrefixSize == 0 && !entry._residualRequirements) { - // Need to encode at least one query requirement in the index bounds. - return false; + if (entry._intervalPrefixSize == 0) { + if (!entry._residualRequirements || isMultiIndexPlan) { + // Need to encode at least one query requirement in the index bounds. Also, if an + // unbound index is part of a multi-index plan, discard it as it could be more expensive + // than a single collection scan. + return false; + } } return true; @@ -1284,7 +1325,8 @@ CandidateIndexes computeCandidateIndexes(PrefixId& prefixId, const PartialSchemaRequirements& reqMap, const ScanDefinition& scanDef, const QueryHints& hints, - const ConstFoldFn& constFold) { + const ConstFoldFn& constFold, + const bool isMultiIndexPlan) { // A candidate index is one that can directly satisfy the SargableNode, without using // any other indexes. Typically a disjunction would require unioning two different indexes, // so we bail out if there's a nontrivial disjunction here. @@ -1304,28 +1346,45 @@ CandidateIndexes computeCandidateIndexes(PrefixId& prefixId, // paths may be satisfied via the same index scan. MultikeynessTrie indexPathTrie; - for (const auto& [key, req] : reqMap.conjuncts()) { - if (req.getIsPerfOnly()) { - // Perf only do not need to be necessarily satisfied. - continue; - } + // Flag which is set if visitor identifies a case which prohibits index usage. + bool prohibitIndexUsage = false; - if (!unsatisfiedKeysInitial.insert(key).second) { - // We cannot satisfy two or more non-multikey path instances using an index. 
- return {}; - } + PSRExpr::visitSingletonDNF( + reqMap.getRoot(), [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext& ctx) { + const auto& [key, req] = e; - if (!hints._fastIndexNullHandling && !req.getIsPerfOnly() && req.mayReturnNull(constFold)) { - // We cannot use indexes to return values for fields if we have an interval with null - // bounds. - return {}; - } + if (req.getIsPerfOnly()) { + // Perf only do not need to be necessarily satisfied. + return; + } - const auto currentTrie = MultikeynessTrie::fromIndexPath(key._path); - if (!checkCanFuse(indexPathTrie, currentTrie)) { - return {}; - } - indexPathTrie.merge(currentTrie); + if (!unsatisfiedKeysInitial.insert(key).second) { + // We cannot satisfy two or more non-multikey path instances using an index. + prohibitIndexUsage = true; + ctx.returnEarly(); + return; + } + + if (!hints._fastIndexNullHandling && !req.getIsPerfOnly() && + req.mayReturnNull(constFold)) { + // We cannot use indexes to return values for fields if we have an interval with + // null bounds. + prohibitIndexUsage = true; + ctx.returnEarly(); + return; + } + + const auto currentTrie = MultikeynessTrie::fromIndexPath(key._path); + if (!checkCanFuse(indexPathTrie, currentTrie)) { + prohibitIndexUsage = true; + ctx.returnEarly(); + return; + } + indexPathTrie.merge(currentTrie); + }); + + if (prohibitIndexUsage) { + return {}; } CandidateIndexes result; @@ -1340,7 +1399,8 @@ CandidateIndexes computeCandidateIndexes(PrefixId& prefixId, hints, constFold, indexDef.getCollationSpec(), - entry); + entry, + isMultiIndexPlan); if (success && entry._eqPrefixes.size() >= hints._minIndexEqPrefixes) { result.push_back(std::move(entry)); @@ -1361,67 +1421,79 @@ boost::optional computeScanParams(PrefixId& prefixId, bool invalid = false; size_t entryIndex = 0; residReqs.pushDisj(); - PSRExpr::visitDisjuncts(reqMap.getRoot(), [&](const PSRExpr::Node& disjunct, size_t) { - residReqs.pushConj(); - PSRExpr::visitConjuncts(disjunct, [&](const PSRExpr::Node& conjunct, size_t) { - PSRExpr::visitAtom(conjunct, [&](const PartialSchemaEntry& e) { - if (invalid) { - // Short circuit if we're going to return {} anyway. - return; - } - const auto& [key, req] = e; - if (req.getIsPerfOnly()) { - // Ignore perf only requirements. - return; - } - if (key._projectionName != rootProj) { - // We are not sitting right above a ScanNode. - invalid = true; - return; - } + PSRExpr::visitDisjuncts( + reqMap.getRoot(), + [&](const PSRExpr::Node& disjunct, const PSRExpr::VisitorContext& disjCtx) { + residReqs.pushConj(); + PSRExpr::visitConjuncts( + disjunct, + [&](const PSRExpr::Node& conjunct, const PSRExpr::VisitorContext& conjCtx) { + PSRExpr::visitAtom( + conjunct, [&](const PartialSchemaEntry& e, const PSRExpr::VisitorContext&) { + const auto& [key, req] = e; + if (req.getIsPerfOnly()) { + // Ignore perf only requirements. + return; + } + if (key._projectionName != rootProj) { + // We are not sitting right above a ScanNode. + invalid = true; + conjCtx.returnEarly(); + return; + } - if (auto pathGet = key._path.cast(); pathGet != nullptr) { - const FieldNameType& fieldName = pathGet->name(); - - // Extract a new requirements path with removed simple paths. - // For example if we have a key Get "a" Traverse Compare = 0 we leave - // only Traverse Compare 0. 
- if (const auto& boundProjName = req.getBoundProjectionName(); - boundProjName && pathGet->getPath().is()) { - const auto [it, insertedInFPM] = - fieldProjMap._fieldProjections.emplace(fieldName, *boundProjName); - - if (!insertedInFPM) { - residReqs.atom(PartialSchemaKey{it->second, make()}, - PartialSchemaRequirement{req.getBoundProjectionName(), - req.getIntervals(), - false /*isPerfOnly*/}, - entryIndex); - } else if (!isIntervalReqFullyOpenDNF(req.getIntervals())) { - residReqs.atom( - PartialSchemaKey{*boundProjName, make()}, - PartialSchemaRequirement{boost::none /*boundProjectionName*/, - req.getIntervals(), - false /*isPerfOnly*/}, - entryIndex); - } - } else { - const ProjectionName& tempProjName = - getExistingOrTempProjForFieldName(prefixId, fieldName, fieldProjMap); - residReqs.atom( - PartialSchemaKey{tempProjName, pathGet->getPath()}, req, entryIndex); - } - } else { - // Move other conditions into the residual map. - fieldProjMap._rootProjection = rootProj; - residReqs.atom(key, req, entryIndex); - } + if (auto pathGet = key._path.cast(); pathGet != nullptr) { + const FieldNameType& fieldName = pathGet->name(); - entryIndex++; - }); + // Extract a new requirements path with removed simple paths. + // For example if we have a key Get "a" Traverse Compare = 0 we + // leave only Traverse Compare 0. + if (const auto& boundProjName = req.getBoundProjectionName(); + boundProjName && pathGet->getPath().is()) { + const auto [it, insertedInFPM] = + fieldProjMap._fieldProjections.emplace(fieldName, + *boundProjName); + + if (!insertedInFPM) { + residReqs.atom( + PartialSchemaKey{it->second, make()}, + PartialSchemaRequirement{req.getBoundProjectionName(), + req.getIntervals(), + false /*isPerfOnly*/}, + entryIndex); + } else if (!isIntervalReqFullyOpenDNF(req.getIntervals())) { + residReqs.atom( + PartialSchemaKey{*boundProjName, make()}, + PartialSchemaRequirement{ + boost::none /*boundProjectionName*/, + req.getIntervals(), + false /*isPerfOnly*/}, + entryIndex); + } + } else { + const ProjectionName& tempProjName = + getExistingOrTempProjForFieldName( + prefixId, fieldName, fieldProjMap); + residReqs.atom( + PartialSchemaKey{tempProjName, pathGet->getPath()}, + req, + entryIndex); + } + } else { + // Move other conditions into the residual map. + fieldProjMap._rootProjection = rootProj; + residReqs.atom(key, req, entryIndex); + } + + entryIndex++; + }); + }); + if (invalid) { + disjCtx.returnEarly(); + return; + } + residReqs.pop(); }); - residReqs.pop(); - }); if (invalid) { return {}; } @@ -1696,48 +1768,55 @@ void lowerPartialSchemaRequirement(const PartialSchemaKey& key, } void lowerPartialSchemaRequirements(boost::optional scanGroupCE, + boost::optional baseCE, std::vector indexPredSels, ResidualRequirementsWithOptionalCE::Node requirements, const PathToIntervalFn& pathToInterval, PhysPlanBuilder& builder) { + using Requirements = ResidualRequirementsWithOptionalCE; + sortResidualRequirements(requirements); // If there is a single Conjunction, build a sequence of FilterNode (one for each conjunct). - if (ResidualRequirementsWithOptionalCE::isSingletonDisjunction(requirements)) { - ResidualRequirementsWithOptionalCE::visitDNF( - requirements, [&](const ResidualRequirementWithOptionalCE& entry) { - auto residualCE = scanGroupCE; - if (residualCE) { - if (!indexPredSels.empty()) { - *residualCE *= ce::conjExponentialBackoff(indexPredSels); - } - if (entry._ce && *scanGroupCE > 0.0) { - // Compute the selectivity after we assign CE, which is the "input" to the - // cost. 
- indexPredSels.push_back(*entry._ce / *scanGroupCE); - } - } - - lowerPartialSchemaRequirement( - entry._key, entry._req, pathToInterval, residualCE, builder); - }); + if (Requirements::isSingletonDisjunction(requirements)) { + Requirements::visitDNF(requirements, + [&](const ResidualRequirementWithOptionalCE& entry, + const Requirements::VisitorContext&) { + auto residualCE = baseCE; + if (residualCE) { + if (!indexPredSels.empty()) { + *residualCE *= ce::conjExponentialBackoff(indexPredSels); + } + if (entry._ce && *scanGroupCE > 0.0) { + // Compute the selectivity after we assign CE, which is + // the "input" to the cost. + indexPredSels.push_back(*entry._ce / *scanGroupCE); + } + } + + lowerPartialSchemaRequirement( + entry._key, entry._req, pathToInterval, residualCE, builder); + }); return; } // For multiple Conjunctions, build a top-level Or expression representing the composition. ABTVector toOr; std::vector disjSels; - ResidualRequirementsWithOptionalCE::visitDisjuncts( + Requirements::visitDisjuncts( requirements, - [&](const typename ResidualRequirementsWithOptionalCE::Node& child, const size_t) { + [&](const typename ResidualRequirementsWithOptionalCE::Node& child, + const Requirements::VisitorContext&) { ABTVector toAnd; std::vector conjSels; - ResidualRequirementsWithOptionalCE::visitConjuncts( + Requirements::visitConjuncts( child, - [&](const typename ResidualRequirementsWithOptionalCE::Node& atom, const size_t) { - ResidualRequirementsWithOptionalCE::visitAtom( - atom, [&](const ResidualRequirementWithOptionalCE& entry) { + [&](const typename Requirements::Node& atom, const Requirements::VisitorContext&) { + Requirements::visitAtom( + atom, + [&](const ResidualRequirementWithOptionalCE& entry, + const Requirements::VisitorContext&) { const auto& [key, req, ce] = entry; auto [evalPath, evalFilter] = makeLoweredExpressionsForReq(key, req, pathToInterval); @@ -1757,10 +1836,10 @@ void lowerPartialSchemaRequirements(boost::optional scanGroupCE, toOr.push_back(makeBalancedBooleanOpTree(Operations::And, std::move(toAnd))); }); - boost::optional finalFilterCE = scanGroupCE; + boost::optional finalFilterCE = baseCE; if (!disjSels.empty()) { indexPredSels.push_back(ce::disjExponentialBackoff(disjSels)); - finalFilterCE = *scanGroupCE * ce::conjExponentialBackoff(indexPredSels); + finalFilterCE = *baseCE * ce::conjExponentialBackoff(indexPredSels); } builder.make(finalFilterCE, makeBalancedBooleanOpTree(Operations::Or, std::move(toOr)), @@ -1768,17 +1847,21 @@ void lowerPartialSchemaRequirements(boost::optional scanGroupCE, } void sortResidualRequirements(ResidualRequirementsWithOptionalCE::Node& residualReqs) { - ResidualRequirementsWithOptionalCE::visitDisjuncts( - residualReqs, [](ResidualRequirementsWithOptionalCE::Node& child, const size_t) { + using Requirements = ResidualRequirementsWithOptionalCE; + + Requirements::visitDisjuncts( + residualReqs, [](Requirements::Node& child, const Requirements::VisitorContext&) { // Collect the estimated costs of each child under a conjunction. Assume it is // more expensive to deliver a bound projection than to just filter. 
std::vector> costs; size_t numConjuncts = 0; ResidualRequirementsWithOptionalCE::visitConjuncts( - child, [&](ResidualRequirementsWithOptionalCE::Node& atom, const size_t index) { - ResidualRequirementsWithOptionalCE::visitAtom( - atom, [&](ResidualRequirementWithOptionalCE& entry) { + child, [&](Requirements::Node& atom, const Requirements::VisitorContext& conjCtx) { + Requirements::visitAtom( + atom, + [&](ResidualRequirementWithOptionalCE& entry, + const Requirements::VisitorContext&) { numConjuncts++; if (entry._ce) { @@ -1789,7 +1872,8 @@ void sortResidualRequirements(ResidualRequirementsWithOptionalCE::Node& residual if (!isIntervalReqFullyOpenDNF(entry._req.getIntervals())) { multiplier++; } - costs.emplace_back(entry._ce->_value * multiplier, index); + costs.emplace_back(entry._ce->_value * multiplier, + conjCtx.getChildIndex()); } }); }); @@ -1819,14 +1903,21 @@ ResidualRequirementsWithOptionalCE::Node createResidualReqsWithCE( b.pushDisj(); ResidualRequirements::visitDisjuncts( - residReqs, [&](const ResidualRequirements::Node& child, const size_t) { + residReqs, + [&](const ResidualRequirements::Node& child, const ResidualRequirements::VisitorContext&) { b.pushConj(); ResidualRequirements::visitConjuncts( - child, [&](const ResidualRequirements::Node& atom, const size_t) { - ResidualRequirements::visitAtom(atom, [&](const ResidualRequirement& req) { - b.atom(req._key, req._req, partialSchemaKeyCE.at(req._entryIndex).second); - }); + child, + [&](const ResidualRequirements::Node& atom, + const ResidualRequirements::VisitorContext&) { + ResidualRequirements::visitAtom( + atom, + [&](const ResidualRequirement& req, + const ResidualRequirements::VisitorContext&) { + b.atom( + req._key, req._req, partialSchemaKeyCE.at(req._entryIndex).second); + }); }); b.pop(); @@ -1839,14 +1930,16 @@ ResidualRequirementsWithOptionalCE::Node createResidualReqsWithEmptyCE(const PSR ResidualRequirementsWithOptionalCE::Builder b; b.pushDisj(); - PSRExpr::visitDisjuncts(reqs, [&](const PSRExpr::Node& child, const size_t) { + PSRExpr::visitDisjuncts(reqs, [&](const PSRExpr::Node& child, const PSRExpr::VisitorContext&) { b.pushConj(); - PSRExpr::visitConjuncts(child, [&](const PSRExpr::Node& atom, const size_t) { - PSRExpr::visitAtom(atom, [&](const PartialSchemaEntry& entry) { - b.atom(entry.first, entry.second, boost::none); + PSRExpr::visitConjuncts( + child, [&](const PSRExpr::Node& atom, const PSRExpr::VisitorContext&) { + PSRExpr::visitAtom( + atom, [&](const PartialSchemaEntry& entry, const PSRExpr::VisitorContext&) { + b.atom(entry.first, entry.second, boost::none); + }); }); - }); b.pop(); }); @@ -1872,13 +1965,19 @@ void removeRedundantResidualPredicates(const ProjectionNameOrderPreservingSet& r newReqs.pushDisj(); ResidualRequirements::visitDisjuncts( - *residualReqs, [&](const ResidualRequirements::Node& child, const size_t) { + *residualReqs, + [&](const ResidualRequirements::Node& child, + const ResidualRequirements::VisitorContext&) { newReqs.pushConj(); ResidualRequirements::visitConjuncts( - child, [&](const ResidualRequirements::Node& atom, const size_t) { + child, + [&](const ResidualRequirements::Node& atom, + const ResidualRequirements::VisitorContext&) { ResidualRequirements::visitAtom( - atom, [&](const ResidualRequirement& residReq) { + atom, + [&](const ResidualRequirement& residReq, + const ResidualRequirements::VisitorContext&) { auto& [key, req, ce] = residReq; if (const auto& boundProjName = req.getBoundProjectionName(); @@ -2651,7 +2750,7 @@ PhysPlanBuilder 
lowerEqPrefixes(PrefixId& prefixId, eqPrefixes, eqPrefixIndex, reverseOrder, - correlatedProjNames, + std::move(correlatedProjNames), indexPredSelMap, indexCE, scanGroupCE, @@ -2659,13 +2758,11 @@ PhysPlanBuilder lowerEqPrefixes(PrefixId& prefixId, return lowerTransport.lower(eqPrefixes.at(eqPrefixIndex)._interval); } -bool hasProperIntervals(const PartialSchemaRequirements& reqMap) { +bool hasProperIntervals(const PSRExpr::Node& reqs) { // Compute if this node has any proper (not fully open) intervals. - bool hasProperIntervals = false; - PSRExpr::visitDNF(reqMap.getRoot(), [&](const PartialSchemaEntry& e) { - hasProperIntervals |= !isIntervalReqFullyOpenDNF(e.second.getIntervals()); + return PSRExpr::any(reqs, [](const PartialSchemaEntry& e) { + return !isIntervalReqFullyOpenDNF(e.second.getIntervals()); }); - return hasProperIntervals; } } // namespace mongo::optimizer diff --git a/src/mongo/db/query/optimizer/utils/utils.h b/src/mongo/db/query/optimizer/utils/utils.h index 8fb3602e31d42..64075cf30b190 100644 --- a/src/mongo/db/query/optimizer/utils/utils.h +++ b/src/mongo/db/query/optimizer/utils/utils.h @@ -29,12 +29,37 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/db/query/optimizer/bool_expression.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/defs.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/index_bounds.h" +#include "mongo/db/query/optimizer/metadata.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/node_defs.h" +#include "mongo/db/query/optimizer/partial_schema_requirements.h" #include "mongo/db/query/optimizer/props.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/const_fold_interface.h" #include "mongo/db/query/optimizer/utils/physical_plan_builder.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/id_generator.h" +#include "mongo/util/str.h" namespace mongo::optimizer { @@ -331,7 +356,8 @@ CandidateIndexes computeCandidateIndexes(PrefixId& prefixId, const PartialSchemaRequirements& reqMap, const ScanDefinition& scanDef, const QueryHints& hints, - const ConstFoldFn& constFold); + const ConstFoldFn& constFold, + bool isMultiIndexPlan = false); /** * Computes a set of residual predicates which will be applied on top of a Scan. @@ -359,9 +385,14 @@ void lowerPartialSchemaRequirement(const PartialSchemaKey& key, /** * Lower ResidualRequirementsWithCE to a subtree consisting of functionally equivalent Filter and * Eval nodes. Note that we take indexPredSels by value here because the implementation needs its - * own copy. + * own copy. "scanGroupCE" is the estimated cardinality of the underlying collection scan (the + * Sargable node's child group), while the "baseCE" is the initial cardinality on top of which the + * residual predicates act. For a sargable node with a "Seek" target it is "1" to reflect the fact + * that we fetch one row id at a time, and for "Complete" and "Index" it is the same as the + * "scanGroupCE". 
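The calls to `ce::conjExponentialBackoff`/`ce::disjExponentialBackoff` earlier in this hunk combine per-predicate selectivities before scaling `baseCE`. A minimal, standalone sketch of the conjunctive case, assuming the usual halving-exponent scheme (illustration only, not the optimizer's actual `ce::` implementation):

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// Sketch only: combine conjunct selectivities with exponential backoff.
// Sort ascending so the most selective predicate contributes fully, then
// dampen each additional predicate with successively smaller exponents.
double conjExponentialBackoffSketch(std::vector<double> sels) {
    std::sort(sels.begin(), sels.end());
    double result = 1.0;
    double exponent = 1.0;
    for (double s : sels) {
        result *= std::pow(s, exponent);
        exponent /= 2.0;  // 1, 1/2, 1/4, 1/8, ...
    }
    return result;
}

// Mirroring the hunk above: residual CE acts on top of baseCE, e.g.
//   double residualCE = baseCE * conjExponentialBackoffSketch(indexPredSels);
// A disjunctive variant would apply the same idea to (1 - s) terms.
```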
*/ void lowerPartialSchemaRequirements(boost::optional scanGroupCE, + boost::optional baseCE, std::vector indexPredSels, ResidualRequirementsWithOptionalCE::Node requirements, const PathToIntervalFn& pathToInterval, @@ -458,5 +489,5 @@ PhysPlanBuilder lowerEqPrefixes(PrefixId& prefixId, CEType scanGroupCE, bool useSortedMerge); -bool hasProperIntervals(const PartialSchemaRequirements& reqMap); +bool hasProperIntervals(const PSRExpr::Node& reqs); } // namespace mongo::optimizer diff --git a/src/mongo/db/query/parsed_distinct.cpp b/src/mongo/db/query/parsed_distinct.cpp index 03f2e3bf9e96a..bfd686d1ee165 100644 --- a/src/mongo/db/query/parsed_distinct.cpp +++ b/src/mongo/db/query/parsed_distinct.cpp @@ -27,20 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/parsed_distinct.h" - +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/pipeline/document_source_replace_root.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/distinct_command_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/max_time_ms_parser.h" +#include "mongo/db/query/parsed_distinct.h" #include "mongo/db/query/query_request_helper.h" #include "mongo/db/repl/read_concern_args.h" #include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/str.h" namespace mongo { @@ -49,6 +69,7 @@ const char ParsedDistinct::kKeyField[] = "key"; const char ParsedDistinct::kQueryField[] = "query"; const char ParsedDistinct::kCollationField[] = "collation"; const char ParsedDistinct::kUnwoundArrayFieldForViewUnwind[] = "_internalUnwoundArray"; +const char ParsedDistinct::kHintField[] = "hint"; namespace { @@ -175,8 +196,10 @@ StatusWith ParsedDistinct::asAggregationCommand() const { invariant(_query); const FindCommandRequest& findCommand = _query->getFindCommandRequest(); - aggregationBuilder.append( - "aggregate", findCommand.getNamespaceOrUUID().nss().value_or(NamespaceString()).coll()); + tassert(ErrorCodes::BadValue, + "Unsupported type UUID for namespace", + findCommand.getNamespaceOrUUID().isNamespaceString()); + aggregationBuilder.append("aggregate", findCommand.getNamespaceOrUUID().nss().coll()); // Build a pipeline that accomplishes the distinct request. The building code constructs a // pipeline that looks like this, assuming the distinct is on the key "a.b.c" @@ -218,6 +241,7 @@ StatusWith ParsedDistinct::asAggregationCommand() const { pipelineBuilder.doneFast(); aggregationBuilder.append(kCollationField, findCommand.getCollation()); + aggregationBuilder.append(kHintField, findCommand.getHint()); int maxTimeMS = findCommand.getMaxTimeMS() ? 
static_cast(*findCommand.getMaxTimeMS()) : 0; if (maxTimeMS > 0) { @@ -273,6 +297,8 @@ StatusWith ParsedDistinct::parse(OperationContext* opCtx, findCommand->setCollation(collation.value().getOwned()); } + findCommand->setHint(parsedDistinct.getHint()); + // The IDL parser above does not handle generic command arguments. Since the underlying query // request requires the following options, manually parse and verify them here. if (auto readConcernElt = cmdObj[repl::ReadConcernArgs::kReadConcernFieldName]) { diff --git a/src/mongo/db/query/parsed_distinct.h b/src/mongo/db/query/parsed_distinct.h index 49809d98440d5..e666fb0920355 100644 --- a/src/mongo/db/query/parsed_distinct.h +++ b/src/mongo/db/query/parsed_distinct.h @@ -29,11 +29,24 @@ #pragma once +#include +#include +#include +#include #include #include +#include #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -52,6 +65,7 @@ class ParsedDistinct { static const char kCollationField[]; static const char kCommentField[]; static const char kUnwoundArrayFieldForViewUnwind[]; + static const char kHintField[]; ParsedDistinct(std::unique_ptr query, const std::string key, diff --git a/src/mongo/db/query/parsed_distinct_test.cpp b/src/mongo/db/query/parsed_distinct_test.cpp index 19b8f5539a87a..a5f9e4217ecaf 100644 --- a/src/mongo/db/query/parsed_distinct_test.cpp +++ b/src/mongo/db/query/parsed_distinct_test.cpp @@ -27,16 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/query/parsed_distinct.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/unittest/unittest.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -60,7 +75,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationNoQuery) { auto agg = pd.getValue().asAggregationCommand(); ASSERT_OK(agg); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, cmdObj); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -105,7 +120,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationDottedPathNoQuery) { auto agg = pd.getValue().asAggregationCommand(); ASSERT_OK(agg); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, cmdObj); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -146,7 +161,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) { << "testcoll" << "key" << "x" - << "collation" + << "hint" << BSON("b" << 5) << "collation" << BSON("locale" << "en_US") << "readConcern" @@ -164,7 +179,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) { auto agg = pd.getValue().asAggregationCommand(); ASSERT_OK(agg); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, cmdObj); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -182,6 +197,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithAllOptions) { BSON("readPreference" << "secondary")); ASSERT_EQUALS(ar.getValue().getMaxTimeMS().value_or(0), 100u); + ASSERT_BSONOBJ_EQ(ar.getValue().getHint().value_or(BSONObj()), fromjson("{ b : 5 }")); std::vector expectedPipeline{ BSON("$replaceRoot" << BSON( @@ -216,7 +232,7 @@ TEST(ParsedDistinctTest, ConvertToAggregationWithQuery) { auto agg = pd.getValue().asAggregationCommand(); ASSERT_OK(agg); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, cmdObj); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -264,7 +280,7 @@ TEST(ParsedDistinctTest, ExplainNotIncludedWhenConvertingToAggregationCommand) { ASSERT_FALSE(agg.getValue().hasField("explain")); - auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto cmdObj = OpMsgRequest::fromDBAndBody(testns.db_forTest(), 
agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, cmdObj); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); diff --git a/src/mongo/db/query/parsed_find_command.cpp b/src/mongo/db/query/parsed_find_command.cpp new file mode 100644 index 0000000000000..a6efdd6b5eeb5 --- /dev/null +++ b/src/mongo/db/query/parsed_find_command.cpp @@ -0,0 +1,367 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/query/parsed_find_command.h" + +#include +#include +#include + +#include +#include + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/query_planner_common.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" + +namespace mongo { + +namespace { +/** + * Does 'root' have a subtree of type 'subtreeType' with a node of type 'childType' inside? + */ +bool hasNodeInSubtree(const MatchExpression* root, + MatchExpression::MatchType childType, + MatchExpression::MatchType subtreeType) { + if (subtreeType == root->matchType()) { + return QueryPlannerCommon::hasNode(root, childType); + } + for (size_t i = 0; i < root->numChildren(); ++i) { + if (hasNodeInSubtree(root->getChild(i), childType, subtreeType)) { + return true; + } + } + return false; +} + +std::unique_ptr resolveCollator( + OperationContext* opCtx, const std::unique_ptr& findCommand) { + if (!findCommand->getCollation().isEmpty()) { + return uassertStatusOKWithContext(CollatorFactoryInterface::get(opCtx->getServiceContext()) + ->makeFromBSON(findCommand->getCollation()), + "unable to parse collation"); + } + return nullptr; +} + +/** + * Helper for building 'out.' 
If there is a projection, parse it and add any metadata dependencies + * it induces. + * + * Throws exceptions if there is an error parsing the projection. + */ +void setProjection(ParsedFindCommand* out, + const boost::intrusive_ptr& expCtx, + const std::unique_ptr& findCommand, + const ProjectionPolicies& policies) { + if (!findCommand->getProjection().isEmpty()) { + out->savedProjectionPolicies.emplace(policies); + out->proj.emplace(projection_ast::parseAndAnalyze(expCtx, + findCommand->getProjection(), + out->filter.get(), + findCommand->getFilter(), + policies)); + + // This will throw if any of the projection's dependencies are unavailable. + DepsTracker{out->unavailableMetadata}.requestMetadata(out->proj->metadataDeps()); + } +} + +/** + * Helper for building 'out.' If there is a sort, parse it and add any metadata dependencies it + * induces. + * + * Throws exceptions if there is an error parsing the sort pattern. + */ +void setSort(ParsedFindCommand* out, + const boost::intrusive_ptr& expCtx, + const std::unique_ptr& findCommand) { + if (!findCommand->getSort().isEmpty()) { + // A $natural sort is really a hint, and should be handled as such. Furthermore, the + // downstream sort handling code may not expect a $natural sort. + // + // We have already validated that if there is a $natural sort and a hint, that the hint + // also specifies $natural with the same direction. Therefore, it is safe to clear the + // $natural sort and rewrite it as a $natural hint. + if (findCommand->getSort()[query_request_helper::kNaturalSortField]) { + findCommand->setHint(findCommand->getSort().getOwned()); + findCommand->setSort(BSONObj{}); + } + out->sort.emplace(findCommand->getSort(), expCtx); + } +} + +/** + * Helper for building 'out.' If there is a sort, parse it and add any metadata dependencies it + * induces. + */ +Status setSortAndProjection(ParsedFindCommand* out, + const boost::intrusive_ptr& expCtx, + const std::unique_ptr& findCommand, + const ProjectionPolicies& policies) { + try { + setProjection(out, expCtx, findCommand, policies); + setSort(out, expCtx, findCommand); + } catch (const DBException& ex) { + return ex.toStatus(); + } + + return Status::OK(); +} + +/** + * Helper for building 'out.' Sets 'out->filter' and validates that it is well formed. In the + * process, also populates 'out->unavailableMetadata.' + */ +Status setFilter(ParsedFindCommand* out, + std::unique_ptr filter, + const std::unique_ptr& findCommand) { + // Verify the filter follows certain rules like there must be at most one text clause. + auto swMeta = parsed_find_command::isValid(filter.get(), *findCommand); + if (!swMeta.isOK()) { + return swMeta.getStatus(); + } + out->unavailableMetadata = swMeta.getValue(); + out->filter = std::move(filter); + return Status::OK(); +} + + +StatusWith> parseWithValidatedCollator( + const boost::intrusive_ptr& expCtx, + std::unique_ptr findCommand, + const ExtensionsCallback& extensionsCallback, + MatchExpressionParser::AllowedFeatureSet allowedFeatures, + const ProjectionPolicies& projectionPolicies) { + auto out = std::make_unique(); + + if (auto status = query_request_helper::validateFindCommandRequest(*findCommand); + !status.isOK()) { + return status; + } + + // Parse the MatchExpression. 
+ auto statusWithMatcher = MatchExpressionParser::parse( + findCommand->getFilter(), expCtx, extensionsCallback, allowedFeatures); + if (!statusWithMatcher.isOK()) { + return statusWithMatcher.getStatus(); + } + + // Stop counting expressions after they have been parsed to exclude expressions created + // during optimization and other processing steps. + expCtx->stopExpressionCounters(); + + if (auto status = setFilter(out.get(), std::move(statusWithMatcher.getValue()), findCommand); + !status.isOK()) { + return status; + } + + if (auto status = setSortAndProjection(out.get(), expCtx, findCommand, projectionPolicies); + !status.isOK()) { + return status; + } + + out->findCommandRequest = std::move(findCommand); + return {std::move(out)}; +} + +} // namespace + +StatusWith> ParsedFindCommand::withExistingFilter( + const boost::intrusive_ptr& expCtx, + std::unique_ptr collator, + std::unique_ptr filter, + std::unique_ptr findCommandRequest) { + auto out = std::make_unique(); + out->collator = std::move(collator); + if (auto status = setFilter(out.get(), std::move(filter), findCommandRequest); !status.isOK()) { + return status; + } + if (auto status = setSortAndProjection( + out.get(), expCtx, findCommandRequest, ProjectionPolicies::findProjectionPolicies()); + !status.isOK()) { + return status; + } + out->findCommandRequest = std::move(findCommandRequest); + return std::move(out); +} + +namespace parsed_find_command { +StatusWith isValid(const MatchExpression* root, + const FindCommandRequest& findCommand) { + QueryMetadataBitSet unavailableMetadata{}; + + // There can only be one TEXT. If there is a TEXT, it cannot appear inside a NOR. + // + // Note that the query grammar (as enforced by the MatchExpression parser) forbids TEXT + // inside of value-expression clauses like NOT, so we don't check those here. + size_t numText = QueryPlannerCommon::countNodes(root, MatchExpression::TEXT); + if (numText > 1) { + return Status(ErrorCodes::BadValue, "Too many text expressions"); + } else if (1 == numText) { + if (hasNodeInSubtree(root, MatchExpression::TEXT, MatchExpression::NOR)) { + return Status(ErrorCodes::BadValue, "text expression not allowed in nor"); + } + } else { + // Text metadata is not available. + unavailableMetadata.set(DocumentMetadataFields::kTextScore); + } + + // There can only be one NEAR. If there is a NEAR, it must be either the root or the root + // must be an AND and its child must be a NEAR. + size_t numGeoNear = QueryPlannerCommon::countNodes(root, MatchExpression::GEO_NEAR); + if (numGeoNear > 1) { + return Status(ErrorCodes::BadValue, "Too many geoNear expressions"); + } else if (1 == numGeoNear) { + // Do nothing, we will perform extra checks in CanonicalQuery::isValidNormalized. + } else { + // Geo distance and geo point metadata are unavailable. + unavailableMetadata |= DepsTracker::kAllGeoNearData; + } + + const BSONObj& sortObj = findCommand.getSort(); + BSONElement sortNaturalElt = sortObj["$natural"]; + const BSONObj& hintObj = findCommand.getHint(); + BSONElement hintNaturalElt = hintObj["$natural"]; + + if (sortNaturalElt && sortObj.nFields() != 1) { + return Status(ErrorCodes::BadValue, + str::stream() << "Cannot include '$natural' in compound sort: " << sortObj); + } + + if (hintNaturalElt && hintObj.nFields() != 1) { + return Status(ErrorCodes::BadValue, + str::stream() << "Cannot include '$natural' in compound hint: " << hintObj); + } + + // NEAR cannot have a $natural sort or $natural hint. 
+ if (numGeoNear > 0) { + if (sortNaturalElt) { + return Status(ErrorCodes::BadValue, + "geoNear expression not allowed with $natural sort order"); + } + + if (hintNaturalElt) { + return Status(ErrorCodes::BadValue, + "geoNear expression not allowed with $natural hint"); + } + } + + // TEXT and NEAR cannot both be in the query. + if (numText > 0 && numGeoNear > 0) { + return Status(ErrorCodes::BadValue, "text and geoNear not allowed in same query"); + } + + // TEXT and {$natural: ...} sort order cannot both be in the query. + if (numText > 0 && sortNaturalElt) { + return Status(ErrorCodes::BadValue, "text expression not allowed with $natural sort order"); + } + + // TEXT and hint cannot both be in the query. + if (numText > 0 && !hintObj.isEmpty()) { + return Status(ErrorCodes::BadValue, "text and hint not allowed in same query"); + } + + // TEXT and tailable are incompatible. + if (numText > 0 && findCommand.getTailable()) { + return Status(ErrorCodes::BadValue, "text and tailable cursor not allowed in same query"); + } + + // NEAR and tailable are incompatible. + if (numGeoNear > 0 && findCommand.getTailable()) { + return Status(ErrorCodes::BadValue, + "Tailable cursors and geo $near cannot be used together"); + } + + // $natural sort order must agree with hint. + if (sortNaturalElt) { + if (!hintObj.isEmpty() && !hintNaturalElt) { + return Status(ErrorCodes::BadValue, "index hint not allowed with $natural sort order"); + } + if (hintNaturalElt) { + if (hintNaturalElt.numberInt() != sortNaturalElt.numberInt()) { + return Status(ErrorCodes::BadValue, + "$natural hint must be in the same direction as $natural sort order"); + } + } + } + + return unavailableMetadata; +} + +StatusWith, std::unique_ptr>> +parse(OperationContext* opCtx, + std::unique_ptr findCommand, + const ExtensionsCallback& extensionsCallback, + MatchExpressionParser::AllowedFeatureSet allowedFeatures, + const ProjectionPolicies& projectionPolicies) { + // Make the expCtx. + invariant(findCommand->getNamespaceOrUUID().isNamespaceString()); + auto expCtx = make_intrusive( + opCtx, *findCommand, resolveCollator(opCtx, findCommand), true /* mayDbProfile */); + auto swResult = parseWithValidatedCollator( + expCtx, std::move(findCommand), extensionsCallback, allowedFeatures, projectionPolicies); + if (!swResult.isOK()) { + return swResult.getStatus(); + } + + return std::pair{std::move(expCtx), std::move(swResult.getValue())}; +} + +StatusWith> parse( + const boost::intrusive_ptr& expCtx, + std::unique_ptr findCommand, + const ExtensionsCallback& extensionsCallback, + MatchExpressionParser::AllowedFeatureSet allowedFeatures, + const ProjectionPolicies& projectionPolicies) { + // A collator can enter through both the FindCommandRequest and ExpressionContext arguments. + // This invariant ensures that both collators are the same because downstream we + // pull the collator from only one of the ExpressionContext carrier. 
+ auto collator = resolveCollator(expCtx->opCtx, findCommand); + if (collator.get() && expCtx->getCollator()) { + invariant(CollatorInterface::collatorsMatch(collator.get(), expCtx->getCollator())); + } + return parseWithValidatedCollator( + expCtx, std::move(findCommand), extensionsCallback, allowedFeatures, projectionPolicies); +} +} // namespace parsed_find_command +} // namespace mongo diff --git a/src/mongo/db/query/parsed_find_command.h b/src/mongo/db/query/parsed_find_command.h new file mode 100644 index 0000000000000..bf088747ebda0 --- /dev/null +++ b/src/mongo/db/query/parsed_find_command.h @@ -0,0 +1,134 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/projection.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/db/query/sort_pattern.h" + +namespace mongo { + +/** + * Represents a find command request, but with more fully parsed ASTs for some fields which are + * still raw BSONObj on the FindCommandRequest type. + */ +struct ParsedFindCommand { + ParsedFindCommand() = default; + + /** + * This API adds the ability to construct from a pre-parsed filter. The other arguments will be + * re-parsed again from BSON on the 'findCommandRequest' argument, since we don't have a good + * way of cloning them. 
+ */ + static StatusWith> withExistingFilter( + const boost::intrusive_ptr& expCtx, + std::unique_ptr collator, + std::unique_ptr filter, + std::unique_ptr findCommandRequest); + + std::unique_ptr collator; + std::unique_ptr filter; + boost::optional proj; + boost::optional sort; + + // Based on parsing the query, which metadata will *not* be available. For example, if there is + // no $text clause, then a text score will not be available. + QueryMetadataBitSet unavailableMetadata; + + // This is saved for an edge case where we need to re-parse a projection later. Only populated + // if there is a non-empty projection. + boost::optional savedProjectionPolicies; + + // All other parameters to the find command which do not have AST-like types and can be + // appropriately tracked as raw value types like ints. The fields above like 'filter' are all + // still present in their raw form on this FindCommandRequest, but it is not expected that they + // will be useful other than to keep the original BSON values around in-memory to avoid copying + // large strings and such. + std::unique_ptr findCommandRequest; +}; + +namespace parsed_find_command { +/** + * Validates the match expression 'root' as well as the query specified by 'request', checking for + * illegal combinations of operators. Returns a non-OK status if any such illegal combination is + * found. + * + * This method can be called both on normalized and non-normalized 'root'. However, some checks can + * only be performed once the match expression is normalized. To perform these checks one can call + * 'CanonicalQuery::isValidNormalized()'. + * + * On success, returns a bitset indicating which types of metadata are *unavailable*. For example, + * if 'root' does not contain a $text predicate, then the returned metadata bitset will indicate + * that text score metadata is unavailable. This means that if subsequent $meta:"textScore" + * expressions are found during analysis of the query, we should raise an error. + */ +StatusWith isValid(const MatchExpression* root, + const FindCommandRequest& findCommand); + +/** + * Parses each big component of the input 'findCommand.' Throws exceptions if failing to parse. + * Comes in one overload which will create an ExpressionContext for the caller, and one overload to + * be used when the caller already has an ExpressionContext. + */ +StatusWith, std::unique_ptr>> +parse(OperationContext* opCtx, + std::unique_ptr findCommand, + const ExtensionsCallback& extensionsCallback = ExtensionsCallbackNoop(), + MatchExpressionParser::AllowedFeatureSet allowedFeatures = + MatchExpressionParser::kDefaultSpecialFeatures, + const ProjectionPolicies& projectionPolicies = ProjectionPolicies::findProjectionPolicies()); + +// Overload of the above for when the caller has an available ExpressionContext.
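For orientation, a hypothetical caller of the first `parse()` overload might look like the sketch below; `nss`, `opCtx`, and the command contents are assumed, error handling is elided, and this is not code from the patch.

```cpp
// Illustration only: parse a find command into ASTs via the overload above.
auto findCommand = std::make_unique<FindCommandRequest>(nss);  // 'nss' assumed
findCommand->setFilter(fromjson("{a: {$gte: 5}}"));
findCommand->setSort(fromjson("{b: -1}"));

auto swResult = parsed_find_command::parse(opCtx, std::move(findCommand));
if (swResult.isOK()) {
    // The pair carries the ExpressionContext built for the request plus the
    // parsed filter/sort/projection ASTs held by ParsedFindCommand.
    auto& [expCtx, parsedRequest] = swResult.getValue();
}
```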
+StatusWith> parse( + const boost::intrusive_ptr& expCtx, + std::unique_ptr findCommand, + const ExtensionsCallback& extensionsCallback = ExtensionsCallbackNoop(), + MatchExpressionParser::AllowedFeatureSet allowedFeatures = + MatchExpressionParser::kDefaultSpecialFeatures, + const ProjectionPolicies& projectionPolicies = ProjectionPolicies::findProjectionPolicies()); + +} // namespace parsed_find_command +} // namespace mongo diff --git a/src/mongo/db/query/partitioned_cache.h b/src/mongo/db/query/partitioned_cache.h index a5b1f1218b6a4..87f622373d24c 100644 --- a/src/mongo/db/query/partitioned_cache.h +++ b/src/mongo/db/query/partitioned_cache.h @@ -176,7 +176,7 @@ class PartitionedCache { auto lockedPartition = _partitionedCache->lockOnePartitionById(partitionId); for (auto&& [key, entry] : *lockedPartition) { - op(key, entry); + op(*key, entry); } } } diff --git a/src/mongo/db/query/plan_cache.h b/src/mongo/db/query/plan_cache.h index 04b2cf118f9ad..45a1144a0efe5 100644 --- a/src/mongo/db/query/plan_cache.h +++ b/src/mongo/db/query/plan_cache.h @@ -29,19 +29,46 @@ #pragma once +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/catalog/util/partitioned.h" #include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/query/lru_key_value.h" #include "mongo/db/query/partitioned_cache.h" #include "mongo/db/query/plan_cache_callbacks.h" #include "mongo/db/query/plan_cache_debug_info.h" +#include "mongo/db/query/plan_ranking_decision.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" #include "mongo/util/container_size_helper.h" +#include "mongo/util/time_support.h" namespace mongo { class QuerySolution; struct QuerySolutionNode; - template class PlanCacheEntryBase; diff --git a/src/mongo/db/query/plan_cache_callbacks.cpp b/src/mongo/db/query/plan_cache_callbacks.cpp index e467938b1232c..ab8aa6a6f7c5a 100644 --- a/src/mongo/db/query/plan_cache_callbacks.cpp +++ b/src/mongo/db/query/plan_cache_callbacks.cpp @@ -30,7 +30,12 @@ #include "mongo/db/query/plan_cache_callbacks.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/plan_cache_callbacks.h b/src/mongo/db/query/plan_cache_callbacks.h index 0bc2df32e5136..fe9c82f90d4fb 100644 --- a/src/mongo/db/query/plan_cache_callbacks.h +++ b/src/mongo/db/query/plan_cache_callbacks.h @@ -29,8 +29,19 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query_encoder.h" #include "mongo/db/query/plan_cache_debug_info.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/hex.h" namespace mongo { // The logging facility enforces the rule that logging 
should not be done in a header file. Since diff --git a/src/mongo/db/query/plan_cache_debug_info.h b/src/mongo/db/query/plan_cache_debug_info.h index 4501e8a10e1f0..17cd36932a5e4 100644 --- a/src/mongo/db/query/plan_cache_debug_info.h +++ b/src/mongo/db/query/plan_cache_debug_info.h @@ -31,6 +31,7 @@ #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/plan_ranking_decision.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/container_size_helper.h" namespace mongo::plan_cache_debug_info { @@ -143,7 +144,7 @@ struct DebugInfoSBE { } CollectionDebugInfoSBE mainStats; - StringMap secondaryStats; + mongo::stdx::unordered_map secondaryStats; std::string planSummary; }; } // namespace mongo::plan_cache_debug_info diff --git a/src/mongo/db/query/plan_cache_indexability.cpp b/src/mongo/db/query/plan_cache_indexability.cpp index 560e33e4df718..3ef0d821acc5d 100644 --- a/src/mongo/db/query/plan_cache_indexability.cpp +++ b/src/mongo/db/query/plan_cache_indexability.cpp @@ -27,24 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/query/plan_cache_indexability.h" - -#include - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/exec/projection_executor_utils.h" -#include "mongo/db/index/wildcard_key_generator.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/index_names.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_algo.h" -#include "mongo/db/matcher/expression_internal_expr_comparison.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/index_entry.h" +#include "mongo/db/query/plan_cache_indexability.h" #include "mongo/db/query/planner_ixselect.h" -#include +#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/query/plan_cache_indexability.h b/src/mongo/db/query/plan_cache_indexability.h index 0aa08359c27f8..95893e3630a88 100644 --- a/src/mongo/db/query/plan_cache_indexability.h +++ b/src/mongo/db/query/plan_cache_indexability.h @@ -30,16 +30,22 @@ #pragma once #include +#include +#include #include +#include "mongo/base/error_extra_info.h" +#include "mongo/base/string_data.h" #include "mongo/util/string_map.h" namespace mongo { class BSONObj; class CollatorInterface; + class CompositeIndexabilityDiscriminator; class MatchExpression; struct CoreIndexInfo; + namespace projection_executor { class ProjectionExecutor; } diff --git a/src/mongo/db/query/plan_cache_indexability_test.cpp b/src/mongo/db/query/plan_cache_indexability_test.cpp index 49f9fb79285cf..a4ce62f7653fd 100644 --- a/src/mongo/db/query/plan_cache_indexability_test.cpp +++ b/src/mongo/db/query/plan_cache_indexability_test.cpp @@ -27,16 +27,35 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/wildcard_key_generator.h" -#include "mongo/db/json.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/index_entry.h" #include "mongo/db/query/plan_cache_indexability.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/plan_cache_invalidator.cpp b/src/mongo/db/query/plan_cache_invalidator.cpp index 3ef1607be8559..eaf78290983d6 100644 --- a/src/mongo/db/query/plan_cache_invalidator.cpp +++ b/src/mongo/db/query/plan_cache_invalidator.cpp @@ -30,8 +30,19 @@ #include "mongo/db/query/plan_cache_invalidator.h" +#include +#include + +#include +#include + #include "mongo/db/query/sbe_plan_cache.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/plan_cache_invalidator.h b/src/mongo/db/query/plan_cache_invalidator.h index f36abf63cc547..0095cd10d8dc9 100644 --- a/src/mongo/db/query/plan_cache_invalidator.h +++ b/src/mongo/db/query/plan_cache_invalidator.h @@ -29,7 +29,14 @@ #pragma once +#include + +#include +#include + #include "mongo/db/catalog/collection.h" +#include "mongo/db/service_context.h" +#include "mongo/util/uuid.h" namespace mongo { /** diff --git a/src/mongo/db/query/plan_cache_key_factory.cpp b/src/mongo/db/query/plan_cache_key_factory.cpp index c5d486cdc1ce0..af6d8df2688fd 100644 --- a/src/mongo/db/query/plan_cache_key_factory.cpp +++ b/src/mongo/db/query/plan_cache_key_factory.cpp @@ -29,10 +29,25 @@ #include "mongo/db/query/plan_cache_key_factory.h" +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query_encoder.h" #include "mongo/db/query/collection_query_info.h" +#include "mongo/db/query/plan_cache_key_info.h" #include "mongo/db/query/planner_ixselect.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/shard_version.h" namespace mongo { namespace plan_cache_detail { @@ -107,40 +122,7 @@ PlanCacheKeyInfo makePlanCacheKeyInfo(CanonicalQuery::QueryShapeString&& shapeSt } namespace { -/** - * Returns the highest index commit timestamp associated with an index on 'collection' that is - * visible to this operation. 
- */ -boost::optional computeNewestVisibleIndexTimestamp(OperationContext* opCtx, - const CollectionPtr& collection) { - auto recoveryUnit = opCtx->recoveryUnit(); - auto mySnapshot = recoveryUnit->getPointInTimeReadTimestamp(opCtx).get_value_or( - recoveryUnit->getCatalogConflictingTimestamp()); - if (mySnapshot.isNull()) { - return boost::none; - } - - Timestamp currentNewestVisible = Timestamp::min(); - - auto ii = collection->getIndexCatalog()->getIndexIterator( - opCtx, IndexCatalog::InclusionPolicy::kReady); - while (ii->more()) { - const IndexCatalogEntry* ice = ii->next(); - auto minVisibleSnapshot = ice->getMinimumVisibleSnapshot(); - if (!minVisibleSnapshot) { - continue; - } - - if (mySnapshot < *minVisibleSnapshot) { - continue; - } - - currentNewestVisible = std::max(currentNewestVisible, *minVisibleSnapshot); - } - - return currentNewestVisible.isNull() ? boost::optional{} : currentNewestVisible; -} - +// TODO: SERVER-77571 use acquisitions APIs for retrieving the shardVersion. sbe::PlanCacheKeyCollectionState computeCollectionState(OperationContext* opCtx, const CollectionPtr& collection, bool isSecondaryColl) { @@ -159,7 +141,6 @@ sbe::PlanCacheKeyCollectionState computeCollectionState(OperationContext* opCtx, } return {collection->uuid(), CollectionQueryInfo::get(collection).getPlanCacheInvalidatorVersion(), - plan_cache_detail::computeNewestVisibleIndexTimestamp(opCtx, collection), keyShardingEpoch}; } } // namespace @@ -194,7 +175,7 @@ sbe::PlanCacheKey make(const CanonicalQuery& query, const MultipleCollectionAcce opCtx, collection, true /* isSecondaryColl */)); } } - + secondaryCollectionStates.shrink_to_fit(); auto shapeString = canonical_query_encoder::encodeSBE(query); return {plan_cache_detail::makePlanCacheKeyInfo( std::move(shapeString), query.root(), collections.getMainCollection()), diff --git a/src/mongo/db/query/plan_cache_key_factory.h b/src/mongo/db/query/plan_cache_key_factory.h index 663297093c766..899738f6dfd36 100644 --- a/src/mongo/db/query/plan_cache_key_factory.h +++ b/src/mongo/db/query/plan_cache_key_factory.h @@ -29,9 +29,14 @@ #pragma once +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" +#include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_cache_indexability.h" #include "mongo/db/query/sbe_plan_cache.h" namespace mongo { diff --git a/src/mongo/db/query/plan_cache_key_info.h b/src/mongo/db/query/plan_cache_key_info.h index ff97ef6a57cb3..57680956ba1e4 100644 --- a/src/mongo/db/query/plan_cache_key_info.h +++ b/src/mongo/db/query/plan_cache_key_info.h @@ -29,6 +29,13 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query_encoder.h" namespace mongo { @@ -67,6 +74,10 @@ class PlanCacheKeyInfo { return _key; } + size_t keySizeInBytes() const { + return _key.size(); + } + /** * Return the 'indexability discriminators', that is, the plan cache key component after the * stable key, but before the boolean indicating whether we are using the classic engine. 
diff --git a/src/mongo/db/query/plan_cache_key_info_test.cpp b/src/mongo/db/query/plan_cache_key_info_test.cpp index 0052936f5d02c..104f1b7369d2a 100644 --- a/src/mongo/db/query/plan_cache_key_info_test.cpp +++ b/src/mongo/db/query/plan_cache_key_info_test.cpp @@ -27,14 +27,37 @@ * it in the license file. */ +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/index/wildcard_key_generator.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/canonical_query_test_util.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/plan_cache_indexability.h" #include "mongo/db/query/plan_cache_key_factory.h" #include "mongo/db/query/plan_cache_key_info.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { @@ -99,38 +122,6 @@ std::pair> makeWildcardUpdate std::move(wcProj)}; } -/** - * Check that the stable keys of 'a' and 'b' are not equal because of the last character. - */ -void assertPlanCacheKeysUnequalDueToForceClassicEngineValue(const PlanCacheKeyInfo& a, - const PlanCacheKeyInfo& b) { - auto aUnstablePart = a.getIndexabilityDiscriminators(); - auto bUnstablePart = b.getIndexabilityDiscriminators(); - auto aStablePart = a.getQueryShape(); - auto bStablePart = b.getQueryShape(); - - ASSERT_EQ(aUnstablePart, bUnstablePart); - // The last 2 characters (plus separator) of the stable part encodes the engine that uses this - // PlanCacheKey and if apiStrict was used. So the stable parts except for the last two - // characters should be identical. - ASSERT_EQ(aStablePart.substr(0, aStablePart.size() - 2), - bStablePart.substr(0, bStablePart.size() - 2)); - - // Should have at least 2 byte to represent whether we must use the classic engine and stable - // API. - ASSERT_GTE(aStablePart.size(), 2); - - // The indexability discriminators should match. - ASSERT_EQ(a.getIndexabilityDiscriminators(), b.getIndexabilityDiscriminators()); - - // The stable parts should not match because of the second character from the back, encoding the - // engine type. - ASSERT_NE(aStablePart, bStablePart); - ASSERT_NE(aStablePart[aStablePart.size() - 2], bStablePart[bStablePart.size() - 2]); - // Ensure that the the apiStrict values are equal. - ASSERT_EQ(aStablePart.back(), bStablePart.back()); -} - /** * Check that the stable keys of 'a' and 'b' are equal, but the index discriminators are not. */ @@ -521,32 +512,6 @@ TEST_F(PlanCacheKeyInfoTest, } } -TEST_F(PlanCacheKeyInfoTest, DifferentQueryEngines) { - const auto keyPattern = BSON("a" << 1); - const std::vector indexCores = { - CoreIndexInfo(keyPattern, - IndexNames::nameToType(IndexNames::findPluginName(keyPattern)), - false, // sparse - IndexEntry::Identifier{""})}; // name - - // Helper to construct a plan cache key given the 'forceClassicEngine' flag. 
- auto constructPlanCacheKey = [&](bool forceClassicEngine) { - RAIIServerParameterControllerForTest controller{"internalQueryFrameworkControl", - forceClassicEngine ? "forceClassicEngine" - : "trySbeEngine"}; - const auto queryStr = "{a: 0}"; - unique_ptr cq(canonicalize(queryStr)); - return makeKey(*cq, indexCores); - }; - - const auto classicEngineKey = constructPlanCacheKey(true); - const auto noClassicEngineKey = constructPlanCacheKey(false); - - // Check that the two plan cache keys are not equal because the plans were created under - // different engines. - assertPlanCacheKeysUnequalDueToForceClassicEngineValue(classicEngineKey, noClassicEngineKey); -} - TEST_F(PlanCacheKeyInfoTest, ComputeKeyWildcardDiscriminatesCorrectlyWithPartialFilterAndExpression) { // Partial filter is an AND of multiple conditions. diff --git a/src/mongo/db/query/plan_cache_test.cpp b/src/mongo/db/query/plan_cache_test.cpp index 5edf0ea76acf3..4e7327b9e5f6e 100644 --- a/src/mongo/db/query/plan_cache_test.cpp +++ b/src/mongo/db/query/plan_cache_test.cpp @@ -34,33 +34,64 @@ #include "mongo/db/query/plan_cache.h" -#include +#include +#include +#include +#include #include -#include - +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_mock.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/exec/plan_cache_util.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/wildcard_key_generator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" -#include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/canonical_query_encoder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query_test_util.h" +#include "mongo/db/query/classic_plan_cache.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/plan_cache_indexability.h" #include "mongo/db/query/plan_cache_key_factory.h" -#include "mongo/db/query/plan_ranker.h" +#include "mongo/db/query/plan_cache_key_info.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_lib.h" +#include "mongo/db/query/query_request_helper.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/query/sbe_plan_cache.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include 
"mongo/util/intrusive_counter.h" #include "mongo/util/scopeguard.h" -#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -1006,7 +1037,7 @@ class CachePlanSelectionTest : public mongo::unittest::Test { // Clean up any previous state from a call to runQueryFull or runQueryAsCommand. solns.clear(); - NamespaceString nss("test.collection"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.collection"); auto findCommand = std::make_unique(nss); findCommand->setFilter(query); findCommand->setSort(sort); @@ -1121,7 +1152,7 @@ class CachePlanSelectionTest : public mongo::unittest::Test { QueryTestServiceContext serviceContext; auto opCtx = serviceContext.makeOperationContext(); - NamespaceString nss("test.collection"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.collection"); auto findCommand = std::make_unique(nss); findCommand->setFilter(query); findCommand->setSort(sort); @@ -2116,4 +2147,113 @@ TEST_F(PlanCacheTest, PlanCacheMaxSizeParameterCanBeZero) { PlanSecurityLevel::kNotSensitive)); ASSERT_EQ(0U, planCache.size()); } + +/** + * Tests specifically for SBE plan cache. + */ +class SbePlanCacheTest : public unittest::Test { +protected: + void setUp() override { + _queryTestServiceContext = std::make_unique(); + _operationContext = _queryTestServiceContext->makeOperationContext(); + _collection = std::make_unique(_nss); + _collectionPtr = CollectionPtr(_collection.get()); + } + + void tearDown() override { + _collectionPtr.reset(); + _collection.reset(); + _operationContext.reset(); + _queryTestServiceContext.reset(); + } + + sbe::PlanCacheKey makeSbeKey(const CanonicalQuery& cq) { + ASSERT_TRUE(cq.isSbeCompatible()); + return plan_cache_key_factory::make(cq, _collectionPtr); + } + + /** + * Checks if plan cache size calculation returns expected result. + */ + void assertSbePlanCacheKeySize(const char* queryStr, + const char* sortStr, + const char* projectionStr, + const char* collationStr) { + // Create canonical query. + std::unique_ptr cq = makeCQ(queryStr, sortStr, projectionStr, collationStr); + cq->setSbeCompatible(true); + + auto sbeKey = makeSbeKey(*cq); + + // The static size of the key structure. + const size_t staticSize = sizeof(sbeKey); + + // The actual key representation is encoded as a string. + const size_t keyRepresentationSize = sbeKey.toString().size(); + + // The tests are setup for a single collection. 
+ const size_t additionalCollectionSize = 0; + + ASSERT_TRUE(sbeKey.estimatedKeySizeBytes() == + staticSize + keyRepresentationSize + additionalCollectionSize); + } + +private: + static const NamespaceString _nss; + + std::unique_ptr makeCQ(const BSONObj& query, + const BSONObj& sort, + const BSONObj& projection, + const BSONObj& collation) { + auto findCommand = std::make_unique(_nss); + findCommand->setFilter(query); + findCommand->setSort(sort); + findCommand->setProjection(projection); + findCommand->setCollation(collation); + auto statusWithInputQuery = + CanonicalQuery::canonicalize(_operationContext.get(), std::move(findCommand)); + ASSERT_OK(statusWithInputQuery.getStatus()); + return std::move(statusWithInputQuery.getValue()); + } + + std::unique_ptr makeCQ(const char* queryStr, + const char* sortStr, + const char* projectionStr, + const char* collationStr) { + BSONObj query = fromjson(queryStr); + BSONObj sort = fromjson(sortStr); + BSONObj projection = fromjson(projectionStr); + BSONObj collation = fromjson(collationStr); + + auto findCommand = std::make_unique(_nss); + findCommand->setFilter(query); + findCommand->setSort(sort); + findCommand->setProjection(projection); + findCommand->setCollation(collation); + auto statusWithInputQuery = + CanonicalQuery::canonicalize(_operationContext.get(), std::move(findCommand)); + ASSERT_OK(statusWithInputQuery.getStatus()); + return std::move(statusWithInputQuery.getValue()); + } + + std::unique_ptr _queryTestServiceContext; + + ServiceContext::UniqueOperationContext _operationContext; + std::unique_ptr _collection; + CollectionPtr _collectionPtr; +}; + +const NamespaceString SbePlanCacheTest::_nss( + NamespaceString::createNamespaceString_forTest("test.collection")); + +TEST_F(SbePlanCacheTest, SBEPlanCacheBudgetTest) { + assertSbePlanCacheKeySize("{a: 2}", "{}", "{_id: 1, a: 1}", "{}"); + + assertSbePlanCacheKeySize( + "{b: 'foo'}", "{}", "{_id: 1, b: 1}", "{locale: 'mock_reverse_string'}"); + + assertSbePlanCacheKeySize( + "{a: 1, b: 1}", "{a: -1}", "{_id: 0, a: 1}", "{locale: 'mock_reverse_string'}"); +} + } // namespace diff --git a/src/mongo/db/query/plan_enumerator.cpp b/src/mongo/db/query/plan_enumerator.cpp index 4db9ca8b69385..58979cd50eeed 100644 --- a/src/mongo/db/query/plan_enumerator.cpp +++ b/src/mongo/db/query/plan_enumerator.cpp @@ -30,11 +30,36 @@ #include "mongo/db/query/plan_enumerator.h" +#include +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index_names.h" #include "mongo/db/query/index_tag.h" #include "mongo/db/query/indexability.h" +#include "mongo/db/query/query_planner_common.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -60,8 +85,8 @@ std::string getPathPrefix(std::string path) { * is a predicate that is required to use an index. 
*/ bool expressionRequiresIndex(const MatchExpression* node) { - return CanonicalQuery::countNodes(node, MatchExpression::GEO_NEAR) > 0 || - CanonicalQuery::countNodes(node, MatchExpression::TEXT) > 0; + return QueryPlannerCommon::countNodes(node, MatchExpression::GEO_NEAR) > 0 || + QueryPlannerCommon::countNodes(node, MatchExpression::TEXT) > 0; } size_t getPathLength(const MatchExpression* expr) { @@ -83,7 +108,7 @@ bool isPathOutsideElemMatch(const RelevantTag* rt, size_t component) { } const size_t elemMatchRootLength = getPathLength(rt->elemMatchExpr); - invariant(elemMatchRootLength > 0); + tassert(6811401, "Failed procondition in query plan enumerator", elemMatchRootLength > 0); return component < elemMatchRootLength; } @@ -92,7 +117,9 @@ using PossibleFirstAssignment = std::vector; void getPossibleFirstAssignments(const IndexEntry& thisIndex, const vector& predsOverLeadingField, std::vector* possibleFirstAssignments) { - invariant(thisIndex.multikey && !thisIndex.multikeyPaths.empty()); + tassert(6811402, + "Failed procondition in query plan enumerator", + thisIndex.multikey && !thisIndex.multikeyPaths.empty()); if (thisIndex.multikeyPaths[0].empty()) { // No prefix of the leading index field causes the index to be multikey. In other words, the @@ -107,7 +134,7 @@ void getPossibleFirstAssignments(const IndexEntry& thisIndex, // $elemMatch. std::map> predsByElemMatchExpr; for (auto* pred : predsOverLeadingField) { - invariant(pred->getTag()); + tassert(6811403, "Failed procondition in query plan enumerator", pred->getTag()); RelevantTag* rt = static_cast(pred->getTag()); if (rt->elemMatchExpr == nullptr) { @@ -128,15 +155,18 @@ void getPossibleFirstAssignments(const IndexEntry& thisIndex, // leaf expressions inside the $elemMatch can match distinct elements. We are therefore unable // to assign both to the index and intersect the bounds. for (const auto& elemMatchExprIt : predsByElemMatchExpr) { - invariant(!elemMatchExprIt.second.empty()); + tassert(6811404, + "Failed procondition in query plan enumerator", + !elemMatchExprIt.second.empty()); const auto* pred = elemMatchExprIt.second.front(); - invariant(pred->getTag()); + tassert(6811405, "Failed procondition in query plan enumerator", pred->getTag()); RelevantTag* rt = static_cast(pred->getTag()); - invariant(rt->elemMatchExpr != nullptr); + tassert( + 6811406, "Failed procondition in query plan enumerator", rt->elemMatchExpr != nullptr); const size_t elemMatchRootLength = getPathLength(elemMatchExprIt.first); - invariant(elemMatchRootLength > 0); + tassert(6811407, "Failed procondition in query plan enumerator", elemMatchRootLength > 0); // Since the multikey path components are 0-indexed, 'elemMatchRootLength' actually // corresponds to the path component immediately following the root of the $elemMatch. @@ -170,7 +200,7 @@ void getPossibleFirstAssignments(const IndexEntry& thisIndex, bool canAssignPredToIndex(const RelevantTag* rt, const MultikeyComponents& multikeyComponents, StringMap* used) { - invariant(used); + tassert(6811408, "Failed procondition in query plan enumerator", used); const FieldRef path(rt->path); // We start by checking with the shortest prefix of the queried path to avoid needing to undo @@ -186,7 +216,9 @@ bool canAssignPredToIndex(const RelevantTag* rt, // 'pathPrefix' is outside the innermost $elemMatch, so we record its $elemMatch // context to ensure that we don't assign another predicate to 'thisIndex' along // this path unless they are part of the same $elemMatch. 
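Most of the changes in this file (continuing below) swap bare `invariant(cond)` checks for `tassert(<id>, <reason>, cond)`, which attaches a unique numeric code such as 6811401 and a human-readable message to the failure. A minimal stand-in showing the shape of that conversion; the real macros live in mongo/util/assert_util.h and do considerably more, so the two macros below are simplified illustrations only:

```cpp
// Simplified stand-ins for illustration only; not the real invariant()/tassert().
#include <cstdio>
#include <cstdlib>

#define INVARIANT_STANDIN(cond)                                      \
    do {                                                             \
        if (!(cond)) {                                               \
            std::fprintf(stderr, "Invariant failure: %s\n", #cond);  \
            std::abort();                                            \
        }                                                            \
    } while (0)

#define TASSERT_STANDIN(id, msg, cond)                                                  \
    do {                                                                                \
        if (!(cond)) {                                                                  \
            std::fprintf(stderr, "Tripwire assertion %d: %s (%s)\n", id, msg, #cond);   \
            std::abort();                                                               \
        }                                                                               \
    } while (0)

int main() {
    const int elemMatchRootLength = 2;
    // Before: no context beyond the stringified condition.
    INVARIANT_STANDIN(elemMatchRootLength > 0);
    // After: a stable numeric code plus a message explaining the precondition.
    TASSERT_STANDIN(6811401, "Failed precondition in query plan enumerator",
                    elemMatchRootLength > 0);
    return 0;
}
```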
- invariant(rt->elemMatchExpr != nullptr); + tassert(6811409, + "Failed procondition in query plan enumerator", + rt->elemMatchExpr != nullptr); (*used)[pathPrefix] = rt->elemMatchExpr; } else { // 'pathPrefix' is either inside the innermost $elemMatch or not inside an @@ -398,9 +430,9 @@ void PlanEnumerator::allocateAssignment(MatchExpression* expr, size_t newID = _memo.size() + 1; // Shouldn't be anything there already. - verify(_nodeToId.end() == _nodeToId.find(expr)); + MONGO_verify(_nodeToId.end() == _nodeToId.find(expr)); _nodeToId[expr] = newID; - verify(_memo.end() == _memo.find(newID)); + MONGO_verify(_memo.end() == _memo.find(newID)); NodeAssignment* newAssignment = new NodeAssignment(); _memo[newID] = newAssignment; *assign = newAssignment; @@ -561,7 +593,9 @@ bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) { for (size_t i = 0; i < indexedPreds.size(); ++i) { MatchExpression* child = indexedPreds[i]; - invariant(Indexability::nodeCanUseIndexOnOwnField(child)); + tassert(6811410, + "Failed procondition in query plan enumerator", + Indexability::nodeCanUseIndexOnOwnField(child)); RelevantTag* rt = static_cast(child->getTag()); @@ -570,14 +604,20 @@ bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) { // This should include only TEXT and GEO_NEAR preds. // We expect either 0 or 1 mandatory predicates. - invariant(nullptr == mandatoryPred); + tassert(6811411, + "Failed procondition in query plan enumerator", + nullptr == mandatoryPred); // Mandatory predicates are TEXT or GEO_NEAR. - invariant(MatchExpression::TEXT == child->matchType() || - MatchExpression::GEO_NEAR == child->matchType()); + tassert(6811412, + "Failed procondition in query plan enumerator", + MatchExpression::TEXT == child->matchType() || + MatchExpression::GEO_NEAR == child->matchType()); // The mandatory predicate must have a corresponding "mandatory index". - invariant(rt->first.size() != 0 || rt->notFirst.size() != 0); + tassert(6811413, + "Failed procondition in query plan enumerator", + rt->first.size() != 0 || rt->notFirst.size() != 0); mandatoryPred = child; @@ -621,7 +661,8 @@ bool PlanEnumerator::prepMemo(MatchExpression* node, PrepMemoContext context) { if (nullptr != mandatoryPred) { // We must have at least one index which can be used to answer 'mandatoryPred'. - invariant(!mandatoryIndices.empty()); + tassert( + 6811414, "Failed procondition in query plan enumerator", !mandatoryIndices.empty()); return enumerateMandatoryIndex( idxToFirst, idxToNotFirst, mandatoryPred, mandatoryIndices, andAssignment); } @@ -649,7 +690,9 @@ void PlanEnumerator::assignToNonMultikeyMandatoryIndex( // source text. However, the leading and trailing non-text fields of the index cannot be // multikey. As a result, we should use non-multikey predicate assignment rules for such // indexes. - invariant(!index.multikey || index.type == IndexType::INDEX_TEXT); + tassert(6811415, + "Failed procondition in query plan enumerator", + !index.multikey || index.type == IndexType::INDEX_TEXT); // Since the index is not multikey, all predicates over the leading field can be assigned. indexAssign->preds = predsOverLeadingField; @@ -684,8 +727,10 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst, // Only text, 2d, and 2dsphere index types should be able to satisfy // mandatory predicates. 
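The preconditions in prepMemo() and enumerateMandatoryIndex() above (and continuing below) encode the rule that a query carries at most one "mandatory" predicate, TEXT or GEO_NEAR, and that it must end up assigned to a text/2d/2dsphere index. A standalone sketch of the kind of tree walk that detects such predicates; `Node` and `MatchType` are toy stand-ins, not the real MatchExpression hierarchy, and `countNodes` only mirrors the spirit of `QueryPlannerCommon::countNodes`:

```cpp
// Toy expression tree used only to illustrate the counting walk.
#include <cassert>
#include <cstddef>
#include <memory>
#include <vector>

enum class MatchType { AND, EQ, TEXT, GEO_NEAR };

struct Node {
    MatchType type{MatchType::EQ};
    std::vector<std::unique_ptr<Node>> children;
};

// Count nodes of a given type, in the spirit of QueryPlannerCommon::countNodes.
std::size_t countNodes(const Node* node, MatchType type) {
    std::size_t count = node->type == type ? 1 : 0;
    for (const auto& child : node->children) {
        count += countNodes(child.get(), type);
    }
    return count;
}

bool expressionRequiresIndex(const Node* root) {
    return countNodes(root, MatchType::GEO_NEAR) > 0 || countNodes(root, MatchType::TEXT) > 0;
}

int main() {
    // {$and: [{a: {$eq: 1}}, {$text: ...}]} -- the $text predicate is mandatory.
    auto root = std::make_unique<Node>();
    root->type = MatchType::AND;
    auto eq = std::make_unique<Node>();
    auto text = std::make_unique<Node>();
    text->type = MatchType::TEXT;
    root->children.push_back(std::move(eq));
    root->children.push_back(std::move(text));

    assert(expressionRequiresIndex(root.get()));
    assert(countNodes(root.get(), MatchType::TEXT) == 1);  // at most one mandatory predicate
    return 0;
}
```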
- invariant(INDEX_TEXT == thisIndex.type || INDEX_2D == thisIndex.type || - INDEX_2DSPHERE == thisIndex.type); + tassert(6811416, + "Failed procondition in query plan enumerator", + INDEX_TEXT == thisIndex.type || INDEX_2D == thisIndex.type || + INDEX_2DSPHERE == thisIndex.type); OneIndexAssignment indexAssign; indexAssign.index = *indexIt; @@ -708,7 +753,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst, } else if (thisIndex.multikey && !thisIndex.multikeyPaths.empty()) { // 2dsphere indexes are the only special index type that should ever have path-level // multikey information. - invariant(INDEX_2DSPHERE == thisIndex.type); + tassert(6811417, + "Failed procondition in query plan enumerator", + INDEX_2DSPHERE == thisIndex.type); if (predsOverLeadingField.end() != std::find( @@ -751,7 +798,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst, auto mandIt = std::find(predsOverNonLeadingFields.begin(), predsOverNonLeadingFields.end(), mandatoryPred); - invariant(mandIt != predsOverNonLeadingFields.end()); + tassert(6811418, + "Failed procondition in query plan enumerator", + mandIt != predsOverNonLeadingFields.end()); predsOverNonLeadingFields.erase(mandIt); @@ -774,7 +823,9 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst, } else { // The mandatory pred is notFirst. Assign an arbitrary predicate // over the first position. - invariant(!predsOverLeadingField.empty()); + tassert(6811419, + "Failed procondition in query plan enumerator", + !predsOverLeadingField.empty()); indexAssign.preds.push_back(predsOverLeadingField[0]); indexAssign.positions.push_back(0); @@ -823,8 +874,10 @@ bool PlanEnumerator::enumerateMandatoryIndex(const IndexToPredMap& idxToFirst, } // The mandatory predicate must be assigned. - invariant(indexAssign.preds.end() != - std::find(indexAssign.preds.begin(), indexAssign.preds.end(), mandatoryPred)); + tassert(6811420, + "Failed procondition in query plan enumerator", + indexAssign.preds.end() != + std::find(indexAssign.preds.begin(), indexAssign.preds.end(), mandatoryPred)); // Output the assignments for this index. AndEnumerableState state; @@ -863,12 +916,12 @@ void PlanEnumerator::assignPredicate( } void PlanEnumerator::markTraversedThroughElemMatchObj(PrepMemoContext* context) { - invariant(context); + tassert(6811421, "Failed procondition in query plan enumerator", context); for (auto&& pred : context->outsidePreds) { auto relevantTag = static_cast(pred.first->getTag()); // Only indexed predicates should ever be considered as outside predicates eligible for // pushdown. - invariant(relevantTag); + tassert(6811422, "Failed procondition in query plan enumerator", relevantTag); // Check whether the current $elemMatch through which we are traversing is the same as the // outside predicate's $elemMatch context. If so, then that outside predicate hasn't @@ -909,7 +962,7 @@ void PlanEnumerator::enumerateOneIndex( // assigned to the index, but we will ensure that any OneIndexAssignment contains some // predicates from the current node. for (const auto& pred : outsidePreds) { - invariant(pred.first->getTag()); + tassert(6811423, "Failed procondition in query plan enumerator", pred.first->getTag()); RelevantTag* relevantTag = static_cast(pred.first->getTag()); for (auto index : relevantTag->first) { if (idxToFirst.find(index) != idxToFirst.end() || @@ -1026,7 +1079,9 @@ void PlanEnumerator::enumerateOneIndex( } // Output the assignment. 
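Several of the preconditions above hinge on an index's path-level multikey metadata (`thisIndex.multikeyPaths`). A small illustration of what that structure encodes, using plain standard-library types as stand-ins for the real `MultikeyPaths`/`MultikeyComponents` aliases; the exact index and document below are invented for the example:

```cpp
// Illustration only: the planner's multikey-path metadata is, in spirit, one
// set of path-component indexes per index field, recording which components
// are arrays. The aliases below are stand-ins, not the real typedefs.
#include <cassert>
#include <cstddef>
#include <set>
#include <vector>

using Components = std::set<std::size_t>;  // stand-in for MultikeyComponents
using Paths = std::vector<Components>;     // stand-in for MultikeyPaths, one entry per key field

int main() {
    // Index {"a.b": 1, "c": 1} over documents like {a: [{b: 1}, {b: 2}], c: 3}:
    // component 0 ("a") of the first field is an array, so that field is multikey;
    // the second field "c" is not multikey at all.
    Paths multikeyPaths = {{0}, {}};

    // "No prefix of the leading index field causes the index to be multikey"
    // corresponds to the multikeyPaths[0].empty() test in getPossibleFirstAssignments().
    assert(!multikeyPaths[0].empty());  // the leading field *is* multikey here
    assert(multikeyPaths[1].empty());   // the trailing field is not
    return 0;
}
```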
- invariant(!indexAssign.preds.empty()); + tassert(6811424, + "Failed procondition in query plan enumerator", + !indexAssign.preds.empty()); AndEnumerableState state; state.assignments.push_back(std::move(indexAssign)); andAssignment->choices.push_back(std::move(state)); @@ -1331,7 +1386,6 @@ void PlanEnumerator::getIndexedPreds(MatchExpression* node, } bool PlanEnumerator::prepSubNodes(MatchExpression* node, - PrepMemoContext context, vector* subnodesOut, vector* mandatorySubnodes) { @@ -1366,9 +1420,13 @@ bool PlanEnumerator::prepSubNodes(MatchExpression* node, childContext.elemMatchExpr = child; childContext.outsidePreds = context.outsidePreds; markTraversedThroughElemMatchObj(&childContext); - prepSubNodes(child, childContext, subnodesOut, mandatorySubnodes); + if (!prepSubNodes(child, childContext, subnodesOut, mandatorySubnodes)) { + return false; + } } else if (MatchExpression::AND == child->matchType()) { - prepSubNodes(child, context, subnodesOut, mandatorySubnodes); + if (!prepSubNodes(child, context, subnodesOut, mandatorySubnodes)) { + return false; + } } } return true; @@ -1393,11 +1451,13 @@ void PlanEnumerator::getMultikeyCompoundablePreds(const vector // initializing the top-level scope with the prefix of the full path. for (size_t i = 0; i < assigned.size(); i++) { const MatchExpression* assignedPred = assigned[i]; - invariant(nullptr != assignedPred->getTag()); + tassert(6811425, + "Failed procondition in query plan enumerator", + nullptr != assignedPred->getTag()); RelevantTag* usedRt = static_cast(assignedPred->getTag()); set usedPrefixes; usedPrefixes.insert(getPathPrefix(usedRt->path)); - used[nullptr] = usedPrefixes; + used[nullptr] = std::move(usedPrefixes); // If 'assigned' is a predicate inside an $elemMatch, we have to // add the prefix not only to the top-level context, but also to the @@ -1412,17 +1472,21 @@ void PlanEnumerator::getMultikeyCompoundablePreds(const vector // in the top-level context, but here must be different because 'usedRt' // is in an $elemMatch context. elemMatchUsed.insert(usedRt->pathPrefix); - used[usedRt->elemMatchExpr] = elemMatchUsed; + used[usedRt->elemMatchExpr] = std::move(elemMatchUsed); } } for (size_t i = 0; i < couldCompound.size(); ++i) { - invariant(Indexability::nodeCanUseIndexOnOwnField(couldCompound[i])); + tassert(6811426, + "Failed procondition in query plan enumerator", + Indexability::nodeCanUseIndexOnOwnField(couldCompound[i])); RelevantTag* rt = static_cast(couldCompound[i]->getTag()); if (used.end() == used.find(rt->elemMatchExpr)) { // This is a new $elemMatch that we haven't seen before. - invariant(used.end() != used.find(nullptr)); + tassert(6811427, + "Failed procondition in query plan enumerator", + used.end() != used.find(nullptr)); set& topLevelUsed = used.find(nullptr)->second; // If the top-level path prefix of the $elemMatch hasn't been @@ -1431,7 +1495,7 @@ void PlanEnumerator::getMultikeyCompoundablePreds(const vector topLevelUsed.insert(getPathPrefix(rt->path)); set usedPrefixes; usedPrefixes.insert(rt->pathPrefix); - used[rt->elemMatchExpr] = usedPrefixes; + used[rt->elemMatchExpr] = std::move(usedPrefixes); // Output the predicate. 
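The prepSubNodes() hunk above stops discarding the return value of its recursive calls: a `false` from any child now aborts the traversal instead of being silently ignored. A standalone sketch of that propagation pattern, with a toy `Node` type in place of MatchExpression:

```cpp
// Standalone sketch of propagating a failed recursive call upward instead of
// discarding its result; Node is a toy stand-in.
#include <cassert>
#include <memory>
#include <vector>

struct Node {
    bool indexable{true};
    std::vector<std::unique_ptr<Node>> children;
};

bool prepSubNodes(const Node* node) {
    if (!node->indexable) {
        return false;  // a mandatory subnode that cannot be indexed
    }
    for (const auto& child : node->children) {
        // Before the change the analogue of this call was fire-and-forget;
        // now a failure short-circuits the whole enumeration.
        if (!prepSubNodes(child.get())) {
            return false;
        }
    }
    return true;
}

int main() {
    auto root = std::make_unique<Node>();
    root->children.push_back(std::make_unique<Node>());
    assert(prepSubNodes(root.get()));

    root->children.front()->indexable = false;
    assert(!prepSubNodes(root.get()));
    return 0;
}
```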
out->push_back(couldCompound[i]); @@ -1457,11 +1521,14 @@ void PlanEnumerator::assignMultikeySafePredicates( const std::vector& couldAssign, const stdx::unordered_map& outsidePreds, OneIndexAssignment* indexAssignment) { - invariant(indexAssignment); - invariant(indexAssignment->preds.size() == indexAssignment->positions.size()); + tassert(6811428, "Failed procondition in query plan enumerator", indexAssignment); + tassert(6811429, + "Failed procondition in query plan enumerator", + indexAssignment->preds.size() == indexAssignment->positions.size()); const IndexEntry& thisIndex = (*_indices)[indexAssignment->index]; - invariant(!thisIndex.multikeyPaths.empty()); + tassert( + 6811430, "Failed procondition in query plan enumerator", !thisIndex.multikeyPaths.empty()); // 'used' is a map from each prefix of a queried path that causes 'thisIndex' to be multikey to // the 'elemMatchExpr' of the associated leaf expression's RelevantTag. We use it to ensure that @@ -1474,7 +1541,7 @@ void PlanEnumerator::assignMultikeySafePredicates( const auto* assignedPred = indexAssignment->preds[i]; const auto posInIdx = indexAssignment->positions[i]; - invariant(assignedPred->getTag()); + tassert(6811431, "Failed procondition in query plan enumerator", assignedPred->getTag()); RelevantTag* rt = static_cast(assignedPred->getTag()); // 'assignedPred' has already been assigned to 'thisIndex', so canAssignPredToIndex() ought @@ -1487,13 +1554,16 @@ void PlanEnumerator::assignMultikeySafePredicates( // be multikey can be shared with the leading index field. The predicates cannot // possibly be joined by an $elemMatch because $near predicates must be specified at the // top-level of the query. - invariant(assignedPred->matchType() == MatchExpression::GEO_NEAR); + tassert(6811432, + "Failed procondition in query plan enumerator", + assignedPred->matchType() == MatchExpression::GEO_NEAR); } } // Update 'used' with all outside predicates already assigned to 'thisIndex'; for (const auto& orPushdown : indexAssignment->orPushdowns) { - invariant(orPushdown.first->getTag()); + tassert( + 6811433, "Failed procondition in query plan enumerator", orPushdown.first->getTag()); RelevantTag* rt = static_cast(orPushdown.first->getTag()); // Any outside predicates already assigned to 'thisIndex' were assigned in the first @@ -1501,7 +1571,7 @@ void PlanEnumerator::assignMultikeySafePredicates( const size_t position = 0; const bool shouldHaveAssigned = canAssignPredToIndex(rt, thisIndex.multikeyPaths[position], &used); - invariant(shouldHaveAssigned); + tassert(6811434, "Failed procondition in query plan enumerator", shouldHaveAssigned); } size_t posInIdx = 0; @@ -1510,7 +1580,9 @@ void PlanEnumerator::assignMultikeySafePredicates( // Attempt to assign the predicates to 'thisIndex' according to their position in the index // key pattern. 
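The 'used' map maintained above (and in assignMultikeySafePredicates() below) records, for each path prefix that makes the index multikey, the $elemMatch context that first claimed it, so that two predicates are only compounded when they share that context. A reduced sketch of the bookkeeping, with a plain `std::map` and opaque context pointers standing in for StringMap and the `elemMatchExpr` pointers stored in RelevantTag:

```cpp
// Reduced sketch: a multikey path prefix may only be reused by predicates that
// share the same $elemMatch context. Context is an opaque stand-in.
#include <cassert>
#include <map>
#include <string>

using Context = const void*;

bool canAssign(const std::string& multikeyPrefix,
               Context elemMatchContext,
               std::map<std::string, Context>* used) {
    auto it = used->find(multikeyPrefix);
    if (it == used->end()) {
        (*used)[multikeyPrefix] = elemMatchContext;  // first claim on this prefix
        return true;
    }
    // Reuse is only safe when both predicates sit under the same $elemMatch.
    return it->second == elemMatchContext;
}

int main() {
    std::map<std::string, Context> used;
    int elemMatchA = 0, elemMatchB = 0;

    assert(canAssign("a", &elemMatchA, &used));   // {a: {$elemMatch: {...}}} claims "a"
    assert(canAssign("a", &elemMatchA, &used));   // same $elemMatch may compound
    assert(!canAssign("a", &elemMatchB, &used));  // a different $elemMatch may not
    return 0;
}
```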
for (auto* couldAssignPred : couldAssign) { - invariant(Indexability::nodeCanUseIndexOnOwnField(couldAssignPred)); + tassert(6811435, + "Failed procondition in query plan enumerator", + Indexability::nodeCanUseIndexOnOwnField(couldAssignPred)); RelevantTag* rt = static_cast(couldAssignPred->getTag()); if (keyElem.fieldNameStringData() != rt->path) { @@ -1581,7 +1653,7 @@ bool PlanEnumerator::alreadyCompounded(const set& ixisectAssig } size_t PlanEnumerator::getPosition(const IndexEntry& indexEntry, MatchExpression* predicate) { - invariant(predicate->getTag()); + tassert(6811436, "Failed procondition in query plan enumerator", predicate->getTag()); RelevantTag* relevantTag = static_cast(predicate->getTag()); size_t position = 0; for (auto&& element : indexEntry.keyPattern) { @@ -1638,7 +1710,7 @@ void PlanEnumerator::compound(const vector& tryCompound, void PlanEnumerator::tagMemo(size_t id) { LOGV2_DEBUG(20944, 5, "Tagging memoID", "id"_attr = id); NodeAssignment* assign = _memo[id]; - verify(nullptr != assign); + MONGO_verify(nullptr != assign); if (nullptr != assign->orAssignment) { OrAssignment* oa = assign->orAssignment.get(); @@ -1655,7 +1727,7 @@ void PlanEnumerator::tagMemo(size_t id) { tagMemo(aa->subnodes[aa->counter]); } else if (nullptr != assign->andAssignment) { AndAssignment* aa = assign->andAssignment.get(); - verify(aa->counter < aa->choices.size()); + MONGO_verify(aa->counter < aa->choices.size()); const AndEnumerableState& aes = aa->choices[aa->counter]; @@ -1689,7 +1761,7 @@ void PlanEnumerator::tagMemo(size_t id) { } } } else { - verify(0); + MONGO_verify(0); } } @@ -1811,7 +1883,7 @@ bool PlanEnumerator::_nextMemoForLockstepOrAssignment( bool PlanEnumerator::nextMemo(size_t id) { NodeAssignment* assign = _memo[id]; - verify(nullptr != assign); + MONGO_verify(nullptr != assign); if (nullptr != assign->orAssignment) { OrAssignment* oa = assign->orAssignment.get(); diff --git a/src/mongo/db/query/plan_enumerator.h b/src/mongo/db/query/plan_enumerator.h index 9eabd5b09b964..53e35d24ab0fe 100644 --- a/src/mongo/db/query/plan_enumerator.h +++ b/src/mongo/db/query/plan_enumerator.h @@ -29,15 +29,27 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include "mongo/base/status.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/index_entry.h" #include "mongo/db/query/index_tag.h" #include "mongo/db/query/plan_enumerator_explain_info.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/platform/atomic_word.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/fail_point.h" namespace mongo { @@ -338,7 +350,8 @@ class PlanEnumerator { * Recursively traverse 'node', with OR nodes as the base case. The OR nodes are not * explored--instead we call prepMemo() on the OR subnode, and add its assignment to the output. * Subnodes are "mandatory" if they *must* use an index (TEXT and GEO). - * Returns a boolean indicating whether all mandatory subnodes can be indexed. + * Returns a boolean indicating whether all mandatory subnodes can be indexed and returns false + * to stop enumerating alternatives for an indexed OR. */ bool prepSubNodes(MatchExpression* node, PrepMemoContext context, diff --git a/src/mongo/db/query/plan_executor.cpp b/src/mongo/db/query/plan_executor.cpp index 99b2fd8fefa9a..c6c095837507d 100644 --- a/src/mongo/db/query/plan_executor.cpp +++ b/src/mongo/db/query/plan_executor.cpp @@ -27,10 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/query/plan_executor.h" +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/shard_role.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" namespace mongo { @@ -59,4 +68,30 @@ void PlanExecutor::checkFailPointPlanExecAlwaysFails() { } } +const CollectionPtr& VariantCollectionPtrOrAcquisition::getCollectionPtr() const { + return *stdx::visit(OverloadedVisitor{ + [](const CollectionPtr* collectionPtr) { return collectionPtr; }, + [](const CollectionAcquisition& collectionAcquisition) { + return &collectionAcquisition.getCollectionPtr(); + }, + }, + _collectionPtrOrAcquisition); +} + +boost::optional VariantCollectionPtrOrAcquisition::getShardingFilter( + OperationContext* opCtx) const { + return stdx::visit( + OverloadedVisitor{ + [&](const CollectionPtr* collPtr) -> boost::optional { + auto scopedCss = CollectionShardingState::assertCollectionLockedAndAcquire( + opCtx, collPtr->get()->ns()); + return scopedCss->getOwnershipFilter( + opCtx, CollectionShardingState::OrphanCleanupPolicy::kDisallowOrphanCleanup); + }, + [](const CollectionAcquisition& acq) -> boost::optional { + return acq.getShardingFilter(); + }}, + _collectionPtrOrAcquisition); +} + } // namespace mongo diff --git a/src/mongo/db/query/plan_executor.h b/src/mongo/db/query/plan_executor.h index 77d6a8c4935f1..7c9b881918779 100644 --- a/src/mongo/db/query/plan_executor.h +++ b/src/mongo/db/query/plan_executor.h @@ -29,8 +29,22 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/plan_stats.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/update_result.h" #include "mongo/db/query/canonical_query.h" @@ -38,12 +52,49 @@ #include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/restore_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/shard_role.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" namespace mongo { -class BSONObj; -class PlanStage; -class RecordId; +// TODO: SERVER-76397 Remove this once we use CollectionAcquisition everywhere. 
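The new getCollectionPtr() and getShardingFilter() definitions above dispatch over the CollectionPtr*/CollectionAcquisition alternatives with stdx::visit and an OverloadedVisitor (the VariantCollectionPtrOrAcquisition declaration the TODO refers to follows just below in plan_executor.h). The same idiom in portable C++17, with std::variant, std::visit and a hand-rolled `Overloaded` helper standing in for the mongo wrappers, and a toy `Acquisition` type invented for the example:

```cpp
// Portable sketch of the visit-with-overloaded-lambdas idiom; the alternatives
// here are simplified stand-ins for CollectionPtr* and CollectionAcquisition.
#include <cassert>
#include <string>
#include <variant>

template <class... Ts>
struct Overloaded : Ts... {
    using Ts::operator()...;
};
template <class... Ts>
Overloaded(Ts...) -> Overloaded<Ts...>;  // deduction guide (not needed in C++20)

struct Acquisition {
    std::string name{"acquired"};
};

int main() {
    std::string viaPointer{"raw"};
    std::variant<const std::string*, Acquisition> v = &viaPointer;

    auto describe = [](const std::variant<const std::string*, Acquisition>& var) {
        return std::visit(Overloaded{
                              [](const std::string* ptr) { return *ptr; },
                              [](const Acquisition& acq) { return acq.name; },
                          },
                          var);
    };

    assert(describe(v) == "raw");
    v = Acquisition{};
    assert(describe(v) == "acquired");
    return 0;
}
```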
+class VariantCollectionPtrOrAcquisition {
public: + VariantCollectionPtrOrAcquisition(const CollectionPtr* collectionPtr) + : _collectionPtrOrAcquisition(collectionPtr) {} + VariantCollectionPtrOrAcquisition(CollectionAcquisition collection) + : _collectionPtrOrAcquisition(collection) {} + + const stdx::variant& get() { + return _collectionPtrOrAcquisition; + }; + + const CollectionPtr& getCollectionPtr() const; + + bool isCollectionPtr() const { + return stdx::holds_alternative(_collectionPtrOrAcquisition); + } + + bool isAcquisition() const { + return stdx::holds_alternative(_collectionPtrOrAcquisition); + } + + const CollectionAcquisition& getAcquisition() const { + return stdx::get(_collectionPtrOrAcquisition); + } + + boost::optional getShardingFilter(OperationContext* opCtx) const; + +private: + stdx::variant _collectionPtrOrAcquisition; +}; /** * If a getMore command specified a lastKnownCommittedOpTime (as secondaries do), we want to stop @@ -284,7 +335,7 @@ class PlanExecutor { * If this plan executor has already executed an update operation, returns an 'UpdateResult' * describing the outcome of the update. Illegal to call if either 1) the PlanExecutor is not * an update PlanExecutor, or 2) the PlanExecutor has not yet been executed either with - * 'executeUpdate()' or by calling 'getNext()' until end-of-stream. + * 'executeUpdate()' or by calling 'getNext()' until ADVANCED or end-of-stream. */ virtual UpdateResult getUpdateResult() const = 0; @@ -295,6 +346,14 @@ class PlanExecutor { */ virtual long long executeDelete() = 0; + /** + * If this plan executor has already executed a delete operation, returns the number of + * documents that were deleted. Illegal to call if either 1) the PlanExecutor is not a delete + * PlanExecutor, or 2) the PlanExecutor has not yet been executed either with 'executeDelete()' + * or by calling 'getNext()' until ADVANCED or end-of-stream. + */ + virtual long long getDeleteResult() const = 0; + /** * If this plan executor has already executed a batched delete operation, returns the * 'BatchedDeleteStats' describing the outcome of the batched delete. Illegal to call if either diff --git a/src/mongo/db/query/plan_executor_factory.cpp b/src/mongo/db/query/plan_executor_factory.cpp index 6d1ec8826728e..5f36d368a3d5a 100644 --- a/src/mongo/db/query/plan_executor_factory.cpp +++ b/src/mongo/db/query/plan_executor_factory.cpp @@ -27,20 +27,30 @@ * it in the license file.
*/ -#include "mongo/util/duration.h" -#include +#include +#include +#include -#include "mongo/platform/basic.h" - -#include "mongo/db/query/plan_executor_factory.h" +#include +#include "mongo/base/status.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/trial_run_tracker.h" #include "mongo/db/pipeline/plan_executor_pipeline.h" +#include "mongo/db/query/plan_executor_factory.h" #include "mongo/db/query/plan_executor_impl.h" #include "mongo/db/query/plan_executor_sbe.h" +#include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/sbe_plan_ranker.h" #include "mongo/db/query/util/make_data_structure.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -51,7 +61,7 @@ StatusWith> make( std::unique_ptr cq, std::unique_ptr ws, std::unique_ptr rt, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition collection, PlanYieldPolicy::YieldPolicy yieldPolicy, size_t plannerOptions, NamespaceString nss, @@ -74,7 +84,7 @@ StatusWith> make( const boost::intrusive_ptr& expCtx, std::unique_ptr ws, std::unique_ptr rt, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition collection, PlanYieldPolicy::YieldPolicy yieldPolicy, size_t plannerOptions, NamespaceString nss, @@ -99,11 +109,14 @@ StatusWith> make( std::unique_ptr qs, std::unique_ptr cq, const boost::intrusive_ptr& expCtx, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition collection, size_t plannerOptions, NamespaceString nss, PlanYieldPolicy::YieldPolicy yieldPolicy) { - dassert(collection); + stdx::visit(OverloadedVisitor{[](const CollectionPtr* ptr) { dassert(ptr); }, + [](const CollectionAcquisition& acq) { + }}, + collection.get()); try { auto execImpl = new PlanExecutorImpl(opCtx, @@ -112,7 +125,7 @@ StatusWith> make( std::move(qs), std::move(cq), expCtx, - *collection, + collection, plannerOptions & QueryPlannerParams::RETURN_OWNED_DATA, std::move(nss), yieldPolicy); diff --git a/src/mongo/db/query/plan_executor_factory.h b/src/mongo/db/query/plan_executor_factory.h index c87ec0cd7b7bf..e31cd15d7f3dc 100644 --- a/src/mongo/db/query/plan_executor_factory.h +++ b/src/mongo/db/query/plan_executor_factory.h @@ -29,21 +29,33 @@ #pragma once -#include "mongo/util/duration.h" +#include +#include +#include #include +#include +#include "mongo/base/status_with.h" #include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/plan_executor_pipeline.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/cqf_get_executor.h" +#include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/optimizer/explain_interface.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/plan_yield_policy_sbe.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_plan_ranker.h" #include "mongo/db/query/sbe_runtime_planner.h" #include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/shard_role.h" +#include 
"mongo/util/duration.h" namespace mongo::plan_executor_factory { @@ -71,7 +83,7 @@ StatusWith> make( std::unique_ptr cq, std::unique_ptr ws, std::unique_ptr rt, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition collection, PlanYieldPolicy::YieldPolicy yieldPolicy, size_t plannerOptions, NamespaceString nss = NamespaceString(), @@ -88,7 +100,7 @@ StatusWith> make( const boost::intrusive_ptr& expCtx, std::unique_ptr ws, std::unique_ptr rt, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition collection, PlanYieldPolicy::YieldPolicy yieldPolicy, size_t plannerOptions, NamespaceString nss = NamespaceString(), @@ -101,7 +113,7 @@ StatusWith> make( std::unique_ptr qs, std::unique_ptr cq, const boost::intrusive_ptr& expCtx, - const CollectionPtr* collection, + VariantCollectionPtrOrAcquisition collection, size_t plannerOptions, NamespaceString nss, PlanYieldPolicy::YieldPolicy yieldPolicy); diff --git a/src/mongo/db/query/plan_executor_impl.cpp b/src/mongo/db/query/plan_executor_impl.cpp index 2c25777d8f275..37d3df4239341 100644 --- a/src/mongo/db/query/plan_executor_impl.cpp +++ b/src/mongo/db/query/plan_executor_impl.cpp @@ -28,45 +28,55 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/plan_executor_impl.h" - -#include "mongo/util/duration.h" +#include +#include +#include +#include +#include #include +#include +#include +#include -#include "mongo/bson/simple_bsonobj_comparator.h" +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/curop.h" #include "mongo/db/exec/cached_plan.h" #include "mongo/db/exec/collection_scan.h" -#include "mongo/db/exec/count_scan.h" -#include "mongo/db/exec/distinct_scan.h" -#include "mongo/db/exec/idhack.h" -#include "mongo/db/exec/index_scan.h" -#include "mongo/db/exec/near.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" -#include "mongo/db/exec/sort.h" #include "mongo/db/exec/subplan.h" +#include "mongo/db/exec/timeseries_modify.h" #include "mongo/db/exec/trial_stage.h" #include "mongo/db/exec/update_stage.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/mock_yield_policies.h" +#include "mongo/db/query/plan_executor_impl.h" #include "mongo/db/query/plan_explainer_factory.h" #include "mongo/db/query/plan_explainer_impl.h" #include "mongo/db/query/plan_insert_listener.h" #include "mongo/db/query/plan_yield_policy_impl.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/query/yield_policy_callbacks_impl.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/stacktrace.h" +#include "mongo/util/future.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -90,9 +100,10 @@ namespace { /** * Constructs a PlanYieldPolicy based on 'policy'. 
*/ -std::unique_ptr makeYieldPolicy(PlanExecutorImpl* exec, - PlanYieldPolicy::YieldPolicy policy, - const Yieldable* yieldable) { +std::unique_ptr makeYieldPolicy( + PlanExecutorImpl* exec, + PlanYieldPolicy::YieldPolicy policy, + stdx::variant yieldable) { switch (policy) { case PlanYieldPolicy::YieldPolicy::YIELD_AUTO: case PlanYieldPolicy::YieldPolicy::YIELD_MANUAL: @@ -104,11 +115,11 @@ std::unique_ptr makeYieldPolicy(PlanExecutorImpl* exec, } case PlanYieldPolicy::YieldPolicy::ALWAYS_TIME_OUT: { return std::make_unique( - exec->getOpCtx()->getServiceContext()->getFastClockSource()); + exec->getOpCtx(), exec->getOpCtx()->getServiceContext()->getFastClockSource()); } case PlanYieldPolicy::YieldPolicy::ALWAYS_MARK_KILLED: { return std::make_unique( - exec->getOpCtx()->getServiceContext()->getFastClockSource()); + exec->getOpCtx(), exec->getOpCtx()->getServiceContext()->getFastClockSource()); } default: MONGO_UNREACHABLE; @@ -122,7 +133,7 @@ PlanExecutorImpl::PlanExecutorImpl(OperationContext* opCtx, unique_ptr qs, unique_ptr cq, const boost::intrusive_ptr& expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, bool returnOwnedBson, NamespaceString nss, PlanYieldPolicy::YieldPolicy yieldPolicy) @@ -138,23 +149,40 @@ PlanExecutorImpl::PlanExecutorImpl(OperationContext* opCtx, invariant(!_expCtx || _expCtx->opCtx == _opCtx); invariant(!_cq || !_expCtx || _cq->getExpCtx() == _expCtx); + const CollectionPtr* collectionPtr = &collection.getCollectionPtr(); + invariant(collectionPtr); + const bool collectionExists = static_cast(*collectionPtr); + // If we don't yet have a namespace string, then initialize it from either 'collection' or // '_cq'. if (_nss.isEmpty()) { - if (collection) { - _nss = collection->ns(); + if (collectionExists) { + _nss = (*collectionPtr)->ns(); } else { invariant(_cq); - _nss = - _cq->getFindCommandRequest().getNamespaceOrUUID().nss().value_or(NamespaceString()); + if (_cq->getFindCommandRequest().getNamespaceOrUUID().isNamespaceString()) { + _nss = _cq->getFindCommandRequest().getNamespaceOrUUID().nss(); + } } } // There's no point in yielding if the collection doesn't exist. - _yieldPolicy = - makeYieldPolicy(this, - collection ? yieldPolicy : PlanYieldPolicy::YieldPolicy::NO_YIELD, - collection ? &collection : nullptr); + const stdx::variant yieldable = + stdx::visit( + OverloadedVisitor{[](const CollectionPtr* coll) { + return stdx::variant( + *coll ? coll : nullptr); + }, + [](const CollectionAcquisition& coll) { + return stdx::variant( + PlanYieldPolicy::YieldThroughAcquisitions{}); + }}, + collection.get()); + + _yieldPolicy = makeYieldPolicy( + this, collectionExists ? 
yieldPolicy : PlanYieldPolicy::YieldPolicy::NO_YIELD, yieldable); uassertStatusOK(_pickBestPlan()); @@ -252,7 +280,10 @@ void PlanExecutorImpl::saveState() { if (!isMarkedAsKilled()) { _root->saveState(); } - _yieldPolicy->setYieldable(nullptr); + + if (!_yieldPolicy->usesCollectionAcquisitions()) { + _yieldPolicy->setYieldable(nullptr); + } _currentState = kSaved; } @@ -272,7 +303,9 @@ void PlanExecutorImpl::restoreStateWithoutRetrying(const RestoreContext& context const Yieldable* yieldable) { invariant(_currentState == kSaved); - _yieldPolicy->setYieldable(yieldable); + if (!_yieldPolicy->usesCollectionAcquisitions()) { + _yieldPolicy->setYieldable(yieldable); + } if (!isMarkedAsKilled()) { _root->restoreState(context); } @@ -353,11 +386,10 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted* ob // Capped insert data; declared outside the loop so we hold a shared pointer to the capped // insert notifier the entire time we are in the loop. Holding a shared pointer to the // capped insert notifier is necessary for the notifierVersion to advance. - insert_listener::CappedInsertNotifierData cappedInsertNotifierData; + std::unique_ptr notifier; if (insert_listener::shouldListenForInserts(_opCtx, _cq.get())) { - // We always construct the CappedInsertNotifier for awaitData cursors. - cappedInsertNotifierData.notifier = - insert_listener::getCappedInsertNotifier(_opCtx, _nss, _yieldPolicy.get()); + // We always construct the insert_listener::Notifier for awaitData cursors. + notifier = insert_listener::getCappedInsertNotifier(_opCtx, _nss, _yieldPolicy.get()); } for (;;) { // These are the conditions which can cause us to yield: @@ -456,7 +488,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted* ob _opCtx, tempUnavailErrorsInARow, "plan executor", - _nss.ns(), + NamespaceStringOrUUID(_nss), TemporarilyUnavailableException( Status(ErrorCodes::TemporarilyUnavailable, "temporarily unavailable"))); } else { @@ -469,7 +501,8 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted* ob CurOp::get(_opCtx)->debug().additiveMetrics.incrementWriteConflicts(1); writeConflictsInARow++; - logWriteConflictAndBackoff(writeConflictsInARow, "plan execution", _nss.ns()); + logWriteConflictAndBackoff( + writeConflictsInARow, "plan execution", ""_sd, NamespaceStringOrUUID(_nss)); } // Yield next time through the loop. @@ -481,11 +514,8 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted* ob invariant(PlanStage::IS_EOF == code); if (MONGO_unlikely(planExecutorHangBeforeShouldWaitForInserts.shouldFail( [this](const BSONObj& data) { - if (data.hasField("namespace") && - _nss != NamespaceString(data.getStringField("namespace"))) { - return false; - } - return true; + auto fpNss = NamespaceStringUtil::parseFailPointData(data, "namespace"_sd); + return fpNss.isEmpty() || fpNss == _nss; }))) { LOGV2(20946, "PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point " @@ -497,7 +527,7 @@ PlanExecutor::ExecState PlanExecutorImpl::_getNextImpl(Snapshotted* ob return PlanExecutor::IS_EOF; } - insert_listener::waitForInserts(_opCtx, _yieldPolicy.get(), &cappedInsertNotifierData); + insert_listener::waitForInserts(_opCtx, _yieldPolicy.get(), notifier); // There may be more results, keep going. 
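The error handling above counts consecutive WriteConflict/TemporarilyUnavailable failures, logs, backs off, and then retries on the next pass through the loop. A standalone sketch of that retry-with-backoff shape; the `TransientError` type, the attempt cap and the sleep curve are all invented for illustration and do not correspond to the real executor's policy:

```cpp
// Standalone sketch of retrying an operation after transient failures, backing
// off a little longer after each consecutive failure.
#include <cassert>
#include <chrono>
#include <stdexcept>
#include <thread>

struct TransientError : std::runtime_error {
    using std::runtime_error::runtime_error;
};

template <typename F>
auto runWithRetry(F&& attempt, int maxAttempts = 5) {
    for (int failuresInARow = 0;; ++failuresInARow) {
        try {
            return attempt();
        } catch (const TransientError&) {
            if (failuresInARow + 1 >= maxAttempts) {
                throw;  // give up; a real executor would surface the error to the caller
            }
            std::this_thread::sleep_for(std::chrono::milliseconds(1 << failuresInARow));
        }
    }
}

int main() {
    int calls = 0;
    int result = runWithRetry([&] {
        if (++calls < 3) {
            throw TransientError("write conflict");
        }
        return 42;
    });
    assert(result == 42 && calls == 3);
    return 0;
}
```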
continue; @@ -571,30 +601,54 @@ UpdateResult PlanExecutorImpl::getUpdateResult() const { } // If the collection exists, then we expect the root of the plan tree to either be an update - // stage, or (for findAndModify) a projection stage wrapping an update stage. - switch (_root->stageType()) { - case StageType::STAGE_PROJECTION_DEFAULT: - case StageType::STAGE_PROJECTION_COVERED: - case StageType::STAGE_PROJECTION_SIMPLE: { - invariant(_root->getChildren().size() == 1U); - invariant(StageType::STAGE_UPDATE == _root->child()->stageType()); - const SpecificStats* stats = _root->child()->getSpecificStats(); + // stage, or (for findAndModify) a projection stage wrapping an update / TS_MODIFY stage. + const auto updateStage = [&] { + switch (_root->stageType()) { + case StageType::STAGE_PROJECTION_DEFAULT: + case StageType::STAGE_PROJECTION_COVERED: + case StageType::STAGE_PROJECTION_SIMPLE: { + tassert(7314604, + "Unexpected number of children: {}"_format(_root->getChildren().size()), + _root->getChildren().size() == 1U); + auto childStage = _root->child().get(); + tassert(7314605, + "Unexpected child stage type: {}"_format(childStage->stageType()), + StageType::STAGE_UPDATE == childStage->stageType() || + StageType::STAGE_TIMESERIES_MODIFY == childStage->stageType()); + return childStage; + } + default: + return _root.get(); + } + }(); + switch (updateStage->stageType()) { + case StageType::STAGE_TIMESERIES_MODIFY: { + const auto& stats = + static_cast(*updateStage->getSpecificStats()); + return UpdateResult( + stats.nMeasurementsModified > 0 /* Did we update at least one obj? */, + stats.isModUpdate /* Is this a $mod update? */, + stats.nMeasurementsModified /* number of modified docs, no no-ops */, + stats.nMeasurementsMatched /* # of docs matched/updated, even no-ops */, + stats.objInserted /* objInserted */, + static_cast(updateStage)->containsDotsAndDollarsField()); + } + case StageType::STAGE_UPDATE: { + const auto& stats = static_cast(*updateStage->getSpecificStats()); return updateStatsToResult( - static_cast(*stats), - static_cast(_root->child().get())->containsDotsAndDollarsField()); + stats, static_cast(updateStage)->containsDotsAndDollarsField()); } default: - invariant(StageType::STAGE_UPDATE == _root->stageType()); - const auto stats = _root->getSpecificStats(); - return updateStatsToResult( - static_cast(*stats), - static_cast(_root.get())->containsDotsAndDollarsField()); + MONGO_UNREACHABLE_TASSERT(7314606); } } long long PlanExecutorImpl::executeDelete() { _executePlan(); + return getDeleteResult(); +} +long long PlanExecutorImpl::getDeleteResult() const { // If we're deleting from a non-existent collection, then the delete plan may have an EOF as // the root stage. if (_root->stageType() == STAGE_EOF) { @@ -602,27 +656,40 @@ long long PlanExecutorImpl::executeDelete() { } // If the collection exists, the delete plan may either have a delete stage at the root, or - // (for findAndModify) a projection stage wrapping a delete stage. - switch (_root->stageType()) { - case StageType::STAGE_PROJECTION_DEFAULT: - case StageType::STAGE_PROJECTION_COVERED: - case StageType::STAGE_PROJECTION_SIMPLE: { - invariant(_root->getChildren().size() == 1U); - invariant(StageType::STAGE_DELETE == _root->child()->stageType()); - const SpecificStats* stats = _root->child()->getSpecificStats(); - return static_cast(stats)->docsDeleted; + // (for findAndModify) a projection stage wrapping a delete / TS_MODIFY stage. 
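The reworked getUpdateResult() above computes the stage to inspect with an immediately invoked lambda that peels off a wrapping projection stage (the findAndModify case) and then switches on the stage type; getDeleteResult(), which continues below, follows the same shape. A toy version of that "unwrap the wrapper, then dispatch on type" pattern, with `StageType` and `Stage` as stand-ins for the real plan stage classes:

```cpp
// Toy sketch of the pattern; not the real PlanStage hierarchy or stats types.
#include <cassert>
#include <memory>

enum class StageType { PROJECTION, UPDATE, TIMESERIES_MODIFY };

struct Stage {
    StageType type;
    std::unique_ptr<Stage> child;  // only the projection wrapper has a child here
    long long nModified{0};
};

long long getUpdateCount(const Stage& root) {
    // Immediately invoked lambda: find the stage that actually holds the stats.
    const Stage* updateStage = [&]() -> const Stage* {
        if (root.type == StageType::PROJECTION) {
            return root.child.get();  // findAndModify wraps the write stage in a projection
        }
        return &root;
    }();

    switch (updateStage->type) {
        case StageType::UPDATE:
        case StageType::TIMESERIES_MODIFY:
            return updateStage->nModified;
        default:
            return -1;  // the real code trips a tassert here instead
    }
}

int main() {
    Stage update{StageType::UPDATE, nullptr, 7};
    assert(getUpdateCount(update) == 7);

    Stage wrapped{StageType::PROJECTION,
                  std::make_unique<Stage>(Stage{StageType::UPDATE, nullptr, 3}),
                  0};
    assert(getUpdateCount(wrapped) == 3);
    return 0;
}
```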
+ const auto deleteStage = [&] { + switch (_root->stageType()) { + case StageType::STAGE_PROJECTION_DEFAULT: + case StageType::STAGE_PROJECTION_COVERED: + case StageType::STAGE_PROJECTION_SIMPLE: { + tassert(7308302, + "Unexpected number of children: {}"_format(_root->getChildren().size()), + _root->getChildren().size() == 1U); + auto childStage = _root->child().get(); + tassert(7308303, + "Unexpected child stage type: {}"_format(childStage->stageType()), + StageType::STAGE_DELETE == childStage->stageType() || + StageType::STAGE_TIMESERIES_MODIFY == childStage->stageType()); + return childStage; + } + default: + return _root.get(); } + }(); + switch (deleteStage->stageType()) { case StageType::STAGE_TIMESERIES_MODIFY: { - const auto* tsModifyStats = - static_cast(_root->getSpecificStats()); - return tsModifyStats->nMeasurementsDeleted; + const auto& tsModifyStats = + static_cast(*deleteStage->getSpecificStats()); + return tsModifyStats.nMeasurementsModified; } - default: { - invariant(StageType::STAGE_DELETE == _root->stageType() || - StageType::STAGE_BATCHED_DELETE == _root->stageType()); - const auto* deleteStats = static_cast(_root->getSpecificStats()); - return deleteStats->docsDeleted; + case StageType::STAGE_DELETE: + case StageType::STAGE_BATCHED_DELETE: { + const auto& deleteStats = + static_cast(*deleteStage->getSpecificStats()); + return deleteStats.docsDeleted; } + default: + MONGO_UNREACHABLE_TASSERT(7308306); } } diff --git a/src/mongo/db/query/plan_executor_impl.h b/src/mongo/db/query/plan_executor_impl.h index 81fc7f60b5c66..0486f0029e391 100644 --- a/src/mongo/db/query/plan_executor_impl.h +++ b/src/mongo/db/query/plan_executor_impl.h @@ -29,16 +29,40 @@ #pragma once -#include "mongo/util/duration.h" #include +#include +#include +#include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/multi_plan.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/restore_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/yieldable.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo { @@ -52,8 +76,7 @@ namespace mongo { * WriteConflictException. 
*/ template -auto handlePlanStageYield( - ExpressionContext* expCtx, StringData opStr, StringData ns, F&& f, H&& yieldHandler) { +auto handlePlanStageYield(ExpressionContext* expCtx, StringData opStr, F&& f, H&& yieldHandler) { auto opCtx = expCtx->opCtx; invariant(opCtx); invariant(opCtx->lockState()); @@ -67,7 +90,7 @@ auto handlePlanStageYield( return PlanStage::NEED_YIELD; } catch (const TemporarilyUnavailableException& e) { if (opCtx->inMultiDocumentTransaction()) { - handleTemporarilyUnavailableExceptionInTransaction(opCtx, opStr, ns, e); + handleTemporarilyUnavailableExceptionInTransaction(opCtx, opStr, e); } expCtx->setTemporarilyUnavailableException(true); yieldHandler(); @@ -95,7 +118,7 @@ class PlanExecutorImpl : public PlanExecutor { std::unique_ptr qs, std::unique_ptr cq, const boost::intrusive_ptr& expCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, bool returnOwnedBson, NamespaceString nss, PlanYieldPolicy::YieldPolicy yieldPolicy); @@ -116,6 +139,7 @@ class PlanExecutorImpl : public PlanExecutor { UpdateResult executeUpdate() override; UpdateResult getUpdateResult() const override; long long executeDelete() override; + long long getDeleteResult() const override; BatchedDeleteStats getBatchedDeleteStats() override; void markAsKilled(Status killStatus) final; void dispose(OperationContext* opCtx) final; diff --git a/src/mongo/db/query/plan_executor_sbe.cpp b/src/mongo/db/query/plan_executor_sbe.cpp index 0c436764c7801..4514f1f7bb0a6 100644 --- a/src/mongo/db/query/plan_executor_sbe.cpp +++ b/src/mongo/db/query/plan_executor_sbe.cpp @@ -27,19 +27,51 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/plan_executor_sbe.h" - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/db_raii.h" -#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor_sbe.h" #include "mongo/db/query/plan_explainer_factory.h" #include "mongo/db/query/plan_insert_listener.h" +#include "mongo/db/query/plan_ranker.h" +#include "mongo/db/query/sbe_plan_ranker.h" #include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/decimal128.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" #include "mongo/s/resharding/resume_token_gen.h" -#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -71,23 +103,28 @@ PlanExecutorSBE::PlanExecutorSBE(OperationContext* opCtx, invariant(!_nss.isEmpty()); 
invariant(_root); - if (auto slot = _rootData.outputs.getIfExists(stage_builder::PlanStageSlots::kResult); slot) { - _result = _root->getAccessor(_rootData.ctx, *slot); + auto& outputs = _rootData.staticData->outputs; + + if (auto slot = outputs.getIfExists(stage_builder::PlanStageSlots::kResult)) { + _result = _root->getAccessor(_rootData.env.ctx, *slot); uassert(4822865, "Query does not have result slot.", _result); } - if (auto slot = _rootData.outputs.getIfExists(stage_builder::PlanStageSlots::kRecordId); slot) { - _resultRecordId = _root->getAccessor(_rootData.ctx, *slot); + if (auto slot = outputs.getIfExists(stage_builder::PlanStageSlots::kRecordId)) { + _resultRecordId = _root->getAccessor(_rootData.env.ctx, *slot); uassert(4822866, "Query does not have recordId slot.", _resultRecordId); } - if (_rootData.shouldTrackLatestOplogTimestamp) { - _oplogTs = _rootData.env->getAccessor(_rootData.env->getSlot("oplogTs"_sd)); + auto& env = _rootData.env; + if (_rootData.staticData->shouldTrackLatestOplogTimestamp) { + _oplogTs = env->getAccessor(env->getSlot("oplogTs"_sd)); } - if (_rootData.shouldUseTailableScan) { - _resumeRecordIdSlot = _rootData.env->getSlot("resumeRecordId"_sd); + if (_rootData.staticData->shouldUseTailableScan) { + _resumeRecordIdSlot = env->getSlot("resumeRecordId"_sd); } + _minRecordIdSlot = env->getSlotIfExists("minRecordId"_sd); + _maxRecordIdSlot = env->getSlotIfExists("maxRecordId"_sd); if (!_stash.empty()) { // The PlanExecutor keeps an extra reference to the last object pulled out of the PlanStage @@ -146,12 +183,16 @@ void PlanExecutorSBE::saveState() { _root->saveState(relinquishCursor, discardSlotState); } - _yieldPolicy->setYieldable(nullptr); + if (!_yieldPolicy->usesCollectionAcquisitions()) { + _yieldPolicy->setYieldable(nullptr); + } _lastGetNext = BSONObj(); } void PlanExecutorSBE::restoreState(const RestoreContext& context) { - _yieldPolicy->setYieldable(context.collection()); + if (!_yieldPolicy->usesCollectionAcquisitions()) { + _yieldPolicy->setYieldable(context.collection()); + } if (_isSaveRecoveryUnitAcrossCommandsEnabled) { _root->restoreState(false /* NOT relinquishing cursor */); @@ -273,14 +314,13 @@ PlanExecutor::ExecState PlanExecutorSBE::getNextImpl(ObjectType* out, RecordId* // // Note that we need to hold a database intent lock before acquiring a notifier. 
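The constructor changes above resolve optional, name-keyed slots ("oplogTs", "resumeRecordId", and the new "minRecordId"/"maxRecordId") once up front and hold on to the results. A reduced sketch of that lookup-once pattern; `Env`, `SlotId` and `Executor` are invented stand-ins for the SBE runtime environment and executor, and only the slot names are taken from the hunk above:

```cpp
// Stand-in sketch of resolving optional named slots at construction time.
#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>

using SlotId = int;

struct Env {
    std::unordered_map<std::string, SlotId> slots;

    std::optional<SlotId> getSlotIfExists(const std::string& name) const {
        auto it = slots.find(name);
        if (it == slots.end()) {
            return std::nullopt;
        }
        return it->second;
    }
};

struct Executor {
    explicit Executor(const Env& env)
        : _minRecordIdSlot(env.getSlotIfExists("minRecordId")),
          _maxRecordIdSlot(env.getSlotIfExists("maxRecordId")) {}

    // Only clustered collection scans register these slots, so they may be absent.
    std::optional<SlotId> _minRecordIdSlot;
    std::optional<SlotId> _maxRecordIdSlot;
};

int main() {
    Env env{{{"minRecordId", 1}, {"maxRecordId", 2}}};
    Executor withBounds(env);
    assert(withBounds._minRecordIdSlot && withBounds._maxRecordIdSlot);

    Executor withoutBounds(Env{});
    assert(!withoutBounds._minRecordIdSlot);
    return 0;
}
```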
boost::optional coll; - insert_listener::CappedInsertNotifierData cappedInsertNotifierData; + std::unique_ptr notifier; if (insert_listener::shouldListenForInserts(_opCtx, _cq.get())) { if (!_opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_IS)) { coll.emplace(_opCtx, _nss); } - cappedInsertNotifierData.notifier = - insert_listener::getCappedInsertNotifier(_opCtx, _nss, _yieldPolicy.get()); + notifier = insert_listener::getCappedInsertNotifier(_opCtx, _nss, _yieldPolicy.get()); } for (;;) { @@ -309,11 +349,9 @@ PlanExecutor::ExecState PlanExecutorSBE::getNextImpl(ObjectType* out, RecordId* if (MONGO_unlikely(planExecutorHangBeforeShouldWaitForInserts.shouldFail( [this](const BSONObj& data) { - if (data.hasField("namespace") && - _nss != NamespaceString(data.getStringField("namespace"))) { - return false; - } - return true; + const auto fpNss = + NamespaceStringUtil::parseFailPointData(data, "namespace"_sd); + return fpNss.isEmpty() || _nss == fpNss; }))) { LOGV2(5567001, "PlanExecutor - planExecutorHangBeforeShouldWaitForInserts fail point " @@ -325,7 +363,7 @@ PlanExecutor::ExecState PlanExecutorSBE::getNextImpl(ObjectType* out, RecordId* return PlanExecutor::ExecState::IS_EOF; } - insert_listener::waitForInserts(_opCtx, _yieldPolicy.get(), &cappedInsertNotifierData); + insert_listener::waitForInserts(_opCtx, _yieldPolicy.get(), notifier); // There may be more results, keep going. continue; } else if (_resumeRecordIdSlot) { @@ -363,7 +401,7 @@ template PlanExecutor::ExecState PlanExecutorSBE::getNextImpl(Document RecordId* dlOut); Timestamp PlanExecutorSBE::getLatestOplogTimestamp() const { - if (_rootData.shouldTrackLatestOplogTimestamp) { + if (_rootData.staticData->shouldTrackLatestOplogTimestamp) { tassert(5567201, "The '_oplogTs' accessor should be populated when " "'shouldTrackLatestOplogTimestamp' is true", @@ -384,7 +422,7 @@ Timestamp PlanExecutorSBE::getLatestOplogTimestamp() const { } BSONObj PlanExecutorSBE::getPostBatchResumeToken() const { - if (_rootData.shouldTrackResumeToken) { + if (_rootData.staticData->shouldTrackResumeToken) { invariant(_resultRecordId); auto [tag, val] = _resultRecordId->getViewOfValue(); @@ -397,11 +435,19 @@ BSONObj PlanExecutorSBE::getPostBatchResumeToken() const { tag == sbe::value::TypeTags::RecordId); BSONObjBuilder builder; sbe::value::getRecordIdView(val)->serializeToken("$recordId", &builder); + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + auto initialSyncId = + repl::ReplicationCoordinator::get(_opCtx)->getInitialSyncId(_opCtx); + if (initialSyncId) { + initialSyncId.value().appendToBuilder(&builder, "$initialSyncId"); + } + } return builder.obj(); } } - if (_rootData.shouldTrackLatestOplogTimestamp) { + if (_rootData.staticData->shouldTrackLatestOplogTimestamp) { return ResumeTokenOplogTimestamp{getLatestOplogTimestamp()}.toBSON(); } diff --git a/src/mongo/db/query/plan_executor_sbe.h b/src/mongo/db/query/plan_executor_sbe.h index 8d21efc6aec53..7021f57190c70 100644 --- a/src/mongo/db/query/plan_executor_sbe.h +++ b/src/mongo/db/query/plan_executor_sbe.h @@ -29,17 +29,42 @@ #pragma once -#include "mongo/util/duration.h" +#include +#include +#include +#include +#include #include - +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include 
"mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/optimizer/explain_interface.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_explainer_sbe.h" #include "mongo/db/query/plan_yield_policy_sbe.h" +#include "mongo/db/query/query_solution.h" +#include "mongo/db/query/restore_context.h" #include "mongo/db/query/sbe_plan_ranker.h" #include "mongo/db/query/sbe_runtime_planner.h" #include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/record_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo { class PlanExecutorSBE final : public PlanExecutor { @@ -102,6 +127,11 @@ class PlanExecutorSBE final : public PlanExecutor { MONGO_UNREACHABLE; } + long long getDeleteResult() const override { + // Using SBE to execute a delete command is not yet supported. + MONGO_UNREACHABLE; + } + BatchedDeleteStats getBatchedDeleteStats() override { // Using SBE to execute a batched delete command is not yet supported. MONGO_UNREACHABLE; @@ -185,8 +215,16 @@ class PlanExecutorSBE final : public PlanExecutor { sbe::value::Value _valLastRecordId{0}; sbe::RuntimeEnvironment::Accessor* _oplogTs{nullptr}; + // Only for a resumed scan ("seek"). Slot holding the TypeTags::RecordId of the record to resume + // the scan from. '_seekRecordId' is the RecordId value, initialized from the slot at runtime. boost::optional _resumeRecordIdSlot; + // Only for clustered collection scans, holds the minimum record ID of the scan, if applicable. + boost::optional _minRecordIdSlot; + + // Only for clustered collection scans, holds the maximum record ID of the scan, if applicable. + boost::optional _maxRecordIdSlot; + // NOTE: '_stash' stores documents as BSON. Currently, one of the '_stash' is usages is to store // documents received from the plan during multiplanning. This means that the documents // generated during multiplanning cannot exceed maximum BSON size. $group and $lookup CAN diff --git a/src/mongo/db/query/plan_explainer.h b/src/mongo/db/query/plan_explainer.h index ffaabecc43af6..9bf62599a68e9 100644 --- a/src/mongo/db/query/plan_explainer.h +++ b/src/mongo/db/query/plan_explainer.h @@ -98,7 +98,7 @@ class PlanExplainer { * override this function if the summary stats for secondary collections need to be reported * separately. */ - virtual void getSecondarySummaryStats(std::string secondaryColl, + virtual void getSecondarySummaryStats(const NamespaceString& secondaryColl, PlanSummaryStats* statsOut) const {} /** @@ -123,15 +123,6 @@ class PlanExplainer { virtual std::vector getRejectedPlansStats( ExplainOptions::Verbosity verbosity) const = 0; - /** - * Serializes plan cache entry debug info into the provided BSONObjBuilder. The output format is - * intended to be human readable, and useful for debugging query performance problems related to - * the plan cache. - */ - virtual std::vector getCachedPlanStats( - const plan_cache_debug_info::DebugInfo& debugInfo, - ExplainOptions::Verbosity verbosity) const = 0; - /** * Returns an object containing what query knobs the planner hit during plan enumeration. 
*/ diff --git a/src/mongo/db/query/plan_explainer_factory.cpp b/src/mongo/db/query/plan_explainer_factory.cpp index c4aaaef701f2b..f6337563a8f64 100644 --- a/src/mongo/db/query/plan_explainer_factory.cpp +++ b/src/mongo/db/query/plan_explainer_factory.cpp @@ -27,15 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/query/plan_explainer_factory.h" +#include #include "mongo/db/exec/plan_cache_util.h" +#include "mongo/db/query/plan_explainer_factory.h" #include "mongo/db/query/plan_explainer_impl.h" #include "mongo/db/query/plan_explainer_sbe.h" -#include "mongo/util/duration.h" -#include +#include "mongo/util/assert_util_core.h" namespace mongo::plan_explainer_factory { std::unique_ptr make(PlanStage* root) { @@ -81,10 +81,10 @@ std::unique_ptr make( bool isMultiPlan, bool isFromPlanCache, std::shared_ptr debugInfoSBE) { - // TODO SERVER-64882: Consider invariant(debugInfoSBE) as we may not need to create a - // DebugInfoSBE from QuerySolution after the feature flag is removed. We currently need it - // because debugInfoSBE can be null if the plan was recovered from the classic plan cache. - if (!debugInfoSBE) { + // If the plan was recovered from the plan cache, we should already have 'debugInfoSBE'. + if (isFromPlanCache) { + invariant(debugInfoSBE); + } else { debugInfoSBE = std::make_shared( plan_cache_util::buildDebugInfo(solution)); } diff --git a/src/mongo/db/query/plan_explainer_factory.h b/src/mongo/db/query/plan_explainer_factory.h index 3b1618898349e..98841afe6f107 100644 --- a/src/mongo/db/query/plan_explainer_factory.h +++ b/src/mongo/db/query/plan_explainer_factory.h @@ -29,13 +29,18 @@ #pragma once +#include +#include + #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/query/optimizer/explain_interface.h" +#include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_enumerator_explain_info.h" #include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_plan_ranker.h" +#include "mongo/db/query/sbe_stage_builder.h" #include "mongo/util/duration.h" namespace mongo::plan_explainer_factory { diff --git a/src/mongo/db/query/plan_explainer_impl.cpp b/src/mongo/db/query/plan_explainer_impl.cpp index 5ff5187c5fe37..57364d7f7ab64 100644 --- a/src/mongo/db/query/plan_explainer_impl.cpp +++ b/src/mongo/db/query/plan_explainer_impl.cpp @@ -28,10 +28,23 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/plan_explainer_impl.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/exec/cached_plan.h" #include "mongo/db/exec/collection_scan.h" #include "mongo/db/exec/count_scan.h" @@ -43,15 +56,18 @@ #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sort.h" -#include "mongo/db/exec/subplan.h" #include "mongo/db/exec/text_match.h" -#include "mongo/db/exec/trial_stage.h" +#include "mongo/db/field_ref.h" #include "mongo/db/keypattern.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/query/plan_explainer_impl.h" +#include "mongo/db/query/plan_ranking_decision.h" #include "mongo/db/query/plan_summary_stats_visitor.h" #include "mongo/db/query/query_knobs_gen.h" -#include 
"mongo/db/record_id_helpers.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -491,8 +507,19 @@ void statsToBSON(const PlanStageStats& stats, if (verbosity >= ExplainOptions::Verbosity::kExecStats) { bob->appendNumber("nBucketsUnpacked", static_cast(spec->nBucketsUnpacked)); - bob->appendNumber("nMeasurementsDeleted", - static_cast(spec->nMeasurementsDeleted)); + + bool isUpdate = spec->opType.starts_with("update"); + if (isUpdate) { + bob->appendNumber("nMeasurementsMatched", + static_cast(spec->nMeasurementsMatched)); + bob->appendNumber("nMeasurementsUpdated", + static_cast(spec->nMeasurementsModified)); + bob->appendNumber("nMeasurementsUpserted", + static_cast(spec->nMeasurementsUpserted)); + } else { + bob->appendNumber("nMeasurementsDeleted", + static_cast(spec->nMeasurementsModified)); + } } } else if (STAGE_UNPACK_TIMESERIES_BUCKET == stats.stageType) { UnpackTimeseriesBucketStats* spec = @@ -511,10 +538,16 @@ void statsToBSON(const PlanStageStats& stats, } } else if (STAGE_SPOOL == stats.stageType) { SpoolStats* spec = static_cast(stats.specific.get()); + bob->appendNumber("memLimit", static_cast(spec->maxMemoryUsageBytes)); + bob->appendNumber("diskLimit", static_cast(spec->maxDiskUsageBytes)); if (verbosity >= ExplainOptions::Verbosity::kExecStats) { bob->appendNumber("totalDataSizeSpooled", static_cast(spec->totalDataSizeBytes)); + bob->appendBool("usedDisk", (spec->spills > 0)); + bob->appendNumber("spills", static_cast(spec->spills)); + bob->appendNumber("spilledDataStorageSize", + static_cast(spec->spilledDataStorageSize)); } } @@ -823,22 +856,6 @@ std::vector PlanExplainerImpl::getRejectedPlans return res; } -std::vector PlanExplainerImpl::getCachedPlanStats( - const plan_cache_debug_info::DebugInfo& debugInfo, ExplainOptions::Verbosity verbosity) const { - const auto& decision = *debugInfo.decision; - std::vector res; - auto winningPlanIdx = getWinningPlanIdx(_root); - - for (auto&& stats : decision.getStats().candidatePlanStats) { - BSONObjBuilder bob; - statsToBSON(*stats, verbosity, winningPlanIdx, &bob, &bob); - res.push_back({bob.obj(), - {verbosity >= ExplainOptions::Verbosity::kExecStats, - collectExecutionStatsSummary(stats.get(), winningPlanIdx)}}); - } - return res; -} - PlanStage* getStageByType(PlanStage* root, StageType type) { tassert(3420010, "Can't find a stage in a NULL plan root", root != nullptr); if (root->stageType() == type) { @@ -855,4 +872,20 @@ PlanStage* getStageByType(PlanStage* root, StageType type) { return nullptr; } + +std::vector getCachedPlanStats( + const plan_cache_debug_info::DebugInfo& debugInfo, ExplainOptions::Verbosity verbosity) { + const auto& decision = *debugInfo.decision; + std::vector res; + auto winningPlanIdx = getWinningPlanIdx(nullptr); + + for (auto&& stats : decision.getStats().candidatePlanStats) { + BSONObjBuilder bob; + statsToBSON(*stats, verbosity, winningPlanIdx, &bob, &bob); + res.push_back({bob.obj(), + {verbosity >= ExplainOptions::Verbosity::kExecStats, + collectExecutionStatsSummary(stats.get(), winningPlanIdx)}}); + } + return res; +} } // namespace mongo diff --git a/src/mongo/db/query/plan_explainer_impl.h b/src/mongo/db/query/plan_explainer_impl.h index f73a2ec62505d..e0ac62922323c 100644 --- a/src/mongo/db/query/plan_explainer_impl.h +++ b/src/mongo/db/query/plan_explainer_impl.h @@ -29,11 
+29,20 @@ #pragma once +#include +#include + #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_enumerator_explain_info.h" #include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/stage_types.h" #include "mongo/util/duration.h" namespace mongo { @@ -58,7 +67,7 @@ class PlanExplainerImpl final : public PlanExplainer { std::vector getRejectedPlansStats( ExplainOptions::Verbosity verbosity) const final; std::vector getCachedPlanStats(const plan_cache_debug_info::DebugInfo&, - ExplainOptions::Verbosity) const final; + ExplainOptions::Verbosity) const; private: PlanStage* const _root; @@ -70,6 +79,12 @@ class PlanExplainerImpl final : public PlanExplainer { */ PlanStage* getStageByType(PlanStage* root, StageType type); +/** + * Returns filtered plan stats from the debugInfo object for different verbosity levels. + */ +std::vector getCachedPlanStats( + const plan_cache_debug_info::DebugInfo& debugInfo, ExplainOptions::Verbosity verbosity); + /** * Adds the path-level multikey information to the explain output in a field called "multiKeyPaths". * The value associated with the "multiKeyPaths" field is an object with keys equal to those in the diff --git a/src/mongo/db/query/plan_explainer_sbe.cpp b/src/mongo/db/query/plan_explainer_sbe.cpp index 18f42a417fadb..1f4a70b2ef977 100644 --- a/src/mongo/db/query/plan_explainer_sbe.cpp +++ b/src/mongo/db/query/plan_explainer_sbe.cpp @@ -27,23 +27,72 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/plan_explainer_sbe.h" - -#include - -#include "mongo/db/exec/plan_stats_walker.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" +#include "mongo/db/fts/fts_query.h" #include "mongo/db/fts/fts_query_impl.h" -#include "mongo/db/keypattern.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/optimizer/explain_interface.h" #include "mongo/db/query/plan_explainer_impl.h" +#include "mongo/db/query/plan_explainer_sbe.h" +#include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/plan_summary_stats_visitor.h" +#include "mongo/db/query/projection.h" +#include "mongo/db/query/projection_ast.h" #include "mongo/db/query/projection_ast_util.h" #include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/string_map.h" namespace mongo { namespace { +/** + * This function replaces field names in *replace* with those from the object + * *fieldNames*, preserving field ordering. Both objects must have the same + * number of fields. 
+ * + * Example: + * + * replaceBSONKeyNames({ 'a': 1, 'b' : 1 }, { '': 'foo', '', 'bar' }) => + * + * { 'a' : 'foo' }, { 'b' : 'bar' } + */ +BSONObj replaceBSONFieldNames(const BSONObj& replace, const BSONObj& fieldNames) { + invariant(replace.nFields() == fieldNames.nFields()); + + BSONObjBuilder bob; + auto iter = fieldNames.begin(); + + for (const BSONElement& el : replace) { + bob.appendAs(el, (*iter++).fieldNameStringData()); + } + + return bob.obj(); +} + void statsToBSON(const QuerySolutionNode* node, BSONObjBuilder* bob, const BSONObjBuilder* topLevelBob) { @@ -56,7 +105,13 @@ void statsToBSON(const QuerySolutionNode* node, return; } - bob->append("stage", stageTypeToString(node->getType())); + StageType nodeType = node->getType(); + if ((nodeType == STAGE_COLLSCAN) && + static_cast(node)->doSbeClusteredCollectionScan()) { + bob->append("stage", sbeClusteredCollectionScanToString()); + } else { + bob->append("stage", stageTypeToString(node->getType())); + } bob->appendNumber("planNodeId", static_cast(node->nodeId())); // Display the BSON representation of the filter, if there is one. @@ -77,6 +132,34 @@ void statsToBSON(const QuerySolutionNode* node, } break; } + case STAGE_COUNT_SCAN: { + auto csn = static_cast(node); + + bob->append("keyPattern", csn->index.keyPattern); + bob->append("indexName", csn->index.identifier.catalogName); + auto collation = + csn->index.infoObj.getObjectField(IndexDescriptor::kCollationFieldName); + if (!collation.isEmpty()) { + bob->append("collation", collation); + } + bob->appendBool("isMultiKey", csn->index.multikey); + if (!csn->index.multikeyPaths.empty()) { + appendMultikeyPaths(csn->index.keyPattern, csn->index.multikeyPaths, bob); + } + bob->appendBool("isUnique", csn->index.unique); + bob->appendBool("isSparse", csn->index.sparse); + bob->appendBool("isPartial", csn->index.filterExpr != nullptr); + bob->append("indexVersion", static_cast(csn->index.version)); + + BSONObjBuilder indexBoundsBob(bob->subobjStart("indexBounds")); + indexBoundsBob.append("startKey", + replaceBSONFieldNames(csn->startKey, csn->index.keyPattern)); + indexBoundsBob.append("startKeyInclusive", csn->startKeyInclusive); + indexBoundsBob.append("endKey", + replaceBSONFieldNames(csn->endKey, csn->index.keyPattern)); + indexBoundsBob.append("endKeyInclusive", csn->endKeyInclusive); + break; + } case STAGE_GEO_NEAR_2D: { auto geo2d = static_cast(node); bob->append("keyPattern", geo2d->index.keyPattern); @@ -169,7 +252,8 @@ void statsToBSON(const QuerySolutionNode* node, case STAGE_EQ_LOOKUP: { auto eln = static_cast(node); - bob->append("foreignCollection", eln->foreignCollection.toString()); + bob->append("foreignCollection", + NamespaceStringUtil::serialize(eln->foreignCollection)); bob->append("localField", eln->joinFieldLocal.fullPath()); bob->append("foreignField", eln->joinFieldForeign.fullPath()); bob->append("asField", eln->joinField.fullPath()); @@ -413,7 +497,7 @@ void PlanExplainerSBE::getSummaryStats(PlanSummaryStats* statsOut) const { statsOut->collectionScansNonTailable = _debugInfo->mainStats.collectionScansNonTailable; } -void PlanExplainerSBE::getSecondarySummaryStats(std::string secondaryColl, +void PlanExplainerSBE::getSecondarySummaryStats(const NamespaceString& secondaryColl, PlanSummaryStats* statsOut) const { tassert(6466202, "statsOut should be a valid pointer", statsOut); @@ -480,28 +564,6 @@ std::vector PlanExplainerSBE::getRejectedPlansS return res; } -std::vector PlanExplainerSBE::getCachedPlanStats( - const plan_cache_debug_info::DebugInfo& 
debugInfo, ExplainOptions::Verbosity verbosity) const { - const auto& decision = *debugInfo.decision; - std::vector res; - - auto&& stats = decision.getStats(); - if (verbosity >= ExplainOptions::Verbosity::kExecStats) { - for (auto&& planStats : stats.candidatePlanStats) { - invariant(planStats); - res.push_back( - buildPlanStatsDetails(nullptr, *planStats, boost::none, boost::none, verbosity)); - } - } else { - // At the "queryPlanner" verbosity we only need to provide details about the winning plan - // when explaining from the plan cache. - invariant(verbosity == ExplainOptions::Verbosity::kQueryPlanner); - res.push_back({stats.serializedWinningPlan, boost::none}); - } - - return res; -} - boost::optional PlanExplainerSBE::buildCascadesPlan() const { if (_optimizerData) { return _optimizerData->explainBSON(); diff --git a/src/mongo/db/query/plan_explainer_sbe.h b/src/mongo/db/query/plan_explainer_sbe.h index d539668a550e4..77576a5d93da0 100644 --- a/src/mongo/db/query/plan_explainer_sbe.h +++ b/src/mongo/db/query/plan_explainer_sbe.h @@ -29,12 +29,31 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/query/optimizer/explain_interface.h" #include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_summary_stats.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_plan_ranker.h" +#include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" namespace mongo { @@ -72,14 +91,12 @@ class PlanExplainerSBE final : public PlanExplainer { const ExplainVersion& getVersion() const final; std::string getPlanSummary() const final; void getSummaryStats(PlanSummaryStats* statsOut) const final; - void getSecondarySummaryStats(std::string secondaryColl, + void getSecondarySummaryStats(const NamespaceString& secondaryColl, PlanSummaryStats* statsOut) const override; PlanStatsDetails getWinningPlanStats(ExplainOptions::Verbosity verbosity) const final; PlanStatsDetails getWinningPlanTrialStats() const final; std::vector getRejectedPlansStats( ExplainOptions::Verbosity verbosity) const final; - std::vector getCachedPlanStats(const plan_cache_debug_info::DebugInfo&, - ExplainOptions::Verbosity) const final; private: static boost::optional buildExecPlanDebugInfo( diff --git a/src/mongo/db/query/plan_insert_listener.cpp b/src/mongo/db/query/plan_insert_listener.cpp index d6ee899dce556..a3cb3e75f9cdb 100644 --- a/src/mongo/db/query/plan_insert_listener.cpp +++ b/src/mongo/db/query/plan_insert_listener.cpp @@ -27,16 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/query/plan_insert_listener.h" +#include -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/curop.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/find_common.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_insert_listener.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -76,28 +86,31 @@ bool shouldWaitForInserts(OperationContext* opCtx, return false; } -std::shared_ptr getCappedInsertNotifier(OperationContext* opCtx, - const NamespaceString& nss, - PlanYieldPolicy* yieldPolicy) { +std::unique_ptr getCappedInsertNotifier(OperationContext* opCtx, + const NamespaceString& nss, + PlanYieldPolicy* yieldPolicy) { // We don't expect to need a capped insert notifier for non-yielding plans. invariant(yieldPolicy->canReleaseLocksDuringExecution()); - // We can only wait if we have a collection; otherwise we should retry immediately when - // we hit EOF. + // In case of the read concern majority, return a majority committed point notifier, otherwise, + // a notifier associated with that capped collection // - // Hold reference to the catalog for collection lookup without locks to be safe. - auto catalog = CollectionCatalog::get(opCtx); - auto collection = catalog->lookupCollectionByNamespace(opCtx, nss); - invariant(collection); - - return collection->getRecordStore()->getCappedInsertNotifier(); + // We can only wait on the capped collection insert notifier if the collection is present, + // otherwise we should retry immediately when we hit EOF. + if (opCtx->recoveryUnit()->getTimestampReadSource() == RecoveryUnit::kMajorityCommitted) { + return std::make_unique(); + } else { + auto collection = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); + invariant(collection); + + return std::make_unique( + collection->getRecordStore()->getCappedInsertNotifier()); + } } void waitForInserts(OperationContext* opCtx, PlanYieldPolicy* yieldPolicy, - CappedInsertNotifierData* notifierData) { - invariant(notifierData->notifier); - + std::unique_ptr& notifier) { // The notifier wait() method will not wait unless the version passed to it matches the // current version of the notifier. 
Since the version passed to it is the current version // of the notifier at the time of the previous EOF, we require two EOFs in a row with no @@ -107,10 +120,10 @@ void waitForInserts(OperationContext* opCtx, curOp->pauseTimer(); ON_BLOCK_EXIT([curOp] { curOp->resumeTimer(); }); - uint64_t currentNotifierVersion = notifierData->notifier->getVersion(); - auto yieldResult = yieldPolicy->yieldOrInterrupt(opCtx, [opCtx, notifierData] { + notifier->prepareForWait(opCtx); + auto yieldResult = yieldPolicy->yieldOrInterrupt(opCtx, [opCtx, &notifier] { const auto deadline = awaitDataState(opCtx).waitForInsertsDeadline; - notifierData->notifier->waitUntil(notifierData->lastEOFVersion, deadline); + notifier->waitUntil(opCtx, deadline); if (MONGO_unlikely(planExecutorHangWhileYieldedInWaitForInserts.shouldFail())) { LOGV2(4452903, "PlanExecutor - planExecutorHangWhileYieldedInWaitForInserts fail point enabled. " @@ -118,7 +131,7 @@ void waitForInserts(OperationContext* opCtx, planExecutorHangWhileYieldedInWaitForInserts.pauseWhileSet(); } }); - notifierData->lastEOFVersion = currentNotifierVersion; + notifier->doneWaiting(opCtx); uassertStatusOK(yieldResult); } diff --git a/src/mongo/db/query/plan_insert_listener.h b/src/mongo/db/query/plan_insert_listener.h index be4407d3df802..16b97cd45bce3 100644 --- a/src/mongo/db/query/plan_insert_listener.h +++ b/src/mongo/db/query/plan_insert_listener.h @@ -29,19 +29,111 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/time_support.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery namespace mongo::insert_listener { -/** - * A helper wrapper struct around CappedInsertNotifier which also holds the last version returned - * by the 'notifier'. - */ -struct CappedInsertNotifierData { - std::shared_ptr notifier; - uint64_t lastEOFVersion = ~0; + +// An abstract class used to notify on new insert events. +class Notifier { +public: + virtual ~Notifier(){}; + + // Performs the necessary work needed for waiting. Should be called prior to calling waitUntil(). + virtual void prepareForWait(OperationContext* opCtx) = 0; + + // Performs any necessary steps after waiting. Should be called after waitUntil(). + // After calling doneWaiting, the caller must attempt to read the data waited for before + // calling prepareForWait and waitUntil again, or a spurious wait may occur. + virtual void doneWaiting(OperationContext* opCtx) = 0; + + // Blocks the caller until an insert event is fired or the deadline is hit. Must be robust + // to being called multiple times without an intervening read. + virtual void waitUntil(OperationContext* opCtx, Date_t deadline) = 0; +}; + +// Class used to notify listeners of local inserts into the capped collection.
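//
// For any Notifier implementation, the intended call sequence (a simplified sketch of what
// insert_listener::waitForInserts() above does; the real code issues the wait from inside a
// yield) is:
//
//     notifier->prepareForWait(opCtx);       // capture the state/version to wait against
//     notifier->waitUntil(opCtx, deadline);  // block until an insert event or the deadline
//     notifier->doneWaiting(opCtx);          // record the wait; re-read the data before waiting again
//
// LocalCappedInsertNotifier below implements this contract with the capped collection's insert
// version counter, so the wait only blocks after two consecutive EOFs with no insert in between.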
+class LocalCappedInsertNotifier final : public Notifier { +public: + LocalCappedInsertNotifier(std::shared_ptr notifier) + : _notifier(notifier) {} + + void prepareForWait(OperationContext* opCtx) final { + invariant(_notifier); + _currentVersion = _notifier->getVersion(); + } + + void waitUntil(OperationContext* opCtx, Date_t deadline) final { + _notifier->waitUntil(opCtx, _lastEOFVersion, deadline); + } + + void doneWaiting(OperationContext* opCtx) final { + _lastEOFVersion = _currentVersion; + } + +private: + std::shared_ptr _notifier; + uint64_t _lastEOFVersion = ~uint64_t(0); + // This will be initialized by prepareForWait. + uint64_t _currentVersion; +}; + +// Class used to notify listeners on majority committed point advancement events. +class MajorityCommittedPointNotifier final : public Notifier { +public: + MajorityCommittedPointNotifier(repl::OpTime opTime = repl::OpTime()) + : _opTimeToBeMajorityCommitted(opTime) {} + + // Computes the OpTime to wait on by incrementing the current read timestamp. + void prepareForWait(OperationContext* opCtx) final { + auto readTs = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); + invariant(readTs); + _opTimeToBeMajorityCommitted = + repl::OpTime(*readTs + 1, repl::ReplicationCoordinator::get(opCtx)->getTerm()); + } + + void waitUntil(OperationContext* opCtx, Date_t deadline) final { + auto majorityCommittedFuture = WaitForMajorityService::get(opCtx->getServiceContext()) + .waitUntilMajorityForRead(_opTimeToBeMajorityCommitted, + opCtx->getCancellationToken()); + opCtx->runWithDeadline(deadline, opCtx->getTimeoutError(), [&] { + auto status = majorityCommittedFuture.getNoThrow(opCtx); + if (!status.isOK()) { + LOGV2_DEBUG(7455500, + 3, + "Failure waiting for the majority committed event", + "status"_attr = status.toString()); + } + }); + } + + void doneWaiting(OperationContext* opCtx) final {} + +private: + repl::OpTime _opTimeToBeMajorityCommitted; }; /** @@ -61,12 +153,11 @@ bool shouldWaitForInserts(OperationContext* opCtx, PlanYieldPolicy* yieldPolicy); /** - * Gets the CappedInsertNotifier for a capped collection. Returns nullptr if this plan executor - * is not capable of yielding based on a notifier. + * Returns an insert notifier for a capped collection. */ -std::shared_ptr getCappedInsertNotifier(OperationContext* opCtx, - const NamespaceString& nss, - PlanYieldPolicy* yieldPolicy); +std::unique_ptr getCappedInsertNotifier(OperationContext* opCtx, + const NamespaceString& nss, + PlanYieldPolicy* yieldPolicy); /** * Called for tailable and awaitData cursors in order to yield locks and waits for inserts to @@ -74,8 +165,9 @@ std::shared_ptr getCappedInsertNotifier(OperationContext* * and there may be new results. If the PlanExecutor was killed during a yield, throws an * exception. 
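 *
 * A typical caller (a simplified sketch of the executor's getNext() loop; 'cq', 'yieldPolicy'
 * and 'notifier' stand in for the caller's canonical query, yield policy and the notifier
 * obtained from getCappedInsertNotifier()) looks roughly like:
 *
 *     if (insert_listener::shouldWaitForInserts(opCtx, cq, yieldPolicy)) {
 *         insert_listener::waitForInserts(opCtx, yieldPolicy, notifier);
 *         continue;  // the plan may now produce new results
 *     }
 *     return PlanExecutor::ExecState::IS_EOF;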
*/ - void waitForInserts(OperationContext* opCtx, PlanYieldPolicy* yieldPolicy, - CappedInsertNotifierData* notifierData); + std::unique_ptr& notifier); } // namespace mongo::insert_listener + +#undef MONGO_LOGV2_DEFAULT_COMPONENT diff --git a/src/mongo/db/query/plan_ranker.cpp b/src/mongo/db/query/plan_ranker.cpp index ba06a55c5cd1e..6154b29a9ad87 100644 --- a/src/mongo/db/query/plan_ranker.cpp +++ b/src/mongo/db/query/plan_ranker.cpp @@ -28,11 +28,22 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/query/plan_ranker.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/query/plan_ranker.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/plan_ranker.h b/src/mongo/db/query/plan_ranker.h index d456aa69d3150..b23943b618c7d 100644 --- a/src/mongo/db/query/plan_ranker.h +++ b/src/mongo/db/query/plan_ranker.h @@ -29,14 +29,28 @@ #pragma once +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/stages/plan_stats.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/query/explain.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_ranking_decision.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/container_size_helper.h" namespace mongo::plan_ranker { diff --git a/src/mongo/db/query/plan_ranker_test.cpp b/src/mongo/db/query/plan_ranker_test.cpp index 20034580e7999..f57c97ace8f43 100644 --- a/src/mongo/db/query/plan_ranker_test.cpp +++ b/src/mongo/db/query/plan_ranker_test.cpp @@ -32,9 +32,16 @@ */ #include "mongo/db/query/plan_ranker.h" + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/query/plan_ranker_util.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using namespace mongo; diff --git a/src/mongo/db/query/plan_yield_policy.cpp b/src/mongo/db/query/plan_yield_policy.cpp index 8243f76032846..6dbda1b2d30c1 100644 --- a/src/mongo/db/query/plan_yield_policy.cpp +++ b/src/mongo/db/query/plan_yield_policy.cpp @@ -27,33 +27,75 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/query/plan_yield_policy.h" +#include -#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_uuid_mismatch_info.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/yieldable.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" -#include "mongo/util/time_support.h" namespace mongo { -PlanYieldPolicy::PlanYieldPolicy(YieldPolicy policy, - ClockSource* cs, - int yieldIterations, - Milliseconds yieldPeriod, - const Yieldable* yieldable, - std::unique_ptr callbacks) - : _policy(policy), +PlanYieldPolicy::PlanYieldPolicy( + OperationContext* opCtx, + YieldPolicy policy, + ClockSource* cs, + int yieldIterations, + Milliseconds yieldPeriod, + stdx::variant yieldable, + std::unique_ptr callbacks) + : _policy(getPolicyOverrideForOperation(opCtx, policy)), _yieldable(yieldable), _callbacks(std::move(callbacks)), _elapsedTracker(cs, yieldIterations, yieldPeriod) { - invariant(!_yieldable || _yieldable->yieldable() || - policy == YieldPolicy::WRITE_CONFLICT_RETRY_ONLY || policy == YieldPolicy::NO_YIELD || - policy == YieldPolicy::INTERRUPT_ONLY || policy == YieldPolicy::ALWAYS_TIME_OUT || - policy == YieldPolicy::ALWAYS_MARK_KILLED); + stdx::visit(OverloadedVisitor{[&](const Yieldable* collectionPtr) { + invariant(!collectionPtr || collectionPtr->yieldable() || + policy == YieldPolicy::WRITE_CONFLICT_RETRY_ONLY || + policy == YieldPolicy::NO_YIELD || + policy == YieldPolicy::INTERRUPT_ONLY || + policy == YieldPolicy::ALWAYS_TIME_OUT || + policy == YieldPolicy::ALWAYS_MARK_KILLED); + }, + [&](const YieldThroughAcquisitions& yieldThroughAcquisitions) { + // CollectionAcquisitions are always yieldable. + }}, + _yieldable); +} + +PlanYieldPolicy::YieldPolicy PlanYieldPolicy::getPolicyOverrideForOperation( + OperationContext* opCtx, PlanYieldPolicy::YieldPolicy desired) { + // We may have a null opCtx in testing. + if (MONGO_unlikely(!opCtx)) { + return desired; + } + // Multi-document transactions cannot yield locks or snapshots. We convert to a non-yielding + // interruptible plan. + if (opCtx->inMultiDocumentTransaction() && + (desired == YieldPolicy::YIELD_AUTO || desired == YieldPolicy::YIELD_MANUAL || + desired == YieldPolicy::WRITE_CONFLICT_RETRY_ONLY)) { + return YieldPolicy::INTERRUPT_ONLY; + } + + // If the state of our locks held is not yieldable at all, we will assume this is an internal + // operation that should not be interrupted or yielded. + // TODO: SERVER-76238 Evaluate if we can make everything INTERRUPT_ONLY instead. + if (!opCtx->lockState()->canSaveLockState() && + (desired == YieldPolicy::YIELD_AUTO || desired == YieldPolicy::YIELD_MANUAL)) { + return YieldPolicy::NO_YIELD; + } + + return desired; } bool PlanYieldPolicy::shouldYieldOrInterrupt(OperationContext* opCtx) { @@ -96,7 +138,7 @@ Status PlanYieldPolicy::yieldOrInterrupt(OperationContext* opCtx, for (int attempt = 1; true; attempt++) { try { // Saving and restoring can modify '_yieldable', so we make a copy before we start. 
- const Yieldable* yieldable = _yieldable; + const auto yieldable = _yieldable; try { saveState(opCtx); @@ -127,17 +169,26 @@ Status PlanYieldPolicy::yieldOrInterrupt(OperationContext* opCtx, invariant(!opCtx->isLockFreeReadsOp()); opCtx->recoveryUnit()->abandonSnapshot(); } else { - invariant(yieldable); - performYield(opCtx, *yieldable, whileYieldingFn); + if (usesCollectionAcquisitions()) { + performYieldWithAcquisitions(opCtx, whileYieldingFn); + } else { + const Yieldable* yieldablePtr = stdx::get(yieldable); + invariant(yieldablePtr); + performYield(opCtx, *yieldablePtr, whileYieldingFn); + } } - restoreState(opCtx, yieldable); + restoreState(opCtx, + stdx::holds_alternative(yieldable) + ? stdx::get(yieldable) + : nullptr); return Status::OK(); - } catch (const WriteConflictException&) { + } catch (const WriteConflictException& e) { if (_callbacks) { _callbacks->handledWriteConflict(opCtx); } - logWriteConflictAndBackoff(attempt, "query yield", ""_sd); + logWriteConflictAndBackoff( + attempt, "query yield", e.reason(), NamespaceStringOrUUID(NamespaceString())); // Retry the yielding process. } catch (...) { // Errors other than write conflicts don't get retried, and should instead result in @@ -154,42 +205,32 @@ void PlanYieldPolicy::performYield(OperationContext* opCtx, std::function whileYieldingFn) { // Things have to happen here in a specific order: // * Release 'yieldable'. - // * Release lock mgr locks. + // * Abandon the current storage engine snapshot. // * Check for interrupt if the yield policy requires. - // * Abondon the query's current storage engine snapshot. - // * Reacquire lock mgr locks. + // * Release lock manager locks. + // * Reacquire lock manager locks. // * Restore 'yieldable'. - Locker* locker = opCtx->lockState(); + invariant(_policy == YieldPolicy::YIELD_AUTO || _policy == YieldPolicy::YIELD_MANUAL); - if (locker->isGlobalLockedRecursively()) { - // No purpose in yielding if the locks are recursively held and cannot be released. - return; - } - - // Since the locks are not recursively held, this is a top level operation and we can safely - // clear the 'yieldable' state before unlocking and then re-establish it after re-locking. + // If we are here, the caller has guaranteed locks are not recursively held. This is a top level + // operation and we can safely clear the 'yieldable' state before unlocking and then + // re-establish it after re-locking. yieldable.yield(); - Locker::LockSnapshot snapshot; - auto unlocked = locker->saveLockStateAndUnlock(&snapshot); + // Release any storage engine resources. This requires holding a global lock to correctly + // synchronize with states such as shutdown and rollback. + opCtx->recoveryUnit()->abandonSnapshot(); - // After all steps to relinquish locks and save the execution plan have been taken, check - // for interrupt. This is the main interrupt check during query execution. Yield points and - // interrupt points are one and the same. + // Check for interrupt before releasing locks. This avoids the complexities of having to + // re-acquire locks to clean up when we are interrupted. This is the main interrupt check during + // query execution. Yield points and interrupt points are one and the same. if (getPolicy() == PlanYieldPolicy::YieldPolicy::YIELD_AUTO) { opCtx->checkForInterrupt(); // throws } - if (!unlocked) { - // Nothing was unlocked. Recursively held locks are not the only reason locks cannot be - // released. Restore the 'yieldable' state before returning. 
- yieldable.restore(); - return; - } - - // Top-level locks are freed, release any potential low-level (storage engine-specific - // locks). If we are yielding, we are at a safe place to do so. - opCtx->recoveryUnit()->abandonSnapshot(); + Locker* locker = opCtx->lockState(); + Locker::LockSnapshot snapshot; + locker->saveLockStateAndUnlock(&snapshot); if (_callbacks) { _callbacks->duringYield(opCtx); @@ -207,4 +248,52 @@ void PlanYieldPolicy::performYield(OperationContext* opCtx, yieldable.restore(); } +void PlanYieldPolicy::performYieldWithAcquisitions(OperationContext* opCtx, + std::function whileYieldingFn) { + // Things have to happen here in a specific order: + // * Yield the acquired TransactionResources + // * Abandon the current storage engine snapshot. + // * Check for interrupt if the yield policy requires. + // * Restore the yielded TransactionResources + invariant(_policy == YieldPolicy::YIELD_AUTO || _policy == YieldPolicy::YIELD_MANUAL); + + // Release any storage engine resources. This requires holding a global lock to correctly + // synchronize with states such as shutdown and rollback. + opCtx->recoveryUnit()->abandonSnapshot(); + + // Check for interrupt before releasing locks. This avoids the complexities of having to + // re-acquire locks to clean up when we are interrupted. This is the main interrupt check during + // query execution. Yield points and interrupt points are one and the same. + if (getPolicy() == PlanYieldPolicy::YieldPolicy::YIELD_AUTO) { + opCtx->checkForInterrupt(); // throws + } + + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx); + ScopeGuard yieldFailedScopeGuard( + [&] { yieldedTransactionResources.transitionTransactionResourcesToFailedState(opCtx); }); + + if (_callbacks) { + _callbacks->duringYield(opCtx); + } + + if (whileYieldingFn) { + whileYieldingFn(); + } + + yieldFailedScopeGuard.dismiss(); + try { + restoreTransactionResourcesToOperationContext(opCtx, + std::move(yieldedTransactionResources)); + } catch (const ExceptionFor& ex) { + const auto extraInfo = ex.extraInfo(); + if (extraInfo->actualCollection()) { + throwCollectionRenamedError(NamespaceString(extraInfo->expectedCollection()), + NamespaceString(*extraInfo->actualCollection()), + extraInfo->collectionUUID()); + } else { + throwCollectionDroppedError(extraInfo->collectionUUID()); + } + } +} + } // namespace mongo diff --git a/src/mongo/db/query/plan_yield_policy.h b/src/mongo/db/query/plan_yield_policy.h index 4cc060abf3ae2..6831baa754a35 100644 --- a/src/mongo/db/query/plan_yield_policy.h +++ b/src/mongo/db/query/plan_yield_policy.h @@ -29,16 +29,29 @@ #pragma once +#include #include - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/duration.h" #include "mongo/util/elapsed_tracker.h" +#include "mongo/util/str.h" #include "mongo/util/uuid.h" namespace mongo { class ClockSource; + class Yieldable; class YieldPolicyCallbacks { @@ -167,23 +180,36 @@ class PlanYieldPolicy { const NamespaceString& newNss, UUID collUuid) { uasserted(ErrorCodes::QueryPlanKilled, - str::stream() << "collection renamed from '" << oldNss << "' to '" << newNss - << "'. 
UUID " << collUuid); + str::stream() << "collection renamed from '" << oldNss.toStringForErrorMsg() + << "' to '" << newNss.toStringForErrorMsg() << "'. UUID " + << collUuid); } + /** + * Returns the policy that this operation should use, accounting for any special circumstances, + * and otherwise the desired policy. Should always be used when constructing a PlanYieldPolicy. + */ + static YieldPolicy getPolicyOverrideForOperation(OperationContext* opCtx, YieldPolicy desired); + + class YieldThroughAcquisitions {}; + /** * Constructs a PlanYieldPolicy of the given 'policy' type. This class uses an ElapsedTracker * to keep track of elapsed time, which is initialized from the parameters 'cs', * 'yieldIterations' and 'yieldPeriod'. * * If provided, the given 'yieldable' is released and restored by the 'PlanYieldPolicy' (in - * addition to releasing/restoring locks and the storage engine snapshot). + * addition to releasing/restoring locks and the storage engine snapshot). The provided 'policy' + * will be overridden depending on the nature of this operation. For example, multi-document + * transactions will always downgrade to INTERRUPT_ONLY, and operations with recursively held + * locks will downgrade to NO_YIELD. */ - PlanYieldPolicy(YieldPolicy policy, + PlanYieldPolicy(OperationContext* opCtx, + YieldPolicy policy, ClockSource* cs, int yieldIterations, Milliseconds yieldPeriod, - const Yieldable* yieldable, + stdx::variant yieldable, std::unique_ptr callbacks); virtual ~PlanYieldPolicy() = default; @@ -273,9 +299,14 @@ class PlanYieldPolicy { } void setYieldable(const Yieldable* yieldable) { + invariant(!usesCollectionAcquisitions()); _yieldable = yieldable; } + bool usesCollectionAcquisitions() const { + return stdx::holds_alternative(_yieldable); + } + private: /** * Functions to be implemented by derived classes which save and restore query execution state. @@ -303,9 +334,11 @@ class PlanYieldPolicy { void performYield(OperationContext* opCtx, const Yieldable& yieldable, std::function whileYieldingFn); + void performYieldWithAcquisitions(OperationContext* opCtx, + std::function whileYieldingFn); const YieldPolicy _policy; - const Yieldable* _yieldable; + stdx::variant _yieldable; std::unique_ptr _callbacks; bool _forceYield = false; diff --git a/src/mongo/db/query/plan_yield_policy_impl.cpp b/src/mongo/db/query/plan_yield_policy_impl.cpp index e4f0688c97f2f..eb894718b8790 100644 --- a/src/mongo/db/query/plan_yield_policy_impl.cpp +++ b/src/mongo/db/query/plan_yield_policy_impl.cpp @@ -27,21 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/query/plan_yield_policy_impl.h" - +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/restore_context.h" #include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/duration.h" namespace mongo { -PlanYieldPolicyImpl::PlanYieldPolicyImpl(PlanExecutorImpl* exec, - PlanYieldPolicy::YieldPolicy policy, - const Yieldable* yieldable, - std::unique_ptr callbacks) - : PlanYieldPolicy(exec->getOpCtx()->lockState()->isGlobalLockedRecursively() - ? 
PlanYieldPolicy::YieldPolicy::NO_YIELD - : policy, +PlanYieldPolicyImpl::PlanYieldPolicyImpl( + PlanExecutorImpl* exec, + PlanYieldPolicy::YieldPolicy policy, + stdx::variant yieldable, + std::unique_ptr callbacks) + : PlanYieldPolicy(exec->getOpCtx(), + policy, exec->getOpCtx()->getServiceContext()->getFastClockSource(), internalQueryExecYieldIterations.load(), Milliseconds{internalQueryExecYieldPeriodMS.load()}, diff --git a/src/mongo/db/query/plan_yield_policy_impl.h b/src/mongo/db/query/plan_yield_policy_impl.h index 5f0db5388b93c..31ea47fbaf233 100644 --- a/src/mongo/db/query/plan_yield_policy_impl.h +++ b/src/mongo/db/query/plan_yield_policy_impl.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/db/operation_context.h" #include "mongo/db/query/plan_executor_impl.h" #include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/yieldable.h" @@ -39,7 +43,7 @@ class PlanYieldPolicyImpl final : public PlanYieldPolicy { public: PlanYieldPolicyImpl(PlanExecutorImpl* exec, PlanYieldPolicy::YieldPolicy policy, - const Yieldable* yieldable, + stdx::variant yieldable, std::unique_ptr callbacks); private: diff --git a/src/mongo/db/query/plan_yield_policy_sbe.cpp b/src/mongo/db/query/plan_yield_policy_sbe.cpp index 1f10a2fe74766..6a4a7316e3b68 100644 --- a/src/mongo/db/query/plan_yield_policy_sbe.cpp +++ b/src/mongo/db/query/plan_yield_policy_sbe.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/query/plan_yield_policy_sbe.h" namespace mongo { diff --git a/src/mongo/db/query/plan_yield_policy_sbe.h b/src/mongo/db/query/plan_yield_policy_sbe.h index 0ab0e3aa95602..0307624fbc578 100644 --- a/src/mongo/db/query/plan_yield_policy_sbe.h +++ b/src/mongo/db/query/plan_yield_policy_sbe.h @@ -29,22 +29,37 @@ #pragma once +#include +#include +#include +#include + #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" namespace mongo { class PlanYieldPolicySBE final : public PlanYieldPolicy { public: - PlanYieldPolicySBE(YieldPolicy policy, + PlanYieldPolicySBE(OperationContext* opCtx, + YieldPolicy policy, ClockSource* clockSource, int yieldFrequency, Milliseconds yieldPeriod, - const Yieldable* yieldable, + stdx::variant yieldable, std::unique_ptr callbacks) - : PlanYieldPolicy( - policy, clockSource, yieldFrequency, yieldPeriod, yieldable, std::move(callbacks)), + : PlanYieldPolicy(opCtx, + policy, + clockSource, + yieldFrequency, + yieldPeriod, + yieldable, + std::move(callbacks)), _useExperimentalCommitTxnBehavior(gYieldingSupportForSBE) { uassert(4822879, "WRITE_CONFLICT_RETRY_ONLY yield policy is not supported in SBE", diff --git a/src/mongo/db/query/planner_access.cpp b/src/mongo/db/query/planner_access.cpp index ce5ecda487454..684a4024b1152 100644 --- a/src/mongo/db/query/planner_access.cpp +++ b/src/mongo/db/query/planner_access.cpp @@ -28,33 +28,66 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/planner_access.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include +#include +#include #include -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" 
+#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/basic_types.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/fts/fts_index_format.h" -#include "mongo/db/fts/fts_query_noop.h" +#include "mongo/db/fts/fts_query.h" +#include "mongo/db/fts/fts_query_impl.h" #include "mongo/db/fts/fts_spec.h" +#include "mongo/db/fts/fts_util.h" +#include "mongo/db/index_names.h" #include "mongo/db/matcher/expression.h" -#include "mongo/db/matcher/expression_array.h" +#include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_geo.h" -#include "mongo/db/matcher/expression_text.h" +#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_text_base.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/index_tag.h" #include "mongo/db/query/indexability.h" +#include "mongo/db/query/planner_access.h" #include "mongo/db/query/planner_wildcard_helpers.h" +#include "mongo/db/query/projection.h" +#include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_common.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/storage/key_format.h" #include "mongo/logv2/log.h" -#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -133,7 +166,6 @@ std::vector canProvideSortWithMergeSort( } return shouldReverseScan; } - } // namespace namespace mongo { @@ -206,6 +238,26 @@ bool isOplogTsLowerBoundPred(const mongo::MatchExpression* me) { return me->path() == repl::OpTime::kTimestampFieldName; } +/** + * Sets the lowPriority parameter on the given index scan node. + */ +void deprioritizeUnboundedIndexScan(IndexScanNode* solnRoot, + const FindCommandRequest& findCommand) { + auto sort = findCommand.getSort(); + if (findCommand.getLimit() && + (sort.isEmpty() || sort[query_request_helper::kNaturalSortField])) { + // There is a limit with either no sort or the natural sort. + return; + } + + auto indexScan = checked_cast(solnRoot); + if (!indexScan->bounds.isUnbounded()) { + return; + } + + indexScan->lowPriority = true; +} + // True if the element type is affected by a collator (i.e. it is or contains a String). bool affectedByCollator(const BSONElement& element) { switch (element.type()) { @@ -249,22 +301,22 @@ void setHighestRecord(boost::optional& curr, const BSONObj& newMa // Returns whether element is not affected by collators or query and collection collators are // compatible. 
-bool compatibleCollator(const QueryPlannerParams& params, +bool compatibleCollator(const CollatorInterface* collCollator, const CollatorInterface* queryCollator, const BSONElement& element) { - auto const collCollator = params.clusteredCollectionCollator; bool compatible = CollatorInterface::collatorsMatch(queryCollator, collCollator); return compatible || !affectedByCollator(element); } +} // namespace -/** - * Helper function that checks to see if min() or max() were provided along with the query. If so, - * adjusts the collection scan bounds to fit the constraints. - */ -void handleRIDRangeMinMax(const CanonicalQuery& query, - CollectionScanNode* collScan, - const QueryPlannerParams& params, - const CollatorInterface* collator) { +void QueryPlannerAccess::handleRIDRangeMinMax( + const CanonicalQuery& query, + const int direction, + const CollatorInterface* queryCollator, + const CollatorInterface* ccCollator, + boost::optional& minRecord, + boost::optional& maxRecord, + CollectionScanParams::ScanBoundInclusion& boundInclusion) { BSONObj minObj = query.getFindCommandRequest().getMin(); BSONObj maxObj = query.getFindCommandRequest().getMax(); if (minObj.isEmpty() && maxObj.isEmpty()) { @@ -278,56 +330,63 @@ void handleRIDRangeMinMax(const CanonicalQuery& query, uassert( 6137402, "min() / max() are only supported for forward collection scans on clustered collections", - collScan->direction == 1); + direction == 1); boost::optional newMinRecord, newMaxRecord; - if (!maxObj.isEmpty() && compatibleCollator(params, collator, maxObj.firstElement())) { + if (!maxObj.isEmpty() && compatibleCollator(ccCollator, queryCollator, maxObj.firstElement())) { // max() is exclusive. // Assumes clustered collection scans are only supported with the forward direction. - collScan->boundInclusion = - CollectionScanParams::ScanBoundInclusion::kIncludeStartRecordOnly; - setLowestRecord(collScan->maxRecord, - IndexBoundsBuilder::objFromElement(maxObj.firstElement(), collator)); + boundInclusion = CollectionScanParams::ScanBoundInclusion::kIncludeStartRecordOnly; + setLowestRecord(maxRecord, + IndexBoundsBuilder::objFromElement(maxObj.firstElement(), queryCollator)); } - if (!minObj.isEmpty() && compatibleCollator(params, collator, minObj.firstElement())) { + if (!minObj.isEmpty() && compatibleCollator(ccCollator, queryCollator, minObj.firstElement())) { // The min() is inclusive as are bounded collection scans by default. - setHighestRecord(collScan->minRecord, - IndexBoundsBuilder::objFromElement(minObj.firstElement(), collator)); + setHighestRecord(minRecord, + IndexBoundsBuilder::objFromElement(minObj.firstElement(), queryCollator)); } } -/** - * Helper function to add an RID range to collection scans. - * If the query solution tree contains a collection scan node with a suitable comparison - * predicate on '_id', we add a minRecord and maxRecord on the collection node. 
- */ -void handleRIDRangeScan(const MatchExpression* conjunct, - CollectionScanNode* collScan, - const QueryPlannerParams& params, - const CollatorInterface* collator) { - invariant(params.clusteredInfo); - +[[nodiscard]] bool QueryPlannerAccess::handleRIDRangeScan( + const MatchExpression* conjunct, + const CollatorInterface* queryCollator, + const CollatorInterface* ccCollator, + const StringData& clusterKeyFieldName, + boost::optional& minRecord, + boost::optional& maxRecord) { if (conjunct == nullptr) { - return; + return false; } - auto* andMatchPtr = dynamic_cast(conjunct); + const AndMatchExpression* andMatchPtr = dynamic_cast(conjunct); if (andMatchPtr != nullptr) { + bool atLeastOneConjunctCompatibleCollation = false; for (size_t index = 0; index < andMatchPtr->numChildren(); index++) { - handleRIDRangeScan(andMatchPtr->getChild(index), collScan, params, collator); + // Recursive call on each branch of 'andMatchPtr'. + if (handleRIDRangeScan(andMatchPtr->getChild(index), + queryCollator, + ccCollator, + clusterKeyFieldName, + minRecord, + maxRecord)) { + atLeastOneConjunctCompatibleCollation = true; + } } - return; + + // If one of the conjuncts excludes values of the cluster key which are affected by + // collation, then the entire $and will also exclude those values. + return atLeastOneConjunctCompatibleCollation; } - if (conjunct->path() != - clustered_util::getClusterKeyFieldName(params.clusteredInfo->getIndexSpec())) { - // No match on the cluster key. - return; + // If 'conjunct' does not apply to the cluster key, return early here, as updating bounds based + // on this conjunct is incorrect and can result in garbage bounds. + if (conjunct->path() != clusterKeyFieldName) { + return false; } // TODO SERVER-62707: Allow $in with regex to use a clustered index. - auto inMatch = dynamic_cast(conjunct); + const InMatchExpression* inMatch = dynamic_cast(conjunct); if (inMatch && !inMatch->hasRegex()) { // Iterate through the $in equalities to find the min/max values. The min/max bounds for the // collscan need to be loose enough to cover all of these values. @@ -335,9 +394,9 @@ void handleRIDRangeScan(const MatchExpression* conjunct, boost::optional maxBound; bool allEltsCollationCompatible = true; - for (const auto& element : inMatch->getEqualities()) { - if (compatibleCollator(params, collator, element)) { - const auto collated = IndexBoundsBuilder::objFromElement(element, collator); + for (const BSONElement& element : inMatch->getEqualities()) { + if (compatibleCollator(ccCollator, queryCollator, element)) { + const BSONObj collated = IndexBoundsBuilder::objFromElement(element, queryCollator); setLowestRecord(minBound, collated); setHighestRecord(maxBound, collated); } else { @@ -353,90 +412,80 @@ void handleRIDRangeScan(const MatchExpression* conjunct, setHighestRecord(maxBound, bMax.obj()); } } - collScan->hasCompatibleCollation = allEltsCollationCompatible; // Finally, tighten the collscan bounds with the min/max bounds for the $in. if (minBound) { - setHighestRecord(collScan->minRecord, *minBound); + setHighestRecord(minRecord, *minBound); } if (maxBound) { - setLowestRecord(collScan->maxRecord, *maxBound); + setLowestRecord(maxRecord, *maxBound); } - return; + return allEltsCollationCompatible; } auto match = dynamic_cast(conjunct); if (match == nullptr) { - return; // Not a comparison match expression. + return false; // Not a comparison match expression. 
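        // Illustrative walk-through (hypothetical values): for {_id: {$in: [3, "x", 9]}} with a
        // query collation that differs from the collection's, the numeric equalities 3 and 9
        // extend minBound/maxBound to cover them, while the collation-affected string "x" widens
        // the bounds to the whole range of its BSON type (via appendMinForType/appendMaxForType
        // below) and forces allEltsCollationCompatible to false, so this conjunct is not reported
        // as collation-compatible.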
} - const auto& element = match->getData(); + const BSONElement& element = match->getData(); // Set coarse min/max bounds based on type in case we can't set tight bounds. BSONObjBuilder minb; minb.appendMinForType("", element.type()); - setHighestRecord(collScan->minRecord, minb.obj()); + setHighestRecord(minRecord, minb.obj()); BSONObjBuilder maxb; maxb.appendMaxForType("", element.type()); - setLowestRecord(collScan->maxRecord, maxb.obj()); + setLowestRecord(maxRecord, maxb.obj()); - bool compatible = compatibleCollator(params, collator, element); + bool compatible = compatibleCollator(ccCollator, queryCollator, element); if (!compatible) { - return; // Collator affects probe and it's not compatible with collection's collator. + // Collator affects probe and it's not compatible with collection's collator. + return false; } // Even if the collations don't match at this point, it's fine, // because the bounds exclude values that use it - collScan->hasCompatibleCollation = true; - - const auto collated = IndexBoundsBuilder::objFromElement(element, collator); + const BSONObj collated = IndexBoundsBuilder::objFromElement(element, queryCollator); if (dynamic_cast(match)) { - setHighestRecord(collScan->minRecord, collated); - setLowestRecord(collScan->maxRecord, collated); + setHighestRecord(minRecord, collated); + setLowestRecord(maxRecord, collated); } else if (dynamic_cast(match) || dynamic_cast(match)) { - setLowestRecord(collScan->maxRecord, collated); + setLowestRecord(maxRecord, collated); } else if (dynamic_cast(match) || dynamic_cast(match)) { - setHighestRecord(collScan->minRecord, collated); + setHighestRecord(minRecord, collated); } -} -/** - * Sets the lowPriority parameter on the given index scan node. - */ -void deprioritizeUnboundedIndexScan(IndexScanNode* solnRoot, - const FindCommandRequest& findCommand) { - auto sort = findCommand.getSort(); - if (findCommand.getLimit() && - (sort.isEmpty() || sort[query_request_helper::kNaturalSortField])) { - // There is a limit with either no sort or the natural sort. - return; - } - - auto indexScan = checked_cast(solnRoot); - if (!indexScan->bounds.isUnbounded()) { - return; - } - - indexScan->lowPriority = true; + return true; } -} // namespace - std::unique_ptr QueryPlannerAccess::makeCollectionScan( - const CanonicalQuery& query, bool tailable, const QueryPlannerParams& params, int direction) { + const CanonicalQuery& query, + bool tailable, + const QueryPlannerParams& params, + int direction, + const MatchExpression* root) { + + // The following are expensive to look up, so only do it once for each. + const mongo::NamespaceString nss = query.nss(); + const bool isOplog = nss.isOplog(); + const bool isChangeCollection = nss.isChangeCollection(); + // Make the (only) node, a collection scan. auto csn = std::make_unique(); - csn->name = query.ns(); - csn->filter = query.root()->clone(); + csn->nss = nss; + csn->filter = root->clone(); csn->tailable = tailable; csn->shouldTrackLatestOplogTimestamp = params.options & QueryPlannerParams::TRACK_LATEST_OPLOG_TS; csn->shouldWaitForOplogVisibility = params.options & QueryPlannerParams::OPLOG_SCAN_WAIT_FOR_VISIBLE; csn->direction = direction; + csn->isOplog = isOplog; + csn->isClustered = params.clusteredInfo ? true : false; if (params.clusteredInfo) { csn->clusteredIndex = params.clusteredInfo->getIndexSpec(); @@ -457,8 +506,8 @@ std::unique_ptr QueryPlannerAccess::makeCollectionScan( // the collection scan to return timestamp-based tokens. 
Otherwise, we should // return generic RecordId-based tokens. if (query.getFindCommandRequest().getRequestResumeToken()) { - csn->shouldTrackLatestOplogTimestamp = query.nss().isOplogOrChangeCollection(); - csn->requestResumeToken = !query.nss().isOplogOrChangeCollection(); + csn->shouldTrackLatestOplogTimestamp = (isOplog || isChangeCollection); + csn->requestResumeToken = !csn->shouldTrackLatestOplogTimestamp; } // Extract and assign the RecordId from the 'resumeAfter' token, if present. @@ -470,13 +519,13 @@ std::unique_ptr QueryPlannerAccess::makeCollectionScan( const bool assertMinTsHasNotFallenOffOplog = params.options & QueryPlannerParams::ASSERT_MIN_TS_HAS_NOT_FALLEN_OFF_OPLOG; - if (query.nss().isOplogOrChangeCollection() && csn->direction == 1) { + if ((isOplog || isChangeCollection) && csn->direction == 1) { // Takes Timestamp 'ts' as input, transforms it to the RecordIdBound and assigns it to the // output parameter 'recordId'. The RecordId format for the change collection is a string, // where as the RecordId format for the oplog is a long integer. The timestamp should be // converted to the required format before assigning it to the 'recordId'. auto assignRecordIdFromTimestamp = [&](auto& ts, auto* recordId) { - auto keyFormat = query.nss().isChangeCollection() ? KeyFormat::String : KeyFormat::Long; + auto keyFormat = isChangeCollection ? KeyFormat::String : KeyFormat::Long; auto status = record_id_helpers::keyForOptime(ts, keyFormat); if (status.isOK()) { *recordId = RecordIdBound(status.getValue()); @@ -486,7 +535,7 @@ std::unique_ptr QueryPlannerAccess::makeCollectionScan( // Optimizes the start and end location parameters for a collection scan for an oplog // collection. Not compatible with $_resumeAfter so we do not optimize in that case. if (resumeAfterObj.isEmpty()) { - auto [minTs, maxTs] = extractTsRange(query.root()); + auto [minTs, maxTs] = extractTsRange(root); if (minTs) { assignRecordIdFromTimestamp(*minTs, &csn->minRecord); if (assertMinTsHasNotFallenOffOplog) { @@ -502,7 +551,7 @@ std::unique_ptr QueryPlannerAccess::makeCollectionScan( // collection after the first matching one must also match. To avoid wasting time // running the match expression on every document to be returned, we tell the // CollectionScan stage to stop applying the filter once it finds the first match. - if (isOplogTsLowerBoundPred(query.root())) { + if (isOplogTsLowerBoundPred(root)) { csn->stopApplyingFilterAfterFirstMatch = true; } } @@ -518,18 +567,34 @@ std::unique_ptr QueryPlannerAccess::makeCollectionScan( auto queryCollator = query.getCollator(); auto collCollator = params.clusteredCollectionCollator; - csn->hasCompatibleCollation = - !queryCollator || (collCollator && *queryCollator == *collCollator); + csn->hasCompatibleCollation = CollatorInterface::collatorsMatch(queryCollator, collCollator); - if (params.clusteredInfo && !csn->resumeAfterRecordId) { + if (csn->isClustered && !csn->resumeAfterRecordId) { // This is a clustered collection. Attempt to perform an efficient, bounded collection scan - // via minRecord and maxRecord if applicable. - handleRIDRangeScan(csn->filter.get(), csn.get(), params, queryCollator); - handleRIDRangeMinMax(query, csn.get(), params, queryCollator); + // via minRecord and maxRecord if applicable. During this process, we will check if the + // query is guaranteed to exclude values of the cluster key which are affected by collation. 
+ // If so, then even if the query and collection collations differ, the collation difference + // won't affect the query results. In that case, we can say hasCompatibleCollation is true. + bool compatibleCollation = handleRIDRangeScan( + csn->filter.get(), + queryCollator, + collCollator, + clustered_util::getClusterKeyFieldName(params.clusteredInfo->getIndexSpec()), + csn->minRecord, + csn->maxRecord); + csn->hasCompatibleCollation |= compatibleCollation; + + handleRIDRangeMinMax(query, + csn->direction, + queryCollator, + collCollator, + csn->minRecord, + csn->maxRecord, + csn->boundInclusion); } return csn; -} +} // makeCollectionScan std::unique_ptr QueryPlannerAccess::makeLeafNode( const CanonicalQuery& query, @@ -605,10 +670,10 @@ std::unique_ptr QueryPlannerAccess::makeLeafNode( BSONObjIterator it(index.keyPattern); BSONElement keyElt = it.next(); for (size_t i = 0; i < pos; ++i) { - verify(it.more()); + MONGO_verify(it.more()); keyElt = it.next(); } - verify(!keyElt.eoo()); + MONGO_verify(!keyElt.eoo()); IndexBoundsBuilder::translate( expr, keyElt, index, &isn->bounds.fields[pos], tightnessOut, ietBuilder); @@ -755,7 +820,7 @@ void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr, ScanBuildingSt GeoNear2DSphereNode* gn = static_cast(node); boundsToFillOut = &gn->baseBounds; } else { - verify(type == STAGE_IXSCAN); + MONGO_verify(type == STAGE_IXSCAN); IndexScanNode* scan = static_cast(node); // See STAGE_GEO_NEAR_2D above - 2D indexes can only accumulate scan bounds over the first @@ -778,13 +843,13 @@ void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr, ScanBuildingSt BSONObjIterator it(index.keyPattern); BSONElement keyElt = it.next(); for (size_t i = 0; i < pos; ++i) { - verify(it.more()); + MONGO_verify(it.more()); keyElt = it.next(); } - verify(!keyElt.eoo()); + MONGO_verify(!keyElt.eoo()); scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH; - verify(boundsToFillOut->fields.size() > pos); + MONGO_verify(boundsToFillOut->fields.size() > pos); OrderedIntervalList* oil = &boundsToFillOut->fields[pos]; @@ -796,7 +861,7 @@ void QueryPlannerAccess::mergeWithLeafNode(MatchExpression* expr, ScanBuildingSt IndexBoundsBuilder::translateAndIntersect( expr, keyElt, index, oil, &scanState->tightness, scanState->getCurrentIETBuilder()); } else { - verify(MatchExpression::OR == mergeType); + MONGO_verify(MatchExpression::OR == mergeType); IndexBoundsBuilder::translateAndUnion( expr, keyElt, index, oil, &scanState->tightness, scanState->getCurrentIETBuilder()); } @@ -1042,7 +1107,7 @@ void QueryPlannerAccess::finishLeafNode( bounds = &gnode->baseBounds; nodeIndex = &gnode->index; } else { - verify(type == STAGE_IXSCAN); + MONGO_verify(type == STAGE_IXSCAN); IndexScanNode* scan = static_cast(node); nodeIndex = &scan->index; bounds = &scan->bounds; @@ -1058,7 +1123,7 @@ void QueryPlannerAccess::finishLeafNode( size_t firstEmptyField = 0; for (firstEmptyField = 0; firstEmptyField < bounds->fields.size(); ++firstEmptyField) { if (bounds->fields[firstEmptyField].name.empty()) { - verify(bounds->fields[firstEmptyField].intervals.empty()); + MONGO_verify(bounds->fields[firstEmptyField].intervals.empty()); break; } } @@ -1068,7 +1133,7 @@ void QueryPlannerAccess::finishLeafNode( // Skip ahead to the firstEmptyField-th element, where we begin filling in bounds. 
BSONObjIterator it(nodeIndex->keyPattern); for (size_t i = 0; i < firstEmptyField; ++i) { - verify(it.more()); + MONGO_verify(it.more()); it.next(); } @@ -1078,14 +1143,14 @@ void QueryPlannerAccess::finishLeafNode( // There may be filled-in fields to the right of the firstEmptyField; for instance, the // index {loc:"2dsphere", x:1} with a predicate over x and a near search over loc. if (bounds->fields[firstEmptyField].name.empty()) { - verify(bounds->fields[firstEmptyField].intervals.empty()); + MONGO_verify(bounds->fields[firstEmptyField].intervals.empty()); IndexBoundsBuilder::allValuesForField(kpElt, &bounds->fields[firstEmptyField]); } ++firstEmptyField; } // Make sure that the length of the key is the length of the bounds we started. - verify(firstEmptyField == bounds->fields.size()); + MONGO_verify(firstEmptyField == bounds->fields.size()); } // Build Interval Evaluation Trees used to restore index bounds from cached SBE Plans. @@ -1201,7 +1266,7 @@ bool projNeedsFetch(const CanonicalQuery& query) { // document, or requires metadata, we will still need a FETCH stage. if (proj->type() == projection_ast::ProjectType::kInclusion && !proj->requiresMatchDetails() && proj->metadataDeps().none() && !proj->requiresDocument()) { - auto projFields = proj->getRequiredFields(); + const auto& projFields = proj->getRequiredFields(); // Note that it is not possible to project onto dotted paths of _id here, since they may be // null or missing, and the index cannot differentiate between the two cases, so we would // still need a FETCH stage. @@ -1258,7 +1323,7 @@ bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query, scanState.ixtag = checked_cast(child->getTag()); // If there's a tag it must be valid. - verify(IndexTag::kNoIndex != scanState.ixtag->index); + MONGO_verify(IndexTag::kNoIndex != scanState.ixtag->index); // If the child can't use an index on its own field (and the child is not a negation // of a bounds-generating expression), then it's indexed by virtue of one of @@ -1308,7 +1373,7 @@ bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query, if (shouldMergeWithLeaf(child, scanState)) { // The child uses the same index we're currently building a scan for. Merge // the bounds and filters. - verify(scanState.currentIndexNumber == scanState.ixtag->index); + MONGO_verify(scanState.currentIndexNumber == scanState.ixtag->index); scanState.tightness = IndexBoundsBuilder::INEXACT_FETCH; mergeWithLeafNode(child, &scanState); refineTightnessForMaybeCoveredQuery(query, scanState.tightness); @@ -1318,7 +1383,7 @@ bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query, // Output the current scan before starting to construct a new out. finishAndOutputLeaf(&scanState, out); } else { - verify(IndexTag::kNoIndex == scanState.currentIndexNumber); + MONGO_verify(IndexTag::kNoIndex == scanState.currentIndexNumber); } // Reset state before producing a new leaf. @@ -1336,6 +1401,42 @@ bool QueryPlannerAccess::processIndexScans(const CanonicalQuery& query, } } + // If the index is partial and we have reached children without index tag, check if they are + // covered by the index' filter expression. In this case the child can be removed. In some cases + // this enables to remove the fetch stage from the plan. + // The check could be put inside the 'handleFilterAnd()' function, but if moved then the + // optimization will not be applied if the predicate contains an $elemMatch expression, since + // then the 'handleFilterAnd()' is not called. 
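The comment block above reduces to a containment question: if every document that satisfies the partial index's filter also satisfies an untagged conjunct, that conjunct is redundant and can be dropped. The sketch below shows this for single-field integer range predicates only; Pred, implies, and filterImplies are invented stand-ins for illustration, not the real expression::isSubsetOf or MatchExpression machinery.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Toy single-field range predicate, e.g. {b: {$lt: 100}}.
enum class Op { kLt, kLte, kGt, kGte };
struct Pred {
    std::string field;
    Op op;
    int value;
};

// Returns true when every document satisfying 'filter' also satisfies 'query'
// (a sufficient check for same-field integer range predicates only).
bool implies(const Pred& filter, const Pred& query) {
    if (filter.field != query.field)
        return false;
    switch (query.op) {
        case Op::kLt:
            return (filter.op == Op::kLt && filter.value <= query.value) ||
                (filter.op == Op::kLte && filter.value < query.value);
        case Op::kLte:
            return (filter.op == Op::kLt || filter.op == Op::kLte) && filter.value <= query.value;
        case Op::kGt:
            return (filter.op == Op::kGt && filter.value >= query.value) ||
                (filter.op == Op::kGte && filter.value > query.value);
        case Op::kGte:
            return (filter.op == Op::kGt || filter.op == Op::kGte) && filter.value >= query.value;
    }
    return false;
}

// An $and-style index filter implies 'query' if any one of its conjuncts does.
bool filterImplies(const std::vector<Pred>& indexFilter, const Pred& query) {
    for (const auto& p : indexFilter)
        if (implies(p, query))
            return true;
    return false;
}

int main() {
    // Partial index filter: {$and: [{a: {$gt: 10}}, {b: {$lt: 100}}]}.
    std::vector<Pred> indexFilter{{"a", Op::kGt, 10}, {"b", Op::kLt, 100}};
    // Query conjunct that received no index tag: {b: {$lt: 100}}.
    Pred child{"b", Op::kLt, 100};
    // Prints 1: the child predicate is redundant and could be dropped from the plan.
    std::cout << filterImplies(indexFilter, child) << "\n";
}
```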
+ if (IndexTag::kNoIndex != scanState.currentIndexNumber) { + const IndexEntry& index = indices[scanState.currentIndexNumber]; + if (index.filterExpr != nullptr) { + while (scanState.curChild < root->numChildren()) { + MatchExpression* child = root->getChild(scanState.curChild); + if (expression::isSubsetOf(index.filterExpr, child)) { + // When the documents satisfying the index filter predicate are a subset of the + // documents satisfying the child expression, the child predicate is redundant. + // Remove the child from the root's children. + // For example: index on 'a' with a filter {$and: [{a: {$gt: 10}}, {b: {$lt: + // 100}}]} and a query predicate {$and: [{a: {$gt: 20}}, {b: {$lt: 100}}]}. The + // non-indexed child {b: {$lt: 100}} is always satisfied by the index filter and + // can be removed. + + // In case of index filter predicate with $or, this optimization is not + // applicable, since the subset relationship doesn't hold. + // For example, an index on field 'c' with a filter expression {$or: [{a: {$gt: + // 10}}, {b: {$lt: 100}}]} could be applicable for the query with a predicate + // {$and: [{c: {$gt: 100}}, {b: {$lt: 100}}]}, but the predicate will not be + // removed. + scanState.tightness = IndexBoundsBuilder::EXACT; + refineTightnessForMaybeCoveredQuery(query, scanState.tightness); + handleFilter(&scanState); + } else { + ++scanState.curChild; + } + } + } + } + // Output the scan we're done with, if it exists. if (nullptr != scanState.currentScan.get()) { finishAndOutputLeaf(&scanState, out); @@ -1417,7 +1518,7 @@ bool QueryPlannerAccess::processIndexScansElemMatch( if (shouldMergeWithLeaf(emChild, *scanState)) { // The child uses the same index we're currently building a scan for. Merge // the bounds and filters. - verify(scanState->currentIndexNumber == scanState->ixtag->index); + MONGO_verify(scanState->currentIndexNumber == scanState->ixtag->index); scanState->tightness = IndexBoundsBuilder::INEXACT_FETCH; mergeWithLeafNode(emChild, scanState); @@ -1425,7 +1526,7 @@ bool QueryPlannerAccess::processIndexScansElemMatch( if (nullptr != scanState->currentScan.get()) { finishAndOutputLeaf(scanState, out); } else { - verify(IndexTag::kNoIndex == scanState->currentIndexNumber); + MONGO_verify(IndexTag::kNoIndex == scanState->currentIndexNumber); } // Reset state before producing a new leaf. @@ -1513,7 +1614,7 @@ std::unique_ptr QueryPlannerAccess::buildIndexedAnd( // We must use an index for at least one child of the AND. We shouldn't be here if this // isn't the case. - verify(ixscanNodes.size() >= 1); + MONGO_verify(ixscanNodes.size() >= 1); // Short-circuit: an AND of one child is just the child. if (ixscanNodes.size() == 1) { @@ -1605,7 +1706,7 @@ std::unique_ptr QueryPlannerAccess::buildIndexedAnd( // index, so we put a fetch with filter. if (root->numChildren() > 0) { auto fetch = std::make_unique(); - verify(ownedRoot); + MONGO_verify(ownedRoot); if (ownedRoot->numChildren() == 1) { // An $and of one thing is that thing. 
fetch->filter = std::move((*ownedRoot->getChildVector())[0]); @@ -1633,11 +1734,56 @@ std::unique_ptr QueryPlannerAccess::buildIndexedOr( const QueryPlannerParams& params) { const bool inArrayOperator = !ownedRoot; - std::vector> ixscanNodes; - if (!processIndexScans(query, root, inArrayOperator, indices, params, &ixscanNodes)) { + bool usedClusteredCollScan = false; + std::vector> scanNodes; + if (!processIndexScans(query, root, inArrayOperator, indices, params, &scanNodes)) { return nullptr; } + // Check if we can use a CLUSTERED_IXSCAN on the remaining children if they have no plans. + // Since, the only clustered index currently supported is on '_id' if we are in an array + // operator, we know we won't make clustered collection scans. + if (!inArrayOperator && 0 != root->numChildren()) { + bool clusteredCollection = params.clusteredInfo.has_value(); + const bool isTailable = query.getFindCommandRequest().getTailable(); + if (clusteredCollection) { + auto clusteredScanDirection = + QueryPlannerCommon::determineClusteredScanDirection(query, params).value_or(1); + while (0 < root->numChildren()) { + usedClusteredCollScan = true; + MatchExpression* child = root->getChild(0); + std::unique_ptr collScan = + makeCollectionScan(query, isTailable, params, clusteredScanDirection, child); + // Confirm the collection scan node is a clustered collection scan. + CollectionScanNode* collScanNode = static_cast(collScan.get()); + if (!collScanNode->doClusteredCollectionScanClassic()) { + return nullptr; + } + + // Caching OR queries with collection scans is restricted, since it is challenging + // to determine which match expressions from the input query require a clustered + // collection scan. Therefore, we cannot correctly calculate the correct bounds for + // the query using the cached plan. + collScanNode->markNotEligibleForPlanCache(); + scanNodes.push_back(std::move(collScan)); + // Erase child from root. + root->getChildVector()->erase(root->getChildVector()->begin()); + } + } + + // If we have a clustered collection scan, then all index scan stages must be wrapped inside + // a 'FETCH'. This is to avoid having an unnecessary collection scan node inside a 'FETCH', + // since the documents will already be fetched. + // TODO SERVER-77867 investigate when we can avoid adding this 'FETCH' stage. + if (usedClusteredCollScan) { + for (size_t i = 0; i < scanNodes.size(); ++i) { + if (scanNodes[i]->getType() == STAGE_IXSCAN) { + scanNodes[i] = std::make_unique(std::move(scanNodes[i])); + } + } + } + } + // Unlike an AND, an OR cannot have filters hanging off of it. We stop processing // when any of our children lack index tags. If a node lacks an index tag it cannot // be answered via an index. @@ -1649,51 +1795,51 @@ std::unique_ptr QueryPlannerAccess::buildIndexedOr( return nullptr; } + if (!wcp::expandWildcardFieldBounds(scanNodes)) { + return nullptr; + } + // If all index scans are identical, then we collapse them into a single scan. This prevents // us from creating OR plans where the branches of the OR perform duplicate work. - ixscanNodes = collapseEquivalentScans(std::move(ixscanNodes)); + scanNodes = collapseEquivalentScans(std::move(scanNodes)); std::unique_ptr orResult; // An OR of one node is just that node. 
- if (1 == ixscanNodes.size()) { - orResult = std::move(ixscanNodes[0]); + if (1 == scanNodes.size()) { + orResult = std::move(scanNodes[0]); } else { std::vector shouldReverseScan; - // (Ignore FCV check): This is intentional because we want clusters which have wildcard - // indexes still be able to use the feature even if the FCV is downgraded. - if (feature_flags::gFeatureFlagCompoundWildcardIndexes.isEnabledAndIgnoreFCVUnsafe() && - wildcard_planning::canOnlyAnswerWildcardPrefixQuery(ixscanNodes)) { - // If we get here, we have a an OR of IXSCANs, one of which is a compound wildcard - // index, but at least one of them can only support a FETCH + IXSCAN on queries on the - // prefix. This means this plan will produce incorrect results. - return nullptr; - } if (query.getSortPattern()) { - // If all ixscanNodes can provide the sort, shouldReverseScan is populated with which + // If all 'scanNodes' can provide the sort, shouldReverseScan is populated with which // scans to reverse. shouldReverseScan = - canProvideSortWithMergeSort(ixscanNodes, query.getFindCommandRequest().getSort()); + canProvideSortWithMergeSort(scanNodes, query.getFindCommandRequest().getSort()); } if (!shouldReverseScan.empty()) { + // TODO SERVER-77601 remove this conditional once SBE supports sort keys in collection + // scans. + if (usedClusteredCollScan) { + return nullptr; + } // Each node can provide either the requested sort, or the reverse of the requested // sort. - invariant(ixscanNodes.size() == shouldReverseScan.size()); - for (size_t i = 0; i < ixscanNodes.size(); ++i) { + invariant(scanNodes.size() == shouldReverseScan.size()); + for (size_t i = 0; i < scanNodes.size(); ++i) { if (shouldReverseScan[i]) { - QueryPlannerCommon::reverseScans(ixscanNodes[i].get()); + QueryPlannerCommon::reverseScans(scanNodes[i].get()); } } auto msn = std::make_unique(); msn->sort = query.getFindCommandRequest().getSort(); - msn->addChildren(std::move(ixscanNodes)); + msn->addChildren(std::move(scanNodes)); orResult = std::move(msn); } else { auto orn = std::make_unique(); - orn->addChildren(std::move(ixscanNodes)); + orn->addChildren(std::move(scanNodes)); orResult = std::move(orn); } } @@ -1761,7 +1907,7 @@ std::unique_ptr QueryPlannerAccess::_buildIndexedDataAccess( } auto soln = makeLeafNode(query, index, tag->pos, root, &tightness, ietBuilder); - verify(nullptr != soln); + MONGO_verify(nullptr != soln); finishLeafNode(soln.get(), index, std::move(ietBuilders)); if (!ownedRoot) { @@ -1787,7 +1933,7 @@ std::unique_ptr QueryPlannerAccess::_buildIndexedDataAccess( return soln; } else if (tightness == IndexBoundsBuilder::INEXACT_COVERED && !indices[tag->index].multikey) { - verify(nullptr == soln->filter.get()); + MONGO_verify(nullptr == soln->filter.get()); soln->filter = std::move(ownedRoot); return soln; } else { @@ -1881,7 +2027,7 @@ void QueryPlannerAccess::addFilterToSolutionNode(QuerySolutionNode* node, if (MatchExpression::AND == type) { listFilter = std::make_unique(); } else { - verify(MatchExpression::OR == type); + MONGO_verify(MatchExpression::OR == type); listFilter = std::make_unique(); } unique_ptr oldFilter = node->filter->clone(); diff --git a/src/mongo/db/query/planner_access.h b/src/mongo/db/query/planner_access.h index 03e9773a0570d..e0106e4cd1e5f 100644 --- a/src/mongo/db/query/planner_access.h +++ b/src/mongo/db/query/planner_access.h @@ -29,15 +29,27 @@ #pragma once +#include +#include +#include #include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include 
"mongo/db/exec/collection_scan_common.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/index_tag.h" #include "mongo/db/query/interval_evaluation_tree.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -101,12 +113,17 @@ namespace mongo { class QueryPlannerAccess { public: /** - * Return a CollectionScanNode that scans as requested in 'query'. + * Return a CollectionScanNode that scans as requested in 'query'. The function will return a + * collection scan with 'root' as the filter. This was needed to support rooted $OR queries that + * use this helper to build clustered collection scans. In this case, the 'root' will be the + * child branch of the $OR expression. In all other cases 'root' should be the 'root' of + * 'query'. */ static std::unique_ptr makeCollectionScan(const CanonicalQuery& query, bool tailable, const QueryPlannerParams& params, - int direction); + int direction, + const MatchExpression* root); /** * Return a plan that uses the provided index as a proxy for a collection scan. @@ -126,7 +143,7 @@ class QueryPlannerAccess { const BSONObj& endKey); /** - * Consructs a data access plan for 'query' which answers the predicate contained in 'root'. + * Constructs a data access plan for 'query' which answers the predicate contained in 'root'. * Assumes the presence of the passed in indices. Planning behavior is controlled by the * settings in 'params'. */ @@ -136,6 +153,68 @@ class QueryPlannerAccess { const std::vector& indices, const QueryPlannerParams& params); + /** + * Helper method that checks to see if min() or max() were provided along with the query. If so, + * adjusts the collection scan bounds to fit the constraints. + * + * This method is shared by QO planner and QE SBE cached plan variable bind, which must get the + * same answers for the output args (hence sharing one implementation instead of creating a + * separate parallel one). + * + * Arguments + * (in) query - current query + * (in) direction - 'query's scan direction: 1: forward; -1: reverse + * (in) queryCollator - 'query's collator + * (in) ccCollator - clustered collection's collator + * (out) minRecord - scan start bound + * (out) maxRecord - scan end bound + * (out) boundInclusion - whether to exclude 'maxRecord' because it was specified by the 'max' + * keyword + */ + static void handleRIDRangeMinMax(const CanonicalQuery& query, + int direction, + const CollatorInterface* queryCollator, + const CollatorInterface* ccCollator, + boost::optional& minRecord, + boost::optional& maxRecord, + CollectionScanParams::ScanBoundInclusion& boundInclusion); + + /** + * Helper method to add an RID range to collection scans. If the query solution tree contains a + * collection scan node with a suitable comparison predicate on '_id', we add a minRecord and + * maxRecord on the collection node. + * + * This method is shared by QO planner and QE SBE cached plan variable bind, which must get the + * same answers for the output args (hence sharing one implementation instead of creating a + * separate parallel one). 
+ * + * Returns true if the MatchExpression is a comparison against the cluster key which either: + * 1) is guaranteed to exclude values of the cluster key which are affected by collation or + * 2) may return values of the cluster key which are affected by collation, but the query and + * collection collations match. + * Otherwise, returns false. + * + * For example, assuming the cluster key is "_id": + * Given {a: {$eq: 2}}, we return false, because the comparison is not against the cluster key. + * Given {_id: {$gte: 5}}, we return true, because this comparison against the cluster key + * excludes keys which are affected by collations. + * Given {_id: {$eq: "str"}}, we return true only if the query and collection collations match. + * + * Arguments + * (in) conjunct - current query's match expression (or subexpression in recursive calls) + * (in) queryCollator - current query's collator + * (in) ccCollator - clustered collection's collator + * (in) clusterKeyFieldName - only "_id" is officially supported, but this may change someday + * (out) minRecord - scan start bound + * (out) maxRecord - scan end bound + */ + [[nodiscard]] static bool handleRIDRangeScan(const MatchExpression* conjunct, + const CollatorInterface* queryCollator, + const CollatorInterface* ccCollator, + const StringData& clusterKeyFieldName, + boost::optional& minRecord, + boost::optional& maxRecord); + private: /** * Building the leaves (i.e. the index scans) is done by looping through @@ -315,6 +394,10 @@ class QueryPlannerAccess { * of index scans. As such, the processing for AND and OR is almost identical. * * Does not take ownership of 'root' but may remove children from it. + * + * If 'inArrayOperator' is true, then 'root' will be left unmodified. + * If 'inArrayOperator' is false, then the children of 'root' that are processed will be removed + * from 'root'. */ static bool processIndexScans(const CanonicalQuery& query, MatchExpression* root, diff --git a/src/mongo/db/query/planner_access_test.cpp b/src/mongo/db/query/planner_access_test.cpp index d30544b43fafa..c6c40a9530628 100644 --- a/src/mongo/db/query/planner_access_test.cpp +++ b/src/mongo/db/query/planner_access_test.cpp @@ -27,12 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/matcher.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/index_tag.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/planner_analysis.cpp b/src/mongo/db/query/planner_analysis.cpp index c8b04888b303c..b66304a6d053e 100644 --- a/src/mongo/db/query/planner_analysis.cpp +++ b/src/mongo/db/query/planner_analysis.cpp @@ -30,23 +30,57 @@ #include "mongo/db/query/planner_analysis.h" +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include #include -#include "mongo/base/simple_string_data_comparator.h" +#include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/db/basic_types.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/index/expression_params.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/s2_common.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_geo.h" -#include "mongo/db/matcher/match_expression_dependencies.h" -#include "mongo/db/query/planner_ixselect.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/interval_evaluation_tree.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/projection.h" -#include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner_common.h" +#include "mongo/db/query/query_request_helper.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/stage_types.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -836,7 +870,7 @@ QueryPlannerAnalysis::determineLookupStrategy( auto foreignCollItr = collectionsInfo.find(foreignCollName); tassert(5842600, str::stream() << "Expected collection info, but found none; target collection: " - << foreignCollName, + << foreignCollName.toStringForErrorMsg(), foreignCollItr != collectionsInfo.end()); // Check if an eligible index exists for indexed loop join strategy. @@ -1173,7 +1207,12 @@ std::unique_ptr QueryPlannerAnalysis::analyzeSort( // Sort is not provided. See if we provide the reverse of our sort pattern. // If so, we can reverse the scan direction(s). 
BSONObj reverseSort = QueryPlannerCommon::reverseSortObj(sortObj);
- if (providedSorts.contains(reverseSort)) {
+ // The only collection scan that includes a sort order in 'providedSorts' is a scan on a
+ // clustered collection. However, we cannot reverse this scan if its direction is specified by a
+ // $natural hint.
+ const bool naturalCollScan = solnRoot->getType() == StageType::STAGE_COLLSCAN &&
+ query.getFindCommandRequest().getHint()[query_request_helper::kNaturalSortField];
+ if (providedSorts.contains(reverseSort) && !naturalCollScan) {
 QueryPlannerCommon::reverseScans(solnRoot.get());
 LOGV2_DEBUG(20951,
 5,
@@ -1193,11 +1232,12 @@ std::unique_ptr QueryPlannerAnalysis::analyzeSort(
 if (!solnRoot->fetched()) {
 const bool sortIsCovered =
 std::all_of(sortObj.begin(), sortObj.end(), [&](BSONElement e) {
- // Note that hasField() will return 'false' in the case that this field is a string
- // and there is a non-simple collation on the index. This will lead to encoding of
- // the field from the document on fetch, despite having read the encoded value from
- // the index.
- return solnRoot->hasField(e.fieldName());
+ // If the index has the collation that the query is expecting, then kCollatedProvided
+ // will be returned. Hence we can use the index for sorting and grouping (distinct_scan),
+ // but we need to add a fetch to retrieve a proper value of the key.
+ auto fieldAvailability = solnRoot->getFieldAvailability(e.fieldName());
+ return fieldAvailability == FieldAvailability::kCollatedProvided ||
+ fieldAvailability == FieldAvailability::kFullyProvided;
 });
 if (!sortIsCovered) {
@@ -1261,9 +1301,13 @@ std::unique_ptr QueryPlannerAnalysis::analyzeDataAccess(
 bool fetch = false;
 for (auto&& shardKeyField : params.shardKey) {
 auto fieldAvailability = solnRoot->getFieldAvailability(shardKeyField.fieldName());
- if (fieldAvailability == FieldAvailability::kNotProvided) {
- // One of the shard key fields is not provided by an index. We need to fetch the
- // full documents prior to shard filtering.
+ if (fieldAvailability == FieldAvailability::kNotProvided ||
+ fieldAvailability == FieldAvailability::kCollatedProvided) {
+ // One of the shard key fields is either not provided by an index, or only a collated
+ // version of it is provided. We need to fetch the full documents prior to shard
+ // filtering. In the kCollatedProvided case the fetch is needed to get a non-ICU-encoded
+ // value from the collection; otherwise the index scan would only return non-readable
+ // ICU-encoded values.
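To compare the two FieldAvailability checks touched in this hunk side by side, here is a small self-contained sketch. The enum values are the ones named in the patch, but sortIsCovered and shardFilterNeedsFetch are illustrative helpers rather than planner functions: a collated key still orders correctly, so it can cover a sort, while shard filtering needs the real value and therefore a fetch.

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

// Illustrative copy of the three availability states referenced in the patch; the real enum
// lives on the planner's query solution nodes.
enum class FieldAvailability { kNotProvided, kCollatedProvided, kFullyProvided };

// A sort can be pushed onto the index when every sort field is at least available in collated
// form: collation keys still sort in the right order, they just are not readable values.
bool sortIsCovered(const std::vector<FieldAvailability>& sortFields) {
    return std::all_of(sortFields.begin(), sortFields.end(), [](FieldAvailability a) {
        return a == FieldAvailability::kCollatedProvided ||
            a == FieldAvailability::kFullyProvided;
    });
}

// Shard filtering compares actual shard key values, so a fetch is needed unless every shard
// key field is fully provided (an ICU-encoded collated key is not usable for the comparison).
bool shardFilterNeedsFetch(const std::vector<FieldAvailability>& shardKeyFields) {
    return std::any_of(shardKeyFields.begin(), shardKeyFields.end(), [](FieldAvailability a) {
        return a != FieldAvailability::kFullyProvided;
    });
}

int main() {
    std::cout << sortIsCovered({FieldAvailability::kCollatedProvided}) << "\n";        // 1
    std::cout << shardFilterNeedsFetch({FieldAvailability::kCollatedProvided}) << "\n";  // 1
}
```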
fetch = true; break; } diff --git a/src/mongo/db/query/planner_analysis.h b/src/mongo/db/query/planner_analysis.h index 1cbc4d5e5ad99..f16d69846b1f4 100644 --- a/src/mongo/db/query/planner_analysis.h +++ b/src/mongo/db/query/planner_analysis.h @@ -29,7 +29,17 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_solution.h" diff --git a/src/mongo/db/query/planner_analysis_test.cpp b/src/mongo/db/query/planner_analysis_test.cpp index efe1e1446c576..12f005c1281e1 100644 --- a/src/mongo/db/query/planner_analysis_test.cpp +++ b/src/mongo/db/query/planner_analysis_test.cpp @@ -29,13 +29,27 @@ #include "mongo/db/query/planner_analysis.h" +#include +#include +#include #include - -#include "mongo/db/json.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_geo.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" #include "mongo/db/query/query_solution.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" using namespace mongo; diff --git a/src/mongo/db/query/planner_ixselect.cpp b/src/mongo/db/query/planner_ixselect.cpp index e119a51dc2e18..507935a900137 100644 --- a/src/mongo/db/query/planner_ixselect.cpp +++ b/src/mongo/db/query/planner_ixselect.cpp @@ -30,25 +30,46 @@ #include "mongo/db/query/planner_ixselect.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include #include #include "mongo/base/simple_string_data_comparator.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data_comparator_interface.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/geo/geometry_container.h" #include "mongo/db/geo/hash.h" +#include "mongo/db/geo/shapes.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/s2_common.h" -#include "mongo/db/index/wildcard_key_generator.h" #include "mongo/db/index_names.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_geo.h" #include "mongo/db/matcher/expression_internal_bucket_geo_within.h" -#include "mongo/db/matcher/expression_internal_expr_comparison.h" -#include "mongo/db/matcher/expression_text.h" #include "mongo/db/query/canonical_query_encoder.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/index_tag.h" #include "mongo/db/query/indexability.h" #include "mongo/db/query/planner_wildcard_helpers.h" -#include "mongo/db/query/query_planner_common.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kQuery @@ -381,11 +402,13 @@ bool QueryPlannerIXSelect::_compatible(const BSONElement& keyPatternElt, } if (index.type == IndexType::INDEX_WILDCARD) { - // Fields after "$_path" of a compound wildcard index should not be used to answer any - // query, because wildcard IndexEntry with reserved field, "$_path", present is used only - // to answer query on non-wildcard prefix. + // If the compound wildcard index is expanded to a generic CWI IndexEntry with '$_path' + // field being the wildcard field, this index is mostly for queries on regular prefix of the + // CWI. So such IndexEntry is ineligible to answer a query on any field after "$_path". size_t idx = 0; for (auto&& elt : index.keyPattern) { + // Bail out because this IndexEntry is trying to answer a field comes after "$_path" + // field. if (elt.fieldNameStringData() == "$_path") { return false; } @@ -394,13 +417,6 @@ bool QueryPlannerIXSelect::_compatible(const BSONElement& keyPatternElt, } idx++; } - - // If this IndexEntry is considered relevant to a regular field of a compound wildcard - // index, the IndexEntry must not have a specific expanded field for the wildcard field. - // Instead, the key pattern should contain a reserved "$_path" field. - if (keyPatternIdx < index.wildcardFieldPos && !index.keyPattern.hasField("$_path")) { - return false; - } } // Historically one could create indices with any particular value for the index spec, @@ -648,7 +664,7 @@ bool QueryPlannerIXSelect::_compatible(const BSONElement& keyPatternElt, return false; } - verify(SPHERE == cap->crs); + MONGO_verify(SPHERE == cap->crs); const Circle& circle = cap->circle; // No wrapping around the edge of the world is allowed in 2d centerSphere. @@ -664,7 +680,7 @@ bool QueryPlannerIXSelect::_compatible(const BSONElement& keyPatternElt, "Unknown indexing for given node and field", "node"_attr = node->debugString(), "field"_attr = keyPatternElt.toString()); - verify(0); + MONGO_verify(0); } MONGO_UNREACHABLE; } @@ -792,7 +808,7 @@ void QueryPlannerIXSelect::_rateIndices(MatchExpression* node, fullPath = prefix + node->path().toString(); } - verify(nullptr == node->getTag()); + MONGO_verify(nullptr == node->getTag()); node->setTag(new RelevantTag()); auto rt = static_cast(node->getTag()); rt->path = fullPath; @@ -960,7 +976,7 @@ void QueryPlannerIXSelect::stripUnneededAssignments(MatchExpression* node, */ static void removeIndexRelevantTag(MatchExpression* node, size_t idx) { RelevantTag* tag = static_cast(node->getTag()); - verify(tag); + MONGO_verify(tag); vector::iterator firstIt = std::find(tag->first.begin(), tag->first.end(), idx); if (firstIt != tag->first.end()) { tag->first.erase(firstIt); @@ -1178,7 +1194,7 @@ void QueryPlannerIXSelect::stripInvalidAssignmentsToTextIndexes(MatchExpression* // the prefix precedes "text". for (BSONElement elt = it.next(); elt.type() != String; elt = it.next()) { textIndexPrefixPaths.insert(elt.fieldName()); - verify(it.more()); + MONGO_verify(it.more()); } // If the index prefix is non-empty, remove invalid assignments to it. 
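The eligibility rule described in the _compatible() change earlier in this hunk (a generic compound wildcard IndexEntry whose wildcard component is still the reserved "$_path" field can only answer predicates on fields that precede "$_path" in the key pattern) can be written as a short positional check. In this sketch the key pattern is modeled as an ordered list of field names, and canAnswerField is a made-up helper, not the planner's API.

```cpp
#include <iostream>
#include <string>
#include <vector>

// Returns true only when 'queryField' appears in the regular (non-wildcard) prefix of the
// key pattern, i.e. strictly before the reserved "$_path" component.
bool canAnswerField(const std::vector<std::string>& keyPattern, const std::string& queryField) {
    for (const auto& field : keyPattern) {
        if (field == "$_path")
            return false;  // Reached "$_path" without finding the field: ineligible.
        if (field == queryField)
            return true;  // The field is part of the regular prefix.
    }
    return false;
}

int main() {
    // Hypothetical expanded key pattern for a compound wildcard index prefixed by "a".
    const std::vector<std::string> keyPattern{"a", "$_path", "wildcardField"};
    std::cout << canAnswerField(keyPattern, "a") << "\n";  // 1
    std::cout << canAnswerField(keyPattern, "b") << "\n";  // 0
}
```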
diff --git a/src/mongo/db/query/planner_ixselect.h b/src/mongo/db/query/planner_ixselect.h index 15f1e135d5eb6..828029227da4a 100644 --- a/src/mongo/db/query/planner_ixselect.h +++ b/src/mongo/db/query/planner_ixselect.h @@ -29,9 +29,20 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_array.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/index_entry.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" namespace mongo { diff --git a/src/mongo/db/query/planner_ixselect_test.cpp b/src/mongo/db/query/planner_ixselect_test.cpp index 1df4d714e6724..67d050c0f4d60 100644 --- a/src/mongo/db/query/planner_ixselect_test.cpp +++ b/src/mongo/db/query/planner_ixselect_test.cpp @@ -33,17 +33,45 @@ #include "mongo/db/query/planner_ixselect.h" +#include +#include +#include +#include +#include + +#include +#include +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/exec/index_path_projection.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/wildcard_key_generator.h" -#include "mongo/db/json.h" +#include "mongo/db/index_names.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/index_tag.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/text.h" -#include +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/text.h" // IWYU pragma: keep using namespace mongo; diff --git a/src/mongo/db/query/planner_wildcard_helpers.cpp b/src/mongo/db/query/planner_wildcard_helpers.cpp index f196cdddc96e5..8d95a57714bce 100644 --- a/src/mongo/db/query/planner_wildcard_helpers.cpp +++ b/src/mongo/db/query/planner_wildcard_helpers.cpp @@ -28,18 +28,45 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/planner_wildcard_helpers.h" - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include #include -#include "mongo/bson/util/builder.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/exec/projection_executor_utils.h" -#include "mongo/db/index/wildcard_key_generator.h" +#include 
"mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index_names.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/planner_wildcard_helpers.h" +#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/stage_types.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -688,12 +715,12 @@ void finalizeWildcardIndexScanConfiguration( index->wildcardFieldPos++; // If the wildcard field is "$_path", the index is used to answer query only on the non-wildcard - // prefix of a compound wildcard index. The bounds for both "$_path" fields should be - // "[MinKey, MaxKey]". Because the wildcard field can generate multiple keys for one single - // document, we should also instruct the IXSCAN to dedup keys. + // prefix of a compound wildcard index. The bounds for the "$_path" field should scan all + // string values and 'MinKey'. The bounds for the generic wildcard field should scan all values + // with bounds, "[MinKey, MaxKey]". Because the wildcard field can generate multiple keys for + // one single document, we should also instruct the IXSCAN to dedup keys. if (wildcardFieldName == "$_path"_sd) { - bounds->fields[index->wildcardFieldPos - 1].intervals.push_back( - IndexBoundsBuilder::allValues()); + bounds->fields[index->wildcardFieldPos - 1].intervals = makeAllValuesForPath(); bounds->fields[index->wildcardFieldPos].intervals.push_back( IndexBoundsBuilder::allValues()); bounds->fields[index->wildcardFieldPos].name = "$_path"; @@ -773,5 +800,109 @@ BSONElement getWildcardField(const IndexEntry& index) { return wildcardElt; } + +std::vector makeAllValuesForPath() { + std::vector intervals; + + // Generating a [MinKey, MinKey] point interval. We use 'MinKey' as the "$_path" key value for + // documents that don't have any wildcard field. + BSONObjBuilder minKeyBob; + minKeyBob.appendMinKey(""); + intervals.push_back(IndexBoundsBuilder::makePointInterval(minKeyBob.obj())); + + // Generating a all-value index bounds for only string type, because "$_path" with a string + // value tracks the wildcard path. + BSONObjBuilder allStringBob; + allStringBob.appendMinForType("", BSONType::String); + allStringBob.appendMaxForType("", BSONType::String); + intervals.push_back(IndexBoundsBuilder::makeRangeInterval( + allStringBob.obj(), BoundInclusion::kIncludeStartKeyOnly)); + + return intervals; +} + +bool expandWildcardFieldBounds(std::vector>& ixscanNodes) { + // Check if the index is a CWI and its wildcard field was expanded to a specific field. + auto isCompoundWildcardIndexToExpand = [](const IndexScanNode* idxNode) { + const IndexEntry& index = idxNode->index; + BSONElement elt = index.keyPattern.firstElement(); + if (index.type == INDEX_WILDCARD && elt.fieldNameStringData() != "$_path"_sd && + index.keyPattern.nFields() > 2 && + idxNode->bounds.fields[index.wildcardFieldPos].name != "$_path") { + return true; + } + return false; + }; + // Expand the CWI's index bounds to include all keys for the '$_path' field and the wildcard + // field. 
+ auto expandIndexBoundsForCWI = [](IndexScanNode* idxNode) { + IndexEntry& index = idxNode->index; + idxNode->bounds.fields[index.wildcardFieldPos - 1].intervals = makeAllValuesForPath(); + idxNode->bounds.fields[index.wildcardFieldPos].intervals = + std::vector{IndexBoundsBuilder::allValues()}; + idxNode->bounds.fields[index.wildcardFieldPos - 1].name = "$_path"; + if (!idxNode->iets.empty()) { + tassert(7842600, + "The size of iets must be the same as in the index bounds", + idxNode->iets.size() == idxNode->bounds.fields.size()); + idxNode->iets[index.wildcardFieldPos - 1] = + interval_evaluation_tree::IET::make( + idxNode->bounds.fields[index.wildcardFieldPos - 1]); + idxNode->iets[index.wildcardFieldPos] = + interval_evaluation_tree::IET::make( + idxNode->bounds.fields[index.wildcardFieldPos]); + } + index.multikeyPaths[index.wildcardFieldPos] = MultikeyComponents(); + idxNode->shouldDedup = true; + + // Reverse the index bounds of the wildcard field. + size_t idx = 0; + for (auto elem : index.keyPattern) { + if (idx == index.wildcardFieldPos) { + if (elem.number() < 0) { + idxNode->bounds.fields[index.wildcardFieldPos].reverse(); + } + } + idx++; + } + }; + + // Expand the index bounds of certain compound wildcard indexes in order to avoid missing any + // documents for $or queries. + for (auto&& node : ixscanNodes) { + // This expanding logic is only for $or queries with the assumption that there're only FETCH + // and IXSCAN under OR to expand. + if (STAGE_FETCH == node->getType()) { + QuerySolutionNode* child = node->children[0].get(); + if (STAGE_IXSCAN == child->getType()) { + IndexScanNode* idxNode = dynamic_cast(child); + tassert(7767201, "There must be an IndexScanNode under the FetchNode", idxNode); + if (isCompoundWildcardIndexToExpand(idxNode)) { + if ((!node->filter || !idxNode->filter)) { + expandIndexBoundsForCWI(idxNode); + } else { + return false; + } + } + } + } else if (STAGE_IXSCAN == node->getType()) { + IndexScanNode* idxNode = static_cast(node.get()); + if (isCompoundWildcardIndexToExpand(idxNode)) { + if (!idxNode->filter) { + // It's not safe to include/expand the index bounds if there's no filter in + // the IndexScanNode and no FetchNode above the IndexScanNode. Therefore, + // in order to prevent missing any document in any predicate of the $or query, + // we should abandon the query plan using such compound wildcard index. + return false; + } else { + // We can expand the CWI because there's a filter making sure the returning + // documents match the predicate. 
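The branching in expandWildcardFieldBounds() can be summarized as an expand-or-abandon decision per $or branch. The sketch below restates it with plain booleans; the parameter names are invented for illustration, and the real code inspects the filters on the FETCH and IXSCAN nodes rather than boolean flags.

```cpp
#include <iostream>

// Returns true when planning may continue for the branch (expanding the wildcard bounds if
// needed), and false when the $or plan using this compound wildcard index must be abandoned.
bool mayExpandCwiOrBranch(bool hasFetchParent, bool fetchHasFilter, bool ixscanHasFilter) {
    if (hasFetchParent) {
        // With a FETCH above the IXSCAN, expansion is allowed unless both nodes carry a
        // filter; in that combined case the plan is abandoned.
        return !fetchHasFilter || !ixscanHasFilter;
    }
    // A bare IXSCAN needs its own filter; otherwise the widened bounds could let
    // non-matching documents through and the plan must be abandoned.
    return ixscanHasFilter;
}

int main() {
    std::cout << mayExpandCwiOrBranch(true, true, false) << "\n";    // 1: expand under FETCH
    std::cout << mayExpandCwiOrBranch(false, false, false) << "\n";  // 0: abandon the plan
}
```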
+ expandIndexBoundsForCWI(idxNode); + } + } + } + } + return true; +} } // namespace wildcard_planning } // namespace mongo diff --git a/src/mongo/db/query/planner_wildcard_helpers.h b/src/mongo/db/query/planner_wildcard_helpers.h index b42968cf16f19..f72c56ed06d2f 100644 --- a/src/mongo/db/query/planner_wildcard_helpers.h +++ b/src/mongo/db/query/planner_wildcard_helpers.h @@ -29,12 +29,21 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/db/field_ref.h" #include "mongo/db/index/multikey_paths.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" #include "mongo/db/query/interval_evaluation_tree.h" #include "mongo/db/query/query_solution.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { namespace wildcard_planning { @@ -103,5 +112,19 @@ bool isWildcardObjectSubpathScan(const IndexScanNode* node); * 'index' must be a WILDCARD index. */ BSONElement getWildcardField(const IndexEntry& index); + +/** + * This helper generates index intervals for the "$_path" field to scan all keys indexing a + * document. The index intervals will be ['[MinKey, MinKey]', '["", {})]' ]. The "MinKey" key value + * is for documents missing the wildcard field. + */ +std::vector makeAllValuesForPath(); + +/** + * If the compound wildcard index is expanded to any known field and the index is used to answer a + * $or query, we should expand the index bounds of the wildcard field in such IndexEntry to include + * all keys. Returns false if the query plan cannot use the index. + */ +bool expandWildcardFieldBounds(std::vector>& ixscanNodes); } // namespace wildcard_planning } // namespace mongo diff --git a/src/mongo/db/query/planner_wildcard_helpers_test.cpp b/src/mongo/db/query/planner_wildcard_helpers_test.cpp index 5b11bdc7c01c9..500dcab42d1d5 100644 --- a/src/mongo/db/query/planner_wildcard_helpers_test.cpp +++ b/src/mongo/db/query/planner_wildcard_helpers_test.cpp @@ -27,12 +27,32 @@ * it in the license file. 
*/ +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +// IWYU pragma: no_include "boost/intrusive/detail/algorithm.hpp" +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/wildcard_key_generator.h" +#include "mongo/db/index_names.h" +#include "mongo/db/query/interval.h" #include "mongo/db/query/interval_evaluation_tree.h" #include "mongo/db/query/planner_wildcard_helpers.h" #include "mongo/db/query/query_solution.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo::wildcard_planning { namespace { diff --git a/src/mongo/db/query/projection.cpp b/src/mongo/db/query/projection.cpp index 6ccabc3174c2a..f7d2ff2049036 100644 --- a/src/mongo/db/query/projection.cpp +++ b/src/mongo/db/query/projection.cpp @@ -29,12 +29,25 @@ #include "mongo/db/query/projection.h" +#include +#include +#include +#include +#include + +#include +#include +#include + #include "mongo/base/exact_cast.h" +#include "mongo/db/matcher/copyable_match_expression.h" #include "mongo/db/matcher/match_expression_dependencies.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/projection_ast_path_tracking_visitor.h" +#include "mongo/db/query/projection_ast_visitor.h" #include "mongo/db/query/tree_walker.h" -#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace projection_ast { @@ -239,6 +252,10 @@ void optimizeProjection(ProjectionPathASTNode* root) { Projection::Projection(ProjectionPathASTNode root, ProjectType type) : _root(std::move(root)), _type(type), _deps(analyzeProjection(&_root, type)) {} +void Projection::optimize() { + optimizeProjection(&_root); + _deps = analyzeProjection(&_root, _type); +} namespace { /** diff --git a/src/mongo/db/query/projection.h b/src/mongo/db/query/projection.h index f920a778e9aea..77b895e26dd1f 100644 --- a/src/mongo/db/query/projection.h +++ b/src/mongo/db/query/projection.h @@ -29,8 +29,16 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/query/projection_ast.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/str.h" namespace mongo { @@ -164,6 +172,13 @@ class Projection { return _deps.containsElemMatch; } + /** + * Optimizes the projection tree. Additionally, re-computes dependencies in case anything + * changes as in projection {x: {$and: [false, "$b"]}} - which when optimized will no longer + * depend on "b". + */ + void optimize(); + private: ProjectionPathASTNode _root; ProjectType _type; @@ -171,7 +186,10 @@ class Projection { }; /** - * Walks the projection AST and optimizes each node. + * Walks the projection AST and optimizes each node. 
Note if you have a 'Projection' instance you + * should prefer to use Projection::optimize() since it will additionally re-compute dependencies in + * case anything changes as in projection {x: {$and: [false, "$b"]}} - which when optimized will no + * longer depend on "b". */ void optimizeProjection(ProjectionPathASTNode* root); diff --git a/src/mongo/db/query/projection_ast.h b/src/mongo/db/query/projection_ast.h index 1304c68efe560..e9f727146d542 100644 --- a/src/mongo/db/query/projection_ast.h +++ b/src/mongo/db/query/projection_ast.h @@ -29,12 +29,31 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/copyable_match_expression.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/projection_ast_visitor.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace projection_ast { diff --git a/src/mongo/db/query/projection_ast_test.cpp b/src/mongo/db/query/projection_ast_test.cpp index ee8f4fd7e6fc4..5f5f3bc2c9e92 100644 --- a/src/mongo/db/query/projection_ast_test.cpp +++ b/src/mongo/db/query/projection_ast_test.cpp @@ -27,21 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include #include -#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_ast.h" #include "mongo/db/query/projection_ast_util.h" #include "mongo/db/query/projection_parser.h" -#include "mongo/db/query/query_planner_test_fixture.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/serialization_options.h" -#include "mongo/unittest/inline_auto_update.h" +#include "mongo/platform/decimal128.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace { @@ -773,15 +784,15 @@ TEST_F(ProjectionASTTest, ShouldThrowWithPositionalOnExclusion) { DBException, 31395); } -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } TEST_F(ProjectionASTTest, TestASTRedaction) { SerializationOptions options; - options.replacementForLiteralArgs = "?"; - options.redactIdentifiers = true; - options.identifierRedactionPolicy = redactFieldNameForTest; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + options.transformIdentifiers = true; + options.transformIdentifiersCallback = applyHmacForTest; auto proj = fromjson("{'a.b': 1}"); @@ -806,7 +817,7 @@ TEST_F(ProjectionASTTest, TestASTRedaction) { proj = fromjson("{f: {$elemMatch: {foo: 'bar'}}}"); output = projection_ast::serialize(*parseWithFindFeaturesEnabled(proj).root(), options); 
ASSERT_BSONOBJ_EQ_AUTO( // - R"({"HASH":{"$elemMatch":{"HASH":{"$eq":"?"}}},"HASH<_id>":true})", + R"({"HASH":{"$elemMatch":{"HASH":{"$eq":"?string"}}},"HASH<_id>":true})", output); // Positional projection @@ -821,14 +832,14 @@ TEST_F(ProjectionASTTest, TestASTRedaction) { proj = fromjson("{a: {$slice: 1}}"); output = projection_ast::serialize(*parseWithFindFeaturesEnabled(proj).root(), options); ASSERT_BSONOBJ_EQ_AUTO( // - R"({"HASH":{"$slice":"?"}})", + R"({"HASH":{"$slice":"?number"}})", output); // Slice (second form) proj = fromjson("{a: {$slice: [1, 3]}}"); output = projection_ast::serialize(*parseWithFindFeaturesEnabled(proj).root(), options); ASSERT_BSONOBJ_EQ_AUTO( // - R"({"HASH":{"$slice":["?","?"]}})", + R"({"HASH":{"$slice":["?number","?number"]}})", output); /// $meta projection diff --git a/src/mongo/db/query/projection_ast_util.cpp b/src/mongo/db/query/projection_ast_util.cpp index 3806cff410578..361c7fbd08076 100644 --- a/src/mongo/db/query/projection_ast_util.cpp +++ b/src/mongo/db/query/projection_ast_util.cpp @@ -27,12 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/copyable_match_expression.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/query/projection_ast_path_tracking_visitor.h" #include "mongo/db/query/projection_ast_util.h" +#include "mongo/db/query/projection_ast_visitor.h" #include "mongo/db/query/serialization_options.h" #include "mongo/db/query/tree_walker.h" +#include "mongo/util/assert_util.h" namespace mongo::projection_ast { namespace { @@ -61,18 +77,11 @@ class BSONPreVisitor : public ProjectionASTConstVisitor { void visit(const ProjectionSliceASTNode* node) override { BSONObjBuilder sub(_builders.top().subobjStart(getFieldName())); if (node->skip()) { - if (_options.replacementForLiteralArgs) { - const auto rep = _options.replacementForLiteralArgs.value(); - sub.appendArray("$slice", BSON_ARRAY(rep << rep)); - } else { - sub.appendArray("$slice", BSON_ARRAY(*node->skip() << node->limit())); - } + sub.appendArray("$slice", + BSON_ARRAY(_options.serializeLiteral(*node->skip()) + << _options.serializeLiteral(node->limit()))); } else { - if (_options.replacementForLiteralArgs) { - sub.append("$slice", _options.replacementForLiteralArgs.value()); - } else { - sub.appendNumber("$slice", node->limit()); - } + _options.appendLiteral(&sub, "$slice", node->limit()); } } diff --git a/src/mongo/db/query/projection_ast_util.h b/src/mongo/db/query/projection_ast_util.h index b0849a00e7188..212cc2598327a 100644 --- a/src/mongo/db/query/projection_ast_util.h +++ b/src/mongo/db/query/projection_ast_util.h @@ -29,8 +29,10 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/query/projection.h" #include "mongo/db/query/projection_ast.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { namespace projection_ast { diff --git a/src/mongo/db/query/projection_parser.cpp b/src/mongo/db/query/projection_parser.cpp index 50cb3b5b97f48..c62e810cffb9e 100644 --- a/src/mongo/db/query/projection_parser.cpp +++ b/src/mongo/db/query/projection_parser.cpp @@ -29,8 +29,32 @@ #include "mongo/db/query/projection_parser.h" +#include +#include +#include +#include +#include +#include + +#include 
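
The test expectations above show literals being shielded as debug type strings such as "?number" and "?string" while field names pass through a transform callback (HASH<...>), and the projection_ast_util.cpp hunk replaces the old replacementForLiteralArgs handling with SerializationOptions::serializeLiteral()/appendLiteral() for $slice. The standalone sketch below is not MongoDB's SerializationOptions class; it uses invented types purely to illustrate how a literal policy plus an identifier-transform callback can produce that kind of output.

```cpp
// Toy illustration only: invented types, not MongoDB's SerializationOptions.
#include <functional>
#include <iostream>
#include <string>
#include <variant>

enum class LiteralSerializationPolicy { kUnchanged, kToDebugTypeString };

struct Options {
    LiteralSerializationPolicy literalPolicy = LiteralSerializationPolicy::kUnchanged;
    bool transformIdentifiers = false;
    std::function<std::string(const std::string&)> transformIdentifiersCallback;

    // Mirrors the idea of serializeLiteral(): either echo the value or shield it
    // behind a type-only placeholder like "?number" / "?string".
    std::string serializeLiteral(const std::variant<long long, std::string>& v) const {
        if (literalPolicy == LiteralSerializationPolicy::kToDebugTypeString) {
            return std::holds_alternative<long long>(v) ? "\"?number\"" : "\"?string\"";
        }
        return std::holds_alternative<long long>(v)
            ? std::to_string(std::get<long long>(v))
            : "\"" + std::get<std::string>(v) + "\"";
    }

    std::string serializeFieldName(const std::string& name) const {
        return transformIdentifiers && transformIdentifiersCallback
            ? transformIdentifiersCallback(name)
            : name;
    }
};

int main() {
    Options opts;
    opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString;
    opts.transformIdentifiers = true;
    opts.transformIdentifiersCallback = [](const std::string& s) { return "HASH<" + s + ">"; };

    // A {a: {$slice: 1}} style projection serialized under the debug policy.
    std::cout << "{\"" << opts.serializeFieldName("a") << "\": {\"$slice\": "
              << opts.serializeLiteral(1LL) << "}}\n";
    // Prints: {"HASH<a>": {"$slice": "?number"}}
}
```
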
+ +#include "mongo/base/error_codes.h" #include "mongo/base/exact_cast.h" -#include "mongo/db/exec/document_value/document.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/matcher/copyable_match_expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace projection_ast { diff --git a/src/mongo/db/query/projection_parser.h b/src/mongo/db/query/projection_parser.h index 366953e4b7e46..4c3eaff2838fa 100644 --- a/src/mongo/db/query/projection_parser.h +++ b/src/mongo/db/query/projection_parser.h @@ -29,6 +29,14 @@ #pragma once +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/projection.h" #include "mongo/db/query/projection_ast.h" #include "mongo/db/query/projection_policies.h" diff --git a/src/mongo/db/query/projection_test.cpp b/src/mongo/db/query/projection_test.cpp index eec9945523298..2eede8f632333 100644 --- a/src/mongo/db/query/projection_test.cpp +++ b/src/mongo/db/query/projection_test.cpp @@ -27,14 +27,37 @@ * it in the license file. */ +#include #include - -#include "mongo/db/json.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace { using std::string; diff --git a/src/mongo/db/query/query_feature_flags.idl b/src/mongo/db/query/query_feature_flags.idl index 86d0622d38591..1830fb841bbcc 100644 --- a/src/mongo/db/query/query_feature_flags.idl +++ b/src/mongo/db/query/query_feature_flags.idl @@ -35,88 +35,75 @@ feature_flags: cpp_varname: gFeatureFlagChangeStreamsRewrite default: true version: 5.1 - - featureFlagShardedTimeSeries: - description: "Feature flag for allowing sharding a Time Series collection" - cpp_varname: gFeatureFlagShardedTimeSeries - default: true - version: 5.1 - - featureFlagShardedTimeSeriesUpdateDelete: - description: "Feature flag for allowing update and delete operations on a sharded Time Series collection" - cpp_varname: gFeatureFlagShardedTimeSeriesUpdateDelete - default: true - version: 5.1 - - 
featureFlagChangeStreamsFurtherEnrichedEvents: - description: "Feature flag for enabling the reshardCollection and refineCollectionShardKey events, as well as enhancements to the updateDescription field for update events" - cpp_varname: gFeatureFlagChangeStreamsFurtherEnrichedEvents - default: true - version: 6.1 + shouldBeFCVGated: true featureFlagCommonQueryFramework: description: "Feature flag for allowing use of Cascades-based query optimizer" cpp_varname: gFeatureFlagCommonQueryFramework default: false - - featureFlagBucketUnpackWithSort: - description: "Enables a time-series optimization that allows for partially-blocking sort on time" - cpp_varname: gFeatureFlagBucketUnpackWithSort - default: true - version: 6.0 + shouldBeFCVGated: false featureFlagColumnstoreIndexes: description: "Enables creation of a new columnstore index type" cpp_varname: gFeatureFlagColumnstoreIndexes default: false + shouldBeFCVGated: true featureFlagServerlessChangeStreams: description: "Feature flag to enable reading change events from the change collection rather than the oplog" cpp_varname: gFeatureFlagServerlessChangeStreams default: false + shouldBeFCVGated: true featureFlagSbeFull: description: "Feature flag for SBE behaviors, features, or extensions that are not yet enabled by default" cpp_varname: gFeatureFlagSbeFull default: false + shouldBeFCVGated: true - featureFlagTelemetry: - description: "Feature flag for enabling the telemetry store." - cpp_varname: gFeatureFlagTelemetry + featureFlagQueryStats: + description: "Feature flag for enabling the queryStats store." + cpp_varname: gFeatureFlagQueryStats default: false - - featureFlagBitwiseAggOperators: - description: "Feature flag for enabling support for bitwise operators in the aggregation language." - cpp_varname: gFeatureFlagBitwise - default: true - version: 6.3 + shouldBeFCVGated: true featureFlagCompoundWildcardIndexes: description: "Feature flag to enable compound wildcard indexes." cpp_varname: gFeatureFlagCompoundWildcardIndexes default: true version: 7.0 + shouldBeFCVGated: true - featureFlagShardedSearchCustomSort: - description: "Feature flag to enable user specified sort for sharded $search queries." - cpp_varname: gFeatureFlagShardedSearchCustomSort + featureFlagSearchBatchSizeLimit: + description: "Feature flag to enable the search batchsize and limit optimization." + cpp_varname: gFeatureFlagSearchBatchSizeLimit default: true - version: 7.0 + version: 7.1 + shouldBeFCVGated: true - featureFlagUserRoles: - description: "Feature flag to enable usage of $$USER_ROLES." - cpp_varname: gFeatureFlagUserRoles + featureFlagAggOutTimeseries: + description: "Feature flag for enabling support for $out to write to time-series collections" + cpp_varname: gFeatureFlagAggOutTimeseries default: true - version: 7.0 + version: 7.1 + shouldBeFCVGated: true - featureFlagApproxPercentiles: - description: "Feature flag to enable approximate $percentile accumulator/expression." - cpp_varname: gFeatureFlagApproxPercentiles - default: true - version: 7.0 + featureFlagQuerySettings: + description: "Feature flag for enabling persistent query settings" + cpp_varname: gFeatureFlagQuerySettings + default: false + shouldBeFCVGated: true - featureFlagSearchBatchSizeLimit: - description: "Feature flag to enable the search batchsize and limit optimization." - cpp_varname: gFeatureFlagSearchBatchSizeLimit + featureFlagVectorSearchPublicPreview: + description: "Feature flag to enable vector search for public preview." 
+ cpp_varname: gFeatureFlagVectorSearchPublicPreview + default: false + # TODO SERVER-78294 Toggle this once we've backported the feature. + shouldBeFCVGated: true + + featureFlagSearchInSbe: + description: "Feature flag to enable $search execution in SBE" + cpp_varname: gFeatureFlagSearchInSbe default: false + shouldBeFCVGated: true diff --git a/src/mongo/db/query/query_knobs.idl b/src/mongo/db/query/query_knobs.idl index f63351f6b6018..ee0d90f29785f 100644 --- a/src/mongo/db/query/query_knobs.idl +++ b/src/mongo/db/query/query_knobs.idl @@ -36,7 +36,7 @@ global: - "mongo/db/query/ce_mode_parameter.h" - "mongo/db/query/explain_version_validator.h" - "mongo/db/query/sbe_plan_cache_on_parameter_change.h" - - "mongo/db/query/telemetry_util.h" + - "mongo/db/query/query_stats_util.h" - "mongo/platform/atomic_proxy.h" - "mongo/platform/atomic_word.h" @@ -52,19 +52,12 @@ enums: # Attempt to use the Bonsai optimizer and lower to SBE for eligible queries, otherwise # fallback to "trySbeEngine". kTryBonsai: "tryBonsai" + # Like, "tryBonsai", but additionally includes features that should not be enabled by + # default. Falls back to "trySbeEngine" for ineligible queries. + kTryBonsaiExperimental: "tryBonsaiExperimental" # Force the Bonsai optimizer for all queries. kForceBonsai: "forceBonsai" - QueryTelemetryFieldNameRedactionStrategy: - description: "Enum for possible values of queryFieldNameRedactionStrategy." - type: string - values: - kNoRedactionStrategy: "none" - # Use the constant redaction strategy. - kConstantRedactionStrategy: "constant" - # Use a prefix of sha256 redaction strategy - kSha256RedactionStrategy: "sha256" - structs: InternalQueryCutoffForSampleFromRandomCursorStorage: description: "A specification for the 'internalQueryCutoffForSampleFromRandomCursor' cluster-wide configuration parameter type." @@ -719,6 +712,7 @@ server_parameters: expr: 100 * 1024 * 1024 validator: gt: 0 + on_update: plan_cache_util::clearSbeCacheOnParameterChange enableSearchMeta: description: "Exists for backwards compatibility in startup parameters, enabling this was @@ -759,9 +753,10 @@ server_parameters: set_at: [ startup, runtime ] cpp_varname: "internalQueryCardinalityEstimatorMode" cpp_vartype: std::string - default: sampling + default: heuristic validator: callback: optimizer::ce::validateCEMode + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerDisableScan: description: "Disable full collection scans in the Cascades optimizer." @@ -769,6 +764,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerDisableScan" cpp_vartype: AtomicWord default: false + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerDisableIndexes: description: "Disable index scan plans in the Cascades optimizer." @@ -776,6 +772,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerDisableIndexes" cpp_vartype: AtomicWord default: false + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerDisableMergeJoinRIDIntersect: description: "Disable index RID intersection via merge join in the Cascades optimizer." @@ -783,6 +780,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerDisableMergeJoinRIDIntersect" cpp_vartype: AtomicWord default: false + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerDisableHashJoinRIDIntersect: description: "Disable index RID intersection via hash join in the Cascades optimizer." 
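
Every entry in query_feature_flags.idl above now carries an explicit shouldBeFCVGated field. As I read it (this is an assumption about the IDL semantics, not something the patch states), an FCV-gated flag only reports itself as enabled once the server's feature compatibility version has reached the flag's version, while a non-gated flag is a plain boolean toggle. The sketch below is a simplified, hypothetical model of that distinction with invented names; it is not MongoDB's generated feature flag class.

```cpp
// Hypothetical model of FCV gating; names and semantics are assumptions, not MongoDB's API.
#include <iostream>
#include <optional>

struct Version {
    int major, minor;
    bool operator>=(const Version& o) const {
        return major != o.major ? major > o.major : minor >= o.minor;
    }
};

struct ToyFeatureFlag {
    bool defaultValue;
    bool shouldBeFCVGated;
    std::optional<Version> introducedIn;  // only meaningful for gated flags

    // Gated flags require the current FCV to have reached 'introducedIn';
    // non-gated flags act as a plain on/off switch.
    bool isEnabled(const Version& currentFCV) const {
        if (!defaultValue) {
            return false;
        }
        if (!shouldBeFCVGated || !introducedIn) {
            return true;
        }
        return currentFCV >= *introducedIn;
    }
};

int main() {
    ToyFeatureFlag searchBatchSizeLimit{true, true, Version{7, 1}};
    ToyFeatureFlag commonQueryFramework{false, false, std::nullopt};

    std::cout << searchBatchSizeLimit.isEnabled({7, 0}) << "\n";  // 0: FCV has not reached 7.1
    std::cout << searchBatchSizeLimit.isEnabled({7, 1}) << "\n";  // 1: FCV reached 7.1
    std::cout << commonQueryFramework.isEnabled({7, 1}) << "\n";  // 0: disabled by default
}
```
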
@@ -790,6 +788,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerDisableHashJoinRIDIntersect" cpp_vartype: AtomicWord default: false + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerDisableGroupByAndUnionRIDIntersect: description: "Disable index RID intersection via group by and union in the Cascades optimizer." @@ -797,6 +796,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerDisableGroupByAndUnionRIDIntersect" cpp_vartype: AtomicWord default: false + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerKeepRejectedPlans: description: "Keep track of rejected plans in the memo. Applies only to the Cascades optimizer." @@ -804,6 +804,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerKeepRejectedPlans" cpp_vartype: AtomicWord default: false + test_only: true internalCascadesOptimizerDisableBranchAndBound: description: "Disable cascades branch-and-bound strategy, and fully evaluate all plans in the @@ -812,6 +813,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerDisableBranchAndBound" cpp_vartype: AtomicWord default: false + test_only: true internalCascadesOptimizerFastIndexNullHandling: description: "Controls if we prefer to cover queries which may return nulls with indexes." @@ -819,6 +821,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerFastIndexNullHandling" cpp_vartype: AtomicWord default: false + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerDisableYieldingTolerantPlans: description: "Controls if we prefer to insert redundant index predicates on the Seek side in order to prevent issues arising from yielding." @@ -833,6 +836,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerMinIndexEqPrefixes" cpp_vartype: AtomicWord default: 1 + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerMaxIndexEqPrefixes: description: "Controls the maximum number of equality prefixes in each candidate index." @@ -840,6 +844,7 @@ server_parameters: cpp_varname: "internalCascadesOptimizerMaxIndexEqPrefixes" cpp_vartype: AtomicWord default: 1 + on_update: plan_cache_util::clearSbeCacheOnParameterChange internalCascadesOptimizerStdCoutDebugOutput: description: "Enables verbose, non-JSON, debug output for Cascades optimizer." @@ -867,6 +872,14 @@ server_parameters: default: false test_only: true + internalCascadesOptimizerSamplingCEScanStartOfColl: + description: "Forces sampling CE to select sample by scanning from the start of the collection." + set_at: [ startup, runtime ] + cpp_varname: "internalCascadesOptimizerSamplingCEScanStartOfColl" + cpp_vartype: AtomicWord + default: false + test_only: true + internalQueryFrameworkControl: description: "Knob to control the optimizer/execution engine to use." set_at: [ startup, runtime ] @@ -1017,32 +1030,40 @@ server_parameters: default: false test_only: true - internalQueryConfigureTelemetrySamplingRate: - description: "The maximum number of queries per second that are sampled for query telemetry. + internalQueryStatsRateLimit: + description: "The maximum number of queries per second that are sampled for query stats. If the rate of queries goes above this number, then rate limiting will kick in, and any further queries will not be sampled. To sample all queries, this can be set to -1. This can be - set to 0 to turn telemetry off completely." + set to 0 to turn queryStats off completely." 
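
The renamed internalQueryStatsRateLimit knob above caps how many queries per second are sampled for query stats: -1 samples every query, 0 disables sampling, and any positive value is a per-second cap beyond which rate limiting kicks in. A minimal sketch of that policy (a fixed one-second window counter, purely illustrative and not the server's actual limiter) could look like this:

```cpp
// Illustrative per-second sampling limiter; not MongoDB's actual rate limiter.
#include <chrono>
#include <iostream>
#include <optional>

class ToySampleLimiter {
public:
    explicit ToySampleLimiter(long long ratePerSec) : _rate(ratePerSec) {}

    // Returns true if this query should be sampled for query stats.
    bool shouldSample(std::chrono::steady_clock::time_point now) {
        if (_rate < 0) return true;    // -1: sample every query
        if (_rate == 0) return false;  // 0: sampling is turned off
        if (!_windowStart || now - *_windowStart >= std::chrono::seconds(1)) {
            _windowStart = now;        // start a fresh one-second window
            _sampledInWindow = 0;
        }
        if (_sampledInWindow < _rate) {
            ++_sampledInWindow;
            return true;
        }
        return false;                  // over the cap: rate limiting kicks in
    }

private:
    long long _rate;
    long long _sampledInWindow = 0;
    std::optional<std::chrono::steady_clock::time_point> _windowStart;
};

int main() {
    ToySampleLimiter limiter(2);  // sample at most two queries per second
    auto t0 = std::chrono::steady_clock::now();
    for (int i = 0; i < 4; ++i) {
        std::cout << limiter.shouldSample(t0) << ' ';  // prints: 1 1 0 0
    }
    std::cout << '\n';
}
```
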
set_at: [ startup, runtime ] - cpp_varname: "queryTelemetrySamplingRate" + cpp_varname: "internalQueryStatsRateLimit" cpp_vartype: AtomicWord default: 0 validator: gte: -1 - on_update: telemetry_util::onTelemetrySamplingRateUpdate + on_update: query_stats_util::onQueryStatsSamplingRateUpdate - internalQueryConfigureTelemetryCacheSize: - description: "The maximum amount of memory that the system will allocate for the query telemetry + internalQueryStatsCacheSize: + description: "The maximum amount of memory that the system will allocate for the queryStats cache. This will accept values in either of the following formats: 1. % indicates a percentage of the physical memory available to the process. E.g.: 15%. 2. (MB|GB), indicates the amount of memory in MB or GB. E.g.: 1.5GB, 100MB. The default value is 1%, which means 1% of the physical memory available to the process." set_at: [ startup, runtime ] - cpp_varname: "queryTelemetryStoreSize" + cpp_varname: "internalQueryStatsCacheSize" cpp_vartype: synchronized_value default: "1%" - on_update: telemetry_util::onTelemetryStoreSizeUpdate + on_update: query_stats_util::onQueryStatsStoreSizeUpdate validator: - callback: telemetry_util::validateTelemetryStoreSize + callback: query_stats_util::validateQueryStatsStoreSize + + internalQueryStatsErrorsAreCommandFatal: + description: "Whether errors in the $queryStats stage cause the aggregation pipeline to + immediately fail and report the error. Note that this is always the case for debug builds." + set_at: [ startup, runtime ] + cpp_varname: "internalQueryStatsErrorsAreCommandFatal" + cpp_vartype: AtomicWord + default: false internalQueryColumnScanMinCollectionSizeBytes: description: "The min collection size threshold for which column scan will always be allowed. If @@ -1129,7 +1150,7 @@ server_parameters: default: 60000 validator: gte: 0 - + internalQueryAggMulticastMaxConcurrency: description: "Max number of concurrent requests when aggregations are sent to all shard servers" set_at: startup @@ -1147,11 +1168,15 @@ server_parameters: default: true internalQueryTdigestDelta: - description: "Compaction parameter the for t-digest algorithm." + description: "Compaction parameter for the t-digest algorithm. Increasing delta might improve + accuracy of the computed percentiles at the cost of using more memory (about 12KB per 1000 of + increase). Runtime of t-digest also depends on delta but non-linearly. The current default was + chosen empirically to yield a good balance between runtime, memory consumption and accuracy on + most datasets." set_at: [ startup, runtime ] cpp_varname: internalQueryTdigestDelta cpp_vartype: AtomicWord - default: 1000 + default: 2000 validator: gte: 0 lte: 100000 # arbitrary, just to set an upper limit on the amount of memory used by t-digest @@ -1168,8 +1193,8 @@ server_parameters: gte: 0 internalQueryAutoParameterizationMaxParameterCount: - description: "The maximum numbers of parameters that query auto-parameterization can extract from a query. - If auto parameterizating a query would result in a greater number of parameters than the limit, + description: "The maximum number of parameters that query auto-parameterization can extract from a query. + If auto parameterizing a query would result in a greater number of parameters than the limit, then auto parameterization will not be performed. If set to 0, then no limit will be applied."
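
The expanded internalQueryTdigestDelta description above quantifies the trade-off: roughly 12KB of extra memory per 1000 added to delta, so the default bump from 1000 to 2000 in this patch costs on the order of an extra 12KB per t-digest. A back-of-the-envelope helper, illustrative only and using just the figure quoted in the description:

```cpp
// Rough memory estimate based on the "~12KB per 1000 of delta increase" figure
// quoted in the knob description; illustrative arithmetic only.
#include <iostream>

double extraMemoryKB(int oldDelta, int newDelta) {
    return (newDelta - oldDelta) / 1000.0 * 12.0;
}

int main() {
    std::cout << extraMemoryKB(1000, 2000) << " KB\n";  // ~12 KB for the new default
    std::cout << extraMemoryKB(1000, 5000) << " KB\n";  // ~48 KB
}
```
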
set_at: [ startup, runtime ] @@ -1180,6 +1205,54 @@ server_parameters: gte: 0 on_update: plan_cache_util::clearSbeCacheOnParameterChange + internalQueryMaxSpoolMemoryUsageBytes: + description: "The maximum amount of memory a query or command is willing to use to execute a + spool, measured in bytes. If disk use is allowed, then it may be possible to spool more data, + but this limit will still constrain the memory consumption." + set_at: [ startup, runtime ] + cpp_varname: "internalQueryMaxSpoolMemoryUsageBytes" + cpp_vartype: AtomicWord + default: + expr: 100 * 1024 * 1024 + validator: + gt: 0 + + internalQueryMaxSpoolDiskUsageBytes: + description: "The maximum amount of disk a query or command is willing to use to execute a + spool, measured in bytes." + set_at: [ startup, runtime ] + cpp_varname: "internalQueryMaxSpoolDiskUsageBytes" + cpp_vartype: AtomicWord + default: + expr: 10 * 100 * 1024 * 1024 + validator: + gt: 0 + + deprioritizeUnboundedUserCollectionScans: + description: "Unbounded user collection scans are executed with low storage admission priority" + set_at: [ startup, runtime ] + cpp_varname: gDeprioritizeUnboundedUserCollectionScans + cpp_vartype: AtomicWord + default: true + + deprioritizeUnboundedUserIndexScans: + description: "Unbounded user index scans are executed with low storage admission priority" + set_at: [ startup, runtime ] + cpp_varname: gDeprioritizeUnboundedUserIndexScans + cpp_vartype: AtomicWord + default: true + + internalQueryDocumentSourceWriterBatchExtraReservedBytes: + description: "Space to reserve in document source writer batches for miscellaneous metadata" + set_at: [ startup, runtime ] + cpp_vartype: AtomicWord + cpp_varname: internalQueryDocumentSourceWriterBatchExtraReservedBytes + validator: + gte: 0 + lte: + expr: 8 * 1024 * 1024 # 8MB + default: 0 + # Note for adding additional query knobs: # # When adding a new query knob, you should consider whether or not you need to add an 'on_update' diff --git a/src/mongo/db/query/query_planner.cpp b/src/mongo/db/query/query_planner.cpp index e7c9022277761..3736ca7975b96 100644 --- a/src/mongo/db/query/query_planner.cpp +++ b/src/mongo/db/query/query_planner.cpp @@ -28,51 +28,80 @@ */ -#include "mongo/db/pipeline/dependencies.h" -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" #include "mongo/db/bson/dotted_path_support.h" -#include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/exec/bucket_unpacker.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/exec/projection_executor_utils.h" -#include "mongo/db/index/wildcard_key_generator.h" #include "mongo/db/index_names.h" #include "mongo/db/matcher/expression_algo.h" -#include "mongo/db/matcher/expression_geo.h" -#include "mongo/db/matcher/expression_text.h" #include "mongo/db/matcher/match_expression_dependencies.h" #include "mongo/db/pipeline/dependencies.h" +#include 
"mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_lookup.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/inner_pipeline_stage_interface.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/index_entry.h" -#include "mongo/db/query/internal_plans.h" -#include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/index_tag.h" #include "mongo/db/query/plan_enumerator.h" +#include "mongo/db/query/plan_enumerator_explain_info.h" #include "mongo/db/query/planner_access.h" #include "mongo/db/query/planner_analysis.h" #include "mongo/db/query/planner_ixselect.h" -#include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_common.h" +#include "mongo/db/query/query_request_helper.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/query/util/set_util.h" #include "mongo/logv2/log.h" -#include "mongo/util/assert_util_core.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery - namespace mongo { namespace log_detail { void logSubplannerIndexEntry(const IndexEntry& entry, size_t childIndex) { @@ -473,6 +502,28 @@ bool collscanIsBounded(const CollectionScanNode* collscan) { return collscan->minRecord || collscan->maxRecord; } +bool canUseClusteredCollScan(QuerySolutionNode* node, + std::vector> children) { + if (node->getType() == StageType::STAGE_COLLSCAN) { + return static_cast(node)->doClusteredCollectionScanClassic(); + } + + // We assume we are subplanning the children of an OR expression and therefore should expect one + // child per node. However, we have to recur down to the child leaf node to check if we can + // perform a clustered collection scan. + if (1 == children.size()) { + QuerySolutionNode* child = children[0].get(); + // Find the leaf node of the solution node. + while (1 == child->children.size()) { + child = child->children[0].get(); + } + if (child->getType() == StageType::STAGE_COLLSCAN) { + return static_cast(child)->doClusteredCollectionScanClassic(); + } + } + return false; +} + } // namespace using std::numeric_limits; @@ -686,44 +737,18 @@ static BSONObj finishMaxObj(const IndexEntry& indexEntry, } } -bool providesSort(const CanonicalQuery& query, const BSONObj& kp) { - return query.getFindCommandRequest().getSort().isPrefixOf( - kp, SimpleBSONElementComparator::kInstance); -} - -/** - * Determine whether this query has a sort that can be provided by the clustered index, if so, which - * direction the scan should be. If the collection is not clustered, or the sort cannot be provided, - * returns 'boost::none'. 
- */ -boost::optional determineClusteredScanDirection(const CanonicalQuery& query, - const QueryPlannerParams& params) { - if (params.clusteredInfo && query.getSortPattern() && - CollatorInterface::collatorsMatch(params.clusteredCollectionCollator, - query.getCollator())) { - auto kp = clustered_util::getSortPattern(params.clusteredInfo->getIndexSpec()); - if (providesSort(query, kp)) { - return 1; - } else if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) { - return -1; - } - } - - return boost::none; -} - /** * Determine the direction of the scan needed for the query. Defaults to 1 unless this is a * clustered collection and we have a sort that can be provided by the clustered index. */ int determineCollscanDirection(const CanonicalQuery& query, const QueryPlannerParams& params) { - return determineClusteredScanDirection(query, params).value_or(1); + return QueryPlannerCommon::determineClusteredScanDirection(query, params).value_or(1); } std::pair, const CollectionScanNode*> buildCollscanSolnWithNode( const CanonicalQuery& query, bool tailable, const QueryPlannerParams& params, int direction) { std::unique_ptr solnRoot( - QueryPlannerAccess::makeCollectionScan(query, tailable, params, direction)); + QueryPlannerAccess::makeCollectionScan(query, tailable, params, direction, query.root())); const auto* collscanNode = checked_cast(solnRoot.get()); return std::make_pair( QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot)), collscanNode); @@ -828,7 +853,7 @@ StatusWith> QueryPlanner::cacheDataFromTagge // static Status QueryPlanner::tagAccordingToCache(MatchExpression* filter, const PlanCacheIndexTree* const indexTree, - const map& indexMap) { + const std::map& indexMap) { if (nullptr == filter) { return Status(ErrorCodes::NoQueryExecutionPlans, "Cannot tag tree: filter is NULL."); } @@ -838,7 +863,7 @@ Status QueryPlanner::tagAccordingToCache(MatchExpression* filter, // We're tagging the tree here, so it shouldn't have // any tags hanging off yet. - verify(nullptr == filter->getTag()); + MONGO_verify(nullptr == filter->getTag()); if (filter->numChildren() != indexTree->children.size()) { str::stream ss; @@ -947,7 +972,7 @@ StatusWith> QueryPlanner::planFromCache( QueryPlannerIXSelect::expandIndexes(fields, params.indices, false /* indexHinted */); // Map from index name to index number. - map indexMap; + std::map indexMap; for (size_t i = 0; i < expandedIndexes.size(); ++i) { const IndexEntry& ie = expandedIndexes[i]; const auto insertionRes = indexMap.insert(std::make_pair(ie.identifier, i)); @@ -978,14 +1003,14 @@ StatusWith> QueryPlanner::planFromCache( if (!solnRoot) { return Status(ErrorCodes::NoQueryExecutionPlans, str::stream() << "Failed to create data access plan from cache. Query: " - << query.toStringShort()); + << query.toStringShortForErrorMsg()); } auto soln = QueryPlannerAnalysis::analyzeDataAccess(query, params, std::move(solnRoot)); if (!soln) { return Status(ErrorCodes::NoQueryExecutionPlans, - str::stream() - << "Failed to analyze plan from cache. Query: " << query.toStringShort()); + str::stream() << "Failed to analyze plan from cache. 
Query: " + << query.toStringShortForErrorMsg()); } LOGV2_DEBUG(20966, @@ -1500,7 +1525,7 @@ StatusWith>> QueryPlanner::plan( } const BSONObj kp = QueryPlannerAnalysis::getSortPattern(index.keyPattern); - if (providesSort(query, kp)) { + if (QueryPlannerCommon::providesSort(query, kp)) { LOGV2_DEBUG( 20981, 5, "Planner: outputting soln that uses index to provide sort"); auto soln = buildWholeIXSoln(fullIndexList[i], query, params); @@ -1516,7 +1541,8 @@ StatusWith>> QueryPlanner::plan( out.push_back(std::move(soln)); } } - if (providesSort(query, QueryPlannerCommon::reverseSortObj(kp))) { + if (QueryPlannerCommon::providesSort(query, + QueryPlannerCommon::reverseSortObj(kp))) { LOGV2_DEBUG( 20982, 5, @@ -1605,8 +1631,9 @@ StatusWith>> QueryPlanner::plan( } if (possibleToCollscan && (collscanRequested || collScanRequired || clusteredCollection)) { - auto clusteredScanDirection = determineClusteredScanDirection(query, params); - auto direction = clusteredScanDirection.value_or(1); + boost::optional clusteredScanDirection = + QueryPlannerCommon::determineClusteredScanDirection(query, params); + int direction = clusteredScanDirection.value_or(1); auto [collscanSoln, collscanNode] = buildCollscanSolnWithNode(query, isTailable, params, direction); if (!collscanSoln && collScanRequired) { @@ -1637,7 +1664,7 @@ StatusWith>> QueryPlanner::plan( invariant(out.size() > 0); return {std::move(out)}; -} +} // QueryPlanner::plan /** * The 'query' might contain parts of aggregation pipeline. For now, we plan those separately and @@ -1725,8 +1752,17 @@ StatusWith> QueryPlanner::choosePlanForSubqueries QuerySolution* soln = branchResult->solutions.front().get(); Status tagStatus = tagOrChildAccordingToCache( cacheData.get(), soln->cacheData.get(), orChild, planningResult.indexMap); + + // Check if 'soln' is a CLUSTERED_IXSCAN. This branch won't be tagged, and 'tagStatus' + // will return 'NoQueryExecutionPlans'. However, this plan can be executed by the OR + // stage. + QuerySolutionNode* root = soln->root(); if (!tagStatus.isOK()) { - return tagStatus; + const bool allowPlanWithoutTag = tagStatus == ErrorCodes::NoQueryExecutionPlans && + canUseClusteredCollScan(root, std::move(root->children)); + if (!allowPlanWithoutTag) { + return tagStatus; + } } } else { // N solutions, rank them. @@ -1750,22 +1786,33 @@ StatusWith> QueryPlanner::choosePlanForSubqueries return Status(ErrorCodes::NoQueryExecutionPlans, ss); } - if (SolutionCacheData::USE_INDEX_TAGS_SOLN != bestSoln->cacheData->solnType) { - str::stream ss; - ss << "No indexed cache data for subchild " << orChild->debugString(); - return Status(ErrorCodes::NoQueryExecutionPlans, ss); + // The cached plan might be an indexed solution or a clustered collection scan. + SolutionCacheData::SolutionType solnType = bestSoln->cacheData->solnType; + bool useClusteredCollScan = false; + if (SolutionCacheData::USE_INDEX_TAGS_SOLN != solnType) { + if (!(SolutionCacheData::COLLSCAN_SOLN == solnType && + canUseClusteredCollScan(bestSoln->root(), + std::move(bestSoln->root()->children)))) { + str::stream ss; + ss << "No indexed cache data for subchild " << orChild->debugString(); + return Status(ErrorCodes::NoQueryExecutionPlans, ss); + } else { + useClusteredCollScan = true; + } } - // Add the index assignments to our original query. 
- Status tagStatus = QueryPlanner::tagAccordingToCache( - orChild, bestSoln->cacheData->tree.get(), planningResult.indexMap); - if (!tagStatus.isOK()) { - str::stream ss; - ss << "Failed to extract indices from subchild " << orChild->debugString(); - return tagStatus.withContext(ss); + // If the cached plan is not a clustered collection scan, add the index assignments to + // the original query. + if (!useClusteredCollScan) { + Status tagStatus = QueryPlanner::tagAccordingToCache( + orChild, bestSoln->cacheData->tree.get(), planningResult.indexMap); + if (!tagStatus.isOK()) { + str::stream ss; + ss << "Failed to extract indices from subchild " << orChild->debugString(); + return tagStatus.withContext(ss); + } + cacheData->children.push_back(bestSoln->cacheData->tree->clone()); } - - cacheData->children.push_back(bestSoln->cacheData->tree->clone()); } } diff --git a/src/mongo/db/query/query_planner.h b/src/mongo/db/query/query_planner.h index c5e0779658566..85b8d6c3205cb 100644 --- a/src/mongo/db/query/query_planner.h +++ b/src/mongo/db/query/query_planner.h @@ -29,9 +29,22 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/classic_plan_cache.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_solution.h" diff --git a/src/mongo/db/query/query_planner_array_test.cpp b/src/mongo/db/query/query_planner_array_test.cpp index 19dfc99dbcd1a..cac9441d264d9 100644 --- a/src/mongo/db/query/query_planner_array_test.cpp +++ b/src/mongo/db/query/query_planner_array_test.cpp @@ -27,12 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/db/query/query_planner.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { @@ -2110,13 +2118,13 @@ TEST_F(QueryPlannerTest, CanHoistNegatedPredFromElemMatchIntoSiblingOrWithMultik "{fetch: {filter: {arr: {$elemMatch: {a: {$ne: 1}, b: {$in: [2, 3]}}}}," "node: {" " or: {nodes: [" - " {fetch: {filter: {a: {$ne: 1}}," + " {fetch: {filter: {'arr.a': {$ne: 1}}," " node: {ixscan: {pattern: {'arr.a': 1, 'arr.b': 1, c: 1, d: 1}," " bounds: {'arr.a': [['MinKey', 1, true, false], [1, 'MaxKey', false, true]]," " 'arr.b': [[2, 2, true, true], [3, 3, true, true]]," " c: [[4, 4, true, true]]," " d: [[5, 5, true, true]]}}}}}," - " {fetch: {filter: {a: {$ne: 1}}," + " {fetch: {filter: {'arr.a': {$ne: 1}}," " node: {ixscan: {pattern: {'arr.a': 1, 'arr.b': 1, c: 1, d: 1}," " bounds: {'arr.a': [['MinKey', 1, true, false],[1, 'MaxKey', false, true]]," " 'arr.b': [[2, 2, true, true], [3, 3, true, true]]," diff --git a/src/mongo/db/query/query_planner_collation_test.cpp b/src/mongo/db/query/query_planner_collation_test.cpp index 4491270565fa2..f7108dbca3594 100644 --- a/src/mongo/db/query/query_planner_collation_test.cpp +++ b/src/mongo/db/query/query_planner_collation_test.cpp @@ -27,10 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/framework.h" namespace { @@ -756,26 +763,24 @@ TEST_F(QueryPlannerTest, } /** - * This test confirms that we place a fetch stage before sort in the case where both query + * This test confirms that we place a fetch stage after the IDXScan in the case where both query * and index have the same non-simple collation. To handle this scenario without this fetch would * require a mechanism to ensure we don't attempt to encode for collation an already encoded index * key entry when generating the sort key. 
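
The reworded test comment above captures why the planner fetches the document after the index scan when the query and index share a non-simple collation: the keys coming out of the index are already collation-encoded, and re-encoding them while building a sort key would be wrong. With the reverse-string mock collation used in this test file, double encoding simply restores the original string, so the resulting sort key would no longer reflect the collation order. A tiny stand-alone illustration (toy code, not the planner's sort-key generation):

```cpp
// Toy demonstration of the double-encoding hazard with a reverse-string "collation".
#include <algorithm>
#include <iostream>
#include <string>

std::string reverseCollationKey(std::string s) {
    std::reverse(s.begin(), s.end());
    return s;
}

int main() {
    std::string doc = "abc";

    // What the index stores: the collation-encoded key.
    std::string indexKey = reverseCollationKey(doc);             // "cba"
    // Correct sort key under this collation is the encoded form itself.
    std::string sortKeyFromDocument = reverseCollationKey(doc);  // "cba"
    // Encoding the already-encoded index key a second time is wrong:
    std::string doubleEncoded = reverseCollationKey(indexKey);   // back to "abc"

    std::cout << indexKey << ' ' << sortKeyFromDocument << ' ' << doubleEncoded << '\n';
    // Fetching the document (or otherwise avoiding re-encoding) sidesteps the problem.
}
```
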
*/ -TEST_F(QueryPlannerTest, MustFetchBeforeSortWhenQueryHasSameNonSimpleCollationAsIndex) { +TEST_F(QueryPlannerTest, MustFetchAfterIDXScanWhenQueryHasSameNonSimpleCollationAsIndex) { params.options &= ~QueryPlannerParams::INCLUDE_COLLSCAN; CollatorInterfaceMock collator(CollatorInterfaceMock::MockType::kReverseString); addIndex(fromjson("{a: 1, b: 1}"), &collator); runQueryAsCommand( - fromjson("{find: 'testns', filter: {a: {$gt: 0}}, projection: {a: 1, b:1, _id: 0}, " - "collation: {locale: 'reverse'}, sort: {b: 1, a: 1}}")); + fromjson("{find: 'testns', filter: {a: {$gt: 0}}, projection: {a: 1, b: 1, _id: 0}, " + "collation: {locale: 'reverse'}, sort: {a: 1, b: 1}}")); - assertNumSolutions(1U); assertSolutionExists( - "{sort: {pattern: {b: 1, a: 1}, limit: 0, type: 'simple', node: " "{proj: {spec: {a: 1, b: 1, _id: 0}, node:" - "{fetch: {filter: null, collation: {locale: 'reverse'}, node:" - "{ixscan: {pattern: {a: 1, b: 1}}}}}}}}}}}"); + "{fetch: {filter: null, node:" + "{ixscan: {pattern: {a: 1, b: 1}}}}}}}"); } TEST_F(QueryPlannerTest, NoSortStageWhenMinMaxIndexCollationDoesNotMatchButBoundsContainNoStrings) { diff --git a/src/mongo/db/query/query_planner_columnar_test.cpp b/src/mongo/db/query/query_planner_columnar_test.cpp index 72b691aef4334..d260283bbcc4f 100644 --- a/src/mongo/db/query/query_planner_columnar_test.cpp +++ b/src/mongo/db/query/query_planner_columnar_test.cpp @@ -27,20 +27,39 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/index_path_projection.h" #include "mongo/db/index/column_key_generator.h" -#include "mongo/db/pipeline/document_source.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/inner_pipeline_stage_impl.h" #include "mongo/db/pipeline/inner_pipeline_stage_interface.h" #include "mongo/db/pipeline/pipeline.h" -#include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" #include "mongo/db/query/query_planner_test_lib.h" +#include "mongo/db/query/query_solution.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/death_test.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { const std::string kIndexName = "indexName"; diff --git a/src/mongo/db/query/query_planner_common.cpp b/src/mongo/db/query/query_planner_common.cpp index b6c3253fd2822..96e29e0dc1a66 100644 --- a/src/mongo/db/query/query_planner_common.cpp +++ b/src/mongo/db/query/query_planner_common.cpp @@ -28,17 +28,34 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include #include "mongo/base/exact_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/clustered_collection_util.h" +#include 
"mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/projection_ast.h" #include "mongo/db/query/projection_ast_path_tracking_visitor.h" +#include "mongo/db/query/projection_ast_visitor.h" #include "mongo/db/query/query_planner_common.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/stage_types.h" #include "mongo/db/query/tree_walker.h" -#include "mongo/logv2/log.h" #include "mongo/logv2/redaction.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -167,4 +184,22 @@ std::vector QueryPlannerCommon::extractSortKeyMetaFieldsFromProjectio return std::move(ctx.data().metaPaths); } + +boost::optional QueryPlannerCommon::determineClusteredScanDirection( + const CanonicalQuery& query, const QueryPlannerParams& params) { + if (params.clusteredInfo && query.getSortPattern() && + CollatorInterface::collatorsMatch(params.clusteredCollectionCollator, + query.getCollator())) { + BSONObj kp = clustered_util::getSortPattern(params.clusteredInfo->getIndexSpec()); + if (QueryPlannerCommon::providesSort(query, kp)) { + return 1; + } else if (QueryPlannerCommon::providesSort(query, + QueryPlannerCommon::reverseSortObj(kp))) { + return -1; + } + } + + return boost::none; +} + } // namespace mongo diff --git a/src/mongo/db/query/query_planner_common.h b/src/mongo/db/query/query_planner_common.h index 6d441155b54cc..3f4d001269430 100644 --- a/src/mongo/db/query/query_planner_common.h +++ b/src/mongo/db/query/query_planner_common.h @@ -29,9 +29,21 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonelement_comparator.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/projection.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_solution.h" namespace mongo { @@ -64,6 +76,20 @@ class QueryPlannerCommon { return false; } + /** + * Returns a count of 'type' nodes in expression tree. + */ + static size_t countNodes(const MatchExpression* root, MatchExpression::MatchType type) { + size_t sum = 0; + if (type == root->matchType()) { + sum = 1; + } + for (size_t i = 0; i < root->numChildren(); ++i) { + sum += countNodes(root->getChild(i), type); + } + return sum; + } + /** * Assumes the provided BSONObj is of the form {field1: -+1, ..., field2: -+1} * Returns a BSONObj with the values negated. @@ -99,6 +125,19 @@ class QueryPlannerCommon { */ static std::vector extractSortKeyMetaFieldsFromProjection( const projection_ast::Projection& proj); + + static bool providesSort(const CanonicalQuery& query, const BSONObj& kp) { + return query.getFindCommandRequest().getSort().isPrefixOf( + kp, SimpleBSONElementComparator::kInstance); + } + + /** + * Determine whether this query has a sort that can be provided by the collection's clustering + * index, if so, which direction the scan should be. If the collection is not clustered, or the + * sort cannot be provided, returns 'boost::none'. 
+ */ + static boost::optional determineClusteredScanDirection(const CanonicalQuery& query, + const QueryPlannerParams& params); }; } // namespace mongo diff --git a/src/mongo/db/query/query_planner_geo_test.cpp b/src/mongo/db/query/query_planner_geo_test.cpp index 4686a945ea676..a9c82886e8eb0 100644 --- a/src/mongo/db/query/query_planner_geo_test.cpp +++ b/src/mongo/db/query/query_planner_geo_test.cpp @@ -27,12 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/db/query/query_planner.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/query/query_planner_hashed_index_test.cpp b/src/mongo/db/query/query_planner_hashed_index_test.cpp index cfc146bb5345b..75f2e26a588d6 100644 --- a/src/mongo/db/query/query_planner_hashed_index_test.cpp +++ b/src/mongo/db/query/query_planner_hashed_index_test.cpp @@ -27,11 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/hasher.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo { /** diff --git a/src/mongo/db/query/query_planner_index_test.cpp b/src/mongo/db/query/query_planner_index_test.cpp index 960a64c01899a..2c7de1c85d796 100644 --- a/src/mongo/db/query/query_planner_index_test.cpp +++ b/src/mongo/db/query/query_planner_index_test.cpp @@ -27,13 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/query_solution.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/query_planner_operator_test.cpp b/src/mongo/db/query/query_planner_operator_test.cpp index 94feaf2b3fe44..3c5b89e69d312 100644 --- a/src/mongo/db/query/query_planner_operator_test.cpp +++ b/src/mongo/db/query/query_planner_operator_test.cpp @@ -27,12 +27,22 @@ * it in the license file. 
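
QueryPlannerCommon now owns providesSort() (the requested sort must be a prefix of the key pattern) and determineClusteredScanDirection(), which answers whether the clustering index can supply the sort and, if so, in which direction (1 forward, -1 reverse, none otherwise). The sketch below mimics that prefix test on plain field/direction pairs; it is a simplified stand-in with toy types and no collation handling, not the planner's BSON-based logic.

```cpp
// Simplified prefix-of-keypattern sort check; toy types, not the planner's BSON logic.
#include <algorithm>
#include <iostream>
#include <optional>
#include <string>
#include <utility>
#include <vector>

using SortPattern = std::vector<std::pair<std::string, int>>;  // field name, +1/-1

bool providesSort(const SortPattern& requested, const SortPattern& keyPattern) {
    if (requested.size() > keyPattern.size()) {
        return false;
    }
    return std::equal(requested.begin(), requested.end(), keyPattern.begin());
}

SortPattern reverseSortPattern(SortPattern kp) {
    for (auto& fieldAndDir : kp) {
        fieldAndDir.second = -fieldAndDir.second;  // flip each direction
    }
    return kp;
}

// 1 = forward scan provides the sort, -1 = reverse scan does, nullopt = neither.
std::optional<int> clusteredScanDirection(const SortPattern& requested,
                                          const SortPattern& clusteredKeyPattern) {
    if (providesSort(requested, clusteredKeyPattern)) {
        return 1;
    }
    if (providesSort(requested, reverseSortPattern(clusteredKeyPattern))) {
        return -1;
    }
    return std::nullopt;
}

int main() {
    SortPattern clusterKey{{"a", 1}, {"b", 1}};
    std::cout << clusteredScanDirection({{"a", 1}}, clusterKey).value_or(0) << "\n";              // 1
    std::cout << clusteredScanDirection({{"a", -1}, {"b", -1}}, clusterKey).value_or(0) << "\n";  // -1
    std::cout << clusteredScanDirection({{"b", 1}}, clusterKey).value_or(0) << "\n";              // 0 (none)
}
```
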
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/query_planner.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index_names.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/query_planner_options_test.cpp b/src/mongo/db/query/query_planner_options_test.cpp index 094686bc9f3df..a7b45384c4aae 100644 --- a/src/mongo/db/query/query_planner_options_test.cpp +++ b/src/mongo/db/query/query_planner_options_test.cpp @@ -27,13 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_interface_mock.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/classic_plan_cache.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/index_tag.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/query_solution.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -847,7 +874,7 @@ TEST_F(QueryPlannerTest, TagAccordingToCacheFailsOnBadInput) { } TEST_F(QueryPlannerTest, DollarResumeAfterFieldPropagatedFromQueryRequestToStageBuilder) { - BSONObj cmdObj = BSON("find" << nss.ns() << "hint" << BSON("$natural" << 1) << "sort" + BSONObj cmdObj = BSON("find" << nss.ns_forTest() << "hint" << BSON("$natural" << 1) << "sort" << BSON("$natural" << 1) << "$_requestResumeToken" << true << "$_resumeAfter" << BSON("$recordId" << 42LL)); diff --git a/src/mongo/db/query/query_planner_partialidx_test.cpp b/src/mongo/db/query/query_planner_partialidx_test.cpp index 88a806f2f63c8..3d59fa2d06f25 100644 --- a/src/mongo/db/query/query_planner_partialidx_test.cpp +++ b/src/mongo/db/query/query_planner_partialidx_test.cpp @@ -27,11 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/query_planner.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/query_planner_pipeline_pushdown_test.cpp b/src/mongo/db/query/query_planner_pipeline_pushdown_test.cpp index 535cfb232e206..002e3d487cb1f 100644 --- a/src/mongo/db/query/query_planner_pipeline_pushdown_test.cpp +++ b/src/mongo/db/query/query_planner_pipeline_pushdown_test.cpp @@ -27,15 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include -#include "mongo/db/json.h" -#include "mongo/db/pipeline/document_source_group.h" -#include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/inner_pipeline_stage_impl.h" #include "mongo/db/pipeline/inner_pipeline_stage_interface.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" #include "mongo/db/query/query_planner_test_lib.h" +#include "mongo/db/query/query_solution.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { using namespace mongo; @@ -169,7 +186,7 @@ TEST_F(QueryPlannerPipelinePushdownTest, PushdownOfASingleLookup) { ASSERT(!cq->pipeline().empty()); auto solution = QueryPlanner::extendWithAggPipeline(*cq, std::move(solns[0]), secondaryCollMap); ASSERT_OK(QueryPlannerTestLib::solutionMatches( - "{eq_lookup: {foreignCollection: '" + kSecondaryNamespace.toString() + + "{eq_lookup: {foreignCollection: '" + kSecondaryNamespace.toString_forTest() + "', joinFieldLocal: 'x', joinFieldForeign: 'y', joinField: 'out', " "strategy: 'NestedLoopJoin', node: " "{cscan: {dir:1, filter: {x:1}}}}}", @@ -198,11 +215,11 @@ TEST_F(QueryPlannerPipelinePushdownTest, PushdownOfTwoLookups) { ASSERT(!cq->pipeline().empty()); auto solution = QueryPlanner::extendWithAggPipeline(*cq, std::move(solns[0]), secondaryCollMap); ASSERT_OK(QueryPlannerTestLib::solutionMatches( - "{eq_lookup: {foreignCollection: '" + kSecondaryNamespace.toString() + + "{eq_lookup: {foreignCollection: '" + kSecondaryNamespace.toString_forTest() + "', joinFieldLocal: 'a', joinFieldForeign: 'b', joinField: 'c', " "strategy: 'NestedLoopJoin', node: " "{eq_lookup: {foreignCollection: '" + - kSecondaryNamespace.toString() + + kSecondaryNamespace.toString_forTest() + "', joinFieldLocal: 'x', joinFieldForeign: 'y', joinField: 'out'," "strategy: 'NestedLoopJoin', node: {cscan: {dir:1, filter: {x:1}}}}}}}", solution->root())) @@ -234,12 +251,12 @@ 
TEST_F(QueryPlannerPipelinePushdownTest, PushdownOfTwoLookupsAndTwoGroups) { ASSERT_OK(QueryPlannerTestLib::solutionMatches( "{group: {key: {_id: '$c'}, accs: [{count: {$min: '$count'}}], node: " "{eq_lookup: {foreignCollection: '" + - kSecondaryNamespace.toString() + + kSecondaryNamespace.toString_forTest() + "', joinFieldLocal: 'a', joinFieldForeign: 'b', joinField: 'c', " "strategy: 'NestedLoopJoin', node: " "{group: {key: {_id: '$out'}, accs: [{count: {$sum: '$x'}}], node: " "{eq_lookup: {foreignCollection: '" + - kSecondaryNamespace.toString() + + kSecondaryNamespace.toString_forTest() + "', joinFieldLocal: 'x', joinFieldForeign: 'y', joinField: 'out'," "strategy: 'NestedLoopJoin', node: " "{cscan: {dir:1, filter: {x:1}}}}}}}}}}}", diff --git a/src/mongo/db/query/query_planner_test_fixture.cpp b/src/mongo/db/query/query_planner_test_fixture.cpp index 1a3a2508d74dc..3cd9bcfb8b2fe 100644 --- a/src/mongo/db/query/query_planner_test_fixture.cpp +++ b/src/mongo/db/query/query_planner_test_fixture.cpp @@ -28,21 +28,40 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/query_planner_test_fixture.h" - #include - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_planner_test_fixture.h" #include "mongo/db/query/query_planner_test_lib.h" +#include "mongo/db/query/query_request_helper.h" #include "mongo/logv2/log.h" -#include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/unittest/assert.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -51,7 +70,8 @@ namespace mongo { using unittest::assertGet; -const NamespaceString QueryPlannerTest::nss("test.collection"); +const NamespaceString QueryPlannerTest::nss = + NamespaceString::createNamespaceString_forTest("test.collection"); void QueryPlannerTest::setUp() { opCtx = serviceContext.makeOperationContext(); diff --git a/src/mongo/db/query/query_planner_test_fixture.h b/src/mongo/db/query/query_planner_test_fixture.h index 1aca97f6d4c95..a937cf19b57fb 100644 --- a/src/mongo/db/query/query_planner_test_fixture.h +++ b/src/mongo/db/query/query_planner_test_fixture.h @@ -29,19 +29,37 @@ #pragma once +#include +#include +#include #include #include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/jsobj.h" #include "mongo/db/json.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/inner_pipeline_stage_interface.h" +#include "mongo/db/query/canonical_query.h" #include 
"mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/query/query_planner_test_lib.cpp b/src/mongo/db/query/query_planner_test_lib.cpp index 3814952b2a6b8..be908faf34761 100644 --- a/src/mongo/db/query/query_planner_test_lib.cpp +++ b/src/mongo/db/query/query_planner_test_lib.cpp @@ -34,23 +34,55 @@ #include "mongo/db/query/query_planner_test_lib.h" -#include - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/fts/fts_query.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/collation/collator_factory_mock.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/projection.h" +#include "mongo/db/query/projection_ast.h" #include "mongo/db/query/projection_ast_util.h" #include "mongo/db/query/projection_parser.h" -#include "mongo/db/query/query_planner.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/stage_types.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -1008,8 +1040,8 @@ Status QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln, // Create an empty/dummy expression context without access to the operation context and // collator. This should be sufficient to parse a projection. 
- auto expCtx = - make_intrusive(nullptr, nullptr, NamespaceString("test.dummy")); + auto expCtx = make_intrusive( + nullptr, nullptr, NamespaceString::createNamespaceString_forTest("test.dummy")); auto projection = projection_ast::parseAndAnalyze( expCtx, spec.Obj(), ProjectionPolicies::findProjectionPolicies()); auto specProjObj = projection_ast::astToDebugBSON(projection.root()); @@ -1415,13 +1447,14 @@ Status QueryPlannerTestLib::solutionMatches(const BSONObj& testSoln, << testSoln.toString()}; } - if (expectedForeignCollection.str() != actualEqLookupNode->foreignCollection.toString()) { + if (expectedForeignCollection.str() != + actualEqLookupNode->foreignCollection.toString_forTest()) { return { ErrorCodes::Error{6267502}, str::stream() << "Test solution 'foreignCollection' does not match actual; test " "" << expectedForeignCollection.str() << " != actual " - << actualEqLookupNode->foreignCollection}; + << actualEqLookupNode->foreignCollection.toStringForErrorMsg()}; } auto expectedLocalField = expectedEqLookupSoln["joinFieldLocal"]; diff --git a/src/mongo/db/query/query_planner_test_lib.h b/src/mongo/db/query/query_planner_test_lib.h index e02fbc1e950a2..5b270a79ab242 100644 --- a/src/mongo/db/query/query_planner_test_lib.h +++ b/src/mongo/db/query/query_planner_test_lib.h @@ -31,14 +31,20 @@ * This file contains tests for mongo/db/query/query_planner.cpp */ +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/jsobj.h" #include "mongo/db/json.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_solution.h" #include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" -#include namespace mongo { diff --git a/src/mongo/db/query/query_planner_text_test.cpp b/src/mongo/db/query/query_planner_text_test.cpp index eeb809f551355..5b513b950941d 100644 --- a/src/mongo/db/query/query_planner_text_test.cpp +++ b/src/mongo/db/query/query_planner_text_test.cpp @@ -27,12 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/db/query/query_planner.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/query/query_planner_tree_test.cpp b/src/mongo/db/query/query_planner_tree_test.cpp index 8c56e4c41ea98..e91a9e12e177b 100644 --- a/src/mongo/db/query/query_planner_tree_test.cpp +++ b/src/mongo/db/query/query_planner_tree_test.cpp @@ -27,12 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/db/query/query_planner.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" namespace mongo { namespace { diff --git a/src/mongo/db/query/query_planner_wildcard_index_test.cpp b/src/mongo/db/query/query_planner_wildcard_index_test.cpp index 8d4af877eb3ff..577de3ca38bb5 100644 --- a/src/mongo/db/query/query_planner_wildcard_index_test.cpp +++ b/src/mongo/db/query/query_planner_wildcard_index_test.cpp @@ -27,13 +27,38 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/wildcard_key_generator.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/planner_wildcard_helpers.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_fixture.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/scopeguard.h" namespace mongo { diff --git a/src/mongo/db/query/query_request_helper.cpp b/src/mongo/db/query/query_request_helper.cpp index 16d95be8c878c..4b257241364a3 100644 --- a/src/mongo/db/query/query_request_helper.cpp +++ b/src/mongo/db/query/query_request_helper.cpp @@ -27,17 +27,38 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/query_request_helper.h" - +#include +#include +#include +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/basic_types.h" #include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/dbmessage.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/tailable_mode.h" +#include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery namespace mongo { @@ -84,6 +105,47 @@ Status validateGetMoreCollectionName(StringData collectionName) { return Status::OK(); } +Status validateResumeAfter(const mongo::BSONObj& resumeAfter, bool isClusteredCollection) { + if (resumeAfter.isEmpty()) { + return Status::OK(); + } + + BSONType recordIdType = resumeAfter["$recordId"].type(); + if (mongo::resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + if (resumeAfter.nFields() > 2 || + (recordIdType != BSONType::NumberLong && recordIdType != BSONType::BinData && + recordIdType != BSONType::jstNULL) || + (resumeAfter.nFields() == 2 && + (resumeAfter["$initialSyncId"].type() != BSONType::BinData || + resumeAfter["$initialSyncId"].binDataType() != BinDataType::newUUID))) { + return Status(ErrorCodes::BadValue, + "Malformed resume token: the '_resumeAfter' object must contain" + " '$recordId', of type NumberLong, BinData or jstNULL, and" + " an optional '$initialSyncId' of type BinData."); + } + } else if (resumeAfter.nFields() != 1 || + (recordIdType != BSONType::NumberLong && recordIdType != BSONType::BinData && + recordIdType != BSONType::jstNULL)) { + return Status(ErrorCodes::BadValue, + "Malformed resume token: the '_resumeAfter' object must contain" + " exactly one field named '$recordId', of type NumberLong, BinData " + "or jstNULL."); + } + + // Clustered collections can only accept a '$_resumeAfter' parameter of type BinData. Non + // clustered collections should only accept '$_resumeAfter' of type Long. + if ((isClusteredCollection && recordIdType == BSONType::NumberLong) || + (!isClusteredCollection && recordIdType == BSONType::BinData)) { + return Status(ErrorCodes::Error(7738600), + "The '$_resumeAfter' parameter must match the collection type. Clustered " + "collections only have BinData recordIds, and all other collections " + "have Long recordIds."); + } + + return Status::OK(); +} + Status validateFindCommandRequest(const FindCommandRequest& findCommand) { // Min and Max objects must have the same fields.
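
The new `validateResumeAfter()` helper above encodes the '$_resumeAfter' rules: an optional '$initialSyncId' is only accepted behind the resharding-improvements feature flag, and the '$recordId' type must match the collection type. A minimal sketch of how a caller might exercise it (illustrative only; the literal recordId value and the `invariant()` framing are assumptions, not part of this change):

```cpp
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/db/query/query_request_helper.h"
#include "mongo/util/assert_util.h"

namespace mongo {
void resumeAfterValidationSketch() {
    // Non-clustered collections resume from a NumberLong recordId.
    const BSONObj longToken = BSON("$recordId" << 42LL);
    invariant(
        query_request_helper::validateResumeAfter(longToken, false /* isClusteredCollection */)
            .isOK());

    // The same token is rejected for a clustered collection, which stores BinData recordIds.
    invariant(
        !query_request_helper::validateResumeAfter(longToken, true /* isClusteredCollection */)
             .isOK());

    // An empty token is always accepted; it simply means there is no resume point.
    invariant(
        query_request_helper::validateResumeAfter(BSONObj(), false /* isClusteredCollection */)
            .isOK());
}
}  // namespace mongo
```
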
if (!findCommand.getMin().isEmpty() && !findCommand.getMax().isEmpty()) { @@ -130,17 +192,8 @@ Status validateFindCommandRequest(const FindCommandRequest& findCommand) { return Status(ErrorCodes::BadValue, "sort must be unset or {$natural:1} if 'requestResumeToken' is enabled"); } - if (!findCommand.getResumeAfter().isEmpty()) { - if (findCommand.getResumeAfter().nFields() != 1 || - (findCommand.getResumeAfter()["$recordId"].type() != BSONType::NumberLong && - findCommand.getResumeAfter()["$recordId"].type() != BSONType::BinData && - findCommand.getResumeAfter()["$recordId"].type() != BSONType::jstNULL)) { - return Status(ErrorCodes::BadValue, - "Malformed resume token: the '_resumeAfter' object must contain" - " exactly one field named '$recordId', of type NumberLong, BinData " - "or jstNULL."); - } - } + // The $_resumeAfter parameter is checked in 'validateResumeAfter()'. + } else if (!findCommand.getResumeAfter().isEmpty()) { return Status(ErrorCodes::BadValue, "'requestResumeToken' must be true if 'resumeAfter' is" @@ -150,14 +203,6 @@ Status validateFindCommandRequest(const FindCommandRequest& findCommand) { return Status::OK(); } -void refreshNSS(const NamespaceString& nss, FindCommandRequest* findCommand) { - if (findCommand->getNamespaceOrUUID().uuid()) { - auto& nssOrUUID = findCommand->getNamespaceOrUUID(); - nssOrUUID.setNss(nss); - } - invariant(findCommand->getNamespaceOrUUID().nss()); -} - std::unique_ptr makeFromFindCommand(const BSONObj& cmdObj, boost::optional nss, bool apiStrict) { @@ -165,12 +210,6 @@ std::unique_ptr makeFromFindCommand(const BSONObj& cmdObj, IDLParserContext("FindCommandRequest", apiStrict, nss ? nss->tenantId() : boost::none), cmdObj)); - // If there is an explicit namespace specified overwite it. - if (nss) { - auto& nssOrUuid = findCommand->getNamespaceOrUUID(); - nssOrUuid.setNss(*nss); - } - addMetaProjection(findCommand.get()); if (findCommand->getSkip() && *findCommand->getSkip() == 0) { @@ -231,10 +270,16 @@ TailableModeEnum getTailableMode(const FindCommandRequest& findCommand) { tailableModeFromBools(findCommand.getTailable(), findCommand.getAwaitData())); } -void validateCursorResponse(const BSONObj& outputAsBson, boost::optional tenantId) { +void validateCursorResponse(const BSONObj& outputAsBson, + boost::optional tenantId, + const SerializationContext& serializationContext) { if (getTestCommandsEnabled()) { CursorInitialReply::parse( - IDLParserContext("CursorInitialReply", false /* apiStrict */, tenantId), outputAsBson); + IDLParserContext("CursorInitialReply", + false /* apiStrict */, + tenantId, + SerializationContext::stateCommandReply(serializationContext)), + outputAsBson); } } @@ -301,23 +346,26 @@ StatusWith asAggregationCommand(const FindCommandRequest& findCommand) << " not supported in aggregation."}; } - if (findCommand.getRequestResumeToken()) { - return {ErrorCodes::InvalidPipelineOperator, - str::stream() << "Option " << FindCommandRequest::kRequestResumeTokenFieldName - << " not supported in aggregation."}; - } + if (!resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + if (findCommand.getRequestResumeToken()) { + return {ErrorCodes::InvalidPipelineOperator, + str::stream() << "Option " << FindCommandRequest::kRequestResumeTokenFieldName + << " not supported in aggregation."}; + } - if (!findCommand.getResumeAfter().isEmpty()) { - return {ErrorCodes::InvalidPipelineOperator, - str::stream() << "Option " << FindCommandRequest::kResumeAfterFieldName - << " not supported in 
aggregation."}; + if (!findCommand.getResumeAfter().isEmpty()) { + return {ErrorCodes::InvalidPipelineOperator, + str::stream() << "Option " << FindCommandRequest::kResumeAfterFieldName + << " not supported in aggregation."}; + } } // Now that we've successfully validated this QR, begin building the aggregation command. - aggregationBuilder.append("aggregate", - findCommand.getNamespaceOrUUID().nss() - ? findCommand.getNamespaceOrUUID().nss()->coll() - : ""); + tassert(ErrorCodes::BadValue, + "Unsupported type UUID for namespace", + findCommand.getNamespaceOrUUID().isNamespaceString()); + aggregationBuilder.append("aggregate", findCommand.getNamespaceOrUUID().nss().coll()); // Construct an aggregation pipeline that finds the equivalent documents to this query request. BSONArrayBuilder pipelineBuilder(aggregationBuilder.subarrayStart("pipeline")); @@ -385,6 +433,17 @@ StatusWith asAggregationCommand(const FindCommandRequest& findCommand) if (findCommand.getLet()) { aggregationBuilder.append(FindCommandRequest::kLetFieldName, *findCommand.getLet()); } + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + if (findCommand.getRequestResumeToken()) { + aggregationBuilder.append(FindCommandRequest::kRequestResumeTokenFieldName, + findCommand.getRequestResumeToken().value_or(false)); + } + if (!findCommand.getResumeAfter().isEmpty()) { + aggregationBuilder.append(FindCommandRequest::kResumeAfterFieldName, + findCommand.getResumeAfter()); + } + } return StatusWith(aggregationBuilder.obj()); } diff --git a/src/mongo/db/query/query_request_helper.h b/src/mongo/db/query/query_request_helper.h index 3f73d188578c7..b006e8a122af9 100644 --- a/src/mongo/db/query/query_request_helper.h +++ b/src/mongo/db/query/query_request_helper.h @@ -29,14 +29,26 @@ #pragma once +#include #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/tailable_mode.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/serialization_context.h" namespace mongo { @@ -60,6 +72,12 @@ static constexpr auto kNaturalSortField = "$natural"; */ Status validateGetMoreCollectionName(StringData collectionName); +/** + * Returns a non-OK status if '$_resumeAfter' is set to an unexpected value, or to a recordId type + * that does not match the collection type. + */ +Status validateResumeAfter(const mongo::BSONObj& resumeAfter, bool isClusteredCollection); + /** * Returns a non-OK status if any property of the QR has a bad value (e.g. a negative skip * value) or if there is a bad combination of options (e.g. awaitData is illegal without @@ -84,11 +102,6 @@ std::unique_ptr makeFromFindCommandForTests( boost::optional nss = boost::none, bool apiStrict = false); -/** - * If _uuid exists for this FindCommandRequest, update the value of _nss. - */ -void refreshNSS(const NamespaceString& nss, FindCommandRequest* findCommand); - /** * Converts this FindCommandRequest into an aggregation using $match.
If this FindCommandRequest has * options that cannot be satisfied by aggregation, a non-OK status is returned and 'cmdBuilder' is @@ -139,7 +152,9 @@ TailableModeEnum getTailableMode(const FindCommandRequest& findCommand); /** * Asserts whether the cursor response adhere to the format defined in IDL. */ -void validateCursorResponse(const BSONObj& outputAsBson, boost::optional tenantId); +void validateCursorResponse(const BSONObj& outputAsBson, + boost::optional tenantId, + const SerializationContext& serializationContext); /** * Updates the projection object with a $meta projection for the showRecordId option. diff --git a/src/mongo/db/query/query_request_test.cpp b/src/mongo/db/query/query_request_test.cpp index 15827a9be2f28..c6ec91342a279 100644 --- a/src/mongo/db/query/query_request_test.cpp +++ b/src/mongo/db/query/query_request_test.cpp @@ -27,21 +27,53 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" -#include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/collection_mock.h" -#include "mongo/db/dbmessage.h" -#include "mongo/db/json.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/max_time_ms_parser.h" #include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/tailable_mode_gen.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -49,7 +81,8 @@ namespace { using std::unique_ptr; using unittest::assertGet; -static const NamespaceString testns("testdb.testcoll"); +static const NamespaceString testns = + NamespaceString::createNamespaceString_forTest("testdb.testcoll"); TEST(QueryRequestTest, NegativeSkip) { FindCommandRequest findCommand(testns); @@ -246,10 +279,13 @@ TEST(QueryRequestTest, InvalidResumeAfterWrongRecordIdType) { findCommand.setRequestResumeToken(true); // Hint must be explicitly set for the query request to validate. 
findCommand.setHint(fromjson("{$natural: 1}")); - ASSERT_NOT_OK(query_request_helper::validateFindCommandRequest(findCommand)); + ASSERT_NOT_OK(query_request_helper::validateResumeAfter(findCommand.getResumeAfter(), + false /* isClusteredCollection */)); resumeAfter = BSON("$recordId" << 1LL); findCommand.setResumeAfter(resumeAfter); ASSERT_OK(query_request_helper::validateFindCommandRequest(findCommand)); + ASSERT_OK(query_request_helper::validateResumeAfter(findCommand.getResumeAfter(), + false /* isClusteredCollection */)); } TEST(QueryRequestTest, InvalidResumeAfterExtraField) { @@ -259,7 +295,8 @@ TEST(QueryRequestTest, InvalidResumeAfterExtraField) { findCommand.setRequestResumeToken(true); // Hint must be explicitly set for the query request to validate. findCommand.setHint(fromjson("{$natural: 1}")); - ASSERT_NOT_OK(query_request_helper::validateFindCommandRequest(findCommand)); + ASSERT_NOT_OK(query_request_helper::validateResumeAfter(findCommand.getResumeAfter(), + false /* isClusteredCollection */)); } TEST(QueryRequestTest, ResumeAfterWithHint) { @@ -282,6 +319,8 @@ TEST(QueryRequestTest, ResumeAfterWithSort) { // Hint must be explicitly set for the query request to validate. findCommand.setHint(fromjson("{$natural: 1}")); ASSERT_OK(query_request_helper::validateFindCommandRequest(findCommand)); + ASSERT_OK(query_request_helper::validateResumeAfter(findCommand.getResumeAfter(), + false /* isClusteredCollection */)); findCommand.setSort(fromjson("{a: 1}")); ASSERT_NOT_OK(query_request_helper::validateFindCommandRequest(findCommand)); findCommand.setSort(fromjson("{$natural: 1}")); @@ -297,6 +336,8 @@ TEST(QueryRequestTest, ResumeNoSpecifiedRequestResumeToken) { ASSERT_NOT_OK(query_request_helper::validateFindCommandRequest(findCommand)); findCommand.setRequestResumeToken(true); ASSERT_OK(query_request_helper::validateFindCommandRequest(findCommand)); + ASSERT_OK(query_request_helper::validateResumeAfter(findCommand.getResumeAfter(), + false /* isClusteredCollection */)); } TEST(QueryRequestTest, ExplicitEmptyResumeAfter) { @@ -308,6 +349,8 @@ TEST(QueryRequestTest, ExplicitEmptyResumeAfter) { ASSERT_OK(query_request_helper::validateFindCommandRequest(findCommand)); findCommand.setRequestResumeToken(true); ASSERT_OK(query_request_helper::validateFindCommandRequest(findCommand)); + ASSERT_OK(query_request_helper::validateResumeAfter(findCommand.getResumeAfter(), + false /* isClusteredCollection */)); } // @@ -1240,7 +1283,7 @@ TEST(QueryRequestTest, ConvertToAggregationSucceeds) { auto agg = query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -1258,7 +1301,7 @@ TEST(QueryRequestTest, ConvertToAggregationOmitsExplain) { auto agg = query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT_FALSE(ar.getValue().getExplain()); @@ -1273,7 +1316,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithHintSucceeds) { const auto agg = 
query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT_BSONOBJ_EQ(findCommand.getHint(), ar.getValue().getHint().value_or(BSONObj())); @@ -1347,10 +1390,18 @@ TEST(QueryRequestTest, ConvertToAggregationWithAllowPartialResultsFails) { ASSERT_NOT_OK(query_request_helper::asAggregationCommand(findCommand)); } -TEST(QueryRequestTest, ConvertToAggregationWithRequestResumeTokenFails) { +TEST(QueryRequestTest, ConvertToAggregationWithRequestResumeTokenSucceeds) { + RAIIServerParameterControllerForTest featureFlagController("featureFlagReshardingImprovements", + true); FindCommandRequest findCommand(testns); findCommand.setRequestResumeToken(true); - ASSERT_NOT_OK(query_request_helper::asAggregationCommand(findCommand)); + findCommand.setHint(BSON("$natural" << 1)); + const auto agg = query_request_helper::asAggregationCommand(findCommand); + ASSERT_OK(agg); + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; + auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); + ASSERT_OK(ar.getStatus()); + ASSERT(ar.getValue().getRequestResumeToken()); } TEST(QueryRequestTest, ConvertToAggregationWithResumeAfterFails) { @@ -1371,7 +1422,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithPipeline) { auto agg = query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -1399,7 +1450,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithBatchSize) { auto agg = query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -1420,7 +1471,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithMaxTimeMS) { const BSONObj cmdObj = agg.getValue(); ASSERT_EQ(cmdObj["maxTimeMS"].Int(), 9); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), cmdObj).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), cmdObj).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -1437,7 +1488,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithCollationSucceeds) { auto agg = query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT(!ar.getValue().getExplain()); @@ -1470,7 +1521,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithLegacyRuntimeConstantsSucceeds) { auto agg = query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg); - auto aggCmd = 
OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT(ar.getValue().getLegacyRuntimeConstants().has_value()); @@ -1484,7 +1535,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithAllowDiskUseTrueSucceeds) { const auto agg = query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg.getStatus()); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT(ar.getValue().getAllowDiskUse().has_value()); @@ -1497,7 +1548,7 @@ TEST(QueryRequestTest, ConvertToAggregationWithAllowDiskUseFalseSucceeds) { const auto agg = query_request_helper::asAggregationCommand(findCommand); ASSERT_OK(agg.getStatus()); - auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db(), agg.getValue()).body; + auto aggCmd = OpMsgRequest::fromDBAndBody(testns.db_forTest(), agg.getValue()).body; auto ar = aggregation_request_helper::parseFromBSONForTests(testns, aggCmd); ASSERT_OK(ar.getStatus()); ASSERT(ar.getValue().getAllowDiskUse().has_value()); @@ -1524,19 +1575,19 @@ TEST(QueryRequestTest, ConvertToFindWithAllowDiskUseFalseSucceeds) { TEST(QueryRequestHelperTest, ValidateResponseMissingFields) { BSONObjBuilder builder; - ASSERT_THROWS_CODE( - query_request_helper::validateCursorResponse(builder.asTempObj(), boost::none), - DBException, - 6253507); + ASSERT_THROWS_CODE(query_request_helper::validateCursorResponse( + builder.asTempObj(), boost::none, SerializationContext()), + DBException, + 6253507); } TEST(QueryRequestHelperTest, ValidateResponseWrongDataType) { BSONObjBuilder builder; builder.append("cursor", 1); - ASSERT_THROWS_CODE( - query_request_helper::validateCursorResponse(builder.asTempObj(), boost::none), - DBException, - ErrorCodes::TypeMismatch); + ASSERT_THROWS_CODE(query_request_helper::validateCursorResponse( + builder.asTempObj(), boost::none, SerializationContext()), + DBException, + ErrorCodes::TypeMismatch); } TEST(QueryRequestHelperTest, ParsedCursorRemainsValidAfterBSONDestroyed) { @@ -1563,14 +1614,11 @@ class QueryRequestTest : public ServiceContextTest {}; TEST_F(QueryRequestTest, ParseFromUUID) { const UUID uuid = UUID::gen(); - - NamespaceStringOrUUID nssOrUUID("test", uuid); FindCommandRequest findCommand(nssOrUUID); const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.testns"); - // Ensure a call to refreshNSS succeeds. 
- query_request_helper::refreshNSS(nss, &findCommand); - ASSERT_EQ(nss, *findCommand.getNamespaceOrUUID().nss()); + findCommand.setNss(nss); + ASSERT_EQ(nss, findCommand.getNamespaceOrUUID().nss()); } } // namespace diff --git a/src/mongo/db/query/query_settings.cpp b/src/mongo/db/query/query_settings.cpp index 2f376e2822ffe..b2883949c1ada 100644 --- a/src/mongo/db/query/query_settings.cpp +++ b/src/mongo/db/query/query_settings.cpp @@ -29,8 +29,19 @@ #include "mongo/db/query/query_settings.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/query/canonical_query.h" -#include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" namespace mongo { diff --git a/src/mongo/db/query/query_settings.h b/src/mongo/db/query/query_settings.h index bff602de3cbe8..d7b0d42757032 100644 --- a/src/mongo/db/query/query_settings.h +++ b/src/mongo/db/query/query_settings.h @@ -29,16 +29,22 @@ #pragma once +#include #include +#include +#include #include +#include #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobj_comparator_interface.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/index_entry.h" #include "mongo/db/query/plan_cache.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { diff --git a/src/mongo/db/query/query_settings.idl b/src/mongo/db/query/query_settings.idl new file mode 100644 index 0000000000000..1de37e20f09a0 --- /dev/null +++ b/src/mongo/db/query/query_settings.idl @@ -0,0 +1,85 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo::query_settings" + +imports: + - "mongo/db/basic_types.idl" + - "mongo/db/query/index_hint.idl" + - "mongo/db/query/query_shape_hash.idl" + +enums: + QueryEngineVersion: + description: >- + Enum representing the query engine version as a combination of execution + engine and optimizer being used when executing the query. 
+ type: string + values: + kV1: "v1" # Classic + Multiplanner + kV2: "v2" # Nebari + Multiplanner + +structs: + Namespace: + description: >- + Struct representing a namespace. An alternative definition of the + NamespaceString. + fields: + db: string + coll: string + + IndexHintSpec: + description: "Struct representing the index hint spec." + fields: + ns: + type: Namespace + optional: true + allowedIndexes: + type: array + + QuerySettings: + description: >- + Struct representing possible settings that may be applied to a given query. + fields: + indexHints: + type: + variant: [IndexHintSpec, array] + optional: true + queryEngineVersion: + type: QueryEngineVersion + optional: true + + QueryShapeConfiguration: + description: >- + Struct representing configuration for a particular query shape of the + 'representativeQuery'. Configuration consists of query settings that will + be applied to all query instances of the query shape. + fields: + queryShapeHash: QueryShapeHash + settings: QuerySettings + representativeQuery: object_owned diff --git a/src/mongo/db/query/query_settings_cluster_parameter.idl b/src/mongo/db/query/query_settings_cluster_parameter.idl new file mode 100644 index 0000000000000..f9fc3e2ad7a7c --- /dev/null +++ b/src/mongo/db/query/query_settings_cluster_parameter.idl @@ -0,0 +1,62 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo::query_settings" + cpp_includes: + - "mongo/db/query/query_feature_flags_gen.h" + +imports: + - "mongo/db/basic_types.idl" + - "mongo/db/query/query_feature_flags.idl" + - "mongo/db/query/query_settings.idl" + - "mongo/idl/cluster_server_parameter.idl" + +structs: + QuerySettingsClusterParameterValue: + description: "The underlying value of the query settings cluster parameter." + inline_chained_structs: true + chained_structs: + ClusterServerParameter: clusterServerParameter + fields: + settingsArray: + type: array + stability: stable + +server_parameters: + querySettings: + description: >- + Query settings for all query shapes will be stored within this cluster + server parameter. 
+ set_at: cluster + cpp_class: + name: QuerySettingsClusterParameter + override_set: true + override_validate: true + condition: + feature_flag: feature_flags::gFeatureFlagQuerySettings diff --git a/src/mongo/db/query/query_settings_decoration.cpp b/src/mongo/db/query/query_settings_decoration.cpp index 1015bbfe09ba6..2977f3de7a995 100644 --- a/src/mongo/db/query/query_settings_decoration.cpp +++ b/src/mongo/db/query/query_settings_decoration.cpp @@ -28,11 +28,11 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/query_settings_decoration.h" +#include #include "mongo/db/catalog/collection.h" +#include "mongo/db/query/query_settings_decoration.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/query_settings_decoration.h b/src/mongo/db/query/query_settings_decoration.h index b02128eb61694..edcfac68f499f 100644 --- a/src/mongo/db/query/query_settings_decoration.h +++ b/src/mongo/db/query/query_settings_decoration.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/query/query_settings.h" namespace mongo { diff --git a/src/mongo/db/query/query_settings_manager.cpp b/src/mongo/db/query/query_settings_manager.cpp new file mode 100644 index 0000000000000..07dbf62df5b08 --- /dev/null +++ b/src/mongo/db/query/query_settings_manager.cpp @@ -0,0 +1,226 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
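
The structs in query_settings.idl above are code-generated into C++ types that the rest of this change builds on. A minimal sketch of constructing one settings value, mirroring the setter calls used by the unit test added later in this diff (the "a_1" index name and the helper function are illustrative assumptions):

```cpp
#include "mongo/db/query/index_hint.h"
#include "mongo/db/query/query_settings_gen.h"

namespace mongo::query_settings {
QuerySettings makeExampleSettings() {
    QuerySettings settings;
    // Pin matching query shapes to the "v1" engine version (Classic + Multiplanner).
    settings.setQueryEngineVersion(QueryEngineVersionEnum::kV1);
    // Restrict planning for matching shapes to a single allowed index.
    settings.setIndexHints({{IndexHintSpec({IndexHint("a_1")})}});
    return settings;
}
}  // namespace mongo::query_settings
```

A QueryShapeConfiguration then pairs such a settings value with a QueryShapeHash and a representative query, and the `querySettings` cluster parameter stores an array of these configurations.
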
+ */ + +#include "mongo/db/query/query_settings_manager.h" + +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/query_settings_cluster_parameter_gen.h" +#include "mongo/db/query/query_settings_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" + +namespace mongo::query_settings { + +namespace { +static const auto kParameterName = "querySettings"; +const auto getQuerySettingsManager = + ServiceContext::declareDecoration>(); +} // namespace + +QuerySettingsManager& QuerySettingsManager::get(ServiceContext* service) { + return *getQuerySettingsManager(service); +} + +QuerySettingsManager& QuerySettingsManager::get(OperationContext* opCtx) { + return *getQuerySettingsManager(opCtx->getServiceContext()); +} + +void QuerySettingsManager::create(ServiceContext* service) { + getQuerySettingsManager(service).emplace(service); +} + +boost::optional> +QuerySettingsManager::getQuerySettingsForQueryShapeHash( + OperationContext* opCtx, + const query_shape::QueryShapeHash& queryShapeHash, + const boost::optional& tenantId) const { + Lock::SharedLock readLock(opCtx, _mutex); + + // Perform the lookup for query settings map maintained for the given tenant. + auto versionedQueryShapeConfigurationsIt = + _tenantIdToVersionedQueryShapeConfigurationsMap.find(tenantId); + if (versionedQueryShapeConfigurationsIt == + _tenantIdToVersionedQueryShapeConfigurationsMap.end()) { + return {}; + } + + // Lookup query settings for 'queryShapeHash'. 
+ auto& queryShapeConfigurationsMap = + versionedQueryShapeConfigurationsIt->second.queryShapeConfigurationsMap; + auto queryShapeConfigurationIt = queryShapeConfigurationsMap.find(queryShapeHash); + if (queryShapeConfigurationIt == queryShapeConfigurationsMap.end()) { + return {}; + } + + return queryShapeConfigurationIt->second; +} + +void QuerySettingsManager::setQueryShapeConfigurations( + OperationContext* opCtx, + std::vector&& settingsArray, + LogicalTime parameterClusterTime, + const boost::optional& tenantId) { + QueryShapeConfigurationsMap queryShapeConfigurationsMap; + queryShapeConfigurationsMap.reserve(settingsArray.size()); + for (auto&& queryShapeConfiguration : settingsArray) { + queryShapeConfigurationsMap.insert({queryShapeConfiguration.getQueryShapeHash(), + {queryShapeConfiguration.getSettings(), + queryShapeConfiguration.getRepresentativeQuery()}}); + } + Lock::ExclusiveLock writeLock(opCtx, _mutex); + _tenantIdToVersionedQueryShapeConfigurationsMap.insert_or_assign( + tenantId, + VersionedQueryShapeConfigurations{std::move(queryShapeConfigurationsMap), + parameterClusterTime}); +} + +std::vector QuerySettingsManager::getAllQueryShapeConfigurations( + OperationContext* opCtx, const boost::optional& tenantId) const { + Lock::SharedLock readLock(opCtx, _mutex); + return getAllQueryShapeConfigurations_inlock(opCtx, tenantId); +} + +std::vector QuerySettingsManager::getAllQueryShapeConfigurations_inlock( + OperationContext* opCtx, const boost::optional& tenantId) const { + auto versionedQueryShapeConfigurationsIt = + _tenantIdToVersionedQueryShapeConfigurationsMap.find(tenantId); + if (versionedQueryShapeConfigurationsIt == + _tenantIdToVersionedQueryShapeConfigurationsMap.end()) { + return {}; + } + + std::vector configurations; + for (const auto& [queryShapeHash, value] : + versionedQueryShapeConfigurationsIt->second.queryShapeConfigurationsMap) { + configurations.emplace_back(queryShapeHash, value.first, value.second); + } + return configurations; +} + +void QuerySettingsManager::removeAllQueryShapeConfigurations( + OperationContext* opCtx, const boost::optional& tenantId) { + Lock::ExclusiveLock writeLock(opCtx, _mutex); + _tenantIdToVersionedQueryShapeConfigurationsMap.erase(tenantId); +} + +LogicalTime QuerySettingsManager::getClusterParameterTime( + OperationContext* opCtx, const boost::optional& tenantId) const { + Lock::SharedLock readLock(opCtx, _mutex); + return getClusterParameterTime_inlock(opCtx, tenantId); +} + +LogicalTime QuerySettingsManager::getClusterParameterTime_inlock( + OperationContext* opCtx, const boost::optional& tenantId) const { + auto versionedQueryShapeConfigurationsIt = + _tenantIdToVersionedQueryShapeConfigurationsMap.find(tenantId); + if (versionedQueryShapeConfigurationsIt == + _tenantIdToVersionedQueryShapeConfigurationsMap.end()) { + return LogicalTime::kUninitialized; + } + return versionedQueryShapeConfigurationsIt->second.clusterParameterTime; +} + +void QuerySettingsManager::appendQuerySettingsClusterParameterValue( + OperationContext* opCtx, BSONObjBuilder* bob, const boost::optional& tenantId) { + Lock::SharedLock readLock(opCtx, _mutex); + bob->append("_id"_sd, kParameterName); + BSONArrayBuilder arrayBuilder( + bob->subarrayStart(QuerySettingsClusterParameterValue::kSettingsArrayFieldName)); + for (auto&& item : getAllQueryShapeConfigurations_inlock(opCtx, tenantId)) { + arrayBuilder.append(item.toBSON()); + } + arrayBuilder.done(); + bob->append(QuerySettingsClusterParameterValue::kClusterParameterTimeFieldName, + 
getClusterParameterTime_inlock(opCtx, tenantId).asTimestamp()); +} + +void QuerySettingsClusterParameter::append(OperationContext* opCtx, + BSONObjBuilder* bob, + StringData name, + const boost::optional& tenantId) { + auto& querySettingsManager = QuerySettingsManager::get(getGlobalServiceContext()); + querySettingsManager.appendQuerySettingsClusterParameterValue(opCtx, bob, tenantId); +} + +Status QuerySettingsClusterParameter::validate(const BSONElement& newValueElement, + const boost::optional& tenantId) const { + try { + (void)QuerySettingsClusterParameterValue::parse( + IDLParserContext("querySettingsParameterValue"), newValueElement.Obj()); + return Status::OK(); + } catch (const AssertionException&) { + return {ErrorCodes::BadValue, + "Call setQuerySettings or removeQuerySettings commands in order to set or remove " + "query settings for a given query shape"}; + } +} + +Status QuerySettingsClusterParameter::set(const BSONElement& newValueElement, + const boost::optional& tenantId) { + auto& querySettingsManager = QuerySettingsManager::get(getGlobalServiceContext()); + auto newSettings = QuerySettingsClusterParameterValue::parse( + IDLParserContext("querySettingsParameterValue"), newValueElement.Obj()); + querySettingsManager.setQueryShapeConfigurations(Client::getCurrent()->getOperationContext(), + std::move(newSettings.getSettingsArray()), + newSettings.getClusterParameterTime(), + tenantId); + return Status::OK(); +} + +Status QuerySettingsClusterParameter::reset(const boost::optional& tenantId) { + auto& querySettingsManager = QuerySettingsManager::get(getGlobalServiceContext()); + querySettingsManager.removeAllQueryShapeConfigurations( + Client::getCurrent()->getOperationContext(), tenantId); + return Status::OK(); +} + +LogicalTime QuerySettingsClusterParameter::getClusterParameterTime( + const boost::optional& tenantId) const { + auto& querySettingsManager = QuerySettingsManager::get(getGlobalServiceContext()); + return querySettingsManager.getClusterParameterTime(Client::getCurrent()->getOperationContext(), + tenantId); +} + +}; // namespace mongo::query_settings diff --git a/src/mongo/db/query/query_settings_manager.h b/src/mongo/db/query/query_settings_manager.h new file mode 100644 index 0000000000000..b83071ae9175e --- /dev/null +++ b/src/mongo/db/query/query_settings_manager.h @@ -0,0 +1,175 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. 
If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_view.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/query_settings_gen.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/trusted_hasher.h" +#include "mongo/stdx/unordered_map.h" + +namespace mongo { +/** + * Truncates the 256 bit QueryShapeHash by taking only the first sizeof(size_t) bytes. + */ +class QueryShapeHashHasher { +public: + size_t operator()(const query_shape::QueryShapeHash& hash) const { + return ConstDataView(reinterpret_cast(hash.data())).read(); + } +}; +template <> +struct IsTrustedHasher : std::true_type {}; + +namespace query_settings { + +using QueryInstance = BSONObj; + +using QueryShapeConfigurationsMap = stdx::unordered_map, + QueryShapeHashHasher>; + +/** + * Struct stores all query shape configurations for a given tenant. It stores the same information + * as QuerySettingsClusterParameterValue. The data present in the 'settingsArray' is stored in the + * QueryShapeConfigurationsMap for faster access. + */ +struct VersionedQueryShapeConfigurations { + /** + * An unordered_map of all QueryShapeConfigurations stored within the + * QuerySettingsClusterParameter keyed by QueryShapHash. + */ + QueryShapeConfigurationsMap queryShapeConfigurationsMap; + + /** + * Cluster time of the current version of the QuerySettingsClusterParameter. + */ + LogicalTime clusterParameterTime; +}; + +/** + * Class responsible for managing in-memory storage and fetching of query settings. The in-memory + * storage is eventually consistent with the query settings on other cluster nodes and is updated + * based on OpObserver call performed when executing setClusterParameter command. + * + * Query settings in-memory storage is maintained separately for each tenant. In dedicated + * environments the 'tenantId' argument passed to the methods must be boost::none. + * + * Query settings should only be retrieved through this class. + */ +class QuerySettingsManager { +public: + explicit QuerySettingsManager(ServiceContext* service) {} + + ~QuerySettingsManager() = default; + + QuerySettingsManager(const QuerySettingsManager&) = delete; + QuerySettingsManager& operator=(const QuerySettingsManager&) = delete; + + static void create(ServiceContext* service); + + static QuerySettingsManager& get(ServiceContext* service); + static QuerySettingsManager& get(OperationContext* opCtx); + + /** + * Returns (QuerySettings, QueryInstance) pair associated with the QueryShapeHash for the given + * tenant. + */ + boost::optional> getQuerySettingsForQueryShapeHash( + OperationContext* opCtx, + const query_shape::QueryShapeHash& queryShapeHash, + const boost::optional& tenantId) const; + + /** + * Returns all QueryShapeConfigurations stored for the given tenant. 
+ */ + std::vector getAllQueryShapeConfigurations( + OperationContext* opCtx, const boost::optional& tenantId) const; + + /** + * Sets the QueryShapeConfigurations by replacing an existing VersionedQueryShapeConfigurations + * with the newly built one. + */ + void setQueryShapeConfigurations(OperationContext* opCtx, + std::vector&& settings, + LogicalTime parameterClusterTime, + const boost::optional& tenantId); + + /** + * Removes all query settings documents for the given tenant. + */ + void removeAllQueryShapeConfigurations(OperationContext* opCtx, + const boost::optional& tenantId); + + /** + * Returns the cluster parameter time of the current QuerySettingsClusterParameter value for the + * given tenant. + */ + LogicalTime getClusterParameterTime(OperationContext* opCtx, + const boost::optional& tenantId) const; + + /** + * Appends the QuerySettingsClusterParameterValue maintained as + * VersionedQueryShapeConfigurations for the given tenant. + */ + void appendQuerySettingsClusterParameterValue(OperationContext* opCtx, + BSONObjBuilder* bob, + const boost::optional& tenantId); + +private: + std::vector getAllQueryShapeConfigurations_inlock( + OperationContext* opCtx, const boost::optional& tenantId) const; + + LogicalTime getClusterParameterTime_inlock(OperationContext* opCtx, + const boost::optional& tenantId) const; + + TenantIdMap _tenantIdToVersionedQueryShapeConfigurationsMap; + Lock::ResourceMutex _mutex = Lock::ResourceMutex("QuerySettingsManager::mutex"); +}; +}; // namespace query_settings +} // namespace mongo diff --git a/src/mongo/db/query/query_settings_manager_test.cpp b/src/mongo/db/query/query_settings_manager_test.cpp new file mode 100644 index 0000000000000..3e1871c06b286 --- /dev/null +++ b/src/mongo/db/query/query_settings_manager_test.cpp @@ -0,0 +1,260 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
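
QuerySettingsManager above is the intended single read path for per-shape settings. A consumer-side lookup might look like the following sketch (a hypothetical helper; it assumes the documented (QuerySettings, QueryInstance) pair return value of `getQuerySettingsForQueryShapeHash()`):

```cpp
#include <boost/optional.hpp>

#include "mongo/db/operation_context.h"
#include "mongo/db/query/query_settings_manager.h"
#include "mongo/db/tenant_id.h"

namespace mongo::query_settings {
boost::optional<QuerySettings> lookupSettingsForShape(
    OperationContext* opCtx,
    const query_shape::QueryShapeHash& queryShapeHash,
    const boost::optional<TenantId>& tenantId) {
    auto& manager = QuerySettingsManager::get(opCtx);
    // boost::none is returned when no configuration has been set for this query shape.
    if (auto entry = manager.getQuerySettingsForQueryShapeHash(opCtx, queryShapeHash, tenantId)) {
        // 'entry' holds the (settings, representative query) pair; only the settings are needed.
        return entry->first;
    }
    return boost::none;
}
}  // namespace mongo::query_settings
```
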
+ */ + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_hint.h" +#include "mongo/db/query/parsed_find_command.h" +#include "mongo/db/query/query_settings_cluster_parameter_gen.h" +#include "mongo/db/query/query_settings_gen.h" +#include "mongo/db/query/query_settings_manager.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/serialization_context.h" + +namespace mongo::query_settings { +namespace { +static const auto kParameterName = "querySettings"; + +QueryShapeConfiguration makeQueryShapeConfiguration( + const QuerySettings& settings, + QueryInstance query, + boost::intrusive_ptr expCtx) { + auto findCommandRequest = std::make_unique( + FindCommandRequest::parse(IDLParserContext("findCommandRequest"), query)); + auto parsedFindCommandResult = + parsed_find_command::parse(expCtx, std::move(findCommandRequest)); + ASSERT_OK(parsedFindCommandResult); + return QueryShapeConfiguration( + query_shape::hash(query_shape::extractQueryShape( + *parsedFindCommandResult.getValue(), SerializationOptions(), expCtx)), + settings, + query); +} + +// QueryShapeConfiguration is not comparable, therefore comparing the corresponding +// BSONObj encoding. 
+void assertQueryShapeConfigurationsEquals( + const std::vector& expectedQueryShapeConfigurations, + const std::vector& actualQueryShapeConfigurations) { + std::vector lhs, rhs; + std::transform(expectedQueryShapeConfigurations.begin(), + expectedQueryShapeConfigurations.end(), + std::back_inserter(lhs), + [](auto x) { return x.toBSON(); }); + std::transform(actualQueryShapeConfigurations.begin(), + actualQueryShapeConfigurations.end(), + std::back_inserter(rhs), + [](auto x) { return x.toBSON(); }); + std::sort(lhs.begin(), lhs.end(), SimpleBSONObjComparator::kInstance.makeLessThan()); + std::sort(rhs.begin(), rhs.end(), SimpleBSONObjComparator::kInstance.makeLessThan()); + ASSERT(std::equal( + lhs.begin(), lhs.end(), rhs.begin(), SimpleBSONObjComparator::kInstance.makeEqualTo())); +} + +} // namespace + +class QuerySettingsManagerTest : public ServiceContextTest { +public: + static std::vector getExampleQueryShapeConfigurations( + boost::intrusive_ptr expCtx) { + QuerySettings settings; + settings.setQueryEngineVersion(QueryEngineVersionEnum::kV2); + settings.setIndexHints({{IndexHintSpec({IndexHint("a_1")})}}); + QueryInstance queryA = BSON("find" + << "exampleColl" + << "$db" + << "foo" + << "filter" << BSON("a" << 2)); + QueryInstance queryB = BSON("find" + << "exampleColl" + << "$db" + << "foo" + << "filter" << BSON("a" << BSONNULL)); + return {makeQueryShapeConfiguration(settings, queryA, expCtx), + makeQueryShapeConfiguration(settings, queryB, expCtx)}; + } + + void setUp() final { + QuerySettingsManager::create(getServiceContext()); + + _opCtx = cc().makeOperationContext(); + _expCtx = boost::intrusive_ptr{new ExpressionContextForTest(opCtx())}; + } + + OperationContext* opCtx() { + return _opCtx.get(); + } + + boost::intrusive_ptr expCtx() { + return _expCtx; + } + + QuerySettingsManager& manager() { + return QuerySettingsManager::get(opCtx()); + } + +private: + ServiceContext::UniqueOperationContext _opCtx; + boost::intrusive_ptr _expCtx; +}; + +TEST_F(QuerySettingsManagerTest, QuerySettingsClusterParameterSerialization) { + // Set query shape configuration. + QuerySettings settings; + settings.setQueryEngineVersion(QueryEngineVersionEnum::kV2); + QueryInstance query = BSON("find" + << "exampleColl" + << "$db" + << "foo"); + auto config = makeQueryShapeConfiguration(settings, query, expCtx()); + LogicalTime clusterParameterTime(Timestamp(113, 59)); + TenantId tenantId(OID::gen()); + manager().setQueryShapeConfigurations(opCtx(), {config}, clusterParameterTime, tenantId); + + // Ensure the serialized parameter value contains 'settingsArray' with 'config' as value as well + // parameter id and clusterParameterTime. + BSONObjBuilder bob; + manager().appendQuerySettingsClusterParameterValue(opCtx(), &bob, tenantId); + ASSERT_BSONOBJ_EQ( + bob.done(), + BSON("_id" << kParameterName << QuerySettingsClusterParameterValue::kSettingsArrayFieldName + << BSON_ARRAY(config.toBSON()) + << QuerySettingsClusterParameterValue::kClusterParameterTimeFieldName + << clusterParameterTime.asTimestamp())); +} + +TEST_F(QuerySettingsManagerTest, QuerySettingsSetAndReset) { + auto configs = getExampleQueryShapeConfigurations(expCtx()); + auto firstConfig = configs[0], secondConfig = configs[1]; + LogicalTime firstWriteTime(Timestamp(1, 0)), secondWriteTime(Timestamp(2, 0)); + TenantId tenantId(OID::fromTerm(1)), otherTenantId(OID::fromTerm(2)); + + // Ensure that the maintained in-memory query shape configurations equal to the + // configurations specified in the parameter for both tenants. 
+    manager().setQueryShapeConfigurations(opCtx(), {firstConfig}, firstWriteTime, tenantId);
+    manager().setQueryShapeConfigurations(opCtx(), {firstConfig}, firstWriteTime, otherTenantId);
+    assertQueryShapeConfigurationsEquals(
+        {firstConfig}, manager().getAllQueryShapeConfigurations(opCtx(), tenantId));
+    assertQueryShapeConfigurationsEquals(
+        {firstConfig}, manager().getAllQueryShapeConfigurations(opCtx(), otherTenantId));
+    ASSERT_EQ(manager().getClusterParameterTime(opCtx(), tenantId), firstWriteTime);
+    ASSERT_EQ(manager().getClusterParameterTime(opCtx(), otherTenantId), firstWriteTime);
+
+    // Update query settings for tenant with 'tenantId'. Ensure its query shape configurations and
+    // parameter cluster time are updated accordingly.
+    manager().setQueryShapeConfigurations(opCtx(), {secondConfig}, secondWriteTime, tenantId);
+    assertQueryShapeConfigurationsEquals(
+        {secondConfig}, manager().getAllQueryShapeConfigurations(opCtx(), tenantId));
+    assertQueryShapeConfigurationsEquals(
+        {firstConfig}, manager().getAllQueryShapeConfigurations(opCtx(), otherTenantId));
+    ASSERT_EQ(manager().getClusterParameterTime(opCtx(), tenantId), secondWriteTime);
+    ASSERT_EQ(manager().getClusterParameterTime(opCtx(), otherTenantId), firstWriteTime);
+
+    // Reset the parameter value and ensure that the in-memory storage is cleared for tenant with
+    // 'tenantId'. QueryShapeConfigurations for tenant with 'otherTenantId' must not be affected.
+    manager().removeAllQueryShapeConfigurations(opCtx(), tenantId);
+    assertQueryShapeConfigurationsEquals(
+        {}, manager().getAllQueryShapeConfigurations(opCtx(), tenantId));
+    assertQueryShapeConfigurationsEquals(
+        {firstConfig}, manager().getAllQueryShapeConfigurations(opCtx(), otherTenantId));
+    ASSERT_EQ(manager().getClusterParameterTime(opCtx(), tenantId), LogicalTime::kUninitialized);
+    ASSERT_EQ(manager().getClusterParameterTime(opCtx(), otherTenantId), firstWriteTime);
+}
+
+TEST_F(QuerySettingsManagerTest, QuerySettingsLookup) {
+    auto configs = getExampleQueryShapeConfigurations(expCtx());
+    TenantId tenantId(OID::fromTerm(1)), otherTenantId(OID::fromTerm(2));
+    manager().setQueryShapeConfigurations(
+        opCtx(), std::vector<QueryShapeConfiguration>(configs), LogicalTime(), tenantId);
+
+    // Ensure QuerySettingsManager returns boost::none when QuerySettings are not found.
+    ASSERT_FALSE(manager().getQuerySettingsForQueryShapeHash(
+        opCtx(), query_shape::QueryShapeHash(), tenantId));
+
+    // Ensure QuerySettingsManager returns a valid (QuerySettings, QueryInstance) pair on lookup.
+    auto querySettingsPair = manager().getQuerySettingsForQueryShapeHash(
+        opCtx(), configs[1].getQueryShapeHash(), tenantId);
+    ASSERT(querySettingsPair.has_value());
+    auto [settings, queryInstance] = *querySettingsPair;
+    ASSERT_BSONOBJ_EQ(settings.toBSON(), configs[1].getSettings().toBSON());
+    ASSERT_BSONOBJ_EQ(queryInstance, configs[1].getRepresentativeQuery());
+
+    // Ensure QuerySettingsManager returns boost::none when QuerySettings are not set for the
+    // given tenant but do exist for another tenant.
+    ASSERT_FALSE(manager()
+                     .getQuerySettingsForQueryShapeHash(
+                         opCtx(), query_shape::QueryShapeHash(), otherTenantId)
+                     .has_value());
+}
+
+TEST(QuerySettingsClusterParameter, ParameterValidation) {
+    // Ensure validation fails for invalid input.
+ TenantId tenantId(OID::gen()); + QuerySettingsClusterParameter querySettingsParameter(kParameterName, + ServerParameterType::kClusterWide); + ASSERT_NOT_OK(querySettingsParameter.validate(BSON("" << BSON("a" + << "b")) + .firstElement(), + tenantId)); + + // Ensure validation passes for valid input. + QuerySettingsClusterParameterValue parameterValue({}, {}); + ASSERT_OK(querySettingsParameter.validate(BSON("" << parameterValue.toBSON()).firstElement(), + tenantId)); +} +} // namespace mongo::query_settings diff --git a/src/mongo/db/query/query_settings_test.cpp b/src/mongo/db/query/query_settings_test.cpp index 15d3bebdccfc7..71dca0f1f6c61 100644 --- a/src/mongo/db/query/query_settings_test.cpp +++ b/src/mongo/db/query/query_settings_test.cpp @@ -33,12 +33,17 @@ #include "mongo/db/query/query_settings.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/bson/mutable/mutable_bson_test_utils.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_names.h" #include "mongo/db/query/index_entry.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using mongo::AllowedIndicesFilter; using mongo::BSONObj; diff --git a/src/mongo/db/query/query_shape.cpp b/src/mongo/db/query/query_shape.cpp index 4f5bddc17883b..a35e78cc920a6 100644 --- a/src/mongo/db/query/query_shape.cpp +++ b/src/mongo/db/query/query_shape.cpp @@ -29,21 +29,351 @@ #include "mongo/db/query/query_shape.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/projection.h" +#include "mongo/db/query/projection_ast_util.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/query_shape_gen.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" + namespace mongo::query_shape { -BSONObj predicateShape(const MatchExpression* predicate) { +BSONObj debugPredicateShape(const MatchExpression* predicate) { + SerializationOptions opts; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + return predicate->serialize(opts); +} +BSONObj representativePredicateShape(const MatchExpression* predicate) { + SerializationOptions opts; + opts.literalPolicy = LiteralSerializationPolicy::kToRepresentativeParseableValue; + return predicate->serialize(opts); +} + +BSONObj debugPredicateShape(const MatchExpression* predicate, + std::function transformIdentifiersCallback) { SerializationOptions opts; - opts.replacementForLiteralArgs = kLiteralArgString; + opts.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + opts.transformIdentifiersCallback = transformIdentifiersCallback; + opts.transformIdentifiers = true; return predicate->serialize(opts); } -BSONObj predicateShape(const MatchExpression* predicate, - std::function identifierRedactionPolicy) { +BSONObj representativePredicateShape( + const MatchExpression* predicate, + 
std::function transformIdentifiersCallback) { SerializationOptions opts; - opts.replacementForLiteralArgs = kLiteralArgString; - opts.identifierRedactionPolicy = identifierRedactionPolicy; - opts.redactIdentifiers = true; + opts.literalPolicy = LiteralSerializationPolicy::kToRepresentativeParseableValue; + opts.transformIdentifiersCallback = transformIdentifiersCallback; + opts.transformIdentifiers = true; return predicate->serialize(opts); } +BSONObj extractSortShape(const BSONObj& sortSpec, + const boost::intrusive_ptr& expCtx, + const SerializationOptions& opts) { + if (sortSpec.isEmpty()) { + return sortSpec; + } + auto natural = sortSpec[query_request_helper::kNaturalSortField]; + + if (!natural) { + return SortPattern{sortSpec, expCtx} + .serialize(SortPattern::SortKeySerialization::kForPipelineSerialization, opts) + .toBson(); + } + // This '$natural' will fail to parse as a valid SortPattern since it is not a valid field + // path - it is usually considered and converted into a hint. For the query shape, we'll + // keep it unmodified. + BSONObjBuilder bob; + for (auto&& elem : sortSpec) { + if (elem.isABSONObj()) { + // We expect this won't work or parse on the main command path, but for shapification we + // don't really care, just treat it as a literal and don't bother parsing. + opts.appendLiteral( + &bob, opts.serializeFieldPathFromString(elem.fieldNameStringData()), elem); + } else if (elem.fieldNameStringData() == natural.fieldNameStringData()) { + bob.append(elem); + } else { + bob.appendAs(elem, opts.serializeFieldPathFromString(elem.fieldNameStringData())); + } + } + return bob.obj(); +} + +static std::string hintSpecialField = "$hint"; +void addShapeLiterals(BSONObjBuilder* bob, + const FindCommandRequest& findCommand, + const SerializationOptions& opts) { + if (auto limit = findCommand.getLimit()) { + opts.appendLiteral( + bob, FindCommandRequest::kLimitFieldName, static_cast(*limit)); + } + if (auto skip = findCommand.getSkip()) { + opts.appendLiteral(bob, FindCommandRequest::kSkipFieldName, static_cast(*skip)); + } +} + +static std::vector< + std::pair>> + boolArgMap = { + {FindCommandRequest::kSingleBatchFieldName, &FindCommandRequest::getSingleBatch}, + {FindCommandRequest::kAllowDiskUseFieldName, &FindCommandRequest::getAllowDiskUse}, + {FindCommandRequest::kReturnKeyFieldName, &FindCommandRequest::getReturnKey}, + {FindCommandRequest::kShowRecordIdFieldName, &FindCommandRequest::getShowRecordId}, + {FindCommandRequest::kTailableFieldName, &FindCommandRequest::getTailable}, + {FindCommandRequest::kAwaitDataFieldName, &FindCommandRequest::getAwaitData}, + {FindCommandRequest::kMirroredFieldName, &FindCommandRequest::getMirrored}, + {FindCommandRequest::kOplogReplayFieldName, &FindCommandRequest::getOplogReplay}, +}; +std::vector>> + objArgMap = { + {FindCommandRequest::kCollationFieldName, &FindCommandRequest::getCollation}, + +}; + +void addRemainingFindCommandFields(BSONObjBuilder* bob, + const FindCommandRequest& findCommand, + const SerializationOptions& opts) { + for (auto [fieldName, getterFunction] : boolArgMap) { + auto optBool = getterFunction(findCommand); + optBool.serializeToBSON(fieldName, bob); + } + auto collation = findCommand.getCollation(); + if (!collation.isEmpty()) { + bob->append(FindCommandRequest::kCollationFieldName, collation); + } +} + +BSONObj extractHintShape(BSONObj obj, const SerializationOptions& opts, bool preserveValue) { + BSONObjBuilder bob; + for (BSONElement elem : obj) { + if (hintSpecialField.compare(elem.fieldName()) == 0) { + 
if (elem.type() == BSONType::String) { + bob.append(hintSpecialField, opts.serializeFieldPathFromString(elem.String())); + } else if (elem.type() == BSONType::Object) { + opts.appendLiteral(&bob, hintSpecialField, elem.Obj()); + } else { + uasserted(ErrorCodes::FailedToParse, "$hint must be a string or an object"); + } + continue; + } + + // $natural doesn't need to be redacted. + if (elem.fieldNameStringData().compare(query_request_helper::kNaturalSortField) == 0) { + bob.append(elem); + continue; + } + + if (preserveValue) { + bob.appendAs(elem, opts.serializeFieldPathFromString(elem.fieldName())); + } else { + opts.appendLiteral(&bob, opts.serializeFieldPathFromString(elem.fieldName()), elem); + } + } + return bob.obj(); +} + +/** + * In a let specification all field names are variable names, and all values are either expressions + * or constants. + */ +BSONObj extractLetSpecShape(BSONObj letSpec, + const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx) { + + BSONObjBuilder bob; + for (BSONElement elem : letSpec) { + auto expr = Expression::parseOperand(expCtx.get(), elem, expCtx->variablesParseState); + auto redactedValue = expr->serialize(opts); + // Note that this will throw on deeply nested let variables. + redactedValue.addToBsonObj(&bob, opts.serializeFieldPathFromString(elem.fieldName())); + } + return bob.obj(); +} + +void appendCmdNs(BSONObjBuilder& bob, + const NamespaceString& nss, + const SerializationOptions& opts) { + BSONObjBuilder nsObj = bob.subobjStart("cmdNs"); + appendNamespaceShape(nsObj, nss, opts); + nsObj.doneFast(); +} + +void appendNamespaceShape(BSONObjBuilder& bob, + const NamespaceString& nss, + const SerializationOptions& opts) { + if (nss.tenantId()) { + bob.append("tenantId", opts.serializeIdentifier(nss.tenantId().value().toString())); + } + bob.append("db", opts.serializeIdentifier(nss.db())); + bob.append("coll", opts.serializeIdentifier(nss.coll())); +} + +BSONObj extractQueryShape(const ParsedFindCommand& findRequest, + const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx) { + const auto& findCmd = *findRequest.findCommandRequest; + BSONObjBuilder bob; + // Serialize the namespace as part of the query shape. + { + auto ns = findCmd.getNamespaceOrUUID(); + if (ns.isNamespaceString()) { + appendCmdNs(bob, ns.nss(), opts); + } else { + BSONObjBuilder cmdNs = bob.subobjStart("cmdNs"); + cmdNs.append("uuid", opts.serializeIdentifier(ns.uuid().toString())); + cmdNs.append("db", opts.serializeIdentifier(ns.dbname())); + cmdNs.doneFast(); + } + } + + bob.append("command", "find"); + std::unique_ptr filterExpr; + // Filter. + bob.append(FindCommandRequest::kFilterFieldName, findRequest.filter->serialize(opts)); + + // Let Spec. + if (auto letSpec = findCmd.getLet()) { + auto redactedObj = extractLetSpecShape(letSpec.get(), opts, expCtx); + auto ownedObj = redactedObj.getOwned(); + bob.append(FindCommandRequest::kLetFieldName, std::move(ownedObj)); + } + + if (findRequest.proj) { + bob.append(FindCommandRequest::kProjectionFieldName, + projection_ast::serialize(*findRequest.proj->root(), opts)); + } + + // Assume the hint is correct and contains field names. It is possible that this hint + // doesn't actually represent an index, but we can't detect that here. + // Hint, max, and min won't serialize if the object is empty. + if (!findCmd.getHint().isEmpty()) { + bob.append(FindCommandRequest::kHintFieldName, + extractHintShape(findCmd.getHint(), opts, true)); + // Max/Min aren't valid without hint. 
+ if (!findCmd.getMax().isEmpty()) { + bob.append(FindCommandRequest::kMaxFieldName, + extractHintShape(findCmd.getMax(), opts, false)); + } + if (!findCmd.getMin().isEmpty()) { + bob.append(FindCommandRequest::kMinFieldName, + extractHintShape(findCmd.getMin(), opts, false)); + } + } + + // Sort. + if (findRequest.sort) { + bob.append( + FindCommandRequest::kSortFieldName, + findRequest.sort + ->serialize(SortPattern::SortKeySerialization::kForPipelineSerialization, opts) + .toBson()); + } + + // Fields for literal redaction. Adds limit and skip. + addShapeLiterals(&bob, findCmd, opts); + + // Add the fields that require no redaction. + addRemainingFindCommandFields(&bob, findCmd, opts); + + return bob.obj(); +} + +BSONObj extractQueryShape(const AggregateCommandRequest& aggregateCommand, + const Pipeline& pipeline, + const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx, + const NamespaceString& nss) { + BSONObjBuilder bob; + + // namespace + appendCmdNs(bob, nss, opts); + bob.append("command", "aggregate"); + + // pipeline + { + BSONArrayBuilder pipelineBab( + bob.subarrayStart(AggregateCommandRequest::kPipelineFieldName)); + auto serializedPipeline = pipeline.serializeToBson(opts); + for (const auto& stage : serializedPipeline) { + pipelineBab.append(stage); + } + pipelineBab.doneFast(); + } + + // explain + if (aggregateCommand.getExplain().has_value()) { + bob.append(AggregateCommandRequest::kExplainFieldName, true); + } + + // allowDiskUse + if (auto param = aggregateCommand.getAllowDiskUse(); param.has_value()) { + bob.append(AggregateCommandRequest::kAllowDiskUseFieldName, param.value_or(false)); + } + + // collation + if (auto param = aggregateCommand.getCollation()) { + bob.append(AggregateCommandRequest::kCollationFieldName, param.get()); + } + + // hint + if (auto hint = aggregateCommand.getHint()) { + bob.append(AggregateCommandRequest::kHintFieldName, + extractHintShape(hint.get(), opts, true)); + } + + // let + if (auto letSpec = aggregateCommand.getLet()) { + auto redactedObj = extractLetSpecShape(letSpec.get(), opts, expCtx); + auto ownedObj = redactedObj.getOwned(); + bob.append(FindCommandRequest::kLetFieldName, std::move(ownedObj)); + } + return bob.obj(); +} + +NamespaceStringOrUUID parseNamespaceShape(BSONElement cmdNsElt) { + tassert(7632900, "cmdNs must be an object.", cmdNsElt.type() == BSONType::Object); + auto cmdNs = CommandNamespace::parse(IDLParserContext("cmdNs"), cmdNsElt.embeddedObject()); + + boost::optional tenantId = cmdNs.getTenantId().map(TenantId::parseFromString); + + if (cmdNs.getColl().has_value()) { + tassert(7632903, + "Exactly one of 'uuid' and 'coll' can be defined.", + !cmdNs.getUuid().has_value()); + return NamespaceString(cmdNs.getDb(), cmdNs.getColl().value()); + } else { + tassert(7632904, + "Exactly one of 'uuid' and 'coll' can be defined.", + !cmdNs.getColl().has_value()); + UUID uuid = uassertStatusOK(UUID::parse(cmdNs.getUuid().value().toString())); + return NamespaceStringOrUUID(cmdNs.getDb().toString(), uuid, tenantId); + } +} + +QueryShapeHash hash(const BSONObj& queryShape) { + return QueryShapeHash::computeHash(reinterpret_cast(queryShape.objdata()), + queryShape.objsize()); +} } // namespace mongo::query_shape diff --git a/src/mongo/db/query/query_shape.h b/src/mongo/db/query/query_shape.h index 3ec7c6696b64b..8e7fb689d097b 100644 --- a/src/mongo/db/query/query_shape.h +++ b/src/mongo/db/query/query_shape.h @@ -29,15 +29,31 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" 
+#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/parsed_find_command.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/serialization_options.h" namespace mongo::query_shape { -constexpr StringData kLiteralArgString = "?"_sd; - +using QueryShapeHash = SHA256Block; /** * Computes a BSONObj that is meant to be used to classify queries according to their shape, for the - * purposes of collecting telemetry. + * purposes of collecting queryStats. * * For example, if the MatchExpression represents {a: 2}, it will return the same BSONObj as the * MatchExpression for {a: 1}, {a: 10}, and {a: {$eq: 2}} (identical bits but not sharing memory) @@ -49,9 +65,32 @@ constexpr StringData kLiteralArgString = "?"_sd; * TODO better consider how this interacts with persistent query settings project, and document it. * TODO (TODO SERVER ticket) better distinguish this from a plan cache or CQ 'query shape'. */ -BSONObj predicateShape(const MatchExpression* predicate); +BSONObj debugPredicateShape(const MatchExpression* predicate); +BSONObj representativePredicateShape(const MatchExpression* predicate); + +BSONObj debugPredicateShape(const MatchExpression* predicate, + std::function transformIdentifiersCallback); +BSONObj representativePredicateShape( + const MatchExpression* predicate, + std::function transformIdentifiersCallback); + +BSONObj extractSortShape(const BSONObj& sortSpec, + const boost::intrusive_ptr& expCtx, + const SerializationOptions& opts); + +BSONObj extractQueryShape(const ParsedFindCommand& findRequest, + const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx); +BSONObj extractQueryShape(const AggregateCommandRequest& aggregateCommand, + const Pipeline& pipeline, + const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx, + const NamespaceString& nss); -BSONObj predicateShape(const MatchExpression* predicate, - std::function identifierRedactionPolicy); +NamespaceStringOrUUID parseNamespaceShape(BSONElement cmdNsElt); +void appendNamespaceShape(BSONObjBuilder& bob, + const NamespaceString& nss, + const SerializationOptions& opts); +QueryShapeHash hash(const BSONObj& queryShape); } // namespace mongo::query_shape diff --git a/src/mongo/db/query/query_shape.idl b/src/mongo/db/query/query_shape.idl new file mode 100644 index 0000000000000..fa5eea280d135 --- /dev/null +++ b/src/mongo/db/query/query_shape.idl @@ -0,0 +1,50 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . 
+# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. + +global: + cpp_namespace: "mongo::query_shape" + +imports: + - "mongo/db/basic_types.idl" + + +structs: + CommandNamespace: + description: "Representation of the cmdNs sub-object of the query shape." + fields: + db: + type: string + coll: + type: string + optional: true + uuid: + type: string + optional: true + tenantId: + type: string + optional: true + \ No newline at end of file diff --git a/src/mongo/db/query/query_shape_hash.idl b/src/mongo/db/query/query_shape_hash.idl new file mode 100644 index 0000000000000..578180c68a3c7 --- /dev/null +++ b/src/mongo/db/query/query_shape_hash.idl @@ -0,0 +1,41 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo::query_shape" + +imports: + - "mongo/crypto/sha256_block.idl" + +types: + QueryShapeHash: + bson_serialization_type: string + description: "Type representing SHA256 hash of the QueryShape." + cpp_type: mongo::SHA256Block + serializer: "mongo::SHA256Block::toHexString" + deserializer: "mongo::SHA256Block::fromHexString" diff --git a/src/mongo/db/query/query_shape_test.cpp b/src/mongo/db/query/query_shape_test.cpp index 77df7e710273a..133ccac48ec62 100644 --- a/src/mongo/db/query/query_shape_test.cpp +++ b/src/mongo/db/query/query_shape_test.cpp @@ -27,14 +27,31 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include + +#include + #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/matcher/expression_geo.h" -#include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/matcher/expression_text_base.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/matcher/parsed_match_expression_for_test.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/query_shape.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/query_shape_test_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -42,22 +59,22 @@ namespace { /** * Simplistic redaction strategy for testing which appends the field name to the prefix "REDACT_". */ -std::string redactFieldNameForTest(StringData sd) { +std::string applyHmacForTest(StringData sd) { return "REDACT_" + sd.toString(); } -static const SerializationOptions literalAndFieldRedactOpts{redactFieldNameForTest, - query_shape::kLiteralArgString}; +static const SerializationOptions literalAndFieldRedactOpts{ + applyHmacForTest, LiteralSerializationPolicy::kToDebugTypeString}; BSONObj predicateShape(std::string filterJson) { ParsedMatchExpressionForTest expr(filterJson); - return query_shape::predicateShape(expr.get()); + return query_shape::debugPredicateShape(expr.get()); } BSONObj predicateShapeRedacted(std::string filterJson) { ParsedMatchExpressionForTest expr(filterJson); - return query_shape::predicateShape(expr.get(), redactFieldNameForTest); + return query_shape::debugPredicateShape(expr.get(), applyHmacForTest); } #define ASSERT_SHAPE_EQ_AUTO(expected, actual) \ @@ -70,23 +87,62 @@ BSONObj predicateShapeRedacted(std::string filterJson) { TEST(QueryPredicateShape, Equals) { ASSERT_SHAPE_EQ_AUTO( // Implicit equals - R"({"a":{"$eq":"?"}})", + R"({"a":{"$eq":"?number"}})", "{a: 5}"); ASSERT_SHAPE_EQ_AUTO( // Explicit equals - R"({"a":{"$eq":"?"}})", + R"({"a":{"$eq":"?number"}})", "{a: {$eq: 5}}"); ASSERT_SHAPE_EQ_AUTO( // implicit $and - R"({"$and":[{"a":{"$eq":"?"}},{"b":{"$eq":"?"}}]})", + R"({"$and":[{"a":{"$eq":"?number"}},{"b":{"$eq":"?number"}}]})", "{a: 5, b: 6}"); ASSERT_REDACTED_SHAPE_EQ_AUTO( // Implicit equals - R"({"REDACT_a":{"$eq":"?"}})", + R"({"REDACT_a":{"$eq":"?number"}})", "{a: 5}"); ASSERT_REDACTED_SHAPE_EQ_AUTO( // Explicit equals - R"({"REDACT_a":{"$eq":"?"}})", + R"({"REDACT_a":{"$eq":"?number"}})", "{a: {$eq: 5}}"); ASSERT_REDACTED_SHAPE_EQ_AUTO( // NOLINT - R"({"$and":[{"REDACT_a":{"$eq":"?"}},{"REDACT_b":{"$eq":"?"}}]})", + R"({"$and":[{"REDACT_a":{"$eq":"?number"}},{"REDACT_b":{"$eq":"?number"}}]})", "{a: 5, b: 6}"); + ASSERT_REDACTED_SHAPE_EQ_AUTO( // NOLINT + R"({"REDACT_foo.REDACT_$bar":{"$eq":"?number"}})", + R"({"foo.$bar":0})"); +} + +TEST(QueryPredicateShape, ArraySubTypes) { + ASSERT_SHAPE_EQ_AUTO( // NOLINT + "{a: {$eq: '[]'}}", + "{a: []}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + "{a: {$eq: '?array'}}", + "{a: [2]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array"}})", + "{a: [2, 3]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array"}})", + "{a: [{}]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array"}})", + "{a: [{}, {}]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array"}})", + 
"{a: [[], [], []]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array"}})", + "{a: [[2, 3], ['string'], []]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array<>"}})", + "{a: [{}, 2]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array<>"}})", + "{a: [[], 2]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array<>"}})", + "{a: [[{}, 'string'], 2]}"); + ASSERT_SHAPE_EQ_AUTO( // NOLINT + R"({"a":{"$eq":"?array<>"}})", + "{a: [[{}, 'string'], 2]}"); } TEST(QueryPredicateShape, Comparisons) { @@ -95,22 +151,22 @@ TEST(QueryPredicateShape, Comparisons) { "$and": [ { "a": { - "$lt": "?" + "$lt": "?number" } }, { "b": { - "$gt": "?" + "$gt": "?number" } }, { "c": { - "$gte": "?" + "$gte": "?number" } }, { "c": { - "$lte": "?" + "$lte": "?number" } } ] @@ -121,114 +177,184 @@ TEST(QueryPredicateShape, Comparisons) { namespace { void assertShapeIs(std::string filterJson, BSONObj expectedShape) { ParsedMatchExpressionForTest expr(filterJson); - ASSERT_BSONOBJ_EQ(expectedShape, query_shape::predicateShape(expr.get())); + ASSERT_BSONOBJ_EQ(expectedShape, query_shape::debugPredicateShape(expr.get())); } void assertRedactedShapeIs(std::string filterJson, BSONObj expectedShape) { ParsedMatchExpressionForTest expr(filterJson); ASSERT_BSONOBJ_EQ(expectedShape, - query_shape::predicateShape(expr.get(), redactFieldNameForTest)); + query_shape::debugPredicateShape(expr.get(), applyHmacForTest)); } } // namespace TEST(QueryPredicateShape, Regex) { // Note/warning: 'fromjson' will parse $regex into a /regex/, so these tests can't use // auto-updating BSON assertions. - assertShapeIs("{a: /a+/}", BSON("a" << BSON("$regex" << query_shape::kLiteralArgString))); + assertShapeIs("{a: /a+/}", + BSON("a" << BSON("$regex" + << "?string"))); assertShapeIs("{a: /a+/i}", - BSON("a" << BSON("$regex" << query_shape::kLiteralArgString << "$options" - << query_shape::kLiteralArgString))); + BSON("a" << BSON("$regex" + << "?string" + << "$options" + << "?string"))); + assertRedactedShapeIs("{a: /a+/}", + BSON("REDACT_a" << BSON("$regex" + << "?string"))); assertRedactedShapeIs("{a: /a+/}", - BSON("REDACT_a" << BSON("$regex" << query_shape::kLiteralArgString))); + BSON("REDACT_a" << BSON("$regex" + << "?string"))); } TEST(QueryPredicateShape, Mod) { ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$mod":"?"}})", + R"({"a":{"$mod":["?number","?number"]}})", "{a: {$mod: [2, 0]}}"); } TEST(QueryPredicateShape, Exists) { ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$exists":"?"}})", + R"({"a":{"$exists":"?bool"}})", "{a: {$exists: true}}"); } TEST(QueryPredicateShape, In) { // Any number of children is always the same shape ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$in":["?"]}})", + R"({"a":{"$in":"?array"}})", "{a: {$in: [1]}}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$in":["?"]}})", + R"({"a":{"$in":"?array<>"}})", "{a: {$in: [1, 4, 'str', /regex/]}}"); } TEST(QueryPredicateShape, BitTestOperators) { ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$bitsAllSet":"?"}})", + R"({"a":{"$bitsAllSet":"?array"}})", "{a: {$bitsAllSet: [1, 5]}}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$bitsAllSet":"?"}})", + R"({"a":{"$bitsAllSet":"?array"}})", "{a: {$bitsAllSet: 50}}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$bitsAnySet":"?"}})", + R"({"a":{"$bitsAnySet":"?array"}})", "{a: {$bitsAnySet: [1, 5]}}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$bitsAnySet":"?"}})", + R"({"a":{"$bitsAnySet":"?array"}})", "{a: {$bitsAnySet: 50}}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - 
R"({"a":{"$bitsAllClear":"?"}})", + R"({"a":{"$bitsAllClear":"?array"}})", "{a: {$bitsAllClear: [1, 5]}}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$bitsAllClear":"?"}})", + R"({"a":{"$bitsAllClear":"?array"}})", "{a: {$bitsAllClear: 50}}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$bitsAnyClear":"?"}})", + R"({"a":{"$bitsAnyClear":"?array"}})", "{a: {$bitsAnyClear: [1, 5]}}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$bitsAnyClear":"?"}})", + R"({"a":{"$bitsAnyClear":"?array"}})", "{a: {$bitsAnyClear: 50}}"); } TEST(QueryPredicateShape, AlwaysBoolean) { ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"$alwaysTrue":"?"})", + R"({"$alwaysTrue":"?number"})", "{$alwaysTrue: 1}"); ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"$alwaysFalse":"?"})", + R"({"$alwaysFalse":"?number"})", "{$alwaysFalse: 1}"); } TEST(QueryPredicateShape, And) { ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"$and":[{"a":{"$lt":"?"}},{"b":{"$gte":"?"}},{"c":{"$lte":"?"}}]})", + R"({ + "$and": [ + { + "a": { + "$lt": "?number" + } + }, + { + "b": { + "$gte": "?number" + } + }, + { + "c": { + "$lte": "?number" + } + } + ] + })", "{$and: [{a: {$lt: 5}}, {b: {$gte: 3}}, {c: {$lte: 10}}]}"); } TEST(QueryPredicateShape, Or) { ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"$or":[{"a":{"$eq":"?"}},{"b":{"$in":["?"]}},{"c":{"$gt":"?"}}]})", + R"({ + "$or": [ + { + "a": { + "$eq": "?number" + } + }, + { + "b": { + "$in": "?array" + } + }, + { + "c": { + "$gt": "?number" + } + } + ] + })", "{$or: [{a: 5}, {b: {$in: [1,2,3]}}, {c: {$gt: 10}}]}"); } TEST(QueryPredicateShape, ElemMatch) { // ElemMatchObjectMatchExpression ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$elemMatch":{"$and":[{"b":{"$eq":"?"}},{"c":{"$exists":"?"}}]}}})", + R"({ + "a": { + "$elemMatch": { + "$and": [ + { + "b": { + "$eq": "?number" + } + }, + { + "c": { + "$exists": "?bool" + } + } + ] + } + } + })", "{a: {$elemMatch: {b: 5, c: {$exists: true}}}}"); // ElemMatchValueMatchExpression ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"a":{"$elemMatch":{"$gt":"?","$lt":"?"}}})", + R"({"a":{"$elemMatch":{"$gt":"?number","$lt":"?number"}}})", "{a: {$elemMatch: {$gt: 5, $lt: 10}}}"); // Nested ASSERT_REDACTED_SHAPE_EQ_AUTO( // NOLINT - R"({"REDACT_a":{"$elemMatch":{"$elemMatch":{"$gt":"?","$lt":"?"}}}})", + R"({ + "REDACT_a": { + "$elemMatch": { + "$elemMatch": { + "$gt": "?number", + "$lt": "?number" + } + } + } + })", "{a: {$elemMatch: {$elemMatch: {$gt: 5, $lt: 10}}}}"); } @@ -240,7 +366,7 @@ TEST(QueryPredicateShape, InternalBucketGeoWithinMatchExpression) { R"({ "$_internalBucketGeoWithin": { "withinRegion": { - "$centerSphere": "?" + "$centerSphere": "?array<>" }, "field": "REDACT_a" } @@ -250,26 +376,26 @@ TEST(QueryPredicateShape, InternalBucketGeoWithinMatchExpression) { TEST(QueryPredicateShape, NorMatchExpression) { ASSERT_REDACTED_SHAPE_EQ_AUTO( // NOLINT - R"({"$nor":[{"REDACT_a":{"$lt":"?"}},{"REDACT_b":{"$gt":"?"}}]})", + R"({"$nor":[{"REDACT_a":{"$lt":"?number"}},{"REDACT_b":{"$gt":"?number"}}]})", "{ $nor: [ { a: {$lt: 5} }, { b: {$gt: 4} } ] }"); } TEST(QueryPredicateShape, NotMatchExpression) { ASSERT_REDACTED_SHAPE_EQ_AUTO( // NOLINT - R"({"REDACT_price":{"$not":{"$gt":"?"}}})", + R"({"REDACT_price":{"$not":{"$gt":"?number"}}})", "{ price: { $not: { $gt: 1.99 } } }"); // Test the special case where NotMatchExpression::serialize() reduces to $alwaysFalse. 
auto emptyAnd = std::make_unique(); const MatchExpression& notExpr = NotMatchExpression(std::move(emptyAnd)); auto serialized = notExpr.serialize(literalAndFieldRedactOpts); ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({"$alwaysFalse":"?"})", + R"({"$alwaysFalse":"?number"})", serialized); } TEST(QueryPredicateShape, SizeMatchExpression) { ASSERT_REDACTED_SHAPE_EQ_AUTO( // NOLINT - R"({"REDACT_price":{"$size":"?"}})", + R"({"REDACT_price":{"$size":"?number"}})", "{ price: { $size: 2 } }"); } @@ -279,10 +405,10 @@ TEST(QueryPredicateShape, TextMatchExpression) { ASSERT_BSONOBJ_EQ_AUTO( // NOLINT R"({ "$text": { - "$search": "?", - "$language": "?", - "$caseSensitive": "?", - "$diacriticSensitive": "?" + "$search": "?string", + "$language": "?string", + "$caseSensitive": "?bool", + "$diacriticSensitive": "?bool" } })", expr->serialize(literalAndFieldRedactOpts)); @@ -297,7 +423,7 @@ TEST(QueryPredicateShape, TwoDPtInAnnulusExpression) { TEST(QueryPredicateShape, WhereMatchExpression) { ASSERT_SHAPE_EQ_AUTO( // NOLINT - R"({"$where":"?"})", + R"({"$where":"?javascript"})", "{$where: \"some_code()\"}"); } @@ -308,7 +434,7 @@ BSONObj queryShapeForOptimizedExprExpression(std::string exprPredicateJson) { // the computation on any MatchExpression, and this is the easiest way we can create this type // of MatchExpression node. auto optimized = MatchExpression::optimize(expr.release()); - return query_shape::predicateShape(optimized.get()); + return query_shape::debugPredicateShape(optimized.get()); } TEST(QueryPredicateShape, OptimizedExprPredicates) { @@ -317,16 +443,14 @@ TEST(QueryPredicateShape, OptimizedExprPredicates) { "$and": [ { "a": { - "$_internalExprEq": "?" + "$_internalExprEq": "?number" } }, { "$expr": { "$eq": [ "$a", - { - "$const": "?" - } + "?number" ] } } @@ -339,16 +463,14 @@ TEST(QueryPredicateShape, OptimizedExprPredicates) { "$and": [ { "a": { - "$_internalExprLt": "?" + "$_internalExprLt": "?number" } }, { "$expr": { "$lt": [ "$a", - { - "$const": "?" - } + "?number" ] } } @@ -361,16 +483,14 @@ TEST(QueryPredicateShape, OptimizedExprPredicates) { "$and": [ { "a": { - "$_internalExprLte": "?" + "$_internalExprLte": "?number" } }, { "$expr": { "$lte": [ "$a", - { - "$const": "?" - } + "?number" ] } } @@ -383,16 +503,14 @@ TEST(QueryPredicateShape, OptimizedExprPredicates) { "$and": [ { "a": { - "$_internalExprGt": "?" + "$_internalExprGt": "?number" } }, { "$expr": { "$gt": [ "$a", - { - "$const": "?" - } + "?number" ] } } @@ -405,16 +523,14 @@ TEST(QueryPredicateShape, OptimizedExprPredicates) { "$and": [ { "a": { - "$_internalExprGte": "?" + "$_internalExprGte": "?number" } }, { "$expr": { "$gte": [ "$a", - { - "$const": "?" 
- } + "?number" ] } } @@ -423,4 +539,221 @@ TEST(QueryPredicateShape, OptimizedExprPredicates) { queryShapeForOptimizedExprExpression("{$expr: {$gte: ['$a', 2]}}")); } +TEST(SortPatternShape, NormalSortPattern) { + boost::intrusive_ptr expCtx; + expCtx = make_intrusive(); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"a.b.c":1,"foo":-1})", + query_shape::extractSortShape(fromjson(R"({"a.b.c": 1, "foo": -1})"), + expCtx, + SerializationOptions::kDebugQueryShapeSerializeOptions)); +} + +TEST(SortPatternShape, NaturalSortPattern) { + boost::intrusive_ptr expCtx; + expCtx = make_intrusive(); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({$natural: 1})", + query_shape::extractSortShape(fromjson(R"({$natural: 1})"), + expCtx, + SerializationOptions::kDebugQueryShapeSerializeOptions)); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({$natural: -1})", + query_shape::extractSortShape(fromjson(R"({$natural: -1})"), + expCtx, + SerializationOptions::kDebugQueryShapeSerializeOptions)); +} + +TEST(SortPatternShape, NaturalSortPatternWithMeta) { + boost::intrusive_ptr expCtx; + expCtx = make_intrusive(); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({$natural: 1, x: '?object'})", + query_shape::extractSortShape(fromjson(R"({$natural: 1, x: {$meta: "textScore"}})"), + expCtx, + SerializationOptions::kDebugQueryShapeSerializeOptions)); +} + +TEST(SortPatternShape, MetaPatternWithoutNatural) { + boost::intrusive_ptr expCtx; + expCtx = make_intrusive(); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"normal":1,"$computed1":{"$meta":"textScore"}})", + query_shape::extractSortShape(fromjson(R"({normal: 1, x: {$meta: "textScore"}})"), + expCtx, + SerializationOptions::kDebugQueryShapeSerializeOptions)); +} + +// Here we have one test to ensure that the redaction policy is accepted and applied in the +// query_shape utility, but there are more extensive redaction tests in sort_pattern_test.cpp +TEST(SortPatternShape, RespectsRedactionPolicy) { + boost::intrusive_ptr expCtx; + expCtx = make_intrusive(); + SerializationOptions opts = SerializationOptions::kDebugQueryShapeSerializeOptions; + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"REDACT_normal":1,"REDACT_y":1})", + query_shape::extractSortShape(fromjson(R"({normal: 1, y: 1})"), expCtx, opts)); + + // No need to redact $natural. 
+ ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({"$natural":1,"REDACT_y":1})", + query_shape::extractSortShape(fromjson(R"({$natural: 1, y: 1})"), expCtx, opts)); +} + +TEST(QueryShapeIDL, ShapifyIDLStruct) { + SerializationOptions options; + options.transformIdentifiers = true; + options.transformIdentifiersCallback = [](StringData s) -> std::string { + return str::stream() << "HASH<" << s << ">"; + }; + options.literalPolicy = LiteralSerializationPolicy::kToDebugTypeString; + + auto nested = NestedStruct("value", + ExampleEnumEnum::Value1, + 1337, + "hello", + {1, 2, 3, 4}, + "field.path", + {"field.path.1", "fieldpath2"}, + NamespaceString::createNamespaceString_forTest("db", "coll"), + NamespaceString::createNamespaceString_forTest("db", "coll"), + 177, + true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "stringField": "value", + "enumField": "EnumValue1", + "stringIntVariant": 1337, + "stringIntVariantEnum": "hello", + "arrayOfInts": [ + 1, + 2, + 3, + 4 + ], + "fieldpath": "field.path", + "fieldpathList": [ + "field.path.1", + "fieldpath2" + ], + "nss": "db.coll", + "plainNss": "db.coll", + "safeInt64Field": 177, + "boolField": true + })", + nested.toBSON()); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "stringField": "?string", + "enumField": "EnumValue1", + "stringIntVariant": "?number", + "stringIntVariantEnum": "hello", + "arrayOfInts": "?array", + "fieldpath": "HASH.HASH", + "fieldpathList": [ + "HASH.HASH.HASH<1>", + "HASH" + ], + "nss": "HASH", + "plainNss": "db.coll", + "safeInt64Field": "?number", + "boolField": "?bool" + })", + nested.toBSON(options)); + + + auto parent = ParentStruct(nested, nested); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "nested_shape": { + "stringField": "value", + "enumField": "EnumValue1", + "stringIntVariant": 1337, + "stringIntVariantEnum": "hello", + "arrayOfInts": [ + 1, + 2, + 3, + 4 + ], + "fieldpath": "field.path", + "fieldpathList": [ + "field.path.1", + "fieldpath2" + ], + "nss": "db.coll", + "plainNss": "db.coll", + "safeInt64Field": 177, + "boolField": true + }, + "nested_no_shape": { + "stringField": "value", + "enumField": "EnumValue1", + "stringIntVariant": 1337, + "stringIntVariantEnum": "hello", + "arrayOfInts": [ + 1, + 2, + 3, + 4 + ], + "fieldpath": "field.path", + "fieldpathList": [ + "field.path.1", + "fieldpath2" + ], + "nss": "db.coll", + "plainNss": "db.coll", + "safeInt64Field": 177, + "boolField": true + } + })", + parent.toBSON()); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "nested_shape": { + "stringField": "?string", + "enumField": "EnumValue1", + "stringIntVariant": "?number", + "stringIntVariantEnum": "hello", + "arrayOfInts": "?array", + "fieldpath": "HASH.HASH", + "fieldpathList": [ + "HASH.HASH.HASH<1>", + "HASH" + ], + "nss": "HASH", + "plainNss": "db.coll", + "safeInt64Field": "?number", + "boolField": "?bool" + }, + "nested_no_shape": { + "stringField": "value", + "enumField": "EnumValue1", + "stringIntVariant": 1337, + "stringIntVariantEnum": "hello", + "arrayOfInts": [ + 1, + 2, + 3, + 4 + ], + "fieldpath": "field.path", + "fieldpathList": [ + "field.path.1", + "fieldpath2" + ], + "nss": "db.coll", + "plainNss": "db.coll", + "safeInt64Field": 177, + "boolField": true + } + })", + parent.toBSON(options)); +} + } // namespace mongo diff --git a/src/mongo/db/query/query_shape_test.idl b/src/mongo/db/query/query_shape_test.idl new file mode 100644 index 0000000000000..4123943446dcb --- /dev/null +++ b/src/mongo/db/query/query_shape_test.idl @@ -0,0 +1,95 @@ +# Copyright (C) 2023-present MongoDB, Inc. 
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + +enums: + ExampleEnum: + description: "" + type: string + values: + Value1: "EnumValue1" + Value2: "EnumValue2" + +structs: + NestedStruct: + query_shape_component: true + strict: true + description: "" + fields: + stringField: + query_shape: literal + type: string + enumField: + query_shape: parameter + type: ExampleEnum + stringIntVariant: + query_shape: literal + type: + variant: [string, int] + stringIntVariantEnum: + query_shape: parameter + type: + variant: [string, int] + arrayOfInts: + query_shape: literal + type: array + fieldpath: + query_shape: anonymize + type: string + fieldpathList: + query_shape: anonymize + type: array + nss: + query_shape: custom + type: namespacestring + plainNss: + query_shape: parameter + type: namespacestring + safeInt64Field: + query_shape: literal + type: safeInt64 + boolField: + query_shape: literal + type: bool + + ParentStruct: + query_shape_component: true + strict: true + description: "" + fields: + nested_shape: + query_shape: literal + type: NestedStruct + nested_no_shape: + query_shape: parameter + type: NestedStruct diff --git a/src/mongo/db/query/query_solution.cpp b/src/mongo/db/query/query_solution.cpp index b2c287843bd8c..0c219d71f566b 100644 --- a/src/mongo/db/query/query_solution.cpp +++ b/src/mongo/db/query/query_solution.cpp @@ -27,31 +27,53 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include +#include +#include #include -#include "mongo/db/query/query_solution.h" - -#include -#include +#include +#include +#include +#include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" -#include "mongo/bson/mutable/document.h" #include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/field_ref.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index_names.h" #include "mongo/db/keypattern.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_geo.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/planner_analysis.h" #include "mongo/db/query/planner_wildcard_helpers.h" +#include "mongo/db/query/projection_ast.h" #include "mongo/db/query/projection_ast_util.h" +#include "mongo/db/query/query_feature_flags_gen.h" #include "mongo/db/query/query_planner_common.h" -#include "mongo/db/query/util/set_util.h" +#include "mongo/db/query/query_solution.h" namespace mongo { @@ -176,6 +198,20 @@ bool QuerySolutionNode::hasNode(StageType type) const { return false; } +bool QuerySolutionNode::isEligibleForPlanCache() const { + if (!eligibleForPlanCache) { + return false; + } + + for (auto&& child : children) { + if (!child->isEligibleForPlanCache()) { + return false; + } + } + + return true; +} + std::string QuerySolution::summaryString() const { tassert(5968205, "QuerySolutionNode cannot be null in this QuerySolution", _root); @@ -324,6 +360,11 @@ CollectionScanNode::CollectionScanNode() void CollectionScanNode::computeProperties() { if (clusteredIndex && hasCompatibleCollation) { auto sort = clustered_util::getSortPattern(*clusteredIndex); + if (direction == -1) { + // If we are scanning the collection in the descending direction, we provide the reverse + // sort order. 
+ sort = QueryPlannerCommon::reverseSortObj(sort); + } sortSet = ProvidedSortSet(sort); } } @@ -332,7 +373,7 @@ void CollectionScanNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent); *ss << "COLLSCAN\n"; addIndent(ss, indent + 1); - *ss << "ns = " << name << '\n'; + *ss << "ns = " << toStringForLogging(nss) << '\n'; if (nullptr != filter) { addIndent(ss, indent + 1); *ss << "filter = " << filter->debugString(); @@ -344,9 +385,11 @@ std::unique_ptr CollectionScanNode::clone() const { auto copy = std::make_unique(); cloneBaseData(copy.get()); - copy->name = this->name; + copy->nss = this->nss; copy->tailable = this->tailable; copy->direction = this->direction; + copy->isClustered = this->isClustered; + copy->isOplog = this->isOplog; copy->shouldTrackLatestOplogTimestamp = this->shouldTrackLatestOplogTimestamp; copy->assertTsHasNotFallenOff = this->assertTsHasNotFallenOff; copy->shouldWaitForOplogVisibility = this->shouldWaitForOplogVisibility; @@ -659,6 +702,14 @@ void IndexScanNode::appendToString(str::stream* ss, int indent) const { addCommon(ss, indent); } +bool IndexScanNode::hasStringBounds(const string& field) const { + std::set collatedFields = getFieldsWithStringBounds(bounds, index.keyPattern); + if (collatedFields.find(field) != collatedFields.end()) { + return true; + } + return false; +} + FieldAvailability IndexScanNode::getFieldAvailability(const string& field) const { // A $** index whose bounds overlap the object type bracket cannot provide covering, since the // index only contains the leaf nodes along each of the object's subpaths. @@ -685,11 +736,18 @@ FieldAvailability IndexScanNode::getFieldAvailability(const string& field) const return FieldAvailability::kNotProvided; } + // If the index and the query collator are the same and the field is in the index we can use it + // for sorting and search. + if (index.collator && CollatorInterface::collatorsMatch(index.collator, this->queryCollator)) { + if (hasStringBounds(field)) { + return FieldAvailability::kCollatedProvided; + } + } + // If the index has a non-simple collation and we have collation keys inside 'field', then this // index scan does not provide that field (and the query cannot be covered). if (index.collator) { - std::set collatedFields = getFieldsWithStringBounds(bounds, index.keyPattern); - if (collatedFields.find(field) != collatedFields.end()) { + if (hasStringBounds(field)) { return FieldAvailability::kNotProvided; } } @@ -986,12 +1044,17 @@ ProvidedSortSet computeSortsForScan(const IndexEntry& index, "The bounds did not have as many fields as the key pattern.", static_cast(index.keyPattern.nFields()) == bounds.fields.size()); - // No sorts are provided if the bounds for '$_path' consist of multiple intervals. This can - // happen for existence queries. For example, {a: {$exists: true}} results in bounds - // [["a","a"], ["a.", "a/")] for '$_path' so that keys from documents where "a" is a nested - // object are in bounds. - if (bounds.fields[index.wildcardFieldPos - 1].intervals.size() != 1u) { - return {}; + // TODO SERVER-68303: Merge this check with the same check below for CWI. + // + // No sorts are provided if this wildcard index has one single field and the bounds for + // '$_path' consist of multiple intervals. This can happen for existence queries. For + // example, {a: {$exists: true}} results in bounds + // [["a","a"], ["a.", "a/")] for '$_path' so that keys from documents where "a" is a + // nested object are in bounds. 
+ if (bounds.fields.size() == 2u) { + if (bounds.fields[index.wildcardFieldPos - 1].intervals.size() != 1u) { + return {}; + } } BSONObjBuilder sortPatternStripped; @@ -1000,13 +1063,24 @@ ProvidedSortSet computeSortsForScan(const IndexEntry& index, // (Ignore FCV check): This is intentional because we want clusters which have wildcard // indexes still be able to use the feature even if the FCV is downgraded. if (feature_flags::gFeatureFlagCompoundWildcardIndexes.isEnabledAndIgnoreFCVUnsafe()) { - bool hasPathField = false; for (auto elem : sortPatternProvidedByIndex) { if (elem.fieldNameStringData() == "$_path"_sd) { - if (hasPathField) { + tassert(7767200, + "The bounds cannot be empty.", + bounds.fields[index.wildcardFieldPos - 1].intervals.size() > 0u); + + auto allValuePath = wcp::makeAllValuesForPath(); + // No sorts on the following fields should be provided if it's full scan on the + // '$_path' field or the bounds for '$_path' consist of multiple intervals. This + // can happen for existence queries. For example, {a: {$exists: true}} results + // in bounds [["a","a"], ["a.", "a/")] for '$_path' so that keys from documents + // where "a" is a nested object are in bounds. + if (bounds.fields[index.wildcardFieldPos - 1].intervals.size() != 1u || + std::equal(bounds.fields[index.wildcardFieldPos - 1].intervals.begin(), + bounds.fields[index.wildcardFieldPos - 1].intervals.end(), + allValuePath.begin())) { break; } - hasPathField = true; } else { sortPatternStripped.append(elem); } @@ -1695,7 +1769,7 @@ void EqLookupNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent); *ss << "EQ_LOOKUP\n"; addIndent(ss, indent + 1); - *ss << "from = " << foreignCollection << "\n"; + *ss << "from = " << toStringForLogging(foreignCollection) << "\n"; addIndent(ss, indent + 1); *ss << "as = " << joinField.fullPath() << "\n"; addIndent(ss, indent + 1); @@ -1738,4 +1812,15 @@ void SentinelNode::appendToString(str::stream* ss, int indent) const { addIndent(ss, indent); *ss << "SENTINEL\n"; } + +std::unique_ptr SearchNode::clone() const { + return std::make_unique(isSearchMeta); +} + +void SearchNode::appendToString(str::stream* ss, int indent) const { + addIndent(ss, indent); + *ss << "SEARCH\n"; + addIndent(ss, indent + 1); + *ss << "isSearchMeta = " << isSearchMeta << '\n'; +} } // namespace mongo diff --git a/src/mongo/db/query/query_solution.h b/src/mongo/db/query/query_solution.h index 8beba56a658ac..e3921f49bad22 100644 --- a/src/mongo/db/query/query_solution.h +++ b/src/mongo/db/query/query_solution.h @@ -29,23 +29,55 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include +#include +#include +#include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/fts/fts_query.h" #include "mongo/db/jsobj.h" #include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/classic_plan_cache.h" +#include 
"mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/interval_evaluation_tree.h" #include "mongo/db/query/plan_enumerator_explain_info.h" +#include "mongo/db/query/projection.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/record_id_bound.h" #include "mongo/db/query/stage_types.h" +#include "mongo/db/record_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #include "mongo/util/id_generator.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -63,6 +95,10 @@ enum class FieldAvailability { // can happen when the field is a hashed field in an index. kHashedValueProvided, + // The field is available as ICU encoded string and can be used to do sorting but it does not + // provide the actual value. + kCollatedProvided, + // The field is completely provided. kFullyProvided, }; @@ -148,7 +184,7 @@ struct QuerySolutionNode { std::string toString() const; /** - * What stage should this be transcribed to? See stage_types.h. + * What stage should this node be transcribed to? See stage_types.h. */ virtual StageType getType() const = 0; @@ -254,6 +290,11 @@ struct QuerySolutionNode { */ bool hasNode(StageType type) const; + /** + * True, if this node, and all of it's children are eligible to be cached. + */ + bool isEligibleForPlanCache() const; + /** * Returns the id associated with this node. Each node in a 'QuerySolution' tree is assigned a * unique identifier, which are assigned as sequential positive integers starting from 1. An id @@ -297,6 +338,8 @@ struct QuerySolutionNode { } } + bool eligibleForPlanCache = true; + private: // Allows the QuerySolution constructor to set '_nodeId'. friend class QuerySolution; @@ -345,6 +388,13 @@ class QuerySolution { return _root && _root->hasNode(type); } + /** + * Return true if all nodes in the solution tree can be cached. + */ + bool isEligibleForPlanCache() const { + return !_root || _root->isEligibleForPlanCache(); + } + /** * Output a human-readable std::string representing the plan. */ @@ -453,10 +503,24 @@ struct CollectionScanNode : public QuerySolutionNodeWithSortSet { return false; } + // Tells whether this scan will be performed as a clustered collection scan in SBE. + bool doSbeClusteredCollectionScan() const { + return (isClustered && !isOplog && (minRecord || maxRecord || resumeAfterRecordId)); + } + + // Tells whether this scan will be performed as a clustered collection scan in classic. + bool doClusteredCollectionScanClassic() const { + return (isClustered && !isOplog && (minRecord || maxRecord)); + } + + void markNotEligibleForPlanCache() { + eligibleForPlanCache = false; + } + std::unique_ptr clone() const final; // Name of the namespace. - std::string name; + NamespaceString nss; // If present, this parameter sets the start point of a forward scan or the end point of a // reverse scan. @@ -492,8 +556,15 @@ struct CollectionScanNode : public QuerySolutionNodeWithSortSet { // Assert that the specified timestamp has not fallen off the oplog or change collection. boost::optional assertTsHasNotFallenOff = boost::none; + // Scan direction: 1 means forward; -1 means reverse. int direction{1}; + // Tells whether the collection is clustered (which includes oplog collections). + bool isClustered = false; + + // Tells whether the collection is an oplog. 
+ bool isOplog = false; + // By default, includes the minRecord and maxRecord when present. CollectionScanParams::ScanBoundInclusion boundInclusion = CollectionScanParams::ScanBoundInclusion::kIncludeBothStartAndEndRecords; @@ -502,6 +573,7 @@ struct CollectionScanNode : public QuerySolutionNodeWithSortSet { bool shouldWaitForOplogVisibility = false; // Once the first matching document is found, assume that all documents after it must match. + // This is useful for oplog queries where we know we will see records ordered by the ts field. bool stopApplyingFilterAfterFirstMatch = false; // Whether the collection scan should have low storage admission priority. @@ -776,6 +848,11 @@ struct IndexScanNode : public QuerySolutionNodeWithSortSet { bool fetched() const { return false; } + /** + * This function checks if the given field has string bounds. This is needed to check if we need + * to do some special handling in the case of collations. + */ + bool hasStringBounds(const std::string& field) const; FieldAvailability getFieldAvailability(const std::string& field) const; bool sortedByDiskLoc() const; @@ -823,7 +900,7 @@ struct ReturnKeyNode : public QuerySolutionNode { std::vector sortKeyMetaFields) : QuerySolutionNode(std::move(child)), sortKeyMetaFields(std::move(sortKeyMetaFields)) {} - StageType getType() const final { + virtual StageType getType() const { return STAGE_RETURN_KEY; } @@ -913,7 +990,7 @@ struct ProjectionNode : public QuerySolutionNodeWithSortSet { struct ProjectionNodeDefault final : ProjectionNode { using ProjectionNode::ProjectionNode; - StageType getType() const final { + virtual StageType getType() const { return STAGE_PROJECTION_DEFAULT; } @@ -935,7 +1012,7 @@ struct ProjectionNodeCovered final : ProjectionNode { : ProjectionNode(std::move(child), fullExpression, std::move(proj)), coveredKeyObj(std::move(coveredKeyObj)) {} - StageType getType() const final { + virtual StageType getType() const { return STAGE_PROJECTION_COVERED; } @@ -956,7 +1033,7 @@ struct ProjectionNodeCovered final : ProjectionNode { struct ProjectionNodeSimple final : ProjectionNode { using ProjectionNode::ProjectionNode; - StageType getType() const final { + virtual StageType getType() const { return STAGE_PROJECTION_SIMPLE; } @@ -968,7 +1045,7 @@ struct ProjectionNodeSimple final : ProjectionNode { }; struct SortKeyGeneratorNode : public QuerySolutionNode { - StageType getType() const final { + virtual StageType getType() const { return STAGE_SORT_KEY_GENERATOR; } @@ -1043,7 +1120,7 @@ struct SortNode : public QuerySolutionNodeWithSortSet { * Represents sort algorithm that can handle any kind of input data. */ struct SortNodeDefault final : public SortNode { - virtual StageType getType() const override { + virtual StageType getType() const { return STAGE_SORT_DEFAULT; } @@ -1303,6 +1380,11 @@ struct CountScanNode : public QuerySolutionNodeWithSortSet { BSONObj endKey; bool endKeyInclusive; + + /** + * A vector of Interval Evaluation Trees (IETs) with the same ordering as the index key pattern. 
+ */ + std::vector iets; }; struct EofNode : public QuerySolutionNodeWithSortSet { @@ -1332,7 +1414,7 @@ struct EofNode : public QuerySolutionNodeWithSortSet { struct TextOrNode : public OrNode { TextOrNode() {} - StageType getType() const override { + virtual StageType getType() const { return STAGE_TEXT_OR; } @@ -1344,7 +1426,7 @@ struct TextMatchNode : public QuerySolutionNodeWithSortSet { TextMatchNode(IndexEntry index, std::unique_ptr ftsQuery, bool wantTextScore) : index(std::move(index)), ftsQuery(std::move(ftsQuery)), wantTextScore(wantTextScore) {} - StageType getType() const override { + virtual StageType getType() const { return STAGE_TEXT_MATCH; } @@ -1401,12 +1483,12 @@ struct GroupNode : public QuerySolutionNode { expression::addDependencies(acc.expr.argument.get(), &deps); } - requiredFields = deps.fields; + requiredFields = std::move(deps.fields); needWholeDocument = deps.needWholeDocument; needsAnyMetadata = deps.getNeedsAnyMetadata(); } - StageType getType() const override { + virtual StageType getType() const { return STAGE_GROUP; } @@ -1505,7 +1587,7 @@ struct EqLookupNode : public QuerySolutionNode { idxEntry(std::move(idxEntry)), shouldProduceBson(shouldProduceBson) {} - StageType getType() const override { + virtual StageType getType() const { return STAGE_EQ_LOOKUP; } @@ -1578,10 +1660,9 @@ struct EqLookupNode : public QuerySolutionNode { }; struct SentinelNode : public QuerySolutionNode { - SentinelNode() {} - StageType getType() const override { + virtual StageType getType() const { return STAGE_SENTINEL; } @@ -1604,4 +1685,37 @@ struct SentinelNode : public QuerySolutionNode { std::unique_ptr clone() const final; }; + +struct SearchNode : public QuerySolutionNode { + explicit SearchNode(bool isSearchMeta) : isSearchMeta(isSearchMeta) {} + + StageType getType() const override { + return STAGE_SEARCH; + } + + void appendToString(str::stream* ss, int indent) const override; + + bool fetched() const { + return true; + } + + FieldAvailability getFieldAvailability(const std::string& field) const { + return FieldAvailability::kFullyProvided; + } + + bool sortedByDiskLoc() const override { + return false; + } + + const ProvidedSortSet& providedSorts() const final { + return kEmptySet; + } + + std::unique_ptr clone() const final; + + /** + * True for $searchMeta, False for $search query. + */ + bool isSearchMeta; +}; } // namespace mongo diff --git a/src/mongo/db/query/query_solution_test.cpp b/src/mongo/db/query/query_solution_test.cpp index 69e1d5614e65a..2a528f1ce7aaa 100644 --- a/src/mongo/db/query/query_solution_test.cpp +++ b/src/mongo/db/query/query_solution_test.cpp @@ -27,21 +27,37 @@ * it in the license file. 
*/ -#include - -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include #include +#include +#include -#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/index_bounds_builder.h" #include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -54,6 +70,8 @@ std::ostream& operator<<(std::ostream& os, FieldAvailability value) { return os << "NotProvided"; case FieldAvailability::kHashedValueProvided: return os << "HashedValueProvided"; + case FieldAvailability::kCollatedProvided: + return os << "CollatedValueProvided"; case FieldAvailability::kFullyProvided: return os << "FullyProvided"; } @@ -108,15 +126,32 @@ IndexEntry buildSimpleIndexEntry(const BSONObj& kp) { nullptr}; } +/** + * Make a minimal IndexEntry from just a key pattern and a collation. A dummy name will be added. + */ +IndexEntry buildSimpleIndexEntry(const BSONObj& kp, CollatorInterface* ci) { + return {kp, + IndexNames::nameToType(IndexNames::findPluginName(kp)), + IndexDescriptor::kLatestIndexVersion, + false, + {}, + {}, + false, + false, + CoreIndexInfo::Identifier("test_foo"), + nullptr, + {}, + ci, + nullptr}; +} + void assertNamespaceVectorsAreEqual(const std::vector& secondaryNssVector, const std::vector& expectedNssVector) { ASSERT_EQ(secondaryNssVector.size(), expectedNssVector.size()); for (size_t i = 0; i < secondaryNssVector.size(); ++i) { - auto secondary = secondaryNssVector[i].nss(); - auto expected = expectedNssVector[i].nss(); - ASSERT(secondary != boost::none); - ASSERT(expected != boost::none); - ASSERT_EQ(*secondary, *expected); + ASSERT(secondaryNssVector[i].isNamespaceString()); + ASSERT(expectedNssVector[i].isNamespaceString()); + ASSERT_EQ(secondaryNssVector[i].nss(), expectedNssVector[i].nss()); } } @@ -531,6 +566,27 @@ TEST(QuerySolutionTest, GetFieldsWithStringBoundsIdentifiesStringsWithInclusiveB ASSERT_TRUE(fields.count("a")); } +TEST(QuerySolutionTest, IndexScanWithCollatedValues) { + IndexBounds bounds; + CollatorInterfaceMock queryCollator(CollatorInterfaceMock::MockType::kReverseString); + + BSONObj keyPattern = BSON("a" << 1); + bounds.isSimpleRange = true; + bounds.startKey = fromjson("{'a': 1}"); + bounds.endKey = fromjson("{'a': ''}"); + bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; + + IndexScanNode node{buildSimpleIndexEntry(BSON("a" << 1))}; + node.bounds = bounds; + node.index.collator = &queryCollator; + node.queryCollator = &queryCollator; + node.computeProperties(); + + ASSERT_TRUE(node.hasStringBounds("a")); + ASSERT_EQ(node.getFieldAvailability("a"), FieldAvailability::kCollatedProvided); + ASSERT_EQ(node.getFieldAvailability("any_field"), FieldAvailability::kNotProvided); +} + 
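The new `IndexScanWithCollatedValues` test exercises the `kCollatedProvided` state: when the index collation matches the query collation, a string-bounded field is stored as collation-transformed (ICU) keys, so the scan can order and compare by that field even though it cannot return the original value. Below is a minimal, self-contained sketch of how a consumer of `FieldAvailability` might interpret the different states; it is an illustration of the idea only, not the server's actual covering logic, and both helper functions are hypothetical.

```cpp
#include <iostream>

// Local mirror of the enum from query_solution.h, for illustration only.
enum class FieldAvailability { kNotProvided, kHashedValueProvided, kCollatedProvided, kFullyProvided };

// Hypothetical helper: can an index scan with this availability order documents by the field?
bool usableForSort(FieldAvailability a) {
    // Collated keys preserve the collation's ordering, so they remain sortable.
    return a == FieldAvailability::kCollatedProvided || a == FieldAvailability::kFullyProvided;
}

// Hypothetical helper: can the field's original value be returned without fetching the document?
bool usableForCoveredProjection(FieldAvailability a) {
    // Hashed and collated keys are lossy transformations of the value, so only a fully
    // provided field can cover a projection of it.
    return a == FieldAvailability::kFullyProvided;
}

int main() {
    auto a = FieldAvailability::kCollatedProvided;
    std::cout << usableForSort(a) << " " << usableForCoveredProjection(a) << "\n";  // prints "1 0"
}
```

Note that the test installs the same reverse-string mock collator on both the index entry and the query; that collators-match condition is exactly when `IndexScanNode::getFieldAvailability` now reports `kCollatedProvided` rather than `kNotProvided`.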
TEST(QuerySolutionTest, IndexScanNodeRemovesNonMatchingCollatedFieldsFromSortsOnSimpleBounds) { IndexScanNode node{buildSimpleIndexEntry(BSON("a" << 1 << "b" << 1))}; CollatorInterfaceMock queryCollator(CollatorInterfaceMock::MockType::kReverseString); @@ -1235,12 +1291,16 @@ TEST(QuerySolutionTest, FieldAvailabilityOutputStreamOperator) { ASSERT_EQ(ex1.str(), "NotProvided"); std::stringstream ex2; - ex2 << FieldAvailability::kFullyProvided; - ASSERT_EQ(ex2.str(), "FullyProvided"); + ex2 << FieldAvailability::kHashedValueProvided; + ASSERT_EQ(ex2.str(), "HashedValueProvided"); std::stringstream ex3; - ex3 << FieldAvailability::kHashedValueProvided; - ASSERT_EQ(ex3.str(), "HashedValueProvided"); + ex3 << FieldAvailability::kCollatedProvided; + ASSERT_EQ(ex3.str(), "CollatedValueProvided"); + + std::stringstream ex4; + ex4 << FieldAvailability::kFullyProvided; + ASSERT_EQ(ex4.str(), "FullyProvided"); } TEST(QuerySolutionTest, GetSecondaryNamespaceVectorOverSingleEqLookupNode) { diff --git a/src/mongo/db/query/query_stats.cpp b/src/mongo/db/query/query_stats.cpp new file mode 100644 index 0000000000000..0e2b02a73ad36 --- /dev/null +++ b/src/mongo/db/query/query_stats.cpp @@ -0,0 +1,359 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/query/query_stats.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/crypto/hash_block.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/catalog/util/partitioned.h" +#include "mongo/db/curop.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/lru_key_value.h" +#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/query_stats_util.h" +#include "mongo/db/query/rate_limiting.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/query/util/memory_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/processinfo.h" +#include "mongo/util/synchronized_value.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery + +namespace mongo { + +namespace query_stats { + +CounterMetric queryStatsStoreSizeEstimateBytesMetric("queryStats.queryStatsStoreSizeEstimateBytes"); + +namespace { + +CounterMetric queryStatsEvictedMetric("queryStats.numEvicted"); +CounterMetric queryStatsRateLimitedRequestsMetric("queryStats.numRateLimitedRequests"); +CounterMetric queryStatsStoreWriteErrorsMetric("queryStats.numQueryStatsStoreWriteErrors"); + +/** + * Cap the queryStats store size. + */ +size_t capQueryStatsStoreSize(size_t requestedSize) { + size_t cappedStoreSize = memory_util::capMemorySize( + requestedSize /*requestedSizeBytes*/, 1 /*maximumSizeGB*/, 25 /*percentTotalSystemMemory*/); + // If capped size is less than requested size, the queryStats store has been capped at its + // upper limit. + if (cappedStoreSize < requestedSize) { + LOGV2_DEBUG(7106502, + 1, + "The queryStats store size has been capped", + "cappedSize"_attr = cappedStoreSize); + } + return cappedStoreSize; +} + +/** + * Get the queryStats store size based on the query job's value. + */ +size_t getQueryStatsStoreSize() { + auto status = memory_util::MemorySize::parse(internalQueryStatsCacheSize.get()); + uassertStatusOK(status); + size_t requestedSize = memory_util::convertToSizeInBytes(status.getValue()); + return capQueryStatsStoreSize(requestedSize); +} + +/** + * A manager for the queryStats store allows a "pointer swap" on the queryStats store itself. The + * usage patterns are as follows: + * + * - Updating the queryStats store uses the `getQueryStatsStore()` method. The queryStats store + * instance is obtained, entries are looked up and mutated, or created anew. + * - The queryStats store is "reset". This involves atomically allocating a new instance, once + * there are no more updaters (readers of the store "pointer"), and returning the existing + * instance. + */ +class QueryStatsStoreManager { +public: + template + QueryStatsStoreManager(size_t cacheSize, size_t numPartitions) + : _queryStatsStore(std::make_unique(cacheSize, numPartitions)), + _maxSize(cacheSize) {} + + /** + * Acquire the instance of the queryStats store. + */ + QueryStatsStore& getQueryStatsStore() { + return *_queryStatsStore; + } + + size_t getMaxSize() { + return _maxSize; + } + + /** + * Resize the queryStats store and return the number of evicted + * entries. 
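+     * Shrinking the store below its current footprint may evict entries; the caller (see the
+     * 'updateCacheSize' hook below) folds that count into the 'queryStats.numEvicted' metric.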
+ */ + size_t resetSize(size_t cacheSize) { + _maxSize = cacheSize; + return _queryStatsStore->reset(cacheSize); + } + +private: + std::unique_ptr _queryStatsStore; + + /** + * Max size of the queryStats store. Tracked here to avoid having to recompute after it's + * divided up into partitions. + */ + size_t _maxSize; +}; + +const auto queryStatsStoreDecoration = + ServiceContext::declareDecoration>(); + +const auto queryStatsRateLimiter = + ServiceContext::declareDecoration>(); + +class TelemetryOnParamChangeUpdaterImpl final : public query_stats_util::OnParamChangeUpdater { +public: + void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final { + auto requestedSize = memory_util::convertToSizeInBytes(memSize); + auto cappedSize = capQueryStatsStoreSize(requestedSize); + auto& queryStatsStoreManager = queryStatsStoreDecoration(serviceCtx); + size_t numEvicted = queryStatsStoreManager->resetSize(cappedSize); + queryStatsEvictedMetric.increment(numEvicted); + } + + void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) { + queryStatsRateLimiter(serviceCtx).get()->setSamplingRate(samplingRate); + } +}; + +ServiceContext::ConstructorActionRegisterer queryStatsStoreManagerRegisterer{ + "QueryStatsStoreManagerRegisterer", [](ServiceContext* serviceCtx) { + // It is possible that this is called before FCV is properly set up. Setting up the store if + // the flag is enabled but FCV is incorrect is safe, and guards against the FCV being + // changed to a supported version later. + if (!feature_flags::gFeatureFlagQueryStats.isEnabledAndIgnoreFCVUnsafeAtStartup()) { + // featureFlags are not allowed to be changed at runtime. Therefore it's not an issue + // to not create a queryStats store in ConstructorActionRegisterer at start up with the + // flag off - because the flag can not be turned on at any point afterwards. + query_stats_util::queryStatsStoreOnParamChangeUpdater(serviceCtx) = + std::make_unique(); + return; + } + + query_stats_util::queryStatsStoreOnParamChangeUpdater(serviceCtx) = + std::make_unique(); + size_t size = getQueryStatsStoreSize(); + auto&& globalQueryStatsStoreManager = queryStatsStoreDecoration(serviceCtx); + // The plan cache and queryStats store should use the same number of partitions. + // That is, the number of cpu cores. + size_t numPartitions = ProcessInfo::getNumCores(); + size_t partitionBytes = size / numPartitions; + size_t metricsSize = sizeof(QueryStatsEntry); + if (partitionBytes < metricsSize * 10) { + numPartitions = size / metricsSize; + if (numPartitions < 1) { + numPartitions = 1; + } + } + globalQueryStatsStoreManager = + std::make_unique(size, numPartitions); + auto configuredSamplingRate = internalQueryStatsRateLimit.load(); + queryStatsRateLimiter(serviceCtx) = std::make_unique( + configuredSamplingRate < 0 ? INT_MAX : configuredSamplingRate); + }}; + +/** + * Top-level checks for whether queryStats collection is enabled. If this returns false, we must go + * no further. + */ +bool isQueryStatsEnabled(const ServiceContext* serviceCtx) { + // During initialization FCV may not yet be setup but queries could be run. We can't + // check whether queryStats should be enabled without FCV, so default to not recording + // those queries. + // TODO SERVER-75935 Remove FCV Check. + return feature_flags::gFeatureFlagQueryStats.isEnabled( + serverGlobalParams.featureCompatibility) && + queryStatsStoreDecoration(serviceCtx)->getMaxSize() > 0; +} + +/** + * Internal check for whether we should collect metrics. 
This checks the rate limiting + * configuration for a global on/off decision and, if enabled, delegates to the rate limiter. + */ +bool shouldCollect(const ServiceContext* serviceCtx) { + // Quick escape if queryStats is turned off. + if (!isQueryStatsEnabled(serviceCtx)) { + return false; + } + // Cannot collect queryStats if sampling rate is not greater than 0. Note that we do not + // increment queryStatsRateLimitedRequestsMetric here since queryStats is entirely disabled. + auto samplingRate = queryStatsRateLimiter(serviceCtx)->getSamplingRate(); + if (samplingRate <= 0) { + return false; + } + // Check if rate limiting allows us to collect queryStats for this request. + if (samplingRate < INT_MAX && + !queryStatsRateLimiter(serviceCtx)->handleRequestSlidingWindow()) { + queryStatsRateLimitedRequestsMetric.increment(); + return false; + } + return true; +} + +std::string sha256HmacStringDataHasher(std::string key, const StringData& sd) { + auto hashed = SHA256Block::computeHmac( + (const uint8_t*)key.data(), key.size(), (const uint8_t*)sd.rawData(), sd.size()); + return hashed.toString(); +} + +std::size_t hash(const BSONObj& obj) { + return absl::hash_internal::CityHash64(obj.objdata(), obj.objsize()); +} + +} // namespace + +BSONObj QueryStatsEntry::computeQueryStatsKey(OperationContext* opCtx, + TransformAlgorithmEnum algorithm, + std::string hmacKey) const { + return keyGenerator->generate( + opCtx, + algorithm == TransformAlgorithmEnum::kHmacSha256 + ? boost::optional( + [&](StringData sd) { return sha256HmacStringDataHasher(hmacKey, sd); }) + : boost::none); +} + +void registerRequest(OperationContext* opCtx, + const NamespaceString& collection, + std::function(void)> makeKeyGenerator) { + if (!isQueryStatsEnabled(opCtx->getServiceContext())) { + return; + } + + // Queries against metadata collections should never appear in queryStats data. + if (collection.isFLE2StateCollection()) { + return; + } + + if (!shouldCollect(opCtx->getServiceContext())) { + return; + } + auto& opDebug = CurOp::get(opCtx)->debug(); + + if (opDebug.queryStatsKeyGenerator) { + // A find() request may have already registered the shapifier. Ie, it's a find command over + // a non-physical collection, eg view, which is implemented by generating an agg pipeline. 
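+        // Keeping that first registration means the rewritten aggregation neither re-shapes nor
+        // double-counts the original request.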
+ LOGV2_DEBUG(7198700, + 2, + "Query stats request shapifier already registered", + "collection"_attr = collection); + return; + } + + opDebug.queryStatsKeyGenerator = makeKeyGenerator(); + opDebug.queryStatsStoreKeyHash = opDebug.queryStatsKeyGenerator->hash(); +} + +QueryStatsStore& getQueryStatsStore(OperationContext* opCtx) { + uassert(6579000, + "Telemetry is not enabled without the feature flag on and a cache size greater than 0 " + "bytes", + isQueryStatsEnabled(opCtx->getServiceContext())); + return queryStatsStoreDecoration(opCtx->getServiceContext())->getQueryStatsStore(); +} + +void writeQueryStats(OperationContext* opCtx, + boost::optional queryStatsKeyHash, + std::unique_ptr keyGenerator, + const uint64_t queryExecMicros, + const uint64_t firstResponseExecMicros, + const uint64_t docsReturned) { + if (!queryStatsKeyHash) { + return; + } + auto&& queryStatsStore = getQueryStatsStore(opCtx); + auto&& [statusWithMetrics, partitionLock] = + queryStatsStore.getWithPartitionLock(*queryStatsKeyHash); + std::shared_ptr metrics; + if (statusWithMetrics.isOK()) { + metrics = *statusWithMetrics.getValue(); + } else { + tassert(7315200, + "keyGenerator cannot be null when writing a new entry to the telemetry store", + keyGenerator != nullptr); + size_t numEvicted = + queryStatsStore.put(*queryStatsKeyHash, + std::make_shared(std::move(keyGenerator)), + partitionLock); + queryStatsEvictedMetric.increment(numEvicted); + auto newMetrics = partitionLock->get(*queryStatsKeyHash); + if (!newMetrics.isOK()) { + // This can happen if the budget is immediately exceeded. Specifically, it can occur when there is + // not enough room for even a single new entry because the number of partitions is too high + // relative to the size. + queryStatsStoreWriteErrorsMetric.increment(); + LOGV2_DEBUG(7560900, + 1, + "Failed to store queryStats entry.", + "status"_attr = newMetrics.getStatus(), + "queryStatsKeyHash"_attr = queryStatsKeyHash); + return; + } + metrics = newMetrics.getValue()->second; + } + + metrics->latestSeenTimestamp = Date_t::now(); + metrics->lastExecutionMicros = queryExecMicros; + metrics->execCount++; + metrics->totalExecMicros.aggregate(queryExecMicros); + metrics->firstResponseExecMicros.aggregate(firstResponseExecMicros); + metrics->docsReturned.aggregate(docsReturned); +} +} // namespace query_stats +} // namespace mongo diff --git a/src/mongo/db/query/query_stats.h b/src/mongo/db/query/query_stats.h new file mode 100644 index 0000000000000..e5da86ed6fc5a --- /dev/null +++ b/src/mongo/db/query/query_stats.h @@ -0,0 +1,275 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library.
You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/curop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/partitioned_cache.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/query/query_stats_transform_algorithm_gen.h" +#include "mongo/db/query/util/memory_util.h" +#include "mongo/db/service_context.h" +#include "mongo/db/views/view.h" +#include "mongo/util/time_support.h" + +namespace mongo { + +namespace { +/** + * Type we use to render values to BSON. + */ +using BSONNumeric = long long; +} // namespace + +namespace query_stats { + +/** + * An aggregated metric stores a compressed view of data. It balances the loss of information + * with the reduction in required storage. + */ +struct AggregatedMetric { + + /** + * Aggregate an observed value into the metric. + */ + void aggregate(uint64_t val) { + sum += val; + max = std::max(val, max); + min = std::min(val, min); + sumOfSquares += val * val; + } + + void appendTo(BSONObjBuilder& builder, const StringData& fieldName) const { + BSONObjBuilder metricsBuilder = builder.subobjStart(fieldName); + metricsBuilder.append("sum", (BSONNumeric)sum); + metricsBuilder.append("max", (BSONNumeric)max); + metricsBuilder.append("min", (BSONNumeric)min); + metricsBuilder.append("sumOfSquares", (BSONNumeric)sumOfSquares); + metricsBuilder.done(); + } + + uint64_t sum = 0; + // Default to the _signed_ maximum (which fits in unsigned range) because we cast to + // BSONNumeric when serializing. + uint64_t min = (uint64_t)std::numeric_limits::max; + uint64_t max = 0; + + /** + * The sum of squares along with (an externally stored) count will allow us to compute the + * variance/stddev. + */ + uint64_t sumOfSquares = 0; +}; + +extern CounterMetric queryStatsStoreSizeEstimateBytesMetric; +const auto kKeySize = sizeof(std::size_t); +// Used to aggregate the metrics for one query stats key over all its executions. 
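+// Each per-execution value is folded in through AggregatedMetric (defined above); together with
+// 'execCount' as n, a metric's variance can later be recovered as sumOfSquares / n - (sum / n)^2.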
+class QueryStatsEntry { +public: + QueryStatsEntry(std::unique_ptr keyGenerator) + : firstSeenTimestamp(Date_t::now()), keyGenerator(std::move(keyGenerator)) { + // Increment by size of query stats store key (hash returns size_t) and value + // (QueryStatsEntry) + queryStatsStoreSizeEstimateBytesMetric.increment(kKeySize + size()); + } + + QueryStatsEntry(QueryStatsEntry& entry) = delete; + + QueryStatsEntry(QueryStatsEntry&& entry) = delete; + + ~QueryStatsEntry() { + // Decrement by size of query stats store key (hash returns size_t) and value + // (QueryStatsEntry) + queryStatsStoreSizeEstimateBytesMetric.decrement(kKeySize + size()); + } + + BSONObj toBSON() const { + BSONObjBuilder builder{sizeof(QueryStatsEntry) + 100}; + builder.append("lastExecutionMicros", (BSONNumeric)lastExecutionMicros); + builder.append("execCount", (BSONNumeric)execCount); + totalExecMicros.appendTo(builder, "totalExecMicros"); + firstResponseExecMicros.appendTo(builder, "firstResponseExecMicros"); + docsReturned.appendTo(builder, "docsReturned"); + builder.append("firstSeenTimestamp", firstSeenTimestamp); + builder.append("latestSeenTimestamp", latestSeenTimestamp); + return builder.obj(); + } + + int64_t size() { + return sizeof(*this) + (keyGenerator ? keyGenerator->size() : 0); + } + + /** + * Generate the queryStats key for this entry's request. If algorithm is not + * TransformAlgorithm::kNone, any identifying information (field names, namespace) will be + * anonymized. + */ + BSONObj computeQueryStatsKey(OperationContext* opCtx, + TransformAlgorithmEnum algorithm, + std::string hmacKey) const; + + BSONObj getRepresentativeQueryShapeForDebug() const { + return keyGenerator->getRepresentativeQueryShapeForDebug(); + } + + /** + * Timestamp for when this query shape was added to the store. Set on construction. + */ + const Date_t firstSeenTimestamp; + + /** + * Timestamp for when the latest time this query shape was seen. + */ + Date_t latestSeenTimestamp; + + /** + * Last execution time in microseconds. + */ + uint64_t lastExecutionMicros = 0; + + /** + * Number of query executions. + */ + uint64_t execCount = 0; + + /** + * Aggregates the total time for execution including getMore requests. + */ + AggregatedMetric totalExecMicros; + + /** + * Aggregates the time for execution for first batch only. + */ + AggregatedMetric firstResponseExecMicros; + + AggregatedMetric docsReturned; + + /** + * The KeyGenerator that can generate the query stats key for this request. + */ + std::unique_ptr keyGenerator; +}; +struct TelemetryPartitioner { + // The partitioning function for use with the 'Partitioned' utility. + std::size_t operator()(const std::size_t k, const std::size_t nPartitions) const { + return k % nPartitions; + } +}; + +struct QueryStatsStoreEntryBudgetor { + size_t operator()(const std::size_t key, const std::shared_ptr& value) { + return sizeof(decltype(key)) + value->size(); + } +}; +using QueryStatsStore = PartitionedCache, + QueryStatsStoreEntryBudgetor, + TelemetryPartitioner>; + +/** + * Acquire a reference to the global queryStats store. + */ +QueryStatsStore& getQueryStatsStore(OperationContext* opCtx); + +/** + * Registers a request for query stats collection. The function may decide not to collect anything, + * so this should be called for all requests. The decision is made based on the feature flag and + * query stats rate limiting. 
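+ * For illustration, a command implementation calls something like
+ *     query_stats::registerRequest(opCtx, nss, [&] { return std::make_unique<FindKeyGenerator>(...); });
+ * while planning, and the metrics gathered during execution are written out later through
+ * writeQueryStats() (normally via the collectQueryStatsMongod/collectQueryStatsMongos helpers
+ * described below).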
+ * + * The originating command/query does not persist through the end of query execution due to + * optimizations made to the original query and the expiration of OpCtx across getMores. In order + * to pair the query stats metrics that are collected at the end of execution with the original + * query, it is necessary to store the original query during planning and persist it through + * getMores. + * + * During planning, registerRequest is called to serialize the query stats key and save it to + * OpDebug. If a query's execution is complete within the original operation, + * collectQueryStatsMongod/collectQueryStatsMongos will call writeQueryStats() and pass along the + * query stats key to be saved in the query stats store alongside metrics collected. + * + * However, OpDebug does not persist through cursor iteration, so if a query's execution will span + * more than one request/operation, it's necessary to save the query stats context to the cursor + * upon cursor registration. In these cases, collectQueryStatsMongod/collectQueryStatsMongos will + * aggregate each operation's metrics within the cursor. Once the request is eventually complete, + * the cursor calls writeQueryStats() on its destruction. + * + * Notes: + * - It's important to call registerRequest with the original request, before canonicalizing or + * optimizing it, in order to preserve the user's input for the query shape. + * - Calling this affects internal state. It should be called exactly once for each request for + * which query stats may be collected. + * - The std::function argument to construct an abstracted KeyGenerator is provided to break + * library cycles so this library does not need to know how to parse everything. It is done as a + * deferred construction callback to ensure that this feature does not impact performance if + * collecting stats is not needed due to the feature being disabled or the request being rate + * limited. + */ +void registerRequest(OperationContext* opCtx, + const NamespaceString& collection, + std::function(void)> makeKeyGenerator); + +/** + * Writes query stats to the query stats store for the operation identified by `queryStatsKeyHash`. + * + * Direct calls to writeQueryStats in new code should be avoided in favor of calling existing + * functions: + * - collectQueryStatsMongod/collectQueryStatsMongos in the case of requests that span one + * operation + * - ClientCursor::dispose/ClusterClientCursorImpl::kill in the case of requests that span + * multiple operations (via getMore) + */ +void writeQueryStats(OperationContext* opCtx, + boost::optional queryStatsKeyHash, + std::unique_ptr keyGenerator, + uint64_t queryExecMicros, + uint64_t firstResponseExecMicros, + uint64_t docsReturned); +} // namespace query_stats +} // namespace mongo diff --git a/src/mongo/db/query/query_stats_aggregate_key_generator.cpp b/src/mongo/db/query/query_stats_aggregate_key_generator.cpp new file mode 100644 index 0000000000000..29c8439dd9c8e --- /dev/null +++ b/src/mongo/db/query/query_stats_aggregate_key_generator.cpp @@ -0,0 +1,231 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/query/query_stats_aggregate_key_generator.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/pipeline/exchange_spec_gen.h" +#include "mongo/db/pipeline/external_data_source_option_gen.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" + +namespace mongo::query_stats { + +BSONObj AggregateKeyGenerator::generate( + OperationContext* opCtx, + boost::optional hmacPolicy) const { + // TODO SERVER-76087 We will likely want to set a flag here to stop $search from calling out + // to mongot. + auto expCtx = makeDummyExpCtx(opCtx); + + SerializationOptions opts = hmacPolicy + ? 
SerializationOptions(*hmacPolicy, LiteralSerializationPolicy::kToDebugTypeString) : SerializationOptions(LiteralSerializationPolicy::kToDebugTypeString); + + return makeQueryStatsKey(opts, expCtx); +} + +void AggregateKeyGenerator::appendCommandSpecificComponents( + BSONObjBuilder& bob, const SerializationOptions& opts) const { + // cursor + if (auto param = _request.getCursor().getBatchSize()) { + BSONObjBuilder cursorInfo = bob.subobjStart(AggregateCommandRequest::kCursorFieldName); + opts.appendLiteral(&cursorInfo, + SimpleCursorOptions::kBatchSizeFieldName, + static_cast(param.get())); + cursorInfo.doneFast(); + } + + // maxTimeMS + if (auto param = _request.getMaxTimeMS()) { + opts.appendLiteral(&bob, + AggregateCommandRequest::kMaxTimeMSFieldName, + static_cast(param.get())); + } + + // bypassDocumentValidation + if (auto param = _request.getBypassDocumentValidation()) { + opts.appendLiteral( + &bob, AggregateCommandRequest::kBypassDocumentValidationFieldName, bool(param.get())); + } + + // otherNss + if (!_involvedNamespaces.empty()) { + BSONArrayBuilder otherNss = bob.subarrayStart(kOtherNssFieldName); + for (const auto& nss : _involvedNamespaces) { + BSONObjBuilder otherNsEntryBob = otherNss.subobjStart(); + query_shape::appendNamespaceShape(otherNsEntryBob, nss, opts); + otherNsEntryBob.doneFast(); + } + otherNss.doneFast(); + } +} + +BSONObj AggregateKeyGenerator::makeQueryStatsKey( + const SerializationOptions& opts, const boost::intrusive_ptr& expCtx) const { + auto pipeline = Pipeline::parse(_request.getPipeline(), expCtx); + expCtx->setUserRoles(); + return _makeQueryStatsKeyHelper(opts, expCtx, *pipeline); +} + +BSONObj AggregateKeyGenerator::_makeQueryStatsKeyHelper( + const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx, + const Pipeline& pipeline) const { + return generateWithQueryShape( + query_shape::extractQueryShape(_request, pipeline, opts, expCtx, _origNss), opts); +} + +namespace { + +int64_t sum(const std::initializer_list& sizes) { + return std::accumulate(sizes.begin(), sizes.end(), 0, std::plus{}); +} + +int64_t size(const std::vector& objects) { + return std::accumulate(objects.begin(), objects.end(), 0, [](int64_t total, const auto& obj) { + // Include the 'sizeof' to account for the variable number in the vector. + return total + sizeof(BSONObj) + obj.objsize(); + }); +} + +int64_t size(const boost::optional& passthroughToShardOpts) { + if (!passthroughToShardOpts) { + return 0; + } + return passthroughToShardOpts->getShard().size(); +} + +int64_t size(const boost::optional& exchange) { + if (!exchange) { + return 0; + } + return sum( + {exchange->getKey().objsize(), + (exchange->getBoundaries() ? size(exchange->getBoundaries().get()) : 0), + (exchange->getConsumerIds() ? 4 * static_cast(exchange->getConsumerIds()->size()) + : 0)}); +} + +int64_t size(const boost::optional& encryptInfo) { + if (!encryptInfo) { + return 0; + } + tasserted(7659700, + "Unexpected encryption information - not expecting to collect query shape stats on " + "encrypted queries"); +} + +int64_t singleDataSourceSize(int64_t runningTotal, const ExternalDataSourceInfo& source) { + // Here we include the 'sizeof' since it's expected to be contained in a vector, which will have + // a variable number of these.
+ return runningTotal + sizeof(ExternalDataSourceInfo) + source.getUrl().size(); +} + +int64_t size(const boost::optional>& externalDataSources) { + if (!externalDataSources) { + return 0; + } + // External data sources aren't currently expected to be used much in production Atlas clusters, + // so it's probably pretty unlikely that this code will ever be exercised. That said, there's + // not reason it shouldn't work and be tracked correctly. + return std::accumulate( + externalDataSources->begin(), + externalDataSources->end(), + 0, + [](int64_t runningTotal, const ExternalDataSourceOption& opt) { + const auto& sources = opt.getDataSources(); + return sum({runningTotal, + // Include the 'sizeof' to account for the variable number in the vector. + sizeof(ExternalDataSourceOption), + static_cast(opt.getCollName().size()), + std::accumulate(sources.begin(), sources.end(), 0, singleDataSourceSize)}); + }); +} + +int64_t size(const boost::optional& dbName) { + if (!dbName) { + return 0; + } + return dbName->db().size(); +} + +int64_t size(const boost::optional& obj) { + return optionalObjSize(obj); +} + +// variadic base case. +template +int64_t sumOfSizes(const T& t) { + return size(t); +} + +// variadic recursive case. Making the compiler expand the pluses everywhere to give us good +// formatting at the call site. sumOfSizes(x, y, z) rather than size(x) + size(y) + size(z). +template +int64_t sumOfSizes(const T& t, const Args&... args) { + return size(t) + sumOfSizes(args...); +} + +int64_t aggRequestSize(const AggregateCommandRequest& request) { + return sumOfSizes(request.getPipeline(), + request.getLet(), + request.getUnwrappedReadPref(), + request.getExchange(), + request.getPassthroughToShard(), + request.getEncryptionInformation(), + request.getExternalDataSources(), + request.getDbName()); +} + +} // namespace + +int64_t AggregateKeyGenerator::doGetSize() const { + return sum({sizeof(*this), + static_cast(_origNss.size()), + optionalObjSize(_initialQueryStatsKey), + aggRequestSize(_request)}); +} +} // namespace mongo::query_stats diff --git a/src/mongo/db/query/query_stats_aggregate_key_generator.h b/src/mongo/db/query/query_stats_aggregate_key_generator.h new file mode 100644 index 0000000000000..3c804116a33e0 --- /dev/null +++ b/src/mongo/db/query/query_stats_aggregate_key_generator.h @@ -0,0 +1,152 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. 
If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/intrusive_counter.h" + +namespace mongo::query_stats { + +/** + * Handles shapification for AggregateCommandRequests. Requires a pre-parsed pipeline in order to + * avoid parsing the raw pipeline multiple times, but users should be sure to provide a + * non-optimized pipeline. + */ +class AggregateKeyGenerator final : public KeyGenerator { +public: + static constexpr StringData kOtherNssFieldName = "otherNss"_sd; + + AggregateKeyGenerator(AggregateCommandRequest request, + const Pipeline& pipeline, + const boost::intrusive_ptr& expCtx, + stdx::unordered_set involvedNamespaces, + const NamespaceString& origNss, + boost::optional collectionType = boost::none) + : KeyGenerator( + expCtx->opCtx, + // TODO: SERVER-76330 Store representative agg query shape in telemetry store. + BSONObj(), + collectionType), + _request(std::move(request)), + _involvedNamespaces(std::move(involvedNamespaces)), + _origNss(origNss), + _initialQueryStatsKey(_makeQueryStatsKeyHelper( + SerializationOptions::kDebugQueryShapeSerializeOptions, expCtx, pipeline)) { + _queryShapeHash = query_shape::hash(*_initialQueryStatsKey); + } + + BSONObj generate(OperationContext* opCtx, + boost::optional) const final; + + + BSONObj makeQueryStatsKeyForTest(const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx) const { + return makeQueryStatsKey(opts, expCtx); + } + +protected: + void appendCommandSpecificComponents(BSONObjBuilder& bob, + const SerializationOptions& opts) const final override; + +protected: + int64_t doGetSize() const final; + +private: + BSONObj _makeQueryStatsKeyHelper(const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx, + const Pipeline& pipeline) const; + + BSONObj makeQueryStatsKey(const SerializationOptions& opts, + const boost::intrusive_ptr& expCtx) const; + + boost::intrusive_ptr makeDummyExpCtx(OperationContext* opCtx) const { + // TODO SERVER-76087 We will likely want to set a flag here to stop $search from calling out + // to mongot. + // TODO SERVER-76330 look into if this could be consolidated between query stats key + // generator types and potentially remove one of the makeQueryStatsKey() overrides + auto expCtx = make_intrusive( + opCtx, nullptr, _request.getNamespace(), boost::none, _request.getLet()); + expCtx->variables.setDefaultRuntimeConstants(opCtx); + expCtx->maxFeatureCompatibilityVersion = boost::none; // Ensure all features are allowed. + // Expression counters are reported in serverStatus to indicate how often + // clients use certain expressions/stages, so it's a side effect tied to parsing. 
We must + // stop expression counters before re-parsing to avoid adding to the counters more than once + // for a given query. + expCtx->stopExpressionCounters(); + expCtx->addResolvedNamespaces(_involvedNamespaces); + + return expCtx; + } + + // We make a copy of AggregateCommandRequest since this instance may outlive the + // original request once the KeyGenerator is moved to the query stats store. + AggregateCommandRequest _request; + + // The set of secondary namespaces involved in this query. + stdx::unordered_set _involvedNamespaces; + + // The original NSS of the request before views are resolved. + const NamespaceString _origNss; + + // This is computed and cached upon construction until asked for once - at which point this + // transitions to boost::none. This is both a performance and a memory optimization. + // + // On the performance side: we try to construct the query stats key by simply viewing the + // pre-parsed pipeline. We initialize this instance before the regular command processing path + // goes on to optimize the pipeline. + // + // On the memory side: we could just make a copy of the pipeline. But we chose to avoid + // this due to a limited memory budget and since we need to store the backing BSON used to parse + // the Pipeline anyway - it would be redundant to copy everything here. We'll just re-parse on + // demand when asked. + mutable boost::optional _initialQueryStatsKey; +}; +} // namespace mongo::query_stats diff --git a/src/mongo/db/query/query_stats_find_key_generator.cpp b/src/mongo/db/query/query_stats_find_key_generator.cpp new file mode 100644 index 0000000000000..1d748f7dddd97 --- /dev/null +++ b/src/mongo/db/query/query_stats_find_key_generator.cpp @@ -0,0 +1,136 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file.
+ */ + +#include "mongo/db/query/query_stats_find_key_generator.h" + +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" + +namespace mongo::query_stats { + +std::unique_ptr FindKeyGenerator::reparse(OperationContext* opCtx) const { + // TODO: SERVER-76330 factor out building the parseable cmdObj into a helper function in + // query_shape.h. + BSONObjBuilder cmdBuilder; + NamespaceStringOrUUID nss = query_shape::parseNamespaceShape(_parseableQueryShape["cmdNs"]); + nss.serialize(&cmdBuilder, FindCommandRequest::kCommandName); + cmdBuilder.append("$db", nss.dbname()); + + + for (BSONElement e : _parseableQueryShape) { + if (e.fieldNameStringData() == "cmdNs" || e.fieldNameStringData() == "command") { + continue; + } + + cmdBuilder.append(e); + } + + auto cmdObj = cmdBuilder.obj(); + return std::make_unique( + FindCommandRequest::parse(IDLParserContext("Query Stats Key"), cmdObj)); +} + +BSONObj FindKeyGenerator::generate( + OperationContext* opCtx, + boost::optional hmacPolicy) const { + auto request = reparse(opCtx); + auto expCtx = makeDummyExpCtx(opCtx, *request); + auto parsedRequest = uassertStatusOK( + parsed_find_command::parse(expCtx, + std::move(request), + ExtensionsCallbackNoop(), + MatchExpressionParser::kAllowAllSpecialFeatures)); + expCtx->setUserRoles(); + + auto opts = hmacPolicy + ? SerializationOptions(*hmacPolicy, LiteralSerializationPolicy::kToDebugTypeString) + : SerializationOptions(LiteralSerializationPolicy::kToDebugTypeString); + + return generateWithQueryShape(query_shape::extractQueryShape(*parsedRequest, opts, expCtx), + opts); +} + +void FindKeyGenerator::appendCommandSpecificComponents(BSONObjBuilder& bob, + const SerializationOptions& opts) const { + if (auto optObj = _readConcern) { + // Read concern should not be considered a literal. + // afterClusterTime is distinct for every operation with causal consistency enabled. We + // normalize it in order not to blow out the telemetry store cache. + if (optObj.get()["afterClusterTime"]) { + BSONObjBuilder subObj = bob.subobjStart(FindCommandRequest::kReadConcernFieldName); + + if (auto levelElem = optObj.get()["level"]) { + subObj.append(levelElem); + } + opts.appendLiteral(&subObj, "afterClusterTime", optObj.get()["afterClusterTime"]); + subObj.doneFast(); + } else { + bob.append(FindCommandRequest::kReadConcernFieldName, optObj.get()); + } + } + + if (_allowPartialResults.has_value()) { + bob.append(FindCommandRequest::kAllowPartialResultsFieldName, + _allowPartialResults.value_or(false)); + } + + // Fields for literal redaction. Adds batchSize, maxTimeMS, and noCursorTimeOut. + + if (auto noCursorTimeout = _noCursorTimeout) { + // Capture whether noCursorTimeout was specified in the query, do not distinguish between + // true or false. 
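+        // In other words, {noCursorTimeout: true} and {noCursorTimeout: false} shapify
+        // identically; only the option's presence is reflected in the key.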
+ opts.appendLiteral( + &bob, FindCommandRequest::kNoCursorTimeoutFieldName, noCursorTimeout.has_value()); + } + + if (auto maxTimeMs = _maxTimeMS) { + opts.appendLiteral(&bob, FindCommandRequest::kMaxTimeMSFieldName, *maxTimeMs); + } + + if (auto batchSize = _batchSize) { + opts.appendLiteral( + &bob, FindCommandRequest::kBatchSizeFieldName, static_cast(*batchSize)); + } +} +} // namespace mongo::query_stats diff --git a/src/mongo/db/query/query_stats_find_key_generator.h b/src/mongo/db/query/query_stats_find_key_generator.h new file mode 100644 index 0000000000000..e321037a7e758 --- /dev/null +++ b/src/mongo/db/query/query_stats_find_key_generator.h @@ -0,0 +1,121 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/parsed_find_command.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/intrusive_counter.h" + +namespace mongo::query_stats { + +class FindKeyGenerator final : public KeyGenerator { +public: + FindKeyGenerator(const boost::intrusive_ptr& expCtx, + const ParsedFindCommand& request, + BSONObj parseableQueryShape, + boost::optional collectionType = boost::none) + : KeyGenerator(expCtx->opCtx, parseableQueryShape, collectionType), + _readConcern( + request.findCommandRequest->getReadConcern().has_value() + ? 
boost::optional(request.findCommandRequest->getReadConcern()->copy()) + : boost::none), + _allowPartialResults(request.findCommandRequest->getAllowPartialResults()), + _noCursorTimeout(request.findCommandRequest->getNoCursorTimeout()), + _maxTimeMS(request.findCommandRequest->getMaxTimeMS()), + _batchSize(request.findCommandRequest->getBatchSize()) {} + + + BSONObj generate(OperationContext* opCtx, + boost::optional) const final; + +protected: + int64_t doGetSize() const final { + return sizeof(*this) + optionalObjSize(_readConcern); + } + +private: + BSONObj makeQueryStatsKey(const boost::intrusive_ptr& expCtx, + const ParsedFindCommand& parsedRequest, + const SerializationOptions& opts) const; + + + void appendCommandSpecificComponents(BSONObjBuilder& bob, + const SerializationOptions& opts) const final override; + + std::unique_ptr reparse(OperationContext* opCtx) const; + + boost::intrusive_ptr makeDummyExpCtx( + OperationContext* opCtx, const FindCommandRequest& request) const { + auto expCtx = make_intrusive( + opCtx, request, nullptr /* collator doesn't matter here.*/, false /* mayDbProfile */); + expCtx->maxFeatureCompatibilityVersion = boost::none; // Ensure all features are allowed. + // Expression counters are reported in serverStatus to indicate how often clients use + // certain expressions/stages, so it's a side effect tied to parsing. We must stop + // expression counters before re-parsing to avoid adding to the counters more than once per + // a given query. + expCtx->stopExpressionCounters(); + return expCtx; + } + + // Preserved literal. + boost::optional _readConcern; + + // Preserved literal. + OptionalBool _allowPartialResults; + + // Shape. + OptionalBool _noCursorTimeout; + + // Shape. + boost::optional _maxTimeMS; + + // Shape. + boost::optional _batchSize; +}; +} // namespace mongo::query_stats diff --git a/src/mongo/db/query/query_stats_key_generator.h b/src/mongo/db/query/query_stats_key_generator.h new file mode 100644 index 0000000000000..9e217dc3b3674 --- /dev/null +++ b/src/mongo/db/query/query_stats_key_generator.h @@ -0,0 +1,226 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/util/decorable.h" + +namespace mongo { + +int64_t inline optionalObjSize(boost::optional optionalObj) { + if (!optionalObj) + return 0; + return optionalObj->objsize(); +} + +template +int64_t optionalSize(boost::optional optionalVal) { + if (!optionalVal) + return 0; + return optionalVal->size(); +} + +namespace query_stats { + +/** + * An abstract base class to handle generating the query stats key for a given request. + */ +class KeyGenerator { +public: + virtual ~KeyGenerator() = default; + + /** + * Generate the query stats key with the given tokenization strategy. + */ + virtual BSONObj generate( + OperationContext* opCtx, + boost::optional) const = 0; + + /** + * Compute the query stats key hash by combining the hash components for this specific command + * with the pre-computed query shape hash. + */ + size_t hash() const { + BSONObjBuilder bob; + // Rather than the query shape itself, insert its hash into the key. + _queryShapeHash.appendAsBinData(bob, "queryShape"); + appendImmediateComponents(bob, + SerializationOptions::kRepresentativeQueryShapeSerializeOptions); + bob.doneFast(); + return absl::hash_internal::CityHash64(bob.bb().buf(), bob.bb().len()); + } + + int64_t size() const { + return doGetSize() + + _parseableQueryShape.objsize() + /* _collectionType is not owned here */ + (_apiParams ? sizeof(*_apiParams) + optionalSize(_apiParams->getAPIVersion()) : 0) + + optionalObjSize(_clientMetaData) + _commentObj.objsize() + + optionalObjSize(_readPreference); + } + + BSONObj getRepresentativeQueryShapeForDebug() const { + return _parseableQueryShape; + } + +protected: + KeyGenerator(OperationContext* opCtx, + BSONObj parseableQueryShape, + boost::optional collectionType = boost::none, + boost::optional queryShapeHash = boost::none) + : _parseableQueryShape(parseableQueryShape.getOwned()), + _collectionType(collectionType ? boost::make_optional(*collectionType) : boost::none), + _queryShapeHash(queryShapeHash.value_or(query_shape::hash(parseableQueryShape))) { + if (auto metadata = ClientMetadata::get(opCtx->getClient())) { + _clientMetaData = boost::make_optional(metadata->getDocument()); + } + + if (auto comment = opCtx->getCommentOwnedCopy()) { + _commentObj = std::move(comment.value()); + _comment = _commentObj.firstElement(); + } + + _apiParams = std::make_unique(APIParameters::get(opCtx)); + + if (!ReadPreferenceSetting::get(opCtx).toInnerBSON().isEmpty() && + !ReadPreferenceSetting::get(opCtx).usedDefaultReadPrefValue()) { + _readPreference = boost::make_optional(ReadPreferenceSetting::get(opCtx).toInnerBSON()); + } + } + + /** + * With a given BSONObjBuilder, append the command-specific components of the query stats key. 
 + */ + virtual void appendCommandSpecificComponents(BSONObjBuilder& bob, + const SerializationOptions& opts) const = 0; + + /** + * Helper function to generate the Query Stats Key, using the passed-in query shape as the + * `queryShape` sub-object. + */ + BSONObj generateWithQueryShape(BSONObj queryShape, const SerializationOptions& opts) const { + BSONObjBuilder bob; + bob.append("queryShape", queryShape); + appendImmediateComponents(bob, opts); + return bob.obj(); + } + + /** + * Append all non-query shape components of the query stats key to the passed-in BSONObj + * builder. + */ + void appendImmediateComponents(BSONObjBuilder& bob, const SerializationOptions& opts) const { + appendCommandSpecificComponents(bob, opts); + appendUniversalComponents(bob, opts); + } + + // TODO: SERVER-76330 make everything below this line private once the aggregate key generator + // is properly using this interface. + /** + * Specifies the serialization of the query stats key components which apply to all commands. + */ + void appendUniversalComponents(BSONObjBuilder& bob, const SerializationOptions& opts) const { + if (_comment) { + opts.appendLiteral(&bob, "comment", *_comment); + } + + if (const auto& apiVersion = _apiParams->getAPIVersion()) { + bob.append("apiVersion", apiVersion.value()); + } + + if (const auto& apiStrict = _apiParams->getAPIStrict()) { + bob.append("apiStrict", apiStrict.value()); + } + + if (const auto& apiDeprecationErrors = _apiParams->getAPIDeprecationErrors()) { + bob.append("apiDeprecationErrors", apiDeprecationErrors.value()); + } + + if (_readPreference) { + bob.append("$readPreference", *_readPreference); + } + + if (_clientMetaData) { + bob.append("client", *_clientMetaData); + } + if (_collectionType) { + bob.append("collectionType", *_collectionType); + } + } + + /** + * Sub-classes should implement this to report how much memory is used. This is important to do + * carefully since we are under a budget in the query stats store and use this to do the + * accounting. Implementers should include sizeof(*derivedThis) and be sure to also include the + * size of any owned pointer-like objects such as BSONObj or NamespaceString which are + * indirectly using memory elsewhere. + */ + virtual int64_t doGetSize() const = 0; + + BSONObj _parseableQueryShape; + // This value is not known when a query is run on mongos over an unsharded collection, so it + // is not set through that code path. + boost::optional _collectionType; + query_shape::QueryShapeHash _queryShapeHash; + + // Preserve this value in the query shape. + std::unique_ptr _apiParams; + // Preserve this value in the query shape. + boost::optional _clientMetaData = boost::none; + // Shapify this value. + BSONObj _commentObj; + boost::optional _comment = boost::none; + // Preserve this value in the query shape. + boost::optional _readPreference = boost::none; +}; + +} // namespace query_stats +} // namespace mongo diff --git a/src/mongo/db/query/query_stats_store_test.cpp b/src/mongo/db/query/query_stats_store_test.cpp new file mode 100644 index 0000000000000..0efe0f2071df4 --- /dev/null +++ b/src/mongo/db/query/query_stats_store_test.cpp @@ -0,0 +1,1258 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/parsed_find_command.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/query/query_stats.h" +#include "mongo/db/query/query_stats_aggregate_key_generator.h" +#include "mongo/db/query/query_stats_find_key_generator.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/query/query_stats_transform_algorithm_gen.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" + +namespace mongo::query_stats { +/** + * A default hmac application strategy that generates easy to check results for testing purposes. 
+ */ +std::string applyHmacForTest(StringData s) { + return str::stream() << "HASH<" << s << ">"; +} + +std::size_t hash(const BSONObj& obj) { + return absl::hash_internal::CityHash64(obj.objdata(), obj.objsize()); +} + +class QueryStatsStoreTest : public ServiceContextTest { +public: + boost::optional collectionType = boost::make_optional("collection"_sd); + BSONObj makeQueryStatsKeyFindRequest(const FindCommandRequest& fcr, + const boost::intrusive_ptr& expCtx, + bool applyHmac) { + auto fcrCopy = std::make_unique(fcr); + auto parsedFind = uassertStatusOK(parsed_find_command::parse(expCtx, std::move(fcrCopy))); + auto queryShape = query_shape::extractQueryShape( + *parsedFind, SerializationOptions::kRepresentativeQueryShapeSerializeOptions, expCtx); + FindKeyGenerator findKeyGenerator(expCtx, *parsedFind, queryShape, collectionType); + return findKeyGenerator.generate( + expCtx->opCtx, + applyHmac + ? boost::optional(applyHmacForTest) + : boost::none); + } + + BSONObj makeTelemetryKeyAggregateRequest( + AggregateCommandRequest acr, + const Pipeline& pipeline, + const boost::intrusive_ptr& expCtx, + bool applyHmac = false, + LiteralSerializationPolicy literalPolicy = LiteralSerializationPolicy::kUnchanged) { + AggregateKeyGenerator aggKeyGenerator(acr, + pipeline, + expCtx, + pipeline.getInvolvedCollections(), + acr.getNamespace(), + collectionType); + + SerializationOptions opts(literalPolicy); + if (applyHmac) { + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; + } + return aggKeyGenerator.makeQueryStatsKeyForTest(opts, expCtx); + } +}; + +TEST_F(QueryStatsStoreTest, BasicUsage) { + QueryStatsStore telStore{5000000, 1000}; + + auto getMetrics = [&](const BSONObj& key) { + auto lookupResult = telStore.lookup(hash(key)); + return *lookupResult.getValue(); + }; + + auto collectMetrics = [&](BSONObj& key) { + std::shared_ptr metrics; + auto lookupResult = telStore.lookup(hash(key)); + if (!lookupResult.isOK()) { + telStore.put(hash(key), std::make_shared(nullptr)); + lookupResult = telStore.lookup(hash(key)); + } + metrics = *lookupResult.getValue(); + metrics->execCount += 1; + metrics->lastExecutionMicros += 123456; + }; + + auto query1 = BSON("query" << 1 << "xEquals" << 42); + // same value, different instance (tests hashing & equality) + auto query1x = BSON("query" << 1 << "xEquals" << 42); + auto query2 = BSON("query" << 2 << "yEquals" << 43); + + collectMetrics(query1); + collectMetrics(query1); + collectMetrics(query1x); + collectMetrics(query2); + + ASSERT_EQ(getMetrics(query1)->execCount, 3); + ASSERT_EQ(getMetrics(query1x)->execCount, 3); + ASSERT_EQ(getMetrics(query2)->execCount, 1); + + auto collectMetricsWithLock = [&](BSONObj& key) { + auto [lookupResult, lock] = telStore.getWithPartitionLock(hash(key)); + auto metrics = *lookupResult.getValue(); + metrics->execCount += 1; + metrics->lastExecutionMicros += 123456; + }; + + collectMetricsWithLock(query1x); + collectMetricsWithLock(query2); + + ASSERT_EQ(getMetrics(query1)->execCount, 4); + ASSERT_EQ(getMetrics(query1x)->execCount, 4); + ASSERT_EQ(getMetrics(query2)->execCount, 2); + + int numKeys = 0; + + telStore.forEach( + [&](std::size_t key, const std::shared_ptr& entry) { numKeys++; }); + + ASSERT_EQ(numKeys, 2); +} + + +TEST_F(QueryStatsStoreTest, EvictEntries) { + // This creates a queryStats store with 2 partitions, each with a size of 1200 bytes. 
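// --- Illustrative aside (not part of this patch) -----------------------------
// The eviction expectation below is pure arithmetic: each partition receives an
// equal slice of the byte budget, every entry is charged sizeof(key) +
// sizeof(entry), and a partition retains at most floor(budget / charge)
// entries. A dependency-free model of that bound (hypothetical names; the real
// QueryStatsEntry charge is larger than the 24 bytes used in the example):
#include <cstddef>

namespace query_stats_sketch {

constexpr std::size_t maxRetainedEntries(std::size_t cacheSizeBytes,
                                         std::size_t numPartitions,
                                         std::size_t perEntryCharge) {
    return ((cacheSizeBytes / numPartitions) / perEntryCharge) * numPartitions;
}

// E.g. a 2400-byte budget split across 2 partitions, with each entry charged
// 24 bytes (an 8-byte hash key plus a 16-byte payload), retains at most 100
// entries: 1200 / 24 = 50 per partition.
static_assert(maxRetainedEntries(2400, 2, 24) == 100, "50 entries per 1200-byte partition");

}  // namespace query_stats_sketch
// Because the real per-entry charge is considerably larger, the 30 insertions
// in the test below overflow the partitions, and the final assertion counts
// only the entries that survive eviction.
// -----------------------------------------------------------------------------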
+ const auto cacheSize = 2400; + const auto numPartitions = 2; + QueryStatsStore telStore{cacheSize, numPartitions}; + + for (int i = 0; i < 30; i++) { + auto query = BSON("query" + std::to_string(i) << 1 << "xEquals" << 42); + telStore.put(hash(query), std::make_shared(nullptr)); + } + int numKeys = 0; + telStore.forEach( + [&](std::size_t key, const std::shared_ptr& entry) { numKeys++; }); + + int entriesPerPartition = + (cacheSize / numPartitions) / (sizeof(std::size_t) + sizeof(QueryStatsEntry)); + + ASSERT_EQ(numKeys, entriesPerPartition * numPartitions); +} + +TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestAllFields) { + auto expCtx = make_intrusive(); + FindCommandRequest fcr( + NamespaceStringOrUUID(NamespaceString::createNamespaceString_forTest("testDB.testColl"))); + + fcr.setFilter(BSON("a" << 1)); + + auto key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + } + }, + "collectionType": "collection" + })", + key); + + // Add sort. + fcr.setSort(BSON("sortVal" << 1 << "otherSort" << -1)); + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "sort": { + "HASH": 1, + "HASH": -1 + } + }, + "collectionType": "collection" + })", + key); + + // Add inclusion projection. + fcr.setProjection(BSON("e" << true << "f" << true)); + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "projection": { + "HASH": true, + "HASH": true, + "HASH<_id>": true + }, + "sort": { + "HASH": 1, + "HASH": -1 + } + }, + "collectionType": "collection" + })", + key); + + // Add let. + fcr.setLet(BSON("var1" << 1 << "var2" + << "const1")); + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "let": { + "HASH": "?number", + "HASH": "?string" + }, + "projection": { + "HASH": true, + "HASH": true, + "HASH<_id>": true + }, + "sort": { + "HASH": 1, + "HASH": -1 + } + }, + "collectionType": "collection" + })", + key); + + // Add hinting fields. + fcr.setHint(BSON("z" << 1 << "c" << 1)); + fcr.setMax(BSON("z" << 25)); + fcr.setMin(BSON("z" << 80)); + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "let": { + "HASH": "?number", + "HASH": "?string" + }, + "projection": { + "HASH": true, + "HASH": true, + "HASH<_id>": true + }, + "hint": { + "HASH": 1, + "HASH": 1 + }, + "max": { + "HASH": "?number" + }, + "min": { + "HASH": "?number" + }, + "sort": { + "HASH": 1, + "HASH": -1 + } + }, + "collectionType": "collection" + })", + key); + + // Add the literal redaction fields. 
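// --- Illustrative aside (not part of this patch) -----------------------------
// The "?number" / "?string" placeholders asserted throughout this test come
// from the debug-type-string literal policy: a literal's value is discarded and
// only its type survives in the key. A minimal stand-in for that policy, using
// std::variant instead of BSONElement (hypothetical names):
#include <string>
#include <variant>

namespace query_stats_sketch {

using Literal = std::variant<long long, double, bool, std::string>;

inline std::string toDebugTypeString(const Literal& value) {
    struct Visitor {
        std::string operator()(long long) const { return "?number"; }
        std::string operator()(double) const { return "?number"; }
        std::string operator()(bool) const { return "?bool"; }
        std::string operator()(const std::string&) const { return "?string"; }
    };
    return std::visit(Visitor{}, value);
}

}  // namespace query_stats_sketch
// Under this policy the literal redaction fields set just below (limit, skip,
// batchSize, maxTimeMS) all surface as "?number" in the expected key, precisely
// because only their types are recorded.
// -----------------------------------------------------------------------------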
+ fcr.setLimit(5); + fcr.setSkip(2); + fcr.setBatchSize(25); + fcr.setMaxTimeMS(1000); + fcr.setNoCursorTimeout(false); + + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "let": { + "HASH": "?number", + "HASH": "?string" + }, + "projection": { + "HASH": true, + "HASH": true, + "HASH<_id>": true + }, + "hint": { + "HASH": 1, + "HASH": 1 + }, + "max": { + "HASH": "?number" + }, + "min": { + "HASH": "?number" + }, + "sort": { + "HASH": 1, + "HASH": -1 + }, + "limit": "?number", + "skip": "?number" + }, + "maxTimeMS": "?number", + "batchSize": "?number", + "collectionType": "collection" + })", + key); + + // Add the fields that shouldn't be hmacApplied. + fcr.setSingleBatch(true); + fcr.setAllowDiskUse(false); + fcr.setAllowPartialResults(true); + fcr.setAllowDiskUse(false); + fcr.setShowRecordId(true); + fcr.setMirrored(true); + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "let": { + "HASH": "?number", + "HASH": "?string" + }, + "projection": { + "HASH": true, + "HASH": true, + "HASH<_id>": true + }, + "hint": { + "HASH": 1, + "HASH": 1 + }, + "max": { + "HASH": "?number" + }, + "min": { + "HASH": "?number" + }, + "sort": { + "HASH": 1, + "HASH": -1 + }, + "limit": "?number", + "skip": "?number", + "singleBatch": true, + "allowDiskUse": false, + "showRecordId": true, + "mirrored": true + }, + "allowPartialResults": true, + "maxTimeMS": "?number", + "batchSize": "?number", + "collectionType": "collection" + })", + key); + + fcr.setAllowPartialResults(false); + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + // Make sure that a false allowPartialResults is also accurately captured. 
+ ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "let": { + "HASH": "?number", + "HASH": "?string" + }, + "projection": { + "HASH": true, + "HASH": true, + "HASH<_id>": true + }, + "hint": { + "HASH": 1, + "HASH": 1 + }, + "max": { + "HASH": "?number" + }, + "min": { + "HASH": "?number" + }, + "sort": { + "HASH": 1, + "HASH": -1 + }, + "limit": "?number", + "skip": "?number", + "singleBatch": true, + "allowDiskUse": false, + "showRecordId": true, + "mirrored": true + }, + "allowPartialResults": false, + "maxTimeMS": "?number", + "batchSize": "?number", + "collectionType": "collection" + })", + key); + + FindCommandRequest fcr2(NamespaceStringOrUUID(NamespaceString("testDB.testColl"))); + fcr2.setAwaitData(true); + fcr2.setTailable(true); + fcr2.setSort(BSON("$natural" << 1)); + key = makeQueryStatsKeyFindRequest(fcr2, expCtx, true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": {}, + "hint": { + "$natural": 1 + }, + "tailable": true, + "awaitData": true + }, + "collectionType": "collection" + })", + key); +} + +TEST_F(QueryStatsStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) { + auto expCtx = make_intrusive(); + FindCommandRequest fcr( + NamespaceStringOrUUID(NamespaceString::createNamespaceString_forTest("testDB.testColl"))); + fcr.setFilter(BSONObj()); + fcr.setSort(BSONObj()); + fcr.setProjection(BSONObj()); + + auto hmacApplied = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": {} + }, + "collectionType": "collection" + })", + hmacApplied); // NOLINT (test auto-update) +} + +TEST_F(QueryStatsStoreTest, CorrectlyRedactsHintsWithOptions) { + auto expCtx = make_intrusive(); + FindCommandRequest fcr( + NamespaceStringOrUUID(NamespaceString::createNamespaceString_forTest("testDB.testColl"))); + + fcr.setFilter(BSON("b" << 1)); + fcr.setHint(BSON("z" << 1 << "c" << 1)); + fcr.setMax(BSON("z" << 25)); + fcr.setMin(BSON("z" << 80)); + + auto key = makeQueryStatsKeyFindRequest(fcr, expCtx, false); + + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "testDB", + "coll": "testColl" + }, + "command": "find", + "filter": { + "b": { + "$eq": "?number" + } + }, + "hint": { + "z": 1, + "c": 1 + }, + "max": { + "z": "?number" + }, + "min": { + "z": "?number" + } + }, + "collectionType": "collection" + })", + key); + // Test with a string hint. Note that this is the internal representation of the string hint + // generated at parse time. 
+ fcr.setHint(BSON("$hint" + << "z")); + + key = makeQueryStatsKeyFindRequest(fcr, expCtx, false); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "testDB", + "coll": "testColl" + }, + "command": "find", + "filter": { + "b": { + "$eq": "?number" + } + }, + "hint": { + "$hint": "z" + }, + "max": { + "z": "?number" + }, + "min": { + "z": "?number" + } + }, + "collectionType": "collection" + })", + key); + + fcr.setHint(BSON("z" << 1 << "c" << 1)); + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "hint": { + "HASH": 1, + "HASH": 1 + }, + "max": { + "HASH": "?number" + }, + "min": { + "HASH": "?number" + } + }, + "collectionType": "collection" + })", + key); + + // Test that $natural comes through unmodified. + fcr.setHint(BSON("$natural" << -1)); + key = makeQueryStatsKeyFindRequest(fcr, expCtx, true); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "find", + "filter": { + "HASH": { + "$eq": "?number" + } + }, + "hint": { + "$natural": -1 + }, + "max": { + "HASH": "?number" + }, + "min": { + "HASH": "?number" + } + }, + "collectionType": "collection" + })", + key); +} + +TEST_F(QueryStatsStoreTest, DefinesLetVariables) { + // Test that the expression context we use to apply hmac will understand the 'let' part of the + // find command while parsing the other pieces of the command. + + // Note that this ExpressionContext will not have the let variables defined - we expect the + // 'makeQueryStatsKey' call to do that. + auto opCtx = makeOperationContext(); + auto fcr = std::make_unique( + NamespaceStringOrUUID(NamespaceString::createNamespaceString_forTest("testDB.testColl"))); + fcr->setLet(BSON("var" << 2)); + fcr->setFilter(fromjson("{$expr: [{$eq: ['$a', '$$var']}]}")); + fcr->setProjection(fromjson("{varIs: '$$var'}")); + + const auto cmdObj = fcr->toBSON(BSON("$db" + << "testDB")); + auto&& [expCtx, parsedFind] = + uassertStatusOK(parsed_find_command::parse(opCtx.get(), std::move(fcr))); + auto queryShape = query_shape::extractQueryShape( + *parsedFind, SerializationOptions::kRepresentativeQueryShapeSerializeOptions, expCtx); + QueryStatsEntry testMetrics{std::make_unique( + expCtx, *parsedFind, queryShape, collectionType)}; + + auto hmacApplied = + testMetrics.computeQueryStatsKey(opCtx.get(), TransformAlgorithmEnum::kNone, std::string{}); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "testDB", + "coll": "testColl" + }, + "command": "find", + "filter": { + "$expr": [ + { + "$eq": [ + "$a", + "$$var" + ] + } + ] + }, + "let": { + "var": "?number" + }, + "projection": { + "varIs": "$$var", + "_id": true + } + }, + "collectionType": "collection" + })", + hmacApplied); + + // Now be sure hmac is applied to variable names. We don't currently expose a different way to + // do the hashing, so we'll just stick with the big long strings here for now. 
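// --- Illustrative aside (not part of this patch) -----------------------------
// The long base64 strings in the expected document below are HMAC-SHA-256
// digests of the field and variable names, so identifiers are tokenized
// deterministically without exposing the user-chosen names. The stand-in below
// uses a keyed FNV-1a hash purely to keep the sketch dependency-free; it is not
// the server's algorithm, and all names here are hypothetical.
#include <cstdint>
#include <string>

namespace query_stats_sketch {

inline std::string tokenizeIdentifier(const std::string& name, const std::string& key) {
    std::uint64_t h = 1469598103934665603ULL;  // FNV-1a offset basis.
    auto mix = [&h](unsigned char c) {
        h ^= c;
        h *= 1099511628211ULL;  // FNV-1a prime.
    };
    for (unsigned char c : key)
        mix(c);
    for (unsigned char c : name)
        mix(c);
    return "TOKEN<" + std::to_string(h) + ">";
}

}  // namespace query_stats_sketch
// The important property mirrored here is determinism per key: every reference
// to the same identifier (e.g. the let variable "var") maps to the same token,
// so the transformed shape still reads coherently even though the original
// names are gone.
// -----------------------------------------------------------------------------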
+ hmacApplied = testMetrics.computeQueryStatsKey( + opCtx.get(), TransformAlgorithmEnum::kHmacSha256, std::string{}); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "IyuPUD33jXD1td/VA/JyhbOPYY0MdGkXgdExniXmCyg=", + "coll": "QFhYnXorzWDLwH/wBgpXxp8fkfsZKo4n2cIN/O0uf/c=" + }, + "command": "find", + "filter": { + "$expr": [ + { + "$eq": [ + "$lhWpXUozYRjENbnNVMXoZEq5VrVzqikmJ0oSgLZnRxM=", + "$$adaJc6H3zDirh5/52MLv5yvnb6nXNP15Z4HzGfumvx8=" + ] + } + ] + }, + "let": { + "adaJc6H3zDirh5/52MLv5yvnb6nXNP15Z4HzGfumvx8=": "?number" + }, + "projection": { + "BL649QER7lTs0+8ozTMVNAa6JNjbhf57YT8YQ4EkT1E=": "$$adaJc6H3zDirh5/52MLv5yvnb6nXNP15Z4HzGfumvx8=", + "ljovqLSfuj6o2syO1SynOzHQK1YVij6+Wlx1fL8frUo=": true + } + }, + "collectionType": "collection" + })", + hmacApplied); +} + +TEST_F(QueryStatsStoreTest, CorrectlyTokenizesAggregateCommandRequestAllFieldsSimplePipeline) { + auto expCtx = make_intrusive(); + AggregateCommandRequest acr(NamespaceString::createNamespaceString_forTest("testDB.testColl")); + auto matchStage = fromjson(R"({ + $match: { + foo: { $in: ["a", "b"] }, + bar: { $gte: { $date: "2022-01-01T00:00:00Z" } } + } + })"); + auto unwindStage = fromjson("{$unwind: '$x'}"); + auto groupStage = fromjson(R"({ + $group: { + _id: "$_id", + c: { $first: "$d.e" }, + f: { $sum: 1 } + } + })"); + auto limitStage = fromjson("{$limit: 10}"); + auto outStage = fromjson(R"({$out: 'outColl'})"); + auto rawPipeline = {matchStage, unwindStage, groupStage, limitStage, outStage}; + acr.setPipeline(rawPipeline); + auto pipeline = Pipeline::parse(rawPipeline, expCtx); + + auto shapified = makeTelemetryKeyAggregateRequest(acr, *pipeline, expCtx); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "testDB", + "coll": "testColl" + }, + "command": "aggregate", + "pipeline": [ + { + "$match": { + "foo": { + "$in": [ + "a", + "b" + ] + }, + "bar": { + "$gte": {"$date":"2022-01-01T00:00:00.000Z"} + } + } + }, + { + "$unwind": { + "path": "$x" + } + }, + { + "$group": { + "_id": "$_id", + "c": { + "$first": "$d.e" + }, + "f": { + "$sum": { + "$const": 1 + } + } + } + }, + { + "$limit": 10 + }, + { + "$out": { + "coll": "outColl", + "db": "test" + } + } + ] + }, + "collectionType": "collection" + })", + shapified); + + shapified = makeTelemetryKeyAggregateRequest( + acr, *pipeline, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "aggregate", + "pipeline": [ + { + "$match": { + "$and": [ + { + "HASH": { + "$in": "?array" + } + }, + { + "HASH": { + "$gte": "?date" + } + } + ] + } + }, + { + "$unwind": { + "path": "$HASH" + } + }, + { + "$group": { + "_id": "$HASH<_id>", + "HASH": { + "$first": "$HASH.HASH" + }, + "HASH": { + "$sum": "?number" + } + } + }, + { + "$limit": "?number" + }, + { + "$out": { + "coll": "HASH", + "db": "HASH" + } + } + ] + }, + "collectionType": "collection" + })", + shapified); + + // Add the fields that shouldn't be abstracted. 
+ acr.setExplain(ExplainOptions::Verbosity::kExecStats); + acr.setAllowDiskUse(false); + acr.setHint(BSON("z" << 1 << "c" << 1)); + acr.setCollation(BSON("locale" + << "simple")); + shapified = makeTelemetryKeyAggregateRequest( + acr, *pipeline, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "aggregate", + "pipeline": [ + { + "$match": { + "$and": [ + { + "HASH": { + "$in": "?array" + } + }, + { + "HASH": { + "$gte": "?date" + } + } + ] + } + }, + { + "$unwind": { + "path": "$HASH" + } + }, + { + "$group": { + "_id": "$HASH<_id>", + "HASH": { + "$first": "$HASH.HASH" + }, + "HASH": { + "$sum": "?number" + } + } + }, + { + "$limit": "?number" + }, + { + "$out": { + "coll": "HASH", + "db": "HASH" + } + } + ], + "explain": true, + "allowDiskUse": false, + "collation": { + "locale": "simple" + }, + "hint": { + "HASH": 1, + "HASH": 1 + } + }, + "collectionType": "collection" + })", + shapified); + + // Add let. + acr.setLet(BSON("var1" + << "$foo" + << "var2" + << "bar")); + shapified = makeTelemetryKeyAggregateRequest( + acr, *pipeline, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "aggregate", + "pipeline": [ + { + "$match": { + "$and": [ + { + "HASH": { + "$in": "?array" + } + }, + { + "HASH": { + "$gte": "?date" + } + } + ] + } + }, + { + "$unwind": { + "path": "$HASH" + } + }, + { + "$group": { + "_id": "$HASH<_id>", + "HASH": { + "$first": "$HASH.HASH" + }, + "HASH": { + "$sum": "?number" + } + } + }, + { + "$limit": "?number" + }, + { + "$out": { + "coll": "HASH", + "db": "HASH" + } + } + ], + "explain": true, + "allowDiskUse": false, + "collation": { + "locale": "simple" + }, + "hint": { + "HASH": 1, + "HASH": 1 + }, + "let": { + "HASH": "$HASH", + "HASH": "?string" + } + }, + "collectionType": "collection" + })", + shapified); + + // Add the fields that should be abstracted. 
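// --- Illustrative aside (not part of this patch) -----------------------------
// The two comment markers in this test ("shouldn't be abstracted" above,
// "should be abstracted" here) reflect a per-field policy: options that change
// the semantics of the command (explain, allowDiskUse, collation, hint) are
// kept in the shape, while per-execution knobs (batchSize, maxTimeMS,
// bypassDocumentValidation, comment) are reduced to type placeholders. One
// compact way to express such a policy table, with hypothetical names:
#include <map>
#include <string>

namespace query_stats_sketch {

enum class FieldPolicy { kPreserveVerbatim, kShapifyLiteral };

inline const std::map<std::string, FieldPolicy>& aggregateOptionPolicy() {
    static const std::map<std::string, FieldPolicy> kPolicy{
        {"explain", FieldPolicy::kPreserveVerbatim},
        {"allowDiskUse", FieldPolicy::kPreserveVerbatim},
        {"collation", FieldPolicy::kPreserveVerbatim},
        {"hint", FieldPolicy::kPreserveVerbatim},
        {"cursor.batchSize", FieldPolicy::kShapifyLiteral},
        {"maxTimeMS", FieldPolicy::kShapifyLiteral},
        {"bypassDocumentValidation", FieldPolicy::kShapifyLiteral},
        {"comment", FieldPolicy::kShapifyLiteral},
    };
    return kPolicy;
}

}  // namespace query_stats_sketch
// The expected key after the next block of setters follows this split: hint
// appears with hashed field names but real values, collation is kept verbatim,
// whereas batchSize and maxTimeMS show up as "?number",
// bypassDocumentValidation as "?bool", and the comment as "?string".
// -----------------------------------------------------------------------------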
+ auto cursorOptions = SimpleCursorOptions(); + cursorOptions.setBatchSize(10); + acr.setCursor(cursorOptions); + acr.setMaxTimeMS(500); + acr.setBypassDocumentValidation(true); + expCtx->opCtx->setComment(BSON("comment" + << "note to self")); + shapified = makeTelemetryKeyAggregateRequest( + acr, *pipeline, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "aggregate", + "pipeline": [ + { + "$match": { + "$and": [ + { + "HASH": { + "$in": "?array" + } + }, + { + "HASH": { + "$gte": "?date" + } + } + ] + } + }, + { + "$unwind": { + "path": "$HASH" + } + }, + { + "$group": { + "_id": "$HASH<_id>", + "HASH": { + "$first": "$HASH.HASH" + }, + "HASH": { + "$sum": "?number" + } + } + }, + { + "$limit": "?number" + }, + { + "$out": { + "coll": "HASH", + "db": "HASH" + } + } + ], + "explain": true, + "allowDiskUse": false, + "collation": { + "locale": "simple" + }, + "hint": { + "HASH": 1, + "HASH": 1 + }, + "let": { + "HASH": "$HASH", + "HASH": "?string" + } + }, + "cursor": { + "batchSize": "?number" + }, + "maxTimeMS": "?number", + "bypassDocumentValidation": "?bool", + "comment": "?string", + "collectionType": "collection" + })", + shapified); +} +TEST_F(QueryStatsStoreTest, CorrectlyTokenizesAggregateCommandRequestEmptyFields) { + auto expCtx = make_intrusive(); + AggregateCommandRequest acr(NamespaceString::createNamespaceString_forTest("testDB.testColl")); + acr.setPipeline({}); + auto pipeline = Pipeline::parse({}, expCtx); + + auto shapified = makeTelemetryKeyAggregateRequest( + acr, *pipeline, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "aggregate", + "pipeline": [] + }, + "collectionType": "collection" + })", + shapified); // NOLINT (test auto-update) +} + +TEST_F(QueryStatsStoreTest, + CorrectlyTokenizesAggregateCommandRequestPipelineWithSecondaryNamespaces) { + auto expCtx = make_intrusive(); + auto nsToUnionWith = + NamespaceString::createNamespaceString_forTest(expCtx->ns.dbName(), "otherColl"); + expCtx->addResolvedNamespaces({nsToUnionWith}); + + AggregateCommandRequest acr( + NamespaceString::createNamespaceString_forTest(expCtx->ns.dbName(), "testColl")); + auto unionWithStage = fromjson(R"({ + $unionWith: { + coll: "otherColl", + pipeline: [{$match: {val: "foo"}}] + } + })"); + auto sortStage = fromjson("{$sort: {age: 1}}"); + auto rawPipeline = {unionWithStage, sortStage}; + acr.setPipeline(rawPipeline); + auto pipeline = Pipeline::parse(rawPipeline, expCtx); + + auto shapified = makeTelemetryKeyAggregateRequest( + acr, *pipeline, expCtx, true, LiteralSerializationPolicy::kToDebugTypeString); + ASSERT_BSONOBJ_EQ_AUTO( // NOLINT + R"({ + "queryShape": { + "cmdNs": { + "db": "HASH", + "coll": "HASH" + }, + "command": "aggregate", + "pipeline": [ + { + "$unionWith": { + "coll": "HASH", + "pipeline": [ + { + "$match": { + "HASH": { + "$eq": "?string" + } + } + } + ] + } + }, + { + "$sort": { + "HASH": 1 + } + } + ] + }, + "otherNss": [ + { + "db": "HASH", + "coll": "HASH" + } + ], + "collectionType": "collection" + })", + shapified); +} +} // namespace mongo::query_stats diff --git a/src/mongo/db/query/query_stats_transform_algorithm.idl b/src/mongo/db/query/query_stats_transform_algorithm.idl new file mode 100644 index 0000000000000..cd0a5ba43dbec --- /dev/null +++ 
b/src/mongo/db/query/query_stats_transform_algorithm.idl @@ -0,0 +1,37 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# +global: + cpp_namespace: "mongo" + +enums: + TransformAlgorithm: + description: "The type of algorithm to be used for the transformIdentifiers field of $queryStats." + type: string + values: + kHmacSha256: "hmac-sha-256" + kNone: "none" diff --git a/src/mongo/db/query/query_stats_util.cpp b/src/mongo/db/query/query_stats_util.cpp new file mode 100644 index 0000000000000..b126e8b9a0ffe --- /dev/null +++ b/src/mongo/db/query/query_stats_util.cpp @@ -0,0 +1,101 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/query/query_stats_util.h" + +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/db/client.h" +#include "mongo/db/query/util/memory_util.h" +#include "mongo/db/service_context.h" + + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery + +namespace mongo::query_stats_util { + +namespace { +/** + * Given the current 'Client', returns a pointer to the 'ServiceContext' and an interface for + * updating the queryStats store. + */ +std::pair getUpdater(const Client& client) { + auto serviceCtx = client.getServiceContext(); + tassert(7106500, "ServiceContext must be non null", serviceCtx); + + auto updater = queryStatsStoreOnParamChangeUpdater(serviceCtx).get(); + tassert(7106501, "Telemetry store size updater must be non null", updater); + return {serviceCtx, updater}; +} +} // namespace + + +Status onQueryStatsStoreSizeUpdate(const std::string& str) { + auto newSize = memory_util::MemorySize::parse(str); + if (!newSize.isOK()) { + return newSize.getStatus(); + } + + // The client is nullptr if the parameter is supplied from the command line. In this case, we + // ignore the update event, the parameter will be processed when initializing the service + // context. + if (auto client = Client::getCurrent()) { + auto&& [serviceCtx, updater] = getUpdater(*client); + updater->updateCacheSize(serviceCtx, newSize.getValue()); + } + + return Status::OK(); +} + +Status validateQueryStatsStoreSize(const std::string& str, const boost::optional&) { + return memory_util::MemorySize::parse(str).getStatus(); +} + +Status onQueryStatsSamplingRateUpdate(int samplingRate) { + // The client is nullptr if the parameter is supplied from the command line. In this case, we + // ignore the update event, the parameter will be processed when initializing the service + // context. + if (auto client = Client::getCurrent()) { + auto&& [serviceCtx, updater] = getUpdater(*client); + updater->updateSamplingRate(serviceCtx, samplingRate < 0 ? INT_MAX : samplingRate); + } + + return Status::OK(); +} + +const Decorable::Decoration> + queryStatsStoreOnParamChangeUpdater = + ServiceContext::declareDecoration>(); +} // namespace mongo::query_stats_util diff --git a/src/mongo/db/query/query_stats_util.h b/src/mongo/db/query/query_stats_util.h new file mode 100644 index 0000000000000..e8766f0c54e1f --- /dev/null +++ b/src/mongo/db/query/query_stats_util.h @@ -0,0 +1,101 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. 
If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/query/partitioned_cache.h" +#include "mongo/db/query/util/memory_util.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" + + +namespace mongo::query_stats_util { + +Status onQueryStatsStoreSizeUpdate(const std::string& str); + + +Status validateQueryStatsStoreSize(const std::string& str, const boost::optional&); + +Status onQueryStatsSamplingRateUpdate(int samplingRate); + +/** + * An interface used to modify the queryStats store when query setParameters are modified. This is + * done via an interface decorating the 'ServiceContext' in order to avoid a link-time dependency + * of the query knobs library on the queryStats code. + */ +class OnParamChangeUpdater { +public: + virtual ~OnParamChangeUpdater() = default; + + /** + * Resizes the queryStats store decorating 'serviceCtx' to the new size given by 'memSize'. If + * the new size is smaller than the old, cache entries are evicted in order to ensure the + * cache fits within the new size bound. + */ + virtual void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) = 0; + + /** + * Updates the sampling rate for the queryStats rate limiter. + */ + virtual void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) = 0; +}; + +/** + * A stub implementation that does not allow changing any parameters - to be used if the queryStats + * store is disabled and cannot be re-enabled without restarting, as with a feature flag. + */ +class NoChangesAllowedTelemetryParamUpdater : public OnParamChangeUpdater { +public: + void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final { + uasserted(7373500, + "Cannot configure queryStats store - it is currently disabled and a restart is " + "required to activate."); + } + + void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) { + uasserted(7506200, + "Cannot configure queryStats store - it is currently disabled and a restart is " + "required to activate."); + } +}; + +/** + * Decorated accessor to the 'OnParamChangeUpdater' stored in 'ServiceContext'. + */ +extern const Decorable::Decoration> + queryStatsStoreOnParamChangeUpdater; +} // namespace mongo::query_stats_util diff --git a/src/mongo/db/query/query_test_service_context.cpp b/src/mongo/db/query/query_test_service_context.cpp index e4ee1b40c491c..9372faba10666 100644 --- a/src/mongo/db/query/query_test_service_context.cpp +++ b/src/mongo/db/query/query_test_service_context.cpp @@ -27,22 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/query_test_service_context.h" - #include -#include "mongo/db/concurrency/locker_noop_client_observer.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_factory_mock.h" +#include "mongo/db/query/query_test_service_context.h" namespace mongo { QueryTestServiceContext::QueryTestServiceContext() : _service(ServiceContext::make()), _client(_service->makeClient("query_test")) { - _service->registerClientObserver( - std::make_unique()); CollatorFactoryInterface::set(getServiceContext(), std::make_unique()); } diff --git a/src/mongo/db/query/query_test_service_context.h b/src/mongo/db/query/query_test_service_context.h index e83901a3fdd07..4b7b2a5839770 100644 --- a/src/mongo/db/query/query_test_service_context.h +++ b/src/mongo/db/query/query_test_service_context.h @@ -32,6 +32,7 @@ #include "mongo/db/client.h" #include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" namespace mongo { diff --git a/src/mongo/db/query/query_utils.cpp b/src/mongo/db/query/query_utils.cpp index 573f1d53d050d..3848063cec626 100644 --- a/src/mongo/db/query/query_utils.cpp +++ b/src/mongo/db/query/query_utils.cpp @@ -30,7 +30,24 @@ #include "mongo/db/query/query_utils.h" +#include +#include + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/sbe/match_path.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/projection.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -59,52 +76,36 @@ bool isIdHackEligibleQuery(const CollectionPtr& collection, const CanonicalQuery } bool isQuerySbeCompatible(const CollectionPtr* collection, const CanonicalQuery* cq) { - tassert(6071400, "Expected CanonicalQuery pointer to not be nullptr", cq); - invariant(cq); + tassert(6071400, + "Expected CanonicalQuery and Collection pointer to not be nullptr", + cq && collection); auto expCtx = cq->getExpCtxRaw(); - const auto& sortPattern = cq->getSortPattern(); - const bool allExpressionsSupported = - expCtx && expCtx->sbeCompatibility != SbeCompatibility::notCompatible; - const auto nss = cq->nss(); - const bool isNotOplog = !nss.isOplog(); - const bool isNotChangeCollection = !nss.isChangeCollection(); - const bool doesNotContainMetadataRequirements = cq->metadataDeps().none(); - const bool doesNotSortOnMetaOrPathWithNumericComponents = - !sortPattern || std::all_of(sortPattern->begin(), sortPattern->end(), [](auto&& part) { - return part.fieldPath && - !sbe::MatchPath(part.fieldPath->fullPath()).hasNumericPathComponents(); - }); - - // Queries against a time-series collection are not currently supported by SBE. - const bool isQueryNotAgainstTimeseriesCollection = !nss.isTimeseriesBucketsCollection(); - // Queries against a clustered collection are not currently supported by SBE. 
- tassert(6038600, "Expected CollectionPtr to not be nullptr", collection); - const bool isQueryNotAgainstClusteredCollection = - !(collection->get() && collection->get()->isClustered()); + // If we don't support all expressions used or the query is eligible for IDHack, don't use SBE. + if (!expCtx || expCtx->sbeCompatibility == SbeCompatibility::notCompatible || + (*collection && isIdHackEligibleQuery(*collection, *cq))) { + return false; + } const auto* proj = cq->getProj(); + if (proj && (proj->requiresMatchDetails() || proj->containsElemMatch())) { + return false; + } - const bool doesNotRequireMatchDetails = !proj || !proj->requiresMatchDetails(); - - const bool doesNotHaveElemMatchProject = !proj || !proj->containsElemMatch(); - - const bool isNotInnerSideOfLookup = !(expCtx && expCtx->inLookup); - - return allExpressionsSupported && doesNotContainMetadataRequirements && - isQueryNotAgainstTimeseriesCollection && isQueryNotAgainstClusteredCollection && - doesNotSortOnMetaOrPathWithNumericComponents && isNotOplog && doesNotRequireMatchDetails && - doesNotHaveElemMatchProject && isNotChangeCollection && isNotInnerSideOfLookup && - !(*collection && isIdHackEligibleQuery(*collection, *cq)); -} - -bool isQueryPlanSbeCompatible(const QuerySolution* root) { - tassert(7061701, "Expected QuerySolution pointer to not be nullptr", root); - - // TODO SERVER-52958: Add support in the SBE stage builders for the COUNT_SCAN stage. - const bool isNotCountScan = !root->hasNode(StageType::STAGE_COUNT_SCAN); + // Queries against the oplog, a change collection, or a time-series collection are not + // supported. Also queries on the inner side of a $lookup are not considered for SBE. + const auto& nss = cq->nss(); + if (expCtx->inLookup || nss.isOplog() || nss.isChangeCollection() || + nss.isTimeseriesBucketsCollection() || !cq->metadataDeps().none()) { + return false; + } - return isNotCountScan; + const auto& sortPattern = cq->getSortPattern(); + // If the sort has meta or numeric path components, we cannot use SBE. + return !sortPattern || std::all_of(sortPattern->begin(), sortPattern->end(), [](auto&& part) { + return part.fieldPath && + !sbe::MatchPath(part.fieldPath->fullPath()).hasNumericPathComponents(); + }); } } // namespace mongo diff --git a/src/mongo/db/query/query_utils.h b/src/mongo/db/query/query_utils.h index 55a5e069ad336..440966af9d8ea 100644 --- a/src/mongo/db/query/query_utils.h +++ b/src/mongo/db/query/query_utils.h @@ -31,6 +31,7 @@ #include "mongo/db/catalog/collection.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/sort_pattern.h" namespace mongo { /** @@ -56,14 +57,4 @@ bool isIdHackEligibleQuery(const CollectionPtr& collection, const CanonicalQuery * planning can be short-circuited as it is already known that the query is ineligible for SBE. */ bool isQuerySbeCompatible(const CollectionPtr* collection, const CanonicalQuery* cq); - -/** - * Checks if the given query can be executed with the SBE engine based on the query solution. - * - * This method determines whether the query may be compatible with SBE based on the query solution - * (such as ineligible plan stages). It should be used in conjunction with the higher level - * isQuerySbeCompatible() check to ensure that all aspects of the query are validated for - * compatibility. 
- */ -bool isQueryPlanSbeCompatible(const QuerySolution* root); } // namespace mongo diff --git a/src/mongo/db/query/rate_limiting.cpp b/src/mongo/db/query/rate_limiting.cpp index 3aee39facae4b..0fe77c1f39d74 100644 --- a/src/mongo/db/query/rate_limiting.cpp +++ b/src/mongo/db/query/rate_limiting.cpp @@ -29,6 +29,8 @@ #include "rate_limiting.h" +#include "mongo/util/system_clock_source.h" + namespace mongo { RateLimiting::RateLimiting(RequestCount samplingRate, Milliseconds timePeriod) : _clockSource(SystemClockSource::get()), diff --git a/src/mongo/db/query/rate_limiting.h b/src/mongo/db/query/rate_limiting.h index 67b8a7fc106da..1598ba330c873 100644 --- a/src/mongo/db/query/rate_limiting.h +++ b/src/mongo/db/query/rate_limiting.h @@ -29,8 +29,14 @@ #pragma once +#include +#include + +#include "mongo/platform/atomic_word.h" #include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/system_clock_source.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/query/rate_limiting_test.cpp b/src/mongo/db/query/rate_limiting_test.cpp index 2d9ef35647a5c..e1f5c459cce23 100644 --- a/src/mongo/db/query/rate_limiting_test.cpp +++ b/src/mongo/db/query/rate_limiting_test.cpp @@ -28,7 +28,12 @@ */ #include "mongo/db/query/rate_limiting.h" -#include "mongo/unittest/unittest.h" + +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/query/sbe_and_hash_test.cpp b/src/mongo/db/query/sbe_and_hash_test.cpp index 03533faf2923a..afc6733a1b6b6 100644 --- a/src/mongo/db/query/sbe_and_hash_test.cpp +++ b/src/mongo/db/query/sbe_and_hash_test.cpp @@ -27,13 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/exec/shard_filterer_mock.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" #include "mongo/db/query/sbe_stage_builder_test_fixture.h" -#include "mongo/db/query/shard_filterer_factory_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/shard_filterer_factory_interface.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -68,7 +78,7 @@ class SbeAndHashTest : public SbeStageBuilderTestFixture { buildPlanStage(std::move(querySolution), false, nullptr); // Prepare the sbe::PlanStage for execution and collect all results. - auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots); auto [resultsTag, resultsVal] = getAllResults(stage.get(), resultAccessors[0]); sbe::value::ValueGuard resultGuard{resultsTag, resultsVal}; diff --git a/src/mongo/db/query/sbe_and_sorted_test.cpp b/src/mongo/db/query/sbe_and_sorted_test.cpp index dfdc2c13459ca..e4484fb89c4eb 100644 --- a/src/mongo/db/query/sbe_and_sorted_test.cpp +++ b/src/mongo/db/query/sbe_and_sorted_test.cpp @@ -27,11 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" #include "mongo/db/query/sbe_stage_builder_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/shard_filterer_factory_interface.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -66,7 +78,7 @@ class SbeAndSortedTest : public SbeStageBuilderTestFixture { buildPlanStage(std::move(querySolution), false, nullptr); // Prepare the sbe::PlanStage for execution and collect all results. - auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots); auto [resultsTag, resultsVal] = getAllResults(stage.get(), resultAccessors[0]); sbe::value::ValueGuard resultGuard{resultsTag, resultsVal}; diff --git a/src/mongo/db/query/sbe_cached_solution_planner.cpp b/src/mongo/db/query/sbe_cached_solution_planner.cpp index 9f551b809e9f6..18c16efbbbec5 100644 --- a/src/mongo/db/query/sbe_cached_solution_planner.cpp +++ b/src/mongo/db/query/sbe_cached_solution_planner.cpp @@ -27,22 +27,44 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/sbe_cached_solution_planner.h" +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/plan_cache_util.h" #include "mongo/db/exec/sbe/stages/plan_stats.h" -#include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/exec/trial_period_utils.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/get_executor.h" +#include "mongo/db/query/optimizer/explain_interface.h" +#include "mongo/db/query/plan_cache.h" #include "mongo/db/query/plan_cache_key_factory.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_explainer_factory.h" +#include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/planner_analysis.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/sbe_cached_solution_planner.h" #include "mongo/db/query/sbe_multi_planner.h" +#include "mongo/db/query/sbe_plan_cache.h" #include "mongo/db/query/stage_builder_util.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/query/util/make_data_structure.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -58,7 +80,8 @@ CandidatePlans CachedSolutionPlanner::plan( auto secondaryCollectionsInfo = fillOutSecondaryCollectionsInformation(_opCtx, _collections, &_cq); - for (const auto& foreignCollection : roots[0].second.foreignHashJoinCollections) { + for (const auto& foreignCollection : + roots[0].second.staticData->foreignHashJoinCollections) { const 
auto collectionInfo = secondaryCollectionsInfo.find(foreignCollection); tassert(6693500, "Foreign collection must be present in the collections info", @@ -67,7 +90,8 @@ CandidatePlans CachedSolutionPlanner::plan( if (!QueryPlannerAnalysis::isEligibleForHashJoin(collectionInfo->second)) { return replan(/* shouldCache */ true, - str::stream() << "Foreign collection " << foreignCollection + str::stream() << "Foreign collection " + << foreignCollection.toStringForErrorMsg() << " is not eligible for hash join anymore"); } } @@ -98,17 +122,15 @@ CandidatePlans CachedSolutionPlanner::plan( std::move(roots[0].second), maxReadsBeforeReplan); - auto explainer = plan_explainer_factory::make( - candidate.root.get(), - &candidate.data.stageData, - candidate.solution.get(), - {}, /* optimizedData */ - {}, /* rejectedCandidates */ - false, /* isMultiPlan */ - true, /* isFromPlanCache */ - candidate.data.stageData.debugInfo ? std::make_unique( - *candidate.data.stageData.debugInfo) - : nullptr); + tassert(6488200, "'debugInfo' should be initialized", candidate.data.stageData.debugInfo); + auto explainer = plan_explainer_factory::make(candidate.root.get(), + &candidate.data.stageData, + candidate.solution.get(), + {}, /* optimizedData */ + {}, /* rejectedCandidates */ + false, /* isMultiPlan */ + true, /* isFromPlanCache */ + candidate.data.stageData.debugInfo); if (!candidate.status.isOK()) { // On failure, fall back to replanning the whole query. We neither evict the existing cache diff --git a/src/mongo/db/query/sbe_cached_solution_planner.h b/src/mongo/db/query/sbe_cached_solution_planner.h index 5070c4f25c8f9..72d97c8edbbc2 100644 --- a/src/mongo/db/query/sbe_cached_solution_planner.h +++ b/src/mongo/db/query/sbe_cached_solution_planner.h @@ -29,10 +29,24 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/all_indices_required_checker.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_yield_policy_sbe.h" #include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_plan_ranker.h" #include "mongo/db/query/sbe_runtime_planner.h" +#include "mongo/db/query/sbe_stage_builder.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/query/sbe_expression_bm.cpp b/src/mongo/db/query/sbe_expression_bm.cpp index c7df43db689d3..6d0e0e0e24867 100644 --- a/src/mongo/db/query/sbe_expression_bm.cpp +++ b/src/mongo/db/query/sbe_expression_bm.cpp @@ -27,17 +27,46 @@ * it in the license file. 
*/ +#include #include - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/stages/bson_scan.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_bm_fixture.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/query/query_test_service_context.h" #include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/query/sbe_stage_builder_eval_frame.h" #include "mongo/db/query/sbe_stage_builder_expression.h" - +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -48,21 +77,20 @@ std::string debugPrint(const T* sbeElement) { return sbeElement ? sbe::DebugPrinter{}.print(sbeElement->debugPrint()) : nullptr; } -const NamespaceString kNss{"test.bm"}; +const NamespaceString kNss = NamespaceString::createNamespaceString_forTest("test.bm"); class SbeExpressionBenchmarkFixture : public ExpressionBenchmarkFixture { public: - SbeExpressionBenchmarkFixture() : _planStageData(std::make_unique()) { - _inputSlotId = _planStageData.env->registerSlot( + SbeExpressionBenchmarkFixture() : _env(std::make_unique()) { + _inputSlotId = _env->registerSlot( "input"_sd, sbe::value::TypeTags::Nothing, 0, false, &_slotIdGenerator); _timeZoneDB = std::make_unique(); - _planStageData.env->registerSlot( - "timeZoneDB"_sd, - sbe::value::TypeTags::timeZoneDB, - sbe::value::bitcastFrom(_timeZoneDB.get()), - false, - &_slotIdGenerator); - _inputSlotAccessor = _planStageData.env->getAccessor(_inputSlotId); + _env->registerSlot("timeZoneDB"_sd, + sbe::value::TypeTags::timeZoneDB, + sbe::value::bitcastFrom(_timeZoneDB.get()), + false, + &_slotIdGenerator); + _inputSlotAccessor = _env->getAccessor(_inputSlotId); } void benchmarkExpression(BSONObj expressionSpec, @@ -97,7 +125,8 @@ class SbeExpressionBenchmarkFixture : public ExpressionBenchmarkFixture { stage_builder::StageBuilderState state{ opCtx.get(), - &_planStageData, + _env, + _planStageData.get(), _variables, &_slotIdGenerator, &_frameIdGenerator, @@ -113,17 +142,17 @@ class SbeExpressionBenchmarkFixture : public ExpressionBenchmarkFixture { "sbe expression benchmark PlanStage", "stage"_attr = debugPrint(stage.get())); - auto expr = evalExpr.extractExpr(state.slotVarMap, *_planStageData.env); + auto expr = evalExpr.extractExpr(state.slotVarMap, *_env); LOGV2_DEBUG(6979802, 1, "sbe expression benchmark EExpression", "expression"_attr = debugPrint(expr.get())); stage->attachToOperationContext(opCtx.get()); - stage->prepare(_planStageData.ctx); + stage->prepare(_env.ctx); - 
_planStageData.ctx.root = stage.get(); - sbe::vm::CodeFragment code = expr->compileDirect(_planStageData.ctx); + _env.ctx.root = stage.get(); + sbe::vm::CodeFragment code = expr->compileDirect(_env.ctx); sbe::vm::ByteCode vm; stage->open(/*reopen =*/false); for (auto keepRunning : benchmarkState) { @@ -154,7 +183,8 @@ class SbeExpressionBenchmarkFixture : public ExpressionBenchmarkFixture { } } - stage_builder::PlanStageData _planStageData; + stage_builder::PlanStageEnvironment _env; + std::unique_ptr _planStageData; Variables _variables; sbe::value::SlotIdGenerator _slotIdGenerator; sbe::value::FrameIdGenerator _frameIdGenerator; diff --git a/src/mongo/db/query/sbe_multi_planner.cpp b/src/mongo/db/query/sbe_multi_planner.cpp index 714fd4ae0d4b4..904fd8eb5d7e7 100644 --- a/src/mongo/db/query/sbe_multi_planner.cpp +++ b/src/mongo/db/query/sbe_multi_planner.cpp @@ -27,19 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/sbe_multi_planner.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/exec/histogram_server_status_metric.h" -#include "mongo/db/exec/sbe/expressions/expression.h" -#include "mongo/db/exec/sbe/values/bson.h" -#include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" +#include "mongo/db/exec/trial_period_utils.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/query/plan_cache_debug_info.h" +#include "mongo/db/query/plan_explainer.h" +#include "mongo/db/query/plan_explainer_factory.h" +#include "mongo/db/query/plan_ranker.h" #include "mongo/db/query/plan_ranker_util.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/sbe_multi_planner.h" #include "mongo/db/query/stage_builder_util.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/tick_source.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -292,15 +316,20 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans( // explain operation, save the stats from the old tree before we discard it. if (_cq.getExplain()) { winner.data.stageData.savedStatsOnEarlyExit = - winner.root->getStats(true /* includeDebugInfo */); + winner.root->getStats(true /* includeDebugInfo */); } winner.root = winner.clonedPlan->first->clone(); - stage_builder::prepareSlotBasedExecutableTree( - _opCtx, winner.root.get(), &winner.data.stageData, _cq, _collections, _yieldPolicy); + stage_builder::prepareSlotBasedExecutableTree(_opCtx, + winner.root.get(), + &winner.data.stageData, + _cq, + _collections, + _yieldPolicy, + false /* preparingFromCache */); // Clear the results queue. winner.results = {}; - winner.root->open(false); + winner.root->open(false /* reOpen*/); } // Extend the winning candidate with the agg pipeline and rebuild the execution tree. 
Because @@ -322,8 +351,13 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans( // cache prior to preparation, whereas the original copy of the tree will be prepared and // used to execute this query. auto clonedPlan = std::make_pair(rootStage->clone(), plan_ranker::CandidatePlanData{data}); - stage_builder::prepareSlotBasedExecutableTree( - _opCtx, rootStage.get(), &data, _cq, _collections, _yieldPolicy); + stage_builder::prepareSlotBasedExecutableTree(_opCtx, + rootStage.get(), + &data, + _cq, + _collections, + _yieldPolicy, + false /* preparingFromCache */); candidates[winnerIdx] = sbe::plan_ranker::CandidatePlan{std::move(solution), std::move(rootStage), @@ -347,7 +381,7 @@ CandidatePlans MultiPlanner::finalizeExecutionPlans( } // Writes a cache entry for the winning plan to the plan cache if possible. - plan_cache_util::updatePlanCache( + plan_cache_util::updatePlanCacheFromCandidates( _opCtx, _collections, _cachingMode, _cq, std::move(decision), candidates); return {std::move(candidates), winnerIdx}; diff --git a/src/mongo/db/query/sbe_multi_planner.h b/src/mongo/db/query/sbe_multi_planner.h index 61685a2f09e9e..ddfa49fb6f76c 100644 --- a/src/mongo/db/query/sbe_multi_planner.h +++ b/src/mongo/db/query/sbe_multi_planner.h @@ -29,10 +29,26 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/db/exec/plan_cache_util.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_ranker.h" +#include "mongo/db/query/plan_ranking_decision.h" +#include "mongo/db/query/plan_yield_policy_sbe.h" +#include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sbe_plan_ranker.h" #include "mongo/db/query/sbe_runtime_planner.h" +#include "mongo/db/query/sbe_stage_builder.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/query/sbe_plan_cache.cpp b/src/mongo/db/query/sbe_plan_cache.cpp index 2129554a2d914..a22920177eabd 100644 --- a/src/mongo/db/query/sbe_plan_cache.cpp +++ b/src/mongo/db/query/sbe_plan_cache.cpp @@ -30,10 +30,20 @@ #include "mongo/db/query/sbe_plan_cache.h" +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/sbe_plan_cache_on_parameter_change.h" #include "mongo/db/query/util/memory_util.h" -#include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/decorable.h" #include "mongo/util/processinfo.h" +#include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/sbe_plan_cache.h b/src/mongo/db/query/sbe_plan_cache.h index 1fe468cd09375..1c7fac40a8c12 100644 --- a/src/mongo/db/query/sbe_plan_cache.h +++ b/src/mongo/db/query/sbe_plan_cache.h @@ -29,15 +29,33 @@ #pragma once +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/util/partitioned.h" #include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/hasher.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/plan_cache_debug_info.h" #include 
"mongo/db/query/plan_cache_key_info.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/container_size_helper.h" +#include "mongo/util/uuid.h" namespace mongo { namespace sbe { @@ -59,16 +77,12 @@ struct PlanCacheKeyShardingEpoch { struct PlanCacheKeyCollectionState { bool operator==(const PlanCacheKeyCollectionState& other) const { return other.uuid == uuid && other.version == version && - other.newestVisibleIndexTimestamp == newestVisibleIndexTimestamp && other.collectionGeneration == collectionGeneration; } size_t hashCode() const { size_t hash = UUID::Hash{}(uuid); boost::hash_combine(hash, version); - if (newestVisibleIndexTimestamp) { - boost::hash_combine(hash, newestVisibleIndexTimestamp->asULL()); - } if (collectionGeneration) { collectionGeneration->epoch.hash_combine(hash); boost::hash_combine(hash, collectionGeneration->ts.asULL()); @@ -88,20 +102,6 @@ struct PlanCacheKeyCollectionState { // all readers seeing this version of the collection have drained. size_t version; - // The '_collectionVersion' is not currently sufficient in order to ensure that the indexes - // visible to the reader are consistent with the indexes present in the cache entry. The reason - // is that all readers see the latest copy-on-write version of the 'Collection' object, even - // though they are allowed to read at an older timestamp, potentially at a time before an index - // build completed. - // - // To solve this problem, we incorporate the timestamp of the newest index visible to the reader - // into the plan cache key. This ensures that the set of indexes visible to the reader match - // those present in the plan cache entry, preventing a situation where the plan cache entry - // reflects a newer version of the index catalog than the one visible to the reader. - // - // In the future, this could instead be solved with point-in-time catalog lookups. - boost::optional newestVisibleIndexTimestamp; - // Ensures that a cached SBE plan cannot be reused if the collection has since become sharded or // changed its shard key. The cached plan may no longer be valid after sharding or shard key // refining since the structure of the plan depends on whether the collection is sharded, and if @@ -165,6 +165,14 @@ class PlanCacheKey { return _info.toString(); } + /** + * Returns the estimated size of the plan cache key in bytes. + */ + uint64_t estimatedKeySizeBytes() const { + return sizeof(*this) + _info.keySizeInBytes() + + container_size_helper::estimateObjectSizeInBytes(_secondaryCollectionStates); + } + private: // Contains the actual encoding of the query shape as well as the index discriminators. const PlanCacheKeyInfo _info; @@ -198,7 +206,7 @@ struct PlanCachePartitioner { struct CachedSbePlan { CachedSbePlan(std::unique_ptr root, stage_builder::PlanStageData data) : root(std::move(root)), planStageData(std::move(data)) { - tassert(5968206, "The RuntimeEnvironment should not be null", planStageData.env); + tassert(5968206, "The RuntimeEnvironment should not be null", planStageData.env.runtimeEnv); } std::unique_ptr clone() const { @@ -223,9 +231,7 @@ struct BudgetEstimator { */ size_t operator()(const sbe::PlanCacheKey& key, const std::shared_ptr& entry) { - // TODO: SERVER-73649 include size of underlying query shape and size of int_32 key hash in - // total size estimation. 
- return entry->estimatedEntrySizeBytes; + return entry->estimatedEntrySizeBytes + key.estimatedKeySizeBytes(); } }; diff --git a/src/mongo/db/query/sbe_plan_cache_on_parameter_change.cpp b/src/mongo/db/query/sbe_plan_cache_on_parameter_change.cpp index 5854cea3c5777..464b7a1b6e1d3 100644 --- a/src/mongo/db/query/sbe_plan_cache_on_parameter_change.cpp +++ b/src/mongo/db/query/sbe_plan_cache_on_parameter_change.cpp @@ -29,7 +29,14 @@ #include "mongo/db/query/sbe_plan_cache_on_parameter_change.h" +#include +#include + +#include + +#include "mongo/base/status_with.h" #include "mongo/db/client.h" +#include "mongo/util/assert_util.h" namespace mongo::plan_cache_util { namespace { diff --git a/src/mongo/db/query/sbe_plan_cache_on_parameter_change.h b/src/mongo/db/query/sbe_plan_cache_on_parameter_change.h index 415b44d82c33a..d3a8f6c73827e 100644 --- a/src/mongo/db/query/sbe_plan_cache_on_parameter_change.h +++ b/src/mongo/db/query/sbe_plan_cache_on_parameter_change.h @@ -29,11 +29,16 @@ #pragma once +#include #include +#include + #include "mongo/base/status.h" #include "mongo/db/query/util/memory_util.h" #include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/decorable.h" namespace mongo::plan_cache_util { diff --git a/src/mongo/db/query/sbe_plan_ranker.cpp b/src/mongo/db/query/sbe_plan_ranker.cpp index 046f45c0c10cb..53cb7fe999e28 100644 --- a/src/mongo/db/query/sbe_plan_ranker.cpp +++ b/src/mongo/db/query/sbe_plan_ranker.cpp @@ -27,9 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/query/sbe_plan_ranker.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/util/assert_util_core.h" namespace mongo::sbe::plan_ranker { namespace { diff --git a/src/mongo/db/query/sbe_plan_ranker.h b/src/mongo/db/query/sbe_plan_ranker.h index 0eaf19fb6edde..a2b80792c113f 100644 --- a/src/mongo/db/query/sbe_plan_ranker.h +++ b/src/mongo/db/query/sbe_plan_ranker.h @@ -29,8 +29,21 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/trial_run_tracker.h" #include "mongo/db/query/plan_ranker.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/record_id.h" namespace mongo::sbe::plan_ranker { diff --git a/src/mongo/db/query/sbe_runtime_planner.cpp b/src/mongo/db/query/sbe_runtime_planner.cpp index ed4527fbaebce..a9e6f4a65ccdb 100644 --- a/src/mongo/db/query/sbe_runtime_planner.cpp +++ b/src/mongo/db/query/sbe_runtime_planner.cpp @@ -29,12 +29,19 @@ #include "mongo/db/query/sbe_runtime_planner.h" -#include "mongo/db/catalog/collection.h" -#include "mongo/db/exec/histogram_server_status_metric.h" -#include "mongo/db/exec/sbe/expressions/expression.h" -#include "mongo/db/exec/trial_period_utils.h" -#include "mongo/db/exec/trial_run_tracker.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/query/plan_executor_sbe.h" +#include "mongo/db/record_id.h" +#include "mongo/util/assert_util.h" namespace mongo::sbe { bool BaseRuntimePlanner::fetchNextDocument(plan_ranker::CandidatePlan* candidate, @@ -86,15 +93,17 @@ std::pair BaseRuntimePlanner::prepar 
stage_builder::prepareSlotBasedExecutableTree( _opCtx, root, data, _cq, _collections, _yieldPolicy, preparingFromCache); + const stage_builder::PlanStageSlots& outputs = data->staticData->outputs; + value::SlotAccessor* resultSlot{nullptr}; - if (auto slot = data->outputs.getIfExists(stage_builder::PlanStageSlots::kResult); slot) { - resultSlot = root->getAccessor(data->ctx, *slot); + if (auto slot = outputs.getIfExists(stage_builder::PlanStageSlots::kResult)) { + resultSlot = root->getAccessor(data->env.ctx, *slot); tassert(4822871, "Query does not have a result slot.", resultSlot); } value::SlotAccessor* recordIdSlot{nullptr}; - if (auto slot = data->outputs.getIfExists(stage_builder::PlanStageSlots::kRecordId); slot) { - recordIdSlot = root->getAccessor(data->ctx, *slot); + if (auto slot = outputs.getIfExists(stage_builder::PlanStageSlots::kRecordId)) { + recordIdSlot = root->getAccessor(data->env.ctx, *slot); tassert(4822872, "Query does not have a recordId slot.", recordIdSlot); } diff --git a/src/mongo/db/query/sbe_runtime_planner.h b/src/mongo/db/query/sbe_runtime_planner.h index 44e781335c6b3..85c5d6ae27c0a 100644 --- a/src/mongo/db/query/sbe_runtime_planner.h +++ b/src/mongo/db/query/sbe_runtime_planner.h @@ -29,8 +29,16 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/db/catalog/collection.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/all_indices_required_checker.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/multiple_collection_accessor.h" @@ -38,6 +46,8 @@ #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_plan_ranker.h" +#include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/util/assert_util_core.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/query/sbe_shard_filter_test.cpp b/src/mongo/db/query/sbe_shard_filter_test.cpp index a2e55b6bc01b3..39089f6eb4cd8 100644 --- a/src/mongo/db/query/sbe_shard_filter_test.cpp +++ b/src/mongo/db/query/sbe_shard_filter_test.cpp @@ -27,15 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/shard_filterer_mock.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" #include "mongo/db/query/sbe_stage_builder_test_fixture.h" +#include "mongo/db/query/shard_filterer_factory_interface.h" #include "mongo/db/query/shard_filterer_factory_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -111,7 +129,7 @@ class SbeShardFilterTest : public SbeStageBuilderTestFixture { buildPlanStage(std::move(querySolution), false, std::move(shardFiltererFactory)); // Prepare the sbe::PlanStage for execution and collect all results. - auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots); auto [resultsTag, resultsVal] = getAllResults(stage.get(), resultAccessors[0]); sbe::value::ValueGuard resultGuard{resultsTag, resultsVal}; diff --git a/src/mongo/db/query/sbe_stage_builder.cpp b/src/mongo/db/query/sbe_stage_builder.cpp index 1a6231719f136..b683a9fd94db5 100644 --- a/src/mongo/db/query/sbe_stage_builder.cpp +++ b/src/mongo/db/query/sbe_stage_builder.cpp @@ -29,15 +29,47 @@ #include "mongo/db/query/sbe_stage_builder.h" -#include - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/docval_to_sbeval.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" #include "mongo/db/exec/sbe/makeobj_spec.h" #include "mongo/db/exec/sbe/match_path.h" #include "mongo/db/exec/sbe/stages/co_scan.h" #include "mongo/db/exec/sbe/stages/column_scan.h" #include "mongo/db/exec/sbe/stages/filter.h" +#include "mongo/db/exec/sbe/stages/hash_agg.h" #include "mongo/db/exec/sbe/stages/hash_join.h" #include "mongo/db/exec/sbe/stages/limit_skip.h" #include "mongo/db/exec/sbe/stages/makeobj.h" @@ -47,31 +79,62 @@ #include "mongo/db/exec/sbe/stages/sorted_merge.h" #include "mongo/db/exec/sbe/stages/union.h" #include "mongo/db/exec/sbe/stages/unique.h" +#include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/sort_spec.h" +#include 
"mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/shard_filterer.h" -#include "mongo/db/fts/fts_index_format.h" +#include "mongo/db/exec/shard_filterer_impl.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/fts/fts_matcher.h" +#include "mongo/db/fts/fts_query.h" #include "mongo/db/fts/fts_query_impl.h" #include "mongo/db/index/fts_access_method.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/expression_type.h" #include "mongo/db/matcher/match_expression_dependencies.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/pipeline/abt/field_map_builder.h" +#include "mongo/db/pipeline/accumulation_statement.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/accumulator_multi.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_visitor.h" +#include "mongo/db/pipeline/expression_walker.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/bind_input_params.h" +#include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/query/expression_walker.h" -#include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/query_utils.h" #include "mongo/db/query/sbe_stage_builder_abt_helpers.h" #include "mongo/db/query/sbe_stage_builder_accumulator.h" #include "mongo/db/query/sbe_stage_builder_coll_scan.h" +#include "mongo/db/query/sbe_stage_builder_eval_frame.h" #include "mongo/db/query/sbe_stage_builder_expression.h" #include "mongo/db/query/sbe_stage_builder_filter.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" #include "mongo/db/query/sbe_stage_builder_index_scan.h" #include "mongo/db/query/sbe_stage_builder_projection.h" #include "mongo/db/query/shard_filterer_factory_impl.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/query/util/make_data_structure.h" -#include "mongo/db/storage/execution_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/id_generator.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -102,8 +165,11 @@ std::pair, PlanStageSlots> generateEofPlan( return {std::move(stage), std::move(outputs)}; } -} // namespace +/** + * Creates a new compilation environment and registers global values within the + * new environment. 
+ */ std::unique_ptr makeRuntimeEnvironment( const CanonicalQuery& cq, OperationContext* opCtx, @@ -117,14 +183,6 @@ std::unique_ptr makeRuntimeEnvironment( false, slotIdGenerator); - if (auto collator = cq.getCollator(); collator) { - env->registerSlot("collator"_sd, - sbe::value::TypeTags::collator, - sbe::value::bitcastFrom(collator), - false, - slotIdGenerator); - } - for (auto&& [id, name] : Variables::kIdToBuiltinVarName) { if (id != Variables::kRootId && id != Variables::kRemoveId && cq.getExpCtx()->variables.hasValue(id)) { @@ -141,6 +199,7 @@ std::unique_ptr makeRuntimeEnvironment( return env; } +} // namespace sbe::value::SlotVector getSlotsToForward(const PlanStageReqs& reqs, const PlanStageSlots& outputs, @@ -168,6 +227,15 @@ sbe::value::SlotVector getSlotsToForward(const PlanStageReqs& reqs, return outputSlots; } +/** + * Performs necessary initialization steps to execute an SBE tree 'root', including binding params + * from the current query 'cq' into the plan if it was cloned from the SBE plan cache. + * root - root node of the execution tree + * data - slot metadata (not actual parameter data!) that goes with the execution tree + * preparingFromCache - if true, 'root' and 'data' may have come from the SBE plan cache (though + * sometimes the caller says true even for non-cached plans). This means current parameters from + * 'cq' need to be substituted into the execution plan. + */ void prepareSlotBasedExecutableTree(OperationContext* opCtx, sbe::PlanStage* root, PlanStageData* data, @@ -192,19 +260,27 @@ void prepareSlotBasedExecutableTree(OperationContext* opCtx, // Register this plan to yield according to the configured policy. yieldPolicy->registerPlan(root); - root->prepare(data->ctx); + auto& env = data->env; + + root->prepare(env.ctx); - auto env = data->env; // Populate/renew "shardFilterer" if there exists a "shardFilterer" slot. The slot value should // be set to Nothing in the plan cache to avoid extending the lifetime of the ownership filter. if (auto shardFiltererSlot = env->getSlotIfExists("shardFilterer"_sd)) { - const auto& collection = collections.getMainCollection(); - tassert(6108307, - "Setting shard filterer slot on un-sharded collection", - collection.isSharded()); + auto shardFilterer = [&]() -> std::unique_ptr { + if (collections.isAcquisition()) { + return std::make_unique( + *collections.getMainAcquisition().getShardingFilter()); + } else { + const auto& collection = collections.getMainCollection(); + tassert(6108307, + "Setting shard filterer slot on un-sharded collection", + collection.isSharded()); - ShardFiltererFactoryImpl shardFiltererFactory(collection); - auto shardFilterer = shardFiltererFactory.makeShardFilterer(opCtx); + ShardFiltererFactoryImpl shardFiltererFactory(collection); + return shardFiltererFactory.makeShardFilterer(opCtx); + } + }(); env->resetSlot(*shardFiltererSlot, sbe::value::TypeTags::shardFilterer, sbe::value::bitcastFrom(shardFilterer.release()), @@ -215,7 +291,8 @@ void prepareSlotBasedExecutableTree(OperationContext* opCtx, auto ids = expCtx->variablesParseState.getDefinedVariableIDs(); for (auto id : ids) { // Variables defined in "ExpressionContext" may not always be translated into SBE slots. 
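// For illustration only (editor's sketch, not taken from the change itself): the hunks in this
// function follow a single pattern for feeding runtime values into a compiled SBE plan. A slot
// is registered on the RuntimeEnvironment while the plan is built, and the same slot is
// repopulated each time the plan is prepared, so a cached plan can be reused with fresh
// parameter values. Mirroring calls that appear elsewhere in this file ('env',
// 'slotIdGenerator', and 'expCtx' assumed in scope; "myVar" is a made-up name):
//
//     auto slotId = env->registerSlot(
//         "myVar"_sd, sbe::value::TypeTags::Nothing, 0, false, slotIdGenerator);
//     // ... later, during prepareSlotBasedExecutableTree():
//     auto [tag, val] = sbe::value::makeValue(expCtx->variables.getValue(id));
//     env->resetSlot(slotId, tag, val, true);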
- if (auto it = data->variableIdToSlotMap.find(id); it != data->variableIdToSlotMap.end()) { + if (auto it = data->staticData->variableIdToSlotMap.find(id); + it != data->staticData->variableIdToSlotMap.end()) { auto slotId = it->second; auto [tag, val] = sbe::value::makeValue(expCtx->variables.getValue(id)); env->resetSlot(slotId, tag, val, true); @@ -233,13 +310,18 @@ void prepareSlotBasedExecutableTree(OperationContext* opCtx, } } - input_params::bind(cq, data->inputParamToSlotMap, env, preparingFromCache); + input_params::bind(cq, *data, preparingFromCache); interval_evaluation_tree::IndexBoundsEvaluationCache indexBoundsEvaluationCache; - for (auto&& indexBoundsInfo : data->indexBoundsEvaluationInfos) { - input_params::bindIndexBounds(cq, indexBoundsInfo, env, &indexBoundsEvaluationCache); + for (auto&& indexBoundsInfo : data->staticData->indexBoundsEvaluationInfos) { + input_params::bindIndexBounds( + cq, indexBoundsInfo, env.runtimeEnv, &indexBoundsEvaluationCache); } -} + + if (preparingFromCache && data->staticData->doSbeClusteredCollectionScan) { + input_params::bindClusteredCollectionBounds(cq, root, data, env.runtimeEnv); + } +} // prepareSlotBasedExecutableTree PlanStageSlots::PlanStageSlots(const PlanStageReqs& reqs, sbe::value::SlotIdGenerator* slotIdGenerator) { @@ -251,10 +333,10 @@ PlanStageSlots::PlanStageSlots(const PlanStageReqs& reqs, std::string PlanStageData::debugString() const { StringBuilder builder; - if (auto slot = outputs.getIfExists(PlanStageSlots::kResult); slot) { + if (auto slot = staticData->outputs.getIfExists(PlanStageSlots::kResult)) { builder << "$$RESULT=s" << *slot << " "; } - if (auto slot = outputs.getIfExists(PlanStageSlots::kRecordId); slot) { + if (auto slot = staticData->outputs.getIfExists(PlanStageSlots::kRecordId)) { builder << "$$RID=s" << *slot << " "; } @@ -316,19 +398,19 @@ std::unique_ptr makeFtsMatcher(OperationContext* opCtx, auto desc = collection->getIndexCatalog()->findIndexByName(opCtx, indexName); tassert(5432209, str::stream() << "index descriptor not found for index named '" << indexName - << "' in collection '" << collection->ns() << "'", + << "' in collection '" << collection->ns().toStringForErrorMsg() << "'", desc); auto entry = collection->getIndexCatalog()->getEntry(desc); tassert(5432210, str::stream() << "index entry not found for index named '" << indexName - << "' in collection '" << collection->ns() << "'", + << "' in collection '" << collection->ns().toStringForErrorMsg() << "'", entry); auto accessMethod = static_cast(entry->accessMethod()); tassert(5432211, str::stream() << "access method is not defined for index named '" << indexName - << "' in collection '" << collection->ns() << "'", + << "' in collection '" << collection->ns().toStringForErrorMsg() << "'", accessMethod); // We assume here that node->ftsQuery is an FTSQueryImpl, not an FTSQueryNoop. 
In practice, this @@ -338,6 +420,21 @@ std::unique_ptr makeFtsMatcher(OperationContext* opCtx, tassert(5432220, "expected FTSQueryImpl", query); return std::make_unique(*query, accessMethod->getSpec()); } + +void initCollator(const CanonicalQuery& cq, + PlanStageEnvironment& env, + PlanStageStaticData* data, + sbe::value::SlotIdGenerator* slotIdGenerator) { + data->queryCollator = cq.getCollatorShared(); + + if (auto coll = data->queryCollator.get()) { + env->registerSlot("collator"_sd, + sbe::value::TypeTags::collator, + sbe::value::bitcastFrom(coll), + false, + slotIdGenerator); + } +} } // namespace SlotBasedStageBuilder::SlotBasedStageBuilder(OperationContext* opCtx, @@ -345,39 +442,64 @@ SlotBasedStageBuilder::SlotBasedStageBuilder(OperationContext* opCtx, const CanonicalQuery& cq, const QuerySolution& solution, PlanYieldPolicySBE* yieldPolicy) - : StageBuilder(opCtx, cq, solution), + : BaseType(opCtx, cq, solution), _collections(collections), _mainNss(cq.nss()), _yieldPolicy(yieldPolicy), - _data(makeRuntimeEnvironment(_cq, _opCtx, &_slotIdGenerator)), + _env(makeRuntimeEnvironment(_cq, _opCtx, &_slotIdGenerator)), + _data(std::make_unique()), _state(_opCtx, - &_data, + _env, + _data.get(), _cq.getExpCtxRaw()->variables, &_slotIdGenerator, &_frameIdGenerator, &_spoolIdGenerator, _cq.getExpCtx()->needsMerge, _cq.getExpCtx()->allowDiskUse) { + initCollator(cq, _env, _data.get(), &_slotIdGenerator); + // SERVER-52803: In the future if we need to gather more information from the QuerySolutionNode // tree, rather than doing one-off scans for each piece of information, we should add a formal // analysis pass here. - // NOTE: Currently, we assume that each query operates on at most one collection, so there can - // be only one STAGE_COLLSCAN node. + // Currently, we assume that each query operates on at most one collection, but a rooted $or + // queries can have more than one collscan stages with clustered collections. 
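// For illustration only (editor's example of the relaxed assumption above, not taken from the
// change): a rooted $or over the cluster key of a clustered collection, for example
// find({$or: [{_id: {$lt: 5}}, {_id: {$gt: 100}}]}), can plan each branch as its own bounded
// collection scan, so the solution tree may contain an OR node with several COLLSCAN children.
// That is why the tassert below only insists on a single COLLSCAN when no OR stage is present.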
auto [node, ct] = getFirstNodeByType(solution.root(), STAGE_COLLSCAN); - const auto count = ct; + auto [_, orCt] = getFirstNodeByType(solution.root(), STAGE_OR); + const unsigned long numCollscanStages = ct; + const unsigned long numOrStages = orCt; tassert(7182000, - str::stream() << "Found " << count << " nodes of type COLLSCAN, expected one or zero", - count <= 1); + str::stream() << "Found " << numCollscanStages << " nodes of type COLLSCAN, and " + << numOrStages + << " nodes of type OR, expected less than one COLLSCAN nodes or at " + "least one OR stage.", + numCollscanStages <= 1 || numOrStages > 0); if (node) { auto csn = static_cast(node); - _data.shouldTrackLatestOplogTimestamp = csn->shouldTrackLatestOplogTimestamp; - _data.shouldTrackResumeToken = csn->requestResumeToken; - _data.shouldUseTailableScan = csn->tailable; + + bool doSbeClusteredCollectionScan = csn->doSbeClusteredCollectionScan(); + + _data->shouldTrackLatestOplogTimestamp = csn->shouldTrackLatestOplogTimestamp; + _data->shouldTrackResumeToken = csn->requestResumeToken; + _data->shouldUseTailableScan = csn->tailable; + _data->direction = csn->direction; + _data->doSbeClusteredCollectionScan = doSbeClusteredCollectionScan; + + if (doSbeClusteredCollectionScan) { + _data->clusterKeyFieldName = + clustered_util::getClusterKeyFieldName(*(csn->clusteredIndex)).toString(); + + const auto& collection = _collections.getMainCollection(); + const CollatorInterface* ccCollator = collection->getDefaultCollator(); + if (ccCollator) { + _data->ccCollator = ccCollator->cloneShared(); + } + } } } -std::unique_ptr SlotBasedStageBuilder::build(const QuerySolutionNode* root) { +SlotBasedStageBuilder::PlanType SlotBasedStageBuilder::build(const QuerySolutionNode* root) { // For a given SlotBasedStageBuilder instance, this build() method can only be called once. invariant(!_buildHasStarted); _buildHasStarted = true; @@ -389,7 +511,7 @@ std::unique_ptr SlotBasedStageBuilder::build(const QuerySolution // resumed (via a resume token or a tailable cursor) or if the caller simply expects to be able // to read it. reqs.setIf(kRecordId, - (_data.shouldUseTailableScan || _data.shouldTrackResumeToken || + (_data->shouldUseTailableScan || _data->shouldTrackResumeToken || _cq.getForceGenerateRecordId())); // Set the target namespace to '_mainNss'. This is necessary as some QuerySolutionNodes that @@ -405,9 +527,9 @@ std::unique_ptr SlotBasedStageBuilder::build(const QuerySolution invariant(outputs.has(kResult)); invariant(reqs.has(kRecordId) == outputs.has(kRecordId)); - _data.outputs = std::move(outputs); + _data->outputs = std::move(outputs); - return std::move(stage); + return {std::move(stage), PlanStageData(std::move(_env), std::move(_data))}; } std::pair, PlanStageSlots> SlotBasedStageBuilder::buildCollScan( @@ -534,12 +656,9 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder } // If the slots necessary for performing an index consistency check were not requested in - // 'reqs', then don't pass a pointer to 'iamMap' so 'generateIndexScan' doesn't generate the - // necessary slots. - auto iamMap = &_data.iamMap; - if (!(reqs.has(kSnapshotId) && reqs.has(kIndexId) && reqs.has(kIndexKey))) { - iamMap = nullptr; - } + // 'reqs', then set 'doIndexConsistencyCheck' to false to avoid generating unnecessary logic. + bool doIndexConsistencyCheck = + reqs.has(kSnapshotId) && reqs.has(kIndexIdent) && reqs.has(kIndexKey); const auto generateIndexScanFunc = ixn->iets.empty() ? 
generateIndexScan : generateIndexScanWithDynamicBounds; @@ -549,7 +668,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder fieldBitset, sortKeyBitset, _yieldPolicy, - iamMap, + doIndexConsistencyCheck, reqs.has(kIndexKeyPattern)); auto stage = std::move(scanStage); @@ -596,6 +715,87 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder return {std::move(stage), std::move(outputs)}; } +std::pair, PlanStageSlots> SlotBasedStageBuilder::buildCountScan( + const QuerySolutionNode* root, const PlanStageReqs& reqs) { + // COUNT_SCAN node doesn't expected to return index info. + tassert(5295800, "buildCountScan() does not support kReturnKey", !reqs.has(kReturnKey)); + tassert(5295801, "buildCountScan() does not support kSnapshotId", !reqs.has(kSnapshotId)); + tassert(5295802, "buildCountScan() does not support kIndexIdent", !reqs.has(kIndexIdent)); + tassert(5295803, "buildCountScan() does not support kIndexKey", !reqs.has(kIndexKey)); + tassert( + 5295804, "buildCountScan() does not support kIndexKeyPattern", !reqs.has(kIndexKeyPattern)); + tassert(5295805, "buildCountScan() does not support kSortKey", !reqs.hasSortKeys()); + + auto csn = static_cast(root); + + const auto& collection = getCurrentCollection(reqs); + auto indexName = csn->index.identifier.catalogName; + auto indexDescriptor = collection->getIndexCatalog()->findIndexByName(_state.opCtx, indexName); + auto indexAccessMethod = + collection->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData(); + + std::unique_ptr lowKey, highKey; + if (csn->iets.empty()) { + std::tie(lowKey, highKey) = + makeKeyStringPair(csn->startKey, + csn->startKeyInclusive, + csn->endKey, + csn->endKeyInclusive, + indexAccessMethod->getSortedDataInterface()->getKeyStringVersion(), + indexAccessMethod->getSortedDataInterface()->getOrdering(), + true /* forward */); + } + + auto [stage, planStageSlots, indexScanBoundsSlots] = + generateSingleIntervalIndexScan(_state, + collection, + indexName, + indexDescriptor->keyPattern(), + true /* forward */, + std::move(lowKey), + std::move(highKey), + {} /* indexKeysToInclude */, + {} /* indexKeySlots */, + reqs, + _yieldPolicy, + csn->nodeId(), + false /* lowPriority */); + + if (!csn->iets.empty()) { + tassert(7681500, + "lowKey and highKey runtime environment slots must be present", + indexScanBoundsSlots); + _state.data->indexBoundsEvaluationInfos.emplace_back(IndexBoundsEvaluationInfo{ + csn->index, + indexAccessMethod->getSortedDataInterface()->getKeyStringVersion(), + indexAccessMethod->getSortedDataInterface()->getOrdering(), + 1 /* direction */, + std::move(csn->iets), + {ParameterizedIndexScanSlots::SingleIntervalPlan{indexScanBoundsSlots->first, + indexScanBoundsSlots->second}}}); + } + + if (csn->index.multikey || + (indexDescriptor->getIndexType() == IndexType::INDEX_WILDCARD && + indexDescriptor->keyPattern().nFields() > 1)) { + stage = + sbe::makeS(std::move(stage), + sbe::makeSV(planStageSlots.get(PlanStageSlots::kRecordId)), + csn->nodeId()); + } + + if (reqs.has(kResult)) { + // COUNT_SCAN stage doesn't produce any output, make an empty obj for kResult. 
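// For illustration only (editor's note): COUNT_SCAN is chosen for counts that can be answered
// from index keys alone, e.g. a count with filter {a: 5} against an index on {a: 1}. The caller
// only tallies how many rows the stage produces, so when kResult is requested it is enough to
// emit a constant empty object per matching key, which is what the newObj projection below
// provides.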
+ auto resultSlot = _slotIdGenerator.generate(); + planStageSlots.set(kResult, resultSlot); + stage = sbe::makeProjectStage( + std::move(stage), csn->nodeId(), resultSlot, makeFunction("newObj")); + } + + planStageSlots.clearNonRequiredSlots(reqs); + return {std::move(stage), std::move(planStageSlots)}; +} + namespace { std::unique_ptr generatePerColumnPredicate(StageBuilderState& state, const MatchExpression* me, @@ -830,8 +1030,8 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder // separately project both a document and its sub-fields (e.g., both 'a' and 'a.b'). Compute the // the subset of 'csn->allFields' that only includes a field if no other field in // 'csn->allFields' is its prefix. - fieldsToProject = - DepsTracker::simplifyDependencies(fieldsToProject, DepsTracker::TruncateToRootLevel::no); + fieldsToProject = DepsTracker::simplifyDependencies(std::move(fieldsToProject), + DepsTracker::TruncateToRootLevel::no); for (const std::string& field : fieldsToProject) { builder.integrateFieldPath(FieldPath(field), [](const bool isLastElement, optimizer::FieldMapEntry& entry) { @@ -854,7 +1054,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder 6935000, "ABT must be valid if have fields to project", fieldsToProject.empty() || abt); optimizer::SlotVarMap slotMap{}; slotMap[rootStr] = rowStoreSlot; - rowStoreExpr = abt ? abtToExpr(*abt, slotMap, *_data.env) + rowStoreExpr = abt ? abtToExpr(*abt, slotMap, *_env) : sbe::makeE("newObj", sbe::EExpression::Vector{}); } @@ -916,19 +1116,17 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder auto childReqs = forwardingReqs.copy() .set(kRecordId) .set(kSnapshotId) - .set(kIndexId) + .set(kIndexIdent) .set(kIndexKey) .set(kIndexKeyPattern); auto [stage, outputs] = build(child, childReqs); - auto iamMap = _data.iamMap; - uassert(4822880, "RecordId slot is not defined", outputs.has(kRecordId)); uassert( 4953600, "ReturnKey slot is not defined", !reqs.has(kReturnKey) || outputs.has(kReturnKey)); uassert(5290701, "Snapshot id slot is not defined", outputs.has(kSnapshotId)); - uassert(5290702, "Index id slot is not defined", outputs.has(kIndexId)); + uassert(7566701, "Index ident slot is not defined", outputs.has(kIndexIdent)); uassert(5290711, "Index key slot is not defined", outputs.has(kIndexKey)); uassert(5113713, "Index key pattern slot is not defined", outputs.has(kIndexKeyPattern)); @@ -964,11 +1162,10 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder topLevelFieldSlots, childRidSlot, outputs.get(kSnapshotId), - outputs.get(kIndexId), + outputs.get(kIndexIdent), outputs.get(kIndexKey), outputs.get(kIndexKeyPattern), getCurrentCollection(reqs), - std::move(iamMap), root->nodeId(), std::move(relevantSlots)); @@ -1010,7 +1207,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder &outputs); stage = std::move(outStage); - auto collatorSlot = _data.env->getSlotIfExists("collator"_sd); + auto collatorSlot = _env->getSlotIfExists("collator"_sd); sbe::value::SlotMap> projects; for (size_t i = 0; i < fieldsAndSortKeys.size(); ++i) { @@ -1256,7 +1453,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder auto [stage, childOutputs] = build(child, childReqs); auto outputs = std::move(childOutputs); - auto collatorSlot = _data.env->getSlotIfExists("collator"_sd); + auto collatorSlot = _env->getSlotIfExists("collator"_sd); sbe::value::SlotVector orderBy; std::vector direction; @@ -1474,7 +1671,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder auto [stage, outputs] = build(child, childReqs); - auto collatorSlot = _data.env->getSlotIfExists("collator"_sd); 
+ auto collatorSlot = _env->getSlotIfExists("collator"_sd); sbe::value::SlotVector orderBy; std::vector direction; @@ -2018,14 +2215,14 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder auto innerIdSlot = innerOutputs.get(kRecordId); auto innerResultSlot = innerOutputs.get(kResult); auto innerSnapshotIdSlot = innerOutputs.getIfExists(kSnapshotId); - auto innerIndexIdSlot = innerOutputs.getIfExists(kIndexId); + auto innerIndexIdentSlot = innerOutputs.getIfExists(kIndexIdent); auto innerIndexKeySlot = innerOutputs.getIfExists(kIndexKey); auto innerIndexKeyPatternSlot = innerOutputs.getIfExists(kIndexKeyPattern); auto innerCondSlots = sbe::makeSV(innerIdSlot); auto innerProjectSlots = sbe::makeSV(innerResultSlot); - auto collatorSlot = _data.env->getSlotIfExists("collator"_sd); + auto collatorSlot = _env->getSlotIfExists("collator"_sd); // Designate outputs. PlanStageSlots outputs; @@ -2040,10 +2237,10 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder innerProjectSlots.push_back(slot); outputs.set(kSnapshotId, slot); } - if (reqs.has(kIndexId) && innerIndexIdSlot) { - auto slot = *innerIndexIdSlot; + if (reqs.has(kIndexIdent) && innerIndexIdentSlot) { + auto slot = *innerIndexIdentSlot; innerProjectSlots.push_back(slot); - outputs.set(kIndexId, slot); + outputs.set(kIndexIdent, slot); } if (reqs.has(kIndexKey) && innerIndexKeySlot) { auto slot = *innerIndexKeySlot; @@ -2108,7 +2305,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder auto outerChildReqs = childReqs.copy() .clear(kSnapshotId) - .clear(kIndexId) + .clear(kIndexIdent) .clear(kIndexKey) .clear(kIndexKeyPattern); auto [outerStage, outerOutputs] = build(outerChild, outerChildReqs); @@ -2141,10 +2338,10 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder innerProjectSlots.push_back(innerSnapshotSlot); outputs.set(kSnapshotId, innerSnapshotSlot); } - if (reqs.has(kIndexId)) { - auto innerIndexIdSlot = innerOutputs.get(kIndexId); - innerProjectSlots.push_back(innerIndexIdSlot); - outputs.set(kIndexId, innerIndexIdSlot); + if (reqs.has(kIndexIdent)) { + auto innerIndexIdentSlot = innerOutputs.get(kIndexIdent); + innerProjectSlots.push_back(innerIndexIdentSlot); + outputs.set(kIndexIdent, innerIndexIdentSlot); } if (reqs.has(kIndexKey)) { auto innerIndexKeySlot = innerOutputs.get(kIndexKey); @@ -2298,22 +2495,134 @@ std::tuple> } } -sbe::value::SlotVector generateAccumulator(StageBuilderState& state, - const AccumulationStatement& accStmt, - const PlanStageSlots& outputs, - sbe::value::SlotIdGenerator* slotIdGenerator, - sbe::HashAggStage::AggExprVector& aggSlotExprs) { +template +std::unique_ptr getSortSpecFromTopBottomN( + const AccumulatorTopBottomN* acc) { + tassert(5807013, "Accumulator state must not be null", acc); + auto sortPattern = + acc->getSortPattern().serialize(SortPattern::SortKeySerialization::kForExplain).toBson(); + auto sortSpec = std::make_unique(sortPattern); + auto sortSpecExpr = + makeConstant(sbe::value::TypeTags::sortSpec, + sbe::value::bitcastFrom(sortSpec.release())); + return sortSpecExpr; +} + +std::unique_ptr getSortSpecFromTopBottomN(const AccumulationStatement& accStmt) { + auto acc = accStmt.expr.factory(); + if (accStmt.expr.name == AccumulatorTopBottomN::getName()) { + return getSortSpecFromTopBottomN( + dynamic_cast*>(acc.get())); + } else if (accStmt.expr.name == AccumulatorTopBottomN::getName()) { + return getSortSpecFromTopBottomN( + dynamic_cast*>(acc.get())); + } else if (accStmt.expr.name == AccumulatorTopBottomN::getName()) { + return getSortSpecFromTopBottomN( + 
dynamic_cast*>(acc.get())); + } else if (accStmt.expr.name == AccumulatorTopBottomN::getName()) { + return getSortSpecFromTopBottomN( + dynamic_cast*>(acc.get())); + } else { + MONGO_UNREACHABLE; + } +} + +bool isTopBottomN(const AccumulationStatement& accStmt) { + return accStmt.expr.name == AccumulatorTopBottomN::getName() || + accStmt.expr.name == AccumulatorTopBottomN::getName() || + accStmt.expr.name == AccumulatorTopBottomN::getName() || + accStmt.expr.name == AccumulatorTopBottomN::getName(); +} + +sbe::value::SlotVector generateAccumulator( + StageBuilderState& state, + const AccumulationStatement& accStmt, + const PlanStageSlots& outputs, + sbe::value::SlotIdGenerator* slotIdGenerator, + sbe::HashAggStage::AggExprVector& aggSlotExprs, + boost::optional initializerRootSlot) { auto rootSlot = outputs.getIfExists(PlanStageSlots::kResult); - auto argExpr = generateExpression(state, accStmt.expr.argument.get(), rootSlot, &outputs); - auto initExpr = generateExpression(state, accStmt.expr.initializer.get(), rootSlot, &outputs); + auto collatorSlot = state.env->getSlotIfExists("collator"_sd); // One accumulator may be translated to multiple accumulator expressions. For example, The // $avg will have two accumulators expressions, a sum(..) and a count which is implemented // as sum(1). - auto collatorSlot = state.data->env->getSlotIfExists("collator"_sd); - auto accExprs = stage_builder::buildAccumulator( - accStmt, argExpr.extractExpr(state), collatorSlot, *state.frameIdGenerator); - std::vector> accInitExprs(accExprs.size()); + auto accExprs = [&]() { + // $topN/$bottomN accumulators require multiple arguments to the accumulator builder. + if (isTopBottomN(accStmt)) { + StringDataMap> accArgs; + auto sortSpecExpr = getSortSpecFromTopBottomN(accStmt); + accArgs.emplace(AccArgs::kTopBottomNSortSpec, sortSpecExpr->clone()); + + // Build the key expression for the accumulator. + tassert(5807014, + str::stream() << accStmt.expr.name + << " accumulator must have the root slot set", + rootSlot); + auto key = collatorSlot ? makeFunction("generateCheapSortKey", + std::move(sortSpecExpr), + makeVariable(*rootSlot), + makeVariable(*collatorSlot)) + : makeFunction("generateCheapSortKey", + std::move(sortSpecExpr), + makeVariable(*rootSlot)); + accArgs.emplace(AccArgs::kTopBottomNKey, + makeFunction("sortKeyComponentVectorToArray", std::move(key))); + + // Build the value expression for the accumulator. 
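// For illustration only (editor's note on the expected argument shape): these accumulators take
// an object argument in MQL, e.g. {$top: {output: "$score", sortBy: {score: -1}}} or
// {$topN: {n: 3, output: "$score", sortBy: {score: -1}}}. The branches below pull the "output"
// child out of that object to form AccArgs::kTopBottomNValue, while the serialized sortBy
// pattern was captured above as AccArgs::kTopBottomNSortSpec; arguments that are not objects or
// that lack an "output" field fail the surrounding tasserts.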
+ if (auto expObj = dynamic_cast(accStmt.expr.argument.get())) { + for (auto& [key, value] : expObj->getChildExpressions()) { + if (key == AccumulatorN::kFieldNameOutput) { + auto outputExpr = + generateExpression(state, value.get(), rootSlot, &outputs); + accArgs.emplace(AccArgs::kTopBottomNValue, + makeFillEmptyNull(outputExpr.extractExpr(state))); + break; + } + } + } else if (auto expConst = + dynamic_cast(accStmt.expr.argument.get())) { + auto objConst = expConst->getValue(); + tassert(7767100, + str::stream() + << accStmt.expr.name << " accumulator must have an object argument", + objConst.isObject()); + auto objBson = objConst.getDocument().toBson(); + auto outputField = objBson.getField(AccumulatorN::kFieldNameOutput); + if (outputField.ok()) { + auto [outputTag, outputVal] = + sbe::bson::convertFrom(outputField); + auto outputExpr = makeConstant(outputTag, outputVal); + accArgs.emplace(AccArgs::kTopBottomNValue, + makeFillEmptyNull(std::move(outputExpr))); + } + } else { + tasserted(5807015, + str::stream() + << accStmt.expr.name << " accumulator must have an object argument"); + } + tassert(5807016, + str::stream() << accStmt.expr.name + << " accumulator must have an output field in the argument", + accArgs.find(AccArgs::kTopBottomNValue) != accArgs.end()); + + auto accExprs = stage_builder::buildAccumulator( + accStmt, std::move(accArgs), collatorSlot, *state.frameIdGenerator); + + return accExprs; + } else { + auto argExpr = + generateExpression(state, accStmt.expr.argument.get(), rootSlot, &outputs); + auto accExprs = stage_builder::buildAccumulator( + accStmt, argExpr.extractExpr(state), collatorSlot, *state.frameIdGenerator); + return accExprs; + } + }(); + + auto initExpr = + generateExpression(state, accStmt.expr.initializer.get(), initializerRootSlot, nullptr); + auto accInitExprs = stage_builder::buildInitialize( + accStmt, initExpr.extractExpr(state), *state.frameIdGenerator); tassert(7567301, "The accumulation and initialization expression should have the same length", @@ -2350,9 +2659,19 @@ sbe::SlotExprPairVector generateMergingExpressions(StageBuilderState& state, tassert(7039557, "expected non-null 'frameIdGenerator' pointer", frameIdGenerator); auto spillSlots = slotIdGenerator->generateMultiple(numInputSlots); - auto collatorSlot = state.data->env->getSlotIfExists("collator"_sd); - auto mergingExprs = - buildCombinePartialAggregates(accStmt, spillSlots, collatorSlot, *frameIdGenerator); + auto collatorSlot = state.env->getSlotIfExists("collator"_sd); + + auto mergingExprs = [&]() { + if (isTopBottomN(accStmt)) { + StringDataMap> mergeArgs; + mergeArgs.emplace(AccArgs::kTopBottomNSortSpec, getSortSpecFromTopBottomN(accStmt)); + return buildCombinePartialAggregates( + accStmt, spillSlots, std::move(mergeArgs), collatorSlot, *frameIdGenerator); + } else { + return buildCombinePartialAggregates( + accStmt, spillSlots, collatorSlot, *frameIdGenerator); + } + }(); // Zip the slot vector and expression vector into a vector of pairs. tassert(7039550, @@ -2398,13 +2717,33 @@ std::tuple, sbe::value::SlotVector, EvalStage> generate } }(); + auto collatorSlot = state.env->getSlotIfExists("collator"_sd); auto finalSlots{sbe::value::SlotVector{finalGroupBySlot}}; std::vector fieldNames{"_id"}; size_t idxAccFirstSlot = dedupedGroupBySlots.size(); for (size_t idxAcc = 0; idxAcc < accStmts.size(); ++idxAcc) { // Gathers field names for the output object from accumulator statements. 
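// For illustration only (editor's note): for a stage such as
// {$group: {_id: "$a", total: {$sum: "$b"}}} the loop below produces
// fieldNames == {"_id", "total"}: the group key is always emitted first, and each accumulator
// then contributes its user-visible field name, in statement order, to the final output object.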
fieldNames.push_back(accStmts[idxAcc].fieldName); - auto finalExpr = stage_builder::buildFinalize(state, accStmts[idxAcc], aggSlotsVec[idxAcc]); + + auto finalExpr = [&]() { + const auto& accStmt = accStmts[idxAcc]; + if (isTopBottomN(accStmt)) { + StringDataMap> finalArgs; + finalArgs.emplace(AccArgs::kTopBottomNSortSpec, getSortSpecFromTopBottomN(accStmt)); + return buildFinalize(state, + accStmts[idxAcc], + aggSlotsVec[idxAcc], + std::move(finalArgs), + collatorSlot, + *state.frameIdGenerator); + } else { + return buildFinalize(state, + accStmts[idxAcc], + aggSlotsVec[idxAcc], + collatorSlot, + *state.frameIdGenerator); + } + }(); // The final step may not return an expression if it's trivial. For example, $first and // $last's final steps are trivial. @@ -2493,14 +2832,15 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder // If the group node references any top level fields, we take all of them and add them to // 'childReqs'. Note that this happens regardless of whether we need the whole document because // it can be the case that this stage references '$$ROOT' as well as some top level fields. - auto topLevelFields = getTopLevelFields(groupNode->requiredFields); - if (!topLevelFields.empty()) { - childReqs.setFields(topLevelFields); + if (auto topLevelFields = getTopLevelFields(groupNode->requiredFields); + !topLevelFields.empty()) { + childReqs.setFields(std::move(topLevelFields)); } if (!groupNode->needWholeDocument) { // Tracks whether we need to request kResult. bool rootDocIsNeeded = false; + bool sortKeyIsNeeded = false; auto referencesRoot = [&](const ExpressionFieldPath* fieldExpr) { rootDocIsNeeded = rootDocIsNeeded || fieldExpr->isROOT(); }; @@ -2509,23 +2849,29 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder walkAndActOnFieldPaths(idExpr.get(), referencesRoot); for (const auto& accStmt : accStmts) { walkAndActOnFieldPaths(accStmt.expr.argument.get(), referencesRoot); + if (isTopBottomN(accStmt)) { + sortKeyIsNeeded = true; + } } - // If the group node doesn't have any dependency (e.g. $count) or if the dependency can be - // satisfied by the child node (e.g. covered index scan), we can clear the kResult - // requirement for the child. - if (groupNode->requiredFields.empty() || !rootDocIsNeeded) { - childReqs.clear(kResult); - } else if (childNode->getType() == StageType::STAGE_PROJECTION_COVERED) { - auto pn = static_cast(childNode); - std::set providedFieldSet; - for (auto&& elt : pn->coveredKeyObj) { - providedFieldSet.emplace(elt.fieldNameStringData()); - } - if (std::all_of(groupNode->requiredFields.begin(), - groupNode->requiredFields.end(), - [&](const std::string& f) { return providedFieldSet.count(f); })) { + // If any accumulator requires generating sort key, we cannot clear the kResult. + if (!sortKeyIsNeeded) { + // If the group node doesn't have any dependency (e.g. $count) or if the dependency can + // be satisfied by the child node (e.g. covered index scan), we can clear the kResult + // requirement for the child. 
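+ // For example (illustrative), {$group: {_id: '$a', t: {$sum: '$b'}}} running over a covered
+ // index scan on {a: 1, b: 1} can obtain both 'a' and 'b' from the index keys, so the child
+ // does not need to materialize the full document.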
+ if (groupNode->requiredFields.empty() || !rootDocIsNeeded) { childReqs.clear(kResult); + } else if (childNode->getType() == StageType::STAGE_PROJECTION_COVERED) { + auto pn = static_cast(childNode); + std::set providedFieldSet; + for (auto&& elt : pn->coveredKeyObj) { + providedFieldSet.emplace(elt.fieldNameStringData()); + } + if (std::all_of(groupNode->requiredFields.begin(), + groupNode->requiredFields.end(), + [&](const std::string& f) { return providedFieldSet.count(f); })) { + childReqs.clear(kResult); + } } } } @@ -2602,9 +2948,57 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder childEvalStage = makeProject(std::move(childEvalStage), std::move(fpMap), nodeId); } - auto [groupBySlots, groupByEvalStage, idFinalExpr] = generateGroupByKey( + bool isVariableGroupInitializer = false; + for (const auto& accStmt : accStmts) { + isVariableGroupInitializer = isVariableGroupInitializer || + !ExpressionConstant::isNullOrConstant(accStmt.expr.initializer); + } + + sbe::value::SlotVector groupBySlots; + EvalStage groupByEvalStage; + std::unique_ptr idFinalExpr; + + std::tie(groupBySlots, groupByEvalStage, idFinalExpr) = generateGroupByKey( _state, idExpr, childOutputs, std::move(childEvalStage), nodeId, &_slotIdGenerator); + auto initializerRootSlot = [&]() { + if (isVariableGroupInitializer) { + sbe::value::SlotId idSlot; + // We materialize the groupId before the group stage to provide it as root to + // initializer expression + if (idFinalExpr) { + auto [slot, projectStage] = projectEvalExpr(std::move(idFinalExpr), + std::move(groupByEvalStage), + nodeId, + &_slotIdGenerator, + _state); + groupBySlots.clear(); + groupBySlots.push_back(slot); + idFinalExpr = nullptr; + groupByEvalStage = std::move(projectStage); + idSlot = slot; + } else { + idSlot = groupBySlots[0]; + } + + // As per the mql semantics add a project expression 'isObject(id) ? id : {}' + // which will be provided as root to initializer expression + auto [emptyObjTag, emptyObjVal] = sbe::value::makeNewObject(); + auto isObjectExpr = sbe::makeE( + sbe::makeE("isObject"_sd, sbe::makeEs(makeVariable(idSlot))), + makeVariable(idSlot), + makeConstant(emptyObjTag, emptyObjVal)); + auto [isObjSlot, isObjStage] = projectEvalExpr(std::move(isObjectExpr), + std::move(groupByEvalStage), + nodeId, + &_slotIdGenerator, + _state); + groupByEvalStage = std::move(isObjStage); + return boost::optional{isObjSlot}; + } + return boost::optional{}; + }(); + // Translates accumulators which are executed inside the group stage and gets slots for // accumulators. stage_builder::EvalStage currentStage = std::move(groupByEvalStage); @@ -2615,8 +3009,8 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder // to combine partial aggregates that have been spilled to disk. 
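+ // For example, $avg is backed by two partial aggregates (a running sum and a count), so two
+ // merging expressions are generated for it, one per spill slot.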
sbe::SlotExprPairVector mergingExprs; for (const auto& accStmt : accStmts) { - sbe::value::SlotVector curAggSlots = - generateAccumulator(_state, accStmt, childOutputs, &_slotIdGenerator, aggSlotExprs); + sbe::value::SlotVector curAggSlots = generateAccumulator( + _state, accStmt, childOutputs, &_slotIdGenerator, aggSlotExprs, initializerRootSlot); sbe::SlotExprPairVector curMergingExprs = generateMergingExpressions(_state, accStmt, curAggSlots.size()); @@ -2635,7 +3029,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder auto groupEvalStage = makeHashAgg(std::move(currentStage), dedupedGroupBySlots, std::move(aggSlotExprs), - _state.data->env->getSlotIfExists("collator"_sd), + _state.env->getSlotIfExists("collator"_sd), _cq.getExpCtx()->allowDiskUse, std::move(mergingExprs), nodeId); @@ -2735,7 +3129,7 @@ SlotBasedStageBuilder::makeUnionForTailableCollScan(const QuerySolutionNode* roo // Register a SlotId in the global environment which would contain a recordId to resume a // tailable collection scan from. A PlanStage executor will track the last seen recordId and // will reset a SlotAccessor for the resumeRecordIdSlot with this recordId. - auto resumeRecordIdSlot = _data.env->registerSlot( + auto resumeRecordIdSlot = _env->registerSlot( "resumeRecordId"_sd, sbe::value::TypeTags::Nothing, 0, false, &_slotIdGenerator); // For tailable collection scan we need to build a special union sub-tree consisting of two @@ -2826,7 +3220,7 @@ SlotBasedStageBuilder::buildShardFilterCovered(const QuerySolutionNode* root, // We register the "shardFilterer" slot but we don't construct the ShardFilterer here, because // once constructed the ShardFilterer will prevent orphaned documents from being deleted. We // will construct the ShardFilterer later while preparing the SBE tree for execution. - auto shardFiltererSlot = _data.env->registerSlot( + auto shardFiltererSlot = _env->registerSlot( "shardFilterer"_sd, sbe::value::TypeTags::Nothing, 0, false, &_slotIdGenerator); for (auto&& shardKeyElt : shardKeyPattern) { @@ -2928,7 +3322,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder // We register the "shardFilterer" slot but we don't construct the ShardFilterer here, because // once constructed the ShardFilterer will prevent orphaned documents from being deleted. We // will construct the ShardFilterer later while preparing the SBE tree for execution. - auto shardFiltererSlot = _data.env->registerSlot( + auto shardFiltererSlot = _env->registerSlot( "shardFilterer"_sd, sbe::value::TypeTags::Nothing, 0, false, &_slotIdGenerator); // Request slots for top level shard key fields and cache parsed key path. 
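+ // For example (illustrative), a shard key pattern such as {a: 1, 'b.c': 1} leads the builder
+ // to request the top-level fields 'a' and 'b' from the child stage, with the parsed dotted
+ // key path cached for reuse when the shard filter is evaluated.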
@@ -3032,6 +3426,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder SlotBasedStageBuilder&, const QuerySolutionNode* root, const PlanStageReqs& reqs)>> kStageBuilders = { {STAGE_COLLSCAN, &SlotBasedStageBuilder::buildCollScan}, + {STAGE_COUNT_SCAN, &SlotBasedStageBuilder::buildCountScan}, {STAGE_VIRTUAL_SCAN, &SlotBasedStageBuilder::buildVirtualScan}, {STAGE_IXSCAN, &SlotBasedStageBuilder::buildIndexScan}, {STAGE_COLUMN_SCAN, &SlotBasedStageBuilder::buildColumnScan}, diff --git a/src/mongo/db/query/sbe_stage_builder.h b/src/mongo/db/query/sbe_stage_builder.h index 5b636b156334c..55e282bc32925 100644 --- a/src/mongo/db/query/sbe_stage_builder.h +++ b/src/mongo/db/query/sbe_stage_builder.h @@ -30,37 +30,64 @@ #pragma once #include +#include #include +#include #include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/sbe/expressions/compile_ctx.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/stages/collection_helpers.h" +#include "mongo/db/exec/sbe/stages/plan_stats.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/trial_period_utils.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_entry.h" #include "mongo/db/query/interval_evaluation_tree.h" #include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_cache_debug_info.h" #include "mongo/db/query/plan_yield_policy_sbe.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" #include "mongo/db/query/shard_filterer_factory_interface.h" #include "mongo/db/query/stage_builder.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::stage_builder { -/** - * Creates a new compilation environment and registers global values within the - * new environment. - */ -std::unique_ptr makeRuntimeEnvironment( - const CanonicalQuery& cq, - OperationContext* opCtx, - sbe::value::SlotIdGenerator* slotIdGenerator); class PlanStageReqs; class PlanStageSlots; + +struct PlanStageData; sbe::value::SlotVector getSlotsToForward(const PlanStageReqs& reqs, const PlanStageSlots& outputs, const sbe::value::SlotVector& exclude = sbe::makeSV()); @@ -79,7 +106,7 @@ void prepareSlotBasedExecutableTree(OperationContext* opCtx, const CanonicalQuery& cq, const MultipleCollectionAccessor& collections, PlanYieldPolicySBE* yieldPolicy, - bool preparingFromCache = false); + bool preparingFromCache); /** * The ParameterizedIndexScanSlots struct is used by SlotBasedStageBuilder while building the index @@ -111,6 +138,13 @@ struct ParameterizedIndexScanSlots { stdx::variant slots; }; +// Holds the slots for the clustered collection scan bounds. +struct ParameterizedClusteredScanSlots { + // Holds the min and max record for the bounds of a clustered collection scan. 
+ boost::optional minRecord; + boost::optional maxRecord; +}; + /** * The PlanStageSlots class is used by SlotBasedStageBuilder to return the output slots produced * after building a stage. @@ -151,7 +185,7 @@ class PlanStageSlots { static constexpr Name kRecordId = {kMeta, "recordId"_sd}; static constexpr Name kReturnKey = {kMeta, "returnKey"_sd}; static constexpr Name kSnapshotId = {kMeta, "snapshotId"_sd}; - static constexpr Name kIndexId = {kMeta, "indexId"_sd}; + static constexpr Name kIndexIdent = {kMeta, "indexIdent"_sd}; static constexpr Name kIndexKey = {kMeta, "indexKey"_sd}; static constexpr Name kIndexKeyPattern = {kMeta, "indexKeyPattern"_sd}; @@ -439,7 +473,7 @@ using VariableIdToSlotMap = stdx::unordered_map iets; @@ -447,64 +481,70 @@ struct IndexBoundsEvaluationInfo { }; /** - * Some auxiliary data returned by a 'SlotBasedStageBuilder' along with a PlanStage tree root, which - * is needed to execute the PlanStage tree. + * This class holds the RuntimeEnvironment and CompileCtx for an SBE plan. The RuntimeEnvironment + * owns various SlotAccessors which are accessed when the SBE plan is executed. The CompileCtx is + * used when the SBE plan needs to be "prepared" (via the prepare() method). */ -struct PlanStageData { - PlanStageData(PlanStageData&&) = default; - PlanStageData& operator=(PlanStageData&&) = default; +struct PlanStageEnvironment { + PlanStageEnvironment(std::unique_ptr runtimeEnv) + : runtimeEnv(runtimeEnv.get()), ctx(std::move(runtimeEnv)) {} - explicit PlanStageData(std::unique_ptr env) - : env(env.get()), ctx(std::move(env)) {} - - PlanStageData(const PlanStageData& other) : PlanStageData(other.env->makeDeepCopy()) { - copyFrom(other); + PlanStageEnvironment makeCopy() const { + return PlanStageEnvironment(runtimeEnv->makeDeepCopy()); } - PlanStageData& operator=(const PlanStageData& other) { - if (this != &other) { - auto envCopy = other.env->makeDeepCopy(); - env = envCopy.get(); - ctx = sbe::CompileCtx(std::move(envCopy)); - copyFrom(other); - } - return *this; + sbe::RuntimeEnvironment* operator->() noexcept { + return runtimeEnv; } - std::string debugString() const; + const sbe::RuntimeEnvironment* operator->() const noexcept { + return runtimeEnv; + } - // This holds the output slots produced by SBE plan (resultSlot, recordIdSlot, etc). - PlanStageSlots outputs; + sbe::RuntimeEnvironment& operator*() noexcept { + return *runtimeEnv; + } - // Map from index name to IAM. - StringMap iamMap; + const sbe::RuntimeEnvironment& operator*() const noexcept { + return *runtimeEnv; + } - // The CompileCtx object owns the RuntimeEnvironment. The RuntimeEnvironment owns various - // SlotAccessors which are accessed when the SBE plan is executed. - sbe::RuntimeEnvironment* env{nullptr}; + sbe::RuntimeEnvironment* runtimeEnv{nullptr}; sbe::CompileCtx ctx; +}; + +/** + * This struct used to hold all of a PlanStageData's immutable data. + */ +struct PlanStageStaticData { + // This holds the output slots produced by SBE plan (resultSlot, recordIdSlot, etc). + PlanStageSlots outputs; + // Various flags copied from the CollectionScanNode. If the plan generated by the query planner + // did not have a CollectionScanNode, then each of these flags is initialized to its respective + // default value. bool shouldTrackLatestOplogTimestamp{false}; bool shouldTrackResumeToken{false}; bool shouldUseTailableScan{false}; - // If this execution tree was built as a result of replanning of the cached plan, this string - // will include the reason for replanning. 
- boost::optional replanReason; + // Scan direction if this plan has a collection scan: 1 means forward; -1 means reverse. + int direction{1}; - // If this candidate plan has completed the trial run early by achieving one of the trial run - // metrics, the stats are cached in here. - std::unique_ptr savedStatsOnEarlyExit{nullptr}; + // True iff this plan does an SBE clustered collection scan. + bool doSbeClusteredCollectionScan{false}; + + // Iff 'doSbeClusteredCollectionScan', this holds the cluster key field name. + std::string clusterKeyFieldName; - // Stores plan cache entry information used as debug information or for "explain" purpose. - // Note that 'debugInfo' is present only if this PlanStageData is recovered from the plan cache. - std::shared_ptr debugInfo; + // Iff 'doSbeClusteredCollectionScan', this holds the clustered collection's native collator, + // needed to compute scan bounds. + std::shared_ptr ccCollator; // If the query has been auto-parameterized, then the mapping from input parameter id to the // id of a slot in the runtime environment is maintained here. This mapping is established - // during stage building and stored in the cache. When a cached plan is used for a subsequent - // query, this mapping is used to set the new constant value associated with each input - // parameter id in the runtime environment. + // during stage building and stored in the cache. When a cached plan is used for a + // subsequent query, this mapping is used to set the new constant value associated with each + // input parameter id in the runtime environment. // // For example, imagine an auto-parameterized query {a: , b: } is present in the SBE // plan cache. Also present in the cache is this mapping: @@ -513,55 +553,98 @@ struct PlanStageData { // // A new query {a: 5, b: 6} runs. Using this mapping, we set a value of 5 in s3 and 6 in s4. InputParamToSlotMap inputParamToSlotMap; - // This Variable-to-SlotId map stores all the Variables that were translated into corresponding + + // This Variable-to-SlotId map stores all Variables that were translated into corresponding // SBE Slots. The slots are registered in the 'RuntimeEnvironment'. VariableIdToSlotMap variableIdToSlotMap; - // Stores auxiliary data to restore index bounds for a cached auto-parameterized SBE plan for - // every index used by the plan. + // Stores auxiliary data to restore index bounds for a cached auto-parameterized SBE plan + // for every index used by the plan. std::vector indexBoundsEvaluationInfos; - // Stores all namespaces involved in the build side of a hash join plan. Needed to check if the - // plan should be evicted as the size of the foreign namespace changes. - stdx::unordered_set foreignHashJoinCollections; + // Stores data to restore collection scan bounds for a cached auto-parameterized SBE plan for + // every clustered collection scan used by the plan. + std::vector clusteredCollBoundsInfos; -private: - // This copy function copies data from 'other' but will not create a copy of its - // RuntimeEnvironment and CompileCtx. 
- void copyFrom(const PlanStageData& other) { - outputs = other.outputs; - iamMap = other.iamMap; - shouldTrackLatestOplogTimestamp = other.shouldTrackLatestOplogTimestamp; - shouldTrackResumeToken = other.shouldTrackResumeToken; - shouldUseTailableScan = other.shouldUseTailableScan; - replanReason = other.replanReason; - if (other.savedStatsOnEarlyExit) { - savedStatsOnEarlyExit.reset(other.savedStatsOnEarlyExit->clone()); - } else { - savedStatsOnEarlyExit.reset(); - } - if (other.debugInfo) { - debugInfo = std::make_unique(*other.debugInfo); - } else { - debugInfo.reset(); + // Stores all namespaces involved in the build side of a hash join plan. Needed to check if + // the plan should be evicted as the size of the foreign namespace changes. + absl::flat_hash_set foreignHashJoinCollections; + + // Stores CollatorInterface to be used for this plan. Raw pointer may be stored inside data + // structures, so it must be kept stable. + std::shared_ptr queryCollator; +}; + +/** + * Some auxiliary data returned by a 'SlotBasedStageBuilder' along with a PlanStage tree root, which + * is needed to execute the PlanStage tree. + */ +struct PlanStageData { + using DebugInfoSBE = plan_cache_debug_info::DebugInfoSBE; + + explicit PlanStageData(PlanStageEnvironment env, + std::shared_ptr staticData) + : env(std::move(env)), staticData(std::move(staticData)) {} + + PlanStageData(PlanStageData&&) = default; + + PlanStageData(const PlanStageData& other) + : env(other.env.makeCopy()), + staticData(other.staticData), + replanReason(other.replanReason), + savedStatsOnEarlyExit(std::unique_ptr( + other.savedStatsOnEarlyExit ? other.savedStatsOnEarlyExit->clone() : nullptr)), + debugInfo(other.debugInfo) {} + + PlanStageData& operator=(PlanStageData&&) = default; + + PlanStageData& operator=(const PlanStageData& other) { + if (this != &other) { + env = other.env.makeCopy(); + staticData = other.staticData; + replanReason = other.replanReason; + savedStatsOnEarlyExit = std::unique_ptr{ + other.savedStatsOnEarlyExit ? other.savedStatsOnEarlyExit->clone() : nullptr}; + debugInfo = other.debugInfo; } - inputParamToSlotMap = other.inputParamToSlotMap; - variableIdToSlotMap = other.variableIdToSlotMap; - indexBoundsEvaluationInfos = other.indexBoundsEvaluationInfos; - foreignHashJoinCollections = other.foreignHashJoinCollections; + return *this; } + + std::string debugString() const; + + // This field holds the RuntimeEnvironment and the CompileCtx. + PlanStageEnvironment env; + + // This field holds all of the immutable data that needs to accompany an SBE PlanStage tree. + std::shared_ptr staticData; + + // If this execution tree was built as a result of replanning of the cached plan, this string + // will include the reason for replanning. + boost::optional replanReason; + + // If this candidate plan has completed the trial run early by achieving one of the trial run + // metrics, the stats are cached in here. + std::unique_ptr savedStatsOnEarlyExit{nullptr}; + + // Stores plan cache entry information used as debug information or for "explain" purpose. Note + // that 'debugInfo' is present only if this PlanStageData is recovered from the plan cache. + std::shared_ptr debugInfo; }; /** * A stage builder which builds an executable tree using slot-based PlanStages. 
*/ -class SlotBasedStageBuilder final : public StageBuilder { +class SlotBasedStageBuilder final + : public StageBuilder, PlanStageData>> { public: + using PlanType = std::pair, PlanStageData>; + using BaseType = StageBuilder; + static constexpr auto kResult = PlanStageSlots::kResult; static constexpr auto kRecordId = PlanStageSlots::kRecordId; static constexpr auto kReturnKey = PlanStageSlots::kReturnKey; static constexpr auto kSnapshotId = PlanStageSlots::kSnapshotId; - static constexpr auto kIndexId = PlanStageSlots::kIndexId; + static constexpr auto kIndexIdent = PlanStageSlots::kIndexIdent; static constexpr auto kIndexKey = PlanStageSlots::kIndexKey; static constexpr auto kIndexKeyPattern = PlanStageSlots::kIndexKeyPattern; @@ -577,11 +660,7 @@ class SlotBasedStageBuilder final : public StageBuilder { * * This method is a wrapper around 'build(const QuerySolutionNode*, const PlanStageReqs&)'. */ - std::unique_ptr build(const QuerySolutionNode* root) final; - - PlanStageData getPlanStageData() { - return std::move(_data); - } + PlanType build(const QuerySolutionNode* root) final; private: /** @@ -604,6 +683,9 @@ class SlotBasedStageBuilder final : public StageBuilder { std::pair, PlanStageSlots> buildIndexScan( const QuerySolutionNode* root, const PlanStageReqs& reqs); + std::pair, PlanStageSlots> buildCountScan( + const QuerySolutionNode* root, const PlanStageReqs& reqs); + std::pair, PlanStageSlots> buildColumnScan( const QuerySolutionNode* root, const PlanStageReqs& reqs); @@ -698,9 +780,13 @@ class SlotBasedStageBuilder final : public StageBuilder { PlanYieldPolicySBE* const _yieldPolicy{nullptr}; - // Apart from generating just an execution tree, this builder will also produce some auxiliary - // data which is needed to execute the tree. - PlanStageData _data; + // Aside from generating the PlanStage tree, this builder also produces a few auxiliary data + // structures that are needed to execute the tree: the RuntimeEnvironment, the CompileCtx, + // and the PlanStageStaticData. Note that the PlanStageStaticData ('_data') is mutable inside + // SlotBasedStageBuilder, but after the 'build(const QuerySolutionNode*)' method is called the + // data will become immutable. 
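+ // (The static data can safely be treated as immutable after build() because PlanStageData
+ // shares it between copies via a shared_ptr rather than deep-copying it; see its copy
+ // constructor above.)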
+ PlanStageEnvironment _env; + std::unique_ptr _data; bool _buildHasStarted{false}; diff --git a/src/mongo/db/query/sbe_stage_builder_abt_helpers.cpp b/src/mongo/db/query/sbe_stage_builder_abt_helpers.cpp index f90e1b1b5e0f0..cd33885b8eee9 100644 --- a/src/mongo/db/query/sbe_stage_builder_abt_helpers.cpp +++ b/src/mongo/db/query/sbe_stage_builder_abt_helpers.cpp @@ -29,12 +29,34 @@ #include "mongo/db/query/sbe_stage_builder_abt_helpers.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/sbe/abt/abt_lower.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/query/bson_typemask.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/reference_tracker.h" #include "mongo/db/query/optimizer/rewrites/path_lower.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_abt_holder_impl.h" #include "mongo/db/query/sbe_stage_builder_const_eval.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" #include "mongo/db/query/sbe_stage_builder_type_checker.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::stage_builder { @@ -78,7 +100,7 @@ EvalExpr makeBalancedBooleanOpTree(sbe::EPrimBinary::Op logicOp, std::vector> exprs; exprs.reserve(leaves.size()); for (auto&& e : leaves) { - exprs.emplace_back(e.extractExpr(state.slotVarMap, *state.data->env)); + exprs.emplace_back(e.extractExpr(state.slotVarMap, *state.env)); } return EvalExpr{makeBalancedBooleanOpTree(logicOp, std::move(exprs))}; } @@ -140,6 +162,11 @@ optimizer::ABT makeFillEmptyTrue(optimizer::ABT e) { return makeFillEmpty(std::move(e), true); } +optimizer::ABT makeFillEmptyNull(optimizer::ABT e) { + return optimizer::make( + optimizer::Operations::FillEmpty, std::move(e), optimizer::Constant::null()); +} + optimizer::ABT makeNot(optimizer::ABT e) { return makeUnaryOp(optimizer::Operations::Not, std::move(e)); } diff --git a/src/mongo/db/query/sbe_stage_builder_abt_helpers.h b/src/mongo/db/query/sbe_stage_builder_abt_helpers.h index c3099c16aec1c..0375b70b90fe0 100644 --- a/src/mongo/db/query/sbe_stage_builder_abt_helpers.h +++ b/src/mongo/db/query/sbe_stage_builder_abt_helpers.h @@ -29,7 +29,22 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/abt/abt_lower.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/sbe_stage_builder_eval_frame.h" @@ -75,6 +90,10 @@ optimizer::ABT makeFillEmptyFalse(optimizer::ABT e); * expression. */ optimizer::ABT makeFillEmptyTrue(optimizer::ABT e); +/** + * Check if expression returns Nothing and return null if so. Otherwise, return the expression. 
+ */ +optimizer::ABT makeFillEmptyNull(optimizer::ABT e); optimizer::ABT makeNot(optimizer::ABT e); optimizer::ProjectionName makeVariableName(sbe::value::SlotId slotId); diff --git a/src/mongo/db/query/sbe_stage_builder_abt_holder_impl.cpp b/src/mongo/db/query/sbe_stage_builder_abt_holder_impl.cpp index 2293fd178a051..80826b4db2bee 100644 --- a/src/mongo/db/query/sbe_stage_builder_abt_holder_impl.cpp +++ b/src/mongo/db/query/sbe_stage_builder_abt_holder_impl.cpp @@ -29,6 +29,9 @@ #include "mongo/db/query/sbe_stage_builder_abt_holder_impl.h" +#include +#include + namespace mongo::stage_builder::abt { void HolderDeleter::operator()(Holder* ptr) const { diff --git a/src/mongo/db/query/sbe_stage_builder_abt_holder_impl.h b/src/mongo/db/query/sbe_stage_builder_abt_holder_impl.h index 8772b700ebf04..ef5428be44a2a 100644 --- a/src/mongo/db/query/sbe_stage_builder_abt_holder_impl.h +++ b/src/mongo/db/query/sbe_stage_builder_abt_holder_impl.h @@ -29,7 +29,9 @@ #pragma once -#include "mongo/db/query/optimizer/node.h" +#include + +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep #include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/sbe_stage_builder_abt_holder_def.h" diff --git a/src/mongo/db/query/sbe_stage_builder_accumulator.cpp b/src/mongo/db/query/sbe_stage_builder_accumulator.cpp index b02c64d100ed0..517ec79636391 100644 --- a/src/mongo/db/query/sbe_stage_builder_accumulator.cpp +++ b/src/mongo/db/query/sbe_stage_builder_accumulator.cpp @@ -27,17 +27,35 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/base/string_data_comparator_interface.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/sbe/accumulator_sum_value_enum.h" +#include "mongo/db/exec/sbe/values/arith_common.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" #include "mongo/db/pipeline/accumulator.h" -#include "mongo/db/pipeline/accumulator_for_window_functions.h" -#include "mongo/db/pipeline/accumulator_js_reduce.h" -#include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/pipeline/accumulator_multi.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/sbe_stage_builder_accumulator.h" -#include "mongo/db/query/sbe_stage_builder_expression.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::stage_builder { namespace { @@ -84,7 +102,9 @@ std::vector> buildCombinePartialAggsMin( std::unique_ptr buildFinalizeMin(StageBuilderState& state, const AccumulationExpression& expr, - const sbe::value::SlotVector& minSlots) { + const sbe::value::SlotVector& minSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { // We can get away with not building a project stage since there's no finalize step but we // will stick the slot into an EVariable in case a $min is one of many group clauses and it // can be combined into a final project stage. 
@@ -125,7 +145,9 @@ std::vector> buildCombinePartialAggsMax( std::unique_ptr buildFinalizeMax(StageBuilderState& state, const AccumulationExpression& expr, - const sbe::value::SlotVector& maxSlots) { + const sbe::value::SlotVector& maxSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { tassert(5755100, str::stream() << "Expected one input slot for finalization of max, got: " << maxSlots.size(), @@ -222,7 +244,9 @@ std::vector> buildCombinePartialAggsAvg( std::unique_ptr buildFinalizeAvg(StageBuilderState& state, const AccumulationExpression& expr, - const sbe::value::SlotVector& aggSlots) { + const sbe::value::SlotVector& aggSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { // Slot 0 contains the accumulated sum, and slot 1 contains the count of summed items. tassert(5754703, str::stream() << "Expected two slots to finalize avg, got: " << aggSlots.size(), @@ -331,7 +355,9 @@ std::vector> buildCombinePartialAggsSum( std::unique_ptr buildFinalizeSum(StageBuilderState& state, const AccumulationExpression& expr, - const sbe::value::SlotVector& sumSlots) { + const sbe::value::SlotVector& sumSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { tassert(5755300, str::stream() << "Expected one input slot for finalization of sum, got: " << sumSlots.size(), @@ -409,7 +435,9 @@ std::vector> buildCombinePartialAggsAddToSet( std::unique_ptr buildFinalizeCappedAccumulator( StageBuilderState& state, const AccumulationExpression& expr, - const sbe::value::SlotVector& accSlots) { + const sbe::value::SlotVector& accSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { tassert(6526500, str::stream() << "Expected one input slot for finalization of capped accumulator, got: " << accSlots.size(), @@ -513,7 +541,9 @@ std::unique_ptr buildFinalizePartialStdDev(sbe::value::SlotId std::unique_ptr buildFinalizeStdDevPop( StageBuilderState& state, const AccumulationExpression& expr, - const sbe::value::SlotVector& stdDevSlots) { + const sbe::value::SlotVector& stdDevSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { tassert(5755204, str::stream() << "Expected one input slot for finalization of stdDevPop, got: " << stdDevSlots.size(), @@ -530,7 +560,9 @@ std::unique_ptr buildFinalizeStdDevPop( std::unique_ptr buildFinalizeStdDevSamp( StageBuilderState& state, const AccumulationExpression& expr, - const sbe::value::SlotVector& stdDevSlots) { + const sbe::value::SlotVector& stdDevSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { tassert(5755209, str::stream() << "Expected one input slot for finalization of stdDevSamp, got: " << stdDevSlots.size(), @@ -579,7 +611,362 @@ std::vector> buildCombinePartialAggsMergeObjec auto arg = makeVariable(inputSlots[0]); return buildAccumulatorMergeObjects(expr, std::move(arg), collatorSlot, frameIdGenerator); } -}; // namespace + +std::vector> buildInitializeAccumulatorMulti( + std::unique_ptr maxSizeExpr, sbe::value::FrameIdGenerator& frameIdGenerator) { + // Create an array of four elements [value holder, max size, memory used, memory limit]. 
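+ // For an accumulator spec like {$firstN: {input: '$a', n: 3}} (illustrative), the constant 'n'
+ // is validated and folded at build time in the first branch below; for a group-key dependent
+ // 'n' such as {n: '$k'}, the non-constant branch emits a runtime check that 'n' coerces to a
+ // positive 64-bit integer.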
+ std::vector> aggs; + auto maxAccumulatorBytes = internalQueryTopNAccumulatorBytes.load(); + if (auto* maxSizeConstExpr = maxSizeExpr->as()) { + auto [tag, val] = maxSizeConstExpr->getConstant(); + auto [convertOwn, convertTag, convertVal] = + genericNumConvert(tag, val, sbe::value::TypeTags::NumberInt64); + uassert(7548606, + "parameter 'n' must be coercible to a positive 64-bit integer", + convertTag != sbe::value::TypeTags::Nothing && + static_cast(convertVal) > 0); + aggs.push_back( + makeFunction("newArray", + makeFunction("newArray"), + makeConstant(sbe::value::TypeTags::NumberInt64, 0), + makeConstant(convertTag, convertVal), + makeConstant(sbe::value::TypeTags::NumberInt32, 0), + makeConstant(sbe::value::TypeTags::NumberInt32, maxAccumulatorBytes))); + } else { + auto localBind = makeLocalBind( + &frameIdGenerator, + [&](sbe::EVariable maxSizeConvertVar) { + return sbe::makeE( + sbe::makeE( + sbe::EPrimBinary::logicAnd, + makeFunction("exists", maxSizeConvertVar.clone()), + sbe::makeE( + sbe::EPrimBinary::greater, + maxSizeConvertVar.clone(), + makeConstant(sbe::value::TypeTags::NumberInt64, 0))), + makeFunction( + "newArray", + makeFunction("newArray"), + makeConstant(sbe::value::TypeTags::NumberInt64, 0), + maxSizeConvertVar.clone(), + makeConstant(sbe::value::TypeTags::NumberInt32, 0), + makeConstant(sbe::value::TypeTags::NumberInt32, maxAccumulatorBytes)), + makeFail(7548607, + "parameter 'n' must be coercible to a positive 64-bit integer")); + }, + sbe::makeE(std::move(maxSizeExpr), + sbe::value::TypeTags::NumberInt64)); + aggs.push_back(std::move(localBind)); + } + return aggs; +} + +std::vector> buildAccumulatorFirstN( + const AccumulationExpression& expr, + std::unique_ptr arg, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + std::vector> aggs; + aggs.push_back(makeLocalBind( + &frameIdGenerator, + [&](sbe::EVariable accumulatorState) { + return sbe::makeE( + makeFunction("aggFirstNNeedsMoreInput", accumulatorState.clone()), + makeFunction( + "aggFirstN", + makeMoveVariable(*accumulatorState.getFrameId(), accumulatorState.getSlotId()), + makeFillEmptyNull(std::move(arg))), + makeMoveVariable(*accumulatorState.getFrameId(), accumulatorState.getSlotId())); + }, + makeFunction("aggState"))); + return aggs; +} + +std::vector> buildCombinePartialAggsFirstN( + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + uassert(7548608, + str::stream() << "Expected one input slot for merging $firstN, got: " + << inputSlots.size(), + inputSlots.size() == 1); + + std::vector> aggs; + aggs.push_back(makeFunction("aggFirstNMerge", makeVariable(inputSlots[0]))); + return aggs; +} + +std::unique_ptr buildFinalizeFirstN( + StageBuilderState& state, + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + uassert(7548609, + str::stream() << "Expected one input slot for finalization of $firstN, got: " + << inputSlots.size(), + inputSlots.size() == 1); + return makeFunction("aggFirstNFinalize", makeVariable(inputSlots[0])); +} + +std::vector> buildAccumulatorLastN( + const AccumulationExpression& expr, + std::unique_ptr arg, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + std::vector> aggs; + aggs.push_back(makeFunction("aggLastN", makeFillEmptyNull(std::move(arg)))); + return aggs; +} + +std::vector> 
buildCombinePartialAggsLastN( + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + uassert(7548701, + str::stream() << "Expected one input slot for merging $lastN, got: " + << inputSlots.size(), + inputSlots.size() == 1); + + std::vector> aggs; + aggs.push_back(makeFunction("aggLastNMerge", makeVariable(inputSlots[0]))); + return aggs; +} + +std::unique_ptr buildFinalizeLastN( + StageBuilderState& state, + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + uassert(7548702, + str::stream() << "Expected one input slot for finalization of $lastN, got: " + << inputSlots.size(), + inputSlots.size() == 1); + return makeFunction("aggLastNFinalize", makeVariable(inputSlots[0])); +} + +bool isAccumulatorTopN(const AccumulationExpression& expr) { + return expr.name == AccumulatorTopBottomN::getName() || + expr.name == AccumulatorTopBottomN::getName(); +} + +std::vector> buildAccumulatorTopBottomN( + const AccumulationExpression& expr, + StringDataMap> args, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + auto it = args.find(AccArgs::kTopBottomNKey); + tassert(5807009, + str::stream() << "Accumulator " << expr.name << " expects a '" + << AccArgs::kTopBottomNKey << "' argument", + it != args.end()); + auto key = std::move(it->second); + + it = args.find(AccArgs::kTopBottomNValue); + tassert(5807010, + str::stream() << "Accumulator " << expr.name << " expects a '" + << AccArgs::kTopBottomNValue << "' argument", + it != args.end()); + auto value = std::move(it->second); + + it = args.find(AccArgs::kTopBottomNSortSpec); + tassert(5807021, + str::stream() << "Accumulator " << expr.name << " expects a '" + << AccArgs::kTopBottomNSortSpec << "' argument", + it != args.end()); + auto sortSpec = std::move(it->second); + + std::vector> aggs; + aggs.push_back(makeFunction(isAccumulatorTopN(expr) ? "aggTopN" : "aggBottomN", + std::move(key), + std::move(value), + std::move(sortSpec))); + return aggs; +} + +std::vector> buildCombinePartialTopBottomN( + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + StringDataMap> args, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + tassert(5807011, + str::stream() << "Expected one input slot for merging " << expr.name + << ", got: " << inputSlots.size(), + inputSlots.size() == 1); + + auto it = args.find(AccArgs::kTopBottomNSortSpec); + tassert(5807022, + str::stream() << "Accumulator " << expr.name << " expects a '" + << AccArgs::kTopBottomNSortSpec << "' argument", + it != args.end()); + auto sortSpec = std::move(it->second); + + std::vector> aggs; + aggs.push_back(makeFunction(isAccumulatorTopN(expr) ? 
"aggTopNMerge" : "aggBottomNMerge", + makeVariable(inputSlots[0]), + std::move(sortSpec))); + return aggs; +} + +std::unique_ptr buildFinalizeTopBottomNImpl( + StageBuilderState& state, + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + StringDataMap> args, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator, + bool single) { + tassert(5807012, + str::stream() << "Expected one input slot for finalization of " << expr.name + << ", got: " << inputSlots.size(), + inputSlots.size() == 1); + auto inputVar = makeVariable(inputSlots[0]); + + auto it = args.find(AccArgs::kTopBottomNSortSpec); + tassert(5807023, + str::stream() << "Accumulator " << expr.name << " expects a '" + << AccArgs::kTopBottomNSortSpec << "' argument", + it != args.end()); + auto sortSpec = std::move(it->second); + + if (state.needsMerge) { + // When the data will be merged, the heap itself doesn't need to be sorted since the merging + // code will handle the sorting. + auto heapExpr = + makeFunction("getElement", + inputVar->clone(), + makeConstant(sbe::value::TypeTags::NumberInt32, + static_cast(sbe::vm::AggMultiElems::kInternalArr))); + auto lambdaFrameId = frameIdGenerator.generate(); + auto pairVar = makeVariable(lambdaFrameId, 0); + auto lambdaExpr = sbe::makeE( + lambdaFrameId, + makeNewObjFunction( + FieldPair{AccumulatorN::kFieldNameGeneratedSortKey, + makeFunction("getElement", + pairVar->clone(), + makeConstant(sbe::value::TypeTags::NumberInt32, 0))}, + FieldPair{AccumulatorN::kFieldNameOutput, + makeFunction("getElement", + pairVar->clone(), + makeConstant(sbe::value::TypeTags::NumberInt32, 1))})); + // Convert the array pair representation [key, output] to an object format that the merging + // code expects. + return makeFunction("traverseP", + std::move(heapExpr), + std::move(lambdaExpr), + makeConstant(sbe::value::TypeTags::NumberInt32, 1)); + } else { + auto finalExpr = + makeFunction(isAccumulatorTopN(expr) ? "aggTopNFinalize" : "aggBottomNFinalize", + inputVar->clone(), + std::move(sortSpec)); + if (single) { + finalExpr = makeFunction("getElement", + std::move(finalExpr), + makeConstant(sbe::value::TypeTags::NumberInt32, 0)); + } + return finalExpr; + } +} + +std::unique_ptr buildFinalizeTopBottomN( + StageBuilderState& state, + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + StringDataMap> args, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + return buildFinalizeTopBottomNImpl(state, + expr, + inputSlots, + std::move(args), + collatorSlot, + frameIdGenerator, + false /* single */); +} + +std::unique_ptr buildFinalizeTopBottom( + StageBuilderState& state, + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + StringDataMap> args, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + return buildFinalizeTopBottomNImpl(state, + expr, + inputSlots, + std::move(args), + collatorSlot, + frameIdGenerator, + true /* single */); +} + +std::vector> buildAccumulatorMinMaxN( + const AccumulationExpression& expr, + std::unique_ptr arg, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + std::vector> aggs; + auto aggExprName = expr.name == AccumulatorMaxN::kName ? 
"aggMaxN" : "aggMinN"; + if (collatorSlot) { + aggs.push_back( + makeFunction(std::move(aggExprName), std::move(arg), makeVariable(*collatorSlot))); + + } else { + aggs.push_back(makeFunction(std::move(aggExprName), std::move(arg))); + } + return aggs; +} + +std::vector> buildCombinePartialAggsMinMaxN( + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + uassert(7548808, + str::stream() << "Expected one input slot for merging, got: " << inputSlots.size(), + inputSlots.size() == 1); + + std::vector> aggs; + auto aggExprName = expr.name == AccumulatorMaxN::kName ? "aggMaxNMerge" : "aggMinNMerge"; + if (collatorSlot) { + aggs.push_back(makeFunction( + std::move(aggExprName), makeVariable(inputSlots[0]), makeVariable(*collatorSlot))); + } else { + aggs.push_back(makeFunction(std::move(aggExprName), makeVariable(inputSlots[0]))); + } + return aggs; +} + +std::unique_ptr buildFinalizeMinMaxN( + StageBuilderState& state, + const AccumulationExpression& expr, + const sbe::value::SlotVector& inputSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + uassert(7548809, + str::stream() << "Expected one input slot for finalization, got: " << inputSlots.size(), + inputSlots.size() == 1); + auto aggExprName = expr.name == AccumulatorMaxN::kName ? "aggMaxNFinalize" : "aggMinNFinalize"; + if (collatorSlot) { + return makeFunction( + std::move(aggExprName), makeVariable(inputSlots[0]), makeVariable(*collatorSlot)); + } else { + return makeFunction(std::move(aggExprName), makeVariable(inputSlots[0])); + } +} + +template +std::vector> emptyInitializer( + std::unique_ptr maxSizeExpr, sbe::value::FrameIdGenerator& frameIdGenerator) { + return std::vector>{N}; +} +} // namespace std::vector> buildAccumulator( const AccumulationStatement& acc, @@ -604,6 +991,10 @@ std::vector> buildAccumulator( {AccumulatorMergeObjects::kName, &buildAccumulatorMergeObjects}, {AccumulatorStdDevPop::kName, &buildAccumulatorStdDev}, {AccumulatorStdDevSamp::kName, &buildAccumulatorStdDev}, + {AccumulatorFirstN::kName, &buildAccumulatorFirstN}, + {AccumulatorLastN::kName, &buildAccumulatorLastN}, + {AccumulatorMaxN::kName, &buildAccumulatorMinMaxN}, + {AccumulatorMinN::kName, &buildAccumulatorMinMaxN}, }; auto accExprName = acc.expr.name; @@ -618,6 +1009,37 @@ std::vector> buildAccumulator( frameIdGenerator); } +std::vector> buildAccumulator( + const AccumulationStatement& acc, + StringDataMap> argExprs, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + using BuildAccumulatorFn = std::function>( + const AccumulationExpression&, + StringDataMap>, + boost::optional, + sbe::value::FrameIdGenerator&)>; + + static const StringDataMap kAccumulatorBuilders = { + {AccumulatorTopBottomN::getName(), &buildAccumulatorTopBottomN}, + {AccumulatorTopBottomN::getName(), &buildAccumulatorTopBottomN}, + {AccumulatorTopBottomN::getName(), &buildAccumulatorTopBottomN}, + {AccumulatorTopBottomN::getName(), + &buildAccumulatorTopBottomN}, + }; + + auto accExprName = acc.expr.name; + uassert(5807017, + str::stream() << "Unsupported Accumulator in SBE accumulator builder: " << accExprName, + kAccumulatorBuilders.find(accExprName) != kAccumulatorBuilders.end()); + + return std::invoke(kAccumulatorBuilders.at(accExprName), + acc.expr, + std::move(argExprs), + collatorSlot, + frameIdGenerator); +} + std::vector> buildCombinePartialAggregates( const AccumulationStatement& 
acc, const sbe::value::SlotVector& inputSlots, @@ -641,6 +1063,10 @@ std::vector> buildCombinePartialAggregates( {AccumulatorStdDevPop::kName, &buildCombinePartialAggsStdDev}, {AccumulatorStdDevSamp::kName, &buildCombinePartialAggsStdDev}, {AccumulatorSum::kName, &buildCombinePartialAggsSum}, + {AccumulatorFirstN::kName, &buildCombinePartialAggsFirstN}, + {AccumulatorLastN::kName, &buildCombinePartialAggsLastN}, + {AccumulatorMaxN::kName, &buildCombinePartialAggsMinMaxN}, + {AccumulatorMinN::kName, &buildCombinePartialAggsMinMaxN}, }; auto accExprName = acc.expr.name; @@ -652,11 +1078,53 @@ std::vector> buildCombinePartialAggregates( kAggCombinerBuilders.at(accExprName), acc.expr, inputSlots, collatorSlot, frameIdGenerator); } +std::vector> buildCombinePartialAggregates( + const AccumulationStatement& acc, + const sbe::value::SlotVector& inputSlots, + StringDataMap> argExprs, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + using BuildAggCombinerFn = std::function>( + const AccumulationExpression&, + const sbe::value::SlotVector&, + StringDataMap>, + boost::optional, + sbe::value::FrameIdGenerator&)>; + + static const StringDataMap kAggCombinerBuilders = { + {AccumulatorTopBottomN::getName(), &buildCombinePartialTopBottomN}, + {AccumulatorTopBottomN::getName(), + &buildCombinePartialTopBottomN}, + {AccumulatorTopBottomN::getName(), + &buildCombinePartialTopBottomN}, + {AccumulatorTopBottomN::getName(), + &buildCombinePartialTopBottomN}, + }; + + auto accExprName = acc.expr.name; + uassert(5807019, + str::stream() << "Unsupported Accumulator in SBE accumulator builder: " << accExprName, + kAggCombinerBuilders.find(accExprName) != kAggCombinerBuilders.end()); + + return std::invoke(kAggCombinerBuilders.at(accExprName), + acc.expr, + inputSlots, + std::move(argExprs), + collatorSlot, + frameIdGenerator); +} + std::unique_ptr buildFinalize(StageBuilderState& state, const AccumulationStatement& acc, - const sbe::value::SlotVector& aggSlots) { - using BuildFinalizeFn = std::function( - StageBuilderState&, const AccumulationExpression&, sbe::value::SlotVector)>; + const sbe::value::SlotVector& aggSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + using BuildFinalizeFn = + std::function(StageBuilderState&, + const AccumulationExpression&, + sbe::value::SlotVector, + boost::optional, + sbe::value::FrameIdGenerator&)>; static const StringDataMap kAccumulatorBuilders = { {AccumulatorMin::kName, &buildFinalizeMin}, @@ -670,6 +1138,10 @@ std::unique_ptr buildFinalize(StageBuilderState& state, {AccumulatorMergeObjects::kName, nullptr}, {AccumulatorStdDevPop::kName, &buildFinalizeStdDevPop}, {AccumulatorStdDevSamp::kName, &buildFinalizeStdDevSamp}, + {AccumulatorFirstN::kName, &buildFinalizeFirstN}, + {AccumulatorLastN::kName, &buildFinalizeLastN}, + {AccumulatorMaxN::kName, &buildFinalizeMinMaxN}, + {AccumulatorMinN::kName, &buildFinalizeMinMaxN}, }; auto accExprName = acc.expr.name; @@ -678,27 +1150,87 @@ std::unique_ptr buildFinalize(StageBuilderState& state, kAccumulatorBuilders.find(accExprName) != kAccumulatorBuilders.end()); if (auto fn = kAccumulatorBuilders.at(accExprName); fn) { - return std::invoke(fn, state, acc.expr, aggSlots); + return std::invoke(fn, state, acc.expr, aggSlots, collatorSlot, frameIdGenerator); } else { // nullptr for 'EExpression' signifies that no final project is necessary. 
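+ // For example, $first and $last have trivial finalize steps, so their entries in the table
+ // above are null.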
return nullptr; } } +std::unique_ptr buildFinalize( + StageBuilderState& state, + const AccumulationStatement& acc, + const sbe::value::SlotVector& aggSlots, + StringDataMap> argExprs, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator) { + using BuildFinalizeFn = std::function( + StageBuilderState&, + const AccumulationExpression&, + sbe::value::SlotVector, + StringDataMap>, + boost::optional, + sbe::value::FrameIdGenerator&)>; + + static const StringDataMap kAccumulatorBuilders = { + {AccumulatorTopBottomN::getName(), &buildFinalizeTopBottom}, + {AccumulatorTopBottomN::getName(), &buildFinalizeTopBottom}, + {AccumulatorTopBottomN::getName(), &buildFinalizeTopBottomN}, + {AccumulatorTopBottomN::getName(), &buildFinalizeTopBottomN}, + }; + + auto accExprName = acc.expr.name; + uassert(5807020, + str::stream() << "Unsupported Accumulator in SBE accumulator builder: " << accExprName, + kAccumulatorBuilders.find(accExprName) != kAccumulatorBuilders.end()); + + return std::invoke(kAccumulatorBuilders.at(accExprName), + state, + acc.expr, + aggSlots, + std::move(argExprs), + collatorSlot, + frameIdGenerator); +} + std::vector> buildInitialize( const AccumulationStatement& acc, std::unique_ptr initExpr, - boost::optional collatorSlot) { + sbe::value::FrameIdGenerator& frameIdGenerator) { using BuildInitializeFn = std::function>( - std::unique_ptr, boost::optional)>; - - static const StringDataMap kAccumulatorBuilders = {}; + std::unique_ptr, sbe::value::FrameIdGenerator&)>; + + static const StringDataMap kAccumulatorBuilders = { + {AccumulatorMin::kName, &emptyInitializer<1>}, + {AccumulatorMax::kName, &emptyInitializer<1>}, + {AccumulatorFirst::kName, &emptyInitializer<1>}, + {AccumulatorLast::kName, &emptyInitializer<1>}, + {AccumulatorAvg::kName, &emptyInitializer<2>}, + {AccumulatorAddToSet::kName, &emptyInitializer<1>}, + {AccumulatorSum::kName, &emptyInitializer<1>}, + {AccumulatorPush::kName, &emptyInitializer<1>}, + {AccumulatorMergeObjects::kName, &emptyInitializer<1>}, + {AccumulatorStdDevPop::kName, &emptyInitializer<1>}, + {AccumulatorStdDevSamp::kName, &emptyInitializer<1>}, + {AccumulatorFirstN::kName, &buildInitializeAccumulatorMulti}, + {AccumulatorLastN::kName, &buildInitializeAccumulatorMulti}, + {AccumulatorTopBottomN::getName(), + &buildInitializeAccumulatorMulti}, + {AccumulatorTopBottomN::getName(), + &buildInitializeAccumulatorMulti}, + {AccumulatorTopBottomN::getName(), + &buildInitializeAccumulatorMulti}, + {AccumulatorTopBottomN::getName(), + &buildInitializeAccumulatorMulti}, + {AccumulatorMaxN::kName, &buildInitializeAccumulatorMulti}, + {AccumulatorMinN::kName, &buildInitializeAccumulatorMulti}, + }; auto accExprName = acc.expr.name; uassert(7567300, str::stream() << "Unsupported Accumulator in SBE accumulator builder: " << accExprName, kAccumulatorBuilders.find(accExprName) != kAccumulatorBuilders.end()); - return std::invoke(kAccumulatorBuilders.at(accExprName), std::move(initExpr), collatorSlot); + return std::invoke(kAccumulatorBuilders.at(accExprName), std::move(initExpr), frameIdGenerator); } } // namespace mongo::stage_builder diff --git a/src/mongo/db/query/sbe_stage_builder_accumulator.h b/src/mongo/db/query/sbe_stage_builder_accumulator.h index c2ad47611b363..6c71ce6a64f4e 100644 --- a/src/mongo/db/query/sbe_stage_builder_accumulator.h +++ b/src/mongo/db/query/sbe_stage_builder_accumulator.h @@ -29,16 +29,29 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" #include 
"mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/accumulation_statement.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/util/string_map.h" namespace mongo::stage_builder { class PlanStageSlots; +namespace AccArgs { +const StringData kTopBottomNSortSpec = "sortSpec"_sd; +const StringData kTopBottomNKey = "key"_sd; +const StringData kTopBottomNValue = "value"_sd; +} // namespace AccArgs + /** * Translates an input AccumulationStatement into an SBE EExpression for accumulation expressions. */ @@ -48,6 +61,15 @@ std::vector> buildAccumulator( boost::optional collatorSlot, sbe::value::FrameIdGenerator&); +/** + * Similar to above but takes multiple arguments. + */ +std::vector> buildAccumulator( + const AccumulationStatement& acc, + StringDataMap> argExprs, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator&); + /** * When SBE hash aggregation spills to disk, it spills partial aggregates which need to be combined * later. This function returns the expressions that can be used to combine partial aggregates for @@ -61,6 +83,16 @@ std::vector> buildCombinePartialAggregates( boost::optional collatorSlot, sbe::value::FrameIdGenerator&); +/** + * Similar to above but takes multiple arguments. + */ +std::vector> buildCombinePartialAggregates( + const AccumulationStatement& acc, + const sbe::value::SlotVector& inputSlots, + StringDataMap> argExprs, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator&); + /** * Translates an input AccumulationStatement into an SBE EExpression that represents an * AccumulationStatement's finalization step. The 'stage' parameter provides the input subtree to @@ -68,7 +100,20 @@ std::vector> buildCombinePartialAggregates( */ std::unique_ptr buildFinalize(StageBuilderState& state, const AccumulationStatement& acc, - const sbe::value::SlotVector& aggSlots); + const sbe::value::SlotVector& aggSlots, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator); + +/** + * Similar to above but takes multiple arguments. + */ +std::unique_ptr buildFinalize( + StageBuilderState& state, + const AccumulationStatement& acc, + const sbe::value::SlotVector& aggSlots, + StringDataMap> argExprs, + boost::optional collatorSlot, + sbe::value::FrameIdGenerator& frameIdGenerator); /** * Translates an input AccumulationStatement into an SBE EExpression for the initialization of the @@ -77,5 +122,5 @@ std::unique_ptr buildFinalize(StageBuilderState& state, std::vector> buildInitialize( const AccumulationStatement& acc, std::unique_ptr initExpr, - boost::optional collatorSlot); + sbe::value::FrameIdGenerator&); } // namespace mongo::stage_builder diff --git a/src/mongo/db/query/sbe_stage_builder_accumulator_test.cpp b/src/mongo/db/query/sbe_stage_builder_accumulator_test.cpp index 03ce1f424eb90..4e87cfcf35527 100644 --- a/src/mongo/db/query/sbe_stage_builder_accumulator_test.cpp +++ b/src/mongo/db/query/sbe_stage_builder_accumulator_test.cpp @@ -27,20 +27,67 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include - +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +// IWYU pragma: no_include "format.h" + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/sbe/expression_test_base.h" +#include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/exec/sbe/values/sort_spec.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/values/value_printer.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/pipeline/accumulator.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_accumulator.h" #include "mongo/db/query/sbe_stage_builder_test_fixture.h" +#include "mongo/db/query/shard_filterer_factory_interface.h" +#include "mongo/db/storage/key_string.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/summation.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -127,7 +174,7 @@ class SbeStageBuilderGroupTest : public SbeStageBuilderTestFixture { std::move(querySolution), false /*hasRecordId*/, nullptr, std::move(collator)); ASSERT_EQ(resultSlots.size(), 1); - auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots[0]); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots[0]); return getAllResults(stage.get(), &resultAccessors[0]); } @@ -155,7 +202,7 @@ class SbeStageBuilderGroupTest : public SbeStageBuilderTestFixture { ErrorCodes::Error expectedError, std::unique_ptr collator = nullptr) { try { - getResultsForAggregation(fromjson(groupSpec.rawData()), inputDocs, std::move(collator)); + getResultsForAggregation(fromjson(groupSpec), inputDocs, std::move(collator)); ASSERT(false) << "Expected error: " << expectedError << " for " << groupSpec << " but succeeded"; } catch (const DBException& e) { @@ -1605,6 +1652,410 @@ TEST_F(SbeStageBuilderGroupTest, StdDevSampAccumulatorTranslationNonNumber) { BSON_ARRAY(BSON("_id" << BSONNULL << "x" << 0))); } +TEST_F(SbeStageBuilderGroupTest, FirstNLastNAccumulatorSingleGroup) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 11 << "b" << 1)), + BSON_ARRAY(BSON("a" << 22 << "b" << 2)), + BSON_ARRAY(BSON("a" << 33 << "b" << 3)), + BSON_ARRAY(BSON("a" << 44 << "b" << 4))}; + runGroupAggregationTest("{_id: null, x: {$firstN: {input: {a: '$a', b: '$b'}, n: 3}}}", + docs, + 
BSON_ARRAY(BSON("_id" << BSONNULL << "x" + << BSON_ARRAY(BSON("a" << 11 << "b" << 1) + << BSON("a" << 22 << "b" << 2) + << BSON("a" << 33 << "b" << 3))))); + + runGroupAggregationTest("{_id: null, x: {$lastN: {input: {a: '$a', b: '$b'}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" + << BSON_ARRAY(BSON("a" << 22 << "b" << 2) + << BSON("a" << 33 << "b" << 3) + << BSON("a" << 44 << "b" << 4))))); +} + +TEST_F(SbeStageBuilderGroupTest, FirstNLastNAccumulatorNotEnoughElement) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 22 << "b" << 2)), + BSON_ARRAY(BSON("a" << 11 << "b" << 1))}; + runGroupAggregationTest("{_id: null, x: {$firstN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(22 << 11)))); + + runGroupAggregationTest("{_id: null, x: {$lastN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(22 << 11)))); +} + +TEST_F(SbeStageBuilderGroupTest, FirstNLastNAccumulatorMultiGroup) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 44 << "b" << 4 << "n" << 2)), + BSON_ARRAY(BSON("a" << 77 << "b" << 7 << "n" << 4)), + BSON_ARRAY(BSON("a" << 33 << "b" << 3 << "n" << 2)), + BSON_ARRAY(BSON("a" << 88 << "b" << 8 << "n" << 4)), + BSON_ARRAY(BSON("a" << 22 << "b" << 2 << "n" << 2)), + BSON_ARRAY(BSON("a" << 66 << "b" << 6 << "n" << 4)), + BSON_ARRAY(BSON("a" << 11 << "b" << 1 << "n" << 2)), + BSON_ARRAY(BSON("a" << 55 << "b" << 5 << "n" << 4))}; + runGroupAggregationTest("{_id: '$n', x: {$firstN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << 2 << "x" << BSON_ARRAY(44 << 33 << 22)) + << BSON("_id" << 4 << "x" << BSON_ARRAY(77 << 88 << 66)))); + runGroupAggregationTest("{_id: '$n', x: {$lastN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << 2 << "x" << BSON_ARRAY(33 << 22 << 11)) + << BSON("_id" << 4 << "x" << BSON_ARRAY(88 << 66 << 55)))); +} + +TEST_F(SbeStageBuilderGroupTest, FirstNLastNAccumulatorDynamicN) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 44 << "b" << 4 << "n" << 2)), + BSON_ARRAY(BSON("a" << 33 << "b" << 3 << "n" << 2)), + BSON_ARRAY(BSON("a" << 22 << "b" << 2 << "n" << 2)), + BSON_ARRAY(BSON("a" << 11 << "b" << 1 << "n" << 2)), + BSON_ARRAY(BSON("a" << 88 << "b" << 8 << "n" << 4)), + BSON_ARRAY(BSON("a" << 77 << "b" << 7 << "n" << 4)), + BSON_ARRAY(BSON("a" << 66 << "b" << 6 << "n" << 4)), + BSON_ARRAY(BSON("a" << 55 << "b" << 5 << "n" << 4))}; + runGroupAggregationTest( + "{_id: {k: '$n'}, x: {$firstN: {input: '$a', n: '$k'}}}", + docs, + BSON_ARRAY(BSON("_id" << BSON("k" << 2) << "x" << BSON_ARRAY(44 << 33)) + << BSON("_id" << BSON("k" << 4) << "x" << BSON_ARRAY(88 << 77 << 66 << 55)))); + + runGroupAggregationTest( + "{_id: {k: '$n'}, x: {$lastN: {input: '$a', n: '$k'}}}", + docs, + BSON_ARRAY(BSON("_id" << BSON("k" << 2) << "x" << BSON_ARRAY(22 << 11)) + << BSON("_id" << BSON("k" << 4) << "x" << BSON_ARRAY(88 << 77 << 66 << 55)))); +} + +TEST_F(SbeStageBuilderGroupTest, MultiAccumulatorInvalidConstantN) { + const std::vector inputFieldMultiAccumulators{ + "$firstN", "$lastN", "$maxN", "$minN"}; + const std::vector outputFieldMultiAccumulators{"$topN", "$bottomN"}; + const std::vector testCases{"'string'", "4.2", "-1", "0"}; + auto docs = std::vector{BSON_ARRAY(BSON("a" << 11 << "b" << 1))}; + for (const auto& acc : inputFieldMultiAccumulators) { + for (const auto& testCase : testCases) { + runGroupAggregationToFail(str::stream() << "{_id: null, x: {" << acc + << ": {input: '$a', n: " << testCase << "}}}", + docs, + static_cast(7548606)); + } + } + for 
(const auto& acc : outputFieldMultiAccumulators) { + for (const auto& testCase : testCases) { + runGroupAggregationToFail(str::stream() << "{_id: null, x: {" << acc + << ": {output: '$a', sortBy: {s: 1}, n: " + << testCase << "}}}", + docs, + static_cast(7548606)); + } + } +} + +TEST_F(SbeStageBuilderGroupTest, MultiAccumulatorInvalidDynamicN) { + const std::vector inputFieldMultiAccumulators{ + "$firstN", "$lastN", "$maxN", "$minN"}; + const std::vector outputFieldMultiAccumulators{"$topN", "$bottomN"}; + const std::vector testCases{BSON("n" + << "string"), + BSON("n" << 4.2), + BSON("n" << -1), + BSON("n" << 0)}; + for (const auto& acc : inputFieldMultiAccumulators) { + for (const auto& testCase : testCases) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 11 << "n1" << testCase))}; + + runGroupAggregationToFail(str::stream() << "{_id: null, x: {" << acc + << ": {input: '$a', n: '$n'}}}", + docs, + static_cast(7548607)); + runGroupAggregationToFail(str::stream() << "{_id: {n: '$n1.n'}, x: {" << acc + << ": {input: '$a', n: '$n'}}}", + docs, + static_cast(7548607)); + } + } + for (const auto& acc : outputFieldMultiAccumulators) { + for (const auto& testCase : testCases) { + auto docs = + std::vector{BSON_ARRAY(BSON("a" << 11 << "s" << 1 << "n1" << testCase))}; + + runGroupAggregationToFail(str::stream() + << "{_id: null, x: {" << acc + << ": {output: '$a', sortBy: {s: 1}, n: '$n'}}}", + docs, + static_cast(7548607)); + runGroupAggregationToFail(str::stream() + << "{_id: {n: '$n1.n'}, x: {" << acc + << ": {output: '$a', sortBy: {s: 1}, n: '$n'}}}", + docs, + static_cast(7548607)); + } + } +} + +TEST_F(SbeStageBuilderGroupTest, TopBottomNAccumulatorSingleGroup) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 44 << "s" << 4)), + BSON_ARRAY(BSON("a" << 33 << "s" << 3)), + BSON_ARRAY(BSON("a" << 22 << "s" << 2)), + BSON_ARRAY(BSON("a" << 11 << "s" << 1))}; + runGroupAggregationTest("{_id: null, x: {$top: {output: '$a', sortBy: {s: 1}}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << 11))); + runGroupAggregationTest("{_id: null, x: {$bottom: {output: '$a', sortBy: {s: 1}}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << 44))); + runGroupAggregationTest( + "{_id: null, x: {$topN: {output: '$a', sortBy: {s: 1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(11 << 22 << 33)))); + runGroupAggregationTest( + "{_id: null, x: {$bottomN: {output: '$a', sortBy: {s: 1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(22 << 33 << 44)))); +} + +TEST_F(SbeStageBuilderGroupTest, TopBottomNAccumulatorCompoundSort) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 11 << "s1" << 1 << "s2" << 1)), + BSON_ARRAY(BSON("a" << 12 << "s1" << 1 << "s2" << 2)), + BSON_ARRAY(BSON("a" << 21 << "s1" << 2 << "s2" << 1)), + BSON_ARRAY(BSON("a" << 22 << "s1" << 2 << "s2" << 2))}; + runGroupAggregationTest("{_id: null, x: {$top: {output: '$a', sortBy: {s1: 1, s2: -1}}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << 12))); + runGroupAggregationTest("{_id: null, x: {$bottom: {output: '$a', sortBy: {s1: 1, s2: -1}}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << 21))); + runGroupAggregationTest( + "{_id: null, x: {$topN: {output: '$a', sortBy: {s1: 1, s2: -1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(12 << 11 << 22)))); + runGroupAggregationTest( + "{_id: null, x: {$bottomN: {output: '$a', sortBy: {s1: 1, s2: -1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(11 << 22 << 
21)))); +} + +TEST_F(SbeStageBuilderGroupTest, TopBottomNAccumulatorCollation) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 41 << "s" + << "41")), + BSON_ARRAY(BSON("a" << 32 << "s" + << "32")), + BSON_ARRAY(BSON("a" << 23 << "s" + << "23")), + BSON_ARRAY(BSON("a" << 14 << "s" + << "14"))}; + runGroupAggregationTest( + "{_id: null, x: {$top: {output: '$a', sortBy: {s: 1}}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << 41)), + std::make_unique(CollatorInterfaceMock::MockType::kReverseString)); + runGroupAggregationTest( + "{_id: null, x: {$bottom: {output: '$a', sortBy: {s: 1}}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << 14)), + std::make_unique(CollatorInterfaceMock::MockType::kReverseString)); + runGroupAggregationTest( + "{_id: null, x: {$topN: {output: '$a', sortBy: {s: 1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(41 << 32 << 23))), + std::make_unique(CollatorInterfaceMock::MockType::kReverseString)); + runGroupAggregationTest( + "{_id: null, x: {$bottomN: {output: '$a', sortBy: {s: 1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(32 << 23 << 14))), + std::make_unique(CollatorInterfaceMock::MockType::kReverseString)); +} + +TEST_F(SbeStageBuilderGroupTest, TopBottomNAccumulatorNotEnoughElement) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 22 << "s" << 2)), + BSON_ARRAY(BSON("a" << 11 << "s" << 1))}; + runGroupAggregationTest("{_id: null, x: {$topN: {output: '$a', sortBy: {s: 1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(11 << 22)))); + runGroupAggregationTest("{_id: null, x: {$bottomN: {output: '$a', sortBy: {s: 1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(11 << 22)))); +} + +TEST_F(SbeStageBuilderGroupTest, TopBottomNAccumulatorMultiGroup) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 44 << "s" << 4 << "n" << 1)), + BSON_ARRAY(BSON("a" << 33 << "s" << 3 << "n" << 1)), + BSON_ARRAY(BSON("a" << 22 << "s" << 2 << "n" << 1)), + BSON_ARRAY(BSON("a" << 11 << "s" << 1 << "n" << 1)), + BSON_ARRAY(BSON("a" << 88 << "s" << 8 << "n" << 2)), + BSON_ARRAY(BSON("a" << 77 << "s" << 7 << "n" << 2)), + BSON_ARRAY(BSON("a" << 66 << "s" << 6 << "n" << 2)), + BSON_ARRAY(BSON("a" << 55 << "s" << 5 << "n" << 2))}; + runGroupAggregationTest( + "{_id: '$n', x: {$top: {output: '$a', sortBy: {s: 1}}}}", + docs, + BSON_ARRAY(BSON("_id" << 1 << "x" << 11) << BSON("_id" << 2 << "x" << 55))); + runGroupAggregationTest( + "{_id: '$n', x: {$bottom: {output: '$a', sortBy: {s: 1}}}}", + docs, + BSON_ARRAY(BSON("_id" << 1 << "x" << 44) << BSON("_id" << 2 << "x" << 88))); + runGroupAggregationTest("{_id: '$n', x: {$topN: {output: '$a', sortBy: {s: 1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << 1 << "x" << BSON_ARRAY(11 << 22 << 33)) + << BSON("_id" << 2 << "x" << BSON_ARRAY(55 << 66 << 77)))); + runGroupAggregationTest("{_id: '$n', x: {$bottomN: {output: '$a', sortBy: {s: 1}, n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << 1 << "x" << BSON_ARRAY(22 << 33 << 44)) + << BSON("_id" << 2 << "x" << BSON_ARRAY(66 << 77 << 88)))); +} + +TEST_F(SbeStageBuilderGroupTest, TopBottomNAccumulatorDynamicN) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 44 << "s" << 4 << "n" << 2)), + BSON_ARRAY(BSON("a" << 33 << "s" << 3 << "n" << 2)), + BSON_ARRAY(BSON("a" << 22 << "s" << 2 << "n" << 2)), + BSON_ARRAY(BSON("a" << 11 << "s" << 1 << "n" << 2)), + BSON_ARRAY(BSON("a" << 88 << "s" << 8 << "n" << 3)), + BSON_ARRAY(BSON("a" << 77 << "s" << 7 << "n" << 3)), + 
BSON_ARRAY(BSON("a" << 66 << "s" << 6 << "n" << 3)), + BSON_ARRAY(BSON("a" << 55 << "s" << 5 << "n" << 3))}; + runGroupAggregationTest( + "{_id: {n1: '$n'}, x: {$topN: {output: '$a', sortBy: {s: 1}, n: '$n1'}}}", + docs, + BSON_ARRAY(BSON("_id" << BSON("n1" << 2) << "x" << BSON_ARRAY(11 << 22)) + << BSON("_id" << BSON("n1" << 3) << "x" << BSON_ARRAY(55 << 66 << 77)))); + runGroupAggregationTest( + "{_id: {n1: '$n'}, x: {$bottomN: {output: '$a', sortBy: {s: 1}, n: '$n1'}}}", + docs, + BSON_ARRAY(BSON("_id" << BSON("n1" << 2) << "x" << BSON_ARRAY(33 << 44)) + << BSON("_id" << BSON("n1" << 3) << "x" << BSON_ARRAY(66 << 77 << 88)))); +} + +TEST_F(SbeStageBuilderGroupTest, MinMaxNAccumulatorSingleGroup) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 44)), + BSON_ARRAY(BSON("a" << 33)), + BSON_ARRAY(BSON("a" << 22)), + BSON_ARRAY(BSON("a" << 11)), + BSON_ARRAY(BSONObjBuilder().obj())}; + runGroupAggregationTest( + "{_id: null, x: {$minN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(11 << 22 << 33)))); + runGroupAggregationTest( + "{_id: null, x: {$maxN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(44 << 33 << 22)))); +} + +TEST_F(SbeStageBuilderGroupTest, MinMaxNAccumulatorWithDifferentNumericTypes) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 11ll)), + BSON_ARRAY(BSON("a" << 12)), + BSON_ARRAY(BSON("a" << 13.12)), + BSON_ARRAY(BSON("a" << 14.0f)), + BSON_ARRAY(BSON("a" << 15ll))}; + runGroupAggregationTest( + "{_id: null, x: {$maxN: {input: '$a', n: 4}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(15ll << 14.0f << 13.12 << 12)))); + runGroupAggregationTest( + "{_id: null, x: {$minN: {input: '$a', n: 4}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(11ll << 12 << 13.12 << 14.0f)))); +} + +TEST_F(SbeStageBuilderGroupTest, MinMaxNAccumulatorWithStrings) { + auto docs = std::vector{BSON_ARRAY(BSON("a" + << "az")), + BSON_ARRAY(BSON("a" + << "by")), + BSON_ARRAY(BSON("a" + << "cx")), + BSON_ARRAY(BSON("a" + << "dw"))}; + runGroupAggregationTest("{_id: null, x: {$maxN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" + << BSON_ARRAY("dw" + << "cx" + << "by")))); + runGroupAggregationTest("{_id: null, x: {$minN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" + << BSON_ARRAY("az" + << "by" + << "cx")))); +} + +TEST_F(SbeStageBuilderGroupTest, MinMaxNAccumulatorCollation) { + auto docs = std::vector{BSON_ARRAY(BSON("a" + << "az")), + BSON_ARRAY(BSON("a" + << "by")), + BSON_ARRAY(BSON("a" + << "cx")), + BSON_ARRAY(BSON("a" + << "dw"))}; + runGroupAggregationTest( + "{_id: null, x: {$maxN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" + << BSON_ARRAY("az" + << "by" + << "cx"))), + std::make_unique(CollatorInterfaceMock::MockType::kReverseString)); + runGroupAggregationTest( + "{_id: null, x: {$minN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" + << BSON_ARRAY("dw" + << "cx" + << "by"))), + std::make_unique(CollatorInterfaceMock::MockType::kReverseString)); +} + +TEST_F(SbeStageBuilderGroupTest, MinMaxNAccumulatorNotEnoughElement) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 22)), BSON_ARRAY(BSON("a" << 11))}; + runGroupAggregationTest("{_id: null, x: {$maxN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(22 << 11)))); + runGroupAggregationTest("{_id: null, x: {$minN: {input: '$a', n: 3}}}", + docs, + 
BSON_ARRAY(BSON("_id" << BSONNULL << "x" << BSON_ARRAY(11 << 22)))); +} + +TEST_F(SbeStageBuilderGroupTest, MinMaxNAccumulatorMultiGroup) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 44 << "n" << 1)), + BSON_ARRAY(BSON("a" << 33 << "n" << 1)), + BSON_ARRAY(BSON("a" << 22 << "n" << 1)), + BSON_ARRAY(BSON("a" << 11 << "n" << 1)), + BSON_ARRAY(BSON("a" << 88 << "n" << 2)), + BSON_ARRAY(BSON("a" << 77 << "n" << 2)), + BSON_ARRAY(BSON("a" << 66 << "n" << 2)), + BSON_ARRAY(BSON("a" << 55 << "n" << 2))}; + runGroupAggregationTest("{_id: '$n', x: {$minN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << 1 << "x" << BSON_ARRAY(11 << 22 << 33)) + << BSON("_id" << 2 << "x" << BSON_ARRAY(55 << 66 << 77)))); + runGroupAggregationTest("{_id: '$n', x: {$maxN: {input: '$a', n: 3}}}", + docs, + BSON_ARRAY(BSON("_id" << 1 << "x" << BSON_ARRAY(44 << 33 << 22)) + << BSON("_id" << 2 << "x" << BSON_ARRAY(88 << 77 << 66)))); +} + +TEST_F(SbeStageBuilderGroupTest, MinMaxNAccumulatorDynamicN) { + auto docs = std::vector{BSON_ARRAY(BSON("a" << 44 << "n" << 2)), + BSON_ARRAY(BSON("a" << 33 << "n" << 2)), + BSON_ARRAY(BSON("a" << 22 << "n" << 2)), + BSON_ARRAY(BSON("a" << 11 << "n" << 2)), + BSON_ARRAY(BSON("a" << 88 << "n" << 3)), + BSON_ARRAY(BSON("a" << 77 << "n" << 3)), + BSON_ARRAY(BSON("a" << 66 << "n" << 3)), + BSON_ARRAY(BSON("a" << 55 << "n" << 3))}; + runGroupAggregationTest( + "{_id: {n1: '$n'}, x: {$minN: {input: '$a', n: '$n1'}}}", + docs, + BSON_ARRAY(BSON("_id" << BSON("n1" << 2) << "x" << BSON_ARRAY(11 << 22)) + << BSON("_id" << BSON("n1" << 3) << "x" << BSON_ARRAY(55 << 66 << 77)))); + runGroupAggregationTest( + "{_id: {n1: '$n'}, x: {$maxN: {input: '$a', n: '$n1'}}}", + docs, + BSON_ARRAY(BSON("_id" << BSON("n1" << 2) << "x" << BSON_ARRAY(44 << 33)) + << BSON("_id" << BSON("n1" << 3) << "x" << BSON_ARRAY(88 << 77 << 66)))); +} + class AccumulatorSBEIncompatible final : public AccumulatorState { public: static constexpr auto kName = "$incompatible"_sd; @@ -1874,6 +2325,17 @@ class SbeStageBuilderGroupAggCombinerTest : public sbe::EExpressionTestFixture { return {resultTag, resultVal}; } + std::pair bsonArrayToSbe(BSONArray arr) { + auto [arrTag, arrVal] = sbe::value::makeNewArray(); + auto arrView = sbe::value::getArrayView(arrVal); + + for (auto elem : arr) { + auto [tag, val] = sbe::bson::convertFrom(elem); + arrView->push_back(tag, val); + } + return {arrTag, arrVal}; + } + /** * Given the name of an SBE agg function ('aggFuncName') and an array of values expressed as a * BSON array, aggregates the values inside the array and returns the resulting SBE value. 
@@ -1941,6 +2403,118 @@ class SbeStageBuilderGroupAggCombinerTest : public sbe::EExpressionTestFixture { return {arrTag, arrVal}; } + std::pair convertFromBSONArray(BSONArray arr) { + auto [arrTag, arrVal] = sbe::value::makeNewArray(); + auto arrView = sbe::value::getArrayView(arrVal); + + for (auto elem : arr) { + auto [tag, val] = sbe::bson::convertFrom(elem); + arrView->push_back(tag, val); + } + return {arrTag, arrVal}; + } + + template + void testCombinePartialAggsMultiAccumulator(std::string aggExpr, + BSONArray mergeState, + BSONArray inputState, + BSONArray expArr) { + CollatorInterfaceMock collator{CollatorInterfaceMock::MockType::kReverseString}; + auto aggSlot = bindAccessor(&_aggAccessor); + auto [expr, finalizeExpr] = [&]() + -> std::pair, std::unique_ptr> { + if constexpr (Collation) { + auto expr = + stage_builder::makeFunction(aggExpr + "Merge", + stage_builder::makeVariable(_inputSlotId), + stage_builder::makeVariable(_collatorSlotId)); + auto finalizeExpr = + stage_builder::makeFunction(aggExpr + "Finalize", + stage_builder::makeVariable(aggSlot), + stage_builder::makeVariable(_collatorSlotId)); + + _collatorAccessor.reset( + false, + sbe::value::TypeTags::collator, + sbe::value::bitcastFrom(&collator)); + return {std::move(expr), std::move(finalizeExpr)}; + } else { + auto expr = stage_builder::makeFunction(aggExpr + "Merge", + stage_builder::makeVariable(_inputSlotId)); + auto finalizeExpr = stage_builder::makeFunction( + aggExpr + "Finalize", stage_builder::makeVariable(aggSlot)); + return {std::move(expr), std::move(finalizeExpr)}; + } + }(); + + auto compiledExpr = compileAggExpression(*expr, &_aggAccessor); + auto finalizeCompiledExpr = compileExpression(*finalizeExpr); + + auto [mergeStateTag, mergeStateVal] = convertFromBSONArray(mergeState); + _aggAccessor.reset(true, mergeStateTag, mergeStateVal); + + auto [inputStateTag, inputStateVal] = convertFromBSONArray(inputState); + _inputAccessor.reset(true, inputStateTag, inputStateVal); + + auto [resultTag, resultVal] = runCompiledExpression(compiledExpr.get()); + _aggAccessor.reset(true, resultTag, resultVal); + std::tie(resultTag, resultVal) = runCompiledExpression(finalizeCompiledExpr.get()); + + auto [compareTag, compareVal] = + sbe::value::compareValue(resultTag, + resultVal, + sbe::value::TypeTags::bsonArray, + sbe::value::bitcastFrom(expArr.objdata())); + + ASSERT_EQ(resultTag, sbe::value::TypeTags::Array); + ASSERT_EQ(compareTag, sbe::value::TypeTags::NumberInt32); + ASSERT_EQ(compareVal, 0); + sbe::value::releaseValue(resultTag, resultVal); + } + + void testCombinePartialAggsMultiAccumulatorWithSortPattern(std::string aggExpr, + BSONArray mergeState, + BSONArray inputState, + sbe::value::SortSpec* sortSpec, + BSONArray expected) { + auto sortSpecConstant = + stage_builder::makeConstant(sbe::value::TypeTags::sortSpec, + sbe::value::bitcastFrom(sortSpec)); + + auto expr = stage_builder::makeFunction(aggExpr + "Merge", + stage_builder::makeVariable(_inputSlotId), + sortSpecConstant->clone()); + + auto aggSlot = bindAccessor(&_aggAccessor); + auto finalExpr = stage_builder::makeFunction(aggExpr + "Finalize", + stage_builder::makeVariable(aggSlot), + std::move(sortSpecConstant)); + + auto [mergeStateTag, mergeStateVal] = convertFromBSONArray(mergeState); + _aggAccessor.reset(true, mergeStateTag, mergeStateVal); + + auto [inputStateTag, inputStateVal] = convertFromBSONArray(inputState); + _inputAccessor.reset(true, inputStateTag, inputStateVal); + + auto compiledExpr = compileAggExpression(*expr, &_aggAccessor); + 
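The CombinePartialAggsFirstN* cases below exercise the merge rule for partial $firstN states: the merge-side buffer keeps priority and the incoming state only fills the remaining capacity up to n. A standalone sketch of just that combining rule; the real SBE partial-aggregate state also appears to carry a start index, the n value, and memory-accounting fields (the extra array elements in the test fixtures), which are omitted here.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Combine two partial "first n" buffers: 'merge' wins, 'input' fills whatever capacity is left.
std::vector<int> combineFirstN(std::vector<int> merge,
                               const std::vector<int>& input,
                               std::size_t n) {
    for (int v : input) {
        if (merge.size() >= n) break;
        merge.push_back(v);
    }
    return merge;
}

int main() {
    // Mirrors CombinePartialAggsFirstNMergeBothArray: [1,2] merged with [3,4,5], n=3 -> [1,2,3].
    for (int v : combineFirstN({1, 2}, {3, 4, 5}, 3)) std::cout << v << ' ';
    std::cout << '\n';
}
```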
+ auto [newAccTag, newAccVal] = runCompiledExpression(compiledExpr.get()); + _aggAccessor.reset(true, newAccTag, newAccVal); + + auto compiledFinalExpr = compileExpression(*finalExpr); + + auto [resultTag, resultVal] = runCompiledExpression(compiledFinalExpr.get()); + + auto [expectedTag, expectedVal] = bsonArrayToSbe(expected); + auto [compareTag, compareVal] = + sbe::value::compareValue(resultTag, resultVal, expectedTag, expectedVal); + + ASSERT_EQ(compareTag, sbe::value::TypeTags::NumberInt32); + ASSERT_EQ(compareVal, 0); + sbe::value::releaseValue(resultTag, resultVal); + sbe::value::releaseValue(expectedTag, expectedVal); + } + protected: sbe::value::FrameIdGenerator _frameIdGenerator; boost::intrusive_ptr _expCtx; @@ -2394,4 +2968,135 @@ TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsStdDevSamp) { ASSERT_APPROX_EQUAL(sbe::value::bitcastTo(finalizedRes), 3.2660, 0.0001); } +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsFirstNMergeBothArray) { + testCombinePartialAggsMultiAccumulator( + "aggFirstN", + BSON_ARRAY(BSON_ARRAY(1 << 2) << 0ll << 3ll << 16 << 1024), + BSON_ARRAY(BSON_ARRAY(3 << 4 << 5) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(1 << 2 << 3)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsFirstNNoMerge) { + testCombinePartialAggsMultiAccumulator( + "aggFirstN", + BSON_ARRAY(BSON_ARRAY(1 << 2 << 6) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(BSON_ARRAY(3 << 4 << 5) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(1 << 2 << 6)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsFirstNMergeArrayEmpty) { + testCombinePartialAggsMultiAccumulator( + "aggFirstN", + BSON_ARRAY(BSONArrayBuilder().arr() << 0ll << 3ll << 0 << 1024), + BSON_ARRAY(BSON_ARRAY(3 << 4 << 5) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(3 << 4 << 5)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsFirstNInputArrayEmpty) { + testCombinePartialAggsMultiAccumulator( + "aggFirstN", + BSON_ARRAY(BSON_ARRAY(3 << 4 << 5) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(BSONArrayBuilder().arr() << 0ll << 3ll << 0 << 1024), + BSON_ARRAY(3 << 4 << 5)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsLastNMergeBothArray) { + testCombinePartialAggsMultiAccumulator( + "aggLastN", + BSON_ARRAY(BSON_ARRAY(1 << 2 << 3) << 1ll << 3ll << 24 << 1024), + BSON_ARRAY(BSON_ARRAY(4 << 5) << 0ll << 3ll << 16 << 1024), + BSON_ARRAY(1 << 4 << 5)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsLastNNoMerge) { + testCombinePartialAggsMultiAccumulator( + "aggLastN", + BSON_ARRAY(BSON_ARRAY(1 << 2 << 6) << 2ll << 3ll << 24 << 1024), + BSON_ARRAY(BSON_ARRAY(3 << 4 << 5) << 1ll << 3ll << 24 << 1024), + BSON_ARRAY(4 << 5 << 3)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsLastNInputArrayFull) { + testCombinePartialAggsMultiAccumulator( + "aggLastN", + BSON_ARRAY(BSON_ARRAY(1 << 2) << 0ll << 3ll << 0 << 1024), + BSON_ARRAY(BSON_ARRAY(3 << 4 << 5) << 2ll << 3ll << 24 << 1024), + BSON_ARRAY(5 << 3 << 4)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsLastNInputArrayEmpty) { + testCombinePartialAggsMultiAccumulator( + "aggLastN", + BSON_ARRAY(BSON_ARRAY(3 << 4 << 5) << 2ll << 3ll << 24 << 1024), + BSON_ARRAY(BSONArrayBuilder().arr() << 0ll << 3ll << 0 << 1024), + BSON_ARRAY(5 << 3 << 4)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsTopN) { + testCombinePartialAggsMultiAccumulatorWithSortPattern( + "aggTopN", + BSON_ARRAY(BSON_ARRAY(BSON_ARRAY(5 << 5) << 
BSON_ARRAY(3 << 3) << BSON_ARRAY(1 << 1)) + << 0ll << 3ll << 0 << INT_MAX), + BSON_ARRAY(BSON_ARRAY(BSON_ARRAY(6 << 6) << BSON_ARRAY(4 << 4) << BSON_ARRAY(2 << 2)) + << 0ll << 3ll << 0 << INT_MAX), + new sbe::value::SortSpec(BSON("x" << 1)), + BSON_ARRAY(1 << 2 << 3)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsBottomN) { + testCombinePartialAggsMultiAccumulatorWithSortPattern( + "aggBottomN", + BSON_ARRAY(BSON_ARRAY(BSON_ARRAY(1 << 1) << BSON_ARRAY(3 << 3) << BSON_ARRAY(5 << 5)) + << 0ll << 3ll << 0 << INT_MAX), + BSON_ARRAY(BSON_ARRAY(BSON_ARRAY(2 << 2) << BSON_ARRAY(4 << 4) << BSON_ARRAY(6 << 6)) + << 0ll << 3ll << 0 << INT_MAX), + new sbe::value::SortSpec(BSON("x" << 1)), + BSON_ARRAY(4 << 5 << 6)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsMinN) { + testCombinePartialAggsMultiAccumulator( + "aggMinN", + BSON_ARRAY(BSON_ARRAY(5 << 3 << 1) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(BSON_ARRAY(6 << 4 << 2) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(1 << 2 << 3)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsMaxN) { + testCombinePartialAggsMultiAccumulator( + "aggMaxN", + BSON_ARRAY(BSON_ARRAY(1 << 3 << 5) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(BSON_ARRAY(2 << 4 << 6) << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(6 << 5 << 4)); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsMinNCollator) { + testCombinePartialAggsMultiAccumulator("aggMinN", + BSON_ARRAY(BSON_ARRAY("az" + << "cx" + << "ev") + << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(BSON_ARRAY("by" + << "dw" + << "fu") + << 0ll << 3ll << 24 << 1024), + BSON_ARRAY("fu" + << "ev" + << "dw")); +} + +TEST_F(SbeStageBuilderGroupAggCombinerTest, CombinePartialAggsMaxNCollator) { + testCombinePartialAggsMultiAccumulator("aggMaxN", + BSON_ARRAY(BSON_ARRAY("ev" + << "cx" + << "az") + << 0ll << 3ll << 24 << 1024), + BSON_ARRAY(BSON_ARRAY("fu" + << "dw" + << "by") + << 0ll << 3ll << 24 << 1024), + BSON_ARRAY("az" + << "by" + << "cx")); +} } // namespace mongo diff --git a/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp b/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp index c2225c02ffb1f..5bbc1691ab5a0 100644 --- a/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp +++ b/src/mongo/db/query/sbe_stage_builder_coll_scan.cpp @@ -28,25 +28,46 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/sbe_stage_builder_coll_scan.h" - +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/stages/co_scan.h" -#include "mongo/db/exec/sbe/stages/exchange.h" #include "mongo/db/exec/sbe/stages/filter.h" #include "mongo/db/exec/sbe/stages/limit_skip.h" #include "mongo/db/exec/sbe/stages/loop_join.h" #include "mongo/db/exec/sbe/stages/project.h" #include "mongo/db/exec/sbe/stages/scan.h" #include "mongo/db/exec/sbe/stages/union.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/matcher/match_expression_dependencies.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" +#include "mongo/db/query/record_id_bound.h" #include "mongo/db/query/sbe_stage_builder.h" +#include 
"mongo/db/query/sbe_stage_builder_coll_scan.h" +#include "mongo/db/query/sbe_stage_builder_eval_frame.h" #include "mongo/db/query/sbe_stage_builder_filter.h" -#include "mongo/db/query/util/make_data_structure.h" -#include "mongo/db/record_id_helpers.h" -#include "mongo/logv2/log.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -55,9 +76,9 @@ namespace mongo::stage_builder { namespace { -boost::optional registerOplogTs(sbe::RuntimeEnvironment* env, +boost::optional registerOplogTs(PlanStageEnvironment& env, sbe::value::SlotIdGenerator* slotIdGenerator) { - auto slotId = env->getSlotIfExists("oplogTs"_sd); + boost::optional slotId = env->getSlotIfExists("oplogTs"_sd); if (!slotId) { return env->registerSlot( "oplogTs"_sd, sbe::value::TypeTags::Nothing, 0, false, slotIdGenerator); @@ -71,16 +92,31 @@ boost::optional registerOplogTs(sbe::RuntimeEnvironment* env * standalone value of the same SlotId (the latter is returned purely for convenience purposes). */ std::tuple, sbe::value::SlotVector, boost::optional> -makeOplogTimestampSlotsIfNeeded(sbe::RuntimeEnvironment* env, - sbe::value::SlotIdGenerator* slotIdGenerator, - bool shouldTrackLatestOplogTimestamp) { +makeOplogTimestampSlotIfNeeded(PlanStageEnvironment& env, + sbe::value::SlotIdGenerator* slotIdGenerator, + bool shouldTrackLatestOplogTimestamp) { if (shouldTrackLatestOplogTimestamp) { - auto slotId = registerOplogTs(env, slotIdGenerator); + boost::optional slotId = registerOplogTs(env, slotIdGenerator); return {{repl::OpTime::kTimestampFieldName.toString()}, sbe::makeSV(*slotId), slotId}; } return {}; } +void openCallback(OperationContext* opCtx, const CollectionPtr& collection) { + // Forward, non-tailable scans from the oplog need to wait until all oplog entries + // before the read begins to be visible. This isn't needed for reverse scans because + // we only hide oplog entries from forward scans, and it isn't necessary for tailing + // cursors because they ignore EOF and will eventually see all writes. Forward, + // non-tailable scans are the only case where a meaningful EOF will be seen that + // might not include writes that finished before the read started. This also must be + // done before we create the cursor as that is when we establish the endpoint for + // the cursor. Also call abandonSnapshot to make sure that we are using a fresh + // storage engine snapshot while waiting. Otherwise, we will end up reading from the + // snapshot where the oplog entries are not yet visible even after the wait. + opCtx->recoveryUnit()->abandonSnapshot(); + collection->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(opCtx); +} + /** * Checks whether a callback function should be created for a ScanStage and returns it, if so. The * logic in the provided callback will be executed when the ScanStage is opened (but not reopened). 
@@ -88,26 +124,13 @@ makeOplogTimestampSlotsIfNeeded(sbe::RuntimeEnvironment* env, sbe::ScanOpenCallback makeOpenCallbackIfNeeded(const CollectionPtr& collection, const CollectionScanNode* csn) { if (csn->direction == CollectionScanParams::FORWARD && csn->shouldWaitForOplogVisibility) { - invariant(!csn->tailable); - invariant(collection->ns().isOplog()); - - return [](OperationContext* opCtx, const CollectionPtr& collection) { - // Forward, non-tailable scans from the oplog need to wait until all oplog entries - // before the read begins to be visible. This isn't needed for reverse scans because - // we only hide oplog entries from forward scans, and it isn't necessary for tailing - // cursors because they ignore EOF and will eventually see all writes. Forward, - // non-tailable scans are the only case where a meaningful EOF will be seen that - // might not include writes that finished before the read started. This also must be - // done before we create the cursor as that is when we establish the endpoint for - // the cursor. Also call abandonSnapshot to make sure that we are using a fresh - // storage engine snapshot while waiting. Otherwise, we will end up reading from the - // snapshot where the oplog entries are not yet visible even after the wait. - - opCtx->recoveryUnit()->abandonSnapshot(); - collection->getRecordStore()->waitForAllEarlierOplogWritesToBeVisible(opCtx); - }; + tassert(7714200, "Expected 'tailable' to be false", !csn->tailable); + tassert(7714201, "Expected 'collection' to be the oplog", collection->ns().isOplog()); + + return &openCallback; + } else { + return nullptr; } - return {}; } // If the scan should be started after the provided resume RecordId, we will construct a nested-loop @@ -142,7 +165,7 @@ std::unique_ptr buildResumeFromRecordIdSubtree( sbe::value::SlotId seekRecordIdSlot, std::unique_ptr seekRecordIdExpression, PlanYieldPolicy* yieldPolicy, - bool isTailableResumeBranch, + bool isResumingTailableScan, bool resumeAfterRecordId) { invariant(seekRecordIdExpression); @@ -165,13 +188,15 @@ std::unique_ptr buildResumeFromRecordIdSubtree( boost::none /* recordSlot */, boost::none /* recordIdSlot*/, boost::none /* snapshotIdSlot */, - boost::none /* indexIdSlot */, + boost::none /* indexIdentSlot */, boost::none /* indexKeySlot */, boost::none /* keyPatternSlot */, boost::none /* oplogTsSlot */, std::vector{}, sbe::makeSV(), seekSlot, + boost::none /* minRecordIdSlot */, + boost::none /* maxRecordIdSlot */, forward, yieldPolicy, csn->nodeId(), @@ -188,12 +213,12 @@ std::unique_ptr buildResumeFromRecordIdSubtree( // $_resumeAfter. auto unusedSlot = state.slotId(); auto [errorCode, errorMessage] = [&]() -> std::pair { - if (isTailableResumeBranch) { + if (isResumingTailableScan) { return {ErrorCodes::CappedPositionLost, "CollectionScan died due to failure to restore tailable cursor position."}; } return {ErrorCodes::ErrorCodes::KeyNotFound, - str::stream() << "Failed to resume collection scan the recordId from which we are " + str::stream() << "Failed to resume collection scan: the recordId from which we are " "attempting to resume no longer exists in the collection: " << csn->resumeAfterRecordId}; }(); @@ -211,11 +236,11 @@ std::unique_ptr buildResumeFromRecordIdSubtree( csn->nodeId()); // Construct the final loop join. 
Note that for the resume branch of a tailable cursor case we - // use the 'seek' stage as an inner branch, since we need to produce all records starting from + // use the 'seek' stage as an inner branch, since we need to produce all records starting from // the supplied position. For a resume token case we also inject a 'skip 1' stage on top of the // inner branch, as we need to start _after_ the resume RecordId. In both cases we inject a // 'limit 1' stage on top of the outer branch, as it should produce just a single seek recordId. - auto innerStage = isTailableResumeBranch || !resumeAfterRecordId + auto innerStage = isResumingTailableScan || !resumeAfterRecordId ? std::move(inputStage) : sbe::makeS(std::move(inputStage), boost::none, 1, csn->nodeId()); return sbe::makeS( @@ -228,98 +253,110 @@ std::unique_ptr buildResumeFromRecordIdSubtree( } /** - * Creates a collection scan sub-tree optimized for oplog scans. We can built an optimized scan - * when any of the following scenarios apply: + * Creates a collection scan sub-tree optimized for clustered collection scans. Should only be + * called on clustered collections. We can build an optimized scan when any of the following + * scenarios apply: * - * 1. There is a predicate on the 'ts' field of the oplog collection. - * 1.1 If a lower bound on 'ts' is present, the collection scan will seek directly to the - * RecordId of an oplog entry as close to this lower bound as possible without going higher. - * 1.2 If the query is *only* a lower bound on 'ts' on a forward scan, every document in the - * collection after the first matching one must also match. To avoid wasting time running the - * filter on every document to be returned, we will stop applying the filter once it finds - * the first match. - * 1.3 If an upper bound on 'ts' is present, the collection scan will stop and return EOF the - * first time it fetches a document that does not pass the filter and has 'ts' greater than - * the upper bound. - * 2. The user request specified a $_resumeAfter recordId from which to begin the scan. + * 1. 'csn->minRecord' and/or 'csn->maxRecord' exist. + * 1.1 CollectionScanParams::FORWARD scan: + * a. If 'csn->minRecord' is present, the collection scan will seek directly to the RecordId + * of a record as close to this lower bound as possible without going higher. + * b. If 'csn->maxRecord' is present, the collection scan will stop and return EOF the first + * time it fetches a document greater than this upper bound. + * 1.2 CollectionScanParams::BACKWARD scan: + * a. If 'csn->maxRecord' is present, the collection scan will seek directly to the RecordId + * of a record as close to this upper bound as possible without going lower. + * b. If 'csn->minRecord' is present, the collection scan will stop and return EOF the first + * time it fetches a document less than this lower bound. + * 2. The user request specified a $_resumeAfter RecordId from which to begin the scan AND the scan + * is forward AND neither 'csn->minRecord' nor 'csn->maxRecord' exist. + * 2a. The scan will continue with the next RecordId after $_resumeAfter. 
*/ -std::pair, PlanStageSlots> generateOptimizedOplogScan( +std::pair, PlanStageSlots> generateClusteredCollScan( StageBuilderState& state, const CollectionPtr& collection, const CollectionScanNode* csn, - std::vector fields, + std::vector scanFieldNames, PlanYieldPolicy* yieldPolicy, - bool isTailableResumeBranch) { - invariant(collection->ns().isOplog()); - // We can apply oplog scan optimizations only when at least one of the following was specified. - invariant(csn->resumeAfterRecordId || csn->minRecord || csn->maxRecord); + bool isResumingTailableScan) { + + const bool forward = csn->direction == CollectionScanParams::FORWARD; + sbe::RuntimeEnvironment* env = state.env.runtimeEnv; + + invariant(csn->doSbeClusteredCollectionScan()); + invariant(!csn->resumeAfterRecordId || forward); + invariant(!csn->resumeAfterRecordId || !csn->tailable); // The minRecord and maxRecord optimizations are not compatible with resumeAfterRecordId. invariant(!(csn->resumeAfterRecordId && (csn->minRecord || csn->maxRecord))); - // Oplog scan optimizations can only be done for a forward scan. - invariant(csn->direction == CollectionScanParams::FORWARD); + // 'stopApplyingFilterAfterFirstMatch' is only for oplog scans; this method doesn't do them. + invariant(!csn->stopApplyingFilterAfterFirstMatch); - auto fieldSlots = state.slotIdGenerator->generateMultiple(fields.size()); + auto scanFieldSlots = state.slotIdGenerator->generateMultiple(scanFieldNames.size()); - auto resultSlot = state.slotId(); - auto recordIdSlot = state.slotId(); + sbe::value::SlotId resultSlot = state.slotId(); + sbe::value::SlotId recordIdSlot = state.slotId(); - // Start the scan from the RecordId stored in seekRecordId. - // Otherwise, if we're building a collection scan for a resume branch of a special union - // sub-tree implementing a tailable cursor scan, we can use the seekRecordIdSlot directly - // to access the recordId to resume the scan from. + // Iff this is a resume or fetch, set 'seekRecordIdSlot' and 'seekRecordIdExpression' to the + // RecordId resume point of the scan. If we're building a collection scan for a resume branch of + // a special union sub-tree implementing a tailable cursor scan, we can use the already existing + // 'resumeRecordIdSlot' directly as the 'seekRecordIdSlot' to access the recordId to resume the + // scan from. Otherwise we must create a slot for it. auto [seekRecordIdSlot, seekRecordIdExpression] = [&]() -> std::pair, std::unique_ptr> { - if (isTailableResumeBranch) { - auto resumeRecordIdSlot = state.data->env->getSlot("resumeRecordId"_sd); + if (isResumingTailableScan) { + sbe::value::SlotId resumeRecordIdSlot = env->getSlot("resumeRecordId"_sd); return {resumeRecordIdSlot, makeVariable(resumeRecordIdSlot)}; } else if (csn->resumeAfterRecordId) { auto [tag, val] = sbe::value::makeCopyRecordId(*csn->resumeAfterRecordId); return {state.slotId(), makeConstant(tag, val)}; - } else if (csn->minRecord) { - auto cursor = collection->getRecordStore()->getCursor(state.opCtx); - auto startRec = cursor->seekNear(csn->minRecord->recordId()); - if (startRec) { - LOGV2_DEBUG(205841, 3, "Using direct oplog seek"); - auto [tag, val] = sbe::value::makeCopyRecordId(startRec->id); - return {state.slotId(), makeConstant(tag, val)}; - } } return {}; - }(); - - // Check if we need to project out an oplog 'ts' field as part of the collection scan. We will - // need it either when 'maxRecord' bound has been provided, so that we can apply an EOF filter, - // of if we need to track the latest oplog timestamp. 
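For a forward scan, the scenarios listed in the comment above boil down to: return records starting from the first id at or above 'minRecord', return EOF once an id passes 'maxRecord', and (per the excludeScanEndRecordId handling added below) drop the end record itself when the upper bound came from the always-exclusive "max" keyword. A self-contained sketch of only those bound semantics over a sorted list of ids, not the ScanStage implementation:

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Forward clustered scan over record ids sorted ascending: start at the first id >= minRecordId,
// stop once an id passes maxRecordId. 'excludeEnd' mirrors excludeScanEndRecordId: when the upper
// bound is exclusive, the end record itself is not returned.
std::vector<int64_t> forwardClusteredScanSketch(const std::vector<int64_t>& recordIds,
                                                int64_t minRecordId,
                                                int64_t maxRecordId,
                                                bool excludeEnd) {
    std::vector<int64_t> out;
    for (int64_t rid : recordIds) {
        if (rid < minRecordId) continue;              // skip ids below the lower bound
        if (rid > maxRecordId) break;                 // EOF once past the upper bound
        if (excludeEnd && rid == maxRecordId) break;  // "max" keyword: exclusive end
        out.push_back(rid);
    }
    return out;
}

int main() {
    std::vector<int64_t> ids{10, 20, 30, 40, 50};
    for (int64_t r : forwardClusteredScanSketch(ids, 20, 40, /*excludeEnd=*/false)) {
        std::cout << r << ' ';  // 20 30 40
    }
    std::cout << '\n';
    for (int64_t r : forwardClusteredScanSketch(ids, 20, 40, /*excludeEnd=*/true)) {
        std::cout << r << ' ';  // 20 30
    }
    std::cout << '\n';
}
```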
- const auto shouldTrackLatestOplogTimestamp = - (csn->maxRecord || csn->shouldTrackLatestOplogTimestamp); - auto&& [scanFields, scanFieldSlots, tsSlot] = makeOplogTimestampSlotsIfNeeded( - state.data->env, state.slotIdGenerator, shouldTrackLatestOplogTimestamp); - - bool createScanWithAndWithoutFilter = (csn->filter && csn->stopApplyingFilterAfterFirstMatch); - - if (!createScanWithAndWithoutFilter) { - scanFields.insert(scanFields.end(), fields.begin(), fields.end()); - scanFieldSlots.insert(scanFieldSlots.end(), fieldSlots.begin(), fieldSlots.end()); + }(); // lambda end and call + + // Create minRecordId and/or maxRecordId slots as needed. + boost::optional minRecordSlot; + boost::optional maxRecordSlot; + if (csn->minRecord) { + auto [tag, val] = sbe::value::makeCopyRecordId(csn->minRecord->recordId()); + minRecordSlot = + boost::make_optional(state.env->registerSlot(tag, val, true, state.slotIdGenerator)); } - - sbe::ScanCallbacks callbacks({}, {}, makeOpenCallbackIfNeeded(collection, csn)); + if (csn->maxRecord) { + auto [tag, val] = sbe::value::makeCopyRecordId(csn->maxRecord->recordId()); + maxRecordSlot = + boost::make_optional(state.env->registerSlot(tag, val, true, state.slotIdGenerator)); + } + state.data->clusteredCollBoundsInfos.emplace_back( + ParameterizedClusteredScanSlots{minRecordSlot, maxRecordSlot}); + + // Create the ScanStage. + bool excludeScanEndRecordId = + (csn->boundInclusion == + CollectionScanParams::ScanBoundInclusion::kExcludeBothStartAndEndRecords || + csn->boundInclusion == CollectionScanParams::ScanBoundInclusion::kIncludeStartRecordOnly); auto stage = sbe::makeS(collection->uuid(), resultSlot, recordIdSlot, boost::none /* snapshotIdSlot */, - boost::none /* indexIdSlot */, + boost::none /* indexIdentSlot */, boost::none /* indexKeySlot */, boost::none /* keyPatternSlot */, - tsSlot, - std::move(scanFields), - std::move(scanFieldSlots), + boost::none /* oplogTsSlot */, + scanFieldNames, // do not std::move - used later + scanFieldSlots, // do not std::move - used later seekRecordIdSlot, - true /* forward */, + minRecordSlot, + maxRecordSlot, + forward, yieldPolicy, csn->nodeId(), - std::move(callbacks)); + sbe::ScanCallbacks{}, + false /* lowPriority default */, + false /* useRandomCursor default */, + true /* participateInTrialRunTracking default */, + excludeScanEndRecordId); - // Start the scan from the seekRecordId. + // Iff this is a resume or fetch, build the subtree to start the scan from the seekRecordId. if (seekRecordIdSlot) { stage = buildResumeFromRecordIdSubtree(state, collection, @@ -328,234 +365,38 @@ std::pair, PlanStageSlots> generateOptimizedOplo *seekRecordIdSlot, std::move(seekRecordIdExpression), yieldPolicy, - isTailableResumeBranch, + isResumingTailableScan, csn->resumeAfterRecordId.has_value()); } - // Create a filter which checks the first document to ensure either that its 'ts' is less than - // or equal the minimum timestamp that should not have rolled off the oplog, or that it is a - // replica set initialization message. If this fails, then we throw - // ErrorCodes::OplogQueryMinTsMissing. We avoid doing this check on the resumable branch of a - // tailable scan; it only needs to be done once, when the initial branch is run. - if (csn->assertTsHasNotFallenOff && !isTailableResumeBranch) { - invariant(csn->shouldTrackLatestOplogTimestamp); - - // There should always be a 'tsSlot' already allocated on the RuntimeEnvironment for the - // existing scan that we created previously. 
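buildResumeFromRecordIdSubtree(), called just above, wraps the scan in a loop join whose outer branch seeks the resume RecordId (and fails if it no longer exists) and whose inner branch continues from that point, with an extra 'skip 1' only in the $_resumeAfter case. A plain-C++ sketch of that resume behavior over an ascending id list; the error type here is illustrative, the real code raises KeyNotFound or CappedPositionLost:

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>
#include <stdexcept>
#include <vector>

// Resume a scan over ascending record ids from 'resumeId'. When 'resumeAfter' is true (the
// $_resumeAfter case) the resume record itself is skipped, mirroring the extra 'skip 1' stage;
// when false (restoring a tailable cursor) scanning restarts at that record.
std::vector<int64_t> resumeScanSketch(const std::vector<int64_t>& recordIds,
                                      int64_t resumeId,
                                      bool resumeAfter) {
    auto it = std::find(recordIds.begin(), recordIds.end(), resumeId);
    if (it == recordIds.end()) {
        throw std::runtime_error("resume point no longer exists in the collection");
    }
    if (resumeAfter) {
        ++it;  // start strictly after the resume record
    }
    return std::vector<int64_t>(it, recordIds.end());
}

int main() {
    std::vector<int64_t> ids{10, 20, 30, 40};
    for (int64_t r : resumeScanSketch(ids, 20, /*resumeAfter=*/true)) std::cout << r << ' ';
    std::cout << '\n';  // 30 40
    for (int64_t r : resumeScanSketch(ids, 20, /*resumeAfter=*/false)) std::cout << r << ' ';
    std::cout << '\n';  // 20 30 40
}
```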
- invariant(tsSlot); - - // We will be constructing a filter that needs to see the 'ts' field. We name it 'minTsSlot' - // here so that it does not shadow the 'tsSlot' which we allocated earlier. Our filter will - // also need to see the 'op' and 'o.msg' fields. - auto opTypeSlot = state.slotId(); - auto oObjSlot = state.slotId(); - auto minTsSlot = state.slotId(); - sbe::value::SlotVector minTsSlots = {minTsSlot, opTypeSlot, oObjSlot}; - std::vector minTsFields = { - repl::OpTime::kTimestampFieldName.toString(), "op", "o"}; - - // If the first entry we see in the oplog is the replset initialization, then it doesn't - // matter if its timestamp is later than the specified minTs; no events earlier than the - // minTs can have fallen off this oplog. Otherwise, we must verify that the timestamp of the - // first observed oplog entry is earlier than or equal to the minTs time. - // - // To achieve this, we build a two-branch union subtree. The left branch is a scan with a - // filter that checks the first entry in the oplog for the above criteria, throws via EFail - // if they are not met, and EOFs otherwise. The right branch of the union plan is the tree - // that we originally built above. - // - // union [s9, s10, s11] [ - // [s6, s7, s8] efilter {if (ts <= minTs || op == "n" && isObject (o) && - // getField (o, "msg") == "initiating set", false, fail ( 326 ))} - // scan [s6 = ts, s7 = op, s8 = o] @oplog, - // - - // Set up the filter stage to be used in the left branch of the union. If the main body of - // the expression does not match the input document, it throws OplogQueryMinTsMissing. If - // the expression does match, then it returns 'false', which causes the filter (and as a - // result, the branch) to EOF immediately. Note that the resultSlot and recordIdSlot - // arguments to the ScanStage are boost::none, as we do not need them. - sbe::ScanCallbacks branchCallbacks{}; - auto minTsBranch = sbe::makeS>( - sbe::makeS(collection->uuid(), - boost::none /* resultSlot */, - boost::none /* recordIdSlot */, - boost::none /* snapshotIdSlot */, - boost::none /* indexIdSlot */, - boost::none /* indexKeySlot */, - boost::none /* keyPatternSlot */, - boost::none /* oplogTsSlot*/, - std::move(minTsFields), - minTsSlots, /* don't move this */ - boost::none, - true /* forward */, - yieldPolicy, - csn->nodeId(), - branchCallbacks), - sbe::makeE( - makeBinaryOp( - sbe::EPrimBinary::logicOr, - makeBinaryOp(sbe::EPrimBinary::lessEq, - makeVariable(minTsSlot), - makeConstant(sbe::value::TypeTags::Timestamp, - csn->assertTsHasNotFallenOff->asULL())), - makeBinaryOp( - sbe::EPrimBinary::logicAnd, - makeBinaryOp(sbe::EPrimBinary::eq, - makeVariable(opTypeSlot), - makeConstant("n")), - makeBinaryOp(sbe::EPrimBinary::logicAnd, - makeFunction("isObject", makeVariable(oObjSlot)), - makeBinaryOp(sbe::EPrimBinary::eq, - makeFunction("getField", - makeVariable(oObjSlot), - makeConstant("msg")), - makeConstant(repl::kInitiatingSetMsg))))), - makeConstant(sbe::value::TypeTags::Boolean, false), - sbe::makeE(ErrorCodes::OplogQueryMinTsMissing, - "Specified minTs has already fallen off the oplog")), - csn->nodeId()); - - // All branches of the UnionStage must have the same number of input and output slots, and - // we want to remap all slots from the basic scan we constructed earlier through the union - // stage to the output. 
We're lucky that the real scan happens to have the same number of - // slots (resultSlot, recordSlot, tsSlot) as the minTs check branch (minTsSlot, opTypeSlot, - // oObjSlot), so we don't have to compensate with any unused slots. Note that the minTsSlots - // will never be mapped to output in practice, since the minTs branch either throws or EOFs. - // - // We also need to update the local variables for each slot to their remapped values, so - // subsequent subtrees constructed by this function refer to the correct post-union slots. - auto realSlots = sbe::makeSV(resultSlot, recordIdSlot, *tsSlot); - resultSlot = state.slotId(); - recordIdSlot = state.slotId(); - tsSlot = state.slotId(); - auto outputSlots = sbe::makeSV(resultSlot, recordIdSlot, *tsSlot); - - if (!createScanWithAndWithoutFilter) { - auto unusedFieldSlots = state.slotIdGenerator->generateMultiple(fieldSlots.size()); - minTsSlots.insert(minTsSlots.end(), unusedFieldSlots.begin(), unusedFieldSlots.end()); - - realSlots.insert(realSlots.end(), fieldSlots.begin(), fieldSlots.end()); - - size_t numFieldSlots = fieldSlots.size(); - fieldSlots = state.slotIdGenerator->generateMultiple(numFieldSlots); - - outputSlots.insert(outputSlots.end(), fieldSlots.begin(), fieldSlots.end()); - } - - // Create the union stage. The left branch, which runs first, is our resumability check. - stage = sbe::makeS( - sbe::makeSs(std::move(minTsBranch), std::move(stage)), - makeVector(std::move(minTsSlots), std::move(realSlots)), - std::move(outputSlots), - csn->nodeId()); - } - - // Add an EOF filter to stop the scan after we fetch the first document that has 'ts' greater - // than the upper bound. - if (csn->maxRecord) { - // The 'maxRecord' optimization is not compatible with 'stopApplyingFilterAfterFirstMatch'. - invariant(!csn->stopApplyingFilterAfterFirstMatch); - invariant(tsSlot); - - stage = sbe::makeS>( - std::move(stage), - makeBinaryOp(sbe::EPrimBinary::lessEq, - makeVariable(*tsSlot), - makeConstant(sbe::value::TypeTags::Timestamp, - csn->maxRecord->recordId().getLong())), - csn->nodeId()); - } - - // If csn->stopApplyingFilterAfterFirstMatch is true, assert that csn has a filter. - invariant(!csn->stopApplyingFilterAfterFirstMatch || csn->filter); - - if (csn->filter) { - auto filterExpr = generateFilter(state, csn->filter.get(), resultSlot, nullptr); - if (!filterExpr.isNull()) { - stage = sbe::makeS>( - std::move(stage), filterExpr.extractExpr(state), csn->nodeId()); - } - - // We may be requested to stop applying the filter after the first match. This can happen - // if the query is just a lower bound on 'ts' on a forward scan. In this case every document - // in the collection after the first matching one must also match, so there is no need to - // run the filter on such elements. - // - // To apply this optimization we will construct the following sub-tree: - // - // nlj [] [seekRecordIdSlot] - // left - // limit 1 - // filter - // - // right - // seek seekRecordIdSlot resultSlot recordIdSlot @coll - // - // Here, the nested loop join outer branch is the collection scan we constructed above, with - // a csn->filter predicate sitting on top. The 'limit 1' stage is to ensure this branch - // returns a single row. Once executed, this branch will filter out documents which doesn't - // satisfy the predicate, and will return the first document, along with a RecordId, that - // matches. 
This RecordId is then used as a starting point of the collection scan in the - // inner branch, and the execution will continue from this point further on, without - // applying the filter. - if (createScanWithAndWithoutFilter) { - invariant(!csn->maxRecord); - invariant(csn->direction == CollectionScanParams::FORWARD); - - seekRecordIdSlot = recordIdSlot; - resultSlot = state.slotId(); - recordIdSlot = state.slotId(); - - std::tie(scanFields, scanFieldSlots, tsSlot) = makeOplogTimestampSlotsIfNeeded( - state.data->env, state.slotIdGenerator, shouldTrackLatestOplogTimestamp); - - scanFields.insert(scanFields.end(), fields.begin(), fields.end()); - scanFieldSlots.insert(scanFieldSlots.end(), fieldSlots.begin(), fieldSlots.end()); - - stage = sbe::makeS( - sbe::makeS(std::move(stage), 1, boost::none, csn->nodeId()), - sbe::makeS(collection->uuid(), - resultSlot, - recordIdSlot, - boost::none /* snapshotIdSlot */, - boost::none /* indexIdSlot */, - boost::none /* indexKeySlot */, - boost::none /* keyPatternSlot */, - tsSlot, - std::move(scanFields), - std::move(scanFieldSlots), - seekRecordIdSlot, - true /* forward */, - yieldPolicy, - csn->nodeId(), - sbe::ScanCallbacks{}), - sbe::makeSV(), - sbe::makeSV(*seekRecordIdSlot), - nullptr, - csn->nodeId()); - } + // When the start and/or end scan bounds are from an expression, ScanStage::getNext() treats + // them both as inclusive, and 'csn->filter' will enforce any exclusions. If the bound(s) came + // from the "min" (always inclusive) and/or "max" (always exclusive) keywords, there may be no + // filter, so ScanStage->getNext() must directly enforce the bounds. min's inclusivity matches + // getNext()'s default behavior, but max's exclusivity does not and thus is enforced by the + // excludeScanEndRecordId argument to the ScanStage constructor above. + EvalExpr filterExpr = generateFilter(state, csn->filter.get(), resultSlot, nullptr); + if (!filterExpr.isNull()) { + stage = sbe::makeS>( + std::move(stage), filterExpr.extractExpr(state), csn->nodeId()); } - // If csn->shouldTrackLatestOplogTimestamp is true, assert that we generated tsSlot. - invariant(!csn->shouldTrackLatestOplogTimestamp || tsSlot); - PlanStageSlots outputs; outputs.set(PlanStageSlots::kResult, resultSlot); outputs.set(PlanStageSlots::kRecordId, recordIdSlot); - for (size_t i = 0; i < fields.size(); ++i) { - outputs.set(std::make_pair(PlanStageSlots::kField, fields[i]), fieldSlots[i]); + for (size_t i = 0; i < scanFieldNames.size(); ++i) { + outputs.set(std::make_pair(PlanStageSlots::kField, scanFieldNames[i]), scanFieldSlots[i]); } return {std::move(stage), std::move(outputs)}; -} +} // generateClusteredCollScan /** * Generates a generic collection scan sub-tree. * - If a resume token has been provided, the scan will start from a RecordId contained within this - * token. - * - Else if 'isTailableResumeBranch' is true, the scan will start from a RecordId contained in - * slot "resumeRecordId". + * token. + * - Else if 'isResumingTailableScan' is true, the scan will start from a RecordId contained in + * slot "resumeRecordId". * - Otherwise the scan will start from the beginning of the collection. 
*/ std::pair, PlanStageSlots> generateGenericCollScan( @@ -564,8 +405,8 @@ std::pair, PlanStageSlots> generateGenericCollSc const CollectionScanNode* csn, std::vector fields, PlanYieldPolicy* yieldPolicy, - bool isTailableResumeBranch) { - const auto forward = csn->direction == CollectionScanParams::FORWARD; + bool isResumingTailableScan) { + const bool forward = csn->direction == CollectionScanParams::FORWARD; invariant(!csn->shouldTrackLatestOplogTimestamp || collection->ns().isOplog()); invariant(!csn->resumeAfterRecordId || forward); @@ -591,16 +432,16 @@ std::pair, PlanStageSlots> generateGenericCollSc if (csn->resumeAfterRecordId) { auto [tag, val] = sbe::value::makeCopyRecordId(*csn->resumeAfterRecordId); return {state.slotId(), makeConstant(tag, val)}; - } else if (isTailableResumeBranch) { - auto resumeRecordIdSlot = state.data->env->getSlot("resumeRecordId"_sd); + } else if (isResumingTailableScan) { + auto resumeRecordIdSlot = state.env->getSlot("resumeRecordId"_sd); return {resumeRecordIdSlot, makeVariable(resumeRecordIdSlot)}; } return {}; }(); // See if we need to project out an oplog latest timestamp. - auto&& [scanFields, scanFieldSlots, tsSlot] = makeOplogTimestampSlotsIfNeeded( - state.data->env, state.slotIdGenerator, csn->shouldTrackLatestOplogTimestamp); + auto&& [scanFields, scanFieldSlots, oplogTsSlot] = makeOplogTimestampSlotIfNeeded( + state.env, state.slotIdGenerator, csn->shouldTrackLatestOplogTimestamp); scanFields.insert(scanFields.end(), fields.begin(), fields.end()); scanFieldSlots.insert(scanFieldSlots.end(), fieldSlots.begin(), fieldSlots.end()); @@ -610,13 +451,15 @@ std::pair, PlanStageSlots> generateGenericCollSc resultSlot, recordIdSlot, boost::none /* snapshotIdSlot */, - boost::none /* indexIdSlot */, + boost::none /* indexIdentSlot */, boost::none /* indexKeySlot */, boost::none /* keyPatternSlot */, - tsSlot, + oplogTsSlot, std::move(scanFields), std::move(scanFieldSlots), seekRecordIdSlot, + boost::none /* minRecordIdSlot */, + boost::none /* maxRecordIdSlot */, forward, yieldPolicy, csn->nodeId(), @@ -631,7 +474,7 @@ std::pair, PlanStageSlots> generateGenericCollSc *seekRecordIdSlot, std::move(seekRecordIdExpression), yieldPolicy, - isTailableResumeBranch, + isResumingTailableScan, true /* resumeAfterRecordId */); } @@ -643,9 +486,7 @@ std::pair, PlanStageSlots> generateGenericCollSc } if (csn->filter) { - // The 'stopApplyingFilterAfterFirstMatch' optimization is only applicable when the 'ts' - // lower bound is also provided for an oplog scan, and is handled in - // 'generateOptimizedOplogScan()'. + // 'stopApplyingFilterAfterFirstMatch' is only for oplog scans; this method doesn't do them. 
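A minimal standalone sketch of the seek-point selection performed above in generateGenericCollScan: an explicit csn->resumeAfterRecordId wins, otherwise a resuming tailable scan reads the id from the "resumeRecordId" environment slot, otherwise the scan starts from the beginning of the collection. The RecordId, SlotId, and SeekPoint types here are hypothetical stand-ins, not the real SBE classes.

```cpp
#include <cstdint>
#include <optional>
#include <variant>

// Hypothetical stand-ins for the real SBE types.
struct RecordId { int64_t repr; };   // a concrete record id baked into the plan
struct SlotId   { int32_t id; };     // a runtime slot that will hold the record id

// A seek point is either a constant RecordId (explicit resume token) or a slot
// read at runtime (the "resumeRecordId" environment slot of a tailable scan).
using SeekPoint = std::variant<RecordId, SlotId>;

// Mirrors the selection order sketched above: explicit resume token first,
// then the tailable-scan resume slot, otherwise no seek point at all.
std::optional<SeekPoint> chooseSeekPoint(std::optional<RecordId> resumeAfterRecordId,
                                         bool isResumingTailableScan,
                                         SlotId resumeRecordIdSlot) {
    if (resumeAfterRecordId) {
        return SeekPoint{*resumeAfterRecordId};
    }
    if (isResumingTailableScan) {
        return SeekPoint{resumeRecordIdSlot};
    }
    return std::nullopt;  // no seek point: scan from the beginning of the collection
}
```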
invariant(!csn->stopApplyingFilterAfterFirstMatch); auto filterExpr = generateFilter(state, csn->filter.get(), resultSlot, &outputs); @@ -656,7 +497,8 @@ std::pair, PlanStageSlots> generateGenericCollSc } return {std::move(stage), std::move(outputs)}; -} +} // generateGenericCollScan + } // namespace std::pair, PlanStageSlots> generateCollScan( @@ -665,13 +507,14 @@ std::pair, PlanStageSlots> generateCollScan( const CollectionScanNode* csn, std::vector fields, PlanYieldPolicy* yieldPolicy, - bool isTailableResumeBranch) { - if (csn->minRecord || csn->maxRecord || csn->stopApplyingFilterAfterFirstMatch) { - return generateOptimizedOplogScan( - state, collection, csn, std::move(fields), yieldPolicy, isTailableResumeBranch); + bool isResumingTailableScan) { + + if (csn->doSbeClusteredCollectionScan()) { + return generateClusteredCollScan( + state, collection, csn, std::move(fields), yieldPolicy, isResumingTailableScan); } else { return generateGenericCollScan( - state, collection, csn, std::move(fields), yieldPolicy, isTailableResumeBranch); + state, collection, csn, std::move(fields), yieldPolicy, isResumingTailableScan); } } } // namespace mongo::stage_builder diff --git a/src/mongo/db/query/sbe_stage_builder_coll_scan.h b/src/mongo/db/query/sbe_stage_builder_coll_scan.h index c0eb9a098e72c..fead8258d21a2 100644 --- a/src/mongo/db/query/sbe_stage_builder_coll_scan.h +++ b/src/mongo/db/query/sbe_stage_builder_coll_scan.h @@ -29,10 +29,17 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/db/catalog/collection.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/collection_helpers.h" #include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" @@ -41,16 +48,16 @@ namespace mongo::stage_builder { class PlanStageSlots; /** - * Generates an SBE plan stage sub-tree implementing an collection scan. 'fields' can be used to + * Generates an SBE plan stage sub-tree implementing a collection scan. 'fields' can be used to * specify top-level fields that should be retrieved during the scan. For each name in 'fields', * there will be a corresponding kField slot in the PlanStageSlots object returned with the same * name. * * On success, a tuple containing the following data is returned: * * A slot to access a fetched document (a resultSlot) - * * A slot to access a recordId (a recordIdSlot) - * * An optional slot to access a latest oplog timestamp (oplogTsSlot), if we scan the oplog and - * were requested to track this data. + * * A slot to access the doc's RecordId (a recordIdSlot) + * * An optional slot to access the latest oplog timestamp ("ts" field) for oplog scans that were + * requested to track this data or that are clustered scans ("ts" is the oplog clustering key). * * A generated PlanStage sub-tree. * * In cases of an error, throws. 
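The header comment above describes the slots a collection scan publishes: a result slot, a RecordId slot, an optional latest-oplog-timestamp slot, and one slot per requested top-level field keyed by name. The following toy registry only illustrates that (kind, name) contract; it is not the real PlanStageSlots class.

```cpp
#include <cstdint>
#include <map>
#include <optional>
#include <string>
#include <utility>

using SlotId = int32_t;

// Toy registry for a scan's published slots: well-known kinds plus per-field
// slots keyed by field name.
class ScanOutputs {
public:
    enum Kind { kResult, kRecordId, kOplogTs, kField };

    void set(Kind kind, SlotId slot) {
        _slots[{kind, ""}] = slot;
    }
    void setField(const std::string& fieldName, SlotId slot) {
        _slots[{kField, fieldName}] = slot;
    }

    // Returns the slot if it was published, otherwise nullopt (e.g. the oplog
    // "ts" slot is only present for oplog/clustered scans that track it).
    std::optional<SlotId> get(Kind kind, const std::string& fieldName = "") const {
        auto it = _slots.find({kind, fieldName});
        return it == _slots.end() ? std::nullopt : std::optional<SlotId>{it->second};
    }

private:
    std::map<std::pair<Kind, std::string>, SlotId> _slots;
};
```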
@@ -59,8 +66,8 @@ std::pair, PlanStageSlots> generateCollScan( StageBuilderState& state, const CollectionPtr& collection, const CollectionScanNode* csn, - std::vector fields, + std::vector scanFieldNames, PlanYieldPolicy* yieldPolicy, - bool isTailableResumeBranch); + bool isResumingTailableScan); } // namespace mongo::stage_builder diff --git a/src/mongo/db/query/sbe_stage_builder_const_eval.cpp b/src/mongo/db/query/sbe_stage_builder_const_eval.cpp index 3b4262b173131..6908dd5ed9863 100644 --- a/src/mongo/db/query/sbe_stage_builder_const_eval.cpp +++ b/src/mongo/db/query/sbe_stage_builder_const_eval.cpp @@ -28,9 +28,22 @@ */ #include "mongo/db/query/sbe_stage_builder_const_eval.h" + +#include +#include +#include +#include + +#include +#include +#include + #include "mongo/db/exec/sbe/values/arith_common.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/collation/collator_interface.h" -#include "mongo/db/query/optimizer/utils/utils.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/util/assert_util.h" namespace mongo::stage_builder { bool ExpressionConstEval::optimize(optimizer::ABT& n) { diff --git a/src/mongo/db/query/sbe_stage_builder_const_eval.h b/src/mongo/db/query/sbe_stage_builder_const_eval.h index b31443658a7aa..96d7ce0e1bf1b 100644 --- a/src/mongo/db/query/sbe_stage_builder_const_eval.h +++ b/src/mongo/db/query/sbe_stage_builder_const_eval.h @@ -29,7 +29,16 @@ #pragma once +#include +#include +#include + +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/containers.h" #include "mongo/db/query/optimizer/reference_tracker.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/optimizer/utils/abt_hash.h" namespace mongo { diff --git a/src/mongo/db/query/sbe_stage_builder_const_eval_test.cpp b/src/mongo/db/query/sbe_stage_builder_const_eval_test.cpp index bdaa214d9c4c0..70c9b79820645 100644 --- a/src/mongo/db/query/sbe_stage_builder_const_eval_test.cpp +++ b/src/mongo/db/query/sbe_stage_builder_const_eval_test.cpp @@ -27,11 +27,18 @@ * it in the license file. 
*/ +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/comparison_op.h" #include "mongo/db/query/optimizer/utils/unit_test_abt_literals.h" -#include "mongo/db/query/optimizer/utils/unit_test_utils.h" #include "mongo/db/query/sbe_stage_builder_const_eval.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::stage_builder { namespace { diff --git a/src/mongo/db/query/sbe_stage_builder_eval_frame.cpp b/src/mongo/db/query/sbe_stage_builder_eval_frame.cpp index 0a9ec9008a7fa..f393afa8c423c 100644 --- a/src/mongo/db/query/sbe_stage_builder_eval_frame.cpp +++ b/src/mongo/db/query/sbe_stage_builder_eval_frame.cpp @@ -28,11 +28,19 @@ */ #include "mongo/db/query/sbe_stage_builder_eval_frame.h" -#include "mongo/db/query/optimizer/node.h" + +#include +#include + +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_abt_helpers.h" #include "mongo/db/query/sbe_stage_builder_abt_holder_impl.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/util/assert_util.h" namespace mongo::stage_builder { @@ -56,7 +64,7 @@ std::unique_ptr EvalExpr::extractExpr(optimizer::SlotVarMap& v } std::unique_ptr EvalExpr::extractExpr(StageBuilderState& state) { - return extractExpr(state.slotVarMap, *state.data->env); + return extractExpr(state.slotVarMap, *state.env); } abt::HolderPtr EvalExpr::extractABT(optimizer::SlotVarMap& varMap) { diff --git a/src/mongo/db/query/sbe_stage_builder_eval_frame.h b/src/mongo/db/query/sbe_stage_builder_eval_frame.h index fa3c81609b096..10dc1f1c717d7 100644 --- a/src/mongo/db/query/sbe_stage_builder_eval_frame.h +++ b/src/mongo/db/query/sbe_stage_builder_eval_frame.h @@ -29,13 +29,22 @@ #pragma once +#include +#include +#include +#include #include +#include +#include #include "mongo/db/exec/sbe/abt/abt_lower_defs.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/co_scan.h" #include "mongo/db/exec/sbe/stages/limit_skip.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/query/sbe_stage_builder_abt_holder_def.h" +#include "mongo/db/query/stage_types.h" #include "mongo/stdx/variant.h" namespace mongo::sbe { diff --git a/src/mongo/db/query/sbe_stage_builder_expression.cpp b/src/mongo/db/query/sbe_stage_builder_expression.cpp index 98e26148812a1..2f590754747c8 100644 --- a/src/mongo/db/query/sbe_stage_builder_expression.cpp +++ b/src/mongo/db/query/sbe_stage_builder_expression.cpp @@ -28,23 +28,61 @@ */ #include - -#include "mongo/db/query/sbe_stage_builder_expression.h" -#include "mongo/db/query/util/make_data_structure.h" - +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/docval_to_sbeval.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" +#include 
"mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/values/arith_common.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/datetime.h" #include "mongo/db/pipeline/accumulator.h" #include "mongo/db/pipeline/accumulator_multi.h" #include "mongo/db/pipeline/accumulator_percentile.h" #include "mongo/db/pipeline/expression_visitor.h" #include "mongo/db/pipeline/expression_walker.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/bson_typemask.h" +#include "mongo/db/query/datetime/date_time_support.h" #include "mongo/db/query/expression_walker.h" -#include "mongo/db/query/optimizer/explain.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" +#include "mongo/db/query/optimizer/utils/strong_alias.h" +#include "mongo/db/query/optimizer/utils/utils.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_abt_helpers.h" +#include "mongo/db/query/sbe_stage_builder_abt_holder_def.h" #include "mongo/db/query/sbe_stage_builder_abt_holder_impl.h" +#include "mongo/db/query/sbe_stage_builder_expression.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -820,7 +858,36 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { } void visit(const ExpressionArray* expr) final { - unsupportedExpression(expr->getOpName()); + auto arity = expr->getChildren().size(); + _context->ensureArity(arity); + + if (arity == 0) { + auto [emptyArrTag, emptyArrVal] = sbe::value::makeNewArray(); + pushABT(makeABTConstant(emptyArrTag, emptyArrVal)); + return; + } + + std::vector> binds; + for (size_t idx = 0; idx < arity; ++idx) { + binds.emplace_back(makeLocalVariableName(_context->state.frameId(), 0), + _context->popABTExpr()); + } + std::reverse(std::begin(binds), std::end(binds)); + + optimizer::ABTVector argVars; + for (auto& bind : binds) { + argVars.push_back(makeFillEmptyNull(makeVariable(bind.first))); + } + + auto arrayExpr = optimizer::make("newArray", std::move(argVars)); + + for (auto it = binds.begin(); it != binds.end(); it++) { + arrayExpr = optimizer::make( + it->first, std::move(it->second), std::move(arrayExpr)); + } + + pushABT(std::move(arrayExpr)); + return; } void visit(const ExpressionArrayElemAt* expr) final { unsupportedExpression(expr->getOpName()); @@ -1109,7 +1176,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { void visit(const ExpressionDateDiff* expr) final { using namespace std::literals; - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); invariant(children.size() == 5); auto startDateName = makeLocalVariableName(_context->state.frameId(), 0); @@ -1140,7 +1207,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { auto endDateExpression = _context->popABTExpr(); auto startDateExpression = _context->popABTExpr(); - auto timeZoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); + auto timeZoneDBSlot = _context->state.env->getSlot("timeZoneDB"_sd); auto timeZoneDBName = _context->registerVariable(timeZoneDBSlot); auto timeZoneDBVar = makeVariable(timeZoneDBName); @@ -1255,7 +1322,7 @@ class ExpressionPostVisitor 
final : public ExpressionConstVisitor { pushABT(std::move(dateDiffExpression)); } void visit(const ExpressionDateFromString* expr) final { - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); invariant(children.size() == 5); _context->ensureArity( 1 + (expr->isFormatSpecified() ? 1 : 0) + (expr->isTimezoneSpecified() ? 1 : 0) + @@ -1279,7 +1346,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { auto dateStringExpression = _context->popABTExpr(); auto dateStringName = makeLocalVariableName(_context->state.frameId(), 0); - auto timeZoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); + auto timeZoneDBSlot = _context->state.env->getSlot("timeZoneDB"_sd); auto timeZoneDBName = _context->registerVariable(timeZoneDBSlot); // Set parameters for an invocation of built-in "dateFromString" function. @@ -1383,7 +1450,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { "$dateFromString parameter 'timezone' must be a string", sbe::value::isString(timezoneTag)); auto [timezoneDBTag, timezoneDBVal] = - _context->state.data->env->getAccessor(timeZoneDBSlot)->getViewOfValue(); + _context->state.env->getAccessor(timeZoneDBSlot)->getViewOfValue(); uassert(4997801, "$dateFromString first argument must be a timezoneDB object", timezoneDBTag == sbe::value::TypeTags::timeZoneDB); @@ -1427,7 +1494,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { void visit(const ExpressionDateFromParts* expr) final { // This expression can carry null children depending on the set of fields provided, // to compute a date from parts so we only need to pop if a child exists. - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); invariant(children.size() == 11); boost::optional eTimezone; @@ -1673,7 +1740,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { // for datetime computation. This global object is registered as an unowned value in the // runtime environment so we pass the corresponding slot to the datePartsWeekYear and // dateParts functions as a variable. - auto timeZoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); + auto timeZoneDBSlot = _context->state.env->getSlot("timeZoneDB"_sd); auto timeZoneDBName = _context->registerVariable(timeZoneDBSlot); auto computeDate = makeABTFunction(eIsoWeekYear ? 
"datePartsWeekYear" : "dateParts", makeVariable(timeZoneDBName), @@ -1731,7 +1798,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { } void visit(const ExpressionDateToParts* expr) final { - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); auto dateName = makeLocalVariableName(_context->state.frameId(), 0); auto timezoneName = makeLocalVariableName(_context->state.frameId(), 0); auto isoflagName = makeLocalVariableName(_context->state.frameId(), 0); @@ -1755,7 +1822,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { } auto date = _context->popABTExpr(); - auto timeZoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); + auto timeZoneDBSlot = _context->state.env->getSlot("timeZoneDB"_sd); auto timeZoneDBName = _context->registerVariable(timeZoneDBSlot); auto timeZoneDBVar = makeVariable(timeZoneDBName); @@ -1801,29 +1868,30 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { } void visit(const ExpressionDateToString* expr) final { - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); invariant(children.size() == 4); _context->ensureArity(1 + (expr->isFormatSpecified() ? 1 : 0) + (expr->isTimezoneSpecified() ? 1 : 0) + (expr->isOnNullSpecified() ? 1 : 0)); // Get child expressions. - auto onNullExpression = + optimizer::ABT onNullExpression = expr->isOnNullSpecified() ? _context->popABTExpr() : optimizer::Constant::null(); - auto timezoneExpression = expr->isTimezoneSpecified() ? _context->popABTExpr() - : optimizer::Constant::str("UTC"_sd); - auto dateExpression = _context->popABTExpr(); + optimizer::ABT timezoneExpression = expr->isTimezoneSpecified() + ? _context->popABTExpr() + : optimizer::Constant::str("UTC"_sd); + optimizer::ABT dateExpression = _context->popABTExpr(); - auto formatExpression = expr->isFormatSpecified() + optimizer::ABT formatExpression = expr->isFormatSpecified() ? _context->popABTExpr() - : optimizer::Constant::str("%Y-%m-%dT%H:%M:%S.%LZ"_sd); + : optimizer::Constant::str(kIsoFormatStringZ); // assumes UTC until disproven - auto timeZoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); + auto timeZoneDBSlot = _context->state.env->getSlot("timeZoneDB"_sd); auto timeZoneDBName = _context->registerVariable(timeZoneDBSlot); auto timeZoneDBVar = makeVariable(timeZoneDBName); auto [timezoneDBTag, timezoneDBVal] = - _context->state.data->env->getAccessor(timeZoneDBSlot)->getViewOfValue(); + _context->state.env->getAccessor(timeZoneDBSlot)->getViewOfValue(); uassert(4997900, "$dateToString first argument must be a timezoneDB object", timezoneDBTag == sbe::value::TypeTags::timeZoneDB); @@ -1833,19 +1901,6 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { auto dateName = makeLocalVariableName(_context->state.frameId(), 0); auto dateVar = makeVariable(dateName); - // Set parameters for an invocation of built-in "dateToString" function. - optimizer::ABTVector arguments; - arguments.push_back(timeZoneDBVar); - arguments.push_back(dateExpression); - arguments.push_back(formatExpression); - arguments.push_back(timezoneExpression); - - // Create an expression to invoke built-in "dateToString" function. 
- auto dateToStringFunctionCall = - optimizer::make("dateToString", std::move(arguments)); - auto dateToStringName = makeLocalVariableName(_context->state.frameId(), 0); - auto dateToStringVar = makeVariable(dateToStringName); - // Create expressions to check that each argument to "dateToString" function exists, is not // null, and is of the correct type. std::vector inputValidationCases; @@ -1860,33 +1915,18 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { inputValidationCases.emplace_back(generateABTFailIfNotCoercibleToDate( dateVar, ErrorCodes::Error{4997901}, "$dateToString"_sd, "date"_sd)); - // "format" parameter validation. - if (auto* formatExpressionConst = formatExpression.cast(); - formatExpressionConst) { - auto [formatTag, formatVal] = formatExpressionConst->get(); - if (!sbe::value::isNullish(formatTag)) { - // We don't want to return an error on null. - uassert(4997902, - "$dateToString parameter 'format' must be a string", - sbe::value::isString(formatTag)); - TimeZone::validateToStringFormat(getStringView(formatTag, formatVal)); - } - } else { - inputValidationCases.emplace_back( - generateABTNonStringCheck(formatExpression), - makeABTFail(ErrorCodes::Error{4997903}, - "$dateToString parameter 'format' must be a string")); - inputValidationCases.emplace_back( - makeNot(makeABTFunction("isValidToStringFormat", formatExpression)), - makeABTFail(ErrorCodes::Error{4997904}, - "$dateToString parameter 'format' must be a valid format")); - } - // "timezone" parameter validation. if (auto* timezoneExpressionConst = timezoneExpression.cast(); timezoneExpressionConst) { auto [timezoneTag, timezoneVal] = timezoneExpressionConst->get(); if (!sbe::value::isNullish(timezoneTag)) { + // If the query did not specify a format string and a non-UTC timezone was + // specified, the default format should not use a 'Z' suffix. + if (!expr->isFormatSpecified() && + !(sbe::vm::getTimezone(timezoneTag, timezoneVal, timezoneDB).isUtcZone())) { + formatExpression = optimizer::Constant::str(kIsoFormatStringNonZ); + } + // We don't want to error on null. uassert(4997905, "$dateToString parameter 'timezone' must be a string", @@ -1906,6 +1946,41 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { "$dateToString parameter 'timezone' must be a valid timezone")); } + // "format" parameter validation. + if (auto* formatExpressionConst = formatExpression.cast(); + formatExpressionConst) { + auto [formatTag, formatVal] = formatExpressionConst->get(); + if (!sbe::value::isNullish(formatTag)) { + // We don't want to return an error on null. + uassert(4997902, + "$dateToString parameter 'format' must be a string", + sbe::value::isString(formatTag)); + TimeZone::validateToStringFormat(getStringView(formatTag, formatVal)); + } + } else { + inputValidationCases.emplace_back( + generateABTNonStringCheck(formatExpression), + makeABTFail(ErrorCodes::Error{4997903}, + "$dateToString parameter 'format' must be a string")); + inputValidationCases.emplace_back( + makeNot(makeABTFunction("isValidToStringFormat", formatExpression)), + makeABTFail(ErrorCodes::Error{4997904}, + "$dateToString parameter 'format' must be a valid format")); + } + + // Set parameters for an invocation of built-in "dateToString" function. + optimizer::ABTVector arguments; + arguments.push_back(timeZoneDBVar); + arguments.push_back(dateExpression); + arguments.push_back(formatExpression); + arguments.push_back(timezoneExpression); + + // Create an expression to invoke built-in "dateToString" function. 
+ auto dateToStringFunctionCall = + optimizer::make("dateToString", std::move(arguments)); + auto dateToStringName = makeLocalVariableName(_context->state.frameId(), 0); + auto dateToStringVar = makeVariable(dateToStringName); + pushABT(optimizer::make( std::move(dateName), std::move(dateExpression), @@ -1919,7 +1994,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { std::move(inputValidationCases), optimizer::Constant::nothing()))))); } void visit(const ExpressionDateTrunc* expr) final { - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); invariant(children.size() == 5); _context->ensureArity(2 + (expr->isBinSizeSpecified() ? 1 : 0) + (expr->isTimezoneSpecified() ? 1 : 0) + @@ -1936,11 +2011,11 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { auto unitExpression = _context->popABTExpr(); auto dateExpression = _context->popABTExpr(); - auto timeZoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); + auto timeZoneDBSlot = _context->state.env->getSlot("timeZoneDB"_sd); auto timeZoneDBName = _context->registerVariable(timeZoneDBSlot); auto timeZoneDBVar = makeVariable(timeZoneDBName); auto [timezoneDBTag, timezoneDBVal] = - _context->state.data->env->getAccessor(timeZoneDBSlot)->getViewOfValue(); + _context->state.env->getAccessor(timeZoneDBSlot)->getViewOfValue(); tassert(7157927, "$dateTrunc first argument must be a timezoneDB object", timezoneDBTag == sbe::value::TypeTags::timeZoneDB); @@ -2204,7 +2279,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { "Encountered unexpected system variable ID", it != Variables::kIdToBuiltinVarName.end()); - auto slot = _context->state.data->env->getSlotIfExists(it->second); + auto slot = _context->state.env->getSlotIfExists(it->second); uassert(5611301, str::stream() << "Builtin variable '$$" << it->second << "' is not available", @@ -2820,7 +2895,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { auto [specTag, specVal] = makeValue(expr->getSortPattern()); auto specConstant = makeABTConstant(specTag, specVal); - auto collatorSlot = _context->state.data->env->getSlotIfExists("collator"_sd); + auto collatorSlot = _context->state.env->getSlotIfExists("collator"_sd); auto collatorVar = collatorSlot.map( [&](auto slotId) { return _context->registerVariable(*collatorSlot); }); @@ -2852,42 +2927,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { unsupportedExpression(expr->getOpName()); } void visit(const ExpressionRound* expr) final { - invariant(expr->getChildren().size() == 1 || expr->getChildren().size() == 2); - const bool hasPlaceArg = expr->getChildren().size() == 2; - _context->ensureArity(expr->getChildren().size()); - - auto inputNumName = makeLocalVariableName(_context->state.frameId(), 0); - auto inputPlaceName = makeLocalVariableName(_context->state.frameId(), 0); - - // We always need to validate the number parameter, since it will always exist. - std::vector inputValidationCases{ - generateABTReturnNullIfNullOrMissing(makeVariable(inputNumName)), - ABTCaseValuePair{ - generateABTNonNumericCheck(inputNumName), - makeABTFail(ErrorCodes::Error{5155300}, "$round only supports numeric types")}}; - // Only add these cases if we have a "place" argument. 
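The $round visitor being deleted here and the shared visitRoundTruncExpression helper added further down apply the same validation cascade: a null or missing input (or "place") yields null, a non-numeric input fails, and "place" must be an integer between -20 and 100, defaulting to 0. Below is a standalone sketch of that cascade under simplifying assumptions: the input is already typed as a number, so the non-numeric check is elided, and std::round/std::trunc stand in for the SBE built-ins without reproducing MongoDB's exact rounding mode.

```cpp
#include <cmath>
#include <optional>
#include <stdexcept>

// Sketch of the shared $round/$trunc validation and evaluation order.
// A missing input or place yields null; an out-of-range place is an error;
// place defaults to 0 when not supplied.
std::optional<double> roundOrTrunc(bool isRound,
                                   std::optional<double> input,
                                   std::optional<int> place) {
    if (!input) {
        return std::nullopt;  // null/missing input -> null result
    }
    int p = 0;
    if (place) {
        if (*place < -20 || *place > 100) {
            throw std::invalid_argument(
                "\"place\" argument must be an integer between -20 and 100");
        }
        p = *place;
    }
    // Positive place keeps decimal digits; negative place rounds to powers of ten.
    const double scale = std::pow(10.0, p);
    const double scaled = *input * scale;
    return (isRound ? std::round(scaled) : std::trunc(scaled)) / scale;
}
```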
- if (hasPlaceArg) { - inputValidationCases.emplace_back( - generateABTReturnNullIfNullOrMissing(makeVariable(inputPlaceName))); - inputValidationCases.emplace_back( - generateInvalidRoundPlaceArgCheck(inputPlaceName), - makeABTFail(ErrorCodes::Error{5155301}, - "$round requires \"place\" argument to be " - "an integer between -20 and 100")); - } - - auto roundExpr = buildABTMultiBranchConditionalFromCaseValuePairs( - std::move(inputValidationCases), - makeABTFunction("round"_sd, makeVariable(inputNumName), makeVariable(inputPlaceName))); - - // "place" argument defaults to 0. - auto placeABT = hasPlaceArg ? _context->popABTExpr() : optimizer::Constant::int32(0); - auto inputABT = _context->popABTExpr(); - pushABT(optimizer::make( - std::move(inputNumName), - std::move(inputABT), - optimizer::make( - std::move(inputPlaceName), std::move(placeABT), std::move(roundExpr)))); + visitRoundTruncExpression(expr); } void visit(const ExpressionSplit* expr) final { invariant(expr->getChildren().size() == 2); @@ -2967,7 +3007,21 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { unsupportedExpression(expr->getOpName()); } void visit(const ExpressionStrLenBytes* expr) final { - unsupportedExpression(expr->getOpName()); + invariant(expr->getChildren().size() == 1); + _context->ensureArity(1); + + auto strName = makeLocalVariableName(_context->state.frameId(), 0); + auto strExpression = _context->popABTExpr(); + auto strVar = makeVariable(strName); + + auto strLenBytesExpr = optimizer::make( + makeFillEmptyFalse(makeABTFunction("isString", strVar)), + makeABTFunction("strLenBytes", strVar), + makeABTFail(ErrorCodes::Error{5155800}, "$strLenBytes requires a string argument")); + + pushABT(optimizer::make( + std::move(strName), std::move(strExpression), std::move(strLenBytesExpr))); + return; } void visit(const ExpressionBinarySize* expr) final { unsupportedExpression(expr->getOpName()); @@ -3031,10 +3085,62 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { generateStringCaseConversionExpression(_context, "toUpper"); } void visit(const ExpressionTrim* expr) final { - unsupportedExpression("$trim"); + tassert(5156301, + "trim expressions must have spots in their children vector for 'input' and " + "'chars' fields", + expr->getChildren().size() == 2); + auto numProvidedArgs = 1; + if (expr->hasCharactersExpr()) // 'chars' is not null + ++numProvidedArgs; + _context->ensureArity(numProvidedArgs); + + auto inputName = makeLocalVariableName(_context->state.frameId(), 0); + auto charsName = makeLocalVariableName(_context->state.frameId(), 0); + + auto charsString = + numProvidedArgs == 2 ? 
_context->popABTExpr() : optimizer::Constant::null(); + auto inputString = _context->popABTExpr(); + auto trimBuiltinName = expr->getTrimTypeString(); + + auto checkCharsNotNullString = makeNot(optimizer::make( + optimizer::Operations::Or, + generateABTNullOrMissing(charsName), + makeABTFunction("isString"_sd, makeVariable(charsName)))); + + /* + Trim Functionality (invariant that 'input' has been provided, otherwise would've failed + at parse time) + + if ('input' is not a string) { + -> fail with error code 5156302 + } + else if ('chars' is provided but is not a string) { + -> fail with error code 5156303 + } + else { + -> make an ABT function for the correct $trim variant with 'input' and 'chars' + parameters + } + */ + auto trimFunc = buildABTMultiBranchConditional( + ABTCaseValuePair{ + makeNot(makeABTFunction("isString"_sd, makeVariable(inputName))), + makeABTFail(ErrorCodes::Error{5156302}, + "$" + trimBuiltinName + " input expression must be a string")}, + ABTCaseValuePair{std::move(checkCharsNotNullString), + makeABTFail(ErrorCodes::Error{5156303}, + "$" + trimBuiltinName + + " chars expression must be a string if provided")}, + makeABTFunction(trimBuiltinName, makeVariable(inputName), makeVariable(charsName))); + + pushABT(optimizer::make( + std::move(inputName), + std::move(inputString), + optimizer::make( + std::move(charsName), std::move(charsString), std::move(trimFunc)))); } void visit(const ExpressionTrunc* expr) final { - unsupportedExpression(expr->getOpName()); + visitRoundTruncExpression(expr); } void visit(const ExpressionType* expr) final { unsupportedExpression(expr->getOpName()); @@ -3274,6 +3380,56 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { } private: + /** + * Shared logic for $round and $trunc expressions + */ + template + void visitRoundTruncExpression(const ExprType* expr) { + const std::string opName(expr->getOpName()); + invariant(opName == "$round" || opName == "$trunc"); + + const auto& children = expr->getChildren(); + invariant(children.size() == 1 || children.size() == 2); + const bool hasPlaceArg = (children.size() == 2); + _context->ensureArity(children.size()); + + auto inputNumName = makeLocalVariableName(_context->state.frameId(), 0); + auto inputPlaceName = makeLocalVariableName(_context->state.frameId(), 0); + + // We always need to validate the number parameter, since it will always exist. + std::vector inputValidationCases{ + generateABTReturnNullIfNullOrMissing(makeVariable(inputNumName)), + ABTCaseValuePair{ + generateABTNonNumericCheck(inputNumName), + makeABTFail(ErrorCodes::Error{5155300}, opName + " only supports numeric types")}}; + // Only add these cases if we have a "place" argument. + if (hasPlaceArg) { + inputValidationCases.emplace_back( + generateABTReturnNullIfNullOrMissing(makeVariable(inputPlaceName))); + inputValidationCases.emplace_back(generateInvalidRoundPlaceArgCheck(inputPlaceName), + makeABTFail(ErrorCodes::Error{5155301}, + opName + + " requires \"place\" argument to be " + "an integer between -20 and 100")); + } + + optimizer::ABT abtExpr = buildABTMultiBranchConditionalFromCaseValuePairs( + std::move(inputValidationCases), + makeABTFunction((opName == "$round" ? "round"_sd : "trunc"_sd), + makeVariable(inputNumName), + makeVariable(inputPlaceName))); + + // "place" argument defaults to 0. + optimizer::ABT placeABT = + hasPlaceArg ? 
_context->popABTExpr() : optimizer::Constant::int32(0); + optimizer::ABT inputABT = _context->popABTExpr(); + pushABT(optimizer::make( + std::move(inputNumName), + std::move(inputABT), + optimizer::make( + std::move(inputPlaceName), std::move(placeABT), std::move(abtExpr)))); + } + /** * Shared logic for $and, $or. Converts each child into an EExpression that evaluates to Boolean * true or false, based on MQL rules for $and and $or branches, and then chains the branches @@ -3335,7 +3491,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { } void generateDateExpressionAcceptingTimeZone(StringData exprName, const Expression* expr) { - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); invariant(children.size() == 2); auto timezoneExpression = @@ -3346,7 +3502,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { auto dateName = makeLocalVariableName(_context->state.frameId(), 0); auto dateVar = makeVariable(dateName); - auto timeZoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); + auto timeZoneDBSlot = _context->state.env->getSlot("timeZoneDB"_sd); // Set parameters for an invocation of the built-in function. optimizer::ABTVector arguments; @@ -3364,7 +3520,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { if (timezoneExpression.is()) { auto [timezoneTag, timezoneVal] = timezoneExpression.cast()->get(); auto [timezoneDBTag, timezoneDBVal] = - _context->state.data->env->getAccessor(timeZoneDBSlot)->getViewOfValue(); + _context->state.env->getAccessor(timeZoneDBSlot)->getViewOfValue(); auto timezoneDB = sbe::value::getTimeZoneDBView(timezoneDBVal); uassert(5157900, str::stream() << "$" << exprName.toString() @@ -3600,7 +3756,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { void visitIndexOfFunction(const Expression* expr, ExpressionVisitorContext* _context, const std::string& indexOfFunction) { - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); auto operandSize = children.size() <= 3 ? 
3 : 4; optimizer::ABTVector operands; operands.reserve(operandSize); @@ -3734,7 +3890,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { optimizer::ABTVector checkNulls; optimizer::ABTVector checkNotArrays; - auto collatorSlot = _context->state.data->env->getSlotIfExists("collator"_sd); + auto collatorSlot = _context->state.env->getSlotIfExists("collator"_sd); args.reserve(arity); argNames.reserve(arity); @@ -4010,7 +4166,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { */ void generateDateArithmeticsExpression(const ExpressionDateArithmetics* expr, const std::string& dateExprName) { - auto children = expr->getChildren(); + const auto& children = expr->getChildren(); auto arity = children.size(); invariant(arity == 4); auto timezoneExpr = @@ -4043,7 +4199,7 @@ class ExpressionPostVisitor final : public ExpressionConstVisitor { } }(); - auto timeZoneDBSlot = _context->state.data->env->getSlot("timeZoneDB"_sd); + auto timeZoneDBSlot = _context->state.env->getSlot("timeZoneDB"_sd); auto timeZoneDBVar = makeVariable(_context->registerVariable(timeZoneDBSlot)); optimizer::ABTVector checkNullArg; diff --git a/src/mongo/db/query/sbe_stage_builder_expression.h b/src/mongo/db/query/sbe_stage_builder_expression.h index d918d19a7cf1a..6e66a230639ee 100644 --- a/src/mongo/db/query/sbe_stage_builder_expression.h +++ b/src/mongo/db/query/sbe_stage_builder_expression.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/query/sbe_stage_builder_eval_frame.h" diff --git a/src/mongo/db/query/sbe_stage_builder_filter.cpp b/src/mongo/db/query/sbe_stage_builder_filter.cpp index 20d0d693f8c32..f4f7bcb1383e0 100644 --- a/src/mongo/db/query/sbe_stage_builder_filter.cpp +++ b/src/mongo/db/query/sbe_stage_builder_filter.cpp @@ -27,21 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/sbe_stage_builder_filter.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/js_function.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/match_path.h" -#include "mongo/db/exec/sbe/stages/co_scan.h" -#include "mongo/db/exec/sbe/stages/filter.h" -#include "mongo/db/exec/sbe/stages/limit_skip.h" -#include "mongo/db/exec/sbe/stages/loop_join.h" -#include "mongo/db/exec/sbe/stages/project.h" -#include "mongo/db/exec/sbe/stages/traverse.h" -#include "mongo/db/exec/sbe/stages/union.h" #include "mongo/db/exec/sbe/values/bson.h" +#include "mongo/db/field_ref.h" #include "mongo/db/matcher/expression_always_boolean.h" #include "mongo/db/matcher/expression_array.h" #include "mongo/db/matcher/expression_expr.h" @@ -56,6 +66,7 @@ #include "mongo/db/matcher/expression_where.h" #include "mongo/db/matcher/expression_where_noop.h" #include "mongo/db/matcher/match_expression_walker.h" +#include "mongo/db/matcher/matcher_type_set.h" #include "mongo/db/matcher/schema/expression_internal_schema_all_elem_match_from_index.h" #include "mongo/db/matcher/schema/expression_internal_schema_allowed_properties.h" #include "mongo/db/matcher/schema/expression_internal_schema_cond.h" @@ -72,12 +83,16 @@ #include "mongo/db/matcher/schema/expression_internal_schema_root_doc_eq.h" #include "mongo/db/matcher/schema/expression_internal_schema_unique_items.h" #include "mongo/db/matcher/schema/expression_internal_schema_xor.h" +#include "mongo/db/query/bson_typemask.h" +#include "mongo/db/query/optimizer/syntax/expr.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_abt_helpers.h" #include "mongo/db/query/sbe_stage_builder_abt_holder_impl.h" #include "mongo/db/query/sbe_stage_builder_eval_frame.h" #include "mongo/db/query/sbe_stage_builder_expression.h" -#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/db/query/sbe_stage_builder_filter.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo::stage_builder { @@ -87,7 +102,6 @@ EvalExpr toEvalExpr(boost::optional slot) { return slot ? EvalExpr{*slot} : EvalExpr{}; } -struct MatchExpressionVisitorContext; /** * A function of type 'MakePredicateFn' can be called to generate an EExpression which applies @@ -392,7 +406,7 @@ void generatePredicate(MatchExpressionVisitorContext* context, 0, /* level */ context->state.frameIdGenerator, context->state.slotVarMap, - *context->state.data->env, + *context->state.env, makePredicate, matchesNothing, mode)); @@ -431,12 +445,11 @@ void generateArraySize(MatchExpressionVisitorContext* context, auto makePredicate = [&](EvalExpr inputExpr) { auto sizeExpr = inputParamSlotId ? 
makeVariable(*inputParamSlotId) : makeConstant(sbe::value::TypeTags::NumberInt32, size); - return makeFillEmptyFalse( - makeBinaryOp(sbe::EPrimBinary::eq, - makeFunction("getArraySize", - inputExpr.extractExpr(context->state.slotVarMap, - *context->state.data->env)), - std::move(sizeExpr))); + return makeFillEmptyFalse(makeBinaryOp( + sbe::EPrimBinary::eq, + makeFunction("getArraySize", + inputExpr.extractExpr(context->state.slotVarMap, *context->state.env)), + std::move(sizeExpr))); }; const auto traversalMode = LeafTraversalMode::kDoNotTraverseLeaf; @@ -452,7 +465,7 @@ void generateComparison(MatchExpressionVisitorContext* context, sbe::EPrimBinary::Op binaryOp) { auto makePredicate = [context, expr, binaryOp](EvalExpr inputExpr) { return generateComparisonExpr(context->state, expr, binaryOp, std::move(inputExpr)) - .extractExpr(context->state.slotVarMap, *context->state.data->env); + .extractExpr(context->state.slotVarMap, *context->state.env); }; // A 'kArrayAndItsElements' traversal mode matches the following semantics: when the path we are @@ -492,7 +505,7 @@ void generateBitTest(MatchExpressionVisitorContext* context, const sbe::BitTestBehavior& bitOp) { auto makePredicate = [context, expr, bitOp](EvalExpr inputExpr) { return generateBitTestExpr(context->state, expr, bitOp, std::move(inputExpr)) - .extractExpr(context->state.slotVarMap, *context->state.data->env); + .extractExpr(context->state.slotVarMap, *context->state.env); }; const auto traversalMode = LeafTraversalMode::kArrayElementsOnly; @@ -709,7 +722,7 @@ std::tuple, bool, bool, bool> _generateInExprI } auto&& [arrSetTag, arrSetVal, hasArray, hasObject, hasNull] = - convertInExpressionEqualities(expr); + convertInExpressionEqualities(expr, state.data->queryCollator.get()); sbe::value::ValueGuard arrSetGuard{arrSetTag, arrSetVal}; auto equalities = sbe::makeE(arrSetTag, arrSetVal); arrSetGuard.reset(); @@ -777,16 +790,14 @@ class MatchExpressionPostVisitor final : public MatchExpressionConstVisitor { auto lambdaExpr = sbe::makeE(lambdaFrameId, std::move(lambdaBodyExpr)); auto makePredicate = [&](EvalExpr inputExpr) { - return makeFillEmptyFalse( - makeBinaryOp(sbe::EPrimBinary::logicAnd, - makeFunction("isArray", - inputExpr.getExpr(_context->state.slotVarMap, - *_context->state.data->env)), - makeFunction("traverseF", - inputExpr.getExpr(_context->state.slotVarMap, - *_context->state.data->env), - std::move(lambdaExpr), - makeConstant(sbe::value::TypeTags::Boolean, false)))); + return makeFillEmptyFalse(makeBinaryOp( + sbe::EPrimBinary::logicAnd, + makeFunction("isArray", + inputExpr.getExpr(_context->state.slotVarMap, *_context->state.env)), + makeFunction("traverseF", + inputExpr.getExpr(_context->state.slotVarMap, *_context->state.env), + std::move(lambdaExpr), + makeConstant(sbe::value::TypeTags::Boolean, false)))); }; const auto traversalMode = LeafTraversalMode::kDoNotTraverseLeaf; @@ -820,16 +831,14 @@ class MatchExpressionPostVisitor final : public MatchExpressionConstVisitor { lambdaFrameId, lambdaBodyExpr.extractExpr(_context->state)); auto makePredicate = [&](EvalExpr inputExpr) { - return makeFillEmptyFalse( - makeBinaryOp(sbe::EPrimBinary::logicAnd, - makeFunction("isArray", - inputExpr.getExpr(_context->state.slotVarMap, - *_context->state.data->env)), - makeFunction("traverseF", - inputExpr.getExpr(_context->state.slotVarMap, - *_context->state.data->env), - std::move(lambdaExpr), - makeConstant(sbe::value::TypeTags::Boolean, false)))); + return makeFillEmptyFalse(makeBinaryOp( + sbe::EPrimBinary::logicAnd, + 
makeFunction("isArray", + inputExpr.getExpr(_context->state.slotVarMap, *_context->state.env)), + makeFunction("traverseF", + inputExpr.getExpr(_context->state.slotVarMap, *_context->state.env), + std::move(lambdaExpr), + makeConstant(sbe::value::TypeTags::Boolean, false)))); }; const auto traversalMode = LeafTraversalMode::kDoNotTraverseLeaf; @@ -895,11 +904,10 @@ class MatchExpressionPostVisitor final : public MatchExpressionConstVisitor { : sbe::makeE( generateNullOrMissing(inputExpr.clone(), _context->state), makeConstant(sbe::value::TypeTags::Null, 0), - inputExpr.getExpr(_context->state.slotVarMap, - *_context->state.data->env)); + inputExpr.getExpr(_context->state.slotVarMap, *_context->state.env)); return makeIsMember( - std::move(valueExpr), std::move(equalitiesExpr), _context->state.data->env); + std::move(valueExpr), std::move(equalitiesExpr), _context->state.env); }; generatePredicate(_context, *expr->fieldRef(), makePredicate, traversalMode, hasNull); @@ -946,12 +954,12 @@ class MatchExpressionPostVisitor final : public MatchExpressionConstVisitor { sbe::EPrimBinary::logicOr, makeFillEmptyFalse(makeFunction( "isMember", - inputExpr.getExpr(_context->state.slotVarMap, *_context->state.data->env), + inputExpr.getExpr(_context->state.slotVarMap, *_context->state.env), std::move(regexSetConstant))), makeFillEmptyFalse(makeFunction( "regexMatch", std::move(pcreRegexesConstant), - inputExpr.getExpr(_context->state.slotVarMap, *_context->state.data->env)))); + inputExpr.getExpr(_context->state.slotVarMap, *_context->state.env)))); if (expr->getEqualities().size() > 0) { // We have to match nulls and undefined if a 'null' is present in equalities. @@ -959,13 +967,13 @@ class MatchExpressionPostVisitor final : public MatchExpressionConstVisitor { inputExpr = sbe::makeE( generateNullOrMissing(inputExpr.clone(), _context->state), makeConstant(sbe::value::TypeTags::Null, 0), - inputExpr.getExpr(_context->state.slotVarMap, *_context->state.data->env)); + inputExpr.getExpr(_context->state.slotVarMap, *_context->state.env)); } resultExpr = makeBinaryOp(sbe::EPrimBinary::logicOr, makeIsMember(inputExpr.extractExpr(_context->state), std::move(equalitiesExpr), - _context->state.data->env), + _context->state.env), std::move(resultExpr)); } @@ -1225,9 +1233,9 @@ EvalExpr generateFilter(StageBuilderState& state, } std::tuple convertInExpressionEqualities( - const InMatchExpression* expr) { + const InMatchExpression* expr, const CollatorInterface* coll) { auto& equalities = expr->getEqualities(); - auto [arrSetTag, arrSetVal] = sbe::value::makeNewArraySet(); + auto [arrSetTag, arrSetVal] = sbe::value::makeNewArraySet(coll); sbe::value::ValueGuard arrSetGuard{arrSetTag, arrSetVal}; auto arrSet = sbe::value::getArraySetView(arrSetVal); @@ -1259,7 +1267,7 @@ std::tuple convertInE std::pair convertBitTestBitPositions( const BitTestMatchExpression* expr) { - auto bitPositions = expr->getBitPositions(); + const auto& bitPositions = expr->getBitPositions(); // Build an array set of bit positions for the bitmask, and remove duplicates in the // bitPositions vector since duplicates aren't handled in the match expression parser by @@ -1336,12 +1344,12 @@ EvalExpr generateComparisonExpr(StageBuilderState& state, inputExpr = buildMultiBranchConditional( CaseValuePair{generateNullOrMissing(inputExpr.clone(), state), makeConstant(sbe::value::TypeTags::Null, 0)}, - inputExpr.getExpr(state.slotVarMap, *state.data->env)); + inputExpr.getExpr(state.slotVarMap, *state.env)); return 
makeFillEmptyFalse(makeBinaryOp(binaryOp, inputExpr.extractExpr(state), makeConstant(sbe::value::TypeTags::Null, 0), - state.data->env)); + state.env)); } else if (sbe::value::isNaN(tagView, valView)) { // Construct an expression to perform a NaN check. switch (binaryOp) { @@ -1372,7 +1380,7 @@ EvalExpr generateComparisonExpr(StageBuilderState& state, }(tagView, valView); return makeFillEmptyFalse( - makeBinaryOp(binaryOp, inputExpr.extractExpr(state), std::move(valExpr), state.data->env)); + makeBinaryOp(binaryOp, inputExpr.extractExpr(state), std::move(valExpr), state.env)); } EvalExpr generateInExpr(StageBuilderState& state, @@ -1384,7 +1392,7 @@ EvalExpr generateInExpr(StageBuilderState& state, auto [equalities, hasArray, hasObject, hasNull] = _generateInExprInternal(state, expr); - return makeIsMember(inputExpr.extractExpr(state), std::move(equalities), state.data->env); + return makeIsMember(inputExpr.extractExpr(state), std::move(equalities), state.env.runtimeEnv); } EvalExpr generateBitTestExpr(StageBuilderState& state, @@ -1409,7 +1417,7 @@ EvalExpr generateBitTestExpr(StageBuilderState& state, auto binaryBitTestExpr = makeFunction("bitTestPosition"_sd, std::move(bitPosExpr), - inputExpr.getExpr(state.slotVarMap, *state.data->env), + inputExpr.getExpr(state.slotVarMap, *state.env), makeConstant(sbe::value::TypeTags::NumberInt32, static_cast(bitOp))); // Build An EExpression for the numeric bitmask case. The AllSet case tests if (mask & @@ -1431,12 +1439,12 @@ EvalExpr generateBitTestExpr(StageBuilderState& state, // consistent with MongoDB's documentation. auto numericBitTestInputExpr = sbe::makeE( makeFunction("typeMatch", - inputExpr.getExpr(state.slotVarMap, *state.data->env), + inputExpr.getExpr(state.slotVarMap, *state.env), makeConstant(sbe::value::TypeTags::NumberInt64, sbe::value::bitcastFrom( getBSONTypeMask(sbe::value::TypeTags::NumberDecimal)))), - makeFunction("round"_sd, inputExpr.getExpr(state.slotVarMap, *state.data->env)), - inputExpr.getExpr(state.slotVarMap, *state.data->env)); + makeFunction("round"_sd, inputExpr.getExpr(state.slotVarMap, *state.env)), + inputExpr.getExpr(state.slotVarMap, *state.env)); std::unique_ptr bitMaskExpr = [&]() -> std::unique_ptr { if (auto bitMaskParamId = expr->getBitMaskParamId()) { @@ -1480,7 +1488,7 @@ EvalExpr generateModExpr(StageBuilderState& state, EvalExpr inputExpr) { auto& dividend = inputExpr; auto truncatedArgument = sbe::makeE( - makeFunction("trunc"_sd, dividend.getExpr(state.slotVarMap, *state.data->env)), + makeFunction("trunc"_sd, dividend.getExpr(state.slotVarMap, *state.env)), sbe::value::TypeTags::NumberInt64); tassert(6142202, "Either both divisor and remainer are parameterized or none", @@ -1545,11 +1553,11 @@ EvalExpr generateRegexExpr(StageBuilderState& state, auto resultExpr = makeBinaryOp( sbe::EPrimBinary::logicOr, makeFillEmptyFalse(makeBinaryOp(sbe::EPrimBinary::eq, - inputExpr.getExpr(state.slotVarMap, *state.data->env), + inputExpr.getExpr(state.slotVarMap, *state.env), std::move(bsonRegexExpr))), makeFillEmptyFalse(makeFunction("regexMatch", std::move(compiledRegexExpr), - inputExpr.getExpr(state.slotVarMap, *state.data->env)))); + inputExpr.getExpr(state.slotVarMap, *state.env)))); return std::move(resultExpr); } diff --git a/src/mongo/db/query/sbe_stage_builder_filter.h b/src/mongo/db/query/sbe_stage_builder_filter.h index f5b8b86d84787..d1125f1d102a3 100644 --- a/src/mongo/db/query/sbe_stage_builder_filter.h +++ b/src/mongo/db/query/sbe_stage_builder_filter.h @@ -29,11 +29,20 @@ #pragma once 
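convertInExpressionEqualities now receives the query's CollatorInterface and builds the ArraySet with it, so $in membership respects the collation rather than raw byte-wise equality. A small sketch of the idea, with a case-insensitive comparator standing in for a collation; the types are illustrative, not the real SBE value classes.

```cpp
#include <algorithm>
#include <cctype>
#include <set>
#include <string>

// Stand-in for a collation: a case-insensitive ordering. The real code hands
// the query's CollatorInterface to makeNewArraySet() so the set's notion of
// equality matches the collation.
struct CollationLess {
    bool operator()(const std::string& a, const std::string& b) const {
        return std::lexicographical_compare(
            a.begin(), a.end(), b.begin(), b.end(), [](unsigned char x, unsigned char y) {
                return std::tolower(x) < std::tolower(y);
            });
    }
};

// Build the equality set once at plan time, then answer $in membership with a
// single lookup per input value.
bool inMatches(const std::set<std::string, CollationLess>& equalities,
               const std::string& value) {
    return equalities.count(value) > 0;
}
```

With equalities {"Ann", "bob"}, inMatches(equalities, "ANN") returns true, which is the behavior a case-insensitive collation would give $in.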
+#include +#include +#include +#include +#include + #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_visitor.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/sbe_stage_builder_eval_frame.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" @@ -73,7 +82,7 @@ EvalExpr generateFilter(StageBuilderState& state, * value. */ std::tuple convertInExpressionEqualities( - const InMatchExpression* expr); + const InMatchExpression* expr, const CollatorInterface* coll); /** * Converts the list of bit positions inside of any of the bit-test match expressions diff --git a/src/mongo/db/query/sbe_stage_builder_helpers.cpp b/src/mongo/db/query/sbe_stage_builder_helpers.cpp index fb91fd340f012..fb07122910a1c 100644 --- a/src/mongo/db/query/sbe_stage_builder_helpers.cpp +++ b/src/mongo/db/query/sbe_stage_builder_helpers.cpp @@ -28,33 +28,69 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/sbe_stage_builder_helpers.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include #include +#include +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/health_log_gen.h" #include "mongo/db/catalog/health_log_interface.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/branch.h" #include "mongo/db/exec/sbe/stages/co_scan.h" #include "mongo/db/exec/sbe/stages/hash_agg.h" #include "mongo/db/exec/sbe/stages/limit_skip.h" #include "mongo/db/exec/sbe/stages/loop_join.h" +#include "mongo/db/exec/sbe/stages/makeobj.h" #include "mongo/db/exec/sbe/stages/project.h" #include "mongo/db/exec/sbe/stages/scan.h" #include "mongo/db/exec/sbe/stages/traverse.h" #include "mongo/db/exec/sbe/stages/union.h" #include "mongo/db/exec/sbe/stages/unwind.h" -#include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/matcher/matcher_type_set.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/query/bson_typemask.h" +#include "mongo/db/query/projection.h" +#include "mongo/db/query/projection_ast.h" +#include "mongo/db/query/projection_ast_visitor.h" #include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/tree_walker.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/execution_context.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/shared_buffer_fragment.h" #include "mongo/util/stacktrace.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -87,15 +123,22 @@ std::unique_ptr makeBinaryOp(sbe::EPrimBinary::Op binaryOp, std::unique_ptr makeBinaryOp(sbe::EPrimBinary::Op binaryOp, std::unique_ptr lhs, std::unique_ptr rhs, - sbe::RuntimeEnvironment* env) { - invariant(env); + sbe::RuntimeEnvironment* runtimeEnv) { + invariant(runtimeEnv); - auto collatorSlot = env->getSlotIfExists("collator"_sd); + auto collatorSlot = runtimeEnv->getSlotIfExists("collator"_sd); auto collatorVar = collatorSlot ? sbe::makeE(*collatorSlot) : nullptr; return makeBinaryOp(binaryOp, std::move(lhs), std::move(rhs), std::move(collatorVar)); } +std::unique_ptr makeBinaryOp(sbe::EPrimBinary::Op binaryOp, + std::unique_ptr lhs, + std::unique_ptr rhs, + PlanStageEnvironment& env) { + return makeBinaryOp(binaryOp, std::move(lhs), std::move(rhs), env.runtimeEnv); +} + std::unique_ptr makeIsMember(std::unique_ptr input, std::unique_ptr arr, std::unique_ptr collator) { @@ -108,15 +151,21 @@ std::unique_ptr makeIsMember(std::unique_ptr std::unique_ptr makeIsMember(std::unique_ptr input, std::unique_ptr arr, - sbe::RuntimeEnvironment* env) { - invariant(env); + sbe::RuntimeEnvironment* runtimeEnv) { + invariant(runtimeEnv); - auto collatorSlot = env->getSlotIfExists("collator"_sd); + auto collatorSlot = runtimeEnv->getSlotIfExists("collator"_sd); auto collatorVar = collatorSlot ? sbe::makeE(*collatorSlot) : nullptr; return makeIsMember(std::move(input), std::move(arr), std::move(collatorVar)); } +std::unique_ptr makeIsMember(std::unique_ptr input, + std::unique_ptr arr, + PlanStageEnvironment& env) { + return makeIsMember(std::move(input), std::move(arr), env.runtimeEnv); +} + std::unique_ptr generateNullOrMissingExpr(const sbe::EExpression& expr) { return makeBinaryOp(sbe::EPrimBinary::fillEmpty, makeFunction("typeMatch", @@ -144,7 +193,7 @@ std::unique_ptr generateNullOrMissing(std::unique_ptr generateNullOrMissing(EvalExpr arg, StageBuilderState& state) { - auto expr = arg.extractExpr(state.slotVarMap, *state.data->env); + auto expr = arg.extractExpr(state.slotVarMap, *state.env); return generateNullOrMissingExpr(*expr); } @@ -153,7 +202,7 @@ std::unique_ptr generateNonNumericCheck(const sbe::EVariable& } std::unique_ptr generateNonNumericCheck(EvalExpr expr, StageBuilderState& state) { - return makeNot(makeFunction("isNumber", expr.extractExpr(state.slotVarMap, *state.data->env))); + return makeNot(makeFunction("isNumber", expr.extractExpr(state.slotVarMap, *state.env))); } std::unique_ptr generateLongLongMinCheck(const sbe::EVariable& var) { @@ -176,7 +225,7 @@ std::unique_ptr generateNaNCheck(const sbe::EVariable& var) { } std::unique_ptr generateNaNCheck(EvalExpr expr, StageBuilderState& state) { - return makeFunction("isNaN", expr.extractExpr(state.slotVarMap, *state.data->env)); + return makeFunction("isNaN", expr.extractExpr(state.slotVarMap, *state.env)); } std::unique_ptr generateInfinityCheck(const sbe::EVariable& var) { @@ -184,7 +233,7 @@ std::unique_ptr generateInfinityCheck(const sbe::EVariable& va } std::unique_ptr generateInfinityCheck(EvalExpr expr, StageBuilderState& state) { - return makeFunction("isInfinity"_sd, expr.extractExpr(state.slotVarMap, *state.data->env)); + return makeFunction("isInfinity"_sd, expr.extractExpr(state.slotVarMap, *state.env)); } std::unique_ptr generateNonPositiveCheck(const sbe::EVariable& var) { @@ -342,7 +391,7 @@ std::pair projectEvalExpr( // into a slot. 
auto slot = slotIdGenerator->generate(); stage = makeProject( - std::move(stage), planNodeId, slot, expr.extractExpr(state.slotVarMap, *state.data->env)); + std::move(stage), planNodeId, slot, expr.extractExpr(state.slotVarMap, *state.env)); return {slot, std::move(stage)}; } @@ -641,8 +690,8 @@ sbe::value::SlotId StageBuilderState::getGlobalVariableSlot(Variables::Id variab return it->second; } - auto slotId = data->env->registerSlot( - sbe::value::TypeTags::Nothing, 0, false /* owned */, slotIdGenerator); + auto slotId = + env->registerSlot(sbe::value::TypeTags::Nothing, 0, false /* owned */, slotIdGenerator); data->variableIdToSlotMap.emplace(variableId, slotId); return slotId; } @@ -693,7 +742,7 @@ void indexKeyCorruptionCheckCallback(OperationContext* opCtx, tassert(5113708, "KeyString does not exist", keyString); BSONObj bsonKeyPattern(sbe::value::bitcastTo(kpVal)); - auto bsonKeyString = KeyString::toBson(*keyString, Ordering::make(bsonKeyPattern)); + auto bsonKeyString = key_string::toBson(*keyString, Ordering::make(bsonKeyPattern)); auto hydratedKey = IndexKeyEntry::rehydrateKey(bsonKeyPattern, bsonKeyString); HealthLogEntry entry; @@ -731,12 +780,13 @@ void indexKeyCorruptionCheckCallback(OperationContext* opCtx, * or that the index keys are still part of the underlying index. */ bool indexKeyConsistencyCheckCallback(OperationContext* opCtx, - const StringMap& iamTable, + StringMap& entryMap, sbe::value::SlotAccessor* snapshotIdAccessor, - sbe::value::SlotAccessor* indexIdAccessor, + sbe::value::SlotAccessor* indexIdentAccessor, sbe::value::SlotAccessor* indexKeyAccessor, const CollectionPtr& collection, const Record& nextRecord) { + // The index consistency check is only performed when 'snapshotIdAccessor' is set. if (snapshotIdAccessor) { auto currentSnapshotId = opCtx->recoveryUnit()->getSnapshotId(); auto [snapshotIdTag, snapshotIdVal] = snapshotIdAccessor->getViewOfValue(); @@ -748,15 +798,15 @@ bool indexKeyConsistencyCheckCallback(OperationContext* opCtx, auto snapshotId = sbe::value::bitcastTo(snapshotIdVal); if (currentSnapshotId.toNumber() != snapshotId) { tassert(5290707, "Should have index key accessor", indexKeyAccessor); - tassert(5290714, "Should have index id accessor", indexIdAccessor); + tassert(5290714, "Should have index ident accessor", indexIdentAccessor); - auto [indexIdTag, indexIdVal] = indexIdAccessor->getViewOfValue(); + auto [identTag, identVal] = indexIdentAccessor->getViewOfValue(); auto [ksTag, ksVal] = indexKeyAccessor->getViewOfValue(); - const auto msgIndexIdTag = indexIdTag; + const auto msgIdentTag = identTag; tassert(5290708, - str::stream() << "Index name is of wrong type: " << msgIndexIdTag, - sbe::value::isString(indexIdTag)); + str::stream() << "Index name is of wrong type: " << msgIdentTag, + sbe::value::isString(identTag)); const auto msgKsTag = ksTag; tassert(5290710, @@ -764,24 +814,39 @@ bool indexKeyConsistencyCheckCallback(OperationContext* opCtx, ksTag == sbe::value::TypeTags::ksValue); auto keyString = sbe::value::getKeyStringView(ksVal); - auto indexId = sbe::value::getStringView(indexIdTag, indexIdVal); + auto indexIdent = sbe::value::getStringView(identTag, identVal); tassert(5290712, "KeyString does not exist", keyString); - auto it = iamTable.find(indexId); - tassert(5290713, - str::stream() << "IndexAccessMethod not found for index " << indexId, - it != iamTable.end()); + auto it = entryMap.find(indexIdent); + + // If 'entryMap' doesn't contain an entry for 'indexIdent', create one. 
+ if (it == entryMap.end()) { + auto indexCatalog = collection->getIndexCatalog(); + auto indexDesc = indexCatalog->findIndexByIdent(opCtx, indexIdent); + auto entry = indexDesc ? indexDesc->getEntry() : nullptr; + + // Throw an error if we can't get the IndexDescriptor or the IndexCatalogEntry + // (or if the index is dropped). + uassert(ErrorCodes::QueryPlanKilled, + str::stream() << "query plan killed :: index dropped: " << indexIdent, + indexDesc && entry && !entry->isDropped()); + + auto [newIt, _] = entryMap.emplace(indexIdent, entry); - auto iam = it->second->asSortedData(); + it = newIt; + } + + auto entry = it->second; + auto iam = entry->accessMethod()->asSortedData(); tassert(5290709, - str::stream() << "Expected to find SortedDataIndexAccessMethod for index " - << indexId, + str::stream() << "Expected to find SortedDataIndexAccessMethod for index: " + << indexIdent, iam); auto& executionCtx = StorageExecutionContext::get(opCtx); auto keys = executionCtx.keys(); SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // There's no need to compute the prefixes of the indexed fields that cause the // index to be multikey when ensuring the keyData is still valid. @@ -790,6 +855,7 @@ bool indexKeyConsistencyCheckCallback(OperationContext* opCtx, iam->getKeys(opCtx, collection, + entry, pooledBuilder, nextRecord.data.toBson(), InsertDeleteOptions::ConstraintEnforcementMode::kEnforceConstraints, @@ -802,6 +868,7 @@ bool indexKeyConsistencyCheckCallback(OperationContext* opCtx, return keys->count(*keyString); } } + return true; } @@ -810,13 +877,12 @@ std::unique_ptr makeLoopJoinForFetch(std::unique_ptr fields, sbe::value::SlotVector fieldSlots, - sbe::value::SlotId seekKeySlot, + sbe::value::SlotId seekRecordIdSlot, sbe::value::SlotId snapshotIdSlot, - sbe::value::SlotId indexIdSlot, + sbe::value::SlotId indexIdentSlot, sbe::value::SlotId indexKeySlot, sbe::value::SlotId indexKeyPatternSlot, const CollectionPtr& collToFetch, - StringMap iamMap, PlanNodeId planNodeId, sbe::value::SlotVector slotsToForward) { // It is assumed that we are generating a fetch loop join over the main collection. 
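The reworked indexKeyConsistencyCheckCallback() above drops the prebuilt IAM map in favor of an entry map keyed by index ident that is filled in lazily, and a dropped index now surfaces as a QueryPlanKilled error instead of a missing-map-entry tassert. A minimal, self-contained sketch of that lookup-or-insert pattern, with stub types standing in for the catalog objects and with the snapshot-id gating and key regeneration omitted:

```cpp
#include <map>
#include <set>
#include <stdexcept>
#include <string>

// Illustrative stand-ins for the catalog types; the real code uses IndexCatalog,
// IndexCatalogEntry and a StringMap keyed by the index ident.
struct IndexEntryStub {
    bool dropped = false;
    std::set<std::string> keysForFetchedDoc;  // keys the index would produce for the fetched doc
};

// Resolve the index by its ident only the first time it is seen, fail the plan if the index was
// dropped, then verify the saved key is still among the keys generated from the fetched document.
bool keyStillValid(std::map<std::string, const IndexEntryStub*>& entryMap,
                   const std::map<std::string, IndexEntryStub>& catalog,
                   const std::string& indexIdent,
                   const std::string& keyString) {
    auto it = entryMap.find(indexIdent);
    if (it == entryMap.end()) {
        auto catIt = catalog.find(indexIdent);
        if (catIt == catalog.end() || catIt->second.dropped) {
            throw std::runtime_error("query plan killed :: index dropped: " + indexIdent);
        }
        it = entryMap.emplace(indexIdent, &catIt->second).first;
    }
    return it->second->keysForFetchedDoc.count(keyString) > 0;
}

int main() {
    std::map<std::string, IndexEntryStub> catalog{{"idx-a-1", {false, {"key1"}}}};
    std::map<std::string, const IndexEntryStub*> entryMap;
    return keyStillValid(entryMap, catalog, "idx-a-1", "key1") ? 0 : 1;
}
```

The real callback additionally compares the current snapshot id first and regenerates the keys with the SortedDataIndexAccessMethod before the membership check, as shown in the hunk above.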
If we are @@ -824,26 +890,23 @@ std::unique_ptr makeLoopJoinForFetch(std::unique_ptr(collToFetch->uuid(), resultSlot, recordIdSlot, snapshotIdSlot, - indexIdSlot, + indexIdentSlot, indexKeySlot, indexKeyPatternSlot, boost::none, std::move(fields), std::move(fieldSlots), - seekKeySlot, - true, + seekRecordIdSlot, + boost::none /* minRecordIdSlot */, + boost::none /* maxRecordIdSlot */, + true /* forward */, nullptr, planNodeId, std::move(callbacks)); @@ -854,7 +917,8 @@ std::unique_ptr makeLoopJoinForFetch(std::unique_ptr(std::move(scanStage), 1, boost::none, planNodeId), std::move(slotsToForward), - sbe::makeSV(seekKeySlot, snapshotIdSlot, indexIdSlot, indexKeySlot, indexKeyPatternSlot), + sbe::makeSV( + seekRecordIdSlot, snapshotIdSlot, indexIdentSlot, indexKeySlot, indexKeyPatternSlot), nullptr, planNodeId); } @@ -869,8 +933,8 @@ sbe::value::SlotId StageBuilderState::registerInputParamSlot( return it->second; } - auto slotId = data->env->registerSlot( - sbe::value::TypeTags::Nothing, 0, false /* owned */, slotIdGenerator); + auto slotId = + env->registerSlot(sbe::value::TypeTags::Nothing, 0, false /* owned */, slotIdGenerator); data->inputParamToSlotMap.emplace(paramId, slotId); return slotId; } @@ -1193,12 +1257,12 @@ std::pair, sbe::value::SlotVector> projectFields tassert(7182002, "Expected DfsState to have at least 2 entries", dfs.size() >= 2); auto parent = dfs[dfs.size() - 2].first; - auto getFieldExpr = makeFunction( - "getField"_sd, - parent->value.hasSlot() - ? makeVariable(*parent->value.getSlot()) - : parent->value.extractExpr(state.slotVarMap, *state.data->env), - makeConstant(node->name)); + auto getFieldExpr = + makeFunction("getField"_sd, + parent->value.hasSlot() + ? makeVariable(*parent->value.getSlot()) + : parent->value.extractExpr(state.slotVarMap, *state.env), + makeConstant(node->name)); auto hasOneChildToVisit = [&] { size_t count = 0; diff --git a/src/mongo/db/query/sbe_stage_builder_helpers.h b/src/mongo/db/query/sbe_stage_builder_helpers.h index 79ea6cfddb4a6..6958a00a6dd60 100644 --- a/src/mongo/db/query/sbe_stage_builder_helpers.h +++ b/src/mongo/db/query/sbe_stage_builder_helpers.h @@ -29,22 +29,59 @@ #pragma once +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include #include +#include +#include #include +#include #include +#include #include - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/exec/sbe/abt/abt_lower_defs.h" #include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/makeobj_enums.h" #include "mongo/db/exec/sbe/match_path.h" #include "mongo/db/exec/sbe/stages/filter.h" #include "mongo/db/exec/sbe/stages/hash_agg.h" #include "mongo/db/exec/sbe/stages/makeobj.h" #include "mongo/db/exec/sbe/stages/project.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/optimizer/comparison_op.h" +#include "mongo/db/query/plan_yield_policy.h" #include 
"mongo/db/query/projection_ast.h" #include "mongo/db/query/sbe_stage_builder_eval_frame.h" #include "mongo/db/query/stage_types.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/string_map.h" namespace mongo::projection_ast { class Projection; @@ -53,6 +90,8 @@ class Projection; namespace mongo::stage_builder { class PlanStageSlots; +struct PlanStageEnvironment; +struct PlanStageStaticData; std::unique_ptr makeUnaryOp(sbe::EPrimUnary::Op unaryOp, std::unique_ptr operand); @@ -72,6 +111,11 @@ std::unique_ptr makeBinaryOp(sbe::EPrimBinary::Op binaryOp, std::unique_ptr rhs, sbe::RuntimeEnvironment* env); +std::unique_ptr makeBinaryOp(sbe::EPrimBinary::Op binaryOp, + std::unique_ptr lhs, + std::unique_ptr rhs, + PlanStageEnvironment& env); + std::unique_ptr makeIsMember(std::unique_ptr input, std::unique_ptr arr, std::unique_ptr collator = {}); @@ -80,6 +124,10 @@ std::unique_ptr makeIsMember(std::unique_ptr std::unique_ptr arr, sbe::RuntimeEnvironment* env); +std::unique_ptr makeIsMember(std::unique_ptr input, + std::unique_ptr arr, + PlanStageEnvironment& env); + /** * Generates an EExpression that checks if the input expression is null or missing. */ @@ -518,13 +566,12 @@ std::unique_ptr makeLoopJoinForFetch(std::unique_ptr fields, sbe::value::SlotVector fieldSlots, - sbe::value::SlotId seekKeySlot, + sbe::value::SlotId seekRecordIdSlot, sbe::value::SlotId snapshotIdSlot, - sbe::value::SlotId indexIdSlot, + sbe::value::SlotId indexIdentSlot, sbe::value::SlotId indexKeySlot, sbe::value::SlotId indexKeyPatternSlot, const CollectionPtr& collToFetch, - StringMap iamMap, PlanNodeId planNodeId, sbe::value::SlotVector slotsToForward); @@ -557,15 +604,14 @@ std::pair> makeIndexKeyIncl return {std::move(indexKeyBitset), std::move(keyFieldNames)}; } -struct PlanStageData; - /** * Common parameters to SBE stage builder functions extracted into separate class to simplify * argument passing. Also contains a mapping of global variable ids to slot ids. */ struct StageBuilderState { StageBuilderState(OperationContext* opCtx, - PlanStageData* data, + PlanStageEnvironment& env, + PlanStageStaticData* data, const Variables& variables, sbe::value::SlotIdGenerator* slotIdGenerator, sbe::value::FrameIdGenerator* frameIdGenerator, @@ -576,6 +622,7 @@ struct StageBuilderState { frameIdGenerator{frameIdGenerator}, spoolIdGenerator{spoolIdGenerator}, opCtx{opCtx}, + env{env}, data{data}, variables{variables}, needsMerge{needsMerge}, @@ -610,7 +657,8 @@ struct StageBuilderState { sbe::value::SpoolIdGenerator* const spoolIdGenerator; OperationContext* const opCtx; - PlanStageData* const data; + PlanStageEnvironment& env; + PlanStageStaticData* const data; const Variables& variables; // When the mongos splits $group stage and sends it to shards, it adds 'needsMerge'/'fromMongs' @@ -1166,4 +1214,27 @@ inline std::vector appendVectorUnique(std::vector lhs, std::vector rhs) return lhs; } +inline std::pair, std::unique_ptr> +makeKeyStringPair(const BSONObj& lowKey, + bool lowKeyInclusive, + const BSONObj& highKey, + bool highKeyInclusive, + key_string::Version version, + Ordering ordering, + bool forward) { + // Note that 'makeKeyFromBSONKeyForSeek()' is intended to compute the "start" key for an + // index scan. 
The logic for computing a "discriminator" for an "end" key is reversed, which + // is why we use 'makeKeyStringFromBSONKey()' to manually specify the discriminator for the + // end key. + return { + std::make_unique(IndexEntryComparison::makeKeyStringFromBSONKeyForSeek( + lowKey, version, ordering, forward, lowKeyInclusive)), + std::make_unique(IndexEntryComparison::makeKeyStringFromBSONKey( + highKey, + version, + ordering, + forward != highKeyInclusive ? key_string::Discriminator::kExclusiveBefore + : key_string::Discriminator::kExclusiveAfter))}; +} + } // namespace mongo::stage_builder diff --git a/src/mongo/db/query/sbe_stage_builder_index_scan.cpp b/src/mongo/db/query/sbe_stage_builder_index_scan.cpp index 1434480fc2981..40820e81101ae 100644 --- a/src/mongo/db/query/sbe_stage_builder_index_scan.cpp +++ b/src/mongo/db/query/sbe_stage_builder_index_scan.cpp @@ -28,34 +28,62 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/sbe_stage_builder_index_scan.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/stages/branch.h" -#include "mongo/db/exec/sbe/stages/co_scan.h" #include "mongo/db/exec/sbe/stages/filter.h" -#include "mongo/db/exec/sbe/stages/hash_agg.h" #include "mongo/db/exec/sbe/stages/ix_scan.h" -#include "mongo/db/exec/sbe/stages/limit_skip.h" #include "mongo/db/exec/sbe/stages/loop_join.h" -#include "mongo/db/exec/sbe/stages/makeobj.h" #include "mongo/db/exec/sbe/stages/project.h" -#include "mongo/db/exec/sbe/stages/spool.h" -#include "mongo/db/exec/sbe/stages/union.h" #include "mongo/db/exec/sbe/stages/unique.h" #include "mongo/db/exec/sbe/stages/unwind.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/match_expression_dependencies.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/interval_evaluation_tree.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/query/sbe_stage_builder_eval_frame.h" #include "mongo/db/query/sbe_stage_builder_filter.h" -#include "mongo/db/query/util/make_data_structure.h" +#include "mongo/db/query/sbe_stage_builder_index_scan.h" +#include "mongo/db/storage/sorted_data_interface.h" #include "mongo/logv2/log.h" -#include "mongo/util/overloaded_visitor.h" -#include +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/id_generator.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -203,6 +231,7 @@ PlanStageSlots 
buildPlanStageSlots(StageBuilderState& state, const BSONObj& keyPattern, sbe::value::SlotId recordIdSlot, boost::optional snapshotIdSlot, + boost::optional indexIdentSlot, boost::optional indexKeySlot) { PlanStageSlots outputs; @@ -213,25 +242,16 @@ PlanStageSlots buildPlanStageSlots(StageBuilderState& state, outputs.set(PlanStageSlots::kSnapshotId, *snapshotIdSlot); } + if (reqs.has(PlanStageSlots::kIndexIdent)) { + tassert(7566702, "Expected 'indexIdentSlot' to be set", indexIdentSlot.has_value()); + outputs.set(PlanStageSlots::kIndexIdent, *indexIdentSlot); + } + if (reqs.has(PlanStageSlots::kIndexKey)) { tassert(7104001, "Expected 'indexKeySlot' to be set", indexKeySlot.has_value()); outputs.set(PlanStageSlots::kIndexKey, *indexKeySlot); } - if (reqs.has(PlanStageSlots::kIndexId)) { - auto it = state.stringConstantToSlotMap.find(indexName); - - if (it != state.stringConstantToSlotMap.end()) { - outputs.set(PlanStageSlots::kIndexId, it->second); - } else { - auto [indexNameTag, indexNameVal] = sbe::value::makeNewString(indexName); - auto slot = state.data->env->registerSlot( - indexNameTag, indexNameVal, true, state.slotIdGenerator); - state.stringConstantToSlotMap[indexName] = slot; - outputs.set(PlanStageSlots::kIndexId, slot); - } - } - if (reqs.has(PlanStageSlots::kIndexKeyPattern)) { auto it = state.keyPatternToSlotMap.find(keyPattern); @@ -242,7 +262,7 @@ PlanStageSlots buildPlanStageSlots(StageBuilderState& state, sbe::value::copyValue(sbe::value::TypeTags::bsonObject, sbe::value::bitcastFrom(keyPattern.objdata())); auto slot = - state.data->env->registerSlot(bsonObjTag, bsonObjVal, true, state.slotIdGenerator); + state.env->registerSlot(bsonObjTag, bsonObjVal, true, state.slotIdGenerator); state.keyPatternToSlotMap[keyPattern] = slot; outputs.set(PlanStageSlots::kIndexKeyPattern, slot); } @@ -294,9 +314,9 @@ generateOptimizedMultiIntervalIndexScan(StageBuilderState& state, auto boundsSlot = [&] { if (intervals) { auto [boundsTag, boundsVal] = packIndexIntervalsInSbeArray(std::move(*intervals)); - return state.data->env->registerSlot(boundsTag, boundsVal, true, state.slotIdGenerator); + return state.env->registerSlot(boundsTag, boundsVal, true, state.slotIdGenerator); } else { - return state.data->env->registerSlot( + return state.env->registerSlot( sbe::value::TypeTags::Nothing, 0, true, state.slotIdGenerator); } }(); @@ -326,6 +346,9 @@ generateOptimizedMultiIntervalIndexScan(StageBuilderState& state, auto snapshotIdSlot = reqs.has(PlanStageSlots::kSnapshotId) ? boost::make_optional(slotIdGenerator->generate()) : boost::none; + auto indexIdentSlot = reqs.has(PlanStageSlots::kIndexIdent) + ? boost::make_optional(slotIdGenerator->generate()) + : boost::none; auto indexKeySlot = reqs.has(PlanStageSlots::kIndexKey) ? boost::make_optional(slotIdGenerator->generate()) : boost::none; @@ -336,6 +359,7 @@ generateOptimizedMultiIntervalIndexScan(StageBuilderState& state, indexKeySlot, recordIdSlot, snapshotIdSlot, + indexIdentSlot, indexKeysToInclude, std::move(indexKeySlots), makeVariable(lowKeySlot), @@ -343,8 +367,14 @@ generateOptimizedMultiIntervalIndexScan(StageBuilderState& state, yieldPolicy, planNodeId); - auto outputs = buildPlanStageSlots( - state, reqs, indexName, keyPattern, recordIdSlot, snapshotIdSlot, indexKeySlot); + auto outputs = buildPlanStageSlots(state, + reqs, + indexName, + keyPattern, + recordIdSlot, + snapshotIdSlot, + indexIdentSlot, + indexKeySlot); // Finally, get the keys from the outer side and feed them to the inner side (ixscan). 
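generateOptimizedMultiIntervalIndexScan() above packs the precomputed [lowKey, highKey] pairs into an SBE array, unwinds them on the outer side of a loop join, and re-opens the index seek on the inner side once per interval. A self-contained analogy of that shape, with a std::map standing in for the index and plain loops standing in for the unwind/loop-join/ixseek stages:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <utility>
#include <vector>

int main() {
    // A sorted "index" mapping key -> record id (a stand-in for the ixseek's underlying index).
    std::map<std::string, int> index{{"apple", 1}, {"banana", 2}, {"cherry", 3}, {"plum", 4}};

    // The outer side of the loop join yields one (lowKey, highKey) pair per interval.
    std::vector<std::pair<std::string, std::string>> intervals{{"apple", "banana"},
                                                               {"cherry", "cherry"}};

    // The inner side is re-opened for each pair and scans only the [lowKey, highKey] range.
    for (const auto& [lowKey, highKey] : intervals) {
        for (auto it = index.lower_bound(lowKey); it != index.end() && it->first <= highKey; ++it) {
            std::cout << it->first << " -> record " << it->second << '\n';
        }
    }
    return 0;
}
```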
return {sbe::makeS(std::move(project), @@ -375,7 +405,7 @@ generateGenericMultiIntervalIndexScan(StageBuilderState& state, const std::string& indexName, const IndexScanNode* ixn, const BSONObj& keyPattern, - KeyString::Version version, + key_string::Version version, Ordering ordering, sbe::IndexKeysInclusionSet indexKeysToInclude, sbe::value::SlotVector indexKeySlots, @@ -388,7 +418,7 @@ generateGenericMultiIntervalIndexScan(StageBuilderState& state, std::unique_ptr boundsExpr; if (hasDynamicIndexBounds) { - boundsSlot.emplace(state.data->env->registerSlot( + boundsSlot.emplace(state.env->registerSlot( sbe::value::TypeTags::Nothing, 0, true /* owned */, state.slotIdGenerator)); boundsExpr = makeVariable(*boundsSlot); } else { @@ -402,6 +432,9 @@ generateGenericMultiIntervalIndexScan(StageBuilderState& state, auto snapshotIdSlot = reqs.has(PlanStageSlots::kSnapshotId) ? boost::make_optional(state.slotIdGenerator->generate()) : boost::none; + auto indexIdentSlot = reqs.has(PlanStageSlots::kIndexIdent) + ? boost::make_optional(state.slotIdGenerator->generate()) + : boost::none; auto indexKeySlot = reqs.has(PlanStageSlots::kIndexKey) ? boost::make_optional(state.slotIdGenerator->generate()) : boost::none; @@ -413,13 +446,20 @@ generateGenericMultiIntervalIndexScan(StageBuilderState& state, indexKeySlot, recordIdSlot, snapshotIdSlot, + indexIdentSlot, indexKeysToInclude, indexKeySlots, yieldPolicy, ixn->nodeId()); - auto outputs = buildPlanStageSlots( - state, reqs, indexName, keyPattern, recordIdSlot, snapshotIdSlot, indexKeySlot); + auto outputs = buildPlanStageSlots(state, + reqs, + indexName, + keyPattern, + recordIdSlot, + snapshotIdSlot, + indexIdentSlot, + indexKeySlot); return {std::move(stage), std::move(outputs), boundsSlot}; } @@ -489,15 +529,6 @@ bool canGenerateSingleIntervalIndexScan(const std::vector, PlanStageSlots, boost::optional>> @@ -506,8 +537,8 @@ generateSingleIntervalIndexScan(StageBuilderState& state, const std::string& indexName, const BSONObj& keyPattern, bool forward, - std::unique_ptr lowKey, - std::unique_ptr highKey, + std::unique_ptr lowKey, + std::unique_ptr highKey, sbe::IndexKeysInclusionSet indexKeysToInclude, sbe::value::SlotVector indexKeySlots, const PlanStageReqs& reqs, @@ -521,10 +552,10 @@ generateSingleIntervalIndexScan(StageBuilderState& state, (lowKey && highKey) || (!lowKey && !highKey)); const bool shouldRegisterLowHighKeyInRuntimeEnv = !lowKey; - auto lowKeySlot = !lowKey ? boost::make_optional(state.data->env->registerSlot( + auto lowKeySlot = !lowKey ? boost::make_optional(state.env->registerSlot( sbe::value::TypeTags::Nothing, 0, true, slotIdGenerator)) : boost::none; - auto highKeySlot = !highKey ? boost::make_optional(state.data->env->registerSlot( + auto highKeySlot = !highKey ? boost::make_optional(state.env->registerSlot( sbe::value::TypeTags::Nothing, 0, true, slotIdGenerator)) : boost::none; @@ -536,6 +567,9 @@ generateSingleIntervalIndexScan(StageBuilderState& state, auto snapshotIdSlot = reqs.has(PlanStageSlots::kSnapshotId) ? boost::make_optional(slotIdGenerator->generate()) : boost::none; + auto indexIdentSlot = reqs.has(PlanStageSlots::kIndexIdent) + ? boost::make_optional(slotIdGenerator->generate()) + : boost::none; auto indexKeySlot = reqs.has(PlanStageSlots::kIndexKey) ? 
boost::make_optional(slotIdGenerator->generate()) : boost::none; @@ -549,6 +583,7 @@ generateSingleIntervalIndexScan(StageBuilderState& state, indexKeySlot, recordIdSlot, snapshotIdSlot, + indexIdentSlot, indexKeysToInclude, std::move(indexKeySlots), lowKeyExpr->clone(), @@ -557,8 +592,14 @@ generateSingleIntervalIndexScan(StageBuilderState& state, planNodeId, lowPriority); - auto outputs = buildPlanStageSlots( - state, reqs, indexName, keyPattern, recordIdSlot, snapshotIdSlot, indexKeySlot); + auto outputs = buildPlanStageSlots(state, + reqs, + indexName, + keyPattern, + recordIdSlot, + snapshotIdSlot, + indexIdentSlot, + indexKeySlot); // If low and high keys are provided in the runtime environment, then we need to create // a cfilter stage on top of project in order to be sure that the single interval @@ -587,7 +628,7 @@ std::pair, PlanStageSlots> generateIndexScan( const sbe::IndexKeysInclusionSet& originalFieldBitset, const sbe::IndexKeysInclusionSet& sortKeyBitset, PlanYieldPolicy* yieldPolicy, - StringMap* iamMap, + bool doIndexConsistencyCheck, bool needsCorruptionCheck) { auto indexName = ixn->index.identifier.catalogName; auto descriptor = collection->getIndexCatalog()->findIndexByName(state.opCtx, indexName); @@ -605,13 +646,7 @@ std::pair, PlanStageSlots> generateIndexScan( accessMethod->getSortedDataInterface()->getKeyStringVersion(), accessMethod->getSortedDataInterface()->getOrdering()); - auto keyPattern = descriptor->keyPattern(); - - // Add the access method corresponding to 'indexName' to the 'iamMap' if a parent stage needs to - // execute a consistency check. - if (iamMap) { - iamMap->insert({indexName, accessMethod}); - } + auto keyPattern = ixn->index.keyPattern; // Determine the set of fields from the index required to apply the filter and union those with // the set of fields from the index required by the parent stage. @@ -633,9 +668,9 @@ std::pair, PlanStageSlots> generateIndexScan( // requested. PlanStageReqs reqs; reqs.set(PlanStageSlots::kRecordId) - .setIf(PlanStageSlots::kSnapshotId, iamMap) - .setIf(PlanStageSlots::kIndexId, iamMap) - .setIf(PlanStageSlots::kIndexKey, iamMap) + .setIf(PlanStageSlots::kSnapshotId, doIndexConsistencyCheck) + .setIf(PlanStageSlots::kIndexIdent, doIndexConsistencyCheck) + .setIf(PlanStageSlots::kIndexKey, doIndexConsistencyCheck) .setIf(PlanStageSlots::kIndexKeyPattern, needsCorruptionCheck); PlanStageSlots outputs; @@ -727,7 +762,7 @@ std::pair, PlanStageSlots> generateIndexScan( IndexIntervals makeIntervalsFromIndexBounds(const IndexBounds& bounds, bool forward, - KeyString::Version version, + key_string::Version version, Ordering ordering) { auto lowKeyInclusive{IndexBounds::isStartIncludedInBound(bounds.boundInclusion)}; auto highKeyInclusive{IndexBounds::isEndIncludedInBound(bounds.boundInclusion)}; @@ -756,20 +791,8 @@ IndexIntervals makeIntervalsFromIndexBounds(const IndexBounds& bounds, "Generated interval [lowKey, highKey]", "lowKey"_attr = lowKey, "highKey"_attr = highKey); - // Note that 'makeKeyFromBSONKeyForSeek()' is intended to compute the "start" key for an - // index scan. The logic for computing a "discriminator" for an "end" key is reversed, which - // is why we use 'makeKeyStringFromBSONKey()' to manually specify the discriminator for the - // end key. 
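Both the removed comment here and the new makeKeyStringPair() helper it moved into (in sbe_stage_builder_helpers.h above) rest on the expression forward != highKeyInclusive to choose the end-key discriminator. A tiny sketch that just exercises that expression, with a stand-in enum for key_string::Discriminator; the interpretation in the comments is my reading of the original note (for a forward scan, kExclusiveAfter keeps entries equal to the high key and kExclusiveBefore drops them, with the choice flipped for backward scans), not a statement of the storage-layer contract:

```cpp
#include <cassert>

// Stand-in for key_string::Discriminator (only the two values used here).
enum class Discriminator { kExclusiveBefore, kExclusiveAfter };

// Mirrors the expression used when building the end key:
//   forward != highKeyInclusive ? kExclusiveBefore : kExclusiveAfter
Discriminator endKeyDiscriminator(bool forward, bool highKeyInclusive) {
    return forward != highKeyInclusive ? Discriminator::kExclusiveBefore
                                       : Discriminator::kExclusiveAfter;
}

int main() {
    // Forward scan: an inclusive high bound positions the end key after equal entries,
    // an exclusive one positions it before them; a backward scan flips the choice.
    assert(endKeyDiscriminator(true, true) == Discriminator::kExclusiveAfter);
    assert(endKeyDiscriminator(true, false) == Discriminator::kExclusiveBefore);
    assert(endKeyDiscriminator(false, true) == Discriminator::kExclusiveBefore);
    assert(endKeyDiscriminator(false, false) == Discriminator::kExclusiveAfter);
    return 0;
}
```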
- result.push_back( - {std::make_unique( - IndexEntryComparison::makeKeyStringFromBSONKeyForSeek( - lowKey, version, ordering, forward, lowKeyInclusive)), - std::make_unique(IndexEntryComparison::makeKeyStringFromBSONKey( - highKey, - version, - ordering, - forward != highKeyInclusive ? KeyString::Discriminator::kExclusiveBefore - : KeyString::Discriminator::kExclusiveAfter))}); + result.push_back(makeKeyStringPair( + lowKey, lowKeyInclusive, highKey, highKeyInclusive, version, ordering, forward)); } return result; } @@ -787,10 +810,10 @@ std::pair packIndexIntervalsInSbeArray( obj->reserve(2); obj->push_back("l"_sd, sbe::value::TypeTags::ksValue, - sbe::value::bitcastFrom(lowKey.release())); + sbe::value::bitcastFrom(lowKey.release())); obj->push_back("h"_sd, sbe::value::TypeTags::ksValue, - sbe::value::bitcastFrom(highKey.release())); + sbe::value::bitcastFrom(highKey.release())); guard.reset(); arr->push_back(tag, val); } @@ -805,7 +828,7 @@ std::pair, PlanStageSlots> generateIndexScanWith const sbe::IndexKeysInclusionSet& originalFieldBitset, const sbe::IndexKeysInclusionSet& sortKeyBitset, PlanYieldPolicy* yieldPolicy, - StringMap* iamMap, + bool doIndexConsistencyCheck, bool needsCorruptionCheck) { const bool forward = ixn->direction == 1; auto indexName = ixn->index.identifier.catalogName; @@ -822,13 +845,7 @@ std::pair, PlanStageSlots> generateIndexScanWith sbe::value::SlotId recordIdSlot; ParameterizedIndexScanSlots parameterizedScanSlots; - auto keyPattern = descriptor->keyPattern(); - - // Add the access method corresponding to 'indexName' to the 'iamMap' if a parent stage needs to - // execute a consistency check. - if (iamMap) { - iamMap->insert({indexName, accessMethod}); - } + auto keyPattern = ixn->index.keyPattern; // Determine the set of fields from the index required to apply the filter and union those with // the set of fields from the index required by the parent stage. @@ -850,9 +867,9 @@ std::pair, PlanStageSlots> generateIndexScanWith // requested. PlanStageReqs reqs; reqs.set(PlanStageSlots::kRecordId) - .setIf(PlanStageSlots::kSnapshotId, iamMap) - .setIf(PlanStageSlots::kIndexId, iamMap) - .setIf(PlanStageSlots::kIndexKey, iamMap) + .setIf(PlanStageSlots::kSnapshotId, doIndexConsistencyCheck) + .setIf(PlanStageSlots::kIndexIdent, doIndexConsistencyCheck) + .setIf(PlanStageSlots::kIndexKey, doIndexConsistencyCheck) .setIf(PlanStageSlots::kIndexKeyPattern, needsCorruptionCheck); PlanStageSlots outputs; @@ -937,9 +954,9 @@ std::pair, PlanStageSlots> generateIndexScanWith mergeThenElseBranches(PlanStageSlots::kRecordId); recordIdSlot = outputs.get(PlanStageSlots::kRecordId); - if (iamMap) { + if (doIndexConsistencyCheck) { mergeThenElseBranches(PlanStageSlots::kSnapshotId); - mergeThenElseBranches(PlanStageSlots::kIndexId); + mergeThenElseBranches(PlanStageSlots::kIndexIdent); mergeThenElseBranches(PlanStageSlots::kIndexKey); } @@ -949,7 +966,7 @@ std::pair, PlanStageSlots> generateIndexScanWith // Generate a branch stage that will either execute an optimized or a generic index scan // based on the condition in the slot 'isGenericScanSlot'. 
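The branch described in the comment above keys off 'isGenericScanSlot', which is registered in the runtime environment as Nothing at build time and only bound to a concrete value when the cached plan is prepared for execution. A toy, standard-library-only sketch of that build-once / bind-per-execution pattern, with a std::map standing in for the runtime environment:

```cpp
#include <iostream>
#include <map>
#include <optional>
#include <string>

int main() {
    // Stand-in for the runtime environment: the slot is registered with no value at build time
    // and bound to a concrete value before each execution.
    std::map<std::string, std::optional<bool>> runtimeEnv;
    runtimeEnv["isGenericScan"] = std::nullopt;

    // The "plan" is built once; which branch runs is decided only when the slot is bound.
    auto runPlan = [&runtimeEnv]() {
        if (runtimeEnv.at("isGenericScan").value_or(false)) {
            std::cout << "generic multi-interval index scan\n";
        } else {
            std::cout << "optimized index scan\n";
        }
    };

    runtimeEnv["isGenericScan"] = false;  // bound for one set of query parameters
    runPlan();
    runtimeEnv["isGenericScan"] = true;  // re-bound for another
    runPlan();
    return 0;
}
```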
- auto isGenericScanSlot = state.data->env->registerSlot( + auto isGenericScanSlot = state.env->registerSlot( sbe::value::TypeTags::Nothing, 0, true /* owned */, state.slotIdGenerator); auto isGenericScanCondition = makeVariable(isGenericScanSlot); stage = sbe::makeS(std::move(genericStage), diff --git a/src/mongo/db/query/sbe_stage_builder_index_scan.h b/src/mongo/db/query/sbe_stage_builder_index_scan.h index 67e3b0158170a..3e7cd985d8979 100644 --- a/src/mongo/db/query/sbe_stage_builder_index_scan.h +++ b/src/mongo/db/query/sbe_stage_builder_index_scan.h @@ -29,12 +29,27 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/collection_helpers.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/storage/key_string.h" namespace mongo::stage_builder { @@ -45,7 +60,7 @@ class PlanStageSlots; * A list of low and high key values representing ranges over a particular index. */ using IndexIntervals = - std::vector, std::unique_ptr>>; + std::vector, std::unique_ptr>>; /** * This method returns a pair containing: (1) an SBE plan stage tree implementing an index scan; @@ -60,9 +75,35 @@ std::pair, PlanStageSlots> generateIndexScan( const sbe::IndexKeysInclusionSet& fieldBitset, const sbe::IndexKeysInclusionSet& sortKeyBitset, PlanYieldPolicy* yieldPolicy, - StringMap* iamMap, + bool doIndexConsistencyCheck, bool needsCorruptionCheck); +/** + * Constructs the most simple version of an index scan from the single interval index bounds. + * + * In case when the 'lowKey' and 'highKey' are not specified, slots will be registered for them in + * the runtime environment and their slot ids returned as a pair in the third element of the tuple. + * + * If 'indexKeySlot' is provided, then the corresponding slot will be filled out with each KeyString + * in the index. + */ +std::tuple, + PlanStageSlots, + boost::optional>> +generateSingleIntervalIndexScan(StageBuilderState& state, + const CollectionPtr& collection, + const std::string& indexName, + const BSONObj& keyPattern, + bool forward, + std::unique_ptr lowKey, + std::unique_ptr highKey, + sbe::IndexKeysInclusionSet indexKeysToInclude, + sbe::value::SlotVector indexKeySlots, + const PlanStageReqs& reqs, + PlanYieldPolicy* yieldPolicy, + PlanNodeId planNodeId, + bool lowPriority); + /** * Constructs low/high key values from the given index 'bounds' if they can be represented either as * a single interval between the low and high keys, or multiple single intervals.
If index bounds @@ -70,7 +111,7 @@ std::pair, PlanStageSlots> generateIndexScan( */ IndexIntervals makeIntervalsFromIndexBounds(const IndexBounds& bounds, bool forward, - KeyString::Version version, + key_string::Version version, Ordering ordering); /** @@ -98,6 +139,6 @@ std::pair, PlanStageSlots> generateIndexScanWith const sbe::IndexKeysInclusionSet& fieldBitset, const sbe::IndexKeysInclusionSet& sortKeyBitset, PlanYieldPolicy* yieldPolicy, - StringMap* iamMap, + bool doIndexConsistencyCheck, bool needsCorruptionCheck); } // namespace mongo::stage_builder diff --git a/src/mongo/db/query/sbe_stage_builder_lookup.cpp b/src/mongo/db/query/sbe_stage_builder_lookup.cpp index 77ca217ef615f..39150063254a7 100644 --- a/src/mongo/db/query/sbe_stage_builder_lookup.cpp +++ b/src/mongo/db/query/sbe_stage_builder_lookup.cpp @@ -28,34 +28,68 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/sbe_stage_builder.h" - -#include - +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/curop.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" #include "mongo/db/exec/sbe/stages/branch.h" +#include "mongo/db/exec/sbe/stages/filter.h" #include "mongo/db/exec/sbe/stages/hash_agg.h" #include "mongo/db/exec/sbe/stages/hash_lookup.h" #include "mongo/db/exec/sbe/stages/ix_scan.h" -#include "mongo/db/exec/sbe/stages/limit_skip.h" #include "mongo/db/exec/sbe/stages/loop_join.h" +#include "mongo/db/exec/sbe/stages/makeobj.h" +#include "mongo/db/exec/sbe/stages/project.h" #include "mongo/db/exec/sbe/stages/scan.h" +#include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/stages/union.h" #include "mongo/db/exec/sbe/stages/unique.h" #include "mongo/db/exec/sbe/stages/unwind.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/sbe/vm/vm.h" +#include "mongo/db/field_ref.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/db/query/sbe_stage_builder_coll_scan.h" -#include "mongo/db/query/sbe_stage_builder_expression.h" -#include "mongo/db/query/sbe_stage_builder_filter.h" +#include "mongo/db/index_names.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/query/bson_typemask.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_yield_policy_sbe.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sbe_stage_builder.h" +#include "mongo/db/query/sbe_stage_builder_eval_frame.h" #include "mongo/db/query/sbe_stage_builder_helpers.h" -#include "mongo/db/query/sbe_stage_builder_index_scan.h" -#include "mongo/db/query/sbe_stage_builder_projection.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/query/util/make_data_structure.h" -#include "mongo/logv2/log.h" - -#include "mongo/db/query/sbe_stage_builder_filter.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/platform/atomic_word.h" +#include 
"mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -703,13 +737,11 @@ std::pair> buildIndexJoinLookupStage( const FieldPath& foreignFieldName, const CollectionPtr& foreignColl, const IndexEntry& index, - StringMap& iamMap, PlanYieldPolicySBE* yieldPolicy, boost::optional collatorSlot, const PlanNodeId nodeId, SlotIdGenerator& slotIdGenerator, - FrameIdGenerator& frameIdGenerator, - RuntimeEnvironment* env) { + FrameIdGenerator& frameIdGenerator) { CurOp::get(state.opCtx)->debug().indexedLoopJoin += 1; const auto foreignCollUUID = foreignColl->uuid(); @@ -724,7 +756,6 @@ std::pair> buildIndexJoinLookupStage( foreignColl->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData(); const auto indexVersion = indexAccessMethod->getSortedDataInterface()->getKeyStringVersion(); const auto indexOrdering = indexAccessMethod->getSortedDataInterface()->getOrdering(); - iamMap.insert({indexName, indexAccessMethod}); // Build the outer branch that produces the correlated local key slot. auto [localKeysSetSlot, localKeysSetStage] = buildKeySet(JoinSide::Local, @@ -836,16 +867,15 @@ std::pair> buildIndexJoinLookupStage( // Calculate the low key and high key of each individual local field. They are stored in // 'lowKeySlot' and 'highKeySlot', respectively. These two slots will be made available in - // the loop join stage to perform index seek. We also set 'indexIdSlot' and - // 'indexKeyPatternSlot' constants for the seek stage later to perform consistency check. + // the loop join stage to perform index seek. We also set the 'indexKeyPatternSlot' constant + // for the seek stage later to perform consistency check. auto lowKeySlot = slotIdGenerator.generate(); auto highKeySlot = slotIdGenerator.generate(); - auto indexIdSlot = slotIdGenerator.generate(); auto indexKeyPatternSlot = slotIdGenerator.generate(); auto [_, indexKeyPatternValue] = copyValue(TypeTags::bsonObject, bitcastFrom(index.keyPattern.objdata())); - auto makeNewKeyStringCall = [&](KeyString::Discriminator discriminator) { + auto makeNewKeyStringCall = [&](key_string::Discriminator discriminator) { StringData functionName = "ks"; EExpression::Vector args; args.emplace_back( @@ -864,11 +894,9 @@ std::pair> buildIndexJoinLookupStage( makeProjectStage(std::move(valueGeneratorStage), nodeId, lowKeySlot, - makeNewKeyStringCall(KeyString::Discriminator::kExclusiveBefore), + makeNewKeyStringCall(key_string::Discriminator::kExclusiveBefore), highKeySlot, - makeNewKeyStringCall(KeyString::Discriminator::kExclusiveAfter), - indexIdSlot, - makeConstant(indexName), + makeNewKeyStringCall(key_string::Discriminator::kExclusiveAfter), indexKeyPatternSlot, makeConstant(value::TypeTags::bsonObject, indexKeyPatternValue)); @@ -889,12 +917,14 @@ std::pair> buildIndexJoinLookupStage( auto foreignRecordIdSlot = slotIdGenerator.generate(); auto indexKeySlot = slotIdGenerator.generate(); auto snapshotIdSlot = slotIdGenerator.generate(); + auto indexIdentSlot = slotIdGenerator.generate(); auto ixScanStage = makeS(foreignCollUUID, indexName, true /* forward */, indexKeySlot, foreignRecordIdSlot, snapshotIdSlot, + indexIdentSlot, IndexKeysInclusionSet{} /* indexKeysToInclude */, makeSV() /* vars */, makeVariable(lowKeySlot), @@ -907,7 +937,7 @@ std::pair> buildIndexJoinLookupStage( auto ixScanNljStage = makeS(std::move(indexBoundKeyStage), std::move(ixScanStage), - makeSV(indexIdSlot, indexKeyPatternSlot) /* outerProjects */, + makeSV(indexKeyPatternSlot) /* 
outerProjects */, makeSV(lowKeySlot, highKeySlot) /* outerCorrelated */, nullptr /* predicate */, nodeId); @@ -926,9 +956,8 @@ std::pair> buildIndexJoinLookupStage( // Loop join the foreign record id produced by the index seek on the outer side with seek // stage on the inner side to get matched foreign documents. The foreign documents are - // stored in 'foreignRecordSlot'. We also pass in 'snapshotIdSlot', 'indexIdSlot', - // 'indexKeySlot' and 'indexKeyPatternSlot' to perform index consistency check during the - // seek. + // stored in 'foreignRecordSlot'. We also pass in 'snapshotIdSlot', 'indexIdentSlot', + // 'indexKeySlot' and 'indexKeyPatternSlot' to perform index consistency check during the seek. auto foreignRecordSlot = slotIdGenerator.generate(); auto scanNljStage = makeLoopJoinForFetch(std::move(ixScanNljStage), foreignRecordSlot, @@ -937,11 +966,10 @@ std::pair> buildIndexJoinLookupStage( makeSV(), foreignRecordIdSlot, snapshotIdSlot, - indexIdSlot, + indexIdentSlot, indexKeySlot, indexKeyPatternSlot, foreignColl, - iamMap, nodeId, makeSV() /* slotsToForward */); @@ -1169,7 +1197,7 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder const auto& foreignColl = _collections.lookupCollection(NamespaceString(eqLookupNode->foreignCollection)); - boost::optional collatorSlot = _state.data->env->getSlotIfExists("collator"_sd); + boost::optional collatorSlot = _state.env->getSlotIfExists("collator"_sd); switch (eqLookupNode->lookupStrategy) { // When foreign collection doesn't exist, we create stages that simply append empty // arrays to each local document and do not consider the case that foreign collection @@ -1193,13 +1221,11 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder eqLookupNode->joinFieldForeign, foreignColl, *eqLookupNode->idxEntry, - _data.iamMap, _yieldPolicy, collatorSlot, eqLookupNode->nodeId(), _slotIdGenerator, - _frameIdGenerator, - _data.env); + _frameIdGenerator); } case EqLookupNode::LookupStrategy::kNestedLoopJoin: case EqLookupNode::LookupStrategy::kHashJoin: { @@ -1209,21 +1235,24 @@ std::pair, PlanStageSlots> SlotBasedStageBuilder auto foreignResultSlot = _slotIdGenerator.generate(); auto foreignRecordIdSlot = _slotIdGenerator.generate(); - auto foreignStage = makeS(foreignColl->uuid(), - foreignResultSlot, - foreignRecordIdSlot, - boost::none /* snapshotIdSlot */, - boost::none /* indexIdSlot */, - boost::none /* indexKeySlot */, - boost::none /* indexKeyPatternSlot */, - boost::none /* tsSlot */, - std::vector{} /* fields */, - makeSV() /* vars */, - boost::none /* seekKeySlot */, - true /* forward */, - _yieldPolicy, - eqLookupNode->nodeId(), - ScanCallbacks{}); + auto foreignStage = + makeS(foreignColl->uuid(), + foreignResultSlot, + foreignRecordIdSlot, + boost::none /* snapshotIdSlot */, + boost::none /* indexIdentSlot */, + boost::none /* indexKeySlot */, + boost::none /* indexKeyPatternSlot */, + boost::none /* oplogTsSlot */, + std::vector{} /* scanFieldNames */, + makeSV() /* scanFieldSlots */, + boost::none /* seekRecordIdSlot */, + boost::none /* minRecordIdSlot */, + boost::none /* maxRecordIdSlot */, + true /* forward */, + _yieldPolicy, + eqLookupNode->nodeId(), + ScanCallbacks{}); return buildLookupStage(_state, eqLookupNode->lookupStrategy, diff --git a/src/mongo/db/query/sbe_stage_builder_lookup_test.cpp b/src/mongo/db/query/sbe_stage_builder_lookup_test.cpp index d6ce29790d128..6198628a4d789 100644 --- a/src/mongo/db/query/sbe_stage_builder_lookup_test.cpp +++ b/src/mongo/db/query/sbe_stage_builder_lookup_test.cpp @@ -31,18 +31,45 @@ * 
This file contains tests for building execution stages that implement $lookup operator. */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/exec/sbe/sbe_plan_stage_test.h" -#include "mongo/db/exec/sbe/stages/loop_join.h" -#include "mongo/db/pipeline/document_source_lookup.h" -#include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/sbe/expressions/compile_ctx.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/util/debug_print.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/query_solution.h" -#include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_test_fixture.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/util/assert_util.h" +#include "mongo/db/query/shard_filterer_factory_interface.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" namespace mongo::sbe { namespace { @@ -104,7 +131,7 @@ class LookupStageBuilderTest : public SbeStageBuilderTestFixture { const std::string& asKey) { // Documents from the local collection are provided using collection scan. auto localScanNode = std::make_unique(); - localScanNode->name = _nss.toString(); + localScanNode->nss = _nss; // Construct logical query solution. 
auto lookupNode = std::make_unique(std::move(localScanNode), @@ -132,7 +159,7 @@ class LookupStageBuilderTest : public SbeStageBuilderTestFixture { auto ctx = makeCompileCtx(); prepareTree(ctx.get(), stage.get()); - auto resultSlot = data.outputs.get(stage_builder::PlanStageSlots::kResult); + auto resultSlot = data.staticData->outputs.get(stage_builder::PlanStageSlots::kResult); SlotAccessor* resultSlotAccessor = stage->getAccessor(*ctx, resultSlot); return CompiledTree{std::move(stage), std::move(data), std::move(ctx), resultSlotAccessor}; @@ -150,8 +177,11 @@ class LookupStageBuilderTest : public SbeStageBuilderTestFixture { << std::endl; } - AutoGetCollection localColl(operationContext(), _nss, LockMode::MODE_IS); - AutoGetCollection foreignColl(operationContext(), _foreignNss, LockMode::MODE_IS); + AutoGetCollection localColl( + operationContext(), + _nss, + LockMode::MODE_IS, + AutoGetCollection::Options{}.secondaryNssOrUUIDs({_foreignNss})); MultipleCollectionAccessor colls(operationContext(), &localColl.getCollection(), diff --git a/src/mongo/db/query/sbe_stage_builder_projection.cpp b/src/mongo/db/query/sbe_stage_builder_projection.cpp index 66e122af08540..2254eab8f7f29 100644 --- a/src/mongo/db/query/sbe_stage_builder_projection.cpp +++ b/src/mongo/db/query/sbe_stage_builder_projection.cpp @@ -27,30 +27,36 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/sbe_stage_builder_projection.h" - -#include "mongo/base/exact_cast.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/makeobj_enums.h" #include "mongo/db/exec/sbe/makeobj_spec.h" -#include "mongo/db/exec/sbe/stages/branch.h" -#include "mongo/db/exec/sbe/stages/co_scan.h" -#include "mongo/db/exec/sbe/stages/filter.h" -#include "mongo/db/exec/sbe/stages/limit_skip.h" -#include "mongo/db/exec/sbe/stages/loop_join.h" -#include "mongo/db/exec/sbe/stages/makeobj.h" -#include "mongo/db/exec/sbe/stages/project.h" -#include "mongo/db/exec/sbe/stages/traverse.h" -#include "mongo/db/exec/sbe/stages/union.h" -#include "mongo/db/exec/sbe/values/bson.h" -#include "mongo/db/matcher/expression_array.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/projection_ast.h" +#include "mongo/db/query/projection_ast_visitor.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_expression.h" -#include "mongo/db/query/sbe_stage_builder_filter.h" +#include "mongo/db/query/sbe_stage_builder_projection.h" #include "mongo/db/query/tree_walker.h" -#include "mongo/db/query/util/make_data_structure.h" -#include "mongo/util/overloaded_visitor.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo::stage_builder { namespace { @@ -87,7 +93,7 @@ struct ProjectionTraversalVisitorContext { return inputExpr.clone(); } std::unique_ptr getInputExpr() const { - return inputExpr.getExpr(state.slotVarMap, *state.data->env); + return inputExpr.getExpr(state.slotVarMap, *state.env); } EvalExpr extractInputEvalExpr() { diff --git a/src/mongo/db/query/sbe_stage_builder_projection.h b/src/mongo/db/query/sbe_stage_builder_projection.h index f2ee1646492b8..340745044e604 100644 --- a/src/mongo/db/query/sbe_stage_builder_projection.h +++ 
b/src/mongo/db/query/sbe_stage_builder_projection.h @@ -31,6 +31,7 @@ #include "mongo/db/exec/sbe/expressions/expression.h" #include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/projection.h" diff --git a/src/mongo/db/query/sbe_stage_builder_test.cpp b/src/mongo/db/query/sbe_stage_builder_test.cpp index bb18e0a9c2ec4..eaf4ca944958c 100644 --- a/src/mongo/db/query/sbe_stage_builder_test.cpp +++ b/src/mongo/db/query/sbe_stage_builder_test.cpp @@ -27,13 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/sbe/values/slot.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/exec/shard_filterer_mock.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_test_fixture.h" +#include "mongo/db/query/shard_filterer_factory_interface.h" #include "mongo/db/query/shard_filterer_factory_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -63,7 +77,7 @@ TEST_F(SbeStageBuilderTest, TestVirtualScan) { auto shardFiltererInterface = makeAlwaysPassShardFiltererInterface(); auto [resultSlots, stage, data, _] = buildPlanStage(std::move(querySolution), true, std::move(shardFiltererInterface)); - auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots); int64_t index = 0; for (auto st = stage->getNext(); st == sbe::PlanState::ADVANCED; st = stage->getNext()) { @@ -103,7 +117,7 @@ TEST_F(SbeStageBuilderTest, TestLimitOneVirtualScan) { buildPlanStage(std::move(querySolution), true, std::move(shardFiltererInterface)); // Prepare the sbe::PlanStage for execution. - auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots); int64_t index = 0; for (auto st = stage->getNext(); st == sbe::PlanState::ADVANCED; st = stage->getNext()) { @@ -139,7 +153,7 @@ TEST_F(SbeStageBuilderTest, VirtualCollScanWithoutRecordId) { buildPlanStage(std::move(querySolution), false, std::move(shardFiltererInterface)); // Prepare the sbe::PlanStage for execution. 
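The test updates in this hunk and the fixture changes that follow switch from data.ctx and data.outputs to data.env.ctx and data.staticData->outputs, matching the StageBuilderState change earlier in the diff that splits the old PlanStageData into an environment plus static data. A hedged sketch of that split; the member names and types below are stand-ins inferred from these call sites, not the real declarations:

```cpp
#include <string>

// Before: one struct carried both per-execution and build-time members.
struct PlanStageDataOldSketch {
    std::string ctx;  // per-execution compile context (stand-in)
    int resultSlot;   // build-time slot mapping (stand-in for 'outputs')
};

// After (as this diff suggests): an environment part held by value and a static-data part
// held by pointer, so the build-time constants can be shared across executions.
struct EnvironmentSketch {
    std::string ctx;
};
struct StaticDataSketch {
    int resultSlot = 0;
};
struct PlanStageDataNewSketch {
    EnvironmentSketch env;
    StaticDataSketch* staticData;
};

int main() {
    StaticDataSketch shared{42};
    PlanStageDataNewSketch data{{"compile-ctx"}, &shared};
    // Call sites change from data.ctx / data.outputs to data.env.ctx / data.staticData->outputs.
    return data.staticData->resultSlot == 42 ? 0 : 1;
}
```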
- auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots); ASSERT_EQ(resultAccessors.size(), 1u); int64_t index = 0; @@ -169,7 +183,7 @@ TEST_F(SbeStageBuilderTest, VirtualIndexScan) { auto shardFiltererInterface = makeAlwaysPassShardFiltererInterface(); auto [resultSlots, stage, data, _] = buildPlanStage(std::move(querySolution), true, std::move(shardFiltererInterface)); - auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots); ASSERT_EQ(resultAccessors.size(), 2u); int64_t index = 0; @@ -203,7 +217,7 @@ TEST_F(SbeStageBuilderTest, VirtualIndexScanWithoutRecordId) { auto shardFiltererInterface = makeAlwaysPassShardFiltererInterface(); auto [resultSlots, stage, data, _] = buildPlanStage(std::move(querySolution), false, std::move(shardFiltererInterface)); - auto resultAccessors = prepareTree(&data.ctx, stage.get(), resultSlots); + auto resultAccessors = prepareTree(&data.env.ctx, stage.get(), resultSlots); ASSERT_EQ(resultAccessors.size(), 1u); int64_t index = 0; diff --git a/src/mongo/db/query/sbe_stage_builder_test_fixture.cpp b/src/mongo/db/query/sbe_stage_builder_test_fixture.cpp index a90624f7431ea..5d3885708ebe7 100644 --- a/src/mongo/db/query/sbe_stage_builder_test_fixture.cpp +++ b/src/mongo/db/query/sbe_stage_builder_test_fixture.cpp @@ -27,15 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_mock.h" -#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/exec/sbe/expressions/expression.h" +#include "mongo/db/exec/sbe/expressions/runtime_environment.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/keypattern.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/mock_yield_policies.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/sbe_stage_builder_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { std::unique_ptr SbeStageBuilderTestFixture::makeQuerySolution( @@ -77,8 +89,7 @@ SbeStageBuilderTestFixture::buildPlanStage( stage_builder::SlotBasedStageBuilder builder{ operationContext(), colls, *statusWithCQ.getValue(), *querySolution, getYieldPolicy()}; - auto stage = builder.build(querySolution->root()); - auto data = builder.getPlanStageData(); + auto [stage, data] = builder.build(querySolution->root()); // Reset "shardFilterer". if (auto shardFiltererSlot = data.env->getSlotIfExists("shardFilterer"_sd); @@ -92,9 +103,9 @@ SbeStageBuilderTestFixture::buildPlanStage( auto slots = sbe::makeSV(); if (hasRecordId) { - slots.push_back(data.outputs.get(stage_builder::PlanStageSlots::kRecordId)); + slots.push_back(data.staticData->outputs.get(stage_builder::PlanStageSlots::kRecordId)); } - slots.push_back(data.outputs.get(stage_builder::PlanStageSlots::kResult)); + slots.push_back(data.staticData->outputs.get(stage_builder::PlanStageSlots::kResult)); // 'expCtx' owns the collator and a collator slot is registered into the runtime environment // while creating 'builder'. 
So, the caller should retain the 'expCtx' until the execution is diff --git a/src/mongo/db/query/sbe_stage_builder_test_fixture.h b/src/mongo/db/query/sbe_stage_builder_test_fixture.h index a19ec93d1e753..c509de84a0fee 100644 --- a/src/mongo/db/query/sbe_stage_builder_test_fixture.h +++ b/src/mongo/db/query/sbe_stage_builder_test_fixture.h @@ -29,12 +29,22 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/db/catalog/collection.h" #include "mongo/db/exec/sbe/sbe_plan_stage_test.h" #include "mongo/db/exec/sbe/stages/stages.h" #include "mongo/db/exec/sbe/values/slot.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/multiple_collection_accessor.h" #include "mongo/db/query/query_solution.h" +#include "mongo/db/query/sbe_stage_builder.h" #include "mongo/db/query/shard_filterer_factory_interface.h" #include "mongo/unittest/unittest.h" diff --git a/src/mongo/db/query/sbe_stage_builder_type_checker.cpp b/src/mongo/db/query/sbe_stage_builder_type_checker.cpp index aa8aa52e1ea6d..57e3df71c2e9b 100644 --- a/src/mongo/db/query/sbe_stage_builder_type_checker.cpp +++ b/src/mongo/db/query/sbe_stage_builder_type_checker.cpp @@ -29,9 +29,19 @@ #include "mongo/db/query/sbe_stage_builder_type_checker.h" -#include - +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" #include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/util/assert_util.h" namespace mongo::stage_builder { diff --git a/src/mongo/db/query/sbe_stage_builder_type_checker.h b/src/mongo/db/query/sbe_stage_builder_type_checker.h index 7a7948bb011fd..3b9927947227c 100644 --- a/src/mongo/db/query/sbe_stage_builder_type_checker.h +++ b/src/mongo/db/query/sbe_stage_builder_type_checker.h @@ -29,10 +29,16 @@ #pragma once +#include +#include #include +#include #include "mongo/db/exec/sbe/values/value.h" -#include "mongo/db/query/optimizer/node.h" +#include "mongo/db/query/optimizer/defs.h" +#include "mongo/db/query/optimizer/node.h" // IWYU pragma: keep +#include "mongo/db/query/optimizer/syntax/expr.h" +#include "mongo/db/query/optimizer/syntax/syntax.h" #include "mongo/stdx/unordered_map.h" namespace mongo::stage_builder { diff --git a/src/mongo/db/query/sbe_stage_builder_type_checker_test.cpp b/src/mongo/db/query/sbe_stage_builder_type_checker_test.cpp index ed26f84a5d075..391286f4d96af 100644 --- a/src/mongo/db/query/sbe_stage_builder_type_checker_test.cpp +++ b/src/mongo/db/query/sbe_stage_builder_type_checker_test.cpp @@ -27,9 +27,18 @@ * it in the license file. 
*/ -#include "mongo/db/query/optimizer/utils/unit_test_utils.h" +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/optimizer/algebra/operator.h" +#include "mongo/db/query/optimizer/algebra/polyvalue.h" +#include "mongo/db/query/optimizer/comparison_op.h" #include "mongo/db/query/sbe_stage_builder_type_checker.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::stage_builder { namespace { diff --git a/src/mongo/db/query/sbe_sub_planner.cpp b/src/mongo/db/query/sbe_sub_planner.cpp index 4082e7bec398b..e1b79820b71d1 100644 --- a/src/mongo/db/query/sbe_sub_planner.cpp +++ b/src/mongo/db/query/sbe_sub_planner.cpp @@ -26,16 +26,27 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" +// IWYU pragma: no_include "ext/alloc_traits.h" -#include "mongo/db/query/sbe_sub_planner.h" -#include "mongo/db/query/collection_query_info.h" -#include "mongo/db/query/plan_cache_key_factory.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/db/exec/plan_cache_util.h" +#include "mongo/db/exec/trial_run_tracker.h" +#include "mongo/db/query/all_indices_required_checker.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/sbe_multi_planner.h" +#include "mongo/db/query/sbe_plan_ranker.h" +#include "mongo/db/query/sbe_sub_planner.h" #include "mongo/db/query/stage_builder_util.h" #include "mongo/db/query/util/make_data_structure.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" namespace mongo::sbe { CandidatePlans SubPlanner::plan( diff --git a/src/mongo/db/query/sbe_sub_planner.h b/src/mongo/db/query/sbe_sub_planner.h index 78a462dd598ed..9533f12c3717b 100644 --- a/src/mongo/db/query/sbe_sub_planner.h +++ b/src/mongo/db/query/sbe_sub_planner.h @@ -29,10 +29,21 @@ #pragma once +#include +#include +#include + +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/all_indices_required_checker.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_yield_policy_sbe.h" #include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_plan_ranker.h" #include "mongo/db/query/sbe_runtime_planner.h" +#include "mongo/db/query/sbe_stage_builder.h" namespace mongo::sbe { /** diff --git a/src/mongo/db/query/serialization_options.cpp b/src/mongo/db/query/serialization_options.cpp new file mode 100644 index 0000000000000..71c27659bfa5c --- /dev/null +++ b/src/mongo/db/query/serialization_options.cpp @@ -0,0 +1,393 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery + +#include "mongo/db/query/serialization_options.h" + +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" + +namespace mongo { + +namespace { + +// We'll pre-declare all of these strings so that we can avoid the allocations when we reference +// them later. +static constexpr StringData kUndefinedTypeString = "?undefined"_sd; +static constexpr StringData kStringTypeString = "?string"_sd; +static constexpr StringData kNumberTypeString = "?number"_sd; +static constexpr StringData kMinKeyTypeString = "?minKey"_sd; +static constexpr StringData kObjectTypeString = "?object"_sd; +static constexpr StringData kArrayTypeString = "?array"_sd; +static constexpr StringData kBinDataTypeString = "?binData"_sd; +static constexpr StringData kObjectIdTypeString = "?objectId"_sd; +static constexpr StringData kBoolTypeString = "?bool"_sd; +static constexpr StringData kDateTypeString = "?date"_sd; +static constexpr StringData kNullTypeString = "?null"_sd; +static constexpr StringData kRegexTypeString = "?regex"_sd; +static constexpr StringData kDbPointerTypeString = "?dbPointer"_sd; +static constexpr StringData kJavascriptTypeString = "?javascript"_sd; +static constexpr StringData kJavascriptWithScopeTypeString = "?javascriptWithScope"_sd; +static constexpr StringData kTimestampTypeString = "?timestamp"_sd; +static constexpr StringData kMaxKeyTypeString = "?maxKey"_sd; + +static const StringMap kArrayTypeStringConstants{ + {kUndefinedTypeString.rawData(), "?array"_sd}, + {kStringTypeString.rawData(), "?array"_sd}, + {kNumberTypeString.rawData(), "?array"_sd}, + {kMinKeyTypeString.rawData(), "?array"_sd}, + {kObjectTypeString.rawData(), "?array"_sd}, + {kArrayTypeString.rawData(), "?array"_sd}, + {kBinDataTypeString.rawData(), "?array"_sd}, + {kObjectIdTypeString.rawData(), "?array"_sd}, + {kBoolTypeString.rawData(), "?array"_sd}, + {kDateTypeString.rawData(), "?array"_sd}, + {kNullTypeString.rawData(), "?array"_sd}, + {kRegexTypeString.rawData(), "?array"_sd}, + {kDbPointerTypeString.rawData(), "?array"_sd}, + {kJavascriptTypeString.rawData(), "?array"_sd}, + {kJavascriptWithScopeTypeString.rawData(), "?array"_sd}, + {kTimestampTypeString.rawData(), "?array"_sd}, 
+ {kMaxKeyTypeString.rawData(), "?array"_sd}, +}; + +/** + * Computes a debug string meant to represent "any value of type t", where "t" is the type of the + * provided argument. For example "?number" for any number (int, double, etc.). + */ +StringData debugTypeString(BSONType t) { + // This is tightly coupled with 'canonicalizeBSONType' and therefore also with + // sorting/comparison semantics. + switch (t) { + case EOO: + case Undefined: + return kUndefinedTypeString; + case Symbol: + case String: + return kStringTypeString; + case NumberInt: + case NumberLong: + case NumberDouble: + case NumberDecimal: + return kNumberTypeString; + case MinKey: + return kMinKeyTypeString; + case Object: + return kObjectTypeString; + case Array: + // This case should only happen if we have an array within an array. + return kArrayTypeString; + case BinData: + return kBinDataTypeString; + case jstOID: + return kObjectIdTypeString; + case Bool: + return kBoolTypeString; + case Date: + return kDateTypeString; + case jstNULL: + return kNullTypeString; + case RegEx: + return kRegexTypeString; + case DBRef: + return kDbPointerTypeString; + case Code: + return kJavascriptTypeString; + case CodeWScope: + return kJavascriptWithScopeTypeString; + case bsonTimestamp: + return kTimestampTypeString; + case MaxKey: + return kMaxKeyTypeString; + default: + MONGO_UNREACHABLE_TASSERT(7539806); + } +} + +/** + * Returns an arbitrary value of the same type as the one given. For any number, this will be the + * number 1. For any boolean this will be true. + * TODO if you need a different value to make sure it will parse, you should not use this API. + */ +ImplicitValue defaultLiteralOfType(BSONType t) { + // This is tightly coupled with 'canonicalizeBSONType' and therefore also with + // sorting/comparison semantics. + switch (t) { + case EOO: + case Undefined: + return BSONUndefined; + case Symbol: + case String: + return "?"_sd; + case NumberInt: + case NumberLong: + case NumberDouble: + case NumberDecimal: + return 1; + case MinKey: + return MINKEY; + case Object: + return Document{{"?"_sd, "?"_sd}}; + case Array: + // This case should only happen if we have an array within an array. + return BSONArray(); + case BinData: + return BSONBinData(); + case jstOID: + return OID::max(); + case Bool: + return true; + case Date: + return Date_t::fromMillisSinceEpoch(0); + case jstNULL: + return BSONNULL; + case RegEx: + return BSONRegEx("/\?/"); + case DBRef: + return BSONDBRef("?.?", OID::max()); + case Code: + return BSONCode("return ?;"); + case CodeWScope: + return BSONCodeWScope("return ?;", BSONObj()); + case bsonTimestamp: + return Timestamp::min(); + case MaxKey: + return MAXKEY; + default: + MONGO_UNREACHABLE_TASSERT(7539803); + } +} + +/** + * A struct representing the sub-type information for an array. + */ +struct ArraySubtypeInfo { + /** + * Whether the values of an array are all the same BSON type or not (mixed). + */ + enum class NTypes { kEmpty, kOneType, kMixed }; + ArraySubtypeInfo(NTypes nTypes_) : nTypes(nTypes_) {} + ArraySubtypeInfo(BSONType oneType) : nTypes(NTypes::kOneType), singleType(oneType) {} + + NTypes nTypes; + boost::optional singleType = boost::none; +}; + +template +using GetTypeFn = std::function; + +static GetTypeFn getBSONElementType = [](const BSONElement& e) { + return e.type(); +}; +static GetTypeFn getValueType = [](const Value& v) { + return v.getType(); +}; + +/** + * Scans 'arrayOfValues' to see if all values are of the same type or not. 
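The two helpers above collapse every literal to a per-type placeholder (`debugTypeString`) or to one fixed representative value (`defaultLiteralOfType`), so queries that differ only in their constants end up with identical shapes. Below is a minimal standalone sketch of that idea (not from this patch), using a hypothetical `Literal` variant in place of the real `BSONType` cases:

```cpp
#include <iostream>
#include <string>
#include <variant>

// Hypothetical literal type standing in for BSONElement/Value in this sketch.
using Literal = std::variant<long long, double, bool, std::string>;

// Every literal of a given type maps to the same placeholder string...
std::string debugTypeStringSketch(const Literal& v) {
    switch (v.index()) {
        case 0:
        case 1:
            return "?number";  // all numeric widths share one placeholder
        case 2:
            return "?bool";
        default:
            return "?string";
    }
}

// ...or to one fixed, parseable representative of the same type.
Literal representativeLiteralSketch(const Literal& v) {
    switch (v.index()) {
        case 0:
            return 1LL;
        case 1:
            return 1.0;
        case 2:
            return true;
        default:
            return std::string("?");
    }
}

int main() {
    std::cout << debugTypeStringSketch(Literal{2LL}) << "\n";    // ?number
    std::cout << debugTypeStringSketch(Literal{3.14}) << "\n";   // ?number
    std::cout << debugTypeStringSketch(Literal{false}) << "\n";  // ?bool
}
```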
Returns this info in a + * struct - see the struct definition for how it is represented. + * + * Templated algorithm to handle both iterators of BSONElements or iterators of Values. + * 'getTypeCallback' is provided to abstract away the different '.type()' vs '.getType()' APIs. + */ +template +ArraySubtypeInfo determineArraySubType(const ArrayType& arrayOfValues, + GetTypeFn getTypeCallback) { + boost::optional firstType = boost::none; + for (auto&& v : arrayOfValues) { + if (!firstType) { + firstType.emplace(getTypeCallback(v)); + } else if (*firstType != getTypeCallback(v)) { + return {ArraySubtypeInfo::NTypes::kMixed}; + } + } + return firstType ? ArraySubtypeInfo{*firstType} + : ArraySubtypeInfo{ArraySubtypeInfo::NTypes::kEmpty}; +} + +ArraySubtypeInfo determineArraySubType(const BSONObj& arrayAsObj) { + return determineArraySubType(arrayAsObj, getBSONElementType); +} +ArraySubtypeInfo determineArraySubType(const std::vector& values) { + return determineArraySubType, Value>(values, getValueType); +} + +template +StringData debugTypeString( + const ValueType& v, + GetTypeFn getTypeCallback, + std::function determineArraySubTypeCallback) { + if (getTypeCallback(v) == BSONType::Array) { + // Iterating the array as .Obj(), as if it were a BSONObj (with field names '0', '1', etc.) + // is faster than converting the whole thing to an array which would force a copy. + auto typeInfo = determineArraySubTypeCallback(v); + switch (typeInfo.nTypes) { + case ArraySubtypeInfo::NTypes::kEmpty: + return "[]"_sd; + case ArraySubtypeInfo::NTypes::kOneType: + return kArrayTypeStringConstants.at(debugTypeString(*typeInfo.singleType)); + case ArraySubtypeInfo::NTypes::kMixed: + return "?array<>"; + default: + MONGO_UNREACHABLE_TASSERT(7539801); + } + } + return debugTypeString(getTypeCallback(v)); +} + +template +ImplicitValue defaultLiteralOfType( + const ValueType& v, + GetTypeFn getTypeCallback, + std::function determineArraySubTypeCallback) { + if (getTypeCallback(v) == BSONType::Array) { + auto typeInfo = determineArraySubTypeCallback(v); + switch (typeInfo.nTypes) { + case ArraySubtypeInfo::NTypes::kEmpty: + return BSONArray(); + case ArraySubtypeInfo::NTypes::kOneType: + return std::vector{defaultLiteralOfType(*typeInfo.singleType)}; + case ArraySubtypeInfo::NTypes::kMixed: + // We don't care which types, we'll use a number and a string as the canonical + // mixed type array regardless. This is to ensure we don't get 2^N possibilities + // for mixed type scenarios - we wish to collapse all "mixed type" arrays to one + // canonical mix. The choice of int and string is mostly arbitrary - hopefully + // somewhat comprehensible at a glance. + return std::vector{Value(2), Value("or more types"_sd)}; + default: + MONGO_UNREACHABLE_TASSERT(7539805); + } + } + return defaultLiteralOfType(getTypeCallback(v)); +} + +ArraySubtypeInfo getSubTypeFromBSONElemArray(BSONElement arrayElem) { + // Iterating the array as .Obj(), as if it were a BSONObj (with field names '0', '1', etc.) + // is faster than converting the whole thing to an array which would force a copy. 
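The classification loop above makes a single pass over the array, remembering the first type it sees and bailing out to "mixed" on the first mismatch. A small standalone sketch of the same scan (not from this patch), using a made-up element-type enum instead of `BSONType`:

```cpp
#include <optional>
#include <vector>

enum class ElemType { kNumber, kString, kBool };
enum class ArrayKind { kEmpty, kOneType, kMixed };

// Mirrors determineArraySubType(): remember the first type seen, return
// kMixed on the first mismatch, and distinguish the empty-array case.
ArrayKind classifyArraySketch(const std::vector<ElemType>& elementTypes) {
    std::optional<ElemType> firstType;
    for (ElemType t : elementTypes) {
        if (!firstType) {
            firstType = t;
        } else if (*firstType != t) {
            return ArrayKind::kMixed;
        }
    }
    return firstType ? ArrayKind::kOneType : ArrayKind::kEmpty;
}

// classifyArraySketch({})                                     -> kEmpty
// classifyArraySketch({ElemType::kNumber, ElemType::kNumber}) -> kOneType
// classifyArraySketch({ElemType::kNumber, ElemType::kString}) -> kMixed
```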
+ return determineArraySubType(arrayElem.Obj()); +} +ArraySubtypeInfo getSubTypeFromValueArray(const Value& arrayVal) { + return determineArraySubType(arrayVal.getArray()); +} + +} // namespace + +const SerializationOptions SerializationOptions::kRepresentativeQueryShapeSerializeOptions = + SerializationOptions{LiteralSerializationPolicy::kToRepresentativeParseableValue}; + +const SerializationOptions SerializationOptions::kDebugQueryShapeSerializeOptions = + SerializationOptions{LiteralSerializationPolicy::kToDebugTypeString}; + +// Overloads for BSONElem and Value. +StringData debugTypeString(BSONElement e) { + return debugTypeString(e, getBSONElementType, getSubTypeFromBSONElemArray); +} +StringData debugTypeString(const Value& v) { + return debugTypeString(v, getValueType, getSubTypeFromValueArray); +} + +// Overloads for BSONElem and Value. +ImplicitValue defaultLiteralOfType(const Value& v) { + return defaultLiteralOfType(v, getValueType, getSubTypeFromValueArray); +} +ImplicitValue defaultLiteralOfType(BSONElement e) { + return defaultLiteralOfType(e, getBSONElementType, getSubTypeFromBSONElemArray); +} + +void SerializationOptions::appendLiteral(BSONObjBuilder* bob, const BSONElement& e) const { + serializeLiteral(e).addToBsonObj(bob, e.fieldNameStringData()); +} + +void SerializationOptions::appendLiteral(BSONObjBuilder* bob, + StringData fieldName, + const ImplicitValue& v) const { + serializeLiteral(v).addToBsonObj(bob, fieldName); +} + +Value SerializationOptions::serializeLiteral(const BSONElement& e) const { + switch (literalPolicy) { + case LiteralSerializationPolicy::kUnchanged: + return Value(e); + case LiteralSerializationPolicy::kToDebugTypeString: + return Value(debugTypeString(e)); + case LiteralSerializationPolicy::kToRepresentativeParseableValue: + return defaultLiteralOfType(e); + default: + MONGO_UNREACHABLE_TASSERT(7539802); + } +} + +Value SerializationOptions::serializeLiteral(const ImplicitValue& v) const { + switch (literalPolicy) { + case LiteralSerializationPolicy::kUnchanged: + return v; + case LiteralSerializationPolicy::kToDebugTypeString: + return Value(debugTypeString(v)); + case LiteralSerializationPolicy::kToRepresentativeParseableValue: + return defaultLiteralOfType(v); + default: + MONGO_UNREACHABLE_TASSERT(7539804); + } +} + +std::string SerializationOptions::serializeFieldPathFromString(StringData path) const { + if (transformIdentifiers) { + try { + return serializeFieldPath(FieldPath(path, false, false)); + } catch (DBException& ex) { + LOGV2_DEBUG(7549808, + 1, + "Failed to convert a path string to a FieldPath", + "pathString"_attr = path, + "failure"_attr = ex.toStatus()); + return serializeFieldPath("invalidFieldPathPlaceholder"); + } + } + return path.toString(); +} +} // namespace mongo diff --git a/src/mongo/db/query/serialization_options.h b/src/mongo/db/query/serialization_options.h index 4cd00dd3faf25..3e4b78999ccf0 100644 --- a/src/mongo/db/query/serialization_options.h +++ b/src/mongo/db/query/serialization_options.h @@ -28,28 +28,66 @@ */ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/explain_options.h" +#include 
"mongo/db/query/explain_verbosity_gen.h" #include "mongo/util/assert_util.h" -#include -#include namespace mongo { namespace { // Should never be called, throw to ensure we catch this in tests. -std::string defaultRedactionStrategy(StringData s) { +std::string defaultHmacStrategy(StringData s) { MONGO_UNREACHABLE_TASSERT(7332410); } } // namespace +/** + * A policy enum for how to serialize literal values. + */ +enum class LiteralSerializationPolicy { + // The default way to serialize. Just serialize whatever literals were given if they are still + // available, or whatever you parsed them to. This is expected to be able to parse again, since + // it worked the first time. + kUnchanged, + // Serialize any literal value as "?number" or similar. For example "?bool" for any boolean. Use + // 'debugTypeString()' helper. + kToDebugTypeString, + // Serialize any literal value to one canonical value of the given type, with the constraint + // that the chosen representative value should be parseable in this context. There are some + // default implementations that will usually work (e.g. using the number 1 almost always works + // for numbers), but serializers should be careful to think about and test this if their parsers + // reject certain values. + kToRepresentativeParseableValue, +}; + /** * A struct with options for how you want to serialize a match or aggregation expression. */ struct SerializationOptions { + using TokenizeIdentifierFunc = std::function; + + // The default serialization options for a query shape. No need to redact identifiers for the + // this purpose. We may do that on the $queryStats read path. + static const SerializationOptions kRepresentativeQueryShapeSerializeOptions; + static const SerializationOptions kDebugQueryShapeSerializeOptions; + SerializationOptions() {} SerializationOptions(bool explain_) @@ -63,32 +101,49 @@ struct SerializationOptions { SerializationOptions(ExplainOptions::Verbosity verbosity_) : verbosity(verbosity_) {} - SerializationOptions(std::function identifierRedactionPolicy_, - boost::optional replacementForLiteralArgs_) - : replacementForLiteralArgs(replacementForLiteralArgs_), - redactIdentifiers(identifierRedactionPolicy_), - identifierRedactionPolicy(identifierRedactionPolicy_) {} + SerializationOptions(std::function fieldNamesHmacPolicy_, + LiteralSerializationPolicy policy) + : literalPolicy(policy), + transformIdentifiers(fieldNamesHmacPolicy_), + transformIdentifiersCallback(fieldNamesHmacPolicy_) {} + + SerializationOptions(LiteralSerializationPolicy policy) : literalPolicy(policy) {} - // Helper function for redacting identifiable information (like collection/db names). - // Note: serializeFieldPath/serializeFieldPathFromString should be used for redacting field + /** + * Checks if this SerializationOptions represents the same options as another + * SerializationOptions. Note it cannot compare whether the two 'transformIdentifiersCallback's + * are the same - the language purposefully leaves the comparison operator undefined. + */ + bool operator==(const SerializationOptions& other) const { + return this->transformIdentifiers == other.transformIdentifiers && + this->includePath == other.includePath && + // You cannot well determine std::function equivalence in C++, so this is the best we'll + // do. 
+ (this->transformIdentifiersCallback == nullptr) == + (other.transformIdentifiersCallback == nullptr) && + this->literalPolicy == other.literalPolicy && this->verbosity == other.verbosity; + } + + // Helper function for removing identifiable information (like collection/db names). + // Note: serializeFieldPath/serializeFieldPathFromString should be used for field // names. std::string serializeIdentifier(StringData str) const { - if (redactIdentifiers) { - return identifierRedactionPolicy(str); + if (transformIdentifiers) { + return transformIdentifiersCallback(str); } return str.toString(); } std::string serializeFieldPath(FieldPath path) const { - if (redactIdentifiers) { - std::stringstream redacted; + if (transformIdentifiers) { + std::stringstream hmaced; for (size_t i = 0; i < path.getPathLength(); ++i) { if (i > 0) { - redacted << "."; + hmaced << "."; } - redacted << identifierRedactionPolicy(path.getFieldName(i)); + hmaced << transformIdentifiersCallback(path.getFieldName(i)); } - return redacted.str(); + return hmaced.str(); } return path.fullPath(); } @@ -97,91 +152,100 @@ struct SerializationOptions { return "$" + serializeFieldPath(path); } - std::string serializeFieldPathFromString(StringData path) const { - if (redactIdentifiers) { - // Some valid field names are considered invalid as a FieldPath (for example, fields - // like "foo.$bar" where a sub-component is prefixed with "$"). For now, if - // serializeFieldPath errors due to an "invalid" field name, we'll serialize that field - // name with this placeholder. - // TODO SERVER-75623 Implement full redaction for all field names and remove placeholder - try { - return serializeFieldPath(path); - } catch (DBException&) { - return serializeFieldPath("dollarPlaceholder"); - } - } - return path.toString(); - } + std::string serializeFieldPathFromString(StringData path) const; - template - Value serializeLiteralValue(T n) const { - if (replacementForLiteralArgs) { - return Value(*replacementForLiteralArgs); + std::vector serializeFieldPathFromString( + const std::vector& paths) const { + std::vector result; + result.reserve(paths.size()); + for (auto& p : paths) { + result.push_back(serializeFieldPathFromString(p)); } - return Value(n); + return result; } - // Helper functions for redacting BSONObj. Does not take into account anything to do with MQL - // semantics, redacts all field names and literals in the passed in obj. - void redactArrayToBuilder(BSONArrayBuilder* bab, std::vector array) { + // Helper functions for applying hmac to BSONObj. Does not take into account anything to do with + // MQL semantics, removes all field names and literals in the passed in obj. 
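`serializeFieldPath()` above transforms a dotted path one component at a time and re-joins the results with '.'. A standalone sketch of that per-component treatment (not from this patch), with a hypothetical `hmacLikeTransform` standing in for the real callback; the `HASH<...>` form matches the test helper used later in this patch:

```cpp
#include <sstream>
#include <string>

// Hypothetical stand-in for transformIdentifiersCallback.
std::string hmacLikeTransform(const std::string& s) {
    return "HASH<" + s + ">";
}

// Each component of "a.b.c" is transformed separately, then re-joined,
// mirroring the loop in serializeFieldPath().
std::string transformDottedPathSketch(const std::string& path) {
    std::istringstream in(path);
    std::ostringstream out;
    std::string component;
    bool first = true;
    while (std::getline(in, component, '.')) {
        if (!first) {
            out << '.';
        }
        out << hmacLikeTransform(component);
        first = false;
    }
    return out.str();
}

// transformDottedPathSketch("a.b.c") == "HASH<a>.HASH<b>.HASH<c>"
```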
+ void addHmacedArrayToBuilder(BSONArrayBuilder* bab, std::vector array) { for (const auto& elem : array) { if (elem.type() == BSONType::Object) { BSONObjBuilder subObj(bab->subobjStart()); - redactObjToBuilder(&subObj, elem.Obj()); + addHmacedObjToBuilder(&subObj, elem.Obj()); subObj.done(); } else if (elem.type() == BSONType::Array) { BSONArrayBuilder subArr(bab->subarrayStart()); - redactArrayToBuilder(&subArr, elem.Array()); + addHmacedArrayToBuilder(&subArr, elem.Array()); subArr.done(); } else { - if (replacementForLiteralArgs) { - bab->append(replacementForLiteralArgs.get()); - } else { - bab->append(elem); - } + *bab << serializeLiteral(elem); } } } - void redactObjToBuilder(BSONObjBuilder* bob, BSONObj objToRedact) { - for (const auto& elem : objToRedact) { + void addHmacedObjToBuilder(BSONObjBuilder* bob, BSONObj objToHmac) { + for (const auto& elem : objToHmac) { auto fieldName = serializeFieldPath(elem.fieldName()); if (elem.type() == BSONType::Object) { BSONObjBuilder subObj(bob->subobjStart(fieldName)); - redactObjToBuilder(&subObj, elem.Obj()); + addHmacedObjToBuilder(&subObj, elem.Obj()); subObj.done(); } else if (elem.type() == BSONType::Array) { BSONArrayBuilder subArr(bob->subarrayStart(fieldName)); - redactArrayToBuilder(&subArr, elem.Array()); + addHmacedArrayToBuilder(&subArr, elem.Array()); subArr.done(); } else { - if (replacementForLiteralArgs) { - bob->append(fieldName, replacementForLiteralArgs.get()); - } else { - bob->appendAs(elem, fieldName); - } + appendLiteral(bob, fieldName, elem); } } } - // 'replacementForLiteralArgs' is an independent option to serialize in a genericized format - // with the aim of similar "shaped" queries serializing to the same object. For example, if - // set to '?' then the serialization of {a: {$gt: 2}} will result in {a: {$gt: '?'}}, as - // will the serialization of {a: {$gt: 3}}. + + /** + * Helper method to call 'serializeLiteral()' on 'e' and append the resulting value to 'bob' + * using the same name as 'e'. + */ + void appendLiteral(BSONObjBuilder* bob, const BSONElement& e) const; + /** + * Helper method to call 'serializeLiteral()' on 'v' and append the result to 'bob' using field + * name 'fieldName'. + */ + void appendLiteral(BSONObjBuilder* bob, StringData fieldName, const ImplicitValue& v) const; + + /** + * Depending on the configured 'literalPolicy', serializeLiteral will return the appropriate + * value for adding literals to serialization output: + * - If 'literalPolicy' is 'kUnchanged', returns the input value unmodified. + * - If it is 'kToDebugTypeString', computes and returns the type string as a string Value. + * - If it is 'kToRepresentativeValue', it returns an arbitrary value of the same type as the + * one given. For any number, this will be the number 1. For any boolean this will be true. + * + * Example usage: BSON("myArg" << options.serializeLiteral(_myArg)); + * + * TODO SERVER-76330 If you need a different value to make sure it will parse, you should not + * use this API - but use serializeConstrainedLiteral() instead. + */ + Value serializeLiteral(const BSONElement& e) const; + Value serializeLiteral(const ImplicitValue& v) const; + + // 'literalPolicy' is an independent option to serialize in a general format with the aim of + // similar "shaped" queries serializing to the same object. For example, if set to + // 'kToDebugTypeString', then the serialization of {a: {$gt: 2}} should result in {a: {$gt: + // '?number'}}, as will the serialization of {a: {$gt: 3}}. 
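Assuming the `appendLiteral()` helper and the `kDebugQueryShapeSerializeOptions` constant declared above, this sketch shows the intended effect: predicates that differ only in their literal append the same debug shape.

```cpp
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/query/serialization_options.h"

namespace mongo {
// Sketch only: builds {a: {$gt: <literal>}} through appendLiteral(), so the
// literal is replaced according to the configured policy.
BSONObj gtShapeSketch(int literal) {
    const auto& opts = SerializationOptions::kDebugQueryShapeSerializeOptions;
    BSONObjBuilder bob;
    BSONObjBuilder sub(bob.subobjStart("a"));
    opts.appendLiteral(&sub, "$gt", literal);
    sub.done();
    return bob.obj();  // {a: {$gt: "?number"}} for any int literal
}
}  // namespace mongo

// gtShapeSketch(2) and gtShapeSketch(3) produce the same object.
```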
// - // "Literal" here is meant to stand in contrast to expression arguements, as in the $gt - // expressions in {$and: [{a: {$gt: 3}}, {b: {$gt: 4}}]}. There the only literals are 3 and - // 4, so the serialization expected would be {$and: [{a: {$gt: '?'}}, {b: {$lt: '?'}}]}. - boost::optional replacementForLiteralArgs = boost::none; - - // If true the caller must set identifierRedactionPolicy. 'redactIdentifiers' if set along with - // a strategy the redaction strategy will be called on any personal identifiable information - // (e.g., field paths/names, collection names) encountered before serializing them. - bool redactIdentifiers = false; - std::function identifierRedactionPolicy = defaultRedactionStrategy; - - // If set, serializes without including the path. For example {a: {$gt: 2}} would serialize - // as just {$gt: 2}. + // "Literal" here is meant to stand in contrast to expression arguments, as in the $gt + // expressions in {$and: [{a: {$gt: 3}}, {b: {$gt: 4}}]}. There the only literals are 3 and 4, + // so the serialization expected for 'kToDebugTypeString' would be {$and: [{a: {$gt: + // '?number'}}, {b: {$lt: '?number'}}]}. + LiteralSerializationPolicy literalPolicy = LiteralSerializationPolicy::kUnchanged; + + // If true the caller must set transformIdentifiersCallback. 'transformIdentifiers' if set along + // with a strategy the redaction strategy will be called on any personal identifiable + // information (e.g., field paths/names, collection names) encountered before serializing them. + bool transformIdentifiers = false; + std::function transformIdentifiersCallback = defaultHmacStrategy; + + // If set to false, serializes without including the path. For example {a: {$gt: 2}} would + // serialize as just {$gt: 2}. // // It is expected that most callers want to set 'includePath' to true to // get a correct serialization. Internally, we may set this to false if we have a situation diff --git a/src/mongo/db/query/shard_filterer_factory_impl.h b/src/mongo/db/query/shard_filterer_factory_impl.h index 54040fcd85636..31992e3d06986 100644 --- a/src/mongo/db/query/shard_filterer_factory_impl.h +++ b/src/mongo/db/query/shard_filterer_factory_impl.h @@ -27,7 +27,11 @@ * it in the license file. */ +#include + #include "mongo/db/catalog/collection.h" +#include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/shard_filterer_factory_interface.h" namespace mongo { diff --git a/src/mongo/db/query/shard_filterer_factory_mock.cpp b/src/mongo/db/query/shard_filterer_factory_mock.cpp index 2e28150d1ec99..8a0b0ebab8a42 100644 --- a/src/mongo/db/query/shard_filterer_factory_mock.cpp +++ b/src/mongo/db/query/shard_filterer_factory_mock.cpp @@ -27,7 +27,7 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/db/query/shard_filterer_factory_mock.h" diff --git a/src/mongo/db/query/shard_filterer_factory_mock.h b/src/mongo/db/query/shard_filterer_factory_mock.h index 356c1cc97747d..ff7aa4412067c 100644 --- a/src/mongo/db/query/shard_filterer_factory_mock.h +++ b/src/mongo/db/query/shard_filterer_factory_mock.h @@ -29,7 +29,10 @@ #pragma once +#include + #include "mongo/db/exec/shard_filterer.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/shard_filterer_factory_interface.h" namespace mongo { diff --git a/src/mongo/db/query/sort_pattern.cpp b/src/mongo/db/query/sort_pattern.cpp index 6d84418ce0e78..172532be4953f 100644 --- a/src/mongo/db/query/sort_pattern.cpp +++ b/src/mongo/db/query/sort_pattern.cpp @@ -27,10 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_dependencies.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/sort_pattern.h" +#include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" namespace mongo { @@ -97,7 +112,13 @@ SortPattern::SortPattern(const BSONObj& obj, patternPart.fieldPath = FieldPath{fieldName}; patternPart.isAscending = (direction > 0); - _paths.insert(patternPart.fieldPath->fullPath()); + + const auto [_, inserted] = _paths.insert(patternPart.fieldPath->fullPath()); + uassert(7472500, + str::stream() << "$sort key must not contain duplicate keys (duplicate: '" + << patternPart.fieldPath->fullPath() << "')", + inserted); + _sortPattern.push_back(std::move(patternPart)); } } @@ -119,22 +140,8 @@ Document SortPattern::serialize(SortKeySerialization serializationMode, const size_t n = _sortPattern.size(); for (size_t i = 0; i < n; ++i) { if (_sortPattern[i].fieldPath) { - std::stringstream serializedFieldName; - if (!options.redactIdentifiers) { - // Append a named integer based on whether the sort is ascending/descending. - serializedFieldName << _sortPattern[i].fieldPath->fullPath(); - } else { - // Redact each field name in the full path. - for (size_t index = 0; index < _sortPattern[i].fieldPath->getPathLength(); - ++index) { - if (index > 0) { - serializedFieldName << "."; - } - serializedFieldName << options.identifierRedactionPolicy( - _sortPattern[i].fieldPath->getFieldName(index)); - } - } - keyObj.setField(serializedFieldName.str(), Value(_sortPattern[i].isAscending ? 1 : -1)); + keyObj.setField(options.serializeFieldPath(*_sortPattern[i].fieldPath), + Value(_sortPattern[i].isAscending ? 1 : -1)); } else { // Sorting by an expression, use a made up field name. 
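The new duplicate-key guard above hinges on the `{iterator, bool}` pair returned by set insertion: the bool is false when the path is already present, regardless of the sort direction attached to it. A standalone sketch of the same idiom (not from this patch):

```cpp
#include <set>
#include <stdexcept>
#include <string>
#include <vector>

// Rejects sort specs such as {b: 1, b: -1}; dotted paths like "a" and "b.a"
// remain distinct keys and pass, mirroring the uassert added above.
void checkNoDuplicateSortKeysSketch(const std::vector<std::string>& paths) {
    std::set<std::string> seen;
    for (const auto& path : paths) {
        auto [pos, inserted] = seen.insert(path);
        if (!inserted) {
            throw std::invalid_argument(
                "$sort key must not contain duplicate keys (duplicate: '" + path + "')");
        }
    }
}

// checkNoDuplicateSortKeysSketch({"a", "b.a"});  // ok
// checkNoDuplicateSortKeysSketch({"b", "b"});    // throws
```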
auto computedFieldName = std::string(str::stream() << "$computed" << i); diff --git a/src/mongo/db/query/sort_pattern.h b/src/mongo/db/query/sort_pattern.h index 75d0e91a78421..b6e3975b3a598 100644 --- a/src/mongo/db/query/sort_pattern.h +++ b/src/mongo/db/query/sort_pattern.h @@ -28,12 +28,28 @@ */ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_path_support.h" #include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" #include "mongo/db/query/serialization_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { class SortPattern { @@ -65,8 +81,13 @@ class SortPattern { SortPattern(std::vector patterns) : _sortPattern(std::move(patterns)) { for (auto&& patternPart : _sortPattern) { - if (patternPart.fieldPath) - _paths.insert(patternPart.fieldPath->fullPath()); + if (patternPart.fieldPath) { + const auto [_, inserted] = _paths.insert(patternPart.fieldPath->fullPath()); + uassert(7472501, + str::stream() << "$sort key must not contain duplicate keys (field: '" + << patternPart.fieldPath->fullPath() << "')", + inserted); + } } } diff --git a/src/mongo/db/query/sort_pattern_test.cpp b/src/mongo/db/query/sort_pattern_test.cpp index 3bb4286137c22..88a4206cca789 100644 --- a/src/mongo/db/query/sort_pattern_test.cpp +++ b/src/mongo/db/query/sort_pattern_test.cpp @@ -26,17 +26,28 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ +#include +#include + +#include "serialization_options.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/sort_pattern.h" -#include "mongo/unittest/unittest.h" -#include "serialization_options.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + namespace mongo { namespace { -std::string redactFieldNameForTest(StringData s) { +std::string applyHmacForTest(StringData s) { return str::stream() << "HASH<" << s << ">"; } @@ -49,8 +60,8 @@ TEST(SerializeSortPatternTest, SerializeAndRedactFieldName) { auto expCtx = getExpCtx(); auto sortPattern = SortPattern(fromjson("{val: 1}"), expCtx); SerializationOptions opts = {}; - opts.redactIdentifiers = true; - opts.identifierRedactionPolicy = redactFieldNameForTest; + opts.transformIdentifiers = true; + opts.transformIdentifiersCallback = applyHmacForTest; // Most basic sort pattern, confirm that field name gets redacted. 
ASSERT_DOCUMENT_EQ_AUTO( // NOLINT @@ -86,7 +97,7 @@ TEST(SerializeSortPatternTest, SerializeNoRedaction) { auto expCtx = getExpCtx(); auto sortPattern = SortPattern(fromjson("{val: 1}"), expCtx); SerializationOptions opts = {}; - opts.redactIdentifiers = false; + opts.transformIdentifiers = false; ASSERT_DOCUMENT_EQ_AUTO( // NOLINT R"({"val":1})", sortPattern.serialize(SortPattern::SortKeySerialization::kForPipelineSerialization, opts)); @@ -97,5 +108,28 @@ TEST(SerializeSortPatternTest, SerializeNoRedaction) { sortPattern.serialize(SortPattern::SortKeySerialization::kForPipelineSerialization)); } + +// Throw assertion in the case we have double defined sort order for a field. +TEST(SortStageDefaultTest, WrongSortKeyDefinition) { + auto expCtx = getExpCtx(); + ASSERT_THROWS_CODE(SortPattern(fromjson("{b: 1, b: 1}"), expCtx), AssertionException, 7472500); + + // Test if the sort order is ignored for the duplication detection. + ASSERT_THROWS_CODE(SortPattern(fromjson("{b: 1, b: -1}"), expCtx), AssertionException, 7472500); + + // Tests that include subdocuments. + ASSERT_DOES_NOT_THROW(SortPattern(fromjson("{a:1, 'b.a':1}"), expCtx)); + + ASSERT_THROWS_CODE( + SortPattern(fromjson("{a:1, 'b.a':1, 'b.a':-1}"), expCtx), AssertionException, 7472500); + + // Test the other SortPattern constructor. + std::vector sortKeys; + sortKeys.push_back(SortPattern::SortPatternPart{false, FieldPath("a")}); + sortKeys.push_back(SortPattern::SortPatternPart{false, FieldPath("b")}); + sortKeys.push_back(SortPattern::SortPatternPart{true, FieldPath("a")}); + ASSERT_THROWS_CODE(SortPattern(std::move(sortKeys)), AssertionException, 7472501); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/query/stage_builder.h b/src/mongo/db/query/stage_builder.h index 41b45c748b6d0..8d8bbde7c95be 100644 --- a/src/mongo/db/query/stage_builder.h +++ b/src/mongo/db/query/stage_builder.h @@ -35,9 +35,9 @@ namespace mongo::stage_builder { /** * The StageBuilder converts a QuerySolution tree to an executable tree of PlanStage(s), with the - * specific type defined by the 'PlanStageType' parameter. + * specific type defined by the 'PlanType' parameter. */ -template +template class StageBuilder { public: StageBuilder(OperationContext* opCtx, const CanonicalQuery& cq, const QuerySolution& solution) @@ -49,7 +49,7 @@ class StageBuilder { * Given a root node of a QuerySolution tree, builds and returns a corresponding executable * tree of PlanStages. */ - virtual std::unique_ptr build(const QuerySolutionNode* root) = 0; + virtual PlanType build(const QuerySolutionNode* root) = 0; protected: OperationContext* _opCtx; diff --git a/src/mongo/db/query/stage_builder_util.cpp b/src/mongo/db/query/stage_builder_util.cpp index 96becfae481ac..ed57e0116750e 100644 --- a/src/mongo/db/query/stage_builder_util.cpp +++ b/src/mongo/db/query/stage_builder_util.cpp @@ -27,18 +27,18 @@ * it in the license file. 
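The `StageBuilder` interface above is now parameterized on the full return type of `build()` rather than always returning a single `PlanStage` pointer, which is what lets the SBE builder hand back a `{stage, data}` pair (consumed via structured bindings further down). A rough standalone sketch of that shape, with illustrative names only:

```cpp
#include <memory>
#include <utility>

// Illustrative stand-ins, not the real query classes.
struct StageSketch {};
struct DataSketch {};

template <class PlanType>
class StageBuilderSketch {
public:
    virtual ~StageBuilderSketch() = default;
    // The template parameter decides what build() hands back.
    virtual PlanType build() = 0;
};

// An SBE-style builder can return both the stage tree and its companion data.
class PairBuilderSketch final
    : public StageBuilderSketch<std::pair<std::unique_ptr<StageSketch>, DataSketch>> {
public:
    std::pair<std::unique_ptr<StageSketch>, DataSketch> build() override {
        return {std::make_unique<StageSketch>(), DataSketch{}};
    }
};

// PairBuilderSketch builder;
// auto [stage, data] = builder.build();
```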
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/stage_builder_util.h" +#include #include "mongo/db/query/classic_stage_builder.h" #include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/plan_yield_policy_sbe.h" #include "mongo/db/query/sbe_stage_builder.h" -#include "mongo/db/query/shard_filterer_factory_impl.h" +#include "mongo/db/query/stage_builder_util.h" +#include "mongo/util/assert_util_core.h" namespace mongo::stage_builder { std::unique_ptr buildClassicExecutableTree(OperationContext* opCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const CanonicalQuery& cq, const QuerySolution& solution, WorkingSet* ws) { @@ -46,10 +46,10 @@ std::unique_ptr buildClassicExecutableTree(OperationContext* opCtx, // queries that disallow extensions, can be properly executed. If the query does not have // $text/$where context (and $text/$where are allowed), then no attempt should be made to // execute the query. - invariant(!cq.canHaveNoopMatchNodes()); invariant(solution.root()); invariant(ws); - auto builder = std::make_unique(opCtx, collection, cq, solution, ws); + auto builder = + std::make_unique(opCtx, std::move(collection), cq, solution, ws); return builder->build(solution.root()); } @@ -63,7 +63,6 @@ buildSlotBasedExecutableTree(OperationContext* opCtx, // queries that disallow extensions, can be properly executed. If the query does not have // $text/$where context (and $text/$where are allowed), then no attempt should be made to // execute the query. - invariant(!cq.canHaveNoopMatchNodes()); invariant(solution.root()); auto sbeYieldPolicy = dynamic_cast(yieldPolicy); @@ -71,8 +70,7 @@ buildSlotBasedExecutableTree(OperationContext* opCtx, auto builder = std::make_unique(opCtx, collections, cq, solution, sbeYieldPolicy); - auto root = builder->build(solution.root()); - auto data = builder->getPlanStageData(); + auto [root, data] = builder->build(solution.root()); return {std::move(root), std::move(data)}; } diff --git a/src/mongo/db/query/stage_builder_util.h b/src/mongo/db/query/stage_builder_util.h index 943334e590c6d..06584d45caa23 100644 --- a/src/mongo/db/query/stage_builder_util.h +++ b/src/mongo/db/query/stage_builder_util.h @@ -29,8 +29,19 @@ #pragma once +#include +#include + +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/sbe/stages/stages.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/classic_stage_builder.h" +#include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/sbe_stage_builder.h" namespace mongo::stage_builder { @@ -45,7 +56,7 @@ namespace mongo::stage_builder { * will consist of. */ std::unique_ptr buildClassicExecutableTree(OperationContext* opCtx, - const CollectionPtr& collection, + VariantCollectionPtrOrAcquisition collection, const CanonicalQuery& cq, const QuerySolution& solution, WorkingSet* ws); diff --git a/src/mongo/db/query/stage_types.cpp b/src/mongo/db/query/stage_types.cpp index aca0d7370cd63..642f993b329a8 100644 --- a/src/mongo/db/query/stage_types.cpp +++ b/src/mongo/db/query/stage_types.cpp @@ -27,7 +27,9 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include + +#include #include "mongo/db/query/stage_types.h" #include "mongo/stdx/unordered_map.h" @@ -65,6 +67,7 @@ StringData stageTypeToString(StageType stageType) { {STAGE_RECORD_STORE_FAST_COUNT, "RECORD_STORE_FAST_COUNT"_sd}, {STAGE_RETURN_KEY, "RETURN_KEY"_sd}, {STAGE_SAMPLE_FROM_TIMESERIES_BUCKET, "SAMPLE_FROM_TIMESERIES_BUCKET"_sd}, + {STAGE_SEARCH, "SEARCH"_sd}, {STAGE_SHARDING_FILTER, "SHARDING_FILTER"_sd}, {STAGE_SKIP, "SKIP"_sd}, {STAGE_SORT_DEFAULT, "SORT"_sd}, @@ -86,4 +89,9 @@ StringData stageTypeToString(StageType stageType) { } return kStageTypesMap.at(STAGE_UNKNOWN); } + +StringData sbeClusteredCollectionScanToString() { + static const StringData kClusteredIxscan = "CLUSTERED_IXSCAN"_sd; + return kClusteredIxscan; +} } // namespace mongo diff --git a/src/mongo/db/query/stage_types.h b/src/mongo/db/query/stage_types.h index a0f725dff0a2a..6080fcbe62715 100644 --- a/src/mongo/db/query/stage_types.h +++ b/src/mongo/db/query/stage_types.h @@ -44,14 +44,17 @@ using PlanNodeId = uint32_t; static constexpr PlanNodeId kEmptyPlanNodeId = 0u; /** - * These map to implementations of the PlanStage interface, all of which live in db/exec/ + * These map to implementations of the PlanStage interface, all of which live in db/exec/. These + * stage types are shared between Classic and SBE. */ enum StageType { STAGE_AND_HASH, STAGE_AND_SORTED, STAGE_BATCHED_DELETE, STAGE_CACHED_PLAN, + STAGE_COLLSCAN, + STAGE_COLUMN_SCAN, // A virtual scan stage that simulates a collection scan and doesn't depend on underlying @@ -135,6 +138,7 @@ enum StageType { // Stages for DocumentSources. STAGE_GROUP, STAGE_EQ_LOOKUP, + STAGE_SEARCH, STAGE_SENTINEL, }; @@ -160,4 +164,10 @@ inline bool isSortStageType(StageType stageType) { } StringData stageTypeToString(StageType stageType); + +/** + * Returns the explain() stage type string for a STAGE_COLLSCAN stage that is performing a clustered + * collection scan in SBE, to match Classic's explain() output. 
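`sbeClusteredCollectionScanToString()` above returns a `StringData` backed by a function-local static, so the string outlives the call without a per-call allocation. A tiny standalone sketch of the same pattern (not from this patch):

```cpp
#include <string_view>

// Function-local static: initialized once on first use, then handed out as a
// cheap view, like the kClusteredIxscan constant above.
std::string_view clusteredScanNameSketch() {
    static const std::string_view kName = "CLUSTERED_IXSCAN";
    return kName;
}
```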
+ */ +StringData sbeClusteredCollectionScanToString(); } // namespace mongo diff --git a/src/mongo/db/query/stats/SConscript b/src/mongo/db/query/stats/SConscript index 13457cccaeaa4..982efddc6f455 100644 --- a/src/mongo/db/query/stats/SConscript +++ b/src/mongo/db/query/stats/SConscript @@ -81,10 +81,10 @@ env.CppUnitTest( ) env.CppUnitTest( - target="stats_cache_test", + target='stats_cache_test', source=[ - "stats_cache_test.cpp", - "stats_cache_loader_mock.cpp", + 'stats_cache_test.cpp', + 'stats_cache_loader_mock.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/base', @@ -106,7 +106,7 @@ env.CppUnitTest( ) env.Library( - target="stats_test_utils", + target='stats_test_utils', source=[ 'collection_statistics_mock.cpp', 'rand_utils.cpp', @@ -116,7 +116,6 @@ env.Library( LIBDEPS=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/exec/sbe/sbe_abt_test_util', - "$BUILD_DIR/mongo/unittest/unittest", 'stats_gen', 'stats_histograms', ], diff --git a/src/mongo/db/query/stats/array_histogram.cpp b/src/mongo/db/query/stats/array_histogram.cpp index 2f5a097f0bec7..52ad2dd034446 100644 --- a/src/mongo/db/query/stats/array_histogram.cpp +++ b/src/mongo/db/query/stats/array_histogram.cpp @@ -28,7 +28,24 @@ */ #include "mongo/db/query/stats/array_histogram.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/query/stats/value_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::stats { namespace { @@ -453,7 +470,7 @@ std::shared_ptr ArrayHistogram::make(Statistics stats) { // Note that we don't run validation when loading a histogram from the Statistics collection // because we already validated this histogram before inserting it. const auto scalar = ScalarHistogram::make(stats.getScalarHistogram()); - const auto typeCounts = mapStatsTypeCountToTypeCounts(stats.getTypeCount()); + auto typeCounts = mapStatsTypeCountToTypeCounts(stats.getTypeCount()); const double trueCount = stats.getTrueCount(); const double falseCount = stats.getFalseCount(); const double nanCount = stats.getNanCount(); diff --git a/src/mongo/db/query/stats/array_histogram.h b/src/mongo/db/query/stats/array_histogram.h index 45fb37b4761d4..a8fe07dcf80fa 100644 --- a/src/mongo/db/query/stats/array_histogram.h +++ b/src/mongo/db/query/stats/array_histogram.h @@ -29,8 +29,15 @@ #pragma once +#include +#include +#include #include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/scalar_histogram.h" #include "mongo/db/query/stats/stats_gen.h" diff --git a/src/mongo/db/query/stats/array_histogram_test.cpp b/src/mongo/db/query/stats/array_histogram_test.cpp index ec18c6097e6f7..f514441ef2c25 100644 --- a/src/mongo/db/query/stats/array_histogram_test.cpp +++ b/src/mongo/db/query/stats/array_histogram_test.cpp @@ -26,16 +26,33 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/json.h" #include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/max_diff.h" #include "mongo/db/query/stats/rand_utils_new.h" +#include "mongo/db/query/stats/scalar_histogram.h" #include "mongo/db/query/stats/value_utils.h" #include "mongo/platform/decimal128.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/time_support.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/collection_statistics_impl.cpp b/src/mongo/db/query/stats/collection_statistics_impl.cpp index 0099435359661..27644242ddac7 100644 --- a/src/mongo/db/query/stats/collection_statistics_impl.cpp +++ b/src/mongo/db/query/stats/collection_statistics_impl.cpp @@ -29,8 +29,15 @@ #include "mongo/db/query/stats/collection_statistics_impl.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/db/client.h" #include "mongo/db/query/stats/stats_catalog.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::stats { @@ -58,8 +65,9 @@ const ArrayHistogram* CollectionStatisticsImpl::getHistogram(const std::string& if (!swHistogram.isOK()) { if (swHistogram != ErrorCodes::NamespaceNotFound) { uasserted(swHistogram.getStatus().code(), - str::stream() << "Error getting histograms for path " << _nss << " : " - << path << swHistogram.getStatus().reason()); + str::stream() + << "Error getting histograms for path " << _nss.toStringForErrorMsg() + << " : " << path << swHistogram.getStatus().reason()); } return nullptr; } diff --git a/src/mongo/db/query/stats/collection_statistics_impl.h b/src/mongo/db/query/stats/collection_statistics_impl.h index 71a7d0d0ef18d..bd13e48a36d68 100644 --- a/src/mongo/db/query/stats/collection_statistics_impl.h +++ b/src/mongo/db/query/stats/collection_statistics_impl.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include + +#include "mongo/db/namespace_string.h" #include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/collection_statistics.h" diff --git a/src/mongo/db/query/stats/collection_statistics_mock.cpp b/src/mongo/db/query/stats/collection_statistics_mock.cpp index 7aa8856ba097f..c3a6cd5770513 100644 --- a/src/mongo/db/query/stats/collection_statistics_mock.cpp +++ b/src/mongo/db/query/stats/collection_statistics_mock.cpp @@ -29,6 +29,9 @@ #include "mongo/db/query/stats/collection_statistics_mock.h" +#include +#include + namespace mongo::stats { CollectionStatisticsMock::CollectionStatisticsMock(double cardinality) diff --git a/src/mongo/db/query/stats/collection_statistics_mock.h b/src/mongo/db/query/stats/collection_statistics_mock.h index 1e2a6c53b5e10..5981381b3d176 100644 --- a/src/mongo/db/query/stats/collection_statistics_mock.h +++ b/src/mongo/db/query/stats/collection_statistics_mock.h @@ -29,7 +29,11 @@ #pragma once +#include +#include + #include "mongo/db/namespace_string.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/collection_statistics.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/max_diff.cpp 
b/src/mongo/db/query/stats/max_diff.cpp index 93b33499fac53..53c930ec13012 100644 --- a/src/mongo/db/query/stats/max_diff.cpp +++ b/src/mongo/db/query/stats/max_diff.cpp @@ -31,21 +31,22 @@ #include #include -#include #include -#include #include #include -#include #include +#include +#include + +#include +#include +#include -#include "mongo/base/string_data.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/bsontypes.h" -#include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/value_utils.h" +#include "mongo/db/storage/key_string.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::stats { namespace { diff --git a/src/mongo/db/query/stats/max_diff.h b/src/mongo/db/query/stats/max_diff.h index 2aa1d14ea8b9f..877ad05cdcea6 100644 --- a/src/mongo/db/query/stats/max_diff.h +++ b/src/mongo/db/query/stats/max_diff.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include +#include +#include #include #include diff --git a/src/mongo/db/query/stats/maxdiff_test_utils.cpp b/src/mongo/db/query/stats/maxdiff_test_utils.cpp index cb0e66dc285c5..86a8cdd764590 100644 --- a/src/mongo/db/query/stats/maxdiff_test_utils.cpp +++ b/src/mongo/db/query/stats/maxdiff_test_utils.cpp @@ -29,13 +29,25 @@ #include "mongo/db/query/stats/maxdiff_test_utils.h" +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/sbe/abt/sbe_abt_test_util.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/max_diff.h" +#include "mongo/stdx/unordered_map.h" namespace mongo::stats { -static std::vector convertToJSON(const std::vector& input) { - std::vector result; +static std::vector convertToBSON(const std::vector& input) { + std::vector result; for (size_t i = 0; i < input.size(); i++) { const auto [objTag, objVal] = sbe::value::makeNewObject(); @@ -49,7 +61,7 @@ static std::vector convertToJSON(const std::vector& input std::ostringstream os; os << std::make_pair(objTag, objVal); - result.push_back(os.str()); + result.push_back(fromjson(os.str())); } return result; @@ -58,7 +70,7 @@ static std::vector convertToJSON(const std::vector& input size_t getActualCard(OperationContext* opCtx, const std::vector& input, const std::string& query) { - return mongo::optimizer::runPipeline(opCtx, query, convertToJSON(input)).size(); + return mongo::optimizer::runPipeline(opCtx, query, convertToBSON(input)).size(); } std::string makeMatchExpr(const SBEValue& val, optimizer::ce::EstimationType cmpOp) { diff --git a/src/mongo/db/query/stats/maxdiff_test_utils.h b/src/mongo/db/query/stats/maxdiff_test_utils.h index a34f7dd41eec6..72b6a3e4f8ccd 100644 --- a/src/mongo/db/query/stats/maxdiff_test_utils.h +++ b/src/mongo/db/query/stats/maxdiff_test_utils.h @@ -29,11 +29,13 @@ #pragma once +#include #include #include #include "mongo/db/exec/sbe/abt/sbe_abt_test_util.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/ce/histogram_predicate_estimation.h" #include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/scalar_histogram.h" diff --git a/src/mongo/db/query/stats/rand_utils.cpp b/src/mongo/db/query/stats/rand_utils.cpp index 4639cd76b1294..09e175aeb1f2a 100644 --- a/src/mongo/db/query/stats/rand_utils.cpp +++ b/src/mongo/db/query/stats/rand_utils.cpp @@ -29,12 +29,21 @@ #include 
"mongo/db/query/stats/rand_utils.h" +#include #include -#include +#include +#include +#include +#include #include +#include #include +#include + #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/assert_util.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/rand_utils.h b/src/mongo/db/query/stats/rand_utils.h index 42d62e4ac4022..a3b9535e3d3ad 100644 --- a/src/mongo/db/query/stats/rand_utils.h +++ b/src/mongo/db/query/stats/rand_utils.h @@ -29,9 +29,17 @@ #pragma once +#include +#include +#include #include +#include +#include #include +#include + +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/value_utils.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/rand_utils_new.cpp b/src/mongo/db/query/stats/rand_utils_new.cpp index ae2f300c82b3d..29bf97dbd9d64 100644 --- a/src/mongo/db/query/stats/rand_utils_new.cpp +++ b/src/mongo/db/query/stats/rand_utils_new.cpp @@ -30,13 +30,16 @@ #include "mongo/db/query/stats/rand_utils_new.h" #include -#include +#include +#include #include +#include +#include #include -#include #include #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/storage/key_string.h" #include "mongo/util/assert_util.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/rand_utils_new.h b/src/mongo/db/query/stats/rand_utils_new.h index 1e3bcc0f19e94..4e1cb00148598 100644 --- a/src/mongo/db/query/stats/rand_utils_new.h +++ b/src/mongo/db/query/stats/rand_utils_new.h @@ -29,10 +29,18 @@ #pragma once +#include +#include +#include #include +#include +#include #include +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/value_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/scalar_histogram.cpp b/src/mongo/db/query/stats/scalar_histogram.cpp index 0e84e3948ce64..0d25aeb839010 100644 --- a/src/mongo/db/query/stats/scalar_histogram.cpp +++ b/src/mongo/db/query/stats/scalar_histogram.cpp @@ -29,9 +29,18 @@ #include "mongo/db/query/stats/scalar_histogram.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/basic_types.h" #include "mongo/db/exec/sbe/values/bson.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/value_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::stats { namespace { diff --git a/src/mongo/db/query/stats/scalar_histogram.h b/src/mongo/db/query/stats/scalar_histogram.h index 6e81263f997f4..0c9e9fd673ce7 100644 --- a/src/mongo/db/query/stats/scalar_histogram.h +++ b/src/mongo/db/query/stats/scalar_histogram.h @@ -29,10 +29,13 @@ #pragma once +#include +#include #include #include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/stats_gen.h" diff --git a/src/mongo/db/query/stats/stats_cache.cpp b/src/mongo/db/query/stats/stats_cache.cpp index dfe5a43890e28..0494d2037d210 100644 --- a/src/mongo/db/query/stats/stats_cache.cpp +++ b/src/mongo/db/query/stats/stats_cache.cpp @@ -29,10 +29,17 @@ #include "mongo/db/query/stats/stats_cache.h" -#include "mongo/db/query/stats/collection_statistics.h" -#include "mongo/util/read_through_cache.h" +#include +#include +#include +#include +#include +#include -#include "mongo/logv2/log.h" +#include "mongo/base/error_codes.h" +#include "mongo/util/assert_util.h" 
+#include "mongo/util/future.h" +#include "mongo/util/read_through_cache.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/stats/stats_cache.h b/src/mongo/db/query/stats/stats_cache.h index 37d3d238a4dba..5590f18b22362 100644 --- a/src/mongo/db/query/stats/stats_cache.h +++ b/src/mongo/db/query/stats/stats_cache.h @@ -29,11 +29,25 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/stats/collection_statistics.h" #include "mongo/db/query/stats/stats_cache_loader.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/thread_pool_interface.h" +#include "mongo/util/invalidating_lru_cache.h" #include "mongo/util/read_through_cache.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/stats_cache_loader.h b/src/mongo/db/query/stats/stats_cache_loader.h index 309324820db08..77f86cfca6f77 100644 --- a/src/mongo/db/query/stats/stats_cache_loader.h +++ b/src/mongo/db/query/stats/stats_cache_loader.h @@ -29,9 +29,18 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/stats/array_histogram.h" #include "mongo/stdx/thread.h" +#include "mongo/util/future.h" namespace mongo::stats { using StatsPathString = std::pair; diff --git a/src/mongo/db/query/stats/stats_cache_loader_impl.cpp b/src/mongo/db/query/stats/stats_cache_loader_impl.cpp index 1fda6dce78533..945d9981bc62e 100644 --- a/src/mongo/db/query/stats/stats_cache_loader_impl.cpp +++ b/src/mongo/db/query/stats/stats_cache_loader_impl.cpp @@ -29,11 +29,29 @@ #include "mongo/db/query/stats/stats_cache_loader_impl.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/stats_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/thread.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -57,9 +75,9 @@ SemiFuture StatsCacheLoaderImpl::getStats(OperationContext* opCtx if (!cursor) { uasserted(ErrorCodes::OperationFailed, - str::stream() - << "Failed to establish a cursor for reading " << statsPath.first.ns() - << ", path " << statsPath.second << " from local storage"); + str::stream() << "Failed to establish a cursor for reading " + << statsPath.first.toStringForErrorMsg() << ", path " + << statsPath.second << " from local storage"); } if (cursor->more()) { @@ -71,8 +89,8 @@ SemiFuture StatsCacheLoaderImpl::getStats(OperationContext* opCtx } uasserted(ErrorCodes::NamespaceNotFound, - str::stream() << "Stats does not exists for " << statsNss.ns() << ", path " - << 
statsPath.second); + str::stream() << "Stats does not exists for " << statsNss.toStringForErrorMsg() + << ", path " << statsPath.second); } catch (const DBException& ex) { uassertStatusOK(ex.toStatus()); } diff --git a/src/mongo/db/query/stats/stats_cache_loader_impl.h b/src/mongo/db/query/stats/stats_cache_loader_impl.h index 979a1009acba8..37d3425e6cb61 100644 --- a/src/mongo/db/query/stats/stats_cache_loader_impl.h +++ b/src/mongo/db/query/stats/stats_cache_loader_impl.h @@ -30,9 +30,11 @@ #pragma once #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/stats/collection_statistics.h" #include "mongo/db/query/stats/stats_cache_loader.h" #include "mongo/stdx/thread.h" +#include "mongo/util/future.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/stats_cache_loader_mock.cpp b/src/mongo/db/query/stats/stats_cache_loader_mock.cpp index c190d61c31221..8d9f72cb6c03a 100644 --- a/src/mongo/db/query/stats/stats_cache_loader_mock.cpp +++ b/src/mongo/db/query/stats/stats_cache_loader_mock.cpp @@ -29,9 +29,12 @@ #include "mongo/db/query/stats/stats_cache_loader_mock.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/query/stats/collection_statistics.h" -#include "mongo/stdx/thread.h" +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/util/assert_util.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/stats_cache_loader_mock.h b/src/mongo/db/query/stats/stats_cache_loader_mock.h index 9951bcfd2ca04..f81bd9abfd84b 100644 --- a/src/mongo/db/query/stats/stats_cache_loader_mock.h +++ b/src/mongo/db/query/stats/stats_cache_loader_mock.h @@ -29,10 +29,16 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/stats/collection_statistics.h" #include "mongo/db/query/stats/stats_cache_loader.h" #include "mongo/stdx/thread.h" +#include "mongo/util/future.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/stats_cache_loader_test.cpp b/src/mongo/db/query/stats/stats_cache_loader_test.cpp index c58c057341cdd..296ee461d4318 100644 --- a/src/mongo/db/query/stats/stats_cache_loader_test.cpp +++ b/src/mongo/db/query/stats/stats_cache_loader_test.cpp @@ -27,18 +27,35 @@ * it in the license file. 
*/ -#include "mongo/bson/oid.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/max_diff.h" #include "mongo/db/query/stats/scalar_histogram.h" +#include "mongo/db/query/stats/stats_cache_loader.h" #include "mongo/db/query/stats/stats_cache_loader_impl.h" #include "mongo/db/query/stats/stats_cache_loader_test_fixture.h" -#include "mongo/db/query/stats/stats_gen.h" #include "mongo/db/query/stats/value_utils.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/fail_point.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/future.h" namespace mongo::stats { namespace { @@ -95,7 +112,8 @@ TEST_F(StatsCacheLoaderTest, VerifyStatsLoadsScalar) { // Initalize stats collection. NamespaceString nss = NamespaceString::createNamespaceString_forTest("test", "stats"); std::string statsColl(StatsCacheLoader::kStatsPrefix + "." + nss.coll()); - NamespaceString statsNss = NamespaceString::createNamespaceString_forTest(nss.db(), statsColl); + NamespaceString statsNss = + NamespaceString::createNamespaceString_forTest(nss.db_forTest(), statsColl); createStatsCollection(statsNss); // Write serialized stats path to collection. @@ -164,7 +182,8 @@ TEST_F(StatsCacheLoaderTest, VerifyStatsLoadsArray) { // Initalize stats collection. NamespaceString nss = NamespaceString::createNamespaceString_forTest("test", "stats"); std::string statsColl(StatsCacheLoader::kStatsPrefix + "." + nss.coll()); - NamespaceString statsNss = NamespaceString::createNamespaceString_forTest(nss.db(), statsColl); + NamespaceString statsNss = + NamespaceString::createNamespaceString_forTest(nss.db_forTest(), statsColl); createStatsCollection(statsNss); // Write serialized stats path to collection. 
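
Note on the hunks above and in stats_catalog.cpp further below: user-facing error messages are switched from NamespaceString::ns()/toString() to NamespaceString::toStringForErrorMsg(). A minimal sketch of the resulting pattern, with hypothetical names (exampleStatsLookupFailure, nss, path) standing in for the real call sites in StatsCacheLoaderImpl::getStats:

#include <string>

#include "mongo/base/error_codes.h"
#include "mongo/db/namespace_string.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/str.h"

namespace mongo {
// Hypothetical helper, for illustration only: report a missing stats entry the
// way the patched code does, rendering the namespace with toStringForErrorMsg()
// instead of ns().
void exampleStatsLookupFailure(const NamespaceString& nss, const std::string& path) {
    uasserted(ErrorCodes::NamespaceNotFound,
              str::stream() << "Stats do not exist for " << nss.toStringForErrorMsg()
                            << ", path " << path);
}
}  // namespace mongo
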
diff --git a/src/mongo/db/query/stats/stats_cache_loader_test_fixture.cpp b/src/mongo/db/query/stats/stats_cache_loader_test_fixture.cpp index 1e353196b83cb..6df86986b7ecb 100644 --- a/src/mongo/db/query/stats/stats_cache_loader_test_fixture.cpp +++ b/src/mongo/db/query/stats/stats_cache_loader_test_fixture.cpp @@ -31,9 +31,14 @@ #include +#include "mongo/db/client.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/unittest/assert.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/stats_cache_loader_test_fixture.h b/src/mongo/db/query/stats/stats_cache_loader_test_fixture.h index 6c7d502fdf1b5..f5aa96bc805bf 100644 --- a/src/mongo/db/query/stats/stats_cache_loader_test_fixture.h +++ b/src/mongo/db/query/stats/stats_cache_loader_test_fixture.h @@ -29,9 +29,14 @@ #pragma once +#include +#include + #include "mongo/db/operation_context.h" #include "mongo/db/query/stats/stats_cache_loader.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/stats_cache_test.cpp b/src/mongo/db/query/stats/stats_cache_test.cpp index 14c859b8881aa..1f817bda46325 100644 --- a/src/mongo/db/query/stats/stats_cache_test.cpp +++ b/src/mongo/db/query/stats/stats_cache_test.cpp @@ -27,18 +27,27 @@ * it in the license file. */ +#include +#include +#include +#include #include +#include +#include -#include "mongo/db/client.h" -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/stats_cache.h" #include "mongo/db/query/stats/stats_cache_loader_mock.h" -#include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" #include "mongo/util/read_through_cache.h" -#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -51,7 +60,7 @@ using unittest::assertGet; * Fixture for tests, which do not need to exercise the multi-threading capabilities of the cache * and as such do not require control over the creation/destruction of their operation contexts. 
*/ -class StatsCacheTest : public LockerNoopServiceContextTest { +class StatsCacheTest : public ServiceContextTest { protected: // Extends StatsCache and automatically provides it with a thread pool, which will be // shutdown and joined before the StatsCache is destroyed (which is part of the contract of diff --git a/src/mongo/db/query/stats/stats_catalog.cpp b/src/mongo/db/query/stats/stats_catalog.cpp index 6029edc2be5e4..9d91f9f55add9 100644 --- a/src/mongo/db/query/stats/stats_catalog.cpp +++ b/src/mongo/db/query/stats/stats_catalog.cpp @@ -29,12 +29,22 @@ #include "mongo/db/query/stats/stats_catalog.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/query/stats/array_histogram.h" -#include "mongo/db/query/stats/collection_statistics.h" #include "mongo/db/query/stats/stats_cache.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" #include "mongo/util/read_through_cache.h" - -#include "mongo/logv2/log.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -87,7 +97,8 @@ StatusWith> StatsCatalog::getHistogram( try { auto handle = _statsCache.acquire(opCtx, std::make_pair(nss, path)); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "path " << nss << " : " << path << " not found", + str::stream() << "path " << nss.toStringForErrorMsg() << " : " << path + << " not found", handle); return *(handle.get()); diff --git a/src/mongo/db/query/stats/stats_catalog.h b/src/mongo/db/query/stats/stats_catalog.h index 2ecd3c6a2a308..df6b29009c268 100644 --- a/src/mongo/db/query/stats/stats_catalog.h +++ b/src/mongo/db/query/stats/stats_catalog.h @@ -29,11 +29,19 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/collection_statistics.h" #include "mongo/db/query/stats/stats_cache.h" #include "mongo/db/query/stats/stats_cache_loader.h" +#include "mongo/db/service_context.h" #include "mongo/util/concurrency/thread_pool.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/stats_path_test.cpp b/src/mongo/db/query/stats/stats_path_test.cpp index 03ba400cfeba1..5dbd1dde4fad1 100644 --- a/src/mongo/db/query/stats/stats_path_test.cpp +++ b/src/mongo/db/query/stats/stats_path_test.cpp @@ -27,14 +27,17 @@ * it in the license file. */ -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/exec/sbe/values/bson.h" +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/scalar_histogram.h" #include "mongo/db/query/stats/stats_gen.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::stats { namespace { diff --git a/src/mongo/db/query/stats/type_collision_test.cpp b/src/mongo/db/query/stats/type_collision_test.cpp index 6a2a64c51aad4..64e870f3032d5 100644 --- a/src/mongo/db/query/stats/type_collision_test.cpp +++ b/src/mongo/db/query/stats/type_collision_test.cpp @@ -27,11 +27,26 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/docval_to_sbeval.h" -#include "mongo/db/query/sbe_stage_builder_helpers.h" +#include "mongo/db/exec/sbe/values/value.h" +#include "mongo/db/query/stats/array_histogram.h" #include "mongo/db/query/stats/max_diff.h" #include "mongo/db/query/stats/value_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo::stats { TEST(TypeCollisionTest, ZeroedCollidingTypesHistogram) { diff --git a/src/mongo/db/query/stats/type_count_test.cpp b/src/mongo/db/query/stats/type_count_test.cpp index 68aff8ca1a027..87a34873b5650 100644 --- a/src/mongo/db/query/stats/type_count_test.cpp +++ b/src/mongo/db/query/stats/type_count_test.cpp @@ -27,8 +27,13 @@ * it in the license file. */ +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/exec/sbe/values/value.h" #include "mongo/db/query/stats/array_histogram.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::stats { diff --git a/src/mongo/db/query/stats/value_utils.cpp b/src/mongo/db/query/stats/value_utils.cpp index f0032bfc980d0..a4d89042a35d0 100644 --- a/src/mongo/db/query/stats/value_utils.cpp +++ b/src/mongo/db/query/stats/value_utils.cpp @@ -29,7 +29,18 @@ #include "mongo/db/query/stats/value_utils.h" -#include "mongo/db/query/stats/scalar_histogram.h" +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo::stats { namespace value = sbe::value; diff --git a/src/mongo/db/query/stats/value_utils.h b/src/mongo/db/query/stats/value_utils.h index 11a1f13b2ee51..fdb65573ac6ef 100644 --- a/src/mongo/db/query/stats/value_utils.h +++ b/src/mongo/db/query/stats/value_utils.h @@ -29,7 +29,15 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/sbe/values/value.h" +#include "mongo/util/time_support.h" namespace mongo::stats { /** diff --git a/src/mongo/db/query/str_trim_utils.cpp b/src/mongo/db/query/str_trim_utils.cpp new file mode 100644 index 0000000000000..4ec2354ea8062 --- /dev/null +++ b/src/mongo/db/query/str_trim_utils.cpp @@ -0,0 +1,135 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/query/str_trim_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/errno_util.h" +#include "mongo/util/str.h" + +namespace mongo::str_trim_utils { + +size_t numberOfBytesForCodePoint(char charByte) { + if ((charByte & 0b11111000) == 0b11110000) { + return 4; + } else if ((charByte & 0b11110000) == 0b11100000) { + return 3; + } else if ((charByte & 0b11100000) == 0b11000000) { + return 2; + } else { + return 1; + } +} + +std::vector extractCodePointsFromChars(StringData utf8String) { + std::vector codePoints; + std::size_t i = 0; + while (i < utf8String.size()) { + uassert(5156305, + str::stream() << "Failed to parse \"chars\" argument to " + << "$trim/$ltrim/$rtrim" + << ": Detected invalid UTF-8. Got continuation byte when expecting " + "the start of a new code point.", + !str::isUTF8ContinuationByte(utf8String[i])); + codePoints.push_back(utf8String.substr(i, numberOfBytesForCodePoint(utf8String[i]))); + i += numberOfBytesForCodePoint(utf8String[i]); + } + uassert(5156304, + str::stream() + << "Failed to parse \"chars\" argument to " + << "$trim/$ltrim/$rtrim" + << ": Detected invalid UTF-8. Missing expected continuation byte at end of string.", + i <= utf8String.size()); + return codePoints; +} + +bool codePointMatchesAtIndex(const StringData& input, + std::size_t indexOfInput, + const StringData& testCP) { + for (size_t i = 0; i < testCP.size(); ++i) { + if (indexOfInput + i >= input.size() || input[indexOfInput + i] != testCP[i]) { + return false; + } + } + return true; +}; + +StringData trimFromLeft(StringData input, const std::vector& trimCPs) { + std::size_t bytesTrimmedFromLeft = 0u; + while (bytesTrimmedFromLeft < input.size()) { + // Look for any matching code point to trim. + auto matchingCP = std::find_if(trimCPs.begin(), trimCPs.end(), [&](auto& testCP) { + return codePointMatchesAtIndex(input, bytesTrimmedFromLeft, testCP); + }); + if (matchingCP == trimCPs.end()) { + // Nothing to trim, stop here. + break; + } + bytesTrimmedFromLeft += matchingCP->size(); + } + return input.substr(bytesTrimmedFromLeft); +} + +StringData trimFromRight(StringData input, const std::vector& trimCPs) { + std::size_t bytesTrimmedFromRight = 0u; + while (bytesTrimmedFromRight < input.size()) { + std::size_t indexToTrimFrom = input.size() - bytesTrimmedFromRight; + auto matchingCP = std::find_if(trimCPs.begin(), trimCPs.end(), [&](auto& testCP) { + if (indexToTrimFrom < testCP.size()) { + // We've gone off the left of the string. + return false; + } + return codePointMatchesAtIndex(input, indexToTrimFrom - testCP.size(), testCP); + }); + if (matchingCP == trimCPs.end()) { + // Nothing to trim, stop here. 
+            break;
+        }
+        bytesTrimmedFromRight += matchingCP->size();
+    }
+    return input.substr(0, input.size() - bytesTrimmedFromRight);
+}
+
+StringData doTrim(StringData input,
+                  const std::vector& trimCPs,
+                  bool trimLeft,
+                  bool trimRight) {
+    if (trimLeft) {
+        input = trimFromLeft(input, trimCPs);
+    }
+    if (trimRight) {
+        input = trimFromRight(input, trimCPs);
+    }
+    return input;
+}
+
+}  // namespace mongo::str_trim_utils
diff --git a/src/mongo/db/query/str_trim_utils.h b/src/mongo/db/query/str_trim_utils.h
new file mode 100644
index 0000000000000..59209bc943df5
--- /dev/null
+++ b/src/mongo/db/query/str_trim_utils.h
@@ -0,0 +1,92 @@
+/**
+ * Copyright (C) 2023-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * .
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include
+#include
+
+#include "mongo/base/string_data.h"
+#include "mongo/config.h"  // IWYU pragma: keep
+#include "mongo/util/assert_util.h"
+#include "mongo/util/debug_util.h"
+
+namespace mongo::str_trim_utils {
+
+const std::vector<StringData> kDefaultTrimWhitespaceChars = {
+    "\0"_sd,      // Null character. Avoid using "\u0000" syntax to work around a gcc bug:
+                  // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=53690.
+    "\u0020"_sd,  // Space
+    "\u0009"_sd,  // Horizontal tab
+    "\u000A"_sd,  // Line feed/new line
+    "\u000B"_sd,  // Vertical tab
+    "\u000C"_sd,  // Form feed
+    "\u000D"_sd,  // Carriage return
+    "\u00A0"_sd,  // Non-breaking space
+    "\u1680"_sd,  // Ogham space mark
+    "\u2000"_sd,  // En quad
+    "\u2001"_sd,  // Em quad
+    "\u2002"_sd,  // En space
+    "\u2003"_sd,  // Em space
+    "\u2004"_sd,  // Three-per-em space
+    "\u2005"_sd,  // Four-per-em space
+    "\u2006"_sd,  // Six-per-em space
+    "\u2007"_sd,  // Figure space
+    "\u2008"_sd,  // Punctuation space
+    "\u2009"_sd,  // Thin space
+    "\u200A"_sd   // Hair space
+};
+
+/**
+ * Assuming 'charByte' is the beginning of a UTF-8 code point, returns the number of bytes that
+ * should be used to represent the code point. Said another way, computes how many continuation
+ * bytes are expected to be present after 'charByte' in a UTF-8 encoded string.
+ */
+size_t numberOfBytesForCodePoint(char charByte);
+
+/**
+ * Returns a vector with one entry per code point to trim, or throws an exception if 'utf8String'
+ * contains invalid UTF-8.
+ */ +std::vector extractCodePointsFromChars(StringData utf8String); + +bool codePointMatchesAtIndex(const StringData& input, + std::size_t indexOfInput, + const StringData& testCP); + +StringData trimFromLeft(StringData input, const std::vector& trimCPs); + +StringData trimFromRight(StringData input, const std::vector& trimCPs); + +StringData doTrim(StringData input, + const std::vector& trimCPs, + bool trimLeft, + bool trimRight); + +} // namespace mongo::str_trim_utils diff --git a/src/mongo/db/query/tailable_mode.cpp b/src/mongo/db/query/tailable_mode.cpp index c03e6c6e02664..74417e357d888 100644 --- a/src/mongo/db/query/tailable_mode.cpp +++ b/src/mongo/db/query/tailable_mode.cpp @@ -27,8 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/db/query/tailable_mode.h" namespace mongo { diff --git a/src/mongo/db/query/telemetry.cpp b/src/mongo/db/query/telemetry.cpp deleted file mode 100644 index a338942d4d801..0000000000000 --- a/src/mongo/db/query/telemetry.cpp +++ /dev/null @@ -1,811 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/query/telemetry.h" - -#include "mongo/crypto/hash_block.h" -#include "mongo/crypto/sha256_block.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/concurrency/locker.h" -#include "mongo/db/curop.h" -#include "mongo/db/exec/projection_executor_builder.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/pipeline/aggregate_command_gen.h" -#include "mongo/db/query/find_command_gen.h" -#include "mongo/db/query/plan_explainer.h" -#include "mongo/db/query/projection_ast_util.h" -#include "mongo/db/query/projection_parser.h" -#include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/query/query_planner_params.h" -#include "mongo/db/query/query_request_helper.h" -#include "mongo/db/query/rate_limiting.h" -#include "mongo/db/query/sort_pattern.h" -#include "mongo/db/query/telemetry_util.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/client_metadata.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/debug_util.h" -#include "mongo/util/processinfo.h" -#include "mongo/util/system_clock_source.h" -#include - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery - -namespace mongo { - -namespace telemetry { - -/** - * Redacts all BSONObj field names as if they were paths, unless the field name is a special hint - * operator. - */ -namespace { -static std::string hintSpecialField = "$hint"; -void addLiteralFieldsWithRedaction(BSONObjBuilder* bob, - const FindCommandRequest& findCommand, - StringData newLiteral) { - - if (findCommand.getLimit()) { - bob->append(FindCommandRequest::kLimitFieldName, newLiteral); - } - if (findCommand.getSkip()) { - bob->append(FindCommandRequest::kSkipFieldName, newLiteral); - } - if (findCommand.getBatchSize()) { - bob->append(FindCommandRequest::kBatchSizeFieldName, newLiteral); - } - if (findCommand.getMaxTimeMS()) { - bob->append(FindCommandRequest::kMaxTimeMSFieldName, newLiteral); - } - if (findCommand.getNoCursorTimeout()) { - bob->append(FindCommandRequest::kNoCursorTimeoutFieldName, newLiteral); - } -} - -void addLiteralFieldsWithoutRedaction(BSONObjBuilder* bob, const FindCommandRequest& findCommand) { - if (auto param = findCommand.getLimit()) { - bob->append(FindCommandRequest::kLimitFieldName, param.get()); - } - if (auto param = findCommand.getSkip()) { - bob->append(FindCommandRequest::kSkipFieldName, param.get()); - } - if (auto param = findCommand.getBatchSize()) { - bob->append(FindCommandRequest::kBatchSizeFieldName, param.get()); - } - if (auto param = findCommand.getMaxTimeMS()) { - bob->append(FindCommandRequest::kMaxTimeMSFieldName, param.get()); - } - if (findCommand.getNoCursorTimeout().has_value()) { - bob->append(FindCommandRequest::kNoCursorTimeoutFieldName, - findCommand.getNoCursorTimeout().value_or(false)); - } -} - - -static std::vector< - std::pair>> - boolArgMap = { - {FindCommandRequest::kSingleBatchFieldName, &FindCommandRequest::getSingleBatch}, - {FindCommandRequest::kAllowDiskUseFieldName, &FindCommandRequest::getAllowDiskUse}, - {FindCommandRequest::kReturnKeyFieldName, &FindCommandRequest::getReturnKey}, - {FindCommandRequest::kShowRecordIdFieldName, &FindCommandRequest::getShowRecordId}, - {FindCommandRequest::kTailableFieldName, &FindCommandRequest::getTailable}, - {FindCommandRequest::kAwaitDataFieldName, &FindCommandRequest::getAwaitData}, - {FindCommandRequest::kAllowPartialResultsFieldName, - &FindCommandRequest::getAllowPartialResults}, - {FindCommandRequest::kMirroredFieldName, 
&FindCommandRequest::getMirrored}, -}; -std::vector>> - objArgMap = { - {FindCommandRequest::kCollationFieldName, &FindCommandRequest::getCollation}, - -}; - -void addRemainingFindCommandFields(BSONObjBuilder* bob, const FindCommandRequest& findCommand) { - for (auto [fieldName, getterFunction] : boolArgMap) { - auto optBool = getterFunction(findCommand); - if (optBool.has_value()) { - bob->append(fieldName, optBool.value_or(false)); - } - } - if (auto optObj = findCommand.getReadConcern()) { - bob->append(FindCommandRequest::kReadConcernFieldName, optObj.get()); - } - auto collation = findCommand.getCollation(); - if (!collation.isEmpty()) { - bob->append(FindCommandRequest::kCollationFieldName, collation); - } -} -boost::optional getApplicationName(const OperationContext* opCtx) { - if (auto metadata = ClientMetadata::get(opCtx->getClient())) { - return metadata->getApplicationName().toString(); - } - return boost::none; -} -} // namespace -BSONObj redactHintComponent(BSONObj obj, const SerializationOptions& opts, bool redactValues) { - BSONObjBuilder bob; - for (BSONElement elem : obj) { - if (hintSpecialField.compare(elem.fieldName()) == 0) { - tassert(7421703, - "Hinted field must be a string with $hint operator", - elem.type() == BSONType::String); - bob.append(hintSpecialField, opts.serializeFieldPathFromString(elem.String())); - continue; - } - - if (opts.replacementForLiteralArgs && redactValues) { - bob.append(opts.serializeFieldPathFromString(elem.fieldName()), - opts.replacementForLiteralArgs.get()); - } else { - bob.appendAs(elem, opts.serializeFieldPathFromString(elem.fieldName())); - } - } - return bob.obj(); -} - -/** - * In a let specification all field names are variable names, and all values are either expressions - * or constants. - */ -BSONObj redactLetSpec(BSONObj letSpec, - const SerializationOptions& opts, - boost::intrusive_ptr expCtx) { - - BSONObjBuilder bob; - for (BSONElement elem : letSpec) { - auto redactedValue = - Expression::parseOperand(expCtx.get(), elem, expCtx->variablesParseState) - ->serialize(opts); - // Note that this will throw on deeply nested let variables. - redactedValue.addToBsonObj(&bob, opts.serializeFieldPathFromString(elem.fieldName())); - } - return bob.obj(); -} - -StatusWith makeTelemetryKey(const FindCommandRequest& findCommand, - const SerializationOptions& opts, - const boost::intrusive_ptr& expCtx, - boost::optional existingMetrics) { - // TODO: SERVER-75156 Factor query shape out of telemetry. That ticket will involve splitting - // this function up and moving most of it to another, non-telemetry related header. - - if (!opts.redactIdentifiers && !opts.replacementForLiteralArgs) { - // Short circuit if no redaction needs to be done. - BSONObjBuilder bob; - findCommand.serialize({}, &bob); - return bob.obj(); - } - - // This function enumerates all the fields in a find command and either copies or attempts to - // redact them. - BSONObjBuilder bob; - - // Serialize the namespace as part of the query shape. 
- { - BSONObjBuilder cmdNs = bob.subobjStart("cmdNs"); - auto ns = findCommand.getNamespaceOrUUID(); - if (ns.nss()) { - auto nss = ns.nss().value(); - if (nss.tenantId()) { - cmdNs.append("tenantId", - opts.serializeIdentifier(nss.tenantId().value().toString())); - } - cmdNs.append("db", opts.serializeIdentifier(nss.db())); - cmdNs.append("coll", opts.serializeIdentifier(nss.coll())); - } else { - cmdNs.append("uuid", opts.serializeIdentifier(ns.uuid()->toString())); - } - cmdNs.done(); - } - - // Redact the namespace of the command. - { - auto nssOrUUID = findCommand.getNamespaceOrUUID(); - std::string toSerialize; - if (nssOrUUID.uuid()) { - toSerialize = opts.serializeIdentifier(nssOrUUID.toString()); - } else { - // Database is set at the command level, only serialize the collection here. - toSerialize = opts.serializeIdentifier(nssOrUUID.nss()->coll()); - } - bob.append(FindCommandRequest::kCommandName, toSerialize); - } - - std::unique_ptr filterExpr; - // Filter. - { - auto filter = findCommand.getFilter(); - auto filterParsed = - MatchExpressionParser::parse(findCommand.getFilter(), - expCtx, - ExtensionsCallbackNoop(), - MatchExpressionParser::kAllowAllSpecialFeatures); - if (!filterParsed.isOK()) { - return filterParsed.getStatus(); - } - - filterExpr = std::move(filterParsed.getValue()); - bob.append(FindCommandRequest::kFilterFieldName, filterExpr->serialize(opts)); - } - - // Let Spec. - if (auto letSpec = findCommand.getLet()) { - auto redactedObj = redactLetSpec(letSpec.get(), opts, expCtx); - auto ownedObj = redactedObj.getOwned(); - bob.append(FindCommandRequest::kLetFieldName, std::move(ownedObj)); - } - - if (!findCommand.getProjection().isEmpty()) { - // Parse to Projection - auto projection = - projection_ast::parseAndAnalyze(expCtx, - findCommand.getProjection(), - filterExpr.get(), - findCommand.getFilter(), - ProjectionPolicies::findProjectionPolicies()); - - bob.append(FindCommandRequest::kProjectionFieldName, - projection_ast::serialize(*projection.root(), opts)); - } - - // Assume the hint is correct and contains field names. It is possible that this hint - // doesn't actually represent an index, but we can't detect that here. - // Hint, max, and min won't serialize if the object is empty. - if (!findCommand.getHint().isEmpty()) { - bob.append(FindCommandRequest::kHintFieldName, - redactHintComponent(findCommand.getHint(), opts, false)); - // Max/Min aren't valid without hint. - if (!findCommand.getMax().isEmpty()) { - bob.append(FindCommandRequest::kMaxFieldName, - redactHintComponent(findCommand.getMax(), opts, true)); - } - if (!findCommand.getMin().isEmpty()) { - bob.append(FindCommandRequest::kMinFieldName, - redactHintComponent(findCommand.getMin(), opts, true)); - } - } - - // Sort. - { - auto sortSpec = findCommand.getSort(); - if (!sortSpec.isEmpty()) { - auto sort = SortPattern(sortSpec, expCtx); - bob.append( - FindCommandRequest::kSortFieldName, - sort.serialize(SortPattern::SortKeySerialization::kForPipelineSerialization, opts) - .toBson()); - } - } - - // Fields for literal redaction. Adds limit, skip, batchSize, maxTimeMS, and noCursorTimeOut - if (opts.replacementForLiteralArgs) { - addLiteralFieldsWithRedaction(&bob, findCommand, opts.replacementForLiteralArgs.get()); - } else { - addLiteralFieldsWithoutRedaction(&bob, findCommand); - } - - // Add the fields that require no redaction. 
- addRemainingFindCommandFields(&bob, findCommand); - - - auto appName = [&]() -> boost::optional { - if (existingMetrics.has_value()) { - if (existingMetrics->applicationName.has_value()) { - return existingMetrics->applicationName; - } - } else { - if (auto appName = getApplicationName(expCtx->opCtx)) { - return appName.value(); - } - } - return boost::none; - }(); - if (appName.has_value()) { - bob.append("applicationName", opts.serializeIdentifier(appName.value())); - } - - return bob.obj(); -} - -CounterMetric telemetryStoreSizeEstimateBytesMetric("telemetry.telemetryStoreSizeEstimateBytes"); - -namespace { - -CounterMetric telemetryEvictedMetric("telemetry.numEvicted"); -CounterMetric telemetryRateLimitedRequestsMetric("telemetry.numRateLimitedRequests"); - -/** - * Cap the telemetry store size. - */ -size_t capTelemetryStoreSize(size_t requestedSize) { - size_t cappedStoreSize = memory_util::capMemorySize( - requestedSize /*requestedSizeBytes*/, 1 /*maximumSizeGB*/, 25 /*percentTotalSystemMemory*/); - // If capped size is less than requested size, the telemetry store has been capped at its - // upper limit. - if (cappedStoreSize < requestedSize) { - LOGV2_DEBUG(7106502, - 1, - "The telemetry store size has been capped", - "cappedSize"_attr = cappedStoreSize); - } - return cappedStoreSize; -} - -/** - * Get the telemetry store size based on the query job's value. - */ -size_t getTelemetryStoreSize() { - auto status = memory_util::MemorySize::parse(queryTelemetryStoreSize.get()); - uassertStatusOK(status); - size_t requestedSize = memory_util::convertToSizeInBytes(status.getValue()); - return capTelemetryStoreSize(requestedSize); -} - -/** - * A manager for the telemetry store allows a "pointer swap" on the telemetry store itself. The - * usage patterns are as follows: - * - * - Updating the telemetry store uses the `getTelemetryStore()` method. The telemetry store - * instance is obtained, entries are looked up and mutated, or created anew. - * - The telemetry store is "reset". This involves atomically allocating a new instance, once - * there are no more updaters (readers of the store "pointer"), and returning the existing - * instance. - */ -class TelemetryStoreManager { -public: - template - TelemetryStoreManager(size_t cacheSize, size_t numPartitions) - : _telemetryStore(std::make_unique(cacheSize, numPartitions)), - _maxSize(cacheSize) {} - - /** - * Acquire the instance of the telemetry store. - */ - TelemetryStore& getTelemetryStore() { - return *_telemetryStore; - } - - size_t getMaxSize() { - return _maxSize; - } - - /** - * Resize the telemetry store and return the number of evicted - * entries. - */ - size_t resetSize(size_t cacheSize) { - _maxSize = cacheSize; - return _telemetryStore->reset(cacheSize); - } - -private: - std::unique_ptr _telemetryStore; - - /** - * Max size of the telemetry store. Tracked here to avoid having to recompute after it's divided - * up into partitions. 
- */ - size_t _maxSize; -}; - -const auto telemetryStoreDecoration = - ServiceContext::declareDecoration>(); - -const auto telemetryRateLimiter = - ServiceContext::declareDecoration>(); - -class TelemetryOnParamChangeUpdaterImpl final : public telemetry_util::OnParamChangeUpdater { -public: - void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final { - auto requestedSize = memory_util::convertToSizeInBytes(memSize); - auto cappedSize = capTelemetryStoreSize(requestedSize); - auto& telemetryStoreManager = telemetryStoreDecoration(serviceCtx); - size_t numEvicted = telemetryStoreManager->resetSize(cappedSize); - telemetryEvictedMetric.increment(numEvicted); - } - - void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) { - telemetryRateLimiter(serviceCtx).get()->setSamplingRate(samplingRate); - } -}; - -ServiceContext::ConstructorActionRegisterer telemetryStoreManagerRegisterer{ - "TelemetryStoreManagerRegisterer", [](ServiceContext* serviceCtx) { - // It is possible that this is called before FCV is properly set up. Setting up the store if - // the flag is enabled but FCV is incorrect is safe, and guards against the FCV being - // changed to a supported version later. - // TODO SERVER-73907. Move this to run after FCV is initialized. It could be we'd have to - // re-run this function if FCV changes later during the life of the process. - if (!feature_flags::gFeatureFlagTelemetry.isEnabledAndIgnoreFCVUnsafeAtStartup()) { - // featureFlags are not allowed to be changed at runtime. Therefore it's not an issue - // to not create a telemetry store in ConstructorActionRegisterer at start up with the - // flag off - because the flag can not be turned on at any point afterwards. - telemetry_util::telemetryStoreOnParamChangeUpdater(serviceCtx) = - std::make_unique(); - return; - } - - telemetry_util::telemetryStoreOnParamChangeUpdater(serviceCtx) = - std::make_unique(); - size_t size = getTelemetryStoreSize(); - auto&& globalTelemetryStoreManager = telemetryStoreDecoration(serviceCtx); - // The plan cache and telemetry store should use the same number of partitions. - // That is, the number of cpu cores. - size_t numPartitions = ProcessInfo::getNumCores(); - size_t partitionBytes = size / numPartitions; - size_t metricsSize = sizeof(TelemetryMetrics); - if (partitionBytes < metricsSize * 10) { - numPartitions = size / metricsSize; - if (numPartitions < 1) { - numPartitions = 1; - } - } - globalTelemetryStoreManager = std::make_unique(size, numPartitions); - auto configuredSamplingRate = queryTelemetrySamplingRate.load(); - telemetryRateLimiter(serviceCtx) = std::make_unique( - configuredSamplingRate < 0 ? INT_MAX : configuredSamplingRate); - }}; - -/** - * Top-level checks for whether telemetry collection is enabled. If this returns false, we must go - * no further. - */ -bool isTelemetryEnabled(const ServiceContext* serviceCtx) { - // During initialization FCV may not yet be setup but queries could be run. We can't - // check whether telemetry should be enabled without FCV, so default to not recording - // those queries. - return serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gFeatureFlagTelemetry.isEnabled(serverGlobalParams.featureCompatibility) && - telemetryStoreDecoration(serviceCtx)->getMaxSize() > 0; -} - -/** - * Internal check for whether we should collect metrics. This checks the rate limiting - * configuration for a global on/off decision and, if enabled, delegates to the rate limiter. 
- */ -bool shouldCollect(const ServiceContext* serviceCtx) { - // Quick escape if telemetry is turned off. - if (!isTelemetryEnabled(serviceCtx)) { - return false; - } - // Cannot collect telemetry if sampling rate is not greater than 0. Note that we do not - // increment telemetryRateLimitedRequestsMetric here since telemetry is entirely disabled. - if (telemetryRateLimiter(serviceCtx)->getSamplingRate() <= 0) { - return false; - } - // Check if rate limiting allows us to collect telemetry for this request. - if (telemetryRateLimiter(serviceCtx)->getSamplingRate() < INT_MAX && - !telemetryRateLimiter(serviceCtx)->handleRequestSlidingWindow()) { - telemetryRateLimitedRequestsMetric.increment(); - return false; - } - return true; -} - -/** - * Add a field to the find op's telemetry key. The `value` will be redacted. - */ -void addToFindKey(BSONObjBuilder& builder, const StringData& fieldName, const BSONObj& value) { - serializeBSONWhenNotEmpty(value.redact(false), fieldName, &builder); -} - -/** - * Recognize FLE payloads in a query and throw an exception if found. - */ -void throwIfEncounteringFLEPayload(const BSONElement& e) { - constexpr auto safeContentLabel = "__safeContent__"_sd; - constexpr auto fieldpath = "$__safeContent__"_sd; - if (e.type() == BSONType::Object) { - auto fieldname = e.fieldNameStringData(); - uassert(ErrorCodes::EncounteredFLEPayloadWhileRedacting, - "Encountered __safeContent__, or an $_internalFle operator, which indicate a " - "rewritten FLE2 query.", - fieldname != safeContentLabel && !fieldname.startsWith("$_internalFle"_sd)); - } else if (e.type() == BSONType::String) { - auto val = e.valueStringData(); - uassert(ErrorCodes::EncounteredFLEPayloadWhileRedacting, - "Encountered $__safeContent__ fieldpath, which indicates a rewritten FLE2 query.", - val != fieldpath); - } else if (e.type() == BSONType::BinData && e.isBinData(BinDataType::Encrypt)) { - int len; - auto data = e.binData(len); - uassert(ErrorCodes::EncounteredFLEPayloadWhileRedacting, - "FLE1 Payload encountered in expression.", - len > 1 && data[1] != char(EncryptedBinDataType::kDeterministic)); - } -} - -/** - * Upon reading telemetry data, we redact some keys. This is the list. See - * TelemetryMetrics::redactKey(). - */ -const stdx::unordered_set kKeysToRedact = {"pipeline", "find"}; - -std::string sha256StringDataHasher(const StringData& fieldName) { - auto hash = SHA256Block::computeHash({ConstDataRange(fieldName.rawData(), fieldName.size())}); - return hash.toString().substr(0, 12); -} - -std::string sha256FieldNameHasher(const BSONElement& e) { - auto&& fieldName = e.fieldNameStringData(); - return sha256StringDataHasher(fieldName); -} - -std::string constantFieldNameHasher(const BSONElement& e) { - return {"###"}; -} - -/** - * Admittedly an abuse of the BSON redaction interface, we recognize FLE payloads here and avoid - * collecting telemetry for the query. - */ -std::string fleSafeFieldNameRedactor(const BSONElement& e) { - throwIfEncounteringFLEPayload(e); - // Ideally we would change interface to avoid copying here. - return e.fieldNameStringData().toString(); -} - -/** - * Append the element to the builder and redact any literals within the element. The element may be - * of any type. 
- */ -void appendWithRedactedLiterals(BSONObjBuilder& builder, const BSONElement& el) { - if (el.type() == Object) { - builder.append(el.fieldNameStringData(), el.Obj().redact(false, fleSafeFieldNameRedactor)); - } else if (el.type() == Array) { - BSONObjBuilder arrayBuilder = builder.subarrayStart(fleSafeFieldNameRedactor(el)); - for (auto&& arrayElem : el.Obj()) { - appendWithRedactedLiterals(arrayBuilder, arrayElem); - } - arrayBuilder.done(); - } else { - auto fieldName = fleSafeFieldNameRedactor(el); - builder.append(fieldName, "###"_sd); - } -} - -static const StringData replacementForLiteralArgs = "?"_sd; - -} // namespace - -StatusWith TelemetryMetrics::redactKey(const BSONObj& key, - bool redactIdentifiers, - OperationContext* opCtx) const { - // The redacted key for each entry is cached on first computation. However, if the redaction - // straegy has flipped (from no redaction to SHA256, vice versa), we just return the key passed - // to the function, so entries returned to the user match the redaction strategy requested in - // the most recent telemetry command. - if (!redactIdentifiers) { - return key; - } - if (_redactedKey) { - return *_redactedKey; - } - - if (cmdObj.hasField(FindCommandRequest::kCommandName)) { - tassert(7198600, "Find command must have a namespace string.", this->nss.nss().has_value()); - auto findCommand = - query_request_helper::makeFromFindCommand(cmdObj, this->nss.nss().value(), false); - - SerializationOptions options(sha256StringDataHasher, replacementForLiteralArgs); - auto nss = findCommand->getNamespaceOrUUID().nss(); - uassert(7349400, "Namespace must be defined", nss.has_value()); - auto expCtx = make_intrusive(opCtx, nullptr, nss.value()); - expCtx->variables.setDefaultRuntimeConstants(opCtx); - expCtx->maxFeatureCompatibilityVersion = boost::none; // Ensure all features are allowed. - expCtx->stopExpressionCounters(); - auto swRedactedKey = makeTelemetryKey(*findCommand, options, expCtx, *this); - if (!swRedactedKey.isOK()) { - return swRedactedKey.getStatus(); - } - _redactedKey = std::move(swRedactedKey.getValue()); - return *_redactedKey; - } - - // The telemetry key is of the following form: - // { "": {...}, "namespace": "...", "applicationName": "...", ... } - // - // The part of the key we need to redact is the object in the element. In the case of - // an aggregate() command, it will look something like: - // > "pipeline" : [ { "$telemetry" : {} }, - // { "$addFields" : { "x" : { "$someExpr" {} } } } ], - // We should preserve the top-level stage names in the pipeline but redact all field names of - // children. - // - // The find-specific key will look like so: - // > "find" : { "find" : "###", "filter" : { "_id" : { "$ne" : "###" } } }, - // Again, we should preserve the top-level keys and redact all field names of children. - BSONObjBuilder redacted; - for (BSONElement e : key) { - if ((e.type() == Object || e.type() == Array) && - kKeysToRedact.count(e.fieldNameStringData().toString()) == 1) { - auto redactor = [&](BSONObjBuilder subObj, const BSONObj& obj) { - for (BSONElement e2 : obj) { - if (e2.type() == Object) { - // Sha256 redaction strategy. - subObj.append(e2.fieldNameStringData(), - e2.Obj().redact(false, sha256FieldNameHasher)); - } else { - subObj.append(e2); - } - } - subObj.done(); - }; - - // Now we're inside the :{} entry and want to preserve the top-level field - // names. If it's a [pipeline] array, we redact each element in isolation. 
- if (e.type() == Object) { - redactor(redacted.subobjStart(e.fieldNameStringData()), e.Obj()); - } else { - BSONObjBuilder subArr = redacted.subarrayStart(e.fieldNameStringData()); - for (BSONElement stage : e.Obj()) { - redactor(subArr.subobjStart(""), stage.Obj()); - } - } - } else { - redacted.append(e); - } - } - _redactedKey = redacted.obj(); - return *_redactedKey; -} - -// The originating command/query does not persist through the end of query execution. In order to -// pair the telemetry metrics that are collected at the end of execution with the original query, it -// is necessary to register the original query during planning and persist it after -// execution. - -// During planning, registerAggRequest or registerFindRequest are called to serialize the query -// shape and context (together, the telemetry context) and save it to OpDebug. Moreover, as query -// execution may span more than one request/operation and OpDebug does not persist through cursor -// iteration, it is necessary to communicate the telemetry context across operations. In this way, -// the telemetry context is registered to the cursor, so upon getMore() calls, the cursor manager -// passes the telemetry key from the pinned cursor to the new OpDebug. - -// Once query execution is complete, the telemetry context is grabbed from OpDebug, a telemetry key -// is generated from this and metrics are paired to this key in the telemetry store. -void registerAggRequest(const AggregateCommandRequest& request, OperationContext* opCtx) { - if (!isTelemetryEnabled(opCtx->getServiceContext())) { - return; - } - - // Queries against metadata collections should never appear in telemetry data. - if (request.getNamespace().isFLE2StateCollection()) { - return; - } - - if (!shouldCollect(opCtx->getServiceContext())) { - return; - } - - BSONObjBuilder telemetryKey; - BSONObjBuilder pipelineBuilder = telemetryKey.subarrayStart("pipeline"_sd); - try { - for (auto&& stage : request.getPipeline()) { - BSONObjBuilder stageBuilder = pipelineBuilder.subobjStart("stage"_sd); - appendWithRedactedLiterals(stageBuilder, stage.firstElement()); - stageBuilder.done(); - } - pipelineBuilder.done(); - telemetryKey.append("namespace", request.getNamespace().toString()); - if (request.getReadConcern()) { - telemetryKey.append("readConcern", *request.getReadConcern()); - } - if (auto metadata = ClientMetadata::get(opCtx->getClient())) { - telemetryKey.append("applicationName", metadata->getApplicationName()); - } - } catch (ExceptionFor&) { - return; - } - - CurOp::get(opCtx)->debug().telemetryStoreKey = telemetryKey.obj(); -} - -void registerFindRequest(const FindCommandRequest& request, - const NamespaceString& collection, - OperationContext* opCtx, - const boost::intrusive_ptr& expCtx) { - if (!isTelemetryEnabled(opCtx->getServiceContext())) { - return; - } - - // Queries against metadata collections should never appear in telemetry data. 
- if (collection.isFLE2StateCollection()) { - return; - } - - if (!shouldCollect(opCtx->getServiceContext())) { - return; - } - - SerializationOptions options; - options.replacementForLiteralArgs = replacementForLiteralArgs; - auto swTelemetryKey = makeTelemetryKey(request, options, expCtx); - tassert(7349402, - str::stream() << "Error encountered when extracting query shape from command for " - "telemetry collection: " - << swTelemetryKey.getStatus().toString(), - swTelemetryKey.isOK()); - - CurOp::get(opCtx)->debug().telemetryStoreKey = std::move(swTelemetryKey.getValue()); -} - -TelemetryStore& getTelemetryStore(OperationContext* opCtx) { - uassert(6579000, - "Telemetry is not enabled without the feature flag on and a cache size greater than 0 " - "bytes", - isTelemetryEnabled(opCtx->getServiceContext())); - return telemetryStoreDecoration(opCtx->getServiceContext())->getTelemetryStore(); -} - -void writeTelemetry(OperationContext* opCtx, - boost::optional telemetryKey, - const BSONObj& cmdObj, - const uint64_t queryExecMicros, - const uint64_t docsReturned) { - if (!telemetryKey) { - return; - } - auto&& telemetryStore = getTelemetryStore(opCtx); - auto&& [statusWithMetrics, partitionLock] = telemetryStore.getWithPartitionLock(*telemetryKey); - std::shared_ptr metrics; - if (statusWithMetrics.isOK()) { - metrics = *statusWithMetrics.getValue(); - } else { - size_t numEvicted = - telemetryStore.put(*telemetryKey, - std::make_shared( - cmdObj, getApplicationName(opCtx), CurOp::get(opCtx)->getNSS()), - partitionLock); - telemetryEvictedMetric.increment(numEvicted); - auto newMetrics = partitionLock->get(*telemetryKey); - // This can happen if the budget is immediately exceeded. Specifically if the there is - // not enough room for a single new entry if the number of partitions is too high - // relative to the size. - tassert(7064700, "Should find telemetry store entry", newMetrics.isOK()); - metrics = newMetrics.getValue()->second; - } - - metrics->lastExecutionMicros = queryExecMicros; - metrics->execCount++; - metrics->queryExecMicros.aggregate(queryExecMicros); - metrics->docsReturned.aggregate(docsReturned); -} - -void collectMetricsOnOpDebug(CurOp* curOp, long long nreturned) { - auto&& opDebug = curOp->debug(); - opDebug.additiveMetrics.nreturned = nreturned; - // executionTime is set with the final executionTime in CurOp::completeAndLogOperation, but for - // telemetry collection we want it set before incrementing cursor metrics using AdditiveMetrics. - // The value set here will be overwritten later in CurOp::completeAndLogOperation. - opDebug.additiveMetrics.executionTime = curOp->elapsedTimeExcludingPauses(); -} -} // namespace telemetry -} // namespace mongo diff --git a/src/mongo/db/query/telemetry.h b/src/mongo/db/query/telemetry.h deleted file mode 100644 index d6a86e77e0f89..0000000000000 --- a/src/mongo/db/query/telemetry.h +++ /dev/null @@ -1,246 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/base/status.h" -#include "mongo/bson/bsonobj.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/curop.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/query/partitioned_cache.h" -#include "mongo/db/query/plan_explainer.h" -#include "mongo/db/query/util/memory_util.h" -#include "mongo/db/service_context.h" -#include -#include - -namespace mongo { - -class OpDebug; -class AggregateCommandRequest; -class FindCommandRequest; - -namespace { -/** - * Type we use to render values to BSON. - */ -using BSONNumeric = long long; -} // namespace - -namespace telemetry { - -/** - * An aggregated metric stores a compressed view of data. It balances the loss of information - * with the reduction in required storage. - */ -struct AggregatedMetric { - - /** - * Aggregate an observed value into the metric. - */ - void aggregate(uint64_t val) { - sum += val; - max = std::max(val, max); - min = std::min(val, min); - sumOfSquares += val * val; - } - - void appendTo(BSONObjBuilder& builder, const StringData& fieldName) const { - BSONObjBuilder metricsBuilder = builder.subobjStart(fieldName); - metricsBuilder.append("sum", (BSONNumeric)sum); - metricsBuilder.append("max", (BSONNumeric)max); - metricsBuilder.append("min", (BSONNumeric)min); - metricsBuilder.append("sumOfSquares", (BSONNumeric)sumOfSquares); - metricsBuilder.done(); - } - - uint64_t sum = 0; - // Default to the _signed_ maximum (which fits in unsigned range) because we cast to - // BSONNumeric when serializing. - uint64_t min = (uint64_t)std::numeric_limits::max; - uint64_t max = 0; - - /** - * The sum of squares along with (an externally stored) count will allow us to compute the - * variance/stddev. - */ - uint64_t sumOfSquares = 0; -}; - -extern CounterMetric telemetryStoreSizeEstimateBytesMetric; -// Used to aggregate the metrics for one telemetry key over all its executions. 
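[Editor's note] As the comment on sumOfSquares in the deleted header notes, storing the sum and sum of squares alongside an externally kept execution count is enough to recover the mean and variance of the observed values. A small self-contained illustration of that arithmetic follows, mirroring the struct above; the mean() and variance() helpers are additions for the example only, not part of the original header.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <iostream>
#include <limits>

struct AggregatedMetricSketch {
    void aggregate(std::uint64_t val) {
        sum += val;
        max = std::max(val, max);
        min = std::min(val, min);
        sumOfSquares += val * val;
    }

    // The count lives outside the struct (execCount on TelemetryMetrics).
    double mean(std::uint64_t count) const {
        return static_cast<double>(sum) / count;
    }
    double variance(std::uint64_t count) const {
        const double m = mean(count);
        return static_cast<double>(sumOfSquares) / count - m * m;  // E[X^2] - E[X]^2
    }

    std::uint64_t sum = 0;
    // Default to the signed maximum, matching the header above.
    std::uint64_t min = std::numeric_limits<std::int64_t>::max();
    std::uint64_t max = 0;
    std::uint64_t sumOfSquares = 0;
};

int main() {
    AggregatedMetricSketch queryExecMicros;
    for (std::uint64_t v : {100, 200, 600}) {
        queryExecMicros.aggregate(v);
    }
    std::cout << "mean=" << queryExecMicros.mean(3)
              << " stddev=" << std::sqrt(queryExecMicros.variance(3)) << "\n";
}
```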
-class TelemetryMetrics { -public: - TelemetryMetrics(const BSONObj& cmdObj, - boost::optional applicationName, - NamespaceStringOrUUID nss) - : firstSeenTimestamp(Date_t::now().toMillisSinceEpoch() / 1000, 0), - cmdObj(cmdObj.copy()), - applicationName(applicationName), - nss(nss) { - telemetryStoreSizeEstimateBytesMetric.increment(sizeof(TelemetryMetrics) + sizeof(BSONObj) + - cmdObj.objsize()); - } - - ~TelemetryMetrics() { - telemetryStoreSizeEstimateBytesMetric.decrement(sizeof(TelemetryMetrics) + sizeof(BSONObj) + - cmdObj.objsize()); - } - - BSONObj toBSON() const { - BSONObjBuilder builder{sizeof(TelemetryMetrics) + 100}; - builder.append("lastExecutionMicros", (BSONNumeric)lastExecutionMicros); - builder.append("execCount", (BSONNumeric)execCount); - queryExecMicros.appendTo(builder, "queryExecMicros"); - docsReturned.appendTo(builder, "docsReturned"); - builder.append("firstSeenTimestamp", firstSeenTimestamp); - return builder.obj(); - } - - /** - * Redact a given telemetry key and set _keySize. - */ - StatusWith redactKey(const BSONObj& key, - bool redactIdentifiers, - OperationContext* opCtx) const; - - /** - * Timestamp for when this query shape was added to the store. Set on construction. - */ - const Timestamp firstSeenTimestamp; - - /** - * Last execution time in microseconds. - */ - uint64_t lastExecutionMicros = 0; - - /** - * Number of query executions. - */ - uint64_t execCount = 0; - - AggregatedMetric queryExecMicros; - - AggregatedMetric docsReturned; - - /** - * A representative command for a given telemetry key. This is used to derive the redacted - * telemetry key at read-time. - */ - BSONObj cmdObj; - - /** - * The application name that is a part of the query shape. It is necessary to store this - * separately from the telemetry key since it exists on the OpCtx, not the cmdObj. - */ - boost::optional applicationName; - - NamespaceStringOrUUID nss; - -private: - /** - * We cache the redacted key the first time it's computed. - */ - mutable boost::optional _redactedKey; -}; - -struct TelemetryPartitioner { - // The partitioning function for use with the 'Partitioned' utility. - std::size_t operator()(const BSONObj& k, const std::size_t nPartitions) const { - return SimpleBSONObjComparator::Hasher()(k) % nPartitions; - } -}; - -struct TelemetryStoreEntryBudgetor { - size_t operator()(const BSONObj& key, const std::shared_ptr& value) { - // The buget estimator for pair in LRU cache accounts for size of value - // (TelemetryMetrics) size of the key, and size of the key's underlying data struture - // (BSONObj). - return sizeof(TelemetryMetrics) + sizeof(BSONObj) + key.objsize(); - } -}; - -using TelemetryStore = PartitionedCache, - TelemetryStoreEntryBudgetor, - TelemetryPartitioner, - SimpleBSONObjComparator::Hasher, - SimpleBSONObjComparator::EqualTo>; - -/** - * Acquire a reference to the global telemetry store. - */ -TelemetryStore& getTelemetryStore(OperationContext* opCtx); - -/** - * Register a request for telemetry collection. The telemetry machinery may decide not to - * collect anything but this should be called for all requests. The decision is made based on - * the feature flag and telemetry parameters such as rate limiting. - * - * The caller is still responsible for subsequently calling writeTelemetry() once the request is - * completed. - * - * Note that calling this affects internal state. It should be called once for each request for - * which telemetry may be collected. 
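[Editor's note] The TelemetryStore defined above is a PartitionedCache: TelemetryPartitioner selects a partition by hashing the shaped-query key modulo the partition count, and TelemetryStoreEntryBudgetor charges each entry sizeof(TelemetryMetrics) + sizeof(BSONObj) + key.objsize() against the per-partition budget, which is what drives eviction (the EvictEntries unit test further down expects roughly (cacheSize / numPartitions) / entrySize entries to survive per partition). The following is a simplified, self-contained sketch of the partition-selection and per-partition locking idea, using standard-library types rather than the real cache.

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <mutex>
#include <string>
#include <unordered_map>
#include <vector>

// Minimal stand-in for the partitioned store: N independently locked partitions,
// with the partition chosen by hash(key) % N, as in TelemetryPartitioner.
class PartitionedCounterStore {
public:
    explicit PartitionedCounterStore(std::size_t nPartitions) : _partitions(nPartitions) {}

    std::size_t partitionOf(const std::string& key) const {
        return std::hash<std::string>{}(key) % _partitions.size();
    }

    void increment(const std::string& key) {
        Partition& p = _partitions[partitionOf(key)];
        std::lock_guard<std::mutex> lk(p.mutex);  // only this partition is locked
        p.entries[key]++;
    }

private:
    struct Partition {
        std::mutex mutex;
        std::unordered_map<std::string, int> entries;
    };
    std::vector<Partition> _partitions;
};

int main() {
    PartitionedCounterStore store(2);
    const std::string shapeA = R"({"find": "coll", "filter": {"a": {"$eq": "?"}}})";
    const std::string shapeB = R"({"find": "coll", "filter": {"b": {"$eq": "?"}}})";
    store.increment(shapeA);
    store.increment(shapeB);
    std::cout << "shapeA lives in partition " << store.partitionOf(shapeA) << "\n";
}
```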
- */ -void registerAggRequest(const AggregateCommandRequest& request, OperationContext* opCtx); - -void registerFindRequest(const FindCommandRequest& request, - const NamespaceString& collection, - OperationContext* ocCtx, - const boost::intrusive_ptr& expCtx); - -/** - * Writes telemetry to the telemetry store for the operation identified by `telemetryKey`. - */ -void writeTelemetry(OperationContext* opCtx, - boost::optional telemetryKey, - const BSONObj& cmdObj, - uint64_t queryExecMicros, - uint64_t docsReturned); - -/** - * Serialize the FindCommandRequest according to the Options passed in. Returns the serialized BSON - * with all field names and literals redacted. - */ -StatusWith makeTelemetryKey( - const FindCommandRequest& findCommand, - const SerializationOptions& opts, - const boost::intrusive_ptr& expCtx, - boost::optional existingMetrics = boost::none); - -/** - * Collects metrics for telemetry from the current operation onto OpDebug. This must be called prior - * to incrementing metrics on cursors (either ClientCursor or ClusterClientCursor) since cursor - * metric aggregation happens via OpDebug::AdditiveMetrics. - */ -void collectMetricsOnOpDebug(CurOp* curOp, long long nreturned); -} // namespace telemetry -} // namespace mongo diff --git a/src/mongo/db/query/telemetry_store_test.cpp b/src/mongo/db/query/telemetry_store_test.cpp deleted file mode 100644 index d2e8e505008b3..0000000000000 --- a/src/mongo/db/query/telemetry_store_test.cpp +++ /dev/null @@ -1,587 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/db/catalog/rename_collection.h" -#include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/query/telemetry.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/inline_auto_update.h" -#include "mongo/unittest/unittest.h" - -namespace mongo::telemetry { - -class TelemetryStoreTest : public ServiceContextTest {}; - -TEST_F(TelemetryStoreTest, BasicUsage) { - TelemetryStore telStore{5000000, 1000}; - - auto getMetrics = [&](BSONObj& key) { - auto lookupResult = telStore.lookup(key); - return *lookupResult.getValue(); - }; - - auto collectMetrics = [&](BSONObj& key) { - std::shared_ptr metrics; - auto lookupResult = telStore.lookup(key); - if (!lookupResult.isOK()) { - telStore.put( - key, std::make_shared(BSONObj(), boost::none, NamespaceString{})); - lookupResult = telStore.lookup(key); - } - metrics = *lookupResult.getValue(); - metrics->execCount += 1; - metrics->lastExecutionMicros += 123456; - }; - - auto query1 = BSON("query" << 1 << "xEquals" << 42); - // same value, different instance (tests hashing & equality) - auto query1x = BSON("query" << 1 << "xEquals" << 42); - auto query2 = BSON("query" << 2 << "yEquals" << 43); - - collectMetrics(query1); - collectMetrics(query1); - collectMetrics(query1x); - collectMetrics(query2); - - ASSERT_EQ(getMetrics(query1)->execCount, 3); - ASSERT_EQ(getMetrics(query1x)->execCount, 3); - ASSERT_EQ(getMetrics(query2)->execCount, 1); - - auto collectMetricsWithLock = [&](BSONObj& key) { - auto [lookupResult, lock] = telStore.getWithPartitionLock(key); - auto metrics = *lookupResult.getValue(); - metrics->execCount += 1; - metrics->lastExecutionMicros += 123456; - }; - - collectMetricsWithLock(query1x); - collectMetricsWithLock(query2); - - ASSERT_EQ(getMetrics(query1)->execCount, 4); - ASSERT_EQ(getMetrics(query1x)->execCount, 4); - ASSERT_EQ(getMetrics(query2)->execCount, 2); - - int numKeys = 0; - - telStore.forEach( - [&](const BSONObj& key, const std::shared_ptr& entry) { numKeys++; }); - - ASSERT_EQ(numKeys, 2); -} - - -TEST_F(TelemetryStoreTest, EvictEntries) { - // This creates a telemetry store with 2 partitions, each with a size of 1200 bytes. - const auto cacheSize = 2400; - const auto numPartitions = 2; - TelemetryStore telStore{cacheSize, numPartitions}; - - for (int i = 0; i < 20; i++) { - auto query = BSON("query" + std::to_string(i) << 1 << "xEquals" << 42); - telStore.put(query, - std::make_shared(BSONObj(), boost::none, NamespaceString{})); - } - int numKeys = 0; - telStore.forEach( - [&](const BSONObj& key, const std::shared_ptr& entry) { numKeys++; }); - - int entriesPerPartition = (cacheSize / numPartitions) / (46 + sizeof(TelemetryMetrics)); - ASSERT_EQ(numKeys, entriesPerPartition * numPartitions); -} - -/** - * A default redaction strategy that generates easy to check results for testing purposes. 
- */ -std::string redactFieldNameForTest(StringData s) { - return str::stream() << "HASH<" << s << ">"; -} -TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestAllFields) { - auto expCtx = make_intrusive(); - FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl"))); - fcr.setFilter(BSON("a" << 1)); - SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - opts.redactIdentifiers = true; - opts.identifierRedactionPolicy = redactFieldNameForTest; - - auto redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - } - })", - redacted); - - // Add sort. - fcr.setSort(BSON("sortVal" << 1 << "otherSort" << -1)); - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - }, - "sort": { - "HASH": 1, - "HASH": -1 - } - })", - redacted); - - // Add inclusion projection. - fcr.setProjection(BSON("e" << true << "f" << true)); - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - }, - "projection": { - "HASH": true, - "HASH": true, - "HASH<_id>": true - }, - "sort": { - "HASH": 1, - "HASH": -1 - } - })", - redacted); - - // Add let. - fcr.setLet(BSON("var1" - << "$a" - << "var2" - << "const1")); - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - }, - "let": { - "HASH": "$HASH", - "HASH": { - "$const": "?" - } - }, - "projection": { - "HASH": true, - "HASH": true, - "HASH<_id>": true - }, - "sort": { - "HASH": 1, - "HASH": -1 - } - })", - redacted); - - // Add hinting fields. - fcr.setHint(BSON("z" << 1 << "c" << 1)); - fcr.setMax(BSON("z" << 25)); - fcr.setMin(BSON("z" << 80)); - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - }, - "let": { - "HASH": "$HASH", - "HASH": { - "$const": "?" - } - }, - "projection": { - "HASH": true, - "HASH": true, - "HASH<_id>": true - }, - "hint": { - "HASH": 1, - "HASH": 1 - }, - "max": { - "HASH": "?" - }, - "min": { - "HASH": "?" - }, - "sort": { - "HASH": 1, - "HASH": -1 - } - })", - redacted); - - // Add the literal redaction fields. - fcr.setLimit(5); - fcr.setSkip(2); - fcr.setBatchSize(25); - fcr.setMaxTimeMS(1000); - fcr.setNoCursorTimeout(false); - - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - }, - "let": { - "HASH": "$HASH", - "HASH": { - "$const": "?" - } - }, - "projection": { - "HASH": true, - "HASH": true, - "HASH<_id>": true - }, - "hint": { - "HASH": 1, - "HASH": 1 - }, - "max": { - "HASH": "?" - }, - "min": { - "HASH": "?" - }, - "sort": { - "HASH": 1, - "HASH": -1 - }, - "limit": "?", - "skip": "?", - "batchSize": "?", - "maxTimeMS": "?" 
- })", - redacted); - - // Add the fields that shouldn't be redacted. - fcr.setSingleBatch(true); - fcr.setAllowDiskUse(false); - fcr.setAllowPartialResults(true); - fcr.setAllowDiskUse(false); - fcr.setShowRecordId(true); - fcr.setAwaitData(false); - fcr.setMirrored(true); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - }, - "let": { - "HASH": "$HASH", - "HASH": { - "$const": "?" - } - }, - "projection": { - "HASH": true, - "HASH": true, - "HASH<_id>": true - }, - "hint": { - "HASH": 1, - "HASH": 1 - }, - "max": { - "HASH": "?" - }, - "min": { - "HASH": "?" - }, - "sort": { - "HASH": 1, - "HASH": -1 - }, - "limit": "?", - "skip": "?", - "batchSize": "?", - "maxTimeMS": "?" - })", - redacted); -} -TEST_F(TelemetryStoreTest, CorrectlyRedactsFindCommandRequestEmptyFields) { - auto expCtx = make_intrusive(); - FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl"))); - fcr.setFilter(BSONObj()); - fcr.setSort(BSONObj()); - fcr.setProjection(BSONObj()); - SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - opts.redactIdentifiers = true; - opts.identifierRedactionPolicy = redactFieldNameForTest; - - auto redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": {} - })", - redacted); // NOLINT (test auto-update) -} - -TEST_F(TelemetryStoreTest, CorrectlyRedactsHintsWithOptions) { - auto expCtx = make_intrusive(); - FindCommandRequest fcr(NamespaceStringOrUUID(NamespaceString("testDB.testColl"))); - fcr.setFilter(BSON("b" << 1)); - SerializationOptions opts; - opts.replacementForLiteralArgs = "?"; - fcr.setHint(BSON("z" << 1 << "c" << 1)); - fcr.setMax(BSON("z" << 25)); - fcr.setMin(BSON("z" << 80)); - - auto redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "testDB", - "coll": "testColl" - }, - "find": "testColl", - "filter": { - "b": { - "$eq": "?" - } - }, - "hint": { - "z": 1, - "c": 1 - }, - "max": { - "z": "?" - }, - "min": { - "z": "?" - } - })", - redacted); - // Test with a string hint. Note that this is the internal representation of the string hint - // generated at parse time. - fcr.setHint(BSON("$hint" - << "z")); - - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "testDB", - "coll": "testColl" - }, - "find": "testColl", - "filter": { - "b": { - "$eq": "?" - } - }, - "hint": { - "$hint": "z" - }, - "max": { - "z": "?" - }, - "min": { - "z": "?" 
- } - })", - redacted); - - fcr.setHint(BSON("z" << 1 << "c" << 1)); - opts.identifierRedactionPolicy = redactFieldNameForTest; - opts.redactIdentifiers = true; - opts.replacementForLiteralArgs = boost::none; - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": 1 - } - }, - "hint": { - "HASH": 1, - "HASH": 1 - }, - "max": { - "HASH": 25 - }, - "min": { - "HASH": 80 - } - })", - redacted); - - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": 1 - } - }, - "hint": { - "HASH": 1, - "HASH": 1 - }, - "max": { - "HASH": 25 - }, - "min": { - "HASH": 80 - } - })", - redacted); - - opts.replacementForLiteralArgs = "?"; - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - }, - "hint": { - "HASH": 1, - "HASH": 1 - }, - "max": { - "HASH": "?" - }, - "min": { - "HASH": "?" - } - })", - redacted); - - redacted = uassertStatusOK(telemetry::makeTelemetryKey(fcr, opts, expCtx)); - ASSERT_BSONOBJ_EQ_AUTO( // NOLINT - R"({ - "cmdNs": { - "db": "HASH", - "coll": "HASH" - }, - "find": "HASH", - "filter": { - "HASH": { - "$eq": "?" - } - }, - "hint": { - "HASH": 1, - "HASH": 1 - }, - "max": { - "HASH": "?" - }, - "min": { - "HASH": "?" - } - })", - redacted); -} - -} // namespace mongo::telemetry diff --git a/src/mongo/db/query/telemetry_util.cpp b/src/mongo/db/query/telemetry_util.cpp deleted file mode 100644 index eeaf7da71e6f0..0000000000000 --- a/src/mongo/db/query/telemetry_util.cpp +++ /dev/null @@ -1,97 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/query/telemetry_util.h" - -#include "mongo/base/status.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/query/partitioned_cache.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/util/memory_util.h" -#include "mongo/db/service_context.h" -#include "mongo/logv2/log.h" - - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery - -namespace mongo::telemetry_util { - -namespace { -/** - * Given the current 'Client', returns a pointer to the 'ServiceContext' and an interface for - * updating the telemetry store. - */ -std::pair getUpdater(const Client& client) { - auto serviceCtx = client.getServiceContext(); - tassert(7106500, "ServiceContext must be non null", serviceCtx); - - auto updater = telemetryStoreOnParamChangeUpdater(serviceCtx).get(); - tassert(7106501, "Telemetry store size updater must be non null", updater); - return {serviceCtx, updater}; -} -} // namespace - - -Status onTelemetryStoreSizeUpdate(const std::string& str) { - auto newSize = memory_util::MemorySize::parse(str); - if (!newSize.isOK()) { - return newSize.getStatus(); - } - - // The client is nullptr if the parameter is supplied from the command line. In this case, we - // ignore the update event, the parameter will be processed when initializing the service - // context. - if (auto client = Client::getCurrent()) { - auto&& [serviceCtx, updater] = getUpdater(*client); - updater->updateCacheSize(serviceCtx, newSize.getValue()); - } - - return Status::OK(); -} - -Status validateTelemetryStoreSize(const std::string& str, const boost::optional&) { - return memory_util::MemorySize::parse(str).getStatus(); -} - -Status onTelemetrySamplingRateUpdate(int samplingRate) { - // The client is nullptr if the parameter is supplied from the command line. In this case, we - // ignore the update event, the parameter will be processed when initializing the service - // context. - if (auto client = Client::getCurrent()) { - auto&& [serviceCtx, updater] = getUpdater(*client); - updater->updateSamplingRate(serviceCtx, samplingRate < 0 ? INT_MAX : samplingRate); - } - - return Status::OK(); -} - -const Decorable::Decoration> - telemetryStoreOnParamChangeUpdater = - ServiceContext::declareDecoration>(); -} // namespace mongo::telemetry_util diff --git a/src/mongo/db/query/telemetry_util.h b/src/mongo/db/query/telemetry_util.h deleted file mode 100644 index c8fc37dc5c44f..0000000000000 --- a/src/mongo/db/query/telemetry_util.h +++ /dev/null @@ -1,93 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. 
You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/base/status.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/query/partitioned_cache.h" -#include "mongo/db/query/util/memory_util.h" - - -namespace mongo::telemetry_util { - -Status onTelemetryStoreSizeUpdate(const std::string& str); - - -Status validateTelemetryStoreSize(const std::string& str, const boost::optional&); - -Status onTelemetrySamplingRateUpdate(int samplingRate); - -/** - * An interface used to modify the telemetry store when query setParameters are modified. This is - * done via an interface decorating the 'ServiceContext' in order to avoid a link-time dependency - * of the query knobs library on the telemetry code. - */ -class OnParamChangeUpdater { -public: - virtual ~OnParamChangeUpdater() = default; - - /** - * Resizes the telemetry store decorating 'serviceCtx' to the new size given by 'memSize'. If - * the new size is smaller than the old, cache entries are evicted in order to ensure the - * cache fits within the new size bound. - */ - virtual void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) = 0; - - /** - * Updates the sampling rate for the telemetry rate limiter. - */ - virtual void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) = 0; -}; - -/** - * A stub implementation that does not allow changing any parameters - to be used if the telemetry - * store is disabled and cannot be re-enabled without restarting, as with a feature flag. - */ -class NoChangesAllowedTelemetryParamUpdater : public OnParamChangeUpdater { -public: - void updateCacheSize(ServiceContext* serviceCtx, memory_util::MemorySize memSize) final { - uasserted(7373500, - "Cannot configure telemetry store - it is currently disabled and a restart is " - "required to activate."); - } - - void updateSamplingRate(ServiceContext* serviceCtx, int samplingRate) { - uasserted(7506200, - "Cannot configure telemetry store - it is currently disabled and a restart is " - "required to activate."); - } -}; - -/** - * Decorated accessor to the 'OnParamChangeUpdater' stored in 'ServiceContext'. - */ -extern const Decorable::Decoration> - telemetryStoreOnParamChangeUpdater; -} // namespace mongo::telemetry_util diff --git a/src/mongo/db/query/util/make_data_structure.h b/src/mongo/db/query/util/make_data_structure.h index 91e4cc709b1e1..098d14b8bf597 100644 --- a/src/mongo/db/query/util/make_data_structure.h +++ b/src/mongo/db/query/util/make_data_structure.h @@ -93,6 +93,23 @@ auto makeVector(Args&&... args) { return v; } +template +void addExprIfNotNull(std::vector& v, U&& expr) { + if (expr) { + v.push_back(std::forward(expr)); + } +} + +/** + * Creates a vector of which all elements are not null. + */ +template > +auto makeVectorIfNotNull(Args&&... args) { + std::vector v; + (addExprIfNotNull(v, std::forward(args)), ...); + return v; +} + /** * Create a list. unlike an initializer list, this function will allow passing elements by Rvalue * reference. 
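[Editor's note] For the makeVectorIfNotNull() helper added to make_data_structure.h above, here is a hedged usage sketch. The template parameter lists shown are a plausible reconstruction for illustration only; the point is that the fold over addExprIfNotNull skips null expressions, so the resulting vector contains only non-null elements.

```cpp
#include <iostream>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>

// Reconstructed sketch of the helpers added above: push an expression only when it is
// non-null, so the resulting vector never contains empty elements.
template <typename T, typename U>
void addExprIfNotNull(std::vector<T>& v, U&& expr) {
    if (expr) {
        v.push_back(std::forward<U>(expr));
    }
}

template <typename... Args, typename T = std::common_type_t<Args...>>
auto makeVectorIfNotNull(Args&&... args) {
    std::vector<T> v;
    (addExprIfNotNull(v, std::forward<Args>(args)), ...);
    return v;
}

int main() {
    auto a = std::make_shared<int>(1);
    std::shared_ptr<int> b;  // null, should be skipped
    auto c = std::make_shared<int>(3);

    auto v = makeVectorIfNotNull(a, b, c);
    std::cout << v.size() << "\n";  // prints 2
}
```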
If an argument is dereferenceable (operator*) to produce the new list's element type, diff --git a/src/mongo/db/query/util/memory_util.cpp b/src/mongo/db/query/util/memory_util.cpp index dbd4f23bee109..c2ac09a4ff553 100644 --- a/src/mongo/db/query/util/memory_util.cpp +++ b/src/mongo/db/query/util/memory_util.cpp @@ -27,10 +27,17 @@ * it in the license file. */ #include "mongo/db/query/util/memory_util.h" -#include "mongo/logv2/log.h" + +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/util/assert_util.h" #include "mongo/util/pcre.h" #include "mongo/util/processinfo.h" -#include #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/db/query/util/memory_util.h b/src/mongo/db/query/util/memory_util.h index 345780b4c84d5..29c9cdc043f86 100644 --- a/src/mongo/db/query/util/memory_util.h +++ b/src/mongo/db/query/util/memory_util.h @@ -29,6 +29,7 @@ #pragma once +#include #include #include "mongo/base/error_codes.h" diff --git a/src/mongo/db/query/util/memory_util_test.cpp b/src/mongo/db/query/util/memory_util_test.cpp index 78f7b3098d678..832f952d84f82 100644 --- a/src/mongo/db/query/util/memory_util_test.cpp +++ b/src/mongo/db/query/util/memory_util_test.cpp @@ -29,7 +29,11 @@ #include "mongo/db/query/util/memory_util.h" -#include "mongo/unittest/unittest.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::memory_util { diff --git a/src/mongo/db/query/view_response_formatter.cpp b/src/mongo/db/query/view_response_formatter.cpp index d169aa61c740e..1af1614bfeb91 100644 --- a/src/mongo/db/query/view_response_formatter.cpp +++ b/src/mongo/db/query/view_response_formatter.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/query/view_response_formatter.h" +#include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/view_response_formatter.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/query/view_response_formatter.h b/src/mongo/db/query/view_response_formatter.h index 5146efd522908..a791dde860a59 100644 --- a/src/mongo/db/query/view_response_formatter.h +++ b/src/mongo/db/query/view_response_formatter.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/tenant_id.h" namespace mongo { diff --git a/src/mongo/db/query/view_response_formatter_test.cpp b/src/mongo/db/query/view_response_formatter_test.cpp index f42ddd71898c0..586cb9a38f9ba 100644 --- a/src/mongo/db/query/view_response_formatter_test.cpp +++ b/src/mongo/db/query/view_response_formatter_test.cpp @@ -27,16 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/json.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/view_response_formatter.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -63,7 +73,7 @@ TEST(ViewResponseFormatter, FormatSubsequentCountResponseSuccessfully) { TEST(ViewResponseFormatter, FormatInitialCountResponseWithTenantIdSuccessfully) { const TenantId tenantId(OID::gen()); const NamespaceString nss = - NamespaceString::createNamespaceString_forTest(tenantId, testNss.toString()); + NamespaceString::createNamespaceString_forTest(tenantId, testNss.toString_forTest()); RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); @@ -121,7 +131,7 @@ TEST(ViewResponseFormatter, FormatSubsequentDistinctResponseSuccessfully) { TEST(ViewResponseFormatter, FormatInitialDistinctResponseWithTenantIdSuccessfully) { const TenantId tenantId(OID::gen()); const NamespaceString nss = - NamespaceString::createNamespaceString_forTest(tenantId, testNss.toString()); + NamespaceString::createNamespaceString_forTest(tenantId, testNss.toString_forTest()); RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); diff --git a/src/mongo/db/query/wildcard_multikey_paths.cpp b/src/mongo/db/query/wildcard_multikey_paths.cpp index fe87babee8da9..9047659873d14 100644 --- a/src/mongo/db/query/wildcard_multikey_paths.cpp +++ b/src/mongo/db/query/wildcard_multikey_paths.cpp @@ -27,15 +27,45 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/wildcard_multikey_paths.h" - +#include +#include +#include +#include + +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_metadata_access_stats.h" #include "mongo/db/index/wildcard_access_method.h" #include "mongo/db/index_names.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/wildcard_multikey_paths.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" /** * A wildcard index contains an unbounded set of multikey paths, therefore, it was decided to store @@ -137,18 +167,20 @@ static BSONObj buildIndexBoundsKeyPattern(const BSONObj& wiKeyPattern) { * Retrieves from the wildcard index the set of multikey path metadata keys bounded by * 'indexBounds'. Returns the set of multikey paths represented by the keys. */ -static std::set getWildcardMultikeyPathSetHelper(const WildcardAccessMethod* wam, - OperationContext* opCtx, +static std::set getWildcardMultikeyPathSetHelper(OperationContext* opCtx, + const IndexCatalogEntry* entry, const IndexBounds& indexBounds, MultikeyMetadataAccessStats* stats) { + const WildcardAccessMethod* wam = + static_cast(entry->accessMethod()); return writeConflictRetry( - opCtx, "wildcard multikey path retrieval", "", [&]() -> std::set { + opCtx, "wildcard multikey path retrieval", NamespaceString(), [&]() -> std::set { stats->numSeeks = 0; stats->keysExamined = 0; auto cursor = wam->newCursor(opCtx); constexpr int kForward = 1; - const auto keyPattern = buildIndexBoundsKeyPattern(wam->getKeyPattern()); + const auto keyPattern = buildIndexBoundsKeyPattern(entry->descriptor()->keyPattern()); IndexBoundsChecker checker(&indexBounds, keyPattern, kForward); IndexSeekPoint seekPoint; if (!checker.getStartSeekPoint(&seekPoint)) { @@ -318,14 +350,15 @@ static IndexBounds buildMetadataKeysIndexBounds(const BSONObj& keyPattern, return indexBounds; } -std::set getWildcardMultikeyPathSet(const WildcardAccessMethod* wam, - OperationContext* opCtx, +std::set getWildcardMultikeyPathSet(OperationContext* opCtx, + const IndexCatalogEntry* entry, const stdx::unordered_set& fieldSet, MultikeyMetadataAccessStats* stats) { tassert(7354610, "stats must be non-null", stats); - const auto& indexBounds = buildMetadataKeysIndexBounds(wam->getKeyPattern(), fieldSet); - return getWildcardMultikeyPathSetHelper(wam, opCtx, indexBounds, stats); + const auto& indexBounds = + buildMetadataKeysIndexBounds(entry->descriptor()->keyPattern(), fieldSet); + return getWildcardMultikeyPathSetHelper(opCtx, entry, indexBounds, stats); } /** @@ -352,18 +385,20 @@ static std::pair 
buildMetadataKeyRange(const BSONObj& keyPatte return std::make_pair(rangeBeginBuilder.obj(), rangeEndBuilder.obj()); } -std::set getWildcardMultikeyPathSet(const WildcardAccessMethod* wam, - OperationContext* opCtx, +std::set getWildcardMultikeyPathSet(OperationContext* opCtx, + const IndexCatalogEntry* entry, MultikeyMetadataAccessStats* stats) { - return writeConflictRetry(opCtx, "wildcard multikey path retrieval", "", [&]() { + return writeConflictRetry(opCtx, "wildcard multikey path retrieval", NamespaceString(), [&]() { tassert(7354611, "stats must be non-null", stats); stats->numSeeks = 0; stats->keysExamined = 0; + const WildcardAccessMethod* wam = + static_cast(entry->accessMethod()); auto cursor = wam->newCursor(opCtx); const auto [metadataKeyRangeBegin, metadataKeyRangeEnd] = - buildMetadataKeyRange(wam->getKeyPattern()); + buildMetadataKeyRange(entry->descriptor()->keyPattern()); constexpr bool inclusive = true; cursor->setEndPosition(metadataKeyRangeEnd, inclusive); diff --git a/src/mongo/db/query/wildcard_multikey_paths.h b/src/mongo/db/query/wildcard_multikey_paths.h index 0bad9f9656fcd..f0c43df4f7e6d 100644 --- a/src/mongo/db/query/wildcard_multikey_paths.h +++ b/src/mongo/db/query/wildcard_multikey_paths.h @@ -31,6 +31,7 @@ #include #include +#include #include "mongo/db/field_ref.h" #include "mongo/stdx/unordered_set.h" @@ -41,8 +42,8 @@ struct IndexBounds; struct IndexKeyEntry; struct Interval; struct MultikeyMetadataAccessStats; +class IndexCatalogEntry; class OperationContext; -class WildcardAccessMethod; /** * Returns an exact set or super-set of the bounds required to fetch the multikey metadata keys @@ -54,8 +55,8 @@ std::vector getMultikeyPathIndexIntervalsForField(FieldRef field); * Returns the intersection of 'fields' and the set of multikey metadata paths stored in the * wildcard index. Statistics reporting index seeks and keys examined are written to 'stats'. */ -std::set getWildcardMultikeyPathSet(const WildcardAccessMethod* wam, - OperationContext* opCtx, +std::set getWildcardMultikeyPathSet(OperationContext* opCtx, + const IndexCatalogEntry* entry, const stdx::unordered_set& fieldSet, MultikeyMetadataAccessStats* stats); @@ -63,8 +64,8 @@ std::set getWildcardMultikeyPathSet(const WildcardAccessMethod* wam, * Returns the set of all paths for which the wildcard index has multikey metadata keys. * Statistics reporting index seeks and keys examined are written to 'stats'. */ -std::set getWildcardMultikeyPathSet(const WildcardAccessMethod* wam, - OperationContext* opCtx, +std::set getWildcardMultikeyPathSet(OperationContext* opCtx, + const IndexCatalogEntry* entry, MultikeyMetadataAccessStats* stats); } // namespace mongo diff --git a/src/mongo/db/query/yield_policy_callbacks_impl.cpp b/src/mongo/db/query/yield_policy_callbacks_impl.cpp index ac1634086e2bb..3ac5650681932 100644 --- a/src/mongo/db/query/yield_policy_callbacks_impl.cpp +++ b/src/mongo/db/query/yield_policy_callbacks_impl.cpp @@ -27,13 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/query/yield_policy_callbacks_impl.h" +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/query/yield_policy_callbacks_impl.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -61,8 +67,8 @@ void YieldPolicyCallbacksImpl::duringYield(OperationContext* opCtx) const { } }, [nss](const BSONObj& config) { - StringData ns = config.getStringField("namespace"); - return ns.empty() || ns == nss.ns(); + const auto fpNss = NamespaceStringUtil::parseFailPointData(config, "namespace"); + return fpNss.isEmpty() || fpNss == nss; }); }; failPointHang(&setYieldAllLocksHang); @@ -71,8 +77,8 @@ void YieldPolicyCallbacksImpl::duringYield(OperationContext* opCtx) const { setYieldAllLocksWait.executeIf( [&](const BSONObj& data) { sleepFor(Milliseconds(data["waitForMillis"].numberInt())); }, [&](const BSONObj& config) { - BSONElement dataNs = config["namespace"]; - return !dataNs || _nss.ns() == dataNs.str(); + const auto fpNss = NamespaceStringUtil::parseFailPointData(config, "namespace"); + return fpNss.isEmpty() || _nss == fpNss; }); } diff --git a/src/mongo/db/query/yield_policy_callbacks_impl.h b/src/mongo/db/query/yield_policy_callbacks_impl.h index e0fdc6588c8ee..c89faa80fe4f3 100644 --- a/src/mongo/db/query/yield_policy_callbacks_impl.h +++ b/src/mongo/db/query/yield_policy_callbacks_impl.h @@ -29,9 +29,9 @@ #pragma once -#include "mongo/db/query/plan_yield_policy.h" - #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_yield_policy.h" namespace mongo { diff --git a/src/mongo/db/read_concern.cpp b/src/mongo/db/read_concern.cpp index 30a7f9c502bfe..a0cb9d6f2b065 100644 --- a/src/mongo/db/read_concern.cpp +++ b/src/mongo/db/read_concern.cpp @@ -28,6 +28,9 @@ */ #include "mongo/db/read_concern.h" + +#include + #include "mongo/base/shim.h" #include "mongo/db/repl/speculative_majority_read_info.h" diff --git a/src/mongo/db/read_concern.h b/src/mongo/db/read_concern.h index c1acc6f5d0669..d3a3f140aebb7 100644 --- a/src/mongo/db/read_concern.h +++ b/src/mongo/db/read_concern.h @@ -29,18 +29,22 @@ #pragma once +#include "mongo/base/status.h" #include "mongo/db/database_name.h" #include "mongo/util/duration.h" namespace mongo { class BSONObj; + class OperationContext; class Status; template class StatusWith; + enum class PrepareConflictBehavior; class StringData; + namespace repl { class ReadConcernArgs; class SpeculativeMajorityReadInfo; diff --git a/src/mongo/db/read_concern_mongod.cpp b/src/mongo/db/read_concern_mongod.cpp index 381f6c7963ef9..895bbf41e50d9 100644 --- a/src/mongo/db/read_concern_mongod.cpp +++ b/src/mongo/db/read_concern_mongod.cpp @@ -28,28 +28,71 @@ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/shim.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include 
"mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" -#include "mongo/db/read_concern.h" #include "mongo/db/read_concern_mongod_gen.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/speculative_majority_read_info.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/vector_clock.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/notification.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -287,11 +330,11 @@ Status waitForReadConcernImpl(OperationContext* opCtx, const repl::ReadConcernArgs& readConcernArgs, const DatabaseName& dbName, bool allowAfterClusterTime) { - // If we are in a direct client within a transaction, then we may be holding locks, so it is - // illegal to wait for read concern. This is fine, since the outer operation should have handled - // waiting for read concern. We don't want to ignore prepare conflicts because reads in - // transactions should block on prepared transactions. - if (opCtx->getClient()->isInDirectClient() && opCtx->inMultiDocumentTransaction()) { + // If we are in a direct client that's holding a global lock, then this means it is illegal to + // wait for read concern. This is fine, since the outer operation should have handled waiting + // for read concern. We don't want to ignore prepare conflicts because reads in transactions + // should block on prepared transactions. + if (opCtx->getClient()->isInDirectClient() && opCtx->lockState()->isLocked()) { return Status::OK(); } @@ -461,6 +504,11 @@ Status waitForReadConcernImpl(OperationContext* opCtx, Status waitForLinearizableReadConcernImpl(OperationContext* opCtx, const Milliseconds readConcernTimeout) { + // If we are in a direct client that's holding a global lock, then this means this is a + // sub-operation of the parent. In this case we delegate the wait to the parent. 
+ if (opCtx->getClient()->isInDirectClient() && opCtx->lockState()->isLocked()) { + return Status::OK(); + } CurOpFailpointHelpers::waitWhileFailPointEnabled( &hangBeforeLinearizableReadConcern, opCtx, "hangBeforeLinearizableReadConcern", [opCtx]() { LOGV2(20994, @@ -473,7 +521,7 @@ Status waitForLinearizableReadConcernImpl(OperationContext* opCtx, { AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); - if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) { + if (!replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)) { return {ErrorCodes::NotWritablePrimary, "No longer primary when waiting for linearizable read concern"}; } @@ -489,10 +537,7 @@ Status waitForLinearizableReadConcernImpl(OperationContext* opCtx, } writeConflictRetry( - opCtx, - "waitForLinearizableReadConcern", - NamespaceString::kRsOplogNamespace.ns(), - [&opCtx] { + opCtx, "waitForLinearizableReadConcern", NamespaceString::kRsOplogNamespace, [&opCtx] { WriteUnitOfWork uow(opCtx); opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage( opCtx, @@ -517,6 +562,12 @@ Status waitForSpeculativeMajorityReadConcernImpl( OperationContext* opCtx, repl::SpeculativeMajorityReadInfo speculativeReadInfo) { invariant(speculativeReadInfo.isSpeculativeRead()); + // If we are in a direct client that's holding a global lock, then this means this is a + // sub-operation of the parent. In this case we delegate the wait to the parent. + if (opCtx->getClient()->isInDirectClient() && opCtx->lockState()->isLocked()) { + return Status::OK(); + } + // Select the timestamp to wait on. A command may have selected a specific timestamp to wait on. // If not, then we use the timestamp selected by the read source. auto replCoord = repl::ReplicationCoordinator::get(opCtx); diff --git a/src/mongo/db/read_write_concern_defaults.cpp b/src/mongo/db/read_write_concern_defaults.cpp index efa429b0e8c72..edd16ed53611b 100644 --- a/src/mongo/db/read_write_concern_defaults.cpp +++ b/src/mongo/db/read_write_concern_defaults.cpp @@ -28,13 +28,32 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/logical_time.h" #include "mongo/db/read_write_concern_defaults.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/server_options.h" +#include "mongo/db/read_write_concern_provenance.h" +#include "mongo/db/read_write_concern_provenance_base_gen.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/vector_clock.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/read_write_concern_defaults.h b/src/mongo/db/read_write_concern_defaults.h index 0e6e0fca01d92..447f501136655 100644 --- a/src/mongo/db/read_write_concern_defaults.h +++ b/src/mongo/db/read_write_concern_defaults.h @@ -29,17 +29,33 @@ #pragma once +#include +#include +#include +#include #include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults_gen.h" #include 
"mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/service_context.h" #include "mongo/db/write_concern_options.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/thread_pool_interface.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/functional.h" +#include "mongo/util/invalidating_lru_cache.h" #include "mongo/util/read_through_cache.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/read_write_concern_defaults_cache_lookup_mock.cpp b/src/mongo/db/read_write_concern_defaults_cache_lookup_mock.cpp index 4ecb739b285a9..b8e826d874572 100644 --- a/src/mongo/db/read_write_concern_defaults_cache_lookup_mock.cpp +++ b/src/mongo/db/read_write_concern_defaults_cache_lookup_mock.cpp @@ -27,9 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include + +#include +#include #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/read_write_concern_defaults_cache_lookup_mock.h b/src/mongo/db/read_write_concern_defaults_cache_lookup_mock.h index 3d2473f10cc14..28dab1949c756 100644 --- a/src/mongo/db/read_write_concern_defaults_cache_lookup_mock.h +++ b/src/mongo/db/read_write_concern_defaults_cache_lookup_mock.h @@ -29,7 +29,13 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/read_write_concern_defaults_gen.h" namespace mongo { diff --git a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp index daadb0c1444fb..e31b2a111fe26 100644 --- a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp +++ b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.cpp @@ -30,11 +30,27 @@ #include "mongo/db/read_write_concern_defaults_cache_lookup_mongod.h" +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/namespace_string.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.h b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.h index ee84498ebe85a..0ae4528ccf4d9 100644 --- a/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.h +++ b/src/mongo/db/read_write_concern_defaults_cache_lookup_mongod.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/read_write_concern_defaults_gen.h" namespace mongo { diff --git a/src/mongo/db/read_write_concern_defaults_test.cpp b/src/mongo/db/read_write_concern_defaults_test.cpp index 
9ff5cb9b9cb91..fe9e72486de59 100644 --- a/src/mongo/db/read_write_concern_defaults_test.cpp +++ b/src/mongo/db/read_write_concern_defaults_test.cpp @@ -27,17 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/server_options.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/db/vector_clock_test_fixture.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/read_write_concern_provenance.cpp b/src/mongo/db/read_write_concern_provenance.cpp index 39a4008a9e513..272dbd8abe228 100644 --- a/src/mongo/db/read_write_concern_provenance.cpp +++ b/src/mongo/db/read_write_concern_provenance.cpp @@ -27,11 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/read_write_concern_provenance.h" +#include -#include "mongo/db/server_options.h" +#include "mongo/db/read_write_concern_provenance.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/read_write_concern_provenance.h b/src/mongo/db/read_write_concern_provenance.h index dc8fc0e57daf6..b6b71c4b14a93 100644 --- a/src/mongo/db/read_write_concern_provenance.h +++ b/src/mongo/db/read_write_concern_provenance.h @@ -29,11 +29,17 @@ #pragma once +#include +#include +#include + #include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/read_write_concern_provenance_base_gen.h" +#include "mongo/idl/idl_parser.h" namespace mongo { diff --git a/src/mongo/db/read_write_concern_provenance_test.cpp b/src/mongo/db/read_write_concern_provenance_test.cpp index b477f8f825a67..8e11cc35712cd 100644 --- a/src/mongo/db/read_write_concern_provenance_test.cpp +++ b/src/mongo/db/read_write_concern_provenance_test.cpp @@ -27,12 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/jsobj.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/read_write_concern_provenance.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/rebuild_indexes.cpp b/src/mongo/db/rebuild_indexes.cpp index d605b2a78676f..733ab65faf3a9 100644 --- a/src/mongo/db/rebuild_indexes.cpp +++ b/src/mongo/db/rebuild_indexes.cpp @@ -28,17 +28,25 @@ */ -#include "mongo/platform/basic.h" - #include -#include "mongo/db/rebuild_indexes.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/rebuild_indexes.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/rebuild_indexes.h b/src/mongo/db/rebuild_indexes.h index 27189980cdbd5..33b48fe78037b 100644 --- a/src/mongo/db/rebuild_indexes.h +++ b/src/mongo/db/rebuild_indexes.h @@ -31,7 +31,11 @@ #include #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/record_id.h" diff --git a/src/mongo/db/record_id.h b/src/mongo/db/record_id.h index 3b08e49ed0be7..d1a5fe7311430 100644 --- a/src/mongo/db/record_id.h +++ b/src/mongo/db/record_id.h @@ -29,17 +29,33 @@ #pragma once +#include +#include #include #include +#include #include +#include #include #include #include +#include +#include #include +#include +#include #include +#include +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/util/assert_util.h" #include "mongo/util/bufreader.h" #include "mongo/util/hex.h" #include "mongo/util/shared_buffer.h" @@ -53,12 +69,11 @@ class RecordIdChecks; /** * The key that uniquely identifies a Record in a Collection or RecordStore. */ -#pragma pack(push, 1) class alignas(int64_t) RecordId { // The alignas is necessary in order to comply with memory alignment. Internally we're using // 8-byte aligned data members (int64_t / ConstSharedBuffer) but as we're packing the structure - // the compiler will set the alignment to 1 due to the pragma so we must correct its alignment - // information for users of the class. + // the compiler will set the alignment to 1 so we must correct its alignment information for + // users of the class. // Class used for static assertions that can only happen when RecordId is completely defined. 
friend class details::RecordIdChecks; @@ -92,98 +107,50 @@ class alignas(int64_t) RecordId { ~RecordId() { if (_format == Format::kBigStr) { - _data.heapStr.buffer.~ConstSharedBuffer(); + HeapStr::getBufferFrom(_data).~ConstSharedBuffer(); } } - RecordId(RecordId&& other) { - switch (other._format) { - case kNull: - break; - case kLong: - _data.longId.id = other._data.longId.id; - break; - case kSmallStr: - _data.inlineStr = other._data.inlineStr; - break; - case kBigStr: - new (&_data.heapStr.buffer) - ConstSharedBuffer(std::move(other._data.heapStr.buffer)); - break; - } - _format = other._format; - other._format = Format::kNull; + RecordId(RecordId&& other) : _format(other._format), _data(other._data) { + other._format = kNull; }; - RecordId(const RecordId& other) { - switch (other._format) { - case kNull: - break; - case kLong: - _data.longId.id = other._data.longId.id; - break; - case kSmallStr: - _data.inlineStr = other._data.inlineStr; - break; - case kBigStr: - new (&_data.heapStr.buffer) ConstSharedBuffer(other._data.heapStr.buffer); - break; + RecordId(const RecordId& other) : _format(other._format), _data(other._data) { + if (_format == Format::kBigStr) { + // Re-initialize the SharedBuffer to get the correct reference count. + auto* buffer = &HeapStr::getBufferFrom(_data); + new (buffer) ConstSharedBuffer(HeapStr::getBufferFrom(other._data)); } - _format = other._format; }; RecordId& operator=(const RecordId& other) { - if (_format == Format::kBigStr) { - _data.heapStr.buffer.~ConstSharedBuffer(); - } - switch (other._format) { - case kNull: - break; - case kLong: - _data.longId.id = other._data.longId.id; - break; - case kSmallStr: - _data.inlineStr = other._data.inlineStr; - break; - case kBigStr: - new (&_data.heapStr.buffer) ConstSharedBuffer(other._data.heapStr.buffer); - break; - } - _format = other._format; + RecordId tmp{other}; + *this = std::move(tmp); return *this; }; RecordId& operator=(RecordId&& other) { - if (_format == Format::kBigStr) { - _data.heapStr.buffer.~ConstSharedBuffer(); - } - switch (other._format) { - case kNull: - break; - case kLong: - _data.longId.id = other._data.longId.id; - break; - case kSmallStr: - _data.inlineStr = other._data.inlineStr; - break; - case kBigStr: - new (&_data.heapStr.buffer) - ConstSharedBuffer(std::move(other._data.heapStr.buffer)); - break; - } - _format = other._format; - other._format = Format::kNull; + swap(other); return *this; } + void swap(RecordId& other) { + // We perform a byte-wise swap here to avoid concerns with the actual underlying type of the + // RecordId. + std::array tmp; + std::memcpy(reinterpret_cast(tmp.data()), this, sizeof(RecordId)); + std::memcpy(reinterpret_cast(this), &other, sizeof(RecordId)); + std::memcpy(reinterpret_cast(&other), tmp.data(), sizeof(RecordId)); + } + /** * Construct a RecordId that holds an int64_t. The raw value for RecordStore storage may be * retrieved using getLong(). 
*/ explicit RecordId(int64_t s) { _format = Format::kLong; - _data.longId.id = s; + LongId::getIdFrom(_data) = s; } /** @@ -198,13 +165,15 @@ class alignas(int64_t) RecordId { size <= kBigStrMaxSize); if (size <= kSmallStrMaxSize) { _format = Format::kSmallStr; - _data.inlineStr.size = static_cast(size); - std::memcpy(_data.inlineStr.dataArr.data(), str, size); + InlineStr::getSizeFrom(_data) = static_cast(size); + auto& arr = InlineStr::getArrayFrom(_data); + std::memcpy(arr.data(), str, size); } else { _format = Format::kBigStr; auto buffer = SharedBuffer::allocate(size); std::memcpy(buffer.get(), str, size); - new (&_data.heapStr.buffer) ConstSharedBuffer(std::move(buffer)); + auto* bufferPtr = &HeapStr::getBufferFrom(_data); + new (bufferPtr) ConstSharedBuffer(std::move(buffer)); } } @@ -350,6 +319,11 @@ class alignas(int64_t) RecordId { return hash; } + /** + * Returns a string form of this RecordId, but in the cases where RecordId is either + * Format::kSmallStr or Format::kBigStr this is a raw hex dump. For a human-readable string call + * toStringHumanReadable(). + */ std::string toString() const { return withFormat( [](Null n) { return std::string("null"); }, @@ -357,12 +331,38 @@ class alignas(int64_t) RecordId { [](const char* str, int size) { return hexblob::encodeLower(str, size); }); } + /** + * Returns a human-readable string form of this RecordId. (Call toString() if you prefer a hex + * dump of string IDs.) + */ + std::string toStringHumanReadable() const { + switch (_format) { + case Format::kNull: + return "null"; + case Format::kLong: + return std::to_string(_getLongNoCheck()); + case Format::kSmallStr: { + StringData str = _getSmallStrNoCheck(); + return "kSmallStr size: " + std::to_string(str.size()) + " string: '" + + std::string(str.rawData()) + "'"; + } + case Format::kBigStr: { + StringData str = _getBigStrNoCheck(); + return "kBigStr size: " + std::to_string(str.size()) + " string: '" + + std::string(str.rawData()) + "'"; + } + default: + MONGO_UNREACHABLE; + } + } + /** * Returns the total amount of memory used by this RecordId, including itself and any heap * buffers. */ size_t memUsage() const { - size_t largeStrSize = (_format == Format::kBigStr) ? _data.heapStr.buffer.capacity() : 0; + size_t largeStrSize = + (_format == Format::kBigStr) ? HeapStr::getBufferFrom(_data).capacity() : 0; return sizeof(RecordId) + largeStrSize; } @@ -449,6 +449,24 @@ class alignas(int64_t) RecordId { */ static constexpr auto kSmallStrMaxSize = 30; + /// members for Sorter + struct SorterDeserializeSettings {}; // unused + void serializeForSorter(BufBuilder& buf) const { + serializeToken(buf); + } + static RecordId deserializeForSorter(BufReader& buf, const SorterDeserializeSettings&) { + return deserializeToken(buf); + } + int memUsageForSorter() const { + return memUsage(); + } + RecordId getOwned() const { + MONGO_UNREACHABLE; + } + void makeOwned() { + MONGO_UNREACHABLE; + } + private: /** * Format specifies the in-memory representation of this RecordId. This does not represent any @@ -463,9 +481,9 @@ class alignas(int64_t) RecordId { */ kLong, /** - * Stores a variable-length binary string smaller than kSmallStrMaxSize. Data is stored in - * the InlineStr struct at '_data.inlineStr'. This RecordId may only be accessed using - * getStr(). + * Stores a variable-length binary string less than or equal to kSmallStrMaxSize. Data is + * stored in the InlineStr struct at '_data.inlineStr'. This RecordId may only be accessed + * using getStr(). 
*/ kSmallStr, /** @@ -491,15 +509,16 @@ class alignas(int64_t) RecordId { } int64_t _getLongNoCheck() const { - return _data.longId.id; + return LongId::getIdFrom(_data); } StringData _getSmallStrNoCheck() const { - return StringData(_data.inlineStr.dataArr.data(), _data.inlineStr.size); + return StringData(InlineStr::getArrayFrom(_data).data(), InlineStr::getSizeFrom(_data)); } StringData _getBigStrNoCheck() const { - return StringData(_data.heapStr.buffer.get(), _data.heapStr.buffer.capacity()); + const auto& buffer = HeapStr::getBufferFrom(_data); + return StringData(buffer.get(), buffer.capacity()); } static constexpr auto kTargetSizeInBytes = 32; @@ -512,41 +531,58 @@ class alignas(int64_t) RecordId { static_assert(sizeof(Format) == 1); // All of this will work if and only if char size is 1 (std::byte) for the InlineString. static_assert(sizeof(std::byte) == sizeof(char)); + using Content = std::array; + Content _data; // Offsets/padding will be computed in respect to the whole class by taking into account the // Format data member. struct HeapStr { static_assert(std::alignment_of_v <= 8, "ConstSharedBuffer is expected to have 8 bytes alignment at most. Having a " "larger alignment requires changing the RecordId class alignment"); - std::byte _padding[std::alignment_of_v - - sizeof(Format)]; // offset = 1, size = 7 - ConstSharedBuffer buffer; // offset = 1 + 7, size = 8 + static constexpr auto kOffset = std::alignment_of_v - sizeof(Format); + ConstSharedBuffer buffer; // offset = 1 + 7, size = 8 + static ConstSharedBuffer& getBufferFrom(Content& data) { + ConstSharedBuffer* ptr = (ConstSharedBuffer*)(data.data() + kOffset); + return *ptr; + } + static const ConstSharedBuffer& getBufferFrom(const Content& data) { + ConstSharedBuffer* ptr = (ConstSharedBuffer*)(data.data() + kOffset); + return *ptr; + } }; struct InlineStr { - uint8_t size; // offset = 1, size = 1 - std::array - dataArr; // offset = 1 + 1, size = 30 + static constexpr auto kSizeOffset = 0; + static uint8_t& getSizeFrom(Content& data) { + uint8_t* ptr = (uint8_t*)(data.data() + kSizeOffset); + return *ptr; + }; + static const uint8_t& getSizeFrom(const Content& data) { + uint8_t* ptr = (uint8_t*)(data.data() + kSizeOffset); + return *ptr; + }; + static constexpr auto kArrayOffset = sizeof(uint8_t); + using arr = std::array; + static arr& getArrayFrom(Content& data) { + arr* ptr = (arr*)(data.data() + kArrayOffset); + return *ptr; + }; + static const arr& getArrayFrom(const Content& data) { + arr* ptr = (arr*)(data.data() + kArrayOffset); + return *ptr; + }; }; struct LongId { - std::byte _padding[std::alignment_of_v - sizeof(Format)]; // offset = 1, size = 7 - int64_t id; // offset = 1 + 7, size = 8 - }; - union Content { - // Constructor and destructor are needed so that the union class is default constructible. - // Placement new will be used to initialize the correct data member in the RecordId - // constructor. This is necessary due to ConstSharedBuffer not being trivially - // constructible. - Content(){}; - // Destructor is handled in the RecordId destructor as it handles knowing which data member - // is active. 
- ~Content(){}; - HeapStr heapStr; - InlineStr inlineStr; - LongId longId; + static constexpr auto kOffset = std::alignment_of_v - sizeof(Format); + static int64_t& getIdFrom(Content& data) { + int64_t* result = (int64_t*)(data.data() + kOffset); + return *result; + } + static const int64_t& getIdFrom(const Content& data) { + int64_t* result = (int64_t*)(data.data() + kOffset); + return *result; + } }; - Content _data; // offset = 1, size = 31 }; -#pragma pack(pop) namespace details { // Various assertions of RecordId that can only happen when the type is completely defined. @@ -583,4 +619,8 @@ inline std::ostream& operator<<(std::ostream& stream, const RecordId& id) { return stream << "RecordId(" << id.toString() << ')'; } +inline void swap(RecordId& lhs, RecordId& rhs) { + lhs.swap(rhs); +} + } // namespace mongo diff --git a/src/mongo/db/record_id_helpers.cpp b/src/mongo/db/record_id_helpers.cpp index ace2628be2740..9ba67a50c0a5e 100644 --- a/src/mongo/db/record_id_helpers.cpp +++ b/src/mongo/db/record_id_helpers.cpp @@ -30,17 +30,26 @@ #include "mongo/db/record_id_helpers.h" +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/timestamp.h" #include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/jsobj.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/record_id.h" #include "mongo/db/storage/key_string.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" #include "mongo/util/debug_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -68,7 +77,7 @@ StatusWith keyForOptime(const Timestamp& opTime, const KeyFormat keyFo return {std::move(out)}; } case KeyFormat::String: { - KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion); + key_string::Builder keyBuilder(key_string::Version::kLatestVersion); keyBuilder.appendTimestamp(opTime); return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize()); } @@ -125,7 +134,7 @@ RecordId keyForElem(const BSONElement& elem) { // Intentionally discard the TypeBits since the type information will be stored in the cluster // key of the original document. The consequence of this behavior is that cluster key values // that compare similarly, but are of different types may not be used concurrently. 
- KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion); + key_string::Builder keyBuilder(key_string::Version::kLatestVersion); keyBuilder.appendBSONElement(elem); return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize()); } @@ -135,13 +144,13 @@ RecordId keyForObj(const BSONObj& obj) { } RecordId keyForOID(OID oid) { - KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion); + key_string::Builder keyBuilder(key_string::Version::kLatestVersion); keyBuilder.appendOID(oid); return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize()); } RecordId keyForDate(Date_t date) { - KeyString::Builder keyBuilder(KeyString::Version::kLatestVersion); + key_string::Builder keyBuilder(key_string::Version::kLatestVersion); keyBuilder.appendDate(date); return RecordId(keyBuilder.getBuffer(), keyBuilder.getSize()); } @@ -150,7 +159,7 @@ void appendToBSONAs(const RecordId& rid, BSONObjBuilder* builder, StringData fie rid.withFormat([&](RecordId::Null) { builder->appendNull(fieldName); }, [&](int64_t val) { builder->append(fieldName, val); }, [&](const char* str, int len) { - KeyString::appendSingleFieldToBSONAs(str, len, fieldName, builder); + key_string::appendSingleFieldToBSONAs(str, len, fieldName, builder); }); } diff --git a/src/mongo/db/record_id_helpers.h b/src/mongo/db/record_id_helpers.h index 63aa4004a0e5d..7fcae8ff43e75 100644 --- a/src/mongo/db/record_id_helpers.h +++ b/src/mongo/db/record_id_helpers.h @@ -31,10 +31,17 @@ #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/key_format.h" +#include "mongo/util/time_support.h" namespace mongo { class Timestamp; diff --git a/src/mongo/db/record_id_test.cpp b/src/mongo/db/record_id_test.cpp index ed982f401658d..f40841f9ef1e5 100644 --- a/src/mongo/db/record_id_test.cpp +++ b/src/mongo/db/record_id_test.cpp @@ -31,9 +31,15 @@ #include "mongo/db/record_id.h" +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/debug_util.h" namespace mongo { diff --git a/src/mongo/db/repair.cpp b/src/mongo/db/repair.cpp index 477f82dcaa811..7190298676680 100644 --- a/src/mongo/db/repair.cpp +++ b/src/mongo/db/repair.cpp @@ -27,35 +27,45 @@ * it in the license file. 
*/ -#include +#include #include +#include +#include +#include +#include -#include "mongo/db/repair.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" -#include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_validation.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/catalog/index_key_validate.h" -#include "mongo/db/catalog/multi_index_block.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/catalog/validate_results.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/operation_context.h" #include "mongo/db/rebuild_indexes.h" +#include "mongo/db/repair.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl_set_member_in_standalone_mode.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_repair_observer.h" #include "mongo/db/storage/storage_util.h" -#include "mongo/db/vector_clock.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" -#include "mongo/util/scopeguard.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -117,7 +127,7 @@ Status dropUnfinishedIndexes(OperationContext* opCtx, Collection* collection) { StorageRepairObserver::get(opCtx->getServiceContext()) ->invalidatingModification(str::stream() << "Dropped unfinished index '" << indexName << "' on " - << collection->ns()); + << collection->ns().toStringForErrorMsg()); } } return Status::OK(); @@ -148,7 +158,6 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const Data LOGV2(21029, "repairDatabase", logAttrs(dbName)); - opCtx->checkForInterrupt(); // Close the db and invalidate all current users and caches. @@ -168,13 +177,11 @@ Status repairDatabase(OperationContext* opCtx, StorageEngine* engine, const Data } try { - // Ensure that we don't trigger an exception when attempting to take locks. - // TODO (SERVER-71610): Fix to be interruptible or document exception. - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. - // Restore oplog Collection pointer cache. repl::acquireOplogCollectionForLogging(opCtx); } catch (...) { + // The only expected exception is an interrupt. + opCtx->checkForInterrupt(); LOGV2_FATAL_CONTINUE( 21031, "Unexpected exception encountered while reacquiring oplog collection after repair."); @@ -207,7 +214,7 @@ Status repairCollection(OperationContext* opCtx, // to run an expensive collection validation. 
if (status.code() == ErrorCodes::DataModifiedByRepair) { invariant(StorageRepairObserver::get(opCtx->getServiceContext())->isDataInvalidated(), - "Collection '{}' ({})"_format(collection->ns().toString(), + "Collection '{}' ({})"_format(toStringForLogging(collection->ns()), collection->uuid().toString())); // If we are a replica set member in standalone mode and we have unfinished indexes, diff --git a/src/mongo/db/repair.h b/src/mongo/db/repair.h index d2f120f714231..42e24e359d183 100644 --- a/src/mongo/db/repair.h +++ b/src/mongo/db/repair.h @@ -32,9 +32,11 @@ #include #include +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/database_name.h" #include "mongo/db/record_id.h" +#include "mongo/db/tenant_id.h" namespace mongo { class StorageEngine; diff --git a/src/mongo/db/repl/FCV_AND_FEATURE_FLAG_README.md b/src/mongo/db/repl/FCV_AND_FEATURE_FLAG_README.md index b60fcfc85e160..97381b122236f 100644 --- a/src/mongo/db/repl/FCV_AND_FEATURE_FLAG_README.md +++ b/src/mongo/db/repl/FCV_AND_FEATURE_FLAG_README.md @@ -21,9 +21,10 @@ document is also present on standalone nodes. ## FCV on Startup -On a clean startup (the server currently has no replicated collections), the server will create the -FCV document for the first time. If it is running as a shard server (with the `--shardsvr option`), -the server will set the FCV to be the last LTS version. This is to ensure compatibility when adding +On a clean startup (the server currently has no replicated collections), the server will [create the FCV document for the first time](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/startup_recovery.cpp#L619). +If it is running as a shard server (with the `--shardsvr option`), +the server will [set the FCV to be the last LTS version](https://github.com/10gen/mongo/blob/386b1c0c74aa24c306f0ef5bcbde892aec89c8f6/src/mongo/db/commands/feature_compatibility_version.cpp#L442). +This is to ensure compatibility when adding the shard to a downgraded version cluster. The config server will run `setFeatureCompatibilityVersion`on the shard to match the clusters FCV as part of `addShard`. If the server is not running as a shard server, then the server will set its FCV to the latest version by @@ -64,27 +65,56 @@ to the config servers which then forward request again to shard primaries. As mo non-data bearing, they do not have an FCV. Each `mongod` release will support the following upgrade/downgrade paths: -* Last-Continuous ←→ Latest +* Last-Continuous → Latest + * Note that we do not support downgrading to or from Last-Continuous. * Last-LTS ←→ Latest * Last-LTS → Last-Continuous * This upgrade-only transition is only possible when requested by the [config server](https://docs.mongodb.com/manual/core/sharded-cluster-config-servers/). -Additionally, the last LTS must not be equal to the last continuous release. + * Additionally, the last LTS must not be equal to the last continuous release. -As part of an upgrade/downgrade, the FCV will transition through three states: +The command also requires a `{confirm: true}` parameter. This is so that users acknowledge that an +FCV + binary downgrade will require support assistance. 
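For illustration, the request a client sends for such a downgrade looks roughly like the following sketch (the target version "5.0" is only an example):

```
// Illustrative only: the admin command a client would send to downgrade the FCV,
// acknowledging that a later binary downgrade would need support assistance.
const BSONObj setFcvCmd =
    BSON("setFeatureCompatibilityVersion" << "5.0" << "confirm" << true);
```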
Without this parameter, the
+setFeatureCompatibilityVersion command for downgrade will [error](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L290-L298)
+and say that once the FCV has been downgraded, choosing to downgrade the binary version will
+require support assistance. Similarly, the setFeatureCompatibilityVersion command for upgrade
+will also error and say that once the cluster is upgraded, FCV + binary downgrade will no longer be
+possible without support assistance.
+
+As part of an upgrade/downgrade, the FCV will transition through these states:

 Upgrade:
    kVersion_X → kUpgradingFrom_X_To_Y → kVersion_Y
 
 Downgrade:
-   kVersion_X → kDowngradingFrom_X_To_Y → kVersion_Y
+   kVersion_X → kDowngradingFrom_X_To_Y → isCleaningServerMetadata → kVersion_Y
 
In above, X will be the source version that we are upgrading/downgrading from while Y is the target version that we are upgrading/downgrading to. -Transitioning to one of the `kUpgradingFrom_X_To_Y`/`kDowngradingFrom_X_To_Y` states updates +These are the steps that the setFCV command goes through. See [adding code to the setFCV command](#adding-upgradedowngrade-related-code-to-the-setfcv-command) +for more information on how to add upgrade/downgrade code to the command. + +1. **Transition to `kUpgradingFrom_X_To_Y` or `kDowngradingFrom_X_To_Y`** + + * In the first part, we start transition to `requestedVersion` by [updating the local FCV document to a +`kUpgradingFrom_X_To_Y` or `kDowngradingFrom_X_To_Y` state](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L430-L437), respectively. + + * Transitioning to one of the `kUpgradingFrom_X_To_Y`/`kDowngradingFrom_X_To_Y` states updates the FCV document in `admin.system.version` with a new `targetVersion` field. Transitioning to a `kDowngradingFrom_X_to_Y` state in particular will also add a `previousVersion` field along with the -`targetVersion` field. These updates are done with `writeConcern: majority`. +`targetVersion` field. These updates are done with `writeConcern: majority`. + + * Transitioning to one of the `kUpgradingFrom_X_To_Y`/`kDowngradingFrom_X_to_Y`/`kVersion_Y`(on +upgrade) states [sets the `minWireVersion` to `WireVersion::LATEST_WIRE_VERSION`](https://github.com/10gen/mongo/blob/386b1c0c74aa24c306f0ef5bcbde892aec89c8f6/src/mongo/db/op_observer/fcv_op_observer.cpp#L69) +and also [closes all incoming connections from internal clients with lower binary versions](https://github.com/10gen/mongo/blob/386b1c0c74aa24c306f0ef5bcbde892aec89c8f6/src/mongo/db/op_observer/fcv_op_observer.cpp#L76-L82). +The reason we do this on `kDowngradingFrom_X_to_Y` is because we shouldn’t decrease the +minWireVersion until we have fully downgraded to the lower FCV in case we get any backwards +compatibility breakages, since during `kDowngradingFrom_X_to_Y` we may still be stopping/cleaning up +any features from the upgraded FCV. In essence, a node with the upgraded FCV/binary should not be +able to communicate with downgraded binary nodes until the FCV is completely downgraded to `kVersion_Y`. + + * **This step is expected to be fast and always succeed** (except if the request parameters fail validation + e.g. if the requested FCV is not a valid transition). Some examples of on-disk representations of the upgrading and downgrading states:

@@ -102,7 +132,11 @@ kDowngradingFrom_5_1_To_5_0:
 }
 
-Once this transition is complete, the FCV full transition lock is acquired in shared + +2. **Run [`_prepareToUpgrade` or `_prepareToDowngrade`](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L497-L501):** + * First, we do any actions to prepare for upgrade/downgrade that must be taken before the FCV + full transition lock. For example, we cancel serverless migrations in this step. + * Then, the FCV full transition lock is acquired in shared mode and then released immediately. This creates a barrier and guarantees safety for operations that acquire the global lock either in exclusive or intent exclusive mode. If these operations begin and acquire the global lock prior to the FCV change, they will proceed in the context of the old @@ -111,16 +145,108 @@ after the FCV change, they will see the updated FCV and behave accordingly. This in order to make this barrier truly safe, **in any given operation, we should only check the feature flag/FCV after acquiring the appropriate locks**. See the [section about setFCV locks](#setfcv-locks) for more information on the locks used in the setFCV command. + * Finally, we check for any user data or settings that will be incompatible on + the new FCV, and uassert with the `CannotUpgrade` or `CannotDowngrade` code if the user needs to manually clean up + incompatible user data. This is especially important on downgrade. + * If an FCV downgrade fails at this point, the user can either remove the incompatible user data and retry the FCV downgrade, or they can upgrade the FCV back to the original FCV. + * On this part no metadata cleanup is performed yet. + +3. **Complete any [upgrade or downgrade specific code](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L524-L528), done in `_runUpgrade` or `_runDowngrade`.** This may include metadata cleanup. + * For upgrade, we update metadata to make sure the new features in the upgraded version work for + both sharded and non-sharded clusters. + * For downgrade, we transition from `kDowngradingFrom_X_to_Y` to +`isCleaningServerMetadata`, which indicates that we have started [cleaning up internal server metadata](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L1495). Transitioning to +`isCleaningServerMetadata` will add a `isCleaningServerMetadata` field, which will be removed upon +transitioning to `kVersion_Y`. This update is also done using `writeConcern: majority`. +After this point, if the FCV downgrade fails, it is no longer safe to transition back to the original +upgraded FCV, and the user must retry the FCV downgrade. Then we perform any internal server downgrade cleanup. + +Examples on-disk representation of the `isCleaningServerMetadata` state: +

+isCleaningServerMetadata after kDowngradingFrom_5_1_To_5_0:
+{ 
+    version: 5.0, 
+    targetVersion: 5.0,
+    previousVersion: 5.1,
+    isCleaningServerMetadata: true
+}
+
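The barrier created in step 2 is only safe if individual operations follow the same ordering: take the operation's own locks first, then check the FCV or feature flag. A minimal sketch of that ordering, reusing the Toaster example flag from the feature-flag section of this document (the operation and namespace are hypothetical):

```
// Hypothetical read path: acquire the collection lock before checking the flag, so the
// operation runs entirely under either the old or the new FCV, never a mix of both.
void runToasterRead(OperationContext* opCtx, const NamespaceString& nss) {
    AutoGetCollection coll(opCtx, nss, MODE_IS);
    if (feature_flags::gFeatureFlagToaster.isEnabled(serverGlobalParams.featureCompatibility)) {
        // behavior gated on the upgraded FCV
    } else {
        // behavior for the downgraded FCV
    }
}
```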
-Transitioning to one of the `kUpgradingFrom_X_To_Y`/`kDowngradingFrom_X_to_Y`/`kVersion_Y`(on -upgrade) states sets the `minWireVersion` to `WireVersion::LATEST_WIRE_VERSION` and also closes all -incoming connections from internal clients with lower binary versions. - -Finally, as part of transitioning to the `kVersion_Y` state, the `targetVersion` and the -`previousVersion` (if applicable) fields of the FCV document are deleted while the `version` field -is updated to reflect the new upgraded or downgraded state. This update is also done using -`writeConcern: majority`. The new in-memory FCV value will be updated to reflect the on-disk -changes. +4. Finally, we [complete transition](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L541-L548) by updating the +local FCV document to the fully upgraded or downgraded version. As part of transitioning to the +`kVersion_Y` state, the `targetVersion`, `previousVersion`, and `isCleaningServerMetadata` +(if applicable) fields of the FCV document are deleted while the `version` field is updated to +reflect the new upgraded or downgraded state. This update is also done using `writeConcern: majority`. +The new in-memory FCV value will be updated to reflect the on-disk changes. + + * Note that for an FCV upgrade, we do an extra step to run `_finalizeUpgrade` **after** updating + the FCV document to fully upgraded. This is for any tasks that cannot be done until after the + FCV is fully upgraded, because during `_runUpgrade`, the FCV is still in the transitional state + (which behaves like the downgraded FCV) + +## The SetFCV Command on Sharded Clusters +On a sharded cluster, the command is driven by the config server. The config server runs a 3-phase +protocol for updating the FCV on the cluster. Shard servers will go through all the steps outlined +above (please read the [setFeatureCompatibilityVersion Command Overview section](#setFeatureCompatibilityVersion-Command-Overview)), +but will be explicitly told when to do each step by the config servers. Config servers go through +the phases in lock step with the shard servers to make sure that they are always on the same phase +or one phase ahead of shard servers. For example, the config server cannot be in phase 3 if any +shard server is still in phase 1. + +Additionally, when the config server sends each command to each of +the shards, this is done [synchronously](https://github.com/10gen/mongo/blob/1c97952f194d80e0ba58a4fbe553f09326a5407f/src/mongo/db/s/config/sharding_catalog_manager.cpp#L858-L887), so the config will send the command to one shard and wait for +either a success or failure response. If it succeeds, then the config server will send the +command to the next shard. If it fails, then the whole FCV upgrade/downgrade will [fail](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L1032-L1033). This means that if one shard succeeds but another fails, the overall FCV upgrade/downgrade +will fail. + +1. First, the config server transitions to `kUpgradingFrom_X_To_Y` or `kDowngradingFrom_X_To_Y` (shards are still in the +old FCV). +2. Phase-1 + * a. Config server [sends phase-1 command to shards](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L476). + * b. 
Shard servers transition to `kUpgradingFrom_X_To_Y` or `kDowngradingFrom_X_To_Y`.
+ * c. Shard servers do any [phase-1 tasks](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L460) (for downgrading, this would include stopping new features).
+3. Phase-2 (throughout this phase config and shards are all in the transitional FCV)
+ * a. Config server runs `_prepareToUpgrade` or `_prepareToDowngrade`, takes the full FCV transition lock,
+ and verifies user data compatibility for upgrade/downgrade.
+ * b. Config server [sends phase-2 command to shards](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L506-L507).
+ * c. Shard servers run `_prepareToUpgrade` or `_prepareToDowngrade`, take the full FCV transition lock,
+ and verify user data compatibility for upgrade/downgrade.
+4. Phase-3
+ * a. Config server runs `_runUpgrade` or `_runDowngrade`. For downgrade, this means the config
+ server enters the `isCleaningServerMetadata` phase and cleans up any internal server metadata.
+ * b. Config server [sends phase-3 command to shards](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L1499).
+ * c. Shard servers run `_runUpgrade` or `_runDowngrade`. For downgrade, this means the shard
+ servers enter the `isCleaningServerMetadata` phase and clean up any internal server metadata.
+ * d. Shards finish and enter the fully upgraded or downgraded state (on upgrade, the config
+ server would still be in the `kUpgradingFrom_X_To_Y` phase, and on downgrade the config server
+ would still be in the `isCleaningServerMetadata` phase).
+ * e. Config server finishes and enters the fully upgraded or downgraded state.
+
+Note that on downgrade, if the setFCV command fails at any point between 4a and 4e, the user will
+not be able to transition back to the original upgraded FCV, since the config server and/or
+the shard servers are in the middle of cleaning up internal server metadata.
+
+## SetFCV Command Errors
+The setFCV command can only fail with these error cases:
+* Retryable error (such as `InterruptedDueToReplStateChange`)
+ * The user must retry the FCV upgrade/downgrade, so the code must be idempotent and retryable.
+* `CannotDowngrade`:
+ * The user can either remove the incompatible user data and retry the FCV downgrade, or they can upgrade the FCV back to the original FCV.
+ * Because of this, the code in the upgrade path must be able to work if started from any point in the
+ transitional `kDowngradingFrom_X_To_Y` state.
+ * The code in the FCV downgrade path must be idempotent and retryable.
+* `CannotUpgrade`:
+ * The user would need to fix the incompatible user data and retry the FCV upgrade.
+* Other `uasserts`:
+ * For example, if the user attempted to upgrade the FCV after the previous FCV downgrade failed
+ during `isCleaningServerMetadata`. In this case the user would need to retry the FCV downgrade.
+* `ManualInterventionRequired` or `fassert`:
+ * `ManualInterventionRequired` indicates a server bug
+ but that all the data is consistent on disk and for reads/writes, and an `fassert`
+ indicates a server bug and that the data is corrupted.
+ * `ManualInterventionRequired`
+ and `fasserts` are errors that should not occur in practice, but if they did,
+ they would turn into a Support case.
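As a concrete illustration of the `CannotDowngrade` case, a feature-specific user-data check might look like the following sketch (the flag is the Toaster example from the feature-flag section and the helper is hypothetical); real checks of this kind belong in `_userCollectionsUassertsForDowngrade`, described later in this README:

```
// Hypothetical downgrade check: only fires when the feature is enabled on the FCV we are
// leaving but disabled on the FCV we are moving to. Failing with CannotDowngrade lets the
// user remove the offending data and retry, or upgrade the FCV back to the original version.
if (feature_flags::gFeatureFlagToaster.isDisabledOnTargetFCVButEnabledOnOriginalFCV(
        requestedVersion, originalVersion)) {
    uassert(ErrorCodes::CannotDowngrade,
            "Cannot downgrade the FCV while toaster metadata documents exist; "
            "drop them and retry setFeatureCompatibilityVersion",
            !hasToasterMetadata(opCtx));  // hasToasterMetadata is a hypothetical helper
}
```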
## SetFCV Locks There are three locks used in the setFCV command: @@ -130,7 +256,8 @@ There are three locks used in the setFCV command: * [fcvDocumentLock](https://github.com/mongodb/mongo/blob/bd8a8d4d880577302c777ff961f359b03435126a/src/mongo/db/commands/feature_compatibility_version.cpp#L215) * The setFCV command takes this lock in X mode when it modifies the FCV document. This includes from [fully upgraded -> downgrading](https://github.com/mongodb/mongo/blob/bd8a8d4d880577302c777ff961f359b03435126a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L350), - [downgrading -> fully downgraded](https://github.com/mongodb/mongo/blob/bd8a8d4d880577302c777ff961f359b03435126a/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L422), + [downgrading -> isCleaningServerMetadata](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L1459-L1460), + [isCleaningServerMetadata -> fully downgraded](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L533), and vice versa. * Other operations should [take this lock in shared mode](https://github.com/mongodb/mongo/blob/bd8a8d4d880577302c777ff961f359b03435126a/src/mongo/db/commands/feature_compatibility_version.cpp#L594-L599) if they want to ensure that the FCV state _does not change at all_ during the operation. @@ -161,48 +288,95 @@ _Code spelunking starting points:_ * [The `FCVTransitions` class, that determines valid FCV transitions](https://github.com/mongodb/mongo/blob/c4d2ed3292b0e113135dd85185c27a8235ea1814/src/mongo/db/commands/feature_compatibility_version.cpp#L75) -## Adding code to the setFCV command +## Adding upgrade/downgrade related code to the setFCV command The `setFeatureCompatibilityVersion` command is done in three parts. This corresponds to the different states that the FCV document can be in, as described in the above section. -In the first part, we start transition to `requestedVersion` by updating the local FCV document to a -`kUpgradingFrom_X_To_Y` or `kDowngradingFrom_X_To_Y` state, respectively. +In the first part, we start transition to `requestedVersion` by [updating the local FCV document to a +`kUpgradingFrom_X_To_Y` or `kDowngradingFrom_X_To_Y` state](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L430-L437), respectively. +**This step is expected to be fast and always succeed.** This means that code that +might fail or take a long time should ***not*** be added before this point in the +`setFeatureCompatibilityVersion` command. + +In the second part, we perform [upgrade/downgrade-ability checks](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L497-L501). This is done on `_prepareToUpgrade` +and `_prepareToDowngrade`. On this part no metadata cleanup is performed yet. -In the second part, we perform upgrade/downgrade-ability checks. This is done on `_prepareToUpgrade` -and `prepareToDowngrade`. On this part no metadata cleanup is performed yet. +In the last part, we complete any [upgrade or downgrade specific code](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L524-L528), done in `_runUpgrade` and +`_runDowngrade`. 
This includes possible metadata cleanup. Note that once we start `_runDowngrade`, +we cannot transition back to `kUpgradingFrom_X_To_Y`until the full downgrade completes. -In the last part, we complete any upgrade or downgrade specific code, done in `_runUpgrade` and -`_runDowngrade`. This includes possible metadata cleanup. Then we complete transition by updating the +Then we [complete transition](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L541-L548) by updating the local FCV document to the fully upgraded or downgraded version. -***All feature-specific FCV upgrade or downgrade code should go into the respective `_runUpgrade` and -`_runDowngrade` functions.*** Each of them have their own helper functions where all feature-specific -upgrade/downgrade code should be placed. +***All feature-specific FCV upgrade or downgrade code should go into the following functions.*** + +`_shardServerPhase1Tasks`: This helper function is only for any actions that should be done specifically on +shard servers during phase 1 of the 3-phase setFCV protocol for sharded clusters. +For example, before completing phase 1, we must wait for backward incompatible +ShardingDDLCoordinators to finish. This is important in order to ensure that no +shard that is currently a participant of such a backward-incompatible +ShardingDDLCoordinator can transition to the fully downgraded state (and thus, +possibly downgrade its binary) while the coordinator is still in progress. +The fact that the FCV has already transitioned to kDowngrading ensures that no +new backward-incompatible ShardingDDLCoordinators can start. +We do not expect any other feature-specific work to be done in the 'start' phase. `_prepareToUpgrade` performs all actions and checks that need to be done before proceeding to make any metadata changes as part of FCV upgrade. Any new feature specific upgrade code should be placed in the helper functions: * `_prepareToUpgradeActions`: for any upgrade actions that should be done before taking the FCV full -transition lock in S mode -* `_userCollectionsWorkForUpgrade`: for any user collections uasserts, creations, or deletions that -need to happen during the upgrade. This happens after the FCV full transition lock. - -`_runUpgrade` _runUpgrade performs all the metadata-changing actions of an FCV upgrade. Any new +transition lock in S mode. It is required that the code in this helper function is +idempotent and could be done after `_runDowngrade` even if `_runDowngrade` failed at any point. +* `_userCollectionsWorkForUpgrade`: for any user collections uasserts (with the `CannotUpgrade` error code), +creations, or deletions that need to happen during the upgrade. This happens after the FCV full +transition lock. It is required that the code in this helper function is idempotent and could be +done after `_runDowngrade` even if `_runDowngrade` failed at any point. + +`_runUpgrade`: _runUpgrade performs all the metadata-changing actions of an FCV upgrade. Any new feature specific upgrade code should be placed in the `_runUpgrade` helper functions: -* `_completeUpgrade`: for updating metadata to make sure the new features in the upgraded version -work for sharded and non-sharded clusters +* `_upgradeServerMetadata`: for updating server metadata to make sure the new features in the upgraded version +work for sharded and non-sharded clusters. 
It is required that the code in this helper function is +idempotent and could be done after `_runDowngrade` even if `_runDowngrade` failed at any point. + + +`_finalizeUpgrade`: only for any tasks that must be done to fully complete the FCV upgrade +AFTER the FCV document has already been updated to the UPGRADED FCV. +This is because during `_runUpgrade`, the FCV is still in the transitional state (which behaves +like the downgraded FCV), so certain tasks cannot be done yet until the FCV is fully +upgraded. + +Additionally, it's possible that during an FCV upgrade, the replset/shard server/config server +undergoes failover AFTER the FCV document has already been updated to the UPGRADED FCV, but +before the cluster has completed `_finalizeUpgrade`. In this case, since the cluster failed over, +the user/client may retry sending the setFCV command to the cluster, but the cluster is +already in the requestedVersion (i.e. `requestedVersion == actualVersion`). However, +the cluster should retry/complete the tasks from `_finalizeUpgrade` before sending ok:1 +back to the user/client. Therefore, these tasks **must** be idempotent/retryable. `_prepareToDowngrade` performs all actions and checks that need to be done before proceeding to make any metadata changes as part of FCV downgrade. Any new feature specific downgrade code should be placed in the helper functions: * `_prepareToDowngradeActions`: Any downgrade actions that should be done before taking the FCV full transition lock in S mode should go in this function. -* `_userCollectionsUassertsForDowngrade`: for any checks on user data or settings that will uassert -if users need to manually clean up user data or settings. +* `_userCollectionsUassertsForDowngrade`: for any checks on user data or settings that will uassert +with the `CannotDowngrade` code if users need to manually clean up user data or settings. -`_runDowngrade` _runDowngrade performs all the metadata-changing actions of an FCV downgrade. Any +`_runDowngrade:` _runDowngrade performs all the metadata-changing actions of an FCV downgrade. Any new feature specific downgrade code should be placed in the `_runDowngrade` helper functions: -* `_internalServerCleanupForDowngrade`: for any internal server downgrade cleanup +* `_internalServerCleanupForDowngrade`: for any internal server downgrade cleanup. Any code in this +function is required to be *idempotent* and *retryable* in case the node crashes or downgrade fails in a +way that the user has to run setFCV again. It cannot fail for a non-retryable reason since at this +point user data has already been cleaned up. It also must be able to be *rolled back*. This is +because we cannot guarantee the safety of any server metadata that is not replicated in the event of +a rollback. + * This function can only fail with some transient error that can be retried + (like `InterruptedDueToReplStateChange`), `ManualInterventionRequired`, or `fasserts`. For + any non-retryable error in this helper function, it should error either with an + uassert with `ManualInterventionRequired` as the error code (indicating a server bug + but that all the data is consistent on disk and for reads/writes) or with an `fassert` + (indicating a server bug and that the data is corrupted). `ManualInterventionRequired` + and `fasserts` are errors that are not expected to occur in practice, but if they did, + they would turn into a Support case. 
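As a sketch of that error contract (not the actual implementation; the cleanup helper is hypothetical), code in this path lets transient errors propagate so the command can be retried and surfaces anything unexpected as `ManualInterventionRequired`:

```
try {
    // Hypothetical, idempotent metadata cleanup; safe to re-run if the command is retried.
    cleanupToasterServerMetadata(opCtx);
} catch (const ExceptionFor<ErrorCodes::InterruptedDueToReplStateChange>&) {
    throw;  // transient: the user simply retries setFeatureCompatibilityVersion
} catch (const DBException& ex) {
    // Unexpected, but user data is still consistent: report it as a server bug.
    uasserted(ErrorCodes::ManualInterventionRequired,
              str::stream() << "Failed to clean up server metadata for downgrade: "
                            << ex.toString());
}
```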
One common pattern for FCV downgrade is to check whether a feature needs to be cleaned up on
@@ -210,25 +384,36 @@ downgrade because it is not enabled on the downgraded version. For example, if we are
downgrading to 6.0, we must check if there are any new features that may have been used that are
not enabled on 6.0, and perform any necessary downgrade logic for that.
-To do so, we must do the following ([example in the codebase](https://github.com/mongodb/mongo/blob/dab0694cd327eb0f7e540de5dee97c69f84ea45d/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L612-L613)):
+To do so, we must do the following ([example in the codebase](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L1061-L1063)):
```
-if (!featureFlag.isEnabledOnVersion(requestedVersion)) {
+if (featureFlag.isDisabledOnTargetFCVButEnabledOnOriginalFCV(requestedVersion, originalVersion)) {
    // do feature specific checks/downgrade logic
}
```
-where `requestedVersion` is the version we are downgrading to. This way, as long as the feature is
-disabled on the downgraded version, the downgrade checks will trigger, regardless of whether we have
-enabled the feature by default on the upgraded version. This protects against if the feature flag
-was enabled, and the feature resulted in on-disk changes, and then somehow the feature flag was
-disabled again. See the [feature flags](#feature-flags) section for more information on feature
+where `requestedVersion` is the version we are downgrading to and `originalVersion` is the version
+we are downgrading from.
+
+Similarly, we can use [isEnabledOnTargetFCVButDisabledOnOriginalFCV](https://github.com/10gen/mongo/blob/c6e5701933a98b4fe91c2409c212fcce2d3d34f0/src/mongo/db/commands/set_feature_compatibility_version_command.cpp#L809-L810)
+for upgrade checks.
+
+```
+if (featureFlag.isEnabledOnTargetFCVButDisabledOnOriginalFCV(requestedVersion, originalVersion)) {
+    // do feature specific checks/upgrade logic
+}
+```
+See the [feature flags](#feature-flags) section for more information on feature
flags.

# Generic FCV references
Sometimes, we may want to make a generic FCV reference to implement logic around upgrade/downgrade
that is not specific to a certain release version.
-For these checks, we *must* use the [generic constants](https://github.com/mongodb/mongo/blob/e08eba28ab9ad4d54adb95e8517c9d43276e5336/src/mongo/db/server_options.h#L202-L216). For generic cases
+For these checks, we *must* use the [generic constants](https://github.com/mongodb/mongo/blob/e08eba28ab9ad4d54adb95e8517c9d43276e5336/src/mongo/db/server_options.h#L202-L216).
+We should not be using the FCV constants like kVersion_6_0 ([example of what to avoid](https://github.com/10gen/mongo/blob/ef8bdb8d0cbd584d47c54d64c3215ae29ec1a32f/src/mongo/db/pipeline/document_source_list_catalog.cpp#L130)).
+Instead, we should branch
+the different behavior using feature flags (see [When to Use Feature Flags](#when-to-use-feature-flags) and [Feature Flag Gating](#feature-flag-gating)).
+For generic cases
that only need to check if the server is currently in the middle of an upgrade/downgrade, use the
[isUpgradingOrDowngrading()](https://github.com/mongodb/mongo/blob/e08eba28ab9ad4d54adb95e8517c9d43276e5336/src/mongo/db/server_options.h#L275-L281)
helper.
@@ -299,7 +484,8 @@ development.
Additionally, any project or ticket that wants to introduce different behavior based on which FCV the server is running ***must*** add a feature flag. In the past, the branching of the different behavior would be done by directly checking which FCV the server was running. However, we now must -***not*** be using any references to FCV constants such as kVersion_6_0. Instead we should branch +***not*** be using any references to FCV constants such as kVersion_6_0 ([example of what to avoid](https://github.com/10gen/mongo/blob/ef8bdb8d0cbd584d47c54d64c3215ae29ec1a32f/src/mongo/db/pipeline/document_source_list_catalog.cpp#L130)). +Instead we should branch the different behavior using feature flags (see [Feature Flag Gating](#feature-flag-gating)). ***This means that individual ticket that wants to introduce an FCV check will also need to create a feature flag specific to that ticket.*** @@ -308,6 +494,12 @@ The motivation for using feature flags rather than checking FCV constants direct checking FCV constants directly is more error prone and has caused issues in the release process when updating/removing outdated FCV constants. +Note that ***we do not support disabling feature flags once they have been enabled via IDL in a release build***. +Therefore, feature flags should ***not*** be used for parameters that will be turned on and off. Our +entire feature flag system is built on the assumption that these are used for preventing +in-development code from being exposed to users, and not for turning off arbitrary features after +they've been released. + ## Lifecycle of a feature flag * Adding the feature flag * Disabled by default. This minimizes disruption to the CI system and BB process. @@ -352,6 +544,7 @@ Feature flags are created by adding it to an IDL file: description: "Create a feature flag" cpp_varname: gFeatureFlagToaster default: false + shouldBeFCVGated: true ``` A feature flag has the following properties: @@ -377,6 +570,13 @@ A feature flag has the following properties: * Version: string - a string for the FCV version * Required field if default is true, Must be a string acceptable to FeatureCompatibilityVersionParser. +* shouldBeFCVGated: boolean + * This should usually be true in order to gate the feature flag based on the FCV. + * However, some feature flags should not be FCV gated (for example, if the feature only exists + on mongos, which doesn't have an FCV, or if the feature doesn't have any upgrade downgrade + concerns and can be enabled as soon as the binary is upgraded/downgraded. + * When set to false, the feature flag won't require a version parameter, so it will only be + gated based on whether it is enabled by default on that binary version. To turn on a feature flag for testing when starting up a server, we would use the following command line (for the Toaster feature): @@ -407,26 +607,26 @@ if(feature_flags::gFeatureFlagToaster.isEnabled(serverGlobalParams.featureCompat } ``` -Note that this assumes that `serverGlobalParams.featureCompatibility` has already been initialized. -If we are calling `isEnabled(serverGlobalParams.featureCompatibility)` in a location where it might -not already be initialized, we must do this instead: +Note that `isEnabled` checks if the feature flag is enabled on the input FCV, which is usually +the server's current FCV `serverGlobalParams.featureCompatibility`. If the FCV has not been +initialized yet, it will check if the feature flag is enabled on the lastLTS FCV. 
+If the feature flag has `shouldBeFCVGated` set to false, then `isEnabled` will simply return +whether the feature flag is enabled. -``` -if(serverGlobalParams.featureCompatibility.isVersionInitialized() && -feature_flags::gFeatureFlagToaster.isEnabled(serverGlobalParams.featureCompatibility)) { - // code if feature is enabled. -} else { - // code if feature is not enabled. -} -``` There are some places where we only want to check if the feature flag is turned on, regardless of which FCV we are on. For example, this could be the case if we need to perform the check in a spot -in the code when the FCV has not been initialized yet. Only in these cases can we use the -`isEnabledAndIgnoreFCVUnsafe` helper. `isEnabledAndIgnoreFCVUnsafe` should only be used when we are -sure that we don't care what the FCV is. We should not use the `isEnabledAndIgnoreFCVUnsafe` helper -otherwise because it can result in unsafe scenarios where we enable a feature on an FCV where it is -not supported or where the feature has not been fully implemented yet. +in the code when the FCV has not been initialized yet during startup. In these cases we should use the +`isEnabledAndIgnoreFCVUnsafeAtStartup` helper. + +There are some cases outside of startup where we also want to check if the feature flag is turned on, +regardless of which FCV we are on. In these cases we can use the `isEnabledAndIgnoreFCVUnsafe` +helper, but it should only be used when we are sure that we don't care what the FCV is. We should +not use the `isEnabledAndIgnoreFCVUnsafe` helper otherwise because it can result in unsafe scenarios +where we enable a feature on an FCV where it is not supported or where the feature has not been +fully implemented yet. In order to use isEnabledAndIgnoreFCVUnsafe, you **must** add a comment above +that line starting with "(Ignore FCV check):" describing why we can safely ignore checking the FCV +here. ***Note that in a single operation, you must only check the feature flag once***. This is because if you checked if the feature flag was enabled multiple times within a single operation, it's possible diff --git a/src/mongo/db/repl/README.md b/src/mongo/db/repl/README.md index ec8e2354b1ec0..0bf640f66d8db 100644 --- a/src/mongo/db/repl/README.md +++ b/src/mongo/db/repl/README.md @@ -378,6 +378,8 @@ If the server does not support `hello`, the `helloOk` flag is ignored. A new dri not see "helloOk: true" in the response and continue to send `isMaster` on this connection. Old drivers will not specify this flag at all, so the behavior remains the same. +Communication between nodes in the cluster is always done using `hello`, never with `isMaster`. + ## Communication Each node has a copy of the **`ReplicaSetConfig`** in the `ReplicationCoordinator` that lists all @@ -525,7 +527,7 @@ assigns itself a priority takeover timeout proportional to its rank. After that node will check if it's eligible to run for election and if so will begin an election. The timeout is simply: `(election timeout) * (priority rank + 1)`. -Heartbeat threads belong to the +Heartbeat threads belong to the [`ReplCoordThreadPool`](https://github.com/mongodb/mongo/blob/674d57fc70d80dedbfd634ce00ca4b967ea89646/src/mongo/db/mongod_main.cpp#L944) connection pool started by the [`ReplicationCoordinator`](https://github.com/mongodb/mongo/blob/674d57fc70d80dedbfd634ce00ca4b967ea89646/src/mongo/db/mongod_main.cpp#L986). @@ -644,14 +646,11 @@ been replicated to a majority of nodes in the replica set. Any data seen in majo roll back in the future. 
Thus majority reads prevent **dirty reads**, though they often are **stale reads**. -Read concern majority reads usually return as fast as local reads, but sometimes will block. Read -concern majority reads do not wait for anything to be committed; they just use different snapshots -from local reads. They do block though when the node metadata (in the catalog cache) differs from -the committed snapshot. For example, index builds or drops, collection creates or drops, database -drops, or collmod’s could cause majority reads to block. If the primary receives a `createIndex` -command, subsequent majority reads will block until that index build is finished on a majority of -nodes. Majority reads also block right after startup or rollback when we do not yet have a committed -snapshot. +Read concern majority reads do not wait for anything to be committed; they just use different +snapshots from local reads. Read concern majority reads usually return as fast as local reads, but +sometimes will block. For example, right after startup or rollback when we do not have a committed +snapshot, majority reads will be blocked. Also, when some of the secondaries are unavailable or +lagging, majority reads could slow down or block. For information on how majority read concern works within a multi-document transaction, see the [Read Concern Behavior Within Transactions](#read-concern-behavior-within-transactions) section. @@ -845,7 +844,7 @@ node hasn't committed the transaction (and therefore, the WUOW) yet. A user can [add additional operations](https://github.com/mongodb/mongo/blob/r6.0.0/src/mongo/db/op_observer_impl.cpp#L554) to an existing multi-document transaction by running more commands on the same session. These operations are then stored in memory. Once a write completes on the primary, [we update the corresponding `sessionTxnRecord`](https://github.com/mongodb/mongo/blob/r6.0.0/src/mongo/db/op_observer_impl.cpp#L1664-L1673) -in the transactions table (`config.transactions`) with information about the transaction. +in the transactions table (`config.transactions`) with information about the transaction. This includes things like the `lsid`, the `txnNumber` currently associated with the session, and the `txnState`. This table was introduced for retryable writes and is used to keep track of retryable write and @@ -1187,6 +1186,47 @@ Note that secondaries can apply prepare oplog entries immediately but [recovering](#recovering-prepared-transactions) nodes must wait until they finish the process or see a commit oplog entry. +Another major difference between secondary nodes and recovering nodes is that recovering nodes +process prepare oplog entries one at a time and operations in a prepare oplog entry are applied in +serial, while secondary nodes batch process prepare oplog entries and use multiple threads to +parallelize the application of operations in each prepare oplog entry. 
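+
+As a rough illustration of the partitioning idea elaborated in the next paragraph (hypothetical
+code, not the actual `oplog_applier_utils` implementation), operations can be assigned to writer
+threads by hashing the document key they touch, so that all operations on one document land on
+the same thread:
+
+```
+#include <cstddef>
+#include <functional>
+#include <string>
+#include <vector>
+
+// Hypothetical sketch: bucket operations by the _id they touch so that all
+// operations on one document are applied by exactly one writer thread.
+struct Op {
+    std::string docId;  // stand-in for the document key an oplog entry touches
+};
+
+std::vector<std::vector<Op>> splitAmongWriters(const std::vector<Op>& ops, std::size_t numWriters) {
+    std::vector<std::vector<Op>> buckets(numWriters);
+    for (const auto& op : ops) {
+        buckets[std::hash<std::string>{}(op.docId) % numWriters].push_back(op);
+    }
+    return buckets;  // each bucket is handed to its own writer thread
+}
+```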
+
+In order to parallelize the application, a `prepareTransaction` oplog entry can be
+[applied in the same batch](https://github.com/mongodb/mongo/blob/07e1e93c566243983b45385f5c85bc7df0026f39/src/mongo/db/repl/oplog_batcher.cpp#L243-L248)
+as other CRUD or `prepareTransaction` oplog entries, and operations in each `prepareTransaction`
+oplog entry are [split among the writer threads](https://github.com/mongodb/mongo/blob/07e1e93c566243983b45385f5c85bc7df0026f39/src/mongo/db/repl/oplog_applier_utils.cpp#L256)
+in the same way as [applying a normal oplog entry](https://github.com/mongodb/mongo/blob/07e1e93c566243983b45385f5c85bc7df0026f39/src/mongo/db/repl/README.md#oplog-entry-application).
+This splitting mechanism ensures that operations on one document are applied by only one thread.
+Together with the primary guaranteeing that there are no prepare conflicts between concurrent
+prepared transactions and concurrent CRUD operations, this makes it possible to fully parallelize
+the application of `prepareTransaction` oplog entries with other CRUD or `prepareTransaction`
+oplog entries. Each writer thread that gets assigned a subset of the transaction operations
+starts a split prepared transaction with a new session and applies it using the steps described
+above. This means that one `prepareTransaction` oplog entry might create multiple smaller prepared
+transactions. All the sessions of the original prepared transactions and their split sessions, as
+well as the IDs of those writer threads, are tracked in the
+[SplitPrepareSessionManager](https://github.com/mongodb/mongo/blob/07e1e93c566243983b45385f5c85bc7df0026f39/src/mongo/db/repl/split_prepare_session_manager.h)
+class.
+
+A `commitTransaction` or `abortTransaction` oplog entry on steady-state secondary nodes may refer
+to a non-split prepared transaction (e.g. one prepared while being primary or during recovery) or
+to a split prepared transaction. The former case is handled in the same way as on recovering nodes.
+For the latter case, we first query the `SplitPrepareSessionManager` for the sessions and
+thread IDs that were used when splitting and applying the corresponding `prepareTransaction`
+oplog entry, and then [split the commitTransaction or abortTransaction oplog entry](https://github.com/mongodb/mongo/blob/07e1e93c566243983b45385f5c85bc7df0026f39/src/mongo/db/repl/oplog_applier_utils.cpp#L340-L348)
+to the same threads to make sure that each split of the original prepared transaction is correctly
+committed or aborted.
+
+Note that it is possible for a secondary node to step up after applying a split prepared
+transaction, so when a primary node receives a `commitTransaction` command, it needs to
+[additionally commit all the splits](https://github.com/mongodb/mongo/blob/07e1e93c566243983b45385f5c85bc7df0026f39/src/mongo/db/transaction/transaction_participant.cpp#L1951-L1958)
+of the original prepared transaction if they exist. Another caveat of step-up is that we need
+to prepare and commit/abort the original transaction (a.k.a. the top-level transaction) in addition
+to its split transactions, so that on step-up the in-memory transaction state of the original
+transaction's session is correctly set; otherwise the session cannot be used to run new transaction
+commands.
However we do not need to apply any operations in the original transaction (treated like +an [empty transaction](https://github.com/mongodb/mongo/blob/07e1e93c566243983b45385f5c85bc7df0026f39/src/mongo/db/repl/transaction_oplog_application.cpp#L720-L731)) +since the operations should be applied by its split transactions. + ## Transaction Errors ### PreparedTransactionInProgress Errors @@ -1196,7 +1236,7 @@ the existing transaction to be **implicitly aborted**. Implicitly aborting a tra the transaction is aborted without an explicit `abortTransaction` command. However, prepared transactions cannot be implicitly aborted, since they can only complete after a `commitTransaction` or `abortTransaction` command from the `TransactionCoordinator`. As a result, any attempt to start a -new transaction on a session that already has a prepared trasaction on it will fail with a +new transaction on a session that already has a prepared transaction on it will fail with a `PreparedTransactionInProgress` error. Additionally, the only operations that can be run on a prepared transaction are @@ -1291,7 +1331,7 @@ There are a number of ways that a node will run for election: longer than `catchUpTakeoverDelayMillis` (default 30 seconds), it will run for election. This behvarior is known as a **catchup takeover**. If primary catchup is taking too long, catchup takeover can help allow the replica set to accept writes sooner, since a more up-to-date node will - not spend as much time (or any time) in catchup. See the "Transitioning to `PRIMARY`" section for + not spend as much time (or any time) in catchup. See the [Transitioning to `PRIMARY` section](https://github.com/mongodb/mongo/blob/master/src/mongo/db/repl/README.md#transitioning-to-primary) section for further details on primary catchup. * The `replSetStepUp` command can be run on an eligible node to cause it to run for election immediately. We don't expect users to call this command, but it is run internally for election @@ -1304,14 +1344,26 @@ There are a number of ways that a node will run for election: `enableElectionHandoff` is false, then nodes in the replica set will wait until the election timeout triggers to run for election. 
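+
+As a worked example of the priority takeover timeout mentioned above (illustrative arithmetic
+only, not server code; it assumes a 10 second election timeout, the default
+`electionTimeoutMillis`), nodes with a larger priority rank wait longer before attempting a
+takeover:
+
+```
+#include <chrono>
+#include <iostream>
+
+// (election timeout) * (priority rank + 1), e.g. rank 0 -> 10s, rank 1 -> 20s, rank 2 -> 30s.
+int main() {
+    const auto electionTimeout = std::chrono::milliseconds{10000};
+    for (int priorityRank = 0; priorityRank < 3; ++priorityRank) {
+        const auto takeoverDelay = electionTimeout * (priorityRank + 1);
+        std::cout << "rank " << priorityRank << ": " << takeoverDelay.count() << " ms\n";
+    }
+    return 0;
+}
+```
+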
+### Code references +* [election timeout](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L345) ([defaults](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/repl_set_config.idl#L101)) +* [priority takeover](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp#L449) +* [priority takeover: priority check](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/topology_coordinator.cpp#L1568-L1578) +* [priority takeover: wait time calculation](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/repl_set_config.cpp#L705-L709) +* [newly elected primary catchup](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L4714) +* [primary catchup completion](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L4799-L4813) +* [primary start accepting writes](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L1361) +* [catchup takeover](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp#L466) +* [catchup takeover: takeover check](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp#L466) +* [election handoff](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L2924) +* [election handoff: skip wait](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L2917-L2921) ### Candidate Perspective A candidate node first runs a dry-run election. In a **dry-run election**, a node starts a -[`VoteRequester`](https://github.com/mongodb/mongo/blob/r4.2.0/src/mongo/db/repl/vote_requester.h), +[`VoteRequester`](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/vote_requester.h), which uses a -[`ScatterGatherRunner`](https://github.com/mongodb/mongo/blob/r4.2.0/src/mongo/db/repl/scatter_gather_runner.h) -to send a `replSetRequestVotes` command to every node asking if that node would vote for it. The +[`ScatterGatherRunner`](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/scatter_gather_runner.h) +to send a [`replSetRequestVotes` command](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/repl_set_request_votes.cpp#L47) to every node asking if that node would vote for it. The candidate node does not increase its term during a dry-run because if a primary ever sees a higher term than its own, it steps down. By first conducting a dry-run election, we make it unlikely that nodes will increase their own term when they would not win and prevent needless primary stepdowns. @@ -1330,6 +1382,13 @@ members in order to get elected. If the candidate received votes from a majority of nodes, including itself, the candidate wins the election. 
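+
+For intuition (simple arithmetic only, ignoring details such as non-voting members), the number
+of votes a candidate needs is a strict majority of the replica set:
+
+```
+// Illustrative only: majority of n voting members, e.g. 3 -> 2, 5 -> 3, 7 -> 4.
+int votesNeeded(int numVotingMembers) {
+    return numVotingMembers / 2 + 1;
+}
+```
+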
+#### Code references +* [dry-run election](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp#L203) +* [skipping dry-run](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp#L185) +* [real election](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp#L277) +* [candidate process vote response](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/vote_requester.cpp#L114) +* [candidate checks election result](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp#L416) + ### Voter Perspective When a node receives a `replSetRequestVotes` command, it first checks if the term is up to date and @@ -1351,6 +1410,10 @@ the `local.replset.election` collection. This information is read into memory at future elections. This ensures that even if a node restarts, it does not vote for two nodes in the same term. +#### Code references +* [node processing vote request](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/topology_coordinator.cpp#L3429) +* [recording LastVote durably](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L5739) + ### Transitioning to `PRIMARY` Now that the candidate has won, it must become `PRIMARY`. First it clears its sync source and @@ -1390,6 +1453,16 @@ Finally, the node drops all temporary collections, restores all locks for and logs “transition to primary complete”. At this point, new writes will be accepted by the primary. +#### Code references +* [clearing the sync source, notify nodes of election, prepare catch up](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L4697-L4707) +* [catchup to latest optime known via heartbeats](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L4800) +* [catchup-timeout](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L4746) +* [always allow chaining for catchup](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L5231) +* [enter drain mode after catchup attempt](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L4783) +* [exit drain mode](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L1205) +* [term bump](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L1300) +* [drop temporary collections](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L532) + ## Step Down ### Conditional @@ -1418,9 +1491,16 @@ conditions and steps down immediately after it reaches the `secondaryCatchUpPeri Upon a successful stepdown, it yields locks held by [prepared transactions](#stepdown-with-a-prepared-transaction) because we are now a secondary. Finally, we log stepdown metrics and update our member state to `SECONDARY`. -* User-facing documentation is -available [here](https://www.mongodb.com/docs/manual/reference/command/replSetStepDown/#command-fields). -* [Code spelunking point](https://github.com/mongodb/mongo/blob/843762120897ed2dbfe8bbc69dbbf99b641c009c/src/mongo/db/repl/replication_coordinator_impl.cpp#L2737). 
+ +#### Code references +* [User-facing documentation](https://www.mongodb.com/docs/manual/reference/command/replSetStepDown/#command-fields). +* [Replication coordinator stepDown method](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L2729) +* [ReplSetStepDown command class](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/repl_set_commands.cpp#L527) +* [The node loops trying to step down](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L2836) +* [A majority of nodes need to have reached the last applied optime](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/topology_coordinator.cpp#L2733) +* [At least one caught up node needs to be electable](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/topology_coordinator.cpp#L2738) +* [Set the LeaderMode to kSteppingDown](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/topology_coordinator.cpp#L1721) +* [Upon a successful stepdown, it yields locks held by prepared transactions](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L2899) ### Unconditional @@ -1440,6 +1520,14 @@ During unconditional stepdown, we do not check preconditions before attempting t to conditional stepdowns, we must kill any conflicting user/system operations before acquiring the RSTL and yield locks of prepared transactions following a successful stepdown. +#### Code references +* [Stepping down on learning of a higher term](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L6066) +* [Liveness timeout checks](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/topology_coordinator.cpp#L1236-L1249) +* [Stepping down on liveness timeout](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp#L424) +* [ReplSetReconfig command class](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/repl_set_commands.cpp#L431) +* [Stepping on reconfig](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl.cpp#L4010) +* [Stepping down on heartbeat](https://github.com/mongodb/mongo/blob/r6.2.0/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp#L980) + ### Concurrent Stepdown Attempts It is possible to have concurrent conditional and unconditional stepdown attempts. In this case, @@ -1684,15 +1772,15 @@ The `initialSyncTransientErrorRetryPeriodSeconds` is also used to control retrie fetcher and all network operations in initial sync which take place after the data cloning has started. -As of v4.4, initial syncing a node with [two-phase index builds](https://github.com/mongodb/mongo/blob/0a7641e69031fcfdf25a1780a3b62bca5f59d68f/src/mongo/db/catalog/README.md#replica-set-index-builds) -will immediately build all ready indexes from the sync source and setup the index builder threads -for any unfinished index builds. -[See here](https://github.com/mongodb/mongo/blob/85d75907fd12c2360cf16b97f941386f343ca6fc/src/mongo/db/repl/collection_cloner.cpp#L247-L301). +As of v4.4, initial syncing a node with [two-phase index builds](https://github.com/mongodb/mongo/blob/0a7641e69031fcfdf25a1780a3b62bca5f59d68f/src/mongo/db/catalog/README.md#replica-set-index-builds) +will immediately build all ready indexes from the sync source and setup the index builder threads +for any unfinished index builds. 
+[See here](https://github.com/mongodb/mongo/blob/85d75907fd12c2360cf16b97f941386f343ca6fc/src/mongo/db/repl/collection_cloner.cpp#L247-L301). -This is necessary to avoid a scenario where the primary node cannot satisfy the index builds commit -quorum if it depends on the initial syncing nodes vote. Prior to this, initial syncing nodes would -start the index build when they came across the `commitIndexBuild` oplog entry, which is only -observable once the index builds commit quorum has been satisfied. +This is necessary to avoid a scenario where the primary node cannot satisfy the index builds commit +quorum if it depends on the initial syncing nodes vote. Prior to this, initial syncing nodes would +start the index build when they came across the `commitIndexBuild` oplog entry, which is only +observable once the index builds commit quorum has been satisfied. [See this test for an example](https://github.com/mongodb/mongo/blob/f495bdead326a06a76f8a980e44092deb096a21d/jstests/noPassthrough/commit_quorum_does_not_hang_with_initial_sync.js). ## Oplog application phase @@ -2058,11 +2146,11 @@ transaction. For a prepared transaction, we have the following guarantee: `prepa **`currentCommittedSnapshot`**: An optime maintained in `ReplicationCoordinator` that is used to serve majority reads and is always guaranteed to be <= `lastCommittedOpTime`. When `eMRC=true`, this -is currently [set to the stable optime](https://github.com/mongodb/mongo/blob/00fbc981646d9e6ebc391f45a31f4070d4466753/src/mongo/db/repl/replication_coordinator_impl.cpp#L4945). +is currently [set to the stable optime](https://github.com/mongodb/mongo/blob/00fbc981646d9e6ebc391f45a31f4070d4466753/src/mongo/db/repl/replication_coordinator_impl.cpp#L4945). Since it is reset every time we recalculate the stable optime, it will also be up to date. -When `eMRC=false`, this [is set](https://github.com/mongodb/mongo/blob/00fbc981646d9e6ebc391f45a31f4070d4466753/src/mongo/db/repl/replication_coordinator_impl.cpp#L4952-L4961) -to the minimum of the stable optime and the `lastCommittedOpTime`, even though it is not used to +When `eMRC=false`, this [is set](https://github.com/mongodb/mongo/blob/00fbc981646d9e6ebc391f45a31f4070d4466753/src/mongo/db/repl/replication_coordinator_impl.cpp#L4952-L4961) +to the minimum of the stable optime and the `lastCommittedOpTime`, even though it is not used to serve majority reads in that case. **`initialDataTimestamp`**: A timestamp used to indicate the timestamp at which history “begins”. @@ -2117,7 +2205,7 @@ populated internally from the `currentCommittedSnapshot` timestamp inside `Repli **`stable_timestamp`**: The newest timestamp at which the storage engine is allowed to take a checkpoint, which can be thought of as a consistent snapshot of the data. Replication informs the storage engine of where it is safe to take its next checkpoint. This timestamp is guaranteed to be -majority committed so that RTT rollback can use it. In the case when +majority committed so that RTT rollback can use it. In the case when [`eMRC=false`](#enableMajorityReadConcern-flag), the stable timestamp may not be majority committed, which is why we must use the Rollback via Refetch rollback algorithm. 
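+
+Pulling the `currentCommittedSnapshot` rule described above into one place (a hypothetical sketch,
+not the actual `ReplicationCoordinator` code; optimes are reduced to plain integers purely for
+illustration):
+
+```
+#include <algorithm>
+#include <cstdint>
+
+using IllustrativeOpTime = std::uint64_t;  // stand-in for a real optime
+
+// eMRC=true: the stable optime. eMRC=false: the minimum of the stable optime and the
+// lastCommittedOpTime (even though it is not used to serve majority reads in that case).
+IllustrativeOpTime currentCommittedSnapshot(bool enableMajorityReadConcern,
+                                            IllustrativeOpTime stableOpTime,
+                                            IllustrativeOpTime lastCommittedOpTime) {
+    return enableMajorityReadConcern ? stableOpTime
+                                     : std::min(stableOpTime, lastCommittedOpTime);
+}
+```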
diff --git a/src/mongo/db/repl/SConscript b/src/mongo/db/repl/SConscript index 66ac2d0107864..b672e4275e8e3 100644 --- a/src/mongo/db/repl/SConscript +++ b/src/mongo/db/repl/SConscript @@ -82,7 +82,6 @@ env.Library( '$BUILD_DIR/mongo/db/catalog/import_collection_oplog_entry', '$BUILD_DIR/mongo/db/catalog/index_build_oplog_entry', '$BUILD_DIR/mongo/db/catalog/local_oplog_info', - '$BUILD_DIR/mongo/db/catalog/multi_index_block', '$BUILD_DIR/mongo/db/change_stream_change_collection_manager', '$BUILD_DIR/mongo/db/change_stream_pre_images_collection_manager', '$BUILD_DIR/mongo/db/change_stream_serverless_helpers', @@ -516,6 +515,7 @@ env.Library( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/catalog/collection_crud', + '$BUILD_DIR/mongo/db/dbhelpers', '$BUILD_DIR/mongo/db/multitenancy', '$BUILD_DIR/mongo/db/op_observer/op_observer', '$BUILD_DIR/mongo/db/query_exec', @@ -563,7 +563,7 @@ env.Library( '$BUILD_DIR/mongo/db/serverless/serverless_lock', '$BUILD_DIR/mongo/db/session/kill_sessions_local', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', - '$BUILD_DIR/mongo/db/storage/historical_ident_tracker', + '$BUILD_DIR/mongo/db/storage/remove_saver', '$BUILD_DIR/mongo/util/namespace_string_database_name_util', 'drop_pending_collection_reaper', ], @@ -693,8 +693,8 @@ env.Library( 'split_horizon.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/base', - '$BUILD_DIR/mongo/db/concurrency/lock_manager', + '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/util/concurrency/spin_lock', '$BUILD_DIR/mongo/util/net/network', ], ) @@ -1105,6 +1105,7 @@ env.Library( '$BUILD_DIR/mongo/db/dbdirectclient', '$BUILD_DIR/mongo/db/ops/write_ops_exec', '$BUILD_DIR/mongo/db/shard_role_api', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/wiredtiger/storage_wiredtiger_import', '$BUILD_DIR/mongo/idl/cluster_parameter_synchronization_helpers', '$BUILD_DIR/mongo/rpc/metadata', @@ -1242,6 +1243,7 @@ env.Library( '$BUILD_DIR/mongo/db/multitenancy', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/server_feature_flags', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', 'read_concern_args', ], ) @@ -1401,6 +1403,7 @@ env.Library( 'tenant_migration_recipient_entry_helpers.cpp', ], LIBDEPS=[ + '$BUILD_DIR/mongo/db/keys_collection_util', '$BUILD_DIR/mongo/util/future_util', 'repl_coordinator_interface', 'repl_server_parameters', @@ -1657,7 +1660,6 @@ env.Library( ) if wiredtiger: - env.Library( target='oplog_applier_impl_test_fixture', source=[ @@ -1678,6 +1680,9 @@ if wiredtiger: 'replmocks', 'storage_interface_impl', ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/op_observer/op_observer', + ], ) env.Library( @@ -1763,6 +1768,7 @@ if wiredtiger: 'rollback_impl_test.cpp', 'scatter_gather_test.cpp', 'shard_merge_recipient_op_observer_test.cpp', + 'shard_merge_recipient_service_test.cpp', 'speculative_majority_read_info_test.cpp', 'split_horizon_test.cpp', 'split_prepare_session_manager_test.cpp', @@ -1771,16 +1777,17 @@ if wiredtiger: 'sync_source_resolver_test.cpp', 'task_runner_test.cpp', 'task_runner_test_fixture.cpp', - 'tenant_oplog_applier_test.cpp', - 'tenant_oplog_batcher_test.cpp', - 'vote_requester_test.cpp', - 'wait_for_majority_service_test.cpp', + 'tenant_file_importer_service_test.cpp', 'tenant_migration_access_blocker_registry_test.cpp', 'tenant_migration_access_blocker_util_test.cpp', 'tenant_migration_recipient_access_blocker_test.cpp', 'tenant_migration_recipient_entry_helpers_test.cpp', 'tenant_migration_recipient_service_test.cpp', - 
'tenant_migration_recipient_service_shard_merge_test.cpp', + 'tenant_oplog_applier_test.cpp', + 'tenant_oplog_applier_shard_merge_test.cpp', + 'tenant_oplog_batcher_test.cpp', + 'vote_requester_test.cpp', + 'wait_for_majority_service_test.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/bson/mutable/mutable_bson', @@ -1848,6 +1855,7 @@ if wiredtiger: 'primary_only_service_test_fixture', 'repl_coordinator_impl', 'repl_server_parameters', + 'replica_set_aware_service', 'replica_set_messages', 'replication_consistency_markers_impl', 'replication_process', @@ -1947,10 +1955,10 @@ env.CppUnitTest( 'tenant_file_cloner_test.cpp', ], LIBDEPS=[ - # Required for service context test fixture + '$BUILD_DIR/mongo/db/op_observer/op_observer', 'cloner_test_fixtures', 'initial_sync_cloners', - 'tenant_migration_cloners' + 'tenant_migration_cloners', ], ) @@ -2032,8 +2040,13 @@ env.Library( 'hello_command', ], ) - -env.Library( +wait_for_majority_service_env = env.Clone() +# TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update +wait_for_majority_service_env.Append( + CXXFLAGS=[] if wait_for_majority_service_env.TargetOSIs('windows') else [ + '-Wno-deprecated', + ], ) +wait_for_majority_service_env.Library( target='wait_for_majority_service', source=[ 'wait_for_majority_service.cpp', @@ -2141,12 +2154,10 @@ env.Benchmark( '$BUILD_DIR/mongo/db/catalog/catalog_helpers', '$BUILD_DIR/mongo/db/catalog/catalog_impl', '$BUILD_DIR/mongo/db/index_builds_coordinator_mongod', - '$BUILD_DIR/mongo/db/op_observer/op_observer', '$BUILD_DIR/mongo/db/op_observer/op_observer_impl', '$BUILD_DIR/mongo/db/op_observer/oplog_writer_impl', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/service_context_d', - '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', '$BUILD_DIR/mongo/db/shard_role_api', '$BUILD_DIR/mongo/db/storage/storage_control', diff --git a/src/mongo/db/repl/abstract_async_component.cpp b/src/mongo/db/repl/abstract_async_component.cpp index 48d27994eed24..d99c981d5879c 100644 --- a/src/mongo/db/repl/abstract_async_component.cpp +++ b/src/mongo/db/repl/abstract_async_component.cpp @@ -27,10 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include "mongo/base/status_with.h" #include "mongo/db/repl/abstract_async_component.h" - #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/repl/abstract_async_component.h b/src/mongo/db/repl/abstract_async_component.h index 1059e9865f991..80d91df6c6670 100644 --- a/src/mongo/db/repl/abstract_async_component.h +++ b/src/mongo/db/repl/abstract_async_component.h @@ -31,14 +31,18 @@ #include #include +#include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/static_assert.h" #include "mongo/base/status.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/abstract_async_component_test.cpp b/src/mongo/db/repl/abstract_async_component_test.cpp index bd126bd7ecf4b..716b74c0938fa 100644 --- a/src/mongo/db/repl/abstract_async_component_test.cpp +++ b/src/mongo/db/repl/abstract_async_component_test.cpp @@ -27,17 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/repl/abstract_async_component.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/task_executor_mock.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/platform/mutex.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace { diff --git a/src/mongo/db/repl/all_database_cloner.cpp b/src/mongo/db/repl/all_database_cloner.cpp index bd3049d565d7b..c5945a96a77af 100644 --- a/src/mongo/db/repl/all_database_cloner.cpp +++ b/src/mongo/db/repl/all_database_cloner.cpp @@ -28,21 +28,45 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/client.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/multitenancy_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/all_database_cloner.h" +#include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_auth.h" #include "mongo/db/repl/replication_consistency_markers_gen.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/util/assert_util.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationInitialSync @@ -65,16 +89,17 @@ BaseCloner::ClonerStages AllDatabaseCloner::getStages() { } Status AllDatabaseCloner::ensurePrimaryOrSecondary( - const executor::RemoteCommandResponse& isMasterReply) { - if (!isMasterReply.isOK()) { - LOGV2(21054, "Cannot reconnect because isMaster command failed"); - return isMasterReply.status; + const executor::RemoteCommandResponse& helloReply) { + if (!helloReply.isOK()) { + LOGV2(21054, "Cannot reconnect because 'hello' command failed"); + return helloReply.status; } - if (isMasterReply.data["ismaster"].trueValue() || isMasterReply.data["secondary"].trueValue()) + if (helloReply.data["isWritablePrimary"].trueValue() || + helloReply.data["secondary"].trueValue()) return Status::OK(); // There is a window during startup where a node has an invalid configuration and will have - // an isMaster response the same as a removed node. So we must check to see if the node is + // an "hello" response the same as a removed node. So we must check to see if the node is // removed by checking local configuration. 
auto memberData = ReplicationCoordinator::get(getGlobalServiceContext())->getMemberData(); auto syncSourceIter = std::find_if( @@ -115,8 +140,8 @@ BaseCloner::AfterStageBehavior AllDatabaseCloner::connectStage() { // handle the reconnect itself. This is necessary to get correct backoff behavior. if (client->getServerHostAndPort() != getSource()) { client->setHandshakeValidationHook( - [this](const executor::RemoteCommandResponse& isMasterReply) { - return ensurePrimaryOrSecondary(isMasterReply); + [this](const executor::RemoteCommandResponse& helloReply) { + return ensurePrimaryOrSecondary(helloReply); }); uassertStatusOK(client->connect(getSource(), StringData(), boost::none)); } else { @@ -173,7 +198,7 @@ BaseCloner::AfterStageBehavior AllDatabaseCloner::listDatabasesStage() { : boost::none; DatabaseName dbName = DatabaseNameUtil::deserialize(tenantId, dbBSON["name"].str()); - if (dbName.db() == "local") { + if (dbName.isLocalDB()) { LOGV2_DEBUG(21056, 1, "Excluding database from the 'listDatabases' response: {db}", @@ -230,7 +255,6 @@ void AllDatabaseCloner::postStage() { BSONObj cmdObj = BSON("dbStats" << 1); BSONObjBuilder b(cmdObj); if (gMultitenancySupport && - serverGlobalParams.featureCompatibility.isVersionInitialized() && gFeatureFlagRequireTenantID.isEnabled(serverGlobalParams.featureCompatibility) && dbName.tenantId()) { dbName.tenantId()->serializeToBSON("$tenant", &b); @@ -322,9 +346,9 @@ void AllDatabaseCloner::postStage() { if (!foundAuthSchemaDoc && foundUser) { std::string msg = str::stream() << "During initial sync, found documents in " - << NamespaceString::kAdminUsersNamespace.ns() + << NamespaceString::kAdminUsersNamespace.toStringForErrorMsg() << " but could not find an auth schema version document in " - << NamespaceString::kServerConfigurationNamespace.ns() << ". " + << NamespaceString::kServerConfigurationNamespace.toStringForErrorMsg() << ". " << "This indicates that the primary of this replica set was not " "successfully " "upgraded to schema version " diff --git a/src/mongo/db/repl/all_database_cloner.h b/src/mongo/db/repl/all_database_cloner.h index 4f74754500090..150d7bda9f387 100644 --- a/src/mongo/db/repl/all_database_cloner.h +++ b/src/mongo/db/repl/all_database_cloner.h @@ -29,12 +29,24 @@ #pragma once +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/db/database_name.h" #include "mongo/db/repl/base_cloner.h" #include "mongo/db/repl/database_cloner.h" #include "mongo/db/repl/initial_sync_base_cloner.h" #include "mongo/db/repl/initial_sync_shared_data.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { @@ -82,7 +94,7 @@ class AllDatabaseCloner final : public InitialSyncBaseCloner { * Validation function to ensure we connect only to primary or secondary nodes. * * Because the cloner connection is separate from the usual inter-node connection pool and - * did not have the 'hangUpOnStepDown:false' flag set in the initial isMaster request, we + * did not have the 'hangUpOnStepDown:false' flag set in the initial "hello" request, we * will always disconnect if the sync source transitions to a state other than PRIMARY * or SECONDARY. 
It will not disconnect on a PRIMARY to SECONDARY or SECONDARY to PRIMARY * transition because we no longer do that (the flag name is anachronistic). After @@ -99,7 +111,7 @@ class AllDatabaseCloner final : public InitialSyncBaseCloner { * would succeed and we would have an inconsistent node. If other data was added we would * invariant during oplog application with a NamespaceNotFound error. */ - Status ensurePrimaryOrSecondary(const executor::RemoteCommandResponse& isMasterReply); + Status ensurePrimaryOrSecondary(const executor::RemoteCommandResponse& helloReply); /** * Stage function that makes a connection to the sync source. diff --git a/src/mongo/db/repl/all_database_cloner_test.cpp b/src/mongo/db/repl/all_database_cloner_test.cpp index b5310021a5b69..daabfb4b0dd89 100644 --- a/src/mongo/db/repl/all_database_cloner_test.cpp +++ b/src/mongo/db/repl/all_database_cloner_test.cpp @@ -28,20 +28,37 @@ */ -#include "mongo/platform/basic.h" - +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/all_database_cloner.h" #include "mongo/db/repl/initial_sync_cloner_test_fixture.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" -#include "mongo/db/repl/storage_interface.h" -#include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -97,14 +114,14 @@ TEST_F(AllDatabaseClonerTest, ListDatabaseStageSortsAdminCorrectlyGlobalAdminBef auto databases = getDatabasesFromCloner(cloner.get()); ASSERT_EQUALS(5u, databases.size()); - ASSERT_EQUALS("admin", databases[0].db()); + ASSERT_EQUALS("admin", databases[0].toString_forTest()); ASSERT(!databases[0].tenantId()); - ASSERT_EQUALS("admin", databases[1].db()); + ASSERT_EQUALS("admin", databases[1].toString_forTest()); ASSERT(databases[1].tenantId()); - ASSERT_EQUALS("admin", databases[2].db()); + ASSERT_EQUALS("admin", databases[2].toString_forTest()); ASSERT(databases[2].tenantId()); - ASSERT_EQUALS("a", databases[3].db()); - ASSERT_EQUALS("aab", databases[4].db()); + ASSERT_EQUALS("a", databases[3].toString_forTest()); + ASSERT_EQUALS("aab", databases[4].toString_forTest()); } TEST_F(AllDatabaseClonerTest, ListDatabaseStageSortsAdminCorrectlyTenantAdminSetToFirst) { @@ -137,14 +154,14 @@ TEST_F(AllDatabaseClonerTest, ListDatabaseStageSortsAdminCorrectlyTenantAdminSet auto databases = getDatabasesFromCloner(cloner.get()); ASSERT_EQUALS(5u, databases.size()); - ASSERT_EQUALS("admin", databases[0].db()); + ASSERT_EQUALS("admin", databases[0].toString_forTest()); 
ASSERT(!databases[0].tenantId()); - ASSERT_EQUALS("admin", databases[1].db()); + ASSERT_EQUALS("admin", databases[1].toString_forTest()); ASSERT(databases[1].tenantId()); - ASSERT_EQUALS("admin", databases[2].db()); + ASSERT_EQUALS("admin", databases[2].toString_forTest()); ASSERT(databases[2].tenantId()); - ASSERT_EQUALS("a", databases[3].db()); - ASSERT_EQUALS("aab", databases[4].db()); + ASSERT_EQUALS("a", databases[3].toString_forTest()); + ASSERT_EQUALS("aab", databases[4].toString_forTest()); } @@ -493,7 +510,7 @@ TEST_F(AllDatabaseClonerTest, AdminIsSetToFirst) { ASSERT_OK(cloner->run()); auto databases = getDatabasesFromCloner(cloner.get()); - ASSERT_EQUALS("admin", databases[0].db()); + ASSERT_EQUALS("admin", databases[0].toString_forTest()); _mockServer->setCommandReply( "listDatabases", fromjson("{ok:1, databases:[{name:'admin'}, {name:'a'}, {name:'b'}]}")); @@ -503,7 +520,7 @@ TEST_F(AllDatabaseClonerTest, AdminIsSetToFirst) { ASSERT_OK(cloner->run()); databases = getDatabasesFromCloner(cloner.get()); - ASSERT_EQUALS("admin", databases[0].db()); + ASSERT_EQUALS("admin", databases[0].toString_forTest()); } TEST_F(AllDatabaseClonerTest, LocalIsRemoved) { @@ -516,8 +533,8 @@ TEST_F(AllDatabaseClonerTest, LocalIsRemoved) { auto databases = getDatabasesFromCloner(cloner.get()); ASSERT_EQUALS(2u, databases.size()); - ASSERT_EQUALS("a", databases[0].db()); - ASSERT_EQUALS("aab", databases[1].db()); + ASSERT_EQUALS("a", databases[0].toString_forTest()); + ASSERT_EQUALS("aab", databases[1].toString_forTest()); _mockServer->setCommandReply( "listDatabases", fromjson("{ok:1, databases:[{name:'local'}, {name:'a'}, {name:'b'}]}")); @@ -528,8 +545,8 @@ TEST_F(AllDatabaseClonerTest, LocalIsRemoved) { databases = getDatabasesFromCloner(cloner.get()); ASSERT_EQUALS(2u, databases.size()); - ASSERT_EQUALS("a", databases[0].db()); - ASSERT_EQUALS("b", databases[1].db()); + ASSERT_EQUALS("a", databases[0].toString_forTest()); + ASSERT_EQUALS("b", databases[1].toString_forTest()); } TEST_F(AllDatabaseClonerTest, DatabaseStats) { @@ -566,16 +583,18 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) { auto databases = getDatabasesFromCloner(cloner.get()); ASSERT_EQUALS(3u, databases.size()); - ASSERT_EQUALS("admin", databases[0].db()); - ASSERT_EQUALS("aab", databases[1].db()); - ASSERT_EQUALS("a", databases[2].db()); + ASSERT_EQUALS("admin", databases[0].toString_forTest()); + ASSERT_EQUALS("aab", databases[1].toString_forTest()); + ASSERT_EQUALS("a", databases[2].toString_forTest()); auto stats = cloner->getStats(); ASSERT_EQUALS(0, stats.databasesCloned); ASSERT_EQUALS(3, stats.databaseStats.size()); - ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); - ASSERT_EQUALS(DatabaseName(boost::none, "aab"), stats.databaseStats[1].dbname); - ASSERT_EQUALS(DatabaseName(boost::none, "a"), stats.databaseStats[2].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::createDatabaseName_forTest(boost::none, "aab"), + stats.databaseStats[1].dbname); + ASSERT_EQUALS(DatabaseName::createDatabaseName_forTest(boost::none, "a"), + stats.databaseStats[2].dbname); ASSERT_EQUALS(_clock.now(), stats.databaseStats[0].start); ASSERT_EQUALS(Date_t(), stats.databaseStats[0].end); ASSERT_EQUALS(Date_t(), stats.databaseStats[1].start); @@ -599,9 +618,11 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) { stats = cloner->getStats(); ASSERT_EQUALS(1, stats.databasesCloned); ASSERT_EQUALS(3, stats.databaseStats.size()); - 
ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); - ASSERT_EQUALS(DatabaseName(boost::none, "aab"), stats.databaseStats[1].dbname); - ASSERT_EQUALS(DatabaseName(boost::none, "a"), stats.databaseStats[2].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::createDatabaseName_forTest(boost::none, "aab"), + stats.databaseStats[1].dbname); + ASSERT_EQUALS(DatabaseName::createDatabaseName_forTest(boost::none, "a"), + stats.databaseStats[2].dbname); ASSERT_EQUALS(_clock.now(), stats.databaseStats[0].end); ASSERT_EQUALS(_clock.now(), stats.databaseStats[1].start); ASSERT_EQUALS(Date_t(), stats.databaseStats[1].end); @@ -625,9 +646,11 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) { stats = cloner->getStats(); ASSERT_EQUALS(2, stats.databasesCloned); ASSERT_EQUALS(3, stats.databaseStats.size()); - ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); - ASSERT_EQUALS(DatabaseName(boost::none, "aab"), stats.databaseStats[1].dbname); - ASSERT_EQUALS(DatabaseName(boost::none, "a"), stats.databaseStats[2].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::createDatabaseName_forTest(boost::none, "aab"), + stats.databaseStats[1].dbname); + ASSERT_EQUALS(DatabaseName::createDatabaseName_forTest(boost::none, "a"), + stats.databaseStats[2].dbname); ASSERT_EQUALS(_clock.now(), stats.databaseStats[1].end); ASSERT_EQUALS(_clock.now(), stats.databaseStats[2].start); ASSERT_EQUALS(Date_t(), stats.databaseStats[2].end); @@ -640,9 +663,11 @@ TEST_F(AllDatabaseClonerTest, DatabaseStats) { stats = cloner->getStats(); ASSERT_EQUALS(3, stats.databasesCloned); - ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); - ASSERT_EQUALS(DatabaseName(boost::none, "aab"), stats.databaseStats[1].dbname); - ASSERT_EQUALS(DatabaseName(boost::none, "a"), stats.databaseStats[2].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::createDatabaseName_forTest(boost::none, "aab"), + stats.databaseStats[1].dbname); + ASSERT_EQUALS(DatabaseName::createDatabaseName_forTest(boost::none, "a"), + stats.databaseStats[2].dbname); ASSERT_EQUALS(_clock.now(), stats.databaseStats[2].end); } @@ -701,20 +726,20 @@ TEST_F(AllDatabaseClonerTest, auto databases = getDatabasesFromCloner(cloner.get()); // Expect 4 dbs, since "local" should be removed - DatabaseName adminWithTenantId = DatabaseName(tid, "admin"); - DatabaseName aWithTenantId = DatabaseName(tid, "a"); - DatabaseName aabWithTenantId = DatabaseName(tid, "aab"); + DatabaseName adminWithTenantId = DatabaseName::createDatabaseName_forTest(tid, "admin"); + DatabaseName aWithTenantId = DatabaseName::createDatabaseName_forTest(tid, "a"); + DatabaseName aabWithTenantId = DatabaseName::createDatabaseName_forTest(tid, "aab"); // Checks admin is first db. 
ASSERT_EQUALS(4u, databases.size()); - ASSERT_EQUALS("admin", databases[0].db()); - ASSERT_EQUALS("admin", databases[1].db()); - ASSERT_EQUALS("aab", databases[2].db()); - ASSERT_EQUALS("a", databases[3].db()); + ASSERT_EQUALS("admin", databases[0].toString_forTest()); + ASSERT_EQUALS("admin", databases[1].toString_forTest()); + ASSERT_EQUALS("aab", databases[2].toString_forTest()); + ASSERT_EQUALS("a", databases[3].toString_forTest()); auto stats = cloner->getStats(); ASSERT_EQUALS(0, stats.databasesCloned); ASSERT_EQUALS(4, stats.databaseStats.size()); - ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); ASSERT_EQUALS(adminWithTenantId, stats.databaseStats[1].dbname); ASSERT_EQUALS(aabWithTenantId, stats.databaseStats[2].dbname); ASSERT_EQUALS(aWithTenantId, stats.databaseStats[3].dbname); @@ -733,20 +758,20 @@ TEST_F(AllDatabaseClonerTest, FailPoint::alwaysOn, 0, fromjson(str::stream() << "{cloner: 'DatabaseCloner', stage: 'listCollections', database: '" - << adminWithTenantId.toStringWithTenantId() << "'}")); + << adminWithTenantId.toStringWithTenantId_forTest() << "'}")); dbClonerAfterFailPoint->setMode( FailPoint::alwaysOn, 0, fromjson(str::stream() << "{cloner: 'DatabaseCloner', stage: 'listCollections', database: '" - << adminWithTenantId.toStringWithTenantId() << "'}")); + << adminWithTenantId.toStringWithTenantId_forTest() << "'}")); // Wait for the failpoint to be reached. dbClonerBeforeFailPoint->waitForTimesEntered(timesEntered + 1); stats = cloner->getStats(); ASSERT_EQUALS(1, stats.databasesCloned); ASSERT_EQUALS(4, stats.databaseStats.size()); - ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); ASSERT_EQUALS(adminWithTenantId, stats.databaseStats[1].dbname); ASSERT_EQUALS(aabWithTenantId, stats.databaseStats[2].dbname); ASSERT_EQUALS(aWithTenantId, stats.databaseStats[3].dbname); @@ -764,19 +789,19 @@ TEST_F(AllDatabaseClonerTest, FailPoint::alwaysOn, 0, fromjson(str::stream() << "{cloner: 'DatabaseCloner', stage: 'listCollections', database: '" - << aabWithTenantId.toStringWithTenantId() << "'}")); + << aabWithTenantId.toStringWithTenantId_forTest() << "'}")); dbClonerAfterFailPoint->setMode( FailPoint::alwaysOn, 0, fromjson(str::stream() << "{cloner: 'DatabaseCloner', stage: 'listCollections', database: '" - << aabWithTenantId.toStringWithTenantId() << "'}")); + << aabWithTenantId.toStringWithTenantId_forTest() << "'}")); // Wait for the failpoint to be reached. 
dbClonerBeforeFailPoint->waitForTimesEntered(timesEntered + 1); stats = cloner->getStats(); ASSERT_EQUALS(2, stats.databasesCloned); ASSERT_EQUALS(4, stats.databaseStats.size()); - ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); ASSERT_EQUALS(adminWithTenantId, stats.databaseStats[1].dbname); ASSERT_EQUALS(aabWithTenantId, stats.databaseStats[2].dbname); ASSERT_EQUALS(aWithTenantId, stats.databaseStats[3].dbname); @@ -793,19 +818,19 @@ TEST_F(AllDatabaseClonerTest, FailPoint::alwaysOn, 0, fromjson(str::stream() << "{cloner: 'DatabaseCloner', stage: 'listCollections', database: '" - << aWithTenantId.toStringWithTenantId() << "'}")); + << aWithTenantId.toStringWithTenantId_forTest() << "'}")); dbClonerAfterFailPoint->setMode( FailPoint::alwaysOn, 0, fromjson(str::stream() << "{cloner: 'DatabaseCloner', stage: 'listCollections', database: '" - << aWithTenantId.toStringWithTenantId() << "'}")); + << aWithTenantId.toStringWithTenantId_forTest() << "'}")); // Wait for the failpoint to be reached. dbClonerBeforeFailPoint->waitForTimesEntered(timesEntered + 1); stats = cloner->getStats(); ASSERT_EQUALS(3, stats.databasesCloned); ASSERT_EQUALS(4, stats.databaseStats.size()); - ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); ASSERT_EQUALS(adminWithTenantId, stats.databaseStats[1].dbname); ASSERT_EQUALS(aabWithTenantId, stats.databaseStats[2].dbname); ASSERT_EQUALS(aWithTenantId, stats.databaseStats[3].dbname); @@ -821,7 +846,7 @@ TEST_F(AllDatabaseClonerTest, stats = cloner->getStats(); ASSERT_EQUALS(4, stats.databasesCloned); - ASSERT_EQUALS(DatabaseName(boost::none, "admin"), stats.databaseStats[0].dbname); + ASSERT_EQUALS(DatabaseName::kAdmin, stats.databaseStats[0].dbname); ASSERT_EQUALS(adminWithTenantId, stats.databaseStats[1].dbname); ASSERT_EQUALS(aabWithTenantId, stats.databaseStats[2].dbname); ASSERT_EQUALS(aWithTenantId, stats.databaseStats[3].dbname); diff --git a/src/mongo/db/repl/always_allow_non_local_writes.cpp b/src/mongo/db/repl/always_allow_non_local_writes.cpp index 0417debe7984f..1d9fab2f937de 100644 --- a/src/mongo/db/repl/always_allow_non_local_writes.cpp +++ b/src/mongo/db/repl/always_allow_non_local_writes.cpp @@ -27,9 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/db/repl/always_allow_non_local_writes.h" +#include "mongo/util/decorable.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/apply_ops.cpp b/src/mongo/db/repl/apply_ops.cpp index 133ce9acc54e9..c719627796b03 100644 --- a/src/mongo/db/repl/apply_ops.cpp +++ b/src/mongo/db/repl/apply_ops.cpp @@ -29,28 +29,48 @@ #include "mongo/db/repl/apply_ops.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/client.h" +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/concurrency/lock_state.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/service_context.h" -#include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -90,7 +110,7 @@ Status _applyOps(OperationContext* opCtx, // Need to check this here, or OldClientContext may fail an invariant. if (*opType != 'c' && !nss.isValid()) - return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.ns()}; + return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.toStringForErrorMsg()}; Status status = Status::OK(); @@ -98,7 +118,7 @@ Status _applyOps(OperationContext* opCtx, status = writeConflictRetry( opCtx, "applyOps", - nss.ns(), + nss, [opCtx, nss, opObj, opType, alwaysUpsert, oplogApplicationMode, &info, &dbName] { BSONObjBuilder builder; // Remove 'hash' field if it is set. A bit slow as it rebuilds the object. 
@@ -142,23 +162,30 @@ Status _applyOps(OperationContext* opCtx, auto nssFromUuid = catalog->lookupNSSByUUID(opCtx, uuid); if (nssFromUuid != nss) { return Status{ErrorCodes::Error(3318200), - str::stream() << "Namespace '" << nss.ns() - << "' and UUID '" << uuid.toString() - << "' point to different collections"}; + str::stream() + << "Namespace '" << nss.toStringForErrorMsg() + << "' and UUID '" << uuid.toString() + << "' point to different collections"}; } } - AutoGetCollection autoColl( - opCtx, nss, fixLockModeForSystemDotViewsChanges(nss, MODE_IX)); - if (!autoColl.getCollection()) { + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + fixLockModeForSystemDotViewsChanges(nss, MODE_IX)); + if (!collection.exists()) { // For idempotency reasons, return success on delete operations. if (*opType == 'd') { return Status::OK(); } uasserted(ErrorCodes::NamespaceNotFound, - str::stream() << "cannot apply insert or update operation on a " - "non-existent namespace " - << nss.ns() << ": " << mongo::redact(opObj)); + str::stream() + << "cannot apply insert or update operation on a " + "non-existent namespace " + << nss.toStringForErrorMsg() << ": " << mongo::redact(opObj)); } OldClientContext ctx(opCtx, nss); @@ -169,7 +196,7 @@ Status _applyOps(OperationContext* opCtx, // application in the future. const bool isDataConsistent = true; return repl::applyOperation_inlock(opCtx, - ctx.db(), + collection, ApplierOperation{&entry}, alwaysUpsert, oplogApplicationMode, @@ -249,8 +276,8 @@ Status applyOps(OperationContext* opCtx, } auto replCoord = repl::ReplicationCoordinator::get(opCtx); - bool userInitiatedWritesAndNotPrimary = opCtx->writesAreReplicated() && - !replCoord->canAcceptWritesForDatabase(opCtx, dbName.toStringWithTenantId()); + bool userInitiatedWritesAndNotPrimary = + opCtx->writesAreReplicated() && !replCoord->canAcceptWritesForDatabase(opCtx, dbName); if (userInitiatedWritesAndNotPrimary) return Status(ErrorCodes::NotWritablePrimary, diff --git a/src/mongo/db/repl/apply_ops.h b/src/mongo/db/repl/apply_ops.h index d2429f91ee9c9..d26ed873a1ecd 100644 --- a/src/mongo/db/repl/apply_ops.h +++ b/src/mongo/db/repl/apply_ops.h @@ -31,6 +31,9 @@ #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/apply_ops_command_info.h" #include "mongo/db/repl/apply_ops_gen.h" #include "mongo/db/repl/multiapplier.h" diff --git a/src/mongo/db/repl/apply_ops_command_info.cpp b/src/mongo/db/repl/apply_ops_command_info.cpp index 9b505b1613e9d..b9732171db469 100644 --- a/src/mongo/db/repl/apply_ops_command_info.cpp +++ b/src/mongo/db/repl/apply_ops_command_info.cpp @@ -28,13 +28,22 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/repl/apply_ops_command_info.h" +#include +#include -#include "mongo/bson/util/bson_extract.h" -#include "mongo/logv2/log.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/repl/apply_ops_command_info.h b/src/mongo/db/repl/apply_ops_command_info.h index 22348ba4f8ae5..6963872fc9342 100644 --- a/src/mongo/db/repl/apply_ops_command_info.h +++ b/src/mongo/db/repl/apply_ops_command_info.h @@ -27,9 +27,12 @@ * it in the license file. */ +#pragma once + #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/repl/apply_ops_gen.h" #include "mongo/db/repl/multiapplier.h" diff --git a/src/mongo/db/repl/apply_ops_test.cpp b/src/mongo/db/repl/apply_ops_test.cpp index d98cea106386a..08bd89a56b186 100644 --- a/src/mongo/db/repl/apply_ops_test.cpp +++ b/src/mongo/db/repl/apply_ops_test.cpp @@ -27,21 +27,50 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_options.h" -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/client.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/repl/apply_ops.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -143,10 +172,10 @@ TEST_F(ApplyOpsTest, CommandInNestedApplyOpsReturnsSuccess) { auto innerCmdObj = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "o" << BSON("create" << nss.coll())); + << "ns" << nss.getCommandNS().ns_forTest() << "o" << BSON("create" << nss.coll())); auto innerApplyOpsObj = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "o" << BSON("applyOps" << BSON_ARRAY(innerCmdObj))); auto cmdObj = BSON("applyOps" << BSON_ARRAY(innerApplyOpsObj)); @@ -162,10 +191,11 @@ BSONObj makeApplyOpsWithInsertOperation(const NamespaceString& nss, const BSONObj& documentToInsert) { auto insertOp = uuid ? 
BSON("op" << "i" - << "ns" << nss.ns() << "o" << documentToInsert << "ui" << *uuid) + << "ns" << nss.ns_forTest() << "o" << documentToInsert << "ui" + << *uuid) : BSON("op" << "i" - << "ns" << nss.ns() << "o" << documentToInsert); + << "ns" << nss.ns_forTest() << "o" << documentToInsert); return BSON("applyOps" << BSON_ARRAY(insertOp)); } @@ -177,7 +207,11 @@ TEST_F(ApplyOpsTest, ApplyOpsInsertIntoNonexistentCollectionReturnsNamespaceNotF auto cmdObj = makeApplyOpsWithInsertOperation(nss, boost::none, documentToInsert); BSONObjBuilder resultBuilder; ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, - applyOps(opCtx.get(), DatabaseName("test"), cmdObj, mode, &resultBuilder)); + applyOps(opCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "test"), + cmdObj, + mode, + &resultBuilder)); auto result = resultBuilder.obj(); auto status = getStatusFromApplyOpsResult(result); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, status); @@ -202,7 +236,11 @@ TEST_F(ApplyOpsTest, ApplyOpsInsertWithUuidIntoCollectionWithOtherUuid) { auto cmdObj = makeApplyOpsWithInsertOperation(nss, applyOpsUuid, documentToInsert); BSONObjBuilder resultBuilder; ASSERT_EQUALS(ErrorCodes::UnknownError, - applyOps(opCtx.get(), DatabaseName("test"), cmdObj, mode, &resultBuilder)); + applyOps(opCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "test"), + cmdObj, + mode, + &resultBuilder)); } TEST_F(ApplyOpsTest, ApplyOpsPropagatesOplogApplicationMode) { @@ -300,20 +338,20 @@ TEST_F(ApplyOpsTest, ExtractOperationsReturnsOperationsWithSameOpTimeAsApplyOps) auto ui1 = UUID::gen(); auto op1 = BSON("op" << "i" - << "ns" << ns1.ns() << "ui" << ui1 << "o" << BSON("_id" << 1)); + << "ns" << ns1.ns_forTest() << "ui" << ui1 << "o" << BSON("_id" << 1)); NamespaceString ns2 = NamespaceString::createNamespaceString_forTest("test.b"); auto ui2 = UUID::gen(); auto op2 = BSON("op" << "i" - << "ns" << ns2.ns() << "ui" << ui2 << "o" << BSON("_id" << 2)); + << "ns" << ns2.ns_forTest() << "ui" << ui2 << "o" << BSON("_id" << 2)); NamespaceString ns3 = NamespaceString::createNamespaceString_forTest("test.c"); auto ui3 = UUID::gen(); auto op3 = BSON("op" << "u" - << "ns" << ns3.ns() << "ui" << ui3 << "b" << true << "o" << BSON("x" << 1) - << "o2" << BSON("_id" << 3)); + << "ns" << ns3.ns_forTest() << "ui" << ui3 << "b" << true << "o" + << BSON("x" << 1) << "o2" << BSON("_id" << 3)); auto oplogEntry = makeOplogEntry(OpTypeEnum::kCommand, BSON("applyOps" << BSON_ARRAY(op1 << op2 << op3))); @@ -375,14 +413,14 @@ TEST_F(ApplyOpsTest, ExtractOperationsFromApplyOpsMultiStmtIds) { auto ui1 = UUID::gen(); auto op1 = BSON("op" << "i" - << "ns" << ns1.ns() << "ui" << ui1 << "o" << BSON("_id" << 1)); + << "ns" << ns1.ns_forTest() << "ui" << ui1 << "o" << BSON("_id" << 1)); NamespaceString ns2 = NamespaceString::createNamespaceString_forTest("test.b"); auto ui2 = UUID::gen(); auto op2 = BSON("op" << "u" - << "ns" << ns2.ns() << "ui" << ui2 << "b" << true << "o" << BSON("x" << 1) - << "o2" << BSON("_id" << 2)); + << "ns" << ns2.ns_forTest() << "ui" << ui2 << "b" << true << "o" + << BSON("x" << 1) << "o2" << BSON("_id" << 2)); auto oplogEntry = makeOplogEntry(OpTypeEnum::kCommand, BSON("applyOps" << BSON_ARRAY(op1 << op2)), {0, 1}); @@ -438,7 +476,7 @@ TEST_F(ApplyOpsTest, ApplyOpsFailsToDropAdmin) { auto dropDatabaseOp = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "o" << BSON("dropDatabase" << 1)); auto dropDatabaseCmdObj = BSON("applyOps" << BSON_ARRAY(dropDatabaseOp)); 
diff --git a/src/mongo/db/repl/base_cloner.cpp b/src/mongo/db/repl/base_cloner.cpp index 79a6bd8992985..7c6b582758af5 100644 --- a/src/mongo/db/repl/base_cloner.cpp +++ b/src/mongo/db/repl/base_cloner.cpp @@ -28,11 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/repl/base_cloner.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/base_cloner.h b/src/mongo/db/repl/base_cloner.h index 524601725fd42..f3a89a704ec7f 100644 --- a/src/mongo/db/repl/base_cloner.h +++ b/src/mongo/db/repl/base_cloner.h @@ -29,18 +29,29 @@ #pragma once +#include +#include #include #include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/dbclient_connection.h" #include "mongo/db/repl/repl_sync_shared_data.h" #include "mongo/db/repl/replication_auth.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/executor/task_executor.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/mutex.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/db/repl/bgsync.cpp b/src/mongo/db/repl/bgsync.cpp index daa0389c9fa42..d184388b402cc 100644 --- a/src/mongo/db/repl/bgsync.cpp +++ b/src/mongo/db/repl/bgsync.cpp @@ -30,33 +30,61 @@ #include "mongo/db/repl/bgsync.h" +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include #include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonelement.h" #include "mongo/client/connection_pool.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/client/dbclient_connection.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/client.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/data_replicator_external_state_impl.h" -#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog_buffer.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/oplog_interface_local.h" #include "mongo/db/repl/oplog_interface_remote.h" #include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/replication_coordinator_impl.h" +#include "mongo/db/repl/replication_coordinator_external_state.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/rollback_source_impl.h" #include "mongo/db/repl/rs_rollback.h" 
#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/service_context.h" #include "mongo/db/shutdown_in_progress_quiesce_info.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" #include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/str.h" #include "mongo/util/testing_proctor.h" #include "mongo/util/time_support.h" @@ -226,6 +254,11 @@ void BackgroundSync::_run() { Client::initThread("BackgroundSync"); AuthorizationSession::get(cc())->grantInternalAuthorization(&cc()); + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + while (!inShutdown()) { try { _runProducer(); @@ -795,10 +828,8 @@ void BackgroundSync::_runRollbackViaRecoverToCheckpoint( StorageInterface* storageInterface, OplogInterfaceRemote::GetConnectionFn getConnection) { - OplogInterfaceRemote remoteOplog(source, - getConnection, - NamespaceString::kRsOplogNamespace.ns(), - rollbackRemoteOplogQueryBatchSize.load()); + OplogInterfaceRemote remoteOplog( + source, getConnection, rollbackRemoteOplogQueryBatchSize.load()); { stdx::lock_guard lock(_mutex); @@ -838,10 +869,8 @@ void BackgroundSync::_fallBackOnRollbackViaRefetch( OplogInterface* localOplog, OplogInterfaceRemote::GetConnectionFn getConnection) { - RollbackSourceImpl rollbackSource(getConnection, - source, - NamespaceString::kRsOplogNamespace.ns(), - rollbackRemoteOplogQueryBatchSize.load()); + RollbackSourceImpl rollbackSource( + getConnection, source, rollbackRemoteOplogQueryBatchSize.load()); rollback(opCtx, *localOplog, rollbackSource, requiredRBID, _replCoord, _replicationProcess); } @@ -964,7 +993,7 @@ OpTime BackgroundSync::_readLastAppliedOpTime(OperationContext* opCtx) { BSONObj oplogEntry; try { bool success = writeConflictRetry( - opCtx, "readLastAppliedOpTime", NamespaceString::kRsOplogNamespace.ns(), [&] { + opCtx, "readLastAppliedOpTime", NamespaceString::kRsOplogNamespace, [&] { return Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntry); }); diff --git a/src/mongo/db/repl/bgsync.h b/src/mongo/db/repl/bgsync.h index 6b4882e690586..81a719c27e162 100644 --- a/src/mongo/db/repl/bgsync.h +++ b/src/mongo/db/repl/bgsync.h @@ -32,20 +32,28 @@ #include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/data_replicator_external_state.h" #include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_fetcher.h" +#include "mongo/db/repl/oplog_interface.h" #include "mongo/db/repl/oplog_interface_remote.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/rollback_impl.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/sync_source_resolver.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" 
+#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/net/hostandport.h" namespace mongo { @@ -57,6 +65,7 @@ namespace repl { class OplogInterface; class ReplicationCoordinator; + class ReplicationCoordinatorExternalState; class ReplicationProcess; class StorageInterface; diff --git a/src/mongo/db/repl/bson_extract_optime.cpp b/src/mongo/db/repl/bson_extract_optime.cpp index 2654122dbb5dc..fbaf5d6a4fd94 100644 --- a/src/mongo/db/repl/bson_extract_optime.cpp +++ b/src/mongo/db/repl/bson_extract_optime.cpp @@ -29,8 +29,11 @@ #include "mongo/db/repl/bson_extract_optime.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/optime.h" namespace mongo { diff --git a/src/mongo/db/repl/bson_extract_optime.h b/src/mongo/db/repl/bson_extract_optime.h index bce07439158e0..4ae8b8d940e70 100644 --- a/src/mongo/db/repl/bson_extract_optime.h +++ b/src/mongo/db/repl/bson_extract_optime.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/error_extra_info.h" #include "mongo/base/status.h" #include "mongo/base/string_data.h" diff --git a/src/mongo/db/repl/change_stream_oplog_notification.cpp b/src/mongo/db/repl/change_stream_oplog_notification.cpp index 518c45607857f..bf3a8b7995443 100644 --- a/src/mongo/db/repl/change_stream_oplog_notification.cpp +++ b/src/mongo/db/repl/change_stream_oplog_notification.cpp @@ -29,12 +29,30 @@ #include "mongo/db/repl/change_stream_oplog_notification.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/str.h" namespace mongo { @@ -43,7 +61,7 @@ namespace { void insertOplogEntry(OperationContext* opCtx, repl::MutableOplogEntry&& oplogEntry, StringData opStr) { - writeConflictRetry(opCtx, opStr, NamespaceString::kRsOplogNamespace.ns(), [&] { + writeConflictRetry(opCtx, opStr, NamespaceString::kRsOplogNamespace, [&] { AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); WriteUnitOfWork wunit(opCtx); const auto& oplogOpTime = repl::logOp(opCtx, &oplogEntry); @@ -102,20 +120,29 @@ void notifyChangeStreamsOnShardCollection(OperationContext* opCtx, void notifyChangeStreamsOnDatabaseAdded(OperationContext* opCtx, const DatabasesAdded& databasesAddedNotification) { - const auto& notifiedPhase = databasesAddedNotification.getPhase(); + const std::string operationName = [&] { + switch (databasesAddedNotification.getPhase()) { + case CommitPhaseEnum::kSuccessful: + return "createDatabase"; + case CommitPhaseEnum::kAborted: + return "createDatabaseAbort"; + case CommitPhaseEnum::kPrepare: + return "createDatabasePrepare"; + default: + MONGO_UNREACHABLE; + } + }(); + for (const auto& dbName : databasesAddedNotification.getNames()) { repl::MutableOplogEntry oplogEntry; oplogEntry.setOpType(repl::OpTypeEnum::kNoop); oplogEntry.setNss(NamespaceString(dbName)); 
oplogEntry.setTid(dbName.tenantId()); - oplogEntry.setObject(BSON("msg" << BSON("createDatabase" << dbName.db()))); + oplogEntry.setObject(BSON("msg" << BSON(operationName << dbName.db()))); BSONObjBuilder o2Builder; - o2Builder.append("createDatabase", dbName.db()); - if (notifiedPhase) { - o2Builder.append("phase", *notifiedPhase); - if (*notifiedPhase == CommitPhaseEnum::kPrepare) { - o2Builder.append("primaryShard", *databasesAddedNotification.getPrimaryShard()); - } + o2Builder.append(operationName, dbName.db()); + if (databasesAddedNotification.getPhase() == CommitPhaseEnum::kPrepare) { + o2Builder.append("primaryShard", *databasesAddedNotification.getPrimaryShard()); } o2Builder.append("isImported", databasesAddedNotification.getAreImported()); diff --git a/src/mongo/db/repl/change_stream_oplog_notification.h b/src/mongo/db/repl/change_stream_oplog_notification.h index b52a38832bc7d..a4ce6af210e39 100644 --- a/src/mongo/db/repl/change_stream_oplog_notification.h +++ b/src/mongo/db/repl/change_stream_oplog_notification.h @@ -29,9 +29,14 @@ #pragma once +#include +#include +#include + #include "mongo/bson/bsonobj.h" #include "mongo/client/connection_string.h" #include "mongo/db/commands/notify_sharding_event_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/shard_id.h" diff --git a/src/mongo/db/repl/check_quorum_for_config_change.cpp b/src/mongo/db/repl/check_quorum_for_config_change.cpp index 7e21387c602a0..68ea2dfdf9143 100644 --- a/src/mongo/db/repl/check_quorum_for_config_change.cpp +++ b/src/mongo/db/repl/check_quorum_for_config_change.cpp @@ -28,19 +28,33 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/repl/check_quorum_for_config_change.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/repl/check_quorum_for_config_change.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" -#include "mongo/db/repl/scatter_gather_algorithm.h" #include "mongo/db/repl/scatter_gather_runner.h" -#include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/check_quorum_for_config_change.h b/src/mongo/db/repl/check_quorum_for_config_change.h index 446dd89e03d26..e78e5ea8f1b8e 100644 --- a/src/mongo/db/repl/check_quorum_for_config_change.h +++ b/src/mongo/db/repl/check_quorum_for_config_change.h @@ -29,9 +29,15 @@ #pragma once +#include +#include + #include "mongo/base/status.h" #include "mongo/db/repl/scatter_gather_algorithm.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { diff --git 
a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp index 6c1da8db76856..c03987a7ab6f5 100644 --- a/src/mongo/db/repl/check_quorum_for_config_change_test.cpp +++ b/src/mongo/db/repl/check_quorum_for_config_change_test.cpp @@ -28,24 +28,44 @@ */ -#include "mongo/platform/basic.h" - -#include +#include #include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/repl/check_quorum_for_config_change.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/stdx/thread.h" #include "mongo/stdx/unordered_set.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/repl/cloner_test_fixture.cpp b/src/mongo/db/repl/cloner_test_fixture.cpp index 0cdbd27b18a1e..676d333ca968c 100644 --- a/src/mongo/db/repl/cloner_test_fixture.cpp +++ b/src/mongo/db/repl/cloner_test_fixture.cpp @@ -27,17 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/clientcursor.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/repl/cloner_test_fixture.h" -#include "mongo/db/repl/replication_consistency_markers_impl.h" -#include "mongo/db/repl/storage_interface.h" -#include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/db/storage/storage_engine_mock.h" #include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/concurrency/thread_pool.h" namespace mongo { @@ -49,9 +47,10 @@ BSONObj ClonerTestFixture::createCountResponse(int documentCount) { } /* static */ -BSONObj ClonerTestFixture::createCursorResponse(const std::string& nss, const BSONArray& docs) { - return BSON("cursor" << BSON("id" << CursorId(0) << "ns" << nss << "firstBatch" << docs) << "ok" - << 1); +BSONObj ClonerTestFixture::createCursorResponse(StringData nss, const BSONArray& docs) { + return BSON( + "cursor" << BSON("id" << CursorId(0) << "ns" << nss.toString() << "firstBatch" << docs) + << "ok" << 1); } void ClonerTestFixture::setUp() { diff --git a/src/mongo/db/repl/cloner_test_fixture.h b/src/mongo/db/repl/cloner_test_fixture.h index 35d36d5825056..c888743f3f947 100644 --- a/src/mongo/db/repl/cloner_test_fixture.h +++ b/src/mongo/db/repl/cloner_test_fixture.h @@ -29,18 +29,29 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/dbclient_connection.h" #include "mongo/db/repl/base_cloner.h" +#include "mongo/db/repl/repl_sync_shared_data.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/logv2/log_component.h" #include "mongo/logv2/log_severity.h" #include "mongo/unittest/log_test.h" #include "mongo/unittest/unittest.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { @@ -53,7 +64,7 @@ class ClonerTestFixture : public ServiceContextMongoDTest { // Since the DBClient handles the cursor iterating, we assume that works for the purposes of the // cloner unit test and just use a single batch for all mock responses. - static BSONObj createCursorResponse(const std::string& nss, const BSONArray& docs); + static BSONObj createCursorResponse(StringData nss, const BSONArray& docs); protected: void setUp() override; diff --git a/src/mongo/db/repl/cloner_utils.cpp b/src/mongo/db/repl/cloner_utils.cpp index 39dade1eeb85b..8a9c6fe4efdcb 100644 --- a/src/mongo/db/repl/cloner_utils.cpp +++ b/src/mongo/db/repl/cloner_utils.cpp @@ -27,13 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/logical_time.h" #include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/cloner_utils.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace repl { @@ -49,7 +58,7 @@ BSONObj ClonerUtils::makeTenantDatabaseFilter(StringData prefix) { BSONObj ClonerUtils::buildMajorityWaitRequest(Timestamp operationTime) { BSONObjBuilder bob; - bob.append("find", NamespaceString::kSystemReplSetNamespace.toString()); + bob.append("find", NamespaceStringUtil::serialize(NamespaceString::kSystemReplSetNamespace)); bob.append("filter", BSONObj()); ReadConcernArgs readConcern(LogicalTime(operationTime), ReadConcernLevel::kMajorityReadConcern); readConcern.appendInfo(&bob); diff --git a/src/mongo/db/repl/cloner_utils.h b/src/mongo/db/repl/cloner_utils.h index 969a12b70b919..610daf6725eb8 100644 --- a/src/mongo/db/repl/cloner_utils.h +++ b/src/mongo/db/repl/cloner_utils.h @@ -29,12 +29,16 @@ #pragma once +#include #include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/tenant_id.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/collection_bulk_loader.h b/src/mongo/db/repl/collection_bulk_loader.h index 261fc3e9bfd46..6d0ca696d6bb6 100644 --- a/src/mongo/db/repl/collection_bulk_loader.h +++ b/src/mongo/db/repl/collection_bulk_loader.h @@ -64,9 +64,6 @@ class CollectionBulkLoader { * Called when inserts are done and indexes can be committed. 
*/ virtual Status commit() = 0; - - virtual std::string toString() const = 0; - virtual BSONObj toBSON() const = 0; }; } // namespace repl diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.cpp b/src/mongo/db/repl/collection_bulk_loader_impl.cpp index c3ff592161ccd..95f0e0676e5f6 100644 --- a/src/mongo/db/repl/collection_bulk_loader_impl.cpp +++ b/src/mongo/db/repl/collection_bulk_loader_impl.cpp @@ -29,22 +29,39 @@ #include "mongo/db/repl/collection_bulk_loader_impl.h" +#include +#include +#include + +#include + #include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/str.h" +#include "mongo/util/duration.h" +#include "mongo/util/shared_buffer_fragment.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -83,14 +100,14 @@ Status CollectionBulkLoaderImpl::init(const std::vector& secondaryIndex return writeConflictRetry( _opCtx.get(), "CollectionBulkLoader::init", - _acquisition.nss().ns(), + _acquisition.nss(), [&secondaryIndexSpecs, this] { + CollectionWriter collWriter(_opCtx.get(), &_acquisition); WriteUnitOfWork wuow(_opCtx.get()); // All writes in CollectionBulkLoaderImpl should be unreplicated. // The opCtx is accessed indirectly through _secondaryIndexesBlock. UnreplicatedWritesBlock uwb(_opCtx.get()); // This enforces the buildIndexes setting in the replica set configuration. - CollectionWriter collWriter(_opCtx.get(), &_acquisition); auto indexCatalog = collWriter.getWritableCollection(_opCtx.get())->getIndexCatalog(); auto specs = indexCatalog->removeExistingIndexesNoChecks( @@ -137,7 +154,7 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection( while (iter != end) { std::vector locs; Status status = writeConflictRetry( - _opCtx.get(), "CollectionBulkLoaderImpl/insertDocumentsUncapped", _nss.ns(), [&] { + _opCtx.get(), "CollectionBulkLoaderImpl/insertDocumentsUncapped", _nss, [&] { WriteUnitOfWork wunit(_opCtx.get()); auto insertIter = iter; int bytesInBlock = 0; @@ -152,7 +169,7 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection( const auto& doc = *insertIter++; bytesInBlock += doc.objsize(); // This version of insert will not update any indexes. 
- const auto status = collection_internal::insertDocumentForBulkLoader( + auto status = collection_internal::insertDocumentForBulkLoader( _opCtx.get(), _acquisition.getCollectionPtr(), doc, onRecordInserted); if (!status.isOK()) { return status; @@ -170,7 +187,7 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForUncappedCollection( // Inserts index entries into the external sorter. This will not update pre-existing // indexes. Wrap this in a WUOW since the index entry insertion may modify the durable // record store which can throw a write conflict exception. - status = writeConflictRetry(_opCtx.get(), "_addDocumentToIndexBlocks", _nss.ns(), [&] { + status = writeConflictRetry(_opCtx.get(), "_addDocumentToIndexBlocks", _nss, [&] { WriteUnitOfWork wunit(_opCtx.get()); for (size_t index = 0; index < locs.size(); ++index) { status = _addDocumentToIndexBlocks(*iter++, locs.at(index)); @@ -195,11 +212,11 @@ Status CollectionBulkLoaderImpl::_insertDocumentsForCappedCollection( for (auto iter = begin; iter != end; ++iter) { const auto& doc = *iter; Status status = writeConflictRetry( - _opCtx.get(), "CollectionBulkLoaderImpl/insertDocumentsCapped", _nss.ns(), [&] { + _opCtx.get(), "CollectionBulkLoaderImpl/insertDocumentsCapped", _nss, [&] { WriteUnitOfWork wunit(_opCtx.get()); // For capped collections, we use regular insertDocument, which // will update pre-existing indexes. - const auto status = collection_internal::insertDocument( + auto status = collection_internal::insertDocument( _opCtx.get(), _acquisition.getCollectionPtr(), InsertStatement(doc), nullptr); if (!status.isOK()) { return status; @@ -247,10 +264,10 @@ Status CollectionBulkLoaderImpl::commit() { invariant(_secondaryIndexesBlock->checkConstraints(_opCtx.get(), _acquisition.getCollectionPtr())); - status = writeConflictRetry( - _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this] { - WriteUnitOfWork wunit(_opCtx.get()); + status = + writeConflictRetry(_opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss, [this] { CollectionWriter collWriter(_opCtx.get(), &_acquisition); + WriteUnitOfWork wunit(_opCtx.get()); auto status = _secondaryIndexesBlock->commit( _opCtx.get(), collWriter.getWritableCollection(_opCtx.get()), @@ -272,7 +289,7 @@ Status CollectionBulkLoaderImpl::commit() { auto status = _idIndexBlock->dumpInsertsFromBulk( _opCtx.get(), _acquisition.getCollectionPtr(), [&](const RecordId& rid) { writeConflictRetry( - _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this, &rid] { + _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss, [this, &rid] { WriteUnitOfWork wunit(_opCtx.get()); auto doc = _acquisition.getCollectionPtr()->docFor(_opCtx.get(), rid); @@ -294,7 +311,7 @@ Status CollectionBulkLoaderImpl::commit() { } SharedBufferFragmentBuilder pooledBuilder{ - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes}; + key_string::HeapBuilder::kHeapAllocatorDefaultBytes}; InsertDeleteOptions options; options.dupsAllowed = !entry->descriptor()->unique(); @@ -303,6 +320,7 @@ Status CollectionBulkLoaderImpl::commit() { _opCtx.get(), pooledBuilder, _acquisition.getCollectionPtr(), + entry, doc.value(), rid, false /* logIfError */, @@ -324,10 +342,10 @@ Status CollectionBulkLoaderImpl::commit() { // Commit the _id index, there won't be any documents with duplicate _ids as they were // deleted prior to this. 
- status = writeConflictRetry( - _opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss.ns(), [this] { - WriteUnitOfWork wunit(_opCtx.get()); + status = + writeConflictRetry(_opCtx.get(), "CollectionBulkLoaderImpl::commit", _nss, [this] { CollectionWriter collWriter(_opCtx.get(), &_acquisition); + WriteUnitOfWork wunit(_opCtx.get()); auto status = _idIndexBlock->commit(_opCtx.get(), collWriter.getWritableCollection(_opCtx.get()), @@ -383,7 +401,7 @@ Status CollectionBulkLoaderImpl::_runTaskReleaseResourcesOnFailure(const F& task AlternativeClientRegion acr(_client); ScopeGuard guard([this] { _releaseResources(); }); try { - const auto status = task(); + auto status = task(); if (status.isOK()) { guard.dismiss(); } @@ -444,17 +462,5 @@ BSONObj CollectionBulkLoaderImpl::Stats::toBSON() const { return bob.obj(); } - -std::string CollectionBulkLoaderImpl::toString() const { - return toBSON().toString(); -} - -BSONObj CollectionBulkLoaderImpl::toBSON() const { - BSONObjBuilder bob; - bob.append("BulkLoader", _nss.toString()); - // TODO: Add index specs here. - return bob.done(); -} - } // namespace repl } // namespace mongo diff --git a/src/mongo/db/repl/collection_bulk_loader_impl.h b/src/mongo/db/repl/collection_bulk_loader_impl.h index f52b4983e69c7..f5c35c976d036 100644 --- a/src/mongo/db/repl/collection_bulk_loader_impl.h +++ b/src/mongo/db/repl/collection_bulk_loader_impl.h @@ -30,13 +30,21 @@ #pragma once +#include +#include +#include + #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/multi_index_block.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/collection_bulk_loader.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_role.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { @@ -73,9 +81,6 @@ class CollectionBulkLoaderImpl : public CollectionBulkLoader { CollectionBulkLoaderImpl::Stats getStats() const; - virtual std::string toString() const override; - virtual BSONObj toBSON() const override; - private: void _releaseResources(); @@ -103,7 +108,7 @@ class CollectionBulkLoaderImpl : public CollectionBulkLoader { ServiceContext::UniqueClient _client; ServiceContext::UniqueOperationContext _opCtx; - ScopedCollectionAcquisition _acquisition; + CollectionAcquisition _acquisition; NamespaceString _nss; std::unique_ptr _idIndexBlock; std::unique_ptr _secondaryIndexesBlock; diff --git a/src/mongo/db/repl/collection_cloner.cpp b/src/mongo/db/repl/collection_cloner.cpp index 1f4021854c751..e1fb1a56e506e 100644 --- a/src/mongo/db/repl/collection_cloner.cpp +++ b/src/mongo/db/repl/collection_cloner.cpp @@ -28,26 +28,55 @@ */ -#include "mongo/db/index/index_descriptor_fwd.h" -#include "mongo/db/service_context.h" -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include #include "mongo/base/string_data.h" -#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/commands/list_collections_filter.h" -#include "mongo/db/index_build_entry_helpers.h" +#include "mongo/db/client.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/feature_flag.h" +#include 
"mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/collection_bulk_loader.h" #include "mongo/db/repl/collection_cloner.h" -#include "mongo/db/repl/database_cloner_gen.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/server_feature_flags_gen.h" -#include "mongo/db/wire_version.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" - +#include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationInitialSync @@ -88,7 +117,8 @@ CollectionCloner::CollectionCloner(const NamespaceString& sourceNss, kProgressMeterSecondsBetween, kProgressMeterCheckInterval, "documents copied", - str::stream() << _sourceNss.toString() << " collection clone progress"), + str::stream() << NamespaceStringUtil::serialize(_sourceNss) + << " collection clone progress"), _scheduleDbWorkFn([this](executor::TaskExecutor::CallbackFn work) { auto task = [this, work = std::move(work)]( OperationContext* opCtx, @@ -131,7 +161,7 @@ void CollectionCloner::preStage() { _stats.start = getSharedData()->getClock()->now(); BSONObjBuilder b(BSON("collStats" << _sourceNss.coll().toString())); - if (gMultitenancySupport && serverGlobalParams.featureCompatibility.isVersionInitialized() && + if (gMultitenancySupport && gFeatureFlagRequireTenantID.isEnabled(serverGlobalParams.featureCompatibility) && _sourceNss.tenantId()) { _sourceNss.tenantId()->serializeToBSON("$tenant", &b); @@ -411,12 +441,15 @@ void CollectionCloner::handleNextBatch(DBClientCursor& cursor) { } // Schedule the next document batch insertion. - auto&& scheduleResult = _scheduleDbWorkFn( - [=](const executor::TaskExecutor::CallbackArgs& cbd) { insertDocumentsCallback(cbd); }); + auto&& scheduleResult = + _scheduleDbWorkFn([=, this](const executor::TaskExecutor::CallbackArgs& cbd) { + insertDocumentsCallback(cbd); + }); if (!scheduleResult.isOK()) { Status newStatus = scheduleResult.getStatus().withContext( - str::stream() << "Error cloning collection '" << _sourceNss.ns() << "'"); + str::stream() << "Error cloning collection '" << _sourceNss.toStringForErrorMsg() + << "'"); // We must throw an exception to terminate query. uassertStatusOK(newStatus); } @@ -439,9 +472,9 @@ void CollectionCloner::handleNextBatch(DBClientCursor& cursor) { } }, [&](const BSONObj& data) { + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd); // Only hang when cloning the specified collection, or if no collection was specified. 
- auto nss = data["nss"].str(); - return nss.empty() || nss == _sourceNss.toString(); + return fpNss.isEmpty() || fpNss == _sourceNss; }); } @@ -482,14 +515,14 @@ void CollectionCloner::insertDocumentsCallback(const executor::TaskExecutor::Cal } }, [&](const BSONObj& data) { - return data["namespace"].String() == _sourceNss.ns() && + return NamespaceStringUtil::parseFailPointData(data, "namespace") == _sourceNss && static_cast(_stats.documentsCopied) >= data["numDocsToClone"].numberInt(); }); } bool CollectionCloner::isMyFailPoint(const BSONObj& data) const { - auto nss = data["nss"].str(); - return (nss.empty() || nss == _sourceNss.toString()) && BaseCloner::isMyFailPoint(data); + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd); + return (fpNss.isEmpty() || fpNss == _sourceNss) && BaseCloner::isMyFailPoint(data); } void CollectionCloner::waitForDatabaseWorkToComplete() { diff --git a/src/mongo/db/repl/collection_cloner.h b/src/mongo/db/repl/collection_cloner.h index f42ca29ffdca2..36eb72be4b272 100644 --- a/src/mongo/db/repl/collection_cloner.h +++ b/src/mongo/db/repl/collection_cloner.h @@ -29,14 +29,38 @@ #pragma once +#include #include +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/base_cloner.h" +#include "mongo/db/repl/collection_bulk_loader.h" #include "mongo/db/repl/initial_sync_base_cloner.h" #include "mongo/db/repl/initial_sync_shared_data.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/task_runner.h" +#include "mongo/executor/task_executor.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/functional.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/progress_meter.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -100,7 +124,7 @@ class CollectionCloner final : public InitialSyncBaseCloner { return _sourceNss; } UUID getSourceUuid() const { - return *_sourceDbAndUuid.uuid(); + return _sourceDbAndUuid.uuid(); } /** @@ -163,7 +187,7 @@ class CollectionCloner final : public InitialSyncBaseCloner { std::string describeForFuzzer(BaseClonerStage* stage) const final { return _sourceNss.db() + " db: { " + stage->getName() + ": UUID(\"" + - _sourceDbAndUuid.uuid()->toString() + "\") coll: " + _sourceNss.coll() + " }"; + _sourceDbAndUuid.uuid().toString() + "\") coll: " + _sourceNss.coll() + " }"; } /** diff --git a/src/mongo/db/repl/collection_cloner_test.cpp b/src/mongo/db/repl/collection_cloner_test.cpp index c51b32e7c8955..d3c1f3ea2bb28 100644 --- a/src/mongo/db/repl/collection_cloner_test.cpp +++ b/src/mongo/db/repl/collection_cloner_test.cpp @@ -27,21 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" #include "mongo/db/repl/collection_cloner.h" #include "mongo/db/repl/initial_sync_cloner_test_fixture.h" #include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" namespace mongo { namespace repl { @@ -84,7 +94,7 @@ class CollectionClonerTest : public InitialSyncClonerTestFixture { _storageInterface.createCollectionForBulkFn = _standardCreateCollectionFn; - _mockServer->assignCollectionUuid(_nss.ns(), _collUuid); + _mockServer->assignCollectionUuid(_nss.ns_forTest(), _collUuid); _mockServer->setCommandReply("replSetGetRBID", BSON("ok" << 1 << "rbid" << getSharedData()->getRollBackId())); } @@ -234,8 +244,9 @@ TEST_F(CollectionClonerTestResumable, TEST_F(CollectionClonerTestResumable, ListIndexesReturnedNoIndexes) { auto cloner = makeCollectionCloner(); cloner->setStopAfterStage_forTest("listIndexes"); - setMockServerReplies( - BSON("size" << 10), createCountResponse(1), createCursorResponse(_nss.ns(), BSONArray())); + setMockServerReplies(BSON("size" << 10), + createCountResponse(1), + createCursorResponse(_nss.ns_forTest(), BSONArray())); ASSERT_OK(cloner->run()); ASSERT(getIdIndexSpec(cloner.get()).isEmpty()); ASSERT(getIndexSpecs(cloner.get()).empty()); @@ -262,7 +273,7 @@ TEST_F(CollectionClonerTestResumable, ListIndexesHasResults) { BSON("size" << 10), createCountResponse(1), createCursorResponse( - _nss.ns(), + _nss.ns_forTest(), BSON_ARRAY(_secondaryIndexSpecs[0] << _idIndexSpec << _secondaryIndexSpecs[1]))); ASSERT_OK(cloner->run()); ASSERT_BSONOBJ_EQ(_idIndexSpec, getIdIndexSpec(cloner.get())); @@ -282,7 +293,8 @@ TEST_F(CollectionClonerTestResumable, CollectionClonerResendsListIndexesCommandO Status(ErrorCodes::HostNotFound, "HostNotFound")); _mockServer->setCommandReply( "listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec << _secondaryIndexSpecs[0]))); + createCursorResponse(_nss.ns_forTest(), + BSON_ARRAY(_idIndexSpec << _secondaryIndexSpecs[0]))); ASSERT_OK(cloner->run()); ASSERT_BSONOBJ_EQ(_idIndexSpec, getIdIndexSpec(cloner.get())); @@ -318,11 +330,11 @@ TEST_F(CollectionClonerTestResumable, BeginCollection) { } setMockServerReplies(BSON("size" << 10), createCountResponse(1), - createCursorResponse(_nss.ns(), indexSpecs.arr())); + createCursorResponse(_nss.ns_forTest(), indexSpecs.arr())); ASSERT_EQUALS(Status::OK(), cloner->run()); - ASSERT_EQUALS(_nss.ns(), collNss.ns()); + ASSERT_EQUALS(_nss.ns_forTest(), collNss.ns_forTest()); ASSERT_BSONOBJ_EQ(_options.toBSON(), collOptions.toBSON()); ASSERT_EQUALS(_secondaryIndexSpecs.size(), collSecondaryIndexSpecs.size()); for (std::vector::size_type i = 0; i < 
_secondaryIndexSpecs.size(); ++i) { @@ -340,8 +352,9 @@ TEST_F(CollectionClonerTestResumable, BeginCollectionFailed) { auto cloner = makeCollectionCloner(); cloner->setStopAfterStage_forTest("createCollection"); - setMockServerReplies( - BSON("size" << 10), createCountResponse(1), createCursorResponse(_nss.ns(), BSONArray())); + setMockServerReplies(BSON("size" << 10), + createCountResponse(1), + createCursorResponse(_nss.ns_forTest(), BSONArray())); ASSERT_EQUALS(ErrorCodes::OperationFailed, cloner->run()); } @@ -349,7 +362,7 @@ TEST_F(CollectionClonerTestResumable, InsertDocumentsSingleBatch) { // Set up data for preliminary stages setMockServerReplies(BSON("size" << 10), createCountResponse(2), - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -373,7 +386,7 @@ TEST_F(CollectionClonerTestResumable, BatchSizeStoredInConstructor) { // Set up data for preliminary stages. setMockServerReplies(BSON("size" << 10), createCountResponse(2), - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); // Set up documents to be returned from upstream node. It should take 3 batches to clone the // documents. @@ -399,7 +412,7 @@ TEST_F(CollectionClonerTestResumable, InsertDocumentsMultipleBatches) { // Set up data for preliminary stages setMockServerReplies(BSON("size" << 10), createCountResponse(2), - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -421,7 +434,7 @@ TEST_F(CollectionClonerTestResumable, InsertDocumentsScheduleDBWorkFailed) { // Set up data for preliminary stages setMockServerReplies(BSON("size" << 10), createCountResponse(2), - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -434,7 +447,7 @@ TEST_F(CollectionClonerTestResumable, InsertDocumentsScheduleDBWorkFailed) { auto timesEntered = collClonerBeforeFailPoint->setMode( FailPoint::alwaysOn, 0, - fromjson("{cloner: 'CollectionCloner', stage: 'query', nss: '" + _nss.ns() + "'}")); + fromjson("{cloner: 'CollectionCloner', stage: 'query', nss: '" + _nss.ns_forTest() + "'}")); // Run the cloner in a separate thread. stdx::thread clonerThread([&] { @@ -458,7 +471,7 @@ TEST_F(CollectionClonerTestResumable, InsertDocumentsCallbackCanceled) { // Set up data for preliminary stages setMockServerReplies(BSON("size" << 10), createCountResponse(2), - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -471,7 +484,7 @@ TEST_F(CollectionClonerTestResumable, InsertDocumentsCallbackCanceled) { auto timesEntered = collClonerBeforeFailPoint->setMode( FailPoint::alwaysOn, 0, - fromjson("{cloner: 'CollectionCloner', stage: 'query', nss: '" + _nss.ns() + "'}")); + fromjson("{cloner: 'CollectionCloner', stage: 'query', nss: '" + _nss.ns_forTest() + "'}")); // Run the cloner in a separate thread. 
stdx::thread clonerThread([&] { @@ -501,7 +514,7 @@ TEST_F(CollectionClonerTestResumable, InsertDocumentsFailed) { // Set up data for preliminary stages setMockServerReplies(BSON("size" << 10), createCountResponse(2), - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -514,7 +527,7 @@ TEST_F(CollectionClonerTestResumable, InsertDocumentsFailed) { auto timesEntered = collClonerBeforeFailPoint->setMode( FailPoint::alwaysOn, 0, - fromjson("{cloner: 'CollectionCloner', stage: 'query', nss: '" + _nss.ns() + "'}")); + fromjson("{cloner: 'CollectionCloner', stage: 'query', nss: '" + _nss.ns_forTest() + "'}")); // Run the cloner in a separate thread. stdx::thread clonerThread([&] { @@ -560,7 +573,7 @@ TEST_F(CollectionClonerTestResumable, DoNotCreateIDIndexIfAutoIndexIdUsed) { setMockServerReplies(BSON("size" << 10), createCountResponse(1), - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); CollectionOptions options; options.autoIndexId = CollectionOptions::NO; @@ -585,7 +598,7 @@ TEST_F(CollectionClonerTestResumable, ResumableQueryFailTransientlyBeforeFirstBa _mockServer->setCommandReply("collStats", BSON("size" << 10)); _mockServer->setCommandReply("count", createCountResponse(3)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -647,7 +660,7 @@ TEST_F(CollectionClonerTestResumable, ResumableQueryFailTransientlyAfterFirstBat << "_id_"); setMockServerReplies(BSON("size" << 10), createCountResponse(5), - createCursorResponse(_nss.ns(), BSON_ARRAY(idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -703,7 +716,7 @@ TEST_F(CollectionClonerTestResumable, ResumableQueryNonRetriableError) { << "_id_"); setMockServerReplies(BSON("size" << 10), createCountResponse(3), - createCursorResponse(_nss.ns(), BSON_ARRAY(idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -748,7 +761,7 @@ TEST_F(CollectionClonerTestResumable, << "_id_"); setMockServerReplies(BSON("size" << 10), createCountResponse(3), - createCursorResponse(_nss.ns(), BSON_ARRAY(idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -793,7 +806,7 @@ TEST_F(CollectionClonerTestResumable, ResumableQueryNonTransientErrorAtRetry) { << "_id_"); setMockServerReplies(BSON("size" << 10), createCountResponse(5), - createCursorResponse(_nss.ns(), BSON_ARRAY(idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(idIndexSpec))); // Set up documents to be returned from upstream node. 
_mockServer->insert(_nss, BSON("_id" << 1)); @@ -854,7 +867,7 @@ TEST_F(CollectionClonerTestResumable, ResumableQueryNonTransientErrorAfterPastRe << "_id_"); setMockServerReplies(BSON("size" << 10), createCountResponse(5), - createCursorResponse(_nss.ns(), BSON_ARRAY(idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -931,7 +944,7 @@ TEST_F(CollectionClonerTestResumable, ResumableQueryTwoResumes) { << "_id_"); setMockServerReplies(BSON("size" << 10), createCountResponse(5), - createCursorResponse(_nss.ns(), BSON_ARRAY(idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(idIndexSpec))); // Set up documents to be returned from upstream node. _mockServer->insert(_nss, BSON("_id" << 1)); @@ -1053,7 +1066,7 @@ TEST_F(CollectionClonerMultitenancyTest, CollectionClonerMultitenancy) { BSON("size" << 10), createCountResponse(numOperations), createCursorResponse( - _nss.ns(), + _nss.ns_forTest(), BSON_ARRAY(_secondaryIndexSpecs[0] << _idIndexSpec << _secondaryIndexSpecs[1]))); // Set up documents to be returned from upstream node. diff --git a/src/mongo/db/repl/data_replicator_external_state_impl.cpp b/src/mongo/db/repl/data_replicator_external_state_impl.cpp index 283d640cf2c51..31afd5bd1d586 100644 --- a/src/mongo/db/repl/data_replicator_external_state_impl.cpp +++ b/src/mongo/db/repl/data_replicator_external_state_impl.cpp @@ -28,11 +28,16 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/repl/data_replicator_external_state_impl.h" +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/db/repl/data_replicator_external_state_impl.h" #include "mongo/db/repl/oplog_applier_impl.h" #include "mongo/db/repl/oplog_buffer_blocking_queue.h" #include "mongo/db/repl/oplog_buffer_collection.h" @@ -40,9 +45,11 @@ #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_external_state.h" -#include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/data_replicator_external_state_impl.h b/src/mongo/db/repl/data_replicator_external_state_impl.h index 5ef6b8b713697..dd645691bcd6a 100644 --- a/src/mongo/db/repl/data_replicator_external_state_impl.h +++ b/src/mongo/db/repl/data_replicator_external_state_impl.h @@ -29,12 +29,34 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/data_replicator_external_state.h" +#include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/oplog_buffer.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_consistency_markers.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/storage/storage_engine.h" +#include 
"mongo/executor/task_executor.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { class ReplicationCoordinator; + class ReplicationCoordinatorExternalState; /** diff --git a/src/mongo/db/repl/data_replicator_external_state_initial_sync.cpp b/src/mongo/db/repl/data_replicator_external_state_initial_sync.cpp index abb8f3bebd87d..bf62564783111 100644 --- a/src/mongo/db/repl/data_replicator_external_state_initial_sync.cpp +++ b/src/mongo/db/repl/data_replicator_external_state_initial_sync.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/data_replicator_external_state_initial_sync.h" namespace mongo { diff --git a/src/mongo/db/repl/data_replicator_external_state_initial_sync.h b/src/mongo/db/repl/data_replicator_external_state_initial_sync.h index 55316e3da16e8..3f4f013ecf6ea 100644 --- a/src/mongo/db/repl/data_replicator_external_state_initial_sync.h +++ b/src/mongo/db/repl/data_replicator_external_state_initial_sync.h @@ -30,6 +30,12 @@ #pragma once #include "mongo/db/repl/data_replicator_external_state_impl.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/data_replicator_external_state_mock.cpp b/src/mongo/db/repl/data_replicator_external_state_mock.cpp index ddd5613644f35..cc8b653923ff8 100644 --- a/src/mongo/db/repl/data_replicator_external_state_mock.cpp +++ b/src/mongo/db/repl/data_replicator_external_state_mock.cpp @@ -27,12 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/data_replicator_external_state_mock.h" - #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/repl/data_replicator_external_state_mock.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_buffer_blocking_queue.h" namespace mongo { diff --git a/src/mongo/db/repl/data_replicator_external_state_mock.h b/src/mongo/db/repl/data_replicator_external_state_mock.h index 2db9ae6b84951..f590b7d6ecfeb 100644 --- a/src/mongo/db/repl/data_replicator_external_state_mock.h +++ b/src/mongo/db/repl/data_replicator_external_state_mock.h @@ -29,7 +29,32 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/data_replicator_external_state.h" +#include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/oplog_buffer.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_consistency_markers.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/executor/task_executor.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/database_cloner.cpp b/src/mongo/db/repl/database_cloner.cpp index 76e66aac53edb..24e0bcfa7cf94 100644 --- a/src/mongo/db/repl/database_cloner.cpp +++ b/src/mongo/db/repl/database_cloner.cpp @@ -28,17 +28,34 @@ */ -#include "mongo/db/service_context.h" -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/commands/list_collections_filter.h" #include "mongo/db/repl/database_cloner.h" -#include "mongo/db/repl/database_cloner_common.h" #include "mongo/db/repl/database_cloner_gen.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationInitialSync @@ -128,7 +145,7 @@ BaseCloner::AfterStageBehavior DatabaseCloner::listCollectionsStage() { } bool DatabaseCloner::isMyFailPoint(const BSONObj& data) const { - return data["database"].str() == _dbName.toStringWithTenantId() && + return data["database"].str() == DatabaseNameUtil::serializeForCatalog(_dbName) && BaseCloner::isMyFailPoint(data); } @@ -168,11 +185,12 @@ void DatabaseCloner::postStage() { "Collection clone failed", logAttrs(sourceNss), "error"_attr = collStatus.toString()); - 
setSyncFailedStatus({ErrorCodes::InitialSyncFailure, - collStatus - .withContext(str::stream() << "Error cloning collection '" - << sourceNss.toString() << "'") - .toString()}); + setSyncFailedStatus( + {ErrorCodes::InitialSyncFailure, + collStatus + .withContext(str::stream() << "Error cloning collection '" + << sourceNss.toStringForErrorMsg() << "'") + .toString()}); } { stdx::lock_guard lk(_mutex); diff --git a/src/mongo/db/repl/database_cloner.h b/src/mongo/db/repl/database_cloner.h index 86bc0cdce1259..abdf7c3c9588a 100644 --- a/src/mongo/db/repl/database_cloner.h +++ b/src/mongo/db/repl/database_cloner.h @@ -29,12 +29,27 @@ #pragma once +#include +#include +#include +#include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/base_cloner.h" #include "mongo/db/repl/collection_cloner.h" #include "mongo/db/repl/initial_sync_base_cloner.h" #include "mongo/db/repl/initial_sync_shared_data.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { @@ -94,7 +109,8 @@ class DatabaseCloner final : public InitialSyncBaseCloner { void postStage() final; std::string describeForFuzzer(BaseClonerStage* stage) const final { - return _dbName.toStringWithTenantId() + " db: { " + stage->getName() + ": 1 } "; + return DatabaseNameUtil::serializeForCatalog(_dbName) + " db: { " + stage->getName() + + ": 1 } "; } // All member variables are labeled with one of the following codes indicating the diff --git a/src/mongo/db/repl/database_cloner_common.cpp b/src/mongo/db/repl/database_cloner_common.cpp index 13bb4a2591b21..1fe9d40d98a05 100644 --- a/src/mongo/db/repl/database_cloner_common.cpp +++ b/src/mongo/db/repl/database_cloner_common.cpp @@ -27,9 +27,8 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/database_cloner_common.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/database_cloner_common.h b/src/mongo/db/repl/database_cloner_common.h index e7a8b6c171c7b..773470d408ae5 100644 --- a/src/mongo/db/repl/database_cloner_common.h +++ b/src/mongo/db/repl/database_cloner_common.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection_options.h" namespace mongo { diff --git a/src/mongo/db/repl/database_cloner_test.cpp b/src/mongo/db/repl/database_cloner_test.cpp index 3f1a6cec95d2a..bb1bbd0c7fc09 100644 --- a/src/mongo/db/repl/database_cloner_test.cpp +++ b/src/mongo/db/repl/database_cloner_test.cpp @@ -27,19 +27,35 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/clientcursor.h" +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/repl/database_cloner.h" #include "mongo/db/repl/initial_sync_cloner_test_fixture.h" -#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -53,7 +69,8 @@ const std::string dbNameStr = "testDb"; class DatabaseClonerTest : public InitialSyncClonerTestFixture { public: - DatabaseClonerTest() : _dbName(boost::none, dbNameStr) {} + DatabaseClonerTest() + : _dbName(DatabaseName::createDatabaseName_forTest(boost::none, dbNameStr)) {} protected: void setUp() override { @@ -344,7 +361,7 @@ TEST_F(DatabaseClonerTest, FirstCollectionListIndexesFailed) { {BSON("ok" << 0 << "errmsg" << "fake message" << "code" << ErrorCodes::CursorNotFound), - createCursorResponse(_dbName.db() + ".b", BSON_ARRAY(idIndexSpec))}); + createCursorResponse(_dbName.toString_forTest() + ".b", BSON_ARRAY(idIndexSpec))}); auto cloner = makeDatabaseCloner(); auto status = cloner->run(); ASSERT_NOT_OK(status); @@ -379,8 +396,8 @@ TEST_F(DatabaseClonerTest, CreateCollections) { _mockServer->setCommandReply("count", {createCountResponse(0), createCountResponse(0)}); _mockServer->setCommandReply( "listIndexes", - {createCursorResponse(_dbName.db() + ".a", BSON_ARRAY(idIndexSpec)), - createCursorResponse(_dbName.db() + ".b", BSON_ARRAY(idIndexSpec))}); + {createCursorResponse(_dbName.toString_forTest() + ".a", BSON_ARRAY(idIndexSpec)), + createCursorResponse(_dbName.toString_forTest() + ".b", BSON_ARRAY(idIndexSpec))}); auto cloner = makeDatabaseCloner(); auto status = cloner->run(); ASSERT_OK(status); @@ -426,8 +443,9 @@ TEST_F(DatabaseClonerTest, DatabaseAndCollectionStats) { _mockServer->setCommandReply("count", {createCountResponse(0), createCountResponse(0)}); _mockServer->setCommandReply( "listIndexes", - {createCursorResponse(_dbName.db() + ".a", BSON_ARRAY(idIndexSpec << extraIndexSpec)), - createCursorResponse(_dbName.db() + ".b", BSON_ARRAY(idIndexSpec))}); + {createCursorResponse(_dbName.toString_forTest() + ".a", + BSON_ARRAY(idIndexSpec << extraIndexSpec)), + createCursorResponse(_dbName.toString_forTest() + ".b", BSON_ARRAY(idIndexSpec))}); auto cloner = makeDatabaseCloner(); auto collClonerBeforeFailPoint = globalFailPointRegistry().find("hangBeforeClonerStage"); @@ -435,11 +453,13 @@ TEST_F(DatabaseClonerTest, DatabaseAndCollectionStats) { auto timesEntered = collClonerBeforeFailPoint->setMode( FailPoint::alwaysOn, 0, - fromjson("{cloner: 'CollectionCloner', 
stage: 'count', nss: '" + _dbName.db() + ".a'}")); + fromjson("{cloner: 'CollectionCloner', stage: 'count', nss: '" + + _dbName.toString_forTest() + ".a'}")); collClonerAfterFailPoint->setMode( FailPoint::alwaysOn, 0, - fromjson("{cloner: 'CollectionCloner', stage: 'count', nss: '" + _dbName.db() + ".a'}")); + fromjson("{cloner: 'CollectionCloner', stage: 'count', nss: '" + + _dbName.toString_forTest() + ".a'}")); // Run the cloner in a separate thread. stdx::thread clonerThread([&] { @@ -471,7 +491,8 @@ TEST_F(DatabaseClonerTest, DatabaseAndCollectionStats) { timesEntered = collClonerBeforeFailPoint->setMode( FailPoint::alwaysOn, 0, - fromjson("{cloner: 'CollectionCloner', stage: 'count', nss: '" + _dbName.db() + ".b'}")); + fromjson("{cloner: 'CollectionCloner', stage: 'count', nss: '" + + _dbName.toString_forTest() + ".b'}")); collClonerAfterFailPoint->setMode(FailPoint::off); // Wait for the failpoint to be reached @@ -513,7 +534,8 @@ TEST_F(DatabaseClonerTest, DatabaseAndCollectionStats) { class DatabaseClonerMultitenancyTest : public DatabaseClonerTest { public: - DatabaseClonerMultitenancyTest() : _dbName(TenantId(OID::gen()), dbNameStr) {} + DatabaseClonerMultitenancyTest() + : _dbName(DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), dbNameStr)) {} protected: void setUp() override { diff --git a/src/mongo/db/repl/dbcheck.cpp b/src/mongo/db/repl/dbcheck.cpp index 09f3d7a410bab..011b0b57acee2 100644 --- a/src/mongo/db/repl/dbcheck.cpp +++ b/src/mongo/db/repl/dbcheck.cpp @@ -27,22 +27,47 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/bson/simple_bsonelement_comparator.h" -#include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/health_log_interface.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" #include "mongo/db/record_id_helpers.h" #include "mongo/db/repl/dbcheck.h" #include "mongo/db/repl/dbcheck_gen.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/md5.hpp" +#include "mongo/util/uuid.h" namespace mongo { @@ -203,10 +228,14 @@ std::unique_ptr dbCheckBatchEntry( if (hashesMatch) { return SeverityEnum::Info; } - // Implicitly replicated collections and capped collections not replicating truncation are - // not designed to be consistent, so inconsistency is not necessarily pathological. 
+ // We relax inconsistency checks for some collections to a simple warning in some cases. + // preimages and change collections may be using untimestamped truncates on each node + // independently and can easily be inconsistent. In addition, by design + // the image_collection can skip a write during steady-state replication, and the preimages + // collection can be inconsistent during logical initial sync, all of which is + // harmless. if (nss.isChangeStreamPreImagesCollection() || nss.isConfigImagesCollection() || - (options && options->capped)) { + nss.isChangeCollection() || (options && options->capped)) { return SeverityEnum::Warning; } @@ -224,11 +253,27 @@ DbCheckHasher::DbCheckHasher(OperationContext* opCtx, const BSONKey& end, int64_t maxCount, int64_t maxBytes) - : _opCtx(opCtx), _maxKey(end), _maxCount(maxCount), _maxBytes(maxBytes) { + : _opCtx(opCtx), + _maxKey(end), + _maxCount(maxCount), + _maxBytes(maxBytes), + _previousDataCorruptionMode(opCtx->recoveryUnit()->getDataCorruptionDetectionMode()), + _previousPrepareConflictBehavior(opCtx->recoveryUnit()->getPrepareConflictBehavior()) { // Get the MD5 hasher set up. md5_init(&_state); + // We don't want detected data corruption to prevent us from finishing our scan. Locations where + // we throw these errors should already be writing to the health log anyways. + opCtx->recoveryUnit()->setDataCorruptionDetectionMode( + DataCorruptionDetectionMode::kLogAndContinue); + + // We need to enforce prepare conflicts in order to return correct results. This can't be done + // while a snapshot is already open. + if (_previousPrepareConflictBehavior != PrepareConflictBehavior::kEnforce) { + opCtx->recoveryUnit()->setPrepareConflictBehavior(PrepareConflictBehavior::kEnforce); + } + if (!collection->isClustered()) { // Get the _id index. const IndexDescriptor* desc = collection->getIndexCatalog()->findIdIndex(opCtx); @@ -260,6 +305,13 @@ DbCheckHasher::DbCheckHasher(OperationContext* opCtx, } } +DbCheckHasher::~DbCheckHasher() { + _opCtx->recoveryUnit()->setDataCorruptionDetectionMode(_previousDataCorruptionMode); + if (_previousPrepareConflictBehavior != PrepareConflictBehavior::kEnforce) { + _opCtx->recoveryUnit()->setPrepareConflictBehavior(_previousPrepareConflictBehavior); + } +} + template const md5_byte_t* md5Cast(const T* ptr) { @@ -364,22 +416,6 @@ Status dbCheckBatchOnSecondary(OperationContext* opCtx, // Set up the hasher, boost::optional hasher; try { - // We may not have a read timestamp if the dbCheck command was run on an older version of - // the server with snapshotRead:false. Since we don't implement this feature, we'll log an - // error about skipping the batch to ensure an operator notices. - if (!entry.getReadTimestamp().has_value()) { - auto logEntry = - dbCheckErrorHealthLogEntry(entry.getNss(), - "dbCheck failed", - OplogEntriesEnum::Batch, - Status{ErrorCodes::Error(6769502), - "no readTimestamp in oplog entry. 
Ensure dbCheck " - "command is not using snapshotRead:false"}, - entry.toBSON()); - HealthLogInterface::get(opCtx)->log(*logEntry); - return Status::OK(); - } - opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, entry.getReadTimestamp()); diff --git a/src/mongo/db/repl/dbcheck.h b/src/mongo/db/repl/dbcheck.h index 0c9368b9ad566..43e9895bdc815 100644 --- a/src/mongo/db/repl/dbcheck.h +++ b/src/mongo/db/repl/dbcheck.h @@ -29,13 +29,32 @@ #pragma once +#include +#include +#include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/health_log_gen.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/plan_executor.h" #include "mongo/db/repl/dbcheck_gen.h" +#include "mongo/db/repl/dbcheck_idl.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/util/md5.h" #include "mongo/util/md5.hpp" +#include "mongo/util/time_support.h" namespace mongo { @@ -116,6 +135,8 @@ class DbCheckHasher { int64_t maxCount = std::numeric_limits::max(), int64_t maxBytes = std::numeric_limits::max()); + ~DbCheckHasher(); + /** * Hash all documents up to the deadline. */ @@ -155,6 +176,9 @@ class DbCheckHasher { int64_t _maxBytes = 0; int64_t _bytesSeen = 0; + + DataCorruptionDetectionMode _previousDataCorruptionMode; + PrepareConflictBehavior _previousPrepareConflictBehavior; }; namespace repl { diff --git a/src/mongo/db/repl/dbcheck.idl b/src/mongo/db/repl/dbcheck.idl index 5ded9ce6dafbb..cdb0b67453deb 100644 --- a/src/mongo/db/repl/dbcheck.idl +++ b/src/mongo/db/repl/dbcheck.idl @@ -65,6 +65,22 @@ enums: Collection: "collection" Start: "start" Stop: "stop" + DbCheckValidationMode: + description: "The type of consistency check that dbCheck will run." + type: string + # dataConsistency: Runs the existing data consistency checks between nodes of a replica set. + # This is intended for cases where running secondary index checks is too expensive, and we + # want to just test data consistency between nodes in a replica set. + # + # dataConsistencyAndMissingIndexKeysCheck: Runs the existing data consistency checks between + # nodes of a replica set and verifies that there are no missing index keys in all index tables. + # + # extraIndexKeysCheck: Verifies that there are no extra index keys in the index table. + values: + dataConsistency: "dataConsistency" + dataConsistencyAndMissingIndexKeysCheck: "dataConsistencyAndMissingIndexKeysCheck" + extraIndexKeysCheck: "extraIndexKeysCheck" + structs: DbCheckSingleInvocation: @@ -113,6 +129,14 @@ structs: description: Wait for this writeConcern at the end of every batch. Default is w:1 with no timeout. type: WriteConcern default: WriteConcernOptions() + validateMode: + description: The type of consistency checks that dbCheck will run. + type: DbCheckValidationMode + optional: true + secondaryIndex: + description: The name of the index to run extra index keys check on. 
+ type: string + optional: true DbCheckAllInvocation: description: "Command object for database-wide form of dbCheck invocation" @@ -172,7 +196,7 @@ structs: optional: true readTimestamp: type: timestamp - optional: true + optional: false DbCheckOplogCollection: description: "Oplog entry for dbCheck collection metadata" diff --git a/src/mongo/db/repl/dbcheck_idl.cpp b/src/mongo/db/repl/dbcheck_idl.cpp index 91abc44189a6b..cf536e2609941 100644 --- a/src/mongo/db/repl/dbcheck_idl.cpp +++ b/src/mongo/db/repl/dbcheck_idl.cpp @@ -28,9 +28,10 @@ */ #include "mongo/db/repl/dbcheck_idl.h" + #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/simple_bsonelement_comparator.h" #include "mongo/bson/simple_bsonobj_comparator.h" namespace mongo { diff --git a/src/mongo/db/repl/dbcheck_idl.h b/src/mongo/db/repl/dbcheck_idl.h index 9e2d9c880e6cd..9cbbdaaea5671 100644 --- a/src/mongo/db/repl/dbcheck_idl.h +++ b/src/mongo/db/repl/dbcheck_idl.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" namespace mongo { diff --git a/src/mongo/db/repl/delayable_timeout_callback.cpp b/src/mongo/db/repl/delayable_timeout_callback.cpp index d84d709977946..87b5d28f0596a 100644 --- a/src/mongo/db/repl/delayable_timeout_callback.cpp +++ b/src/mongo/db/repl/delayable_timeout_callback.cpp @@ -30,7 +30,15 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication #include "mongo/db/repl/delayable_timeout_callback.h" + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/delayable_timeout_callback.h b/src/mongo/db/repl/delayable_timeout_callback.h index d1cb8e29f956d..efbc40bb0bd9b 100644 --- a/src/mongo/db/repl/delayable_timeout_callback.h +++ b/src/mongo/db/repl/delayable_timeout_callback.h @@ -28,10 +28,16 @@ */ #pragma once +#include +#include #include +#include #include "mongo/base/status.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/delayable_timeout_callback_test.cpp b/src/mongo/db/repl/delayable_timeout_callback_test.cpp index b293d2793bbdb..dbebf4e6bffc0 100644 --- a/src/mongo/db/repl/delayable_timeout_callback_test.cpp +++ b/src/mongo/db/repl/delayable_timeout_callback_test.cpp @@ -28,7 +28,19 @@ */ #include "mongo/db/repl/delayable_timeout_callback.h" + +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.cpp b/src/mongo/db/repl/drop_pending_collection_reaper.cpp index bf5cd467c6d44..f4a7fc880e83c 100644 --- a/src/mongo/db/repl/drop_pending_collection_reaper.cpp +++ b/src/mongo/db/repl/drop_pending_collection_reaper.cpp @@ -28,18 +28,28 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/drop_pending_collection_reaper.h" - #include 
+#include +#include +#include +#include #include +#include + +#include "mongo/base/status.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/drop_pending_collection_reaper.h b/src/mongo/db/repl/drop_pending_collection_reaper.h index 18c359ada2a59..04f8fb2810f47 100644 --- a/src/mongo/db/repl/drop_pending_collection_reaper.h +++ b/src/mongo/db/repl/drop_pending_collection_reaper.h @@ -31,9 +31,12 @@ #pragma once #include +#include #include #include +#include +#include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/optime.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp index 54bb65f087060..3bd34a46ad887 100644 --- a/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp +++ b/src/mongo/db/repl/drop_pending_collection_reaper_test.cpp @@ -27,24 +27,35 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include + +#include -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/client.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace { diff --git a/src/mongo/db/repl/election_reason_counter.h b/src/mongo/db/repl/election_reason_counter.h index bdb9a309faca1..e7e15fb20f416 100644 --- a/src/mongo/db/repl/election_reason_counter.h +++ b/src/mongo/db/repl/election_reason_counter.h @@ -29,7 +29,9 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/repl/election_reason_counter_gen.h" +#include "mongo/idl/idl_parser.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/election_reason_counter_parser.cpp b/src/mongo/db/repl/election_reason_counter_parser.cpp index a4e7fbba318f9..aabc0a1fc19dc 100644 --- a/src/mongo/db/repl/election_reason_counter_parser.cpp +++ b/src/mongo/db/repl/election_reason_counter_parser.cpp @@ -29,6 +29,9 @@ #include "mongo/db/repl/election_reason_counter_parser.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/idl/idl_parser.h" + namespace mongo { namespace repl { diff --git 
a/src/mongo/db/repl/election_reason_counter_parser.h b/src/mongo/db/repl/election_reason_counter_parser.h index 960021ced2b7c..553d70ea74ac2 100644 --- a/src/mongo/db/repl/election_reason_counter_parser.h +++ b/src/mongo/db/repl/election_reason_counter_parser.h @@ -29,6 +29,9 @@ #pragma once +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/repl/election_reason_counter.h" namespace mongo { diff --git a/src/mongo/db/repl/heartbeat_response_action.cpp b/src/mongo/db/repl/heartbeat_response_action.cpp index 284b34def3a29..007509e71b569 100644 --- a/src/mongo/db/repl/heartbeat_response_action.cpp +++ b/src/mongo/db/repl/heartbeat_response_action.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/heartbeat_response_action.h" namespace mongo { diff --git a/src/mongo/db/repl/hello_auth.cpp b/src/mongo/db/repl/hello_auth.cpp index 9ea72c57618ac..8e99e0bdb9b63 100644 --- a/src/mongo/db/repl/hello_auth.cpp +++ b/src/mongo/db/repl/hello_auth.cpp @@ -29,13 +29,23 @@ #include "mongo/db/repl/hello_auth.h" +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/authenticate.h" #include "mongo/db/auth/authentication_session.h" #include "mongo/db/auth/sasl_command_constants.h" #include "mongo/db/auth/sasl_commands.h" #include "mongo/db/auth/sasl_mechanism_registry.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/commands/authentication_commands.h" -#include "mongo/db/stats/counters.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/repl/hello_auth.h b/src/mongo/db/repl/hello_auth.h index e714a7a6bf4bf..488ec780ff312 100644 --- a/src/mongo/db/repl/hello_auth.h +++ b/src/mongo/db/repl/hello_auth.h @@ -31,6 +31,7 @@ #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/hello_gen.h" diff --git a/src/mongo/db/repl/hello_response.cpp b/src/mongo/db/repl/hello_response.cpp index 75a7ef153d675..d78a49041766b 100644 --- a/src/mongo/db/repl/hello_response.cpp +++ b/src/mongo/db/repl/hello_response.cpp @@ -28,15 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/repl/hello_response.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/oid.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/repl/hello_response.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/hello_response.h b/src/mongo/db/repl/hello_response.h index 953db13c5703c..03a4365c5a035 100644 --- a/src/mongo/db/repl/hello_response.h +++ b/src/mongo/db/repl/hello_response.h @@ -29,14 +29,23 @@ #pragma once +#include +#include +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" #include 
"mongo/db/repl/optime.h" #include "mongo/db/repl/optime_with.h" #include "mongo/rpc/topology_version_gen.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/repl/idempotency_document_structure.cpp b/src/mongo/db/repl/idempotency_document_structure.cpp index 4d132a77d3ac2..b02624304e106 100644 --- a/src/mongo/db/repl/idempotency_document_structure.cpp +++ b/src/mongo/db/repl/idempotency_document_structure.cpp @@ -27,12 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/idempotency_document_structure.h" +#include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/repl/idempotency_document_structure.h" namespace mongo { diff --git a/src/mongo/db/repl/idempotency_document_structure.h b/src/mongo/db/repl/idempotency_document_structure.h index 9eaa99de4c7d6..58e73b56319ea 100644 --- a/src/mongo/db/repl/idempotency_document_structure.h +++ b/src/mongo/db/repl/idempotency_document_structure.h @@ -33,7 +33,9 @@ #include #include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/repl/idempotency_scalar_generator.h" namespace mongo { diff --git a/src/mongo/db/repl/idempotency_document_structure_test.cpp b/src/mongo/db/repl/idempotency_document_structure_test.cpp index 64bc10c0043b0..0b8931678ead1 100644 --- a/src/mongo/db/repl/idempotency_document_structure_test.cpp +++ b/src/mongo/db/repl/idempotency_document_structure_test.cpp @@ -27,14 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/jsobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/repl/idempotency_document_structure.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/repl/idempotency_scalar_generator.cpp b/src/mongo/db/repl/idempotency_scalar_generator.cpp index 17c13c6a5a34e..4542a9c72a90f 100644 --- a/src/mongo/db/repl/idempotency_scalar_generator.cpp +++ b/src/mongo/db/repl/idempotency_scalar_generator.cpp @@ -27,15 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/repl/idempotency_scalar_generator.h" - -#include - -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/repl/idempotency_scalar_generator.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/repl/idempotency_test.cpp b/src/mongo/db/repl/idempotency_test.cpp index a4a0d52cce776..3842d650bc762 100644 --- a/src/mongo/db/repl/idempotency_test.cpp +++ b/src/mongo/db/repl/idempotency_test.cpp @@ -28,23 +28,48 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/catalog/index_catalog.h" +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/query/index_bounds.h" -#include "mongo/db/query/internal_plans.h" -#include "mongo/db/query/plan_executor.h" -#include "mongo/db/repl/idempotency_document_structure.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/idempotency_test_fixture.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/update/document_diff_calculator.h" #include "mongo/db/update/document_diff_test_helpers.h" #include "mongo/db/update/update_oplog_entry_serialization.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -213,9 +238,9 @@ void RandomizedIdempotencyTest::runUpdateV2IdempotencyTestCase() { // state would look like and compute diffs based on that. generatedDoc = generateDocWithId(kDocId); auto diffOutput = doc_diff::computeOplogDiff( - oldDoc, *generatedDoc, update_oplog_entry::kSizeOfDeltaOplogEntryMetadata, nullptr); + oldDoc, *generatedDoc, update_oplog_entry::kSizeOfDeltaOplogEntryMetadata); ASSERT(diffOutput); - oplogDiff = BSON("$v" << 2 << "diff" << diffOutput->diff); + oplogDiff = BSON("$v" << 2 << "diff" << *diffOutput); auto op = update(kDocId, oplogDiff); ASSERT_OK(runOpInitialSync(op)); if (generatedDoc) { diff --git a/src/mongo/db/repl/idempotency_test_fixture.cpp b/src/mongo/db/repl/idempotency_test_fixture.cpp index e14db65a6ca80..c365376581053 100644 --- a/src/mongo/db/repl/idempotency_test_fixture.cpp +++ b/src/mongo/db/repl/idempotency_test_fixture.cpp @@ -27,39 +27,44 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/idempotency_test_fixture.h" - +#include +#include +#include +#include #include #include #include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_validation.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/client.h" +#include "mongo/db/catalog/validate_results.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" -#include "mongo/db/repl/bgsync.h" -#include "mongo/db/repl/drop_pending_collection_reaper.h" -#include "mongo/db/repl/oplog.h" -#include "mongo/db/repl/oplog_applier.h" -#include "mongo/db/repl/oplog_applier_impl.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/repl/idempotency_test_fixture.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_entry_test_helpers.h" -#include "mongo/db/repl/oplog_interface_local.h" -#include "mongo/db/repl/replication_consistency_markers_mock.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/md5.h" #include "mongo/util/md5.hpp" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { @@ -360,7 +365,7 @@ std::vector IdempotencyTest::validateAllCollections() { auto dbNames = catalog->getAllDbNames(); for (auto& dbName : dbNames) { // Skip local database. 
- if (dbName.db() != "local") { + if (!dbName.isLocalDB()) { std::vector collectionNames; { Lock::DBLock lk(_opCtx.get(), dbName, MODE_S); @@ -459,7 +464,7 @@ template OplogEntry IdempotencyTest::update(char const* _id, const BSONObj makeInsertApplyOpsEntry(const NamespaceString& nss, const UUID& uuid, const BSONObj& doc) { return BSON("op" << "i" - << "ns" << nss.toString() << "ui" << uuid << "o" << doc); + << "ns" << nss.toString_forTest() << "ui" << uuid << "o" << doc); } } // namespace repl } // namespace mongo diff --git a/src/mongo/db/repl/idempotency_test_fixture.h b/src/mongo/db/repl/idempotency_test_fixture.h index 9412aac9aa93f..408679727dac1 100644 --- a/src/mongo/db/repl/idempotency_test_fixture.h +++ b/src/mongo/db/repl/idempotency_test_fixture.h @@ -29,21 +29,30 @@ #pragma once +#include +#include #include #include #include +#include +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator_interface.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_applier_impl_test_fixture.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/optime.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/repl/initial_sync_base_cloner.cpp b/src/mongo/db/repl/initial_sync_base_cloner.cpp index 6f6888fe9ce0d..15a8dc5f53304 100644 --- a/src/mongo/db/repl/initial_sync_base_cloner.cpp +++ b/src/mongo/db/repl/initial_sync_base_cloner.cpp @@ -28,12 +28,37 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/initial_sync_base_cloner.h" +#include "mongo/db/repl/repl_sync_shared_data.h" #include "mongo/db/repl/replication_consistency_markers_gen.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationInitialSync @@ -145,8 +170,7 @@ Status InitialSyncBaseCloner::checkInitialSyncIdIsUnchanged() { Status InitialSyncBaseCloner::checkRollBackIdIsUnchanged() { BSONObj info; try { - getClient()->runCommand( - DatabaseName(boost::none, "admin"), BSON("replSetGetRBID" << 1), info); + getClient()->runCommand(DatabaseName::kAdmin, BSON("replSetGetRBID" << 1), info); } catch (DBException& e) { if (ErrorCodes::isRetriableError(e)) { static constexpr char errorMsg[] = diff --git a/src/mongo/db/repl/initial_sync_base_cloner.h b/src/mongo/db/repl/initial_sync_base_cloner.h index dc6bb9f931781..1ceb3b5975209 100644 --- 
a/src/mongo/db/repl/initial_sync_base_cloner.h +++ b/src/mongo/db/repl/initial_sync_base_cloner.h @@ -29,9 +29,18 @@ #pragma once +#include + #include "mongo/base/checked_cast.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/client/dbclient_connection.h" #include "mongo/db/repl/base_cloner.h" #include "mongo/db/repl/initial_sync_shared_data.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/initial_sync_cloner_test_fixture.cpp b/src/mongo/db/repl/initial_sync_cloner_test_fixture.cpp index 443f5460b125c..9e2ddf5fc26b7 100644 --- a/src/mongo/db/repl/initial_sync_cloner_test_fixture.cpp +++ b/src/mongo/db/repl/initial_sync_cloner_test_fixture.cpp @@ -27,11 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/initial_sync_cloner_test_fixture.h" +#include "mongo/db/repl/repl_sync_shared_data.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/util/duration.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/initial_sync_cloner_test_fixture.h b/src/mongo/db/repl/initial_sync_cloner_test_fixture.h index 07730b21aa160..0c89f72dc931f 100644 --- a/src/mongo/db/repl/initial_sync_cloner_test_fixture.h +++ b/src/mongo/db/repl/initial_sync_cloner_test_fixture.h @@ -31,6 +31,7 @@ #include "mongo/db/repl/cloner_test_fixture.h" #include "mongo/db/repl/initial_sync_shared_data.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/initial_sync_shared_data.cpp b/src/mongo/db/repl/initial_sync_shared_data.cpp index d133b4bbd667d..26b0e94c2422a 100644 --- a/src/mongo/db/repl/initial_sync_shared_data.cpp +++ b/src/mongo/db/repl/initial_sync_shared_data.cpp @@ -27,9 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include #include "mongo/db/repl/initial_sync_shared_data.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/initial_sync_shared_data.h b/src/mongo/db/repl/initial_sync_shared_data.h index e8d6a9d17223f..26ee67deb44e6 100644 --- a/src/mongo/db/repl/initial_sync_shared_data.h +++ b/src/mongo/db/repl/initial_sync_shared_data.h @@ -29,10 +29,16 @@ #pragma once +#include #include #include "mongo/db/repl/repl_sync_shared_data.h" #include "mongo/db/server_options.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/initial_sync_shared_data_test.cpp b/src/mongo/db/repl/initial_sync_shared_data_test.cpp index e0d19048ac0f1..87035e59cb8f7 100644 --- a/src/mongo/db/repl/initial_sync_shared_data_test.cpp +++ b/src/mongo/db/repl/initial_sync_shared_data_test.cpp @@ -27,10 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/repl/initial_sync_shared_data.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" namespace mongo { diff --git a/src/mongo/db/repl/initial_syncer.cpp b/src/mongo/db/repl/initial_syncer.cpp index cd4bba6c98675..b43f7b8f2182b 100644 --- a/src/mongo/db/repl/initial_syncer.cpp +++ b/src/mongo/db/repl/initial_syncer.cpp @@ -28,53 +28,75 @@ */ -#include "mongo/platform/basic.h" - -#include "initial_syncer.h" - -#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include #include +#include #include -#include "mongo/base/counter.h" +#include "initial_syncer.h" + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/fetcher.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_retry_scheduler.h" +#include "mongo/db/client.h" #include "mongo/db/commands/server_status_metric.h" -#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/feature_compatibility_version_parser.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/all_database_cloner.h" +#include "mongo/db/repl/collection_cloner.h" +#include "mongo/db/repl/database_cloner.h" #include "mongo/db/repl/initial_sync_state.h" #include "mongo/db/repl/initial_syncer_common_stats.h" #include "mongo/db/repl/initial_syncer_factory.h" -#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_batcher.h" #include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_fetcher.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_consistency_markers.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/sync_source_selector.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/transaction_oplog_application.h" +#include "mongo/db/server_options.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" -#include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #include "mongo/util/scopeguard.h" #include 
"mongo/util/str.h" -#include "mongo/util/system_clock_source.h" #include "mongo/util/time_support.h" #include "mongo/util/timer.h" #include "mongo/util/version/releases.h" @@ -284,7 +306,7 @@ Status InitialSyncer::startup(OperationContext* opCtx, _clonerAttemptExec = std::make_unique( _clonerExec, Status(ErrorCodes::CallbackCanceled, "Initial Sync Attempt Canceled")); auto status = _scheduleWorkAndSaveHandle_inlock( - [=](const executor::TaskExecutor::CallbackArgs& args) { + [=, this](const executor::TaskExecutor::CallbackArgs& args) { _startInitialSyncAttemptCallback(args, initialSyncAttempt, initialSyncMaxAttempts); }, &_startInitialSyncAttemptHandle, @@ -577,7 +599,9 @@ void InitialSyncer::_tearDown_inlock(OperationContext* opCtx, const bool orderedCommit = true; _storage->oplogDiskLocRegister(opCtx, initialDataTimestamp, orderedCommit); - tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); + if (ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { + tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); + } ServerlessOperationLockRegistry::recoverLocks(opCtx); reconstructPreparedTransactions(opCtx, repl::OplogApplication::Mode::kInitialSync); @@ -681,7 +705,7 @@ void InitialSyncer::_startInitialSyncAttemptCallback( // _scheduleWorkAndSaveHandle_inlock() is shutdown-aware. status = _scheduleWorkAndSaveHandle_inlock( - [=](const executor::TaskExecutor::CallbackArgs& args) { + [=, this](const executor::TaskExecutor::CallbackArgs& args) { _chooseSyncSourceCallback( args, chooseSyncSourceAttempt, chooseSyncSourceMaxAttempts, onCompletionGuard); }, @@ -745,7 +769,7 @@ void InitialSyncer::_chooseSyncSourceCallback( "numInitialSyncConnectAttempts"_attr = numInitialSyncConnectAttempts.load()); auto status = _scheduleWorkAtAndSaveHandle_inlock( when, - [=](const executor::TaskExecutor::CallbackArgs& args) { + [=, this](const executor::TaskExecutor::CallbackArgs& args) { _chooseSyncSourceCallback(args, chooseSyncSourceAttempt + 1, chooseSyncSourceMaxAttempts, @@ -786,7 +810,7 @@ void InitialSyncer::_chooseSyncSourceCallback( // Schedule rollback ID checker. _rollbackChecker = std::make_unique(*_attemptExec, _syncSource); - auto scheduleResult = _rollbackChecker->reset([=](const RollbackChecker::Result& result) { + auto scheduleResult = _rollbackChecker->reset([=, this](const RollbackChecker::Result& result) { return _rollbackCheckerResetCallback(result, onCompletionGuard); }); status = scheduleResult.getStatus(); @@ -868,9 +892,9 @@ void InitialSyncer::_rollbackCheckerResetCallback( // which retries up to 'numInitialSyncOplogFindAttempts' times'. This will fail relatively // quickly in the presence of network errors, allowing us to choose a different sync source. 
status = _scheduleLastOplogEntryFetcher_inlock( - [=](const StatusWith& response, - mongo::Fetcher::NextAction*, - mongo::BSONObjBuilder*) mutable { + [=, this](const StatusWith& response, + mongo::Fetcher::NextAction*, + mongo::BSONObjBuilder*) mutable { _lastOplogEntryFetcherCallbackForDefaultBeginFetchingOpTime(response, onCompletionGuard); }, @@ -947,9 +971,9 @@ Status InitialSyncer::_scheduleGetBeginFetchingOpTime_inlock( _syncSource, NamespaceString::kSessionTransactionsTableNamespace.db().toString(), cmd.obj(), - [=](const StatusWith& response, - mongo::Fetcher::NextAction*, - mongo::BSONObjBuilder*) mutable { + [=, this](const StatusWith& response, + mongo::Fetcher::NextAction*, + mongo::BSONObjBuilder*) mutable { _getBeginFetchingOpTimeCallback( response, onCompletionGuard, defaultBeginFetchingOpTime); }, @@ -1019,9 +1043,9 @@ void InitialSyncer::_getBeginFetchingOpTimeCallback( // which retries up to 'numInitialSyncOplogFindAttempts' times'. This will fail relatively // quickly in the presence of network errors, allowing us to choose a different sync source. status = _scheduleLastOplogEntryFetcher_inlock( - [=](const StatusWith& response, - mongo::Fetcher::NextAction*, - mongo::BSONObjBuilder*) mutable { + [=, this](const StatusWith& response, + mongo::Fetcher::NextAction*, + mongo::BSONObjBuilder*) mutable { _lastOplogEntryFetcherCallbackForBeginApplyingTimestamp( response, onCompletionGuard, beginFetchingOpTime); }, @@ -1075,9 +1099,9 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForBeginApplyingTimestamp( _syncSource, NamespaceString::kServerConfigurationNamespace.db().toString(), queryBob.obj(), - [=](const StatusWith& response, - mongo::Fetcher::NextAction*, - mongo::BSONObjBuilder*) mutable { + [=, this](const StatusWith& response, + mongo::Fetcher::NextAction*, + mongo::BSONObjBuilder*) mutable { _fcvFetcherCallback(response, onCompletionGuard, lastOpTime, beginFetchingOpTime); }, ReadPreferenceSetting::secondaryPreferredMetadata(), @@ -1227,12 +1251,12 @@ void InitialSyncer::_fcvFetcherCallback(const StatusWith std::make_unique( _sharedData.get(), _opts.oplogFetcherMaxFetcherRestarts), _dataReplicatorExternalState.get(), - [=](OplogFetcher::Documents::const_iterator first, - OplogFetcher::Documents::const_iterator last, - const OplogFetcher::DocumentsInfo& info) { + [=, this](OplogFetcher::Documents::const_iterator first, + OplogFetcher::Documents::const_iterator last, + const OplogFetcher::DocumentsInfo& info) { return _enqueueDocuments(first, last, info); }, - [=](const Status& s, int rbid) { _oplogFetcherCallback(s, onCompletionGuard); }, + [=, this](const Status& s, int rbid) { _oplogFetcherCallback(s, onCompletionGuard); }, std::move(oplogFetcherConfig)); LOGV2_DEBUG(21178, @@ -1385,9 +1409,9 @@ void InitialSyncer::_allDatabaseClonerCallback( // strategy used when retrieving collection data, and avoids retrieving all the data and then // throwing it away due to a transient network outage. 
status = _scheduleLastOplogEntryFetcher_inlock( - [=](const StatusWith& status, - mongo::Fetcher::NextAction*, - mongo::BSONObjBuilder*) { + [=, this](const StatusWith& status, + mongo::Fetcher::NextAction*, + mongo::BSONObjBuilder*) { _lastOplogEntryFetcherCallbackForStopTimestamp(status, onCompletionGuard); }, kInitialSyncerHandlesRetries); @@ -1409,31 +1433,31 @@ void InitialSyncer::_lastOplogEntryFetcherCallbackForStopTimestamp( if (_shouldRetryError(lock, status)) { auto scheduleStatus = (*_attemptExec) - ->scheduleWork( - [this, onCompletionGuard](executor::TaskExecutor::CallbackArgs args) { - // It is not valid to schedule the retry from within this callback, - // hence we schedule a lambda to schedule the retry. - stdx::lock_guard lock(_mutex); - // Since the stopTimestamp is retrieved after we have done all the - // work of retrieving collection data, we handle retries within this - // class by retrying for - // 'initialSyncTransientErrorRetryPeriodSeconds' (default 24 hours). - // This is the same retry strategy used when retrieving collection - // data, and avoids retrieving all the data and then throwing it - // away due to a transient network outage. - auto status = _scheduleLastOplogEntryFetcher_inlock( - [=](const StatusWith& status, - mongo::Fetcher::NextAction*, - mongo::BSONObjBuilder*) { - _lastOplogEntryFetcherCallbackForStopTimestamp( - status, onCompletionGuard); - }, - kInitialSyncerHandlesRetries); - if (!status.isOK()) { - onCompletionGuard->setResultAndCancelRemainingWork_inlock( - lock, status); - } - }); + ->scheduleWork([this, onCompletionGuard]( + executor::TaskExecutor::CallbackArgs args) { + // It is not valid to schedule the retry from within this callback, + // hence we schedule a lambda to schedule the retry. + stdx::lock_guard lock(_mutex); + // Since the stopTimestamp is retrieved after we have done all the + // work of retrieving collection data, we handle retries within this + // class by retrying for + // 'initialSyncTransientErrorRetryPeriodSeconds' (default 24 hours). + // This is the same retry strategy used when retrieving collection + // data, and avoids retrieving all the data and then throwing it + // away due to a transient network outage. + auto status = _scheduleLastOplogEntryFetcher_inlock( + [=, this](const StatusWith& status, + mongo::Fetcher::NextAction*, + mongo::BSONObjBuilder*) { + _lastOplogEntryFetcherCallbackForStopTimestamp( + status, onCompletionGuard); + }, + kInitialSyncerHandlesRetries); + if (!status.isOK()) { + onCompletionGuard->setResultAndCancelRemainingWork_inlock(lock, + status); + } + }); if (scheduleStatus.isOK()) return; // If scheduling failed, we're shutting down and cannot retry. 
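The hunks above and below swap `[=]` for `[=, this]` in the InitialSyncer callbacks. The diff itself does not state the motivation, but the change matches C++20, which deprecates capturing `this` implicitly through a `[=]` default capture; writing `[=, this]` keeps the behaviour identical while making the capture explicit. Below is a minimal standalone sketch of the same pattern; the class and method names (`Syncer`, `scheduleRetry`, `retry`) are invented for illustration and are not MongoDB APIs.

    // Standalone illustration (not MongoDB code) of the capture change.
    // C++20 deprecates the implicit capture of `this` that `[=]` used to
    // perform, so callbacks that touch member state name it explicitly.
    #include <functional>
    #include <iostream>

    class Syncer {
    public:
        void scheduleRetry(int attempt) {
            // Pre-C++20 spelling:  _callback = [=] { retry(attempt); };
            _callback = [=, this] { retry(attempt); };  // copies `attempt`, captures `this` explicitly
        }
        void fire() { _callback(); }

    private:
        void retry(int attempt) { std::cout << "retry attempt " << attempt << '\n'; }
        std::function<void()> _callback;
    };

    int main() {
        Syncer s;
        s.scheduleRetry(2);
        s.fire();  // prints "retry attempt 2"
        return 0;
    }

The rewritten lambdas in this file follow the same shape: every local they used to copy is still copied by value, and `this` is now listed explicitly in the capture clause.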
@@ -1569,7 +1593,7 @@ void InitialSyncer::_getNextApplierBatchCallback( Date_t lastAppliedWall = ops.back().getWallClockTime(); auto numApplied = ops.size(); - MultiApplier::CallbackFn onCompletionFn = [=](const Status& s) { + MultiApplier::CallbackFn onCompletionFn = [=, this](const Status& s) { return _multiApplierCallback( s, {lastApplied, lastAppliedWall}, numApplied, onCompletionGuard); }; @@ -1611,7 +1635,9 @@ void InitialSyncer::_getNextApplierBatchCallback( auto when = (*_attemptExec)->now() + _opts.getApplierBatchCallbackRetryWait; status = _scheduleWorkAtAndSaveHandle_inlock( when, - [=](const CallbackArgs& args) { _getNextApplierBatchCallback(args, onCompletionGuard); }, + [=, this](const CallbackArgs& args) { + _getNextApplierBatchCallback(args, onCompletionGuard); + }, &_getNextApplierBatchHandle, "_getNextApplierBatchCallback"); if (!status.isOK()) { @@ -1722,8 +1748,10 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWithscheduleWork( - [=](const mongo::executor::TaskExecutor::CallbackArgs&) { _finishCallback(result); }); + auto scheduleResult = + _exec->scheduleWork([=, this](const mongo::executor::TaskExecutor::CallbackArgs&) { + _finishCallback(result); + }); if (!scheduleResult.isOK()) { LOGV2_WARNING(21197, "Unable to schedule initial syncer completion task due to " @@ -1811,7 +1839,7 @@ void InitialSyncer::_finishInitialSyncAttempt(const StatusWithnow() + _opts.initialSyncRetryWait; auto status = _scheduleWorkAtAndSaveHandle_inlock( when, - [=](const executor::TaskExecutor::CallbackArgs& args) { + [=, this](const executor::TaskExecutor::CallbackArgs& args) { _startInitialSyncAttemptCallback( args, _stats.failedInitialSyncAttempts, _stats.maxFailedInitialSyncAttempts); }, @@ -1987,7 +2015,7 @@ void InitialSyncer::_checkApplierProgressAndScheduleGetNextApplierBatch_inlock( // Get another batch to apply. // _scheduleWorkAndSaveHandle_inlock() is shutdown-aware. 
auto status = _scheduleWorkAndSaveHandle_inlock( - [=](const executor::TaskExecutor::CallbackArgs& args) { + [=, this](const executor::TaskExecutor::CallbackArgs& args) { return _getNextApplierBatchCallback(args, onCompletionGuard); }, &_getNextApplierBatchHandle, @@ -2013,7 +2041,7 @@ void InitialSyncer::_scheduleRollbackCheckerCheckForRollback_inlock( } auto scheduleResult = - _rollbackChecker->checkForRollback([=](const RollbackChecker::Result& result) { + _rollbackChecker->checkForRollback([=, this](const RollbackChecker::Result& result) { _rollbackCheckerCheckForRollbackCallback(result, onCompletionGuard); }); diff --git a/src/mongo/db/repl/initial_syncer.h b/src/mongo/db/repl/initial_syncer.h index 3a124b6b02d77..3f7105565dad1 100644 --- a/src/mongo/db/repl/initial_syncer.h +++ b/src/mongo/db/repl/initial_syncer.h @@ -30,17 +30,25 @@ #pragma once +#include +#include #include #include #include #include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_connection.h" #include "mongo/client/fetcher.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/callback_completion_guard.h" #include "mongo/db/repl/data_replicator_external_state.h" #include "mongo/db/repl/initial_sync_shared_data.h" @@ -48,17 +56,25 @@ #include "mongo/db/repl/multiapplier.h" #include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/oplog_buffer.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/oplog_fetcher.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/rollback_checker.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/sync_source_selector.h" #include "mongo/dbtests/mock/mock_dbclient_connection.h" #include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/initial_syncer_common_stats.cpp b/src/mongo/db/repl/initial_syncer_common_stats.cpp index cd5c943f56fcb..03e494c7e711a 100644 --- a/src/mongo/db/repl/initial_syncer_common_stats.cpp +++ b/src/mongo/db/repl/initial_syncer_common_stats.cpp @@ -28,8 +28,14 @@ */ #include "mongo/db/repl/initial_syncer_common_stats.h" + +#include + #include "mongo/db/commands/server_status_metric.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationInitialSync diff --git a/src/mongo/db/repl/initial_syncer_common_stats.h b/src/mongo/db/repl/initial_syncer_common_stats.h index 4a0fd858f5022..d5ed3561930b5 100644 --- a/src/mongo/db/repl/initial_syncer_common_stats.h +++ b/src/mongo/db/repl/initial_syncer_common_stats.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/repl/optime.h" diff --git 
a/src/mongo/db/repl/initial_syncer_factory.cpp b/src/mongo/db/repl/initial_syncer_factory.cpp index 3327ad666b756..448941a316783 100644 --- a/src/mongo/db/repl/initial_syncer_factory.cpp +++ b/src/mongo/db/repl/initial_syncer_factory.cpp @@ -29,6 +29,18 @@ #include "mongo/db/repl/initial_syncer_factory.h" +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" + namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/initial_syncer_factory.h b/src/mongo/db/repl/initial_syncer_factory.h index 42d725925ae53..dd87843065c31 100644 --- a/src/mongo/db/repl/initial_syncer_factory.h +++ b/src/mongo/db/repl/initial_syncer_factory.h @@ -30,12 +30,16 @@ #pragma once #include +#include +#include #include +#include "mongo/base/status_with.h" #include "mongo/db/repl/data_replicator_external_state.h" #include "mongo/db/repl/initial_syncer_interface.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/string_map.h" diff --git a/src/mongo/db/repl/initial_syncer_test.cpp b/src/mongo/db/repl/initial_syncer_test.cpp index df015013477de..11b5483e83ad3 100644 --- a/src/mongo/db/repl/initial_syncer_test.cpp +++ b/src/mongo/db/repl/initial_syncer_test.cpp @@ -28,58 +28,95 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include +#include #include #include - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/feature_compatibility_version_document_gen.h" -#include "mongo/db/feature_compatibility_version_parser.h" +#include "mongo/db/index_builds_coordinator.h" #include "mongo/db/index_builds_coordinator_mongod.h" -#include "mongo/db/json.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/query/getmore_command_gen.h" #include "mongo/db/repl/collection_cloner.h" #include "mongo/db/repl/data_replicator_external_state_mock.h" #include "mongo/db/repl/initial_syncer.h" -#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_fetcher.h" #include "mongo/db/repl/oplog_fetcher_mock.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/replication_recovery_mock.h" -#include "mongo/db/repl/reporter.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/repl/sync_source_resolver.h" #include "mongo/db/repl/sync_source_selector.h" #include 
"mongo/db/repl/sync_source_selector_mock.h" #include "mongo/db/repl/task_executor_mock.h" -#include "mongo/db/repl/update_position_args.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_mock.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/executor/mock_network_fixture.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_test_fixture.h" +#include "mongo/executor/thread_pool_mock.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/mutex.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_name.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/fail_point.h" #include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" #include "mongo/util/version/releases.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" - #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -330,7 +367,7 @@ class InitialSyncerTest : public executor::ThreadPoolExecutorTest, _storageInterface->dropCollFn = [this](OperationContext* opCtx, const NamespaceString& nss) { LockGuard lock(_storageInterfaceWorkDoneMutex); - _storageInterfaceWorkDone.droppedCollections.push_back(nss.ns()); + _storageInterfaceWorkDone.droppedCollections.push_back(nss.toString_forTest()); return Status::OK(); }; _storageInterface->dropUserDBsFn = [this](OperationContext* opCtx) { @@ -361,6 +398,11 @@ class InitialSyncerTest : public executor::ThreadPoolExecutorTest, }; auto* service = getGlobalServiceContext(); + + auto replSettings = createServerlessReplSettings(); + auto replCoord = std::make_unique(service, replSettings); + repl::ReplicationCoordinator::set(service, std::move(replCoord)); + service->setFastClockSource(std::make_unique()); service->setPreciseClockSource(std::make_unique()); ThreadPool::Options dbThreadPoolOptions; @@ -602,7 +644,7 @@ RemoteCommandResponse makeCursorResponse(CursorId cursorId, { BSONObjBuilder cursorBob(bob.subobjStart("cursor")); cursorBob.append("id", cursorId); - cursorBob.append("ns", nss.toString()); + cursorBob.append("ns", nss.toString_forTest()); { BSONArrayBuilder batchBob( cursorBob.subarrayStart(isFirstBatch ? 
"firstBatch" : "nextBatch")); @@ -2987,7 +3029,7 @@ TEST_F( const NamespaceStringOrUUID& nsOrUUID, const TimestampedBSONObj& doc, long long term) { - insertDocumentNss = *nsOrUUID.nss(); + insertDocumentNss = nsOrUUID.nss(); insertDocumentDoc = doc; insertDocumentTerm = term; return Status(ErrorCodes::OperationFailed, "failed to insert oplog entry"); @@ -3056,7 +3098,7 @@ TEST_F( const NamespaceStringOrUUID& nsOrUUID, const TimestampedBSONObj& doc, long long term) { - insertDocumentNss = *nsOrUUID.nss(); + insertDocumentNss = nsOrUUID.nss(); insertDocumentDoc = doc; insertDocumentTerm = term; initialSyncer->shutdown().transitional_ignore(); @@ -3601,10 +3643,10 @@ TEST_F(InitialSyncerTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfter // sync source. We must do this setup before responding to the FCV, to avoid a race. NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.a"); _mockServer->setCommandReply("listDatabases", - makeListDatabasesResponse({nss.db().toString()})); + makeListDatabasesResponse({nss.db_forTest().toString()})); // Set up data for "a" - _mockServer->assignCollectionUuid(nss.ns(), *_options1.uuid); + _mockServer->assignCollectionUuid(nss.ns_forTest(), *_options1.uuid); _mockServer->insert(nss, BSON("_id" << 1 << "a" << 1)); // listCollections for "a" @@ -3634,7 +3676,7 @@ TEST_F(InitialSyncerTest, LastOpTimeShouldBeSetEvenIfNoOperationsAreAppliedAfter NamespaceString(nss.getCommandNS()), {BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name" << "_id_" - << "ns" << nss.ns())}) + << "ns" << nss.ns_forTest())}) .data); { @@ -4291,11 +4333,11 @@ TEST_F(InitialSyncerTest, // sync source. We must do this setup before responding to the FCV, to avoid a race. NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.a"); _mockServer->setCommandReply("listDatabases", - makeListDatabasesResponse({nss.db().toString()})); + makeListDatabasesResponse({nss.db_forTest().toString()})); // Set up data for "a" - _mockServer->assignCollectionUuid(nss.ns(), *_options1.uuid); + _mockServer->assignCollectionUuid(nss.ns_forTest(), *_options1.uuid); _mockServer->insert(nss, BSON("_id" << 1 << "a" << 1)); // listCollections for "a" @@ -4325,7 +4367,7 @@ TEST_F(InitialSyncerTest, NamespaceString(nss.getCommandNS()), {BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name" << "_id_" - << "ns" << nss.ns())}) + << "ns" << nss.ns_forTest())}) .data); { @@ -4460,7 +4502,9 @@ TEST_F(InitialSyncerTest, TestRemainingInitialSyncEstimatedMillisMetric) { globalFailPointRegistry().find("initialSyncHangDuringCollectionClone"); // Hang after all docs have been cloned in collection 'a.a'. auto timesEntered = hangDuringCloningFailPoint->setMode( - FailPoint::alwaysOn, 0, BSON("namespace" << nss.ns() << "numDocsToClone" << numDocs)); + FailPoint::alwaysOn, + 0, + BSON("namespace" << nss.ns_forTest() << "numDocsToClone" << numDocs)); { // Keep the cloner from finishing so end-of-clone-stage network events don't interfere. @@ -4496,13 +4540,13 @@ TEST_F(InitialSyncerTest, TestRemainingInitialSyncEstimatedMillisMetric) { // We do not populate database 'b' with data as we don't actually complete initial sync in // this test. _mockServer->setCommandReply("listDatabases", - makeListDatabasesResponse({nss.db().toString(), "b"})); + makeListDatabasesResponse({nss.db_forTest().toString(), "b"})); // The AllDatabaseCloner post stage calls dbStats to record initial sync progress // metrics. 
This will be used to calculate both the data size of "a" and "b". _mockServer->setCommandReply("dbStats", BSON("dataSize" << dbSize)); // Set up data for "a" - _mockServer->assignCollectionUuid(nss.ns(), *_options1.uuid); + _mockServer->assignCollectionUuid(nss.ns_forTest(), *_options1.uuid); for (int i = 1; i <= 5; ++i) { _mockServer->insert(nss, BSON("_id" << i << "a" << i)); } @@ -4535,7 +4579,7 @@ TEST_F(InitialSyncerTest, TestRemainingInitialSyncEstimatedMillisMetric) { NamespaceString(nss.getCommandNS()), {BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name" << "_id_" - << "ns" << nss.ns())}) + << "ns" << nss.ns_forTest())}) .data); // Release the 'hangBeforeCloningFailPoint' to continue the cloning phase. } @@ -4743,12 +4787,12 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) { // listDatabases: a NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.a"); _mockServer->setCommandReply("listDatabases", - makeListDatabasesResponse({nss.db().toString()})); + makeListDatabasesResponse({nss.db_forTest().toString()})); // The AllDatabaseCloner post stage calls dbStats to record initial sync progress metrics. _mockServer->setCommandReply("dbStats", BSON("dataSize" << 10)); // Set up data for "a" - _mockServer->assignCollectionUuid(nss.ns(), *_options1.uuid); + _mockServer->assignCollectionUuid(nss.ns_forTest(), *_options1.uuid); for (int i = 1; i <= 5; ++i) { _mockServer->insert(nss, BSON("_id" << i << "a" << i)); } @@ -4781,7 +4825,7 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressReturnsCorrectProgress) { NamespaceString(nss.getCommandNS()), {BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name" << "_id_" - << "ns" << nss.ns())}) + << "ns" << nss.ns_forTest())}) .data); // Play all but last of the successful round of responses. @@ -5120,7 +5164,7 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressOmitsClonerStatsIfClonerStatsExc // listDatabases NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.a"); _mockServer->setCommandReply("listDatabases", - makeListDatabasesResponse({nss.db().toString()})); + makeListDatabasesResponse({nss.db_forTest().toString()})); // listCollections for "a" // listCollections data has to be broken up or it will trigger BSONObjTooLarge @@ -5162,7 +5206,7 @@ TEST_F(InitialSyncerTest, GetInitialSyncProgressOmitsClonerStatsIfClonerStatsExc NamespaceString(nss.getCommandNS()), {BSON("v" << OplogEntry::kOplogVersion << "key" << BSON("_id" << 1) << "name" << "_id_" - << "ns" << nss.ns())}) + << "ns" << nss.ns_forTest())}) .data); // Feature Compatibility Version. 
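Throughout the test changes above, plain string accessors such as `nss.ns()`, `nss.db()`, and `nss.toString()` give way to `ns_forTest()`, `db_forTest()`, and `toString_forTest()`. The diff does not state the rationale, but the naming suggests the general-purpose serializers are being restricted so production code must go through purpose-built serialization, while tests keep loudly named, easy-to-audit escape hatches. A generic sketch of that naming pattern (illustrative only, not the real `NamespaceString` interface):

```cpp
// Generic sketch of the "_forTest accessor" pattern suggested by these renames.
// Production call sites use purpose-built serializers; tests keep an accessor
// whose suffix makes accidental production use obvious in review and easy to grep.
#include <iostream>
#include <string>

class Namespace {
public:
    Namespace(std::string db, std::string coll) : _db(std::move(db)), _coll(std::move(coll)) {}

    // Intended for production use, e.g. building error messages.
    std::string toStringForErrorMsg() const {
        return _db + "." + _coll;
    }

    // Test-only accessors.
    std::string ns_forTest() const {
        return _db + "." + _coll;
    }
    std::string db_forTest() const {
        return _db;
    }

private:
    std::string _db;
    std::string _coll;
};

int main() {
    Namespace nss("a", "a");
    std::cout << nss.ns_forTest() << "\n";  // "a.a", acceptable inside a unit test
}
```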
diff --git a/src/mongo/db/repl/insert_group.cpp b/src/mongo/db/repl/insert_group.cpp index cd4af934c178a..23eb9e9a51676 100644 --- a/src/mongo/db/repl/insert_group.cpp +++ b/src/mongo/db/repl/insert_group.cpp @@ -28,18 +28,25 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/insert_group.h" - #include +#include +#include #include +#include -#include "mongo/bson/bsonobjbuilder.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops.h" -#include "mongo/db/repl/oplog_applier_impl.h" +#include "mongo/db/repl/insert_group.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/insert_group.h b/src/mongo/db/repl/insert_group.h index e9116279bffa3..4482c58502aad 100644 --- a/src/mongo/db/repl/insert_group.h +++ b/src/mongo/db/repl/insert_group.h @@ -30,9 +30,16 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/multiapplier.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/isself.cpp b/src/mongo/db/repl/isself.cpp index cc35bcacf4be5..6c47a4338aa69 100644 --- a/src/mongo/db/repl/isself.cpp +++ b/src/mongo/db/repl/isself.cpp @@ -28,25 +28,39 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/isself.h" - -#include - -#include "mongo/base/init.h" -#include "mongo/bson/util/builder.h" +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/client/authenticate.h" #include "mongo/client/dbclient_connection.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/commands.h" +#include "mongo/client/internal_auth.h" +#include "mongo/db/database_name.h" +#include "mongo/db/repl/isself.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/errno_util.h" +#include "mongo/util/fail_point.h" #include "mongo/util/net/cidr.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/net/ssl_options.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #if defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__) || defined(__sun) || \ defined(__OpenBSD__) @@ -57,18 +71,18 @@ #error isself needs to be implemented for this platform #endif +#ifndef _WIN32 +#include +#include +#endif #ifdef FASTPATH_UNIX #include #include -#ifdef __FreeBSD__ -#include -#endif - #elif defined(_WIN32) #include -#include 
+#include // IWYU pragma: keep #include #include #include @@ -267,11 +281,11 @@ bool isSelfSlowPath(const HostAndPort& hostAndPort, double timeoutSeconds = static_cast(durationCount(timeout)) / 1000.0; conn.setSoTimeout(timeoutSeconds); - // We need to avoid the isMaster call triggered by a normal connect, which would - // cause a deadlock. 'isSelf' is called by the Replication Coordinator when validating - // a replica set configuration document, but the 'isMaster' command requires a lock on the - // replication coordinator to execute. As such we call we call 'connectSocketOnly', which - // does not call 'isMaster'. + // We need to avoid the "hello" call triggered by a normal connect, which would cause a + // deadlock. 'isSelf' is called by the Replication Coordinator when validating a replica set + // configuration document, but the "hello" command requires a lock on the replication + // coordinator to execute. As such we call we call 'connectSocketOnly', which does not call + // "hello". auto connectSocketResult = conn.connectSocketOnly(hostAndPort, boost::none); if (!connectSocketResult.isOK()) { LOGV2(4834700, @@ -293,7 +307,7 @@ bool isSelfSlowPath(const HostAndPort& hostAndPort, } } BSONObj out; - bool ok = conn.runCommand(DatabaseName(boost::none, "admin"), BSON("_isSelf" << 1), out); + bool ok = conn.runCommand(DatabaseName::kAdmin, BSON("_isSelf" << 1), out); bool me = ok && out["id"].type() == jstOID && instanceId == out["id"].OID(); return me; diff --git a/src/mongo/db/repl/isself_test.cpp b/src/mongo/db/repl/isself_test.cpp index a9317d75adb35..2ca942a0338c1 100644 --- a/src/mongo/db/repl/isself_test.cpp +++ b/src/mongo/db/repl/isself_test.cpp @@ -27,13 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" #include "mongo/db/repl/isself.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/fail_point.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/scopeguard.h" diff --git a/src/mongo/db/repl/last_vote.cpp b/src/mongo/db/repl/last_vote.cpp index 6a2a905d616c8..5e1747dca5525 100644 --- a/src/mongo/db/repl/last_vote.cpp +++ b/src/mongo/db/repl/last_vote.cpp @@ -29,9 +29,13 @@ #include "mongo/db/repl/last_vote.h" -#include "mongo/bson/util/bson_check.h" +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/last_vote.h b/src/mongo/db/repl/last_vote.h index 3bb4075579d76..5beccf34c2100 100644 --- a/src/mongo/db/repl/last_vote.h +++ b/src/mongo/db/repl/last_vote.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/error_extra_info.h" #include "mongo/base/status_with.h" namespace mongo { diff --git a/src/mongo/db/repl/member_config.cpp b/src/mongo/db/repl/member_config.cpp index 107e5ade678b2..5c4dbfe050d54 100644 --- a/src/mongo/db/repl/member_config.cpp +++ b/src/mongo/db/repl/member_config.cpp @@ -27,16 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/repl/member_config.h" - -#include - -#include "mongo/bson/util/bson_check.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/repl/member_config.h b/src/mongo/db/repl/member_config.h index e68c24b6f2d9e..aa48f62669426 100644 --- a/src/mongo/db/repl/member_config.h +++ b/src/mongo/db/repl/member_config.h @@ -29,14 +29,22 @@ #pragma once +#include +#include +#include +#include #include #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/repl/member_config_gen.h" #include "mongo/db/repl/member_id.h" #include "mongo/db/repl/repl_set_tag.h" #include "mongo/db/repl/split_horizon.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/string_map.h" #include "mongo/util/time_support.h" @@ -115,7 +123,7 @@ class MemberConfig : private MemberConfigBase { } /** - * Gets the horizon name for which the parameters (captured during the first `isMaster`) + * Gets the horizon name for which the parameters (captured during the first `hello`) * correspond. */ StringData determineHorizon(const SplitHorizon::Parameters& params) const { @@ -194,7 +202,7 @@ class MemberConfig : private MemberConfigBase { } /** - * Returns true if this member is hidden (not reported by isMaster, not electable). + * Returns true if this member is hidden (not reported by "hello", not electable). */ bool isHidden() const { return getHidden(); diff --git a/src/mongo/db/repl/member_config_test.cpp b/src/mongo/db/repl/member_config_test.cpp index 9095392304565..7305fde439b14 100644 --- a/src/mongo/db/repl/member_config_test.cpp +++ b/src/mongo/db/repl/member_config_test.cpp @@ -27,14 +27,29 @@ * it in the license file. 
*/ +#include #include -#include - +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/member_config.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/member_data.cpp b/src/mongo/db/repl/member_data.cpp index 59fa6f057aa5b..e5b4880f3bcac 100644 --- a/src/mongo/db/repl/member_data.cpp +++ b/src/mongo/db/repl/member_data.cpp @@ -28,12 +28,17 @@ */ -#include "mongo/platform/basic.h" +#include -#include +#include +#include "mongo/base/string_data.h" #include "mongo/db/repl/member_data.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/member_data.h b/src/mongo/db/repl/member_data.h index 1b9bb68edb789..291cdfbb1f5eb 100644 --- a/src/mongo/db/repl/member_data.h +++ b/src/mongo/db/repl/member_data.h @@ -29,10 +29,15 @@ #pragma once +#include + #include "mongo/bson/timestamp.h" #include "mongo/db/repl/member_id.h" #include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" namespace mongo { @@ -129,11 +134,6 @@ class MemberData { bool up() const { return _health > 0; } - // Was this member up for the last hearbeeat - // (or we haven't received the first heartbeat yet) - bool maybeUp() const { - return _health != 0; - } OpTime getLastAppliedOpTime() const { return _lastAppliedOpTime; diff --git a/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp b/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp index 12093c4cbcdfd..a39fdcd990468 100644 --- a/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp +++ b/src/mongo/db/repl/mock_repl_coord_server_fixture.cpp @@ -28,23 +28,37 @@ */ #include "mongo/db/repl/mock_repl_coord_server_fixture.h" + +#include + +#include "mongo/client/connection_string.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/replication_recovery_mock.h" +#include "mongo/db/repl/storage_interface.h" #include 
"mongo/db/repl/storage_interface_mock.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/storage/snapshot_manager.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/db/repl/mock_repl_coord_server_fixture.h b/src/mongo/db/repl/mock_repl_coord_server_fixture.h index 6fa3f07685183..99ca41a232f82 100644 --- a/src/mongo/db/repl/mock_repl_coord_server_fixture.h +++ b/src/mongo/db/repl/mock_repl_coord_server_fixture.h @@ -27,6 +27,8 @@ * it in the license file. */ +#include + #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" diff --git a/src/mongo/db/repl/multiapplier.cpp b/src/mongo/db/repl/multiapplier.cpp index e4c9a20079fc1..cd6c12f1c35d3 100644 --- a/src/mongo/db/repl/multiapplier.cpp +++ b/src/mongo/db/repl/multiapplier.cpp @@ -27,15 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/multiapplier.h" - +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/db/client.h" -#include "mongo/db/operation_context.h" +#include "mongo/db/repl/multiapplier.h" #include "mongo/db/repl/optime.h" +#include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" namespace mongo { @@ -84,7 +88,7 @@ Status MultiApplier::startup() noexcept { } auto scheduleResult = _executor->scheduleWork( - [=](const executor::TaskExecutor::CallbackArgs& cbd) { return _callback(cbd); }); + [=, this](const executor::TaskExecutor::CallbackArgs& cbd) { return _callback(cbd); }); if (!scheduleResult.isOK()) { _state = State::kComplete; return scheduleResult.getStatus(); diff --git a/src/mongo/db/repl/multiapplier.h b/src/mongo/db/repl/multiapplier.h index bd77a7a8ba8de..d1238c27ab6a2 100644 --- a/src/mongo/db/repl/multiapplier.h +++ b/src/mongo/db/repl/multiapplier.h @@ -29,6 +29,7 @@ #pragma once +#include #include #include #include @@ -40,11 +41,14 @@ #include "mongo/base/status_with.h" #include "mongo/db/jsobj.h" #include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/functional.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/multiapplier_test.cpp b/src/mongo/db/repl/multiapplier_test.cpp index 41ebf999c91c4..9f27a983baa74 100644 --- a/src/mongo/db/repl/multiapplier_test.cpp +++ b/src/mongo/db/repl/multiapplier_test.cpp @@ -27,15 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/client.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/multiapplier.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace { diff --git a/src/mongo/db/repl/noop_writer.cpp b/src/mongo/db/repl/noop_writer.cpp index 4bf8188010bd0..edef24a7e4ca2 100644 --- a/src/mongo/db/repl/noop_writer.cpp +++ b/src/mongo/db/repl/noop_writer.cpp @@ -28,22 +28,41 @@ */ -#include "mongo/platform/basic.h" - +#include +// IWYU pragma: no_include "cxxabi.h" #include - -#include "mongo/db/commands.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/noop_writer.h" -#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/testing_proctor.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -83,6 +102,13 @@ class NoopWriter::PeriodicNoopRunner { private: void run(Seconds waitTime, NoopWriteFn noopWrite) { Client::initThread("NoopWriter"); + + // TODO(SERVER-74656): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + while (true) { const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; @@ -162,7 +188,7 @@ void NoopWriter::_writeNoop(OperationContext* opCtx) { auto replCoord = ReplicationCoordinator::get(opCtx); // Its a proxy for being a primary - if (!replCoord->canAcceptWritesForDatabase(opCtx, "admin")) { + if (!replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)) { LOGV2_DEBUG(21220, 1, "Not a primary, skipping the noop write"); return; } @@ -188,13 +214,12 @@ void NoopWriter::_writeNoop(OperationContext* opCtx) { "Writing noop to oplog as there has been no writes to this replica set " "within write interval", "writeInterval"_attr = _writeInterval); - writeConflictRetry( - opCtx, "writeNoop", NamespaceString::kRsOplogNamespace.ns(), [&opCtx] { - WriteUnitOfWork uow(opCtx); - opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(opCtx, - kMsgObj); - uow.commit(); - }); + writeConflictRetry(opCtx, "writeNoop", NamespaceString::kRsOplogNamespace, [&opCtx] { + WriteUnitOfWork uow(opCtx); + opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage(opCtx, + kMsgObj); + uow.commit(); + }); } } diff --git a/src/mongo/db/repl/noop_writer.h b/src/mongo/db/repl/noop_writer.h index 999bc889a1d56..c1d28022568a5 100644 --- a/src/mongo/db/repl/noop_writer.h +++ b/src/mongo/db/repl/noop_writer.h @@ -30,9 +30,12 @@ #pragma once #include +#include +#include "mongo/base/status.h" #include "mongo/db/repl/optime.h" #include "mongo/platform/mutex.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/oplog.cpp b/src/mongo/db/repl/oplog.cpp index d9da5a256e660..d08e3744531c6 100644 --- a/src/mongo/db/repl/oplog.cpp +++ b/src/mongo/db/repl/oplog.cpp @@ -29,97 +29,141 @@ #include "mongo/db/repl/oplog.h" -#include -#include +#include +#include +#include +#include +#include +#include +#include #include #include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/auth/privilege.h" #include "mongo/db/catalog/capped_collection_maintenance.h" #include "mongo/db/catalog/capped_utils.h" #include "mongo/db/catalog/coll_mod.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/drop_database.h" #include "mongo/db/catalog/drop_indexes.h" +#include "mongo/db/catalog/health_log_gen.h" #include "mongo/db/catalog/health_log_interface.h" #include "mongo/db/catalog/import_collection_oplog_entry_gen.h" +#include "mongo/db/catalog/index_build_oplog_entry.h" +#include "mongo/db/catalog/index_builds_manager.h" 
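In the `noop_writer.cpp` hunk above, the new thread-setup block passes the held lock into `setSystemOperationUnkillableByStepdown`. This looks like the lock-as-witness idiom that MongoDB expresses with `WithLock` (the header `mongo/util/concurrency/with_lock.h` is added to `initial_syncer.h` earlier in this diff): the callee cannot be reached without proof that the relevant mutex is held. A simplified standalone sketch; note that the real code locks the `Client` object itself rather than a separate mutex:

```cpp
// Sketch of the "pass the lock as a witness" idiom. Illustrative types only;
// this is not the real mongo::Client or mongo::WithLock.
#include <iostream>
#include <mutex>

class WithLock {
public:
    // Implicit conversion from a held lock_guard is the point: a WithLock cannot
    // be constructed without actually holding some mutex.
    WithLock(const std::lock_guard<std::mutex>&) {}
};

class Client {
public:
    void setUnkillableByStepdown(WithLock) {
        // The WithLock parameter documents, and enforces at the call site, that the
        // client lock is held while this flag is flipped.
        _unkillableByStepdown = true;
    }
    std::mutex& mutex() {
        return _mutex;
    }
    bool unkillableByStepdown() const {
        return _unkillableByStepdown;
    }

private:
    std::mutex _mutex;
    bool _unkillableByStepdown = false;
};

int main() {
    Client client;
    {
        std::lock_guard<std::mutex> lk(client.mutex());
        client.setUnkillableByStepdown(lk);  // compiles only with a live lock_guard
    }
    std::cout << std::boolalpha << client.unkillableByStepdown() << "\n";  // true
}
```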
+#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/local_oplog_info.h" -#include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/change_stream_change_collection_manager.h" #include "mongo/db/change_stream_pre_images_collection_manager.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/client.h" #include "mongo/db/coll_mod_gen.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/global_index.h" -#include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_util.h" #include "mongo/db/ops/delete.h" #include "mongo/db/ops/delete_request_gen.h" #include "mongo/db/ops/update.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/change_stream_preimage_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/apply_ops.h" -#include "mongo/db/repl/bgsync.h" #include "mongo/db/repl/dbcheck.h" #include "mongo/db/repl/image_collection_entry_gen.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/repl/timestamp_block.h" #include "mongo/db/repl/transaction_oplog_application.h" -#include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/resumable_index_builds_gen.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/shard_role.h" #include "mongo/db/stats/counters.h" #include "mongo/db/stats/server_write_concern_metrics.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_options.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/platform/random.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/type_index_catalog.h" -#include 
"mongo/scripting/engine.h" -#include "mongo/util/concurrency/idle_thread_block.h" -#include "mongo/util/elapsed_tracker.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" #include "mongo/util/file.h" #include "mongo/util/namespace_string_util.h" +#include "mongo/util/processinfo.h" +#include "mongo/util/serialization_context.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication namespace mongo { -using std::endl; -using std::string; -using std::stringstream; -using std::unique_ptr; -using std::vector; using namespace std::string_literals; using IndexVersion = IndexDescriptor::IndexVersion; @@ -127,11 +171,6 @@ using IndexVersion = IndexDescriptor::IndexVersion; namespace repl { namespace { -using namespace fmt::literals; - -MONGO_FAIL_POINT_DEFINE(addDestinedRecipient); -MONGO_FAIL_POINT_DEFINE(sleepBetweenInsertOpTimeGenerationAndLogOp); - // Failpoint to block after a write and its oplog entry have been written to the storage engine and // are visible, but before we have advanced 'lastApplied' for the write. MONGO_FAIL_POINT_DEFINE(hangBeforeLogOpAdvancesLastApplied); @@ -248,7 +287,8 @@ void createIndexForApplyOps(OperationContext* opCtx, auto indexCollection = CollectionPtr( db ? CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, indexNss) : nullptr); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Failed to create index due to missing collection: " << indexNss.ns(), + str::stream() << "Failed to create index due to missing collection: " + << indexNss.toStringForErrorMsg(), indexCollection); OpCounters* opCounters = opCtx->writesAreReplicated() ? &globalOpCounters : &replOpCounters; @@ -264,12 +304,12 @@ void createIndexForApplyOps(OperationContext* opCtx, if (OplogApplication::Mode::kInitialSync == mode) { auto normalSpecs = indexBuildsCoordinator->normalizeIndexSpecs(opCtx, indexCollection, {indexSpec}); - invariant(1U == normalSpecs.size(), - str::stream() << "Unexpected result from normalizeIndexSpecs - ns: " << indexNss - << "; uuid: " << indexCollection->uuid() - << "; original index spec: " << indexSpec - << "; normalized index specs: " - << BSON("normalSpecs" << normalSpecs)); + invariant( + 1U == normalSpecs.size(), + str::stream() << "Unexpected result from normalizeIndexSpecs - ns: " + << indexNss.toStringForErrorMsg() << "; uuid: " << indexCollection->uuid() + << "; original index spec: " << indexSpec + << "; normalized index specs: " << BSON("normalSpecs" << normalSpecs)); auto indexCatalog = indexCollection->getIndexCatalog(); auto prepareSpecResult = indexCatalog->prepareSpecForCreate(opCtx, indexCollection, normalSpecs[0], {}); @@ -287,8 +327,8 @@ void createIndexForApplyOps(OperationContext* opCtx, // the index build constraints to kRelax. 
invariant(ReplicationCoordinator::get(opCtx)->shouldRelaxIndexConstraints(opCtx, indexNss), str::stream() << "Unexpected result from shouldRelaxIndexConstraints - ns: " - << indexNss << "; uuid: " << indexCollection->uuid() - << "; original index spec: " << indexSpec); + << indexNss.toStringForErrorMsg() << "; uuid: " + << indexCollection->uuid() << "; original index spec: " << indexSpec); const auto constraints = IndexBuildsManager::IndexConstraints::kRelax; // Run single-phase builds synchronously with oplog batch application. For tenant migrations, @@ -351,7 +391,13 @@ void writeToImageCollection(OperationContext* opCtx, // stronger lock acquisition is taken on this namespace is during step up to create the // collection. AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); - AutoGetCollection autoColl(opCtx, NamespaceString::kConfigImagesNamespace, LockMode::MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::kConfigImagesNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); repl::ImageEntry imageEntry; imageEntry.set_id(sessionId); imageEntry.setTxnNumber(txnNum); @@ -376,7 +422,7 @@ void writeToImageCollection(OperationContext* opCtx, request.setFromOplogApplication(true); try { // This code path can also be hit by things such as `applyOps` and tenant migrations. - ::mongo::update(opCtx, autoColl.getDb(), request); + ::mongo::update(opCtx, collection, request); } catch (const ExceptionFor&) { // We can get a duplicate key when two upserts race on inserting a document. *upsertConfigImage = false; @@ -402,26 +448,19 @@ void writeToImageCollection(OperationContext* opCtx, */ -/* - * records - a vector of oplog records to be written. - * timestamps - a vector of respective Timestamp objects for each oplog record. - * oplogCollection - collection to be written to. - * finalOpTime - the OpTime of the last oplog record. - * wallTime - the wall clock time of the last oplog record. - */ -void _logOpsInner(OperationContext* opCtx, - const NamespaceString& nss, - std::vector* records, - const std::vector& timestamps, - const CollectionPtr& oplogCollection, - OpTime finalOpTime, - Date_t wallTime, - bool isAbortIndexBuild) { +void logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild) { auto replCoord = ReplicationCoordinator::get(opCtx); if (replCoord->getReplicationMode() == ReplicationCoordinator::modeReplSet && !replCoord->canAcceptWritesFor(opCtx, nss)) { str::stream ss; - ss << "logOp() but can't accept write to collection " << nss; + ss << "logOp() but can't accept write to collection " << nss.toStringForErrorMsg(); ss << ": entries: " << records->size() << ": [ "; for (const auto& record : *records) { ss << "(" << record.id << ", " << redact(record.data.toBson()) << ") "; @@ -475,8 +514,14 @@ void _logOpsInner(OperationContext* opCtx, hangBeforeLogOpAdvancesLastApplied.pauseWhileSet(opCtx); } + // As an optimization, we skip advancing the global timestamp. In this path on the + // primary, the caller will have already advanced the clock to at least this value when + // allocating the timestamp. + const bool advanceGlobalTimestamp = false; + // Optimes on the primary should always represent consistent database states. 
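The `writeToImageCollection` hunk above keeps the existing handling of racing upserts: when two writers try to upsert the same image document, the loser sees a duplicate-key error, flips the `upsertConfigImage` flag, and lets the enclosing retry run the write again as a plain update. A condensed sketch of that control flow, with hypothetical `tryUpsert`/`tryUpdate` helpers standing in for the real update path:

```cpp
// Condensed sketch of the upsert-race handling kept by the hunk above.
// tryUpsert/tryUpdate are hypothetical stand-ins for ::mongo::update().
#include <iostream>
#include <stdexcept>

struct DuplicateKeyError : std::runtime_error {
    DuplicateKeyError() : std::runtime_error("duplicate key") {}
};

// Pretend two writers race: the losing upsert throws DuplicateKeyError.
bool tryUpsert(bool loseRace) {
    if (loseRace)
        throw DuplicateKeyError();
    return true;
}
bool tryUpdate() {
    return true;  // plain update of the now-existing document
}

void writeImageEntry(bool* upsert, bool loseRace) {
    if (*upsert) {
        try {
            tryUpsert(loseRace);
        } catch (const DuplicateKeyError&) {
            // Another writer inserted the document first. Disable upsert and let the
            // enclosing retry loop run this function again as a plain update.
            *upsert = false;
        }
        return;
    }
    tryUpdate();
}

int main() {
    bool upsertConfigImage = true;
    writeImageEntry(&upsertConfigImage, /*loseRace=*/true);   // first attempt loses the race
    writeImageEntry(&upsertConfigImage, /*loseRace=*/false);  // retry takes the update path
    std::cout << std::boolalpha << upsertConfigImage << "\n";  // prints "false"
}
```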
- replCoord->setMyLastAppliedOpTimeAndWallTimeForward({finalOpTime, wallTime}); + replCoord->setMyLastAppliedOpTimeAndWallTimeForward({finalOpTime, wallTime}, + advanceGlobalTimestamp); // We set the last op on the client to 'finalOpTime', because that contains the // timestamp of the operation that the client actually performed. @@ -502,17 +547,23 @@ OpTime logOp(OperationContext* opCtx, MutableOplogEntry* oplogEntry) { if (replCoord->isOplogDisabledFor(opCtx, oplogEntry->getNss())) { uassert(ErrorCodes::IllegalOperation, str::stream() << "retryable writes is not supported for unreplicated ns: " - << oplogEntry->getNss().ns(), + << oplogEntry->getNss().toStringForErrorMsg(), oplogEntry->getStatementIds().empty()); return {}; } // If this oplog entry is from a tenant migration, include the tenant migration - // UUID. - const auto& recipientInfo = tenantMigrationInfo(opCtx); - if (recipientInfo) { + // UUID and optional donor timeline metadata. + if (const auto& recipientInfo = tenantMigrationInfo(opCtx)) { oplogEntry->setFromTenantMigration(recipientInfo->uuid); + if (oplogEntry->getTid() && + change_stream_serverless_helpers::isChangeStreamEnabled(opCtx, *oplogEntry->getTid()) && + recipientInfo->donorOplogEntryData) { + oplogEntry->setDonorOpTime(recipientInfo->donorOplogEntryData->donorOpTime); + oplogEntry->setDonorApplyOpsIndex(recipientInfo->donorOplogEntryData->applyOpsIndex); + } } + // TODO SERVER-51301 to remove this block. if (oplogEntry->getOpType() == repl::OpTypeEnum::kNoop) { opCtx->recoveryUnit()->ignoreAllMultiTimestampConstraints(); @@ -553,132 +604,18 @@ OpTime logOp(OperationContext* opCtx, MutableOplogEntry* oplogEntry) { std::vector timestamps{slot.getTimestamp()}; const auto isAbortIndexBuild = oplogEntry->getOpType() == OpTypeEnum::kCommand && parseCommandType(oplogEntry->getObject()) == OplogEntry::CommandType::kAbortIndexBuild; - _logOpsInner(opCtx, - oplogEntry->getNss(), - &records, - timestamps, - CollectionPtr(oplog), - slot, - wallClockTime, - isAbortIndexBuild); + logOplogRecords(opCtx, + oplogEntry->getNss(), + &records, + timestamps, + CollectionPtr(oplog), + slot, + wallClockTime, + isAbortIndexBuild); wuow.commit(); return slot; } -std::vector logInsertOps( - OperationContext* opCtx, - MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - const std::vector& fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr) { - invariant(begin != end); - - auto nss = oplogEntryTemplate->getNss(); - auto replCoord = ReplicationCoordinator::get(opCtx); - if (replCoord->isOplogDisabledFor(opCtx, nss)) { - invariant(!begin->stmtIds.empty()); - uassert(ErrorCodes::IllegalOperation, - str::stream() << "retryable writes is not supported for unreplicated ns: " - << nss.ns(), - begin->stmtIds.front() == kUninitializedStmtId); - return {}; - } - - // The number of entries in 'fromMigrate' should be consistent with the number of insert - // operations in [begin, end). Also, 'fromMigrate' is a sharding concept, so there is no - // need to check 'fromMigrate' for inserts that are not replicated. - invariant(std::distance(fromMigrate.begin(), fromMigrate.end()) == std::distance(begin, end), - oplogEntryTemplate->toReplOperation().toBSON().toString()); - - // If this oplog entry is from a tenant migration, include the tenant migration - // UUID. 
- const auto& recipientInfo = tenantMigrationInfo(opCtx); - if (recipientInfo) { - oplogEntryTemplate->setFromTenantMigration(recipientInfo->uuid); - } - - const size_t count = end - begin; - - // Use OplogAccessMode::kLogOp to avoid recursive locking. - AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kLogOp); - auto oplogInfo = oplogWrite.getOplogInfo(); - - WriteUnitOfWork wuow(opCtx); - - std::vector opTimes(count); - std::vector timestamps(count); - std::vector bsonOplogEntries(count); - std::vector records(count); - for (size_t i = 0; i < count; i++) { - // Make a copy from the template for each insert oplog entry. - MutableOplogEntry oplogEntry = *oplogEntryTemplate; - // Make a mutable copy. - auto insertStatementOplogSlot = begin[i].oplogSlot; - // Fetch optime now, if not already fetched. - if (insertStatementOplogSlot.isNull()) { - insertStatementOplogSlot = oplogInfo->getNextOpTimes(opCtx, 1U)[0]; - } - const auto docKey = getDocumentKey(opCtx, collectionPtr, begin[i].doc).getShardKeyAndId(); - oplogEntry.setObject(begin[i].doc); - oplogEntry.setObject2(docKey); - oplogEntry.setOpTime(insertStatementOplogSlot); - oplogEntry.setDestinedRecipient(getDestinedRecipientFn(begin[i].doc)); - addDestinedRecipient.execute([&](const BSONObj& data) { - auto recipient = data["destinedRecipient"].String(); - oplogEntry.setDestinedRecipient(boost::make_optional({recipient})); - }); - - OplogLink oplogLink; - if (i > 0) - oplogLink.prevOpTime = opTimes[i - 1]; - - oplogEntry.setFromMigrateIfTrue(fromMigrate[i]); - - appendOplogEntryChainInfo(opCtx, &oplogEntry, &oplogLink, begin[i].stmtIds); - - opTimes[i] = insertStatementOplogSlot; - timestamps[i] = insertStatementOplogSlot.getTimestamp(); - bsonOplogEntries[i] = oplogEntry.toBSON(); - // The storage engine will assign the RecordId based on the "ts" field of the oplog entry, - // see record_id_helpers::extractKey. 
- records[i] = Record{ - RecordId(), RecordData(bsonOplogEntries[i].objdata(), bsonOplogEntries[i].objsize())}; - } - - sleepBetweenInsertOpTimeGenerationAndLogOp.execute([&](const BSONObj& data) { - auto numMillis = data["waitForMillis"].numberInt(); - LOGV2(21244, - "Sleeping for {sleepMillis}ms after receiving {numOpTimesReceived} optimes from " - "{firstOpTime} to " - "{lastOpTime}", - "Sleeping due to sleepBetweenInsertOpTimeGenerationAndLogOp failpoint", - "sleepMillis"_attr = numMillis, - "numOpTimesReceived"_attr = count, - "firstOpTime"_attr = opTimes.front(), - "lastOpTime"_attr = opTimes.back()); - sleepmillis(numMillis); - }); - - invariant(!opTimes.empty()); - auto lastOpTime = opTimes.back(); - invariant(!lastOpTime.isNull()); - const Collection* oplog = oplogInfo->getCollection(); - auto wallClockTime = oplogEntryTemplate->getWallClockTime(); - const bool isAbortIndexBuild = false; - _logOpsInner(opCtx, - nss, - &records, - timestamps, - CollectionPtr(oplog), - lastOpTime, - wallClockTime, - isAbortIndexBuild); - wuow.commit(); - return opTimes; -} - void appendOplogEntryChainInfo(OperationContext* opCtx, MutableOplogEntry* oplogEntry, OplogLink* oplogLink, @@ -834,7 +771,7 @@ void createOplog(OperationContext* opCtx, options.cappedSize = sz; options.autoIndexId = CollectionOptions::NO; - writeConflictRetry(opCtx, "createCollection", oplogCollectionName.ns(), [&] { + writeConflictRetry(opCtx, "createCollection", oplogCollectionName, [&] { WriteUnitOfWork uow(opCtx); invariant(ctx.db()->createCollection(opCtx, oplogCollectionName, options)); acquireOplogCollectionForLogging(opCtx); @@ -948,6 +885,20 @@ const StringMap kOpsMap = { // complete. const bool allowRenameOutOfTheWay = (mode != repl::OplogApplication::Mode::kSecondary); + // Check whether there is an open but empty database where the name conflicts with the new + // collection's database name. It is possible for a secondary's in-memory database state + // to diverge from the primary's, if the primary rolls back the dropDatabase oplog entry + // after closing its own in-memory database state. In this case, the primary may accept + // creating a new database with a conflicting name to what the secondary still has open. + // It is okay to simply close the empty database on the secondary in this case. + if (auto duplicate = + DatabaseHolder::get(opCtx)->getNameWithConflictingCasing(nss.dbName())) { + if (CollectionCatalog::get(opCtx)->getAllCollectionUUIDsFromDb(*duplicate).size() == + 0) { + fassert(7727801, dropDatabaseForApplyOps(opCtx, *duplicate).isOK()); + } + } + Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IX); if (auto idIndexElem = cmd["idIndex"]) { // Remove "idIndex" field from command. 
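The new block in the `create` apply-ops handler above covers a subtle case: if the primary rolls back a `dropDatabase` after closing its own in-memory state, a secondary may still hold an open but empty in-memory database whose name differs from the incoming collection's database only by case, and it is safe to close that empty leftover before applying the create. A toy illustration of the case-insensitive conflict check, using standard containers rather than the real catalog:

```cpp
// Toy illustration of the "conflicting casing" check added above: before creating
// "Foo.bar", an empty leftover database named "foo" (differing only in case) is
// dropped so the create can proceed. Illustrative containers only.
#include <algorithm>
#include <cctype>
#include <iostream>
#include <map>
#include <set>
#include <string>

std::string lower(std::string s) {
    std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) {
        return static_cast<char>(std::tolower(c));
    });
    return s;
}

int main() {
    // Database name -> collections it currently holds.
    std::map<std::string, std::set<std::string>> catalog{{"foo", {}}};  // open but empty

    std::string newDb = "Foo";
    for (auto it = catalog.begin(); it != catalog.end(); ++it) {
        if (lower(it->first) == lower(newDb) && it->first != newDb && it->second.empty()) {
            std::cout << "dropping empty conflicting database " << it->first << "\n";
            catalog.erase(it);
            break;
        }
    }
    catalog[newDb].insert("bar");
    std::cout << "created " << newDb << ".bar\n";
}
```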
@@ -1093,10 +1044,12 @@ const StringMap kOpsMap = { const auto& cmd = entry.getObject(); auto opMsg = OpMsgRequestBuilder::create(entry.getNss().dbName(), cmd); - auto collModCmd = CollMod::parse(IDLParserContext("collModOplogEntry", - false /* apiStrict */, - entry.getNss().tenantId()), - opMsg.body); + auto collModCmd = + CollMod::parse(IDLParserContext("collModOplogEntry", + false /* apiStrict */, + entry.getNss().tenantId(), + SerializationContext::stateStorageRequest()), + opMsg.body); const auto nssOrUUID([&collModCmd, &entry, mode]() -> NamespaceStringOrUUID { // Oplog entries from secondary oplog application will always have the Uuid set and // it is only invocations of applyOps directly that may omit it @@ -1200,8 +1153,10 @@ const StringMap kOpsMap = { -> Status { const auto& entry = *op; auto importEntry = mongo::ImportCollectionOplogEntry::parse( - IDLParserContext( - "importCollectionOplogEntry", false /* apiStrict */, entry.getNss().tenantId()), + IDLParserContext("importCollectionOplogEntry", + false /* apiStrict */, + entry.getNss().tenantId(), + SerializationContext::stateStorageRequest()), entry.getObject()); applyImportCollection(opCtx, importEntry.getImportUUID(), @@ -1342,18 +1297,34 @@ const StringMap kOpsMap = { }; // Writes a change stream pre-image 'preImage' associated with oplog entry 'oplogEntry' and a write -// operation to collection 'collection' with "applyOpsIndex" 0. +// operation to collection 'collection'. If we are writing the pre-image during oplog application +// on a secondary for a serverless tenant migration, we will use the timestamp and applyOpsIndex +// from the donor timeline. If we are applying this entry on a primary during tenant oplog +// application, we skip writing of the pre-image. The op observer will handle inserting the +// correct pre-image on the primary in this case. void writeChangeStreamPreImage(OperationContext* opCtx, const CollectionPtr& collection, const mongo::repl::OplogEntry& oplogEntry, const BSONObj& preImage) { - ChangeStreamPreImageId preImageId{collection->uuid(), - oplogEntry.getTimestampForPreImage(), - static_cast<int64_t>(oplogEntry.getApplyOpsIndex())}; + Timestamp timestamp; + int64_t applyOpsIndex; + // If donorOpTime is set on the oplog entry, this is a write that is being applied on a + // secondary during the oplog catchup phase of a tenant migration. Otherwise, we are either + // applying a steady state write operation on a secondary or applying a write on the primary + // during tenant migration oplog catchup.
+ if (const auto& donorOpTime = oplogEntry.getDonorOpTime()) { + timestamp = donorOpTime->getTimestamp(); + applyOpsIndex = oplogEntry.getDonorApplyOpsIndex().get_value_or(0); + } else { + timestamp = oplogEntry.getTimestampForPreImage(); + applyOpsIndex = oplogEntry.getApplyOpsIndex(); + } + + ChangeStreamPreImageId preImageId{collection->uuid(), timestamp, applyOpsIndex}; ChangeStreamPreImage preImageDocument{ std::move(preImageId), oplogEntry.getWallClockTimeForPreImage(), preImage}; - ChangeStreamPreImagesCollectionManager::insertPreImage( + ChangeStreamPreImagesCollectionManager::get(opCtx).insertPreImage( opCtx, oplogEntry.getTid(), preImageDocument); } } // namespace @@ -1400,18 +1371,31 @@ StatusWith OplogApplication::parseMode(const std::string } void OplogApplication::checkOnOplogFailureForRecovery(OperationContext* opCtx, + const mongo::NamespaceString& nss, const mongo::BSONObj& oplogEntry, const std::string& errorMsg) { const bool isReplicaSet = repl::ReplicationCoordinator::get(opCtx->getServiceContext())->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet; - // Relax the constraints of oplog application if the node is not a replica set member. - if (!isReplicaSet) { + // Relax the constraints of oplog application if the node is not a replica set member or the + // node is in the middle of a backup and restore process. + if (!isReplicaSet || storageGlobalParams.restore) { return; } - // Only fassert in test environment. - if (getTestCommandsEnabled()) { + // During the recovery process, certain configuration collections such as + // 'config.image_collections' are handled differently, which may result in encountering oplog + // application failures in common scenarios, and therefore assert statements are not used. + if (nss.isConfigDB()) { + LOGV2_DEBUG( + 5415002, + 1, + "Error applying operation while recovering from stable checkpoint. This is related to " + "one of the configuration collections so this error might be benign.", + "oplogEntry"_attr = oplogEntry, + "error"_attr = errorMsg); + } else if (getTestCommandsEnabled()) { + // Only fassert in test environment. LOGV2_FATAL(5415000, "Error applying operation while recovering from stable " "checkpoint. This can lead to data corruption.", @@ -1459,7 +1443,7 @@ void logOplogConstraintViolation(OperationContext* opCtx, // @return failure status if an update should have happened and the document DNE. // See replset initial sync code. 
Status applyOperation_inlock(OperationContext* opCtx, - Database* db, + CollectionAcquisition& collectionAcquisition, const OplogEntryOrGroupedInserts& opOrGroupedInserts, bool alwaysUpsert, OplogApplication::Mode mode, @@ -1493,14 +1477,14 @@ Status applyOperation_inlock(OperationContext* opCtx, const bool inStableRecovery = mode == OplogApplication::Mode::kStableRecovering; NamespaceString requestNss; - CollectionPtr collection; if (auto uuid = op.getUuid()) { auto catalog = CollectionCatalog::get(opCtx); - collection = CollectionPtr(catalog->lookupCollectionByUUID(opCtx, uuid.value())); + const auto collection = CollectionPtr(catalog->lookupCollectionByUUID(opCtx, uuid.value())); if (!collection && inStableRecovery) { repl::OplogApplication::checkOnOplogFailureForRecovery( opCtx, - redact(opOrGroupedInserts.toBSON()), + op.getNss(), + redact(op.toBSONForLogging()), str::stream() << "(NamespaceNotFound): Failed to apply operation due to missing collection (" << uuid.value() << ")"); @@ -1515,7 +1499,7 @@ Status applyOperation_inlock(OperationContext* opCtx, "mode should be in initialSync or recovering", mode == OplogApplication::Mode::kInitialSync || OplogApplication::inRecovering(mode)); - writeConflictRetry(opCtx, "applyOps_imageInvalidation", op.getNss().toString(), [&] { + writeConflictRetry(opCtx, "applyOps_imageInvalidation", op.getNss(), [&] { WriteUnitOfWork wuow(opCtx); bool upsertConfigImage = true; writeToImageCollection(opCtx, @@ -1534,16 +1518,17 @@ Status applyOperation_inlock(OperationContext* opCtx, << uuid.value() << "): " << redact(opOrGroupedInserts.toBSON()), collection); requestNss = collection->ns(); + dassert(requestNss == collectionAcquisition.nss()); dassert(opCtx->lockState()->isCollectionLockedForMode(requestNss, MODE_IX)); } else { requestNss = op.getNss(); invariant(requestNss.coll().size()); dassert(opCtx->lockState()->isCollectionLockedForMode(requestNss, MODE_IX), - requestNss.ns()); - collection = CollectionPtr( - CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, requestNss)); + requestNss.toStringForErrorMsg()); } + const CollectionPtr& collection = collectionAcquisition.getCollectionPtr(); + assertInitialSyncCanContinueDuringShardMerge(opCtx, requestNss, op); BSONObj o = op.getObject(); @@ -1569,7 +1554,7 @@ Status applyOperation_inlock(OperationContext* opCtx, const IndexCatalog* indexCatalog = !collection ? nullptr : collection->getIndexCatalog(); const bool haveWrappingWriteUnitOfWork = opCtx->lockState()->inAWriteUnitOfWork(); uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "applyOps not supported on view: " << requestNss.ns(), + str::stream() << "applyOps not supported on view: " << requestNss.toStringForErrorMsg(), collection || !CollectionCatalog::get(opCtx)->lookupView(opCtx, requestNss)); // Decide whether to timestamp the write with the 'ts' field found in the operation. In general, @@ -1620,6 +1605,15 @@ Status applyOperation_inlock(OperationContext* opCtx, !requestNss.isTemporaryReshardingCollection(); }; + + // We are applying this entry on the primary during tenant oplog application. Decorate the opCtx + // with donor timeline metadata so that it will be available in the op observer and available + // for use here when oplog entries are logged. 
+ if (auto& recipientInfo = tenantMigrationInfo(opCtx)) { + recipientInfo->donorOplogEntryData = + DonorOplogEntryData(op.getOpTime(), op.getApplyOpsIndex()); + } + switch (opType) { case OpTypeEnum::kInsert: { uassert(ErrorCodes::NamespaceNotFound, @@ -1772,7 +1766,7 @@ Status applyOperation_inlock(OperationContext* opCtx, } } else if (inStableRecovery) { repl::OplogApplication::checkOnOplogFailureForRecovery( - opCtx, redact(op.toBSONForLogging()), redact(status)); + opCtx, op.getNss(), redact(op.toBSONForLogging()), redact(status)); } // Continue to the next block to retry the operation as an upsert. needToDoUpsert = true; @@ -1797,15 +1791,14 @@ Status applyOperation_inlock(OperationContext* opCtx, request.setUpsert(); request.setFromOplogApplication(true); - const StringData ns = op.getNss().ns(); - writeConflictRetry(opCtx, "applyOps_upsert", ns, [&] { + writeConflictRetry(opCtx, "applyOps_upsert", op.getNss(), [&] { WriteUnitOfWork wuow(opCtx); // If `haveWrappingWriteUnitOfWork` is true, do not timestamp the write. if (assignOperationTimestamp && timestamp != Timestamp::min()) { uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(timestamp)); } - UpdateResult res = update(opCtx, db, request); + UpdateResult res = update(opCtx, collectionAcquisition, request); if (res.numMatched == 0 && res.upsertedId.isEmpty()) { LOGV2_ERROR(21257, "No document was updated even though we got a DuplicateKey " @@ -1883,7 +1876,6 @@ Status applyOperation_inlock(OperationContext* opCtx, timestamp = op.getTimestamp(); } - const StringData ns = op.getNss().ns(); // Operations that were part of a retryable findAndModify have two formats for // replicating pre/post images. The classic format has primaries writing explicit noop // oplog entries that contain the necessary details for reconstructed a response to a @@ -1914,7 +1906,7 @@ Status applyOperation_inlock(OperationContext* opCtx, // to insert a document. We only have to make sure we didn't race with an insert that // won, but with an earlier `ts`. bool upsertConfigImage = true; - auto status = writeConflictRetry(opCtx, "applyOps_update", ns, [&] { + auto status = writeConflictRetry(opCtx, "applyOps_update", op.getNss(), [&] { WriteUnitOfWork wuow(opCtx); if (timestamp != Timestamp::min()) { uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(timestamp)); @@ -1932,7 +1924,7 @@ Status applyOperation_inlock(OperationContext* opCtx, invariant(documentFound); } - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, collectionAcquisition, request); if (ur.numMatched == 0 && ur.upsertedId.isEmpty()) { if (collection && collection->isCapped() && mode == OplogApplication::Mode::kSecondary) { @@ -2037,7 +2029,7 @@ Status applyOperation_inlock(OperationContext* opCtx, if (!status.isOK()) { if (inStableRecovery) { repl::OplogApplication::checkOnOplogFailureForRecovery( - opCtx, redact(op.toBSONForLogging()), redact(status)); + opCtx, op.getNss(), redact(op.toBSONForLogging()), redact(status)); } return status; } @@ -2073,9 +2065,8 @@ Status applyOperation_inlock(OperationContext* opCtx, // Determine if a change stream pre-image has to be recorded for the oplog entry. 
const bool recordChangeStreamPreImage = shouldRecordChangeStreamPreImage(); - const StringData ns = op.getNss().ns(); bool upsertConfigImage = true; - writeConflictRetry(opCtx, "applyOps_delete", ns, [&] { + writeConflictRetry(opCtx, "applyOps_delete", op.getNss(), [&] { WriteUnitOfWork wuow(opCtx); if (timestamp != Timestamp::min()) { uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(timestamp)); @@ -2098,7 +2089,7 @@ Status applyOperation_inlock(OperationContext* opCtx, request.setReturnDeleted(true); } - DeleteResult result = deleteObject(opCtx, collection, request); + DeleteResult result = deleteObject(opCtx, collectionAcquisition, request); if (op.getNeedsRetryImage()) { // Even if `result.nDeleted` is 0, we want to perform a write to the // imageCollection to advance the txnNumber/ts and invalidate the image. This @@ -2123,18 +2114,24 @@ Status applyOperation_inlock(OperationContext* opCtx, if (result.nDeleted == 0 && inStableRecovery) { repl::OplogApplication::checkOnOplogFailureForRecovery( opCtx, + op.getNss(), redact(op.toBSONForLogging()), !collection ? str::stream() << "(NamespaceNotFound): Failed to apply operation due " "to missing collection (" - << requestNss << ")" + << requestNss.toStringForErrorMsg() << ")" : "Applied a delete which did not delete anything."s); } // It is legal for a delete operation on the pre-images collection to delete zero // documents - pre-image collections are not guaranteed to contain the same set of - // documents at all times. + // documents at all times. The same holds for change-collections as they both rely + // on unreplicated deletes when "featureFlagUseUnreplicatedTruncatesForDeletions" is + // enabled. + // + // TODO SERVER-70591: Remove feature flag requirement in comment above. if (result.nDeleted == 0 && mode == OplogApplication::Mode::kSecondary && - !requestNss.isChangeStreamPreImagesCollection()) { + !requestNss.isChangeStreamPreImagesCollection() && + !requestNss.isChangeCollection()) { // In FCV 4.4, each node is responsible for deleting the excess documents in // capped collections. This implies that capped deletes may not be synchronized // between nodes at times. 
When upgraded to FCV 5.0, the primary will generate @@ -2195,7 +2192,7 @@ Status applyOperation_inlock(OperationContext* opCtx, timestamp = op.getTimestamp(); } - writeConflictRetry(opCtx, "applyOps_insertGlobalIndexKey", collection->ns().ns(), [&] { + writeConflictRetry(opCtx, "applyOps_insertGlobalIndexKey", collection->ns(), [&] { WriteUnitOfWork wuow(opCtx); if (timestamp != Timestamp::min()) { uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(timestamp)); @@ -2203,7 +2200,7 @@ Status applyOperation_inlock(OperationContext* opCtx, global_index::insertKey( opCtx, - collection, + collectionAcquisition, op.getObject().getObjectField(global_index::kOplogEntryIndexKeyFieldName), op.getObject().getObjectField(global_index::kOplogEntryDocKeyFieldName)); @@ -2219,7 +2216,7 @@ Status applyOperation_inlock(OperationContext* opCtx, timestamp = op.getTimestamp(); } - writeConflictRetry(opCtx, "applyOps_deleteGlobalIndexKey", collection->ns().ns(), [&] { + writeConflictRetry(opCtx, "applyOps_deleteGlobalIndexKey", collection->ns(), [&] { WriteUnitOfWork wuow(opCtx); if (timestamp != Timestamp::min()) { uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(timestamp)); @@ -2227,7 +2224,7 @@ Status applyOperation_inlock(OperationContext* opCtx, global_index::deleteKey( opCtx, - collection, + collectionAcquisition, op.getObject().getObjectField(global_index::kOplogEntryIndexKeyFieldName), op.getObject().getObjectField(global_index::kOplogEntryDocKeyFieldName)); @@ -2275,13 +2272,14 @@ Status applyCommand_inlock(OperationContext* opCtx, const auto& nss = op->getNss(); if (!nss.isValid()) { - return {ErrorCodes::InvalidNamespace, "invalid ns: " + std::string(nss.ns())}; + return {ErrorCodes::InvalidNamespace, "invalid ns: " + nss.toStringForErrorMsg()}; } { auto catalog = CollectionCatalog::get(opCtx); if (!catalog->lookupCollectionByNamespace(opCtx, nss) && catalog->lookupView(opCtx, nss)) { return {ErrorCodes::CommandNotSupportedOnView, - str::stream() << "applyOps not supported on view:" << nss.ns()}; + str::stream() << "applyOps not supported on view:" + << nss.toStringForErrorMsg()}; } } @@ -2401,19 +2399,52 @@ Status applyCommand_inlock(OperationContext* opCtx, auto ns = cmd->parse(opCtx, OpMsgRequest::fromDBAndBody(nss.db(), o))->ns(); - // This error is only possible during initial sync mode. - invariant(mode == OplogApplication::Mode::kInitialSync); + if (mode == OplogApplication::Mode::kInitialSync) { + // Aborting an index build involves writing to the catalog. This write needs to + // be timestamped. It will be given 'writeTime' as the commit timestamp. + TimestampBlock tsBlock(opCtx, writeTime); + abortIndexBuilds(opCtx, + op->getCommandType(), + ns, + "Aborting index builds during initial sync"); + LOGV2_DEBUG(4665901, + 1, + "Conflicting DDL operation encountered during initial sync; " + "aborting index build and retrying", + logAttrs(ns)); + } else { + invariant(!opCtx->lockState()->isLocked()); + + auto swUUID = op->getUuid(); + if (!swUUID) { + LOGV2_ERROR(21261, + "Failed command during oplog application. Expected a UUID", + "command"_attr = redact(o), + logAttrs(ns)); + } - // Aborting an index build involves writing to the catalog. This write needs to be - // timestamped. It will be given 'writeTime' as the commit timestamp. 
- TimestampBlock tsBlock(opCtx, writeTime); - abortIndexBuilds( - opCtx, op->getCommandType(), ns, "Aborting index builds during initial sync"); - LOGV2_DEBUG(4665901, - 1, - "Conflicting DDL operation encountered during initial sync; " - "aborting index build and retrying", - logAttrs(ns)); + LOGV2_DEBUG( + 7702500, + 1, + "Waiting for index build(s) to complete on the namespace before retrying " + "the conflicting operation", + logAttrs(ns), + "oplogEntry"_attr = redact(op->toBSONForLogging())); + + IndexBuildsCoordinator::get(opCtx)->awaitNoIndexBuildInProgressForCollection( + opCtx, swUUID.get()); + + opCtx->recoveryUnit()->abandonSnapshot(); + opCtx->checkForInterrupt(); + + LOGV2_DEBUG( + 51775, + 1, + "Acceptable error during oplog application: background operation in " + "progress for namespace", + logAttrs(ns), + "oplogEntry"_attr = redact(op->toBSONForLogging())); + } break; } diff --git a/src/mongo/db/repl/oplog.h b/src/mongo/db/repl/oplog.h index ccae4432c4c37..9ce47d6119b7c 100644 --- a/src/mongo/db/repl/oplog.h +++ b/src/mongo/db/repl/oplog.h @@ -29,20 +29,35 @@ #pragma once +#include +#include +#include #include +#include #include +#include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/oplog_constraint_violation_logger.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { class Collection; @@ -51,6 +66,7 @@ class Database; class NamespaceString; class OperationContext; class OperationSessionInfo; +class CollectionAcquisition; class Session; using OplogSlot = repl::OpTime; @@ -117,35 +133,36 @@ void createOplog(OperationContext* opCtx, */ void createOplog(OperationContext* opCtx); -/** - * Log insert(s) to the local oplog. - * Returns the OpTime of every insert. - * @param oplogEntryTemplate: a template used to generate insert oplog entries. Callers must set the - * "ns", "ui", "fromMigrate" and "wall" fields before calling this function. This function will then - * augment the template with the "op" (which is set to kInsert), "lsid" and "txnNumber" fields if - * necessary. - * @param begin/end: first/last InsertStatement to be inserted. This function iterates from begin to - * end and generates insert oplog entries based on the augmented oplogEntryTemplate with the "ts", - * "t", "o", "prevOpTime" and "stmtId" fields replaced by the content of each InsertStatement - * defined by the begin-end range. - * @param fromMigrate: a list of 'fromMigrate' values for the inserts. - * - */ -std::vector logInsertOps( - OperationContext* opCtx, - MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - const std::vector& fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr); - /** * Returns the optime of the oplog entry written to the oplog. 
* Returns a null optime if oplog was not modified. */ OpTime logOp(OperationContext* opCtx, MutableOplogEntry* oplogEntry); +/** + * Low level oplog function used by logOp() and similar functions to append + * storage engine records to the oplog collection. + * + * This function has to be called within the scope of a WriteUnitOfWork with + * a valid CollectionPtr reference to the oplog. + * + * @param records a vector of oplog records to be written. Records hold references + * to unowned BSONObj data. + * @param timestamps a vector of respective Timestamp objects for each oplog record. + * @param oplogCollection collection to be written to. + * @param finalOpTime the OpTime of the last oplog record. + * @param wallTime the wall clock time of the last oplog record. + * @param isAbortIndexBuild for tenant migration use only. + */ +void logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild); + // Flush out the cached pointer to the oplog. void clearLocalOplogPtr(ServiceContext* service); @@ -212,6 +229,7 @@ class OplogApplication { // Server will crash on oplog application failure during recovery from stable checkpoint in the // test environment. static void checkOnOplogFailureForRecovery(OperationContext* opCtx, + const mongo::NamespaceString& nss, const mongo::BSONObj& oplogEntry, const std::string& errorMsg); }; @@ -239,7 +257,7 @@ void logOplogConstraintViolation(OperationContext* opCtx, * Returns failure status if the op was an update that could not be applied. */ Status applyOperation_inlock(OperationContext* opCtx, - Database* db, + CollectionAcquisition& collectionAcquisition, const OplogEntryOrGroupedInserts& opOrGroupedInserts, bool alwaysUpsert, OplogApplication::Mode mode, diff --git a/src/mongo/db/repl/oplog_application_bm.cpp b/src/mongo/db/repl/oplog_application_bm.cpp index 9d1cbf39a4140..d135855d398ec 100644 --- a/src/mongo/db/repl/oplog_application_bm.cpp +++ b/src/mongo/db/repl/oplog_application_bm.cpp @@ -27,51 +27,94 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_impl.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_impl.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/global_settings.h" +#include "mongo/db/index_builds_coordinator.h" #include "mongo/db/index_builds_coordinator_mongod.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/oplog_applier_impl.h" +#include "mongo/db/repl/oplog_batcher.h" +#include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_buffer_blocking_queue.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/collection_sharding_state_factory_standalone.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/service_entry_point_mongod.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/storage/execution_control/concurrency_adjustment_parameters_gen.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/recovery_unit_noop.h" -#include "mongo/db/storage/storage_engine_parameters_gen.h" +#include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/storage/storage_options.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log_domain_global.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" #include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/transport/service_entry_point.h" #include "mongo/unittest/temp_dir.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/concurrency/thread_pool.h" -#include "mongo/util/exit.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/periodic_runner.h" 
#include "mongo/util/periodic_runner_factory.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { namespace { @@ -80,7 +123,7 @@ class TestServiceContext { public: TestServiceContext() { // Disable execution control. - gStorageEngineConcurrencyAdjustmentAlgorithm = ""; + gStorageEngineConcurrencyAdjustmentAlgorithm = "fixedConcurrentTransactions"; // Disable server info logging so that the benchmark output is cleaner. logv2::LogManager::global().getGlobalSettings().setMinimumLoggedSeverity( @@ -496,7 +539,7 @@ class Fixture { // Advance timestamps. _testSvcCtx->getReplCoordMock()->setMyLastAppliedOpTimeAndWallTimeForward( - {lastOpTimeInBatch, lastWallTimeInBatch}); + {lastOpTimeInBatch, lastWallTimeInBatch}, true); _testSvcCtx->getReplCoordMock()->setMyLastDurableOpTimeAndWallTimeForward( {lastOpTimeInBatch, lastWallTimeInBatch}); repl::StorageInterface::get(opCtx)->setStableTimestamp( @@ -509,7 +552,7 @@ class Fixture { std::vector _oplogEntries; UUID _foobarUUID; - NamespaceString _foobarNs{"foo.bar"_sd}; + NamespaceString _foobarNs = NamespaceString::createNamespaceString_forTest("foo.bar"_sd); repl::OplogBatcher::BatchLimits _batchLimits{std::numeric_limits::max(), std::numeric_limits::max()}; }; diff --git a/src/mongo/db/repl/oplog_applier.cpp b/src/mongo/db/repl/oplog_applier.cpp index af33eaf9633ec..18820113dbd65 100644 --- a/src/mongo/db/repl/oplog_applier.cpp +++ b/src/mongo/db/repl/oplog_applier.cpp @@ -28,16 +28,30 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include -#include "mongo/db/repl/oplog_applier.h" +#include +#include #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/client.h" +#include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/processinfo.h" -#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -171,9 +185,9 @@ std::unique_ptr makeReplWriterPool(int threadCount, auto client = Client::getCurrent(); AuthorizationSession::get(*client)->grantInternalAuthorization(client); - if (isKillableByStepdown) { + if (!isKillableByStepdown) { stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); + client->setSystemOperationUnkillableByStepdown(lk); } }; auto pool = std::make_unique(options); diff --git a/src/mongo/db/repl/oplog_applier.h b/src/mongo/db/repl/oplog_applier.h index 08ec06108a37e..1123f0c3337c4 100644 --- a/src/mongo/db/repl/oplog_applier.h +++ b/src/mongo/db/repl/oplog_applier.h @@ -31,18 +31,24 @@ #pragma once #include +#include +#include #include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_batcher.h" #include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include 
"mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" #include "mongo/util/functional.h" #include "mongo/util/future.h" #include "mongo/util/net/hostandport.h" diff --git a/src/mongo/db/repl/oplog_applier_impl.cpp b/src/mongo/db/repl/oplog_applier_impl.cpp index 86e7b36872c75..551c2ea967760 100644 --- a/src/mongo/db/repl/oplog_applier_impl.cpp +++ b/src/mongo/db/repl/oplog_applier_impl.cpp @@ -29,31 +29,80 @@ #include "mongo/db/repl/oplog_applier_impl.h" -#include "mongo/db/catalog/collection_catalog.h" +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/change_stream_change_collection_manager.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/client.h" #include "mongo/db/commands/fsync.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/repl/apply_ops.h" +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/initial_syncer.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog_applier_utils.h" #include "mongo/db/repl/oplog_batcher.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/replication_metrics.h" +#include "mongo/db/repl/split_prepare_session_manager.h" #include "mongo/db/repl/transaction_oplog_application.h" -#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/stats/counters.h" #include "mongo/db/stats/timer_stats.h" #include "mongo/db/storage/control/journal_flusher.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/concurrency/mutex.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/log_with_sampling.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -140,14 +189,31 @@ Status _insertDocumentsToOplogAndChangeCollections( 
std::vector::const_iterator end, bool skipWritesToOplog) { WriteUnitOfWork wunit(opCtx); + boost::optional autoOplog; + boost::optional + changeCollectionWriter; + // Acquire locks. We must acquire the locks for all collections we intend to write to before + // performing any writes. This avoids potential deadlocks created by waiting for locks while + // having generated oplog holes. if (!skipWritesToOplog) { - AutoGetOplog autoOplog(opCtx, OplogAccessMode::kWrite); - auto& oplogColl = autoOplog.getCollection(); + autoOplog.emplace(opCtx, OplogAccessMode::kWrite); + } + const bool changeCollectionsMode = + change_stream_serverless_helpers::isChangeCollectionsModeActive(); + if (changeCollectionsMode) { + changeCollectionWriter = boost::make_optional( + ChangeStreamChangeCollectionManager::get(opCtx).createChangeCollectionsWriter( + opCtx, begin, end, nullptr /* opDebug */)); + changeCollectionWriter->acquireLocks(); + } + + // Write entries to the oplog. + if (!skipWritesToOplog) { + auto& oplogColl = autoOplog->getCollection(); if (!oplogColl) { return {ErrorCodes::NamespaceNotFound, "Oplog collection does not exist"}; } - auto status = collection_internal::insertDocuments( opCtx, oplogColl, begin, end, nullptr /* OpDebug */, false /* fromMigrate */); if (!status.isOK()) { @@ -157,14 +223,8 @@ Status _insertDocumentsToOplogAndChangeCollections( // Write the corresponding oplog entries to tenants respective change // collections in the serverless. - if (change_stream_serverless_helpers::isChangeCollectionsModeActive()) { - auto status = - ChangeStreamChangeCollectionManager::get(opCtx).insertDocumentsToChangeCollection( - opCtx, - begin, - end, - !skipWritesToOplog /* hasAcquiredGlobalIXLock */, - nullptr /* OpDebug */); + if (changeCollectionsMode) { + auto status = changeCollectionWriter->write(); if (!status.isOK()) { return status; } @@ -285,6 +345,11 @@ void ApplyBatchFinalizerForJournal::record(const OpTimeAndWallTime& newOpTimeAnd void ApplyBatchFinalizerForJournal::_run() { Client::initThread("ApplyBatchFinalizerForJournal"); + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + while (true) { OpTimeAndWallTime latestOpTimeAndWallTime = {OpTime(), Date_t()}; @@ -824,25 +889,22 @@ void OplogApplierImpl::_deriveOpsAndFillWriterVectors( continue; } - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (repl::feature_flags::gApplyPreparedTxnsInParallel.isEnabledAndIgnoreFCVUnsafe()) { - // Prepare entries in secondary mode do not come in their own batch, extract applyOps - // operations and fill writers with the extracted operations. - if (op.shouldPrepare() && (getOptions().mode == OplogApplication::Mode::kSecondary)) { - auto* partialTxnList = getPartialTxnList(op); - _addOplogChainOpsToWriterVectors( - opCtx, &op, partialTxnList, derivedOps, writerVectors, &collPropertiesCache); - continue; - } + // Prepare entries in secondary mode do not come in their own batch, extract applyOps + // operations and fill writers with the extracted operations. + if (op.shouldPrepare() && (getOptions().mode == OplogApplication::Mode::kSecondary)) { + auto* partialTxnList = getPartialTxnList(op); + _addOplogChainOpsToWriterVectors( + opCtx, &op, partialTxnList, derivedOps, writerVectors, &collPropertiesCache); + continue; + } - // Fill the writers with commit or abort operation. Depending on whether the operation - // refers to a split prepare, it might also be split into multiple ops. 
- if (op.isPreparedCommitOrAbort() && - (getOptions().mode == OplogApplication::Mode::kSecondary)) { - OplogApplierUtils::addDerivedCommitsOrAborts( - opCtx, &op, writerVectors, &collPropertiesCache); - continue; - } + // Fill the writers with commit or abort operation. Depending on whether the operation + // refers to a split prepare, it might also be split into multiple ops. + if (op.isPreparedCommitOrAbort() && + (getOptions().mode == OplogApplication::Mode::kSecondary)) { + OplogApplierUtils::addDerivedCommitsOrAborts( + opCtx, &op, writerVectors, &collPropertiesCache); + continue; } // If we see a commitTransaction command that is a part of a prepared transaction during diff --git a/src/mongo/db/repl/oplog_applier_impl.h b/src/mongo/db/repl/oplog_applier_impl.h index 095fc6a4d5dcd..720b30c1e7d97 100644 --- a/src/mongo/db/repl/oplog_applier_impl.h +++ b/src/mongo/db/repl/oplog_applier_impl.h @@ -30,15 +30,28 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" +#include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/initial_syncer.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/oplog_buffer.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_metrics.h" #include "mongo/db/repl/session_update_tracker.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/executor/task_executor.h" +#include "mongo/util/concurrency/thread_pool.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_applier_impl_test.cpp b/src/mongo/db/repl/oplog_applier_impl_test.cpp index fab8da483ed62..60d0f25da7aa8 100644 --- a/src/mongo/db/repl/oplog_applier_impl_test.cpp +++ b/src/mongo/db/repl/oplog_applier_impl_test.cpp @@ -27,61 +27,115 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include #include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_options.h" -#include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/catalog/import_collection_oplog_entry_gen.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/change_stream_pre_images_collection_manager.h" -#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/feature_compatibility_version_parser.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" #include "mongo/db/pipeline/change_stream_preimage_gen.h" -#include "mongo/db/query/internal_plans.h" -#include "mongo/db/repl/bgsync.h" -#include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/idempotency_test_fixture.h" #include "mongo/db/repl/image_collection_entry_gen.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier.h" -#include "mongo/db/repl/oplog_batcher.h" +#include "mongo/db/repl/oplog_applier_impl.h" +#include "mongo/db/repl/oplog_applier_impl_test_fixture.h" +#include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" #include "mongo/db/repl/oplog_entry_test_helpers.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/storage_interface.h" -#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/internal_session_pool.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" 
#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/db/stats/counters.h" -#include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" -#include "mongo/db/transaction/transaction_participant_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" -#include "mongo/util/md5.hpp" -#include "mongo/util/scopeguard.h" -#include "mongo/util/string_map.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { namespace repl { @@ -162,7 +216,7 @@ TEST_F(OplogApplierImplTestEnableSteadyStateConstraints, TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsInsertDocumentCollectionLookupByUUIDFails) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); - createDatabase(_opCtx.get(), nss.db()); + createDatabase(_opCtx.get(), nss.db_forTest()); NamespaceString otherNss = NamespaceString::createNamespaceString_forTest(nss.getSisterNS("othername")); auto op = makeOplogEntry(OpTypeEnum::kInsert, otherNss, kUuid); @@ -174,7 +228,7 @@ TEST_F(OplogApplierImplTest, TEST_F(OplogApplierImplTestDisableSteadyStateConstraints, applyOplogEntryOrGroupedInsertsDeleteDocumentCollectionLookupByUUIDFails) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); - createDatabase(_opCtx.get(), nss.db()); + createDatabase(_opCtx.get(), nss.db_forTest()); NamespaceString otherNss = NamespaceString::createNamespaceString_forTest(nss.getSisterNS("othername")); auto op = makeOplogEntry(OpTypeEnum::kDelete, otherNss, kUuid); @@ -193,7 +247,7 @@ TEST_F(OplogApplierImplTestDisableSteadyStateConstraints, TEST_F(OplogApplierImplTestEnableSteadyStateConstraints, applyOplogEntryOrGroupedInsertsDeleteDocumentCollectionLookupByUUIDFails) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); - createDatabase(_opCtx.get(), nss.db()); + createDatabase(_opCtx.get(), nss.db_forTest()); NamespaceString otherNss = NamespaceString::createNamespaceString_forTest(nss.getSisterNS("othername")); auto op = makeOplogEntry(OpTypeEnum::kDelete, otherNss, kUuid); @@ -204,7 +258,7 @@ TEST_F(OplogApplierImplTestEnableSteadyStateConstraints, TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsInsertDocumentCollectionMissing) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); - createDatabase(_opCtx.get(), nss.db()); + createDatabase(_opCtx.get(), nss.db_forTest()); // Even though the collection doesn't exist, this is handled in the actual application function, // which in the case of this test 
just ignores such errors. This tests mostly that we don't // implicitly create the collection. @@ -218,7 +272,7 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsInsertDocumentCollec TEST_F(OplogApplierImplTestDisableSteadyStateConstraints, applyOplogEntryOrGroupedInsertsDeleteDocumentCollectionMissing) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); - createDatabase(_opCtx.get(), nss.db()); + createDatabase(_opCtx.get(), nss.db_forTest()); // Even though the collection doesn't exist, this is handled in the actual application function, // which in the case of this test just ignores such errors. This tests mostly that we don't // implicitly create the collection. @@ -239,7 +293,7 @@ TEST_F(OplogApplierImplTestDisableSteadyStateConstraints, TEST_F(OplogApplierImplTestEnableSteadyStateConstraints, applyOplogEntryOrGroupedInsertsDeleteDocumentCollectionMissing) { const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); - createDatabase(_opCtx.get(), nss.db()); + createDatabase(_opCtx.get(), nss.db_forTest()); // With steady state constraints enabled, attempting to delete from a missing collection is an // error. auto op = makeOplogEntry(OpTypeEnum::kDelete, nss, {}); @@ -284,8 +338,8 @@ TEST_F(OplogApplierImplTestEnableSteadyStateConstraints, TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsDeleteDocumentCollectionAndDocExist) { // Setup the pre-images collection. - ChangeStreamPreImagesCollectionManager::createPreImagesCollection(_opCtx.get(), - boost::none /* tenantId */); + ChangeStreamPreImagesCollectionManager::get(_opCtx.get()) + .createPreImagesCollection(_opCtx.get(), boost::none /* tenantId */); const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); createCollection( _opCtx.get(), nss, createRecordChangeStreamPreAndPostImagesCollectionOptions()); @@ -407,8 +461,8 @@ TEST_F(OplogApplierImplTestEnableSteadyStateConstraints, TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsDeleteDocumentCollectionLockedByUUID) { // Setup the pre-images collection. - ChangeStreamPreImagesCollectionManager::createPreImagesCollection(_opCtx.get(), - boost::none /* tenantId */); + ChangeStreamPreImagesCollectionManager::get(_opCtx.get()) + .createPreImagesCollection(_opCtx.get(), boost::none /* tenantId */); const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); CollectionOptions options = createRecordChangeStreamPreAndPostImagesCollectionOptions(); options.uuid = kUuid; @@ -426,8 +480,8 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsDeleteDocumentCollec TEST_F(OplogApplierImplTest, applyOplogEntryToRecordChangeStreamPreImages) { // Setup the pre-images collection. - ChangeStreamPreImagesCollectionManager::createPreImagesCollection(_opCtx.get(), - boost::none /* tenantId */); + ChangeStreamPreImagesCollectionManager::get(_opCtx.get()) + .createPreImagesCollection(_opCtx.get(), boost::none /* tenantId */); // Create the collection. 
const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.t"); @@ -533,7 +587,7 @@ TEST_F(OplogApplierImplTest, CreateCollectionCommand) { auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen()); bool applyCmdCalled = false; _opObserver->onCreateCollectionFn = [&](OperationContext* opCtx, @@ -589,11 +643,11 @@ TEST_F(OplogApplierImplTest, CreateCollectionCommandMultitenantRequireTenantIDFa auto tid{TenantId(OID::gen())}; NamespaceString nss = NamespaceString::createNamespaceString_forTest(tid, "test.foo"); - auto op = - BSON("op" - << "c" - << "ns" << nss.getCommandNS().toStringWithTenantId() << "wall" << Date_t() << "o" - << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen()); + auto op = BSON("op" + << "c" + << "ns" << nss.getCommandNS().toStringWithTenantId_forTest() << "wall" + << Date_t() << "o" << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) + << "ui" << UUID::gen()); bool applyCmdCalled = false; @@ -687,8 +741,8 @@ TEST_F(OplogApplierImplTest, RenameCollectionCommandMultitenant) { const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest(tid, "test.bar"); - auto oRename = BSON("renameCollection" << sourceNss.toString() << "to" << targetNss.toString() - << "tid" << tid); + auto oRename = BSON("renameCollection" << sourceNss.toString_forTest() << "to" + << targetNss.toString_forTest() << "tid" << tid); repl::createCollection(_opCtx.get(), sourceNss, {}); // createCollection uses an actual opTime, so we must generate an actually opTime in the future. @@ -717,8 +771,8 @@ TEST_F(OplogApplierImplTest, RenameCollectionCommandMultitenantRequireTenantIDFa const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest(tid, "test.bar"); - auto oRename = BSON("renameCollection" << sourceNss.toStringWithTenantId() << "to" - << targetNss.toStringWithTenantId()); + auto oRename = BSON("renameCollection" << sourceNss.toStringWithTenantId_forTest() << "to" + << targetNss.toStringWithTenantId_forTest()); repl::createCollection(_opCtx.get(), sourceNss, {}); // createCollection uses an actual opTime, so we must generate an actually opTime in the future. @@ -748,12 +802,12 @@ TEST_F(OplogApplierImplTest, RenameCollectionCommandMultitenantAcrossTenantsRequ const NamespaceString targetNss = NamespaceString::createNamespaceString_forTest(tid, "test.bar"); const NamespaceString wrongTargetNss = - NamespaceString::createNamespaceString_forTest(wrongTid, targetNss.toString()); + NamespaceString::createNamespaceString_forTest(wrongTid, targetNss.toString_forTest()); ASSERT_NE(sourceNss, wrongTargetNss); - auto oRename = BSON("renameCollection" << sourceNss.toStringWithTenantId() << "to" - << wrongTargetNss.toStringWithTenantId()); + auto oRename = BSON("renameCollection" << sourceNss.toStringWithTenantId_forTest() << "to" + << wrongTargetNss.toStringWithTenantId_forTest()); repl::createCollection(_opCtx.get(), sourceNss, {}); // createCollection uses an actual opTime, so we must generate an actually opTime in the future. 
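Editor's note: the tests above and below assemble raw command oplog entries inline with the BSON macro. Purely as a reading aid, the hypothetical helper below (not part of this patch) spells out what each field in such an entry carries, mirroring the layout of the CreateCollectionCommand test above.

```cpp
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/util/time_support.h"
#include "mongo/util/uuid.h"

namespace mongo {

// Hypothetical test helper: build the kind of raw 'create' command oplog entry the
// tests above construct inline.
BSONObj makeCreateCommandOplogEntryForTest(const NamespaceString& nss,
                                           Timestamp ts,
                                           const UUID& uuid) {
    return BSON("op" << "c"                                      // command entry
                     << "ns" << nss.getCommandNS().ns_forTest()  // the database's $cmd namespace
                     << "ui" << uuid                             // target collection UUID
                     << "o" << BSON("create" << nss.coll())      // the command object itself
                     << "ts" << ts                               // optime timestamp
                     << "wall" << Date_t());                     // wall-clock time
}

}  // namespace mongo
```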
@@ -1067,7 +1121,7 @@ TEST_F(OplogApplierImplTest, OperationSessionInfo sessionInfo; sessionInfo.setSessionId(sessionId); sessionInfo.setTxnNumber(3); - const NamespaceString& nss{"test", "foo"}; + const NamespaceString& nss = NamespaceString::createNamespaceString_forTest("test", "foo"); repl::OpTime firstInsertOpTime(Timestamp(1, 0), 1); auto firstRetryableOp = makeInsertDocumentOplogEntryWithSessionInfo( firstInsertOpTime, nss, BSON("_id" << 1), sessionInfo); @@ -1157,7 +1211,8 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsInsertDocumentIncorr TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsDeleteDocumentIncludesTenantId) { // Setup the pre-images collection. const TenantId tid(OID::gen()); - ChangeStreamPreImagesCollectionManager::createPreImagesCollection(_opCtx.get(), tid); + ChangeStreamPreImagesCollectionManager::get(_opCtx.get()) + .createPreImagesCollection(_opCtx.get(), tid); setServerParameter("multitenancySupport", true); setServerParameter("featureFlagRequireTenantID", true); const NamespaceString nss = NamespaceString::createNamespaceString_forTest(tid, "test.t"); @@ -1295,7 +1350,9 @@ TEST_F(OplogApplierImplTest, applyOplogEntryOrGroupedInsertsUpdateDocumentIncorr class MultiOplogEntryOplogApplierImplTest : public OplogApplierImplTest { public: MultiOplogEntryOplogApplierImplTest() - : _nss1("test.preptxn1"), _nss2("test.preptxn2"), _txnNum(1) {} + : _nss1(NamespaceString::createNamespaceString_forTest("test.preptxn1")), + _nss2(NamespaceString::createNamespaceString_forTest("test.preptxn2")), + _txnNum(1) {} protected: void setUp() override { @@ -1314,8 +1371,8 @@ class MultiOplogEntryOplogApplierImplTest : public OplogApplierImplTest { cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" - << BSON("_id" << 1))) + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 + << "o" << BSON("_id" << 1))) << "partialTxn" << true), _lsid, _txnNum, @@ -1326,8 +1383,8 @@ class MultiOplogEntryOplogApplierImplTest : public OplogApplierImplTest { cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o" - << BSON("_id" << 2))) + << "ns" << _nss2.ns_forTest() << "ui" << *_uuid2 + << "o" << BSON("_id" << 2))) << "partialTxn" << true), _lsid, _txnNum, @@ -1338,8 +1395,8 @@ class MultiOplogEntryOplogApplierImplTest : public OplogApplierImplTest { cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o" - << BSON("_id" << 3)))), + << "ns" << _nss2.ns_forTest() << "ui" << *_uuid2 + << "o" << BSON("_id" << 3)))), _lsid, _txnNum, {StmtId(2)}, @@ -1356,7 +1413,8 @@ class MultiOplogEntryOplogApplierImplTest : public OplogApplierImplTest { // and there's no guarantee of the order. _insertedDocs[nss].insert(docs.begin(), docs.end()); } else - FAIL("Unexpected insert") << " into " << nss << " first doc: " << docs.front(); + FAIL("Unexpected insert") + << " into " << nss.toStringForErrorMsg() << " first doc: " << docs.front(); }; _writerPool = makeReplWriterPool(); @@ -1505,8 +1563,9 @@ TEST_F(MultiOplogEntryOplogApplierImplTest, MultiApplyUnpreparedTransactionTwoBa cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << (i == 1 ? _nss2.ns() : _nss1.ns()) << "ui" - << (i == 1 ? *_uuid2 : *_uuid1) << "o" + << "ns" + << (i == 1 ? _nss2.ns_forTest() : _nss1.ns_forTest()) + << "ui" << (i == 1 ? 
*_uuid2 : *_uuid1) << "o" << insertDocs.back())) << "partialTxn" << true), _lsid, @@ -1583,7 +1642,7 @@ TEST_F(MultiOplogEntryOplogApplierImplTest, MultiApplyTwoTransactionsOneBatch) { cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 << "o" << BSON("_id" << 1))) << "partialTxn" << true), _lsid, @@ -1595,7 +1654,7 @@ TEST_F(MultiOplogEntryOplogApplierImplTest, MultiApplyTwoTransactionsOneBatch) { cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 << "o" << BSON("_id" << 2))) << "partialTxn" << true), @@ -1608,7 +1667,7 @@ TEST_F(MultiOplogEntryOplogApplierImplTest, MultiApplyTwoTransactionsOneBatch) { cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 << "o" << BSON("_id" << 3))) << "partialTxn" << true), _lsid, @@ -1620,7 +1679,7 @@ TEST_F(MultiOplogEntryOplogApplierImplTest, MultiApplyTwoTransactionsOneBatch) { cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 << "o" << BSON("_id" << 4))) << "partialTxn" << true), _lsid, @@ -1712,7 +1771,7 @@ class MultiOplogEntryOplogApplierImplTestMultitenant : public OplogApplierImplTe // there's no guarantee of the order. (_docs[collNss]).push_back(BSON("create" << collNss.coll())); } else - FAIL("Unexpected create") << " on " << collNss; + FAIL("Unexpected create") << " on " << collNss.toStringForErrorMsg(); }; _opObserver->onInsertsFn = [&](OperationContext*, @@ -1727,7 +1786,8 @@ class MultiOplogEntryOplogApplierImplTestMultitenant : public OplogApplierImplTe // and there's no guarantee of the order. 
(_docs[nss]).insert(_docs[nss].end(), docs.begin(), docs.end()); } else - FAIL("Unexpected insert") << " into " << nss << " first doc: " << docs.front(); + FAIL("Unexpected insert") + << " into " << nss.toStringForErrorMsg() << " first doc: " << docs.front(); }; _writerPool = makeReplWriterPool(); @@ -1767,11 +1827,12 @@ TEST_F(MultiOplogEntryOplogApplierImplTestMultitenant, ops.push_back(makeCommandOplogEntryWithSessionInfoAndStmtIds( {Timestamp(Seconds(1), 1), 1LL}, _cmdNss, - BSON("applyOps" << BSON_ARRAY(BSON("op" - << "c" - << "tid" << _tenantId << "ns" << _nss.ns() << "ui" - << *_uuid << "o" << BSON("create" << _nss.coll()))) - << "partialTxn" << true), + BSON( + "applyOps" << BSON_ARRAY(BSON("op" + << "c" + << "tid" << _tenantId << "ns" << _nss.ns_forTest() << "ui" + << *_uuid << "o" << BSON("create" << _nss.coll()))) + << "partialTxn" << true), _lsid, _txnNum, {StmtId(0)}, @@ -1782,8 +1843,8 @@ TEST_F(MultiOplogEntryOplogApplierImplTestMultitenant, _cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "tid" << _tenantId << "ns" << _nss.ns() << "ui" - << *_uuid << "o" << BSON("_id" << 1))) + << "tid" << _tenantId << "ns" << _nss.ns_forTest() + << "ui" << *_uuid << "o" << BSON("_id" << 1))) << "partialTxn" << true), _lsid, _txnNum, @@ -1839,8 +1900,8 @@ TEST_F(MultiOplogEntryOplogApplierImplTestMultitenant, _cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "c" - << "ns" << _nss.toStringWithTenantId() << "ui" << *_uuid - << "o" << BSON("create" << _nss.coll()))) + << "ns" << _nss.toStringWithTenantId_forTest() << "ui" + << *_uuid << "o" << BSON("create" << _nss.coll()))) << "partialTxn" << true), _lsid, _txnNum, @@ -1852,8 +1913,8 @@ TEST_F(MultiOplogEntryOplogApplierImplTestMultitenant, _cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss.toStringWithTenantId() << "ui" << *_uuid - << "o" << BSON("_id" << 1))) + << "ns" << _nss.toStringWithTenantId_forTest() << "ui" + << *_uuid << "o" << BSON("_id" << 1))) << "partialTxn" << true), _lsid, _txnNum, @@ -1907,8 +1968,8 @@ class MultiOplogEntryPreparedTransactionTest : public MultiOplogEntryOplogApplie _nss1, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss2.ns() << "ui" << *_uuid2 << "o" - << BSON("_id" << 3))) + << "ns" << _nss2.ns_forTest() << "ui" << *_uuid2 + << "o" << BSON("_id" << 3))) << "prepare" << true), _lsid, _txnNum, @@ -1919,8 +1980,8 @@ class MultiOplogEntryPreparedTransactionTest : public MultiOplogEntryOplogApplie _nss1, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" - << BSON("_id" << 0))) + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 + << "o" << BSON("_id" << 0))) << "prepare" << true), _lsid, _txnNum, @@ -2464,7 +2525,7 @@ class MultiPreparedTransactionsInOneBatchTest : public MultiOplogEntryOplogAppli void setUp() override { MultiOplogEntryOplogApplierImplTest::setUp(); - const NamespaceString cmdNss("admin.$cmd"); + const NamespaceString cmdNss = NamespaceString::createNamespaceString_forTest("admin.$cmd"); _lsid1 = makeLogicalSessionId(_opCtx.get()); _lsid2 = makeLogicalSessionId(_opCtx.get()); _txnNum1 = _txnNum2 = 1; @@ -2474,12 +2535,12 @@ class MultiPreparedTransactionsInOneBatchTest : public MultiOplogEntryOplogAppli cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" - << BSON("_id" << 1)) + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 + << "o" << BSON("_id" << 1)) << BSON("op" << "i" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" - << BSON("_id" << 
2))) + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 + << "o" << BSON("_id" << 2))) << "prepare" << true), _lsid1, _txnNum1, @@ -2494,12 +2555,12 @@ class MultiPreparedTransactionsInOneBatchTest : public MultiOplogEntryOplogAppli cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "d" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" - << BSON("_id" << 3)) + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 + << "o" << BSON("_id" << 3)) << BSON("op" << "d" - << "ns" << _nss1.ns() << "ui" << *_uuid1 << "o" - << BSON("_id" << 4))) + << "ns" << _nss1.ns_forTest() << "ui" << *_uuid1 + << "o" << BSON("_id" << 4))) << "prepare" << true), _lsid2, _txnNum2, @@ -2537,7 +2598,7 @@ class MultiPreparedTransactionsInOneBatchTest : public MultiOplogEntryOplogAppli // and there's no guarantee of the order. _deletedDocs[nss]++; } else - FAIL("Unexpected delete") << " from " << nss; + FAIL("Unexpected delete") << " from " << nss.toStringForErrorMsg(); }; } @@ -3206,7 +3267,8 @@ TEST_F(OplogApplierImplTest, nullptr, nullptr, OplogApplier::Options(OplogApplication::Mode::kInitialSync)); NamespaceString nss = NamespaceString::createNamespaceString_forTest( "test." + _agent.getSuiteName() + "_" + _agent.getTestName()); - NamespaceString badNss("test." + _agent.getSuiteName() + "_" + _agent.getTestName() + "bad"); + NamespaceString badNss = NamespaceString::createNamespaceString_forTest( + "test." + _agent.getSuiteName() + "_" + _agent.getTestName() + "bad"); auto doc1 = BSON("_id" << 1); auto keyPattern = BSON("a" << 1); auto doc3 = BSON("_id" << 3); @@ -3512,8 +3574,9 @@ TEST_F(IdempotencyTest, CreateCollectionWithView) { runOpInitialSync(makeCreateCollectionOplogEntry(nextOpTime(), viewNss, options.toBSON()))); auto viewDoc = BSON( - "_id" << NamespaceString::createNamespaceString_forTest(_nss.db(), "view").ns() << "viewOn" - << _nss.coll() << "pipeline" << fromjson("[ { '$project' : { 'x' : 1 } } ]")); + "_id" + << NamespaceString::createNamespaceString_forTest(_nss.db_forTest(), "view").ns_forTest() + << "viewOn" << _nss.coll() << "pipeline" << fromjson("[ { '$project' : { 'x' : 1 } } ]")); auto insertViewOp = makeInsertDocumentOplogEntry(nextOpTime(), viewNss, viewDoc); auto dropColl = makeCommandOplogEntry(nextOpTime(), _nss, BSON("drop" << _nss.coll())); @@ -3696,10 +3759,6 @@ class OplogApplierImplTxnTableTest : public OplogApplierImplTest { // secondary index creation does not. We use an UnreplicatedWritesBlock to avoid // timestamping any of the catalog setup. 
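The multitenant fixture hunks above build `applyOps` oplog entries whose inner operation carries either an explicit `tid` field alongside the plain namespace, or a namespace string already prefixed with the tenant id. A rough sketch of the BSON shape being produced (the function name and literal values are invented for illustration):

```cpp
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/tenant_id.h"
#include "mongo/util/uuid.h"

namespace mongo {
namespace repl {

// Illustrative only: an applyOps payload with a tenant id on the inner insert,
// mirroring what the multitenant fixtures above feed to the oplog applier.
BSONObj makeTenantApplyOpsPayloadExample(const TenantId& tid, const UUID& uuid) {
    return BSON("applyOps" << BSON_ARRAY(BSON("op"
                                              << "i"
                                              << "tid" << tid << "ns"
                                              << "test.t"
                                              << "ui" << uuid << "o" << BSON("_id" << 1)))
                           << "partialTxn" << true);
}

}  // namespace repl
}  // namespace mongo
```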
repl::UnreplicatedWritesBlock noRep(_opCtx.get()); - MongoDSessionCatalog::set( - _opCtx->getServiceContext(), - std::make_unique( - std::make_unique())); auto mongoDSessionCatalog = MongoDSessionCatalog::get(_opCtx.get()); mongoDSessionCatalog->onStepUp(_opCtx.get()); @@ -3996,7 +4055,7 @@ TEST_F(OplogApplierImplTxnTableTest, RetryableWriteThenMultiStatementTxnWriteOnS cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss().ns() << "ui" << uuid << "o" + << "ns" << nss().ns_forTest() << "ui" << uuid << "o" << BSON("_id" << 2))) << "partialTxn" << true), sessionId, @@ -4055,7 +4114,7 @@ TEST_F(OplogApplierImplTxnTableTest, MultiStatementTxnWriteThenRetryableWriteOnS cmdNss, BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss().ns() << "ui" << uuid << "o" + << "ns" << nss().ns_forTest() << "ui" << uuid << "o" << BSON("_id" << 2))) << "partialTxn" << true), sessionId, @@ -5030,8 +5089,8 @@ TEST_F(IdempotencyTestTxns, CommitPreparedTransactionIgnoresNamespaceNotFoundErr class PreparedTxnSplitTest : public OplogApplierImplTest { public: PreparedTxnSplitTest() - : _nss("test.prepTxnSplit"), - _cmdNss("admin.$cmd"), + : _nss(NamespaceString::createNamespaceString_forTest("test.prepTxnSplit")), + _cmdNss(NamespaceString::createNamespaceString_forTest("admin.$cmd")), _uuid(UUID::gen()), _txnNum1(1), _txnNum2(2) {} @@ -5147,10 +5206,11 @@ TEST_F(PreparedTxnSplitTest, MultiplePrepareTxnsInSameBatch) { for (int i = 0; i < kNumEntries; i++) { cruds1.push_back(BSON("op" << "i" - << "ns" << _nss.ns() << "ui" << *_uuid << "o" << BSON("_id" << i))); + << "ns" << _nss.ns_forTest() << "ui" << *_uuid << "o" + << BSON("_id" << i))); cruds2.push_back(BSON("op" << "i" - << "ns" << _nss.ns() << "ui" << *_uuid << "o" + << "ns" << _nss.ns_forTest() << "ui" << *_uuid << "o" << BSON("_id" << i + kNumEntries))); } @@ -5362,7 +5422,8 @@ class GlobalIndexTest : public OplogApplierImplTest { OperationSessionInfo sessionInfo; sessionInfo.setSessionId(sessionId); sessionInfo.setTxnNumber(3); - const NamespaceString& nss{"admin", "$cmd"}; + const NamespaceString& nss = + NamespaceString::createNamespaceString_forTest("admin", "$cmd"); repl::OpTime opTime(Timestamp(1, 0), 1); return {makeOplogEntry(opTime, // optime diff --git a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp index 0d2856fdd9ab5..34fc814f79a85 100644 --- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp +++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.cpp @@ -29,28 +29,58 @@ #include "mongo/db/repl/oplog_applier_impl_test_fixture.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/health_log.h" #include "mongo/db/catalog/health_log_interface.h" +#include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/logical_time.h" +#include 
"mongo/db/multi_key_path_tracker.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" #include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/repl/replication_process.h" -#include "mongo/db/repl/replication_recovery_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" +#include "mongo/util/version/releases.h" namespace mongo { namespace repl { @@ -60,7 +90,8 @@ void OplogApplierImplOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (!onInsertsFn) { return; } @@ -75,7 +106,8 @@ void OplogApplierImplOpObserver::onInserts(OperationContext* opCtx, void OplogApplierImplOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (!onDeleteFn) { return; } @@ -83,7 +115,8 @@ void OplogApplierImplOpObserver::onDelete(OperationContext* opCtx, } void OplogApplierImplOpObserver::onUpdate(OperationContext* opCtx, - const OplogUpdateEntryArgs& args) { + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (!onUpdateFn) { return; } @@ -109,12 +142,19 @@ void OplogApplierImplOpObserver::onRenameCollection(OperationContext* opCtx, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) { + bool stayTemp, + bool markFromMigrate) { if (!onRenameCollectionFn) { return; } - onRenameCollectionFn( - opCtx, fromCollection, toCollection, uuid, dropTargetUUID, numRecords, stayTemp); + onRenameCollectionFn(opCtx, + fromCollection, + toCollection, + uuid, + dropTargetUUID, + numRecords, + stayTemp, + markFromMigrate); } void OplogApplierImplOpObserver::onCreateIndex(OperationContext* opCtx, @@ -151,15 +191,6 @@ void OplogApplierImplOpObserver::onCollMod(OperationContext* opCtx, onCollModFn(opCtx, nss, uuid, collModCmd, oldCollOptions, indexInfo); } -std::unique_ptr -OplogApplierImplOpObserver::preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) { - return std::make_unique(/*prepare=*/true); -} - void OplogApplierImplTest::setUp() { ServiceContextMongoDTest::setUp(); @@ -422,7 +453,8 @@ StatusWith CollectionReader::next() { auto state = _exec->getNext(&obj, nullptr); if (state == PlanExecutor::IS_EOF) { return {ErrorCodes::CollectionIsEmpty, - str::stream() << "no more documents 
in " << _collToScan.getNss()}; + str::stream() << "no more documents in " + << _collToScan.getNss().toStringForErrorMsg()}; } // PlanExecutors that do not yield should only return ADVANCED or EOF. @@ -496,7 +528,7 @@ CollectionOptions createRecordChangeStreamPreAndPostImagesCollectionOptions() { void createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options) { - writeConflictRetry(opCtx, "createCollection", nss.ns(), [&] { + writeConflictRetry(opCtx, "createCollection", nss, [&] { Lock::DBLock dbLk(opCtx, nss.dbName(), MODE_IX); Lock::CollectionLock collLk(opCtx, nss, MODE_X); @@ -522,7 +554,7 @@ void createDatabase(OperationContext* opCtx, StringData dbName) { Lock::GlobalWrite globalLock(opCtx); bool justCreated; auto databaseHolder = DatabaseHolder::get(opCtx); - const DatabaseName tenantDbName(boost::none, dbName); + const DatabaseName tenantDbName = DatabaseName::createDatabaseName_forTest(boost::none, dbName); auto db = databaseHolder->openDb(opCtx, tenantDbName, &justCreated); ASSERT_TRUE(db); ASSERT_TRUE(justCreated); diff --git a/src/mongo/db/repl/oplog_applier_impl_test_fixture.h b/src/mongo/db/repl/oplog_applier_impl_test_fixture.h index 3d3efe386613e..56f88039d7001 100644 --- a/src/mongo/db/repl/oplog_applier_impl_test_fixture.h +++ b/src/mongo/db/repl/oplog_applier_impl_test_fixture.h @@ -29,16 +29,50 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/oplog_applier_impl.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_consistency_markers.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_txn_record_gen.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -79,7 +113,8 @@ class OplogApplierImplOpObserver : public OpObserverNoop { std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override; /** * This function is called whenever OplogApplierImpl deletes a document from a collection. 
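The fixture changes above extend each observer hook override with a trailing `OpStateAccumulator*` parameter defaulted to `nullptr`, so the existing `onInsertsFn`/`onDeleteFn`/`onUpdateFn` hook lambdas keep their old shapes. A self-contained toy version of that pattern (all names here are stand-ins, not MongoDB types):

```cpp
#include <functional>
#include <iostream>

// Stand-ins for the real interfaces: the observer interface grows a trailing
// accumulator parameter, and the test double defaults it so existing hook
// lambdas that never look at the accumulator remain unchanged.
struct OpStateAccumulatorStub {};

class ObserverInterface {
public:
    virtual ~ObserverInterface() = default;
    virtual void onInsert(int doc, OpStateAccumulatorStub* acc = nullptr) = 0;
};

class TestObserver : public ObserverInterface {
public:
    void onInsert(int doc, OpStateAccumulatorStub* acc = nullptr) override {
        if (onInsertFn) {
            onInsertFn(doc);  // The hook ignores the new parameter, as in the fixture above.
        }
    }
    std::function<void(int)> onInsertFn;
};

int main() {
    TestObserver observer;
    observer.onInsertFn = [](int doc) { std::cout << "observed insert of " << doc << '\n'; };
    observer.onInsert(42);
    return 0;
}
```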
@@ -87,12 +122,15 @@ class OplogApplierImplOpObserver : public OpObserverNoop { void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) override; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; /** * This function is called whenever OplogApplierImpl updates a document in a collection. */ - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; /** * Called when OplogApplierImpl creates a collection. @@ -108,14 +146,14 @@ class OplogApplierImplOpObserver : public OpObserverNoop { /** * Called when OplogApplierImpl renames a collection. */ - using OpObserver::onRenameCollection; void onRenameCollection(OperationContext* opCtx, const NamespaceString& fromCollection, const NamespaceString& toCollection, const UUID& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) override; + bool stayTemp, + bool markFromMigrate) override; /** * Called when OplogApplierImpl creates an index. @@ -145,16 +183,6 @@ class OplogApplierImplOpObserver : public OpObserverNoop { const CollectionOptions& oldCollOptions, boost::optional indexInfo) override; - /** - * Called when OplogApplierImpl prepares a multi-doc transaction using the - * TransactionParticipant. - */ - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) override; - // Hooks for OpObserver functions. Defaults to a no-op function but may be overridden to // check actual documents mutated. std::function&)> @@ -179,6 +207,7 @@ class OplogApplierImplOpObserver : public OpObserverNoop { boost::optional, boost::optional, std::uint64_t, + bool, bool)> onRenameCollectionFn; diff --git a/src/mongo/db/repl/oplog_applier_test.cpp b/src/mongo/db/repl/oplog_applier_test.cpp index e0e21698f8353..bd7f7c8c71c86 100644 --- a/src/mongo/db/repl/oplog_applier_test.cpp +++ b/src/mongo/db/repl/oplog_applier_test.cpp @@ -27,21 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include #include -#include +#include + +#include +#include -#include "mongo/db/commands/txn_cmds_gen.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/oplog_batcher_test_fixture.h" #include "mongo/db/repl/oplog_buffer_blocking_queue.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { @@ -75,29 +85,26 @@ StatusWith OplogApplierMock::_applyOplogBatch(OperationContext* opCtx, return OpTime(); } -class OplogApplierTest : public unittest::Test { +class OplogApplierTest : public ServiceContextTest { public: void setUp() override; void tearDown() override; virtual OperationContext* opCtx() { - return _opCtxNoop.get(); + return _opCtxHolder.get(); } protected: std::unique_ptr _buffer; std::unique_ptr _applier; - std::unique_ptr _opCtxNoop; + ServiceContext::UniqueOperationContext _opCtxHolder; OplogApplier::BatchLimits _limits; }; void OplogApplierTest::setUp() { _buffer = std::make_unique(nullptr); _applier = std::make_unique(_buffer.get()); - // The OplogApplier interface expects an OperationContext* but the mock implementations in this - // test will not be dereferencing the pointer. Therefore, it is sufficient to use an - // OperationContextNoop. - _opCtxNoop = std::make_unique(); + _opCtxHolder = makeOperationContext(); _limits.bytes = std::numeric_limits::max(); _limits.ops = std::numeric_limits::max(); @@ -105,12 +112,12 @@ void OplogApplierTest::setUp() { void OplogApplierTest::tearDown() { _limits = {}; - _opCtxNoop = {}; + _opCtxHolder = {}; _applier = {}; _buffer = {}; } -constexpr auto dbName = "test"_sd; +const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "test"_sd); TEST_F(OplogApplierTest, GetNextApplierBatchGroupsCrudOps) { std::vector srcOps; @@ -204,15 +211,11 @@ TEST_F(OplogApplierTest, GetNextApplierBatchGroupsPreparedApplyOpsOrPreparedComm ASSERT_EQUALS(srcOps[0], batch[0]); ASSERT_EQUALS(srcOps[1], batch[1]); - // Prepared commit needs to be processed individually. + // Prepared commit or abort must start a new batch with commits or aborts only. batch = unittest::assertGet(_applier->getNextApplierBatch(opCtx(), _limits)); - ASSERT_EQUALS(1U, batch.size()) << toString(batch); + ASSERT_EQUALS(2U, batch.size()) << toString(batch); ASSERT_EQUALS(srcOps[2], batch[0]); - - // Prepared abort needs to be processed individually. - batch = unittest::assertGet(_applier->getNextApplierBatch(opCtx(), _limits)); - ASSERT_EQUALS(1U, batch.size()) << toString(batch); - ASSERT_EQUALS(srcOps[3], batch[0]); + ASSERT_EQUALS(srcOps[3], batch[1]); // Prepares can be batched together. 
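The updated expectations above capture the new batching rule for secondary oplog application: consecutive prepared commitTransaction/abortTransaction entries may now share a batch, while any other entry arriving after a commit or abort cuts the batch (the OplogBatcher changes later in this diff add a `commitOrAbortOps` counter for exactly this). A simplified, self-contained sketch of that decision logic, assuming a toy `Entry`/`BatchStats` model rather than the real OplogBatcher types:

```cpp
#include <cstddef>

// Toy model of the batching decision described by the tests above.
enum class BatchAction { kContinueBatch, kStartNewBatch, kProcessIndividually };

struct BatchStats {
    std::size_t prepareOps = 0;
    std::size_t commitOrAbortOps = 0;
};

struct Entry {
    bool isPrepare = false;
    bool isPreparedCommitOrAbort = false;
    bool mustBeAppliedIndividually = false;
};

// Illustrative cap only; the real constant lives in the OplogBatcher.
constexpr std::size_t kMaxPrepareOpsPerBatch = 128;

BatchAction getBatchAction(const Entry& entry, const BatchStats& stats) {
    // Anything that is not itself a commit/abort cannot join a batch that already holds one.
    auto continueOrStartNewBatch = [&] {
        return stats.commitOrAbortOps > 0 ? BatchAction::kStartNewBatch
                                          : BatchAction::kContinueBatch;
    };
    if (entry.mustBeAppliedIndividually) {
        return BatchAction::kProcessIndividually;
    }
    if (entry.isPrepare) {
        // Prepares batch together, but overly large groups of prepares cut the batch.
        return stats.prepareOps >= kMaxPrepareOpsPerBatch ? BatchAction::kStartNewBatch
                                                          : continueOrStartNewBatch();
    }
    if (entry.isPreparedCommitOrAbort) {
        // A commit/abort starts a new batch unless the batch already holds only commits/aborts.
        return stats.commitOrAbortOps == 0 ? BatchAction::kStartNewBatch
                                           : BatchAction::kContinueBatch;
    }
    return continueOrStartNewBatch();
}
```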
batch = unittest::assertGet(_applier->getNextApplierBatch(opCtx(), _limits)); @@ -239,17 +242,17 @@ TEST_F(OplogApplierTest, GetNextApplierBatchGroupsCrudOpsWithPreparedApplyOpsOrP ASSERT_EQUALS(srcOps[1], batch[1]); ASSERT_EQUALS(srcOps[2], batch[2]); - // Prepared commit needs to be processed individually. + // Prepared commit must start a new batch. batch = unittest::assertGet(_applier->getNextApplierBatch(opCtx(), _limits)); ASSERT_EQUALS(1U, batch.size()) << toString(batch); ASSERT_EQUALS(srcOps[3], batch[0]); - // Due to the next prepared abort, this insert is in a batch of 1. + // CRUD op cannot be in the same batch with the previous prepared commit. batch = unittest::assertGet(_applier->getNextApplierBatch(opCtx(), _limits)); ASSERT_EQUALS(1U, batch.size()) << toString(batch); ASSERT_EQUALS(srcOps[4], batch[0]); - // Prepared abort needs to be processed individually. + // Prepared abort must start a new batch. batch = unittest::assertGet(_applier->getNextApplierBatch(opCtx(), _limits)); ASSERT_EQUALS(1U, batch.size()) << toString(batch); ASSERT_EQUALS(srcOps[5], batch[0]); @@ -434,36 +437,22 @@ TEST_F(OplogApplierTest, LastOpInLargeTransactionIsProcessedIndividually) { ASSERT_EQUALS(srcOps[4], batch[0]); } -class OplogApplierDelayTest : public OplogApplierTest, public ScopedGlobalServiceContextForTest { +class OplogApplierDelayTest : public OplogApplierTest { public: void setUp() override { OplogApplierTest::setUp(); - auto* service = getServiceContext(); - _origThreadName = *getThreadNameRef().get(); - Client::initThread("OplogApplierDelayTest", service, nullptr); - _mockClock = std::make_shared(); // Avoid any issues due to a clock exactly at 0 (e.g. dates being default Date_t()); + _mockClock = std::make_shared(); _mockClock->advance(Milliseconds(60000)); + + auto service = getServiceContext(); service->setFastClockSource(std::make_unique(_mockClock)); service->setPreciseClockSource(std::make_unique(_mockClock)); - // The delay tests need a real operation context to use the service context clock. - _opCtxHolder = cc().makeOperationContext(); - // Use a smaller limit for these tests. _limits.ops = 3; } - void tearDown() override { - _opCtxHolder = nullptr; - Client::releaseCurrent(); - OplogApplierTest::tearDown(); - setThreadName(_origThreadName); - } - - OperationContext* opCtx() override { - return _opCtxHolder.get(); - } // Wait for the opCtx to be waited on, or for killWaits() to be run. bool waitForWait() { diff --git a/src/mongo/db/repl/oplog_applier_utils.cpp b/src/mongo/db/repl/oplog_applier_utils.cpp index 2386e52a70bce..d98e6dcd03345 100644 --- a/src/mongo/db/repl/oplog_applier_utils.cpp +++ b/src/mongo/db/repl/oplog_applier_utils.cpp @@ -27,27 +27,68 @@ * it in the license file. 
*/ +#include #include - -#include "mongo/db/catalog_raii.h" -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonelement_comparator.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/document_validation.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_index.h" #include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_applier_utils.h" +#include "mongo/db/repl/oplog_constraint_violation_logger.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/split_prepare_session_manager.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_role.h" #include "mongo/db/stats/counters.h" -#include "mongo/util/fail_point.h" - +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -76,12 +117,12 @@ CachedCollectionProperties::getCollectionProperties(OperationContext* opCtx, namespace { /** - * Updates a CRUD op's hash and isForCappedCollection field if necessary. + * Populates a CRUD op's idHash and updates the isForCappedCollection field if necessary. */ void processCrudOp(OperationContext* opCtx, OplogEntry* op, - uint32_t* hash, - const CachedCollectionProperties::CollectionProperties& collProperties) { + const CachedCollectionProperties::CollectionProperties& collProperties, + boost::optional& idHash) { // Include the _id of the document in the hash so we get parallelism even if all writes are to a // single collection. // @@ -99,8 +140,7 @@ void processCrudOp(OperationContext* opCtx, }(); BSONElementComparator elementHasher(BSONElementComparator::FieldNamesMode::kIgnore, collProperties.collator); - const size_t idHash = elementHasher.hash(id); - MurmurHash3_x86_32(&idHash, sizeof(idHash), *hash, hash); + idHash.emplace(elementHasher.hash(id)); } if (op->getOpType() == OpTypeEnum::kInsert && collProperties.isCapped) { @@ -119,21 +159,18 @@ uint32_t getWriterId(OperationContext* opCtx, CachedCollectionProperties* collPropertiesCache, uint32_t numWriters, boost::optional forceWriterId = boost::none) { + boost::optional idHash; NamespaceString nss = op->isGlobalIndexCrudOpType() ? 
NamespaceString::makeGlobalIndexNSS(op->getUuid().value()) : op->getNss(); - // Reduce the hash from 64bit down to 32bit, just to allow combinations with murmur3 later - // on. Bit depth not important, we end up just doing integer modulo with this in the end. - // The hash function should provide entropy in the lower bits as it's used in hash tables. - auto hashedNs = absl::Hash{}(nss); - auto hash = static_cast(hashedNs); - if (op->isCrudOpType()) { auto collProperties = collPropertiesCache->getCollectionProperties(opCtx, nss); - processCrudOp(opCtx, op, &hash, collProperties); + processCrudOp(opCtx, op, collProperties, idHash); } + auto hash = idHash ? absl::HashOf(nss, *idHash) : absl::HashOf(nss); + return (forceWriterId ? *forceWriterId : hash) % numWriters; } @@ -394,7 +431,7 @@ Status OplogApplierUtils::applyOplogEntryOrGroupedInsertsCommon( const NamespaceString nss(op->getNss()); auto opType = op->getOpType(); - if ((gMultitenancySupport && serverGlobalParams.featureCompatibility.isVersionInitialized() && + if ((gMultitenancySupport && gFeatureFlagRequireTenantID.isEnabled(serverGlobalParams.featureCompatibility))) { invariant(op->getTid() == nss.tenantId()); } else { @@ -405,97 +442,110 @@ Status OplogApplierUtils::applyOplogEntryOrGroupedInsertsCommon( incrementOpsAppliedStats(); return Status::OK(); } else if (DurableOplogEntry::isCrudOpType(opType)) { - auto status = - writeConflictRetry(opCtx, "applyOplogEntryOrGroupedInserts_CRUD", nss.ns(), [&] { - // Need to throw instead of returning a status for it to be properly ignored. + auto status = writeConflictRetry(opCtx, "applyOplogEntryOrGroupedInserts_CRUD", nss, [&] { + // Need to throw instead of returning a status for it to be properly ignored. + try { + boost::optional coll; + Database* db = nullptr; + + // If the collection UUID does not resolve, acquire the collection using the + // namespace. This is so we reach `applyOperation_inlock` below and invalidate + // the preimage / postimage for the op if applicable. + + // TODO SERVER-41371 / SERVER-73661 this code is difficult to maintain and + // needs to be done everywhere this situation is possible. We should try + // to consolidate this into applyOperation_inlock. try { - boost::optional autoColl; - Database* db = nullptr; - - // If the collection UUID does not resolve, acquire the collection using the - // namespace. This is so we reach `applyOperation_inlock` below and invalidate - // the preimage / postimage for the op if applicable. - - // TODO SERVER-41371 / SERVER-73661 this code is difficult to maintain and - // needs to be done everywhere this situation is possible. We should try - // to consolidate this into applyOperation_inlock. - try { - autoColl.emplace(opCtx, - getNsOrUUID(nss, *op), - fixLockModeForSystemDotViewsChanges(nss, MODE_IX)); - db = autoColl->getDb(); - } catch (ExceptionFor& ex) { - if (!isDataConsistent) { - autoColl.emplace( - opCtx, nss, fixLockModeForSystemDotViewsChanges(nss, MODE_IX)); - db = autoColl->ensureDbExists(opCtx); - } else { - throw ex; - } - } - - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "missing database (" - << nss.dbName().toStringForErrorMsg() << ")", - db); - OldClientContext ctx(opCtx, autoColl->getNss(), db); - - // We convert updates to upserts in secondary mode when the - // oplogApplicationEnforcesSteadyStateConstraints parameter is false, to avoid - // failing on the constraint that updates in steady state mode always update - // an existing document. 
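The `getWriterId`/`processCrudOp` rewrite above drops the MurmurHash3 mixing of a truncated namespace hash with the `_id` hash in favour of a single `absl::HashOf(nss, *idHash)` call (or `absl::HashOf(nss)` when the op has no `_id`). A runnable toy version of the writer assignment, with a string and a plain `size_t` standing in for the real `NamespaceString` and `BSONElement` hash:

```cpp
#include <cstdint>
#include <optional>
#include <string>

#include <absl/hash/hash.h>

// Toy version of the writer assignment above: hash the namespace together with the
// document-id hash (when present) and reduce modulo the number of writer threads.
std::uint32_t getWriterIdExample(const std::string& ns,
                                 std::optional<std::size_t> idHash,
                                 std::uint32_t numWriters) {
    auto hash = idHash ? absl::HashOf(ns, *idHash) : absl::HashOf(ns);
    return static_cast<std::uint32_t>(hash % numWriters);
}
```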
- // - // In initial sync and recovery modes we always ignore errors about missing - // documents on update, so there is no reason to convert the updates to upsert. - - bool shouldAlwaysUpsert = !oplogApplicationEnforcesSteadyStateConstraints && - oplogApplicationMode == OplogApplication::Mode::kSecondary; - Status status = applyOperation_inlock(opCtx, - db, - entryOrGroupedInserts, - shouldAlwaysUpsert, - oplogApplicationMode, - isDataConsistent, - incrementOpsAppliedStats); - if (!status.isOK() && status.code() == ErrorCodes::WriteConflict) { - throwWriteConflictException( - str::stream() << "WriteConflict caught when applying operation." - << " Original error: " << status.reason()); - } - return status; + coll.emplace( + acquireCollection(opCtx, + {getNsOrUUID(nss, *op), + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite}, + fixLockModeForSystemDotViewsChanges(nss, MODE_IX))); + + AutoGetDb autoDb(opCtx, coll->nss().dbName(), MODE_IX); + db = autoDb.getDb(); } catch (ExceptionFor& ex) { - // This can happen in initial sync or recovery modes (when a delete of the - // namespace appears later in the oplog), but we will ignore it in the caller. - // - // When we're not enforcing steady-state constraints, the error is ignored - // only for deletes, on the grounds that deleting from a non-existent collection - // is a no-op. - if (opType == OpTypeEnum::kDelete && - !oplogApplicationEnforcesSteadyStateConstraints && - oplogApplicationMode == OplogApplication::Mode::kSecondary) { - if (opCounters) { - const auto& opObj = redact(op->toBSONForLogging()); - opCounters->gotDeleteFromMissingNamespace(); - logOplogConstraintViolation( - opCtx, - op->getNss(), - OplogConstraintViolationEnum::kDeleteOnMissingNs, - "delete", - opObj, - boost::none /* status */); - } - return Status::OK(); + if (!isDataConsistent) { + coll.emplace( + acquireCollection(opCtx, + {nss, + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite}, + fixLockModeForSystemDotViewsChanges(nss, MODE_IX))); + + AutoGetDb autoDb(opCtx, coll->nss().dbName(), MODE_IX); + db = autoDb.ensureDbExists(opCtx); + } else { + throw ex; } + } - ex.addContext(str::stream() << "Failed to apply operation: " - << redact(entryOrGroupedInserts.toBSON())); - throw; + invariant(coll); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() + << "missing database (" << nss.dbName().toStringForErrorMsg() << ")", + db); + OldClientContext ctx(opCtx, coll->nss(), db); + + // We convert updates to upserts in secondary mode when the + // oplogApplicationEnforcesSteadyStateConstraints parameter is false, to avoid + // failing on the constraint that updates in steady state mode always update + // an existing document. + // + // In initial sync and recovery modes we always ignore errors about missing + // documents on update, so there is no reason to convert the updates to upsert. + + bool shouldAlwaysUpsert = !oplogApplicationEnforcesSteadyStateConstraints && + oplogApplicationMode == OplogApplication::Mode::kSecondary; + Status status = applyOperation_inlock(opCtx, + *coll, + entryOrGroupedInserts, + shouldAlwaysUpsert, + oplogApplicationMode, + isDataConsistent, + incrementOpsAppliedStats); + if (!status.isOK() && status.code() == ErrorCodes::WriteConflict) { + throwWriteConflictException(str::stream() + << "WriteConflict caught when applying operation." 
+ << " Original error: " << status.reason()); } - }); + return status; + } catch (ExceptionFor& ex) { + // This can happen in initial sync or recovery modes (when a delete of the + // namespace appears later in the oplog), but we will ignore it in the caller. + // + // When we're not enforcing steady-state constraints, the error is ignored + // only for deletes, on the grounds that deleting from a non-existent collection + // is a no-op. + if (opType == OpTypeEnum::kDelete && + !oplogApplicationEnforcesSteadyStateConstraints && + oplogApplicationMode == OplogApplication::Mode::kSecondary) { + if (opCounters) { + const auto& opObj = redact(op->toBSONForLogging()); + opCounters->gotDeleteFromMissingNamespace(); + logOplogConstraintViolation( + opCtx, + op->getNss(), + OplogConstraintViolationEnum::kDeleteOnMissingNs, + "delete", + opObj, + boost::none /* status */); + } + return Status::OK(); + } + + ex.addContext(str::stream() << "Failed to apply operation: " + << redact(entryOrGroupedInserts.toBSON())); + throw; + } + }); return status; } else if (opType == OpTypeEnum::kCommand) { auto status = - writeConflictRetry(opCtx, "applyOplogEntryOrGroupedInserts_command", nss.ns(), [&] { + writeConflictRetry(opCtx, "applyOplogEntryOrGroupedInserts_command", nss, [&] { // A special case apply for commands to avoid implicit database creation. Status status = applyCommand_inlock(opCtx, op, oplogApplicationMode); incrementOpsAppliedStats(); @@ -547,7 +597,7 @@ Status OplogApplierUtils::applyOplogBatchCommon( // If we didn't create a group, try to apply the op individually. try { - const Status status = + Status status = applyOplogEntryOrGroupedInserts(opCtx, op, oplogApplicationMode, isDataConsistent); if (!status.isOK()) { @@ -560,7 +610,7 @@ Status OplogApplierUtils::applyOplogBatchCommon( OplogApplication::inRecovering(oplogApplicationMode))) { if (inStableRecovery) { repl::OplogApplication::checkOnOplogFailureForRecovery( - opCtx, redact(op->toBSONForLogging()), redact(status)); + opCtx, op->getNss(), redact(op->toBSONForLogging()), redact(status)); } continue; } @@ -581,7 +631,7 @@ Status OplogApplierUtils::applyOplogBatchCommon( allowNamespaceNotFoundErrorsOnCrudOps) { if (inStableRecovery) { repl::OplogApplication::checkOnOplogFailureForRecovery( - opCtx, redact(op->toBSONForLogging()), redact(e)); + opCtx, op->getNss(), redact(op->toBSONForLogging()), redact(e)); } continue; } diff --git a/src/mongo/db/repl/oplog_applier_utils.h b/src/mongo/db/repl/oplog_applier_utils.h index fd1bab251fef5..ce4f26db8f985 100644 --- a/src/mongo/db/repl/oplog_applier_utils.h +++ b/src/mongo/db/repl/oplog_applier_utils.h @@ -29,11 +29,24 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/repl/insert_group.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" #include "mongo/stdx/unordered_map.h" namespace mongo { class CollatorInterface; + class OpCounters; namespace repl { diff --git a/src/mongo/db/repl/oplog_batcher.cpp b/src/mongo/db/repl/oplog_batcher.cpp index fa6aa3aa798b1..d182f41b42a6c 100644 --- a/src/mongo/db/repl/oplog_batcher.cpp +++ b/src/mongo/db/repl/oplog_batcher.cpp @@ -30,11 +30,40 @@ #include "mongo/db/repl/oplog_batcher.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/commands/txn_cmds_gen.h" +#include 
+#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/apply_ops_gen.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -192,6 +221,7 @@ StatusWith> OplogBatcher::getNextApplierBatch( batchStats.totalOps += opCount; batchStats.totalBytes += opBytes; batchStats.prepareOps += entry.shouldPrepare(); + batchStats.commitOrAbortOps += entry.isPreparedCommit() || entry.isPreparedAbort(); ops.push_back(std::move(entry)); _consume(opCtx, _oplogBuffer); @@ -223,7 +253,7 @@ StatusWith> OplogBatcher::getNextApplierBatch( * * 1) When in secondary steady state oplog application mode, a prepareTransaction entry can be * batched with other entries, while a prepared commitTransaction or abortTransaction entry - * is always processed individually in its own batch. + * can only be batched with other prepared commitTransaction or abortTransaction entries. * 2) An applyOps entry from batched writes or unprepared transactions will be expanded to CRUD * operation and thus can be safely batched with other CRUD operations in most cases, unless * it refers to the end of a large transaction (> 16MB) or a transaction that contains DDL @@ -231,24 +261,30 @@ StatusWith> OplogBatcher::getNextApplierBatch( */ OplogBatcher::BatchAction OplogBatcher::_getBatchActionForEntry(const OplogEntry& entry, const BatchStats& batchStats) { + // Used by non-commit and non-abort entries to cut the batch if it already contains any + // commit or abort entries. + auto continueOrStartNewBatch = [&] { + return batchStats.commitOrAbortOps > 0 ? OplogBatcher::BatchAction::kStartNewBatch + : OplogBatcher::BatchAction::kContinueBatch; + }; + if (!entry.isCommand()) { return entry.getNss().mustBeAppliedInOwnOplogBatch() ? OplogBatcher::BatchAction::kProcessIndividually - : OplogBatcher::BatchAction::kContinueBatch; + : continueOrStartNewBatch(); } - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (repl::feature_flags::gApplyPreparedTxnsInParallel.isEnabledAndIgnoreFCVUnsafe() && - _oplogApplier->getOptions().mode == OplogApplication::Mode::kSecondary) { + if (_oplogApplier->getOptions().mode == OplogApplication::Mode::kSecondary) { if (entry.shouldPrepare()) { // Grouping too many prepare ops in a batch may have performance implications, // so we break the batch when it contains enough prepare ops. return batchStats.prepareOps >= kMaxPrepareOpsPerBatch ? 
OplogBatcher::BatchAction::kStartNewBatch - : OplogBatcher::BatchAction::kContinueBatch; + : continueOrStartNewBatch(); } if (entry.isPreparedCommitOrAbort()) { - return OplogBatcher::BatchAction::kProcessIndividually; + return batchStats.commitOrAbortOps == 0 ? OplogBatcher::BatchAction::kStartNewBatch + : OplogBatcher::BatchAction::kContinueBatch; } } @@ -257,7 +293,7 @@ OplogBatcher::BatchAction OplogBatcher::_getBatchActionForEntry(const OplogEntry entry.isEndOfLargeTransaction(); return processIndividually ? OplogBatcher::BatchAction::kProcessIndividually - : OplogBatcher::BatchAction::kContinueBatch; + : continueOrStartNewBatch(); } /** @@ -289,6 +325,13 @@ void OplogBatcher::_consume(OperationContext* opCtx, OplogBuffer* oplogBuffer) { void OplogBatcher::_run(StorageInterface* storageInterface) { Client::initThread("ReplBatcher"); + { + // The OplogBatcher's thread has its own shutdown sequence triggered by the OplogApplier, + // so we don't want it to be killed in other ways. + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + BatchLimits batchLimits; while (true) { diff --git a/src/mongo/db/repl/oplog_batcher.h b/src/mongo/db/repl/oplog_batcher.h index 35ca7f0d0da7c..69c3c222bf560 100644 --- a/src/mongo/db/repl/oplog_batcher.h +++ b/src/mongo/db/repl/oplog_batcher.h @@ -29,12 +29,27 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { @@ -197,6 +212,7 @@ class OplogBatcher { std::size_t totalOps = 0; std::size_t totalBytes = 0; std::size_t prepareOps = 0; + std::size_t commitOrAbortOps = 0; }; /** diff --git a/src/mongo/db/repl/oplog_batcher_test_fixture.cpp b/src/mongo/db/repl/oplog_batcher_test_fixture.cpp index b8203ac2c9320..b014c269fc5d9 100644 --- a/src/mongo/db/repl/oplog_batcher_test_fixture.cpp +++ b/src/mongo/db/repl/oplog_batcher_test_fixture.cpp @@ -27,12 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/oplog_batcher_test_fixture.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/commands/txn_cmds_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog_batcher_test_fixture.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" namespace mongo { namespace repl { @@ -233,7 +252,7 @@ OplogEntry makeNoopOplogEntry(int t, const StringData& msg) { BSONObj oField = BSON("msg" << msg << "count" << t); return {DurableOplogEntry(OpTime(Timestamp(t, 1), 1), // optime OpTypeEnum::kNoop, // op type - NamespaceString(""), // namespace + NamespaceString(), // namespace boost::none, // uuid boost::none, // fromMigrate OplogEntry::kOplogVersion, // version @@ -290,7 +309,7 @@ OplogEntry makeApplyOpsOplogEntry(int t, bool prepare, const std::vector count) { auto nss = NamespaceString::createNamespaceString_forTest(dbName).getCommandNS(); @@ -334,7 +353,7 @@ OplogEntry makeCommitTransactionOplogEntry(int t, /** * Generates an abortTransaction oplog entry with the given number used for the timestamp. */ -OplogEntry makeAbortTransactionOplogEntry(int t, StringData dbName) { +OplogEntry makeAbortTransactionOplogEntry(int t, const DatabaseName& dbName) { auto nss = NamespaceString::createNamespaceString_forTest(dbName).getCommandNS(); BSONObj oField; @@ -425,7 +444,7 @@ OplogEntry makeLargeTransactionOplogEntries(int t, * Generates a mock large-transaction which has more than one oplog entry. */ std::vector makeMultiEntryTransactionOplogEntries(int t, - StringData dbName, + const DatabaseName& dbName, bool prepared, int count) { ASSERT_GTE(count, 2); @@ -442,7 +461,10 @@ std::vector makeMultiEntryTransactionOplogEntries(int t, * operations in innerOps. */ std::vector makeMultiEntryTransactionOplogEntries( - int t, StringData dbName, bool prepared, std::vector> innerOps) { + int t, + const DatabaseName& dbName, + bool prepared, + std::vector> innerOps) { std::size_t count = innerOps.size() + (prepared ? 
1 : 0); ASSERT_GTE(count, 2); std::vector vec; diff --git a/src/mongo/db/repl/oplog_batcher_test_fixture.h b/src/mongo/db/repl/oplog_batcher_test_fixture.h index d56f15ffbf62a..812bb71deb87d 100644 --- a/src/mongo/db/repl/oplog_batcher_test_fixture.h +++ b/src/mongo/db/repl/oplog_batcher_test_fixture.h @@ -29,13 +29,30 @@ #pragma once +#include #include +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -97,19 +114,22 @@ OplogEntry makeApplyOpsOplogEntry(int t, const std::vector& innerOps = {}); OplogEntry makeCommitTransactionOplogEntry(int t, - StringData dbName, + const DatabaseName& dbName, bool prepared, boost::optional count = boost::none); -OplogEntry makeAbortTransactionOplogEntry(int t, StringData dbName); +OplogEntry makeAbortTransactionOplogEntry(int t, const DatabaseName& dbName); std::vector makeMultiEntryTransactionOplogEntries(int t, - StringData dbName, + const DatabaseName& dbName, bool prepared, int count); std::vector makeMultiEntryTransactionOplogEntries( - int t, StringData dbName, bool prepared, std::vector> innerOps); + int t, + const DatabaseName& dbName, + bool prepared, + std::vector> innerOps); std::string toString(const std::vector& ops); } // namespace repl } // namespace mongo diff --git a/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp b/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp index d81cc6adc9c16..328a920f5fa66 100644 --- a/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp +++ b/src/mongo/db/repl/oplog_buffer_blocking_queue.cpp @@ -27,9 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include #include "mongo/db/repl/oplog_buffer_blocking_queue.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_buffer_blocking_queue.h b/src/mongo/db/repl/oplog_buffer_blocking_queue.h index a881a7e0e4133..4245c7cb2c268 100644 --- a/src/mongo/db/repl/oplog_buffer_blocking_queue.h +++ b/src/mongo/db/repl/oplog_buffer_blocking_queue.h @@ -29,8 +29,18 @@ #pragma once +#include + +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/repl/oplog_buffer.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/duration.h" +#include "mongo/util/interruptible.h" #include "mongo/util/queue.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_buffer_collection.cpp b/src/mongo/db/repl/oplog_buffer_collection.cpp index 99dd142975177..855f5e2c7b03f 100644 --- a/src/mongo/db/repl/oplog_buffer_collection.cpp +++ b/src/mongo/db/repl/oplog_buffer_collection.cpp @@ -27,24 +27,41 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/oplog_buffer_collection.h" - #include +#include +#include +#include #include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/document_validation.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/single_write_result_gen.h" #include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/repl/oplog_buffer_collection.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -257,6 +274,9 @@ std::size_t OplogBufferCollection::getCount() const { void OplogBufferCollection::clear(OperationContext* opCtx) { stdx::lock_guard lk(_mutex); + // We acquire the appropriate locks for the temporary oplog buffer collection here, + // so that we perform the drop and create under the same locks. + AutoGetCollection autoColl(opCtx, NamespaceString(kDefaultOplogCollectionNamespace), MODE_X); _dropCollection(opCtx); _createCollection(opCtx); _size = 0; @@ -460,8 +480,6 @@ void OplogBufferCollection::_createCollection(OperationContext* opCtx) { // overhead and improve _id query efficiency. options.clusteredIndex = clustered_util::makeDefaultClusteredIdIndex(); - // TODO (SERVER-71443): Fix to be interruptible or document exception. - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. auto status = _storageInterface->createCollection(opCtx, _nss, options); if (status.code() == ErrorCodes::NamespaceExists) return; @@ -469,8 +487,6 @@ void OplogBufferCollection::_createCollection(OperationContext* opCtx) { } void OplogBufferCollection::_dropCollection(OperationContext* opCtx) { - // TODO (SERVER-71443): Fix to be interruptible or document exception. - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. 
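With the `UninterruptibleLockGuard` removed from `_createCollection` and `_dropCollection`, the `clear()` change above takes the collection X lock itself so the drop and re-create of the buffer collection happen under a single lock acquisition. A condensed sketch of that shape, using the storage-interface calls shown above inside a hypothetical free function:

```cpp
#include "mongo/db/catalog/collection_options.h"
#include "mongo/db/catalog_raii.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/storage_interface.h"
#include "mongo/util/assert_util.h"

namespace mongo {
namespace repl {

// Hypothetical condensation of the clear() change: hold the X lock across drop + create
// so no other operation can observe the buffer collection in a half-reset state.
void clearBufferCollectionExample(OperationContext* opCtx,
                                  StorageInterface* storage,
                                  const NamespaceString& nss,
                                  const CollectionOptions& options) {
    AutoGetCollection autoColl(opCtx, nss, MODE_X);
    uassertStatusOK(storage->dropCollection(opCtx, nss));
    uassertStatusOK(storage->createCollection(opCtx, nss, options));
}

}  // namespace repl
}  // namespace mongo
```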
uassertStatusOK(_storageInterface->dropCollection(opCtx, _nss)); } diff --git a/src/mongo/db/repl/oplog_buffer_collection.h b/src/mongo/db/repl/oplog_buffer_collection.h index f4ff564432efc..8152a14795097 100644 --- a/src/mongo/db/repl/oplog_buffer_collection.h +++ b/src/mongo/db/repl/oplog_buffer_collection.h @@ -29,13 +29,24 @@ #pragma once +#include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_buffer.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/interruptible.h" #include "mongo/util/queue.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_buffer_collection_test.cpp b/src/mongo/db/repl/oplog_buffer_collection_test.cpp index 6d6c0b8d1a324..416f2ba75554b 100644 --- a/src/mongo/db/repl/oplog_buffer_collection_test.cpp +++ b/src/mongo/db/repl/oplog_buffer_collection_test.cpp @@ -27,26 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include - -#include "mongo/db/catalog/database.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" -#include "mongo/db/dbhelpers.h" -#include "mongo/db/json.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog_applier_impl_test_fixture.h" #include "mongo/db/repl/oplog_buffer_collection.h" -#include "mongo/db/repl/oplog_interface_local.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/stdx/thread.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace { diff --git a/src/mongo/db/repl/oplog_buffer_proxy.cpp b/src/mongo/db/repl/oplog_buffer_proxy.cpp index 7e03501322635..1e9e5e4dcf469 100644 --- a/src/mongo/db/repl/oplog_buffer_proxy.cpp +++ b/src/mongo/db/repl/oplog_buffer_proxy.cpp @@ -27,7 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include #include "mongo/db/repl/oplog_buffer_proxy.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/db/repl/oplog_buffer_proxy.h b/src/mongo/db/repl/oplog_buffer_proxy.h index f827530c4f3d9..b026d8b43afce 100644 --- a/src/mongo/db/repl/oplog_buffer_proxy.h +++ b/src/mongo/db/repl/oplog_buffer_proxy.h @@ -30,10 +30,15 @@ #pragma once #include +#include +#include #include #include "mongo/db/repl/oplog_buffer.h" #include "mongo/platform/mutex.h" +#include "mongo/util/duration.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_buffer_proxy_test.cpp b/src/mongo/db/repl/oplog_buffer_proxy_test.cpp index 15e542f4dfc89..4c9ac41b1f8c0 100644 --- a/src/mongo/db/repl/oplog_buffer_proxy_test.cpp +++ b/src/mongo/db/repl/oplog_buffer_proxy_test.cpp @@ -27,16 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include #include #include +#include +#include +#include + +#include -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/repl/oplog_buffer_proxy.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/repl/oplog_constraint_violation_logger.cpp b/src/mongo/db/repl/oplog_constraint_violation_logger.cpp index dd06f1c3dee55..20ceca07d43d5 100644 --- a/src/mongo/db/repl/oplog_constraint_violation_logger.cpp +++ b/src/mongo/db/repl/oplog_constraint_violation_logger.cpp @@ -29,6 +29,15 @@ #include "mongo/db/repl/oplog_constraint_violation_logger.h" +#include + + +// IWYU pragma: no_include "ext/alloc_traits.h" + +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication namespace mongo { diff --git a/src/mongo/db/repl/oplog_constraint_violation_logger.h b/src/mongo/db/repl/oplog_constraint_violation_logger.h index 326b98a5d4be5..2ca2e17fd9a69 100644 --- a/src/mongo/db/repl/oplog_constraint_violation_logger.h +++ b/src/mongo/db/repl/oplog_constraint_violation_logger.h @@ -30,11 +30,15 @@ #pragma once #include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/logv2/log.h" #include "mongo/platform/mutex.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/oplog_entry.cpp b/src/mongo/db/repl/oplog_entry.cpp index 7be06919caf23..90f72e5dcd4c9 100644 --- a/src/mongo/db/repl/oplog_entry.cpp +++ b/src/mongo/db/repl/oplog_entry.cpp @@ -30,16 +30,31 @@ #include "mongo/db/repl/oplog_entry.h" +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_index.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/server_feature_flags_gen.h" +#include 
"mongo/db/server_options.h" #include "mongo/logv2/redaction.h" #include "mongo/s/catalog/type_index_catalog.h" #include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -81,7 +96,6 @@ BSONObj makeOplogEntryDoc(OpTime opTime, builder.append(OplogEntryBase::kVersionFieldName, version); builder.append(OplogEntryBase::kOpTypeFieldName, OpType_serializer(opType)); if (nss.tenantId() && gMultitenancySupport && - serverGlobalParams.featureCompatibility.isVersionInitialized() && gFeatureFlagRequireTenantID.isEnabled(serverGlobalParams.featureCompatibility)) { nss.tenantId()->serializeToBSON(OplogEntryBase::kTidFieldName, &builder); } @@ -191,7 +205,7 @@ void ReplOperation::extractPrePostImageForTransaction(boost::optional value) & { - if (gMultitenancySupport && serverGlobalParams.featureCompatibility.isVersionInitialized() && + if (gMultitenancySupport && gFeatureFlagRequireTenantID.isEnabled(serverGlobalParams.featureCompatibility)) DurableReplOperation::setTid(value); } @@ -356,7 +370,7 @@ ReplOperation MutableOplogEntry::toReplOperation() const noexcept { } void MutableOplogEntry::setTid(boost::optional value) & { - if (gMultitenancySupport && serverGlobalParams.featureCompatibility.isVersionInitialized() && + if (gMultitenancySupport && gFeatureFlagRequireTenantID.isEnabled(serverGlobalParams.featureCompatibility)) getDurableReplOperation().setTid(std::move(value)); } @@ -749,6 +763,14 @@ const boost::optional& OplogEntry::getFromTenantMigration() const& return _entry.getFromTenantMigration(); } +const boost::optional& OplogEntry::getDonorOpTime() const& { + return _entry.getDonorOpTime(); +} + +boost::optional OplogEntry::getDonorApplyOpsIndex() const& { + return _entry.getDonorApplyOpsIndex(); +} + const boost::optional& OplogEntry::getPrevWriteOpTimeInTransaction() const& { return _entry.getPrevWriteOpTimeInTransaction(); } diff --git a/src/mongo/db/repl/oplog_entry.h b/src/mongo/db/repl/oplog_entry.h index ca9aceca0daee..482bd85af2beb 100644 --- a/src/mongo/db/repl/oplog_entry.h +++ b/src/mongo/db/repl/oplog_entry.h @@ -29,14 +29,41 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/apply_ops_gen.h" #include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/optime_base_gen.h" #include "mongo/db/session/logical_session_id.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -429,6 +456,8 @@ class DurableOplogEntry : private MutableOplogEntry { // Make field names accessible. 
using MutableOplogEntry::k_idFieldName; using MutableOplogEntry::kDestinedRecipientFieldName; + using MutableOplogEntry::kDonorApplyOpsIndexFieldName; + using MutableOplogEntry::kDonorOpTimeFieldName; using MutableOplogEntry::kDurableReplOperationFieldName; using MutableOplogEntry::kFromMigrateFieldName; using MutableOplogEntry::kFromTenantMigrationFieldName; @@ -456,6 +485,8 @@ class DurableOplogEntry : private MutableOplogEntry { // Make serialize() and getters accessible. using MutableOplogEntry::get_id; using MutableOplogEntry::getDestinedRecipient; + using MutableOplogEntry::getDonorApplyOpsIndex; + using MutableOplogEntry::getDonorOpTime; using MutableOplogEntry::getDurableReplOperation; using MutableOplogEntry::getFromMigrate; using MutableOplogEntry::getFromTenantMigration; @@ -720,6 +751,9 @@ class OplogEntry { static constexpr auto kFromMigrateFieldName = DurableOplogEntry::kFromMigrateFieldName; static constexpr auto kFromTenantMigrationFieldName = DurableOplogEntry::kFromTenantMigrationFieldName; + static constexpr auto kDonorOpTimeFieldName = DurableOplogEntry::kDonorOpTimeFieldName; + static constexpr auto kDonorApplyOpsIndexFieldName = + DurableOplogEntry::kDonorApplyOpsIndexFieldName; static constexpr auto kHashFieldName = DurableOplogEntry::kHashFieldName; static constexpr auto kTidFieldName = DurableOplogEntry::kTidFieldName; static constexpr auto kNssFieldName = DurableOplogEntry::kNssFieldName; @@ -791,6 +825,8 @@ class OplogEntry { std::int64_t getVersion() const; boost::optional getFromMigrate() const&; const boost::optional& getFromTenantMigration() const&; + const boost::optional& getDonorOpTime() const&; + boost::optional getDonorApplyOpsIndex() const&; const boost::optional& getPrevWriteOpTimeInTransaction() const&; const boost::optional& getPostImageOpTime() const&; boost::optional getNeedsRetryImage() const; diff --git a/src/mongo/db/repl/oplog_entry.idl b/src/mongo/db/repl/oplog_entry.idl index 742a612dd22f4..1b22366f6042d 100644 --- a/src/mongo/db/repl/oplog_entry.idl +++ b/src/mongo/db/repl/oplog_entry.idl @@ -132,7 +132,11 @@ structs: fromMigrate: type: bool optional: true - description: "An operation caused by a chunk migration" + description: "Marks the operation as an internal event that should be hidden from the + end user. The field (originally introduced to flag specific meta/data + deletions performed during a chunk migration) keeps its original + name for backward compatibility, but it is now used in a wider range of + contexts." OplogEntryBase: description: A document in which the server stores an oplog entry. @@ -161,6 +165,19 @@ structs: optional: true description: "Contains the UUID of a tenant migration for an operation caused by one." + donorOpTime: + type: optime + optional: true + description: "Used for Serverless shard merge during the oplog catchup phase. The + original opTime of the write on the donor timeline. Will only be set + for writes written during the oplog catchup phase." + donorApplyOpsIndex: + type: safeInt64 + optional: true + description: "Used for Serverless shard merge during the oplog catchup phase. If the + write was part of a transaction or applyOps entry, this will be the + index of the original applyOps entry. Will be set to 0 for any + non-applyOps writes."
_id: cpp_name: _id type: Value diff --git a/src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp b/src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp index d55cca71f54f5..f8bbad7a96c2a 100644 --- a/src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp +++ b/src/mongo/db/repl/oplog_entry_or_grouped_inserts.cpp @@ -28,7 +28,11 @@ */ #include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" + +#include + #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/repl/optime.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_entry_or_grouped_inserts.h b/src/mongo/db/repl/oplog_entry_or_grouped_inserts.h index 2c70c46eb2c3f..44bd591f718e4 100644 --- a/src/mongo/db/repl/oplog_entry_or_grouped_inserts.h +++ b/src/mongo/db/repl/oplog_entry_or_grouped_inserts.h @@ -29,9 +29,23 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include +#include +#include + #include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/session/internal_session_pool.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_entry_test.cpp b/src/mongo/db/repl/oplog_entry_test.cpp index 11e566e6ef980..54f8924f40d0e 100644 --- a/src/mongo/db/repl/oplog_entry_test.cpp +++ b/src/mongo/db/repl/oplog_entry_test.cpp @@ -27,10 +27,42 @@ * it in the license file. */ -#include "mongo/db/repl/idempotency_test_fixture.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_entry_test_helpers.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/optime_base_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/assert_that.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/matcher.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -110,8 +142,8 @@ TEST(OplogEntryTest, Create) { TEST(OplogEntryTest, OpTimeBaseNonStrictParsing) { const BSONObj oplogEntryExtraField = BSON("ts" << Timestamp(0, 0) << "t" << 0LL << "op" << "c" - << "ns" << nss.ns() << "wall" << Date_t() << "o" - << BSON("_id" << 1) << "extraField" << 3); + << "ns" << nss.ns_forTest() << "wall" << Date_t() + << "o" << BSON("_id" << 1) << "extraField" << 3); // OpTimeBase should be successfully created from an OplogEntry, even though it has // extraneous fields. 
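(Illustrative sketch, not part of the patch: how a write replicated during the shard merge oplog catchup phase might carry the donorOpTime and donorApplyOpsIndex fields added to oplog_entry.idl above. The setDonorOpTime/setDonorApplyOpsIndex setters are assumed to be the usual IDL-generated accessors for the new optional fields.)

    MutableOplogEntry entry;
    entry.setOpType(OpTypeEnum::kInsert);
    entry.setNss(NamespaceString::createNamespaceString_forTest("test.coll"));
    entry.setObject(BSON("_id" << 1));
    entry.setWallClockTime(Date_t::now());
    entry.setOpTime(OpTime(Timestamp(20, 1), 1));       // opTime assigned on the recipient timeline
    entry.setDonorOpTime(OpTime(Timestamp(10, 3), 1));  // original opTime of the write on the donor
    entry.setDonorApplyOpsIndex(0);                      // 0: the write was not part of an applyOps entry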
@@ -128,7 +160,7 @@ TEST(OplogEntryTest, OpTimeBaseNonStrictParsing) { const BSONObj oplogEntryMissingTimestamp = BSON("t" << 0LL << "op" << "c" - << "ns" << nss.ns() << "wall" << Date_t() << "o" << BSON("_id" << 1)); + << "ns" << nss.ns_forTest() << "wall" << Date_t() << "o" << BSON("_id" << 1)); // When an OplogEntryBase is created with a missing required field in a chained struct, it // should throw an exception. @@ -161,7 +193,7 @@ TEST(OplogEntryTest, ParseMutableOplogEntryIncludesTidField) { const TenantId tid(OID::gen()); const NamespaceString nssWithTid = - NamespaceString::createNamespaceString_forTest(tid, nss.ns()); + NamespaceString::createNamespaceString_forTest(tid, nss.ns_forTest()); const BSONObj oplogBson = [&] { BSONObjBuilder bob; @@ -169,7 +201,7 @@ TEST(OplogEntryTest, ParseMutableOplogEntryIncludesTidField) { bob.append("t", 0LL); bob.append("op", "c"); tid.serializeToBSON("tid", &bob); - bob.append("ns", nssWithTid.ns()); + bob.append("ns", nssWithTid.ns_forTest()); bob.append("wall", Date_t()); BSONObjBuilder{bob.subobjStart("o")}.append("_id", 1); return bob.obj(); @@ -187,7 +219,7 @@ TEST(OplogEntryTest, ParseDurableOplogEntryIncludesTidField) { const TenantId tid(OID::gen()); const NamespaceString nssWithTid = - NamespaceString::createNamespaceString_forTest(tid, nss.ns()); + NamespaceString::createNamespaceString_forTest(tid, nss.ns_forTest()); const BSONObj oplogBson = [&] { BSONObjBuilder bob; @@ -195,7 +227,7 @@ TEST(OplogEntryTest, ParseDurableOplogEntryIncludesTidField) { bob.append("t", 0LL); bob.append("op", "i"); tid.serializeToBSON("tid", &bob); - bob.append("ns", nssWithTid.ns()); + bob.append("ns", nssWithTid.ns_forTest()); bob.append("wall", Date_t()); BSONObjBuilder{bob.subobjStart("o")}.append("_id", 1).append("data", "x"); BSONObjBuilder{bob.subobjStart("o2")}.append("_id", 1); @@ -214,7 +246,8 @@ TEST(OplogEntryTest, ParseReplOperationIncludesTidField) { UUID uuid(UUID::gen()); TenantId tid(OID::gen()); - NamespaceString nssWithTid = NamespaceString::createNamespaceString_forTest(tid, nss.ns()); + NamespaceString nssWithTid = + NamespaceString::createNamespaceString_forTest(tid, nss.ns_forTest()); auto op = repl::DurableOplogEntry::makeInsertOperation( nssWithTid, @@ -234,7 +267,7 @@ TEST(OplogEntryTest, ConvertMutableOplogEntryToReplOperation) { RAIIServerParameterControllerForTest featureFlagController("featureFlagRequireTenantID", true); RAIIServerParameterControllerForTest multitenancySupportController("multitenancySupport", true); auto tid = TenantId(OID::gen()); - auto nssWithTid = NamespaceString::createNamespaceString_forTest(tid, nss.ns()); + auto nssWithTid = NamespaceString::createNamespaceString_forTest(tid, nss.ns_forTest()); auto opType = repl::OpTypeEnum::kCommand; auto uuid = UUID::gen(); std::vector stmtIds{StmtId(0), StmtId(1), StmtId(2)}; diff --git a/src/mongo/db/repl/oplog_entry_test_helpers.cpp b/src/mongo/db/repl/oplog_entry_test_helpers.cpp index 10f32d2dd6cbc..c29f87560b609 100644 --- a/src/mongo/db/repl/oplog_entry_test_helpers.cpp +++ b/src/mongo/db/repl/oplog_entry_test_helpers.cpp @@ -27,9 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/repl/oplog_entry_test_helpers.h" +#include "mongo/db/shard_id.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_entry_test_helpers.h b/src/mongo/db/repl/oplog_entry_test_helpers.h index 578f1fdd7017d..358786f81ee6b 100644 --- a/src/mongo/db/repl/oplog_entry_test_helpers.h +++ b/src/mongo/db/repl/oplog_entry_test_helpers.h @@ -29,11 +29,20 @@ #pragma once +#include +#include + +#include +#include + #include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { @@ -55,9 +64,10 @@ OplogEntry makeOplogEntry(repl::OpTime opTime, /** * Creates a create collection oplog entry with given optime. */ -OplogEntry makeCreateCollectionOplogEntry(OpTime opTime, - const NamespaceString& nss = NamespaceString("test.t"), - const BSONObj& options = BSONObj()); +OplogEntry makeCreateCollectionOplogEntry( + OpTime opTime, + const NamespaceString& nss = NamespaceString::createNamespaceString_forTest("test.t"), + const BSONObj& options = BSONObj()); /** * Creates an insert oplog entry with given optime and namespace. */ diff --git a/src/mongo/db/repl/oplog_fetcher.cpp b/src/mongo/db/repl/oplog_fetcher.cpp index d4687970b8df3..c6e76fa0b7697 100644 --- a/src/mongo/db/repl/oplog_fetcher.cpp +++ b/src/mongo/db/repl/oplog_fetcher.cpp @@ -30,22 +30,63 @@ #include "mongo/db/repl/oplog_fetcher.h" -#include "mongo/bson/mutable/document.h" +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include + +#include "mongo/base/counter.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/client.h" #include "mongo/db/commands/server_status_metric.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/matcher/matcher.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source_find_and_modify_image_lookup.h" #include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/optime_with.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replication_auth.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/service_context.h" #include "mongo/db/stats/timer_stats.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include 
"mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/s/resharding/resume_token_gen.h" #include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -240,7 +281,7 @@ std::string OplogFetcher::toString() { output << "OplogFetcher -"; output << " last optime fetched: " << _lastFetched.toString(); output << " source: " << _config.source.toString(); - output << " namespace: " << _nss.toString(); + output << " namespace: " << toStringForLogging(_nss); output << " active: " << _isActive_inlock(); output << " shutting down?:" << _isShuttingDown_inlock(); output << " first batch: " << _firstBatch; diff --git a/src/mongo/db/repl/oplog_fetcher.h b/src/mongo/db/repl/oplog_fetcher.h index b992f758aa289..659960c7af581 100644 --- a/src/mongo/db/repl/oplog_fetcher.h +++ b/src/mongo/db/repl/oplog_fetcher.h @@ -31,19 +31,33 @@ #include #include +#include +#include +#include +#include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" #include "mongo/client/dbclient_connection.h" #include "mongo/client/dbclient_cursor.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/abstract_async_component.h" #include "mongo/db/repl/data_replicator_external_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/vector_clock_metadata_hook.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_fetcher_mock.cpp b/src/mongo/db/repl/oplog_fetcher_mock.cpp index df951ab76fd9d..21d1e58e0f31c 100644 --- a/src/mongo/db/repl/oplog_fetcher_mock.cpp +++ b/src/mongo/db/repl/oplog_fetcher_mock.cpp @@ -30,8 +30,20 @@ #include "mongo/db/repl/oplog_fetcher_mock.h" +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/db/client.h" +#include "mongo/db/repl/replication_process.h" +#include "mongo/util/assert_util.h" + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/oplog_fetcher_mock.h b/src/mongo/db/repl/oplog_fetcher_mock.h index e8d428cd463f6..bcb9886e9d5b3 100644 --- a/src/mongo/db/repl/oplog_fetcher_mock.h +++ b/src/mongo/db/repl/oplog_fetcher_mock.h @@ -29,8 +29,24 @@ #pragma once +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/cursor_id.h" +#include "mongo/db/repl/data_replicator_external_state.h" #include "mongo/db/repl/oplog_fetcher.h" +#include "mongo/db/repl/optime.h" +#include "mongo/executor/task_executor.h" +#include 
"mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_fetcher_test.cpp b/src/mongo/db/repl/oplog_fetcher_test.cpp index 66779d16be20f..df1e7afe6bd0a 100644 --- a/src/mongo/db/repl/oplog_fetcher_test.cpp +++ b/src/mongo/db/repl/oplog_fetcher_test.cpp @@ -27,25 +27,63 @@ * it in the license file. */ +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/crypto/hash_block.h" +#include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/repl/data_replicator_external_state_mock.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_fetcher.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/sync_source_selector.h" #include "mongo/db/repl/task_executor_mock.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" #include "mongo/db/signed_logical_time.h" +#include "mongo/db/time_proof_service.h" #include "mongo/db/vector_clock.h" #include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/executor/task_executor_test_fixture.h" +#include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/metadata.h" #include "mongo/rpc/metadata/oplog_query_metadata.h" #include "mongo/rpc/metadata/repl_set_metadata.h" -#include "mongo/unittest/death_test.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/task_executor_proxy.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace { diff --git a/src/mongo/db/repl/oplog_interface_local.cpp b/src/mongo/db/repl/oplog_interface_local.cpp index 564a5600c8082..6b39c29a1871b 100644 --- a/src/mongo/db/repl/oplog_interface_local.cpp +++ b/src/mongo/db/repl/oplog_interface_local.cpp @@ -27,14 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/repl/oplog_interface_local.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/db_raii.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/internal_plans.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog_interface_local.h" +#include "mongo/db/server_options.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/str.h" @@ -90,8 +102,8 @@ OplogInterfaceLocal::OplogInterfaceLocal(OperationContext* opCtx) : _opCtx(opCtx std::string OplogInterfaceLocal::toString() const { return str::stream() << "LocalOplogInterface: " "operation context: " - << _opCtx->getOpID() - << "; collection: " << NamespaceString::kRsOplogNamespace; + << _opCtx->getOpID() << "; collection: " + << NamespaceString::kRsOplogNamespace.toStringForErrorMsg(); } std::unique_ptr OplogInterfaceLocal::makeIterator() const { diff --git a/src/mongo/db/repl/oplog_interface_local.h b/src/mongo/db/repl/oplog_interface_local.h index 9b0323a534cf2..b15f196153fb7 100644 --- a/src/mongo/db/repl/oplog_interface_local.h +++ b/src/mongo/db/repl/oplog_interface_local.h @@ -29,7 +29,13 @@ #pragma once +#include +#include + #include "mongo/db/repl/oplog_interface.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/db/repl/oplog_interface_mock.cpp b/src/mongo/db/repl/oplog_interface_mock.cpp index 767e58b8333d8..293fee798fb37 100644 --- a/src/mongo/db/repl/oplog_interface_mock.cpp +++ b/src/mongo/db/repl/oplog_interface_mock.cpp @@ -27,10 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/oplog_interface_mock.h" #include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/oplog_interface_mock.h b/src/mongo/db/repl/oplog_interface_mock.h index e853fb2fa1878..e6a811af27c6b 100644 --- a/src/mongo/db/repl/oplog_interface_mock.h +++ b/src/mongo/db/repl/oplog_interface_mock.h @@ -29,8 +29,18 @@ #pragma once -#include "mongo/db/repl/oplog_interface.h" #include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog_interface.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/util/net/hostandport.h" namespace mongo { class TransactionHistoryIteratorBase; diff --git a/src/mongo/db/repl/oplog_interface_remote.cpp b/src/mongo/db/repl/oplog_interface_remote.cpp index 1c5457ab9afe4..118d5340ee0e1 100644 --- a/src/mongo/db/repl/oplog_interface_remote.cpp +++ b/src/mongo/db/repl/oplog_interface_remote.cpp @@ -27,15 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/oplog_interface_remote.h" - +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_cursor.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog_interface_remote.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace repl { @@ -69,19 +80,15 @@ StatusWith OplogIteratorRemote::next() { OplogInterfaceRemote::OplogInterfaceRemote(HostAndPort hostAndPort, GetConnectionFn getConnection, - const std::string& collectionName, int batchSize) - : _hostAndPort(hostAndPort), - _getConnection(getConnection), - _collectionName(collectionName), - _batchSize(batchSize) {} + : _hostAndPort(hostAndPort), _getConnection(getConnection), _batchSize(batchSize) {} std::string OplogInterfaceRemote::toString() const { return _getConnection()->toString(); } std::unique_ptr OplogInterfaceRemote::makeIterator() const { - FindCommandRequest findRequest{NamespaceString{_collectionName}}; + FindCommandRequest findRequest{NamespaceString::kRsOplogNamespace}; findRequest.setProjection(BSON("ts" << 1 << "t" << 1LL)); findRequest.setSort(BSON("$natural" << -1)); findRequest.setBatchSize(_batchSize); diff --git a/src/mongo/db/repl/oplog_interface_remote.h b/src/mongo/db/repl/oplog_interface_remote.h index 8200056f34f4f..b6a001900126d 100644 --- a/src/mongo/db/repl/oplog_interface_remote.h +++ b/src/mongo/db/repl/oplog_interface_remote.h @@ -30,8 +30,13 @@ #pragma once #include +#include +#include #include "mongo/db/repl/oplog_interface.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/util/net/hostandport.h" namespace mongo { @@ -50,10 +55,7 @@ class OplogInterfaceRemote : public OplogInterface { */ using GetConnectionFn = std::function; - OplogInterfaceRemote(HostAndPort hostAndPort, - GetConnectionFn getConnection, - const std::string& collectionName, - int batchSize); + OplogInterfaceRemote(HostAndPort hostAndPort, GetConnectionFn getConnection, int batchSize); std::string toString() const override; std::unique_ptr makeIterator() const override; std::unique_ptr makeTransactionHistoryIterator( @@ -63,7 +65,6 @@ class OplogInterfaceRemote : public OplogInterface { private: HostAndPort _hostAndPort; GetConnectionFn _getConnection; - std::string _collectionName; int _batchSize; }; diff --git a/src/mongo/db/repl/oplog_test.cpp b/src/mongo/db/repl/oplog_test.cpp index e2dc0ddb9c35f..82af8a9ad7929 100644 --- a/src/mongo/db/repl/oplog_test.cpp +++ b/src/mongo/db/repl/oplog_test.cpp @@ -28,24 +28,44 @@ */ #include -#include +#include +#include +#include #include +#include +#include +#include +#include #include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/concurrency/lock_manager_test_help.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" 
#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/oplog_interface.h" #include "mongo/db/repl/oplog_interface_local.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/platform/mutex.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" namespace mongo { namespace repl { @@ -80,7 +100,8 @@ OplogEntry _getSingleOplogEntry(OperationContext* opCtx) { auto oplogIter = oplogInterface.makeIterator(); auto opEntry = unittest::assertGet(oplogIter->next()); ASSERT_EQUALS(ErrorCodes::CollectionIsEmpty, oplogIter->next().getStatus()) - << "Expected only 1 document in the oplog collection " << NamespaceString::kRsOplogNamespace + << "Expected only 1 document in the oplog collection " + << NamespaceString::kRsOplogNamespace.toStringForErrorMsg() << " but found more than 1 document instead"; return unittest::assertGet(OplogEntry::parse(opEntry.first)); } @@ -168,11 +189,13 @@ void _testConcurrentLogOp(const F& makeTaskFunction, const NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("test1.coll"); const NamespaceString nss2 = NamespaceString::createNamespaceString_forTest("test2.coll"); pool.schedule([&](auto status) mutable { - ASSERT_OK(status) << "Failed to schedule logOp() task for namespace " << nss1; + ASSERT_OK(status) << "Failed to schedule logOp() task for namespace " + << nss1.toStringForErrorMsg(); makeTaskFunction(nss1, &mtx, opTimeNssMap, &barrier)(); }); pool.schedule([&](auto status) mutable { - ASSERT_OK(status) << "Failed to schedule logOp() task for namespace " << nss2; + ASSERT_OK(status) << "Failed to schedule logOp() task for namespace " + << nss2.toStringForErrorMsg(); makeTaskFunction(nss2, &mtx, opTimeNssMap, &barrier)(); }); barrier.countDownAndWait(); @@ -214,15 +237,15 @@ OpTime _logOpNoopWithMsg(OperationContext* opCtx, MutableOplogEntry oplogEntry; oplogEntry.setOpType(repl::OpTypeEnum::kNoop); oplogEntry.setNss(nss); - oplogEntry.setObject(BSON("msg" << nss.ns())); + oplogEntry.setObject(BSON("msg" << nss.ns_forTest())); oplogEntry.setWallClockTime(Date_t::now()); auto opTime = logOp(opCtx, &oplogEntry); ASSERT_FALSE(opTime.isNull()); stdx::lock_guard lock(*mtx); ASSERT(opTimeNssMap->find(opTime) == opTimeNssMap->end()) - << "Unable to add namespace " << nss << " to map - map contains duplicate entry for optime " - << opTime; + << "Unable to add namespace " << nss.toStringForErrorMsg() + << " to map - map contains duplicate entry for optime " << opTime; opTimeNssMap->insert(std::make_pair(opTime, nss)); return opTime; diff --git a/src/mongo/db/repl/optime.cpp b/src/mongo/db/repl/optime.cpp index fedc73796e05a..32189a3d6ac36 100644 --- a/src/mongo/db/repl/optime.cpp +++ b/src/mongo/db/repl/optime.cpp @@ -27,17 +27,26 @@ * it in the license file. 
*/ +#include #include -#include #include #include -#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/optime_base_gen.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/optime.h b/src/mongo/db/repl/optime.h index e51d259ac7cf2..f3e789195a827 100644 --- a/src/mongo/db/repl/optime.h +++ b/src/mongo/db/repl/optime.h @@ -29,10 +29,18 @@ #pragma once +#include +#include +#include +#include #include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/optime_extract_test.cpp b/src/mongo/db/repl/optime_extract_test.cpp index fe47e9ac7a12b..adef533ce04ba 100644 --- a/src/mongo/db/repl/optime_extract_test.cpp +++ b/src/mongo/db/repl/optime_extract_test.cpp @@ -27,14 +27,19 @@ * it in the license file. */ -#include -#include #include -#include "mongo/db/jsobj.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/repl/bson_extract_optime.h" #include "mongo/db/repl/optime.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using namespace mongo; diff --git a/src/mongo/db/repl/primary_only_service.cpp b/src/mongo/db/repl/primary_only_service.cpp index 44c2d446cbf56..21d33c1e91aa1 100644 --- a/src/mongo/db/repl/primary_only_service.cpp +++ b/src/mongo/db/repl/primary_only_service.cpp @@ -28,18 +28,31 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/primary_only_service.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/ops/write_ops.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/repl/replication_coordinator.h" @@ -47,13 +60,21 @@ #include "mongo/db/service_context.h" #include "mongo/db/vector_clock_metadata_hook.h" #include "mongo/executor/network_connection_hook.h" -#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/s/write_ops/batched_command_response.h" 
+#include "mongo/rpc/metadata/metadata_hook.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -148,11 +169,11 @@ void PrimaryOnlyServiceRegistry::registerService(std::unique_ptrsecond; invariant(inserted2, str::stream() << "Attempted to register PrimaryOnlyService (" << name - << ") with state document namespace \"" << ns + << ") with state document namespace \"" << ns.toStringForErrorMsg() << "\" that is already in use by service " << existingService->getServiceName()); LOGV2_INFO(5123008, @@ -173,7 +194,7 @@ PrimaryOnlyService* PrimaryOnlyServiceRegistry::lookupServiceByName(StringData s PrimaryOnlyService* PrimaryOnlyServiceRegistry::lookupServiceByNamespace( const NamespaceString& ns) { - auto it = _servicesByNamespace.find(ns.toString()); + auto it = _servicesByNamespace.find(ns); if (it == _servicesByNamespace.end()) { return nullptr; } @@ -335,9 +356,6 @@ void PrimaryOnlyService::startup(OperationContext* opCtx) { auto client = Client::getCurrent(); AuthorizationSession::get(*client)->grantInternalAuthorization(&cc()); - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - // Associate this Client with this PrimaryOnlyService primaryOnlyServiceStateForClient(client).primaryOnlyService = this; }; @@ -443,6 +461,30 @@ void PrimaryOnlyService::onStepUp(const OpTime& stepUpOpTime) { } _rebuildInstances(newTerm); }) + .onError([this, newTerm](Status s) { + LOGV2_ERROR(5165001, + "Failed to rebuild PrimaryOnlyService on stepup.", + "service"_attr = getServiceName(), + "error"_attr = s); + + stdx::lock_guard lk(_mutex); + if (_state != State::kRebuilding || _term != newTerm) { + // We've either stepped or shut down, or advanced to a new term. + // In either case, we rely on the stepdown/shutdown logic or the + // step-up of the new term to set _state and do nothing here. 
+ bool steppedDown = _state == State::kPaused; + bool shutDown = _state == State::kShutdown; + bool termAdvanced = _term > newTerm; + invariant( + steppedDown || shutDown || termAdvanced, + "Unexpected _state or _term; _state is {}, _term is {}, term was {} "_format( + _getStateString(lk), _term, newTerm)); + return; + } + invariant(_state == State::kRebuilding); + _rebuildStatus = s; + _setState(State::kRebuildFailed, lk); + }) .getAsync([](auto&&) {}); // Ignore the result Future lk.unlock(); } @@ -679,25 +721,20 @@ bool PrimaryOnlyService::_getHasExecutor() const { return _hasExecutor.load(); } -void PrimaryOnlyService::_rebuildInstances(long long term) noexcept { +void PrimaryOnlyService::_rebuildInstances(long long term) { std::vector stateDocuments; auto serviceName = getServiceName(); - LOGV2_INFO(5123005, - "Rebuilding PrimaryOnlyService {service} due to stepUp", - "Rebuilding PrimaryOnlyService due to stepUp", - "service"_attr = serviceName); + LOGV2_INFO( + 5123005, "Rebuilding PrimaryOnlyService due to stepUp", "service"_attr = serviceName); if (!MONGO_unlikely(PrimaryOnlyServiceSkipRebuildingInstances.shouldFail())) { auto ns = getStateDocumentsNS(); - LOGV2_DEBUG( - 5123004, - 2, - "Querying {namespace} to look for state documents while rebuilding PrimaryOnlyService " - "{service}", - "Querying to look for state documents while rebuilding PrimaryOnlyService", - logAttrs(ns), - "service"_attr = serviceName); + LOGV2_DEBUG(5123004, + 2, + "Querying to look for state documents while rebuilding PrimaryOnlyService", + logAttrs(ns), + "service"_attr = serviceName); // The PrimaryOnlyServiceClientObserver will make any OpCtx created as part of a // PrimaryOnlyService immediately get interrupted if the service is not in state kRunning. @@ -718,65 +755,45 @@ void PrimaryOnlyService::_rebuildInstances(long long term) noexcept { while (cursor->more()) { stateDocuments.push_back(cursor->nextSafe().getOwned()); } - } catch (const DBException& e) { - LOGV2_ERROR( - 4923601, - "Failed to start PrimaryOnlyService {service} because the query on {namespace} " - "for state documents failed due to {error}", - "Failed to start PrimaryOnlyService because the query for state documents failed", - "service"_attr = serviceName, - logAttrs(ns), - "error"_attr = e); - - Status status = e.toStatus(); - status.addContext(str::stream() - << "Failed to start PrimaryOnlyService \"" << serviceName - << "\" because the query for state documents on ns \"" << ns - << "\" failed"); - - stdx::lock_guard lk(_mutex); - if (_state != State::kRebuilding || _term != term) { - _stateChangeCV.notify_all(); - return; - } - _setState(State::kRebuildFailed, lk); - _rebuildStatus = std::move(status); - return; + } catch (DBException& e) { + e.addContext(str::stream() + << "Error querying the state document collection " + << ns.toStringForErrorMsg() << " for service " << serviceName); + throw; } } + LOGV2_DEBUG(5123003, + 2, + "Found state documents while rebuilding PrimaryOnlyService that correspond to " + "instances of that service", + "service"_attr = serviceName, + "numDocuments"_attr = stateDocuments.size()); + while (MONGO_unlikely(PrimaryOnlyServiceHangBeforeRebuildingInstances.shouldFail())) { { stdx::lock_guard lk(_mutex); if (_state != State::kRebuilding || _term != term) { // Node stepped down - _stateChangeCV.notify_all(); return; } } sleepmillis(100); } - // Must create opCtx before taking _mutex to avoid deadlock. 
- AllowOpCtxWhenServiceRebuildingBlock allowOpCtxBlock(Client::getCurrent()); - auto opCtx = cc().makeOperationContext(); stdx::lock_guard lk(_mutex); if (_state != State::kRebuilding || _term != term) { // Node stepped down before finishing rebuilding service from previous stepUp. - _stateChangeCV.notify_all(); return; } invariant(_activeInstances.empty()); invariant(_term == term); - LOGV2_DEBUG(5123003, + // Construct new instances using the state documents and add to _activeInstances. + LOGV2_DEBUG(5165000, 2, - "While rebuilding PrimaryOnlyService {service}, found {numDocuments} state " - "documents corresponding to instances of that service", - "Found state documents while rebuilding PrimaryOnlyService that correspond to " - "instances of that service", + "Starting to construct and run instances for service", "service"_attr = serviceName, - "numDocuments"_attr = stateDocuments.size()); - + "numInstances"_attr = stateDocuments.size()); for (auto&& doc : stateDocuments) { auto idElem = doc["_id"]; fassert(4923602, !idElem.eoo()); @@ -785,6 +802,7 @@ void PrimaryOnlyService::_rebuildInstances(long long term) noexcept { [[maybe_unused]] auto newInstance = _insertNewInstance(lk, std::move(instance), std::move(instanceID)); } + _setState(State::kRunning, lk); } diff --git a/src/mongo/db/repl/primary_only_service.h b/src/mongo/db/repl/primary_only_service.h index 8d5c30856b4f8..141a06038ffe9 100644 --- a/src/mongo/db/repl/primary_only_service.h +++ b/src/mongo/db/repl/primary_only_service.h @@ -29,25 +29,46 @@ #pragma once +#include +#include #include +#include +#include +#include #include +#include #include +#include +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/client.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/service_context.h" #include "mongo/executor/scoped_task_executor.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/concurrency/with_lock.h" #include "mongo/util/fail_point.h" #include "mongo/util/future.h" +#include "mongo/util/lockable_adapter.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/string_map.h" namespace mongo { @@ -444,7 +465,7 @@ class PrimaryOnlyService { * PrimaryOnlyService, constructs Instance objects for each document found, and schedules work * to run all the newly recreated Instances. */ - void _rebuildInstances(long long term) noexcept; + void _rebuildInstances(long long term); /** * Schedules work to call the provided instance's 'run' method and inserts the new instance into @@ -510,8 +531,11 @@ class PrimaryOnlyService { State _state = State::kPaused; // (M) - // If reloading the state documents from disk fails, this Status gets set to a non-ok value and - // calls to lookup() or getOrCreate() will throw this status until the node steps down. 
+ // If rebuilding the instances fails, for example due to a failure reloading the state documents + // from disk, this Status gets set to a non-ok value and calls to lookup() or getOrCreate() will + // throw this status until the node steps down. Note that this status must be populated with the + // relevant error before _setState is used to change the state to kRebuildFailed and waiters on + // _stateChangeCV are notified, as those waiters may attempt to read this status. Status _rebuildStatus = Status::OK(); // (M) // The term that this service is running under. @@ -594,7 +618,7 @@ class PrimaryOnlyServiceRegistry final : public ReplicaSetAwareService _servicesByNamespace; + mongo::stdx::unordered_map _servicesByNamespace; }; } // namespace repl diff --git a/src/mongo/db/repl/primary_only_service_op_observer.cpp b/src/mongo/db/repl/primary_only_service_op_observer.cpp index d8b88d05c7968..721748be821f1 100644 --- a/src/mongo/db/repl/primary_only_service_op_observer.cpp +++ b/src/mongo/db/repl/primary_only_service_op_observer.cpp @@ -27,18 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/primary_only_service_op_observer.h" - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/primary_only_service_op_observer.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace repl { namespace { -const auto documentIdDecoration = OperationContext::declareDecoration(); +const auto documentIdDecoration = OplogDeleteEntryArgs::declareDecoration(); } // namespace @@ -51,18 +61,21 @@ PrimaryOnlyServiceOpObserver::~PrimaryOnlyServiceOpObserver() = default; void PrimaryOnlyServiceOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { // Extract the _id field from the document. If it does not have an _id, use the // document itself as the _id. - documentIdDecoration(opCtx) = doc["_id"] ? doc["_id"].wrap() : doc; + documentIdDecoration(args) = doc["_id"] ? doc["_id"].wrap() : doc; } void PrimaryOnlyServiceOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { const auto& nss = coll->ns(); - auto& documentId = documentIdDecoration(opCtx); + auto& documentId = documentIdDecoration(args); invariant(!documentId.isEmpty()); auto service = _registry->lookupServiceByNamespace(nss); @@ -82,16 +95,18 @@ repl::OpTime PrimaryOnlyServiceOpObserver::onDropCollection(OperationContext* op const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { auto service = _registry->lookupServiceByNamespace(collectionName); if (service) { - opCtx->recoveryUnit()->onCommit([service, collectionName](OperationContext*, - boost::optional) { - // Release and interrupt all the instances since the state document collection is - // not supposed to be dropped.
- service->releaseAllInstances( - Status(ErrorCodes::Interrupted, str::stream() << collectionName << " is dropped")); - }); + opCtx->recoveryUnit()->onCommit( + [service, collectionName](OperationContext*, boost::optional) { + // Release and interrupt all the instances since the state document collection is + // not supposed to be dropped. + service->releaseAllInstances( + Status(ErrorCodes::Interrupted, + str::stream() << collectionName.toStringForErrorMsg() << " is dropped")); + }); } return {}; } diff --git a/src/mongo/db/repl/primary_only_service_op_observer.h b/src/mongo/db/repl/primary_only_service_op_observer.h index ef4490c0099b4..4dc14eb784f69 100644 --- a/src/mongo/db/repl/primary_only_service_op_observer.h +++ b/src/mongo/db/repl/primary_only_service_op_observer.h @@ -29,7 +29,18 @@ #pragma once +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -39,7 +50,7 @@ class PrimaryOnlyServiceRegistry; /** * OpObserver for PrimaryOnlyService. */ -class PrimaryOnlyServiceOpObserver final : public OpObserver { +class PrimaryOnlyServiceOpObserver final : public OpObserverNoop { PrimaryOnlyServiceOpObserver(const PrimaryOnlyServiceOpObserver&) = delete; PrimaryOnlyServiceOpObserver& operator=(const PrimaryOnlyServiceOpObserver&) = delete; @@ -47,211 +58,30 @@ class PrimaryOnlyServiceOpObserver final : public OpObserver { explicit PrimaryOnlyServiceOpObserver(ServiceContext* serviceContext); ~PrimaryOnlyServiceOpObserver(); - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} - - void onInserts(OperationContext* opCtx, - const CollectionPtr& coll, - std::vector::const_iterator first, - std::vector::const_iterator last, - std::vector fromMigrate, - bool defaultFromMigrate) final {} - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, 
- const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} - - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final {} + NamespaceFilters getNamespaceFilters() const final { + return {/*update=*/NamespaceFilter::kNone, /*delete=*/NamespaceFilter::kAll}; + } void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) final {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final {} - - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) final {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - 
- void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) final {} + CollectionDropType dropType, + bool markFromMigrate) final; private: - void _onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) final{}; - PrimaryOnlyServiceRegistry* _registry; }; diff --git a/src/mongo/db/repl/primary_only_service_test.cpp b/src/mongo/db/repl/primary_only_service_test.cpp index e27ce16581bed..bcc73785a7880 100644 --- a/src/mongo/db/repl/primary_only_service_test.cpp +++ b/src/mongo/db/repl/primary_only_service_test.cpp @@ -27,31 +27,57 @@ * it in the license file. 
*/ +#include #include - +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/primary_only_service_test_fixture.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/executor/network_interface.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/stdx/thread.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/fail_point.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/time_support.h" using namespace mongo; using namespace mongo::repl; +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + namespace { constexpr StringData kTestServiceName = "TestService"_sd; @@ -61,6 +87,7 @@ MONGO_FAIL_POINT_DEFINE(TestServiceHangDuringStateTwo); MONGO_FAIL_POINT_DEFINE(TestServiceHangDuringCompletion); MONGO_FAIL_POINT_DEFINE(TestServiceHangBeforeMakingOpCtx); MONGO_FAIL_POINT_DEFINE(TestServiceHangAfterMakingOpCtx); +MONGO_FAIL_POINT_DEFINE(TestServiceFailRebuildService); } // namespace class TestService final : public PrimaryOnlyService { @@ -288,6 +315,9 @@ class TestService final : public PrimaryOnlyService { private: ExecutorFuture _rebuildService(std::shared_ptr executor, const CancellationToken& token) override { + if (TestServiceFailRebuildService.shouldFail()) { + uassertStatusOK(Status(ErrorCodes::InternalError, "_rebuildService error")); + } auto nss = getStateDocumentsNS(); AllowOpCtxWhenServiceRebuildingBlock allowOpCtxBlock(Client::getCurrent()); @@ -1215,3 +1245,49 @@ TEST_F(PrimaryOnlyServiceTest, PrimaryOnlyServiceLogSlowServices) { stopCapturingLogMessages(); ASSERT_EQ(1, countTextFormatLogLinesContaining(slowTotalTimeStepUpCompleteMsg)); } + +TEST_F(PrimaryOnlyServiceTest, RebuildServiceFailsShouldSetStateFromRebuilding) { + /** + * (1) onStepUp changes state to kRebuilding. lookupInstanceThread blocks waiting for state + * not rebuilding. + * (2) PrimaryOnlyService::_rebuildService fails due to fail point, causing rebuilding to + * fail, despite no stepDown/shutDown occurring. + * (3) Failure _rebuildService results in state change to kRebuildFailed. + * (4) lookupInstanceThread notified of the state change, unblocks and sees error that + * caused _rebuildService to fail. 
+ */ + stepDown(); + stdx::thread stepUpThread; + stdx::thread lookUpInstanceThread; + Status lookupError = Status::OK(); + FailPointEnableBlock failRebuildServiceFailPoint("TestServiceFailRebuildService"); + + { + FailPointEnableBlock stepUpFailpoint("PrimaryOnlyServiceHangBeforeLaunchingStepUpLogic"); + stepUpThread = stdx::thread([this] { + ThreadClient tc("StepUpThread", getServiceContext()); + stepUp(); + }); + + stepUpFailpoint->waitForTimesEntered(stepUpFailpoint.initialTimesEntered() + 1); + + lookUpInstanceThread = stdx::thread([this, &lookupError] { + try { + ThreadClient tc("LookUpInstanceThread", getServiceContext()); + auto opCtx = makeOperationContext(); + TestService::Instance::lookup(opCtx.get(), _service, BSON("_id" << 0)); + } catch (DBException& ex) { + lookupError = ex.toStatus(); + } + }); + } + + failRebuildServiceFailPoint->waitForTimesEntered( + failRebuildServiceFailPoint.initialTimesEntered() + 1); + stepUpThread.join(); + lookUpInstanceThread.join(); + + ASSERT(!lookupError.isOK()) << "lookup thread did not receive an error"; + ASSERT_EQ(lookupError.code(), ErrorCodes::InternalError); + ASSERT_EQ(lookupError.reason(), "_rebuildService error"); +} diff --git a/src/mongo/db/repl/primary_only_service_test_fixture.cpp b/src/mongo/db/repl/primary_only_service_test_fixture.cpp index 68b44cffd6883..bf61e5cca2547 100644 --- a/src/mongo/db/repl/primary_only_service_test_fixture.cpp +++ b/src/mongo/db/repl/primary_only_service_test_fixture.cpp @@ -29,17 +29,20 @@ #include "mongo/db/repl/primary_only_service_test_fixture.h" +#include + +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service_op_observer.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/fail_point.h" namespace mongo { @@ -53,7 +56,7 @@ void PrimaryOnlyServiceMongoDTest::setUp() { { auto opCtx = makeOperationContext(); - auto replCoord = std::make_unique(serviceContext); + auto replCoord = makeReplicationCoordinator(); repl::ReplicationCoordinator::set(serviceContext, std::move(replCoord)); repl::createOplog(opCtx.get()); @@ -111,6 +114,11 @@ void PrimaryOnlyServiceMongoDTest::stepDown() { repl::stepDown(getServiceContext(), _registry); } +std::unique_ptr +PrimaryOnlyServiceMongoDTest::makeReplicationCoordinator() { + return std::make_unique(getServiceContext()); +} + void stepUp(OperationContext* opCtx, ServiceContext* serviceCtx, repl::PrimaryOnlyServiceRegistry* registry, diff --git a/src/mongo/db/repl/primary_only_service_test_fixture.h b/src/mongo/db/repl/primary_only_service_test_fixture.h index 5f7e8c478ae0d..359cc34079e14 100644 --- a/src/mongo/db/repl/primary_only_service_test_fixture.h +++ b/src/mongo/db/repl/primary_only_service_test_fixture.h @@ -29,9 +29,13 @@ #pragma once -#include "mongo/db/repl/primary_only_service.h" #include +#include +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/primary_only_service.h" +#include 
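The new test above depends on the rebuild state machine surfacing the _rebuildService error to blocked lookups instead of leaving them waiting forever. A self-contained sketch of that shape, using only standard-library primitives and hypothetical names (not the PrimaryOnlyService types):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <stdexcept>
    #include <string>
    #include <thread>

    enum class State { kRebuilding, kRunning, kRebuildFailed };

    struct ServiceSketch {
        std::mutex m;
        std::condition_variable cv;
        State state = State::kRebuilding;
        std::string rebuildError;

        void failRebuild(std::string err) {
            std::lock_guard<std::mutex> lk(m);
            state = State::kRebuildFailed;
            rebuildError = std::move(err);
            cv.notify_all();  // wake lookups blocked on the rebuilding state
        }

        std::string lookup() {
            std::unique_lock<std::mutex> lk(m);
            cv.wait(lk, [&] { return state != State::kRebuilding; });
            if (state == State::kRebuildFailed)
                throw std::runtime_error(rebuildError);  // what the lookup thread observes
            return "instance";
        }
    };

    int main() {
        ServiceSketch svc;
        std::thread waiter([&] {
            try {
                svc.lookup();
            } catch (const std::exception& ex) {
                std::cout << "lookup failed: " << ex.what() << '\n';  // expected path
            }
        });
        svc.failRebuild("_rebuildService error");
        waiter.join();
        return 0;
    }

The fail points in the test play the role of the barrier here: they hold the step-up work until the lookup thread is already parked on the rebuilding state, then let the rebuild proceed and fail.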
"mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" namespace mongo { @@ -41,6 +45,7 @@ class TaskExecutor; } class OperationContext; + class OpObserverRegistry; class ServiceContext; @@ -67,6 +72,8 @@ class PrimaryOnlyServiceMongoDTest : public ServiceContextMongoDTest { virtual std::unique_ptr makeService( ServiceContext* serviceContext) = 0; + virtual std::unique_ptr makeReplicationCoordinator(); + /** * Used to add your own op observer to the op observer registry during setUp prior to running * your tests. diff --git a/src/mongo/db/repl/primary_only_service_util.cpp b/src/mongo/db/repl/primary_only_service_util.cpp index d845ac2a2621c..fbe4fe124ba57 100644 --- a/src/mongo/db/repl/primary_only_service_util.cpp +++ b/src/mongo/db/repl/primary_only_service_util.cpp @@ -27,13 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/db/repl/primary_only_service_util.h" +#include -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/client.h" +#include "mongo/db/repl/primary_only_service_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/future_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/primary_only_service_util.h b/src/mongo/db/repl/primary_only_service_util.h index d94af7afc2885..1971d2f0dbf99 100644 --- a/src/mongo/db/repl/primary_only_service_util.h +++ b/src/mongo/db/repl/primary_only_service_util.h @@ -29,8 +29,25 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { diff --git a/src/mongo/db/repl/primary_only_service_util_test.cpp b/src/mongo/db/repl/primary_only_service_util_test.cpp index 02f9855038024..9cad76417c05a 100644 --- a/src/mongo/db/repl/primary_only_service_util_test.cpp +++ b/src/mongo/db/repl/primary_only_service_util_test.cpp @@ -27,12 +27,33 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service_test_fixture.h" #include "mongo/db/repl/primary_only_service_util.h" #include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/fail_point.h" using namespace mongo; diff --git a/src/mongo/db/repl/read_concern_args.cpp b/src/mongo/db/repl/read_concern_args.cpp index 5906aad2b5153..d3706266e3411 100644 --- a/src/mongo/db/repl/read_concern_args.cpp +++ b/src/mongo/db/repl/read_concern_args.cpp @@ -27,29 +27,39 @@ * it in the license file. */ +#include "mongo/db/repl/read_concern_args.h" -#include "mongo/platform/basic.h" +#include -#include "mongo/db/repl/read_concern_args.h" +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" #include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" +#include "mongo/db/read_write_concern_provenance_base_gen.h" #include "mongo/db/repl/bson_extract_optime.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication - -using std::string; - namespace mongo { namespace repl { +namespace { const OperationContext::Decoration handle = OperationContext::declareDecoration(); +} // namespace + ReadConcernArgs& ReadConcernArgs::get(OperationContext* opCtx) { return handle(opCtx); } @@ -58,7 +68,6 @@ const ReadConcernArgs& ReadConcernArgs::get(const OperationContext* opCtx) { return handle(opCtx); } - // The "kImplicitDefault" read concern, used by internal operations, is deliberately empty (no // 'level' specified). This allows internal operations to specify a read concern, while still // allowing it to be either local or available on sharded secondaries. 
diff --git a/src/mongo/db/repl/read_concern_args.h b/src/mongo/db/repl/read_concern_args.h index 803bc150e80a3..2e84962e34d18 100644 --- a/src/mongo/db/repl/read_concern_args.h +++ b/src/mongo/db/repl/read_concern_args.h @@ -29,15 +29,26 @@ #pragma once +#include +#include #include +#include +#include #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/json.h" #include "mongo/db/logical_time.h" #include "mongo/db/read_write_concern_provenance.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_level.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/read_concern_args_test.cpp b/src/mongo/db/repl/read_concern_args_test.cpp index 036e0fbd086f1..6305918374d91 100644 --- a/src/mongo/db/repl/read_concern_args_test.cpp +++ b/src/mongo/db/repl/read_concern_args_test.cpp @@ -27,11 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/jsobj.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_client_info.cpp b/src/mongo/db/repl/repl_client_info.cpp index 402da18360a7a..d72dad498e4c3 100644 --- a/src/mongo/db/repl/repl_client_info.cpp +++ b/src/mongo/db/repl/repl_client_info.cpp @@ -28,14 +28,21 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/repl/repl_client_info.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/repl_server_parameters.idl b/src/mongo/db/repl/repl_server_parameters.idl index 4e2c0838e4d77..646686d53d90a 100644 --- a/src/mongo/db/repl/repl_server_parameters.idl +++ b/src/mongo/db/repl/repl_server_parameters.idl @@ -593,14 +593,6 @@ server_parameters: cpp_varname: startupRecoveryForRestore default: false - storeFindAndModifyImagesInSideCollection: - description: >- - Determines where document images for retryable find and modifies are to be stored. 
- set_at: [ startup, runtime ] - cpp_vartype: AtomicWord - cpp_varname: gStoreFindAndModifyImagesInSideCollection - default: true - enableReconfigRollbackCommittedWritesCheck: description: >- Enables the reconfig check to ensure that committed writes cannot be rolled back in @@ -760,36 +752,34 @@ server_parameters: cpp_vartype: bool cpp_varname: disableTransitionFromLatestToLastContinuous default: true - -feature_flags: - featureFlagRetryableFindAndModify: + + unsupportedSyncSource: description: >- - When enabled, storeFindAndModifyImagesInOplog=false will change the location of any - document images for retryable find and modifies. - cpp_varname: feature_flags::gFeatureFlagRetryableFindAndModify - default: true - version: 5.1 + **Not a supported feature**. Specifies the host/port for a node to use as a sync source. + It is a fatal error to specify a node that is not a part of the replica set config or to + specify the node itself. + set_at: startup + cpp_vartype: std::string + cpp_varname: unsupportedSyncSource + default: "" + validator: { callback: 'validateHostAndPort' } +feature_flags: featureFlagShardMerge: description: When enabled, multitenant migration uses the "shard merge" protocol. cpp_varname: feature_flags::gShardMerge default: false + shouldBeFCVGated: true featureFlagShardSplit: description: When enabled, multitenant migration can use the shard split commands. cpp_varname: feature_flags::gShardSplit default: true version: 6.3 + shouldBeFCVGated: true - featureFlagDowngradingToUpgrading: - description: When enabled, - allow kDowngradingFromLatestToLastLTS -> kUpgrading -> kUpgraded path. - cpp_varname: feature_flags::gDowngradingToUpgrading - default: true - version: 7.0 - - featureFlagApplyPreparedTxnsInParallel: - description: when enabled, secondaries will apply prepared transactions in parallel. - cpp_varname: feature_flags::gApplyPreparedTxnsInParallel - default: true - version: 7.0 + featureFlagSecondaryIndexChecksInDbCheck: + description: When enabled, dbCheck runs document and secondary index consistency checks in addition to replica set data consistency checks. + cpp_varname: feature_flags::gSecondaryIndexChecksInDbCheck + default: false + shouldBeFCVGated: true diff --git a/src/mongo/db/repl/repl_set_command.cpp b/src/mongo/db/repl/repl_set_command.cpp index 510d75d2c0ada..b5cf0a3f6ffe7 100644 --- a/src/mongo/db/repl/repl_set_command.cpp +++ b/src/mongo/db/repl/repl_set_command.cpp @@ -27,21 +27,20 @@ * it in the license file. 
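The unsupportedSyncSource parameter added above is startup-only and guarded by a validator callback. The real validateHostAndPort is defined elsewhere in the tree; purely as an illustration of why a callback is attached instead of accepting the raw string, a hypothetical check could look like:

    #include <stdexcept>
    #include <string>

    // Hypothetical illustration only; not the actual validateHostAndPort callback.
    void validateHostAndPortSketch(const std::string& value) {
        if (value.empty())
            return;  // default: parameter unset, feature not in use
        auto colon = value.rfind(':');
        if (colon == std::string::npos || colon == 0 || colon + 1 == value.size())
            throw std::invalid_argument("expected <host>:<port>");
        // Per the parameter description, naming the node itself or a host that is
        // not in the replica set config is treated as a fatal error later on.
    }

    int main() {
        validateHostAndPortSketch("node1.example.net:27017");  // accepted shape
        return 0;
    }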
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/repl_set_command.h" - +#include "mongo/base/error_codes.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" namespace mongo { namespace repl { Status ReplSetCommand::checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - getAuthActionSet())) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), getAuthActionSet())) { return {ErrorCodes::Unauthorized, "Unauthorized"}; } diff --git a/src/mongo/db/repl/repl_set_command.h b/src/mongo/db/repl/repl_set_command.h index b18a3538c69d4..502471d669465 100644 --- a/src/mongo/db/repl/repl_set_command.h +++ b/src/mongo/db/repl/repl_set_command.h @@ -31,7 +31,15 @@ #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/repl/repl_set_commands.cpp b/src/mongo/db/repl/repl_set_commands.cpp index 0392c393141e8..ebbf0cb76a1b6 100644 --- a/src/mongo/db/repl/repl_set_commands.cpp +++ b/src/mongo/db/repl/repl_set_commands.cpp @@ -32,39 +32,76 @@ LOGV2_DEBUG_OPTIONS( \ ID, DLEVEL, {logv2::LogComponent::kReplicationHeartbeats}, MESSAGE, ##__VA_ARGS__) -#include - -#include "mongo/db/repl/repl_set_command.h" - -#include "mongo/base/init.h" +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" -#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_command.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_coordinator_external_state.h" 
#include "mongo/db/repl/replication_coordinator_external_state_impl.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/update_position_args.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/storage_engine.h" -#include "mongo/executor/network_interface.h" #include "mongo/logv2/log.h" -#include "mongo/transport/session.h" -#include "mongo/transport/transport_layer.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/decimal_counter.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/sockaddr.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -281,8 +318,8 @@ HostAndPort someHostAndPortForMe() { // We are listening externally, but we don't have a definite hostname. // Ask the OS. std::string h = getHostName(); - verify(!h.empty()); - verify(h != "localhost"); + MONGO_verify(!h.empty()); + MONGO_verify(h != "localhost"); return HostAndPort(h, serverGlobalParams.port); } @@ -733,7 +770,7 @@ bool replHasDatabases(OperationContext* opCtx) { if (dbNames.size() >= 2) return true; if (dbNames.size() == 1) { - if (dbNames[0].db() != "local") + if (!dbNames[0].isLocalDB()) return true; // we have a local database. return true if oplog isn't empty diff --git a/src/mongo/db/repl/repl_set_config.cpp b/src/mongo/db/repl/repl_set_config.cpp index 8f0f8a2a083b1..aa19764bff0ae 100644 --- a/src/mongo/db/repl/repl_set_config.cpp +++ b/src/mongo/db/repl/repl_set_config.cpp @@ -28,23 +28,44 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/repl_set_config.h" - +#include #include +#include +#include #include -#include -#include - -#include "mongo/bson/util/bson_check.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/mongod_options.h" +#include // IWYU pragma: keep +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/repl/member_config_gen.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_config_params_gen.h" +#include "mongo/db/repl/repl_set_write_concern_mode_definitions.h" +#include "mongo/db/repl/split_horizon.h" #include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/cidr.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/repl_set_config.h b/src/mongo/db/repl/repl_set_config.h index b6e34afdc19c5..325d7b5efad82 100644 --- a/src/mongo/db/repl/repl_set_config.h +++ 
b/src/mongo/db/repl/repl_set_config.h @@ -29,16 +29,36 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/client/connection_string.h" #include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config_gen.h" #include "mongo/db/repl/repl_set_tag.h" #include "mongo/db/write_concern_options.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/repl/repl_set_config_checks.cpp b/src/mongo/db/repl/repl_set_config_checks.cpp index 1a6e39194dee4..56265c73ef0be 100644 --- a/src/mongo/db/repl/repl_set_config_checks.cpp +++ b/src/mongo/db/repl/repl_set_config_checks.cpp @@ -27,19 +27,38 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/repl_set_config_checks.h" - +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include #include - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_config_gen.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_config_checks.h" +#include "mongo/db/repl/repl_set_config_gen.h" #include "mongo/db/repl/replication_coordinator_external_state.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/repl_set_config_checks.h b/src/mongo/db/repl/repl_set_config_checks.h index 5cc421bb464e4..47635727af8c1 100644 --- a/src/mongo/db/repl/repl_set_config_checks.h +++ b/src/mongo/db/repl/repl_set_config_checks.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/util/net/hostandport.h" diff --git a/src/mongo/db/repl/repl_set_config_checks_test.cpp b/src/mongo/db/repl/repl_set_config_checks_test.cpp index 0ad4082172afc..2bc8fba50bec6 100644 --- a/src/mongo/db/repl/repl_set_config_checks_test.cpp +++ b/src/mongo/db/repl/repl_set_config_checks_test.cpp @@ -27,23 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_config_checks.h" -#include "mongo/db/repl/replication_coordinator_external_state.h" #include "mongo/db/repl/replication_coordinator_external_state_mock.h" -#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" #include "mongo/unittest/ensure_fcv.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_config_test.cpp b/src/mongo/db/repl/repl_set_config_test.cpp index dd260bc9994f4..989bf6938997e 100644 --- a/src/mongo/db/repl/repl_set_config_test.cpp +++ b/src/mongo/db/repl/repl_set_config_test.cpp @@ -27,19 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/element.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_config_checks.h" #include "mongo/db/repl/repl_set_config_test.h" #include "mongo/db/server_options.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/safe_num.h" #include "mongo/util/scopeguard.h" namespace mongo { @@ -896,7 +909,7 @@ TEST(ReplSetConfig, ConfigServerField) { ASSERT_FALSE(configBSON.hasField("configsvr")); // Configs in which configsvr is not the same as the --configsvr flag are invalid. 
- serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; ON_BLOCK_EXIT([&] { serverGlobalParams.clusterRole = ClusterRole::None; }); ASSERT_OK(config.validate()); @@ -1103,7 +1116,7 @@ TEST(ReplSetConfig, ConfigServerFieldDefaults) { OID::gen())); ASSERT_FALSE(config2.getConfigServer()); - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; ON_BLOCK_EXIT([&] { serverGlobalParams.clusterRole = ClusterRole::None; }); ReplSetConfig config3; @@ -1474,7 +1487,7 @@ TEST(ReplSetConfig, CheckConfigServerCantHaveSecondaryDelaySecs) { } TEST(ReplSetConfig, CheckConfigServerMustHaveTrueForWriteConcernMajorityJournalDefault) { - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; ON_BLOCK_EXIT([&] { serverGlobalParams.clusterRole = ClusterRole::None; }); ReplSetConfig configA; configA = ReplSetConfig::parse(BSON("_id" diff --git a/src/mongo/db/repl/repl_set_config_test.h b/src/mongo/db/repl/repl_set_config_test.h index 702a2696115b9..f1a4dbb717d05 100644 --- a/src/mongo/db/repl/repl_set_config_test.h +++ b/src/mongo/db/repl/repl_set_config_test.h @@ -29,8 +29,25 @@ #pragma once +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/db/basic_types.h" #include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_tag.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_config_validators.cpp b/src/mongo/db/repl/repl_set_config_validators.cpp index 065c8274c77ea..cc5c69383fd7e 100644 --- a/src/mongo/db/repl/repl_set_config_validators.cpp +++ b/src/mongo/db/repl/repl_set_config_validators.cpp @@ -27,10 +27,8 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/repl_set_config_gen.h" #include "mongo/db/repl/repl_set_config_validators.h" +#include "mongo/db/repl/repl_set_config_gen.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_config_validators.h b/src/mongo/db/repl/repl_set_config_validators.h index 56eb9e80dd81a..938e0dde9262c 100644 --- a/src/mongo/db/repl/repl_set_config_validators.h +++ b/src/mongo/db/repl/repl_set_config_validators.h @@ -29,10 +29,20 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/oid.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/write_concern_options.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/repl/repl_set_get_status_cmd.cpp b/src/mongo/db/repl/repl_set_get_status_cmd.cpp index 7b4e4a171ddc8..31a37fd413bd4 100644 --- a/src/mongo/db/repl/repl_set_get_status_cmd.cpp +++ b/src/mongo/db/repl/repl_set_get_status_cmd.cpp @@ -27,10 +27,22 @@ * it in the license file. 
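The test updates above replace the single ClusterRole::ConfigServer assignment with a brace-initialized pair of roles, reflecting that a config server is now also treated as a shard server. A minimal sketch of modelling the role as a set rather than a single value (simplified, hypothetical helper; the real ClusterRole type is richer):

    #include <cassert>
    #include <initializer_list>

    enum class Role { None, ShardServer, ConfigServer };

    // Hypothetical role set: a node may hold several roles at once.
    struct ClusterRoleSet {
        bool shard = false;
        bool config = false;
        ClusterRoleSet() = default;
        ClusterRoleSet(std::initializer_list<Role> roles) {
            for (Role r : roles) {
                if (r == Role::ShardServer)
                    shard = true;
                if (r == Role::ConfigServer)
                    config = true;
            }
        }
        bool has(Role r) const {
            if (r == Role::ShardServer)
                return shard;
            if (r == Role::ConfigServer)
                return config;
            return !shard && !config;  // Role::None: no other role held
        }
    };

    int main() {
        ClusterRoleSet role{Role::ShardServer, Role::ConfigServer};
        assert(role.has(Role::ConfigServer) && role.has(Role::ShardServer));
        return 0;
    }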
*/ +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/database_name.h" #include "mongo/db/not_primary_error_tracker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/repl_set_command.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp index ad35ba994278c..ed8fa9500593e 100644 --- a/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp +++ b/src/mongo/db/repl/repl_set_heartbeat_args_v1.cpp @@ -27,14 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/repl/repl_set_heartbeat_args_v1.h" - -#include "mongo/bson/util/bson_check.h" +#include "mongo/base/error_codes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/repl/repl_set_heartbeat_args_v1.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_heartbeat_args_v1.h b/src/mongo/db/repl/repl_set_heartbeat_args_v1.h index 4687ff13c7819..eee09d69c44ce 100644 --- a/src/mongo/db/repl/repl_set_heartbeat_args_v1.h +++ b/src/mongo/db/repl/repl_set_heartbeat_args_v1.h @@ -31,6 +31,11 @@ #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/util/net/hostandport.h" diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.cpp b/src/mongo/db/repl/repl_set_heartbeat_response.cpp index 74a5ecac2e8f6..ef9a7f7cc1fff 100644 --- a/src/mongo/db/repl/repl_set_heartbeat_response.cpp +++ b/src/mongo/db/repl/repl_set_heartbeat_response.cpp @@ -28,17 +28,18 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/repl_set_heartbeat_response.h" - #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/bson_extract_optime.h" -#include "mongo/db/server_options.h" +#include "mongo/db/repl/repl_set_heartbeat_response.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/repl/repl_set_heartbeat_response.h b/src/mongo/db/repl/repl_set_heartbeat_response.h index ce8fc367a953f..f82a1aaa5edbb 100644 --- a/src/mongo/db/repl/repl_set_heartbeat_response.h +++ b/src/mongo/db/repl/repl_set_heartbeat_response.h @@ -31,9 +31,15 @@ #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/repl/member_state.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" namespace mongo { diff --git 
a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp index 67e9b6c9a8bd3..4739736cb9ea5 100644 --- a/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp +++ b/src/mongo/db/repl/repl_set_heartbeat_response_test.cpp @@ -27,12 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_request_votes.cpp b/src/mongo/db/repl/repl_set_request_votes.cpp index 93e1453c70fdd..bbb0333901f0b 100644 --- a/src/mongo/db/repl/repl_set_request_votes.cpp +++ b/src/mongo/db/repl/repl_set_request_votes.cpp @@ -27,18 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/client.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_command.h" #include "mongo/db/repl/repl_set_request_votes_args.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/executor/network_interface.h" -#include "mongo/transport/session.h" -#include "mongo/transport/transport_layer.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_request_votes_args.cpp b/src/mongo/db/repl/repl_set_request_votes_args.cpp index 10f815d6edced..51689dc80b087 100644 --- a/src/mongo/db/repl/repl_set_request_votes_args.cpp +++ b/src/mongo/db/repl/repl_set_request_votes_args.cpp @@ -29,11 +29,8 @@ #include "mongo/db/repl/repl_set_request_votes_args.h" -#include "mongo/bson/util/bson_check.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/bson_extract_optime.h" -#include "mongo/db/server_options.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_request_votes_args.h b/src/mongo/db/repl/repl_set_request_votes_args.h index 3d7c34e9fcfd7..7bfb688ac366c 100644 --- a/src/mongo/db/repl/repl_set_request_votes_args.h +++ b/src/mongo/db/repl/repl_set_request_votes_args.h @@ -31,6 +31,9 @@ #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" diff --git a/src/mongo/db/repl/repl_set_tag.cpp b/src/mongo/db/repl/repl_set_tag.cpp index 573892a94b968..85c7da8bcd36e 100644 --- a/src/mongo/db/repl/repl_set_tag.cpp +++ b/src/mongo/db/repl/repl_set_tag.cpp @@ -27,15 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/repl_set_tag.h" - #include -#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" -#include "mongo/db/jsobj.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/repl/repl_set_tag.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/sequence_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/repl/repl_set_tag.h b/src/mongo/db/repl/repl_set_tag.h index 420ace320523a..7a933ab003954 100644 --- a/src/mongo/db/repl/repl_set_tag.h +++ b/src/mongo/db/repl/repl_set_tag.h @@ -29,12 +29,14 @@ #pragma once +#include #include #include #include #include #include +#include "mongo/base/error_extra_info.h" #include "mongo/base/status.h" #include "mongo/base/string_data.h" diff --git a/src/mongo/db/repl/repl_set_tag_test.cpp b/src/mongo/db/repl/repl_set_tag_test.cpp index 25d84d38e6b58..12b397947d6af 100644 --- a/src/mongo/db/repl/repl_set_tag_test.cpp +++ b/src/mongo/db/repl/repl_set_tag_test.cpp @@ -28,7 +28,9 @@ */ #include "mongo/db/repl/repl_set_tag.h" -#include "mongo/unittest/unittest.h" + +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_test_egress.cpp b/src/mongo/db/repl/repl_set_test_egress.cpp index 6fad40ebb5103..01baa4391bb27 100644 --- a/src/mongo/db/repl/repl_set_test_egress.cpp +++ b/src/mongo/db/repl/repl_set_test_egress.cpp @@ -28,15 +28,32 @@ */ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_data.h" #include "mongo/db/repl/repl_set_test_egress_gen.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/network_interface_tl.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/db/repl/repl_set_write_concern_mode_definitions.cpp b/src/mongo/db/repl/repl_set_write_concern_mode_definitions.cpp index 623d9e049595f..6ab1533201cb2 100644 --- a/src/mongo/db/repl/repl_set_write_concern_mode_definitions.cpp +++ b/src/mongo/db/repl/repl_set_write_concern_mode_definitions.cpp @@ -27,11 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/repl/repl_set_write_concern_mode_definitions.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_set_write_concern_mode_definitions.h b/src/mongo/db/repl/repl_set_write_concern_mode_definitions.h index b8fb35de3fccc..4bc22100e6d96 100644 --- a/src/mongo/db/repl/repl_set_write_concern_mode_definitions.h +++ b/src/mongo/db/repl/repl_set_write_concern_mode_definitions.h @@ -29,10 +29,14 @@ #pragma once +#include +#include #include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/repl/repl_set_tag.h" #include "mongo/util/string_map.h" diff --git a/src/mongo/db/repl/repl_set_write_concern_mode_definitions_test.cpp b/src/mongo/db/repl/repl_set_write_concern_mode_definitions_test.cpp index e53dac5415620..bc06c9dc390f2 100644 --- a/src/mongo/db/repl/repl_set_write_concern_mode_definitions_test.cpp +++ b/src/mongo/db/repl/repl_set_write_concern_mode_definitions_test.cpp @@ -27,14 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include + +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/repl/repl_set_write_concern_mode_definitions.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/repl_settings.cpp b/src/mongo/db/repl/repl_settings.cpp index 4214f2b0ea8e6..88019ceb7cc7b 100644 --- a/src/mongo/db/repl/repl_settings.cpp +++ b/src/mongo/db/repl/repl_settings.cpp @@ -28,11 +28,14 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/repl/repl_settings.h" +#include #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/repl_sync_shared_data.cpp b/src/mongo/db/repl/repl_sync_shared_data.cpp index 8b6ffd0964ea9..94fb8f5b062db 100644 --- a/src/mongo/db/repl/repl_sync_shared_data.cpp +++ b/src/mongo/db/repl/repl_sync_shared_data.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/repl_sync_shared_data.h" namespace mongo { diff --git a/src/mongo/db/repl/replica_set_aware_service.cpp b/src/mongo/db/repl/replica_set_aware_service.cpp index 7b83cfb3d25c6..8c1a3dd4d4b66 100644 --- a/src/mongo/db/repl/replica_set_aware_service.cpp +++ b/src/mongo/db/repl/replica_set_aware_service.cpp @@ -28,11 +28,20 @@ */ -#include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" +#include + +#include #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/repl/replica_set_aware_service.h b/src/mongo/db/repl/replica_set_aware_service.h index 5192bb79482b3..b0df0834d6ddd 100644 --- a/src/mongo/db/repl/replica_set_aware_service.h +++ b/src/mongo/db/repl/replica_set_aware_service.h @@ -29,7 +29,16 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/db/cluster_role.h" #include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/repl/replica_set_aware_service_test.cpp b/src/mongo/db/repl/replica_set_aware_service_test.cpp index 93db6f650fdc9..55f78d4fdd120 100644 --- a/src/mongo/db/repl/replica_set_aware_service_test.cpp +++ b/src/mongo/db/repl/replica_set_aware_service_test.cpp @@ -27,15 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include + +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_mock.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/log_test.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/replication_auth.cpp b/src/mongo/db/repl/replication_auth.cpp index fc53b17183e03..9f5d77e3e93a2 100644 --- a/src/mongo/db/repl/replication_auth.cpp +++ b/src/mongo/db/repl/replication_auth.cpp @@ -27,14 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/replication_auth.h" - -#include - -#include "mongo/client/authenticate.h" +#include "mongo/base/error_codes.h" +#include "mongo/client/internal_auth.h" #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/replication_auth.h b/src/mongo/db/repl/replication_auth.h index e3771371eb790..ebfc92c09e69a 100644 --- a/src/mongo/db/repl/replication_auth.h +++ b/src/mongo/db/repl/replication_auth.h @@ -30,6 +30,8 @@ #pragma once +#include "mongo/base/status.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" #include "mongo/client/dbclient_cursor.h" #include "mongo/util/net/hostandport.h" diff --git a/src/mongo/db/repl/replication_consistency_markers.cpp b/src/mongo/db/repl/replication_consistency_markers.cpp index 45eb81a2940df..6bddc53d00bce 100644 --- a/src/mongo/db/repl/replication_consistency_markers.cpp +++ b/src/mongo/db/repl/replication_consistency_markers.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/replication_consistency_markers.h" namespace mongo { diff --git a/src/mongo/db/repl/replication_consistency_markers.h b/src/mongo/db/repl/replication_consistency_markers.h index e7bdef7b28ffe..d29a5b2384e0f 100644 --- a/src/mongo/db/repl/replication_consistency_markers.h +++ b/src/mongo/db/repl/replication_consistency_markers.h @@ -29,11 +29,18 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime.h" namespace mongo { class BSONObj; + class OperationContext; class Timestamp; diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.cpp b/src/mongo/db/repl/replication_consistency_markers_impl.cpp index b2af76d99fdd3..2f9b1bde95fa4 100644 --- a/src/mongo/db/repl/replication_consistency_markers_impl.cpp +++ b/src/mongo/db/repl/replication_consistency_markers_impl.cpp @@ -27,19 +27,43 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/replication_consistency_markers_impl.h" - -#include "mongo/db/bson/bson_helper.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/control/journal_flusher.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -48,7 +72,6 @@ namespace mongo { namespace repl { constexpr StringData ReplicationConsistencyMarkersImpl::kDefaultMinValidNamespace; -constexpr StringData ReplicationConsistencyMarkersImpl::kDefaultOplogTruncateAfterPointNamespace; constexpr StringData ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace; namespace { @@ -62,8 +85,7 @@ ReplicationConsistencyMarkersImpl::ReplicationConsistencyMarkersImpl( : ReplicationConsistencyMarkersImpl( storageInterface, NamespaceString(ReplicationConsistencyMarkersImpl::kDefaultMinValidNamespace), - NamespaceString( - ReplicationConsistencyMarkersImpl::kDefaultOplogTruncateAfterPointNamespace), + NamespaceString::kDefaultOplogTruncateAfterPointNamespace, NamespaceString(ReplicationConsistencyMarkersImpl::kDefaultInitialSyncIdNamespace)) {} ReplicationConsistencyMarkersImpl::ReplicationConsistencyMarkersImpl( @@ -279,7 +301,8 @@ void ReplicationConsistencyMarkersImpl::ensureFastCountOnOplogTruncateAfterPoint fassert(51265, {result.getStatus().code(), str::stream() << "More than one document was found in the '" - << kDefaultOplogTruncateAfterPointNamespace + << NamespaceString::kDefaultOplogTruncateAfterPointNamespace + .toStringForErrorMsg() << "' collection. Users should not write to this collection. Please " "delete the excess documents"}); } @@ -496,8 +519,8 @@ Status ReplicationConsistencyMarkersImpl::createInternalCollections(OperationCon auto status = _storageInterface->createCollection(opCtx, nss, CollectionOptions()); if (!status.isOK() && status.code() != ErrorCodes::NamespaceExists) { return {ErrorCodes::CannotCreateCollection, - str::stream() << "Failed to create collection. Ns: " << nss.ns() - << " Error: " << status.toString()}; + str::stream() << "Failed to create collection. 
Ns: " + << nss.toStringForErrorMsg() << " Error: " << status.toString()}; } } return Status::OK(); @@ -520,18 +543,27 @@ void ReplicationConsistencyMarkersImpl::setInitialSyncIdIfNotSet(OperationContex _initialSyncIdNss, TimestampedBSONObj{doc, Timestamp()}, OpTime::kUninitializedTerm)); + _initialSyncId = doc; } else if (!prevId.isOK()) { fassertFailedWithStatus(4608504, prevId.getStatus()); + } else { + _initialSyncId = prevId.getValue(); } } void ReplicationConsistencyMarkersImpl::clearInitialSyncId(OperationContext* opCtx) { fassert(4608501, _storageInterface->dropCollection(opCtx, _initialSyncIdNss)); + _initialSyncId = BSONObj(); } BSONObj ReplicationConsistencyMarkersImpl::getInitialSyncId(OperationContext* opCtx) { + if (!_initialSyncId.isEmpty()) { + return _initialSyncId; + } + auto idStatus = _storageInterface->findSingleton(opCtx, _initialSyncIdNss); if (idStatus.isOK()) { + _initialSyncId = idStatus.getValue(); return idStatus.getValue(); } if (idStatus.getStatus() != ErrorCodes::CollectionIsEmpty && diff --git a/src/mongo/db/repl/replication_consistency_markers_impl.h b/src/mongo/db/repl/replication_consistency_markers_impl.h index e6cd6597680f4..56e44d01a2624 100644 --- a/src/mongo/db/repl/replication_consistency_markers_impl.h +++ b/src/mongo/db/repl/replication_consistency_markers_impl.h @@ -29,9 +29,19 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_consistency_markers_gen.h" +#include "mongo/platform/mutex.h" namespace mongo { @@ -51,8 +61,6 @@ class ReplicationConsistencyMarkersImpl : public ReplicationConsistencyMarkers { public: static constexpr StringData kDefaultMinValidNamespace = "local.replset.minvalid"_sd; - static constexpr StringData kDefaultOplogTruncateAfterPointNamespace = - "local.replset.oplogTruncateAfterPoint"_sd; static constexpr StringData kDefaultInitialSyncIdNamespace = "local.replset.initialSyncId"_sd; explicit ReplicationConsistencyMarkersImpl(StorageInterface* storageInterface); @@ -162,6 +170,9 @@ class ReplicationConsistencyMarkersImpl : public ReplicationConsistencyMarkers { // serialization that exists in setting the oplog truncate after point. boost::optional _lastNoHolesOplogTimestamp; boost::optional _lastNoHolesOplogOpTimeAndWallTime; + + // Cached initialSyncId from last initial sync. Will only be set on startup or initial sync. + BSONObj _initialSyncId; }; } // namespace repl diff --git a/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp b/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp index cf24bb49a4cea..42af123cc24a4 100644 --- a/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp +++ b/src/mongo/db/repl/replication_consistency_markers_impl_test.cpp @@ -27,28 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/replication_consistency_markers_impl.h" - +#include #include +#include +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/storage/recovery_unit_noop.h" -#include "mongo/db/storage/storage_engine_impl.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/journal_listener.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" -#include "mongo/util/str.h" +#include "mongo/util/duration.h" namespace mongo { namespace { @@ -66,7 +72,7 @@ NamespaceString kInitialSyncIdNss = * Returns min valid document. */ BSONObj getMinValidDocument(OperationContext* opCtx, const NamespaceString& minValidNss) { - return writeConflictRetry(opCtx, "getMinValidDocument", minValidNss.ns(), [opCtx, minValidNss] { + return writeConflictRetry(opCtx, "getMinValidDocument", minValidNss, [opCtx, minValidNss] { Lock::DBLock dblk(opCtx, minValidNss.dbName(), MODE_IS); Lock::CollectionLock lk(opCtx, minValidNss, MODE_IS); BSONObj mv; diff --git a/src/mongo/db/repl/replication_consistency_markers_mock.cpp b/src/mongo/db/repl/replication_consistency_markers_mock.cpp index 9e6e1adbe1860..72461c925e8f9 100644 --- a/src/mongo/db/repl/replication_consistency_markers_mock.cpp +++ b/src/mongo/db/repl/replication_consistency_markers_mock.cpp @@ -27,9 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include #include "mongo/db/repl/replication_consistency_markers_mock.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/replication_consistency_markers_mock.h b/src/mongo/db/repl/replication_consistency_markers_mock.h index 7fe15f9489d47..6c1df76855960 100644 --- a/src/mongo/db/repl/replication_consistency_markers_mock.h +++ b/src/mongo/db/repl/replication_consistency_markers_mock.h @@ -29,6 +29,11 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/repl/replication_coordinator.cpp b/src/mongo/db/repl/replication_coordinator.cpp index bfccf8a98be34..be1bd98f0cc91 100644 --- a/src/mongo/db/repl/replication_coordinator.cpp +++ b/src/mongo/db/repl/replication_coordinator.cpp @@ -28,13 +28,14 @@ */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/service_context.h" -#include "mongo/logv2/log.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -90,7 +91,7 @@ bool ReplicationCoordinator::isOplogDisabledFor(OperationContext* opCtx, } bool ReplicationCoordinator::isOplogDisabledForNS(const NamespaceString& nss) { - if (nss.isLocal()) { + if (nss.isLocalDB()) { return true; } diff --git a/src/mongo/db/repl/replication_coordinator.h b/src/mongo/db/repl/replication_coordinator.h index 078aa9b628460..c4c43c37eae1c 100644 --- a/src/mongo/db/repl/replication_coordinator.h +++ b/src/mongo/db/repl/replication_coordinator.h @@ -29,30 +29,56 @@ #pragma once -#include "mongo/db/repl/replication_coordinator_fwd.h" - +#include +#include +#include +#include +#include +#include +#include #include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_config.h" #include "mongo/db/repl/member_data.h" #include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_heartbeat_response.h" #include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator_fwd.h" #include "mongo/db/repl/split_horizon.h" #include "mongo/db/repl/split_prepare_session_manager.h" #include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_init.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/topology_version_gen.h" #include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/interruptible.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" 
namespace mongo { class BSONObj; class BSONObjBuilder; + class CommitQuorumOptions; class IndexDescriptor; class NamespaceString; @@ -75,13 +101,17 @@ class ReplSetMetadata; namespace repl { class BackgroundSync; + class HelloResponse; class OpTime; class OpTimeAndWallTime; + class ReadConcernArgs; class ReplSetConfig; + class ReplSetHeartbeatArgsV1; class ReplSetHeartbeatResponse; + class ReplSetRequestVotesArgs; class ReplSetRequestVotesResponse; class UpdatePositionArgs; @@ -270,13 +300,15 @@ class ReplicationCoordinator : public SyncSourceSelector { * NOTE: This function can only be meaningfully called while the caller holds the * ReplicationStateTransitionLock in some mode other than MODE_NONE. */ - virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) = 0; + virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, + const DatabaseName& dbName) = 0; /** * Version which does not check for the RSTL. Do not use in new code. Without the RSTL, the * return value may be inaccurate by the time the function returns. */ - virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) = 0; + virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, + const DatabaseName& dbName) = 0; /** * Returns true if it is valid for this node to accept writes on the given namespace. @@ -386,9 +418,12 @@ class ReplicationCoordinator : public SyncSourceSelector { * "oplog holes" from oplog entries with earlier timestamps which commit after this one) * this method does not notify oplog waiters. Callers which know the new lastApplied is at * a no-holes point should call signalOplogWaiters after calling this method. + * + * If advanceGlobalTimestamp is false, we will not advance the global OpTime. The caller takes + * responsibility for doing this instead. */ virtual void setMyLastAppliedOpTimeAndWallTimeForward( - const OpTimeAndWallTime& opTimeAndWallTime) = 0; + const OpTimeAndWallTime& opTimeAndWallTime, bool advanceGlobalTimestamp = true) = 0; /** * Updates our internal tracking of the last OpTime durable to this node, but only @@ -653,6 +688,11 @@ class ReplicationCoordinator : public SyncSourceSelector { */ virtual std::vector getConfigVotingMembers() const = 0; + /** + * Returns how many voting members there are for the current ReplSetConfig. + */ + virtual size_t getNumConfigVotingMembers() const = 0; + /** * Returns the current ReplSetConfig's term. */ @@ -1198,6 +1238,12 @@ class ReplicationCoordinator : public SyncSourceSelector { */ virtual bool isRetryableWrite(OperationContext* opCtx) const = 0; + /** + * Returns the in-memory initialSyncId from last initial sync. boost::none will be returned if + * there is no initial sync. 
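The new `advanceGlobalTimestamp` parameter documented above lets a caller that has already advanced the global timestamp skip that step when publishing a new last-applied optime. A small sketch of the same opt-out pattern with a monotonically advancing atomic standing in for the global timestamp (plain integers here; not the real `OpTimeAndWallTime` types):

```cpp
// Sketch: publish last-applied, optionally advancing a never-decreasing global timestamp.
#include <algorithm>
#include <atomic>
#include <cstdint>

std::atomic<std::uint64_t> globalTimestamp{0};
std::uint64_t lastApplied = 0;  // assume guarded by the coordinator's mutex in real code

void setLastAppliedForward(std::uint64_t ts, bool advanceGlobalTimestamp = true) {
    if (advanceGlobalTimestamp) {
        // Keep the global timestamp >= any last-applied value we are about to publish.
        std::uint64_t current = globalTimestamp.load();
        while (current < ts && !globalTimestamp.compare_exchange_weak(current, ts)) {
        }
    }
    lastApplied = std::max(lastApplied, ts);  // only ever move forward
}
```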
+ */ + virtual boost::optional getInitialSyncId(OperationContext* opCtx) = 0; + protected: ReplicationCoordinator(); }; diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp index 1b659ed1b0a71..46f5c468890af 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp @@ -29,97 +29,123 @@ #include "mongo/db/repl/replication_coordinator_external_state_impl.h" +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include #include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/bson/oid.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/coll_mod.h" -#include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/local_oplog_info.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/change_stream_change_collection_manager.h" #include "mongo/db/change_stream_pre_images_collection_manager.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/commands/rwc_defaults_commands_gen.h" -#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/free_mon/free_mon_mongod.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/logical_time.h" #include "mongo/db/logical_time_validator.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/read_write_concern_defaults_gen.h" #include "mongo/db/repl/always_allow_non_local_writes.h" #include "mongo/db/repl/bgsync.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/isself.h" #include "mongo/db/repl/last_vote.h" -#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/noop_writer.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier_impl.h" #include "mongo/db/repl/oplog_buffer_blocking_queue.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_metrics.h" #include 
"mongo/db/repl/replication_process.h" #include "mongo/db/repl/storage_interface.h" -#include "mongo/db/s/balancer/balancer.h" #include "mongo/db/s/config/index_on_config.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/migration_util.h" #include "mongo/db/s/periodic_sharded_index_consistency_checker.h" +#include "mongo/db/s/range_deletion_task_gen.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" -#include "mongo/db/s/shard_local.h" #include "mongo/db/s/sharding_initialization_mongod.h" -#include "mongo/db/s/sharding_state_recovery.h" +#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_util.h" #include "mongo/db/s/transaction_coordinator_service.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" -#include "mongo/db/session/kill_sessions_local.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_role.h" #include "mongo/db/storage/control/journal_flusher.h" #include "mongo/db/storage/flow_control.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot_manager.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/system_index.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_metadata_hook.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/executor/network_connection_hook.h" -#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/s/catalog/type_shard.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_identity_loader.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" #include "mongo/s/sharding_feature_flags_gen.h" #include "mongo/stdx/thread.h" #include "mongo/transport/service_entry_point.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/concurrency/thread_pool.h" -#include "mongo/util/exit.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" #include "mongo/util/net/hostandport.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" @@ -131,8 +157,6 @@ namespace mongo { namespace repl { namespace { -const char kLocalDbName[] = "local"; - MONGO_FAIL_POINT_DEFINE(dropPendingCollectionReaperHang); // The count of items in the buffer @@ -147,6 +171,12 @@ auto makeThreadPool(const std::string& poolName, const std::string& threadName) threadPoolOptions.poolName = poolName; threadPoolOptions.onCreateThread = [](const std::string& threadName) { Client::initThread(threadName.c_str()); + + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + AuthorizationSession::get(cc())->grantInternalAuthorization(&cc()); }; return std::make_unique(threadPoolOptions); @@ -415,16 +445,23 @@ Status 
ReplicationCoordinatorExternalStateImpl::initializeReplSetStorage(Operati writeConflictRetry(opCtx, "initiate oplog entry", - NamespaceString::kRsOplogNamespace.toString(), + NamespaceString::kRsOplogNamespace, [this, &opCtx, &config] { // Permit writing to the oplog before we step up to primary. AllowNonLocalWritesBlock allowNonLocalWrites(opCtx); Lock::GlobalWrite globalWrite(opCtx); + auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest( + NamespaceString::kSystemReplSetNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_X); { // Writes to 'local.system.replset' must be untimestamped. WriteUnitOfWork wuow(opCtx); - Helpers::putSingleton( - opCtx, NamespaceString::kSystemReplSetNamespace, config); + Helpers::putSingleton(opCtx, coll, config); wuow.commit(); } { @@ -494,15 +531,20 @@ OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationC _replicationProcess->getConsistencyMarkers()->clearAppliedThrough(opCtx); LOGV2(6015309, "Logging transition to primary to oplog on stepup"); - writeConflictRetry(opCtx, "logging transition to primary to oplog", "local.oplog.rs", [&] { - AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); - WriteUnitOfWork wuow(opCtx); - opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage( - opCtx, - BSON(ReplicationCoordinator::newPrimaryMsgField - << ReplicationCoordinator::newPrimaryMsg)); - wuow.commit(); - }); + writeConflictRetry( + opCtx, "logging transition to primary to oplog", NamespaceString::kRsOplogNamespace, [&] { + AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); + WriteUnitOfWork wuow(opCtx); + opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage( + opCtx, + BSON(ReplicationCoordinator::newPrimaryMsgField + << ReplicationCoordinator::newPrimaryMsg)); + wuow.commit(); + }); + // As far as the storage system is concerned, we're still secondary here, and will be until we + // change readWriteAbility. So new and resumed lock-free reads will read from lastApplied. We + // just advanced lastApplied by writing the no-op, so we need to signal oplog waiters. + signalOplogWaiters(); const auto loadLastOpTimeAndWallTimeResult = loadLastOpTimeAndWallTime(opCtx); fassert(28665, loadLastOpTimeAndWallTimeResult); auto opTimeToReturn = loadLastOpTimeAndWallTimeResult.getValue().opTime; @@ -542,7 +584,7 @@ OpTime ReplicationCoordinatorExternalStateImpl::onTransitionToPrimary(OperationC // Create the pre-images collection if it doesn't exist yet in the non-serverless environment. 
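The comment added above explains why `signalOplogWaiters()` is now called right after the step-up no-op: lock-free reads still observe lastApplied at this point, and whoever advances lastApplied must wake the waiters or they sleep until the next unrelated advance. A minimal sketch of that waiter/notifier relationship using a condition variable (plain integers stand in for optimes):

```cpp
// Sketch: waiters block until lastApplied reaches a target; advancing it must notify them.
#include <condition_variable>
#include <cstdint>
#include <mutex>

std::mutex mtx;
std::condition_variable oplogWaiters;
std::uint64_t lastApplied = 0;

void advanceLastAppliedAndSignal(std::uint64_t ts) {
    {
        std::lock_guard<std::mutex> lk(mtx);
        if (ts > lastApplied)
            lastApplied = ts;
    }
    oplogWaiters.notify_all();  // the "signalOplogWaiters" step
}

void waitForLastApplied(std::uint64_t target) {
    std::unique_lock<std::mutex> lk(mtx);
    oplogWaiters.wait(lk, [&] { return lastApplied >= target; });
}
```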
if (!change_stream_serverless_helpers::isChangeCollectionsModeActive()) { - ChangeStreamPreImagesCollectionManager::createPreImagesCollection( + ChangeStreamPreImagesCollectionManager::get(opCtx).createPreImagesCollection( opCtx, boost::none /* tenantId */); } @@ -559,17 +601,14 @@ StatusWith ReplicationCoordinatorExternalStateImpl::loadLocalConfigDocu OperationContext* opCtx) { try { return writeConflictRetry( - opCtx, - "load replica set config", - NamespaceString::kSystemReplSetNamespace.ns(), - [opCtx] { + opCtx, "load replica set config", NamespaceString::kSystemReplSetNamespace, [opCtx] { BSONObj config; if (!Helpers::getSingleton( opCtx, NamespaceString::kSystemReplSetNamespace, config)) { return StatusWith( ErrorCodes::NoMatchingDocument, "Did not find replica set configuration document in {}"_format( - NamespaceString::kSystemReplSetNamespace.toString())); + NamespaceString::kSystemReplSetNamespace.toStringForErrorMsg())); } return StatusWith(config); }); @@ -583,12 +622,19 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(Operati bool writeOplog) { try { writeConflictRetry( - opCtx, "save replica set config", NamespaceString::kSystemReplSetNamespace.ns(), [&] { + opCtx, "save replica set config", NamespaceString::kSystemReplSetNamespace, [&] { { // Writes to 'local.system.replset' must be untimestamped. WriteUnitOfWork wuow(opCtx); - AutoGetCollection coll(opCtx, NamespaceString::kSystemReplSetNamespace, MODE_X); - Helpers::putSingleton(opCtx, NamespaceString::kSystemReplSetNamespace, config); + auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest( + NamespaceString::kSystemReplSetNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_X); + Helpers::putSingleton(opCtx, coll, config); wuow.commit(); } @@ -615,11 +661,18 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalConfigDocument(Operati Status ReplicationCoordinatorExternalStateImpl::replaceLocalConfigDocument( OperationContext* opCtx, const BSONObj& config) try { writeConflictRetry( - opCtx, "replace replica set config", NamespaceString::kSystemReplSetNamespace.ns(), [&] { + opCtx, "replace replica set config", NamespaceString::kSystemReplSetNamespace, [&] { WriteUnitOfWork wuow(opCtx); - AutoGetCollection coll(opCtx, NamespaceString::kSystemReplSetNamespace, MODE_X); - Helpers::emptyCollection(opCtx, NamespaceString::kSystemReplSetNamespace); - Helpers::putSingleton(opCtx, NamespaceString::kSystemReplSetNamespace, config); + auto coll = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + NamespaceString::kSystemReplSetNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_X); + Helpers::emptyCollection(opCtx, coll); + Helpers::putSingleton(opCtx, coll, config); wuow.commit(); }); return Status::OK(); @@ -634,27 +687,32 @@ Status ReplicationCoordinatorExternalStateImpl::createLocalLastVoteCollection( if (!status.isOK() && status.code() != ErrorCodes::NamespaceExists) { return {ErrorCodes::CannotCreateCollection, str::stream() << "Failed to create local last vote collection. Ns: " - << NamespaceString::kLastVoteNamespace.toString() + << NamespaceString::kLastVoteNamespace.toStringForErrorMsg() << " Error: " << status.toString()}; } // Make sure there's always a last vote document. 
try { - writeConflictRetry( - opCtx, - "create initial replica set lastVote", - NamespaceString::kLastVoteNamespace.toString(), - [opCtx] { - AutoGetCollection coll(opCtx, NamespaceString::kLastVoteNamespace, MODE_X); - BSONObj result; - bool exists = - Helpers::getSingleton(opCtx, NamespaceString::kLastVoteNamespace, result); - if (!exists) { - LastVote lastVote{OpTime::kInitialTerm, -1}; - Helpers::putSingleton( - opCtx, NamespaceString::kLastVoteNamespace, lastVote.toBSON()); - } - }); + writeConflictRetry(opCtx, + "create initial replica set lastVote", + NamespaceString::kLastVoteNamespace, + [opCtx] { + auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest( + NamespaceString::kLastVoteNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_X); + + BSONObj result; + bool exists = Helpers::getSingleton(opCtx, coll.nss(), result); + if (!exists) { + LastVote lastVote{OpTime::kInitialTerm, -1}; + Helpers::putSingleton(opCtx, coll, lastVote.toBSON()); + } + }); } catch (const DBException& ex) { return ex.toStatus(); } @@ -666,17 +724,14 @@ StatusWith ReplicationCoordinatorExternalStateImpl::loadLocalLastVoteD OperationContext* opCtx) { try { return writeConflictRetry( - opCtx, - "load replica set lastVote", - NamespaceString::kLastVoteNamespace.toString(), - [opCtx] { + opCtx, "load replica set lastVote", NamespaceString::kLastVoteNamespace, [opCtx] { BSONObj lastVoteObj; if (!Helpers::getSingleton( opCtx, NamespaceString::kLastVoteNamespace, lastVoteObj)) { return StatusWith( ErrorCodes::NoMatchingDocument, str::stream() << "Did not find replica set lastVote document in " - << NamespaceString::kLastVoteNamespace.toString()); + << NamespaceString::kLastVoteNamespace.toStringForErrorMsg()); } return LastVote::readFromLastVote(lastVoteObj); }); @@ -709,16 +764,20 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument( noInterrupt.emplace(opCtx->lockState()); Status status = writeConflictRetry( - opCtx, - "save replica set lastVote", - NamespaceString::kLastVoteNamespace.toString(), - [&] { + opCtx, "save replica set lastVote", NamespaceString::kLastVoteNamespace, [&] { // Writes to non-replicated collections do not need concurrency control with the // OplogApplier that never accesses them. Skip taking the PBWM. 
ShouldNotConflictWithSecondaryBatchApplicationBlock shouldNotConflictBlock( opCtx->lockState()); - AutoGetCollection coll(opCtx, NamespaceString::kLastVoteNamespace, MODE_IX); + auto coll = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + NamespaceString::kLastVoteNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); WriteUnitOfWork wunit(opCtx); // We only want to replace the last vote document if the new last vote document @@ -735,7 +794,7 @@ Status ReplicationCoordinatorExternalStateImpl::storeLocalLastVoteDocument( return oldLastVoteDoc.getStatus(); } if (lastVote.getTerm() > oldLastVoteDoc.getValue().getTerm()) { - Helpers::putSingleton(opCtx, NamespaceString::kLastVoteNamespace, lastVoteObj); + Helpers::putSingleton(opCtx, coll, lastVoteObj); } wunit.commit(); return Status::OK(); @@ -778,14 +837,13 @@ StatusWith ReplicationCoordinatorExternalStateImpl::loadLastO BSONObj oplogEntry; - if (!writeConflictRetry( - opCtx, "Load last opTime", NamespaceString::kRsOplogNamespace.ns().c_str(), [&] { - return Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntry); - })) { - return StatusWith(ErrorCodes::NoMatchingDocument, - str::stream() - << "Did not find any entries in " - << NamespaceString::kRsOplogNamespace.ns()); + if (!writeConflictRetry(opCtx, "Load last opTime", NamespaceString::kRsOplogNamespace, [&] { + return Helpers::getLast(opCtx, NamespaceString::kRsOplogNamespace, oplogEntry); + })) { + return StatusWith( + ErrorCodes::NoMatchingDocument, + str::stream() << "Did not find any entries in " + << NamespaceString::kRsOplogNamespace.toStringForErrorMsg()); } return OpTimeAndWallTime::parseOpTimeAndWallTimeFromOplogEntry(oplogEntry); @@ -939,24 +997,11 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook PeriodicShardedIndexConsistencyChecker::get(_service).onStepUp(_service); TransactionCoordinatorService::get(_service)->onStepUp(opCtx); - // (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here. - if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()) { - CatalogCacheLoader::get(_service).onStepUp(); - } + CatalogCacheLoader::get(_service).onStepUp(); } if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { if (ShardingState::get(opCtx)->enabled()) { VectorClockMutable::get(opCtx)->recoverDirect(opCtx); - Status status = ShardingStateRecovery_DEPRECATED::recover(opCtx); - - // If the node is shutting down or it lost quorum just as it was becoming primary, don't - // run the sharding onStepUp machinery. The onStepDown counterpart to these methods is - // already idempotent, so the machinery will remain in the stepped down state. - if (ErrorCodes::isShutdownError(status.code()) || - ErrorCodes::isNotPrimaryError(status.code())) { - return; - } - fassert(40107, status); CatalogCacheLoader::get(_service).onStepUp(); @@ -970,14 +1015,9 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook ShardingInitializationMongoD::get(opCtx)->updateShardIdentityConfigString( opCtx, configsvrConnStr); - // Note, these must be done after the configOpTime is recovered via - // ShardingStateRecovery::recover above, because they may trigger filtering metadata - // refreshes which should use the recovered configOpTime. - // (Ignore FCV check): This feature flag doesn't have upgrade/downgrade concern. The - // feature flag is used to turn on new range deleter on startup. 
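As the hunk above notes, the last-vote document is only replaced when the new vote carries a strictly higher term, so an equal or stale vote never overwrites what is already on disk. A stand-alone sketch of that term-monotonic store (the types below are illustrative, not the real `LastVote` or `Helpers::putSingleton`):

```cpp
// Sketch: persist a last-vote record only when its term is strictly newer.
#include <cstdint>
#include <optional>

struct LastVote {
    std::int64_t term;
    int candidateIndex;
};

std::optional<LastVote> storedVote;  // stand-in for the on-disk last-vote document

bool storeLastVoteIfNewer(const LastVote& vote) {
    if (storedVote && vote.term <= storedVote->term)
        return false;  // keep the existing document; nothing to persist
    storedVote = vote;  // the real code does this inside a write unit of work
    return true;
}
```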
- if (!mongo::feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - migrationutil::resubmitRangeDeletionsOnStepUp(_service); - } + // Note, these must be done after the configTime is recovered via + // VectorClockMutable::recoverDirect above, because they may trigger filtering metadata + // refreshes which should use the recovered configTime. migrationutil::resumeMigrationCoordinationsOnStepUp(opCtx); migrationutil::resumeMigrationRecipientsOnStepUp(opCtx); @@ -1013,7 +1053,8 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook // (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here. if (mongo::feature_flags::gGlobalIndexesShardingCatalog.isEnabledAndIgnoreFCVUnsafe()) { // Create indexes in config.shard.indexes if needed. - indexStatus = sharding_util::createShardingIndexCatalogIndexes(opCtx); + indexStatus = sharding_util::createShardingIndexCatalogIndexes( + opCtx, NamespaceString::kShardIndexCatalogNamespace); if (!indexStatus.isOK()) { // If the node is shutting down or it lost quorum just as it was becoming primary, // don't run the sharding onStepUp machinery. The onStepDown counterpart to these @@ -1025,10 +1066,11 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook } fassertFailedWithStatus( 6280501, - indexStatus.withContext(str::stream() - << "Failed to create index on " - << NamespaceString::kShardIndexCatalogNamespace - << " on shard's first transition to primary")); + indexStatus.withContext( + str::stream() + << "Failed to create index on " + << NamespaceString::kShardIndexCatalogNamespace.toStringForErrorMsg() + << " on shard's first transition to primary")); } // Create indexes in config.shard.collections if needed. @@ -1044,10 +1086,11 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook } fassertFailedWithStatus( 6711907, - indexStatus.withContext(str::stream() - << "Failed to create index on " - << NamespaceString::kShardCollectionCatalogNamespace - << " on shard's first transition to primary")); + indexStatus.withContext( + str::stream() + << "Failed to create index on " + << NamespaceString::kShardCollectionCatalogNamespace.toStringForErrorMsg() + << " on shard's first transition to primary")); } } } @@ -1057,9 +1100,7 @@ void ReplicationCoordinatorExternalStateImpl::_shardingOnTransitionToPrimaryHook } } - // (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here. 
- if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe() && - serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && + if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && !ShardingState::get(opCtx)->enabled()) { // Note this must be called after the config server has created the cluster ID and also // after the onStepUp logic for the shard role because this triggers sharding state diff --git a/src/mongo/db/repl/replication_coordinator_external_state_impl.h b/src/mongo/db/repl/replication_coordinator_external_state_impl.h index f7c3fb8a0df38..1cd6a3ec6b88a 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_impl.h +++ b/src/mongo/db/repl/replication_coordinator_external_state_impl.h @@ -29,17 +29,41 @@ #pragma once +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/bgsync.h" +#include "mongo/db/repl/last_vote.h" #include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/oplog_buffer.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_external_state.h" +#include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/sync_source_feedback.h" #include "mongo/db/repl/task_runner.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/journal_listener.h" #include "mongo/db/storage/snapshot_manager.h" +#include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { @@ -47,6 +71,7 @@ namespace repl { class DropPendingCollectionReaper; class ReplicationProcess; class StorageInterface; + class NoopWriter; class ReplicationCoordinatorExternalStateImpl final : public ReplicationCoordinatorExternalState, diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp index 5bfea1c24b846..d4ffbe2496bbd 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp +++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.cpp @@ -28,17 +28,18 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/replication_coordinator_external_state_mock.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/bson/oid.h" -#include "mongo/db/client.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/repl/oplog_buffer_blocking_queue.h" +#include "mongo/db/repl/replication_coordinator_external_state_mock.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/sequence_util.h" diff --git a/src/mongo/db/repl/replication_coordinator_external_state_mock.h b/src/mongo/db/repl/replication_coordinator_external_state_mock.h index 6b041a3ada5de..c75809d669ac5 100644 --- a/src/mongo/db/repl/replication_coordinator_external_state_mock.h +++ b/src/mongo/db/repl/replication_coordinator_external_state_mock.h @@ -29,18 +29,33 @@ #pragma once 
+#include +#include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator_external_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/replication_coordinator_impl.cpp b/src/mongo/db/repl/replication_coordinator_impl.cpp index 2884dbc70e8cb..233350e21294d 100644 --- a/src/mongo/db/repl/replication_coordinator_impl.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl.cpp @@ -34,43 +34,66 @@ #include "mongo/db/repl/replication_coordinator_impl.h" -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +// IWYU pragma: no_include "cxxabi.h" +#include #include -#include +#include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/json.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/timestamp.h" #include "mongo/client/fetcher.h" #include "mongo/client/read_preference.h" +#include "mongo/client/read_preference_gen.h" #include "mongo/db/audit.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/commit_quorum_options.h" #include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/client.h" -#include "mongo/db/commands.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/lock_manager.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/lock_stats.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/logical_time.h" #include "mongo/db/mongod_options_storage_gen.h" #include "mongo/db/prepare_conflict_tracker.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/repl/always_allow_non_local_writes.h" #include "mongo/db/repl/check_quorum_for_config_change.h" +#include "mongo/db/repl/data_replicator_external_state.h" #include "mongo/db/repl/data_replicator_external_state_initial_sync.h" #include "mongo/db/repl/hello_response.h" #include "mongo/db/repl/initial_syncer_factory.h" #include "mongo/db/repl/isself.h" #include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/member_config_gen.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/repl_client_info.h" +#include 
"mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/repl_set_config_checks.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" @@ -78,42 +101,67 @@ #include "mongo/db/repl/repl_set_request_votes_args.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/repl/replication_consistency_markers.h" +#include "mongo/db/repl/replication_consistency_markers_gen.h" #include "mongo/db/repl/replication_coordinator_impl_gen.h" #include "mongo/db/repl/replication_metrics.h" #include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/replication_recovery.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/repl/transaction_oplog_application.h" #include "mongo/db/repl/update_position_args.h" -#include "mongo/db/repl/vote_requester.h" #include "mongo/db/server_options.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" +#include "mongo/db/session/internal_session_pool.h" +#include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/kill_sessions_local.h" #include "mongo/db/session/session_catalog.h" +#include "mongo/db/session/session_killer.h" #include "mongo/db/shutdown_in_progress_quiesce_info.h" #include "mongo/db/storage/control/journal_flusher.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_options.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/db/write_concern.h" #include "mongo/db/write_concern_options.h" #include "mongo/executor/connection_pool_stats.h" -#include "mongo/executor/network_interface.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/mutex.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/metadata/oplog_query_metadata.h" #include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/stdx/variant.h" #include "mongo/transport/hello_metrics.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" +#include "mongo/util/net/cidr.h" #include "mongo/util/scopeguard.h" -#include "mongo/util/stacktrace.h" +#include "mongo/util/str.h" #include "mongo/util/synchronized_value.h" #include "mongo/util/testing_proctor.h" #include "mongo/util/time_support.h" #include "mongo/util/timer.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -188,7 +236,6 @@ using NextAction = Fetcher::NextAction; namespace { -const char kLocalDB[] = "local"; void lockAndCall(stdx::unique_lock* lk, const std::function& fn) { if (!lk->owns_lock()) { @@ -517,12 +564,12 @@ bool 
ReplicationCoordinatorImpl::_startLoadLocalConfig( LOGV2_FATAL_NOTRACE( 28545, "Locally stored replica set configuration does not parse; See " - "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config " - "for information on how to recover from this. Got \"{error}\" while parsing " + "https://www.mongodb.com/docs/manual/reference/method/rs.reconfig/ " + "for more information about replica set reconfig. Got \"{error}\" while parsing " "{config}", "Locally stored replica set configuration does not parse; See " - "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config " - "for information on how to recover from this", + "https://www.mongodb.com/docs/manual/reference/method/rs.reconfig/ " + "for more information about replica set reconfig", "error"_attr = status, "config"_attr = cfg.getValue()); } @@ -540,7 +587,9 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig( "Throwing exception."); } - tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); + if (_settings.isServerless()) { + tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); + } ServerlessOperationLockRegistry::recoverLocks(opCtx); LOGV2(4280506, "Reconstructing prepared transactions"); reconstructPreparedTransactions(opCtx, @@ -553,7 +602,7 @@ bool ReplicationCoordinatorImpl::_startLoadLocalConfig( // that the server's networking layer be up and running and accepting connections, which // doesn't happen until startReplication finishes. auto handle = - _replExecutor->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& args) { + _replExecutor->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& args) { _finishLoadLocalConfig(args, localConfig, lastOpTimeAndWallTimeResult, lastVote); }); if (handle == ErrorCodes::ShutdownInProgress) { @@ -610,12 +659,12 @@ void ReplicationCoordinatorImpl::_finishLoadLocalConfig( } else { LOGV2_ERROR(21415, "Locally stored replica set configuration is invalid; See " - "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config" - " for information on how to recover from this. Got \"{error}\" " + "https://www.mongodb.com/docs/manual/reference/method/rs.reconfig/ " + "for more information about replica set reconfig. 
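Many hunks in this file change lambda captures from `[=]` to `[=, this]`. The reason is that C++20 deprecates the implicit capture of `this` through `[=]`, so member-function lambdas that copy-capture now spell the `this` capture out. A tiny sketch of the pattern (illustrative class, not the coordinator):

```cpp
// Sketch: explicit [=, this] capture, valid from C++20, replacing the deprecated implicit form.
#include <functional>

class Scheduler {
public:
    void schedule() {
        int localCopy = _value;
        // Deprecated in C++20: [=] { _value = localCopy + 1; }  (implicitly captures `this`)
        _callback = [=, this] { _value = localCopy + 1; };  // explicit and future-proof
    }
    void run() {
        if (_callback)
            _callback();
    }

private:
    int _value = 0;
    std::function<void()> _callback;
};
```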
Got \"{error}\" " "while validating {localConfig}", "Locally stored replica set configuration is invalid; See " - "http://www.mongodb.org/dochub/core/recover-replica-set-from-invalid-config" - " for information on how to recover from this", + "https://www.mongodb.com/docs/manual/reference/method/rs.reconfig/ " + "for more information about replica set reconfig", "error"_attr = myIndex.getStatus(), "localConfig"_attr = localConfig.toBSON()); fassertFailedNoTrace(28544); @@ -817,15 +866,16 @@ void ReplicationCoordinatorImpl::_initialSyncerCompletionFunction( "error"_attr = opTimeStatus.getStatus()); lock.unlock(); clearSyncSourceDenylist(); - _scheduleWorkAt(_replExecutor->now(), - [=](const mongo::executor::TaskExecutor::CallbackArgs& cbData) { - _startInitialSync( - cc().makeOperationContext().get(), - [this](const StatusWith& opTimeStatus) { - _initialSyncerCompletionFunction(opTimeStatus); - }, - true /* fallbackToLogical */); - }); + _scheduleWorkAt( + _replExecutor->now(), + [=, this](const mongo::executor::TaskExecutor::CallbackArgs& cbData) { + _startInitialSync( + cc().makeOperationContext().get(), + [this](const StatusWith& opTimeStatus) { + _initialSyncerCompletionFunction(opTimeStatus); + }, + true /* fallbackToLogical */); + }); return; } else { LOGV2_ERROR(21416, @@ -1388,11 +1438,16 @@ void ReplicationCoordinatorImpl::setMyHeartbeatMessage(const std::string& msg) { } void ReplicationCoordinatorImpl::setMyLastAppliedOpTimeAndWallTimeForward( - const OpTimeAndWallTime& opTimeAndWallTime) { + const OpTimeAndWallTime& opTimeAndWallTime, bool advanceGlobalTimestamp) { // Update the global timestamp before setting the last applied opTime forward so the last // applied optime is never greater than the latest cluster time in the logical clock. const auto opTime = opTimeAndWallTime.opTime; - _externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp()); + + // The caller may have already advanced the global timestamp, so they may request that we skip + // this step. + if (advanceGlobalTimestamp) { + _externalState->setGlobalTimestamp(getServiceContext(), opTime.getTimestamp()); + } stdx::unique_lock lock(_mutex); auto myLastAppliedOpTime = _getMyLastAppliedOpTime_inlock(); @@ -1650,9 +1705,14 @@ Status ReplicationCoordinatorImpl::waitUntilOpTimeForReadUntil(OperationContext* "node needs to be a replica set member to use read concern"}; } - if (_rsConfigState == kConfigUninitialized || _rsConfigState == kConfigInitiating) { - return {ErrorCodes::NotYetInitialized, + { + stdx::lock_guard lock(_mutex); + if (_rsConfigState == kConfigUninitialized || _rsConfigState == kConfigInitiating || + (_rsConfigState == kConfigHBReconfiguring && !_rsConfig.isInitialized())) { + return { + ErrorCodes::NotYetInitialized, "Cannot use non-local read concern until replica set is finished initializing."}; + } } if (readConcern.getArgsAfterClusterTime() || readConcern.getArgsAtClusterTime()) { @@ -2055,7 +2115,6 @@ ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitRepli invariant(OperationContextSession::get(opCtx) == nullptr); Timer timer; - WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern); // We should never wait for replication if we are holding any locks, because this can // potentially block for long time while doing network activity. 
@@ -2083,6 +2142,9 @@ ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitRepli auto future = [&] { stdx::lock_guard lock(_mutex); + WriteConcernOptions fixedWriteConcern = + _populateUnsetWriteConcernOptionsSyncMode(lock, writeConcern); + return _startWaitingForReplication(lock, opTime, fixedWriteConcern); }(); auto status = futureGetNoThrowWithDeadline(opCtx, future, wTimeoutDate, timeoutError); @@ -2112,14 +2174,16 @@ ReplicationCoordinator::StatusAndDuration ReplicationCoordinatorImpl::awaitRepli SharedSemiFuture ReplicationCoordinatorImpl::awaitReplicationAsyncNoWTimeout( const OpTime& opTime, const WriteConcernOptions& writeConcern) { - WriteConcernOptions fixedWriteConcern = populateUnsetWriteConcernOptionsSyncMode(writeConcern); + stdx::lock_guard lg(_mutex); + + WriteConcernOptions fixedWriteConcern = + _populateUnsetWriteConcernOptionsSyncMode(lg, writeConcern); // The returned future won't account for wTimeout or wDeadline, so reject any write concerns // with either option to avoid misuse. invariant(fixedWriteConcern.wDeadline == Date_t::max()); invariant(fixedWriteConcern.wTimeout == WriteConcernOptions::kNoTimeout); - stdx::lock_guard lg(_mutex); return _startWaitingForReplication(lg, opTime, fixedWriteConcern); } @@ -2627,7 +2691,7 @@ ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::AutoGetRstlForStepUpSt } // Dump all locks to identify which thread(s) are holding RSTL. - getGlobalLockManager()->dump(); + LockManager::get(opCtx)->dump(); auto lockerInfo = opCtx->lockState()->getLockerInfo(CurOp::get(opCtx)->getLockStatsBase()); BSONObjBuilder lockRep; @@ -2649,6 +2713,11 @@ void ReplicationCoordinatorImpl::AutoGetRstlForStepUpStepDown::_killOpThreadFn() invariant(!cc().isFromUserConnection()); + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + LOGV2(21343, "Starting to kill user operations"); auto uniqueOpCtx = cc().makeOperationContext(); OperationContext* opCtx = uniqueOpCtx.get(); @@ -2921,7 +2990,7 @@ void ReplicationCoordinatorImpl::stepDown(OperationContext* opCtx, updateMemberState(); // Schedule work to (potentially) step back up once the stepdown period has ended. - _scheduleWorkAt(stepDownUntil, [=](const executor::TaskExecutor::CallbackArgs& cbData) { + _scheduleWorkAt(stepDownUntil, [=, this](const executor::TaskExecutor::CallbackArgs& cbData) { _handleTimePassing(cbData); }); @@ -3001,14 +3070,14 @@ bool ReplicationCoordinatorImpl::isWritablePrimaryForReportingPurposes() { } bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase(OperationContext* opCtx, - StringData dbName) { + const DatabaseName& dbName) { // The answer isn't meaningful unless we hold the ReplicationStateTransitionLock. invariant(opCtx->lockState()->isRSTLLocked() || opCtx->isLockFreeReadsOp()); return canAcceptWritesForDatabase_UNSAFE(opCtx, dbName); } bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, - StringData dbName) { + const DatabaseName& dbName) { // _canAcceptNonLocalWrites is always true for standalone nodes, and adjusted based on // primary+drain state in replica sets. 
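In the hunks above, the write-concern fix-up becomes a private `_populateUnsetWriteConcernOptionsSyncMode(lock, ...)` that is called while `_mutex` is held and takes the lock as its first argument. Passing a "proof of lock" token is a common way to make the requirement explicit; a generic sketch of that idea (the `WithLock` type below is a simplified stand-in, not MongoDB's actual helper):

```cpp
// Sketch: a helper that can only be called by code that demonstrably holds the mutex.
#include <mutex>

struct WithLock {
    explicit WithLock(const std::lock_guard<std::mutex>&) {}  // constructible only from a held guard
};

class WriteConcernSource {
public:
    int fixedWriteConcern() {
        std::lock_guard<std::mutex> lk(_mutex);
        return _populateDefaults(WithLock(lk));  // lock is provably held here
    }

private:
    int _populateDefaults(WithLock) {
        return _defaultW;  // reads state guarded by _mutex
    }

    std::mutex _mutex;
    int _defaultW = 1;
};
```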
// @@ -3017,7 +3086,7 @@ bool ReplicationCoordinatorImpl::canAcceptWritesForDatabase_UNSAFE(OperationCont if (_readWriteAbility->canAcceptNonLocalWrites_UNSAFE() || alwaysAllowNonLocalWrites(opCtx)) { return true; } - if (dbName == kLocalDB) { + if (dbName == DatabaseName::kLocal) { return true; } return false; @@ -3030,22 +3099,21 @@ bool ReplicationCoordinatorImpl::canAcceptNonLocalWrites() const { namespace { bool isSystemDotProfile(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID) { - if (auto ns = nsOrUUID.nss()) { + if (nsOrUUID.isNamespaceString()) { + return nsOrUUID.nss().isSystemDotProfile(); + } + + if (auto ns = CollectionCatalog::get(opCtx)->lookupNSSByUUID(opCtx, nsOrUUID.uuid())) { return ns->isSystemDotProfile(); - } else { - auto uuid = nsOrUUID.uuid(); - invariant(uuid, nsOrUUID.toString()); - if (auto ns = CollectionCatalog::get(opCtx)->lookupNSSByUUID(opCtx, *uuid)) { - return ns->isSystemDotProfile(); - } } + return false; } } // namespace bool ReplicationCoordinatorImpl::canAcceptWritesFor(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID) { - if (!isReplEnabled() || nsOrUUID.db() == kLocalDB) { + if (!isReplEnabled() || nsOrUUID.dbName().isLocalDB()) { // Writes on stand-alone nodes or "local" database are always permitted. return true; } @@ -3056,13 +3124,13 @@ bool ReplicationCoordinatorImpl::canAcceptWritesFor(OperationContext* opCtx, return true; } - invariant(opCtx->lockState()->isRSTLLocked(), nsOrUUID.toString()); + invariant(opCtx->lockState()->isRSTLLocked(), toStringForLogging(nsOrUUID)); return canAcceptWritesFor_UNSAFE(opCtx, nsOrUUID); } bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID) { - bool canWriteToDB = canAcceptWritesForDatabase_UNSAFE(opCtx, nsOrUUID.db()); + bool canWriteToDB = canAcceptWritesForDatabase_UNSAFE(opCtx, nsOrUUID.dbName()); if (!canWriteToDB && !isSystemDotProfile(opCtx, nsOrUUID)) { return false; @@ -3077,14 +3145,12 @@ bool ReplicationCoordinatorImpl::canAcceptWritesFor_UNSAFE(OperationContext* opC return true; } - if (auto ns = nsOrUUID.nss()) { - if (!ns->isOplog()) { + if (nsOrUUID.isNamespaceString()) { + if (!nsOrUUID.nss().isOplog()) { return true; } } else if (const auto& oplogCollection = LocalOplogInfo::get(opCtx)->getCollection()) { - auto uuid = nsOrUUID.uuid(); - invariant(uuid, nsOrUUID.toString()); - if (oplogCollection->uuid() != *uuid) { + if (oplogCollection->uuid() != nsOrUUID.uuid()) { return true; } } @@ -3129,7 +3195,7 @@ Status ReplicationCoordinatorImpl::checkCanServeReadsFor_UNSAFE(OperationContext // Non-oplog local reads from the user are not allowed during initial sync when the initial // sync method disallows it. "isFromUserConnection" means DBDirectClient reads are not blocked; // "isInternalClient" means reads from other cluster members are not blocked. 
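The `isSystemDotProfile` rewrite above dispatches on whether the caller supplied a namespace string or a UUID, rather than probing optionals and asserting. Outside the MongoDB codebase, the same "name or id" sum type can be modelled with `std::variant`; a rough sketch (types and the suffix check are illustrative assumptions):

```cpp
// Sketch: dispatch on a "namespace string or UUID" sum type.
#include <cstdint>
#include <string>
#include <variant>

using Uuid = std::uint64_t;  // stand-in for a real UUID type
using NamespaceOrUuid = std::variant<std::string, Uuid>;

bool isSystemDotProfile(const NamespaceOrUuid& nsOrUuid) {
    if (std::holds_alternative<std::string>(nsOrUuid)) {
        const std::string& ns = std::get<std::string>(nsOrUuid);
        return ns.size() >= 15 && ns.compare(ns.size() - 15, 15, ".system.profile") == 0;
    }
    // With only a UUID we would consult the collection catalog; conservatively say no here.
    return false;
}
```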
- if (!isPrimaryOrSecondary && getReplicationMode() == modeReplSet && ns.db() == kLocalDB && + if (!isPrimaryOrSecondary && getReplicationMode() == modeReplSet && ns.isLocalDB() && client->isFromUserConnection()) { stdx::lock_guard lock(_mutex); auto isInternalClient = !client->session() || @@ -3188,8 +3254,7 @@ bool ReplicationCoordinatorImpl::shouldRelaxIndexConstraints(OperationContext* o } OID ReplicationCoordinatorImpl::getElectionId() { - stdx::lock_guard lock(_mutex); - return _electionId; + return OID::fromTerm(_electionIdTerm.load()); } int ReplicationCoordinatorImpl::getMyId() const { @@ -3320,6 +3385,11 @@ std::vector ReplicationCoordinatorImpl::getConfigVotingMembers() c return _rsConfig.votingMembers(); } +size_t ReplicationCoordinatorImpl::getNumConfigVotingMembers() const { + stdx::lock_guard lock(_mutex); + return _rsConfig.votingMembers().size(); +} + std::int64_t ReplicationCoordinatorImpl::getConfigTerm() const { stdx::lock_guard lock(_mutex); return _rsConfig.getConfigTerm(); @@ -3526,7 +3596,7 @@ Status ReplicationCoordinatorImpl::processReplSetSyncFrom(OperationContext* opCt } Status ReplicationCoordinatorImpl::processReplSetFreeze(int secs, BSONObjBuilder* resultObj) { - auto result = [=]() { + auto result = [=, this]() { stdx::lock_guard lock(_mutex); return _topCoord->prepareFreezeResponse(_replExecutor->now(), secs, resultObj); }(); @@ -3758,7 +3828,7 @@ Status ReplicationCoordinatorImpl::_doReplSetReconfig(OperationContext* opCtx, _setConfigState_inlock(kConfigReconfiguring); auto configStateGuard = - ScopeGuard([&] { lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigSteady); }); }); + ScopeGuard([&] { lockAndCall(&lk, [=, this] { _setConfigState_inlock(kConfigSteady); }); }); ReplSetConfig oldConfig = _rsConfig; int myIndex = _selfIndex; @@ -4307,7 +4377,7 @@ Status ReplicationCoordinatorImpl::processReplSetInitiate(OperationContext* opCt _setConfigState_inlock(kConfigInitiating); ScopeGuard configStateGuard = [&] { - lockAndCall(&lk, [=] { _setConfigState_inlock(kConfigUninitialized); }); + lockAndCall(&lk, [=, this] { _setConfigState_inlock(kConfigUninitialized); }); }; // When writing our first oplog entry below, disable advancement of the stable timestamp so that @@ -4661,7 +4731,7 @@ ReplicationCoordinatorImpl::_updateMemberStateFromTopologyCoordinator(WithLock l if (_memberState.removed() && !newState.arbiter()) { LOGV2(5268000, "Scheduling a task to begin or continue replication"); _scheduleWorkAt(_replExecutor->now(), - [=](const mongo::executor::TaskExecutor::CallbackArgs& cbData) { + [=, this](const mongo::executor::TaskExecutor::CallbackArgs& cbData) { _externalState->startThreads(); auto opCtx = cc().makeOperationContext(); _startDataReplication(opCtx.get()); @@ -4722,9 +4792,17 @@ void ReplicationCoordinatorImpl::_performPostMemberStateUpdateAction( void ReplicationCoordinatorImpl::_postWonElectionUpdateMemberState(WithLock lk) { invariant(_topCoord->getTerm() != OpTime::kUninitializedTerm); - _electionId = OID::fromTerm(_topCoord->getTerm()); + + // Get the term from the topology coordinator, which we then use to generate the election ID. 
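`getNumConfigVotingMembers()` above sits next to `getConfigVotingMembers()` so that callers who only need the count do not copy the whole member vector out from under `_mutex`. A small sketch of offering both a copying accessor and a cheap size-only accessor (illustrative types):

```cpp
// Sketch: a size-only accessor avoids copying the guarded vector when only the count matters.
#include <cstddef>
#include <mutex>
#include <string>
#include <vector>

class Config {
public:
    std::vector<std::string> votingMembers() const {
        std::lock_guard<std::mutex> lk(_mutex);
        return _votingMembers;  // copies every element
    }
    std::size_t numVotingMembers() const {
        std::lock_guard<std::mutex> lk(_mutex);
        return _votingMembers.size();  // cheap: no copy, lock held only briefly
    }

private:
    mutable std::mutex _mutex;
    std::vector<std::string> _votingMembers{"n1", "n2", "n3"};
};
```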
+ // We intentionally wait until the end of this function + int64_t electionIdTerm = _topCoord->getTerm(); + OID electionId = OID::fromTerm(electionIdTerm); + + ON_BLOCK_EXIT([&] { _electionIdTerm.store(electionIdTerm); }); + auto ts = VectorClockMutable::get(getServiceContext())->tickClusterTime(1).asTimestamp(); - _topCoord->processWinElection(_electionId, ts); + _topCoord->processWinElection(electionId, ts); + const PostMemberStateUpdateAction nextAction = _updateMemberStateFromTopologyCoordinator(lk); invariant(nextAction == kActionFollowerModeStateChange, @@ -5338,7 +5416,7 @@ void ReplicationCoordinatorImpl::_undenylistSyncSource( void ReplicationCoordinatorImpl::denylistSyncSource(const HostAndPort& host, Date_t until) { stdx::lock_guard lock(_mutex); _topCoord->denylistSyncSource(host, until); - _scheduleWorkAt(until, [=](const executor::TaskExecutor::CallbackArgs& cbData) { + _scheduleWorkAt(until, [=, this](const executor::TaskExecutor::CallbackArgs& cbData) { _undenylistSyncSource(cbData, host); }); } @@ -5993,7 +6071,7 @@ Status ReplicationCoordinatorImpl::updateTerm(OperationContext* opCtx, long long } // Check we haven't acquired any lock, because potential stepdown needs global lock. - dassert(!opCtx->lockState()->isLocked() || opCtx->lockState()->isNoop()); + invariant(!opCtx->lockState()->isLocked()); // If the term is already up to date, we can skip the update and the mutex acquisition. if (!_needToUpdateTerm(term)) @@ -6377,5 +6455,15 @@ bool ReplicationCoordinatorImpl::isRetryableWrite(OperationContext* opCtx) const (!opCtx->inMultiDocumentTransaction() || txnParticipant.transactionIsOpen()); } +boost::optional ReplicationCoordinatorImpl::getInitialSyncId(OperationContext* opCtx) { + BSONObj initialSyncId = _replicationProcess->getConsistencyMarkers()->getInitialSyncId(opCtx); + if (initialSyncId.hasField(InitialSyncIdDocument::k_idFieldName)) { + InitialSyncIdDocument initialSyncIdDoc = + InitialSyncIdDocument::parse(IDLParserContext("initialSyncId"), initialSyncId); + return initialSyncIdDoc.get_id(); + } + return boost::none; +} + } // namespace repl } // namespace mongo diff --git a/src/mongo/db/repl/replication_coordinator_impl.h b/src/mongo/db/repl/replication_coordinator_impl.h index 6ab9643fac8bc..d428b242cb215 100644 --- a/src/mongo/db/repl/replication_coordinator_impl.h +++ b/src/mongo/db/repl/replication_coordinator_impl.h @@ -29,35 +29,90 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include +#include +#include +#include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/delayable_timeout_callback.h" +#include "mongo/db/repl/hello_response.h" #include "mongo/db/repl/initial_syncer.h" #include "mongo/db/repl/initial_syncer_interface.h" +#include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/member_id.h" #include "mongo/db/repl/member_state.h" #include 
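The new `getInitialSyncId()` above returns `boost::none` unless the stored document actually carries an `_id` field, and only then parses it. The same guard-then-parse shape with standard-library types (a `std::map` stands in for the BSON document; names are illustrative):

```cpp
// Sketch: return an empty optional unless the expected field is present, then parse it.
#include <map>
#include <optional>
#include <string>

std::optional<std::string> getInitialSyncId(const std::map<std::string, std::string>& doc) {
    auto it = doc.find("_id");  // only parse when the field is present
    if (it == doc.end())
        return std::nullopt;    // analogous to returning boost::none
    return it->second;          // analogous to parsing the id document and returning its _id
}
```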
"mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_heartbeat_args_v1.h" +#include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/db/repl/repl_set_request_votes_args.h" +#include "mongo/db/repl/repl_set_tag.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_external_state.h" +#include "mongo/db/repl/replication_metrics_gen.h" +#include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/split_horizon.h" +#include "mongo/db/repl/split_prepare_session_manager.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/sync_source_resolver.h" +#include "mongo/db/repl/sync_source_selector.h" #include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/repl/update_position_args.h" #include "mongo/db/repl/vote_requester.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/platform/random.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/rpc/topology_version_gen.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/mutex.h" +#include "mongo/stdx/thread.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/interruptible.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -149,8 +204,9 @@ class ReplicationCoordinatorImpl : public ReplicationCoordinator { virtual bool isWritablePrimaryForReportingPurposes(); - virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName); - virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName); + virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, const DatabaseName& dbName); + virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, + const DatabaseName& dbName); bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceStringOrUUID& nsorUUID) override; @@ -179,7 +235,7 @@ class ReplicationCoordinatorImpl : public ReplicationCoordinator { virtual void setMyLastDurableOpTimeAndWallTime(const OpTimeAndWallTime& opTimeAndWallTime); virtual void setMyLastAppliedOpTimeAndWallTimeForward( - const OpTimeAndWallTime& opTimeAndWallTime); + const OpTimeAndWallTime& opTimeAndWallTime, bool advanceGlobalTimestamp); virtual void setMyLastDurableOpTimeAndWallTimeForward( const OpTimeAndWallTime& opTimeAndWallTime); @@ -238,6 +294,8 @@ class ReplicationCoordinatorImpl : public ReplicationCoordinator { virtual std::vector getConfigVotingMembers() const override; + virtual size_t getNumConfigVotingMembers() const override; + virtual std::int64_t getConfigTerm() const override; virtual std::int64_t getConfigVersion() const override; @@ -593,6 +651,8 @@ class ReplicationCoordinatorImpl : public ReplicationCoordinator { bool isRetryableWrite(OperationContext* opCtx) const override; + boost::optional getInitialSyncId(OperationContext* opCtx) override; + 
private: using CallbackFn = executor::TaskExecutor::CallbackFn; @@ -1723,8 +1783,8 @@ class ReplicationCoordinatorImpl : public ReplicationCoordinator { // Set to true when we are in the process of shutting down replication. bool _inShutdown; // (M) - // Election ID of the last election that resulted in this node becoming primary. - OID _electionId; // (M) + // The term of the last election that resulted in this node becoming primary. + AtomicWord _electionIdTerm; // (S) // Used to signal threads waiting for changes to _memberState. stdx::condition_variable _memberStateChange; // (M) @@ -1849,7 +1909,7 @@ class ReplicationCoordinatorImpl : public ReplicationCoordinator { // The cached value of the 'counter' field in the server's TopologyVersion. AtomicWord _cachedTopologyVersionCounter; // (S) - // This should be set during sharding initialization except on catalog shard. + // This should be set during sharding initialization except on config shard. boost::optional _wasCWWCSetOnConfigServerOnStartup; InitialSyncerInterface::OnCompletionFn _onCompletion; diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp index 7a4ce7c783a0e..4178a41483098 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1.cpp @@ -28,18 +28,47 @@ */ -#include "mongo/platform/basic.h" - #include - +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_coordinator_external_state.h" #include "mongo/db/repl/replication_coordinator_impl.h" #include "mongo/db/repl/replication_metrics.h" +#include "mongo/db/repl/replication_metrics_gen.h" #include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/repl/vote_requester.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationElection @@ -221,7 +250,7 @@ void ReplicationCoordinatorImpl::ElectionState::start(WithLock lk, StartElection fassert(28685, nextPhaseEvh.getStatus()); _replExecutor ->onEvent(nextPhaseEvh.getValue(), - [=](const executor::TaskExecutor::CallbackArgs&) { + [=, this](const executor::TaskExecutor::CallbackArgs&) { _processDryRunResult(term, reason); }) .status_with_transitional_ignore(); @@ -402,7 +431,7 @@ void ReplicationCoordinatorImpl::ElectionState::_requestVotesForRealElection( fassert(28643, nextPhaseEvh.getStatus()); _replExecutor ->onEvent(nextPhaseEvh.getValue(), - [=](const 
executor::TaskExecutor::CallbackArgs&) { + [=, this](const executor::TaskExecutor::CallbackArgs&) { if (MONGO_unlikely(hangBeforeOnVoteRequestCompleteCallback.shouldFail())) { LOGV2(7277400, "Hang due to hangBeforeOnVoteRequestCompleteCallback failpoint"); diff --git a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp index 3c84207e7dcb5..6464a7b70b184 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_elect_v1_test.cpp @@ -27,33 +27,75 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/hello_response.h" +#include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/db/repl/repl_set_request_votes_args.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_external_state_mock.h" #include "mongo/db/repl/replication_coordinator_impl.h" #include "mongo/db/repl/replication_coordinator_test_fixture.h" #include "mongo/db/repl/replication_metrics.h" +#include "mongo/db/repl/replication_metrics_gen.h" #include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/repl/vote_requester.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/mock_network_fixture.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/death_test.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace repl { namespace { @@ -82,7 +124,6 @@ class ReplCoordMockTest : public ReplCoordTest { 
assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(time1, Date_t() + Seconds(time1.getSecs())); replCoordSetMyLastDurableOpTime(time1, Date_t() + Seconds(time1.getSecs())); @@ -227,7 +268,8 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyElectableNode) { const auto opCtxPtr = makeOperationContext(); auto& opCtx = *opCtxPtr; - // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true. + // Since we're still in drain mode, expect that we report isWritablePrimary:false, + // issecondary:true. auto helloResponse = getReplCoord()->awaitHelloResponse(opCtxPtr.get(), {}, boost::none, boost::none); ASSERT_FALSE(helloResponse->isWritablePrimary()) << helloResponse->toBSON().toString(); @@ -284,7 +326,8 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenNodeIsTheOnlyNode) { const auto opCtxPtr = makeOperationContext(); auto& opCtx = *opCtxPtr; - // Since we're still in drain mode, expect that we report ismaster: false, issecondary:true. + // Since we're still in drain mode, expect that we report isWritablePrimary:false, + // issecondary:true. auto helloResponse = getReplCoord()->awaitHelloResponse(opCtxPtr.get(), {}, boost::none, boost::none); ASSERT_FALSE(helloResponse->isWritablePrimary()) << helloResponse->toBSON().toString(); @@ -322,7 +365,7 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) { << "node3:12345")) << "protocolVersion" << 1); assertStartSuccess(configObj, HostAndPort("node1", 12345)); - OperationContextNoop opCtx; + replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100)); replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100)); ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY)); @@ -330,8 +373,11 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenAllNodesVoteYea) { simulateSuccessfulV1Election(); getReplCoord()->waitForElectionFinish_forTest(); + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + // Check last vote - auto lastVote = getExternalState()->loadLocalLastVoteDocument(nullptr); + auto lastVote = getExternalState()->loadLocalLastVoteDocument(opCtx); ASSERT(lastVote.isOK()); ASSERT_EQ(0, lastVote.getValue().getCandidateIndex()); ASSERT_EQ(1, lastVote.getValue().getTerm()); @@ -376,7 +422,6 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) { << "node7:12345")) << "protocolVersion" << 1); assertStartSuccess(configObj, HostAndPort("node1", 12345)); - OperationContextNoop opCtx; replCoordSetMyLastAppliedOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100)); replCoordSetMyLastDurableOpTime(OpTime(Timestamp(100, 1), 0), Date_t() + Seconds(100)); ASSERT_OK(getReplCoord()->setFollowerMode(MemberState::RS_SECONDARY)); @@ -384,8 +429,11 @@ TEST_F(ReplCoordTest, ElectionSucceedsWhenMaxSevenNodesVoteYea) { simulateSuccessfulV1Election(); getReplCoord()->waitForElectionFinish_forTest(); + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + // Check last vote - auto lastVote = getExternalState()->loadLocalLastVoteDocument(nullptr); + auto lastVote = getExternalState()->loadLocalLastVoteDocument(opCtx); ASSERT(lastVote.isOK()); ASSERT_EQ(0, lastVote.getValue().getCandidateIndex()); ASSERT_EQ(1, lastVote.getValue().getTerm()); @@ -417,7 +465,6 @@ TEST_F(ReplCoordMockTest, 
ElectionFailsWhenInsufficientVotesAreReceivedDuringDry // Trigger election. _mock->runUntil(electionTimeoutWhen); - _mock->runUntilExpectationsSatisfied(); stopCapturingLogMessages(); @@ -496,7 +543,6 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringDryRun) assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(time1, Date_t() + Seconds(time1.getSecs())); replCoordSetMyLastDurableOpTime(time1, Date_t() + Seconds(time1.getSecs())); @@ -574,7 +620,6 @@ TEST_F(ReplCoordTest, ElectionFailsWhenDryRunResponseContainsANewerTerm) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(time1, Date_t() + Seconds(time1.getSecs())); replCoordSetMyLastDurableOpTime(time1, Date_t() + Seconds(time1.getSecs())); @@ -635,7 +680,6 @@ TEST_F(ReplCoordTest, ElectionParticipantMetricsAreCollected) { << "node2:12345")) << "protocolVersion" << 1); assertStartSuccess(configObj, HostAndPort("node1", 12345)); - OperationContextNoop opCtx; OpTime lastOplogEntry = OpTime(Timestamp(999, 0), 1); auto metricsAfterVoteRequestWithDryRun = [&](bool dryRun) { @@ -647,12 +691,15 @@ TEST_F(ReplCoordTest, ElectionParticipantMetricsAreCollected) { -1 /* primaryIndex */ ); + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + auto voteRequest = requester.getRequests()[0]; ReplSetRequestVotesArgs requestVotesArgs; ASSERT_OK(requestVotesArgs.initialize(voteRequest.cmdObj)); ReplSetRequestVotesResponse requestVotesResponse; ASSERT_OK(getReplCoord()->processReplSetRequestVotes( - &opCtx, requestVotesArgs, &requestVotesResponse)); + opCtx, requestVotesArgs, &requestVotesResponse)); auto electionParticipantMetrics = ReplicationMetrics::get(getServiceContext()).getElectionParticipantMetricsBSON(); @@ -674,7 +721,6 @@ TEST_F(ReplCoordTest, ElectionParticipantMetricsAreCollected) { TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) { // start up, receive reconfig via heartbeat while at the same time, become candidate. // candidate state should be cleared. 
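// Editor's note: a minimal sketch of the operation-context pattern this patch adopts across the
// election tests. It assumes only that the fixture's makeOperationContext() returns an owning
// handle (as used elsewhere in this diff); the scoped block keeps the OperationContext alive just
// for the call that needs it, replacing the long-lived OperationContextNoop that the surrounding
// hunks remove.
{
    auto opCtxHolder{makeOperationContext()};
    auto* const opCtx{opCtxHolder.get()};

    // Example use, taken from this diff: loading the last vote document now requires a real
    // OperationContext instead of a nullptr.
    auto lastVote = getExternalState()->loadLocalLastVoteDocument(opCtx);
    ASSERT(lastVote.isOK());
}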
- OperationContextNoop opCtx; assertStartSuccess(BSON("_id" << "mySet" << "version" << 2 << "members" @@ -721,12 +767,17 @@ TEST_F(ReplCoordTest, NodeWillNotStandForElectionDuringHeartbeatReconfig) { getNet()->exitNetwork(); // prepare candidacy - BSONObjBuilder result; - ReplicationCoordinator::ReplSetReconfigArgs args; - args.force = false; - args.newConfigObj = config.toBSON(); - ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress, - getReplCoord()->processReplSetReconfig(&opCtx, args, &result)); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + BSONObjBuilder result; + ReplicationCoordinator::ReplSetReconfigArgs args; + args.force = false; + args.newConfigObj = config.toBSON(); + ASSERT_EQUALS(ErrorCodes::ConfigurationInProgress, + getReplCoord()->processReplSetReconfig(opCtx, args, &result)); + } auto severityGuard = unittest::MinimumLoggedSeverityGuard{logv2::LogComponent::kDefault, logv2::LogSeverity::Debug(2)}; @@ -810,7 +861,6 @@ TEST_F(ReplCoordTest, ElectionFailsWhenInsufficientVotesAreReceivedDuringRequest assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(time1, Date_t() + Seconds(time1.getSecs())); replCoordSetMyLastDurableOpTime(time1, Date_t() + Seconds(time1.getSecs())); @@ -898,7 +948,6 @@ TEST_F(ReplCoordTest, ElectionFailsWhenVoteRequestResponseContainsANewerTerm) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(time1, Date_t() + Seconds(time1.getSecs())); replCoordSetMyLastDurableOpTime(time1, Date_t() + Seconds(time1.getSecs())); @@ -966,7 +1015,6 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringDryRun) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(time1, Date_t() + Seconds(time1.getSecs())); replCoordSetMyLastDurableOpTime(time1, Date_t() + Seconds(time1.getSecs())); @@ -1003,7 +1051,6 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) { assertStartSuccess(configObj, HostAndPort("node1", 12345)); ReplSetConfig config = assertMakeRSConfig(configObj); - OperationContextNoop opCtx; OpTime time1(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(time1, Date_t() + Seconds(time1.getSecs())); replCoordSetMyLastDurableOpTime(time1, Date_t() + Seconds(time1.getSecs())); @@ -1011,8 +1058,12 @@ TEST_F(ReplCoordTest, ElectionFailsWhenTermChangesDuringActualElection) { simulateEnoughHeartbeatsForAllNodesUp(); simulateSuccessfulDryRun(); - // update to a future term before the election completes - getReplCoord()->updateTerm(&opCtx, 1000).transitional_ignore(); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + // update to a future term before the election completes + getReplCoord()->updateTerm(opCtx, 1000).transitional_ignore(); + } NetworkInterfaceMock* net = getNet(); net->enterNetwork(); @@ -1152,18 +1203,19 @@ class TakeoverTest : public ReplCoordTest { return net->now(); } - void performSuccessfulTakeover(Date_t takeoverTime, + void performSuccessfulTakeover(OperationContext* opCtx, + Date_t takeoverTime, StartElectionReasonEnum reason, const LastVote& lastVoteExpected) { 
startCapturingLogMessages(); - simulateSuccessfulV1ElectionAt(takeoverTime); + simulateSuccessfulV1ElectionAt(opCtx, takeoverTime); getReplCoord()->waitForElectionFinish_forTest(); stopCapturingLogMessages(); ASSERT(getReplCoord()->getMemberState().primary()); // Check last vote - auto lastVote = getExternalState()->loadLocalLastVoteDocument(nullptr); + auto lastVote = getExternalState()->loadLocalLastVoteDocument(opCtx); ASSERT(lastVote.isOK()); ASSERT_EQ(lastVoteExpected.getCandidateIndex(), lastVote.getValue().getCandidateIndex()); ASSERT_EQ(lastVoteExpected.getTerm(), lastVote.getValue().getTerm()); @@ -1257,12 +1309,16 @@ TEST_F(TakeoverTest, DoesntScheduleCatchupTakeoverIfCatchupDisabledButTakeoverDe auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); replCoordSetMyLastAppliedOpTime(currentOptime, Date_t() + Seconds(currentOptime.getSecs())); replCoordSetMyLastDurableOpTime(currentOptime, Date_t() + Seconds(currentOptime.getSecs())); OpTime behindOptime(Timestamp(100, 1), 0); - ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&opCtx, 1)); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(opCtx, 1)); + } // Make sure we're secondary and that no catchup takeover has been scheduled yet. ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_SECONDARY)); @@ -1293,7 +1349,6 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfNodeIsFresherThanCurrentPrimary) auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); // Update the current term to simulate a scenario where an election has occured // and some other node became the new primary. Once you hear about a primary election @@ -1339,7 +1394,6 @@ TEST_F(TakeoverTest, SchedulesCatchupTakeoverIfBothTakeoversAnOption) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); // Update the current term to simulate a scenario where an election has occured // and some other node became the new primary. Once you hear about a primary election @@ -1390,7 +1444,6 @@ TEST_F(TakeoverTest, PrefersPriorityToCatchupTakeoverIfNodeHasHighestPriority) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); // Update the current term to simulate a scenario where an election has occured // and some other node became the new primary. Once you hear about a primary election @@ -1437,7 +1490,6 @@ TEST_F(TakeoverTest, CatchupTakeoverNotScheduledTwice) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); // Update the current term to simulate a scenario where an election has occured // and some other node became the new primary. Once you hear about a primary election @@ -1497,7 +1549,6 @@ TEST_F(TakeoverTest, CatchupAndPriorityTakeoverNotScheduledAtSameTime) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); // Update the current term to simulate a scenario where an election has occured // and some other node became the new primary. 
Once you hear about a primary election @@ -1555,7 +1606,6 @@ TEST_F(TakeoverTest, CatchupTakeoverCallbackCanceledIfElectionTimeoutRuns) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); // Update the current term to simulate a scenario where an election has occured // and some other node became the new primary. Once you hear about a primary election @@ -1690,7 +1740,6 @@ TEST_F(TakeoverTest, SuccessfulCatchupTakeover) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(100, 5000), 0); OpTime behindOptime(Timestamp(100, 4000), 0); @@ -1700,7 +1749,13 @@ TEST_F(TakeoverTest, SuccessfulCatchupTakeover) { // Update the term so that the current term is ahead of the term of // the last applied op time. This means that the primary is still in // catchup mode since it hasn't written anything this term. - ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&opCtx, replCoord->getTerm() + 1)); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + ASSERT_EQUALS(ErrorCodes::StaleTerm, + replCoord->updateTerm(opCtx, replCoord->getTerm() + 1)); + } // Make sure we're secondary and that no takeover has been scheduled. ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_SECONDARY)); @@ -1730,9 +1785,16 @@ TEST_F(TakeoverTest, SuccessfulCatchupTakeover) { ASSERT_EQUALS(1, countTextFormatLogLinesContaining("Starting an election for a catchup takeover")); - LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); - performSuccessfulTakeover( - catchupTakeoverTime, StartElectionReasonEnum::kCatchupTakeover, lastVoteExpected); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); + performSuccessfulTakeover(opCtx, + catchupTakeoverTime, + StartElectionReasonEnum::kCatchupTakeover, + lastVoteExpected); + } // Check that the numCatchUpTakeoversCalled and the numCatchUpTakeoversSuccessful election // metrics have been incremented, and that none of the metrics that track the number of @@ -1774,7 +1836,6 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(100, 5000), 0); OpTime behindOptime(Timestamp(100, 4000), 0); @@ -1784,7 +1845,13 @@ TEST_F(TakeoverTest, CatchupTakeoverDryRunFailsPrimarySaysNo) { // Update the term so that the current term is ahead of the term of // the last applied op time. This means that the primary is still in // catchup mode since it hasn't written anything this term. - ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&opCtx, replCoord->getTerm() + 1)); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + ASSERT_EQUALS(ErrorCodes::StaleTerm, + replCoord->updateTerm(opCtx, replCoord->getTerm() + 1)); + } // Make sure we're secondary and that no takeover has been scheduled. 
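// Editor's note: an aside on the scope-guard idiom that _postWonElectionUpdateMemberState adopts
// earlier in this diff. ON_BLOCK_EXIT (from mongo/util/scopeguard.h, already used by the patch)
// defers its callable until the enclosing scope unwinds, so the atomic _electionIdTerm is only
// published after the rest of the post-election bookkeeping has run. A self-contained
// illustration with a plain local variable:
int64_t publishedTerm = 0;
{
    const int64_t electionIdTerm = 7;                         // value computed up front
    ON_BLOCK_EXIT([&] { publishedTerm = electionIdTerm; });   // runs when the scope exits
    // ... intermediate work; readers of publishedTerm still see the old value here ...
}
invariant(publishedTerm == 7);  // the guard has fired by this point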
ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_SECONDARY)); @@ -1881,7 +1948,6 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); replCoordSetMyLastAppliedOpTime(currentOptime, Date_t() + Seconds(currentOptime.getSecs())); replCoordSetMyLastDurableOpTime(currentOptime, Date_t() + Seconds(currentOptime.getSecs())); @@ -1889,7 +1955,13 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeCatchupTakeover) { // Update the term so that the current term is ahead of the term of // the last applied op time. - ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&opCtx, replCoord->getTerm() + 1)); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + ASSERT_EQUALS(ErrorCodes::StaleTerm, + replCoord->updateTerm(opCtx, replCoord->getTerm() + 1)); + } // Make sure we're secondary and that no catchup takeover has been scheduled. ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_SECONDARY)); @@ -1942,7 +2014,6 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOptime(Timestamp(200, 1), 0); replCoordSetMyLastAppliedOpTime(currentOptime, Date_t() + Seconds(currentOptime.getSecs())); replCoordSetMyLastDurableOpTime(currentOptime, Date_t() + Seconds(currentOptime.getSecs())); @@ -1950,7 +2021,13 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) { // Update the term so that the current term is ahead of the term of // the last applied op time. - ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&opCtx, replCoord->getTerm() + 1)); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + ASSERT_EQUALS(ErrorCodes::StaleTerm, + replCoord->updateTerm(opCtx, replCoord->getTerm() + 1)); + } // Make sure we're secondary and that no catchup takeover has been scheduled. 
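// Editor's note: the recurring "[=]" -> "[=, this]" rewrites throughout this diff address the
// C++20 deprecation of implicitly capturing `this` under a by-copy capture default. A
// self-contained illustration (plain C++20, not MongoDB code); behaviour is unchanged, the
// capture of the `this` pointer is simply spelled out, and the object must still outlive the
// closure:
#include <functional>

struct Scheduler {
    int _value{42};

    std::function<int()> makeTask() {
        // return [=] { return _value; };     // deprecated in C++20: `this` captured implicitly
        return [=, this] { return _value; };  // equivalent, with the capture made explicit
    }
};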
ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_SECONDARY)); @@ -1997,9 +2074,16 @@ TEST_F(TakeoverTest, PrimaryCatchesUpBeforeHighPriorityNodeCatchupTakeover) { now = respondToHeartbeatsUntil( config, now + longElectionTimeout, HostAndPort("node2", 12345), currentOptime); - LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); - performSuccessfulTakeover( - priorityTakeoverTime, StartElectionReasonEnum::kPriorityTakeover, lastVoteExpected); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); + performSuccessfulTakeover(opCtx, + priorityTakeoverTime, + StartElectionReasonEnum::kPriorityTakeover, + lastVoteExpected); + } } TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrentPrimary) { @@ -2020,7 +2104,6 @@ TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrent auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime myOptime(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(myOptime, Date_t() + Seconds(myOptime.getSecs())); replCoordSetMyLastDurableOpTime(myOptime, Date_t() + Seconds(myOptime.getSecs())); @@ -2041,7 +2124,13 @@ TEST_F(TakeoverTest, SchedulesPriorityTakeoverIfNodeHasHigherPriorityThanCurrent assertValidPriorityTakeoverDelay(config, now, priorityTakeoverTime, 0); // Also make sure that updating the term cancels the scheduled priority takeover. - ASSERT_EQUALS(ErrorCodes::StaleTerm, replCoord->updateTerm(&opCtx, replCoord->getTerm() + 1)); + { + const auto opCtxPtr = makeOperationContext(); + auto& opCtx = *opCtxPtr; + + ASSERT_EQUALS(ErrorCodes::StaleTerm, + replCoord->updateTerm(&opCtx, replCoord->getTerm() + 1)); + } ASSERT_FALSE(replCoord->getPriorityTakeover_forTest()); } @@ -2063,7 +2152,6 @@ TEST_F(TakeoverTest, SuccessfulPriorityTakeover) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime myOptime(Timestamp(100, 1), 0); replCoordSetMyLastAppliedOpTime(myOptime, Date_t() + Seconds(myOptime.getSecs())); replCoordSetMyLastDurableOpTime(myOptime, Date_t() + Seconds(myOptime.getSecs())); @@ -2090,9 +2178,16 @@ TEST_F(TakeoverTest, SuccessfulPriorityTakeover) { now = respondToHeartbeatsUntil( config, now + halfElectionTimeout, HostAndPort("node2", 12345), myOptime); - LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); - performSuccessfulTakeover( - priorityTakeoverTime, StartElectionReasonEnum::kPriorityTakeover, lastVoteExpected); + { + const auto opCtxPtr = makeOperationContext(); + auto& opCtx = *opCtxPtr; + + LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); + performSuccessfulTakeover(&opCtx, + priorityTakeoverTime, + StartElectionReasonEnum::kPriorityTakeover, + lastVoteExpected); + } // Check that the numPriorityTakeoversCalled and the numPriorityTakeoversSuccessful election // metrics have been incremented, and that none of the metrics that track the number of @@ -2130,7 +2225,6 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOpTime(Timestamp(100, 5000), 0); OpTime behindOpTime(Timestamp(100, 3999), 0); OpTime closeEnoughOpTime(Timestamp(100, 4000), 0); @@ -2195,9 +2289,16 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedSameSecond) { now = respondToHeartbeatsUntil( config, now + halfElectionTimeout, primaryHostAndPort, 
currentOpTime); - LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); - performSuccessfulTakeover( - priorityTakeoverTime, StartElectionReasonEnum::kPriorityTakeover, lastVoteExpected); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); + performSuccessfulTakeover(opCtx, + priorityTakeoverTime, + StartElectionReasonEnum::kPriorityTakeover, + lastVoteExpected); + } } TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecond) { @@ -2219,7 +2320,6 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecond) { auto replCoord = getReplCoord(); auto now = getNet()->now(); - OperationContextNoop opCtx; OpTime currentOpTime(Timestamp(100, 1), 0); OpTime behindOpTime(Timestamp(97, 1), 0); OpTime closeEnoughOpTime(Timestamp(98, 1), 0); @@ -2283,9 +2383,16 @@ TEST_F(TakeoverTest, DontCallForPriorityTakeoverWhenLaggedDifferentSecond) { now = respondToHeartbeatsUntil( config, now + halfElectionTimeout, primaryHostAndPort, currentOpTime); - LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); - performSuccessfulTakeover( - priorityTakeoverTime, StartElectionReasonEnum::kPriorityTakeover, lastVoteExpected); + { + auto opCtxHolder{makeOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; + + LastVote lastVoteExpected = LastVote(replCoord->getTerm() + 1, 0); + performSuccessfulTakeover(opCtx, + priorityTakeoverTime, + StartElectionReasonEnum::kPriorityTakeover, + lastVoteExpected); + } } TEST_F(ReplCoordTest, NodeCancelsElectionUponReceivingANewConfigDuringDryRun) { @@ -2717,7 +2824,8 @@ TEST_F(PrimaryCatchUpTest, PrimaryDoesNotNeedToCatchUp) { auto opCtx = makeOperationContext(); signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was not incremented. ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -2772,7 +2880,8 @@ TEST_F(PrimaryCatchUpTest, CatchupSucceeds) { auto opCtx = makeOperationContext(); signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was incremented. ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -2806,7 +2915,8 @@ TEST_F(PrimaryCatchUpTest, CatchupTimeout) { auto opCtx = makeOperationContext(); signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was incremented. 
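// Editor's note: shape of the canAcceptWritesForDatabase() migration shown just above, for
// clarity. The method now takes a DatabaseName instead of a StringData; in the tests a
// DatabaseName is built with createDatabaseName_forTest, whose first argument (boost::none here)
// is the tenant id, and well-known databases can use constants such as DatabaseName::kAdmin
// (both usages are taken from this diff):
Lock::GlobalLock lock(opCtx.get(), MODE_IX);
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(
    opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test")));
ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin));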
ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -2847,7 +2957,8 @@ TEST_F(PrimaryCatchUpTest, CannotSeeAllNodes) { auto opCtx = makeOperationContext(); signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was not incremented. ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -2891,7 +3002,8 @@ TEST_F(PrimaryCatchUpTest, HeartbeatTimeout) { auto opCtx = makeOperationContext(); signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was not incremented. ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -2929,7 +3041,8 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownBeforeHeartbeatRefreshing) { ASSERT_EQUALS(0, countTextFormatLogLinesContaining("Catchup timed out")); auto opCtx = makeOperationContext(); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was not incremented. ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -2977,7 +3090,8 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringCatchUp) { ASSERT_EQUALS(0, countTextFormatLogLinesContaining("Catchup timed out")); auto opCtx = makeOperationContext(); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was incremented. ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -3053,12 +3167,14 @@ TEST_F(PrimaryCatchUpTest, PrimaryStepsDownDuringDrainMode) { ASSERT(replCoord->getApplierState() == ApplierState::Draining); { Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_FALSE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_FALSE(replCoord->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); } signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); ASSERT(replCoord->getApplierState() == ApplierState::Stopped); - ASSERT_TRUE(replCoord->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(replCoord->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was not incremented again. 
ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -3140,7 +3256,8 @@ TEST_F(PrimaryCatchUpTest, FreshestNodeBecomesAvailableLater) { auto opCtx = makeOperationContext(); signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was incremented. ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -3201,7 +3318,8 @@ TEST_F(PrimaryCatchUpTest, InfiniteTimeoutAndAbort) { auto opCtx = makeOperationContext(); signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was incremented. ASSERT_EQ(1, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); @@ -3230,7 +3348,8 @@ TEST_F(PrimaryCatchUpTest, ZeroTimeout) { auto opCtx = makeOperationContext(); signalDrainComplete(opCtx.get()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "test")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase( + opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "test"))); // Check that the number of elections requiring primary catchup was not incremented. ASSERT_EQ(0, ReplicationMetrics::get(opCtx.get()).getNumCatchUps_forTesting()); diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp index 4330baaf03696..e3e8fba50b274 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp @@ -36,37 +36,76 @@ #define LOGV2_FOR_SHARD_SPLIT(ID, DLEVEL, MESSAGE, ...) 
\ LOGV2_DEBUG_OPTIONS(ID, DLEVEL, {logv2::LogComponent::kTenantMigration}, MESSAGE, ##__VA_ARGS__) -#include "mongo/platform/basic.h" - #include -#include - +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" #include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/global_settings.h" -#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/delayable_timeout_callback.h" #include "mongo/db/repl/heartbeat_response_action.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_config_checks.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/repl/replication_consistency_markers.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_coordinator_external_state.h" #include "mongo/db/repl/replication_coordinator_impl.h" #include "mongo/db/repl/replication_metrics.h" +#include "mongo/db/repl/replication_metrics_gen.h" #include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/topology_coordinator.h" -#include "mongo/db/repl/vote_requester.h" -#include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions_local.h" #include "mongo/db/storage/control/journal_flusher.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/mutex.h" -#include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" @@ -123,7 +162,7 @@ void ReplicationCoordinatorImpl::_doMemberHeartbeat(executor::TaskExecutor::Call const RemoteCommandRequest request( target, "admin", heartbeatObj, BSON(rpc::kReplSetMetadataFieldName << 1), nullptr, timeout); const executor::TaskExecutor::RemoteCommandCallbackFn callback = - [=](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) { + [=, this](const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData) { return _handleHeartbeatResponse(cbData, replSetName); }; @@ -149,7 +188,7 @@ void 
ReplicationCoordinatorImpl::_scheduleHeartbeatToTarget_inlock(const HostAnd "when"_attr = when); _trackHeartbeatHandle_inlock( _replExecutor->scheduleWorkAt(when, - [=, replSetName = std::move(replSetName)]( + [=, this, replSetName = std::move(replSetName)]( const executor::TaskExecutor::CallbackArgs& cbData) { _doMemberHeartbeat(cbData, target, replSetName); }), @@ -354,7 +393,7 @@ void ReplicationCoordinatorImpl::_handleHeartbeatResponse( if (mem && mem->isNewlyAdded()) { const auto memId = mem->getId(); auto status = _replExecutor->scheduleWork( - [=](const executor::TaskExecutor::CallbackArgs& cbData) { + [=, this](const executor::TaskExecutor::CallbackArgs& cbData) { _reconfigToRemoveNewlyAddedField( cbData, memId, _rsConfig.getConfigVersionAndTerm()); }); @@ -445,7 +484,8 @@ stdx::unique_lock ReplicationCoordinatorImpl::_handleHeartbeatResponseAct "Scheduling priority takeover", "when"_attr = _priorityTakeoverWhen); _priorityTakeoverCbh = _scheduleWorkAt( - _priorityTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) { + _priorityTakeoverWhen, + [=, this](const mongo::executor::TaskExecutor::CallbackArgs&) { _startElectSelfIfEligibleV1(StartElectionReasonEnum::kPriorityTakeover); }); } @@ -462,7 +502,8 @@ stdx::unique_lock ReplicationCoordinatorImpl::_handleHeartbeatResponseAct "Scheduling catchup takeover", "when"_attr = _catchupTakeoverWhen); _catchupTakeoverCbh = _scheduleWorkAt( - _catchupTakeoverWhen, [=](const mongo::executor::TaskExecutor::CallbackArgs&) { + _catchupTakeoverWhen, + [=, this](const mongo::executor::TaskExecutor::CallbackArgs&) { _startElectSelfIfEligibleV1(StartElectionReasonEnum::kCatchupTakeover); }); } @@ -512,7 +553,7 @@ executor::TaskExecutor::EventHandle ReplicationCoordinatorImpl::_stepDownStart() } _replExecutor - ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) { + ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) { _stepDownFinish(cbData, finishEvent); }) .status_with_transitional_ignore(); @@ -658,7 +699,7 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(WithLock lk, _rsConfig.getConfigVersionAndTerm() < newConfig.getConfigVersionAndTerm() || _selfIndex < 0); _replExecutor - ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) { + ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) { const auto [swConfig, isSplitRecipientConfig] = _resolveConfigToApply(newConfig); if (!swConfig.isOK()) { LOGV2_WARNING( @@ -679,24 +720,24 @@ void ReplicationCoordinatorImpl::_scheduleHeartbeatReconfig(WithLock lk, } LOGV2(8423366, "Waiting for oplog buffer to drain before applying recipient config."); - _drainForShardSplit().getAsync( - [this, - resolvedConfig = swConfig.getValue(), - replExecutor = _replExecutor.get(), - isSplitRecipientConfig = isSplitRecipientConfig](Status status) { - if (!status.isOK()) { - stdx::lock_guard lg(_mutex); - _setConfigState_inlock(!_rsConfig.isInitialized() ? 
kConfigUninitialized - : kConfigSteady); - return; - } - - replExecutor - ->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& cbData) { - _heartbeatReconfigStore(cbData, resolvedConfig, isSplitRecipientConfig); - }) - .status_with_transitional_ignore(); - }); + _drainForShardSplit().getAsync([this, + resolvedConfig = swConfig.getValue(), + replExecutor = _replExecutor.get(), + isSplitRecipientConfig = + isSplitRecipientConfig](Status status) { + if (!status.isOK()) { + stdx::lock_guard lg(_mutex); + _setConfigState_inlock(!_rsConfig.isInitialized() ? kConfigUninitialized + : kConfigSteady); + return; + } + + replExecutor + ->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& cbData) { + _heartbeatReconfigStore(cbData, resolvedConfig, isSplitRecipientConfig); + }) + .status_with_transitional_ignore(); + }); }) .status_with_transitional_ignore(); } @@ -938,7 +979,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish( "_heartbeatReconfigFinish until fail point is disabled"); _replExecutor ->scheduleWorkAt(_replExecutor->now() + Milliseconds{10}, - [=](const executor::TaskExecutor::CallbackArgs& cbData) { + [=, this](const executor::TaskExecutor::CallbackArgs& cbData) { _heartbeatReconfigFinish( cbData, newConfig, myIndex, isSplitRecipientConfig); }) @@ -963,7 +1004,7 @@ void ReplicationCoordinatorImpl::_heartbeatReconfigFinish( // Wait for the election to complete and the node's Role to be set to follower. _replExecutor ->onEvent(electionFinishedEvent, - [=](const executor::TaskExecutor::CallbackArgs& cbData) { + [=, this](const executor::TaskExecutor::CallbackArgs& cbData) { _heartbeatReconfigFinish( cbData, newConfig, myIndex, isSplitRecipientConfig); }) diff --git a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp index 447f79c90d59f..2c9ac631257bb 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_heartbeat_v1_test.cpp @@ -27,30 +27,83 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - -#include "mongo/bson/json.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/operation_context_noop.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/replication_state_transition_lock_guard.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/repl/hello_response.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_config_gen.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" +#include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_external_state_mock.h" #include "mongo/db/repl/replication_coordinator_impl.h" #include "mongo/db/repl/replication_coordinator_test_fixture.h" -#include "mongo/db/repl/task_runner_test_fixture.h" #include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/serverless/shard_split_utils.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace repl { namespace { @@ -212,9 +265,9 @@ TEST_F(ReplCoordHBV1Test, performSyncToFinishReconfigHeartbeat(); assertMemberState(MemberState::RS_STARTUP2); - OperationContextNoop opCtx; + auto opCtx{makeOperationContext()}; auto storedConfig = ReplSetConfig::parse( - unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx))); + unittest::assertGet(getExternalState()->loadLocalConfigDocument(opCtx.get()))); ASSERT_OK(storedConfig.validate()); ASSERT_EQUALS(3, storedConfig.getConfigVersion()); ASSERT_EQUALS(3, storedConfig.getNumMembers()); @@ -257,9 +310,9 @@ 
TEST_F(ReplCoordHBV1Test, performSyncToFinishReconfigHeartbeat(); assertMemberState(MemberState::RS_STARTUP2); - OperationContextNoop opCtx; + auto opCtx{makeOperationContext()}; auto storedConfig = ReplSetConfig::parse( - unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx))); + unittest::assertGet(getExternalState()->loadLocalConfigDocument(opCtx.get()))); ASSERT_OK(storedConfig.validate()); ASSERT_EQUALS(3, storedConfig.getConfigVersion()); ASSERT_EQUALS(3, storedConfig.getNumMembers()); @@ -436,9 +489,9 @@ TEST_F(ReplCoordHBV1Test, UninitializedDonorNodeAcceptsSplitConfigOnFirstHeartbe performSyncToFinishReconfigHeartbeat(); assertMemberState(MemberState::RS_STARTUP2); - OperationContextNoop opCtx; + auto opCtx{makeOperationContext()}; auto storedConfig = ReplSetConfig::parse( - unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx))); + unittest::assertGet(getExternalState()->loadLocalConfigDocument(opCtx.get()))); ASSERT_OK(storedConfig.validate()); ASSERT_EQUALS(3, storedConfig.getConfigVersion()); ASSERT_EQUALS(3, storedConfig.getNumMembers()); @@ -1471,9 +1524,9 @@ TEST_F(ReplCoordHBV1Test, performSyncToFinishReconfigHeartbeat(); assertMemberState(MemberState::RS_ARBITER); - OperationContextNoop opCtx; + auto opCtx{makeOperationContext()}; auto storedConfig = ReplSetConfig::parse( - unittest::assertGet(getExternalState()->loadLocalConfigDocument(&opCtx))); + unittest::assertGet(getExternalState()->loadLocalConfigDocument(opCtx.get()))); ASSERT_OK(storedConfig.validate()); ASSERT_EQUALS(3, storedConfig.getConfigVersion()); ASSERT_EQUALS(3, storedConfig.getNumMembers()); @@ -1526,9 +1579,8 @@ TEST_F(ReplCoordHBV1Test, performSyncToFinishReconfigHeartbeat(); assertMemberState(MemberState::RS_STARTUP, "2"); - OperationContextNoop opCtx; - - StatusWith loadedConfig(getExternalState()->loadLocalConfigDocument(&opCtx)); + auto opCtx{makeOperationContext()}; + StatusWith loadedConfig(getExternalState()->loadLocalConfigDocument(opCtx.get())); ASSERT_NOT_OK(loadedConfig.getStatus()) << loadedConfig.getValue(); exitNetwork(); } diff --git a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp index af33378a336fb..7d917e7991423 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_reconfig_test.cpp @@ -28,11 +28,34 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/jsobj.h" +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/read_write_concern_defaults_gen.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" @@ -40,14 +63,27 @@ #include 
"mongo/db/repl/replication_coordinator_external_state_mock.h" #include "mongo/db/repl/replication_coordinator_impl.h" #include "mongo/db/repl/replication_coordinator_test_fixture.h" +#include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/serverless/shard_split_utils.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/stdx/future.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/repl/replication_coordinator_impl_test.cpp b/src/mongo/db/repl/replication_coordinator_impl_test.cpp index 9a044655c0ee7..15d2d9e3ba199 100644 --- a/src/mongo/db/repl/replication_coordinator_impl_test.cpp +++ b/src/mongo/db/repl/replication_coordinator_impl_test.cpp @@ -27,27 +27,39 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - -#include -#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include #include +#include #include +#include +#include +#include #include -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/catalog/commit_quorum_options.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/repl/bson_extract_optime.h" #include "mongo/db/repl/data_replicator_external_state_impl.h" -#include "mongo/db/repl/heartbeat_response_action.h" #include "mongo/db/repl/hello_response.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_request_votes_args.h" @@ -64,26 +76,33 @@ #include "mongo/db/service_context.h" #include "mongo/db/shutdown_in_progress_quiesce_info.h" #include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/rpc/metadata/oplog_query_metadata.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/stdx/future.h" #include "mongo/stdx/thread.h" +#include 
"mongo/stdx/unordered_map.h" #include "mongo/transport/hello_metrics.h" -#include "mongo/unittest/barrier.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" #include "mongo/unittest/ensure_fcv.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" #include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" -#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace repl { namespace { @@ -137,7 +156,7 @@ std::shared_ptr awaitHelloWithNewOpCtx( return replCoord->awaitHelloResponse(newOpCtx.get(), horizonParams, topologyVersion, deadline); } -TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) { +TEST_F(ReplCoordTest, IsWritablePrimaryFalseDuringStepdown) { BSONObj configObj = BSON("_id" << "mySet" << "version" << 1 << "members" @@ -162,13 +181,13 @@ TEST_F(ReplCoordTest, IsMasterIsFalseDuringStepdown) { replCoord->updateTerm_forTest(replCoord->getTerm() + 1, &updateTermResult); ASSERT(TopologyCoordinator::UpdateTermResult::kTriggerStepDown == updateTermResult); - // Test that "ismaster" is immediately false, although "secondary" is not yet true. + // Test that "isWritablePrimary" is immediately false, although "secondary" is not yet true. auto opCtx = makeOperationContext(); const auto response = getReplCoord()->awaitHelloResponse(opCtx.get(), {}, boost::none, boost::none); ASSERT_TRUE(response->isConfigSet()); - BSONObj responseObj = response->toBSON(); - ASSERT_FALSE(responseObj["ismaster"].Bool()); + BSONObj responseObj = response->toBSON(false /*useLegacyResponseFields*/); + ASSERT_FALSE(responseObj["isWritablePrimary"].Bool()); ASSERT_FALSE(responseObj["secondary"].Bool()); ASSERT_FALSE(responseObj.hasField("isreplicaset")); @@ -616,7 +635,7 @@ TEST_F( ReplCoordTest, NodeReturnsNoReplicationEnabledAndInfoConfigsvrWhenCheckReplEnabledForCommandWhileConfigsvr) { ReplSettings settings; - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; init(settings); start(); @@ -1561,7 +1580,7 @@ class StepDownTest : public ReplCoordTest { bool force, Milliseconds waitTime, Milliseconds stepDownTime) { using PromisedClientAndOperation = stdx::promise; auto task = stdx::packaged_task(PromisedClientAndOperation)>( - [=](PromisedClientAndOperation operationPromise) -> boost::optional { + [=, this](PromisedClientAndOperation operationPromise) -> boost::optional { auto result = SharedClientAndOperation::make(getServiceContext()); operationPromise.set_value(result); try { @@ -2019,7 +2038,7 @@ TEST_F(StepDownTest, StepDownFailureRestoresDrainState) { { // We can't take writes yet since we're still in drain mode. Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin")); + ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin)); } // Step down where the secondary actually has to catch up before the stepDown can succeed. @@ -2044,7 +2063,7 @@ TEST_F(StepDownTest, StepDownFailureRestoresDrainState) { // in drain mode. 
{ Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin")); + ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin)); } // Now complete drain mode and ensure that we become capable of taking writes. @@ -2053,7 +2072,7 @@ TEST_F(StepDownTest, StepDownFailureRestoresDrainState) { ASSERT_TRUE(getReplCoord()->getMemberState().primary()); Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin)); } class StepDownTestWithUnelectableNode : public StepDownTest { @@ -2941,7 +2960,7 @@ TEST_F(StepDownTest, InterruptingStepDownCommandRestoresWriteAvailability) { // This is the important check, that we stepped back up when aborting the stepdown command // attempt. Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin")); + ASSERT_TRUE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin)); } // Test that if a stepdown command is blocked waiting for secondaries to catch up when an @@ -3001,7 +3020,7 @@ TEST_F(StepDownTest, InterruptingAfterUnconditionalStepdownDoesNotRestoreWriteAv // This is the important check, that we didn't accidentally step back up when aborting the // stepdown command attempt. Lock::GlobalLock lock(opCtx.get(), MODE_IX); - ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), "admin")); + ASSERT_FALSE(getReplCoord()->canAcceptWritesForDatabase(opCtx.get(), DatabaseName::kAdmin)); } TEST_F(ReplCoordTest, GetReplicationModeNone) { @@ -3602,10 +3621,10 @@ TEST_F(ReplCoordTest, AwaitHelloResponseReturnsOnStepDown) { responseAfterDisablingWrites->getTopologyVersion(); ASSERT_EQUALS(topologyVersionAfterDisablingWrites->getCounter(), expectedCounter); ASSERT_EQUALS(topologyVersionAfterDisablingWrites->getProcessId(), expectedProcessId); - // We expect the server to increment the TopologyVersion and respond to waiting hellos - // once we disable writes on the node that is stepping down from primary. At this time, - // the 'ismaster' response field will be false but the node will have yet to transition to - // secondary. + // We expect the server to increment the TopologyVersion and respond to waiting hellos once + // we disable writes on the node that is stepping down from primary. At this time, the + // 'isWritablePrimary' response field will be false but the node will have yet to transition + // to secondary. 
ASSERT_FALSE(responseAfterDisablingWrites->isWritablePrimary()); ASSERT_FALSE(responseAfterDisablingWrites->isSecondary()); ASSERT_EQUALS(responseAfterDisablingWrites->getPrimary().host(), "node1"); @@ -5121,8 +5140,8 @@ TEST_F(ReplCoordTest, HelloResponseMentionsLackOfReplicaSetConfig) { const auto response = getReplCoord()->awaitHelloResponse(opCtx.get(), {}, boost::none, boost::none); ASSERT_FALSE(response->isConfigSet()); - BSONObj responseObj = response->toBSON(); - ASSERT_FALSE(responseObj["ismaster"].Bool()); + BSONObj responseObj = response->toBSON(false /*useLegacyResponseFields*/); + ASSERT_FALSE(responseObj["isWritablePrimary"].Bool()); ASSERT_FALSE(responseObj["secondary"].Bool()); ASSERT_TRUE(responseObj["isreplicaset"].Bool()); ASSERT_EQUALS("Does not have a valid replica set config", responseObj["info"].String()); diff --git a/src/mongo/db/repl/replication_coordinator_mock.cpp b/src/mongo/db/repl/replication_coordinator_mock.cpp index 2352767e59669..05f6159f65fca 100644 --- a/src/mongo/db/repl/replication_coordinator_mock.cpp +++ b/src/mongo/db/repl/replication_coordinator_mock.cpp @@ -27,21 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/repl/replication_coordinator_mock.h" +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/hello_response.h" #include "mongo/db/repl/isself.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/sync_source_resolver.h" +#include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/tenant_migration_decoration.h" +#include "mongo/db/session/internal_session_pool.h" #include "mongo/db/storage/snapshot_manager.h" #include "mongo/db/write_concern_options.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future_impl.h" namespace mongo { namespace repl { @@ -171,6 +182,11 @@ void ReplicationCoordinatorMock::setAwaitReplicationReturnValueFunction( _awaitReplicationReturnValueFunction = std::move(returnValueFunction); } +void ReplicationCoordinatorMock::setRunCmdOnPrimaryAndAwaitResponseFunction( + RunCmdOnPrimaryAndAwaitResponseFunction runCmdFunction) { + _runCmdOnPrimaryAndAwaitResponseFn = std::move(runCmdFunction); +} + SharedSemiFuture ReplicationCoordinatorMock::awaitReplicationAsyncNoWTimeout( const OpTime& opTime, const WriteConcernOptions& writeConcern) { auto opCtx = cc().makeOperationContext(); @@ -189,7 +205,7 @@ bool ReplicationCoordinatorMock::isWritablePrimaryForReportingPurposes() { } bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* opCtx, - StringData dbName) { + const DatabaseName& dbName) { stdx::lock_guard lk(_mutex); // Return true if we allow writes explicitly even when not in primary state, as in sharding @@ -198,18 +214,18 @@ bool ReplicationCoordinatorMock::canAcceptWritesForDatabase(OperationContext* op if (_alwaysAllowWrites) { return true; } - return dbName == "local" || _memberState.primary(); + return dbName == DatabaseName::kLocal || _memberState.primary(); } bool ReplicationCoordinatorMock::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, - StringData dbName) { + const DatabaseName& dbName) { return canAcceptWritesForDatabase(opCtx, dbName); } bool ReplicationCoordinatorMock::canAcceptWritesFor(OperationContext* 
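The hello-response assertions in these tests move off the legacy `ismaster` field. A short sketch of the new shape; the `useLegacyResponseFields` flag is taken from the call sites above, and whether other `toBSON()` overloads remain is not shown in this diff:

```cpp
// Sketch of the renamed hello-response check used throughout these tests.
auto opCtx = makeOperationContext();
const auto response =
    getReplCoord()->awaitHelloResponse(opCtx.get(), {}, boost::none, boost::none);

// With useLegacyResponseFields=false the writable-primary bit is reported as
// 'isWritablePrimary'; the legacy 'ismaster' spelling is no longer asserted on.
BSONObj responseObj = response->toBSON(false /*useLegacyResponseFields*/);
ASSERT_FALSE(responseObj["isWritablePrimary"].Bool());
ASSERT_FALSE(responseObj["secondary"].Bool());
```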
opCtx, const NamespaceStringOrUUID& nsOrUUID) { // TODO - return canAcceptWritesForDatabase(opCtx, nsOrUUID.db()); + return canAcceptWritesForDatabase(opCtx, nsOrUUID.dbName()); } bool ReplicationCoordinatorMock::canAcceptWritesFor_UNSAFE(OperationContext* opCtx, @@ -271,7 +287,7 @@ void ReplicationCoordinatorMock::setMyLastDurableOpTimeAndWallTime( } void ReplicationCoordinatorMock::setMyLastAppliedOpTimeAndWallTimeForward( - const OpTimeAndWallTime& opTimeAndWallTime) { + const OpTimeAndWallTime& opTimeAndWallTime, bool advanceGlobalTimestamp) { stdx::lock_guard lk(_mutex); if (opTimeAndWallTime.opTime > _myLastAppliedOpTime) { @@ -407,6 +423,11 @@ std::vector ReplicationCoordinatorMock::getConfigVotingMembers() c return _getConfigReturnValue.votingMembers(); } +size_t ReplicationCoordinatorMock::getNumConfigVotingMembers() const { + stdx::lock_guard lock(_mutex); + return _getConfigReturnValue.votingMembers().size(); +} + std::int64_t ReplicationCoordinatorMock::getConfigTerm() const { stdx::lock_guard lock(_mutex); return _getConfigReturnValue.getConfigTerm(); @@ -802,6 +823,10 @@ BSONObj ReplicationCoordinatorMock::runCmdOnPrimaryAndAwaitResponse( const BSONObj& cmdObj, OnRemoteCmdScheduledFn onRemoteCmdScheduled, OnRemoteCmdCompleteFn onRemoteCmdComplete) { + if (_runCmdOnPrimaryAndAwaitResponseFn) { + return _runCmdOnPrimaryAndAwaitResponseFn( + opCtx, dbName, cmdObj, onRemoteCmdScheduled, onRemoteCmdComplete); + } return BSON("ok" << 1); } void ReplicationCoordinatorMock::restartScheduledHeartbeats_forTest() { @@ -821,5 +846,9 @@ SplitPrepareSessionManager* ReplicationCoordinatorMock::getSplitPrepareSessionMa return &_splitSessionManager; } +boost::optional ReplicationCoordinatorMock::getInitialSyncId(OperationContext* opCtx) { + return boost::none; +} + } // namespace repl } // namespace mongo diff --git a/src/mongo/db/repl/replication_coordinator_mock.h b/src/mongo/db/repl/replication_coordinator_mock.h index 77a8c715d692e..9ddb22f0ff432 100644 --- a/src/mongo/db/repl/replication_coordinator_mock.h +++ b/src/mongo/db/repl/replication_coordinator_mock.h @@ -29,14 +29,52 @@ #pragma once +#include +#include +#include #include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/split_horizon.h" +#include "mongo/db/repl/split_prepare_session_manager.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/util/concurrency/with_lock.h" 
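`canAcceptWritesForDatabase()` and its `_UNSAFE` variant now take a `const DatabaseName&` instead of a `StringData`, and the mock compares against `DatabaseName::kLocal` rather than the string "local". A hedged sketch of a caller updated for the new signature; the free function below is hypothetical, not part of the diff:

```cpp
#include "mongo/db/database_name.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/repl/replication_coordinator.h"

// Hypothetical helper illustrating the DatabaseName-based overload; the test
// call sites in this diff use DatabaseName::kAdmin the same way.
bool canWriteToAdminDb(mongo::repl::ReplicationCoordinator* replCoord,
                       mongo::OperationContext* opCtx) {
    return replCoord->canAcceptWritesForDatabase(opCtx, mongo::DatabaseName::kAdmin);
}
```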
+#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -46,6 +84,13 @@ struct ConnectionPoolStats; namespace repl { +inline repl::ReplSettings createServerlessReplSettings() { + repl::ReplSettings settings; + settings.setOplogSizeBytes(5 * 1024 * 1024); + settings.setServerlessMode(); + return settings; +} + /** * A mock ReplicationCoordinator. Currently it is extremely simple and exists solely to link * into dbtests. @@ -114,9 +159,10 @@ class ReplicationCoordinatorMock : public ReplicationCoordinator { virtual bool isWritablePrimaryForReportingPurposes(); - virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName); + virtual bool canAcceptWritesForDatabase(OperationContext* opCtx, const DatabaseName& dbName); - virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName); + virtual bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, + const DatabaseName& dbName); bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID) override; @@ -144,7 +190,7 @@ class ReplicationCoordinatorMock : public ReplicationCoordinator { virtual void setMyLastDurableOpTimeAndWallTime(const OpTimeAndWallTime& opTimeAndWallTime); virtual void setMyLastAppliedOpTimeAndWallTimeForward( - const OpTimeAndWallTime& opTimeAndWallTime); + const OpTimeAndWallTime& opTimeAndWallTime, bool advanceGlobalTimestamp); virtual void setMyLastDurableOpTimeAndWallTimeForward( const OpTimeAndWallTime& opTimeAndWallTime); @@ -205,6 +251,8 @@ class ReplicationCoordinatorMock : public ReplicationCoordinator { virtual std::vector getConfigVotingMembers() const override; + virtual size_t getNumConfigVotingMembers() const override; + virtual std::int64_t getConfigTerm() const override; virtual std::int64_t getConfigVersion() const override; @@ -349,6 +397,15 @@ class ReplicationCoordinatorMock : public ReplicationCoordinator { void setAwaitReplicationReturnValueFunction( AwaitReplicationReturnValueFunction returnValueFunction); + using RunCmdOnPrimaryAndAwaitResponseFunction = + std::function; + void setRunCmdOnPrimaryAndAwaitResponseFunction( + RunCmdOnPrimaryAndAwaitResponseFunction runCmdFunction); + /** * Always allow writes even if this node is a writable primary. Used by sharding unit tests. */ @@ -438,6 +495,8 @@ class ReplicationCoordinatorMock : public ReplicationCoordinator { return false; } + boost::optional getInitialSyncId(OperationContext* opCtx) override; + private: void _setMyLastAppliedOpTimeAndWallTime(WithLock lk, const OpTimeAndWallTime& opTimeAndWallTime); @@ -450,6 +509,7 @@ class ReplicationCoordinatorMock : public ReplicationCoordinator { const OpTime&) { return StatusAndDuration(Status::OK(), Milliseconds(0)); }; + RunCmdOnPrimaryAndAwaitResponseFunction _runCmdOnPrimaryAndAwaitResponseFn; // Guards all the variables below mutable Mutex _mutex = MONGO_MAKE_LATCH("ReplicationCoordinatorExternalStateMock::_mutex"); diff --git a/src/mongo/db/repl/replication_coordinator_noop.cpp b/src/mongo/db/repl/replication_coordinator_noop.cpp index 839c9f5802021..afb0eb90cab02 100644 --- a/src/mongo/db/repl/replication_coordinator_noop.cpp +++ b/src/mongo/db/repl/replication_coordinator_noop.cpp @@ -27,10 +27,15 @@ * it in the license file. 
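`ReplicationCoordinatorMock` gains a `setRunCmdOnPrimaryAndAwaitResponseFunction()` hook; when it is set, `runCmdOnPrimaryAndAwaitResponse()` forwards to it instead of returning the default `{ok: 1}`. The exact `std::function` signature is elided in this diff, so the sketch below only assumes the parameter order visible at the forwarding call site (opCtx, dbName, cmdObj, onRemoteCmdScheduled, onRemoteCmdComplete):

```cpp
// Hypothetical test usage of the new hook ('replCoordMock' is an existing
// ReplicationCoordinatorMock owned by the test). Parameter types are left to
// deduction because the alias's signature is elided in this diff.
replCoordMock.setRunCmdOnPrimaryAndAwaitResponseFunction(
    [](auto* opCtx, const auto& dbName, const BSONObj& cmdObj,
       auto onRemoteCmdScheduled, auto onRemoteCmdComplete) -> BSONObj {
        // Simulate the primary rejecting the forwarded command.
        return BSON("ok" << 0 << "errmsg" << "stubbed by test");
    });
```

The same header also adds the small `createServerlessReplSettings()` helper, shown in full in the hunk above, for tests that need serverless-mode `ReplSettings`.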
*/ -#include "mongo/platform/basic.h" - #include "replication_coordinator_noop.h" +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/util/assert_util.h" + namespace mongo { namespace repl { @@ -77,12 +82,12 @@ bool ReplicationCoordinatorNoOp::isWritablePrimaryForReportingPurposes() { } bool ReplicationCoordinatorNoOp::canAcceptWritesForDatabase(OperationContext* opCtx, - StringData dbName) { + const DatabaseName& dbName) { MONGO_UNREACHABLE; } bool ReplicationCoordinatorNoOp::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, - StringData dbName) { + const DatabaseName& dbName) { MONGO_UNREACHABLE; } @@ -199,8 +204,8 @@ void ReplicationCoordinatorNoOp::setMyHeartbeatMessage(const std::string&) { MONGO_UNREACHABLE; } -void ReplicationCoordinatorNoOp::setMyLastAppliedOpTimeAndWallTimeForward( - const OpTimeAndWallTime&) { +void ReplicationCoordinatorNoOp::setMyLastAppliedOpTimeAndWallTimeForward(const OpTimeAndWallTime&, + bool) { MONGO_UNREACHABLE; } @@ -309,6 +314,10 @@ std::vector ReplicationCoordinatorNoOp::getConfigVotingMembers() c MONGO_UNREACHABLE; } +size_t ReplicationCoordinatorNoOp::getNumConfigVotingMembers() const { + MONGO_UNREACHABLE; +} + std::int64_t ReplicationCoordinatorNoOp::getConfigTerm() const { MONGO_UNREACHABLE; } @@ -625,5 +634,9 @@ bool ReplicationCoordinatorNoOp::isRetryableWrite(OperationContext* opCtx) const MONGO_UNREACHABLE; } +boost::optional ReplicationCoordinatorNoOp::getInitialSyncId(OperationContext* opCtx) { + MONGO_UNREACHABLE; +} + } // namespace repl } // namespace mongo diff --git a/src/mongo/db/repl/replication_coordinator_noop.h b/src/mongo/db/repl/replication_coordinator_noop.h index 1a8eca06917af..405a326fa1ccc 100644 --- a/src/mongo/db/repl/replication_coordinator_noop.h +++ b/src/mongo/db/repl/replication_coordinator_noop.h @@ -29,7 +29,46 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/split_horizon.h" +#include "mongo/db/repl/split_prepare_session_manager.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -71,8 +110,9 @@ class ReplicationCoordinatorNoOp final : public ReplicationCoordinator { bool isInPrimaryOrSecondaryState(OperationContext* opCtx) const final; bool isInPrimaryOrSecondaryState_UNSAFE() const final; - bool 
canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) final; - bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) final; + bool canAcceptWritesForDatabase(OperationContext* opCtx, const DatabaseName& dbName) final; + bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, + const DatabaseName& dbName) final; bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID) final; bool canAcceptWritesFor_UNSAFE(OperationContext* opCtx, @@ -123,7 +163,8 @@ class ReplicationCoordinatorNoOp final : public ReplicationCoordinator { void setMyLastAppliedOpTimeAndWallTime(const OpTimeAndWallTime& opTimeAndWallTime) final; void setMyLastDurableOpTimeAndWallTime(const OpTimeAndWallTime& opTimeAndWallTime) final; - void setMyLastAppliedOpTimeAndWallTimeForward(const OpTimeAndWallTime& opTimeAndWallTime) final; + void setMyLastAppliedOpTimeAndWallTimeForward(const OpTimeAndWallTime& opTimeAndWallTime, + bool advanceGlobalTimestamp) final; void setMyLastDurableOpTimeAndWallTimeForward(const OpTimeAndWallTime& opTimeAndWallTime) final; void resetMyLastOpTimes() final; @@ -179,6 +220,8 @@ class ReplicationCoordinatorNoOp final : public ReplicationCoordinator { std::vector getConfigVotingMembers() const final; + size_t getNumConfigVotingMembers() const final; + std::int64_t getConfigTerm() const final; std::int64_t getConfigVersion() const final; @@ -355,6 +398,8 @@ class ReplicationCoordinatorNoOp final : public ReplicationCoordinator { virtual bool isRetryableWrite(OperationContext* opCtx) const override; + boost::optional getInitialSyncId(OperationContext* opCtx) override; + private: ServiceContext* const _service; }; diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp index 744e1cf824f9a..0df8d2945753e 100644 --- a/src/mongo/db/repl/replication_coordinator_test_fixture.cpp +++ b/src/mongo/db/repl/replication_coordinator_test_fixture.cpp @@ -27,38 +27,56 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - #include "mongo/db/repl/replication_coordinator_test_fixture.h" -#include -#include - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/repl/hello_response.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" +#include "mongo/db/repl/repl_set_heartbeat_response.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_external_state_mock.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/replication_recovery_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/repl/topology_coordinator.h" -#include "mongo/db/storage/storage_engine_init.h" -#include "mongo/db/storage/storage_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" #include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace repl { @@ -116,7 +134,10 @@ void ReplCoordTest::addSelf(const HostAndPort& selfHost) { void ReplCoordTest::init() { invariant(!_repl); invariant(!_callShutdown); - + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } auto service = getGlobalServiceContext(); _storageInterface = new StorageInterfaceMock(); StorageInterface::set(service, std::unique_ptr(_storageInterface)); @@ -160,6 +181,9 @@ void ReplCoordTest::init() { executor::ThreadPoolMock::Options tpOptions; tpOptions.onCreateThread = []() { Client::initThread("replexec"); + + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); }; auto pool = std::make_unique(_net, seed, tpOptions); auto replExec = @@ -355,7 +379,8 @@ void ReplCoordTest::simulateSuccessfulV1Election() { "Election timeout scheduled at {electionTimeoutWhen} (simulator time)", "electionTimeoutWhen"_attr = electionTimeoutWhen); - simulateSuccessfulV1ElectionAt(electionTimeoutWhen); + auto opCtx{makeOperationContext()}; + simulateSuccessfulV1ElectionAt(opCtx.get(), electionTimeoutWhen); } void ReplCoordTest::simulateSuccessfulV1ElectionWithoutExitingDrainMode(Date_t electionTime, @@ -424,15 +449,14 @@ void ReplCoordTest::simulateSuccessfulV1ElectionWithoutExitingDrainMode(Date_t e ASSERT_TRUE(helloResponse->isSecondary()) << helloResponse->toBSON().toString(); } -void 
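Both the fixture's `init()` and the mock executor's thread-creation hook now mark their Clients as system operations that are unkillable by stepdown. A sketch of that pattern; the `<Client>` template argument is an assumption here because this diff's formatting drops angle-bracket contents, and the rationale (keeping test threads alive across simulated stepdowns) is inferred rather than stated in the diff:

```cpp
// Sketch: marking the current Client unkillable by stepdown, as init() and the
// "replexec" thread-creation hook now do. 'stdx::lock_guard<Client>' restores
// the template argument that this diff's formatting elides (assumption).
tpOptions.onCreateThread = []() {
    Client::initThread("replexec");

    stdx::lock_guard<Client> lk(cc());
    cc().setSystemOperationUnkillableByStepdown(lk);
};
```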
ReplCoordTest::simulateSuccessfulV1ElectionAt(Date_t electionTime) { - auto opCtx = makeOperationContext(); - simulateSuccessfulV1ElectionWithoutExitingDrainMode(electionTime, opCtx.get()); +void ReplCoordTest::simulateSuccessfulV1ElectionAt(OperationContext* opCtx, Date_t electionTime) { + simulateSuccessfulV1ElectionWithoutExitingDrainMode(electionTime, opCtx); ReplicationCoordinatorImpl* replCoord = getReplCoord(); - signalDrainComplete(opCtx.get()); + signalDrainComplete(opCtx); ASSERT(replCoord->getApplierState() == ReplicationCoordinator::ApplierState::Stopped); - auto helloResponse = replCoord->awaitHelloResponse(opCtx.get(), {}, boost::none, boost::none); + auto helloResponse = replCoord->awaitHelloResponse(opCtx, {}, boost::none, boost::none); ASSERT_TRUE(helloResponse->isWritablePrimary()) << helloResponse->toBSON().toString(); ASSERT_FALSE(helloResponse->isSecondary()) << helloResponse->toBSON().toString(); diff --git a/src/mongo/db/repl/replication_coordinator_test_fixture.h b/src/mongo/db/repl/replication_coordinator_test_fixture.h index e116a05ed2320..5afa09adaef3c 100644 --- a/src/mongo/db/repl/replication_coordinator_test_fixture.h +++ b/src/mongo/db/repl/replication_coordinator_test_fixture.h @@ -29,17 +29,33 @@ #pragma once +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_impl.h" +#include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -49,8 +65,10 @@ struct HostAndPort; namespace repl { class ReplSetConfig; + class ReplicationCoordinatorExternalStateMock; class ReplicationCoordinatorImpl; + class StorageInterfaceMock; class TopologyCoordinator; @@ -131,7 +149,7 @@ class ReplCoordTest : public ServiceContextMongoDTest { if (wallTime == Date_t()) { wallTime = Date_t() + Seconds(opTime.getSecs()); } - getReplCoord()->setMyLastAppliedOpTimeAndWallTimeForward({opTime, wallTime}); + getReplCoord()->setMyLastAppliedOpTimeAndWallTimeForward({opTime, wallTime}, true); } void replCoordSetMyLastDurableOpTime(const OpTime& opTime, Date_t wallTime = Date_t()) { @@ -268,7 +286,7 @@ class ReplCoordTest : public ServiceContextMongoDTest { * progressing time past that point, takes in what time to expect an election to occur at. * Useful for simulating elections triggered via priority takeover. 
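`simulateSuccessfulV1ElectionAt()` now takes the `OperationContext*` from its caller instead of creating one internally, and `simulateSuccessfulV1Election()` was updated to match. A minimal sketch of the new call shape; `electionTimeoutWhen` stands in for whatever `Date_t` the test computes:

```cpp
// Sketch: callers now own the OperationContext and pass it down.
auto opCtx{makeOperationContext()};
simulateSuccessfulV1ElectionAt(opCtx.get(), electionTimeoutWhen);
```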
*/ - void simulateSuccessfulV1ElectionAt(Date_t electionTime); + void simulateSuccessfulV1ElectionAt(OperationContext* opCtx, Date_t electionTime); /** * When the test has been configured with a replica set config with a single member, use this diff --git a/src/mongo/db/repl/replication_info.cpp b/src/mongo/db/repl/replication_info.cpp index d08a7f4a27be4..5897ef0ce768b 100644 --- a/src/mongo/db/repl/replication_info.cpp +++ b/src/mongo/db/repl/replication_info.cpp @@ -27,44 +27,91 @@ * it in the license file. */ -#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/client/dbclient_connection.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/audit.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/not_primary_error_tracker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" -#include "mongo/db/query/internal_plans.h" +#include "mongo/db/read_concern_support_result.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/repl/hello_auth.h" #include "mongo/db/repl/hello_gen.h" #include "mongo/db/repl/hello_response.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/repl/replication_auth.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_process.h" -#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/split_horizon.h" #include "mongo/db/s/global_user_write_block_state.h" -#include "mongo/db/session/logical_session_id.h" -#include "mongo/db/storage/storage_options.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/stats/counters.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/wire_version.h" -#include "mongo/executor/network_interface.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/rpc/topology_version_gen.h" #include 
"mongo/transport/hello_metrics.h" -#include "mongo/util/decimal_counter.h" +#include "mongo/transport/message_compressor_manager.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kFTDC diff --git a/src/mongo/db/repl/replication_metrics.cpp b/src/mongo/db/repl/replication_metrics.cpp index addb4f7027df7..c60c81b8598ec 100644 --- a/src/mongo/db/repl/replication_metrics.cpp +++ b/src/mongo/db/repl/replication_metrics.cpp @@ -29,8 +29,21 @@ #include "mongo/db/repl/replication_metrics.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/repl/election_reason_counter.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/replication_metrics.h b/src/mongo/db/repl/replication_metrics.h index 6eef7fc5268cf..7ecf9bb005868 100644 --- a/src/mongo/db/repl/replication_metrics.h +++ b/src/mongo/db/repl/replication_metrics.h @@ -29,10 +29,20 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_metrics_gen.h" #include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/replication_process.cpp b/src/mongo/db/repl/replication_process.cpp index a5b82235fc6b1..7441561828d68 100644 --- a/src/mongo/db/repl/replication_process.cpp +++ b/src/mongo/db/repl/replication_process.cpp @@ -28,21 +28,23 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/base/string_data.h" -#include "mongo/db/catalog/collection_options.h" +#include + +#include "mongo/base/status_with.h" #include "mongo/db/client.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_process.h" -#include "mongo/db/repl/rollback_gen.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" -#include "mongo/util/str.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/replication_process_test.cpp b/src/mongo/db/repl/replication_process_test.cpp index d0592001a2036..af316fc248e30 100644 --- a/src/mongo/db/repl/replication_process_test.cpp +++ b/src/mongo/db/repl/replication_process_test.cpp @@ -27,22 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/client.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/replication_recovery_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/repl/replication_recovery.cpp b/src/mongo/db/repl/replication_recovery.cpp index baa63df0cb77d..79e502365d37e 100644 --- a/src/mongo/db/repl/replication_recovery.cpp +++ b/src/mongo/db/repl/replication_recovery.cpp @@ -30,34 +30,76 @@ LOGV2_DEBUG_OPTIONS(ID, DLEVEL, {logv2::LogComponent::kStorageRecovery}, MESSAGE, ##__VA_ARGS__) -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/replication_recovery.h" - -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/db_raii.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/index_builds_coordinator.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/apply_ops.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/oplog_applier_impl.h" +#include "mongo/db/repl/oplog_batcher.h" #include "mongo/db/repl/oplog_buffer.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_interface.h" #include "mongo/db/repl/oplog_interface_local.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replica_set_aware_service.h" -#include "mongo/db/repl/replication_consistency_markers_impl.h" +#include "mongo/db/repl/replication_consistency_markers.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_recovery.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/transaction_oplog_application.h" +#include "mongo/db/server_options.h" #include "mongo/db/server_recovery.h" -#include "mongo/db/session/session.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/control/journal_flusher.h" #include "mongo/db/storage/durable_history_pin.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/db/transaction/transaction_history_iterator.h" -#include 
"mongo/db/transaction/transaction_participant.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/interruptible.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -821,7 +863,8 @@ void ReplicationRecoveryImpl::_truncateOplogTo(OperationContext* opCtx, fassertFailedWithStatusNoTrace( 34418, Status(ErrorCodes::NamespaceNotFound, - str::stream() << "Can't find " << NamespaceString::kRsOplogNamespace.ns())); + str::stream() << "Can't find " + << NamespaceString::kRsOplogNamespace.toStringForErrorMsg())); } // Find an oplog entry <= truncateAfterTimestamp. diff --git a/src/mongo/db/repl/replication_recovery.h b/src/mongo/db/repl/replication_recovery.h index 6eb7782de00eb..ae4babcf65eaf 100644 --- a/src/mongo/db/repl/replication_recovery.h +++ b/src/mongo/db/repl/replication_recovery.h @@ -29,7 +29,10 @@ #pragma once +#include + #include "mongo/base/status_with.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/repl/optime.h" namespace mongo { diff --git a/src/mongo/db/repl/replication_recovery_test.cpp b/src/mongo/db/repl/replication_recovery_test.cpp index 4d21f4d9d9187..b9f4bb2c65a20 100644 --- a/src/mongo/db/repl/replication_recovery_test.cpp +++ b/src/mongo/db/repl/replication_recovery_test.cpp @@ -28,36 +28,83 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier_impl_test_fixture.h" #include "mongo/db/repl/oplog_entry.h" -#include "mongo/db/repl/oplog_interface_local.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/replication_recovery.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include 
"mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/db/storage/durable_history_pin.h" -#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" -#include "mongo/util/str.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -67,7 +114,7 @@ namespace repl { namespace { const auto& oplogNs = NamespaceString::kRsOplogNamespace; -const NamespaceString testNs("a.a"); +const NamespaceString testNs = NamespaceString::createNamespaceString_forTest("a.a"); class StorageInterfaceRecovery : public StorageInterfaceImpl { public: @@ -122,12 +169,12 @@ class StorageInterfaceRecovery : public StorageInterfaceImpl { class ReplicationRecoveryTestObObserver : public OpObserverNoop { public: - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) override { + const CollectionDropType dropType, + bool markFromMigrate) override { // If the oplog is not disabled for this namespace, then we need to reserve an op time for // the drop. if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(opCtx, collectionName)) { @@ -136,17 +183,6 @@ class ReplicationRecoveryTestObObserver : public OpObserverNoop { return {}; } - /** - * Called when we prepare a multi-doc transaction using the TransactionParticipant. 
- */ - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) override { - return std::make_unique(/*prepare=*/false); - } - const repl::OpTime dropOpTime = {Timestamp(Seconds(100), 1U), 1LL}; }; @@ -302,7 +338,7 @@ repl::OplogEntry _makeTransactionOplogEntry(repl::OpTime opTime, builder.append("t", opTime.getTerm()); builder.append("v", repl::OplogEntry::kOplogVersion); builder.append("op", "c"); - builder.append("ns", testNs.toString()); + builder.append("ns", testNs.toString_forTest()); builder.append("o", object); builder.append("wall", wallTime); builder.append("stmtId", stmtId); @@ -1134,7 +1170,7 @@ TEST_F(ReplicationRecoveryTest, CommitTransactionOplogEntryCorrectlyUpdatesConfi const auto txnOperations = BSON_ARRAY(BSON("op" << "i" - << "ns" << testNs.toString() << "o" + << "ns" << testNs.toString_forTest() << "o" << BSON("_id" << 1))); const auto prepareDate = Date_t::now(); const auto prepareOp = @@ -1209,7 +1245,7 @@ TEST_F(ReplicationRecoveryTest, const auto txnOperations = BSON_ARRAY(BSON("op" << "i" - << "ns" << testNs.toString() << "o" + << "ns" << testNs.toString_forTest() << "o" << BSON("_id" << 1))); const auto prepareDate = Date_t::now(); const auto prepareOp = diff --git a/src/mongo/db/repl/reporter.cpp b/src/mongo/db/repl/reporter.cpp index c135c17987752..03fd60251e028 100644 --- a/src/mongo/db/repl/reporter.cpp +++ b/src/mongo/db/repl/reporter.cpp @@ -30,10 +30,17 @@ #include "mongo/db/repl/reporter.h" -#include "mongo/bson/util/bson_extract.h" +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/commands/server_status_metric.h" -#include "mongo/db/repl/update_position_args.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" @@ -142,7 +149,7 @@ Status Reporter::trigger() { } auto scheduleResult = - _executor->scheduleWork([=](const executor::TaskExecutor::CallbackArgs& args) { + _executor->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& args) { _prepareAndSendCommandCallback(args, true); }); @@ -252,7 +259,7 @@ void Reporter::_processResponseCallback( auto when = _executor->now() + _keepAliveInterval; bool fromTrigger = false; auto scheduleResult = _executor->scheduleWorkAt( - when, [=](const executor::TaskExecutor::CallbackArgs& args) { + when, [=, this](const executor::TaskExecutor::CallbackArgs& args) { _prepareAndSendCommandCallback(args, fromTrigger); }); _status = scheduleResult.getStatus(); diff --git a/src/mongo/db/repl/reporter.h b/src/mongo/db/repl/reporter.h index 824ae59b17ef7..8e8ef1dfc0e1a 100644 --- a/src/mongo/db/repl/reporter.h +++ b/src/mongo/db/repl/reporter.h @@ -29,15 +29,20 @@ #pragma once +#include #include +#include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/reporter_test.cpp 
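Alongside the switch to the `NamespaceString::createNamespaceString_forTest()` / `toString_forTest()` helpers, OpObserver test doubles that override `onDropCollection()` need the new trailing `bool markFromMigrate` parameter, as `ReplicationRecoveryTestObObserver` shows above. A hedged sketch of another such override; the class name and counter below are illustrative only, and just the signature is taken from this diff:

```cpp
// Hypothetical OpObserver test double updated for the extra parameter.
class DropCountingObObserver : public OpObserverNoop {
public:
    repl::OpTime onDropCollection(OperationContext* opCtx,
                                  const NamespaceString& collectionName,
                                  const UUID& uuid,
                                  std::uint64_t numRecords,
                                  const CollectionDropType dropType,
                                  bool markFromMigrate) override {
        ++_drops;
        return {};  // no oplog slot reserved by this double
    }

private:
    int _drops = 0;
};
```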
b/src/mongo/db/repl/reporter_test.cpp index 45a6bb84e1ca0..8be28afd01244 100644 --- a/src/mongo/db/repl/reporter_test.cpp +++ b/src/mongo/db/repl/reporter_test.cpp @@ -27,16 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/reporter.h" #include "mongo/db/repl/update_position_args.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/task_executor_proxy.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" namespace { diff --git a/src/mongo/db/repl/roll_back_local_operations.cpp b/src/mongo/db/repl/roll_back_local_operations.cpp index bd8e7fd8d6ff2..eff021401c27d 100644 --- a/src/mongo/db/repl/roll_back_local_operations.cpp +++ b/src/mongo/db/repl/roll_back_local_operations.cpp @@ -28,11 +28,21 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/repl/roll_back_local_operations.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/roll_back_local_operations.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/repl/roll_back_local_operations.h b/src/mongo/db/repl/roll_back_local_operations.h index a693deadec782..5d18bfdf8706d 100644 --- a/src/mongo/db/repl/roll_back_local_operations.h +++ b/src/mongo/db/repl/roll_back_local_operations.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/base/status.h" #include "mongo/base/status_with.h" @@ -39,6 +40,7 @@ #include "mongo/db/repl/oplog_interface.h" #include "mongo/db/repl/optime.h" #include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/roll_back_local_operations_test.cpp b/src/mongo/db/repl/roll_back_local_operations_test.cpp index 6c092bdb26934..38773ee22a3f6 100644 --- a/src/mongo/db/repl/roll_back_local_operations_test.cpp +++ b/src/mongo/db/repl/roll_back_local_operations_test.cpp @@ -28,20 +28,40 @@ */ -#include "mongo/platform/basic.h" - +#include #include - -#include "mongo/client/connection_pool.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/client/dbclient_mockcursor.h" -#include "mongo/db/client.h" -#include "mongo/db/jsobj.h" +#include "mongo/client/read_preference.h" +#include 
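The Reporter lambdas switch from `[=]` to `[=, this]`. The reason is not stated in the diff, but is the usual one: C++20 deprecates the implicit capture of `this` by `[=]`, so the member-function call inside the callback needs `this` spelled out. Sketch:

```cpp
// The C++20-friendly capture list: '[=]' no longer implies capturing 'this',
// so it is written explicitly for the member call inside the callback.
auto scheduleResult =
    _executor->scheduleWork([=, this](const executor::TaskExecutor::CallbackArgs& args) {
        _prepareAndSendCommandCallback(args, /*fromTrigger=*/true);
    });
```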
"mongo/db/query/find_command.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/oplog_interface_mock.h" #include "mongo/db/repl/oplog_interface_remote.h" #include "mongo/db/repl/roll_back_local_operations.h" +#include "mongo/db/transaction/transaction_history_iterator.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -352,8 +372,7 @@ void checkRemoteIterator(int numNetworkFailures, bool expectedToSucceed) { }; auto localOperation = makeOpAndRecordId(1); - OplogInterfaceRemote remoteOplogMock( - HostAndPort("229w43rd", 10036), getConnection, "somecollection", 0); + OplogInterfaceRemote remoteOplogMock(HostAndPort("229w43rd", 10036), getConnection, 0); auto result = Status::OK(); diff --git a/src/mongo/db/repl/rollback_checker.cpp b/src/mongo/db/repl/rollback_checker.cpp index 93de51c82a832..14e92a34d6775 100644 --- a/src/mongo/db/repl/rollback_checker.cpp +++ b/src/mongo/db/repl/rollback_checker.cpp @@ -28,9 +28,18 @@ */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/repl/rollback_checker.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/rollback_checker.h b/src/mongo/db/repl/rollback_checker.h index 75a948af1cbcc..0ddc6916d70fd 100644 --- a/src/mongo/db/repl/rollback_checker.h +++ b/src/mongo/db/repl/rollback_checker.h @@ -29,9 +29,13 @@ #pragma once +#include + +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/rollback_checker_test.cpp b/src/mongo/db/repl/rollback_checker_test.cpp index 1b5adcafb7830..2ce64a25cb209 100644 --- a/src/mongo/db/repl/rollback_checker_test.cpp +++ b/src/mongo/db/repl/rollback_checker_test.cpp @@ -27,15 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include - -#include "mongo/db/client.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/rollback_checker.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/repl/rollback_impl.cpp b/src/mongo/db/repl/rollback_impl.cpp index d10a1673061b3..2475fe9e44e87 100644 --- a/src/mongo/db/repl/rollback_impl.cpp +++ b/src/mongo/db/repl/rollback_impl.cpp @@ -29,16 +29,41 @@ #include "mongo/db/repl/rollback_impl.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/catalog/import_collection_oplog_entry_gen.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" @@ -46,30 +71,63 @@ #include "mongo/db/dbhelpers.h" #include "mongo/db/global_index.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/logical_time_validator.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/repl/apply_ops.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/repl/apply_ops_command_info.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/replication_recovery.h" #include "mongo/db/repl/roll_back_local_operations.h" #include "mongo/db/repl/rollback_impl_gen.h" +#include "mongo/db/repl/split_prepare_session_manager.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/transaction_oplog_application.h" #include "mongo/db/s/type_shard_identity.h" +#include "mongo/db/server_options.h" #include 
"mongo/db/server_recovery.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions_local.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/storage/historical_ident_tracker.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/remove_saver.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/s/catalog/type_config_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationRollback @@ -530,15 +588,23 @@ void RollbackImpl::_restoreTxnsTableEntryFromRetryableWrites(OperationContext* o sessionTxnRecord.setLastWriteDate(wallClockTime); } const auto nss = NamespaceString::kSessionTransactionsTableNamespace; - writeConflictRetry(opCtx, "updateSessionTransactionsTableInRollback", nss.ns(), [&] { + writeConflictRetry(opCtx, "updateSessionTransactionsTableInRollback", nss, [&] { opCtx->recoveryUnit()->allowOneUntimestampedWrite(); - AutoGetCollection collection(opCtx, nss, MODE_IX); + auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); auto filter = BSON(SessionTxnRecord::kSessionIdFieldName << sessionId.toBSON()); UnreplicatedWritesBlock uwb(opCtx); // Perform an untimestamped write so that it will not be rolled back on recovering // to the 'stableTimestamp' if we were to crash. This is safe because this update is // meant to be consistent with the 'stableTimestamp' and not the common point. - Helpers::upsert(opCtx, nss, filter, sessionTxnRecord.toBSON(), /*fromMigrate=*/false); + Helpers::upsert( + opCtx, collection, filter, sessionTxnRecord.toBSON(), /*fromMigrate=*/false); }); } // Take a stable checkpoint so that writes to the 'config.transactions' table are @@ -603,9 +669,6 @@ void RollbackImpl::_runPhaseFromAbortToReconstructPreparedTxns( _rollbackStats.stableTimestamp = stableTimestamp; _listener->onRecoverToStableTimestamp(stableTimestamp); - // Rollback historical ident entries. - HistoricalIdentTracker::get(opCtx).rollbackTo(stableTimestamp); - // Log the total number of insert and update operations that have been rolled back as a // result of recovering to the stable timestamp. auto getCommandCount = [&](StringData key) { @@ -664,7 +727,9 @@ void RollbackImpl::_runPhaseFromAbortToReconstructPreparedTxns( // rollback. 
_correctRecordStoreCounts(opCtx); - tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); + if (_replicationCoordinator->getSettings().isServerless()) { + tenant_migration_access_blocker::recoverTenantMigrationAccessBlockers(opCtx); + } ServerlessOperationLockRegistry::recoverLocks(opCtx); // Reconstruct prepared transactions after counts have been adjusted. Since prepared @@ -722,8 +787,8 @@ void RollbackImpl::_correctRecordStoreCounts(OperationContext* opCtx) { "uuid"_attr = uuid.toString()); AutoGetCollectionForRead collToScan(opCtx, nss); invariant(coll == collToScan.getCollection().get(), - str::stream() << "Catalog returned invalid collection: " << nss.ns() << " (" - << uuid.toString() << ")"); + str::stream() << "Catalog returned invalid collection: " + << nss.toStringForErrorMsg() << " (" << uuid.toString() << ")"); auto exec = getCollectionScanExecutor(opCtx, collToScan.getCollection(), PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY, @@ -750,8 +815,7 @@ void RollbackImpl::_correctRecordStoreCounts(OperationContext* opCtx) { newCount = countFromScan; } - auto status = - _storageInterface->setCollectionCount(opCtx, {nss.db().toString(), uuid}, newCount); + auto status = _storageInterface->setCollectionCount(opCtx, {nss.dbName(), uuid}, newCount); if (!status.isOK()) { // We ignore errors here because crashing or leaving rollback would only leave // collection counts more inaccurate. @@ -1352,6 +1416,13 @@ Timestamp RollbackImpl::_recoverToStableTimestamp(OperationContext* opCtx) { // Recover to the stable timestamp while holding the global exclusive lock. This may throw, // which the caller must handle. Lock::GlobalWrite globalWrite(opCtx); + + // Reset the drop pending reaper state prior to recovering to the stable timestamp, which + // re-opens the catalog and can add drop pending idents. This prevents collisions with idents + // already registered with the reaper. + auto storageEngine = opCtx->getServiceContext()->getStorageEngine(); + storageEngine->clearDropPendingState(opCtx); + return _storageInterface->recoverToStableTimestamp(opCtx); } @@ -1360,7 +1431,15 @@ Status RollbackImpl::_triggerOpObserver(OperationContext* opCtx) { return Status(ErrorCodes::ShutdownInProgress, "rollback shutting down"); } LOGV2(21610, "Triggering the rollback op observer"); - opCtx->getServiceContext()->getOpObserver()->onReplicationRollback(opCtx, _observerInfo); + + // Any exceptions thrown from onReplicationRollback() indicates a rollback failure that may + // have led us to some inconsistent on-disk or memory state, so we crash instead. + try { + opCtx->getServiceContext()->getOpObserver()->onReplicationRollback(opCtx, _observerInfo); + } catch (const DBException& ex) { + fassert(6050902, ex.toStatus()); + } + return Status::OK(); } @@ -1396,11 +1475,7 @@ void RollbackImpl::_resetDropPendingState(OperationContext* opCtx) { // replication subsystem or the storage engine. DropPendingCollectionReaper::get(opCtx)->clearDropPendingState(); - // After recovering to a timestamp, the list of drop-pending idents maintained by the storage - // engine is no longer accurate and needs to be cleared. 
auto storageEngine = opCtx->getServiceContext()->getStorageEngine(); - storageEngine->clearDropPendingState(); - std::vector dbNames = storageEngine->listDatabases(); for (const auto& dbName : dbNames) { Lock::DBLock dbLock(opCtx, dbName, MODE_X); diff --git a/src/mongo/db/repl/rollback_impl.h b/src/mongo/db/repl/rollback_impl.h index 33bc856b5f3c0..af5d4bdb73976 100644 --- a/src/mongo/db/repl/rollback_impl.h +++ b/src/mongo/db/repl/rollback_impl.h @@ -29,14 +29,34 @@ #pragma once +#include +#include #include +#include +#include +#include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_interface.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/roll_back_local_operations.h" #include "mongo/db/repl/rollback.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -46,6 +66,7 @@ namespace repl { class OplogInterface; class ReplicationCoordinator; + class ReplicationProcess; /** diff --git a/src/mongo/db/repl/rollback_impl_test.cpp b/src/mongo/db/repl/rollback_impl_test.cpp index 45f8e35e942a2..46dee6d397ea4 100644 --- a/src/mongo/db/repl/rollback_impl_test.cpp +++ b/src/mongo/db/repl/rollback_impl_test.cpp @@ -27,45 +27,86 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_mock.h" -#include "mongo/db/catalog/drop_collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_interface_local.h" #include "mongo/db/repl/oplog_interface_mock.h" +#include "mongo/db/repl/replication_consistency_markers.h" +#include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/rollback_impl.h" #include "mongo/db/repl/rollback_test_fixture.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/type_shard_identity.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/catalog/type_config_version.h" #include "mongo/stdx/thread.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationRollback - namespace mongo { namespace repl { namespace { -NamespaceString kOplogNSS("local.oplog.rs"); -NamespaceString nss("test.coll"); +NamespaceString kOplogNSS = NamespaceString::createNamespaceString_forTest("local.oplog.rs"); +NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.coll"); std::string kGenericUUIDStr = "b4c66a44-c1ca-4d86-8d25-12e82fa2de5b"; BSONObj makeInsertOplogEntry(long long time, BSONObj obj, StringData ns, UUID uuid) { @@ -187,7 +228,7 @@ class RollbackImplTest : public RollbackTest { NamespaceString nss, boost::optional time = boost::none) { const auto optime = time.value_or(_counter++); - ASSERT_OK(_insertOplogEntry(makeInsertOplogEntry(optime, doc, nss.ns(), uuid))); + ASSERT_OK(_insertOplogEntry(makeInsertOplogEntry(optime, doc, nss.ns_forTest(), uuid))); ASSERT_OK(_storageInterface->insertDocument( 
_opCtx.get(), nss, {doc, Timestamp(optime, optime)}, optime)); } @@ -204,7 +245,8 @@ class RollbackImplTest : public RollbackTest { const auto optime = time.value_or(_counter++); ASSERT_OK(_storageInterface->insertDocument( _opCtx.get(), nss, {doc, Timestamp(optime, optime)}, optime)); - return std::make_pair(makeInsertOplogEntry(optime, doc, nss.ns(), uuid), RecordId(optime)); + return std::make_pair(makeInsertOplogEntry(optime, doc, nss.ns_forTest(), uuid), + RecordId(optime)); } /** @@ -219,7 +261,8 @@ class RollbackImplTest : public RollbackTest { NamespaceString nss, boost::optional optime = boost::none) { const auto time = optime.value_or(_counter++); - ASSERT_OK(_insertOplogEntry(makeUpdateOplogEntry(time, query, newDoc, nss.ns(), uuid))); + ASSERT_OK( + _insertOplogEntry(makeUpdateOplogEntry(time, query, newDoc, nss.ns_forTest(), uuid))); ASSERT_OK(_storageInterface->insertDocument( _opCtx.get(), nss, {newDoc, Timestamp(time, time)}, time)); } @@ -235,7 +278,7 @@ class RollbackImplTest : public RollbackTest { NamespaceString nss, boost::optional optime = boost::none) { const auto time = optime.value_or(_counter++); - ASSERT_OK(_insertOplogEntry(makeDeleteOplogEntry(time, id.wrap(), nss.ns(), uuid))); + ASSERT_OK(_insertOplogEntry(makeDeleteOplogEntry(time, id.wrap(), nss.ns_forTest(), uuid))); WriteUnitOfWork wuow{_opCtx.get()}; ASSERT_OK(_storageInterface->deleteById(_opCtx.get(), nss, id)); ASSERT_OK(_opCtx->recoveryUnit()->setTimestamp(Timestamp(time, time))); @@ -389,8 +432,8 @@ BSONObj makeOp(OpTime time) { auto kGenericUUID = unittest::assertGet(UUID::parse(kGenericUUIDStr)); return BSON("ts" << time.getTimestamp() << "t" << time.getTerm() << "op" << "n" - << "o" << BSONObj() << "ns" << nss.ns() << "ui" << kGenericUUID << "wall" - << Date_t()); + << "o" << BSONObj() << "ns" << nss.ns_forTest() << "ui" << kGenericUUID + << "wall" << Date_t()); } BSONObj makeOp(int count) { @@ -779,7 +822,8 @@ TEST_F(RollbackImplTest, RollbackReturnsBadStatusIfIncrementRollbackIDFails) { ASSERT_OK(_insertOplogEntry(makeOp(2))); // Delete the rollback id collection. - auto rollbackIdNss = NamespaceString(_storageInterface->kDefaultRollbackIdNamespace); + auto rollbackIdNss = NamespaceString::createNamespaceString_forTest( + _storageInterface->kDefaultRollbackIdNamespace); ASSERT_OK(_storageInterface->dropCollection(_opCtx.get(), rollbackIdNss)); _assertDocsInOplog(_opCtx.get(), {1, 2}); @@ -893,10 +937,10 @@ TEST_F(RollbackImplTest, // Insert another document so the collection count is 2. 
const Timestamp time = Timestamp(2, 2); ASSERT_OK(_storageInterface->insertDocument( - _opCtx.get(), {nss.db().toString(), uuid}, {BSON("_id" << 2), time}, time.asULL())); + _opCtx.get(), {nss.db_forTest().toString(), uuid}, {BSON("_id" << 2), time}, time.asULL())); ASSERT_EQ(2ULL, unittest::assertGet(_storageInterface->getCollectionCount( - _opCtx.get(), {nss.db().toString(), uuid}))); + _opCtx.get(), {nss.db_forTest().toString(), uuid}))); _assertDocsInOplog(_opCtx.get(), {1, 2}); auto truncateAfterPoint = @@ -1014,12 +1058,12 @@ TEST_F(RollbackImplTest, RollbackDoesNotWriteRollbackFilesIfNoInsertsOrUpdatesAf _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); const auto uuid = UUID::gen(); - const auto nss = NamespaceString("db.coll"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.coll"); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); const auto oplogEntry = BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op" << "c" << "wall" << Date_t() << "o" << BSON("create" << nss.coll()) - << "ns" << nss.ns() << "ui" << uuid); + << "ns" << nss.ns_forTest() << "ui" << uuid); ASSERT_OK(_insertOplogEntry(oplogEntry)); ASSERT_OK(_rollback->runRollback(_opCtx.get())); @@ -1032,7 +1076,7 @@ TEST_F(RollbackImplTest, RollbackSavesInsertedDocumentToFile) { ASSERT_OK(_insertOplogEntry(commonOp.first)); _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); - const auto nss = NamespaceString("db.people"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.people"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -1053,7 +1097,7 @@ TEST_F(RollbackImplTest, RollbackSavesLatestVersionOfDocumentWhenThereAreMultipl ASSERT_OK(_insertOplogEntry(commonOp.first)); _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); - const auto nss = NamespaceString("db.people"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.people"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -1078,7 +1122,7 @@ TEST_F(RollbackImplTest, RollbackSavesUpdatedDocumentToFile) { ASSERT_OK(_insertOplogEntry(commonOp.first)); _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); - const auto nss = NamespaceString("db.people"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.people"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -1100,7 +1144,7 @@ TEST_F(RollbackImplTest, RollbackSavesLatestVersionOfDocumentWhenThereAreMultipl ASSERT_OK(_insertOplogEntry(commonOp.first)); _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); - const auto nss = NamespaceString("db.people"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.people"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -1126,7 +1170,7 @@ TEST_F(RollbackImplTest, RollbackDoesNotWriteDocumentToFileIfInsertIsRevertedByD ASSERT_OK(_insertOplogEntry(commonOp.first)); _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); - const auto nss = NamespaceString("db.numbers"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.numbers"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -1149,7 +1193,7 @@ TEST_F(RollbackImplTest, RollbackDoesNotWriteDocumentToFileIfUpdateIsFollowedByD ASSERT_OK(_insertOplogEntry(commonOp.first)); 
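
The test hunks above and below consistently replace direct `NamespaceString("db.coll")` construction and the `ns()` / `db()` accessors with the test-only variants. A minimal sketch of that pattern, with illustrative variable names, assuming the helpers shown in the diff:

```cpp
// Test-only construction and accessors used throughout these test changes.
const auto nss = NamespaceString::createNamespaceString_forTest("db.people");
auto fullName = nss.ns_forTest();              // full "db.coll" string, test-only accessor
auto dbName = nss.db_forTest().toString();     // database name, test-only accessor
```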
_storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); - const auto nss = NamespaceString("db.numbers"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.numbers"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -1172,7 +1216,7 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenCollectionIsRenamed) { ASSERT_OK(_insertOplogEntry(commonOp.first)); _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); - const auto nssBeforeRename = NamespaceString("db.firstColl"); + const auto nssBeforeRename = NamespaceString::createNamespaceString_forTest("db.firstColl"); const auto uuidBeforeRename = UUID::gen(); const auto collBeforeRename = _initializeCollection(_opCtx.get(), uuidBeforeRename, nssBeforeRename); @@ -1183,11 +1227,11 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenCollectionIsRenamed) { _insertDocAndGenerateOplogEntry(objInRenamedCollection, uuidBeforeRename, nssBeforeRename, 2); // Rename the original collection. - const auto nssAfterRename = NamespaceString("db.secondColl"); - auto renameCmdObj = - BSON("renameCollection" << nssBeforeRename.ns() << "to" << nssAfterRename.ns()); - auto renameCmdOp = - makeCommandOp(Timestamp(3, 3), uuidBeforeRename, nssBeforeRename.ns(), renameCmdObj, 3); + const auto nssAfterRename = NamespaceString::createNamespaceString_forTest("db.secondColl"); + auto renameCmdObj = BSON("renameCollection" << nssBeforeRename.ns_forTest() << "to" + << nssAfterRename.ns_forTest()); + auto renameCmdOp = makeCommandOp( + Timestamp(3, 3), uuidBeforeRename, nssBeforeRename.ns_forTest(), renameCmdObj, 3); ASSERT_OK(_insertOplogEntry(renameCmdOp.first)); ASSERT_OK( _storageInterface->renameCollection(_opCtx.get(), nssBeforeRename, nssAfterRename, true)); @@ -1222,7 +1266,8 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenInsertsAndDropOfCollectio // Create the collection, but as a drop-pending collection. const auto dropOpTime = OpTime(Timestamp(200, 200), 200L); - const auto nss = NamespaceString("db.people").makeDropPendingNamespace(dropOpTime); + const auto nss = NamespaceString::createNamespaceString_forTest("db.people") + .makeDropPendingNamespace(dropOpTime); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); DropPendingCollectionReaper::get(_opCtx.get()) @@ -1241,8 +1286,8 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenInsertsAndDropOfCollectio const auto oplogEntry = BSON("ts" << dropOpTime.getTimestamp() << "t" << dropOpTime.getTerm() << "op" << "c" - << "wall" << Date_t() << "o" << BSON("drop" << nss.coll()) << "ns" << nss.ns() - << "ui" << uuid); + << "wall" << Date_t() << "o" << BSON("drop" << nss.coll()) << "ns" + << nss.ns_forTest() << "ui" << uuid); ASSERT_OK(_insertOplogEntry(oplogEntry)); ASSERT_OK(_rollback->runRollback(_opCtx.get())); @@ -1264,13 +1309,13 @@ TEST_F(RollbackImplTest, RollbackProperlySavesFilesWhenCreateCollAndInsertsAreRo _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); // Create the collection and make an oplog entry for the creation event. 
- const auto nss = NamespaceString("db.people"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.people"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); const auto oplogEntry = BSON("ts" << Timestamp(3, 3) << "t" << 3LL << "op" << "c" << "wall" << Date_t() << "o" << BSON("create" << nss.coll()) - << "ns" << nss.ns() << "ui" << uuid); + << "ns" << nss.ns_forTest() << "ui" << uuid); ASSERT_OK(_insertOplogEntry(oplogEntry)); // Insert documents into the collection. @@ -1303,7 +1348,7 @@ DEATH_TEST_F(RollbackImplTest, ASSERT_OK(_insertOplogEntry(commonOp.first)); _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); - const auto nss = NamespaceString("db.people"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.people"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -1329,7 +1374,8 @@ DEATH_TEST_F(RollbackImplTest, const auto commonOp = makeOpAndRecordId(1); _remoteOplog->setOperations({commonOp}); ASSERT_OK(_insertOplogEntry(commonOp.first)); - ASSERT_OK(_insertOplogEntry(makeDeleteOplogEntry(2, BSON("_id" << 1), nss.ns(), kGenericUUID))); + ASSERT_OK(_insertOplogEntry( + makeDeleteOplogEntry(2, BSON("_id" << 1), nss.ns_forTest(), kGenericUUID))); _storageInterface->setStableTimestamp(nullptr, Timestamp(1, 1)); @@ -1345,37 +1391,41 @@ TEST_F(RollbackImplTest, RollbackSetsMultipleCollectionCounts) { ASSERT_OK(_insertOplogEntry(commonOp.first)); auto uuid1 = UUID::gen(); - auto nss1 = NamespaceString("test.coll1"); + auto nss1 = NamespaceString::createNamespaceString_forTest("test.coll1"); const auto obj1 = BSON("_id" << 1); const auto coll1 = _initializeCollection(_opCtx.get(), uuid1, nss1); _insertDocAndGenerateOplogEntry(obj1, uuid1, nss1, 2); const Timestamp time1 = Timestamp(2, 2); - ASSERT_OK(_storageInterface->insertDocument( - _opCtx.get(), {nss1.db().toString(), uuid1}, {BSON("_id" << 2), time1}, time1.asULL())); + ASSERT_OK(_storageInterface->insertDocument(_opCtx.get(), + {nss1.db_forTest().toString(), uuid1}, + {BSON("_id" << 2), time1}, + time1.asULL())); ASSERT_EQ(2ULL, unittest::assertGet(_storageInterface->getCollectionCount( - _opCtx.get(), {nss1.db().toString(), uuid1}))); - ASSERT_OK( - _storageInterface->setCollectionCount(_opCtx.get(), {nss1.db().toString(), uuid1}, 2)); + _opCtx.get(), {nss1.db_forTest().toString(), uuid1}))); + ASSERT_OK(_storageInterface->setCollectionCount( + _opCtx.get(), {nss1.db_forTest().toString(), uuid1}, 2)); auto uuid2 = UUID::gen(); - auto nss2 = NamespaceString("test.coll2"); + auto nss2 = NamespaceString::createNamespaceString_forTest("test.coll2"); const auto obj2 = BSON("_id" << 1); const auto coll2 = _initializeCollection(_opCtx.get(), uuid2, nss2); const Timestamp time2 = Timestamp(3, 3); ASSERT_OK(_storageInterface->insertDocument( - _opCtx.get(), {nss2.db().toString(), uuid2}, {obj2, time2}, time2.asULL())); + _opCtx.get(), {nss2.db_forTest().toString(), uuid2}, {obj2, time2}, time2.asULL())); _deleteDocAndGenerateOplogEntry(obj2["_id"], uuid2, nss2, 3); const Timestamp time3 = Timestamp(4, 4); - ASSERT_OK(_storageInterface->insertDocument( - _opCtx.get(), {nss2.db().toString(), uuid2}, {BSON("_id" << 2), time3}, time3.asULL())); + ASSERT_OK(_storageInterface->insertDocument(_opCtx.get(), + {nss2.db_forTest().toString(), uuid2}, + {BSON("_id" << 2), time3}, + time3.asULL())); ASSERT_EQ(1ULL, unittest::assertGet(_storageInterface->getCollectionCount( - _opCtx.get(), 
{nss2.db().toString(), uuid2}))); - ASSERT_OK( - _storageInterface->setCollectionCount(_opCtx.get(), {nss2.db().toString(), uuid2}, 1)); + _opCtx.get(), {nss2.db_forTest().toString(), uuid2}))); + ASSERT_OK(_storageInterface->setCollectionCount( + _opCtx.get(), {nss2.db_forTest().toString(), uuid2}, 1)); _assertDocsInOplog(_opCtx.get(), {1, 2, 3}); @@ -1396,7 +1446,7 @@ TEST_F(RollbackImplTest, CountChangesCancelOut) { const auto obj = BSON("_id" << 2); const Timestamp time = Timestamp(2, 2); ASSERT_OK(_storageInterface->insertDocument( - _opCtx.get(), {nss.db().toString(), uuid}, {obj, time}, time.asULL())); + _opCtx.get(), {nss.db_forTest().toString(), uuid}, {obj, time}, time.asULL())); _insertDocAndGenerateOplogEntry(BSON("_id" << 1), uuid, nss, 2); _deleteDocAndGenerateOplogEntry(obj["_id"], uuid, nss, 3); @@ -1405,7 +1455,7 @@ TEST_F(RollbackImplTest, CountChangesCancelOut) { // Test that we read the collection count from drop entries. ASSERT_OK(_insertOplogEntry(makeCommandOp(Timestamp(5, 5), uuid, - nss.getCommandNS().toString(), + nss.getCommandNS().toString_forTest(), BSON("drop" << nss.coll()), 5, BSON("numRecords" << 1)) @@ -1413,7 +1463,7 @@ TEST_F(RollbackImplTest, CountChangesCancelOut) { ASSERT_EQ(2ULL, unittest::assertGet(_storageInterface->getCollectionCount( - _opCtx.get(), {nss.db().toString(), uuid}))); + _opCtx.get(), {nss.db_forTest().toString(), uuid}))); ASSERT_OK(_storageInterface->setCollectionCount(nullptr, {"", uuid}, 2)); _assertDocsInOplog(_opCtx.get(), {1, 2, 3, 4, 5}); @@ -1430,31 +1480,35 @@ TEST_F(RollbackImplTest, RollbackIgnoresSetCollectionCountError) { ASSERT_OK(_insertOplogEntry(commonOp.first)); auto uuid1 = UUID::gen(); - auto nss1 = NamespaceString("test.coll1"); + auto nss1 = NamespaceString::createNamespaceString_forTest("test.coll1"); const auto obj1 = BSON("_id" << 1); const auto coll1 = _initializeCollection(_opCtx.get(), uuid1, nss1); _insertDocAndGenerateOplogEntry(obj1, uuid1, nss1, 2); const Timestamp time1 = Timestamp(2, 2); - ASSERT_OK(_storageInterface->insertDocument( - _opCtx.get(), {nss1.db().toString(), uuid1}, {BSON("_id" << 2), time1}, time1.asULL())); + ASSERT_OK(_storageInterface->insertDocument(_opCtx.get(), + {nss1.db_forTest().toString(), uuid1}, + {BSON("_id" << 2), time1}, + time1.asULL())); ASSERT_EQ(2ULL, unittest::assertGet(_storageInterface->getCollectionCount( - _opCtx.get(), {nss1.db().toString(), uuid1}))); + _opCtx.get(), {nss1.db_forTest().toString(), uuid1}))); ASSERT_OK(_storageInterface->setCollectionCount(nullptr, {"", uuid1}, 2)); auto uuid2 = UUID::gen(); - auto nss2 = NamespaceString("test.coll2"); + auto nss2 = NamespaceString::createNamespaceString_forTest("test.coll2"); const auto obj2 = BSON("_id" << 1); const auto coll2 = _initializeCollection(_opCtx.get(), uuid2, nss2); _insertDocAndGenerateOplogEntry(obj2, uuid2, nss2, 3); const Timestamp time2 = Timestamp(3, 3); - ASSERT_OK(_storageInterface->insertDocument( - _opCtx.get(), {nss2.db().toString(), uuid2}, {BSON("_id" << 2), time2}, time2.asULL())); + ASSERT_OK(_storageInterface->insertDocument(_opCtx.get(), + {nss2.db_forTest().toString(), uuid2}, + {BSON("_id" << 2), time2}, + time2.asULL())); ASSERT_EQ(2ULL, unittest::assertGet(_storageInterface->getCollectionCount( - _opCtx.get(), {nss2.db().toString(), uuid2}))); + _opCtx.get(), {nss2.db_forTest().toString(), uuid2}))); ASSERT_OK(_storageInterface->setCollectionCount(nullptr, {"", uuid2}, 2)); _assertDocsInOplog(_opCtx.get(), {1, 2, 3}); @@ -1471,7 +1525,8 @@ TEST_F(RollbackImplTest, 
ResetToZeroIfCountGoesNegative) { const auto commonOp = makeOpAndRecordId(1); _remoteOplog->setOperations({commonOp}); ASSERT_OK(_insertOplogEntry(commonOp.first)); - ASSERT_OK(_insertOplogEntry(makeInsertOplogEntry(2, BSON("_id" << 1), nss.ns(), kGenericUUID))); + ASSERT_OK(_insertOplogEntry( + makeInsertOplogEntry(2, BSON("_id" << 1), nss.ns_forTest(), kGenericUUID))); const auto coll = _initializeCollection(_opCtx.get(), kGenericUUID, nss); @@ -1488,14 +1543,15 @@ RollbackImplTest::_setUpUnpreparedTransactionForCountTest(UUID collId) { // Initialize the collection with one document inserted outside a transaction. // The final collection count after rolling back the transaction, which has one entry before the // stable timestamp, should be 1. - auto nss = NamespaceString("test.coll1"); + auto nss = NamespaceString::createNamespaceString_forTest("test.coll1"); _initializeCollection(_opCtx.get(), collId, nss); auto insertOp1 = _insertDocAndReturnOplogEntry(BSON("_id" << 1), collId, nss, 1); ops.push_back(insertOp1); ASSERT_OK(_insertOplogEntry(insertOp1.first)); // Common field values for applyOps oplog entries. - auto adminCmdNss = NamespaceString(DatabaseName::kAdmin).getCommandNS(); + auto adminCmdNss = + NamespaceString::createNamespaceString_forTest(DatabaseName::kAdmin).getCommandNS(); OperationSessionInfo sessionInfo; sessionInfo.setSessionId(makeLogicalSessionId(_opCtx.get())); sessionInfo.setTxnNumber(1); @@ -1566,7 +1622,8 @@ RollbackImplTest::_setUpUnpreparedTransactionForCountTest(UUID collId) { ASSERT_OK(_insertOplogEntry(commitApplyOpsOplogEntry.toBSON())); ops.push_back(std::make_pair(commitApplyOpsOplogEntry.toBSON(), insertOp3.second)); - ASSERT_OK(_storageInterface->setCollectionCount(nullptr, {nss.db().toString(), collId}, 3)); + ASSERT_OK( + _storageInterface->setCollectionCount(nullptr, {nss.db_forTest().toString(), collId}, 3)); _assertDocsInOplog(_opCtx.get(), {1, 2, 3}); return ops; @@ -1910,13 +1967,13 @@ class RollbackImplObserverInfoTest : public RollbackImplTest { auto doc = BSON("_id" << 1); const Timestamp time = Timestamp(2, 1); ASSERT_OK(_storageInterface->insertDocument( - _opCtx.get(), {nss.db().toString(), collId}, {doc, time}, time.asULL())); + _opCtx.get(), {nss.db_forTest().toString(), collId}, {doc, time}, time.asULL())); BSONObjBuilder bob; bob.append("ts", time); bob.append("op", "i"); collId.appendToBuilder(&bob, "ui"); - bob.append("ns", nss.ns()); + bob.append("ns", nss.ns_forTest()); bob.append("o", doc); bob.append("wall", Date_t()); bob.append("lsid", @@ -1944,10 +2001,15 @@ class RollbackImplObserverInfoTest : public RollbackImplTest { }; TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfInsertOplogEntry) { - auto insertNss = NamespaceString("test", "coll"); + auto insertNss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto ts = Timestamp(2, 2); - auto insertOp = makeCRUDOp( - OpTypeEnum::kInsert, ts, UUID::gen(), insertNss.ns(), BSON("_id" << 1), boost::none, 2); + auto insertOp = makeCRUDOp(OpTypeEnum::kInsert, + ts, + UUID::gen(), + insertNss.ns_forTest(), + BSON("_id" << 1), + boost::none, + 2); std::set expectedNamespaces = {insertNss}; auto namespaces = @@ -1956,11 +2018,11 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfInsertOp } TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfUpdateOplogEntry) { - auto updateNss = NamespaceString("test", "coll"); + auto updateNss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto ts = 
Timestamp(2, 2); auto o = BSON("$set" << BSON("x" << 2)); - auto updateOp = - makeCRUDOp(OpTypeEnum::kUpdate, ts, UUID::gen(), updateNss.ns(), o, BSON("_id" << 1), 2); + auto updateOp = makeCRUDOp( + OpTypeEnum::kUpdate, ts, UUID::gen(), updateNss.ns_forTest(), o, BSON("_id" << 1), 2); std::set expectedNamespaces = {updateNss}; auto namespaces = @@ -1969,10 +2031,15 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfUpdateOp } TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDeleteOplogEntry) { - auto deleteNss = NamespaceString("test", "coll"); + auto deleteNss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto ts = Timestamp(2, 2); - auto deleteOp = makeCRUDOp( - OpTypeEnum::kDelete, ts, UUID::gen(), deleteNss.ns(), BSON("_id" << 1), boost::none, 2); + auto deleteOp = makeCRUDOp(OpTypeEnum::kDelete, + ts, + UUID::gen(), + deleteNss.ns_forTest(), + BSON("_id" << 1), + boost::none, + 2); std::set expectedNamespaces = {deleteNss}; auto namespaces = @@ -1981,10 +2048,10 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDeleteOp } TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsIgnoresNamespaceOfNoopOplogEntry) { - auto noopNss = NamespaceString("test", "coll"); + auto noopNss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto ts = Timestamp(2, 2); - auto noop = - makeCRUDOp(OpTypeEnum::kNoop, ts, UUID::gen(), noopNss.ns(), BSONObj(), boost::none, 2); + auto noop = makeCRUDOp( + OpTypeEnum::kNoop, ts, UUID::gen(), noopNss.ns_forTest(), BSONObj(), boost::none, 2); std::set expectedNamespaces = {}; auto namespaces = @@ -1994,10 +2061,10 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsIgnoresNamespaceOfNoopOplog TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfCreateCollectionOplogEntry) { - auto nss = NamespaceString("test", "coll"); + auto nss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto cmdOp = makeCommandOp(Timestamp(2, 2), UUID::gen(), - nss.getCommandNS().toString(), + nss.getCommandNS().toString_forTest(), BSON("create" << nss.coll()), 2); std::set expectedNamespaces = {nss}; @@ -2007,9 +2074,12 @@ TEST_F(RollbackImplObserverInfoTest, } TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDropCollectionOplogEntry) { - auto nss = NamespaceString("test", "coll"); - auto cmdOp = makeCommandOp( - Timestamp(2, 2), UUID::gen(), nss.getCommandNS().toString(), BSON("drop" << nss.coll()), 2); + auto nss = NamespaceString::createNamespaceString_forTest("test", "coll"); + auto cmdOp = makeCommandOp(Timestamp(2, 2), + UUID::gen(), + nss.getCommandNS().toString_forTest(), + BSON("drop" << nss.coll()), + 2); std::set expectedNamespaces = {nss}; auto namespaces = @@ -2018,15 +2088,15 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDropColl } TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfCreateIndexOplogEntry) { - auto nss = NamespaceString("test", "coll"); + auto nss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto indexObj = BSON("createIndexes" << nss.coll() << "v" << static_cast(IndexDescriptor::IndexVersion::kV2) << "key" << "x" << "name" << "x_1"); - auto cmdOp = - makeCommandOp(Timestamp(2, 2), UUID::gen(), nss.getCommandNS().toString(), indexObj, 2); + auto cmdOp = makeCommandOp( + Timestamp(2, 2), UUID::gen(), nss.getCommandNS().toString_forTest(), indexObj, 2); std::set expectedNamespaces = {nss}; auto namespaces = @@ -2035,10 
+2105,10 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfCreateIn } TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDropIndexOplogEntry) { - auto nss = NamespaceString("test", "coll"); + auto nss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto cmdOp = makeCommandOp(Timestamp(2, 2), UUID::gen(), - nss.getCommandNS().toString(), + nss.getCommandNS().toString_forTest(), BSON("dropIndexes" << nss.coll() << "index" << "x_1"), 2); @@ -2050,12 +2120,12 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespaceOfDropInde TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespacesOfRenameCollectionOplogEntry) { - auto fromNss = NamespaceString("test", "source"); - auto toNss = NamespaceString("test", "dest"); + auto fromNss = NamespaceString::createNamespaceString_forTest("test", "source"); + auto toNss = NamespaceString::createNamespaceString_forTest("test", "dest"); - auto cmdObj = BSON("renameCollection" << fromNss.ns() << "to" << toNss.ns()); + auto cmdObj = BSON("renameCollection" << fromNss.ns_forTest() << "to" << toNss.ns_forTest()); auto cmdOp = - makeCommandOp(Timestamp(2, 2), UUID::gen(), fromNss.getCommandNS().ns(), cmdObj, 2); + makeCommandOp(Timestamp(2, 2), UUID::gen(), fromNss.getCommandNS().ns_forTest(), cmdObj, 2); std::set expectedNamespaces = {fromNss, toNss}; auto namespaces = @@ -2109,9 +2179,10 @@ TEST_F(RollbackImplObserverInfoTest, } TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsIgnoresNamespaceOfDropDatabaseOplogEntry) { - auto nss = NamespaceString("test", "coll"); + auto nss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto cmdObj = BSON("dropDatabase" << 1); - auto cmdOp = makeCommandOp(Timestamp(2, 2), boost::none, nss.getCommandNS().ns(), cmdObj, 2); + auto cmdOp = + makeCommandOp(Timestamp(2, 2), boost::none, nss.getCommandNS().ns_forTest(), cmdObj, 2); std::set expectedNamespaces = {}; auto namespaces = @@ -2120,10 +2191,11 @@ TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsIgnoresNamespaceOfDropDatab } TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsExtractsNamespacesOfCollModOplogEntry) { - auto nss = NamespaceString("test", "coll"); + auto nss = NamespaceString::createNamespaceString_forTest("test", "coll"); auto cmdObj = BSON("collMod" << nss.coll() << "validationLevel" << "off"); - auto cmdOp = makeCommandOp(Timestamp(2, 2), UUID::gen(), nss.getCommandNS().ns(), cmdObj, 2); + auto cmdOp = + makeCommandOp(Timestamp(2, 2), UUID::gen(), nss.getCommandNS().ns_forTest(), cmdObj, 2); std::set expectedNamespaces = {nss}; auto namespaces = @@ -2144,10 +2216,10 @@ DEATH_TEST_F(RollbackImplObserverInfoTest, NamespacesForOpsInvariantsOnApplyOpsOplogEntry, "_namespacesForOp does not handle 'applyOps' oplog entries.") { // Add one sub-op. - auto createNss = NamespaceString("test", "createColl"); + auto createNss = NamespaceString::createNamespaceString_forTest("test", "createColl"); auto createOp = makeCommandOp(Timestamp(2, 2), UUID::gen(), - createNss.getCommandNS().toString(), + createNss.getCommandNS().toString_forTest(), BSON("create" << createNss.coll()), 2); @@ -2167,20 +2239,22 @@ DEATH_TEST_F(RollbackImplObserverInfoTest, TEST_F(RollbackImplObserverInfoTest, RollbackRecordsNamespacesOfApplyOpsOplogEntry) { // Add a few different sub-ops from different namespaces to make sure they all get recorded. 
- auto createNss = NamespaceString("test", "createColl"); - auto createOp = makeCommandOpForApplyOps( - UUID::gen(), createNss.getCommandNS().toString(), BSON("create" << createNss.coll()), 2); + auto createNss = NamespaceString::createNamespaceString_forTest("test", "createColl"); + auto createOp = makeCommandOpForApplyOps(UUID::gen(), + createNss.getCommandNS().toString_forTest(), + BSON("create" << createNss.coll()), + 2); - auto dropNss = NamespaceString("test", "dropColl"); + auto dropNss = NamespaceString::createNamespaceString_forTest("test", "dropColl"); auto dropUuid = UUID::gen(); auto dropOp = makeCommandOpForApplyOps( - dropUuid, dropNss.getCommandNS().toString(), BSON("drop" << dropNss.coll()), 2); + dropUuid, dropNss.getCommandNS().toString_forTest(), BSON("drop" << dropNss.coll()), 2); _initializeCollection(_opCtx.get(), dropUuid, dropNss); - auto collModNss = NamespaceString("test", "collModColl"); + auto collModNss = NamespaceString::createNamespaceString_forTest("test", "collModColl"); auto collModOp = makeCommandOpForApplyOps(UUID::gen(), - collModNss.getCommandNS().ns(), + collModNss.getCommandNS().ns_forTest(), BSON("collMod" << collModNss.coll() << "validationLevel" << "off"), 2); @@ -2213,7 +2287,7 @@ TEST_F(RollbackImplObserverInfoTest, RollbackFailsOnMalformedApplyOpsOplogEntry) } TEST_F(RollbackImplObserverInfoTest, RollbackRecordsNamespaceOfSingleOplogEntry) { - const auto nss = NamespaceString("test", "coll"); + const auto nss = NamespaceString::createNamespaceString_forTest("test", "coll"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); const auto insertOp = _insertDocAndReturnOplogEntry(BSON("_id" << 1), uuid, nss, 2); @@ -2228,9 +2302,9 @@ TEST_F(RollbackImplObserverInfoTest, RollbackRecordsMultipleNamespacesOfOplogEnt return _insertDocAndReturnOplogEntry(BSON("_id" << 1), uuid, nss, recordId); }; - const auto nss1 = NamespaceString("test", "coll1"); - const auto nss2 = NamespaceString("test", "coll2"); - const auto nss3 = NamespaceString("test", "coll3"); + const auto nss1 = NamespaceString::createNamespaceString_forTest("test", "coll1"); + const auto nss2 = NamespaceString::createNamespaceString_forTest("test", "coll2"); + const auto nss3 = NamespaceString::createNamespaceString_forTest("test", "coll3"); const auto uuid1 = UUID::gen(); const auto uuid2 = UUID::gen(); @@ -2280,7 +2354,7 @@ TEST_F(RollbackImplObserverInfoTest, RollbackRecordsSessionIdFromOplogEntry) { TEST_F(RollbackImplObserverInfoTest, RollbackDoesntRecordSessionIdFromOplogEntryWithoutSessionInfo) { - const auto nss = NamespaceString("test", "coll"); + const auto nss = NamespaceString::createNamespaceString_forTest("test", "coll"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -2319,14 +2393,14 @@ TEST_F(RollbackImplObserverInfoTest, RollbackDoesntRecordShardIdentityRollbackFo } TEST_F(RollbackImplObserverInfoTest, RollbackRecordsConfigVersionRollback) { - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; const auto uuid = UUID::gen(); const auto nss = VersionType::ConfigNS; const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); auto insertOp = makeCRUDOp(OpTypeEnum::kInsert, Timestamp(2, 2), uuid, - nss.ns(), + nss.ns_forTest(), BSON("_id" << "a"), boost::none, @@ -2344,7 +2418,7 @@ TEST_F(RollbackImplObserverInfoTest, RollbackDoesntRecordConfigVersionRollbackFo auto insertOp = 
makeCRUDOp(OpTypeEnum::kInsert, Timestamp(2, 2), uuid, - nss.ns(), + nss.ns_forTest(), BSON("_id" << "a"), boost::none, @@ -2354,14 +2428,14 @@ TEST_F(RollbackImplObserverInfoTest, RollbackDoesntRecordConfigVersionRollbackFo } TEST_F(RollbackImplObserverInfoTest, RollbackDoesntRecordConfigVersionRollbackForNonInsert) { - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; const auto uuid = UUID::gen(); const auto nss = VersionType::ConfigNS; const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); auto deleteOp = makeCRUDOp(OpTypeEnum::kDelete, Timestamp(2, 2), uuid, - nss.ns(), + nss.ns_forTest(), BSON("_id" << "a"), boost::none, @@ -2371,14 +2445,14 @@ TEST_F(RollbackImplObserverInfoTest, RollbackDoesntRecordConfigVersionRollbackFo } TEST_F(RollbackImplObserverInfoTest, RollbackRecordsInsertOpsInUUIDToIdMap) { - const auto nss1 = NamespaceString("db.people"); + const auto nss1 = NamespaceString::createNamespaceString_forTest("db.people"); const auto uuid1 = UUID::gen(); const auto coll1 = _initializeCollection(_opCtx.get(), uuid1, nss1); const auto obj1 = BSON("_id" << "kyle"); const auto insertOp1 = _insertDocAndReturnOplogEntry(obj1, uuid1, nss1, 2); - const auto nss2 = NamespaceString("db.persons"); + const auto nss2 = NamespaceString::createNamespaceString_forTest("db.persons"); const auto uuid2 = UUID::gen(); const auto coll2 = _initializeCollection(_opCtx.get(), uuid2, nss2); const auto obj2 = BSON("_id" @@ -2393,26 +2467,26 @@ TEST_F(RollbackImplObserverInfoTest, RollbackRecordsInsertOpsInUUIDToIdMap) { } TEST_F(RollbackImplObserverInfoTest, RollbackRecordsUpdateOpsInUUIDToIdMap) { - const auto nss1 = NamespaceString("db.coll1"); + const auto nss1 = NamespaceString::createNamespaceString_forTest("db.coll1"); const auto uuid1 = UUID::gen(); const auto coll1 = _initializeCollection(_opCtx.get(), uuid1, nss1); const auto id1 = BSON("_id" << 1); const auto updateOp1 = makeCRUDOp(OpTypeEnum::kUpdate, Timestamp(2, 2), uuid1, - nss1.ns(), + nss1.ns_forTest(), BSON("$set" << BSON("foo" << 1)), id1, 2); - const auto nss2 = NamespaceString("db.coll2"); + const auto nss2 = NamespaceString::createNamespaceString_forTest("db.coll2"); const auto uuid2 = UUID::gen(); const auto coll2 = _initializeCollection(_opCtx.get(), uuid2, nss2); const auto id2 = BSON("_id" << 2); const auto updateOp2 = makeCRUDOp(OpTypeEnum::kUpdate, Timestamp(3, 3), uuid2, - nss2.ns(), + nss2.ns_forTest(), BSON("$set" << BSON("foo" << 1)), id2, 3); @@ -2425,7 +2499,7 @@ TEST_F(RollbackImplObserverInfoTest, RollbackRecordsUpdateOpsInUUIDToIdMap) { } TEST_F(RollbackImplObserverInfoTest, RollbackRecordsMultipleInsertOpsForSameNamespace) { - const auto nss = NamespaceString("db.coll"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.coll"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); @@ -2443,14 +2517,14 @@ TEST_F(RollbackImplObserverInfoTest, RollbackRecordsMultipleInsertOpsForSameName } TEST_F(RollbackImplObserverInfoTest, RollbackRecordsMultipleUpdateOpsForSameNamespace) { - const auto nss = NamespaceString("db.coll"); + const auto nss = NamespaceString::createNamespaceString_forTest("db.coll"); const auto uuid = UUID::gen(); const auto coll = _initializeCollection(_opCtx.get(), uuid, nss); const auto obj1 = BSON("_id" << 1); const auto updateOp1 = makeCRUDOp(OpTypeEnum::kUpdate, Timestamp(2, 2), uuid, - nss.ns(), + nss.ns_forTest(), 
BSON("$set" << BSON("foo" << 1)), obj1, 2); @@ -2459,7 +2533,7 @@ TEST_F(RollbackImplObserverInfoTest, RollbackRecordsMultipleUpdateOpsForSameName const auto updateOp2 = makeCRUDOp(OpTypeEnum::kUpdate, Timestamp(3, 3), uuid, - nss.ns(), + nss.ns_forTest(), BSON("$set" << BSON("bar" << 2)), obj2, 3); diff --git a/src/mongo/db/repl/rollback_source_impl.cpp b/src/mongo/db/repl/rollback_source_impl.cpp index de16558a010fc..996fd2b12670e 100644 --- a/src/mongo/db/repl/rollback_source_impl.cpp +++ b/src/mongo/db/repl/rollback_source_impl.cpp @@ -27,14 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/rollback_source_impl.h" - -#include "mongo/db/jsobj.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/replication_auth.h" +#include "mongo/db/repl/rollback_source_impl.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -43,12 +55,8 @@ namespace repl { RollbackSourceImpl::RollbackSourceImpl(GetConnectionFn getConnection, const HostAndPort& source, - const std::string& collectionName, int batchSize) - : _getConnection(getConnection), - _source(source), - _collectionName(collectionName), - _oplog(source, getConnection, collectionName, batchSize) {} + : _getConnection(getConnection), _source(source), _oplog(source, getConnection, batchSize) {} const OplogInterface& RollbackSourceImpl::getOplog() const { return _oplog; @@ -61,13 +69,12 @@ const HostAndPort& RollbackSourceImpl::getSource() const { int RollbackSourceImpl::getRollbackId() const { bo info; - _getConnection()->runCommand( - DatabaseName(boost::none, "admin"), BSON("replSetGetRBID" << 1), info); + _getConnection()->runCommand(DatabaseName::kAdmin, BSON("replSetGetRBID" << 1), info); return info["rbid"].numberInt(); } BSONObj RollbackSourceImpl::getLastOperation() const { - FindCommandRequest findCmd{NamespaceString{_collectionName}}; + FindCommandRequest findCmd{NamespaceString::kRsOplogNamespace}; findCmd.setSort(BSON("$natural" << -1)); findCmd.setReadConcern(ReadConcernArgs::kLocal); return _getConnection()->findOne(std::move(findCmd), @@ -122,7 +129,8 @@ StatusWith RollbackSourceImpl::getCollectionInfo(const NamespaceString& _getConnection()->getCollectionInfos(nss.dbName(), BSON("name" << nss.coll())); if (info.empty()) { return StatusWith(ErrorCodes::NoSuchKey, - str::stream() << "no collection info found: " << nss.ns()); + str::stream() << "no collection info found: " + << nss.toStringForErrorMsg()); } invariant(info.size() == 1U); return info.front(); diff --git a/src/mongo/db/repl/rollback_source_impl.h b/src/mongo/db/repl/rollback_source_impl.h index 82c9135bf704e..40ced515ab988 100644 --- a/src/mongo/db/repl/rollback_source_impl.h +++ b/src/mongo/db/repl/rollback_source_impl.h @@ -31,10 +31,17 @@ #include #include +#include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/oplog_interface.h" #include "mongo/db/repl/oplog_interface_remote.h" #include 
"mongo/db/repl/rollback_source.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -53,10 +60,7 @@ class RollbackSourceImpl : public RollbackSource { */ using GetConnectionFn = std::function; - RollbackSourceImpl(GetConnectionFn getConnection, - const HostAndPort& source, - const std::string& collectionName, - int batchSize); + RollbackSourceImpl(GetConnectionFn getConnection, const HostAndPort& source, int batchSize); const OplogInterface& getOplog() const override; @@ -80,7 +84,6 @@ class RollbackSourceImpl : public RollbackSource { private: GetConnectionFn _getConnection; HostAndPort _source; - std::string _collectionName; OplogInterfaceRemote _oplog; }; diff --git a/src/mongo/db/repl/rollback_test_fixture.cpp b/src/mongo/db/repl/rollback_test_fixture.cpp index f88973b38b952..1561da8cf13db 100644 --- a/src/mongo/db/repl/rollback_test_fixture.cpp +++ b/src/mongo/db/repl/rollback_test_fixture.cpp @@ -29,26 +29,47 @@ #include "mongo/db/repl/rollback_test_fixture.h" +#include +#include #include +#include -#include "mongo/db/catalog/collection_write_path.h" +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/dbhelpers.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" -#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/replication_recovery.h" -#include "mongo/db/repl/rs_rollback.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" -#include "mongo/unittest/log_test.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/duration.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { @@ -67,12 +88,12 @@ ReplSettings createReplSettings() { class RollbackTestOpObserver : public OpObserverNoop { public: - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) override { + const CollectionDropType dropType, + bool markFromMigrate) override { // If the oplog is not disabled for this namespace, then we need to reserve an op time for // the drop. 
if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(opCtx, collectionName)) { @@ -88,7 +109,10 @@ class RollbackTestOpObserver : public OpObserverNoop { void RollbackTest::setUp() { ServiceContextMongoDTest::setUp(); - + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } _storageInterface = new StorageInterfaceRollback(); auto serviceContext = getServiceContext(); auto consistencyMarkers = std::make_unique(); @@ -233,27 +257,31 @@ Collection* RollbackTest::_createCollection(OperationContext* opCtx, Collection* RollbackTest::_createCollection(OperationContext* opCtx, const std::string& nss, const CollectionOptions& options) { - return _createCollection(opCtx, NamespaceString(nss), options); + return _createCollection(opCtx, NamespaceString::createNamespaceString_forTest(nss), options); } void RollbackTest::_insertDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& doc) { + const auto collection = [&]() { + while (true) { + auto collection = acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + if (collection.exists()) { + return collection; + } + + CollectionOptions options; + options.uuid = UUID::gen(); + _createCollection(opCtx, nss, options); + } + }(); - auto insertDoc = [opCtx, &doc](const CollectionPtr& collection) { - WriteUnitOfWork wuow(opCtx); - ASSERT_OK(collection_internal::insertDocument( - opCtx, collection, InsertStatement(doc), nullptr /* OpDebug */)); - wuow.commit(); - }; - AutoGetCollection collection(opCtx, nss, MODE_X); - if (collection) { - insertDoc(collection.getCollection()); - } else { - CollectionOptions options; - options.uuid = UUID::gen(); - insertDoc(CollectionPtr(_createCollection(opCtx, nss, options))); - } + WriteUnitOfWork wuow(opCtx); + ASSERT_OK(Helpers::insert(opCtx, collection, doc)); + wuow.commit(); } Status RollbackTest::_insertOplogEntry(const BSONObj& doc) { @@ -297,7 +325,7 @@ std::pair RollbackSourceMock::findOneByUUID(const std: } StatusWith RollbackSourceMock::getCollectionInfo(const NamespaceString& nss) const { - return BSON("name" << nss.ns() << "options" << BSONObj()); + return BSON("name" << nss.ns_forTest() << "options" << BSONObj()); } StatusWith RollbackSourceMock::getCollectionInfoByUUID(const DatabaseName& dbName, diff --git a/src/mongo/db/repl/rollback_test_fixture.h b/src/mongo/db/repl/rollback_test_fixture.h index 274529b63080e..1c67e42d0126d 100644 --- a/src/mongo/db/repl/rollback_test_fixture.h +++ b/src/mongo/db/repl/rollback_test_fixture.h @@ -29,21 +29,52 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_interface.h" #include "mongo/db/repl/oplog_interface_mock.h" #include 
"mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/replication_recovery.h" #include "mongo/db/repl/rollback_source.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log_component.h" #include "mongo/logv2/log_severity.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/unittest/log_test.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -200,7 +231,7 @@ class RollbackTest::StorageInterfaceRollback : public StorageInterfaceImpl { nsOrUUID.uuid() == _setCollectionCountStatusUUID) { return *_setCollectionCountStatus; } - _newCounts[*nsOrUUID.uuid()] = newCount; + _newCounts[nsOrUUID.uuid()] = newCount; return Status::OK(); } diff --git a/src/mongo/db/repl/rs_rollback.cpp b/src/mongo/db/repl/rs_rollback.cpp index 02d4826e12630..6adb9a1f17f63 100644 --- a/src/mongo/db/repl/rs_rollback.cpp +++ b/src/mongo/db/repl/rs_rollback.cpp @@ -29,56 +29,100 @@ #include "mongo/db/repl/rs_rollback.h" +#include +#include +#include +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" #include - +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonelement_comparator.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/catalog/capped_collection_maintenance.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_options_gen.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_build_oplog_entry.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/rename_collection.h" #include "mongo/db/catalog/unique_collection_name.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" -#include "mongo/db/commands.h" -#include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/exec/working_set_common.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/logical_time_validator.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/delete.h" #include "mongo/db/ops/update.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/read_write_concern_defaults.h" -#include "mongo/db/repl/bgsync.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include 
"mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_interface.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/replication_coordinator_impl.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/roll_back_local_operations.h" +#include "mongo/db/repl/rollback_impl.h" #include "mongo/db/repl/rollback_source.h" +#include "mongo/db/resumable_index_builds_gen.h" #include "mongo/db/s/shard_identity_rollback_notifier.h" -#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_role.h" #include "mongo/db/storage/control/journal_flusher.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/remove_saver.h" -#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/exit.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationRollback @@ -317,9 +361,10 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o fixUpInfo.refetchTransactionDocs = true; } else { throw RSFatalException( - str::stream() << NamespaceString::kSessionTransactionsTableNamespace.ns() - << " does not have a UUID, but local op has a transaction number: " - << redact(oplogEntry.toBSONForLogging())); + str::stream() + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg() + << " does not have a UUID, but local op has a transaction number: " + << redact(oplogEntry.toBSONForLogging())); } if (oplogEntry.isPartialTransaction()) { // If this is a transaction which did not commit, we need do nothing more than @@ -794,7 +839,7 @@ Status rollback_internal::updateFixUpInfoFromLocalOplogEntry(OperationContext* o message, logAttrs(nss), "oplogEntry"_attr = redact(oplogEntry.toBSONForLogging())); - throw RSFatalException(str::stream() << message << ". ns: " << nss.ns()); + throw RSFatalException(str::stream() << message << ". 
ns: " << nss.toStringForErrorMsg()); } fixUpInfo.docsToRefetch.insert(doc); return Status::OK(); @@ -862,11 +907,11 @@ void dropIndex(OperationContext* opCtx, const string& indexName, NamespaceString& nss) { IndexCatalog* indexCatalog = collection->getIndexCatalog(); - auto indexDescriptor = indexCatalog->findIndexByName( + auto writableEntry = indexCatalog->getWritableEntryByName( opCtx, indexName, IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished); - if (!indexDescriptor) { + if (!writableEntry) { LOGV2_WARNING(21725, "Rollback failed to drop index {indexName} in {namespace}: index not found.", "Rollback failed to drop index: index not found", @@ -875,9 +920,8 @@ void dropIndex(OperationContext* opCtx, return; } - auto entry = indexCatalog->getEntry(indexDescriptor); - if (entry->isReady(opCtx)) { - auto status = indexCatalog->dropIndex(opCtx, collection, indexDescriptor); + if (writableEntry->isReady()) { + auto status = indexCatalog->dropIndexEntry(opCtx, collection, writableEntry); if (!status.isOK()) { LOGV2_ERROR(21738, "Rollback failed to drop index {indexName} in {namespace}: {error}", @@ -887,7 +931,7 @@ void dropIndex(OperationContext* opCtx, "error"_attr = redact(status)); } } else { - auto status = indexCatalog->dropUnfinishedIndex(opCtx, collection, indexDescriptor); + auto status = indexCatalog->dropUnfinishedIndex(opCtx, collection, writableEntry); if (!status.isOK()) { LOGV2_ERROR( 21739, @@ -1264,7 +1308,7 @@ void syncFixUp(OperationContext* opCtx, throw RSFatalException( str::stream() << "A fetch on the transactions collection returned an unexpected namespace: " - << resNss.ns() + << resNss.toStringForErrorMsg() << ". The transactions collection cannot be correctly rolled back, a full " "resync is required."); } @@ -1526,9 +1570,9 @@ void syncFixUp(OperationContext* opCtx, opCtx, options.validator, options.validationLevel, options.validationAction); if (!validatorStatus.isOK()) { throw RSFatalException(str::stream() - << "Failed to update validator for " << nss->toString() - << " (" << uuid << ") with " << redact(info) - << ". Got: " << validatorStatus.toString()); + << "Failed to update validator for " + << nss->toStringForErrorMsg() << " (" << uuid << ") with " + << redact(info) << ". Got: " << validatorStatus.toString()); } wuow.commit(); @@ -1622,7 +1666,12 @@ void syncFixUp(OperationContext* opCtx, const NamespaceString docNss(doc.ns); Lock::DBLock docDbLock(opCtx, docNss.dbName(), MODE_X); OldClientContext ctx(opCtx, docNss); - CollectionWriter collection(opCtx, uuid); + auto collection = acquireCollection(opCtx, + {NamespaceStringOrUUID(docNss.dbName(), uuid), + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite}, + MODE_X); // Adds the doc to our rollback file if the collection was not dropped while // rolling back createCollection operations. Does not log an error when @@ -1630,9 +1679,10 @@ void syncFixUp(OperationContext* opCtx, // the collection was dropped as part of rolling back a createCollection // command and the document no longer exists. - if (collection && removeSaver) { + if (collection.exists() && removeSaver) { BSONObj obj; - bool found = Helpers::findOne(opCtx, collection.get(), pattern, obj); + bool found = + Helpers::findOne(opCtx, collection.getCollectionPtr(), pattern, obj); if (found) { auto status = removeSaver->goingToDelete(obj); if (!status.isOK()) { @@ -1671,8 +1721,8 @@ void syncFixUp(OperationContext* opCtx, // here. 
deletes++; - if (collection) { - if (collection->isCapped()) { + if (collection.exists()) { + if (collection.getCollectionPtr()->isCapped()) { // Can't delete from a capped collection - so we truncate instead. // if this item must go, so must all successors. @@ -1683,7 +1733,8 @@ void syncFixUp(OperationContext* opCtx, const auto clock = opCtx->getServiceContext()->getFastClockSource(); const auto findOneStart = clock->now(); - RecordId loc = Helpers::findOne(opCtx, collection.get(), pattern); + RecordId loc = + Helpers::findOne(opCtx, collection.getCollectionPtr(), pattern); if (clock->now() - findOneStart > Milliseconds(200)) LOGV2_WARNING( 21726, @@ -1695,21 +1746,23 @@ void syncFixUp(OperationContext* opCtx, if (!loc.isNull()) { try { writeConflictRetry( - opCtx, - "cappedTruncateAfter", - collection->ns().ns(), - [&] { + opCtx, "cappedTruncateAfter", collection.nss(), [&] { collection_internal::cappedTruncateAfter( - opCtx, collection.get(), loc, true); + opCtx, + collection.getCollectionPtr(), + loc, + true); }); } catch (const DBException& e) { if (e.code() == 13415) { // hack: need to just make cappedTruncate do this... + CollectionWriter collectionWriter(opCtx, &collection); writeConflictRetry( - opCtx, "truncate", collection->ns().ns(), [&] { + opCtx, "truncate", collection.nss(), [&] { WriteUnitOfWork wunit(opCtx); uassertStatusOK( - collection.getWritableCollection(opCtx) + collectionWriter + .getWritableCollection(opCtx) ->truncate(opCtx)); wunit.commit(); }); @@ -1736,8 +1789,7 @@ void syncFixUp(OperationContext* opCtx, } } else { deleteObjects(opCtx, - collection.get(), - *nss, + collection, pattern, true, // justOne true); // god @@ -1765,7 +1817,7 @@ void syncFixUp(OperationContext* opCtx, request.setGod(); request.setUpsert(); - update(opCtx, ctx.db(), request); + update(opCtx, collection, request); } } catch (const DBException& e) { LOGV2(21713, @@ -1846,7 +1898,8 @@ void syncFixUp(OperationContext* opCtx, fassertFailedWithStatusNoTrace( 40495, Status(ErrorCodes::UnrecoverableRollbackError, - str::stream() << "Can't find " << NamespaceString::kRsOplogNamespace.ns())); + str::stream() << "Can't find " + << NamespaceString::kRsOplogNamespace.toStringForErrorMsg())); } // The oplog collection doesn't have indexes and therefore can take full advantage of the @@ -1958,7 +2011,7 @@ Status _syncRollback(OperationContext* opCtx, auto res = syncRollBackLocalOperations( localOplog, rollbackSource.getOplog(), processOperationForFixUp); if (!res.isOK()) { - const auto status = res.getStatus(); + auto status = res.getStatus(); switch (status.code()) { case ErrorCodes::OplogStartMissing: case ErrorCodes::UnrecoverableRollbackError: diff --git a/src/mongo/db/repl/rs_rollback.h b/src/mongo/db/repl/rs_rollback.h index b5bd468091a1b..12531794c9f88 100644 --- a/src/mongo/db/repl/rs_rollback.h +++ b/src/mongo/db/repl/rs_rollback.h @@ -29,15 +29,30 @@ #pragma once +#include +#include +#include +#include #include +#include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/index_builds.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/record_id.h" #include "mongo/db/repl/optime.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/time_support.h" 
#include "mongo/util/uuid.h" diff --git a/src/mongo/db/repl/scatter_gather_algorithm.cpp b/src/mongo/db/repl/scatter_gather_algorithm.cpp index ab3fc90faeaa2..64f34816084ce 100644 --- a/src/mongo/db/repl/scatter_gather_algorithm.cpp +++ b/src/mongo/db/repl/scatter_gather_algorithm.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/scatter_gather_algorithm.h" namespace mongo { diff --git a/src/mongo/db/repl/scatter_gather_runner.cpp b/src/mongo/db/repl/scatter_gather_runner.cpp index 0cc501dd5e108..c522ec4873ed0 100644 --- a/src/mongo/db/repl/scatter_gather_runner.cpp +++ b/src/mongo/db/repl/scatter_gather_runner.cpp @@ -28,16 +28,22 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/scatter_gather_runner.h" - +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include -#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" #include "mongo/db/repl/scatter_gather_algorithm.h" +#include "mongo/db/repl/scatter_gather_runner.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" diff --git a/src/mongo/db/repl/scatter_gather_runner.h b/src/mongo/db/repl/scatter_gather_runner.h index 610d21a675dea..06ad76ad69ee0 100644 --- a/src/mongo/db/repl/scatter_gather_runner.h +++ b/src/mongo/db/repl/scatter_gather_runner.h @@ -30,8 +30,12 @@ #pragma once #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/repl/scatter_gather_test.cpp b/src/mongo/db/repl/scatter_gather_test.cpp index 2eb7d9fc94a60..21378d2fa0548 100644 --- a/src/mongo/db/repl/scatter_gather_test.cpp +++ b/src/mongo/db/repl/scatter_gather_test.cpp @@ -27,16 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include #include - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/repl/scatter_gather_algorithm.h" #include "mongo/db/repl/scatter_gather_runner.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/platform/atomic_word.h" #include "mongo/stdx/thread.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/session_update_tracker.cpp b/src/mongo/db/repl/session_update_tracker.cpp index 1ca136186a1ae..ca7487f39295f 100644 --- a/src/mongo/db/repl/session_update_tracker.cpp +++ b/src/mongo/db/repl/session_update_tracker.cpp @@ -28,20 +28,41 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/session_update_tracker.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops_retryability.h" #include "mongo/db/repl/oplog_entry.h" -#include "mongo/db/server_options.h" -#include "mongo/db/session/session.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/session_update_tracker.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/transaction/transaction_participant_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/session_update_tracker.h b/src/mongo/db/repl/session_update_tracker.h index c3d16da1465a4..c282ecbe8b634 100644 --- a/src/mongo/db/repl/session_update_tracker.h +++ b/src/mongo/db/repl/session_update_tracker.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include #include diff --git a/src/mongo/db/repl/shard_merge_recipient_op_observer.cpp b/src/mongo/db/repl/shard_merge_recipient_op_observer.cpp index 3c6f8180ef16d..ea19b4a5f4d24 100644 --- a/src/mongo/db/repl/shard_merge_recipient_op_observer.cpp +++ b/src/mongo/db/repl/shard_merge_recipient_op_observer.cpp @@ -30,17 +30,36 @@ #include "mongo/db/repl/shard_merge_recipient_op_observer.h" +#include #include - 
+#include +#include +#include + +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/drop_database.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/multitenancy_gen.h" -#include "mongo/db/repl/shard_merge_recipient_service.h" #include "mongo/db/repl/tenant_file_importer_service.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" @@ -48,7 +67,18 @@ #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/repl/tenant_migration_util.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -97,9 +127,7 @@ void deleteTenantDataWhenMergeAborts(OperationContext* opCtx, IndexBuildsCoordinator::get(opCtx)->assertNoBgOpInProgForDb(db->name()); auto catalog = CollectionCatalog::get(opCtx); - for (auto collIt = catalog->begin(opCtx, db->name()); collIt != catalog->end(opCtx); - ++collIt) { - auto collection = *collIt; + for (auto&& collection : catalog->range(db->name())) { if (!collection) { break; } @@ -160,10 +188,13 @@ void onShardMergeRecipientsNssInsert(OperationContext* opCtx, migrationId); }); - auto mtab = std::make_shared( - opCtx->getServiceContext(), migrationId); - TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .add(recipientStateDoc.getTenantIds(), mtab); + auto& registry = + TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()); + for (const auto& tenantId : recipientStateDoc.getTenantIds()) { + registry.add(tenantId, + std::make_shared( + opCtx->getServiceContext(), migrationId)); + } opCtx->recoveryUnit()->onRollback([migrationId](OperationContext* opCtx) { TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .removeAccessBlockersForMigration( @@ -240,11 +271,15 @@ void onTransitioningToConsistent(OperationContext* opCtx, assertImportDoneMarkerLocalCollectionExists(opCtx, recipientStateDoc.getId()); if (recipientStateDoc.getRejectReadsBeforeTimestamp()) { opCtx->recoveryUnit()->onCommit([recipientStateDoc](OperationContext* opCtx, auto _) { - auto mtab = tenant_migration_access_blocker::getRecipientAccessBlockerForMigration( - opCtx->getServiceContext(), recipientStateDoc.getId()); - invariant(mtab); - 
mtab->startRejectingReadsBefore( - recipientStateDoc.getRejectReadsBeforeTimestamp().get()); + auto mtabVector = + TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getRecipientAccessBlockersForMigration(recipientStateDoc.getId()); + invariant(!mtabVector.empty()); + for (auto& mtab : mtabVector) { + invariant(mtab); + mtab->startRejectingReadsBefore( + recipientStateDoc.getRejectReadsBeforeTimestamp().get()); + } }); } } @@ -262,12 +297,15 @@ void onTransitioningToCommitted(OperationContext* opCtx, if (markedGCAfterMigrationStart) { opCtx->recoveryUnit()->onCommit([migrationId](OperationContext* opCtx, auto _) { - auto mtab = tenant_migration_access_blocker::getRecipientAccessBlockerForMigration( - opCtx->getServiceContext(), migrationId); - invariant(mtab); - // Once the migration is committed and state doc is marked garbage collectable, - // the TTL deletions should be unblocked for the imported donor collections. - mtab->stopBlockingTTL(); + auto mtabVector = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getRecipientAccessBlockersForMigration(migrationId); + invariant(!mtabVector.empty()); + for (auto& mtab : mtabVector) { + invariant(mtab); + // Once the migration is committed and state doc is marked garbage collectable, + // the TTL deletions should be unblocked for the imported donor collections. + mtab->stopBlockingTTL(); + } ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) .releaseLock(ServerlessOperationLockRegistry::LockType::kMergeRecipient, @@ -353,7 +391,8 @@ void ShardMergeRecipientOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kShardMergeRecipientsNamespace) { onShardMergeRecipientsNssInsert(opCtx, first, last); return; @@ -366,7 +405,8 @@ void ShardMergeRecipientOpObserver::onInserts(OperationContext* opCtx, } void ShardMergeRecipientOpObserver::onUpdate(OperationContext* opCtx, - const OplogUpdateEntryArgs& args) { + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.coll->ns() != NamespaceString::kShardMergeRecipientsNamespace || tenant_migration_access_blocker::inRecoveryMode(opCtx)) { return; @@ -404,7 +444,9 @@ void ShardMergeRecipientOpObserver::onUpdate(OperationContext* opCtx, void ShardMergeRecipientOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { if (coll->ns() != NamespaceString::kShardMergeRecipientsNamespace || tenant_migration_access_blocker::inRecoveryMode(opCtx)) { return; @@ -433,7 +475,8 @@ void ShardMergeRecipientOpObserver::aboutToDelete(OperationContext* opCtx, void ShardMergeRecipientOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (coll->ns() != NamespaceString::kShardMergeRecipientsNamespace || tenant_migration_access_blocker::inRecoveryMode(opCtx)) { return; @@ -455,15 +498,17 @@ repl::OpTime ShardMergeRecipientOpObserver::onDropCollection(OperationContext* o const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { 
if (collectionName == NamespaceString::kShardMergeRecipientsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { - uassert(ErrorCodes::IllegalOperation, - str::stream() << "Cannot drop " - << NamespaceString::kShardMergeRecipientsNamespace.ns() - << " collection as it is not empty", - !numRecords); + uassert( + ErrorCodes::IllegalOperation, + str::stream() << "Cannot drop " + << NamespaceString::kShardMergeRecipientsNamespace.toStringForErrorMsg() + << " collection as it is not empty", + !numRecords); } return OpTime(); } diff --git a/src/mongo/db/repl/shard_merge_recipient_op_observer.h b/src/mongo/db/repl/shard_merge_recipient_op_observer.h index 57b4cd668edb6..64cdda2f67942 100644 --- a/src/mongo/db/repl/shard_merge_recipient_op_observer.h +++ b/src/mongo/db/repl/shard_merge_recipient_op_observer.h @@ -29,14 +29,27 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo::repl { /** * OpObserver for shard merge recipient. */ -class ShardMergeRecipientOpObserver final : public OpObserver { +class ShardMergeRecipientOpObserver final : public OpObserverNoop { ShardMergeRecipientOpObserver(const ShardMergeRecipientOpObserver&) = delete; ShardMergeRecipientOpObserver& operator=(const ShardMergeRecipientOpObserver&) = delete; @@ -44,91 +57,33 @@ class ShardMergeRecipientOpObserver final : public OpObserver { ShardMergeRecipientOpObserver() = default; ~ShardMergeRecipientOpObserver() = default; - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfig, NamespaceFilter::kConfig}; + } void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator first, std::vector::const_iterator last, std::vector 
fromMigrate, - bool defaultFromMigrate) final; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void onCreateCollection(OperationContext* opCtx, const CollectionPtr& coll, @@ -138,116 +93,12 @@ class ShardMergeRecipientOpObserver final : public OpObserver { const OplogSlot& createOpTime, bool fromMigrate) final; - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final {} - - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) final {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final 
{} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) final {} - -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final { - } + CollectionDropType dropType, + bool markFromMigrate) final; }; } // namespace mongo::repl diff --git a/src/mongo/db/repl/shard_merge_recipient_op_observer_test.cpp b/src/mongo/db/repl/shard_merge_recipient_op_observer_test.cpp index d7208bba443a7..7f6fad821590f 100644 --- a/src/mongo/db/repl/shard_merge_recipient_op_observer_test.cpp +++ b/src/mongo/db/repl/shard_merge_recipient_op_observer_test.cpp @@ -27,19 +27,36 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/shard_merge_recipient_op_observer.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_shard_merge_util.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/dbtests/mock/mock_replica_set.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo::repl { @@ -84,8 +101,10 @@ class ShardMergeRecipientOpObserverTest : public ServiceContextMongoDTest { AutoGetCollection collection( opCtx(), NamespaceString::kShardMergeRecipientsNamespace, MODE_IX); if (!collection) - FAIL(str::stream() << "Collection " << NamespaceString::kShardMergeRecipientsNamespace - << " doesn't exist"); + FAIL(str::stream() + << "Collection " + << NamespaceString::kShardMergeRecipientsNamespace.toStringForErrorMsg() + << " doesn't exist"); CollectionUpdateArgs updateArgs{preImageDoc}; updateArgs.updatedDoc = UpdatedDoc; diff --git a/src/mongo/db/repl/shard_merge_recipient_service.cpp b/src/mongo/db/repl/shard_merge_recipient_service.cpp index 7bd286b4b7f70..38812e17585b2 100644 --- a/src/mongo/db/repl/shard_merge_recipient_service.cpp +++ b/src/mongo/db/repl/shard_merge_recipient_service.cpp @@ -28,60 +28,135 @@ */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include +#include +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" +#include "mongo/client/internal_auth.h" +#include "mongo/client/remote_command_retry_scheduler.h" #include "mongo/client/replica_set_monitor.h" -#include "mongo/client/replica_set_monitor_manager.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/document_validation.h" +#include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include 
"mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/feature_compatibility_version_parser.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/keys_collection_util.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/ops/single_write_result_gen.h" +#include "mongo/db/ops/update_result.h" #include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/cloner_utils.h" #include "mongo/db/repl/data_replicator_external_state.h" +#include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_buffer_collection.h" #include "mongo/db/repl/oplog_entry.h" -#include "mongo/db/repl/oplog_interface_local.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_auth.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/shard_merge_recipient_service.h" -#include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/sync_source_selector.h" #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/repl/tenant_migration_shard_merge_util.h" #include "mongo/db/repl/tenant_migration_statistics.h" +#include "mongo/db/repl/tenant_migration_util.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_import.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include 
"mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/transport/transport_layer.h" #include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/future_util.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -1253,6 +1328,12 @@ SemiFuture ShardMergeRecipientService::Instance::_getStartOpTimesFromDonor void ShardMergeRecipientService::Instance::_processCommittedTransactionEntry(const BSONObj& entry) { auto sessionTxnRecord = SessionTxnRecord::parse(IDLParserContext("SessionTxnRecord"), entry); auto sessionId = sessionTxnRecord.getSessionId(); + uassert( + ErrorCodes::RetryableInternalTransactionNotSupported, + str::stream() << "Shard merge doesn't support retryable internal transaction. SessionId:: " + << sessionId.toBSON(), + !isInternalSessionForRetryableWrite(sessionId)); + auto txnNumber = sessionTxnRecord.getTxnNum(); auto optTxnRetryCounter = sessionTxnRecord.getTxnRetryCounter(); uassert(ErrorCodes::InvalidOptions, @@ -1335,7 +1416,7 @@ void ShardMergeRecipientService::Instance::_processCommittedTransactionEntry(con AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); writeConflictRetry( - opCtx, "writeDonorCommittedTxnEntry", NamespaceString::kRsOplogNamespace.ns(), [&] { + opCtx, "writeDonorCommittedTxnEntry", NamespaceString::kRsOplogNamespace, [&] { WriteUnitOfWork wuow(opCtx); // Write the no-op entry and update 'config.transactions'. @@ -1468,7 +1549,7 @@ ShardMergeRecipientService::Instance::_fetchRetryableWritesOplogBeforeStartOpTim // re-create the collection. 
auto coordinator = repl::ReplicationCoordinator::get(opCtx.get()); Lock::GlobalLock globalLock(opCtx.get(), MODE_IX); - if (!coordinator->canAcceptWritesForDatabase(opCtx.get(), oplogBufferNS.db())) { + if (!coordinator->canAcceptWritesForDatabase(opCtx.get(), oplogBufferNS.dbName())) { uassertStatusOK( Status(ErrorCodes::NotWritablePrimary, "Recipient node is not primary, cannot clear oplog buffer collection.")); @@ -1573,8 +1654,7 @@ ShardMergeRecipientService::Instance::_fetchRetryableWritesOplogBeforeStartOpTim BSONObj readResult; BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(*operationTime); - _client.get()->runCommand( - DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk); + _client.get()->runCommand(DatabaseName::kAdmin, cmd, readResult, QueryOption_SecondaryOk); uassertStatusOKWithContext( getStatusFromCommandResult(readResult), "Failed to wait for retryable writes pre-fetch result majority committed"); @@ -1592,8 +1672,8 @@ void ShardMergeRecipientService::Instance::_startOplogBuffer(OperationContext* o repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX); auto oplogBufferNS = getOplogBufferNs(getMigrationUUID()); - if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, - oplogBufferNS.db())) { + if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase( + opCtx, oplogBufferNS.dbName())) { uassertStatusOK( Status(ErrorCodes::NotWritablePrimary, "Recipient node is no longer a primary.")); } @@ -1790,7 +1870,7 @@ ShardMergeRecipientService::Instance::_advanceMajorityCommitTsToBkpCursorCheckpo writeConflictRetry(opCtx, "mergeRecipientWriteNoopToAdvanceStableTimestamp", - NamespaceString::kRsOplogNamespace.ns(), + NamespaceString::kRsOplogNamespace, [&] { if (token.isCanceled()) { return; @@ -2018,12 +2098,19 @@ void ShardMergeRecipientService::Instance::_writeStateDoc( OpType opType, const RegisterChangeCbk& registerChange) { const auto& nss = NamespaceString::kShardMergeRecipientsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); - - uassert( - ErrorCodes::NamespaceNotFound, str::stream() << nss.ns() << " does not exist", collection); - - writeConflictRetry(opCtx, "writeShardMergeRecipientStateDoc", nss.ns(), [&]() { + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << nss.toStringForErrorMsg() << " does not exist", + collection.exists()); + + writeConflictRetry(opCtx, "writeShardMergeRecipientStateDoc", nss, [&]() { WriteUnitOfWork wunit(opCtx); if (registerChange) @@ -2032,7 +2119,7 @@ void ShardMergeRecipientService::Instance::_writeStateDoc( const auto filter = BSON(TenantMigrationRecipientDocument::kIdFieldName << stateDoc.getId()); auto updateResult = Helpers::upsert(opCtx, - nss, + collection, filter, stateDoc.toBSON(), /*fromMigrate=*/false); @@ -2116,12 +2203,12 @@ void ShardMergeRecipientService::Instance::_startOplogApplier() { _tenantOplogApplier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kShardMerge, - boost::none, *startApplyingDonorOpTime, + *cloneFinishedRecipientOpTime, + boost::none, _donorOplogBuffer.get(), **_scopedExecutor, _writerPool.get()); - _tenantOplogApplier->setCloneFinishedRecipientOpTime(*cloneFinishedRecipientOpTime); LOGV2_DEBUG(7339750, 1, @@ -2164,11 +2251,12 @@ void 
ShardMergeRecipientService::Instance::_fetchAndStoreDonorClusterTimeKeyDocs auto cursor = _client->find(std::move(findRequest), _readPreference); while (cursor->more()) { const auto doc = cursor->nextSafe().getOwned(); - keyDocs.push_back( - tenant_migration_util::makeExternalClusterTimeKeyDoc(_migrationUuid, doc)); + keyDocs.push_back(keys_collection_util::makeExternalClusterTimeKeyDoc( + doc, _migrationUuid, boost::none /* expireAt */)); } - tenant_migration_util::storeExternalClusterTimeKeyDocs(std::move(keyDocs)); + auto opCtx = cc().makeOperationContext(); + keys_collection_util::storeExternalClusterTimeKeyDocs(opCtx.get(), std::move(keyDocs)); } bool ShardMergeRecipientService::Instance::_isCommitOrAbortState(WithLock) const { @@ -2266,15 +2354,23 @@ SemiFuture ShardMergeRecipientService::Instance::_durablyPersistCommitAbor auto opCtx = opCtxHolder.get(); const auto& nss = NamespaceString::kShardMergeRecipientsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); - uassert( - ErrorCodes::NamespaceNotFound, str::stream() << nss.ns() << " does not exist", collection); + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << nss.toStringForErrorMsg() << " does not exist", + collection.exists()); - writeConflictRetry(opCtx, "markShardMergeStateDocGarbageCollectable", nss.ns(), [&]() { + writeConflictRetry(opCtx, "markShardMergeStateDocGarbageCollectable", nss, [&]() { WriteUnitOfWork wuow(opCtx); auto oplogSlot = LocalOplogInfo::get(opCtx)->getNextOpTimes(opCtx, 1U)[0]; const auto originalRecordId = - Helpers::findById(opCtx, collection.getCollection(), BSON("_id" << _migrationUuid)); + Helpers::findById(opCtx, collection.getCollectionPtr(), BSON("_id" << _migrationUuid)); auto stateDoc = [&]() { stdx::lock_guard lg(_mutex); @@ -2313,23 +2409,24 @@ SemiFuture ShardMergeRecipientService::Instance::_durablyPersistCommitAbor if (originalRecordId.isNull()) { uassertStatusOK(collection_internal::insertDocument( opCtx, - *collection, + collection.getCollectionPtr(), InsertStatement(kUninitializedStmtId, stateDoc, oplogSlot), nullptr)); } else { - auto preImageDoc = collection->docFor(opCtx, originalRecordId); + auto preImageDoc = collection.getCollectionPtr()->docFor(opCtx, originalRecordId); CollectionUpdateArgs args{preImageDoc.value()}; args.criteria = BSON("_id" << _migrationUuid); args.oplogSlots = {oplogSlot}; args.update = stateDoc; collection_internal::updateDocument(opCtx, - *collection, + collection.getCollectionPtr(), originalRecordId, preImageDoc, stateDoc, collection_internal::kUpdateAllIndexes, + nullptr /* indexesAffected */, nullptr /* OpDebug* */, &args); } @@ -2471,7 +2568,7 @@ SemiFuture ShardMergeRecipientService::Instance::run( "migrationId"_attr = getMigrationUUID(), "status"_attr = status); - // We should only hit here on a stepdown or shudDown errors. + // We should only hit here on stepdown or shutdown errors. 
invariant(ErrorCodes::isShutdownError(status) || ErrorCodes::isNotPrimaryError(status)); stdx::lock_guard lk(_mutex); diff --git a/src/mongo/db/repl/shard_merge_recipient_service.h b/src/mongo/db/repl/shard_merge_recipient_service.h index c863e86267819..e442c09c4877a 100644 --- a/src/mongo/db/repl/shard_merge_recipient_service.h +++ b/src/mongo/db/repl/shard_merge_recipient_service.h @@ -29,19 +29,59 @@ #pragma once +#include +#include #include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/client/fetcher.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/client/read_preference.h" #include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/data_replicator_external_state.h" +#include "mongo/db/repl/oplog_buffer_collection.h" #include "mongo/db/repl/oplog_fetcher.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/tenant_migration_pem_payload_gen.h" #include "mongo/db/repl/tenant_migration_shared_data.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/repl/tenant_oplog_applier.h" +#include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_options.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -203,7 +243,6 @@ class ShardMergeRecipientService final : public PrimaryOnlyService { private: friend class ShardMergeRecipientServiceTest; - friend class ShardMergeRecipientServiceShardMergeTest; /** * Only used for testing. Allows setting a custom task executor for backup cursor fetcher. diff --git a/src/mongo/db/repl/shard_merge_recipient_service_test.cpp b/src/mongo/db/repl/shard_merge_recipient_service_test.cpp new file mode 100644 index 0000000000000..96db06a1dfdfe --- /dev/null +++ b/src/mongo/db/repl/shard_merge_recipient_service_test.cpp @@ -0,0 +1,810 @@ +/** + * Copyright (C) 2020-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. 
+ * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include // IWYU pragma: keep +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/connpool.h" +#include "mongo/client/replica_set_monitor_protocol_test_util.h" +#include "mongo/client/sdam/mock_topology_manager.h" +#include "mongo/client/streamable_replica_set_monitor_for_testing.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/database_name.h" +#include "mongo/db/db_raii.h" +#include "mongo/db/dbhelpers.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_impl.h" +#include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/oplog_fetcher_mock.h" +#include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/primary_only_service_op_observer.h" +#include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/shard_merge_recipient_op_observer.h" +#include "mongo/db/repl/shard_merge_recipient_service.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" +#include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/repl/tenant_migration_util.h" +#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/dbtests/mock/mock_conn_registry.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/dbtests/mock/mock_replica_set.h" +#include 
"mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/thread_pool_mock.h" +#include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/log_test.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/clock_source_mock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/net/ssl_util.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + + +namespace mongo { +namespace repl { + +namespace { +constexpr std::int32_t stopFailPointErrorCode = 4880402; +const Timestamp kDefaultStartMigrationTimestamp(1, 1); + +OplogEntry makeOplogEntry(OpTime opTime, + OpTypeEnum opType, + NamespaceString nss, + const boost::optional& uuid, + BSONObj o, + boost::optional o2) { + return {DurableOplogEntry(opTime, // optime + opType, // opType + nss, // namespace + uuid, // uuid + boost::none, // fromMigrate + OplogEntry::kOplogVersion, // version + o, // o + o2, // o2 + {}, // sessionInfo + boost::none, // upsert + Date_t(), // wall clock time + {}, // statement ids + boost::none, // optime of previous write within same transaction + boost::none, // pre-image optime + boost::none, // post-image optime + boost::none, // ShardId of resharding recipient + boost::none, // _id + boost::none)}; // needsRetryImage +} + +} // namespace + +class ShardMergeRecipientServiceTest : public ServiceContextMongoDTest { +public: + class stopFailPointEnableBlock : public FailPointEnableBlock { + public: + explicit stopFailPointEnableBlock(StringData failPointName, + std::int32_t error = stopFailPointErrorCode) + : FailPointEnableBlock(failPointName, + BSON("action" + << "stop" + << "stopErrorCode" << error)) {} + }; + + void setUp() override { + ServiceContextMongoDTest::setUp(); + auto serviceContext = getServiceContext(); + + // Fake replSet just for creating consistent URI for monitor + MockReplicaSet replSet("donorSet", 1, true /* hasPrimary */, true /* dollarPrefixHosts */); + _rsmMonitor.setup(replSet.getURI()); + + ConnectionString::setConnectionHook(mongo::MockConnRegistry::get()->getConnStrHook()); + + WaitForMajorityService::get(serviceContext).startup(serviceContext); + + // Automatically mark the state doc garbage collectable after data sync completion. + globalFailPointRegistry() + .find("autoRecipientForgetMigrationAbort") + ->setMode(FailPoint::alwaysOn, + 0, + BSON("state" + << "aborted")); + + { + auto opCtx = cc().makeOperationContext(); + auto replCoord = std::make_unique( + serviceContext, createServerlessReplSettings()); + ReplicationCoordinator::set(serviceContext, std::move(replCoord)); + + repl::createOplog(opCtx.get()); + { + Lock::GlobalWrite lk(opCtx.get()); + OldClientContext ctx(opCtx.get(), NamespaceString::kRsOplogNamespace); + tenant_migration_util::createOplogViewForTenantMigrations(opCtx.get(), ctx.db()); + } + + // Need real (non-mock) storage for the oplog buffer. 
+ StorageInterface::set(serviceContext, std::make_unique()); + + // The DropPendingCollectionReaper is required to drop the oplog buffer collection. + repl::DropPendingCollectionReaper::set( + serviceContext, + std::make_unique( + StorageInterface::get(serviceContext))); + + // Set up OpObserver so that repl::logOp() will store the oplog entry's optime in + // ReplClientInfo. + OpObserverRegistry* opObserverRegistry = + dynamic_cast(serviceContext->getOpObserver()); + opObserverRegistry->addObserver( + std::make_unique(std::make_unique())); + opObserverRegistry->addObserver( + std::make_unique(serviceContext)); + + // Add OpObserver needed by subclasses. + addOpObserver(opObserverRegistry); + + _registry = repl::PrimaryOnlyServiceRegistry::get(getServiceContext()); + std::unique_ptr service = + std::make_unique(getServiceContext()); + _registry->registerService(std::move(service)); + _registry->onStartup(opCtx.get()); + } + stepUp(); + + _service = _registry->lookupServiceByName( + ShardMergeRecipientService::kShardMergeRecipientServiceName); + ASSERT(_service); + + // MockReplicaSet uses custom connection string which does not support auth. + auto authFp = globalFailPointRegistry().find("skipTenantMigrationRecipientAuth"); + authFp->setMode(FailPoint::alwaysOn); + + // Set the sslMode to allowSSL to avoid validation error. + sslGlobalParams.sslMode.store(SSLParams::SSLMode_allowSSL); + // Skipped unless tested explicitly, as we will not receive an FCV document from the donor + // in these unittests without (unsightly) intervention. + auto compFp = globalFailPointRegistry().find("skipComparingRecipientAndDonorFCV"); + compFp->setMode(FailPoint::alwaysOn); + + // Skip fetching retryable writes, as we will test this logic entirely in integration + // tests. + auto fetchRetryableWritesFp = + globalFailPointRegistry().find("skipFetchingRetryableWritesEntriesBeforeStartOpTime"); + fetchRetryableWritesFp->setMode(FailPoint::alwaysOn); + + // Skip fetching committed transactions, as we will test this logic entirely in integration + // tests. + auto fetchCommittedTransactionsFp = + globalFailPointRegistry().find("skipFetchingCommittedTransactions"); + fetchCommittedTransactionsFp->setMode(FailPoint::alwaysOn); + + // setup mock networking that will be use to mock the backup cursor traffic. + auto net = std::make_unique(); + _net = net.get(); + + executor::ThreadPoolMock::Options dbThreadPoolOptions; + dbThreadPoolOptions.onCreateThread = []() { + Client::initThread("FetchMockTaskExecutor"); + }; + + auto pool = std::make_unique(_net, 1, dbThreadPoolOptions); + _threadpoolTaskExecutor = + std::make_shared(std::move(pool), std::move(net)); + _threadpoolTaskExecutor->startup(); + } + + void tearDown() override { + _threadpoolTaskExecutor->shutdown(); + _threadpoolTaskExecutor->join(); + + auto authFp = globalFailPointRegistry().find("skipTenantMigrationRecipientAuth"); + authFp->setMode(FailPoint::off); + + // Unset the sslMode. + sslGlobalParams.sslMode.store(SSLParams::SSLMode_disabled); + + WaitForMajorityService::get(getServiceContext()).shutDown(); + + _registry->onShutdown(); + _service = nullptr; + + StorageInterface::set(getServiceContext(), {}); + + // Clearing the connection pool is necessary when doing tests which use the + // ReplicaSetMonitor. See src/mongo/dbtests/mock/mock_replica_set.h for details. 
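The executor wiring near the end of `setUp()`, together with the shutdown/join at the top of `tearDown()` above, follows the usual recipe for driving asynchronous code against a scripted network. A minimal sketch of that lifecycle, with illustrative variable names (only the thread name is taken from the fixture above):

```cpp
#include "mongo/db/client.h"
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/thread_pool_mock.h"
#include "mongo/executor/thread_pool_task_executor.h"

// Sketch only: wire a ThreadPoolTaskExecutor to a NetworkInterfaceMock so the test
// thread can later script every remote response it sees.
auto net = std::make_unique<executor::NetworkInterfaceMock>();
auto* netPtr = net.get();  // keep a raw pointer; ownership moves into the executor

executor::ThreadPoolMock::Options options;
options.onCreateThread = [] { Client::initThread("FetchMockTaskExecutor"); };

auto pool = std::make_unique<executor::ThreadPoolMock>(netPtr, 1 /* prngSeed */, options);
auto taskExecutor =
    std::make_shared<executor::ThreadPoolTaskExecutor>(std::move(pool), std::move(net));
taskExecutor->startup();

// ... exercise the code under test, feeding canned responses through netPtr ...

taskExecutor->shutdown();  // mirrored by tearDown(): shutdown before join
taskExecutor->join();
```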
+ ScopedDbConnection::clearPool(); + ReplicaSetMonitorProtocolTestUtil::resetRSMProtocol(); + ServiceContextMongoDTest::tearDown(); + } + + void stepDown() { + ASSERT_OK(ReplicationCoordinator::get(getServiceContext()) + ->setFollowerMode(MemberState::RS_SECONDARY)); + _registry->onStepDown(); + } + + void stepUp() { + auto opCtx = cc().makeOperationContext(); + auto replCoord = ReplicationCoordinator::get(getServiceContext()); + + // Advance term + _term++; + + ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_PRIMARY)); + ASSERT_OK(replCoord->updateTerm(opCtx.get(), _term)); + replCoord->setMyLastAppliedOpTimeAndWallTime( + OpTimeAndWallTime(OpTime(Timestamp(1, 1), _term), Date_t())); + + _registry->onStepUpComplete(opCtx.get(), _term); + } + +protected: + ShardMergeRecipientServiceTest() : ServiceContextMongoDTest(Options{}.useMockClock(true)) {} + + PrimaryOnlyServiceRegistry* _registry; + PrimaryOnlyService* _service; + long long _term = 0; + + bool _collCreated = false; + size_t _numSecondaryIndexesCreated{0}; + size_t _numDocsInserted{0}; + + const TenantId _tenantA{OID::gen()}; + const TenantId _tenantB{OID::gen()}; + const std::vector _tenants{_tenantA, _tenantB}; + + const TenantMigrationPEMPayload kRecipientPEMPayload = [&] { + std::ifstream infile("jstests/libs/client.pem"); + std::string buf((std::istreambuf_iterator(infile)), std::istreambuf_iterator()); + + auto swCertificateBlob = + ssl_util::findPEMBlob(buf, "CERTIFICATE"_sd, 0 /* position */, false /* allowEmpty */); + ASSERT_TRUE(swCertificateBlob.isOK()); + + auto swPrivateKeyBlob = + ssl_util::findPEMBlob(buf, "PRIVATE KEY"_sd, 0 /* position */, false /* allowEmpty */); + ASSERT_TRUE(swPrivateKeyBlob.isOK()); + + return TenantMigrationPEMPayload{swCertificateBlob.getValue().toString(), + swPrivateKeyBlob.getValue().toString()}; + }(); + + void checkStateDocPersisted(OperationContext* opCtx, + const ShardMergeRecipientService::Instance* instance) { + auto memoryStateDoc = getStateDoc(instance); + + // Read the most up to date data. + ReadSourceScope readSourceScope(opCtx, RecoveryUnit::ReadSource::kNoTimestamp); + AutoGetCollectionForRead collection(opCtx, NamespaceString::kShardMergeRecipientsNamespace); + ASSERT(collection); + + BSONObj result; + auto foundDoc = Helpers::findOne( + opCtx, collection.getCollection(), BSON("_id" << memoryStateDoc.getId()), result); + ASSERT(foundDoc); + + auto persistedStateDoc = + ShardMergeRecipientDocument::parse(IDLParserContext("recipientStateDoc"), result); + + ASSERT_BSONOBJ_EQ(memoryStateDoc.toBSON(), persistedStateDoc.toBSON()); + } + void insertToNodes(MockReplicaSet* replSet, + const NamespaceString& nss, + BSONObj obj, + const std::vector& hosts) { + for (const auto& host : hosts) { + replSet->getNode(host.toString())->insert(nss, obj); + } + } + + void clearCollection(MockReplicaSet* replSet, + const NamespaceString& nss, + const std::vector& hosts) { + for (const auto& host : hosts) { + replSet->getNode(host.toString())->remove(nss, BSONObj{} /*filter*/); + } + } + + void insertTopOfOplog(MockReplicaSet* replSet, + const OpTime& topOfOplogOpTime, + const std::vector hosts = {}) { + const auto targetHosts = hosts.empty() ? replSet->getHosts() : hosts; + // The MockRemoteDBService does not actually implement the database, so to make our + // find work correctly we must make sure there's only one document to find. 
+ clearCollection(replSet, NamespaceString::kRsOplogNamespace, targetHosts); + insertToNodes(replSet, + NamespaceString::kRsOplogNamespace, + makeOplogEntry(topOfOplogOpTime, + OpTypeEnum::kNoop, + {} /* namespace */, + boost::none /* uuid */, + BSONObj() /* o */, + boost::none /* o2 */) + .getEntry() + .toBSON(), + targetHosts); + } + + // Accessors to class private members + DBClientConnection* getClient(const ShardMergeRecipientService::Instance* instance) const { + return instance->_client.get(); + } + + const ShardMergeRecipientDocument& getStateDoc( + const ShardMergeRecipientService::Instance* instance) const { + return instance->_stateDoc; + } + + sdam::MockTopologyManager* getTopologyManager() { + return _rsmMonitor.getTopologyManager(); + } + + ClockSource* clock() { + return &_clkSource; + } + + executor::NetworkInterfaceMock* getNet() { + return _net; + } + + executor::NetworkInterfaceMock* _net = nullptr; + std::shared_ptr _threadpoolTaskExecutor; + + void setInstanceBackupCursorFetcherExecutor( + std::shared_ptr instance) { + instance->setBackupCursorFetcherExecutor_forTest(_threadpoolTaskExecutor); + } + +private: + virtual void addOpObserver(OpObserverRegistry* opObserverRegistry){}; + + ClockSourceMock _clkSource; + + unittest::MinimumLoggedSeverityGuard _replicationSeverityGuard{ + logv2::LogComponent::kReplication, logv2::LogSeverity::Debug(1)}; + unittest::MinimumLoggedSeverityGuard _tenantMigrationSeverityGuard{ + logv2::LogComponent::kTenantMigration, logv2::LogSeverity::Debug(1)}; + + StreamableReplicaSetMonitorForTesting _rsmMonitor; + RAIIServerParameterControllerForTest _findHostTimeout{"defaultFindReplicaSetHostTimeoutMS", 10}; +}; + +#ifdef MONGO_CONFIG_SSL + +void waitForReadyRequest(executor::NetworkInterfaceMock* net) { + while (!net->hasReadyRequests()) { + net->advanceTime(net->now() + Milliseconds{1}); + } +} + +BSONObj createEmptyCursorResponse(const NamespaceString& nss, CursorId backupCursorId) { + return BSON("cursor" << BSON("nextBatch" << BSONArray() << "id" << backupCursorId << "ns" + << nss.ns_forTest()) + << "ok" << 1.0); +} + +BSONObj createBackupCursorResponse(const Timestamp& checkpointTimestamp, + const NamespaceString& nss, + CursorId backupCursorId) { + const UUID backupId = + UUID(uassertStatusOK(UUID::parse(("2b068e03-5961-4d8e-b47a-d1c8cbd4b835")))); + StringData remoteDbPath = "/data/db/job0/mongorunner/test-1"; + BSONObjBuilder cursor; + BSONArrayBuilder batch(cursor.subarrayStart("firstBatch")); + auto metaData = BSON("backupId" << backupId << "checkpointTimestamp" << checkpointTimestamp + << "dbpath" << remoteDbPath); + batch.append(BSON("metadata" << metaData)); + + batch.done(); + cursor.append("id", backupCursorId); + cursor.append("ns", nss.ns_forTest()); + BSONObjBuilder backupCursorReply; + backupCursorReply.append("cursor", cursor.obj()); + backupCursorReply.append("ok", 1.0); + return backupCursorReply.obj(); +} + +void sendReponseToExpectedRequest(const BSONObj& backupCursorResponse, + const std::string& expectedRequestFieldName, + executor::NetworkInterfaceMock* net) { + auto noi = net->getNextReadyRequest(); + auto request = noi->getRequest(); + ASSERT_EQUALS(expectedRequestFieldName, request.cmdObj.firstElementFieldNameStringData()); + net->scheduleSuccessfulResponse( + noi, executor::RemoteCommandResponse(backupCursorResponse, Milliseconds())); + net->runReadyNetworkOperations(); +} + +BSONObj createServerAggregateReply() { + return CursorResponse(NamespaceString::makeCollectionlessAggregateNSS(DatabaseName::kAdmin), + 0 
/* cursorId */, + {BSON("byteOffset" << 0 << "endOfFile" << true << "data" + << BSONBinData(0, 0, BinDataGeneral))}) + .toBSONAsInitialResponse(); +} + +/** + * This class adds the TenantMigrationRecipientOpObserver to the main test fixture class. The + * OpObserver uses the TenantFileImporter, which is a ReplicaSetAwareService that creates its own + * worker thread when a state document is inserted. We need to ensure the ReplicaSetAwareService + * shutdown procedure is executed in order to properly clean up and join the worker thread. + */ +class ShardMergeRecipientServiceTestInsert : public ShardMergeRecipientServiceTest { +private: + void addOpObserver(OpObserverRegistry* opObserverRegistry) { + opObserverRegistry->addObserver(std::make_unique()); + } + + void tearDown() override { + ShardMergeRecipientServiceTest::tearDown(); + ReplicaSetAwareServiceRegistry::get(getServiceContext()).onShutdown(); + } +}; + +TEST_F(ShardMergeRecipientServiceTestInsert, TestBlockersAreInsertedWhenInsertingStateDocument) { + stopFailPointEnableBlock fp("fpBeforeFetchingDonorClusterTimeKeys"); + const UUID migrationUUID = UUID::gen(); + + MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); + getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock())); + insertTopOfOplog(&replSet, OpTime(Timestamp(5, 1), 1)); + + // Mock the aggregate response from the donor. + MockRemoteDBServer* const _donorServer = + mongo::MockConnRegistry::get()->getMockRemoteDBServer(replSet.getPrimary()); + _donorServer->setCommandReply("aggregate", createServerAggregateReply()); + + ShardMergeRecipientDocument initialStateDocument( + migrationUUID, + replSet.getConnectionString(), + _tenants, + kDefaultStartMigrationTimestamp, + ReadPreferenceSetting(ReadPreference::PrimaryOnly)); + initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); + + auto opCtx = makeOperationContext(); + std::shared_ptr instance; + { + auto fp = globalFailPointRegistry().find( + "fpAfterPersistingTenantMigrationRecipientInstanceStateDoc"); + auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn, + 0, + BSON("action" + << "hang")); + instance = ShardMergeRecipientService::Instance::getOrCreate( + opCtx.get(), _service, initialStateDocument.toBSON()); + ASSERT(instance.get()); + + fp->waitForTimesEntered(initialTimesEntered + 1); + + // Test that access blocker exists. 
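This hang-and-inspect choreography recurs throughout these tests: enable the fail point with a "hang" action, create the instance, wait until it is paused, assert on the paused state (as the loop below does), then turn the fail point off. Stripped to its essentials, with a placeholder fail point name and a hypothetical stand-in for the work being observed:

```cpp
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/util/fail_point.h"

// "someFailPointName" and startAsyncWork() are placeholders.
auto fp = globalFailPointRegistry().find("someFailPointName");
auto timesEntered = fp->setMode(FailPoint::alwaysOn,
                                0,
                                BSON("action"
                                     << "hang"));

startAsyncWork();  // hypothetical: kicks off the operation that hits the fail point

fp->waitForTimesEntered(timesEntered + 1);  // block until the operation is paused
// ... assert on the intermediate, paused state ...

fp->setMode(FailPoint::off);  // release the hang and let the operation finish
```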
+ for (const auto& tenantId : _tenants) { + auto blocker = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getTenantMigrationAccessBlockerForTenantId( + tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); + ASSERT(!!blocker); + } + fp->setMode(FailPoint::off); + } + + ASSERT_EQ(stopFailPointErrorCode, instance->getMigrationCompletionFuture().getNoThrow().code()); + ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow()); +} + +TEST_F(ShardMergeRecipientServiceTest, OpenBackupCursorSuccessfully) { + stopFailPointEnableBlock fp("fpBeforeAdvancingStableTimestamp"); + const UUID migrationUUID = UUID::gen(); + const CursorId backupCursorId = 12345; + const NamespaceString aggregateNs = + NamespaceString::createNamespaceString_forTest("admin.$cmd.aggregate"); + + auto taskFp = globalFailPointRegistry().find("hangBeforeTaskCompletion"); + auto initialTimesEntered = taskFp->setMode(FailPoint::alwaysOn); + + MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); + getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock())); + insertTopOfOplog(&replSet, OpTime(Timestamp(5, 1), 1)); + + // Mock the aggregate response from the donor. + MockRemoteDBServer* const _donorServer = + mongo::MockConnRegistry::get()->getMockRemoteDBServer(replSet.getPrimary()); + _donorServer->setCommandReply("aggregate", createServerAggregateReply()); + + ShardMergeRecipientDocument initialStateDocument( + migrationUUID, + replSet.getConnectionString(), + _tenants, + kDefaultStartMigrationTimestamp, + ReadPreferenceSetting(ReadPreference::PrimaryOnly)); + initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); + + auto opCtx = makeOperationContext(); + std::shared_ptr instance; + { + auto fp = globalFailPointRegistry().find("pauseBeforeRunTenantMigrationRecipientInstance"); + auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn); + instance = ShardMergeRecipientService::Instance::getOrCreate( + opCtx.get(), _service, initialStateDocument.toBSON()); + ASSERT(instance.get()); + fp->waitForTimesEntered(initialTimesEntered + 1); + setInstanceBackupCursorFetcherExecutor(instance); + instance->setCreateOplogFetcherFn_forTest(std::make_unique()); + fp->setMode(FailPoint::off); + } + + { + auto net = getNet(); + executor::NetworkInterfaceMock::InNetworkGuard guard(net); + waitForReadyRequest(net); + // Mocking the aggregate command network response of the backup cursor in order to have + // data to parse. 
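The mocked responses that follow are produced with the `waitForReadyRequest()` and `sendReponseToExpectedRequest()` helpers defined earlier, which reduce to the standard `NetworkInterfaceMock` scripting loop. Roughly, with an illustrative canned reply:

```cpp
#include "mongo/executor/network_interface_mock.h"
#include "mongo/executor/remote_command_response.h"

executor::NetworkInterfaceMock::InNetworkGuard guard(net);

// Wait for the code under test to enqueue a request on the mock network.
while (!net->hasReadyRequests()) {
    net->advanceTime(net->now() + Milliseconds{1});
}

// Pop the request, check which command it is, and schedule a canned reply.
auto noi = net->getNextReadyRequest();
ASSERT_EQUALS("aggregate", noi->getRequest().cmdObj.firstElementFieldNameStringData());
net->scheduleSuccessfulResponse(
    noi, executor::RemoteCommandResponse(BSON("ok" << 1.0), Milliseconds()));

// Hand the scheduled response back to the waiting callback.
net->runReadyNetworkOperations();
```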
+        sendReponseToExpectedRequest(createBackupCursorResponse(kDefaultStartMigrationTimestamp,
+                                                                aggregateNs,
+                                                                backupCursorId),
+                                     "aggregate",
+                                     net);
+        sendReponseToExpectedRequest(
+            createEmptyCursorResponse(aggregateNs, backupCursorId), "getMore", net);
+        sendReponseToExpectedRequest(
+            createEmptyCursorResponse(aggregateNs, backupCursorId), "getMore", net);
+    }
+
+    taskFp->waitForTimesEntered(initialTimesEntered + 1);
+
+    checkStateDocPersisted(opCtx.get(), instance.get());
+
+    taskFp->setMode(FailPoint::off);
+
+    ASSERT_EQ(stopFailPointErrorCode, instance->getMigrationCompletionFuture().getNoThrow().code());
+    ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow());
+}
+
+TEST_F(ShardMergeRecipientServiceTest, OpenBackupCursorAndRetriesDueToTs) {
+    stopFailPointEnableBlock fp("fpBeforeAdvancingStableTimestamp");
+    const UUID migrationUUID = UUID::gen();
+    const CursorId backupCursorId = 12345;
+    const NamespaceString aggregateNs =
+        NamespaceString::createNamespaceString_forTest("admin.$cmd.aggregate");
+
+    auto taskFp = globalFailPointRegistry().find("hangBeforeTaskCompletion");
+    auto initialTimesEntered = taskFp->setMode(FailPoint::alwaysOn);
+
+    MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */);
+    getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock()));
+    insertTopOfOplog(&replSet, OpTime(Timestamp(5, 1), 1));
+
+    // Mock the aggregate response from the donor.
+    MockRemoteDBServer* const _donorServer =
+        mongo::MockConnRegistry::get()->getMockRemoteDBServer(replSet.getPrimary());
+    _donorServer->setCommandReply("aggregate", createServerAggregateReply());
+
+    ShardMergeRecipientDocument initialStateDocument(
+        migrationUUID,
+        replSet.getConnectionString(),
+        _tenants,
+        kDefaultStartMigrationTimestamp,
+        ReadPreferenceSetting(ReadPreference::PrimaryOnly));
+    initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload);
+
+    auto opCtx = makeOperationContext();
+    std::shared_ptr instance;
+    {
+        auto fp = globalFailPointRegistry().find("pauseBeforeRunTenantMigrationRecipientInstance");
+        auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn);
+        instance = ShardMergeRecipientService::Instance::getOrCreate(
+            opCtx.get(), _service, initialStateDocument.toBSON());
+        ASSERT(instance.get());
+        fp->waitForTimesEntered(initialTimesEntered + 1);
+        setInstanceBackupCursorFetcherExecutor(instance);
+        instance->setCreateOplogFetcherFn_forTest(std::make_unique());
+        fp->setMode(FailPoint::off);
+    }
+
+    {
+        auto net = getNet();
+        executor::NetworkInterfaceMock::InNetworkGuard guard(net);
+        waitForReadyRequest(net);
+
+        // Mock the backup cursor's aggregate command network response so there is data to
+        // parse. In this case we pass a timestamp that is earlier than the
+        // startMigrationTimestamp, which causes a retry. We then provide a correct timestamp in
+        // the next response and succeed.
+ sendReponseToExpectedRequest( + createBackupCursorResponse(Timestamp(0, 0), aggregateNs, backupCursorId), + "aggregate", + net); + sendReponseToExpectedRequest(createBackupCursorResponse(kDefaultStartMigrationTimestamp, + aggregateNs, + backupCursorId), + "killCursors", + net); + sendReponseToExpectedRequest( + createEmptyCursorResponse(aggregateNs, backupCursorId), "killCursors", net); + sendReponseToExpectedRequest(createBackupCursorResponse(kDefaultStartMigrationTimestamp, + aggregateNs, + backupCursorId), + "aggregate", + net); + sendReponseToExpectedRequest( + createEmptyCursorResponse(aggregateNs, backupCursorId), "getMore", net); + sendReponseToExpectedRequest( + createEmptyCursorResponse(aggregateNs, backupCursorId), "getMore", net); + } + + taskFp->waitForTimesEntered(initialTimesEntered + 1); + + checkStateDocPersisted(opCtx.get(), instance.get()); + + taskFp->setMode(FailPoint::off); + + ASSERT_EQ(stopFailPointErrorCode, instance->getMigrationCompletionFuture().getNoThrow().code()); + ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow()); +} + +TEST_F(ShardMergeRecipientServiceTest, TestGarbageCollectionStarted) { + const UUID migrationUUID = UUID::gen(); + + MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); + + auto fp = globalFailPointRegistry().find("pauseTenantMigrationRecipientBeforeDeletingStateDoc"); + auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn); + + ShardMergeRecipientDocument initialStateDocument( + migrationUUID, + replSet.getConnectionString(), + _tenants, + kDefaultStartMigrationTimestamp, + ReadPreferenceSetting(ReadPreference::PrimaryOnly)); + initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); + initialStateDocument.setState(ShardMergeRecipientStateEnum::kStarted); + + // Set startGarbageCollect to true to simulate the case where 'recipientForgetMigration' is + // received before 'recipientSyncData'. 
+ initialStateDocument.setStartGarbageCollect(true); + + auto opCtx = makeOperationContext(); + auto instance = ShardMergeRecipientService::Instance::getOrCreate( + opCtx.get(), _service, initialStateDocument.toBSON()); + ASSERT(instance.get()); + + ASSERT_EQ(ErrorCodes::TenantMigrationForgotten, + instance->getMigrationCompletionFuture().getNoThrow().code()); + + fp->waitForTimesEntered(initialTimesEntered + 1); + checkStateDocPersisted(opCtx.get(), instance.get()); + + auto stateDoc = getStateDoc(instance.get()); + ASSERT_EQ(stateDoc.getState(), ShardMergeRecipientStateEnum::kAborted); + + fp->setMode(FailPoint::off); + + ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow().code()); +} + +TEST_F(ShardMergeRecipientServiceTest, TestForgetMigrationAborted) { + const UUID migrationUUID = UUID::gen(); + + auto deletionFp = + globalFailPointRegistry().find("pauseTenantMigrationRecipientBeforeDeletingStateDoc"); + auto deletionFpTimesEntered = deletionFp->setMode(FailPoint::alwaysOn); + + auto fp = + globalFailPointRegistry().find("fpAfterPersistingTenantMigrationRecipientInstanceStateDoc"); + auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn, + 0, + BSON("action" + << "hang")); + + MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); + getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock())); + insertTopOfOplog(&replSet, OpTime(Timestamp(5, 1), 1)); + + + ShardMergeRecipientDocument initialStateDocument( + migrationUUID, + replSet.getConnectionString(), + _tenants, + kDefaultStartMigrationTimestamp, + ReadPreferenceSetting(ReadPreference::PrimaryOnly)); + initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); + + auto opCtx = makeOperationContext(); + auto instance = ShardMergeRecipientService::Instance::getOrCreate( + opCtx.get(), _service, initialStateDocument.toBSON()); + ASSERT(instance.get()); + fp->waitForTimesEntered(initialTimesEntered + 1); + + instance->onReceiveRecipientForgetMigration(opCtx.get(), MigrationDecisionEnum::kAborted); + + fp->setMode(FailPoint::off); + + ASSERT_EQ(ErrorCodes::TenantMigrationForgotten, + instance->getMigrationCompletionFuture().getNoThrow().code()); + + deletionFp->waitForTimesEntered(deletionFpTimesEntered + 1); + checkStateDocPersisted(opCtx.get(), instance.get()); + auto stateDoc = getStateDoc(instance.get()); + ASSERT_EQ(stateDoc.getState(), ShardMergeRecipientStateEnum::kAborted); + + deletionFp->setMode(FailPoint::off); + + ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow().code()); +} + +#endif +} // namespace repl +} // namespace mongo diff --git a/src/mongo/db/repl/speculative_majority_read_info.cpp b/src/mongo/db/repl/speculative_majority_read_info.cpp index 9664c110ad100..f33244ab41108 100644 --- a/src/mongo/db/repl/speculative_majority_read_info.cpp +++ b/src/mongo/db/repl/speculative_majority_read_info.cpp @@ -28,12 +28,16 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include -#include "mongo/db/client.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/speculative_majority_read_info.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication diff --git a/src/mongo/db/repl/speculative_majority_read_info.h b/src/mongo/db/repl/speculative_majority_read_info.h index 9add9ea1a0893..01cde5f518e7a 100644 --- 
a/src/mongo/db/repl/speculative_majority_read_info.h +++ b/src/mongo/db/repl/speculative_majority_read_info.h @@ -29,9 +29,12 @@ #pragma once +#include #include +#include #include "mongo/bson/timestamp.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/client.h" namespace mongo { diff --git a/src/mongo/db/repl/speculative_majority_read_info_test.cpp b/src/mongo/db/repl/speculative_majority_read_info_test.cpp index 8ef9b62dd4e31..a74ed7dacd70a 100644 --- a/src/mongo/db/repl/speculative_majority_read_info_test.cpp +++ b/src/mongo/db/repl/speculative_majority_read_info_test.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/repl/speculative_majority_read_info.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/split_horizon.cpp b/src/mongo/db/repl/split_horizon.cpp index 440d9ddcf27fa..f76de73211906 100644 --- a/src/mongo/db/repl/split_horizon.cpp +++ b/src/mongo/db/repl/split_horizon.cpp @@ -27,27 +27,31 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - #include "mongo/db/repl/split_horizon.h" +#include +#include +#include #include +#include + +#include +#include +#include +#include -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/client.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication - -using namespace std::literals::string_literals; - -using std::begin; -using std::end; - namespace mongo { namespace repl { namespace { + +using namespace std::literals::string_literals; + const auto getSplitHorizonParameters = Client::declareDecoration(); using AllMappings = SplitHorizon::AllMappings; @@ -125,8 +129,10 @@ SplitHorizon::ForwardMapping computeForwardMappings( const auto horizonEntries = [&] { std::vector rv; - std::transform( - begin(*horizonsObject), end(*horizonsObject), inserter(rv, end(rv)), convert); + std::transform(std::begin(*horizonsObject), + std::end(*horizonsObject), + inserter(rv, end(rv)), + convert); return rv; }(); diff --git a/src/mongo/db/repl/split_horizon.h b/src/mongo/db/repl/split_horizon.h index b597cc7dec360..9f3c73b2532c2 100644 --- a/src/mongo/db/repl/split_horizon.h +++ b/src/mongo/db/repl/split_horizon.h @@ -29,16 +29,25 @@ #pragma once +#include +#include +#include +#include +#include #include #include +#include +#include -#include - +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/repl/repl_set_tag.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/repl/split_horizon_test.cpp b/src/mongo/db/repl/split_horizon_test.cpp index 486c43f2ade9f..80e1a459f2071 100644 --- a/src/mongo/db/repl/split_horizon_test.cpp +++ b/src/mongo/db/repl/split_horizon_test.cpp @@ -27,16 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/split_horizon.h" - #include -#include +#include +#include #include +#include +#include -#include "mongo/stdx/utility.h" -#include "mongo/unittest/unittest.h" +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/repl/split_horizon.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using namespace std::literals::string_literals; diff --git a/src/mongo/db/repl/split_prepare_session_manager.cpp b/src/mongo/db/repl/split_prepare_session_manager.cpp index c3e2bfdca6b37..ab90f488a2d88 100644 --- a/src/mongo/db/repl/split_prepare_session_manager.cpp +++ b/src/mongo/db/repl/split_prepare_session_manager.cpp @@ -29,6 +29,15 @@ #include "mongo/db/repl/split_prepare_session_manager.h" +#include + +#include +#include +#include +#include + +#include "mongo/util/assert_util_core.h" + namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/split_prepare_session_manager.h b/src/mongo/db/repl/split_prepare_session_manager.h index de221410a1966..d970cd1481a74 100644 --- a/src/mongo/db/repl/split_prepare_session_manager.h +++ b/src/mongo/db/repl/split_prepare_session_manager.h @@ -29,9 +29,15 @@ #pragma once +#include +#include +#include +#include + #include "mongo/db/session/internal_session_pool.h" #include "mongo/db/session/logical_session_id.h" -#include +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/platform/mutex.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/split_prepare_session_manager_test.cpp b/src/mongo/db/repl/split_prepare_session_manager_test.cpp index 0ff1135516b7f..b3b511667ef41 100644 --- a/src/mongo/db/repl/split_prepare_session_manager_test.cpp +++ b/src/mongo/db/repl/split_prepare_session_manager_test.cpp @@ -27,13 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/repl/split_prepare_session_manager.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/session/internal_session_pool.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/storage_interface.cpp b/src/mongo/db/repl/storage_interface.cpp index 093c8890747b3..c18da13556a7f 100644 --- a/src/mongo/db/repl/storage_interface.cpp +++ b/src/mongo/db/repl/storage_interface.cpp @@ -27,13 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/service_context.h" -#include "mongo/util/str.h" +#include "mongo/util/decorable.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/storage_interface.h b/src/mongo/db/repl/storage_interface.h index d5b8da9e51908..d5d8fb8005250 100644 --- a/src/mongo/db/repl/storage_interface.h +++ b/src/mongo/db/repl/storage_interface.h @@ -31,18 +31,32 @@ #pragma once #include +#include #include +#include #include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/index_bounds.h" #include "mongo/db/repl/collection_bulk_loader.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/optime.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/storage_interface_impl.cpp b/src/mongo/db/repl/storage_interface_impl.cpp index 95d17e51a85de..1c34e3d4d7a74 100644 --- a/src/mongo/db/repl/storage_interface_impl.cpp +++ b/src/mongo/db/repl/storage_interface_impl.cpp @@ -29,59 +29,88 @@ #include "mongo/db/repl/storage_interface_impl.h" -#include +#include +#include #include +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/catalog/coll_mod.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection_catalog_helper.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/change_stream_change_collection_manager.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" #include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/delete_stage.h" #include "mongo/db/exec/update_stage.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/jsobj.h" #include "mongo/db/keypattern.h" -#include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/delete_request_gen.h" +#include "mongo/db/ops/parsed_delete.h" #include "mongo/db/ops/parsed_update.h" 
#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" #include "mongo/db/repl/collection_bulk_loader_impl.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/rollback_gen.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/db/storage/checkpointer.h" #include "mongo/db/storage/control/journal_flusher.h" #include "mongo/db/storage/control/storage_control.h" #include "mongo/db/storage/oplog_cap_maintainer_thread.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/mutex.h" #include "mongo/util/assert_util.h" -#include "mongo/util/background.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -127,8 +156,6 @@ StatusWith StorageInterfaceImpl::getRollbackID(OperationContext* opCtx) { } StatusWith StorageInterfaceImpl::initializeRollbackID(OperationContext* opCtx) { - // TODO (SERVER-71443): Fix to be interruptible or document exception. - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. auto status = createCollection(opCtx, _rollbackIdNss, CollectionOptions()); if (!status.isOK()) { return status; @@ -225,6 +252,12 @@ StorageInterfaceImpl::createCollectionForBulkLoading( auto opCtx = cc().makeOperationContext(); opCtx->setEnforceConstraints(false); + // TODO(SERVER-74656): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + // DocumentValidationSettings::kDisableInternalValidation is currently inert. // But, it's logically ok to disable internal validation as this function gets called // only during initial sync. @@ -234,7 +267,7 @@ StorageInterfaceImpl::createCollectionForBulkLoading( std::unique_ptr loader; // Retry if WCE. - Status status = writeConflictRetry(opCtx.get(), "beginCollectionClone", nss.ns(), [&] { + Status status = writeConflictRetry(opCtx.get(), "beginCollectionClone", nss, [&] { UnreplicatedWritesBlock uwb(opCtx.get()); // Get locks and create the collection. @@ -242,7 +275,8 @@ StorageInterfaceImpl::createCollectionForBulkLoading( AutoGetCollection coll(opCtx.get(), nss, MODE_X); if (coll) { return Status(ErrorCodes::NamespaceExists, - str::stream() << "Collection " << nss.ns() << " already exists."); + str::stream() + << "Collection " << nss.toStringForErrorMsg() << " already exists."); } { // Create the collection. 
@@ -322,7 +356,7 @@ StatusWith getCollection(const AutoGetCollectionType& auto const auto& collection = autoGetCollection.getCollection(); if (!collection) { return {ErrorCodes::NamespaceNotFound, - str::stream() << "Collection [" << nsOrUUID.toString() << "] not found. " + str::stream() << "Collection [" << nsOrUUID.toStringForErrorMsg() << "] not found. " << message}; } @@ -337,8 +371,7 @@ Status insertDocumentsSingleBatch(OperationContext* opCtx, boost::optional autoOplog; const CollectionPtr* collection; - auto nss = nsOrUUID.nss(); - if (nss && nss->isOplog()) { + if (nsOrUUID.isNamespaceString() && nsOrUUID.nss().isOplog()) { // Simplify locking rules for oplog collection. autoOplog.emplace(opCtx, OplogAccessMode::kWrite); collection = &autoOplog->getCollection(); @@ -395,18 +428,19 @@ Status StorageInterfaceImpl::dropReplicatedDatabases(OperationContext* opCtx) { auto databaseHolder = DatabaseHolder::get(opCtx); auto hasLocalDatabase = false; for (const auto& dbName : dbNames) { - if (dbName.db() == "local") { + if (dbName.isLocalDB()) { hasLocalDatabase = true; continue; } - writeConflictRetry(opCtx, "dropReplicatedDatabases", dbName.toString(), [&] { + writeConflictRetry(opCtx, "dropReplicatedDatabases", NamespaceString(dbName), [&] { if (auto db = databaseHolder->getDb(opCtx, dbName)) { WriteUnitOfWork wuow(opCtx); databaseHolder->dropDb(opCtx, db); wuow.commit(); } else { // This is needed since dropDatabase can't be rolled back. - // This is safe be replaced by "invariant(db);dropDatabase(opCtx, db);" once fixed. + // This is safe be replaced by "invariant(db);dropDatabase(opCtx, db);" once + // fixed. LOGV2(21755, "dropReplicatedDatabases - database disappeared after retrieving list of " "database names but before drop: {dbName}", @@ -439,7 +473,8 @@ StatusWith StorageInterfaceImpl::getOplogMaxSize(OperationContext* opCtx const auto options = oplog->getCollectionOptions(); if (!options.capped) return {ErrorCodes::BadValue, - str::stream() << NamespaceString::kRsOplogNamespace.ns() << " isn't capped"}; + str::stream() << NamespaceString::kRsOplogNamespace.toStringForErrorMsg() + << " isn't capped"}; return options.cappedSize; } @@ -448,35 +483,35 @@ Status StorageInterfaceImpl::createCollection(OperationContext* opCtx, const CollectionOptions& options, const bool createIdIndex, const BSONObj& idIndexSpec) { - return writeConflictRetry(opCtx, "StorageInterfaceImpl::createCollection", nss.ns(), [&] { - AutoGetDb databaseWriteGuard(opCtx, nss.dbName(), MODE_IX); - auto db = databaseWriteGuard.ensureDbExists(opCtx); - invariant(db); - - // Check if there already exist a Collection/view on the given namespace 'nss'. The answer - // may change at any point after this call as we make this call without holding the - // collection lock. But, it is fine as we properly handle while registering the uncommitted - // collection with CollectionCatalog. This check is just here to prevent it from being - // created in the common case. - Status status = mongo::catalog::checkIfNamespaceExists(opCtx, nss); - if (!status.isOK()) { - return status; - } + try { + return writeConflictRetry(opCtx, "StorageInterfaceImpl::createCollection", nss, [&] { + AutoGetDb databaseWriteGuard(opCtx, nss.dbName(), MODE_IX); + auto db = databaseWriteGuard.ensureDbExists(opCtx); + invariant(db); + + // Check if there already exist a Collection/view on the given namespace 'nss'. The + // answer may change at any point after this call as we make this call without holding + // the collection lock. 
But, it is fine as we properly handle while registering the + // uncommitted collection with CollectionCatalog. This check is just here to prevent it + // from being created in the common case. + Status status = mongo::catalog::checkIfNamespaceExists(opCtx, nss); + if (!status.isOK()) { + return status; + } - Lock::CollectionLock lk(opCtx, nss, MODE_IX); - WriteUnitOfWork wuow(opCtx); - try { + Lock::CollectionLock lk(opCtx, nss, MODE_IX); + WriteUnitOfWork wuow(opCtx); auto coll = db->createCollection(opCtx, nss, options, createIdIndex, idIndexSpec); invariant(coll); - // This commit call can throw if a view already exists while registering the collection. + // This commit call can throw if a view already exists while registering the + // collection. wuow.commit(); - } catch (const AssertionException& ex) { - return ex.toStatus(); - } - - return Status::OK(); - }); + return Status::OK(); + }); + } catch (const DBException& ex) { + return ex.toStatus(); + } } Status StorageInterfaceImpl::createIndexesOnEmptyCollection( @@ -487,20 +522,19 @@ Status StorageInterfaceImpl::createIndexesOnEmptyCollection( return Status::OK(); try { - writeConflictRetry( - opCtx, "StorageInterfaceImpl::createIndexesOnEmptyCollection", nss.ns(), [&] { - AutoGetCollection autoColl( - opCtx, nss, fixLockModeForSystemDotViewsChanges(nss, MODE_X)); - CollectionWriter collection(opCtx, nss); - - WriteUnitOfWork wunit(opCtx); - // Use IndexBuildsCoordinator::createIndexesOnEmptyCollection() rather than - // IndexCatalog::createIndexOnEmptyCollection() as the former generates - // 'createIndexes' oplog entry for replicated writes. - IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection( - opCtx, collection, secondaryIndexSpecs, false /* fromMigrate */); - wunit.commit(); - }); + writeConflictRetry(opCtx, "StorageInterfaceImpl::createIndexesOnEmptyCollection", nss, [&] { + AutoGetCollection autoColl( + opCtx, nss, fixLockModeForSystemDotViewsChanges(nss, MODE_X)); + CollectionWriter collection(opCtx, nss); + + WriteUnitOfWork wunit(opCtx); + // Use IndexBuildsCoordinator::createIndexesOnEmptyCollection() rather than + // IndexCatalog::createIndexOnEmptyCollection() as the former generates + // 'createIndexes' oplog entry for replicated writes. + IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection( + opCtx, collection, secondaryIndexSpecs, false /* fromMigrate */); + wunit.commit(); + }); } catch (DBException& ex) { return ex.toStatus(); } @@ -509,26 +543,30 @@ Status StorageInterfaceImpl::createIndexesOnEmptyCollection( } Status StorageInterfaceImpl::dropCollection(OperationContext* opCtx, const NamespaceString& nss) { - return writeConflictRetry(opCtx, "StorageInterfaceImpl::dropCollection", nss.ns(), [&] { - AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IX); - Lock::CollectionLock collLock(opCtx, nss, MODE_X); - if (!autoDb.getDb()) { - // Database does not exist - nothing to do. + try { + return writeConflictRetry(opCtx, "StorageInterfaceImpl::dropCollection", nss, [&] { + AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IX); + Lock::CollectionLock collLock(opCtx, nss, MODE_X); + if (!autoDb.getDb()) { + // Database does not exist - nothing to do. 
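Here `dropCollection()` is being rewrapped in the idiom this change applies to several of these functions: run the retry loop inside a `try` block and convert any `DBException` that escapes it into a returned `Status` (the matching `catch` follows below). A minimal sketch, with a placeholder operation name:

```cpp
#include "mongo/base/status.h"
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/util/assert_util.h"

// Sketch: surface storage-layer exceptions as a Status at the API boundary.
try {
    return writeConflictRetry(opCtx, "someOperation", nss, [&] {
        // ... locking, WriteUnitOfWork, commit ...
        return Status::OK();
    });
} catch (const DBException& ex) {
    return ex.toStatus();
}
```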
+ return Status::OK(); + } + WriteUnitOfWork wunit(opCtx); + auto status = autoDb.getDb()->dropCollectionEvenIfSystem(opCtx, nss); + if (!status.isOK()) { + return status; + } + wunit.commit(); return Status::OK(); - } - WriteUnitOfWork wunit(opCtx); - const auto status = autoDb.getDb()->dropCollectionEvenIfSystem(opCtx, nss); - if (!status.isOK()) { - return status; - } - wunit.commit(); - return Status::OK(); - }); + }); + } catch (const DBException& ex) { + return ex.toStatus(); + } } Status StorageInterfaceImpl::truncateCollection(OperationContext* opCtx, const NamespaceString& nss) { - return writeConflictRetry(opCtx, "StorageInterfaceImpl::truncateCollection", nss.ns(), [&] { + return writeConflictRetry(opCtx, "StorageInterfaceImpl::truncateCollection", nss, [&] { AutoGetCollection autoColl(opCtx, nss, MODE_X); auto collectionResult = getCollection(autoColl, nss, "The collection must exist before truncating."); @@ -537,7 +575,7 @@ Status StorageInterfaceImpl::truncateCollection(OperationContext* opCtx, } WriteUnitOfWork wunit(opCtx); - const auto status = autoColl.getWritableCollection(opCtx)->truncate(opCtx); + auto status = autoColl.getWritableCollection(opCtx)->truncate(opCtx); if (!status.isOK()) { return status; } @@ -553,19 +591,21 @@ Status StorageInterfaceImpl::renameCollection(OperationContext* opCtx, if (fromNS.db() != toNS.db()) { return Status(ErrorCodes::InvalidNamespace, str::stream() << "Cannot rename collection between databases. From NS: " - << fromNS.ns() << "; to NS: " << toNS.ns()); + << fromNS.toStringForErrorMsg() + << "; to NS: " << toNS.toStringForErrorMsg()); } - return writeConflictRetry(opCtx, "StorageInterfaceImpl::renameCollection", fromNS.ns(), [&] { + return writeConflictRetry(opCtx, "StorageInterfaceImpl::renameCollection", fromNS, [&] { AutoGetDb autoDB(opCtx, fromNS.dbName(), MODE_X); if (!autoDB.getDb()) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "Cannot rename collection from " << fromNS.ns() << " to " - << toNS.ns() << ". Database " - << fromNS.dbName().toStringForErrorMsg() << " not found."); + str::stream() + << "Cannot rename collection from " << fromNS.toStringForErrorMsg() + << " to " << toNS.toStringForErrorMsg() << ". 
Database " + << fromNS.dbName().toStringForErrorMsg() << " not found."); } WriteUnitOfWork wunit(opCtx); - const auto status = autoDB.getDb()->renameCollection(opCtx, fromNS, toNS, stayTemp); + auto status = autoDB.getDb()->renameCollection(opCtx, fromNS, toNS, stayTemp); if (!status.isOK()) { return status; } @@ -583,11 +623,12 @@ Status StorageInterfaceImpl::setIndexIsMultikey(OperationContext* opCtx, Timestamp ts) { if (ts.isNull()) { return Status(ErrorCodes::InvalidOptions, - str::stream() << "Cannot set index " << indexName << " on " << nss.ns() - << " (" << collectionUUID << ") as multikey at null timestamp"); + str::stream() + << "Cannot set index " << indexName << " on " << nss.toStringForErrorMsg() + << " (" << collectionUUID << ") as multikey at null timestamp"); } - return writeConflictRetry(opCtx, "StorageInterfaceImpl::setIndexIsMultikey", nss.ns(), [&] { + return writeConflictRetry(opCtx, "StorageInterfaceImpl::setIndexIsMultikey", nss, [&] { const NamespaceStringOrUUID nsOrUUID(nss.dbName(), collectionUUID); boost::optional autoColl; try { @@ -614,9 +655,9 @@ Status StorageInterfaceImpl::setIndexIsMultikey(OperationContext* opCtx, IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished); if (!idx) { return Status(ErrorCodes::IndexNotFound, - str::stream() - << "Could not find index " << indexName << " in " << nss.ns() << " (" - << collectionUUID << ") to set to multikey."); + str::stream() << "Could not find index " << indexName << " in " + << nss.toStringForErrorMsg() << " (" << collectionUUID + << ") to set to multikey."); } collection->getIndexCatalog()->setMultikeyPaths( opCtx, collection, idx, multikeyMetadataKeys, paths); @@ -655,177 +696,181 @@ StatusWith> _findOrDeleteDocuments( auto isFind = mode == FindDeleteMode::kFind; auto opStr = isFind ? "StorageInterfaceImpl::find" : "StorageInterfaceImpl::delete"; - return writeConflictRetry( - opCtx, opStr, nsOrUUID.toString(), [&]() -> StatusWith> { - // We need to explicitly use this in a few places to help the type inference. Use a - // shorthand. - using Result = StatusWith>; - - auto collectionAccessMode = isFind ? MODE_IS : MODE_IX; - AutoGetCollection autoColl(opCtx, nsOrUUID, collectionAccessMode); - auto collectionResult = getCollection( - autoColl, nsOrUUID, str::stream() << "Unable to proceed with " << opStr << "."); - if (!collectionResult.isOK()) { - return Result(collectionResult.getStatus()); - } - const auto& collection = *collectionResult.getValue(); + return writeConflictRetry(opCtx, opStr, nsOrUUID, [&]() -> StatusWith> { + // We need to explicitly use this in a few places to help the type inference. Use a + // shorthand. + using Result = StatusWith>; + + auto collectionAccessMode = isFind ? MODE_IS : MODE_IX; + const auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, nsOrUUID, AcquisitionPrerequisites::kWrite), + collectionAccessMode); + if (!collection.exists()) { + return Status{ErrorCodes::NamespaceNotFound, + str::stream() + << "Collection [" << nsOrUUID.toStringForErrorMsg() << "] not found. " + << "Unable to proceed with " << opStr << "."}; + } - auto isForward = scanDirection == StorageInterface::ScanDirection::kForward; - auto direction = isForward ? InternalPlanner::FORWARD : InternalPlanner::BACKWARD; + auto isForward = scanDirection == StorageInterface::ScanDirection::kForward; + auto direction = isForward ? 
InternalPlanner::FORWARD : InternalPlanner::BACKWARD; - std::unique_ptr planExecutor; - if (!indexName) { - if (!startKey.isEmpty()) { - return Result(ErrorCodes::NoSuchKey, - "non-empty startKey not allowed for collection scan"); - } - if (boundInclusion != BoundInclusion::kIncludeStartKeyOnly) { - return Result( - ErrorCodes::InvalidOptions, - "bound inclusion must be BoundInclusion::kIncludeStartKeyOnly for " - "collection scan"); - } - // Use collection scan. - planExecutor = isFind - ? InternalPlanner::collectionScan( - opCtx, &collection, PlanYieldPolicy::YieldPolicy::NO_YIELD, direction) - : InternalPlanner::deleteWithCollectionScan( - opCtx, - &collection, - makeDeleteStageParamsForDeleteDocuments(), - PlanYieldPolicy::YieldPolicy::NO_YIELD, - direction); - } else if (*indexName == kIdIndexName && collection->isClustered() && - collection->getClusteredInfo() - ->getIndexSpec() - .getKey() - .firstElement() - .fieldNameStringData() == "_id") { - - auto collScanBoundInclusion = [boundInclusion]() { - switch (boundInclusion) { - case BoundInclusion::kExcludeBothStartAndEndKeys: - return CollectionScanParams::ScanBoundInclusion:: - kExcludeBothStartAndEndRecords; - case BoundInclusion::kIncludeStartKeyOnly: - return CollectionScanParams::ScanBoundInclusion:: - kIncludeStartRecordOnly; - case BoundInclusion::kIncludeEndKeyOnly: - return CollectionScanParams::ScanBoundInclusion::kIncludeEndRecordOnly; - case BoundInclusion::kIncludeBothStartAndEndKeys: - return CollectionScanParams::ScanBoundInclusion:: - kIncludeBothStartAndEndRecords; - default: - MONGO_UNREACHABLE; - } - }(); - - boost::optional minRecord, maxRecord; - if (direction == InternalPlanner::FORWARD) { - if (!startKey.isEmpty()) { - minRecord = RecordIdBound(record_id_helpers::keyForObj(startKey)); - } - if (!endKey.isEmpty()) { - maxRecord = RecordIdBound(record_id_helpers::keyForObj(endKey)); - } - } else { - if (!startKey.isEmpty()) { - maxRecord = RecordIdBound(record_id_helpers::keyForObj(startKey)); - } - if (!endKey.isEmpty()) { - minRecord = RecordIdBound(record_id_helpers::keyForObj(endKey)); - } + std::unique_ptr planExecutor; + if (!indexName) { + if (!startKey.isEmpty()) { + return Result(ErrorCodes::NoSuchKey, + "non-empty startKey not allowed for collection scan"); + } + if (boundInclusion != BoundInclusion::kIncludeStartKeyOnly) { + return Result(ErrorCodes::InvalidOptions, + "bound inclusion must be BoundInclusion::kIncludeStartKeyOnly for " + "collection scan"); + } + // Use collection scan. + planExecutor = isFind + ? 
InternalPlanner::collectionScan(opCtx, + &collection.getCollectionPtr(), + PlanYieldPolicy::YieldPolicy::NO_YIELD, + direction) + : InternalPlanner::deleteWithCollectionScan( + opCtx, + collection, + makeDeleteStageParamsForDeleteDocuments(), + PlanYieldPolicy::YieldPolicy::NO_YIELD, + direction); + } else if (*indexName == kIdIndexName && collection.getCollectionPtr()->isClustered() && + collection.getCollectionPtr() + ->getClusteredInfo() + ->getIndexSpec() + .getKey() + .firstElement() + .fieldNameStringData() == "_id") { + + auto collScanBoundInclusion = [boundInclusion]() { + switch (boundInclusion) { + case BoundInclusion::kExcludeBothStartAndEndKeys: + return CollectionScanParams::ScanBoundInclusion:: + kExcludeBothStartAndEndRecords; + case BoundInclusion::kIncludeStartKeyOnly: + return CollectionScanParams::ScanBoundInclusion::kIncludeStartRecordOnly; + case BoundInclusion::kIncludeEndKeyOnly: + return CollectionScanParams::ScanBoundInclusion::kIncludeEndRecordOnly; + case BoundInclusion::kIncludeBothStartAndEndKeys: + return CollectionScanParams::ScanBoundInclusion:: + kIncludeBothStartAndEndRecords; + default: + MONGO_UNREACHABLE; } + }(); - planExecutor = isFind - ? InternalPlanner::collectionScan(opCtx, - &collection, - PlanYieldPolicy::YieldPolicy::NO_YIELD, - direction, - boost::none /* resumeAfterId */, - minRecord, - maxRecord, - collScanBoundInclusion) - : InternalPlanner::deleteWithCollectionScan( - opCtx, - &collection, - makeDeleteStageParamsForDeleteDocuments(), - PlanYieldPolicy::YieldPolicy::NO_YIELD, - direction, - minRecord, - maxRecord, - collScanBoundInclusion); - } else { - // Use index scan. - auto indexCatalog = collection->getIndexCatalog(); - invariant(indexCatalog); - const IndexDescriptor* indexDescriptor = indexCatalog->findIndexByName( - opCtx, *indexName, IndexCatalog::InclusionPolicy::kReady); - if (!indexDescriptor) { - return Result(ErrorCodes::IndexNotFound, - str::stream() << "Index not found, ns:" << nsOrUUID.toString() - << ", index: " << *indexName); + boost::optional minRecord, maxRecord; + if (direction == InternalPlanner::FORWARD) { + if (!startKey.isEmpty()) { + minRecord = RecordIdBound(record_id_helpers::keyForObj(startKey)); } - if (indexDescriptor->isPartial()) { - return Result(ErrorCodes::IndexOptionsConflict, - str::stream() - << "Partial index is not allowed for this operation, ns:" - << nsOrUUID.toString() << ", index: " << *indexName); + if (!endKey.isEmpty()) { + maxRecord = RecordIdBound(record_id_helpers::keyForObj(endKey)); } - - KeyPattern keyPattern(indexDescriptor->keyPattern()); - auto minKey = Helpers::toKeyFormat(keyPattern.extendRangeBound({}, false)); - auto maxKey = Helpers::toKeyFormat(keyPattern.extendRangeBound({}, true)); - auto bounds = - isForward ? std::make_pair(minKey, maxKey) : std::make_pair(maxKey, minKey); + } else { if (!startKey.isEmpty()) { - bounds.first = startKey; + maxRecord = RecordIdBound(record_id_helpers::keyForObj(startKey)); } if (!endKey.isEmpty()) { - bounds.second = endKey; + minRecord = RecordIdBound(record_id_helpers::keyForObj(endKey)); } - planExecutor = isFind - ? 
InternalPlanner::indexScan(opCtx, - &collection, - indexDescriptor, - bounds.first, - bounds.second, - boundInclusion, - PlanYieldPolicy::YieldPolicy::NO_YIELD, - direction, - InternalPlanner::IXSCAN_FETCH) - : InternalPlanner::deleteWithIndexScan( - opCtx, - &collection, - makeDeleteStageParamsForDeleteDocuments(), - indexDescriptor, - bounds.first, - bounds.second, - boundInclusion, - PlanYieldPolicy::YieldPolicy::NO_YIELD, - direction); } - std::vector docs; + planExecutor = isFind + ? InternalPlanner::collectionScan(opCtx, + &collection.getCollectionPtr(), + PlanYieldPolicy::YieldPolicy::NO_YIELD, + direction, + boost::none /* resumeAfterId */, + minRecord, + maxRecord, + collScanBoundInclusion) + : InternalPlanner::deleteWithCollectionScan( + opCtx, + collection, + makeDeleteStageParamsForDeleteDocuments(), + PlanYieldPolicy::YieldPolicy::NO_YIELD, + direction, + minRecord, + maxRecord, + collScanBoundInclusion); + } else { + // Use index scan. + auto indexCatalog = collection.getCollectionPtr()->getIndexCatalog(); + invariant(indexCatalog); + const IndexDescriptor* indexDescriptor = indexCatalog->findIndexByName( + opCtx, *indexName, IndexCatalog::InclusionPolicy::kReady); + if (!indexDescriptor) { + return Result(ErrorCodes::IndexNotFound, + str::stream() + << "Index not found, ns:" << nsOrUUID.toStringForErrorMsg() + << ", index: " << *indexName); + } + if (indexDescriptor->isPartial()) { + return Result(ErrorCodes::IndexOptionsConflict, + str::stream() + << "Partial index is not allowed for this operation, ns:" + << nsOrUUID.toStringForErrorMsg() << ", index: " << *indexName); + } - try { - BSONObj out; - PlanExecutor::ExecState state = PlanExecutor::ExecState::ADVANCED; - while (state == PlanExecutor::ExecState::ADVANCED && docs.size() < limit) { - state = planExecutor->getNext(&out, nullptr); - if (state == PlanExecutor::ExecState::ADVANCED) { - docs.push_back(out.getOwned()); - } + KeyPattern keyPattern(indexDescriptor->keyPattern()); + auto minKey = Helpers::toKeyFormat(keyPattern.extendRangeBound({}, false)); + auto maxKey = Helpers::toKeyFormat(keyPattern.extendRangeBound({}, true)); + auto bounds = + isForward ? std::make_pair(minKey, maxKey) : std::make_pair(maxKey, minKey); + if (!startKey.isEmpty()) { + bounds.first = startKey; + } + if (!endKey.isEmpty()) { + bounds.second = endKey; + } + planExecutor = isFind + ? InternalPlanner::indexScan(opCtx, + &collection.getCollectionPtr(), + indexDescriptor, + bounds.first, + bounds.second, + boundInclusion, + PlanYieldPolicy::YieldPolicy::NO_YIELD, + direction, + InternalPlanner::IXSCAN_FETCH) + : InternalPlanner::deleteWithIndexScan(opCtx, + collection, + makeDeleteStageParamsForDeleteDocuments(), + indexDescriptor, + bounds.first, + bounds.second, + boundInclusion, + PlanYieldPolicy::YieldPolicy::NO_YIELD, + direction); + } + + std::vector docs; + + try { + BSONObj out; + PlanExecutor::ExecState state = PlanExecutor::ExecState::ADVANCED; + while (state == PlanExecutor::ExecState::ADVANCED && docs.size() < limit) { + state = planExecutor->getNext(&out, nullptr); + if (state == PlanExecutor::ExecState::ADVANCED) { + docs.push_back(out.getOwned()); } - } catch (const WriteConflictException&) { - // Re-throw the WCE, since it will get caught be a retry loop at a higher level. - throw; - } catch (const DBException&) { - return exceptionToStatus(); } + } catch (const WriteConflictException&) { + // Re-throw the WCE, since it will get caught be a retry loop at a higher level. 
+ throw; + } catch (const DBException&) { + return exceptionToStatus(); + } - return Result{docs}; - }); + return Result{docs}; + }); } StatusWith<BSONObj> _findOrDeleteById(OperationContext* opCtx, @@ -849,7 +894,7 @@ StatusWith<BSONObj> _findOrDeleteById(OperationContext* opCtx, if (docs.empty()) { return {ErrorCodes::NoSuchKey, str::stream() << "No document found with _id: " << redact(idKey) << " in namespace " - << nsOrUUID.toString()}; + << nsOrUUID.toStringForErrorMsg()}; } return docs.front(); @@ -911,10 +956,11 @@ StatusWith<BSONObj> StorageInterfaceImpl::findSingleton(OperationContext* opCtx, const auto& docs = result.getValue(); if (docs.empty()) { return {ErrorCodes::CollectionIsEmpty, - str::stream() << "No document found in namespace: " << nss.ns()}; + str::stream() << "No document found in namespace: " << nss.toStringForErrorMsg()}; } else if (docs.size() != 1U) { return {ErrorCodes::TooManyMatchingDocuments, - str::stream() << "More than singleton document found in namespace: " << nss.ns()}; + str::stream() << "More than singleton document found in namespace: " + << nss.toStringForErrorMsg()}; } return docs.front(); @@ -959,27 +1005,28 @@ Status _updateWithQuery(OperationContext* opCtx, invariant(PlanYieldPolicy::YieldPolicy::NO_YIELD == request.getYieldPolicy()); auto& nss = request.getNamespaceString(); - return writeConflictRetry(opCtx, "_updateWithQuery", nss.ns(), [&] { + return writeConflictRetry(opCtx, "_updateWithQuery", nss, [&] { + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + if (!collection.exists()) { + return Status{ErrorCodes::NamespaceNotFound, + str::stream() + << "Collection [" << nss.toStringForErrorMsg() << "] not found. " + << "Unable to update documents in " << nss.toStringForErrorMsg() + << " using query " << request.getQuery()}; + } + // ParsedUpdate needs to be inside the write conflict retry loop because it may create a // CanonicalQuery whose ownership will be transferred to the plan executor in // getExecutorUpdate().
- const ExtensionsCallbackReal extensionsCallback(opCtx, &request.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback); + ParsedUpdate parsedUpdate(opCtx, &request, collection.getCollectionPtr()); auto parsedUpdateStatus = parsedUpdate.parseRequest(); if (!parsedUpdateStatus.isOK()) { return parsedUpdateStatus; } - AutoGetCollection autoColl(opCtx, nss, MODE_IX); - auto collectionResult = - getCollection(autoColl, - nss, - str::stream() << "Unable to update documents in " << nss.ns() - << " using query " << request.getQuery()); - if (!collectionResult.isOK()) { - return collectionResult.getStatus(); - } - const auto& collection = *collectionResult.getValue(); WriteUnitOfWork wuow(opCtx); if (!ts.isNull()) { uassertStatusOK(opCtx->recoveryUnit()->setTimestamp(ts)); @@ -987,7 +1034,7 @@ Status _updateWithQuery(OperationContext* opCtx, } auto planExecutorResult = mongo::getExecutorUpdate( - nullptr, &collection, &parsedUpdate, boost::none /* verbosity */); + nullptr, collection, &parsedUpdate, boost::none /* verbosity */); if (!planExecutorResult.isOK()) { return planExecutorResult.getStatus(); } @@ -1021,18 +1068,23 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx, } auto query = queryResult.getValue(); - return writeConflictRetry(opCtx, "StorageInterfaceImpl::upsertById", nsOrUUID.toString(), [&] { - AutoGetCollection autoColl(opCtx, nsOrUUID, MODE_IX); - auto collectionResult = getCollection(autoColl, nsOrUUID, "Unable to update document."); - if (!collectionResult.isOK()) { - return collectionResult.getStatus(); + return writeConflictRetry(opCtx, "StorageInterfaceImpl::upsertById", nsOrUUID, [&] { + const auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, nsOrUUID, AcquisitionPrerequisites::kWrite), + MODE_IX); + if (!collection.exists()) { + return Status{ErrorCodes::NamespaceNotFound, + str::stream() + << "Collection [" << nsOrUUID.toStringForErrorMsg() << "] not found. " + << "Unable to update document."}; } - const auto& collection = *collectionResult.getValue(); // We can create an UpdateRequest now that the collection's namespace has been resolved, in // the event it was specified as a UUID. auto request = UpdateRequest(); - request.setNamespaceString(collection->ns()); + request.setNamespaceString(collection.nss()); request.setQuery(query); request.setUpdateModification( write_ops::UpdateModification::parseFromClassicUpdate(update)); @@ -1043,8 +1095,7 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx, // ParsedUpdate needs to be inside the write conflict retry loop because it contains // the UpdateDriver whose state may be modified while we are applying the update. - const ExtensionsCallbackReal extensionsCallback(opCtx, &request.getNamespaceString()); - ParsedUpdate parsedUpdate(opCtx, &request, extensionsCallback); + ParsedUpdate parsedUpdate(opCtx, &request, collection.getCollectionPtr()); auto parsedUpdateStatus = parsedUpdate.parseRequest(); if (!parsedUpdateStatus.isOK()) { return parsedUpdateStatus; @@ -1052,7 +1103,7 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx, // We're using the ID hack to perform the update so we have to disallow collections // without an _id index. 
- auto descriptor = collection->getIndexCatalog()->findIdIndex(opCtx); + auto descriptor = collection.getCollectionPtr()->getIndexCatalog()->findIdIndex(opCtx); if (!descriptor) { return Status(ErrorCodes::IndexNotFound, "Unable to update document in a collection without an _id index."); @@ -1061,7 +1112,7 @@ Status StorageInterfaceImpl::upsertById(OperationContext* opCtx, UpdateStageParams updateStageParams( parsedUpdate.getRequest(), parsedUpdate.getDriver(), nullptr); auto planExecutor = InternalPlanner::updateWithIdHack(opCtx, - &collection, + collection, updateStageParams, descriptor, idKey.wrap(""), @@ -1118,29 +1169,30 @@ Status StorageInterfaceImpl::deleteByFilter(OperationContext* opCtx, // disallow client deletes from unrecognized system collections. request.setGod(true); - return writeConflictRetry(opCtx, "StorageInterfaceImpl::deleteByFilter", nss.ns(), [&] { + return writeConflictRetry(opCtx, "StorageInterfaceImpl::deleteByFilter", nss, [&] { + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + if (!collection.exists()) { + return Status{ErrorCodes::NamespaceNotFound, + str::stream() + << "Collection [" << nss.toStringForErrorMsg() << "] not found. " + << "Unable to delete documents in " << nss.toStringForErrorMsg() + << " using filter " << filter}; + } + // ParsedDelete needs to be inside the write conflict retry loop because it may create a // CanonicalQuery whose ownership will be transferred to the plan executor in // getExecutorDelete(). - ParsedDelete parsedDelete(opCtx, &request); + ParsedDelete parsedDelete(opCtx, &request, collection.getCollectionPtr()); auto parsedDeleteStatus = parsedDelete.parseRequest(); if (!parsedDeleteStatus.isOK()) { return parsedDeleteStatus; } - AutoGetCollection autoColl(opCtx, nss, MODE_IX); - auto collectionResult = - getCollection(autoColl, - nss, - str::stream() << "Unable to delete documents in " << nss.ns() - << " using filter " << filter); - if (!collectionResult.isOK()) { - return collectionResult.getStatus(); - } - const auto& collection = *collectionResult.getValue(); - auto planExecutorResult = mongo::getExecutorDelete( - nullptr, &collection, &parsedDelete, boost::none /* verbosity */); + nullptr, collection, &parsedDelete, boost::none /* verbosity */); if (!planExecutorResult.isOK()) { return planExecutorResult.getStatus(); } @@ -1337,7 +1389,9 @@ StatusWith StorageInterfaceImpl::getCollectionUUID(OperationContext* opCtx AutoGetCollectionForRead autoColl(opCtx, nss); auto collectionResult = getCollection( - autoColl, nss, str::stream() << "Unable to get UUID of " << nss.ns() << " collection."); + autoColl, + nss, + str::stream() << "Unable to get UUID of " << nss.toStringForErrorMsg() << " collection."); if (!collectionResult.isOK()) { return collectionResult.getStatus(); } @@ -1433,7 +1487,8 @@ void StorageInterfaceImpl::waitForAllEarlierOplogWritesToBeVisible(OperationCont AutoGetOplog oplogRead(opCtx, OplogAccessMode::kRead); if (primaryOnly && - !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, "admin")) + !repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, + DatabaseName::kAdmin)) return; const auto& oplog = oplogRead.getCollection(); uassert(ErrorCodes::NotYetInitialized, "The oplog does not exist", oplog); @@ -1460,7 +1515,7 @@ boost::optional StorageInterfaceImpl::getLastStableRecoveryTimestamp( return boost::none; } - const auto ret = 
serviceCtx->getStorageEngine()->getLastStableRecoveryTimestamp(); + auto ret = serviceCtx->getStorageEngine()->getLastStableRecoveryTimestamp(); if (ret == boost::none) { return Timestamp::min(); } diff --git a/src/mongo/db/repl/storage_interface_impl.h b/src/mongo/db/repl/storage_interface_impl.h index 27b75b9da72e4..9293337e88225 100644 --- a/src/mongo/db/repl/storage_interface_impl.h +++ b/src/mongo/db/repl/storage_interface_impl.h @@ -30,12 +30,31 @@ #pragma once +#include +#include +#include +#include + +#include + +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/repl/collection_bulk_loader.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/storage_interface_impl_test.cpp b/src/mongo/db/repl/storage_interface_impl_test.cpp index e586e013e40cd..c6d48d6861e64 100644 --- a/src/mongo/db/repl/storage_interface_impl_test.cpp +++ b/src/mongo/db/repl/storage_interface_impl_test.cpp @@ -28,35 +28,57 @@ */ #include -#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/catalog/validate_results.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier_impl_test_fixture.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/storage/snapshot_manager.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/stdx/thread.h" #include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include 
"mongo/util/fail_point.h" #include "mongo/util/str.h" @@ -120,7 +142,7 @@ CollectionOptions createOplogCollectionOptions() { void createCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options = generateOptionsWithUuid()) { - writeConflictRetry(opCtx, "createCollection", nss.ns(), [&] { + writeConflictRetry(opCtx, "createCollection", nss, [&] { Lock::DBLock dblk(opCtx, nss.dbName(), MODE_X); OldClientContext ctx(opCtx, nss); auto db = ctx.db(); @@ -403,7 +425,7 @@ TEST_F(StorageInterfaceImplTest, InsertDocumentsReturnsOKWhenNoOperationsAreGive StorageInterfaceImpl storage; ASSERT_OK(storage.insertDocuments(opCtx, nss, {})); - ASSERT_OK(storage.insertDocuments(opCtx, {nss.db().toString(), *options.uuid}, {})); + ASSERT_OK(storage.insertDocuments(opCtx, {nss.dbName(), *options.uuid}, {})); } TEST_F(StorageInterfaceImplTest, @@ -423,9 +445,9 @@ TEST_F(StorageInterfaceImplTest, ASSERT_STRING_CONTAINS(status.reason(), "Collection::insertDocument got document without _id"); // Again, but specify the collection with its UUID. - ASSERT_EQ(ErrorCodes::InternalError, - storage.insertDocuments( - opCtx, {nss.db().toString(), *options.uuid}, transformInserts({op}))); + ASSERT_EQ( + ErrorCodes::InternalError, + storage.insertDocuments(opCtx, {nss.dbName(), *options.uuid}, transformInserts({op}))); } TEST_F(StorageInterfaceImplTest, @@ -487,7 +509,7 @@ TEST_F(StorageInterfaceImplTest, InsertDocumentsSavesOperationsWhenCollSpecified auto op1 = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL}); auto op2 = makeOplogEntry({Timestamp(Seconds(1), 0), 1LL}); ASSERT_OK(storage.insertDocuments( - opCtx, {nss.db().toString(), *options.uuid}, transformInserts({op1, op2}))); + opCtx, {nss.dbName(), *options.uuid}, transformInserts({op1, op2}))); // Check contents of oplog. 
_assertDocumentsInCollectionEquals(opCtx, nss, {op1.obj, op2.obj}); @@ -538,7 +560,7 @@ TEST_F(StorageInterfaceImplTest, InsertDocWorksWithExistingCappedCollectionSpeci StorageInterfaceImpl storage; ASSERT_OK(storage.insertDocument(opCtx, - {nss.db().toString(), *options.uuid}, + {nss.dbName(), *options.uuid}, {BSON("_id" << 1), Timestamp(1)}, OpTime::kUninitializedTerm)); AutoGetCollectionForReadCommand autoColl(opCtx, nss); @@ -682,7 +704,7 @@ TEST_F(StorageInterfaceImplTest, CreateOplogCreateCappedCollection) { { AutoGetCollectionForReadCommand autoColl(opCtx, nss); ASSERT_TRUE(autoColl.getCollection()); - ASSERT_EQ(nss.toString(), autoColl.getCollection()->ns().toString()); + ASSERT_EQ(nss.toString_forTest(), autoColl.getCollection()->ns().toString_forTest()); ASSERT_TRUE(autoColl.getCollection()->isCapped()); } } @@ -714,12 +736,12 @@ TEST_F(StorageInterfaceImplTest, CreateCollectionFailsIfCollectionExists) { { AutoGetCollectionForReadCommand autoColl(opCtx, nss); ASSERT_TRUE(autoColl.getCollection()); - ASSERT_EQ(nss.toString(), autoColl.getCollection()->ns().toString()); + ASSERT_EQ(nss.toString_forTest(), autoColl.getCollection()->ns().toString_forTest()); } auto status = storage.createCollection(opCtx, nss, generateOptionsWithUuid()); ASSERT_EQUALS(ErrorCodes::NamespaceExists, status); ASSERT_STRING_CONTAINS(status.reason(), - str::stream() << "Collection " << nss.ns() << " already exists"); + str::stream() << "Collection " << nss.ns_forTest() << " already exists"); } TEST_F(StorageInterfaceImplTest, DropCollectionWorksWithExistingWithDataCollection) { @@ -746,11 +768,11 @@ TEST_F(StorageInterfaceImplTest, DropCollectionWorksWithMissingCollection) { auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - ASSERT_FALSE(AutoGetDb(opCtx, nss.db(), MODE_IS).getDb()); + ASSERT_FALSE(AutoGetDb(opCtx, nss.dbName(), MODE_IS).getDb()); ASSERT_OK(storage.dropCollection(opCtx, nss)); ASSERT_FALSE(AutoGetCollectionForReadCommand(opCtx, nss).getCollection()); // Database should not be created after running dropCollection. 
- ASSERT_FALSE(AutoGetDb(opCtx, nss.db(), MODE_IS).getDb()); + ASSERT_FALSE(AutoGetDb(opCtx, nss.dbName(), MODE_IS).getDb()); } TEST_F(StorageInterfaceImplTest, DropCollectionWorksWithSystemCollection) { @@ -2551,9 +2573,8 @@ TEST_F(StorageInterfaceImplTest, FindByIdReturnsNoSuchKeyWhenDocumentIsNotFound) {doc3, Timestamp(0), OpTime::kUninitializedTerm}})); ASSERT_EQUALS(ErrorCodes::NoSuchKey, storage.findById(opCtx, nss, doc2["_id"]).getStatus()); - ASSERT_EQUALS( - ErrorCodes::NoSuchKey, - storage.findById(opCtx, {nss.db().toString(), *options.uuid}, doc2["_id"]).getStatus()); + ASSERT_EQUALS(ErrorCodes::NoSuchKey, + storage.findById(opCtx, {nss.dbName(), *options.uuid}, doc2["_id"]).getStatus()); } TEST_F(StorageInterfaceImplTest, FindByIdReturnsDocumentWhenDocumentExists) { @@ -2573,9 +2594,9 @@ TEST_F(StorageInterfaceImplTest, FindByIdReturnsDocumentWhenDocumentExists) { {doc3, Timestamp(0), OpTime::kUninitializedTerm}})); ASSERT_BSONOBJ_EQ(doc2, unittest::assertGet(storage.findById(opCtx, nss, doc2["_id"]))); - ASSERT_BSONOBJ_EQ(doc2, - unittest::assertGet(storage.findById( - opCtx, {nss.db().toString(), *options.uuid}, doc2["_id"]))); + ASSERT_BSONOBJ_EQ( + doc2, + unittest::assertGet(storage.findById(opCtx, {nss.dbName(), *options.uuid}, doc2["_id"]))); } TEST_F(StorageInterfaceImplTest, FindByIdReturnsBadStatusIfPlanExecutorFails) { @@ -2645,7 +2666,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByIdReturnsNoSuchKeyWhenDocumentIsNotFoun ASSERT_EQUALS(ErrorCodes::NoSuchKey, storage.deleteById(opCtx, nss, doc2["_id"]).getStatus()); ASSERT_EQUALS( ErrorCodes::NoSuchKey, - storage.deleteById(opCtx, {nss.db().toString(), *options.uuid}, doc2["_id"]).getStatus()); + storage.deleteById(opCtx, {nss.dbName(), *options.uuid}, doc2["_id"]).getStatus()); _assertDocumentsInCollectionEquals(opCtx, nss, {doc1, doc3}); } @@ -2698,7 +2719,8 @@ TEST_F(StorageInterfaceImplTest, auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss = NamespaceString::createNamespaceString_forTest("mydb.coll"); - NamespaceString wrongColl(nss.db(), "wrongColl"_sd); + NamespaceString wrongColl = + NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd); ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid())); auto doc = BSON("_id" << 0 << "x" << 1); auto status = storage.upsertById(opCtx, wrongColl, doc["_id"], doc); @@ -2732,7 +2754,7 @@ TEST_F(StorageInterfaceImplTest, UpsertSingleDocumentReplacesExistingDocumentInC // Again, but specify the collection's UUID. 
ASSERT_OK(storage.upsertById( - opCtx, {nss.db().toString(), *options.uuid}, originalDoc["_id"], BSON("x" << 200))); + opCtx, {nss.dbName(), *options.uuid}, originalDoc["_id"], BSON("x" << 200))); _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 0 << "x" << 0), @@ -2764,10 +2786,8 @@ TEST_F(StorageInterfaceImplTest, UpsertSingleDocumentInsertsNewDocumentInCollect BSON("_id" << 2 << "x" << 2), BSON("_id" << 1 << "x" << 100)}); - ASSERT_OK(storage.upsertById(opCtx, - {nss.db().toString(), *options.uuid}, - BSON("" << 3).firstElement(), - BSON("x" << 300))); + ASSERT_OK(storage.upsertById( + opCtx, {nss.dbName(), *options.uuid}, BSON("" << 3).firstElement(), BSON("x" << 300))); _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 0 << "x" << 0), @@ -2805,7 +2825,7 @@ TEST_F(StorageInterfaceImplTest, BSON("_id" << 2 << "x" << 2)}); ASSERT_OK(storage.upsertById( - opCtx, {nss.db().toString(), *options.uuid}, originalDoc["_id"], BSON("x" << 200))); + opCtx, {nss.dbName(), *options.uuid}, originalDoc["_id"], BSON("x" << 200))); _assertDocumentsInCollectionEquals(opCtx, nss, {BSON("_id" << 0 << "x" << 0), @@ -2828,7 +2848,7 @@ TEST_F(StorageInterfaceImplTest, UpsertSingleDocumentReturnsFailedToParseOnNonSi "Unable to update document with a non-simple _id query:"); ASSERT_EQ(storage.upsertById(opCtx, - {nss.db().toString(), *options.uuid}, + {nss.dbName(), *options.uuid}, BSON("" << BSON("$gt" << 3)).firstElement(), BSON("x" << 100)), ErrorCodes::InvalidIdField); @@ -2850,7 +2870,7 @@ TEST_F(StorageInterfaceImplTest, ASSERT_STRING_CONTAINS(status.reason(), "Unable to update document in a collection without an _id index."); - ASSERT_EQ(storage.upsertById(opCtx, {nss.db().toString(), *options.uuid}, doc["_id"], doc), + ASSERT_EQ(storage.upsertById(opCtx, {nss.dbName(), *options.uuid}, doc["_id"], doc), ErrorCodes::IndexNotFound); } @@ -2871,12 +2891,11 @@ TEST_F(StorageInterfaceImplTest, "Unknown modifier: $unknownUpdateOp. Expected a valid update modifier or pipeline-style " "update specified as an array"); - ASSERT_THROWS_CODE(storage.upsertById(opCtx, - {nss.db().toString(), *options.uuid}, - BSON("" << 1).firstElement(), - unknownUpdateOp), - DBException, - ErrorCodes::FailedToParse); + ASSERT_THROWS_CODE( + storage.upsertById( + opCtx, {nss.dbName(), *options.uuid}, BSON("" << 1).firstElement(), unknownUpdateOp), + DBException, + ErrorCodes::FailedToParse); } TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenDatabaseDoesNotExist) { @@ -2975,7 +2994,8 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenColle auto opCtx = getOperationContext(); StorageInterfaceImpl storage; NamespaceString nss = NamespaceString::createNamespaceString_forTest("mydb.coll"); - NamespaceString wrongColl(nss.db(), "wrongColl"_sd); + NamespaceString wrongColl = + NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd); ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid())); auto filter = BSON("x" << 1); auto status = storage.deleteByFilter(opCtx, wrongColl, filter); @@ -2983,7 +3003,7 @@ TEST_F(StorageInterfaceImplTest, DeleteByFilterReturnsNamespaceNotFoundWhenColle ASSERT_EQUALS(std::string( str::stream() << "Collection [mydb.wrongColl] not found. 
Unable to delete documents in " - << wrongColl.ns() << " using filter " << filter), + << wrongColl.ns_forTest() << " using filter " << filter), status.reason()); } @@ -3145,7 +3165,8 @@ TEST_F(StorageInterfaceImplTest, auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - NamespaceString wrongColl(nss.db(), "wrongColl"_sd); + NamespaceString wrongColl = + NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd); ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid())); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, storage.getCollectionCount(opCtx, wrongColl).getStatus()); @@ -3188,7 +3209,8 @@ TEST_F(StorageInterfaceImplTest, auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - NamespaceString wrongColl(nss.db(), "wrongColl"_sd); + NamespaceString wrongColl = + NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd); ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid())); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, storage.setCollectionCount(opCtx, wrongColl, 3)); } @@ -3206,7 +3228,8 @@ TEST_F(StorageInterfaceImplTest, auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - NamespaceString wrongColl(nss.db(), "wrongColl"_sd); + NamespaceString wrongColl = + NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd); ASSERT_OK(storage.createCollection(opCtx, nss, generateOptionsWithUuid())); ASSERT_EQUALS(ErrorCodes::NamespaceNotFound, storage.getCollectionSize(opCtx, wrongColl).getStatus()); @@ -3249,7 +3272,8 @@ TEST_F(StorageInterfaceImplTest, SetIndexIsMultikeyReturnsNamespaceNotFoundForMi auto opCtx = getOperationContext(); StorageInterfaceImpl storage; auto nss = makeNamespace(_agent); - NamespaceString wrongColl(nss.db(), "wrongColl"_sd); + NamespaceString wrongColl = + NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd); ASSERT_OK(storage.createCollection(opCtx, nss, CollectionOptions())); ASSERT_EQUALS( ErrorCodes::NamespaceNotFound, @@ -3262,7 +3286,8 @@ TEST_F(StorageInterfaceImplTest, SetIndexIsMultikeyLooksUpCollectionByUUID) { auto nss = makeNamespace(_agent); auto options = generateOptionsWithUuid(); ASSERT_OK(storage.createCollection(opCtx, nss, options)); - NamespaceString wrongColl(nss.db(), "wrongColl"_sd); + NamespaceString wrongColl = + NamespaceString::createNamespaceString_forTest(nss.dbName(), "wrongColl"_sd); ASSERT_EQUALS(ErrorCodes::IndexNotFound, storage.setIndexIsMultikey( opCtx, wrongColl, *options.uuid, "foo", {}, {}, Timestamp(3, 3))); diff --git a/src/mongo/db/repl/storage_interface_mock.cpp b/src/mongo/db/repl/storage_interface_mock.cpp index 3cd4344151e3c..cee9d5532f4bf 100644 --- a/src/mongo/db/repl/storage_interface_mock.cpp +++ b/src/mongo/db/repl/storage_interface_mock.cpp @@ -27,14 +27,14 @@ * it in the license file. 
*/ -#include +#include +#include -#include "mongo/platform/basic.h" +#include #include "mongo/db/repl/storage_interface_mock.h" - #include "mongo/logv2/log.h" -#include "mongo/util/str.h" +#include "mongo/logv2/log_component.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -109,7 +109,7 @@ Status CollectionBulkLoaderMock::init(const std::vector& secondaryIndex Status CollectionBulkLoaderMock::insertDocuments(const std::vector::const_iterator begin, const std::vector::const_iterator end) { LOGV2_DEBUG(21758, 1, "CollectionBulkLoaderMock::insertDocuments called"); - const auto status = insertDocsFn(begin, end); + auto status = insertDocsFn(begin, end); // Only count if it succeeds. if (status.isOK()) { diff --git a/src/mongo/db/repl/storage_interface_mock.h b/src/mongo/db/repl/storage_interface_mock.h index 31831f8cbd73e..a94769c2738ec 100644 --- a/src/mongo/db/repl/storage_interface_mock.h +++ b/src/mongo/db/repl/storage_interface_mock.h @@ -34,16 +34,33 @@ #include #include #include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/repl/collection_bulk_loader.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_string.h" #include "mongo/platform/mutex.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -68,13 +85,6 @@ class CollectionBulkLoaderMock : public CollectionBulkLoader { std::vector::const_iterator end) override; Status commit() override; - std::string toString() const override { - return toBSON().toString(); - }; - BSONObj toBSON() const override { - return BSONObj(); - }; - std::shared_ptr stats; // Override functions. diff --git a/src/mongo/db/repl/storage_timestamp_test.cpp b/src/mongo/db/repl/storage_timestamp_test.cpp index 355d2d9181ce6..dd3b33df29f2d 100644 --- a/src/mongo/db/repl/storage_timestamp_test.cpp +++ b/src/mongo/db/repl/storage_timestamp_test.cpp @@ -27,72 +27,164 @@ * it in the license file. 
*/ +#include +#include +#include +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/mutable/document.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/catalog/drop_database.h" #include "mongo/db/catalog/drop_indexes.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/multi_index_block.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/field_ref.h" #include "mongo/db/global_settings.h" #include "mongo/db/index/index_build_interceptor.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index/wildcard_access_method.h" +#include "mongo/db/index/multikey_metadata_access_stats.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index/skipped_record_tracker.h" #include "mongo/db/index_build_entry_helpers.h" -#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/logical_time.h" #include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/wildcard_multikey_paths.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/apply_ops.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" -#include "mongo/db/repl/multiapplier.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/oplog_applier_impl.h" -#include "mongo/db/repl/oplog_applier_impl_test_fixture.h" +#include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" #include "mongo/db/repl/oplog_entry_test_helpers.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_consistency_markers.h" +#include 
"mongo/db/repl/replication_consistency_markers_gen.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/replication_recovery.h" #include "mongo/db/repl/replication_recovery_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/repl/timestamp_block.h" -#include "mongo/db/s/collection_sharding_state_factory_shard.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/session/session.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" +#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/storage/snapshot_manager.h" -#include "mongo/db/storage/storage_engine_impl.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction/transaction_participant_gen.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/stdx/future.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/future.h" // IWYU pragma: keep +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/stacktrace.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -346,7 +438,7 @@ class StorageTimestampTest : public ServiceContextMongoDTest { } void create(NamespaceString nss) const { - ::mongo::writeConflictRetry(_opCtx, "deleteAll", nss.ns(), [&] { + ::mongo::writeConflictRetry(_opCtx, "deleteAll", nss, [&] { 
_opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp); _opCtx->recoveryUnit()->abandonSnapshot(); AutoGetCollection collRaii(_opCtx, nss, LockMode::MODE_X); @@ -437,7 +529,11 @@ class StorageTimestampTest : public ServiceContextMongoDTest { std::shared_ptr getMetaDataAtTime( DurableCatalog* durableCatalog, RecordId catalogId, const Timestamp& ts) { OneOffRead oor(_opCtx, ts); - return durableCatalog->getMetaData(_opCtx, catalogId); + auto catalogEntry = durableCatalog->getParsedCatalogEntry(_opCtx, catalogId); + if (!catalogEntry) { + return nullptr; + } + return catalogEntry->metadata; } StatusWith doApplyOps(const DatabaseName& dbName, @@ -506,11 +602,11 @@ class StorageTimestampTest : public ServiceContextMongoDTest { const BSONObj& expectedDoc) { OneOffRead oor(_opCtx, ts); if (expectedDoc.isEmpty()) { - ASSERT_EQ(0, itCount(coll)) - << "Should not find any documents in " << coll->ns() << " at ts: " << ts; + ASSERT_EQ(0, itCount(coll)) << "Should not find any documents in " + << coll->ns().toStringForErrorMsg() << " at ts: " << ts; } else { - ASSERT_EQ(1, itCount(coll)) - << "Should find one document in " << coll->ns() << " at ts: " << ts; + ASSERT_EQ(1, itCount(coll)) << "Should find one document in " + << coll->ns().toStringForErrorMsg() << " at ts: " << ts; auto doc = findOne(coll); ASSERT_EQ(0, SimpleBSONObjComparator::kInstance.compare(doc, expectedDoc)) << "Doc: " << doc.toString() << " Expected: " << expectedDoc.toString(); @@ -525,11 +621,12 @@ class StorageTimestampTest : public ServiceContextMongoDTest { BSONObj doc; bool found = Helpers::findOne(_opCtx, coll, query, doc); if (!expectedDoc) { - ASSERT_FALSE(found) << "Should not find any documents in " << coll->ns() << " matching " - << query << " at ts: " << ts; + ASSERT_FALSE(found) << "Should not find any documents in " + << coll->ns().toStringForErrorMsg() << " matching " << query + << " at ts: " << ts; } else { - ASSERT(found) << "Should find document in " << coll->ns() << " matching " << query - << " at ts: " << ts; + ASSERT(found) << "Should find document in " << coll->ns().toStringForErrorMsg() + << " matching " << query << " at ts: " << ts; ASSERT_BSONOBJ_EQ(doc, *expectedDoc); } } @@ -593,10 +690,11 @@ class StorageTimestampTest : public ServiceContextMongoDTest { auto found = std::find(idents.begin(), idents.end(), expectedIdent); if (shouldExpect) { - ASSERT(found != idents.end()) << nss.ns() << " was not found at " << ts.toString(); + ASSERT(found != idents.end()) + << nss.toStringForErrorMsg() << " was not found at " << ts.toString(); } else { - ASSERT(found == idents.end()) << nss.ns() << " was found at " << ts.toString() - << " when it should not have been."; + ASSERT(found == idents.end()) << nss.toStringForErrorMsg() << " was found at " + << ts.toString() << " when it should not have been."; } } @@ -822,24 +920,24 @@ TEST_F(StorageTimestampTest, SecondaryInsertTimes) { const LogicalTime firstInsertTime = _clock->tickClusterTime(docsToInsert); for (std::int32_t idx = 0; idx < docsToInsert; ++idx) { BSONObjBuilder result; - ASSERT_OK(applyOps( - _opCtx, - nss.dbName(), - BSON("applyOps" << BSON_ARRAY( - BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL << "v" - << 2 << "op" - << "i" - << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() - << "wall" << Date_t() << "o" << BSON("_id" << idx)) - << BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" << 1LL - << "op" - << "c" - << "ns" - << "test.$cmd" - << "wall" << Date_t() << "o" - << 
BSON("applyOps" << BSONArrayBuilder().arr())))), - repl::OplogApplication::Mode::kApplyOpsCmd, - &result)); + ASSERT_OK(applyOps(_opCtx, + nss.dbName(), + BSON("applyOps" << BSON_ARRAY( + BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() << "t" + << 1LL << "v" << 2 << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" + << autoColl.getCollection()->uuid() << "wall" + << Date_t() << "o" << BSON("_id" << idx)) + << BSON("ts" << firstInsertTime.addTicks(idx).asTimestamp() + << "t" << 1LL << "op" + << "c" + << "ns" + << "test.$cmd" + << "wall" << Date_t() << "o" + << BSON("applyOps" << BSONArrayBuilder().arr())))), + repl::OplogApplication::Mode::kApplyOpsCmd, + &result)); } for (std::int32_t idx = 0; idx < docsToInsert; ++idx) { @@ -871,8 +969,8 @@ TEST_F(StorageTimestampTest, SecondaryArrayInsertTimes) { AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_IX); oplogCommonBuilder << "v" << 2 << "op" << "i" - << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() << "wall" - << Date_t(); + << "ns" << nss.ns_forTest() << "ui" << autoColl.getCollection()->uuid() + << "wall" << Date_t(); } auto oplogCommon = oplogCommonBuilder.done(); @@ -941,14 +1039,14 @@ TEST_F(StorageTimestampTest, SecondaryDeleteTimes) { // Delete all documents one at a time. const LogicalTime startDeleteTime = _clock->tickClusterTime(docsToInsert); for (std::int32_t num = 0; num < docsToInsert; ++num) { - ASSERT_OK( - doApplyOps(nss.dbName(), - {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp() << "t" << 0LL - << "v" << 2 << "op" - << "d" - << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() - << "wall" << Date_t() << "o" << BSON("_id" << num))}) - .getStatus()); + ASSERT_OK(doApplyOps(nss.dbName(), + {BSON("ts" << startDeleteTime.addTicks(num).asTimestamp() << "t" << 0LL + << "v" << 2 << "op" + << "d" + << "ns" << nss.ns_forTest() << "ui" + << autoColl.getCollection()->uuid() << "wall" << Date_t() + << "o" << BSON("_id" << num))}) + .getStatus()); } for (std::int32_t num = 0; num <= docsToInsert; ++num) { @@ -1018,7 +1116,7 @@ TEST_F(StorageTimestampTest, SecondaryUpdateTimes) { {BSON("ts" << firstUpdateTime.addTicks(idx).asTimestamp() << "t" << 0LL << "v" << 2 << "op" << "u" - << "ns" << nss.ns() << "ui" + << "ns" << nss.ns_forTest() << "ui" << autoColl.getCollection()->uuid() << "wall" << Date_t() << "o2" << BSON("_id" << 0) << "o" << updates[idx].first)}) .getStatus()); @@ -1052,16 +1150,16 @@ TEST_F(StorageTimestampTest, SecondaryInsertToUpsert) { // on the same collection with `{_id: 0}`. It's expected for this second insert to be // turned into an upsert. The goal document does not contain `field: 0`. 
BSONObjBuilder resultBuilder; - auto result = unittest::assertGet( - doApplyOps(nss.dbName(), - {BSON("ts" << insertTime.asTimestamp() << "t" << 1LL << "op" - << "i" - << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() - << "wall" << Date_t() << "o" << BSON("_id" << 0 << "field" << 0)), - BSON("ts" << insertTime.addTicks(1).asTimestamp() << "t" << 1LL << "op" - << "i" - << "ns" << nss.ns() << "ui" << autoColl.getCollection()->uuid() - << "wall" << Date_t() << "o" << BSON("_id" << 0))})); + auto result = unittest::assertGet(doApplyOps( + nss.dbName(), + {BSON("ts" << insertTime.asTimestamp() << "t" << 1LL << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << autoColl.getCollection()->uuid() << "wall" + << Date_t() << "o" << BSON("_id" << 0 << "field" << 0)), + BSON("ts" << insertTime.addTicks(1).asTimestamp() << "t" << 1LL << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << autoColl.getCollection()->uuid() << "wall" + << Date_t() << "o" << BSON("_id" << 0))})); ASSERT_EQ(2, result.getIntField("applied")); ASSERT(result["results"].Array()[0].Bool()); @@ -1102,8 +1200,8 @@ TEST_F(StorageTimestampTest, SecondaryCreateCollection) { { BSON("ts" << _presentTs << "t" << 1LL << "op" << "c" - << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns() << "wall" - << Date_t() << "o" << BSON("create" << nss.coll())), + << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns_forTest() + << "wall" << Date_t() << "o" << BSON("create" << nss.coll())), }); ASSERT_OK(swResult); @@ -1135,15 +1233,15 @@ TEST_F(StorageTimestampTest, SecondaryCreateTwoCollections) { BSONObjBuilder resultBuilder; auto swResult = - doApplyOps(DatabaseName(dbName), + doApplyOps(DatabaseName::createDatabaseName_forTest(boost::none, dbName), { BSON("ts" << _presentTs << "t" << 1LL << "op" << "c" - << "ui" << UUID::gen() << "ns" << nss1.getCommandNS().ns() + << "ui" << UUID::gen() << "ns" << nss1.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("create" << nss1.coll())), BSON("ts" << _futureTs << "t" << 1LL << "op" << "c" - << "ui" << UUID::gen() << "ns" << nss2.getCommandNS().ns() + << "ui" << UUID::gen() << "ns" << nss2.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("create" << nss2.coll())), }); ASSERT_OK(swResult); @@ -1194,19 +1292,19 @@ TEST_F(StorageTimestampTest, SecondaryCreateCollectionBetweenInserts) { BSONObjBuilder resultBuilder; auto swResult = doApplyOps( - DatabaseName(dbName), + DatabaseName::createDatabaseName_forTest(boost::none, dbName), { BSON("ts" << _presentTs << "t" << 1LL << "op" << "i" - << "ns" << nss1.ns() << "ui" << autoColl.getCollection()->uuid() << "wall" - << Date_t() << "o" << doc1), + << "ns" << nss1.ns_forTest() << "ui" << autoColl.getCollection()->uuid() + << "wall" << Date_t() << "o" << doc1), BSON("ts" << _futureTs << "t" << 1LL << "op" << "c" - << "ui" << uuid2 << "ns" << nss2.getCommandNS().ns() << "wall" << Date_t() - << "o" << BSON("create" << nss2.coll())), + << "ui" << uuid2 << "ns" << nss2.getCommandNS().ns_forTest() << "wall" + << Date_t() << "o" << BSON("create" << nss2.coll())), BSON("ts" << insert2Ts << "t" << 1LL << "op" << "i" - << "ns" << nss2.ns() << "ui" << uuid2 << "wall" << Date_t() << "o" + << "ns" << nss2.ns_forTest() << "ui" << uuid2 << "wall" << Date_t() << "o" << doc2), }); ASSERT_OK(swResult); @@ -1256,8 +1354,8 @@ TEST_F(StorageTimestampTest, PrimaryCreateCollectionInApplyOps) { { BSON("ts" << _presentTs << "t" << 1LL << "op" << "c" - << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns() << "wall" - << Date_t() 
<< "o" << BSON("create" << nss.coll())), + << "ui" << UUID::gen() << "ns" << nss.getCommandNS().ns_forTest() + << "wall" << Date_t() << "o" << BSON("create" << nss.coll())), }); ASSERT_OK(swResult); @@ -1270,7 +1368,7 @@ TEST_F(StorageTimestampTest, PrimaryCreateCollectionInApplyOps) { // The next logOp() call will get 'futureTs', which will be the timestamp at which we do // the write. Thus we expect the write to appear at 'futureTs' and not before. ASSERT_EQ(op.getTimestamp(), _futureTs) << op.toBSONForLogging(); - ASSERT_EQ(op.getNss().ns(), nss.getCommandNS().ns()) << op.toBSONForLogging(); + ASSERT_EQ(op.getNss().ns_forTest(), nss.getCommandNS().ns_forTest()) << op.toBSONForLogging(); ASSERT_BSONOBJ_EQ(op.getObject(), BSON("create" << nss.coll())); assertNamespaceInIdents(nss, _pastTs, false); @@ -1294,7 +1392,7 @@ TEST_F(StorageTimestampTest, SecondarySetIndexMultikeyOnInsert) { auto indexName = "a_1"; auto indexSpec = BSON("name" << indexName << "key" << BSON("a" << 1) << "v" << static_cast(kIndexVersion)); - ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns(), indexSpec)); + ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns_forTest(), indexSpec)); _coordinatorMock->alwaysAllowWrites(false); @@ -1306,18 +1404,18 @@ TEST_F(StorageTimestampTest, SecondarySetIndexMultikeyOnInsert) { BSONObj doc0 = BSON("_id" << 0 << "a" << 3); BSONObj doc1 = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2)); BSONObj doc2 = BSON("_id" << 2 << "a" << BSON_ARRAY(1 << 2)); - auto op0 = repl::OplogEntry( - BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2 << "op" - << "i" - << "ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" << doc0)); - auto op1 = repl::OplogEntry( - BSON("ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2 << "op" - << "i" - << "ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" << doc1)); - auto op2 = repl::OplogEntry( - BSON("ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2 << "op" - << "i" - << "ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" << doc2)); + auto op0 = repl::OplogEntry(BSON( + "ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2 << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() << "o" << doc0)); + auto op1 = repl::OplogEntry(BSON( + "ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2 << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() << "o" << doc1)); + auto op2 = repl::OplogEntry(BSON( + "ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2 << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() << "o" << doc2)); std::vector ops = {op0, op1, op2}; DoNothingOplogApplierObserver observer; @@ -1367,7 +1465,7 @@ TEST_F(StorageTimestampTest, SecondarySetWildcardIndexMultikeyOnInsert) { auto indexName = "a_1"; auto indexSpec = BSON("name" << indexName << "key" << BSON("$**" << 1) << "v" << static_cast(kIndexVersion)); - ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns(), indexSpec)); + ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns_forTest(), indexSpec)); _coordinatorMock->alwaysAllowWrites(false); @@ -1378,18 +1476,18 @@ TEST_F(StorageTimestampTest, SecondarySetWildcardIndexMultikeyOnInsert) { BSONObj doc0 = BSON("_id" << 0 << "a" << 3); BSONObj doc1 = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2)); BSONObj doc2 = BSON("_id" << 2 << "a" << BSON_ARRAY(1 << 2)); - auto op0 = repl::OplogEntry( - BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2 << "op" - << "i" - << 
"ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" << doc0)); - auto op1 = repl::OplogEntry( - BSON("ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2 << "op" - << "i" - << "ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" << doc1)); - auto op2 = repl::OplogEntry( - BSON("ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2 << "op" - << "i" - << "ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" << doc2)); + auto op0 = repl::OplogEntry(BSON( + "ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2 << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() << "o" << doc0)); + auto op1 = repl::OplogEntry(BSON( + "ts" << insertTime1.asTimestamp() << "t" << 1LL << "v" << 2 << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() << "o" << doc1)); + auto op2 = repl::OplogEntry(BSON( + "ts" << insertTime2.asTimestamp() << "t" << 1LL << "v" << 2 << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() << "o" << doc2)); // Coerce oplog application to apply op2 before op1. This does not guarantee the actual // order of application however, because the oplog applier applies these operations in @@ -1416,18 +1514,14 @@ TEST_F(StorageTimestampTest, SecondarySetWildcardIndexMultikeyOnInsert) { AutoGetCollectionForRead autoColl(_opCtx, nss); auto wildcardIndexDescriptor = autoColl.getCollection()->getIndexCatalog()->findIndexByName(_opCtx, indexName); - const IndexAccessMethod* wildcardIndexAccessMethod = autoColl.getCollection() - ->getIndexCatalog() - ->getEntry(wildcardIndexDescriptor) - ->accessMethod(); + const IndexCatalogEntry* entry = + autoColl.getCollection()->getIndexCatalog()->getEntry(wildcardIndexDescriptor); { // Verify that, even though op2 was applied first, the multikey state is observed in all // WiredTiger transactions that can contain the data written by op1. OneOffRead oor(_opCtx, insertTime1.asTimestamp()); - const WildcardAccessMethod* wam = - dynamic_cast(wildcardIndexAccessMethod); MultikeyMetadataAccessStats stats; - std::set paths = getWildcardMultikeyPathSet(wam, _opCtx, &stats); + std::set paths = getWildcardMultikeyPathSet(_opCtx, entry, &stats); ASSERT_EQUALS(1, paths.size()); ASSERT_EQUALS("a", paths.begin()->dottedField()); } @@ -1440,10 +1534,8 @@ TEST_F(StorageTimestampTest, SecondarySetWildcardIndexMultikeyOnInsert) { // we were to construct a query plan that incorrectly believes a path is NOT multikey, // it could produce incorrect results. 
OneOffRead oor(_opCtx, insertTime0.asTimestamp()); - const WildcardAccessMethod* wam = - dynamic_cast(wildcardIndexAccessMethod); MultikeyMetadataAccessStats stats; - std::set paths = getWildcardMultikeyPathSet(wam, _opCtx, &stats); + std::set paths = getWildcardMultikeyPathSet(_opCtx, entry, &stats); ASSERT_EQUALS(1, paths.size()); ASSERT_EQUALS("a", paths.begin()->dottedField()); } @@ -1464,7 +1556,7 @@ TEST_F(StorageTimestampTest, SecondarySetWildcardIndexMultikeyOnUpdate) { auto indexName = "a_1"; auto indexSpec = BSON("name" << indexName << "key" << BSON("$**" << 1) << "v" << static_cast(kIndexVersion)); - ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns(), indexSpec)); + ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns_forTest(), indexSpec)); _coordinatorMock->alwaysAllowWrites(false); @@ -1475,20 +1567,20 @@ TEST_F(StorageTimestampTest, SecondarySetWildcardIndexMultikeyOnUpdate) { BSONObj doc0 = fromjson("{_id: 0, a: 3}"); BSONObj doc1 = fromjson("{$v: 2, diff: {u: {a: [1,2]}}}"); BSONObj doc2 = fromjson("{$v: 2, diff: {u: {a: [1,2]}}}"); - auto op0 = repl::OplogEntry( - BSON("ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2 << "op" - << "i" - << "ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" << doc0)); + auto op0 = repl::OplogEntry(BSON( + "ts" << insertTime0.asTimestamp() << "t" << 1LL << "v" << 2 << "op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() << "o" << doc0)); auto op1 = repl::OplogEntry(BSON("ts" << updateTime1.asTimestamp() << "t" << 1LL << "v" << 2 << "op" << "u" - << "ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" - << doc1 << "o2" << BSON("_id" << 0))); + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() + << "o" << doc1 << "o2" << BSON("_id" << 0))); auto op2 = repl::OplogEntry(BSON("ts" << updateTime2.asTimestamp() << "t" << 1LL << "v" << 2 << "op" << "u" - << "ns" << nss.ns() << "ui" << uuid << "wall" << Date_t() << "o" - << doc2 << "o2" << BSON("_id" << 0))); + << "ns" << nss.ns_forTest() << "ui" << uuid << "wall" << Date_t() + << "o" << doc2 << "o2" << BSON("_id" << 0))); // Coerce oplog application to apply op2 before op1. This does not guarantee the actual // order of application however, because the oplog applier applies these operations in @@ -1515,18 +1607,14 @@ TEST_F(StorageTimestampTest, SecondarySetWildcardIndexMultikeyOnUpdate) { AutoGetCollectionForRead autoColl(_opCtx, nss); auto wildcardIndexDescriptor = autoColl.getCollection()->getIndexCatalog()->findIndexByName(_opCtx, indexName); - const IndexAccessMethod* wildcardIndexAccessMethod = autoColl.getCollection() - ->getIndexCatalog() - ->getEntry(wildcardIndexDescriptor) - ->accessMethod(); + const IndexCatalogEntry* entry = + autoColl.getCollection()->getIndexCatalog()->getEntry(wildcardIndexDescriptor); { // Verify that, even though op2 was applied first, the multikey state is observed in all // WiredTiger transactions that can contain the data written by op1. 
OneOffRead oor(_opCtx, updateTime1.asTimestamp()); - const WildcardAccessMethod* wam = - dynamic_cast(wildcardIndexAccessMethod); MultikeyMetadataAccessStats stats; - std::set paths = getWildcardMultikeyPathSet(wam, _opCtx, &stats); + std::set paths = getWildcardMultikeyPathSet(_opCtx, entry, &stats); ASSERT_EQUALS(1, paths.size()); ASSERT_EQUALS("a", paths.begin()->dottedField()); } @@ -1539,10 +1627,8 @@ TEST_F(StorageTimestampTest, SecondarySetWildcardIndexMultikeyOnUpdate) { // we were to construct a query plan that incorrectly believes a path is NOT multikey, // it could produce incorrect results. OneOffRead oor(_opCtx, insertTime0.asTimestamp()); - const WildcardAccessMethod* wam = - dynamic_cast(wildcardIndexAccessMethod); MultikeyMetadataAccessStats stats; - std::set paths = getWildcardMultikeyPathSet(wam, _opCtx, &stats); + std::set paths = getWildcardMultikeyPathSet(_opCtx, entry, &stats); ASSERT_EQUALS(1, paths.size()); ASSERT_EQUALS("a", paths.begin()->dottedField()); } @@ -1556,7 +1642,7 @@ TEST_F(StorageTimestampTest, PrimarySetIndexMultikeyOnInsert) { auto indexName = "a_1"; auto indexSpec = BSON("name" << indexName << "key" << BSON("a" << 1) << "v" << static_cast(kIndexVersion)); - ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns(), indexSpec)); + ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns_forTest(), indexSpec)); const LogicalTime pastTime = _clock->tickClusterTime(1); const LogicalTime insertTime = pastTime.addTicks(1); @@ -1583,7 +1669,7 @@ TEST_F(StorageTimestampTest, PrimarySetIndexMultikeyOnInsertUnreplicated) { auto indexName = "a_1"; auto indexSpec = BSON("name" << indexName << "key" << BSON("a" << 1) << "v" << static_cast(kIndexVersion)); - ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns(), indexSpec)); + ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns_forTest(), indexSpec)); const LogicalTime pastTime = _clock->tickClusterTime(1); const LogicalTime insertTime = pastTime.addTicks(1); @@ -1612,11 +1698,11 @@ TEST_F(StorageTimestampTest, PrimarySetsMultikeyInsideMultiDocumentTransaction) create(nss); auto indexName = "a_1"; - auto indexSpec = BSON("name" << indexName << "ns" << nss.ns() << "key" << BSON("a" << 1) << "v" - << static_cast(kIndexVersion)); + auto indexSpec = BSON("name" << indexName << "ns" << nss.ns_forTest() << "key" << BSON("a" << 1) + << "v" << static_cast(kIndexVersion)); auto doc = BSON("_id" << 1 << "a" << BSON_ARRAY(1 << 2)); - ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns(), indexSpec)); + ASSERT_OK(createIndexFromSpec(_opCtx, _clock, nss.ns_forTest(), indexSpec)); const auto currentTime = _clock->getTime(); const auto presentTs = currentTime.clusterTime().asTimestamp(); @@ -1787,7 +1873,8 @@ class KVDropDatabase : public StorageTimestampTest { // no leftover collections carry-over. 
const NamespaceString nss = NamespaceString::createNamespaceString_forTest("unittestsDropDB.kvDropDatabase"); - const NamespaceString sysProfile("unittestsDropDB.system.profile"); + const NamespaceString sysProfile = + NamespaceString::createNamespaceString_forTest("unittestsDropDB.system.profile"); std::string collIdent; std::string indexIdent; @@ -2201,7 +2288,8 @@ TEST_F(StorageTimestampTest, TimestampMultiIndexBuildsDuringRename) { AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X); - NamespaceString renamedNss("unittestsRename.timestampMultiIndexBuildsDuringRename"); + NamespaceString renamedNss = NamespaceString::createNamespaceString_forTest( + "unittestsRename.timestampMultiIndexBuildsDuringRename"); create(renamedNss); // Save the pre-state idents so we can capture the specific ident related to index @@ -2210,10 +2298,11 @@ TEST_F(StorageTimestampTest, TimestampMultiIndexBuildsDuringRename) { // Rename collection. BSONObj renameResult; - ASSERT(client.runCommand( - DatabaseName(boost::none, "admin"), - BSON("renameCollection" << nss.ns() << "to" << renamedNss.ns() << "dropTarget" << true), - renameResult)) + ASSERT(client.runCommand(DatabaseName::kAdmin, + BSON("renameCollection" << nss.ns_forTest() << "to" + << renamedNss.ns_forTest() << "dropTarget" + << true), + renameResult)) << renameResult; NamespaceString tmpName; @@ -2221,12 +2310,12 @@ TEST_F(StorageTimestampTest, TimestampMultiIndexBuildsDuringRename) { // Empty temporary collections generate createIndexes oplog entry even if the node // supports 2 phase index build. const auto createIndexesDocument = - queryOplog(BSON("ns" << renamedNss.db() + ".$cmd" + queryOplog(BSON("ns" << renamedNss.db_forTest() + ".$cmd" << "o.createIndexes" << BSON("$exists" << true) << "o.name" << "b_1")); const auto tmpCollName = createIndexesDocument.getObjectField("o").getStringField("createIndexes"); - tmpName = NamespaceString::createNamespaceString_forTest(renamedNss.db(), tmpCollName); + tmpName = NamespaceString::createNamespaceString_forTest(renamedNss.db_forTest(), tmpCollName); indexCommitTs = createIndexesDocument["ts"].timestamp(); const Timestamp indexCreateInitTs = queryOplog(BSON("op" << "c" @@ -2330,12 +2419,12 @@ TEST_F(StorageTimestampTest, TimestampAbortIndexBuild) { // Confirm that startIndexBuild and abortIndexBuild oplog entries have been written to the // oplog. auto indexStartDocument = - queryOplog(BSON("ns" << nss.db() + ".$cmd" + queryOplog(BSON("ns" << nss.db_forTest() + ".$cmd" << "o.startIndexBuild" << nss.coll() << "o.indexes.0.name" << "a_1")); auto indexStartTs = indexStartDocument["ts"].timestamp(); auto indexAbortDocument = - queryOplog(BSON("ns" << nss.db() + ".$cmd" + queryOplog(BSON("ns" << nss.db_forTest() + ".$cmd" << "o.abortIndexBuild" << nss.coll() << "o.indexes.0.name" << "a_1")); auto indexAbortTs = indexAbortDocument["ts"].timestamp(); @@ -2599,8 +2688,12 @@ TEST_F(StorageTimestampTest, IndexBuildsResolveErrorsDuringStateChangeToPrimary) NamespaceString::createNamespaceString_forTest("unittests.timestampIndexBuilds"); create(nss); - AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X); - CollectionWriter collection(_opCtx, autoColl); + auto collectionAcquisition = acquireCollection( + _opCtx, + CollectionAcquisitionRequest::fromOpCtx(_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_X); + + CollectionWriter collection(_opCtx, &collectionAcquisition); // Indexing of parallel arrays is not allowed, so these are deemed "bad". 
const auto badDoc1 = BSON("_id" << 0 << "a" << BSON_ARRAY(0 << 1) << "b" << BSON_ARRAY(0 << 1)); @@ -2651,7 +2744,8 @@ TEST_F(StorageTimestampTest, IndexBuildsResolveErrorsDuringStateChangeToPrimary) collection, {BSON("v" << 2 << "name" << "a_1_b_1" - << "ns" << collection->ns().ns() << "key" << BSON("a" << 1 << "b" << 1))}, + << "ns" << collection->ns().ns_forTest() << "key" + << BSON("a" << 1 << "b" << 1))}, MultiIndexBlock::makeTimestampedIndexOnInitFn(_opCtx, collection.get())); ASSERT_OK(swSpecs.getStatus()); } @@ -2708,12 +2802,15 @@ TEST_F(StorageTimestampTest, IndexBuildsResolveErrorsDuringStateChangeToPrimary) // Update one documents to be valid, and delete the other. These modifications are written // to the side writes table and must be drained. - Helpers::upsert(_opCtx, collection->ns(), BSON("_id" << 0 << "a" << 1 << "b" << 1)); + Helpers::upsert(_opCtx, collectionAcquisition, BSON("_id" << 0 << "a" << 1 << "b" << 1)); { RecordId badRecord = Helpers::findOne(_opCtx, collection.get(), BSON("_id" << 1)); WriteUnitOfWork wuow(_opCtx); - collection_internal::deleteDocument( - _opCtx, *autoColl, kUninitializedStmtId, badRecord, nullptr); + collection_internal::deleteDocument(_opCtx, + collectionAcquisition.getCollectionPtr(), + kUninitializedStmtId, + badRecord, + nullptr); wuow.commit(); } @@ -2786,7 +2883,7 @@ TEST_F(StorageTimestampTest, SecondaryReadsDuringBatchApplicationAreAllowed) { BSONObj doc0 = BSON("_id" << 0 << "a" << 0); auto insertOp = repl::OplogEntry(BSON("ts" << _futureTs << "t" << 1LL << "v" << 2 << "op" << "i" - << "ns" << ns.ns() << "ui" << uuid << "wall" + << "ns" << ns.ns_forTest() << "ui" << uuid << "wall" << Date_t() << "o" << doc0)); DoNothingOplogApplierObserver observer; // Apply the operation. @@ -2952,13 +3049,14 @@ TEST_F(StorageTimestampTest, ViewCreationSeparateTransaction) { auto storageEngine = _opCtx->getServiceContext()->getStorageEngine(); auto durableCatalog = storageEngine->getCatalog(); - const NamespaceString backingCollNss("unittests.backingColl"); + const NamespaceString backingCollNss = + NamespaceString::createNamespaceString_forTest("unittests.backingColl"); create(backingCollNss); const NamespaceString viewNss = NamespaceString::createNamespaceString_forTest("unittests.view"); - const NamespaceString systemViewsNss = - NamespaceString::makeSystemDotViewsNamespace({boost::none, "unittests"}); + const NamespaceString systemViewsNss = NamespaceString::makeSystemDotViewsNamespace( + DatabaseName::createDatabaseName_forTest(boost::none, "unittests")); ASSERT_OK(createCollection(_opCtx, viewNss.dbName(), @@ -2967,14 +3065,15 @@ TEST_F(StorageTimestampTest, ViewCreationSeparateTransaction) { const Timestamp systemViewsCreateTs = queryOplog(BSON("op" << "c" - << "ns" << (viewNss.db() + ".$cmd") + << "ns" + << (viewNss.db_forTest() + ".$cmd") << "o.create" << "system.views"))["ts"] .timestamp(); const Timestamp viewCreateTs = queryOplog(BSON("op" << "i" - << "ns" << systemViewsNss.ns() << "o._id" - << viewNss.ns()))["ts"] + << "ns" << systemViewsNss.ns_forTest() << "o._id" + << viewNss.ns_forTest()))["ts"] .timestamp(); { @@ -2985,7 +3084,7 @@ TEST_F(StorageTimestampTest, ViewCreationSeparateTransaction) { auto systemViewsMd = getMetaDataAtTime( durableCatalog, catalogId, Timestamp(systemViewsCreateTs.asULL() - 1)); ASSERT(systemViewsMd == nullptr) - << systemViewsNss + << systemViewsNss.toStringForErrorMsg() << " incorrectly exists before creation. 
CreateTs: " << systemViewsCreateTs; systemViewsMd = getMetaDataAtTime(durableCatalog, catalogId, systemViewsCreateTs); @@ -2995,8 +3094,9 @@ TEST_F(StorageTimestampTest, ViewCreationSeparateTransaction) { assertDocumentAtTimestamp(autoColl.getCollection(), systemViewsCreateTs, BSONObj()); assertDocumentAtTimestamp(autoColl.getCollection(), viewCreateTs, - BSON("_id" << viewNss.ns() << "viewOn" << backingCollNss.coll() - << "pipeline" << BSONArray())); + BSON("_id" << viewNss.ns_forTest() << "viewOn" + << backingCollNss.coll() << "pipeline" + << BSONArray())); } } @@ -3016,7 +3116,7 @@ TEST_F(StorageTimestampTest, CreateCollectionWithSystemIndex) { BSONObj result = queryOplog(BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "o.create" + << "ns" << nss.getCommandNS().ns_forTest() << "o.create" << nss.coll())); repl::OplogEntry op(result); // The logOp() call for createCollection should have timestamp 'futureTs', which will also @@ -3033,7 +3133,7 @@ TEST_F(StorageTimestampTest, CreateCollectionWithSystemIndex) { indexStartTs = op.getTimestamp(); indexCreateTs = repl::OplogEntry(queryOplog(BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() + << "ns" << nss.getCommandNS().ns_forTest() << "o.createIndexes" << nss.coll() << "o.name" << "user_1_db_1"))) .getTimestamp(); @@ -3153,7 +3253,9 @@ class RetryableFindAndModifyTest : public StorageTimestampTest { const StringData dbName = "unittest"_sd; const BSONObj oldObj = BSON("_id" << 0 << "a" << 1); - RetryableFindAndModifyTest() : nss(dbName, "retryableFindAndModifyTest") { + RetryableFindAndModifyTest() + : nss(NamespaceString::createNamespaceString_forTest(dbName, + "retryableFindAndModifyTest")) { auto service = _opCtx->getServiceContext(); auto sessionCatalog = SessionCatalog::get(service); sessionCatalog->reset_forTest(); @@ -3217,9 +3319,6 @@ class RetryableFindAndModifyTest : public StorageTimestampTest { }; TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyUpdate) { - RAIIServerParameterControllerForTest ffRaii("featureFlagRetryableFindAndModify", true); - RAIIServerParameterControllerForTest storeImageInSideCollection( - "storeFindAndModifyImagesInSideCollection", true); AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X); CollectionWriter collection(_opCtx, autoColl); const auto criteria = BSON("_id" << 0); @@ -3244,7 +3343,8 @@ TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyUpdate) { Snapshotted(_opCtx->recoveryUnit()->getSnapshotId(), oldObj), newObj, collection_internal::kUpdateNoIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); wuow.commit(); } @@ -3264,9 +3364,6 @@ TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyUpdate) { TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyUpdateWithDamages) { namespace mmb = mongo::mutablebson; - RAIIServerParameterControllerForTest ffRaii("featureFlagRetryableFindAndModify", true); - RAIIServerParameterControllerForTest storeImageInSideCollection( - "storeFindAndModifyImagesInSideCollection", true); const auto bsonObj = BSON("_id" << 0 << "a" << 1); // Create a new document representing BSONObj with the above contents. 
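The view-creation test above also replaces ad-hoc `DatabaseName` construction with the explicit test factory before deriving the system namespace. A small sketch of that combination; the database string is a placeholder:

```cpp
// Sketch of the pattern above: build the DatabaseName through the test factory,
// then derive the system.views namespace from it.
const DatabaseName dbName =
    DatabaseName::createDatabaseName_forTest(boost::none, "unittests");
const NamespaceString systemViewsNss =
    NamespaceString::makeSystemDotViewsNamespace(dbName);
```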
mmb::Document doc(bsonObj, mmb::Document::kInPlaceEnabled); @@ -3304,7 +3401,8 @@ TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyUpdateWithDamages) { source, damages, collection_internal::kUpdateNoIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); wuow.commit(); ASSERT_OK(statusWith.getStatus()); @@ -3324,9 +3422,6 @@ TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyUpdateWithDamages) { } TEST_F(RetryableFindAndModifyTest, RetryableFindAndModifyDelete) { - RAIIServerParameterControllerForTest ffRaii("featureFlagRetryableFindAndModify", true); - RAIIServerParameterControllerForTest storeImageInSideCollection( - "storeFindAndModifyImagesInSideCollection", true); AutoGetCollection autoColl(_opCtx, nss, LockMode::MODE_X); CollectionWriter collection(_opCtx, autoColl); const auto bsonObj = BSON("_id" << 0 << "a" << 1); @@ -3370,7 +3465,9 @@ class MultiDocumentTransactionTest : public StorageTimestampTest { const BSONObj doc = BSON("_id" << 1 << "TestValue" << 1); const BSONObj docKey = BSON("_id" << 1); - MultiDocumentTransactionTest() : nss(dbName, "multiDocumentTransactionTest") { + MultiDocumentTransactionTest() + : nss(NamespaceString::createNamespaceString_forTest(dbName, + "multiDocumentTransactionTest")) { auto service = _opCtx->getServiceContext(); auto sessionCatalog = SessionCatalog::get(service); sessionCatalog->reset_forTest(); @@ -3549,13 +3646,14 @@ TEST_F(MultiDocumentTransactionTest, MultiOplogEntryTransaction) { assertFilteredDocumentAtTimestamp(coll, query2, _nullTs, doc2); // Implicit commit oplog entry should exist at commitEntryTs. - const auto commitFilter = BSON( - "ts" << commitEntryTs << "o" - << BSON("applyOps" << BSON_ARRAY(BSON("op" - << "i" - << "ns" << nss.ns() << "ui" << coll->uuid() - << "o" << doc2 << "o2" << doc2Key)) - << "count" << 2)); + const auto commitFilter = + BSON("ts" << commitEntryTs << "o" + << BSON("applyOps" + << BSON_ARRAY(BSON("op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << coll->uuid() + << "o" << doc2 << "o2" << doc2Key)) + << "count" << 2)); assertOplogDocumentExistsAtTimestamp(commitFilter, presentTs, false); assertOplogDocumentExistsAtTimestamp(commitFilter, beforeTxnTs, false); assertOplogDocumentExistsAtTimestamp(commitFilter, firstOplogEntryTs, false); @@ -3571,13 +3669,14 @@ TEST_F(MultiDocumentTransactionTest, MultiOplogEntryTransaction) { assertOldestActiveTxnTimestampEquals(boost::none, _nullTs); // first oplog entry should exist at firstOplogEntryTs and after it. - const auto firstOplogEntryFilter = BSON( - "ts" << firstOplogEntryTs << "o" - << BSON("applyOps" << BSON_ARRAY(BSON("op" - << "i" - << "ns" << nss.ns() << "ui" << coll->uuid() - << "o" << doc << "o2" << docKey)) - << "partialTxn" << true)); + const auto firstOplogEntryFilter = + BSON("ts" << firstOplogEntryTs << "o" + << BSON("applyOps" + << BSON_ARRAY(BSON("op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << coll->uuid() + << "o" << doc << "o2" << docKey)) + << "partialTxn" << true)); assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, presentTs, false); assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, beforeTxnTs, false); assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, firstOplogEntryTs, true); @@ -3738,13 +3837,14 @@ TEST_F(MultiDocumentTransactionTest, CommitPreparedMultiOplogEntryTransaction) { assertOplogDocumentExistsAtTimestamp(commitFilter, _nullTs, true); // The first oplog entry should exist at firstOplogEntryTs and onwards. 
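The multi-document transaction tests above build the same applyOps oplog filter several times; the reflowed hunks only change `nss.ns()` to `nss.ns_forTest()`. A sketch of the filter shape, factored into a hypothetical helper (the helper name and parameters are illustrative):

```cpp
// Hypothetical helper mirroring the applyOps filters built in the tests above.
BSONObj makePartialTxnInsertFilter(const Timestamp& ts,
                                   const NamespaceString& nss,
                                   const UUID& collUuid,
                                   const BSONObj& doc,
                                   const BSONObj& docKey) {
    return BSON("ts" << ts << "o"
                     << BSON("applyOps"
                             << BSON_ARRAY(BSON("op"
                                                << "i"
                                                << "ns" << nss.ns_forTest() << "ui" << collUuid
                                                << "o" << doc << "o2" << docKey))
                             << "partialTxn" << true));
}
```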
- const auto firstOplogEntryFilter = BSON( - "ts" << firstOplogEntryTs << "o" - << BSON("applyOps" << BSON_ARRAY(BSON("op" - << "i" - << "ns" << nss.ns() << "ui" << coll->uuid() - << "o" << doc << "o2" << docKey)) - << "partialTxn" << true)); + const auto firstOplogEntryFilter = + BSON("ts" << firstOplogEntryTs << "o" + << BSON("applyOps" + << BSON_ARRAY(BSON("op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << coll->uuid() + << "o" << doc << "o2" << docKey)) + << "partialTxn" << true)); assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, presentTs, false); assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, beforeTxnTs, false); assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, firstOplogEntryTs, true); @@ -3752,13 +3852,14 @@ TEST_F(MultiDocumentTransactionTest, CommitPreparedMultiOplogEntryTransaction) { assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, commitEntryTs, true); assertOplogDocumentExistsAtTimestamp(firstOplogEntryFilter, _nullTs, true); // The prepare oplog entry should exist at prepareEntryTs and onwards. - const auto prepareOplogEntryFilter = BSON( - "ts" << prepareEntryTs << "o" - << BSON("applyOps" << BSON_ARRAY(BSON("op" - << "i" - << "ns" << nss.ns() << "ui" << coll->uuid() - << "o" << doc2 << "o2" << doc2Key)) - << "prepare" << true << "count" << 2)); + const auto prepareOplogEntryFilter = + BSON("ts" << prepareEntryTs << "o" + << BSON("applyOps" + << BSON_ARRAY(BSON("op" + << "i" + << "ns" << nss.ns_forTest() << "ui" << coll->uuid() + << "o" << doc2 << "o2" << doc2Key)) + << "prepare" << true << "count" << 2)); assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false); assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false); assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, firstOplogEntryTs, false); @@ -3866,8 +3967,8 @@ TEST_F(MultiDocumentTransactionTest, AbortPreparedMultiOplogEntryTransaction) { BSON("ts" << prepareEntryTs << "o" << BSON("applyOps" << BSON_ARRAY(BSON("op" << "i" - << "ns" << nss.ns() << "ui" << ui << "o" - << doc << "o2" << docKey)) + << "ns" << nss.ns_forTest() << "ui" + << ui << "o" << doc << "o2" << docKey)) << "prepare" << true)); assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, presentTs, false); assertOplogDocumentExistsAtTimestamp(prepareOplogEntryFilter, beforeTxnTs, false); diff --git a/src/mongo/db/repl/sync_source_feedback.cpp b/src/mongo/db/repl/sync_source_feedback.cpp index 9b74f405eb0aa..cc912f01608a8 100644 --- a/src/mongo/db/repl/sync_source_feedback.cpp +++ b/src/mongo/db/repl/sync_source_feedback.cpp @@ -28,21 +28,30 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/sync_source_feedback.h" - +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/client.h" #include "mongo/db/repl/bgsync.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/reporter.h" +#include "mongo/db/repl/sync_source_feedback.h" #include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/scopeguard.h" -#include "mongo/util/time_support.h" +#include 
"mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -159,6 +168,12 @@ void SyncSourceFeedback::run(executor::TaskExecutor* executor, ReplicationCoordinator* replCoord) { Client::initThread("SyncSourceFeedback"); + // TODO(SERVER-74656): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + HostAndPort syncTarget; // keepAliveInterval indicates how frequently to forward progress in the absence of updates. diff --git a/src/mongo/db/repl/sync_source_feedback.h b/src/mongo/db/repl/sync_source_feedback.h index 0ac723ee6a1fa..5111e1e30e431 100644 --- a/src/mongo/db/repl/sync_source_feedback.h +++ b/src/mongo/db/repl/sync_source_feedback.h @@ -32,6 +32,7 @@ #include "mongo/base/status.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" diff --git a/src/mongo/db/repl/sync_source_resolver.cpp b/src/mongo/db/repl/sync_source_resolver.cpp index 8460ce006d45a..c2c9f67da7754 100644 --- a/src/mongo/db/repl/sync_source_resolver.cpp +++ b/src/mongo/db/repl/sync_source_resolver.cpp @@ -28,20 +28,36 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/sync_source_resolver.h" - +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include - -#include "mongo/db/jsobj.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/sync_source_resolver.h" #include "mongo/db/repl/sync_source_selector.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/util/assert_util.h" #include "mongo/util/destructor_guard.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -174,9 +190,9 @@ std::unique_ptr SyncSourceResolver::_makeFirstOplogEntryFetcher( << BSON(OplogEntryBase::kTimestampFieldName << 1 << OplogEntryBase::kTermFieldName << 1) << ReadConcernArgs::kReadConcernFieldName << ReadConcernArgs::kLocal), - [=](const StatusWith& response, - Fetcher::NextAction*, - BSONObjBuilder*) { + [=, this](const StatusWith& response, + Fetcher::NextAction*, + BSONObjBuilder*) { return _firstOplogEntryFetcherCallback(response, candidate, earliestOpTimeSeen); }, ReadPreferenceSetting::secondaryPreferredMetadata(), @@ -198,9 +214,9 @@ std::unique_ptr SyncSourceResolver::_makeRequiredOpTimeFetcher(HostAndP << BSON("ts" << BSON("$gte" << _requiredOpTime.getTimestamp() << "$lte" << _requiredOpTime.getTimestamp())) << ReadConcernArgs::kReadConcernFieldName << ReadConcernArgs::kLocal), - [=](const StatusWith& response, - Fetcher::NextAction*, - BSONObjBuilder*) { + [=, this](const StatusWith& response, + Fetcher::NextAction*, + BSONObjBuilder*) { return _requiredOpTimeFetcherCallback(response, candidate, earliestOpTimeSeen, rbid); }, 
ReadPreferenceSetting::secondaryPreferredMetadata(), @@ -401,7 +417,7 @@ Status SyncSourceResolver::_scheduleRBIDRequest(HostAndPort candidate, OpTime ea invariant(_state == State::kRunning); auto handle = _taskExecutor->scheduleRemoteCommand( {candidate, "admin", BSON("replSetGetRBID" << 1), nullptr, kFetcherTimeout}, - [=](const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) { + [=, this](const executor::TaskExecutor::RemoteCommandCallbackArgs& rbidReply) { _rbidRequestCallback(candidate, earliestOpTimeSeen, rbidReply); }); if (!handle.isOK()) { diff --git a/src/mongo/db/repl/sync_source_resolver.h b/src/mongo/db/repl/sync_source_resolver.h index d7d86d4c4f00c..5843f0908e29a 100644 --- a/src/mongo/db/repl/sync_source_resolver.h +++ b/src/mongo/db/repl/sync_source_resolver.h @@ -29,18 +29,24 @@ #pragma once +#include +#include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/client/fetcher.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/sync_source_selector.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/repl/sync_source_resolver_test.cpp b/src/mongo/db/repl/sync_source_resolver_test.cpp index 6905e0d1d9f0c..ac75d6fd922fa 100644 --- a/src/mongo/db/repl/sync_source_resolver_test.cpp +++ b/src/mongo/db/repl/sync_source_resolver_test.cpp @@ -27,22 +27,42 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include #include - +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/baton.h" #include "mongo/db/cursor_id.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/sync_source_resolver.h" -#include "mongo/db/repl/sync_source_selector.h" #include "mongo/db/repl/sync_source_selector_mock.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/task_executor_proxy.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace { @@ -154,7 +174,7 @@ BSONObj makeCursorResponse(CursorId cursorId, { BSONObjBuilder cursorBob(bob.subobjStart("cursor")); cursorBob.append("id", cursorId); - cursorBob.append("ns", nss.toString()); + cursorBob.append("ns", nss.toString_forTest()); { BSONArrayBuilder batchBob( cursorBob.subarrayStart(isFirstBatch ? 
"firstBatch" : "nextBatch")); diff --git a/src/mongo/db/repl/sync_source_selector_mock.cpp b/src/mongo/db/repl/sync_source_selector_mock.cpp index 2368064e4e8b8..ff5a29f7787aa 100644 --- a/src/mongo/db/repl/sync_source_selector_mock.cpp +++ b/src/mongo/db/repl/sync_source_selector_mock.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/sync_source_selector_mock.h" namespace mongo { diff --git a/src/mongo/db/repl/sync_source_selector_mock.h b/src/mongo/db/repl/sync_source_selector_mock.h index f8d2a86456b63..2f33176926cdf 100644 --- a/src/mongo/db/repl/sync_source_selector_mock.h +++ b/src/mongo/db/repl/sync_source_selector_mock.h @@ -30,9 +30,13 @@ #pragma once #include +#include #include "mongo/db/repl/optime.h" #include "mongo/db/repl/sync_source_selector.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/task_executor_mock.cpp b/src/mongo/db/repl/task_executor_mock.cpp index 6909e354633ec..02704a01f8f89 100644 --- a/src/mongo/db/repl/task_executor_mock.cpp +++ b/src/mongo/db/repl/task_executor_mock.cpp @@ -27,9 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/repl/task_executor_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/task_executor_mock.h b/src/mongo/db/repl/task_executor_mock.h index a8b101f8f99d2..fbb9b7403eabe 100644 --- a/src/mongo/db/repl/task_executor_mock.h +++ b/src/mongo/db/repl/task_executor_mock.h @@ -29,7 +29,14 @@ #pragma once +#include + +#include "mongo/base/status_with.h" +#include "mongo/db/baton.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" #include "mongo/unittest/task_executor_proxy.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/task_runner.cpp b/src/mongo/db/repl/task_runner.cpp index a109c7cbd647b..0dfb31d2fa554 100644 --- a/src/mongo/db/repl/task_runner.cpp +++ b/src/mongo/db/repl/task_runner.cpp @@ -28,20 +28,23 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/task_runner.h" - -#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/task_runner.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/assert_util.h" -#include "mongo/util/concurrency/thread_name.h" #include "mongo/util/destructor_guard.h" #include "mongo/util/str.h" diff --git a/src/mongo/db/repl/task_runner.h b/src/mongo/db/repl/task_runner.h index 4f7303e3ad6e4..d86c37092d4a6 100644 --- a/src/mongo/db/repl/task_runner.h +++ b/src/mongo/db/repl/task_runner.h @@ -31,7 +31,9 @@ #include #include +#include +#include "mongo/base/status.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" diff --git a/src/mongo/db/repl/task_runner_test.cpp b/src/mongo/db/repl/task_runner_test.cpp index 
089a76381f1b7..670b0fac7f482 100644 --- a/src/mongo/db/repl/task_runner_test.cpp +++ b/src/mongo/db/repl/task_runner_test.cpp @@ -27,23 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include #include -#include "mongo/db/operation_context_noop.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/task_runner.h" #include "mongo/db/repl/task_runner_test_fixture.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +namespace mongo::repl { namespace { -using namespace mongo; -using namespace mongo::repl; - using Task = TaskRunner::Task; TEST_F(TaskRunnerTest, InvalidConstruction) { @@ -357,3 +361,4 @@ TEST_F(TaskRunnerTest, DestroyShouldWaitForTasksToComplete) { } } // namespace +} // namespace mongo::repl diff --git a/src/mongo/db/repl/task_runner_test_fixture.cpp b/src/mongo/db/repl/task_runner_test_fixture.cpp index f7d1f85f182bc..ad7d0908eec1b 100644 --- a/src/mongo/db/repl/task_runner_test_fixture.cpp +++ b/src/mongo/db/repl/task_runner_test_fixture.cpp @@ -27,14 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/task_runner_test_fixture.h" - #include #include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/client.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/task_runner.h" +#include "mongo/db/repl/task_runner_test_fixture.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/tenant_all_database_cloner.cpp b/src/mongo/db/repl/tenant_all_database_cloner.cpp index ca207c074b58b..c789e17fea29d 100644 --- a/src/mongo/db/repl/tenant_all_database_cloner.cpp +++ b/src/mongo/db/repl/tenant_all_database_cloner.cpp @@ -28,18 +28,39 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/dbmessage.h" #include "mongo/db/repl/cloner_utils.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/tenant_all_database_cloner.h" #include "mongo/db/repl/tenant_database_cloner.h" #include "mongo/db/repl/tenant_migration_decoration.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -99,8 +120,7 @@ BaseCloner::AfterStageBehavior TenantAllDatabaseCloner::listDatabasesStage() { BSONObj readResult; BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(_operationTime); - 
getClient()->runCommand( - DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk); + getClient()->runCommand(DatabaseName::kAdmin, cmd, readResult, QueryOption_SecondaryOk); uassertStatusOKWithContext( getStatusFromCommandResult(readResult), "TenantAllDatabaseCloner failed to get listDatabases result majority-committed"); @@ -163,7 +183,8 @@ BaseCloner::AfterStageBehavior TenantAllDatabaseCloner::listExistingDatabasesSta clonedDatabases.emplace_back(dbName); BSONObj res; - client.runCommand(DatabaseName(boost::none, dbName), BSON("dbStats" << 1), res); + client.runCommand( + DatabaseNameUtil::deserialize(boost::none, dbName), BSON("dbStats" << 1), res); if (auto status = getStatusFromCommandResult(res); !status.isOK()) { LOGV2_WARNING(5522900, "Skipping recording of data size metrics for database due to failure " @@ -233,7 +254,8 @@ BaseCloner::AfterStageBehavior TenantAllDatabaseCloner::initializeStatsStage() { long long approxTotalDataSizeLeftOnRemote = 0; for (const auto& dbName : _databases) { BSONObj res; - getClient()->runCommand(DatabaseName(boost::none, dbName), BSON("dbStats" << 1), res); + getClient()->runCommand( + DatabaseNameUtil::deserialize(boost::none, dbName), BSON("dbStats" << 1), res); if (auto status = getStatusFromCommandResult(res); !status.isOK()) { LOGV2_WARNING(5426600, "Skipping recording of data size metrics for database due to failure " diff --git a/src/mongo/db/repl/tenant_all_database_cloner.h b/src/mongo/db/repl/tenant_all_database_cloner.h index cc993cefdfd48..bdbdfeeb9e789 100644 --- a/src/mongo/db/repl/tenant_all_database_cloner.h +++ b/src/mongo/db/repl/tenant_all_database_cloner.h @@ -29,12 +29,25 @@ #pragma once +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_connection.h" #include "mongo/db/repl/base_cloner.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/tenant_base_cloner.h" #include "mongo/db/repl/tenant_database_cloner.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/tenant_all_database_cloner_test.cpp b/src/mongo/db/repl/tenant_all_database_cloner_test.cpp index 0371229668169..59f6b2276e706 100644 --- a/src/mongo/db/repl/tenant_all_database_cloner_test.cpp +++ b/src/mongo/db/repl/tenant_all_database_cloner_test.cpp @@ -28,18 +28,25 @@ */ -#include "mongo/platform/basic.h" - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/storage_interface.h" -#include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/repl/tenant_all_database_cloner.h" #include "mongo/db/repl/tenant_cloner_test_fixture.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include 
"mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/repl/tenant_base_cloner.cpp b/src/mongo/db/repl/tenant_base_cloner.cpp index f532c31c69ec6..2862372eb7aca 100644 --- a/src/mongo/db/repl/tenant_base_cloner.cpp +++ b/src/mongo/db/repl/tenant_base_cloner.cpp @@ -28,10 +28,7 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/tenant_base_cloner.h" -#include "mongo/logv2/log.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration diff --git a/src/mongo/db/repl/tenant_base_cloner.h b/src/mongo/db/repl/tenant_base_cloner.h index 0aff903bbdd57..49f01372b05a8 100644 --- a/src/mongo/db/repl/tenant_base_cloner.h +++ b/src/mongo/db/repl/tenant_base_cloner.h @@ -30,8 +30,14 @@ #pragma once #include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/client/dbclient_connection.h" #include "mongo/db/repl/base_cloner.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/tenant_cloner_test_fixture.cpp b/src/mongo/db/repl/tenant_cloner_test_fixture.cpp index 19103eae5fc56..9afcee42fff0b 100644 --- a/src/mongo/db/repl/tenant_cloner_test_fixture.cpp +++ b/src/mongo/db/repl/tenant_cloner_test_fixture.cpp @@ -27,13 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/base/checked_cast.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/db/client.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/repl_sync_shared_data.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/repl/tenant_cloner_test_fixture.h" +#include "mongo/unittest/assert.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/tenant_cloner_test_fixture.h b/src/mongo/db/repl/tenant_cloner_test_fixture.h index a8584a6ca9b09..0bb7fb51ba70e 100644 --- a/src/mongo/db/repl/tenant_cloner_test_fixture.h +++ b/src/mongo/db/repl/tenant_cloner_test_fixture.h @@ -29,8 +29,24 @@ #pragma once +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/cloner_test_fixture.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/db/service_context.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/unittest/log_test.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/repl/tenant_collection_cloner.cpp b/src/mongo/db/repl/tenant_collection_cloner.cpp index ff5899c20efb6..9d414257cb80a 100644 --- a/src/mongo/db/repl/tenant_collection_cloner.cpp +++ b/src/mongo/db/repl/tenant_collection_cloner.cpp @@ -28,25 +28,63 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include + +#include +#include + +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/commands/list_collections_filter.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/single_write_result_gen.h" #include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/cloner_utils.h" -#include "mongo/db/repl/database_cloner_gen.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/tenant_collection_cloner.h" #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/metadata.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -98,13 +136,14 @@ TenantCollectionCloner::TenantCollectionCloner(const NamespaceString& sourceNss, kProgressMeterSecondsBetween, kProgressMeterCheckInterval, "documents copied", - str::stream() << _sourceNss.toString() << " tenant collection clone progress"), + str::stream() << NamespaceStringUtil::serialize(_sourceNss) + << " tenant collection clone progress"), _tenantId(tenantId) { invariant(sourceNss.isValid()); invariant(ClonerUtils::isNamespaceForTenant(sourceNss, tenantId)); invariant(collectionOptions.uuid); _sourceDbAndUuid = NamespaceStringOrUUID(sourceNss.dbName(), *collectionOptions.uuid); - _stats.ns = _sourceNss.ns(); + _stats.ns = _sourceNss.ns().toString(); } BaseCloner::ClonerStages TenantCollectionCloner::getStages() { @@ -237,15 +276,14 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::listIndexesStage() { } }, [&](const BSONObj& data) { + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd); // Only hang when cloning the specified collection, or if no collection was specified. 
- auto nss = data["nss"].str(); - return nss.empty() || nss == _sourceNss.toString(); + return fpNss.isEmpty() || fpNss == _sourceNss; }); BSONObj readResult; BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(_operationTime); - getClient()->runCommand( - DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk); + getClient()->runCommand(DatabaseName::kAdmin, cmd, readResult, QueryOption_SecondaryOk); uassertStatusOKWithContext( getStatusFromCommandResult(readResult), "TenantCollectionCloner failed to get listIndexes result majority-committed"); @@ -277,7 +315,7 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::listIndexesStage() { ErrorCodes::IllegalOperation, str::stream() << "Found empty '_id' index spec but the collection is not specified with " "'autoIndexId' as false, tenantId: " - << _tenantId << ", namespace: " << this->_sourceNss, + << _tenantId << ", namespace: " << this->_sourceNss.toStringForErrorMsg(), _collectionOptions.clusteredIndex || !_idIndexSpec.isEmpty() || _collectionOptions.autoIndexId == CollectionOptions::NO); @@ -307,7 +345,8 @@ BaseCloner::AfterStageBehavior TenantCollectionCloner::createCollectionStage() { << " already exists but does not belong to the same database", collection->ns().db() == _sourceNss.db()); uassert(ErrorCodes::NamespaceExists, - str::stream() << "Tenant '" << _tenantId << "': collection '" << collection->ns() + str::stream() << "Tenant '" << _tenantId << "': collection '" + << collection->ns().toStringForErrorMsg() << "' already exists prior to data sync", getSharedData()->getResumePhase() == ResumePhase::kDataSync); @@ -513,9 +552,9 @@ void TenantCollectionCloner::handleNextBatch(DBClientCursor& cursor) { } }, [&](const BSONObj& data) { + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd); // Only hang when cloning the specified collection, or if no collection was specified. 
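The cloner failpoints above now parse the `nss` field of the failpoint data into a `NamespaceString` instead of comparing raw strings. A sketch of that predicate, factored into a hypothetical free function:

```cpp
// Hypothetical helper showing the predicate used by the failpoints above:
// an absent or empty "nss" field matches any collection.
bool failPointMatchesNamespace(const BSONObj& data, const NamespaceString& sourceNss) {
    const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd);
    return fpNss.isEmpty() || fpNss == sourceNss;
}
```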
- auto nss = data["nss"].str(); - return nss.empty() || nss == _sourceNss.toString(); + return fpNss.isEmpty() || fpNss == _sourceNss; }); } @@ -523,7 +562,7 @@ void TenantCollectionCloner::handleNextBatch(DBClientCursor& cursor) { void TenantCollectionCloner::insertDocuments(std::vector docsToInsert) { invariant(docsToInsert.size(), "Document size can't be non-zero:: namespace: {}, tenantId: {}"_format( - _sourceNss.toString(), _tenantId)); + toStringForLogging(_sourceNss), _tenantId)); { stdx::lock_guard lk(_mutex); @@ -567,8 +606,8 @@ void TenantCollectionCloner::insertDocuments(std::vector docsToInsert) } bool TenantCollectionCloner::isMyFailPoint(const BSONObj& data) const { - auto nss = data["nss"].str(); - return (nss.empty() || nss == _sourceNss.toString()) && BaseCloner::isMyFailPoint(data); + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "nss"_sd); + return (fpNss.isEmpty() || fpNss == _sourceNss) && BaseCloner::isMyFailPoint(data); } TenantCollectionCloner::Stats TenantCollectionCloner::getStats() const { diff --git a/src/mongo/db/repl/tenant_collection_cloner.h b/src/mongo/db/repl/tenant_collection_cloner.h index 57678588766bb..95ada9962aad1 100644 --- a/src/mongo/db/repl/tenant_collection_cloner.h +++ b/src/mongo/db/repl/tenant_collection_cloner.h @@ -29,14 +29,33 @@ #pragma once +#include +#include +#include #include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/base_cloner.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/task_runner.h" #include "mongo/db/repl/tenant_base_cloner.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/progress_meter.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -84,7 +103,7 @@ class TenantCollectionCloner : public TenantBaseCloner { return _sourceNss; } UUID getSourceUuid() const { - return *_sourceDbAndUuid.uuid(); + return _sourceDbAndUuid.uuid(); } const std::string& getTenantId() const { return _tenantId; diff --git a/src/mongo/db/repl/tenant_collection_cloner_test.cpp b/src/mongo/db/repl/tenant_collection_cloner_test.cpp index b04132dfe0809..de3b495eb5e2e 100644 --- a/src/mongo/db/repl/tenant_collection_cloner_test.cpp +++ b/src/mongo/db/repl/tenant_collection_cloner_test.cpp @@ -27,22 +27,43 @@ * it in the license file. 
*/ +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include +#include "mongo/base/status_with.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/client.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/repl/tenant_cloner_test_fixture.h" #include "mongo/db/repl/tenant_collection_cloner.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/executor/task_executor.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source_mock.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" namespace mongo { namespace repl { @@ -93,7 +114,8 @@ class TenantCollectionClonerTestOpObserver final : public OpObserverNoop { std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) final { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final { if (coll->ns() == nssToCapture) { numDocsInserted += std::distance(begin, end); } @@ -115,7 +137,7 @@ class TenantCollectionClonerTest : public TenantClonerTestFixture { void setUp() override { TenantClonerTestFixture::setUp(); - _mockServer->assignCollectionUuid(_nss.ns(), _collUuid); + _mockServer->assignCollectionUuid(_nss.ns_forTest(), _collUuid); _mockServer->setCommandReply("dbStats", StatusWith(BSON("dataSize" << 1))); _mockServer->setCommandReply("collStats", BSON("size" << 1)); @@ -245,7 +267,8 @@ TEST_F(TenantCollectionClonerTest, ListIndexesReturnedNoIndexesShouldFail) { auto cloner = makeCollectionCloner(); cloner->setStopAfterStage_forTest("listIndexes"); _mockServer->setCommandReply("count", createCountResponse(1)); - _mockServer->setCommandReply("listIndexes", createCursorResponse(_nss.ns(), BSONArray())); + _mockServer->setCommandReply("listIndexes", + createCursorResponse(_nss.ns_forTest(), BSONArray())); _mockServer->setCommandReply("find", createFindResponse()); ASSERT_EQUALS(ErrorCodes::IllegalOperation, cloner->run()); @@ -261,7 +284,7 @@ TEST_F(TenantCollectionClonerTest, ListIndexesHasResults) { _mockServer->setCommandReply( "listIndexes", createCursorResponse( - _nss.ns(), + _nss.ns_forTest(), BSON_ARRAY(_secondaryIndexSpecs[0] << _idIndexSpec << _secondaryIndexSpecs[1]))); _mockServer->setCommandReply("find", createFindResponse()); ASSERT_OK(cloner->run()); @@ -283,7 +306,7 @@ TEST_F(TenantCollectionClonerTest, ListIndexesRemoteUnreachableBeforeMajorityFin auto cloner = makeCollectionCloner(); _mockServer->setCommandReply("count", createCountResponse(1)); 
_mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); auto clonerOperationTimeFP = globalFailPointRegistry().find("tenantCollectionClonerHangAfterGettingOperationTime"); @@ -307,7 +330,7 @@ TEST_F(TenantCollectionClonerTest, ListIndexesRecordsCorrectOperationTime) { auto cloner = makeCollectionCloner(); _mockServer->setCommandReply("count", createCountResponse(1)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); auto clonerOperationTimeFP = @@ -337,12 +360,13 @@ TEST_F(TenantCollectionClonerTest, BeginCollection) { for (const auto& secondaryIndexSpec : _secondaryIndexSpecs) { indexSpecs.append(secondaryIndexSpec); } - _mockServer->setCommandReply("listIndexes", createCursorResponse(_nss.ns(), indexSpecs.arr())); + _mockServer->setCommandReply("listIndexes", + createCursorResponse(_nss.ns_forTest(), indexSpecs.arr())); _mockServer->setCommandReply("find", createFindResponse()); ASSERT_EQUALS(Status::OK(), cloner->run()); - ASSERT_EQUALS(_nss.ns(), _opObserver->nssToCapture.ns()); + ASSERT_EQUALS(_nss.ns_forTest(), _opObserver->nssToCapture.ns_forTest()); ASSERT_TRUE(_opObserver->collCreated); ASSERT_BSONOBJ_EQ(_options.toBSON(), _opObserver->collectionOptions.toBSON()); @@ -358,13 +382,13 @@ TEST_F(TenantCollectionClonerTest, BeginCollectionFailed) { auto createCollectionFp = globalFailPointRegistry().find("hangAndFailAfterCreateCollectionReservesOpTime"); auto initialTimesEntered = - createCollectionFp->setMode(FailPoint::alwaysOn, 0, BSON("nss" << _nss.toString())); + createCollectionFp->setMode(FailPoint::alwaysOn, 0, BSON("nss" << _nss.toString_forTest())); auto cloner = makeCollectionCloner(); cloner->setStopAfterStage_forTest("createCollection"); _mockServer->setCommandReply("count", createCountResponse(1)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // Run the cloner in a separate thread. @@ -380,7 +404,7 @@ TEST_F(TenantCollectionClonerTest, BeginCollectionFailed) { clonerThread.join(); - ASSERT_EQUALS(_nss.ns(), _opObserver->nssToCapture.ns()); + ASSERT_EQUALS(_nss.ns_forTest(), _opObserver->nssToCapture.ns_forTest()); ASSERT_FALSE(_opObserver->collCreated); } @@ -388,7 +412,7 @@ TEST_F(TenantCollectionClonerTest, InsertDocumentsSingleBatch) { // Set up data for preliminary stages _mockServer->setCommandReply("count", createCountResponse(2)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // Set up documents to be returned from upstream node. 
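`BeginCollectionFailed` above drives a named failpoint by hand rather than with a scoped block. A sketch of that sequence; `waitForTimesEntered` is the usual companion call and is assumed here, since the patch context does not show it:

```cpp
// Sketch of the manual failpoint control used in BeginCollectionFailed above.
void hangCreateCollectionForTest(const NamespaceString& nss) {
    auto* fp = globalFailPointRegistry().find("hangAndFailAfterCreateCollectionReservesOpTime");
    auto timesEntered =
        fp->setMode(FailPoint::alwaysOn, 0, BSON("nss" << nss.toString_forTest()));
    // ... start the cloner on its own thread ...
    fp->waitForTimesEntered(timesEntered + 1);  // assumed companion call, not shown in the patch
    fp->setMode(FailPoint::off, 0);
}
```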
@@ -398,7 +422,7 @@ TEST_F(TenantCollectionClonerTest, InsertDocumentsSingleBatch) { auto cloner = makeCollectionCloner(); ASSERT_OK(cloner->run()); - ASSERT_EQUALS(_nss.ns(), _opObserver->nssToCapture.ns()); + ASSERT_EQUALS(_nss.ns_forTest(), _opObserver->nssToCapture.ns_forTest()); ASSERT_EQUALS(2, _opObserver->numDocsInserted); auto stats = cloner->getStats(); @@ -414,7 +438,7 @@ TEST_F(TenantCollectionClonerTest, BatchSizeStoredInConstructor) { // Set up data for preliminary stages. _mockServer->setCommandReply("count", createCountResponse(7)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // Set up documents to be returned from upstream node. It should take 3 batches to clone the @@ -430,7 +454,7 @@ TEST_F(TenantCollectionClonerTest, BatchSizeStoredInConstructor) { auto cloner = makeCollectionCloner(); ASSERT_OK(cloner->run()); - ASSERT_EQUALS(_nss.ns(), _opObserver->nssToCapture.ns()); + ASSERT_EQUALS(_nss.ns_forTest(), _opObserver->nssToCapture.ns_forTest()); ASSERT_EQUALS(7, _opObserver->numDocsInserted); auto stats = cloner->getStats(); @@ -442,7 +466,7 @@ TEST_F(TenantCollectionClonerTest, InsertDocumentsMultipleBatches) { // Set up data for preliminary stages _mockServer->setCommandReply("count", createCountResponse(5)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // Set up documents to be returned from upstream node. @@ -456,7 +480,7 @@ TEST_F(TenantCollectionClonerTest, InsertDocumentsMultipleBatches) { cloner->setBatchSize_forTest(2); ASSERT_OK(cloner->run()); - ASSERT_EQUALS(_nss.ns(), _opObserver->nssToCapture.ns()); + ASSERT_EQUALS(_nss.ns_forTest(), _opObserver->nssToCapture.ns_forTest()); ASSERT_EQUALS(5, _opObserver->numDocsInserted); auto stats = cloner->getStats(); @@ -468,7 +492,7 @@ TEST_F(TenantCollectionClonerTest, InsertDocumentsFailed) { // Set up data for preliminary stages _mockServer->setCommandReply("count", createCountResponse(3)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // Set up documents to be returned from upstream node. @@ -479,7 +503,8 @@ TEST_F(TenantCollectionClonerTest, InsertDocumentsFailed) { auto cloner = makeCollectionCloner(); // Enable failpoint to make collection inserts to fail. - FailPointEnableBlock fp("failCollectionInserts", BSON("collectionNS" << _nss.toString())); + FailPointEnableBlock fp("failCollectionInserts", + BSON("collectionNS" << _nss.toString_forTest())); // Run the cloner in a separate thread. 
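`InsertDocumentsFailed` above instead uses the scoped `FailPointEnableBlock`, limiting the failure to the cloner run. A sketch of that shape; the assertion inside the thread is illustrative, the test's exact expectation is not visible in the hunk:

```cpp
// Sketch of the scoped-failpoint pattern in InsertDocumentsFailed above.
{
    FailPointEnableBlock fp("failCollectionInserts",
                            BSON("collectionNS" << nss.toString_forTest()));
    stdx::thread clonerThread([&] {
        Client::initThread("ClonerRunner");
        ASSERT_NOT_OK(cloner->run());  // inserts fail while the failpoint is active
    });
    clonerThread.join();
}  // failpoint disabled when the block goes out of scope
```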
stdx::thread clonerThread([&] { @@ -496,7 +521,7 @@ TEST_F(TenantCollectionClonerTest, QueryFailure) { << "_id_"); _mockServer->setCommandReply("count", createCountResponse(3)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); auto beforeStageFailPoint = globalFailPointRegistry().find("hangBeforeClonerStage"); @@ -558,7 +583,7 @@ TEST_F(TenantCollectionClonerTest, QueryStageNamespaceNotFoundOnFirstBatch) { // Set up data for preliminary stages. _mockServer->setCommandReply("count", createCountResponse(2)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // majority read after listIndexes // Set up before-stage failpoint. @@ -602,7 +627,7 @@ TEST_F(TenantCollectionClonerTest, QueryStageNamespaceNotFoundOnSubsequentBatch) // Set up data for preliminary stages. _mockServer->setCommandReply("count", createCountResponse(2)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // majority read after listIndexes // Set up after-first-batch failpoint. @@ -699,7 +724,7 @@ TEST_F(TenantCollectionClonerTest, QueryPlanKilledThenNamespaceNotFoundFirstBatc // Set up data for preliminary stages. _mockServer->setCommandReply("count", createCountResponse(3)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // majority read after listIndexes // Set up failpoints. @@ -756,7 +781,7 @@ TEST_F(TenantCollectionClonerTest, QueryPlanKilledThenNamespaceNotFoundSubsequen // Set up data for preliminary stages. _mockServer->setCommandReply("count", createCountResponse(3)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // majority read after listIndexes // Set up failpoints. 
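The query-stage tests above aim failpoints at one specific cloner stage by passing structured data. A sketch of that configuration, reusing the `fromjson` form shown later in this patch (the stage name `listIndexes` is the one visible in the hunk):

```cpp
// Sketch of stage-targeted failpoint data, as used by the cloner tests above.
auto* stageFp = globalFailPointRegistry().find("hangBeforeClonerStage");
auto timesEntered = stageFp->setMode(
    FailPoint::alwaysOn,
    0,
    fromjson("{cloner: 'TenantCollectionCloner', stage: 'listIndexes', nss: '" +
             nss.ns_forTest() + "'}"));
```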
@@ -822,7 +847,8 @@ TEST_F(TenantCollectionClonerTest, ResumeFromEmptyCollectionMissingAllSecondaryI for (const auto& secondaryIndexSpec : _secondaryIndexSpecs) { indexSpecs.append(secondaryIndexSpec); } - _mockServer->setCommandReply("listIndexes", createCursorResponse(_nss.ns(), indexSpecs.arr())); + _mockServer->setCommandReply("listIndexes", + createCursorResponse(_nss.ns_forTest(), indexSpecs.arr())); _mockServer->setCommandReply("find", createFindResponse()); ASSERT_EQUALS(Status::OK(), cloner->run()); @@ -859,7 +885,8 @@ TEST_F(TenantCollectionClonerTest, ResumeFromEmptyCollectionMissingSomeSecondary for (const auto& secondaryIndexSpec : _secondaryIndexSpecs) { indexSpecs.append(secondaryIndexSpec); } - _mockServer->setCommandReply("listIndexes", createCursorResponse(_nss.ns(), indexSpecs.arr())); + _mockServer->setCommandReply("listIndexes", + createCursorResponse(_nss.ns_forTest(), indexSpecs.arr())); _mockServer->setCommandReply("find", createFindResponse()); ASSERT_EQUALS(Status::OK(), cloner->run()); @@ -889,7 +916,8 @@ TEST_F(TenantCollectionClonerTest, ResumeFromEmptyCollectionMissingNoSecondaryIn for (const auto& secondaryIndexSpec : _secondaryIndexSpecs) { indexSpecs.append(secondaryIndexSpec); } - _mockServer->setCommandReply("listIndexes", createCursorResponse(_nss.ns(), indexSpecs.arr())); + _mockServer->setCommandReply("listIndexes", + createCursorResponse(_nss.ns_forTest(), indexSpecs.arr())); _mockServer->setCommandReply("find", createFindResponse()); ASSERT_EQUALS(Status::OK(), cloner->run()); @@ -920,7 +948,8 @@ TEST_F(TenantCollectionClonerTest, ResumeFromNonEmptyCollection) { for (const auto& secondaryIndexSpec : _secondaryIndexSpecs) { indexSpecs.append(secondaryIndexSpec); } - _mockServer->setCommandReply("listIndexes", createCursorResponse(_nss.ns(), indexSpecs.arr())); + _mockServer->setCommandReply("listIndexes", + createCursorResponse(_nss.ns_forTest(), indexSpecs.arr())); _mockServer->setCommandReply("find", createFindResponse()); ASSERT_EQUALS(Status::OK(), cloner->run()); @@ -942,7 +971,7 @@ TEST_F(TenantCollectionClonerTest, ResumeFromRecreatedCollection) { _mockServer->setCommandReply("count", createCountResponse(3)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); // majority read after listIndexes _mockServer->insert(_nss, BSON("_id" << 1)); @@ -954,7 +983,7 @@ TEST_F(TenantCollectionClonerTest, ResumeFromRecreatedCollection) { // Test that the cloner correctly skips cloning this collection as it must have been dropped and // re-created on the donor. And the drop and the re-create will be covered by the oplog // application phase. - ASSERT_EQUALS(_nss.ns(), _opObserver->nssToCapture.ns()); + ASSERT_EQUALS(_nss.ns_forTest(), _opObserver->nssToCapture.ns_forTest()); ASSERT_EQUALS(0, _opObserver->numDocsInserted); auto stats = cloner->getStats(); ASSERT_EQUALS(0, stats.documentsCopied); @@ -967,7 +996,7 @@ TEST_F(TenantCollectionClonerTest, ResumeFromRenamedCollection) { // Simulate that the collection already exists under a different name with no index and no data. 
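The resume tests above all stub the donor's `listIndexes` reply the same way before running the cloner. A fixture fragment mirroring that setup; `_mockServer`, `createCursorResponse`, and `createFindResponse` are the test fixture helpers used in the hunks:

```cpp
// Fixture fragment: stub listIndexes with the expected specs, then let the
// follow-up "find" succeed (it backs the majority read after listIndexes).
BSONArrayBuilder indexSpecs;
indexSpecs.append(_idIndexSpec);
for (const auto& secondaryIndexSpec : _secondaryIndexSpecs) {
    indexSpecs.append(secondaryIndexSpec);
}
_mockServer->setCommandReply("listIndexes",
                             createCursorResponse(_nss.ns_forTest(), indexSpecs.arr()));
_mockServer->setCommandReply("find", createFindResponse());
```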
const NamespaceString oldNss = - NamespaceString::createNamespaceString_forTest(_nss.db(), "testcoll_old"); + NamespaceString::createNamespaceString_forTest(_nss.db_forTest(), "testcoll_old"); ASSERT_OK(createCollection(oldNss, _options)); _mockServer->setCommandReply("count", createCountResponse(1)); @@ -976,7 +1005,8 @@ TEST_F(TenantCollectionClonerTest, ResumeFromRenamedCollection) { for (const auto& secondaryIndexSpec : _secondaryIndexSpecs) { indexSpecs.append(secondaryIndexSpec); } - _mockServer->setCommandReply("listIndexes", createCursorResponse(_nss.ns(), indexSpecs.arr())); + _mockServer->setCommandReply("listIndexes", + createCursorResponse(_nss.ns_forTest(), indexSpecs.arr())); _mockServer->setCommandReply("find", createFindResponse()); // Set up documents to be returned from upstream node. @@ -1009,15 +1039,15 @@ TEST_F(TenantCollectionClonerTest, NoDocumentsIfInsertedAfterListIndexes) { // Set up data for preliminary stages _mockServer->setCommandReply("count", createCountResponse(0)); _mockServer->setCommandReply("listIndexes", - createCursorResponse(_nss.ns(), BSON_ARRAY(_idIndexSpec))); + createCursorResponse(_nss.ns_forTest(), BSON_ARRAY(_idIndexSpec))); _mockServer->setCommandReply("find", createFindResponse()); auto collClonerAfterFailPoint = globalFailPointRegistry().find("hangAfterClonerStage"); auto timesEntered = collClonerAfterFailPoint->setMode( FailPoint::alwaysOn, 0, - fromjson("{cloner: 'TenantCollectionCloner', stage: 'listIndexes', nss: '" + _nss.ns() + - "'}")); + fromjson("{cloner: 'TenantCollectionCloner', stage: 'listIndexes', nss: '" + + _nss.ns_forTest() + "'}")); auto cloner = makeCollectionCloner(); stdx::thread clonerThread([&] { Client::initThread("ClonerRunner"); diff --git a/src/mongo/db/repl/tenant_database_cloner.cpp b/src/mongo/db/repl/tenant_database_cloner.cpp index ca06222ceaad2..1dc1044ce472f 100644 --- a/src/mongo/db/repl/tenant_database_cloner.cpp +++ b/src/mongo/db/repl/tenant_database_cloner.cpp @@ -28,19 +28,48 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/client.h" #include "mongo/db/commands/list_collections_filter.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/dbmessage.h" #include "mongo/db/repl/cloner_utils.h" #include "mongo/db/repl/database_cloner_gen.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/tenant_collection_cloner.h" #include "mongo/db/repl/tenant_database_cloner.h" #include "mongo/db/repl/tenant_migration_decoration.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -87,8 +116,9 @@ BaseCloner::AfterStageBehavior 
TenantDatabaseCloner::listCollectionsStage() { // This will be set after a successful listCollections command. _operationTime = Timestamp(); - auto collectionInfos = getClient()->getCollectionInfos( - DatabaseName(boost::none, _dbName), ListCollectionsFilter::makeTypeCollectionFilter()); + auto collectionInfos = + getClient()->getCollectionInfos(DatabaseNameUtil::deserialize(boost::none, _dbName), + ListCollectionsFilter::makeTypeCollectionFilter()); // Do a majority read on the sync source to make sure the collections listed exist on a majority // of nodes in the set. We do not check the rollbackId - rollback would lead to the sync source @@ -115,8 +145,7 @@ BaseCloner::AfterStageBehavior TenantDatabaseCloner::listCollectionsStage() { BSONObj readResult; BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(_operationTime); - getClient()->runCommand( - DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk); + getClient()->runCommand(DatabaseName::kAdmin, cmd, readResult, QueryOption_SecondaryOk); uassertStatusOKWithContext( getStatusFromCommandResult(readResult), "TenantDatabaseCloner failed to get listCollections result majority-committed"); @@ -198,8 +227,9 @@ BaseCloner::AfterStageBehavior TenantDatabaseCloner::listExistingCollectionsStag long long approxTotalDBSizeOnDisk = 0; std::vector clonedCollectionUUIDs; - auto collectionInfos = client.getCollectionInfos( - DatabaseName(boost::none, _dbName), ListCollectionsFilter::makeTypeCollectionFilter()); + auto collectionInfos = + client.getCollectionInfos(DatabaseNameUtil::deserialize(boost::none, _dbName), + ListCollectionsFilter::makeTypeCollectionFilter()); for (auto&& info : collectionInfos) { ListCollectionResult result; try { @@ -225,8 +255,9 @@ BaseCloner::AfterStageBehavior TenantDatabaseCloner::listExistingCollectionsStag clonedCollectionUUIDs.emplace_back(result.getInfo().getUuid()); BSONObj res; - client.runCommand( - DatabaseName(boost::none, _dbName), BSON("collStats" << result.getName()), res); + client.runCommand(DatabaseNameUtil::deserialize(boost::none, _dbName), + BSON("collStats" << result.getName()), + res); if (auto status = getStatusFromCommandResult(res); !status.isOK()) { LOGV2_WARNING(5522901, "Skipping recording of data size metrics for database due to failure " @@ -304,7 +335,7 @@ void TenantDatabaseCloner::postStage() { _stats.collectionStats.reserve(_collections.size()); for (const auto& coll : _collections) { _stats.collectionStats.emplace_back(); - _stats.collectionStats.back().ns = coll.first.ns(); + _stats.collectionStats.back().ns = coll.first.ns().toString(); } } for (const auto& coll : _collections) { @@ -335,8 +366,9 @@ void TenantDatabaseCloner::postStage() { logAttrs(sourceNss), "error"_attr = collStatus.toString(), "tenantId"_attr = _tenantId); - auto message = collStatus.withContext(str::stream() << "Error cloning collection '" - << sourceNss.toString() << "'"); + auto message = + collStatus.withContext(str::stream() << "Error cloning collection '" + << sourceNss.toStringForErrorMsg() << "'"); setSyncFailedStatus(collStatus.withReason(message.toString())); } { diff --git a/src/mongo/db/repl/tenant_database_cloner.h b/src/mongo/db/repl/tenant_database_cloner.h index f0aaaca38d3d9..3b7c3af2e1d83 100644 --- a/src/mongo/db/repl/tenant_database_cloner.h +++ b/src/mongo/db/repl/tenant_database_cloner.h @@ -29,12 +29,28 @@ #pragma once +#include +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" 
+#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/base_cloner.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/tenant_base_cloner.h" #include "mongo/db/repl/tenant_collection_cloner.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/tenant_database_cloner_test.cpp b/src/mongo/db/repl/tenant_database_cloner_test.cpp index 9f0d0ef7a3b4c..38fb3fc2f6101 100644 --- a/src/mongo/db/repl/tenant_database_cloner_test.cpp +++ b/src/mongo/db/repl/tenant_database_cloner_test.cpp @@ -27,18 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/clientcursor.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/repl/tenant_cloner_test_fixture.h" #include "mongo/db/repl/tenant_database_cloner.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/unittest/unittest.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -72,7 +88,7 @@ class TenantDatabaseClonerTest : public TenantClonerTestFixture { _storageInterface.insertDocumentsFn = [this](OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID, const std::vector& ops) { - const auto collInfo = &_collections[nsOrUUID.nss().value()]; + const auto collInfo = &_collections[nsOrUUID.nss()]; collInfo->numDocsInserted += ops.size(); return Status::OK(); }; diff --git a/src/mongo/db/repl/tenant_file_cloner.cpp b/src/mongo/db/repl/tenant_file_cloner.cpp index 0da47917aab7f..601168160c57b 100644 --- a/src/mongo/db/repl/tenant_file_cloner.cpp +++ b/src/mongo/db/repl/tenant_file_cloner.cpp @@ -28,19 +28,42 @@ */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include + +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/read_concern_args.h" #include 
"mongo/db/repl/tenant_file_cloner.h" #include "mongo/db/repl/tenant_migration_shard_merge_util.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" - +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -231,9 +254,10 @@ void TenantFileCloner::handleNextBatch(DBClientCursor& cursor) { } // Schedule the next set of writes. - auto&& scheduleResult = _scheduleFsWorkFn([=](const executor::TaskExecutor::CallbackArgs& cbd) { - writeDataToFilesystemCallback(cbd); - }); + auto&& scheduleResult = + _scheduleFsWorkFn([=, this](const executor::TaskExecutor::CallbackArgs& cbd) { + writeDataToFilesystemCallback(cbd); + }); if (!scheduleResult.isOK()) { Status newStatus = scheduleResult.getStatus().withContext( diff --git a/src/mongo/db/repl/tenant_file_cloner.h b/src/mongo/db/repl/tenant_file_cloner.h index 27ff89fbc3ae2..897a96ede8c27 100644 --- a/src/mongo/db/repl/tenant_file_cloner.h +++ b/src/mongo/db/repl/tenant_file_cloner.h @@ -31,14 +31,35 @@ #include #include +#include +#include +#include +#include #include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/repl/base_cloner.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/task_runner.h" #include "mongo/db/repl/tenant_base_cloner.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/executor/task_executor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/functional.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/progress_meter.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo::repl { diff --git a/src/mongo/db/repl/tenant_file_cloner_test.cpp b/src/mongo/db/repl/tenant_file_cloner_test.cpp index b26616fc23935..0dc73feedc56e 100644 --- a/src/mongo/db/repl/tenant_file_cloner_test.cpp +++ b/src/mongo/db/repl/tenant_file_cloner_test.cpp @@ -27,18 +27,35 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/db/repl/storage_interface.h" -#include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/repl/tenant_cloner_test_fixture.h" #include "mongo/db/repl/tenant_file_cloner.h" #include "mongo/db/repl/tenant_migration_shard_merge_util.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/log_test.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/interruptible.h" namespace mongo::repl { diff --git a/src/mongo/db/repl/tenant_file_importer_service.cpp b/src/mongo/db/repl/tenant_file_importer_service.cpp index b88437df0940f..c7b27bdf18379 100644 --- a/src/mongo/db/repl/tenant_file_importer_service.cpp +++ b/src/mongo/db/repl/tenant_file_importer_service.cpp @@ -30,35 +30,49 @@ #include "mongo/db/repl/tenant_file_importer_service.h" -#include +#include +#include +#include +#include #include +#include +#include -#include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/collection_options.h" -#include "mongo/db/catalog_raii.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/client.h" #include "mongo/db/commands/tenant_migration_recipient_cmds_gen.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" #include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replication_auth.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_shard_merge_util.h" #include "mongo/db/repl/tenant_migration_shared_data.h" #include "mongo/db/service_context.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_import.h" -#include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration +MONGO_FAIL_POINT_DEFINE(hangBeforeFileImporterThreadExit); namespace mongo::repl { using namespace fmt::literals; -using namespace shard_merge_utils; -using namespace tenant_migration_access_blocker; namespace { const auto _TenantFileImporterService = @@ -68,37 +82,13 @@ const 
ReplicaSetAwareServiceRegistry::Registerer _TenantFileImporterServiceRegisterer("TenantFileImporterService"); /** - * Makes a connection to the provided 'source'. + * Connect to the donor source. */ -Status connect(const HostAndPort& source, DBClientConnection* client) { - Status status = client->connect(source, "TenantFileImporterService", boost::none); - if (!status.isOK()) - return status; - return replAuthenticate(client).withContext(str::stream() - << "Failed to authenticate to " << source); -} - -void importCopiedFiles(OperationContext* opCtx, const UUID& migrationId) { - auto tempWTDirectory = fileClonerTempDir(migrationId); - uassert(6113315, - str::stream() << "Missing file cloner's temporary dbpath directory: " - << tempWTDirectory.string(), - boost::filesystem::exists(tempWTDirectory)); - - // TODO SERVER-63204: Evaluate correct place to remove the temporary WT dbpath. - ON_BLOCK_EXIT([&tempWTDirectory, &migrationId] { - LOGV2_INFO(6113324, - "Done importing files, removing the temporary WT dbpath", - "migrationId"_attr = migrationId, - "tempDbPath"_attr = tempWTDirectory.string()); - boost::system::error_code ec; - boost::filesystem::remove_all(tempWTDirectory, ec); - }); - - auto metadatas = - wiredTigerRollbackToStableAndGetMetadata(opCtx, tempWTDirectory.string(), migrationId); - wiredTigerImportFromBackupCursor( - opCtx, migrationId, tempWTDirectory.string(), std::move(metadatas)); +void connect(const BSONObj& metadataDoc, DBClientConnection* client) { + auto source = HostAndPort::parseThrowing(metadataDoc[shard_merge_utils::kDonorFieldName].str()); + uassertStatusOK(client->connect(source, "TenantFileImporterService", boost::none)); + uassertStatusOK(replAuthenticate(client).withContext( + str::stream() << "Failed to authenticate to " << source)); } } // namespace @@ -111,6 +101,11 @@ TenantFileImporterService* TenantFileImporterService::get(OperationContext* opCt return get(opCtx->getServiceContext()); } +TenantFileImporterService::TenantFileImporterService() + : _importFiles(shard_merge_utils::wiredTigerImport), _createConnection([]() { + return std::make_shared(true /* autoReconnect */); + }) {} + void TenantFileImporterService::startMigration(const UUID& migrationId) { _reset(); @@ -134,6 +129,13 @@ void TenantFileImporterService::startMigration(const UUID& migrationId) { _workerThread = std::make_unique([this, migrationId] { Client::initThread("TenantFileImporterService"); + + // TODO(SERVER-74661): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + try { _handleEvents(migrationId); } catch (const DBException& err) { @@ -141,6 +143,8 @@ void TenantFileImporterService::startMigration(const UUID& migrationId) { "TenantFileImporterService::_handleEvents encountered an error", "error"_attr = err.toString()); } + + hangBeforeFileImporterThreadExit.pauseWhileSet(); }); } @@ -244,15 +248,14 @@ void TenantFileImporterService::_handleEvents(const UUID& migrationId) { std::shared_ptr writerPool; std::shared_ptr sharedData; - auto setUpImporterResourcesIfNeeded = [&](const BSONObj& metadataDoc) { + auto setUpDonorConnectionIfNeeded = [&](const BSONObj& metadataDoc) { // Return early if we have already set up the donor connection. 
if (donorConnection) { return; } - auto conn = std::make_shared(true /* autoReconnect */); - auto donor = HostAndPort::parseThrowing(metadataDoc[kDonorFieldName].str()); - uassertStatusOK(connect(donor, conn.get())); + auto conn = _createConnection(); + connect(metadataDoc, conn.get()); stdx::lock_guard lk(_mutex); uassert(ErrorCodes::Interrupted, @@ -265,7 +268,6 @@ void TenantFileImporterService::_handleEvents(const UUID& migrationId) { makeReplWriterPool(tenantApplierThreadCount, "TenantFileImporterServiceWriter"_sd); _sharedData = std::make_shared( getGlobalServiceContext()->getFastClockSource(), _migrationId.get()); - donorConnection = _donorConnection; writerPool = _writerPool; sharedData = _sharedData; @@ -284,21 +286,20 @@ void TenantFileImporterService::_handleEvents(const UUID& migrationId) { case eventType::kNone: continue; case eventType::kLearnedFileName: { - // we won't have valid donor metadata until the first + // We won't have valid donor metadata until the first // 'TenantFileImporterService::learnedFilename' call, so we need to set up the // connection for the first kLearnedFileName event. - setUpImporterResourcesIfNeeded(event.metadataDoc); + setUpDonorConnectionIfNeeded(event.metadataDoc); - cloneFile(opCtx, - donorConnection.get(), - writerPool.get(), - sharedData.get(), - event.metadataDoc); + shard_merge_utils::cloneFile(opCtx, + donorConnection.get(), + writerPool.get(), + sharedData.get(), + event.metadataDoc); continue; } case eventType::kLearnedAllFilenames: - importCopiedFiles(opCtx, migrationId); - shard_merge_utils::createImportDoneMarkerLocalCollection(opCtx, migrationId); + _importFiles(opCtx, migrationId); // Take a stable checkpoint so that all the imported donor & marker collection // metadata infos are persisted to disk. opCtx->recoveryUnit()->waitUntilUnjournaledWritesDurable(opCtx, @@ -318,7 +319,7 @@ void TenantFileImporterService::_voteImportedFiles(OperationContext* opCtx, auto voteResponse = replCoord->runCmdOnPrimaryAndAwaitResponse( opCtx, - DatabaseName::kAdmin.db(), + DatabaseName::kAdmin.db().toString(), cmd.toBSON({}), [](executor::TaskExecutor::CallbackHandle handle) {}, [](executor::TaskExecutor::CallbackHandle handle) {}); diff --git a/src/mongo/db/repl/tenant_file_importer_service.h b/src/mongo/db/repl/tenant_file_importer_service.h index 2ac95e3bed648..58da785695a3f 100644 --- a/src/mongo/db/repl/tenant_file_importer_service.h +++ b/src/mongo/db/repl/tenant_file_importer_service.h @@ -29,14 +29,27 @@ #pragma once +#include #include - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/dbclient_connection.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/mutex.h" +#include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/producer_consumer_queue.h" #include "mongo/util/string_map.h" #include "mongo/util/uuid.h" @@ -49,7 +62,7 @@ class TenantFileImporterService : public ReplicaSetAwareService()> fn) { + _createConnection = fn; + }; + + /** + * Set the function used for importing file data. Used for testing. 
+ */ + void setImportFilesForTest( + std::function fn) { + _importFiles = fn; + }; + + BSONObj getState() { + stdx::lock_guard lk(_mutex); + auto migrationId = _migrationId ? _migrationId->toString() : "(empty)"; + auto state = stateToString(_state); + return BSON("migrationId" << migrationId << "state" << state); + } + private: void onInitialDataAvailable(OperationContext*, bool) final {} @@ -203,5 +238,11 @@ class TenantFileImporterService : public ReplicaSetAwareService _eventQueue; // (I) pointer set under mutex, copied by callers. + + // Called after all filenames have been learned to import file data. + std::function _importFiles = {}; // (W) + + // Used to create a new DBClientConnection to the donor. + std::function()> _createConnection = {}; // (W) }; } // namespace mongo::repl diff --git a/src/mongo/db/repl/tenant_file_importer_service_test.cpp b/src/mongo/db/repl/tenant_file_importer_service_test.cpp new file mode 100644 index 0000000000000..f24cf34163c8e --- /dev/null +++ b/src/mongo/db/repl/tenant_file_importer_service_test.cpp @@ -0,0 +1,233 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/repl/tenant_file_importer_service.h" +#include "mongo/db/repl/tenant_migration_shard_merge_util.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/executor/task_executor_test_fixture.h" +#include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/log_test.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +namespace mongo { + +using executor::TaskExecutor; +using executor::ThreadPoolExecutorTest; + +namespace repl { +class TenantFileImporterServiceTest : public ServiceContextMongoDTest { +public: + void setUp() override { + ServiceContextMongoDTest::setUp(); + auto serviceContext = getServiceContext(); + auto replCoord = std::make_unique(serviceContext); + replCoord->setRunCmdOnPrimaryAndAwaitResponseFunction([this](OperationContext* opCtx, + const std::string& dbName, + const BSONObj& cmdObj, + ReplicationCoordinator:: + OnRemoteCmdScheduledFn + onRemoteCmdScheduled, + ReplicationCoordinator:: + OnRemoteCmdCompleteFn + onRemoteCmdComplete) { + runCmdOnPrimaryAndAwaitResponseFnCalls.push_back(RunCmdOnPrimaryCall{dbName, cmdObj}); + return runCmdOnPrimaryAndAwaitResponseFnResponse; + }); + ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY)); + ReplicationCoordinator::set(serviceContext, std::move(replCoord)); + StorageInterface::set(serviceContext, std::make_unique()); + } + + void tearDown() override { + ReplicaSetAwareServiceRegistry::get(getServiceContext()).onShutdown(); + StorageInterface::set(getServiceContext(), {}); + ReplicationCoordinator::set(getServiceContext(), {}); + ServiceContextMongoDTest::tearDown(); + } + + struct RunCmdOnPrimaryCall { + std::string dbName; + BSONObj cmdObj; + }; + std::vector runCmdOnPrimaryAndAwaitResponseFnCalls; + BSONObj runCmdOnPrimaryAndAwaitResponseFnResponse = BSON("ok" << 1); + +private: + unittest::MinimumLoggedSeverityGuard _replicationSeverityGuard{ + logv2::LogComponent::kReplication, logv2::LogSeverity::Debug(1)}; + unittest::MinimumLoggedSeverityGuard _tenantMigrationSeverityGuard{ + logv2::LogComponent::kTenantMigration, logv2::LogSeverity::Debug(1)}; +}; + +TEST_F(TenantFileImporterServiceTest, WillNotStartConcurrentMigrationsForDifferentMigrationIds) { + auto tenantFileImporterService = repl::TenantFileImporterService::get(getServiceContext()); + + auto migrationId = UUID::gen(); + tenantFileImporterService->startMigration(migrationId); + + // startMigration calls for other migrationIds are ignored. 
+ tenantFileImporterService->startMigration(UUID::gen()); + + auto state = tenantFileImporterService->getState(); + ASSERT_EQ(state["migrationId"].str(), migrationId.toString()); + ASSERT_EQ(state["state"].str(), "started"); +} + +TEST_F(TenantFileImporterServiceTest, WillNotStartConcurrentMigrationsForTheSameMigrationId) { + auto tenantFileImporterService = repl::TenantFileImporterService::get(getServiceContext()); + + auto migrationId = UUID::gen(); + tenantFileImporterService->startMigration(migrationId); + + // startMigration calls with the same migrationId are ignored. + tenantFileImporterService->startMigration(migrationId); + + auto state = tenantFileImporterService->getState(); + ASSERT_EQ(state["migrationId"].str(), migrationId.toString()); + ASSERT_EQ(state["state"].str(), "started"); +} + +TEST_F(TenantFileImporterServiceTest, CanBeSafelyInterruptedBeforeMigrationStart) { + auto tenantFileImporterService = repl::TenantFileImporterService::get(getServiceContext()); + tenantFileImporterService->interruptAll(); + auto state = tenantFileImporterService->getState(); + ASSERT_EQ(state["state"].str(), "uninitialized"); +} + +TEST_F(TenantFileImporterServiceTest, CanBeSafelyInterruptedAfterMigrationStart) { + auto tenantFileImporterService = repl::TenantFileImporterService::get(getServiceContext()); + + auto migrationId = UUID::gen(); + tenantFileImporterService->startMigration(migrationId); + auto state = tenantFileImporterService->getState(); + ASSERT_EQ(state["migrationId"].str(), migrationId.toString()); + ASSERT_EQ(state["state"].str(), "started"); + + tenantFileImporterService->interruptAll(); + state = tenantFileImporterService->getState(); + ASSERT_EQ(state["migrationId"].str(), migrationId.toString()); + ASSERT_EQ(state["state"].str(), "interrupted"); +} + +TEST_F(TenantFileImporterServiceTest, ImportsFilesWhenAllFilenamesLearned) { + auto migrationId = UUID::gen(); + auto tenantFileImporterService = repl::TenantFileImporterService::get(getServiceContext()); + + std::string fileData = "Here is the file data"; + auto bindata = BSONBinData(fileData.data(), fileData.size(), BinDataGeneral); + CursorResponse aggResponse( + NamespaceString::makeCollectionlessAggregateNSS(DatabaseName::kAdmin), + 0 /* cursorId */, + {BSON("byteOffset" << 0 << "endOfFile" << true << "data" << bindata)}); + + MockRemoteDBServer server("test"); + auto conn = std::make_shared(&server); + tenantFileImporterService->setCreateConnectionForTest([&]() { return conn; }); + + int importFilesCallCount = 0; + tenantFileImporterService->setImportFilesForTest( + [&](OperationContext* opCtx, const UUID& migrationId) { importFilesCallCount++; }); + + server.setCommandReply("aggregate", aggResponse.toBSONAsInitialResponse()); + + auto filePath = shard_merge_utils::fileClonerTempDir(migrationId); + auto metadataDoc = + BSON("filename" << filePath.string() + "/some-file.wt" << shard_merge_utils::kDonorFieldName + << server.getServerHostAndPort().toString() + << shard_merge_utils::kMigrationIdFieldName << migrationId + << shard_merge_utils::kBackupIdFieldName << UUID::gen() << "remoteDbPath" + << filePath.string() << "fileSize" << std::to_string(fileData.size()) + << shard_merge_utils::kDonorDbPathFieldName << filePath.string()); + + auto hangBeforeFileImporterThreadExit = + globalFailPointRegistry().find("hangBeforeFileImporterThreadExit"); + hangBeforeFileImporterThreadExit->setMode(FailPoint::alwaysOn); + + tenantFileImporterService->startMigration(migrationId); + + 
tenantFileImporterService->learnedFilename(migrationId, metadataDoc); + auto state = tenantFileImporterService->getState(); + ASSERT_EQ(state["migrationId"].str(), migrationId.toString()); + ASSERT_EQ(state["state"].str(), "learned filename"); + + tenantFileImporterService->learnedAllFilenames(migrationId); + + state = tenantFileImporterService->getState(); + ASSERT_EQ(state["migrationId"].str(), migrationId.toString()); + ASSERT_EQ(state["state"].str(), "learned all filenames"); + + hangBeforeFileImporterThreadExit->waitForTimesEntered(1); + + ASSERT(boost::filesystem::exists(filePath.string() + "/some-file.wt")); + ASSERT_EQ(fileData.size(), boost::filesystem::file_size(filePath.string() + "/some-file.wt")); + ASSERT_EQ(importFilesCallCount, 1); + ASSERT_EQ(runCmdOnPrimaryAndAwaitResponseFnCalls.size(), 1); + + auto recipientVoteImportedFilesCmdCall = runCmdOnPrimaryAndAwaitResponseFnCalls.front(); + ASSERT_EQ(recipientVoteImportedFilesCmdCall.dbName, DatabaseName::kAdmin.db().toString()); + ASSERT_BSONOBJ_EQ(recipientVoteImportedFilesCmdCall.cmdObj, + BSON("recipientVoteImportedFiles" << 1 << "migrationId" << migrationId + << "from" + << ":27017" + << "success" << true)); + + hangBeforeFileImporterThreadExit->setMode(FailPoint::off); +} +} // namespace repl +} // namespace mongo diff --git a/src/mongo/db/repl/tenant_migration_access_blocker.h b/src/mongo/db/repl/tenant_migration_access_blocker.h index 0ff8928d5c795..fcd6f43796e97 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker.h +++ b/src/mongo/db/repl/tenant_migration_access_blocker.h @@ -63,8 +63,8 @@ class TenantMigrationAccessBlocker { virtual Status checkIfLinearizableReadWasAllowed(OperationContext* opCtx) = 0; - virtual SharedSemiFuture getCanReadFuture(OperationContext* opCtx, - StringData command) = 0; + virtual SharedSemiFuture getCanRunCommandFuture(OperationContext* opCtx, + StringData command) = 0; // // Called by index build user threads before acquiring an index build slot, and again right @@ -72,6 +72,16 @@ class TenantMigrationAccessBlocker { // virtual Status checkIfCanBuildIndex() = 0; + /** + * Checks if opening a new change stream should block. + */ + virtual Status checkIfCanOpenChangeStream() = 0; + + /** + * Checks if getMores for change streams should fail. + */ + virtual Status checkIfCanGetMoreChangeStream() = 0; + // We suspend TTL deletions at the recipient side to avoid the race when a document is updated // at the donor side, which may prevent it from being garbage collected by TTL, while the // recipient side document is deleted by the TTL. 
The donor side update will fail to propagate diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp index 0b020e66722cf..28ba3382f148b 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp +++ b/src/mongo/db/repl/tenant_migration_access_blocker_registry.cpp @@ -29,9 +29,32 @@ #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" + +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" -#include "mongo/util/database_name_util.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -72,6 +95,10 @@ TenantMigrationAccessBlockerRegistry::TenantMigrationAccessBlockerRegistry() { threadPoolOptions.poolName = "TenantMigrationBlockerAsyncThreadPool"; threadPoolOptions.onCreateThread = [](const std::string& threadName) { Client::initThread(threadName.c_str()); + + // TODO(SERVER-74661): Please revisit if this thread could be made killable. + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); }; _asyncBlockingOperationsExecutor = std::make_shared( std::make_unique(threadPoolOptions), @@ -146,15 +173,6 @@ void TenantMigrationAccessBlockerRegistry::_remove(WithLock, } } -void TenantMigrationAccessBlockerRegistry::remove(const TenantId& tenantId, MtabType type) { - stdx::lock_guard lg(_mutex); - if (type == MtabType::kDonor && _getGlobalTenantDonorAccessBlocker(lg)) { - tasserted(8423348, "Using remove() for new-style donor access blocker"); - } - - _remove(lg, tenantId, type); -} - void TenantMigrationAccessBlockerRegistry::removeAccessBlockersForMigration( const UUID& migrationId, TenantMigrationAccessBlocker::BlockerType type) { stdx::lock_guard lg(_mutex); @@ -242,25 +260,6 @@ TenantMigrationAccessBlockerRegistry::_getGlobalTenantDonorAccessBlocker(WithLoc it->second.getDonorAccessBlocker()); } -std::shared_ptr -TenantMigrationAccessBlockerRegistry::getAccessBlockerForMigration( - const UUID& migrationId, TenantMigrationAccessBlocker::BlockerType type) { - stdx::lock_guard lg(_mutex); - auto mtab = - std::find_if(_tenantMigrationAccessBlockers.begin(), - _tenantMigrationAccessBlockers.end(), - [type, migrationId](const auto& pair) { - return pair.second.getAccessBlocker(type) && - pair.second.getAccessBlocker(type)->getMigrationId() == migrationId; - }); - - if (mtab == _tenantMigrationAccessBlockers.end()) { - return nullptr; - } - - return mtab->second.getAccessBlocker(type); -} - std::shared_ptr TenantMigrationAccessBlockerRegistry::_getGlobalTenantDonorAccessBlocker( WithLock lk, const DatabaseName& dbName) const { @@ -306,6 +305,22 @@ TenantMigrationAccessBlockerRegistry::getDonorAccessBlockersForMigration(const U return blockers; } +std::vector> +TenantMigrationAccessBlockerRegistry::getRecipientAccessBlockersForMigration( + const UUID& migrationId) { + stdx::lock_guard lg(_mutex); + + std::vector> blockers; + for 
(const auto& pair : _tenantMigrationAccessBlockers) { + if (auto recipientMtab = pair.second.getRecipientAccessBlocker(); + recipientMtab && recipientMtab->getMigrationId() == migrationId) { + blockers.push_back(recipientMtab); + } + } + + return blockers; +} + void TenantMigrationAccessBlockerRegistry::applyAll(TenantMigrationAccessBlocker::BlockerType type, applyAllCallback&& callback) { stdx::lock_guard lg(_mutex); diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_registry.h b/src/mongo/db/repl/tenant_migration_access_blocker_registry.h index 4460cd44f5cc5..2877abe5076fa 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker_registry.h +++ b/src/mongo/db/repl/tenant_migration_access_blocker_registry.h @@ -29,13 +29,33 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" #include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -126,11 +146,6 @@ class TenantMigrationAccessBlockerRegistry { */ void addGlobalDonorAccessBlocker(std::shared_ptr mtab); - /** - * Removes the entry for (tenantId, mtab) - */ - void remove(const TenantId& tenantId, TenantMigrationAccessBlocker::BlockerType type); - /** * Remove all access blockers of the provided type for a migration. */ @@ -149,13 +164,6 @@ class TenantMigrationAccessBlockerRegistry { boost::optional getAccessBlockersForDbName( const DatabaseName& dbName); - - /** - * Returns the access blocker associated with a migration, if it exists. - */ - std::shared_ptr getAccessBlockerForMigration( - const UUID& migrationId, TenantMigrationAccessBlocker::BlockerType type); - /** * Iterates through each of the TenantMigrationAccessBlockers and * returns the first 'TenantMigrationAccessBlocker' it finds whose 'tenantId' is a prefix for @@ -178,6 +186,12 @@ class TenantMigrationAccessBlockerRegistry { std::vector> getDonorAccessBlockersForMigration(const UUID& migrationId); + /** + * Return the recipient access blockers associated with a migration. 
+ */ + std::vector> + getRecipientAccessBlockersForMigration(const UUID& migrationId); + using applyAllCallback = std::function& mtab)>; /** diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_registry_test.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_registry_test.cpp index 0e5fd37c5fc99..5d2319289e5f0 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker_registry_test.cpp +++ b/src/mongo/db/repl/tenant_migration_access_blocker_registry_test.cpp @@ -28,8 +28,25 @@ */ #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" + +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { @@ -81,26 +98,6 @@ TEST_F(TenantMigrationAccessBlockerRegistryTest, AddAccessBlocker) { ErrorCodes::ConflictingServerlessOperation); } -TEST_F(TenantMigrationAccessBlockerRegistryTest, RemoveAccessBlocker) { - auto& registry = TenantMigrationAccessBlockerRegistry::get(getServiceContext()); - const auto uuid = UUID::gen(); - const auto tenant = TenantId{OID::gen()}; - - registry.add(tenant, - std::make_shared(getServiceContext(), uuid)); - ASSERT(registry.getTenantMigrationAccessBlockerForTenantId( - tenant, TenantMigrationAccessBlocker::BlockerType::kDonor)); - - registry.remove(tenant, TenantMigrationAccessBlocker::BlockerType::kRecipient); - ASSERT(registry.getTenantMigrationAccessBlockerForTenantId( - tenant, TenantMigrationAccessBlocker::BlockerType::kDonor)); - - registry.remove(tenant, TenantMigrationAccessBlocker::BlockerType::kDonor); - - ASSERT_FALSE(registry.getTenantMigrationAccessBlockerForTenantId( - tenant, TenantMigrationAccessBlocker::BlockerType::kDonor)); -} - TEST_F(TenantMigrationAccessBlockerRegistryTest, RemoveAccessBlockersForMigration) { auto& registry = TenantMigrationAccessBlockerRegistry::get(getServiceContext()); const auto uuid = UUID::gen(); @@ -139,13 +136,14 @@ TEST_F(TenantMigrationAccessBlockerRegistryTest, GetAccessBlockerForDbName) { const auto tenant = TenantId{OID::gen()}; const auto uuid = UUID::gen(); - ASSERT_FALSE( - registry.getAccessBlockersForDbName(DatabaseName{boost::none, tenant.toString() + "_foo"})); + ASSERT_FALSE(registry.getAccessBlockersForDbName( + DatabaseName::createDatabaseName_forTest(boost::none, tenant.toString() + "_foo"))); // If the MTAB registry is empty (such as in non-serverless deployments) using an invalid // tenantId simply returns boost::none. This is required as the underscore can be present in db // names for non-serverless deployments. - ASSERT_FALSE(registry.getAccessBlockersForDbName(DatabaseName{boost::none, "tenant_foo"})); + ASSERT_FALSE(registry.getAccessBlockersForDbName( + DatabaseName::createDatabaseName_forTest(boost::none, "tenant_foo"))); auto globalAccessBlocker = std::make_shared(getServiceContext(), UUID::gen()); @@ -153,46 +151,53 @@ TEST_F(TenantMigrationAccessBlockerRegistryTest, GetAccessBlockerForDbName) { // If the MTAB registry is not empty, it implies we have a serverless deployment. In that case // anything before the underscore should be a valid TenantId. 
- ASSERT_THROWS_CODE(registry.getAccessBlockersForDbName(DatabaseName{boost::none, "tenant_foo"}), + ASSERT_THROWS_CODE(registry.getAccessBlockersForDbName( + DatabaseName::createDatabaseName_forTest(boost::none, "tenant_foo")), DBException, ErrorCodes::BadValue); - ASSERT_EQ( - registry.getAccessBlockersForDbName(DatabaseName{boost::none, tenant.toString() + "_foo"}) - ->getDonorAccessBlocker(), - globalAccessBlocker); - ASSERT_FALSE(registry.getAccessBlockersForDbName(DatabaseName{boost::none, "admin"})); + ASSERT_EQ(registry + .getAccessBlockersForDbName(DatabaseName::createDatabaseName_forTest( + boost::none, tenant.toString() + "_foo")) + ->getDonorAccessBlocker(), + globalAccessBlocker); + ASSERT_FALSE(registry.getAccessBlockersForDbName(DatabaseName::kAdmin)); auto recipientBlocker = std::make_shared(getServiceContext(), uuid); registry.add(tenant, recipientBlocker); - ASSERT_EQ( - registry.getAccessBlockersForDbName(DatabaseName{boost::none, tenant.toString() + "_foo"}) - ->getDonorAccessBlocker(), - globalAccessBlocker); - ASSERT_EQ( - registry.getAccessBlockersForDbName(DatabaseName{boost::none, tenant.toString() + "_foo"}) - ->getRecipientAccessBlocker(), - recipientBlocker); + ASSERT_EQ(registry + .getAccessBlockersForDbName(DatabaseName::createDatabaseName_forTest( + boost::none, tenant.toString() + "_foo")) + ->getDonorAccessBlocker(), + globalAccessBlocker); + ASSERT_EQ(registry + .getAccessBlockersForDbName(DatabaseName::createDatabaseName_forTest( + boost::none, tenant.toString() + "_foo")) + ->getRecipientAccessBlocker(), + recipientBlocker); auto donorBlocker = std::make_shared(getServiceContext(), uuid); registry.add(tenant, donorBlocker); - ASSERT_EQ( - registry.getAccessBlockersForDbName(DatabaseName{boost::none, tenant.toString() + "_foo"}) - ->getDonorAccessBlocker(), - donorBlocker); - ASSERT_EQ( - registry.getAccessBlockersForDbName(DatabaseName{boost::none, tenant.toString() + "_foo"}) - ->getRecipientAccessBlocker(), - recipientBlocker); + ASSERT_EQ(registry + .getAccessBlockersForDbName(DatabaseName::createDatabaseName_forTest( + boost::none, tenant.toString() + "_foo")) + ->getDonorAccessBlocker(), + donorBlocker); + ASSERT_EQ(registry + .getAccessBlockersForDbName(DatabaseName::createDatabaseName_forTest( + boost::none, tenant.toString() + "_foo")) + ->getRecipientAccessBlocker(), + recipientBlocker); { RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); // since we enabled multitenancySupport, having underscore in the dbName won't throw because // we have constructed a DatabaseName with a TenantId. Therefore `my` won't be identified as // the tenantId. 
- const DatabaseName validUnderscoreDbName = DatabaseName(tenant, "my_Db"); + const DatabaseName validUnderscoreDbName = + DatabaseName::createDatabaseName_forTest(tenant, "my_Db"); ASSERT(registry.getAccessBlockersForDbName(validUnderscoreDbName) != boost::none); } } @@ -251,18 +256,15 @@ TEST_F(TenantMigrationAccessBlockerRegistryTest, GetDonorAccessBlockersForMigrat TenantId{OID::gen()}, std::make_shared(getServiceContext(), UUID::gen())); ASSERT(registry.getDonorAccessBlockersForMigration(uuid).empty()); - std::cout << "1" << std::endl; auto donorBlocker = std::make_shared(getServiceContext(), uuid); registry.add(TenantId{OID::gen()}, donorBlocker); assertVector(registry.getDonorAccessBlockersForMigration(uuid), {donorBlocker}); - std::cout << "2" << std::endl; auto globalBlocker = std::make_shared(getServiceContext(), uuid); registry.addGlobalDonorAccessBlocker(globalBlocker); assertVector(registry.getDonorAccessBlockersForMigration(uuid), {globalBlocker, donorBlocker}); - std::cout << "3" << std::endl; ASSERT(registry.getDonorAccessBlockersForMigration(UUID::gen()).empty()); @@ -271,4 +273,39 @@ TEST_F(TenantMigrationAccessBlockerRegistryTest, GetDonorAccessBlockersForMigrat ASSERT_EQ(registry.getDonorAccessBlockersForMigration(uuid).size(), 3); } +TEST_F(TenantMigrationAccessBlockerRegistryTest, GetRecipientAccessBlockersForMigration) { + auto& registry = TenantMigrationAccessBlockerRegistry::get(getServiceContext()); + const auto uuid = UUID::gen(); + + auto assertVector = + [](const std::vector>& result, + const std::vector>& expected) { + // Order might change. Check that vector size is equal and all expected entries are + // found. + ASSERT_EQ(result.size(), expected.size()); + for (const auto& ptr : expected) { + ASSERT_NE(std::find_if(result.begin(), + result.end(), + [ptr](const auto& entry) { return entry == ptr; }), + result.end()); + } + }; + + registry.add( + TenantId{OID::gen()}, + std::make_shared(getServiceContext(), UUID::gen())); + ASSERT(registry.getRecipientAccessBlockersForMigration(uuid).empty()); + + auto recipientBlocker = + std::make_shared(getServiceContext(), uuid); + registry.add(TenantId{OID::gen()}, recipientBlocker); + assertVector(registry.getRecipientAccessBlockersForMigration(uuid), {recipientBlocker}); + + auto secondBlocker = + std::make_shared(getServiceContext(), uuid); + registry.add(TenantId{OID::gen()}, secondBlocker); + assertVector(registry.getRecipientAccessBlockersForMigration(uuid), + {recipientBlocker, secondBlocker}); +} + } // namespace mongo diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_server_status_section.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_server_status_section.cpp index 98ef5d31ec88a..bceafd412e843 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker_server_status_section.cpp +++ b/src/mongo/db/repl/tenant_migration_access_blocker_server_status_section.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp index 95aeb6312feb4..63417630d1c10 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp +++ b/src/mongo/db/repl/tenant_migration_access_blocker_util.cpp @@ -28,27 +28,63 @@ */ -#include "mongo/platform/basic.h" -#include "mongo/util/str.h" - -#include "mongo/db/repl/tenant_migration_access_blocker_util.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/multitenancy_gen.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" +#include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_conflict_info.h" #include "mongo/db/repl/tenant_migration_decoration.h" +#include "mongo/db/repl/tenant_migration_donor_access_blocker.h" +#include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/serverless/serverless_types_gen.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" -#include "mongo/db/serverless/shard_split_utils.h" -#include "mongo/executor/network_interface_factory.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/transport/service_executor.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -256,53 +292,13 @@ void fassertOnUnsafeInitialSync(const UUID& migrationId) { "migrationId"_attr = migrationId); } -std::shared_ptr getDonorAccessBlockerForMigration( - ServiceContext* serviceContext, const UUID& migrationId) { - return checked_pointer_cast( - TenantMigrationAccessBlockerRegistry::get(serviceContext) - .getAccessBlockerForMigration(migrationId, - TenantMigrationAccessBlocker::BlockerType::kDonor)); -} - -std::shared_ptr 
getRecipientAccessBlockerForMigration( - ServiceContext* serviceContext, const UUID& migrationId) { - return checked_pointer_cast( - TenantMigrationAccessBlockerRegistry::get(serviceContext) - .getAccessBlockerForMigration(migrationId, - TenantMigrationAccessBlocker::BlockerType::kRecipient)); -} - -std::shared_ptr getTenantMigrationRecipientAccessBlocker( - ServiceContext* const serviceContext, StringData tenantId) { - - TenantId tid = TenantId::parseFromString(tenantId); - - return checked_pointer_cast( - TenantMigrationAccessBlockerRegistry::get(serviceContext) - .getTenantMigrationAccessBlockerForTenantId(tid, MtabType::kRecipient)); -} - -void addTenantMigrationRecipientAccessBlocker(ServiceContext* serviceContext, - const StringData& tenantId, - const UUID& migrationId) { - if (getTenantMigrationRecipientAccessBlocker(serviceContext, tenantId)) { - return; - } - - auto mtab = - std::make_shared(serviceContext, migrationId); - - const auto tid = TenantId::parseFromString(tenantId); - TenantMigrationAccessBlockerRegistry::get(serviceContext).add(tid, mtab); -} - void validateNssIsBeingMigrated(const boost::optional& tenantId, const NamespaceString& nss, const UUID& migrationId) { if (!tenantId) { uassert(ErrorCodes::InvalidTenantId, str::stream() << "Failed to extract a valid tenant from namespace '" - << nss.toStringWithTenantId() << "'.", + << nss.toStringForErrorMsg() << "'.", nss.isOnInternalDb()); return; } @@ -311,12 +307,12 @@ void validateNssIsBeingMigrated(const boost::optional& tenantId, .getTenantMigrationAccessBlockerForTenantId( *tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); uassert(ErrorCodes::InvalidTenantId, - str::stream() << "The collection '" << nss.toStringWithTenantId() + str::stream() << "The collection '" << nss.toStringForErrorMsg() << "' does not belong to a tenant being migrated.", mtab); uassert(ErrorCodes::InvalidTenantId, - str::stream() << "The collection '" << nss.toStringWithTenantId() + str::stream() << "The collection '" << nss.toStringForErrorMsg() << "' is not being migrated in migration " << migrationId, mtab->getMigrationId() == migrationId); } @@ -372,9 +368,13 @@ TenantMigrationDonorDocument parseDonorStateDocument(const BSONObj& doc) { return donorStateDoc; } -SemiFuture checkIfCanReadOrBlock(OperationContext* opCtx, - const DatabaseName& dbName, - const OpMsgRequest& request) { +SemiFuture checkIfCanRunCommandOrBlock(OperationContext* opCtx, + const DatabaseName& dbName, + const OpMsgRequest& request) { + if (!repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { + return Status::OK(); + } + // We need to check both donor and recipient access blockers in the case where two // migrations happen back-to-back before the old recipient state (from the first // migration) is garbage collected. @@ -387,46 +387,50 @@ SemiFuture checkIfCanReadOrBlock(OperationContext* opCtx, // Source to cancel the timeout if the operation completed in time. CancellationSource cancelTimeoutSource; - // Source to cancel waiting on the 'canReadFutures'. - CancellationSource cancelCanReadSource(opCtx->getCancellationToken()); + // Source to cancel waiting on the canRunCommandFuture's. + CancellationSource cancelCanRunCommandSource(opCtx->getCancellationToken()); const auto donorMtab = mtabPair->getDonorAccessBlocker(); const auto recipientMtab = mtabPair->getRecipientAccessBlocker(); - // A vector of futures where the donor access blocker's 'getCanReadFuture' will always precede - // the recipient's. 
+ // A vector of futures where the donor access blocker's 'getCanRunCommandFuture' will always + // precede the recipient's. std::vector> futures; std::shared_ptr executor; if (donorMtab) { - auto canReadFuture = donorMtab->getCanReadFuture(opCtx, request.getCommandName()); - if (canReadFuture.isReady()) { - auto status = canReadFuture.getNoThrow(); + auto canRunCommandFuture = + donorMtab->getCanRunCommandFuture(opCtx, request.getCommandName()); + if (canRunCommandFuture.isReady()) { + auto status = canRunCommandFuture.getNoThrow(); donorMtab->recordTenantMigrationError(status); if (!recipientMtab) { return status; } } executor = blockerRegistry.getAsyncBlockingOperationsExecutor(); - futures.emplace_back(std::move(canReadFuture).semi().thenRunOn(executor)); + futures.emplace_back(std::move(canRunCommandFuture).semi().thenRunOn(executor)); } if (recipientMtab) { - auto canReadFuture = recipientMtab->getCanReadFuture(opCtx, request.getCommandName()); - if (canReadFuture.isReady()) { - auto status = canReadFuture.getNoThrow(); + auto canRunCommandFuture = + recipientMtab->getCanRunCommandFuture(opCtx, request.getCommandName()); + if (canRunCommandFuture.isReady()) { + auto status = canRunCommandFuture.getNoThrow(); recipientMtab->recordTenantMigrationError(status); if (!donorMtab) { return status; } } executor = blockerRegistry.getAsyncBlockingOperationsExecutor(); - futures.emplace_back(std::move(canReadFuture).semi().thenRunOn(executor)); + futures.emplace_back(std::move(canRunCommandFuture).semi().thenRunOn(executor)); } if (opCtx->hasDeadline()) { // Cancel waiting for operations if we timeout. executor->sleepUntil(opCtx->getDeadline(), cancelTimeoutSource.token()) - .getAsync([cancelCanReadSource](auto) mutable { cancelCanReadSource.cancel(); }); + .getAsync( + [cancelCanRunCommandSource](auto) mutable { cancelCanRunCommandSource.cancel(); }); } - return future_util::withCancellation(whenAll(std::move(futures)), cancelCanReadSource.token()) + return future_util::withCancellation(whenAll(std::move(futures)), + cancelCanRunCommandSource.token()) .thenRunOn(executor) .then([cancelTimeoutSource, donorMtab, recipientMtab](std::vector results) mutable { cancelTimeoutSource.cancel(); @@ -456,30 +460,34 @@ SemiFuture checkIfCanReadOrBlock(OperationContext* opCtx, return Status::OK(); }) - .onError( - [cancelTimeoutSource, - cancelCanReadSource, - donorMtab, - recipientMtab, - timeoutError = opCtx->getTimeoutError()](Status status) mutable { - auto isCanceledDueToTimeout = cancelTimeoutSource.token().isCanceled(); - - if (!isCanceledDueToTimeout) { - cancelTimeoutSource.cancel(); - } + .onError([cancelTimeoutSource, + cancelCanRunCommandSource, + donorMtab, + recipientMtab, + timeoutError = opCtx->getTimeoutError()]( + Status status) mutable { + auto isCanceledDueToTimeout = cancelTimeoutSource.token().isCanceled(); + + if (!isCanceledDueToTimeout) { + cancelTimeoutSource.cancel(); + } - if (isCanceledDueToTimeout) { - return Status(timeoutError, - "Blocked read timed out waiting for an internal data migration " - "to commit or abort"); - } + if (isCanceledDueToTimeout) { + return Status(timeoutError, + "Blocked command timed out waiting for an internal data migration " + "to commit or abort"); + } - return status.withContext("Canceled read blocked by internal data migration"); - }) + return status.withContext("Canceled command blocked by internal data migration"); + }) .semi(); // To require continuation in the user executor. 
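The hunk above races the access blockers' futures against the operation's deadline using two cancellation sources. Below is a condensed sketch of that shape, built only from primitives that appear in the hunk (CancellationSource, whenAll, future_util::withCancellation, ExecutorFuture); the waitForBlockers() helper and its signature are invented for illustration, and inspection of the individual blocker results is elided:

```cpp
// Illustrative sketch only -- mirrors the cancellation wiring of
// checkIfCanRunCommandOrBlock above. waitForBlockers() is a hypothetical helper.
SemiFuture<void> waitForBlockers(OperationContext* opCtx,
                                 std::vector<ExecutorFuture<void>> futures,
                                 std::shared_ptr<executor::TaskExecutor> executor) {
    // Fired when the operation's deadline expires first.
    CancellationSource cancelTimeoutSource;
    // Fired when the operation is killed, or by the deadline callback below; cancels the wait.
    CancellationSource cancelWaitSource(opCtx->getCancellationToken());

    if (opCtx->hasDeadline()) {
        // Once the deadline passes, stop waiting on the blocker futures.
        executor->sleepUntil(opCtx->getDeadline(), cancelTimeoutSource.token())
            .getAsync([cancelWaitSource](Status) mutable { cancelWaitSource.cancel(); });
    }

    return future_util::withCancellation(whenAll(std::move(futures)), cancelWaitSource.token())
        .thenRunOn(executor)
        .then([cancelTimeoutSource](auto) mutable {
            // Finished in time: cancel the pending deadline sleep.
            cancelTimeoutSource.cancel();
            return Status::OK();
        })
        .onError([cancelTimeoutSource, timeoutError = opCtx->getTimeoutError()](
                     Status status) mutable {
            if (cancelTimeoutSource.token().isCanceled()) {
                // The deadline fired first.
                return Status(timeoutError, "Blocked command timed out waiting for migration");
            }
            cancelTimeoutSource.cancel();
            return status;
        })
        .semi();
}
```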
} void checkIfLinearizableReadWasAllowedOrThrow(OperationContext* opCtx, const DatabaseName& dbName) { + if (!repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { + return; + } + if (repl::ReadConcernArgs::get(opCtx).getLevel() == repl::ReadConcernLevel::kLinearizableReadConcern) { // Only the donor access blocker will block linearizable reads. @@ -495,6 +503,10 @@ void checkIfLinearizableReadWasAllowedOrThrow(OperationContext* opCtx, const Dat void checkIfCanWriteOrThrow(OperationContext* opCtx, const DatabaseName& dbName, Timestamp writeTs) { + if (!repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { + return; + } + // The migration protocol guarantees the recipient will not get writes until the migration // is committed. auto mtab = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) @@ -508,6 +520,10 @@ void checkIfCanWriteOrThrow(OperationContext* opCtx, } Status checkIfCanBuildIndex(OperationContext* opCtx, const DatabaseName& dbName) { + if (!repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { + return Status::OK(); + } + // We only block index builds on the donor. auto mtab = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .getTenantMigrationAccessBlockerForDbName(dbName, MtabType::kDonor); @@ -529,11 +545,41 @@ Status checkIfCanBuildIndex(OperationContext* opCtx, const DatabaseName& dbName) return Status::OK(); } +void assertCanOpenChangeStream(OperationContext* opCtx, const DatabaseName& dbName) { + // We only block opening change streams on the donor. + auto mtab = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getTenantMigrationAccessBlockerForDbName(dbName, MtabType::kDonor); + if (mtab) { + auto status = mtab->checkIfCanOpenChangeStream(); + mtab->recordTenantMigrationError(status); + uassertStatusOK(status); + } +} + +void assertCanGetMoreChangeStream(OperationContext* opCtx, const DatabaseName& dbName) { + if (!repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { + return; + } + + // We only block change stream getMores on the donor. 
+ auto mtab = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getTenantMigrationAccessBlockerForDbName(dbName, MtabType::kDonor); + if (mtab) { + auto status = mtab->checkIfCanGetMoreChangeStream(); + mtab->recordTenantMigrationError(status); + uassertStatusOK(status); + } +} + bool hasActiveTenantMigration(OperationContext* opCtx, const DatabaseName& dbName) { if (dbName.db().empty()) { return false; } + if (!repl::ReplicationCoordinator::get(opCtx)->getSettings().isServerless()) { + return false; + } + return bool(TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .getAccessBlockersForDbName(dbName)); } @@ -593,6 +639,7 @@ void recoverTenantMigrationAccessBlockers(OperationContext* opCtx) { case ShardSplitDonorStateEnum::kAbortingIndexBuilds: break; case ShardSplitDonorStateEnum::kBlocking: + case ShardSplitDonorStateEnum::kRecipientCaughtUp: invariant(doc.getBlockOpTime()); mtab->startBlockingWrites(); mtab->startBlockingReadsAfter(doc.getBlockOpTime()->getTimestamp()); @@ -652,10 +699,10 @@ void performNoopWrite(OperationContext* opCtx, StringData msg) { AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); uassert(ErrorCodes::NotWritablePrimary, "Not primary when performing noop write for {}"_format(msg), - replCoord->canAcceptWritesForDatabase(opCtx, "admin")); + replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)); writeConflictRetry( - opCtx, "performNoopWrite", NamespaceString::kRsOplogNamespace.ns(), [&opCtx, &msg] { + opCtx, "performNoopWrite", NamespaceString::kRsOplogNamespace, [&opCtx, &msg] { WriteUnitOfWork wuow(opCtx); opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage( opCtx, BSON("msg" << msg)); @@ -674,7 +721,7 @@ bool inRecoveryMode(OperationContext* opCtx) { return memberState.startup() || memberState.startup2() || memberState.rollback(); } -bool shouldExcludeRead(OperationContext* opCtx) { +bool shouldExclude(OperationContext* opCtx) { return repl::tenantMigrationInfo(opCtx) || opCtx->getClient()->isInDirectClient() || (opCtx->getClient()->session() && (opCtx->getClient()->session()->getTags() & transport::Session::kInternalClient)); @@ -714,7 +761,7 @@ boost::optional extractTenantFromDatabaseName(const DatabaseName& d return boost::none; } - return dbName.db().substr(0, pos); + return dbName.db().substr(0, pos).toString(); } } // namespace tenant_migration_access_blocker diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_util.h b/src/mongo/db/repl/tenant_migration_access_blocker_util.h index fc3a9035ca536..ffbfcf748b123 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker_util.h +++ b/src/mongo/db/repl/tenant_migration_access_blocker_util.h @@ -29,35 +29,32 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_conflict_info.h" #include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { namespace tenant_migration_access_blocker { -std::shared_ptr 
getDonorAccessBlockerForMigration( - ServiceContext* serviceContext, const UUID& migrationId); - -std::shared_ptr getRecipientAccessBlockerForMigration( - ServiceContext* serviceContext, const UUID& migrationId); - -std::shared_ptr getTenantMigrationRecipientAccessBlocker( - ServiceContext* serviceContext, StringData tenantId); - void fassertOnUnsafeInitialSync(const UUID& migrationId); -/** - * Add an access blocker if one does not already exist. - */ -void addTenantMigrationRecipientAccessBlocker(ServiceContext* serviceContext, - const StringData& tenantId, - const UUID& migrationId); - /** * Parse the tenantId from a database name, or return boost::none if there is no tenantId. */ @@ -79,16 +76,16 @@ void validateNssIsBeingMigrated(const boost::optional& tenantId, TenantMigrationDonorDocument parseDonorStateDocument(const BSONObj& doc); /** - * Checks if a request is allowed to read based on the tenant migration states of this node as a + * Checks if a command is allowed to run based on the tenant migration states of this node as a * donor or as a recipient. TenantMigrationCommitted is returned if the request needs to be * re-routed to the new owner of the tenant. If the tenant is currently being migrated and the * request needs to block, a future for when the request is unblocked is returned, and the promise * will be set for the returned future when the migration is committed or aborted. Note: for better * performance, check if the future is immediately ready. */ -SemiFuture checkIfCanReadOrBlock(OperationContext* opCtx, - const DatabaseName& dbName, - const OpMsgRequest& request); +SemiFuture checkIfCanRunCommandOrBlock(OperationContext* opCtx, + const DatabaseName& dbName, + const OpMsgRequest& request); /** * If the operation has read concern "linearizable", throws TenantMigrationCommitted error if the @@ -108,6 +105,16 @@ void checkIfCanWriteOrThrow(OperationContext* opCtx, const DatabaseName& dbName, */ Status checkIfCanBuildIndex(OperationContext* opCtx, const DatabaseName& dbName); +/** + * Asserts if opening a new change stream should block. + */ +void assertCanOpenChangeStream(OperationContext* opCtx, const DatabaseName& dbName); + +/** + * Asserts if getMores for change streams should fail. + */ +void assertCanGetMoreChangeStream(OperationContext* opCtx, const DatabaseName& dbName); + /** * Returns true if there is either a donor or recipient access blocker for the given dbName. */ @@ -115,7 +122,7 @@ bool hasActiveTenantMigration(OperationContext* opCtx, const DatabaseName& dbNam /** * Scan config.tenantMigrationDonors and creates the necessary TenantMigrationAccessBlockers for - * unfinished migrations. + * unfinished migrations. Must only be called in --serverless mode. */ void recoverTenantMigrationAccessBlockers(OperationContext* opCtx); @@ -139,9 +146,9 @@ void performNoopWrite(OperationContext* opCtx, StringData msg); bool inRecoveryMode(OperationContext* opCtx); /* - * Returns true if a read should be excluded from access blocker filtering. + * Returns true if a command should be excluded from access blocker filtering. */ -bool shouldExcludeRead(OperationContext* opCtx); +bool shouldExclude(OperationContext* opCtx); /** * Parse the 'TenantId' from the provided DatabaseName. 
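With the declarations above, the access-blocker checks a serverless command path performs are all funneled through this header. The following is a rough, hypothetical caller showing the intended order of the checks; admitTenantCommand, its signature, and its flags are invented for illustration (the real call sites live in the service entry point and in the change stream open/getMore paths):

```cpp
// Hypothetical call site, assumed to be compiled inside namespace mongo -- not
// part of this patch. It strings together the helpers declared in
// tenant_migration_access_blocker_util.h in the order a command path would use them.
void admitTenantCommand(OperationContext* opCtx,
                        const DatabaseName& dbName,
                        const OpMsgRequest& request,
                        bool isOpenChangeStream,
                        bool isChangeStreamGetMore) {
    namespace blocker_util = tenant_migration_access_blocker;

    // Internal clients and direct-client operations bypass access blocker filtering.
    if (blocker_util::shouldExclude(opCtx)) {
        return;
    }

    // Fast path: the future is ready immediately when no migration is in progress.
    // Otherwise this waits (interruptibly) for the migration to commit or abort, and a
    // committed migration surfaces as TenantMigrationCommitted.
    uassertStatusOK(
        blocker_util::checkIfCanRunCommandOrBlock(opCtx, dbName, request).getNoThrow(opCtx));

    // The change stream paths additionally consult the two checks added in this patch.
    if (isOpenChangeStream) {
        blocker_util::assertCanOpenChangeStream(opCtx, dbName);
    }
    if (isChangeStreamGetMore) {
        blocker_util::assertCanGetMoreChangeStream(opCtx, dbName);
    }
}
```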
diff --git a/src/mongo/db/repl/tenant_migration_access_blocker_util_test.cpp b/src/mongo/db/repl/tenant_migration_access_blocker_util_test.cpp index 229c79816688d..e665242c4d0fa 100644 --- a/src/mongo/db/repl/tenant_migration_access_blocker_util_test.cpp +++ b/src/mongo/db/repl/tenant_migration_access_blocker_util_test.cpp @@ -27,22 +27,43 @@ * it in the license file. */ -#include "mongo/db/catalog_raii.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/dbhelpers.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/oid.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl/tenant_migration_conflict_info.h" #include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" +#include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_replica_set.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { @@ -58,10 +79,16 @@ static const UUID kMigrationId = UUID::gen(); class TenantMigrationAccessBlockerUtilTest : public ServiceContextTest { public: const TenantId kTenantId = TenantId(OID::gen()); - const DatabaseName kTenantDB = DatabaseName(kTenantId.toString() + "_ db"); + const DatabaseName kTenantDB = + DatabaseName::createDatabaseName_forTest(boost::none, kTenantId.toString() + "_ db"); void setUp() { _opCtx = makeOperationContext(); + auto service = getServiceContext(); + + repl::ReplicationCoordinator::set( + service, std::make_unique(service, _replSettings)); + TenantMigrationAccessBlockerRegistry::get(getServiceContext()).startup(); } @@ -75,6 +102,7 @@ class TenantMigrationAccessBlockerUtilTest : public ServiceContextTest { private: ServiceContext::UniqueOperationContext _opCtx; + const repl::ReplSettings _replSettings = repl::createServerlessReplSettings(); }; @@ -95,7 +123,8 @@ TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveShardMergeTrueWithDonor) { std::make_shared(getServiceContext(), UUID::gen()); TenantMigrationAccessBlockerRegistry::get(getServiceContext()) .addGlobalDonorAccessBlocker(donorMtab); - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), "local"_sd)); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kLocal)); ASSERT(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), kTenantDB)); } @@ 
-129,7 +158,8 @@ TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveShardMergeTrueWithBoth) { TenantMigrationAccessBlockerRegistry::get(getServiceContext()) .addGlobalDonorAccessBlocker(donorMtab); // Access blocker do not impact ns without tenants. - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), "config"_sd)); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kConfig)); ASSERT(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), kTenantDB)); } @@ -138,7 +168,8 @@ TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveTenantMigrationDonorFalseF std::make_shared(getServiceContext(), UUID::gen()); TenantMigrationAccessBlockerRegistry::get(getServiceContext()).add(kTenantId, donorMtab); - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), StringData())); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kEmpty)); } TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveShardMergeDonorFalseForNoDbName) { @@ -146,14 +177,16 @@ TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveShardMergeDonorFalseForNoD std::make_shared(getServiceContext(), UUID::gen()); TenantMigrationAccessBlockerRegistry::get(getServiceContext()) .addGlobalDonorAccessBlocker(donorMtab); - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), StringData())); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kEmpty)); } TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveShardMergeRecipientFalseForNoDbName) { auto recipientMtab = std::make_shared(getServiceContext(), UUID::gen()); TenantMigrationAccessBlockerRegistry::get(getServiceContext()).add(kTenantId, recipientMtab); - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), StringData())); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kEmpty)); } TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveTenantMigrationFalseForUnrelatedDb) { @@ -165,28 +198,33 @@ TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveTenantMigrationFalseForUnr std::make_shared(getServiceContext(), UUID::gen()); TenantMigrationAccessBlockerRegistry::get(getServiceContext()).add(kTenantId, donorMtab); - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), "config"_sd)); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kConfig)); } TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveTenantMigrationFalseAfterRemoveWithBoth) { + auto recipientId = UUID::gen(); auto recipientMtab = - std::make_shared(getServiceContext(), UUID::gen()); + std::make_shared(getServiceContext(), recipientId); TenantMigrationAccessBlockerRegistry::get(getServiceContext()).add(kTenantId, recipientMtab); + auto donorId = UUID::gen(); auto donorMtab = - std::make_shared(getServiceContext(), UUID::gen()); + std::make_shared(getServiceContext(), donorId); TenantMigrationAccessBlockerRegistry::get(getServiceContext()).add(kTenantId, donorMtab); ASSERT(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), kTenantDB)); // Remove donor, should still be a migration. 
TenantMigrationAccessBlockerRegistry::get(getServiceContext()) - .remove(kTenantId, TenantMigrationAccessBlocker::BlockerType::kDonor); + .removeAccessBlockersForMigration(donorId, + TenantMigrationAccessBlocker::BlockerType::kDonor); ASSERT(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), kTenantDB)); // Remove recipient, there should be no migration. TenantMigrationAccessBlockerRegistry::get(getServiceContext()) - .remove(kTenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); + .removeAccessBlockersForMigration(recipientId, + TenantMigrationAccessBlocker::BlockerType::kRecipient); ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), kTenantDB)); } @@ -202,20 +240,24 @@ TEST_F(TenantMigrationAccessBlockerUtilTest, HasActiveShardMergeFalseAfterRemove .addGlobalDonorAccessBlocker(donorMtab); ASSERT(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), kTenantDB)); - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), "admin"_sd)); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kAdmin)); // Remove donor, should still be a migration for the tenants migrating to the recipient. TenantMigrationAccessBlockerRegistry::get(getServiceContext()) .removeAccessBlockersForMigration(migrationId, TenantMigrationAccessBlocker::BlockerType::kDonor); ASSERT(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), kTenantDB)); - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), "admin"_sd)); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kAdmin)); // Remove recipient, there should be no migration. TenantMigrationAccessBlockerRegistry::get(getServiceContext()) - .remove(kTenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); + .removeAccessBlockersForMigration(migrationId, + TenantMigrationAccessBlocker::BlockerType::kRecipient); ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), kTenantDB)); - ASSERT_FALSE(tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), "admin"_sd)); + ASSERT_FALSE( + tenant_migration_access_blocker::hasActiveTenantMigration(opCtx(), DatabaseName::kAdmin)); } TEST_F(TenantMigrationAccessBlockerUtilTest, TestValidateNssBeingMigrated) { @@ -269,7 +311,8 @@ class RecoverAccessBlockerTest : public ServiceContextMongoDTest { // Need real (non-mock) storage to insert state doc. 
repl::StorageInterface::set(serviceContext, std::make_unique()); - auto replCoord = std::make_unique(serviceContext); + auto replCoord = + std::make_unique(serviceContext, _replSettings); ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY)); _replMock = replCoord.get(); repl::ReplicationCoordinator::set(serviceContext, std::move(replCoord)); @@ -300,6 +343,7 @@ class RecoverAccessBlockerTest : public ServiceContextMongoDTest { private: ServiceContext::UniqueOperationContext _opCtx; + const repl::ReplSettings _replSettings = repl::createServerlessReplSettings(); }; TEST_F(RecoverAccessBlockerTest, ShardMergeRecipientBlockerStarted) { @@ -319,12 +363,13 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeRecipientBlockerStarted) { .getTenantMigrationAccessBlockerForTenantId( tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); ASSERT(mtab); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_THROWS_CODE_AND_WHAT(readFuture.get(), - DBException, - ErrorCodes::SnapshotTooOld, - "Tenant read is not allowed before migration completes"); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_THROWS_CODE_AND_WHAT( + cmdFuture.get(), + DBException, + ErrorCodes::IllegalOperation, + "Tenant command 'dummyCmd' is not allowed before migration completes"); } } @@ -366,12 +411,13 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeRecipientAbortedAfterDataCopy) { .getTenantMigrationAccessBlockerForTenantId( tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); ASSERT(mtab); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_THROWS_CODE_AND_WHAT(readFuture.get(), - DBException, - ErrorCodes::SnapshotTooOld, - "Tenant read is not allowed before migration completes"); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_THROWS_CODE_AND_WHAT( + cmdFuture.get(), + DBException, + ErrorCodes::IllegalOperation, + "Tenant command 'dummyCmd' is not allowed before migration completes"); } } @@ -413,12 +459,13 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeRecipientCommittedAfterDataCopy) { .getTenantMigrationAccessBlockerForTenantId( tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); ASSERT(mtab); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_THROWS_CODE_AND_WHAT(readFuture.get(), - DBException, - ErrorCodes::SnapshotTooOld, - "Tenant read is not allowed before migration completes"); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_THROWS_CODE_AND_WHAT( + cmdFuture.get(), + DBException, + ErrorCodes::IllegalOperation, + "Tenant command 'dummyCmd' is not allowed before migration completes"); } } @@ -439,12 +486,13 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeRecipientLearnedFiles) { .getTenantMigrationAccessBlockerForTenantId( tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); ASSERT(mtab); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_THROWS_CODE_AND_WHAT(readFuture.get(), - DBException, - ErrorCodes::SnapshotTooOld, - "Tenant read is not allowed before migration completes"); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_THROWS_CODE_AND_WHAT( + 
cmdFuture.get(), + DBException, + ErrorCodes::IllegalOperation, + "Tenant command 'dummyCmd' is not allowed before migration completes"); } } @@ -465,12 +513,13 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeRecipientConsistent) { .getTenantMigrationAccessBlockerForTenantId( tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); ASSERT(mtab); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_THROWS_CODE_AND_WHAT(readFuture.get(), - DBException, - ErrorCodes::SnapshotTooOld, - "Tenant read is not allowed before migration completes"); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_THROWS_CODE_AND_WHAT( + cmdFuture.get(), + DBException, + ErrorCodes::IllegalOperation, + "Tenant command 'dummyCmd' is not allowed before migration completes"); } } @@ -495,18 +544,19 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeRecipientRejectBeforeTimestamp) { repl::ReadConcernArgs::get(opCtx()) = repl::ReadConcernArgs(repl::ReadConcernLevel::kMajorityReadConcern); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_OK(readFuture.getNoThrow()); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_OK(cmdFuture.getNoThrow()); repl::ReadConcernArgs::get(opCtx()) = repl::ReadConcernArgs(repl::ReadConcernLevel::kSnapshotReadConcern); repl::ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp{15, 1}); - auto readFutureAtClusterTime = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFutureAtClusterTime.isReady()); - ASSERT_THROWS_CODE_AND_WHAT(readFutureAtClusterTime.get(), - DBException, - ErrorCodes::SnapshotTooOld, - "Tenant read is not allowed before migration completes"); + auto cmdFutureAtClusterTime = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFutureAtClusterTime.isReady()); + ASSERT_THROWS_CODE_AND_WHAT( + cmdFutureAtClusterTime.get(), + DBException, + ErrorCodes::SnapshotTooOld, + "Tenant command 'dummyCmd' is not allowed before migration completes"); } } @@ -564,9 +614,9 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeDonorAbortingIndex) { tenantId, TenantMigrationAccessBlocker::BlockerType::kDonor); ASSERT(mtab); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_OK(readFuture.getNoThrow()); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_OK(cmdFuture.getNoThrow()); ASSERT_OK(mtab->checkIfCanWrite(Timestamp{10, 1})); @@ -600,15 +650,15 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeDonorBlocking) { repl::ReadConcernArgs::get(opCtx()) = repl::ReadConcernArgs(repl::ReadConcernLevel::kMajorityReadConcern); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_OK(readFuture.getNoThrow()); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_OK(cmdFuture.getNoThrow()); repl::ReadConcernArgs::get(opCtx()) = repl::ReadConcernArgs(repl::ReadConcernLevel::kSnapshotReadConcern); repl::ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp{101, 1}); - auto afterReadFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_FALSE(afterReadFuture.isReady()); + auto afterCmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_FALSE(afterCmdFuture.isReady()); ASSERT_EQ(mtab->checkIfCanWrite(Timestamp{101, 
1}).code(), ErrorCodes::TenantMigrationConflict); @@ -644,16 +694,16 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeDonorCommitted) { repl::ReadConcernArgs::get(opCtx()) = repl::ReadConcernArgs(repl::ReadConcernLevel::kSnapshotReadConcern); repl::ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp{90, 1}); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_OK(readFuture.getNoThrow()); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_OK(cmdFuture.getNoThrow()); repl::ReadConcernArgs::get(opCtx()) = repl::ReadConcernArgs(repl::ReadConcernLevel::kSnapshotReadConcern); repl::ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp{102, 1}); - auto afterReadFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(afterReadFuture.isReady()); - ASSERT_EQ(afterReadFuture.getNoThrow().code(), ErrorCodes::TenantMigrationCommitted); + auto afterCmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(afterCmdFuture.isReady()); + ASSERT_EQ(afterCmdFuture.getNoThrow().code(), ErrorCodes::TenantMigrationCommitted); ASSERT_EQ(mtab->checkIfCanWrite(Timestamp{102, 1}).code(), ErrorCodes::TenantMigrationCommitted); @@ -689,16 +739,16 @@ TEST_F(RecoverAccessBlockerTest, ShardMergeDonorAborted) { repl::ReadConcernArgs::get(opCtx()) = repl::ReadConcernArgs(repl::ReadConcernLevel::kSnapshotReadConcern); repl::ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp{90, 1}); - auto readFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(readFuture.isReady()); - ASSERT_OK(readFuture.getNoThrow()); + auto cmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(cmdFuture.isReady()); + ASSERT_OK(cmdFuture.getNoThrow()); repl::ReadConcernArgs::get(opCtx()) = repl::ReadConcernArgs(repl::ReadConcernLevel::kSnapshotReadConcern); repl::ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp{102, 1}); - auto afterReadFuture = mtab->getCanReadFuture(opCtx(), "dummyCmd"); - ASSERT_TRUE(afterReadFuture.isReady()); - ASSERT_OK(afterReadFuture.getNoThrow()); + auto afterCmdFuture = mtab->getCanRunCommandFuture(opCtx(), "dummyCmd"); + ASSERT_TRUE(afterCmdFuture.isReady()); + ASSERT_OK(afterCmdFuture.getNoThrow()); ASSERT_OK(mtab->checkIfCanWrite(Timestamp{102, 1})); diff --git a/src/mongo/db/repl/tenant_migration_conflict_info.cpp b/src/mongo/db/repl/tenant_migration_conflict_info.cpp index 0b9f3c44498ef..e59552a6d0a54 100644 --- a/src/mongo/db/repl/tenant_migration_conflict_info.cpp +++ b/src/mongo/db/repl/tenant_migration_conflict_info.cpp @@ -27,11 +27,9 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/tenant_migration_conflict_info.h" - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" namespace mongo { diff --git a/src/mongo/db/repl/tenant_migration_conflict_info.h b/src/mongo/db/repl/tenant_migration_conflict_info.h index 4d9be9fd4beea..c170c1781158e 100644 --- a/src/mongo/db/repl/tenant_migration_conflict_info.h +++ b/src/mongo/db/repl/tenant_migration_conflict_info.h @@ -29,10 +29,18 @@ #pragma once +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" #include "mongo/db/repl/tenant_migration_donor_access_blocker.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/repl/tenant_migration_decoration.cpp b/src/mongo/db/repl/tenant_migration_decoration.cpp index 4fee0fb3c29b9..180d40d2289f9 100644 --- a/src/mongo/db/repl/tenant_migration_decoration.cpp +++ b/src/mongo/db/repl/tenant_migration_decoration.cpp @@ -27,7 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include + +#include #include "mongo/db/repl/tenant_migration_decoration.h" diff --git a/src/mongo/db/repl/tenant_migration_decoration.h b/src/mongo/db/repl/tenant_migration_decoration.h index bbc61a0046dc3..7741ff8b4d650 100644 --- a/src/mongo/db/repl/tenant_migration_decoration.h +++ b/src/mongo/db/repl/tenant_migration_decoration.h @@ -30,18 +30,30 @@ #pragma once #include +#include +#include #include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" +#include "mongo/util/decorable.h" #include "mongo/util/uuid.h" namespace mongo { namespace repl { +struct DonorOplogEntryData { + DonorOplogEntryData(const OpTime& donorOpTime, const int64_t applyOpsIndex) + : donorOpTime(donorOpTime), applyOpsIndex(applyOpsIndex) {} + OpTime donorOpTime; + int64_t applyOpsIndex; +}; + struct TenantMigrationInfo { TenantMigrationInfo(const UUID& in_uuid) : uuid(in_uuid) {} + UUID uuid; + boost::optional donorOplogEntryData; }; extern const OperationContext::Decoration> tenantMigrationInfo; - } // namespace repl } // namespace mongo diff --git a/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp b/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp index a4e45fdbc916e..80e3f8fdedad8 100644 --- a/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp +++ b/src/mongo/db/repl/tenant_migration_donor_access_blocker.cpp @@ -28,19 +28,59 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/client.h" -#include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" +#include 
"mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_conflict_info.h" +#include "mongo/db/repl/tenant_migration_donor_access_blocker.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -135,13 +175,13 @@ Status TenantMigrationDonorAccessBlocker::waitUntilCommittedOrAborted(OperationC MONGO_UNREACHABLE; } -SharedSemiFuture TenantMigrationDonorAccessBlocker::getCanReadFuture(OperationContext* opCtx, - StringData command) { +SharedSemiFuture TenantMigrationDonorAccessBlocker::getCanRunCommandFuture( + OperationContext* opCtx, StringData command) { // Exclude internal client requests - if (tenant_migration_access_blocker::shouldExcludeRead(opCtx)) { + if (tenant_migration_access_blocker::shouldExclude(opCtx)) { LOGV2_DEBUG(6397500, 1, - "Internal tenant read got excluded from the MTAB filtering", + "Internal tenant command got excluded from the MTAB filtering", "migrationId"_attr = getMigrationId(), "opId"_attr = opCtx->getOpID()); return SharedSemiFuture(); @@ -190,7 +230,7 @@ SharedSemiFuture TenantMigrationDonorAccessBlocker::getCanReadFuture(Opera return SharedSemiFuture( Status(ErrorCodes::TenantMigrationCommitted, - "Read must be re-routed to the new owner of this tenant")); + "Command must be re-routed to the new owner of this tenant")); default: MONGO_UNREACHABLE; @@ -224,6 +264,41 @@ Status TenantMigrationDonorAccessBlocker::checkIfCanBuildIndex() { MONGO_UNREACHABLE; } +Status TenantMigrationDonorAccessBlocker::checkIfCanOpenChangeStream() { + stdx::lock_guard lg(_mutex); + switch (_state.getState()) { + case BlockerState::State::kAllow: + return Status::OK(); + case BlockerState::State::kBlockWrites: + case BlockerState::State::kBlockWritesAndReads: + return {TenantMigrationConflictInfo(getMigrationId(), shared_from_this()), + "Change stream must wait for commit or abort to get a safe start time"}; + case BlockerState::State::kReject: + // At this point checkIfCanReadOrBlock should have blocked this at the command level. 
+ return {ErrorCodes::TenantMigrationCommitted, + "Change stream must be resumed on the new owner of this tenant"}; + case BlockerState::State::kAborted: + return Status::OK(); + } + MONGO_UNREACHABLE; +} + +Status TenantMigrationDonorAccessBlocker::checkIfCanGetMoreChangeStream() { + stdx::lock_guard lg(_mutex); + switch (_state.getState()) { + case BlockerState::State::kAllow: + case BlockerState::State::kBlockWrites: + case BlockerState::State::kBlockWritesAndReads: + return Status::OK(); + case BlockerState::State::kReject: + return {ErrorCodes::ResumeTenantChangeStream, + "Change stream must be resumed on the new owner of this tenant"}; + case BlockerState::State::kAborted: + return Status::OK(); + } + MONGO_UNREACHABLE; +} + void TenantMigrationDonorAccessBlocker::startBlockingWrites() { stdx::lock_guard lg(_mutex); diff --git a/src/mongo/db/repl/tenant_migration_donor_access_blocker.h b/src/mongo/db/repl/tenant_migration_donor_access_blocker.h index 488359f09f28a..2a3b416c5b0e2 100644 --- a/src/mongo/db/repl/tenant_migration_donor_access_blocker.h +++ b/src/mongo/db/repl/tenant_migration_donor_access_blocker.h @@ -29,14 +29,33 @@ #pragma once +#include #include +#include +#include +#include +#include +#include +#include +#include "tenant_migration_access_blocker.h" + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" #include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" -#include "tenant_migration_access_blocker.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -111,14 +130,14 @@ inline RepeatableSharedPromise::~RepeatableSharedPromise() { * blocked inside handleTenantMigrationConflict instead of checkIfCanWrite because writes must not * block between being assigned an OpTime and committing. * - * Every command calls getCanReadFuture (via tenant_migration_access_blocker::checkIfCanReadOrBlock) - * at some point after waiting for readConcern. If the migration has committed, the method will - * return TenantMigrationCommitted if the command is in the commandDenyListAfterMigration. If the - * migration has aborted, the method will return an OK status and the command can just proceed. - * Otherwise, if the command has afterClusterTime or atClusterTime >= blockTimestamp, the promise - * will remain unfulfilled until the migration either commits (in which case - * TenantMigrationCommitted will be returned) or aborts (in which case an OK status will be returned - * and the reads will be unblocked). + * Every command calls getCanRunCommand (via + * tenant_migration_access_blocker::checkIfCanRunCommandOrBlock) at some point after waiting for + * readConcern. If the migration has committed, the method will return TenantMigrationCommitted if + * the command is in the commandDenyListAfterMigration. If the migration has aborted, the method + * will return an OK status and the command can just proceed. 
Otherwise, if the command has + afterClusterTime or atClusterTime >= blockTimestamp, the promise will remain unfulfilled until + the migration either commits (in which case TenantMigrationCommitted will be returned) or aborts + (in which case an OK status will be returned and the command will be unblocked). * * Linearizable reads call checkIfLinearizableReadWasAllowed after doing the noop write at the end * the reads. The method returns TenantMigrationCommitted if the migration has committed, and an * @@ -127,6 +146,17 @@ inline RepeatableSharedPromise::~RepeatableSharedPromise() { * to be rejected if it is possible that some writes have been accepted by the recipient (i.e. the * migration has committed). * + * When opening new change streams without a resume token the server needs to pick a start time to + * avoid reprocessing events that happened before the change stream was opened. This is usually the + * optime of the last committed write in the oplog. However, while in the kBlockingWrites and + * kBlockingWritesAndReads states, the latest global oplog time might wind up after the + * blockTimestamp, so we need to delay these commands by calling assertCanOpenChangeStream. + * + * Change stream getMore commands call assertCanGetMoreChangeStream. After a commit normal getMores + * are allowed to proceed and drain the cursors, but change stream cursors are infinite and can't be + * fully drained. We added this special check for change streams specifically to signal that they + * must be resumed on the recipient. + * + * Index build user threads call checkIfCanBuildIndex. Index builds are blocked and rejected * similarly to regular writes except that they are blocked from the start of the migration (i.e. * before "blockTimestamp" is chosen). @@ -156,10 +186,10 @@ inline RepeatableSharedPromise::~RepeatableSharedPromise() { * "blockTimestamp". * * At this point: - * - Reads on the node that have already passed getCanReadFuture must have a clusterTime before + * - Reads on the node that have already passed getCanRunCommand must have a clusterTime before * the blockTimestamp, since the write at blockTimestamp hasn't committed yet (i.e., there's still * an oplog hole at blockTimestamp). - * - Reads on the node that have not yet passed getCanReadFuture will end up blocking. + * - Reads on the node that have not yet passed getCanRunCommand will end up blocking. * * If the "start blocking" write aborts or the write rolls back via replication rollback, the node * calls rollBackStartBlocking. @@ -190,7 +220,8 @@ class TenantMigrationDonorAccessBlocker Status waitUntilCommittedOrAborted(OperationContext* opCtx) final; Status checkIfLinearizableReadWasAllowed(OperationContext* opCtx) final; - SharedSemiFuture getCanReadFuture(OperationContext* opCtx, StringData command) final; + SharedSemiFuture getCanRunCommandFuture(OperationContext* opCtx, + StringData command) final; // // Called by index build user threads before acquiring an index build slot, and again right @@ -198,6 +229,16 @@ class TenantMigrationDonorAccessBlocker // Status checkIfCanBuildIndex() final; + /** + * Checks if opening change streams should fail. + */ + Status checkIfCanOpenChangeStream() final; + + /** + * Returns error status if "getMore" command of a change stream should fail. + */ + Status checkIfCanGetMoreChangeStream() final; + bool checkIfShouldBlockTTL() const final { // There is no TTL race at the donor side. See parent class for details.
return false; diff --git a/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp b/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp index 1d46d492a4470..d01b6419c1b0d 100644 --- a/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp +++ b/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp @@ -27,15 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog_raii.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_decoration.h" +#include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/repl/tenant_migration_donor_op_observer.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" +#include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -206,21 +228,9 @@ class TenantMigrationDonorCommitOrAbortHandler final : public RecoveryUnit::Chan // The migration durably aborted and is now marked as garbage collectable, // remove its TenantMigrationDonorAccessBlocker right away to allow back-to-back // migration retries. 
- if (_donorStateDoc.getProtocol().value_or( - MigrationProtocolEnum::kMultitenantMigrations) == - MigrationProtocolEnum::kMultitenantMigrations) { - const auto tenantId = TenantId::parseFromString(_donorStateDoc.getTenantId()); - TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .remove(tenantId, TenantMigrationAccessBlocker::BlockerType::kDonor); - } else { - tassert(6448701, - "Bad protocol", - _donorStateDoc.getProtocol() == MigrationProtocolEnum::kShardMerge); - TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .removeAccessBlockersForMigration( - _donorStateDoc.getId(), - TenantMigrationAccessBlocker::BlockerType::kDonor); - } + TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .removeAccessBlockersForMigration( + _donorStateDoc.getId(), TenantMigrationAccessBlocker::BlockerType::kDonor); } return; } @@ -250,7 +260,8 @@ void TenantMigrationDonorOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kTenantMigrationDonorsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { for (auto it = first; it != last; it++) { @@ -279,7 +290,8 @@ void TenantMigrationDonorOpObserver::onInserts(OperationContext* opCtx, } void TenantMigrationDonorOpObserver::onUpdate(OperationContext* opCtx, - const OplogUpdateEntryArgs& args) { + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.coll->ns() == NamespaceString::kTenantMigrationDonorsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { auto donorStateDoc = @@ -307,7 +319,9 @@ void TenantMigrationDonorOpObserver::onUpdate(OperationContext* opCtx, void TenantMigrationDonorOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kTenantMigrationDonorsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { auto donorStateDoc = tenant_migration_access_blocker::parseDonorStateDocument(doc); @@ -330,7 +344,8 @@ void TenantMigrationDonorOpObserver::aboutToDelete(OperationContext* opCtx, void TenantMigrationDonorOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kTenantMigrationDonorsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { auto tmi = tenantMigrationInfo(opCtx); @@ -355,7 +370,8 @@ repl::OpTime TenantMigrationDonorOpObserver::onDropCollection(OperationContext* const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { if (collectionName == NamespaceString::kTenantMigrationDonorsNamespace) { opCtx->recoveryUnit()->onCommit([](OperationContext* opCtx, boost::optional) { TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) diff --git a/src/mongo/db/repl/tenant_migration_donor_op_observer.h b/src/mongo/db/repl/tenant_migration_donor_op_observer.h index f7ded62694c32..84c927cda2bcc 100644 --- a/src/mongo/db/repl/tenant_migration_donor_op_observer.h +++ 
b/src/mongo/db/repl/tenant_migration_donor_op_observer.h @@ -29,7 +29,20 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -37,7 +50,7 @@ namespace repl { /** * OpObserver for tenant migration donor. */ -class TenantMigrationDonorOpObserver final : public OpObserver { +class TenantMigrationDonorOpObserver final : public OpObserverNoop { TenantMigrationDonorOpObserver(const TenantMigrationDonorOpObserver&) = delete; TenantMigrationDonorOpObserver& operator=(const TenantMigrationDonorOpObserver&) = delete; @@ -45,209 +58,43 @@ class TenantMigrationDonorOpObserver final : public OpObserver { TenantMigrationDonorOpObserver() = default; ~TenantMigrationDonorOpObserver() = default; - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfig, NamespaceFilter::kConfig}; + } void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) final; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} - - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void 
aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) final {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final {} - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) final {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - std::unique_ptr preTransactionPrepare( - 
OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} + CollectionDropType dropType, + bool markFromMigrate) final; void onMajorityCommitPointUpdate(ServiceContext* service, const repl::OpTime& newCommitPoint) final; - -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final { - } }; } // namespace repl diff --git a/src/mongo/db/repl/tenant_migration_donor_service.cpp b/src/mongo/db/repl/tenant_migration_donor_service.cpp index 1775b8479a197..ad8c9516c4eb8 100644 --- a/src/mongo/db/repl/tenant_migration_donor_service.cpp +++ b/src/mongo/db/repl/tenant_migration_donor_service.cpp @@ -30,35 +30,101 @@ #include "mongo/db/repl/tenant_migration_donor_service.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/async_remote_command_targeter_adapter.h" #include "mongo/client/connection_string.h" -#include "mongo/client/replica_set_monitor.h" -#include "mongo/config.h" +#include "mongo/client/remote_command_retry_scheduler.h" +#include "mongo/client/remote_command_targeter_rs.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db//shard_role.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" +#include "mongo/db/catalog/local_oplog_info.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands/tenant_migration_recipient_cmds_gen.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/keys_collection_util.h" +#include "mongo/db/ops/update_result.h" #include "mongo/db/persistent_task_store.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include 
"mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/repl/tenant_migration_statistics.h" +#include "mongo/db/repl/tenant_migration_util.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/async_rpc.h" +#include "mongo/executor/async_rpc_error_info.h" #include "mongo/executor/async_rpc_retry_policy.h" +#include "mongo/executor/async_rpc_targeter.h" #include "mongo/executor/connection_pool.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/future_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -73,6 +139,7 @@ MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationBeforeLeavingAbortingIndexBuildsStat MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationBeforeLeavingBlockingState); MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationBeforeLeavingDataSyncState); MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationBeforeFetchingKeys); +MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationDonorBeforeStoringExternalClusterTimeKeyDocs); MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationDonorBeforeWaitingForKeysToReplicate); MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationDonorBeforeMarkingStateGarbageCollectable); MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationDonorAfterMarkingStateGarbageCollectable); @@ -382,17 +449,6 @@ TenantMigrationDonorService::Instance::_makeRecipientCmdExecutor() { Client::initThread(threadName.c_str()); auto client = Client::getCurrent(); AuthorizationSession::get(*client)->grantInternalAuthorization(&cc()); - - // Ideally, we should also associate the client created by _recipientCmdExecutor with the - // TenantMigrationDonorService to make the opCtxs created by the task executor get - // registered in the TenantMigrationDonorService, and killed on stepdown. But that would - // require passing the pointer to the TenantMigrationService into the Instance and making - // constructInstance not const so we can set the client's decoration here. 
Right now there - // is no need for that since the task executor is only used with scheduleRemoteCommand and - // no opCtx will be created (the cancellation token is responsible for canceling the - // outstanding work on the task executor). - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); }; auto hookList = std::make_unique(); @@ -539,10 +595,17 @@ ExecutorFuture TenantMigrationDonorService::Instance::_insertState pauseTenantMigrationBeforeInsertingDonorStateDoc.pauseWhileSet(opCtx); - AutoGetCollection collection(opCtx, _stateDocumentsNS, MODE_IX); + auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + _stateDocumentsNS, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); writeConflictRetry( - opCtx, "TenantMigrationDonorInsertStateDoc", _stateDocumentsNS.ns(), [&] { + opCtx, "TenantMigrationDonorInsertStateDoc", _stateDocumentsNS, [&] { const auto filter = BSON(TenantMigrationDonorDocument::kIdFieldName << _migrationUuid); const auto updateMod = [&]() { @@ -550,7 +613,7 @@ ExecutorFuture TenantMigrationDonorService::Instance::_insertState return BSON("$setOnInsert" << _stateDoc.toBSON()); }(); auto updateResult = Helpers::upsert( - opCtx, _stateDocumentsNS, filter, updateMod, /*fromMigrate=*/false); + opCtx, collection, filter, updateMod, /*fromMigrate=*/false); // '$setOnInsert' update operator can never modify an existing on-disk state // doc. @@ -592,11 +655,12 @@ ExecutorFuture TenantMigrationDonorService::Instance::_updateState AutoGetCollection collection(opCtx, _stateDocumentsNS, MODE_IX); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << _stateDocumentsNS.ns() << " does not exist", + str::stream() + << _stateDocumentsNS.toStringForErrorMsg() << " does not exist", collection); writeConflictRetry( - opCtx, "TenantMigrationDonorUpdateStateDoc", _stateDocumentsNS.ns(), [&] { + opCtx, "TenantMigrationDonorUpdateStateDoc", _stateDocumentsNS, [&] { WriteUnitOfWork wuow(opCtx); const auto originalRecordId = Helpers::findOne( @@ -674,6 +738,7 @@ ExecutorFuture TenantMigrationDonorService::Instance::_updateState originalSnapshot, updatedStateDocBson, collection_internal::kUpdateNoIndexes, + nullptr /* indexesAffected */, nullptr /* OpDebug* */, &args); @@ -707,12 +772,19 @@ TenantMigrationDonorService::Instance::_markStateDocAsGarbageCollectable( pauseTenantMigrationDonorBeforeMarkingStateGarbageCollectable.pauseWhileSet(opCtx); - AutoGetCollection collection(opCtx, _stateDocumentsNS, MODE_IX); + auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + _stateDocumentsNS, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); writeConflictRetry( opCtx, "TenantMigrationDonorMarkStateDocAsGarbageCollectable", - _stateDocumentsNS.ns(), + _stateDocumentsNS, [&] { const auto filter = BSON(TenantMigrationDonorDocument::kIdFieldName << _migrationUuid); @@ -721,7 +793,7 @@ TenantMigrationDonorService::Instance::_markStateDocAsGarbageCollectable( return _stateDoc.toBSON(); }(); auto updateResult = Helpers::upsert( - opCtx, _stateDocumentsNS, filter, updateMod, /*fromMigrate=*/false); + opCtx, collection, filter, updateMod, /*fromMigrate=*/false); invariant(updateResult.numDocsModified == 1); }); @@ -1132,32 +1204,34 @@ TenantMigrationDonorService::Instance::_fetchAndStoreRecipientClusterTimeKeyDocs std::make_shared>(); auto 
fetchStatus = std::make_shared>(); - auto fetcherCallback = - [this, self = shared_from_this(), fetchStatus, keyDocs]( - const Fetcher::QueryResponseStatus& dataStatus, - Fetcher::NextAction* nextAction, - BSONObjBuilder* getMoreBob) { - // Throw out any accumulated results on error - if (!dataStatus.isOK()) { - *fetchStatus = dataStatus.getStatus(); - keyDocs->clear(); - return; - } + auto fetcherCallback = [this, + self = shared_from_this(), + fetchStatus, + keyDocs]( + const Fetcher::QueryResponseStatus& dataStatus, + Fetcher::NextAction* nextAction, + BSONObjBuilder* getMoreBob) { + // Throw out any accumulated results on error + if (!dataStatus.isOK()) { + *fetchStatus = dataStatus.getStatus(); + keyDocs->clear(); + return; + } - const auto& data = dataStatus.getValue(); - for (const BSONObj& doc : data.documents) { - keyDocs->push_back( - tenant_migration_util::makeExternalClusterTimeKeyDoc( - _migrationUuid, doc.getOwned())); - } - *fetchStatus = Status::OK(); + const auto& data = dataStatus.getValue(); + for (const BSONObj& doc : data.documents) { + keyDocs->push_back( + keys_collection_util::makeExternalClusterTimeKeyDoc( + doc.getOwned(), _migrationUuid, boost::none /* expireAt */)); + } + *fetchStatus = Status::OK(); - if (!getMoreBob) { - return; - } - getMoreBob->append("getMore", data.cursorId); - getMoreBob->append("collection", data.nss.coll()); - }; + if (!getMoreBob) { + return; + } + getMoreBob->append("getMore", data.cursorId); + getMoreBob->append("collection", data.nss.coll()); + }; auto fetcher = std::make_shared( _recipientCmdExecutor.get(), @@ -1212,8 +1286,11 @@ TenantMigrationDonorService::Instance::_fetchAndStoreRecipientClusterTimeKeyDocs .then([this, self = shared_from_this(), executor, token](auto keyDocs) { checkForTokenInterrupt(token); - return tenant_migration_util::storeExternalClusterTimeKeyDocs( - std::move(keyDocs)); + auto opCtx = cc().makeOperationContext(); + pauseTenantMigrationDonorBeforeStoringExternalClusterTimeKeyDocs + .pauseWhileSet(opCtx.get()); + return keys_collection_util::storeExternalClusterTimeKeyDocs( + opCtx.get(), std::move(keyDocs)); }) .then([this, self = shared_from_this(), token](repl::OpTime lastKeyOpTime) { pauseTenantMigrationDonorBeforeWaitingForKeysToReplicate.pauseWhileSet(); diff --git a/src/mongo/db/repl/tenant_migration_donor_service.h b/src/mongo/db/repl/tenant_migration_donor_service.h index d67dde730cecf..a75381f59f594 100644 --- a/src/mongo/db/repl/tenant_migration_donor_service.h +++ b/src/mongo/db/repl/tenant_migration_donor_service.h @@ -29,14 +29,45 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/fetcher.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/client/read_preference.h" +#include "mongo/client/remote_command_targeter.h" #include "mongo/client/remote_command_targeter_rs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl/tenant_migration_pem_payload_gen.h" +#include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/service_context.h" 
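The reformatted fetcher callback above keeps the same control flow: on error it discards any partially accumulated key documents, otherwise it appends each returned document and, while the cursor is still open, requests the next batch via the getMore builder. A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than the actual Fetcher API:

```cpp
// Illustrative analogue only (not the MongoDB Fetcher API): a batch callback
// that accumulates documents, records the final status, and asks for another
// batch while a cursor remains open. All names here are hypothetical.
#include <functional>
#include <iostream>
#include <optional>
#include <string>
#include <vector>

struct BatchResult {
    std::vector<std::string> documents;  // stand-in for the BSON key documents
    long long cursorId = 0;              // 0 means the cursor is exhausted
};

void onBatch(const std::optional<BatchResult>& batch,       // nullopt models an error
             std::vector<std::string>& keyDocs,             // shared accumulator
             std::optional<bool>& fetchOk,                  // shared final status
             std::function<void(long long)> requestMore) {  // schedules a "getMore"
    if (!batch) {
        keyDocs.clear();  // throw out any accumulated results on error
        fetchOk = false;
        return;
    }
    for (const auto& doc : batch->documents) {
        keyDocs.push_back(doc);
    }
    fetchOk = true;
    if (batch->cursorId != 0) {
        requestMore(batch->cursorId);  // mirrors appending "getMore"/"collection"
    }
}

int main() {
    std::vector<std::string> keyDocs;
    std::optional<bool> fetchOk;
    onBatch(BatchResult{{"key1", "key2"}, 0}, keyDocs, fetchOk, [](long long) {});
    std::cout << keyDocs.size() << " docs, ok=" << *fetchOk << '\n';
}
```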
+#include "mongo/db/tenant_id.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/transport/transport_layer.h" #include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/repl/tenant_migration_donor_service_test.cpp b/src/mongo/db/repl/tenant_migration_donor_service_test.cpp index 325e76ccf0042..3f7aade5c3a0f 100644 --- a/src/mongo/db/repl/tenant_migration_donor_service_test.cpp +++ b/src/mongo/db/repl/tenant_migration_donor_service_test.cpp @@ -27,27 +27,41 @@ * it in the license file. */ -#include +#include // IWYU pragma: keep +#include #include -#include "mongo/client/replica_set_monitor.h" -#include "mongo/client/replica_set_monitor_protocol_test_util.h" -#include "mongo/config.h" +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/client.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_mock.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/primary_only_service_op_observer.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/tenant_migration_donor_service.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/dbtests/mock/mock_conn_registry.h" -#include "mongo/dbtests/mock/mock_replica_set.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/ssl_options.h" #include "mongo/util/net/ssl_util.h" namespace mongo { diff --git a/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp b/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp index 02fec7a5d92e0..a7ac7f747c159 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_access_blocker.cpp @@ -28,20 +28,37 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/client.h" -#include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include 
"mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" #include "mongo/logv2/log.h" -#include "mongo/util/cancellation.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" -#include "mongo/util/future_util.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -71,17 +88,18 @@ Status TenantMigrationRecipientAccessBlocker::waitUntilCommittedOrAborted(Operat MONGO_UNREACHABLE; } -SharedSemiFuture TenantMigrationRecipientAccessBlocker::getCanReadFuture( +SharedSemiFuture TenantMigrationRecipientAccessBlocker::getCanRunCommandFuture( OperationContext* opCtx, StringData command) { if (MONGO_unlikely(tenantMigrationRecipientNotRejectReads.shouldFail())) { return SharedSemiFuture(); } - if (tenant_migration_access_blocker::shouldExcludeRead(opCtx)) { + if (tenant_migration_access_blocker::shouldExclude(opCtx)) { LOGV2_DEBUG(5739900, 1, - "Internal tenant read got excluded from the MTAB filtering", + "Internal tenant command got excluded from the MTAB filtering", "migrationId"_attr = getMigrationId(), + "command"_attr = command, "opId"_attr = opCtx->getOpID()); return SharedSemiFuture(); } @@ -97,29 +115,33 @@ SharedSemiFuture TenantMigrationRecipientAccessBlocker::getCanReadFuture( }(); stdx::lock_guard lk(_mutex); - if (_state.isReject()) { + if (_state.isRejectReadsAndWrites()) { + // Something is likely wrong with the proxy if we end up here. Traffic should not be routed + // to the recipient while in the `kRejectReadsAndWrites` state. 
LOGV2_DEBUG(5749100, 1, - "Tenant read is blocked on the recipient before migration completes", + "Tenant command is blocked on the recipient before migration completes", "migrationId"_attr = getMigrationId(), "opId"_attr = opCtx->getOpID(), "command"_attr = command); return SharedSemiFuture(Status( - ErrorCodes::SnapshotTooOld, "Tenant read is not allowed before migration completes")); + ErrorCodes::IllegalOperation, + "Tenant command '{}' is not allowed before migration completes"_format(command))); } - invariant(_state.isRejectBefore()); + invariant(_state.isRejectReadsBefore()); invariant(_rejectBeforeTimestamp); if (atClusterTime && *atClusterTime < *_rejectBeforeTimestamp) { LOGV2_DEBUG(5749101, 1, - "Tenant read is blocked on the recipient before migration completes", + "Tenant command is blocked on the recipient before migration completes", "migrationId"_attr = getMigrationId(), "opId"_attr = opCtx->getOpID(), "command"_attr = command, "atClusterTime"_attr = *atClusterTime, "rejectBeforeTimestamp"_attr = *_rejectBeforeTimestamp); return SharedSemiFuture(Status( - ErrorCodes::SnapshotTooOld, "Tenant read is not allowed before migration completes")); + ErrorCodes::SnapshotTooOld, + "Tenant command '{}' is not allowed before migration completes"_format(command))); } if (readConcernArgs.getLevel() == repl::ReadConcernLevel::kMajorityReadConcern) { // Speculative majority reads are only used for change streams (against the oplog @@ -158,6 +180,14 @@ Status TenantMigrationRecipientAccessBlocker::checkIfCanBuildIndex() { return Status::OK(); } +Status TenantMigrationRecipientAccessBlocker::checkIfCanOpenChangeStream() { + return Status::OK(); +} + +Status TenantMigrationRecipientAccessBlocker::checkIfCanGetMoreChangeStream() { + return Status::OK(); +} + bool TenantMigrationRecipientAccessBlocker::checkIfShouldBlockTTL() const { stdx::lock_guard lg(_mutex); return _ttlIsBlocked; @@ -187,10 +217,10 @@ void TenantMigrationRecipientAccessBlocker::appendInfoForServerStatus( std::string TenantMigrationRecipientAccessBlocker::BlockerState::toString() const { switch (_state) { - case State::kReject: - return "reject"; - case State::kRejectBefore: - return "rejectBefore"; + case State::kRejectReadsAndWrites: + return "rejectReadsAndWrites"; + case State::kRejectReadsBefore: + return "rejectReadsBefore"; default: MONGO_UNREACHABLE; } @@ -198,7 +228,7 @@ std::string TenantMigrationRecipientAccessBlocker::BlockerState::toString() cons void TenantMigrationRecipientAccessBlocker::startRejectingReadsBefore(const Timestamp& timestamp) { stdx::lock_guard lk(_mutex); - _state.transitionToRejectBefore(); + _state.transitionToRejectReadsBefore(); if (!_rejectBeforeTimestamp || timestamp > *_rejectBeforeTimestamp) { LOGV2(5358100, "Tenant migration recipient starting to reject reads before timestamp", diff --git a/src/mongo/db/repl/tenant_migration_recipient_access_blocker.h b/src/mongo/db/repl/tenant_migration_recipient_access_blocker.h index 39caef917aa8a..5b2faffda27c8 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_access_blocker.h +++ b/src/mongo/db/repl/tenant_migration_recipient_access_blocker.h @@ -29,14 +29,26 @@ #pragma once +#include #include +#include +#include +#include +#include "tenant_migration_access_blocker.h" + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" #include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" #include "mongo/db/operation_context.h" #include 
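The renamed getCanRunCommandFuture gate above rejects everything with IllegalOperation while the blocker is still in kRejectReadsAndWrites, and once in kRejectReadsBefore it rejects only reads whose atClusterTime falls before the rejectBeforeTimestamp (SnapshotTooOld). A self-contained sketch of that decision, with simplified stand-in types rather than the MongoDB classes:

```cpp
// Minimal standalone model (not the MongoDB API) of the recipient access
// blocker's command gate: kRejectReadsAndWrites refuses everything;
// kRejectReadsBefore refuses only reads timestamped before the consistent
// point. Types and names are hypothetical.
#include <cstdint>
#include <optional>
#include <string>

enum class BlockerState { kRejectReadsAndWrites, kRejectReadsBefore };

struct GateResult {
    bool allowed;
    std::string reason;  // loosely corresponds to IllegalOperation / SnapshotTooOld
};

GateResult checkCanRunCommand(BlockerState state,
                              std::optional<std::uint64_t> atClusterTime,
                              std::uint64_t rejectBeforeTimestamp) {
    if (state == BlockerState::kRejectReadsAndWrites) {
        // Traffic should not be routed to the recipient yet; refuse unconditionally.
        return {false, "IllegalOperation"};
    }
    // kRejectReadsBefore: only timestamped reads from before the consistent
    // point are refused; later snapshots and untimestamped reads proceed.
    if (atClusterTime && *atClusterTime < rejectBeforeTimestamp) {
        return {false, "SnapshotTooOld"};
    }
    return {true, ""};
}

int main() {
    auto r1 = checkCanRunCommand(BlockerState::kRejectReadsAndWrites, std::nullopt, 5);
    auto r2 = checkCanRunCommand(BlockerState::kRejectReadsBefore, 3, 5);
    auto r3 = checkCanRunCommand(BlockerState::kRejectReadsBefore, 7, 5);
    return (!r1.allowed && !r2.allowed && r3.allowed) ? 0 : 1;
}
```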
"mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" -#include "tenant_migration_access_blocker.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -47,13 +59,15 @@ namespace mongo { * * When data cloning is finished (and therefore a consistent donor optime established) an opObserver * that is observing the recipient state document will create a - * TenantMigrationRecipientAccessBlocker in state `kReject` that will reject all reads (with - * SnapshotTooOld) for that tenant. + * TenantMigrationRecipientAccessBlocker in state `kRejectReadsAndWrites` that will reject all + * commands (with IllegalOperation) for that tenant. If a command is received during this + * `kRejectReadsAndWrites` phase it suggests that something is wrong with the proxy since traffic + * should not be routed to the recipient yet. * * When oplog application reaches this consistent point, the recipient primary will wait for * the earlier state document write to be committed on all recipient nodes before doing the state - * machine write for the consistent state. The TenantMigrationRecipientAccessBlocker, upon see the - * write for the consistent state, will transition to `kRejectBefore` state with the + * machine write for the consistent state. The TenantMigrationRecipientAccessBlocker, upon seeing + * the write for the consistent state, will transition to `kRejectReadsBefore` state with the * `rejectBeforeTimestamp` set to the recipient consistent timestamp and will start allowing reads * for read concerns which read the latest snapshot, and "atClusterTime" or "majority" read concerns * which are after the `rejectBeforeTimestamp`. Reads for older snapshots, except "majority" until @@ -83,7 +97,8 @@ class TenantMigrationRecipientAccessBlocker Status waitUntilCommittedOrAborted(OperationContext* opCtx) final; Status checkIfLinearizableReadWasAllowed(OperationContext* opCtx) final; - SharedSemiFuture getCanReadFuture(OperationContext* opCtx, StringData command) final; + SharedSemiFuture getCanRunCommandFuture(OperationContext* opCtx, + StringData command) final; // // Called by index build user threads before acquiring an index build slot, and again right @@ -91,6 +106,16 @@ class TenantMigrationRecipientAccessBlocker // Status checkIfCanBuildIndex() final; + /** + * Checks if opening a new change stream should block. + */ + Status checkIfCanOpenChangeStream() final; + + /** + * Returns error status if "getMore" command of a change stream should fail. 
+ */ + Status checkIfCanGetMoreChangeStream() final; + // @return true if TTL is blocked bool checkIfShouldBlockTTL() const final; @@ -112,7 +137,7 @@ class TenantMigrationRecipientAccessBlocker void startRejectingReadsBefore(const Timestamp& timestamp); bool inStateReject() const { - return _state.isReject(); + return _state.isRejectReadsAndWrites(); } private: @@ -121,24 +146,24 @@ class TenantMigrationRecipientAccessBlocker */ class BlockerState { public: - void transitionToRejectBefore() { - _state = State::kRejectBefore; + void transitionToRejectReadsBefore() { + _state = State::kRejectReadsBefore; } - bool isReject() const { - return _state == State::kReject; + bool isRejectReadsAndWrites() const { + return _state == State::kRejectReadsAndWrites; } - bool isRejectBefore() const { - return _state == State::kRejectBefore; + bool isRejectReadsBefore() const { + return _state == State::kRejectReadsBefore; } std::string toString() const; private: - enum class State { kReject, kRejectBefore }; + enum class State { kRejectReadsAndWrites, kRejectReadsBefore }; - State _state = State::kReject; + State _state = State::kRejectReadsAndWrites; }; ServiceContext* _serviceContext; diff --git a/src/mongo/db/repl/tenant_migration_recipient_access_blocker_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_access_blocker_test.cpp index 9f497ea1ce864..bc4935d068f40 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_access_blocker_test.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_access_blocker_test.cpp @@ -28,22 +28,36 @@ */ +#include #include +#include -#include "mongo/client/connpool.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/client.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/recovery_unit_noop.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/fail_point.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/future.h" -#include "mongo/util/net/ssl_util.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -129,7 +143,7 @@ TEST_F(TenantMigrationRecipientAccessBlockerTest, NoopFunctions) { ASSERT_OK(mtab.checkIfCanBuildIndex()); } -TEST_F(TenantMigrationRecipientAccessBlockerTest, StateReject) { +TEST_F(TenantMigrationRecipientAccessBlockerTest, StateRejectReadsAndWrites) { TenantMigrationRecipientAccessBlocker mtab(getServiceContext(), getMigrationId()); { @@ -137,34 +151,38 @@ TEST_F(TenantMigrationRecipientAccessBlockerTest, StateReject) { mtab.appendInfoForServerStatus(&builder); ASSERT_BSONOBJ_EQ(builder.obj(), BSON("migrationId" << getMigrationId() << "state" - << "reject" + << "rejectReadsAndWrites" << 
"ttlIsBlocked" << true)); } // Default read concern. - ASSERT_THROWS_CODE( - mtab.getCanReadFuture(opCtx(), "find").get(), DBException, ErrorCodes::SnapshotTooOld); + ASSERT_THROWS_CODE(mtab.getCanRunCommandFuture(opCtx(), "find").get(), + DBException, + ErrorCodes::IllegalOperation); // Majority read concern. ReadConcernArgs::get(opCtx()) = ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern); - ASSERT_THROWS_CODE( - mtab.getCanReadFuture(opCtx(), "find").get(), DBException, ErrorCodes::SnapshotTooOld); + ASSERT_THROWS_CODE(mtab.getCanRunCommandFuture(opCtx(), "find").get(), + DBException, + ErrorCodes::IllegalOperation); // Snapshot read concern. ReadConcernArgs::get(opCtx()) = ReadConcernArgs(ReadConcernLevel::kSnapshotReadConcern); opCtx()->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, Timestamp(1, 1)); - ASSERT_THROWS_CODE( - mtab.getCanReadFuture(opCtx(), "find").get(), DBException, ErrorCodes::SnapshotTooOld); + ASSERT_THROWS_CODE(mtab.getCanRunCommandFuture(opCtx(), "find").get(), + DBException, + ErrorCodes::IllegalOperation); // Snapshot read concern with atClusterTime. ReadConcernArgs::get(opCtx()) = ReadConcernArgs(ReadConcernLevel::kSnapshotReadConcern); ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp(1, 1)); - ASSERT_THROWS_CODE( - mtab.getCanReadFuture(opCtx(), "find").get(), DBException, ErrorCodes::SnapshotTooOld); + ASSERT_THROWS_CODE(mtab.getCanRunCommandFuture(opCtx(), "find").get(), + DBException, + ErrorCodes::IllegalOperation); } -TEST_F(TenantMigrationRecipientAccessBlockerTest, StateRejectBefore) { +TEST_F(TenantMigrationRecipientAccessBlockerTest, StateRejectReadsBefore) { TenantMigrationRecipientAccessBlocker mtab(getServiceContext(), getMigrationId()); mtab.startRejectingReadsBefore(Timestamp(1, 1)); @@ -173,7 +191,7 @@ TEST_F(TenantMigrationRecipientAccessBlockerTest, StateRejectBefore) { mtab.appendInfoForServerStatus(&builder); ASSERT_BSONOBJ_EQ(builder.obj(), BSON("migrationId" << getMigrationId() << "state" - << "rejectBefore" + << "rejectReadsBefore" << "rejectBeforeTimestamp" << Timestamp(1, 1) << "ttlIsBlocked" << true)); } @@ -185,28 +203,29 @@ TEST_F(TenantMigrationRecipientAccessBlockerTest, StateRejectBefore) { mtab.appendInfoForServerStatus(&builder); ASSERT_BSONOBJ_EQ(builder.obj(), BSON("migrationId" << getMigrationId() << "state" - << "rejectBefore" + << "rejectReadsBefore" << "rejectBeforeTimestamp" << Timestamp(2, 1) << "ttlIsBlocked" << true)); } // Default read concern. - ASSERT_OK(mtab.getCanReadFuture(opCtx(), "find").getNoThrow()); + ASSERT_OK(mtab.getCanRunCommandFuture(opCtx(), "find").getNoThrow()); // Majority read concern. ReadConcernArgs::get(opCtx()) = ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern); - ASSERT_OK(mtab.getCanReadFuture(opCtx(), "find").getNoThrow()); + ASSERT_OK(mtab.getCanRunCommandFuture(opCtx(), "find").getNoThrow()); // Snapshot read at a later timestamp. ReadConcernArgs::get(opCtx()) = ReadConcernArgs(ReadConcernLevel::kSnapshotReadConcern); ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp(3, 1)); - ASSERT_OK(mtab.getCanReadFuture(opCtx(), "find").getNoThrow()); + ASSERT_OK(mtab.getCanRunCommandFuture(opCtx(), "find").getNoThrow()); // Snapshot read at an earlier timestamp. 
ReadConcernArgs::get(opCtx()) = ReadConcernArgs(ReadConcernLevel::kSnapshotReadConcern); ReadConcernArgs::get(opCtx()).setArgsAtClusterTimeForSnapshot(Timestamp(1, 1)); - ASSERT_THROWS_CODE( - mtab.getCanReadFuture(opCtx(), "find").get(), DBException, ErrorCodes::SnapshotTooOld); + ASSERT_THROWS_CODE(mtab.getCanRunCommandFuture(opCtx(), "find").get(), + DBException, + ErrorCodes::SnapshotTooOld); } } // namespace repl diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp index 83690fe0f5a5d..bfba9dd3a323a 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.cpp @@ -28,22 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/database.h" +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/delete.h" -#include "mongo/db/ops/update.h" -#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_recipient_entry_helpers.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" -#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/repl/tenant_migration_util.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -55,7 +76,13 @@ namespace tenantMigrationRecipientEntryHelpers { Status insertStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDocument& stateDoc) { const auto nss = NamespaceString::kTenantMigrationRecipientsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); // Sanity check uassert(ErrorCodes::PrimarySteppedDown, @@ -64,7 +91,7 @@ Status insertStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); return writeConflictRetry( - opCtx, "insertTenantMigrationRecipientStateDoc", nss.ns(), [&]() -> Status { + opCtx, "insertTenantMigrationRecipientStateDoc", nss, [&]() -> Status { // Insert the 'stateDoc' if no active tenant migration found for the 'tenantId' provided // in the 'stateDoc'. 
Tenant Migration is considered as active for a tenantId if a state // document exists on the disk for that 'tenantId' and not marked to be garbage @@ -75,7 +102,7 @@ Status insertStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc << BSON("$exists" << false)); const auto updateMod = BSON("$setOnInsert" << stateDoc.toBSON()); auto updateResult = - Helpers::upsert(opCtx, nss, filter, updateMod, /*fromMigrate=*/false); + Helpers::upsert(opCtx, collection, filter, updateMod, /*fromMigrate=*/false); // '$setOnInsert' update operator can no way modify the existing on-disk state doc. invariant(!updateResult.numDocsModified); @@ -92,17 +119,23 @@ Status insertStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc Status updateStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDocument& stateDoc) { const auto nss = NamespaceString::kTenantMigrationRecipientsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); - - if (!collection) { + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + if (!collection.exists()) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << nss.ns() << " does not exist"); + str::stream() << nss.toStringForErrorMsg() << " does not exist"); } return writeConflictRetry( - opCtx, "updateTenantMigrationRecipientStateDoc", nss.ns(), [&]() -> Status { + opCtx, "updateTenantMigrationRecipientStateDoc", nss, [&]() -> Status { auto updateResult = - Helpers::upsert(opCtx, nss, stateDoc.toBSON(), /*fromMigrate=*/false); + Helpers::upsert(opCtx, collection, stateDoc.toBSON(), /*fromMigrate=*/false); if (updateResult.numMatched == 0) { return {ErrorCodes::NoSuchKey, str::stream() @@ -116,35 +149,45 @@ Status updateStateDoc(OperationContext* opCtx, const TenantMigrationRecipientDoc StatusWith deleteStateDocIfMarkedAsGarbageCollectable(OperationContext* opCtx, StringData tenantId) { const auto nss = NamespaceString::kTenantMigrationRecipientsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); - - if (!collection) { + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + if (!collection.exists()) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << nss.ns() << " does not exist"); + str::stream() << nss.toStringForErrorMsg() << " does not exist"); } auto query = BSON(TenantMigrationRecipientDocument::kTenantIdFieldName << tenantId << TenantMigrationRecipientDocument::kExpireAtFieldName << BSON("$exists" << 1)); - return writeConflictRetry( - opCtx, "deleteTenantMigrationRecipientStateDoc", nss.ns(), [&]() -> bool { - auto nDeleted = - deleteObjects(opCtx, collection.getCollection(), nss, query, true /* justOne */); - return nDeleted > 0; - }); + return writeConflictRetry(opCtx, "deleteTenantMigrationRecipientStateDoc", nss, [&]() -> bool { + auto nDeleted = deleteObjects(opCtx, collection, query, true /* justOne */); + return nDeleted > 0; + }); } StatusWith getStateDoc(OperationContext* opCtx, const UUID& migrationUUID) { // Read the most up to date data. ReadSourceScope readSourceScope(opCtx, RecoveryUnit::ReadSource::kNoTimestamp); + // ReadConcern must also be fixed for the new scope. It will get restored when exiting this. 
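getStateDoc above pins the read source to kNoTimestamp and, with the new lines, also swaps in default ReadConcernArgs for the scope, restoring the original via ON_BLOCK_EXIT when the function returns. A standalone sketch of that save-with-std::exchange / restore-on-scope-exit idiom, using a local ScopeGuard stand-in rather than the MongoDB utility:

```cpp
// Standalone sketch of the save/restore idiom: std::exchange swaps in a fresh
// value and a scope guard restores the original on exit (mirroring
// ON_BLOCK_EXIT). ScopeGuard here is a local stand-in, not the MongoDB one.
#include <iostream>
#include <string>
#include <utility>

template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F f) : _f(std::move(f)) {}
    ~ScopeGuard() {
        _f();  // runs when the enclosing scope exits
    }

private:
    F _f;
};

std::string gReadConcern = "majority";  // stands in for the opCtx decoration

void readLatestNoTimestamp() {
    // Swap in a default read concern for the duration of this function only.
    auto original = std::exchange(gReadConcern, std::string{"local"});
    ScopeGuard restore{[&] { gReadConcern = std::move(original); }};

    std::cout << "inside scope: " << gReadConcern << '\n';  // "local"
}  // guard runs here; the original value is restored

int main() {
    readLatestNoTimestamp();
    std::cout << "after scope: " << gReadConcern << '\n';  // "majority"
}
```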
+ auto originalReadConcern = + std::exchange(repl::ReadConcernArgs::get(opCtx), repl::ReadConcernArgs()); + ON_BLOCK_EXIT([&] { repl::ReadConcernArgs::get(opCtx) = std::move(originalReadConcern); }); + AutoGetCollectionForRead collection(opCtx, NamespaceString::kTenantMigrationRecipientsNamespace); if (!collection) { return Status(ErrorCodes::NamespaceNotFound, str::stream() << "Collection not found: " - << NamespaceString::kTenantMigrationRecipientsNamespace.ns()); + << NamespaceString::kTenantMigrationRecipientsNamespace + .toStringForErrorMsg()); } BSONObj result; diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.h b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.h index 1f28e1b75b876..60df32206269a 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.h +++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers.h @@ -32,6 +32,7 @@ namespace mongo { class OperationContext; + class TenantMigrationRecipientDocument; class Status; class UUID; diff --git a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers_test.cpp index 9a916aa8a161a..056b966012e8d 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_entry_helpers_test.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_entry_helpers_test.cpp @@ -27,19 +27,41 @@ * it in the license file. */ -#include +#include // IWYU pragma: keep +#include +#include +#include -#include "mongo/platform/basic.h" +#include -#include "mongo/config.h" -#include "mongo/db/namespace_string.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/client.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/repl/tenant_migration_pem_payload_gen.h" #include "mongo/db/repl/tenant_migration_recipient_entry_helpers.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/serverless/serverless_types_gen.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/ssl_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp b/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp index 14ed2a8fa256a..1d3dd219e9a42 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp @@ -31,18 +31,33 @@ #include "mongo/db/repl/tenant_migration_recipient_op_observer.h" #include - -#include "mongo/db/multitenancy_gen.h" -#include "mongo/db/repl/tenant_file_importer_service.h" +#include + +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include 
"mongo/db/repl/tenant_migration_access_blocker.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" -#include "mongo/db/repl/tenant_migration_recipient_service.h" -#include "mongo/db/repl/tenant_migration_shard_merge_util.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" -#include "mongo/db/repl/tenant_migration_util.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -51,6 +66,23 @@ namespace repl { using namespace fmt; namespace { +void addTenantMigrationRecipientAccessBlocker(ServiceContext* serviceContext, + const StringData& tenantId, + const UUID& migrationId) { + auto& registry = TenantMigrationAccessBlockerRegistry::get(serviceContext); + TenantId tid = TenantId::parseFromString(tenantId); + + if (registry.getTenantMigrationAccessBlockerForTenantId( + tid, TenantMigrationAccessBlocker::BlockerType::kRecipient)) { + return; + } + + auto mtab = + std::make_shared(serviceContext, migrationId); + registry.add(tid, mtab); +} + + /** * Transitions the TenantMigrationRecipientAccessBlocker to the rejectBefore state. */ @@ -59,31 +91,26 @@ void onSetRejectReadsBeforeTimestamp(OperationContext* opCtx, invariant(recipientStateDoc.getState() == TenantMigrationRecipientStateEnum::kConsistent); invariant(recipientStateDoc.getRejectReadsBeforeTimestamp()); - if (recipientStateDoc.getProtocol() == MigrationProtocolEnum::kMultitenantMigrations) { - auto mtab = tenant_migration_access_blocker::getTenantMigrationRecipientAccessBlocker( - opCtx->getServiceContext(), recipientStateDoc.getTenantId()); + auto mtabVector = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getRecipientAccessBlockersForMigration(recipientStateDoc.getId()); + invariant(!mtabVector.empty()); + for (auto& mtab : mtabVector) { invariant(mtab); mtab->startRejectingReadsBefore(recipientStateDoc.getRejectReadsBeforeTimestamp().value()); - } else { - auto mtab = tenant_migration_access_blocker::getRecipientAccessBlockerForMigration( - opCtx->getServiceContext(), recipientStateDoc.getId()); - invariant(mtab); - mtab->startRejectingReadsBefore(recipientStateDoc.getRejectReadsBeforeTimestamp().get()); } } -void handleMTMStateChange(OperationContext* opCtx, - const TenantMigrationRecipientDocument& recipientStateDoc) { +void handleStateChange(OperationContext* opCtx, + const TenantMigrationRecipientDocument& recipientStateDoc) { auto state = recipientStateDoc.getState(); switch (state) { case TenantMigrationRecipientStateEnum::kUninitialized: break; case TenantMigrationRecipientStateEnum::kStarted: - tenant_migration_access_blocker::addTenantMigrationRecipientAccessBlocker( - opCtx->getServiceContext(), - recipientStateDoc.getTenantId(), - recipientStateDoc.getId()); + addTenantMigrationRecipientAccessBlocker(opCtx->getServiceContext(), + recipientStateDoc.getTenantId(), + recipientStateDoc.getId()); break; case 
TenantMigrationRecipientStateEnum::kConsistent: if (recipientStateDoc.getRejectReadsBeforeTimestamp()) { @@ -98,66 +125,6 @@ void handleMTMStateChange(OperationContext* opCtx, MONGO_UNREACHABLE_TASSERT(6112900); } } - -void handleShardMergeStateChange(OperationContext* opCtx, - const TenantMigrationRecipientDocument& recipientStateDoc) { - auto state = recipientStateDoc.getState(); - - auto fileImporter = repl::TenantFileImporterService::get(opCtx->getServiceContext()); - - switch (state) { - case TenantMigrationRecipientStateEnum::kUninitialized: - break; - case TenantMigrationRecipientStateEnum::kStarted: - fileImporter->startMigration(recipientStateDoc.getId()); - break; - case TenantMigrationRecipientStateEnum::kLearnedFilenames: - fileImporter->learnedAllFilenames(recipientStateDoc.getId()); - break; - case TenantMigrationRecipientStateEnum::kConsistent: - if (recipientStateDoc.getRejectReadsBeforeTimestamp()) { - onSetRejectReadsBeforeTimestamp(opCtx, recipientStateDoc); - } - break; - case TenantMigrationRecipientStateEnum::kDone: - case TenantMigrationRecipientStateEnum::kCommitted: - case TenantMigrationRecipientStateEnum::kAborted: - break; - } -} - -void handleShardMergeDocInsertion(const TenantMigrationRecipientDocument& doc, - OperationContext* opCtx) { - switch (doc.getState()) { - case TenantMigrationRecipientStateEnum::kUninitialized: - case TenantMigrationRecipientStateEnum::kLearnedFilenames: - case TenantMigrationRecipientStateEnum::kConsistent: - uasserted(ErrorCodes::IllegalOperation, - str::stream() << "Inserting the TenantMigrationRecipient document in state " - << TenantMigrationRecipientState_serializer(doc.getState()) - << " is illegal"); - break; - case TenantMigrationRecipientStateEnum::kStarted: { - invariant(doc.getTenantIds()); - auto mtab = std::make_shared( - opCtx->getServiceContext(), doc.getId()); - TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .add(*doc.getTenantIds(), mtab); - - opCtx->recoveryUnit()->onRollback([migrationId = doc.getId()](OperationContext* opCtx) { - TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .removeAccessBlockersForMigration( - migrationId, TenantMigrationAccessBlocker::BlockerType::kRecipient); - }); - } break; - case TenantMigrationRecipientStateEnum::kDone: - case TenantMigrationRecipientStateEnum::kAborted: - case TenantMigrationRecipientStateEnum::kCommitted: - break; - default: - MONGO_UNREACHABLE; - } -} } // namespace void TenantMigrationRecipientOpObserver::onInserts( @@ -166,7 +133,8 @@ void TenantMigrationRecipientOpObserver::onInserts( std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kTenantMigrationRecipientsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { for (auto it = first; it != last; it++) { @@ -176,92 +144,77 @@ void TenantMigrationRecipientOpObserver::onInserts( ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) .acquireLock(ServerlessOperationLockRegistry::LockType::kTenantRecipient, recipientStateDoc.getId()); - opCtx->recoveryUnit()->onRollback( - [migrationId = recipientStateDoc.getId()](OperationContext* opCtx) { - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .releaseLock(ServerlessOperationLockRegistry::LockType::kTenantDonor, - migrationId); - }); - } - - if (auto protocol = 
recipientStateDoc.getProtocol().value_or(kDefaultMigrationProtocol); - protocol == MigrationProtocolEnum::kShardMerge) { - handleShardMergeDocInsertion(recipientStateDoc, opCtx); + opCtx->recoveryUnit()->onRollback([migrationId = recipientStateDoc.getId()]( + OperationContext* opCtx) { + ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) + .releaseLock(ServerlessOperationLockRegistry::LockType::kTenantRecipient, + migrationId); + }); } } } } void TenantMigrationRecipientOpObserver::onUpdate(OperationContext* opCtx, - const OplogUpdateEntryArgs& args) { + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.coll->ns() == NamespaceString::kTenantMigrationRecipientsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { auto recipientStateDoc = TenantMigrationRecipientDocument::parse( IDLParserContext("recipientStateDoc"), args.updateArgs->updatedDoc); - opCtx->recoveryUnit()->onCommit([recipientStateDoc](OperationContext* opCtx, - boost::optional) { - if (recipientStateDoc.getExpireAt()) { - repl::TenantFileImporterService::get(opCtx->getServiceContext()) - ->interrupt(recipientStateDoc.getId()); - - ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) - .releaseLock(ServerlessOperationLockRegistry::LockType::kTenantRecipient, - recipientStateDoc.getId()); - - std::vector tenantIdsToRemove; - auto cleanUpBlockerIfGarbage = - [&](const TenantId& tenantId, - std::shared_ptr& mtab) { - if (recipientStateDoc.getId() != mtab->getMigrationId()) { - return; - } - - auto recipientMtab = - checked_pointer_cast(mtab); - if (recipientMtab->inStateReject()) { - // The TenantMigrationRecipientAccessBlocker entry needs to be removed - // to re-allow reads and future migrations with the same tenantId as - // this migration has already been aborted and forgotten. - tenantIdsToRemove.push_back(tenantId); - return; - } - // Once the state doc is marked garbage collectable the TTL deletions should - // be unblocked. - recipientMtab->stopBlockingTTL(); - }; - - // TODO SERVER-68799 Simplify cleanup logic for shard merge as the tenants share a - // single RTAB - TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .applyAll(TenantMigrationAccessBlocker::BlockerType::kRecipient, - cleanUpBlockerIfGarbage); + opCtx->recoveryUnit()->onCommit( + [recipientStateDoc](OperationContext* opCtx, boost::optional) { + if (recipientStateDoc.getExpireAt()) { + ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) + .releaseLock(ServerlessOperationLockRegistry::LockType::kTenantRecipient, + recipientStateDoc.getId()); + + bool shouldCleanAccessBlockers = false; + auto cleanUpBlockerIfGarbage = + [&](const TenantId& tenantId, + std::shared_ptr& mtab) { + if (recipientStateDoc.getId() != mtab->getMigrationId()) { + return; + } + + auto recipientMtab = + checked_pointer_cast(mtab); + if (recipientMtab->inStateReject()) { + // The TenantMigrationRecipientAccessBlocker entry needs to be + // removed to re-allow reads and future migrations with the same + // tenantId as this migration has already been aborted and + // forgotten. + shouldCleanAccessBlockers = true; + return; + } + // Once the state doc is marked garbage collectable the TTL deletions + // should be unblocked. + recipientMtab->stopBlockingTTL(); + }; - for (const auto& tenantId : tenantIdsToRemove) { - // TODO SERVER-68799: Remove TenantMigrationAccessBlocker removal logic. 
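The onCommit hook in this hunk walks every recipient access blocker registered for the migration once the state document is marked garbage-collectable: blockers still in the reject state cause the blockers for the migration to be removed outright (so reads and future migrations for the tenant can proceed), while the rest merely stop blocking TTL deletions. A simplified standalone model of that pass, with hypothetical types in place of the real registry:

```cpp
// Simplified standalone model (hypothetical types, not the MongoDB registry)
// of the garbage-collection pass: visit every recipient blocker for this
// migration; if any is still rejecting, remove the migration's blockers,
// otherwise just lift TTL blocking.
#include <algorithm>
#include <iostream>
#include <vector>

struct RecipientBlocker {
    int migrationId;
    bool inStateReject;
    bool ttlBlocked = true;
};

void onMarkedGarbageCollectable(std::vector<RecipientBlocker>& registry, int migrationId) {
    bool shouldRemove = false;
    for (auto& blocker : registry) {
        if (blocker.migrationId != migrationId)
            continue;
        if (blocker.inStateReject) {
            shouldRemove = true;  // migration was aborted and forgotten
            continue;
        }
        blocker.ttlBlocked = false;  // unblock TTL deletions once GC-able
    }
    if (shouldRemove) {
        registry.erase(std::remove_if(registry.begin(),
                                      registry.end(),
                                      [&](const RecipientBlocker& b) {
                                          return b.migrationId == migrationId;
                                      }),
                       registry.end());
    }
}

int main() {
    std::vector<RecipientBlocker> registry{{1, false}, {2, true}};
    onMarkedGarbageCollectable(registry, 2);
    std::cout << "blockers left: " << registry.size() << '\n';  // 1
}
```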
TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .remove(tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); + .applyAll(TenantMigrationAccessBlocker::BlockerType::kRecipient, + cleanUpBlockerIfGarbage); + + if (shouldCleanAccessBlockers) { + TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .removeAccessBlockersForMigration( + recipientStateDoc.getId(), + TenantMigrationAccessBlocker::BlockerType::kRecipient); + } } - } - auto protocol = recipientStateDoc.getProtocol().value_or(kDefaultMigrationProtocol); - switch (protocol) { - case MigrationProtocolEnum::kMultitenantMigrations: - handleMTMStateChange(opCtx, recipientStateDoc); - break; - case MigrationProtocolEnum::kShardMerge: - handleShardMergeStateChange(opCtx, recipientStateDoc); - break; - default: - MONGO_UNREACHABLE; - } - }); + handleStateChange(opCtx, recipientStateDoc); + }); } } void TenantMigrationRecipientOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kTenantMigrationRecipientsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { auto recipientStateDoc = @@ -285,7 +238,8 @@ void TenantMigrationRecipientOpObserver::aboutToDelete(OperationContext* opCtx, void TenantMigrationRecipientOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kTenantMigrationRecipientsNamespace && !tenant_migration_access_blocker::inRecoveryMode(opCtx)) { auto tmi = tenantMigrationInfo(opCtx); @@ -299,8 +253,6 @@ void TenantMigrationRecipientOpObserver::onDelete(OperationContext* opCtx, LOGV2_INFO(6114101, "Removing expired migration access blocker", "migrationId"_attr = migrationId); - repl::TenantFileImporterService::get(opCtx->getServiceContext()) - ->interrupt(migrationId); TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .removeAccessBlockersForMigration( migrationId, TenantMigrationAccessBlocker::BlockerType::kRecipient); @@ -313,10 +265,10 @@ repl::OpTime TenantMigrationRecipientOpObserver::onDropCollection( const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { if (collectionName == NamespaceString::kTenantMigrationRecipientsNamespace) { opCtx->recoveryUnit()->onCommit([](OperationContext* opCtx, boost::optional) { - repl::TenantFileImporterService::get(opCtx->getServiceContext())->interruptAll(); TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .removeAll(TenantMigrationAccessBlocker::BlockerType::kRecipient); diff --git a/src/mongo/db/repl/tenant_migration_recipient_op_observer.h b/src/mongo/db/repl/tenant_migration_recipient_op_observer.h index 3ff69365c7eda..61e51c4b42e07 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_op_observer.h +++ b/src/mongo/db/repl/tenant_migration_recipient_op_observer.h @@ -29,7 +29,19 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include 
"mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { @@ -37,7 +49,7 @@ namespace repl { /** * OpObserver for tenant migration recipient. */ -class TenantMigrationRecipientOpObserver final : public OpObserver { +class TenantMigrationRecipientOpObserver final : public OpObserverNoop { TenantMigrationRecipientOpObserver(const TenantMigrationRecipientOpObserver&) = delete; TenantMigrationRecipientOpObserver& operator=(const TenantMigrationRecipientOpObserver&) = delete; @@ -46,210 +58,40 @@ class TenantMigrationRecipientOpObserver final : public OpObserver { TenantMigrationRecipientOpObserver() = default; ~TenantMigrationRecipientOpObserver() = default; - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfig, NamespaceFilter::kConfig}; + } void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) final; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - void onInternalOpMessage(OperationContext* opCtx, - const 
NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} - - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) final{}; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final {} - - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) final {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - 
const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) final {} - -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final { - } + CollectionDropType dropType, + bool markFromMigrate) final; }; } // namespace repl diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.cpp b/src/mongo/db/repl/tenant_migration_recipient_service.cpp index 77829e61f4f3d..b1e0228ea5be6 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_service.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_service.cpp @@ -28,66 +28,129 @@ */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include +#include +#include +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_connection.h" +#include "mongo/client/internal_auth.h" #include "mongo/client/replica_set_monitor.h" -#include "mongo/client/replica_set_monitor_manager.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/document_validation.h" +#include "mongo/db/catalog/local_oplog_info.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/commands/tenant_migration_donor_cmds_gen.h" -#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/feature_compatibility_version_parser.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/keys_collection_util.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/ops/write_ops_exec.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/cloner_utils.h" #include "mongo/db/repl/data_replicator_external_state.h" +#include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_buffer_collection.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/oplog_interface.h" #include 
"mongo/db/repl/oplog_interface_local.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_auth.h" +#include "mongo/db/repl/replication_consistency_markers.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/repl/tenant_migration_recipient_entry_helpers.h" #include "mongo/db/repl/tenant_migration_recipient_service.h" -#include "mongo/db/repl/tenant_migration_shard_merge_util.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/repl/tenant_migration_statistics.h" +#include "mongo/db/repl/tenant_migration_util.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/server_options.h" +#include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_import.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/db/write_concern_options.h" #include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/future_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration - namespace mongo { namespace repl { namespace { @@ -95,9 +158,6 @@ using namespace fmt; const std::string kTTLIndexName = "TenantMigrationRecipientTTLIndex"; const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max()); constexpr StringData kOplogBufferPrefix = "repl.migration.oplog_"_sd; -constexpr int kBackupCursorFileFetcherRetryAttempts = 10; -constexpr int kCheckpointTsBackupCursorErrorCode = 6929900; -constexpr int kCloseCursorBeforeOpenErrorCode = 50886; NamespaceString getOplogBufferNs(const UUID& migrationUUID) { return NamespaceString::makeGlobalConfigCollection(kOplogBufferPrefix + @@ -165,7 +225,6 @@ std::shared_ptr 
createOplogBuffer(OperationContext* opCtx } // namespace MONGO_FAIL_POINT_DEFINE(hangMigrationBeforeRetryCheck); -MONGO_FAIL_POINT_DEFINE(skipCreatingIndexDuringRebuildService); MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationRecipientInstanceBeforeDeletingOldStateDoc); namespace { @@ -289,9 +348,6 @@ void TenantMigrationRecipientService::abortAllMigrations(OperationContext* opCtx ExecutorFuture TenantMigrationRecipientService::_rebuildService( std::shared_ptr executor, const CancellationToken& token) { return AsyncTry([this] { - if (MONGO_unlikely(skipCreatingIndexDuringRebuildService.shouldFail())) { - return; - } auto nss = getStateDocumentsNS(); AllowOpCtxWhenServiceRebuildingBlock allowOpCtxBlock(Client::getCurrent()); @@ -322,8 +378,6 @@ void TenantMigrationRecipientService::checkIfConflictsWithOtherInstances( auto recipientStateDocument = TenantMigrationRecipientDocument::parse( IDLParserContext("recipientStateDoc"), initialStateDoc); - auto protocol = recipientStateDocument.getProtocol().value_or( - MigrationProtocolEnum::kMultitenantMigrations); for (auto& instance : existingInstances) { auto existingTypedInstance = @@ -335,17 +389,6 @@ void TenantMigrationRecipientService::checkIfConflictsWithOtherInstances( continue; } - uassert(ErrorCodes::ConflictingOperationInProgress, - "an existing shard merge is in progress", - existingTypedInstance->getProtocol() != MigrationProtocolEnum::kShardMerge); - - uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "cannot start " - << MigrationProtocol_serializer(MigrationProtocolEnum::kShardMerge) - << " migration, tenant " << existingTypedInstance->getTenantId() - << " is already migrating", - protocol != MigrationProtocolEnum::kShardMerge); - uassert(ErrorCodes::ConflictingOperationInProgress, str::stream() << "tenant " << tenantId << " is already migrating", existingTypedInstance->getTenantId() != tenantId); @@ -368,8 +411,6 @@ TenantMigrationRecipientService::Instance::Instance( _stateDoc( TenantMigrationRecipientDocument::parse(IDLParserContext("recipientStateDoc"), stateDoc)), _tenantId(_stateDoc.getTenantId().toString()), - _tenantIds(_stateDoc.getTenantIds() ? 
*_stateDoc.getTenantIds() : std::vector()), - _protocol(_stateDoc.getProtocol().value_or(MigrationProtocolEnum::kMultitenantMigrations)), _migrationUuid(_stateDoc.getId()), _donorConnectionString(_stateDoc.getDonorConnectionString().toString()), _donorUri(uassertStatusOK(MongoURI::parse(_stateDoc.getDonorConnectionString().toString()))), @@ -397,9 +438,6 @@ TenantMigrationRecipientService::Instance::Instance( return boost::none; } }()) { - - // Validate the provided tenantIds matches with the protocol - _validateTenantIdsForProtocol(); } boost::optional TenantMigrationRecipientService::Instance::reportForCurrentOp( @@ -411,9 +449,7 @@ boost::optional TenantMigrationRecipientService::Instance::reportForCur stdx::lock_guard lk(_mutex); bob.append("desc", "tenant recipient migration"); _migrationUuid.appendToBuilder(&bob, "instanceID"_sd); - if (getProtocol() == MigrationProtocolEnum::kMultitenantMigrations) { - bob.append("tenantId", _stateDoc.getTenantId()); - } + bob.append("tenantId", _stateDoc.getTenantId()); bob.append("donorConnectionString", _stateDoc.getDonorConnectionString()); bob.append("readPreference", _stateDoc.getReadPreference().toInnerBSON()); bob.append("state", TenantMigrationRecipientState_serializer(_stateDoc.getState())); @@ -479,7 +515,7 @@ void TenantMigrationRecipientService::Instance::checkIfOptionsConflict( invariant(stateDoc.getId() == _migrationUuid); - if (stateDoc.getProtocol() != _protocol || stateDoc.getTenantId() != _tenantId || + if (stateDoc.getTenantId() != _tenantId || stateDoc.getDonorConnectionString() != _donorConnectionString || !stateDoc.getReadPreference().equals(_readPreference) || stateDoc.getRecipientCertificateForDonor() != _recipientCertificateForDonor) { @@ -531,11 +567,6 @@ TenantMigrationRecipientService::Instance::waitUntilMigrationReachesReturnAfterR // In the event of a donor failover, it is possible that a new donor has stepped up and // initiated this 'recipientSyncData' cmd. Make sure the recipient is not in the middle of // restarting the oplog applier to retry the future chain. - // - // For shard merge protocol, we start tenant oplog applier after recipient informs donor, - // the data is in consistent state. So, there is a possibility, recipient might receive - // recipientSyncData cmd with `returnAfterReachingDonorTimestamp` from donor before the - // recipient has started the tenant oplog applier. 
opCtx->waitForConditionOrInterrupt(_oplogApplierReadyCondVar, lk, [&] { return _oplogApplierReady || _dataSyncCompletionPromise.getFuture().isReady(); }); @@ -935,293 +966,6 @@ SemiFuture TenantMigrationRecipientService::Instance::_initializeStateDoc( .semi(); } -SemiFuture TenantMigrationRecipientService::Instance::_killBackupCursor() { - stdx::lock_guard lk(_mutex); - - auto& donorBackupCursorInfo = _getDonorBackupCursorInfo(lk); - if (donorBackupCursorInfo.cursorId <= 0) { - return SemiFuture::makeReady(); - } - - if (_backupCursorKeepAliveFuture) { - _backupCursorKeepAliveCancellation.cancel(); - } - - return std::exchange(_backupCursorKeepAliveFuture, {}) - .value_or(SemiFuture::makeReady()) - .thenRunOn(_recipientService->getInstanceCleanupExecutor()) - .then([this, self = shared_from_this(), donorBackupCursorInfo] { - LOGV2_INFO(6113421, - "Killing backup cursor", - "migrationId"_attr = getMigrationUUID(), - "cursorId"_attr = donorBackupCursorInfo.cursorId); - - stdx::lock_guard lk(_mutex); - executor::RemoteCommandRequest request( - _client->getServerHostAndPort(), - donorBackupCursorInfo.nss.db().toString(), - BSON("killCursors" << donorBackupCursorInfo.nss.coll().toString() << "cursors" - << BSON_ARRAY(donorBackupCursorInfo.cursorId)), - nullptr); - request.sslMode = _donorUri.getSSLMode(); - - const auto scheduleResult = _scheduleKillBackupCursorWithLock( - lk, _recipientService->getInstanceCleanupExecutor()); - if (!scheduleResult.isOK()) { - LOGV2_WARNING(6113004, - "Failed to run killCursors command on backup cursor", - "status"_attr = scheduleResult.getStatus()); - } - }) - .semi(); -} - -SemiFuture TenantMigrationRecipientService::Instance::_openBackupCursor( - const CancellationToken& token) { - - const auto aggregateCommandRequestObj = [] { - AggregateCommandRequest aggRequest( - NamespaceString::makeCollectionlessAggregateNSS(DatabaseName::kAdmin), - {BSON("$backupCursor" << BSONObj())}); - // We must set a writeConcern on internal commands. - aggRequest.setWriteConcern(WriteConcernOptions()); - return aggRequest.toBSON(BSONObj()); - }(); - - stdx::lock_guard lk(_mutex); - LOGV2_DEBUG(6113000, - 1, - "Trying to open backup cursor on donor primary", - "migrationId"_attr = _stateDoc.getId(), - "donorConnectionString"_attr = _stateDoc.getDonorConnectionString()); - - const auto startMigrationDonorTimestamp = _stateDoc.getStartMigrationDonorTimestamp(); - - auto fetchStatus = std::make_shared>(); - auto uniqueMetadataInfo = std::make_unique>(); - const auto fetcherCallback = [this, - self = shared_from_this(), - fetchStatus, - metadataInfoPtr = uniqueMetadataInfo.get(), - token, - startMigrationDonorTimestamp]( - const Fetcher::QueryResponseStatus& dataStatus, - Fetcher::NextAction* nextAction, - BSONObjBuilder* getMoreBob) noexcept { - try { - uassertStatusOK(dataStatus); - uassert(ErrorCodes::CallbackCanceled, "backup cursor interrupted", !token.isCanceled()); - - const auto uniqueOpCtx = cc().makeOperationContext(); - const auto opCtx = uniqueOpCtx.get(); - - const auto& data = dataStatus.getValue(); - for (const BSONObj& doc : data.documents) { - if (doc["metadata"]) { - // First batch must contain the metadata. 
- const auto& metadata = doc["metadata"].Obj(); - auto checkpointTimestamp = metadata["checkpointTimestamp"].timestamp(); - - LOGV2_INFO(6113001, - "Opened backup cursor on donor", - "migrationId"_attr = getMigrationUUID(), - "backupCursorId"_attr = data.cursorId, - "backupCursorCheckpointTimestamp"_attr = checkpointTimestamp); - - { - stdx::lock_guard lk(_mutex); - stdx::lock_guard sharedDatalk(*_sharedData); - _sharedData->setDonorBackupCursorInfo( - sharedDatalk, - BackupCursorInfo{data.cursorId, data.nss, checkpointTimestamp}); - } - - // This ensures that the recipient won’t receive any 2 phase index build donor - // oplog entries during the migration. We also have a check in the tenant oplog - // applier to detect such oplog entries. Adding a check here helps us to detect - // the problem earlier. - uassert(kCheckpointTsBackupCursorErrorCode, - "backupCursorCheckpointTimestamp should be greater than or equal to " - "startMigrationDonorTimestamp", - checkpointTimestamp >= startMigrationDonorTimestamp); - - invariant(metadataInfoPtr && !*metadataInfoPtr); - (*metadataInfoPtr) = shard_merge_utils::MetadataInfo::constructMetadataInfo( - getMigrationUUID(), _client->getServerAddress(), metadata); - } else { - LOGV2_DEBUG(6113002, - 1, - "Backup cursor entry", - "migrationId"_attr = getMigrationUUID(), - "filename"_attr = doc["filename"].String(), - "backupCursorId"_attr = data.cursorId); - - invariant(metadataInfoPtr && *metadataInfoPtr); - auto docs = - std::vector{(*metadataInfoPtr)->toBSON(doc).getOwned()}; - - // Disabling internal document validation because the fetcher batch size - // can exceed the max data size limit BSONObjMaxUserSize with the - // additional fields we add to documents. - DisableDocumentValidation documentValidationDisabler( - opCtx, DocumentValidationSettings::kDisableInternalValidation); - - write_ops::InsertCommandRequest insertOp( - shard_merge_utils::getDonatedFilesNs(getMigrationUUID())); - insertOp.setDocuments(std::move(docs)); - insertOp.setWriteCommandRequestBase([] { - write_ops::WriteCommandRequestBase wcb; - wcb.setOrdered(true); - return wcb; - }()); - - auto writeResult = write_ops_exec::performInserts(opCtx, insertOp); - invariant(!writeResult.results.empty()); - // Writes are ordered, check only the last writeOp result. - uassertStatusOK(writeResult.results.back()); - } - } - - *fetchStatus = Status::OK(); - if (!getMoreBob || data.documents.empty()) { - // Exit fetcher but keep the backupCursor alive to prevent WT on Donor from - // modifying file bytes. backupCursor can be closed after all Recipient nodes - // have copied files from Donor primary. 
- *nextAction = Fetcher::NextAction::kExitAndKeepCursorAlive; - return; - } - - getMoreBob->append("getMore", data.cursorId); - getMoreBob->append("collection", data.nss.coll()); - } catch (DBException& ex) { - LOGV2_ERROR(6409801, - "Error fetching backup cursor entries", - "migrationId"_attr = getMigrationUUID(), - "error"_attr = ex.toString()); - *fetchStatus = ex.toStatus(); - } - }; - - _donorFilenameBackupCursorFileFetcher = std::make_unique( - _backupCursorExecutor.get(), - _client->getServerHostAndPort(), - DatabaseName::kAdmin.toString(), - aggregateCommandRequestObj, - fetcherCallback, - ReadPreferenceSetting(ReadPreference::PrimaryPreferred).toContainingBSON(), - executor::RemoteCommandRequest::kNoTimeout, /* aggregateTimeout */ - executor::RemoteCommandRequest::kNoTimeout, /* getMoreNetworkTimeout */ - RemoteCommandRetryScheduler::makeRetryPolicy( - kBackupCursorFileFetcherRetryAttempts, executor::RemoteCommandRequest::kNoTimeout), - transport::kGlobalSSLMode); - - uassertStatusOK(_donorFilenameBackupCursorFileFetcher->schedule()); - - return _donorFilenameBackupCursorFileFetcher->onCompletion() - .thenRunOn(**_scopedExecutor) - .then([fetchStatus, uniqueMetadataInfo = std::move(uniqueMetadataInfo)] { - if (!*fetchStatus) { - // the callback was never invoked - uasserted(6113007, "Internal error running cursor callback in command"); - } - - uassertStatusOK(fetchStatus->get()); - }) - .semi(); -} - -StatusWith -TenantMigrationRecipientService::Instance::_scheduleKillBackupCursorWithLock( - WithLock lk, std::shared_ptr executor) { - auto& donorBackupCursorInfo = _getDonorBackupCursorInfo(lk); - executor::RemoteCommandRequest killCursorsRequest( - _client->getServerHostAndPort(), - donorBackupCursorInfo.nss.db().toString(), - BSON("killCursors" << donorBackupCursorInfo.nss.coll().toString() << "cursors" - << BSON_ARRAY(donorBackupCursorInfo.cursorId)), - nullptr); - killCursorsRequest.sslMode = _donorUri.getSSLMode(); - - return executor->scheduleRemoteCommand( - killCursorsRequest, [](const executor::TaskExecutor::RemoteCommandCallbackArgs& args) { - if (!args.response.isOK()) { - LOGV2_WARNING(6113005, - "killCursors command task failed", - "error"_attr = redact(args.response.status)); - return; - } - auto status = getStatusFromCommandResult(args.response.data); - if (status.isOK()) { - LOGV2_INFO(6113415, "Killed backup cursor"); - } else { - LOGV2_WARNING(6113006, "killCursors command failed", "error"_attr = redact(status)); - } - }); -} - -SemiFuture TenantMigrationRecipientService::Instance::_openBackupCursorWithRetry( - const CancellationToken& token) { - return AsyncTry([this, self = shared_from_this(), token] { return _openBackupCursor(token); }) - .until([this, self = shared_from_this()](Status status) { - if (status == ErrorCodes::BackupCursorOpenConflictWithCheckpoint) { - LOGV2_INFO(6113008, - "Retrying backup cursor creation after transient error", - "migrationId"_attr = getMigrationUUID(), - "status"_attr = status); - - return false; - } else if (status.code() == kCheckpointTsBackupCursorErrorCode || - status.code() == kCloseCursorBeforeOpenErrorCode) { - LOGV2_INFO(6955100, - "Closing backup cursor and retrying after getting retryable error", - "migrationId"_attr = getMigrationUUID(), - "status"_attr = status); - - stdx::lock_guard lk(_mutex); - const auto scheduleResult = - _scheduleKillBackupCursorWithLock(lk, _backupCursorExecutor); - uassertStatusOK(scheduleResult); - - return false; - } - - return true; - }) - .on(**_scopedExecutor, token) - .semi(); -} - 
-const BackupCursorInfo& TenantMigrationRecipientService::Instance::_getDonorBackupCursorInfo( - WithLock) const { - stdx::lock_guard sharedDatalk(*_sharedData); - return _sharedData->getDonorBackupCursorInfo(sharedDatalk); -} - -void TenantMigrationRecipientService::Instance::_keepBackupCursorAlive( - const CancellationToken& token) { - LOGV2_DEBUG(6113200, - 1, - "Starting periodic 'getMore' requests to keep " - "backup cursor alive.", - "migrationId"_attr = getMigrationUUID()); - - stdx::lock_guard lk(_mutex); - _backupCursorKeepAliveCancellation = CancellationSource(token); - auto& donorBackupCursorInfo = _getDonorBackupCursorInfo(lk); - _backupCursorKeepAliveFuture = - shard_merge_utils::keepBackupCursorAlive(_backupCursorKeepAliveCancellation, - _backupCursorExecutor, - _client->getServerHostAndPort(), - donorBackupCursorInfo.cursorId, - donorBackupCursorInfo.nss); -} - -SemiFuture TenantMigrationRecipientService::Instance::_enterLearnedFilenamesState() { - stdx::lock_guard lk(_mutex); - _stateDoc.setState(TenantMigrationRecipientStateEnum::kLearnedFilenames); - return _updateStateDocForMajority(lk); -} - boost::optional TenantMigrationRecipientService::Instance::_getOldestActiveTransactionAt( Timestamp ReadTimestamp) { const auto preparedState = DurableTxnState_serializer(DurableTxnStateEnum::kPrepared); @@ -1264,7 +1008,6 @@ boost::optional TenantMigrationRecipientService::Instance::_getOldestAct } SemiFuture TenantMigrationRecipientService::Instance::_getStartOpTimesFromDonor() { - OpTime startApplyingDonorOpTime; stdx::unique_lock lk(_mutex); // Get the last oplog entry at the read concern majority optime in the remote oplog. It // does not matter which tenant it is for. @@ -1273,20 +1016,14 @@ SemiFuture TenantMigrationRecipientService::Instance::_getStartOpTimesFrom return SemiFuture::makeReady(); } - if (getProtocol() == MigrationProtocolEnum::kShardMerge) { - startApplyingDonorOpTime = - OpTime(_getDonorBackupCursorInfo(lk).checkpointTimestamp, OpTime::kUninitializedTerm); - lk.unlock(); - } else { - lk.unlock(); - startApplyingDonorOpTime = _getDonorMajorityOpTime(_client); - LOGV2_DEBUG(4880600, - 2, - "Found last oplog entry at read concern majority optime on remote node", - "migrationId"_attr = getMigrationUUID(), - "tenantId"_attr = _stateDoc.getTenantId(), - "lastOplogEntry"_attr = startApplyingDonorOpTime.toBSON()); - } + lk.unlock(); + const auto startApplyingDonorOpTime = _getDonorMajorityOpTime(_client); + LOGV2_DEBUG(4880600, + 2, + "Found last oplog entry at read concern majority optime on remote node", + "migrationId"_attr = getMigrationUUID(), + "tenantId"_attr = _stateDoc.getTenantId(), + "lastOplogEntry"_attr = startApplyingDonorOpTime.toBSON()); auto oldestActiveTxnOpTime = _getOldestActiveTransactionAt(startApplyingDonorOpTime.getTimestamp()); @@ -1339,6 +1076,12 @@ void TenantMigrationRecipientService::Instance::_processCommittedTransactionEntr const BSONObj& entry) { auto sessionTxnRecord = SessionTxnRecord::parse(IDLParserContext("SessionTxnRecord"), entry); auto sessionId = sessionTxnRecord.getSessionId(); + uassert(ErrorCodes::RetryableInternalTransactionNotSupported, + str::stream() + << "Tenant migration doesn't support retryable internal transaction. 
SessionId:: " + << sessionId.toBSON(), + !isInternalSessionForRetryableWrite(sessionId)); + auto txnNumber = sessionTxnRecord.getTxnNum(); auto optTxnRetryCounter = sessionTxnRecord.getTxnRetryCounter(); uassert(ErrorCodes::InvalidOptions, @@ -1405,30 +1148,13 @@ void TenantMigrationRecipientService::Instance::_processCommittedTransactionEntr MutableOplogEntry noopEntry; noopEntry.setOpType(repl::OpTypeEnum::kNoop); - auto tenantNss = [&] { - if (_protocol == MigrationProtocolEnum::kShardMerge) { - // For shard merge, we must set an empty NamespaceString because nss is non-optional in - // the oplog entry idl definition. - return NamespaceString(); - } - - return NamespaceString(getTenantId() + "_", ""); - }(); - + auto tenantNss = NamespaceString(getTenantId() + "_", ""); noopEntry.setNss(tenantNss); // Write a fake applyOps with the tenantId as the namespace so that this will be picked // up by the committed transaction prefetch pipeline in subsequent migrations. - // - // Unlike MTM, shard merge copies all tenants from the donor. This means that merge does - // not need to filter prefetched committed transactions by tenantId. As a result, setting - // a nss containing the tenantId for the fake transaction applyOps entry isn't necessary. - if (_protocol == MigrationProtocolEnum::kShardMerge) { - noopEntry.setObject({}); - } else { - noopEntry.setObject( - BSON("applyOps" << BSON_ARRAY(BSON(OplogEntry::kNssFieldName << tenantNss.ns())))); - } + noopEntry.setObject( + BSON("applyOps" << BSON_ARRAY(BSON(OplogEntry::kNssFieldName << tenantNss.ns())))); noopEntry.setWallClockTime(opCtx->getServiceContext()->getFastClockSource()->now()); noopEntry.setSessionId(sessionId); @@ -1441,7 +1167,7 @@ void TenantMigrationRecipientService::Instance::_processCommittedTransactionEntr AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); writeConflictRetry( - opCtx, "writeDonorCommittedTxnEntry", NamespaceString::kRsOplogNamespace.ns(), [&] { + opCtx, "writeDonorCommittedTxnEntry", NamespaceString::kRsOplogNamespace, [&] { WriteUnitOfWork wuow(opCtx); // Write the no-op entry and update 'config.transactions'. 
@@ -1492,12 +1218,7 @@ TenantMigrationRecipientService::Instance::_fetchCommittedTransactionsBeforeStar } } - std::unique_ptr cursor; - if (_protocol == MigrationProtocolEnum::kShardMerge) { - cursor = _openCommittedTransactionsFindCursor(); - } else { - cursor = _openCommittedTransactionsAggregationCursor(); - } + auto cursor = _openCommittedTransactionsAggregationCursor(); while (cursor->more()) { auto transactionEntry = cursor->next(); @@ -1521,27 +1242,6 @@ TenantMigrationRecipientService::Instance::_fetchCommittedTransactionsBeforeStar .semi(); } -std::unique_ptr -TenantMigrationRecipientService::Instance::_openCommittedTransactionsFindCursor() { - Timestamp startApplyingDonorTimestamp; - { - stdx::lock_guard lk(_mutex); - invariant(_stateDoc.getStartApplyingDonorOpTime()); - startApplyingDonorTimestamp = _stateDoc.getStartApplyingDonorOpTime()->getTimestamp(); - } - - FindCommandRequest findCommandRequest{NamespaceString::kSessionTransactionsTableNamespace}; - findCommandRequest.setFilter(BSON("state" - << "committed" - << "lastWriteOpTime.ts" - << BSON("$lte" << startApplyingDonorTimestamp))); - findCommandRequest.setReadConcern( - ReadConcernArgs(ReadConcernLevel::kMajorityReadConcern).toBSONInner()); - findCommandRequest.setHint(BSON("$natural" << 1)); - - return _client->find(std::move(findCommandRequest), _readPreference, ExhaustMode::kOn); -} - std::unique_ptr TenantMigrationRecipientService::Instance::_openCommittedTransactionsAggregationCursor() { auto aggRequest = _makeCommittedTransactionsAggregation(); @@ -1558,23 +1258,6 @@ TenantMigrationRecipientService::Instance::_openCommittedTransactionsAggregation return std::move(statusWith.getValue()); } -void TenantMigrationRecipientService::Instance::_validateTenantIdsForProtocol() { - switch (_protocol) { - case MigrationProtocolEnum::kMultitenantMigrations: - uassert(ErrorCodes::InvalidOptions, - "The field 'tenantIds' must not be set for protocol 'multitenant migration'", - _tenantIds.empty() && !_tenantId.empty()); - break; - case MigrationProtocolEnum::kShardMerge: - uassert(ErrorCodes::InvalidOptions, - "The field 'tenantIds' must be set and non-empty for protocol 'shard merge'", - _tenantId.empty() && !_tenantIds.empty()); - break; - default: - MONGO_UNREACHABLE; - } -} - SemiFuture TenantMigrationRecipientService::Instance::_fetchRetryableWritesOplogBeforeStartOpTime() { _stopOrHangOnFailPoint(&fpAfterRetrievingStartOpTimesMigrationRecipientInstance); @@ -1613,7 +1296,7 @@ TenantMigrationRecipientService::Instance::_fetchRetryableWritesOplogBeforeStart // re-create the collection. auto coordinator = repl::ReplicationCoordinator::get(opCtx.get()); Lock::GlobalLock globalLock(opCtx.get(), MODE_IX); - if (!coordinator->canAcceptWritesForDatabase(opCtx.get(), oplogBufferNS.db())) { + if (!coordinator->canAcceptWritesForDatabase(opCtx.get(), oplogBufferNS.dbName())) { uassertStatusOK( Status(ErrorCodes::NotWritablePrimary, "Recipient node is not primary, cannot clear oplog buffer collection.")); @@ -1629,17 +1312,9 @@ TenantMigrationRecipientService::Instance::_fetchRetryableWritesOplogBeforeStart "migrationId"_attr = getMigrationUUID()); // Fetch the oplog chains of all retryable writes that occurred before startFetchingTimestamp. 
- std::vector serializedPipeline; - if (MigrationProtocolEnum::kShardMerge == getProtocol()) { - serializedPipeline = - tenant_migration_util::createRetryableWritesOplogFetchingPipelineForAllTenants( - expCtx, startFetchingTimestamp) - ->serializeToBson(); - } else { - serializedPipeline = tenant_migration_util::createRetryableWritesOplogFetchingPipeline( - expCtx, startFetchingTimestamp, getTenantId()) - ->serializeToBson(); - } + auto serializedPipeline = tenant_migration_util::createRetryableWritesOplogFetchingPipeline( + expCtx, startFetchingTimestamp, getTenantId()) + ->serializeToBson(); AggregateCommandRequest aggRequest(NamespaceString::kSessionTransactionsTableNamespace, std::move(serializedPipeline)); @@ -1722,8 +1397,7 @@ TenantMigrationRecipientService::Instance::_fetchRetryableWritesOplogBeforeStart BSONObj readResult; BSONObj cmd = ClonerUtils::buildMajorityWaitRequest(*operationTime); - _client.get()->runCommand( - DatabaseName(boost::none, "admin"), cmd, readResult, QueryOption_SecondaryOk); + _client.get()->runCommand(DatabaseName::kAdmin, cmd, readResult, QueryOption_SecondaryOk); uassertStatusOKWithContext( getStatusFromCommandResult(readResult), "Failed to wait for retryable writes pre-fetch result majority committed"); @@ -1799,11 +1473,8 @@ void TenantMigrationRecipientService::Instance::_startOplogFetcher() { OplogFetcher::RequireFresherSyncSource::kDontRequireFresherSyncSource, true /* forTenantMigration */); - // Fetch all oplog entries for shard merge protocol. - if (_stateDoc.getProtocol() != MigrationProtocolEnum::kShardMerge) { - oplogFetcherConfig.queryFilter = _getOplogFetcherFilter(); - oplogFetcherConfig.requestResumeToken = true; - } + oplogFetcherConfig.queryFilter = _getOplogFetcherFilter(); + oplogFetcherConfig.requestResumeToken = true; oplogFetcherConfig.queryReadConcern = ReadConcernArgs(repl::ReadConcernLevel::kMajorityReadConcern); @@ -1851,12 +1522,6 @@ Status TenantMigrationRecipientService::Instance::_enqueueDocuments( donorOplogBuffer->push(opCtx.get(), begin, end); } - if (_protocol == MigrationProtocolEnum::kShardMerge) { - // Shard merge does not need to write no-op oplog entries since it fetches all oplog - // entries. - return Status::OK(); - } - if (info.resumeToken.isNull()) { return Status(ErrorCodes::Error(5124600), "Resume token returned is null"); } @@ -2068,58 +1733,6 @@ Future TenantMigrationRecipientService::Instance::_startTenantAllDatabaseC return std::move(startClonerFuture); } -SemiFuture -TenantMigrationRecipientService::Instance::_advanceMajorityCommitTsToBkpCursorCheckpointTs( - const CancellationToken& token) { - auto uniqueOpCtx = cc().makeOperationContext(); - auto opCtx = uniqueOpCtx.get(); - - Timestamp donorBkpCursorCkptTs; - { - stdx::lock_guard lk(_mutex); - donorBkpCursorCkptTs = _getDonorBackupCursorInfo(lk).checkpointTimestamp; - } - - if (opCtx->getServiceContext()->getStorageEngine()->getStableTimestamp() >= - donorBkpCursorCkptTs) { - return SemiFuture::makeReady(); - } - - LOGV2( - 6114000, - "Advancing recipient's majority commit timestamp to be at least the donor's backup cursor " - "checkpoint timestamp", - "migrationId"_attr = getMigrationUUID(), - "tenantId"_attr = getTenantId(), - "donorBackupCursorCheckpointTimestamp"_attr = donorBkpCursorCkptTs); - - _stopOrHangOnFailPoint(&fpBeforeAdvancingStableTimestamp, opCtx); - - // Advance the cluster time to the donorBkpCursorCkptTs so that we ensure we - // write the no-op entry below at ts > donorBkpCursorCkptTs. 
- VectorClockMutable::get(_serviceContext)->tickClusterTimeTo(LogicalTime(donorBkpCursorCkptTs)); - - writeConflictRetry(opCtx, - "mergeRecipientWriteNoopToAdvanceStableTimestamp", - NamespaceString::kRsOplogNamespace.ns(), - [&] { - if (token.isCanceled()) { - return; - } - AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); - WriteUnitOfWork wuow(opCtx); - const std::string msg = str::stream() - << "Merge recipient advancing stable timestamp"; - opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage( - opCtx, BSON("msg" << msg)); - wuow.commit(); - }); - - // Get the timestamp of the no-op. This will have ts > donorBkpCursorCkptTs. - auto noOpTs = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - return WaitForMajorityService::get(opCtx->getServiceContext()).waitUntilMajority(noOpTs, token); -} - SemiFuture TenantMigrationRecipientService::Instance::_onCloneSuccess() { _stopOrHangOnFailPoint(&fpBeforeMarkingCloneSuccess); stdx::lock_guard lk(_mutex); @@ -2129,7 +1742,7 @@ SemiFuture TenantMigrationRecipientService::Instance::_onCloneSuccess() { return SemiFuture::makeReady(); } - if (_protocol == MigrationProtocolEnum::kMultitenantMigrations) { + { stdx::lock_guard sharedDatalk(*_sharedData); auto lastVisibleMajorityCommittedDonorOpTime = _sharedData->getLastVisibleOpTime(sharedDatalk); @@ -2177,17 +1790,7 @@ SemiFuture TenantMigrationRecipientService::Instance::_enterConsistentStat _stopOrHangOnFailPoint(&fpBeforeFulfillingDataConsistentPromise); stdx::lock_guard lk(_mutex); - auto donorConsistentOpTime = [&]() { - switch (_protocol) { - case MigrationProtocolEnum::kMultitenantMigrations: - return _stateDoc.getDataConsistentStopDonorOpTime(); - case MigrationProtocolEnum::kShardMerge: - return _stateDoc.getStartApplyingDonorOpTime(); - default: - MONGO_UNREACHABLE; - } - boost::optional(); - }(); + auto donorConsistentOpTime = _stateDoc.getDataConsistentStopDonorOpTime(); invariant(donorConsistentOpTime && !donorConsistentOpTime->isNull()); LOGV2_DEBUG(4881101, @@ -2254,45 +1857,6 @@ void setPromiseValueIfNotReady(WithLock lk, Promise& promise, Value& value) { } // namespace -void TenantMigrationRecipientService::Instance::onMemberImportedFiles( - const HostAndPort& host, bool success, const boost::optional& reason) { - stdx::lock_guard lk(_mutex); - if (!_waitingForMembersToImportFiles) { - LOGV2_WARNING(8423343, - "Ignoring delayed recipientVoteImportedFiles", - "host"_attr = host.toString(), - "migrationId"_attr = _migrationUuid); - return; - } - - auto state = _stateDoc.getState(); - uassert(8423341, - "The migration is at the wrong stage for recipientVoteImportedFiles: {}"_format( - TenantMigrationRecipientState_serializer(state)), - state == TenantMigrationRecipientStateEnum::kLearnedFilenames); - - if (!success) { - _importedFilesPromise.setError( - {ErrorCodes::OperationFailed, - "Migration failed on {}, error: {}"_format(host, reason.value_or("null"))}); - _waitingForMembersToImportFiles = false; - return; - } - - _membersWhoHaveImportedFiles.insert(host); - // Not reconfig-safe, we must not do a reconfig concurrent with a migration. 
- if (static_cast(_membersWhoHaveImportedFiles.size()) == - repl::ReplicationCoordinator::get(_serviceContext) - ->getConfig() - .getNumDataBearingMembers()) { - LOGV2_INFO(6112809, - "All members finished importing donated files", - "migrationId"_attr = _migrationUuid); - _importedFilesPromise.emplaceValue(); - _waitingForMembersToImportFiles = false; - } -} - SemiFuture TenantMigrationRecipientService::Instance::_markStateDocAsGarbageCollectable() { _stopOrHangOnFailPoint(&fpAfterReceivingRecipientForgetMigration); @@ -2321,14 +1885,15 @@ SemiFuture TenantMigrationRecipientService::Instance::_markStateDocAsGarba AutoGetCollection collection( opCtx, NamespaceString::kTenantMigrationRecipientsNamespace, MODE_IX); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << NamespaceString::kTenantMigrationRecipientsNamespace.ns() + str::stream() << NamespaceString::kTenantMigrationRecipientsNamespace + .toStringForErrorMsg() << " does not exist", collection); writeConflictRetry( opCtx, "markTenantMigrationRecipientStateDocGarbageCollectable", - NamespaceString::kTenantMigrationRecipientsNamespace.ns(), + NamespaceString::kTenantMigrationRecipientsNamespace, [&]() { WriteUnitOfWork wuow(opCtx); auto oplogSlot = LocalOplogInfo::get(opCtx)->getNextOpTimes(opCtx, 1U)[0]; @@ -2377,6 +1942,7 @@ SemiFuture TenantMigrationRecipientService::Instance::_markStateDocAsGarba originalSnapshot, stateDoc, collection_internal::kUpdateAllIndexes, + nullptr /* indexesAffected */, nullptr /* OpDebug* */, &args); @@ -2440,10 +2006,6 @@ void TenantMigrationRecipientService::Instance::_interrupt(Status status, _cancelRemainingWork(lk); - if (_donorFilenameBackupCursorFileFetcher) { - _donorFilenameBackupCursorFileFetcher->shutdown(); - } - // If the task is running, then setting promise result will be taken care by the main task // continuation chain. 
if (_taskState.isNotStarted()) { @@ -2477,19 +2039,7 @@ void TenantMigrationRecipientService::Instance::cancelMigration() { void TenantMigrationRecipientService::Instance::onReceiveRecipientForgetMigration( OperationContext* opCtx, const TenantMigrationRecipientStateEnum& nextState) { - - switch (_protocol) { - case MigrationProtocolEnum::kMultitenantMigrations: - invariant(nextState == TenantMigrationRecipientStateEnum::kDone); - break; - case MigrationProtocolEnum::kShardMerge: - invariant(nextState == TenantMigrationRecipientStateEnum::kAborted || - nextState == TenantMigrationRecipientStateEnum::kCommitted); - break; - default: - MONGO_UNREACHABLE; - } - + invariant(nextState == TenantMigrationRecipientStateEnum::kDone); LOGV2(4881400, "Forgetting migration due to recipientForgetMigration command", "migrationId"_attr = getMigrationUUID(), @@ -2519,17 +2069,10 @@ void TenantMigrationRecipientService::Instance::_cleanupOnDataSyncCompletion(Sta std::unique_ptr savedDonorOplogFetcher; std::shared_ptr savedTenantOplogApplier; std::unique_ptr savedWriterPool; - std::unique_ptr savedDonorFilenameBackupCursorFileFetcher; { stdx::lock_guard lk(_mutex); _cancelRemainingWork(lk); - _backupCursorKeepAliveCancellation = {}; - _backupCursorKeepAliveFuture = boost::none; - - if (_donorFilenameBackupCursorFileFetcher) { - shutdownTarget(lk, _donorFilenameBackupCursorFileFetcher); - } shutdownTarget(lk, _donorOplogFetcher); shutdownTargetWithOpCtx(lk, _donorOplogBuffer, opCtx.get()); @@ -2548,14 +2091,8 @@ void TenantMigrationRecipientService::Instance::_cleanupOnDataSyncCompletion(Sta swap(savedDonorOplogFetcher, _donorOplogFetcher); swap(savedTenantOplogApplier, _tenantOplogApplier); swap(savedWriterPool, _writerPool); - swap(savedDonorFilenameBackupCursorFileFetcher, _donorFilenameBackupCursorFileFetcher); } - // Perform join outside the lock to avoid deadlocks. - if (savedDonorFilenameBackupCursorFileFetcher) { - invariantStatusOK( - savedDonorFilenameBackupCursorFileFetcher->join(Interruptible::notInterruptible())); - } joinTarget(savedDonorOplogFetcher); joinTarget(savedTenantOplogApplier); if (savedWriterPool) { @@ -2601,11 +2138,12 @@ void TenantMigrationRecipientService::Instance::_fetchAndStoreDonorClusterTimeKe auto cursor = _client->find(std::move(findRequest), _readPreference); while (cursor->more()) { const auto doc = cursor->nextSafe().getOwned(); - keyDocs.push_back( - tenant_migration_util::makeExternalClusterTimeKeyDoc(_migrationUuid, doc)); + keyDocs.push_back(keys_collection_util::makeExternalClusterTimeKeyDoc( + doc, _migrationUuid, boost::none /* expireAt */)); } - tenant_migration_util::storeExternalClusterTimeKeyDocs(std::move(keyDocs)); + auto opCtx = cc().makeOperationContext(); + keys_collection_util::storeExternalClusterTimeKeyDocs(opCtx.get(), std::move(keyDocs)); } SemiFuture @@ -2700,16 +2238,15 @@ void TenantMigrationRecipientService::Instance::_startOplogApplier() { _tenantOplogApplier = std::make_shared( _migrationUuid, - _protocol, - (_protocol != MigrationProtocolEnum::kShardMerge) ? boost::make_optional(_tenantId) - : boost::none, + MigrationProtocolEnum::kMultitenantMigrations, (!resumeOpTime.isNull()) ? 
std::max(resumeOpTime, *startApplyingDonorOpTime) : *startApplyingDonorOpTime, + *cloneFinishedRecipientOpTime, + _tenantId, _donorOplogBuffer.get(), **_scopedExecutor, _writerPool.get(), resumeOpTime.getTimestamp()); - _tenantOplogApplier->setCloneFinishedRecipientOpTime(*cloneFinishedRecipientOpTime); LOGV2_DEBUG(4881202, 1, @@ -2772,7 +2309,7 @@ void TenantMigrationRecipientService::Instance::_setup() { auto oplogBufferNS = getOplogBufferNs(getMigrationUUID()); if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase( - opCtx, oplogBufferNS.db())) { + opCtx, oplogBufferNS.dbName())) { uassertStatusOK( Status(ErrorCodes::NotWritablePrimary, "Recipient node is no longer a primary.")); } @@ -2795,8 +2332,7 @@ TenantMigrationRecipientService::Instance::_waitForOplogApplierToStop() { return _tenantOplogApplier->getNotificationForOpTime(OpTime::max()); } -SemiFuture -TenantMigrationRecipientService::Instance::_migrateUsingMTMProtocol( +SemiFuture TenantMigrationRecipientService::Instance::_migrate( const CancellationToken& token) { return ExecutorFuture(**_scopedExecutor) .then([this, self = shared_from_this()] { return _getStartOpTimesFromDonor(); }) @@ -2825,50 +2361,15 @@ TenantMigrationRecipientService::Instance::_migrateUsingMTMProtocol( .semi(); } -SemiFuture -TenantMigrationRecipientService::Instance::_migrateUsingShardMergeProtocol( - const CancellationToken& token) { - return ExecutorFuture(**_scopedExecutor) - .then( - [this, self = shared_from_this(), token] { return _openBackupCursorWithRetry(token); }) - .then([this, self = shared_from_this(), token] { _keepBackupCursorAlive(token); }) - .then([this, self = shared_from_this(), token] { - return _advanceMajorityCommitTsToBkpCursorCheckpointTs(token); - }) - .then([this, self = shared_from_this()] { return _enterLearnedFilenamesState(); }) - .then([this, self = shared_from_this()]() { return _getStartOpTimesFromDonor(); }) - .then([this, self = shared_from_this()] { - return _fetchRetryableWritesOplogBeforeStartOpTime(); - }) - .then([this, self = shared_from_this()] { _startOplogFetcher(); }) - .then([this, self = shared_from_this()] { - LOGV2_INFO(6113402, "Waiting for all nodes to call recipientVoteImportedFiles"); - return _importedFilesPromise.getFuture().semi(); - }) - .then([this, self = shared_from_this()] { return _killBackupCursor(); }) - .then([this, self = shared_from_this()] { return _onCloneSuccess(); }) - .then([this, self = shared_from_this()] { return _enterConsistentState(); }) - .then([this, self = shared_from_this()] { - return _fetchCommittedTransactionsBeforeStartOpTime(); - }) - .then([this, self = shared_from_this()] { return _startOplogApplier(); }) - .then([this, self = shared_from_this()] { return _waitForOplogApplierToStop(); }) - .semi(); -} - void TenantMigrationRecipientService::Instance::_dropTempCollections() { _stopOrHangOnFailPoint(&fpBeforeDroppingTempCollections); auto opCtx = cc().makeOperationContext(); auto storageInterface = StorageInterface::get(opCtx.get()); - // The donated files and oplog buffer collections can be safely dropped at this - // point. In case either collection does not exist, dropping will be a no-op. - // It isn't necessary that a given drop is majority-committed. A new primary will - // attempt to drop the collection anyway. - uassertStatusOK(storageInterface->dropCollection( - opCtx.get(), shard_merge_utils::getDonatedFilesNs(getMigrationUUID()))); - + // The oplog buffer collection can be safely dropped at this point. 
In case either collection + // does not exist, dropping will be a no-op. It isn't necessary that a given drop is + // majority-committed. A new primary will attempt to drop the collection anyway. uassertStatusOK( storageInterface->dropCollection(opCtx.get(), getOplogBufferNs(getMigrationUUID()))); } @@ -2877,14 +2378,12 @@ SemiFuture TenantMigrationRecipientService::Instance::run( std::shared_ptr executor, const CancellationToken& token) noexcept { _scopedExecutor = executor; - _backupCursorExecutor = **_scopedExecutor; auto scopedOutstandingMigrationCounter = TenantMigrationStatistics::get(_serviceContext)->getScopedOutstandingReceivingCount(); LOGV2(4879607, "Starting tenant migration recipient instance: ", "migrationId"_attr = getMigrationUUID(), - "protocol"_attr = MigrationProtocol_serializer(getProtocol()), "tenantId"_attr = getTenantId(), "connectionString"_attr = _donorConnectionString, "readPreference"_attr = _readPreference); @@ -2938,47 +2437,46 @@ SemiFuture TenantMigrationRecipientService::Instance::run( } pauseAfterRunTenantMigrationRecipientInstance.pauseWhileSet(); - if (_protocol != MigrationProtocolEnum::kShardMerge) { - auto mtab = tenant_migration_access_blocker:: - getTenantMigrationRecipientAccessBlocker(_serviceContext, - _stateDoc.getTenantId()); - if (mtab && mtab->getMigrationId() != _migrationUuid) { - // There is a conflicting migration. If its state doc has already - // been marked as garbage collectable, this instance must correspond - // to a retry and we can delete immediately to allow the migration to - // restart. Otherwise, there is a real conflict so we should throw - // ConflictingInProgress. - lk.unlock(); - - auto existingStateDoc = - tenantMigrationRecipientEntryHelpers::getStateDoc( - opCtx.get(), mtab->getMigrationId()); - uassertStatusOK(existingStateDoc.getStatus()); - - uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() - << "Found active migration for tenantId \"" << _tenantId - << "\" with migration id " << mtab->getMigrationId(), - existingStateDoc.getValue().getExpireAt()); - - pauseTenantMigrationRecipientInstanceBeforeDeletingOldStateDoc - .pauseWhileSet(); - - auto deleted = - uassertStatusOK(tenantMigrationRecipientEntryHelpers:: - deleteStateDocIfMarkedAsGarbageCollectable( - opCtx.get(), _tenantId)); - // The doc has an expireAt but was deleted before we had time to - // delete it above therefore it's safe to pursue since it has been - // cleaned up. - if (!deleted) { - LOGV2_WARNING( - 6792601, - "Existing state document was deleted before we could " - "delete it ourselves."); - } - lk.lock(); + TenantId tid = TenantId::parseFromString(_stateDoc.getTenantId()); + auto mtab = + TenantMigrationAccessBlockerRegistry::get(_serviceContext) + .getTenantMigrationAccessBlockerForTenantId( + tid, TenantMigrationAccessBlocker::BlockerType::kRecipient); + if (mtab && mtab->getMigrationId() != _migrationUuid) { + // There is a conflicting migration. If its state doc has already + // been marked as garbage collectable, this instance must correspond + // to a retry and we can delete immediately to allow the migration to + // restart. Otherwise, there is a real conflict so we should throw + // ConflictingInProgress. 
+ lk.unlock(); + + auto existingStateDoc = + tenantMigrationRecipientEntryHelpers::getStateDoc( + opCtx.get(), mtab->getMigrationId()); + uassertStatusOK(existingStateDoc.getStatus()); + + uassert(ErrorCodes::ConflictingOperationInProgress, + str::stream() + << "Found active migration for tenantId \"" << _tenantId + << "\" with migration id " << mtab->getMigrationId(), + existingStateDoc.getValue().getExpireAt()); + + pauseTenantMigrationRecipientInstanceBeforeDeletingOldStateDoc + .pauseWhileSet(); + + auto deleted = + uassertStatusOK(tenantMigrationRecipientEntryHelpers:: + deleteStateDocIfMarkedAsGarbageCollectable( + opCtx.get(), _tenantId)); + // The doc has an expireAt but was deleted before we had time to + // delete it above therefore it's safe to pursue since it has been + // cleaned up. + if (!deleted) { + LOGV2_WARNING(6792601, + "Existing state document was deleted before we could " + "delete it ourselves."); } + lk.lock(); } if (_stateDoc.getState() != @@ -2989,15 +2487,11 @@ SemiFuture TenantMigrationRecipientService::Instance::run( // If our state is initialized and we haven't fulfilled the // '_stateDocPersistedPromise' yet, it means we are restarting the future // chain due to recipient failover. - _stateDoc.setNumRestartsDueToRecipientFailure( - _stateDoc.getNumRestartsDueToRecipientFailure() + 1); - const auto stateDoc = _stateDoc; - lk.unlock(); - // Update the state document outside the mutex to avoid a deadlock in the - // case of a concurrent stepdown. - uassertStatusOK(tenantMigrationRecipientEntryHelpers::updateStateDoc( - opCtx.get(), stateDoc)); - return SemiFuture::makeReady(); + _stateDocPersistedPromise.emplaceValue(); + uasserted(ErrorCodes::TenantMigrationAborted, + str::stream() << "Recipient failover happened during " + "migration :: migrationId: " + << getMigrationUUID()); } return _initializeStateDoc(lk); }) @@ -3054,27 +2548,13 @@ SemiFuture TenantMigrationRecipientService::Instance::run( // Sets up internal state to begin migration. _setup(); }) - .then([this, self = shared_from_this(), token] { - switch (_protocol) { - case MigrationProtocolEnum::kMultitenantMigrations: - return _migrateUsingMTMProtocol(token); - case MigrationProtocolEnum::kShardMerge: - return _migrateUsingShardMergeProtocol(token); - default: - MONGO_UNREACHABLE; - } - }); + .then([this, self = shared_from_this(), token] { return _migrate(token); }); }) .until([this, self = shared_from_this()]( StatusOrStatusWith applierStatus) { hangMigrationBeforeRetryCheck.pauseWhileSet(); auto shouldRetryMigration = [&](WithLock, Status status) -> bool { - // Shard merge is not resumable for any replica set state transitions or network - // errors. - if (getProtocol() == MigrationProtocolEnum::kShardMerge) - return false; - // We shouldn't retry migration after receiving the recipientForgetMigration command // or on stepDown/shutDown. if (_receivedRecipientForgetMigrationPromise.getFuture().isReady()) @@ -3178,9 +2658,7 @@ SemiFuture TenantMigrationRecipientService::Instance::run( auto state = _stateDoc.getState(); setPromiseValueIfNotReady(lk, _receivedRecipientForgetMigrationPromise, state); } else if (status.code() == ErrorCodes::ConflictingServerlessOperation) { - auto state = _protocol == MigrationProtocolEnum::kShardMerge - ? 
TenantMigrationRecipientStateEnum::kAborted - : TenantMigrationRecipientStateEnum::kDone; + auto state = TenantMigrationRecipientStateEnum::kDone; setPromiseValueIfNotReady(lk, _receivedRecipientForgetMigrationPromise, state); } else if (MONGO_unlikely(autoRecipientForgetMigration.shouldFail())) { auto state = _getTerminalStateFromFailpoint(&autoRecipientForgetMigration); @@ -3322,10 +2800,6 @@ const std::string& TenantMigrationRecipientService::Instance::getTenantId() cons return _tenantId; } -const MigrationProtocolEnum& TenantMigrationRecipientService::Instance::getProtocol() const { - return _protocol; -} - TenantMigrationRecipientDocument TenantMigrationRecipientService::Instance::getState() const { stdx::lock_guard lk(_mutex); return _stateDoc; diff --git a/src/mongo/db/repl/tenant_migration_recipient_service.h b/src/mongo/db/repl/tenant_migration_recipient_service.h index 994df6db5fa38..2056e26431ca0 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_service.h +++ b/src/mongo/db/repl/tenant_migration_recipient_service.h @@ -29,18 +29,56 @@ #pragma once +#include +#include #include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/client/fetcher.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/data_replicator_external_state.h" #include "mongo/db/repl/oplog_fetcher.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/tenant_all_database_cloner.h" +#include "mongo/db/repl/tenant_migration_pem_payload_gen.h" +#include "mongo/db/repl/tenant_migration_shared_data.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/repl/tenant_oplog_applier.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -51,6 +89,7 @@ class ServiceContext; namespace repl { class OplogBufferCollection; + /** * TenantMigrationRecipientService is a primary only service to handle * data copy portion of a multitenant migration on recipient side. @@ -145,11 +184,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { */ const std::string& getTenantId() const; - /* - * Returns the migration protocol. - */ - const MigrationProtocolEnum& getProtocol() const; - /* * Returns the recipient document state. 
*/ @@ -177,13 +211,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { OpTime waitUntilMigrationReachesReturnAfterReachingTimestamp( OperationContext* opCtx, const Timestamp& returnAfterReachingTimestamp); - /* - * Called when a replica set member (self, or a secondary) finishes importing donated files. - */ - void onMemberImportedFiles(const HostAndPort& host, - bool success, - const boost::optional& reason = boost::none); - /* * Set the oplog creator functor, to allow use of a mock oplog fetcher. */ @@ -214,15 +241,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { private: friend class TenantMigrationRecipientServiceTest; - friend class TenantMigrationRecipientServiceShardMergeTest; - - /** - * Only used for testing. Allows setting a custom task executor for backup cursor fetcher. - */ - void setBackupCursorFetcherExecutor_forTest( - std::shared_ptr taskExecutor) { - _backupCursorExecutor = taskExecutor; - } const NamespaceString _stateDocumentsNS = NamespaceString::kTenantMigrationRecipientsNamespace; @@ -377,28 +395,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { */ void _fetchAndStoreDonorClusterTimeKeyDocs(const CancellationToken& token); - /** - * Opens a backup cursor on the donor primary and fetches the - * list of donor files to be cloned. - */ - SemiFuture _openBackupCursor(const CancellationToken& token); - SemiFuture _openBackupCursorWithRetry(const CancellationToken& token); - - /** - * Keeps the donor backup cursor alive. - */ - void _keepBackupCursorAlive(const CancellationToken& token); - - /** - * Kills the Donor backup cursor - */ - SemiFuture _killBackupCursor(); - - /** - * Gets the backup cursor metadata info. - */ - const BackupCursorInfo& _getDonorBackupCursorInfo(WithLock) const; - /** * Get the oldest active multi-statement transaction optime by reading * config.transactions collection at given ReadTimestamp (i.e, equal to @@ -422,12 +418,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { OplogFetcher::Documents::const_iterator end, const OplogFetcher::DocumentsInfo& info); - /** - * Validates the tenantIds field is consistent with the protocol given. Throws an exception - * if there is a mismatch. - */ - void _validateTenantIdsForProtocol(); - /** * Runs an aggregation that gets the entire oplog chain for every retryable write entry in * `config.transactions`. Only returns oplog entries in the chain where @@ -440,12 +430,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { */ SemiFuture _fetchCommittedTransactionsBeforeStartOpTime(); - /** - * Opens and returns a cursor for all entries with 'lastWriteOpTime' <= - * 'startApplyingDonorOpTime' and state 'committed'. - */ - std::unique_ptr _openCommittedTransactionsFindCursor(); - /** * Opens and returns a cursor for entries from '_makeCommittedTransactionsAggregation()'. */ @@ -503,25 +487,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { */ SemiFuture _waitForOplogApplierToStop(); - /* - * Advances the majority commit timestamp to be >= donor's backup cursor checkpoint - * timestamp(CkptTs) by: - * 1. Advancing the clusterTime to CkptTs. - * 2. Writing a no-op oplog entry with ts > CkptTs - * 3. Waiting for the majority commit timestamp to be the time of the no-op write. - * - * Notes: This method should be called before transitioning the instance state to - * 'kLearnedFilenames' which causes donor collections to get imported. 
Current import rule - * is that the import table's checkpoint timestamp can't be later than the recipient's - * stable timestamp. Due to the fact, we don't have a mechanism to wait until a specific - * stable timestamp on a given node or set of nodes in the replica set and the majority - * commit point and stable timestamp aren't atomically updated, advancing the majority - * commit point on the recipient before import collection stage is a best-effort attempt to - * prevent import retry attempts on import timestamp rule violation. - */ - SemiFuture _advanceMajorityCommitTsToBkpCursorCheckpointTs( - const CancellationToken& token); - /* * Gets called when the logical/file cloner completes cloning data successfully. * And, it is responsible to populate the 'dataConsistentStopDonorOpTime' @@ -529,22 +494,11 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { */ SemiFuture _onCloneSuccess(); - /* - * Returns a future that will be fulfilled when the tenant migration reaches consistent - * state. - */ - SemiFuture _getDataConsistentFuture(); - /* * Wait for the data cloned via logical cloner to be consistent. */ SemiFuture _waitForDataToBecomeConsistent(); - /* - * Transitions the instance state to 'kLearnedFilenames'. - */ - SemiFuture _enterLearnedFilenamesState(); - /* * Transitions the instance state to 'kConsistent'. */ @@ -616,24 +570,13 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { */ void _setup(); - SemiFuture _migrateUsingMTMProtocol( - const CancellationToken& token); - - SemiFuture _migrateUsingShardMergeProtocol( - const CancellationToken& token); + SemiFuture _migrate(const CancellationToken& token); /* * Drops ephemeral collections used for tenant migrations. */ void _dropTempCollections(); - /* - * Send the killBackupCursor command to the remote in order to close the backup cursor - * connection on the donor. - */ - StatusWith _scheduleKillBackupCursorWithLock( - WithLock lk, std::shared_ptr executor); - mutable Mutex _mutex = MONGO_MAKE_LATCH("TenantMigrationRecipientService::_mutex"); // All member variables are labeled with one of the following codes indicating the @@ -647,14 +590,11 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { ServiceContext* const _serviceContext; const TenantMigrationRecipientService* const _recipientService; // (R) (not owned) std::shared_ptr _scopedExecutor; // (M) - std::shared_ptr _backupCursorExecutor; // (M) TenantMigrationRecipientDocument _stateDoc; // (M) // This data is provided in the initial state doc and never changes. We keep copies to // avoid having to obtain the mutex to access them. const std::string _tenantId; // (R) - const std::vector _tenantIds; // (R) - const MigrationProtocolEnum _protocol; // (R) const UUID _migrationUuid; // (R) const std::string _donorConnectionString; // (R) const MongoURI _donorUri; // (R) @@ -678,11 +618,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { // Follow DBClientCursor synchonization rules. 
std::unique_ptr _client; // (S) std::unique_ptr _oplogFetcherClient; // (S) - - std::unique_ptr _donorFilenameBackupCursorFileFetcher; // (M) - CancellationSource _backupCursorKeepAliveCancellation = {}; // (X) - boost::optional> _backupCursorKeepAliveFuture; // (M) - std::unique_ptr _createOplogFetcherFn = std::make_unique(); // (M) std::shared_ptr _donorOplogBuffer; // (M) @@ -704,12 +639,6 @@ class TenantMigrationRecipientService final : public PrimaryOnlyService { // Promise that is resolved Signaled when the instance has started tenant database cloner // and tenant oplog fetcher. SharedPromise _dataSyncStartedPromise; // (W) - // Promise that is resolved when all recipient nodes have imported all donor files. - SharedPromise _importedFilesPromise; // (W) - // Whether we are waiting for members to import donor files. - bool _waitingForMembersToImportFiles = true; - // Which members have imported all donor files. - stdx::unordered_set _membersWhoHaveImportedFiles; // Promise that is resolved when the tenant data sync has reached consistent point. SharedPromise _dataConsistentPromise; // (W) // Promise that is resolved when the data sync has completed. diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_shard_merge_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_shard_merge_test.cpp deleted file mode 100644 index c95aa6ed7414c..0000000000000 --- a/src/mongo/db/repl/tenant_migration_recipient_service_shard_merge_test.cpp +++ /dev/null @@ -1,799 +0,0 @@ -/** - * Copyright (C) 2020-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include -#include - -#include "mongo/client/connpool.h" -#include "mongo/client/replica_set_monitor.h" -#include "mongo/client/replica_set_monitor_protocol_test_util.h" -#include "mongo/client/streamable_replica_set_monitor_for_testing.h" -#include "mongo/config.h" -#include "mongo/db/client.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/feature_compatibility_version_document_gen.h" -#include "mongo/db/op_observer/op_observer_impl.h" -#include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/db/op_observer/oplog_writer_impl.h" -#include "mongo/db/repl/drop_pending_collection_reaper.h" -#include "mongo/db/repl/oplog.h" -#include "mongo/db/repl/oplog_buffer_collection.h" -#include "mongo/db/repl/oplog_fetcher_mock.h" -#include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/repl/primary_only_service_op_observer.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" -#include "mongo/db/repl/tenant_migration_recipient_entry_helpers.h" -#include "mongo/db/repl/tenant_migration_recipient_op_observer.h" -#include "mongo/db/repl/tenant_migration_recipient_service.h" -#include "mongo/db/repl/tenant_migration_state_machine_gen.h" -#include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/storage/backup_cursor_hooks.h" -#include "mongo/dbtests/mock/mock_conn_registry.h" -#include "mongo/dbtests/mock/mock_replica_set.h" -#include "mongo/executor/mock_network_fixture.h" -#include "mongo/executor/network_interface.h" -#include "mongo/executor/network_interface_mock.h" -#include "mongo/executor/thread_pool_mock.h" -#include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/transport/transport_layer_manager.h" -#include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" -#include "mongo/util/fail_point.h" -#include "mongo/util/future.h" -#include "mongo/util/net/ssl_util.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - - -namespace mongo { -namespace repl { - -namespace { -constexpr std::int32_t stopFailPointErrorCode = 4880402; -const Timestamp kDefaultStartMigrationTimestamp(1, 1); - -OplogEntry makeOplogEntry(OpTime opTime, - OpTypeEnum opType, - NamespaceString nss, - const boost::optional& uuid, - BSONObj o, - boost::optional o2) { - return {DurableOplogEntry(opTime, // optime - opType, // opType - nss, // namespace - uuid, // uuid - boost::none, // fromMigrate - OplogEntry::kOplogVersion, // version - o, // o - o2, // o2 - {}, // sessionInfo - boost::none, // upsert - Date_t(), // wall clock time - {}, // statement ids - boost::none, // optime of previous write within same transaction - boost::none, // pre-image optime - boost::none, // post-image optime - boost::none, // ShardId of resharding recipient - boost::none, // _id - boost::none)}; // needsRetryImage -} - -} // namespace - -class TenantMigrationRecipientServiceShardMergeTest : public ServiceContextMongoDTest { 
-public: - class stopFailPointEnableBlock : public FailPointEnableBlock { - public: - explicit stopFailPointEnableBlock(StringData failPointName, - std::int32_t error = stopFailPointErrorCode) - : FailPointEnableBlock(failPointName, - BSON("action" - << "stop" - << "stopErrorCode" << error)) {} - }; - - void setUp() override { - ServiceContextMongoDTest::setUp(); - auto serviceContext = getServiceContext(); - - // Fake replSet just for creating consistent URI for monitor - MockReplicaSet replSet("donorSet", 1, true /* hasPrimary */, true /* dollarPrefixHosts */); - _rsmMonitor.setup(replSet.getURI()); - - ConnectionString::setConnectionHook(mongo::MockConnRegistry::get()->getConnStrHook()); - - WaitForMajorityService::get(serviceContext).startup(serviceContext); - - // Automatically mark the state doc garbage collectable after data sync completion. - globalFailPointRegistry() - .find("autoRecipientForgetMigration") - ->setMode(FailPoint::alwaysOn, - 0, - BSON("state" - << "aborted")); - - { - auto opCtx = cc().makeOperationContext(); - auto replCoord = std::make_unique(serviceContext); - ReplicationCoordinator::set(serviceContext, std::move(replCoord)); - - repl::createOplog(opCtx.get()); - { - Lock::GlobalWrite lk(opCtx.get()); - OldClientContext ctx(opCtx.get(), NamespaceString::kRsOplogNamespace); - tenant_migration_util::createOplogViewForTenantMigrations(opCtx.get(), ctx.db()); - } - - // Need real (non-mock) storage for the oplog buffer. - StorageInterface::set(serviceContext, std::make_unique()); - - // The DropPendingCollectionReaper is required to drop the oplog buffer collection. - repl::DropPendingCollectionReaper::set( - serviceContext, - std::make_unique( - StorageInterface::get(serviceContext))); - - // Set up OpObserver so that repl::logOp() will store the oplog entry's optime in - // ReplClientInfo. - OpObserverRegistry* opObserverRegistry = - dynamic_cast(serviceContext->getOpObserver()); - opObserverRegistry->addObserver( - std::make_unique(std::make_unique())); - opObserverRegistry->addObserver( - std::make_unique(serviceContext)); - - // Add OpObserver needed by subclasses. - addOpObserver(opObserverRegistry); - - _registry = repl::PrimaryOnlyServiceRegistry::get(getServiceContext()); - std::unique_ptr service = - std::make_unique(getServiceContext()); - _registry->registerService(std::move(service)); - _registry->onStartup(opCtx.get()); - } - stepUp(); - - _service = _registry->lookupServiceByName( - TenantMigrationRecipientService::kTenantMigrationRecipientServiceName); - ASSERT(_service); - - // MockReplicaSet uses custom connection string which does not support auth. - auto authFp = globalFailPointRegistry().find("skipTenantMigrationRecipientAuth"); - authFp->setMode(FailPoint::alwaysOn); - - // Set the sslMode to allowSSL to avoid validation error. - sslGlobalParams.sslMode.store(SSLParams::SSLMode_allowSSL); - // Skipped unless tested explicitly, as we will not receive an FCV document from the donor - // in these unittests without (unsightly) intervention. - auto compFp = globalFailPointRegistry().find("skipComparingRecipientAndDonorFCV"); - compFp->setMode(FailPoint::alwaysOn); - - // Skip fetching retryable writes, as we will test this logic entirely in integration - // tests. - auto fetchRetryableWritesFp = - globalFailPointRegistry().find("skipFetchingRetryableWritesEntriesBeforeStartOpTime"); - fetchRetryableWritesFp->setMode(FailPoint::alwaysOn); - - // Skip fetching committed transactions, as we will test this logic entirely in integration - // tests. 
- auto fetchCommittedTransactionsFp = - globalFailPointRegistry().find("skipFetchingCommittedTransactions"); - fetchCommittedTransactionsFp->setMode(FailPoint::alwaysOn); - - // setup mock networking that will be use to mock the backup cursor traffic. - auto net = std::make_unique(); - _net = net.get(); - - executor::ThreadPoolMock::Options dbThreadPoolOptions; - dbThreadPoolOptions.onCreateThread = []() { - Client::initThread("FetchMockTaskExecutor"); - }; - - auto pool = std::make_unique(_net, 1, dbThreadPoolOptions); - _threadpoolTaskExecutor = - std::make_shared(std::move(pool), std::move(net)); - _threadpoolTaskExecutor->startup(); - } - - void tearDown() override { - _threadpoolTaskExecutor->shutdown(); - _threadpoolTaskExecutor->join(); - - auto authFp = globalFailPointRegistry().find("skipTenantMigrationRecipientAuth"); - authFp->setMode(FailPoint::off); - - // Unset the sslMode. - sslGlobalParams.sslMode.store(SSLParams::SSLMode_disabled); - - WaitForMajorityService::get(getServiceContext()).shutDown(); - - _registry->onShutdown(); - _service = nullptr; - - StorageInterface::set(getServiceContext(), {}); - - // Clearing the connection pool is necessary when doing tests which use the - // ReplicaSetMonitor. See src/mongo/dbtests/mock/mock_replica_set.h for details. - ScopedDbConnection::clearPool(); - ReplicaSetMonitorProtocolTestUtil::resetRSMProtocol(); - ServiceContextMongoDTest::tearDown(); - } - - void stepDown() { - ASSERT_OK(ReplicationCoordinator::get(getServiceContext()) - ->setFollowerMode(MemberState::RS_SECONDARY)); - _registry->onStepDown(); - } - - void stepUp() { - auto opCtx = cc().makeOperationContext(); - auto replCoord = ReplicationCoordinator::get(getServiceContext()); - - // Advance term - _term++; - - ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_PRIMARY)); - ASSERT_OK(replCoord->updateTerm(opCtx.get(), _term)); - replCoord->setMyLastAppliedOpTimeAndWallTime( - OpTimeAndWallTime(OpTime(Timestamp(1, 1), _term), Date_t())); - - _registry->onStepUpComplete(opCtx.get(), _term); - } - -protected: - TenantMigrationRecipientServiceShardMergeTest() - : ServiceContextMongoDTest(Options{}.useMockClock(true)) {} - - PrimaryOnlyServiceRegistry* _registry; - PrimaryOnlyService* _service; - long long _term = 0; - - bool _collCreated = false; - size_t _numSecondaryIndexesCreated{0}; - size_t _numDocsInserted{0}; - - const TenantId _tenantA{OID::gen()}; - const TenantId _tenantB{OID::gen()}; - const std::vector _tenants{_tenantA, _tenantB}; - - const TenantMigrationPEMPayload kRecipientPEMPayload = [&] { - std::ifstream infile("jstests/libs/client.pem"); - std::string buf((std::istreambuf_iterator(infile)), std::istreambuf_iterator()); - - auto swCertificateBlob = - ssl_util::findPEMBlob(buf, "CERTIFICATE"_sd, 0 /* position */, false /* allowEmpty */); - ASSERT_TRUE(swCertificateBlob.isOK()); - - auto swPrivateKeyBlob = - ssl_util::findPEMBlob(buf, "PRIVATE KEY"_sd, 0 /* position */, false /* allowEmpty */); - ASSERT_TRUE(swPrivateKeyBlob.isOK()); - - return TenantMigrationPEMPayload{swCertificateBlob.getValue().toString(), - swPrivateKeyBlob.getValue().toString()}; - }(); - - void checkStateDocPersisted(OperationContext* opCtx, - const TenantMigrationRecipientService::Instance* instance) { - auto memoryStateDoc = getStateDoc(instance); - auto persistedStateDocWithStatus = - tenantMigrationRecipientEntryHelpers::getStateDoc(opCtx, memoryStateDoc.getId()); - ASSERT_OK(persistedStateDocWithStatus.getStatus()); - ASSERT_BSONOBJ_EQ(memoryStateDoc.toBSON(), 
persistedStateDocWithStatus.getValue().toBSON()); - } - void insertToNodes(MockReplicaSet* replSet, - const NamespaceString& nss, - BSONObj obj, - const std::vector& hosts) { - for (const auto& host : hosts) { - replSet->getNode(host.toString())->insert(nss, obj); - } - } - - void clearCollection(MockReplicaSet* replSet, - const NamespaceString& nss, - const std::vector& hosts) { - for (const auto& host : hosts) { - replSet->getNode(host.toString())->remove(nss, BSONObj{} /*filter*/); - } - } - - void insertTopOfOplog(MockReplicaSet* replSet, - const OpTime& topOfOplogOpTime, - const std::vector hosts = {}) { - const auto targetHosts = hosts.empty() ? replSet->getHosts() : hosts; - // The MockRemoteDBService does not actually implement the database, so to make our - // find work correctly we must make sure there's only one document to find. - clearCollection(replSet, NamespaceString::kRsOplogNamespace, targetHosts); - insertToNodes(replSet, - NamespaceString::kRsOplogNamespace, - makeOplogEntry(topOfOplogOpTime, - OpTypeEnum::kNoop, - {} /* namespace */, - boost::none /* uuid */, - BSONObj() /* o */, - boost::none /* o2 */) - .getEntry() - .toBSON(), - targetHosts); - } - - // Accessors to class private members - DBClientConnection* getClient(const TenantMigrationRecipientService::Instance* instance) const { - return instance->_client.get(); - } - - const TenantMigrationRecipientDocument& getStateDoc( - const TenantMigrationRecipientService::Instance* instance) const { - return instance->_stateDoc; - } - - sdam::MockTopologyManager* getTopologyManager() { - return _rsmMonitor.getTopologyManager(); - } - - ClockSource* clock() { - return &_clkSource; - } - - executor::NetworkInterfaceMock* getNet() { - return _net; - } - - executor::NetworkInterfaceMock* _net = nullptr; - std::shared_ptr _threadpoolTaskExecutor; - - void setInstanceBackupCursorFetcherExecutor( - std::shared_ptr instance) { - instance->setBackupCursorFetcherExecutor_forTest(_threadpoolTaskExecutor); - } - -private: - virtual void addOpObserver(OpObserverRegistry* opObserverRegistry){}; - - ClockSourceMock _clkSource; - - unittest::MinimumLoggedSeverityGuard _replicationSeverityGuard{ - logv2::LogComponent::kReplication, logv2::LogSeverity::Debug(1)}; - unittest::MinimumLoggedSeverityGuard _tenantMigrationSeverityGuard{ - logv2::LogComponent::kTenantMigration, logv2::LogSeverity::Debug(1)}; - - StreamableReplicaSetMonitorForTesting _rsmMonitor; - RAIIServerParameterControllerForTest _findHostTimeout{"defaultFindReplicaSetHostTimeoutMS", 10}; -}; - -#ifdef MONGO_CONFIG_SSL - -void waitForReadyRequest(executor::NetworkInterfaceMock* net) { - while (!net->hasReadyRequests()) { - net->advanceTime(net->now() + Milliseconds{1}); - } -} - -BSONObj createEmptyCursorResponse(const NamespaceString& nss, CursorId backupCursorId) { - return BSON( - "cursor" << BSON("nextBatch" << BSONArray() << "id" << backupCursorId << "ns" << nss.ns()) - << "ok" << 1.0); -} - -BSONObj createBackupCursorResponse(const Timestamp& checkpointTimestamp, - const NamespaceString& nss, - CursorId backupCursorId) { - const UUID backupId = - UUID(uassertStatusOK(UUID::parse(("2b068e03-5961-4d8e-b47a-d1c8cbd4b835")))); - StringData remoteDbPath = "/data/db/job0/mongorunner/test-1"; - BSONObjBuilder cursor; - BSONArrayBuilder batch(cursor.subarrayStart("firstBatch")); - auto metaData = BSON("backupId" << backupId << "checkpointTimestamp" << checkpointTimestamp - << "dbpath" << remoteDbPath); - batch.append(BSON("metadata" << metaData)); - - batch.done(); - 
cursor.append("id", backupCursorId); - cursor.append("ns", nss.ns()); - BSONObjBuilder backupCursorReply; - backupCursorReply.append("cursor", cursor.obj()); - backupCursorReply.append("ok", 1.0); - return backupCursorReply.obj(); -} - -void sendReponseToExpectedRequest(const BSONObj& backupCursorResponse, - const std::string& expectedRequestFieldName, - executor::NetworkInterfaceMock* net) { - auto noi = net->getNextReadyRequest(); - auto request = noi->getRequest(); - ASSERT_EQUALS(expectedRequestFieldName, request.cmdObj.firstElementFieldNameStringData()); - net->scheduleSuccessfulResponse( - noi, executor::RemoteCommandResponse(backupCursorResponse, Milliseconds())); - net->runReadyNetworkOperations(); -} - -BSONObj createServerAggregateReply() { - return CursorResponse(NamespaceString::makeCollectionlessAggregateNSS(DatabaseName::kAdmin), - 0 /* cursorId */, - {BSON("byteOffset" << 0 << "endOfFile" << true << "data" - << BSONBinData(0, 0, BinDataGeneral))}) - .toBSONAsInitialResponse(); -} - -/** - * This class adds the TenantMigrationRecipientOpObserver to the main test fixture class. It cannot - * be used in tests after insertion of the state document because the OpObserver uses - * TenantFileImporter service when the state document is updated. This importer is not mocked - * currently and does not work with unit tests as it creates its own thread. - */ -class TenantMigrationRecipientServiceShardMergeTestInsert - : public TenantMigrationRecipientServiceShardMergeTest { -private: - void addOpObserver(OpObserverRegistry* opObserverRegistry) { - opObserverRegistry->addObserver(std::make_unique()); - } -}; - -TEST_F(TenantMigrationRecipientServiceShardMergeTestInsert, - TestBlockersAreInsertedWhenInsertingStateDocument) { - stopFailPointEnableBlock fp("fpBeforeFetchingDonorClusterTimeKeys"); - const UUID migrationUUID = UUID::gen(); - - MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); - getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock())); - insertTopOfOplog(&replSet, OpTime(Timestamp(5, 1), 1)); - - // Mock the aggregate response from the donor. - MockRemoteDBServer* const _donorServer = - mongo::MockConnRegistry::get()->getMockRemoteDBServer(replSet.getPrimary()); - _donorServer->setCommandReply("aggregate", createServerAggregateReply()); - - TenantMigrationRecipientDocument initialStateDocument( - migrationUUID, - replSet.getConnectionString(), - "", - kDefaultStartMigrationTimestamp, - ReadPreferenceSetting(ReadPreference::PrimaryOnly)); - initialStateDocument.setProtocol(MigrationProtocolEnum::kShardMerge); - initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - initialStateDocument.setTenantIds(_tenants); - - auto opCtx = makeOperationContext(); - std::shared_ptr instance; - { - auto fp = globalFailPointRegistry().find( - "fpAfterPersistingTenantMigrationRecipientInstanceStateDoc"); - auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn, - 0, - BSON("action" - << "hang")); - instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); - - fp->waitForTimesEntered(initialTimesEntered + 1); - - // Test that access blocker exists. 
- for (const auto& tenantId : _tenants) { - auto blocker = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .getTenantMigrationAccessBlockerForTenantId( - tenantId, TenantMigrationAccessBlocker::BlockerType::kRecipient); - ASSERT(!!blocker); - } - fp->setMode(FailPoint::off); - } - - ASSERT_EQ(stopFailPointErrorCode, instance->getDataSyncCompletionFuture().getNoThrow().code()); - ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow()); -} - -TEST_F(TenantMigrationRecipientServiceShardMergeTest, CannotCreateServiceWithoutTenants) { - const UUID migrationUUID = UUID::gen(); - const NamespaceString aggregateNs = - NamespaceString::createNamespaceString_forTest("admin.$cmd.aggregate"); - - MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); - - TenantMigrationRecipientDocument initialStateDocument( - migrationUUID, - replSet.getConnectionString(), - "", - kDefaultStartMigrationTimestamp, - ReadPreferenceSetting(ReadPreference::PrimaryOnly)); - initialStateDocument.setProtocol(MigrationProtocolEnum::kShardMerge); - initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - - auto opCtx = makeOperationContext(); - - ASSERT_THROWS_CODE(TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()), - DBException, - ErrorCodes::InvalidOptions); -} - -TEST_F(TenantMigrationRecipientServiceShardMergeTest, OpenBackupCursorSuccessfully) { - stopFailPointEnableBlock fp("fpBeforeAdvancingStableTimestamp"); - const UUID migrationUUID = UUID::gen(); - const CursorId backupCursorId = 12345; - const NamespaceString aggregateNs = - NamespaceString::createNamespaceString_forTest("admin.$cmd.aggregate"); - - auto taskFp = globalFailPointRegistry().find("hangBeforeTaskCompletion"); - auto initialTimesEntered = taskFp->setMode(FailPoint::alwaysOn); - - MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); - getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock())); - insertTopOfOplog(&replSet, OpTime(Timestamp(5, 1), 1)); - - // Mock the aggregate response from the donor. 
- MockRemoteDBServer* const _donorServer = - mongo::MockConnRegistry::get()->getMockRemoteDBServer(replSet.getPrimary()); - _donorServer->setCommandReply("aggregate", createServerAggregateReply()); - - TenantMigrationRecipientDocument initialStateDocument( - migrationUUID, - replSet.getConnectionString(), - "", - kDefaultStartMigrationTimestamp, - ReadPreferenceSetting(ReadPreference::PrimaryOnly)); - initialStateDocument.setProtocol(MigrationProtocolEnum::kShardMerge); - initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - initialStateDocument.setTenantIds(_tenants); - - auto opCtx = makeOperationContext(); - std::shared_ptr instance; - { - auto fp = globalFailPointRegistry().find("pauseBeforeRunTenantMigrationRecipientInstance"); - auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn); - instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); - fp->waitForTimesEntered(initialTimesEntered + 1); - setInstanceBackupCursorFetcherExecutor(instance); - instance->setCreateOplogFetcherFn_forTest(std::make_unique()); - fp->setMode(FailPoint::off); - } - - { - auto net = getNet(); - executor::NetworkInterfaceMock::InNetworkGuard guard(net); - waitForReadyRequest(net); - // Mocking the aggregate command network response of the backup cursor in order to have - // data to parse. - sendReponseToExpectedRequest(createBackupCursorResponse(kDefaultStartMigrationTimestamp, - aggregateNs, - backupCursorId), - "aggregate", - net); - sendReponseToExpectedRequest( - createEmptyCursorResponse(aggregateNs, backupCursorId), "getMore", net); - sendReponseToExpectedRequest( - createEmptyCursorResponse(aggregateNs, backupCursorId), "getMore", net); - } - - taskFp->waitForTimesEntered(initialTimesEntered + 1); - - checkStateDocPersisted(opCtx.get(), instance.get()); - - taskFp->setMode(FailPoint::off); - - ASSERT_EQ(stopFailPointErrorCode, instance->getDataSyncCompletionFuture().getNoThrow().code()); - ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow()); -} - -TEST_F(TenantMigrationRecipientServiceShardMergeTest, OpenBackupCursorAndRetriesDueToTs) { - stopFailPointEnableBlock fp("fpBeforeAdvancingStableTimestamp"); - const UUID migrationUUID = UUID::gen(); - const CursorId backupCursorId = 12345; - const NamespaceString aggregateNs = - NamespaceString::createNamespaceString_forTest("admin.$cmd.aggregate"); - - auto taskFp = globalFailPointRegistry().find("hangBeforeTaskCompletion"); - auto initialTimesEntered = taskFp->setMode(FailPoint::alwaysOn); - - MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); - getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock())); - insertTopOfOplog(&replSet, OpTime(Timestamp(5, 1), 1)); - - // Mock the aggregate response from the donor. 
- MockRemoteDBServer* const _donorServer = - mongo::MockConnRegistry::get()->getMockRemoteDBServer(replSet.getPrimary()); - _donorServer->setCommandReply("aggregate", createServerAggregateReply()); - - TenantMigrationRecipientDocument initialStateDocument( - migrationUUID, - replSet.getConnectionString(), - "", - kDefaultStartMigrationTimestamp, - ReadPreferenceSetting(ReadPreference::PrimaryOnly)); - initialStateDocument.setProtocol(MigrationProtocolEnum::kShardMerge); - initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - initialStateDocument.setTenantIds(_tenants); - - auto opCtx = makeOperationContext(); - std::shared_ptr instance; - { - auto fp = globalFailPointRegistry().find("pauseBeforeRunTenantMigrationRecipientInstance"); - auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn); - instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); - fp->waitForTimesEntered(initialTimesEntered + 1); - setInstanceBackupCursorFetcherExecutor(instance); - instance->setCreateOplogFetcherFn_forTest(std::make_unique()); - fp->setMode(FailPoint::off); - } - - { - auto net = getNet(); - executor::NetworkInterfaceMock::InNetworkGuard guard(net); - waitForReadyRequest(net); - - // Mocking the aggregate command network response of the backup cursor in order to have data - // to parse. In this case we pass a timestamp that is inferior to the - // startMigrationTimestamp which will cause a retry. We then provide a correct timestamp in - // the next response and succeed. - sendReponseToExpectedRequest( - createBackupCursorResponse(Timestamp(0, 0), aggregateNs, backupCursorId), - "aggregate", - net); - sendReponseToExpectedRequest(createBackupCursorResponse(kDefaultStartMigrationTimestamp, - aggregateNs, - backupCursorId), - "killCursors", - net); - sendReponseToExpectedRequest( - createEmptyCursorResponse(aggregateNs, backupCursorId), "killCursors", net); - sendReponseToExpectedRequest(createBackupCursorResponse(kDefaultStartMigrationTimestamp, - aggregateNs, - backupCursorId), - "aggregate", - net); - sendReponseToExpectedRequest( - createEmptyCursorResponse(aggregateNs, backupCursorId), "getMore", net); - sendReponseToExpectedRequest( - createEmptyCursorResponse(aggregateNs, backupCursorId), "getMore", net); - } - - taskFp->waitForTimesEntered(initialTimesEntered + 1); - - checkStateDocPersisted(opCtx.get(), instance.get()); - - taskFp->setMode(FailPoint::off); - - ASSERT_EQ(stopFailPointErrorCode, instance->getDataSyncCompletionFuture().getNoThrow().code()); - ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow()); -} - -TEST_F(TenantMigrationRecipientServiceShardMergeTestInsert, TestInsertAbortedDocument) { - const UUID migrationUUID = UUID::gen(); - - MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); - - auto fp = globalFailPointRegistry().find("pauseTenantMigrationRecipientBeforeDeletingStateDoc"); - auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn); - - TenantMigrationRecipientDocument initialStateDocument( - migrationUUID, - replSet.getConnectionString(), - "", - kDefaultStartMigrationTimestamp, - ReadPreferenceSetting(ReadPreference::PrimaryOnly)); - initialStateDocument.setProtocol(MigrationProtocolEnum::kShardMerge); - initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - initialStateDocument.setTenantIds(_tenants); - 
initialStateDocument.setState(TenantMigrationRecipientStateEnum::kAborted); - - auto opCtx = makeOperationContext(); - std::shared_ptr instance; - { - instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); - } - - ASSERT_EQ(ErrorCodes::TenantMigrationForgotten, - instance->getDataSyncCompletionFuture().getNoThrow().code()); - - fp->waitForTimesEntered(initialTimesEntered + 1); - checkStateDocPersisted(opCtx.get(), instance.get()); - auto stateDoc = getStateDoc(instance.get()); - ASSERT_EQ(stateDoc.getState(), TenantMigrationRecipientStateEnum::kAborted); - - fp->setMode(FailPoint::off); - - ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow().code()); -} - -TEST_F(TenantMigrationRecipientServiceShardMergeTest, TestForgetMigrationAborted) { - const UUID migrationUUID = UUID::gen(); - - auto deletionFp = - globalFailPointRegistry().find("pauseTenantMigrationRecipientBeforeDeletingStateDoc"); - auto deletionFpTimesEntered = deletionFp->setMode(FailPoint::alwaysOn); - - auto fp = - globalFailPointRegistry().find("fpAfterPersistingTenantMigrationRecipientInstanceStateDoc"); - auto initialTimesEntered = fp->setMode(FailPoint::alwaysOn, - 0, - BSON("action" - << "hang")); - - MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); - getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock())); - insertTopOfOplog(&replSet, OpTime(Timestamp(5, 1), 1)); - - - TenantMigrationRecipientDocument initialStateDocument( - migrationUUID, - replSet.getConnectionString(), - "", - kDefaultStartMigrationTimestamp, - ReadPreferenceSetting(ReadPreference::PrimaryOnly)); - initialStateDocument.setProtocol(MigrationProtocolEnum::kShardMerge); - initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - initialStateDocument.setTenantIds(_tenants); - - auto opCtx = makeOperationContext(); - std::shared_ptr instance; - { - instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); - fp->waitForTimesEntered(initialTimesEntered + 1); - - instance->onReceiveRecipientForgetMigration(opCtx.get(), - TenantMigrationRecipientStateEnum::kAborted); - - fp->setMode(FailPoint::off); - } - - ASSERT_EQ(ErrorCodes::TenantMigrationForgotten, - instance->getDataSyncCompletionFuture().getNoThrow().code()); - - deletionFp->waitForTimesEntered(deletionFpTimesEntered + 1); - checkStateDocPersisted(opCtx.get(), instance.get()); - auto stateDoc = getStateDoc(instance.get()); - ASSERT_EQ(stateDoc.getState(), TenantMigrationRecipientStateEnum::kAborted); - - deletionFp->setMode(FailPoint::off); - - ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow().code()); -} - -#endif -} // namespace repl -} // namespace mongo diff --git a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp index 36c74cdacbe6b..28492f2e93f21 100644 --- a/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp +++ b/src/mongo/db/repl/tenant_migration_recipient_service_test.cpp @@ -27,52 +27,91 @@ * it in the license file. 
*/ -#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include #include - +#include + +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/client/connection_string.h" #include "mongo/client/connpool.h" -#include "mongo/client/replica_set_monitor.h" #include "mongo/client/replica_set_monitor_protocol_test_util.h" +#include "mongo/client/sdam/mock_topology_manager.h" #include "mongo/client/streamable_replica_set_monitor_for_testing.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/client.h" #include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/feature_compatibility_version_document_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_buffer_collection.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_fetcher_mock.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/primary_only_service_op_observer.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" #include "mongo/db/repl/tenant_migration_recipient_entry_helpers.h" #include "mongo/db/repl/tenant_migration_recipient_service.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" +#include "mongo/db/repl/tenant_migration_util.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/server_options.h" +#include "mongo/db/serverless/serverless_types_gen.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/dbtests/mock/mock_conn_registry.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/dbtests/mock/mock_replica_set.h" -#include "mongo/executor/network_interface.h" -#include "mongo/executor/network_interface_mock.h" -#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" 
+#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/future.h" #include "mongo/util/net/ssl_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -193,7 +232,8 @@ class TenantMigrationRecipientServiceTest : public ServiceContextMongoDTest { << "done")); { auto opCtx = cc().makeOperationContext(); - auto replCoord = std::make_unique(serviceContext); + auto replCoord = std::make_unique( + serviceContext, createServerlessReplSettings()); ReplicationCoordinator::set(serviceContext, std::move(replCoord)); repl::createOplog(opCtx.get()); @@ -3775,18 +3815,14 @@ TEST_F(TenantMigrationRecipientServiceTest, RecipientReceivesNonRetriableClonerE checkStateDocPersisted(opCtx.get(), instance.get()); } -TEST_F(TenantMigrationRecipientServiceTest, IncrementNumRestartsDueToRecipientFailureCounter) { - FailPointEnableBlock createIndexesFailpointBlock("skipCreatingIndexDuringRebuildService"); - stopFailPointEnableBlock fp("fpAfterPersistingTenantMigrationRecipientInstanceStateDoc"); +TEST_F(TenantMigrationRecipientServiceTest, MigrationFailsOnRecipientFailover) { // Hang before deleting the state doc so that we can check the state doc was persisted. FailPointEnableBlock fpDeletingStateDoc("pauseTenantMigrationRecipientBeforeDeletingStateDoc"); const UUID migrationUUID = UUID::gen(); - const OpTime topOfOplogOpTime(Timestamp(1, 1), 1); MockReplicaSet replSet("donorSet", 3, true /* hasPrimary */, true /* dollarPrefixHosts */); getTopologyManager()->setTopologyDescription(replSet.getTopologyDescription(clock())); - insertTopOfOplog(&replSet, topOfOplogOpTime); TenantMigrationRecipientDocument initialStateDocument( migrationUUID, @@ -3796,44 +3832,41 @@ TEST_F(TenantMigrationRecipientServiceTest, IncrementNumRestartsDueToRecipientFa ReadPreferenceSetting(ReadPreference::PrimaryOnly)); initialStateDocument.setProtocol(MigrationProtocolEnum::kMultitenantMigrations); initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - // Starting a migration where the state is not 'kUninitialized' indicates that we are restarting - // from failover. + // Starting a migration where the state is not 'kUninitialized' indicates that we are attempting + // to restart from recipient failover. initialStateDocument.setState(TenantMigrationRecipientStateEnum::kStarted); - ASSERT_EQ(0, initialStateDocument.getNumRestartsDueToRecipientFailure()); auto opCtx = makeOperationContext(); - CollectionOptions collectionOptions; - collectionOptions.uuid = UUID::gen(); + + auto pauseFp = globalFailPointRegistry().find("pauseAfterRunTenantMigrationRecipientInstance"); + pauseFp->setMode(FailPoint::alwaysOn); + auto pauseFpTimesEntered = pauseFp->setMode(FailPoint::alwaysOn, + 0, + BSON("action" + << "hang")); + + // Create and start the instance, which ensures that the rebuild has completed and the tenant + // recipient collection and indexes have been created. 
+ auto instance = TenantMigrationRecipientService::Instance::getOrCreate( + opCtx.get(), _service, initialStateDocument.toBSON()); + ASSERT(instance.get()); + + pauseFp->waitForTimesEntered(pauseFpTimesEntered + 1); + auto storage = StorageInterface::get(opCtx->getServiceContext()); - const auto status = storage->createCollection( - opCtx.get(), NamespaceString::kTenantMigrationRecipientsNamespace, collectionOptions); - if (!status.isOK()) { - // It's possible to race with the test fixture setup in creating the tenant recipient - // collection. - ASSERT_EQ(ErrorCodes::NamespaceExists, status.code()); - } ASSERT_OK(storage->insertDocument(opCtx.get(), NamespaceString::kTenantMigrationRecipientsNamespace, {initialStateDocument.toBSON()}, 0)); - // Create and start the instance. - auto instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); - - ASSERT_EQ(stopFailPointErrorCode, instance->getDataSyncCompletionFuture().getNoThrow().code()); - ASSERT_OK(instance->getForgetMigrationDurableFuture().getNoThrow()); + pauseFp->setMode(FailPoint::off); - const auto stateDoc = getStateDoc(instance.get()); - ASSERT_EQ(stateDoc.getNumRestartsDueToDonorConnectionFailure(), 0); - ASSERT_EQ(stateDoc.getNumRestartsDueToRecipientFailure(), 1); - checkStateDocPersisted(opCtx.get(), instance.get()); + ASSERT_EQ(ErrorCodes::TenantMigrationAborted, + instance->getDataSyncCompletionFuture().getNoThrow().code()); } TEST_F(TenantMigrationRecipientServiceTest, RecipientFailureCounterNotIncrementedWhenMigrationForgotten) { - FailPointEnableBlock createIndexesFailpointBlock("skipCreatingIndexDuringRebuildService"); // Hang before deleting the state doc so that we can check the state doc was persisted. FailPointEnableBlock fpDeletingStateDoc("pauseTenantMigrationRecipientBeforeDeletingStateDoc"); @@ -3861,25 +3894,28 @@ TEST_F(TenantMigrationRecipientServiceTest, initialStateDocument.setExpireAt(opCtx->getServiceContext()->getFastClockSource()->now()); ASSERT_EQ(0, initialStateDocument.getNumRestartsDueToRecipientFailure()); - CollectionOptions collectionOptions; - collectionOptions.uuid = UUID::gen(); + auto pauseFp = globalFailPointRegistry().find("pauseAfterRunTenantMigrationRecipientInstance"); + pauseFp->setMode(FailPoint::alwaysOn); + auto pauseFpTimesEntered = pauseFp->setMode(FailPoint::alwaysOn, + 0, + BSON("action" + << "hang")); + + // Create and start the instance, which ensures that the rebuild has completed and the tenant + // recipient collection and indexes have been created. + auto instance = TenantMigrationRecipientService::Instance::getOrCreate( + opCtx.get(), _service, initialStateDocument.toBSON()); + ASSERT(instance.get()); + + pauseFp->waitForTimesEntered(pauseFpTimesEntered + 1); + auto storage = StorageInterface::get(opCtx->getServiceContext()); - const auto status = storage->createCollection( - opCtx.get(), NamespaceString::kTenantMigrationRecipientsNamespace, collectionOptions); - if (!status.isOK()) { - // It's possible to race with the test fixture setup in creating the tenant recipient - // collection. - ASSERT_EQ(ErrorCodes::NamespaceExists, status.code()); - } ASSERT_OK(storage->insertDocument(opCtx.get(), NamespaceString::kTenantMigrationRecipientsNamespace, {initialStateDocument.toBSON()}, 0)); - // Create and start the instance. 
- auto instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); + pauseFp->setMode(FailPoint::off); ASSERT_EQ(ErrorCodes::TenantMigrationForgotten, instance->getDataSyncCompletionFuture().getNoThrow().code()); @@ -3937,7 +3973,6 @@ TEST_F(TenantMigrationRecipientServiceTest, TEST_F(TenantMigrationRecipientServiceTest, RecipientDeletesExistingStateDocMarkedForGarbageCollection) { - FailPointEnableBlock createIndexesFailpointBlock("skipCreatingIndexDuringRebuildService"); stopFailPointEnableBlock fp("fpAfterPersistingTenantMigrationRecipientInstanceStateDoc"); auto beforeDeleteFp = globalFailPointRegistry().find( "pauseTenantMigrationRecipientInstanceBeforeDeletingOldStateDoc"); @@ -3946,10 +3981,36 @@ TEST_F(TenantMigrationRecipientServiceTest, auto initialTimesEntered = beforeDeleteFp->setMode(FailPoint::alwaysOn); auto opCtx = makeOperationContext(); - // Insert a state doc to simulate running a migration with an existing state doc NOT marked for - // garbage collection. const auto kTenantId = TenantId(OID::gen()); const std::string kConnectionString = "donor-rs/localhost:12345"; + const UUID migrationUUID = UUID::gen(); + TenantMigrationRecipientDocument initialStateDocument( + migrationUUID, + kConnectionString, + kTenantId.toString(), + kDefaultStartMigrationTimestamp, + ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly())); + initialStateDocument.setProtocol(MigrationProtocolEnum::kMultitenantMigrations); + initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); + + auto pauseFp = globalFailPointRegistry().find("pauseAfterRunTenantMigrationRecipientInstance"); + pauseFp->setMode(FailPoint::alwaysOn); + auto pauseFpTimesEntered = pauseFp->setMode(FailPoint::alwaysOn, + 0, + BSON("action" + << "hang")); + + // Create and start the instance, which ensures that the rebuild has completed and the tenant + // recipient collection and indexes have been created. + auto instance = TenantMigrationRecipientService::Instance::getOrCreate( + opCtx.get(), _service, initialStateDocument.toBSON()); + ASSERT(instance.get()); + ASSERT_EQ(migrationUUID, instance->getMigrationUUID()); + + pauseFp->waitForTimesEntered(pauseFpTimesEntered + 1); + + // Insert a state doc to simulate running a migration with an existing state doc NOT marked for + // garbage collection. const UUID existingMigrationId = UUID::gen(); TenantMigrationRecipientDocument previousStateDoc( existingMigrationId, @@ -3977,21 +4038,7 @@ TEST_F(TenantMigrationRecipientServiceTest, TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .add(kTenantId, recipientMtab); - const UUID migrationUUID = UUID::gen(); - TenantMigrationRecipientDocument initialStateDocument( - migrationUUID, - kConnectionString, - kTenantId.toString(), - kDefaultStartMigrationTimestamp, - ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly())); - initialStateDocument.setProtocol(MigrationProtocolEnum::kMultitenantMigrations); - initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - - // Create and start the instance. - auto instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); - ASSERT_EQ(migrationUUID, instance->getMigrationUUID()); + pauseFp->setMode(FailPoint::off); // We block and wait right before the service deletes the previous state document. 
beforeDeleteFp->waitForTimesEntered(initialTimesEntered + 1); @@ -4012,14 +4059,41 @@ TEST_F(TenantMigrationRecipientServiceTest, } TEST_F(TenantMigrationRecipientServiceTest, RecipientFailsDueToOperationConflict) { - FailPointEnableBlock createIndexesFailpointBlock("skipCreatingIndexDuringRebuildService"); stopFailPointEnableBlock fp("fpAfterPersistingTenantMigrationRecipientInstanceStateDoc"); FailPointEnableBlock skipRebuildFp("PrimaryOnlyServiceSkipRebuildingInstances"); - // Insert a state doc to simulate running a migration with an existing state doc NOT marked for - // garbage collection. + auto opCtx = makeOperationContext(); + const auto kTenantId = TenantId(OID::gen()); const std::string kConnectionString = "donor-rs/localhost:12345"; + const UUID migrationUUID = UUID::gen(); + TenantMigrationRecipientDocument initialStateDocument( + migrationUUID, + kConnectionString, + kTenantId.toString(), + kDefaultStartMigrationTimestamp, + ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly())); + initialStateDocument.setProtocol(MigrationProtocolEnum::kMultitenantMigrations); + initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); + + auto pauseFp = globalFailPointRegistry().find("pauseAfterRunTenantMigrationRecipientInstance"); + pauseFp->setMode(FailPoint::alwaysOn); + auto pauseFpTimesEntered = pauseFp->setMode(FailPoint::alwaysOn, + 0, + BSON("action" + << "hang")); + + // Create and start the instance, which ensures that the rebuild has completed and the tenant + // recipient collection and indexes have been created. + auto instance = TenantMigrationRecipientService::Instance::getOrCreate( + opCtx.get(), _service, initialStateDocument.toBSON()); + ASSERT(instance.get()); + ASSERT_EQ(migrationUUID, instance->getMigrationUUID()); + + pauseFp->waitForTimesEntered(pauseFpTimesEntered + 1); + + // Insert a state doc to simulate running a migration with an existing state doc NOT marked for + // garbage collection. const UUID existingMigrationId = UUID::gen(); TenantMigrationRecipientDocument previousStateDoc( existingMigrationId, @@ -4034,8 +4108,6 @@ TEST_F(TenantMigrationRecipientServiceTest, RecipientFailsDueToOperationConflict // from failover. previousStateDoc.setState(TenantMigrationRecipientStateEnum::kStarted); - auto opCtx = makeOperationContext(); - // Insert existing state document for the same tenant but different migration id uassertStatusOK( tenantMigrationRecipientEntryHelpers::insertStateDoc(opCtx.get(), previousStateDoc)); @@ -4047,21 +4119,7 @@ TEST_F(TenantMigrationRecipientServiceTest, RecipientFailsDueToOperationConflict TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) .add(kTenantId, recipientMtab); - const UUID migrationUUID = UUID::gen(); - TenantMigrationRecipientDocument initialStateDocument( - migrationUUID, - kConnectionString, - kTenantId.toString(), - kDefaultStartMigrationTimestamp, - ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly())); - initialStateDocument.setProtocol(MigrationProtocolEnum::kMultitenantMigrations); - initialStateDocument.setRecipientCertificateForDonor(kRecipientPEMPayload); - - // Create and start the instance. 
- auto instance = TenantMigrationRecipientService::Instance::getOrCreate( - opCtx.get(), _service, initialStateDocument.toBSON()); - ASSERT(instance.get()); - ASSERT_EQ(migrationUUID, instance->getMigrationUUID()); + pauseFp->setMode(FailPoint::off); // Since the previous state doc did not have expireAt set we will assert with // ConflictingOperationInProgress. diff --git a/src/mongo/db/repl/tenant_migration_server_status_section.cpp b/src/mongo/db/repl/tenant_migration_server_status_section.cpp index 4db3fa3727bdd..6b0bf87afed03 100644 --- a/src/mongo/db/repl/tenant_migration_server_status_section.cpp +++ b/src/mongo/db/repl/tenant_migration_server_status_section.cpp @@ -27,10 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/tenant_migration_statistics.h" namespace mongo { diff --git a/src/mongo/db/repl/tenant_migration_shard_merge_util.cpp b/src/mongo/db/repl/tenant_migration_shard_merge_util.cpp index 3c9585415c08c..b5ccd74bd66e3 100644 --- a/src/mongo/db/repl/tenant_migration_shard_merge_util.cpp +++ b/src/mongo/db/repl/tenant_migration_shard_merge_util.cpp @@ -30,28 +30,62 @@ #include "mongo/db/repl/tenant_migration_shard_merge_util.h" -#include +#include #include #include +#include +#include #include - +#include +#include +#include + +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog/uncommitted_catalog_updates.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/import_options.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/cursor_server_params_gen.h" #include "mongo/db/db_raii.h" -#include "mongo/db/multitenancy.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/tenant_file_cloner.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/db/service_context.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_import.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/idl/cluster_parameter_synchronization_helpers.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/scopeguard.h" +#include 
"mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -119,6 +153,135 @@ std::string _getPathRelativeTo(const std::string& path, const std::string& baseP std::replace(result.begin(), result.end(), '\\', '/'); return result; } + +/** + * Generate a new ident and move the file. + */ +std::string moveWithNewIdent(OperationContext* opCtx, + const std::string& tempWTDirectory, + const mongo::NamespaceString& metadataNS, + const std::string& oldIdent, + const char* kind, + std::vector>& revertMoves) { + auto srcFilePath = constructSourcePath(tempWTDirectory, oldIdent); + + while (true) { + try { + auto newIdent = DurableCatalog::get(opCtx)->generateUniqueIdent(metadataNS, kind); + auto destFilePath = constructDestinationPath(newIdent); + + moveFile(srcFilePath, destFilePath); + // Register revert file move in case of failure to import collection and it's + // indexes. + revertMoves.emplace_back(std::move(srcFilePath), std::move(destFilePath)); + + return newIdent; + } catch (const DBException& ex) { + // Retry move on "destination file already exists" error. This can happen due to + // ident collision between this import and another parallel import via + // importCollection command. + if (ex.code() == 6114401) { + LOGV2(7199801, + "Failed to move file from temp to active WT directory. Retrying " + "the move operation using another new unique ident.", + "error"_attr = redact(ex.toStatus())); + continue; + } + throw; + } + } + MONGO_UNREACHABLE; +} + +/** + * Import the collection and its indexes into the main wiretiger instance. + */ +void importCollectionInMainWTInstance(OperationContext* opCtx, + const CollectionImportMetadata& metadata, + const UUID& migrationId, + const BSONObj& storageMetaObj) { + const auto nss = metadata.ns; + writeConflictRetry(opCtx, "importCollection", nss, [&] { + LOGV2_DEBUG(6114303, 1, "Importing donor collection", "ns"_attr = nss); + AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IX); + auto db = autoDb.ensureDbExists(opCtx); + invariant(db); + Lock::CollectionLock collLock(opCtx, nss, MODE_X); + auto catalog = CollectionCatalog::get(opCtx); + WriteUnitOfWork wunit(opCtx); + AutoStatsTracker statsTracker(opCtx, + nss, + Top::LockType::NotLocked, + AutoStatsTracker::LogMode::kUpdateTopAndCurOp, + catalog->getDatabaseProfileLevel(nss.dbName())); + + // If the collection creation rolls back, ensure that the Top entry created for the + // collection is deleted. + opCtx->recoveryUnit()->onRollback( + [nss, serviceContext = opCtx->getServiceContext()](OperationContext*) { + Top::get(serviceContext).collectionDropped(nss); + }); + + uassert(ErrorCodes::NamespaceExists, + str::stream() << "Collection already exists. NS: " << nss.toStringForErrorMsg(), + !CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)); + + // Create Collection object. + auto storageEngine = opCtx->getServiceContext()->getStorageEngine(); + auto durableCatalog = storageEngine->getCatalog(); + ImportOptions importOptions(ImportOptions::ImportCollectionUUIDOption::kKeepOld); + importOptions.importTimestampRule = ImportOptions::ImportTimestampRule::kStable; + // Since we are using the ident id generated by this recipient node, ident collisions in + // the future after import is not possible. So, it's ok to skip the ident collision + // check. Otherwise, we would unnecessarily generate new rand after each collection + // import. 
+ importOptions.skipIdentCollisionCheck = true; + + auto importResult = uassertStatusOK(DurableCatalog::get(opCtx)->importCollection( + opCtx, nss, metadata.catalogObject, storageMetaObj, importOptions)); + + const auto catalogEntry = + durableCatalog->getParsedCatalogEntry(opCtx, importResult.catalogId); + const auto md = catalogEntry->metadata; + for (const auto& index : md->indexes) { + uassert(6114301, "Cannot import non-ready indexes", index.ready); + } + + std::shared_ptr ownedCollection = Collection::Factory::get(opCtx)->make( + opCtx, nss, importResult.catalogId, md, std::move(importResult.rs)); + ownedCollection->init(opCtx); + + // Update the number of records and data size on commit. + opCtx->recoveryUnit()->registerChange( + makeCountsChange(ownedCollection->getRecordStore(), metadata)); + + CollectionCatalog::get(opCtx)->onCreateCollection(opCtx, std::move(ownedCollection)); + + auto importedCatalogEntry = + storageEngine->getCatalog()->getCatalogEntry(opCtx, importResult.catalogId); + opCtx->getServiceContext()->getOpObserver()->onImportCollection(opCtx, + migrationId, + nss, + metadata.numRecords, + metadata.dataSize, + importedCatalogEntry, + storageMetaObj, + /*dryRun=*/false); + + wunit.commit(); + + if (metadata.numRecords > 0) { + cluster_parameters::maybeUpdateClusterParametersPostImportCollectionCommit(opCtx, nss); + } + + LOGV2(6114300, + "Imported donor collection", + "ns"_attr = nss, + "numRecordsApprox"_attr = metadata.numRecords, + "dataSizeApprox"_attr = metadata.dataSize); + }); +} + } // namespace void createImportDoneMarkerLocalCollection(OperationContext* opCtx, const UUID& migrationId) { @@ -151,10 +314,26 @@ void dropImportDoneMarkerLocalCollection(OperationContext* opCtx, const UUID& mi } } -void wiredTigerImportFromBackupCursor(OperationContext* opCtx, - const UUID& migrationId, - const std::string& importPath, - std::vector&& metadatas) { +void wiredTigerImport(OperationContext* opCtx, const UUID& migrationId) { + auto tempWTDirectory = fileClonerTempDir(migrationId); + uassert(6113315, + str::stream() << "Missing file cloner's temporary dbpath directory: " + << tempWTDirectory.string(), + boost::filesystem::exists(tempWTDirectory)); + + // TODO SERVER-63204: Evaluate correct place to remove the temporary WT dbpath. + ON_BLOCK_EXIT([&tempWTDirectory, &migrationId] { + LOGV2_INFO(6113324, + "Done importing files, removing the temporary WT dbpath", + "migrationId"_attr = migrationId, + "tempDbPath"_attr = tempWTDirectory.string()); + boost::system::error_code ec; + boost::filesystem::remove_all(tempWTDirectory, ec); + }); + + auto metadatas = + wiredTigerRollbackToStableAndGetMetadata(opCtx, tempWTDirectory.string(), migrationId); + // Disable replication because this logic is executed on all nodes during a Shard Merge. repl::UnreplicatedWritesBlock uwb(opCtx); @@ -175,44 +354,17 @@ void wiredTigerImportFromBackupCursor(OperationContext* opCtx, } }); - auto moveWithNewIdent = [&](const std::string& oldIdent, const char* kind) -> std::string { - auto srcFilePath = constructSourcePath(importPath, oldIdent); - - while (true) { - try { - auto newIdent = - DurableCatalog::get(opCtx)->generateUniqueIdent(metadata.ns, kind); - auto destFilePath = constructDestinationPath(newIdent); - - moveFile(srcFilePath, destFilePath); - // Register revert file move in case of failure to import collection and it's - // indexes. 
- revertMoves.emplace_back(std::move(srcFilePath), std::move(destFilePath)); - - return newIdent; - } catch (const DBException& ex) { - // Retry move on "destination file already exists" error. This can happen due to - // ident collision between this import and another parallel import via - // importCollection command. - if (ex.code() == 6114401) { - LOGV2(7199801, - "Failed to move file from temp to active WT directory. Retrying " - "the move operation using another new unique ident.", - "error"_attr = redact(ex.toStatus())); - continue; - } - throw; - } - } - MONGO_UNREACHABLE; - }; - BSONObjBuilder catalogMetaBuilder; BSONObjBuilder storageMetaBuilder; // Moves the collection file and it's associated index files from temp dir to dbpath. // And, regenerate metadata info with new unique ident id. - auto newCollIdent = moveWithNewIdent(metadata.collection.ident, "collection"); + auto newCollIdent = moveWithNewIdent(opCtx, + tempWTDirectory.string(), + metadata.ns, + metadata.collection.ident, + "collection", + revertMoves); catalogMetaBuilder.append("ident", newCollIdent); // Update the collection ident id. @@ -221,7 +373,8 @@ void wiredTigerImportFromBackupCursor(OperationContext* opCtx, BSONObjBuilder newIndexIdentMap; for (auto&& index : metadata.indexes) { - auto newIndexIdent = moveWithNewIdent(index.ident, "index"); + auto newIndexIdent = moveWithNewIdent( + opCtx, tempWTDirectory.string(), metadata.ns, index.ident, "index", revertMoves); newIndexIdentMap.append(index.indexName, newIndexIdent); // Update the index ident id. index.ident = std::move(newIndexIdent); @@ -232,86 +385,12 @@ void wiredTigerImportFromBackupCursor(OperationContext* opCtx, metadata.catalogObject = metadata.catalogObject.addFields(catalogMetaBuilder.obj()); const auto storageMetaObj = storageMetaBuilder.done(); - // Import the collection and it's indexes. - const auto nss = metadata.ns; - writeConflictRetry(opCtx, "importCollection", nss.ns(), [&] { - LOGV2_DEBUG(6114303, 1, "Importing donor collection", "ns"_attr = nss); - AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IX); - auto db = autoDb.ensureDbExists(opCtx); - invariant(db); - Lock::CollectionLock collLock(opCtx, nss, MODE_X); - auto catalog = CollectionCatalog::get(opCtx); - WriteUnitOfWork wunit(opCtx); - AutoStatsTracker statsTracker(opCtx, - nss, - Top::LockType::NotLocked, - AutoStatsTracker::LogMode::kUpdateTopAndCurOp, - catalog->getDatabaseProfileLevel(nss.dbName())); - - // If the collection creation rolls back, ensure that the Top entry created for the - // collection is deleted. - opCtx->recoveryUnit()->onRollback( - [nss, serviceContext = opCtx->getServiceContext()](OperationContext*) { - Top::get(serviceContext).collectionDropped(nss); - }); - - // Create Collection object. - auto storageEngine = opCtx->getServiceContext()->getStorageEngine(); - auto durableCatalog = storageEngine->getCatalog(); - ImportOptions importOptions(ImportOptions::ImportCollectionUUIDOption::kKeepOld); - importOptions.importTimestampRule = ImportOptions::ImportTimestampRule::kStable; - // Since we are using the ident id generated by this recipient node, ident collisions in - // the future after import is not possible. So, it's ok to skip the ident collision - // check. Otherwise, we would unnecessarily generate new rand after each collection - // import. 
- importOptions.skipIdentCollisionCheck = true; - - auto importResult = uassertStatusOK(DurableCatalog::get(opCtx)->importCollection( - opCtx, nss, metadata.catalogObject, storageMetaObj, importOptions)); - - const auto md = durableCatalog->getMetaData(opCtx, importResult.catalogId); - for (const auto& index : md->indexes) { - uassert(6114301, "Cannot import non-ready indexes", index.ready); - } - - std::shared_ptr ownedCollection = Collection::Factory::get(opCtx)->make( - opCtx, nss, importResult.catalogId, md, std::move(importResult.rs)); - ownedCollection->init(opCtx); - ownedCollection->setCommitted(false); - - // Update the number of records and data size on commit. - opCtx->recoveryUnit()->registerChange( - makeCountsChange(ownedCollection->getRecordStore(), metadata)); - - CollectionCatalog::get(opCtx)->onCreateCollection(opCtx, std::move(ownedCollection)); - - auto importedCatalogEntry = - storageEngine->getCatalog()->getCatalogEntry(opCtx, importResult.catalogId); - opCtx->getServiceContext()->getOpObserver()->onImportCollection(opCtx, - migrationId, - nss, - metadata.numRecords, - metadata.dataSize, - importedCatalogEntry, - storageMetaObj, - /*dryRun=*/false); - - wunit.commit(); - - if (metadata.numRecords > 0) { - cluster_parameters::maybeUpdateClusterParametersPostImportCollectionCommit(opCtx, - nss); - } - - LOGV2(6114300, - "Imported donor collection", - "ns"_attr = nss, - "numRecordsApprox"_attr = metadata.numRecords, - "dataSizeApprox"_attr = metadata.dataSize); - }); + importCollectionInMainWTInstance(opCtx, metadata, migrationId, storageMetaObj); revertFileMoves.dismiss(); } + + createImportDoneMarkerLocalCollection(opCtx, migrationId); } void cloneFile(OperationContext* opCtx, diff --git a/src/mongo/db/repl/tenant_migration_shard_merge_util.h b/src/mongo/db/repl/tenant_migration_shard_merge_util.h index 0694830c1f73d..d19258108acce 100644 --- a/src/mongo/db/repl/tenant_migration_shard_merge_util.h +++ b/src/mongo/db/repl/tenant_migration_shard_merge_util.h @@ -27,22 +27,34 @@ * it in the license file. */ -#include -#include - #include +#include #include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/client/dbclient_connection.h" #include "mongo/db/cursor_id.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/tenant_migration_shared_data.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_import.h" #include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo::repl::shard_merge_utils { @@ -126,10 +138,7 @@ void cloneFile(OperationContext* opCtx, /** * Import a donor collection after its files have been cloned to a temp dir. */ -void wiredTigerImportFromBackupCursor(OperationContext* opCtx, - const UUID& migrationId, - const std::string& importPath, - std::vector&& metadatas); +void wiredTigerImport(OperationContext* opCtx, const UUID& migrationId); /** * Send a "getMore" to keep a backup cursor from timing out. 
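
The retry in moveWithNewIdent() above comes down to one pattern: keep proposing freshly generated destination names until the move stops colliding with a file that a parallel import has already claimed. Below is a minimal standalone sketch of that pattern using std::filesystem, with a hypothetical makeCandidateIdent() helper standing in for DurableCatalog::generateUniqueIdent(); it is an illustration only, not the server's actual file-move code.

```cpp
#include <filesystem>
#include <fstream>
#include <iostream>
#include <random>
#include <string>
#include <system_error>

namespace fs = std::filesystem;

// Hypothetical stand-in for DurableCatalog::generateUniqueIdent(): kind plus a random suffix.
std::string makeCandidateIdent(const std::string& kind) {
    static thread_local std::mt19937_64 gen{std::random_device{}()};
    return kind + "-" + std::to_string(gen());
}

// Move 'src' into 'destDir' under a freshly generated name. If the chosen destination
// already exists (another import picked the same name first), generate a new name and retry.
fs::path moveWithFreshIdent(const fs::path& src, const fs::path& destDir, const std::string& kind) {
    while (true) {
        const fs::path dest = destDir / (makeCandidateIdent(kind) + src.extension().string());
        std::error_code ec;
        // copy_options::none refuses to overwrite, so a name collision surfaces as an error
        // instead of clobbering the other import's file.
        if (fs::copy_file(src, dest, fs::copy_options::none, ec)) {
            fs::remove(src);
            return dest;  // the caller records (src, dest) so the move can be reverted on failure
        }
        if (ec != std::errc::file_exists)
            throw fs::filesystem_error("move failed", src, dest, ec);
        // Collision: loop and retry with another candidate name.
    }
}

int main() {
    const fs::path dir = fs::temp_directory_path() / "ident-move-demo";
    fs::create_directories(dir);
    std::ofstream(dir / "source.wt") << "dummy collection file";
    std::cout << "moved to " << moveWithFreshIdent(dir / "source.wt", dir, "collection") << '\n';
}
```

In the patch itself, each successful move is additionally recorded in revertMoves so that a failed import can move the files back, which is what the revertFileMoves guard above relies on.
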
diff --git a/src/mongo/db/repl/tenant_migration_shared_data.h b/src/mongo/db/repl/tenant_migration_shared_data.h index bc8c9bfba744b..893831c9e73a0 100644 --- a/src/mongo/db/repl/tenant_migration_shared_data.h +++ b/src/mongo/db/repl/tenant_migration_shared_data.h @@ -29,10 +29,16 @@ #pragma once +#include + +#include "mongo/bson/timestamp.h" #include "mongo/db/cursor_id.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_sync_shared_data.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/tenant_migration_state_machine.idl b/src/mongo/db/repl/tenant_migration_state_machine.idl index 1ec1314d49cdc..d7d002d4a1867 100644 --- a/src/mongo/db/repl/tenant_migration_state_machine.idl +++ b/src/mongo/db/repl/tenant_migration_state_machine.idl @@ -258,6 +258,8 @@ structs: A counter that is incremented on each restart due to a donor connection failure. type: long default: 0 + # Not used but kept for parsing backwards compatibility with <= 6.2 which supports + # recipient failover. numRestartsDueToRecipientFailure: description: >- A counter that is incremented on each restart due to a recipient failure. diff --git a/src/mongo/db/repl/tenant_migration_statistics.cpp b/src/mongo/db/repl/tenant_migration_statistics.cpp index 40f4c69725ba3..8f0f016767acb 100644 --- a/src/mongo/db/repl/tenant_migration_statistics.cpp +++ b/src/mongo/db/repl/tenant_migration_statistics.cpp @@ -29,6 +29,10 @@ #include "mongo/db/repl/tenant_migration_statistics.h" +#include + +#include "mongo/util/decorable.h" + namespace mongo { // static diff --git a/src/mongo/db/repl/tenant_migration_statistics.h b/src/mongo/db/repl/tenant_migration_statistics.h index 1043858ef9bfd..694ce4bb565ec 100644 --- a/src/mongo/db/repl/tenant_migration_statistics.h +++ b/src/mongo/db/repl/tenant_migration_statistics.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/util/scopeguard.h" diff --git a/src/mongo/db/repl/tenant_migration_util.cpp b/src/mongo/db/repl/tenant_migration_util.cpp index 9f1109b7b72e5..5633970b1051b 100644 --- a/src/mongo/db/repl/tenant_migration_util.cpp +++ b/src/mongo/db/repl/tenant_migration_util.cpp @@ -29,16 +29,32 @@ #include "mongo/db/repl/tenant_migration_util.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/dbhelpers.h" -#include "mongo/db/logical_time_validator.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/ops/update.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/document_source_add_fields.h" 
#include "mongo/db/pipeline/document_source_find_and_modify_image_lookup.h" #include "mongo/db/pipeline/document_source_graph_lookup.h" @@ -46,11 +62,22 @@ #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_replace_root.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" #include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -105,53 +132,14 @@ const std::set kSensitiveFieldNames{"donorCertificateForRecipient", "recipientCertificateForDonor"}; MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationBeforeMarkingExternalKeysGarbageCollectable); -MONGO_FAIL_POINT_DEFINE(pauseTenantMigrationBeforeStoringExternalClusterTimeKeyDocs); } // namespace const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max()); -ExternalKeysCollectionDocument makeExternalClusterTimeKeyDoc(UUID migrationId, BSONObj keyDoc) { - auto originalKeyDoc = KeysCollectionDocument::parse(IDLParserContext("keyDoc"), keyDoc); - - ExternalKeysCollectionDocument externalKeyDoc( - OID::gen(), originalKeyDoc.getKeyId(), migrationId); - externalKeyDoc.setKeysCollectionDocumentBase(originalKeyDoc.getKeysCollectionDocumentBase()); - - return externalKeyDoc; -} - -repl::OpTime storeExternalClusterTimeKeyDocs(std::vector keyDocs) { - auto opCtxHolder = cc().makeOperationContext(); - auto opCtx = opCtxHolder.get(); - auto nss = NamespaceString::kExternalKeysCollectionNamespace; - - pauseTenantMigrationBeforeStoringExternalClusterTimeKeyDocs.pauseWhileSet(opCtx); - - for (auto& keyDoc : keyDocs) { - AutoGetCollection collection(opCtx, nss, MODE_IX); - - writeConflictRetry(opCtx, "CloneExternalKeyDocs", nss.ns(), [&] { - // Note that each external key's _id is generated by the migration, so this upsert can - // only insert. - const auto filter = - BSON(ExternalKeysCollectionDocument::kIdFieldName << keyDoc.getId()); - const auto updateMod = keyDoc.toBSON(); - - Helpers::upsert(opCtx, - nss, - filter, - updateMod, - /*fromMigrate=*/false); - }); - } - - return repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); -} - void createOplogViewForTenantMigrations(OperationContext* opCtx, Database* db) { writeConflictRetry( - opCtx, "createDonorOplogView", NamespaceString::kTenantMigrationOplogView.ns(), [&] { + opCtx, "createDonorOplogView", NamespaceString::kTenantMigrationOplogView, [&] { { // Create 'system.views' in a separate WUOW if it does not exist. 
WriteUnitOfWork wuow(opCtx); @@ -602,10 +590,17 @@ ExecutorFuture markExternalKeysAsGarbageCollectable( opCtx); const auto& nss = NamespaceString::kExternalKeysCollectionNamespace; - AutoGetCollection coll(opCtx, nss, MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest( + nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); writeConflictRetry( - opCtx, "TenantMigrationMarkExternalKeysAsGarbageCollectable", nss.ns(), [&] { + opCtx, "TenantMigrationMarkExternalKeysAsGarbageCollectable", nss, [&] { auto request = UpdateRequest(); request.setNamespaceString(nss); request.setQuery( @@ -623,7 +618,7 @@ ExecutorFuture markExternalKeysAsGarbageCollectable( // may fail to match any keys if they were previously marked garbage // collectable and deleted by the TTL monitor. Because of this we can't // assert on the update result's numMatched or numDocsModified. - update(opCtx, coll.getDb(), request); + update(opCtx, collection, request); }); }); }) diff --git a/src/mongo/db/repl/tenant_migration_util.h b/src/mongo/db/repl/tenant_migration_util.h index 624b48ad89891..751a08190332a 100644 --- a/src/mongo/db/repl/tenant_migration_util.h +++ b/src/mongo/db/repl/tenant_migration_util.h @@ -29,21 +29,51 @@ #pragma once +#include +#include +#include +#include #include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" #include "mongo/client/mongo_uri.h" -#include "mongo/config.h" +#include "mongo/client/read_preference.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/catalog/database.h" +#include "mongo/db/client.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/server_options.h" #include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/net/ssl_util.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -250,14 +280,6 @@ inline void protocolTenantIdCompatibilityCheck(const MigrationProtocolEnum proto inline void protocolTenantIdsCompatibilityCheck( const MigrationProtocolEnum protocol, const boost::optional>& tenantIds) { - if (serverGlobalParams.featureCompatibility.isLessThan( - multiversion::FeatureCompatibilityVersion::kVersion_6_3)) { - uassert(ErrorCodes::InvalidOptions, - "'tenantIds' is not supported for FCV below 6.3'", - !tenantIds); - return; - } - switch (protocol) { case MigrationProtocolEnum::kShardMerge: { { @@ -327,19 +349,6 @@ inline void protocolCheckRecipientForgetDecision( } } -/* - * Creates an 
ExternalKeysCollectionDocument representing an config.external_validation_keys - * document from the given the admin.system.keys document BSONObj. - */ -ExternalKeysCollectionDocument makeExternalClusterTimeKeyDoc(UUID migrationId, BSONObj keyDoc); - -/* - * For each given ExternalKeysCollectionDocument, inserts it if there is not an existing document in - * config.external_validation_keys for it with the same keyId and replicaSetName. Otherwise, - * updates the ttlExpiresAt of the existing document if it is less than the new ttlExpiresAt. - */ -repl::OpTime storeExternalClusterTimeKeyDocs(std::vector keyDocs); - /** * Sets the "ttlExpiresAt" field for the external keys so they can be garbage collected by the ttl * monitor. diff --git a/src/mongo/db/repl/tenant_oplog_applier.cpp b/src/mongo/db/repl/tenant_oplog_applier.cpp index 72c22005f925f..30de29082b323 100644 --- a/src/mongo/db/repl/tenant_oplog_applier.cpp +++ b/src/mongo/db/repl/tenant_oplog_applier.cpp @@ -27,24 +27,41 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - #include "mongo/db/repl/tenant_oplog_applier.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include - -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/document_validation.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/oid.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/repl/apply_ops.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/cloner_utils.h" -#include "mongo/db/repl/insert_group.h" +#include "mongo/db/repl/oplog_applier.h" #include "mongo/db/repl/oplog_applier_utils.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/session_update_tracker.h" @@ -52,25 +69,75 @@ #include "mongo/db/repl/tenant_migration_decoration.h" #include "mongo/db/repl/tenant_migration_recipient_service.h" #include "mongo/db/repl/tenant_oplog_batcher.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/thread_pool.h" -#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration - namespace mongo { namespace repl { MONGO_FAIL_POINT_DEFINE(hangInTenantOplogApplication); MONGO_FAIL_POINT_DEFINE(fpBeforeTenantOplogApplyingBatch); +enum OplogEntryType { + kOplogEntryTypeTransaction, + 
kOplogEntryTypePartialTransaction, + kOplogEntryTypeRetryableWrite, + kOplogEntryTypeRetryableWritePrePostImage, + kOplogEntryTypePreviouslyWrappedRetryableWrite, +}; +OplogEntryType getOplogEntryType(const OplogEntry& entry) { + // Final applyOp for a transaction. + if (entry.getTxnNumber() && !entry.isPartialTransaction() && + (entry.getCommandType() == repl::OplogEntry::CommandType::kCommitTransaction || + entry.getCommandType() == repl::OplogEntry::CommandType::kApplyOps)) { + return OplogEntryType::kOplogEntryTypeTransaction; + } + + // If it has a statement id but isn't a transaction, it's a retryable write. + const auto isRetryableWriteEntry = + !entry.getStatementIds().empty() && !SessionUpdateTracker::isTransactionEntry(entry); + + // There are two types of no-ops we expect here. One is pre/post image, which will have an empty + // o2 field. The other is previously transformed retryable write entries from earlier + // migrations, which we should avoid re-wrapping. + if (isRetryableWriteEntry && entry.getOpType() == repl::OpTypeEnum::kNoop) { + if (entry.getObject2()) { + return OplogEntryType::kOplogEntryTypePreviouslyWrappedRetryableWrite; + } + + return OplogEntryType::kOplogEntryTypeRetryableWritePrePostImage; + } + + if (isRetryableWriteEntry) { + return OplogEntryType::kOplogEntryTypeRetryableWrite; + } + + return OplogEntryType::kOplogEntryTypePartialTransaction; +}; + TenantOplogApplier::TenantOplogApplier(const UUID& migrationUuid, const MigrationProtocolEnum& protocol, + const OpTime& startApplyingAfterOpTime, + const OpTime& cloneFinishedRecipientOpTime, boost::optional tenantId, - OpTime startApplyingAfterOpTime, RandomAccessOplogBuffer* oplogBuffer, std::shared_ptr executor, ThreadPool* writerPool, @@ -79,12 +146,31 @@ TenantOplogApplier::TenantOplogApplier(const UUID& migrationUuid, std::string("TenantOplogApplier_") + migrationUuid.toString()), _migrationUuid(migrationUuid), _protocol(protocol), - _tenantId(tenantId), _startApplyingAfterOpTime(startApplyingAfterOpTime), + _cloneFinishedRecipientOpTime(cloneFinishedRecipientOpTime), + _tenantId(tenantId), _oplogBuffer(oplogBuffer), _executor(std::move(executor)), _writerPool(writerPool), - _resumeBatchingTs(resumeBatchingTs) { + _resumeBatchingTs(resumeBatchingTs), + _options([&] { + switch (protocol) { + case MigrationProtocolEnum::kMultitenantMigrations: + // Since multi-tenant migration uses logical cloning, the oplog entries will be + // applied on a inconsistent copy of donor data. Hence, using + // OplogApplication::Mode::kInitialSync. + return OplogApplication::Mode::kInitialSync; + case MigrationProtocolEnum::kShardMerge: + // Since shard merge uses backup cursor for database cloning and tenant oplog + // catchup phase is not resumable on failovers, the oplog entries will be applied + // on a consistent copy of donor data. Hence, using + // OplogApplication::Mode::kSecondary. 
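+                // (Whichever mode is returned here initializes _options; _applyOplogBatchPerWorker() consults _options.mode and its consistency flags below.)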
+ return OplogApplication::Mode::kSecondary; + default: + MONGO_UNREACHABLE; + } + }()) { + invariant(!_cloneFinishedRecipientOpTime.isNull()); if (_protocol != MigrationProtocolEnum::kShardMerge) { invariant(_tenantId); } else { @@ -124,14 +210,6 @@ Timestamp TenantOplogApplier::getResumeBatchingTs() const { return _resumeBatchingTs; } -void TenantOplogApplier::setCloneFinishedRecipientOpTime(OpTime cloneFinishedRecipientOpTime) { - stdx::lock_guard lk(_mutex); - invariant(!_isActive_inlock()); - invariant(!cloneFinishedRecipientOpTime.isNull()); - invariant(_cloneFinishedRecipientOpTime.isNull()); - _cloneFinishedRecipientOpTime = cloneFinishedRecipientOpTime; -} - void TenantOplogApplier::_doStartup_inlock() { _oplogBatcher = std::make_shared( _migrationUuid, _oplogBuffer, _executor, _resumeBatchingTs, _startApplyingAfterOpTime); @@ -429,6 +507,205 @@ bool isResumeTokenNoop(const OplogEntry& entry) { } } // namespace +void TenantOplogApplier::_writeRetryableWriteEntryNoOp( + OperationContext* opCtx, + MutableOplogEntry& noopEntry, + const OplogEntry& entry, + const boost::optional& prePostImageEntry, + const OpTime& originalPrePostImageOpTime) { + + auto sessionId = *entry.getSessionId(); + auto txnNumber = *entry.getTxnNumber(); + auto stmtIds = entry.getStatementIds(); + LOGV2_DEBUG(5351000, + 2, + "Tenant Oplog Applier processing retryable write", + "entry"_attr = redact(entry.toBSONForLogging()), + "sessionId"_attr = sessionId, + "txnNumber"_attr = txnNumber, + "statementIds"_attr = stmtIds, + "protocol"_attr = _protocol, + "migrationId"_attr = _migrationUuid); + + const auto hasPreOrPostImageOpTime = entry.getPreImageOpTime() || entry.getPostImageOpTime(); + if (prePostImageEntry && entry.getPreImageOpTime()) { + uassert(5351002, + str::stream() + << "Tenant oplog application cannot apply retryable write with txnNumber " + << txnNumber << " statementNumber " << stmtIds.front() << " on session " + << sessionId << " because the preImage op time " + << originalPrePostImageOpTime.toString() + << " does not match the expected optime " + << entry.getPreImageOpTime()->toString(), + originalPrePostImageOpTime == entry.getPreImageOpTime()); + noopEntry.setPreImageOpTime(prePostImageEntry->getOpTime()); + } else if (prePostImageEntry && entry.getPostImageOpTime()) { + uassert(5351007, + str::stream() + << "Tenant oplog application cannot apply retryable write with txnNumber " + << txnNumber << " statementNumber " << stmtIds.front() << " on session " + << sessionId << " because the postImage op time " + << originalPrePostImageOpTime.toString() + << " does not match the expected optime " + << entry.getPostImageOpTime()->toString(), + originalPrePostImageOpTime == entry.getPostImageOpTime()); + noopEntry.setPostImageOpTime(prePostImageEntry->getOpTime()); + } else if (!prePostImageEntry && hasPreOrPostImageOpTime) { + LOGV2(5535302, + "Tenant Oplog Applier omitting pre- or post- image for findAndModify", + "entry"_attr = redact(entry.toBSONForLogging()), + "protocol"_attr = _protocol, + "migrationId"_attr = _migrationUuid); + } + + auto txnParticipant = TransactionParticipant::get(opCtx); + uassert(5350900, + str::stream() << "Tenant oplog application failed to get retryable write " + "for transaction " + << txnNumber << " on session " << sessionId, + txnParticipant); + + TxnNumberAndRetryCounter txnNumberAndRetryCounter{txnNumber}; + if (txnParticipant.getLastWriteOpTime() > _cloneFinishedRecipientOpTime) { + // Out-of-order processing within a migration lifetime is not possible, + // except 
in recipient failovers. However, merge and tenant migration + // are not resilient to recipient failovers. If attempted, beginOrContinue() + // will throw ErrorCodes::TransactionTooOld. + txnParticipant.beginOrContinue(opCtx, + txnNumberAndRetryCounter, + boost::none /* autocommit */, + boost::none /* startTransaction */); + noopEntry.setPrevWriteOpTimeInTransaction(txnParticipant.getLastWriteOpTime()); + } else { + // We can end up here under the following circumstances: + // 1) LastWriteOpTime is not null. + // - During a back-to-back migration (rs0->rs1->rs0) or a migration retry, + // when 'txnNum'== txnParticipant.o().activeTxnNumber and rs0 already has + // the oplog chain. + // + // 2) LastWriteOpTime is null. + // - During a back-to-back migration (rs0->rs1->rs0) when + // 'txnNum' < txnParticipant.o().activeTxnNumber and last activeTxnNumber corresponds + // to a no-op session write, like, no-op retryable update, read transaction, etc. + // - New session with no transaction started yet on this node (this will be a no-op). + LOGV2_DEBUG(5709800, + 2, + "Tenant oplog applier resetting existing retryable write state", + "lastWriteOpTime"_attr = txnParticipant.getLastWriteOpTime(), + "lastActiveTxnNumber"_attr = + txnParticipant.getActiveTxnNumberAndRetryCounter().toBSON()); + + // Reset the statements executed list in the txnParticipant. + txnParticipant.invalidate(opCtx); + txnParticipant.refreshFromStorageIfNeededNoOplogEntryFetch(opCtx); + + txnParticipant.beginOrContinue(opCtx, + txnNumberAndRetryCounter, + boost::none /* autocommit */, + boost::none /* startTransaction */); + + // Reset the retryable write history chain. + noopEntry.setPrevWriteOpTimeInTransaction(OpTime()); + } + + // We should never process the same donor statement twice, except in failover + // cases where we'll also have "forgotten" the statement was executed. + uassert(5350902, + str::stream() << "Tenant oplog application processed same retryable write " + "twice for transaction " + << txnNumber << " statement " << stmtIds.front() << " on session " + << sessionId, + !txnParticipant.checkStatementExecutedNoOplogEntryFetch(opCtx, stmtIds.front())); + + // Set sessionId, txnNumber, and statementId for all ops in a retryable write. + noopEntry.setSessionId(sessionId); + noopEntry.setTxnNumber(txnNumber); + noopEntry.setStatementIds(stmtIds); + + // set fromMigrate on the no-op so the session update tracker recognizes it. + noopEntry.setFromMigrate(true); + + // Use the same wallclock time as the noop entry. The lastWriteOpTime will be filled + // in after the no-op is written. + auto sessionTxnRecord = + SessionTxnRecord{sessionId, txnNumber, OpTime(), noopEntry.getWallClockTime()}; + + // If we have a prePostImage no-op without the original entry, do not write it. This can + // happen in some very unlikely rollback situations. + auto isValidPrePostImageEntry = prePostImageEntry && hasPreOrPostImageOpTime; + + _writeSessionNoOp(opCtx, + noopEntry, + sessionTxnRecord, + stmtIds, + isValidPrePostImageEntry ? 
prePostImageEntry : boost::none); +} + +void TenantOplogApplier::_writeTransactionEntryNoOp(OperationContext* opCtx, + MutableOplogEntry& noopEntry, + const OplogEntry& entry) { + auto sessionId = *entry.getSessionId(); + auto txnNumber = *entry.getTxnNumber(); + auto optTxnRetryCounter = entry.getOperationSessionInfo().getTxnRetryCounter(); + uassert(ErrorCodes::InvalidOptions, + "txnRetryCounter is only supported in sharded clusters", + !optTxnRetryCounter.has_value()); + + LOGV2_DEBUG(5351502, + 1, + "Tenant Oplog Applier committing transaction", + "sessionId"_attr = sessionId, + "txnNumber"_attr = txnNumber, + "txnRetryCounter"_attr = optTxnRetryCounter, + "protocol"_attr = _protocol, + "migrationId"_attr = _migrationUuid, + "op"_attr = redact(entry.toBSONForLogging())); + + auto txnParticipant = TransactionParticipant::get(opCtx); + uassert(5351500, + str::stream() << "Tenant oplog application failed to get transaction participant " + "for transaction " + << txnNumber << " on session " << sessionId, + txnParticipant); + // We should only write the noop entry for this transaction commit once. + uassert(5351501, + str::stream() << "Tenant oplog application cannot apply transaction " << txnNumber + << " on session " << sessionId + << " because the transaction with txnNumberAndRetryCounter " + << txnParticipant.getActiveTxnNumberAndRetryCounter().toBSON() + << " has already started", + txnParticipant.getActiveTxnNumberAndRetryCounter().getTxnNumber() < txnNumber); + txnParticipant.beginOrContinueTransactionUnconditionally(opCtx, + {txnNumber, optTxnRetryCounter}); + + // Only set sessionId, txnNumber and txnRetryCounter for the final applyOp in a + // transaction. + noopEntry.setSessionId(sessionId); + noopEntry.setTxnNumber(txnNumber); + noopEntry.getOperationSessionInfo().setTxnRetryCounter(optTxnRetryCounter); + + // Write a fake applyOps with the tenantId as the namespace so that this will be picked + // up by the committed transaction prefetch pipeline in subsequent migrations. + // + // Unlike MTM, shard merge copies all tenants from the donor. This means that merge does + // not need to filter prefetched committed transactions by tenantId. As a result, + // setting a nss containing the tenantId for the fake transaction applyOps entry isn't + // necessary. + if (_protocol != MigrationProtocolEnum::kShardMerge) { + noopEntry.setObject( + BSON("applyOps" << BSON_ARRAY(BSON(OplogEntry::kNssFieldName + << NamespaceString(*_tenantId + "_", "").ns())))); + } + + // Use the same wallclock time as the noop entry. + auto sessionTxnRecord = + SessionTxnRecord{sessionId, txnNumber, OpTime(), noopEntry.getWallClockTime()}; + sessionTxnRecord.setState(DurableTxnStateEnum::kCommitted); + sessionTxnRecord.setTxnRetryCounter(optTxnRetryCounter); + + _writeSessionNoOp(opCtx, noopEntry, sessionTxnRecord); +} + TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries( OperationContext* opCtx, const TenantOplogBatch& batch) { auto* opObserver = cc().getServiceContext()->getOpObserver(); @@ -466,6 +743,12 @@ TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries( } // Group oplog entries from the same session for noop writes. if (auto sessionId = op.entry.getOperationSessionInfo().getSessionId()) { + uassert( + ErrorCodes::RetryableInternalTransactionNotSupported, + str::stream() << "Retryable internal transactions are not supported. 
Protocol:: " + << MigrationProtocol_serializer(_protocol) + << ", SessionId:: " << sessionId->toBSON(), + !isInternalSessionForRetryableWrite(*sessionId)); sessionOps[*sessionId].emplace_back(&op.entry, slotIter); } else { nonSessionOps.emplace_back(&op.entry, slotIter); @@ -501,7 +784,7 @@ TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries( if (thread == numOplogThreads - 1) { numOps = numOpsRemaining; } - _writerPool->schedule([=, &status = statusVector.at(thread)](auto scheduleStatus) { + _writerPool->schedule([=, this, &status = statusVector.at(thread)](auto scheduleStatus) { if (!scheduleStatus.isOK()) { status = scheduleStatus; } else { @@ -520,18 +803,19 @@ TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries( // Dispatch noop writes for oplog entries from the same session into the same writer thread. size_t sessionThreadNum = 0; for (const auto& s : sessionOps) { - _writerPool->schedule([=, &status = statusVector.at(numOplogThreads + sessionThreadNum)]( - auto scheduleStatus) { - if (!scheduleStatus.isOK()) { - status = scheduleStatus; - } else { - try { - _writeSessionNoOpsForRange(s.second.begin(), s.second.end()); - } catch (const DBException& e) { - status = e.toStatus(); + _writerPool->schedule( + [=, this, &status = statusVector.at(numOplogThreads + sessionThreadNum)]( + auto scheduleStatus) { + if (!scheduleStatus.isOK()) { + status = scheduleStatus; + } else { + try { + _writeSessionNoOpsForRange(s.second.begin(), s.second.end()); + } catch (const DBException& e) { + status = e.toStatus(); + } } - } - }); + }); sessionThreadNum++; } @@ -551,6 +835,42 @@ TenantOplogApplier::OpTimePair TenantOplogApplier::_writeNoOpEntries( return {batch.ops.back().entry.getOpTime(), greatestOplogSlotUsed}; } +void TenantOplogApplier::_writeSessionNoOp(OperationContext* opCtx, + MutableOplogEntry& noopEntry, + boost::optional sessionTxnRecord, + std::vector stmtIds, + boost::optional prePostImageEntry) { + LOGV2_DEBUG(5535700, + 2, + "Tenant Oplog Applier writing session no-op", + "protocol"_attr = _protocol, + "migrationId"_attr = _migrationUuid, + "op"_attr = redact(noopEntry.toBSON())); + + AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); + boost::optional tenantLock; + if (auto tid = noopEntry.getTid()) { + tenantLock.emplace(opCtx, *tid, MODE_IX); + } + + writeConflictRetry(opCtx, "writeTenantNoOps", NamespaceString::kRsOplogNamespace, [&] { + WriteUnitOfWork wuow(opCtx); + + // Write the pre/post image entry, if it exists. + if (prePostImageEntry) + repl::logOp(opCtx, &*prePostImageEntry); + // Write the noop entry and update config.transactions. 
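+            // repl::logOp() returns the optime assigned to the no-op; when a sessionTxnRecord is present, that optime becomes its lastWriteOpTime below.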
+ auto oplogOpTime = repl::logOp(opCtx, &noopEntry); + if (sessionTxnRecord) { + sessionTxnRecord->setLastWriteOpTime(oplogOpTime); + TransactionParticipant::get(opCtx).onWriteOpCompletedOnPrimary( + opCtx, {stmtIds}, *sessionTxnRecord); + } + + wuow.commit(); + }); +} + void TenantOplogApplier::_writeSessionNoOpsForRange( std::vector::const_iterator begin, std::vector::const_iterator end) { @@ -594,297 +914,103 @@ void TenantOplogApplier::_writeSessionNoOpsForRange( noopEntry.setObject2(entry.getEntry().toBSON()); noopEntry.setOpTime(*iter->second); noopEntry.setWallClockTime(opCtx->getServiceContext()->getFastClockSource()->now()); - noopEntry.setTid(entry.getTid()); - - boost::optional sessionTxnRecord; - std::vector stmtIds; - boost::optional prevWriteOpTime = boost::none; - if (entry.getTxnNumber() && !entry.isPartialTransaction() && - (entry.getCommandType() == repl::OplogEntry::CommandType::kCommitTransaction || - entry.getCommandType() == repl::OplogEntry::CommandType::kApplyOps)) { - // Final applyOp for a transaction. - auto sessionId = *entry.getSessionId(); - auto txnNumber = *entry.getTxnNumber(); - auto optTxnRetryCounter = entry.getOperationSessionInfo().getTxnRetryCounter(); - uassert(ErrorCodes::InvalidOptions, - "txnRetryCounter is only supported in sharded clusters", - !optTxnRetryCounter.has_value()); - { - auto lk = stdx::lock_guard(*opCtx->getClient()); - opCtx->setLogicalSessionId(sessionId); - opCtx->setTxnNumber(txnNumber); - opCtx->setInMultiDocumentTransaction(); - } - LOGV2_DEBUG(5351502, - 1, - "Tenant Oplog Applier committing transaction", - "sessionId"_attr = sessionId, - "txnNumber"_attr = txnNumber, - "txnRetryCounter"_attr = optTxnRetryCounter, - "protocol"_attr = _protocol, - "migrationId"_attr = _migrationUuid, - "op"_attr = redact(entry.toBSONForLogging())); - // Check out the session. - if (!scopedSession) { - auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx.get()); - scopedSession = mongoDSessionCatalog->checkOutSessionWithoutOplogRead(opCtx.get()); + boost::optional tenantId = [&]() -> boost::optional { + if (_protocol == MigrationProtocolEnum::kMultitenantMigrations && _tenantId) { + return TenantId{OID::createFromString(*_tenantId)}; } - - auto txnParticipant = TransactionParticipant::get(opCtx.get()); - uassert( - 5351500, - str::stream() << "Tenant oplog application failed to get transaction participant " - "for transaction " - << txnNumber << " on session " << sessionId, - txnParticipant); - // We should only write the noop entry for this transaction commit once. - uassert(5351501, - str::stream() << "Tenant oplog application cannot apply transaction " - << txnNumber << " on session " << sessionId - << " because the transaction with txnNumberAndRetryCounter " - << txnParticipant.getActiveTxnNumberAndRetryCounter().toBSON() - << " has already started", - txnParticipant.getActiveTxnNumberAndRetryCounter().getTxnNumber() < txnNumber); - txnParticipant.beginOrContinueTransactionUnconditionally( - opCtx.get(), {txnNumber, optTxnRetryCounter}); - - // Only set sessionId, txnNumber and txnRetryCounter for the final applyOp in a - // transaction. - noopEntry.setSessionId(sessionId); - noopEntry.setTxnNumber(txnNumber); - noopEntry.getOperationSessionInfo().setTxnRetryCounter(optTxnRetryCounter); - - // Write a fake applyOps with the tenantId as the namespace so that this will be picked - // up by the committed transaction prefetch pipeline in subsequent migrations. - // - // Unlike MTM, shard merge copies all tenants from the donor. 
This means that merge does - // not need to filter prefetched committed transactions by tenantId. As a result, - // setting a nss containing the tenantId for the fake transaction applyOps entry isn't - // necessary. - if (_protocol != MigrationProtocolEnum::kShardMerge) { - noopEntry.setObject(BSON( - "applyOps" << BSON_ARRAY(BSON(OplogEntry::kNssFieldName - << NamespaceString(*_tenantId + "_", "").ns())))); + if (_protocol == MigrationProtocolEnum::kShardMerge && entry.getTid()) { + return *entry.getTid(); } - - // Use the same wallclock time as the noop entry. - sessionTxnRecord.emplace(sessionId, txnNumber, OpTime(), noopEntry.getWallClockTime()); - sessionTxnRecord->setState(DurableTxnStateEnum::kCommitted); - sessionTxnRecord->setTxnRetryCounter(optTxnRetryCounter); - - // If we have a prePostImage no-op here, it is orphaned; this can happen in some - // very unlikely rollback situations. - prePostImageEntry = boost::none; - } else if (!entry.getStatementIds().empty() && - !SessionUpdateTracker::isTransactionEntry(entry)) { - // If it has a statement id but isn't a transaction, it's a retryable write. - auto sessionId = *entry.getSessionId(); - auto txnNumber = *entry.getTxnNumber(); - auto entryStmtIds = entry.getStatementIds(); - LOGV2_DEBUG(5351000, - 2, - "Tenant Oplog Applier processing retryable write", - "entry"_attr = redact(entry.toBSONForLogging()), - "sessionId"_attr = sessionId, - "txnNumber"_attr = txnNumber, - "statementIds"_attr = entryStmtIds, - "protocol"_attr = _protocol, - "migrationId"_attr = _migrationUuid); - if (entry.getOpType() == repl::OpTypeEnum::kNoop) { - // There are two types of no-ops we expect here. One is pre/post image, which - // will have an empty o2 field. The other is previously transformed oplog - // entries from earlier migrations. - - // We don't wrap the no-ops in another no-op. - // If object2 is missing, this is a preImage/postImage. - if (!entry.getObject2()) { - // *noopEntry.getObject2() is the original migrated no-op in BSON format. - prePostImageEntry = - uassertStatusOK(MutableOplogEntry::parse(*noopEntry.getObject2())); - originalPrePostImageOpTime = entry.getOpTime(); - prePostImageEntry->setOpTime(noopEntry.getOpTime()); - prePostImageEntry->setWallClockTime(noopEntry.getWallClockTime()); - prePostImageEntry->setFromMigrate(true); - // Clear the old tenant migration UUID. - prePostImageEntry->setFromTenantMigration(boost::none); - // Don't write the no-op entry. - continue; - } else { - // Otherwise this is a previously migrated retryable write. Avoid - // re-wrapping it. - uassert(5351003, - str::stream() << "Tenant Oplog Applier received unexpected Empty o2 " - "field (original oplog entry) in migrated noop: " - << redact(entry.toBSONForLogging()), - !entry.getObject2()->isEmpty()); - // *noopEntry.getObject2() is the original migrated no-op in BSON format. - noopEntry = uassertStatusOK(MutableOplogEntry::parse(*noopEntry.getObject2())); - noopEntry.setOpTime(*iter->second); - noopEntry.setWallClockTime( - opCtx->getServiceContext()->getFastClockSource()->now()); - // Clear the old tenant migration UUID. - noopEntry.setFromTenantMigration(boost::none); - - // Set the inner 'o2' optime to the donor entry's optime because the recipient - // uses the timestamp in 'o2' to determine where to resume applying from. 
- auto o2Entry = uassertStatusOK(MutableOplogEntry::parse(*entry.getObject2())); - o2Entry.setOpTime(entry.getOpTime()); - o2Entry.setWallClockTime(entry.getWallClockTime()); - noopEntry.setObject2(o2Entry.toBSON()); - } + return boost::none; + }(); + noopEntry.setTid(tenantId); + + switch (getOplogEntryType(entry)) { + case OplogEntryType::kOplogEntryTypeRetryableWritePrePostImage: { + // entry.getEntry().toBSON() is the pre- or post-image in BSON format. + prePostImageEntry = + uassertStatusOK(MutableOplogEntry::parse(entry.getEntry().toBSON())); + originalPrePostImageOpTime = entry.getOpTime(); + prePostImageEntry->setOpTime(*iter->second); + prePostImageEntry->setWallClockTime( + opCtx->getServiceContext()->getFastClockSource()->now()); + prePostImageEntry->setFromMigrate(true); + // Clear the old tenant migration UUID. + prePostImageEntry->setFromTenantMigration(boost::none); + // Don't write the no-op entry, both the no-op entry and prePostImage entry will be + // written on the next iteration. + continue; } - stmtIds.insert(stmtIds.end(), entryStmtIds.begin(), entryStmtIds.end()); - - if (!prePostImageEntry && (entry.getPreImageOpTime() || entry.getPostImageOpTime())) { - LOGV2(5535302, - "Tenant Oplog Applier omitting pre- or post- image for findAndModify", - "entry"_attr = redact(entry.toBSONForLogging()), - "protocol"_attr = _protocol, - "migrationId"_attr = _migrationUuid); - } else if (entry.getPreImageOpTime()) { - uassert( - 5351002, - str::stream() - << "Tenant oplog application cannot apply retryable write with txnNumber " - << txnNumber << " statementNumber " << stmtIds.front() << " on session " - << sessionId << " because the preImage op time " - << originalPrePostImageOpTime.toString() - << " does not match the expected optime " - << entry.getPreImageOpTime()->toString(), - originalPrePostImageOpTime == entry.getPreImageOpTime()); - noopEntry.setPreImageOpTime(prePostImageEntry->getOpTime()); - } else if (entry.getPostImageOpTime()) { - uassert( - 5351007, - str::stream() - << "Tenant oplog application cannot apply retryable write with txnNumber " - << txnNumber << " statementNumber " << stmtIds.front() << " on session " - << sessionId << " because the postImage op time " - << originalPrePostImageOpTime.toString() - << " does not match the expected optime " - << entry.getPostImageOpTime()->toString(), - originalPrePostImageOpTime == entry.getPostImageOpTime()); - noopEntry.setPostImageOpTime(prePostImageEntry->getOpTime()); - } else { - // Got a prePostImage no-op without the original entry; this can happen in some - // very unlikely rollback situations. - prePostImageEntry = boost::none; + case OplogEntryType::kOplogEntryTypePreviouslyWrappedRetryableWrite: { + uassert(5351003, + str::stream() << "Tenant Oplog Applier received unexpected Empty o2 " + "field (original oplog entry) in migrated noop: " + << redact(entry.toBSONForLogging()), + !entry.getObject2()->isEmpty()); + // entry.getEntry().toBSON() is the original migrated no-op in BSON format. + noopEntry = uassertStatusOK(MutableOplogEntry::parse(entry.getEntry().toBSON())); + noopEntry.setOpTime(*iter->second); + noopEntry.setWallClockTime(opCtx->getServiceContext()->getFastClockSource()->now()); + // Clear the old tenant migration UUID. + noopEntry.setFromTenantMigration(boost::none); + + // Set the inner 'o2' optime to the donor entry's optime because the recipient + // uses the timestamp in 'o2' to determine where to resume applying from. 
+ auto o2Entry = uassertStatusOK(MutableOplogEntry::parse(*entry.getObject2())); + o2Entry.setOpTime(entry.getOpTime()); + o2Entry.setWallClockTime(entry.getWallClockTime()); + noopEntry.setObject2(o2Entry.toBSON()); + + // Handle as for kOplogEntryTypeRetryableWrite after extracting original op. + [[fallthrough]]; } + case OplogEntryType::kOplogEntryTypeRetryableWrite: { + { + auto lk = stdx::lock_guard(*opCtx->getClient()); + opCtx->setLogicalSessionId(*entry.getSessionId()); + opCtx->setTxnNumber(*entry.getTxnNumber()); + } + + if (!scopedSession) { + auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx.get()); + scopedSession = + mongoDSessionCatalog->checkOutSessionWithoutOplogRead(opCtx.get()); + } - { - auto lk = stdx::lock_guard(*opCtx->getClient()); - opCtx->setLogicalSessionId(sessionId); - opCtx->setTxnNumber(txnNumber); + _writeRetryableWriteEntryNoOp( + opCtx.get(), noopEntry, entry, prePostImageEntry, originalPrePostImageOpTime); + break; } - if (!scopedSession) { - auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx.get()); - scopedSession = mongoDSessionCatalog->checkOutSessionWithoutOplogRead(opCtx.get()); + case OplogEntryType::kOplogEntryTypePartialTransaction: { + _writeSessionNoOp(opCtx.get(), noopEntry); + break; } + case OplogEntryType::kOplogEntryTypeTransaction: { + { + auto lk = stdx::lock_guard(*opCtx->getClient()); + opCtx->setLogicalSessionId(*entry.getSessionId()); + opCtx->setTxnNumber(*entry.getTxnNumber()); + opCtx->setInMultiDocumentTransaction(); + } - auto txnParticipant = TransactionParticipant::get(opCtx.get()); - uassert(5350900, - str::stream() << "Tenant oplog application failed to get retryable write " - "for transaction " - << txnNumber << " on session " << sessionId, - txnParticipant); - // beginOrContinue throws on failure, which will abort the migration. Failure should - // only result from out-of-order processing, which should not happen. - TxnNumberAndRetryCounter txnNumberAndRetryCounter{txnNumber}; - txnParticipant.beginOrContinue(opCtx.get(), - txnNumberAndRetryCounter, - boost::none /* autocommit */, - boost::none /* startTransaction */); - - // We could have an existing lastWriteOpTime for the same retryable write chain from a - // previously aborted migration. This could also happen if the tenant being migrated has - // previously resided in this replica set. So we want to start a new history chain - // instead of linking the newly generated no-op to the existing chain before the current - // migration starts. Otherwise, we could have duplicate entries for the same stmtId. - invariant(!_cloneFinishedRecipientOpTime.isNull()); - if (txnParticipant.getLastWriteOpTime() > _cloneFinishedRecipientOpTime) { - prevWriteOpTime = txnParticipant.getLastWriteOpTime(); - } else { - prevWriteOpTime = OpTime(); + // Check out the session. + if (!scopedSession) { + auto mongoDSessionCatalog = MongoDSessionCatalog::get(opCtx.get()); + scopedSession = + mongoDSessionCatalog->checkOutSessionWithoutOplogRead(opCtx.get()); + } - // Before we start a new history chain, reset the in-memory retryable write - // state in the txnParticipant so it can be built up from scratch again with - // the new chain. 
- LOGV2_DEBUG(5709800, - 2, - "Tenant oplog applier resetting existing retryable write state", - "lastWriteOpTime"_attr = txnParticipant.getLastWriteOpTime(), - "_cloneFinishedRecipientOpTime"_attr = _cloneFinishedRecipientOpTime, - "sessionId"_attr = sessionId, - "txnNumber"_attr = txnNumber, - "statementIds"_attr = entryStmtIds, - "protocol"_attr = _protocol, - "migrationId"_attr = _migrationUuid); - txnParticipant.invalidate(opCtx.get()); - txnParticipant.refreshFromStorageIfNeededNoOplogEntryFetch(opCtx.get()); - TxnNumberAndRetryCounter txnNumberAndRetryCounter{txnNumber}; - txnParticipant.beginOrContinue(opCtx.get(), - txnNumberAndRetryCounter, - boost::none /* autocommit */, - boost::none /* startTransaction */); + _writeTransactionEntryNoOp(opCtx.get(), noopEntry, entry); + break; } - - // We should never process the same donor statement twice, except in failover - // cases where we'll also have "forgotten" the statement was executed. - uassert(5350902, - str::stream() << "Tenant oplog application processed same retryable write " - "twice for transaction " - << txnNumber << " statement " << entryStmtIds.front() - << " on session " << sessionId, - !txnParticipant.checkStatementExecutedNoOplogEntryFetch(opCtx.get(), - entryStmtIds.front())); - - // Set sessionId, txnNumber, and statementId for all ops in a retryable write. - noopEntry.setSessionId(sessionId); - noopEntry.setTxnNumber(txnNumber); - noopEntry.setStatementIds(entryStmtIds); - - // set fromMigrate on the no-op so the session update tracker recognizes it. - noopEntry.setFromMigrate(true); - - // Use the same wallclock time as the noop entry. The lastWriteOpTime will be filled - // in after the no-op is written. - sessionTxnRecord.emplace(sessionId, txnNumber, OpTime(), noopEntry.getWallClockTime()); - } else { - // This is a partial transaction oplog entry. - - // If we have a prePostImage no-op here, it is orphaned; this can happen in some - // very unlikely rollback situations. - prePostImageEntry = boost::none; + default: + MONGO_UNREACHABLE; } - noopEntry.setPrevWriteOpTimeInTransaction(prevWriteOpTime); - - LOGV2_DEBUG(5535700, - 2, - "Tenant Oplog Applier writing session no-op", - "protocol"_attr = _protocol, - "migrationId"_attr = _migrationUuid, - "op"_attr = redact(noopEntry.toBSON())); - - AutoGetOplog oplogWrite(opCtx.get(), OplogAccessMode::kWrite); - writeConflictRetry( - opCtx.get(), "writeTenantNoOps", NamespaceString::kRsOplogNamespace.ns(), [&] { - WriteUnitOfWork wuow(opCtx.get()); - - // Write the pre/post image entry, if it exists. - if (prePostImageEntry) - repl::logOp(opCtx.get(), &*prePostImageEntry); - // Write the noop entry and update config.transactions. - auto oplogOpTime = repl::logOp(opCtx.get(), &noopEntry); - if (sessionTxnRecord) { - sessionTxnRecord->setLastWriteOpTime(oplogOpTime); - TransactionParticipant::get(opCtx.get()) - .onWriteOpCompletedOnPrimary(opCtx.get(), {stmtIds}, *sessionTxnRecord); - } - - wuow.commit(); - }); + // If we have a prePostImage no-op here that hasn't already been logged, it is orphaned; + // this can happen in some very unlikely rollback situations. Otherwise, the image entry + // should have been written at this point so we need to reset it for the next iteration. 
prePostImageEntry = boost::none; // Invalidate in-memory state so that the next time the session is checked out, it @@ -914,38 +1040,66 @@ void TenantOplogApplier::_writeNoOpsForRange(OpObserver* opObserver, opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); AutoGetOplog oplogWrite(opCtx.get(), OplogAccessMode::kWrite); - writeConflictRetry( - opCtx.get(), "writeTenantNoOps", NamespaceString::kRsOplogNamespace.ns(), [&] { - WriteUnitOfWork wuow(opCtx.get()); - for (auto iter = begin; iter != end; iter++) { - const auto& entry = *iter->first; - if (isResumeTokenNoop(entry)) { - // We don't want to write noops for resume token noop oplog entries. They would - // not be applied in a change stream anyways. - continue; + auto tenantLocks = _acquireIntentExclusiveTenantLocks(opCtx.get(), begin, end); + + writeConflictRetry(opCtx.get(), "writeTenantNoOps", NamespaceString::kRsOplogNamespace, [&] { + WriteUnitOfWork wuow(opCtx.get()); + for (auto iter = begin; iter != end; iter++) { + const auto& entry = *iter->first; + if (isResumeTokenNoop(entry)) { + // We don't want to write noops for resume token noop oplog entries. They would + // not be applied in a change stream anyways. + continue; + } + // We don't need to link no-ops entries for operations done outside of a session. + const boost::optional preImageOpTime = boost::none; + const boost::optional postImageOpTime = boost::none; + const boost::optional prevWriteOpTimeInTransaction = boost::none; + opObserver->onInternalOpMessage( + opCtx.get(), + entry.getNss(), + entry.getUuid(), + {}, // Empty 'o' field. + entry.getEntry().toBSON(), + // We link the no-ops together by recipient op time the same way the actual ops + // were linked together by donor op time. This is to allow retryable writes + // and changestreams to find the ops they need. + preImageOpTime, + postImageOpTime, + prevWriteOpTimeInTransaction, + *iter->second); + } + wuow.commit(); + }); +} +std::vector TenantOplogApplier::_acquireIntentExclusiveTenantLocks( + OperationContext* opCtx, + std::vector::const_iterator entryBegin, + std::vector::const_iterator entryEnd) const { + // Determine all involved tenants. + std::set tenantIds = [&] { + std::set tenantIds; + if (_tenantId) { + tenantIds.emplace(OID::createFromString(*_tenantId)); + } else { + for (auto iter = entryBegin; iter != entryEnd; ++iter) { + const auto& oplogEntry = *iter->first; + if (oplogEntry.getTid()) { + tenantIds.insert(*oplogEntry.getTid()); } - // We don't need to link no-ops entries for operations done outside of a session. - const boost::optional preImageOpTime = boost::none; - const boost::optional postImageOpTime = boost::none; - const boost::optional prevWriteOpTimeInTransaction = boost::none; - opObserver->onInternalOpMessage( - opCtx.get(), - entry.getNss(), - entry.getUuid(), - {}, // Empty 'o' field. - entry.getEntry().toBSON(), - // We link the no-ops together by recipient op time the same way the actual ops - // were linked together by donor op time. This is to allow retryable writes - // and changestreams to find the ops they need. - preImageOpTime, - postImageOpTime, - prevWriteOpTimeInTransaction, - *iter->second); } - wuow.commit(); - }); + } + return tenantIds; + }(); + + // Acquire a lock for each tenant. 
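    // (Note: because the ids were collected into a std::set, they are iterated in sorted order
    //  below, so every batch acquires its tenant IX locks in a deterministic order.)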
+ std::vector tenantLocks; + tenantLocks.reserve(tenantIds.size()); + for (auto&& tenantId : tenantIds) { + tenantLocks.emplace_back(opCtx, tenantId, MODE_IX); + } + return tenantLocks; } - std::vector> TenantOplogApplier::_fillWriterVectors( OperationContext* opCtx, TenantOplogBatch* batch) { std::vector> writerVectors( @@ -994,6 +1148,7 @@ std::vector> TenantOplogApplier::_fillWriterVector isTransactionWithCommand = true; } } + if (op.ignore) { continue; } @@ -1090,39 +1245,19 @@ Status TenantOplogApplier::_applyOplogBatchPerWorker(std::vectorlockState()->setShouldConflictWithSecondaryBatchApplication(false); - const bool allowNamespaceNotFoundErrorsOnCrudOps(true); - bool isDataConsistent; - OplogApplication::Mode mode; - switch (_protocol) { - case MigrationProtocolEnum::kMultitenantMigrations: - // Multi-tenant migration always use oplog application mode 'kInitialSync' and - // isDataConsistent 'false', because we're applying oplog entries to a cloned database - // the way initial sync does. - isDataConsistent = false; - mode = OplogApplication::Mode::kInitialSync; - break; - case MigrationProtocolEnum::kShardMerge: - // Since shard merge protocol uses backup cursor for database cloning and tenant oplog - // catchup phase is not resumable on failovers, the oplog entries will be applied on a - // consistent copy of donor data. - isDataConsistent = true; - mode = OplogApplication::Mode::kSecondary; - break; - default: - MONGO_UNREACHABLE; - } auto status = OplogApplierUtils::applyOplogBatchCommon( opCtx.get(), ops, - mode, - allowNamespaceNotFoundErrorsOnCrudOps, - isDataConsistent, + _options.mode, + _options.allowNamespaceNotFoundErrorsOnCrudOps, + _options.isDataConsistent, [this](OperationContext* opCtx, const OplogEntryOrGroupedInserts& opOrInserts, OplogApplication::Mode mode, const bool isDataConsistent) { return _applyOplogEntryOrGroupedInserts(opCtx, opOrInserts, mode, isDataConsistent); }); + if (!status.isOK()) { LOGV2_ERROR(4886008, "Tenant migration writer worker batch application failed", diff --git a/src/mongo/db/repl/tenant_oplog_applier.h b/src/mongo/db/repl/tenant_oplog_applier.h index c4c218b7289e5..07355ba83a576 100644 --- a/src/mongo/db/repl/tenant_oplog_applier.h +++ b/src/mongo/db/repl/tenant_oplog_applier.h @@ -29,16 +29,42 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/abstract_async_component.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/tenant_oplog_batcher.h" #include "mongo/db/serverless/serverless_types_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" namespace mongo { class ThreadPool; @@ -72,10 +98,32 @@ class TenantOplogApplier : public 
AbstractAsyncComponent, OpTime recipientOpTime; }; + /** + * Used to configure behavior of this TenantOplogApplier. + **/ + struct Options { + explicit Options(OplogApplication::Mode inputMode) + : mode(inputMode), + allowNamespaceNotFoundErrorsOnCrudOps(inputMode != + OplogApplication::Mode::kSecondary), + isDataConsistent(inputMode == OplogApplication::Mode::kSecondary) { + + // Safety rail to prevent incorrect values for 'isDataConsistent' & + // 'allowNamespaceNotFoundErrorsOnCrudOps' for future oplog application modes. + invariant(mode == OplogApplication::Mode::kInitialSync || + mode == OplogApplication::Mode::kSecondary); + } + + const OplogApplication::Mode mode; + const bool allowNamespaceNotFoundErrorsOnCrudOps; + const bool isDataConsistent; + }; + TenantOplogApplier(const UUID& migrationUuid, const MigrationProtocolEnum& protocol, + const OpTime& StartApplyingAfterOpTime, + const OpTime& cloneFinishedRecipientOpTime, boost::optional tenantId, - OpTime StartApplyingAfterOpTime, RandomAccessOplogBuffer* oplogBuffer, std::shared_ptr executor, ThreadPool* writerPool, @@ -97,11 +145,6 @@ class TenantOplogApplier : public AbstractAsyncComponent, return _numOpsApplied; } - /** - * This should only be called once before the applier starts. - */ - void setCloneFinishedRecipientOpTime(OpTime cloneFinishedRecipientOpTime); - /** * Returns the optime the applier will start applying from. */ @@ -126,6 +169,19 @@ class TenantOplogApplier : public AbstractAsyncComponent, void _applyOplogBatch(TenantOplogBatch* batch); Status _applyOplogBatchPerWorker(std::vector* ops); void _checkNsAndUuidsBelongToTenant(OperationContext* opCtx, const TenantOplogBatch& batch); + void _writeTransactionEntryNoOp(OperationContext* opCtx, + MutableOplogEntry& noopEntry, + const OplogEntry& entry); + void _writeRetryableWriteEntryNoOp(OperationContext* opCtx, + MutableOplogEntry& noopEntry, + const OplogEntry& entry, + const boost::optional& prePostImageEntry, + const OpTime& originalPrePostImageOpTime); + void _writeSessionNoOp(OperationContext* opCtx, + MutableOplogEntry& noopEntry, + boost::optional sessionTxnRecord = boost::none, + std::vector stmtIds = {}, + boost::optional prePostImageEntry = boost::none); OpTimePair _writeNoOpEntries(OperationContext* opCtx, const TenantOplogBatch& batch); using TenantNoOpEntry = std::pair::iterator>; @@ -142,6 +198,15 @@ class TenantOplogApplier : public AbstractAsyncComponent, std::vector> _fillWriterVectors(OperationContext* opCtx, TenantOplogBatch* batch); + /** + * Acquires Intent Exclusive (IX) lock for each tenant referred to by oplog entries [entryBegin; + * entryEnd) and returns lock objects. + */ + std::vector _acquireIntentExclusiveTenantLocks( + OperationContext* opCtx, + std::vector::const_iterator entryBegin, + std::vector::const_iterator entryEnd) const; + /** * Sets the _finalStatus to the new status if and only if the old status is "OK". */ @@ -164,21 +229,24 @@ class TenantOplogApplier : public AbstractAsyncComponent, std::shared_ptr _oplogBatcher; // (R) const UUID _migrationUuid; // (R) const MigrationProtocolEnum _protocol; // (R) - // For multi-tenant migration protocol, _tenantId is set. - // But, for shard merge protcol, _tenantId is empty. 
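    // (Illustrative sketch for the Options struct declared above; the values follow directly from
    //  its constructor and the variable names are hypothetical:
    //      TenantOplogApplier::Options mtmOpts(OplogApplication::Mode::kInitialSync);
    //      // mtmOpts.allowNamespaceNotFoundErrorsOnCrudOps == true, mtmOpts.isDataConsistent == false
    //      TenantOplogApplier::Options mergeOpts(OplogApplication::Mode::kSecondary);
    //      // mergeOpts.allowNamespaceNotFoundErrorsOnCrudOps == false, mergeOpts.isDataConsistent == true
    //  Any other mode trips the invariant in the constructor.)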
- const boost::optional _tenantId; // (R) const OpTime _startApplyingAfterOpTime; // (R) - RandomAccessOplogBuffer* _oplogBuffer; // (R) - std::shared_ptr _executor; // (R) - // All no-op entries written by this tenant migration should have OpTime greater than this + // All no-op entries written by this migration should have OpTime greater than this // OpTime. - OpTime _cloneFinishedRecipientOpTime = OpTime(); // (R) + const OpTime _cloneFinishedRecipientOpTime; // (R) + // For multi-tenant migration protocol, _tenantId is set. + // But, for shard merge protocol, _tenantId is empty. + const boost::optional _tenantId; // (R) + + RandomAccessOplogBuffer* _oplogBuffer; // (R) + std::shared_ptr + _executor; // (R) + // Pool of worker threads for writing ops to the databases. + // Not owned by us. + ThreadPool* const _writerPool; // (S) // Keeps track of last applied donor and recipient optimes by the tenant oplog applier. // This gets updated only on batch boundaries. OpTimePair _lastAppliedOpTimesUpToLastBatch; // (M) - // Pool of worker threads for writing ops to the databases. - // Not owned by us. - ThreadPool* const _writerPool; // (S) + // The timestamp to resume batching from. A null timestamp indicates that the oplog applier // is starting fresh (not a retry), and will start batching from the beginning of the oplog // buffer. @@ -188,6 +256,7 @@ class TenantOplogApplier : public AbstractAsyncComponent, stdx::unordered_set _knownGoodUuids; // (X) bool _applyLoopApplyingBatch = false; // (M) size_t _numOpsApplied{0}; // (M) + const Options _options; // (R) }; /** diff --git a/src/mongo/db/repl/tenant_oplog_applier_shard_merge_test.cpp b/src/mongo/db/repl/tenant_oplog_applier_shard_merge_test.cpp new file mode 100644 index 0000000000000..6e03a7a58fb23 --- /dev/null +++ b/src/mongo/db/repl/tenant_oplog_applier_shard_merge_test.cpp @@ -0,0 +1,709 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file.
+ */ + +#include "mongo/platform/basic.h" + +#include +#include + +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/repl/oplog_applier_impl_test_fixture.h" +#include "mongo/db/repl/oplog_batcher_test_fixture.h" +#include "mongo/db/repl/oplog_entry_test_helpers.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" +#include "mongo/db/repl/tenant_migration_decoration.h" +#include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" +#include "mongo/db/repl/tenant_migration_recipient_service.h" +#include "mongo/db/repl/tenant_oplog_applier.h" +#include "mongo/db/repl/tenant_oplog_batcher.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/logv2/log.h" +#include "mongo/unittest/log_test.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +namespace mongo { + +using executor::TaskExecutor; +using executor::ThreadPoolExecutorTest; + +namespace repl { + +namespace { +const OpTime kDefaultCloneFinishedRecipientOpTime(Timestamp(1, 1), 1); +} // namespace + +class TenantOplogApplierTestOpObserver : public OplogApplierImplOpObserver { +public: + void onInternalOpMessage(OperationContext* opCtx, + const NamespaceString& nss, + const boost::optional& uuid, + const BSONObj& msgObj, + const boost::optional o2MsgObj, + const boost::optional preImageOpTime, + const boost::optional postImageOpTime, + const boost::optional prevWriteOpTimeInTransaction, + const boost::optional slot) final { + MutableOplogEntry oplogEntry; + oplogEntry.setOpType(repl::OpTypeEnum::kNoop); + oplogEntry.setNss(nss); + oplogEntry.setUuid(uuid); + oplogEntry.setObject(msgObj); + oplogEntry.setObject2(o2MsgObj); + oplogEntry.setPreImageOpTime(preImageOpTime); + oplogEntry.setPostImageOpTime(postImageOpTime); + oplogEntry.setPrevWriteOpTimeInTransaction(prevWriteOpTimeInTransaction); + if (slot) { + oplogEntry.setOpTime(*slot); + } else { + oplogEntry.setOpTime(getNextOpTime(opCtx)); + } + const auto& recipientInfo = tenantMigrationInfo(opCtx); + if (recipientInfo) { + oplogEntry.setFromTenantMigration(recipientInfo->uuid); + } + stdx::lock_guard lk(_mutex); + _entries.push_back(oplogEntry); + } + + // Returns a vector of the oplog entries recorded, in optime order. 
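    // (The implementation below copies the recorded entries while holding _mutex and sorts the
    //  copy only after releasing it, so callers get a stable snapshot without blocking the
    //  observer for the duration of the sort.)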
+ std::vector getEntries() { + std::vector entries; + { + stdx::lock_guard lk(_mutex); + entries = _entries; + } + std::sort(entries.begin(), + entries.end(), + [](const MutableOplogEntry& a, const MutableOplogEntry& b) { + return a.getOpTime() < b.getOpTime(); + }); + return entries; + } + +private: + mutable Mutex _mutex = MONGO_MAKE_LATCH("TenantOplogApplierTestOpObserver::_mutex"); + std::vector _entries; +}; + +class TenantOplogApplierMergeTest : public ServiceContextMongoDTest { +public: + const TenantId kTenantId = TenantId(OID::gen()); + const UUID kMigrationUuid = UUID::gen(); + const DatabaseName kTenantDB = DatabaseName::createDatabaseName_forTest(kTenantId, "test"); + void setUp() override { + ServiceContextMongoDTest::setUp(); + + // These defaults are generated from the repl_server_paremeters IDL file. Set them here + // to start each test case from a clean state. + tenantApplierBatchSizeBytes.store(kTenantApplierBatchSizeBytesDefault); + tenantApplierBatchSizeOps.store(kTenantApplierBatchSizeOpsDefault); + + // Set up an OpObserver to track the documents OplogApplierImpl inserts. + auto service = getServiceContext(); + auto opObserver = std::make_unique(); + _opObserver = opObserver.get(); + auto opObserverRegistry = dynamic_cast(service->getOpObserver()); + opObserverRegistry->addObserver(std::move(opObserver)); + + auto network = std::make_unique(); + _net = network.get(); + executor::ThreadPoolMock::Options thread_pool_options; + thread_pool_options.onCreateThread = [] { + Client::initThread("TenantOplogApplier"); + }; + _executor = makeSharedThreadPoolTestExecutor(std::move(network), thread_pool_options); + _executor->startup(); + _oplogBuffer.startup(nullptr); + + // Set up a replication coordinator and storage interface, needed for opObservers. + repl::StorageInterface::set(service, std::make_unique()); + repl::ReplicationCoordinator::set( + service, std::make_unique(service)); + + // Set up oplog collection. If the WT storage engine is used, the oplog collection is + // expected to exist when fetching the next opTime (LocalOplogInfo::getNextOpTimes) to use + // for a write. + _opCtx = cc().makeOperationContext(); + repl::createOplog(_opCtx.get()); + + MongoDSessionCatalog::set( + service, + std::make_unique( + std::make_unique())); + + // Ensure that we are primary. 
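    // (The applier's no-op writes are replicated writes, so the node must report PRIMARY here;
    //  several tests below also assert opCtx->writesAreReplicated().)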
+ auto replCoord = ReplicationCoordinator::get(_opCtx.get()); + ASSERT_OK(replCoord->setFollowerMode(MemberState::RS_PRIMARY)); + + auto recipientMtab = + std::make_shared(service, kMigrationUuid); + TenantMigrationAccessBlockerRegistry::get(service).add(kTenantId, std::move(recipientMtab)); + + _writerPool = makeTenantMigrationWriterPool(1); + _applier = std::make_shared(kMigrationUuid, + MigrationProtocolEnum::kShardMerge, + OpTime(), + kDefaultCloneFinishedRecipientOpTime, + boost::none, + &_oplogBuffer, + _executor, + _writerPool.get()); + } + + void tearDown() override { + _applier->shutdown(); + _applier->join(); + + _writerPool->shutdown(); + _writerPool->join(); + + _executor->shutdown(); + _executor->join(); + + _oplogBuffer.shutdown(_opCtx.get()); + + TenantMigrationAccessBlockerRegistry::get(getGlobalServiceContext()) + .removeAccessBlockersForMigration( + kMigrationUuid, TenantMigrationAccessBlocker::BlockerType::kRecipient); + } + + void assertNoOpMatches(const OplogEntry& op, const MutableOplogEntry& noOp) { + ASSERT_BSONOBJ_EQ(op.getEntry().toBSON(), *noOp.getObject2()); + ASSERT_EQ(op.getNss(), noOp.getNss()); + ASSERT_EQ(op.getUuid(), noOp.getUuid()); + ASSERT_EQ(kMigrationUuid, noOp.getFromTenantMigration()); + } + + void pushOps(const std::vector& ops) { + std::vector bsonOps; + for (const auto& op : ops) { + bsonOps.push_back(op.getEntry().toBSON()); + } + _oplogBuffer.push(nullptr, bsonOps.begin(), bsonOps.end()); + } + + StorageInterface* getStorageInterface() { + return StorageInterface::get(_opCtx->getServiceContext()); + } + +protected: + OplogBufferMock _oplogBuffer; + executor::NetworkInterfaceMock* _net; + std::shared_ptr _executor; + ServiceContext::UniqueOperationContext _opCtx; + TenantOplogApplierTestOpObserver* _opObserver; // Owned by service context opObserverRegistry + std::unique_ptr _writerPool; + std::shared_ptr _applier; + + +private: + unittest::MinimumLoggedSeverityGuard _replicationSeverityGuard{ + logv2::LogComponent::kReplication, logv2::LogSeverity::Debug(1)}; + unittest::MinimumLoggedSeverityGuard _tenantMigrationSeverityGuard{ + logv2::LogComponent::kTenantMigration, logv2::LogSeverity::Debug(1)}; +}; + +TEST_F(TenantOplogApplierMergeTest, NoOpsForSingleBatch) { + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "foo"); + const auto& uuid1 = createCollectionWithUuid(_opCtx.get(), nss1); + NamespaceString nss2 = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"); + const auto& uuid2 = createCollectionWithUuid(_opCtx.get(), nss2); + + std::vector srcOps; + srcOps.push_back(makeInsertOplogEntry(1, nss1, uuid1)); + srcOps.push_back(makeInsertOplogEntry(2, nss2, uuid2)); + pushOps(srcOps); + ASSERT_OK(_applier->startup()); + // Even if we wait for the first op in a batch, it is the last op we should be notified on. 
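    // (getNotificationForOpTime() returns a future fulfilled once the batch containing the given
    //  donor optime has been fully applied; its value carries the donor/recipient op times of the
    //  last op in that batch, which is what the assertion below relies on.)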
+ auto lastBatchTimes = _applier->getNotificationForOpTime(srcOps.front().getOpTime()).get(); + ASSERT_EQ(srcOps.back().getOpTime(), lastBatchTimes.donorOpTime); + auto entries = _opObserver->getEntries(); + ASSERT_EQ(2, entries.size()); + assertNoOpMatches(srcOps[0], entries[0]); + assertNoOpMatches(srcOps[1], entries[1]); + ASSERT_EQ(srcOps.size(), _applier->getNumOpsApplied()); +} + +TEST_F(TenantOplogApplierMergeTest, NoOpsForMultipleBatches) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "foo"); + const auto& uuid = createCollectionWithUuid(_opCtx.get(), nss); + + std::vector srcOps; + srcOps.push_back(makeInsertOplogEntry(1, nss, uuid)); + srcOps.push_back(makeInsertOplogEntry(2, nss, uuid)); + srcOps.push_back(makeInsertOplogEntry(3, nss, uuid)); + srcOps.push_back(makeInsertOplogEntry(4, nss, uuid)); + + tenantApplierBatchSizeBytes.store(100 * 1024 /* bytes */); + tenantApplierBatchSizeOps.store(2 /* ops */); + + ASSERT_OK(_applier->startup()); + auto firstBatchFuture = _applier->getNotificationForOpTime(srcOps[0].getOpTime()); + auto secondBatchFuture = _applier->getNotificationForOpTime(srcOps[2].getOpTime()); + pushOps(srcOps); + // We should see the last batch optime for each batch in our notifications. + ASSERT_EQ(srcOps[1].getOpTime(), firstBatchFuture.get().donorOpTime); + ASSERT_EQ(srcOps[3].getOpTime(), secondBatchFuture.get().donorOpTime); + auto entries = _opObserver->getEntries(); + ASSERT_EQ(4, entries.size()); + assertNoOpMatches(srcOps[0], entries[0]); + assertNoOpMatches(srcOps[1], entries[1]); + assertNoOpMatches(srcOps[2], entries[2]); + assertNoOpMatches(srcOps[3], entries[3]); +} + +TEST_F(TenantOplogApplierMergeTest, NoOpsForLargeTransaction) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"); + const auto& uuid = createCollectionWithUuid(_opCtx.get(), nss); + + std::vector innerOps1; + innerOps1.push_back(makeInsertOplogEntry(11, nss, uuid)); + innerOps1.push_back(makeInsertOplogEntry(12, nss, uuid)); + + std::vector innerOps2; + innerOps2.push_back(makeInsertOplogEntry(21, nss, uuid)); + innerOps2.push_back(makeInsertOplogEntry(22, nss, uuid)); + + std::vector innerOps3; + innerOps3.push_back(makeInsertOplogEntry(31, nss, uuid)); + innerOps3.push_back(makeInsertOplogEntry(32, nss, uuid)); + + // Makes entries with ts from range [2, 5). + std::vector srcOps = makeMultiEntryTransactionOplogEntries( + 2, kTenantDB, /* prepared */ false, {innerOps1, innerOps2, innerOps3}); + + pushOps(srcOps); + ASSERT_OK(_applier->startup()); + // The first two ops should come in the first batch. + auto firstBatchFuture = _applier->getNotificationForOpTime(srcOps[0].getOpTime()); + ASSERT_EQ(srcOps[1].getOpTime(), firstBatchFuture.get().donorOpTime); + // The last op is in a batch by itself. 
+ auto secondBatchFuture = _applier->getNotificationForOpTime(srcOps[2].getOpTime()); + ASSERT_EQ(srcOps[2].getOpTime(), secondBatchFuture.get().donorOpTime); + auto entries = _opObserver->getEntries(); + ASSERT_EQ(srcOps.size(), entries.size()); + for (size_t i = 0; i < srcOps.size(); i++) { + assertNoOpMatches(srcOps[i], entries[i]); + } +} + +TEST_F(TenantOplogApplierMergeTest, ApplyInsert_Success) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"); + auto uuid = createCollectionWithUuid(_opCtx.get(), nss); + auto entry = makeInsertOplogEntry(1, nss, uuid); + bool onInsertsCalled = false; + _opObserver->onInsertsFn = + [&](OperationContext* opCtx, const NamespaceString& nss, const std::vector& docs) { + ASSERT_FALSE(onInsertsCalled); + onInsertsCalled = true; + // TODO Check that (nss.dbName() == kTenantDB) once the OplogEntry deserializer passes + // "tid" to the NamespaceString constructor + ASSERT_EQUALS(nss.dbName().db(), kTenantDB.toStringWithTenantId_forTest()); + ASSERT_EQUALS(nss.coll(), "bar"); + ASSERT_EQUALS(1, docs.size()); + ASSERT_BSONOBJ_EQ(docs[0], entry.getObject()); + }; + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + ASSERT_TRUE(onInsertsCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyInserts_Grouped) { + // TODO(SERVER-50256): remove nss_workaround, which is used to work around a bug where + // the first operation assigned to a worker cannot be grouped. + NamespaceString nss_workaround = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "a"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"); + NamespaceString nss2 = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "baz"); + auto uuid1 = createCollectionWithUuid(_opCtx.get(), nss1); + auto uuid2 = createCollectionWithUuid(_opCtx.get(), nss2); + auto uuid_workaround = createCollectionWithUuid(_opCtx.get(), nss_workaround); + + std::vector entries; + bool onInsertsCalledNss1 = false; + bool onInsertsCalledNss2 = false; + // Despite the odd one in the middle, all the others should be grouped into a single insert. 
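    // (Concretely, the expectation below is a single onInserts call carrying all six nss1
    //  documents, plus one separate call with the lone nss2 document.)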
+ entries.push_back(makeInsertOplogEntry(1, nss1, uuid1)); + entries.push_back(makeInsertOplogEntry(2, nss1, uuid1)); + entries.push_back(makeInsertOplogEntry(3, nss1, uuid1)); + entries.push_back(makeInsertOplogEntry(4, nss2, uuid2)); + entries.push_back(makeInsertOplogEntry(5, nss1, uuid1)); + entries.push_back(makeInsertOplogEntry(6, nss1, uuid1)); + entries.push_back(makeInsertOplogEntry(7, nss1, uuid1)); + entries.push_back(makeInsertOplogEntry(8, nss_workaround, uuid_workaround)); + _opObserver->onInsertsFn = + [&](OperationContext* opCtx, const NamespaceString& nss, const std::vector& docs) { + if (nss == nss1) { + ASSERT_FALSE(onInsertsCalledNss1); + onInsertsCalledNss1 = true; + ASSERT_EQUALS(6, docs.size()); + for (int i = 0; i < 3; i++) { + ASSERT_BSONOBJ_EQ(docs[i], entries[i].getObject()); + } + for (int i = 3; i < 6; i++) { + ASSERT_BSONOBJ_EQ(docs[i], entries[i + 1].getObject()); + } + } else if (nss == nss2) { + ASSERT_FALSE(onInsertsCalledNss2); + onInsertsCalledNss2 = true; + ASSERT_EQUALS(1, docs.size()); + ASSERT_BSONOBJ_EQ(docs[0], entries[3].getObject()); + } + }; + pushOps(entries); + + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entries.back().getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + ASSERT_TRUE(onInsertsCalledNss1); + ASSERT_TRUE(onInsertsCalledNss2); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyUpdate_Success) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"); + auto uuid = createCollectionWithUuid(_opCtx.get(), nss); + ASSERT_OK(getStorageInterface()->insertDocument(_opCtx.get(), nss, {BSON("_id" << 0)}, 0)); + auto entry = makeOplogEntry(repl::OpTypeEnum::kUpdate, + nss, + uuid, + update_oplog_entry::makeDeltaOplogEntry( + BSON(doc_diff::kUpdateSectionFieldName << fromjson("{a: 1}"))), + BSON("_id" << 0)); + bool onUpdateCalled = false; + _opObserver->onUpdateFn = [&](OperationContext* opCtx, const OplogUpdateEntryArgs& args) { + onUpdateCalled = true; + ASSERT_EQUALS(nss, args.coll->ns()); + ASSERT_EQUALS(uuid, args.coll->uuid()); + }; + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + ASSERT_TRUE(onUpdateCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyDelete_DatabaseMissing) { + auto entry = makeOplogEntry(OpTypeEnum::kDelete, + NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"), + UUID::gen()); + bool onDeleteCalled = false; + _opObserver->onDeleteFn = + [&](OperationContext* opCtx, const CollectionPtr&, StmtId, const OplogDeleteEntryArgs&) { + onDeleteCalled = true; + }; + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + // Since no database was available, the delete shouldn't actually happen. 
+ ASSERT_FALSE(onDeleteCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyDelete_CollectionMissing) { + createDatabase(_opCtx.get(), kTenantDB.toString_forTest()); + auto entry = makeOplogEntry(OpTypeEnum::kDelete, + NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"), + UUID::gen()); + bool onDeleteCalled = false; + _opObserver->onDeleteFn = + [&](OperationContext* opCtx, const CollectionPtr&, StmtId, const OplogDeleteEntryArgs&) { + onDeleteCalled = true; + }; + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + // Since no collection was available, the delete shouldn't actually happen. + ASSERT_FALSE(onDeleteCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyDelete_Success) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"); + auto uuid = createCollectionWithUuid(_opCtx.get(), nss); + ASSERT_OK(getStorageInterface()->insertDocument(_opCtx.get(), nss, {BSON("_id" << 0)}, 0)); + auto entry = makeOplogEntry(OpTypeEnum::kDelete, nss, uuid, BSON("_id" << 0)); + bool onDeleteCalled = false; + _opObserver->onDeleteFn = [&](OperationContext* opCtx, + const CollectionPtr& coll, + StmtId, + const OplogDeleteEntryArgs& args) { + onDeleteCalled = true; + ASSERT_TRUE(opCtx); + ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); + ASSERT_TRUE(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX)); + ASSERT_TRUE(opCtx->writesAreReplicated()); + ASSERT_FALSE(args.fromMigrate); + ASSERT_EQUALS(nss.dbName().db(), kTenantDB.toStringWithTenantId_forTest()); + ASSERT_EQUALS(nss.coll(), "bar"); + ASSERT_EQUALS(uuid, coll->uuid()); + }; + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + ASSERT_TRUE(onDeleteCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyCreateCollCommand_Success) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "t"); + auto op = + BSON("op" + << "c" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" + << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen()); + bool applyCmdCalled = false; + _opObserver->onCreateCollectionFn = [&](OperationContext* opCtx, + const CollectionPtr&, + const NamespaceString& collNss, + const CollectionOptions&, + const BSONObj&) { + applyCmdCalled = true; + ASSERT_TRUE(opCtx); + ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); + ASSERT_TRUE(opCtx->writesAreReplicated()); + ASSERT_EQUALS(nss, collNss); + }; + auto entry = OplogEntry(op); + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + ASSERT_TRUE(applyCmdCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyCreateCollCommand_WrongNSS) { + // Should not be able to apply a command in the wrong namespace. 
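    // (A command touching a tenant that is not part of this migration must fail the applier with
    //  InvalidTenantId rather than being silently skipped, so the onCreateCollection hook below
    //  must never fire.)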
+ auto invalidTenantDB = DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), "test"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + invalidTenantDB.toStringWithTenantId_forTest(), "bar"); + auto op = + BSON("op" + << "c" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" + << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen()); + bool applyCmdCalled = false; + _opObserver->onCreateCollectionFn = [&](OperationContext* opCtx, + const CollectionPtr&, + const NamespaceString& collNss, + const CollectionOptions&, + const BSONObj&) { + applyCmdCalled = true; + }; + auto entry = OplogEntry(op); + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + auto error = opAppliedFuture.getNoThrow().getStatus(); + + ASSERT_EQ(error, ErrorCodes::InvalidTenantId); + ASSERT_STRING_CONTAINS(error.reason(), "does not belong to a tenant being migrated"); + ASSERT_FALSE(applyCmdCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyCreateIndexesCommand_Success) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "t"); + auto uuid = createCollectionWithUuid(_opCtx.get(), nss); + auto op = + BSON("op" + << "c" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" + << BSON("createIndexes" << nss.coll() << "v" << 2 << "key" << BSON("a" << 1) << "name" + << "a_1") + << "ts" << Timestamp(1, 1) << "ui" << uuid); + bool applyCmdCalled = false; + _opObserver->onCreateIndexFn = [&](OperationContext* opCtx, + const NamespaceString& collNss, + const UUID& collUuid, + BSONObj indexDoc, + bool fromMigrate) { + ASSERT_FALSE(applyCmdCalled); + applyCmdCalled = true; + ASSERT_TRUE(opCtx->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); + ASSERT_TRUE(opCtx->writesAreReplicated()); + ASSERT_BSONOBJ_EQ(indexDoc, + BSON("v" << 2 << "key" << BSON("a" << 1) << "name" + << "a_1")); + ASSERT_EQUALS(nss, collNss); + ASSERT_EQUALS(uuid, collUuid); + }; + auto entry = OplogEntry(op); + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + ASSERT_TRUE(applyCmdCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyStartIndexBuildCommand_Failure) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "t"); + auto uuid = createCollectionWithUuid(_opCtx.get(), nss); + auto op = BSON("op" + << "c" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" + << BSON("startIndexBuild" << nss.coll() << "v" << 2 << "key" << BSON("a" << 1) + << "name" + << "a_1") + << "ts" << Timestamp(1, 1) << "ui" << uuid); + auto entry = OplogEntry(op); + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + ASSERT_EQUALS(opAppliedFuture.getNoThrow().getStatus().code(), 5434700); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyCRUD_WrongNSS) { + auto invalidTenantDB = DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), "test"); + + // Should not be able to apply a CRUD operation to a namespace not belonging to us. 
+ NamespaceString nss = NamespaceString::createNamespaceString_forTest( + invalidTenantDB.toStringWithTenantId_forTest(), "bar"); + auto uuid = createCollectionWithUuid(_opCtx.get(), nss); + auto entry = makeInsertOplogEntry(1, nss, uuid); + bool onInsertsCalled = false; + _opObserver->onInsertsFn = + [&](OperationContext* opCtx, const NamespaceString& nss, const std::vector& docs) { + onInsertsCalled = true; + }; + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + auto error = opAppliedFuture.getNoThrow().getStatus(); + + ASSERT_EQ(error, ErrorCodes::InvalidTenantId); + ASSERT_STRING_CONTAINS(error.reason(), "does not belong to a tenant being migrated"); + ASSERT_FALSE(onInsertsCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyCRUD_WrongUUID) { + // Should not be able to apply a CRUD operation to a namespace not belonging to us, even if + // we claim it does in the nss field. + auto notMyTenantDB = DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), "test"); + NamespaceString notMyTenantNss = NamespaceString::createNamespaceString_forTest( + notMyTenantDB.toStringWithTenantId_forTest(), "bar"); + + NamespaceString nss_to_apply = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "bar"); + auto uuid = createCollectionWithUuid(_opCtx.get(), notMyTenantNss); + auto entry = makeInsertOplogEntry(1, nss_to_apply, uuid); + bool onInsertsCalled = false; + _opObserver->onInsertsFn = + [&](OperationContext* opCtx, const NamespaceString& nss, const std::vector& docs) { + onInsertsCalled = true; + }; + pushOps({entry}); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(entry.getOpTime()); + auto error = opAppliedFuture.getNoThrow().getStatus(); + + ASSERT_EQ(error, ErrorCodes::NamespaceNotFound); + ASSERT_STRING_CONTAINS(error.reason(), "Database name mismatch for"); + ASSERT_FALSE(onInsertsCalled); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyNoop_Success) { + std::vector srcOps; + srcOps.push_back(makeNoopOplogEntry(1, "foo")); + pushOps(srcOps); + ASSERT_OK(_applier->startup()); + auto opAppliedFuture = _applier->getNotificationForOpTime(srcOps[0].getOpTime()); + auto futureRes = opAppliedFuture.getNoThrow(); + + auto entries = _opObserver->getEntries(); + ASSERT_EQ(1, entries.size()); + + ASSERT_OK(futureRes.getStatus()); + ASSERT_EQUALS(futureRes.getValue().donorOpTime, srcOps[0].getOpTime()); + ASSERT_EQUALS(futureRes.getValue().recipientOpTime, entries[0].getOpTime()); +} + +TEST_F(TenantOplogApplierMergeTest, ApplyInsert_MultiKeyIndex) { + createCollectionWithUuid(_opCtx.get(), NamespaceString::kSessionTransactionsTableNamespace); + NamespaceString indexedNss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "indexedColl"); + NamespaceString nonIndexedNss = NamespaceString::createNamespaceString_forTest( + kTenantDB.toStringWithTenantId_forTest(), "nonIndexedColl"); + auto indexedCollUUID = createCollectionWithUuid(_opCtx.get(), indexedNss); + createCollection(_opCtx.get(), nonIndexedNss, CollectionOptions()); + + // Create index on the collection. 
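    // (The array-valued document inserted below forces the {val: 1} index to become multikey
    //  during application, while the insert into the non-indexed collection acts as a control.)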
+ auto indexKey = BSON("val" << 1); + auto spec = BSON("v" << int(IndexDescriptor::kLatestIndexVersion) << "key" << indexKey << "name" + << "val_1"); + createIndex(_opCtx.get(), indexedNss, indexedCollUUID, spec); + + const BSONObj multiKeyDoc = BSON("_id" << 1 << "val" << BSON_ARRAY(1 << 2)); + const BSONObj singleKeyDoc = BSON("_id" << 2 << "val" << 1); + + auto indexedOp = + makeInsertDocumentOplogEntry(OpTime(Timestamp(1, 1), 1LL), indexedNss, multiKeyDoc); + auto unindexedOp = + makeInsertDocumentOplogEntry(OpTime(Timestamp(2, 1), 1LL), nonIndexedNss, singleKeyDoc); + + pushOps({indexedOp, unindexedOp}); + + ASSERT_OK(_applier->startup()); + + auto opAppliedFuture = _applier->getNotificationForOpTime(unindexedOp.getOpTime()); + ASSERT_OK(opAppliedFuture.getNoThrow().getStatus()); + + ASSERT_TRUE(docExists(_opCtx.get(), indexedNss, multiKeyDoc)); + ASSERT_TRUE(docExists(_opCtx.get(), nonIndexedNss, singleKeyDoc)); +} + +} // namespace repl +} // namespace mongo diff --git a/src/mongo/db/repl/tenant_oplog_applier_test.cpp b/src/mongo/db/repl/tenant_oplog_applier_test.cpp index 78c67b21be041..6e0fd57aeb70b 100644 --- a/src/mongo/db/repl/tenant_oplog_applier_test.cpp +++ b/src/mongo/db/repl/tenant_oplog_applier_test.cpp @@ -27,36 +27,67 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog_applier_impl_test_fixture.h" #include "mongo/db/repl/oplog_batcher_test_fixture.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/oplog_entry_test_helpers.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_decoration.h" -#include "mongo/db/repl/tenant_migration_recipient_access_blocker.h" #include "mongo/db/repl/tenant_migration_recipient_service.h" #include "mongo/db/repl/tenant_oplog_applier.h" -#include "mongo/db/repl/tenant_oplog_batcher.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/service_context_test_fixture.h" #include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/tenant_id.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/task_executor_test_fixture.h" +#include 
"mongo/executor/thread_pool_mock.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -67,6 +98,10 @@ using executor::ThreadPoolExecutorTest; namespace repl { +namespace { +const OpTime kDefaultCloneFinishedRecipientOpTime(Timestamp(1, 1), 1); +} // namespace + class TenantOplogApplierTestOpObserver : public OplogApplierImplOpObserver { public: void onInternalOpMessage(OperationContext* opCtx, @@ -197,7 +232,8 @@ class TenantOplogApplierTest : public ServiceContextMongoDTest { executor::NetworkInterfaceMock* _net; std::shared_ptr _executor; std::string _tenantId = OID::gen().toString(); - DatabaseName _dbName = DatabaseName(TenantId(OID(_tenantId)), "test"); + DatabaseName _dbName = + DatabaseName::createDatabaseName_forTest(TenantId(OID(_tenantId)), "test"); UUID _migrationUuid = UUID::gen(); ServiceContext::UniqueOperationContext _opCtx; TenantOplogApplierTestOpObserver* _opObserver; // Owned by service context opObserverRegistry @@ -211,14 +247,14 @@ class TenantOplogApplierTest : public ServiceContextMongoDTest { TEST_F(TenantOplogApplierTest, NoOpsForSingleBatch) { std::vector srcOps; - srcOps.push_back(makeInsertOplogEntry( - 1, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "foo"), - UUID::gen())); - srcOps.push_back(makeInsertOplogEntry( - 2, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(1, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "foo"), + UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(2, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen())); pushOps(srcOps); auto writerPool = makeTenantMigrationWriterPool(); @@ -226,8 +262,9 @@ TEST_F(TenantOplogApplierTest, NoOpsForSingleBatch) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -249,10 +286,10 @@ TEST_F(TenantOplogApplierTest, NoOpsForLargeBatch) { std::vector srcOps; // This should be big enough to use several threads to do the writing for (int i = 0; i < 64; i++) { - srcOps.push_back(makeInsertOplogEntry( - i + 1, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "foo"), - UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(i + 1, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "foo"), + UUID::gen())); } pushOps(srcOps); @@ -261,8 +298,9 @@ TEST_F(TenantOplogApplierTest, NoOpsForLargeBatch) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -283,30 +321,31 @@ TEST_F(TenantOplogApplierTest, NoOpsForLargeBatch) { 
TEST_F(TenantOplogApplierTest, NoOpsForMultipleBatches) { std::vector srcOps; - srcOps.push_back(makeInsertOplogEntry( - 1, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "foo"), - UUID::gen())); - srcOps.push_back(makeInsertOplogEntry( - 2, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen())); - srcOps.push_back(makeInsertOplogEntry( - 3, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "baz"), - UUID::gen())); - srcOps.push_back(makeInsertOplogEntry( - 4, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bif"), - UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(1, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "foo"), + UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(2, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(3, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "baz"), + UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(4, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bif"), + UUID::gen())); auto writerPool = makeTenantMigrationWriterPool(); auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -334,36 +373,36 @@ TEST_F(TenantOplogApplierTest, NoOpsForMultipleBatches) { TEST_F(TenantOplogApplierTest, NoOpsForLargeTransaction) { std::vector innerOps1; - innerOps1.push_back(makeInsertOplogEntry( - 11, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen())); - innerOps1.push_back(makeInsertOplogEntry( - 12, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen())); + innerOps1.push_back(makeInsertOplogEntry(11, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen())); + innerOps1.push_back(makeInsertOplogEntry(12, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen())); std::vector innerOps2; - innerOps2.push_back(makeInsertOplogEntry( - 21, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen())); - innerOps2.push_back(makeInsertOplogEntry( - 22, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen())); + innerOps2.push_back(makeInsertOplogEntry(21, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen())); + innerOps2.push_back(makeInsertOplogEntry(22, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen())); std::vector innerOps3; - innerOps3.push_back(makeInsertOplogEntry( - 31, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen())); - innerOps3.push_back(makeInsertOplogEntry( - 32, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen())); + innerOps3.push_back(makeInsertOplogEntry(31, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + 
UUID::gen())); + innerOps3.push_back(makeInsertOplogEntry(32, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen())); // Makes entries with ts from range [2, 5). std::vector srcOps = makeMultiEntryTransactionOplogEntries( - 2, _dbName.db(), /* prepared */ false, {innerOps1, innerOps2, innerOps3}); + 2, _dbName, /* prepared */ false, {innerOps1, innerOps2, innerOps3}); pushOps(srcOps); auto writerPool = makeTenantMigrationWriterPool(); @@ -371,8 +410,9 @@ TEST_F(TenantOplogApplierTest, NoOpsForLargeTransaction) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -400,8 +440,8 @@ TEST_F(TenantOplogApplierTest, CommitUnpreparedTransaction_DataPartiallyApplied) client.createIndexes(NamespaceString::kSessionTransactionsTableNamespace, {MongoDSessionCatalog::getConfigTxnPartialIndexSpec()}); } - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto lsid = makeLogicalSessionId(_opCtx.get()); TxnNumber txnNum(0); @@ -441,8 +481,9 @@ TEST_F(TenantOplogApplierTest, CommitUnpreparedTransaction_DataPartiallyApplied) auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -459,10 +500,10 @@ TEST_F(TenantOplogApplierTest, CommitUnpreparedTransaction_DataPartiallyApplied) } TEST_F(TenantOplogApplierTest, ApplyInsert_DatabaseMissing) { - auto entry = makeInsertOplogEntry( - 1, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen()); + auto entry = makeInsertOplogEntry(1, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen()); bool onInsertsCalled = false; _opObserver->onInsertsFn = [&](OperationContext* opCtx, const NamespaceString&, const std::vector&) { @@ -474,8 +515,9 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_DatabaseMissing) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -490,11 +532,11 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_DatabaseMissing) { } TEST_F(TenantOplogApplierTest, ApplyInsert_CollectionMissing) { - createDatabase(_opCtx.get(), _dbName.toString()); - auto entry = makeInsertOplogEntry( - 1, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen()); + createDatabase(_opCtx.get(), _dbName.toString_forTest()); + auto entry = makeInsertOplogEntry(1, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen()); bool onInsertsCalled = false; _opObserver->onInsertsFn = [&](OperationContext* opCtx, const NamespaceString&, const std::vector&) { @@ -506,8 +548,9 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_CollectionMissing) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + 
_tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -522,8 +565,8 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_CollectionMissing) { } TEST_F(TenantOplogApplierTest, ApplyInsert_InsertExisting) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); ASSERT_OK(getStorageInterface()->insertDocument(_opCtx.get(), nss, @@ -545,8 +588,9 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_InsertExisting) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -562,8 +606,8 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_InsertExisting) { } TEST_F(TenantOplogApplierTest, ApplyInsert_UniqueKey_InsertExisting) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); // Create unique key index on the collection. @@ -589,8 +633,9 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_UniqueKey_InsertExisting) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -605,8 +650,8 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_UniqueKey_InsertExisting) { } TEST_F(TenantOplogApplierTest, ApplyInsert_Success) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto entry = makeInsertOplogEntry(1, nss, uuid); bool onInsertsCalled = false; @@ -616,7 +661,7 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_Success) { onInsertsCalled = true; // TODO Check that (nss.dbName() == _dbName) once the OplogEntry deserializer passes // "tid" to the NamespaceString constructor - ASSERT_EQUALS(nss.dbName().db(), _dbName.toStringWithTenantId()); + ASSERT_EQUALS(nss.dbName().toString_forTest(), _dbName.toStringWithTenantId_forTest()); ASSERT_EQUALS(nss.coll(), "bar"); ASSERT_EQUALS(1, docs.size()); ASSERT_BSONOBJ_EQ(docs[0], entry.getObject()); @@ -627,8 +672,9 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_Success) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -644,11 +690,12 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_Success) { TEST_F(TenantOplogApplierTest, ApplyInserts_Grouped) { // TODO(SERVER-50256): remove nss_workaround, which is used to work around a bug where // the first operation assigned to a worker cannot be grouped. 
- NamespaceString nss_workaround(_dbName.toStringWithTenantId(), "a"); - NamespaceString nss1 = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); - NamespaceString nss2 = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "baz"); + NamespaceString nss_workaround = + NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId_forTest(), "a"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); + NamespaceString nss2 = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "baz"); auto uuid1 = createCollectionWithUuid(_opCtx.get(), nss1); auto uuid2 = createCollectionWithUuid(_opCtx.get(), nss2); std::vector entries; @@ -690,8 +737,9 @@ TEST_F(TenantOplogApplierTest, ApplyInserts_Grouped) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -706,8 +754,8 @@ TEST_F(TenantOplogApplierTest, ApplyInserts_Grouped) { } TEST_F(TenantOplogApplierTest, ApplyUpdate_MissingDocument) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto entry = makeOplogEntry(repl::OpTypeEnum::kUpdate, nss, @@ -730,8 +778,9 @@ TEST_F(TenantOplogApplierTest, ApplyUpdate_MissingDocument) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -747,8 +796,8 @@ TEST_F(TenantOplogApplierTest, ApplyUpdate_MissingDocument) { } TEST_F(TenantOplogApplierTest, ApplyUpdate_Success) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); ASSERT_OK(getStorageInterface()->insertDocument(_opCtx.get(), nss, {BSON("_id" << 0)}, 0)); auto entry = makeOplogEntry(repl::OpTypeEnum::kUpdate, @@ -769,8 +818,9 @@ TEST_F(TenantOplogApplierTest, ApplyUpdate_Success) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -784,10 +834,10 @@ TEST_F(TenantOplogApplierTest, ApplyUpdate_Success) { } TEST_F(TenantOplogApplierTest, ApplyDelete_DatabaseMissing) { - auto entry = makeOplogEntry( - OpTypeEnum::kDelete, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen()); + auto entry = makeOplogEntry(OpTypeEnum::kDelete, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen()); bool onDeleteCalled = false; _opObserver->onDeleteFn = [&](OperationContext* opCtx, const CollectionPtr&, StmtId, const OplogDeleteEntryArgs&) { @@ -799,8 +849,9 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_DatabaseMissing) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - 
_tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -815,11 +866,11 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_DatabaseMissing) { } TEST_F(TenantOplogApplierTest, ApplyDelete_CollectionMissing) { - createDatabase(_opCtx.get(), _dbName.toString()); - auto entry = makeOplogEntry( - OpTypeEnum::kDelete, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"), - UUID::gen()); + createDatabase(_opCtx.get(), _dbName.toString_forTest()); + auto entry = makeOplogEntry(OpTypeEnum::kDelete, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"), + UUID::gen()); bool onDeleteCalled = false; _opObserver->onDeleteFn = [&](OperationContext* opCtx, const CollectionPtr&, StmtId, const OplogDeleteEntryArgs&) { @@ -831,8 +882,9 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_CollectionMissing) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -847,8 +899,8 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_CollectionMissing) { } TEST_F(TenantOplogApplierTest, ApplyDelete_DocumentMissing) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto entry = makeOplogEntry(OpTypeEnum::kDelete, nss, uuid, BSON("_id" << 0)); bool onDeleteCalled = false; @@ -862,8 +914,9 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_DocumentMissing) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -878,8 +931,8 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_DocumentMissing) { } TEST_F(TenantOplogApplierTest, ApplyDelete_Success) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); ASSERT_OK(getStorageInterface()->insertDocument(_opCtx.get(), nss, {BSON("_id" << 0)}, 0)); auto entry = makeOplogEntry(OpTypeEnum::kDelete, nss, uuid, BSON("_id" << 0)); @@ -894,7 +947,7 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_Success) { ASSERT_TRUE(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX)); ASSERT_TRUE(opCtx->writesAreReplicated()); ASSERT_FALSE(args.fromMigrate); - ASSERT_EQUALS(nss.dbName().db(), _dbName.toStringWithTenantId()); + ASSERT_EQUALS(nss.dbName().toString_forTest(), _dbName.toStringWithTenantId_forTest()); ASSERT_EQUALS(nss.coll(), "bar"); ASSERT_EQUALS(uuid, coll->uuid()); }; @@ -904,8 +957,9 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_Success) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -919,12 +973,12 @@ TEST_F(TenantOplogApplierTest, ApplyDelete_Success) { } TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_CollExisting) { - NamespaceString nss = - 
NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << uuid); bool applyCmdCalled = false; _opObserver->onCreateCollectionFn = [&](OperationContext* opCtx, @@ -941,8 +995,9 @@ TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_CollExisting) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -957,17 +1012,17 @@ TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_CollExisting) { } TEST_F(TenantOplogApplierTest, ApplyRenameCollCommand_CollExisting) { - NamespaceString nss1 = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "foo"); - NamespaceString nss2 = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "foo"); + NamespaceString nss2 = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss2); - auto op = - BSON("op" - << "c" - << "ns" << nss1.getCommandNS().ns() << "wall" << Date_t() << "o" - << BSON("renameCollection" << nss1.ns() << "to" << nss2.ns() << "stayTemp" << false) - << "ts" << Timestamp(1, 1) << "ui" << uuid); + auto op = BSON("op" + << "c" + << "ns" << nss1.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" + << BSON("renameCollection" << nss1.ns_forTest() << "to" << nss2.ns_forTest() + << "stayTemp" << false) + << "ts" << Timestamp(1, 1) << "ui" << uuid); bool applyCmdCalled = false; _opObserver->onRenameCollectionFn = [&](OperationContext* opCtx, const NamespaceString& fromColl, @@ -975,7 +1030,8 @@ TEST_F(TenantOplogApplierTest, ApplyRenameCollCommand_CollExisting) { const boost::optional& uuid, const boost::optional& dropTargetUUID, std::uint64_t numRecords, - bool stayTemp) { + bool stayTemp, + bool markFromMigrate) { applyCmdCalled = true; }; auto entry = OplogEntry(op); @@ -985,8 +1041,9 @@ TEST_F(TenantOplogApplierTest, ApplyRenameCollCommand_CollExisting) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1002,11 +1059,11 @@ TEST_F(TenantOplogApplierTest, ApplyRenameCollCommand_CollExisting) { TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_Success) { NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "t"); + NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId_forTest(), "t"); auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen()); bool applyCmdCalled = false; _opObserver->onCreateCollectionFn = [&](OperationContext* opCtx, @@ -1027,8 
+1084,9 @@ TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_Success) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1043,12 +1101,12 @@ TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_Success) { TEST_F(TenantOplogApplierTest, ApplyCreateIndexesCommand_Success) { NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "t"); + NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId_forTest(), "t"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("createIndexes" << nss.coll() << "v" << 2 << "key" << BSON("a" << 1) << "name" << "a_1") << "ts" << Timestamp(1, 1) << "ui" << uuid); @@ -1075,8 +1133,9 @@ TEST_F(TenantOplogApplierTest, ApplyCreateIndexesCommand_Success) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1091,11 +1150,11 @@ TEST_F(TenantOplogApplierTest, ApplyCreateIndexesCommand_Success) { TEST_F(TenantOplogApplierTest, ApplyStartIndexBuildCommand_Failure) { NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "t"); + NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId_forTest(), "t"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("startIndexBuild" << nss.coll() << "v" << 2 << "key" << BSON("a" << 1) << "name" << "a_1") @@ -1107,8 +1166,9 @@ TEST_F(TenantOplogApplierTest, ApplyStartIndexBuildCommand_Failure) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1126,7 +1186,7 @@ TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_WrongNSS) { auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen()); bool applyCmdCalled = false; _opObserver->onCreateCollectionFn = [&](OperationContext* opCtx, @@ -1143,8 +1203,9 @@ TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_WrongNSS) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1157,49 +1218,13 @@ TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_WrongNSS) { applier->join(); } -TEST_F(TenantOplogApplierTest, ApplyCreateCollCommand_WrongNSS_Merge) { - // Should not be able to apply a command in the wrong namespace. 
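Command-type entries in these tests now serialize namespaces through the test-only accessors as well (nss.getCommandNS().ns_forTest(), nss.ns_forTest()), and the mocked onRenameCollectionFn hook gains a trailing markFromMigrate flag. Below is a reassembled sketch of the updated rename setup, trimmed to the parts these hunks touch; the boost::optional template arguments were dropped during extraction and are assumed to be UUID, so this is illustrative only.

    // Build the renameCollection command entry with the test-only serializers.
    auto op = BSON("op"
                   << "c"
                   << "ns" << nss1.getCommandNS().ns_forTest() << "wall" << Date_t() << "o"
                   << BSON("renameCollection" << nss1.ns_forTest() << "to" << nss2.ns_forTest()
                                              << "stayTemp" << false)
                   << "ts" << Timestamp(1, 1) << "ui" << uuid);

    // The rename observer callback now receives markFromMigrate as its last parameter.
    _opObserver->onRenameCollectionFn = [&](OperationContext* opCtx,
                                            const NamespaceString& fromColl,
                                            const NamespaceString& toColl,
                                            const boost::optional<UUID>& uuid,
                                            const boost::optional<UUID>& dropTargetUUID,
                                            std::uint64_t numRecords,
                                            bool stayTemp,
                                            bool markFromMigrate) { applyCmdCalled = true; };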
- NamespaceString nss = NamespaceString::createNamespaceString_forTest("noTenantDB", "t"); - auto op = - BSON("op" - << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" - << BSON("create" << nss.coll()) << "ts" << Timestamp(1, 1) << "ui" << UUID::gen()); - bool applyCmdCalled = false; - _opObserver->onCreateCollectionFn = [&](OperationContext* opCtx, - const CollectionPtr&, - const NamespaceString& collNss, - const CollectionOptions&, - const BSONObj&) { - applyCmdCalled = true; - }; - auto entry = OplogEntry(op); - pushOps({entry}); - auto writerPool = makeTenantMigrationWriterPool(); - - auto applier = std::make_shared(_migrationUuid, - MigrationProtocolEnum::kShardMerge, - boost::none, - OpTime(), - &_oplogBuffer, - _executor, - writerPool.get()); - ASSERT_OK(applier->startup()); - auto opAppliedFuture = applier->getNotificationForOpTime(entry.getOpTime()); - ASSERT_EQ(opAppliedFuture.getNoThrow().getStatus().code(), ErrorCodes::InvalidTenantId); - ASSERT_FALSE(applyCmdCalled); - applier->shutdown(); - _oplogBuffer.shutdown(_opCtx.get()); - applier->join(); -} - TEST_F(TenantOplogApplierTest, ApplyDropIndexesCommand_IndexNotFound) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("dropIndexes" << nss.coll() << "index" << "a_1") << "ts" << Timestamp(1, 1) << "ui" << uuid); @@ -1219,8 +1244,9 @@ TEST_F(TenantOplogApplierTest, ApplyDropIndexesCommand_IndexNotFound) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1235,12 +1261,12 @@ TEST_F(TenantOplogApplierTest, ApplyDropIndexesCommand_IndexNotFound) { } TEST_F(TenantOplogApplierTest, ApplyCollModCommand_IndexNotFound) { - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "bar"); auto uuid = createCollectionWithUuid(_opCtx.get(), nss); auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("collMod" << nss.coll() << "index" << BSON("name" << "data_1" @@ -1263,8 +1289,9 @@ TEST_F(TenantOplogApplierTest, ApplyCollModCommand_IndexNotFound) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1279,13 +1306,13 @@ TEST_F(TenantOplogApplierTest, ApplyCollModCommand_IndexNotFound) { } TEST_F(TenantOplogApplierTest, ApplyCollModCommand_CollectionMissing) { - createDatabase(_opCtx.get(), _dbName.toString()); - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "bar"); + createDatabase(_opCtx.get(), _dbName.toString_forTest()); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), 
"bar"); UUID uuid(UUID::gen()); auto op = BSON("op" << "c" - << "ns" << nss.getCommandNS().ns() << "wall" << Date_t() << "o" + << "ns" << nss.getCommandNS().ns_forTest() << "wall" << Date_t() << "o" << BSON("collMod" << nss.coll() << "index" << BSON("name" << "data_1" @@ -1308,8 +1335,9 @@ TEST_F(TenantOplogApplierTest, ApplyCollModCommand_CollectionMissing) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1339,8 +1367,9 @@ TEST_F(TenantOplogApplierTest, ApplyCRUD_WrongNSS) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1353,38 +1382,6 @@ TEST_F(TenantOplogApplierTest, ApplyCRUD_WrongNSS) { applier->join(); } -TEST_F(TenantOplogApplierTest, ApplyCRUD_WrongNSS_Merge) { - auto invalidTenant = TenantId(OID::gen()); - - // Should not be able to apply a CRUD operation to a namespace not belonging to us. - NamespaceString nss = - NamespaceString::createNamespaceString_forTest(DatabaseName(invalidTenant, "test"), "bar"); - auto uuid = createCollectionWithUuid(_opCtx.get(), nss); - auto entry = makeInsertOplogEntry(1, nss, uuid); - bool onInsertsCalled = false; - _opObserver->onInsertsFn = - [&](OperationContext* opCtx, const NamespaceString& nss, const std::vector& docs) { - onInsertsCalled = true; - }; - pushOps({entry}); - auto writerPool = makeTenantMigrationWriterPool(); - - auto applier = std::make_shared(_migrationUuid, - MigrationProtocolEnum::kShardMerge, - boost::none, - OpTime(), - &_oplogBuffer, - _executor, - writerPool.get()); - ASSERT_OK(applier->startup()); - auto opAppliedFuture = applier->getNotificationForOpTime(entry.getOpTime()); - ASSERT_EQ(opAppliedFuture.getNoThrow().getStatus().code(), ErrorCodes::InvalidTenantId); - ASSERT_FALSE(onInsertsCalled); - applier->shutdown(); - _oplogBuffer.shutdown(_opCtx.get()); - applier->join(); -} - TEST_F(TenantOplogApplierTest, ApplyCRUD_WrongUUID) { // Should not be able to apply a CRUD operation to a namespace not belonging to us, even if // we claim it does in the nss field. 
@@ -1403,8 +1400,9 @@ TEST_F(TenantOplogApplierTest, ApplyCRUD_WrongUUID) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1426,8 +1424,9 @@ TEST_F(TenantOplogApplierTest, ApplyNoop_Success) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1456,8 +1455,9 @@ TEST_F(TenantOplogApplierTest, ApplyResumeTokenNoop_Success) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1479,10 +1479,10 @@ TEST_F(TenantOplogApplierTest, ApplyResumeTokenNoop_Success) { TEST_F(TenantOplogApplierTest, ApplyInsertThenResumeTokenNoopInDifferentBatch_Success) { std::vector srcOps; - srcOps.push_back(makeInsertOplogEntry( - 1, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "foo"), - UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(1, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "foo"), + UUID::gen())); srcOps.push_back(makeNoopOplogEntry(2, TenantMigrationRecipientService::kNoopMsg)); pushOps(srcOps); auto writerPool = makeTenantMigrationWriterPool(); @@ -1490,8 +1490,9 @@ TEST_F(TenantOplogApplierTest, ApplyInsertThenResumeTokenNoopInDifferentBatch_Su auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1519,18 +1520,19 @@ TEST_F(TenantOplogApplierTest, ApplyInsertThenResumeTokenNoopInDifferentBatch_Su TEST_F(TenantOplogApplierTest, ApplyResumeTokenNoopThenInsertInSameBatch_Success) { std::vector srcOps; srcOps.push_back(makeNoopOplogEntry(1, TenantMigrationRecipientService::kNoopMsg)); - srcOps.push_back(makeInsertOplogEntry( - 2, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "foo"), - UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(2, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "foo"), + UUID::gen())); pushOps(srcOps); auto writerPool = makeTenantMigrationWriterPool(); auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1553,10 +1555,10 @@ TEST_F(TenantOplogApplierTest, ApplyResumeTokenNoopThenInsertInSameBatch_Success TEST_F(TenantOplogApplierTest, ApplyResumeTokenInsertThenNoopSameTimestamp_Success) { std::vector srcOps; - srcOps.push_back(makeInsertOplogEntry( - 1, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "foo"), - UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(1, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "foo"), + UUID::gen())); srcOps.push_back(makeNoopOplogEntry(1, TenantMigrationRecipientService::kNoopMsg)); pushOps(srcOps); ASSERT_EQ(srcOps[0].getOpTime(), srcOps[1].getOpTime()); @@ -1565,8 +1567,9 @@ TEST_F(TenantOplogApplierTest, ApplyResumeTokenInsertThenNoopSameTimestamp_Succe auto applier = 
std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1589,10 +1592,10 @@ TEST_F(TenantOplogApplierTest, ApplyResumeTokenInsertThenNoopSameTimestamp_Succe TEST_F(TenantOplogApplierTest, ApplyResumeTokenInsertThenNoop_Success) { std::vector srcOps; - srcOps.push_back(makeInsertOplogEntry( - 1, - NamespaceString::createNamespaceString_forTest(_dbName.toStringWithTenantId(), "foo"), - UUID::gen())); + srcOps.push_back(makeInsertOplogEntry(1, + NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "foo"), + UUID::gen())); srcOps.push_back(makeNoopOplogEntry(2, TenantMigrationRecipientService::kNoopMsg)); pushOps(srcOps); auto writerPool = makeTenantMigrationWriterPool(); @@ -1600,8 +1603,9 @@ TEST_F(TenantOplogApplierTest, ApplyResumeTokenInsertThenNoop_Success) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); @@ -1624,8 +1628,10 @@ TEST_F(TenantOplogApplierTest, ApplyResumeTokenInsertThenNoop_Success) { TEST_F(TenantOplogApplierTest, ApplyInsert_MultiKeyIndex) { createCollectionWithUuid(_opCtx.get(), NamespaceString::kSessionTransactionsTableNamespace); - NamespaceString indexedNss(_dbName.toStringWithTenantId(), "indexedColl"); - NamespaceString nonIndexedNss(_dbName.toStringWithTenantId(), "nonIndexedColl"); + NamespaceString indexedNss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "indexedColl"); + NamespaceString nonIndexedNss = NamespaceString::createNamespaceString_forTest( + _dbName.toStringWithTenantId_forTest(), "nonIndexedColl"); auto indexedCollUUID = createCollectionWithUuid(_opCtx.get(), indexedNss); createCollection(_opCtx.get(), nonIndexedNss, CollectionOptions()); @@ -1652,8 +1658,9 @@ TEST_F(TenantOplogApplierTest, ApplyInsert_MultiKeyIndex) { auto applier = std::make_shared(_migrationUuid, MigrationProtocolEnum::kMultitenantMigrations, - _tenantId, OpTime(), + kDefaultCloneFinishedRecipientOpTime, + _tenantId, &_oplogBuffer, _executor, writerPool.get()); diff --git a/src/mongo/db/repl/tenant_oplog_batcher.cpp b/src/mongo/db/repl/tenant_oplog_batcher.cpp index ee03aac5d4f3e..d6f07037c6837 100644 --- a/src/mongo/db/repl/tenant_oplog_batcher.cpp +++ b/src/mongo/db/repl/tenant_oplog_batcher.cpp @@ -28,13 +28,36 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include -#include "mongo/db/repl/tenant_oplog_batcher.h" +#include +#include +#include -#include "mongo/db/repl/apply_ops.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/apply_ops_command_info.h" #include "mongo/db/repl/oplog_batcher.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/tenant_oplog_batcher.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration diff 
--git a/src/mongo/db/repl/tenant_oplog_batcher.h b/src/mongo/db/repl/tenant_oplog_batcher.h index e9b5b450e5846..b7cf06bbeb7b6 100644 --- a/src/mongo/db/repl/tenant_oplog_batcher.h +++ b/src/mongo/db/repl/tenant_oplog_batcher.h @@ -29,13 +29,23 @@ #pragma once +#include +#include #include #include +#include "mongo/base/status_with.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/abstract_async_component.h" #include "mongo/db/repl/oplog_buffer.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/tenant_oplog_batcher_test.cpp b/src/mongo/db/repl/tenant_oplog_batcher_test.cpp index 358cac16e98cf..acadf8792f506 100644 --- a/src/mongo/db/repl/tenant_oplog_batcher_test.cpp +++ b/src/mongo/db/repl/tenant_oplog_batcher_test.cpp @@ -28,14 +28,38 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_batcher_test_fixture.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/tenant_oplog_batcher.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/tenant_id.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/task_executor_test_fixture.h" +#include "mongo/executor/thread_pool_mock.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -103,7 +127,7 @@ std::string toString(TenantOplogBatch& batch) { return sb.str(); } -constexpr auto dbName = "tenant_test"_sd; +const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(boost::none, "tenant_test"_sd); TEST_F(TenantOplogBatcherTest, CannotRequestTwoBatchesAtOnce) { auto batcher = std::make_shared( diff --git a/src/mongo/db/repl/timestamp_block.cpp b/src/mongo/db/repl/timestamp_block.cpp index 211d61d3f9c61..dcdc632769099 100644 --- a/src/mongo/db/repl/timestamp_block.cpp +++ b/src/mongo/db/repl/timestamp_block.cpp @@ -27,12 +27,9 @@ * it in the license file. 
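The header changes in this range replace the umbrella "mongo/platform/basic.h" include with explicit per-header includes, using "// IWYU pragma: no_include" escapes where include-what-you-use would otherwise pull in libstdc++ internals. The batcher test also drops its raw StringData database constant in favor of a tenant-aware DatabaseName; the new constant, taken directly from the hunk above, is:

    // Test database constant is now a DatabaseName (no tenant id here), built via
    // the test-only factory rather than a bare string.
    const DatabaseName dbName =
        DatabaseName::createDatabaseName_forTest(boost::none, "tenant_test"_sd);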
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/timestamp_block.h" - +#include "mongo/base/error_codes.h" #include "mongo/db/storage/recovery_unit.h" -#include "mongo/db/storage/storage_options.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/repl/topology_coordinator.cpp b/src/mongo/db/repl/topology_coordinator.cpp index 3c48aa8d3821a..e89479dd62b3f 100644 --- a/src/mongo/db/repl/topology_coordinator.cpp +++ b/src/mongo/db/repl/topology_coordinator.cpp @@ -36,31 +36,45 @@ ID, DLEVEL, {logv2::LogComponent::kReplicationHeartbeats}, MESSAGE, ##__VA_ARGS__) #include "mongo/db/repl/topology_coordinator.h" -#include "mongo/db/repl/topology_coordinator_gen.h" +#include +#include +#include +#include +#include +#include #include #include #include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/db/audit.h" +#include "mongo/db/basic_types.h" #include "mongo/db/catalog/commit_quorum_options.h" -#include "mongo/db/client.h" #include "mongo/db/commands/server_status_metric.h" -#include "mongo/db/mongod_options.h" -#include "mongo/db/operation_context.h" #include "mongo/db/repl/heartbeat_response_action.h" #include "mongo/db/repl/isself.h" #include "mongo/db/repl/member_data.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config_params_gen.h" +#include "mongo/db/repl/topology_coordinator_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/metadata/oplog_query_metadata.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" #include "mongo/util/fail_point.h" #include "mongo/util/hex.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -265,8 +279,23 @@ void TopologyCoordinator::_setSyncSource(HostAndPort newSyncSource, HostAndPort TopologyCoordinator::chooseNewSyncSource(Date_t now, const OpTime& lastOpTimeFetched, ReadPreference readPreference) { + // If we are not a member of the current replica set configuration, no sync source is valid. + if (_selfIndex == -1) { + LOGV2_DEBUG( + 21778, 1, "Cannot sync from any members because we are not in the replica set config"); + return HostAndPort(); + } + + // Check to see if we should choose a sync source because 'unsupportedSyncSource' was + // set. + auto maybeSyncSource = _chooseSyncSourceUnsupportedSyncSourceParameter(now); + if (maybeSyncSource) { + _setSyncSource(*maybeSyncSource, now, false /* fromReplSetSyncFrom */); + return _syncSource; + } + // Check to see if we should choose a sync source because the 'replSetSyncFrom' command was set. - auto maybeSyncSource = _chooseSyncSourceReplSetSyncFrom(now); + maybeSyncSource = _chooseSyncSourceReplSetSyncFrom(now); if (maybeSyncSource) { // If we have a forced sync source via 'replSetSyncFrom', set the _replSetSyncFromSet flag // to true. 
@@ -524,9 +553,7 @@ bool TopologyCoordinator::_isEligibleSyncSource(int candidateIndex, } boost::optional TopologyCoordinator::_chooseSyncSourceReplSetSyncFrom(Date_t now) { - if (_selfIndex == -1) { - return boost::none; - } + invariant(_selfIndex != -1, "Unexpectedly not in the replica set config"); if (_forceSyncSourceIndex == -1) { return boost::none; @@ -542,14 +569,44 @@ boost::optional TopologyCoordinator::_chooseSyncSourceReplSetSyncFr return syncSource; } -boost::optional TopologyCoordinator::_chooseSyncSourceInitialChecks(Date_t now) { - // If we are not a member of the current replica set configuration, no sync source is valid. - if (_selfIndex == -1) { - LOGV2_DEBUG( - 21778, 1, "Cannot sync from any members because we are not in the replica set config"); - return HostAndPort(); +boost::optional TopologyCoordinator::_chooseSyncSourceUnsupportedSyncSourceParameter( + Date_t now) { + invariant(_selfIndex != -1, "Unexpectedly not in the replica set config"); + + auto syncSourceStr = repl::unsupportedSyncSource; + if (syncSourceStr.empty()) { + return boost::none; + } + auto syncSource = HostAndPort(syncSourceStr); + const int syncSourceIndex = _rsConfig.findMemberIndexByHostAndPort(syncSource); + if (syncSourceIndex < 0) { + LOGV2_FATAL( + 7785600, + "Selecting node specified in 'unsupportedSyncSource' parameter failed due to host " + "and port not in replica set config.", + "unsupportedSyncSource"_attr = syncSourceStr); } + if (_selfIndex == syncSourceIndex) { + LOGV2_FATAL( + 7785601, + "Node specified in 'unsupportedSyncSource' parameter is self: cannot select self as " + "a sync source", + "unsupportedSyncSource"_attr = syncSourceStr); + } + + LOGV2(7785602, + "Choosing sync source candidate specified by 'unsupportedSyncSource' parameter", + "syncSource"_attr = syncSourceStr, + "syncsourceobj"_attr = syncSource); + std::string msg(str::stream() << "syncing from: " << syncSourceStr << " by request"); + setMyHeartbeatMessage(now, msg); + return syncSource; +} + +boost::optional TopologyCoordinator::_chooseSyncSourceInitialChecks(Date_t now) { + invariant(_selfIndex != -1, "Unexpectedly not in the replica set config"); + if (auto sfp = forceSyncSourceCandidate.scoped(); MONGO_unlikely(sfp.isActive())) { const auto& data = sfp.getData(); const auto hostAndPortElem = data["hostAndPort"]; @@ -703,6 +760,13 @@ void TopologyCoordinator::prepareSyncFromResponse(const HostAndPort& target, return; } + if (!repl::unsupportedSyncSource.empty()) { + *result = + Status(ErrorCodes::IllegalOperation, + "replSetSyncFrom may not be used when 'unsupportedSyncSource' parameter is set"); + return; + } + ReplSetConfig::MemberIterator targetConfig = _rsConfig.membersEnd(); int targetIndex = 0; for (ReplSetConfig::MemberIterator it = _rsConfig.membersBegin(); it != _rsConfig.membersEnd(); @@ -3132,6 +3196,13 @@ TopologyCoordinator::_shouldChangeSyncSourceInitialChecks(const HostAndPort& cur return {ChangeSyncSourceDecision::kNo, -1}; } + if (!repl::unsupportedSyncSource.empty()) { + LOGV2(7785604, + "Not choosing new sync source because sync source is forced via " + "'unsupportedSyncSource' parameter"); + return {ChangeSyncSourceDecision::kNo, -1}; + } + // If the user requested a sync source change, return kYes. if (_forceSyncSourceIndex != -1) { LOGV2(21829, @@ -3318,10 +3389,11 @@ bool TopologyCoordinator::shouldChangeSyncSourceDueToPingTime(const HostAndPort& // If we find an eligible sync source that is significantly closer than our current sync source, // return true. 
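The new 'unsupportedSyncSource' startup parameter forces a specific sync source: when it is set, chooseNewSyncSource resolves the host against the replica set config before any replSetSyncFrom or normal selection logic runs, prepareSyncFromResponse now rejects replSetSyncFrom with IllegalOperation, and _shouldChangeSyncSourceInitialChecks refuses to switch away from the forced source. A reassembled core of the parameter handling (illustrative, not standalone):

    // Empty parameter means no forced source; a host outside the config, or this
    // node itself, is a fatal configuration error at selection time.
    auto syncSourceStr = repl::unsupportedSyncSource;
    if (syncSourceStr.empty()) {
        return boost::none;  // fall through to replSetSyncFrom / normal selection
    }
    auto syncSource = HostAndPort(syncSourceStr);
    const int syncSourceIndex = _rsConfig.findMemberIndexByHostAndPort(syncSource);
    if (syncSourceIndex < 0) {
        LOGV2_FATAL(7785600,
                    "Selecting node specified in 'unsupportedSyncSource' parameter failed due to "
                    "host and port not in replica set config.",
                    "unsupportedSyncSource"_attr = syncSourceStr);
    }
    if (_selfIndex == syncSourceIndex) {
        LOGV2_FATAL(7785601,
                    "Node specified in 'unsupportedSyncSource' parameter is self: cannot select "
                    "self as a sync source",
                    "unsupportedSyncSource"_attr = syncSourceStr);
    }
    std::string msg(str::stream() << "syncing from: " << syncSourceStr << " by request");
    setMyHeartbeatMessage(now, msg);
    return syncSource;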
- // Do not re-evaluate our sync source if it was set via the replSetSyncFrom command or the - // forceSyncSourceCandidate failpoint. + // Do not re-evaluate our sync source if it was set via the replSetSyncFrom command, the + // forceSyncSourceCandidate failpoint, or the 'unsupportedSyncSource' server parameter. auto sfp = forceSyncSourceCandidate.scoped(); - if (_replSetSyncFromSet || MONGO_unlikely(sfp.isActive())) { + if (_replSetSyncFromSet || MONGO_unlikely(sfp.isActive()) || + !repl::unsupportedSyncSource.empty()) { return false; } diff --git a/src/mongo/db/repl/topology_coordinator.h b/src/mongo/db/repl/topology_coordinator.h index 3285a5b48255b..2bde19c0ef5b5 100644 --- a/src/mongo/db/repl/topology_coordinator.h +++ b/src/mongo/db/repl/topology_coordinator.h @@ -29,23 +29,46 @@ #pragma once +#include #include #include +#include +#include #include #include - +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/read_preference.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/repl/hello_response.h" #include "mongo/db/repl/last_vote.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" #include "mongo/db/repl/repl_set_request_votes_args.h" +#include "mongo/db/repl/repl_set_tag.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_metrics_gen.h" #include "mongo/db/repl/split_horizon.h" #include "mongo/db/repl/update_position_args.h" #include "mongo/db/server_options.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" #include "mongo/rpc/topology_version_gen.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" @@ -906,6 +929,9 @@ class TopologyCoordinator { // Returns a HostAndPort if one is forced via the 'replSetSyncFrom' command. boost::optional _chooseSyncSourceReplSetSyncFrom(Date_t now); + // Returns a HostAndPort if one is forced via the 'unsupportedSyncSource' startup parameter. + boost::optional _chooseSyncSourceUnsupportedSyncSourceParameter(Date_t now); + // Does preliminary checks involved in choosing sync source // * Do we have a valid configuration? // * Is the 'forceSyncSourceCandidate' failpoint enabled? 
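shouldChangeSyncSourceDueToPingTime gets the same guard: a sync source forced by replSetSyncFrom, the forceSyncSourceCandidate failpoint, or the 'unsupportedSyncSource' parameter is never traded for a closer node. Reassembled from the hunk above (illustrative):

    // Do not re-evaluate a forced sync source, whatever the ping-time difference.
    auto sfp = forceSyncSourceCandidate.scoped();
    if (_replSetSyncFromSet || MONGO_unlikely(sfp.isActive()) ||
        !repl::unsupportedSyncSource.empty()) {
        return false;
    }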
diff --git a/src/mongo/db/repl/topology_coordinator_v1_test.cpp b/src/mongo/db/repl/topology_coordinator_v1_test.cpp index 2d9c448ae036f..d2864614c8bf9 100644 --- a/src/mongo/db/repl/topology_coordinator_v1_test.cpp +++ b/src/mongo/db/repl/topology_coordinator_v1_test.cpp @@ -28,34 +28,75 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog/commit_quorum_options.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/repl/heartbeat_response_action.h" #include "mongo/db/repl/isself.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_set_heartbeat_args_v1.h" #include "mongo/db/repl/repl_set_heartbeat_response.h" #include "mongo/db/repl/repl_set_request_votes_args.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_metrics_gen.h" #include "mongo/db/repl/topology_coordinator.h" #include "mongo/db/server_options.h" -#include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/metadata/oplog_query_metadata.h" #include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/net/hostandport.h" -#include "mongo/util/net/socket_utils.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -3663,9 +3704,9 @@ TEST_F(TopoCoordTest, DoNotGrantDryRunVoteWhenOpTimeIsStale) { TEST_F(TopoCoordTest, NodeTransitionsToRemovedIfCSRSButHaveNoReadCommittedSupport) { ON_BLOCK_EXIT([]() { serverGlobalParams.clusterRole = ClusterRole::None; }); - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; TopologyCoordinator::Options options; - options.clusterRole = ClusterRole::ConfigServer; + options.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; setOptions(options); getTopoCoord().setStorageEngineSupportsReadCommitted(false); @@ -3685,9 +3726,9 @@ TEST_F(TopoCoordTest, NodeTransitionsToRemovedIfCSRSButHaveNoReadCommittedSuppor TEST_F(TopoCoordTest, 
NodeBecomesSecondaryAsNormalWhenReadCommittedSupportedAndCSRS) { ON_BLOCK_EXIT([]() { serverGlobalParams.clusterRole = ClusterRole::None; }); - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; TopologyCoordinator::Options options; - options.clusterRole = ClusterRole::ConfigServer; + options.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; setOptions(options); getTopoCoord().setStorageEngineSupportsReadCommitted(true); @@ -4879,6 +4920,107 @@ TEST_F(ReevalSyncSourceTest, NoChangeWhenSyncSourceForcedByFailPoint) { ReadPreference::Nearest)); } +// Test that we will select the node specified by the 'unsupportedSyncSource' parameter as a sync +// source even if it is farther away. +TEST_F(ReevalSyncSourceTest, ChooseSyncSourceForcedByStartupParameterEvenIfFarther) { + RAIIServerParameterControllerForTest syncSourceParamGuard{"unsupportedSyncSource", + "host2:27017"}; + + // Make the desired host much farther away. + getTopoCoord().setPing_forTest(HostAndPort("host2"), pingTime); + getTopoCoord().setPing_forTest(HostAndPort("host3"), significantlyCloserPingTime); + + // Select a sync source. + auto syncSource = + getTopoCoord().chooseNewSyncSource(now()++, OpTime(), ReadPreference::Nearest); + ASSERT_EQ(syncSource, HostAndPort("host2:27017")); +} + +// Test that we will not change from the node specified by the 'unsupportedSyncSource' parameter +// due to ping time. +TEST_F(ReevalSyncSourceTest, NoChangeDueToPingTimeWhenSyncSourceForcedByStartupParameter) { + RAIIServerParameterControllerForTest syncSourceParamGuard{"unsupportedSyncSource", + "host2:27017"}; + // Select a sync source. + auto syncSource = + getTopoCoord().chooseNewSyncSource(now()++, OpTime(), ReadPreference::Nearest); + ASSERT_EQ(syncSource, HostAndPort("host2:27017")); + + // Set up so that without forcing the sync source, the node otherwise would have changed sync + // sources. + getTopoCoord().setPing_forTest(HostAndPort("host2"), pingTime); + getTopoCoord().setPing_forTest(HostAndPort("host3"), significantlyCloserPingTime); + + ASSERT_FALSE(getTopoCoord().shouldChangeSyncSourceDueToPingTime(HostAndPort("host2"), + MemberState::RS_SECONDARY, + lastOpTimeFetched, + now(), + ReadPreference::Nearest)); +} + +// Test that we will not change sync sources due to the replSetSyncFrom command being run when +// the 'unsupportedSyncSource' startup parameter is set - that is, the parameter should always +// take priority. +TEST_F(ReevalSyncSourceTest, NoChangeDueToReplSetSyncFromWhenSyncSourceForcedByStartupParameter) { + RAIIServerParameterControllerForTest syncSourceParamGuard{"unsupportedSyncSource", + "host2:27017"}; + // Select a sync source. + auto syncSource = + getTopoCoord().chooseNewSyncSource(now()++, OpTime(), ReadPreference::Nearest); + ASSERT_EQ(syncSource, HostAndPort("host2:27017")); + + // Simulate calling replSetSyncFrom and selecting host3. + BSONObjBuilder response; + auto result = Status::OK(); + getTopoCoord().prepareSyncFromResponse(HostAndPort("host3:27017"), &response, &result); + + // Assert the command failed. + ASSERT_EQ(result.code(), ErrorCodes::IllegalOperation); + + // Reselect a sync source, and confirm it hasn't changed. + syncSource = getTopoCoord().chooseNewSyncSource(now()++, OpTime(), ReadPreference::Nearest); + ASSERT_EQ(syncSource, HostAndPort("host2:27017")); +} + +// Test that if a node is REMOVED but has 'unsupportedSyncSource' specified, we select no sync +// source. 
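The new tests drive the parameter through RAIIServerParameterControllerForTest, which sets a named server parameter for the scope of the test and restores it on destruction; the CSRS tests in this file also now build ClusterRole as a set of roles ({ClusterRole::ShardServer, ClusterRole::ConfigServer}). A reassembled sketch of the simplest of the new tests, assuming the fixture's usual host1..host3 members and ping-time helpers:

    TEST_F(ReevalSyncSourceTest, ChooseSyncSourceForcedByStartupParameterEvenIfFarther) {
        // Force the sync source for the duration of this test only.
        RAIIServerParameterControllerForTest syncSourceParamGuard{"unsupportedSyncSource",
                                                                  "host2:27017"};

        // Make the forced host look much worse than an alternative candidate.
        getTopoCoord().setPing_forTest(HostAndPort("host2"), pingTime);
        getTopoCoord().setPing_forTest(HostAndPort("host3"), significantlyCloserPingTime);

        // The forced host is still selected.
        auto syncSource =
            getTopoCoord().chooseNewSyncSource(now()++, OpTime(), ReadPreference::Nearest);
        ASSERT_EQ(syncSource, HostAndPort("host2:27017"));
    }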
+TEST_F(ReevalSyncSourceTest, RemovedNodeSpecifiesSyncSourceStartupParameter) { + RAIIServerParameterControllerForTest syncSourceParamGuard{"unsupportedSyncSource", + "host2:27017"}; + // Remove ourselves from the config. + updateConfig(BSON("_id" + << "rs0" + << "version" << 2 << "members" + << BSON_ARRAY(BSON("_id" << 1 << "host" + << "host2:27017") + << BSON("_id" << 2 << "host" + << "host3:27017"))), + -1); + // Confirm we were actually removed. + ASSERT_EQUALS(MemberState::RS_REMOVED, getTopoCoord().getMemberState().s); + // Confirm we select no sync source. + auto syncSource = + getTopoCoord().chooseNewSyncSource(now()++, OpTime(), ReadPreference::Nearest); + ASSERT_EQUALS(syncSource, HostAndPort()); +} + +// Test that we crash if the 'unsupportedSyncSource' parameter specifies a node that is not in the +// replica set config. +DEATH_TEST_F(ReevalSyncSourceTest, CrashOnSyncSourceParameterNotInReplSet, "7785600") { + RAIIServerParameterControllerForTest syncSourceParamGuard{"unsupportedSyncSource", + "host4:27017"}; + auto syncSource = + getTopoCoord().chooseNewSyncSource(now()++, OpTime(), ReadPreference::Nearest); +} + +// Test that we crash if the 'unsupportedSyncSource' parameter specifies ourself as a node. +DEATH_TEST_F(ReevalSyncSourceTest, CrashOnSyncSourceParameterIsSelf, "7785601") { + RAIIServerParameterControllerForTest syncSourceParamGuard{"unsupportedSyncSource", + "host1:27017"}; + auto syncSource = + getTopoCoord().chooseNewSyncSource(now()++, OpTime(), ReadPreference::Nearest); +} + class HeartbeatResponseReconfigTestV1 : public TopoCoordTest { public: virtual void setUp() { diff --git a/src/mongo/db/repl/topology_version_observer.cpp b/src/mongo/db/repl/topology_version_observer.cpp index 07eb6c5f3273b..bccf7b8cf33bc 100644 --- a/src/mongo/db/repl/topology_version_observer.cpp +++ b/src/mongo/db/repl/topology_version_observer.cpp @@ -30,12 +30,26 @@ #include "mongo/db/repl/topology_version_observer.h" -#include "mongo/base/status_with.h" +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -178,6 +192,12 @@ void TopologyVersionObserver::_workerThreadBody() noexcept try { invariant(_serviceContext); ThreadClient tc(kTopologyVersionObserverName, _serviceContext); + // TODO(SERVER-74656): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto getTopologyVersion = [&]() -> boost::optional { // Only the observer thread updates `_cache`, thus there is no need to hold the lock before // accessing `_cache` here. 
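Misconfiguration of the parameter is covered by DEATH_TEST_F cases keyed on the fatal log ids above (7785600 for a host outside the config, 7785601 for self). Separately, the topology version observer's worker thread now opts its client out of being killed by stepdown, a pattern this diff applies to several background clients pending SERVER-74656. Reassembled from the hunk above (illustrative):

    // Background worker client marked unkillable by stepdown until SERVER-74656
    // revisits whether it can be made killable.
    ThreadClient tc(kTopologyVersionObserverName, _serviceContext);
    {
        stdx::lock_guard lk(*tc.get());
        tc.get()->setSystemOperationUnkillableByStepdown(lk);
    }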
diff --git a/src/mongo/db/repl/topology_version_observer.h b/src/mongo/db/repl/topology_version_observer.h index 01e27f8b96fee..6df666c3c0ec8 100644 --- a/src/mongo/db/repl/topology_version_observer.h +++ b/src/mongo/db/repl/topology_version_observer.h @@ -29,16 +29,25 @@ #pragma once +#include #include #include +#include +#include #include #include +#include "mongo/db/operation_context.h" #include "mongo/db/repl/hello_response.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/topology_version_observer_test.cpp b/src/mongo/db/repl/topology_version_observer_test.cpp index c42547030911d..621f9ba01823e 100644 --- a/src/mongo/db/repl/topology_version_observer_test.cpp +++ b/src/mongo/db/repl/topology_version_observer_test.cpp @@ -28,24 +28,35 @@ */ -#include "mongo/platform/basic.h" - -#include +#include #include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/db/client.h" #include "mongo/db/repl/hello_response.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator_impl.h" #include "mongo/db/repl/replication_coordinator_test_fixture.h" #include "mongo/db/repl/topology_version_observer.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" -#include "mongo/util/clock_source.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/repl/transaction_oplog_application.cpp b/src/mongo/db/repl/transaction_oplog_application.cpp index 6b67609df1713..027a05521e59a 100644 --- a/src/mongo/db/repl/transaction_oplog_application.cpp +++ b/src/mongo/db/repl/transaction_oplog_application.cpp @@ -28,23 +28,68 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/transaction_oplog_application.h" - -#include "mongo/db/catalog_raii.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/client.h" #include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" #include 
"mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/repl/apply_ops.h" -#include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/timestamp_block.h" +#include "mongo/db/repl/transaction_oplog_application.h" +#include "mongo/db/repl_index_build_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/internal_session_pool.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_history_iterator.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -123,10 +168,16 @@ Status _applyOperationsForTransaction(OperationContext* opCtx, // Presently, it is not allowed to run a prepared transaction with a command // inside. TODO(SERVER-46105) invariant(!op.isCommand()); - AutoGetCollection coll(opCtx, op.getNss(), MODE_IX); + auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest(op.getNss(), + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); const bool isDataConsistent = true; auto status = repl::applyOperation_inlock(opCtx, - coll.getDb(), + coll, ApplierOperation{&op}, false /*alwaysUpsert*/, oplogApplicationMode, @@ -154,7 +205,7 @@ Status _applyOperationsForTransaction(OperationContext* opCtx, if (ex.code() == ErrorCodes::NamespaceNotFound && oplogApplicationMode == repl::OplogApplication::Mode::kStableRecovering) { repl::OplogApplication::checkOnOplogFailureForRecovery( - opCtx, redact(op.toBSONForLogging()), redact(ex)); + opCtx, op.getNss(), redact(op.toBSONForLogging()), redact(ex)); } if (!ignoreException) { @@ -202,7 +253,7 @@ Status _applyTransactionFromOplogChain(OperationContext* opCtx, const auto dbName = entry.getNss().dbName(); Status status = Status::OK(); - writeConflictRetry(opCtx, "replaying prepared transaction", dbName.db(), [&] { + writeConflictRetry(opCtx, "replaying prepared transaction", NamespaceString(dbName), [&] { WriteUnitOfWork wunit(opCtx); // We might replay a prepared transaction behind oldest timestamp. 
@@ -563,7 +614,7 @@ Status _applyPrepareTransaction(OperationContext* opCtx, opCtx->resetMultiDocumentTransactionState(); }); - return writeConflictRetry(opCtx, "applying prepare transaction", prepareOp.getNss().ns(), [&] { + return writeConflictRetry(opCtx, "applying prepare transaction", prepareOp.getNss(), [&] { // The write on transaction table may be applied concurrently, so refreshing // state from disk may read that write, causing starting a new transaction // on an existing txnNumber. Thus, we start a new transaction without @@ -639,7 +690,8 @@ Status _applyPrepareTransaction(OperationContext* opCtx, auto opObserver = opCtx->getServiceContext()->getOpObserver(); invariant(opObserver); - opObserver->onTransactionPrepareNonPrimary(opCtx, txnOps, prepareOp.getOpTime()); + opObserver->onTransactionPrepareNonPrimary( + opCtx, *prepareOp.getSessionId(), txnOps, prepareOp.getOpTime()); // Prepare transaction success. abortOnError.dismiss(); @@ -712,14 +764,8 @@ Status applyPrepareTransaction(OperationContext* opCtx, case repl::OplogApplication::Mode::kSecondary: { switch (op.instruction) { case repl::ApplicationInstruction::applyOplogEntry: { - // Checkout the session and apply non-split prepare op. - // TODO (SERVER-70578): This can no longer happen once the feature flag - // is removed. - invariant(!op.subSession); - invariant(!op.preparedTxnOps); - auto ops = readTransactionOperationsFromOplogChain(opCtx, *op, {}); - return _applyPrepareTransaction( - opCtx, *op, *op->getSessionId(), *op->getTxnNumber(), ops, mode); + // Not possible for secondary when applying prepare oplog entries. + MONGO_UNREACHABLE; } case repl::ApplicationInstruction::applySplitPreparedTxnOp: { // Checkout the session and apply split prepare op. @@ -794,10 +840,20 @@ void reconstructPreparedTransactions(OperationContext* opCtx, repl::OplogApplica // transaction oplog entry. auto newClient = opCtx->getServiceContext()->makeClient("reconstruct-prepared-transactions"); + + // TODO(SERVER-74656): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*newClient.get()); + newClient.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(newClient); const auto newOpCtx = cc().makeOperationContext(); - _reconstructPreparedTransaction(newOpCtx.get(), prepareOplogEntry, mode); + // Ignore interruptions while reconstructing prepared transactions, so that we do not + // fassert and crash due to interruptions inside this call. + newOpCtx->runWithoutInterruptionExceptAtGlobalShutdown( + [&] { _reconstructPreparedTransaction(newOpCtx.get(), prepareOplogEntry, mode); }); } } } diff --git a/src/mongo/db/repl/transaction_oplog_application.h b/src/mongo/db/repl/transaction_oplog_application.h index 6fb6e08560651..501b468cae1f9 100644 --- a/src/mongo/db/repl/transaction_oplog_application.h +++ b/src/mongo/db/repl/transaction_oplog_application.h @@ -29,10 +29,15 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/multiapplier.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" namespace mongo { diff --git a/src/mongo/db/repl/update_position_args.cpp b/src/mongo/db/repl/update_position_args.cpp index d1128f3c2c073..bd54d321480cd 100644 --- a/src/mongo/db/repl/update_position_args.cpp +++ b/src/mongo/db/repl/update_position_args.cpp @@ -27,14 +27,12 @@ * it in the license file. 
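Prepared-transaction reconstruction now runs on a dedicated client that uses the same unkillable-by-stepdown opt-out shown earlier and wraps the work in runWithoutInterruptionExceptAtGlobalShutdown, so an interruption can no longer trigger an fassert mid-reconstruction; the non-split secondary prepare path becomes MONGO_UNREACHABLE, and onTransactionPrepareNonPrimary now also receives the prepare entry's session id. Reassembled from the hunk above (illustrative):

    // Reconstruction is isolated on its own client and ignores interruptions
    // except at global shutdown.
    auto newClient =
        opCtx->getServiceContext()->makeClient("reconstruct-prepared-transactions");
    AlternativeClientRegion acr(newClient);
    const auto newOpCtx = cc().makeOperationContext();
    newOpCtx->runWithoutInterruptionExceptAtGlobalShutdown(
        [&] { _reconstructPreparedTransaction(newOpCtx.get(), prepareOplogEntry, mode); });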
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/repl/update_position_args.h" - #include "mongo/base/status.h" -#include "mongo/bson/util/bson_check.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/bson_extract_optime.h" namespace mongo { diff --git a/src/mongo/db/repl/update_position_args.h b/src/mongo/db/repl/update_position_args.h index 2bad4f8363597..dbff040fbf9b7 100644 --- a/src/mongo/db/repl/update_position_args.h +++ b/src/mongo/db/repl/update_position_args.h @@ -31,8 +31,11 @@ #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/repl/optime.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/repl/vote_requester.cpp b/src/mongo/db/repl/vote_requester.cpp index 7ce94e116c413..b3eab05c0a373 100644 --- a/src/mongo/db/repl/vote_requester.cpp +++ b/src/mongo/db/repl/vote_requester.cpp @@ -28,17 +28,28 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/vote_requester.h" - +#include +#include +#include +#include #include +#include + #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/repl/member_config.h" #include "mongo/db/repl/repl_set_request_votes_args.h" #include "mongo/db/repl/scatter_gather_runner.h" +#include "mongo/db/repl/vote_requester.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/util/duration.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplicationElection diff --git a/src/mongo/db/repl/vote_requester.h b/src/mongo/db/repl/vote_requester.h index 3962e4dec77bf..88aa4b92cf70f 100644 --- a/src/mongo/db/repl/vote_requester.h +++ b/src/mongo/db/repl/vote_requester.h @@ -29,16 +29,22 @@ #pragma once +#include #include #include #include +#include "mongo/base/status_with.h" #include "mongo/bson/timestamp.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/scatter_gather_algorithm.h" #include "mongo/db/repl/scatter_gather_runner.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/db/repl/vote_requester_test.cpp b/src/mongo/db/repl/vote_requester_test.cpp index 48c401dd87653..ade2d46e92215 100644 --- a/src/mongo/db/repl/vote_requester_test.cpp +++ b/src/mongo/db/repl/vote_requester_test.cpp @@ -27,20 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include +#include +#include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/repl_set_request_votes_args.h" #include "mongo/db/repl/vote_requester.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/str.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/repl/wait_for_majority_service.cpp b/src/mongo/db/repl/wait_for_majority_service.cpp index 37d6407e0feeb..751ef9b22a81a 100644 --- a/src/mongo/db/repl/wait_for_majority_service.cpp +++ b/src/mongo/db/repl/wait_for_majority_service.cpp @@ -28,20 +28,32 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl/wait_for_majority_service.h" - +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/read_concern.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/write_concern.h" -#include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/logv2/log.h" +#include "mongo/db/write_concern_options.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" #include "mongo/util/future_util.h" #include "mongo/util/static_immortal.h" @@ -107,6 +119,16 @@ void WaitForMajorityServiceImplBase::startup(ServiceContext* ctx) { ClientStrand::make(ctx->makeClient(kWaitClientName + _getReadOrWrite())); _waitForMajorityCancellationClient = ClientStrand::make(ctx->makeClient(kCancelClientName + _getReadOrWrite())); + // TODO(SERVER-74656): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*_waitForMajorityClient->getClientPointer()); + _waitForMajorityClient->getClientPointer()->setSystemOperationUnkillableByStepdown(lk); + } + { + stdx::lock_guard lk(*_waitForMajorityCancellationClient->getClientPointer()); + _waitForMajorityCancellationClient->getClientPointer() + ->setSystemOperationUnkillableByStepdown(lk); + } _backgroundWorkComplete = _periodicallyWaitForMajority(); _pool->startup(); _state = State::kRunning; diff --git a/src/mongo/db/repl/wait_for_majority_service.h b/src/mongo/db/repl/wait_for_majority_service.h index e55b337b85824..0879a999fe4b7 100644 --- a/src/mongo/db/repl/wait_for_majority_service.h +++ b/src/mongo/db/repl/wait_for_majority_service.h @@ -29,21 +29,32 @@ #pragma once +#include +#include +#include +#include +#include +#include // IWYU pragma: keep #include #include +#include #include -#include -#include - +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/client_strand.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" #include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { diff --git a/src/mongo/db/repl/wait_for_majority_service_test.cpp b/src/mongo/db/repl/wait_for_majority_service_test.cpp index 7569b03292127..aafc5ef38ca69 100644 --- a/src/mongo/db/repl/wait_for_majority_service_test.cpp +++ b/src/mongo/db/repl/wait_for_majority_service_test.cpp @@ -27,15 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +// IWYU pragma: no_include "cxxabi.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/storage/snapshot_manager.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/platform/mutex.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/repl_index_build_state.cpp b/src/mongo/db/repl_index_build_state.cpp index 72b447a0f5372..8a9f074c6a08b 100644 --- a/src/mongo/db/repl_index_build_state.cpp +++ b/src/mongo/db/repl_index_build_state.cpp @@ -27,16 +27,34 @@ * it in the license file. 
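Both `reconstructPreparedTransactions` and `WaitForMajorityServiceImplBase::startup` above create dedicated background clients and mark them unkillable by stepdown (see the `TODO(SERVER-74656)` notes); the prepared-transaction path additionally runs its work with interruptions ignored except at global shutdown. A minimal sketch of the combined pattern, assembled only from calls that appear in the hunks; the client name and the lambda body are placeholders.

```cpp
#include "mongo/db/client.h"
#include "mongo/db/service_context.h"

namespace mongo {

// Sketch only: create a background client, mark it unkillable by stepdown while holding
// the client lock (as both hunks above do), then run the work on an operation context
// that ignores interruption except at global shutdown (as the prepared-transaction hunk
// does). "my-background-task" and the lambda body are placeholders, not names from the
// patch.
void runBackgroundWorkUnkillableByStepdown(ServiceContext* serviceContext) {
    auto newClient = serviceContext->makeClient("my-background-task");
    {
        stdx::lock_guard lk(*newClient.get());
        newClient.get()->setSystemOperationUnkillableByStepdown(lk);
    }
    AlternativeClientRegion acr(newClient);

    const auto opCtx = cc().makeOperationContext();
    opCtx->runWithoutInterruptionExceptAtGlobalShutdown([&] {
        // ... perform the actual background work with opCtx.get() ...
    });
}

}  // namespace mongo
```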
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/repl_index_build_state.h" - -#include "mongo/db/query/index_bounds_builder.h" +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl_index_build_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -95,40 +113,41 @@ void IndexBuildState::setState(State state, str::stream() << "current state :" << toString(_state) << ", new state: " << toString(state)); } + LOGV2_DEBUG(6826201, + 1, + "Index build: transitioning state", + "current"_attr = toString(_state), + "new"_attr = toString(state)); _state = state; if (timestamp) _timestamp = timestamp; if (abortStatus) { - invariant(_state == kAborted || _state == kAwaitPrimaryAbort || _state == kForceSelfAbort); + invariant(_state == kAborted || _state == kFailureCleanUp || _state == kExternalAbort); _abortStatus = *abortStatus; } } bool IndexBuildState::_checkIfValidTransition(IndexBuildState::State currentState, IndexBuildState::State newState) const { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - const auto graceful = - feature_flags::gIndexBuildGracefulErrorHandling.isEnabledAndIgnoreFCVUnsafe(); + const auto graceful = feature_flags::gIndexBuildGracefulErrorHandling.isEnabled( + serverGlobalParams.featureCompatibility); switch (currentState) { case IndexBuildState::State::kSetup: return // Normal case. newState == IndexBuildState::State::kPostSetup || - // Setup failed on a primary. - newState == IndexBuildState::State::kAborted || - // Setup failed and we signalled the current primary to abort. - (graceful && newState == IndexBuildState::State::kAwaitPrimaryAbort); + // Setup failed on a primary, proceed to cleanup. At this point cleanup only + // requires unregistering. + newState == IndexBuildState::State::kFailureCleanUp; case IndexBuildState::State::kPostSetup: return // Normal case. newState == IndexBuildState::State::kInProgress || - // After setup, the primary aborted the index build. - newState == IndexBuildState::State::kAborted || - // After setup, we signalled the current primary to abort the index build. - (graceful && newState == IndexBuildState::State::kAwaitPrimaryAbort) || - // We were forced to abort ourselves externally. - (graceful && newState == IndexBuildState::State::kForceSelfAbort); + // The index build was aborted, and the caller took responsibility for cleanup. + newState == IndexBuildState::State::kExternalAbort || + // Internal failure or interrupted (user killOp, disk space monitor, or shutdown). 
+ newState == IndexBuildState::State::kFailureCleanUp; case IndexBuildState::State::kInProgress: return @@ -137,12 +156,9 @@ bool IndexBuildState::_checkIfValidTransition(IndexBuildState::State currentStat // As a secondary, we received a commit oplog entry. newState == IndexBuildState::State::kApplyCommitOplogEntry || // The index build was aborted, and the caller took responsibility for cleanup. - newState == IndexBuildState::State::kAborted || - // The index build failed and we are waiting for the primary to send an abort oplog - // entry. - (graceful && newState == IndexBuildState::State::kAwaitPrimaryAbort) || - // We were forced to abort ourselves externally and cleanup is required. - (graceful && newState == IndexBuildState::State::kForceSelfAbort); + newState == IndexBuildState::State::kExternalAbort || + // Internal failure or interrupted (user killOp, disk space monitor, or shutdown). + newState == IndexBuildState::State::kFailureCleanUp; case IndexBuildState::State::kApplyCommitOplogEntry: return @@ -151,17 +167,23 @@ bool IndexBuildState::_checkIfValidTransition(IndexBuildState::State currentStat case IndexBuildState::State::kAwaitPrimaryAbort: return - // We successfully aborted the index build as a primary or secondary. - (graceful && newState == IndexBuildState::State::kAborted); + // Abort for shutdown. + newState == IndexBuildState::State::kAborted || + // The oplog applier is externally aborting the index build while applying + // 'abortIndexBuild'. + (graceful && newState == IndexBuildState::State::kExternalAbort); - case IndexBuildState::State::kForceSelfAbort: + case IndexBuildState::State::kFailureCleanUp: return - // After being signalled to self-abort a second caller explicitly aborted the index - // build. The second caller has taken responsibility of cleanup. - (graceful && newState == IndexBuildState::State::kAborted) || + // A primary node completed self-abort or abort for shutdown. + newState == IndexBuildState::State::kAborted || // We are waiting for the current primary to abort the index build. (graceful && newState == IndexBuildState::State::kAwaitPrimaryAbort); + case IndexBuildState::State::kExternalAbort: + // The external aborter has finished cleaning up the index build. + return newState == IndexBuildState::State::kAborted; + case IndexBuildState::State::kAborted: return false; @@ -211,34 +233,28 @@ void ReplIndexBuildState::completeSetup() { _cleanUpRequired = true; } -Status ReplIndexBuildState::tryStart(OperationContext* opCtx) { +void ReplIndexBuildState::setInProgress(OperationContext* opCtx) { stdx::lock_guard lk(_mutex); // The index build might have been aborted/interrupted before reaching this point. Trying to // transtion to kInProgress would be an error.
- auto interruptCheck = opCtx->checkForInterruptNoAssert(); - if (interruptCheck.isOK()) { - _indexBuildState.setState(IndexBuildState::kInProgress, false /* skipCheck */); - } - return interruptCheck; -} - -void ReplIndexBuildState::commit(OperationContext* opCtx) { - auto skipCheck = _shouldSkipIndexBuildStateTransitionCheck(opCtx); - opCtx->recoveryUnit()->onCommit( - [this, skipCheck](OperationContext*, boost::optional) { - stdx::lock_guard lk(_mutex); - _indexBuildState.setState(IndexBuildState::kCommitted, skipCheck); - }); + opCtx->checkForInterrupt(); + _indexBuildState.setState(IndexBuildState::kInProgress, false /* skipCheck */); } -bool ReplIndexBuildState::requestAbortFromPrimary(const Status& abortStatus) { - invariant(protocol == IndexBuildProtocol::kTwoPhase); +void ReplIndexBuildState::setPostFailureState(const Status& status) { stdx::lock_guard lk(_mutex); + if (_indexBuildState.isFailureCleanUp() || _indexBuildState.isExternalAbort() || + _indexBuildState.isAborted()) { + LOGV2_DEBUG(7693500, + 1, + "Index build: already in an abort handling state", + "state"_attr = _indexBuildState.toString()); + return; + } // It is possible that a 'commitIndexBuild' oplog entry is applied while the index builder is // transitioning to an abort, or even to have been in a state where the oplog applier is already - // waiting for the index build to finish. In such instances, the node cannot try to recover by - // requesting an abort from the primary, as the commitQuorum already decided to commit. + // waiting for the index build to finish. if (_indexBuildState.isApplyingCommitOplogEntry()) { LOGV2_FATAL(7329407, "Trying to abort an index build while a 'commitIndexBuild' oplog entry is " @@ -247,14 +263,35 @@ bool ReplIndexBuildState::requestAbortFromPrimary(const Status& abortStatus) { "buildUUID"_attr = buildUUID); } - if (_indexBuildState.isAborted()) { - return false; - } - _indexBuildState.setState( - IndexBuildState::kAwaitPrimaryAbort, false /* skipCheck */, boost::none, abortStatus); + IndexBuildState::kFailureCleanUp, false /* skipCheck */, boost::none, status); +} - return true; +void ReplIndexBuildState::setVotedForCommitReadiness(OperationContext* opCtx) { + stdx::lock_guard lk(_mutex); + invariant(!_votedForCommitReadiness); + opCtx->checkForInterrupt(); + _votedForCommitReadiness = true; +} + +bool ReplIndexBuildState::canVoteForAbort() const { + stdx::lock_guard lk(_mutex); + return !_votedForCommitReadiness; +} + +void ReplIndexBuildState::commit(OperationContext* opCtx) { + auto skipCheck = _shouldSkipIndexBuildStateTransitionCheck(opCtx); + opCtx->recoveryUnit()->onCommit( + [this, skipCheck](OperationContext*, boost::optional) { + stdx::lock_guard lk(_mutex); + _indexBuildState.setState(IndexBuildState::kCommitted, skipCheck); + }); +} + +void ReplIndexBuildState::requestAbortFromPrimary() { + invariant(protocol == IndexBuildProtocol::kTwoPhase); + stdx::lock_guard lk(_mutex); + _indexBuildState.setState(IndexBuildState::kAwaitPrimaryAbort, false /* skipCheck */); } Timestamp ReplIndexBuildState::getCommitTimestamp() const { @@ -269,7 +306,7 @@ void ReplIndexBuildState::onOplogCommit(bool isPrimary) const { << ", index build state: " << _indexBuildState.toString()); } -void ReplIndexBuildState::abortSelf(OperationContext* opCtx) { +void ReplIndexBuildState::completeAbort(OperationContext* opCtx) { auto skipCheck = _shouldSkipIndexBuildStateTransitionCheck(opCtx); stdx::lock_guard lk(_mutex); _indexBuildState.setState(IndexBuildState::kAborted, skipCheck); @@ -292,7 
+329,7 @@ void ReplIndexBuildState::onOplogAbort(OperationContext* opCtx, const NamespaceS invariant(!isPrimary, str::stream() << "Index build: " << buildUUID); stdx::lock_guard lk(_mutex); - invariant(_indexBuildState.isAborted(), + invariant(_indexBuildState.isExternalAbort(), str::stream() << "Index build: " << buildUUID << ", index build state: " << _indexBuildState.toString()); invariant(_indexBuildState.getTimestamp() && _indexBuildState.getAbortReason(), @@ -318,7 +355,7 @@ bool ReplIndexBuildState::isAborted() const { bool ReplIndexBuildState::isAborting() const { stdx::lock_guard lk(_mutex); - return _indexBuildState.isAwaitingPrimaryAbort() || _indexBuildState.isForceSelfAbort(); + return _indexBuildState.isAborting(); } bool ReplIndexBuildState::isCommitted() const { @@ -331,9 +368,19 @@ bool ReplIndexBuildState::isSettingUp() const { return _indexBuildState.isSettingUp(); } +bool ReplIndexBuildState::isExternalAbort() const { + stdx::lock_guard lk(_mutex); + return _indexBuildState.isExternalAbort(); +} + +bool ReplIndexBuildState::isFailureCleanUp() const { + stdx::lock_guard lk(_mutex); + return _indexBuildState.isFailureCleanUp(); +} + std::string ReplIndexBuildState::getAbortReason() const { stdx::lock_guard lk(_mutex); - invariant(_indexBuildState.isAborted() || _indexBuildState.isAwaitingPrimaryAbort(), + invariant(_indexBuildState.isAborted() || _indexBuildState.isAborting(), str::stream() << "Index build: " << buildUUID << ", index build state: " << _indexBuildState.toString()); auto reason = _indexBuildState.getAbortReason(); @@ -396,7 +443,7 @@ bool ReplIndexBuildState::tryCommit(OperationContext* opCtx) { // If the node is secondary, and awaiting a primary abort, the transition is invalid, and the // node should crash. - if (_indexBuildState.isAwaitingPrimaryAbort() || _indexBuildState.isForceSelfAbort()) { + if (_indexBuildState.isAwaitingPrimaryAbort() || _indexBuildState.isFailureCleanUp()) { LOGV2_FATAL(7329403, "Received an index build commit from the primary for an index build that we " "were unable to build successfully and was waiting for an abort", @@ -429,6 +476,13 @@ ReplIndexBuildState::TryAbortResult ReplIndexBuildState::tryAbort(OperationConte IndexBuildAction signalAction, std::string reason) { stdx::lock_guard lk(_mutex); + // It is not possible for the index build to be in kExternalAbort state, as the collection + // MODE_X lock is held and there cannot be concurrent external aborters. + auto nssOptional = CollectionCatalog::get(opCtx)->lookupNSSByUUID(opCtx, collectionUUID); + invariant(!_indexBuildState.isExternalAbort()); + invariant(nssOptional && + opCtx->lockState()->isCollectionLockedForMode(nssOptional.get(), MODE_X)); + // Wait until the build is done setting up. This indicates that all required state is // initialized to attempt an abort. if (_indexBuildState.isSettingUp()) { @@ -438,6 +492,18 @@ ReplIndexBuildState::TryAbortResult ReplIndexBuildState::tryAbort(OperationConte "buildUUID"_attr = buildUUID); return TryAbortResult::kRetry; } + // Wait until an earlier self-abort finishes. The kAwaitPrimaryAbort state must be allowed, in + // case the voteAbortIndexBuild command ends up in a loopback or 'abortIndexBuild' is being + // applied. We retry here instead of returning kAlreadyAborted to ensure that by the time the + // external aborter receives TryAbortResult::kAlreadyAborted, the build is actually aborted and + // not in the process of aborting. 
+ if (_indexBuildState.isFailureCleanUp()) { + LOGV2_DEBUG(7693501, + 2, + "waiting until index build finishes abort", + "buildUUID"_attr = buildUUID); + return TryAbortResult::kRetry; + } if (_indexBuildState.isAborted()) { // Returns if a concurrent operation already aborted the index build. return TryAbortResult::kAlreadyAborted; @@ -482,7 +548,8 @@ ReplIndexBuildState::TryAbortResult ReplIndexBuildState::tryAbort(OperationConte ? tenant_migration_access_blocker::checkIfCanBuildIndex(opCtx, dbName) : Status(ErrorCodes::IndexBuildAborted, reason); invariant(!abortStatus.isOK()); - _indexBuildState.setState(IndexBuildState::kAborted, skipCheck, abortTimestamp, abortStatus); + _indexBuildState.setState( + IndexBuildState::kExternalAbort, skipCheck, abortTimestamp, abortStatus); // Aside from setting the tenantMigrationAbortStatus, tenant migration aborts are identical to // primary aborts. @@ -499,7 +566,7 @@ ReplIndexBuildState::TryAbortResult ReplIndexBuildState::tryAbort(OperationConte auto serviceContext = opCtx->getServiceContext(); if (auto target = serviceContext->getLockedClient(*_opId)) { auto targetOpCtx = target->getOperationContext(); - serviceContext->killOperation(target, targetOpCtx, ErrorCodes::IndexBuildAborted); + serviceContext->killOperation(target, targetOpCtx); } // Set the signal. Because we have already interrupted the index build, it will not observe @@ -512,14 +579,20 @@ ReplIndexBuildState::TryAbortResult ReplIndexBuildState::tryAbort(OperationConte bool ReplIndexBuildState::forceSelfAbort(OperationContext* opCtx, const Status& error) { stdx::lock_guard lk(_mutex); if (_indexBuildState.isSettingUp() || _indexBuildState.isAborted() || - _indexBuildState.isCommitted() || _indexBuildState.isAwaitingPrimaryAbort() || - _indexBuildState.isApplyingCommitOplogEntry()) { + _indexBuildState.isCommitted() || _indexBuildState.isAborting() || + _indexBuildState.isApplyingCommitOplogEntry() || _votedForCommitReadiness) { // If the build is setting up, it is not yet abortable. If the index build has already - // passed a point of no return, interrupting will not be productive. + // passed a point of no return, interrupting will not be productive. If the index build is + // already in the process of aborting, it cannot be aborted again. + LOGV2(7617000, + "Index build: cannot force abort", + "buildUUID"_attr = buildUUID, + "state"_attr = _indexBuildState, + "votedForCommit"_attr = _votedForCommitReadiness); return false; } - _indexBuildState.setState(IndexBuildState::kForceSelfAbort, + _indexBuildState.setState(IndexBuildState::kFailureCleanUp, false /* skipCheck */, boost::none /* timestamp */, error); @@ -531,14 +604,8 @@ bool ReplIndexBuildState::forceSelfAbort(OperationContext* opCtx, const Status& LOGV2(7419400, "Forcefully aborting index build", "buildUUID"_attr = buildUUID); - // If there is a pending voteCommitIndexBuild request, cancel it and clear the callback. - // Otherwise the index build will try to issue a voteAbortIndexBuild, and set the callback - // handle, while the previous one is still valid. - _cancelAndClearVoteRequestCbk(lk, opCtx); - - // We don't pass IndexBuildAborted as the interruption error code because that would imply - // that we are taking responsibility for cleaning up the index build, when in fact the index - // builder thread is responsible. + // The index builder thread is responsible for cleaning up, as indicated by the + // kFailureCleanUp state.
serviceContext->killOperation(target, targetOpCtx); } return true; @@ -670,6 +737,17 @@ void ReplIndexBuildState::appendBuildInfo(BSONObjBuilder* builder) const { bool ReplIndexBuildState::_shouldSkipIndexBuildStateTransitionCheck(OperationContext* opCtx) const { const auto replCoord = repl::ReplicationCoordinator::get(opCtx); if (replCoord->isReplEnabled() && protocol == IndexBuildProtocol::kTwoPhase) { + if (replCoord->getMemberState() == repl::MemberState::RS_STARTUP2 && + !serverGlobalParams.featureCompatibility.isVersionInitialized()) { + // We're likely at the initial stages of a new logical initial sync attempt, and we + // haven't yet replicated the FCV from the sync source. Skip the index build state + // transition checks because they rely on the FCV. + LOGV2_DEBUG(6826202, + 2, + "Index build: skipping index build state transition checks because the FCV " + "isn't known yet"); + return true; + } return false; } return true; diff --git a/src/mongo/db/repl_index_build_state.h b/src/mongo/db/repl_index_build_state.h index e085cb92206a4..e3179c70d5cc6 100644 --- a/src/mongo/db/repl_index_build_state.h +++ b/src/mongo/db/repl_index_build_state.h @@ -29,24 +29,36 @@ #pragma once -#include "mongo/stdx/mutex.h" -#include "mongo/util/concurrency/with_lock.h" #include +#include +#include +#include #include +#include #include +#include #include +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" #include "mongo/db/catalog/commit_quorum_options.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/operation_id.h" #include "mongo/db/repl/optime.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/mutex.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/uuid.h" @@ -144,26 +156,37 @@ class IndexBuildState { */ kApplyCommitOplogEntry, /** - * Below state indicates that index build was successfully able to commit or abort. For - * kCommitted, the state is set immediately before it commits the index build. For - * kAborted, this state is set after the build is cleaned up and the abort oplog entry is - * replicated. + * Below state indicates that index build was successfully able to commit. For kCommitted, + * the state is set immediately before it commits the index build. */ kCommitted, + /** + * Below state indicates that index build was successfully able to abort. For kAborted, this + * state is set after the build is cleaned up and the abort oplog entry is replicated. + */ kAborted, + /** + * Indicates that an internal error caused the index build to fail, or that an internal + * operation or user killOp forced the index build abort by itself. In this state, + * concurrent external aborts are not allowed. The index builder thread is responsible for + * handling clean up. If it is determined that voting for abort is allowed, transitions to + * kAwaitPrimaryAbort. Otherwise it attempts to clean up directly.
+ */ + kFailureCleanUp, /** * Below state indicates that the index build thread has voted for an abort to the current * primary, and is waiting for the index build to actually be aborted either because the * command is a loopback to itself (vote issuer is primary itself) or due to - * 'abortIndexBuild' oplog entry being replicated by the primary. + * 'abortIndexBuild' oplog entry being replicated by the primary. Concurrent external aborts + * are allowed again (after being disallowed in kFailureCleanUp), as both loopback and + * 'abortIndexBuild' are external aborts. */ kAwaitPrimaryAbort, /** - * This state indicates that an internal operation, regardless of replication state, - * requested that this index build abort. The index builder thread is responsible for - * handling and cleaning up. + * Indicates that an external abort is ongoing. It is the responsibility of the external + * aborter to clean up the resources. */ - kForceSelfAbort, + kExternalAbort }; /** @@ -201,8 +224,17 @@ class IndexBuildState { return _state == kAwaitPrimaryAbort; } - bool isForceSelfAbort() const { - return _state == kForceSelfAbort; + bool isFailureCleanUp() const { + return _state == kFailureCleanUp; + } + + bool isExternalAbort() const { + return _state == kExternalAbort; + } + + bool isAborting() const { + return _state == kAwaitPrimaryAbort || _state == kFailureCleanUp || + _state == kExternalAbort; } boost::optional getTimestamp() const { @@ -237,8 +269,10 @@ class IndexBuildState { return "Aborted"; case kAwaitPrimaryAbort: return "Await primary abort oplog entry"; - case kForceSelfAbort: - return "Forced self-abort"; + case kFailureCleanUp: + return "Cleaning up"; + case kExternalAbort: + return "External abort"; } MONGO_UNREACHABLE; } @@ -297,10 +331,31 @@ class ReplIndexBuildState { void completeSetup(); /** - * Try to set the index build to in-progress state. Returns true on success, or false if the - * build is already aborted / interrupted. + * Try to set the index build to in-progress state, does an interrupt check and throws if the + * build is already killed. + */ + void setInProgress(OperationContext* opCtx); + + /** + * Transition the index build to kFailureCleanUp state if the build isn't already in kAborted, + * kExternalAbort, or kFailureCleanUp state. In case it already is in an abort state, does + * nothing and preserves the previous status. */ - Status tryStart(OperationContext* opCtx); + void setPostFailureState(const Status& status); + + /** + * Indicate that the index build has attempted to vote for commit readiness. After calling this, + * the index build cannot vote for abort. Performs an interrupt check, in case the build was + * concurrently forced to self abort or received a killop, in which case the vote for abort is + * necessary. + */ + void setVotedForCommitReadiness(OperationContext* opCtx); + + /** + * Returns true if this index build can still vote for abort. Voting for abort is not possible + * after the index build has voted for commit. + */ + bool canVoteForAbort() const; /** * This index build has completed successfully and there is no further work to be done. @@ -309,13 +364,9 @@ class ReplIndexBuildState { /** * Only for two-phase index builds. Requests the primary to abort the build, and transitions - * into a waiting state. - * - * Returns true if the thread has transitioned into the waiting state. - * Returns false if the build is already in abort state. This can happen if the build detected - * an error while an external operation (e.g. 
a collection drop) is concurrently aborting it. + * into kAwaitPrimaryAbort state. */ - bool requestAbortFromPrimary(const Status& abortStatus); + void requestAbortFromPrimary(); /** * Returns timestamp for committing this index build. @@ -333,7 +384,7 @@ class ReplIndexBuildState { /** * This index build has failed while running in the builder thread due to a non-shutdown reason. */ - void abortSelf(OperationContext* opCtx); + void completeAbort(OperationContext* opCtx); /** * This index build was interrupted because the server is shutting down. @@ -373,12 +424,22 @@ class ReplIndexBuildState { bool isSettingUp() const; /** - * Returns abort reason. Invariants if not in aborted state. + * Returns true if this index build is being externally aborted. + */ + bool isExternalAbort() const; + + /** + * Returns true if this index build is performing self cleanup. + */ + bool isFailureCleanUp() const; + + /** + * Returns abort reason. */ std::string getAbortReason() const; /** - * Returns abort status. Invariants if not in aborted state. + * Returns abort status. Returns Status::OK() if not in aborted state. */ Status getAbortStatus() const; @@ -568,6 +629,9 @@ class ReplIndexBuildState { // Set once setup is complete, indicating that a clean up is required in case of abort. bool _cleanUpRequired = false; + + // Set once before attempting to vote for commit readiness. + bool _votedForCommitReadiness = false; }; } // namespace mongo diff --git a/src/mongo/db/repl_set_member_in_standalone_mode.cpp b/src/mongo/db/repl_set_member_in_standalone_mode.cpp index 9859ab9333bd9..a5aede29d798f 100644 --- a/src/mongo/db/repl_set_member_in_standalone_mode.cpp +++ b/src/mongo/db/repl_set_member_in_standalone_mode.cpp @@ -27,10 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include #include "mongo/db/repl_set_member_in_standalone_mode.h" #include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/rs_local_client.cpp b/src/mongo/db/rs_local_client.cpp index a2934ef2903fe..5c045ac5fac15 100644 --- a/src/mongo/db/rs_local_client.cpp +++ b/src/mongo/db/rs_local_client.cpp @@ -27,18 +27,37 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/rs_local_client.h" - -#include "mongo/db/curop.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/rs_local_client.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" #include "mongo/rpc/unique_message.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" namespace mongo { @@ -162,8 +181,8 @@ StatusWith RSLocalClient::queryOnce( if (!cursor) { return {ErrorCodes::OperationFailed, - str::stream() << "Failed to establish a cursor for reading " << nss.ns() - << " from local storage"}; + str::stream() << "Failed to establish a cursor for reading " + << nss.toStringForErrorMsg() << " from local storage"}; } std::vector documentVector; diff --git a/src/mongo/db/rs_local_client.h b/src/mongo/db/rs_local_client.h index 46f523e0e3791..6a061f8e08c9e 100644 --- a/src/mongo/db/rs_local_client.h +++ b/src/mongo/db/rs_local_client.h @@ -29,7 +29,23 @@ #pragma once +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/platform/mutex.h" #include "mongo/s/client/shard.h" #include "mongo/util/hierarchical_acquisition.h" diff --git a/src/mongo/db/s/README.md b/src/mongo/db/s/README.md index 6f31189e0b4d2..d39caa8fab81a 100644 --- a/src/mongo/db/s/README.md +++ b/src/mongo/db/s/README.md @@ -1,1440 +1,66 @@ -# Sharding Internals +# Sharding Architecture Guide +This page contains details of the source code architecture of the MongoDB Sharding system. It is intended to be used by engineers working on the core server, with some sections containing low-level details which are most appropriate for new engineers on the sharding team. -## Recommended prerequisite reading +It is not intended to be a tutorial on how to operate sharding as a user and it requires that the reader is already familiar with the general concepts of [sharding](https://docs.mongodb.com/manual/sharding/#sharding), the [architecture of a MongoDB sharded cluster](https://docs.mongodb.com/manual/sharding/#sharded-cluster), and the concept of a [shard key](https://docs.mongodb.com/manual/sharding/#shard-keys). 
-A reader should be familiar with the -[**general concept**](https://docs.mongodb.com/manual/sharding/#sharding) -of horizontal scaling or "sharding", the -[**architecture of a MongoDB sharded cluster**](https://docs.mongodb.com/manual/sharding/#sharded-cluster), -and the concept of a -[**shard key in MongoDB**](https://docs.mongodb.com/manual/sharding/#shard-keys). - -## Sharding acronyms - -* CSRS: **C**onfig **S**erver as a **R**eplica **S**et. This is a fancy name for the [Config server](https://www.mongodb.com/docs/manual/core/sharded-cluster-config-servers/). Comes from the times of version 3.2 and earlier, when there was a legacy type of Config server called [SCCC](https://www.mongodb.com/docs/manual/release-notes/3.4-compatibility/#removal-of-support-for-sccc-config-servers). +## Sharding terminology and acronyms +* Config Data: All the [catalog containers](README_sharding_catalog.md#catalog-containers) residing on the CSRS. * Config Shard: Same as CSRS. -* ConfigData: The set of collections residing on the CSRS, which contain state about the cluster. - ---- - -# Routing - -There is an authoritative routing table stored on the config server replica set, and all nodes cache -the routing table in memory so that they can route requests to the shard(s) that own the -corresponding data. - -## The authoritative routing table - -The authoritative routing table is stored in a set of unsharded collections in the config database -on the config server replica set. The schemas of the relevant collections are: - -* [**config.databases**](https://www.mongodb.com/docs/manual/reference/config-database/#mongodb-data-config.databases) -* [**config.collections**](https://www.mongodb.com/docs/manual/reference/config-database/#mongodb-data-config.collections) -* [**config.chunks**](https://www.mongodb.com/docs/manual/reference/config-database/#mongodb-data-config.chunks) -* [**config.shards**](https://www.mongodb.com/docs/manual/reference/config-database/#mongodb-data-config.shards) - -#### Code references - -* The -[**indexes created**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager.cpp#L295-L372) -on each of these collections for efficient querying - -## The routing table cache - -The routing table cache is a read-only in-memory cache. This cache keeps track of the last-seen -routing information for databases and sharded collections in a sharded cluster. An independent -copy of the routing table cache exists on each router and shard. - -Operations consult the routing table cache in order to route requests to shards that hold data for -a collection. - -This [section](README_routing_info_cache_consistency_model.md#consistency-model-of-the-routing-table-cache) describes the conceptual consistency model of the routing table cache. - -### How the routing table cache refreshes - -The authoritative routing information exists persisted to disk on the config server. Certain node -types load information (refresh) directly from the config server. Other node types refresh from an -intermediary source. At any given time, the state of the in-memory cache is the result of the -latest refresh from that node’s source. As a result, the cache may or may not be up-to-date with -its corresponding source. 
- -We define the refresh behavior based on node type below: - -| Node Type | Information Source | Additional Behavior | -| --------- | ------------------ | ------------------ | -| Router | Config server | N/A | -| Shard server acting as replica set primary | Config server | Persists refreshed information to disk. This information should always be consistent with the in-memory cache. The persisted information is replicated to all replica set secondaries. | -| Shard server acting as replica set secondary | On-disk information replicated from the replica set primary | N/A | - -When referring to a refresh in this section, we imply that the given node’s routing table cache -will update its in-memory state with its corresponding source. - -### When the routing table cache will refresh - -The routing table cache is "lazy." It does not refresh from its source unless necessary. The cache -will refresh in two scenarios: - -1. A request sent to a shard returns an error indicating that the shard’s known routing information for that request doesn't match the sender's routing information. -2. The cache attempts to access information for a collection that has been locally marked as having out-of-date routing information (stale). - -Operations that change a collection’s routing information (for example, a moveChunk command that -updates a chunk’s location) will mark the local node’s routing table cache as "stale" for affected -shards. Subsequent attempts to access routing information for affected shards will block on a -routing table cache refresh. Some operations, such as dropCollection, will affect all shards. In -this case, the entire collection will be marked as "stale." Accordingly, subsequent attempts to -access any routing information for the collection will block on a routing table cache refresh. - -### Types of refreshes - -The routing table cache performs two types of refreshes for a database or collection. - -1. A full refresh clears all cached information, and replaces the cache with the information that exists on the node’s source. -2. An incremental refresh only replaces modified routing information from the node’s source. - -An incremental refresh occurs when the routing table cache already has a local notion of the -collection or database. A full refresh occurs when: - -* The cache has no notion of a collection or database, or -* A collection or database has been marked as dropped by a shard or the local node’s routing information, or -* A collection has otherwise been marked as having a mismatched epoch. - -### Operational Caveats - -1. If the routing table cache receives an error in attempting to refresh, it will retry up to twice before giving up and returning stale information. -2. If the gEnableFinerGrainedCatalogCacheRefresh startup parameter is disabled, then all attempts to access routing information for a stale namespace will block on a routing table cache refresh, regardless if a particular targeted shard is marked as stale. - -#### Code References - -* [The CatalogCache (routing table cache) class](https://github.com/mongodb/mongo/blob/master/src/mongo/s/catalog_cache.h) -* [The CachedDatabaseInfo class](https://github.com/mongodb/mongo/blob/62d9485657717bf61fbb870cb3d09b52b1a614dd/src/mongo/s/catalog_cache.h#L61-L81) - -Methods that will mark routing table cache information as stale (sharded collection). 
- -* [invalidateShardOrEntireCollectionEntryForShardedCollection](https://github.com/mongodb/mongo/blob/62d9485657717bf61fbb870cb3d09b52b1a614dd/src/mongo/s/catalog_cache.h#L226-L236) -* [invalidateEntriesThatReferenceShard](https://github.com/mongodb/mongo/blob/62d9485657717bf61fbb870cb3d09b52b1a614dd/src/mongo/s/catalog_cache.h#L270-L274) -* [invalidateCollectionEntry_LINEARIZABLE](https://github.com/mongodb/mongo/blob/32fe49396dec58836033bca67ad1360b1a80f03c/src/mongo/s/catalog_cache.h#L211-L216) - -Methods that will mark routing table cache information as stale (database). - -* [onStaleDatabaseVersion](https://github.com/mongodb/mongo/blob/62d9485657717bf61fbb870cb3d09b52b1a614dd/src/mongo/s/catalog_cache.h#L197-L205) -* [purgeDatabase](https://github.com/mongodb/mongo/blob/62d9485657717bf61fbb870cb3d09b52b1a614dd/src/mongo/s/catalog_cache.h#L282-L286) - -## Shard versioning and database versioning - -In a sharded cluster, the placement of collections is determined by a versioning protocol. We use -this versioning protocol in tracking the location of both chunks for sharded collections and -databases for unsharded collections. - -### Shard versioning - -The shard versioning protocol tracks the placement of chunks for sharded collections. - -Each chunk has a version called the "chunk version." A chunk version is represented as C and consists of four elements: - -1. The *E* epoch - an object ID shared among all chunks for a collection that distinguishes a unique instance of the collection. -1. The *T* timestamp - a new unique identifier for a collection introduced in version 5.0. The difference between epoch and timestamp is that timestamps are comparable, allowing for distinguishing between two instances of a collection in which the epoch/timestamp do not match. -1. The *M* major version - an integer used to specify a change on the data placement (i.e. chunk migration). -1. The *m* minor version - An integer used to specify that a chunk has been resized (i.e. split or merged). - -To completely define the shard versioning protocol, we introduce two extra terms - the "shard -version" and "collection version." - -1. Shard version - For a sharded collection, this is the highest chunk version seen on a particular shard. The version of the *i* shard is represented as SViSVi, TSVi, MSVi, mSVi>. -1. Collection version - For a sharded collection, this is the highest chunk version seen across all shards. The collection version is represented as CVcv, Tcv, Mcv, mcv>. - -### Database versioning - -The database versioning protocol tracks the placement of databases for unsharded collections. The -"database version" indicates on which shard a database currently exists. A database version is represented as DBV and consists of two elements: - -1. The UUID - a unique identifier to distinguish different instances of the database. The UUID remains unchanged for the lifetime of the database, unless the database is dropped and recreated. -1. The *T* timestamp - a new unique identifier introduced in version 5.0. Unlike the UUID, the timestamp allows for ordering database versions in which the UUID/Timestamp do not match. -1. The last modified field - an integer incremented when the database changes its primary shard. - -### Versioning updates - -Nodes that track chunk/database versions "lazily" load versioning information. A router or shard -will only find out that its internally-stored versioning information is stale via receiving changed -version information from another node. 
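The chunk version <E, T, M, m> and database version <uuid, T, LastMod> tuples described above can be modeled as small records whose orderable parts are (M, m) and LastMod respectively, once the epoch/timestamp (or UUID/timestamp) confirm both sides are talking about the same incarnation of the collection or database. The sketch below is purely illustrative: it is not the real ChunkVersion or DatabaseVersion class linked further down, and it flattens the ObjectId/UUID fields to integers only to stay self-contained.

```cpp
#include <cstdint>
#include <tuple>

// Illustrative model of a chunk version <E, T, M, m>; not the real ChunkVersion class.
struct ChunkVersionModel {
    uint64_t epoch;      // E: identifies an instance of the collection (not orderable)
    uint64_t timestamp;  // T: orderable instance identifier introduced in 5.0
    uint32_t major;      // M: bumped on data placement changes (e.g. chunk migration)
    uint32_t minor;      // m: bumped when a chunk is resized (split or merge)
};

// Versions are only comparable when they refer to the same collection instance.
inline bool sameCollectionInstance(const ChunkVersionModel& a, const ChunkVersionModel& b) {
    return a.epoch == b.epoch && a.timestamp == b.timestamp;
}

// "Is a stale relative to b?" for the same instance: ordered by (major, minor).
inline bool isOlderThan(const ChunkVersionModel& a, const ChunkVersionModel& b) {
    return std::tie(a.major, a.minor) < std::tie(b.major, b.minor);
}

// Illustrative model of a database version; not the real DatabaseVersion class.
struct DatabaseVersionModel {
    uint64_t uuid;          // identifies an incarnation of the database (not orderable)
    uint64_t timestamp;     // orderable instance identifier introduced in 5.0
    uint32_t lastModified;  // bumped when the database changes its primary shard
};
```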
- -For each request sent from an origin node to a remote node, the origin node will attach its cached -version information for the corresponding chunk or database. There are two possible versioning -scenarios: - -1. If the remote node detects a shard version mismatch, the remote node will return a message to the origin node stating as such. Whichever node that reports having an older version will attempt to refresh. The origin node will then retry the request. -1. If the remote node and the origin node have the same version, the request will proceed. - -### Types of operations that will cause the versioning information to become stale - -Before going through the table that explains which operations modify the versioning information and how, it is important to give a bit more information about the move chunk operation. When we move a chunk C from the *i* shard to the *j* shard, where *i* and *j* are different, we end up updating the shard version of both shards. For the recipient shard (i.e. *j* shard), the version of the migrated chunk defines its shard version. For the donor shard (i.e. *i* shard) what we do is look for another chunk of that collection on that shard and update its version. That chunk is called the control chunk and its version defines the *i* shard version. If there are no other chunks, the shard version is updated to SVicv, Tcv, 0, 0>. - -Operation Type | Version Modification Behavior | --------------- | ----------------------------- | -Moving a chunk C
C <E, T, M, m> | C <Ecv, Tcv, Mcv + 1, 0> <br> ControlChunk <Ecv, Tcv, Mcv + 1, 1> if any |
-Splitting a chunk C into n pieces <br> C <E, T, M, m> | Cnew 1 <Ecv, Tcv, Mcv, mcv + 1> <br> ... <br> Cnew n <Ecv, Tcv, Mcv, mcv + n> |
-Merging chunks C1, ..., Cn <br> C1 <E1, T1, M1, m1> <br> ... <br> Cn <En, Tn, Mn, mn> | Cnew <Ecv, Tcv, Mcv, mcv + 1> |
-Dropping a collection | The dropped collection doesn't have a SV - all chunks are deleted |
-Refining a collection's shard key | Ci <Enew, Tnow, Mi, mi> forall i in 1 <= i <= #Chunks |
-Changing the primary shard for a DB <br>
DBV | DBV | -Dropping a database | The dropped DB doesn't have a DBV | - -### Special versioning conventions - -Chunk versioning conventions - -Convention Type | Epoch | Timestamp | Major Version | Minor Version | ---------------- | ----- |-------------- | ------------- | ------------- | -First chunk for sharded collection | ObjectId() | current time | 1 | 0 | -Collection is unsharded | ObjectId() | Timestamp() | 0 | 0 | -Collection was dropped | ObjectId() | Timestamp() | 0 | 0 | -Ignore the chunk version for this request | Max DateTime | Timestamp::max() | 0 | 0 | - -Database version conventions - -Convention Type | UUID | Timestamp | Last Modified | ---------------- | ---- | --------- | ------------- | -New database | UUID() | current time | 1 | -Config database | UUID() | current time | 0 | -Admin database | UUID() | current time | 0 | - -#### Code references - -* [The chunk version class](https://github.com/mongodb/mongo/blob/master/src/mongo/s/chunk_version.h) -* [The database version IDL](https://github.com/mongodb/mongo/blob/master/src/mongo/s/database_version.idl) -* [The database version class](https://github.com/mongodb/mongo/blob/master/src/mongo/s/database_version.h) -* [Where shard versions are stored in a routing table cache](https://github.com/mongodb/mongo/blob/1df41757d5d1e04c51eeeee786a17b005e025b93/src/mongo/s/catalog_cache.h#L499-L500) -* [Where database versions are stored in a routing table cache](https://github.com/mongodb/mongo/blob/1df41757d5d1e04c51eeeee786a17b005e025b93/src/mongo/s/catalog_cache.h#L497-L498) -* [Method used to attach the shard version to outbound requests](https://github.com/mongodb/mongo/blob/1df41757d5d1e04c51eeeee786a17b005e025b93/src/mongo/s/cluster_commands_helpers.h#L118-L121) -* [Where shard versions are parsed in the ServiceEntryPoint and put on the OperationShardingState](https://github.com/mongodb/mongo/blob/1df41757d5d1e04c51eeeee786a17b005e025b93/src/mongo/db/service_entry_point_common.cpp#L1136-L1150) -* [Where shard versions are stored in a shard's filtering cache](https://github.com/mongodb/mongo/blob/554ec671f7acb6a4df62664f80f68ec3a85bccac/src/mongo/db/s/collection_sharding_runtime.h#L249-L253) -* [The method that checks the equality of a shard version on a shard](https://github.com/mongodb/mongo/blob/554ec671f7acb6a4df62664f80f68ec3a85bccac/src/mongo/db/s/collection_sharding_state.h#L126-L131) -* [The method that checks the equality of a database version on a shard](https://github.com/mongodb/mongo/blob/554ec671f7acb6a4df62664f80f68ec3a85bccac/src/mongo/db/s/database_sharding_state.h#L98-L103) -* [Where stale config exceptions are handled on a shard](https://github.com/mongodb/mongo/blob/8fb7a62652c5fe54da47eab77e28111f00b99d7f/src/mongo/db/service_entry_point_mongod.cpp#L187-L213) -* [Where a mongos catches a StaleConfigInfo](https://github.com/mongodb/mongo/blob/5bd87925a006fa591692e097d7929b6764da6d0c/src/mongo/s/commands/strategy.cpp#L723-L780) -* [Where a cluster find catches a StaleConfigInfo](https://github.com/mongodb/mongo/blob/5bd87925a006fa591692e097d7929b6764da6d0c/src/mongo/s/query/cluster_find.cpp#L578-L585) - -## The shard registry - -The shard registry is an in-memory cache mirroring the `config.shards` collection on the config -server. The collection (and thus the cache) contains an entry for each shard in the cluster. Each -entry contains the connection string for that shard. - -An independent cache exists on each node across all node types (router, shard server, config -server). 
- -Retrieving a shard from the registry returns a `Shard` object. Using that object, one can access -more information about a shard and run commands against that shard. A `Shard` object can be -retrieved from the registry by using any of: - -* The shard's name -* The replica set's name -* The HostAndPort object -* The connection string - -The shard registry refreshes itself in these scenarios: - -1. Upon the node's start-up -1. Upon completion of a background job that runs every thirty seconds -1. Upon an attempt to retrieve a shard that doesn’t have a matching entry in the cache -1. Upon calling the ShardRegistry’s reload function (ShardRegistry::reload()) - -The shard registry makes updates to the `config.shards` collection in one case. If the shard -registry discovers an updated connection string for another shard via a replica set topology -change, it will persist that update to `config.shards`. - -#### Code references -* [The ShardRegistry class](https://github.com/mongodb/mongo/blob/master/src/mongo/s/client/shard_registry.h) -* [The Shard class](https://github.com/mongodb/mongo/blob/master/src/mongo/s/client/shard.h) - -## Targeting a specific host within a shard -When routing a query to a replica set, a cluster node must determine which member to target for a given read preference. A cluster node either has or creates a ReplicaSetMonitor for each remote shard to which it needs to route requests. Information from the ReplicaSetMonitor interface is used to route requests to a specific node within a shard. - -Further details on replica set monitoring and host targeting can be found [here](../../../mongo/client/README.md). - ---- - -# Migrations - -Data is migrated from one shard to another at the granularity of a single chunk. - -It is also possible to move unsharded collections as a group by changing the primary shard of a -database. This uses a protocol similar to but less robust than the one for moving a chunk, so only -moving a chunk is discussed here. - -## The live migration protocol -A chunk is moved from one shard to another by the moveChunk command. This command can be issued either manually or by the balancer. The live migration protocol consists of an exchange of messages between two shards, the donor and the recipient. This exchange is orchestrated by the donor shard in the [moveChunk command](https://github.com/mongodb/mongo/blob/3f849d508692c038afb643b1acb99b8a8cb98d38/src/mongo/db/s/move_chunk_command.cpp#L214) which follows a series of steps. - -1. **Start the migration** - The ActiveMigrationsRegistry is [updated on the donor side](https://github.com/mongodb/mongo/blob/3f849d508692c038afb643b1acb99b8a8cb98d38/src/mongo/db/s/move_chunk_command.cpp#L138) to reflect that a specific chunk is being moved. This prevents any other chunk migrations from happening on this shard until the migration is completed. If an existing incoming or outgoing migration is in flight then the registration will fail and the migration will be aborted. If the inflight operation is for the same chunk then the the registration call will return an object that the moveChunk command can use to join the current operation. -1. **Start cloning the chunk** - After validating the migration parameters, the donor starts the migration process by sending the _recvChunkStart message to the recipient. 
This causes the recipient to then [initiate the transfer of documents](https://github.com/mongodb/mongo/blob/5c72483523561c0331769abc3250cf623817883f/src/mongo/db/s/migration_destination_manager.cpp#L955) from the donor. The initial transfer of documents is done by [repeatedly sending the _migrateClone command to the donor](https://github.com/mongodb/mongo/blob/5c72483523561c0331769abc3250cf623817883f/src/mongo/db/s/migration_destination_manager.cpp#L1042) and inserting the fetched documents on the recipient. -1. **Transfer queued modifications** - Once the initial batch of documents are copied, the recipient then needs to retrieve any modifications that have been queued up on the donor. This is done by [repeatedly sending the _transferMods command to the donor](https://github.com/mongodb/mongo/blob/5c72483523561c0331769abc3250cf623817883f/src/mongo/db/s/migration_destination_manager.cpp#L1060-L1111). These are [inserts, updates and deletes](https://github.com/mongodb/mongo/blob/11eddfac181ff6ff9faf3e1d55c050373bc6fc24/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp#L534-L550) that occurred on the donor while the initial batch was being transferred. -1. **Wait for recipient to clone documents** - The donor [polls the recipient](https://github.com/mongodb/mongo/blob/3f849d508692c038afb643b1acb99b8a8cb98d38/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp#L984) to see when the transfer of documents has been completed or the timeout has been exceeded. This is indicated when the recipient returns a state of "steady" as a result of the _recvChunkStatus command. -1. **Enter the critical section** - Once the recipient has cloned the initial documents, the donor then [declares that it is in a critical section](https://github.com/mongodb/mongo/blob/3f849d508692c038afb643b1acb99b8a8cb98d38/src/mongo/db/s/migration_source_manager.cpp#L344). This indicates that the next operations must not be interrupted and will require recovery actions if they are interrupted. Writes to the donor shard will be suspended while the critical section is in effect. The mechanism to implement the critical section writes the ShardingStateRecovery document to store the minimum optime of the sharding config metadata. If this document exists on stepup it is used to update the optime so that the correct metadata is used. -1. **Commit on the recipient** - While in the critical section, the [_recvChunkCommit](https://github.com/mongodb/mongo/blob/3f849d508692c038afb643b1acb99b8a8cb98d38/src/mongo/db/s/migration_chunk_cloner_source_legacy.cpp#L360) command is sent to the recipient directing it to fetch any remaining documents for this chunk. The recipient responds by sending _transferMods to fetch the remaining documents while writes are blocked on the donor. Once the documents are transferred successfully, the _recvChunkCommit command returns its status to unblock the donor. -1. **Commit on the config server** - The donor sends the _configsvrCommitChunkMigration command to the config server. Before the command is sent, [reads are also suspended](https://github.com/mongodb/mongo/blob/3f849d508692c038afb643b1acb99b8a8cb98d38/src/mongo/db/s/migration_source_manager.cpp#L436) on the donor shard. 
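The steps above can be condensed into a donor-side outline. Every function below is a hypothetical stand-in introduced only for this sketch (the real orchestration happens in the moveChunk command via MigrationSourceManager and the chunk cloner); it is a reading aid for the message sequence, not actual server code.

```cpp
#include <string>

// Hypothetical stand-ins for the real command dispatch; names and ordering follow the
// protocol steps above.
struct ChunkRange { std::string min, max; };
void registerOutgoingMigration(const ChunkRange&);                  // ActiveMigrationsRegistry
void sendRecvChunkStart(const std::string& recipient, const ChunkRange&);
void waitUntilRecipientIsSteady(const std::string& recipient);      // polls _recvChunkStatus
void enterCriticalSection(const ChunkRange&);                       // suspends writes on the donor
void sendRecvChunkCommit(const std::string& recipient);             // recipient drains final _transferMods
void commitOnConfigServer(const ChunkRange&);                       // _configsvrCommitChunkMigration

// Donor-side outline of the live migration protocol described above.
void moveChunkOutline(const ChunkRange& chunk, const std::string& recipient) {
    registerOutgoingMigration(chunk);        // step 1: register, reject concurrent migrations
    sendRecvChunkStart(recipient, chunk);    // step 2: recipient clones via _migrateClone
                                             // step 3: recipient drains queued ops via _transferMods
    waitUntilRecipientIsSteady(recipient);   // step 4: donor polls until the recipient is "steady"
    enterCriticalSection(chunk);             // step 5: donor suspends writes
    sendRecvChunkCommit(recipient);          // step 6: recipient fetches remaining mods and commits
    commitOnConfigServer(chunk);             // step 7: reads suspended, then config server commit
}
```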
- -#### Code references -* [ActiveMigrationRegistry](https://github.com/mongodb/mongo/blob/9be1041342b666e979aaea483c2fdb929c801796/src/mongo/db/s/active_migrations_registry.h#L52) class -* [MigrationSourceManager](https://github.com/mongodb/mongo/blob/2c87953010c2c1ec2d39dc9a7dbbd5f7d49dab10/src/mongo/db/s/migration_source_manager.h#L70) class -* [MigrationDestinationManager](https://github.com/mongodb/mongo/blob/2c87953010c2c1ec2d39dc9a7dbbd5f7d49dab10/src/mongo/db/s/migration_destination_manager.h#L71) class -* [MigrationChunkClonerSource](https://github.com/mongodb/mongo/blob/11eddfac181ff6ff9faf3e1d55c050373bc6fc24/src/mongo/db/s/migration_chunk_cloner_source.h#L82) class -* [ShardingStateRecovery](https://github.com/mongodb/mongo/blob/2c87953010c2c1ec2d39dc9a7dbbd5f7d49dab10/src/mongo/db/s/sharding_state_recovery.h#L47) class - -## Orphaned range deletion -After the migration protocol moves a chunk from one shard to another, the documents that were in the moved range need to be deleted from the donor shard. If the migration failed for any reason and was aborted, then any documents that have been copied over to the recipient need to be deleted. These documents are called orphans since they're not owned by the shard they reside on. - -The migration protocol handles orphaned range deletion by recording the range that is being moved into the config.rangeDeletions collection on both the donor and recipient shards. The range deletion document contains the range that is to be deleted along with a pending flag that indicates if the range is ready for deletion. - -If the migration completes successfully, the range is submitted for deletion on the donor and the range deletion document is deleted from the config.rangeDeletions collection on the recipient. If the migration fails, the range deletion document is deleted from the donor and the range is submitted for deletion on the recipient. - -This sequence of steps is orchestrated by the MigrationCoordinator: -1. The donor shard writes the [migration coordinator document](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/migration_coordinator_document.idl#L67-L102) to its local -config.migrationCoordinators collection. This document contains a unique ID along with other fields that are needed to recover the migration upon failure. -1. The donor shard writes the [range deletion document](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/range_deletion_task.idl#L50-L76) to its local config.rangeDeletions collection with the pending flag set to true. This will prevent the range from being deleted until it is marked as ready. -1. Before the recipient shard begins migrating documents from the donor, if there is an overlapping range already in the config.rangeDeletions collection, the recipient will [wait for it to be deleted](https://github.com/mongodb/mongo/blob/ea576519e5c3445bf11aa7f880aedbee1501010c/src/mongo/db/s/migration_destination_manager.cpp#L865-L885). The recipient then [writes a range deletion document](https://github.com/mongodb/mongo/blob/ea576519e5c3445bf11aa7f880aedbee1501010c/src/mongo/db/s/migration_destination_manager.cpp#L895) to its local config.rangeDeletions collection before the clone step begins. -1. Once the migration is completed, the MigrationCoordinator records the decision in the migration coordinator document on the donor. - * If the migration succeeded, then the commit sequence is executed. 
This involves [deleting the range deletion document on the recipient](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/migration_coordinator.cpp#L204) and then [marking the range](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/migration_coordinator.cpp#L211) as ready to be deleted on the donor. The range is then [submitted for deletion](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/migration_coordinator.cpp#L225) on the donor. - * If the migration failed, then the abort sequence is executed. This involves [deleting the range deletion task on the donor](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/migration_coordinator.cpp#L255) and then [marking the range as ready](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/migration_coordinator.cpp#L261) to be deleted on the recipient. The range is then [submitted for deletion](https://github.com/mongodb/mongo/blob/52a73692175cad37f942ff5e6f3d70aacbbb113d/src/mongo/db/s/shard_server_op_observer.cpp#L383-L393) on the recipient by the ShardServerOpObserver when the [write is committed]((https://github.com/mongodb/mongo/blob/52a73692175cad37f942ff5e6f3d70aacbbb113d/src/mongo/db/s/shard_server_op_observer.cpp#L131)). -1. The migration coordinator document is then [deleted](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/migration_coordinator.cpp#L270). - -On either donor or recipient, the range deletion is [submitted asynchronously](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/range_deletion_util.cpp#L396) to a separate executor that maintains one thread. On the donor, there is a risk of deleting documents that are being accessed in ongoing queries. We first wait for any queries on the primary to complete by [waiting on a promise](https://github.com/mongodb/mongo/blob/52a73692175cad37f942ff5e6f3d70aacbbb113d/src/mongo/db/s/metadata_manager.h#L212-L221) that is signalled by the [reference counting mechanism](https://github.com/mongodb/mongo/blob/ab21bf5ef46689cf4503a3b089def71113c437e2/src/mongo/db/s/metadata_manager.cpp#L126) in RangePreserver and [CollectionMetadataTracker](https://github.com/mongodb/mongo/blob/52a73692175cad37f942ff5e6f3d70aacbbb113d/src/mongo/db/s/metadata_manager.h#L201). We then [wait for a specified amount of time](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/range_deletion_util.cpp#L417-L418) for any queries running on secondaries to complete before starting the deletion. The delay defaults to 15 minutes but can be configured through a server parameter. The documents in the range are then [deleted in batches](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/range_deletion_util.cpp#L312) with a [delay between each batch](https://github.com/mongodb/mongo/blob/49159e1cf859d21c767f6b582dd6e6b2d675808d/src/mongo/db/s/range_deletion_util.cpp#L338). This rate limiting is intended to reduce the I/O load from excessive deletions happening at the same time. When the deletion has been completed, the [range deletion document is deleted](https://github.com/mongodb/mongo/blob/52a73692175cad37f942ff5e6f3d70aacbbb113d/src/mongo/db/s/range_deletion_util.cpp#L485) from the local config.rangeDeletions collection. 
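The batching and rate limiting described above can be sketched as follows. This is an illustrative, self-contained example; `deleteBatch`, the batch size and the delays are hypothetical stand-ins for the storage-engine deletion call and the server parameters mentioned in the text.

```cpp
#include <algorithm>
#include <chrono>
#include <cstddef>
#include <iostream>
#include <thread>
#include <vector>

// Deletes the documents in a range in small batches, sleeping between batches so the I/O load
// is spread out. 'deleteBatch' stands in for the storage-engine call and returns how many
// documents it actually removed (0 when the range is empty).
template <typename DeleteBatchFn>
std::size_t rateLimitedRangeDeletion(DeleteBatchFn deleteBatch,
                                     std::size_t batchSize,
                                     std::chrono::milliseconds initialDelay,
                                     std::chrono::milliseconds delayBetweenBatches) {
    // Grace period so in-flight (e.g. secondary) queries can drain before any deletes happen.
    std::this_thread::sleep_for(initialDelay);

    std::size_t totalDeleted = 0;
    while (true) {
        std::size_t deleted = deleteBatch(batchSize);
        if (deleted == 0) break;  // Range is empty: done.
        totalDeleted += deleted;
        std::this_thread::sleep_for(delayBetweenBatches);  // Rate limiting between batches.
    }
    return totalDeleted;
}

int main() {
    std::vector<int> orphanedDocs(2500, 0);  // Pretend these are the orphaned documents.
    auto deleteBatch = [&orphanedDocs](std::size_t batchSize) {
        std::size_t n = std::min(batchSize, orphanedDocs.size());
        orphanedDocs.erase(orphanedDocs.end() - n, orphanedDocs.end());
        return n;
    };
    std::cout << rateLimitedRangeDeletion(deleteBatch, 128,
                                          std::chrono::milliseconds(1),
                                          std::chrono::milliseconds(1))
              << " documents deleted\n";  // prints: 2500 documents deleted
}
```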
- -## Orphan filtering -There are two cases that arise where orphaned documents need to be filtered out from the results of commands. The first case occurs while the migration protocol described above is in progress. Queries on the recipient that include documents in the chunk that is being migrated will need to be filtered out. This is because this chunk is not yet owned by the recipient shard and should not be visible there until the migration commits. - -The other case where orphans need to be filtered occurs once the migration is completed but the orphaned documents on the donor have not yet been deleted. The results of the filtering depend on what version of the chunk is in use by the query. If the query was in flight before the migration was completed, any documents that were moved by the migration must still be returned. The orphan deletion mechanism described above respects this and will not delete these orphans until the outstanding queries complete. If the query has started after the migration was committed, then the orphaned documents will not be returned since they are not owned by this shard. - -Shards store a copy of the chunk distribution for each collection for which they own data. This copy, often called the "filtering metadata" since it is used to filter out orphaned documents for chunks the shard does not own, is stored in memory in the [CollectionShardingStateMap](https://github.com/mongodb/mongo/blob/r4.4.0-rc3/src/mongo/db/s/collection_sharding_state.cpp#L45). The map is keyed by namespace, and the values are instances of [CollectionShardingRuntime](https://github.com/mongodb/mongo/blob/8b8488340f53a71f29f40ead546e36c59323ca93/src/mongo/db/s/collection_sharding_runtime.h). A CollectionShardingRuntime stores the filtering metadata for the collection [in its MetadataManager member](https://github.com/mongodb/mongo/blob/8b8488340f53a71f29f40ead546e36c59323ca93/src/mongo/db/s/metadata_manager.h#L277-L281). - -A query obtains a reference to the current filtering metadata for the collection -from the [MetadataManager](https://github.com/mongodb/mongo/blob/af62a3eeaf0b1101cb2f6e8e7595b70f2fe2f10f/src/mongo/db/s/metadata_manager.cpp#L162-L194) for the collection by calling the [CollectionShardingRuntime::getOwnershipFilter()](https://github.com/mongodb/mongo/blob/8b8488340f53a71f29f40ead546e36c59323ca93/src/mongo/db/s/collection_sharding_state.h#L99-L124) function. The MetadataManager keeps previous versions of the filtering metadata for queries that were still in flight before the migration was committed. If a cluster timestamp is specified, then an [earlier version](https://github.com/mongodb/mongo/blob/af62a3eeaf0b1101cb2f6e8e7595b70f2fe2f10f/src/mongo/db/s/metadata_manager.cpp#L177-L178) of the metadata is returned. The filtering metadata is [used by query commands](https://github.com/mongodb/mongo/blob/8b8488340f53a71f29f40ead546e36c59323ca93/src/mongo/db/query/stage_builder.cpp#L294-L305) to determine if a specific [document is owned](https://github.com/mongodb/mongo/blob/b9bd6ded04f0136157c50c85c8bdc6bb176cccc9/src/mongo/db/exec/shard_filter.cpp#L81) by the current shard. - -## Replicating the orphan filtering table - ---- - -# Auto-splitting and auto-balancing - -Data may need to be redistributed for many reasons, such as a shard being added, a shard being -requested to be removed, or data being inserted in an imbalanced way. - -The config server replica set durably stores settings for the maximum chunk size and whether chunks -should be automatically split and balanced. 
- -## Auto-splitting -When the mongos routes an update or insert to a chunk, the chunk may grow beyond the configured -chunk size (specified by the server parameter maxChunkSizeBytes) and trigger an auto-split, which -partitions the oversized chunk into smaller chunks. The shard that houses the chunk is responsible -for: -* determining if the chunk should be auto-split -* selecting the split points -* committing the split points to the config server -* refreshing the routing table cache -* updating in memory chunk size estimates - -### Deciding when to auto-split a chunk -The server schedules an auto-split if: -1. it detected that the chunk exceeds a threshold based on the maximum chunk size -2. there is not already a split in progress for the chunk - -Every time an update or insert gets routed to a chunk, the server tracks the bytes written to the -chunk in memory through the collection's ChunkManager. The ChunkManager has a ChunkInfo object for -each of the collection's entries in the local config.chunks. Through the ChunkManager, the server -retrieves the chunk's ChunkInfo and uses its ChunkWritesTracker to increment the estimated chunk -size. - -Even if the new size estimate exceeds the maximum chunk size, the server still needs to check that -there is not already a split in progress for the chunk. If the ChunkWritesTracker is locked, there -is already a split in progress on the chunk and trying to start another split is prohibited. -Otherwise, if the chunk is oversized and there is no split for the chunk in progress, the server -submits the chunk to the ChunkSplitter to be auto-split. - -### The auto split task -The ChunkSplitter is a replica set primary-only service that manages the process of auto-splitting -chunks. The ChunkSplitter runs auto-split tasks asynchronously - thus, distinct chunks can -undergo an auto-split concurrently. - -To prepare for the split point selection process, the ChunkSplitter flags that an auto-split for the -chunk is in progress. There may be incoming writes to the original chunk while the split is in -progress. For this reason, the estimated data size in the ChunkWritesTracker for this chunk is -reset, and the same counter is used to track the number of bytes written to the chunk while the -auto-split is in progress. - -splitVector manages the bulk of the split point selection logic. First, the data size and number of -records are retrieved from the storage engine to approximate the number of keys that each chunk -partition should have. This number is calculated such that if each document were uniform in size, -each chunk would be half of maxChunkSizeBytes. - -If the actual data size is less than the maximum chunk size, no splits are made at all. -Additionally, if all documents in the chunk have the same shard key, no splits can be made. In this -case, the chunk may be classified as a jumbo chunk. - -In the general case, splitVector: -* performs an index scan on the shard key index -* adds every k'th key to the vector of split points, where k is the approximate number of keys each chunk should have -* returns the split points - -If no split points were returned, then the auto-split task gets abandoned and the task is done. 
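Conceptually, the split point selection just described boils down to walking the shard key index in order and emitting every k'th key, as in the following self-contained sketch. The function name and the use of integer keys are illustrative assumptions; the real splitVector code operates on BSON shard key values and has additional handling that is not shown here.

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

// Given shard key values in index order, returns every k'th key as a split point, where k
// approximates how many keys each resulting chunk should contain.
std::vector<long long> selectSplitPoints(const std::vector<long long>& keysInIndexOrder,
                                         std::size_t keysPerChunk) {
    std::vector<long long> splitPoints;
    if (keysPerChunk == 0 || keysInIndexOrder.size() <= keysPerChunk) {
        return splitPoints;  // Chunk is small enough: no split.
    }
    for (std::size_t i = keysPerChunk; i < keysInIndexOrder.size(); i += keysPerChunk) {
        // Skip duplicate boundaries: a split point equal to the previous one would produce an
        // empty chunk (a chunk whose documents all share one shard key cannot be split).
        if (splitPoints.empty() || keysInIndexOrder[i] != splitPoints.back()) {
            splitPoints.push_back(keysInIndexOrder[i]);
        }
    }
    return splitPoints;
}

int main() {
    std::vector<long long> keys;
    for (long long k = 0; k < 10; ++k) keys.push_back(k);

    for (long long p : selectSplitPoints(keys, 3)) std::cout << p << " ";
    std::cout << "\n";  // prints: 3 6 9
}
```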
- -If split points are successfully generated, the ChunkSplitter executes the final steps of the -auto-split task where the shard: -* commits the split points to config.chunks on the config server by removing the document containing - the original chunk and inserting new documents corresponding to the new chunks indicated by the -split points -* refreshes the routing table cache -* replaces the original oversized chunk's ChunkInfo with a ChunkInfo object for each partition. The - estimated data size for each new chunk is the number bytes written to the original chunk while the -auto-split was in progress - -### Top Chunk Optimization -While there are several optimizations in the auto-split process that won't be covered here, it's -worthwhile to note the concept of top chunk optimization. If the chunk being split is the first or -last one on the collection, there is an assumption that the chunk is likely to see more insertions -if the user is inserting in ascending/descending order with respect to the shard key. So, in top -chunk optimization, the first (or last) key in the chunk is set as a split point. Once the split -points get committed to the config server, and the shard refreshes its CatalogCache, the -ChunkSplitter tries to move the top chunk out of the shard to prevent the hot spot from sitting on a -single shard. - -#### Code references -* [**ChunkInfo**](https://github.com/mongodb/mongo/blob/18f88ce0680ab946760b599437977ffd60c49678/src/mongo/s/chunk.h#L44) class -* [**ChunkManager**](https://github.com/mongodb/mongo/blob/master/src/mongo/s/chunk_manager.h) class -* [**ChunkSplitter**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/chunk_splitter.h) class -* [**ChunkWritesTracker**](https://github.com/mongodb/mongo/blob/master/src/mongo/s/chunk_writes_tracker.h) class -* [**splitVector**](https://github.com/mongodb/mongo/blob/18f88ce0680ab946760b599437977ffd60c49678/src/mongo/db/s/split_vector.cpp#L61) method -* [**splitChunk**](https://github.com/mongodb/mongo/blob/18f88ce0680ab946760b599437977ffd60c49678/src/mongo/db/s/split_chunk.cpp#L128) method -* [**commitSplitChunk**](https://github.com/mongodb/mongo/blob/18f88ce0680ab946760b599437977ffd60c49678/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp#L316) method where chunk splits are committed - -## Auto-balancing - -The balancer is a background process that monitors the chunk distribution in a cluster. It is enabled by default and can be turned off for the entire cluster or per-collection at any time. - -The balancer process [runs in a separate thread](https://github.com/mongodb/mongo/blob/b4094a6541bf5745cb225639c2486fcf390c4c38/src/mongo/db/s/balancer/balancer.cpp#L318-L490) on the config server primary. It runs continuously, but in "rounds" with a 10 second delay between each round. During a round, the balancer uses the current chunk distribution and zone information for the cluster to decide if any chunk migrations or chunk splits are necessary. - -In order to retrieve the necessary distribution information, the balancer has a reference to the ClusterStatistics which is an interface that [obtains the data distribution and shard utilization statistics for the cluster](https://github.com/mongodb/mongo/blob/d501442a8ed07ba6e05cce3db8b83a5d7f4b7313/src/mongo/db/s/balancer/cluster_statistics_impl.cpp#L101-L166). 
During each round, the balancer uses the ClusterStatistics to get the current stats in order to [create a DistributionStatus for every collection](https://github.com/mongodb/mongo/blob/d501442a8ed07ba6e05cce3db8b83a5d7f4b7313/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp#L63-L116). The DistributionStatus contains information about which chunks are owned by which shards, the zones defined for the collection, and which chunks are a part of which zones. Note that because the DistributionStatus is per collection, this means that the balancer optimizes for an even distribution per collection rather than for the entire cluster. - -### What happens during a balancer round - -During each round, the balancer uses the DistributionStatus for each collection to [check if any chunk has a range that violates a zone boundary](https://github.com/mongodb/mongo/blob/d501442a8ed07ba6e05cce3db8b83a5d7f4b7313/src/mongo/db/s/balancer/balancer.cpp#L410). Any such chunks will be split into smaller chunks with new min and max values equal to the zone boundaries using the splitChunk command. - -After any chunk splits have completed, the balancer then selects one or more chunks to migrate. The balancer again uses the DistributionStatus for each collection in order to select chunks to move. The balancer [prioritizes which chunks to move](https://github.com/mongodb/mongo/blob/d501442a8ed07ba6e05cce3db8b83a5d7f4b7313/src/mongo/db/s/balancer/balancer_policy.cpp#L360-L543) by the following: -1. If any chunk in this collection is owned by a shard that is draining (being removed), select this chunk first. -1. If no chunks for this collection belong to a draining shard, check for any chunks that violate zones. -1. If neither of the above is true, the balancer can select chunks to move in order to obtain the "ideal" number of chunks per shard. This value is calculated by dividing [the total number of chunks associated with some zone] / [the total number of shards associated with this zone]. For chunks that do not belong to any zone, this value is instead calculated by dividing [the total number of chunks that do not belong to any zone] / [the total number of shards]. The balancer will pick a chunk currently owned by the shard that is [most overloaded](https://github.com/mongodb/mongo/blob/d501442a8ed07ba6e05cce3db8b83a5d7f4b7313/src/mongo/db/s/balancer/balancer_policy.cpp#L272-L293) (has the highest number of chunks in the zone). - -In each of these cases, the balancer will pick the ["least loaded" shard](https://github.com/mongodb/mongo/blob/d501442a8ed07ba6e05cce3db8b83a5d7f4b7313/src/mongo/db/s/balancer/balancer_policy.cpp#L244-L270) (the shard with the lowest number of chunks in the zone) as the recipient shard for the chunk. If a shard already has more chunks than the "ideal" number, it is draining, or it is already involved in a migration during this balancer round, the balancer will not pick this shard as the recipient. Similarly, the balancer will not select a chunk to move that is currently owned by a shard that is already involved in a migration. This is because a shard cannot be involved in more than one migration at any given time. - -If the balancer has selected any chunks to move during a round, it will [schedule a migration for each of them](https://github.com/mongodb/mongo/blob/d501442a8ed07ba6e05cce3db8b83a5d7f4b7313/src/mongo/db/s/balancer/balancer.cpp#L631-L693) using the migration procedure outlined above. A balancer round is finished once all of the scheduled migrations have completed. 
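Ignoring zones and draining shards, the core of the chunk selection heuristic (move a chunk from the most overloaded shard to the least loaded one, but only if the most overloaded shard is above the ideal number of chunks) can be sketched as below. All names are hypothetical illustrations, not the BalancerPolicy implementation; the ideal is rounded up so an already balanced distribution yields no migration.

```cpp
#include <algorithm>
#include <iostream>
#include <map>
#include <string>

struct MigrationSuggestion {
    bool shouldMigrate = false;
    std::string donor;
    std::string recipient;
};

// Picks a (donor, recipient) pair for one migration: move a chunk from the shard with the most
// chunks to the shard with the fewest, but only if the donor is above the "ideal" count.
MigrationSuggestion suggestMigration(const std::map<std::string, int>& chunksPerShard) {
    if (chunksPerShard.size() < 2) return {};

    int totalChunks = 0;
    for (const auto& [shard, numChunks] : chunksPerShard) totalChunks += numChunks;

    const int numShards = static_cast<int>(chunksPerShard.size());
    const int idealPerShard = (totalChunks + numShards - 1) / numShards;  // Round up.

    auto [minIt, maxIt] = std::minmax_element(
        chunksPerShard.begin(), chunksPerShard.end(),
        [](const auto& a, const auto& b) { return a.second < b.second; });

    if (maxIt->second <= idealPerShard) return {};  // Already balanced enough.
    return {true, maxIt->first, minIt->first};
}

int main() {
    std::map<std::string, int> chunksPerShard{{"shardA", 12}, {"shardB", 3}, {"shardC", 6}};
    auto suggestion = suggestMigration(chunksPerShard);
    if (suggestion.shouldMigrate) {
        std::cout << "move a chunk from " << suggestion.donor << " to "
                  << suggestion.recipient << "\n";  // prints: move a chunk from shardA to shardB
    }
}
```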
- -## Important caveats - -### Jumbo Chunks - -By default, a chunk is considered "too large to migrate" if its size exceeds the maximum size specified in the chunk size configuration parameter. If a chunk is this large and the balancer schedules either a migration or splitChunk, the migration or splitChunk will fail and the balancer will set the chunk's "jumbo" flag to true. However, if the balancer configuration setting 'attemptToBalanceJumboChunks' is set to true, the balancer will not fail a migration or splitChunk due to the chunk's size. Regardless of whether 'attemptToBalanceJumboChunks' is true or false, the balancer will not attempt to schedule a migration or splitChunk if the chunk's "jumbo" flag is set to true. Note that because a chunk's "jumbo" flag is not set to true until a migration or splitChunk has failed due to its size, it is possible for a chunk to be larger than the maximum chunk size and not actually be marked "jumbo" internally. The reason that the balancer will not schedule a migration for a chunk marked "jumbo" is to avoid the risk of forever scheduling the same migration or split - if a chunk is marked "jumbo" it means a migration or splitChunk has already failed. The clearJumboFlag command can be run for a chunk in order to clear its "jumbo" flag so that the balancer will schedule this migration in the future. - -#### Code references -* [**Balancer class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/balancer/balancer.h) -* [**BalancerPolicy class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/balancer/balancer_policy.h) -* [**BalancerChunkSelectionPolicy class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h) -* [**ClusterStatistics class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/balancer/cluster_statistics.h) - ---- - -# DDL operations - -Indexes are not stored in the routing table, so a router forwards index operations to all shards -that own chunks for the collection. - -Collections are always created as unsharded, meaning they are not stored in the routing table, so -a router forwards create collection requests directly to the primary shard for the database. A -router also forwards rename collection requests directly to the primary shard, since only renaming -unsharded collections is supported. - -A router forwards all other DDL operations, such as dropping a database or sharding a collection, -to the config server primary. The config server primary serializes conflicting operations, and -either itself coordinates the DDL operation or hands off the coordination to a shard. Coordinating -the DDL operation involves applying the operation on the correct set of shards and updating the -authoritative routing table. 
- -#### Code references -* Example of a DDL command (create indexes) that mongos -[**forwards to all shards that own chunks**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/commands/cluster_create_indexes_cmd.cpp#L83) -* Example of a DDL command (create collection) that mongos -[**forwards to the primary shard**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/commands/cluster_create_cmd.cpp#L128) -* Example of a DDL command (drop collection) mongos -[**forwards to the config server primary**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/commands/cluster_drop_cmd.cpp#L81-L82) -The business logic for most DDL commands that the config server coordinates lives in the -[**ShardingCatalogManager class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager.h#L86), -including the logic for -[**dropCollection**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp#L417). - -## Important caveats - -### Database creation - -There is no explicit command to create a database. When a router receives a write command, an entry -for the database is created in the config.databases collection if one doesn't already exist. Unlike -all other DDL operations, creating a database only involves choosing a primary shard (the shard with -the smallest total data size is chosen) and writing the database entry to the authoritative routing -table. That is, creating a database does not involve modifying any state on shards, since on shards, -a database only exists once a collection in it is created. - -#### Code references -* Example of mongos -[**asking the config server to create a database**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/commands/cluster_create_cmd.cpp#L116) -if needed - -### Retrying internally - -DDL operations often involve multiple hops between nodes. Generally, if a command is idempotent on -the receiving node, the sending node will retry it upon receiving a retryable error, such as a -network or NotPrimaryError. There are some cases where the sending node retries even though the -command is not idempotent, such as in shardCollection. In this case, the receiving node may return -ManualInterventionRequired if the first attempt failed partway. - -#### Code references -* Example of a DDL command (shard collection) -[**failing with ManualInterventionRequired**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/shardsvr_shard_collection.cpp#L129) - -### Serializing conflicting operations - -The concurrency control scheme has evolved over time and involves several different locks. - -Distributed locks are locks on a string resource, typically a database name or collection name. They -are acquired by doing a majority write to a document in the `config.locks` collection on the config -servers. The write includes the identity of the process acquiring the lock. The process holding a -distributed lock must also periodically "ping" (update) a document in the `config.lockpings` -collection on the config servers containing its process id. If the process's document is not pinged -for 15 minutes or more, the process's distributed locks are allowed to be "overtaken" by another -process. Note that this means a distributed lock can be overtaken even though the original process -that had acquired the lock continues to believe it owns the lock. 
See -[**this blog post**](https://martin.kleppmann.com/2016/02/08/how-to-do-distributed-locking.html) for -an excellent description of the distributed locking problem. - -In the first implementation of distributed locks, a thread would wait for a lock to be released by -polling the lock document every 500 milliseconds for 20 seconds (and return a LockBusy error if the -thread never saw the lock as available). NamespaceSerializer locks were introduced to allow a thread -to be notified more efficiently when a lock held by another thread on the same node was released. -NamespaceSerializer locks were added only to DDL operations which were seen to frequently fail with -"LockBusy" when run concurrently on the same database, such as dropCollection. - -Global ResourceMutexes are the most recent, and are taken to serialize modifying specific config -collections, such as config.shards, config.chunks, and config.tags. For example, splitChunk, -mergeChunks, and moveChunk all take the chunk ResourceMutex. - -#### Code references -* [**DDLLockManager class**](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/ddl_lock_manager.h) -* The -[**global ResourceMutexes**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/config/sharding_catalog_manager.h#L555-L581) - ---- - -# The vector clock and causal consistency - -The vector clock is used to manage various logical times and clocks across the distributed system, for the purpose of ensuring various guarantees about the ordering of distributed events (ie. "causality"). - -These causality guarantees are implemented by assigning certain _logical times_ to relevant _events_ in the system. These logical times are strictly monotonically increasing, and are communicated ("gossiped") between nodes on all messages (both requests and responses). This allows the order of distributed operations to be controlled in the same manner as with a Lamport clock. - -## Vector clock - -The VectorClock class manages these logical times, all of which have similar requirements (and possibly special relationships with each other). There are currently three such components of the vector clock: - -1. _ClusterTime_ -1. _ConfigTime_ -1. _TopologyTime_ - -Each of these has a type of LogicalTime, which is similar to Timestamp - it is an unsigned 64 bit integer representing a combination of unix epoch (high 32 bit) and an integer 32 bit counter (low 32 bit). Together, the LogicalTimes for all vector clock components are known as the VectorTime. Each LogicalTime must always strictly monotonically increase, and there are two ways that this can happen: - -1. _Ticking_ is when a node encounters circumstances that require it to unilaterally increase the value. This can either be some incremental amount (usually 1), or to some appropriate LogicalTime value. -1. _Advancing_ happens in response to learning about a larger value from some other node, ie. gossiping. - -Each component has rules regarding when (and how) it is ticked, and when (and how) it is gossiped. These define the system state that the component "tracks", what the component can be used for, and its relationship to the other components. - -Since mongoses are stateless, they can never tick any vector clock component. In order to enforce this, the VectorClockMutable class (a sub-class of VectorClock that provides the ticking API) is not linked on mongos. 
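The tick/advance behaviour of a single component can be illustrated with the following self-contained sketch. The class and method names are hypothetical; the real VectorClock manages all three components together, takes atomic snapshots, and restricts which components a given node type may tick.

```cpp
#include <algorithm>
#include <chrono>
#include <cstdint>
#include <iostream>

// A 64-bit logical time: the high 32 bits hold a unix-epoch seconds value, the low 32 bits a counter.
struct LogicalTime {
    std::uint64_t value = 0;

    std::uint32_t secs() const { return static_cast<std::uint32_t>(value >> 32); }
    std::uint32_t counter() const { return static_cast<std::uint32_t>(value & 0xffffffffu); }
};

class LogicalClockComponent {
public:
    // Ticking: unilaterally move the time forward. Bump the seconds part up to the current wall
    // clock if it lags behind, then increment the counter by 'nTicks'.
    LogicalTime tick(std::uint64_t nTicks) {
        const auto nowSecs = static_cast<std::uint64_t>(
            std::chrono::duration_cast<std::chrono::seconds>(
                std::chrono::system_clock::now().time_since_epoch()).count());
        std::uint64_t base = std::max(_time.value, nowSecs << 32);
        _time.value = base + nTicks;
        return _time;
    }

    // Advancing: adopt a larger value learned through gossiping; never move backwards.
    void advance(LogicalTime observed) { _time.value = std::max(_time.value, observed.value); }

    LogicalTime current() const { return _time; }

private:
    LogicalTime _time;
};

int main() {
    LogicalClockComponent clusterTime;
    clusterTime.tick(1);  // e.g. while preparing an oplog entry
    clusterTime.advance(LogicalTime{clusterTime.current().value + 5});  // e.g. a gossiped-in value
    auto t = clusterTime.current();
    std::cout << "secs=" << t.secs() << " counter=" << t.counter() << "\n";
}
```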
- -## Component relationships - -As explained in more detail below, certain relationships are preserved between the vector clock components, most importantly: -``` -ClusterTime >= ConfigTime >= TopologyTime -``` - -As a result, it is important to ensure that times are fetched correctly from the VectorClock. The `getTime()` function returns a `VectorTime` which contains an atomic snapshot of all components. Thus code should always be written as follows: -``` -auto currentTime = VectorClock::get(opCtx)->getTime(); -doSomeWork(currentTime.clusterTime()); -doOtherWork(currentTime.configTime()); // Always passes a timestamp <= what was passed to doSomeWork() -``` - -And generally speaking, code such as the following is incorrect: -``` -doSomeWork(VectorClock::get(opCtx)->getTime().clusterTime()); -doOtherWork(VectorClock::get(opCtx)->getTime().configTime()); // Might pass a timestamp > what was passed to doSomeWork() -``` -because the timestamp received by `doOtherWork()` could be greater than the one received by `doSomeWork()` (ie. apparently violating the property). - -To discourage this incorrect pattern, it is forbidden to use the result of getTime() as a temporary (r-value) in this way; it must always first be stored in a variable. - -## ClusterTime - -Starting from v3.6, MongoDB provides session-based causal consistency. All operations in the causally -consistent session will be executed in an order that preserves causality. In particular it -means that the client of the session is guaranteed to: - -* Read own writes -* Monotonic reads and writes -* Writes follow reads - -Causal consistency guarantees are described in detail in the [**server -documentation**](https://docs.mongodb.com/v4.0/core/read-isolation-consistency-recency/#causal-consistency). - -### ClusterTime ticking -The ClusterTime tracks the state of user databases. As such, it is ticked only when the state of user databases changes, i.e. when a mongod in PRIMARY state performs a write. (In fact, there are a small number of other situations that tick ClusterTime, such as during step-up after a mongod node has won an election.) The OpTime value used in the oplog entry for the write is obtained by converting this ticked ClusterTime to a Timestamp, and appending the current replication election term. - -The ticking itself is performed by first incrementing the unix epoch part to the current walltime (if necessary), and then incrementing the counter by 1. (Parallel insertion into the oplog will increment by N, rather than 1, and allocate the resulting range of ClusterTime values to the oplog entries.) - -### ClusterTime gossiping -The ClusterTime is gossiped by all nodes in the system: mongoses, shard mongods, config server mongods, and clients such as drivers or the shell. It is gossiped with both internal clients (other mongod/mongos nodes in the cluster) and external clients (drivers, the shell). It uses the `$clusterTime` field to do this, using the `SignedComponentFormat` described below. - -### ClusterTime example -Example of ClusterTime gossiping and ticking: -1. Client sends a write command to the primary, the message includes its current value of the ClusterTime: T1. -1. Primary node receives the message and advances its ClusterTime to T1, if T1 is greater than the primary -node's current ClusterTime value. -1. Primary node increments the cluster time to T2 in the process of preparing the OpTime for the write. This is -the only time a new value of ClusterTime is generated. -1. Primary node writes to the oplog. -1. 
Result is returned to the client, it includes the new ClusterTime T2. -1. The client advances its ClusterTime to T2. - -### SignedComponentFormat: ClusterTime signing - -As explained above, nodes advance their ClusterTime to the maximum value that they receive in the client -messages. The next oplog entry will use this value in the timestamp portion of the OpTime. But a malicious client -could modify their maximum ClusterTime sent in a message. For example, it could send the ``. This value, once written to the oplogs of replica set nodes, will not be incrementable (since LogicalTimes are unsigned) and the -nodes will then be unable to accept any changes (writes against the database). This ClusterTime -would eventually be gossiped across the entire cluster, affecting the availability of the whole -system. The only ways to recover from this situation involve downtime (eg. dump and restore the -entire cluster). - -To mitigate this risk, a HMAC-SHA1 signature is used to verify the value of the ClusterTime on the -server. ClusterTime values can be read by any node, but only MongoDB processes can sign new values. -The signature cannot be generated by clients. This means that servers can trust that validly signed -ClusterTime values supplied by (otherwise untrusted) clients must have been generated by a server. - -Here is an example of the document that gossips ClusterTime: -``` -"$clusterTime" : { - "clusterTime" : - Timestamp(1495470881, 5), - "signature" : { - "hash" : BinData(0, "7olYjQCLtnfORsI9IAhdsftESR4="), - "keyId" : "6422998367101517844" - } +* CRUD operation: Comes from [Create, Read, Update, Delete](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete), and indicates operations which modify a collection's data as opposed to the catalog. +* CSRS: **C**onfig **S**erver as a **R**eplica **S**et. This is a fancy name for the [config server](https://www.mongodb.com/docs/manual/core/sharded-cluster-config-servers/). Comes from the times of version 3.2 and earlier, when there was a legacy type of Config server called [SCCC](https://www.mongodb.com/docs/manual/release-notes/3.4-compatibility/#removal-of-support-for-sccc-config-servers) which didn't operate as a replica set. +* CSS: [Collection Sharding State](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/collection_sharding_state.h#L59) +* DDL operation: Comes from [Data Definition Language](https://en.wikipedia.org/wiki/Data_definition_language), and indicates operations which modify the catalog (e.g., create collection, create index, drop database) as opposed to CRUD, which modifies the data. +* DSS: [Database Sharding State](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/database_sharding_state.h#L42) +* Routing Info: The subset of data stored in the [catalog containers](README_sharding_catalog.md#catalog-containers) which is used for making routing decisions. As of the time of this writing, the contents of *config.databases*, *config.collections*, *config.indexes* and *config.chunks*. +* SS: [Sharding State](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/sharding_state.h#L51) + +## Sharding code architecture +The graph further down visualises the architecture of the MongoDB Sharding system and the relationships between its various components and the links below point to the relevant sections describing these components. 
+ +- [Sharding catalog](README_sharding_catalog.md#sharding-catalog) +- [Router role](README_sharding_catalog.md#router-role) +- [Shard role](README_sharding_catalog.md#router-role) +- [Routing Info Consistency Model](README_routing_info_cache_consistency_model.md) +- [Shard versioning protocol](README_versioning_protocols.md) +- [Balancer](README_balancer.md) +- [DDL Operations](README_ddl_operations.md) + - [Migrations](README_migrations.md) + - [UserWriteBlocking](README_user_write_blocking.md) +- [Sessions and Transactions](README_sessions_and_transactions.md) +- [Startup and Shutdown](README_startup_and_shutdown.md) + +```mermaid +C4Component + +Container_Boundary(Sharding, "Sharding", $link="README_new.md") { + Container_Boundary(CatalogAPI, "Catalog API", $link="README_sharding_catalog.md") { + Component(RouterRole, "Router Role", $link="README_sharding_catalog.md#router-role") + Component(ShardRole, "Shard Role", $link="README_sharding_catalog.md#shard-role") + } + + Container_Boundary(CatalogRuntime, "Catalog Runtime") { + Container_Boundary(CatalogContainers, "Catalog Containers") { + ContainerDb(CSRSCollections, "CSRS Collections") + ContainerDb(MDBCatalog, "__mdb_catalog") + ContainerDb(AuthoritativeShardLocalCollections, "Authoritative Shard Local Collections") + ContainerDb(ShardLocalCollections, "Shard Local Collections") + } + + Container_Boundary(CatalogContainerCaches, "Catalog Container Caches") { + Component(CatalogCache, "Catalog Cache") + Component(SSCache, "Sharding State (cache)") + Component(DSSCache, "Database Sharding State (cache)") + Component(CSSCache, "Collection Sharding State (cache)") + } + } + + Container_Boundary(ShardingServices, "Sharding Services") { + Component(Balancer, "Balancer") + } + + Container_Boundary(ShardingDDL, "Sharding DDL") { + Component(DDLCoordinators, "DDL Coordinators") + Component(DataCloning, "Data Cloning") + } } ``` -The keyId is used to find the key that generated the hash. The keys are stored and generated only on MongoDB -processes. This seals the ClusterTime value, as time can only be incremented on a server that has access to a signing key. -Every time the mongod or mongos receives a message that includes a -ClusterTime that is greater than the value of its logical clock, they will validate it by generating the signature using the key -with the keyId from the message. If the signature does not match, the message will be rejected. - -### Key management -To provide HMAC message verification all nodes inside a security perimeter i.e. mongos and mongod need to access a secret key to generate and -verify message signatures. MongoDB maintains keys in a `system.keys` collection in the `admin` -database. In a sharded cluster this collection is located on the config server replica set and managed by the config server primary. -In a replica set, this collection is managed by the primary node and propagated to secondaries via normal replication. - -The key document has the following format: -``` -{ - _id: , - purpose: , - key: , - expiresAt: -} -``` - -The node that has the `system.keys` collection runs a thread that periodically checks if the keys -need to be updated, by checking its `expiresAt` field. The new keys are generated in advance, so -there is always one key that is valid for the next 3 months (the default). The signature validation -requests the key that was used for signing the message by its Id which is also stored in the -signature. 
Since the old keys are never deleted from the `system.keys` collection they are always -available to verify the messages signed in the past. - -As the message verification is on the critical path each node also keeps an in memory cache of the -valid keys. - -### Handling operator errors -The risk of malicious clients affecting ClusterTime is mitigated by a signature, but it is still possible to advance the -ClusterTime to the end of time by changing the wall clock value. This may happen as a result of operator error. Once -the data with the OpTime containing the end of time timestamp is committed to the majority of nodes it cannot be -changed. To mitigate this, there is a limit on the magnitude by which the (epoch part of the) ClusterTime can be -advanced. This limit is the `maxAcceptableLogicalClockDriftSecs` parameter (default value is one year). - -### Causal consistency in sessions -When a write event is sent from a client, that client has no idea what time is associated with the write, because the time -was assigned after the message was sent. But the node that processes the write does know, as it incremented its -ClusterTime and applied the write to the oplog. To make the client aware of the write's ClusterTime, it will be included -in the `operationTime` field of the response. To make sure that the client knows the time of all events, every -response (including errors) will include the `operationTime` field, representing the Stable Cluster -Time i.e. the ClusterTime of the latest item added to the oplog at the time the command was executed. -Now, to make the follow up read causally consistent the client will pass the exact time of the data it needs to read - -the received `operationTime` - in the `afterClusterTime` field of the request. The data node -needs to return data with an associated ClusterTime greater than or equal to the requested `afterClusterTime` value. - -Below is an example of causally consistent "read own write" for the products collection that is sharded and has chunks on Shards A and B. -1. The client sends `db.products.insert({_id: 10, price: 100})` to a mongos and it gets routed to Shard A. -1. The primary on Shard A computes the ClusterTime, and ticks as described in the previous sections. -1. Shard A returns the result with the `operationTime` that was written to the oplog. -1. The client conditionally updates its local `lastOperationTime` value with the returned `operationTime` value -1. The client sends a read `db.products.aggregate([{$count: "numProducts"}])` to mongos and it gets routed to all shards where this collection has chunks: i.e. Shard A and Shard B. - To be sure that it can "read own write" the client includes the `afterClusterTime` field in the request and passes the `operationTime` value it received from the write. -1. Shard B checks if the data with the requested OpTime is in its oplog. If not, it performs a noop write, then returns the result to mongos. - It includes the `operationTime` that was the top of the oplog at the moment the read was performed. -1. Shard A checks if the data with the requested OpTime is in its oplog and returns the result to mongos. It includes the `operationTime` that was the top of the oplog at the moment the read was performed. -1. mongos aggregates the results and returns to the client with the largest `operationTime` it has seen in the responses from shards A and B. 
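The "read own writes" flow in the example above can be condensed into a small simulation: the client remembers the `operationTime` of its last response and sends it back as `afterClusterTime`, and a shard that has not yet reached that time advances (in the real system, via a no-op write) before answering. This is an illustrative sketch with hypothetical names, not the server's implementation.

```cpp
#include <algorithm>
#include <cstdint>
#include <iostream>

// Minimal simulation: a shard's clock only moves when a write is applied, and a read blocks
// (here: advances via a simulated no-op write) until the requested afterClusterTime is reached.
struct ShardSim {
    std::uint64_t lastApplied = 0;  // Top of this shard's oplog, as a plain logical time.

    std::uint64_t write() { return ++lastApplied; }  // Returns the operationTime of the write.

    std::uint64_t read(std::uint64_t afterClusterTime) {
        if (lastApplied < afterClusterTime) lastApplied = afterClusterTime;  // Simulated no-op write.
        return lastApplied;  // operationTime returned with the read result.
    }
};

int main() {
    ShardSim shardA, shardB;
    std::uint64_t clientLastOperationTime = 0;

    // 1. Write routed to shard A; the client records the returned operationTime.
    clientLastOperationTime = std::max(clientLastOperationTime, shardA.write());

    // 2. Scatter-gather read sent to both shards with afterClusterTime attached, so even shard B
    //    (which never saw the write) waits/advances before answering.
    std::uint64_t opTimeA = shardA.read(clientLastOperationTime);
    std::uint64_t opTimeB = shardB.read(clientLastOperationTime);

    // 3. mongos returns the largest operationTime it saw; the client keeps it for the next request.
    clientLastOperationTime = std::max({clientLastOperationTime, opTimeA, opTimeB});
    std::cout << "client lastOperationTime = " << clientLastOperationTime << "\n";  // prints: 1
}
```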
- -## ConfigTime - -ConfigTime is similar to the legacy `configOpTime` value used for causally consistent reads from config servers, but as a LogicalTime rather than an OpTime. - -### ConfigTime ticking -The ConfigTime tracks the sharding state stored on the config servers. As such, it is ticked only by config servers when they advance their majority commit point, and is ticked by increasing to that majority commit point value. Since the majority commit point is based on oplog OpTimes, which are based on the ClusterTime, this means that the ConfigTime ticks between ClusterTime values. It also means that it is always true that ConfigTime <= ClusterTime, ie. ConfigTime "lags" ClusterTime. - -The ConfigTime value is then used when querying the config servers to ensure that the returned state -is causally consistent. This is done by using the ConfigTime as the parameter to the `$afterOpTime` -field of the Read Concern (with an Uninitialised term, so that it's not used in comparisons), and as -the `minClusterTime` parameter to the read preference (to ensure that a current config server is -targeted, if possible). - -### ConfigTime gossiping -The ConfigTime is gossiped only by sharded cluster nodes: mongoses, shard mongods, and config server mongods. Clients (drivers/shell) and plain replica sets do not gossip ConfigTime. In addition, ConfigTime is only gossiped with internal clients (other mongos/mongod nodes), as identified by the kInternalClient flag (set during the `hello` command sent by mongos/mongod). - -It uses the `$configTime` field with the `PlainComponentFormat`, which simply represents the LogicalTime value as a Timestamp: -``` -"$configTime" : Timestamp(1495470881, 5) -``` - -## TopologyTime - -TopologyTime is related to the "topology" of the sharded cluster, in terms of the shards present. - -### TopologyTime ticking -Since the TopologyTime tracks the cluster topology, it ticks when a shard is added or removed from the cluster. This is done by ticking TopologyTime to the ConfigTime of the write issued by the `_configsvrAddShard` or `_configsvrRemoveShard` command. Thus, the property holds that TopologyTime <= ConfigTime, ie. TopologyTime "lags" ConfigTime. - -The TopologyTime value is then used by the ShardRegistry to know when it needs to refresh from the config servers. - -### TopologyTime gossiping -The TopologyTime is gossiped identically to ConfigTime, except with a field name of `$topologyTime`. (Note that this name is very similar to the unrelated `$topologyVersion` field returned by the streaming `hello` command response.) 
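Putting the three components together, the ordering property `ClusterTime >= ConfigTime >= TopologyTime` survives gossiping because each component only ever moves forward, as the following self-contained sketch illustrates. The struct and function names are hypothetical, and the times are plain integers rather than LogicalTime values.

```cpp
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iostream>

// Snapshot of the three vector clock components, with the ordering property described above.
struct VectorTimeSnapshot {
    std::uint64_t clusterTime = 0;
    std::uint64_t configTime = 0;
    std::uint64_t topologyTime = 0;

    bool ordered() const { return clusterTime >= configTime && configTime >= topologyTime; }
};

// Gossip handling: each component only ever advances, so merging two ordered snapshots
// component-wise produces another ordered snapshot.
VectorTimeSnapshot advance(VectorTimeSnapshot local, const VectorTimeSnapshot& observed) {
    local.clusterTime = std::max(local.clusterTime, observed.clusterTime);
    local.configTime = std::max(local.configTime, observed.configTime);
    local.topologyTime = std::max(local.topologyTime, observed.topologyTime);
    return local;
}

int main() {
    VectorTimeSnapshot local{10, 8, 3};
    VectorTimeSnapshot gossipedIn{12, 9, 9};  // e.g. learned from a config server response
    assert(local.ordered() && gossipedIn.ordered());

    VectorTimeSnapshot merged = advance(local, gossipedIn);
    assert(merged.ordered());
    std::cout << merged.clusterTime << " >= " << merged.configTime << " >= "
              << merged.topologyTime << "\n";  // prints: 12 >= 9 >= 9
}
```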
- -## Code references - -* [**Base VectorClock class**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/vector_clock.h) (contains querying, advancing, gossiping the time) -* [**VectorClockMutable class**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/vector_clock_mutable.h) (adds ticking and persistence, not linked on mongos) -* [**VectorClockMongoD class**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/vector_clock_mongod.cpp) (specific implementation used by mongod nodes) -* [**VectorClockMongoS class**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/s/vector_clock_mongos.cpp) (specific implementation used by mongos nodes) - -* [**Definition of which components use which gossiping format**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/vector_clock.cpp#L322-L330) -* [**PlainComponentFormat class**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/vector_clock.cpp#L125-L155) (for gossiping without signatures, and persistence formatting) -* [**SignedComponentFormat class**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/vector_clock.cpp#L186-L320) (for signed gossiping of ClusterTime) -* [**LogicalTimeValidator class**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/logical_time_validator.h) (generates and validates ClusterTime signatures) -* [**KeysCollectionManager class**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/keys_collection_manager.h) (maintains the ClusterTime signing keys in `admin.system.keys`) - -* [**Definition of which components are gossiped internally/externally by mongod**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/vector_clock_mongod.cpp#L389-L406) -* [**Definition of when components may be ticked by mongod**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/vector_clock_mongod.cpp#L408-L450) -* [**Definition of which components are gossiped internally/externally by mongos**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/s/vector_clock_mongos.cpp#L71-L79) - -* [**Ticking ClusterTime**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/repl/local_oplog_info.cpp#L125) (main usage, search for `tickClusterTime` to find unusual cases) -* [**Ticking ConfigTime and TopologyTime**](https://github.com/mongodb/mongo/blob/3681b03baa/src/mongo/db/s/config_server_op_observer.cpp#L252-L256) - ---- - -# Logical Sessions - -Some operations, such as retryable writes and transactions, require durably storing metadata in the -cluster about the operation. However, it's important that this metadata does not remain in the -cluster forever. - -Logical sessions provide a way to durably store metadata for the _latest_ operation in a sequence of -operations. The metadata is reaped if the cluster does not receive a new operation under the logical -session for a reasonably long time (the default is 30 minutes). - -A logical session is identified by its "logical session id," or `lsid`. An `lsid` is a combination -of up to four pieces of information: - -1. `id` - A globally unique id (UUID) generated by the mongo shell, driver, or the `startSession` server command -1. `uid` (user id) - The identification information for the logged-in user (if authentication is enabled) -1. `txnNumber` - An optional parameter set only for internal transactions spawned from retryable writes. Strictly-increasing counter set by the transaction API to match the txnNumber of the corresponding retryable write. -1. 
`txnUUID` - An optional parameter set only for internal transactions spawned inside client sessions. The txnUUID is a globally unique id generated by the transaction API. - -A logical session with a `txnNumber` and `txnUUID` is considered a child of the session with matching `id` and `uid` values. There may be multiple child sessions per parent session, and checking out a child/parents session checks out the other and updates the `lastUsedTime` of both. Killing a parent session also kills all of its child sessions. - -The order of operations in the logical session that need to durably store metadata is defined by an -integer counter, called the `txnNumber`. When the cluster receives a retryable write or transaction -with a higher `txnNumber` than the previous known `txnNumber`, the cluster overwrites the previous -metadata with the metadata for the new operation. - -Operations sent with an `lsid` that do not need to durably store metadata simply bump the time at -which the session's metadata expires. - -## The logical session cache - -The logical session cache is an in-memory cache of sessions that are open and in use on a certain -node. Each node (router, shard, config server) has its own in-memory cache. A cache entry contains: -1. `_id` - The session’s logical session id -1. `user` - The session’s logged-in username (if authentication is enabled) -1. `lastUse` - The date and time that the session was last used - -The in-memory cache periodically persists entries to the `config.system.sessions` collection, known -as the "sessions collection." The sessions collection has different placement behavior based on -whether the user is running a standalone node, a replica set, or a sharded cluster. - -| Cluster Type | Sessions Collection Durable Storage | -|-----------------|------------------------------------------------------------------------------------------------------------------| -| Standalone Node | Sessions collection exists on the same node as the in-memory cache. | -| Replica Set | Sessions collection exists on the primary node and replicates to secondaries. | -| Sharded Cluster | Sessions collection is a regular sharded collection - can exist on multiple shards and can have multiple chunks. | - -### Session expiration - -There is a TTL index on the `lastUse` field in the sessions collection. The TTL expiration date is -thirty (30) minutes out by default, but is user-configurable. This means that if no requests come -in that use a session for thirty minutes, the TTL index will remove the session from the sessions -collection. When the logical session cache performs its periodic refresh (defined below), it will -find all sessions that currently exist in the cache that no longer exist in the sessions -collection. This is the set of sessions that we consider "expired". The expired sessions are then -removed from the in-memory cache. - -### How a session gets placed into the logical session cache - -When a node receives a request with attached session info, it will place that session into the -logical session cache. If a request corresponds to a session that already exists in the cache, the -cache will update the cache entry's `lastUse` field to the current date and time. - -### How the logical session cache syncs with the sessions collection - -At a regular interval of five (5) minutes (user-configurable), the logical session cache will sync -with the sessions collection. Inside the class, this is known as the "refresh" function. There are -four steps to this process: - -1. 
All sessions that have been used on this node since the last refresh will be upserted to the sessions collection. This means that sessions that already exist in the sessions collection will just have their `lastUse` fields updated. -1. All sessions that have been ended in the cache on this node (via the endSessions command) will be removed from the sessions collection. -1. Sessions that have expired from the sessions collection will be removed from the logical session cache on this node. -1. All cursors registered on this node that match sessions that have been ended (step 2) or were expired (step 3) will be killed. - -### Periodic cleanup of the session catalog and transactions table - -The logical session cache class holds the periodic job to clean up the -[session catalog](#the-logical-session-catalog) and [transactions table](#the-transactions-table). -Inside the class, this is known as the "reap" function. Every five (5) minutes (user-configurable), -the following steps will be performed: - -1. Find all sessions in the session catalog that were last checked out more than thirty minutes ago (default session expiration time). -1. For each session gathered in step 1, if the session no longer exists in the sessions collection (i.e. the session has expired or was explicitly ended), remove the session from the session catalog. -1. Find all entries in the transactions table that have a last-write date of more than thirty minutes ago (default session expiration time). -1. For each entry gathered in step 3, if the session no longer exists in the sessions collection (i.e. the session has expired or was explicitly ended), remove the entry from the transactions table. - -#### Configurable parameters related to the logical session cache - -| Parameter | Value Type | Default Value | Startup/Runtime | Description | -|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|----------------------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------| -| [disableLogicalSessionCacheRefresh](https://github.com/mongodb/mongo/blob/9cbbb66d7536ab4f92baf99ef5332e96be0e4153/src/mongo/db/logical_session_cache.idl#L49-L54) | boolean | false | Startup | Disables the logical session cache's periodic "refresh" and "reap" functions on this node. Recommended for testing only. | -| [logicalSessionRefreshMillis](https://github.com/mongodb/mongo/blob/9cbbb66d7536ab4f92baf99ef5332e96be0e4153/src/mongo/db/logical_session_cache.idl#L34-L40) | integer | 300000ms (5 minutes) | Startup | Changes how often the logical session cache runs its periodic "refresh" and "reap" functions on this node. | -| [localLogicalSessionTimeoutMinutes](https://github.com/mongodb/mongo/blob/9cbbb66d7536ab4f92baf99ef5332e96be0e4153/src/mongo/db/logical_session_id.idl#L191-L196) | integer | 30 minutes | Startup | Changes the TTL index timeout for the sessions collection. In sharded clusters, this parameter is supported only on the config server. 
| - -#### Code references - -* [Place where a session is placed (or replaced) in the logical session cache](https://github.com/mongodb/mongo/blob/1f94484d52064e12baedc7b586a8238d63560baf/src/mongo/db/logical_session_cache.h#L71-L75) -* [The logical session cache refresh function](https://github.com/mongodb/mongo/blob/1f94484d52064e12baedc7b586a8238d63560baf/src/mongo/db/logical_session_cache_impl.cpp#L207-L355) -* [The periodic job to clean up the session catalog and transactions table (the "reap" function)](https://github.com/mongodb/mongo/blob/1f94484d52064e12baedc7b586a8238d63560baf/src/mongo/db/logical_session_cache_impl.cpp#L141-L205) -* [Location of the session catalog and transactions table cleanup code on mongod](https://github.com/mongodb/mongo/blob/1f94484d52064e12baedc7b586a8238d63560baf/src/mongo/db/session/session_catalog_mongod.cpp#L331-L398) - -## The logical session catalog - -The logical session catalog of a mongod or mongos is an in-memory catalog that stores the runtime state -for sessions with transactions or retryable writes on that node. The runtime state of each session is -maintained by the session checkout mechanism, which also serves to serialize client operations on -the session. This mechanism requires every operation with an `lsid` and a `txnNumber` (i.e. -transaction and retryable write) to check out its session from the session catalog prior to execution, -and to check the session back in upon completion. When a session is checked out, it remains unavailable -until it is checked back in, forcing other operations to wait for the ongoing operation to complete -or yield the session. - -Checking out an internal/child session additionally checks out its parent session (the session with the same `id` and `uid` value in the lsid, but without a `txnNumber` or `txnUUID` value), and vice versa. - -The runtime state for a session consists of the last checkout time and operation, the number of operations -waiting to check out the session, and the number of kills requested. Retryable internal sessions are reaped from the logical session catalog [eagerly](https://github.com/mongodb/mongo/blob/67e37f8e806a6a5d402e20eee4b3097e2b11f820/src/mongo/db/session/session_catalog.cpp#L342), meaning that if a transaction session with a higher transaction number has successfully started, sessions with lower txnNumbers are removed from the session catalog and inserted into an in-memory buffer by the [InternalTransactionsReapService](https://github.com/mongodb/mongo/blob/67e37f8e806a6a5d402e20eee4b3097e2b11f820/src/mongo/db/internal_transactions_reap_service.h#L42) until a configurable threshold is met (1000 by default), after which they are deleted from the transactions table (`config.transactions`) and `config.image_collection` all at once. Eager reaping is best-effort, in that the in-memory buffer is cleared on stepdown or restart. - -The last checkout time is used by -the [periodic job inside the logical session cache](#periodic-cleanup-of-the-session-catalog-and-transactions-table) -to determine when a session should be reaped from the session catalog, whereas the number of -operations waiting to check out a session is used to block reaping of sessions that are still in -use. The last checkout operation is used to determine the operation to kill when a session is -killed, whereas the number of kills requested is used to make sure that sessions are only killed on -the first kill request. 
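For illustration, the lsid of a client (parent) session and of an internal (child) session spawned for a retryable write inside it might look roughly like the following sketch; the UUIDs and digest below are made-up values.

```js
// Parent (client) session id: `id` is generated by the driver; with authentication
// enabled, `uid` is the SHA-256 digest of the authenticated username.
var parentLsid = {
    id: UUID("11220b64-75e8-4f7f-a8c4-7d5f1e4c0001"),
    uid: BinData(0, "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=")
};

// Child session id for a retryable internal transaction: same `id` and `uid`, plus the
// client's txnNumber and a txnUUID generated by the transaction API. Checking out this
// session also checks out the parent session, and vice versa.
var childLsid = {
    id: parentLsid.id,
    uid: parentLsid.uid,
    txnNumber: NumberLong(5),
    txnUUID: UUID("11220b64-75e8-4f7f-a8c4-7d5f1e4c0002")
};
```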
- -### The transactions table - -The runtime state in a node's in-memory session catalog is made durable in the node's -`config.transactions` collection, also called its transactions table. The in-memory session catalog - is -[invalidated](https://github.com/mongodb/mongo/blob/56655b06ac46825c5937ccca5947dc84ccbca69c/src/mongo/db/session/session_catalog_mongod.cpp#L324) -if the `config.transactions` collection is dropped and whenever there is a rollback. When -invalidation occurs, all active sessions are killed, and the in-memory transaction state is marked -as invalid to force it to be -[reloaded from storage the next time a session is checked out](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/session/session_catalog_mongod.cpp#L426). - -#### Code references -* [**SessionCatalog class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/session/session_catalog.h) -* [**MongoDSessionCatalog class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/session/session_catalog_mongod.h) -* [**RouterSessionCatalog class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/session_catalog_router.h) -* How [**mongod**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/service_entry_point_common.cpp#L537) and [**mongos**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/commands/strategy.cpp#L412) check out a session prior to executing a command. - -## Retryable writes - -Retryable writes allow drivers to automatically retry non-idempotent write commands on network errors or failovers. -They are supported in logical sessions with `retryableWrites` enabled (default), with the caveat that the writes -are executed with write concern `w` greater than 0 and outside of transactions. [Here](https://github.com/mongodb/specifications/blob/49589d66d49517f10cc8e1e4b0badd61dbb1917e/source/retryable-writes/retryable-writes.rst#supported-write-operations) -is a complete list of retryable write commands. - -When a command is executed as a retryable write, it is sent from the driver with `lsid` and `txnNumber` attached. -After that, all write operations inside the command are assigned a unique integer statement id `stmtId` by the -mongos or mongod that executes the command. In other words, each write operation inside a batch write command -is given its own `stmtId` and is individually retryable. The `lsid`, `txnNumber`, and `stmtId` constitute a -unique identifier for a retryable write operation. - -This unique identifier enables a primary mongod to track and record its progress for a retryable -write command using the `config.transactions` collection and augmented oplog entries. The oplog -entry for a retryable write operation is written with a number of additional fields including -`lsid`, `txnNumber`, `stmtId` and `prevOpTime`, where `prevOpTime` is the opTime of the write that -precedes it. In certain cases, such as time-series inserts, a single oplog entry may encode -multiple client writes, and thus may contain an array value for `stmtId` rather than the more -typical single value. All of this results in a chain of write history that can be used to -reconstruct the result of writes that have already executed. After generating the oplog entry for a -retryable write operation, a primary mongod performs an upsert into `config.transactions` to write -a document containing the `lsid` (`_id`), `txnNumber`, `stmtId` and `lastWriteOpTime`, where -`lastWriteOpTime` is the opTime of the newly generated oplog entry. 
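As a rough illustration (the collection name, session id, and field layout are made up, and the real documents carry more fields), a retryable batch insert and the `config.transactions` entry it produces might look like:

```js
// Retryable write as sent by the driver: the lsid and txnNumber identify the retryable
// write command; each document below is assigned its own stmtId (0 and 1) by the server.
db.getSiblingDB("test").runCommand({
    insert: "orders",
    documents: [{_id: 1, item: "apple"}, {_id: 2, item: "pear"}],
    lsid: {id: UUID("11220b64-75e8-4f7f-a8c4-7d5f1e4c0001")},
    txnNumber: NumberLong(7)
});

// Simplified sketch of the document the primary shard upserts into config.transactions.
({
    _id: {id: UUID("11220b64-75e8-4f7f-a8c4-7d5f1e4c0001")},   // the lsid
    txnNumber: NumberLong(7),
    lastWriteOpTime: {ts: Timestamp(100, 2), t: NumberLong(1)},
    lastWriteDate: ISODate("2023-01-01T00:00:00Z")
})
```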
The `config.transactions` -collection is indexed by `_id` so this document is replaced every time there is a new retryable -write command (or transaction) on the session. - -The opTimes for all committed statements for the latest retryable write command is cached in an [in-memory table](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/transaction_participant.h#L928) that gets [updated](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/transaction_participant.cpp#L2125-L2127) after each -write oplog entry is generated, and gets cleared every time a new retryable write command starts. Prior to executing -a retryable write operation, a primary mongod first checks to see if it has the commit opTime for the `stmtId` of -that write. If it does, the write operation is skipped and a response is constructed immediately based on the oplog -entry with that opTime. Otherwise, the write operation is performed with the additional bookkeeping as described above. -This in-memory cache of opTimes for committed statements is invalidated along with the entire in-memory transaction -state whenever the `config.transactions` is dropped and whenever there is rollback. The invalidated transaction -state is overwritten by the on-disk transaction history at the next session checkout. - -To support retryability of writes across migrations, the session state for the migrated chunk is propagated -from the donor shard to the recipient shard. After entering the chunk cloning step, the recipient shard -repeatedly sends [\_getNextSessionMods](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp#L240-L359) (also referred to as MigrateSession) commands to -the donor shard until the migration reaches the commit phase to clone any oplog entries that contain session -information for the migrated chunk. Upon receiving each response, the recipient shard writes the oplog entries -to disk and [updates](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/transaction_participant.cpp#L2142-L2144) its in-memory transaction state to restore the session state for the chunk. - -### Retryable writes and findAndModify - -For most writes, persisting only the (lsid, txnId) pair alone is sufficient to reconstruct a -response. For findAndModify however, we also need to respond with the document that would have -originally been returned. In version 5.0 and earlier, the default behavior is to -[record the document image into the oplog](https://github.com/mongodb/mongo/blob/33ad68c0dc4bda897a5647608049422ae784a15e/src/mongo/db/op_observer_impl.cpp#L191) -as a no-op entry. The oplog entries generated would look something like: - -* `{ op: "d", o: {_id: 1}, ts: Timestamp(100, 2), preImageOpTime: Timestamp(100, 1), lsid: ..., txnNumber: ...}` -* `{ op: "n", o: {_id: 1, imageBeforeDelete: "foobar"}, ts: Timestamp(100, 1)}` - -There's a cost in "explicitly" replicating these images via the oplog. We've addressed this cost -with 5.1 where the default is to instead [save the image into a side collection](https://github.com/mongodb/mongo/blob/33ad68c0dc4bda897a5647608049422ae784a15e/src/mongo/db/op_observer_impl.cpp#L646-L650) -with the namespace `config.image_collection`. A primary will add `needsRetryImage: -` to the oplog entry to communicate to secondaries that they must make a -corollary write to `config.image_collection`. - -Note that this feature was backported to 4.0, 4.2, 4.4 and 5.0. 
Released binaries with this -capability can be turned on by [setting the `storeFindAndModifyImagesInSideCollection` server -parameter](https://github.com/mongodb/mongo/blob/2ac9fd6e613332f02636c6a7ec7f6cff4a8d05ab/src/mongo/db/repl/repl_server_parameters.idl#L506-L512). - -Partial cloning mechanisms such as chunk migrations, tenant migrations and resharding all support -the destination picking up the responsibility for satisfying a retryable write the source had -originally processed (to some degree). These cloning mechanisms naturally tail the oplog to pick up -on changes. Because the traditional retryable findAndModify algorithm places the images into the -oplog, the destination just needs to relink the timestamps for its oplog to support retryable -findAndModify. - -For retry images saved in the image collection, the source will "downconvert" oplog entries with -`needsRetryImage: true` into two oplog entries, simulating the old format. As chunk migrations use -internal commands, [this downconverting procedure](https://github.com/mongodb/mongo/blob/0beb0cacfcaf7b24259207862e1d0d489e1c16f1/src/mongo/db/s/session_catalog_migration_source.cpp#L58-L97) -is installed under the hood. For resharding and tenant migrations, a new aggregation stage, -[_internalFindAndModifyImageLookup](https://github.com/mongodb/mongo/blob/e27dfa10b994f6deff7c59a122b87771cdfa8aba/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.cpp#L61), -was introduced to perform the identical substitution. In order for this stage to have a valid timestamp -to assign to the forged no-op oplog entry as result of the "downconvert", we must always assign an -extra oplog slot when writing the original retryable findAndModify oplog entry with -`needsRetryImage: true`. - -In order to avoid certain WiredTiger constraints surrounding setting multiple timestamps in a single storage transaction, we must reserve -oplog slots before entering the OpObserver, which is where we would normally create an oplog entry -and assign it the next available timestamp. 
Here, we have a table that describes the different -scenarios, along with the timestamps that are reserved and the oplog entries assigned to each of -those timestamps: -| Parameters | NumSlotsReserved | TS - 1 | TS | Oplog fields for entry with timestamp: TS | -| --- | --- | --- | --- | --- | -| Update, NeedsRetryImage=preImage | 2 | Reserved for forged no-op entry eventually used by tenant migrations/resharding|Update oplog entry|NeedsRetryImage: preImage | -| Update, NeedsRetryImage=postImage | 2 | Reserved for forged no-op entry eventually used by tenant migrations/resharding|Update oplog entry | NeedsRetryImage: postImage | -|Delete, NeedsRetryImage=preImage |2|Reserved for forged no-op entry eventually used by tenant migrations/resharding|Delete oplog entry|NeedsRetryImage: preImage| - -#### Code references -* [**TransactionParticipant class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/transaction_participant.h) -* How a write operation [checks if a statement has been executed](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/ops/write_ops_exec.cpp#L811-L816) -* How mongos [assigns statement ids to writes in a batch write command](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/write_ops/batch_write_op.cpp#L483-L486) -* How mongod [assigns statement ids to insert operations](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/ops/write_ops_exec.cpp#L573) -* [Retryable writes specifications](https://github.com/mongodb/specifications/blob/49589d66d49517f10cc8e1e4b0badd61dbb1917e/source/retryable-writes/retryable-writes.rst) - -## Transactions - -Cross-shard transactions provide ACID guarantees for multi-statement operations that involve documents on -multiple shards in a cluster. Similar to [transactions on a single replica set](https://github.com/mongodb/mongo/blob/r4.4.0-rc7/src/mongo/db/repl/README.md#transactions), cross-shard transactions are only supported in logical -sessions. They have a configurable lifetime limit, and are automatically aborted when they are expired -or when the session is killed. - -To run a cross-shard transaction, a client sends all statements, including the `commitTransaction` and -`abortTransaction` command, to a single mongos with common `lsid` and `txnNumber` attached. The first -statement is sent with `startTransaction: true` to indicate the start of a transaction. Once a transaction -is started, it remains active until it is explicitly committed or aborted by the client, or unilaterally -aborted by a participant shard, or overwritten by a transaction with a higher `txnNumber`. - -When a mongos executes a transaction, it is responsible for keeping track of all participant shards, and -choosing a coordinator shard and a recovery shard for the transaction. In addition, if the transaction -uses read concern `"snapshot"`, the mongos is also responsible for choosing a global read timestamp (i.e. -`atClusterTime`) at the start of the transaction. The mongos will, by design, always choose the first participant -shard as the coordinator shard, and the first shard that the transaction writes to as the recovery shard. -Similarly, the global read timestamp will always be the logical clock time on the mongos when it receives -the first statement for the transaction. If a participant shard cannot provide a snapshot at the chosen -read timestamp, it will throw a snapshot error, which will trigger a client level retry of the transaction. 
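For illustration, the first statement of a cross-shard transaction as a driver sends it to mongos, and the final commit, might look roughly like this (the session id and values are made up):

```js
// First statement: startTransaction marks the beginning of the transaction, and
// autocommit: false is attached to every statement that is part of it.
db.getSiblingDB("test").runCommand({
    find: "orders",
    filter: {customerId: 42},
    lsid: {id: UUID("11220b64-75e8-4f7f-a8c4-7d5f1e4c0003")},
    txnNumber: NumberLong(3),
    startTransaction: true,
    autocommit: false,
    readConcern: {level: "snapshot"}   // mongos selects the atClusterTime for the transaction
});

// The client ends the transaction by sending commitTransaction (or abortTransaction)
// to the same mongos with the same lsid and txnNumber.
db.getSiblingDB("admin").runCommand({
    commitTransaction: 1,
    lsid: {id: UUID("11220b64-75e8-4f7f-a8c4-7d5f1e4c0003")},
    txnNumber: NumberLong(3),
    autocommit: false
});
```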
-The mongos will only keep this information in memory as it relies on the participant shards to persist their -respective transaction states in their local `config.transactions` collection. - -The execution of a statement inside a cross-shard transaction works very similarly to that of a statement -outside a transaction. One difference is that mongos attaches the transaction information (e.g. `lsid`, -`txnNumber` and `coordinator`) in every statement it forwards to targeted shards. Additionally, the first -statement to a participant shard is sent with `startTransaction: true` and `readConcern`, which contains -the `atClusterTime` if the transaction uses read concern `"snapshot"`. When a participant shard receives -a transaction statement with `coordinator: true` for the first time, it will infer that it has been chosen -as the transaction coordinator and will set up in-memory state immediately to prepare for coordinating -transaction commit. One other difference is that the response from each participant shard includes an -additional `readOnly` flag which is set to true if the statement does not do a write on the shard. Mongos -uses this to determine how a transaction should be committed or aborted, and to choose the recovery shard -as described above. The id of the recovery shard is included in the `recoveryToken` in the response to -the client. - -### Committing a Transaction - -The commit procedure begins when a client sends a `commitTransaction` command to the mongos that the -transaction runs on. The command is retryable as long as no new transaction has been started on the session -and the session is still alive. The number of participant shards and the number of write shards determine -the commit path for the transaction. - -* If the number of participant shards is zero, the mongos skips the commit and returns immediately. -* If the number of participant shards is one, the mongos forwards `commitTransaction` directly to that shard. -* If the number of participant shards is greater than one: - * If the number of write shards is zero, the mongos forwards `commitTransaction` to each shard individually. - * Otherwise, the mongos sends `coordinateCommitTransaction` with the participant list to the coordinator shard to - initiate two-phase commit. - -To recover the commit decision after the original mongos has become unreachable, the client can send `commitTransaction` -along with the `recoveryToken` to a different mongos. This will not initiate committing the transaction, instead -the mongos will send `coordinateCommitTransaction` with an empty participant list to the recovery shard to try to -join the progress of the existing coordinator if any, and to retrieve the commit outcome for the transaction. - -#### Two-phase Commit Protocol - -The two-phase commit protocol consists of the prepare phase and the commit phase. To support recovery from -failovers, a coordinator keeps a document inside the `config.transaction_coordinators` collection that contains -information about the transaction it is trying commit. This document is deleted when the commit procedure finishes. - -Below are the steps in the two-phase commit protocol. - -* Prepare Phase - 1. The coordinator writes the participant list to the `config.transaction_coordinators` document for the -transaction, and waits for it to be majority committed. - 1. 
The coordinator sends [`prepareTransaction`](https://github.com/mongodb/mongo/blob/r4.4.0-rc7/src/mongo/db/repl/README.md#lifetime-of-a-prepared-transaction) to the participants, and waits for vote responses. Each participant -shard responds with a vote, marks the transaction as prepared, and updates the `config.transactions` -document for the transaction. - 1. The coordinator writes the decision to the `config.transaction_coordinators` document and waits for it to -be majority committed. If the `coordinateCommitTransactionReturnImmediatelyAfterPersistingDecision` server parameter is -true (default), the `coordinateCommitTransaction` command returns immediately after waiting for the client's write concern -(i.e. letting the remaining work continue in the background). - -* Commit Phase - 1. If the decision is 'commit', the coordinator sends `commitTransaction` to the participant shards, and waits -for responses. If the decision is 'abort', it sends `abortTransaction` instead. Each participant shard marks -the transaction as committed or aborted, and updates the `config.transactions` document. - 1. The coordinator deletes the coordinator document with write concern `{w: 1}`. - -The prepare phase is skipped if the coordinator already has the participant list and the commit decision persisted. -This can be the case if the coordinator was created as part of step-up recovery. - -### Aborting a Transaction - -Mongos will implicitly abort a transaction on any error except the view resolution error from a participant shard -if a two phase commit has not been initiated. To explicitly abort a transaction, a client must send an `abortTransaction` -command to the mongos that the transaction runs on. The command is also retryable as long as no new transaction has -been started on the session and the session is still alive. In both cases, the mongos simply sends `abortTransaction` -to all participant shards. - -#### Code references -* [**TransactionRouter class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/transaction_router.h) -* [**TransactionCoordinatorService class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/transaction_coordinator_service.h) -* [**TransactionCoordinator class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/transaction_coordinator.h) - -## Internal Transactions - -Internal transactions are transactions that mongos and mongod can run on behalf of a client command regardless of a client's session option configuration. These transactions are started and managed internally by mongos/mongod, thus clients are unaware of the execution of internal transactions. All internal transactions will be run within a session started internally, which we will refer to as `internal sessions`, except when the client is already running a transaction within a session, in which case the transaction executes as a regular client transaction. - -An internal transaction started on behalf of a client command is subject to the client command's constraints such as terminating execution if the command's `$maxTimeMS` is reached, or guaranteeing retryability if the issued command was a retryable write. These constraints lead to the following concepts. - -### Non-Retryable Internal Transactions - -If a client runs a command without a session, or in a session where retryable writes are disabled (i.e. `retryWrites: false`), the server will start a non-retryable internal transaction.
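When such a non-retryable internal transaction is spawned inside a client session (for example one opened with `retryWrites: false`), the internal session reuses the parent's `id` and `uid` and, per the lsid fields described earlier, carries only a `txnUUID`; a made-up sketch:

```js
// Hypothetical lsid of a non-retryable internal transaction's session: no txnNumber,
// because the spawning command is not a retryable write (values are illustrative).
var nonRetryableChildLsid = {
    id: UUID("11220b64-75e8-4f7f-a8c4-7d5f1e4c0001"),               // parent session's id
    uid: BinData(0, "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU="), // parent session's uid
    txnUUID: UUID("11220b64-75e8-4f7f-a8c4-7d5f1e4c00aa")            // generated by the transaction API
};
```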
- -### Retryable Internal Transactions - -If a client runs a command in a session where retryable writes are enabled (i.e. `retryWrites: true`), the server will start a retryable internal transaction. - -**Note**: The distinction between **Retryable** and **Non-Retryable** here is the requirement that Retryable Internal Transactions must fulfill the retryable write contract, which is described below. Both types of transactions will be [retried internally on transient errors](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_api.cpp#L207-L227). The only exception is an internal transaction that is started on behalf of a `client transaction`, which can only be retried by the client. - -#### How retryability is guaranteed - -We expect that retryable write commands that start retryable internal transactions conform to the retryable write contract which has the following stipulations: - -1. Write statements within the command are guaranteed to apply only once regardless of how many times a client retries. -2. The response for the command is guaranteed to be reconstructable on retry. - -To do this, retryable write statements executed inside of a retryable internal transaction try to emulate the behavior of ordinary retryable writes. - -Each statement inside of a retryable write command should have a corresponding entry within a retryable internal transaction with the same `stmtId` as the original write statement. When a transaction participant for a retryable internal transaction notices a write statement with a previously seen `stmtId`, it will not execute the statement and instead generate the original response for the already executed statement using the oplog entry generated by the initial execution. The check for previously executed statements is done using the `retriedStmtIds` array, which contains the `stmtIds` of already retried statements, inside of a write command's response. - -In cases where a client retryable write command implicitly expects an auxiliary operation to be executed atomically with its current request, a retryable internal transaction may contain additional write statements that are not explicitly requested by a client retryable write command. An example could be that the client expects to atomically update an index when executing a write. Since these auxiliary write statements do not have a corresponding entry within the original client command, the `stmtId` field for these statements will be set to `{stmtId: kUninitializedStmtId}`. These auxiliary write statements are non-retryable, thus it is crucial that we use the `retriedStmtIds` to determine which client write statements were already successfully retried to avoid re-applying the corresponding auxiliary write statements. Additionally, these statements will be excluded from the history check involving `retriedStmtIds`. - -To guarantee that we can reconstruct the response regardless of retries, we do a "cross sectional" write history check for retryable writes and retryable internal transactions prior to running a client retryable write/retryable internal transaction command. This ensures we do not double apply non-idempotent operations, and instead recover the response for a successful execution when appropriate.
To support this, the [RetryableWriteTransactionParticipantCatalog](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.h#L1221-L1299) was added as a decoration on an external session and it stores the transaction participants for all active retryable writes on the session, which we use to do our [write history check](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L3205-L3208). - -#### Reconstructing write responses - -To reconstruct responses for retryable internal transactions, we use the applyOps oplog entry, which contains an inner entry with the operation run under the `o` field that has a corresponding `stmtId`. We use the `stmtId` and `opTime` cached in the `TransactionParticipant` to lookup the operation in the applyOps oplog entry, which gives us the necessary details to reconstruct the original write response. The process for reconstructing retryable write responses works the same way. - - -#### Special considerations for findAndModify - -`findAndModify` additionally requires the storage of pre/post images. The behavior of recovery differs based on the setting of `storeFindAndModifyImagesInSideCollection`. - -If `storeFindAndModifyImagesInSideCollection` is **false**, then upon committing or preparing an internal transaction, we generate a no-op oplog entry that stores either the pre or post image of the document involved. The operation entry for the `findAndModify` statement inside the applyOps oplog entry will have a `preImageOpTime` or a `postImageOpTime` field that is set to the opTime of the no-op oplog entry. That opTime will be used to lookup the pre/post image when reconstructing the write response. - -If `storeFindAndModifyImagesInSideCollection` is **true**, then upon committing or preparing an internal transaction, we insert a document into `config.image_collection` containing the pre/post image. The operation entry for the findAndModify statement inside the applyOps oplog entry will have a `needsRetryImage` field that is set to `true` to indicate that a pre/post image should be loaded from the side collection when reconstructing the write response. We can do the lookup using a transaction's `lsid` and `txnNumber`. - -Currently, a retryable internal transaction can only support a **single** `findAndModify` statement at a time, due to the limitation that `config.image_collection` can only support storing one pre/post image entry for a given `(lsid, txnNumber)`. - -#### Retryability across failover and restart - -To be able to guarantee retryability under failover, we need to make sure that a mongod **always** has all the necessary transaction state loaded while executing a retryable write command. To do this, we recover the transaction state of the client and internal sessions [when checking out sessions](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/session/session_catalog_mongod.cpp#L694) on recovery. During checkout, we call [refreshFromStorageIfNeeded()](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2901) on the current client session (if we are running in one) to refresh the TransactionParticipant for that session.
We then [fetch any relevant active internal sessions associated with the current client session and refresh the TransactionParticipants for those sessions](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2987). - -#### Handling retry conflicts - -Due to the use of `txnUUID` in the lsid for de-duplication purposes, retries of client write statements will always spawn a different internal session/transaction than the one originally used to do the initial attempt. This has two implications for conflict resolution: - -1. If the client retries on the same mongos/mongod that the original write was run on, retries are blocked by mongos/mongod until the original attempt finishes execution. This is due to the [session checkout mechanism](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/service_entry_point_common.cpp#L973) that prevents checkout of an in-use session, which in this case would block the retry attempt from checking out the parent session. Once the original write finishes execution, the retry would either retry(if necessary) or recover the write response as described above. - -2. If the client retries on a different mongos than the original write was run on, the new mongos will not have visibility over in-progress internal transactions run by another mongos, so this retry will not be blocked and legally begin execution. When the new mongos begins execution of the retried command, it will send commands with `startTransaction` to relevant transaction participants. The transaction participants will then [check if there is already an in-progress internal transaction that will conflict](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2827-L2846) with the new internal transaction that is attempting to start. If so, then the transaction participant will throw `RetryableTransactionInProgress`, which will be caught and cause the new transaction to [block until the existing transaction is finished](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/service_entry_point_common.cpp#L1029-L1036). - - -#### Supporting retryability across chunk migration and resharding - - The session history, oplog entries, and image collection entries involving the chunk being migrated are cloned from the donor shard to the recipient shard during chunk migration. Once the recipient receives the relevant oplog entries from the donor, it will [nest and apply the each of the received oplog entries in a no-op oplog entry](https://github.com/mongodb/mongo/blob/0d84f4bab0945559abcd5b00be5ec322c5214642/src/mongo/db/s/session_catalog_migration_destination.cpp#L204-L347). Depending on the type of operation run, the behavior will differ as such. - -* If a non-retryable write/non-retryable internal transaction is run, then the donor shard will [send a sentinel no-op oplog entry](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/s/session_catalog_migration_destination.cpp#L204-L354), which when parsed by the TransactionParticipant upon getting a retry against the recipient shard will [throw IncompleteTransactionHistory](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L323-L331). 
- -* If a retryable write/retryable internal transaction is run, then the donor shard will send a ["downconverted" oplog entry](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/s/session_catalog_migration_source.cpp#L669-L680), which when parsed by the TransactionParticipant upon getting a retry against the recipient shard will return the original write response. - -`Note`: "Downconverting" in this context, is the process of extracting the operation information inside an applyOps entry for an internal transaction and constructing a new retryable write oplog entry with `lsid` and `txnNumber` set to the associated client's session id and txnNumber. - -For resharding, the process is similar to how chunk migrations are handled. The session history, oplog entries, and image collection entries for operations run during resharding are cloned from the donor shard to the recipient shard. The only difference is that the recipient in this case will handle the "downconverting", nesting, and applying of the received oplog entries. The two cases discussed above apply to resharding as well. - - -#### Code References - -* [**Session checkout logic**](https://github.com/mongodb/mongo/blob/0d84f4bab0945559abcd5b00be5ec322c5214642/src/mongo/db/session/session_catalog_mongod.cpp#L694) -* [**Cross-section history check logic**](https://github.com/mongodb/mongo/blob/0d84f4bab0945559abcd5b00be5ec322c5214642/src/mongo/db/transaction/transaction_participant.cpp#L3206) -* [**Conflicting internal transaction check logic**](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2827-L2846) -* [**Refreshing client and internal sessions logic**](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2889-L2899) -* [**RetryableWriteTransactionParticipantCatalog**](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.h#L1221-L1299) - -### Transaction API - -The [transaction API](https://github.com/mongodb/mongo/blob/master/src/mongo/db/transaction/transaction_api.h) is used to initiate transactions from within the server. The API starts an internal transaction on its local process, executes transaction statements specified in a callback, and completes the transaction by committing/aborting/retrying on transient errors. By default, a transaction can be retried 120 times to mirror the 2 minute timeout used by the [driver’s convenient transactions API](https://github.com/mongodb/specifications/blob/92d77a6d/source/transactions-convenient-api/transactions-convenient-api.rst). - -Additionally, the API can use router commands when running on a mongod. Each command will execute as if on a mongos, targeting remote shards and initiating a two phase commit if necessary. To enable this router behavior the [`cluster_transaction_api`](https://github.com/mongodb/mongo/blob/master/src/mongo/db/cluster_transaction_api.h) defines an additional set of behaviors to rename commands to their [cluster command names](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/db/cluster_transaction_api.cpp#L44-L52). 
- -Transactions for non-retryable operations or operations without a session initiated through the API use sessions from the [InternalSessionPool](https://github.com/mongodb/mongo/blob/master/src/mongo/db/internal_session_pool.h) to prevent the creation and maintenance of many single-use sessions. - -To use the transaction API, [instantiate a transaction client](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp#L250-L253) by providing the opCtx, an executor, and resource yielder. Then, run the commands to be grouped in the same transaction session on the transaction object. Some examples of this are listed below. - -* [Cluster Find and Modify Command](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp#L255-L265) -* [Queryable Encryption](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/db/commands/fle2_compact.cpp#L636-L648) -* [Cluster Write Command - WouldChangeOwningShard Error](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/s/commands/cluster_write_cmd.cpp#L162-L190) - -## The historical routing table - -When a mongos or mongod executes a command that requires shard targeting, it must use routing information -that matches the read concern of the command. If the command uses `"snapshot"` read concern, it must use -the historical routing table at the selected read timestamp. If the command uses any other read concern, -it must use the latest cached routing table. - -The [routing table cache](#the-routing-table-cache) provides an interface for obtaining the routing table -at a particular timestamp and collection version, namely the `ChunkManager`. The `ChunkManager` has an -optional clusterTime associated with it and a `RoutingTableHistory` that contains historical routing -information for all chunks in the collection. That information is stored in an ordered map from the max -key of each chunk to an entry that contains routing information for the chunk, such as chunk range, -chunk version and chunk history. The chunk history contains the shard id for the shard that currently -owns the chunk, and the shard id for any other shards that used to own the chunk in the past -`minSnapshotHistoryWindowInSeconds` (defaults to 300 seconds). It corresponds to the chunk history in -the `config.chunks` document for the chunk which gets updated whenever the chunk goes through an -operation, such as merge or migration. The `ChunkManager` uses this information to determine the -shards to target for a query. If the clusterTime is not provided, it will return the shards that -currently own the target chunks. Otherwise, it will return the shards that owned the target chunks -at that clusterTime and will throw a `StaleChunkHistory` error if it cannot find them. - -#### Code references -* [**ChunkManager class**](https://github.com/mongodb/mongo/blob/r4.3.6/src/mongo/s/chunk_manager.h#L233-L451) -* [**RoutingTableHistory class**](https://github.com/mongodb/mongo/blob/r4.3.6/src/mongo/s/chunk_manager.h#L70-L231) -* [**ChunkHistory class**](https://github.com/mongodb/mongo/blob/r4.3.6/src/mongo/s/catalog/type_chunk.h#L131-L145) - ---- - -# Node startup and shutdown - -## Startup and sharding component initialization -The mongod intialization process is split into three phases. 
The first phase runs on startup and initializes the set of stateless components based on the cluster role. The second phase then initializes additional components that must be initialized with state read from the config server. The third phase is run on the [transition to primary](https://github.com/mongodb/mongo/blob/879d50a73179d0dd94fead476468af3ee4511b8f/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L822-L901) and starts services that only run on primaries. - -### Shard Server initialization - -#### Phase 1: -1. On a shard server, the `CollectionShardingState` factory is set to an instance of the `CollectionShardingStateFactoryShard` implementation. The component lives on the service context. -1. The sharding [OpObservers are created](https://github.com/mongodb/mongo/blob/0e08b33037f30094e9e213eacfe16fe88b52ff84/src/mongo/db/mongod_main.cpp#L1000-L1001) and registered with the service context. The `OpObserverShardingImpl` class forwards operations during migration to the chunk cloner. The `ShardServerOpObserver` class is used to handle the majority of sharding related events. These include loading the shard identity document when it is inserted and performing range deletions when they are marked as ready. - -#### Phase 2: -1. The [shardIdentity document is loaded](https://github.com/mongodb/mongo/blob/37ff80f6234137fd314d00e2cd1ff77cde90ce11/src/mongo/db/s/sharding_initialization_mongod.cpp#L366-L373) if it already exists on startup. For shards, the shard identity document specifies the config server connection string. If the shard does not have a shardIdentity document, it has not been added to a cluster yet, and the "Phase 2" initialization happens when the shard receives a shardIdentity document as part of addShard. -1. If the shard identity document was found, then the [ShardingState is intialized](https://github.com/mongodb/mongo/blob/37ff80f6234137fd314d00e2cd1ff77cde90ce11/src/mongo/db/s/sharding_initialization_mongod.cpp#L416-L462) from its fields. -1. The global sharding state is set on the Grid. The Grid contains the sharding context for a running server. It exists both on mongod and mongos because the Grid holds all the components needed for routing, and both mongos and shard servers can act as routers. -1. `KeysCollectionManager` is set on the `LogicalTimeValidator`. -1. The `ShardingReplicaSetChangeListener` is instantiated and set on the `ReplicaSetMonitor`. -1. The remaining sharding components are [initialized for the current replica set role](https://github.com/mongodb/mongo/blob/37ff80f6234137fd314d00e2cd1ff77cde90ce11/src/mongo/db/s/sharding_initialization_mongod.cpp#L255-L286) before the Grid is marked as initialized. - -#### Phase 3: -Shard servers [start up several services](https://github.com/mongodb/mongo/blob/879d50a73179d0dd94fead476468af3ee4511b8f/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L885-L894) that only run on primaries. - -### Config Server initialization - -#### Phase 1: -The sharding [OpObservers are created](https://github.com/mongodb/mongo/blob/0e08b33037f30094e9e213eacfe16fe88b52ff84/src/mongo/db/mongod_main.cpp#L1000-L1001) and registered with the service context. The config server registers the OpObserverImpl and ConfigServerOpObserver observers. - -#### Phase 2: -The global sharding state is set on the Grid. The Grid contains the sharding context for a running server. The config server does not need to be provided with the config server connection string explicitly as it is part of its local state. 
- -#### Phase 3: -Config servers [run some services](https://github.com/mongodb/mongo/blob/879d50a73179d0dd94fead476468af3ee4511b8f/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L866-L867) that only run on primaries. - -### Mongos initialization -#### Phase 2: -The global sharding state is set on the Grid. The Grid contains the sharding context for a running server. Mongos is provided with the config server connection string as a startup parameter. - -#### Code references -* Function to [initialize global sharding state](https://github.com/mongodb/mongo/blob/eeca550092d9601d433e04c3aa71b8e1ff9795f7/src/mongo/s/sharding_initialization.cpp#L188-L237). -* Function to [initialize sharding environment](https://github.com/mongodb/mongo/blob/37ff80f6234137fd314d00e2cd1ff77cde90ce11/src/mongo/db/s/sharding_initialization_mongod.cpp#L255-L286) on shard server. -* Hook for sharding [transition to primary](https://github.com/mongodb/mongo/blob/879d50a73179d0dd94fead476468af3ee4511b8f/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L822-L901). - -## Shutdown - -If the mongod server is primary, it will [try to step down](https://github.com/mongodb/mongo/blob/0987c120f552ab6d347f6b1b6574345e8c938c32/src/mongo/db/mongod_main.cpp#L1046-L1072). Mongod and mongos then run their respective shutdown tasks which cleanup the remaining sharding components. - -#### Code references -* [Shutdown logic](https://github.com/mongodb/mongo/blob/2bb2f2225d18031328722f98fe05a169064a8a8a/src/mongo/db/mongod_main.cpp#L1163) for mongod. -* [Shutdown logic](https://github.com/mongodb/mongo/blob/30f5448e95114d344e6acffa92856536885e35dd/src/mongo/s/mongos_main.cpp#L336-L354) for mongos. - -### Quiesce mode on shutdown -mongos enters quiesce mode prior to shutdown, to allow short-running operations to finish. -During this time, new and existing operations are allowed to run, but `isMaster`/`hello` -requests return a `ShutdownInProgress` error, to indicate that clients should start routing -operations to other nodes. Entering quiesce mode is considered a significant topology change -in the streaming `hello` protocol, so mongos tracks a `TopologyVersion`, which it increments -on entering quiesce mode, prompting it to respond to all waiting hello requests. - -### helloOk Protocol Negotation - -In order to preserve backwards compatibility with old drivers, mongos currently supports both -the [`isMaster`] command and the [`hello`] command. New drivers and 5.0+ versions of the server -will support `hello`. When connecting to a sharded cluster via mongos, a new driver will send -"helloOk: true" as a part of the initial handshake. If mongos supports hello, it will respond -with "helloOk: true" as well. This way, new drivers know that they're communicating with a version -of the mongos that supports `hello` and can start sending `hello` instead of `isMaster` on this -connection. - -If mongos does not support `hello`, the `helloOk` flag is ignored. A new driver will subsequently -not see "helloOk: true" in the response and must continue to send `isMaster` on this connection. Old -drivers will not specify this flag at all, so the behavior remains the same. - -#### Code references -* [isMaster command](https://github.com/mongodb/mongo/blob/r4.8.0-alpha/src/mongo/s/commands/cluster_is_master_cmd.cpp#L248) for mongos. -* [hello command](https://github.com/mongodb/mongo/blob/r4.8.0-alpha/src/mongo/s/commands/cluster_is_master_cmd.cpp#L64) for mongos. 
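As a rough sketch of this negotiation from a client's perspective (responses abbreviated, values illustrative):

```js
// New driver: opt into the hello protocol as part of the initial handshake.
db.adminCommand({isMaster: 1, helloOk: true});
// A mongos that supports hello echoes the flag back, e.g. { ismaster: true, helloOk: true, ... },
// so the driver switches to hello for the rest of the connection (the hello response reports
// isWritablePrimary instead of ismaster):
db.adminCommand({hello: 1});
// A mongos that does not support hello simply ignores helloOk, the response omits the flag,
// and the driver keeps sending isMaster on this connection.
```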
- -# Cluster DDL operations - -[Data Definition Language](https://en.wikipedia.org/wiki/Data_definition_language) (DDL) operations are operations that change the metadata; -some examples of DDLs are create/drop database or create/rename/drop collection. - -Metadata are tracked in the two main MongoDB catalogs: -- *[Local catalog](https://github.com/mongodb/mongo/blob/master/src/mongo/db/catalog/README.md#the-catalog)*: present on each shard, keeping -track of databases/collections/indexes the shard owns or has knowledge of. -- *Sharded Catalog*: residing on the config server, keeping track of the metadata of databases and sharded collections for which it serves -as the authoritative source of information. - -## Sharding DDL Coordinator -The [ShardingDDLCoordinator](https://github.com/mongodb/mongo/blob/106b96548c5214a8e246a1cf6ac005a3985c16d4/src/mongo/db/s/sharding_ddl_coordinator.h#L47-L191) -is the main component of the DDL infrastructure for sharded clusters: it is an abstract class whose concrete implementations have the -responsibility of coordinating the different DDL operations between shards and the config server in order to keep the two catalogs -consistent. When a DDL request is received by a router, it gets forwarded to the [primary shard](https://docs.mongodb.com/manual/core/sharded-cluster-shards/#primary-shard) -of the targeted database. For the sake of clarity, createDatabase is the only DDL operation that cannot possibly get forwarded to the -database primary but is instead routed to the config server, as the database may not exist yet. - -##### Serialization and joinability of DDL operations -When a primary shard receives a DDL request, it tries to construct a DDL coordinator performing the following steps: -- Acquire the [distributed lock for the database](https://github.com/mongodb/mongo/blob/908e394d39b223ce498fde0d40e18c9200c188e2/src/mongo/db/s/sharding_ddl_coordinator.cpp#L155). This ensures that at most one DDL operation at a time will run for namespaces belonging to the same database on that particular primary node. -- Acquire the distributed lock for the [collection](https://github.com/mongodb/mongo/blob/908e394d39b223ce498fde0d40e18c9200c188e2/src/mongo/db/s/sharding_ddl_coordinator.cpp#L171) (or [collections](https://github.com/mongodb/mongo/blob/908e394d39b223ce498fde0d40e18c9200c188e2/src/mongo/db/s/sharding_ddl_coordinator.cpp#L181)) involved in the operation. - -In case a new DDL petition on the same namespace gets forwarded by a router while a DDL coordinator is instantiated, a [check is performed](https://github.com/mongodb/mongo/blob/b7a055f55a202ba870730fb865579acf5d9fb90f/src/mongo/db/s/sharding_ddl_coordinator.h#L54-L61) -on the shard in order to join the ongoing operation if the options match (same operation with same parameters) or fail if they don't -(different operation or same operation with different parameters). - -##### Execution of DDL coordinators -Once the distributed locks have been acquired, it is guaranteed that no other concurrent DDLs are happening for the same database, -hence a DDL coordinator can safely start [executing the operation](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/sharding_ddl_coordinator.cpp#L207). 
- -As a first step, each coordinator is required to [majority commit a document](https://github.com/mongodb/mongo/blob/2ae2bcedfb7d48e64843dd56b9e4f107c56944b6/src/mongo/db/s/sharding_ddl_coordinator.h#L105-L116) - -that we will refer to as state document - containing all information regarding the running operation such as name of the DDL, namespaces -involved and other metadata identifying the original request. At this point, the coordinator is entitled to start making both local and -remote catalog modifications, possibly after blocking CRUD operations on the changing namespaces; when the execution reaches relevant -points, the state can be checkpointed by [updating the state document](https://github.com/mongodb/mongo/blob/b7a055f55a202ba870730fb865579acf5d9fb90f/src/mongo/db/s/sharding_ddl_coordinator.h#L118-L127). - -The completion of a DDL operation is marked by the [state document removal](https://github.com/mongodb/mongo/blob/b7a055f55a202ba870730fb865579acf5d9fb90f/src/mongo/db/s/sharding_ddl_coordinator.cpp#L258) -followed by the [release of the distributed locks](https://github.com/mongodb/mongo/blob/b7a055f55a202ba870730fb865579acf5d9fb90f/src/mongo/db/s/sharding_ddl_coordinator.cpp#L291-L298) -in inverse order of acquisition. - -Some DDL operations are required to block migrations before actually executing so that the coordinator has a consistent view of which -shards contain data for the collection. The [setAllowMigration command](https://github.com/mongodb/mongo/blob/c5fd926e176fcaf613d9fb785f5bdc70e1aa14be/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp#L42) -serves the purpose of blocking ongoing migrations and preventing new ones from starting. - -##### Resiliency to elections, crashes and errors - -DDL coordinators are resilient to elections and sudden crashes because they're instances of a [primary only service](https://github.com/mongodb/mongo/blob/master/docs/primary_only_service.md) -that - by definition - gets automatically resumed when the node of a shard steps up. - -The coordinator state document has a double aim: -- It serves the purpose of primary only service state document. -- It tracks the progress of a DDL operation. - -Steps executed by coordinators are implemented in idempotent phases. When entering a phase, the state is checkpointed as majority committed -on the state document before actually executing the phase. If a node fails or steps down, it is then safe to resume the DDL operation as -follows: skip previous phases and re-execute starting from the checkpointed phase. - -When a new primary node is elected, the DDL primary only service is [rebuilt](https://github.com/mongodb/mongo/blob/20549d58943b586749d1570eee834c71bdef1b37/src/mongo/db/s/sharding_ddl_coordinator_service.cpp#L158-L185) -resuming outstanding coordinators, if present; during this recovery phase, incoming DDL operations are [temporarily put on hold](https://github.com/mongodb/mongo/blob/20549d58943b586749d1570eee834c71bdef1b37/src/mongo/db/s/sharding_ddl_coordinator_service.cpp#L152-L156) -waiting for pre-existing DDL coordinators to be re-instantiated in order to avoid conflicts in the acquisition of the distributed locks.
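As a purely hypothetical sketch (the field names below are illustrative, not the exact schema), such a state document identifies the operation and the original request and records the last majority-committed phase, so that a new primary can resume from where the coordinator left off:

```js
// Hypothetical DDL coordinator state document: names and values are made up for illustration.
({
    _id: {namespace: "test.orders", operationType: "dropCollection"},
    phase: "phase-2",                        // last majority-committed (checkpointed) phase
    originalRequest: {drop: "orders"}        // metadata identifying the original DDL request
})
```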
- -If a [recoverable error](https://github.com/mongodb/mongo/blob/a1752a0f5300b3a4df10c0a704c07e597c3cd291/src/mongo/db/s/sharding_ddl_coordinator.cpp#L216-L226) -is caught at execution-time, it will be retried indefinitely; all other errors have the effect of stopping and destructing the DDL coordinator and - -because of that - are never expected to happen after a coordinator performs destructive operations. - -# User Write Blocking - -User write blocking prevents user initiated writes from being performed on C2C source and destination -clusters during certain phases of C2C replication, allowing durable state to be propagated from the -source without experiencing conflicts. Because the source and destination clusters are different -administrative domains and thus can have separate configurations and metadata, operations which -affect metadata, such as replset reconfig, are permitted. Also, internal operations which affect user -collections but leave user data logically unaffected, such as chunk migration, are still permitted. -Finally, users with certain privileges can bypass user write blocking; this is necessary so that the -C2C sync daemon itself can write to user data. - -User write blocking is enabled and disabled by the command `{setUserWriteBlockMode: 1, global: -<true/false>}`. On replica sets, this command is invoked on the primary, and enables/disables user -write blocking replica-set-wide. On sharded clusters, this command is invoked on `mongos`, and -enables/disables user write blocking cluster-wide. We define a write as a "user write" if the target -database is not internal (the `admin`, `local`, and `config` databases being defined as internal), -and if the user that initiated the write cannot perform the `bypassWriteBlockingMode` action on the -`cluster` resource. By default, only the `restore`, `root`, and `__system` built-in roles have this -privilege. - -The `UserWriteBlockModeOpObserver` is responsible for blocking disallowed writes. Upon any operation -which writes, this `OpObserver` checks whether the `GlobalUserWriteBlockState` [allows writes to the -target -namespace](https://github.com/mongodb/mongo/blob/387f8c0e26a352b95ecfc6bc51f749d26a929390/src/mongo/db/op_observer/user_write_block_mode_op_observer.cpp#L281-L288). -The `GlobalUserWriteBlockState` stores whether user write blocking is enabled in a given -`ServiceContext`. As part of its write access check, it [checks whether the `WriteBlockBypass` -associated with the given `OperationContext` is -enabled](https://github.com/mongodb/mongo/blob/25377181476e4140c970afa5b018f9b4fcc951e8/src/mongo/db/s/global_user_write_block_state.cpp#L59-L67). -The `WriteBlockBypass` stores whether the user that initiated the write is able to perform writes -when user write blocking is enabled. On internal requests (i.e. from other `mongod` or `mongos` -instances in the sharded cluster/replica set), the request originator propagates `WriteBlockBypass` -[through the request -metadata](https://github.com/mongodb/mongo/blob/182616b7b45a1e360839c612c9ee8acaa130fe17/src/mongo/rpc/metadata.cpp#L115). -On external requests, `WriteBlockBypass` is enabled [if the authenticated user is privileged to -bypass user -writes](https://github.com/mongodb/mongo/blob/07c3d2ebcd3ca8127ed5a5aaabf439b57697b530/src/mongo/db/write_block_bypass.cpp#L60-L63).
-The `AuthorizationSession`, which is responsible for maintaining the authorization state, keeps track -of whether the user has the privilege to bypass user write blocking by [updating a cached variable -upon any changes to the authorization -state](https://github.com/mongodb/mongo/blob/e4032fe5c39f1974c76de4cefdc07d98ab25aeef/src/mongo/db/auth/authorization_session_impl.cpp#L1119-L1121). -This structure enables, for example, sharded writes to work correctly with user write blocking, -because the `WriteBlockBypass` state is initially set on the `mongos` based on the -`AuthorizationSession`, which knows the privileges of the user making the write request, and then -propagates from the `mongos` to the shards involved in the write. Note that this means on requests -from `mongos`, shard servers don't check their own `AuthorizationSession`s when setting -`WriteBlockBypass`. This would be incorrect behavior since internal requests have internal -authorization, which confers all privileges, including the privilege to bypass user write blocking. - -The `setUserWriteBlockMode` command, before enabling user write blocking, blocks creation of new -index builds and aborts all currently running index builds on non-internal databases, and drains the -index builds it cannot abort. This upholds the invariant that while user write blocking is enabled, -all running index builds are allowed to bypass write blocking and therefore can commit without -additional checks. - -In sharded clusters, enabling user write blocking is a two-phase operation, coordinated by the config -server. The first phase disallows creation of new `ShardingDDLCoordinator`s and drains all currently -running `DDLCoordinator`s. The config server waits for all shards to complete this phase before -moving onto the second phase, which aborts index builds and enables write blocking. This structure is -used because enabling write blocking while there are ongoing `ShardingDDLCoordinator`s would prevent -those operations from completing. 
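Concretely, the flow looks roughly like this from a shell connected as a sufficiently privileged user (database and collection names are illustrative):

```js
// Enable user write blocking: cluster-wide when run on mongos, replica-set-wide on a primary.
db.adminCommand({setUserWriteBlockMode: 1, global: true});

// While enabled, a write to a non-internal database from a user without the
// bypassWriteBlockingMode privilege is rejected by the UserWriteBlockModeOpObserver.
db.getSiblingDB("test").orders.insertOne({_id: 1});   // fails for an ordinary user

// Users with the restore, root, or __system roles (and therefore the bypass privilege),
// such as the C2C sync daemon, can still write to user collections.

// Disable user write blocking again.
db.adminCommand({setUserWriteBlockMode: 1, global: false});
```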
- -#### Code references -* The [`UserWriteBlockModeOpObserver` - class](https://github.com/mongodb/mongo/blob/387f8c0e26a352b95ecfc6bc51f749d26a929390/src/mongo/db/op_observer/user_write_block_mode_op_observer.h#L40) -* The [`GlobalUserWriteBlockState` - class](https://github.com/mongodb/mongo/blob/25377181476e4140c970afa5b018f9b4fcc951e8/src/mongo/db/s/global_user_write_block_state.h#L37) -* The [`WriteBlockBypass` - class](https://github.com/mongodb/mongo/blob/07c3d2ebcd3ca8127ed5a5aaabf439b57697b530/src/mongo/db/write_block_bypass.h#L38) -* The [`abortUserIndexBuildsForUserWriteBlocking` - function](https://github.com/mongodb/mongo/blob/25377181476e4140c970afa5b018f9b4fcc951e8/src/mongo/db/index_builds_coordinator.cpp#L850), - used to abort and drain all current user index builds -* The [`SetUserWriteBlockModeCoordinator` - class](https://github.com/mongodb/mongo/blob/ce908a66890bcdd87e709b584682c6b3a3a851be/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h#L38), - used to coordinate the `setUserWriteBlockMode` command for sharded clusters -* The [`UserWritesRecoverableCriticalSectionService` - class](https://github.com/mongodb/mongo/blob/1c4e5ba241829145026f8aa0db70707f15fbe7b3/src/mongo/db/s/user_writes_recoverable_critical_section_service.h#L88), - used to manage and persist the user write blocking state -* The `setUserWriteBlockMode` command invocation: - - [On a non-sharded - `mongod`](https://github.com/mongodb/mongo/blob/25377181476e4140c970afa5b018f9b4fcc951e8/src/mongo/db/commands/set_user_write_block_mode_command.cpp#L68) - - [On a shard - server](https://github.com/mongodb/mongo/blob/25377181476e4140c970afa5b018f9b4fcc951e8/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp#L61) - - [On a config - server](https://github.com/mongodb/mongo/blob/c96f8dacc4c71b4774c932a07be4fac71b6db628/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp#L56) - - [On a - `mongos`](https://github.com/mongodb/mongo/blob/4ba31bc8627426538307848866d3165a17aa29fb/src/mongo/s/commands/cluster_set_user_write_block_mode_command.cpp#L61) diff --git a/src/mongo/db/s/README_ddl_operations.md b/src/mongo/db/s/README_ddl_operations.md index 040fe768d33c1..e33a716ee09d0 100644 --- a/src/mongo/db/s/README_ddl_operations.md +++ b/src/mongo/db/s/README_ddl_operations.md @@ -1,5 +1,5 @@ # DDL Operations -On the Sharding team, we use the term *DDL* to mean any operation that needs to update any subset of [catalog containers](https://github.com/mongodb/mongo/blob/f8a2113103a509ffa361c5aacb3ec0fa94858f9b/src/mongo/db/s/README_sharding_catalog.md#catalog-containers). Within this definition, there are standard DDLs that use the DDL coordinator infrastructure as well as non-standard DDLs that each have their own implementations. +On the Sharding team, we use the term *DDL* to mean any operation that needs to update any subset of [catalog containers](README_sharding_catalog.md#catalog-containers). Within this definition, there are standard DDLs that use the DDL coordinator infrastructure as well as non-standard DDLs that each have their own implementations. ## Standard DDLs Most DDL operations are built upon the DDL coordinator infrastructure which provides some [retriability](#retriability), [synchronization](#synchronization), and [recoverability](#recovery) guarantees. 
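To make the retriability and recoverability guarantees above concrete, here is a minimal, illustrative sketch of the persist-then-resume pattern a phase-based coordinator follows. It is not the real `ShardingDDLCoordinator` interface; the type and member names below are hypothetical, and a real coordinator persists its state document with majority write concern rather than printing to stdout.

```cpp
#include <cstddef>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Hypothetical stand-in for a coordinator's persisted state document.
struct StateDocument {
    std::string operation;
    std::size_t nextPhase = 0;  // first phase that has not yet completed
};

class ToyDdlCoordinator {
public:
    ToyDdlCoordinator(StateDocument doc, std::vector<std::function<void()>> phases)
        : _doc(std::move(doc)), _phases(std::move(phases)) {}

    // Runs the remaining phases. After a crash or an election, a new instance is
    // rebuilt from the persisted document and run() resumes where it left off,
    // which is why each phase must be idempotent (safe to retry).
    void run() {
        while (_doc.nextPhase < _phases.size()) {
            _phases[_doc.nextPhase]();
            ++_doc.nextPhase;
            persist();  // checkpoint progress before starting the next phase
        }
    }

private:
    void persist() {
        // Stand-in for a majority-committed update of the state document.
        std::cout << _doc.operation << ": next phase to run is " << _doc.nextPhase << "\n";
    }

    StateDocument _doc;
    std::vector<std::function<void()>> _phases;
};
```

On step-up, the DDL primary-only service would rebuild such an object from each persisted state document and call `run()` again, which is the recovery behavior described in the section below.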
@@ -54,9 +54,15 @@ DDL coordinators are resilient to elections and sudden crashes because they are When a new primary node is elected, the DDL primary only service is rebuilt, and any ongoing coordinators will be restarted based on their persisted state document. During this recovery phase, any new requests for DDL operations are put on hold, waiting for existing coordinators to be re-instantiated to avoid conflicts with the DDL locks. +### Sections about specific standard DDL operations +- [User write blocking](README_user_write_blocking.md) + ## Non-Standard DDLs -Some DDL operations do not follow the structure outlined in the section above. These operations are chunk migration, resharding, and refine collection shard key. There are also other operations such as add and remove shard that do not modify the sharding catalog but do modify local metadata and need to coordinate with ddl operations. These operations also do not use the DDL coordinator infrastructure, but they do take the DDl lock to synchronize with other ddls. +Some DDL operations do not follow the structure outlined in the section above. These operations are [chunk migration](README_migrations.md), resharding, and refine collection shard key. There are also other operations such as add and remove shard that do not modify the sharding catalog but do modify local metadata and need to coordinate with DDL operations. These operations also do not use the DDL coordinator infrastructure, but they do take the DDL lock to synchronize with other DDLs. Both chunk migration and resharding have to copy user data across shards. This is too time intensive to happen entirely while holding the collection critical section, so these operations have separate machinery to transfer the data and commit the changes. These commands do not commit transactionally across the shards and the config server, rather they commit on the config server and rely on shards pulling the updated commit information from the config server after learning via a router that there is new information. They also do not have the same requirement as standard DDL operations that they must complete after starting except after entering their commit phases. Refine shard key commits only on the config server, again relying on shards to pull updated information from the config server after hearing about this more recent information from a router. In this case, this was done not because of the cost of transferring data, but so that refine shard key did not need to involve the shards. This allows the refineShardKey command to run quickly and not block operations. + +### Sections explaining specific non-standard DDL operations +- [Chunk Migration](README_migrations.md) diff --git a/src/mongo/db/s/README_new.md b/src/mongo/db/s/README_new.md deleted file mode 100644 index 271d2e517f30b..0000000000000 --- a/src/mongo/db/s/README_new.md +++ /dev/null @@ -1,65 +0,0 @@ -> **Warning** -> This is work in progress and some sections are incomplete - -# Sharding Architecture Guide -This page contains details of the source code architecture of the MongoDB Sharding system. It is intended to be used by engineers working on the core server, with some sections containing low-level details which are most appropriate for new engineers on the sharding team.
- -It is not intended to be a tutorial on how to operate sharding as a user and it requires that the reader is already familiar with the general concepts of [sharding](https://docs.mongodb.com/manual/sharding/#sharding), the [architecture of a MongoDB sharded cluster](https://docs.mongodb.com/manual/sharding/#sharded-cluster), and the concept of a [shard key](https://docs.mongodb.com/manual/sharding/#shard-keys). - -## Sharding terminology and acronyms -* Config Data: All the [catalog containers](README_sharding_catalog.md#catalog-containers) residing on the CSRS. -* Config Shard: Same as CSRS. -* CRUD operation: Comes from [Create, Read, Update, Delete](https://en.wikipedia.org/wiki/Create,_read,_update_and_delete), and indicates operations which modify a collection's data as opposed to the catalog. -* CSRS: **C**onfig **S**erver as a **R**eplica **S**et. This is a fancy name for the [config server](https://www.mongodb.com/docs/manual/core/sharded-cluster-config-servers/). Comes from the times of version 3.2 and earlier, when there was a legacy type of Config server called [SCCC](https://www.mongodb.com/docs/manual/release-notes/3.4-compatibility/#removal-of-support-for-sccc-config-servers) which didn't operate as a replica set. -* CSS: [Collection Sharding State](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/collection_sharding_state.h#L59) -* DDL operation: Comes from [Data Definition Language](https://en.wikipedia.org/wiki/Data_definition_language), and indicates operations which modify the catalog (e.g., create collection, create index, drop database) as opposed to CRUD, which modifies the data. -* DSS: [Database Sharding State](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/database_sharding_state.h#L42) -* Routing Info: The subset of data stored in the [catalog containers](README_sharding_catalog.md#catalog-containers) which is used for making routing decisions. As of the time of this writing, the contents of *config.databases*, *config.collections*, *config.indexes* and *config.chunks*. -* SS: [Sharding State](https://github.com/mongodb/mongo/blob/master/src/mongo/db/s/sharding_state.h#L51) - -## Sharding code architecture -The graph further down visualises the architecture of the MongoDB Sharding system and the relationships between its various components and the links below point to the relevant sections describing these components. 
- -- [Sharding catalog](README_sharding_catalog.md#sharding-catalog) -- [Router role](README_sharding_catalog.md#router-role) -- [Shard role](README_sharding_catalog.md#router-role) -- [Shard versioning protocol](README_versioning_protocols.md) -- [Balancer](README_balancer.md) -- [DDL Operations](README_ddl_operations.md) -- [Migrations](README_migrations.md) - -```mermaid -C4Component - -Container_Boundary(Sharding, "Sharding", $link="README_new.md") { - Container_Boundary(CatalogAPI, "Catalog API", $link="README_sharding_catalog.md") { - Component(RouterRole, "Router Role", $link="README_sharding_catalog.md#router-role") - Component(ShardRole, "Shard Role", $link="README_sharding_catalog.md#shard-role") - } - - Container_Boundary(CatalogRuntime, "Catalog Runtime") { - Container_Boundary(CatalogContainers, "Catalog Containers") { - ContainerDb(CSRSCollections, "CSRS Collections") - ContainerDb(MDBCatalog, "__mdb_catalog") - ContainerDb(AuthoritativeShardLocalCollections, "Authoritative Shard Local Collections") - ContainerDb(ShardLocalCollections, "Shard Local Collections") - } - - Container_Boundary(CatalogContainerCaches, "Catalog Container Caches") { - Component(CatalogCache, "Catalog Cache") - Component(SSCache, "Sharding State (cache)") - Component(DSSCache, "Database Sharding State (cache)") - Component(CSSCache, "Collection Sharding State (cache)") - } - } - - Container_Boundary(ShardingServices, "Sharding Services") { - Component(Balancer, "Balancer") - } - - Container_Boundary(ShardingDDL, "Sharding DDL") { - Component(DDLCoordinators, "DDL Coordinators") - Component(DataCloning, "Data Cloning") - } -} -``` diff --git a/src/mongo/db/s/README_routing_info_cache_consistency_model.md b/src/mongo/db/s/README_routing_info_cache_consistency_model.md index ed4305f6c82c3..e7d328a636c5c 100644 --- a/src/mongo/db/s/README_routing_info_cache_consistency_model.md +++ b/src/mongo/db/s/README_routing_info_cache_consistency_model.md @@ -1,5 +1,5 @@ # Consistency Model of the Routing Info Cache -This section builds upon the definitions of the sharding catalog in [this section](https://github.com/mongodb/mongo/blob/9b4ddb11af242d7c8d48181c26ca091fe4533642/src/mongo/db/s/README_sharding_catalog.md#catalog-containers) and elaborates on the consistency model of the [CatalogCache](https://github.com/mongodb/mongo/blob/r6.0.0/src/mongo/s/catalog_cache.h#L134), which is what backs the [Router role](README_sharding_catalog.md#router-role). +This section builds upon the definitions of the sharding catalog in [this section](README_sharding_catalog.md#catalog-containers) and elaborates on the consistency model of the [CatalogCache](https://github.com/mongodb/mongo/blob/r6.0.0/src/mongo/s/catalog_cache.h#L134), which is what backs the [Router role](README_sharding_catalog.md#router-role). ## Timelines Let's define the set of operations which a DDL coordinator performs over a set of catalog objects as the **timeline** of that object. The timelines of different objects can be **causally dependent** (or just *dependent* for brevity) on one another, or they can be **independent**. diff --git a/src/mongo/db/s/README_sessions_and_transactions.md b/src/mongo/db/s/README_sessions_and_transactions.md new file mode 100644 index 0000000000000..3615d9c73d706 --- /dev/null +++ b/src/mongo/db/s/README_sessions_and_transactions.md @@ -0,0 +1,455 @@ + +# Logical Sessions + +Some operations, such as retryable writes and transactions, require durably storing metadata in the +cluster about the operation. 
However, it's important that this metadata does not remain in the +cluster forever. + +Logical sessions provide a way to durably store metadata for the _latest_ operation in a sequence of +operations. The metadata is reaped if the cluster does not receive a new operation under the logical +session for a reasonably long time (the default is 30 minutes). + +A logical session is identified by its "logical session id," or `lsid`. An `lsid` is a combination +of up to four pieces of information: + +1. `id` - A globally unique id (UUID) generated by the mongo shell, driver, or the `startSession` server command +1. `uid` (user id) - The identification information for the logged-in user (if authentication is enabled) +1. `txnNumber` - An optional parameter set only for internal transactions spawned from retryable writes. Strictly-increasing counter set by the transaction API to match the txnNumber of the corresponding retryable write. +1. `txnUUID` - An optional parameter set only for internal transactions spawned inside client sessions. The txnUUID is a globally unique id generated by the transaction API. + +A logical session with a `txnNumber` and `txnUUID` is considered a child of the session with matching `id` and `uid` values. There may be multiple child sessions per parent session, and checking out a child/parent session checks out the other and updates the `lastUsedTime` of both. Killing a parent session also kills all of its child sessions. + +The order of operations in the logical session that need to durably store metadata is defined by an +integer counter, called the `txnNumber`. When the cluster receives a retryable write or transaction +with a higher `txnNumber` than the previous known `txnNumber`, the cluster overwrites the previous +metadata with the metadata for the new operation. + +Operations sent with an `lsid` that do not need to durably store metadata simply bump the time at +which the session's metadata expires. + +## The logical session cache + +The logical session cache is an in-memory cache of sessions that are open and in use on a certain +node. Each node (router, shard, config server) has its own in-memory cache. A cache entry contains: +1. `_id` - The session’s logical session id +1. `user` - The session’s logged-in username (if authentication is enabled) +1. `lastUse` - The date and time that the session was last used + +The in-memory cache periodically persists entries to the `config.system.sessions` collection, known +as the "sessions collection." The sessions collection has different placement behavior based on +whether the user is running a standalone node, a replica set, or a sharded cluster. + +| Cluster Type | Sessions Collection Durable Storage | +|-----------------|------------------------------------------------------------------------------------------------------------------| +| Standalone Node | Sessions collection exists on the same node as the in-memory cache. | +| Replica Set | Sessions collection exists on the primary node and replicates to secondaries. | +| Sharded Cluster | Sessions collection is a regular sharded collection - can exist on multiple shards and can have multiple chunks. | + +### Session expiration + +There is a TTL index on the `lastUse` field in the sessions collection. The TTL expiration date is +thirty (30) minutes out by default, but is user-configurable. This means that if no requests come +in that use a session for thirty minutes, the TTL index will remove the session from the sessions +collection.
When the logical session cache performs its periodic refresh (defined below), it will +find all sessions that currently exist in the cache that no longer exist in the sessions +collection. This is the set of sessions that we consider "expired". The expired sessions are then +removed from the in-memory cache. + +### How a session gets placed into the logical session cache + +When a node receives a request with attached session info, it will place that session into the +logical session cache. If a request corresponds to a session that already exists in the cache, the +cache will update the cache entry's `lastUse` field to the current date and time. + +### How the logical session cache syncs with the sessions collection + +At a regular interval of five (5) minutes (user-configurable), the logical session cache will sync +with the sessions collection. Inside the class, this is known as the "refresh" function. There are +four steps to this process: + +1. All sessions that have been used on this node since the last refresh will be upserted to the sessions collection. This means that sessions that already exist in the sessions collection will just have their `lastUse` fields updated. +1. All sessions that have been ended in the cache on this node (via the endSessions command) will be removed from the sessions collection. +1. Sessions that have expired from the sessions collection will be removed from the logical session cache on this node. +1. All cursors registered on this node that match sessions that have been ended (step 2) or were expired (step 3) will be killed. + +### Periodic cleanup of the session catalog and transactions table + +The logical session cache class holds the periodic job to clean up the +[session catalog](#the-logical-session-catalog) and [transactions table](#the-transactions-table). +Inside the class, this is known as the "reap" function. Every five (5) minutes (user-configurable), +the following steps will be performed: + +1. Find all sessions in the session catalog that were last checked out more than thirty minutes ago (default session expiration time). +1. For each session gathered in step 1, if the session no longer exists in the sessions collection (i.e. the session has expired or was explicitly ended), remove the session from the session catalog. +1. Find all entries in the transactions table that have a last-write date of more than thirty minutes ago (default session expiration time). +1. For each entry gathered in step 3, if the session no longer exists in the sessions collection (i.e. the session has expired or was explicitly ended), remove the entry from the transactions table. + +#### Configurable parameters related to the logical session cache + +| Parameter | Value Type | Default Value | Startup/Runtime | Description | +|--------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------|----------------------|-----------------|----------------------------------------------------------------------------------------------------------------------------------------| +| [disableLogicalSessionCacheRefresh](https://github.com/mongodb/mongo/blob/9cbbb66d7536ab4f92baf99ef5332e96be0e4153/src/mongo/db/logical_session_cache.idl#L49-L54) | boolean | false | Startup | Disables the logical session cache's periodic "refresh" and "reap" functions on this node. Recommended for testing only. 
| +| [logicalSessionRefreshMillis](https://github.com/mongodb/mongo/blob/9cbbb66d7536ab4f92baf99ef5332e96be0e4153/src/mongo/db/logical_session_cache.idl#L34-L40) | integer | 300000ms (5 minutes) | Startup | Changes how often the logical session cache runs its periodic "refresh" and "reap" functions on this node. | +| [localLogicalSessionTimeoutMinutes](https://github.com/mongodb/mongo/blob/9cbbb66d7536ab4f92baf99ef5332e96be0e4153/src/mongo/db/logical_session_id.idl#L191-L196) | integer | 30 minutes | Startup | Changes the TTL index timeout for the sessions collection. In sharded clusters, this parameter is supported only on the config server. | + +#### Code references + +* [Place where a session is placed (or replaced) in the logical session cache](https://github.com/mongodb/mongo/blob/1f94484d52064e12baedc7b586a8238d63560baf/src/mongo/db/logical_session_cache.h#L71-L75) +* [The logical session cache refresh function](https://github.com/mongodb/mongo/blob/1f94484d52064e12baedc7b586a8238d63560baf/src/mongo/db/logical_session_cache_impl.cpp#L207-L355) +* [The periodic job to clean up the session catalog and transactions table (the "reap" function)](https://github.com/mongodb/mongo/blob/1f94484d52064e12baedc7b586a8238d63560baf/src/mongo/db/logical_session_cache_impl.cpp#L141-L205) +* [Location of the session catalog and transactions table cleanup code on mongod](https://github.com/mongodb/mongo/blob/1f94484d52064e12baedc7b586a8238d63560baf/src/mongo/db/session/session_catalog_mongod.cpp#L331-L398) + +## The logical session catalog + +The logical session catalog of a mongod or mongos is an in-memory catalog that stores the runtime state +for sessions with transactions or retryable writes on that node. The runtime state of each session is +maintained by the session checkout mechanism, which also serves to serialize client operations on +the session. This mechanism requires every operation with an `lsid` and a `txnNumber` (i.e. +transaction and retryable write) to check out its session from the session catalog prior to execution, +and to check the session back in upon completion. When a session is checked out, it remains unavailable +until it is checked back in, forcing other operations to wait for the ongoing operation to complete +or yield the session. + +Checking out an internal/child session additionally checks out its parent session (the session with the same `id` and `uid` value in the lsid, but without a `txnNumber` or `txnUUID` value), and vice versa. + +The runtime state for a session consists of the last checkout time and operation, the number of operations +waiting to check out the session, and the number of kills requested. Retryable internal sessions are reaped from the logical session catalog [eagerly](https://github.com/mongodb/mongo/blob/67e37f8e806a6a5d402e20eee4b3097e2b11f820/src/mongo/db/session/session_catalog.cpp#L342), meaning that if a transaction session with a higher transaction number has successfully started, sessions with lower txnNumbers are removed from the session catalog and inserted into an in-memory buffer by the [InternalTransactionsReapService](https://github.com/mongodb/mongo/blob/67e37f8e806a6a5d402e20eee4b3097e2b11f820/src/mongo/db/internal_transactions_reap_service.h#L42) until a configurable threshold is met (1000 by default), after which they are deleted from the transactions table (`config.transactions`) and `config.image_collection` all at once. Eager reaping is best-effort, in that the in-memory buffer is cleared on stepdown or restart. 
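As a rough illustration of the checkout contract described above, the sketch below serializes operations on a session with a mutex and condition variable. It is not the real `SessionCatalog` API (which also pairs parent and child sessions, counts waiters, and tracks kill requests); the names are illustrative and the lsid is reduced to a string.

```cpp
#include <condition_variable>
#include <map>
#include <mutex>
#include <string>

// Minimal sketch of the checkout contract: at most one operation owns a session
// at a time, and every other operation blocks until the session is checked back in.
class ToySessionCatalog {
public:
    void checkOut(const std::string& lsid) {
        std::unique_lock<std::mutex> lk(_mutex);
        // Wait until no other operation has this session checked out.
        _cv.wait(lk, [&] { return !_checkedOut[lsid]; });
        _checkedOut[lsid] = true;
    }

    void checkIn(const std::string& lsid) {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            _checkedOut[lsid] = false;
        }
        _cv.notify_all();  // wake operations waiting to check out a session
    }

private:
    std::mutex _mutex;
    std::condition_variable _cv;
    std::map<std::string, bool> _checkedOut;
};
```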
+ +The last checkout time is used by +the [periodic job inside the logical session cache](#periodic-cleanup-of-the-session-catalog-and-transactions-table) +to determine when a session should be reaped from the session catalog, whereas the number of +operations waiting to check out a session is used to block reaping of sessions that are still in +use. The last checkout operation is used to determine the operation to kill when a session is +killed, whereas the number of kills requested is used to make sure that sessions are only killed on +the first kill request. + +### The transactions table + +The runtime state in a node's in-memory session catalog is made durable in the node's +`config.transactions` collection, also called its transactions table. The in-memory session catalog + is +[invalidated](https://github.com/mongodb/mongo/blob/56655b06ac46825c5937ccca5947dc84ccbca69c/src/mongo/db/session/session_catalog_mongod.cpp#L324) +if the `config.transactions` collection is dropped and whenever there is a rollback. When +invalidation occurs, all active sessions are killed, and the in-memory transaction state is marked +as invalid to force it to be +[reloaded from storage the next time a session is checked out](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/session/session_catalog_mongod.cpp#L426). + +#### Code references +* [**SessionCatalog class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/session/session_catalog.h) +* [**MongoDSessionCatalog class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/session/session_catalog_mongod.h) +* [**RouterSessionCatalog class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/session_catalog_router.h) +* How [**mongod**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/service_entry_point_common.cpp#L537) and [**mongos**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/commands/strategy.cpp#L412) check out a session prior to executing a command. + +## Retryable writes + +Retryable writes allow drivers to automatically retry non-idempotent write commands on network errors or failovers. +They are supported in logical sessions with `retryableWrites` enabled (default), with the caveat that the writes +are executed with write concern `w` greater than 0 and outside of transactions. [Here](https://github.com/mongodb/specifications/blob/49589d66d49517f10cc8e1e4b0badd61dbb1917e/source/retryable-writes/retryable-writes.rst#supported-write-operations) +is a complete list of retryable write commands. + +When a command is executed as a retryable write, it is sent from the driver with `lsid` and `txnNumber` attached. +After that, all write operations inside the command are assigned a unique integer statement id `stmtId` by the +mongos or mongod that executes the command. In other words, each write operation inside a batch write command +is given its own `stmtId` and is individually retryable. The `lsid`, `txnNumber`, and `stmtId` constitute a +unique identifier for a retryable write operation. + +This unique identifier enables a primary mongod to track and record its progress for a retryable +write command using the `config.transactions` collection and augmented oplog entries. The oplog +entry for a retryable write operation is written with a number of additional fields including +`lsid`, `txnNumber`, `stmtId` and `prevOpTime`, where `prevOpTime` is the opTime of the write that +precedes it. 
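The additional oplog fields just described (`lsid`, `txnNumber`, `stmtId` and `prevOpTime`) link each retryable write to the one before it in the session. As a simplified picture of that chain, the sketch below follows the `prevOpTime` links backwards from the session's most recent write to decide whether a statement has already executed; the types are reduced stand-ins (an opTime is a single integer here), not the server's real ones.

```cpp
#include <cstdint>
#include <map>
#include <optional>
#include <vector>

using OpTime = std::uint64_t;
using StmtId = std::int32_t;

struct RetryableWriteOplogEntry {
    OpTime opTime;
    std::vector<StmtId> stmtIds;       // usually one; time-series inserts may carry several
    std::optional<OpTime> prevOpTime;  // link to the previous write in this session
};

// Walks the chain starting from the session's most recent write and reports the
// opTime of the entry that already executed the given statement, if any.
std::optional<OpTime> findExecutedStatement(
    const std::map<OpTime, RetryableWriteOplogEntry>& oplog,
    OpTime lastWriteOpTime,
    StmtId stmtId) {
    std::optional<OpTime> cursor = lastWriteOpTime;
    while (cursor) {
        auto it = oplog.find(*cursor);
        if (it == oplog.end())
            break;
        for (StmtId executed : it->second.stmtIds) {
            if (executed == stmtId)
                return it->second.opTime;  // already applied: reply from this entry instead
        }
        cursor = it->second.prevOpTime;
    }
    return std::nullopt;  // not found: execute the statement normally
}
```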
In certain cases, such as time-series inserts, a single oplog entry may encode +multiple client writes, and thus may contain an array value for `stmtId` rather than the more +typical single value. All of this results in a chain of write history that can be used to +reconstruct the result of writes that have already executed. After generating the oplog entry for a +retryable write operation, a primary mongod performs an upsert into `config.transactions` to write +a document containing the `lsid` (`_id`), `txnNumber`, `stmtId` and `lastWriteOpTime`, where +`lastWriteOpTime` is the opTime of the newly generated oplog entry. The `config.transactions` +collection is indexed by `_id` so this document is replaced every time there is a new retryable +write command (or transaction) on the session. + +The opTimes for all committed statements for the latest retryable write command are cached in an [in-memory table](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/transaction_participant.h#L928) that gets [updated](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/transaction_participant.cpp#L2125-L2127) after each +write oplog entry is generated, and gets cleared every time a new retryable write command starts. Prior to executing +a retryable write operation, a primary mongod first checks to see if it has the commit opTime for the `stmtId` of +that write. If it does, the write operation is skipped and a response is constructed immediately based on the oplog +entry with that opTime. Otherwise, the write operation is performed with the additional bookkeeping as described above. +This in-memory cache of opTimes for committed statements is invalidated along with the entire in-memory transaction +state whenever the `config.transactions` collection is dropped and whenever there is a rollback. The invalidated transaction +state is overwritten by the on-disk transaction history at the next session checkout. + +To support retryability of writes across migrations, the session state for the migrated chunk is propagated +from the donor shard to the recipient shard. After entering the chunk cloning step, the recipient shard +repeatedly sends [\_getNextSessionMods](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp#L240-L359) (also referred to as MigrateSession) commands to +the donor shard until the migration reaches the commit phase to clone any oplog entries that contain session +information for the migrated chunk. Upon receiving each response, the recipient shard writes the oplog entries +to disk and [updates](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/transaction_participant.cpp#L2142-L2144) its in-memory transaction state to restore the session state for the chunk. + +### Retryable writes and findAndModify + +For most writes, persisting only the (lsid, txnId) pair is sufficient to reconstruct a +response. For findAndModify, however, we also need to respond with the document that would have +originally been returned. In version 5.0 and earlier, the default behavior is to +[record the document image into the oplog](https://github.com/mongodb/mongo/blob/33ad68c0dc4bda897a5647608049422ae784a15e/src/mongo/db/op_observer_impl.cpp#L191) +as a no-op entry.
The oplog entries generated would look something like: + +* `{ op: "d", o: {_id: 1}, ts: Timestamp(100, 2), preImageOpTime: Timestamp(100, 1), lsid: ..., txnNumber: ...}` +* `{ op: "n", o: {_id: 1, imageBeforeDelete: "foobar"}, ts: Timestamp(100, 1)}` + +There's a cost in "explicitly" replicating these images via the oplog. We've addressed this cost +with 5.1, where the default is instead to [save the image into a side collection](https://github.com/mongodb/mongo/blob/33ad68c0dc4bda897a5647608049422ae784a15e/src/mongo/db/op_observer_impl.cpp#L646-L650) +with the namespace `config.image_collection`. A primary will add a `needsRetryImage` field +to the oplog entry to communicate to secondaries that they must make a +corollary write to `config.image_collection`. + +Note that this feature was backported to 4.0, 4.2, 4.4 and 5.0. Released binaries with this +capability can be turned on by [setting the `storeFindAndModifyImagesInSideCollection` server +parameter](https://github.com/mongodb/mongo/blob/2ac9fd6e613332f02636c6a7ec7f6cff4a8d05ab/src/mongo/db/repl/repl_server_parameters.idl#L506-L512). + +Partial cloning mechanisms such as chunk migrations, tenant migrations and resharding all support +the destination picking up the responsibility for satisfying a retryable write the source had +originally processed (to some degree). These cloning mechanisms naturally tail the oplog to pick up +on changes. Because the traditional retryable findAndModify algorithm places the images into the +oplog, the destination just needs to relink the timestamps for its oplog to support retryable +findAndModify. + +For retry images saved in the image collection, the source will "downconvert" oplog entries with +`needsRetryImage: true` into two oplog entries, simulating the old format. As chunk migrations use +internal commands, [this downconverting procedure](https://github.com/mongodb/mongo/blob/0beb0cacfcaf7b24259207862e1d0d489e1c16f1/src/mongo/db/s/session_catalog_migration_source.cpp#L58-L97) +is installed under the hood. For resharding and tenant migrations, a new aggregation stage, +[_internalFindAndModifyImageLookup](https://github.com/mongodb/mongo/blob/e27dfa10b994f6deff7c59a122b87771cdfa8aba/src/mongo/db/pipeline/document_source_find_and_modify_image_lookup.cpp#L61), +was introduced to perform the identical substitution. In order for this stage to have a valid timestamp +to assign to the forged no-op oplog entry as a result of the "downconvert", we must always assign an +extra oplog slot when writing the original retryable findAndModify oplog entry with +`needsRetryImage: true`. + +In order to avoid certain WiredTiger constraints surrounding setting multiple timestamps in a single storage transaction, we must reserve +oplog slots before entering the OpObserver, which is where we would normally create an oplog entry +and assign it the next available timestamp.
Here, we have a table that describes the different +scenarios, along with the timestamps that are reserved and the oplog entries assigned to each of +those timestamps: +| Parameters | NumSlotsReserved | TS - 1 | TS | Oplog fields for entry with timestamp: TS | +| --- | --- | --- | --- | --- | +| Update, NeedsRetryImage=preImage | 2 | Reserved for forged no-op entry eventually used by tenant migrations/resharding|Update oplog entry|NeedsRetryImage: preImage | +| Update, NeedsRetryImage=postImage | 2 | Reserved for forged no-op entry eventually used by tenant migrations/resharding|Update oplog entry | NeedsRetryImage: postImage | +|Delete, NeedsRetryImage=preImage |2|Reserved for forged no-op entry eventually used by tenant migrations/resharding|Delete oplog entry|NeedsRetryImage: preImage| + +#### Code references +* [**TransactionParticipant class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/transaction_participant.h) +* How a write operation [checks if a statement has been executed](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/ops/write_ops_exec.cpp#L811-L816) +* How mongos [assigns statement ids to writes in a batch write command](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/write_ops/batch_write_op.cpp#L483-L486) +* How mongod [assigns statement ids to insert operations](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/ops/write_ops_exec.cpp#L573) +* [Retryable writes specifications](https://github.com/mongodb/specifications/blob/49589d66d49517f10cc8e1e4b0badd61dbb1917e/source/retryable-writes/retryable-writes.rst) + +## Transactions + +Cross-shard transactions provide ACID guarantees for multi-statement operations that involve documents on +multiple shards in a cluster. Similar to [transactions on a single replica set](https://github.com/mongodb/mongo/blob/r4.4.0-rc7/src/mongo/db/repl/README.md#transactions), cross-shard transactions are only supported in logical +sessions. They have a configurable lifetime limit, and are automatically aborted when they are expired +or when the session is killed. + +To run a cross-shard transaction, a client sends all statements, including the `commitTransaction` and +`abortTransaction` command, to a single mongos with common `lsid` and `txnNumber` attached. The first +statement is sent with `startTransaction: true` to indicate the start of a transaction. Once a transaction +is started, it remains active until it is explicitly committed or aborted by the client, or unilaterally +aborted by a participant shard, or overwritten by a transaction with a higher `txnNumber`. + +When a mongos executes a transaction, it is responsible for keeping track of all participant shards, and +choosing a coordinator shard and a recovery shard for the transaction. In addition, if the transaction +uses read concern `"snapshot"`, the mongos is also responsible for choosing a global read timestamp (i.e. +`atClusterTime`) at the start of the transaction. The mongos will, by design, always choose the first participant +shard as the coordinator shard, and the first shard that the transaction writes to as the recovery shard. +Similarly, the global read timestamp will always be the logical clock time on the mongos when it receives +the first statement for the transaction. If a participant shard cannot provide a snapshot at the chosen +read timestamp, it will throw a snapshot error, which will trigger a client level retry of the transaction. 
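As a small illustration of the selection rules described above (this is not the actual `TransactionRouter` code, and `ShardId` is reduced to a string), the first participant contacted becomes the coordinator and the first participant that reports a write becomes the recovery shard:

```cpp
#include <optional>
#include <string>
#include <vector>

using ShardId = std::string;

struct Participant {
    ShardId shard;
    bool readOnly;  // reported by the shard in its response to the statement
};

struct TransactionRoutingState {
    std::optional<ShardId> coordinator;
    std::optional<ShardId> recoveryShard;

    void onParticipantAdded(const Participant& p) {
        if (!coordinator)
            coordinator = p.shard;       // first participant overall
        if (!recoveryShard && !p.readOnly)
            recoveryShard = p.shard;     // first participant that did a write
    }
};
```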
+The mongos will only keep this information in memory as it relies on the participant shards to persist their +respective transaction states in their local `config.transactions` collection. + +The execution of a statement inside a cross-shard transaction works very similarly to that of a statement +outside a transaction. One difference is that mongos attaches the transaction information (e.g. `lsid`, +`txnNumber` and `coordinator`) in every statement it forwards to targeted shards. Additionally, the first +statement to a participant shard is sent with `startTransaction: true` and `readConcern`, which contains +the `atClusterTime` if the transaction uses read concern `"snapshot"`. When a participant shard receives +a transaction statement with `coordinator: true` for the first time, it will infer that it has been chosen +as the transaction coordinator and will set up in-memory state immediately to prepare for coordinating +transaction commit. One other difference is that the response from each participant shard includes an +additional `readOnly` flag, which is set to true if the statement does not do a write on the shard. Mongos +uses this to determine how a transaction should be committed or aborted, and to choose the recovery shard +as described above. The id of the recovery shard is included in the `recoveryToken` in the response to +the client. + +### Committing a Transaction + +The commit procedure begins when a client sends a `commitTransaction` command to the mongos that the +transaction runs on. The command is retryable as long as no new transaction has been started on the session +and the session is still alive. The number of participant shards and the number of write shards determine +the commit path for the transaction. + +* If the number of participant shards is zero, the mongos skips the commit and returns immediately. +* If the number of participant shards is one, the mongos forwards `commitTransaction` directly to that shard. +* If the number of participant shards is greater than one: + * If the number of write shards is zero, the mongos forwards `commitTransaction` to each shard individually. + * Otherwise, the mongos sends `coordinateCommitTransaction` with the participant list to the coordinator shard to + initiate two-phase commit. + +To recover the commit decision after the original mongos has become unreachable, the client can send `commitTransaction` +along with the `recoveryToken` to a different mongos. This will not initiate committing the transaction; instead, +the mongos will send `coordinateCommitTransaction` with an empty participant list to the recovery shard to try to +join the progress of the existing coordinator, if any, and to retrieve the commit outcome for the transaction. + +#### Two-phase Commit Protocol + +The two-phase commit protocol consists of the prepare phase and the commit phase. To support recovery from +failovers, a coordinator keeps a document inside the `config.transaction_coordinators` collection that contains +information about the transaction it is trying to commit. This document is deleted when the commit procedure finishes. + +Below are the steps in the two-phase commit protocol. + +* Prepare Phase + 1. The coordinator writes the participant list to the `config.transaction_coordinators` document for the +transaction, and waits for it to be majority committed. + 1. 
The coordinator sends [`prepareTransaction`](https://github.com/mongodb/mongo/blob/r4.4.0-rc7/src/mongo/db/repl/README.md#lifetime-of-a-prepared-transaction) to the participants, and waits for vote responses. Each participant +shard responds with a vote, marks the transaction as prepared, and updates the `config.transactions` +document for the transaction. + 1. The coordinator writes the decision to the `config.transaction_coordinators` document and waits for it to +be majority committed. If the `coordinateCommitTransactionReturnImmediatelyAfterPersistingDecision` server parameter is +true (default), the `coordinateCommitTransaction` command returns immediately after waiting for the client's write concern +(i.e. letting the remaining work continue in the background). + +* Commit Phase + 1. If the decision is 'commit', the coordinator sends `commitTransaction` to the participant shards, and waits +for responses. If the decision is 'abort', it sends `abortTransaction` instead. Each participant shard marks +the transaction as committed or aborted, and updates the `config.transactions` document. + 1. The coordinator deletes the coordinator document with write concern `{w: 1}`. + +The prepare phase is skipped if the coordinator already has the participant list and the commit decision persisted. +This can be the case if the coordinator was created as part of step-up recovery. + +### Aborting a Transaction + +Mongos will implicitly abort a transaction on any error except the view resolution error from a participant shard +if a two phase commit has not been initiated. To explicitly abort a transaction, a client must send an `abortTransaction` +command to the mongos that the transaction runs on. The command is also retryable as long as no new transaction has +been started on the session and the session is still alive. In both cases, the mongos simply sends `abortTransaction` +to all participant shards. + +#### Code references +* [**TransactionRouter class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/s/transaction_router.h) +* [**TransactionCoordinatorService class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/transaction_coordinator_service.h) +* [**TransactionCoordinator class**](https://github.com/mongodb/mongo/blob/r4.3.4/src/mongo/db/s/transaction_coordinator.h) + +## Internal Transactions + +Internal transactions are transactions that mongos and mongod can run on behalf of a client command regardless of a client's session option configuration. These transactions are started and managed internally by mongos/mongod; thus, clients are unaware of the execution of internal transactions. All internal transactions will be run within a session started internally, which we will refer to as `internal sessions`, except when the client is already running a transaction within a session, in which case we let the transaction execute as a regular client transaction. + +An internal transaction started on behalf of a client command is subject to the client command's constraints, such as terminating execution if the command's `$maxTimeMS` is reached, or guaranteeing retryability if the issued command was a retryable write. These constraints lead to the following concepts. + +### Non-Retryable Internal Transactions + +If a client runs a command without a session, or in a session where retryable writes are disabled (i.e. `retryWrites: false`), the server will start a non-retryable internal transaction.
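As an illustration of how these internal sessions relate to the `lsid` fields described in the Logical Sessions section, the sketch below shows the id of a child session spawned inside a client session. The types and helper names are hypothetical; the real server uses proper UUID and SHA-256 digest types rather than strings.

```cpp
#include <cstdint>
#include <optional>
#include <string>
#include <utility>

using Uuid = std::string;  // stand-in for a real UUID type

struct LogicalSessionId {
    Uuid id;                                // parent session UUID
    std::string uid;                        // digest identifying the authenticated user
    std::optional<std::int64_t> txnNumber;  // only for internal sessions spawned by retryable writes
    std::optional<Uuid> txnUUID;            // only for internal sessions spawned inside a client session
};

// Internal session for a non-retryable operation run inside a client session:
// a child of that session, identified by a fresh txnUUID.
LogicalSessionId makeNonRetryableChild(const LogicalSessionId& parent, Uuid txnUUID) {
    return {parent.id, parent.uid, std::nullopt, std::move(txnUUID)};
}

// Internal session for a retryable write: additionally carries the txnNumber of
// the client's retryable write so retries can be correlated with it.
LogicalSessionId makeRetryableChild(const LogicalSessionId& parent,
                                    std::int64_t clientTxnNumber,
                                    Uuid txnUUID) {
    return {parent.id, parent.uid, clientTxnNumber, std::move(txnUUID)};
}
```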
+ +### Retryable Internal Transactions + +If a client runs a command in a session where retryable writes are enabled (i.e. `retryWrites: true`), the server will start a retryable internal transaction. + +**Note**: The distinction between **Retryable** and **Non-Retryable** here is the requirement that Retryable Internal Transactions must fulfill the retryable write contract, which is described below. Both types of transactions will be [retried internally on transient errors](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_api.cpp#L207-L227). The only exception is an internal transaction that is started on behalf of a `client transaction`, which can only be retried by the client. + +#### How retryability is guaranteed + +We expect that retryable write commands that start retryable internal transactions conform to the retryable write contract, which has the following stipulations: + +1. Write statements within the command are guaranteed to apply only once regardless of how many times a client retries. +2. The response for the command is guaranteed to be reconstructable on retry. + +To do this, retryable write statements executed inside of a retryable internal transaction try to emulate the behavior of ordinary retryable writes. + +Each statement inside of a retryable write command should have a corresponding entry within a retryable internal transaction with the same `stmtId` as the original write statement. When a transaction participant for a retryable internal transaction notices a write statement with a previously seen `stmtId`, it will not execute the statement and instead generate the original response for the already executed statement using the oplog entry generated by the initial execution. The check for previously executed statements is done using the `retriedStmtIds` array, which contains the `stmtIds` of already retried statements, inside of a write command's response. + +In cases where a client retryable write command implicitly expects an auxiliary operation to be executed atomically with its current request, a retryable internal transaction may contain additional write statements that are not explicitly requested by a client retryable write command. An example could be that the client expects to atomically update an index when executing a write. Since these auxiliary write statements do not have a corresponding entry within the original client command, the `stmtId` field for these statements will be set to `{stmtId: kUninitializedStmtId}`. These auxiliary write statements are non-retryable; thus, it is crucial that we use the `retriedStmtIds` to determine which client write statements were already successfully retried to avoid re-applying the corresponding auxiliary write statements. Additionally, these statements will be excluded from the history check involving `retriedStmtIds`. + +To guarantee that we can reconstruct the response regardless of retries, we do a "cross sectional" write history check for retryable writes and retryable internal transactions prior to running a client retryable write/retryable internal transaction command. This ensures we do not double apply non-idempotent operations, and instead recover the response for a successful execution when appropriate.
To support this, the [RetryableWriteTransactionParticipantCatalog](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.h#L1221-L1299) was added as a decoration on an external session and it stores the transaction participants for all active retryable writes on the session, which we use to do our [write history check](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L3205-L3208). + +#### Reconstructing write responses + +To reconstruct responses for retryable internal transactions, we use the applyOps oplog entry, which contains an inner entry with the operation run under the `o` field that has a corresponding `stmtId`. We use the `stmtId` and `opTime` cached in the `TransactionParticipant` to look up the operation in the applyOps oplog entry, which gives us the necessary details to reconstruct the original write response. The process for reconstructing retryable write responses works the same way. + + +#### Special considerations for findAndModify + +`findAndModify` additionally requires the storage of pre/post images. Upon committing or preparing an internal transaction, we insert a document into `config.image_collection` containing the pre/post image. The operation entry for the findAndModify statement inside the applyOps oplog entry will have a `needsRetryImage` field that is set to `true` to indicate that a pre/post image should be loaded from the side collection when reconstructing the write response. We can do the lookup using a transaction's `lsid` and `txnNumber`. + +Currently, a retryable internal transaction can only support a **single** `findAndModify` statement at a time, due to the limitation that `config.image_collection` can only support storing one pre/post image entry for a given `(lsid, txnNumber)`. + +#### Retryability across failover and restart + +To be able to guarantee retryability under failover, we need to make sure that a mongod **always** has all the necessary transaction state loaded while executing a retryable write command. To do this, we recover the transaction state of the client and internal sessions [when checking out sessions](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/session/session_catalog_mongod.cpp#L694) on recovery. During checkout, we call [refreshFromStorageIfNeeded()](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2901) on the current client session (if we are running in one) to refresh the TransactionParticipant for that session. We then [fetch any relevant active internal sessions associated with the current client session and refresh the TransactionParticipants for those sessions](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2987). + +#### Handling retry conflicts + +Due to the use of `txnUUID` in the lsid for de-duplication purposes, retries of client write statements will always spawn a different internal session/transaction than the one originally used to do the initial attempt. This has two implications for conflict resolution: + +1. If the client retries on the same mongos/mongod that the original write was run on, retries are blocked by mongos/mongod until the original attempt finishes execution.
This is due to the [session checkout mechanism](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/service_entry_point_common.cpp#L973) that prevents checkout of an in-use session, which in this case would block the retry attempt from checking out the parent session. Once the original write finishes execution, the retry would either retry (if necessary) or recover the write response as described above. + +2. If the client retries on a different mongos than the original write was run on, the new mongos will not have visibility over in-progress internal transactions run by another mongos, so this retry will not be blocked and will legally begin execution. When the new mongos begins execution of the retried command, it will send commands with `startTransaction` to relevant transaction participants. The transaction participants will then [check if there is already an in-progress internal transaction that will conflict](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2827-L2846) with the new internal transaction that is attempting to start. If so, then the transaction participant will throw `RetryableTransactionInProgress`, which will be caught and cause the new transaction to [block until the existing transaction is finished](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/service_entry_point_common.cpp#L1029-L1036). + + +#### Supporting retryability across chunk migration and resharding + + The session history, oplog entries, and image collection entries involving the chunk being migrated are cloned from the donor shard to the recipient shard during chunk migration. Once the recipient receives the relevant oplog entries from the donor, it will [nest and apply each of the received oplog entries in a no-op oplog entry](https://github.com/mongodb/mongo/blob/0d84f4bab0945559abcd5b00be5ec322c5214642/src/mongo/db/s/session_catalog_migration_destination.cpp#L204-L347). Depending on the type of operation run, the behavior will differ as follows. + +* If a non-retryable write/non-retryable internal transaction is run, then the donor shard will [send a sentinel no-op oplog entry](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/s/session_catalog_migration_destination.cpp#L204-L354), which when parsed by the TransactionParticipant upon getting a retry against the recipient shard will [throw IncompleteTransactionHistory](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L323-L331). + +* If a retryable write/retryable internal transaction is run, then the donor shard will send a ["downconverted" oplog entry](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/s/session_catalog_migration_source.cpp#L669-L680), which when parsed by the TransactionParticipant upon getting a retry against the recipient shard will return the original write response. + +`Note`: "Downconverting" in this context is the process of extracting the operation information inside an applyOps entry for an internal transaction and constructing a new retryable write oplog entry with `lsid` and `txnNumber` set to the associated client's session id and txnNumber. + +For resharding, the process is similar to how chunk migrations are handled.
The session history, oplog entries, and image collection entries for operations run during resharding are cloned from the donor shard to the recipient shard. The only difference is that the recipient in this case will handle the "downconverting", nesting, and applying of the received oplog entries. The two cases discussed above apply to resharding as well. + + +#### Code References + +* [**Session checkout logic**](https://github.com/mongodb/mongo/blob/0d84f4bab0945559abcd5b00be5ec322c5214642/src/mongo/db/session/session_catalog_mongod.cpp#L694) +* [**Cross-section history check logic**](https://github.com/mongodb/mongo/blob/0d84f4bab0945559abcd5b00be5ec322c5214642/src/mongo/db/transaction/transaction_participant.cpp#L3206) +* [**Conflicting internal transaction check logic**](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2827-L2846) +* [**Refreshing client and internal sessions logic**](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.cpp#L2889-L2899) +* [**RetryableWriteTransactionParticipantCatalog**](https://github.com/mongodb/mongo/blob/d8ce3ee2e020d1ab2fa611a2a0f0a222b06b9779/src/mongo/db/transaction/transaction_participant.h#L1221-L1299) + +### Transaction API + +The [transaction API](https://github.com/mongodb/mongo/blob/master/src/mongo/db/transaction/transaction_api.h) is used to initiate transactions from within the server. The API starts an internal transaction on its local process, executes transaction statements specified in a callback, and completes the transaction by committing/aborting/retrying on transient errors. By default, a transaction can be retried 120 times to mirror the 2 minute timeout used by the [driver’s convenient transactions API](https://github.com/mongodb/specifications/blob/92d77a6d/source/transactions-convenient-api/transactions-convenient-api.rst). + +Additionally, the API can use router commands when running on a mongod. Each command will execute as if on a mongos, targeting remote shards and initiating a two phase commit if necessary. To enable this router behavior the [`cluster_transaction_api`](https://github.com/mongodb/mongo/blob/master/src/mongo/db/cluster_transaction_api.h) defines an additional set of behaviors to rename commands to their [cluster command names](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/db/cluster_transaction_api.cpp#L44-L52). + +Transactions for non-retryable operations or operations without a session initiated through the API use sessions from the [InternalSessionPool](https://github.com/mongodb/mongo/blob/master/src/mongo/db/internal_session_pool.h) to prevent the creation and maintenance of many single-use sessions. + +To use the transaction API, [instantiate a transaction client](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp#L250-L253) by providing the opCtx, an executor, and resource yielder. Then, run the commands to be grouped in the same transaction session on the transaction object. Some examples of this are listed below. 
+ +* [Cluster Find and Modify Command](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp#L255-L265) +* [Queryable Encryption](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/db/commands/fle2_compact.cpp#L636-L648) +* [Cluster Write Command - WouldChangeOwningShard Error](https://github.com/mongodb/mongo/blob/63f99193df82777239f038666270e4bfb2be3567/src/mongo/s/commands/cluster_write_cmd.cpp#L162-L190) + +## The historical routing table + +When a mongos or mongod executes a command that requires shard targeting, it must use routing information +that matches the read concern of the command. If the command uses `"snapshot"` read concern, it must use +the historical routing table at the selected read timestamp. If the command uses any other read concern, +it must use the latest cached routing table. + +The [routing table cache](#the-routing-table-cache) provides an interface for obtaining the routing table +at a particular timestamp and collection version, namely the `ChunkManager`. The `ChunkManager` has an +optional clusterTime associated with it and a `RoutingTableHistory` that contains historical routing +information for all chunks in the collection. That information is stored in an ordered map from the max +key of each chunk to an entry that contains routing information for the chunk, such as chunk range, +chunk version and chunk history. The chunk history contains the shard id for the shard that currently +owns the chunk, and the shard id for any other shards that used to own the chunk in the past +`minSnapshotHistoryWindowInSeconds` (defaults to 300 seconds). It corresponds to the chunk history in +the `config.chunks` document for the chunk which gets updated whenever the chunk goes through an +operation, such as merge or migration. The `ChunkManager` uses this information to determine the +shards to target for a query. If the clusterTime is not provided, it will return the shards that +currently own the target chunks. Otherwise, it will return the shards that owned the target chunks +at that clusterTime and will throw a `StaleChunkHistory` error if it cannot find them. + +#### Code references +* [**ChunkManager class**](https://github.com/mongodb/mongo/blob/r4.3.6/src/mongo/s/chunk_manager.h#L233-L451) +* [**RoutingTableHistory class**](https://github.com/mongodb/mongo/blob/r4.3.6/src/mongo/s/chunk_manager.h#L70-L231) +* [**ChunkHistory class**](https://github.com/mongodb/mongo/blob/r4.3.6/src/mongo/s/catalog/type_chunk.h#L131-L145) + +--- diff --git a/src/mongo/db/s/README_sharding_catalog.md b/src/mongo/db/s/README_sharding_catalog.md index b614cf6810ef8..f1db553c5a708 100644 --- a/src/mongo/db/s/README_sharding_catalog.md +++ b/src/mongo/db/s/README_sharding_catalog.md @@ -38,7 +38,7 @@ Based on the above, as it stands, different containers on different nodes are au ### Synchronisation The most important requirement of any sharded feature is that it scales linearly with the size of the data or the workload. -In order to scale, sharding utilises "optimistic" distributed synchronisation protocols to avoid creating nodes which are a bottleneck (i.e., the CSRS). One of these protocols, named [shard versioning](TODO), allows the routers to use cached information to send queries to one or more shards, and only read from the CSRS if the state of the world changes (e.g. chunk migration). 
+In order to scale, sharding utilises "optimistic" distributed synchronisation protocols to avoid creating nodes which are a bottleneck (i.e., the CSRS). One of these protocols, named [shard versioning](README_versioning_protocols.md), allows the routers to use cached information to send queries to one or more shards, and only read from the CSRS if the state of the world changes (e.g. chunk migration). The main goal of these protocols is to maintain certain causal relationships between the different catalog containers, where *routers* operate on cached information and rely on the *shards* to "correct" them if the data is no longer where the router thinks it is. diff --git a/src/mongo/db/s/README_startup_and_shutdown.md b/src/mongo/db/s/README_startup_and_shutdown.md new file mode 100644 index 0000000000000..73c61db7ea3de --- /dev/null +++ b/src/mongo/db/s/README_startup_and_shutdown.md @@ -0,0 +1,79 @@ +# Node startup and shutdown + +## Startup and sharding component initialization +The mongod initialization process is split into three phases. The first phase runs on startup and initializes the set of stateless components based on the cluster role. The second phase then initializes additional components that must be initialized with state read from the config server. The third phase is run on the [transition to primary](https://github.com/mongodb/mongo/blob/879d50a73179d0dd94fead476468af3ee4511b8f/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L822-L901) and starts services that only run on primaries. + +### Shard Server initialization + +#### Phase 1: +1. On a shard server, the `CollectionShardingState` factory is set to an instance of the `CollectionShardingStateFactoryShard` implementation. The component lives on the service context. +1. The sharding [OpObservers are created](https://github.com/mongodb/mongo/blob/0e08b33037f30094e9e213eacfe16fe88b52ff84/src/mongo/db/mongod_main.cpp#L1000-L1001) and registered with the service context. The `MigrationChunkClonerSourceOpObserver` class forwards operations during migration to the chunk cloner. The `ShardServerOpObserver` class is used to handle the majority of sharding-related events. These include loading the shard identity document when it is inserted and performing range deletions when they are marked as ready. + +#### Phase 2: +1. The [shardIdentity document is loaded](https://github.com/mongodb/mongo/blob/37ff80f6234137fd314d00e2cd1ff77cde90ce11/src/mongo/db/s/sharding_initialization_mongod.cpp#L366-L373) if it already exists on startup. For shards, the shard identity document specifies the config server connection string. If the shard does not have a shardIdentity document, it has not been added to a cluster yet, and the "Phase 2" initialization happens when the shard receives a shardIdentity document as part of addShard. +1. If the shard identity document was found, then the [ShardingState is initialized](https://github.com/mongodb/mongo/blob/37ff80f6234137fd314d00e2cd1ff77cde90ce11/src/mongo/db/s/sharding_initialization_mongod.cpp#L416-L462) from its fields. +1. The global sharding state is set on the Grid. The Grid contains the sharding context for a running server. It exists both on mongod and mongos because the Grid holds all the components needed for routing, and both mongos and shard servers can act as routers. +1. `KeysCollectionManager` is set on the `LogicalTimeValidator`. +1. The `ShardingReplicaSetChangeListener` is instantiated and set on the `ReplicaSetMonitor`. +1.
The remaining sharding components are [initialized for the current replica set role](https://github.com/mongodb/mongo/blob/37ff80f6234137fd314d00e2cd1ff77cde90ce11/src/mongo/db/s/sharding_initialization_mongod.cpp#L255-L286) before the Grid is marked as initialized. + +#### Phase 3: +Shard servers [start up several services](https://github.com/mongodb/mongo/blob/879d50a73179d0dd94fead476468af3ee4511b8f/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L885-L894) that only run on primaries. + +### Config Server initialization + +#### Phase 1: +The sharding [OpObservers are created](https://github.com/mongodb/mongo/blob/0e08b33037f30094e9e213eacfe16fe88b52ff84/src/mongo/db/mongod_main.cpp#L1000-L1001) and registered with the service context. The config server registers the OpObserverImpl and ConfigServerOpObserver observers. + +#### Phase 2: +The global sharding state is set on the Grid. The Grid contains the sharding context for a running server. The config server does not need to be provided with the config server connection string explicitly as it is part of its local state. + +#### Phase 3: +Config servers [run some services](https://github.com/mongodb/mongo/blob/879d50a73179d0dd94fead476468af3ee4511b8f/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L866-L867) that only run on primaries. + +### Mongos initialization +#### Phase 2: +The global sharding state is set on the Grid. The Grid contains the sharding context for a running server. Mongos is provided with the config server connection string as a startup parameter. + +#### Code references +* Function to [initialize global sharding state](https://github.com/mongodb/mongo/blob/eeca550092d9601d433e04c3aa71b8e1ff9795f7/src/mongo/s/sharding_initialization.cpp#L188-L237). +* Function to [initialize sharding environment](https://github.com/mongodb/mongo/blob/37ff80f6234137fd314d00e2cd1ff77cde90ce11/src/mongo/db/s/sharding_initialization_mongod.cpp#L255-L286) on shard server. +* Hook for sharding [transition to primary](https://github.com/mongodb/mongo/blob/879d50a73179d0dd94fead476468af3ee4511b8f/src/mongo/db/repl/replication_coordinator_external_state_impl.cpp#L822-L901). + +## Shutdown + +If the mongod server is primary, it will [try to step down](https://github.com/mongodb/mongo/blob/0987c120f552ab6d347f6b1b6574345e8c938c32/src/mongo/db/mongod_main.cpp#L1046-L1072). Mongod and mongos then run their respective shutdown tasks, which clean up the remaining sharding components. + +#### Code references +* [Shutdown logic](https://github.com/mongodb/mongo/blob/2bb2f2225d18031328722f98fe05a169064a8a8a/src/mongo/db/mongod_main.cpp#L1163) for mongod. +* [Shutdown logic](https://github.com/mongodb/mongo/blob/30f5448e95114d344e6acffa92856536885e35dd/src/mongo/s/mongos_main.cpp#L336-L354) for mongos. + +### Quiesce mode on shutdown +mongos enters quiesce mode prior to shutdown, to allow short-running operations to finish. +During this time, new and existing operations are allowed to run, but `isMaster`/`hello` +requests return a `ShutdownInProgress` error, to indicate that clients should start routing +operations to other nodes. Entering quiesce mode is considered a significant topology change +in the streaming `hello` protocol, so mongos tracks a `TopologyVersion`, which it increments +on entering quiesce mode, prompting it to respond to all waiting hello requests.
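As a rough illustration of the quiesce behaviour described above, the following self-contained sketch (hypothetical class and member names, not the actual mongos code) shows the core idea: entering quiesce mode bumps a topology version and wakes every waiting hello request, which then observes the shutdown state and can be answered with an error such as `ShutdownInProgress`.

```cpp
// Hypothetical sketch only; not the real mongos implementation.
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

class QuiesceAwareTopology {
public:
    // A streaming hello request waits until the topology version advances past the
    // version it already knows about, then checks whether the node is quiescing.
    bool awaitChangeAndCheckQuiesce(long knownVersion) {
        std::unique_lock<std::mutex> lk(_mutex);
        _cv.wait(lk, [&] { return _topologyVersion > knownVersion; });
        return _inQuiesce;
    }

    // Entering quiesce mode counts as a significant topology change.
    void enterQuiesce() {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            _inQuiesce = true;
            ++_topologyVersion;
        }
        _cv.notify_all();  // respond to all waiting hello requests
    }

private:
    std::mutex _mutex;
    std::condition_variable _cv;
    long _topologyVersion = 0;
    bool _inQuiesce = false;
};

int main() {
    QuiesceAwareTopology topology;
    std::thread helloWaiter([&] {
        bool quiescing = topology.awaitChangeAndCheckQuiesce(/*knownVersion=*/0);
        std::cout << (quiescing ? "reply: ShutdownInProgress" : "reply: ok") << "\n";
    });
    topology.enterQuiesce();  // shutdown begins
    helloWaiter.join();
    return 0;
}
```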
+ +### helloOk Protocol Negotiation + +In order to preserve backwards compatibility with old drivers, mongos currently supports both +the [`isMaster`] command and the [`hello`] command. New drivers and 5.0+ versions of the server +will support `hello`. When connecting to a sharded cluster via mongos, a new driver will send +"helloOk: true" as a part of the initial handshake. If mongos supports hello, it will respond +with "helloOk: true" as well. This way, new drivers know that they're communicating with a version +of the mongos that supports `hello` and can start sending `hello` instead of `isMaster` on this +connection. + +If mongos does not support `hello`, the `helloOk` flag is ignored. A new driver will subsequently +not see "helloOk: true" in the response and must continue to send `isMaster` on this connection. Old +drivers will not specify this flag at all, so the behavior remains the same. + +When mongos establishes outgoing connections to mongod nodes in the cluster, it always uses `hello` +rather than `isMaster`. + +#### Code references +* [isMaster command](https://github.com/mongodb/mongo/blob/r4.8.0-alpha/src/mongo/s/commands/cluster_is_master_cmd.cpp#L248) for mongos. +* [hello command](https://github.com/mongodb/mongo/blob/r4.8.0-alpha/src/mongo/s/commands/cluster_is_master_cmd.cpp#L64) for mongos. + diff --git a/src/mongo/db/s/README_user_write_blocking.md b/src/mongo/db/s/README_user_write_blocking.md new file mode 100644 index 0000000000000..b37ad59fd7a57 --- /dev/null +++ b/src/mongo/db/s/README_user_write_blocking.md @@ -0,0 +1,60 @@ +# User Write Blocking + +User write blocking prevents user-initiated writes from being performed on C2C source and destination +clusters during certain phases of C2C replication, allowing durable state to be propagated from the +source without experiencing conflicts. Because the source and destination clusters are different +administrative domains and thus can have separate configurations and metadata, operations which +affect metadata, such as replset reconfig, are permitted. Also, internal operations which affect user +collections but leave user data logically unaffected, such as chunk migration, are still permitted. +Finally, users with certain privileges can bypass user write blocking; this is necessary so that the +C2C sync daemon itself can write to user data. + +User write blocking is enabled and disabled by the command `{setUserWriteBlockMode: 1, global: +<true/false>}`. On replica sets, this command is invoked on the primary, and enables/disables user +write blocking replica-set-wide. On sharded clusters, this command is invoked on `mongos`, and +enables/disables user write blocking cluster-wide. We define a write as a "user write" if the target +database is not internal (the `admin`, `local`, and `config` databases being defined as internal), +and if the user that initiated the write cannot perform the `bypassWriteBlockingMode` action on the +`cluster` resource. By default, only the `restore`, `root`, and `__system` built-in roles have this +privilege. + +The `UserWriteBlockModeOpObserver` is responsible for blocking disallowed writes. Upon any operation +which writes, this `OpObserver` checks whether the `GlobalUserWriteBlockState` [allows writes to the +target +namespace](https://github.com/mongodb/mongo/blob/387f8c0e26a352b95ecfc6bc51f749d26a929390/src/mongo/db/op_observer/user_write_block_mode_op_observer.cpp#L281-L288). +The `GlobalUserWriteBlockState` stores whether user write blocking is enabled in a given +`ServiceContext`.
As part of its write access check, it [checks whether the `WriteBlockBypass` +associated with the given `OperationContext` is +enabled](https://github.com/mongodb/mongo/blob/25377181476e4140c970afa5b018f9b4fcc951e8/src/mongo/db/s/global_user_write_block_state.cpp#L59-L67). +The `WriteBlockBypass` stores whether the user that initiated the write is able to perform writes +when user write blocking is enabled. On internal requests (i.e. from other `mongod` or `mongos` +instances in the sharded cluster/replica set), the request originator propagates `WriteBlockBypass` +[through the request +metadata](https://github.com/mongodb/mongo/blob/182616b7b45a1e360839c612c9ee8acaa130fe17/src/mongo/rpc/metadata.cpp#L115). +On external requests, `WriteBlockBypass` is enabled [if the authenticated user is privileged to +bypass user +writes](https://github.com/mongodb/mongo/blob/07c3d2ebcd3ca8127ed5a5aaabf439b57697b530/src/mongo/db/write_block_bypass.cpp#L60-L63). +The `AuthorizationSession`, which is responsible for maintaining the authorization state, keeps track +of whether the user has the privilege to bypass user write blocking by [updating a cached variable +upon any changes to the authorization +state](https://github.com/mongodb/mongo/blob/e4032fe5c39f1974c76de4cefdc07d98ab25aeef/src/mongo/db/auth/authorization_session_impl.cpp#L1119-L1121). +This structure enables, for example, sharded writes to work correctly with user write blocking, +because the `WriteBlockBypass` state is initially set on the `mongos` based on the +`AuthorizationSession`, which knows the privileges of the user making the write request, and then +propagates from the `mongos` to the shards involved in the write. Note that this means on requests +from `mongos`, shard servers don't check their own `AuthorizationSession`s when setting +`WriteBlockBypass`. This would be incorrect behavior since internal requests have internal +authorization, which confers all privileges, including the privilege to bypass user write blocking. + +The `setUserWriteBlockMode` command, before enabling user write blocking, blocks creation of new +index builds and aborts all currently running index builds on non-internal databases, and drains the +index builds it cannot abort. This upholds the invariant that while user write blocking is enabled, +all running index builds are allowed to bypass write blocking and therefore can commit without +additional checks. + +In sharded clusters, enabling user write blocking is a two-phase operation, coordinated by the config +server. The first phase disallows creation of new `ShardingDDLCoordinator`s and drains all currently +running `DDLCoordinator`s. The config server waits for all shards to complete this phase before +moving onto the second phase, which aborts index builds and enables write blocking. This structure is +used because enabling write blocking while there are ongoing `ShardingDDLCoordinator`s would prevent +those operations from completing. diff --git a/src/mongo/db/s/README_versioning_protocols.md b/src/mongo/db/s/README_versioning_protocols.md index 86a288e0b0af3..c5b94cacce221 100644 --- a/src/mongo/db/s/README_versioning_protocols.md +++ b/src/mongo/db/s/README_versioning_protocols.md @@ -8,9 +8,9 @@ When a router uses its cached information to send a request to a shard, it attac When a shard receives the request, it will check this token to make sure that it matches the shard's local information. If it matches, then the request will proceed. 
If the version does not match, the shard will throw [an exception](https://github.com/mongodb/mongo/blob/r6.0.0/src/mongo/s/stale_exception.h). -When the router recieves this exception, it knows that the routing information must have changed, and so it will contact the config server to get more recent information before sending the request again. +When the router receives this exception, it knows that the routing information must have changed, and so it will [perform a refresh](#routing-information-refreshes) to get more recent information before sending the request again. -The following diagram depicts a simple example of the shard versioning protocol in action. +The following diagram depicts a simple example of the shard versioning protocol in action. It assumes that the router is a shard server primary, so the refresh simply fetches newer information from the config server. ```mermaid sequenceDiagram @@ -83,3 +83,19 @@ A change in the collection generation indicates that the collection has changed A placement version change indicates that something has changed about what data is placed on what shard. The most important operation that changes the placement version is migration, however split, merge and even some other operations change it as well, even though they don't actually move any data around. These changes are more targeted than generation changes, and will only cause the router to refresh if it is targeting a shard that was affected by the operation. #### Index Version Changes An index version change indicates that there has been some change in the global index information of the collection, such as from adding or removing a global index. + +## Routing Information Refreshes +For sharded collections, there are two sets of information that compose the routing information: the chunk placement information and the collection index information. The config server is [authoritative](README_sharding_catalog.md#authoritative-containers) for the placement information, while both the shards and the config server are authoritative for the index information. + +When a router receives a stale config error, it will refresh whichever component is stale. If the router has an older CollectionGeneration or CollectionPlacement, it will refresh the placement information, whereas if it has an older IndexVersion, it will refresh the index information. + +### Placement Information Refreshes +MongoS and shard primaries refresh their placement information from the config server. Shard secondaries, however, refresh from the shard primaries through a component called the Shard Server Catalog Cache Loader. When a shard primary refreshes from a config server, it persists the refreshed information to disk. This information is then replicated to secondaries, which will refresh their cache from this on-disk information. + +#### Incremental and Full Refreshes +A full refresh clears all cached information and replaces the cache with the information that exists on the node’s source, whereas an incremental refresh only replaces modified routing information from the node’s source. + +Incremental refreshes will happen whenever there has been a [placement version change](#placement-version-changes), while [collection generation changes](#generation-changes) will cause a full refresh. + +### Index Information Refreshes +Index information refreshes are always done from the config server.
The router will fetch the whole index information from the config server and replace what it has in its cache with the new information. diff --git a/src/mongo/db/s/SConscript b/src/mongo/db/s/SConscript index 8b8d4c4bbf1a1..ef73244fd5972 100644 --- a/src/mongo/db/s/SConscript +++ b/src/mongo/db/s/SConscript @@ -4,65 +4,6 @@ Import("env") env = env.Clone() -env.Library( - target='move_primary_shared_data', - source=[ - 'move_primary/move_primary_shared_data.cpp', - ], - LIBDEPS=[ - '$BUILD_DIR/mongo/base', - '$BUILD_DIR/mongo/db/repl/repl_sync_shared_data', - ], -) - -env.Library( - target='move_primary_cloners', - source=[ - 'move_primary/move_primary_base_cloner.cpp', - 'move_primary/move_primary_collection_cloner.cpp', - 'move_primary/move_primary_database_cloner.cpp', - ], - LIBDEPS=[ - '$BUILD_DIR/mongo/db/repl/base_cloner', - '$BUILD_DIR/mongo/db/repl/cloner_utils', - '$BUILD_DIR/mongo/db/repl/task_runner', - 'move_primary_shared_data', - ], - LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/base', - '$BUILD_DIR/mongo/db/catalog/collection_catalog', - '$BUILD_DIR/mongo/db/catalog/document_validation', - '$BUILD_DIR/mongo/db/commands/list_collections_filter', - '$BUILD_DIR/mongo/db/dbdirectclient', - '$BUILD_DIR/mongo/db/ops/write_ops_exec', - '$BUILD_DIR/mongo/db/repl/oplog', - '$BUILD_DIR/mongo/db/repl/repl_server_parameters', - '$BUILD_DIR/mongo/db/shard_role_api', - '$BUILD_DIR/mongo/rpc/metadata', - '$BUILD_DIR/mongo/util/progress_meter', - ], -) - -env.Library( - target='move_primary_test_fixtures', - source=[ - 'move_primary/move_primary_cloner_test_fixture.cpp', - ], - LIBDEPS=[ - # Required for service context test fixture - '$BUILD_DIR/mongo/db/auth/authmocks', - '$BUILD_DIR/mongo/db/catalog/collection_options', - '$BUILD_DIR/mongo/db/repl/cloner_test_fixtures', - '$BUILD_DIR/mongo/db/repl/repl_server_parameters', - '$BUILD_DIR/mongo/db/repl/replmocks', - '$BUILD_DIR/mongo/db/repl/storage_interface_impl', - '$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/dbtests/mocklib', - '$BUILD_DIR/mongo/util/clock_source_mock', - 'move_primary_shared_data', - ], -) - env.Library( target='balancer_stats_registry', source=[ @@ -125,6 +66,7 @@ env.Library( 'collection_sharding_state_factory_shard.cpp', 'commit_chunk_migration.idl', 'config_server_op_observer.cpp', + 'cqf_utils.cpp', 'document_source_analyze_shard_key_read_write_distribution.cpp', 'document_source_analyze_shard_key_read_write_distribution.idl', 'global_index/common_types.idl', @@ -153,6 +95,7 @@ env.Library( 'migration_batch_fetcher.cpp', 'migration_batch_inserter.cpp', 'migration_chunk_cloner_source.cpp', + 'migration_chunk_cloner_source_op_observer.cpp', 'migration_coordinator.cpp', 'migration_coordinator_document.idl', 'migration_destination_manager.cpp', @@ -160,23 +103,14 @@ env.Library( 'migration_session_id.cpp', 'migration_source_manager.cpp', 'migration_util.cpp', - 'move_primary_source_manager.cpp', - 'move_primary/move_primary_common_metadata.idl', - 'move_primary/move_primary_cumulative_metrics.cpp', - 'move_primary/move_primary_donor_service.cpp', - 'move_primary/move_primary_metrics.cpp', - 'move_primary/move_primary_metrics_field_name_provider.cpp', - 'move_primary/move_primary_oplog_applier_progress.idl', - 'move_primary/move_primary_recipient_cmds.idl', - 'move_primary/move_primary_recipient_service.cpp', - 'move_primary/move_primary_server_parameters.idl', - 'move_primary/move_primary_state_machine.idl', 'move_timing_helper.cpp', 'namespace_metadata_change_notifications.cpp', - 
'op_observer_sharding_impl.cpp', 'periodic_sharded_index_consistency_checker.cpp', 'query_analysis_coordinator.cpp', 'query_analysis_op_observer.cpp', + 'query_analysis_op_observer_configsvr.cpp', + 'query_analysis_op_observer_rs.cpp', + 'query_analysis_op_observer_shardsvr.cpp', 'range_deleter_service.cpp', 'range_deleter_service_op_observer.cpp', 'range_deletion_task.idl', @@ -233,7 +167,6 @@ env.Library( 'sharding_initialization_mongod.cpp', 'sharding_recovery_service.cpp', 'sharding_runtime_d_params.idl', - 'sharding_state_recovery.cpp', 'split_chunk.cpp', 'split_vector.cpp', 'start_chunk_clone_request.cpp', @@ -262,6 +195,7 @@ env.Library( '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/transaction/transaction', '$BUILD_DIR/mongo/db/vector_clock_mongod', + '$BUILD_DIR/mongo/executor/async_rpc', '$BUILD_DIR/mongo/s/analyze_shard_key_util', '$BUILD_DIR/mongo/s/query/cluster_aggregate', '$BUILD_DIR/mongo/s/sharding_api', @@ -279,6 +213,7 @@ env.Library( '$BUILD_DIR/mongo/db/index_builds_coordinator_interface', '$BUILD_DIR/mongo/db/internal_transactions_feature_flag', '$BUILD_DIR/mongo/db/keys_collection_client_direct', + '$BUILD_DIR/mongo/db/op_observer/op_observer_util', '$BUILD_DIR/mongo/db/ops/write_ops', '$BUILD_DIR/mongo/db/repl/image_collection_entry', '$BUILD_DIR/mongo/db/repl/primary_only_service', @@ -286,16 +221,13 @@ env.Library( '$BUILD_DIR/mongo/db/repl/wait_for_majority_service', '$BUILD_DIR/mongo/db/rs_local_client', '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', - '$BUILD_DIR/mongo/db/storage/remove_saver', '$BUILD_DIR/mongo/db/timeseries/bucket_catalog/bucket_catalog', '$BUILD_DIR/mongo/db/transaction/transaction_operations', '$BUILD_DIR/mongo/executor/inline_executor', '$BUILD_DIR/mongo/s/common_s', '$BUILD_DIR/mongo/util/future_util', 'balancer_stats_registry', - 'move_primary_cloners', 'query_analysis_writer', 'sharding_catalog', 'sharding_logging', @@ -372,10 +304,9 @@ env.Library( 'add_shard_util.cpp', 'balancer/auto_merger_policy.cpp', 'balancer/balance_stats.cpp', - 'balancer/balancer_chunk_selection_policy_impl.cpp', 'balancer/balancer_chunk_selection_policy.cpp', 'balancer/balancer_commands_scheduler_impl.cpp', - 'balancer/balancer_defragmentation_policy_impl.cpp', + 'balancer/balancer_defragmentation_policy.cpp', 'balancer/balancer_policy.cpp', 'balancer/balancer.cpp', 'balancer/cluster_statistics_impl.cpp', @@ -418,6 +349,7 @@ env.Library( '$BUILD_DIR/mongo/db/dbdirectclient', '$BUILD_DIR/mongo/db/index_builds_coordinator_interface', '$BUILD_DIR/mongo/db/internal_transactions_feature_flag', + '$BUILD_DIR/mongo/db/keys_collection_util', '$BUILD_DIR/mongo/db/pipeline/process_interface/shardsvr_process_interface', '$BUILD_DIR/mongo/db/pipeline/sharded_agg_helpers', '$BUILD_DIR/mongo/db/repl/hello_command', @@ -433,6 +365,7 @@ env.Library( '$BUILD_DIR/mongo/db/transaction/transaction', '$BUILD_DIR/mongo/db/transaction/transaction_api', '$BUILD_DIR/mongo/db/vector_clock_mongod', + '$BUILD_DIR/mongo/executor/async_rpc', '$BUILD_DIR/mongo/executor/inline_executor', '$BUILD_DIR/mongo/executor/network_interface', '$BUILD_DIR/mongo/idl/cluster_server_parameter_common', @@ -442,6 +375,7 @@ env.Library( '$BUILD_DIR/mongo/s/coreshard', '$BUILD_DIR/mongo/s/query/cluster_aggregate', '$BUILD_DIR/mongo/util/log_and_backoff', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', '$BUILD_DIR/mongo/util/pcre_wrapper', 'forwardable_operation_metadata', 'sharding_logging', @@ 
-458,8 +392,11 @@ env.Library( 'auto_split_vector_command.cpp', 'check_sharding_index_command.cpp', 'cleanup_orphaned_cmd.cpp', + 'cleanup_structured_encryption_data_coordinator.cpp', + 'cleanup_structured_encryption_data_coordinator.idl', 'clone_catalog_data_command.cpp', 'cluster_abort_transaction_cmd_d.cpp', + 'cluster_bulk_write_cmd_d.cpp', 'cluster_commit_transaction_cmd_d.cpp', 'cluster_count_cmd_d.cpp', 'cluster_find_cmd_d.cpp', @@ -501,15 +438,15 @@ env.Library( 'config/configsvr_remove_shard_command.cpp', 'config/configsvr_remove_shard_from_zone_command.cpp', 'config/configsvr_remove_tags_command.cpp', - 'config/configsvr_rename_collection_metadata_command.cpp', 'config/configsvr_repair_sharded_collection_chunks_history_command.cpp', + 'config/configsvr_reset_placement_history_command.cpp', 'config/configsvr_reshard_collection_cmd.cpp', 'config/configsvr_run_restore_command.cpp', 'config/configsvr_set_allow_migrations_command.cpp', 'config/configsvr_set_cluster_parameter_command.cpp', 'config/configsvr_set_user_write_block_mode_command.cpp', 'config/configsvr_split_chunk_command.cpp', - 'config/configsvr_transition_to_catalog_shard_command.cpp', + 'config/configsvr_transition_from_dedicated_config_server_command.cpp', 'config/configsvr_transition_to_dedicated_config_server_command.cpp', 'config/configsvr_update_zone_key_range_command.cpp', 'config/set_cluster_parameter_coordinator_document.idl', @@ -531,8 +468,6 @@ env.Library( 'migration_destination_manager_commands.cpp', 'move_primary_coordinator_document.idl', 'move_primary_coordinator.cpp', - 'move_primary_coordinator_no_resilient.cpp', - 'move_primary/move_primary_recipient_cmds.cpp', 'refine_collection_shard_key_coordinator_document.idl', 'refine_collection_shard_key_coordinator.cpp', 'refresh_query_analyzer_configuration_cmd.cpp', @@ -557,6 +492,7 @@ env.Library( 'shardsvr_check_metadata_consistency_command.cpp', 'shardsvr_check_metadata_consistency_participant_command.cpp', 'shardsvr_cleanup_reshard_collection_command.cpp', + 'shardsvr_cleanup_structured_encryption_data_command.cpp', 'shardsvr_collmod_command.cpp', 'shardsvr_collmod_participant_command.cpp', 'shardsvr_index_catalog_test_commands.cpp', @@ -606,6 +542,7 @@ env.Library( '$BUILD_DIR/mongo/db/bson/dotted_path_support', '$BUILD_DIR/mongo/db/catalog/catalog_helpers', '$BUILD_DIR/mongo/db/catalog/collection_uuid_mismatch_info', + '$BUILD_DIR/mongo/db/catalog/database_holder', '$BUILD_DIR/mongo/db/cloner', '$BUILD_DIR/mongo/db/cluster_transaction_api', '$BUILD_DIR/mongo/db/commands/cluster_server_parameter_commands_invocation', @@ -636,6 +573,7 @@ env.Library( '$BUILD_DIR/mongo/db/timeseries/timeseries_conversion_util', '$BUILD_DIR/mongo/db/timeseries/timeseries_options', '$BUILD_DIR/mongo/db/transaction/transaction_api', + '$BUILD_DIR/mongo/executor/async_rpc', '$BUILD_DIR/mongo/executor/inline_executor', '$BUILD_DIR/mongo/idl/cluster_server_parameter', '$BUILD_DIR/mongo/s/commands/cluster_commands_common', @@ -752,10 +690,6 @@ env.CppUnitTest( 'migration_destination_manager_test.cpp', 'migration_session_id_test.cpp', 'migration_util_test.cpp', - 'move_primary/move_primary_collection_cloner_test.cpp', - 'move_primary/move_primary_database_cloner_test.cpp', - 'move_primary/move_primary_donor_service_test.cpp', - 'move_primary/move_primary_recipient_service_test.cpp', 'namespace_metadata_change_notifications_test.cpp', 'op_observer_sharding_test.cpp', 'operation_sharding_state_test.cpp', @@ -793,6 +727,7 @@ env.CppUnitTest( 'shard_local_test.cpp', 
'shard_metadata_util_test.cpp', 'shard_server_catalog_cache_loader_test.cpp', + 'sharding_ddl_coordinator_service_test.cpp', 'sharding_initialization_mongod_test.cpp', 'sharding_initialization_op_observer_test.cpp', 'sharding_logging_test.cpp', @@ -843,8 +778,6 @@ env.CppUnitTest( '$BUILD_DIR/mongo/s/analyze_shard_key_util', '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_mock', '$BUILD_DIR/mongo/s/sharding_router_test_fixture', - 'move_primary_cloners', - 'move_primary_test_fixtures', 'query_analysis_writer', 'shard_server_test_fixture', 'sharding_catalog', @@ -948,6 +881,8 @@ env.Benchmark( LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/auth/authorization_manager_global', + '$BUILD_DIR/mongo/db/service_context_non_d', + '$BUILD_DIR/mongo/s/catalog/sharding_catalog_client_impl', '$BUILD_DIR/mongo/s/sharding_test_fixture_common', 'sharding_runtime_d', ], @@ -964,10 +899,7 @@ env.Benchmark( '$BUILD_DIR/mongo/db/read_write_concern_defaults_mock', '$BUILD_DIR/mongo/db/repl/wait_for_majority_service', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', - '$BUILD_DIR/mongo/db/shard_role_api', - '$BUILD_DIR/mongo/s/sharding_test_fixture_common', 'config_server_test_fixture', 'sharding_catalog_manager', - 'sharding_runtime_d', ], ) diff --git a/src/mongo/db/s/active_migrations_registry.cpp b/src/mongo/db/s/active_migrations_registry.cpp index c5213efe948a4..e413908af65cc 100644 --- a/src/mongo/db/s/active_migrations_registry.cpp +++ b/src/mongo/db/s/active_migrations_registry.cpp @@ -29,14 +29,31 @@ #include "mongo/db/s/active_migrations_registry.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_destination_manager.h" -#include "mongo/db/s/migration_session_id.h" #include "mongo/db/s/migration_source_manager.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingMigration @@ -89,7 +106,7 @@ void ActiveMigrationsRegistry::lock(OperationContext* opCtx, StringData reason) uassert(ErrorCodes::NotWritablePrimary, "Cannot lock the registry while the node is in draining mode", repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase( - opCtx, DatabaseName::kAdmin.toString())); + opCtx, DatabaseName::kAdmin)); } unblockMigrationsOnError.dismiss(); @@ -282,7 +299,7 @@ Status ActiveMigrationsRegistry::ActiveMoveChunkState::constructErrorStatus() co "'{}{}' for namespace {} to shard {}", (args.getMin() ? "min: " + args.getMin()->toString() + " - " : ""), (args.getMax() ? 
"max: " + args.getMax()->toString() : ""), - args.getCommandParameter().ns(), + args.getCommandParameter().toStringForErrorMsg(), args.getToShard().toString()); return {ErrorCodes::ConflictingOperationInProgress, std::move(errMsg)}; } @@ -291,8 +308,8 @@ Status ActiveMigrationsRegistry::ActiveReceiveChunkState::constructErrorStatus() return {ErrorCodes::ConflictingOperationInProgress, str::stream() << "Unable to start new balancer operation because this shard is " "currently receiving chunk " - << range.toString() << " for namespace " << nss.ns() << " from " - << fromShardId}; + << range.toString() << " for namespace " << nss.toStringForErrorMsg() + << " from " << fromShardId}; } ScopedDonateChunk::ScopedDonateChunk(ActiveMigrationsRegistry* registry, diff --git a/src/mongo/db/s/active_migrations_registry.h b/src/mongo/db/s/active_migrations_registry.h index ea61210782a44..1e748bee64e63 100644 --- a/src/mongo/db/s/active_migrations_registry.h +++ b/src/mongo/db/s/active_migrations_registry.h @@ -29,17 +29,36 @@ #pragma once +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/migration_session_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/platform/mutex.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/notification.h" namespace mongo { class OperationContext; + class ScopedDonateChunk; class ScopedReceiveChunk; class ScopedSplitMergeChunk; diff --git a/src/mongo/db/s/active_migrations_registry_test.cpp b/src/mongo/db/s/active_migrations_registry_test.cpp index 3cdaa0a021a27..c2ee3ace75d5d 100644 --- a/src/mongo/db/s/active_migrations_registry_test.cpp +++ b/src/mongo/db/s/active_migrations_registry_test.cpp @@ -27,16 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/baton.h" #include "mongo/db/client.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/s/commands/cluster_commands_gen.h" #include "mongo/stdx/future.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -95,7 +103,7 @@ TEST_F(MoveChunkRegistration, GetActiveMigrationNamespace) { auto originalScopedDonateChunk = assertGet(_registry.registerDonateChunk(operationContext(), createMoveRangeRequest(nss))); - ASSERT_EQ(nss.ns(), _registry.getActiveDonateChunkNss()->ns()); + ASSERT_EQ(nss, _registry.getActiveDonateChunkNss()); // Need to signal the registered migration so the destructor doesn't invariant originalScopedDonateChunk.signalComplete(Status::OK()); diff --git a/src/mongo/db/s/add_shard_cmd.cpp b/src/mongo/db/s/add_shard_cmd.cpp index 89ee5e5533c94..2eec34ac78603 100644 --- a/src/mongo/db/s/add_shard_cmd.cpp +++ b/src/mongo/db/s/add_shard_cmd.cpp @@ -28,20 +28,35 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/audit.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/add_shard_cmd_gen.h" #include "mongo/db/s/add_shard_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -102,8 +117,9 @@ class AddShardCommand : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/add_shard_util.cpp b/src/mongo/db/s/add_shard_util.cpp index 841b72fb10121..b0c69e5375145 100644 --- a/src/mongo/db/s/add_shard_util.cpp +++ b/src/mongo/db/s/add_shard_util.cpp @@ -27,18 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/s/add_shard_util.h" - +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/ops/write_ops.h" -#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/add_shard_cmd_gen.h" #include "mongo/db/shard_id.h" -#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/cluster_identity_loader.h" #include "mongo/s/write_ops/batched_command_request.h" diff --git a/src/mongo/db/s/add_shard_util.h b/src/mongo/db/s/add_shard_util.h index 7ae28d52dba82..852d8f4e6fcd9 100644 --- a/src/mongo/db/s/add_shard_util.h +++ b/src/mongo/db/s/add_shard_util.h @@ -32,12 +32,15 @@ #include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/write_concern_options.h" namespace mongo { class AddShard; class BSONObj; class OperationContext; + class ShardId; // Contains a collection of utility functions relating to the addShard command diff --git a/src/mongo/db/s/analyze_shard_key_cmd.cpp b/src/mongo/db/s/analyze_shard_key_cmd.cpp index bd3a14f80a6c6..11878a8feaebe 100644 --- a/src/mongo/db/s/analyze_shard_key_cmd.cpp +++ b/src/mongo/db/s/analyze_shard_key_cmd.cpp @@ -27,17 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/analyze_shard_key_cmd_util.h" -#include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/analyze_shard_key_cmd_gen.h" -#include "mongo/s/analyze_shard_key_feature_flag_gen.h" #include "mongo/s/analyze_shard_key_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -47,15 +62,7 @@ namespace analyze_shard_key { namespace { -MONGO_FAIL_POINT_DEFINE(analyzeShardKeySkipCalcalutingKeyCharactericsMetrics); -MONGO_FAIL_POINT_DEFINE(analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics); - -const std::string kOrphanDocsWarningMessage = "If \"" + - KeyCharacteristicsMetrics::kNumOrphanDocsFieldName + "\" is large relative to \"" + - KeyCharacteristicsMetrics::kNumDocsFieldName + - "\", you may want to rerun the command at some other time to get more accurate \"" + - KeyCharacteristicsMetrics::kNumDistinctValuesFieldName + "\" and \"" + - KeyCharacteristicsMetrics::kMostCommonValuesFieldName + "\" metrics."; +MONGO_FAIL_POINT_DEFINE(analyzeShardKeyFailBeforeMetricsCalculation); class AnalyzeShardKeyCmd : public TypedCommand { public: @@ -71,44 +78,77 @@ class AnalyzeShardKeyCmd : public 
TypedCommand { "analyzeShardKey command is not supported on a standalone mongod", repl::ReplicationCoordinator::get(opCtx)->isReplEnabled()); uassert(ErrorCodes::IllegalOperation, - "configQueryAnalyzer command is not supported on a multitenant replica set", + "analyzeShardKey command is not supported on a multitenant replica set", !gMultitenancySupport); - uassert(ErrorCodes::IllegalOperation, - "analyzeShardKey command is not supported on a configsvr mongod", - !serverGlobalParams.clusterRole.exclusivelyHasConfigRole()); + + uassert(ErrorCodes::InvalidOptions, + "Cannot skip analyzing all metrics", + request().getAnalyzeKeyCharacteristics() || + request().getAnalyzeReadWriteDistribution()); + uassert(ErrorCodes::InvalidOptions, + "Cannot specify both 'sampleRate' and 'sampleSize'", + !request().getSampleRate() || !request().getSampleSize()); const auto& nss = ns(); const auto& key = request().getKey(); uassertStatusOK(validateNamespace(nss)); - const auto collUuid = uassertStatusOK(validateCollectionOptionsLocally(opCtx, nss)); + const auto collUuid = uassertStatusOK(validateCollectionOptions(opCtx, nss)); + + if (MONGO_unlikely(analyzeShardKeyFailBeforeMetricsCalculation.shouldFail())) { + uasserted( + ErrorCodes::InternalError, + "Failing analyzeShardKey command before metrics calculation via a fail point"); + } - LOGV2(6875001, "Start analyzing shard key", logAttrs(nss), "shardKey"_attr = key); + const auto analyzeShardKeyId = UUID::gen(); + LOGV2(7790010, + "Start analyzing shard key", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = key); Response response; // Calculate metrics about the characteristics of the shard key. - if (!MONGO_unlikely( - analyzeShardKeySkipCalcalutingKeyCharactericsMetrics.shouldFail())) { + if (request().getAnalyzeKeyCharacteristics()) { auto keyCharacteristics = analyze_shard_key::calculateKeyCharacteristicsMetrics( - opCtx, nss, collUuid, key); - response.setKeyCharacteristics(keyCharacteristics); - if (response.getNumOrphanDocs()) { - response.setNote(StringData(kOrphanDocsWarningMessage)); + opCtx, + analyzeShardKeyId, + nss, + collUuid, + key, + request().getSampleRate(), + request().getSampleSize()); + if (!keyCharacteristics) { + // No calculation was performed. By design this must be because the shard key + // does not have a supporting index. If the command is not requesting the + // metrics about the read and write distribution, there are no metrics to + // return to the user. So throw an error here. + uassert( + ErrorCodes::IllegalOperation, + "Cannot analyze the characteristics of a shard key that does not have a " + "supporting index", + request().getAnalyzeReadWriteDistribution()); } + response.setKeyCharacteristics(keyCharacteristics); } // Calculate metrics about the read and write distribution from sampled queries. Query // sampling is not supported on multitenant replica sets. 
- if (!gMultitenancySupport && - !MONGO_unlikely( - analyzeShardKeySkipCalcalutingReadWriteDistributionMetrics.shouldFail())) { + if (request().getAnalyzeReadWriteDistribution()) { auto [readDistribution, writeDistribution] = analyze_shard_key::calculateReadWriteDistributionMetrics( - opCtx, nss, collUuid, key); + opCtx, analyzeShardKeyId, nss, collUuid, key); response.setReadDistribution(readDistribution); response.setWriteDistribution(writeDistribution); } + LOGV2(7790011, + "Finished analyzing shard key", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = key); + return response; } @@ -145,10 +185,7 @@ class AnalyzeShardKeyCmd : public TypedCommand { std::string help() const override { return "Returns metrics for evaluating a shard key for a collection."; } -}; - -MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(AnalyzeShardKeyCmd, - analyze_shard_key::gFeatureFlagAnalyzeShardKey); +} analyzeShardKeyCmd; } // namespace diff --git a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp index c43263f7a5509..9bc1507f49cb6 100644 --- a/src/mongo/db/s/analyze_shard_key_cmd_util.cpp +++ b/src/mongo/db/s/analyze_shard_key_cmd_util.cpp @@ -27,35 +27,108 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/shardsvr_process_interface.h" -#include "mongo/db/pipeline/sharded_agg_helpers.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/analyze_shard_key_cmd_util.h" #include 
"mongo/db/s/analyze_shard_key_read_write_distribution.h" #include "mongo/db/s/config/initial_split_policy.h" #include "mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h" -#include "mongo/db/s/range_deletion_task_gen.h" +#include "mongo/db/s/document_source_analyze_shard_key_read_write_distribution_gen.h" #include "mongo/db/s/shard_key_index_util.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/factory.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/random.h" #include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/analyze_shard_key_server_parameters_gen.h" #include "mongo/s/analyze_shard_key_util.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" #include "mongo/s/query_analysis_client.h" -#include "mongo/s/service_entry_point_mongos.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -66,24 +139,83 @@ namespace { MONGO_FAIL_POINT_DEFINE(analyzeShardKeyPauseBeforeCalculatingKeyCharacteristicsMetrics); MONGO_FAIL_POINT_DEFINE(analyzeShardKeyPauseBeforeCalculatingReadWriteDistributionMetrics); +MONGO_FAIL_POINT_DEFINE(analyzeShardKeyPauseBeforeCalculatingCollStatsMetrics); constexpr StringData kIndexKeyFieldName = "key"_sd; +constexpr StringData kDocFieldName = "doc"_sd; constexpr StringData kNumDocsFieldName = "numDocs"_sd; constexpr StringData kNumBytesFieldName = "numBytes"_sd; constexpr StringData kNumDistinctValuesFieldName = "numDistinctValues"_sd; +constexpr StringData kMostCommonValuesFieldName = "mostCommonValues"_sd; constexpr StringData kFrequencyFieldName = "frequency"_sd; constexpr StringData kNumOrphanDocsFieldName = "numOrphanDocs"_sd; -const int64_t kEmptyDocSizeBytes = BSONObj().objsize(); +const std::string kOrphanDocsWarningMessage = + "Due to performance reasons, the analyzeShardKey command does not filter out orphan documents " + "when calculating metrics about the characteristics of the shard key. Therefore, if \"" + + KeyCharacteristicsMetrics::kNumOrphanDocsFieldName + "\" is large relative to \"" + + KeyCharacteristicsMetrics::kNumDocsTotalFieldName + + "\", you may want to rerun the command at some other time to get more accurate \"" + + KeyCharacteristicsMetrics::kNumDistinctValuesFieldName + "\" and \"" + + KeyCharacteristicsMetrics::kMostCommonValuesFieldName + "\" metrics."; + +/** + * Validates that exactly one of 'sampleRate' and 'sampleSize' is specified. 
+ */ +void validateSamplingOptions(boost::optional sampleRate, + boost::optional sampleSize) { + invariant(sampleRate || sampleSize, "Must specify one of 'sampleRate' and 'sampleSize'"); + invariant(!sampleRate || !sampleSize, "Cannot specify both 'sampleRate' and 'sampleSize'"); +} + +/** + * Validates the metrics about the characteristics of a shard key. + */ +void validateKeyCharacteristicsMetrics(KeyCharacteristicsMetrics metrics) { + const auto msg = + "Unexpected error when calculating metrics about the cardinality and frequency of the " + "shard key " + + metrics.toBSON().toString(); + tassert(7826508, msg, metrics.getNumDocsTotal() >= metrics.getNumDocsSampled()); + if (metrics.getIsUnique()) { + tassert(7826509, msg, metrics.getNumDocsSampled() == metrics.getNumDistinctValues()); + } else { + tassert(7826510, msg, metrics.getNumDocsSampled() >= metrics.getNumDistinctValues()); + } + tassert( + 7826511, msg, metrics.getNumDocsSampled() >= (int64_t)metrics.getMostCommonValues().size()); +} /** * Returns an aggregate command request for calculating the cardinality and frequency metrics for * the given shard key. + * + * If the hint index is a hashed index and the shard key contains the hashed field, the aggregation + * will return documents of the following format, where 'doc' is a document whose shard key value + * has the attached 'frequency'. + * { + * doc: + * frequency: + * numDocs: + * numDistinctValues: + * } + * Otherwise, the aggregation will return documents of the following format, where 'key' is the + * hint index value for the shard key value that has the attached 'frequency'. + * { + * key: + * frequency: + * numDocs: + * numDistinctValues: + * } + * The former case involves an additional FETCH for every document returned since it needs to look + * up a document from the index value. */ AggregateCommandRequest makeAggregateRequestForCardinalityAndFrequency(const NamespaceString& nss, const BSONObj& shardKey, const BSONObj& hintIndexKey, - int numMostCommonValues) { + int numMostCommonValues, + int64_t numDocsTotal, + int64_t numDocsToSample) { uassertStatusOK(validateIndexKey(hintIndexKey)); std::vector pipeline; @@ -92,26 +224,93 @@ AggregateCommandRequest makeAggregateRequestForCardinalityAndFrequency(const Nam << BSON("$meta" << "indexKey")))); + if (numDocsTotal > numDocsToSample) { + pipeline.push_back( + BSON("$match" << BSON("$sampleRate" << (numDocsToSample * 1.0 / numDocsTotal)))); + pipeline.push_back(BSON("$limit" << numDocsToSample)); + } + + // Calculate the "frequency" of each original/hashed shard key value by doing a $group. BSONObjBuilder groupByBuilder; int fieldNum = 0; + boost::optional origHashedFieldName; + boost::optional tempHashedFieldName; + StringMap origToTempFieldName; for (const auto& element : shardKey) { - const auto fieldName = element.fieldNameStringData(); - groupByBuilder.append(kIndexKeyFieldName + std::to_string(fieldNum), - BSON("$getField" << BSON("field" << fieldName << "input" + // Use a temporary field name since it is invalid to group by a field name that contains + // dots. 
+ const auto origFieldName = element.fieldNameStringData(); + const auto tempFieldName = kIndexKeyFieldName + std::to_string(fieldNum); + groupByBuilder.append(tempFieldName, + BSON("$getField" << BSON("field" << origFieldName << "input" << ("$" + kIndexKeyFieldName)))); + if (ShardKeyPattern::isHashedPatternEl(hintIndexKey.getField(origFieldName))) { + origHashedFieldName.emplace(origFieldName); + tempHashedFieldName.emplace(tempFieldName); + } + origToTempFieldName.emplace(origFieldName, tempFieldName); fieldNum++; } pipeline.push_back(BSON("$group" << BSON("_id" << groupByBuilder.obj() << kFrequencyFieldName << BSON("$sum" << 1)))); - pipeline.push_back(BSON("$setWindowFields" - << BSON("sortBy" - << BSON(kFrequencyFieldName << -1) << "output" - << BSON(kNumDocsFieldName - << BSON("$sum" << ("$" + kFrequencyFieldName)) - << kNumDistinctValuesFieldName << BSON("$sum" << 1))))); - - pipeline.push_back(BSON("$limit" << numMostCommonValues)); + // Calculate the "numDocs", "numDistinctValues" and "mostCommonValues" by doing a $group with + // $topN. + pipeline.push_back(BSON( + "$group" << BSON( + "_id" << BSONNULL << kNumDocsFieldName << BSON("$sum" << ("$" + kFrequencyFieldName)) + << kNumDistinctValuesFieldName << BSON("$sum" << 1) << kMostCommonValuesFieldName + << BSON("$topN" << BSON("n" << numMostCommonValues << "sortBy" + << BSON(kFrequencyFieldName << -1) << "output" + << BSON("_id" + << "$_id" << kFrequencyFieldName + << ("$" + kFrequencyFieldName))))))); + + // Unwind "mostCommonValues" to return each shard value in its own document. + pipeline.push_back(BSON("$unwind" << ("$" + kMostCommonValuesFieldName))); + + // If the supporting index is hashed and the hashed field is one of the shard key fields, look + // up the corresponding values by doing a $lookup with $toHashedIndexKey. Replace "_id" + // with "doc" or "key" accordingly. + if (origHashedFieldName) { + invariant(tempHashedFieldName); + + pipeline.push_back( + BSON("$set" << BSON( + "_id" << ("$" + kMostCommonValuesFieldName + "._id") << kFrequencyFieldName + << ("$" + kMostCommonValuesFieldName + "." + kFrequencyFieldName)))); + pipeline.push_back(BSON("$unset" << kMostCommonValuesFieldName)); + + BSONObjBuilder letBuilder; + BSONObjBuilder matchBuilder; + BSONArrayBuilder matchArrayBuilder(matchBuilder.subarrayStart("$and")); + for (const auto& [origFieldName, tempFieldName] : origToTempFieldName) { + letBuilder.append(tempFieldName, ("$_id." + tempFieldName)); + auto eqArray = (origFieldName == *origHashedFieldName) + ? BSON_ARRAY(BSON("$toHashedIndexKey" << ("$" + *origHashedFieldName)) + << ("$$" + tempFieldName)) + : BSON_ARRAY(("$" + origFieldName) << ("$$" + tempFieldName)); + matchArrayBuilder.append(BSON("$expr" << BSON("$eq" << eqArray))); + } + matchArrayBuilder.done(); + + pipeline.push_back(BSON( + "$lookup" << BSON( + "from" << nss.coll().toString() << "let" << letBuilder.obj() << "pipeline" + << BSON_ARRAY(BSON("$match" << matchBuilder.obj()) << BSON("$limit" << 1)) + << "as" + << "docs"))); + pipeline.push_back(BSON("$set" << BSON(kDocFieldName << BSON("$first" + << "$docs")))); + pipeline.push_back(BSON("$unset" << BSON_ARRAY("docs" + << "_id"))); + } else { + pipeline.push_back(BSON( + "$set" << BSON(kIndexKeyFieldName + << ("$" + kMostCommonValuesFieldName + "._id") << kFrequencyFieldName + << ("$" + kMostCommonValuesFieldName + "." 
+ kFrequencyFieldName)))); + pipeline.push_back(BSON("$unset" << BSON_ARRAY(kMostCommonValuesFieldName << "_id"))); + } AggregateCommandRequest aggRequest(nss, pipeline); aggRequest.setHint(hintIndexKey); @@ -292,7 +491,8 @@ boost::optional findCompatiblePrefixedIndex(OperationContext* opCtx, } struct CardinalityFrequencyMetrics { - int64_t numDocs = 0; + int64_t numDocsTotal = 0; + int64_t numDocsSampled = 0; int64_t numDistinctValues = 0; std::vector mostCommonValues; }; @@ -343,17 +543,24 @@ BSONObj truncateBSONObj(const BSONObj& obj, int maxSize, int depth = 0) { * and the collection has the the given fast count of the number of documents. */ CardinalityFrequencyMetrics calculateCardinalityAndFrequencyUnique(OperationContext* opCtx, + const UUID& analyzeShardKeyId, const NamespaceString& nss, const BSONObj& shardKey, - int64_t numDocs) { + int64_t numDocsTotal, + int64_t numDocsToSample, + int numMostCommonValues) { LOGV2(6915302, "Calculating cardinality and frequency for a unique shard key", logAttrs(nss), - "shardKey"_attr = shardKey); + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = shardKey, + "numDocsTotal"_attr = numDocsTotal, + "numDocsToSample"_attr = numDocsToSample, + "numMostCommonValues"_attr = numMostCommonValues); CardinalityFrequencyMetrics metrics; - const auto numMostCommonValues = gNumMostCommonValues.load(); + numMostCommonValues = std::min(numMostCommonValues, (int)numDocsToSample); const auto maxSizeBytesPerValue = kMaxBSONObjSizeMostCommonValues / numMostCommonValues; std::vector pipeline; @@ -370,24 +577,13 @@ CardinalityFrequencyMetrics calculateCardinalityAndFrequencyUnique(OperationCont metrics.mostCommonValues.emplace_back(std::move(value), 1); }); - metrics.numDistinctValues = [&] { - if (int64_t numMostCommonValues = metrics.mostCommonValues.size(); - numDocs < numMostCommonValues) { - LOGV2_WARNING( - 7477402, - "The number of documents returned by $collStats appears to be less than the number " - "of sampled documents for cardinality and frequency metrics calculation. This is " - "likely caused by an unclean shutdown that resulted in an inaccurate fast count " - "or by insertions that have occurred since the command started. Setting the number " - "of documents to the number of sampled documents.", - "numCountedDocs"_attr = numDocs, - "numSampledDocs"_attr = numMostCommonValues); - return numMostCommonValues; - } - return numDocs; - }(); - metrics.numDocs = metrics.numDistinctValues; + uassert(7826506, + "Cannot analyze the cardinality and frequency of a shard key for an empty collection", + metrics.mostCommonValues.size() > 0); + metrics.numDistinctValues = numDocsToSample; + metrics.numDocsSampled = numDocsToSample; + metrics.numDocsTotal = numDocsTotal; return metrics; } @@ -397,30 +593,37 @@ CardinalityFrequencyMetrics calculateCardinalityAndFrequencyUnique(OperationCont * above since the metrics can be determined without running any aggregations. 
*/ CardinalityFrequencyMetrics calculateCardinalityAndFrequencyGeneric(OperationContext* opCtx, + const UUID& analyzeShardKeyId, const NamespaceString& nss, const BSONObj& shardKey, - const BSONObj& hintIndexKey) { + const BSONObj& hintIndexKey, + int64_t numDocsTotal, + int64_t numDocsToSample, + int numMostCommonValues) { LOGV2(6915303, "Calculating cardinality and frequency for a non-unique shard key", logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId, "shardKey"_attr = shardKey, - "indexKey"_attr = hintIndexKey); + "indexKey"_attr = hintIndexKey, + "numDocsTotal"_attr = numDocsTotal, + "numDocsToSample"_attr = numDocsToSample, + "numMostCommonValues"_attr = numMostCommonValues); CardinalityFrequencyMetrics metrics; - const auto numMostCommonValues = gNumMostCommonValues.load(); const auto maxSizeBytesPerValue = kMaxBSONObjSizeMostCommonValues / numMostCommonValues; auto aggRequest = makeAggregateRequestForCardinalityAndFrequency( - nss, shardKey, hintIndexKey, numMostCommonValues); + nss, shardKey, hintIndexKey, numMostCommonValues, numDocsTotal, numDocsToSample); runAggregate(opCtx, aggRequest, [&](const BSONObj& doc) { auto numDocs = doc.getField(kNumDocsFieldName).exactNumberLong(); invariant(numDocs > 0); - if (metrics.numDocs == 0) { - metrics.numDocs = numDocs; + if (metrics.numDocsSampled == 0) { + metrics.numDocsSampled = numDocs; } else { - invariant(metrics.numDocs == numDocs); + invariant(metrics.numDocsSampled == numDocs); } auto numDistinctValues = doc.getField(kNumDistinctValuesFieldName).exactNumberLong(); @@ -431,8 +634,23 @@ CardinalityFrequencyMetrics calculateCardinalityAndFrequencyGeneric(OperationCon invariant(metrics.numDistinctValues == numDistinctValues); } - auto value = dotted_path_support::extractElementsBasedOnTemplate( - doc.getObjectField("_id").replaceFieldNames(shardKey), shardKey); + auto value = [&] { + if (doc.hasField(kIndexKeyFieldName)) { + return dotted_path_support::extractElementsBasedOnTemplate( + doc.getObjectField(kIndexKeyFieldName).replaceFieldNames(shardKey), shardKey); + } + if (doc.hasField(kDocFieldName)) { + return dotted_path_support::extractElementsBasedOnTemplate( + doc.getObjectField(kDocFieldName), shardKey); + } + uasserted(7588600, + str::stream() << "Failed to look up documents for most common shard key " + "values in the command with \"analyzeShardKeyId\" " + << analyzeShardKeyId + << ". This is likely caused by concurrent deletions. " + "Please try running the analyzeShardKey command again. " + << redact(doc)); + }(); if (value.objsize() > maxSizeBytesPerValue) { value = truncateBSONObj(value, maxSizeBytesPerValue); } @@ -441,11 +659,57 @@ CardinalityFrequencyMetrics calculateCardinalityAndFrequencyGeneric(OperationCon metrics.mostCommonValues.emplace_back(std::move(value), frequency); }); + uassert(7826507, + "Cannot analyze the cardinality and frequency of a shard key because the number of " + "sampled documents is zero", + metrics.numDocsSampled > 0); + + metrics.numDocsTotal = std::max(numDocsTotal, metrics.numDocsSampled); + return metrics; +} + +/** + * Returns the cardinality and frequency metrics for a shard key, dispatching to the unique or + * generic calculation above depending on whether the shard key is unique, given the collection's fast count of the number of documents.
+ */ +CardinalityFrequencyMetrics calculateCardinalityAndFrequency(OperationContext* opCtx, + const UUID& analyzeShardKeyId, + const NamespaceString& nss, + const BSONObj& shardKey, + const BSONObj& hintIndexKey, + bool isUnique, + int64_t numDocsTotal, + boost::optional sampleRate, + boost::optional sampleSize) { + validateSamplingOptions(sampleRate, sampleSize); uassert(ErrorCodes::IllegalOperation, "Cannot analyze the cardinality and frequency of a shard key for an empty collection", - metrics.numDocs > 0); + numDocsTotal > 0); - return metrics; + const auto numMostCommonValues = gNumMostCommonValues.load(); + uassert(ErrorCodes::InvalidOptions, + str::stream() << "The requested number of most common values is " << numMostCommonValues + << " but the requested number of documents according to 'sampleSize'" + << " is " << sampleSize, + !sampleSize || (*sampleSize >= numMostCommonValues)); + + const auto numDocsToSample = + sampleRate ? std::ceil(*sampleRate * numDocsTotal) : std::min(*sampleSize, numDocsTotal); + return isUnique ? calculateCardinalityAndFrequencyUnique(opCtx, + analyzeShardKeyId, + nss, + shardKey, + numDocsTotal, + numDocsToSample, + numMostCommonValues) + : calculateCardinalityAndFrequencyGeneric(opCtx, + analyzeShardKeyId, + nss, + shardKey, + hintIndexKey, + numDocsTotal, + numDocsToSample, + numMostCommonValues); } /** @@ -455,12 +719,20 @@ CardinalityFrequencyMetrics calculateCardinalityAndFrequencyGeneric(OperationCon * have a supporting index, returns 'unknown' and none. */ MonotonicityMetrics calculateMonotonicity(OperationContext* opCtx, + const UUID& analyzeShardKeyId, const CollectionPtr& collection, - const BSONObj& shardKey) { + const BSONObj& shardKey, + boost::optional sampleRate, + boost::optional sampleSize) { + validateSamplingOptions(sampleRate, sampleSize); + LOGV2(6915304, "Calculating monotonicity", logAttrs(collection->ns()), - "shardKey"_attr = shardKey); + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = shardKey, + "sampleRate"_attr = sampleRate, + "sampleSize"_attr = sampleSize); MonotonicityMetrics metrics; @@ -469,9 +741,15 @@ MonotonicityMetrics calculateMonotonicity(OperationContext* opCtx, return metrics; } - if (KeyPattern::isHashedKeyPattern(shardKey) && shardKey.nFields() == 1) { - metrics.setType(MonotonicityTypeEnum::kNotMonotonic); - metrics.setRecordIdCorrelationCoefficient(0); + if (KeyPattern::isHashedKeyPattern(shardKey)) { + if (shardKey.nFields() == 1 || shardKey.firstElement().valueStringDataSafe() == "hashed") { + metrics.setType(MonotonicityTypeEnum::kNotMonotonic); + metrics.setRecordIdCorrelationCoefficient(0); + } else { + // The monotonicity cannot be inferred from the recordIds in the index since hashing + // introduces randomness. + metrics.setType(MonotonicityTypeEnum::kUnknown); + } return metrics; } @@ -488,8 +766,30 @@ MonotonicityMetrics calculateMonotonicity(OperationContext* opCtx, invariant(index->descriptor()); std::vector recordIds; - BSONObj prevKey; - int64_t numKeys = 0; + bool scannedMultipleShardKeys = false; + BSONObj firstShardKey; + + const int64_t numRecordsTotal = collection->numRecords(opCtx); + uassert(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) + ? ErrorCodes::CollectionIsEmptyLocally + : ErrorCodes::IllegalOperation, + "Cannot analyze the monotonicity of a shard key for an empty collection", + numRecordsTotal > 0); + + const auto numRecordsToSample = sampleRate ? 
std::ceil(*sampleRate * numRecordsTotal) + : std::min(*sampleSize, numRecordsTotal); + const auto recordSampleRate = + sampleRate ? *sampleRate : (numRecordsToSample * 1.0 / numRecordsTotal); + + LOGV2(7826504, + "Start scanning the supporting index to get record ids", + logAttrs(collection->ns()), + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = shardKey, + "indexKey"_attr = index->keyPattern(), + "numRecordsTotal"_attr = numRecordsTotal, + "recordSampleRate"_attr = recordSampleRate, + "numRecordsToSample"_attr = numRecordsToSample); KeyPattern indexKeyPattern(index->keyPattern()); auto exec = InternalPlanner::indexScan(opCtx, @@ -499,16 +799,35 @@ MonotonicityMetrics calculateMonotonicity(OperationContext* opCtx, indexKeyPattern.globalMax(), BoundInclusion::kExcludeBothStartAndEndKeys, PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY); + auto prng = opCtx->getClient()->getPrng(); + try { RecordId recordId; BSONObj recordVal; - while (PlanExecutor::ADVANCED == exec->getNext(&recordVal, &recordId)) { + while (recordIds.size() < numRecordsToSample) { + auto shouldSample = + (recordSampleRate == 1) || (prng.nextCanonicalDouble() < recordSampleRate); + auto execState = shouldSample + ? exec->getNext(scannedMultipleShardKeys ? nullptr : &recordVal, &recordId) + : exec->getNext(nullptr, nullptr); + + if (execState != PlanExecutor::ADVANCED) { + break; + } + if (!shouldSample) { + continue; + } + recordIds.push_back(recordId.getLong()); - auto currentKey = dotted_path_support::extractElementsBasedOnTemplate( - recordVal.replaceFieldNames(shardKey), shardKey); - if (SimpleBSONObjComparator::kInstance.evaluate(prevKey != currentKey)) { - prevKey = currentKey; - numKeys++; + if (!scannedMultipleShardKeys) { + auto currentShardKey = dotted_path_support::extractElementsBasedOnTemplate( + recordVal.replaceFieldNames(shardKey), shardKey); + if (recordIds.size() == 1) { + firstShardKey = currentShardKey; + } else if (SimpleBSONObjComparator::kInstance.evaluate(firstShardKey != + currentShardKey)) { + scannedMultipleShardKeys = true; + } } } } catch (DBException& ex) { @@ -517,13 +836,20 @@ MonotonicityMetrics calculateMonotonicity(OperationContext* opCtx, throw; } - uassert(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) - ? ErrorCodes::CollectionIsEmptyLocally - : ErrorCodes::IllegalOperation, - "Cannot analyze the monotonicity of a shard key for an empty collection", + uassert(7826505, + "Cannot analyze the monotonicity because the number of sampled records is zero", recordIds.size() > 0); - if (numKeys == 1) { + LOGV2(779009, + "Finished scanning the supporting index. 
Start calculating correlation coefficient for " + "the record ids in the supporting index", + logAttrs(collection->ns()), + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = shardKey, + "indexKey"_attr = indexKeyPattern, + "numRecordIds"_attr = recordIds.size()); + + if (!scannedMultipleShardKeys) { metrics.setType(MonotonicityTypeEnum::kNotMonotonic); metrics.setRecordIdCorrelationCoefficient(0); return metrics; @@ -538,11 +864,9 @@ MonotonicityMetrics calculateMonotonicity(OperationContext* opCtx, }()); auto coefficientThreshold = gMonotonicityCorrelationCoefficientThreshold.load(); LOGV2(6875302, - "Calculated monotonicity", + "Finished calculating correlation coefficient for the record ids in the supporting index", logAttrs(collection->ns()), - "shardKey"_attr = shardKey, - "indexKey"_attr = indexKeyPattern, - "numRecords"_attr = recordIds.size(), + "analyzeShardKeyId"_attr = analyzeShardKeyId, "coefficient"_attr = metrics.getRecordIdCorrelationCoefficient(), "coefficientThreshold"_attr = coefficientThreshold); @@ -564,6 +888,8 @@ struct CollStatsMetrics { * document size in bytes and the number of orphan documents if the collection is sharded. */ CollStatsMetrics calculateCollStats(OperationContext* opCtx, const NamespaceString& nss) { + analyzeShardKeyPauseBeforeCalculatingCollStatsMetrics.pauseWhileSet(opCtx); + CollStatsMetrics metrics; std::vector pipeline; @@ -592,22 +918,24 @@ CollStatsMetrics calculateCollStats(OperationContext* opCtx, const NamespaceStri runAggregate(opCtx, aggRequest, [&](const BSONObj& doc) { metrics.numDocs = doc.getField(kNumDocsFieldName).exactNumberLong(); - if (metrics.numDocs == 0) { - LOGV2_WARNING( - 7477403, - "The number of documents returned by $collStats indicates that the collection is " - "empty. This is likely caused by an unclean shutdown that resulted in an " - "inaccurate fast count or by deletions that have occurred since the command " - "started."); - metrics.avgDocSizeBytes = 0; - if (isShardedCollection) { - metrics.numOrphanDocs = 0; - } - return; - } + uassert(7826501, + str::stream() << "The number of documents returned by $collStats indicates " + "that the collection is empty. This is likely caused by an " + "unclean shutdown that resulted in an inaccurate fast count or " + "by deletions that have occurred since the command started. " + << doc, + metrics.numDocs > 0); metrics.avgDocSizeBytes = doc.getField(kNumBytesFieldName).exactNumberLong() / metrics.numDocs; + uassert(7826502, + str::stream() << "The average document size calculated from metrics returned " + "by $collStats is zero. This is likely caused by an unclean " + "shutdown that resulted in an inaccurate fast count or by " + "deletions that have occurred since the command started. " + << doc, + metrics.avgDocSizeBytes > 0); + if (isShardedCollection) { metrics.numOrphanDocs = doc.getField(kNumOrphanDocsFieldName).exactNumberLong(); } @@ -624,6 +952,7 @@ CollStatsMetrics calculateCollStats(OperationContext* opCtx, const NamespaceStri * latter corresponds to the 'operationTime' in the response for the last insert command. */ std::pair generateSplitPoints(OperationContext* opCtx, + const UUID& analyzeShardKeyId, const NamespaceString& nss, const UUID& collUuid, const KeyPattern& shardKey) { @@ -632,17 +961,16 @@ std::pair generateSplitPoints(OperationContext* opCtx, str::stream() << "Cannot analyze a shard key for a non-existing collection", origCollUuid); // Perform best-effort validation that the collection has not been dropped and recreated. 
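The monotonicity step above reduces to a correlation coefficient between the sampled record ids and the order in which the index scan returned them, which is then compared against `gMonotonicityCorrelationCoefficientThreshold`. The helper that actually computes the coefficient is not part of this diff; the following is only a minimal standalone sketch of the idea, assuming a plain Pearson correlation over the sampled record ids (the name and the exact formula used by the server are not taken from this patch):

```cpp
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: Pearson correlation between sampled record ids and their
// positions in the index scan. A coefficient close to +1 or -1 means record ids
// (roughly, insertion order) track the shard key order, i.e. the key looks monotonic.
double recordIdCorrelationCoefficient(const std::vector<int64_t>& recordIds) {
    const double n = static_cast<double>(recordIds.size());
    double sumX = 0, sumY = 0, sumXY = 0, sumXX = 0, sumYY = 0;
    for (std::size_t i = 0; i < recordIds.size(); ++i) {
        const double x = static_cast<double>(i);             // position in the scan
        const double y = static_cast<double>(recordIds[i]);  // record id
        sumX += x;
        sumY += y;
        sumXY += x * y;
        sumXX += x * x;
        sumYY += y * y;
    }
    const double numerator = n * sumXY - sumX * sumY;
    const double denominator =
        std::sqrt(n * sumXX - sumX * sumX) * std::sqrt(n * sumYY - sumY * sumY);
    return denominator == 0 ? 0 : numerator / denominator;
}
```

The computed coefficient is then weighed against the configured threshold, consistent with how the coefficient and threshold are logged above.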
- uassert(CollectionUUIDMismatchInfo(nss.db(), collUuid, nss.coll().toString(), boost::none), + uassert(CollectionUUIDMismatchInfo(nss.dbName(), collUuid, nss.coll().toString(), boost::none), str::stream() << "Found that the collection UUID has changed from " << collUuid << " to " << origCollUuid << " since the command started", origCollUuid == collUuid); - auto commandId = UUID::gen(); LOGV2(7559400, "Generating split points using the shard key being analyzed", logAttrs(nss), - "shardKey"_attr = shardKey, - "commandId"_attr = commandId); + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = shardKey); auto tempCollUuid = UUID::gen(); auto shardKeyPattern = ShardKeyPattern(shardKey); @@ -650,7 +978,8 @@ std::pair generateSplitPoints(OperationContext* opCtx, nss, shardKeyPattern, gNumShardKeyRanges.load(), - boost::none, + boost::none /*zones*/, + boost::none /*availableShardIds*/, gNumSamplesPerShardKeyRange.load()); const SplitPolicyParams splitParams{tempCollUuid, ShardingState::get(opCtx)->shardId()}; auto splitPoints = [&] { @@ -710,7 +1039,7 @@ std::pair generateSplitPoints(OperationContext* opCtx, splitPointsToInsert.clear(); } AnalyzeShardKeySplitPointDocument doc; - doc.setId({commandId, UUID::gen() /* splitPointId */}); + doc.setId({analyzeShardKeyId, UUID::gen() /* splitPointId */}); doc.setNs(nss); doc.setSplitPoint(splitPoint); doc.setExpireAt(expireAt); @@ -728,17 +1057,27 @@ std::pair generateSplitPoints(OperationContext* opCtx, invariant(!splitPointsAfterClusterTime.isNull()); auto splitPointsFilter = BSON((AnalyzeShardKeySplitPointDocument::kIdFieldName + "." + - AnalyzeShardKeySplitPointId::kCommandIdFieldName) - << commandId); + AnalyzeShardKeySplitPointId::kAnalyzeShardKeyIdFieldName) + << analyzeShardKeyId); return {std::move(splitPointsFilter), splitPointsAfterClusterTime}; } } // namespace -KeyCharacteristicsMetrics calculateKeyCharacteristicsMetrics(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUuid, - const KeyPattern& shardKey) { +boost::optional calculateKeyCharacteristicsMetrics( + OperationContext* opCtx, + const UUID& analyzeShardKeyId, + const NamespaceString& nss, + const UUID& collUuid, + const KeyPattern& shardKey, + boost::optional sampleRate, + boost::optional sampleSize) { + invariant(!sampleRate || !sampleSize, "Cannot specify both 'sampleRate' and 'sampleSize'"); + // If both 'sampleRate' and 'sampleSize' are not specified, set 'sampleSize' to the default. + if (!sampleRate && !sampleSize) { + sampleSize = gKeyCharacteristicsDefaultSampleSize.load(); + } + KeyCharacteristicsMetrics metrics; auto shardKeyBson = shardKey.toBSON(); @@ -750,10 +1089,11 @@ KeyCharacteristicsMetrics calculateKeyCharacteristicsMetrics(OperationContext* o str::stream() << "Cannot analyze a shard key for a non-existing collection", collection); // Perform best-effort validation that the collection has not been dropped and recreated. 
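As the validation above shows, `sampleRate` and `sampleSize` are mutually exclusive, with `sampleSize` falling back to a server-configured default when neither is given, and the number of documents (or records) actually sampled is then derived from whichever option is present. A small self-contained sketch of that resolution, mirroring the expressions used in this patch (the default value is a placeholder, and `std::optional` stands in for the `boost::optional` used by the server):

```cpp
#include <algorithm>
#include <cassert>
#include <cmath>
#include <cstdint>
#include <optional>

// Placeholder for the gKeyCharacteristicsDefaultSampleSize server parameter.
constexpr int64_t kDefaultSampleSize = 10'000;

// Illustrative resolution of the mutually exclusive sampling options into a
// concrete number of documents to sample.
int64_t resolveNumDocsToSample(std::optional<double> sampleRate,
                               std::optional<int64_t> sampleSize,
                               int64_t numDocsTotal) {
    assert(!sampleRate || !sampleSize);  // cannot specify both
    if (!sampleRate && !sampleSize) {
        sampleSize = kDefaultSampleSize;  // neither given: use the default sample size
    }
    return sampleRate ? static_cast<int64_t>(std::ceil(*sampleRate * numDocsTotal))
                      : std::min(*sampleSize, numDocsTotal);
}
```

For example, with `numDocsTotal = 1'000'000`, a `sampleRate` of 0.1 yields 100,000 sampled documents, while a `sampleSize` of 2,000,000 is clamped to the collection's total.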
- uassert(CollectionUUIDMismatchInfo(nss.db(), collUuid, nss.coll().toString(), boost::none), - str::stream() << "Found that the collection UUID has changed from " << collUuid - << " to " << collection->uuid() << " since the command started", - collection->uuid() == collUuid); + uassert( + CollectionUUIDMismatchInfo(nss.dbName(), collUuid, nss.coll().toString(), boost::none), + str::stream() << "Found that the collection UUID has changed from " << collUuid + << " to " << collection->uuid() << " since the command started", + collection->uuid() == collUuid); // Performs best-effort validation that the shard key does not contain an array field by // extracting the shard key value from a random document in the collection and asserting @@ -781,52 +1121,103 @@ KeyCharacteristicsMetrics calculateKeyCharacteristicsMetrics(OperationContext* o opCtx, *collection, collection->getIndexCatalog(), shardKeyBson); if (!indexSpec) { - return {}; + return boost::none; } indexKeyBson = indexSpec->keyPattern.getOwned(); LOGV2(6915305, - "Calculating metrics about the characteristics of the shard key", + "Start calculating metrics about the characteristics of the shard key", logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId, "shardKey"_attr = shardKeyBson, - "indexKey"_attr = indexKeyBson); + "indexKey"_attr = indexKeyBson, + "sampleRate"_attr = sampleRate, + "sampleSize"_attr = sampleSize); analyzeShardKeyPauseBeforeCalculatingKeyCharacteristicsMetrics.pauseWhileSet(opCtx); metrics.setIsUnique(shardKeyBson.nFields() == indexKeyBson.nFields() ? indexSpec->isUnique : false); - auto monotonicityMetrics = calculateMonotonicity(opCtx, *collection, shardKeyBson); + LOGV2(7790001, + "Start calculating metrics about the monotonicity of the shard key", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId); + auto monotonicityMetrics = calculateMonotonicity( + opCtx, analyzeShardKeyId, *collection, shardKeyBson, sampleRate, sampleSize); + LOGV2(7790002, + "Finished calculating metrics about the monotonicity of the shard key", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId); + metrics.setMonotonicity(monotonicityMetrics); } + LOGV2(7790003, + "Start calculating metrics about the collection", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId); auto collStatsMetrics = calculateCollStats(opCtx, nss); - auto cardinalityFrequencyMetrics = *metrics.getIsUnique() - ? calculateCardinalityAndFrequencyUnique(opCtx, nss, shardKeyBson, collStatsMetrics.numDocs) - : calculateCardinalityAndFrequencyGeneric(opCtx, nss, shardKeyBson, indexKeyBson); + LOGV2(7790004, + "Finished calculating metrics about the collection", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId); + + LOGV2(7790005, + "Start calculating metrics about the cardinality and frequency of the shard key", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId); + auto cardinalityFrequencyMetrics = calculateCardinalityAndFrequency(opCtx, + analyzeShardKeyId, + nss, + shardKeyBson, + indexKeyBson, + metrics.getIsUnique(), + collStatsMetrics.numDocs, + sampleRate, + sampleSize); + LOGV2(7790006, + "Finished calculating metrics about the cardinality and frequency of the shard key", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId); + + // Use a tassert here since the IllegalOperation error should have been thrown while + // calculating about the cardinality and frequency of the shard key. 
+ tassert(ErrorCodes::IllegalOperation, + "Cannot analyze the characteristics of a shard key for an empty collection", + cardinalityFrequencyMetrics.numDocsSampled > 0); + + metrics.setNumDocsTotal(cardinalityFrequencyMetrics.numDocsTotal); + if (collStatsMetrics.numOrphanDocs) { + metrics.setNumOrphanDocs(collStatsMetrics.numOrphanDocs); + metrics.setNote(StringData(kOrphanDocsWarningMessage)); + } + metrics.setAvgDocSizeBytes(collStatsMetrics.avgDocSizeBytes); - metrics.setNumDocs(cardinalityFrequencyMetrics.numDocs); + metrics.setNumDocsSampled(cardinalityFrequencyMetrics.numDocsSampled); metrics.setNumDistinctValues(cardinalityFrequencyMetrics.numDistinctValues); metrics.setMostCommonValues(cardinalityFrequencyMetrics.mostCommonValues); - // The average document size returned by $collStats can be inaccurate (or even zero) if there - // has been an unclean shutdown since that can result in inaccurate fast data statistics. To - // avoid nonsensical metrics, if the collection is not empty, specify the lower limit for the - // average document size to the size of an empty document. - metrics.setAvgDocSizeBytes(cardinalityFrequencyMetrics.numDocs > 0 - ? std::max(kEmptyDocSizeBytes, collStatsMetrics.avgDocSizeBytes) - : 0); - metrics.setNumOrphanDocs(collStatsMetrics.numOrphanDocs); + validateKeyCharacteristicsMetrics(metrics); + + LOGV2(7790007, + "Finished calculating metrics about the characteristics of the shard key", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = shardKeyBson, + "indexKey"_attr = indexKeyBson); return metrics; } std::pair calculateReadWriteDistributionMetrics( OperationContext* opCtx, + const UUID& analyzeShardKeyId, const NamespaceString& nss, const UUID& collUuid, const KeyPattern& shardKey) { LOGV2(6915306, - "Calculating metrics about the read and write distribution", + "Start calculating metrics about the read and write distribution", logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId, "shardKey"_attr = shardKey); analyzeShardKeyPauseBeforeCalculatingReadWriteDistributionMetrics.pauseWhileSet(opCtx); @@ -834,7 +1225,7 @@ std::pair calculateReadWriteD WriteDistributionMetrics writeDistributionMetrics; auto [splitPointsFilter, splitPointsAfterClusterTime] = - generateSplitPoints(opCtx, nss, collUuid, shardKey); + generateSplitPoints(opCtx, analyzeShardKeyId, nss, collUuid, shardKey); std::vector pipeline; DocumentSourceAnalyzeShardKeyReadWriteDistributionSpec spec( @@ -865,6 +1256,12 @@ std::pair calculateReadWriteD writeDistributionMetrics.setNumSingleWritesWithoutShardKey(boost::none); writeDistributionMetrics.setNumMultiWritesWithoutShardKey(boost::none); + LOGV2(7790008, + "Finished calculating metrics about the read and write distribution", + logAttrs(nss), + "analyzeShardKeyId"_attr = analyzeShardKeyId, + "shardKey"_attr = shardKey); + return std::make_pair(readDistributionMetrics, writeDistributionMetrics); } diff --git a/src/mongo/db/s/analyze_shard_key_cmd_util.h b/src/mongo/db/s/analyze_shard_key_cmd_util.h index c9d36e6dd57ca..4473fa7d42c8d 100644 --- a/src/mongo/db/s/analyze_shard_key_cmd_util.h +++ b/src/mongo/db/s/analyze_shard_key_cmd_util.h @@ -29,27 +29,39 @@ #pragma once -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/platform/basic.h" #include "mongo/s/analyze_shard_key_cmd_gen.h" +#include "mongo/util/uuid.h" namespace mongo { namespace 
analyze_shard_key { /** * Returns metrics about the characteristics of the shard key (i.e. the cardinality, frequency - * and monotonicity). + * and monotonicity) if the shard key has a supporting index. */ -KeyCharacteristicsMetrics calculateKeyCharacteristicsMetrics(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUuid, - const KeyPattern& shardKey); +boost::optional calculateKeyCharacteristicsMetrics( + OperationContext* opCtx, + const UUID& analyzeShardKeyId, + const NamespaceString& nss, + const UUID& collUuid, + const KeyPattern& shardKey, + boost::optional sampleRate, + boost::optional sampleSize); /** * Returns metrics about the read and write distribution based on sampled queries. */ std::pair calculateReadWriteDistributionMetrics( OperationContext* opCtx, + const UUID& analyzeShardKeyId, const NamespaceString& nss, const UUID& collUuid, const KeyPattern& shardKey); diff --git a/src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp b/src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp index fdb9b58fbc9b0..ce81aa056ab06 100644 --- a/src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp +++ b/src/mongo/db/s/analyze_shard_key_read_write_distribution.cpp @@ -29,19 +29,28 @@ #include "mongo/db/s/analyze_shard_key_read_write_distribution.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" -#include "mongo/db/query/collation/collation_index_key.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/internal_plans.h" -#include "mongo/db/s/shard_key_index_util.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_util.h" -#include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/collection_routing_info_targeter.h" -#include "mongo/s/grid.h" #include "mongo/s/shard_key_pattern_query_util.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/analyze_shard_key_read_write_distribution.h b/src/mongo/db/s/analyze_shard_key_read_write_distribution.h index cacdc65199949..ba66cf3147699 100644 --- a/src/mongo/db/s/analyze_shard_key_read_write_distribution.h +++ b/src/mongo/db/s/analyze_shard_key_read_write_distribution.h @@ -29,15 +29,42 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/basic.h" - #include "mongo/s/analyze_shard_key_cmd_gen.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/analyze_shard_key_server_parameters_gen.h" #include 
"mongo/s/analyze_shard_key_util.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/collection_routing_info_targeter.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace analyze_shard_key { @@ -254,7 +281,7 @@ template std::vector addNumByRange(const std::vector& l, const std::vector& r) { invariant(!l.empty()); invariant(!r.empty()); - tassert( + uassert( 7559401, str::stream() << "Failed to combine the 'numByRange' metrics from two shards since one has length " diff --git a/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp b/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp index a7244804b6f64..03f5bed3eb676 100644 --- a/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp +++ b/src/mongo/db/s/analyze_shard_key_read_write_distribution_test.cpp @@ -29,18 +29,47 @@ #include "mongo/db/s/analyze_shard_key_read_write_distribution.h" +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/hasher.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/shard_id.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" #include "mongo/platform/random.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/analyze_shard_key_server_parameters_gen.h" #include "mongo/s/analyze_shard_key_util.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -170,7 +199,7 @@ struct ReadWriteDistributionTest : public ShardServerTestFixture { nss, collUuid, SampledCommandNameEnum::kUpdate, - cmd.toBSON(BSON("$db" << nss.db().toString())), + cmd.toBSON(BSON("$db" << nss.db_forTest().toString())), Date_t::now() + mongo::Milliseconds( analyze_shard_key::gQueryAnalysisSampleExpirationSecs.load() * 1000)}; @@ -186,7 +215,7 @@ struct ReadWriteDistributionTest : public ShardServerTestFixture { nss, collUuid, SampledCommandNameEnum::kDelete, - cmd.toBSON(BSON("$db" << nss.db().toString())), + cmd.toBSON(BSON("$db" << nss.db_forTest().toString())), Date_t::now() + mongo::Milliseconds( analyze_shard_key::gQueryAnalysisSampleExpirationSecs.load() * 1000)}; @@ -210,7 +239,7 @@ struct ReadWriteDistributionTest : public ShardServerTestFixture { nss, collUuid, SampledCommandNameEnum::kFindAndModify, - cmd.toBSON(BSON("$db" << 
nss.db().toString())), + cmd.toBSON(BSON("$db" << nss.db_forTest().toString())), Date_t::now() + mongo::Milliseconds( analyze_shard_key::gQueryAnalysisSampleExpirationSecs.load() * 1000)}; diff --git a/src/mongo/db/s/auto_split_vector.cpp b/src/mongo/db/s/auto_split_vector.cpp index c4acaca437661..7b01db43a29d8 100644 --- a/src/mongo/db/s/auto_split_vector.cpp +++ b/src/mongo/db/s/auto_split_vector.cpp @@ -29,17 +29,38 @@ #include "mongo/db/s/auto_split_vector.h" -#include "mongo/base/status_with.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/index/index_descriptor.h" #include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -123,7 +144,7 @@ std::pair, bool> autoSplitVector(OperationContext* opCtx, AutoGetCollection collection(opCtx, nss, MODE_IS); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "namespace " << nss << " does not exists", + str::stream() << "namespace " << nss.toStringForErrorMsg() << " does not exists", collection); // Get the size estimate for this namespace diff --git a/src/mongo/db/s/auto_split_vector.h b/src/mongo/db/s/auto_split_vector.h index 9e09d029308e8..3a6b700f2aef3 100644 --- a/src/mongo/db/s/auto_split_vector.h +++ b/src/mongo/db/s/auto_split_vector.h @@ -29,8 +29,19 @@ #pragma once +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/s/auto_split_vector_command.cpp b/src/mongo/db/s/auto_split_vector_command.cpp index 2c4ea47e8adad..f980855d9bc50 100644 --- a/src/mongo/db/s/auto_split_vector_command.cpp +++ b/src/mongo/db/s/auto_split_vector_command.cpp @@ -28,12 +28,29 @@ */ +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/auto_split_vector.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include 
"mongo/s/request_types/auto_split_vector_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -107,8 +124,9 @@ class AutoSplitVectorCommand final : public TypedCommand uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::splitVector)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::splitVector)); } }; } autoSplitVectorCommand; diff --git a/src/mongo/db/s/auto_split_vector_test.cpp b/src/mongo/db/s/auto_split_vector_test.cpp index ad0957e911f52..690bead11adac 100644 --- a/src/mongo/db/s/auto_split_vector_test.cpp +++ b/src/mongo/db/s/auto_split_vector_test.cpp @@ -28,16 +28,28 @@ */ +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/s/auto_split_vector.h" -#include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/db/s/split_vector.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/random.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/balancer/auto_merger_policy.cpp b/src/mongo/db/s/balancer/auto_merger_policy.cpp index 9bcf25a869a01..7007206d25bdf 100644 --- a/src/mongo/db/s/balancer/auto_merger_policy.cpp +++ b/src/mongo/db/s/balancer/auto_merger_policy.cpp @@ -28,14 +28,52 @@ */ #include "mongo/db/s/balancer/auto_merger_policy.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source_lookup.h" +#include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_config_server_parameters_gen.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" 
+#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -68,8 +106,7 @@ bool AutoMergerPolicy::isEnabled() { void AutoMergerPolicy::checkInternalUpdates() { stdx::lock_guard lk(_mutex); - if (!feature_flags::gAutoMerger.isEnabled(serverGlobalParams.featureCompatibility) || - !_enabled) { + if (!_enabled) { return; } _checkInternalUpdatesWithLock(lk); @@ -83,8 +120,7 @@ boost::optional AutoMergerPolicy::getNextStreamingAction( OperationContext* opCtx) { stdx::unique_lock lk(_mutex); - if (!feature_flags::gAutoMerger.isEnabled(serverGlobalParams.featureCompatibility) || - !_enabled) { + if (!_enabled) { return boost::none; } @@ -166,6 +202,11 @@ void AutoMergerPolicy::applyActionResult(OperationContext* opCtx, if (status.code() == ErrorCodes::ConflictingOperationInProgress) { // Reschedule auto-merge for because commit overlapped with other chunk ops _rescheduledCollectionsToMergePerShard[mergeAction.shardId].push_back(mergeAction.nss); + } else if (status.code() == ErrorCodes::KeyPatternShorterThanBound || status.code() == 16634) { + LOGV2_WARNING(7805201, + "Auto-merger skipping namespace due to misconfigured zones", + "namespace"_attr = mergeAction.nss, + "error"_attr = redact(status)); } else { // Reset the history window to consider during next round because chunk merges may have // been potentially missed due to an unexpected error @@ -184,6 +225,7 @@ void AutoMergerPolicy::_init(WithLock lk) { _intervalTimer.reset(); _collectionsToMergePerShard.clear(); _firstAction = true; + _outstandingActions = 0; _onStateUpdated(); } diff --git a/src/mongo/db/s/balancer/auto_merger_policy.h b/src/mongo/db/s/balancer/auto_merger_policy.h index 24e816bc19cb7..95be9a16a5283 100644 --- a/src/mongo/db/s/balancer/auto_merger_policy.h +++ b/src/mongo/db/s/balancer/auto_merger_policy.h @@ -29,7 +29,22 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/balancer/actions_stream_policy.h" +#include "mongo/db/s/balancer/balancer_policy.h" +#include "mongo/db/s/balancer/cluster_statistics.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/timer.h" namespace mongo { diff --git a/src/mongo/db/s/balancer/auto_merger_policy_test.cpp b/src/mongo/db/s/balancer/auto_merger_policy_test.cpp index 090fb6e456732..654e150eb868f 100644 --- a/src/mongo/db/s/balancer/auto_merger_policy_test.cpp +++ b/src/mongo/db/s/balancer/auto_merger_policy_test.cpp @@ -29,15 +29,45 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/balancer/auto_merger_policy.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include 
"mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/vector_clock_mutable.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/vector_clock.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -168,11 +198,12 @@ class AutoMergerPolicyTest : public ConfigServerTestFixture { if (!enableAutoMerge) { setBuilder.appendBool(CollectionType::kEnableAutoMergeFieldName, false); } - ASSERT_OK(updateToConfigCollection(operationContext(), - CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.toString()), - BSON("$set" << setBuilder.obj()), - false /*upsert*/)); + ASSERT_OK( + updateToConfigCollection(operationContext(), + CollectionType::ConfigNS, + BSON(CollectionType::kNssFieldName << nss.toString_forTest()), + BSON("$set" << setBuilder.obj()), + false /*upsert*/)); } void assertAutomergerConsidersCollectionsWithMergeableChunks( @@ -194,8 +225,8 @@ class AutoMergerPolicyTest : public ConfigServerTestFixture { nssWithMergeableChunks.end(), expectedNss) != nssWithMergeableChunks.end(); ASSERT_EQ(true, expectedNssIsFetched) - << "expected collection " << expectedNss << " on shard " << shardId - << " wasn't fetched"; + << "expected collection " << expectedNss.toStringForErrorMsg() << " on shard " + << shardId << " wasn't fetched"; } ASSERT_EQ(expectedNamespaces.size(), nssWithMergeableChunks.size()) diff --git a/src/mongo/db/s/balancer/balance_stats.cpp b/src/mongo/db/s/balancer/balance_stats.cpp index aa644b3f929b9..1d96045edef0f 100644 --- a/src/mongo/db/s/balancer/balance_stats.cpp +++ b/src/mongo/db/s/balancer/balance_stats.cpp @@ -28,14 +28,21 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/db/s/balancer/balance_stats.h" +#include +#include +#include "mongo/db/s/balancer/balance_stats.h" #include "mongo/db/s/balancer/balancer_policy.h" -#include "mongo/logv2/log.h" +#include "mongo/db/shard_id.h" #include "mongo/s/chunk_manager.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/balancer/balance_stats.h b/src/mongo/db/s/balancer/balance_stats.h index 0a36a65ddc500..a8d1b76f7b4ba 100644 --- a/src/mongo/db/s/balancer/balance_stats.h +++ b/src/mongo/db/s/balancer/balance_stats.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include #include "mongo/s/catalog/type_shard.h" diff --git a/src/mongo/db/s/balancer/balance_stats_test.cpp b/src/mongo/db/s/balancer/balance_stats_test.cpp index c8b6addb8703b..501fae310d677 100644 --- a/src/mongo/db/s/balancer/balance_stats_test.cpp +++ b/src/mongo/db/s/balancer/balance_stats_test.cpp @@ -27,12 +27,36 @@ * it in the license file. 
*/ +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/s/balancer/balance_stats.h" #include "mongo/db/s/balancer/balancer_policy.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_manager.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/balancer/balancer.cpp b/src/mongo/db/s/balancer/balancer.cpp index 7bdedd64a87dd..b82b3121ed517 100644 --- a/src/mongo/db/s/balancer/balancer.cpp +++ b/src/mongo/db/s/balancer/balancer.cpp @@ -30,40 +30,88 @@ #include "mongo/db/s/balancer/balancer.h" +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include +#include +#include +#include #include +#include +#include #include +#include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/read_preference.h" -#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/balancer/actions_stream_policy.h" #include "mongo/db/s/balancer/auto_merger_policy.h" -#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h" +#include "mongo/db/s/balancer/balancer_commands_scheduler.h" #include "mongo/db/s/balancer/balancer_commands_scheduler_impl.h" -#include "mongo/db/s/balancer/balancer_defragmentation_policy_impl.h" +#include "mongo/db/s/balancer/balancer_defragmentation_policy.h" #include "mongo/db/s/balancer/cluster_statistics_impl.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_config_server_parameters_gen.h" #include "mongo/db/s/sharding_logging.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/random.h" #include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include 
"mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/balancer_collection_status_gen.h" -#include "mongo/s/request_types/configure_collection_balancing_gen.h" +#include "mongo/s/request_types/migration_secondary_throttle_options.h" #include "mongo/s/shard_util.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/idle_thread_block.h" -#include "mongo/util/exit.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/pcre.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #include "mongo/util/timer.h" #include "mongo/util/version.h" @@ -293,12 +341,10 @@ Balancer* Balancer::get(OperationContext* operationContext) { Balancer::Balancer() : _balancedLastTime(0), - _random(std::random_device{}()), - _clusterStats(std::make_unique(_random)), - _chunkSelectionPolicy( - std::make_unique(_clusterStats.get(), _random)), + _clusterStats(std::make_unique()), + _chunkSelectionPolicy(std::make_unique(_clusterStats.get())), _commandScheduler(std::make_unique()), - _defragmentationPolicy(std::make_unique( + _defragmentationPolicy(std::make_unique( _clusterStats.get(), [this]() { _onActionsStreamPolicyStateUpdate(); })), _autoMergerPolicy( std::make_unique([this]() { _onActionsStreamPolicyStateUpdate(); })), @@ -435,22 +481,45 @@ void Balancer::report(OperationContext* opCtx, BSONObjBuilder* builder) { } void Balancer::_consumeActionStreamLoop() { - ScopeGuard onExitCleanup([this] { - _defragmentationPolicy->interruptAllDefragmentations(); - _autoMergerPolicy->disable(); - }); - Client::initThread("BalancerSecondary"); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = cc().makeOperationContext(); executor::ScopedTaskExecutor executor( Grid::get(opCtx.get())->getExecutorPool()->getFixedExecutor()); + ScopeGuard onExitCleanup([this, &executor] { + _defragmentationPolicy->interruptAllDefragmentations(); + _autoMergerPolicy->disable(); + // Explicitly cancel and drain any outstanding streaming action already dispatched to the + // task executor. + executor->shutdown(); + executor->join(); + // When shutting down, the task executor may or may not invoke the + // applyActionResponseTo()callback for canceled streaming actions: to ensure a consistent + // state of the balancer after a step down, _outstandingStreamingOps needs then to be reset + // to 0 once all the tasks have been drained. + _outstandingStreamingOps.store(0); + }); + // Lambda function for applying action response auto applyActionResponseTo = [this](const BalancerStreamAction& action, const BalancerStreamActionResponse& response, ActionsStreamPolicy* policy) { invariant(_outstandingStreamingOps.addAndFetch(-1) >= 0); ThreadClient tc("BalancerSecondaryThread::applyActionResponse", getGlobalServiceContext()); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = tc->makeOperationContext(); policy->applyActionResult(opCtx.get(), action, response); }; @@ -521,7 +590,8 @@ void Balancer::_consumeActionStreamLoop() { // Get next action from a random stream together with its stream auto [nextAction, sourcedStream] = [&]() -> std::tuple, ActionsStreamPolicy*> { - std::shuffle(activeStreams.begin(), activeStreams.end(), _random); + auto client = opCtx->getClient(); + std::shuffle(activeStreams.begin(), activeStreams.end(), client->getPrng().urbg()); for (auto stream : activeStreams) { try { auto action = stream->getNextStreamingAction(opCtx.get()); @@ -626,6 +696,13 @@ void Balancer::_mainThread() { }); Client::initThread("Balancer"); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = cc().makeOperationContext(); auto shardingContext = Grid::get(opCtx.get()); diff --git a/src/mongo/db/s/balancer/balancer.h b/src/mongo/db/s/balancer/balancer.h index 16fe7cc76f352..80f4d71f79222 100644 --- a/src/mongo/db/s/balancer/balancer.h +++ b/src/mongo/db/s/balancer/balancer.h @@ -29,20 +29,34 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/s/balancer/auto_merger_policy.h" #include "mongo/db/s/balancer/balancer_chunk_selection_policy.h" -#include "mongo/db/s/balancer/balancer_random.h" +#include "mongo/db/s/balancer/balancer_policy.h" +#include "mongo/db/s/balancer/cluster_statistics.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/s/request_types/balancer_collection_status_gen.h" #include "mongo/s/request_types/move_range_request_gen.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/duration.h" namespace mongo { class ChunkType; class ClusterStatistics; + class BalancerCommandsScheduler; class BalancerDefragmentationPolicy; class MigrationSecondaryThrottleOptions; @@ -266,9 +280,6 @@ class Balancer : public ReplicaSetAwareServiceConfigSvr { // Number of moved chunks in last round int _balancedLastTime; - // Source of randomness when metadata needs to be randomized. - BalancerRandomSource _random; - // Source for cluster statistics. Depends on the source of randomness above so it should be // created after it and destroyed before it. std::unique_ptr _clusterStats; diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.cpp index e7f5eb8f782e6..bc6f8c228d231 100644 --- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.cpp +++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.cpp @@ -27,16 +27,822 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include -#include "mongo/db/s/balancer/balancer_chunk_selection_policy.h" +#include +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/s/balancer/balancer_chunk_selection_policy.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/s/sharding_config_server_parameters_gen.h" +#include "mongo/db/s/sharding_util.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/bits.h" +#include "mongo/platform/random.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/grid.h" +#include "mongo/s/request_types/get_stats_for_balancing_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/str.h" +#include "mongo/util/timer.h" +#include "mongo/util/uuid.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding + +MONGO_FAIL_POINT_DEFINE(overrideStatsForBalancingBatchSize); namespace mongo { -BalancerChunkSelectionPolicy::BalancerChunkSelectionPolicy() = default; +namespace { + +/** + * Does a linear pass over the information cached in the specified chunk manager and extracts chunk + * distribution and chunk placement information which is needed by the balancer policy. 
+ */ +StatusWith createCollectionDistributionStatus( + OperationContext* opCtx, + const NamespaceString& nss, + const ShardStatisticsVector& allShards, + const ChunkManager& chunkMgr) { + ShardToChunksMap shardToChunksMap; + + // Makes sure there is an entry in shardToChunksMap for every shard, so empty shards will also + // be accounted for + for (const auto& stat : allShards) { + shardToChunksMap[stat.shardId]; + } + + chunkMgr.forEachChunk([&](const auto& chunkEntry) { + ChunkType chunk; + chunk.setCollectionUUID(chunkMgr.getUUID()); + chunk.setMin(chunkEntry.getMin()); + chunk.setMax(chunkEntry.getMax()); + chunk.setJumbo(chunkEntry.isJumbo()); + chunk.setShard(chunkEntry.getShardId()); + chunk.setVersion(chunkEntry.getLastmod()); + + shardToChunksMap[chunkEntry.getShardId()].push_back(chunk); + + return true; + }); + + const auto& keyPattern = chunkMgr.getShardKeyPattern().getKeyPattern(); + + // Cache the collection zones + auto swZoneInfo = ZoneInfo::getZonesForCollection(opCtx, nss, keyPattern); + if (!swZoneInfo.isOK()) { + return swZoneInfo.getStatus(); + } + + DistributionStatus distribution(nss, std::move(shardToChunksMap), swZoneInfo.getValue()); + + return {std::move(distribution)}; +} + +stdx::unordered_map +getDataSizeInfoForCollections(OperationContext* opCtx, + const std::vector& collections) { + const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration(); + uassertStatusOK(balancerConfig->refreshAndCheck(opCtx)); + + const auto shardRegistry = Grid::get(opCtx)->shardRegistry(); + const auto shardIds = shardRegistry->getAllShardIds(opCtx); + + // Map to be returned, incrementally populated with the collected statistics + stdx::unordered_map dataSizeInfoMap; + + std::vector namespacesWithUUIDsForStatsRequest; + for (const auto& coll : collections) { + const auto& nss = coll.getNss(); + const auto maxChunkSizeBytes = + coll.getMaxChunkSizeBytes().value_or(balancerConfig->getMaxChunkSizeBytes()); + + dataSizeInfoMap.emplace( + nss, + CollectionDataSizeInfoForBalancing(std::map(), maxChunkSizeBytes)); + + NamespaceWithOptionalUUID nssWithUUID(nss); + nssWithUUID.setUUID(coll.getUuid()); + namespacesWithUUIDsForStatsRequest.push_back(nssWithUUID); + } + + ShardsvrGetStatsForBalancing req{namespacesWithUUIDsForStatsRequest}; + req.setScaleFactor(1); + const auto reqObj = req.toBSON({}); + + const auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); + const auto responsesFromShards = + sharding_util::sendCommandToShards(opCtx, + DatabaseName::kAdmin.toString(), + reqObj, + shardIds, + executor, + false /* throwOnError */); + + for (auto&& response : responsesFromShards) { + try { + const auto& shardId = response.shardId; + auto errorContext = + "Failed to get stats for balancing from shard '{}'"_format(shardId.toString()); + const auto responseValue = + uassertStatusOKWithContext(std::move(response.swResponse), errorContext); + + const ShardsvrGetStatsForBalancingReply reply = + ShardsvrGetStatsForBalancingReply::parse( + IDLParserContext("ShardsvrGetStatsForBalancingReply"), + std::move(responseValue.data)); + const auto collStatsFromShard = reply.getStats(); + + invariant(collStatsFromShard.size() == collections.size()); + for (const auto& stats : collStatsFromShard) { + invariant(dataSizeInfoMap.contains(stats.getNs())); + dataSizeInfoMap.at(stats.getNs()).shardToDataSizeMap[shardId] = stats.getCollSize(); + } + } catch (const ExceptionFor& ex) { + // Handle `removeShard`: skip shards removed during a balancing round + LOGV2_DEBUG(6581603, + 
1, + "Skipping shard for the current balancing round", + "error"_attr = redact(ex)); + } + } + + return dataSizeInfoMap; +} + +CollectionDataSizeInfoForBalancing getDataSizeInfoForCollection(OperationContext* opCtx, + const NamespaceString& nss) { + const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); + const auto coll = catalogClient->getCollection(opCtx, nss); + std::vector vec{coll}; + return std::move(getDataSizeInfoForCollections(opCtx, vec).at(nss)); +} + +/** + * Helper class used to accumulate the split points for the same chunk together so they can be + * submitted to the shard as a single call versus multiple. This is necessary in order to avoid + * refreshing the chunk metadata after every single split point (if done one by one), because + * splitting a chunk does not yield the same chunk anymore. + */ +class SplitCandidatesBuffer { + SplitCandidatesBuffer(const SplitCandidatesBuffer&) = delete; + SplitCandidatesBuffer& operator=(const SplitCandidatesBuffer&) = delete; + +public: + SplitCandidatesBuffer(NamespaceString nss, ChunkVersion collectionPlacementVersion) + : _nss(std::move(nss)), + _collectionPlacementVersion(collectionPlacementVersion), + _chunkSplitPoints(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap()) { + } + + /** + * Adds the specified split point to the chunk. The split points must always be within the + * boundaries of the chunk and must come in increasing order. + */ + void addSplitPoint(const Chunk& chunk, const BSONObj& splitPoint) { + auto it = _chunkSplitPoints.find(chunk.getMin()); + if (it == _chunkSplitPoints.end()) { + _chunkSplitPoints.emplace(chunk.getMin(), + SplitInfo(chunk.getShardId(), + _nss, + _collectionPlacementVersion, + chunk.getLastmod(), + chunk.getMin(), + chunk.getMax(), + {splitPoint})); + } else if (splitPoint.woCompare(it->second.splitKeys.back()) > 0) { + it->second.splitKeys.push_back(splitPoint); + } else { + // Split points must come in order + invariant(splitPoint.woCompare(it->second.splitKeys.back()) == 0); + } + } + + /** + * May be called only once for the lifetime of the buffer. Moves the contents of the buffer into + * a vector of split infos to be passed to the split call. + */ + SplitInfoVector done() { + SplitInfoVector splitPoints; + for (const auto& entry : _chunkSplitPoints) { + splitPoints.push_back(std::move(entry.second)); + } + + return splitPoints; + } + +private: + // Namespace and expected collection placement version + const NamespaceString _nss; + const ChunkVersion _collectionPlacementVersion; + + // Chunk min key and split vector associated with that chunk + BSONObjIndexedMap _chunkSplitPoints; +}; + +/** + * Populates splitCandidates with chunk and splitPoint pairs for chunks that violate zone + * range boundaries. + */ +void getSplitCandidatesToEnforceZoneRanges(const ChunkManager& cm, + const DistributionStatus& distribution, + SplitCandidatesBuffer* splitCandidates) { + const auto& globalMax = cm.getShardKeyPattern().getKeyPattern().globalMax(); + + // For each zone range, find chunks that need to be split. 
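+ // Illustrative example (hypothetical shard key {x: 1}): given a zone range [{x: 10}, {x: 20})
+ // and an existing chunk spanning [{x: 5}, {x: 15}), the chunk's min differs from the zone's
+ // min, so {x: 10} is recorded as a split point for that chunk. The same check is applied
+ // below to the chunk containing the zone's max key.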
+ for (const auto& zoneRangeEntry : distribution.zoneRanges()) { + const auto& zoneRange = zoneRangeEntry.second; + + const auto chunkAtZoneMin = cm.findIntersectingChunkWithSimpleCollation(zoneRange.min); + invariant(chunkAtZoneMin.getMax().woCompare(zoneRange.min) > 0); + + if (chunkAtZoneMin.getMin().woCompare(zoneRange.min)) { + splitCandidates->addSplitPoint(chunkAtZoneMin, zoneRange.min); + } + + // The global max key can never fall in the middle of a chunk. + if (!zoneRange.max.woCompare(globalMax)) + continue; + + const auto chunkAtZoneMax = cm.findIntersectingChunkWithSimpleCollation(zoneRange.max); + + // We need to check that both the chunk's minKey does not match the zone's max and also that + // the max is not equal, which would only happen in the case of the zone ending in MaxKey. + if (chunkAtZoneMax.getMin().woCompare(zoneRange.max) && + chunkAtZoneMax.getMax().woCompare(zoneRange.max)) { + splitCandidates->addSplitPoint(chunkAtZoneMax, zoneRange.max); + } + } +} + +/** + * If the number of chunks as given by the ChunkManager is less than the configured minimum + * number of chunks for the sessions collection (minNumChunksForSessionsCollection), calculates + * split points that evenly partition the key space into N ranges (where N is + * minNumChunksForSessionsCollection rounded up to the next power of 2), and populates + * splitCandidates with chunk and splitPoint pairs for chunks that need to split. + */ +void getSplitCandidatesForSessionsCollection(OperationContext* opCtx, + const ChunkManager& cm, + SplitCandidatesBuffer* splitCandidates) { + const auto minNumChunks = minNumChunksForSessionsCollection.load(); + + if (cm.numChunks() >= minNumChunks) { + return; + } + + // Use the next power of 2 as the target number of chunks. + const size_t numBits = 64 - countLeadingZeros64(minNumChunks - 1); + const uint32_t numChunks = 1 << numBits; + + // Compute split points for _id.id that partition the UUID 128-bit data space into numChunks + // equal ranges. Since the numChunks is a power of 2, the split points are the permutations + // of the prefix numBits right-padded with 0's. + std::vector splitPoints; + for (uint32_t i = 1; i < numChunks; i++) { + // Start with a buffer of 0's. + std::array buf{0b0}; + + // Left-shift i to fill the remaining bits in the prefix 32 bits with 0's. + const uint32_t high = i << (CHAR_BIT * 4 - numBits); + + // Fill the prefix 4 bytes with high's bytes. + buf[0] = static_cast(high >> CHAR_BIT * 3); + buf[1] = static_cast(high >> CHAR_BIT * 2); + buf[2] = static_cast(high >> CHAR_BIT * 1); + buf[3] = static_cast(high); + + ConstDataRange cdr(buf.data(), sizeof(buf)); + splitPoints.push_back(BSON("_id" << BSON("id" << UUID::fromCDR(cdr)))); + } + + // For each split point, find a chunk that needs to be split. 
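+ // Worked example of the split-point computation above (assuming, for illustration only, that
+ // minNumChunksForSessionsCollection is 100): numBits = 64 - countLeadingZeros64(99) = 7 and
+ // numChunks = 128, so for i = 1 the prefix is high = 1 << 25 = 0x02000000 and the first split
+ // point is {_id: {id: UUID("02000000-0000-0000-0000-000000000000")}}.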
+ for (auto& splitPoint : splitPoints) { + const auto chunkAtSplitPoint = cm.findIntersectingChunkWithSimpleCollation(splitPoint); + invariant(chunkAtSplitPoint.getMax().woCompare(splitPoint) > 0); + + if (chunkAtSplitPoint.getMin().woCompare(splitPoint)) { + splitCandidates->addSplitPoint(chunkAtSplitPoint, splitPoint); + } + } + + return; +} + +} // namespace + +BalancerChunkSelectionPolicy::BalancerChunkSelectionPolicy(ClusterStatistics* clusterStats) + : _clusterStats(clusterStats) {} BalancerChunkSelectionPolicy::~BalancerChunkSelectionPolicy() = default; +StatusWith BalancerChunkSelectionPolicy::selectChunksToSplit( + OperationContext* opCtx) { + auto shardStatsStatus = _clusterStats->getStats(opCtx); + if (!shardStatsStatus.isOK()) { + return shardStatsStatus.getStatus(); + } + + const auto& shardStats = shardStatsStatus.getValue(); + + const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); + auto collections = catalogClient->getCollections(opCtx, {}); + if (collections.empty()) { + return SplitInfoVector{}; + } + + SplitInfoVector splitCandidates; + + auto client = opCtx->getClient(); + std::shuffle(collections.begin(), collections.end(), client->getPrng().urbg()); + + for (const auto& coll : collections) { + const NamespaceString& nss(coll.getNss()); + + auto candidatesStatus = _getSplitCandidatesForCollection(opCtx, nss, shardStats); + if (candidatesStatus == ErrorCodes::NamespaceNotFound) { + // Namespace got dropped before we managed to get to it, so just skip it + continue; + } else if (!candidatesStatus.isOK()) { + if (nss == NamespaceString::kLogicalSessionsNamespace) { + LOGV2_WARNING(4562402, + "Unable to split sessions collection chunks", + "error"_attr = candidatesStatus.getStatus()); + + } else { + LOGV2_WARNING( + 21852, + "Unable to enforce zone range policy for collection {namespace}: {error}", + "Unable to enforce zone range policy for collection", + logAttrs(nss), + "error"_attr = candidatesStatus.getStatus()); + } + + continue; + } + + splitCandidates.insert(splitCandidates.end(), + std::make_move_iterator(candidatesStatus.getValue().begin()), + std::make_move_iterator(candidatesStatus.getValue().end())); + } + + return splitCandidates; +} + +StatusWith BalancerChunkSelectionPolicy::selectChunksToSplit( + OperationContext* opCtx, const NamespaceString& nss) { + + auto shardStatsStatus = _clusterStats->getStats(opCtx); + if (!shardStatsStatus.isOK()) { + return shardStatsStatus.getStatus(); + } + + const auto& shardStats = shardStatsStatus.getValue(); + + return _getSplitCandidatesForCollection(opCtx, nss, shardStats); +} + +StatusWith BalancerChunkSelectionPolicy::selectChunksToMove( + OperationContext* opCtx, + const std::vector& shardStats, + stdx::unordered_set* availableShards, + stdx::unordered_set* imbalancedCollectionsCachePtr) { + invariant(availableShards); + invariant(imbalancedCollectionsCachePtr); + + if (availableShards->size() < 2) { + return MigrateInfoVector{}; + } + + Timer chunksSelectionTimer; + + const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); + auto collections = catalogClient->getCollections(opCtx, + {}, + repl::ReadConcernLevel::kMajorityReadConcern, + BSON(CollectionType::kNssFieldName << 1)); + if (collections.empty()) { + return MigrateInfoVector{}; + } + + MigrateInfoVector candidateChunks; + + const uint32_t kStatsForBalancingBatchSize = [&]() { + auto batchSize = 100U; + overrideStatsForBalancingBatchSize.execute([&batchSize](const BSONObj& data) { + batchSize = 
data["size"].numberInt(); + LOGV2(7617200, "Overriding collections batch size", "size"_attr = batchSize); + }); + return batchSize; + }(); + + const uint32_t kMaxCachedCollectionsSize = 0.75 * kStatsForBalancingBatchSize; + + // Lambda function used to get a CollectionType leveraging the `collections` vector + // The `collections` vector must be sorted by nss when it is called + auto getCollectionTypeByNss = [&collections](const NamespaceString& nss) + -> std::pair, std::vector::iterator> { + // Using a lower_bound to perform a binary search on the `collections` vector + const auto collIt = + std::lower_bound(collections.begin(), + collections.end(), + nss, + [](const CollectionType& coll, const NamespaceString& ns) { + return coll.getNss() < ns; + }); + + if (collIt == collections.end() || collIt->getNss() != nss) { + return std::make_pair(boost::none, collections.end()); + } + return std::make_pair(*collIt, collIt); + }; + + // Lambda function to check if a collection is explicitly disabled for balancing + const auto canBalanceCollection = [](const CollectionType& coll) -> bool { + if (!coll.getAllowBalance() || !coll.getAllowMigrations() || !coll.getPermitMigrations() || + coll.getDefragmentCollection()) { + LOGV2_DEBUG(5966401, + 1, + "Not balancing explicitly disabled collection", + logAttrs(coll.getNss()), + "allowBalance"_attr = coll.getAllowBalance(), + "allowMigrations"_attr = coll.getAllowMigrations(), + "permitMigrations"_attr = coll.getPermitMigrations(), + "defragmentCollection"_attr = coll.getDefragmentCollection()); + return false; + } + return true; + }; + + // Lambda function to select migrate candidates from a batch of collections + const auto processBatch = [&](std::vector& collBatch) { + const auto collsDataSizeInfo = getDataSizeInfoForCollections(opCtx, collBatch); + + auto client = opCtx->getClient(); + std::shuffle(collBatch.begin(), collBatch.end(), client->getPrng().urbg()); + for (const auto& coll : collBatch) { + + if (availableShards->size() < 2) { + break; + } + + const auto& nss = coll.getNss(); + + auto swMigrateCandidates = _getMigrateCandidatesForCollection( + opCtx, nss, shardStats, collsDataSizeInfo.at(nss), availableShards); + if (swMigrateCandidates == ErrorCodes::NamespaceNotFound) { + // Namespace got dropped before we managed to get to it, so just skip it + imbalancedCollectionsCachePtr->erase(nss); + continue; + } else if (!swMigrateCandidates.isOK()) { + LOGV2_WARNING(21853, + "Unable to balance collection", + logAttrs(nss), + "error"_attr = swMigrateCandidates.getStatus()); + continue; + } + + candidateChunks.insert( + candidateChunks.end(), + std::make_move_iterator(swMigrateCandidates.getValue().first.begin()), + std::make_move_iterator(swMigrateCandidates.getValue().first.end())); + + const auto& migrateCandidates = swMigrateCandidates.getValue().first; + if (migrateCandidates.empty()) { + imbalancedCollectionsCachePtr->erase(nss); + } else if (imbalancedCollectionsCachePtr->size() < kMaxCachedCollectionsSize) { + imbalancedCollectionsCachePtr->insert(nss); + } + } + }; + + // To assess if a collection has chunks to migrate, we need to ask shards the size of that + // collection. 
For efficiency, we ask for a batch of collections per shard request instead + of a single request per collection + std::vector collBatch; + + // The first batch is partially filled by the imbalanced cached collections + for (auto imbalancedNssIt = imbalancedCollectionsCachePtr->begin(); + imbalancedNssIt != imbalancedCollectionsCachePtr->end();) { + + const auto& [imbalancedColl, collIt] = getCollectionTypeByNss(*imbalancedNssIt); + + if (!imbalancedColl.has_value() || !canBalanceCollection(imbalancedColl.value())) { + // The collection was dropped or is no longer enabled for balancing. + imbalancedCollectionsCachePtr->erase(imbalancedNssIt++); + continue; + } + + collBatch.push_back(imbalancedColl.value()); + ++imbalancedNssIt; + + // Remove the collection from the whole list of collections to avoid processing it twice + collections.erase(collIt); + } + + // Iterate all the remaining collections randomly + auto client = opCtx->getClient(); + std::shuffle(collections.begin(), collections.end(), client->getPrng().urbg()); + for (const auto& coll : collections) { + + if (canBalanceCollection(coll)) { + collBatch.push_back(coll); + } + + if (collBatch.size() == kStatsForBalancingBatchSize) { + processBatch(collBatch); + if (availableShards->size() < 2) { + return candidateChunks; + } + collBatch.clear(); + } + + const auto maxTimeMs = balancerChunksSelectionTimeoutMs.load(); + if (candidateChunks.size() > 0 && chunksSelectionTimer.millis() > maxTimeMs) { + LOGV2_DEBUG( + 7100900, + 1, + "Exceeded max time while searching for candidate chunks to migrate in this round.", + "maxTime"_attr = Milliseconds(maxTimeMs), + "chunksSelectionTime"_attr = chunksSelectionTimer.elapsed(), + "numCandidateChunks"_attr = candidateChunks.size()); + + return candidateChunks; + } + } + + if (collBatch.size() > 0) { + processBatch(collBatch); + } + + return candidateChunks; +} + +StatusWith BalancerChunkSelectionPolicy::selectChunksToMove( + OperationContext* opCtx, const NamespaceString& nss) { + auto shardStatsStatus = _clusterStats->getStats(opCtx); + if (!shardStatsStatus.isOK()) { + return shardStatsStatus.getStatus(); + } + + const auto& shardStats = shardStatsStatus.getValue(); + + // Used to check locally if the collection exists, it should throw NamespaceNotFound if it + // doesn't.
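+ // (The CollectionType returned by the call below is intentionally discarded; only the
+ // existence check, and the exception it may raise, matter here.)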
+ ShardingCatalogManager::get(opCtx)->localCatalogClient()->getCollection(opCtx, nss); + + stdx::unordered_set availableShards; + std::transform(shardStats.begin(), + shardStats.end(), + std::inserter(availableShards, availableShards.end()), + [](const ClusterStatistics::ShardStatistics& shardStatistics) -> ShardId { + return shardStatistics.shardId; + }); + + + const auto dataSizeInfo = getDataSizeInfoForCollection(opCtx, nss); + + auto candidatesStatus = + _getMigrateCandidatesForCollection(opCtx, nss, shardStats, dataSizeInfo, &availableShards); + if (!candidatesStatus.isOK()) { + return candidatesStatus.getStatus(); + } + + return candidatesStatus; +} + +StatusWith> BalancerChunkSelectionPolicy::selectSpecificChunkToMove( + OperationContext* opCtx, const NamespaceString& nss, const ChunkType& chunk) { + auto shardStatsStatus = _clusterStats->getStats(opCtx); + if (!shardStatsStatus.isOK()) { + return shardStatsStatus.getStatus(); + } + + const auto& shardStats = shardStatsStatus.getValue(); + + auto routingInfoStatus = + Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, + nss); + if (!routingInfoStatus.isOK()) { + return routingInfoStatus.getStatus(); + } + + const auto& [cm, _] = routingInfoStatus.getValue(); + + const auto collInfoStatus = createCollectionDistributionStatus(opCtx, nss, shardStats, cm); + if (!collInfoStatus.isOK()) { + return collInfoStatus.getStatus(); + } + + const DistributionStatus& distribution = collInfoStatus.getValue(); + + const auto dataSizeInfo = getDataSizeInfoForCollection(opCtx, nss); + + return BalancerPolicy::balanceSingleChunk(chunk, shardStats, distribution, dataSizeInfo); +} + +Status BalancerChunkSelectionPolicy::checkMoveAllowed(OperationContext* opCtx, + const ChunkType& chunk, + const ShardId& newShardId) { + auto shardStatsStatus = _clusterStats->getStats(opCtx); + if (!shardStatsStatus.isOK()) { + return shardStatsStatus.getStatus(); + } + + const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); + const CollectionType collection = catalogClient->getCollection( + opCtx, chunk.getCollectionUUID(), repl::ReadConcernLevel::kLocalReadConcern); + const auto& nss = collection.getNss(); + + + auto shardStats = std::move(shardStatsStatus.getValue()); + + auto routingInfoStatus = + Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, + nss); + if (!routingInfoStatus.isOK()) { + return routingInfoStatus.getStatus(); + } + + const auto& [cm, _] = routingInfoStatus.getValue(); + + const auto collInfoStatus = createCollectionDistributionStatus(opCtx, nss, shardStats, cm); + if (!collInfoStatus.isOK()) { + return collInfoStatus.getStatus(); + } + + const DistributionStatus& distribution = collInfoStatus.getValue(); + + auto newShardIterator = + std::find_if(shardStats.begin(), + shardStats.end(), + [&newShardId](const ClusterStatistics::ShardStatistics& stat) { + return stat.shardId == newShardId; + }); + if (newShardIterator == shardStats.end()) { + return {ErrorCodes::ShardNotFound, + str::stream() << "Unable to find constraints information for shard " << newShardId + << ". 
Move to this shard will be disallowed."}; + } + + return BalancerPolicy::isShardSuitableReceiver(*newShardIterator, + distribution.getZoneForChunk(chunk)); +} + +StatusWith BalancerChunkSelectionPolicy::_getSplitCandidatesForCollection( + OperationContext* opCtx, const NamespaceString& nss, const ShardStatisticsVector& shardStats) { + auto routingInfoStatus = + Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, + nss); + if (!routingInfoStatus.isOK()) { + return routingInfoStatus.getStatus(); + } + + const auto& [cm, _] = routingInfoStatus.getValue(); + + const auto collInfoStatus = createCollectionDistributionStatus(opCtx, nss, shardStats, cm); + if (!collInfoStatus.isOK()) { + return collInfoStatus.getStatus(); + } + + const DistributionStatus& distribution = collInfoStatus.getValue(); + + // Accumulate split points for the same chunk together + SplitCandidatesBuffer splitCandidates(nss, cm.getVersion()); + + if (nss == NamespaceString::kLogicalSessionsNamespace) { + if (!distribution.zones().empty()) { + LOGV2_WARNING(4562401, + "Ignoring zones for the sessions collection", + "zones"_attr = distribution.zones()); + } + + getSplitCandidatesForSessionsCollection(opCtx, cm, &splitCandidates); + } else { + getSplitCandidatesToEnforceZoneRanges(cm, distribution, &splitCandidates); + } + + return splitCandidates.done(); +} + +StatusWith BalancerChunkSelectionPolicy::_getMigrateCandidatesForCollection( + OperationContext* opCtx, + const NamespaceString& nss, + const ShardStatisticsVector& shardStats, + const CollectionDataSizeInfoForBalancing& collDataSizeInfo, + stdx::unordered_set* availableShards) { + auto routingInfoStatus = + Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, + nss); + if (!routingInfoStatus.isOK()) { + return routingInfoStatus.getStatus(); + } + + const auto& [cm, _] = routingInfoStatus.getValue(); + + const auto& shardKeyPattern = cm.getShardKeyPattern().getKeyPattern(); + + const auto collInfoStatus = createCollectionDistributionStatus(opCtx, nss, shardStats, cm); + if (!collInfoStatus.isOK()) { + return collInfoStatus.getStatus(); + } + + const DistributionStatus& distribution = collInfoStatus.getValue(); + + for (const auto& zoneRangeEntry : distribution.zoneRanges()) { + const auto& zoneRange = zoneRangeEntry.second; + + const auto chunkAtZoneMin = cm.findIntersectingChunkWithSimpleCollation(zoneRange.min); + + if (chunkAtZoneMin.getMin().woCompare(zoneRange.min)) { + return {ErrorCodes::IllegalOperation, + str::stream() + << "Zone boundaries " << zoneRange.toString() + << " fall in the middle of an existing chunk " + << ChunkRange(chunkAtZoneMin.getMin(), chunkAtZoneMin.getMax()).toString() + << ". Balancing for collection " << nss.toStringForErrorMsg() + << " will be postponed until the chunk is split appropriately."}; + } + + // The global max key can never fall in the middle of a chunk + if (!zoneRange.max.woCompare(shardKeyPattern.globalMax())) + continue; + + const auto chunkAtZoneMax = cm.findIntersectingChunkWithSimpleCollation(zoneRange.max); + + // We need to check that both the chunk's minKey does not match the zone's max and also that + // the max is not equal, which would only happen in the case of the zone ending in MaxKey. 
+ if (chunkAtZoneMax.getMin().woCompare(zoneRange.max) && + chunkAtZoneMax.getMax().woCompare(zoneRange.max)) { + return {ErrorCodes::IllegalOperation, + str::stream() + << "Zone boundaries " << zoneRange.toString() + << " fall in the middle of an existing chunk " + << ChunkRange(chunkAtZoneMax.getMin(), chunkAtZoneMax.getMax()).toString() + << ". Balancing for collection " << nss.toStringForErrorMsg() + << " will be postponed until the chunk is split appropriately."}; + } + } + + return BalancerPolicy::balance( + shardStats, + distribution, + collDataSizeInfo, + availableShards, + Grid::get(opCtx)->getBalancerConfiguration()->attemptToBalanceJumboChunks()); +} + } // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h index e2c54767ffbcb..a59c0f59e89c4 100644 --- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h +++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy.h @@ -30,81 +30,107 @@ #pragma once #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/balancer/balancer_policy.h" +#include "mongo/db/s/balancer/cluster_statistics.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { -class NamespaceString; -class OperationContext; -template -class StatusWith; - /** - * Interface used by the balancer for selecting chunks, which need to be moved around in order for - * the sharded cluster to be balanced. It is up to the implementation to decide what exactly - * 'balanced' means. + * Class used by the balancer for selecting chunks, which need to be moved around in order for + * the sharded cluster to be balanced. */ class BalancerChunkSelectionPolicy { BalancerChunkSelectionPolicy(const BalancerChunkSelectionPolicy&) = delete; BalancerChunkSelectionPolicy& operator=(const BalancerChunkSelectionPolicy&) = delete; public: - virtual ~BalancerChunkSelectionPolicy(); + BalancerChunkSelectionPolicy(ClusterStatistics* clusterStats); + ~BalancerChunkSelectionPolicy(); /** * Potentially blocking method, which gives out a set of chunks, which need to be split because * they violate the policy for some reason. The reason is decided by the policy and may include * chunk is too big or chunk straddles a zone range. */ - virtual StatusWith selectChunksToSplit(OperationContext* opCtx) = 0; + StatusWith selectChunksToSplit(OperationContext* opCtx); /** - * Given a valid namespace returns all the splits the balancer would need to perform - * with the current state + * Given a valid namespace returns all the splits the balancer would need to perform with the + * current state */ - virtual StatusWith selectChunksToSplit(OperationContext* opCtx, - const NamespaceString& nss) = 0; + StatusWith selectChunksToSplit(OperationContext* opCtx, + const NamespaceString& ns); /** * Potentially blocking method, which gives out a set of chunks to be moved. 
*/ - virtual StatusWith selectChunksToMove( + StatusWith selectChunksToMove( OperationContext* opCtx, const std::vector& shardStats, stdx::unordered_set* availableShards, - stdx::unordered_set* imbalancedCollectionsCachePtr) = 0; - + stdx::unordered_set* imbalancedCollectionsCachePtr); /** - * Given a valid namespace returns all the Migrations the balancer would need to perform - * with the current state + * Given a valid namespace returns all the Migrations the balancer would need to perform with + * the current state. */ - virtual StatusWith selectChunksToMove(OperationContext* opCtx, - const NamespaceString& nss) = 0; + StatusWith selectChunksToMove(OperationContext* opCtx, + const NamespaceString& ns); /** * Requests a single chunk to be relocated to a different shard, if possible. If some error * occurs while trying to determine the best location for the chunk, a failed status is - * returned. If the chunk is already at the best shard that it can be, returns boost::none. + * returned. If the chunk is already at the best shard that it can be, returns `boost::none`. * Otherwise returns migration information for where the chunk should be moved. */ - virtual StatusWith> selectSpecificChunkToMove( - OperationContext* opCtx, const NamespaceString& nss, const ChunkType& chunk) = 0; + StatusWith> selectSpecificChunkToMove(OperationContext* opCtx, + const NamespaceString& nss, + const ChunkType& chunk); /** * Asks the chunk selection policy to validate that the specified chunk migration is allowed - * given the current rules. Returns OK if the migration won't violate any rules or any other + * given the current rules. Returns `OK` if the migration won't violate any rules or any other * failed status otherwise. */ - virtual Status checkMoveAllowed(OperationContext* opCtx, - const ChunkType& chunk, - const ShardId& newShardId) = 0; + Status checkMoveAllowed(OperationContext* opCtx, + const ChunkType& chunk, + const ShardId& newShardId); -protected: - BalancerChunkSelectionPolicy(); +private: + /** + * Synchronous method, which iterates the collection's chunks and uses the zones information to + * figure out whether some of them validate the zone range boundaries and need to be split. + */ + StatusWith _getSplitCandidatesForCollection( + OperationContext* opCtx, + const NamespaceString& nss, + const ShardStatisticsVector& shardStats); + + /** + * Synchronous method, which iterates the collection's size per shard to figure out where to + * place them. + */ + StatusWith _getMigrateCandidatesForCollection( + OperationContext* opCtx, + const NamespaceString& nss, + const ShardStatisticsVector& shardStats, + const CollectionDataSizeInfoForBalancing& collDataSizeInfo, + stdx::unordered_set* availableShards); + + // Source for obtaining cluster statistics. Not owned and must not be destroyed before the + // policy object is destroyed. + ClusterStatistics* const _clusterStats; }; } // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp deleted file mode 100644 index 6ffbe40a4da83..0000000000000 --- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.cpp +++ /dev/null @@ -1,776 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - - -#include "mongo/platform/basic.h" - -#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h" - -#include -#include -#include - -#include "mongo/base/status_with.h" -#include "mongo/bson/bsonobj_comparator_interface.h" -#include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/sharding_config_server_parameters_gen.h" -#include "mongo/db/s/sharding_util.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/bits.h" -#include "mongo/s/balancer_configuration.h" -#include "mongo/s/catalog/type_chunk.h" -#include "mongo/s/catalog/type_collection.h" -#include "mongo/s/catalog_cache.h" -#include "mongo/s/grid.h" -#include "mongo/s/request_types/get_stats_for_balancing_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/util/str.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - - -namespace mongo { - -namespace { - -/** - * Does a linear pass over the information cached in the specified chunk manager and extracts chunk - * distribution and chunk placement information which is needed by the balancer policy. 
- */ -StatusWith createCollectionDistributionStatus( - OperationContext* opCtx, - const NamespaceString& nss, - const ShardStatisticsVector& allShards, - const ChunkManager& chunkMgr) { - ShardToChunksMap shardToChunksMap; - - // Makes sure there is an entry in shardToChunksMap for every shard, so empty shards will also - // be accounted for - for (const auto& stat : allShards) { - shardToChunksMap[stat.shardId]; - } - - chunkMgr.forEachChunk([&](const auto& chunkEntry) { - ChunkType chunk; - chunk.setCollectionUUID(chunkMgr.getUUID()); - chunk.setMin(chunkEntry.getMin()); - chunk.setMax(chunkEntry.getMax()); - chunk.setJumbo(chunkEntry.isJumbo()); - chunk.setShard(chunkEntry.getShardId()); - chunk.setVersion(chunkEntry.getLastmod()); - - shardToChunksMap[chunkEntry.getShardId()].push_back(chunk); - - return true; - }); - - const auto& keyPattern = chunkMgr.getShardKeyPattern().getKeyPattern(); - - // Cache the collection zones - auto swZoneInfo = ZoneInfo::getZonesForCollection(opCtx, nss, keyPattern); - if (!swZoneInfo.isOK()) { - return swZoneInfo.getStatus(); - } - - DistributionStatus distribution(nss, std::move(shardToChunksMap), swZoneInfo.getValue()); - - return {std::move(distribution)}; -} - -stdx::unordered_map -getDataSizeInfoForCollections(OperationContext* opCtx, - const std::vector& collections) { - const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration(); - uassertStatusOK(balancerConfig->refreshAndCheck(opCtx)); - - const auto shardRegistry = Grid::get(opCtx)->shardRegistry(); - const auto shardIds = shardRegistry->getAllShardIds(opCtx); - - // Map to be returned, incrementally populated with the collected statistics - stdx::unordered_map dataSizeInfoMap; - - std::vector namespacesWithUUIDsForStatsRequest; - for (const auto& coll : collections) { - const auto& nss = coll.getNss(); - const auto maxChunkSizeBytes = - coll.getMaxChunkSizeBytes().value_or(balancerConfig->getMaxChunkSizeBytes()); - - dataSizeInfoMap.emplace( - nss, - CollectionDataSizeInfoForBalancing(std::map(), maxChunkSizeBytes)); - - NamespaceWithOptionalUUID nssWithUUID(nss); - nssWithUUID.setUUID(coll.getUuid()); - namespacesWithUUIDsForStatsRequest.push_back(nssWithUUID); - } - - ShardsvrGetStatsForBalancing req{namespacesWithUUIDsForStatsRequest}; - req.setScaleFactor(1); - const auto reqObj = req.toBSON({}); - - const auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); - const auto responsesFromShards = - sharding_util::sendCommandToShards(opCtx, - DatabaseName::kAdmin.toString(), - reqObj, - shardIds, - executor, - false /* throwOnError */); - - for (auto&& response : responsesFromShards) { - try { - const auto& shardId = response.shardId; - const auto errorContext = - "Failed to get stats for balancing from shard '{}'"_format(shardId.toString()); - const auto responseValue = - uassertStatusOKWithContext(std::move(response.swResponse), errorContext); - - const ShardsvrGetStatsForBalancingReply reply = - ShardsvrGetStatsForBalancingReply::parse( - IDLParserContext("ShardsvrGetStatsForBalancingReply"), - std::move(responseValue.data)); - const auto collStatsFromShard = reply.getStats(); - - invariant(collStatsFromShard.size() == collections.size()); - for (const auto& stats : collStatsFromShard) { - invariant(dataSizeInfoMap.contains(stats.getNs())); - dataSizeInfoMap.at(stats.getNs()).shardToDataSizeMap[shardId] = stats.getCollSize(); - } - } catch (const ExceptionFor& ex) { - // Handle `removeShard`: skip shards removed during a balancing round - 
LOGV2_DEBUG(6581603, - 1, - "Skipping shard for the current balancing round", - "error"_attr = redact(ex)); - } - } - - return dataSizeInfoMap; -} - -CollectionDataSizeInfoForBalancing getDataSizeInfoForCollection(OperationContext* opCtx, - const NamespaceString& nss) { - const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - const auto coll = catalogClient->getCollection(opCtx, nss); - std::vector vec{coll}; - return std::move(getDataSizeInfoForCollections(opCtx, vec).at(nss)); -} - -/** - * Helper class used to accumulate the split points for the same chunk together so they can be - * submitted to the shard as a single call versus multiple. This is necessary in order to avoid - * refreshing the chunk metadata after every single split point (if done one by one), because - * splitting a chunk does not yield the same chunk anymore. - */ -class SplitCandidatesBuffer { - SplitCandidatesBuffer(const SplitCandidatesBuffer&) = delete; - SplitCandidatesBuffer& operator=(const SplitCandidatesBuffer&) = delete; - -public: - SplitCandidatesBuffer(NamespaceString nss, ChunkVersion collectionPlacementVersion) - : _nss(std::move(nss)), - _collectionPlacementVersion(collectionPlacementVersion), - _chunkSplitPoints(SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap()) { - } - - /** - * Adds the specified split point to the chunk. The split points must always be within the - * boundaries of the chunk and must come in increasing order. - */ - void addSplitPoint(const Chunk& chunk, const BSONObj& splitPoint) { - auto it = _chunkSplitPoints.find(chunk.getMin()); - if (it == _chunkSplitPoints.end()) { - _chunkSplitPoints.emplace(chunk.getMin(), - SplitInfo(chunk.getShardId(), - _nss, - _collectionPlacementVersion, - chunk.getLastmod(), - chunk.getMin(), - chunk.getMax(), - {splitPoint})); - } else if (splitPoint.woCompare(it->second.splitKeys.back()) > 0) { - it->second.splitKeys.push_back(splitPoint); - } else { - // Split points must come in order - invariant(splitPoint.woCompare(it->second.splitKeys.back()) == 0); - } - } - - /** - * May be called only once for the lifetime of the buffer. Moves the contents of the buffer into - * a vector of split infos to be passed to the split call. - */ - SplitInfoVector done() { - SplitInfoVector splitPoints; - for (const auto& entry : _chunkSplitPoints) { - splitPoints.push_back(std::move(entry.second)); - } - - return splitPoints; - } - -private: - // Namespace and expected collection placement version - const NamespaceString _nss; - const ChunkVersion _collectionPlacementVersion; - - // Chunk min key and split vector associated with that chunk - BSONObjIndexedMap _chunkSplitPoints; -}; - -/** - * Populates splitCandidates with chunk and splitPoint pairs for chunks that violate zone - * range boundaries. - */ -void getSplitCandidatesToEnforceZoneRanges(const ChunkManager& cm, - const DistributionStatus& distribution, - SplitCandidatesBuffer* splitCandidates) { - const auto& globalMax = cm.getShardKeyPattern().getKeyPattern().globalMax(); - - // For each zone range, find chunks that need to be split. 
- for (const auto& zoneRangeEntry : distribution.zoneRanges()) { - const auto& zoneRange = zoneRangeEntry.second; - - const auto chunkAtZoneMin = cm.findIntersectingChunkWithSimpleCollation(zoneRange.min); - invariant(chunkAtZoneMin.getMax().woCompare(zoneRange.min) > 0); - - if (chunkAtZoneMin.getMin().woCompare(zoneRange.min)) { - splitCandidates->addSplitPoint(chunkAtZoneMin, zoneRange.min); - } - - // The global max key can never fall in the middle of a chunk. - if (!zoneRange.max.woCompare(globalMax)) - continue; - - const auto chunkAtZoneMax = cm.findIntersectingChunkWithSimpleCollation(zoneRange.max); - - // We need to check that both the chunk's minKey does not match the zone's max and also that - // the max is not equal, which would only happen in the case of the zone ending in MaxKey. - if (chunkAtZoneMax.getMin().woCompare(zoneRange.max) && - chunkAtZoneMax.getMax().woCompare(zoneRange.max)) { - splitCandidates->addSplitPoint(chunkAtZoneMax, zoneRange.max); - } - } -} - -/** - * If the number of chunks as given by the ChunkManager is less than the configured minimum - * number of chunks for the sessions collection (minNumChunksForSessionsCollection), calculates - * split points that evenly partition the key space into N ranges (where N is - * minNumChunksForSessionsCollection rounded up to the next power of 2), and populates - * splitCandidates with chunk and splitPoint pairs for chunks that need to split. - */ -void getSplitCandidatesForSessionsCollection(OperationContext* opCtx, - const ChunkManager& cm, - SplitCandidatesBuffer* splitCandidates) { - const auto minNumChunks = minNumChunksForSessionsCollection.load(); - - if (cm.numChunks() >= minNumChunks) { - return; - } - - // Use the next power of 2 as the target number of chunks. - const size_t numBits = 64 - countLeadingZeros64(minNumChunks - 1); - const uint32_t numChunks = 1 << numBits; - - // Compute split points for _id.id that partition the UUID 128-bit data space into numChunks - // equal ranges. Since the numChunks is a power of 2, the split points are the permutations - // of the prefix numBits right-padded with 0's. - std::vector splitPoints; - for (uint32_t i = 1; i < numChunks; i++) { - // Start with a buffer of 0's. - std::array buf{0b0}; - - // Left-shift i to fill the remaining bits in the prefix 32 bits with 0's. - const uint32_t high = i << (CHAR_BIT * 4 - numBits); - - // Fill the prefix 4 bytes with high's bytes. - buf[0] = static_cast(high >> CHAR_BIT * 3); - buf[1] = static_cast(high >> CHAR_BIT * 2); - buf[2] = static_cast(high >> CHAR_BIT * 1); - buf[3] = static_cast(high); - - ConstDataRange cdr(buf.data(), sizeof(buf)); - splitPoints.push_back(BSON("_id" << BSON("id" << UUID::fromCDR(cdr)))); - } - - // For each split point, find a chunk that needs to be split. 
- for (auto& splitPoint : splitPoints) { - const auto chunkAtSplitPoint = cm.findIntersectingChunkWithSimpleCollation(splitPoint); - invariant(chunkAtSplitPoint.getMax().woCompare(splitPoint) > 0); - - if (chunkAtSplitPoint.getMin().woCompare(splitPoint)) { - splitCandidates->addSplitPoint(chunkAtSplitPoint, splitPoint); - } - } - - return; -} - -} // namespace - -BalancerChunkSelectionPolicyImpl::BalancerChunkSelectionPolicyImpl(ClusterStatistics* clusterStats, - BalancerRandomSource& random) - : _clusterStats(clusterStats), _random(random) {} - -BalancerChunkSelectionPolicyImpl::~BalancerChunkSelectionPolicyImpl() = default; - -StatusWith BalancerChunkSelectionPolicyImpl::selectChunksToSplit( - OperationContext* opCtx) { - auto shardStatsStatus = _clusterStats->getStats(opCtx); - if (!shardStatsStatus.isOK()) { - return shardStatsStatus.getStatus(); - } - - const auto& shardStats = shardStatsStatus.getValue(); - - const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - auto collections = catalogClient->getCollections(opCtx, {}); - if (collections.empty()) { - return SplitInfoVector{}; - } - - SplitInfoVector splitCandidates; - - std::shuffle(collections.begin(), collections.end(), _random); - - for (const auto& coll : collections) { - const NamespaceString& nss(coll.getNss()); - - auto candidatesStatus = _getSplitCandidatesForCollection(opCtx, nss, shardStats); - if (candidatesStatus == ErrorCodes::NamespaceNotFound) { - // Namespace got dropped before we managed to get to it, so just skip it - continue; - } else if (!candidatesStatus.isOK()) { - if (nss == NamespaceString::kLogicalSessionsNamespace) { - LOGV2_WARNING(4562402, - "Unable to split sessions collection chunks", - "error"_attr = candidatesStatus.getStatus()); - - } else { - LOGV2_WARNING( - 21852, - "Unable to enforce zone range policy for collection {namespace}: {error}", - "Unable to enforce zone range policy for collection", - logAttrs(nss), - "error"_attr = candidatesStatus.getStatus()); - } - - continue; - } - - splitCandidates.insert(splitCandidates.end(), - std::make_move_iterator(candidatesStatus.getValue().begin()), - std::make_move_iterator(candidatesStatus.getValue().end())); - } - - return splitCandidates; -} - -StatusWith BalancerChunkSelectionPolicyImpl::selectChunksToSplit( - OperationContext* opCtx, const NamespaceString& nss) { - - auto shardStatsStatus = _clusterStats->getStats(opCtx); - if (!shardStatsStatus.isOK()) { - return shardStatsStatus.getStatus(); - } - - const auto& shardStats = shardStatsStatus.getValue(); - - return _getSplitCandidatesForCollection(opCtx, nss, shardStats); -} - -StatusWith BalancerChunkSelectionPolicyImpl::selectChunksToMove( - OperationContext* opCtx, - const std::vector& shardStats, - stdx::unordered_set* availableShards, - stdx::unordered_set* imbalancedCollectionsCachePtr) { - invariant(availableShards); - invariant(imbalancedCollectionsCachePtr); - - if (availableShards->size() < 2) { - return MigrateInfoVector{}; - } - - const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - auto collections = catalogClient->getCollections(opCtx, - {}, - repl::ReadConcernLevel::kMajorityReadConcern, - BSON(CollectionType::kNssFieldName << 1)); - if (collections.empty()) { - return MigrateInfoVector{}; - } - - MigrateInfoVector candidateChunks; - static constexpr auto kStatsForBalancingBatchSize = 50; - static constexpr auto kMaxCachedCollectionsSize = int(0.75 * kStatsForBalancingBatchSize); - - // Lambda function used to get a 
CollectionType leveraging the `collections` vector - // The `collections` vector must be sorted by nss when it is called - auto getCollectionTypeByNss = [&collections](const NamespaceString& nss) - -> std::pair, std::vector::iterator> { - // Using a lower_bound to perform a binary search on the `collections` vector - const auto collIt = - std::lower_bound(collections.begin(), - collections.end(), - nss, - [](const CollectionType& coll, const NamespaceString& ns) { - return coll.getNss() < ns; - }); - - if (collIt == collections.end() || collIt->getNss() != nss) { - return std::make_pair(boost::none, collections.end()); - } - return std::make_pair(*collIt, collIt); - }; - - // Lambda function to check if a collection is explicitly disabled for balancing - const auto canBalanceCollection = [](const CollectionType& coll) -> bool { - if (!coll.getAllowBalance() || !coll.getAllowMigrations() || !coll.getPermitMigrations() || - coll.getDefragmentCollection()) { - LOGV2_DEBUG(5966401, - 1, - "Not balancing explicitly disabled collection", - logAttrs(coll.getNss()), - "allowBalance"_attr = coll.getAllowBalance(), - "allowMigrations"_attr = coll.getAllowMigrations(), - "permitMigrations"_attr = coll.getPermitMigrations(), - "defragmentCollection"_attr = coll.getDefragmentCollection()); - return false; - } - return true; - }; - - // Lambda function to select migrate candidates from a batch of collections - const auto processBatch = [&](std::vector& collBatch) { - const auto collsDataSizeInfo = getDataSizeInfoForCollections(opCtx, collBatch); - - std::shuffle(collBatch.begin(), collBatch.end(), _random); - for (const auto& coll : collBatch) { - - if (availableShards->size() < 2) { - break; - } - - const auto& nss = coll.getNss(); - - auto swMigrateCandidates = _getMigrateCandidatesForCollection( - opCtx, nss, shardStats, collsDataSizeInfo.at(nss), availableShards); - if (swMigrateCandidates == ErrorCodes::NamespaceNotFound) { - // Namespace got dropped before we managed to get to it, so just skip it - imbalancedCollectionsCachePtr->erase(nss); - continue; - } else if (!swMigrateCandidates.isOK()) { - LOGV2_WARNING(21853, - "Unable to balance collection", - logAttrs(nss), - "error"_attr = swMigrateCandidates.getStatus()); - continue; - } - - candidateChunks.insert( - candidateChunks.end(), - std::make_move_iterator(swMigrateCandidates.getValue().first.begin()), - std::make_move_iterator(swMigrateCandidates.getValue().first.end())); - - const auto& migrateCandidates = swMigrateCandidates.getValue().first; - if (migrateCandidates.empty()) { - imbalancedCollectionsCachePtr->erase(nss); - } else if (imbalancedCollectionsCachePtr->size() < kMaxCachedCollectionsSize) { - imbalancedCollectionsCachePtr->insert(nss); - } - } - }; - - // To assess if a collection has chunks to migrate, we need to ask shards the size of that - // collection. For efficiency, we ask for a batch of collections per every shard request instead - // of a single request per collection - std::vector collBatch; - - // The first batch is partially filled by the imbalanced cached collections - for (auto imbalancedNssIt = imbalancedCollectionsCachePtr->begin(); - imbalancedNssIt != imbalancedCollectionsCachePtr->end();) { - - const auto& [imbalancedColl, collIt] = getCollectionTypeByNss(*imbalancedNssIt); - - if (!imbalancedColl.has_value() || !canBalanceCollection(imbalancedColl.value())) { - // The collection was dropped or is no longer enabled for balancing. 
- imbalancedCollectionsCachePtr->erase(imbalancedNssIt++); - continue; - } - - collBatch.push_back(imbalancedColl.value()); - ++imbalancedNssIt; - - // Remove the collection from the whole list of collections to avoid processing it twice - collections.erase(collIt); - } - - // Iterate all the remaining collections randomly - std::shuffle(collections.begin(), collections.end(), _random); - for (const auto& coll : collections) { - - if (canBalanceCollection(coll)) { - collBatch.push_back(coll); - } - - if (collBatch.size() == kStatsForBalancingBatchSize) { - processBatch(collBatch); - if (availableShards->size() < 2) { - return candidateChunks; - } - collBatch.clear(); - } - } - - if (collBatch.size() > 0) { - processBatch(collBatch); - } - - return candidateChunks; -} - -StatusWith BalancerChunkSelectionPolicyImpl::selectChunksToMove( - OperationContext* opCtx, const NamespaceString& nss) { - auto shardStatsStatus = _clusterStats->getStats(opCtx); - if (!shardStatsStatus.isOK()) { - return shardStatsStatus.getStatus(); - } - - const auto& shardStats = shardStatsStatus.getValue(); - - // Used to check locally if the collection exists, it should trow NamespaceNotFound if it - // doesn't. - ShardingCatalogManager::get(opCtx)->localCatalogClient()->getCollection(opCtx, nss); - - stdx::unordered_set availableShards; - std::transform(shardStats.begin(), - shardStats.end(), - std::inserter(availableShards, availableShards.end()), - [](const ClusterStatistics::ShardStatistics& shardStatistics) -> ShardId { - return shardStatistics.shardId; - }); - - - const auto dataSizeInfo = getDataSizeInfoForCollection(opCtx, nss); - - auto candidatesStatus = - _getMigrateCandidatesForCollection(opCtx, nss, shardStats, dataSizeInfo, &availableShards); - if (!candidatesStatus.isOK()) { - return candidatesStatus.getStatus(); - } - - return candidatesStatus; -} - -StatusWith> -BalancerChunkSelectionPolicyImpl::selectSpecificChunkToMove(OperationContext* opCtx, - const NamespaceString& nss, - const ChunkType& chunk) { - auto shardStatsStatus = _clusterStats->getStats(opCtx); - if (!shardStatsStatus.isOK()) { - return shardStatsStatus.getStatus(); - } - - const auto& shardStats = shardStatsStatus.getValue(); - - auto routingInfoStatus = - Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, - nss); - if (!routingInfoStatus.isOK()) { - return routingInfoStatus.getStatus(); - } - - const auto& [cm, _] = routingInfoStatus.getValue(); - - const auto collInfoStatus = createCollectionDistributionStatus(opCtx, nss, shardStats, cm); - if (!collInfoStatus.isOK()) { - return collInfoStatus.getStatus(); - } - - const DistributionStatus& distribution = collInfoStatus.getValue(); - - const auto dataSizeInfo = getDataSizeInfoForCollection(opCtx, nss); - - return BalancerPolicy::balanceSingleChunk(chunk, shardStats, distribution, dataSizeInfo); -} - -Status BalancerChunkSelectionPolicyImpl::checkMoveAllowed(OperationContext* opCtx, - const ChunkType& chunk, - const ShardId& newShardId) { - auto shardStatsStatus = _clusterStats->getStats(opCtx); - if (!shardStatsStatus.isOK()) { - return shardStatsStatus.getStatus(); - } - - const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - const CollectionType collection = catalogClient->getCollection( - opCtx, chunk.getCollectionUUID(), repl::ReadConcernLevel::kLocalReadConcern); - const auto& nss = collection.getNss(); - - - auto shardStats = std::move(shardStatsStatus.getValue()); - - auto routingInfoStatus = - 
Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, - nss); - if (!routingInfoStatus.isOK()) { - return routingInfoStatus.getStatus(); - } - - const auto& [cm, _] = routingInfoStatus.getValue(); - - const auto collInfoStatus = createCollectionDistributionStatus(opCtx, nss, shardStats, cm); - if (!collInfoStatus.isOK()) { - return collInfoStatus.getStatus(); - } - - const DistributionStatus& distribution = collInfoStatus.getValue(); - - auto newShardIterator = - std::find_if(shardStats.begin(), - shardStats.end(), - [&newShardId](const ClusterStatistics::ShardStatistics& stat) { - return stat.shardId == newShardId; - }); - if (newShardIterator == shardStats.end()) { - return {ErrorCodes::ShardNotFound, - str::stream() << "Unable to find constraints information for shard " << newShardId - << ". Move to this shard will be disallowed."}; - } - - return BalancerPolicy::isShardSuitableReceiver(*newShardIterator, - distribution.getZoneForChunk(chunk)); -} - -StatusWith BalancerChunkSelectionPolicyImpl::_getSplitCandidatesForCollection( - OperationContext* opCtx, const NamespaceString& nss, const ShardStatisticsVector& shardStats) { - auto routingInfoStatus = - Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, - nss); - if (!routingInfoStatus.isOK()) { - return routingInfoStatus.getStatus(); - } - - const auto& [cm, _] = routingInfoStatus.getValue(); - - const auto collInfoStatus = createCollectionDistributionStatus(opCtx, nss, shardStats, cm); - if (!collInfoStatus.isOK()) { - return collInfoStatus.getStatus(); - } - - const DistributionStatus& distribution = collInfoStatus.getValue(); - - // Accumulate split points for the same chunk together - SplitCandidatesBuffer splitCandidates(nss, cm.getVersion()); - - if (nss == NamespaceString::kLogicalSessionsNamespace) { - if (!distribution.zones().empty()) { - LOGV2_WARNING(4562401, - "Ignoring zones for the sessions collection", - "zones"_attr = distribution.zones()); - } - - getSplitCandidatesForSessionsCollection(opCtx, cm, &splitCandidates); - } else { - getSplitCandidatesToEnforceZoneRanges(cm, distribution, &splitCandidates); - } - - return splitCandidates.done(); -} - -StatusWith -BalancerChunkSelectionPolicyImpl::_getMigrateCandidatesForCollection( - OperationContext* opCtx, - const NamespaceString& nss, - const ShardStatisticsVector& shardStats, - const CollectionDataSizeInfoForBalancing& collDataSizeInfo, - stdx::unordered_set* availableShards) { - auto routingInfoStatus = - Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, - nss); - if (!routingInfoStatus.isOK()) { - return routingInfoStatus.getStatus(); - } - - const auto& [cm, _] = routingInfoStatus.getValue(); - - const auto& shardKeyPattern = cm.getShardKeyPattern().getKeyPattern(); - - const auto collInfoStatus = createCollectionDistributionStatus(opCtx, nss, shardStats, cm); - if (!collInfoStatus.isOK()) { - return collInfoStatus.getStatus(); - } - - const DistributionStatus& distribution = collInfoStatus.getValue(); - - for (const auto& zoneRangeEntry : distribution.zoneRanges()) { - const auto& zoneRange = zoneRangeEntry.second; - - const auto chunkAtZoneMin = cm.findIntersectingChunkWithSimpleCollation(zoneRange.min); - - if (chunkAtZoneMin.getMin().woCompare(zoneRange.min)) { - return {ErrorCodes::IllegalOperation, - str::stream() - << "Zone boundaries " << zoneRange.toString() - << " fall in the middle of an existing chunk " - << 
ChunkRange(chunkAtZoneMin.getMin(), chunkAtZoneMin.getMax()).toString() - << ". Balancing for collection " << nss.ns() - << " will be postponed until the chunk is split appropriately."}; - } - - // The global max key can never fall in the middle of a chunk - if (!zoneRange.max.woCompare(shardKeyPattern.globalMax())) - continue; - - const auto chunkAtZoneMax = cm.findIntersectingChunkWithSimpleCollation(zoneRange.max); - - // We need to check that both the chunk's minKey does not match the zone's max and also that - // the max is not equal, which would only happen in the case of the zone ending in MaxKey. - if (chunkAtZoneMax.getMin().woCompare(zoneRange.max) && - chunkAtZoneMax.getMax().woCompare(zoneRange.max)) { - return {ErrorCodes::IllegalOperation, - str::stream() - << "Zone boundaries " << zoneRange.toString() - << " fall in the middle of an existing chunk " - << ChunkRange(chunkAtZoneMax.getMin(), chunkAtZoneMax.getMax()).toString() - << ". Balancing for collection " << nss.ns() - << " will be postponed until the chunk is split appropriately."}; - } - } - - return BalancerPolicy::balance( - shardStats, - distribution, - collDataSizeInfo, - availableShards, - Grid::get(opCtx)->getBalancerConfiguration()->attemptToBalanceJumboChunks()); -} - -} // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h deleted file mode 100644 index b84c8fb674627..0000000000000 --- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include "mongo/db/s/balancer/balancer_chunk_selection_policy.h" -#include "mongo/db/s/balancer/balancer_random.h" - -namespace mongo { - -class ClusterStatistics; - -class BalancerChunkSelectionPolicyImpl final : public BalancerChunkSelectionPolicy { -public: - BalancerChunkSelectionPolicyImpl(ClusterStatistics* clusterStats, BalancerRandomSource& random); - ~BalancerChunkSelectionPolicyImpl(); - - StatusWith selectChunksToSplit(OperationContext* opCtx) override; - - StatusWith selectChunksToSplit(OperationContext* opCtx, - const NamespaceString& ns) override; - - StatusWith selectChunksToMove( - OperationContext* opCtx, - const std::vector& shardStats, - stdx::unordered_set* availableShards, - stdx::unordered_set* imbalancedCollectionsCachePtr) override; - - StatusWith selectChunksToMove(OperationContext* opCtx, - const NamespaceString& ns) override; - - StatusWith> selectSpecificChunkToMove( - OperationContext* opCtx, const NamespaceString& nss, const ChunkType& chunk) override; - - Status checkMoveAllowed(OperationContext* opCtx, - const ChunkType& chunk, - const ShardId& newShardId) override; - -private: - /** - * Synchronous method, which iterates the collection's chunks and uses the zones information to - * figure out whether some of them validate the zone range boundaries and need to be split. - */ - StatusWith _getSplitCandidatesForCollection( - OperationContext* opCtx, - const NamespaceString& nss, - const ShardStatisticsVector& shardStats); - - /** - * Synchronous method, which iterates the collection's size per shard to figure out where to - * place them. - */ - StatusWith _getMigrateCandidatesForCollection( - OperationContext* opCtx, - const NamespaceString& nss, - const ShardStatisticsVector& shardStats, - const CollectionDataSizeInfoForBalancing& collDataSizeInfo, - stdx::unordered_set* availableShards); - - // Source for obtaining cluster statistics. Not owned and must not be destroyed before the - // policy object is destroyed. - ClusterStatistics* const _clusterStats; - - // Source of randomness when metadata needs to be randomized. - BalancerRandomSource& _random; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp index 226de26b12c03..cdc29218bca66 100644 --- a/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp +++ b/src/mongo/db/s/balancer/balancer_chunk_selection_policy_test.cpp @@ -27,15 +27,58 @@ * it in the license file. 
*/ +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" -#include "mongo/db/s/balancer/balancer_chunk_selection_policy_impl.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/balancer/balancer_chunk_selection_policy.h" #include "mongo/db/s/balancer/cluster_statistics_impl.h" #include "mongo/db/s/balancer/migration_test_fixture.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/platform/random.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/grid.h" #include "mongo/s/request_types/get_stats_for_balancing_gen.h" -#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -45,16 +88,33 @@ namespace { using executor::RemoteCommandRequest; const std::string kDbName = "TestDb"; -const NamespaceString kNamespace(kDbName, "TestColl"); +const auto kNamespace = NamespaceString::createNamespaceString_forTest(kDbName, "TestColl"); const int kSizeOnDisk = 1; class BalancerChunkSelectionTest : public MigrationTestFixture { protected: BalancerChunkSelectionTest() - : _random(std::random_device{}()), - _clusterStats(std::make_unique(_random)), + : _clusterStats(std::make_unique()), _chunkSelectionPolicy( - std::make_unique(_clusterStats.get(), _random)) {} + std::make_unique(_clusterStats.get())) {} + + /** + * Generates a default chunks distribution across shards with the form: + * [MinKey, 0), [0, 1), [1, 2) ... [N - 2, MaxKey) + */ + std::map> generateDefaultChunkRanges( + const std::vector& shards) { + + std::map> chunksPerShard; + for (auto i = 0U; i < shards.size(); ++i) { + const ShardId& shardId = shards[i]; + const auto min = (i == 0 ? kKeyPattern.globalMin() : BSON(kPattern << int(i - 1))); + const auto max = + (i == shards.size() - 1 ? 
kKeyPattern.globalMax() : BSON(kPattern << int(i))); + chunksPerShard[shardId].push_back(ChunkRange(min, max)); + } + return chunksPerShard; + } /** * Sets up mock network to expect a listDatabases command and returns a BSON response with @@ -151,7 +211,7 @@ class BalancerChunkSelectionTest : public MigrationTestFixture { BSONObjBuilder resultBuilder; CommandHelpers::appendCommandStatusNoThrow(resultBuilder, Status::OK()); - // Build a response for given request + // Build a response for every given request onCommand([&](const RemoteCommandRequest& request) { ASSERT(request.cmdObj[ShardsvrGetStatsForBalancing::kCommandName]); @@ -159,7 +219,7 @@ class BalancerChunkSelectionTest : public MigrationTestFixture { ShardId shardId = getShardIdByHost(request.target); resultBuilder.append("shardId", shardId); - // Build `stats` array + // Build `stats` array: [ {"namespace": , "collSize": }, ...] { BSONArrayBuilder statsArrayBuilder(resultBuilder.subarrayStart("stats")); @@ -183,7 +243,65 @@ class BalancerChunkSelectionTest : public MigrationTestFixture { } /** - * Sets up a collection and its chunks according to the given range distribution across shards + * Same as expectGetStatsForBalancingCommands with the difference that this function will expect + * only one migration between the specified shards + */ + void expectGetStatsForBalancingCommandsWithOneMigration(uint32_t numShards, + ShardId donorShardId, + ShardId recipientShardId) { + ASSERT_NE(donorShardId, recipientShardId); + + const auto maxChunkSizeBytes = + Grid::get(operationContext())->getBalancerConfiguration()->getMaxChunkSizeBytes(); + const auto defaultCollSizeOnShard = 2 * maxChunkSizeBytes; + const auto imbalancedCollSizeOnRecipient = maxChunkSizeBytes; + const auto imbalancedCollSizeOnDonor = 5 * maxChunkSizeBytes; + + for (auto i = 0U; i < numShards; ++i) { + BSONObjBuilder resultBuilder; + CommandHelpers::appendCommandStatusNoThrow(resultBuilder, Status::OK()); + + // Build a response for every given request + onCommand([&](const RemoteCommandRequest& request) { + ASSERT(request.cmdObj[ShardsvrGetStatsForBalancing::kCommandName]); + + // Get `shardId` + ShardId shardId = getShardIdByHost(request.target); + resultBuilder.append("shardId", shardId); + + // Build `stats` array: [ {"namespace": , "collSize": }, ...] 
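Editorial note: both mock helpers in this fixture answer a `_shardsvrGetStatsForBalancing` request with a document of roughly the form `{ shardId: <id>, stats: [ { namespace: <nss>, collSize: <bytes> }, ... ], ok: 1 }`, with one `stats` entry per collection named in the request. The sketch below models just that document shape in plain standard C++ (no MongoDB headers); the function name and JSON assembly are illustrative only, not the fixture's actual code.

```cpp
// Schematic only: the shape of the mocked per-shard response
// ({shardId, stats: [{namespace, collSize}, ...]}). Names are illustrative.
#include <iostream>
#include <sstream>
#include <string>
#include <utility>
#include <vector>

std::string buildStatsForBalancingResponse(
    const std::string& shardId,
    const std::vector<std::pair<std::string, long long>>& collSizes) {
    std::ostringstream out;
    out << "{ \"shardId\": \"" << shardId << "\", \"stats\": [";
    for (size_t i = 0; i < collSizes.size(); ++i) {
        if (i > 0)
            out << ", ";
        out << "{ \"namespace\": \"" << collSizes[i].first
            << "\", \"collSize\": " << collSizes[i].second << " }";
    }
    out << "], \"ok\": 1 }";
    return out.str();
}

int main() {
    // One entry per collection named in the incoming request.
    std::cout << buildStatsForBalancingResponse(
                     "shard0",
                     {{"TestDb.TestColl0", 512LL * 1024 * 1024}, {"TestDb.TestColl1", 0}})
              << "\n";
}
```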
+ { + bool firstColl = true; + BSONArrayBuilder statsArrayBuilder(resultBuilder.subarrayStart("stats")); + for (const auto& reqColl : + request.cmdObj[ShardsvrGetStatsForBalancing::kCollectionsFieldName] + .Array()) { + const auto nss = + NamespaceWithOptionalUUID::parse( + IDLParserContext("BalancerChunkSelectionPolicyTest"), reqColl.Obj()) + .getNs(); + + const auto collSize = [&]() { + if (firstColl && shardId == donorShardId) { + return imbalancedCollSizeOnDonor; + } else if (firstColl && shardId == recipientShardId) { + return imbalancedCollSizeOnRecipient; + } + return defaultCollSizeOnShard; + }(); + + statsArrayBuilder.append(CollStatsForBalancing(nss, collSize).toBSON()); + firstColl = false; + } + } + return resultBuilder.obj(); + }); + } + } + + /** + * Sets up a collection and its chunks according to the given range distribution across + * shards */ UUID setUpCollectionWithChunks( const NamespaceString& ns, @@ -205,53 +323,52 @@ class BalancerChunkSelectionTest : public MigrationTestFixture { } /** - * Returns a new BSON object with the zone encoded using the legacy field "tags" - * (to mimic the expected schema of config.shards) + * Returns a new ShardType object with the specified zones appended to the given shard */ - BSONObj appendZones(const BSONObj shardBSON, std::vector zones) { - BSONObjBuilder appendedShardBSON(shardBSON); - BSONArrayBuilder zonesBuilder; - for (auto& zone : zones) { - zonesBuilder.append(zone); - } - zonesBuilder.done(); - appendedShardBSON.append("tags", zonesBuilder.arr()); - return appendedShardBSON.obj(); + ShardType appendZones(const ShardType& shard, std::vector zones) { + auto alteredShard = shard; + alteredShard.setTags(zones); + return alteredShard; + } + + std::vector getShardStats(OperationContext* opCtx) { + return uassertStatusOK(_clusterStats.get()->getStats(opCtx)); + } + + stdx::unordered_set getAllShardIds(OperationContext* opCtx) { + const auto& shards = shardRegistry()->getAllShardIds(opCtx); + return stdx::unordered_set(shards.begin(), shards.end()); + } + + /** + * Syntactic sugar function for calling BalancerChunkSelectionPolicy::selectChunksToMove() + */ + MigrateInfoVector selectChunksToMove(OperationContext* opCtx) { + auto availableShards = getAllShardIds(opCtx); + + const auto& swChunksToMove = _chunkSelectionPolicy.get()->selectChunksToMove( + opCtx, getShardStats(opCtx), &availableShards, &_imbalancedCollectionsCache); + ASSERT_OK(swChunksToMove.getStatus()); + + return swChunksToMove.getValue(); } - BalancerRandomSource _random; std::unique_ptr _clusterStats; - std::unique_ptr _chunkSelectionPolicy; stdx::unordered_set _imbalancedCollectionsCache; -}; + RAIIServerParameterControllerForTest _balancerChunksSelectionTimeout{ + "balancerChunksSelectionTimeoutMs", 60000}; -stdx::unordered_set getAllShardIds( - const std::vector& shardStats) { - stdx::unordered_set shards; - std::transform(shardStats.begin(), - shardStats.end(), - std::inserter(shards, shards.end()), - [](const ClusterStatistics::ShardStatistics& shardStaticstics) -> ShardId { - return shardStaticstics.shardId; - }); - return shards; -} + // Object under test + std::unique_ptr _chunkSelectionPolicy; +}; TEST_F(BalancerChunkSelectionTest, ZoneRangesOverlap) { - // Set up two shards in the metadata. 
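Editorial note on the `expectGetStatsForBalancingCommandsWithOneMigration` helper completed above: it makes exactly one collection per batch look imbalanced by reporting it at five times the max chunk size on the donor and once the max chunk size on the recipient, while every other collection/shard pair reports a balanced two times. A hedged recap of that size selection as a standalone function (names are illustrative, not the test's API):

```cpp
// Illustrative only: the per-collection size reported for each shard so that a
// single collection per batch appears imbalanced between donor and recipient.
#include <cassert>
#include <cstdint>
#include <string>

int64_t reportedCollSizeBytes(bool isFirstCollInBatch,
                              const std::string& shardId,
                              const std::string& donorShardId,
                              const std::string& recipientShardId,
                              int64_t maxChunkSizeBytes) {
    if (isFirstCollInBatch && shardId == donorShardId)
        return 5 * maxChunkSizeBytes;  // oversized on the donor
    if (isFirstCollInBatch && shardId == recipientShardId)
        return 1 * maxChunkSizeBytes;  // undersized on the recipient
    return 2 * maxChunkSizeBytes;      // everything else looks balanced
}

int main() {
    const int64_t maxChunk = 128LL * 1024 * 1024;
    assert(reportedCollSizeBytes(true, "shard0", "shard0", "shard1", maxChunk) == 5 * maxChunk);
    assert(reportedCollSizeBytes(false, "shard3", "shard0", "shard1", maxChunk) == 2 * maxChunk);
}
```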
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - kShard0, - kMajorityWriteConcern)); - ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - kShard1, - kMajorityWriteConcern)); + setupShards({kShard0, kShard1}); // Set up a database and a sharded collection in the metadata. const auto collUUID = UUID::gen(); ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0}); - setUpDatabase(kDbName, kShardId0); + setupDatabase(kDbName, kShardId0); setUpCollection(kNamespace, collUUID, version); // Set up one chunk for the collection in the metadata. @@ -267,11 +384,6 @@ TEST_F(BalancerChunkSelectionTest, ZoneRangesOverlap) { ThreadClient tc(getServiceContext()); auto opCtx = Client::getCurrent()->makeOperationContext(); - // Requesting chunks to be relocated requires running commands on each shard to get - // shard statistics. Set up dummy hosts for the source shards. - shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0); - shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1); - auto migrateInfoStatus = _chunkSelectionPolicy.get()->selectSpecificChunkToMove( opCtx.get(), kNamespace, chunk); ASSERT_EQUALS(ErrorCodes::RangeOverlapConflict, @@ -295,20 +407,12 @@ TEST_F(BalancerChunkSelectionTest, ZoneRangesOverlap) { } TEST_F(BalancerChunkSelectionTest, ZoneRangeMaxNotAlignedWithChunkMax) { - // Set up two shards in the metadata. - ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - appendZones(kShard0, {"A"}), - kMajorityWriteConcern)); - ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - appendZones(kShard1, {"A"}), - kMajorityWriteConcern)); + setupShards({appendZones(kShard0, {"A"}), appendZones(kShard1, {"A"})}); // Set up a database and a sharded collection in the metadata. const auto collUUID = UUID::gen(); ChunkVersion version({OID::gen(), Timestamp(42)}, {2, 0}); - setUpDatabase(kDbName, kShardId0); + setupDatabase(kDbName, kShardId0); setUpCollection(kNamespace, collUUID, version); // Set up the zone. @@ -326,23 +430,13 @@ TEST_F(BalancerChunkSelectionTest, ZoneRangeMaxNotAlignedWithChunkMax) { ThreadClient tc(getServiceContext()); auto opCtx = Client::getCurrent()->makeOperationContext(); - // Requests chunks to be relocated requires running commands on each shard to - // get shard statistics. Set up dummy hosts for the source shards. - shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0); - shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1); - - std::vector shardStats = - uassertStatusOK(_clusterStats.get()->getStats(opCtx.get())); - auto availableShards = getAllShardIds(shardStats); _imbalancedCollectionsCache.clear(); - auto candidateChunksStatus = _chunkSelectionPolicy.get()->selectChunksToMove( - opCtx.get(), shardStats, &availableShards, &_imbalancedCollectionsCache); - ASSERT_OK(candidateChunksStatus.getStatus()); + const auto& chunksToMove = selectChunksToMove(opCtx.get()); // The balancer does not bubble up the IllegalOperation error, but it is expected // to postpone the balancing work for the zones with the error until the chunks // are split appropriately. 
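Editorial note: this test exercises the behaviour documented in the comment above it: when a zone boundary falls strictly inside an existing chunk, the balancer does not surface the `IllegalOperation` error but postpones balancing for that collection until the chunk is split. A minimal sketch of the underlying alignment predicate, using integers as stand-ins for shard key values (the helper name is invented for illustration):

```cpp
// Sketch: a zone boundary is "misaligned" when it lies strictly between an
// existing chunk's min and max, i.e. the chunk would have to be split first.
#include <cassert>

struct Chunk {
    int min;
    int max;  // exclusive upper bound
};

bool zoneBoundaryFallsInsideChunk(const Chunk& chunk, int zoneBoundary) {
    return zoneBoundary != chunk.min && zoneBoundary != chunk.max;
}

int main() {
    Chunk wholeRange{-1000, 1000};  // stand-in for the single [MinKey, MaxKey) chunk
    assert(zoneBoundaryFallsInsideChunk(wholeRange, 10));     // needs a split first
    assert(!zoneBoundaryFallsInsideChunk(wholeRange, 1000));  // aligned with the chunk max
}
```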
- ASSERT_EQUALS(0U, candidateChunksStatus.getValue().size()); + ASSERT_EQUALS(0U, chunksToMove.size()); }); const int numShards = 2; @@ -361,31 +455,21 @@ TEST_F(BalancerChunkSelectionTest, ZoneRangeMaxNotAlignedWithChunkMax) { } TEST_F(BalancerChunkSelectionTest, AllImbalancedCollectionsShouldEventuallyBeSelectedForBalancing) { - // Set up two shards in the metadata. - ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - kShard0, - kMajorityWriteConcern)); - ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - kShard1, - kMajorityWriteConcern)); - - // Set up database - setUpDatabase(kDbName, kShardId0); - - // Set up N imbalanced collections (more than `kStatsForBalancingBatchSize`) - const int numCollections = 60; - const int maxIterations = 3000; - - // Routing table for each collection: - // Shard0 -> {minKey, maxKey} (512 MB) - // Shard1 -> {} ( 0 MB) + setupShards({kShard0, kShard1}); + setupDatabase(kDbName, kShardId0); + + // Override collections batch size to 4 for speeding up the test + FailPointEnableBlock overrideBatchSizeGuard("overrideStatsForBalancingBatchSize", + BSON("size" << 4)); + + // Set up 7 imbalanced collections (more than `kStatsForBalancingBatchSize`) + const int numCollections = 7; + const int maxIterations = 1000; + for (auto i = 0; i < numCollections; ++i) { - const std::string collName = "TestColl" + std::to_string(i); setUpCollectionWithChunks( - NamespaceString(kDbName, collName), - {{kShardId0, {{kKeyPattern.globalMin(), kKeyPattern.globalMax()}}}, {kShardId1, {}}}); + NamespaceString::createNamespaceString_forTest(kDbName, "TestColl" + std::to_string(i)), + generateDefaultChunkRanges({kShardId0, kShardId1})); } std::set collectionsSelected; @@ -398,25 +482,18 @@ TEST_F(BalancerChunkSelectionTest, AllImbalancedCollectionsShouldEventuallyBeSel ThreadClient tc(getServiceContext()); auto opCtx = Client::getCurrent()->makeOperationContext(); - // Requests chunks to be relocated requires running commands on each shard to - // get shard statistics. Set up dummy hosts for the source shards. - shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0); - shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1); - - std::vector shardStats = - uassertStatusOK(_clusterStats.get()->getStats(opCtx.get())); - auto availableShards = getAllShardIds(shardStats); - - auto chunksToMoveWithStatus = _chunkSelectionPolicy->selectChunksToMove( - opCtx.get(), shardStats, &availableShards, &_imbalancedCollectionsCache); - ASSERT_OK(chunksToMoveWithStatus.getStatus()); + const auto& chunksToMove = selectChunksToMove(opCtx.get()); - for (const auto& chunkToMove : chunksToMoveWithStatus.getValue()) { + for (const auto& chunkToMove : chunksToMove) { collectionsSelected.insert(chunkToMove.nss); } }); expectGetStatsCommands(2 /*numShards*/); + + // Collection size distribution for each collection: + // Shard0 -> 512 MB + // Shard1 -> 0 MB expectGetStatsForBalancingCommands( {{kShardId0, 512 * 1024 * 1024 /*Bytes*/}, {kShardId1, 0 /*Bytes*/}}); @@ -436,30 +513,16 @@ TEST_F(BalancerChunkSelectionTest, AllImbalancedCollectionsShouldEventuallyBeSel ASSERT_EQ(numCollections, collectionsSelected.size()); } -TEST_F(BalancerChunkSelectionTest, SelectedCollectionsShouldBeCached) { - // Set up two shards in the metadata. 
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - kShard0, - kMajorityWriteConcern)); - ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - kShard1, - kMajorityWriteConcern)); - - // Set up database - setUpDatabase(kDbName, kShardId0); - - // Set up 10 imbalanced collections - // Routing table for each collection: - // Shard0 -> {minKey, maxKey} (512 MB) - // Shard1 -> {} ( 0 MB) - const int numCollections = 10; +TEST_F(BalancerChunkSelectionTest, CollectionsSelectedShouldBeCached) { + setupShards({kShard0, kShard1}); + setupDatabase(kDbName, kShardId0); + + // Set up 4 collections + const int numCollections = 4; for (auto i = 0; i < numCollections; ++i) { - const std::string collName = "TestColl" + std::to_string(i); setUpCollectionWithChunks( - NamespaceString(kDbName, collName), - {{kShardId0, {{kKeyPattern.globalMin(), kKeyPattern.globalMax()}}}, {kShardId1, {}}}); + NamespaceString::createNamespaceString_forTest(kDbName, "TestColl" + std::to_string(i)), + generateDefaultChunkRanges({kShardId0, kShardId1})); } std::set collectionsSelected; @@ -471,25 +534,19 @@ TEST_F(BalancerChunkSelectionTest, SelectedCollectionsShouldBeCached) { ThreadClient tc(getServiceContext()); auto opCtx = Client::getCurrent()->makeOperationContext(); - // Requests chunks to be relocated requires running commands on each shard to - // get shard statistics. Set up dummy hosts for the source shards. - shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0); - shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1); - std::vector shardStats = - uassertStatusOK(_clusterStats.get()->getStats(opCtx.get())); - auto availableShards = getAllShardIds(shardStats); + const auto& chunksToMove = selectChunksToMove(opCtx.get()); - auto chunksToMoveWithStatus = _chunkSelectionPolicy->selectChunksToMove( - opCtx.get(), shardStats, &availableShards, &_imbalancedCollectionsCache); - ASSERT_OK(chunksToMoveWithStatus.getStatus()); - - for (const auto& chunkToMove : chunksToMoveWithStatus.getValue()) { + for (const auto& chunkToMove : chunksToMove) { collectionsSelected.insert(chunkToMove.nss); } }); expectGetStatsCommands(2 /*numShards*/); + + // Collection size distribution for each collection: + // Shard0 -> 512 MB + // Shard1 -> 0 MB expectGetStatsForBalancingCommands( {{kShardId0, 512 * 1024 * 1024 /*Bytes*/}, {kShardId1, 0 /*Bytes*/}}); @@ -504,31 +561,20 @@ TEST_F(BalancerChunkSelectionTest, SelectedCollectionsShouldBeCached) { } TEST_F(BalancerChunkSelectionTest, CachedCollectionsShouldBeSelected) { - // Set up two shards in the metadata. 
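Editorial note: the two cache tests around this point (`CollectionsSelectedShouldBeCached` and `CachedCollectionsShouldBeSelected`) pin down the contract of `_imbalancedCollectionsCache`, the set of namespaces handed to `selectChunksToMove`: namespaces found imbalanced in one round are remembered so a later round can consider them before rediscovering them. The sketch below is only one plausible shape of such a round in plain standard C++, under assumed names (`selectOneRound`, the predicate parameter); it is not the balancer's actual implementation.

```cpp
// Plausible sketch: favour previously cached imbalanced namespaces, then
// refresh the cache with what this round actually selected.
#include <string>
#include <unordered_set>
#include <vector>

std::vector<std::string> selectOneRound(const std::vector<std::string>& allCollections,
                                        std::unordered_set<std::string>& imbalancedCache,
                                        size_t batchSize,
                                        bool (*isImbalanced)(const std::string&)) {
    // Start the work list with cached namespaces, then append the rest.
    std::vector<std::string> workList(imbalancedCache.begin(), imbalancedCache.end());
    for (const auto& nss : allCollections)
        if (!imbalancedCache.count(nss))
            workList.push_back(nss);

    std::vector<std::string> selected;
    for (size_t i = 0; i < workList.size() && i < batchSize; ++i) {
        if (isImbalanced(workList[i])) {
            selected.push_back(workList[i]);
            imbalancedCache.insert(workList[i]);  // remember for the next round
        } else {
            imbalancedCache.erase(workList[i]);   // balanced again, drop from cache
        }
    }
    return selected;
}
```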
- ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - kShard0, - kMajorityWriteConcern)); - ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigsvrShardsNamespace, - kShard1, - kMajorityWriteConcern)); - - // Set up database - setUpDatabase(kDbName, kShardId0); + setupShards({kShard0, kShard1}); + setupDatabase(kDbName, kShardId0); _imbalancedCollectionsCache.clear(); std::vector allCollections; - // Set up 10 imbalanced collections and add all them in the imbalanced collections cache - const int numCollections = 10; + // Set up 4 collections and add all them into the imbalanced collections cache + const int numCollections = 4; for (auto i = 0; i < numCollections; ++i) { - NamespaceString nss(kDbName, "TestColl" + std::to_string(i)); - allCollections.push_back(nss); - setUpCollectionWithChunks( - nss, - {{kShardId0, {{kKeyPattern.globalMin(), kKeyPattern.globalMax()}}}, {kShardId1, {}}}); + NamespaceString nss = + NamespaceString::createNamespaceString_forTest(kDbName, "TestColl" + std::to_string(i)); + setUpCollectionWithChunks(nss, generateDefaultChunkRanges({kShardId0, kShardId1})); + allCollections.push_back(nss); _imbalancedCollectionsCache.insert(nss); } @@ -540,25 +586,18 @@ TEST_F(BalancerChunkSelectionTest, CachedCollectionsShouldBeSelected) { ThreadClient tc(getServiceContext()); auto opCtx = Client::getCurrent()->makeOperationContext(); - // Requests chunks to be relocated requires running commands on each shard to - // get shard statistics. Set up dummy hosts for the source shards. - shardTargeterMock(opCtx.get(), kShardId0)->setFindHostReturnValue(kShardHost0); - shardTargeterMock(opCtx.get(), kShardId1)->setFindHostReturnValue(kShardHost1); - - std::vector shardStats = - uassertStatusOK(_clusterStats.get()->getStats(opCtx.get())); - auto availableShards = getAllShardIds(shardStats); + const auto& chunksToMove = selectChunksToMove(opCtx.get()); - auto chunksToMoveWithStatus = _chunkSelectionPolicy->selectChunksToMove( - opCtx.get(), shardStats, &availableShards, &_imbalancedCollectionsCache); - ASSERT_OK(chunksToMoveWithStatus.getStatus()); - - for (const auto& chunkToMove : chunksToMoveWithStatus.getValue()) { + for (const auto& chunkToMove : chunksToMove) { collectionsSelected.insert(chunkToMove.nss); } }); expectGetStatsCommands(2 /*numShards*/); + + // Collection size distribution for each collection: + // Shard0 -> 512 MB + // Shard1 -> 0 MB expectGetStatsForBalancingCommands( {{kShardId0, 512 * 1024 * 1024 /*Bytes*/}, {kShardId1, 0 /*Bytes*/}}); @@ -575,5 +614,336 @@ TEST_F(BalancerChunkSelectionTest, CachedCollectionsShouldBeSelected) { } ASSERT_EQ(allCollections.size(), collectionsSelected.size()); } + +TEST_F(BalancerChunkSelectionTest, MaxTimeToScheduleBalancingOperationsExceeded) { + setupShards({kShard0, kShard1, kShard2, kShard3}); + setupDatabase(kDbName, kShardId0); + + // Override collections batch size to 4 for speeding up the test + FailPointEnableBlock overrideBatchSizeGuard("overrideStatsForBalancingBatchSize", + BSON("size" << 4)); + + // Set up 5 collections to process more than 1 batch + for (auto i = 0U; i < 5; ++i) { + setUpCollectionWithChunks( + NamespaceString::createNamespaceString_forTest(kDbName, "coll" + std::to_string(i)), + generateDefaultChunkRanges({kShardId0, kShardId1, kShardId2, kShardId3})); + } + + auto future = launchAsync([&] { + ThreadClient tc(getServiceContext()); + auto opCtx = Client::getCurrent()->makeOperationContext(); + + 
// Forcing timeout to exceed by setting it to 0 + RAIIServerParameterControllerForTest balancerChunksSelectionTimeoutMsIsZero( + "balancerChunksSelectionTimeoutMs", 0); + + const auto& chunksToMove = selectChunksToMove(opCtx.get()); + + // We know that timeout exceeded because we only got 1 migration instead of the 2 migrations + // expected in a normal scenario with 4 shards + ASSERT_EQUALS(1U, chunksToMove.size()); + }); + + expectGetStatsCommands(4); + + // We need to get at least 1 migration per batch since the timeout only exceeds when balancer + // has found at least one candidate migration On the other side, we must get less than 2 + // migrations per batch since the maximum number of migrations per balancing round is 2 (with 4 + // shards) + expectGetStatsForBalancingCommandsWithOneMigration( + 4 /*numShards*/, kShardId0 /*donor*/, kShardId1 /*recipient*/); + + future.default_timed_get(); +} + +TEST_F(BalancerChunkSelectionTest, MoreThanOneBatchIsProcessedIfNeeded) { + setupShards({kShard0, kShard1, kShard2, kShard3}); + setupDatabase(kDbName, kShardId0); + + // Override collections batch size to 4 for speeding up the test + FailPointEnableBlock overrideBatchSizeGuard("overrideStatsForBalancingBatchSize", + BSON("size" << 4)); + + // Set up 5 collections to process 2 batches + for (auto i = 0; i < 5; ++i) { + setUpCollectionWithChunks( + NamespaceString::createNamespaceString_forTest(kDbName, "coll" + std::to_string(i)), + generateDefaultChunkRanges({kShardId0, kShardId1, kShardId2, kShardId3})); + } + + auto future = launchAsync([&] { + ThreadClient tc(getServiceContext()); + auto opCtx = Client::getCurrent()->makeOperationContext(); + + const auto& chunksToMove = selectChunksToMove(opCtx.get()); + + ASSERT_EQUALS(2U, chunksToMove.size()); + }); + + expectGetStatsCommands(4); + + // We are scheduling one migration on the first batch to make sure that the second batch is + // processed + expectGetStatsForBalancingCommandsWithOneMigration( + 4 /*numShards*/, kShardId0 /*donor*/, kShardId1 /*recipient*/); + expectGetStatsForBalancingCommandsWithOneMigration( + 4 /*numShards*/, kShardId2 /*donor*/, kShardId3 /*recipient*/); + + future.default_timed_get(); +} + +TEST_F(BalancerChunkSelectionTest, StopChunksSelectionIfThereAreNoMoreShardsAvailable) { + setupShards({kShard0, kShard1}); + setupDatabase(kDbName, kShardId0); + + // Override collections batch size to 4 for speeding up the test + FailPointEnableBlock overrideBatchSizeGuard("overrideStatsForBalancingBatchSize", + BSON("size" << 4)); + + // Set up 10 collections (more than 1 batch) + const int numCollections = 10; + for (auto i = 0; i < numCollections; ++i) { + setUpCollectionWithChunks( + NamespaceString::createNamespaceString_forTest(kDbName, "coll" + std::to_string(i)), + generateDefaultChunkRanges({kShardId0, kShardId1})); + } + + auto future = launchAsync([&] { + ThreadClient tc(getServiceContext()); + auto opCtx = Client::getCurrent()->makeOperationContext(); + + const auto& chunksToMove = selectChunksToMove(opCtx.get()); + + ASSERT_EQUALS(1U, chunksToMove.size()); + }); + + expectGetStatsCommands(2 /*numShards*/); + + // Only 1 batch must be processed, so we expect only 1 call to `getStatsForBalancing` since + // the cluster has 2 shards + expectGetStatsForBalancingCommandsWithOneMigration( + 2 /*numShards*/, kShardId0 /*donor*/, kShardId1 /*recipient*/); + + future.default_timed_get(); +} + +TEST_F(BalancerChunkSelectionTest, DontSelectChunksFromCollectionsWithDefragmentationEnabled) { + setupShards({kShard0, kShard1}); 
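Editorial note on the timeout and multi-batch tests above: taken together with their comments, they describe a loop that keeps processing collection batches until there is nothing left to do, no shards remain available, or `balancerChunksSelectionTimeoutMs` has elapsed, and the timeout is only honoured once at least one candidate migration has been found. A hedged sketch of that deadline check with standard `<chrono>` (function and type names are illustrative only):

```cpp
// Sketch of a deadline-bounded batch loop: stop early only when the timeout
// has elapsed AND at least one candidate has already been collected.
#include <chrono>
#include <string>
#include <vector>

struct Candidate {
    std::string nss;
};

// `pickFromBatch` stands in for the per-batch selection work.
std::vector<Candidate> collectCandidates(
    const std::vector<std::vector<std::string>>& batches,
    std::chrono::milliseconds timeout,
    std::vector<Candidate> (*pickFromBatch)(const std::vector<std::string>&)) {
    const auto deadline = std::chrono::steady_clock::now() + timeout;
    std::vector<Candidate> candidates;
    for (const auto& batch : batches) {
        auto picked = pickFromBatch(batch);
        candidates.insert(candidates.end(), picked.begin(), picked.end());
        // A zero timeout therefore still yields at least one migration, which is
        // exactly what MaxTimeToScheduleBalancingOperationsExceeded asserts.
        if (!candidates.empty() && std::chrono::steady_clock::now() > deadline)
            break;
    }
    return candidates;
}
```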
+ setupDatabase(kDbName, kShardId0); + + const auto uuid1 = + setUpCollectionWithChunks(NamespaceString::createNamespaceString_forTest(kDbName, "coll1"), + generateDefaultChunkRanges({kShardId0, kShardId1})); + const auto uuid2 = + setUpCollectionWithChunks(NamespaceString::createNamespaceString_forTest(kDbName, "coll2"), + generateDefaultChunkRanges({kShardId0, kShardId1})); + + // Enable defragmentation on collection 1 + ASSERT_OK(updateToConfigCollection( + operationContext(), + NamespaceString::kConfigsvrCollectionsNamespace, + BSON(CollectionType::kUuidFieldName << uuid1), + BSON("$set" << BSON(CollectionType::kDefragmentCollectionFieldName << true)), + false)); + + auto future = launchAsync([&] { + ThreadClient tc(getServiceContext()); + auto opCtx = Client::getCurrent()->makeOperationContext(); + + const auto& chunksToMove = selectChunksToMove(opCtx.get()); + + ASSERT_EQ(1, chunksToMove.size()); + ASSERT_EQ(uuid2, chunksToMove[0].uuid); + }); + + expectGetStatsCommands(2 /*numShards*/); + expectGetStatsForBalancingCommandsWithOneMigration( + 2 /*numShards*/, kShardId0 /*donor*/, kShardId1 /*recipient*/); + future.default_timed_get(); +} + +TEST_F(BalancerChunkSelectionTest, DontSelectChunksFromCollectionsWithBalancingDisabled) { + setupShards({kShard0, kShard1}); + setupDatabase(kDbName, kShardId0); + + const auto uuid1 = setUpCollectionWithChunks( + NamespaceString::createNamespaceString_forTest(kDbName, "TestColl1"), + generateDefaultChunkRanges({kShardId0, kShardId1})); + const auto uuid2 = setUpCollectionWithChunks( + NamespaceString::createNamespaceString_forTest(kDbName, "TestColl2"), + generateDefaultChunkRanges({kShardId0, kShardId1})); + + // Disable balancing on collection 1 + ASSERT_OK(updateToConfigCollection(operationContext(), + CollectionType::ConfigNS, + BSON(CollectionType::kUuidFieldName << uuid1), + BSON("$set" << BSON("noBalance" << true)), + false)); + + auto future = launchAsync([&] { + ThreadClient tc(getServiceContext()); + auto opCtx = Client::getCurrent()->makeOperationContext(); + + const auto& chunksToMove = selectChunksToMove(opCtx.get()); + + ASSERT_EQ(1, chunksToMove.size()); + ASSERT_EQ(uuid2, chunksToMove[0].uuid); + }); + + expectGetStatsCommands(2 /*numShards*/); + expectGetStatsForBalancingCommandsWithOneMigration( + 2 /*numShards*/, kShardId0 /*donor*/, kShardId1 /*recipient*/); + future.default_timed_get(); +} + +TEST_F(BalancerChunkSelectionTest, DontGetMigrationCandidatesIfAllCollectionsAreBalanced) { + setupShards({kShard0, kShard1}); + setupDatabase(kDbName, kShardId0); + + // Override collections batch size to 4 for speeding up the test + FailPointEnableBlock overrideBatchSizeGuard("overrideStatsForBalancingBatchSize", + BSON("size" << 4)); + + // Set up 7 collections (2 batches) + const int numCollections = 7; + for (auto i = 0; i < numCollections; ++i) { + setUpCollectionWithChunks( + NamespaceString::createNamespaceString_forTest(kDbName, "TestColl" + std::to_string(i)), + generateDefaultChunkRanges({kShardId0, kShardId1})); + } + + auto future = launchAsync([&] { + ThreadClient tc(getServiceContext()); + auto opCtx = Client::getCurrent()->makeOperationContext(); + + const auto& chunksToMove = selectChunksToMove(opCtx.get()); + + ASSERT(chunksToMove.empty()); + }); + + expectGetStatsCommands(2 /*numShards*/); + + // Expecting 2 calls to getStatsForBalancing commands since there are 7 collections (2 batches) + // All collection distributions are balanced: + // Shard0 -> 512 MB + // Shard1 -> 500 MB + expectGetStatsForBalancingCommands( + 
{{kShardId0, 512 * 1024 * 1024 /*Bytes*/}, {kShardId1, 500 * 1024 * 1024 /*Bytes*/}}); + expectGetStatsForBalancingCommands( + {{kShardId0, 512 * 1024 * 1024 /*Bytes*/}, {kShardId1, 500 * 1024 * 1024 /*Bytes*/}}); + + future.default_timed_get(); +} + +TEST_F(BalancerChunkSelectionTest, SelectChunksToSplit) { + setupShards({appendZones(kShard0, {"A"}), appendZones(kShard1, {"B"})}); + setupDatabase(kDbName, kShardId0); + + // Create 3 collections with no zones + for (auto i = 0; i < 3; ++i) { + setUpCollectionWithChunks( + NamespaceString::createNamespaceString_forTest(kDbName, "TestColl" + std::to_string(i)), + generateDefaultChunkRanges({kShardId0, kShardId1})); + } + + // Setup the collection under test with the following routing table: + // Shard0 -> [MinKey, 0) + // Shard1 -> [0, MaxKey) + const auto nss = NamespaceString::createNamespaceString_forTest(kDbName, "TestColl"); + setUpCollectionWithChunks(nss, generateDefaultChunkRanges({kShardId0, kShardId1})); + + // Lambda function to assign specific zones to the collection under test and verify the output + // of `selectChunksToSplit` + auto assertSplitPoints = + [&](const StringMap& zoneChunkRanges, + const BSONObjIndexedMap& expectedSplitPointsPerChunk) { + setUpZones(nss, zoneChunkRanges); + + LOGV2(7381300, "Getting split points", "zoneChunkRanges"_attr = zoneChunkRanges); + + auto future = launchAsync([&] { + ThreadClient tc(getServiceContext()); + auto opCtx = Client::getCurrent()->makeOperationContext(); + + const auto& swSplitInfo = + _chunkSelectionPolicy.get()->selectChunksToSplit(opCtx.get()); + ASSERT_OK(swSplitInfo.getStatus()); + + for (const auto& [chunkMin, splitPoints] : expectedSplitPointsPerChunk) { + + bool found = false; + for (const auto& splitInfo : swSplitInfo.getValue()) { + if (splitInfo.minKey.woCompare(chunkMin) == 0 && splitInfo.nss == nss) { + found = true; + ASSERT_EQ(splitPoints.size(), splitInfo.splitKeys.size()); + for (size_t i = 0; i < splitPoints.size(); ++i) { + ASSERT_EQ(splitPoints[i].woCompare(splitInfo.splitKeys[i]), 0) + << "Expected " << splitPoints[i].toString() << " but got " + << splitInfo.splitKeys[i].toString(); + } + } + } + ASSERT(found) << "Expected to find split points for chunk " + << chunkMin.toString() << " but didn't"; + } + + ASSERT_EQ(expectedSplitPointsPerChunk.size(), swSplitInfo.getValue().size()) + << "Got unexpected split points"; + }); + expectGetStatsCommands(2 /*numShards*/); + future.default_timed_get(); + removeAllZones(nss); + }; + + // Zone A: [-20, -10) + // Expected split point: -20, -10 + assertSplitPoints( + {{"A", {BSON(kPattern << -20), BSON(kPattern << -10)}}}, + SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap( + {{kKeyPattern.globalMin(), {BSON(kPattern << -20), BSON(kPattern << -10)}}})); + + // Zone A: [10, 20) + // Expected split points: 10, 20 + assertSplitPoints({{"A", {BSON(kPattern << 10), BSON(kPattern << 20)}}}, + SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap( + {{BSON(kPattern << 0), {BSON(kPattern << 10), BSON(kPattern << 20)}}})); + + // Zone A: [MinKey, 10) + // Expected split point: 10 + assertSplitPoints({{"A", {kKeyPattern.globalMin(), BSON(kPattern << 10)}}}, + SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap( + {{BSON(kPattern << 0), {BSON(kPattern << 10)}}})); + + // Zone B: [-10, MaxKey) + // Expected split point: -10 + assertSplitPoints({{"B", {BSON(kPattern << -10), kKeyPattern.globalMax()}}}, + SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap( + {{kKeyPattern.globalMin(), {BSON(kPattern << -10)}}})); 
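Editorial note: all of the `assertSplitPoints` cases in this test, including the remaining ones below, follow a single rule: for each zone bound, find the chunk that contains it and, if the bound is not already a chunk boundary, record it as a split key for that chunk. A standalone sketch with integer keys (`INT_MIN`/`INT_MAX` standing in for MinKey/MaxKey; helper names invented for illustration) reproduces those expectations:

```cpp
// Sketch: zone bounds that fall strictly inside a chunk become split keys of
// that chunk.
#include <cassert>
#include <climits>
#include <initializer_list>
#include <map>
#include <vector>

struct Chunk {
    int min;
    int max;  // exclusive
};

// Returns {chunk.min -> split keys} for one zone [zoneMin, zoneMax).
std::map<int, std::vector<int>> splitPointsForZone(const std::vector<Chunk>& chunks,
                                                   int zoneMin,
                                                   int zoneMax) {
    std::map<int, std::vector<int>> result;
    for (int bound : {zoneMin, zoneMax}) {
        for (const auto& chunk : chunks) {
            if (bound > chunk.min && bound < chunk.max)
                result[chunk.min].push_back(bound);
        }
    }
    return result;
}

int main() {
    // Routing table from the test: Shard0 -> [MinKey, 0), Shard1 -> [0, MaxKey).
    std::vector<Chunk> chunks{{INT_MIN, 0}, {0, INT_MAX}};

    // Zone A: [10, 20) -> both bounds fall inside [0, MaxKey).
    auto splits = splitPointsForZone(chunks, 10, 20);
    assert((splits[0] == std::vector<int>{10, 20}));

    // Zone A: [0, MaxKey) -> both bounds are already chunk boundaries, no splits.
    assert(splitPointsForZone(chunks, 0, INT_MAX).empty());
}
```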
+ + + // Zone A: [-10, 20) + // Expected split points: 6 + assertSplitPoints({{"A", {BSON(kPattern << -10), BSON(kPattern << 20)}}}, + SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap( + {{kKeyPattern.globalMin(), {BSON(kPattern << -10)}}, + {BSON(kPattern << 0), {BSON(kPattern << 20)}}})); + + // Zone B: [-20, -10) + // Zone A: [-10, 20) + // Expected split points: -20, -10, 20 + assertSplitPoints( + {{"A", {BSON(kPattern << -20), BSON(kPattern << -10)}}, + {"B", {BSON(kPattern << -10), BSON(kPattern << 20)}}}, + SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap( + {{kKeyPattern.globalMin(), {BSON(kPattern << -20), BSON(kPattern << -10)}}, + {BSON(kPattern << 0), {BSON(kPattern << 20)}}})); + + // Zone A: [0, MaxKey) + // Expected split point: NONE + assertSplitPoints({{"A", {BSON(kPattern << 0), kKeyPattern.globalMax()}}}, + SimpleBSONObjComparator::kInstance.makeBSONObjIndexedMap()); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler.h b/src/mongo/db/s/balancer/balancer_commands_scheduler.h index 2c0e69000c011..2d4ac4ba1faa0 100644 --- a/src/mongo/db/s/balancer/balancer_commands_scheduler.h +++ b/src/mongo/db/s/balancer/balancer_commands_scheduler.h @@ -29,11 +29,21 @@ #pragma once +#include + #include "mongo/bson/bsonobj.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/balancer/balancer_policy.h" #include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/task_executor.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp index 82db31f043716..bc73c98cda8c8 100644 --- a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp +++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.cpp @@ -29,17 +29,42 @@ #include "mongo/db/s/balancer/balancer_commands_scheduler_impl.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/client/read_preference.h" +#include "mongo/client/remote_command_targeter.h" #include "mongo/db/client.h" -#include "mongo/db/dbdirectclient.h" #include "mongo/db/s/sharding_util.h" #include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/s/request_types/migration_secondary_throttle_options.h" #include "mongo/s/request_types/shardsvr_join_migrations_request_gen.h" -#include "mongo/s/shard_key_pattern.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -82,11 +107,7 @@ Status processRemoteResponse(const executor::RemoteCommandResponse& remoteRespon return remoteResponse.status; } auto remoteStatus = getStatusFromCommandResult(remoteResponse.data); - return Shard::shouldErrorBePropagated(remoteStatus.code()) - ? remoteStatus - : Status(ErrorCodes::OperationFailed, - str::stream() << "Command request failed on source shard. " - << causedBy(remoteStatus)); + return remoteStatus.withContext("Command request failed on source shard."); } } // namespace @@ -360,6 +381,13 @@ void BalancerCommandsSchedulerImpl::_workerThread() { }); Client::initThread("BalancerCommandsScheduler"); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + bool stopWorkerRequested = false; LOGV2(5847205, "Balancer scheduler thread started"); diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.h b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.h index 627d3a0834828..1e3654868afaf 100644 --- a/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.h +++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_impl.h @@ -29,19 +29,56 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/database_name.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/balancer/auto_merger_policy.h" #include "mongo/db/s/balancer/balancer_commands_scheduler.h" +#include "mongo/db/s/balancer/balancer_policy.h" #include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/scoped_task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/client/shard.h" #include "mongo/s/request_types/merge_chunk_request_gen.h" #include "mongo/s/request_types/migration_secondary_throttle_options.h" #include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/s/shard_version.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -158,7 +195,7 @@ class MergeChunksCommandInfo : public CommandInfo { boundsArrayBuilder.append(_lowerBoundKey).append(_upperBoundKey); BSONObjBuilder commandBuilder; - commandBuilder.append(kCommandName, getNameSpace().toString()) + commandBuilder.append(kCommandName, NamespaceStringUtil::serialize(getNameSpace())) .appendArray(kBounds, boundsArrayBuilder.arr()) .append(kShardName, getTarget().toString()) .append(kEpoch, _version.epoch()) @@ -201,7 +238,7 @@ class DataSizeCommandInfo : public CommandInfo { 
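Editorial note on the `processRemoteResponse` change above: previously, remote errors not explicitly allowed to propagate were collapsed into a generic `OperationFailed`; the new version keeps the original error code and only adds context text via `withContext`. The contrast is illustrated below with a simplified stand-in for `Status` (not the real class, just two strings), since callers that match on error codes behave very differently under the two schemes.

```cpp
// Simplified stand-in for Status: contrast "replace the code" with
// "keep the code, add context" when surfacing a remote command failure.
#include <iostream>
#include <string>

struct SimpleStatus {
    std::string code;
    std::string reason;
};

// Old behaviour (roughly): many errors became OperationFailed, hiding the cause.
SimpleStatus wrapByReplacing(const SimpleStatus& remote) {
    return {"OperationFailed",
            "Command request failed on source shard. :: caused by :: " + remote.reason};
}

// New behaviour: the original code survives, so callers can still match on it.
SimpleStatus wrapWithContext(const SimpleStatus& remote) {
    return {remote.code,
            "Command request failed on source shard. :: caused by :: " + remote.reason};
}

int main() {
    SimpleStatus remote{"ChunkTooBig", "chunk is larger than the configured limit"};
    std::cout << wrapByReplacing(remote).code << "\n";   // OperationFailed
    std::cout << wrapWithContext(remote).code << "\n";   // ChunkTooBig
}
```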
BSONObj serialise() const override { BSONObjBuilder commandBuilder; - commandBuilder.append(kCommandName, getNameSpace().toString()) + commandBuilder.append(kCommandName, NamespaceStringUtil::serialize(getNameSpace())) .append(kKeyPattern, _shardKeyPattern) .append(kMinValue, _lowerBoundKey) .append(kMaxValue, _upperBoundKey) diff --git a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp index 575cdc0bec23d..5d652e8147c8a 100644 --- a/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp +++ b/src/mongo/db/s/balancer/balancer_commands_scheduler_test.cpp @@ -27,13 +27,46 @@ * it in the license file. */ +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/s/balancer/balancer_commands_scheduler.h" #include "mongo/db/s/balancer/balancer_commands_scheduler_impl.h" #include "mongo/db/s/config/config_server_test_fixture.h" -#include "mongo/s/catalog/sharding_catalog_client_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/index_version.h" #include "mongo/s/request_types/move_range_request_gen.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy.cpp new file mode 100644 index 0000000000000..fe594de7d4bfb --- /dev/null +++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy.cpp @@ -0,0 +1,1628 @@ +/** + * Copyright (C) 2021-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. 
If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/s/balancer/balancer_defragmentation_policy.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/client.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/s/balancer/cluster_statistics.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/write_concern.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/random.h" +#include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/grid.h" +#include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding + + +using namespace fmt::literals; + +namespace mongo { + +namespace { + +MONGO_FAIL_POINT_DEFINE(skipDefragmentationPhaseTransition); +MONGO_FAIL_POINT_DEFINE(afterBuildingNextDefragmentationPhase); + +using ShardStatistics = ClusterStatistics::ShardStatistics; + +const std::string kCurrentPhase("currentPhase"); +const std::string kProgress("progress"); +const std::string kNoPhase("none"); +const std::string kRemainingChunksToProcess("remainingChunksToProcess"); + +static constexpr int64_t kBigChunkMarker = std::numeric_limits::max(); + +ShardVersion getShardVersion(OperationContext* opCtx, + const ShardId& shardId, + const NamespaceString& nss) { + auto cri = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss); + return cri.getShardVersion(shardId); +} + +std::vector getCollectionChunks(OperationContext* opCtx, const CollectionType& coll) { + auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); + return uassertStatusOK( + catalogClient->getChunks(opCtx, + BSON(ChunkType::collectionUUID() << coll.getUuid()) /*query*/, + BSON(ChunkType::min() << 1) /*sort*/, + boost::none /*limit*/, + nullptr 
/*opTime*/, + coll.getEpoch(), + coll.getTimestamp(), + repl::ReadConcernLevel::kLocalReadConcern, + boost::none)); +} + +uint64_t getCollectionMaxChunkSizeBytes(OperationContext* opCtx, const CollectionType& coll) { + const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration(); + uassertStatusOK(balancerConfig->refreshAndCheck(opCtx)); + return coll.getMaxChunkSizeBytes().value_or(balancerConfig->getMaxChunkSizeBytes()); +} + +ZoneInfo getCollectionZones(OperationContext* opCtx, const CollectionType& coll) { + auto zones = uassertStatusOK( + ZoneInfo::getZonesForCollection(opCtx, coll.getNss(), coll.getKeyPattern())); + return zones; +} + +bool isRetriableForDefragmentation(const Status& status) { + if (ErrorCodes::isA(status)) + return true; + + if (status == ErrorCodes::StaleConfig) { + if (auto staleInfo = status.extraInfo()) { + // If the staleInfo error contains a "wanted" version, this means the donor shard which + // returned this error has its versioning information up-to-date (as opposed to UNKNOWN) + // and it couldn't find the chunk that the defragmenter expected. Such a situation can + // only arise as a result of manual split/merge/move concurrently with the defragmenter. + return !staleInfo->getVersionWanted(); + } + } + + return false; +} + +void handleActionResult(OperationContext* opCtx, + const NamespaceString& nss, + const UUID& uuid, + const DefragmentationPhaseEnum currentPhase, + const Status& status, + std::function onSuccess, + std::function onRetriableError, + std::function onNonRetriableError) { + if (status.isOK()) { + onSuccess(); + return; + } + + if (status == ErrorCodes::StaleConfig) { + if (auto staleInfo = status.extraInfo()) { + Grid::get(opCtx) + ->catalogCache() + ->invalidateShardOrEntireCollectionEntryForShardedCollection( + nss, staleInfo->getVersionWanted(), staleInfo->getShardId()); + } + } + + if (isRetriableForDefragmentation(status)) { + LOGV2_DEBUG(6261701, + 1, + "Hit retriable error while defragmenting collection", + logAttrs(nss), + "uuid"_attr = uuid, + "currentPhase"_attr = currentPhase, + "error"_attr = redact(status)); + onRetriableError(); + } else { + LOGV2_ERROR(6258601, + "Defragmentation for collection hit non-retriable error", + logAttrs(nss), + "uuid"_attr = uuid, + "currentPhase"_attr = currentPhase, + "error"_attr = redact(status)); + onNonRetriableError(); + } +} + +bool areMergeable(const ChunkType& firstChunk, + const ChunkType& secondChunk, + const ZoneInfo& collectionZones) { + return firstChunk.getShard() == secondChunk.getShard() && + collectionZones.getZoneForChunk(firstChunk.getRange()) == + collectionZones.getZoneForChunk(secondChunk.getRange()) && + SimpleBSONObjComparator::kInstance.evaluate(firstChunk.getMax() == secondChunk.getMin()); +} + +class MergeAndMeasureChunksPhase : public DefragmentationPhase { +public: + static std::unique_ptr build(OperationContext* opCtx, + const CollectionType& coll) { + auto collectionChunks = getCollectionChunks(opCtx, coll); + const auto collectionZones = getCollectionZones(opCtx, coll); + + // Calculate small chunk threshold to limit dataSize commands + const auto maxChunkSizeBytes = getCollectionMaxChunkSizeBytes(opCtx, coll); + const int64_t smallChunkSizeThreshold = + (maxChunkSizeBytes / 100) * kSmallChunkSizeThresholdPctg; + + stdx::unordered_map pendingActionsByShards; + // Find ranges of chunks; for single-chunk ranges, request DataSize; for multi-range, issue + // merge + while (!collectionChunks.empty()) { + auto upperRangeBound = 
std::prev(collectionChunks.cend()); + auto lowerRangeBound = upperRangeBound; + while (lowerRangeBound != collectionChunks.cbegin() && + areMergeable(*std::prev(lowerRangeBound), *lowerRangeBound, collectionZones)) { + --lowerRangeBound; + } + if (lowerRangeBound != upperRangeBound) { + pendingActionsByShards[upperRangeBound->getShard()].rangesToMerge.emplace_back( + lowerRangeBound->getMin(), upperRangeBound->getMax()); + } else { + if (!upperRangeBound->getEstimatedSizeBytes().has_value()) { + pendingActionsByShards[upperRangeBound->getShard()] + .rangesWithoutDataSize.emplace_back(upperRangeBound->getMin(), + upperRangeBound->getMax()); + } + } + collectionChunks.erase(lowerRangeBound, std::next(upperRangeBound)); + } + return std::unique_ptr( + new MergeAndMeasureChunksPhase(coll.getNss(), + coll.getUuid(), + coll.getKeyPattern().toBSON(), + smallChunkSizeThreshold, + std::move(pendingActionsByShards))); + } + + DefragmentationPhaseEnum getType() const override { + return DefragmentationPhaseEnum::kMergeAndMeasureChunks; + } + + DefragmentationPhaseEnum getNextPhase() const override { + return _nextPhase; + } + + boost::optional popNextStreamableAction( + OperationContext* opCtx) override { + boost::optional nextAction = boost::none; + if (!_pendingActionsByShards.empty()) { + auto it = _shardToProcess ? _pendingActionsByShards.find(*_shardToProcess) + : _pendingActionsByShards.begin(); + + invariant(it != _pendingActionsByShards.end()); + + auto& [shardId, pendingActions] = *it; + auto shardVersion = getShardVersion(opCtx, shardId, _nss); + + if (pendingActions.rangesWithoutDataSize.size() > pendingActions.rangesToMerge.size()) { + const auto& rangeToMeasure = pendingActions.rangesWithoutDataSize.back(); + nextAction = boost::optional( + DataSizeInfo(shardId, + _nss, + _uuid, + rangeToMeasure, + shardVersion, + _shardKey, + true /* estimate */, + _smallChunkSizeThresholdBytes /* maxSize */)); + pendingActions.rangesWithoutDataSize.pop_back(); + } else if (!pendingActions.rangesToMerge.empty()) { + const auto& rangeToMerge = pendingActions.rangesToMerge.back(); + nextAction = boost::optional( + MergeInfo(shardId, _nss, _uuid, shardVersion.placementVersion(), rangeToMerge)); + pendingActions.rangesToMerge.pop_back(); + } + if (nextAction.has_value()) { + ++_outstandingActions; + if (pendingActions.rangesToMerge.empty() && + pendingActions.rangesWithoutDataSize.empty()) { + it = _pendingActionsByShards.erase(it, std::next(it)); + } else { + ++it; + } + } + if (it != _pendingActionsByShards.end()) { + _shardToProcess = it->first; + } else { + _shardToProcess = boost::none; + } + } + return nextAction; + } + + boost::optional popNextMigration( + OperationContext* opCtx, stdx::unordered_set* availableShards) override { + return boost::none; + } + + void applyActionResult(OperationContext* opCtx, + const BalancerStreamAction& action, + const BalancerStreamActionResponse& response) override { + ScopeGuard scopedGuard([&] { --_outstandingActions; }); + if (_aborted) { + return; + } + stdx::visit(OverloadedVisitor{ + [&](const MergeInfo& mergeAction) { + auto& mergeResponse = stdx::get(response); + auto& shardingPendingActions = + _pendingActionsByShards[mergeAction.shardId]; + handleActionResult( + opCtx, + _nss, + _uuid, + getType(), + mergeResponse, + [&]() { + shardingPendingActions.rangesWithoutDataSize.emplace_back( + mergeAction.chunkRange); + }, + [&]() { + shardingPendingActions.rangesToMerge.emplace_back( + mergeAction.chunkRange); + }, + [&]() { _abort(getType()); }); + }, + 
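Editorial note: `applyActionResult` dispatches on the action type with `stdx::visit` and an `OverloadedVisitor`, i.e. the standard "overloaded lambdas over a variant" idiom. A self-contained C++17 version of the idiom is shown below, with toy action types standing in for `MergeInfo` and `DataSizeInfo`; it is a sketch of the idiom, not MongoDB's `OverloadedVisitor` itself.

```cpp
// The "overloaded" idiom: one lambda per alternative of the variant, combined
// into a single visitor type.
#include <iostream>
#include <string>
#include <variant>

struct MergeAction { std::string range; };
struct DataSizeAction { std::string range; };

template <class... Ts>
struct Overloaded : Ts... {
    using Ts::operator()...;
};
template <class... Ts>
Overloaded(Ts...) -> Overloaded<Ts...>;  // deduction guide (implicit in C++20)

using BalancerAction = std::variant<MergeAction, DataSizeAction>;

int main() {
    BalancerAction action = DataSizeAction{"[0, 10)"};
    std::visit(Overloaded{
                   [](const MergeAction& a) { std::cout << "merge " << a.range << "\n"; },
                   [](const DataSizeAction& a) { std::cout << "dataSize " << a.range << "\n"; },
               },
               action);
}
```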
[&](const DataSizeInfo& dataSizeAction) { + auto& dataSizeResponse = + stdx::get>(response); + handleActionResult( + opCtx, + _nss, + _uuid, + getType(), + dataSizeResponse.getStatus(), + [&]() { + ChunkType chunk(dataSizeAction.uuid, + dataSizeAction.chunkRange, + dataSizeAction.version.placementVersion(), + dataSizeAction.shardId); + auto catalogManager = ShardingCatalogManager::get(opCtx); + // Max out the chunk size if it has has been estimated as bigger + // than _smallChunkSizeThresholdBytes; this will exlude the + // chunk from the list of candidates considered by + // MoveAndMergeChunksPhase + auto estimatedSize = dataSizeResponse.getValue().maxSizeReached + ? kBigChunkMarker + : dataSizeResponse.getValue().sizeBytes; + catalogManager->setChunkEstimatedSize( + opCtx, + chunk, + estimatedSize, + ShardingCatalogClient::kMajorityWriteConcern); + }, + [&]() { + auto& shardingPendingActions = + _pendingActionsByShards[dataSizeAction.shardId]; + shardingPendingActions.rangesWithoutDataSize.emplace_back( + dataSizeAction.chunkRange); + }, + [&]() { _abort(getType()); }); + }, + [](const MigrateInfo& _) { + uasserted(ErrorCodes::BadValue, "Unexpected action type"); + }, + [](const MergeAllChunksOnShardInfo& _) { + uasserted(ErrorCodes::BadValue, "Unexpected action type"); + }}, + action); + } + + bool isComplete() const override { + return _pendingActionsByShards.empty() && _outstandingActions == 0; + } + + void userAbort() override { + _abort(DefragmentationPhaseEnum::kFinished); + } + + BSONObj reportProgress() const override { + + size_t rangesToMerge = 0, rangesWithoutDataSize = 0; + for (const auto& [_, pendingActions] : _pendingActionsByShards) { + rangesToMerge += pendingActions.rangesToMerge.size(); + rangesWithoutDataSize += pendingActions.rangesWithoutDataSize.size(); + } + auto remainingChunksToProcess = static_cast(_outstandingActions) + + static_cast(rangesToMerge) + static_cast(rangesWithoutDataSize); + + return BSON(kRemainingChunksToProcess << remainingChunksToProcess); + } + +private: + struct PendingActions { + std::vector rangesToMerge; + std::vector rangesWithoutDataSize; + }; + MergeAndMeasureChunksPhase( + const NamespaceString& nss, + const UUID& uuid, + const BSONObj& shardKey, + const int64_t smallChunkSizeThresholdBytes, + stdx::unordered_map&& pendingActionsByShards) + : _nss(nss), + _uuid(uuid), + _shardKey(shardKey), + _smallChunkSizeThresholdBytes(smallChunkSizeThresholdBytes), + _pendingActionsByShards(std::move(pendingActionsByShards)) {} + + void _abort(const DefragmentationPhaseEnum nextPhase) { + _aborted = true; + _nextPhase = nextPhase; + _pendingActionsByShards.clear(); + } + + const NamespaceString _nss; + const UUID _uuid; + const BSONObj _shardKey; + const int64_t _smallChunkSizeThresholdBytes; + stdx::unordered_map _pendingActionsByShards; + boost::optional _shardToProcess; + size_t _outstandingActions{0}; + bool _aborted{false}; + DefragmentationPhaseEnum _nextPhase{DefragmentationPhaseEnum::kMoveAndMergeChunks}; +}; + +class MoveAndMergeChunksPhase : public DefragmentationPhase { +public: + static std::unique_ptr build( + OperationContext* opCtx, + const CollectionType& coll, + std::vector&& collectionShardStats) { + auto collectionZones = getCollectionZones(opCtx, coll); + + stdx::unordered_map shardInfos; + for (const auto& shardStats : collectionShardStats) { + shardInfos.emplace(shardStats.shardId, + ShardInfo(shardStats.currSizeBytes, shardStats.isDraining)); + } + + auto collectionChunks = getCollectionChunks(opCtx, coll); + const auto 
maxChunkSizeBytes = getCollectionMaxChunkSizeBytes(opCtx, coll); + const uint64_t smallChunkSizeThresholdBytes = + (maxChunkSizeBytes / 100) * kSmallChunkSizeThresholdPctg; + + return std::unique_ptr( + new MoveAndMergeChunksPhase(coll.getNss(), + coll.getUuid(), + std::move(collectionChunks), + std::move(shardInfos), + std::move(collectionZones), + smallChunkSizeThresholdBytes, + maxChunkSizeBytes)); + } + + DefragmentationPhaseEnum getType() const override { + return DefragmentationPhaseEnum::kMoveAndMergeChunks; + } + + DefragmentationPhaseEnum getNextPhase() const override { + return _nextPhase; + } + + boost::optional popNextStreamableAction( + OperationContext* opCtx) override { + if (_actionableMerges.empty()) { + return boost::none; + } + + _outstandingMerges.push_back(std::move(_actionableMerges.front())); + _actionableMerges.pop_front(); + const auto& nextRequest = _outstandingMerges.back(); + auto version = getShardVersion(opCtx, nextRequest.getDestinationShard(), _nss); + return boost::optional( + nextRequest.asMergeInfo(_uuid, _nss, version.placementVersion())); + } + + boost::optional popNextMigration( + OperationContext* opCtx, stdx::unordered_set* availableShards) override { + for (const auto& shardId : _shardProcessingOrder) { + if (availableShards->count(shardId) == 0) { + // the shard is already busy in a migration + continue; + } + + ChunkRangeInfoIterator nextSmallChunk; + std::list candidateSiblings; + if (!_findNextSmallChunkInShard( + shardId, *availableShards, &nextSmallChunk, &candidateSiblings)) { + // there isn't a chunk in this shard that can currently be moved and merged with one + // of its siblings. + continue; + } + + // We have a chunk that can be moved&merged with at least one sibling. Choose one... + invariant(candidateSiblings.size() <= 2); + auto targetSibling = candidateSiblings.front(); + if (auto challenger = candidateSiblings.back(); targetSibling != challenger) { + auto targetScore = _rankMergeableSibling(*nextSmallChunk, *targetSibling); + auto challengerScore = _rankMergeableSibling(*nextSmallChunk, *challenger); + if (challengerScore > targetScore || + (challengerScore == targetScore && + _shardInfos.at(challenger->shard).currentSizeBytes < + _shardInfos.at(targetSibling->shard).currentSizeBytes)) { + targetSibling = challenger; + } + } + + // ... then build up the migration request, marking the needed resources as busy. 
+ nextSmallChunk->busyInOperation = true; + targetSibling->busyInOperation = true; + availableShards->erase(nextSmallChunk->shard); + availableShards->erase(targetSibling->shard); + auto smallChunkVersion = getShardVersion(opCtx, nextSmallChunk->shard, _nss); + _outstandingMigrations.emplace_back(nextSmallChunk, targetSibling); + return _outstandingMigrations.back().asMigrateInfo( + _uuid, _nss, smallChunkVersion.placementVersion(), _maxChunkSizeBytes); + } + + return boost::none; + } + + void applyActionResult(OperationContext* opCtx, + const BalancerStreamAction& action, + const BalancerStreamActionResponse& response) override { + stdx::visit( + OverloadedVisitor{ + [&](const MigrateInfo& migrationAction) { + auto& migrationResponse = stdx::get(response); + auto match = + std::find_if(_outstandingMigrations.begin(), + _outstandingMigrations.end(), + [&migrationAction](const MoveAndMergeRequest& request) { + return (migrationAction.minKey.woCompare( + request.getMigrationMinKey()) == 0); + }); + invariant(match != _outstandingMigrations.end()); + MoveAndMergeRequest moveRequest(std::move(*match)); + _outstandingMigrations.erase(match); + + if (_aborted) { + return; + } + + if (migrationResponse.isOK()) { + Grid::get(opCtx) + ->catalogCache() + ->invalidateShardOrEntireCollectionEntryForShardedCollection( + _nss, boost::none, moveRequest.getDestinationShard()); + + auto transferredAmount = moveRequest.getMovedDataSizeBytes(); + invariant(transferredAmount <= _smallChunkSizeThresholdBytes); + _shardInfos.at(moveRequest.getSourceShard()).currentSizeBytes -= + transferredAmount; + _shardInfos.at(moveRequest.getDestinationShard()).currentSizeBytes += + transferredAmount; + _shardProcessingOrder.sort([this](const ShardId& lhs, const ShardId& rhs) { + return _shardInfos.at(lhs).currentSizeBytes > + _shardInfos.at(rhs).currentSizeBytes; + }); + _actionableMerges.push_back(std::move(moveRequest)); + return; + } + + LOGV2_DEBUG(6290000, + 1, + "Migration failed during collection defragmentation", + logAttrs(_nss), + "uuid"_attr = _uuid, + "currentPhase"_attr = getType(), + "error"_attr = redact(migrationResponse)); + + moveRequest.chunkToMove->busyInOperation = false; + moveRequest.chunkToMergeWith->busyInOperation = false; + + if (migrationResponse.code() == ErrorCodes::ChunkTooBig || + migrationResponse.code() == ErrorCodes::ExceededMemoryLimit) { + // Never try moving this chunk again, it isn't actually small + _removeIteratorFromSmallChunks(moveRequest.chunkToMove, + moveRequest.chunkToMove->shard); + return; + } + + if (isRetriableForDefragmentation(migrationResponse)) { + // The migration will be eventually retried + return; + } + + const auto exceededTimeLimit = [&] { + // All errors thrown by the migration destination shard are converted + // into OperationFailed. Thus we need to inspect the error message to + // match the real error code. + + // TODO SERVER-62990 introduce and propagate specific error code for + // migration failed due to range deletion pending + return migrationResponse == ErrorCodes::OperationFailed && + migrationResponse.reason().find(ErrorCodes::errorString( + ErrorCodes::ExceededTimeLimit)) != std::string::npos; + }; + + if (exceededTimeLimit()) { + // The migration failed because there is still a range deletion + // pending on the recipient. 
+ moveRequest.chunkToMove->shardsToAvoid.emplace( + moveRequest.getDestinationShard()); + return; + } + + LOGV2_ERROR(6290001, + "Encountered non-retriable error on migration during " + "collection defragmentation", + logAttrs(_nss), + "uuid"_attr = _uuid, + "currentPhase"_attr = getType(), + "error"_attr = redact(migrationResponse)); + _abort(DefragmentationPhaseEnum::kMergeAndMeasureChunks); + }, + [&](const MergeInfo& mergeAction) { + auto& mergeResponse = stdx::get(response); + auto match = std::find_if(_outstandingMerges.begin(), + _outstandingMerges.end(), + [&mergeAction](const MoveAndMergeRequest& request) { + return mergeAction.chunkRange.containsKey( + request.getMigrationMinKey()); + }); + invariant(match != _outstandingMerges.end()); + MoveAndMergeRequest mergeRequest(std::move(*match)); + _outstandingMerges.erase(match); + + auto onSuccess = [&] { + // The sequence is complete; update the state of the merged chunk... + auto& mergedChunk = mergeRequest.chunkToMergeWith; + + Grid::get(opCtx) + ->catalogCache() + ->invalidateShardOrEntireCollectionEntryForShardedCollection( + _nss, boost::none, mergedChunk->shard); + + auto& chunkToDelete = mergeRequest.chunkToMove; + mergedChunk->range = mergeRequest.asMergedRange(); + if (mergedChunk->estimatedSizeBytes != kBigChunkMarker && + chunkToDelete->estimatedSizeBytes != kBigChunkMarker) { + mergedChunk->estimatedSizeBytes += chunkToDelete->estimatedSizeBytes; + } else { + mergedChunk->estimatedSizeBytes = kBigChunkMarker; + } + + mergedChunk->busyInOperation = false; + auto deletedChunkShard = chunkToDelete->shard; + // the lookup data structures... + _removeIteratorFromSmallChunks(chunkToDelete, deletedChunkShard); + if (mergedChunk->estimatedSizeBytes > _smallChunkSizeThresholdBytes) { + _removeIteratorFromSmallChunks(mergedChunk, mergedChunk->shard); + } else { + // Keep the list of small chunk iterators in the recipient sorted + auto match = _smallChunksByShard.find(mergedChunk->shard); + if (match != _smallChunksByShard.end()) { + auto& [_, smallChunksInRecipient] = *match; + smallChunksInRecipient.sort(compareChunkRangeInfoIterators); + } + } + //... 
and the collection + _collectionChunks.erase(chunkToDelete); + }; + + auto onRetriableError = [&] { + _actionableMerges.push_back(std::move(mergeRequest)); + }; + + auto onNonRetriableError = [&]() { + _abort(DefragmentationPhaseEnum::kMergeAndMeasureChunks); + }; + + if (!_aborted) { + handleActionResult(opCtx, + _nss, + _uuid, + getType(), + mergeResponse, + onSuccess, + onRetriableError, + onNonRetriableError); + } + }, + [](const DataSizeInfo& dataSizeAction) { + uasserted(ErrorCodes::BadValue, "Unexpected action type"); + }, + [](const MergeAllChunksOnShardInfo& _) { + uasserted(ErrorCodes::BadValue, "Unexpected action type"); + }}, + action); + } + + bool isComplete() const override { + return _smallChunksByShard.empty() && _outstandingMigrations.empty() && + _actionableMerges.empty() && _outstandingMerges.empty(); + } + + void userAbort() override { + _abort(DefragmentationPhaseEnum::kFinished); + } + + BSONObj reportProgress() const override { + size_t numSmallChunks = 0; + for (const auto& [shardId, smallChunks] : _smallChunksByShard) { + numSmallChunks += smallChunks.size(); + } + return BSON(kRemainingChunksToProcess << static_cast(numSmallChunks)); + } + +private: + // Internal representation of the chunk metadata required to generate a MoveAndMergeRequest + struct ChunkRangeInfo { + ChunkRangeInfo(ChunkRange&& range, const ShardId& shard, long long estimatedSizeBytes) + : range(std::move(range)), + shard(shard), + estimatedSizeBytes(estimatedSizeBytes), + busyInOperation(false) {} + ChunkRange range; + const ShardId shard; + long long estimatedSizeBytes; + bool busyInOperation; + // Last time we failed to find a suitable destination shard due to temporary constraints + boost::optional lastFailedAttemptTime; + // Shards that still have a deletion pending for this range + stdx::unordered_set shardsToAvoid; + }; + + struct ShardInfo { + ShardInfo(uint64_t currentSizeBytes, bool draining) + : currentSizeBytes(currentSizeBytes), draining(draining) {} + + bool isDraining() const { + return draining; + } + + uint64_t currentSizeBytes; + const bool draining; + }; + + using ChunkRangeInfos = std::list; + using ChunkRangeInfoIterator = ChunkRangeInfos::iterator; + + static bool compareChunkRangeInfoIterators(const ChunkRangeInfoIterator& lhs, + const ChunkRangeInfoIterator& rhs) { + // Small chunks are ordered by decreasing order of estimatedSizeBytes + // except the ones that we failed to move due to temporary constraints that will be at the + // end of the list ordered by last attempt time + auto lhsLastFailureTime = lhs->lastFailedAttemptTime.value_or(Date_t::min()); + auto rhsLastFailureTime = rhs->lastFailedAttemptTime.value_or(Date_t::min()); + return std::tie(lhsLastFailureTime, lhs->estimatedSizeBytes) < + std::tie(rhsLastFailureTime, rhs->estimatedSizeBytes); + } + + // Helper class to generate the Migration and Merge actions required to join together the chunks + // specified in the constructor + struct MoveAndMergeRequest { + public: + MoveAndMergeRequest(const ChunkRangeInfoIterator& chunkToMove, + const ChunkRangeInfoIterator& chunkToMergeWith) + : chunkToMove(chunkToMove), + chunkToMergeWith(chunkToMergeWith), + _isChunkToMergeLeftSibling( + chunkToMergeWith->range.getMax().woCompare(chunkToMove->range.getMin()) == 0) {} + + MigrateInfo asMigrateInfo(const UUID& collUuid, + const NamespaceString& nss, + const ChunkVersion& version, + uint64_t maxChunkSizeBytes) const { + return MigrateInfo(chunkToMergeWith->shard, + chunkToMove->shard, + nss, + collUuid, + 
chunkToMove->range.getMin(), + chunkToMove->range.getMax(), + version, + ForceJumbo::kDoNotForce, + maxChunkSizeBytes); + } + + ChunkRange asMergedRange() const { + return ChunkRange(_isChunkToMergeLeftSibling ? chunkToMergeWith->range.getMin() + : chunkToMove->range.getMin(), + _isChunkToMergeLeftSibling ? chunkToMove->range.getMax() + : chunkToMergeWith->range.getMax()); + } + + MergeInfo asMergeInfo(const UUID& collUuid, + const NamespaceString& nss, + const ChunkVersion& version) const { + return MergeInfo(chunkToMergeWith->shard, nss, collUuid, version, asMergedRange()); + } + + const ShardId& getSourceShard() const { + return chunkToMove->shard; + } + + const ShardId& getDestinationShard() const { + return chunkToMergeWith->shard; + } + + const BSONObj& getMigrationMinKey() const { + return chunkToMove->range.getMin(); + } + + int64_t getMovedDataSizeBytes() const { + return chunkToMove->estimatedSizeBytes; + } + + ChunkRangeInfoIterator chunkToMove; + ChunkRangeInfoIterator chunkToMergeWith; + + private: + bool _isChunkToMergeLeftSibling; + }; + + const NamespaceString _nss; + + const UUID _uuid; + + // The collection routing table - expressed in ChunkRangeInfo + ChunkRangeInfos _collectionChunks; + + // List of indexes to elements in _collectionChunks that are eligible to be moved. + std::map> _smallChunksByShard; + + stdx::unordered_map _shardInfos; + + // Sorted list of shard IDs by decreasing current size (@see _shardInfos) + std::list _shardProcessingOrder; + + // Set of attributes representing the currently active move&merge sequences + std::list _outstandingMigrations; + std::list _actionableMerges; + std::list _outstandingMerges; + + ZoneInfo _zoneInfo; + + const int64_t _smallChunkSizeThresholdBytes; + + const uint64_t _maxChunkSizeBytes; + + bool _aborted{false}; + + DefragmentationPhaseEnum _nextPhase{DefragmentationPhaseEnum::kMergeChunks}; + + MoveAndMergeChunksPhase(const NamespaceString& nss, + const UUID& uuid, + std::vector&& collectionChunks, + stdx::unordered_map&& shardInfos, + ZoneInfo&& collectionZones, + uint64_t smallChunkSizeThresholdBytes, + uint64_t maxChunkSizeBytes) + : _nss(nss), + _uuid(uuid), + _collectionChunks(), + _smallChunksByShard(), + _shardInfos(std::move(shardInfos)), + _shardProcessingOrder(), + _outstandingMigrations(), + _actionableMerges(), + _outstandingMerges(), + _zoneInfo(std::move(collectionZones)), + _smallChunkSizeThresholdBytes(smallChunkSizeThresholdBytes), + _maxChunkSizeBytes(maxChunkSizeBytes) { + + // Load the collection routing table in a std::list to ease later manipulation + for (auto&& chunk : collectionChunks) { + if (!chunk.getEstimatedSizeBytes().has_value()) { + LOGV2_WARNING( + 6172701, + "Chunk with no estimated size detected while building MoveAndMergeChunksPhase", + logAttrs(_nss), + "uuid"_attr = _uuid, + "range"_attr = chunk.getRange()); + _abort(DefragmentationPhaseEnum::kMergeAndMeasureChunks); + return; + } + const uint64_t estimatedChunkSize = chunk.getEstimatedSizeBytes().value(); + _collectionChunks.emplace_back(chunk.getRange(), chunk.getShard(), estimatedChunkSize); + } + + // Compose the index of small chunks + for (auto chunkIt = _collectionChunks.begin(); chunkIt != _collectionChunks.end(); + ++chunkIt) { + if (chunkIt->estimatedSizeBytes <= _smallChunkSizeThresholdBytes) { + _smallChunksByShard[chunkIt->shard].emplace_back(chunkIt); + } + } + // Each small chunk within a shard must be sorted by increasing chunk size + for (auto& [_, smallChunksInShard] : _smallChunksByShard) { + 
smallChunksInShard.sort(compareChunkRangeInfoIterators); + } + + // Set the initial shard processing order + for (const auto& [shardId, _] : _shardInfos) { + _shardProcessingOrder.push_back(shardId); + } + _shardProcessingOrder.sort([this](const ShardId& lhs, const ShardId& rhs) { + return _shardInfos.at(lhs).currentSizeBytes > _shardInfos.at(rhs).currentSizeBytes; + }); + } + + void _abort(const DefragmentationPhaseEnum nextPhase) { + _aborted = true; + _nextPhase = nextPhase; + _actionableMerges.clear(); + _smallChunksByShard.clear(); + _shardProcessingOrder.clear(); + } + + // Returns the list of siblings that are eligible to be move&merged with the specified chunk, + // based on shard zones and data capacity. (It does NOT take into account whether chunks are + // currently involved in a move/merge operation). + std::list _getChunkSiblings( + const ChunkRangeInfoIterator& chunkIt) const { + std::list siblings; + auto canBeMoveAndMerged = [this](const ChunkRangeInfoIterator& chunkIt, + const ChunkRangeInfoIterator& siblingIt) { + auto onSameZone = _zoneInfo.getZoneForChunk(chunkIt->range) == + _zoneInfo.getZoneForChunk(siblingIt->range); + auto destinationAvailable = chunkIt->shard == siblingIt->shard || + !_shardInfos.at(siblingIt->shard).isDraining(); + return (onSameZone && destinationAvailable); + }; + + if (auto rightSibling = std::next(chunkIt); + rightSibling != _collectionChunks.end() && canBeMoveAndMerged(chunkIt, rightSibling)) { + siblings.push_back(rightSibling); + } + if (chunkIt != _collectionChunks.begin()) { + auto leftSibling = std::prev(chunkIt); + if (canBeMoveAndMerged(chunkIt, leftSibling)) { + siblings.push_back(leftSibling); + } + } + return siblings; + } + + // Computes whether there is a chunk in the specified shard that can be moved&merged with one or + // both of its siblings. Chunks/siblings that are currently being moved/merged are not eligible. + // + // The function also clears the internal state from elements that cannot be processed by the + // phase (chunks with no siblings, shards with no small chunks). + // + // Returns true on success (storing the related info in nextSmallChunk + smallChunkSiblings), + // false otherwise. + bool _findNextSmallChunkInShard(const ShardId& shard, + const stdx::unordered_set& availableShards, + ChunkRangeInfoIterator* nextSmallChunk, + std::list* smallChunkSiblings) { + auto matchingShardInfo = _smallChunksByShard.find(shard); + if (matchingShardInfo == _smallChunksByShard.end()) { + return false; + } + + smallChunkSiblings->clear(); + auto& smallChunksInShard = matchingShardInfo->second; + for (auto candidateIt = smallChunksInShard.begin(); + candidateIt != smallChunksInShard.end();) { + if ((*candidateIt)->busyInOperation) { + ++candidateIt; + continue; + } + auto candidateSiblings = _getChunkSiblings(*candidateIt); + if (candidateSiblings.empty()) { + // The current chunk cannot be processed by the algorithm - remove it. 
+ candidateIt = smallChunksInShard.erase(candidateIt); + continue; + } + + size_t siblingsDiscardedDueToRangeDeletion = 0; + + for (const auto& sibling : candidateSiblings) { + if (sibling->busyInOperation || !availableShards.count(sibling->shard)) { + continue; + } + if ((*candidateIt)->shardsToAvoid.count(sibling->shard)) { + ++siblingsDiscardedDueToRangeDeletion; + continue; + } + smallChunkSiblings->push_back(sibling); + } + + if (!smallChunkSiblings->empty()) { + *nextSmallChunk = *candidateIt; + return true; + } + + + if (siblingsDiscardedDueToRangeDeletion == candidateSiblings.size()) { + // All the siblings have been discarded because an overlapping range deletion is + // still pending on the destination shard. + if (!(*candidateIt)->lastFailedAttemptTime) { + // This is the first time we discard this chunk due to overlapping range + // deletions pending. Enqueue it back on the list so we will try to move it + // again when we will have drained all the other chunks for this shard. + LOGV2_DEBUG(6290002, + 1, + "Postponing small chunk processing due to pending range deletion " + "on recipient shard(s)", + logAttrs(_nss), + "uuid"_attr = _uuid, + "range"_attr = (*candidateIt)->range, + "estimatedSizeBytes"_attr = (*candidateIt)->estimatedSizeBytes, + "numCandidateSiblings"_attr = candidateSiblings.size()); + (*candidateIt)->lastFailedAttemptTime = Date_t::now(); + (*candidateIt)->shardsToAvoid.clear(); + smallChunksInShard.emplace_back(*candidateIt); + } else { + LOGV2(6290003, + "Discarding small chunk due to pending range deletion on recipient shard", + logAttrs(_nss), + "uuid"_attr = _uuid, + "range"_attr = (*candidateIt)->range, + "estimatedSizeBytes"_attr = (*candidateIt)->estimatedSizeBytes, + "numCandidateSiblings"_attr = candidateSiblings.size(), + "lastFailedAttempt"_attr = (*candidateIt)->lastFailedAttemptTime); + } + candidateIt = smallChunksInShard.erase(candidateIt); + continue; + } + + ++candidateIt; + } + // No candidate could be found - clear the shard entry if needed + if (smallChunksInShard.empty()) { + _smallChunksByShard.erase(matchingShardInfo); + } + return false; + } + + uint32_t _rankMergeableSibling(const ChunkRangeInfo& chunkTobeMovedAndMerged, + const ChunkRangeInfo& mergeableSibling) { + static constexpr uint32_t kNoMoveRequired = 1 << 3; + static constexpr uint32_t kConvenientMove = 1 << 2; + static constexpr uint32_t kMergeSolvesTwoPendingChunks = 1 << 1; + static constexpr uint32_t kMergeSolvesOnePendingChunk = 1; + uint32_t ranking = 0; + if (chunkTobeMovedAndMerged.shard == mergeableSibling.shard) { + ranking += kNoMoveRequired; + } else if (chunkTobeMovedAndMerged.estimatedSizeBytes < + mergeableSibling.estimatedSizeBytes) { + ranking += kConvenientMove; + } + auto estimatedMergedSize = (chunkTobeMovedAndMerged.estimatedSizeBytes == kBigChunkMarker || + mergeableSibling.estimatedSizeBytes == kBigChunkMarker) + ? kBigChunkMarker + : chunkTobeMovedAndMerged.estimatedSizeBytes + mergeableSibling.estimatedSizeBytes; + if (estimatedMergedSize > _smallChunkSizeThresholdBytes) { + ranking += mergeableSibling.estimatedSizeBytes < _smallChunkSizeThresholdBytes + ? 
kMergeSolvesTwoPendingChunks + : kMergeSolvesOnePendingChunk; + } + + return ranking; + } + + void _removeIteratorFromSmallChunks(const ChunkRangeInfoIterator& chunkIt, + const ShardId& parentShard) { + auto matchingShardIt = _smallChunksByShard.find(parentShard); + if (matchingShardIt == _smallChunksByShard.end()) { + return; + } + auto& smallChunksInShard = matchingShardIt->second; + auto match = std::find(smallChunksInShard.begin(), smallChunksInShard.end(), chunkIt); + if (match == smallChunksInShard.end()) { + return; + } + smallChunksInShard.erase(match); + if (smallChunksInShard.empty()) { + _smallChunksByShard.erase(parentShard); + } + } +}; + +class MergeChunksPhase : public DefragmentationPhase { +public: + static std::unique_ptr build(OperationContext* opCtx, + const CollectionType& coll) { + auto collectionChunks = getCollectionChunks(opCtx, coll); + const auto collectionZones = getCollectionZones(opCtx, coll); + + // Find ranges of mergeable chunks + stdx::unordered_map> unmergedRangesByShard; + while (!collectionChunks.empty()) { + auto upperRangeBound = std::prev(collectionChunks.cend()); + auto lowerRangeBound = upperRangeBound; + while (lowerRangeBound != collectionChunks.cbegin() && + areMergeable(*std::prev(lowerRangeBound), *lowerRangeBound, collectionZones)) { + --lowerRangeBound; + } + if (lowerRangeBound != upperRangeBound) { + unmergedRangesByShard[upperRangeBound->getShard()].emplace_back( + lowerRangeBound->getMin(), upperRangeBound->getMax()); + } + + collectionChunks.erase(lowerRangeBound, std::next(upperRangeBound)); + } + return std::unique_ptr( + new MergeChunksPhase(coll.getNss(), coll.getUuid(), std::move(unmergedRangesByShard))); + } + + DefragmentationPhaseEnum getType() const override { + return DefragmentationPhaseEnum::kMergeChunks; + } + + DefragmentationPhaseEnum getNextPhase() const override { + return _nextPhase; + } + + boost::optional popNextStreamableAction( + OperationContext* opCtx) override { + if (_unmergedRangesByShard.empty()) { + return boost::none; + } + + auto it = _shardToProcess ? 
_unmergedRangesByShard.find(*_shardToProcess) + : _unmergedRangesByShard.begin(); + + invariant(it != _unmergedRangesByShard.end()); + + auto& [shardId, unmergedRanges] = *it; + invariant(!unmergedRanges.empty()); + auto shardVersion = getShardVersion(opCtx, shardId, _nss); + const auto& rangeToMerge = unmergedRanges.back(); + boost::optional nextAction = boost::optional( + MergeInfo(shardId, _nss, _uuid, shardVersion.placementVersion(), rangeToMerge)); + unmergedRanges.pop_back(); + ++_outstandingActions; + if (unmergedRanges.empty()) { + it = _unmergedRangesByShard.erase(it, std::next(it)); + } else { + ++it; + } + if (it != _unmergedRangesByShard.end()) { + _shardToProcess = it->first; + } else { + _shardToProcess = boost::none; + } + + return nextAction; + } + + boost::optional popNextMigration( + OperationContext* opCtx, stdx::unordered_set* availableShards) override { + return boost::none; + } + + void applyActionResult(OperationContext* opCtx, + const BalancerStreamAction& action, + const BalancerStreamActionResponse& response) override { + ScopeGuard scopedGuard([&] { --_outstandingActions; }); + if (_aborted) { + return; + } + stdx::visit( + OverloadedVisitor{[&](const MergeInfo& mergeAction) { + auto& mergeResponse = stdx::get(response); + auto onSuccess = [] { + }; + auto onRetriableError = [&] { + _unmergedRangesByShard[mergeAction.shardId].emplace_back( + mergeAction.chunkRange); + }; + auto onNonretriableError = [this] { + _abort(getType()); + }; + handleActionResult(opCtx, + _nss, + _uuid, + getType(), + mergeResponse, + onSuccess, + onRetriableError, + onNonretriableError); + }, + [](const DataSizeInfo& _) { + uasserted(ErrorCodes::BadValue, "Unexpected action type"); + }, + [](const MigrateInfo& _) { + uasserted(ErrorCodes::BadValue, "Unexpected action type"); + }, + [](const MergeAllChunksOnShardInfo& _) { + uasserted(ErrorCodes::BadValue, "Unexpected action type"); + }}, + action); + } + + bool isComplete() const override { + return _unmergedRangesByShard.empty() && _outstandingActions == 0; + } + + void userAbort() override { + _abort(DefragmentationPhaseEnum::kFinished); + } + + BSONObj reportProgress() const override { + size_t rangesToMerge = 0; + for (const auto& [_, unmergedRanges] : _unmergedRangesByShard) { + rangesToMerge += unmergedRanges.size(); + } + auto remainingRangesToProcess = + static_cast(_outstandingActions) + static_cast(rangesToMerge); + + return BSON(kRemainingChunksToProcess << remainingRangesToProcess); + } + +private: + MergeChunksPhase(const NamespaceString& nss, + const UUID& uuid, + stdx::unordered_map>&& unmergedRangesByShard) + : _nss(nss), _uuid(uuid), _unmergedRangesByShard(std::move(unmergedRangesByShard)) {} + + void _abort(const DefragmentationPhaseEnum nextPhase) { + _aborted = true; + _nextPhase = nextPhase; + _unmergedRangesByShard.clear(); + } + + const NamespaceString _nss; + const UUID _uuid; + stdx::unordered_map> _unmergedRangesByShard; + boost::optional _shardToProcess; + size_t _outstandingActions{0}; + bool _aborted{false}; + DefragmentationPhaseEnum _nextPhase{DefragmentationPhaseEnum::kFinished}; +}; + +} // namespace + +void BalancerDefragmentationPolicy::startCollectionDefragmentations(OperationContext* opCtx) { + stdx::lock_guard lk(_stateMutex); + + // Fetch all collections with `defragmentCollection` flag enabled + static const auto query = BSON(CollectionType::kDefragmentCollectionFieldName << true); + const auto& configShard = ShardingCatalogManager::get(opCtx)->localConfigShard(); + const auto& collDocs = 
uassertStatusOK(configShard->exhaustiveFindOnConfig( + opCtx, + ReadPreferenceSetting(ReadPreference::Nearest), + repl::ReadConcernLevel::kMajorityReadConcern, + NamespaceString::kConfigsvrCollectionsNamespace, + query, + BSONObj(), + boost::none)) + .docs; + + for (const BSONObj& obj : collDocs) { + const CollectionType coll{obj}; + if (_defragmentationStates.contains(coll.getUuid())) { + continue; + } + _initializeCollectionState(lk, opCtx, coll); + } + _onStateUpdated(); +} + +void BalancerDefragmentationPolicy::abortCollectionDefragmentation(OperationContext* opCtx, + const NamespaceString& nss) { + stdx::lock_guard lk(_stateMutex); + auto coll = + ShardingCatalogManager::get(opCtx)->localCatalogClient()->getCollection(opCtx, nss, {}); + if (coll.getDefragmentCollection()) { + if (_defragmentationStates.contains(coll.getUuid())) { + // Notify phase to abort current phase + _defragmentationStates.at(coll.getUuid())->userAbort(); + _onStateUpdated(); + } + _persistPhaseUpdate(opCtx, DefragmentationPhaseEnum::kFinished, coll.getUuid()); + } +} + +void BalancerDefragmentationPolicy::interruptAllDefragmentations() { + stdx::lock_guard lk(_stateMutex); + _defragmentationStates.clear(); +} + +bool BalancerDefragmentationPolicy::isDefragmentingCollection(const UUID& uuid) { + stdx::lock_guard lk(_stateMutex); + return _defragmentationStates.contains(uuid); +} + +BSONObj BalancerDefragmentationPolicy::reportProgressOn(const UUID& uuid) { + stdx::lock_guard lk(_stateMutex); + auto match = _defragmentationStates.find(uuid); + if (match == _defragmentationStates.end() || !match->second) { + return BSON(kCurrentPhase << kNoPhase); + } + const auto& collDefragmentationPhase = match->second; + return BSON( + kCurrentPhase << DefragmentationPhase_serializer(collDefragmentationPhase->getType()) + << kProgress << collDefragmentationPhase->reportProgress()); +} + +MigrateInfoVector BalancerDefragmentationPolicy::selectChunksToMove( + OperationContext* opCtx, stdx::unordered_set* availableShards) { + + MigrateInfoVector chunksToMove; + { + stdx::lock_guard lk(_stateMutex); + + std::vector collectionUUIDs; + collectionUUIDs.reserve(_defragmentationStates.size()); + for (const auto& defragState : _defragmentationStates) { + collectionUUIDs.push_back(defragState.first); + } + + auto client = opCtx->getClient(); + std::shuffle(collectionUUIDs.begin(), collectionUUIDs.end(), client->getPrng().urbg()); + + auto popCollectionUUID = + [&](std::vector::iterator elemIt) -> std::vector::iterator { + if (std::next(elemIt) == collectionUUIDs.end()) { + return collectionUUIDs.erase(elemIt); + } + + *elemIt = std::move(collectionUUIDs.back()); + collectionUUIDs.pop_back(); + return elemIt; + }; + + while (!collectionUUIDs.empty()) { + for (auto it = collectionUUIDs.begin(); it != collectionUUIDs.end();) { + const auto& collUUID = *it; + + if (availableShards->size() == 0) { + return chunksToMove; + } + + try { + auto defragStateIt = _defragmentationStates.find(collUUID); + if (defragStateIt == _defragmentationStates.end()) { + it = popCollectionUUID(it); + continue; + }; + + auto& collDefragmentationPhase = defragStateIt->second; + if (!collDefragmentationPhase) { + _defragmentationStates.erase(defragStateIt); + it = popCollectionUUID(it); + continue; + } + auto actionableMigration = + collDefragmentationPhase->popNextMigration(opCtx, availableShards); + if (!actionableMigration.has_value()) { + it = popCollectionUUID(it); + continue; + } + chunksToMove.push_back(std::move(*actionableMigration)); + ++it; + } catch 
(DBException& e) { + // Catch getCollection and getShardVersion errors. Should only occur if + // collection has been removed. + LOGV2_ERROR(6172700, + "Error while getting next migration", + "uuid"_attr = collUUID, + "error"_attr = redact(e)); + _defragmentationStates.erase(collUUID); + it = popCollectionUUID(it); + } + } + } + } + + if (chunksToMove.empty()) { + // If the policy cannot produce new migrations even in absence of temporary constraints, it + // is possible that some streaming actions must be processed first. Notify an update of the + // internal state to make it happen. + _onStateUpdated(); + } + return chunksToMove; +} + +StringData BalancerDefragmentationPolicy::getName() const { + return StringData(kPolicyName); +} + +boost::optional BalancerDefragmentationPolicy::getNextStreamingAction( + OperationContext* opCtx) { + stdx::lock_guard lk(_stateMutex); + // Visit the defrag state in round robin fashion starting from a random one + auto stateIt = [&] { + auto it = _defragmentationStates.begin(); + if (_defragmentationStates.size() > 1) { + auto client = opCtx->getClient(); + std::advance(it, client->getPrng().nextInt32(_defragmentationStates.size())); + } + return it; + }(); + + for (auto stateToVisit = _defragmentationStates.size(); stateToVisit != 0; --stateToVisit) { + try { + _advanceToNextActionablePhase(opCtx, stateIt->first); + auto& currentCollectionDefragmentationState = stateIt->second; + if (currentCollectionDefragmentationState) { + // Get next action + auto nextAction = + currentCollectionDefragmentationState->popNextStreamableAction(opCtx); + if (nextAction) { + return nextAction; + } + ++stateIt; + } else { + stateIt = _defragmentationStates.erase(stateIt, std::next(stateIt)); + } + } catch (DBException& e) { + // Catch getCollection and getShardVersion errors. Should only occur if collection has + // been removed. 
+ LOGV2_ERROR(6153301, + "Error while getting next defragmentation action", + "uuid"_attr = stateIt->first, + "error"_attr = redact(e)); + stateIt = _defragmentationStates.erase(stateIt, std::next(stateIt)); + } + + if (stateIt == _defragmentationStates.end()) { + stateIt = _defragmentationStates.begin(); + } + } + + return boost::none; +} + +bool BalancerDefragmentationPolicy::_advanceToNextActionablePhase(OperationContext* opCtx, + const UUID& collUuid) { + auto& currentPhase = _defragmentationStates.at(collUuid); + auto phaseTransitionNeeded = [&currentPhase] { + return currentPhase && currentPhase->isComplete() && + MONGO_likely(!skipDefragmentationPhaseTransition.shouldFail()); + }; + bool advanced = false; + boost::optional coll(boost::none); + while (phaseTransitionNeeded()) { + if (!coll) { + coll = ShardingCatalogManager::get(opCtx)->localCatalogClient()->getCollection( + opCtx, collUuid); + } + currentPhase = _transitionPhases(opCtx, *coll, currentPhase->getNextPhase()); + advanced = true; + } + return advanced; +} + +void BalancerDefragmentationPolicy::applyActionResult( + OperationContext* opCtx, + const BalancerStreamAction& action, + const BalancerStreamActionResponse& response) { + { + stdx::lock_guard lk(_stateMutex); + DefragmentationPhase* targetState = nullptr; + stdx::visit( + OverloadedVisitor{[&](const MergeInfo& act) { + if (_defragmentationStates.contains(act.uuid)) { + targetState = _defragmentationStates.at(act.uuid).get(); + } + }, + [&](const DataSizeInfo& act) { + if (_defragmentationStates.contains(act.uuid)) { + targetState = _defragmentationStates.at(act.uuid).get(); + } + }, + [&](const MigrateInfo& act) { + if (_defragmentationStates.contains(act.uuid)) { + targetState = _defragmentationStates.at(act.uuid).get(); + } + }, + [](const MergeAllChunksOnShardInfo& _) { + uasserted(ErrorCodes::BadValue, "Unexpected action type"); + }}, + action); + + if (targetState) { + targetState->applyActionResult(opCtx, action, response); + } + } + _onStateUpdated(); +} + +std::unique_ptr BalancerDefragmentationPolicy::_transitionPhases( + OperationContext* opCtx, + const CollectionType& coll, + DefragmentationPhaseEnum nextPhase, + bool shouldPersistPhase) { + std::unique_ptr nextPhaseObject(nullptr); + + try { + if (shouldPersistPhase) { + _persistPhaseUpdate(opCtx, nextPhase, coll.getUuid()); + } + switch (nextPhase) { + case DefragmentationPhaseEnum::kMergeAndMeasureChunks: + nextPhaseObject = MergeAndMeasureChunksPhase::build(opCtx, coll); + break; + case DefragmentationPhaseEnum::kMoveAndMergeChunks: { + auto collectionShardStats = + uassertStatusOK(_clusterStats->getCollStats(opCtx, coll.getNss())); + nextPhaseObject = + MoveAndMergeChunksPhase::build(opCtx, coll, std::move(collectionShardStats)); + } break; + case DefragmentationPhaseEnum::kMergeChunks: + nextPhaseObject = MergeChunksPhase::build(opCtx, coll); + break; + case DefragmentationPhaseEnum::kFinished: + default: // Exit defragmentation in case of unexpected phase + _clearDefragmentationState(opCtx, coll.getUuid()); + break; + } + afterBuildingNextDefragmentationPhase.pauseWhileSet(); + LOGV2(6172702, + "Collection defragmentation transitioned to new phase", + logAttrs(coll.getNss()), + "phase"_attr = nextPhaseObject + ? DefragmentationPhase_serializer(nextPhaseObject->getType()) + : kNoPhase, + "details"_attr = nextPhaseObject ?
nextPhaseObject->reportProgress() : BSONObj()); + } catch (const DBException& e) { + LOGV2_ERROR(6153101, + "Error while building defragmentation phase on collection", + logAttrs(coll.getNss()), + "uuid"_attr = coll.getUuid(), + "phase"_attr = nextPhase, + "error"_attr = e); + } + return nextPhaseObject; +} + +void BalancerDefragmentationPolicy::_initializeCollectionState(WithLock, + OperationContext* opCtx, + const CollectionType& coll) { + if (MONGO_unlikely(skipDefragmentationPhaseTransition.shouldFail())) { + return; + } + auto phaseToBuild = coll.getDefragmentationPhase() + ? coll.getDefragmentationPhase().value() + : DefragmentationPhaseEnum::kMergeAndMeasureChunks; + auto collectionPhase = + _transitionPhases(opCtx, coll, phaseToBuild, !coll.getDefragmentationPhase().has_value()); + while (collectionPhase && collectionPhase->isComplete() && + MONGO_likely(!skipDefragmentationPhaseTransition.shouldFail())) { + collectionPhase = _transitionPhases(opCtx, coll, collectionPhase->getNextPhase()); + } + if (collectionPhase) { + auto [_, inserted] = + _defragmentationStates.insert_or_assign(coll.getUuid(), std::move(collectionPhase)); + dassert(inserted); + } +} + +void BalancerDefragmentationPolicy::_persistPhaseUpdate(OperationContext* opCtx, + DefragmentationPhaseEnum phase, + const UUID& uuid) { + DBDirectClient dbClient(opCtx); + write_ops::UpdateCommandRequest updateOp(CollectionType::ConfigNS); + updateOp.setUpdates({[&] { + write_ops::UpdateOpEntry entry; + entry.setQ(BSON(CollectionType::kUuidFieldName << uuid)); + entry.setU(write_ops::UpdateModification::parseFromClassicUpdate( + BSON("$set" << BSON(CollectionType::kDefragmentationPhaseFieldName + << DefragmentationPhase_serializer(phase))))); + return entry; + }()}); + auto response = write_ops::checkWriteErrors(dbClient.update(updateOp)); + uassert(ErrorCodes::NoMatchingDocument, + "Collection {} not found while persisting phase change"_format(uuid.toString()), + response.getN() > 0); + WriteConcernResult ignoreResult; + const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); + uassertStatusOK(waitForWriteConcern( + opCtx, latestOpTime, WriteConcerns::kMajorityWriteConcernShardingTimeout, &ignoreResult)); +} + +void BalancerDefragmentationPolicy::_clearDefragmentationState(OperationContext* opCtx, + const UUID& uuid) { + DBDirectClient dbClient(opCtx); + + // Clear datasize estimates from chunks + write_ops::checkWriteErrors(dbClient.update(write_ops::UpdateCommandRequest( + ChunkType::ConfigNS, {[&] { + write_ops::UpdateOpEntry entry; + entry.setQ(BSON(CollectionType::kUuidFieldName << uuid)); + entry.setU(write_ops::UpdateModification::parseFromClassicUpdate( + BSON("$unset" << BSON(ChunkType::estimatedSizeBytes.name() << "")))); + entry.setMulti(true); + return entry; + }()}))); + + // Clear defragmentation phase and defragmenting flag from collection + write_ops::checkWriteErrors(dbClient.update(write_ops::UpdateCommandRequest( + CollectionType::ConfigNS, {[&] { + write_ops::UpdateOpEntry entry; + entry.setQ(BSON(CollectionType::kUuidFieldName << uuid)); + entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(BSON( + "$unset" << BSON(CollectionType::kDefragmentCollectionFieldName + << "" << CollectionType::kDefragmentationPhaseFieldName << "")))); + return entry; + }()}))); + + WriteConcernResult ignoreResult; + const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); + uassertStatusOK(waitForWriteConcern( + opCtx, latestOpTime, 
WriteConcerns::kMajorityWriteConcernShardingTimeout, &ignoreResult)); +} + +} // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy.h b/src/mongo/db/s/balancer/balancer_defragmentation_policy.h index 0ccdf2f929ac3..d85dab9140927 100644 --- a/src/mongo/db/s/balancer/balancer_defragmentation_policy.h +++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy.h @@ -29,55 +29,170 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/balancer/actions_stream_policy.h" +#include "mongo/db/s/balancer/balancer_policy.h" +#include "mongo/db/s/balancer/cluster_statistics.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/mutex.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/uuid.h" namespace mongo { + /** - * Helper class that + * Interface describing the interactions that the defragmentation policy can establish with the + * phase of the algorithm that is currently active on a collection. + * With the exception of getType(), its methods do not guarantee thread safety. + */ +class DefragmentationPhase { +public: + virtual ~DefragmentationPhase() {} + + virtual DefragmentationPhaseEnum getType() const = 0; + + virtual DefragmentationPhaseEnum getNextPhase() const = 0; + + virtual boost::optional popNextStreamableAction( + OperationContext* opCtx) = 0; + + virtual boost::optional popNextMigration( + OperationContext* opCtx, stdx::unordered_set* availableShards) = 0; + + virtual void applyActionResult(OperationContext* opCtx, + const BalancerStreamAction& action, + const BalancerStreamActionResponse& response) = 0; + + virtual BSONObj reportProgress() const = 0; + + virtual bool isComplete() const = 0; + + virtual void userAbort() = 0; + +protected: + static constexpr uint64_t kSmallChunkSizeThresholdPctg = 25; +}; + +/** + * Helper class that: * - stores the progress of the defragmentation algorithm on each collection * - generates a single sequence of action descriptors to fairly execute the defragmentation - * algorithm across collections. + * algorithm across collections */ class BalancerDefragmentationPolicy : public ActionsStreamPolicy { + BalancerDefragmentationPolicy(const BalancerDefragmentationPolicy&) = delete; + BalancerDefragmentationPolicy& operator=(const BalancerDefragmentationPolicy&) = delete; public: - virtual ~BalancerDefragmentationPolicy() {} + BalancerDefragmentationPolicy(ClusterStatistics* clusterStats, + const std::function& onStateUpdated) + : _clusterStats(clusterStats), _onStateUpdated(onStateUpdated) {} + + ~BalancerDefragmentationPolicy() {} + + StringData getName() const override; + + boost::optional getNextStreamingAction(OperationContext* opCtx) override; + + void applyActionResult(OperationContext* opCtx, + const BalancerStreamAction& action, + const BalancerStreamActionResponse& response) override; /** * Requests the execution of the defragmentation algorithm on the required collections. */ - virtual void startCollectionDefragmentations(OperationContext* opCtx) = 0; + void startCollectionDefragmentations(OperationContext* opCtx); /** - * Checks if the collection is currently being defragmented, and signals the defragmentation - * to end if so.
+ * Checks if the collection is currently being defragmented, and signals the defragmentation to + * end if so. */ - virtual void abortCollectionDefragmentation(OperationContext* opCtx, - const NamespaceString& nss) = 0; + void abortCollectionDefragmentation(OperationContext* opCtx, const NamespaceString& nss); /** * Requests to stop the emission of any new defragmentation action request. Does not alter the - * persisted state of the affected collections. startCollectionDefragmentation() can be invoked + * persisted state of the affected collections. `startCollectionDefragmentation` can be invoked * on a later stage to resume the defragmentation on each item. */ - virtual void interruptAllDefragmentations() = 0; + void interruptAllDefragmentations(); /** - * Returns true if the specified collection is currently being defragmented. + * Returns `true` if the specified collection is currently being defragmented. */ - virtual bool isDefragmentingCollection(const UUID& uuid) = 0; - - virtual BSONObj reportProgressOn(const UUID& uuid) = 0; + bool isDefragmentingCollection(const UUID& uuid); + BSONObj reportProgressOn(const UUID& uuid); /** * Pulls the next batch of actionable chunk migration requests, given the current internal state * and the passed in list of available shards. * Every chunk migration request is then expected to be acknowledged by the balancer by issuing - * a call to applyActionResult() (declared in ActionsStreamPolicy) + * a call to `applyActionResult` (declared in `ActionsStreamPolicy`). */ - virtual MigrateInfoVector selectChunksToMove(OperationContext* opCtx, - stdx::unordered_set* availableShards) = 0; + MigrateInfoVector selectChunksToMove(OperationContext* opCtx, + stdx::unordered_set* availableShards); + +private: + /** + * Advances the defragmentation state of the specified collection to the next actionable phase + * (or sets the related DefragmentationPhase object to nullptr if nothing more can be done). + */ + bool _advanceToNextActionablePhase(OperationContext* opCtx, const UUID& collUuid); + + /** + * Move to the next phase and persist the phase change. This will end defragmentation if the + * next phase is kFinished. + * Must be called while holding the _stateMutex. + */ + std::unique_ptr _transitionPhases(OperationContext* opCtx, + const CollectionType& coll, + DefragmentationPhaseEnum nextPhase, + bool shouldPersistPhase = true); + + /** + * Builds the defragmentation phase object matching the current state of the passed + * collection and sets it into _defragmentationStates. + */ + void _initializeCollectionState(WithLock, OperationContext* opCtx, const CollectionType& coll); + + /** + * Write the new phase to the defragmentationPhase field in config.collections. If phase is + * kFinished, the field will be removed. + * Must be called while holding the _stateMutex. + */ + void _persistPhaseUpdate(OperationContext* opCtx, + DefragmentationPhaseEnum phase, + const UUID& uuid); + + /** + * Remove all datasize fields from config.chunks for the given namespace. + * Must be called while holding the _stateMutex. 
+ */ + void _clearDefragmentationState(OperationContext* opCtx, const UUID& uuid); + + const std::string kPolicyName{"BalancerDefragmentationPolicy"}; + + Mutex _stateMutex = MONGO_MAKE_LATCH("BalancerChunkMergerImpl::_stateMutex"); + + ClusterStatistics* const _clusterStats; + + const std::function _onStateUpdated; + + stdx::unordered_map, UUID::Hash> + _defragmentationStates; }; + } // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp deleted file mode 100644 index 93f3fea7d23c4..0000000000000 --- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.cpp +++ /dev/null @@ -1,1573 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/s/balancer/balancer_defragmentation_policy_impl.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/persistent_task_store.h" -#include "mongo/db/s/balancer/cluster_statistics.h" -#include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/logv2/log.h" -#include "mongo/s/balancer_configuration.h" -#include "mongo/s/catalog/type_chunk.h" -#include "mongo/s/grid.h" -#include "mongo/s/request_types/move_range_request_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" - -#include -#include - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - - -using namespace fmt::literals; - -namespace mongo { - -namespace { - -MONGO_FAIL_POINT_DEFINE(skipDefragmentationPhaseTransition); -MONGO_FAIL_POINT_DEFINE(afterBuildingNextDefragmentationPhase); - -using ShardStatistics = ClusterStatistics::ShardStatistics; - -const std::string kCurrentPhase("currentPhase"); -const std::string kProgress("progress"); -const std::string kNoPhase("none"); -const std::string kRemainingChunksToProcess("remainingChunksToProcess"); - -static constexpr int64_t kBigChunkMarker = std::numeric_limits::max(); - -ShardVersion getShardVersion(OperationContext* opCtx, - const ShardId& shardId, - const NamespaceString& nss) { - auto cri = Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfo(opCtx, nss); - return cri.getShardVersion(shardId); -} - -std::vector getCollectionChunks(OperationContext* opCtx, const CollectionType& coll) { - auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - return uassertStatusOK( - catalogClient->getChunks(opCtx, - BSON(ChunkType::collectionUUID() << coll.getUuid()) /*query*/, - BSON(ChunkType::min() << 1) /*sort*/, - boost::none /*limit*/, - nullptr /*opTime*/, - coll.getEpoch(), - coll.getTimestamp(), - repl::ReadConcernLevel::kLocalReadConcern, - boost::none)); -} - -uint64_t getCollectionMaxChunkSizeBytes(OperationContext* opCtx, const CollectionType& coll) { - const auto balancerConfig = Grid::get(opCtx)->getBalancerConfiguration(); - uassertStatusOK(balancerConfig->refreshAndCheck(opCtx)); - return coll.getMaxChunkSizeBytes().value_or(balancerConfig->getMaxChunkSizeBytes()); -} - -ZoneInfo getCollectionZones(OperationContext* opCtx, const CollectionType& coll) { - auto zones = uassertStatusOK( - ZoneInfo::getZonesForCollection(opCtx, coll.getNss(), coll.getKeyPattern())); - return zones; -} - -bool isRetriableForDefragmentation(const Status& status) { - if (ErrorCodes::isA(status)) - return true; - - if (status == ErrorCodes::StaleConfig) { - if (auto staleInfo = status.extraInfo()) { - // If the staleInfo error contains a "wanted" version, this means the donor shard which - // returned this error has its versioning information up-to-date (as opposed to UNKNOWN) - // and it couldn't find the chunk that the defragmenter expected. Such a situation can - // only arise as a result of manual split/merge/move concurrently with the defragmenter. 
- return !staleInfo->getVersionWanted(); - } - } - - return false; -} - -void handleActionResult(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const DefragmentationPhaseEnum currentPhase, - const Status& status, - std::function onSuccess, - std::function onRetriableError, - std::function onNonRetriableError) { - if (status.isOK()) { - onSuccess(); - return; - } - - if (status == ErrorCodes::StaleConfig) { - if (auto staleInfo = status.extraInfo()) { - Grid::get(opCtx) - ->catalogCache() - ->invalidateShardOrEntireCollectionEntryForShardedCollection( - nss, staleInfo->getVersionWanted(), staleInfo->getShardId()); - } - } - - if (isRetriableForDefragmentation(status)) { - LOGV2_DEBUG(6261701, - 1, - "Hit retriable error while defragmenting collection", - logAttrs(nss), - "uuid"_attr = uuid, - "currentPhase"_attr = currentPhase, - "error"_attr = redact(status)); - onRetriableError(); - } else { - LOGV2_ERROR(6258601, - "Defragmentation for collection hit non-retriable error", - logAttrs(nss), - "uuid"_attr = uuid, - "currentPhase"_attr = currentPhase, - "error"_attr = redact(status)); - onNonRetriableError(); - } -} - -bool areMergeable(const ChunkType& firstChunk, - const ChunkType& secondChunk, - const ZoneInfo& collectionZones) { - return firstChunk.getShard() == secondChunk.getShard() && - collectionZones.getZoneForChunk(firstChunk.getRange()) == - collectionZones.getZoneForChunk(secondChunk.getRange()) && - SimpleBSONObjComparator::kInstance.evaluate(firstChunk.getMax() == secondChunk.getMin()); -} - -class MergeAndMeasureChunksPhase : public DefragmentationPhase { -public: - static std::unique_ptr build(OperationContext* opCtx, - const CollectionType& coll) { - auto collectionChunks = getCollectionChunks(opCtx, coll); - const auto collectionZones = getCollectionZones(opCtx, coll); - - // Calculate small chunk threshold to limit dataSize commands - const auto maxChunkSizeBytes = getCollectionMaxChunkSizeBytes(opCtx, coll); - const int64_t smallChunkSizeThreshold = - (maxChunkSizeBytes / 100) * kSmallChunkSizeThresholdPctg; - - stdx::unordered_map pendingActionsByShards; - // Find ranges of chunks; for single-chunk ranges, request DataSize; for multi-range, issue - // merge - while (!collectionChunks.empty()) { - auto upperRangeBound = std::prev(collectionChunks.cend()); - auto lowerRangeBound = upperRangeBound; - while (lowerRangeBound != collectionChunks.cbegin() && - areMergeable(*std::prev(lowerRangeBound), *lowerRangeBound, collectionZones)) { - --lowerRangeBound; - } - if (lowerRangeBound != upperRangeBound) { - pendingActionsByShards[upperRangeBound->getShard()].rangesToMerge.emplace_back( - lowerRangeBound->getMin(), upperRangeBound->getMax()); - } else { - if (!upperRangeBound->getEstimatedSizeBytes().has_value()) { - pendingActionsByShards[upperRangeBound->getShard()] - .rangesWithoutDataSize.emplace_back(upperRangeBound->getMin(), - upperRangeBound->getMax()); - } - } - collectionChunks.erase(lowerRangeBound, std::next(upperRangeBound)); - } - return std::unique_ptr( - new MergeAndMeasureChunksPhase(coll.getNss(), - coll.getUuid(), - coll.getKeyPattern().toBSON(), - smallChunkSizeThreshold, - std::move(pendingActionsByShards))); - } - - DefragmentationPhaseEnum getType() const override { - return DefragmentationPhaseEnum::kMergeAndMeasureChunks; - } - - DefragmentationPhaseEnum getNextPhase() const override { - return _nextPhase; - } - - boost::optional popNextStreamableAction( - OperationContext* opCtx) override { - boost::optional nextAction = 
boost::none; - if (!_pendingActionsByShards.empty()) { - auto it = _shardToProcess ? _pendingActionsByShards.find(*_shardToProcess) - : _pendingActionsByShards.begin(); - - invariant(it != _pendingActionsByShards.end()); - - auto& [shardId, pendingActions] = *it; - auto shardVersion = getShardVersion(opCtx, shardId, _nss); - - if (pendingActions.rangesWithoutDataSize.size() > pendingActions.rangesToMerge.size()) { - const auto& rangeToMeasure = pendingActions.rangesWithoutDataSize.back(); - nextAction = boost::optional( - DataSizeInfo(shardId, - _nss, - _uuid, - rangeToMeasure, - shardVersion, - _shardKey, - true /* estimate */, - _smallChunkSizeThresholdBytes /* maxSize */)); - pendingActions.rangesWithoutDataSize.pop_back(); - } else if (!pendingActions.rangesToMerge.empty()) { - const auto& rangeToMerge = pendingActions.rangesToMerge.back(); - nextAction = boost::optional( - MergeInfo(shardId, _nss, _uuid, shardVersion.placementVersion(), rangeToMerge)); - pendingActions.rangesToMerge.pop_back(); - } - if (nextAction.has_value()) { - ++_outstandingActions; - if (pendingActions.rangesToMerge.empty() && - pendingActions.rangesWithoutDataSize.empty()) { - it = _pendingActionsByShards.erase(it, std::next(it)); - } else { - ++it; - } - } - if (it != _pendingActionsByShards.end()) { - _shardToProcess = it->first; - } else { - _shardToProcess = boost::none; - } - } - return nextAction; - } - - boost::optional popNextMigration( - OperationContext* opCtx, stdx::unordered_set* availableShards) override { - return boost::none; - } - - void applyActionResult(OperationContext* opCtx, - const BalancerStreamAction& action, - const BalancerStreamActionResponse& response) override { - ScopeGuard scopedGuard([&] { --_outstandingActions; }); - if (_aborted) { - return; - } - stdx::visit(OverloadedVisitor{ - [&](const MergeInfo& mergeAction) { - auto& mergeResponse = stdx::get(response); - auto& shardingPendingActions = - _pendingActionsByShards[mergeAction.shardId]; - handleActionResult( - opCtx, - _nss, - _uuid, - getType(), - mergeResponse, - [&]() { - shardingPendingActions.rangesWithoutDataSize.emplace_back( - mergeAction.chunkRange); - }, - [&]() { - shardingPendingActions.rangesToMerge.emplace_back( - mergeAction.chunkRange); - }, - [&]() { _abort(getType()); }); - }, - [&](const DataSizeInfo& dataSizeAction) { - auto& dataSizeResponse = - stdx::get>(response); - handleActionResult( - opCtx, - _nss, - _uuid, - getType(), - dataSizeResponse.getStatus(), - [&]() { - ChunkType chunk(dataSizeAction.uuid, - dataSizeAction.chunkRange, - dataSizeAction.version.placementVersion(), - dataSizeAction.shardId); - auto catalogManager = ShardingCatalogManager::get(opCtx); - // Max out the chunk size if it has has been estimated as bigger - // than _smallChunkSizeThresholdBytes; this will exlude the - // chunk from the list of candidates considered by - // MoveAndMergeChunksPhase - auto estimatedSize = dataSizeResponse.getValue().maxSizeReached - ? 
kBigChunkMarker - : dataSizeResponse.getValue().sizeBytes; - catalogManager->setChunkEstimatedSize( - opCtx, - chunk, - estimatedSize, - ShardingCatalogClient::kMajorityWriteConcern); - }, - [&]() { - auto& shardingPendingActions = - _pendingActionsByShards[dataSizeAction.shardId]; - shardingPendingActions.rangesWithoutDataSize.emplace_back( - dataSizeAction.chunkRange); - }, - [&]() { _abort(getType()); }); - }, - [](const MigrateInfo& _) { - uasserted(ErrorCodes::BadValue, "Unexpected action type"); - }, - [](const MergeAllChunksOnShardInfo& _) { - uasserted(ErrorCodes::BadValue, "Unexpected action type"); - }}, - action); - } - - bool isComplete() const override { - return _pendingActionsByShards.empty() && _outstandingActions == 0; - } - - void userAbort() override { - _abort(DefragmentationPhaseEnum::kFinished); - } - - BSONObj reportProgress() const override { - - size_t rangesToMerge = 0, rangesWithoutDataSize = 0; - for (const auto& [_, pendingActions] : _pendingActionsByShards) { - rangesToMerge += pendingActions.rangesToMerge.size(); - rangesWithoutDataSize += pendingActions.rangesWithoutDataSize.size(); - } - auto remainingChunksToProcess = static_cast(_outstandingActions) + - static_cast(rangesToMerge) + static_cast(rangesWithoutDataSize); - - return BSON(kRemainingChunksToProcess << remainingChunksToProcess); - } - -private: - struct PendingActions { - std::vector rangesToMerge; - std::vector rangesWithoutDataSize; - }; - MergeAndMeasureChunksPhase( - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& shardKey, - const int64_t smallChunkSizeThresholdBytes, - stdx::unordered_map&& pendingActionsByShards) - : _nss(nss), - _uuid(uuid), - _shardKey(shardKey), - _smallChunkSizeThresholdBytes(smallChunkSizeThresholdBytes), - _pendingActionsByShards(std::move(pendingActionsByShards)) {} - - void _abort(const DefragmentationPhaseEnum nextPhase) { - _aborted = true; - _nextPhase = nextPhase; - _pendingActionsByShards.clear(); - } - - const NamespaceString _nss; - const UUID _uuid; - const BSONObj _shardKey; - const int64_t _smallChunkSizeThresholdBytes; - stdx::unordered_map _pendingActionsByShards; - boost::optional _shardToProcess; - size_t _outstandingActions{0}; - bool _aborted{false}; - DefragmentationPhaseEnum _nextPhase{DefragmentationPhaseEnum::kMoveAndMergeChunks}; -}; - -class MoveAndMergeChunksPhase : public DefragmentationPhase { -public: - static std::unique_ptr build( - OperationContext* opCtx, - const CollectionType& coll, - std::vector&& collectionShardStats) { - auto collectionZones = getCollectionZones(opCtx, coll); - - stdx::unordered_map shardInfos; - for (const auto& shardStats : collectionShardStats) { - shardInfos.emplace(shardStats.shardId, - ShardInfo(shardStats.currSizeBytes, shardStats.isDraining)); - } - - auto collectionChunks = getCollectionChunks(opCtx, coll); - const auto maxChunkSizeBytes = getCollectionMaxChunkSizeBytes(opCtx, coll); - const uint64_t smallChunkSizeThresholdBytes = - (maxChunkSizeBytes / 100) * kSmallChunkSizeThresholdPctg; - - return std::unique_ptr( - new MoveAndMergeChunksPhase(coll.getNss(), - coll.getUuid(), - std::move(collectionChunks), - std::move(shardInfos), - std::move(collectionZones), - smallChunkSizeThresholdBytes, - maxChunkSizeBytes)); - } - - DefragmentationPhaseEnum getType() const override { - return DefragmentationPhaseEnum::kMoveAndMergeChunks; - } - - DefragmentationPhaseEnum getNextPhase() const override { - return _nextPhase; - } - - boost::optional popNextStreamableAction( - OperationContext* 
opCtx) override { - if (_actionableMerges.empty()) { - return boost::none; - } - - _outstandingMerges.push_back(std::move(_actionableMerges.front())); - _actionableMerges.pop_front(); - const auto& nextRequest = _outstandingMerges.back(); - auto version = getShardVersion(opCtx, nextRequest.getDestinationShard(), _nss); - return boost::optional( - nextRequest.asMergeInfo(_uuid, _nss, version.placementVersion())); - } - - boost::optional popNextMigration( - OperationContext* opCtx, stdx::unordered_set* availableShards) override { - for (const auto& shardId : _shardProcessingOrder) { - if (availableShards->count(shardId) == 0) { - // the shard is already busy in a migration - continue; - } - - ChunkRangeInfoIterator nextSmallChunk; - std::list candidateSiblings; - if (!_findNextSmallChunkInShard( - shardId, *availableShards, &nextSmallChunk, &candidateSiblings)) { - // there isn't a chunk in this shard that can currently be moved and merged with one - // of its siblings. - continue; - } - - // We have a chunk that can be moved&merged with at least one sibling. Choose one... - invariant(candidateSiblings.size() <= 2); - auto targetSibling = candidateSiblings.front(); - if (auto challenger = candidateSiblings.back(); targetSibling != challenger) { - auto targetScore = _rankMergeableSibling(*nextSmallChunk, *targetSibling); - auto challengerScore = _rankMergeableSibling(*nextSmallChunk, *challenger); - if (challengerScore > targetScore || - (challengerScore == targetScore && - _shardInfos.at(challenger->shard).currentSizeBytes < - _shardInfos.at(targetSibling->shard).currentSizeBytes)) { - targetSibling = challenger; - } - } - - // ... then build up the migration request, marking the needed resources as busy. - nextSmallChunk->busyInOperation = true; - targetSibling->busyInOperation = true; - availableShards->erase(nextSmallChunk->shard); - availableShards->erase(targetSibling->shard); - auto smallChunkVersion = getShardVersion(opCtx, nextSmallChunk->shard, _nss); - _outstandingMigrations.emplace_back(nextSmallChunk, targetSibling); - return _outstandingMigrations.back().asMigrateInfo( - _uuid, _nss, smallChunkVersion.placementVersion(), _maxChunkSizeBytes); - } - - return boost::none; - } - - void applyActionResult(OperationContext* opCtx, - const BalancerStreamAction& action, - const BalancerStreamActionResponse& response) override { - stdx::visit( - OverloadedVisitor{ - [&](const MigrateInfo& migrationAction) { - auto& migrationResponse = stdx::get(response); - auto match = - std::find_if(_outstandingMigrations.begin(), - _outstandingMigrations.end(), - [&migrationAction](const MoveAndMergeRequest& request) { - return (migrationAction.minKey.woCompare( - request.getMigrationMinKey()) == 0); - }); - invariant(match != _outstandingMigrations.end()); - MoveAndMergeRequest moveRequest(std::move(*match)); - _outstandingMigrations.erase(match); - - if (_aborted) { - return; - } - - if (migrationResponse.isOK()) { - Grid::get(opCtx) - ->catalogCache() - ->invalidateShardOrEntireCollectionEntryForShardedCollection( - _nss, boost::none, moveRequest.getDestinationShard()); - - auto transferredAmount = moveRequest.getMovedDataSizeBytes(); - invariant(transferredAmount <= _smallChunkSizeThresholdBytes); - _shardInfos.at(moveRequest.getSourceShard()).currentSizeBytes -= - transferredAmount; - _shardInfos.at(moveRequest.getDestinationShard()).currentSizeBytes += - transferredAmount; - _shardProcessingOrder.sort([this](const ShardId& lhs, const ShardId& rhs) { - return _shardInfos.at(lhs).currentSizeBytes > 
- _shardInfos.at(rhs).currentSizeBytes; - }); - _actionableMerges.push_back(std::move(moveRequest)); - return; - } - - LOGV2_DEBUG(6290000, - 1, - "Migration failed during collection defragmentation", - logAttrs(_nss), - "uuid"_attr = _uuid, - "currentPhase"_attr = getType(), - "error"_attr = redact(migrationResponse)); - - moveRequest.chunkToMove->busyInOperation = false; - moveRequest.chunkToMergeWith->busyInOperation = false; - - if (migrationResponse.code() == ErrorCodes::ChunkTooBig || - migrationResponse.code() == ErrorCodes::ExceededMemoryLimit) { - // Never try moving this chunk again, it isn't actually small - _removeIteratorFromSmallChunks(moveRequest.chunkToMove, - moveRequest.chunkToMove->shard); - return; - } - - if (isRetriableForDefragmentation(migrationResponse)) { - // The migration will be eventually retried - return; - } - - const auto exceededTimeLimit = [&] { - // All errors thrown by the migration destination shard are converted - // into OperationFailed. Thus we need to inspect the error message to - // match the real error code. - - // TODO SERVER-62990 introduce and propagate specific error code for - // migration failed due to range deletion pending - return migrationResponse == ErrorCodes::OperationFailed && - migrationResponse.reason().find(ErrorCodes::errorString( - ErrorCodes::ExceededTimeLimit)) != std::string::npos; - }; - - if (exceededTimeLimit()) { - // The migration failed because there is still a range deletion - // pending on the recipient. - moveRequest.chunkToMove->shardsToAvoid.emplace( - moveRequest.getDestinationShard()); - return; - } - - LOGV2_ERROR(6290001, - "Encountered non-retriable error on migration during " - "collection defragmentation", - logAttrs(_nss), - "uuid"_attr = _uuid, - "currentPhase"_attr = getType(), - "error"_attr = redact(migrationResponse)); - _abort(DefragmentationPhaseEnum::kMergeAndMeasureChunks); - }, - [&](const MergeInfo& mergeAction) { - auto& mergeResponse = stdx::get(response); - auto match = std::find_if(_outstandingMerges.begin(), - _outstandingMerges.end(), - [&mergeAction](const MoveAndMergeRequest& request) { - return mergeAction.chunkRange.containsKey( - request.getMigrationMinKey()); - }); - invariant(match != _outstandingMerges.end()); - MoveAndMergeRequest mergeRequest(std::move(*match)); - _outstandingMerges.erase(match); - - auto onSuccess = [&] { - // The sequence is complete; update the state of the merged chunk... - auto& mergedChunk = mergeRequest.chunkToMergeWith; - - Grid::get(opCtx) - ->catalogCache() - ->invalidateShardOrEntireCollectionEntryForShardedCollection( - _nss, boost::none, mergedChunk->shard); - - auto& chunkToDelete = mergeRequest.chunkToMove; - mergedChunk->range = mergeRequest.asMergedRange(); - if (mergedChunk->estimatedSizeBytes != kBigChunkMarker && - chunkToDelete->estimatedSizeBytes != kBigChunkMarker) { - mergedChunk->estimatedSizeBytes += chunkToDelete->estimatedSizeBytes; - } else { - mergedChunk->estimatedSizeBytes = kBigChunkMarker; - } - - mergedChunk->busyInOperation = false; - auto deletedChunkShard = chunkToDelete->shard; - // the lookup data structures... 
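The merge-success handler above folds the deleted chunk's estimated size into the surviving chunk, but saturates to `kBigChunkMarker` whenever either operand already carries that sentinel, so a chunk known to be oversized never re-enters the small-chunk candidate list. A minimal standalone sketch of that saturating addition; the sentinel value used here is an illustrative placeholder, not the server's actual constant:

```cpp
#include <cassert>
#include <cstdint>

// Illustrative sentinel meaning "size already known to exceed the small-chunk threshold".
// The real server defines its own kBigChunkMarker; this value is made up for the sketch.
constexpr int64_t kBigChunkMarker = INT64_MAX / 2;

// Mirrors the handler: if either chunk is already marked "big", the merged chunk stays
// "big"; otherwise the two estimates are simply added.
int64_t mergedEstimatedSize(int64_t lhs, int64_t rhs) {
    if (lhs == kBigChunkMarker || rhs == kBigChunkMarker) {
        return kBigChunkMarker;
    }
    return lhs + rhs;
}

int main() {
    assert(mergedEstimatedSize(10, 20) == 30);
    assert(mergedEstimatedSize(10, kBigChunkMarker) == kBigChunkMarker);
    return 0;
}
```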
- _removeIteratorFromSmallChunks(chunkToDelete, deletedChunkShard); - if (mergedChunk->estimatedSizeBytes > _smallChunkSizeThresholdBytes) { - _removeIteratorFromSmallChunks(mergedChunk, mergedChunk->shard); - } else { - // Keep the list of small chunk iterators in the recipient sorted - auto match = _smallChunksByShard.find(mergedChunk->shard); - if (match != _smallChunksByShard.end()) { - auto& [_, smallChunksInRecipient] = *match; - smallChunksInRecipient.sort(compareChunkRangeInfoIterators); - } - } - //... and the collection - _collectionChunks.erase(chunkToDelete); - }; - - auto onRetriableError = [&] { - _actionableMerges.push_back(std::move(mergeRequest)); - }; - - auto onNonRetriableError = [&]() { - _abort(DefragmentationPhaseEnum::kMergeAndMeasureChunks); - }; - - if (!_aborted) { - handleActionResult(opCtx, - _nss, - _uuid, - getType(), - mergeResponse, - onSuccess, - onRetriableError, - onNonRetriableError); - } - }, - [](const DataSizeInfo& dataSizeAction) { - uasserted(ErrorCodes::BadValue, "Unexpected action type"); - }, - [](const MergeAllChunksOnShardInfo& _) { - uasserted(ErrorCodes::BadValue, "Unexpected action type"); - }}, - action); - } - - bool isComplete() const override { - return _smallChunksByShard.empty() && _outstandingMigrations.empty() && - _actionableMerges.empty() && _outstandingMerges.empty(); - } - - void userAbort() override { - _abort(DefragmentationPhaseEnum::kFinished); - } - - BSONObj reportProgress() const override { - size_t numSmallChunks = 0; - for (const auto& [shardId, smallChunks] : _smallChunksByShard) { - numSmallChunks += smallChunks.size(); - } - return BSON(kRemainingChunksToProcess << static_cast(numSmallChunks)); - } - -private: - // Internal representation of the chunk metadata required to generate a MoveAndMergeRequest - struct ChunkRangeInfo { - ChunkRangeInfo(ChunkRange&& range, const ShardId& shard, long long estimatedSizeBytes) - : range(std::move(range)), - shard(shard), - estimatedSizeBytes(estimatedSizeBytes), - busyInOperation(false) {} - ChunkRange range; - const ShardId shard; - long long estimatedSizeBytes; - bool busyInOperation; - // Last time we failed to find a suitable destination shard due to temporary constraints - boost::optional lastFailedAttemptTime; - // Shards that still have a deletion pending for this range - stdx::unordered_set shardsToAvoid; - }; - - struct ShardInfo { - ShardInfo(uint64_t currentSizeBytes, bool draining) - : currentSizeBytes(currentSizeBytes), draining(draining) {} - - bool isDraining() const { - return draining; - } - - uint64_t currentSizeBytes; - const bool draining; - }; - - using ChunkRangeInfos = std::list; - using ChunkRangeInfoIterator = ChunkRangeInfos::iterator; - - static bool compareChunkRangeInfoIterators(const ChunkRangeInfoIterator& lhs, - const ChunkRangeInfoIterator& rhs) { - // Small chunks are ordered by decreasing order of estimatedSizeBytes - // except the ones that we failed to move due to temporary constraints that will be at the - // end of the list ordered by last attempt time - auto lhsLastFailureTime = lhs->lastFailedAttemptTime.value_or(Date_t::min()); - auto rhsLastFailureTime = rhs->lastFailedAttemptTime.value_or(Date_t::min()); - return std::tie(lhsLastFailureTime, lhs->estimatedSizeBytes) < - std::tie(rhsLastFailureTime, rhs->estimatedSizeBytes); - } - - // Helper class to generate the Migration and Merge actions required to join together the chunks - // specified in the constructor - struct MoveAndMergeRequest { - public: - MoveAndMergeRequest(const 
ChunkRangeInfoIterator& chunkToMove, - const ChunkRangeInfoIterator& chunkToMergeWith) - : chunkToMove(chunkToMove), - chunkToMergeWith(chunkToMergeWith), - _isChunkToMergeLeftSibling( - chunkToMergeWith->range.getMax().woCompare(chunkToMove->range.getMin()) == 0) {} - - MigrateInfo asMigrateInfo(const UUID& collUuid, - const NamespaceString& nss, - const ChunkVersion& version, - uint64_t maxChunkSizeBytes) const { - return MigrateInfo(chunkToMergeWith->shard, - chunkToMove->shard, - nss, - collUuid, - chunkToMove->range.getMin(), - chunkToMove->range.getMax(), - version, - ForceJumbo::kDoNotForce, - maxChunkSizeBytes); - } - - ChunkRange asMergedRange() const { - return ChunkRange(_isChunkToMergeLeftSibling ? chunkToMergeWith->range.getMin() - : chunkToMove->range.getMin(), - _isChunkToMergeLeftSibling ? chunkToMove->range.getMax() - : chunkToMergeWith->range.getMax()); - } - - MergeInfo asMergeInfo(const UUID& collUuid, - const NamespaceString& nss, - const ChunkVersion& version) const { - return MergeInfo(chunkToMergeWith->shard, nss, collUuid, version, asMergedRange()); - } - - const ShardId& getSourceShard() const { - return chunkToMove->shard; - } - - const ShardId& getDestinationShard() const { - return chunkToMergeWith->shard; - } - - const BSONObj& getMigrationMinKey() const { - return chunkToMove->range.getMin(); - } - - int64_t getMovedDataSizeBytes() const { - return chunkToMove->estimatedSizeBytes; - } - - ChunkRangeInfoIterator chunkToMove; - ChunkRangeInfoIterator chunkToMergeWith; - - private: - bool _isChunkToMergeLeftSibling; - }; - - const NamespaceString _nss; - - const UUID _uuid; - - // The collection routing table - expressed in ChunkRangeInfo - ChunkRangeInfos _collectionChunks; - - // List of indexes to elements in _collectionChunks that are eligible to be moved. 
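`MoveAndMergeRequest` above derives the post-merge range from whether the merge target sits immediately to the left or to the right of the chunk being moved (its `_isChunkToMergeLeftSibling` flag). A small self-contained sketch of that bound selection, using plain strings in place of the BSON shard-key bounds:

```cpp
#include <cassert>
#include <string>

struct Range {
    std::string min;  // stand-in for the BSON min bound
    std::string max;  // stand-in for the BSON max bound
};

// If the chunk we merge with lies to the LEFT of the chunk being moved (its max equals
// the moved chunk's min), the merged range spans [mergeWith.min, toMove.max); otherwise
// it spans [toMove.min, mergeWith.max), matching asMergedRange() above.
Range mergedRange(const Range& toMove, const Range& mergeWith) {
    const bool mergeWithIsLeftSibling = (mergeWith.max == toMove.min);
    return mergeWithIsLeftSibling ? Range{mergeWith.min, toMove.max}
                                  : Range{toMove.min, mergeWith.max};
}

int main() {
    Range toMove{"20", "30"};
    Range leftSibling{"10", "20"};
    Range rightSibling{"30", "40"};
    assert(mergedRange(toMove, leftSibling).min == "10");
    assert(mergedRange(toMove, rightSibling).max == "40");
    return 0;
}
```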
- std::map> _smallChunksByShard; - - stdx::unordered_map _shardInfos; - - // Sorted list of shard IDs by decreasing current size (@see _shardInfos) - std::list _shardProcessingOrder; - - // Set of attributes representing the currently active move&merge sequences - std::list _outstandingMigrations; - std::list _actionableMerges; - std::list _outstandingMerges; - - ZoneInfo _zoneInfo; - - const int64_t _smallChunkSizeThresholdBytes; - - const uint64_t _maxChunkSizeBytes; - - bool _aborted{false}; - - DefragmentationPhaseEnum _nextPhase{DefragmentationPhaseEnum::kMergeChunks}; - - MoveAndMergeChunksPhase(const NamespaceString& nss, - const UUID& uuid, - std::vector&& collectionChunks, - stdx::unordered_map&& shardInfos, - ZoneInfo&& collectionZones, - uint64_t smallChunkSizeThresholdBytes, - uint64_t maxChunkSizeBytes) - : _nss(nss), - _uuid(uuid), - _collectionChunks(), - _smallChunksByShard(), - _shardInfos(std::move(shardInfos)), - _shardProcessingOrder(), - _outstandingMigrations(), - _actionableMerges(), - _outstandingMerges(), - _zoneInfo(std::move(collectionZones)), - _smallChunkSizeThresholdBytes(smallChunkSizeThresholdBytes), - _maxChunkSizeBytes(maxChunkSizeBytes) { - - // Load the collection routing table in a std::list to ease later manipulation - for (auto&& chunk : collectionChunks) { - if (!chunk.getEstimatedSizeBytes().has_value()) { - LOGV2_WARNING( - 6172701, - "Chunk with no estimated size detected while building MoveAndMergeChunksPhase", - logAttrs(_nss), - "uuid"_attr = _uuid, - "range"_attr = chunk.getRange()); - _abort(DefragmentationPhaseEnum::kMergeAndMeasureChunks); - return; - } - const uint64_t estimatedChunkSize = chunk.getEstimatedSizeBytes().value(); - _collectionChunks.emplace_back(chunk.getRange(), chunk.getShard(), estimatedChunkSize); - } - - // Compose the index of small chunks - for (auto chunkIt = _collectionChunks.begin(); chunkIt != _collectionChunks.end(); - ++chunkIt) { - if (chunkIt->estimatedSizeBytes <= _smallChunkSizeThresholdBytes) { - _smallChunksByShard[chunkIt->shard].emplace_back(chunkIt); - } - } - // Each small chunk within a shard must be sorted by increasing chunk size - for (auto& [_, smallChunksInShard] : _smallChunksByShard) { - smallChunksInShard.sort(compareChunkRangeInfoIterators); - } - - // Set the initial shard processing order - for (const auto& [shardId, _] : _shardInfos) { - _shardProcessingOrder.push_back(shardId); - } - _shardProcessingOrder.sort([this](const ShardId& lhs, const ShardId& rhs) { - return _shardInfos.at(lhs).currentSizeBytes > _shardInfos.at(rhs).currentSizeBytes; - }); - } - - void _abort(const DefragmentationPhaseEnum nextPhase) { - _aborted = true; - _nextPhase = nextPhase; - _actionableMerges.clear(); - _smallChunksByShard.clear(); - _shardProcessingOrder.clear(); - } - - // Returns the list of siblings that are eligible to be move&merged with the specified chunk, - // based on shard zones and data capacity. (It does NOT take into account whether chunks are - // currently involved in a move/merge operation). 
- std::list _getChunkSiblings( - const ChunkRangeInfoIterator& chunkIt) const { - std::list siblings; - auto canBeMoveAndMerged = [this](const ChunkRangeInfoIterator& chunkIt, - const ChunkRangeInfoIterator& siblingIt) { - auto onSameZone = _zoneInfo.getZoneForChunk(chunkIt->range) == - _zoneInfo.getZoneForChunk(siblingIt->range); - auto destinationAvailable = chunkIt->shard == siblingIt->shard || - !_shardInfos.at(siblingIt->shard).isDraining(); - return (onSameZone && destinationAvailable); - }; - - if (auto rightSibling = std::next(chunkIt); - rightSibling != _collectionChunks.end() && canBeMoveAndMerged(chunkIt, rightSibling)) { - siblings.push_back(rightSibling); - } - if (chunkIt != _collectionChunks.begin()) { - auto leftSibling = std::prev(chunkIt); - if (canBeMoveAndMerged(chunkIt, leftSibling)) { - siblings.push_back(leftSibling); - } - } - return siblings; - } - - // Computes whether there is a chunk in the specified shard that can be moved&merged with one or - // both of its siblings. Chunks/siblings that are currently being moved/merged are not eligible. - // - // The function also clears the internal state from elements that cannot be processed by the - // phase (chunks with no siblings, shards with no small chunks). - // - // Returns true on success (storing the related info in nextSmallChunk + smallChunkSiblings), - // false otherwise. - bool _findNextSmallChunkInShard(const ShardId& shard, - const stdx::unordered_set& availableShards, - ChunkRangeInfoIterator* nextSmallChunk, - std::list* smallChunkSiblings) { - auto matchingShardInfo = _smallChunksByShard.find(shard); - if (matchingShardInfo == _smallChunksByShard.end()) { - return false; - } - - smallChunkSiblings->clear(); - auto& smallChunksInShard = matchingShardInfo->second; - for (auto candidateIt = smallChunksInShard.begin(); - candidateIt != smallChunksInShard.end();) { - if ((*candidateIt)->busyInOperation) { - ++candidateIt; - continue; - } - auto candidateSiblings = _getChunkSiblings(*candidateIt); - if (candidateSiblings.empty()) { - // The current chunk cannot be processed by the algorithm - remove it. - candidateIt = smallChunksInShard.erase(candidateIt); - continue; - } - - size_t siblingsDiscardedDueToRangeDeletion = 0; - - for (const auto& sibling : candidateSiblings) { - if (sibling->busyInOperation || !availableShards.count(sibling->shard)) { - continue; - } - if ((*candidateIt)->shardsToAvoid.count(sibling->shard)) { - ++siblingsDiscardedDueToRangeDeletion; - continue; - } - smallChunkSiblings->push_back(sibling); - } - - if (!smallChunkSiblings->empty()) { - *nextSmallChunk = *candidateIt; - return true; - } - - - if (siblingsDiscardedDueToRangeDeletion == candidateSiblings.size()) { - // All the siblings have been discarded because an overlapping range deletion is - // still pending on the destination shard. - if (!(*candidateIt)->lastFailedAttemptTime) { - // This is the first time we discard this chunk due to overlapping range - // deletions pending. Enqueue it back on the list so we will try to move it - // again when we will have drained all the other chunks for this shard. 
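When every eligible sibling is blocked by a pending range deletion, this branch gives the chunk exactly one second chance: on the first failure it stamps `lastFailedAttemptTime` and re-enqueues the chunk behind the shard's other candidates, and on a later failure it drops the chunk for the current round. A rough sketch of that retry-once policy, with `std::chrono` and a `std::list` standing in for the server's `Date_t` and chunk-iterator list:

```cpp
#include <chrono>
#include <iostream>
#include <list>
#include <optional>
#include <string>
#include <utility>

struct SmallChunk {
    std::string range;  // stand-in for the chunk's shard-key range
    std::optional<std::chrono::system_clock::time_point> lastFailedAttemptTime;
};

// First failure: remember when it happened and push the chunk to the back of the queue
// so it is retried after the shard's other candidates. Later failure: discard it.
bool postponeOrDiscard(SmallChunk chunk, std::list<SmallChunk>& queue) {
    if (!chunk.lastFailedAttemptTime) {
        chunk.lastFailedAttemptTime = std::chrono::system_clock::now();
        queue.push_back(std::move(chunk));
        return true;   // postponed
    }
    return false;      // dropped for this defragmentation round
}

int main() {
    std::list<SmallChunk> queue;
    SmallChunk chunk{"[10, 20)", std::nullopt};
    std::cout << postponeOrDiscard(chunk, queue) << '\n';         // 1: postponed
    std::cout << postponeOrDiscard(queue.back(), queue) << '\n';  // 0: discarded
    return 0;
}
```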
- LOGV2_DEBUG(6290002, - 1, - "Postponing small chunk processing due to pending range deletion " - "on recipient shard(s)", - logAttrs(_nss), - "uuid"_attr = _uuid, - "range"_attr = (*candidateIt)->range, - "estimatedSizeBytes"_attr = (*candidateIt)->estimatedSizeBytes, - "numCandidateSiblings"_attr = candidateSiblings.size()); - (*candidateIt)->lastFailedAttemptTime = Date_t::now(); - (*candidateIt)->shardsToAvoid.clear(); - smallChunksInShard.emplace_back(*candidateIt); - } else { - LOGV2(6290003, - "Discarding small chunk due to pending range deletion on recipient shard", - logAttrs(_nss), - "uuid"_attr = _uuid, - "range"_attr = (*candidateIt)->range, - "estimatedSizeBytes"_attr = (*candidateIt)->estimatedSizeBytes, - "numCandidateSiblings"_attr = candidateSiblings.size(), - "lastFailedAttempt"_attr = (*candidateIt)->lastFailedAttemptTime); - } - candidateIt = smallChunksInShard.erase(candidateIt); - continue; - } - - ++candidateIt; - } - // No candidate could be found - clear the shard entry if needed - if (smallChunksInShard.empty()) { - _smallChunksByShard.erase(matchingShardInfo); - } - return false; - } - - uint32_t _rankMergeableSibling(const ChunkRangeInfo& chunkTobeMovedAndMerged, - const ChunkRangeInfo& mergeableSibling) { - static constexpr uint32_t kNoMoveRequired = 1 << 3; - static constexpr uint32_t kConvenientMove = 1 << 2; - static constexpr uint32_t kMergeSolvesTwoPendingChunks = 1 << 1; - static constexpr uint32_t kMergeSolvesOnePendingChunk = 1; - uint32_t ranking = 0; - if (chunkTobeMovedAndMerged.shard == mergeableSibling.shard) { - ranking += kNoMoveRequired; - } else if (chunkTobeMovedAndMerged.estimatedSizeBytes < - mergeableSibling.estimatedSizeBytes) { - ranking += kConvenientMove; - } - auto estimatedMergedSize = (chunkTobeMovedAndMerged.estimatedSizeBytes == kBigChunkMarker || - mergeableSibling.estimatedSizeBytes == kBigChunkMarker) - ? kBigChunkMarker - : chunkTobeMovedAndMerged.estimatedSizeBytes + mergeableSibling.estimatedSizeBytes; - if (estimatedMergedSize > _smallChunkSizeThresholdBytes) { - ranking += mergeableSibling.estimatedSizeBytes < _smallChunkSizeThresholdBytes - ? 
kMergeSolvesTwoPendingChunks - : kMergeSolvesOnePendingChunk; - } - - return ranking; - } - - void _removeIteratorFromSmallChunks(const ChunkRangeInfoIterator& chunkIt, - const ShardId& parentShard) { - auto matchingShardIt = _smallChunksByShard.find(parentShard); - if (matchingShardIt == _smallChunksByShard.end()) { - return; - } - auto& smallChunksInShard = matchingShardIt->second; - auto match = std::find(smallChunksInShard.begin(), smallChunksInShard.end(), chunkIt); - if (match == smallChunksInShard.end()) { - return; - } - smallChunksInShard.erase(match); - if (smallChunksInShard.empty()) { - _smallChunksByShard.erase(parentShard); - } - } -}; - -class MergeChunksPhase : public DefragmentationPhase { -public: - static std::unique_ptr build(OperationContext* opCtx, - const CollectionType& coll) { - auto collectionChunks = getCollectionChunks(opCtx, coll); - const auto collectionZones = getCollectionZones(opCtx, coll); - - // Find ranges of mergeable chunks - stdx::unordered_map> unmergedRangesByShard; - while (!collectionChunks.empty()) { - auto upperRangeBound = std::prev(collectionChunks.cend()); - auto lowerRangeBound = upperRangeBound; - while (lowerRangeBound != collectionChunks.cbegin() && - areMergeable(*std::prev(lowerRangeBound), *lowerRangeBound, collectionZones)) { - --lowerRangeBound; - } - if (lowerRangeBound != upperRangeBound) { - unmergedRangesByShard[upperRangeBound->getShard()].emplace_back( - lowerRangeBound->getMin(), upperRangeBound->getMax()); - } - - collectionChunks.erase(lowerRangeBound, std::next(upperRangeBound)); - } - return std::unique_ptr( - new MergeChunksPhase(coll.getNss(), coll.getUuid(), std::move(unmergedRangesByShard))); - } - - DefragmentationPhaseEnum getType() const override { - return DefragmentationPhaseEnum::kMergeChunks; - } - - DefragmentationPhaseEnum getNextPhase() const override { - return _nextPhase; - } - - boost::optional popNextStreamableAction( - OperationContext* opCtx) override { - if (_unmergedRangesByShard.empty()) { - return boost::none; - } - - auto it = _shardToProcess ? 
_unmergedRangesByShard.find(*_shardToProcess) - : _unmergedRangesByShard.begin(); - - invariant(it != _unmergedRangesByShard.end()); - - auto& [shardId, unmergedRanges] = *it; - invariant(!unmergedRanges.empty()); - auto shardVersion = getShardVersion(opCtx, shardId, _nss); - const auto& rangeToMerge = unmergedRanges.back(); - boost::optional nextAction = boost::optional( - MergeInfo(shardId, _nss, _uuid, shardVersion.placementVersion(), rangeToMerge)); - unmergedRanges.pop_back(); - ++_outstandingActions; - if (unmergedRanges.empty()) { - it = _unmergedRangesByShard.erase(it, std::next(it)); - } else { - ++it; - } - if (it != _unmergedRangesByShard.end()) { - _shardToProcess = it->first; - } else { - _shardToProcess = boost::none; - } - - return nextAction; - } - - boost::optional popNextMigration( - OperationContext* opCtx, stdx::unordered_set* availableShards) override { - return boost::none; - } - - void applyActionResult(OperationContext* opCtx, - const BalancerStreamAction& action, - const BalancerStreamActionResponse& response) override { - ScopeGuard scopedGuard([&] { --_outstandingActions; }); - if (_aborted) { - return; - } - stdx::visit( - OverloadedVisitor{[&](const MergeInfo& mergeAction) { - auto& mergeResponse = stdx::get(response); - auto onSuccess = [] { - }; - auto onRetriableError = [&] { - _unmergedRangesByShard[mergeAction.shardId].emplace_back( - mergeAction.chunkRange); - }; - auto onNonretriableError = [this] { - _abort(getType()); - }; - handleActionResult(opCtx, - _nss, - _uuid, - getType(), - mergeResponse, - onSuccess, - onRetriableError, - onNonretriableError); - }, - [](const DataSizeInfo& _) { - uasserted(ErrorCodes::BadValue, "Unexpected action type"); - }, - [](const MigrateInfo& _) { - uasserted(ErrorCodes::BadValue, "Unexpected action type"); - }, - [](const MergeAllChunksOnShardInfo& _) { - uasserted(ErrorCodes::BadValue, "Unexpected action type"); - }}, - action); - } - - bool isComplete() const override { - return _unmergedRangesByShard.empty() && _outstandingActions == 0; - } - - void userAbort() override { - _abort(DefragmentationPhaseEnum::kFinished); - } - - BSONObj reportProgress() const override { - size_t rangesToMerge = 0; - for (const auto& [_, unmergedRanges] : _unmergedRangesByShard) { - rangesToMerge += unmergedRanges.size(); - } - auto remainingRangesToProcess = - static_cast(_outstandingActions) + static_cast(rangesToMerge); - - return BSON(kRemainingChunksToProcess << remainingRangesToProcess); - } - -private: - MergeChunksPhase(const NamespaceString& nss, - const UUID& uuid, - stdx::unordered_map>&& unmergedRangesByShard) - : _nss(nss), _uuid(uuid), _unmergedRangesByShard(std::move(unmergedRangesByShard)) {} - - void _abort(const DefragmentationPhaseEnum nextPhase) { - _aborted = true; - _nextPhase = nextPhase; - _unmergedRangesByShard.clear(); - } - - const NamespaceString _nss; - const UUID _uuid; - stdx::unordered_map> _unmergedRangesByShard; - boost::optional _shardToProcess; - size_t _outstandingActions{0}; - bool _aborted{false}; - DefragmentationPhaseEnum _nextPhase{DefragmentationPhaseEnum::kFinished}; -}; - -} // namespace - -void BalancerDefragmentationPolicyImpl::startCollectionDefragmentations(OperationContext* opCtx) { - stdx::lock_guard lk(_stateMutex); - - // Fetch all collections with `defragmentCollection` flag enabled - static const auto query = BSON(CollectionType::kDefragmentCollectionFieldName << true); - const auto& configShard = ShardingCatalogManager::get(opCtx)->localConfigShard(); - const auto& collDocs = 
uassertStatusOK(configShard->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting(ReadPreference::Nearest), - repl::ReadConcernLevel::kMajorityReadConcern, - NamespaceString::kConfigsvrCollectionsNamespace, - query, - BSONObj(), - boost::none)) - .docs; - - for (const BSONObj& obj : collDocs) { - const CollectionType coll{obj}; - if (_defragmentationStates.contains(coll.getUuid())) { - continue; - } - _initializeCollectionState(lk, opCtx, coll); - } - _onStateUpdated(); -} - -void BalancerDefragmentationPolicyImpl::abortCollectionDefragmentation(OperationContext* opCtx, - const NamespaceString& nss) { - stdx::lock_guard lk(_stateMutex); - auto coll = - ShardingCatalogManager::get(opCtx)->localCatalogClient()->getCollection(opCtx, nss, {}); - if (coll.getDefragmentCollection()) { - if (_defragmentationStates.contains(coll.getUuid())) { - // Notify phase to abort current phase - _defragmentationStates.at(coll.getUuid())->userAbort(); - _onStateUpdated(); - } - _persistPhaseUpdate(opCtx, DefragmentationPhaseEnum::kFinished, coll.getUuid()); - } -} - -void BalancerDefragmentationPolicyImpl::interruptAllDefragmentations() { - stdx::lock_guard lk(_stateMutex); - _defragmentationStates.clear(); -} - -bool BalancerDefragmentationPolicyImpl::isDefragmentingCollection(const UUID& uuid) { - stdx::lock_guard lk(_stateMutex); - return _defragmentationStates.contains(uuid); -} - -BSONObj BalancerDefragmentationPolicyImpl::reportProgressOn(const UUID& uuid) { - stdx::lock_guard lk(_stateMutex); - auto match = _defragmentationStates.find(uuid); - if (match == _defragmentationStates.end() || !match->second) { - return BSON(kCurrentPhase << kNoPhase); - } - const auto& collDefragmentationPhase = match->second; - return BSON( - kCurrentPhase << DefragmentationPhase_serializer(collDefragmentationPhase->getType()) - << kProgress << collDefragmentationPhase->reportProgress()); -} - -MigrateInfoVector BalancerDefragmentationPolicyImpl::selectChunksToMove( - OperationContext* opCtx, stdx::unordered_set* availableShards) { - - MigrateInfoVector chunksToMove; - { - stdx::lock_guard lk(_stateMutex); - - std::vector collectionUUIDs; - collectionUUIDs.reserve(_defragmentationStates.size()); - for (const auto& defragState : _defragmentationStates) { - collectionUUIDs.push_back(defragState.first); - } - std::shuffle(collectionUUIDs.begin(), collectionUUIDs.end(), _random); - - auto popCollectionUUID = - [&](std::vector::iterator elemIt) -> std::vector::iterator { - if (std::next(elemIt) == collectionUUIDs.end()) { - return collectionUUIDs.erase(elemIt); - } - - *elemIt = std::move(collectionUUIDs.back()); - collectionUUIDs.pop_back(); - return elemIt; - }; - - while (!collectionUUIDs.empty()) { - for (auto it = collectionUUIDs.begin(); it != collectionUUIDs.end();) { - const auto& collUUID = *it; - - if (availableShards->size() == 0) { - return chunksToMove; - } - - try { - auto defragStateIt = _defragmentationStates.find(collUUID); - if (defragStateIt == _defragmentationStates.end()) { - it = popCollectionUUID(it); - continue; - }; - - auto& collDefragmentationPhase = defragStateIt->second; - if (!collDefragmentationPhase) { - _defragmentationStates.erase(defragStateIt); - it = popCollectionUUID(it); - continue; - } - auto actionableMigration = - collDefragmentationPhase->popNextMigration(opCtx, availableShards); - if (!actionableMigration.has_value()) { - it = popCollectionUUID(it); - continue; - } - chunksToMove.push_back(std::move(*actionableMigration)); - ++it; - } catch (DBException& e) { - // Catch 
getCollection and getShardVersion errors. Should only occur if - // collection has been removed. - LOGV2_ERROR(6172700, - "Error while getting next migration", - "uuid"_attr = collUUID, - "error"_attr = redact(e)); - _defragmentationStates.erase(collUUID); - it = popCollectionUUID(it); - } - } - } - } - - if (chunksToMove.empty()) { - // If the policy cannot produce new migrations even in absence of temporary constraints, it - // is possible that some streaming actions must be processed first. Notify an update of the - // internal state to make it happen. - _onStateUpdated(); - } - return chunksToMove; -} - -StringData BalancerDefragmentationPolicyImpl::getName() const { - return StringData(kPolicyName); -} - -boost::optional BalancerDefragmentationPolicyImpl::getNextStreamingAction( - OperationContext* opCtx) { - stdx::lock_guard lk(_stateMutex); - // Visit the defrag state in round robin fashion starting from a random one - auto stateIt = [&] { - auto it = _defragmentationStates.begin(); - if (_defragmentationStates.size() > 1) { - std::uniform_int_distribution uniDist{0, _defragmentationStates.size() - 1}; - std::advance(it, uniDist(_random)); - } - return it; - }(); - - for (auto stateToVisit = _defragmentationStates.size(); stateToVisit != 0; --stateToVisit) { - try { - _advanceToNextActionablePhase(opCtx, stateIt->first); - auto& currentCollectionDefragmentationState = stateIt->second; - if (currentCollectionDefragmentationState) { - // Get next action - auto nextAction = - currentCollectionDefragmentationState->popNextStreamableAction(opCtx); - if (nextAction) { - return nextAction; - } - ++stateIt; - } else { - stateIt = _defragmentationStates.erase(stateIt, std::next(stateIt)); - } - } catch (DBException& e) { - // Catch getCollection and getShardVersion errors. Should only occur if collection has - // been removed. 
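`getNextStreamingAction` above walks the per-collection defragmentation states in round-robin order starting from a randomly chosen entry, so that no single collection monopolizes the action stream. A toy sketch of that traversal pattern over a `std::map`, assuming a `std::mt19937` in place of the policy's `BalancerRandomSource` and a plain string in place of a streamable action:

```cpp
#include <iostream>
#include <iterator>
#include <map>
#include <optional>
#include <random>
#include <string>

// Visit every entry at most once, starting at a random position and wrapping around,
// and return the first entry able to produce an action (a non-empty string here).
std::optional<std::string> pickRoundRobin(const std::map<int, std::string>& states,
                                          std::mt19937& rng) {
    if (states.empty()) {
        return std::nullopt;
    }
    auto it = states.begin();
    std::uniform_int_distribution<size_t> uniDist{0, states.size() - 1};
    std::advance(it, uniDist(rng));

    for (size_t remaining = states.size(); remaining != 0; --remaining) {
        if (!it->second.empty()) {
            return it->second;
        }
        if (++it == states.end()) {
            it = states.begin();  // wrap around to keep the visit round-robin
        }
    }
    return std::nullopt;
}

int main() {
    const std::map<int, std::string> states{{1, ""}, {2, "mergeRange"}, {3, ""}};
    std::mt19937 rng{std::random_device{}()};
    std::cout << pickRoundRobin(states, rng).value_or("none") << '\n';  // "mergeRange"
    return 0;
}
```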
- LOGV2_ERROR(6153301, - "Error while getting next defragmentation action", - "uuid"_attr = stateIt->first, - "error"_attr = redact(e)); - stateIt = _defragmentationStates.erase(stateIt, std::next(stateIt)); - } - - if (stateIt == _defragmentationStates.end()) { - stateIt = _defragmentationStates.begin(); - } - } - - return boost::none; -} - -bool BalancerDefragmentationPolicyImpl::_advanceToNextActionablePhase(OperationContext* opCtx, - const UUID& collUuid) { - auto& currentPhase = _defragmentationStates.at(collUuid); - auto phaseTransitionNeeded = [¤tPhase] { - return currentPhase && currentPhase->isComplete() && - MONGO_likely(!skipDefragmentationPhaseTransition.shouldFail()); - }; - bool advanced = false; - boost::optional coll(boost::none); - while (phaseTransitionNeeded()) { - if (!coll) { - coll = ShardingCatalogManager::get(opCtx)->localCatalogClient()->getCollection( - opCtx, collUuid); - } - currentPhase = _transitionPhases(opCtx, *coll, currentPhase->getNextPhase()); - advanced = true; - } - return advanced; -} - -void BalancerDefragmentationPolicyImpl::applyActionResult( - OperationContext* opCtx, - const BalancerStreamAction& action, - const BalancerStreamActionResponse& response) { - { - stdx::lock_guard lk(_stateMutex); - DefragmentationPhase* targetState = nullptr; - stdx::visit( - OverloadedVisitor{[&](const MergeInfo& act) { - if (_defragmentationStates.contains(act.uuid)) { - targetState = _defragmentationStates.at(act.uuid).get(); - } - }, - [&](const DataSizeInfo& act) { - if (_defragmentationStates.contains(act.uuid)) { - targetState = _defragmentationStates.at(act.uuid).get(); - } - }, - [&](const MigrateInfo& act) { - if (_defragmentationStates.contains(act.uuid)) { - targetState = _defragmentationStates.at(act.uuid).get(); - } - }, - [](const MergeAllChunksOnShardInfo& _) { - uasserted(ErrorCodes::BadValue, "Unexpected action type"); - }}, - action); - - if (targetState) { - targetState->applyActionResult(opCtx, action, response); - } - } - _onStateUpdated(); -} - -std::unique_ptr BalancerDefragmentationPolicyImpl::_transitionPhases( - OperationContext* opCtx, - const CollectionType& coll, - DefragmentationPhaseEnum nextPhase, - bool shouldPersistPhase) { - std::unique_ptr nextPhaseObject(nullptr); - - try { - if (shouldPersistPhase) { - _persistPhaseUpdate(opCtx, nextPhase, coll.getUuid()); - } - switch (nextPhase) { - case DefragmentationPhaseEnum::kMergeAndMeasureChunks: - nextPhaseObject = MergeAndMeasureChunksPhase::build(opCtx, coll); - break; - case DefragmentationPhaseEnum::kMoveAndMergeChunks: { - auto collectionShardStats = - uassertStatusOK(_clusterStats->getCollStats(opCtx, coll.getNss())); - nextPhaseObject = - MoveAndMergeChunksPhase::build(opCtx, coll, std::move(collectionShardStats)); - } break; - case DefragmentationPhaseEnum::kMergeChunks: - nextPhaseObject = MergeChunksPhase::build(opCtx, coll); - break; - case DefragmentationPhaseEnum::kFinished: - default: // Exit defragmentation in case of unexpected phase - _clearDefragmentationState(opCtx, coll.getUuid()); - break; - } - afterBuildingNextDefragmentationPhase.pauseWhileSet(); - LOGV2(6172702, - "Collection defragmentation transitioned to new phase", - logAttrs(coll.getNss()), - "phase"_attr = nextPhaseObject - ? DefragmentationPhase_serializer(nextPhaseObject->getType()) - : kNoPhase, - "details"_attr = nextPhaseObject ? 
nextPhaseObject->reportProgress() : BSONObj()); - } catch (const DBException& e) { - LOGV2_ERROR(6153101, - "Error while building defragmentation phase on collection", - logAttrs(coll.getNss()), - "uuid"_attr = coll.getUuid(), - "phase"_attr = nextPhase, - "error"_attr = e); - } - return nextPhaseObject; -} - -void BalancerDefragmentationPolicyImpl::_initializeCollectionState(WithLock, - OperationContext* opCtx, - const CollectionType& coll) { - if (MONGO_unlikely(skipDefragmentationPhaseTransition.shouldFail())) { - return; - } - auto phaseToBuild = coll.getDefragmentationPhase() - ? coll.getDefragmentationPhase().value() - : DefragmentationPhaseEnum::kMergeAndMeasureChunks; - auto collectionPhase = - _transitionPhases(opCtx, coll, phaseToBuild, !coll.getDefragmentationPhase().has_value()); - while (collectionPhase && collectionPhase->isComplete() && - MONGO_likely(!skipDefragmentationPhaseTransition.shouldFail())) { - collectionPhase = _transitionPhases(opCtx, coll, collectionPhase->getNextPhase()); - } - if (collectionPhase) { - auto [_, inserted] = - _defragmentationStates.insert_or_assign(coll.getUuid(), std::move(collectionPhase)); - dassert(inserted); - } -} - -void BalancerDefragmentationPolicyImpl::_persistPhaseUpdate(OperationContext* opCtx, - DefragmentationPhaseEnum phase, - const UUID& uuid) { - DBDirectClient dbClient(opCtx); - write_ops::UpdateCommandRequest updateOp(CollectionType::ConfigNS); - updateOp.setUpdates({[&] { - write_ops::UpdateOpEntry entry; - entry.setQ(BSON(CollectionType::kUuidFieldName << uuid)); - entry.setU(write_ops::UpdateModification::parseFromClassicUpdate( - BSON("$set" << BSON(CollectionType::kDefragmentationPhaseFieldName - << DefragmentationPhase_serializer(phase))))); - return entry; - }()}); - auto response = write_ops::checkWriteErrors(dbClient.update(updateOp)); - uassert(ErrorCodes::NoMatchingDocument, - "Collection {} not found while persisting phase change"_format(uuid.toString()), - response.getN() > 0); - WriteConcernResult ignoreResult; - const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - uassertStatusOK(waitForWriteConcern( - opCtx, latestOpTime, WriteConcerns::kMajorityWriteConcernShardingTimeout, &ignoreResult)); -} - -void BalancerDefragmentationPolicyImpl::_clearDefragmentationState(OperationContext* opCtx, - const UUID& uuid) { - DBDirectClient dbClient(opCtx); - - // Clear datasize estimates from chunks - write_ops::checkWriteErrors(dbClient.update(write_ops::UpdateCommandRequest( - ChunkType::ConfigNS, {[&] { - write_ops::UpdateOpEntry entry; - entry.setQ(BSON(CollectionType::kUuidFieldName << uuid)); - entry.setU(write_ops::UpdateModification::parseFromClassicUpdate( - BSON("$unset" << BSON(ChunkType::estimatedSizeBytes.name() << "")))); - entry.setMulti(true); - return entry; - }()}))); - - // Clear defragmentation phase and defragmenting flag from collection - write_ops::checkWriteErrors(dbClient.update(write_ops::UpdateCommandRequest( - CollectionType::ConfigNS, {[&] { - write_ops::UpdateOpEntry entry; - entry.setQ(BSON(CollectionType::kUuidFieldName << uuid)); - entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(BSON( - "$unset" << BSON(CollectionType::kDefragmentCollectionFieldName - << "" << CollectionType::kDefragmentationPhaseFieldName << "")))); - return entry; - }()}))); - - WriteConcernResult ignoreResult; - const auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - uassertStatusOK(waitForWriteConcern( - opCtx, latestOpTime, 
WriteConcerns::kMajorityWriteConcernShardingTimeout, &ignoreResult)); -} - -} // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.h b/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.h deleted file mode 100644 index 1b9f639b0b2ce..0000000000000 --- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_impl.h +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/s/balancer/balancer_defragmentation_policy.h" -#include "mongo/db/s/balancer/balancer_policy.h" -#include "mongo/db/s/balancer/balancer_random.h" -#include "mongo/s/catalog/type_collection.h" - -namespace mongo { - -/** - * Interface describing the interactions that the defragmentation policy can establish with the - * phase of the algorithm that is currently active on a collection. - * With the exception getType(), its methods do not guarantee thread safety. 
- */ -class DefragmentationPhase { -public: - virtual ~DefragmentationPhase() {} - - virtual DefragmentationPhaseEnum getType() const = 0; - - virtual DefragmentationPhaseEnum getNextPhase() const = 0; - - virtual boost::optional popNextStreamableAction( - OperationContext* opCtx) = 0; - - virtual boost::optional popNextMigration( - OperationContext* opCtx, stdx::unordered_set* availableShards) = 0; - - virtual void applyActionResult(OperationContext* opCtx, - const BalancerStreamAction& action, - const BalancerStreamActionResponse& response) = 0; - - virtual BSONObj reportProgress() const = 0; - - virtual bool isComplete() const = 0; - - virtual void userAbort() = 0; - -protected: - static constexpr uint64_t kSmallChunkSizeThresholdPctg = 25; -}; - -class BalancerDefragmentationPolicyImpl : public BalancerDefragmentationPolicy { - BalancerDefragmentationPolicyImpl(const BalancerDefragmentationPolicyImpl&) = delete; - BalancerDefragmentationPolicyImpl& operator=(const BalancerDefragmentationPolicyImpl&) = delete; - -public: - BalancerDefragmentationPolicyImpl(ClusterStatistics* clusterStats, - const std::function& onStateUpdated) - : _clusterStats(clusterStats), - _random(std::random_device{}()), - _onStateUpdated(onStateUpdated) {} - - ~BalancerDefragmentationPolicyImpl() {} - - void interruptAllDefragmentations() override; - - bool isDefragmentingCollection(const UUID& uuid) override; - - virtual BSONObj reportProgressOn(const UUID& uuid) override; - - MigrateInfoVector selectChunksToMove(OperationContext* opCtx, - stdx::unordered_set* availableShards) override; - - StringData getName() const override; - - boost::optional getNextStreamingAction(OperationContext* opCtx) override; - - void applyActionResult(OperationContext* opCtx, - const BalancerStreamAction& action, - const BalancerStreamActionResponse& response) override; - - void startCollectionDefragmentations(OperationContext* opCtx) override; - - void abortCollectionDefragmentation(OperationContext* opCtx, - const NamespaceString& nss) override; - -private: - /** - * Advances the defragmentation state of the specified collection to the next actionable phase - * (or sets the related DefragmentationPhase object to nullptr if nothing more can be done). - */ - bool _advanceToNextActionablePhase(OperationContext* opCtx, const UUID& collUuid); - - /** - * Move to the next phase and persist the phase change. This will end defragmentation if the - * next phase is kFinished. - * Must be called while holding the _stateMutex. - */ - std::unique_ptr _transitionPhases(OperationContext* opCtx, - const CollectionType& coll, - DefragmentationPhaseEnum nextPhase, - bool shouldPersistPhase = true); - - /** - * Builds the defragmentation phase object matching the current state of the passed - * collection and sets it into _defragmentationStates. - */ - void _initializeCollectionState(WithLock, OperationContext* opCtx, const CollectionType& coll); - - /** - * Write the new phase to the defragmentationPhase field in config.collections. If phase is - * kFinished, the field will be removed. - * Must be called while holding the _stateMutex. - */ - void _persistPhaseUpdate(OperationContext* opCtx, - DefragmentationPhaseEnum phase, - const UUID& uuid); - - /** - * Remove all datasize fields from config.chunks for the given namespace. - * Must be called while holding the _stateMutex. 
- */ - void _clearDefragmentationState(OperationContext* opCtx, const UUID& uuid); - - const std::string kPolicyName{"BalancerDefragmentationPolicy"}; - - Mutex _stateMutex = MONGO_MAKE_LATCH("BalancerChunkMergerImpl::_stateMutex"); - - ClusterStatistics* const _clusterStats; - - BalancerRandomSource _random; - - const std::function _onStateUpdated; - - stdx::unordered_map, UUID::Hash> - _defragmentationStates; -}; -} // namespace mongo diff --git a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp index 4a3f73aca5421..efe157eecd2be 100644 --- a/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp +++ b/src/mongo/db/s/balancer/balancer_defragmentation_policy_test.cpp @@ -27,11 +27,51 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/s/balancer/balancer_defragmentation_policy_impl.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/s/balancer/balancer_defragmentation_policy.h" #include "mongo/db/s/balancer/cluster_statistics_mock.h" #include "mongo/db/s/config/config_server_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/grid.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -147,7 +187,7 @@ class BalancerDefragmentationPolicyTest : public ConfigServerTestFixture { } ClusterStatisticsMock _clusterStats; - BalancerDefragmentationPolicyImpl _defragmentationPolicy; + BalancerDefragmentationPolicy _defragmentationPolicy; ShardStatistics buildShardStats(ShardId id, uint64_t currentSizeBytes, diff --git a/src/mongo/db/s/balancer/balancer_policy.cpp b/src/mongo/db/s/balancer/balancer_policy.cpp index cb914a55058b0..04e6c25e111ff 100644 --- a/src/mongo/db/s/balancer/balancer_policy.cpp +++ b/src/mongo/db/s/balancer/balancer_policy.cpp @@ -28,19 +28,40 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/balancer/balancer_policy.h" - +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/balancer/balancer_policy.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/logv2/log.h" -#include "mongo/s/balancer_configuration.h" 
-#include "mongo/s/catalog/type_shard.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_tags.h" -#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -160,7 +181,7 @@ StatusWith ZoneInfo::getZonesForCollection(OperationContext* opCtx, ShardingCatalogManager::get(opCtx)->localCatalogClient()->getTagsForCollection(opCtx, nss); if (!swCollectionZones.isOK()) { return swCollectionZones.getStatus().withContext( - str::stream() << "Unable to load zones for collection " << nss); + str::stream() << "Unable to load zones for collection " << nss.toStringForErrorMsg()); } const auto& collectionZones = swCollectionZones.getValue(); @@ -388,11 +409,18 @@ MigrateInfosWithReason BalancerPolicy::balance( invariant(to != stat.shardId); - migrations.emplace_back(to, - distribution.nss(), - chunk, - ForceJumbo::kForceBalancer, - collDataSizeInfo.maxChunkSizeBytes); + migrations.emplace_back( + to, + chunk.getShard(), + distribution.nss(), + chunk.getCollectionUUID(), + chunk.getMin(), + boost::none /* max */, + chunk.getVersion(), + // Always force jumbo chunks to be migrated off draining shards + ForceJumbo::kForceBalancer, + collDataSizeInfo.maxChunkSizeBytes); + if (firstReason == MigrationReason::none) { firstReason = MigrationReason::drain; } @@ -462,11 +490,16 @@ MigrateInfosWithReason BalancerPolicy::balance( invariant(to != stat.shardId); migrations.emplace_back(to, + chunk.getShard(), distribution.nss(), - chunk, + chunk.getCollectionUUID(), + chunk.getMin(), + boost::none /* max */, + chunk.getVersion(), forceJumbo ? 
ForceJumbo::kForceBalancer : ForceJumbo::kDoNotForce, collDataSizeInfo.maxChunkSizeBytes); + if (firstReason == MigrationReason::none) { firstReason = MigrationReason::zoneViolation; } @@ -522,7 +555,8 @@ MigrateInfosWithReason BalancerPolicy::balance( tassert(ErrorCodes::BadValue, str::stream() << "Total data size for shards in zone " << zone << " and collection " - << distribution.nss() << " must be greater or equal than zero but is " + << distribution.nss().toStringForErrorMsg() + << " must be greater or equal than zero but is " << totalDataSizeOfShardsWithZone, totalDataSizeOfShardsWithZone >= 0); @@ -634,7 +668,7 @@ bool BalancerPolicy::_singleZoneBalanceBasedOnDataSize( distribution.nss(), chunk.getCollectionUUID(), chunk.getMin(), - boost::none /* call moveRange*/, + boost::none /* max */, chunk.getVersion(), forceJumbo, collDataSizeInfo.maxChunkSizeBytes); @@ -749,7 +783,7 @@ std::string SplitInfo::toString() const { } return "Splitting chunk in {} [ {}, {} ), residing on {} at [ {} ] with version {} and collection placement version {}"_format( - nss.ns(), + toStringForLogging(nss), minKey.toString(), maxKey.toString(), shardId.toString(), @@ -772,7 +806,7 @@ MergeInfo::MergeInfo(const ShardId& shardId, std::string MergeInfo::toString() const { return "Merging chunk range {} in {} residing on {} with collection placement version {}"_format( chunkRange.toString(), - nss.toString(), + NamespaceStringUtil::serialize(nss), shardId.toString(), collectionPlacementVersion.toString()); } @@ -783,7 +817,7 @@ MergeAllChunksOnShardInfo::MergeAllChunksOnShardInfo(const ShardId& shardId, std::string MergeAllChunksOnShardInfo::toString() const { return "Merging all contiguous chunks residing on shard {} for collection {}"_format( - shardId.toString(), nss.toString()); + shardId.toString(), NamespaceStringUtil::serialize(nss)); } DataSizeInfo::DataSizeInfo(const ShardId& shardId, diff --git a/src/mongo/db/s/balancer/balancer_policy.h b/src/mongo/db/s/balancer/balancer_policy.h index e8f78d4ffd46e..2267a9fcab6fc 100644 --- a/src/mongo/db/s/balancer/balancer_policy.h +++ b/src/mongo/db/s/balancer/balancer_policy.h @@ -29,19 +29,36 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobj_comparator_interface.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/balancer/cluster_statistics.h" #include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/request_types/move_range_request_gen.h" #include "mongo/s/shard_version.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/balancer/balancer_policy_test.cpp b/src/mongo/db/s/balancer/balancer_policy_test.cpp index f4a247802589f..153c114454bf8 100644 --- a/src/mongo/db/s/balancer/balancer_policy_test.cpp +++ b/src/mongo/db/s/balancer/balancer_policy_test.cpp @@ -27,12 +27,28 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/keypattern.h" #include "mongo/db/s/balancer/balancer_policy.h" -#include "mongo/platform/random.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/catalog/type_chunk.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -55,7 +71,9 @@ const auto kShardId2 = ShardId("shard2"); const auto kShardId3 = ShardId("shard3"); const auto kShardId4 = ShardId("shard4"); const auto kShardId5 = ShardId("shard5"); -const NamespaceString kNamespace("TestDB", "TestColl"); +const NamespaceString kNamespace = + NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); +const KeyPattern kShardKeyPattern(BSON("x" << 1)); /** * Constructs a shard statistics vector and a consistent mapping of chunks to shards given the @@ -78,8 +96,6 @@ std::pair generateCluster( ChunkVersion chunkVersion({OID::gen(), Timestamp(1, 1)}, {1, 0}); const UUID uuid = UUID::gen(); - const KeyPattern shardKeyPattern(BSON("x" << 1)); - for (const auto& shard : statsVector) { // Ensure that an entry is created chunkMap[shard.shardId]; @@ -87,8 +103,8 @@ std::pair generateCluster( ChunkType chunk; chunk.setCollectionUUID(uuid); - chunk.setMin(currentChunk == 0 ? shardKeyPattern.globalMin() : BSON("x" << currentChunk)); - chunk.setMax(currentChunk == totalNumChunks - 1 ? shardKeyPattern.globalMax() + chunk.setMin(currentChunk == 0 ? kShardKeyPattern.globalMin() : BSON("x" << currentChunk)); + chunk.setMax(currentChunk == totalNumChunks - 1 ? 
kShardKeyPattern.globalMax() : BSON("x" << ++currentChunk)); chunk.setShard(shard.shardId); chunk.setVersion(chunkVersion); @@ -458,7 +474,7 @@ TEST(BalancerPolicy, DrainingFromShardWithFewData) { auto cluster = generateCluster({ShardStatistics(kShardId0, 20 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - false /* draining */, + false /* not draining */, emptyZoneSet, emptyShardVersion, ShardStatistics::use_bytes_t()), @@ -483,14 +499,14 @@ TEST(BalancerPolicy, DrainingSingleChunkPerShard) { auto cluster = generateCluster({ShardStatistics(kShardId0, 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - true, + true /* draining */, emptyZoneSet, emptyShardVersion, ShardStatistics::use_bytes_t()), ShardStatistics(kShardId1, 0, false, emptyZoneSet, emptyShardVersion), ShardStatistics(kShardId2, 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - true, + true /* draining */, emptyZoneSet, emptyShardVersion, ShardStatistics::use_bytes_t()), @@ -510,13 +526,13 @@ TEST(BalancerPolicy, DrainingSingleChunkPerShard) { ASSERT_BSONOBJ_EQ(cluster.second[kShardId2][0].getMin(), migrations[1].minKey); } -TEST(BalancerPolicy, DrainingMultipleShardsFirstOneSelected) { - // shard0 and shard1 are both draining with very little data in them and chunks will go to - // shard2, even though it has a lot more data that the other two +TEST(BalancerPolicy, DrainingMultipleShardsAtLeastOneSelected) { + // shard1 and shard2 are both draining with very little data in them and chunks will go to + // shard0, even though it has a lot more data that the other two auto cluster = generateCluster({ShardStatistics(kShardId0, 50 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - false /* draining */, + false /* not draining */, emptyZoneSet, emptyShardVersion, ShardStatistics::use_bytes_t()), @@ -546,7 +562,7 @@ TEST(BalancerPolicy, DrainingMultipleShardsWontAcceptMigrations) { auto cluster = generateCluster( {ShardStatistics(kShardId0, 20 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - false /* draining */, + false /* not draining */, emptyZoneSet, emptyShardVersion, ShardStatistics::use_bytes_t()), @@ -575,7 +591,7 @@ TEST(BalancerPolicy, DrainingSingleAppropriateShardFoundDueToZone) { ShardStatistics::use_bytes_t()), ShardStatistics(kShardId2, ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - true, + true /* draining */, {"LAX"}, emptyShardVersion, ShardStatistics::use_bytes_t())}); @@ -608,7 +624,7 @@ TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) { ShardStatistics::use_bytes_t()), ShardStatistics(kShardId2, ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - true, + true /* draining */, {"SEA"}, emptyShardVersion, ShardStatistics::use_bytes_t())}); @@ -624,14 +640,14 @@ TEST(BalancerPolicy, DrainingNoAppropriateShardsFoundDueToZone) { TEST(BalancerPolicy, NoBalancingDueToAllNodesDraining) { auto cluster = generateCluster({ShardStatistics(kShardId0, - 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - true, + 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + true /* draining */, emptyZoneSet, emptyShardVersion, ShardStatistics::use_bytes_t()), ShardStatistics(kShardId2, ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - true, + true /* draining */, emptyZoneSet, emptyShardVersion, ShardStatistics::use_bytes_t())}); @@ -652,7 +668,7 @@ TEST(BalancerPolicy, BalancerRespectsZonesWhenDraining) { ShardStatistics::use_bytes_t()), ShardStatistics(kShardId1, 5 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, - true, + true /* draining */, {"a", "b"}, emptyShardVersion, 
ShardStatistics::use_bytes_t()), @@ -664,8 +680,10 @@ TEST(BalancerPolicy, BalancerRespectsZonesWhenDraining) { ShardStatistics::use_bytes_t())}); DistributionStatus distribution(kNamespace, cluster.second); - ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 7), "a"))); - ASSERT_OK(distribution.addRangeToZone(ZoneRange(BSON("x" << 8), kMaxBSONKey, "b"))); + ASSERT_OK( + distribution.addRangeToZone(ZoneRange(kShardKeyPattern.globalMin(), BSON("x" << 7), "a"))); + ASSERT_OK( + distribution.addRangeToZone(ZoneRange(BSON("x" << 8), kShardKeyPattern.globalMax(), "b"))); const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false); ASSERT_EQ(1U, migrations.size()); @@ -693,10 +711,120 @@ TEST(BalancerPolicy, BalancerZoneAlreadyBalanced) { }); DistributionStatus distribution(kNamespace, cluster.second); - ASSERT_OK(distribution.addRangeToZone(ZoneRange(kMinBSONKey, kMaxBSONKey, "a"))); + ASSERT_OK(distribution.addRangeToZone( + ZoneRange(kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax(), "a"))); ASSERT(balanceChunks(cluster.first, distribution, false, false).first.empty()); } +TEST(BalancerPolicy, ScheduleMigrationForChunkViolatingZone) { + // Zone violation: shard1 owns a chunk from zone "a" + auto cluster = generateCluster({ + ShardStatistics(kShardId0, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + false, + {"a"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + ShardStatistics(kShardId1, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + false, + {"b"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + }); + + DistributionStatus distribution(kNamespace, cluster.second); + ASSERT_OK(distribution.addRangeToZone( + ZoneRange(kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax(), "a"))); + + const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false); + ASSERT_EQ(1U, migrations.size()); + ASSERT_EQ(kShardId1, migrations[0].from); + ASSERT_EQ(kShardId0, migrations[0].to); + ASSERT_BSONOBJ_EQ(cluster.second[kShardId1][0].getMin(), migrations[0].minKey); + ASSERT_EQ(MigrationReason::zoneViolation, reason); +} + +TEST(BalancerPolicy, ScheduleParallelMigrationsForZoneViolations) { + // shard2 and shard3 own chunks from zone "a" that are violating the shards zone + auto cluster = generateCluster({ + ShardStatistics(kShardId0, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + false, + {"a"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + ShardStatistics(kShardId1, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + false, + {"a"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + ShardStatistics(kShardId2, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + false, + {"b"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + ShardStatistics(kShardId3, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + false, + {"b"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + + }); + + DistributionStatus distribution(kNamespace, cluster.second); + ASSERT_OK(distribution.addRangeToZone( + ZoneRange(kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax(), "a"))); + + const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false); + ASSERT_EQ(2U, migrations.size()); + ASSERT_EQ(kShardId2, migrations[0].from); + ASSERT_EQ(kShardId0, migrations[0].to); + + ASSERT_EQ(kShardId3, migrations[1].from); + ASSERT_EQ(kShardId1, migrations[1].to); + + ASSERT_EQ(MigrationReason::zoneViolation, reason); +} + 
+TEST(BalancerPolicy, DrainingHasPrecedenceOverZoneViolation) { + // shard1 owns a chunk from zone "a" that violates the shard's zone; however, shard2 is in + // draining mode, so it takes precedence over shard1 + auto cluster = generateCluster({ + ShardStatistics(kShardId0, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + false, + {"a"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + ShardStatistics(kShardId1, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + false, + {"b"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + ShardStatistics(kShardId2, + 2 * ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes, + true /*draining*/, + {"a"}, + emptyShardVersion, + ShardStatistics::use_bytes_t()), + }); + + DistributionStatus distribution(kNamespace, cluster.second); + ASSERT_OK(distribution.addRangeToZone( + ZoneRange(kShardKeyPattern.globalMin(), kShardKeyPattern.globalMax(), "a"))); + + const auto [migrations, reason] = balanceChunks(cluster.first, distribution, false, false); + ASSERT_EQ(1U, migrations.size()); + ASSERT_EQ(kShardId2, migrations[0].from); + ASSERT_EQ(kShardId0, migrations[0].to); + ASSERT_EQ(MigrationReason::drain, reason); +} + TEST(BalancerPolicy, BalancerHandlesNoShardsWithZone) { auto cluster = generateCluster({ShardStatistics(kShardId0, @@ -713,8 +841,8 @@ TEST(BalancerPolicy, BalancerHandlesNoShardsWithZone) { ShardStatistics::use_bytes_t())}); DistributionStatus distribution(kNamespace, cluster.second); - ASSERT_OK( - distribution.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 7), "NonExistentZone"))); + ASSERT_OK(distribution.addRangeToZone( + ZoneRange(kShardKeyPattern.globalMin(), BSON("x" << 7), "NonExistentZone"))); ASSERT(balanceChunks(cluster.first, distribution, false, false).first.empty()); } @@ -727,19 +855,23 @@ TEST(DistributionStatus, AddZoneRangeOverlap) { ASSERT_OK(d.addRangeToZone(ZoneRange(BSON("x" << 20), BSON("x" << 30), "b"))); ASSERT_EQ(ErrorCodes::RangeOverlapConflict, - d.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << 2), "d"))); + d.addRangeToZone(ZoneRange(kShardKeyPattern.globalMin(), BSON("x" << 2), "d"))); ASSERT_EQ(ErrorCodes::RangeOverlapConflict, d.addRangeToZone(ZoneRange(BSON("x" << -1), BSON("x" << 5), "d"))); ASSERT_EQ(ErrorCodes::RangeOverlapConflict, d.addRangeToZone(ZoneRange(BSON("x" << 5), BSON("x" << 9), "d"))); ASSERT_EQ(ErrorCodes::RangeOverlapConflict, d.addRangeToZone(ZoneRange(BSON("x" << 1), BSON("x" << 10), "d"))); + ASSERT_EQ(ErrorCodes::RangeOverlapConflict, + d.addRangeToZone(ZoneRange(BSON("x" << 5), BSON("x" << 12), "d"))); ASSERT_EQ(ErrorCodes::RangeOverlapConflict, d.addRangeToZone(ZoneRange(BSON("x" << 5), BSON("x" << 25), "d"))); + ASSERT_EQ(ErrorCodes::RangeOverlapConflict, + d.addRangeToZone(ZoneRange(BSON("x" << 19), BSON("x" << 21), "d"))); ASSERT_EQ(ErrorCodes::RangeOverlapConflict, d.addRangeToZone(ZoneRange(BSON("x" << -1), BSON("x" << 32), "d"))); ASSERT_EQ(ErrorCodes::RangeOverlapConflict, - d.addRangeToZone(ZoneRange(BSON("x" << 25), kMaxBSONKey, "d"))); + d.addRangeToZone(ZoneRange(BSON("x" << 25), kShardKeyPattern.globalMax(), "d"))); } TEST(DistributionStatus, ChunkZonesSelectorWithRegularKeys) { @@ -751,7 +883,7 @@ TEST(DistributionStatus, ChunkZonesSelectorWithRegularKeys) { { ChunkType chunk; - chunk.setMin(kMinBSONKey); + chunk.setMin(kShardKeyPattern.globalMin()); chunk.setMax(BSON("x" << 1)); ASSERT_EQUALS("", d.getZoneForChunk(chunk)); } @@ -801,14 +933,14 @@ TEST(DistributionStatus, ChunkZonesSelectorWithRegularKeys) { { ChunkType chunk;
chunk.setMin(BSON("x" << 30)); - chunk.setMax(kMaxBSONKey); + chunk.setMax(kShardKeyPattern.globalMax()); ASSERT_EQUALS("", d.getZoneForChunk(chunk)); } { ChunkType chunk; chunk.setMin(BSON("x" << 40)); - chunk.setMax(kMaxBSONKey); + chunk.setMax(kShardKeyPattern.globalMax()); ASSERT_EQUALS("", d.getZoneForChunk(chunk)); } } @@ -816,13 +948,13 @@ TEST(DistributionStatus, ChunkZonesSelectorWithRegularKeys) { TEST(DistributionStatus, ChunkZonesSelectorWithMinMaxKeys) { DistributionStatus d(kNamespace, ShardToChunksMap{}); - ASSERT_OK(d.addRangeToZone(ZoneRange(kMinBSONKey, BSON("x" << -100), "a"))); + ASSERT_OK(d.addRangeToZone(ZoneRange(kShardKeyPattern.globalMin(), BSON("x" << -100), "a"))); ASSERT_OK(d.addRangeToZone(ZoneRange(BSON("x" << -10), BSON("x" << 10), "b"))); - ASSERT_OK(d.addRangeToZone(ZoneRange(BSON("x" << 100), kMaxBSONKey, "c"))); + ASSERT_OK(d.addRangeToZone(ZoneRange(BSON("x" << 100), kShardKeyPattern.globalMax(), "c"))); { ChunkType chunk; - chunk.setMin(kMinBSONKey); + chunk.setMin(kShardKeyPattern.globalMin()); chunk.setMax(BSON("x" << -100)); ASSERT_EQUALS("a", d.getZoneForChunk(chunk)); } @@ -865,7 +997,7 @@ TEST(DistributionStatus, ChunkZonesSelectorWithMinMaxKeys) { { ChunkType chunk; chunk.setMin(BSON("x" << 200)); - chunk.setMax(kMaxBSONKey); + chunk.setMax(kShardKeyPattern.globalMax()); ASSERT_EQUALS("c", d.getZoneForChunk(chunk)); } } diff --git a/src/mongo/db/s/balancer/balancer_random.h b/src/mongo/db/s/balancer/balancer_random.h deleted file mode 100644 index eb1712ee96d81..0000000000000 --- a/src/mongo/db/s/balancer/balancer_random.h +++ /dev/null @@ -1,38 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include - -namespace mongo { - -using BalancerRandomSource = std::minstd_rand; - -} // namespace mongo diff --git a/src/mongo/db/s/balancer/cluster_statistics.cpp b/src/mongo/db/s/balancer/cluster_statistics.cpp index 817f1e567f0e5..24a33c5d64d75 100644 --- a/src/mongo/db/s/balancer/cluster_statistics.cpp +++ b/src/mongo/db/s/balancer/cluster_statistics.cpp @@ -27,13 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/db/s/balancer/cluster_statistics.h" -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" - namespace { uint64_t convertMBToBytes(uint64_t inMB) { if (inMB > std::numeric_limits::max() / (1024 * 1024)) { diff --git a/src/mongo/db/s/balancer/cluster_statistics.h b/src/mongo/db/s/balancer/cluster_statistics.h index fe117689d93af..dad1ff9e48d72 100644 --- a/src/mongo/db/s/balancer/cluster_statistics.h +++ b/src/mongo/db/s/balancer/cluster_statistics.h @@ -29,6 +29,7 @@ #pragma once +#include #include #include #include @@ -40,6 +41,7 @@ namespace mongo { class BSONObj; + class OperationContext; /** diff --git a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp index 97e114b0f8d64..ceb6835281952 100644 --- a/src/mongo/db/s/balancer/cluster_statistics_impl.cpp +++ b/src/mongo/db/s/balancer/cluster_statistics_impl.cpp @@ -28,18 +28,38 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/balancer/cluster_statistics_impl.h" - #include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/client/read_preference.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/s/balancer/cluster_statistics_impl.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/shard_util.h" @@ -96,8 +116,6 @@ StatusWith retrieveShardMongoDVersion(OperationContext* opCtx, Shar using ShardStatistics = ClusterStatistics::ShardStatistics; -ClusterStatisticsImpl::ClusterStatisticsImpl(BalancerRandomSource& random) : _random(random) {} - ClusterStatisticsImpl::~ClusterStatisticsImpl() = default; StatusWith> ClusterStatisticsImpl::getStats(OperationContext* opCtx) { @@ -125,7 +143,8 @@ StatusWith> ClusterStatisticsImpl::_getStats( auto& shards = shardsStatus.getValue().value; - std::shuffle(shards.begin(), shards.end(), _random); + auto client = opCtx->getClient(); + std::shuffle(shards.begin(), shards.end(), client->getPrng().urbg()); std::vector stats; diff --git a/src/mongo/db/s/balancer/cluster_statistics_impl.h b/src/mongo/db/s/balancer/cluster_statistics_impl.h index 56037628a4715..e35f19ca6a08c 100644 --- a/src/mongo/db/s/balancer/cluster_statistics_impl.h +++ b/src/mongo/db/s/balancer/cluster_statistics_impl.h @@ -29,7 +29,12 @@ #pragma once -#include "mongo/db/s/balancer/balancer_random.h" +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/balancer/cluster_statistics.h" namespace mongo { @@ -41,7 +46,6 @@ namespace mongo { */ class ClusterStatisticsImpl final : public ClusterStatistics { public: - ClusterStatisticsImpl(BalancerRandomSource& random); ~ClusterStatisticsImpl(); StatusWith> 
getStats(OperationContext* opCtx) override; @@ -52,9 +56,6 @@ class ClusterStatisticsImpl final : public ClusterStatistics { private: StatusWith> _getStats(OperationContext* opCtx, boost::optional ns); - - // Source of randomness when metadata needs to be randomized. - BalancerRandomSource& _random; }; } // namespace mongo diff --git a/src/mongo/db/s/balancer/core_options_stub.cpp b/src/mongo/db/s/balancer/core_options_stub.cpp index 234d91c322a93..1f6d88f3858d6 100644 --- a/src/mongo/db/s/balancer/core_options_stub.cpp +++ b/src/mongo/db/s/balancer/core_options_stub.cpp @@ -27,8 +27,10 @@ * it in the license file. */ -#include "mongo/base/init.h" -#include "mongo/base/status.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/balancer/migration_test_fixture.cpp b/src/mongo/db/s/balancer/migration_test_fixture.cpp index e5b511810037e..c14ea655a38bf 100644 --- a/src/mongo/db/s/balancer/migration_test_fixture.cpp +++ b/src/mongo/db/s/balancer/migration_test_fixture.cpp @@ -27,9 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_field.h" #include "mongo/db/s/balancer/migration_test_fixture.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -45,14 +58,6 @@ std::shared_ptr MigrationTestFixture::shardTargeterMo uassertStatusOK(shardRegistry()->getShard(opCtx, shardId))->getTargeter()); } -void MigrationTestFixture::setUpDatabase(const std::string& dbName, const ShardId primaryShard) { - DatabaseType db(dbName, primaryShard, DatabaseVersion(UUID::gen(), Timestamp())); - ASSERT_OK(catalogClient()->insertConfigDocument(operationContext(), - NamespaceString::kConfigDatabasesNamespace, - db.toBSON(), - kMajorityWriteConcern)); -} - void MigrationTestFixture::setUpCollection( const NamespaceString& collName, const UUID& collUUID, @@ -86,10 +91,10 @@ void MigrationTestFixture::setUpZones(const NamespaceString& collName, const StringMap& zoneChunkRanges) { for (auto const& zoneChunkRange : zoneChunkRanges) { BSONObjBuilder zoneDocBuilder; - zoneDocBuilder.append( - "_id", - BSON(TagsType::ns(collName.ns()) << TagsType::min(zoneChunkRange.second.getMin()))); - zoneDocBuilder.append(TagsType::ns(), collName.ns()); + zoneDocBuilder.append("_id", + BSON(TagsType::ns(collName.toString_forTest()) + << TagsType::min(zoneChunkRange.second.getMin()))); + zoneDocBuilder.append(TagsType::ns(), collName.ns_forTest()); zoneDocBuilder.append(TagsType::min(), zoneChunkRange.second.getMin()); zoneDocBuilder.append(TagsType::max(), zoneChunkRange.second.getMax()); zoneDocBuilder.append(TagsType::tag(), zoneChunkRange.first); @@ -100,7 +105,7 @@ void MigrationTestFixture::setUpZones(const NamespaceString& collName, } void MigrationTestFixture::removeAllZones(const NamespaceString& collName) { - const auto query = BSON("ns" << collName.ns()); + const auto query = BSON("ns" << collName.ns_forTest()); ASSERT_OK(catalogClient()->removeConfigDocuments( operationContext(), TagsType::ConfigNS, query, kMajorityWriteConcern)); auto findStatus = findOneOnConfigCollection(operationContext(), collName, query); 
diff --git a/src/mongo/db/s/balancer/migration_test_fixture.h b/src/mongo/db/s/balancer/migration_test_fixture.h index 7540521ca8dcf..a78f212a0539d 100644 --- a/src/mongo/db/s/balancer/migration_test_fixture.h +++ b/src/mongo/db/s/balancer/migration_test_fixture.h @@ -27,15 +27,36 @@ * it in the license file. */ +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/config/config_server_test_fixture.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/shard_id.h" #include "mongo/db/write_concern_options.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -61,10 +82,19 @@ class MigrationTestFixture : public ConfigServerTestFixture { ShardId shardId); /** - * Inserts a document into the config.databases collection to indicate that "dbName" is sharded - * with primary "primaryShard". + * Set up the config.shards collection to contain the given shards. + * Additionally, set up dummy hosts for the targeted shards. */ - void setUpDatabase(const std::string& dbName, ShardId primaryShard); + void setupShards(const std::vector& shards) override { + ConfigServerTestFixture::setupShards(shards); + + // Requesting chunks to be relocated requires running commands on each shard to + // get shard statistics. Set up dummy hosts for the source shards.
+ for (const auto& shard : shards) { + shardTargeterMock(operationContext(), shard.getName()) + ->setFindHostReturnValue(HostAndPort(shard.getHost())); + } + } /** * Inserts a document into the config.collections collection to indicate that "collName" is @@ -119,16 +149,10 @@ class MigrationTestFixture : public ConfigServerTestFixture { const HostAndPort kShardHost2 = HostAndPort("TestHost2", 12347); const HostAndPort kShardHost3 = HostAndPort("TestHost3", 12348); - const long long kMaxSizeMB = 100; - - const BSONObj kShard0 = - BSON(ShardType::name(kShardId0.toString()) << ShardType::host(kShardHost0.toString())); - const BSONObj kShard1 = - BSON(ShardType::name(kShardId1.toString()) << ShardType::host(kShardHost1.toString())); - const BSONObj kShard2 = - BSON(ShardType::name(kShardId2.toString()) << ShardType::host(kShardHost2.toString())); - const BSONObj kShard3 = - BSON(ShardType::name(kShardId3.toString()) << ShardType::host(kShardHost3.toString())); + const ShardType kShard0{kShardId0.toString(), kShardHost0.toString()}; + const ShardType kShard1{kShardId1.toString(), kShardHost1.toString()}; + const ShardType kShard2{kShardId2.toString(), kShardHost2.toString()}; + const ShardType kShard3{kShardId3.toString(), kShardHost3.toString()}; const std::string kPattern = "_id"; const KeyPattern kKeyPattern = KeyPattern(BSON(kPattern << 1)); diff --git a/src/mongo/db/s/balancer_stats_registry.cpp b/src/mongo/db/s/balancer_stats_registry.cpp index 6e31af3754c1b..611fa0763a35a 100644 --- a/src/mongo/db/s/balancer_stats_registry.cpp +++ b/src/mongo/db/s/balancer_stats_registry.cpp @@ -29,15 +29,45 @@ #include "mongo/db/s/balancer_stats_registry.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregate_command_gen.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/range_deleter_service.h" #include "mongo/db/s/range_deletion_task_gen.h" -#include "mongo/db/s/range_deletion_util.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -89,6 +119,12 @@ void BalancerStatsRegistry::initializeAsync(OperationContext* opCtx) { ThreadClient tc("BalancerStatsRegistry::asynchronousInitialization", getGlobalServiceContext()); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + { stdx::lock_guard lk{_stateMutex}; if (const auto currentState = _state.load(); currentState != State::kPrimaryIdle) { @@ -233,7 +269,8 @@ void BalancerStatsRegistry::onRangeDeletionTaskDeletion(const UUID& collectionUU stdx::lock_guard lk{_mutex}; auto collStatsIt = _collStatsMap.find(collectionUUID); if (collStatsIt == _collStatsMap.end()) { - LOGV2_ERROR(6419612, + LOGV2_DEBUG(6419612, + 1, "Couldn't find cached range deletion tasks count during decrese attempt", "collectionUUID"_attr = collectionUUID, "numOrphanDocs"_attr = numOrphanDocs); @@ -246,7 +283,8 @@ void BalancerStatsRegistry::onRangeDeletionTaskDeletion(const UUID& collectionUU if (stats.numRangeDeletionTasks <= 0) { if (MONGO_unlikely(stats.numRangeDeletionTasks < 0)) { - LOGV2_ERROR(6419613, + LOGV2_DEBUG(6419613, + 1, "Cached count of range deletion tasks became negative. Resetting it to 0", "collectionUUID"_attr = collectionUUID, "numRangeDeletionTasks"_attr = stats.numRangeDeletionTasks, diff --git a/src/mongo/db/s/balancer_stats_registry.h b/src/mongo/db/s/balancer_stats_registry.h index a5effc6b0e25e..3abf6c2de5c25 100644 --- a/src/mongo/db/s/balancer_stats_registry.h +++ b/src/mongo/db/s/balancer_stats_registry.h @@ -29,30 +29,21 @@ #pragma once -#include "mongo/db/concurrency/d_concurrency.h" +#include +#include + #include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/s/range_deleter_service.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/uuid.h" namespace mongo { -/** - * Scoped lock to synchronize with the execution of range deletions. - * The range-deleter acquires a scoped lock in IX mode while orphans are being deleted. - * Acquiring the scoped lock in MODE_X ensures that no orphan counter in `config.rangeDeletions` - * entries is going to be updated concurrently. - */ -class ScopedRangeDeleterLock { -public: - ScopedRangeDeleterLock(OperationContext* opCtx, LockMode mode) - : _resourceLock(opCtx, _mutex.getRid(), mode) {} - -private: - const Lock::ResourceLock _resourceLock; - static inline const Lock::ResourceMutex _mutex{"ScopedRangeDeleterLock"}; -}; - /** * The BalancerStatsRegistry is used to cache metadata on shards, such as the orphan documents * count. The blancer (on the config sever) periodically fetches this metadata through the diff --git a/src/mongo/db/s/check_sharding_index_command.cpp b/src/mongo/db/s/check_sharding_index_command.cpp index b1094698bdec5..81e80b503f431 100644 --- a/src/mongo/db/s/check_sharding_index_command.cpp +++ b/src/mongo/db/s/check_sharding_index_command.cpp @@ -27,16 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" -#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -81,7 +94,8 @@ class CheckShardingIndex : public ErrmsgCommandDeprecated { const BSONObj& jsobj, std::string& errmsg, BSONObjBuilder& result) override { - const NamespaceString nss(parseNs({boost::none, dbname}, jsobj)); + const NamespaceString nss( + parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), jsobj)); BSONObj keyPattern = jsobj.getObjectField("keyPattern"); if (keyPattern.isEmpty()) { diff --git a/src/mongo/db/s/chunk_manager_refresh_bm.cpp b/src/mongo/db/s/chunk_manager_refresh_bm.cpp index fad17e0b70104..bc4ba21a4f0ab 100644 --- a/src/mongo/db/s/chunk_manager_refresh_bm.cpp +++ b/src/mongo/db/s/chunk_manager_refresh_bm.cpp @@ -27,19 +27,54 @@ * it in the license file. */ +#include #include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/s/collection_metadata.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/random.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { -const NamespaceString kNss("test", "foo"); +const NamespaceString kNss = NamespaceString::createNamespaceString_forTest("test", "foo"); RoutingTableHistoryValueHandle makeStandaloneRoutingTableHistory(RoutingTableHistory rt) { const auto version = rt.getVersion(); @@ -48,17 +83,17 @@ RoutingTableHistoryValueHandle makeStandaloneRoutingTableHistory(RoutingTableHis ComparableChunkVersion::makeComparableChunkVersion(version)); } +ShardId getShardId(int i) { + return {std::string(str::stream() << "shard_" << i)}; +} + ChunkRange getRangeForChunk(int i, int nChunks) { invariant(i >= 0); invariant(nChunks > 0); invariant(i < nChunks); - if (i == 0) { - return {BSON("_id" << MINKEY), BSON("_id" << 0)}; - } 
- if (i + 1 == nChunks) { - return {BSON("_id" << (i - 1) * 100), BSON("_id" << MAXKEY)}; - } - return {BSON("_id" << (i - 1) * 100), BSON("_id" << i * 100)}; + auto min = (i == 0) ? BSON("_id" << MINKEY) : BSON("_id" << (i - 1) * 100); + auto max = (i == nChunks - 1) ? BSON("_id" << MAXKEY) : BSON("_id" << i * 100); + return {std::move(min), std::move(max)}; } template @@ -90,21 +125,21 @@ CollectionMetadata makeChunkManagerWithShardSelector(int nShards, boost::none /* reshardingFields */, true, chunks); - return CollectionMetadata(ChunkManager(ShardId("Shard0"), + return CollectionMetadata(ChunkManager(getShardId(0), DatabaseVersion(UUID::gen(), Timestamp(1, 0)), makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), - ShardId("shard0")); + getShardId(0)); } ShardId pessimalShardSelector(int i, int nShards, int nChunks) { - return ShardId(str::stream() << "shard" << (i % nShards)); + return getShardId(i % nShards); } ShardId optimalShardSelector(int i, int nShards, int nChunks) { invariant(nShards <= nChunks); const auto shardNum = (int64_t(i) * nShards / nChunks) % nShards; - return ShardId(str::stream() << "shard" << shardNum); + return getShardId(shardNum); } MONGO_COMPILER_NOINLINE auto makeChunkManagerWithPessimalBalancedDistribution(int nShards, @@ -121,35 +156,133 @@ MONGO_COMPILER_NOINLINE auto runIncrementalUpdate(const CollectionMetadata& cm, const std::vector& newChunks) { auto rt = cm.getChunkManager()->getRoutingTableHistory_ForTest().makeUpdated( boost::none /* timeseriesFields */, boost::none /* reshardingFields */, true, newChunks); - return CollectionMetadata(ChunkManager(ShardId("shard0"), + return CollectionMetadata(ChunkManager(getShardId(0), DatabaseVersion(UUID::gen(), Timestamp(1, 0)), makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), - ShardId("shard0")); + getShardId(0)); } -void BM_IncrementalRefreshOfPessimalBalancedDistribution(benchmark::State& state) { +/* + * Simulate a refresh of the ChunkManager where a number of chunks is migrated from one shard to + * another. + * + * The chunks modified in the routing table are equally spaced. 
+ */ +void BM_IncrementalSpacedRefreshMoveChunks(benchmark::State& state) { const int nShards = state.range(0); const int nChunks = state.range(1); + const int nUpdates = state.range(2); auto metadata = makeChunkManagerWithPessimalBalancedDistribution(nShards, nChunks); - auto postMoveVersion = metadata.getChunkManager()->getVersion(); - const UUID uuid = metadata.getUUID(); + auto lastVersion = metadata.getCollPlacementVersion(); + std::vector newChunks; - postMoveVersion.incMajor(); - newChunks.emplace_back(uuid, getRangeForChunk(1, nChunks), postMoveVersion, ShardId("shard0")); - postMoveVersion.incMajor(); - newChunks.emplace_back(uuid, getRangeForChunk(3, nChunks), postMoveVersion, ShardId("shard1")); + newChunks.reserve(nUpdates); + const auto updateSpacing = nChunks / nUpdates; + for (int i = 0; i < nUpdates; i++) { + const auto idx = i * updateSpacing; + lastVersion.incMajor(); + newChunks.emplace_back(metadata.getUUID(), + getRangeForChunk(idx, nChunks), + lastVersion, + pessimalShardSelector(idx, nShards, nChunks)); + } - for (auto keepRunning : state) { + std::mt19937 g; + g.seed(456); + std::shuffle(newChunks.begin(), newChunks.end(), g); + + for (auto _ : state) { + benchmark::DoNotOptimize(runIncrementalUpdate(metadata, newChunks)); + } +} + +BENCHMARK(BM_IncrementalSpacedRefreshMoveChunks) + ->Args({4, 1, 1}) + ->Args({4, 10, 1}) + ->Args({4, 100, 1}) + ->Args({4, 1000, 1}) + ->Args({4, 10000, 1}) + ->Args({4, 100000, 1}) + ->Args({4, 10000, 10}) + ->Args({4, 10000, 100}) + ->Args({4, 10000, 1000}) + ->Args({4, 10000, 10000}); + +/* + * Simulate a refresh of the ChunkManager where a number of chunks is merged together. + */ +void BM_IncrementalSpacedRefreshMergeChunks(benchmark::State& state) { + const int nShards = state.range(0); + const int nChunks = state.range(1); + const int nUpdates = state.range(2); + auto metadata = makeChunkManagerWithOptimalBalancedDistribution(nShards, nChunks); + + auto lastVersion = metadata.getCollPlacementVersion(); + + std::vector newChunks; + newChunks.reserve(nUpdates); + invariant(nUpdates <= nShards); + const auto shardSpacing = nShards / (nUpdates + 1); + std::set shardsToMerge; + for (int i = 0; i < nUpdates; i++) { + invariant(i * shardSpacing <= nShards); + shardsToMerge.emplace(getShardId(i * shardSpacing)); + } + + ShardId shardId; + std::vector rangesToMerge; + + const auto flushRanges = [&] { + if (rangesToMerge.empty()) { + return; + } + + lastVersion.incMajor(); + newChunks.emplace_back( + metadata.getUUID(), + ChunkRange(rangesToMerge.front().getMin(), rangesToMerge.back().getMax()), + lastVersion, + shardId); + rangesToMerge.clear(); + }; + + for (int i = 0; i < nChunks; i++) { + auto nextShardId = pessimalShardSelector(i, nShards, nChunks); + if (nextShardId != shardId) { + flushRanges(); + shardId = nextShardId; + } + if (shardsToMerge.count(shardId) == 1) { + rangesToMerge.emplace_back(getRangeForChunk(i, nChunks)); + } + } + flushRanges(); + + std::mt19937 g; + g.seed(456); + std::shuffle(newChunks.begin(), newChunks.end(), g); + + for (auto _ : state) { benchmark::DoNotOptimize(runIncrementalUpdate(metadata, newChunks)); } } -BENCHMARK(BM_IncrementalRefreshOfPessimalBalancedDistribution) - ->Args({2, 50000}) - ->Args({2, 250000}) - ->Args({2, 500000}); +/* + * Simulate chunks merge on a routing table of 10000 chunks partitioned among 4 shards. 
+ * + * [ 0, 2500) -> shard0 + * [2500, 5000) -> shard1 + * [5000, 7500) -> shard2 + * [7500, 10000) -> shard3 + */ + +BENCHMARK(BM_IncrementalSpacedRefreshMergeChunks) + ->Args({4, 10000, 1}) // merge all chunks on shard2 + ->Args({4, 10000, 2}) // merge all chunks on shard2 and shard3 + ->Args({4, 10000, 3}) // merge all chunks on shard1, shard2 and shard3 + ->Args({4, 10000, 4}); // merge all chunks on shard1, shard2, shard3 and shard4 template auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectShard) { @@ -183,11 +316,11 @@ auto BM_FullBuildOfChunkManager(benchmark::State& state, ShardSelectorFn selectS true, chunks); benchmark::DoNotOptimize( - CollectionMetadata(ChunkManager(ShardId("shard0"), + CollectionMetadata(ChunkManager(getShardId(0), DatabaseVersion(UUID::gen(), Timestamp(1, 0)), makeStandaloneRoutingTableHistory(std::move(rt)), boost::none), - ShardId("shard0"))); + getShardId(0))); } } @@ -409,11 +542,14 @@ MONGO_INITIALIZER(RegisterBenchmarks)(InitializerContext* context) { }; for (auto bmCase : bmCases) { - bmCase->Args({2, 50000}) - ->Args({10, 50000}) - ->Args({100, 50000}) - ->Args({1000, 50000}) - ->Args({2, 2}); + bmCase->Args({2, 2}) + ->Args({1, 10000}) + ->Args({10, 10000}) + ->Args({100, 10000}) + ->Args({1000, 10000}) + ->Args({10, 10}) + ->Args({10, 100}) + ->Args({10, 1000}); } } diff --git a/src/mongo/db/s/chunk_move_write_concern_options.cpp b/src/mongo/db/s/chunk_move_write_concern_options.cpp index 151596b2f4948..139c611d56700 100644 --- a/src/mongo/db/s/chunk_move_write_concern_options.cpp +++ b/src/mongo/db/s/chunk_move_write_concern_options.cpp @@ -28,15 +28,16 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/chunk_move_write_concern_options.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/service_context.h" +#include "mongo/db/s/chunk_move_write_concern_options.h" #include "mongo/s/request_types/migration_secondary_throttle_options.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/chunk_move_write_concern_options.h b/src/mongo/db/s/chunk_move_write_concern_options.h index 30e9e08dfcfc4..73d124d6b2f71 100644 --- a/src/mongo/db/s/chunk_move_write_concern_options.h +++ b/src/mongo/db/s/chunk_move_write_concern_options.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/status_with.h" +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/write_concern_options.h" namespace mongo { diff --git a/src/mongo/db/s/chunk_operation_precondition_checks.cpp b/src/mongo/db/s/chunk_operation_precondition_checks.cpp index fc7eda91b137e..353d9b4d87afb 100644 --- a/src/mongo/db/s/chunk_operation_precondition_checks.cpp +++ b/src/mongo/db/s/chunk_operation_precondition_checks.cpp @@ -27,11 +27,35 @@ * it in the license file. 
*/ +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -63,7 +87,7 @@ CollectionPlacementAndIndexInfo checkCollectionIdentity( ShardVersionPlacementIgnoredNoIndexes() /* receivedVersion */, boost::none /* wantedVersion */, shardId), - str::stream() << "Collection " << nss.ns() << " needs to be recovered", + str::stream() << "Collection " << nss.toStringForErrorMsg() << " needs to be recovered", optMetadata); auto metadata = *optMetadata; @@ -72,7 +96,7 @@ CollectionPlacementAndIndexInfo checkCollectionIdentity( ShardVersionPlacementIgnoredNoIndexes() /* receivedVersion */, ShardVersion::UNSHARDED() /* wantedVersion */, shardId), - str::stream() << "Collection " << nss.ns() << " is not sharded", + str::stream() << "Collection " << nss.toStringForErrorMsg() << " is not sharded", metadata.isSharded()); uassert(ErrorCodes::NamespaceNotFound, @@ -87,7 +111,7 @@ CollectionPlacementAndIndexInfo checkCollectionIdentity( ShardVersionPlacementIgnoredNoIndexes() /* receivedVersion */, shardVersion /* wantedVersion */, shardId), - str::stream() << "Collection " << nss.ns() + str::stream() << "Collection " << nss.toStringForErrorMsg() << " has changed since operation was sent (sent epoch: " << expectedEpoch << ", current epoch: " << placementVersion.epoch() << ")", expectedEpoch == placementVersion.epoch() && @@ -120,8 +144,8 @@ void checkShardKeyPattern(OperationContext* opCtx, shardVersion /* wantedVersion */, shardId), str::stream() << "The range " << chunkRange.toString() - << " is not valid for collection " << nss.ns() << " with key pattern " - << keyPattern.toString(), + << " is not valid for collection " << nss.toStringForErrorMsg() + << " with key pattern " << keyPattern.toString(), metadata.isValidKey(chunkRange.getMin()) && metadata.isValidKey(chunkRange.getMax())); } diff --git a/src/mongo/db/s/cleanup_orphaned_cmd.cpp b/src/mongo/db/s/cleanup_orphaned_cmd.cpp index 690f6b57a90f2..4da362647277d 100644 --- a/src/mongo/db/s/cleanup_orphaned_cmd.cpp +++ b/src/mongo/db/s/cleanup_orphaned_cmd.cpp @@ -27,13 +27,31 @@ * it in the license file. 
*/ -#include - +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/field_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_util.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" @@ -41,6 +59,18 @@ #include "mongo/db/s/sharding_state.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -161,11 +191,12 @@ class CleanupOrphanedCommand : public ErrmsgCommandDeprecated { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::cleanupOrphaned)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::cleanupOrphaned)) { return Status(ErrorCodes::Unauthorized, "Not authorized for cleanupOrphaned command."); } return Status::OK(); @@ -191,7 +222,7 @@ class CleanupOrphanedCommand : public ErrmsgCommandDeprecated { const NamespaceString nss(ns); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace: " << nss.ns(), + str::stream() << "Invalid namespace: " << nss.toStringForErrorMsg(), nss.isValid()); BSONObj startingFromKey; diff --git a/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.cpp b/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.cpp new file mode 100644 index 0000000000000..5ff0cc8a583b6 --- /dev/null +++ b/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.cpp @@ -0,0 +1,579 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/fle_options_gen.h" +#include "mongo/crypto/fle_stats.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/commands/rename_collection_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/drop_gen.h" +#include "mongo/db/fle_crud.h" +#include "mongo/db/s/cleanup_structured_encryption_data_coordinator.h" +#include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/shard_filtering_metadata_refresh.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/tenant_id.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/router_role.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding + + +namespace mongo { +namespace { + +MONGO_FAIL_POINT_DEFINE(fleCleanupHangBeforeECOCCreate); +MONGO_FAIL_POINT_DEFINE(fleCleanupHangBeforeCleanupESCNonAnchors); +MONGO_FAIL_POINT_DEFINE(fleCleanupHangAfterCleanupESCAnchors); +MONGO_FAIL_POINT_DEFINE(fleCleanupHangAfterDropTempCollection); + +const auto kMajorityWriteConcern = BSON("writeConcern" << BSON("w" + << "majority")); +/** + * Issue a simple success/fail command such as renameCollection or drop + * using majority write concern. 
+ */ +template +Status doRunCommand(OperationContext* opCtx, const DatabaseName& dbname, const Request& request) { + DBDirectClient client(opCtx); + BSONObj cmd = request.toBSON(kMajorityWriteConcern); + auto reply = + client.runCommand(OpMsgRequestBuilder::create(dbname, std::move(cmd)))->getCommandReply(); + return getStatusFromCommandResult(reply); +} + +void createQEClusteredStateCollection(OperationContext* opCtx, const NamespaceString& nss) { + CreateCommand createCmd(nss); + static const mongo::ClusteredIndexSpec clusterIdxSpec(BSON("_id" << 1), true); + createCmd.setClusteredIndex( + stdx::variant(std::move(clusterIdxSpec))); + auto status = doRunCommand(opCtx, nss.dbName(), createCmd); + if (!status.isOK()) { + if (status != ErrorCodes::NamespaceExists) { + uassertStatusOK(status); + } + LOGV2_DEBUG(7647901, + 1, + "Create collection failed because namespace already exists", + "namespace"_attr = nss); + } +} + +void dropQEStateCollection(OperationContext* opCtx, + const NamespaceString& nss, + boost::optional collId) { + Drop cmd(nss); + cmd.setCollectionUUID(collId); + uassertStatusOK(doRunCommand(opCtx, nss.dbName(), cmd)); +} + +void checkRequiredOperation(const CleanupStructuredEncryptionDataState& state, + OperationContext* opCtx, + bool* needLeftoverAnchorCleanup, + bool* needRename, + bool* needEcocCreate, + bool* needCleanup) { + const auto& ecocNss = state.getEcocNss(); + const auto& ecocRenameNss = state.getEcocRenameNss(); + const auto& escDeletesNss = state.getEscDeletesNss(); + + auto catalog = CollectionCatalog::get(opCtx); + + auto ecocUuid = catalog->lookupUUIDByNSS(opCtx, ecocNss); + auto ecocRenameUuid = catalog->lookupUUIDByNSS(opCtx, ecocRenameNss); + auto escDeletesUuid = catalog->lookupUUIDByNSS(opCtx, escDeletesNss); + + auto hasEcocBefore = state.getEcocUuid().has_value(); + auto hasEcocNow = !!ecocUuid; + auto hasEcocRenameBefore = state.getEcocRenameUuid().has_value(); + auto hasEcocRenameNow = !!ecocRenameUuid; + auto hasEscDeletesBefore = state.getEscDeletesUuid().has_value(); + auto hasEscDeletesNow = !!escDeletesUuid; + + *needLeftoverAnchorCleanup = false; + *needRename = false; + *needEcocCreate = false; + *needCleanup = true; + + // Check the current state of the 'esc.deletes' collection is consistent with the + // state document. If not, then don't do cleanup. 
+ if (hasEscDeletesBefore != hasEscDeletesNow) { + LOGV2_DEBUG(7647902, + 1, + "Skipping cleanup due to change in collection state", + "escDeletesNss"_attr = escDeletesNss, + "hasEscDeletesBefore"_attr = hasEscDeletesBefore, + "hasEscDeletesNow"_attr = hasEscDeletesNow); + *needCleanup = false; + return; + } else if (hasEscDeletesNow && escDeletesUuid.value() != state.getEscDeletesUuid().value()) { + LOGV2_DEBUG(7647903, + 1, + "Skipping cleanup due to mismatched collection uuid", + "escDeletesNss"_attr = escDeletesNss, + "uuid"_attr = escDeletesUuid.value(), + "expectedUUID"_attr = state.getEscDeletesUuid().value()); + *needCleanup = false; + return; + } else { + *needLeftoverAnchorCleanup = hasEscDeletesNow; + } + + if (hasEcocRenameBefore != hasEcocRenameNow) { + LOGV2_DEBUG(7647904, + 1, + "Skipping cleanup due to change in collection state", + "ecocRenameNss"_attr = ecocRenameNss, + "hasEcocRenameBefore"_attr = hasEcocRenameBefore, + "hasEcocRenameNow"_attr = hasEcocRenameNow); + *needCleanup = false; + return; + } + + if (hasEcocRenameNow) { + if (ecocRenameUuid.value() != state.getEcocRenameUuid().value()) { + LOGV2_DEBUG(7647905, + 1, + "Skipping cleanup due to mismatched collection uuid", + "ecocRenameNss"_attr = ecocRenameNss, + "uuid"_attr = ecocRenameUuid.value(), + "expectedUUID"_attr = state.getEcocRenameUuid().value()); + *needCleanup = false; + } + // If the ECOC does not exist, create it + *needEcocCreate = !hasEcocNow; + // The temp ECOC from a previous cleanup/compact still exists, so no need to rename. + // This cleanup will use the existing temp ECOC. + return; + } + + if (!hasEcocNow) { + // Nothing to rename & there's no existing temp ECOC, so skip cleanup. + LOGV2_DEBUG(7647906, + 1, + "Skipping rename stage as there is no source collection", + "ecocNss"_attr = ecocNss); + *needCleanup = false; + } else if (!hasEcocBefore) { + // Mismatch of before/after state, so skip rename & cleanup. + LOGV2_DEBUG(7647907, + 1, + "Skipping cleanup due to change in collection state", + "ecocNss"_attr = ecocNss); + *needCleanup = false; + } else if (ecocUuid.value() != state.getEcocUuid().value()) { + // The generation of the collection to be cleaned up is different than the one which was + // requested. Skip rename & cleanup. + LOGV2_DEBUG(7647908, + 1, + "Skipping rename of mismatched collection uuid", + "ecocNss"_attr = ecocNss, + "uuid"_attr = ecocUuid.value(), + "expectedUUID"_attr = state.getEcocUuid().value()); + *needCleanup = false; + } else { + // ECOC is safe to rename & create; cleanup can be performed + *needRename = true; + *needEcocCreate = true; + } +} + +void doAnchorCleanupWithUpdatedCollectionState(OperationContext* opCtx, + const NamespaceString& escNss, + const NamespaceString& escDeletesNss, + StringData description, + ECStats* escStats) { + auto tagsPerDelete = + ServerParameterSet::getClusterParameterSet() + ->get>("fleCompactionOptions") + ->getValue(boost::none) + .getMaxESCEntriesPerCompactionDelete(); + + // Run the anchor cleanups in CollectionRouters to force refresh of catalog cache entries + // for the ESC and ESC.deletes collections, and retry if write errors occur due to StaleConfig. 
+ sharding::router::CollectionRouter escDeletesRouter(opCtx->getServiceContext(), escDeletesNss); + sharding::router::CollectionRouter escRouter(opCtx->getServiceContext(), escNss); + + // TODO: SERVER-77402 refactor once sharding API has better support for this use case + escDeletesRouter.route( + opCtx, + description, + [&](OperationContext* outerOpCtx, const CollectionRoutingInfo& outerCri) { + tassert(7647923, + str::stream() << "Namespace " << escDeletesNss.toStringForErrorMsg() + << " is expected to be unsharded, but is sharded", + !outerCri.cm.isSharded()); + + onCollectionPlacementVersionMismatch( + outerOpCtx, escDeletesNss, ChunkVersion::UNSHARDED()); + ScopedSetShardRole escDeletesShardRole( + outerOpCtx, escDeletesNss, ShardVersion::UNSHARDED(), outerCri.cm.dbVersion()); + + escRouter.route( + outerOpCtx, + description, + [&](OperationContext* innerOpCtx, const CollectionRoutingInfo& innerCri) { + tassert(7647924, + str::stream() << "Namespace " << escNss.toStringForErrorMsg() + << " is expected to be unsharded, but is sharded", + !innerCri.cm.isSharded()); + + onCollectionPlacementVersionMismatch( + innerOpCtx, escNss, ChunkVersion::UNSHARDED()); + ScopedSetShardRole escShardRole( + innerOpCtx, escNss, ShardVersion::UNSHARDED(), innerCri.cm.dbVersion()); + + cleanupESCAnchors(innerOpCtx, escNss, escDeletesNss, tagsPerDelete, escStats); + }); + }); +} + +bool doRenameOperation(const CleanupStructuredEncryptionDataState& state, + boost::optional* newEcocRenameUuid, + boost::optional* newEscDeletesUuid, + FLECompactESCDeleteSet* escDeleteSet, + ECStats* escStats) { + LOGV2_DEBUG( + 7647909, 1, "Queryable Encryption cleanup entered rename phase", "state"_attr = state); + + const auto& ecocNss = state.getEcocNss(); + const auto& ecocRenameNss = state.getEcocRenameNss(); + const auto& escNss = state.getEscNss(); + const auto& escDeletesNss = state.getEscDeletesNss(); + auto opCtx = cc().makeOperationContext(); + + bool needLeftoverAnchorCleanup, needRename, needEcocCreate, needCleanup; + + checkRequiredOperation( + state, opCtx.get(), &needLeftoverAnchorCleanup, &needRename, &needEcocCreate, &needCleanup); + + *newEcocRenameUuid = state.getEcocRenameUuid(); + *newEscDeletesUuid = state.getEscDeletesUuid(); + + if (needLeftoverAnchorCleanup) { + LOGV2(7647910, + "Cleaning up ESC deletes collection from a prior cleanup operation", + logAttrs(escDeletesNss)); + doAnchorCleanupWithUpdatedCollectionState( + opCtx.get(), + escNss, + escDeletesNss, + "rename phase of queryable encryption cleanup coordinator"_sd, + escStats); + dropQEStateCollection(opCtx.get(), escDeletesNss, state.getEscDeletesUuid()); + } + + if (needRename) { + invariant(needEcocCreate); + + if (escDeleteSet) { + auto memoryLimit = + ServerParameterSet::getClusterParameterSet() + ->get>("fleCompactionOptions") + ->getValue(boost::none) + .getMaxCompactionSize(); + + *escDeleteSet = + readRandomESCNonAnchorIds(opCtx.get(), state.getEscNss(), memoryLimit, escStats); + } + + // Perform the rename so long as the target namespace does not exist. 
+ RenameCollectionCommand cmd(ecocNss, ecocRenameNss); + cmd.setDropTarget(false); + cmd.setCollectionUUID(state.getEcocUuid().value()); + + uassertStatusOK(doRunCommand(opCtx.get(), ecocNss.dbName(), cmd)); + *newEcocRenameUuid = state.getEcocUuid(); + } + + if (needEcocCreate) { + if (MONGO_unlikely(fleCleanupHangBeforeECOCCreate.shouldFail())) { + LOGV2(7647911, "Hanging due to fleCleanupHangBeforeECOCCreate fail point"); + fleCleanupHangBeforeECOCCreate.pauseWhileSet(); + } + + // Create the new ECOC collection + createQEClusteredStateCollection(opCtx.get(), ecocNss); + } + + if (needCleanup) { + // Create the temporary 'esc.deletes' clustered collection + createQEClusteredStateCollection(opCtx.get(), escDeletesNss); + + auto catalog = CollectionCatalog::get(opCtx.get()); + *newEscDeletesUuid = catalog->lookupUUIDByNSS(opCtx.get(), escDeletesNss); + invariant(newEscDeletesUuid->has_value()); + } + + // returns whether we can skip the remaining phases of cleanup + return !needCleanup; +} + +void doCleanupOperation(const CleanupStructuredEncryptionDataState& state, + const FLECompactESCDeleteSet& escDeleteSet, + ECStats* escStats, + ECOCStats* ecocStats) { + LOGV2_DEBUG( + 7647912, 1, "Queryable Encryption cleanup entered cleanup phase", "state"_attr = state); + + if (state.getSkipCleanup()) { + LOGV2_DEBUG(7647913, + 1, + "Skipping cleanup structured encryption data phase", + logAttrs(state.getId().getNss())); + return; + } + + EncryptedStateCollectionsNamespaces namespaces; + namespaces.edcNss = state.getId().getNss(); + namespaces.escNss = state.getEscNss(); + namespaces.ecocNss = state.getEcocNss(); + namespaces.ecocRenameNss = state.getEcocRenameNss(); + namespaces.escDeletesNss = state.getEscDeletesNss(); + auto opCtx = cc().makeOperationContext(); + CleanupStructuredEncryptionData request(namespaces.edcNss, state.getCleanupTokens()); + + processFLECleanup( + opCtx.get(), request, &getTransactionWithRetriesForMongoS, namespaces, escStats, ecocStats); + + if (MONGO_unlikely(fleCleanupHangBeforeCleanupESCNonAnchors.shouldFail())) { + LOGV2(7647914, "Hanging due to fleCleanupHangBeforeCleanupESCNonAnchors fail point"); + fleCleanupHangBeforeCleanupESCNonAnchors.pauseWhileSet(); + } + + auto tagsPerDelete = + ServerParameterSet::getClusterParameterSet() + ->get>("fleCompactionOptions") + ->getValue(boost::none) + .getMaxESCEntriesPerCompactionDelete(); + + cleanupESCNonAnchors(opCtx.get(), namespaces.escNss, escDeleteSet, tagsPerDelete, escStats); +} + +void doAnchorRemovalOperation(const CleanupStructuredEncryptionDataState& state, + ECStats* escStats) { + LOGV2_DEBUG(7647915, + 1, + "Queryable Encryption cleanup entered anchor deletes phase", + "state"_attr = state); + + if (state.getSkipCleanup()) { + LOGV2_DEBUG(7647916, 1, "Skipping anchor cleanup phase", logAttrs(state.getId().getNss())); + return; + } + + auto opCtx = cc().makeOperationContext(); + auto escNss = state.getEscNss(); + auto escDeletesNss = state.getEscDeletesNss(); + + doAnchorCleanupWithUpdatedCollectionState( + opCtx.get(), + escNss, + escDeletesNss, + "anchor deletes phase of queryable encryption cleanup coordinator"_sd, + escStats); + + if (MONGO_unlikely(fleCleanupHangAfterCleanupESCAnchors.shouldFail())) { + LOGV2(7647917, "Hanging due to fleCleanupHangAfterCleanupESCAnchors fail point"); + fleCleanupHangAfterCleanupESCAnchors.pauseWhileSet(); + } +} + +void doDropOperation(const CleanupStructuredEncryptionDataState& state) { + LOGV2_DEBUG( + 7647918, 1, "Queryable Encryption cleanup entered drop phase", 
"state"_attr = state); + + if (state.getSkipCleanup()) { + LOGV2_DEBUG(7647919, 1, "Skipping cleanup drop phase", logAttrs(state.getId().getNss())); + return; + } + + auto opCtx = cc().makeOperationContext(); + auto catalog = CollectionCatalog::get(opCtx.get()); + auto escDeletesNss = state.getEscDeletesNss(); + auto escDeletesUuid = catalog->lookupUUIDByNSS(opCtx.get(), escDeletesNss); + auto ecocCompactNss = state.getEcocRenameNss(); + auto ecocCompactUuid = catalog->lookupUUIDByNSS(opCtx.get(), ecocCompactNss); + + if (escDeletesUuid) { + dropQEStateCollection(opCtx.get(), escDeletesNss, state.getEscDeletesUuid()); + } else { + LOGV2_DEBUG(7647920, + 1, + "Skipping drop operation as 'esc.deletes' does not exist", + logAttrs(escDeletesNss)); + } + + if (ecocCompactUuid) { + dropQEStateCollection(opCtx.get(), ecocCompactNss, state.getEcocRenameUuid()); + } else { + LOGV2_DEBUG(7647921, + 1, + "Skipping drop operation as 'ecoc.compact' does not exist", + logAttrs(ecocCompactNss)); + } + + if (MONGO_unlikely(fleCleanupHangAfterDropTempCollection.shouldFail())) { + LOGV2(7647922, "Hanging due to fleCleanupHangAfterDropTempCollection fail point"); + fleCleanupHangAfterDropTempCollection.pauseWhileSet(); + } +} +} // namespace + + +boost::optional CleanupStructuredEncryptionDataCoordinator::reportForCurrentOp( + MongoProcessInterface::CurrentOpConnectionsMode connMode, + MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept { + auto bob = basicReportBuilder(); + + stdx::lock_guard lg{_docMutex}; + bob.append("escNss", NamespaceStringUtil::serialize(_doc.getEscNss())); + bob.append("ecocNss", NamespaceStringUtil::serialize(_doc.getEcocNss())); + bob.append("ecocUuid", _doc.getEcocUuid() ? _doc.getEcocUuid().value().toString() : "none"); + bob.append("ecocRenameNss", NamespaceStringUtil::serialize(_doc.getEcocRenameNss())); + bob.append("ecocRenameUuid", + _doc.getEcocRenameUuid() ? _doc.getEcocRenameUuid().value().toString() : "none"); + bob.append("escDeletesNss", NamespaceStringUtil::serialize(_doc.getEscDeletesNss())); + bob.append("escDeletesUuid", + _doc.getEscDeletesUuid() ? _doc.getEscDeletesUuid().value().toString() : "none"); + return bob.obj(); +} + +void CleanupStructuredEncryptionDataCoordinator::updateCleanupStats(const ECOCStats& phaseEcocStats, + const ECStats& phaseEscStats) { + // update stats in server status + FLEStatusSection::get().updateCleanupStats(CleanupStats(phaseEcocStats, phaseEscStats)); + + // update stats in state document + stdx::lock_guard lg(_docMutex); + auto docEscStats = _doc.getEscStats().value_or(ECStats{}); + auto docEcocStats = _doc.getEcocStats().value_or(ECOCStats{}); + FLEStatsUtil::accumulateStats(docEscStats, phaseEscStats); + FLEStatsUtil::accumulateStats(docEcocStats, phaseEcocStats); + _doc.setEscStats(docEscStats); + _doc.setEcocStats(docEcocStats); +} + +std::set CleanupStructuredEncryptionDataCoordinator::_getAdditionalLocksToAcquire( + OperationContext* opCtx) { + return {_doc.getEcocNss(), _doc.getEscNss(), _doc.getEcocRenameNss(), _doc.getEscDeletesNss()}; +} + +ExecutorFuture CleanupStructuredEncryptionDataCoordinator::_runImpl( + std::shared_ptr executor, + const CancellationToken& token) noexcept { + return ExecutorFuture(**executor) + .then(_buildPhaseHandler( + Phase::kRenameEcocForCleanup, + [this, anchor = shared_from_this()]() { + // if this was resumed from an interrupt, the _escDeleteSet + // might not be empty, so clear it. 
+ _escDeleteSet.clear(); + + ECStats phaseEscStats; + boost::optional ecocRenameUuid; + boost::optional escDeletesUuid; + + bool skipCleanup = doRenameOperation( + _doc, &ecocRenameUuid, &escDeletesUuid, &_escDeleteSet, &phaseEscStats); + + updateCleanupStats(ECOCStats{}, phaseEscStats); + + stdx::lock_guard lg(_docMutex); + _doc.setSkipCleanup(skipCleanup); + _doc.setEcocRenameUuid(ecocRenameUuid); + _doc.setEscDeletesUuid(escDeletesUuid); + })) + .then(_buildPhaseHandler(Phase::kCleanupStructuredEncryptionData, + [this, anchor = shared_from_this()]() { + ECStats phaseEscStats; + ECOCStats phaseEcocStats; + + doCleanupOperation( + _doc, _escDeleteSet, &phaseEscStats, &phaseEcocStats); + updateCleanupStats(phaseEcocStats, phaseEscStats); + })) + .then(_buildPhaseHandler(Phase::kDeleteAnchors, + [this, anchor = shared_from_this()]() { + ECStats phaseEscStats; + + doAnchorRemovalOperation(_doc, &phaseEscStats); + updateCleanupStats(ECOCStats{}, phaseEscStats); + })) + .then(_buildPhaseHandler(Phase::kDropTempCollection, [this, anchor = shared_from_this()] { + ECStats phaseEscStats = _doc.getEscStats().value_or(ECStats{}); + ECOCStats phaseEcocStats = _doc.getEcocStats().value_or(ECOCStats{}); + + _response = CleanupStructuredEncryptionDataCommandReply( + CleanupStats(phaseEcocStats, phaseEscStats)); + + doDropOperation(_doc); + })); +} + +} // namespace mongo diff --git a/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.h b/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.h new file mode 100644 index 0000000000000..ea22067265aed --- /dev/null +++ b/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.h @@ -0,0 +1,111 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
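The _runImpl() chain above strings the four cleanup phases together with _buildPhaseHandler(), persisting the current phase in the state document so that an interrupted coordinator can resume, skip the phases it has already passed, and re-run the phase that was in flight; that re-run is why each phase body (and the explicit _escDeleteSet.clear() above) is written to be idempotent. Below is a minimal, self-contained sketch of that skip-or-re-run behaviour; every name in it is hypothetical and nothing in it is server code.

#include <functional>
#include <iostream>
#include <string>

enum class MyPhase { kUnset, kRename, kCleanup, kDeleteAnchors, kDropTemp };

struct MyStateDoc {
    MyPhase phase = MyPhase::kUnset;  // the real coordinator persists this between steps
};

void runPhase(MyStateDoc& doc, MyPhase phase, const std::string& name,
              const std::function<void()>& body) {
    if (doc.phase > phase) {
        std::cout << "skipping already-completed phase: " << name << '\n';
        return;
    }
    // Phases at or after the persisted one are (re-)executed, so bodies must be idempotent.
    doc.phase = phase;
    body();
}

int main() {
    MyStateDoc doc;
    doc.phase = MyPhase::kCleanup;  // pretend the coordinator was interrupted mid-cleanup
    runPhase(doc, MyPhase::kRename, "rename", [] { std::cout << "rename ECOC\n"; });
    runPhase(doc, MyPhase::kCleanup, "cleanup", [] { std::cout << "cleanup ESC non-anchors\n"; });
    runPhase(doc, MyPhase::kDeleteAnchors, "deleteAnchors", [] { std::cout << "delete anchors\n"; });
    runPhase(doc, MyPhase::kDropTemp, "dropTemp", [] { std::cout << "drop temp collections\n"; });
    return 0;
}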
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/crypto/fle_stats_gen.h" +#include "mongo/db/commands/fle2_cleanup_gen.h" +#include "mongo/db/commands/fle2_compact.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/s/cleanup_structured_encryption_data_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" + +namespace mongo { + +class CleanupStructuredEncryptionDataCoordinator final + : public RecoverableShardingDDLCoordinator { +public: + static constexpr auto kStateContext = "CleanupStructuredEncryptionDataState"_sd; + using StateDoc = CleanupStructuredEncryptionDataState; + using Phase = CleanupStructuredEncryptionDataPhaseEnum; + + CleanupStructuredEncryptionDataCoordinator(ShardingDDLCoordinatorService* service, + const BSONObj& doc) + : RecoverableShardingDDLCoordinator( + service, "CleanupStructuredEncryptionDataCoordinator", doc) {} + + boost::optional reportForCurrentOp( + MongoProcessInterface::CurrentOpConnectionsMode connMode, + MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept final; + + CleanupStructuredEncryptionDataCommandReply getResponse(OperationContext* opCtx) { + getCompletionFuture().get(opCtx); + invariant(_response); + return *_response; + } + + void checkIfOptionsConflict(const BSONObj& stateDoc) const final {} + +private: + StringData serializePhase(const Phase& phase) const override { + return CleanupStructuredEncryptionDataPhase_serializer(phase); + } + + ExecutorFuture _runImpl(std::shared_ptr executor, + const CancellationToken& token) noexcept final; + + std::set _getAdditionalLocksToAcquire(OperationContext* opCtx) override; + +private: + // Updates the cleanup stats in the state doc with the supplied stats by + // adding onto the current stats in the state doc. + void updateCleanupStats(const ECOCStats& phaseEcocStats, const ECStats& phaseEscStats); + + // The response to the cleanup command + boost::optional _response; + + // Contains the set of _id values of documents that must be deleted from the ESC + // during the cleanup phase. This is populated during the rename phase. + // It is by design that this is not persisted to disk between phases, as this should + // be emptied (and hence no ESC deletions must happen) if the coordinator were resumed + // from disk during the cleanup phase. + FLECompactESCDeleteSet _escDeleteSet; +}; + + +} // namespace mongo diff --git a/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.idl b/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.idl new file mode 100644 index 0000000000000..ee7457d76c31a --- /dev/null +++ b/src/mongo/db/s/cleanup_structured_encryption_data_coordinator.idl @@ -0,0 +1,98 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. 
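The coordinator class above exposes getResponse(), which blocks on the coordinator's completion future and then hands back the accumulated CleanupStats reply. A hedged sketch of how a caller that already holds a coordinator instance would use it follows; the wrapper function is hypothetical, and how the command layer obtains the instance is outside this diff.

CleanupStructuredEncryptionDataCommandReply waitForCleanupReply(
    OperationContext* opCtx,
    const std::shared_ptr<CleanupStructuredEncryptionDataCoordinator>& coordinator) {
    // getResponse() waits on getCompletionFuture().get(opCtx), so this does not return until
    // the rename, cleanup, anchor-delete, and drop phases have finished (or were skipped).
    return coordinator->getResponse(opCtx);
}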
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + - "mongo/db/commands/fle2_cleanup.idl" + - "mongo/db/s/sharding_ddl_coordinator.idl" + +enums: + CleanupStructuredEncryptionDataPhase: + description: "The current phase of the cleanupStructuredEncryptionData pipeline" + type: string + values: + kUnset: "unset" + kRenameEcocForCleanup: "rename-collections-for-cleanup" + kCleanupStructuredEncryptionData: "cleanup-structured-encryption-data" + kDeleteAnchors: "delete-anchors" + kDropTempCollection: "drop-temp-collection" + +structs: + CleanupStructuredEncryptionDataState: + description: "Represents the state of the cleanupStructuredEncryptionData pipeline + for protocol version 2" + strict: false + chained_structs: + ShardingDDLCoordinatorMetadata: ShardingDDLCoordinatorMetadata + fields: + phase: + description: "Current phase" + type: CleanupStructuredEncryptionDataPhase + default: kUnset + skipCleanup: + description: "Whether to skip the cleanup operation" + type: bool + default: false + escNss: + description: "Collection containing insertions metadata" + type: namespacestring + ecocNss: + description: "Collection containing cleanup metadata to perform cleanup with" + type: namespacestring + ecocUuid: + description: "UUID of the collection identified by ecocNss" + type: uuid + optional: true + ecocRenameNss: + description: "Temporary name to use while performing cleanup" + type: namespacestring + ecocRenameUuid: + description: "UUID of the collection identified by ecocRenameNss" + type: uuid + optional: true + escDeletesNss: + description: "Temporary name to use while performing cleanup of ESC anchors" + type: namespacestring + escDeletesUuid: + description: "UUID of the collection identified by escDeletesNss" + type: uuid + optional: true + cleanupTokens: + description: "Cleanup tokens for the cleanup operation" + type: object_owned + escStats: + description: "Statistics for the ESC collection" + type: ECStats + optional: true + ecocStats: + description: "Statistics for the temporary ECOC collection" + type: ECOCStats + optional: true diff --git a/src/mongo/db/s/clone_catalog_data_command.cpp b/src/mongo/db/s/clone_catalog_data_command.cpp index fee1c7a14cebe..81fc54339500e 100644 --- a/src/mongo/db/s/clone_catalog_data_command.cpp +++ b/src/mongo/db/s/clone_catalog_data_command.cpp @@ -28,18 +28,35 @@ */ -#include 
"mongo/db/auth/action_set.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/cloner.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/clone_catalog_data_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -75,11 +92,12 @@ class CloneCatalogDataCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } @@ -102,17 +120,17 @@ class CloneCatalogDataCommand : public BasicCommand { const auto cloneCatalogDataRequest = CloneCatalogData::parse(IDLParserContext("_shardsvrCloneCatalogData"), cmdObj); - const auto dbname = cloneCatalogDataRequest.getCommandParameter().toString(); + const auto dbname = cloneCatalogDataRequest.getCommandParameter().dbName(); uassert( ErrorCodes::InvalidNamespace, - str::stream() << "invalid db name specified: " << dbname, + str::stream() << "invalid db name specified: " << dbname.toStringForErrorMsg(), NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow)); uassert(ErrorCodes::InvalidOptions, - str::stream() << "Can't clone catalog data for " << dbname << " database", - dbname != DatabaseName::kAdmin.db() && dbname != DatabaseName::kConfig.db() && - dbname != DatabaseName::kLocal.db()); + str::stream() << "Can't clone catalog data for " << dbname.toStringForErrorMsg() + << " database", + !dbname.isAdminDB() && !dbname.isConfigDB() && !dbname.isLocalDB()); auto from = cloneCatalogDataRequest.getFrom(); @@ -122,7 +140,9 @@ class CloneCatalogDataCommand : public BasicCommand { auto const catalogClient = Grid::get(opCtx)->catalogClient(); const auto shardedColls = catalogClient->getAllShardedCollectionsForDb( - opCtx, dbname, repl::ReadConcernLevel::kMajorityReadConcern); + opCtx, + DatabaseNameUtil::serialize(dbname), + repl::ReadConcernLevel::kMajorityReadConcern); DisableDocumentValidation disableValidation(opCtx); @@ -130,7 +150,11 @@ class CloneCatalogDataCommand : public BasicCommand { std::set clonedColls; Cloner cloner; - uassertStatusOK(cloner.copyDb(opCtx, dbname, from.toString(), shardedColls, &clonedColls)); + 
uassertStatusOK(cloner.copyDb(opCtx, + DatabaseNameUtil::serialize(dbname), + from.toString(), + shardedColls, + &clonedColls)); { BSONArrayBuilder cloneBarr = result.subarrayStart("clonedColls"); cloneBarr.append(clonedColls); diff --git a/src/mongo/db/s/cluster_abort_transaction_cmd_d.cpp b/src/mongo/db/s/cluster_abort_transaction_cmd_d.cpp index d3a4e3217b514..53a2234ffc8c7 100644 --- a/src/mongo/db/s/cluster_abort_transaction_cmd_d.cpp +++ b/src/mongo/db/s/cluster_abort_transaction_cmd_d.cpp @@ -27,9 +27,23 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_state.h" #include "mongo/s/commands/cluster_abort_transaction_cmd.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -44,10 +58,13 @@ struct ClusterAbortTransactionCmdD { return kNoApiVersions; } - static Status checkAuthForOperation(OperationContext* opCtx) { + static Status checkAuthForOperation(OperationContext* opCtx, + const DatabaseName& dbName, + const BSONObj&) { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/cluster_bulk_write_cmd_d.cpp b/src/mongo/db/s/cluster_bulk_write_cmd_d.cpp new file mode 100644 index 0000000000000..17b4875054ac6 --- /dev/null +++ b/src/mongo/db/s/cluster_bulk_write_cmd_d.cpp @@ -0,0 +1,65 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
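Each of the cluster command shims touched in this change swaps the tenant-less ResourcePattern::forClusterResource() for the overload that takes the request's tenant id. Condensed into one hedged helper below; the function name and its placement are illustrative, but the calls it makes are the ones the diff itself uses.

#include <boost/optional.hpp>

#include "mongo/base/error_codes.h"
#include "mongo/base/status.h"
#include "mongo/db/auth/action_type.h"
#include "mongo/db/auth/authorization_session.h"
#include "mongo/db/auth/resource_pattern.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/tenant_id.h"

namespace mongo {

// Hypothetical helper capturing the repeated check: internal cluster commands require the
// ActionType::internal privilege on the cluster resource of the tenant issuing the request.
Status checkInternalClusterAuth(OperationContext* opCtx,
                                const boost::optional<TenantId>& tenantId) {
    auto* authzSession = AuthorizationSession::get(opCtx->getClient());
    if (!authzSession->isAuthorizedForActionsOnResource(
            ResourcePattern::forClusterResource(tenantId), ActionType::internal)) {
        return {ErrorCodes::Unauthorized, "Unauthorized"};
    }
    return Status::OK();
}

}  // namespace mongo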
+ */ + +#include "mongo/db/s/sharding_state.h" +#include "mongo/s/commands/cluster_bulk_write_cmd.h" +#include "mongo/s/grid.h" + +namespace mongo { +namespace { + +struct ClusterBulkWriteCmdD { + static constexpr StringData kName = "clusterBulkWrite"_sd; + + static const std::set& getApiVersions() { + return kNoApiVersions; + } + + static void doCheckAuthorization(AuthorizationSession* authzSession, + bool bypass, + const BulkWriteCommandRequest& op) { + uassert( + ErrorCodes::Unauthorized, + "Unauthorized", + authzSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(op.getDollarTenant()), ActionType::internal)); + } + + static void checkCanRunHere(OperationContext* opCtx) { + Grid::get(opCtx)->assertShardingIsInitialized(); + + // A cluster command on the config server may attempt to use a ShardLocal to target itself, + // which triggers an invariant, so only shard servers can run this. + uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); + } +}; +ClusterBulkWriteCmd clusterBulkWriteCmdD{ClusterBulkWriteCmdD::kName}; + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/s/cluster_commit_transaction_cmd_d.cpp b/src/mongo/db/s/cluster_commit_transaction_cmd_d.cpp index 9b7d8d88cbcae..1b8ecbe9333fe 100644 --- a/src/mongo/db/s/cluster_commit_transaction_cmd_d.cpp +++ b/src/mongo/db/s/cluster_commit_transaction_cmd_d.cpp @@ -27,9 +27,23 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_state.h" #include "mongo/s/commands/cluster_commit_transaction_cmd.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -44,10 +58,13 @@ struct ClusterCommitTransactionCmdD { return kNoApiVersions; } - static Status checkAuthForOperation(OperationContext* opCtx) { + static Status checkAuthForOperation(OperationContext* opCtx, + const DatabaseName& dbName, + const BSONObj& cmdObj) { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/cluster_count_cmd_d.cpp b/src/mongo/db/s/cluster_count_cmd_d.cpp index a593a299b4fc3..c9037df80065d 100644 --- a/src/mongo/db/s/cluster_count_cmd_d.cpp +++ b/src/mongo/db/s/cluster_count_cmd_d.cpp @@ -27,10 +27,24 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_state.h" #include "mongo/s/commands/cluster_count_cmd.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -45,10 +59,12 @@ struct ClusterCountCmdD { return kNoApiVersions; } - static Status checkAuthForOperation(OperationContext* opCtx) { + static Status checkAuthForOperation(OperationContext* opCtx, + const DatabaseName& dbName, + const BSONObj& cmdObj) { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/db/s/cluster_find_cmd_d.cpp b/src/mongo/db/s/cluster_find_cmd_d.cpp index e98315c06addd..0c84618b4e79b 100644 --- a/src/mongo/db/s/cluster_find_cmd_d.cpp +++ b/src/mongo/db/s/cluster_find_cmd_d.cpp @@ -27,9 +27,26 @@ * it in the license file. */ +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/parsed_find_command.h" #include "mongo/db/s/sharding_state.h" #include "mongo/s/commands/cluster_find_cmd.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -50,8 +67,8 @@ struct ClusterFindCmdD { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(nss.tenantId()), ActionType::internal)); } static void checkCanRunHere(OperationContext* opCtx) { diff --git a/src/mongo/db/s/cluster_getmore_cmd_d.cpp b/src/mongo/db/s/cluster_getmore_cmd_d.cpp index 634ede8744322..65c66b97ba9de 100644 --- a/src/mongo/db/s/cluster_getmore_cmd_d.cpp +++ b/src/mongo/db/s/cluster_getmore_cmd_d.cpp @@ -27,9 +27,22 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_state.h" #include "mongo/s/commands/cluster_getmore_cmd.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -51,8 +64,8 @@ struct ClusterGetMoreCmdD { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(nss.tenantId()), ActionType::internal)); } static void checkCanRunHere(OperationContext* opCtx) { diff --git a/src/mongo/db/s/cluster_pipeline_cmd_d.cpp b/src/mongo/db/s/cluster_pipeline_cmd_d.cpp index 1b3eed4242a0f..cd25764c8898f 100644 --- a/src/mongo/db/s/cluster_pipeline_cmd_d.cpp +++ b/src/mongo/db/s/cluster_pipeline_cmd_d.cpp @@ -27,10 +27,34 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/commands/cluster_pipeline_cmd.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" namespace mongo { namespace { @@ -45,12 +69,15 @@ struct ClusterPipelineCommandD { return kNoApiVersions; } - static void doCheckAuthorization(OperationContext* opCtx, const PrivilegeVector& privileges) { + static void doCheckAuthorization(OperationContext* opCtx, + const OpMsgRequest& opMsgRequest, + const PrivilegeVector& privileges) { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(opMsgRequest.getValidatedTenantId()), + ActionType::internal)); } static void checkCanRunHere(OperationContext* opCtx) { @@ -76,7 +103,8 @@ struct ClusterPipelineCommandD { opMsgRequest.body.replaceFieldNames(BSON(AggregateCommandRequest::kCommandName << 1)); return aggregation_request_helper::parseFromBSON( opCtx, - DatabaseName(opMsgRequest.getValidatedTenantId(), opMsgRequest.getDatabase()), + DatabaseNameUtil::deserialize(opMsgRequest.getValidatedTenantId(), + opMsgRequest.getDatabase()), modifiedRequestBody, explainVerbosity, apiStrict); diff --git a/src/mongo/db/s/cluster_write_cmd_d.cpp b/src/mongo/db/s/cluster_write_cmd_d.cpp index 66167ab2b30cf..dc59a683d5701 100644 --- a/src/mongo/db/s/cluster_write_cmd_d.cpp +++ b/src/mongo/db/s/cluster_write_cmd_d.cpp @@ -27,9 +27,23 @@ * it in the license file. 
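The pipeline and clone-catalog changes above move DatabaseName construction behind DatabaseNameUtil, so tenant information flows from the parsed request rather than being attached afterwards. A small hedged sketch of the two directions of that conversion; the helper name is illustrative, while the calls are the ones the diff itself introduces.

#include "mongo/db/database_name.h"
#include "mongo/rpc/op_msg.h"
#include "mongo/util/database_name_util.h"

namespace mongo {

// Hypothetical helper: build a tenant-aware DatabaseName from an incoming OpMsgRequest,
// as the aggregation parsing path above now does.
DatabaseName resolveDbName(const OpMsgRequest& opMsgRequest) {
    return DatabaseNameUtil::deserialize(opMsgRequest.getValidatedTenantId(),
                                         opMsgRequest.getDatabase());
}

// Interfaces that still take a string (e.g. the catalog client call in the clone command)
// receive the explicitly serialized form back:
//   catalogClient->getAllShardedCollectionsForDb(
//       opCtx, DatabaseNameUtil::serialize(dbName), repl::ReadConcernLevel::kMajorityReadConcern);

}  // namespace mongo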
*/ +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/s/sharding_state.h" #include "mongo/s/commands/cluster_write_cmd.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -47,7 +61,8 @@ struct ClusterInsertCmdD { uassert(ErrorCodes::Unauthorized, "Unauthorized", authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::internal)); + ResourcePattern::forClusterResource(op.getDbName().tenantId()), + ActionType::internal)); } static void checkCanRunHere(OperationContext* opCtx) { @@ -78,7 +93,8 @@ struct ClusterUpdateCmdD { uassert(ErrorCodes::Unauthorized, "Unauthorized", authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::internal)); + ResourcePattern::forClusterResource(op.getDbName().tenantId()), + ActionType::internal)); } static void checkCanRunHere(OperationContext* opCtx) { @@ -108,7 +124,8 @@ struct ClusterDeleteCmdD { uassert(ErrorCodes::Unauthorized, "Unauthorized", authzSession->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::internal)); + ResourcePattern::forClusterResource(op.getDbName().tenantId()), + ActionType::internal)); } static void checkCanRunHere(OperationContext* opCtx) { diff --git a/src/mongo/db/s/collection_metadata.cpp b/src/mongo/db/s/collection_metadata.cpp index 973bf10158089..2f966922bba58 100644 --- a/src/mongo/db/s/collection_metadata.cpp +++ b/src/mongo/db/s/collection_metadata.cpp @@ -28,17 +28,27 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/collection_metadata.h" - +#include #include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/bson/util/builder.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -77,6 +87,7 @@ boost::optional CollectionMetadata::getReshardingKeyIfShouldFor case CoordinatorStateEnum::kBlockingWrites: case CoordinatorStateEnum::kAborting: case CoordinatorStateEnum::kCommitting: + case CoordinatorStateEnum::kQuiesced: case CoordinatorStateEnum::kDone: return boost::none; case CoordinatorStateEnum::kPreparingToDonate: @@ -104,7 +115,8 @@ void CollectionMetadata::throwIfReshardingInProgress(NamespaceString const& nss) LOGV2(5277122, "reshardCollection in progress", logAttrs(nss)); uasserted(ErrorCodes::ReshardCollectionInProgress, - "reshardCollection is in progress for namespace " + nss.toString()); + "reshardCollection is in progress for namespace " + + nss.toStringForErrorMsg()); } } } diff --git a/src/mongo/db/s/collection_metadata.h b/src/mongo/db/s/collection_metadata.h index 88b215ed90073..d07c45d9f36d2 100644 --- a/src/mongo/db/s/collection_metadata.h +++ 
b/src/mongo/db/s/collection_metadata.h @@ -29,8 +29,28 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/range_arithmetic.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -93,6 +113,19 @@ class CollectionMetadata { return (isSharded() ? _cm->getVersion(_thisShardId) : ChunkVersion::UNSHARDED()); } + /** + * Returns the current shard's latest placement timestamp or Timestamp(0, 0) if it is not + * sharded. This value indicates the commit time of the latest placement change that this shard + * participated in and is used to answer the question of "did any chunks move since some + * timestamp". + * + * Will throw ShardInvalidatedForTargeting if _thisShardId is marked as stale by + * the CollectionMetadata's current chunk manager. + */ + Timestamp getShardMaxValidAfter() const { + return (isSharded() ? _cm->getMaxValidAfter(_thisShardId) : Timestamp(0, 0)); + } + /** * Returns the current shard's placement version for the collection or UNSHARDED if it is not * sharded. diff --git a/src/mongo/db/s/collection_metadata_filtering_test.cpp b/src/mongo/db/s/collection_metadata_filtering_test.cpp index aae08d987eb0d..e7cb67610a98a 100644 --- a/src/mongo/db/s/collection_metadata_filtering_test.cpp +++ b/src/mongo/db/s/collection_metadata_filtering_test.cpp @@ -27,13 +27,50 @@ * it in the license file. 
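The new CollectionMetadata::getShardMaxValidAfter() accessor above exposes the commit time of the latest placement change this shard took part in. A hedged usage sketch of how a caller might use it to answer "did any chunks move since timestamp T"; the free function below is hypothetical.

#include "mongo/bson/timestamp.h"
#include "mongo/db/s/collection_metadata.h"

namespace mongo {

// Hypothetical helper: report whether this shard's chunk ownership may have changed after
// 'since'. Unsharded collections report Timestamp(0, 0) and so never register as moved; per
// the accessor's contract above, this may throw ShardInvalidatedForTargeting if the shard is
// marked stale by the current chunk manager.
bool shardPlacementChangedSince(const CollectionMetadata& metadata, const Timestamp& since) {
    return metadata.getShardMaxValidAfter() > since;
}

}  // namespace mongo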
*/ +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/s/metadata_manager.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -127,7 +164,7 @@ class CollectionMetadataFilteringTest : public ShardServerTestFixture { } _manager = std::make_shared( - getServiceContext(), kNss, executor(), CollectionMetadata(cm, ShardId("0"))); + getServiceContext(), kNss, CollectionMetadata(cm, ShardId("0"))); return CollectionMetadata(std::move(cm), ShardId("0")); } diff --git a/src/mongo/db/s/collection_metadata_test.cpp b/src/mongo/db/s/collection_metadata_test.cpp index a7ecaa3657751..5bc9407fdad2b 100644 --- a/src/mongo/db/s/collection_metadata_test.cpp +++ b/src/mongo/db/s/collection_metadata_test.cpp @@ -27,12 +27,29 @@ * it in the license file. 
*/ -#include "mongo/base/status.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/common_types_gen.h" #include "mongo/s/sharding_test_fixture_common.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -128,7 +145,7 @@ class NoChunkFixture : public unittest::Test { reshardingFields.setRecipientFields(std::move(recipientFields)); } else if (state == CoordinatorStateEnum::kBlockingWrites) { TypeCollectionDonorFields donorFields{ - resharding::constructTemporaryReshardingNss(kNss.db(), existingUuid), + resharding::constructTemporaryReshardingNss(kNss.db_forTest(), existingUuid), KeyPattern{BSON("newKey" << 1)}, {kThisShard, kOtherShard}}; reshardingFields.setDonorFields(std::move(donorFields)); diff --git a/src/mongo/db/s/collection_sharding_runtime.cpp b/src/mongo/db/s/collection_sharding_runtime.cpp index 990b8c0d209b6..471bb9f877d10 100644 --- a/src/mongo/db/s/collection_sharding_runtime.cpp +++ b/src/mongo/db/s/collection_sharding_runtime.cpp @@ -29,21 +29,56 @@ #include "mongo/db/s/collection_sharding_runtime.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_settings.h" #include "mongo/db/operation_context.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/plan_cache.h" #include "mongo/db/query/sbe_plan_cache.h" -#include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/range_deleter_service.h" #include "mongo/db/s/sharding_runtime_d_params_gen.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/grid.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -93,13 +128,9 @@ 
CollectionShardingRuntime::ScopedExclusiveCollectionShardingRuntime:: ScopedExclusiveCollectionShardingRuntime(ScopedCollectionShardingState&& scopedCss) : _scopedCss(std::move(scopedCss)) {} -CollectionShardingRuntime::CollectionShardingRuntime( - ServiceContext* service, - NamespaceString nss, - std::shared_ptr rangeDeleterExecutor) +CollectionShardingRuntime::CollectionShardingRuntime(ServiceContext* service, NamespaceString nss) : _serviceContext(service), _nss(std::move(nss)), - _rangeDeleterExecutor(std::move(rangeDeleterExecutor)), _metadataType(_nss.isNamespaceAlwaysUnsharded() ? MetadataType::kUnsharded : MetadataType::kUnknown) {} @@ -144,14 +175,6 @@ ScopedCollectionFilter CollectionShardingRuntime::getOwnershipFilter( optReceivedShardVersion, supportNonVersionedOperations); - if (!supportNonVersionedOperations) { - tassert(7032301, - "For sharded collections getOwnershipFilter cannot be relied on without a valid " - "shard version", - !ShardVersion::isPlacementVersionIgnored(*optReceivedShardVersion) || - !metadata->get().allowMigrations() || !metadata->get().isSharded()); - } - return {std::move(metadata)}; } @@ -190,7 +213,7 @@ ScopedCollectionDescription CollectionShardingRuntime::getCollectionDescription( : ShardVersionPlacementIgnoredNoIndexes(), boost::none /* wantedVersion */, ShardingState::get(_serviceContext)->shardId()), - str::stream() << "sharding status of collection " << _nss.ns() + str::stream() << "sharding status of collection " << _nss.toStringForErrorMsg() << " is not currently available for description and needs to be recovered " << "from the config server", optMetadata); @@ -255,7 +278,8 @@ boost::optional> CollectionShardingRuntime::getCriticalSe void CollectionShardingRuntime::setFilteringMetadata(OperationContext* opCtx, CollectionMetadata newMetadata) { tassert(7032302, - str::stream() << "Namespace " << _nss.ns() << " must never be sharded.", + str::stream() << "Namespace " << _nss.toStringForErrorMsg() + << " must never be sharded.", !newMetadata.isSharded() || !_nss.isNamespaceAlwaysUnsharded()); stdx::lock_guard lk(_metadataManagerLock); @@ -284,8 +308,8 @@ void CollectionShardingRuntime::setFilteringMetadata(OperationContext* opCtx, _metadataType = MetadataType::kSharded; if (!_metadataManager || !newMetadata.uuidMatches(_metadataManager->getCollectionUuid())) { - _metadataManager = std::make_shared( - opCtx->getServiceContext(), _nss, _rangeDeleterExecutor, newMetadata); + _metadataManager = + std::make_shared(opCtx->getServiceContext(), _nss, newMetadata); ++_numMetadataManagerChanges; } else { _metadataManager->setFilteringMetadata(std::move(newMetadata)); @@ -326,20 +350,6 @@ void CollectionShardingRuntime::clearFilteringMetadataForDroppedCollection( _clearFilteringMetadata(opCtx, /* collIsDropped */ true); } -SharedSemiFuture CollectionShardingRuntime::cleanUpRange(ChunkRange const& range, - CleanWhen when) const { - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The feature - // flag is used to turn on new range deleter on startup. 
- if (!feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - stdx::lock_guard lk(_metadataManagerLock); - invariant(_metadataType == MetadataType::kSharded); - return _metadataManager->cleanUpRange(range, when == kDelayed); - } - - // This method must never be called if the range deleter service feature flag is enabled - MONGO_UNREACHABLE; -} - Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx, const NamespaceString& nss, const UUID& collectionUuid, @@ -362,14 +372,11 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx, "metadata reset"}; } - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The - // feature flag is used to turn on new range deleter on startup. - if (feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - return RangeDeleterService::get(opCtx)->getOverlappingRangeDeletionsFuture( - self->_metadataManager->getCollectionUuid(), orphanRange); - } else { - return self->_metadataManager->trackOrphanedDataCleanup(orphanRange); - } + const auto rangeDeleterService = RangeDeleterService::get(opCtx); + rangeDeleterService->getRangeDeleterServiceInitializationFuture().get(opCtx); + + return rangeDeleterService->getOverlappingRangeDeletionsFuture( + self->_metadataManager->getCollectionUuid(), orphanRange); }(); if (!swOrphanCleanupFuture.isOK()) { @@ -402,8 +409,9 @@ Status CollectionShardingRuntime::waitForClean(OperationContext* opCtx, // collection could either never exist or get dropped directly from the shard after the // range deletion task got scheduled. if (result != ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist) { - return result.withContext(str::stream() << "Failed to delete orphaned " << nss.ns() - << " range " << orphanRange.toString()); + return result.withContext(str::stream() << "Failed to delete orphaned " + << nss.toStringForErrorMsg() << " range " + << orphanRange.toString()); } } } @@ -479,7 +487,7 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt( opCtx->lockState()->isWriteLocked() ? 
StaleConfigInfo::OperationType::kWrite : StaleConfigInfo::OperationType::kRead), - str::stream() << "The critical section for " << _nss.ns() + str::stream() << "The critical section for " << _nss.toStringForErrorMsg() << " is acquired with reason: " << reason, !criticalSectionSignal); } @@ -489,7 +497,7 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt( receivedShardVersion, boost::none /* wantedVersion */, ShardingState::get(opCtx)->shardId()), - str::stream() << "sharding status of collection " << _nss.ns() + str::stream() << "sharding status of collection " << _nss.toStringForErrorMsg() << " is not currently known and needs to be recovered", optCurrentMetadata); @@ -522,21 +530,23 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt( _nss, receivedShardVersion, wantedShardVersion, ShardingState::get(opCtx)->shardId()); uassert(std::move(sci), - str::stream() << "timestamp mismatch detected for " << _nss.ns(), + str::stream() << "timestamp mismatch detected for " << _nss.toStringForErrorMsg(), isPlacementVersionIgnored || wantedPlacementVersion.isSameCollection(receivedPlacementVersion)); if (isPlacementVersionIgnored || (!wantedPlacementVersion.isSet() && receivedPlacementVersion.isSet())) { uasserted(std::move(sci), - str::stream() << "this shard no longer contains chunks for " << _nss.ns() << ", " + str::stream() << "this shard no longer contains chunks for " + << _nss.toStringForErrorMsg() << ", " << "the collection may have been dropped"); } if (isPlacementVersionIgnored || (wantedPlacementVersion.isSet() && !receivedPlacementVersion.isSet())) { uasserted(std::move(sci), - str::stream() << "this shard contains chunks for " << _nss.ns() << ", " + str::stream() << "this shard contains chunks for " << _nss.toStringForErrorMsg() + << ", " << "but the client expects unsharded collection"); } @@ -545,12 +555,14 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt( // Could be > or < - wanted is > if this is the source of a migration, wanted < if this is // the target of a migration uasserted(std::move(sci), - str::stream() << "placement version mismatch detected for " << _nss.ns()); + str::stream() << "placement version mismatch detected for " + << _nss.toStringForErrorMsg()); } if (indexFeatureFlag && wantedIndexVersion != receivedIndexVersion) { uasserted(std::move(sci), - str::stream() << "index version mismatch detected for " << _nss.ns()); + str::stream() << "index version mismatch detected for " + << _nss.toStringForErrorMsg()); } // Those are all the reasons the versions can mismatch @@ -560,22 +572,13 @@ CollectionShardingRuntime::_getMetadataWithVersionCheckAt( void CollectionShardingRuntime::appendShardVersion(BSONObjBuilder* builder) const { auto optCollDescr = getCurrentMetadataIfKnown(); if (optCollDescr) { - BSONObjBuilder versionBuilder(builder->subobjStart(_nss.ns())); + BSONObjBuilder versionBuilder(builder->subobjStart(NamespaceStringUtil::serialize(_nss))); versionBuilder.appendTimestamp("placementVersion", optCollDescr->getShardPlacementVersion().toLong()); versionBuilder.append("timestamp", optCollDescr->getShardPlacementVersion().getTimestamp()); } } -size_t CollectionShardingRuntime::numberOfRangesScheduledForDeletion() const { - stdx::lock_guard lk(_metadataManagerLock); - if (_metadataType == MetadataType::kSharded) { - return _metadataManager->numberOfRangesScheduledForDeletion(); - } - return 0; -} - - void CollectionShardingRuntime::setPlacementVersionRecoverRefreshFuture( SharedSemiFuture future, CancellationSource 
cancellationSource) { invariant(!_placementVersionInRecoverOrRefresh); @@ -703,10 +706,6 @@ void CollectionShardingRuntime::_cleanupBeforeInstallingNewCollectionMetadata( ExecutorFuture{Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()} .then([svcCtx{opCtx->getServiceContext()}, oldUUID, oldShardVersion] { ThreadClient tc{"CleanUpShardedMetadata", svcCtx}; - { - stdx::lock_guard lk{*tc.get()}; - tc->setSystemOperationKillableByStepdown(lk); - } auto uniqueOpCtx{tc->makeOperationContext()}; auto opCtx{uniqueOpCtx.get()}; @@ -772,7 +771,7 @@ void CollectionShardingRuntime::_checkCritSecForIndexMetadata(OperationContext* opCtx->lockState()->isWriteLocked() ? StaleConfigInfo::OperationType::kWrite : StaleConfigInfo::OperationType::kRead), - str::stream() << "The critical section for " << _nss.ns() + str::stream() << "The critical section for " << _nss.toStringForErrorMsg() << " is acquired with reason: " << reason, !criticalSectionSignal); } diff --git a/src/mongo/db/s/collection_sharding_runtime.h b/src/mongo/db/s/collection_sharding_runtime.h index 6da67980fa3c6..99e71b77fc5c0 100644 --- a/src/mongo/db/s/collection_sharding_runtime.h +++ b/src/mongo/db/s/collection_sharding_runtime.h @@ -29,12 +29,40 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/metadata_manager.h" +#include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/s/sharding_migration_critical_section.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/sharding_index_catalog_cache.h" #include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -51,9 +79,7 @@ class CollectionShardingRuntime final : public CollectionShardingState, CollectionShardingRuntime& operator=(const CollectionShardingRuntime&) = delete; public: - CollectionShardingRuntime(ServiceContext* service, - NamespaceString nss, - std::shared_ptr rangeDeleterExecutor); + CollectionShardingRuntime(ServiceContext* service, NamespaceString nss); /** * Obtains the sharding runtime for the specified collection, along with a resource lock in @@ -143,8 +169,6 @@ class CollectionShardingRuntime final : public CollectionShardingState, void appendShardVersion(BSONObjBuilder* builder) const override; - size_t numberOfRangesScheduledForDeletion() const override; - boost::optional getIndexesInCritSec(OperationContext* opCtx) const; /** @@ -218,18 +242,6 @@ class CollectionShardingRuntime final : public CollectionShardingState, boost::optional> getCriticalSectionSignal( OperationContext* opCtx, ShardingMigrationCriticalSection::Operation op) const; - /** - * Schedules documents in `range` for cleanup after any running queries that may depend on them - * have terminated. Does not block. Fails if range overlaps any current local shard chunk. 
- * Passed kDelayed, an additional delay (configured via server parameter orphanCleanupDelaySecs) - * is added to permit (most) dependent queries on secondaries to complete, too. - * - * Returns a future that will be resolved when the deletion completes or fails. If that - * succeeds, waitForClean can be called to ensure no other deletions are pending for the range. - */ - enum CleanWhen { kNow, kDelayed }; - SharedSemiFuture cleanUpRange(ChunkRange const& range, CleanWhen when) const; - /** * Waits for all ranges deletion tasks with UUID 'collectionUuid' overlapping range * 'orphanRange' to be processed, even if the collection does not exist in the storage catalog. @@ -353,9 +365,6 @@ class CollectionShardingRuntime final : public CollectionShardingState, // Namespace this state belongs to. const NamespaceString _nss; - // The executor used for deleting ranges of orphan chunks. - std::shared_ptr _rangeDeleterExecutor; - // Tracks the migration critical section state for this collection. ShardingMigrationCriticalSection _critSec; diff --git a/src/mongo/db/s/collection_sharding_runtime_test.cpp b/src/mongo/db/s/collection_sharding_runtime_test.cpp index ea93429425644..aafa7e3abb34a 100644 --- a/src/mongo/db/s/collection_sharding_runtime_test.cpp +++ b/src/mongo/db/s/collection_sharding_runtime_test.cpp @@ -27,29 +27,72 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_factory_mock.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/global_settings.h" -#include "mongo/db/op_observer/op_observer_impl.h" -#include "mongo/db/persistent_task_store.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/range_deleter_service.h" #include "mongo/db/s/range_deleter_service_test.h" #include "mongo/db/s/range_deletion_task_gen.h" -#include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" +#include "mongo/db/s/sharding_mongod_test_fixture.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" #include "mongo/db/vector_clock.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_shard.h" +#include 
"mongo/s/catalog_cache_loader.h" #include "mongo/s/catalog_cache_loader_mock.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" #include "mongo/s/shard_version_factory.h" -#include "mongo/stdx/chrono.h" +#include "mongo/s/type_collection_common_types_gen.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace { @@ -97,7 +140,7 @@ class CollectionShardingRuntimeTest : public ShardServerTestFixture { TEST_F(CollectionShardingRuntimeTest, GetCollectionDescriptionThrowsStaleConfigBeforeSetFilteringMetadataIsCalledAndNoOSSSet) { OperationContext* opCtx = operationContext(); - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kTestNss); ASSERT_FALSE(csr.getCollectionDescription(opCtx).isSharded()); auto metadata = makeShardedMetadata(opCtx); ScopedSetShardRole scopedSetShardRole{ @@ -111,14 +154,14 @@ TEST_F(CollectionShardingRuntimeTest, TEST_F( CollectionShardingRuntimeTest, GetCollectionDescriptionReturnsUnshardedAfterSetFilteringMetadataIsCalledWithUnshardedMetadata) { - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kTestNss); csr.setFilteringMetadata(operationContext(), CollectionMetadata()); ASSERT_FALSE(csr.getCollectionDescription(operationContext()).isSharded()); } TEST_F(CollectionShardingRuntimeTest, GetCollectionDescriptionReturnsShardedAfterSetFilteringMetadataIsCalledWithShardedMetadata) { - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kTestNss); OperationContext* opCtx = operationContext(); auto metadata = makeShardedMetadata(opCtx); csr.setFilteringMetadata(opCtx, metadata); @@ -132,14 +175,14 @@ TEST_F(CollectionShardingRuntimeTest, TEST_F(CollectionShardingRuntimeTest, GetCurrentMetadataIfKnownReturnsNoneBeforeSetFilteringMetadataIsCalled) { - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kTestNss); ASSERT_FALSE(csr.getCurrentMetadataIfKnown()); } TEST_F( CollectionShardingRuntimeTest, GetCurrentMetadataIfKnownReturnsUnshardedAfterSetFilteringMetadataIsCalledWithUnshardedMetadata) { - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kTestNss); csr.setFilteringMetadata(operationContext(), CollectionMetadata()); const auto optCurrMetadata = csr.getCurrentMetadataIfKnown(); ASSERT_TRUE(optCurrMetadata); @@ -150,7 +193,7 @@ TEST_F( TEST_F( CollectionShardingRuntimeTest, GetCurrentMetadataIfKnownReturnsShardedAfterSetFilteringMetadataIsCalledWithShardedMetadata) { - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kTestNss); OperationContext* opCtx = operationContext(); auto metadata = makeShardedMetadata(opCtx); csr.setFilteringMetadata(opCtx, metadata); @@ -162,7 +205,7 @@ TEST_F( TEST_F(CollectionShardingRuntimeTest, GetCurrentMetadataIfKnownReturnsNoneAfterClearFilteringMetadataIsCalled) { - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + 
CollectionShardingRuntime csr(getServiceContext(), kTestNss); OperationContext* opCtx = operationContext(); csr.setFilteringMetadata(opCtx, makeShardedMetadata(opCtx)); csr.clearFilteringMetadata(opCtx); @@ -170,7 +213,7 @@ TEST_F(CollectionShardingRuntimeTest, } TEST_F(CollectionShardingRuntimeTest, SetFilteringMetadataWithSameUUIDKeepsSameMetadataManager) { - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kTestNss); ASSERT_EQ(csr.getNumMetadataManagerChanges_forTest(), 0); OperationContext* opCtx = operationContext(); auto metadata = makeShardedMetadata(opCtx); @@ -185,7 +228,7 @@ TEST_F(CollectionShardingRuntimeTest, SetFilteringMetadataWithSameUUIDKeepsSameM TEST_F(CollectionShardingRuntimeTest, SetFilteringMetadataWithDifferentUUIDReplacesPreviousMetadataManager) { - CollectionShardingRuntime csr(getServiceContext(), kTestNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kTestNss); OperationContext* opCtx = operationContext(); auto metadata = makeShardedMetadata(opCtx); csr.setFilteringMetadata(opCtx, metadata); @@ -226,7 +269,7 @@ TEST_F(CollectionShardingRuntimeTest, ReturnUnshardedMetadataInServerlessMode) { boost::none /* databaseVersion */ }; - CollectionShardingRuntime csr(getServiceContext(), testNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), testNss); auto collectionFilter = csr.getOwnershipFilter( opCtx, CollectionShardingRuntime::OrphanCleanupPolicy::kAllowOrphanCleanup, true); ASSERT_FALSE(collectionFilter.isSharded()); @@ -244,8 +287,8 @@ TEST_F(CollectionShardingRuntimeTest, ReturnUnshardedMetadataInServerlessMode) { boost::none /* databaseVersion */ }; - CollectionShardingRuntime csrLogicalSession( - getServiceContext(), NamespaceString::kLogicalSessionsNamespace, executor()); + CollectionShardingRuntime csrLogicalSession(getServiceContext(), + NamespaceString::kLogicalSessionsNamespace); ASSERT(csrLogicalSession.getCurrentMetadataIfKnown() == boost::none); ASSERT_THROWS_CODE( csrLogicalSession.getCollectionDescription(opCtx), DBException, ErrorCodes::StaleConfig); diff --git a/src/mongo/db/s/collection_sharding_state.cpp b/src/mongo/db/s/collection_sharding_state.cpp index 132485fe1d71b..befa9bc82ae05 100644 --- a/src/mongo/db/s/collection_sharding_state.cpp +++ b/src/mongo/db/s/collection_sharding_state.cpp @@ -29,8 +29,24 @@ #include "mongo/db/s/collection_sharding_state.h" -#include "mongo/logv2/log.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/server_options.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -50,7 +66,8 @@ class CollectionShardingStateMap { struct CSSAndLock { CSSAndLock(std::unique_ptr css) - : cssMutex("CSSMutex::" + css->nss().toString()), css(std::move(css)) {} + : cssMutex("CSSMutex::" + NamespaceStringUtil::serialize(css->nss())), + css(std::move(css)) {} const Lock::ResourceMutex cssMutex; std::unique_ptr css; @@ -66,11 +83,11 @@ class CollectionShardingStateMap { CSSAndLock* getOrCreate(const NamespaceString& nss) noexcept { stdx::lock_guard lg(_mutex); - - auto it = _collections.find(nss.ns()); + 
const auto nssStr = NamespaceStringUtil::serialize(nss); + auto it = _collections.find(nssStr); if (it == _collections.end()) { - auto inserted = _collections.try_emplace( - nss.ns(), std::make_unique(_factory->make(nss))); + auto inserted = + _collections.try_emplace(nssStr, std::make_unique(_factory->make(nss))); invariant(inserted.second); it = std::move(inserted.first); } @@ -91,25 +108,6 @@ class CollectionShardingStateMap { versionB.done(); } - void appendInfoForServerStatus(BSONObjBuilder* builder) { - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The feature - // flag is used to turn on new range deleter on startup. - if (!mongo::feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - auto totalNumberOfRangesScheduledForDeletion = ([this] { - stdx::lock_guard lg(_mutex); - return std::accumulate( - _collections.begin(), - _collections.end(), - 0LL, - [](long long total, const auto& coll) { - return total + coll.second->css->numberOfRangesScheduledForDeletion(); - }); - })(); - - builder->appendNumber("rangeDeleterTasks", totalNumberOfRangesScheduledForDeletion); - } - } - std::vector getCollectionNames() { stdx::lock_guard lg(_mutex); std::vector result; @@ -141,6 +139,10 @@ CollectionShardingState::ScopedCollectionShardingState::ScopedCollectionSharding Lock::ResourceLock lock, CollectionShardingState* css) : _lock(std::move(lock)), _css(css) {} +CollectionShardingState::ScopedCollectionShardingState::ScopedCollectionShardingState( + CollectionShardingState* css) + : _lock(boost::none), _css(css) {} + CollectionShardingState::ScopedCollectionShardingState::ScopedCollectionShardingState( ScopedCollectionShardingState&& other) : _lock(std::move(other._lock)), _css(other._css) { @@ -155,11 +157,16 @@ CollectionShardingState::ScopedCollectionShardingState::acquireScopedCollectionS CollectionShardingStateMap::CSSAndLock* cssAndLock = CollectionShardingStateMap::get(opCtx->getServiceContext())->getOrCreate(nss); - // First lock the RESOURCE_MUTEX associated to this nss to guarantee stability of the - // CollectionShardingState* . After that, it is safe to get and store the - // CollectionShadingState*, as long as the RESOURCE_MUTEX is kept locked. - Lock::ResourceLock lock(opCtx->lockState(), cssAndLock->cssMutex.getRid(), mode); - return ScopedCollectionShardingState(std::move(lock), cssAndLock->css.get()); + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { + // First lock the RESOURCE_MUTEX associated to this nss to guarantee stability of the + // CollectionShardingState* . After that, it is safe to get and store the + // CollectionShadingState*, as long as the RESOURCE_MUTEX is kept locked. + Lock::ResourceLock lock(opCtx->lockState(), cssAndLock->cssMutex.getRid(), mode); + return ScopedCollectionShardingState(std::move(lock), cssAndLock->css.get()); + } else { + // No need to lock the CSSLock on non-shardsvrs. For performance, skip doing it. 
+ return ScopedCollectionShardingState(cssAndLock->css.get()); + } } CollectionShardingState::ScopedCollectionShardingState @@ -181,12 +188,6 @@ void CollectionShardingState::appendInfoForShardingStateCommand(OperationContext collectionsMap->appendInfoForShardingStateCommand(builder); } -void CollectionShardingState::appendInfoForServerStatus(OperationContext* opCtx, - BSONObjBuilder* builder) { - auto& collectionsMap = CollectionShardingStateMap::get(opCtx->getServiceContext()); - collectionsMap->appendInfoForServerStatus(builder); -} - std::vector CollectionShardingState::getCollectionNames(OperationContext* opCtx) { auto& collectionsMap = CollectionShardingStateMap::get(opCtx->getServiceContext()); return collectionsMap->getCollectionNames(); diff --git a/src/mongo/db/s/collection_sharding_state.h b/src/mongo/db/s/collection_sharding_state.h index 6e7cc99144b80..a338d725a4710 100644 --- a/src/mongo/db/s/collection_sharding_state.h +++ b/src/mongo/db/s/collection_sharding_state.h @@ -29,10 +29,21 @@ #pragma once +#include #include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/service_context.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/sharding_index_catalog_cache.h" namespace mongo { @@ -90,10 +101,14 @@ class CollectionShardingState { ScopedCollectionShardingState(Lock::ResourceLock lock, CollectionShardingState* css); + // Constructor without the ResourceLock. + // Important: Only for use in non-shard servers! + ScopedCollectionShardingState(CollectionShardingState* css); + static ScopedCollectionShardingState acquireScopedCollectionShardingState( OperationContext* opCtx, const NamespaceString& nss, LockMode mode); - Lock::ResourceLock _lock; + boost::optional _lock; CollectionShardingState* _css; }; static ScopedCollectionShardingState assertCollectionLockedAndAcquire( @@ -111,11 +126,6 @@ class CollectionShardingState { */ static void appendInfoForShardingStateCommand(OperationContext* opCtx, BSONObjBuilder* builder); - /** - * Attaches info for server status. - */ - static void appendInfoForServerStatus(OperationContext* opCtx, BSONObjBuilder* builder); - /** * Returns the namespace to which this CSS corresponds. */ @@ -203,11 +213,6 @@ class CollectionShardingState { * Appends information about the shard version of the collection. */ virtual void appendShardVersion(BSONObjBuilder* builder) const = 0; - - /** - * Returns the number of ranges scheduled for deletion on the collection. - */ - virtual size_t numberOfRangesScheduledForDeletion() const = 0; }; /** diff --git a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp index 0dd89232c3dae..7acf184457a78 100644 --- a/src/mongo/db/s/collection_sharding_state_factory_shard.cpp +++ b/src/mongo/db/s/collection_sharding_state_factory_shard.cpp @@ -27,67 +27,19 @@ * it in the license file. 
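// The acquireScopedCollectionShardingState() change above takes the per-namespace
// resource lock only when the node runs as a shard server and otherwise hands back the
// state pointer lock-free, which is why the scoped accessor now stores its lock as an
// optional. A minimal, framework-free sketch of that shape follows; Registry, ScopedState
// and the use of std::shared_mutex are illustrative assumptions, not MongoDB's actual types.

#include <cassert>
#include <map>
#include <memory>
#include <mutex>
#include <optional>
#include <shared_mutex>
#include <string>

struct State {
    std::string nss;
};

class ScopedState {
public:
    // Locked variant: keeps the per-entry mutex held for the accessor's lifetime.
    ScopedState(std::unique_lock<std::shared_mutex> lock, State* state)
        : _lock(std::move(lock)), _state(state) {}

    // Lock-free variant, mirroring the non-shard-server fast path.
    explicit ScopedState(State* state) : _state(state) {}

    State* operator->() const {
        return _state;
    }

private:
    std::optional<std::unique_lock<std::shared_mutex>> _lock;
    State* _state;
};

class Registry {
public:
    ScopedState acquire(const std::string& serializedNss, bool isShardServer) {
        Entry* entry = _getOrCreate(serializedNss);
        if (isShardServer) {
            // Hold the per-entry mutex so the State cannot be swapped out underneath us.
            return ScopedState(std::unique_lock<std::shared_mutex>(entry->mutex),
                               entry->state.get());
        }
        // Non-shard servers never replace the entry, so skip the lock for performance.
        return ScopedState(entry->state.get());
    }

private:
    struct Entry {
        std::shared_mutex mutex;
        std::unique_ptr<State> state;
    };

    // getOrCreate keyed by the serialized namespace, as in CollectionShardingStateMap.
    Entry* _getOrCreate(const std::string& key) {
        std::lock_guard<std::mutex> lg(_mapMutex);
        auto it = _entries.find(key);
        if (it == _entries.end()) {
            auto inserted = _entries.try_emplace(key, std::make_unique<Entry>());
            assert(inserted.second);
            inserted.first->second->state = std::make_unique<State>(State{key});
            it = inserted.first;
        }
        return it->second.get();
    }

    std::mutex _mapMutex;
    std::map<std::string, std::unique_ptr<Entry>> _entries;
};

int main() {
    Registry registry;
    auto scoped = registry.acquire("test.coll", /*isShardServer=*/true);
    assert(scoped->nss == "test.coll");
}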
*/ - -#include "mongo/platform/basic.h" - -#include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/collection_sharding_state_factory_shard.h" -#include "mongo/db/service_context.h" -#include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/network_interface_thread_pool.h" -#include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/db/s/collection_sharding_runtime.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - namespace mongo { CollectionShardingStateFactoryShard::CollectionShardingStateFactoryShard( ServiceContext* serviceContext) : CollectionShardingStateFactory(serviceContext) {} -CollectionShardingStateFactoryShard::~CollectionShardingStateFactoryShard() { - join(); -} - -void CollectionShardingStateFactoryShard::join() { - if (_rangeDeletionExecutor) { - _rangeDeletionExecutor->shutdown(); - _rangeDeletionExecutor->join(); - } -} - std::unique_ptr CollectionShardingStateFactoryShard::make( const NamespaceString& nss) { - return std::make_unique( - _serviceContext, nss, _getRangeDeletionExecutor()); -} - -std::shared_ptr -CollectionShardingStateFactoryShard::_getRangeDeletionExecutor() { - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The feature - // flag is used to turn on new range deleter on startup. - if (feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - return nullptr; - } - - stdx::lock_guard lg(_mutex); - if (!_rangeDeletionExecutor) { - const std::string kExecName("CollectionRangeDeleter-TaskExecutor"); - - // CAUTION: The safety of range deletion depends on using a task executor that schedules - // work on a single thread. - auto net = executor::makeNetworkInterface(kExecName); - auto pool = std::make_unique(net.get()); - auto taskExecutor = - std::make_shared(std::move(pool), std::move(net)); - taskExecutor->startup(); - - _rangeDeletionExecutor = std::move(taskExecutor); - } - - return _rangeDeletionExecutor; + return std::make_unique(_serviceContext, nss); } - } // namespace mongo diff --git a/src/mongo/db/s/collection_sharding_state_factory_shard.h b/src/mongo/db/s/collection_sharding_state_factory_shard.h index a3a0c04e82a94..50272e9f4f161 100644 --- a/src/mongo/db/s/collection_sharding_state_factory_shard.h +++ b/src/mongo/db/s/collection_sharding_state_factory_shard.h @@ -29,8 +29,11 @@ #pragma once +#include + +#include "mongo/db/namespace_string.h" #include "mongo/db/s/collection_sharding_state.h" -#include "mongo/executor/task_executor.h" +#include "mongo/db/service_context.h" namespace mongo { @@ -38,20 +41,9 @@ class CollectionShardingStateFactoryShard final : public CollectionShardingState public: CollectionShardingStateFactoryShard(ServiceContext* serviceContext); - ~CollectionShardingStateFactoryShard(); - - void join() override; + void join() override{}; std::unique_ptr make(const NamespaceString& nss) override; - -private: - std::shared_ptr _getRangeDeletionExecutor(); - - // Serializes the instantiation of the task executor - Mutex _mutex = MONGO_MAKE_LATCH("CollectionShardingStateFactoryShard::_mutex"); - - // Required to be a shared_ptr since it is used as an executor for ExecutorFutures. 
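// The factory code removed above lazily built one shared task executor under a mutex, and
// its CAUTION note explains why: scheduling every range deletion on a single thread is what
// kept them serialized. A small stand-alone sketch of that construction pattern, using a
// hand-rolled one-thread queue instead of MongoDB's executor classes (SerialExecutor and
// Factory are illustrative names, not the real types):

#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

class SerialExecutor {
public:
    SerialExecutor() : _worker([this] { _run(); }) {}

    ~SerialExecutor() {
        {
            std::lock_guard<std::mutex> lg(_mutex);
            _shutdown = true;
        }
        _cv.notify_one();
        _worker.join();
    }

    // Tasks run strictly in submission order because there is only one worker thread.
    void schedule(std::function<void()> task) {
        {
            std::lock_guard<std::mutex> lg(_mutex);
            _tasks.push(std::move(task));
        }
        _cv.notify_one();
    }

private:
    void _run() {
        for (;;) {
            std::function<void()> task;
            {
                std::unique_lock<std::mutex> lk(_mutex);
                _cv.wait(lk, [this] { return _shutdown || !_tasks.empty(); });
                if (_tasks.empty())
                    return;  // shut down after draining the queue
                task = std::move(_tasks.front());
                _tasks.pop();
            }
            task();
        }
    }

    std::mutex _mutex;
    std::condition_variable _cv;
    std::queue<std::function<void()>> _tasks;
    bool _shutdown = false;
    std::thread _worker;
};

class Factory {
public:
    // Lazily create the shared executor; the mutex serializes the first construction,
    // mirroring the removed _getRangeDeletionExecutor().
    std::shared_ptr<SerialExecutor> getExecutor() {
        std::lock_guard<std::mutex> lg(_mutex);
        if (!_executor) {
            _executor = std::make_shared<SerialExecutor>();
        }
        return _executor;
    }

private:
    std::mutex _mutex;
    std::shared_ptr<SerialExecutor> _executor;
};

int main() {
    Factory factory;
    factory.getExecutor()->schedule([] {});
}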
- std::shared_ptr _rangeDeletionExecutor = {nullptr}; }; } // namespace mongo diff --git a/src/mongo/db/s/collection_sharding_state_factory_standalone.cpp b/src/mongo/db/s/collection_sharding_state_factory_standalone.cpp index 48fed34a98937..2072889642e24 100644 --- a/src/mongo/db/s/collection_sharding_state_factory_standalone.cpp +++ b/src/mongo/db/s/collection_sharding_state_factory_standalone.cpp @@ -29,6 +29,18 @@ #include "mongo/db/s/collection_sharding_state_factory_standalone.h" +#include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_metadata.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/sharding_index_catalog_cache.h" + namespace mongo { namespace { @@ -92,10 +104,6 @@ class CollectionShardingStateStandalone final : public CollectionShardingState { void appendShardVersion(BSONObjBuilder* builder) const override {} - size_t numberOfRangesScheduledForDeletion() const override { - return 0; - } - private: const NamespaceString& _nss; }; diff --git a/src/mongo/db/s/collection_sharding_state_factory_standalone.h b/src/mongo/db/s/collection_sharding_state_factory_standalone.h index c860aa57a1122..adf0589644f09 100644 --- a/src/mongo/db/s/collection_sharding_state_factory_standalone.h +++ b/src/mongo/db/s/collection_sharding_state_factory_standalone.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/db/namespace_string.h" #include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/s/collmod_coordinator.cpp b/src/mongo/db/s/collmod_coordinator.cpp index f6164cad0a255..dc094b2ddf982 100644 --- a/src/mongo/db/s/collmod_coordinator.cpp +++ b/src/mongo/db/s/collmod_coordinator.cpp @@ -30,26 +30,56 @@ #include "mongo/db/s/collmod_coordinator.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog/coll_mod.h" -#include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/collection_uuid_mismatch.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/coll_mod_gen.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/ops/insert.h" +#include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/participant_block_gen.h" #include "mongo/db/s/sharded_collmod_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_util.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/timeseries/catalog_helper.h" #include "mongo/db/timeseries/timeseries_collmod.h" #include "mongo/db/timeseries/timeseries_options.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/async_rpc.h" +#include "mongo/executor/async_rpc_util.h" #include "mongo/idl/idl_parser.h" -#include "mongo/logv2/log.h" #include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_database_gen.h" +#include 
"mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -78,6 +108,18 @@ bool hasTimeSeriesBucketingUpdate(const CollModRequest& request) { return ts->getGranularity() || ts->getBucketMaxSpanSeconds() || ts->getBucketRoundingSeconds(); } +template +std::vector sendAuthenticatedCommandWithOsiToShards( + OperationContext* opCtx, + std::shared_ptr> opts, + const std::vector& shardIds, + const OperationSessionInfo& osi, + WriteConcernOptions wc = WriteConcernOptions()) { + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(opts->genericArgs, wc); + async_rpc::AsyncRPCCommandHelpers::appendOSI(opts->genericArgs, osi); + return sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, shardIds); +} + } // namespace CollModCoordinator::CollModCoordinator(ShardingDDLCoordinatorService* service, @@ -93,7 +135,7 @@ void CollModCoordinator::checkIfOptionsConflict(const BSONObj& doc) const { const auto& otherReq = otherDoc.getCollModRequest().toBSON(); uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "Another collMod for namespace " << originalNss() + str::stream() << "Another collMod for namespace " << originalNss().toStringForErrorMsg() << " is being executed with different parameters: " << selfReq, SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq)); } @@ -102,11 +144,6 @@ void CollModCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const cmdInfoBuilder->appendElements(_request.toBSON()); }; -// TODO SERVER-68008 Remove once 7.0 becomes last LTS -bool CollModCoordinator::_isPre61Compatible() const { - return operationType() == DDLCoordinatorTypeEnum::kCollModPre61Compatible; -} - void CollModCoordinator::_performNoopRetryableWriteOnParticipants( OperationContext* opCtx, const std::shared_ptr& executor) { auto shardsAndConfigsvr = [&] { @@ -120,9 +157,8 @@ void CollModCoordinator::_performNoopRetryableWriteOnParticipants( return participants; }(); - _updateSession(opCtx); sharding_ddl_util::performNoopRetryableWriteOnShards( - opCtx, shardsAndConfigsvr, getCurrentSession(), executor); + opCtx, shardsAndConfigsvr, getNewSession(opCtx), executor); } void CollModCoordinator::_saveCollectionInfoOnCoordinatorIfNecessary(OperationContext* opCtx) { @@ -188,9 +224,6 @@ ExecutorFuture CollModCoordinator::_runImpl( } }) .then([this, executor = executor, anchor = shared_from_this()] { - if (_isPre61Compatible()) { - return; - } _buildPhaseHandler( Phase::kFreezeMigrations, [this, executor = executor, anchor = shared_from_this()] { auto opCtxHolder = cc().makeOperationContext(); @@ -200,51 +233,36 @@ ExecutorFuture CollModCoordinator::_runImpl( _saveCollectionInfoOnCoordinatorIfNecessary(opCtx); if (_collInfo->isSharded) { - _doc.setCollUUID( - sharding_ddl_util::getCollectionUUID(opCtx, _collInfo->nsForTargeting)); + const auto& collUUID = + sharding_ddl_util::getCollectionUUID(opCtx, _collInfo->nsForTargeting); + _doc.setCollUUID(collUUID); sharding_ddl_util::stopMigrations( - opCtx, _collInfo->nsForTargeting, _doc.getCollUUID()); + opCtx, _collInfo->nsForTargeting, collUUID, getNewSession(opCtx)); } })(); }) .then(_buildPhaseHandler( 
Phase::kBlockShards, - [this, executor = executor, anchor = shared_from_this()] { + [this, token, executor = executor, anchor = shared_from_this()] { auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); - _updateSession(opCtx); - _saveCollectionInfoOnCoordinatorIfNecessary(opCtx); - - if (_isPre61Compatible() && _collInfo->isSharded) { - const auto migrationsAlreadyBlockedForBucketNss = - hasTimeSeriesBucketingUpdate(_request) && - _doc.getMigrationsAlreadyBlockedForBucketNss(); - - if (!migrationsAlreadyBlockedForBucketNss) { - _doc.setCollUUID(sharding_ddl_util::getCollectionUUID( - opCtx, _collInfo->nsForTargeting, true /* allowViews */)); - sharding_ddl_util::stopMigrations( - opCtx, _collInfo->nsForTargeting, _doc.getCollUUID()); - } - } - _saveShardingInfoOnCoordinatorIfNecessary(opCtx); if (_collInfo->isSharded && hasTimeSeriesBucketingUpdate(_request)) { - if (_isPre61Compatible()) { - auto newDoc = _doc; - newDoc.setMigrationsAlreadyBlockedForBucketNss(true); - _updateStateDocument(opCtx, std::move(newDoc)); - } - ShardsvrParticipantBlock blockCRUDOperationsRequest(_collInfo->nsForTargeting); - const auto cmdObj = CommandHelpers::appendMajorityWriteConcern( - blockCRUDOperationsRequest.toBSON({})); - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, nss().db(), cmdObj, _shardingInfo->shardsOwningChunks, **executor); + blockCRUDOperationsRequest.setBlockType( + CriticalSectionBlockTypeEnum::kReadsAndWrites); + auto opts = + std::make_shared>( + blockCRUDOperationsRequest, + **executor, + token, + async_rpc::GenericArgs()); + sendAuthenticatedCommandWithOsiToShards( + opCtx, opts, _shardingInfo->shardsOwningChunks, getNewSession(opCtx)); } })) .then(_buildPhaseHandler( @@ -256,8 +274,6 @@ ExecutorFuture CollModCoordinator::_runImpl( auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); - _updateSession(opCtx); - _saveCollectionInfoOnCoordinatorIfNecessary(opCtx); _saveShardingInfoOnCoordinatorIfNecessary(opCtx); @@ -277,14 +293,11 @@ ExecutorFuture CollModCoordinator::_runImpl( } })) .then(_buildPhaseHandler( - Phase::kUpdateShards, - [this, executor = executor, anchor = shared_from_this()] { + Phase::kUpdateShards, [this, token, executor = executor, anchor = shared_from_this()] { auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); - _updateSession(opCtx); - _saveCollectionInfoOnCoordinatorIfNecessary(opCtx); _saveShardingInfoOnCoordinatorIfNecessary(opCtx); @@ -298,11 +311,8 @@ ExecutorFuture CollModCoordinator::_runImpl( return; } else if (allowMigrations) { // Previous run on a different node completed, but we lost the - // result in the stepdown. Restart from stage in which we disallow - // migrations. - auto newPhase = _isPre61Compatible() ? Phase::kBlockShards - : Phase::kFreezeMigrations; - _enterPhase(newPhase); + // result in the stepdown. Restart from kFreezeMigrations. + _enterPhase(Phase::kFreezeMigrations); uasserted(ErrorCodes::Interrupted, "Retriable error to move to previous stage"); } @@ -331,39 +341,37 @@ ExecutorFuture CollModCoordinator::_runImpl( // strip out other incompatible options. 
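// The new sendAuthenticatedCommandWithOsiToShards helper used in the phases above stamps
// the common generic arguments (majority write concern and the operation session info) onto
// a request once, and only then fans it out to the targeted shards. A simplified sketch of
// that "decorate once, broadcast" idea; ShardRequest, SessionInfo and broadcastWithCommonArgs
// are made-up stand-ins for the async_rpc machinery, not real MongoDB APIs.

#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct SessionInfo {
    std::string sessionId;
    long long txnNumber = 0;
};

struct ShardRequest {
    std::string commandName;
    std::string writeConcern;  // e.g. "majority"
    SessionInfo session;
};

// Every caller goes through this helper, so no phase can forget the write concern or the
// session info that makes the remote write retryable.
std::vector<std::pair<std::string, ShardRequest>> broadcastWithCommonArgs(
    ShardRequest request,
    const std::vector<std::string>& shardIds,
    const SessionInfo& session) {
    request.writeConcern = "majority";
    request.session = session;

    std::vector<std::pair<std::string, ShardRequest>> sent;
    sent.reserve(shardIds.size());
    for (const auto& shardId : shardIds) {
        // A real implementation would dispatch asynchronously and collect the responses.
        sent.emplace_back(shardId, request);
    }
    return sent;
}

int main() {
    SessionInfo session{"lsid-1", 7};
    auto sent = broadcastWithCommonArgs({"shardsvrCollModParticipant", "", {}},
                                        {"shard0", "shard1"},
                                        session);
    for (const auto& [shardId, req] : sent) {
        std::cout << shardId << " <- " << req.commandName << " (wc=" << req.writeConcern
                  << ", lsid=" << req.session.sessionId << ")\n";
    }
}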
auto dryRunRequest = ShardsvrCollModParticipant{ originalNss(), makeCollModDryRunRequest(_request)}; + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto optsDryRun = std::make_shared< + async_rpc::AsyncRPCOptions>( + dryRunRequest, **executor, token, args); sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - nss().db(), - CommandHelpers::appendMajorityWriteConcern( - dryRunRequest.toBSON({})), - shardsOwningChunks, - **executor); + opCtx, optsDryRun, shardsOwningChunks); } // A view definition will only be present on the primary shard. So we pass // an addition 'performViewChange' flag only to the primary shard. if (primaryShardOwningChunk != shardsOwningChunks.end()) { request.setPerformViewChange(true); - const auto& primaryResponse = - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - nss().db(), - CommandHelpers::appendMajorityWriteConcern(request.toBSON({})), - {_shardingInfo->primaryShard}, - **executor); + auto opts = std::make_shared< + async_rpc::AsyncRPCOptions>( + request, **executor, token, async_rpc::GenericArgs()); + const auto& primaryResponse = sendAuthenticatedCommandWithOsiToShards( + opCtx, opts, {_shardingInfo->primaryShard}, getNewSession(opCtx)); + responses.insert( responses.end(), primaryResponse.begin(), primaryResponse.end()); shardsOwningChunks.erase(primaryShardOwningChunk); } request.setPerformViewChange(false); - const auto& secondaryResponses = - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - nss().db(), - CommandHelpers::appendMajorityWriteConcern(request.toBSON({})), - shardsOwningChunks, - **executor); + auto opts = std::make_shared< + async_rpc::AsyncRPCOptions>( + request, **executor, token, async_rpc::GenericArgs()); + const auto& secondaryResponses = sendAuthenticatedCommandWithOsiToShards( + opCtx, opts, shardsOwningChunks, getNewSession(opCtx)); + responses.insert( responses.end(), secondaryResponses.begin(), secondaryResponses.end()); @@ -375,12 +383,15 @@ ExecutorFuture CollModCoordinator::_runImpl( CommandHelpers::appendSimpleCommandStatus(builder, ok, errmsg); } _result = builder.obj(); + + const auto collUUID = _doc.getCollUUID(); sharding_ddl_util::resumeMigrations( - opCtx, _collInfo->nsForTargeting, _doc.getCollUUID()); + opCtx, _collInfo->nsForTargeting, collUUID, getNewSession(opCtx)); } catch (DBException& ex) { if (!_isRetriableErrorForDDLCoordinator(ex.toStatus())) { + const auto collUUID = _doc.getCollUUID(); sharding_ddl_util::resumeMigrations( - opCtx, _collInfo->nsForTargeting, _doc.getCollUUID()); + opCtx, _collInfo->nsForTargeting, collUUID, getNewSession(opCtx)); } throw; } @@ -403,17 +414,7 @@ ExecutorFuture CollModCoordinator::_runImpl( subBuilder.doneFast(); _result = builder.obj(); } - })) - .onError([this, anchor = shared_from_this()](const Status& status) { - if (!status.isA() && - !status.isA()) { - LOGV2_ERROR(5757002, - "Error running collMod", - logAttrs(nss()), - "error"_attr = redact(status)); - } - return status; - }); + })); } } // namespace mongo diff --git a/src/mongo/db/s/collmod_coordinator.h b/src/mongo/db/s/collmod_coordinator.h index d27f981c93e89..f56d5031c467e 100644 --- a/src/mongo/db/s/collmod_coordinator.h +++ b/src/mongo/db/s/collmod_coordinator.h @@ -29,9 +29,31 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/coll_mod_gen.h" 
+#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/s/collmod_coordinator_document_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" namespace mongo { @@ -88,9 +110,6 @@ class CollModCoordinator final void _saveShardingInfoOnCoordinatorIfNecessary(OperationContext* opCtx); - // TODO SERVER-68008 Remove once 7.0 becomes last LTS - bool _isPre61Compatible() const; - const mongo::CollModRequest _request; boost::optional _result; diff --git a/src/mongo/db/s/collmod_coordinator_document.idl b/src/mongo/db/s/collmod_coordinator_document.idl index 06fb23aa9c1d2..f0616633cabd7 100644 --- a/src/mongo/db/s/collmod_coordinator_document.idl +++ b/src/mongo/db/s/collmod_coordinator_document.idl @@ -69,8 +69,3 @@ structs: type: uuid description: "Collection uuid." optional: true - # TODO SERVER-68008 remove once 7.0 becomes last LTS - migrationsAlreadyBlockedForBucketNss: - type: safeBool - description: "Flag set by the coordinator before acquiring critical section on bucket namespace" - optional: true diff --git a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp index 3b4c63f714fb5..82d8072cc8a08 100644 --- a/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp +++ b/src/mongo/db/s/compact_structured_encryption_data_coordinator.cpp @@ -28,20 +28,48 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/compact_structured_encryption_data_coordinator.h" - -#include "mongo/base/checked_cast.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/fle_options_gen.h" +#include "mongo/crypto/fle_stats.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/commands/create_gen.h" #include "mongo/db/commands/rename_collection_gen.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/drop_gen.h" -#include "mongo/db/persistent_task_store.h" +#include "mongo/db/fle_crud.h" +#include "mongo/db/s/compact_structured_encryption_data_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -221,14 +249,6 @@ bool doRenameOperation(const CompactionState& 
state, return !needCompact; } -CompactStats doCompactOperationPre70Compatible( - const CompactStructuredEncryptionDataStatePre70Compatible& state) { - LOGV2_DEBUG(6517005, 1, "Skipping compaction"); - CompactStats stats({}, {}); - stats.setEcc({}); - return stats; -} - void doCompactOperation(const CompactStructuredEncryptionDataState& state, const FLECompactESCDeleteSet& escDeleteSet, ECStats* escStats, @@ -299,117 +319,24 @@ void doDropOperation(const State& state) { } // namespace -boost::optional -CompactStructuredEncryptionDataCoordinatorPre70Compatible::reportForCurrentOp( +boost::optional CompactStructuredEncryptionDataCoordinator::reportForCurrentOp( MongoProcessInterface::CurrentOpConnectionsMode connMode, MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept { auto bob = basicReportBuilder(); stdx::lock_guard lg{_docMutex}; - bob.append("escNss", _doc.getEscNss().ns()); - bob.append("eccNss", _doc.getEccNss().ns()); - bob.append("ecocNss", _doc.getEcocNss().ns()); + bob.append("escNss", NamespaceStringUtil::serialize(_doc.getEscNss())); + bob.append("ecocNss", NamespaceStringUtil::serialize(_doc.getEcocNss())); bob.append("ecocUuid", _doc.getEcocUuid() ? _doc.getEcocUuid().value().toString() : "none"); - bob.append("ecocRenameNss", _doc.getEcocRenameNss().ns()); + bob.append("ecocRenameNss", NamespaceStringUtil::serialize(_doc.getEcocRenameNss())); bob.append("ecocRenameUuid", _doc.getEcocRenameUuid() ? _doc.getEcocRenameUuid().value().toString() : "none"); return bob.obj(); } -// TODO: SERVER-68373 remove once 7.0 becomes last LTS -void CompactStructuredEncryptionDataCoordinatorPre70Compatible::_enterPhase(const Phase& newPhase) { - // Before 6.1, this coordinator persists the result of the doCompactOperation() - // by reusing the compactionTokens field to store the _response BSON. - // If newPhase is kDropTempCollection, this override of _enterPhase performs this - // replacement on the in-memory state document (_doc), before calling the base _enterPhase() - // which persists _doc to disk. In the event that updating the persisted document fails, - // the replaced compaction tokens are restored in _doc. - using Base = - RecoverableShardingDDLCoordinator; - bool useOverload = _isPre61Compatible() && (newPhase == Phase::kDropTempCollection); - - if (useOverload) { - BSONObj compactionTokensCopy; - { - stdx::lock_guard lg(_docMutex); - compactionTokensCopy = _doc.getCompactionTokens().getOwned(); - _doc.setCompactionTokens(_response->toBSON()); - } - - try { - Base::_enterPhase(newPhase); - } catch (...) 
{ - // on error, restore the compaction tokens - stdx::lock_guard lg(_docMutex); - _doc.setCompactionTokens(std::move(compactionTokensCopy)); - throw; - } - } else { - Base::_enterPhase(newPhase); - } -} - -ExecutorFuture CompactStructuredEncryptionDataCoordinatorPre70Compatible::_runImpl( - std::shared_ptr executor, - const CancellationToken& token) noexcept { - return ExecutorFuture(**executor) - .then(_buildPhaseHandler(Phase::kRenameEcocForCompact, - [this, anchor = shared_from_this()]() { - _skipCompact = doRenameOperation( - _doc, &_ecocRenameUuid, nullptr, nullptr); - stdx::unique_lock ul{_docMutex}; - _doc.setSkipCompact(_skipCompact); - _doc.setEcocRenameUuid(_ecocRenameUuid); - })) - .then(_buildPhaseHandler(Phase::kCompactStructuredEncryptionData, - [this, anchor = shared_from_this()]() { - _response = doCompactOperationPre70Compatible(_doc); - if (!_isPre61Compatible()) { - stdx::lock_guard lg(_docMutex); - _doc.setResponse(_response); - } - })) - .then(_buildPhaseHandler(Phase::kDropTempCollection, [this, anchor = shared_from_this()] { - if (!_isPre61Compatible()) { - invariant(_doc.getResponse()); - _response = *_doc.getResponse(); - } else { - try { - // restore the response that was stored in the compactionTokens field - IDLParserContext ctxt("response"); - _response = CompactStructuredEncryptionDataCommandReply::parse( - ctxt, _doc.getCompactionTokens()); - } catch (...) { - LOGV2_ERROR(6846101, - "Failed to parse response from " - "CompactStructuredEncryptionDataState document", - "response"_attr = _doc.getCompactionTokens()); - // ignore for compatibility with 6.0.0 - } - } - - doDropOperation(_doc); - if (MONGO_unlikely(fleCompactHangAfterDropTempCollection.shouldFail())) { - LOGV2(6790902, "Hanging due to fleCompactHangAfterDropTempCollection fail point"); - fleCompactHangAfterDropTempCollection.pauseWhileSet(); - } - })); -} - -boost::optional CompactStructuredEncryptionDataCoordinator::reportForCurrentOp( - MongoProcessInterface::CurrentOpConnectionsMode connMode, - MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept { - auto bob = basicReportBuilder(); - - stdx::lock_guard lg{_docMutex}; - bob.append("escNss", _doc.getEscNss().ns()); - bob.append("ecocNss", _doc.getEcocNss().ns()); - bob.append("ecocUuid", _doc.getEcocUuid() ? _doc.getEcocUuid().value().toString() : "none"); - bob.append("ecocRenameNss", _doc.getEcocRenameNss().ns()); - bob.append("ecocRenameUuid", - _doc.getEcocRenameUuid() ? 
_doc.getEcocRenameUuid().value().toString() : "none"); - return bob.obj(); +std::set CompactStructuredEncryptionDataCoordinator::_getAdditionalLocksToAcquire( + OperationContext* opCtx) { + return {_doc.getEcocNss(), _doc.getEscNss(), _doc.getEcocRenameNss()}; } ExecutorFuture CompactStructuredEncryptionDataCoordinator::_runImpl( diff --git a/src/mongo/db/s/compact_structured_encryption_data_coordinator.h b/src/mongo/db/s/compact_structured_encryption_data_coordinator.h index 2779cd72e2f84..3a9736fafaf4b 100644 --- a/src/mongo/db/s/compact_structured_encryption_data_coordinator.h +++ b/src/mongo/db/s/compact_structured_encryption_data_coordinator.h @@ -29,66 +29,34 @@ #pragma once +#include #include +#include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/crypto/fle_stats_gen.h" #include "mongo/db/commands/fle2_compact.h" +#include "mongo/db/commands/fle2_compact_gen.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/s/compact_structured_encryption_data_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { -// TODO: SERVER-68373 remove once 7.0 becomes last LTS -class CompactStructuredEncryptionDataCoordinatorPre70Compatible final - : public RecoverableShardingDDLCoordinator { -public: - static constexpr auto kStateContext = "CompactStructuredEncryptionDataStatePre70Compatible"_sd; - using StateDoc = CompactStructuredEncryptionDataStatePre70Compatible; - using Phase = CompactStructuredEncryptionDataPhaseEnum; - - CompactStructuredEncryptionDataCoordinatorPre70Compatible( - ShardingDDLCoordinatorService* service, const BSONObj& doc) - : RecoverableShardingDDLCoordinator( - service, "CompactStructuredEncryptionDataCoordinatorPre70Compatible", doc) {} - - boost::optional reportForCurrentOp( - MongoProcessInterface::CurrentOpConnectionsMode connMode, - MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept final; - - CompactStructuredEncryptionDataCommandReply getResponse(OperationContext* opCtx) { - getCompletionFuture().get(opCtx); - invariant(_response); - return *_response; - } - - void checkIfOptionsConflict(const BSONObj& stateDoc) const final {} - -private: - StringData serializePhase(const Phase& phase) const override { - return CompactStructuredEncryptionDataPhase_serializer(phase); - } - - ExecutorFuture _runImpl(std::shared_ptr executor, - const CancellationToken& token) noexcept final; - - // TODO SERVER-68373 remove once 7.0 becomes last LTS - bool _isPre61Compatible() const { - return operationType() == - DDLCoordinatorTypeEnum::kCompactStructuredEncryptionDataPre61Compatible; - } - - // TODO SERVER-68373 remove once 7.0 becomes last LTS - void _enterPhase(const Phase& newPhase) override; - -private: - boost::optional _response; - bool _skipCompact{false}; - boost::optional _ecocRenameUuid; -}; - class CompactStructuredEncryptionDataCoordinator final : public RecoverableShardingDDLCoordinator { @@ -122,6 +90,8 @@ class CompactStructuredEncryptionDataCoordinator final ExecutorFuture _runImpl(std::shared_ptr executor, const 
CancellationToken& token) noexcept final; + std::set _getAdditionalLocksToAcquire(OperationContext* opCtx) override; + private: // The response to the compact command boost::optional _response; diff --git a/src/mongo/db/s/config/config_server_test_fixture.cpp b/src/mongo/db/s/config/config_server_test_fixture.cpp index 55611252ce05e..197eea6cb937b 100644 --- a/src/mongo/db/s/config/config_server_test_fixture.cpp +++ b/src/mongo/db/s/config/config_server_test_fixture.cpp @@ -27,53 +27,61 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/config/config_server_test_fixture.h" - -#include +#include +#include +#include #include +#include +#include #include +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/client/remote_command_targeter_factory_mock.h" -#include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" -#include "mongo/db/commands.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/query/cursor_response.h" -#include "mongo/db/query/query_request_helper.h" -#include "mongo/db/repl/oplog.h" -#include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/config_server_op_observer.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/shard_id.h" -#include "mongo/executor/task_executor_pool.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor_test_fixture.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" -#include "mongo/rpc/metadata/tracking_metadata.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/catalog/sharding_catalog_client_impl.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/client/config_shard_wrapper.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/config_server_catalog_cache_loader.h" #include "mongo/s/database_version.h" #include "mongo/s/query/cluster_cursor_manager.h" #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/util/clock_source_mock.h" -#include "mongo/util/tick_source_mock.h" +#include "mongo/util/str.h" +#include 
"mongo/util/time_support.h" namespace mongo { @@ -111,7 +119,7 @@ void ConfigServerTestFixture::setUp() { replicationCoordinator()->alwaysAllowWrites(true); // Initialize sharding components as a config server. - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; // The catalog manager requires a special executor used for operations during addShard. auto specialNet(std::make_unique()); @@ -128,6 +136,7 @@ void ConfigServerTestFixture::setUp() { uassertStatusOK(initializeGlobalShardingStateForMongodForTest(ConnectionString::forLocal())); auto shardLocal = Grid::get(getServiceContext())->shardRegistry()->createLocalConfigShard(); + ASSERT_EQ(typeid(*shardLocal).name(), typeid(ConfigShardWrapper).name()); auto localCatalogClient = std::make_unique(shardLocal); ShardingCatalogManager::create(getServiceContext(), std::move(specialExec), @@ -360,7 +369,7 @@ StatusWith ConfigServerTestFixture::getChunkDoc(OperationContext* opC StatusWith ConfigServerTestFixture::getCollectionPlacementVersion( OperationContext* opCtx, const NamespaceString& nss) { auto collectionDoc = findOneOnConfigCollection( - opCtx, CollectionType::ConfigNS, BSON(CollectionType::kNssFieldName << nss.ns())); + opCtx, CollectionType::ConfigNS, BSON(CollectionType::kNssFieldName << nss.ns_forTest())); if (!collectionDoc.isOK()) return collectionDoc.getStatus(); diff --git a/src/mongo/db/s/config/config_server_test_fixture.h b/src/mongo/db/s/config/config_server_test_fixture.h index c9e1411258016..dba1f73d3e1c0 100644 --- a/src/mongo/db/s/config/config_server_test_fixture.h +++ b/src/mongo/db/s/config/config_server_test_fixture.h @@ -29,9 +29,39 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_mongod_test_fixture.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/database_version.h" +#include "mongo/s/grid.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -130,7 +160,7 @@ class ConfigServerTestFixture : public ShardingMongodTestFixture { /** * Setup the config.shards collection to contain the given shards. */ - void setupShards(const std::vector& shards); + virtual void setupShards(const std::vector& shards); /** * Retrieves the shard document from the config server. 
diff --git a/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp b/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp index 9388b0e735a76..ae59a3d285f46 100644 --- a/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp +++ b/src/mongo/db/s/config/configsvr_abort_reshard_collection_command.cpp @@ -28,18 +28,42 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/request_types/abort_reshard_collection_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -99,11 +123,6 @@ class ConfigsvrAbortReshardCollectionCommand final using InvocationBase::InvocationBase; void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - "abortReshardCollection command not enabled", - resharding::gFeatureFlagResharding.isEnabled( - serverGlobalParams.featureCompatibility)); - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); uassert(ErrorCodes::IllegalOperation, @@ -158,8 +177,9 @@ class ConfigsvrAbortReshardCollectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_add_shard_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_command.cpp index a9ef2a14dc251..69562e9c0b22c 100644 --- a/src/mongo/db/s/config/configsvr_add_shard_command.cpp +++ b/src/mongo/db/s/config/configsvr_add_shard_command.cpp @@ -28,27 +28,50 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" #include 
"mongo/db/audit.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_cluster_parameters_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog/type_shard.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/add_shard_request_type.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -58,11 +81,6 @@ namespace mongo { namespace { Status notifyShardsOfSecondShardIfNeeded(OperationContext* opCtx) { - if (!feature_flags::gClusterCardinalityParameter.isEnabled( - serverGlobalParams.featureCompatibility)) { - return Status::OK(); - } - auto* clusterParameters = ServerParameterSet::getClusterParameterSet(); auto* clusterCardinalityParam = clusterParameters->get>( @@ -86,7 +104,7 @@ Status notifyShardsOfSecondShardIfNeeded(OperationContext* opCtx) { // Set the cluster parameter to disallow direct writes to shards ConfigsvrSetClusterParameter configsvrSetClusterParameter( BSON("shardedClusterCardinalityForDirectConns" << BSON("hasTwoOrMoreShards" << true))); - configsvrSetClusterParameter.setDbName(DatabaseName(boost::none, "admin")); + configsvrSetClusterParameter.setDbName(DatabaseName::kAdmin); const auto cmdResponse = shardRegistry->getConfigShard()->runCommandWithFixedRetryAttempts( opCtx, @@ -130,11 +148,12 @@ class ConfigSvrAddShardCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp b/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp index 0fdf8d5bc0b88..e2b76f2b7dde4 100644 --- a/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp +++ b/src/mongo/db/s/config/configsvr_add_shard_to_zone_command.cpp @@ -28,17 +28,29 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include 
"mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/s/request_types/add_shard_to_zone_request_type.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -83,11 +95,12 @@ class ConfigSvrAddShardToZoneCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/config/configsvr_balancer_collection_status_command.cpp b/src/mongo/db/s/config/configsvr_balancer_collection_status_command.cpp index 168d89609251c..71b6a42da2174 100644 --- a/src/mongo/db/s/config/configsvr_balancer_collection_status_command.cpp +++ b/src/mongo/db/s/config/configsvr_balancer_collection_status_command.cpp @@ -28,23 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/balancer/balancer.h" -#include "mongo/db/s/shard_filtering_metadata_refresh.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/s/catalog_cache_loader.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/balancer_collection_status_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -71,7 +74,8 @@ class ConfigsvrBalancerCollectionStatusCmd final const NamespaceString& nss = ns(); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace specified '" << nss.ns() << "'", + str::stream() << "Invalid namespace specified '" << nss.toStringForErrorMsg() + << "'", nss.isValid()); return Balancer::get(opCtx)->getBalancerStatusForNs(opCtx, nss); } @@ -89,8 +93,9 @@ class ConfigsvrBalancerCollectionStatusCmd final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - 
->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_check_cluster_metadata_consistency_command.cpp b/src/mongo/db/s/config/configsvr_check_cluster_metadata_consistency_command.cpp index 468304083ba57..ca91a24b88fbe 100644 --- a/src/mongo/db/s/config/configsvr_check_cluster_metadata_consistency_command.cpp +++ b/src/mongo/db/s/config/configsvr_check_cluster_metadata_consistency_command.cpp @@ -28,13 +28,53 @@ */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/clientcursor.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/metadata_consistency_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/metadata_consistency_util.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -135,19 +175,21 @@ class ConfigsvrCheckClusterMetadataConsistencyCommand final std::make_move_iterator(hiddenCollectionsIncon.begin()), std::make_move_iterator(hiddenCollectionsIncon.end())); + const auto nss = ns(); auto exec = metadata_consistency_util::makeQueuedPlanExecutor( - opCtx, std::move(inconsistencies), ns()); + opCtx, std::move(inconsistencies), nss); ClientCursorParams cursorParams{ std::move(exec), - ns(), + nss, AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserName(), APIParameters::get(opCtx), opCtx->getWriteConcern(), repl::ReadConcernArgs::get(opCtx), ReadPreferenceSetting::get(opCtx), request().toBSON({}), - {Privilege(ResourcePattern::forClusterResource(), ActionType::internal)}}; + {Privilege(ResourcePattern::forClusterResource(nss.tenantId()), + ActionType::internal)}}; const auto batchSize = [&]() -> long long { const auto& cursorOpts = request().getCursor(); @@ -175,8 +217,9 @@ class ConfigsvrCheckClusterMetadataConsistencyCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + 
->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_check_metadata_consistency_command.cpp b/src/mongo/db/s/config/configsvr_check_metadata_consistency_command.cpp index 80fee806647a0..41279820a1483 100644 --- a/src/mongo/db/s/config/configsvr_check_metadata_consistency_command.cpp +++ b/src/mongo/db/s/config/configsvr_check_metadata_consistency_command.cpp @@ -28,11 +28,59 @@ */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/clientcursor.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/metadata_consistency_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/metadata_consistency_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/client/shard_remote_gen.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -111,7 +159,8 @@ class ConfigsvrCheckMetadataConsistencyCommand final repl::ReadConcernArgs::get(opCtx), ReadPreferenceSetting::get(opCtx), request().toBSON({}), - {Privilege(ResourcePattern::forClusterResource(), ActionType::internal)}}; + {Privilege(ResourcePattern::forClusterResource(nss.tenantId()), + ActionType::internal)}}; const auto batchSize = [&]() -> long long { const auto& cursorOpts = request().getCursor(); @@ -137,21 +186,42 @@ class ConfigsvrCheckMetadataConsistencyCommand final inconsistenciesMerged.insert(inconsistenciesMerged.end(), std::make_move_iterator(chunksInconsistencies.begin()), std::make_move_iterator(chunksInconsistencies.end())); + + auto zonesInconsistencies = metadata_consistency_util::checkZonesInconsistencies( + opCtx, coll, _getCollectionZones(opCtx, coll.getNss())); + + inconsistenciesMerged.insert(inconsistenciesMerged.end(), + std::make_move_iterator(zonesInconsistencies.begin()), + std::make_move_iterator(zonesInconsistencies.end())); } std::vector _getCollectionChunks(OperationContext* opCtx, const CollectionType& coll) { + auto matchStage = BSON("$match" << 
BSON(ChunkType::collectionUUID() << coll.getUuid())); + static const auto sortStage = BSON("$sort" << BSON(ChunkType::min() << 1)); + + AggregateCommandRequest aggRequest{ChunkType::ConfigNS, + {std::move(matchStage), sortStage}}; + auto aggResponse = + ShardingCatalogManager::get(opCtx)->localCatalogClient()->runCatalogAggregation( + opCtx, + aggRequest, + {repl::ReadConcernLevel::kSnapshotReadConcern}, + Milliseconds(gFindChunksOnConfigTimeoutMS.load())); + + std::vector chunks; + chunks.reserve(aggResponse.size()); + for (auto&& responseEntry : aggResponse) { + chunks.emplace_back(uassertStatusOK(ChunkType::parseFromConfigBSON( + responseEntry, coll.getEpoch(), coll.getTimestamp()))); + } + return chunks; + } + + std::vector _getCollectionZones(OperationContext* opCtx, + const NamespaceString& nss) { const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - // TODO SERVER-75490: Use kSnapshotReadConcern when getting chunks from the catalog - return uassertStatusOK(catalogClient->getChunks( - opCtx, - BSON(ChunkType::collectionUUID() << coll.getUuid()) /*query*/, - BSON(ChunkType::min() << 1) /*sort*/, - boost::none /*limit*/, - nullptr /*opTime*/, - coll.getEpoch(), - coll.getTimestamp(), - repl::ReadConcernLevel::kMajorityReadConcern)); + return uassertStatusOK(catalogClient->getTagsForCollection(opCtx, nss)); } NamespaceString ns() const override { @@ -166,8 +236,9 @@ class ConfigsvrCheckMetadataConsistencyCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_cleanup_reshard_collection_command.cpp b/src/mongo/db/s/config/configsvr_cleanup_reshard_collection_command.cpp index bb87c8a8aacc0..f05d252d6be2b 100644 --- a/src/mongo/db/s/config/configsvr_cleanup_reshard_collection_command.cpp +++ b/src/mongo/db/s/config/configsvr_cleanup_reshard_collection_command.cpp @@ -28,17 +28,42 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/resharding/resharding_manual_cleanup.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/request_types/cleanup_reshard_collection_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include 
"mongo/s/write_ops/batched_command_request.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -50,7 +75,7 @@ using namespace fmt::literals; auto constructFinalMetadataRemovalUpdateOperation(OperationContext* opCtx, const NamespaceString& nss) { - auto query = BSON(CollectionType::kNssFieldName << nss.toString()); + auto query = BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)); auto collEntryFieldsToUnset = BSON(CollectionType::kReshardingFieldsFieldName << 1 << CollectionType::kAllowMigrationsFieldName << 1); @@ -78,11 +103,6 @@ class ConfigsvrCleanupReshardCollectionCommand final using InvocationBase::InvocationBase; void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - "cleanupReshardCollection command not enabled", - resharding::gFeatureFlagResharding.isEnabled( - serverGlobalParams.featureCompatibility)); - uassert(ErrorCodes::IllegalOperation, "_configsvrCleanupReshardCollection can only be run on config servers", serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); @@ -115,7 +135,7 @@ class ConfigsvrCleanupReshardCollectionCommand final uassert(5403504, "Expected collection entry for {} to no longer have resharding metadata, but " "metadata documents still exist; please rerun the cleanupReshardCollection " - "command"_format(ns().toString()), + "command"_format(ns().toStringForErrorMsg()), !collEntry.getReshardingFields()); } @@ -132,8 +152,9 @@ class ConfigsvrCleanupReshardCollectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp b/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp index 5946e6a3baac8..53e1ac9b457e1 100644 --- a/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp +++ b/src/mongo/db/s/config/configsvr_clear_jumbo_flag_command.cpp @@ -28,12 +28,33 @@ */ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/oid.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -71,12 +92,13 @@ class ConfigsvrClearJumboFlagCommand final : public TypedCommand&) { uasserted(ErrorCodes::NamespaceNotSharded, - 
str::stream() << "clearJumboFlag namespace " << nss << " is not sharded"); + str::stream() << "clearJumboFlag namespace " << nss.toStringForErrorMsg() + << " is not sharded"); } uassert(ErrorCodes::StaleEpoch, str::stream() - << "clearJumboFlag namespace " << nss.toString() + << "clearJumboFlag namespace " << nss.toStringForErrorMsg() << " has a different epoch than mongos had in its routing table cache", request().getEpoch() == collType.getEpoch()); @@ -100,8 +122,9 @@ class ConfigsvrClearJumboFlagCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_collmod_command.cpp b/src/mongo/db/s/config/configsvr_collmod_command.cpp index c87d12874be7c..9f3c784d3f1e9 100644 --- a/src/mongo/db/s/config/configsvr_collmod_command.cpp +++ b/src/mongo/db/s/config/configsvr_collmod_command.cpp @@ -28,18 +28,31 @@ */ +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/coll_mod_gen.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharded_collmod_gen.h" -#include "mongo/logv2/log.h" -#include "mongo/util/str.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -110,8 +123,9 @@ class ConfigsvrCollModCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } configsvrCollModCmd; diff --git a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp index fff3535488811..f4bc7baf8ebed 100644 --- a/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp +++ b/src/mongo/db/s/config/configsvr_commit_chunk_migration_command.cpp @@ -27,21 +27,31 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/s/chunk_move_write_concern_options.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/commit_chunk_migration_gen.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/type_chunk.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/grid.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -156,8 +166,9 @@ class ConfigSvrCommitChunkMigrationCommand uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_commit_index_command.cpp b/src/mongo/db/s/config/configsvr_commit_index_command.cpp index 4d39704890cf5..b1fecd7a6308d 100644 --- a/src/mongo/db/s/config/configsvr_commit_index_command.cpp +++ b/src/mongo/db/s/config/configsvr_commit_index_command.cpp @@ -27,18 +27,59 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/sharded_index_catalog_commands_gen.h" -#include "mongo/db/s/sharding_index_catalog_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction/transaction_participant_resource_yielder.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/grid.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -81,9 +122,9 @@ void commitIndexInTransaction(OperationContext* opCtx, std::make_shared(CollectionType::ConfigNS); updateCollectionOp->setUpdates({[&] { write_ops::UpdateOpEntry entry; - entry.setQ(BSON(CollectionType::kNssFieldName << userCollectionNss.ns() - << CollectionType::kUuidFieldName - << collectionUUID)); + entry.setQ(BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(userCollectionNss) + << CollectionType::kUuidFieldName << collectionUUID)); entry.setU(write_ops::UpdateModification::parseFromClassicUpdate( BSON("$set" << BSON(CollectionType::kUuidFieldName << collectionUUID << CollectionType::kIndexVersionFieldName @@ -94,11 +135,9 @@ void commitIndexInTransaction(OperationContext* opCtx, }()}); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - txn_api::SyncTransactionWithRetries txn( opCtx, - sleepInlineExecutor, + executor, TransactionParticipantResourceYielder::make("commitIndexCatalogEntry"), inlineExecutor); @@ -203,8 +242,9 @@ class ConfigsvrCommitIndexCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git 
a/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp b/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp index adb2873fd9cf9..226d259ee8ad9 100644 --- a/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp +++ b/src/mongo/db/s/config/configsvr_commit_move_primary_command.cpp @@ -29,11 +29,30 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/move_primary_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -80,8 +99,9 @@ class ConfigsvrCommitMovePrimaryCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_commit_reshard_collection_command.cpp b/src/mongo/db/s/config/configsvr_commit_reshard_collection_command.cpp index 0e7fe758552ec..c9f1a8dd0b459 100644 --- a/src/mongo/db/s/config/configsvr_commit_reshard_collection_command.cpp +++ b/src/mongo/db/s/config/configsvr_commit_reshard_collection_command.cpp @@ -28,18 +28,36 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/request_types/commit_reshard_collection_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -55,9 +73,10 @@ UUID retrieveReshardingUUID(OperationContext* opCtx, const NamespaceString& ns) 
const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); const auto collEntry = catalogClient->getCollection(opCtx, ns); - uassert(ErrorCodes::NoSuchReshardCollection, - format(FMT_STRING("Could not find resharding metadata for {}"), ns.toString()), - collEntry.getReshardingFields()); + uassert( + ErrorCodes::NoSuchReshardCollection, + format(FMT_STRING("Could not find resharding metadata for {}"), ns.toStringForErrorMsg()), + collEntry.getReshardingFields()); return collEntry.getReshardingFields()->getReshardingUUID(); } @@ -73,10 +92,6 @@ class ConfigsvrCommitReshardCollectionCommand final using InvocationBase::InvocationBase; void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - format(FMT_STRING("{} command not enabled"), definition()->getName()), - resharding::gFeatureFlagResharding.isEnabled( - serverGlobalParams.featureCompatibility)); opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); uassert( ErrorCodes::IllegalOperation, @@ -111,8 +126,9 @@ class ConfigsvrCommitReshardCollectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp b/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp index 0b4c60d382973..0039290812313 100644 --- a/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp +++ b/src/mongo/db/s/config/configsvr_configure_collection_balancing.cpp @@ -28,22 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/s/balancer/balancer.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/s/shard_filtering_metadata_refresh.h" -#include "mongo/s/balancer_configuration.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/configure_collection_balancing_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -69,7 +73,8 @@ class ConfigsvrConfigureCollectionBalancingCmd final const NamespaceString& nss = ns(); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace specified '" << nss.ns() << "'", + str::stream() << "Invalid namespace specified '" << nss.toStringForErrorMsg() + << "'", nss.isValid()); // throws if collection does not exist or parameters are invalid @@ -94,8 +99,9 @@ class ConfigsvrConfigureCollectionBalancingCmd final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - 
->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_control_balancer_command.cpp b/src/mongo/db/s/config/configsvr_control_balancer_command.cpp index 7d725366a4d29..0438f3c575b4e 100644 --- a/src/mongo/db/s/config/configsvr_control_balancer_command.cpp +++ b/src/mongo/db/s/config/configsvr_control_balancer_command.cpp @@ -27,19 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/base/init.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/balancer/balancer.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_logging.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -72,11 +87,12 @@ class ConfigSvrBalancerControlCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/config/configsvr_coordinator.cpp b/src/mongo/db/s/config/configsvr_coordinator.cpp index ce778da3e7fe9..b78d842045adf 100644 --- a/src/mongo/db/s/config/configsvr_coordinator.cpp +++ b/src/mongo/db/s/config/configsvr_coordinator.cpp @@ -28,13 +28,33 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/s/config/configsvr_coordinator.h" - #include "mongo/db/s/config/configsvr_coordinator_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/future_util.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/config/configsvr_coordinator.h b/src/mongo/db/s/config/configsvr_coordinator.h index 
aa48a2477c449..e380dfd19f399 100644 --- a/src/mongo/db/s/config/configsvr_coordinator.h +++ b/src/mongo/db/s/config/configsvr_coordinator.h @@ -29,10 +29,26 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/s/config/configsvr_coordinator_gen.h" #include "mongo/db/s/config/set_user_write_block_mode_coordinator_document_gen.h" #include "mongo/db/session/internal_session_pool.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { diff --git a/src/mongo/db/s/config/configsvr_coordinator_service.cpp b/src/mongo/db/s/config/configsvr_coordinator_service.cpp index 72a6c0fbcd053..f8b7d06ff1171 100644 --- a/src/mongo/db/s/config/configsvr_coordinator_service.cpp +++ b/src/mongo/db/s/config/configsvr_coordinator_service.cpp @@ -28,16 +28,29 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/config/configsvr_coordinator_service.h" +#include +#include +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/config/configsvr_coordinator.h" +#include "mongo/db/s/config/configsvr_coordinator_service.h" #include "mongo/db/s/config/set_cluster_parameter_coordinator.h" #include "mongo/db/s/config/set_user_write_block_mode_coordinator.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/config/configsvr_coordinator_service.h b/src/mongo/db/s/config/configsvr_coordinator_service.h index dfc2ccddb38c9..0e9277a5ce310 100644 --- a/src/mongo/db/s/config/configsvr_coordinator_service.h +++ b/src/mongo/db/s/config/configsvr_coordinator_service.h @@ -29,12 +29,22 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/config/configsvr_coordinator_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/util/concurrency/thread_pool.h" namespace mongo { class ConfigsvrCoordinator; + class ConfigsvrCoordinatorService final : public repl::PrimaryOnlyService { public: static constexpr StringData kServiceName = "ConfigsvrCoordinatorService"_sd; diff --git a/src/mongo/db/s/config/configsvr_coordinator_service_test.cpp b/src/mongo/db/s/config/configsvr_coordinator_service_test.cpp index 30c0fc56b1c02..87683c019fbc7 100644 --- a/src/mongo/db/s/config/configsvr_coordinator_service_test.cpp +++ b/src/mongo/db/s/config/configsvr_coordinator_service_test.cpp @@ -27,15 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/repl/primary_only_service_test_fixture.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/s/config/configsvr_coordinator.h" #include "mongo/db/s/config/configsvr_coordinator_service.h" #include "mongo/db/s/config/set_cluster_parameter_coordinator_document_gen.h" #include "mongo/db/s/config/set_user_write_block_mode_coordinator_document_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/db/s/config/configsvr_create_database_command.cpp b/src/mongo/db/s/config/configsvr_create_database_command.cpp index df184528e0c8a..a00fd5e516391 100644 --- a/src/mongo/db/s/config/configsvr_create_database_command.cpp +++ b/src/mongo/db/s/config/configsvr_create_database_command.cpp @@ -28,22 +28,30 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -105,8 +113,9 @@ class ConfigSvrCreateDatabaseCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_drop_index_catalog_command.cpp b/src/mongo/db/s/config/configsvr_drop_index_catalog_command.cpp index 7875a4e817831..05c191cc9d121 100644 --- a/src/mongo/db/s/config/configsvr_drop_index_catalog_command.cpp +++ b/src/mongo/db/s/config/configsvr_drop_index_catalog_command.cpp @@ -27,19 +27,57 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/sharded_index_catalog_commands_gen.h" -#include "mongo/db/s/sharding_index_catalog_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction/transaction_participant_resource_yielder.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/grid.h" -#include "mongo/s/request_types/sharded_ddl_commands_gen.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -73,9 +111,9 @@ void dropIndexInTransaction(OperationContext* opCtx, std::make_shared(CollectionType::ConfigNS); updateCollectionOp->setUpdates({[&] { write_ops::UpdateOpEntry entry; - entry.setQ(BSON(CollectionType::kNssFieldName << userCollectionNss.ns() - << CollectionType::kUuidFieldName - << collectionUUID)); + entry.setQ(BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(userCollectionNss) + << CollectionType::kUuidFieldName << collectionUUID)); entry.setU(write_ops::UpdateModification::parseFromClassicUpdate( BSON("$set" << BSON(CollectionType::kUuidFieldName << collectionUUID << CollectionType::kIndexVersionFieldName @@ -86,11 +124,10 @@ void dropIndexInTransaction(OperationContext* opCtx, }()}); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); txn_api::SyncTransactionWithRetries txn( opCtx, - sleepInlineExecutor, + executor, TransactionParticipantResourceYielder::make("dropIndexCatalogEntry"), inlineExecutor); @@ -192,8 +229,9 @@ class ConfigsvrDropIndexCatalogEntryCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + 
ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp b/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp index 959f255500ace..b4c43d10c56b3 100644 --- a/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp +++ b/src/mongo/db/s/config/configsvr_ensure_chunk_version_is_greater_than_command.cpp @@ -27,13 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/audit.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/ensure_chunk_version_is_greater_than_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -75,8 +86,9 @@ class ConfigsvrEnsureChunkVersionIsGreaterThanCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp b/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp index f6672440f6dcf..2531485d45d5f 100644 --- a/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp +++ b/src/mongo/db/s/config/configsvr_get_historical_placement_info.cpp @@ -27,11 +27,41 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/s/request_types/get_historical_placement_info_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/request_types/placement_history_commands_gen.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -60,20 +90,6 @@ class ConfigsvrGetHistoricalPlacementCommand final const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - if (!feature_flags::gHistoricalPlacementShardingCatalog.isEnabled( - serverGlobalParams.featureCompatibility)) { - auto shardsWithOpTime = uassertStatusOK(catalogClient->getAllShards( - opCtx, repl::ReadConcernLevel::kMajorityReadConcern)); - std::vector shardIds; - std::transform(shardsWithOpTime.value.begin(), - shardsWithOpTime.value.end(), - std::back_inserter(shardIds), - [](const ShardType& s) { return s.getName(); }); - HistoricalPlacement historicalPlacement{std::move(shardIds), false}; - ConfigsvrGetHistoricalPlacementResponse response(std::move(historicalPlacement)); - return response; - } - boost::optional targetedNs = request().getTargetWholeCluster() ? 
(boost::optional)boost::none : nss; @@ -94,8 +110,9 @@ class ConfigsvrGetHistoricalPlacementCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_merge_all_chunks_on_shard_command.cpp b/src/mongo/db/s/config/configsvr_merge_all_chunks_on_shard_command.cpp index d35e28cc1e26a..943af31588de8 100644 --- a/src/mongo/db/s/config/configsvr_merge_all_chunks_on_shard_command.cpp +++ b/src/mongo/db/s/config/configsvr_merge_all_chunks_on_shard_command.cpp @@ -28,14 +28,31 @@ */ -#include "mongo/base/status_with.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/merge_chunk_request_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -105,8 +122,9 @@ class ConfigSvrCommitMergeAllChunksOnShardCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp index bdc4a1b26e292..9acb1d7706cdc 100644 --- a/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp +++ b/src/mongo/db/s/config/configsvr_merge_chunks_command.cpp @@ -27,10 +27,28 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/merge_chunk_request_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -109,8 +127,9 @@ class ConfigSvrMergeChunksCommand : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)) { uasserted(ErrorCodes::Unauthorized, "Unauthorized"); } } diff --git a/src/mongo/db/s/config/configsvr_move_range_command.cpp b/src/mongo/db/s/config/configsvr_move_range_command.cpp index e3c08325514f6..96c590711b6a5 100644 --- a/src/mongo/db/s/config/configsvr_move_range_command.cpp +++ b/src/mongo/db/s/config/configsvr_move_range_command.cpp @@ -28,13 +28,32 @@ */ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/balancer/balancer.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/move_range_request_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -100,8 +119,9 @@ class ConfigSvrMoveRangeCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } _cfgsvrMoveRange; diff --git a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp index 03a9a24a8c3d3..cf524bc47ef32 100644 --- a/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp +++ b/src/mongo/db/s/config/configsvr_refine_collection_shard_key_command.cpp @@ -28,17 +28,55 @@ */ +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/audit.h" +#include "mongo/db/auth/action_type.h" #include 
"mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/shard_key_util.h" #include "mongo/db/s/sharding_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -68,7 +106,8 @@ class ConfigsvrRefineCollectionShardKeyCommand final void _internalRun(OperationContext* opCtx) { const NamespaceString& nss = ns(); - audit::logRefineCollectionShardKey(opCtx->getClient(), nss.ns(), request().getKey()); + audit::logRefineCollectionShardKey( + opCtx->getClient(), NamespaceStringUtil::serialize(nss), request().getKey()); // Set the operation context read concern level to local for reads into the config // database. 
@@ -86,8 +125,8 @@ class ConfigsvrRefineCollectionShardKeyCommand final opCtx, nss, repl::ReadConcernLevel::kLocalReadConcern); } catch (const ExceptionFor&) { uasserted(ErrorCodes::NamespaceNotSharded, - str::stream() - << "refineCollectionShardKey namespace " << nss << " is not sharded"); + str::stream() << "refineCollectionShardKey namespace " + << nss.toStringForErrorMsg() << " is not sharded"); } const ShardKeyPattern oldShardKeyPattern(collType.getKeyPattern()); @@ -102,7 +141,7 @@ class ConfigsvrRefineCollectionShardKeyCommand final uassert(ErrorCodes::StaleEpoch, str::stream() - << "refineCollectionShardKey namespace " << nss.toString() + << "refineCollectionShardKey namespace " << nss.toStringForErrorMsg() << " has a different epoch than mongos had in its routing table cache", request().getEpoch() == collType.getEpoch()); @@ -186,8 +225,9 @@ class ConfigsvrRefineCollectionShardKeyCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp b/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp index 5f6709ae9e571..28a79e7883356 100644 --- a/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp +++ b/src/mongo/db/s/config/configsvr_remove_chunks_command.cpp @@ -28,20 +28,41 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/remove_chunks_gen.h" -#include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -81,11 +102,6 @@ class ConfigsvrRemoveChunksCommand final : public TypedCommandgetServiceContext()->makeClient("RemoveChunksMetadata"); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } - AlternativeClientRegion acr(newClient); auto executor = Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); @@ -128,8 +144,9 @@ class ConfigsvrRemoveChunksCommand final : public TypedCommandgetClient()) - 
->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp index d2238f6effa3d..3378d38691294 100644 --- a/src/mongo/db/s/config/configsvr_remove_shard_command.cpp +++ b/src/mongo/db/s/config/configsvr_remove_shard_command.cpp @@ -28,24 +28,40 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include +#include -#include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog_cache.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -84,11 +100,12 @@ class ConfigSvrRemoveShardCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp b/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp index 55162a97efcd0..1fef55f7cfcd6 100644 --- a/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp +++ b/src/mongo/db/s/config/configsvr_remove_shard_from_zone_command.cpp @@ -28,18 +28,29 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include 
"mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/s/request_types/remove_shard_from_zone_request_type.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -86,11 +97,12 @@ class ConfigSvrRemoveShardFromZoneCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/config/configsvr_remove_tags_command.cpp b/src/mongo/db/s/config/configsvr_remove_tags_command.cpp index 8f5fd5f7ec3b3..4585efc4706ef 100644 --- a/src/mongo/db/s/config/configsvr_remove_tags_command.cpp +++ b/src/mongo/db/s/config/configsvr_remove_tags_command.cpp @@ -28,20 +28,42 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/remove_tags_gen.h" -#include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_tags.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -79,11 +101,6 @@ class ConfigsvrRemoveTagsCommand final : public TypedCommandgetServiceContext()->makeClient("RemoveTagsMetadata"); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } - AlternativeClientRegion acr(newClient); auto executor = Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); @@ -95,7 +112,7 @@ class ConfigsvrRemoveTagsCommand final : public TypedCommandremoveConfigDocuments( newOpCtxPtr.get(), TagsType::ConfigNS, - BSON(TagsType::ns(nss.ns())), + BSON(TagsType::ns(NamespaceStringUtil::serialize(nss))), ShardingCatalogClient::kLocalWriteConcern)); } @@ -123,8 +140,9 @@ class 
ConfigsvrRemoveTagsCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp b/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp deleted file mode 100644 index 6209d5992d001..0000000000000 --- a/src/mongo/db/s/config/configsvr_rename_collection_metadata_command.cpp +++ /dev/null @@ -1,158 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - - -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/cancelable_operation_context.h" -#include "mongo/db/commands.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/sharding_ddl_util.h" -#include "mongo/db/transaction/transaction_participant.h" -#include "mongo/s/grid.h" -#include "mongo/s/request_types/sharded_ddl_commands_gen.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - - -namespace mongo { -namespace { - -class ConfigsvrRenameCollectionMetadataCommand final - : public TypedCommand { -public: - using Request = ConfigsvrRenameCollectionMetadata; - - bool skipApiVersionCheck() const override { - // Internal command (server to server). - return true; - } - - std::string help() const override { - return "Internal command. Do not call directly. 
Renames a collection."; - } - - bool adminOnly() const override { - return false; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { - return AllowedOnSecondary::kNever; - } - - bool supportsRetryableWrite() const final { - return true; - } - - class Invocation final : public InvocationBase { - public: - using InvocationBase::InvocationBase; - - void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::IllegalOperation, - "_configsvrRenameCollectionMetadata can only be run on config servers", - serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); - CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, - opCtx->getWriteConcern()); - - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - const auto& req = request(); - - // Set the operation context read concern level to local for reads into the config - // database. - repl::ReadConcernArgs::get(opCtx) = - repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern); - - auto txnParticipant = TransactionParticipant::get(opCtx); - uassert(ErrorCodes::InvalidOptions, - str::stream() << Request::kCommandName - << " expected to be called within a transaction", - txnParticipant); - - { - auto newClient = opCtx->getServiceContext()->makeClient("RenameCollectionMetadata"); - AuthorizationSession::get(newClient.get()) - ->grantInternalAuthorization(newClient.get()); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } - - AlternativeClientRegion acr(newClient); - auto executor = - Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); - CancelableOperationContext newOpCtxPtr( - cc().makeOperationContext(), opCtx->getCancellationToken(), executor); - - ShardingCatalogManager::get(newOpCtxPtr.get()) - ->renameShardedMetadata(newOpCtxPtr.get(), - ns(), - req.getTo(), - ShardingCatalogClient::kLocalWriteConcern, - req.getOptFromCollection()); - } - - // Since we no write happened on this txnNumber, we need to make a dummy write so that - // secondaries can be aware of this txn. - // Such write will also guarantee that the lastOpTime of opCtx will be inclusive of any - // write executed under the AlternativeClientRegion. 
- DBDirectClient client(opCtx); - client.update(NamespaceString::kServerConfigurationNamespace, - BSON("_id" - << "RenameCollectionMetadataStats"), - BSON("$inc" << BSON("count" << 1)), - true /* upsert */, - false /* multi */); - } - - private: - NamespaceString ns() const override { - return request().getNamespace(); - } - - bool supportsWriteConcern() const override { - return true; - } - - void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); - } - }; - -} _configsvrRenameCollectionMetadata; - -} // namespace -} // namespace mongo diff --git a/src/mongo/db/s/config/configsvr_repair_sharded_collection_chunks_history_command.cpp b/src/mongo/db/s/config/configsvr_repair_sharded_collection_chunks_history_command.cpp index 41809d3744c56..53195fbbc04a1 100644 --- a/src/mongo/db/s/config/configsvr_repair_sharded_collection_chunks_history_command.cpp +++ b/src/mongo/db/s/config/configsvr_repair_sharded_collection_chunks_history_command.cpp @@ -28,17 +28,35 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/vector_clock.h" -#include "mongo/logv2/log.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -74,11 +92,12 @@ class ConfigSvrRepairShardedCollectionChunksHistoryCommand : public BasicCommand } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/config/configsvr_reset_placement_history_command.cpp b/src/mongo/db/s/config/configsvr_reset_placement_history_command.cpp new file mode 100644 index 0000000000000..c92c816294e30 --- /dev/null +++ b/src/mongo/db/s/config/configsvr_reset_placement_history_command.cpp @@ -0,0 +1,115 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/request_types/placement_history_commands_gen.h" +#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand + + +namespace mongo { +namespace { + +class ConfigSvrResetPlacementHistoryCommand final + : public TypedCommand { +public: + using Request = ConfigsvrResetPlacementHistory; + + std::string help() const override { + return "Internal command only invokable on the config server. Do not call directly. 
" + "Reinitializes the content of config.placementHistory based on a recent snapshot of " + "the Sharding catalog."; + } + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kNever; + } + + bool adminOnly() const override { + return true; + } + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + void typedRun(OperationContext* opCtx) { + uassert(ErrorCodes::IllegalOperation, + str::stream() << Request::kCommandName + << " can only be run on the config server", + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); + + opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + + ShardingCatalogManager::get(opCtx)->initializePlacementHistory(opCtx); + } + + private: + NamespaceString ns() const override { + return NamespaceString(request().getDbName()); + } + + bool supportsWriteConcern() const override { + return true; + } + + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); + } + }; +} _cfgsvrResetPlacementHistory; + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp index 0e9e1f82e9e44..bc7ff66a079df 100644 --- a/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp +++ b/src/mongo/db/s/config/configsvr_reshard_collection_cmd.cpp @@ -28,27 +28,62 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/replication_state_transition_lock_guard.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" -#include "mongo/db/s/resharding/resharding_server_parameters_gen.h" #include "mongo/db/s/resharding/resharding_util.h" -#include "mongo/db/vector_clock.h" -#include "mongo/logv2/log.h" -#include "mongo/s/catalog/type_tags.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog_cache.h" 
+#include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/reshard_collection_gen.h" +#include "mongo/s/resharding/common_types_gen.h" #include "mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.h" #include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -73,11 +108,20 @@ class ConfigsvrReshardCollectionCommand final serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, opCtx->getWriteConcern()); + const NamespaceString& nss = ns(); + + { + repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX); + auto const replCoord = repl::ReplicationCoordinator::get(opCtx); + uassert(ErrorCodes::InterruptedDueToReplStateChange, + "node is not primary", + replCoord->canAcceptWritesForDatabase(opCtx, nss.dbName())); + opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + } + repl::ReadConcernArgs::get(opCtx) = repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern); - const NamespaceString& nss = ns(); - const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); try { const auto collEntry = catalogClient->getCollection(opCtx, nss); @@ -107,7 +151,7 @@ class ConfigsvrReshardCollectionCommand final const auto& authoritativeTags = uassertStatusOK(catalogClient->getTagsForCollection(opCtx, nss)); - if (!authoritativeTags.empty()) { + if (!authoritativeTags.empty() && !request().getForceRedistribution()) { uassert(ErrorCodes::BadValue, "Must specify value for zones field", request().getZones()); @@ -127,16 +171,31 @@ class ConfigsvrReshardCollectionCommand final *presetChunks, opCtx, ShardKeyPattern(request().getKey()).getKeyPattern()); } + if (!resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassert( + ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject shardDistribution parameter", + !request().getShardDistribution().has_value()); + uassert( + ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject forceRedistribution parameter", + !request().getForceRedistribution().has_value()); + uassert(ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject reshardingUUID parameter", + !request().getReshardingUUID().has_value()); + } + + if (const auto& shardDistribution = request().getShardDistribution()) { + resharding::validateShardDistribution( + *shardDistribution, opCtx, ShardKeyPattern(request().getKey())); + } + // Returns boost::none if there isn't any work to be done by the resharding operation. auto instance = ([&]() -> boost::optional> { FixedFCVRegion fixedFcv(opCtx); - uassert(ErrorCodes::CommandNotSupported, - "reshardCollection command not enabled", - resharding::gFeatureFlagResharding.isEnabled( - serverGlobalParams.featureCompatibility)); - // (Generic FCV reference): To run this command and ensure the consistency of // the metadata we need to make sure we are on a stable state. 
uassert( @@ -173,13 +232,17 @@ class ConfigsvrReshardCollectionCommand final request().getKey()); commonMetadata.setStartTime( opCtx->getServiceContext()->getFastClockSource()->now()); + if (request().getReshardingUUID()) { + commonMetadata.setUserReshardingUUID(*request().getReshardingUUID()); + } coordinatorDoc.setCommonReshardingMetadata(std::move(commonMetadata)); coordinatorDoc.setZones(request().getZones()); coordinatorDoc.setPresetReshardedChunks(request().get_presetReshardedChunks()); coordinatorDoc.setNumInitialChunks(request().getNumInitialChunks()); + coordinatorDoc.setShardDistribution(request().getShardDistribution()); + coordinatorDoc.setForceRedistribution(request().getForceRedistribution()); - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); auto instance = getOrCreateReshardingCoordinator(opCtx, coordinatorDoc); instance->getCoordinatorDocWrittenFuture().get(opCtx); return instance; @@ -214,8 +277,9 @@ class ConfigsvrReshardCollectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_run_restore_command.cpp b/src/mongo/db/s/config/configsvr_run_restore_command.cpp index 774b110f77734..ab519a94f1290 100644 --- a/src/mongo/db/s/config/configsvr_run_restore_command.cpp +++ b/src/mongo/db/s/config/configsvr_run_restore_command.cpp @@ -28,16 +28,59 @@ */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/repl/storage_interface.h" +#include "mongo/db/s/config/known_collections.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/testing_proctor.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -63,9 +106,10 @@ ShouldRestoreDocument 
shouldRestoreDocument(OperationContext* opCtx, auto findRequest = FindCommandRequest(NamespaceString::kConfigsvrRestoreNamespace); if (nss && uuid) { - findRequest.setFilter(BSON("ns" << nss->toString() << "uuid" << *uuid)); + findRequest.setFilter( + BSON("ns" << NamespaceStringUtil::serialize(*nss) << "uuid" << *uuid)); } else if (nss) { - findRequest.setFilter(BSON("ns" << nss->toString())); + findRequest.setFilter(BSON("ns" << NamespaceStringUtil::serialize(*nss))); } else if (uuid) { findRequest.setFilter(BSON("uuid" << *uuid)); } @@ -86,9 +130,10 @@ ShouldRestoreDocument shouldRestoreDocument(OperationContext* opCtx, (void)UUID::parse(doc); } catch (const AssertionException&) { uasserted(ErrorCodes::BadValue, - str::stream() << "The uuid field of '" << doc.toString() << "' in '" - << NamespaceString::kConfigsvrRestoreNamespace.toString() - << "' needs to be of type UUID"); + str::stream() + << "The uuid field of '" << doc.toString() << "' in '" + << NamespaceString::kConfigsvrRestoreNamespace.toStringForErrorMsg() + << "' needs to be of type UUID"); } } } @@ -154,11 +199,12 @@ class ConfigSvrRunRestoreCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -181,10 +227,31 @@ class ConfigSvrRunRestoreCommand : public BasicCommand { // this command. CollectionPtr restoreColl(CollectionCatalog::get(opCtx)->lookupCollectionByNamespace( opCtx, NamespaceString::kConfigsvrRestoreNamespace)); - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << NamespaceString::kConfigsvrRestoreNamespace - << " is missing", - restoreColl); + uassert( + ErrorCodes::NamespaceNotFound, + str::stream() << "Collection " + << NamespaceString::kConfigsvrRestoreNamespace.toStringForErrorMsg() + << " is missing", + restoreColl); + } + + DBDirectClient client(opCtx); + + if (TestingProctor::instance().isEnabled()) { + // All collections in the config server must be defined in kConfigCollections. + // Collections to restore should be defined in kCollectionEntries. + auto collInfos = + client.getCollectionInfos(DatabaseNameUtil::deserialize(boost::none, "config")); + for (auto&& info : collInfos) { + StringData collName = info.getStringField("name"); + // Ignore cache collections as they will be dropped later in the restore procedure. 
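The testing-only block above is easier to follow in isolation: every collection name found in the config database must either appear in the known-collections set or start with `cache`, because cache collections are dropped later in the restore procedure; anything else is treated as fatal. A stripped-down sketch of that check, with invented collection names and a return code standing in for the catalog lookup and `LOGV2_FATAL`:

```cpp
// Minimal sketch of the restore-time validation above, with invented names.
#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
    // Stand-in for kConfigCollections (the real set lists the config server's
    // known collections).
    const std::set<std::string> knownConfigCollections = {
        "chunks", "collections", "databases", "shards", "tags", "version"};

    // Stand-in for client.getCollectionInfos("config") results.
    const std::vector<std::string> foundCollections = {
        "chunks", "cache.chunks.test.foo", "unexpected"};

    for (const auto& name : foundCollections) {
        const bool isCacheCollection = name.rfind("cache", 0) == 0;  // startsWith("cache")
        if (knownConfigCollections.count(name) == 0 && !isCacheCollection) {
            // The real command uses LOGV2_FATAL here, which terminates the process.
            std::cerr << "Identified unknown collection in config server: " << name << "\n";
            return 1;
        }
    }
    std::cout << "all config collections accounted for\n";
    return 0;
}
```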
+ if (kConfigCollections.find(collName) == kConfigCollections.end() && + !collName.startsWith("cache")) { + LOGV2_FATAL(6863300, + "Identified unknown collection in config server.", + "collName"_attr = collName); + } + } } for (const auto& collectionEntry : kCollectionEntries) { @@ -201,7 +268,6 @@ class ConfigSvrRunRestoreCommand : public BasicCommand { continue; } - DBDirectClient client(opCtx); auto findRequest = FindCommandRequest(nss); auto cursor = client.find(findRequest); diff --git a/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp b/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp index 8d6ade7eeb46d..b6fa6e193bc1e 100644 --- a/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp +++ b/src/mongo/db/s/config/configsvr_set_allow_migrations_command.cpp @@ -28,12 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/dbdirectclient.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/s/sharding_util.h" +#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction/transaction_participant_resource_yielder.h" +#include "mongo/s/grid.h" #include "mongo/s/request_types/set_allow_migrations_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -41,6 +55,30 @@ namespace mongo { namespace { +void tellShardsToRefresh(OperationContext* opCtx, + const NamespaceString& nss, + const std::string& cmdName) { + // If we have a session checked out, we need to yield it, considering we'll be doing a network + // operation that may block. + std::unique_ptr resourceYielder; + if (TransactionParticipant::get(opCtx)) { + resourceYielder = TransactionParticipantResourceYielder::make(cmdName); + resourceYielder->yield(opCtx); + } + + // Trigger a refresh on every shard. We send this to every shard and not just shards that own + // chunks for the collection because the set of shards owning chunks is updated before the + // critical section is released during chunk migrations. If the last chunk is moved off of a + // shard and this flush is not sent to that donor, stopMigrations will not wait for the critical + // section to finish on that shard (SERVER-73984). + const auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); + const auto allShardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); + sharding_util::tellShardsToRefreshCollection(opCtx, allShardIds, nss, executor); + if (resourceYielder) { + resourceYielder->unyield(opCtx); + } +} + class ConfigsvrSetAllowMigrationsCommand final : public TypedCommand { public: @@ -61,16 +99,49 @@ class ConfigsvrSetAllowMigrationsCommand final CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, opCtx->getWriteConcern()); - // Set the operation context read concern level to local for reads into the config - // database. 
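The new `tellShardsToRefresh()` helper introduced above yields any checked-out session resources before broadcasting the refresh (a network operation that may block) and reacquires them afterwards, and it deliberately targets every shard because the set of chunk-owning shards can change before a migration's critical section is released (SERVER-73984). A rough standalone sketch of the yield-around-network-call shape, using placeholder types rather than `TransactionParticipantResourceYielder`:

```cpp
// Sketch of the yield-around-network-call pattern, with placeholder types.
#include <functional>
#include <iostream>
#include <memory>

struct YielderSketch {  // stand-in for the server's resource yielder
    void yield() { std::cout << "yield session resources\n"; }
    void unyield() { std::cout << "reacquire session resources\n"; }
};

void broadcastWithYield(bool hasCheckedOutSession, const std::function<void()>& networkCall) {
    std::unique_ptr<YielderSketch> yielder;
    if (hasCheckedOutSession) {  // mirrors the TransactionParticipant::get(opCtx) check
        yielder = std::make_unique<YielderSketch>();
        yielder->yield();
    }

    networkCall();  // e.g. telling every shard to refresh its filtering metadata

    if (yielder) {
        yielder->unyield();
    }
}

int main() {
    broadcastWithYield(true, [] { std::cout << "flush routing info on all shards\n"; });
    return 0;
}
```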
- repl::ReadConcernArgs::get(opCtx) = - repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern); - - const auto allowMigrations = request().getAllowMigrations(); - const auto& collectionUUID = request().getCollectionUUID(); - - ShardingCatalogManager::get(opCtx)->setAllowMigrationsAndBumpOneChunk( - opCtx, nss, collectionUUID, allowMigrations); + { + // Use ACR to have a thread holding the session while we do the metadata updates so + // we can serialize concurrent requests to setAllowMigrations (i.e. a stepdown + // happens and the new primary sends a setAllowMigrations with the same sessionId). + // We could think about weakening the serialization guarantee in the future because + // the replay protection comes from the oplog write with a specific txnNumber. Using + // ACR also prevents having deadlocks with the shutdown thread because the + // cancellation of the new operation context is linked to the parent one. + auto newClient = opCtx->getServiceContext()->makeClient("SetAllowMigrations"); + AlternativeClientRegion acr(newClient); + auto executor = + Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); + auto newOpCtxPtr = CancelableOperationContext( + cc().makeOperationContext(), opCtx->getCancellationToken(), executor); + + AuthorizationSession::get(newOpCtxPtr.get()->getClient()) + ->grantInternalAuthorization(newOpCtxPtr.get()->getClient()); + newOpCtxPtr->setWriteConcern(opCtx->getWriteConcern()); + + // Set the operation context read concern level to local for reads into the config + // database. + repl::ReadConcernArgs::get(newOpCtxPtr.get()) = + repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern); + + const auto allowMigrations = request().getAllowMigrations(); + const auto& collectionUUID = request().getCollectionUUID(); + + ShardingCatalogManager::get(newOpCtxPtr.get()) + ->setAllowMigrationsAndBumpOneChunk( + newOpCtxPtr.get(), nss, collectionUUID, allowMigrations); + } + + tellShardsToRefresh(opCtx, ns(), ConfigsvrSetAllowMigrations::kCommandName.toString()); + + // Since no write happened on this txnNumber, we need to make a dummy write to + // protect against older requests with old txnNumbers.
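The dummy write mentioned in the comment above (performed at the start of the next hunk) exists because the actual metadata updates ran on a separate operation context inside the `AlternativeClientRegion`, so nothing has yet been written under this session's txnNumber; the marker upsert on the original opCtx gives the retryable write something to record and lets stale txnNumbers be rejected. The following is a deliberately loose toy model of that idea, not the server's session machinery; all names are invented.

```cpp
// Toy model: a session only remembers a txnNumber once a write executes under
// it, so a later retry with an older txnNumber can be rejected.
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

struct SessionSketch {
    long long highestTxnNumber = -1;
    std::map<std::string, long long> documents;  // pretend config collection

    void runRetryableWrite(long long txnNumber, const std::string& statsDocId) {
        if (txnNumber < highestTxnNumber) {
            throw std::runtime_error("TransactionTooOld: stale txnNumber");
        }
        highestTxnNumber = txnNumber;
        // ... real metadata updates happen on a separate client/opCtx here ...
        ++documents[statsDocId];  // the marker upsert executed on the original opCtx
    }
};

int main() {
    SessionSketch session;
    session.runRetryableWrite(5, "SetAllowMigrationsStats");
    try {
        session.runRetryableWrite(4, "SetAllowMigrationsStats");  // older request
    } catch (const std::exception& e) {
        std::cout << e.what() << "\n";
    }
    std::cout << "count = " << session.documents["SetAllowMigrationsStats"] << "\n";
    return 0;
}
```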
+ DBDirectClient client(opCtx); + client.update(NamespaceString::kServerConfigurationNamespace, + BSON("_id" + << "SetAllowMigrationsStats"), + BSON("$inc" << BSON("count" << 1)), + true /* upsert */, + false /* multi */); } private: @@ -86,8 +157,9 @@ class ConfigsvrSetAllowMigrationsCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; @@ -108,6 +180,10 @@ class ConfigsvrSetAllowMigrationsCommand final AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { return AllowedOnSecondary::kNever; } + + bool supportsRetryableWrite() const final { + return true; + } } configsvrSetAllowMigrationsCmd; } // namespace diff --git a/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp b/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp index f87cfff653cde..fb96ad45dc18a 100644 --- a/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp +++ b/src/mongo/db/s/config/configsvr_set_cluster_parameter_command.cpp @@ -28,18 +28,42 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/commands/set_cluster_parameter_invocation.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/config/configsvr_coordinator.h" +#include "mongo/db/s/config/configsvr_coordinator_gen.h" #include "mongo/db/s/config/configsvr_coordinator_service.h" -#include "mongo/db/s/config/set_cluster_parameter_coordinator.h" -#include "mongo/idl/cluster_server_parameter_gen.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/db/s/config/set_cluster_parameter_coordinator_document_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -111,8 +135,9 @@ class ConfigsvrSetClusterParameterCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp b/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp index 395c646bf12c7..38b99b624c718 100644 --- 
a/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp +++ b/src/mongo/db/s/config/configsvr_set_user_write_block_mode_command.cpp @@ -28,15 +28,30 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/config/configsvr_coordinator.h" +#include "mongo/db/s/config/configsvr_coordinator_gen.h" #include "mongo/db/s/config/configsvr_coordinator_service.h" -#include "mongo/db/s/config/set_user_write_block_mode_coordinator.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/db/s/config/set_user_write_block_mode_coordinator_document_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -90,8 +105,9 @@ class ConfigsvrSetUserWriteBlockModeCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp index 0fd3f7c9de07e..30d5e7ccdb676 100644 --- a/src/mongo/db/s/config/configsvr_split_chunk_command.cpp +++ b/src/mongo/db/s/config/configsvr_split_chunk_command.cpp @@ -28,19 +28,32 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/split_chunk_request_type.h" -#include "mongo/s/grid.h" -#include "mongo/util/str.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -94,11 +107,12 @@ class ConfigSvrSplitChunkCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - 
ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/config/configsvr_transition_from_dedicated_config_server_command.cpp b/src/mongo/db/s/config/configsvr_transition_from_dedicated_config_server_command.cpp new file mode 100644 index 0000000000000..6aa3dbf7ea2e7 --- /dev/null +++ b/src/mongo/db/s/config/configsvr_transition_from_dedicated_config_server_command.cpp @@ -0,0 +1,139 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog_shard_feature_flag_gen.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/request_types/transition_from_dedicated_config_server_gen.h" +#include "mongo/util/assert_util.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding + +namespace mongo { +namespace { + +class ConfigsvrTransitionFromDedicatedConfigServerCommand + : public TypedCommand { +public: + using Request = ConfigsvrTransitionFromDedicatedConfigServer; + + bool skipApiVersionCheck() const override { + // Internal command (server to server). + return true; + } + + std::string help() const override { + return "Internal command, which is exported by the sharding config server. Do not call " + "directly. 
Transitions cluster into config shard config servers."; + } + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kNever; + } + + bool adminOnly() const override { + return true; + } + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + void typedRun(OperationContext* opCtx) { + uassert( + ErrorCodes::IllegalOperation, + "_configsvrTransitionFromDedicatedConfigServer can only be run on config servers", + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); + + CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, + opCtx->getWriteConcern()); + + // Set the operation context read concern level to local for reads into the config + // database. + repl::ReadConcernArgs::get(opCtx) = + repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern); + + auto configConnString = + repl::ReplicationCoordinator::get(opCtx)->getConfigConnectionString(); + + auto shardingState = ShardingState::get(opCtx); + uassert(7368500, "sharding state not enabled", shardingState->enabled()); + + std::string shardName = shardingState->shardId().toString(); + uassertStatusOK(ShardingCatalogManager::get(opCtx)->addShard( + opCtx, &shardName, configConnString, true)); + } + + private: + NamespaceString ns() const override { + return {}; + } + + bool supportsWriteConcern() const override { + return true; + } + + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); + } + }; +}; + +MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(ConfigsvrTransitionFromDedicatedConfigServerCommand, + gFeatureFlagTransitionToCatalogShard); + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/s/config/configsvr_transition_to_catalog_shard_command.cpp b/src/mongo/db/s/config/configsvr_transition_to_catalog_shard_command.cpp deleted file mode 100644 index ec1bc45194531..0000000000000 --- a/src/mongo/db/s/config/configsvr_transition_to_catalog_shard_command.cpp +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. 
If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" -#include "mongo/s/catalog/type_shard.h" -#include "mongo/s/request_types/transition_to_catalog_shard_gen.h" -#include "mongo/util/assert_util.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - -namespace mongo { -namespace { - -class ConfigsvrTransitionToCatalogShardCommand - : public TypedCommand { -public: - using Request = ConfigsvrTransitionToCatalogShard; - - bool skipApiVersionCheck() const override { - // Internal command (server to server). - return true; - } - - std::string help() const override { - return "Internal command, which is exported by the sharding config server. Do not call " - "directly. Transitions cluster into catalog shard config servers."; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { - return AllowedOnSecondary::kNever; - } - - bool adminOnly() const override { - return true; - } - - class Invocation final : public InvocationBase { - public: - using InvocationBase::InvocationBase; - - void typedRun(OperationContext* opCtx) { - uassert(7467202, - "The catalog shard feature is disabled", - gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility)); - - uassert(ErrorCodes::IllegalOperation, - "_configsvrTransitionToCatalogShard can only be run on config servers", - serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); - - CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, - opCtx->getWriteConcern()); - - // Set the operation context read concern level to local for reads into the config - // database. 
- repl::ReadConcernArgs::get(opCtx) = - repl::ReadConcernArgs(repl::ReadConcernLevel::kLocalReadConcern); - - auto configConnString = - repl::ReplicationCoordinator::get(opCtx)->getConfigConnectionString(); - - auto shardingState = ShardingState::get(opCtx); - uassert(7368500, "sharding state not enabled", shardingState->enabled()); - - std::string shardName = shardingState->shardId().toString(); - uassertStatusOK(ShardingCatalogManager::get(opCtx)->addShard( - opCtx, &shardName, configConnString, true)); - } - - private: - NamespaceString ns() const override { - return {}; - } - - bool supportsWriteConcern() const override { - return true; - } - - void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); - } - }; -}; - -MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(ConfigsvrTransitionToCatalogShardCommand, - gFeatureFlagTransitionToCatalogShard); - -} // namespace -} // namespace mongo diff --git a/src/mongo/db/s/config/configsvr_transition_to_dedicated_config_server_command.cpp b/src/mongo/db/s/config/configsvr_transition_to_dedicated_config_server_command.cpp index 475d2f9b27b0b..712b58c9a6fe2 100644 --- a/src/mongo/db/s/config/configsvr_transition_to_dedicated_config_server_command.cpp +++ b/src/mongo/db/s/config/configsvr_transition_to_dedicated_config_server_command.cpp @@ -27,16 +27,39 @@ * it in the license file. */ +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -44,7 +67,7 @@ namespace mongo { namespace { /** - * Internal sharding command run on config servers for transitioning from catalog shard to + * Internal sharding command run on config servers for transitioning from config shard to * dedicated config server. 
*/ class ConfigSvrTransitionToDedicatedConfigCommand : public BasicCommand { @@ -75,11 +98,12 @@ class ConfigSvrTransitionToDedicatedConfigCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -91,11 +115,8 @@ class ConfigSvrTransitionToDedicatedConfigCommand : public BasicCommand { BSONObjBuilder& result) override { // (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here. uassert(7368402, - "The transition to catalog shard feature is disabled", + "The transition to config shard feature is disabled", gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe()); - uassert(7467203, - "The catalog shard feature is disabled", - gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility)); uassert(ErrorCodes::IllegalOperation, "_configsvrTransitionToDedicatedConfigServer can only be run on config servers", diff --git a/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp b/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp index 92ae02ef6b6f3..bd89f03fd300a 100644 --- a/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp +++ b/src/mongo/db/s/config/configsvr_update_zone_key_range_command.cpp @@ -28,18 +28,29 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/s/grid.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/s/request_types/update_zone_key_range_request_type.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -88,11 +99,12 @@ class ConfigsvrUpdateZoneKeyRangeCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/db/s/config/index_on_config.cpp b/src/mongo/db/s/config/index_on_config.cpp index d88368e5fcafb..8f6904d5c2a7e 100644 --- a/src/mongo/db/s/config/index_on_config.cpp +++ 
b/src/mongo/db/s/config/index_on_config.cpp @@ -29,12 +29,10 @@ #include "mongo/db/s/config/index_on_config.h" -#include "mongo/db/catalog/index_builds_manager.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/index_builds_coordinator.h" +#include + #include "mongo/db/s/sharding_util.h" -#include "mongo/logv2/log.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -44,7 +42,7 @@ Status createIndexOnConfigCollection(OperationContext* opCtx, const NamespaceString& ns, const BSONObj& keys, bool unique) { - invariant(ns.db() == DatabaseName::kConfig.db() || ns.db() == DatabaseName::kAdmin.db()); + invariant(ns.isConfigDB() || ns.isAdminDB()); return sharding_util::createIndexOnCollection(opCtx, ns, keys, unique); } diff --git a/src/mongo/db/s/config/index_on_config_test.cpp b/src/mongo/db/s/config/index_on_config_test.cpp index fa32def69f319..219f8a5d92646 100644 --- a/src/mongo/db/s/config/index_on_config_test.cpp +++ b/src/mongo/db/s/config/index_on_config_test.cpp @@ -27,15 +27,21 @@ * it in the license file. */ -#include "mongo/db/rs_local_client.h" - -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/index_on_config.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/config/initial_split_policy.cpp b/src/mongo/db/s/config/initial_split_policy.cpp index db1b0332d9533..446b04fcbcd73 100644 --- a/src/mongo/db/s/config/initial_split_policy.cpp +++ b/src/mongo/db/s/config/initial_split_policy.cpp @@ -29,21 +29,56 @@ #include "mongo/db/s/config/initial_split_policy.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/client/read_preference.h" -#include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/curop.h" -#include "mongo/db/pipeline/document_source.h" -#include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/process_interface/shardsvr_process_interface.h" -#include "mongo/db/pipeline/sharded_agg_helpers.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/balancer/balancer_policy.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/vector_clock.h" -#include "mongo/logv2/log.h" -#include 
"mongo/s/balancer_configuration.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -103,12 +138,17 @@ ShardId selectBestShard(const ChunkDistributionMap& chunkMap, for (const auto& shard : shards) { auto candidateIter = chunkMap.find(shard); + // If limitedShardIds is provided, only pick shard in that set. if (bestShardIter == chunkMap.end() || candidateIter->second < bestShardIter->second) { bestShardIter = candidateIter; } } - invariant(bestShardIter != chunkMap.end()); + uassert(ErrorCodes::InvalidOptions, + str::stream() << "No shards found for chunk: " << chunkRange.toString() + << " in zone: " << zone, + bestShardIter != chunkMap.end()); + return bestShardIter->first; } @@ -149,6 +189,67 @@ StringMap> buildTagsToShardIdsMap(OperationContext* opCtx, return tagToShardIds; } +/** + * Returns a set of split points to ensure that chunk boundaries will align with the zone + * ranges. + */ +BSONObjSet extractSplitPointsFromZones(const ShardKeyPattern& shardKey, + const boost::optional>& zones) { + auto splitPoints = SimpleBSONObjComparator::kInstance.makeBSONObjSet(); + + if (!zones) { + return splitPoints; + } + + for (const auto& zone : *zones) { + splitPoints.insert(zone.getMinKey()); + splitPoints.insert(zone.getMaxKey()); + } + + const auto keyPattern = shardKey.getKeyPattern(); + splitPoints.erase(keyPattern.globalMin()); + splitPoints.erase(keyPattern.globalMax()); + + return splitPoints; +} + +/* + * Returns a map mapping shard id to a set of zone tags. + */ +stdx::unordered_map> buildShardIdToTagsMap( + OperationContext* opCtx, const std::vector& shards) { + stdx::unordered_map> shardIdToTags; + if (shards.empty()) { + return shardIdToTags; + } + + // Get all docs in config.shards through a query instead of going through the shard registry + // because we need the zones as well + const auto configServer = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + const auto shardDocs = uassertStatusOK( + configServer->exhaustiveFindOnConfig(opCtx, + ReadPreferenceSetting(ReadPreference::Nearest), + repl::ReadConcernLevel::kMajorityReadConcern, + NamespaceString::kConfigsvrShardsNamespace, + BSONObj(), + BSONObj(), + boost::none)); + uassert( + 7661502, str::stream() << "Could not find any shard documents", !shardDocs.docs.empty()); + + for (const auto& shard : shards) { + shardIdToTags[shard.getShard()] = {}; + } + + for (const auto& shardDoc : shardDocs.docs) { + auto parsedShard = uassertStatusOK(ShardType::fromBSON(shardDoc)); + for (const auto& tag : parsedShard.getTags()) { + shardIdToTags[ShardId(parsedShard.getName())].insert(tag); + } + } + + return shardIdToTags; +} } // namespace std::vector InitialSplitPolicy::calculateHashedSplitPoints( @@ -374,7 +475,8 @@ InitialSplitPolicy::ShardCollectionConfig AbstractTagsBasedSplitPolicy::createFi invariant(it != tagToShards.end()); uassert(50973, str::stream() - << "Cannot shard collection " << tag.getNS() << " due to zone " << tag.getTag() + << "Cannot shard collection " << tag.getNS().toStringForErrorMsg() + << " due to zone " << tag.getTag() << " which is not assigned to a shard. 
Please assign this zone to a shard.", !it->second.empty()); @@ -620,20 +722,30 @@ SamplingBasedSplitPolicy SamplingBasedSplitPolicy::make( const ShardKeyPattern& shardKey, int numInitialChunks, boost::optional> zones, + boost::optional> availableShardIds, int samplesPerChunk) { uassert(4952603, "samplesPerChunk should be > 0", samplesPerChunk > 0); return SamplingBasedSplitPolicy( numInitialChunks, zones, - _makePipelineDocumentSource(opCtx, nss, shardKey, numInitialChunks, samplesPerChunk)); + _makePipelineDocumentSource(opCtx, nss, shardKey, numInitialChunks, samplesPerChunk), + availableShardIds); } -SamplingBasedSplitPolicy::SamplingBasedSplitPolicy(int numInitialChunks, - boost::optional> zones, - std::unique_ptr samples) - : _numInitialChunks(numInitialChunks), _zones(std::move(zones)), _samples(std::move(samples)) { +SamplingBasedSplitPolicy::SamplingBasedSplitPolicy( + int numInitialChunks, + boost::optional> zones, + std::unique_ptr samples, + boost::optional> availableShardIds) + : _numInitialChunks(numInitialChunks), + _zones(std::move(zones)), + _samples(std::move(samples)), + _availableShardIds(std::move(availableShardIds)) { uassert(4952602, "numInitialChunks should be > 0", numInitialChunks > 0); uassert(4952604, "provided zones should not be empty", !_zones || _zones->size()); + uassert(7679103, + "provided availableShardIds should not be empty", + !_availableShardIds || !_availableShardIds->empty()); } BSONObjSet SamplingBasedSplitPolicy::createFirstSplitPoints(OperationContext* opCtx, @@ -646,7 +758,7 @@ BSONObjSet SamplingBasedSplitPolicy::createFirstSplitPoints(OperationContext* op } } - auto splitPoints = _extractSplitPointsFromZones(shardKey); + auto splitPoints = extractSplitPointsFromZones(shardKey, _zones); if (splitPoints.size() < static_cast(_numInitialChunks - 1)) { // The BlockingResultsMerger underlying the $mergeCursors stage records how long was // spent waiting for samples from the donor shards. 
It doing so requires the CurOp @@ -684,7 +796,12 @@ InitialSplitPolicy::ShardCollectionConfig SamplingBasedSplitPolicy::createFirstC } } - { + if (_availableShardIds) { + for (const auto& shardId : *_availableShardIds) { + chunkDistribution.emplace(shardId, 0); + } + zoneToShardMap.emplace("", *_availableShardIds); + } else { auto allShardIds = getAllShardIdsShuffled(opCtx); for (const auto& shard : allShardIds) { chunkDistribution.emplace(shard, 0); @@ -718,25 +835,6 @@ InitialSplitPolicy::ShardCollectionConfig SamplingBasedSplitPolicy::createFirstC return {std::move(chunks)}; } -BSONObjSet SamplingBasedSplitPolicy::_extractSplitPointsFromZones(const ShardKeyPattern& shardKey) { - auto splitPoints = SimpleBSONObjComparator::kInstance.makeBSONObjSet(); - - if (!_zones) { - return splitPoints; - } - - for (const auto& zone : *_zones) { - splitPoints.insert(zone.getMinKey()); - splitPoints.insert(zone.getMaxKey()); - } - - const auto keyPattern = shardKey.getKeyPattern(); - splitPoints.erase(keyPattern.globalMin()); - splitPoints.erase(keyPattern.globalMax()); - - return splitPoints; -} - void SamplingBasedSplitPolicy::_appendSplitPointsFromSample(BSONObjSet* splitPoints, const ShardKeyPattern& shardKey, int nToAppend) { @@ -805,7 +903,7 @@ SamplingBasedSplitPolicy::_makePipelineDocumentSource(OperationContext* opCtx, std::move(resolvedNamespaces), boost::none); /* collUUID */ - expCtx->tempDir = storageGlobalParams.dbpath + "/tmp"; + expCtx->tempDir = storageGlobalParams.dbpath + "/_tmp"; return std::make_unique( Pipeline::makePipeline(rawPipeline, expCtx, opts), samplesPerChunk - 1); @@ -836,4 +934,126 @@ boost::optional SamplingBasedSplitPolicy::PipelineDocumentSource::getNe return val->toBson(); } +ShardDistributionSplitPolicy ShardDistributionSplitPolicy::make( + OperationContext* opCtx, + const ShardKeyPattern& shardKey, + std::vector shardDistribution, + boost::optional> zones) { + uassert(7661501, "ShardDistribution should not be empty", shardDistribution.size() > 0); + return ShardDistributionSplitPolicy(shardDistribution, zones); +} + +ShardDistributionSplitPolicy::ShardDistributionSplitPolicy( + std::vector& shardDistribution, boost::optional> zones) + : _shardDistribution(std::move(shardDistribution)), _zones(std::move(zones)) {} + +InitialSplitPolicy::ShardCollectionConfig ShardDistributionSplitPolicy::createFirstChunks( + OperationContext* opCtx, + const ShardKeyPattern& shardKeyPattern, + const SplitPolicyParams& params) { + const auto& keyPattern = shardKeyPattern.getKeyPattern(); + if (_zones) { + for (auto& zone : *_zones) { + zone.setMinKey(keyPattern.extendRangeBound(zone.getMinKey(), false)); + zone.setMaxKey(keyPattern.extendRangeBound(zone.getMaxKey(), false)); + } + } + + auto splitPoints = extractSplitPointsFromZones(shardKeyPattern, _zones); + std::vector chunks; + uassert(7679102, + "ShardDistribution without min/max must not use this split policy.", + _shardDistribution[0].getMin()); + + unsigned long shardDistributionIdx = 0; + const auto currentTime = VectorClock::get(opCtx)->getTime(); + const auto validAfter = currentTime.clusterTime().asTimestamp(); + ChunkVersion version({OID::gen(), validAfter}, {1, 0}); + for (const auto& splitPoint : splitPoints) { + _appendChunks(params, splitPoint, keyPattern, shardDistributionIdx, version, chunks); + } + _appendChunks( + params, keyPattern.globalMax(), keyPattern, shardDistributionIdx, version, chunks); + + if (_zones) { + _checkShardsMatchZones(opCtx, chunks, *_zones); + } + + return {std::move(chunks)}; +} + +void 
ShardDistributionSplitPolicy::_appendChunks(const SplitPolicyParams& params, + const BSONObj& splitPoint, + const KeyPattern& keyPattern, + unsigned long& shardDistributionIdx, + ChunkVersion& version, + std::vector& chunks) { + while (shardDistributionIdx < _shardDistribution.size()) { + auto shardMin = + keyPattern.extendRangeBound(*_shardDistribution[shardDistributionIdx].getMin(), false); + auto shardMax = + keyPattern.extendRangeBound(*_shardDistribution[shardDistributionIdx].getMax(), false); + auto lastChunkMax = + chunks.empty() ? keyPattern.globalMin() : chunks.back().getRange().getMax(); + /* When we compare a defined shard range with a splitPoint, there are three cases: + * 1. The whole shard range is on the left side of the splitPoint -> Add this shard as a + * whole chunk and move to the next shard. + * 2. The splitPoint is in the middle of the shard range -> Append (shardMin, + * splitPoint) as a chunk and move to the next split point. + * 3. The whole shard range is on the right side of the splitPoint -> Move to the next + * splitPoint. + * This algorithm relies on the shardDistribution being continuous and complete in order + * to be correct, which is validated in the cmd handler. + */ + if (SimpleBSONObjComparator::kInstance.evaluate(shardMin < splitPoint)) { + // The whole shard range is on the left side of the splitPoint. + if (SimpleBSONObjComparator::kInstance.evaluate(shardMax <= splitPoint)) { + appendChunk(params, + lastChunkMax, + shardMax, + &version, + _shardDistribution[shardDistributionIdx].getShard(), + &chunks); + lastChunkMax = shardMax; + shardDistributionIdx++; + } else { // The splitPoint is in the middle of the shard range. + appendChunk(params, + lastChunkMax, + splitPoint, + &version, + _shardDistribution[shardDistributionIdx].getShard(), + &chunks); + lastChunkMax = splitPoint; + return; + } + } else { // The whole shard range is on the right side of the splitPoint.
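// To make the three cases above concrete, a minimal illustrative trace (the ranges and split
// points below are hypothetical, not taken from this patch): with shardDistribution =
// [s0: (MinKey, 0), s1: (0, 10), s2: (10, MaxKey)] and zone-derived split points {5, 10},
// createFirstChunks() drives _appendChunks() as follows:
//   _appendChunks(splitPoint = 5):      appends (MinKey, 0) -> s0 (case 1), then (0, 5) -> s1 (case 2) and returns;
//   _appendChunks(splitPoint = 10):     appends (5, 10) -> s1 (case 1), then hits case 3 on s2 and returns;
//   _appendChunks(splitPoint = MaxKey): appends (10, MaxKey) -> s2 (case 1).
// Resulting chunks: (MinKey, 0)->s0, (0, 5)->s1, (5, 10)->s1, (10, MaxKey)->s2.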
+ return; + } + } +} + +void ShardDistributionSplitPolicy::_checkShardsMatchZones( + OperationContext* opCtx, + const std::vector& chunks, + const std::vector& zones) { + ZoneInfo zoneInfo; + auto shardIdToTags = buildShardIdToTagsMap(opCtx, _shardDistribution); + for (const auto& zone : zones) { + uassertStatusOK( + zoneInfo.addRangeToZone({zone.getMinKey(), zone.getMaxKey(), zone.getTag()})); + } + + for (const auto& chunk : chunks) { + auto zoneFromCmdParameter = zoneInfo.getZoneForChunk({chunk.getMin(), chunk.getMax()}); + auto iter = shardIdToTags.find(chunk.getShard()); + uassert(ErrorCodes::InvalidOptions, + str::stream() << "Specified zones and shardDistribution are conflicting with the " + "existing shard/zone, shard " + << chunk.getShard() << " doesn't belong to zone " + << zoneFromCmdParameter, + iter != shardIdToTags.end() && + iter->second.find(zoneFromCmdParameter) != iter->second.end()); + } +} + } // namespace mongo diff --git a/src/mongo/db/s/config/initial_split_policy.h b/src/mongo/db/s/config/initial_split_policy.h index 1d8774a781c1c..24cc3f56935a2 100644 --- a/src/mongo/db/s/config/initial_split_policy.h +++ b/src/mongo/db/s/config/initial_split_policy.h @@ -29,17 +29,32 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include #include #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_tags.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/resharding/common_types_gen.h" #include "mongo/s/shard_key_pattern.h" #include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" + namespace mongo { struct SplitPolicyParams { @@ -287,11 +302,13 @@ class SamplingBasedSplitPolicy : public InitialSplitPolicy { const ShardKeyPattern& shardKey, int numInitialChunks, boost::optional> zones, + boost::optional> availableShardIds, int samplesPerChunk = kDefaultSamplesPerChunk); SamplingBasedSplitPolicy(int numInitialChunks, boost::optional> zones, - std::unique_ptr samples); + std::unique_ptr samples, + boost::optional> availableShardIds); /** * Generates the initial split points and returns them in ascending shard key order. Does not @@ -330,12 +347,6 @@ class SamplingBasedSplitPolicy : public InitialSplitPolicy { int samplesPerChunk, MakePipelineOptions opts = {}); - /** - * Returns a set of split points to ensure that chunk boundaries will align with the zone - * ranges. - */ - BSONObjSet _extractSplitPointsFromZones(const ShardKeyPattern& shardKey); - /** * Append split points based from the samples taken from the collection. */ @@ -347,5 +358,44 @@ class SamplingBasedSplitPolicy : public InitialSplitPolicy { const int _numInitialChunks; boost::optional> _zones; std::unique_ptr _samples; + // If provided, only pick shards that are in this vector.
+ boost::optional> _availableShardIds; }; + +class ShardDistributionSplitPolicy : public InitialSplitPolicy { +public: + static ShardDistributionSplitPolicy make(OperationContext* opCtx, + const ShardKeyPattern& shardKey, + std::vector shardDistribution, + boost::optional> zones); + + ShardDistributionSplitPolicy(std::vector& shardDistribution, + boost::optional> zones); + + ShardCollectionConfig createFirstChunks(OperationContext* opCtx, + const ShardKeyPattern& shardKeyPattern, + const SplitPolicyParams& params) override; + +private: + /** + * Given a splitPoint, create chunks from _shardDistribution until passing the splitPoint. + */ + void _appendChunks(const SplitPolicyParams& params, + const BSONObj& splitPoint, + const KeyPattern& keyPattern, + unsigned long& shardDistributionIdx, + ChunkVersion& version, + std::vector& chunks); + + /** + * Check the chunks created from command parameter "zones" and "shardDistribution" are + * satisfying the existing zone mapping rules in config. + */ + void _checkShardsMatchZones(OperationContext* opCtx, + const std::vector& chunks, + const std::vector& zones); + std::vector _shardDistribution; + boost::optional> _zones; +}; + } // namespace mongo diff --git a/src/mongo/db/s/config/initial_split_policy_test.cpp b/src/mongo/db/s/config/initial_split_policy_test.cpp index 923a59e01ea9a..f90641fcb6494 100644 --- a/src/mongo/db/s/config/initial_split_policy_test.cpp +++ b/src/mongo/db/s/config/initial_split_policy_test.cpp @@ -27,14 +27,39 @@ * it in the license file. */ +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/logical_time.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/initial_split_policy.h" #include "mongo/db/vector_clock.h" -#include "mongo/logv2/log.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -394,9 +419,9 @@ class SingleChunkPerTagSplitPolicyTest : public GenerateInitialSplitChunksTestBa TagsType makeTag(const ChunkRange range, std::string zoneName) { BSONObjBuilder tagDocBuilder; - tagDocBuilder.append("_id", - BSON(TagsType::ns(nss().ns()) << TagsType::min(range.getMin()))); - tagDocBuilder.append(TagsType::ns(), nss().ns()); + tagDocBuilder.append( + "_id", BSON(TagsType::ns(nss().toString_forTest()) << TagsType::min(range.getMin()))); + tagDocBuilder.append(TagsType::ns(), nss().ns_forTest()); tagDocBuilder.append(TagsType::min(), range.getMin()); tagDocBuilder.append(TagsType::max(), range.getMax()); tagDocBuilder.append(TagsType::tag(), zoneName); @@ -1851,10 +1876,11 @@ class SamplingBasedInitSplitTest : public SingleChunkPerTagSplitPolicyTest { std::unique_ptr makeInitialSplitPolicy( int numInitialChunks, boost::optional> zones, - std::list samples) { + std::list samples, + 
boost::optional> availableShardIds) { auto sampleSource = std::make_unique(std::move(samples)); return std::make_unique( - numInitialChunks, zones, std::move(sampleSource)); + numInitialChunks, zones, std::move(sampleSource), availableShardIds); } /** @@ -1959,16 +1985,22 @@ TEST_F(SamplingBasedInitSplitTest, NoZones) { std::vector> expectedShardForEachChunk = { boost::none, boost::none, boost::none, boost::none}; - checkGeneratedInitialZoneChunks( - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples).get(), - shardKey, - shardList, - expectedChunkRanges, - expectedShardForEachChunk); - checkGeneratedInitialSplitPoints( - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples).get(), - shardKey, - expectedChunkRanges); + checkGeneratedInitialZoneChunks(makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + mockSamples, + boost::none /* availableShardIds */) + .get(), + shardKey, + shardList, + expectedChunkRanges, + expectedShardForEachChunk); + checkGeneratedInitialSplitPoints(makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + mockSamples, + boost::none /* availableShardIds */) + .get(), + shardKey, + expectedChunkRanges); } TEST_F(SamplingBasedInitSplitTest, HashedShardKey) { @@ -2002,16 +2034,22 @@ TEST_F(SamplingBasedInitSplitTest, HashedShardKey) { std::vector> expectedShardForEachChunk = { boost::none, boost::none, boost::none, boost::none}; - checkGeneratedInitialZoneChunks( - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples).get(), - shardKey, - shardList, - expectedChunkRanges, - expectedShardForEachChunk); - checkGeneratedInitialSplitPoints( - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples).get(), - shardKey, - expectedChunkRanges); + checkGeneratedInitialZoneChunks(makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + mockSamples, + boost::none /* availableShardIds */) + .get(), + shardKey, + shardList, + expectedChunkRanges, + expectedShardForEachChunk); + checkGeneratedInitialSplitPoints(makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + mockSamples, + boost::none /* availableShardIds */) + .get(), + shardKey, + expectedChunkRanges); } TEST_F(SamplingBasedInitSplitTest, SingleInitialChunk) { @@ -2036,16 +2074,22 @@ TEST_F(SamplingBasedInitSplitTest, SingleInitialChunk) { boost::none // Not in any zone. Can go to any shard. 
}; - checkGeneratedInitialZoneChunks( - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, {} /* samples */).get(), - shardKey, - shardList, - expectedChunkRanges, - expectedShardForEachChunk); - checkGeneratedInitialSplitPoints( - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, {} /* samples */).get(), - shardKey, - expectedChunkRanges); + checkGeneratedInitialZoneChunks(makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + {} /* samples */, + boost::none /* availableShardIds */) + .get(), + shardKey, + shardList, + expectedChunkRanges, + expectedShardForEachChunk); + checkGeneratedInitialSplitPoints(makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + {} /* samples */, + boost::none /* availableShardIds */) + .get(), + shardKey, + expectedChunkRanges); } TEST_F(SamplingBasedInitSplitTest, ZonesCoversEntireDomainButInsufficient) { @@ -2082,13 +2126,17 @@ TEST_F(SamplingBasedInitSplitTest, ZonesCoversEntireDomainButInsufficient) { shardId("1"), shardId("0"), shardId("0"), shardId("0")}; checkGeneratedInitialZoneChunks( - makeInitialSplitPolicy(numInitialChunks, zones, mockSamples).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, mockSamples, boost::none /* availableShardIds */) + .get(), shardKey, shardList, expectedChunkRanges, expectedShardForEachChunk); checkGeneratedInitialSplitPoints( - makeInitialSplitPolicy(numInitialChunks, zones, mockSamples).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, mockSamples, boost::none /* availableShardIds */) + .get(), shardKey, expectedChunkRanges); } @@ -2130,13 +2178,17 @@ TEST_F(SamplingBasedInitSplitTest, SamplesCoincidingWithZones) { }; checkGeneratedInitialZoneChunks( - makeInitialSplitPolicy(numInitialChunks, zones, mockSamples).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, mockSamples, boost::none /* availableShardIds */) + .get(), shardKey, shardList, expectedChunkRanges, expectedShardForEachChunk); checkGeneratedInitialSplitPoints( - makeInitialSplitPolicy(numInitialChunks, zones, mockSamples).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, mockSamples, boost::none /* availableShardIds */) + .get(), shardKey, expectedChunkRanges); } @@ -2176,13 +2228,17 @@ TEST_F(SamplingBasedInitSplitTest, ZoneWithHoles) { }; checkGeneratedInitialZoneChunks( - makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, {} /* samples */, boost::none /* availableShardIds */) + .get(), shardKey, shardList, expectedChunkRanges, expectedShardForEachChunk); checkGeneratedInitialSplitPoints( - makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, {} /* samples */, boost::none /* availableShardIds */) + .get(), shardKey, expectedChunkRanges); } @@ -2222,13 +2278,17 @@ TEST_F(SamplingBasedInitSplitTest, UnsortedZoneWithHoles) { }; checkGeneratedInitialZoneChunks( - makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, {} /* samples */, boost::none /* availableShardIds */) + .get(), shardKey, shardList, expectedChunkRanges, expectedShardForEachChunk); checkGeneratedInitialSplitPoints( - makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, {} /* samples */, boost::none /* availableShardIds */) + .get(), shardKey, expectedChunkRanges); } @@ -2265,13 +2325,17 @@ 
TEST_F(SamplingBasedInitSplitTest, ZonesIsPrefixOfShardKey) { }; checkGeneratedInitialZoneChunks( - makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, {} /* samples */, boost::none /* availableShardIds */) + .get(), shardKey, shardList, expectedChunkRanges, expectedShardForEachChunk); checkGeneratedInitialSplitPoints( - makeInitialSplitPolicy(numInitialChunks, zones, {} /* samples */).get(), + makeInitialSplitPolicy( + numInitialChunks, zones, {} /* samples */, boost::none /* availableShardIds */) + .get(), shardKey, expectedChunkRanges); } @@ -2298,12 +2362,14 @@ TEST_F(SamplingBasedInitSplitTest, ZonesHasIncompatibleShardKey) { auto numInitialChunks = 2; SplitPolicyParams params{UUID::gen(), shardId("0")}; { - auto initSplitPolicy = makeInitialSplitPolicy(numInitialChunks, zones, mockSamples); + auto initSplitPolicy = makeInitialSplitPolicy( + numInitialChunks, zones, mockSamples, boost::none /* availableShardIds */); ASSERT_THROWS(initSplitPolicy->createFirstChunks(operationContext(), shardKey, params), DBException); } { - auto initSplitPolicy = makeInitialSplitPolicy(numInitialChunks, zones, mockSamples); + auto initSplitPolicy = makeInitialSplitPolicy( + numInitialChunks, zones, mockSamples, boost::none /* availableShardIds */); ASSERT_THROWS(initSplitPolicy->createFirstSplitPoints(operationContext(), shardKey, params), DBException); } @@ -2328,14 +2394,18 @@ TEST_F(SamplingBasedInitSplitTest, InsufficientSamples) { auto numInitialChunks = 10; SplitPolicyParams params{UUID::gen(), shardId("0")}; { - auto initSplitPolicy = - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples); + auto initSplitPolicy = makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + mockSamples, + boost::none /* availableShardIds */); ASSERT_THROWS(initSplitPolicy->createFirstChunks(operationContext(), shardKey, params), DBException); } { - auto initSplitPolicy = - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples); + auto initSplitPolicy = makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + mockSamples, + boost::none /* availableShardIds */); ASSERT_THROWS(initSplitPolicy->createFirstSplitPoints(operationContext(), shardKey, params), DBException); } @@ -2359,18 +2429,240 @@ TEST_F(SamplingBasedInitSplitTest, ZeroInitialChunks) { auto numInitialChunks = 10; SplitPolicyParams params{UUID::gen(), shardId("0")}; { - auto initSplitPolicy = - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples); + auto initSplitPolicy = makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + mockSamples, + boost::none /* availableShardIds */); ASSERT_THROWS(initSplitPolicy->createFirstChunks(operationContext(), shardKey, params), DBException); } { - auto initSplitPolicy = - makeInitialSplitPolicy(numInitialChunks, boost::none /* zones */, mockSamples); + auto initSplitPolicy = makeInitialSplitPolicy(numInitialChunks, + boost::none /* zones */, + mockSamples, + boost::none /* availableShardIds */); ASSERT_THROWS(initSplitPolicy->createFirstSplitPoints(operationContext(), shardKey, params), DBException); } } +TEST_F(SamplingBasedInitSplitTest, WithShardIds) { + const NamespaceString ns = NamespaceString::createNamespaceString_forTest("foo", "bar"); + const ShardKeyPattern shardKey(BSON("y" << 1)); + + std::vector shardList; + shardList.emplace_back( + ShardType(shardId("0").toString(), "rs0/fakeShard0:123", {std::string("zoneA")})); + 
shardList.emplace_back( + ShardType(shardId("1").toString(), "rs1/fakeShard1:123", {std::string("zoneB")})); + + setupShards(shardList); + shardRegistry()->reload(operationContext()); + + std::list mockSamples; + mockSamples.push_back(BSON("y" << 10)); + mockSamples.push_back(BSON("y" << 20)); + + std::vector zones; + zones.emplace_back(nss(), "zoneA", ChunkRange(BSON("y" << MINKEY), BSON("y" << 0))); + zones.emplace_back(nss(), "zoneB", ChunkRange(BSON("y" << 0), BSON("y" << MAXKEY))); + + std::vector availableShardIds = {shardId("0"), shardId("1")}; + + auto numInitialChunks = 4; + + std::vector expectedChunkRanges = { + ChunkRange(BSON("y" << MINKEY), BSON("y" << 0)), + ChunkRange(BSON("y" << 0), BSON("y" << 10)), + ChunkRange(BSON("y" << 10), BSON("y" << 20)), + ChunkRange(BSON("y" << 20), BSON("y" << MAXKEY))}; + + std::vector> expectedShardForEachChunk = { + shardId("0"), shardId("1"), shardId("1"), shardId("1")}; + + checkGeneratedInitialZoneChunks( + makeInitialSplitPolicy(numInitialChunks, zones, mockSamples, availableShardIds).get(), + shardKey, + shardList, + expectedChunkRanges, + expectedShardForEachChunk); +} + +TEST_F(SamplingBasedInitSplitTest, NoAvailableShardInZone) { + const NamespaceString ns = NamespaceString::createNamespaceString_forTest("foo", "bar"); + const ShardKeyPattern shardKey(BSON("y" << 1)); + + std::vector shardList; + shardList.emplace_back( + ShardType(shardId("0").toString(), "rs0/fakeShard0:123", {std::string("zoneA")})); + shardList.emplace_back( + ShardType(shardId("1").toString(), "rs1/fakeShard1:123", {std::string("zoneB")})); + + setupShards(shardList); + shardRegistry()->reload(operationContext()); + + std::vector zones; + zones.emplace_back(nss(), "zoneA", ChunkRange(BSON("y" << MINKEY), BSON("y" << 0))); + zones.emplace_back(nss(), "zoneB", ChunkRange(BSON("y" << 0), BSON("y" << MAXKEY))); + + std::vector availableShardIds = {shardId("0")}; + + std::list mockSamples; + + auto numInitialChunks = 10; + SplitPolicyParams params{UUID::gen(), shardId("0")}; + { + auto initSplitPolicy = makeInitialSplitPolicy( + numInitialChunks, boost::none /* zones */, mockSamples, availableShardIds); + ASSERT_THROWS(initSplitPolicy->createFirstChunks(operationContext(), shardKey, params), + DBException); + } +} + + +class ShardDistributionInitSplitTest : public SingleChunkPerTagSplitPolicyTest { +public: + std::unique_ptr makeInitialSplitPolicy( + std::vector& shardDistribution, + boost::optional> zones) { + return std::make_unique(shardDistribution, zones); + } + + /** + * Calls createFirstChunks() according to the given arguments and asserts that returned chunks + * match with the chunks created using expectedChunkRanges and expectedShardIds. + */ + void checkGeneratedInitialZoneChunks(std::unique_ptr splitPolicy, + const ShardKeyPattern& shardKeyPattern, + const std::vector& shardList, + const std::vector& shardDistribution, + const std::vector& expectedChunkRanges, + const std::vector& expectedShardIds) { + const ShardId primaryShard("doesntMatter"); + + const auto shardCollectionConfig = splitPolicy->createFirstChunks( + operationContext(), shardKeyPattern, {UUID::gen(), primaryShard}); + + ASSERT_EQ(expectedShardIds.size(), expectedChunkRanges.size()); + ASSERT_EQ(expectedChunkRanges.size(), shardCollectionConfig.chunks.size()); + for (size_t i = 0; i < shardCollectionConfig.chunks.size(); ++i) { + // Check the chunk range matches the expected range. 
+ ASSERT_EQ(expectedChunkRanges[i], shardCollectionConfig.chunks[i].getRange()); + + // Check that the shardId matches the expected. + const auto& actualShardId = shardCollectionConfig.chunks[i].getShard(); + ASSERT_EQ(expectedShardIds[i], actualShardId); + } + } +}; + +TEST_F(ShardDistributionInitSplitTest, WithoutZones) { + const NamespaceString ns = NamespaceString::createNamespaceString_forTest("foo", "bar"); + const ShardKeyPattern shardKey(BSON("y" << 1)); + + std::vector shardList; + shardList.emplace_back( + ShardType(shardId("0").toString(), "rs0/fakeShard0:123", {std::string("zoneA")})); + shardList.emplace_back( + ShardType(shardId("1").toString(), "rs1/fakeShard1:123", {std::string("zoneB")})); + + setupShards(shardList); + shardRegistry()->reload(operationContext()); + ShardKeyRange range0(shardId("0")); + range0.setMin(BSON("y" << MINKEY)); + range0.setMax(BSON("y" << 0)); + ShardKeyRange range1(shardId("1")); + range1.setMin(BSON("y" << 0)); + range1.setMax(BSON("y" << MAXKEY)); + std::vector shardDistribution = {range0, range1}; + + std::vector expectedChunkRanges = {ChunkRange(BSON("y" << MINKEY), BSON("y" << 0)), + ChunkRange(BSON("y" << 0), BSON("y" << MAXKEY))}; + std::vector expectedShardForEachChunk = {shardId("0"), shardId("1")}; + + checkGeneratedInitialZoneChunks( + makeInitialSplitPolicy(shardDistribution, boost::none /*zones*/), + shardKey, + shardList, + shardDistribution, + expectedChunkRanges, + expectedShardForEachChunk); +} + +TEST_F(ShardDistributionInitSplitTest, ZonesConflictShardDistribution) { + const NamespaceString ns = NamespaceString::createNamespaceString_forTest("foo", "bar"); + const ShardKeyPattern shardKey(BSON("y" << 1)); + + std::vector shardList; + shardList.emplace_back( + ShardType(shardId("0").toString(), "rs0/fakeShard0:123", {std::string("zoneA")})); + shardList.emplace_back( + ShardType(shardId("1").toString(), "rs1/fakeShard1:123", {std::string("zoneB")})); + + setupShards(shardList); + shardRegistry()->reload(operationContext()); + + std::vector zones; + zones.emplace_back(nss(), "zoneB", ChunkRange(BSON("y" << MINKEY), BSON("y" << 0))); + zones.emplace_back(nss(), "zoneA", ChunkRange(BSON("y" << 0), BSON("y" << MAXKEY))); + + ShardKeyRange range0(shardId("0")); + range0.setMin(BSON("y" << MINKEY)); + range0.setMax(BSON("y" << 0)); + ShardKeyRange range1(shardId("1")); + range1.setMin(BSON("y" << 0)); + range1.setMax(BSON("y" << MAXKEY)); + std::vector shardDistribution = {range0, range1}; + + SplitPolicyParams params{UUID::gen(), shardId("0")}; + auto initSplitPolicy = makeInitialSplitPolicy(shardDistribution, zones); + ASSERT_THROWS(initSplitPolicy->createFirstChunks(operationContext(), shardKey, params), + DBException); +} + +TEST_F(ShardDistributionInitSplitTest, InterleaveWithZones) { + const NamespaceString ns = NamespaceString::createNamespaceString_forTest("foo", "bar"); + const ShardKeyPattern shardKey(BSON("y" << 1)); + + std::vector shardList; + shardList.emplace_back(ShardType(shardId("0").toString(), + "rs0/fakeShard0:123", + {std::string("zoneA"), std::string("zoneB")})); + shardList.emplace_back( + ShardType(shardId("1").toString(), "rs1/fakeShard1:123", {std::string("zoneB")})); + + setupShards(shardList); + shardRegistry()->reload(operationContext()); + + std::vector zones; + zones.emplace_back(nss(), "zoneA", ChunkRange(BSON("y" << MINKEY), BSON("y" << 0))); + zones.emplace_back(nss(), "zoneB", ChunkRange(BSON("y" << 0), BSON("y" << MAXKEY))); + + ShardKeyRange range0(shardId("0")); + range0.setMin(BSON("y" << 
MINKEY)); + range0.setMax(BSON("y" << -1)); + ShardKeyRange range1(shardId("0")); + range1.setMin(BSON("y" << -1)); + range1.setMax(BSON("y" << 1)); + ShardKeyRange range2(shardId("1")); + range2.setMin(BSON("y" << 1)); + range2.setMax(BSON("y" << MAXKEY)); + std::vector shardDistribution = {range0, range1, range2}; + + std::vector expectedChunkRanges = {ChunkRange(BSON("y" << MINKEY), BSON("y" << -1)), + ChunkRange(BSON("y" << -1), BSON("y" << 0)), + ChunkRange(BSON("y" << 0), BSON("y" << 1)), + ChunkRange(BSON("y" << 1), BSON("y" << MAXKEY))}; + std::vector expectedShardForEachChunk = { + shardId("0"), shardId("0"), shardId("0"), shardId("1")}; + + checkGeneratedInitialZoneChunks(makeInitialSplitPolicy(shardDistribution, zones), + shardKey, + shardList, + shardDistribution, + expectedChunkRanges, + expectedShardForEachChunk); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/s/config/known_collections.h b/src/mongo/db/s/config/known_collections.h new file mode 100644 index 0000000000000..f7fc34a1a90fc --- /dev/null +++ b/src/mongo/db/s/config/known_collections.h @@ -0,0 +1,93 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#include "mongo/db/namespace_string.h" +#include "mongo/util/string_map.h" + +namespace mongo { +// Check for necessary restore procedures before adding collection to this set. If restore procedure +// is necessary, additions to this set should also be added to kCollectionEntries in +// 'configsvr_run_restore_command.cpp'. 
+const StringDataSet kConfigCollections{ + "actionlog", + "changelog", + "chunks", + "migrations", + "mongos", + "system.preimages", + "tags", + "version", + NamespaceString::kClusterParametersNamespace.coll(), + NamespaceString::kCollectionCriticalSectionsNamespace.coll(), + NamespaceString::kCompactStructuredEncryptionCoordinatorNamespace.coll(), + NamespaceString::kConfigAnalyzeShardKeySplitPointsNamespace.coll(), + NamespaceString::kConfigDatabasesNamespace.coll(), + NamespaceString::kConfigImagesNamespace.coll(), + NamespaceString::kConfigQueryAnalyzersNamespace.coll(), + NamespaceString::kConfigReshardingOperationsNamespace.coll(), + NamespaceString::kConfigSampledQueriesDiffNamespace.coll(), + NamespaceString::kConfigSampledQueriesNamespace.coll(), + NamespaceString::kConfigSettingsNamespace.coll(), + NamespaceString::kConfigsvrCollectionsNamespace.coll(), + NamespaceString::kConfigsvrCoordinatorsNamespace.coll(), + NamespaceString::kConfigsvrIndexCatalogNamespace.coll(), + NamespaceString::kConfigsvrPlacementHistoryNamespace.coll(), + NamespaceString::kConfigsvrShardsNamespace.coll(), + NamespaceString::kDistLocksNamepsace.coll(), + NamespaceString::kDonorReshardingOperationsNamespace.coll(), + NamespaceString::kExternalKeysCollectionNamespace.coll(), + NamespaceString::kForceOplogBatchBoundaryNamespace.coll(), + NamespaceString::kGlobalIndexClonerNamespace.coll(), + NamespaceString::kIndexBuildEntryNamespace.coll(), + NamespaceString::kLockpingsNamespace.coll(), + NamespaceString::kLogicalSessionsNamespace.coll(), + NamespaceString::kMigrationCoordinatorsNamespace.coll(), + NamespaceString::kMigrationRecipientsNamespace.coll(), + NamespaceString::kRangeDeletionForRenameNamespace.coll(), + NamespaceString::kRangeDeletionNamespace.coll(), + NamespaceString::kRecipientReshardingOperationsNamespace.coll(), + NamespaceString::kReshardingApplierProgressNamespace.coll(), + NamespaceString::kReshardingApplierProgressNamespace.coll(), + NamespaceString::kReshardingTxnClonerProgressNamespace.coll(), + NamespaceString::kSessionTransactionsTableNamespace.coll(), + NamespaceString::kSessionTransactionsTableNamespace.coll(), + NamespaceString::kSetChangeStreamStateCoordinatorNamespace.coll(), + NamespaceString::kShardCollectionCatalogNamespace.coll(), + NamespaceString::kShardConfigCollectionsNamespace.coll(), + NamespaceString::kShardConfigDatabasesNamespace.coll(), + NamespaceString::kShardIndexCatalogNamespace.coll(), + NamespaceString::kShardingDDLCoordinatorsNamespace.coll(), + NamespaceString::kShardingRenameParticipantsNamespace.coll(), + NamespaceString::kShardSplitDonorsNamespace.coll(), + NamespaceString::kTenantMigrationDonorsNamespace.coll(), + NamespaceString::kTenantMigrationRecipientsNamespace.coll(), + NamespaceString::kTransactionCoordinatorsNamespace.coll(), + NamespaceString::kUserWritesCriticalSectionsNamespace.coll(), + NamespaceString::kVectorClockNamespace.coll(), +}; +} // namespace mongo diff --git a/src/mongo/db/s/config/placement_history_cleaner.cpp b/src/mongo/db/s/config/placement_history_cleaner.cpp index 7e1a5215de3af..609cbbc59e65b 100644 --- a/src/mongo/db/s/config/placement_history_cleaner.cpp +++ b/src/mongo/db/s/config/placement_history_cleaner.cpp @@ -28,11 +28,44 @@ */ #include "mongo/db/s/config/placement_history_cleaner.h" + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include 
"mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_util.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -98,17 +131,15 @@ PlacementHistoryCleaner* PlacementHistoryCleaner::get(OperationContext* opCtx) { } void PlacementHistoryCleaner::runOnce(Client* client, size_t minPlacementHistoryDocs) { - auto opCtx = client->makeOperationContext(); - opCtx.get()->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + auto opCtxHolder = client->makeOperationContext(); + auto opCtx = opCtxHolder.get(); try { // Count the number of entries in the placementHistory collection; skip cleanup if below // threshold. - const auto numPlacementHistoryDocs = [&] { - PersistentTaskStore store( - NamespaceString::kConfigsvrPlacementHistoryNamespace); - return store.count(opCtx.get(), BSONObj()); - }(); + PersistentTaskStore store( + NamespaceString::kConfigsvrPlacementHistoryNamespace); + const auto numPlacementHistoryDocs = store.count(opCtx, BSONObj()); if (numPlacementHistoryDocs <= minPlacementHistoryDocs) { LOGV2_DEBUG(7068801, 3, "PlacementHistoryCleaner: nothing to be deleted on this round"); @@ -117,7 +148,7 @@ void PlacementHistoryCleaner::runOnce(Client* client, size_t minPlacementHistory // Get the time of the oldest op entry still persisted among the cluster shards; historical // placement entries that precede it may be safely dropped. - auto earliestOplogTime = getEarliestOpLogTimestampAmongAllShards(opCtx.get()); + auto earliestOplogTime = getEarliestOpLogTimestampAmongAllShards(opCtx); if (!earliestOplogTime) { LOGV2(7068802, "Skipping cleanup of config.placementHistory - no earliestOplogTime could " @@ -125,28 +156,74 @@ void PlacementHistoryCleaner::runOnce(Client* client, size_t minPlacementHistory return; } - ShardingCatalogManager::get(opCtx.get()) - ->cleanUpPlacementHistory(opCtx.get(), *earliestOplogTime); + // Check the latest initialization time is not greater than the earliestOpTime. + // The clean-up must always move the new initialization time forward. 
+ const auto match = + BSON(NamespacePlacementType::kNssFieldName + << NamespaceStringUtil::serialize( + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker) + << NamespacePlacementType::kTimestampFieldName + << BSON("$gte" << earliestOplogTime->toBSON())); + + if (store.count(opCtx, match) > 0) { + return; + } + + ShardingCatalogManager::get(opCtx)->cleanUpPlacementHistory(opCtx, *earliestOplogTime); } catch (const DBException& e) { LOGV2(7068804, "Periodic cleanup of config.placementHistory failed", "error"_attr = e); } } +void PlacementHistoryCleaner::onStepDown() { + _stop(true /* steppingDown*/); +} + void PlacementHistoryCleaner::onStepUpComplete(OperationContext* opCtx, long long term) { - auto periodicRunner = opCtx->getServiceContext()->getPeriodicRunner(); - invariant(periodicRunner); + _start(opCtx, true /* steppingUp*/); +} - PeriodicRunner::PeriodicJob placementHistoryCleanerJob( - "PlacementHistoryCleanUpJob", - [](Client* client) { runOnce(client, kminPlacementHistoryEntries); }, - kJobExecutionPeriod); +void PlacementHistoryCleaner::pause() { + _stop(false /* steppingDown*/); +} - _anchor = periodicRunner->makeJob(std::move(placementHistoryCleanerJob)); - _anchor.start(); +void PlacementHistoryCleaner::resume(OperationContext* opCtx) { + _start(opCtx, false /* steppingUp*/); } -void PlacementHistoryCleaner::onStepDown() { + +void PlacementHistoryCleaner::_start(OperationContext* opCtx, bool steppingUp) { + stdx::lock_guard scopedLock(_mutex); + + if (steppingUp) { + _runningAsPrimary = true; + } + + if (_runningAsPrimary && !_anchor.isValid()) { + auto periodicRunner = opCtx->getServiceContext()->getPeriodicRunner(); + invariant(periodicRunner); + + PeriodicRunner::PeriodicJob placementHistoryCleanerJob( + "PlacementHistoryCleanUpJob", + [](Client* client) { runOnce(client, kminPlacementHistoryEntries); }, + kJobExecutionPeriod, + // TODO(SERVER-74658): Please revisit if this periodic job could be made killable. 
+ false /*isKillableByStepdown*/); + + _anchor = periodicRunner->makeJob(std::move(placementHistoryCleanerJob)); + _anchor.start(); + } +} + +void PlacementHistoryCleaner::_stop(bool steppingDown) { + stdx::lock_guard scopedLock(_mutex); + + if (steppingDown) { + _runningAsPrimary = false; + } + if (_anchor.isValid()) { _anchor.stop(); } } + } // namespace mongo diff --git a/src/mongo/db/s/config/placement_history_cleaner.h b/src/mongo/db/s/config/placement_history_cleaner.h index 0fec9a72a4e69..56286a49d0191 100644 --- a/src/mongo/db/s/config/placement_history_cleaner.h +++ b/src/mongo/db/s/config/placement_history_cleaner.h @@ -29,7 +29,15 @@ #pragma once +#include +#include + +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/periodic_runner.h" namespace mongo { @@ -47,12 +55,19 @@ class PlacementHistoryCleaner : public ReplicaSetAwareServiceConfigSvr +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" #include "mongo/db/commands/set_cluster_parameter_invocation.h" -#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/config/set_cluster_parameter_coordinator.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_util.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/vector_clock.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -146,7 +178,7 @@ void SetClusterParameterCoordinator::_sendSetClusterParameterToAllShards( LOGV2_DEBUG(6387001, 1, "Sending setClusterParameter to shards:", "shards"_attr = shards); ShardsvrSetClusterParameter request(_doc.getParameter()); - request.setDbName(DatabaseName(_doc.getTenantId(), DatabaseName::kAdmin.db())); + request.setDbName(DatabaseNameUtil::deserialize(_doc.getTenantId(), DatabaseName::kAdmin.db())); request.setClusterParameterTime(*_doc.getClusterParameterTime()); sharding_util::sendCommandToShards( opCtx, @@ -161,14 +193,17 @@ void SetClusterParameterCoordinator::_commit(OperationContext* opCtx) { SetClusterParameter 
setClusterParameterRequest(_doc.getParameter()); setClusterParameterRequest.setDbName( - DatabaseName(_doc.getTenantId(), DatabaseName::kAdmin.db())); + DatabaseNameUtil::deserialize(_doc.getTenantId(), DatabaseName::kAdmin.db())); std::unique_ptr parameterService = std::make_unique(); DBDirectClient client(opCtx); ClusterParameterDBClientService dbService(client); SetClusterParameterInvocation invocation{std::move(parameterService), dbService}; - invocation.invoke( - opCtx, setClusterParameterRequest, _doc.getClusterParameterTime(), kMajorityWriteConcern); + invocation.invoke(opCtx, + setClusterParameterRequest, + _doc.getClusterParameterTime(), + kMajorityWriteConcern, + true /* skipValidation */); } const ConfigsvrCoordinatorMetadata& SetClusterParameterCoordinator::metadata() const { @@ -202,7 +237,7 @@ ExecutorFuture SetClusterParameterCoordinator::_runImpl( ShardingLogging::get(opCtx)->logChange( opCtx, "setClusterParameter.start", - NamespaceString::kClusterParametersNamespace.toString(), + toStringForLogging(NamespaceString::kClusterParametersNamespace), _doc.getParameter(), kMajorityWriteConcern, catalogManager->localConfigShard(), @@ -233,7 +268,7 @@ ExecutorFuture SetClusterParameterCoordinator::_runImpl( ShardingLogging::get(opCtx)->logChange( opCtx, "setClusterParameter.end", - NamespaceString::kClusterParametersNamespace.toString(), + toStringForLogging(NamespaceString::kClusterParametersNamespace), _doc.getParameter(), kMajorityWriteConcern, catalogManager->localConfigShard(), diff --git a/src/mongo/db/s/config/set_cluster_parameter_coordinator.h b/src/mongo/db/s/config/set_cluster_parameter_coordinator.h index 13d6c755bb581..038f0cbf07200 100644 --- a/src/mongo/db/s/config/set_cluster_parameter_coordinator.h +++ b/src/mongo/db/s/config/set_cluster_parameter_coordinator.h @@ -29,9 +29,21 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/s/config/configsvr_coordinator.h" +#include "mongo/db/s/config/configsvr_coordinator_gen.h" #include "mongo/db/s/config/set_cluster_parameter_coordinator_document_gen.h" #include "mongo/db/session/internal_session_pool.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" namespace mongo { @@ -79,7 +91,7 @@ class SetClusterParameterCoordinator : public ConfigsvrCoordinator { template auto _buildPhaseHandler(const Phase& newPhase, Func&& handlerFn) { - return [=] { + return [=, this] { const auto& currPhase = _doc.getPhase(); if (currPhase > newPhase) { diff --git a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp index e89e9409772ea..4656559b13e74 100644 --- a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp +++ b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.cpp @@ -28,21 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/config/set_user_write_block_mode_coordinator.h" - -#include "mongo/base/checked_cast.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" +#include "mongo/db/commands/set_user_write_block_mode_gen.h" +#include 
"mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/config/set_user_write_block_mode_coordinator.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_util.h" #include "mongo/db/s/user_writes_recoverable_critical_section_service.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/write_concern.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -73,12 +95,13 @@ void sendSetUserWriteBlockModeCmdToAllShards(OperationContext* opCtx, const auto shardsvrSetUserWriteBlockModeCmd = makeShardsvrSetUserWriteBlockModeCommand(block, phase); - sharding_util::sendCommandToShards(opCtx, - shardsvrSetUserWriteBlockModeCmd.getDbName().db(), - CommandHelpers::appendMajorityWriteConcern( - shardsvrSetUserWriteBlockModeCmd.toBSON(osi.toBSON())), - allShards, - executor); + sharding_util::sendCommandToShards( + opCtx, + DatabaseNameUtil::serialize(shardsvrSetUserWriteBlockModeCmd.getDbName()), + CommandHelpers::appendMajorityWriteConcern( + shardsvrSetUserWriteBlockModeCmd.toBSON(osi.toBSON())), + allShards, + executor); } } // namespace diff --git a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h index d30bb66e4713d..b2e8661419120 100644 --- a/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h +++ b/src/mongo/db/s/config/set_user_write_block_mode_coordinator.h @@ -29,9 +29,21 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/s/config/configsvr_coordinator.h" +#include "mongo/db/s/config/configsvr_coordinator_gen.h" #include "mongo/db/s/config/set_user_write_block_mode_coordinator_document_gen.h" #include "mongo/db/session/internal_session_pool.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" namespace mongo { @@ -63,7 +75,7 @@ class SetUserWriteBlockModeCoordinator : public ConfigsvrCoordinator { template auto _buildPhaseHandler(const Phase& newPhase, Func&& handlerFn) { - return [=] { + return [=, this] { const auto& currPhase = _doc.getPhase(); if (currPhase > newPhase) { diff --git a/src/mongo/db/s/config/sharding_catalog_manager.cpp b/src/mongo/db/s/config/sharding_catalog_manager.cpp index 3b4cf2c915062..b7e9ad9f1d4a4 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager.cpp @@ -29,49 +29,108 @@ #include "mongo/db/s/config/sharding_catalog_manager.h" +#include #include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include #include #include -#include "mongo/db/auth/authorization_session_impl.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/cancelable_operation_context.h" #include "mongo/db/catalog/coll_mod.h" +#include "mongo/db/catalog/collection_options_gen.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/coll_mod_gen.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/dbmessage.h" #include "mongo/db/error_labels.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_group.h" #include "mongo/db/pipeline/document_source_lookup.h" +#include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_merge.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_union_with.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" -#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/config/index_on_config.h" +#include "mongo/db/s/config/placement_history_cleaner.h" #include "mongo/db/s/sharding_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/vector_clock.h" +#include "mongo/executor/connection_pool_stats.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata/impersonated_user_metadata.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/catalog/type_config_version.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" #include "mongo/s/client/shard_registry.h" -#include "mongo/s/database_version.h" #include "mongo/s/grid.h" #include "mongo/s/sharding_feature_flags_gen.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" #include 
"mongo/transport/service_entry_point.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/log_and_backoff.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -97,7 +156,7 @@ OpMsg runCommandInLocalTxn(OperationContext* opCtx, bob.append("startTransaction", true); } bob.append("autocommit", false); - bob.append(OperationSessionInfo::kTxnNumberFieldName, txnNumber); + bob.append(OperationSessionInfoFromClient::kTxnNumberFieldName, txnNumber); BSONObjBuilder lsidBuilder(bob.subobjStart("lsid")); opCtx->getLogicalSessionId()->serialize(&bob); @@ -148,10 +207,6 @@ BSONObj commitOrAbortTransaction(OperationContext* opCtx, // that have been run on this opCtx would have set the timeout in the locker on the opCtx, but // commit should not have a lock timeout. auto newClient = getGlobalServiceContext()->makeClient("ShardingCatalogManager"); - { - stdx::lock_guard lk(*newClient); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto newOpCtx = cc().makeOperationContext(); newOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); @@ -166,7 +221,7 @@ BSONObj commitOrAbortTransaction(OperationContext* opCtx, BSONObjBuilder bob; bob.append(cmdName, true); bob.append("autocommit", false); - bob.append(OperationSessionInfo::kTxnNumberFieldName, txnNumber); + bob.append(OperationSessionInfoFromClient::kTxnNumberFieldName, txnNumber); bob.append(WriteConcernOptions::kWriteConcernField, writeConcern.toBSON()); BSONObjBuilder lsidBuilder(bob.subobjStart("lsid")); @@ -503,7 +558,7 @@ void setInitializationTimeOnPlacementHistory( std::vector placementResponseForPreInitQueries) { /* * The initialization metadata of config.placementHistory is composed by two special docs, - * identified by kConfigsvrPlacementHistoryFcvMarkerNamespace: + * identified by kConfigPlacementHistoryInitializationMarker: * - initializationTimeInfo: contains the time of the initialization and an empty set of shards. * It will allow ShardingCatalogClient to serve accurate responses to historical placement * queries within the [initializationTime, +inf) range. @@ -513,13 +568,14 @@ void setInitializationTimeOnPlacementHistory( * placement queries within the [-inf, initializationTime) range. 
*/ NamespacePlacementType initializationTimeInfo; - initializationTimeInfo.setNss(NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace); + initializationTimeInfo.setNss( + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker); initializationTimeInfo.setTimestamp(initializationTime); initializationTimeInfo.setShards({}); NamespacePlacementType approximatedPlacementForPreInitQueries; approximatedPlacementForPreInitQueries.setNss( - NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace); + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker); approximatedPlacementForPreInitQueries.setTimestamp(Timestamp(0, 1)); approximatedPlacementForPreInitQueries.setShards(placementResponseForPreInitQueries); @@ -533,8 +589,8 @@ void setInitializationTimeOnPlacementHistory( NamespaceString::kConfigsvrPlacementHistoryNamespace); write_ops::DeleteOpEntry entryDelMarker; entryDelMarker.setQ( - BSON(NamespacePlacementType::kNssFieldName - << NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns())); + BSON(NamespacePlacementType::kNssFieldName << NamespaceStringUtil::serialize( + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker))); entryDelMarker.setMulti(true); deleteRequest.setDeletes({entryDelMarker}); @@ -561,11 +617,10 @@ void setInitializationTimeOnPlacementHistory( ScopeGuard resetWriteConcerGuard([opCtx, &originalWC] { opCtx->setWriteConcern(originalWC); }); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor( - Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()); + auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); txn_api::SyncTransactionWithRetries txn( - opCtx, sleepInlineExecutor, nullptr /* resourceYielder */, inlineExecutor); + opCtx, executor, nullptr /* resourceYielder */, inlineExecutor); txn.run(opCtx, transactionChain); LOGV2(7068807, @@ -618,7 +673,8 @@ ShardingCatalogManager::ShardingCatalogManager( _localCatalogClient(std::move(localCatalogClient)), _kShardMembershipLock("shardMembershipLock"), _kChunkOpLock("chunkOpLock"), - _kZoneOpLock("zoneOpLock") { + _kZoneOpLock("zoneOpLock"), + _kPlacementHistoryInitializationLock("placementHistoryInitializationOpLock") { startup(); } @@ -665,11 +721,9 @@ Status ShardingCatalogManager::initializeConfigDatabaseIfNeeded(OperationContext return status; } - if (feature_flags::gConfigSettingsSchema.isEnabled(serverGlobalParams.featureCompatibility)) { - status = _initConfigSettings(opCtx); - if (!status.isOK()) { - return status; - } + status = _initConfigSettings(opCtx); + if (!status.isOK()) { + return status; } // Make sure to write config.version last since we detect rollbacks of config.version and @@ -686,10 +740,6 @@ Status ShardingCatalogManager::initializeConfigDatabaseIfNeeded(OperationContext return Status::OK(); } -Status ShardingCatalogManager::upgradeConfigSettings(OperationContext* opCtx) { - return _initConfigSettings(opCtx); -} - ShardingCatalogClient* ShardingCatalogManager::localCatalogClient() { invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); return _localCatalogClient.get(); @@ -753,7 +803,8 @@ Status ShardingCatalogManager::_initConfigIndexes(OperationContext* opCtx) { if (feature_flags::gGlobalIndexesShardingCatalog.isEnabled( serverGlobalParams.featureCompatibility)) { - result = sharding_util::createShardingIndexCatalogIndexes(opCtx); + result = sharding_util::createShardingIndexCatalogIndexes( + opCtx, 
NamespaceString::kConfigsvrIndexCatalogNamespace); if (!result.isOK()) { return result; } @@ -962,12 +1013,18 @@ Status ShardingCatalogManager::_notifyClusterOnNewDatabases( // Setup an AlternativeClientRegion and a non-interruptible Operation Context to ensure that // the notification may be also sent out while the node is stepping down. auto altClient = opCtx->getServiceContext()->makeClient("_notifyClusterOnNewDatabases"); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + mongo::stdx::lock_guard lk(*altClient.get()); + altClient.get()->setSystemOperationUnkillableByStepdown(lk); + } AlternativeClientRegion acr(altClient); auto altOpCtxHolder = cc().makeOperationContext(); auto altOpCtx = altOpCtxHolder.get(); // Compose the request and decorate it with the needed write concern and auth parameters. - ShardsvrNotifyShardingEventRequest request(EventTypeEnum::kDatabasesAdded, event.toBSON()); + ShardsvrNotifyShardingEventRequest request(notify_sharding_event::kDatabasesAdded, + event.toBSON()); BSONObjBuilder bob; request.serialize( BSON(WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority), &bob); @@ -984,17 +1041,30 @@ Status ShardingCatalogManager::_notifyClusterOnNewDatabases( size_t successfulNotifications = 0, incompatibleRecipients = 0, retriableFailures = 0; for (const auto& cmdResponse : responses) { - if (cmdResponse.swResponse.isOK()) { + const auto responseStatus = [&cmdResponse] { + if (!cmdResponse.swResponse.isOK()) { + return cmdResponse.swResponse.getStatus(); + } + + const auto& remoteCmdResponse = cmdResponse.swResponse.getValue().data; + if (auto remoteResponseStatus = getStatusFromCommandResult(remoteCmdResponse); + !remoteResponseStatus.isOK()) { + return remoteResponseStatus; + } + + return getWriteConcernStatusFromCommandResult(remoteCmdResponse); + }(); + + if (responseStatus.isOK()) { ++successfulNotifications; } else { LOGV2_WARNING(7175401, "Failed to send sharding event notification", "recipient"_attr = cmdResponse.shardId, - "error"_attr = cmdResponse.swResponse.getStatus()); - if (cmdResponse.swResponse.getStatus().code() == ErrorCodes::CommandNotFound) { + "error"_attr = responseStatus); + if (responseStatus == ErrorCodes::CommandNotFound) { ++incompatibleRecipients; - } else if (ErrorCodes::isA( - cmdResponse.swResponse.getStatus().code())) { + } else if (ErrorCodes::isA(responseStatus.code())) { ++retriableFailures; } } @@ -1104,7 +1174,7 @@ boost::optional ShardingCatalogManager::findOneConfigDocumentInTxn( BSONObj ShardingCatalogManager::findOneConfigDocument(OperationContext* opCtx, const NamespaceString& nss, const BSONObj& query) { - invariant(nss.dbName().db() == DatabaseName::kConfig.db()); + invariant(nss.isConfigDB()); FindCommandRequest findCommand(nss); findCommand.setFilter(query); @@ -1117,11 +1187,10 @@ void ShardingCatalogManager::withTransactionAPI(OperationContext* opCtx, const NamespaceString& namespaceForInitialFind, txn_api::Callback callback) { auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor( - Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()); + auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto txn = txn_api::SyncTransactionWithRetries( - opCtx, sleepInlineExecutor, nullptr /* resourceYielder */, inlineExecutor); + opCtx, executor, nullptr /* resourceYielder */, inlineExecutor); txn.run(opCtx, [innerCallback = std::move(callback), namespaceForInitialFind](const 
txn_api::TransactionClient& txnClient, @@ -1157,10 +1226,6 @@ void ShardingCatalogManager::withTransaction( AlternativeSessionRegion asr(opCtx); auto* const client = asr.opCtx()->getClient(); - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } asr.opCtx()->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); AuthorizationSession::get(client)->grantInternalAuthorization(client); TxnNumber txnNumber = 0; @@ -1257,6 +1322,42 @@ void ShardingCatalogManager::initializePlacementHistory(OperationContext* opCtx) * - incoming (or not yet materialized) DDLs will insert more recent placement information, * which will have the effect of "updating" the snapshot produced by this function. */ + Lock::ExclusiveLock lk(opCtx, _kPlacementHistoryInitializationLock); + + // Suspend the periodic cleanup job that runs in background. + ScopeGuard restartHistoryCleaner( + [opCtx]() { PlacementHistoryCleaner::get(opCtx)->resume(opCtx); }); + + PlacementHistoryCleaner::get(opCtx)->pause(); + + // Delete any existing document that has been already majority committed. + { + repl::ReadConcernArgs::get(opCtx) = + repl::ReadConcernArgs(repl::ReadConcernLevel::kMajorityReadConcern); + + write_ops::DeleteCommandRequest deleteOp( + NamespaceString::kConfigsvrPlacementHistoryNamespace); + deleteOp.setDeletes({[&] { + write_ops::DeleteOpEntry entry; + entry.setQ({}); + entry.setMulti(true); + return entry; + }()}); + + uassertStatusOK(_localConfigShard->runCommandWithFixedRetryAttempts( + opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + NamespaceString::kConfigsvrPlacementHistoryNamespace.db().toString(), + deleteOp.toBSON(BSON(WriteConcernOptions::kWriteConcernField + << ShardingCatalogClient::kLocalWriteConcern.toBSON())), + Shard::RetryPolicy::kNotIdempotent)); + + const auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient()); + auto awaitReplicationResult = repl::ReplicationCoordinator::get(opCtx)->awaitReplication( + opCtx, replClient.getLastOp(), ShardingCatalogClient::kMajorityWriteConcern); + } + + // Set the time of the initialization. Timestamp initializationTime; std::vector shardsAtInitializationTime; { @@ -1294,6 +1395,11 @@ void ShardingCatalogManager::initializePlacementHistory(OperationContext* opCtx) // internal client credentials). { auto altClient = opCtx->getServiceContext()->makeClient("initializePlacementHistory"); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
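The pause/resume dance above relies on a scope guard so the periodic cleanup job is restarted on every exit path, including early returns and exceptions. A minimal standalone sketch of that pattern (plain C++; `ScopeGuard` here is a simplified stand-in for the utility used in the tree, and `BackgroundCleaner` is a made-up type):

```cpp
// Illustrative only -- a simplified stand-in for the ScopeGuard/pause-resume
// pattern used above: the background job is paused for the duration of the
// initialization and resumed on every exit path, including exceptions.
#include <functional>
#include <iostream>
#include <utility>

class ScopeGuard {
public:
    explicit ScopeGuard(std::function<void()> fn) : _fn(std::move(fn)) {}
    ~ScopeGuard() {
        if (_fn)
            _fn();
    }
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    std::function<void()> _fn;
};

struct BackgroundCleaner {
    void pause() { std::cout << "cleaner paused\n"; }
    void resume() { std::cout << "cleaner resumed\n"; }
};

void initialize(BackgroundCleaner& cleaner) {
    ScopeGuard restartCleaner([&cleaner] { cleaner.resume(); });
    cleaner.pause();
    // ... rebuild the collection contents here; if this throws, the guard
    // still resumes the cleaner when the stack unwinds ...
}

int main() {
    BackgroundCleaner cleaner;
    initialize(cleaner);
}
```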
+ { + stdx::lock_guard lk(*altClient.get()); + altClient.get()->setSystemOperationUnkillableByStepdown(lk); + } AuthorizationSession::get(altClient.get())->grantInternalAuthorization(altClient.get()); AlternativeClientRegion acr(altClient); auto executor = @@ -1361,7 +1467,7 @@ void ShardingCatalogManager::cleanUpPlacementHistory(OperationContext* opCtx, * }, * { * $match : { - * _id : { $ne : "kConfigsvrPlacementHistoryFcvMarkerNamespace"} + * _id : { $ne : "kConfigPlacementHistoryInitializationMarker"} * } * } * ]) @@ -1375,9 +1481,9 @@ void ShardingCatalogManager::cleanUpPlacementHistory(OperationContext* opCtx, << "$" + NamespacePlacementType::kNssFieldName << "mostRecentTimestamp" << BSON("$max" << "$" + NamespacePlacementType::kTimestampFieldName))); - pipeline.addStage( - BSON("_id" << BSON( - "$ne" << NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns()))); + pipeline.addStage(BSON( + "_id" << BSON("$ne" << NamespaceStringUtil::serialize( + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker)))); auto aggRequest = pipeline.buildAsAggregateCommandRequest(); @@ -1398,7 +1504,8 @@ void ShardingCatalogManager::cleanUpPlacementHistory(OperationContext* opCtx, const auto minTimeToPreserve = std::min(timeOfMostRecentDoc, earliestClusterTime); stmt.setQ(BSON(NamespacePlacementType::kNssFieldName - << nss.ns() << NamespacePlacementType::kTimestampFieldName + << NamespaceStringUtil::serialize(nss) + << NamespacePlacementType::kTimestampFieldName << BSON("$lt" << minTimeToPreserve))); stmt.setMulti(true); deleteStatements.emplace_back(std::move(stmt)); @@ -1430,22 +1537,4 @@ void ShardingCatalogManager::cleanUpPlacementHistory(OperationContext* opCtx, LOGV2_DEBUG(7068808, 2, "Cleaning up placement history - done deleting entries"); } -void ShardingCatalogManager::_performLocalNoopWriteWithWAllWriteConcern(OperationContext* opCtx, - StringData msg) { - tenant_migration_access_blocker::performNoopWrite(opCtx, msg); - - auto allMembersWriteConcern = - WriteConcernOptions(repl::ReplSetConfig::kConfigAllWriteConcernName, - WriteConcernOptions::SyncMode::NONE, - // Defaults to no timeout if none was set. 
- opCtx->getWriteConcern().wTimeout); - - const auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient()); - auto awaitReplicationResult = repl::ReplicationCoordinator::get(opCtx)->awaitReplication( - opCtx, replClient.getLastOp(), allMembersWriteConcern); - uassertStatusOKWithContext(awaitReplicationResult.status, - str::stream() << "Waiting for replication of noop with message: \"" - << msg << "\" failed"); -} - } // namespace mongo diff --git a/src/mongo/db/s/config/sharding_catalog_manager.h b/src/mongo/db/s/config/sharding_catalog_manager.h index 4f2bff1a33299..4a7b9b5c29b01 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager.h +++ b/src/mongo/db/s/config/sharding_catalog_manager.h @@ -29,15 +29,44 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/fetcher.h" +#include "mongo/client/read_preference.h" +#include "mongo/client/remote_command_targeter.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands/notify_sharding_event_gen.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/transaction/transaction_api.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/s/catalog/sharding_catalog_client.h" @@ -45,10 +74,14 @@ #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" +#include "mongo/s/database_version.h" #include "mongo/s/shard_key_pattern.h" #include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/util/functional.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -485,7 +518,7 @@ class ShardingCatalogManager { StatusWith addShard(OperationContext* opCtx, const std::string* shardProposedName, const ConnectionString& shardConnectionString, - bool isCatalogShard); + bool isConfigShard); /** * Inserts the config server shard identity document using a sentinel shard id. Requires the @@ -518,18 +551,6 @@ class ShardingCatalogManager { */ Status setFeatureCompatibilityVersionOnShards(OperationContext* opCtx, const BSONObj& cmdObj); - /* - * Rename collection metadata as part of a renameCollection operation. 
- * - * - Updates the FROM collection entry if the source collection is sharded - * - Removes the TO collection entry if the target collection was sharded - */ - void renameShardedMetadata(OperationContext* opCtx, - const NamespaceString& from, - const NamespaceString& to, - const WriteConcernOptions& writeConcern, - boost::optional optFromCollType); - // // For Diagnostics // @@ -565,19 +586,6 @@ class ShardingCatalogManager { bool force, const Timestamp& validAfter); - /** - * Creates config.settings (if needed) and adds a schema to the collection. - */ - Status upgradeConfigSettings(OperationContext* opCtx); - - /** - * Set `onCurrentShardSince` to the same value as `history[0].validAfter` for all config.chunks - * entries. - * Only called on the FCV upgrade - * TODO (SERVER-72791): Remove the method once FCV 7.0 becomes last-lts. - */ - void setOnCurrentShardSinceFieldOnChunks(OperationContext* opCtx); - /** * Returns a catalog client that will always run commands locally. Can only be used on a * config server node. @@ -666,7 +674,7 @@ class ShardingCatalogManager { std::shared_ptr targeter, const std::string* shardProposedName, const ConnectionString& connectionString, - bool isCatalogShard); + bool isConfigShard); /** * Drops the sessions collection on the specified host. @@ -735,6 +743,28 @@ class ShardingCatalogManager { */ void _setUserWriteBlockingStateOnNewShard(OperationContext* opCtx, RemoteCommandTargeter* targeter); + + using FetcherDocsCallbackFn = std::function& batch)>; + using FetcherStatusCallbackFn = std::function; + + /** + * Creates a Fetcher task for fetching documents in the given collection on the given shard. + * After the task is scheduled, applies 'processDocsCallback' to each fetched batch and + * 'processStatusCallback' to the fetch status. + */ + std::unique_ptr _createFetcher(OperationContext* opCtx, + std::shared_ptr targeter, + const NamespaceString& nss, + const repl::ReadConcernLevel& readConcernLevel, + FetcherDocsCallbackFn processDocsCallback, + FetcherStatusCallbackFn processStatusCallback); + + /** + * Gets the cluster time keys on the given shard and then saves them locally. + */ + Status _pullClusterTimeKeys(OperationContext* opCtx, + std::shared_ptr targeter); + /** * Given a vector of cluster parameters in disk format, sets them locally. */ @@ -827,13 +857,6 @@ class ShardingCatalogManager { const ChunkVersion& collPlacementVersion, const std::vector& splitPoints); - /** - * Performs a noop write locally on the current process and waits for all nodes to replicate it. - * - * TODO SERVER-75391: Remove. - */ - void _performLocalNoopWriteWithWAllWriteConcern(OperationContext* opCtx, StringData msg); - // The owning service context ServiceContext* const _serviceContext; @@ -890,6 +913,12 @@ class ShardingCatalogManager { * taking this. */ Lock::ResourceMutex _kZoneOpLock; + + /** + * Lock for serializing internal/external initialization requests of config.placementHistory. + * Regular DDL and chunk operations over the same collection may be run concurrently. 
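A rough standalone picture of what the new initialization lock buys (plain C++; `std::mutex` stands in for the `Lock::ResourceMutex`, and the names are illustrative): concurrent initialization requests serialize behind one exclusive lock, while unrelated DDL and chunk operations never acquire it.

```cpp
// Illustrative only -- sketches the serialization the new lock provides:
// concurrent initialization requests queue up behind a single exclusive
// lock, while unrelated operations do not touch it.
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

std::mutex placementHistoryInitializationLock;  // stand-in for the ResourceMutex

void initializePlacementHistory(int requester) {
    std::lock_guard<std::mutex> lk(placementHistoryInitializationLock);
    std::cout << "initialization run by requester " << requester << "\n";
}

int main() {
    std::vector<std::thread> threads;
    for (int i = 0; i < 3; ++i)
        threads.emplace_back(initializePlacementHistory, i);
    for (auto& t : threads)
        t.join();
}
```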
+ */ + Lock::ResourceMutex _kPlacementHistoryInitializationLock; }; } // namespace mongo diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp index bc446b2a279a4..4c552647a20b6 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_test.cpp @@ -28,44 +28,96 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/commands.h" +#include "mongo/db/client.h" +#include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" #include "mongo/db/commands/set_cluster_parameter_invocation.h" #include "mongo/db/commands/set_feature_compatibility_version_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/multitenancy_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/add_shard_cmd_gen.h" #include "mongo/db/s/add_shard_util.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/transaction_coordinator_service.h" -#include "mongo/db/s/type_shard_identity.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/idl/cluster_server_parameter_common.h" -#include "mongo/idl/cluster_server_parameter_gen.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/time_proof_service.h" +#include "mongo/db/wire_version.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/metadata.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include 
"mongo/s/catalog/type_changelog.h" -#include "mongo/s/catalog/type_config_version.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_identity_loader.h" #include "mongo/s/database_version.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -107,6 +159,7 @@ class AddShardTest : public ConfigServerTestFixture { LogicalSessionCache::set(getServiceContext(), std::make_unique()); TransactionCoordinatorService::get(operationContext()) ->onShardingInitialization(operationContext(), true); + WaitForMajorityService::get(getServiceContext()).startup(getServiceContext()); _skipShardingEventNotificationFP = globalFailPointRegistry().find("shardingCatalogManagerSkipNotifyClusterOnNewDatabases"); @@ -115,27 +168,28 @@ class AddShardTest : public ConfigServerTestFixture { void tearDown() override { _skipShardingEventNotificationFP->setMode(FailPoint::off); + WaitForMajorityService::get(getServiceContext()).shutDown(); TransactionCoordinatorService::get(operationContext())->onStepDown(); ConfigServerTestFixture::tearDown(); } /** - * addShard validates the host as a shard. It calls "isMaster" on the host to determine what + * addShard validates the host as a shard. It calls "hello" on the host to determine what * kind of host it is -- mongos, regular mongod, config mongod -- and whether the replica set - * details are correct. "isMasterResponse" defines the response of the "isMaster" request and + * details are correct. "helloResponse" defines the response of the "hello" request and * should be a command response BSONObj, or a failed Status. * * ShardingTestFixture::expectGetShards() should be called before this function, otherwise - * addShard will never reach the isMaster command -- a find query is called first. + * addShard will never reach the "hello" command -- a find query is called first. 
*/ - void expectIsMaster(const HostAndPort& target, StatusWith isMasterResponse) { - onCommandForAddShard([&, target, isMasterResponse](const RemoteCommandRequest& request) { + void expectHello(const HostAndPort& target, StatusWith helloResponse) { + onCommandForAddShard([&, target, helloResponse](const RemoteCommandRequest& request) { ASSERT_EQ(request.target, target); ASSERT_EQ(request.dbname, "admin"); - ASSERT_BSONOBJ_EQ(request.cmdObj, BSON("isMaster" << 1)); + ASSERT_BSONOBJ_EQ(request.cmdObj, BSON("hello" << 1)); ASSERT_BSONOBJ_EQ(rpc::makeEmptyMetadata(), request.metadata); - return isMasterResponse; + return helloResponse; }); } @@ -158,7 +212,7 @@ class AddShardTest : public ConfigServerTestFixture { void expectCollectionDrop(const HostAndPort& target, const NamespaceString& nss) { onCommandForAddShard([&](const RemoteCommandRequest& request) { ASSERT_EQ(request.target, target); - ASSERT_EQ(request.dbname, nss.db()); + ASSERT_EQ(request.dbname, nss.db_forTest()); ASSERT_BSONOBJ_EQ(request.cmdObj, BSON("drop" << nss.coll() << "writeConcern" << BSON("w" @@ -228,7 +282,8 @@ class AddShardTest : public ConfigServerTestFixture { std::vector dbnamesOnTarget; for (const auto& tenantId : tenantsOnTarget) { dbnamesOnTarget.push_back( - DatabaseName(tenantId, DatabaseName::kConfig.db()).toStringWithTenantId()); + DatabaseName::createDatabaseName_forTest(tenantId, DatabaseName::kConfig.db()) + .toStringWithTenantId_forTest()); } if (gMultitenancySupport) { @@ -258,7 +313,8 @@ class AddShardTest : public ConfigServerTestFixture { ASSERT_EQ(results.size(), 1); ASSERT_EQ(results[0]["_id"].String(), "testStrClusterParameter"); ASSERT_EQ(results[0]["strData"].String(), - DatabaseName(tenantId, DatabaseName::kConfig.db()).toStringWithTenantId()); + DatabaseName::createDatabaseName_forTest(tenantId, DatabaseName::kConfig.db()) + .toStringWithTenantId_forTest()); } } @@ -313,7 +369,7 @@ class AddShardTest : public ConfigServerTestFixture { for (auto& param : params) { SetClusterParameter setClusterParameterRequest(param); setClusterParameterRequest.setDbName( - DatabaseName(tenantId, DatabaseName::kAdmin.db())); + DatabaseName::createDatabaseName_forTest(tenantId, DatabaseName::kAdmin.db())); DBDirectClient client(operationContext()); ClusterParameterDBClientService dbService(client); std::unique_ptr parameterService = @@ -374,22 +430,43 @@ class AddShardTest : public ConfigServerTestFixture { dbnamesOnTarget.erase(it); ASSERT_BSONOBJ_EQ(request.cmdObj, BSON("find" << NamespaceString::kClusterParametersNamespace.coll() - << "maxTimeMS" << 30000 << "readConcern" + << "maxTimeMS" << 60000 << "readConcern" << BSON("level" << "majority"))); auto cursorRes = - CursorResponse(NamespaceString::createNamespaceString_forTest( - DatabaseName(request.dbname), - NamespaceString::kClusterParametersNamespace.coll()), - 0, - {BSON("_id" - << "testStrClusterParameter" - << "strData" << request.dbname)}); + CursorResponse( + NamespaceString::createNamespaceString_forTest( + request.dbname, NamespaceString::kClusterParametersNamespace.coll()), + 0, + {BSON("_id" + << "testStrClusterParameter" + << "strData" << request.dbname)}); return cursorRes.toBSON(CursorResponse::ResponseType::InitialResponse); }); } } + void expectClusterTimeKeysPullRequest(const HostAndPort& target) { + onCommandForAddShard([&](const RemoteCommandRequest& request) { + ASSERT_EQ(request.target, target); + ASSERT_BSONOBJ_EQ(request.cmdObj, + BSON("find" << NamespaceString::kKeysCollectionNamespace.coll() + << "maxTimeMS" << 60000 << 
"readConcern" + << BSON("level" + << "local"))); + + KeysCollectionDocument key(1); + key.setKeysCollectionDocumentBase( + {"dummy", TimeProofService::generateRandomKey(), LogicalTime(Timestamp(105, 0))}); + auto cursorRes = CursorResponse( + NamespaceString::createNamespaceString_forTest( + request.dbname, NamespaceString::kKeysCollectionNamespace.coll()), + 0, + {key.toBSON()}); + return cursorRes.toBSON(CursorResponse::ResponseType::InitialResponse); + }); + } + /** * Waits for a request for the shardIdentity document to be upserted into a shard from the * config server on addShard. @@ -437,7 +514,7 @@ class AddShardTest : public ConfigServerTestFixture { ASSERT_EQUALS(expectedHost, request.target); // Check that the db name in the request matches the expected db name. - ASSERT_EQUALS(expectedNss.db(), request.dbname); + ASSERT_EQUALS(expectedNss.db_forTest(), request.dbname); const auto addShardOpMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); @@ -491,7 +568,7 @@ class AddShardTest : public ConfigServerTestFixture { ASSERT_EQUALS(expectedHost, request.target); // Check that the db name in the request matches the expected db name. - ASSERT_EQUALS(expectedNss.db(), request.dbname); + ASSERT_EQUALS(expectedNss.db_forTest(), request.dbname); const auto opMsgRequest = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); const auto updateOp = UpdateOp::parse(opMsgRequest); @@ -541,7 +618,6 @@ class AddShardTest : public ConfigServerTestFixture { operationContext(), expectedDB.getName(), repl::ReadConcernLevel::kMajorityReadConcern); ASSERT_EQUALS(expectedDB.getName(), foundDB.getName()); ASSERT_EQUALS(expectedDB.getPrimary(), foundDB.getPrimary()); - ASSERT_EQUALS(expectedDB.getSharded(), foundDB.getSharded()); } /** @@ -644,9 +720,9 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) { ASSERT_EQUALS(expectedShardName, shardName); }); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "maxWireVersion" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); // Get databases list from new shard expectListDatabases( @@ -660,6 +736,9 @@ TEST_F(AddShardTest, StandaloneBasicSuccess) { expectCollectionDrop( shardTarget, NamespaceString::createNamespaceString_forTest("config", "system.sessions")); + // The shard receives a find to pull all clusterTime keys from the new shard. + expectClusterTimeKeysPullRequest(shardTarget); + // The shard receives the _addShard command expectAddShardCmdReturnSuccess(shardTarget, expectedShardName); @@ -732,9 +811,9 @@ TEST_F(AddShardTest, StandaloneBasicPushSuccess) { ASSERT_EQUALS(expectedShardName, shardName); }); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "maxWireVersion" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); // Get databases list from new shard expectListDatabases( @@ -748,6 +827,9 @@ TEST_F(AddShardTest, StandaloneBasicPushSuccess) { expectCollectionDrop( shardTarget, NamespaceString::createNamespaceString_forTest("config", "system.sessions")); + // The shard receives a find to pull all clusterTime keys from the new shard. 
+ expectClusterTimeKeysPullRequest(shardTarget); + // The shard receives the _addShard command expectAddShardCmdReturnSuccess(shardTarget, expectedShardName); @@ -814,9 +896,9 @@ TEST_F(AddShardTest, StandaloneMultitenantPullSuccess) { ASSERT_EQUALS(expectedShardName, shardName); }); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "maxWireVersion" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); // Get databases list from new shard expectListDatabases( @@ -830,6 +912,9 @@ TEST_F(AddShardTest, StandaloneMultitenantPullSuccess) { expectCollectionDrop( shardTarget, NamespaceString::createNamespaceString_forTest("config", "system.sessions")); + // The shard receives a find to pull all clusterTime keys from the new shard. + expectClusterTimeKeysPullRequest(shardTarget); + // The shard receives the _addShard command expectAddShardCmdReturnSuccess(shardTarget, expectedShardName); @@ -918,9 +1003,9 @@ TEST_F(AddShardTest, StandaloneMultitenantPushSuccess) { ASSERT_EQUALS(expectedShardName, shardName); }); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "maxWireVersion" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); // Get databases list from new shard expectListDatabases( @@ -934,6 +1019,9 @@ TEST_F(AddShardTest, StandaloneMultitenantPushSuccess) { expectCollectionDrop( shardTarget, NamespaceString::createNamespaceString_forTest("config", "system.sessions")); + // The shard receives a find to pull all clusterTime keys from the new shard. + expectClusterTimeKeysPullRequest(shardTarget); + // The shard receives the _addShard command expectAddShardCmdReturnSuccess(shardTarget, expectedShardName); @@ -1009,9 +1097,9 @@ TEST_F(AddShardTest, StandaloneGenerateName) { ASSERT_EQUALS(expectedShardName, shardName); }); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "maxWireVersion" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); // Get databases list from new shard expectListDatabases( @@ -1025,6 +1113,9 @@ TEST_F(AddShardTest, StandaloneGenerateName) { expectCollectionDrop( shardTarget, NamespaceString::createNamespaceString_forTest("config", "system.sessions")); + // The shard receives a find to pull all clusterTime keys from the new shard. 
+ expectClusterTimeKeysPullRequest(shardTarget); + // The shard receives the _addShard command expectAddShardCmdReturnSuccess(shardTarget, expectedShardName); @@ -1113,7 +1204,7 @@ TEST_F(AddShardTest, UnreachableHost) { }); Status hostUnreachableStatus = Status(ErrorCodes::HostUnreachable, "host unreachable"); - expectIsMaster(shardTarget, hostUnreachableStatus); + expectHello(shardTarget, hostUnreachableStatus); future.timed_get(kLongFutureTimeout); } @@ -1138,9 +1229,9 @@ TEST_F(AddShardTest, AddMongosAsShard) { ASSERT_EQUALS(ErrorCodes::IllegalOperation, status); }); - expectIsMaster(shardTarget, - BSON("msg" - << "isdbgrid")); + expectHello(shardTarget, + BSON("msg" + << "isdbgrid")); future.timed_get(kLongFutureTimeout); } @@ -1166,10 +1257,10 @@ TEST_F(AddShardTest, AddReplicaSetShardAsStandalone) { ASSERT_STRING_CONTAINS(status.getStatus().reason(), "use replica set url format"); }); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "myOtherSet" << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); future.timed_get(kLongFutureTimeout); } @@ -1196,9 +1287,9 @@ TEST_F(AddShardTest, AddStandaloneHostShardAsReplicaSet) { ASSERT_STRING_CONTAINS(status.getStatus().reason(), "host did not return a set name"); }); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "maxWireVersion" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); future.timed_get(kLongFutureTimeout); } @@ -1225,10 +1316,10 @@ TEST_F(AddShardTest, ReplicaSetMistmatchedReplicaSetName) { ASSERT_STRING_CONTAINS(status.getStatus().reason(), "does not match the actual set name"); }); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "myOtherSet" << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); future.timed_get(kLongFutureTimeout); } @@ -1257,10 +1348,10 @@ TEST_F(AddShardTest, ShardIsCSRSConfigServer) { }); BSONObj commandResponse = - BSON("ok" << 1 << "ismaster" << true << "setName" + BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "config" << "configsvr" << true << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); future.timed_get(kLongFutureTimeout); } @@ -1290,11 +1381,11 @@ TEST_F(AddShardTest, ReplicaSetMissingHostsProvidedInSeedList) { BSONArrayBuilder hosts; hosts.append("host1:12345"); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "mySet" << "hosts" << hosts.arr() << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); future.timed_get(kLongFutureTimeout); } @@ -1325,11 +1416,11 @@ TEST_F(AddShardTest, AddShardWithNameConfigFails) { BSONArrayBuilder hosts; hosts.append("host1:12345"); hosts.append("host2:12345"); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << 
"setName" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "mySet" << "hosts" << hosts.arr() << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); future.timed_get(kLongFutureTimeout); } @@ -1371,11 +1462,11 @@ TEST_F(AddShardTest, ShardContainsExistingDatabase) { BSONArrayBuilder hosts; hosts.append("host1:12345"); hosts.append("host2:12345"); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "mySet" << "hosts" << hosts.arr() << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); expectListDatabases(shardTarget, {BSON("name" << existingDB.getName())}); @@ -1414,11 +1505,11 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) { BSONArrayBuilder hosts; hosts.append("host1:12345"); hosts.append("host2:12345"); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "mySet" << "hosts" << hosts.arr() << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); // Get databases list from new shard expectListDatabases(shardTarget, std::vector{BSON("name" << discoveredDB.getName())}); @@ -1426,6 +1517,9 @@ TEST_F(AddShardTest, SuccessfullyAddReplicaSet) { expectCollectionDrop( shardTarget, NamespaceString::createNamespaceString_forTest("config", "system.sessions")); + // The shard receives a find to pull all clusterTime keys from the new shard. 
+ expectClusterTimeKeysPullRequest(shardTarget); + // The shard receives the _addShard command expectAddShardCmdReturnSuccess(shardTarget, expectedShardName); @@ -1486,16 +1580,17 @@ TEST_F(AddShardTest, SuccessfullyAddConfigShard) { BSONArrayBuilder hosts; hosts.append("host1:12345"); hosts.append("host2:12345"); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "mySet" << "hosts" << hosts.arr() << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); // Get databases list from new shard expectListDatabases(shardTarget, std::vector{BSON("name" << discoveredDB.getName())}); - expectCollectionDrop(shardTarget, NamespaceString("config", "system.sessions")); + expectCollectionDrop( + shardTarget, NamespaceString::createNamespaceString_forTest("config", "system.sessions")); // Should not run _addShard command, touch user_writes_critical_sections, setParameter, setFCV @@ -1545,11 +1640,11 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) { BSONArrayBuilder hosts; hosts.append("host1:12345"); hosts.append("host2:12345"); - BSONObj commandResponse = BSON("ok" << 1 << "ismaster" << true << "setName" + BSONObj commandResponse = BSON("ok" << 1 << "isWritablePrimary" << true << "setName" << "mySet" << "hosts" << hosts.arr() << "maxWireVersion" << WireVersion::LATEST_WIRE_VERSION); - expectIsMaster(shardTarget, commandResponse); + expectHello(shardTarget, commandResponse); // Get databases list from new shard expectListDatabases(shardTarget, std::vector{BSON("name" << discoveredDB.getName())}); @@ -1557,6 +1652,9 @@ TEST_F(AddShardTest, ReplicaSetExtraHostsDiscovered) { expectCollectionDrop( shardTarget, NamespaceString::createNamespaceString_forTest("config", "system.sessions")); + // The shard receives a find to pull all clusterTime keys from the new shard. + expectClusterTimeKeysPullRequest(shardTarget); + // The shard receives the _addShard command expectAddShardCmdReturnSuccess(shardTarget, expectedShardName); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp index db5c2fb64f102..a2c48960be5a7 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_add_shard_to_zone_test.cpp @@ -27,14 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/client/read_preference.h" -#include "mongo/db/namespace_string.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp index 377d17292ff04..897ea44bba774 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_assign_key_range_to_zone_test.cpp @@ -27,19 +27,46 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/read_preference.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" #include "mongo/s/client/shard.h" +#include "mongo/s/grid.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -83,7 +110,7 @@ class AssignKeyRangeToZoneTestFixture : public ConfigServerTestFixture { */ void assertNoZoneDocWithNamespace(NamespaceString ns) { auto findStatus = findOneOnConfigCollection( - operationContext(), TagsType::ConfigNS, BSON("ns" << ns.toString())); + operationContext(), TagsType::ConfigNS, BSON("ns" << ns.toString_forTest())); ASSERT_EQ(ErrorCodes::NoMatchingDocument, findStatus); } @@ -225,13 +252,14 @@ TEST_F(AssignKeyRangeToZoneTestFixture, RemoveZoneWithDollarPrefixedShardKeysSho // Manually insert a zone with illegal keys in order to bypass the checks performed by // assignKeyRangeToZone - BSONObj updateQuery(BSON("_id" << BSON(TagsType::ns(shardedNS().ns()) + BSONObj updateQuery(BSON("_id" << BSON(TagsType::ns(shardedNS().toString_forTest()) << TagsType::min(zoneWithDollarKeys.getMin())))); BSONObjBuilder updateBuilder; - updateBuilder.append( - "_id", BSON(TagsType::ns(shardedNS().ns()) << TagsType::min(zoneWithDollarKeys.getMin()))); - updateBuilder.append(TagsType::ns(), shardedNS().ns()); + updateBuilder.append("_id", + BSON(TagsType::ns(shardedNS().toString_forTest()) + << TagsType::min(zoneWithDollarKeys.getMin()))); + updateBuilder.append(TagsType::ns(), shardedNS().ns_forTest()); updateBuilder.append(TagsType::min(), zoneWithDollarKeys.getMin()); updateBuilder.append(TagsType::max(), zoneWithDollarKeys.getMax()); updateBuilder.append(TagsType::tag(), "TestZone"); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp index 0d937879cea1d..6b7d758d42348 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_bump_collection_version_and_change_metadata_test.cpp @@ -27,17 +27,45 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/transaction/transaction_participant.h" -#include "mongo/logv2/log.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp index 068188b21cacc..417cb554ee0d7 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_chunk_operations.cpp @@ -27,44 +27,105 @@ * it in the license file. 
*/ -#include "mongo/db/s/config/sharding_catalog_manager.h" - +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/client/connection_string.h" +#include "mongo/bson/util/builder.h" #include "mongo/client/read_preference.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/query/distinct_command_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_util.h" -#include "mongo/db/server_options.h" -#include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" #include "mongo/db/snapshot_window_options_gen.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/db/transaction/transaction_participant_gen.h" -#include "mongo/db/vector_clock_mutable.h" +#include "mongo/db/transaction/transaction_participant_resource_yielder.h" +#include "mongo/db/vector_clock.h" +#include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/catalog/type_tags.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_util.h" -#include "mongo/s/sharding_feature_flags_gen.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" +#include 
"mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" MONGO_FAIL_POINT_DEFINE(overrideHistoryWindowInSecs); @@ -143,7 +204,7 @@ BSONObj buildCountChunksInRangeCommand(const UUID& collectionUUID, AggregateCommandRequest countRequest(ChunkType::ConfigNS); BSONObjBuilder builder; - builder.append("aggregate", ChunkType::ConfigNS.ns()); + builder.append("aggregate", NamespaceStringUtil::serialize(ChunkType::ConfigNS)); BSONObjBuilder queryBuilder; queryBuilder << ChunkType::collectionUUID << collectionUUID; @@ -218,7 +279,7 @@ StatusWith getMaxChunkVersionFromQueryResponse( const auto& chunksVector = queryResponse.getValue().docs; if (chunksVector.empty()) { return {ErrorCodes::Error(50577), - str::stream() << "Collection '" << coll.getNss().ns() + str::stream() << "Collection '" << coll.getNss().toStringForErrorMsg() << "' no longer either exists, is sharded, or has chunks"}; } @@ -233,21 +294,22 @@ StatusWith getMaxChunkVersionFromQueryResponse( */ StatusWith> getCollectionAndVersion( OperationContext* opCtx, Shard* configShard, const NamespaceString& nss) { - auto findCollResponse = - configShard->exhaustiveFindOnConfig(opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), - {}, - 1); + auto findCollResponse = configShard->exhaustiveFindOnConfig( + opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + repl::ReadConcernLevel::kLocalReadConcern, + CollectionType::ConfigNS, + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), + {}, + 1); if (!findCollResponse.isOK()) { return findCollResponse.getStatus(); } if (findCollResponse.getValue().docs.empty()) { return {ErrorCodes::ConflictingOperationInProgress, - str::stream() << "Sharded collection '" << nss.ns() << "' no longer exists"}; + str::stream() << "Sharded collection '" << nss.toStringForErrorMsg() + << "' no longer exists"}; } const CollectionType coll(findCollResponse.getValue().docs[0]); @@ -302,14 +364,14 @@ void bumpCollectionMinorVersion(OperationContext* opCtx, Shard* configShard, const NamespaceString& nss, TxnNumber txnNumber) { - const auto findCollResponse = uassertStatusOK( - configShard->exhaustiveFindOnConfig(opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kLocalReadConcern, - CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), - {}, - 1)); + const auto findCollResponse = uassertStatusOK(configShard->exhaustiveFindOnConfig( + opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + repl::ReadConcernLevel::kLocalReadConcern, + CollectionType::ConfigNS, + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), + {}, + 1)); uassert( ErrorCodes::NamespaceNotFound, "Collection does not exist", !findCollResponse.docs.empty()); const CollectionType coll(findCollResponse.docs[0]); @@ -325,8 +387,8 @@ void bumpCollectionMinorVersion(OperationContext* opCtx, 1 /* limit */)); uassert(ErrorCodes::IncompatibleShardingMetadata, - str::stream() << "Tried to find max chunk version for collection '" << nss.ns() - << ", but found no chunks", + str::stream() << "Tried to find max chunk version for collection '" + << nss.toStringForErrorMsg() << ", but found no chunks", 
!findChunkResponse.docs.empty()); const auto newestChunk = uassertStatusOK(ChunkType::parseFromConfigBSON( @@ -400,7 +462,7 @@ void logMergeToChangelog(OperationContext* opCtx, ShardingLogging::get(opCtx)->logChange(opCtx, "merge", - nss.ns(), + NamespaceStringUtil::serialize(nss), logDetail.obj(), WriteConcernOptions(), std::move(configShard), @@ -456,9 +518,8 @@ void mergeAllChunksOnShardInTransaction(OperationContext* opCtx, auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - txn_api::SyncTransactionWithRetries txn(opCtx, sleepInlineExecutor, nullptr, inlineExecutor); + txn_api::SyncTransactionWithRetries txn(opCtx, executor, nullptr, inlineExecutor); txn.run(opCtx, updateChunksFn); } @@ -674,9 +735,8 @@ ShardingCatalogManager::_splitChunkInTransaction(OperationContext* opCtx, auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - txn_api::SyncTransactionWithRetries txn(opCtx, sleepInlineExecutor, nullptr, inlineExecutor); + txn_api::SyncTransactionWithRetries txn(opCtx, executor, nullptr, inlineExecutor); // TODO: SERVER-72431 Make split chunk commit idempotent, with that we won't need anymore the // transaction precondition and we will be able to remove the try/catch on the transaction run @@ -727,7 +787,8 @@ ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx, (requestTimestamp && coll.getTimestamp() != requestTimestamp)) { return {ErrorCodes::StaleEpoch, str::stream() << "splitChunk cannot split chunk " << range.toString() - << ". Epoch of collection '" << nss.ns() << "' has changed." + << ". Epoch of collection '" << nss.toStringForErrorMsg() + << "' has changed." << " Current epoch: " << coll.getEpoch() << ", cmd epoch: " << requestEpoch}; } @@ -764,7 +825,7 @@ ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx, ShardingLogging::get(opCtx)->logChange(opCtx, "split", - nss.ns(), + NamespaceStringUtil::serialize(nss), logDetail.obj(), WriteConcernOptions(), _localConfigShard, @@ -786,7 +847,7 @@ ShardingCatalogManager::commitChunkSplit(OperationContext* opCtx, const auto status = ShardingLogging::get(opCtx)->logChangeChecked(opCtx, "multi-split", - nss.ns(), + NamespaceStringUtil::serialize(nss), chunkDetail.obj(), WriteConcernOptions(), _localConfigShard, @@ -901,9 +962,8 @@ void ShardingCatalogManager::_mergeChunksInTransaction( auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - txn_api::SyncTransactionWithRetries txn(opCtx, sleepInlineExecutor, nullptr, inlineExecutor); + txn_api::SyncTransactionWithRetries txn(opCtx, executor, nullptr, inlineExecutor); txn.run(opCtx, updateChunksFn); } @@ -1071,7 +1131,8 @@ ShardingCatalogManager::commitMergeAllChunksOnShard(OperationContext* opCtx, // 1. Retrieve the collection entry and the initial version. const auto [coll, originalVersion] = uassertStatusOK(getCollectionAndVersion(opCtx, _localConfigShard.get(), nss)); - auto& collUuid = coll.getUuid(); + const auto& collUuid = coll.getUuid(); + const auto& keyPattern = coll.getKeyPattern(); auto newVersion = originalVersion; // 2. Retrieve the list of mergeable chunks belonging to the requested shard/collection. 
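// Illustrative sketch, not an additional hunk: the recurring substitutions applied
// throughout this file's hunks. Namespaces that end up in query filters or wire commands
// now go through NamespaceStringUtil::serialize(), namespaces in user-facing error text
// use toStringForErrorMsg(), and the internal transaction API is handed the fixed
// executor directly rather than a sleepable wrapper (the InlineExecutor template
// argument below is assumed; it is elided in the extracted hunks above).
//
//     auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor();
//     auto inlineExecutor = std::make_shared<executor::InlineExecutor>();
//     txn_api::SyncTransactionWithRetries txn(opCtx, executor, nullptr, inlineExecutor);
//     txn.run(opCtx, updateChunksFn);
//
//     // Query filter: serialized form of the namespace.
//     BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss));
//     // Error message: redaction-safe form of the namespace.
//     str::stream() << "Sharded collection '" << nss.toStringForErrorMsg()
//                   << "' no longer exists";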
@@ -1080,6 +1141,8 @@ ShardingCatalogManager::commitMergeAllChunksOnShard(OperationContext* opCtx, // - The last migration occurred before the current history window const auto oldestTimestampSupportedForHistory = getOldestTimestampSupportedForSnapshotHistory(opCtx); + + // TODO SERVER-78701 scan cursor rather than getting the whole vector of chunks const auto chunksBelongingToShard = uassertStatusOK(_localConfigShard->exhaustiveFindOnConfig( opCtx, @@ -1123,6 +1186,49 @@ ShardingCatalogManager::commitMergeAllChunksOnShard(OperationContext* opCtx, rangeOnCurrentShardSince = minValidTimestamp; }; + DBDirectClient zonesClient{opCtx}; + FindCommandRequest zonesFindRequest{TagsType::ConfigNS}; + zonesFindRequest.setSort(BSON(TagsType::min << 1)); + zonesFindRequest.setFilter(BSON(TagsType::ns(NamespaceStringUtil::serialize(nss)))); + zonesFindRequest.setProjection(BSON(TagsType::min << 1 << TagsType::max << 1)); + const auto zonesCursor{zonesClient.find(std::move(zonesFindRequest))}; + + // Initialize bounds lower than any zone [(), Minkey) so that it can be later advanced + boost::optional currentZone = ChunkRange(BSONObj(), keyPattern.globalMin()); + + auto advanceZoneIfNeeded = [&](const BSONObj& advanceZoneUpToThisBound) { + // This lambda advances zones taking into account the whole shard key space, + // also considering the "no-zone" as a zone itself. + // + // Example: + // - Zones set by the user: [1, 10), [20, 30), [30, 40) + // - Real zones: [Minkey, 1), [1, 10), [10, 20), [20, 30), [30, 40), [40, MaxKey) + // + // Returns a bool indicating whether the zone has changed or not. + bool zoneChanged = false; + while (currentZone && + advanceZoneUpToThisBound.woCompare(currentZone->getMin()) > 0 && + advanceZoneUpToThisBound.woCompare(currentZone->getMax()) > 0) { + zoneChanged = true; + if (zonesCursor->more()) { + const auto nextZone = zonesCursor->peekFirst(); + const auto nextZoneMin = keyPattern.extendRangeBound( + nextZone.getObjectField(TagsType::min()), false); + if (nextZoneMin.woCompare(currentZone->getMax()) > 0) { + currentZone = ChunkRange(currentZone->getMax(), nextZoneMin); + } else { + const auto nextZoneMax = keyPattern.extendRangeBound( + nextZone.getObjectField(TagsType::max()), false); + currentZone = ChunkRange(nextZoneMin, nextZoneMax); + zonesCursor->next(); // Advance cursor + } + } else { + currentZone = boost::none; + } + } + return zoneChanged; + }; + for (const auto& chunkDoc : chunksBelongingToShard) { const auto& chunkMin = chunkDoc.getObjectField(ChunkType::min()); const auto& chunkMax = chunkDoc.getObjectField(ChunkType::max()); @@ -1133,7 +1239,8 @@ ShardingCatalogManager::commitMergeAllChunksOnShard(OperationContext* opCtx, return t; }(); - if (rangeMax.woCompare(chunkMin) != 0) { + bool zoneChanged = advanceZoneIfNeeded(chunkMax); + if (rangeMax.woCompare(chunkMin) != 0 || zoneChanged) { processRange(); } @@ -1265,7 +1372,7 @@ ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, repl::ReadConcernLevel::kLocalReadConcern, CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), {}, 1)); uassert(ErrorCodes::ConflictingOperationInProgress, @@ -1288,8 +1395,8 @@ ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx, BSON(ChunkType::lastmod << -1), 1)); uassert(ErrorCodes::IncompatibleShardingMetadata, - str::stream() << "Tried to find max chunk version for collection '" << 
nss.ns() - << ", but found no chunks", + str::stream() << "Tried to find max chunk version for collection '" + << nss.toStringForErrorMsg() << ", but found no chunks", !findResponse.docs.empty()); const auto chunk = uassertStatusOK( @@ -1309,7 +1416,7 @@ ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx, if (currentCollectionPlacementVersion.epoch() != collectionEpoch || currentCollectionPlacementVersion.getTimestamp() != collectionTimestamp) { return {ErrorCodes::StaleEpoch, - str::stream() << "The epoch of collection '" << nss.ns() + str::stream() << "The epoch of collection '" << nss.toStringForErrorMsg() << "' has changed since the migration began. The config server's " "collection placement version epoch is now '" << currentCollectionPlacementVersion.epoch().toString() @@ -1352,8 +1459,8 @@ ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx, uassert(4914702, str::stream() << "Migrated chunk " << migratedChunk.toString() - << " from ns: " << nss.ns() << " not owned by donor " << fromShard - << " neither by recipient " << toShard, + << " from ns: " << nss.toStringForErrorMsg() << " not owned by donor " + << fromShard << " neither by recipient " << toShard, currentChunk.getShard() == fromShard); if (migratedChunk.getVersion().isNotComparableWith(currentChunk.getVersion()) || @@ -1408,9 +1515,9 @@ ShardingCatalogManager::commitChunkMigration(OperationContext* opCtx, if (!newHistory.empty() && newHistory.front().getValidAfter() >= validAfter) { return {ErrorCodes::IncompatibleShardingMetadata, - str::stream() << "The chunk history for chunk with namespace " << nss.ns() - << " and min key " << migratedChunk.getMin() - << " is corrupted. The last validAfter " + str::stream() << "The chunk history for chunk with namespace " + << nss.toStringForErrorMsg() << " and min key " + << migratedChunk.getMin() << " is corrupted. The last validAfter " << newHistory.back().getValidAfter().toString() << " is greater or equal to the new validAfter " << validAfter.toString()}; @@ -1566,7 +1673,7 @@ void ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx, uassertStatusOK(response.toStatus()); uassert(ErrorCodes::Error(5760502), - str::stream() << "No chunks found for collection " << nss.ns(), + str::stream() << "No chunks found for collection " << nss.toStringForErrorMsg(), response.getN() > 0); } @@ -1581,7 +1688,7 @@ void ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx, BSONObj(), boost::none)); uassert(ErrorCodes::Error(5760503), - str::stream() << "No chunks found for collection " << nss.ns(), + str::stream() << "No chunks found for collection " << nss.toStringForErrorMsg(), !findChunksResponse.docs.empty()); return std::move(findChunksResponse.docs); }(); @@ -1598,7 +1705,8 @@ void ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx, if (historyIsAt40) { uassert( ErrorCodes::Error(5760504), - str::stream() << "Chunk " << upgradeChunk.getName() << " in collection " << nss.ns() + str::stream() << "Chunk " << upgradeChunk.getName() << " in collection " + << nss.toStringForErrorMsg() << " indicates that it has been upgraded to version 4.0, but is " "missing the history field. 
This indicates a corrupted routing " "table and requires a manual intervention to be fixed.", @@ -1643,64 +1751,11 @@ void ShardingCatalogManager::upgradeChunksHistory(OperationContext* opCtx, opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, "admin", - BSON("_flushRoutingTableCacheUpdates" << nss.ns()), + BSON("_flushRoutingTableCacheUpdates" << NamespaceStringUtil::serialize(nss)), Shard::RetryPolicy::kIdempotent))); } } -void ShardingCatalogManager::setOnCurrentShardSinceFieldOnChunks(OperationContext* opCtx) { - { - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications - Lock::ExclusiveLock lk(opCtx, _kChunkOpLock); - - DBDirectClient dbClient(opCtx); - - // 1st match only chunks with non empty history - BSONObj query = BSON("history.0" << BSON("$exists" << true)); - - // 2nd use the $set aggregation stage pipeline to set `onCurrentShardSince` to the same - // value as the `validAfter` field on the first element of `history` array - // [ - // { - // $set: { - // onCurrentShardSince: { - // $getField: { field: "validAfter", input: { $first : "$history" } } - // } - // } - // ] - - BSONObj update = - BSON("$set" << BSON( - ChunkType::onCurrentShardSince() - << BSON("$getField" - << BSON("field" << ChunkHistoryBase::kValidAfterFieldName << "input" - << BSON("$first" << ("$" + ChunkType::history())))))); - - auto response = dbClient.runCommand([&] { - write_ops::UpdateCommandRequest updateOp(ChunkType::ConfigNS); - - updateOp.setUpdates({[&] { - // Sending a vector as an update to make sure we use an aggregation pipeline - write_ops::UpdateOpEntry entry; - entry.setQ(query); - entry.setU(std::vector{update.getOwned()}); - entry.setMulti(true); - entry.setUpsert(false); - return entry; - }()}); - updateOp.getWriteCommandRequestBase().setOrdered(false); - return updateOp.serialize({}); - }()); - - uassertStatusOK(getStatusFromWriteCommandReply(response->getCommandReply())); - } - - const auto clientOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - WriteConcernResult unusedWCResult; - uassertStatusOK(waitForWriteConcern( - opCtx, clientOpTime, ShardingCatalogClient::kMajorityWriteConcern, &unusedWCResult)); -} - void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx, const NamespaceString& nss, const OID& collectionEpoch, @@ -1718,7 +1773,7 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, repl::ReadConcernLevel::kLocalReadConcern, CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), {}, 1)); uassert(ErrorCodes::ConflictingOperationInProgress, @@ -1742,7 +1797,7 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx, const auto targetChunkVector = std::move(targetChunkResult.docs); uassert(51262, str::stream() << "Unable to locate chunk " << chunk.toString() - << " from ns: " << nss.ns(), + << " from ns: " << nss.toStringForErrorMsg(), !targetChunkVector.empty()); const auto targetChunk = uassertStatusOK(ChunkType::parseFromConfigBSON( @@ -1766,8 +1821,8 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx, const auto chunksVector = std::move(findResponse.docs); uassert(ErrorCodes::IncompatibleShardingMetadata, - str::stream() << "Tried to find max chunk version for collection '" << nss.ns() - << ", but found no chunks", + str::stream() << "Tried to find max chunk version for collection '" + << 
nss.toStringForErrorMsg() << ", but found no chunks", !chunksVector.empty()); const auto highestVersionChunk = uassertStatusOK( @@ -1780,7 +1835,7 @@ void ShardingCatalogManager::clearJumboFlag(OperationContext* opCtx, // or had its shard key refined since the migration began, unbeknown to the shard when the // command was sent. uassert(ErrorCodes::StaleEpoch, - str::stream() << "The epoch of collection '" << nss.ns() + str::stream() << "The epoch of collection '" << nss.toStringForErrorMsg() << "' has changed since the migration began. The config server's " "collection placement version epoch is now '" << currentCollectionPlacementVersion.epoch().toString() @@ -2100,7 +2155,7 @@ void ShardingCatalogManager::splitOrMarkJumbo(OperationContext* opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, repl::ReadConcernLevel::kLocalReadConcern, CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), {}, 1)); uassert(ErrorCodes::ConflictingOperationInProgress, @@ -2152,83 +2207,114 @@ void ShardingCatalogManager::setAllowMigrationsAndBumpOneChunk( const NamespaceString& nss, const boost::optional& collectionUUID, bool allowMigrations) { - std::set cmShardIds; - { - // Mark opCtx as interruptible to ensure that all reads and writes to the metadata - // collections under the exclusive _kChunkOpLock happen on the same term. - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + // Mark opCtx as interruptible to ensure that all reads and writes to the metadata + // collections under the exclusive _kChunkOpLock happen on the same term. + opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and - // migrations - Lock::ExclusiveLock lk(opCtx, _kChunkOpLock); + // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk splits, merges, and + // migrations + Lock::ExclusiveLock lk(opCtx, _kChunkOpLock); - const auto cm = - uassertStatusOK(Grid::get(opCtx) - ->catalogCache() - ->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, nss)) - .cm; + const auto cm = + uassertStatusOK( + Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithPlacementRefresh( + opCtx, nss)) + .cm; + + uassert(ErrorCodes::InvalidUUID, + str::stream() << "Collection uuid " << collectionUUID + << " in the request does not match the current uuid " << cm.getUUID() + << " for ns " << nss.toStringForErrorMsg(), + !collectionUUID || collectionUUID == cm.getUUID()); + + auto updateCollectionAndChunkFn = [allowMigrations, &nss, &collectionUUID]( + const txn_api::TransactionClient& txnClient, + ExecutorPtr txnExec) { + write_ops::UpdateCommandRequest updateCollOp(CollectionType::ConfigNS); + updateCollOp.setUpdates([&] { + write_ops::UpdateOpEntry entry; + const auto update = allowMigrations + ? 
BSON("$unset" << BSON(CollectionType::kAllowMigrationsFieldName << "")) + : BSON("$set" << BSON(CollectionType::kAllowMigrationsFieldName << false)); + + BSONObj query = + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)); + if (collectionUUID) { + query = query.addFields(BSON(CollectionType::kUuidFieldName << *collectionUUID)); + } + entry.setQ(query); + entry.setU(update); + entry.setMulti(false); + return std::vector{entry}; + }()); - uassert(ErrorCodes::InvalidUUID, - str::stream() << "Collection uuid " << collectionUUID - << " in the request does not match the current uuid " << cm.getUUID() - << " for ns " << nss, - !collectionUUID || collectionUUID == cm.getUUID()); + auto updateCollResponse = txnClient.runCRUDOpSync(updateCollOp, {0}); + uassertStatusOK(updateCollResponse.toStatus()); + uassert(ErrorCodes::ConflictingOperationInProgress, + str::stream() << "Expected to match one doc but matched " + << updateCollResponse.getN(), + updateCollResponse.getN() == 1); + + FindCommandRequest collQuery{CollectionType::ConfigNS}; + collQuery.setFilter( + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss))); + collQuery.setLimit(1); + + const auto findCollResponse = txnClient.exhaustiveFindSync(collQuery); + uassert(ErrorCodes::NamespaceNotFound, + "Collection does not exist", + findCollResponse.size() == 1); + const CollectionType coll(findCollResponse[0]); + + // Find the newest chunk + FindCommandRequest chunkQuery{ChunkType::ConfigNS}; + chunkQuery.setFilter(BSON(ChunkType::collectionUUID << coll.getUuid())); + chunkQuery.setSort(BSON(ChunkType::lastmod << -1)); + chunkQuery.setLimit(1); + const auto findChunkResponse = txnClient.exhaustiveFindSync(chunkQuery); + + uassert(ErrorCodes::IncompatibleShardingMetadata, + str::stream() << "Tried to find max chunk version for collection " + << nss.toStringForErrorMsg() << ", but found no chunks", + findChunkResponse.size() == 1); + + const auto newestChunk = uassertStatusOK(ChunkType::parseFromConfigBSON( + findChunkResponse[0], coll.getEpoch(), coll.getTimestamp())); + const auto targetVersion = [&]() { + ChunkVersion version = newestChunk.getVersion(); + version.incMinor(); + return version; + }(); - cm.getAllShardIds(&cmShardIds); - withTransaction( - opCtx, - CollectionType::ConfigNS, - [this, allowMigrations, &nss, &collectionUUID](OperationContext* opCtx, - TxnNumber txnNumber) { - // Update the 'allowMigrations' field. An unset 'allowMigrations' field implies - // 'true'. To ease backwards compatibility we omit 'allowMigrations' instead of - // setting it explicitly to 'true'. - const auto update = allowMigrations - ? 
BSON("$unset" << BSON(CollectionType::kAllowMigrationsFieldName << "")) - : BSON("$set" << BSON(CollectionType::kAllowMigrationsFieldName << false)); - - BSONObj query = BSON(CollectionType::kNssFieldName << nss.ns()); - if (collectionUUID) { - query = - query.addFields(BSON(CollectionType::kUuidFieldName << *collectionUUID)); - } + write_ops::UpdateCommandRequest updateChunkOp(ChunkType::ConfigNS); + BSONObjBuilder updateBuilder; + BSONObjBuilder updateVersionClause(updateBuilder.subobjStart("$set")); + updateVersionClause.appendTimestamp(ChunkType::lastmod(), targetVersion.toLong()); + updateVersionClause.doneFast(); + const auto update = updateBuilder.obj(); + updateChunkOp.setUpdates([&] { + write_ops::UpdateOpEntry entry; + entry.setQ(BSON(ChunkType::name << newestChunk.getName())); + entry.setU(update); + entry.setMulti(false); + entry.setUpsert(false); + return std::vector{entry}; + }()); + auto updateChunkResponse = txnClient.runCRUDOpSync(updateChunkOp, {1}); + uassertStatusOK(updateChunkResponse.toStatus()); + LOGV2_DEBUG( + 7353900, 1, "Finished all transaction operations in setAllowMigrations command"); - const auto res = writeToConfigDocumentInTxn( - opCtx, - CollectionType::ConfigNS, - BatchedCommandRequest::buildUpdateOp(CollectionType::ConfigNS, - query, - update /* update */, - false /* upsert */, - false /* multi */), - txnNumber); - const auto numDocsModified = UpdateOp::parseResponse(res).getN(); - uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "Expected to match one doc for query " << query - << " but matched " << numDocsModified, - numDocsModified == 1); - - bumpCollectionMinorVersion(opCtx, _localConfigShard.get(), nss, txnNumber); - }); - - // From now on migrations are not allowed anymore, so it is not possible that new shards - // will own chunks for this collection. - } + return SemiFuture::makeReady(); + }; + auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); + auto inlineExecutor = std::make_shared(); - // Trigger a refresh on each shard containing chunks for this collection. - const auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); - // TODO (SERVER-74477): Remove cmShardIds and always send the refresh to all shards. - if (feature_flags::gAllowMigrationsRefreshToAll.isEnabled( - serverGlobalParams.featureCompatibility)) { - const auto allShardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); - sharding_util::tellShardsToRefreshCollection(opCtx, allShardIds, nss, executor); - } else { - sharding_util::tellShardsToRefreshCollection(opCtx, - {std::make_move_iterator(cmShardIds.begin()), - std::make_move_iterator(cmShardIds.end())}, - nss, - executor); - } + txn_api::SyncTransactionWithRetries txn(opCtx, executor, nullptr, inlineExecutor); + + txn.run(opCtx, updateCollectionAndChunkFn); + // From now on migrations are not allowed anymore, so it is not possible that new shards + // will own chunks for this collection. } void ShardingCatalogManager::bumpCollectionMinorVersionInTxn(OperationContext* opCtx, @@ -2302,14 +2388,17 @@ void ShardingCatalogManager::_commitChunkMigrationInTransaction( std::shared_ptr> splitChunks, std::shared_ptr controlChunk, const ShardId& donorShardId) { - // Verify the placement info for collectionUUID needs to be updated because the donor is losing - // its last chunk for the namespace. 
- const auto removeDonorFromPlacementHistory = !controlChunk && splitChunks->empty(); - - // Verify the placement info for collectionUUID needs to be updated because the recipient is - // acquiring its first chunk for the namespace. - const auto addRecipientToPlacementHistory = [&] { - const auto chunkQuery = + + const auto placementChangeInParentColl = [&] { + // Check if the donor will stop owning data of the parent collection once the migration + // is committed. + if (!controlChunk && splitChunks->empty()) { + return true; + } + + // Check if the recipient isn't owning data of the parent collection prior to the + // migration commit. + const auto query = BSON(ChunkType::collectionUUID << migratedChunk->getCollectionUUID() << ChunkType::shard << migratedChunk->getShard()); auto findResponse = uassertStatusOK(_localConfigShard->exhaustiveFindOnConfig( @@ -2317,10 +2406,14 @@ void ShardingCatalogManager::_commitChunkMigrationInTransaction( ReadPreferenceSetting{ReadPreference::PrimaryOnly}, repl::ReadConcernLevel::kLocalReadConcern, ChunkType::ConfigNS, - chunkQuery, + query, BSONObj(), 1 /* limit */)); - return findResponse.docs.empty(); + if (findResponse.docs.empty()) { + return true; + } + + return false; }(); const auto configChunksUpdateRequest = [&migratedChunk, &splitChunks, &controlChunk] { @@ -2362,9 +2455,8 @@ void ShardingCatalogManager::_commitChunkMigrationInTransaction( recipientShardId = migratedChunk->getShard(), migrationCommitTime = migratedChunk->getHistory().front().getValidAfter(), configChunksUpdateRequest = std::move(configChunksUpdateRequest), - removeDonorFromPlacementHistory, - addRecipientToPlacementHistory](const txn_api::TransactionClient& txnClient, - ExecutorPtr txnExec) -> SemiFuture { + placementChangeInParentColl](const txn_api::TransactionClient& txnClient, + ExecutorPtr txnExec) -> SemiFuture { const long long nChunksToUpdate = configChunksUpdateRequest.getUpdates().size(); auto updateConfigChunksFuture = @@ -2379,133 +2471,49 @@ void ShardingCatalogManager::_commitChunkMigrationInTransaction( updateResponse.getN() == nChunksToUpdate); }); - if (!(removeDonorFromPlacementHistory || addRecipientToPlacementHistory)) { + if (!placementChangeInParentColl) { // End the transaction here. return std::move(updateConfigChunksFuture).semi(); } - // The main method to store placement info as part of the transaction, given a valid - // descriptor. - auto persistPlacementInfoSubchain = [txnExec, - &txnClient](NamespacePlacementType&& placementInfo) { - write_ops::InsertCommandRequest insertPlacementEntry( - NamespaceString::kConfigsvrPlacementHistoryNamespace, {placementInfo.toBSON()}); - return txnClient.runCRUDOp(insertPlacementEntry, {}) - .thenRunOn(txnExec) - .then([](const BatchedCommandResponse& insertPlacementEntryResponse) { - uassertStatusOK(insertPlacementEntryResponse.toStatus()); - }) - .semi(); - }; - - // Obtain a valid placement descriptor from config.chunks and then store it as part of the - // transaction. 
- auto generateAndPersistPlacementInfoSubchain = - [txnExec, &txnClient](const NamespaceString& nss, - const UUID& collUuid, - const Timestamp& migrationCommitTime) { - // Compose the query - equivalent to - // 'configDb.chunks.distinct("shard", {uuid:collectionUuid})' - DistinctCommandRequest distinctRequest(ChunkType::ConfigNS); - distinctRequest.setKey(ChunkType::shard.name()); - distinctRequest.setQuery(BSON(ChunkType::collectionUUID.name() << collUuid)); - return txnClient.runCommand(DatabaseName::kConfig, distinctRequest.toBSON({})) - .thenRunOn(txnExec) - .then([=, &txnClient](BSONObj reply) { - uassertStatusOK(getStatusFromWriteCommandReply(reply)); - std::vector shardIds; - for (const auto& valueElement : reply.getField("values").Array()) { - shardIds.emplace_back(valueElement.String()); - } - - NamespacePlacementType placementInfo( - nss, migrationCommitTime, std::move(shardIds)); - placementInfo.setUuid(collUuid); - write_ops::InsertCommandRequest insertPlacementEntry( - NamespaceString::kConfigsvrPlacementHistoryNamespace, - {placementInfo.toBSON()}); - - return txnClient.runCRUDOp(insertPlacementEntry, {}); - }) - .thenRunOn(txnExec) - .then([](const BatchedCommandResponse& insertPlacementEntryResponse) { - uassertStatusOK(insertPlacementEntryResponse.toStatus()); - }) - .semi(); - }; - - // Extend the transaction to also upsert the placement information that matches the - // migration commit. + // Extend the transaction to also persist the collection placement change. return std::move(updateConfigChunksFuture) .thenRunOn(txnExec) .then([&] { - // Retrieve the previous placement entry - it will be used as a base for the next - // update. - FindCommandRequest placementInfoQuery{ - NamespaceString::kConfigsvrPlacementHistoryNamespace}; - placementInfoQuery.setFilter(BSON(NamespacePlacementType::kNssFieldName - << nss.toString() - << NamespacePlacementType::kTimestampFieldName - << BSON("$lte" << migrationCommitTime))); - placementInfoQuery.setSort(BSON(NamespacePlacementType::kTimestampFieldName << -1)); - placementInfoQuery.setLimit(1); - return txnClient.exhaustiveFind(placementInfoQuery); + // Use the updated content of config.chunks to build the collection placement + // metadata. + // The request is equivalent to "configDb.chunks.distinct('shard',{uuid:collUuid})". + DistinctCommandRequest distinctRequest(ChunkType::ConfigNS); + distinctRequest.setKey(ChunkType::shard.name()); + distinctRequest.setQuery(BSON(ChunkType::collectionUUID.name() << collUuid)); + return txnClient.runCommand(DatabaseName::kConfig, distinctRequest.toBSON({})); }) .thenRunOn(txnExec) - .then([&, - persistPlacementInfo = std::move(persistPlacementInfoSubchain), - generateAndPersistPlacementInfo = - std::move(generateAndPersistPlacementInfoSubchain)]( - const std::vector& queryResponse) { - tassert(6892800, - str::stream() - << "Unexpected number of placement entries retrieved" << nss.toString(), - queryResponse.size() <= 1); - - if (queryResponse.size() == 0) { - // Historical placement data may not be available due to an FCV transition - - // invoke the more expensive fallback method. - return generateAndPersistPlacementInfo(nss, collUuid, migrationCommitTime); + .then([=, &txnClient](BSONObj reply) { + uassertStatusOK(getStatusFromWriteCommandReply(reply)); + std::vector shardIds; + for (const auto& valueElement : reply.getField("values").Array()) { + shardIds.emplace_back(valueElement.String()); } - // Leverage the most recent placement info to build the new version. 
- auto placementInfo = NamespacePlacementType::parse( - IDLParserContext("CommitMoveChunk"), queryResponse.front()); - placementInfo.setTimestamp(migrationCommitTime); - - const auto& originalShardList = placementInfo.getShards(); - std::vector updatedShardList; - updatedShardList.reserve(originalShardList.size() + 1); - if (addRecipientToPlacementHistory) { - updatedShardList.push_back(recipientShardId); - } + NamespacePlacementType placementInfo(nss, migrationCommitTime, std::move(shardIds)); + placementInfo.setUuid(collUuid); + write_ops::InsertCommandRequest insertPlacementEntry( + NamespaceString::kConfigsvrPlacementHistoryNamespace, {placementInfo.toBSON()}); - std::copy_if(std::make_move_iterator(originalShardList.begin()), - std::make_move_iterator(originalShardList.end()), - std::back_inserter(updatedShardList), - [&](const ShardId& shardId) { - if (removeDonorFromPlacementHistory && shardId == donorShardId) { - return false; - } - if (addRecipientToPlacementHistory && - shardId == recipientShardId) { - // Ensure that the added recipient will only appear once. - return false; - } - return true; - }); - placementInfo.setShards(std::move(updatedShardList)); - - return persistPlacementInfo(std::move(placementInfo)); + return txnClient.runCRUDOp(insertPlacementEntry, {}); + }) + .thenRunOn(txnExec) + .then([](const BatchedCommandResponse& insertPlacementEntryResponse) { + uassertStatusOK(insertPlacementEntryResponse.toStatus()); }) .semi(); }; auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - txn_api::SyncTransactionWithRetries txn(opCtx, sleepInlineExecutor, nullptr, inlineExecutor); + txn_api::SyncTransactionWithRetries txn(opCtx, executor, nullptr, inlineExecutor); txn.run(opCtx, transactionChain); } diff --git a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp index 26294c605d2ee..29d5795fc43b0 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_clear_jumbo_flag_test.cpp @@ -27,15 +27,26 @@ * it in the license file. 
*/ -#include "mongo/bson/bsonobj.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/client/read_preference.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp index fa1663bbe0255..8b229e7671e33 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_collection_operations.cpp @@ -28,25 +28,82 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/config/sharding_catalog_manager.h" - +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/read_preference.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/balancer/balancer.h" -#include "mongo/db/s/sharding_ddl_util.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_util.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_options.h" +#include "mongo/db/transaction/transaction_api.h" #include "mongo/db/vector_clock.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/chunk_constraints.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_key_pattern.h" 
+#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -87,11 +144,11 @@ void triggerFireAndForgetShardRefreshes(OperationContext* opCtx, // This is a best-effort attempt to refresh the shard 'shardEntry'. Fire and forget an // asynchronous '_flushRoutingTableCacheUpdates' request. - shard->runFireAndForgetCommand( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - DatabaseName::kAdmin.toString(), - BSON("_flushRoutingTableCacheUpdates" << coll.getNss().ns())); + shard->runFireAndForgetCommand(opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + DatabaseName::kAdmin.toString(), + BSON("_flushRoutingTableCacheUpdates" + << NamespaceStringUtil::serialize(coll.getNss()))); } } } @@ -234,7 +291,7 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx, uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked( opCtx, "refineCollectionShardKey.start", - nss.ns(), + NamespaceStringUtil::serialize(nss), BSON("oldKey" << oldShardKeyPattern.toBSON() << "newKey" << newShardKeyPattern.toBSON() << "oldEpoch" << collType.getEpoch() << "newEpoch" << newEpoch), ShardingCatalogClient::kLocalWriteConcern, @@ -258,12 +315,12 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx, auto [chunkUpdates, tagUpdates] = makeChunkAndTagUpdatesForRefine(newFields); // Update the config.collections entry for the given namespace. - auto catalogUpdateRequest = - BatchedCommandRequest::buildUpdateOp(CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), - collType.toBSON(), - false /* upsert */, - false /* multi */); + auto catalogUpdateRequest = BatchedCommandRequest::buildUpdateOp( + CollectionType::ConfigNS, + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), + collType.toBSON(), + false /* upsert */, + false /* multi */); return txnClient.runCRUDOp(catalogUpdateRequest, {}) .thenRunOn(txnExec) .then([&txnClient, timers, collType, nss, chunkUpdates = std::move(chunkUpdates)]( @@ -316,12 +373,12 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx, // Update all config.tags entries for the given namespace by setting their // bounds for each new field in the refined key to MinKey (except for the global // max tag where the max bounds are set to MaxKey). 
- auto tagUpdateRequest = - BatchedCommandRequest::buildPipelineUpdateOp(TagsType::ConfigNS, - BSON("ns" << nss.ns()), - tagUpdates, - false /* upsert */, - true /* useMultiUpdate */); + auto tagUpdateRequest = BatchedCommandRequest::buildPipelineUpdateOp( + TagsType::ConfigNS, + BSON("ns" << NamespaceStringUtil::serialize(nss)), + tagUpdates, + false /* upsert */, + true /* useMultiUpdate */); return txnClient.runCRUDOp(tagUpdateRequest, {}); }) .thenRunOn(txnExec) @@ -358,7 +415,7 @@ void ShardingCatalogManager::refineCollectionShardKey(OperationContext* opCtx, ShardingLogging::get(opCtx)->logChange(opCtx, "refineCollectionShardKey.end", - nss.ns(), + NamespaceStringUtil::serialize(nss), BSONObj(), ShardingCatalogClient::kLocalWriteConcern, _localConfigShard, @@ -407,7 +464,7 @@ void ShardingCatalogManager::configureCollectionBalancing( ShardingLogging::get(opCtx)->logChange(opCtx, "configureCollectionBalancing", - nss.ns(), + NamespaceStringUtil::serialize(nss), logChangeDetail.obj(), ShardingCatalogClient::kMajorityWriteConcern, _localConfigShard, @@ -473,7 +530,8 @@ void ShardingCatalogManager::configureCollectionBalancing( withTransaction(opCtx, CollectionType::ConfigNS, [this, &nss, &update](OperationContext* opCtx, TxnNumber txnNumber) { - const auto query = BSON(CollectionType::kNssFieldName << nss.ns()); + const auto query = BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(nss)); const auto res = writeToConfigDocumentInTxn( opCtx, CollectionType::ConfigNS, @@ -513,50 +571,6 @@ void ShardingCatalogManager::configureCollectionBalancing( logConfigureCollectionBalancing(); } -void ShardingCatalogManager::renameShardedMetadata( - OperationContext* opCtx, - const NamespaceString& from, - const NamespaceString& to, - const WriteConcernOptions& writeConcern, - boost::optional optFromCollType) { - // Take _kChunkOpLock in exclusive mode to prevent concurrent chunk modifications and generate - // strictly monotonously increasing collection placement versions - Lock::ExclusiveLock chunkLk(opCtx, _kChunkOpLock); - Lock::ExclusiveLock zoneLk(opCtx, _kZoneOpLock); - - std::string logMsg = str::stream() << from << " to " << to; - if (optFromCollType) { - // Rename CSRS metadata in case the source collection is sharded - auto collType = *optFromCollType; - sharding_ddl_util::shardedRenameMetadata( - opCtx, _localConfigShard.get(), _localCatalogClient.get(), collType, to, writeConcern); - ShardingLogging::get(opCtx)->logChange( - opCtx, - "renameCollection.metadata", - str::stream() << logMsg << ": dropped target collection and renamed source collection", - BSON("newCollMetadata" << collType.toBSON()), - ShardingCatalogClient::kLocalWriteConcern, - _localConfigShard, - _localCatalogClient.get()); - } else { - // Remove stale CSRS metadata in case the source collection is unsharded and the - // target collection was sharded - // throws if the provided UUID does not match - sharding_ddl_util::removeCollAndChunksMetadataFromConfig_notIdempotent( - opCtx, _localCatalogClient.get(), to, writeConcern); - sharding_ddl_util::removeTagsMetadataFromConfig_notIdempotent( - opCtx, _localConfigShard.get(), to, writeConcern); - ShardingLogging::get(opCtx)->logChange(opCtx, - "renameCollection.metadata", - str::stream() - << logMsg << " : dropped target collection.", - BSONObj(), - ShardingCatalogClient::kLocalWriteConcern, - _localConfigShard, - _localCatalogClient.get()); - } -} - void ShardingCatalogManager::updateTimeSeriesBucketingParameters( OperationContext* opCtx, const 
NamespaceString& nss, @@ -571,57 +585,57 @@ void ShardingCatalogManager::updateTimeSeriesBucketingParameters( std::set shardIds; cm.getAllShardIds(&shardIds); - withTransaction(opCtx, - CollectionType::ConfigNS, - [this, &nss, ×eriesParameters, &shardIds](OperationContext* opCtx, - TxnNumber txnNumber) { - auto granularityFieldName = CollectionType::kTimeseriesFieldsFieldName + - "." + TypeCollectionTimeseriesFields::kGranularityFieldName; - auto bucketSpanFieldName = CollectionType::kTimeseriesFieldsFieldName + - "." + TypeCollectionTimeseriesFields::kBucketMaxSpanSecondsFieldName; - auto bucketRoundingFieldName = CollectionType::kTimeseriesFieldsFieldName + - "." + TypeCollectionTimeseriesFields::kBucketRoundingSecondsFieldName; - - BSONObjBuilder updateCmd; - BSONObj bucketUp; - if (timeseriesParameters.getGranularity().has_value()) { - auto bucketSpan = timeseries::getMaxSpanSecondsFromGranularity( - timeseriesParameters.getGranularity().get()); - updateCmd.append("$unset", BSON(bucketRoundingFieldName << "")); - bucketUp = BSON(granularityFieldName - << BucketGranularity_serializer( - timeseriesParameters.getGranularity().get()) - << bucketSpanFieldName << bucketSpan); - } else { - invariant(timeseriesParameters.getBucketMaxSpanSeconds().has_value() && - timeseriesParameters.getBucketRoundingSeconds().has_value()); - updateCmd.append("$unset", BSON(granularityFieldName << "")); - bucketUp = - BSON(bucketSpanFieldName - << timeseriesParameters.getBucketMaxSpanSeconds().get() - << bucketRoundingFieldName - << timeseriesParameters.getBucketRoundingSeconds().get()); - } - updateCmd.append("$set", bucketUp); - - writeToConfigDocumentInTxn( - opCtx, - CollectionType::ConfigNS, - BatchedCommandRequest::buildUpdateOp( - CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()) /* query */, - updateCmd.obj() /* update */, - false /* upsert */, - false /* multi */), - txnNumber); - - // Bump the chunk version for shards. - bumpMajorVersionOneChunkPerShard(opCtx, - nss, - txnNumber, - {std::make_move_iterator(shardIds.begin()), - std::make_move_iterator(shardIds.end())}); - }); + withTransaction( + opCtx, + CollectionType::ConfigNS, + [this, &nss, ×eriesParameters, &shardIds](OperationContext* opCtx, + TxnNumber txnNumber) { + auto granularityFieldName = CollectionType::kTimeseriesFieldsFieldName + "." + + TypeCollectionTimeseriesFields::kGranularityFieldName; + auto bucketSpanFieldName = CollectionType::kTimeseriesFieldsFieldName + "." + + TypeCollectionTimeseriesFields::kBucketMaxSpanSecondsFieldName; + auto bucketRoundingFieldName = CollectionType::kTimeseriesFieldsFieldName + "." 
+ + TypeCollectionTimeseriesFields::kBucketRoundingSecondsFieldName; + + BSONObjBuilder updateCmd; + BSONObj bucketUp; + if (timeseriesParameters.getGranularity().has_value()) { + auto bucketSpan = timeseries::getMaxSpanSecondsFromGranularity( + timeseriesParameters.getGranularity().get()); + updateCmd.append("$unset", BSON(bucketRoundingFieldName << "")); + bucketUp = BSON( + granularityFieldName + << BucketGranularity_serializer(timeseriesParameters.getGranularity().get()) + << bucketSpanFieldName << bucketSpan); + } else { + invariant(timeseriesParameters.getBucketMaxSpanSeconds().has_value() && + timeseriesParameters.getBucketRoundingSeconds().has_value()); + updateCmd.append("$unset", BSON(granularityFieldName << "")); + bucketUp = BSON(bucketSpanFieldName + << timeseriesParameters.getBucketMaxSpanSeconds().get() + << bucketRoundingFieldName + << timeseriesParameters.getBucketRoundingSeconds().get()); + } + updateCmd.append("$set", bucketUp); + + writeToConfigDocumentInTxn(opCtx, + CollectionType::ConfigNS, + BatchedCommandRequest::buildUpdateOp( + CollectionType::ConfigNS, + BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(nss)) /* query */, + updateCmd.obj() /* update */, + false /* upsert */, + false /* multi */), + txnNumber); + + // Bump the chunk version for shards. + bumpMajorVersionOneChunkPerShard(opCtx, + nss, + txnNumber, + {std::make_move_iterator(shardIds.begin()), + std::make_move_iterator(shardIds.end())}); + }); } } // namespace mongo diff --git a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp index 3be144e4e3a49..434d7d1e993d3 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_commit_chunk_migration_test.cpp @@ -27,10 +27,27 @@ * it in the license file. 
*/ +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/client/read_preference.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" @@ -38,13 +55,22 @@ #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" #include "mongo/db/vector_clock.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -82,7 +108,8 @@ class CommitChunkMigrate : public ConfigServerTestFixture { ReadWriteConcernDefaultsLookupMock _lookupMock; }; -const NamespaceString kNamespace("TestDB.TestColl"); +const NamespaceString kNamespace = + NamespaceString::createNamespaceString_forTest("TestDB.TestColl"); const KeyPattern kKeyPattern(BSON("x" << 1)); TEST_F(CommitChunkMigrate, ChunksUpdatedCorrectly) { diff --git a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp index 13051202030a4..a81bcd269d6df 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_config_initialization_test.cpp @@ -27,43 +27,76 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include #include +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/config_server_op_observer.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/vector_clock.h" -#include "mongo/db/vector_clock_mutable.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_config_version.h" -#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" -#include "mongo/s/client/shard.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/database_version.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { namespace { @@ -245,7 +278,7 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) { auto opCtx = operationContext(); repl::UnreplicatedWritesBlock uwb(opCtx); auto nss = VersionType::ConfigNS; - writeConflictRetry(opCtx, "removeConfigDocuments", nss.ns(), [&] { + writeConflictRetry(opCtx, "removeConfigDocuments", nss, [&] { AutoGetCollection coll(opCtx, nss, MODE_IX); ASSERT_TRUE(coll); auto cursor = coll->getCursor(opCtx); @@ -281,8 +314,6 @@ TEST_F(ConfigInitializationTest, ReRunsIfDocRolledBackThenReElected) { } TEST_F(ConfigInitializationTest, BuildsNecessaryIndexes) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; 
ASSERT_OK(ShardingCatalogManager::get(operationContext()) ->initializeConfigDatabaseIfNeeded(operationContext())); @@ -361,18 +392,21 @@ TEST_F(ConfigInitializationTest, InizializePlacementHistory) { setupDatabase(dbName, ShardId(primaryShard), DatabaseVersion(UUID::gen(), timestamp)); } - NamespaceString coll1Name("dbWithCollections_1_2", "coll1"); + NamespaceString coll1Name = + NamespaceString::createNamespaceString_forTest("dbWithCollections_1_2", "coll1"); std::vector expectedColl1Placement{ShardId("shard1"), ShardId("shard4")}; const auto [coll1, coll1Chunks] = createCollectionAndChunksMetadata(operationContext(), coll1Name, 2, expectedColl1Placement); - NamespaceString coll2Name("dbWithCollections_1_2", "coll2"); + NamespaceString coll2Name = + NamespaceString::createNamespaceString_forTest("dbWithCollections_1_2", "coll2"); std::vector expectedColl2Placement{ ShardId("shard1"), ShardId("shard2"), ShardId("shard3"), ShardId("shard4")}; const auto [coll2, coll2Chunks] = createCollectionAndChunksMetadata(operationContext(), coll2Name, 8, expectedColl2Placement); - NamespaceString corruptedCollName("dbWithCorruptedCollection", "corruptedColl"); + NamespaceString corruptedCollName = NamespaceString::createNamespaceString_forTest( + "dbWithCorruptedCollection", "corruptedColl"); std::vector expectedCorruptedCollPlacement{ ShardId("shard1"), ShardId("shard2"), ShardId("shard3")}; const auto [corruptedColl, corruptedCollChunks] = @@ -390,102 +424,133 @@ TEST_F(ConfigInitializationTest, InizializePlacementHistory) { opObserver.onMajorityCommitPointUpdate(getServiceContext(), majorityCommitPoint); now = VectorClock::get(operationContext())->getTime(); - auto timeAtInitialization = now.configTime().asTimestamp(); + const auto timeAtFirstInvocation = now.configTime().asTimestamp(); // init placement history ShardingCatalogManager::get(operationContext())->initializePlacementHistory(operationContext()); - // Verify the outcome - DBDirectClient dbClient(operationContext()); - - // The expected amount of documents has been generated - ASSERT_EQUALS(dbClient.count(NamespaceString::kConfigsvrPlacementHistoryNamespace, BSONObj()), - 3 /*numDatabases*/ + 3 /*numCollections*/ + 2 /*numMarkers*/); + auto verifyOutcome = [&, + coll1 = coll1, + coll1Chunks = coll1Chunks, + coll2 = coll2, + coll2Chunks = coll2Chunks, + corruptedColl = corruptedColl](const Timestamp& timeAtInitialization) { + DBDirectClient dbClient(operationContext()); + + // The expected amount of documents has been generated + ASSERT_EQUALS( + dbClient.count(NamespaceString::kConfigsvrPlacementHistoryNamespace, BSONObj()), + 3 /*numDatabases*/ + 3 /*numCollections*/ + 2 /*numMarkers*/); + + // Each database is correctly described + for (const auto& [dbName, primaryShard, timeOfCreation] : databaseInfos) { + const NamespacePlacementType expectedEntry( + NamespaceString::createNamespaceString_forTest(dbName), + timeOfCreation, + {primaryShard}); + const auto generatedEntry = findOneOnConfigCollection( + operationContext(), + NamespaceString::kConfigsvrPlacementHistoryNamespace, + BSON("nss" << dbName)); + + assertSamePlacementInfo(expectedEntry, generatedEntry); + } - // Each database is correctly described - for (const auto& [dbName, primaryShard, timeOfCreation] : databaseInfos) { - const NamespacePlacementType expectedEntry( - NamespaceString(dbName), timeOfCreation, {primaryShard}); - const auto generatedEntry = findOneOnConfigCollection( + // Each collection is properly described: + const auto getExpectedTimestampForColl = [](const 
std::vector& collChunks) { + return std::max_element(collChunks.begin(), + collChunks.end(), + [](const ChunkType& lhs, const ChunkType& rhs) { + return *lhs.getOnCurrentShardSince() < + *rhs.getOnCurrentShardSince(); + }) + ->getOnCurrentShardSince() + .value(); + }; + + // - coll1 + NamespacePlacementType expectedEntryForColl1( + coll1.getNss(), getExpectedTimestampForColl(coll1Chunks), expectedColl1Placement); + expectedEntryForColl1.setUuid(coll1.getUuid()); + const auto generatedEntryForColl1 = findOneOnConfigCollection( operationContext(), NamespaceString::kConfigsvrPlacementHistoryNamespace, - BSON("nss" << dbName)); + BSON("nss" << coll1.getNss().ns_forTest())); - assertSamePlacementInfo(expectedEntry, generatedEntry); - } - - // Each collection is properly described: - const auto getExpectedTimestampForColl = [](const std::vector& collChunks) { - return std::max_element(collChunks.begin(), - collChunks.end(), - [](const ChunkType& lhs, const ChunkType& rhs) { - return *lhs.getOnCurrentShardSince() < - *rhs.getOnCurrentShardSince(); - }) - ->getOnCurrentShardSince() - .value(); - }; + assertSamePlacementInfo(expectedEntryForColl1, generatedEntryForColl1); - // - coll1 - NamespacePlacementType expectedEntryForColl1( - coll1.getNss(), getExpectedTimestampForColl(coll1Chunks), expectedColl1Placement); - expectedEntryForColl1.setUuid(coll1.getUuid()); - const auto generatedEntryForColl1 = findOneOnConfigCollection( - operationContext(), - NamespaceString::kConfigsvrPlacementHistoryNamespace, - BSON("nss" << coll1.getNss().ns())); - - assertSamePlacementInfo(expectedEntryForColl1, generatedEntryForColl1); - - // - coll2 - NamespacePlacementType expectedEntryForColl2( - coll2.getNss(), getExpectedTimestampForColl(coll2Chunks), expectedColl2Placement); - expectedEntryForColl2.setUuid(coll2.getUuid()); - const auto generatedEntryForColl2 = findOneOnConfigCollection( - operationContext(), - NamespaceString::kConfigsvrPlacementHistoryNamespace, - BSON("nss" << coll2.getNss().ns())); - - assertSamePlacementInfo(expectedEntryForColl2, generatedEntryForColl2); - - // - corruptedColl - NamespacePlacementType expectedEntryForCorruptedColl( - corruptedColl.getNss(), timeAtInitialization, expectedCorruptedCollPlacement); - expectedEntryForCorruptedColl.setUuid(corruptedColl.getUuid()); - const auto generatedEntryForCorruptedColl = findOneOnConfigCollection( - operationContext(), - NamespaceString::kConfigsvrPlacementHistoryNamespace, - BSON("nss" << corruptedColl.getNss().ns())); - - assertSamePlacementInfo(expectedEntryForCorruptedColl, generatedEntryForCorruptedColl); - - // Check FCV special markers: - // - one entry at begin-of-time with all the currently existing shards (and no UUID set). - const NamespacePlacementType expectedMarkerForDawnOfTime( - NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace, - Timestamp(0, 1), - allShardIds); - const auto generatedMarkerForDawnOfTime = findOneOnConfigCollection( - operationContext(), - NamespaceString::kConfigsvrPlacementHistoryNamespace, - BSON("nss" << NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns() - << "timestamp" << Timestamp(0, 1))); - - assertSamePlacementInfo(expectedMarkerForDawnOfTime, generatedMarkerForDawnOfTime); - - // - one entry at the time the initialization is performed with an empty set of shards - // (and no UUID set). 
- const NamespacePlacementType expectedMarkerForInitializationTime( - NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace, timeAtInitialization, {}); - const auto generatedMarkerForInitializationTime = - findOneOnConfigCollection( + // - coll2 + NamespacePlacementType expectedEntryForColl2( + coll2.getNss(), getExpectedTimestampForColl(coll2Chunks), expectedColl2Placement); + expectedEntryForColl2.setUuid(coll2.getUuid()); + const auto generatedEntryForColl2 = findOneOnConfigCollection( + operationContext(), + NamespaceString::kConfigsvrPlacementHistoryNamespace, + BSON("nss" << coll2.getNss().ns_forTest())); + + assertSamePlacementInfo(expectedEntryForColl2, generatedEntryForColl2); + + // - corruptedColl + NamespacePlacementType expectedEntryForCorruptedColl( + corruptedColl.getNss(), timeAtInitialization, expectedCorruptedCollPlacement); + expectedEntryForCorruptedColl.setUuid(corruptedColl.getUuid()); + const auto generatedEntryForCorruptedColl = + findOneOnConfigCollection( + operationContext(), + NamespaceString::kConfigsvrPlacementHistoryNamespace, + BSON("nss" << corruptedColl.getNss().ns_forTest())); + + assertSamePlacementInfo(expectedEntryForCorruptedColl, generatedEntryForCorruptedColl); + + // Check placement initialization markers: + // - one entry at begin-of-time with all the currently existing shards (and no UUID set). + const NamespacePlacementType expectedMarkerForDawnOfTime( + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker, + Timestamp(0, 1), + allShardIds); + const auto generatedMarkerForDawnOfTime = findOneOnConfigCollection( operationContext(), NamespaceString::kConfigsvrPlacementHistoryNamespace, - BSON("nss" << NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns() - << "timestamp" << timeAtInitialization)); + BSON("nss" + << ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.ns_forTest() + << "timestamp" << Timestamp(0, 1))); + + assertSamePlacementInfo(expectedMarkerForDawnOfTime, generatedMarkerForDawnOfTime); + + // - one entry at the time the initialization is performed with an empty set of shards + // (and no UUID set). 
+ const NamespacePlacementType expectedMarkerForInitializationTime( + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker, + timeAtInitialization, + {}); + const auto generatedMarkerForInitializationTime = + findOneOnConfigCollection( + operationContext(), + NamespaceString::kConfigsvrPlacementHistoryNamespace, + BSON("nss" << ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker + .ns_forTest() + << "timestamp" << timeAtInitialization)); + + assertSamePlacementInfo(expectedMarkerForInitializationTime, + generatedMarkerForInitializationTime); + }; + + verifyOutcome(timeAtFirstInvocation); + + // Perform a second invocation - the content created by the previous invocation should have been + // fully replaced by a new full representation with updated initialization markers + + now = VectorClock::get(operationContext())->getTime(); + majorityCommitPoint = repl::OpTime(now.clusterTime().asTimestamp(), 1); + opObserver.onMajorityCommitPointUpdate(getServiceContext(), majorityCommitPoint); + + now = VectorClock::get(operationContext())->getTime(); + const auto timeAtSecondInvocation = now.configTime().asTimestamp(); + ASSERT_GT(timeAtSecondInvocation, timeAtFirstInvocation); + + ShardingCatalogManager::get(operationContext())->initializePlacementHistory(operationContext()); - assertSamePlacementInfo(expectedMarkerForInitializationTime, - generatedMarkerForInitializationTime); + verifyOutcome(timeAtSecondInvocation); } } // unnamed namespace diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp index f7e70364c6954..ddc97b1f011e0 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations.cpp @@ -28,31 +28,73 @@ */ -#include "mongo/db/s/config/sharding_catalog_manager.h" - +#include +#include +#include +#include +#include #include - -#include "mongo/bson/util/bson_extract.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/commands/notify_sharding_event_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/ops/write_ops.h" -#include "mongo/db/persistent_task_store.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/ddl_lock_manager.h" -#include "mongo/db/server_options.h" +#include "mongo/db/s/sharding_logging.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction/transaction_api.h" #include "mongo/db/vector_clock.h" -#include "mongo/db/write_concern.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" 
+#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" #include "mongo/s/shard_util.h" #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/util/fail_point.h" -#include "mongo/util/pcre.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/pcre_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -60,9 +102,6 @@ namespace mongo { namespace { -MONGO_FAIL_POINT_DEFINE(hangBeforeNotifyingCreateDatabaseCommitted); - - using namespace fmt::literals; /** @@ -108,7 +147,7 @@ DatabaseType ShardingCatalogManager::createDatabase( // casing. It is allowed to create the 'config' database (handled by the early return above), // but only with that exact casing. uassert(ErrorCodes::InvalidOptions, - str::stream() << "Cannot manually create database'" << dbName << "'", + str::stream() << "Cannot manually create database '" << dbName << "'", !dbName.equalCaseInsensitive(DatabaseName::kAdmin.db()) && !dbName.equalCaseInsensitive(DatabaseName::kLocal.db()) && !dbName.equalCaseInsensitive(DatabaseName::kConfig.db())); @@ -124,8 +163,6 @@ DatabaseType ShardingCatalogManager::createDatabase( DBDirectClient client(opCtx); - boost::optional dbLock; - // Resolve the shard against the received parameter (which may encode either a shard ID or a // connection string). if (optPrimaryShard) { @@ -148,6 +185,7 @@ DatabaseType ShardingCatalogManager::createDatabase( return filterBuilder.obj(); }(); + boost::optional dbLock; // First perform an optimistic attempt without taking the lock to check if database exists. // If the database is not found take the lock and try again. 
@@ -164,10 +202,13 @@ DatabaseType ShardingCatalogManager::createDatabase( // Do another loop, with the db lock held in order to avoid taking the expensive path on // concurrent create database operations - dbLock.emplace(DDLLockManager::get(opCtx)->lock(opCtx, - str::toLower(dbName), - "createDatabase" /* reason */, - DDLLockManager::kDefaultLockTimeout)); + dbLock.emplace(opCtx, + opCtx->lockState(), + DatabaseNameUtil::deserialize(boost::none, str::toLower(dbName)), + "createDatabase" /* reason */, + MODE_X, + Date_t::now() + DDLLockManager::kDefaultLockTimeout, + true /*waitForRecovery*/); } // Expensive createDatabase code path @@ -212,6 +253,14 @@ DatabaseType ShardingCatalogManager::createDatabase( opCtx, selectShardForNewDatabase(opCtx, shardRegistry))); } + ShardingLogging::get(opCtx)->logChange(opCtx, + "createDatabase.start", + dbName, + /* details */ BSONObj(), + ShardingCatalogClient::kMajorityWriteConcern, + _localConfigShard, + _localCatalogClient.get()); + const auto now = VectorClock::get(opCtx)->getTime(); const auto clusterTime = now.clusterTime().asTimestamp(); @@ -233,12 +282,14 @@ DatabaseType ShardingCatalogManager::createDatabase( // - a "commitSuccessful" notification after completing the write into config.databases // will allow change streams to stop collecting events on the namespace created from // shards != resolvedPrimaryShard. + const auto allShards = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); { - DatabasesAdded prepareCommitEvent({DatabaseName(dbName)}, false /*areImported*/); - prepareCommitEvent.setPhase(CommitPhaseEnum::kPrepare); + DatabasesAdded prepareCommitEvent( + {DatabaseNameUtil::deserialize(boost::none, dbName)}, + false /*areImported*/, + CommitPhaseEnum::kPrepare); prepareCommitEvent.setPrimaryShard(resolvedPrimaryShard->getId()); - uassertStatusOK(_notifyClusterOnNewDatabases( - opCtx, prepareCommitEvent, {resolvedPrimaryShard->getId()})); + uassertStatusOK(_notifyClusterOnNewDatabases(opCtx, prepareCommitEvent, allShards)); } const auto transactionChain = [db](const txn_api::TransactionClient& txnClient, @@ -270,24 +321,32 @@ DatabaseType ShardingCatalogManager::createDatabase( auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); txn_api::SyncTransactionWithRetries txn( - opCtx, sleepInlineExecutor, nullptr /*resourceYielder*/, inlineExecutor); + opCtx, executor, nullptr /*resourceYielder*/, inlineExecutor); txn.run(opCtx, transactionChain); - hangBeforeNotifyingCreateDatabaseCommitted.pauseWhileSet(); - - DatabasesAdded commitCompletedEvent({DatabaseName(dbName)}, false /*areImported*/); - commitCompletedEvent.setPhase(CommitPhaseEnum::kSuccessful); - const auto notificationOutcome = _notifyClusterOnNewDatabases( - opCtx, commitCompletedEvent, {resolvedPrimaryShard->getId()}); + DatabasesAdded commitCompletedEvent( + {DatabaseNameUtil::deserialize(boost::none, dbName)}, + false /*areImported*/, + CommitPhaseEnum::kSuccessful); + const auto notificationOutcome = + _notifyClusterOnNewDatabases(opCtx, commitCompletedEvent, allShards); if (!notificationOutcome.isOK()) { LOGV2_WARNING(7175500, "Unable to send out notification of successful createDatabase", "db"_attr = db, "err"_attr = notificationOutcome); } + + ShardingLogging::get(opCtx)->logChange(opCtx, + "createDatabase", + dbName, + /* details */ BSONObj(), + ShardingCatalogClient::kMajorityWriteConcern, + _localConfigShard, + 
_localCatalogClient.get()); + return std::make_pair(resolvedPrimaryShard, db); } }(); @@ -343,7 +402,8 @@ void ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx, const auto updateDatabaseEntryOp = [&] { const auto query = [&] { BSONObjBuilder bsonBuilder; - bsonBuilder.append(DatabaseType::kNameFieldName, dbName.db()); + bsonBuilder.append(DatabaseType::kNameFieldName, + DatabaseNameUtil::serialize(dbName)); // Include the version in the update filter to be resilient to potential network // retries and delayed messages. for (const auto [fieldName, fieldValue] : expectedDbVersion.toBSON()) { @@ -409,10 +469,9 @@ void ShardingCatalogManager::commitMovePrimary(OperationContext* opCtx, auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); txn_api::SyncTransactionWithRetries txn(opCtx, - sleepInlineExecutor, + executor, nullptr, /*resourceYielder*/ inlineExecutor); txn.run(opCtx, transactionChain); diff --git a/src/mongo/db/s/config/sharding_catalog_manager_database_operations_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_database_operations_test.cpp index 53aff8a7b06b9..a7424c3d27716 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_database_operations_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_database_operations_test.cpp @@ -27,8 +27,20 @@ * it in the license file. */ +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/shard_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp index e0cf9bd774e6c..9a0f34813ed10 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_ensure_chunk_version_is_greater_than_test.cpp @@ -27,8 +27,30 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -43,7 +65,8 @@ class EnsureChunkVersionIsGreaterThanTest : public ConfigServerTestFixture { shard.setHost(_shardName + ":12"); setupShards({shard}); } - const NamespaceString _nss{"TestDB", "TestColl"}; + const NamespaceString _nss = + NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); const UUID _collUuid = UUID::gen(); const KeyPattern _keyPattern{BSON("x" << 1)}; }; diff --git a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp index 1f260d2fea494..cc85d2717691d 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_merge_chunks_test.cpp @@ -29,20 +29,51 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/read_preference.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" #include "mongo/s/catalog/type_changelog.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_tags.h" namespace mongo { namespace { @@ -694,9 +725,27 @@ class MergeAllChunksOnShardTest : public ConfigServerTestFixture { ConfigServerTestFixture::tearDown(); } - /* Setup shareded collection randomly spreading chunks across shards */ + /* Returns `numBounds` random split points sampled from the following list: + * - [MinKey, 0, 1, 2, ..., 
`_maxNumChunks - 1`, MaxKey] + */ + std::vector getRandomBoundsOnShardKeySpace(int numBounds) { + std::vector potentialBounds{_keyPattern.globalMin()}; + for (int i = 0; i < _maxNumChunks; i++) { + potentialBounds.push_back(BSON("x" << i)); + } + potentialBounds.push_back(_keyPattern.globalMax()); + + std::vector randomlySelectedBounds; + std::sample(potentialBounds.begin(), + potentialBounds.end(), + std::back_inserter(randomlySelectedBounds), + numBounds, + _random.urbg()); + return randomlySelectedBounds; + } + + /* Setup sharded collection randomly spreading chunks across shards */ void setupCollectionWithRandomRoutingTable() { - PseudoRandom random(SecureRandom().nextInt64()); ChunkVersion collPlacementVersion{{_epoch, _ts}, {1, 0}}; // Generate chunk with the provided parameters and increase current collection placement @@ -713,34 +762,62 @@ class MergeAllChunksOnShardTest : public ConfigServerTestFixture { // When `onCurrentShardSince` is set to "Timestamp(0, 1)", the chunk is mergeable // because the snapshot window passed. When it is set to "max", the chunk is not // mergeable because the snapshot window did not pass - auto randomValidAfter = random.nextInt64() % 2 ? Timestamp(0, 1) : Timestamp::max(); + auto randomValidAfter = _random.nextInt64() % 2 ? Timestamp(0, 1) : Timestamp::max(); chunk.setOnCurrentShardSince(randomValidAfter); chunk.setHistory({ChunkHistory{randomValidAfter, shard.getName()}}); // Rarely create a jumbo chunk (not mergeable) - chunk.setJumbo(random.nextInt64() % 10 == 0); + chunk.setJumbo(_random.nextInt64() % 10 == 0); return chunk; }; - int numChunks = random.nextInt32(19) + 1; // minimum 1 chunks, maximum 20 chunks - std::vector chunks; + int numChunks = + _random.nextInt32(_maxNumChunks) + 1; // minimum 1 chunks, maximum 20 chunks + + std::vector chunksBounds = getRandomBoundsOnShardKeySpace(numChunks + 1); + { + // Make sure the whole shard key space is covered, potentially replacing first/last + // bounds + std::replace_if( + chunksBounds.begin(), + chunksBounds.begin() + 1, + [&](BSONObj& minBound) { return minBound.woCompare(_keyPattern.globalMin()) != 0; }, + _keyPattern.globalMin()); + std::replace_if( + chunksBounds.end() - 1, + chunksBounds.end(), + [&](BSONObj& maxBound) { return maxBound.woCompare(_keyPattern.globalMax()) != 0; }, + _keyPattern.globalMax()); + } - // Loop generating random routing table: [MinKey, 0), [1, 2), [2, 3), ... [x, MaxKey] - int nextMin; - for (int nextMax = 0; nextMax < numChunks; nextMax++) { - auto randomShard = _shards.at(random.nextInt64() % _shards.size()); - // set min as `MinKey` during first iteration, otherwise next min - auto min = nextMax == 0 ? _keyPattern.globalMin() : BSON("x" << nextMin); - // set max as `MaxKey` during last iteration, otherwise next max - auto max = nextMax == numChunks - 1 ? 
_keyPattern.globalMax() : BSON("x" << nextMax); - auto chunk = generateChunk(randomShard, min, max); - nextMin = nextMax; + std::vector chunks; + for (size_t i = 0; i < chunksBounds.size() - 1; i++) { + auto randomShard = _shards.at(_random.nextInt64() % _shards.size()); + auto chunk = generateChunk(randomShard, chunksBounds.at(i), chunksBounds.at(i + 1)); chunks.push_back(chunk); } setupCollection(_nss, _keyPattern, chunks); }; + /* Randomly setup minimum 0 zones, maximum 3 zones */ + void setupRandomZones() { + int numZones = _random.nextInt32(4); // minimum 0 zones, maximum 3 zones + if (numZones == 0) { + return; + } + + // Create random zones on the same portion of shard key space covered by chunks generation + std::vector zonesBounds = + getRandomBoundsOnShardKeySpace(numZones * 2); // 2 bounds per zone + + for (int i = 0; i < numZones; i = i + 2) { + const auto zoneRange = ChunkRange(zonesBounds.at(i), zonesBounds.at(i + 1)); + ShardingCatalogManager::get(operationContext()) + ->assignKeyRangeToZone(operationContext(), _nss, zoneRange, _zoneName); + } + } + /* Get routing table for the collection under testing */ std::vector getChunks() { const auto query = BSON(ChunkType::collectionUUID() << _collUuid); @@ -760,6 +837,62 @@ return chunks; } + /* + * Return a vector of zones (and no-zones) overlapping with the current chunk (or with the whole + * shard key space when min/max not specified) + */ + std::vector getZones(boost::optional chunk = boost::none) { + const auto& chunkMinKey = + chunk.is_initialized() ? chunk->getMin() : _keyPattern.globalMin(); + const auto& chunkMaxKey = + chunk.is_initialized() ? chunk->getMax() : _keyPattern.globalMax(); + + DBDirectClient zonesClient{operationContext()}; + FindCommandRequest zonesFindRequest{TagsType::ConfigNS}; + zonesFindRequest.setSort(BSON(TagsType::min << 1)); + + const auto onlyZonesOverlappingWithChunkFilter = [&]() { + BSONObjBuilder queryBuilder; + queryBuilder.append(TagsType::ns(), NamespaceStringUtil::serialize(_nss)); + BSONArrayBuilder norBuilder(queryBuilder.subarrayStart("$nor")); + norBuilder.append(BSON(TagsType::min() << BSON("$gte" << chunkMaxKey))); + norBuilder.append(BSON(TagsType::max() << BSON("$lte" << chunkMinKey))); + norBuilder.done(); + return queryBuilder.obj(); + }(); + + zonesFindRequest.setFilter(onlyZonesOverlappingWithChunkFilter); + const auto zonesCursor{zonesClient.find(std::move(zonesFindRequest))}; + + std::vector zones; + while ((zonesCursor->more())) { + zones.push_back(zonesCursor->next()); + } + + if (chunk.is_initialized() && zones.size() > 0) { + // Account for no-zone: two contiguous chunks could be partially overlapping with the + // same zone, which does not necessarily mean they need to be merged.
+ // + // Example: + // - ZONE: [4, 7) + // -- Chunk 0: [3, 5) + // -- Chunk 1: [5, 8) + // + // They will not be merged because the balancer will consider the following (no-)zones: + // [MinKey, 4), [4, 7), [7,MaxKey) + const auto& zonesMin = zones.front().getObjectField(ChunkType::min()); + const auto& zonesMax = zones.back().getObjectField(ChunkType::max()); + if (zonesMin.woCompare(chunkMinKey) > 0) { + zones.insert(zones.begin(), BSON("NOZONE" << 1)); + } + if (zonesMax.woCompare(chunkMaxKey) > 0) { + zones.insert(zones.end(), BSON("NOZONE" << 1)); + } + } + + return zones; + } + void assertConsistentRoutingTableWithNoContiguousMergeableChunksOnTheSameShard( std::vector routingTable) { ASSERT_GTE(routingTable.size(), 0); @@ -774,12 +907,34 @@ class MergeAllChunksOnShardTest : public ConfigServerTestFixture { // Chunks with the following carachteristics are not mergeable: // - Jumbo chunks // - Chunks with `onCurrentShardSince` higher than "now + snapshot window" - // So it is excpected for them to potentially have a contiguous chunk on the same shard. + // - Contiguous chunks belonging to the same shard but falling into different zones + // So it is expected for them to potentially have a contiguous chunk on the same shard. if (!prevChunk.getJumbo() && !(*(prevChunk.getOnCurrentShardSince()) == Timestamp::max()) && !currChunk.getJumbo() && !(*(currChunk.getOnCurrentShardSince()) == Timestamp::max())) { - ASSERT_NOT_EQUALS(prevChunk.getShard().compare(currChunk.getShard()), 0); + if (prevChunk.getShard().compare(currChunk.getShard()) != 0) { + // Chunks belong to different shards + continue; + } + // Chunks belong to the same shard, make sure they fall into different zones + const auto zonesPrevChunk = + getZones(ChunkRange{prevChunk.getMin(), prevChunk.getMax()}); + const auto zonesCurrChunk = + getZones(ChunkRange{currChunk.getMin(), currChunk.getMax()}); + if (zonesPrevChunk.size() == zonesCurrChunk.size()) { + if (std::equal(zonesPrevChunk.begin(), + zonesPrevChunk.end(), + zonesCurrChunk.begin(), + [](const BSONObj& l, const BSONObj& r) { + return l.woCompare(r) == 0; + })) { + FAIL(str::stream() + << "Chunks " << prevChunk.toString() << " and " << currChunk.toString() + << " not merged despite belonging to the same shard and falling " + "into the same zone (or both in no zone)"); + } + } } } @@ -893,7 +1048,7 @@ class MergeAllChunksOnShardTest : public ConfigServerTestFixture { size_t numMerges = 0; for (const auto& chunkDiff : chunksDiff) { BSONObjBuilder query; - query << ChangeLogType::what("merge") << ChangeLogType::ns(nss.ns()); + query << ChangeLogType::what("merge") << ChangeLogType::ns(nss.toString_forTest()); chunkDiff.getVersion().serialize("details.mergedVersion", &query); auto response = assertGet(getConfigShard()->exhaustiveFindOnConfig( @@ -924,8 +1079,11 @@ class MergeAllChunksOnShardTest : public ConfigServerTestFixture { originalRoutingTable.size() - mergedRoutingTable.size() + numMerges); } + inline const static std::string _zoneName{"collZoneName"}; + inline const static auto _shards = - std::vector{ShardType{"shard0", "host0:123"}, ShardType{"shard1", "host1:123"}}; + std::vector{ShardType{"shard0", "host0:123", {_zoneName}}, + ShardType{"shard1", "host1:123", {_zoneName}}}; const NamespaceString _nss = NamespaceString::createNamespaceString_forTest("test.coll"); const UUID _collUuid = UUID::gen(); @@ -935,6 +1093,10 @@ class MergeAllChunksOnShardTest : public ConfigServerTestFixture { const KeyPattern _keyPattern{BSON("x" << 1)}; + const int _maxNumChunks = 
20; + + inline static PseudoRandom _random{SecureRandom().nextInt64()}; + ReadWriteConcernDefaultsLookupMock _lookupMock; }; @@ -952,6 +1114,7 @@ class MergeAllChunksOnShardTest : public ConfigServerTestFixture { */ TEST_F(MergeAllChunksOnShardTest, AllMergeableChunksGetSquashed) { setupCollectionWithRandomRoutingTable(); + setupRandomZones(); const auto chunksBeforeMerges = getChunks(); @@ -970,7 +1133,7 @@ TEST_F(MergeAllChunksOnShardTest, AllMergeableChunksGetSquashed) { assertConsistentChunkVersionsAfterMerges(chunksBeforeMerges, chunksAfterMerges); assertChangesWereLoggedAfterMerges(_nss, chunksBeforeMerges, chunksAfterMerges); } catch (...) { - // Log original and merged routing tables only in case of error + // Log zones and original/merged routing tables only in case of error LOGV2_INFO(7161200, "CHUNKS BEFORE MERGE", "numberOfChunks"_attr = chunksBeforeMerges.size(), @@ -979,6 +1142,8 @@ TEST_F(MergeAllChunksOnShardTest, AllMergeableChunksGetSquashed) { "CHUNKS AFTER MERGE", "numberOfChunks"_attr = chunksAfterMerges.size(), "chunks"_attr = chunksAfterMerges); + LOGV2_INFO(7805200, "ZONES", "zones"_attr = getZones()); + throw; } } diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp index fa0b28e547b5a..fd26e607e846e 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_from_zone_test.cpp @@ -27,15 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/read_preference.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" -#include "mongo/s/client/shard.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp index e7ea22d1dd09f..ac6670a42d7ba 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_remove_shard_test.cpp @@ -27,31 +27,54 @@ * it in the license file. 
*/ +#include +#include #include #include -#include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/commands.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/rpc/metadata/repl_set_metadata.h" -#include "mongo/rpc/metadata/tracking_metadata.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_identity_loader.h" -#include "mongo/s/grid.h" -#include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/stdx/chrono.h" -#include "mongo/stdx/future.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp index 5424ecebd464c..21ebda523e79e 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_collection_test.cpp @@ -28,20 +28,48 @@ */ +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include #include -#include "mongo/client/read_preference.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_mock.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/initial_split_policy.h" -#include "mongo/rpc/metadata/tracking_metadata.h" -#include "mongo/s/balancer_configuration.h" +#include 
"mongo/db/shard_id.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog/type_tags.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" #include "mongo/s/shard_key_pattern.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -91,8 +119,7 @@ TEST_F(CreateFirstChunksTest, NonEmptyCollection_NoZones_OneChunkToPrimary) { Lock::GlobalWrite lk(operationContext()); CollectionCatalog::write(getServiceContext(), [&](CollectionCatalog& catalog) { catalog.registerCollection(operationContext(), - uuid, - std::make_shared(kNamespace), + std::make_shared(uuid, kNamespace), /*ts=*/boost::none); }); } diff --git a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp index 367f9a757abda..f5ee025bd89a8 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_shard_operations.cpp @@ -27,74 +27,133 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/config/sharding_catalog_manager.h" - +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/client/connection_string.h" #include "mongo/client/fetcher.h" #include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/client/replica_set_monitor.h" -#include "mongo/db/api_parameters.h" #include "mongo/db/audit.h" -#include "mongo/db/catalog_raii.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/client.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" #include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/commands/notify_sharding_event_gen.h" #include "mongo/db/commands/set_cluster_parameter_invocation.h" #include "mongo/db/commands/set_feature_compatibility_version_gen.h" -#include "mongo/db/feature_compatibility_version_parser.h" +#include "mongo/db/commands/set_user_write_block_mode_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/keys_collection_util.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/persistent_task_store.h" +#include 
"mongo/db/query/find_command.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/repl/hello_gen.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/add_shard_cmd_gen.h" #include "mongo/db/s/add_shard_util.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/range_deletion_task_gen.h" -#include "mongo/db/s/sharding_ddl_util.h" +#include "mongo/db/s/sharding_config_server_parameters_gen.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/db/s/sharding_util.h" -#include "mongo/db/s/type_shard_identity.h" #include "mongo/db/s/user_writes_critical_section_document_gen.h" #include "mongo/db/s/user_writes_recoverable_critical_section_service.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/db/wire_version.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/connection_pool_stats.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/idl/cluster_server_parameter_common.h" -#include "mongo/idl/cluster_server_parameter_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" -#include "mongo/rpc/metadata/tracking_metadata.h" +#include "mongo/rpc/metadata.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" -#include "mongo/s/cluster_identity_loader.h" #include "mongo/s/database_version.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" -#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -103,8 +162,7 @@ namespace mongo { namespace { 
-MONGO_FAIL_POINT_DEFINE(hangBeforeNotifyingaddShardCommitted); -MONGO_FAIL_POINT_DEFINE(hangAfterDroppingCollectionInTransitionToDedicatedConfigServer); +MONGO_FAIL_POINT_DEFINE(hangAfterDroppingDatabaseInTransitionToDedicatedConfigServer); using CallbackHandle = executor::TaskExecutor::CallbackHandle; using CallbackArgs = executor::TaskExecutor::CallbackArgs; @@ -116,6 +174,8 @@ const WriteConcernOptions kMajorityWriteConcern{WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, WriteConcernOptions::kNoTimeout}; +const Seconds kRemoteCommandTimeout{60}; + /** * Generates a unique name to be given to a newly added shard. */ @@ -173,7 +233,7 @@ StatusWith ShardingCatalogManager::_runCommandForAddShar auto host = std::move(swHost.getValue()); executor::RemoteCommandRequest request( - host, dbName.toString(), cmdObj, rpc::makeEmptyMetadata(), opCtx, Seconds(60)); + host, dbName.toString(), cmdObj, rpc::makeEmptyMetadata(), opCtx, kRemoteCommandTimeout); executor::RemoteCommandResponse response = Status(ErrorCodes::InternalError, "Internal error running command"); @@ -350,9 +410,9 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( std::shared_ptr targeter, const std::string* shardProposedName, const ConnectionString& connectionString, - bool isCatalogShard) { + bool isConfigShard) { auto swCommandResponse = _runCommandForAddShard( - opCtx, targeter.get(), DatabaseName::kAdmin.db(), BSON("isMaster" << 1)); + opCtx, targeter.get(), DatabaseName::kAdmin.db(), BSON("hello" << 1)); if (swCommandResponse.getStatus() == ErrorCodes::IncompatibleServerVersion) { return swCommandResponse.getStatus().withReason( str::stream() << "Cannot add " << connectionString.toString() @@ -363,17 +423,16 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( } // Check for a command response error - auto resIsMasterStatus = std::move(swCommandResponse.getValue().commandStatus); - if (!resIsMasterStatus.isOK()) { - return resIsMasterStatus.withContext(str::stream() - << "Error running isMaster against " - << targeter->connectionString().toString()); + auto resHelloStatus = std::move(swCommandResponse.getValue().commandStatus); + if (!resHelloStatus.isOK()) { + return resHelloStatus.withContext(str::stream() << "Error running 'hello' against " + << targeter->connectionString().toString()); } - auto resIsMaster = std::move(swCommandResponse.getValue().response); + auto resHello = std::move(swCommandResponse.getValue().response); // Fail if the node being added is a mongos. - const std::string msg = resIsMaster.getStringField("msg").toString(); + const std::string msg = resHello.getStringField("msg").toString(); if (msg == "isdbgrid") { return {ErrorCodes::IllegalOperation, "cannot add a mongos as a shard"}; } @@ -384,23 +443,23 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( // because of our internal wire version protocol. So we can safely invariant here that the node // is compatible. long long maxWireVersion; - Status status = bsonExtractIntegerField(resIsMaster, "maxWireVersion", &maxWireVersion); + Status status = bsonExtractIntegerField(resHello, "maxWireVersion", &maxWireVersion); if (!status.isOK()) { - return status.withContext(str::stream() << "isMaster returned invalid 'maxWireVersion' " + return status.withContext(str::stream() << "hello returned invalid 'maxWireVersion' " << "field when attempting to add " << connectionString.toString() << " as a shard"); } - // Check whether there is a master. If there isn't, the replica set may not have been - // initiated. 
If the connection is a standalone, it will return true for isMaster. - bool isMaster; - status = bsonExtractBooleanField(resIsMaster, "ismaster", &isMaster); + // Check whether the host is a writable primary. If not, the replica set may not have been + // initiated. If the connection is a standalone, it will return true for "isWritablePrimary". + bool isWritablePrimary; + status = bsonExtractBooleanField(resHello, "isWritablePrimary", &isWritablePrimary); if (!status.isOK()) { - return status.withContext(str::stream() << "isMaster returned invalid 'ismaster' " + return status.withContext(str::stream() << "hello returned invalid 'isWritablePrimary' " << "field when attempting to add " << connectionString.toString() << " as a shard"); } - if (!isMaster) { + if (!isWritablePrimary) { return {ErrorCodes::NotWritablePrimary, str::stream() << connectionString.toString() @@ -409,7 +468,7 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( } const std::string providedSetName = connectionString.getSetName(); - const std::string foundSetName = resIsMaster["setName"].str(); + const std::string foundSetName = resHello["setName"].str(); // Make sure the specified replica set name (if any) matches the actual shard's replica set if (providedSetName.empty() && !foundSetName.empty()) { @@ -422,7 +481,7 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( if (!providedSetName.empty() && foundSetName.empty()) { return {ErrorCodes::OperationFailed, str::stream() << "host did not return a set name; " - << "is the replica set still initializing? " << resIsMaster}; + << "is the replica set still initializing? " << resHello}; } // Make sure the set name specified in the connection string matches the one where its hosts @@ -434,14 +493,14 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( } // Is it a config server? 
- if (resIsMaster.hasField("configsvr") && !isCatalogShard) { + if (resHello.hasField("configsvr") && !isConfigShard) { return {ErrorCodes::OperationFailed, str::stream() << "Cannot add " << connectionString.toString() << " as a shard since it is a config server"}; } - if (resIsMaster.hasField(HelloCommandReply::kIsImplicitDefaultMajorityWCFieldName) && - !resIsMaster.getBoolField(HelloCommandReply::kIsImplicitDefaultMajorityWCFieldName) && + if (resHello.hasField(HelloCommandReply::kIsImplicitDefaultMajorityWCFieldName) && + !resHello.getBoolField(HelloCommandReply::kIsImplicitDefaultMajorityWCFieldName) && !ReadWriteConcernDefaults::get(opCtx).isCWWCSet(opCtx)) { return { ErrorCodes::OperationFailed, @@ -454,11 +513,11 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( "using the setDefaultRWConcern command and try again."}; } - if (resIsMaster.hasField(HelloCommandReply::kCwwcFieldName)) { - auto cwwcOnShard = WriteConcernOptions::parse( - resIsMaster.getObjectField(HelloCommandReply::kCwwcFieldName)) - .getValue() - .toBSON(); + if (resHello.hasField(HelloCommandReply::kCwwcFieldName)) { + auto cwwcOnShard = + WriteConcernOptions::parse(resHello.getObjectField(HelloCommandReply::kCwwcFieldName)) + .getValue() + .toBSON(); auto cachedCWWC = ReadWriteConcernDefaults::get(opCtx).getCWWC(opCtx); if (!cachedCWWC) { @@ -491,20 +550,20 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( if (!providedSetName.empty()) { std::set hostSet; - BSONObjIterator iter(resIsMaster["hosts"].Obj()); + BSONObjIterator iter(resHello["hosts"].Obj()); while (iter.more()) { hostSet.insert(iter.next().String()); // host:port } - if (resIsMaster["passives"].isABSONObj()) { - BSONObjIterator piter(resIsMaster["passives"].Obj()); + if (resHello["passives"].isABSONObj()) { + BSONObjIterator piter(resHello["passives"].Obj()); while (piter.more()) { hostSet.insert(piter.next().String()); // host:port } } - if (resIsMaster["arbiters"].isABSONObj()) { - BSONObjIterator piter(resIsMaster["arbiters"].Obj()); + if (resHello["arbiters"].isABSONObj()) { + BSONObjIterator piter(resHello["arbiters"].Obj()); while (piter.more()) { hostSet.insert(piter.next().String()); // host:port } @@ -516,7 +575,7 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( return {ErrorCodes::OperationFailed, str::stream() << "in seed list " << connectionString.toString() << ", host " << host << " does not belong to replica set " << foundSetName - << "; found " << resIsMaster.toString()}; + << "; found " << resHello.toString()}; } } } @@ -531,7 +590,7 @@ StatusWith ShardingCatalogManager::_validateHostAsShard( } // Disallow adding shard replica set with name 'config' - if (!isCatalogShard && actualShardName == DatabaseName::kConfig.db()) { + if (!isConfigShard && actualShardName == DatabaseName::kConfig.db()) { return {ErrorCodes::BadValue, "use of shard replica set with name 'config' is not allowed"}; } @@ -627,7 +686,7 @@ StatusWith ShardingCatalogManager::addShard( OperationContext* opCtx, const std::string* shardProposedName, const ConnectionString& shardConnectionString, - bool isCatalogShard) { + bool isConfigShard) { if (!shardConnectionString) { return {ErrorCodes::BadValue, "Invalid connection string"}; } @@ -677,7 +736,7 @@ StatusWith ShardingCatalogManager::addShard( // Validate the specified connection string may serve as shard at all auto shardStatus = _validateHostAsShard( - opCtx, targeter, shardProposedName, shardConnectionString, isCatalogShard); + opCtx, targeter, shardProposedName, shardConnectionString, 
isConfigShard); if (!shardStatus.isOK()) { return shardStatus.getStatus(); } @@ -710,6 +769,15 @@ StatusWith ShardingCatalogManager::addShard( "collection from the shard manually and try again."); } + if (!isConfigShard) { + // If the shard is also the config server itself, there is no need to pull the keys since + // the keys already exists in the local admin.system.keys collection. + auto pullKeysStatus = _pullClusterTimeKeys(opCtx, targeter); + if (!pullKeysStatus.isOK()) { + return pullKeysStatus; + } + } + // If a name for a shard wasn't provided, generate one if (shardType.getName().empty()) { auto result = generateNewShardName(opCtx, _localConfigShard.get()); @@ -734,7 +802,7 @@ StatusWith ShardingCatalogManager::addShard( return Shard::CommandResponse::processBatchWriteResponse(commandResponse, &batchResponse); }; - if (!isCatalogShard) { + if (!isConfigShard) { AddShard addShardCmd = add_shard_util::createAddShardCmd(opCtx, shardType.getName()); // Use the _addShard command to add the shard, which in turn inserts a shardIdentity @@ -760,30 +828,6 @@ StatusWith ShardingCatalogManager::addShard( // while blocking on the network). FixedFCVRegion fcvRegion(opCtx); - // Prevent the race where an FCV downgrade happens concurrently with the catalogShard - // being added and the FCV downgrade finishes before the catalogShard is added. - uassert( - 5563604, - "Cannot add catalog shard because it is not supported in featureCompatibilityVersion: {}"_format( - multiversion::toString(serverGlobalParams.featureCompatibility.getVersion())), - gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility) || - !isCatalogShard); - - if (isCatalogShard) { - // TODO SERVER-75391: Remove. - // - // At this point we know the config primary is in the latest FCV, but secondaries may - // not yet have replicated the FCV update, so write a noop and wait for it to replicate - // to all nodes in the config server to guarantee they have replicated up to the latest - // FCV. - // - // This guarantees all secondaries use the shard server method to refresh their - // metadata, which contains synchronization to prevent secondaries from serving reads - // for owned chunks that have not yet replicated to them. - _performLocalNoopWriteWithWAllWriteConcern( - opCtx, "w:all write barrier in transitionToCatalogShard"); - } - uassert(5563603, "Cannot add shard while in upgrading/downgrading FCV state", !fcvRegion->isUpgradingOrDowngrading()); @@ -793,7 +837,7 @@ StatusWith ShardingCatalogManager::addShard( fcvRegion == multiversion::GenericFCV::kLastContinuous || fcvRegion == multiversion::GenericFCV::kLastLTS); - if (!isCatalogShard) { + if (!isConfigShard) { SetFeatureCompatibilityVersion setFcvCmd(fcvRegion->getVersion()); setFcvCmd.setDbName(DatabaseName::kAdmin); setFcvCmd.setFromConfigServer(true); @@ -977,24 +1021,37 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx, RemoveShardProgress::PENDING_RANGE_DELETIONS, boost::none, pendingRangeDeletions}; } - // Drop the drained collections locally so the config server can transition back to catalog - // shard mode in the future without requiring users to manually drop them. - LOGV2(7509600, "Locally dropping drained collections", "shardId"_attr = name); + // Drop all tracked databases locally now that all user data has been drained so the config + // server can transition back to catalog shard mode without requiring users to manually drop + // them. 
+ LOGV2(7509600, "Locally dropping drained databases", "shardId"_attr = name); - auto shardedCollections = _localCatalogClient->getCollections(opCtx, {}); - for (auto&& collection : shardedCollections) { - DBDirectClient client(opCtx); + auto trackedDBs = + _localCatalogClient->getAllDBs(opCtx, repl::ReadConcernLevel::kLocalReadConcern); + for (auto&& db : trackedDBs) { + // Assume no multitenancy since we're dropping all user namespaces. + const auto dbName = DatabaseNameUtil::deserialize(boost::none, db.getName()); + tassert(7783700, + "Cannot drop admin or config database from the config server", + dbName != DatabaseName::kConfig && dbName != DatabaseName::kAdmin); + DBDirectClient client(opCtx); BSONObj result; - if (!client.dropCollection( - collection.getNss(), ShardingCatalogClient::kLocalWriteConcern, &result)) { - // Note attempting to drop a non-existent collection does not return an error, so - // it's safe to assert the status is ok even if an earlier attempt was interrupted - // by a failover. + if (!client.dropDatabase(dbName, ShardingCatalogClient::kLocalWriteConcern, &result)) { uassertStatusOK(getStatusFromCommandResult(result)); } - hangAfterDroppingCollectionInTransitionToDedicatedConfigServer.pauseWhileSet(opCtx); + hangAfterDroppingDatabaseInTransitionToDedicatedConfigServer.pauseWhileSet(opCtx); + } + + // Also drop the sessions collection, which we assume is the only sharded collection in the + // config database. + DBDirectClient client(opCtx); + BSONObj result; + if (!client.dropCollection(NamespaceString::kLogicalSessionsNamespace, + ShardingCatalogClient::kLocalWriteConcern, + &result)) { + uassertStatusOK(getStatusFromCommandResult(result)); } } @@ -1038,7 +1095,7 @@ RemoveShardProgress ShardingCatalogManager::removeShard(OperationContext* opCtx, Grid::get(opCtx)->shardRegistry()->reload(opCtx); if (shardId != ShardId::kConfigServerId) { - // Don't remove the catalog shard's RSM because it is used to target the config server. + // Don't remove the config shard's RSM because it is used to target the config server. ReplicaSetMonitor::remove(name); } @@ -1213,6 +1270,109 @@ void ShardingCatalogManager::_setUserWriteBlockingStateOnNewShard(OperationConte }); } +std::unique_ptr ShardingCatalogManager::_createFetcher( + OperationContext* opCtx, + std::shared_ptr targeter, + const NamespaceString& nss, + const repl::ReadConcernLevel& readConcernLevel, + FetcherDocsCallbackFn processDocsCallback, + FetcherStatusCallbackFn processStatusCallback) { + auto host = uassertStatusOK( + targeter->findHost(opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly})); + + FindCommandRequest findCommand(nss); + const auto readConcern = + repl::ReadConcernArgs(boost::optional(readConcernLevel)); + findCommand.setReadConcern(readConcern.toBSONInner()); + const Milliseconds maxTimeMS = + std::min(opCtx->getRemainingMaxTimeMillis(), Milliseconds(kRemoteCommandTimeout)); + findCommand.setMaxTimeMS(durationCount(maxTimeMS)); + + auto fetcherCallback = [processDocsCallback, + processStatusCallback](const Fetcher::QueryResponseStatus& dataStatus, + Fetcher::NextAction* nextAction, + BSONObjBuilder* getMoreBob) { + // Throw out any accumulated results on error. 
+ if (!dataStatus.isOK()) { + processStatusCallback(dataStatus.getStatus()); + return; + } + const auto& data = dataStatus.getValue(); + + try { + if (!processDocsCallback(data.documents)) { + *nextAction = Fetcher::NextAction::kNoAction; + } + } catch (DBException& ex) { + processStatusCallback(ex.toStatus()); + return; + } + processStatusCallback(Status::OK()); + + if (!getMoreBob) { + return; + } + getMoreBob->append("getMore", data.cursorId); + getMoreBob->append("collection", data.nss.coll()); + }; + + return std::make_unique(_executorForAddShard.get(), + host, + DatabaseNameUtil::serialize(nss.dbName()), + findCommand.toBSON({}), + fetcherCallback, + BSONObj(), /* metadata tracking, only used for shards */ + maxTimeMS, /* command network timeout */ + maxTimeMS /* getMore network timeout */); +} + +Status ShardingCatalogManager::_pullClusterTimeKeys( + OperationContext* opCtx, std::shared_ptr targeter) { + Status fetchStatus = + Status(ErrorCodes::InternalError, "Internal error running cursor callback in command"); + std::vector keyDocs; + + auto expireAt = opCtx->getServiceContext()->getFastClockSource()->now() + + Seconds(gNewShardExistingClusterTimeKeysExpirationSecs.load()); + auto fetcher = _createFetcher( + opCtx, + targeter, + NamespaceString::kKeysCollectionNamespace, + repl::ReadConcernLevel::kLocalReadConcern, + [&](const std::vector& docs) -> bool { + for (const BSONObj& doc : docs) { + keyDocs.push_back(keys_collection_util::makeExternalClusterTimeKeyDoc( + doc.getOwned(), boost::none /* migrationId */, expireAt)); + } + return true; + }, + [&](const Status& status) { fetchStatus = status; }); + + auto scheduleStatus = fetcher->schedule(); + if (!scheduleStatus.isOK()) { + return scheduleStatus; + } + + auto joinStatus = fetcher->join(opCtx); + if (!joinStatus.isOK()) { + return joinStatus; + } + + if (keyDocs.empty()) { + return fetchStatus; + } + + auto opTime = keys_collection_util::storeExternalClusterTimeKeyDocs(opCtx, std::move(keyDocs)); + auto waitStatus = WaitForMajorityService::get(opCtx->getServiceContext()) + .waitUntilMajority(opTime, opCtx->getCancellationToken()) + .getNoThrow(); + if (!waitStatus.isOK()) { + return waitStatus; + } + + return fetchStatus; +} + void ShardingCatalogManager::_setClusterParametersLocally(OperationContext* opCtx, const boost::optional& tenantId, const std::vector& parameters) { @@ -1222,7 +1382,8 @@ void ShardingCatalogManager::_setClusterParametersLocally(OperationContext* opCt SetClusterParameter setClusterParameterRequest( BSON(parameter["_id"].String() << parameter.filterFieldsUndotted( BSON("_id" << 1 << "clusterParameterTime" << 1), false))); - setClusterParameterRequest.setDbName(DatabaseName(tenantId, DatabaseName::kAdmin.db())); + setClusterParameterRequest.setDbName( + DatabaseNameUtil::deserialize(tenantId, DatabaseName::kAdmin.db())); std::unique_ptr parameterService = std::make_unique(); SetClusterParameterInvocation invocation{std::move(parameterService), dbService}; @@ -1241,12 +1402,8 @@ void ShardingCatalogManager::_pullClusterParametersFromNewShard(OperationContext // We can safely query the cluster parameters because the replica set must have been started // with --shardsvr in order to add it into the cluster, and in this mode no setClusterParameter // can be called on the replica set directly. 
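Both _pullClusterTimeKeys() above and the refactored _pullClusterParametersFromNewShard() below drive the new _createFetcher() helper in the same way. A condensed sketch of that calling pattern, reusing only names already introduced in this file and eliding the expireAt bookkeeping:

    std::vector<BSONObj> docs;
    Status fetchStatus(ErrorCodes::InternalError, "fetcher callback never ran");
    auto fetcher = _createFetcher(
        opCtx,
        targeter,
        NamespaceString::kKeysCollectionNamespace,
        repl::ReadConcernLevel::kLocalReadConcern,
        [&](const std::vector<BSONObj>& batch) -> bool {
            for (const BSONObj& doc : batch) {
                docs.push_back(doc.getOwned());  // own each document beyond the batch lifetime
            }
            return true;  // keep issuing getMore; returning false stops after this batch
        },
        [&](const Status& status) { fetchStatus = status; });
    uassertStatusOK(fetcher->schedule());   // start the remote find
    uassertStatusOK(fetcher->join(opCtx));  // block until the cursor is exhausted or fails
    uassertStatusOK(fetchStatus);           // outcome reported through the status callback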
- auto host = uassertStatusOK( - targeter->findHost(opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly})); auto tenantIds = uassertStatusOK(getTenantsWithConfigDbsOnShard(opCtx, shard, _executorForAddShard.get())); - const Milliseconds maxTimeMS = - std::min(opCtx->getRemainingMaxTimeMillis(), Milliseconds(Seconds{30})); std::vector> fetchers; fetchers.reserve(tenantIds.size()); @@ -1256,58 +1413,25 @@ void ShardingCatalogManager::_pullClusterParametersFromNewShard(OperationContext tenantIds.size(), Status(ErrorCodes::InternalError, "Internal error running cursor callback in command")); std::vector> allParameters(tenantIds.size()); + int i = 0; for (const auto& tenantId : tenantIds) { - BSONObjBuilder findCmdBuilder; - { - FindCommandRequest findCommand(NamespaceString::makeClusterParametersNSS(tenantId)); - auto readConcern = repl::ReadConcernArgs(boost::optional( - repl::ReadConcernLevel::kMajorityReadConcern)); - findCommand.setReadConcern(readConcern.toBSONInner()); - findCommand.setMaxTimeMS(durationCount(maxTimeMS)); - findCommand.serialize(BSONObj(), &findCmdBuilder); - } - - auto fetcherCallback = - [this, &statuses, &allParameters, i](const Fetcher::QueryResponseStatus& dataStatus, - Fetcher::NextAction* nextAction, - BSONObjBuilder* getMoreBob) { - // Throw out any accumulated results on error - if (!dataStatus.isOK()) { - statuses[i] = dataStatus.getStatus(); - return; - } - const auto& data = dataStatus.getValue(); - + auto fetcher = _createFetcher( + opCtx, + targeter, + NamespaceString::makeClusterParametersNSS(tenantId), + repl::ReadConcernLevel::kMajorityReadConcern, + [&allParameters, i](const std::vector& docs) -> bool { std::vector parameters; - for (const BSONObj& doc : data.documents) { + for (const BSONObj& doc : docs) { parameters.push_back(doc.getOwned()); } - allParameters[i] = parameters; - statuses[i] = Status::OK(); - - if (!getMoreBob) { - return; - } - getMoreBob->append("getMore", data.cursorId); - getMoreBob->append("collection", data.nss.coll()); - }; - - auto fetcher = std::make_unique( - _executorForAddShard.get(), - host, - NamespaceString::makeClusterParametersNSS(tenantId).dbName().toStringWithTenantId(), - findCmdBuilder.obj(), - fetcherCallback, - BSONObj(), /* metadata tracking, only used for shards */ - maxTimeMS, /* command network timeout */ - maxTimeMS /* getMore network timeout */); - + return true; + }, + [&statuses, i](const Status& status) { statuses[i] = status; }); uassertStatusOK(fetcher->schedule()); - fetchers.push_back(std::move(fetcher)); - i++; } @@ -1315,7 +1439,6 @@ void ShardingCatalogManager::_pullClusterParametersFromNewShard(OperationContext for (const auto& tenantId : tenantIds) { uassertStatusOK(fetchers[i]->join(opCtx)); uassertStatusOK(statuses[i]); - _setClusterParametersLocally(opCtx, tenantId, allParameters[i]); i++; @@ -1330,16 +1453,16 @@ void ShardingCatalogManager::_removeAllClusterParametersFromShard(OperationConte // Remove possible leftovers config.clusterParameters documents from the new shard. 
for (const auto& tenantId : tenantsOnTarget) { - write_ops::DeleteCommandRequest deleteOp( - NamespaceString::makeClusterParametersNSS(tenantId)); + const auto& nss = NamespaceString::makeClusterParametersNSS(tenantId); + write_ops::DeleteCommandRequest deleteOp(nss); write_ops::DeleteOpEntry query({}, true /*multi*/); deleteOp.setDeletes({query}); - const auto swCommandResponse = _runCommandForAddShard( - opCtx, - targeter.get(), - NamespaceString::makeClusterParametersNSS(tenantId).dbName().toStringWithTenantId(), - CommandHelpers::appendMajorityWriteConcern(deleteOp.toBSON({}))); + const auto swCommandResponse = + _runCommandForAddShard(opCtx, + targeter.get(), + DatabaseNameUtil::serialize(nss.dbName()), + CommandHelpers::appendMajorityWriteConcern(deleteOp.toBSON({}))); uassertStatusOK(swCommandResponse.getStatus()); uassertStatusOK(getStatusFromWriteCommandReply(swCommandResponse.getValue().response)); } @@ -1356,19 +1479,20 @@ void ShardingCatalogManager::_pushClusterParametersToNewShard( LOGV2(6360600, "Pushing cluster parameters into new shard"); for (const auto& [tenantId, clusterParameters] : allClusterParameters) { + const auto& dbName = DatabaseNameUtil::deserialize(tenantId, DatabaseName::kAdmin.db()); // Push cluster parameters into the newly added shard. for (auto& parameter : clusterParameters) { ShardsvrSetClusterParameter setClusterParamsCmd( BSON(parameter["_id"].String() << parameter.filterFieldsUndotted( BSON("_id" << 1 << "clusterParameterTime" << 1), false))); - setClusterParamsCmd.setDbName(DatabaseName(tenantId, DatabaseName::kAdmin.db())); + setClusterParamsCmd.setDbName(dbName); setClusterParamsCmd.setClusterParameterTime( parameter["clusterParameterTime"].timestamp()); const auto cmdResponse = _runCommandForAddShard( opCtx, targeter.get(), - DatabaseName(tenantId, DatabaseName::kAdmin.db()).toStringWithTenantId(), + DatabaseNameUtil::serialize(dbName), CommandHelpers::appendMajorityWriteConcern(setClusterParamsCmd.toBSON({}))); uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(cmdResponse)); } @@ -1433,8 +1557,8 @@ void ShardingCatalogManager::_addShardInTransaction( databasesInNewShard.end(), std::back_inserter(importedDbNames), [](const std::string& s) { return DatabaseNameUtil::deserialize(boost::none, s); }); - DatabasesAdded notification(std::move(importedDbNames), true /*addImported*/); - notification.setPhase(CommitPhaseEnum::kPrepare); + DatabasesAdded notification( + std::move(importedDbNames), true /*addImported*/, CommitPhaseEnum::kPrepare); notification.setPrimaryShard(ShardId(newShard.getName())); uassertStatusOK(_notifyClusterOnNewDatabases(opCtx, notification, existingShardIds)); @@ -1505,13 +1629,10 @@ void ShardingCatalogManager::_addShardInTransaction( auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - txn_api::SyncTransactionWithRetries txn(opCtx, sleepInlineExecutor, nullptr, inlineExecutor); + txn_api::SyncTransactionWithRetries txn(opCtx, executor, nullptr, inlineExecutor); txn.run(opCtx, transactionChain); - hangBeforeNotifyingaddShardCommitted.pauseWhileSet(); - // 3. Reuse the existing notification object to also broadcast the event of successful commit. 
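    // The same DatabasesAdded notification is therefore broadcast twice: once with
    // CommitPhaseEnum::kPrepare (and the new shard set as primary shard) before the commit
    // transaction runs, and again below with CommitPhaseEnum::kSuccessful after the transaction
    // has committed, this time with the primary shard field cleared.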
notification.setPhase(CommitPhaseEnum::kSuccessful); notification.setPrimaryShard(boost::none); @@ -1568,11 +1689,10 @@ void ShardingCatalogManager::_removeShardInTransaction(OperationContext* opCtx, .semi(); }; + auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor( - Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()); - txn_api::SyncTransactionWithRetries txn(opCtx, sleepInlineExecutor, nullptr, inlineExecutor); + txn_api::SyncTransactionWithRetries txn(opCtx, executor, nullptr, inlineExecutor); txn.run(opCtx, removeShardFn); } diff --git a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp index 02ad97f27ca6f..06bb97832d166 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_split_chunk_test.cpp @@ -27,15 +27,42 @@ * it in the license file. */ -#include "mongo/client/read_preference.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp index 4fd943c3bfbf0..b12ea029ee3ef 100644 --- a/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp +++ b/src/mongo/db/s/config/sharding_catalog_manager_zone_operations.cpp @@ -28,25 +28,46 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/s/config/sharding_catalog_manager.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/client/read_preference.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/balancer/balancer_policy.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" #include 
"mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/write_concern_options.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" #include "mongo/s/client/shard.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/grid.h" #include "mongo/s/shard_key_pattern.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -70,13 +91,14 @@ Status checkForOverlappingZonedKeyRange(OperationContext* opCtx, const KeyPattern& shardKeyPattern) { DistributionStatus chunkDist(nss, ShardToChunksMap{}); - auto tagStatus = configServer->exhaustiveFindOnConfig(opCtx, - kConfigPrimarySelector, - repl::ReadConcernLevel::kLocalReadConcern, - TagsType::ConfigNS, - BSON(TagsType::ns(nss.ns())), - BSONObj(), - 0); + auto tagStatus = configServer->exhaustiveFindOnConfig( + opCtx, + kConfigPrimarySelector, + repl::ReadConcernLevel::kLocalReadConcern, + TagsType::ConfigNS, + BSON(TagsType::ns(NamespaceStringUtil::serialize(nss))), + BSONObj(), + 0); if (!tagStatus.isOK()) { return tagStatus.getStatus(); } @@ -126,12 +148,13 @@ ChunkRange includeFullShardKey(OperationContext* opCtx, kConfigPrimarySelector, repl::ReadConcernLevel::kLocalReadConcern, CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), + BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(nss)), BSONObj(), 1)) .docs; uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << nss.ns() << " is not sharded", + str::stream() << nss.toStringForErrorMsg() << " is not sharded", !findCollResult.empty()); CollectionType coll(findCollResult.front()); @@ -141,11 +164,11 @@ ChunkRange includeFullShardKey(OperationContext* opCtx, uassert(ErrorCodes::ShardKeyNotFound, str::stream() << "min: " << range.getMin() << " is not a prefix of the shard key " - << shardKeyBSON << " of ns: " << nss.ns(), + << shardKeyBSON << " of ns: " << nss.toStringForErrorMsg(), range.getMin().isFieldNamePrefixOf(shardKeyBSON)); uassert(ErrorCodes::ShardKeyNotFound, str::stream() << "max: " << range.getMax() << " is not a prefix of the shard key " - << shardKeyBSON << " of ns: " << nss.ns(), + << shardKeyBSON << " of ns: " << nss.toStringForErrorMsg(), range.getMax().isFieldNamePrefixOf(shardKeyBSON)); return ChunkRange(shardKeyPattern.extendRangeBound(range.getMin(), false), @@ -354,7 +377,7 @@ void ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx, } BSONObjBuilder updateBuilder; - updateBuilder.append(TagsType::ns(), nss.ns()); + updateBuilder.append(TagsType::ns(), NamespaceStringUtil::serialize(nss)); updateBuilder.append(TagsType::min(), actualRange.getMin()); updateBuilder.append(TagsType::max(), actualRange.getMax()); updateBuilder.append(TagsType::tag(), zoneName); @@ -362,7 +385,8 @@ void ShardingCatalogManager::assignKeyRangeToZone(OperationContext* opCtx, uassertStatusOK(_localCatalogClient->updateConfigDocument( opCtx, TagsType::ConfigNS, - BSON(TagsType::ns(nss.ns()) << TagsType::min(actualRange.getMin())), + BSON(TagsType::ns(NamespaceStringUtil::serialize(nss)) + << TagsType::min(actualRange.getMin())), updateBuilder.obj(), true, kNoWaitWriteConcern)); @@ 
-384,7 +408,7 @@ void ShardingCatalogManager::removeKeyRangeFromZone(OperationContext* opCtx, } BSONObjBuilder removeBuilder; - removeBuilder.append(TagsType::ns(), nss.ns()); + removeBuilder.append(TagsType::ns(), NamespaceStringUtil::serialize(nss)); removeBuilder.append(TagsType::min(), actualRange.getMin()); removeBuilder.append(TagsType::max(), actualRange.getMax()); diff --git a/src/mongo/db/s/config_server_op_observer.cpp b/src/mongo/db/s/config_server_op_observer.cpp index 267b9a23663a1..5fd235c5e3cd1 100644 --- a/src/mongo/db/s/config_server_op_observer.cpp +++ b/src/mongo/db/s/config_server_op_observer.cpp @@ -28,19 +28,31 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/config_server_op_observer.h" - +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/s/config_server_op_observer.h" #include "mongo/db/s/topology_time_ticker.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/update/update_oplog_entry_serialization.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/logv2/log.h" #include "mongo/s/catalog/type_config_version.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog_cache_loader.h" #include "mongo/s/cluster_identity_loader.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -54,7 +66,8 @@ ConfigServerOpObserver::~ConfigServerOpObserver() = default; void ConfigServerOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == VersionType::ConfigNS) { if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) { uasserted(40302, "cannot delete config.version document while in --configsvr mode"); @@ -72,7 +85,8 @@ repl::OpTime ConfigServerOpObserver::onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { if (collectionName == VersionType::ConfigNS) { if (!repl::ReplicationCoordinator::get(opCtx)->getMemberState().rollback()) { uasserted(40303, "cannot drop config.version document while in --configsvr mode"); @@ -88,8 +102,8 @@ repl::OpTime ConfigServerOpObserver::onDropCollection(OperationContext* opCtx, return {}; } -void ConfigServerOpObserver::_onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) { +void ConfigServerOpObserver::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { if (rbInfo.configServerConfigVersionRolledBack) { // Throw out any cached information related to the cluster ID. 
ShardingCatalogManager::get(opCtx)->discardCachedConfigDatabaseInitializationState(); @@ -110,7 +124,8 @@ void ConfigServerOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (coll->ns().isServerConfigurationCollection()) { auto idElement = begin->doc["_id"]; if (idElement.type() == BSONType::String && @@ -155,7 +170,9 @@ void ConfigServerOpObserver::onInserts(OperationContext* opCtx, } } -void ConfigServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) { +void ConfigServerOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.coll->ns().isServerConfigurationCollection()) { auto idElement = args.updateArgs->updatedDoc["_id"]; if (idElement.type() == BSONType::String && diff --git a/src/mongo/db/s/config_server_op_observer.h b/src/mongo/db/s/config_server_op_observer.h index f551d57bc55b8..9ccd06d3b4e7a 100644 --- a/src/mongo/db/s/config_server_op_observer.h +++ b/src/mongo/db/s/config_server_op_observer.h @@ -29,8 +29,21 @@ #pragma once +#include +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/platform/mutex.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -38,7 +51,7 @@ namespace mongo { * OpObserver which is installed on the op observers chain when the server is running as a config * server (--configsvr). 
*/ -class ConfigServerOpObserver final : public OpObserver { +class ConfigServerOpObserver final : public OpObserverNoop { ConfigServerOpObserver(const ConfigServerOpObserver&) = delete; ConfigServerOpObserver& operator=(const ConfigServerOpObserver&) = delete; @@ -46,210 +59,41 @@ class ConfigServerOpObserver final : public OpObserver { ConfigServerOpObserver(); ~ConfigServerOpObserver(); - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) override {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) override {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) override {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, - const NamespaceString& nss) override {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, - const NamespaceString& nss) override {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) override {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) override {} + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfigAndSystem, NamespaceFilter::kConfigAndSystem}; + } void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override; - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override; - - void aboutToDelete(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& doc) override {} + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) override; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final{}; - - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const 
CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) override {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) override {} + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) override {} - - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) override; - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) override {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override {} - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) override {} - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) override {} - - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) override {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) override {} - - void onTransactionStart(OperationContext* opCtx) override {} - - void onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) override {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept override {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) override { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) override {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) override {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) override {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} + CollectionDropType dropType, + bool markFromMigrate) 
override; - void onBatchedWriteAbort(OperationContext* opCtx) final {} + void onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; void onMajorityCommitPointUpdate(ServiceContext* service, const repl::OpTime& newCommitPoint) override; private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo); void _tickTopologyTimeIfNecessary(ServiceContext* service, Timestamp newCommitPointTime); }; diff --git a/src/mongo/db/s/config_server_op_observer_test.cpp b/src/mongo/db/s/config_server_op_observer_test.cpp index e2ed5c1ff6dfa..ee558e0a23ce3 100644 --- a/src/mongo/db/s/config_server_op_observer_test.cpp +++ b/src/mongo/db/s/config_server_op_observer_test.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/config_server_op_observer.h" -#include "mongo/db/vector_clock_mutable.h" +#include "mongo/db/vector_clock.h" #include "mongo/s/cluster_identity_loader.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/configure_query_analyzer_cmd.cpp b/src/mongo/db/s/configure_query_analyzer_cmd.cpp index b212ab96f4957..245444d1f0a1a 100644 --- a/src/mongo/db/s/configure_query_analyzer_cmd.cpp +++ b/src/mongo/db/s/configure_query_analyzer_cmd.cpp @@ -27,22 +27,68 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/list_collections_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/multitenancy_gen.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/ddl_lock_manager.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_documents_gen.h" -#include "mongo/s/analyze_shard_key_feature_flag_gen.h" #include "mongo/s/analyze_shard_key_util.h" -#include 
"mongo/s/cluster_commands_helpers.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/configure_query_analyzer_cmd_gen.h" #include "mongo/s/grid.h" -#include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/testing_proctor.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -52,81 +98,58 @@ namespace analyze_shard_key { namespace { -constexpr int kMaxSampleRate = 1'000'000; +constexpr int kMaxSamplesPerSecond = 50; -/* - * The helper for 'validateCollectionOptions'. Performs the same validation as - * 'validateCollectionOptionsLocally' but does that based on the listCollections response from the - * primary shard for the database. +/** + * RAII type for the DDL lock. On a sharded cluster, the lock is the DDLLockManager collection lock. + * On a replica set, the lock is the collection IX lock. */ -StatusWith validateCollectionOptionsOnPrimaryShard(OperationContext* opCtx, - const NamespaceString& nss) { - ListCollections listCollections; - listCollections.setDbName(nss.db()); - listCollections.setFilter(BSON("name" << nss.coll())); - auto listCollectionsCmdObj = - CommandHelpers::filterCommandRequestForPassthrough(listCollections.toBSON({})); - - auto catalogCache = Grid::get(opCtx)->catalogCache(); - return shardVersionRetry( - opCtx, - catalogCache, - nss, - "validateCollectionOptionsOnPrimaryShard"_sd, - [&]() -> StatusWith { - auto dbInfo = uassertStatusOK(catalogCache->getDatabaseWithRefresh(opCtx, nss.db())); - auto cmdResponse = executeCommandAgainstDatabasePrimary( - opCtx, - nss.db(), - dbInfo, - listCollectionsCmdObj, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - Shard::RetryPolicy::kIdempotent); - auto remoteResponse = uassertStatusOK(cmdResponse.swResponse); - uassertStatusOK(getStatusFromCommandResult(remoteResponse.data)); - - auto cursorResponse = - uassertStatusOK(CursorResponse::parseFromBSON(remoteResponse.data)); - auto firstBatch = cursorResponse.getBatch(); - - if (firstBatch.empty()) { - return Status{ErrorCodes::NamespaceNotFound, - str::stream() << "The namespace does not exist"}; - } - uassert(6915300, - str::stream() << "The namespace corresponds to multiple collections", - firstBatch.size() == 1); +class ScopedDDLLock { + ScopedDDLLock(const ScopedDDLLock&) = delete; + ScopedDDLLock& operator=(const ScopedDDLLock&) = delete; - auto listCollRepItem = ListCollectionsReplyItem::parse( - IDLParserContext("ListCollectionsReplyItem"), firstBatch[0]); - - if (listCollRepItem.getType() == "view") { - return Status{ErrorCodes::CommandNotSupportedOnView, - "The namespace corresponds to a view"}; - } - if (auto obj = listCollRepItem.getOptions()) { - auto options = uassertStatusOK(CollectionOptions::parse(*obj)); - if (options.encryptedFieldConfig.has_value()) { - return Status{ErrorCodes::IllegalOperation, - str::stream() - << "The collection has queryable encryption enabled"}; - } +public: + static constexpr StringData lockReason{"configureQueryAnalyzer"_sd}; + + ScopedDDLLock(OperationContext* opCtx, const NamespaceString& nss) { + if 
(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { + + // TODO SERVER-77546 remove db ddl lock acquisition on feature flag removal since it + // will be implicitly taken in IX mode under the collection ddl lock acquisition + boost::optional dbDDLLock; + if (!feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility)) { + dbDDLLock.emplace( + opCtx, nss.dbName(), lockReason, MODE_X, DDLLockManager::kDefaultLockTimeout); } - auto info = listCollRepItem.getInfo(); - uassert(6915301, - str::stream() << "The listCollections reply for '" << nss - << "' does not have the 'info' field", - info); - return *info->getUuid(); - }); -} - -StatusWith validateCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) { - if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { - return validateCollectionOptionsLocally(opCtx, nss); + // Acquire the DDL lock to serialize with other DDL operations. It also makes sure that + // we are targeting the primary shard for this database. + _collDDLLock.emplace( + opCtx, nss, lockReason, MODE_X, DDLLockManager::kDefaultLockTimeout); + } else { + _autoColl.emplace(opCtx, + nss, + MODE_IX, + AutoGetCollection::Options{}.viewMode( + auto_get_collection::ViewMode::kViewsPermitted)); + } } - return validateCollectionOptionsOnPrimaryShard(opCtx, nss); + +private: + boost::optional _collDDLLock; + boost::optional _autoColl; +}; + +/** + * Waits for the system last opTime to be majority committed. + */ +void waitUntilMajorityLastOpTime(OperationContext* opCtx) { + repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx); + WaitForMajorityService::get(opCtx->getServiceContext()) + .waitUntilMajority(repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(), + CancellationToken::uncancelable()) + .get(); } class ConfigureQueryAnalyzerCmd : public TypedCommand { @@ -140,37 +163,48 @@ class ConfigureQueryAnalyzerCmd : public TypedCommand Response typedRun(OperationContext* opCtx) { uassert(ErrorCodes::IllegalOperation, - "configQueryAnalyzer command is not supported on a standalone mongod", + "configureQueryAnalyzer command is not supported on a standalone mongod", repl::ReplicationCoordinator::get(opCtx)->isReplEnabled()); uassert(ErrorCodes::IllegalOperation, - "configQueryAnalyzer command is not supported on a multitenant replica set", + "configureQueryAnalyzer command is not supported on a multitenant replica set", !gMultitenancySupport); uassert(ErrorCodes::IllegalOperation, - "configQueryAnalyzer command is not supported on a shardsvr mongod", - !serverGlobalParams.clusterRole.exclusivelyHasShardRole()); + "Cannot run configureQueryAnalyzer command directly against a shardsvr mongod", + serverGlobalParams.clusterRole.has(ClusterRole::None) || + isInternalClient(opCtx) || TestingProctor::instance().isEnabled()); const auto& nss = ns(); const auto mode = request().getMode(); - const auto sampleRate = request().getSampleRate(); + const auto samplesPerSec = request().getSamplesPerSecond(); const auto newConfig = request().getConfiguration(); uassertStatusOK(validateNamespace(nss)); if (mode == QueryAnalyzerModeEnum::kOff) { uassert(ErrorCodes::InvalidOptions, - "Cannot specify 'sampleRate' when 'mode' is \"off\"", - !sampleRate); + "Cannot specify 'samplesPerSecond' when 'mode' is \"off\"", + !samplesPerSec); } else { uassert(ErrorCodes::InvalidOptions, str::stream() - << "'sampleRate' must be specified when 'mode' is not \"off\"", - sampleRate); + << "'samplesPerSecond' 
must be specified when 'mode' is not \"off\"", + samplesPerSec); uassert(ErrorCodes::InvalidOptions, - str::stream() << "'sampleRate' must be greater than 0", - *sampleRate > 0); + str::stream() << "'samplesPerSecond' must be greater than 0", + *samplesPerSec > 0); uassert(ErrorCodes::InvalidOptions, - str::stream() << "'sampleRate' must be less than " << kMaxSampleRate, - *sampleRate < kMaxSampleRate); + str::stream() << "'samplesPerSecond' must be less than or equal to " + << kMaxSamplesPerSecond, + (*samplesPerSec <= kMaxSamplesPerSecond) || + TestingProctor::instance().isEnabled()); } + + // Take the DDL lock to serialize this command with DDL commands. + boost::optional ddlLock; + ddlLock.emplace(opCtx, nss); + + // Wait for the metadata for this collection in the CollectionCatalog to be majority + // committed before validating its options and persisting the configuration. + waitUntilMajorityLastOpTime(opCtx); auto collUuid = uassertStatusOK(validateCollectionOptions(opCtx, nss)); LOGV2(6915001, @@ -178,7 +212,7 @@ class ConfigureQueryAnalyzerCmd : public TypedCommand logAttrs(nss), "collectionUUID"_attr = collUuid, "mode"_attr = mode, - "sampleRate"_attr = sampleRate); + "samplesPerSecond"_attr = samplesPerSec); write_ops::FindAndModifyCommandRequest request( NamespaceString::kConfigQueryAnalyzersNamespace); @@ -191,8 +225,8 @@ class ConfigureQueryAnalyzerCmd : public TypedCommand // If the mode is 'off', do not perform the update since that would overwrite the // existing stop time. request.setQuery(BSON( - doc::kCollectionUuidFieldName - << collUuid << doc::kModeFieldName + doc::kNsFieldName + << NamespaceStringUtil::serialize(nss) << doc::kModeFieldName << BSON("$ne" << QueryAnalyzerMode_serializer(QueryAnalyzerModeEnum::kOff)))); std::vector updates; @@ -203,41 +237,74 @@ class ConfigureQueryAnalyzerCmd : public TypedCommand request.setUpdate(write_ops::UpdateModification(updates)); } else { request.setUpsert(true); - request.setQuery(BSON(doc::kCollectionUuidFieldName << collUuid)); + request.setQuery(BSON(doc::kNsFieldName << NamespaceStringUtil::serialize(nss))); std::vector updates; BSONObjBuilder setBuilder; setBuilder.appendElements(BSON(doc::kCollectionUuidFieldName - << collUuid << doc::kNsFieldName << nss.toString())); + << collUuid << doc::kNsFieldName + << NamespaceStringUtil::serialize(nss))); setBuilder.appendElements(newConfig.toBSON()); - // If the mode remains the same, keep the original start time. Otherwise, set a new - // start time. + // If the mode or collection UUID is different, set a new start time. Otherwise, + // keep the original start time. 
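    // Spelled out as the aggregation expression that the builder below produces, the new start
    // time is roughly (field names are shorthand for the doc::k*FieldName constants):
    //
    //   startTime: {$cond: {if: {$or: [{$ne: ["$mode", <requested mode>]},
    //                                  {$ne: ["$collUuid", <UUID of the current collection>]}]},
    //                       then: <current time>,
    //                       else: "$startTime"}}
    //
    // That is, the sampling start time is preserved only when the mode is unchanged and the
    // collection has not been recreated under a different UUID.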
setBuilder.append( doc::kStartTimeFieldName, - BSON("$cond" << BSON("if" << BSON("$ne" << BSON_ARRAY( - ("$" + doc::kModeFieldName) - << QueryAnalyzerMode_serializer(mode))) - << "then" << currentTime << "else" - << ("$" + doc::kStartTimeFieldName)))); + BSON("$cond" << BSON( + "if" << BSON("$or" << BSON_ARRAY( + BSON("$ne" << BSON_ARRAY( + ("$" + doc::kModeFieldName) + << QueryAnalyzerMode_serializer(mode))) + << BSON("$ne" << BSON_ARRAY( + ("$" + doc::kCollectionUuidFieldName) + << collUuid)))) + << "then" << currentTime << "else" + << ("$" + doc::kStartTimeFieldName)))); updates.push_back(BSON("$set" << setBuilder.obj())); updates.push_back(BSON("$unset" << doc::kStopTimeFieldName)); request.setUpdate(write_ops::UpdateModification(updates)); } - request.setWriteConcern(WriteConcerns::kMajorityWriteConcernNoTimeout.toBSON()); - DBDirectClient client(opCtx); - auto writeResult = client.findAndModify(request); + auto writeResult = [&] { + if (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { + request.setWriteConcern(WriteConcerns::kMajorityWriteConcernNoTimeout.toBSON()); + + const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + auto swResponse = configShard->runCommandWithFixedRetryAttempts( + opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + DatabaseName::kConfig.toString(), + request.toBSON({}), + Shard::RetryPolicy::kIdempotent); + uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(swResponse)); + return write_ops::FindAndModifyCommandReply::parse( + IDLParserContext("configureQueryAnalyzer"), swResponse.getValue().response); + } + + DBDirectClient client(opCtx); + // It is illegal to wait for replication while holding a lock so instead wait below + // after releasing the lock. + request.setWriteConcern(BSONObj()); + return client.findAndModify(request); + }(); + + ddlLock.reset(); + if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { + // Wait for the write above to be majority committed. + waitUntilMajorityLastOpTime(opCtx); + } Response response; response.setNewConfiguration(newConfig); - if (auto preImageDoc = writeResult.getValue()) { - auto oldConfig = QueryAnalyzerConfiguration::parse( - IDLParserContext("configureQueryAnalyzer"), *preImageDoc); - response.setOldConfiguration(oldConfig); - } else { - uassert(ErrorCodes::IllegalOperation, - "Attempted to disable query sampling but query sampling was not active", - mode != QueryAnalyzerModeEnum::kOff); + if (writeResult.getValue()) { + auto preImageDoc = + doc::parse(IDLParserContext("configureQueryAnalyzer"), *writeResult.getValue()); + if (preImageDoc.getCollectionUuid() == collUuid) { + response.setOldConfiguration(preImageDoc.getConfiguration()); + } + } else if (mode != QueryAnalyzerModeEnum::kOff) { + LOGV2_WARNING( + 7724700, + "Attempted to disable query sampling but query sampling was not active"); } return response; @@ -277,10 +344,7 @@ class ConfigureQueryAnalyzerCmd : public TypedCommand return "Starts or stops collecting metrics about read and write queries against a " "collection."; } -}; - -MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(ConfigureQueryAnalyzerCmd, - analyze_shard_key::gFeatureFlagAnalyzeShardKey); +} configureQueryAnalyzerCmd; } // namespace diff --git a/src/mongo/db/s/cqf_utils.cpp b/src/mongo/db/s/cqf_utils.cpp new file mode 100644 index 0000000000000..c28b07eec4f6e --- /dev/null +++ b/src/mongo/db/s/cqf_utils.cpp @@ -0,0 +1,65 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/pipeline/abt/document_source_visitor.h" +#include "mongo/db/pipeline/visitors/document_source_visitor_registry_mongod.h" +#include "mongo/db/query/cqf_command_utils.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +namespace mongo::optimizer { + +template +void visit(ABTUnsupportedDocumentSourceVisitorContext* ctx, const T&) { + ctx->eligible = false; +} + +const ServiceContext::ConstructorActionRegisterer abtUnsupportedRegisterer{ + "ABTUnsupportedRegistererShardingRuntimeD", [](ServiceContext* service) { + registerShardingRuntimeDVisitor(service); + }}; + +template +void visit(ABTDocumentSourceTranslationVisitorContext*, const T& source) { + uasserted(ErrorCodes::InternalErrorNotSupported, + str::stream() << "Stage is not supported: " << source.getSourceName()); +} + +const ServiceContext::ConstructorActionRegisterer abtTranslationRegisterer{ + "ABTTranslationRegistererShardingRuntimeD", [](ServiceContext* service) { + registerShardingRuntimeDVisitor(service); + }}; + +} // namespace mongo::optimizer diff --git a/src/mongo/db/s/create_collection_coordinator.cpp b/src/mongo/db/s/create_collection_coordinator.cpp index f21222c1e2dc0..b98619b83586c 100644 --- a/src/mongo/db/s/create_collection_coordinator.cpp +++ b/src/mongo/db/s/create_collection_coordinator.cpp @@ -28,42 +28,112 @@ */ -#include "mongo/db/s/create_collection_coordinator_document_gen.h" -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/read_preference.h" #include "mongo/db/audit.h" -#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/cancelable_operation_context.h" +#include 
"mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" -#include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/cluster_transaction_api.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/create_gen.h" -#include "mongo/db/commands/feature_compatibility_version.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/persistent_task_store.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/repl/change_stream_oplog_notification.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/create_collection_coordinator.h" +#include "mongo/db/s/create_collection_coordinator_document_gen.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/remove_chunks_gen.h" +#include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/shard_key_util.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/db/timeseries/catalog_helper.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" +#include "mongo/db/timeseries/timeseries_options.h" #include "mongo/db/transaction/transaction_api.h" +#include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/cluster_write.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_util.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/s/write_ops/batch_write_exec.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include 
"mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -86,7 +156,7 @@ OptionsAndIndexes getCollectionOptionsAndIndexes(OperationContext* opCtx, BSONObjBuilder optionsBob; auto all = - localClient.getCollectionInfos(*nssOrUUID.dbName(), BSON("info.uuid" << *nssOrUUID.uuid())); + localClient.getCollectionInfos(nssOrUUID.dbName(), BSON("info.uuid" << nssOrUUID.uuid())); // There must be a collection at this time. invariant(!all.empty()); @@ -320,10 +390,6 @@ void insertChunks(OperationContext* opCtx, { auto newClient = opCtx->getServiceContext()->makeClient("CreateCollectionCoordinator::insertChunks"); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto executor = @@ -338,7 +404,7 @@ void insertChunks(OperationContext* opCtx, BatchedCommandResponse response; BatchWriteExecStats stats; - cluster::write(newOpCtx.get(), insertRequest, &stats, &response); + cluster::write(newOpCtx.get(), insertRequest, nullptr /* nss */, &stats, &response); uassertStatusOK(response.toStatus()); } } @@ -386,8 +452,11 @@ void insertCollectionAndPlacementEntries(OperationContext* opCtx, WriteConcernOptions::SyncMode::UNSET, WriteConcernOptions::kNoTimeout}; + // This always runs in the shard role so should use a cluster transaction to guarantee targeting + // the config server. + bool useClusterTransaction = true; sharding_ddl_util::runTransactionOnShardingCatalog( - opCtx, std::move(insertionChain), wc, osi, executor); + opCtx, std::move(insertionChain), wc, osi, useClusterTransaction, executor); } void broadcastDropCollection(OperationContext* opCtx, @@ -412,10 +481,6 @@ void CreateCollectionCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuild } const NamespaceString& CreateCollectionCoordinator::nss() const { - if (_timeseriesNssResolvedByCommandHandler()) { - return originalNss(); - } - // Rely on the resolved request parameters to retrieve the nss to be targeted by the // coordinator. stdx::lock_guard lk{_docMutex}; @@ -454,8 +519,9 @@ ExecutorFuture CreateCollectionCoordinator::_runImpl( _result = createCollectionResponseOpt; // Launch an exception to directly jump to the end of the continuation chain uasserted(ErrorCodes::RequestAlreadyFulfilled, - str::stream() << "The collection" << originalNss() - << "was already sharded by a past request"); + str::stream() + << "The collection" << originalNss().toStringForErrorMsg() + << "was already sharded by a past request"); } } }) @@ -474,28 +540,14 @@ ExecutorFuture CreateCollectionCoordinator::_runImpl( // Additionally we want to perform a majority write on the CSRS to ensure that // all the subsequent reads will see all the writes performed from a previous // execution of this coordinator. - _updateSession(opCtx); _performNoopRetryableWriteOnAllShardsAndConfigsvr( - opCtx, getCurrentSession(), **executor); - - if (_timeseriesNssResolvedByCommandHandler() || - _doc.getTranslatedRequestParams()) { - - const auto shardKeyPattern = ShardKeyPattern( - _timeseriesNssResolvedByCommandHandler() - ? *_request.getShardKey() - : _doc.getTranslatedRequestParams()->getKeyPattern()); - const auto collation = _timeseriesNssResolvedByCommandHandler() - ? 
resolveCollationForUserQueries(opCtx, nss(), _request.getCollation()) - : _doc.getTranslatedRequestParams()->getCollation(); - - if (_timeseriesNssResolvedByCommandHandler()) { - // If the request is being re-attempted after a binary upgrade, the UUID - // could have not been previously checked. Do it now. - AutoGetCollection coll{opCtx, nss(), MODE_IS}; - checkCollectionUUIDMismatch( - opCtx, nss(), coll.getCollection(), _request.getCollectionUUID()); - } + opCtx, getNewSession(opCtx), **executor); + + if (_doc.getTranslatedRequestParams()) { + + const auto shardKeyPattern = + ShardKeyPattern(_doc.getTranslatedRequestParams()->getKeyPattern()); + const auto& collation = _doc.getTranslatedRequestParams()->getCollation(); // Check if the collection was already sharded by a past request if (auto createCollectionResponseOpt = @@ -534,12 +586,10 @@ ExecutorFuture CreateCollectionCoordinator::_runImpl( "Removing partial changes from previous run", logAttrs(nss())); - _updateSession(opCtx); cleanupPartialChunksFromPreviousAttempt( - opCtx, *uuid, getCurrentSession()); + opCtx, *uuid, getNewSession(opCtx)); - _updateSession(opCtx); - broadcastDropCollection(opCtx, nss(), **executor, getCurrentSession()); + broadcastDropCollection(opCtx, nss(), **executor, getNewSession(opCtx)); } } } @@ -548,35 +598,8 @@ ExecutorFuture CreateCollectionCoordinator::_runImpl( _acquireCriticalSections(opCtx); + // Translate request parameters and persist them in the coordiantor document _doc.setTranslatedRequestParams(_translateRequestParameters(opCtx)); - - // Check if the collection was already sharded by a past request - if (auto createCollectionResponseOpt = - sharding_ddl_util::checkIfCollectionAlreadySharded( - opCtx, - nss(), - _doc.getTranslatedRequestParams()->getKeyPattern().toBSON(), - _doc.getTranslatedRequestParams()->getCollation(), - _request.getUnique().value_or(false))) { - - // A previous request already created and committed the collection - // but there was a stepdown before completing the execution of the coordinator. - // Ensure that the change stream event gets emitted at least once. - notifyChangeStreamsOnShardCollection( - opCtx, - nss(), - *createCollectionResponseOpt->getCollectionUUID(), - _request.toBSON(), - CommitPhase::kSuccessful); - - // Return any previously acquired resource. 
- _releaseCriticalSections(opCtx); - - _result = createCollectionResponseOpt; - return; - } - - // Persist the coordinator document including the translated request params _updateStateDocument(opCtx, CreateCollectionCoordinatorDocument(_doc)); ShardKeyPattern shardKeyPattern(_doc.getTranslatedRequestParams()->getKeyPattern()); @@ -584,7 +607,7 @@ ExecutorFuture CreateCollectionCoordinator::_runImpl( _createCollectionAndIndexes(opCtx, shardKeyPattern); audit::logShardCollection(opCtx->getClient(), - nss().toString(), + NamespaceStringUtil::serialize(nss()), *_request.getShardKey(), _request.getUnique().value_or(false)); @@ -596,8 +619,7 @@ ExecutorFuture CreateCollectionCoordinator::_runImpl( // shard _promoteCriticalSectionsToBlockReads(opCtx); - _updateSession(opCtx); - _createCollectionOnNonPrimaryShards(opCtx, getCurrentSession()); + _createCollectionOnNonPrimaryShards(opCtx, getNewSession(opCtx)); _commit(opCtx, **executor); } @@ -626,11 +648,6 @@ ExecutorFuture CreateCollectionCoordinator::_runImpl( if (!status.isA() && !status.isA()) { - LOGV2_ERROR(5458702, - "Error running create collection", - logAttrs(originalNss()), - "error"_attr = redact(status)); - auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); @@ -649,8 +666,7 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions( // If the request is part of a C2C synchronisation, the check on the received UUID must be // performed first to honor the contract with mongosync (see SERVER-67885 for details). if (_request.getCollectionUUID()) { - if (AutoGetCollection stdColl{opCtx, originalNss(), MODE_IS}; - stdColl || _timeseriesNssResolvedByCommandHandler()) { + if (AutoGetCollection stdColl{opCtx, originalNss(), MODE_IS}; stdColl) { checkCollectionUUIDMismatch( opCtx, originalNss(), *stdColl, _request.getCollectionUUID()); } else { @@ -664,15 +680,6 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions( } } - if (_timeseriesNssResolvedByCommandHandler()) { - // It is OK to access information directly from the request object. 
- const auto shardKeyPattern = ShardKeyPattern(*_request.getShardKey()).toBSON(); - const auto collation = - resolveCollationForUserQueries(opCtx, nss(), _request.getCollation()); - return sharding_ddl_util::checkIfCollectionAlreadySharded( - opCtx, nss(), shardKeyPattern, collation, _request.getUnique().value_or(false)); - } - // Check is there is a standard sharded collection that matches the original request parameters auto cri = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithRefresh( @@ -708,7 +715,8 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions( }(); uassert(ErrorCodes::AlreadyInitialized, - str::stream() << "sharding already enabled for collection " << originalNss(), + str::stream() << "sharding already enabled for collection " + << originalNss().toStringForErrorMsg(), requestMatchesExistingCollection); CreateCollectionResponse response(cri.getCollectionVersion()); @@ -761,7 +769,8 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions( }(); uassert(ErrorCodes::AlreadyInitialized, - str::stream() << "sharding already enabled for collection " << bucketsNss, + str::stream() << "sharding already enabled for collection " + << bucketsNss.toStringForErrorMsg(), requestMatchesExistingCollection); CreateCollectionResponse response(cri.getCollectionVersion()); @@ -772,12 +781,9 @@ CreateCollectionCoordinator::_checkIfCollectionAlreadyShardedWithSameOptions( void CreateCollectionCoordinator::_checkCommandArguments(OperationContext* opCtx) { LOGV2_DEBUG(5277902, 2, "Create collection _checkCommandArguments", logAttrs(originalNss())); - if (originalNss().dbName() == DatabaseName::kConfig) { - // Only allowlisted collections in config may be sharded (unless we are in test mode) - uassert(ErrorCodes::IllegalOperation, - "only special collections in the config db may be sharded", - originalNss() == NamespaceString::kLogicalSessionsNamespace); - } + uassert(ErrorCodes::IllegalOperation, + "Special collection '" + originalNss().toStringForErrorMsg() + "' cannot be sharded", + !originalNss().isNamespaceAlwaysUnsharded()); // Ensure that hashed and unique are not both set. uassert(ErrorCodes::InvalidOptions, @@ -786,26 +792,6 @@ void CreateCollectionCoordinator::_checkCommandArguments(OperationContext* opCtx !ShardKeyPattern(*_request.getShardKey()).isHashedPattern() || !_request.getUnique().value_or(false)); - if (_timeseriesNssResolvedByCommandHandler()) { - // Ensure that a time-series collection cannot be sharded unless the feature flag is - // enabled. - if (originalNss().isTimeseriesBucketsCollection()) { - uassert(ErrorCodes::IllegalOperation, - str::stream() << "can't shard time-series collection " << nss(), - feature_flags::gFeatureFlagShardedTimeSeries.isEnabled( - serverGlobalParams.featureCompatibility) || - !timeseries::getTimeseriesOptions(opCtx, nss(), false)); - } - } - - // Ensure the namespace is valid. - uassert(ErrorCodes::IllegalOperation, - "can't shard system namespaces", - !originalNss().isSystem() || - originalNss() == NamespaceString::kLogicalSessionsNamespace || - originalNss().isTemporaryReshardingCollection() || - originalNss().isTimeseriesBucketsCollection()); - if (_request.getNumInitialChunks()) { // Ensure numInitialChunks is within valid bounds. // Cannot have more than kMaxSplitPoints initial chunks per shard. 
Setting a maximum of @@ -862,11 +848,12 @@ TranslatedRequestParams CreateCollectionCoordinator::_translateRequestParameters auto targetingStandardCollection = !_request.getTimeseries() && !existingBucketsColl; - if (_timeseriesNssResolvedByCommandHandler() || targetingStandardCollection) { + if (targetingStandardCollection) { const auto& resolvedNamespace = originalNss(); performCheckOnCollectionUUID(resolvedNamespace); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Namespace too long. Namespace: " << resolvedNamespace + str::stream() << "Namespace too long. Namespace: " + << resolvedNamespace.toStringForErrorMsg() << " Max: " << NamespaceString::MaxNsShardedCollectionLen, resolvedNamespace.size() <= NamespaceString::MaxNsShardedCollectionLen); return TranslatedRequestParams( @@ -879,13 +866,10 @@ TranslatedRequestParams CreateCollectionCoordinator::_translateRequestParameters // patched yet. const auto& resolvedNamespace = bucketsNs; performCheckOnCollectionUUID(resolvedNamespace); - uassert(ErrorCodes::IllegalOperation, - "Sharding a timeseries collection feature is not enabled", - feature_flags::gFeatureFlagShardedTimeSeries.isEnabled( - serverGlobalParams.featureCompatibility)); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Namespace too long. Namespace: " << resolvedNamespace + str::stream() << "Namespace too long. Namespace: " + << resolvedNamespace.toStringForErrorMsg() << " Max: " << NamespaceString::MaxNsShardedCollectionLen, resolvedNamespace.size() <= NamespaceString::MaxNsShardedCollectionLen); @@ -896,7 +880,7 @@ TranslatedRequestParams CreateCollectionCoordinator::_translateRequestParameters } uassert(6159000, - str::stream() << "the collection '" << bucketsNs + str::stream() << "the collection '" << bucketsNs.toStringForErrorMsg() << "' does not have 'timeseries' options", existingBucketsColl->getTimeseriesOptions()); return existingBucketsColl->getTimeseriesOptions(); @@ -905,7 +889,7 @@ TranslatedRequestParams CreateCollectionCoordinator::_translateRequestParameters if (_request.getTimeseries() && existingTimeseriesOptions) { uassert(5731500, str::stream() << "the 'timeseries' spec provided must match that of exists '" - << originalNss() << "' collection", + << originalNss().toStringForErrorMsg() << "' collection", timeseries::optionsAreEqual(*_request.getTimeseries(), *existingTimeseriesOptions)); } else if (!_request.getTimeseries()) { _request.setTimeseries(existingTimeseriesOptions); @@ -940,76 +924,46 @@ TranslatedRequestParams CreateCollectionCoordinator::_translateRequestParameters resolveCollationForUserQueries(opCtx, resolvedNamespace, _request.getCollation())); } -bool CreateCollectionCoordinator::_timeseriesNssResolvedByCommandHandler() const { - return operationType() == DDLCoordinatorTypeEnum::kCreateCollectionPre61Compatible; -} - void CreateCollectionCoordinator::_acquireCriticalSections(OperationContext* opCtx) { - // TODO SERVER-68084 call ShardingRecoveryService without the try/catch block - try { - ShardingRecoveryService::get(opCtx)->acquireRecoverableCriticalSectionBlockWrites( - opCtx, originalNss(), _critSecReason, ShardingCatalogClient::kMajorityWriteConcern); - } catch (const ExceptionFor&) { - if (_timeseriesNssResolvedByCommandHandler()) { - throw; - } + ShardingRecoveryService::get(opCtx)->acquireRecoverableCriticalSectionBlockWrites( + opCtx, originalNss(), _critSecReason, ShardingCatalogClient::kMajorityWriteConcern); - // In case we acquisition was rejected because it targets an existing view, the critical - // section 
is not needed and the error can be dropped because: - // 1. We will not shard the view namespace - // 2. This collection will remain a view since we are holding the DDL coll lock and - // thus the collection can't be dropped. - _doc.setDisregardCriticalSectionOnOriginalNss(true); - } - - if (!_timeseriesNssResolvedByCommandHandler()) { - // Preventively acquire the critical section protecting the buckets namespace that the - // creation of a timeseries collection would require. - const auto bucketsNamespace = originalNss().makeTimeseriesBucketsNamespace(); - ShardingRecoveryService::get(opCtx)->acquireRecoverableCriticalSectionBlockWrites( - opCtx, bucketsNamespace, _critSecReason, ShardingCatalogClient::kMajorityWriteConcern); - } + // Preventively acquire the critical section protecting the buckets namespace that the + // creation of a timeseries collection would require. + ShardingRecoveryService::get(opCtx)->acquireRecoverableCriticalSectionBlockWrites( + opCtx, + originalNss().makeTimeseriesBucketsNamespace(), + _critSecReason, + ShardingCatalogClient::kMajorityWriteConcern); } void CreateCollectionCoordinator::_promoteCriticalSectionsToBlockReads( OperationContext* opCtx) const { - // TODO SERVER-68084 call ShardingRecoveryService without the if blocks. - if (!_doc.getDisregardCriticalSectionOnOriginalNss()) { - ShardingRecoveryService::get(opCtx)->promoteRecoverableCriticalSectionToBlockAlsoReads( - opCtx, originalNss(), _critSecReason, ShardingCatalogClient::kMajorityWriteConcern); - } + ShardingRecoveryService::get(opCtx)->promoteRecoverableCriticalSectionToBlockAlsoReads( + opCtx, originalNss(), _critSecReason, ShardingCatalogClient::kMajorityWriteConcern); - if (!_timeseriesNssResolvedByCommandHandler()) { - const auto bucketsNamespace = originalNss().makeTimeseriesBucketsNamespace(); - ShardingRecoveryService::get(opCtx)->promoteRecoverableCriticalSectionToBlockAlsoReads( - opCtx, bucketsNamespace, _critSecReason, ShardingCatalogClient::kMajorityWriteConcern); - } + ShardingRecoveryService::get(opCtx)->promoteRecoverableCriticalSectionToBlockAlsoReads( + opCtx, + originalNss().makeTimeseriesBucketsNamespace(), + _critSecReason, + ShardingCatalogClient::kMajorityWriteConcern); } void CreateCollectionCoordinator::_releaseCriticalSections(OperationContext* opCtx, bool throwIfReasonDiffers) { - // TODO SERVER-68084 call ShardingRecoveryService without the try/catch block. - try { - ShardingRecoveryService::get(opCtx)->releaseRecoverableCriticalSection( - opCtx, - originalNss(), - _critSecReason, - ShardingCatalogClient::kMajorityWriteConcern, - throwIfReasonDiffers); - } catch (ExceptionFor&) { - // Ignore the error (when it is raised, we can assume that no critical section for the view - // was previously acquired). 
- } + ShardingRecoveryService::get(opCtx)->releaseRecoverableCriticalSection( + opCtx, + originalNss(), + _critSecReason, + ShardingCatalogClient::kMajorityWriteConcern, + throwIfReasonDiffers); - if (!_timeseriesNssResolvedByCommandHandler()) { - const auto bucketsNamespace = originalNss().makeTimeseriesBucketsNamespace(); - ShardingRecoveryService::get(opCtx)->releaseRecoverableCriticalSection( - opCtx, - bucketsNamespace, - _critSecReason, - ShardingCatalogClient::kMajorityWriteConcern, - throwIfReasonDiffers); - } + ShardingRecoveryService::get(opCtx)->releaseRecoverableCriticalSection( + opCtx, + originalNss().makeTimeseriesBucketsNamespace(), + _critSecReason, + ShardingCatalogClient::kMajorityWriteConcern, + throwIfReasonDiffers); } void CreateCollectionCoordinator::_createCollectionAndIndexes( @@ -1025,40 +979,6 @@ void CreateCollectionCoordinator::_createCollectionAndIndexes( // We need to implicitly create a timeseries view and underlying bucket collection. if (_collectionEmpty && _request.getTimeseries()) { - // TODO SERVER-68084 Remove viewLock and the whole if section that constructs it while - // releasing the critical section on the originalNss. - boost::optional viewLock; - if (auto criticalSectionAcquiredOnOriginalNss = - !_doc.getDisregardCriticalSectionOnOriginalNss(); - !_timeseriesNssResolvedByCommandHandler() && criticalSectionAcquiredOnOriginalNss) { - // This is the subcase of a not yet existing pair of view (originalNss)+ bucket (nss) - // timeseries collection that the DDL will have to create. Due to the current - // constraints of the code: - // - Such creation cannot be performed while holding the critical section over the views - // namespace (once the view gets created, the CS will not be releasable); instead, - // exclusive access must be enforced through a collection lock - // - The critical section cannot be released while holding a collection lock, so this - // operation must be performed first (leaving a small window open to data races) - ShardingRecoveryService::get(opCtx)->releaseRecoverableCriticalSection( - opCtx, originalNss(), _critSecReason, ShardingCatalogClient::kMajorityWriteConcern); - _doc.setDisregardCriticalSectionOnOriginalNss(true); - viewLock.emplace(opCtx, - originalNss(), - LockMode::MODE_X, - AutoGetCollection::Options{}.viewMode( - auto_get_collection::ViewMode::kViewsPermitted)); - // Once the exclusive access has been reacquired, ensure that no data race occurred. 
- auto catalog = CollectionCatalog::get(opCtx); - if (catalog->lookupView(opCtx, originalNss()) || - catalog->lookupCollectionByNamespace(opCtx, originalNss())) { - _completeOnError = true; - uasserted(ErrorCodes::NamespaceExists, - str::stream() << "A conflicting DDL operation was completed while trying " - "to shard collection: " - << originalNss()); - } - } - const auto viewName = nss().getTimeseriesViewNamespace(); auto createCmd = makeCreateCommand(viewName, collation, *_request.getTimeseries()); @@ -1185,17 +1105,17 @@ void CreateCollectionCoordinator::_createCollectionOnNonPrimaryShards( for (const auto& response : responses) { auto shardResponse = uassertStatusOKWithContext( std::move(response.swResponse), - str::stream() << "Unable to create collection " << nss().ns() << " on " - << response.shardId); + str::stream() << "Unable to create collection " << nss().toStringForErrorMsg() + << " on " << response.shardId); auto status = getStatusFromCommandResult(shardResponse.data); - uassertStatusOK(status.withContext(str::stream() - << "Unable to create collection " << nss().ns() - << " on " << response.shardId)); + uassertStatusOK(status.withContext(str::stream() << "Unable to create collection " + << nss().toStringForErrorMsg() + << " on " << response.shardId)); auto wcStatus = getWriteConcernStatusFromCommandResult(shardResponse.data); - uassertStatusOK(wcStatus.withContext(str::stream() - << "Unable to create collection " << nss().ns() - << " on " << response.shardId)); + uassertStatusOK(wcStatus.withContext(str::stream() << "Unable to create collection " + << nss().toStringForErrorMsg() + << " on " << response.shardId)); } } } @@ -1211,8 +1131,7 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx, } // Upsert Chunks. - _updateSession(opCtx); - insertChunks(opCtx, _initialChunks->chunks, getCurrentSession()); + insertChunks(opCtx, _initialChunks->chunks, getNewSession(opCtx)); // The coll and shardsHoldingData objects will be used by both this function and // insertCollectionAndPlacementEntries(), which accesses their content from a separate thread @@ -1235,10 +1154,8 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx, const auto& placementVersion = _initialChunks->chunks.back().getVersion(); if (_request.getTimeseries()) { - TimeseriesOptions timeseriesOptions = *_request.getTimeseries(); - (void)timeseries::validateAndSetBucketingParameters(timeseriesOptions); TypeCollectionTimeseriesFields timeseriesFields; - timeseriesFields.setTimeseriesOptions(std::move(timeseriesOptions)); + timeseriesFields.setTimeseriesOptions(*_request.getTimeseries()); coll->setTimeseriesFields(std::move(timeseriesFields)); } @@ -1251,7 +1168,7 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx, coll->setUnique(*_request.getUnique()); } - _updateSession(opCtx); + const auto& osi = getNewSession(opCtx); try { notifyChangeStreamsOnShardCollection(opCtx, nss(), @@ -1261,7 +1178,7 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx, *shardsHoldingData); insertCollectionAndPlacementEntries( - opCtx, executor, coll, placementVersion, shardsHoldingData, getCurrentSession()); + opCtx, executor, coll, placementVersion, shardsHoldingData, osi); notifyChangeStreamsOnShardCollection( opCtx, nss(), *_collectionUUID, _request.toBSON(), CommitPhase::kSuccessful); @@ -1303,10 +1220,11 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx, } auto shard = uassertStatusOK(shardRegistry->getShard(opCtx, shardid)); - 
shard->runFireAndForgetCommand(opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - DatabaseName::kAdmin.toString(), - BSON("_flushRoutingTableCacheUpdates" << nss().ns())); + shard->runFireAndForgetCommand( + opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + DatabaseName::kAdmin.toString(), + BSON("_flushRoutingTableCacheUpdates" << NamespaceStringUtil::serialize(nss()))); } LOGV2(5277901, @@ -1329,11 +1247,12 @@ void CreateCollectionCoordinator::_commit(OperationContext* opCtx, void CreateCollectionCoordinator::_logStartCreateCollection(OperationContext* opCtx) { BSONObjBuilder collectionDetail; + const auto serializedNss = NamespaceStringUtil::serialize(originalNss()); collectionDetail.append("shardKey", *_request.getShardKey()); - collectionDetail.append("collection", originalNss().ns()); + collectionDetail.append("collection", serializedNss); collectionDetail.append("primary", ShardingState::get(opCtx)->shardId().toString()); ShardingLogging::get(opCtx)->logChange( - opCtx, "shardCollection.start", originalNss().ns(), collectionDetail.obj()); + opCtx, "shardCollection.start", serializedNss, collectionDetail.obj()); } void CreateCollectionCoordinator::_logEndCreateCollection(OperationContext* opCtx) { @@ -1345,8 +1264,10 @@ void CreateCollectionCoordinator::_logEndCreateCollection(OperationContext* opCt if (_initialChunks) collectionDetail.appendNumber("numChunks", static_cast(_initialChunks->chunks.size())); - ShardingLogging::get(opCtx)->logChange( - opCtx, "shardCollection.end", originalNss().ns(), collectionDetail.obj()); + ShardingLogging::get(opCtx)->logChange(opCtx, + "shardCollection.end", + NamespaceStringUtil::serialize(originalNss()), + collectionDetail.obj()); } } // namespace mongo diff --git a/src/mongo/db/s/create_collection_coordinator.h b/src/mongo/db/s/create_collection_coordinator.h index 82ef299f039d3..412541ccda137 100644 --- a/src/mongo/db/s/create_collection_coordinator.h +++ b/src/mongo/db/s/create_collection_coordinator.h @@ -29,13 +29,35 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/s/config/initial_split_policy.h" #include "mongo/db/s/create_collection_coordinator_document_gen.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -51,7 +73,7 @@ class CreateCollectionCoordinator _request(_doc.getCreateCollectionRequest()), _critSecReason(BSON("command" << "createCollection" - << "ns" << originalNss().toString())) {} + << "ns" << NamespaceStringUtil::serialize(originalNss()))) {} ~CreateCollectionCoordinator() = default; @@ -91,10 +113,6 @@ class CreateCollectionCoordinator TranslatedRequestParams _translateRequestParameters(OperationContext* opCtx); - // 
TODO SERVER-68008 Remove once 7.0 becomes last LTS; when the function appears in if clauses, - // modify the code assuming that a "false" value gets returned - bool _timeseriesNssResolvedByCommandHandler() const; - void _acquireCriticalSections(OperationContext* opCtx); void _promoteCriticalSectionsToBlockReads(OperationContext* opCtx) const; diff --git a/src/mongo/db/s/create_collection_coordinator_document.idl b/src/mongo/db/s/create_collection_coordinator_document.idl index 9a38b094fa38c..feaff8b3545ff 100644 --- a/src/mongo/db/s/create_collection_coordinator_document.idl +++ b/src/mongo/db/s/create_collection_coordinator_document.idl @@ -80,7 +80,3 @@ structs: type: TranslatedRequestParams description: "The field is populated only once the kTranslateRequest phase is completed" optional: true - # TODO SERVER-68084 remove the following field - disregardCriticalSectionOnOriginalNss: - type: optionalBool - description: "When set to true, the DDL operation is being performed without acquiring a critical section over the NSS specified by the user." diff --git a/src/mongo/db/s/database_sharding_state.cpp b/src/mongo/db/s/database_sharding_state.cpp index fb905beebab20..53e668cdd81b9 100644 --- a/src/mongo/db/s/database_sharding_state.cpp +++ b/src/mongo/db/s/database_sharding_state.cpp @@ -29,16 +29,38 @@ #include "mongo/db/s/database_sharding_state.h" +#include +#include +#include +#include +#include #include +#include +#include +#include -#include "mongo/db/catalog_shard_feature_flag_gen.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" #include "mongo/s/stale_exception.h" #include "mongo/stdx/unordered_map.h" -#include "mongo/util/fail_point.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -56,7 +78,7 @@ class DatabaseShardingStateMap { struct DSSAndLock { DSSAndLock(const DatabaseName& dbName) - : dssMutex("DSSMutex::" + dbName.db()), + : dssMutex("DSSMutex::" + DatabaseNameUtil::serialize(dbName)), dss(std::make_unique(dbName)) {} const Lock::ResourceMutex dssMutex; @@ -159,7 +181,7 @@ std::vector DatabaseShardingState::getDatabaseNames(OperationConte void DatabaseShardingState::assertMatchingDbVersion(OperationContext* opCtx, const DatabaseName& dbName) { - const auto receivedVersion = OperationShardingState::get(opCtx).getDbVersion(dbName.toString()); + const auto receivedVersion = OperationShardingState::get(opCtx).getDbVersion(dbName); if (!receivedVersion) { return; } @@ -179,7 +201,8 @@ void DatabaseShardingState::assertMatchingDbVersion(OperationContext* opCtx, const auto optCritSecReason = scopedDss->getCriticalSectionReason(); uassert( - StaleDbRoutingVersion(dbName.toString(), receivedVersion, boost::none, critSecSignal), + StaleDbRoutingVersion( + DatabaseNameUtil::serialize(dbName), receivedVersion, boost::none, critSecSignal), str::stream() << "The critical section for the database " << 
dbName.toStringForErrorMsg() << " is acquired with reason: " << scopedDss->getCriticalSectionReason(), @@ -187,25 +210,28 @@ void DatabaseShardingState::assertMatchingDbVersion(OperationContext* opCtx, } const auto wantedVersion = scopedDss->getDbVersion(opCtx); - uassert(StaleDbRoutingVersion(dbName.toString(), receivedVersion, boost::none), - str::stream() << "No cached info for the database " << dbName.toStringForErrorMsg(), - wantedVersion); - - uassert(StaleDbRoutingVersion(dbName.toString(), receivedVersion, *wantedVersion), - str::stream() << "Version mismatch for the database " << dbName.toStringForErrorMsg(), - receivedVersion == *wantedVersion); + uassert( + StaleDbRoutingVersion(DatabaseNameUtil::serialize(dbName), receivedVersion, boost::none), + str::stream() << "No cached info for the database " << dbName.toStringForErrorMsg(), + wantedVersion); + + uassert( + StaleDbRoutingVersion(DatabaseNameUtil::serialize(dbName), receivedVersion, *wantedVersion), + str::stream() << "Version mismatch for the database " << dbName.toStringForErrorMsg(), + receivedVersion == *wantedVersion); } void DatabaseShardingState::assertIsPrimaryShardForDb(OperationContext* opCtx, const DatabaseName& dbName) { if (dbName == DatabaseName::kConfig || dbName == DatabaseName::kAdmin) { uassert(7393700, - "The config server is the primary shard for database: {}"_format(dbName.toString()), + "The config server is the primary shard for database: {}"_format( + dbName.toStringForErrorMsg()), serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); return; } - auto expectedDbVersion = OperationShardingState::get(opCtx).getDbVersion(dbName.toString()); + auto expectedDbVersion = OperationShardingState::get(opCtx).getDbVersion(dbName); uassert(ErrorCodes::IllegalOperation, str::stream() << "Received request without the version for the database " @@ -302,4 +328,10 @@ void DatabaseShardingState::_cancelDbMetadataRefresh() { } } +boost::optional DatabaseShardingState::_isPrimaryShardForDb(OperationContext* opCtx) const { + return _dbInfo + ? boost::optional(_dbInfo->getPrimary() == ShardingState::get(opCtx)->shardId()) + : boost::none; +} + } // namespace mongo diff --git a/src/mongo/db/s/database_sharding_state.h b/src/mongo/db/s/database_sharding_state.h index 7e9331b6ad920..974827b17e6bb 100644 --- a/src/mongo/db/s/database_sharding_state.h +++ b/src/mongo/db/s/database_sharding_state.h @@ -29,10 +29,20 @@ #pragma once +#include +#include +#include +#include + #include "mongo/bson/bsonobj.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/database_version.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" namespace mongo { @@ -258,6 +268,18 @@ class DatabaseShardingState { // Tracks the ongoing database metadata refresh. Possibly keeps a future for other threads to // wait on it, and a cancellation source to cancel the ongoing database metadata refresh. boost::optional _dbMetadataRefresh; + + /** + * If there is cached database info, returns `true` if the current shard is the primary shard + * for the database of the current sharding state. If there is no cached database info, returns + * `boost::none`. + * + * This method is unsafe to use since it doesn't honor the critical section. 
+ */ + boost::optional _isPrimaryShardForDb(OperationContext* opCtx) const; + + // Permit the `getDatabaseVersion` command to access the private method `_isPrimaryShardForDb`. + friend class GetDatabaseVersionCmd; }; } // namespace mongo diff --git a/src/mongo/db/s/database_sharding_state_test.cpp b/src/mongo/db/s/database_sharding_state_test.cpp index b37f16c421287..3ac757c6cb0ee 100644 --- a/src/mongo/db/s/database_sharding_state_test.cpp +++ b/src/mongo/db/s/database_sharding_state_test.cpp @@ -27,17 +27,48 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_factory_mock.h" +#include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/s/sharding_mongod_test_fixture.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/catalog_cache_loader_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -135,9 +166,10 @@ TEST_F(DatabaseShardingStateTestWithMockedLoader, OnDbVersionMismatch) { auto opCtx = operationContext(); auto getActiveDbVersion = [&] { - AutoGetDb autoDb(opCtx, DatabaseName(boost::none, kDbName), MODE_IS); + AutoGetDb autoDb( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, kDbName), MODE_IS); const auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireShared( - opCtx, DatabaseName(boost::none, kDbName)); + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, kDbName)); return scopedDss->getDbVersion(opCtx); }; @@ -170,9 +202,10 @@ TEST_F(DatabaseShardingStateTestWithMockedLoader, ForceDatabaseRefresh) { ASSERT_OK(onDbVersionMismatchNoExcept(opCtx, kDbName, boost::none)); boost::optional activeDbVersion = [&] { - AutoGetDb autoDb(opCtx, DatabaseName(boost::none, kDbName), MODE_IS); + AutoGetDb autoDb( + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, kDbName), MODE_IS); const auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireShared( - opCtx, DatabaseName(boost::none, kDbName)); + opCtx, DatabaseName::createDatabaseName_forTest(boost::none, kDbName)); return scopedDss->getDbVersion(opCtx); }(); ASSERT_TRUE(activeDbVersion); diff --git a/src/mongo/db/s/ddl_lock_manager.cpp b/src/mongo/db/s/ddl_lock_manager.cpp index 0ca87c60869e2..3940927e7daab 100644 --- 
a/src/mongo/db/s/ddl_lock_manager.cpp +++ b/src/mongo/db/s/ddl_lock_manager.cpp @@ -30,8 +30,32 @@ #include "mongo/db/s/ddl_lock_manager.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/operation_context.h" +#include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -57,52 +81,101 @@ DDLLockManager* DDLLockManager::get(OperationContext* opCtx) { return get(opCtx->getServiceContext()); } -DDLLockManager::ScopedLock DDLLockManager::lock(OperationContext* opCtx, - StringData ns, - StringData reason, - Milliseconds timeout) { +void DDLLockManager::setState(const State& state) { + stdx::unique_lock lock(_mutex); + _state = state; + _stateCV.notify_all(); +} + +void DDLLockManager::_lock(OperationContext* opCtx, + Locker* locker, + StringData ns, + const ResourceId& resId, + StringData reason, + LockMode mode, + Date_t deadline, + bool waitForRecovery) { stdx::unique_lock lock(_mutex); - auto iter = _inProgressMap.find(ns); - if (iter == _inProgressMap.end()) { - _inProgressMap.try_emplace(ns, std::make_shared(reason)); - } else { - auto nsLock = iter->second; - nsLock->numWaiting++; - ScopeGuard guard([&] { nsLock->numWaiting--; }); - if (!opCtx->waitForConditionOrInterruptFor( - nsLock->cvLocked, lock, timeout, [nsLock]() { return !nsLock->isInProgress; })) { + Timer waitingTime; + + // Wait for primary and DDL recovered state + if (!opCtx->waitForConditionOrInterruptUntil(_stateCV, lock, deadline, [&] { + return _state == State::kPrimaryAndRecovered || !waitForRecovery; + })) { + using namespace fmt::literals; + uasserted(ErrorCodes::LockTimeout, + "Failed to acquire DDL lock for namespace '{}' in mode {} after {} with reason " + "'{}' while waiting for recovery of the DDLCoordinatorService"_format( + ns, modeName(mode), waitingTime.elapsed().toString(), reason)); + } + + if (feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility)) { + lock.unlock(); + + tassert(7742100, + "No hierarchy lock (Global/DB/Coll) must be held when acquiring a DDL lock", + !locker->isLocked()); + + try { + locker->lock(opCtx, resId, mode, deadline); + } catch (const ExceptionFor& e) { using namespace fmt::literals; - uasserted( - ErrorCodes::LockBusy, - "Failed to acquire DDL lock for namespace '{}' after {} that is currently locked with reason '{}'"_format( - ns, timeout.toString(), nsLock->reason)); + uasserted(ErrorCodes::LockBusy, + "Failed to acquire DDL lock for '{}' in mode {} after {}. 
{}"_format( + ns, modeName(mode), waitingTime.elapsed().toString(), e.what())); + } catch (DBException& e) { + e.addContext("Failed to acquire DDL lock for '{}' in mode {} after {}"_format( + ns, modeName(mode), waitingTime.elapsed().toString())); + throw; + } + + } else { + invariant(mode == MODE_X, "DDL lock modes other than exclusive are not supported yet"); + + auto iter = _inProgressMap.find(ns); + + if (iter == _inProgressMap.end()) { + _inProgressMap.try_emplace(ns, std::make_shared(reason)); + } else { + auto nsLock = iter->second; + nsLock->numWaiting++; + ScopeGuard guard([&] { nsLock->numWaiting--; }); + if (!opCtx->waitForConditionOrInterruptUntil( + nsLock->cvLocked, lock, deadline, [nsLock]() { + return !nsLock->isInProgress; + })) { + using namespace fmt::literals; + uasserted( + ErrorCodes::LockBusy, + "Failed to acquire DDL lock for namespace '{}' in mode {} after {} that is currently locked with reason '{}'"_format( + ns, modeName(mode), waitingTime.elapsed().toString(), nsLock->reason)); + } + guard.dismiss(); + nsLock->reason = reason.toString(); + nsLock->isInProgress = true; } - guard.dismiss(); - nsLock->reason = reason.toString(); - nsLock->isInProgress = true; } - LOGV2(6855301, "Acquired DDL lock", "resource"_attr = ns, "reason"_attr = reason); - return {ns, reason, this}; + LOGV2(6855301, + "Acquired DDL lock", + "resource"_attr = ns, + "reason"_attr = reason, + "mode"_attr = modeName(mode)); } -DDLLockManager::ScopedLock::ScopedLock(StringData ns, - StringData reason, - DDLLockManager* lockManager) - : _ns(ns.toString()), _reason(reason.toString()), _lockManager(lockManager) {} +void DDLLockManager::_unlock( + Locker* locker, StringData ns, const ResourceId& resId, StringData reason, LockMode mode) { -DDLLockManager::ScopedLock::ScopedLock(ScopedLock&& other) - : _ns(std::move(other._ns)), - _reason(std::move(other._reason)), - _lockManager(other._lockManager) { - other._lockManager = nullptr; -} + if (feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility)) { + dassert(locker); + locker->unlock(resId); -DDLLockManager::ScopedLock::~ScopedLock() { - if (_lockManager) { - stdx::unique_lock lock(_lockManager->_mutex); - auto iter = _lockManager->_inProgressMap.find(_ns); + } else { + stdx::unique_lock lock(_mutex); + auto iter = _inProgressMap.find(ns); iter->second->numWaiting--; iter->second->reason.clear(); @@ -110,10 +183,130 @@ DDLLockManager::ScopedLock::~ScopedLock() { iter->second->cvLocked.notify_one(); if (iter->second->numWaiting == 0) { - _lockManager->_inProgressMap.erase(_ns); + _inProgressMap.erase(ns); } - LOGV2(6855302, "Released DDL lock", "resource"_attr = _ns, "reason"_attr = _reason); } + LOGV2(6855302, + "Released DDL lock", + "resource"_attr = ns, + "reason"_attr = reason, + "mode"_attr = modeName(mode)); +} + +DDLLockManager::ScopedDatabaseDDLLock::ScopedDatabaseDDLLock(OperationContext* opCtx, + const DatabaseName& db, + StringData reason, + LockMode mode, + Milliseconds timeout) + : DDLLockManager::ScopedBaseDDLLock(opCtx, + opCtx->lockState(), + db, + reason, + mode, + Date_t::now() + timeout, + true /*waitForRecovery*/) { + + // Check under the DDL dbLock if this is the primary shard for the database + DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, db); +} + +DDLLockManager::ScopedCollectionDDLLock::ScopedCollectionDDLLock(OperationContext* opCtx, + const NamespaceString& ns, + StringData reason, + LockMode mode, + Milliseconds timeout) { + const Date_t deadline = Date_t::now() + 
timeout; + + if (feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility)) { + // Acquire implicitly the db DDL lock + _dbLock.emplace(opCtx, + opCtx->lockState(), + ns.dbName(), + reason, + isSharedLockMode(mode) ? MODE_IS : MODE_IX, + deadline, + true /*waitForRecovery*/); + + // Check under the DDL db lock if this is the primary shard for the database + DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, ns.dbName()); + } + + // Finally, acquire the collection DDL lock + _collLock.emplace( + opCtx, opCtx->lockState(), ns, reason, mode, deadline, true /*waitForRecovery*/); +} + +DDLLockManager::ScopedBaseDDLLock::ScopedBaseDDLLock(OperationContext* opCtx, + Locker* locker, + StringData resName, + const ResourceId& resId, + StringData reason, + LockMode mode, + Date_t deadline, + bool waitForRecovery) + : _resourceName(resName.toString()), + _resourceId(resId), + _reason(reason.toString()), + _mode(mode), + _result(LockResult::LOCK_INVALID), + _locker(locker), + _lockManager(DDLLockManager::get(opCtx)) { + + invariant(_lockManager); + _lockManager->_lock( + opCtx, _locker, _resourceName, _resourceId, _reason, _mode, deadline, waitForRecovery); + _result = LockResult::LOCK_OK; +} + +DDLLockManager::ScopedBaseDDLLock::ScopedBaseDDLLock(OperationContext* opCtx, + Locker* locker, + const NamespaceString& ns, + StringData reason, + LockMode mode, + Date_t deadline, + bool waitForRecovery) + : ScopedBaseDDLLock(opCtx, + locker, + NamespaceStringUtil::serialize(ns), + ResourceId{RESOURCE_DDL_COLLECTION, NamespaceStringUtil::serialize(ns)}, + reason, + mode, + deadline, + waitForRecovery) {} + +DDLLockManager::ScopedBaseDDLLock::ScopedBaseDDLLock(OperationContext* opCtx, + Locker* locker, + const DatabaseName& db, + StringData reason, + LockMode mode, + Date_t deadline, + bool waitForRecovery) + : ScopedBaseDDLLock(opCtx, + locker, + DatabaseNameUtil::serialize(db), + ResourceId{RESOURCE_DDL_DATABASE, DatabaseNameUtil::serialize(db)}, + reason, + mode, + deadline, + waitForRecovery) {} + +DDLLockManager::ScopedBaseDDLLock::~ScopedBaseDDLLock() { + if (_lockManager && _result == LockResult::LOCK_OK) { + _lockManager->_unlock(_locker, _resourceName, _resourceId, _reason, _mode); + } +} + +DDLLockManager::ScopedBaseDDLLock::ScopedBaseDDLLock(ScopedBaseDDLLock&& other) + : _resourceName(std::move(other._resourceName)), + _resourceId(std::move(other._resourceId)), + _reason(std::move(other._reason)), + _mode(std::move(other._mode)), + _result(std::move(other._result)), + _locker(other._locker), + _lockManager(other._lockManager) { + other._locker = nullptr; + other._lockManager = nullptr; } } // namespace mongo diff --git a/src/mongo/db/s/ddl_lock_manager.h b/src/mongo/db/s/ddl_lock_manager.h index c40e5efb2c009..b4ba8ac36f14d 100644 --- a/src/mongo/db/s/ddl_lock_manager.h +++ b/src/mongo/db/s/ddl_lock_manager.h @@ -29,10 +29,20 @@ #pragma once +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/duration.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -40,6 +50,61 @@ namespace mongo { * Service to manage DDL locks. 
*/ class DDLLockManager { + + /** + * ScopedBaseDDLLock will hold a DDL lock for the given resource without performing any check. + */ + class ScopedBaseDDLLock { + ScopedBaseDDLLock(const ScopedBaseDDLLock&) = delete; + ScopedBaseDDLLock& operator=(const ScopedBaseDDLLock&) = delete; + + ScopedBaseDDLLock(OperationContext* opCtx, + Locker* locker, + StringData resName, + const ResourceId& resId, + StringData reason, + LockMode mode, + Date_t deadline, + bool waitForRecovery); + + public: + ScopedBaseDDLLock(OperationContext* opCtx, + Locker* locker, + const NamespaceString& ns, + StringData reason, + LockMode mode, + Date_t deadline, + bool waitForRecovery); + + ScopedBaseDDLLock(OperationContext* opCtx, + Locker* locker, + const DatabaseName& db, + StringData reason, + LockMode mode, + Date_t deadline, + bool waitForRecovery); + + virtual ~ScopedBaseDDLLock(); + + ScopedBaseDDLLock(ScopedBaseDDLLock&& other); + + StringData getResourceName() const { + return _resourceName; + } + StringData getReason() const { + return _reason; + } + + protected: + const std::string _resourceName; + const ResourceId _resourceId; + const std::string _reason; + const LockMode _mode; + LockResult _result; + Locker* _locker; + DDLLockManager* _lockManager; + }; + public: // Default timeout which will be used if one is not passed to the lock method. static const Minutes kDefaultLockTimeout; @@ -48,30 +113,69 @@ class DDLLockManager { // should be made to wait for it to become free. static const Milliseconds kSingleLockAttemptTimeout; - /** - * RAII type for the DDL lock. - */ - class ScopedLock { - ScopedLock(const ScopedLock&) = delete; - ScopedLock& operator=(const ScopedLock&) = delete; - + // RAII-style class to acquire a DDL lock on the given database + class ScopedDatabaseDDLLock : public ScopedBaseDDLLock { public: - ScopedLock(StringData lockName, StringData reason, DDLLockManager* lockManager); - ~ScopedLock(); - - ScopedLock(ScopedLock&& other); + /** + * Constructs a ScopedDatabaseDDLLock object + * + * @db Database to lock. + * @reason Reason for which the lock is being acquired (e.g. 'createCollection'). + * @mode Lock mode. + * @timeout Time after which this acquisition attempt will give up in case of lock + * contention. A timeout value of -1 means the acquisition will be retried forever. + * + * Throws: + * ErrorCodes::LockBusy in case the timeout is reached. + * ErrorCodes::LockTimeout when not being on kPrimaryAndRecovered state and timeout + * is reached. + * ErrorCategory::Interruption in case the operation context is interrupted. + * ErrorCodes::IllegalOperation in case of not being on the db primary shard. + * + * It's caller's responsibility to ensure this lock is acquired only on primary node of + * replica set and released on step-down. + */ + ScopedDatabaseDDLLock(OperationContext* opCtx, + const DatabaseName& db, + StringData reason, + LockMode mode, + Milliseconds timeout = kDefaultLockTimeout); + }; - StringData getNs() { - return _ns; - } - StringData getReason() { - return _reason; - } + // RAII-style class to acquire a DDL lock on the given collection. The database DDL lock will + // also be implicitly acquired in the corresponding intent mode. + class ScopedCollectionDDLLock { + public: + /** + * Constructs a ScopedCollectionDDLLock object + * + * @ns Collection to lock. + * @reason Reason for which the lock is being acquired (e.g. 'createCollection'). + * @mode Lock mode. + * @timeout Time after which this acquisition attempt will give up in case of lock + * contention. 
A timeout value of -1 means the acquisition will be retried forever. + * + * Throws: + * ErrorCodes::LockBusy in case the timeout is reached. + * ErrorCodes::LockTimeout when not being on kPrimaryAndRecovered state and timeout + * is reached. + * ErrorCategory::Interruption in case the operation context is interrupted. + * ErrorCodes::IllegalOperation in case of not being on the db primary shard. + * + * It's caller's responsibility to ensure this lock is acquired only on primary node of + * replica set and released on step-down. + */ + ScopedCollectionDDLLock(OperationContext* opCtx, + const NamespaceString& ns, + StringData reason, + LockMode mode, + Milliseconds timeout = kDefaultLockTimeout); private: - std::string _ns; - std::string _reason; - DDLLockManager* _lockManager; + // Make sure _dbLock is instantiated before _collLock to avoid breaking the hierarchy locking + // acquisition order + boost::optional _dbLock; + boost::optional _collLock; }; DDLLockManager() = default; @@ -83,23 +187,6 @@ class DDLLockManager { static DDLLockManager* get(ServiceContext* service); static DDLLockManager* get(OperationContext* opCtx); - /** - * Returns a RAII style lock on the given namespace @ns. - * - * @ns Namespace to lock (both database and collections). - * @reason Reson for which the lock is being acquired (e.g. 'createCollection'). - * @timeout Time after which this acquisition attempt will give up in case of lock contention. - * A timeout value of -1 means the acquisition will be retried forever. - * - * - * Throws ErrorCodes::LockBusy in case the timeout is reached. - * Throws ErrorCategory::Interruption in case the opeartion context is interrupted. - */ - ScopedLock lock(OperationContext* opCtx, - StringData ns, - StringData reason, - Milliseconds timeout); - protected: struct NSLock { NSLock(StringData reason) : reason(reason.toString()) {} @@ -112,6 +199,43 @@ class DDLLockManager { Mutex _mutex = MONGO_MAKE_LATCH("DDLLockManager::_mutex"); StringMap> _inProgressMap; + + enum class State { + /** + * When the node becomes secondary the state is set to kPaused and all the lock acquisitions + * will be blocked except if the request comes from a DDLCoordinator. + */ + kPaused, + + /** + * After the node becomes primary and the ShardingDDLCoordinatorService has re-acquired + * all the previously acquired DDL locks for ongoing DDL coordinators, the state transitions + * to kPrimaryAndRecovered and the lock acquisitions are unblocked. 
+ */ + kPrimaryAndRecovered, + }; + + State _state = State::kPaused; + mutable stdx::condition_variable _stateCV; + + void setState(const State& state); + + void _lock(OperationContext* opCtx, + Locker* locker, + StringData ns, + const ResourceId& resId, + StringData reason, + LockMode mode, + Date_t deadline, + bool waitForRecovery); + + void _unlock( + Locker* locker, StringData ns, const ResourceId& resId, StringData reason, LockMode mode); + + friend class ShardingDDLCoordinatorService; + friend class ShardingDDLCoordinator; + friend class ShardingDDLCoordinatorServiceTest; + friend class ShardingCatalogManager; }; } // namespace mongo diff --git a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.cpp b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.cpp index b7db42cf4cbb6..d60bcf7da5a74 100644 --- a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.cpp +++ b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.cpp @@ -29,16 +29,60 @@ #include "mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/analyze_shard_key_read_write_distribution.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" #include "mongo/db/vector_clock.h" -#include "mongo/logv2/log.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/s/analyze_shard_key_cmd_gen.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_documents_gen.h" -#include "mongo/s/analyze_shard_key_feature_flag_gen.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/collection_routing_info_targeter.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -292,12 +336,10 @@ void processSampledDiffs(OperationContext* opCtx, } // namespace -REGISTER_DOCUMENT_SOURCE_WITH_FEATURE_FLAG( - _analyzeShardKeyReadWriteDistribution, - DocumentSourceAnalyzeShardKeyReadWriteDistribution::LiteParsed::parse, - 
DocumentSourceAnalyzeShardKeyReadWriteDistribution::createFromBson, - AllowedWithApiStrict::kNeverInVersion1, - analyze_shard_key::gFeatureFlagAnalyzeShardKey); +REGISTER_DOCUMENT_SOURCE(_analyzeShardKeyReadWriteDistribution, + DocumentSourceAnalyzeShardKeyReadWriteDistribution::LiteParsed::parse, + DocumentSourceAnalyzeShardKeyReadWriteDistribution::createFromBson, + AllowedWithApiStrict::kNeverInVersion1); boost::intrusive_ptr DocumentSourceAnalyzeShardKeyReadWriteDistribution::createFromBson( @@ -314,11 +356,7 @@ DocumentSourceAnalyzeShardKeyReadWriteDistribution::createFromBson( Value DocumentSourceAnalyzeShardKeyReadWriteDistribution::serialize( SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484305); - } - - return Value(Document{{getSourceName(), _spec.toBSON()}}); + return Value(Document{{getSourceName(), _spec.toBSON(opts)}}); } DocumentSource::GetNextResult DocumentSourceAnalyzeShardKeyReadWriteDistribution::doGetNext() { @@ -328,7 +366,7 @@ DocumentSource::GetNextResult DocumentSourceAnalyzeShardKeyReadWriteDistribution _finished = true; - auto collUuid = uassertStatusOK(validateCollectionOptionsLocally(pExpCtx->opCtx, pExpCtx->ns)); + auto collUuid = uassertStatusOK(validateCollectionOptions(pExpCtx->opCtx, pExpCtx->ns)); auto targeter = makeCollectionRoutingInfoTargeter(pExpCtx->opCtx, pExpCtx->ns, _spec.getKey(), diff --git a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h index 21a3b59f356e4..b47b7a53583cf 100644 --- a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h +++ b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.h @@ -29,10 +29,43 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/document_source_analyze_shard_key_read_write_distribution_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/analyze_shard_key_util.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace analyze_shard_key { @@ -51,9 +84,6 @@ class DocumentSourceAnalyzeShardKeyReadWriteDistribution final : public Document uassert(ErrorCodes::IllegalOperation, str::stream() << kStageName << " is not supported on a multitenant replica set", !gMultitenancySupport); - uassert(ErrorCodes::IllegalOperation, - str::stream() << kStageName << " is not supported on a configsvr mongod", - 
!serverGlobalParams.clusterRole.exclusivelyHasConfigRole()); uassert(6875700, str::stream() << kStageName << " must take a nested object but found: " << specElem, diff --git a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.idl b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.idl index 3a41239c348f1..84ba8129dd493 100644 --- a/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.idl +++ b/src/mongo/db/s/document_source_analyze_shard_key_read_write_distribution.idl @@ -40,22 +40,27 @@ structs: DocumentSourceAnalyzeShardKeyReadWriteDistributionSpec: description: Specification for a $_analyzeShardKeyReadWriteDistribution stage. strict: false + query_shape_component: true fields: key: description: The shard key to evaluate. type: KeyPattern + query_shape: custom validator: callback: validateShardKeyPattern splitPointsFilter: + query_shape: literal description: The filter to use to fetch the split point documents generated by the command running this aggregation stage. type: object_owned splitPointsAfterClusterTime: + query_shape: literal description: The afterClusterTime to use when fetching the split point documents. This must be greater or equal to the timestamp at which the insert for the last split point document occurred. type: timestamp splitPointsShardId: + query_shape: anonymize description: The id of the shard that the analyzeShardKey command is running on, and therefore contains the temporary collection storing the split points for the shard key. Only set when running on a sharded cluster. diff --git a/src/mongo/db/s/drop_collection_coordinator.cpp b/src/mongo/db/s/drop_collection_coordinator.cpp index 612294f0fa434..96e99a0d32cf9 100644 --- a/src/mongo/db/s/drop_collection_coordinator.cpp +++ b/src/mongo/db/s/drop_collection_coordinator.cpp @@ -29,24 +29,60 @@ #include "mongo/db/s/drop_collection_coordinator.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/cancelable_operation_context.h" -#include "mongo/db/catalog/collection_uuid_mismatch.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/drop_collection.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/drop_gen.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/participant_block_gen.h" #include "mongo/db/s/range_deletion_util.h" -#include "mongo/db/s/sharded_index_catalog_commands_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" #include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/vector_clock_mutable.h" +#include "mongo/executor/async_rpc.h" +#include "mongo/executor/async_rpc_util.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include 
"mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/s/request_types/sharded_ddl_commands_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -86,10 +122,6 @@ void DropCollectionCoordinator::dropCollectionLocally(OperationContext* opCtx, // an alternative client. auto newClient = opCtx->getServiceContext()->makeClient("removeRangeDeletions-" + collectionUUID->toString()); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr{newClient}; auto executor = Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); @@ -152,37 +184,27 @@ ExecutorFuture DropCollectionCoordinator::_runImpl( _freezeMigrations(executor); })) - .then([this, executor = executor, anchor = shared_from_this()] { + .then([this, token, executor = executor, anchor = shared_from_this()] { if (_isPre70Compatible()) return; _buildPhaseHandler(Phase::kEnterCriticalSection, - [this, executor = executor, anchor = shared_from_this()] { - _enterCriticalSection(executor); + [this, token, executor = executor, anchor = shared_from_this()] { + _enterCriticalSection(executor, token); })(); }) .then(_buildPhaseHandler(Phase::kDropCollection, [this, executor = executor, anchor = shared_from_this()] { _commitDropCollection(executor); })) - .then([this, executor = executor, anchor = shared_from_this()] { + .then([this, token, executor = executor, anchor = shared_from_this()] { if (_isPre70Compatible()) return; _buildPhaseHandler(Phase::kReleaseCriticalSection, - [this, executor = executor, anchor = shared_from_this()] { - _exitCriticalSection(executor); + [this, token, executor = executor, anchor = shared_from_this()] { + _exitCriticalSection(executor, token); })(); - }) - .onError([this, anchor = shared_from_this()](const Status& status) { - if (!status.isA() && - !status.isA()) { - LOGV2_ERROR(5280901, - "Error running drop collection", - logAttrs(nss()), - "error"_attr = redact(status)); - } - return status; }); } @@ -211,6 +233,19 @@ void DropCollectionCoordinator::_checkPreconditionsAndSaveArgumentsOnDoc() { AutoGetCollection::Options{} .viewMode(auto_get_collection::ViewMode::kViewsPermitted) .expectedUUID(_doc.getCollectionUUID())}; + + // The drop operation is aborted if the namespace does not exist or does not comply with + // naming restrictions. Non-system namespaces require additional logic that cannot be done + // at this level, such as the time series collection must be resolved to remove the + // corresponding bucket collection, or tag documents associated to non-existing collections + // must be cleaned up. 
+        if (nss().isSystem()) {
+            uassert(ErrorCodes::NamespaceNotFound,
+                    "namespace {} does not exist"_format(nss().toStringForErrorMsg()),
+                    *coll);
+
+            uassertStatusOK(isDroppableCollection(opCtx, nss()));
+        }
     }
     _saveCollInfo(opCtx);
@@ -247,36 +282,36 @@ void DropCollectionCoordinator::_freezeMigrations(
         logChangeDetail.append("collectionUUID", _doc.getCollInfo()->getUuid().toBSON());
     }
-    ShardingLogging::get(opCtx)->logChange(
-        opCtx, "dropCollection.start", nss().ns(), logChangeDetail.obj());
+    ShardingLogging::get(opCtx)->logChange(opCtx,
+                                           "dropCollection.start",
+                                           NamespaceStringUtil::serialize(nss()),
+                                           logChangeDetail.obj());
     if (_doc.getCollInfo()) {
-        sharding_ddl_util::stopMigrations(opCtx, nss(), _doc.getCollInfo()->getUuid());
+        const auto collUUID = _doc.getCollInfo()->getUuid();
+        sharding_ddl_util::stopMigrations(opCtx, nss(), collUUID, getNewSession(opCtx));
     }
 }
 void DropCollectionCoordinator::_enterCriticalSection(
-    std::shared_ptr executor) {
+    std::shared_ptr executor, const CancellationToken& token) {
     LOGV2_DEBUG(7038100, 2, "Acquiring critical section", logAttrs(nss()));
     auto opCtxHolder = cc().makeOperationContext();
     auto* opCtx = opCtxHolder.get();
     getForwardableOpMetadata().setOn(opCtx);
-    _updateSession(opCtx);
     ShardsvrParticipantBlock blockCRUDOperationsRequest(nss());
     blockCRUDOperationsRequest.setBlockType(mongo::CriticalSectionBlockTypeEnum::kReadsAndWrites);
     blockCRUDOperationsRequest.setReason(_critSecReason);
-    blockCRUDOperationsRequest.setAllowViews(true);
-    const auto cmdObj =
-        CommandHelpers::appendMajorityWriteConcern(blockCRUDOperationsRequest.toBSON({}));
+    async_rpc::GenericArgs args;
+    async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args);
+    async_rpc::AsyncRPCCommandHelpers::appendOSI(args, getNewSession(opCtx));
+    auto opts = std::make_shared>(
+        blockCRUDOperationsRequest, **executor, token, args);
     sharding_ddl_util::sendAuthenticatedCommandToShards(
-        opCtx,
-        nss().db(),
-        cmdObj.addFields(getCurrentSession().toBSON()),
-        Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx),
-        **executor);
+        opCtx, opts, Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx));
     LOGV2_DEBUG(7038101, 2, "Acquired critical section", logAttrs(nss()));
 }
@@ -291,24 +326,37 @@ void DropCollectionCoordinator::_commitDropCollection(
     LOGV2_DEBUG(5390504, 2, "Dropping collection", logAttrs(nss()), "sharded"_attr = collIsSharded);
-    _updateSession(opCtx);
+    // Remove the query sampling configuration document for this collection, if it exists.
+    sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(
+        opCtx,
+        BSON(analyze_shard_key::QueryAnalyzerDocument::kNsFieldName
+             << NamespaceStringUtil::serialize(nss())));
+
     if (collIsSharded) {
         invariant(_doc.getCollInfo());
-        const auto& coll = _doc.getCollInfo().value();
+        const auto coll = _doc.getCollInfo().value();
+
+        // This always runs in the shard role so should use a cluster transaction to guarantee
+        // targeting the config server.
+        bool useClusterTransaction = true;
         sharding_ddl_util::removeCollAndChunksMetadataFromConfig(
             opCtx,
+            Grid::get(opCtx)->shardRegistry()->getConfigShard(),
+            Grid::get(opCtx)->catalogClient(),
             coll,
             ShardingCatalogClient::kMajorityWriteConcern,
-            getCurrentSession(),
+            getNewSession(opCtx),
+            useClusterTransaction,
             **executor);
     }
     // Remove tags even if the collection is not sharded or didn't exist
-    _updateSession(opCtx);
-    sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss(), getCurrentSession());
+    sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss(), getNewSession(opCtx));
-    // get a Lsid and an incremented txnNumber. Ensures we are the primary
-    _updateSession(opCtx);
+    // Checkpoint the configTime to ensure that, in the case of a stepdown, the new primary will
+    // start up from a configTime that is inclusive of the metadata removal that was committed
+    // during the critical section.
+    VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx);
     const auto primaryShardId = ShardingState::get(opCtx)->shardId();
@@ -320,45 +368,38 @@ void DropCollectionCoordinator::_commitDropCollection(
                        participants.end());
     sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
-        opCtx, nss(), participants, **executor, getCurrentSession(), true /*fromMigrate*/);
+        opCtx, nss(), participants, **executor, getNewSession(opCtx), true /*fromMigrate*/);
     // The sharded collection must be dropped on the primary shard after it has been
     // dropped on all of the other shards to ensure it can only be re-created as
     // unsharded with a higher optime than all of the drops.
     sharding_ddl_util::sendDropCollectionParticipantCommandToShards(
-        opCtx, nss(), {primaryShardId}, **executor, getCurrentSession(), false /*fromMigrate*/);
+        opCtx, nss(), {primaryShardId}, **executor, getNewSession(opCtx), false /*fromMigrate*/);
-    // Remove potential query analyzer document only after purging the collection from
-    // the catalog. This ensures no leftover documents referencing an old incarnation of
-    // a collection.
- sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss(), boost::none); - - ShardingLogging::get(opCtx)->logChange(opCtx, "dropCollection", nss().ns()); + ShardingLogging::get(opCtx)->logChange( + opCtx, "dropCollection", NamespaceStringUtil::serialize(nss())); LOGV2(5390503, "Collection dropped", logAttrs(nss())); } void DropCollectionCoordinator::_exitCriticalSection( - std::shared_ptr executor) { + std::shared_ptr executor, const CancellationToken& token) { LOGV2_DEBUG(7038102, 2, "Releasing critical section", logAttrs(nss())); auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); - _updateSession(opCtx); ShardsvrParticipantBlock unblockCRUDOperationsRequest(nss()); unblockCRUDOperationsRequest.setBlockType(CriticalSectionBlockTypeEnum::kUnblock); unblockCRUDOperationsRequest.setReason(_critSecReason); - unblockCRUDOperationsRequest.setAllowViews(true); - const auto cmdObj = - CommandHelpers::appendMajorityWriteConcern(unblockCRUDOperationsRequest.toBSON({})); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + async_rpc::AsyncRPCCommandHelpers::appendOSI(args, getNewSession(opCtx)); + auto opts = std::make_shared>( + unblockCRUDOperationsRequest, **executor, token, args); sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - nss().db(), - cmdObj.addFields(getCurrentSession().toBSON()), - Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx), - **executor); + opCtx, opts, Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx)); LOGV2_DEBUG(7038103, 2, "Released critical section", logAttrs(nss())); } diff --git a/src/mongo/db/s/drop_collection_coordinator.h b/src/mongo/db/s/drop_collection_coordinator.h index cbde60fdbed07..99d6eeb2a5d80 100644 --- a/src/mongo/db/s/drop_collection_coordinator.h +++ b/src/mongo/db/s/drop_collection_coordinator.h @@ -29,9 +29,28 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/drop_collection.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/s/drop_collection_coordinator_document_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { @@ -46,7 +65,7 @@ class DropCollectionCoordinator final : RecoverableShardingDDLCoordinator(service, "DropCollectionCoordinator", initialState), _critSecReason(BSON("command" << "dropCollection" - << "ns" << originalNss().toString())) {} + << "ns" << NamespaceStringUtil::serialize(originalNss()))) {} ~DropCollectionCoordinator() = default; @@ -89,11 +108,13 @@ class DropCollectionCoordinator final void _freezeMigrations(std::shared_ptr executor); - void _enterCriticalSection(std::shared_ptr executor); + void _enterCriticalSection(std::shared_ptr executor, + const CancellationToken& token); void _commitDropCollection(std::shared_ptr executor); - void _exitCriticalSection(std::shared_ptr executor); + void _exitCriticalSection(std::shared_ptr executor, + const 
CancellationToken& token); }; } // namespace mongo diff --git a/src/mongo/db/s/drop_database_coordinator.cpp b/src/mongo/db/s/drop_database_coordinator.cpp index 42396bef6b39d..f3b205e19d1b9 100644 --- a/src/mongo/db/s/drop_database_coordinator.cpp +++ b/src/mongo/db/s/drop_database_coordinator.cpp @@ -30,30 +30,85 @@ #include "mongo/db/s/drop_database_coordinator.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/api_parameters.h" -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/cluster_transaction_api.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/ddl_lock_manager.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/participant_block_gen.h" #include "mongo/db/s/shard_metadata_util.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" #include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/type_shard_database.h" +#include "mongo/db/s/type_shard_database_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/async_rpc.h" +#include "mongo/executor/async_rpc_util.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" #include "mongo/s/client/shard_registry.h" -#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/database_version.h" +#include "mongo/s/database_version_gen.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/flush_database_cache_updates_gen.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/pcre_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kSharding @@ -102,18 +157,8 @@ void removeDatabaseFromConfigAndUpdatePlacementHistory( str::stream() << "Could not remove database metadata from config server for '" << dbName << "'."); - // pre-check to guarantee idempotence: in case of a retry, the placement history - // entry may already exist - if (deleteDatabaseEntryResponse.getN() == 0) { - BatchedCommandResponse noOp; - noOp.setN(0); - noOp.setStatus(Status::OK()); - return SemiFuture(std::move(noOp)); - } - const auto currentTime = VectorClock::get(opCtx)->getTime(); const auto currentTimestamp = currentTime.clusterTime().asTimestamp(); - NamespacePlacementType placementInfo(NamespaceString(dbName), currentTimestamp, {}); write_ops::InsertCommandRequest insertPlacementEntry( @@ -130,8 +175,11 @@ void removeDatabaseFromConfigAndUpdatePlacementHistory( auto wc = WriteConcernOptions{WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, WriteConcernOptions::kNoTimeout}; + // This always runs in the shard role so should use a cluster transaction to guarantee targeting + // the config server. + bool useClusterTransaction = true; sharding_ddl_util::runTransactionOnShardingCatalog( - opCtx, std::move(transactionChain), wc, osi, executor); + opCtx, std::move(transactionChain), wc, osi, useClusterTransaction, executor); } // TODO SERVER-73627: Remove once 7.0 becomes last LTS @@ -143,7 +191,7 @@ class ScopedDatabaseCriticalSection { : _opCtx(opCtx), _dbName(std::move(dbName)), _reason(std::move(reason)) { // TODO SERVER-67438 Once ScopedDatabaseCriticalSection holds a DatabaseName obj, use dbName // directly - DatabaseName databaseName(boost::none, _dbName); + DatabaseName databaseName = DatabaseNameUtil::deserialize(boost::none, _dbName); Lock::DBLock dbLock(_opCtx, databaseName, MODE_X); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(_opCtx, databaseName); @@ -156,7 +204,7 @@ class ScopedDatabaseCriticalSection { UninterruptibleLockGuard guard(_opCtx->lockState()); // NOLINT. // TODO SERVER-67438 Once ScopedDatabaseCriticalSection holds a DatabaseName obj, use dbName // directly - DatabaseName databaseName(boost::none, _dbName); + DatabaseName databaseName = DatabaseNameUtil::deserialize(boost::none, _dbName); Lock::DBLock dbLock(_opCtx, databaseName, MODE_X); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(_opCtx, databaseName); @@ -195,7 +243,7 @@ bool isDbAlreadyDropped(OperationContext* opCtx, BSONObj getReasonForDropCollection(const NamespaceString& nss) { return BSON("command" << "dropCollection fromDropDatabase" - << "nss" << nss.ns()); + << "nss" << NamespaceStringUtil::serialize(nss)); } } // namespace @@ -203,41 +251,52 @@ BSONObj getReasonForDropCollection(const NamespaceString& nss) { void DropDatabaseCoordinator::_dropShardedCollection( OperationContext* opCtx, const CollectionType& coll, - std::shared_ptr executor) { + std::shared_ptr executor, + const CancellationToken& token) { const auto& nss = coll.getNss(); - // Acquire the collection distributed lock in order to synchronize with an eventual ongoing - // moveChunk and to prevent new ones from happening. 
- const auto coorName = DDLCoordinatorType_serializer(_coordId.getOperationType()); - auto collDDLLock = DDLLockManager::get(opCtx)->lock( - opCtx, nss.ns(), coorName, DDLLockManager::kDefaultLockTimeout); + // TODO SERVER-77546 Remove the collection DDL lock acquisition on feature flag removal since it + // will be mandatory to acquire a db DDL lock in IX/IS mode before acquiring any collection DDL + // lock + boost::optional collDDLLock; + if (!feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility)) { + // Acquire the collection DDL lock in order to synchronize with other DDL operations that + // didn't take the DB DDL lock + const auto coorName = DDLCoordinatorType_serializer(_coordId.getOperationType()); + collDDLLock.emplace(opCtx, nss, coorName, MODE_X, DDLLockManager::kDefaultLockTimeout); + } if (!_isPre70Compatible()) { - _updateSession(opCtx); ShardsvrParticipantBlock blockCRUDOperationsRequest(nss); blockCRUDOperationsRequest.setBlockType( mongo::CriticalSectionBlockTypeEnum::kReadsAndWrites); blockCRUDOperationsRequest.setReason(getReasonForDropCollection(nss)); - blockCRUDOperationsRequest.setAllowViews(true); - const auto cmdObj = - CommandHelpers::appendMajorityWriteConcern(blockCRUDOperationsRequest.toBSON({})); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + async_rpc::AsyncRPCCommandHelpers::appendOSI(args, getNewSession(opCtx)); + auto opts = std::make_shared>( + blockCRUDOperationsRequest, **executor, token, args); sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - nss.db(), - cmdObj.addFields(getCurrentSession().toBSON()), - Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx), - **executor); + opCtx, opts, Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx)); } - _updateSession(opCtx); + // This always runs in the shard role so should use a cluster transaction to guarantee + // targeting the config server. + bool useClusterTransaction = true; sharding_ddl_util::removeCollAndChunksMetadataFromConfig( - opCtx, coll, ShardingCatalogClient::kMajorityWriteConcern, getCurrentSession(), **executor); + opCtx, + Grid::get(opCtx)->shardRegistry()->getConfigShard(), + Grid::get(opCtx)->catalogClient(), + coll, + ShardingCatalogClient::kMajorityWriteConcern, + getNewSession(opCtx), + useClusterTransaction, + **executor); - _updateSession(opCtx); - sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss, getCurrentSession()); + sharding_ddl_util::removeTagsMetadataFromConfig(opCtx, nss, getNewSession(opCtx)); const auto primaryShardId = ShardingState::get(opCtx)->shardId(); - _updateSession(opCtx); // We need to send the drop to all the shards because both movePrimary and // moveChunk leave garbage behind for sharded collections. @@ -246,37 +305,30 @@ void DropDatabaseCoordinator::_dropShardedCollection( participants.erase(std::remove(participants.begin(), participants.end(), primaryShardId), participants.end()); sharding_ddl_util::sendDropCollectionParticipantCommandToShards( - opCtx, nss, participants, **executor, getCurrentSession(), true /* fromMigrate */); + opCtx, nss, participants, **executor, getNewSession(opCtx), true /* fromMigrate */); // The sharded collection must be dropped on the primary shard after it has been dropped on all // of the other shards to ensure it can only be re-created as unsharded with a higher optime // than all of the drops. 
sharding_ddl_util::sendDropCollectionParticipantCommandToShards( - opCtx, nss, {primaryShardId}, **executor, getCurrentSession(), false /* fromMigrate */); - - // Remove collection's query analyzer configuration document, if it exists. - sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig(opCtx, nss, coll.getUuid()); + opCtx, nss, {primaryShardId}, **executor, getNewSession(opCtx), false /* fromMigrate */); if (!_isPre70Compatible()) { - _updateSession(opCtx); ShardsvrParticipantBlock unblockCRUDOperationsRequest(nss); unblockCRUDOperationsRequest.setBlockType(CriticalSectionBlockTypeEnum::kUnblock); unblockCRUDOperationsRequest.setReason(getReasonForDropCollection(nss)); - unblockCRUDOperationsRequest.setAllowViews(true); - - const auto cmdObj = - CommandHelpers::appendMajorityWriteConcern(unblockCRUDOperationsRequest.toBSON({})); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + async_rpc::AsyncRPCCommandHelpers::appendOSI(args, getNewSession(opCtx)); + auto opts = std::make_shared>( + unblockCRUDOperationsRequest, **executor, token, args); sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - nss.db(), - cmdObj.addFields(getCurrentSession().toBSON()), - Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx), - **executor); + opCtx, opts, Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx)); } } void DropDatabaseCoordinator::_clearDatabaseInfoOnPrimary(OperationContext* opCtx) { - DatabaseName dbName(boost::none, _dbName); + DatabaseName dbName = DatabaseNameUtil::deserialize(boost::none, _dbName); AutoGetDb autoDb(opCtx, dbName, MODE_X); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, dbName); scopedDss->clearDbInfo(opCtx); @@ -307,7 +359,7 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( return ExecutorFuture(**executor) .then(_buildPhaseHandler( Phase::kDrop, - [this, executor = executor, anchor = shared_from_this()] { + [this, token, executor = executor, anchor = shared_from_this()] { auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); @@ -316,9 +368,8 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( // Perform a noop write on the participants in order to advance the txnNumber // for this coordinator's lsid so that requests with older txnNumbers can no // longer execute. 
- _updateSession(opCtx); _performNoopRetryableWriteOnAllShardsAndConfigsvr( - opCtx, getCurrentSession(), **executor); + opCtx, getNewSession(opCtx), **executor); } ShardingLogging::get(opCtx)->logChange(opCtx, "dropDatabase.start", _dbName); @@ -350,25 +401,26 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( } if (_doc.getCollInfo()) { - const auto& coll = _doc.getCollInfo().value(); + const auto coll = _doc.getCollInfo().value(); LOGV2_DEBUG(5494504, 2, "Completing collection drop from previous primary", logAttrs(coll.getNss())); - _dropShardedCollection(opCtx, coll, executor); + _dropShardedCollection(opCtx, coll, executor, token); } for (const auto& coll : allCollectionsForDb) { const auto& nss = coll.getNss(); LOGV2_DEBUG(5494505, 2, "Dropping collection", logAttrs(nss)); - sharding_ddl_util::stopMigrations(opCtx, nss, coll.getUuid()); + sharding_ddl_util::stopMigrations( + opCtx, nss, coll.getUuid(), getNewSession(opCtx)); auto newStateDoc = _doc; newStateDoc.setCollInfo(coll); _updateStateDocument(opCtx, std::move(newStateDoc)); - _dropShardedCollection(opCtx, coll, executor); + _dropShardedCollection(opCtx, coll, executor, token); } // First of all, we will get all namespaces that still have zones associated to @@ -379,11 +431,18 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( const auto& nssWithZones = catalogClient->getAllNssThatHaveZonesForDatabase(opCtx, _dbName); for (const auto& nss : nssWithZones) { - _updateSession(opCtx); sharding_ddl_util::removeTagsMetadataFromConfig( - opCtx, nss, getCurrentSession()); + opCtx, nss, getNewSession(opCtx)); } + // Remove the query sampling configuration documents for all collections in this + // database, if they exist. + const std::string regex = "^" + pcre_util::quoteMeta(_dbName) + "\\..*"; + sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig( + opCtx, + BSON(analyze_shard_key::QueryAnalyzerDocument::kNsFieldName + << BSON("$regex" << regex))); + const auto allShardIds = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); { // Acquire the database critical section in order to disallow implicit @@ -407,7 +466,8 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( } auto dropDatabaseParticipantCmd = ShardsvrDropDatabaseParticipant(); - dropDatabaseParticipantCmd.setDbName(_dbName); + dropDatabaseParticipantCmd.setDbName( + DatabaseNameUtil::deserialize(boost::none, _dbName)); const auto cmdObj = CommandHelpers::appendMajorityWriteConcern( dropDatabaseParticipantCmd.toBSON({})); @@ -436,15 +496,19 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( participants.erase( std::remove(participants.begin(), participants.end(), primaryShardId), participants.end()); + + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + async_rpc::AsyncRPCCommandHelpers::appendDbVersionIfPresent( + args, *metadata().getDatabaseVersion()); // Drop DB on all other shards, attaching the dbVersion to the request to ensure // idempotency. 
+ auto opts = std::make_shared< + async_rpc::AsyncRPCOptions>( + dropDatabaseParticipantCmd, **executor, token, args); try { sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - _dbName, - appendDbVersionIfPresent(cmdObj, *metadata().getDatabaseVersion()), - participants, - **executor); + opCtx, opts, participants); } catch (ExceptionFor&) { // The DB metadata could have been removed by a network-partitioned former // primary @@ -456,13 +520,9 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( _clearDatabaseInfoOnPrimary(opCtx); _clearDatabaseInfoOnSecondaries(opCtx); - _updateSession(opCtx); + const auto& osi = getNewSession(opCtx); removeDatabaseFromConfigAndUpdatePlacementHistory( - opCtx, - **executor, - _dbName, - *metadata().getDatabaseVersion(), - getCurrentSession()); + opCtx, **executor, _dbName, *metadata().getDatabaseVersion(), osi); VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); } @@ -480,7 +540,7 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( /* throwIfReasonDiffers */ false); } }) - .then([this, executor = executor, anchor = shared_from_this()] { + .then([this, token, executor = executor, anchor = shared_from_this()] { auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); @@ -494,29 +554,19 @@ ExecutorFuture DropDatabaseCoordinator::_runImpl( FlushDatabaseCacheUpdatesWithWriteConcern flushDbCacheUpdatesCmd( _dbName.toString()); flushDbCacheUpdatesCmd.setSyncFromConfig(true); - flushDbCacheUpdatesCmd.setDbName(_dbName); + flushDbCacheUpdatesCmd.setDbName(DatabaseName::kAdmin); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); IgnoreAPIParametersBlock ignoreApiParametersBlock{opCtx}; - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - "admin", - CommandHelpers::appendMajorityWriteConcern(flushDbCacheUpdatesCmd.toBSON({})), - participants, - **executor); + auto opts = std::make_shared< + async_rpc::AsyncRPCOptions>( + flushDbCacheUpdatesCmd, **executor, token, args); + sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, participants); } ShardingLogging::get(opCtx)->logChange(opCtx, "dropDatabase", _dbName); LOGV2(5494506, "Database dropped", "db"_attr = _dbName); - }) - .onError([this, anchor = shared_from_this()](const Status& status) { - if (!status.isA() && - !status.isA()) { - LOGV2_ERROR(5494507, - "Error running drop database", - "db"_attr = _dbName, - "error"_attr = redact(status)); - } - return status; }); } diff --git a/src/mongo/db/s/drop_database_coordinator.h b/src/mongo/db/s/drop_database_coordinator.h index 581efaa2a4f28..dd6e98b82d148 100644 --- a/src/mongo/db/s/drop_database_coordinator.h +++ b/src/mongo/db/s/drop_database_coordinator.h @@ -29,8 +29,26 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/s/drop_database_coordinator_document_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include 
"mongo/util/future.h" namespace mongo { @@ -69,7 +87,8 @@ class DropDatabaseCoordinator final void _dropShardedCollection(OperationContext* opCtx, const CollectionType& coll, - std::shared_ptr executor); + std::shared_ptr executor, + const CancellationToken& token); void _clearDatabaseInfoOnPrimary(OperationContext* opCtx); diff --git a/src/mongo/db/s/flush_database_cache_updates_command.cpp b/src/mongo/db/s/flush_database_cache_updates_command.cpp index 3509a8e9303d2..eb2ec86c4a795 100644 --- a/src/mongo/db/s/flush_database_cache_updates_command.cpp +++ b/src/mongo/db/s/flush_database_cache_updates_command.cpp @@ -28,27 +28,51 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/action_set.h" +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/database_sharding_state.h" -#include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache_loader.h" -#include "mongo/s/grid.h" +#include "mongo/s/database_version.h" #include "mongo/s/request_types/flush_database_cache_updates_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -66,8 +90,9 @@ Status insertDatabaseEntryForBackwardCompatibility(OperationContext* opCtx, DBDirectClient client(opCtx); auto commandResponse = client.runCommand([&] { - auto dbMetadata = - DatabaseType(dbName.toString(), ShardId::kConfigServerId, DatabaseVersion::makeFixed()); + auto dbMetadata = DatabaseType(DatabaseNameUtil::serialize(dbName), + ShardId::kConfigServerId, + DatabaseVersion::makeFixed()); write_ops::InsertCommandRequest insertOp(NamespaceString::kShardConfigDatabasesNamespace); insertOp.setDocuments({dbMetadata.toBSON()}); @@ -124,11 +149,13 @@ class FlushDatabaseCacheUpdatesCmdBase : public TypedCommand { } void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + uassert( + ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + 
ResourcePattern::forClusterResource(Base::request().getDbName().tenantId()), + ActionType::internal)); } void typedRun(OperationContext* opCtx) { @@ -143,7 +170,7 @@ class FlushDatabaseCacheUpdatesCmdBase : public TypedCommand { "Can't call _flushDatabaseCacheUpdates if in read-only mode", !opCtx->readOnly()); - if (_dbName() == DatabaseName::kAdmin || _dbName() == DatabaseName::kConfig) { + if (_dbName() == DatabaseName::kAdmin.db() || _dbName() == DatabaseName::kConfig.db()) { // The admin and config databases have fixed metadata that does not need to be // refreshed. @@ -157,7 +184,8 @@ class FlushDatabaseCacheUpdatesCmdBase : public TypedCommand { 1, "Inserting a database collection entry with fixed metadata", "db"_attr = _dbName()); - uassertStatusOK(insertDatabaseEntryForBackwardCompatibility(opCtx, _dbName())); + uassertStatusOK(insertDatabaseEntryForBackwardCompatibility( + opCtx, DatabaseNameUtil::deserialize(boost::none, _dbName()))); } return; @@ -166,7 +194,8 @@ class FlushDatabaseCacheUpdatesCmdBase : public TypedCommand { boost::optional> criticalSectionSignal; { - AutoGetDb autoDb(opCtx, _dbName(), MODE_IS); + AutoGetDb autoDb( + opCtx, DatabaseNameUtil::deserialize(boost::none, _dbName()), MODE_IS); // If the primary is in the critical section, secondaries must wait for the commit // to finish on the primary in case a secondary's caller has an afterClusterTime diff --git a/src/mongo/db/s/flush_resharding_state_change_command.cpp b/src/mongo/db/s/flush_resharding_state_change_command.cpp index b9494be2d30df..921cc533cff4c 100644 --- a/src/mongo/db/s/flush_resharding_state_change_command.cpp +++ b/src/mongo/db/s/flush_resharding_state_change_command.cpp @@ -28,25 +28,41 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/action_set.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" -#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog_cache_loader.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/flush_resharding_state_change_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -95,8 +111,9 @@ class FlushReshardingStateChangeCmd final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } void 
typedRun(OperationContext* opCtx) { @@ -114,11 +131,6 @@ class FlushReshardingStateChangeCmd final : public TypedCommand(Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor()) .then([svcCtx = opCtx->getServiceContext(), nss = ns()] { ThreadClient tc("FlushReshardingStateChange", svcCtx); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - auto opCtx = tc->makeOperationContext(); onCollectionPlacementVersionMismatch( opCtx.get(), nss, boost::none /* chunkVersionReceived */); diff --git a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp index 6293ca44321d1..10234deee2967 100644 --- a/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp +++ b/src/mongo/db/s/flush_routing_table_cache_updates_command.cpp @@ -27,23 +27,37 @@ * it in the license file. */ -#include "mongo/db/auth/action_set.h" +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/catalog_cache_loader.h" -#include "mongo/s/grid.h" #include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -89,11 +103,13 @@ class FlushRoutingTableCacheUpdatesCmdBase : public TypedCommand { } void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + uassert( + ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(Base::request().getDbName().tenantId()), + ActionType::internal)); } void typedRun(OperationContext* opCtx) { diff --git a/src/mongo/db/s/forwardable_operation_metadata.cpp b/src/mongo/db/s/forwardable_operation_metadata.cpp index 1ceba18777ee9..bc67249828f3a 100644 --- a/src/mongo/db/s/forwardable_operation_metadata.cpp +++ b/src/mongo/db/s/forwardable_operation_metadata.cpp @@ -29,9 +29,26 @@ #include "mongo/db/s/forwardable_operation_metadata.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/client.h" 
#include "mongo/db/write_block_bypass.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/metadata/impersonated_user_metadata.h" +#include "mongo/rpc/metadata/impersonated_user_metadata_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/s/forwardable_operation_metadata.h b/src/mongo/db/s/forwardable_operation_metadata.h index 4df938b071239..801281fdf3125 100644 --- a/src/mongo/db/s/forwardable_operation_metadata.h +++ b/src/mongo/db/s/forwardable_operation_metadata.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/operation_context.h" #include "mongo/db/s/forwardable_operation_metadata_gen.h" diff --git a/src/mongo/db/s/get_database_version_command.cpp b/src/mongo/db/s/get_database_version_command.cpp index af93287ca53fa..119d474b267f3 100644 --- a/src/mongo/db/s/get_database_version_command.cpp +++ b/src/mongo/db/s/get_database_version_command.cpp @@ -28,23 +28,40 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/auth/action_set.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/database_version.h" #include "mongo/s/request_types/get_database_version_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { -namespace { class GetDatabaseVersionCmd final : public TypedCommand { public: @@ -62,8 +79,9 @@ class GetDatabaseVersionCmd final : public TypedCommand { // The command parameter happens to be string so it's historically been interpreted // by parseNs as a collection. Continuing to do so here for unexamined compatibility. 
NamespaceString ns() const override { - return NamespaceStringUtil::parseNamespaceFromRequest(request().getDbName(), - _targetDb()); + const auto& cmd = request(); + return NamespaceStringUtil::parseNamespaceFromRequest(cmd.getDbName(), + cmd.getCommandParameter()); } void doCheckAuthorization(OperationContext* opCtx) const override { @@ -80,7 +98,7 @@ class GetDatabaseVersionCmd final : public TypedCommand { str::stream() << definition()->getName() << " can only be run on shard servers", serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); - DatabaseName dbName(boost::none, _targetDb()); + auto dbName = _targetDb(); AutoGetDb autoDb(opCtx, dbName, MODE_IS); const auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireShared(opCtx, dbName); @@ -90,10 +108,16 @@ class GetDatabaseVersionCmd final : public TypedCommand { versionObj = dbVersion->toBSON(); } result->getBodyBuilder().append("dbVersion", versionObj); + + if (const auto isPrimaryShardForDb = scopedDss->_isPrimaryShardForDb(opCtx)) { + result->getBodyBuilder().append("isPrimaryShardForDb", *isPrimaryShardForDb); + } } - StringData _targetDb() const { - return request().getCommandParameter(); + DatabaseName _targetDb() const { + const auto& cmd = request(); + return DatabaseNameUtil::deserialize(cmd.getDbName().tenantId(), + cmd.getCommandParameter()); } }; @@ -110,5 +134,4 @@ class GetDatabaseVersionCmd final : public TypedCommand { } } getDatabaseVersionCmd; -} // namespace } // namespace mongo diff --git a/src/mongo/db/s/get_shard_version_command.cpp b/src/mongo/db/s/get_shard_version_command.cpp index 6bfbf1fc308a3..1031bca72e4dc 100644 --- a/src/mongo/db/s/get_shard_version_command.cpp +++ b/src/mongo/db/s/get_shard_version_command.cpp @@ -28,16 +28,42 @@ */ -#include "mongo/db/auth/action_set.h" +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/connection_string.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/util/str.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -146,10 +172,13 @@ class GetShardVersion : public BasicCommand { if (cmdObj["fullMetadata"].trueValue()) { BSONArrayBuilder indexesArrBuilder; + // Added to the result bson if the max bson size is exceeded + BSONObjBuilder exceededSizeElt(BSON("exceededSize" << true)); bool exceedsSizeLimit = false; 
scopedCsr->getIndexes(opCtx)->forEachIndex([&](const auto& index) { BSONObjBuilder indexB(index.toBSON()); - if (result.len() + indexesArrBuilder.len() + indexB.len() > + if (result.len() + exceededSizeElt.len() + indexesArrBuilder.len() + + indexB.len() > BSONObjMaxUserSize) { exceedsSizeLimit = true; } else { @@ -160,6 +189,9 @@ class GetShardVersion : public BasicCommand { }); result.append("indexes", indexesArrBuilder.arr()); + if (exceedsSizeLimit) { + result.appendElements(exceededSizeElt.done()); + } } } diff --git a/src/mongo/db/s/global_index/global_index_cloner_fetcher.cpp b/src/mongo/db/s/global_index/global_index_cloner_fetcher.cpp index 50289ce28cfc4..77b50055c99c9 100644 --- a/src/mongo/db/s/global_index/global_index_cloner_fetcher.cpp +++ b/src/mongo/db/s/global_index/global_index_cloner_fetcher.cpp @@ -29,21 +29,38 @@ #include "mongo/db/s/global_index/global_index_cloner_fetcher.h" +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/curop.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/operation_context.h" -#include "mongo/db/pipeline/aggregation_request_helper.h" -#include "mongo/db/pipeline/document_source_match.h" -#include "mongo/db/pipeline/document_source_replace_root.h" -#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" -#include "mongo/db/pipeline/sharded_agg_helpers.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/resharding/document_source_resharding_ownership_match.h" -#include "mongo/logv2/log.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/shard_key_pattern.h" +#include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kGlobalIndex diff --git a/src/mongo/db/s/global_index/global_index_cloner_fetcher.h b/src/mongo/db/s/global_index/global_index_cloner_fetcher.h index 8286c8bd1d49b..193316818f198 100644 --- a/src/mongo/db/s/global_index/global_index_cloner_fetcher.h +++ b/src/mongo/db/s/global_index/global_index_cloner_fetcher.h @@ -29,9 +29,19 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/shard_id.h" #include "mongo/util/future.h" diff --git a/src/mongo/db/s/global_index/global_index_cloner_fetcher_factory.cpp b/src/mongo/db/s/global_index/global_index_cloner_fetcher_factory.cpp index 9d8a87847d6bb..9fc05f9f6232a 100644 --- a/src/mongo/db/s/global_index/global_index_cloner_fetcher_factory.cpp +++ b/src/mongo/db/s/global_index/global_index_cloner_fetcher_factory.cpp @@ -29,6 +29,8 @@ #include 
"mongo/db/s/global_index/global_index_cloner_fetcher_factory.h" +#include + namespace mongo { namespace global_index { diff --git a/src/mongo/db/s/global_index/global_index_cloner_fetcher_factory.h b/src/mongo/db/s/global_index/global_index_cloner_fetcher_factory.h index 46c4422dd2b03..ec81018e19452 100644 --- a/src/mongo/db/s/global_index/global_index_cloner_fetcher_factory.h +++ b/src/mongo/db/s/global_index/global_index_cloner_fetcher_factory.h @@ -29,7 +29,14 @@ #pragma once +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/global_index/global_index_cloner_fetcher.h" +#include "mongo/db/shard_id.h" +#include "mongo/util/uuid.h" namespace mongo { namespace global_index { diff --git a/src/mongo/db/s/global_index/global_index_cloner_fetcher_test.cpp b/src/mongo/db/s/global_index/global_index_cloner_fetcher_test.cpp index 9d32f2f29343e..7b35bb008a406 100644 --- a/src/mongo/db/s/global_index/global_index_cloner_fetcher_test.cpp +++ b/src/mongo/db/s/global_index/global_index_cloner_fetcher_test.cpp @@ -29,9 +29,36 @@ #include "mongo/db/s/global_index/global_index_cloner_fetcher.h" +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/logv2/log.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog_cache_mock.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/global_index/global_index_cloning_external_state.cpp b/src/mongo/db/s/global_index/global_index_cloning_external_state.cpp index aa36da44ec108..cceafab1133f5 100644 --- a/src/mongo/db/s/global_index/global_index_cloning_external_state.cpp +++ b/src/mongo/db/s/global_index/global_index_cloning_external_state.cpp @@ -30,6 +30,7 @@ #include "mongo/db/s/global_index/global_index_cloning_external_state.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/grid.h" namespace mongo { diff --git a/src/mongo/db/s/global_index/global_index_cloning_external_state.h b/src/mongo/db/s/global_index/global_index_cloning_external_state.h index 0851b776a6ce2..af3917e799f5c 100644 --- a/src/mongo/db/s/global_index/global_index_cloning_external_state.h +++ b/src/mongo/db/s/global_index/global_index_cloning_external_state.h @@ -29,8 +29,11 @@ #pragma once +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/global_index/global_index_cloning_service.h" - +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/s/chunk_manager.h" namespace mongo { diff --git 
a/src/mongo/db/s/global_index/global_index_cloning_service.cpp b/src/mongo/db/s/global_index/global_index_cloning_service.cpp index 4142516abfeb8..4798eafd2d78c 100644 --- a/src/mongo/db/s/global_index/global_index_cloning_service.cpp +++ b/src/mongo/db/s/global_index/global_index_cloning_service.cpp @@ -29,23 +29,60 @@ #include "mongo/db/s/global_index/global_index_cloning_service.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/create_indexes_gen.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/ops/delete.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/global_index/global_index_cloning_external_state.h" #include "mongo/db/s/global_index/global_index_server_parameters_gen.h" #include "mongo/db/s/global_index/global_index_util.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/future_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/timer.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kGlobalIndex @@ -212,8 +249,14 @@ void GlobalIndexCloningService::CloningStateMachine::_init( _metadata.getNss(), indexSpec.getName(), _metadata.getIndexCollectionUUID(), **executor); auto client = _serviceContext->makeClient("globalIndexClonerServiceInit"); - AlternativeClientRegion clientRegion(client); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + + AlternativeClientRegion clientRegion(client); auto opCtx = _serviceContext->makeOperationContext(Client::getCurrent()); auto routingInfo = @@ -221,7 +264,7 @@ void GlobalIndexCloningService::CloningStateMachine::_init( uassert(6755901, str::stream() << "Cannot create global index on unsharded ns " - << _metadata.getNss().ns(), + << _metadata.getNss().toStringForErrorMsg(), routingInfo.isSharded()); auto myShardId = _externalState->myShardId(_serviceContext); @@ -278,7 +321,8 @@ void GlobalIndexCloningService::CloningStateMachine::checkIfOptionsConflict( uassert(6755900, str::stream() << "New global index " << stateDoc << " is incompatible with ongoing global index build in namespace: " - << _metadata.getNss() << ", uuid: " << _metadata.getCollectionUUID(), + << _metadata.getNss().toStringForErrorMsg() + << ", uuid: " << _metadata.getCollectionUUID(), newCloning.getNss() == _metadata.getNss() && newCloning.getCollectionUUID() == _metadata.getCollectionUUID()); } @@ -430,12 +474,12 @@ ExecutorFuture GlobalIndexCloningService::CloningStateMachine::_processBat _lastProcessedIdSinceStepUp = Value(next.documentKey["_id"]); _fetcher->setResumeId(_lastProcessedIdSinceStepUp); - _fetchedDocs.pop(); - _metrics->onDocumentsProcessed(1, next.documentKey.objsize() + next.indexKeyValues.objsize(), duration_cast(timer.elapsed())); + + _fetchedDocs.pop(); }) .until([this](const Status& status) { return !status.isOK() || _fetchedDocs.empty(); }) .on(**executor, cancelToken) @@ -464,7 +508,7 @@ void GlobalIndexCloningService::CloningStateMachine::_ensureCollection(Operation invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Create the destination collection if necessary. 
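// Illustrative sketch (not part of the upstream change): the hunk below switches
// writeConflictRetry() from taking nss.toString() to taking the NamespaceString itself.
// A hedged example of the resulting call shape; the function name and lambda body are
// placeholders, not the patch's code.
#include "mongo/db/concurrency/exception_util.h"
#include "mongo/db/storage/write_unit_of_work.h"

void exampleEnsureCollection(OperationContext* opCtx, const NamespaceString& nss) {
    writeConflictRetry(opCtx, "exampleEnsureCollection", nss, [&] {
        WriteUnitOfWork wuow(opCtx);
        // Catalog writes that may throw WriteConflictException go here.
        wuow.commit();
    });
}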
- writeConflictRetry(opCtx, "CloningStateMachine::_ensureCollection", nss.toString(), [&] { + writeConflictRetry(opCtx, "CloningStateMachine::_ensureCollection", nss, [&] { const Collection* coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); if (coll) { diff --git a/src/mongo/db/s/global_index/global_index_cloning_service.h b/src/mongo/db/s/global_index/global_index_cloning_service.h index 8424a4c29e146..b5acd52de6990 100644 --- a/src/mongo/db/s/global_index/global_index_cloning_service.h +++ b/src/mongo/db/s/global_index/global_index_cloning_service.h @@ -29,16 +29,41 @@ #pragma once +#include #include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/s/global_index/common_types_gen.h" +#include "mongo/db/s/global_index/global_index_cloner_fetcher.h" #include "mongo/db/s/global_index/global_index_cloner_fetcher_factory.h" #include "mongo/db/s/global_index/global_index_cloner_gen.h" #include "mongo/db/s/global_index/global_index_inserter.h" #include "mongo/db/s/global_index/global_index_metrics.h" #include "mongo/db/s/resharding/resharding_future_util.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/cancellation.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/time_support.h" namespace mongo { namespace global_index { diff --git a/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp b/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp index 96b8e60993a74..c5a51cd4e3ff6 100644 --- a/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp +++ b/src/mongo/db/s/global_index/global_index_cloning_service_test.cpp @@ -27,10 +27,36 @@ * it in the license file. 
*/ +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/client.h" #include "mongo/db/commands/list_collections_filter.h" +#include "mongo/db/create_indexes_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" #include "mongo/db/repl/database_cloner_gen.h" #include "mongo/db/repl/primary_only_service_test_fixture.h" @@ -40,12 +66,29 @@ #include "mongo/db/s/global_index/global_index_cloning_service.h" #include "mongo/db/s/global_index/global_index_util.h" #include "mongo/db/s/resharding/resharding_service_test_helpers.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -307,10 +350,10 @@ class GlobalIndexClonerServiceTest : public repl::PrimaryOnlyServiceMongoDTest { replaceFetcherResultList(std::move(fetcherResults)); CreateGlobalIndex createGlobalIndex(_indexCollectionUUID); - createGlobalIndex.setDbName({boost::none, "admin"}); + createGlobalIndex.setDbName(DatabaseName::kAdmin); BSONObj cmdResult; auto success = - client.runCommand({boost::none, "admin"}, createGlobalIndex.toBSON({}), cmdResult); + client.runCommand(DatabaseName::kAdmin, createGlobalIndex.toBSON({}), cmdResult); ASSERT(success) << "createGlobalIndex cmd failed with result: " << cmdResult; } diff --git a/src/mongo/db/s/global_index/global_index_cumulative_metrics.cpp b/src/mongo/db/s/global_index/global_index_cumulative_metrics.cpp index c319c2e04183a..e07d8aea70e4b 100644 --- a/src/mongo/db/s/global_index/global_index_cumulative_metrics.cpp +++ b/src/mongo/db/s/global_index/global_index_cumulative_metrics.cpp @@ -29,6 +29,14 @@ #include "mongo/db/s/global_index/global_index_cumulative_metrics.h" +#include +#include +#include +#include 
+ +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + namespace mongo { namespace global_index { diff --git a/src/mongo/db/s/global_index/global_index_cumulative_metrics.h b/src/mongo/db/s/global_index/global_index_cumulative_metrics.h index 2d2998e188fd3..e7ea62b36ffcf 100644 --- a/src/mongo/db/s/global_index/global_index_cumulative_metrics.h +++ b/src/mongo/db/s/global_index/global_index_cumulative_metrics.h @@ -29,6 +29,21 @@ #pragma once +#include +#include +#include +// IWYU pragma: no_include "boost/preprocessor/detail/limits/auto_rec_256.hpp" +#include +// IWYU pragma: no_include "boost/preprocessor/repetition/detail/limits/for_256.hpp" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/s/global_index/global_index_cloner_gen.h" #include "mongo/db/s/global_index/global_index_coordinator_state_enum_placeholder.h" #include "mongo/db/s/global_index/global_index_cumulative_metrics_field_name_provider.h" diff --git a/src/mongo/db/s/global_index/global_index_cumulative_metrics_field_name_provider.h b/src/mongo/db/s/global_index/global_index_cumulative_metrics_field_name_provider.h index 181ebb46d6e54..a3c3d4de2b997 100644 --- a/src/mongo/db/s/global_index/global_index_cumulative_metrics_field_name_provider.h +++ b/src/mongo/db/s/global_index/global_index_cumulative_metrics_field_name_provider.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/string_data.h" #include "mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.h" namespace mongo { diff --git a/src/mongo/db/s/global_index/global_index_cumulative_metrics_test.cpp b/src/mongo/db/s/global_index/global_index_cumulative_metrics_test.cpp index 51e3ed3edaa1e..9e481a36f2b8a 100644 --- a/src/mongo/db/s/global_index/global_index_cumulative_metrics_test.cpp +++ b/src/mongo/db/s/global_index/global_index_cumulative_metrics_test.cpp @@ -29,6 +29,9 @@ #include "mongo/db/s/global_index/global_index_cumulative_metrics.h" + +#include + #include "mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/global_index/global_index_inserter.cpp b/src/mongo/db/s/global_index/global_index_inserter.cpp index 6a7ba10378808..966e91f0ed941 100644 --- a/src/mongo/db/s/global_index/global_index_inserter.cpp +++ b/src/mongo/db/s/global_index/global_index_inserter.cpp @@ -29,14 +29,34 @@ #include "mongo/db/s/global_index/global_index_inserter.h" +#include +#include #include - +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/global_index/global_index_util.h" #include "mongo/db/s/global_index_crud_commands_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/transaction/transaction_api.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kGlobalIndex @@ -64,54 +84,48 @@ NamespaceString GlobalIndexInserter::_skipIdNss() { void GlobalIndexInserter::processDoc(OperationContext* opCtx, const BSONObj& indexKeyValues, const BSONObj& documentKey) { - auto insertToGlobalIndexFn = - [this, service = opCtx->getServiceContext(), indexKeyValues, documentKey]( - const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - FindCommandRequest skipIdQuery(_skipIdNss()); - skipIdQuery.setFilter(BSON("_id" << documentKey)); - skipIdQuery.setLimit(1); - - return txnClient.exhaustiveFind(skipIdQuery) - .thenRunOn(txnExec) - .then([this, service, indexKeyValues, documentKey, &txnClient, txnExec]( - const auto& skipIdDocResults) { - auto client = service->makeClient("globalIndexInserter"); - auto opCtx = service->makeOperationContext(client.get()); - - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } - - globalIndexInserterPauseAfterReadingSkipCollection.pauseWhileSet(opCtx.get()); - - if (!skipIdDocResults.empty()) { - return SemiFuture::makeReady(); - } - - InsertGlobalIndexKey globalIndexEntryInsert(_indexUUID); - // Note: dbName is unused by command but required by idl. - globalIndexEntryInsert.setDbName({boost::none, "admin"}); - globalIndexEntryInsert.setGlobalIndexKeyEntry( - GlobalIndexKeyEntry(indexKeyValues, documentKey)); - - return txnClient.runCommand(_nss.dbName(), globalIndexEntryInsert.toBSON({})) - .thenRunOn(txnExec) - .then([this, documentKey, &txnClient](const auto& commandResponse) { - write_ops::InsertCommandRequest skipIdInsert(_skipIdNss()); - - skipIdInsert.setDocuments({BSON("_id" << documentKey)}); - return txnClient.runCRUDOp({skipIdInsert}, {}).ignoreValue(); - }) - .semi(); - }) - .semi(); - }; + auto insertToGlobalIndexFn = [this, + service = opCtx->getServiceContext(), + indexKeyValues, + documentKey](const txn_api::TransactionClient& txnClient, + ExecutorPtr txnExec) { + FindCommandRequest skipIdQuery(_skipIdNss()); + skipIdQuery.setFilter(BSON("_id" << documentKey)); + skipIdQuery.setLimit(1); + + return txnClient.exhaustiveFind(skipIdQuery) + .thenRunOn(txnExec) + .then([this, service, indexKeyValues, documentKey, &txnClient, txnExec]( + const auto& skipIdDocResults) { + auto client = service->makeClient("globalIndexInserter"); + auto opCtx = service->makeOperationContext(client.get()); + globalIndexInserterPauseAfterReadingSkipCollection.pauseWhileSet(opCtx.get()); + + if (!skipIdDocResults.empty()) { + return SemiFuture::makeReady(); + } + + InsertGlobalIndexKey globalIndexEntryInsert(_indexUUID); + // Note: dbName is unused by command but required by idl. 
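// Illustrative sketch (not part of the upstream change): the lambda assembled in this hunk is
// the transaction body that processDoc() hands to txn_api::SyncTransactionWithRetries at the
// end of the function. A hedged sketch of that driving pattern; opCtx, executor and nss are
// placeholders, and the executor::InlineExecutor template argument is an assumption based on
// the "mongo/executor/inline_executor.h" include added above.
auto inlineExecutor = std::make_shared<executor::InlineExecutor>();
txn_api::SyncTransactionWithRetries txn(
    opCtx, executor, nullptr /* resourceYielder */, inlineExecutor);
txn.run(opCtx, [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) {
    write_ops::InsertCommandRequest insertOp(nss);
    insertOp.setDocuments({BSON("_id" << 1)});
    return txnClient.runCRUDOp({insertOp}, {}).ignoreValue();
});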
+ globalIndexEntryInsert.setDbName(DatabaseName::kAdmin); + globalIndexEntryInsert.setGlobalIndexKeyEntry( + GlobalIndexKeyEntry(indexKeyValues, documentKey)); + + return txnClient.runCommandChecked(_nss.dbName(), globalIndexEntryInsert.toBSON({})) + .thenRunOn(txnExec) + .then([this, documentKey, &txnClient](const auto& commandResponse) { + write_ops::InsertCommandRequest skipIdInsert(_skipIdNss()); + + skipIdInsert.setDocuments({BSON("_id" << documentKey)}); + return txnClient.runCRUDOp({skipIdInsert}, {}).ignoreValue(); + }) + .semi(); + }) + .semi(); + }; auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(_executor); - - txn_api::SyncTransactionWithRetries txn(opCtx, sleepInlineExecutor, nullptr, inlineExecutor); + txn_api::SyncTransactionWithRetries txn(opCtx, _executor, nullptr, inlineExecutor); txn.run(opCtx, insertToGlobalIndexFn); } diff --git a/src/mongo/db/s/global_index/global_index_inserter.h b/src/mongo/db/s/global_index/global_index_inserter.h index a22f9da0eab7b..6a7e3b5fbf152 100644 --- a/src/mongo/db/s/global_index/global_index_inserter.h +++ b/src/mongo/db/s/global_index/global_index_inserter.h @@ -29,8 +29,13 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/executor/task_executor.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/db/s/global_index/global_index_inserter_test.cpp b/src/mongo/db/s/global_index/global_index_inserter_test.cpp index c65ca491f7415..a15be3a34016c 100644 --- a/src/mongo/db/s/global_index/global_index_inserter_test.cpp +++ b/src/mongo/db/s/global_index/global_index_inserter_test.cpp @@ -29,19 +29,36 @@ #include "mongo/db/s/global_index/global_index_inserter.h" -#include "mongo/db/auth/authorization_session.h" +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/s/global_index/global_index_util.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" +#include "mongo/rpc/metadata/metadata_hook.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/stdx/future.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/fail_point.h" @@ -76,10 +93,10 @@ class GlobalIndexInserterTest : public ShardServerTestFixture { _executor = makeTaskExecutorForCloner(); CreateGlobalIndex createGlobalIndex(_indexUUID); - createGlobalIndex.setDbName({boost::none, "admin"}); + createGlobalIndex.setDbName(DatabaseName::kAdmin); BSONObj cmdResult; auto 
success = - client.runCommand({boost::none, "admin"}, createGlobalIndex.toBSON({}), cmdResult); + client.runCommand(DatabaseName::kAdmin, createGlobalIndex.toBSON({}), cmdResult); ASSERT(success) << "createGlobalIndex cmd failed with result: " << cmdResult; } diff --git a/src/mongo/db/s/global_index/global_index_metrics.cpp b/src/mongo/db/s/global_index/global_index_metrics.cpp index 1b55d53d3f200..d5c774ac3eedc 100644 --- a/src/mongo/db/s/global_index/global_index_metrics.cpp +++ b/src/mongo/db/s/global_index/global_index_metrics.cpp @@ -29,8 +29,22 @@ #include "mongo/db/s/global_index/global_index_metrics.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/create_indexes_gen.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/s/global_index/global_index_cloner_gen.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace global_index { @@ -64,7 +78,7 @@ BSONObj createOriginalCommand(const NamespaceString& nss, BSONObj keyPattern, bo using V = Value; return Doc{{"originatingCommand", - V{Doc{{"createIndexes", V{StringData{nss.toString()}}}, + V{Doc{{"createIndexes", V{StringData{NamespaceStringUtil::serialize(nss)}}}, {"key", std::move(keyPattern)}, {"unique", V{unique}}}}}} .toBson(); diff --git a/src/mongo/db/s/global_index/global_index_metrics.h b/src/mongo/db/s/global_index/global_index_metrics.h index 29c63ed7e7aab..eac13d4038964 100644 --- a/src/mongo/db/s/global_index/global_index_metrics.h +++ b/src/mongo/db/s/global_index/global_index_metrics.h @@ -29,15 +29,32 @@ #pragma once +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/s/global_index/common_types_gen.h" #include "mongo/db/s/global_index/global_index_cloner_gen.h" #include "mongo/db/s/global_index/global_index_coordinator_state_enum_placeholder.h" #include "mongo/db/s/global_index/global_index_cumulative_metrics.h" #include "mongo/db/s/global_index/global_index_metrics_field_name_provider.h" #include "mongo/db/s/metrics/metrics_state_holder.h" +#include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/metrics/with_phase_duration_management.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/global_index/global_index_metrics_field_name_provider.h b/src/mongo/db/s/global_index/global_index_metrics_field_name_provider.h index 41bdf906d6b88..129221b689d9a 100644 --- a/src/mongo/db/s/global_index/global_index_metrics_field_name_provider.h +++ b/src/mongo/db/s/global_index/global_index_metrics_field_name_provider.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.h" #include "mongo/util/duration.h" diff --git a/src/mongo/db/s/global_index/global_index_metrics_test.cpp b/src/mongo/db/s/global_index/global_index_metrics_test.cpp index 
20ccb31e995c9..48091471e12e5 100644 --- a/src/mongo/db/s/global_index/global_index_metrics_test.cpp +++ b/src/mongo/db/s/global_index/global_index_metrics_test.cpp @@ -28,12 +28,15 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include #include "mongo/db/s/global_index/global_index_metrics.h" -#include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source_mock.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/global_index/global_index_util.h b/src/mongo/db/s/global_index/global_index_util.h index 657520558af04..6ad00647cb73f 100644 --- a/src/mongo/db/s/global_index/global_index_util.h +++ b/src/mongo/db/s/global_index/global_index_util.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" namespace mongo { diff --git a/src/mongo/db/s/global_user_write_block_state.cpp b/src/mongo/db/s/global_user_write_block_state.cpp index 79bd254d7dc13..2286aeaaf3bac 100644 --- a/src/mongo/db/s/global_user_write_block_state.cpp +++ b/src/mongo/db/s/global_user_write_block_state.cpp @@ -28,10 +28,18 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/s/global_user_write_block_state.h" +#include "mongo/db/server_options.h" #include "mongo/db/write_block_bypass.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/global_user_write_block_state.h b/src/mongo/db/s/global_user_write_block_state.h index 16f1fd794881d..21d2a8c291a2d 100644 --- a/src/mongo/db/s/global_user_write_block_state.h +++ b/src/mongo/db/s/global_user_write_block_state.h @@ -29,8 +29,11 @@ #pragma once +#include "mongo/base/status.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/db/s/implicit_collection_creation_test.cpp b/src/mongo/db/s/implicit_collection_creation_test.cpp index ae1016e46443c..851ed9e0bbc46 100644 --- a/src/mongo/db/s/implicit_collection_creation_test.cpp +++ b/src/mongo/db/s/implicit_collection_creation_test.cpp @@ -28,12 +28,29 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -66,6 +83,26 @@ TEST_F(ImplicitCollectionCreationTest, AllowImplicitCollectionCreate) { WriteUnitOfWork wuow(operationContext()); ASSERT_OK(db->userCreateNS(operationContext(), nss, CollectionOptions{})); wuow.commit(); + + const auto scopedCsr = + CollectionShardingRuntime::assertCollectionLockedAndAcquireShared(operationContext(), nss); + ASSERT_TRUE(scopedCsr->getCurrentMetadataIfKnown()); +} + +TEST_F(ImplicitCollectionCreationTest, AllowImplicitCollectionCreateWithSetCSRAsUnknown) { + NamespaceString nss = + NamespaceString::createNamespaceString_forTest("AllowImplicitCollectionCreateDB.TestColl"); + OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE unsafeCreateCollection( + operationContext(), /* forceCSRAsUnknownAfterCollectionCreation */ true); + AutoGetCollection autoColl(operationContext(), nss, MODE_IX); + auto db = autoColl.ensureDbExists(operationContext()); + WriteUnitOfWork wuow(operationContext()); + ASSERT_OK(db->userCreateNS(operationContext(), nss, CollectionOptions{})); + wuow.commit(); + + const auto scopedCsr = + CollectionShardingRuntime::assertCollectionLockedAndAcquireShared(operationContext(), nss); + ASSERT_FALSE(scopedCsr->getCurrentMetadataIfKnown()); } } // namespace diff --git a/src/mongo/db/s/metadata_consistency_util.cpp b/src/mongo/db/s/metadata_consistency_util.cpp index 1fc1bf3991b58..cb3cf3ba1fcf1 100644 --- a/src/mongo/db/s/metadata_consistency_util.cpp +++ b/src/mongo/db/s/metadata_consistency_util.cpp @@ -29,18 +29,45 @@ #include "mongo/db/s/metadata_consistency_util.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_catalog.h" + +#include +#include + +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/cursor_manager.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/queued_data_stage.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/keypattern.h" #include "mongo/db/metadata_consistency_types_gen.h" -#include "mongo/db/query/cursor_response.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/find_common.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -49,6 +76,8 @@ namespace metadata_consistency_util { namespace { +MONGO_FAIL_POINT_DEFINE(insertFakeInconsistencies); + /* * Emit a warning log containing information about the given inconsistency */ @@ -80,7 +109,7 @@ void _checkShardKeyIndexInconsistencies(OperationContext* opCtx, std::vector tmpInconsistencies; - // Shards that do not own any chunks do not partecipate in the creation of new indexes, so they + // Shards 
that do not own any chunks do not participate in the creation of new indexes, so they // could potentially miss any indexes created after they no longer own chunks. Thus we first // perform a check optimistically without taking collection lock, if missing indexes are found // we check under the collection lock if this shard currently own any chunk and re-execute again @@ -96,7 +125,7 @@ void _checkShardKeyIndexInconsistencies(OperationContext* opCtx, AutoGetCollection ac(opCtx, nss, MODE_IS); tassert(7531700, str::stream() << "Collection unexpectedly disappeared while holding database DDL lock: " - << nss, + << nss.toStringForErrorMsg(), ac); const auto scopedCsr = @@ -111,11 +140,14 @@ void _checkShardKeyIndexInconsistencies(OperationContext* opCtx, return; } - tassert(7531702, - str::stream() - << "Collection unexpectedly became unsharded while holding database DDL lock: " - << nss, - optCollDescr->isSharded()); + if (!optCollDescr->isSharded()) { + // The collection is registered as SHARDED in the sharding catalog. This shard has the + // collection locally but is marked as UNSHARDED. + inconsistencies.emplace_back(metadata_consistency_util::makeInconsistency( + MetadataInconsistencyTypeEnum::kShardThinksCollectionIsUnsharded, + ShardThinksCollectionIsUnshardedDetails{localColl->ns(), localColl->uuid(), shardId})); + return; + } if (!optCollDescr->currentShardHasAnyChunks()) { LOGV2_DEBUG(7531703, @@ -153,6 +185,15 @@ std::unique_ptr makeQueuedPlanExecutor( auto ws = std::make_unique(); auto root = std::make_unique(expCtx.get(), ws.get()); + insertFakeInconsistencies.execute([&](const BSONObj& data) { + const auto numInconsistencies = data["numInconsistencies"].safeNumberLong(); + for (int i = 0; i < numInconsistencies; i++) { + inconsistencies.emplace_back(makeInconsistency( + MetadataInconsistencyTypeEnum::kCollectionUUIDMismatch, + CollectionUUIDMismatchDetails{nss, ShardId{"shard"}, UUID::gen(), UUID::gen()})); + } + }); + for (auto&& inconsistency : inconsistencies) { // Every inconsistency encountered need to be logged with the same format // to allow log injestion systems to correctly detect them. @@ -203,7 +244,13 @@ CursorInitialReply createInitialCursorReplyMongod(OperationContext* opCtx, firstBatch.push_back(std::move(nextDoc)); } + auto&& opDebug = CurOp::get(opCtx)->debug(); + opDebug.additiveMetrics.nBatches = 1; + opDebug.additiveMetrics.nreturned = firstBatch.size(); + if (exec->isEOF()) { + opDebug.cursorExhausted = true; + CursorInitialReply resp; InitialResponseCursor initRespCursor{std::move(firstBatch)}; initRespCursor.setResponseCursorBase({0LL /* cursorId */, nss}); @@ -221,8 +268,13 @@ CursorInitialReply createInitialCursorReplyMongod(OperationContext* opCtx, CursorInitialReply resp; InitialResponseCursor initRespCursor{std::move(firstBatch)}; - initRespCursor.setResponseCursorBase({pinnedCursor.getCursor()->cursorid(), nss}); + const auto cursorId = pinnedCursor.getCursor()->cursorid(); + initRespCursor.setResponseCursorBase({cursorId, nss}); resp.setCursor(std::move(initRespCursor)); + + // Record the cursorID in CurOp. 
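// Illustrative sketch (not part of the upstream change): the insertFakeInconsistencies fail
// point defined earlier in this hunk injects synthetic kCollectionUUIDMismatch entries into the
// cursor reply. A hedged example of how a test might activate it; the three-argument
// setMode(mode, nTimes, extra) overload is an assumption, and the field name mirrors the
// data["numInconsistencies"] read in the handler above.
auto* failPoint = globalFailPointRegistry().find("insertFakeInconsistencies");
failPoint->setMode(FailPoint::alwaysOn, 0, BSON("numInconsistencies" << 2));
// Run the metadata consistency request whose reply should now carry two fake inconsistencies.
failPoint->setMode(FailPoint::off);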
+ opDebug.cursorid = cursorId; + return resp; } @@ -240,14 +292,15 @@ std::vector checkCollectionMetadataInconsistencies( const auto& localColl = *itLocalCollections; const auto& localUUID = localColl->uuid(); const auto& localNss = localColl->ns(); - const auto& nss = itCatalogCollections->getNss(); + const auto& remoteNss = itCatalogCollections->getNss(); - const auto cmp = nss.coll().compare(localNss.coll()); + const auto cmp = remoteNss.coll().compare(localNss.coll()); if (cmp < 0) { // Case where we have found a collection in the catalog client that it is not in the // local catalog. itCatalogCollections++; } else if (cmp == 0) { + const auto& nss = remoteNss; // Case where we have found same collection in the catalog client than in the local // catalog. @@ -257,20 +310,24 @@ std::vector checkCollectionMetadataInconsistencies( inconsistencies.emplace_back(makeInconsistency( MetadataInconsistencyTypeEnum::kCollectionUUIDMismatch, CollectionUUIDMismatchDetails{localNss, shardId, localUUID, UUID})); + } else { + _checkShardKeyIndexInconsistencies(opCtx, + nss, + shardId, + itCatalogCollections->getKeyPattern().toBSON(), + localColl, + inconsistencies); } - _checkShardKeyIndexInconsistencies(opCtx, - nss, - shardId, - itCatalogCollections->getKeyPattern().toBSON(), - localColl, - inconsistencies); - itLocalCollections++; itCatalogCollections++; } else { // Case where we have found a local collection that is not in the catalog client. - if (shardId != primaryShardId) { + const auto& nss = localNss; + + // TODO SERVER-59957 use function introduced in this ticket to decide if a namesapce + // should be ignored and stop using isNamepsaceAlwaysUnsharded(). + if (!nss.isNamespaceAlwaysUnsharded() && shardId != primaryShardId) { inconsistencies.emplace_back( makeInconsistency(MetadataInconsistencyTypeEnum::kMisplacedCollection, MisplacedCollectionDetails{localNss, shardId, localUUID})); @@ -279,16 +336,21 @@ std::vector checkCollectionMetadataInconsistencies( } } - // Case where we have found more local collections than in the catalog client. It is a - // hidden unsharded collection inconsistency if we are not the db primary shard. - while (itLocalCollections != localCollections.end() && shardId != primaryShardId) { - const auto localColl = itLocalCollections->get(); - inconsistencies.emplace_back(makeInconsistency( - MetadataInconsistencyTypeEnum::kMisplacedCollection, - MisplacedCollectionDetails{localColl->ns(), shardId, localColl->uuid()})); - itLocalCollections++; + if (shardId != primaryShardId) { + // Case where we have found more local collections than in the catalog client. It is a + // hidden unsharded collection inconsistency if we are not the db primary shard. + while (itLocalCollections != localCollections.end()) { + const auto localColl = itLocalCollections->get(); + // TODO SERVER-59957 use function introduced in this ticket to decide if a namesapce + // should be ignored and stop using isNamepsaceAlwaysUnsharded(). 
+ if (!localColl->ns().isNamespaceAlwaysUnsharded()) { + inconsistencies.emplace_back(makeInconsistency( + MetadataInconsistencyTypeEnum::kMisplacedCollection, + MisplacedCollectionDetails{localColl->ns(), shardId, localColl->uuid()})); + } + itLocalCollections++; + } } - return inconsistencies; } @@ -299,7 +361,6 @@ std::vector checkChunksInconsistencies( const auto& uuid = collection.getUuid(); const auto& nss = collection.getNss(); const auto shardKeyPattern = ShardKeyPattern{collection.getKeyPattern()}; - const auto configShardId = ShardId::kConfigServerId; std::vector inconsistencies; auto previousChunk = chunks.begin(); @@ -361,5 +422,44 @@ std::vector checkChunksInconsistencies( return inconsistencies; } +std::vector checkZonesInconsistencies( + OperationContext* opCtx, const CollectionType& collection, const std::vector& zones) { + const auto& uuid = collection.getUuid(); + const auto& nss = collection.getNss(); + const auto shardKeyPattern = ShardKeyPattern{collection.getKeyPattern()}; + + std::vector inconsistencies; + auto previousZone = zones.begin(); + for (auto it = zones.begin(); it != zones.end(); it++) { + const auto& zone = *it; + + // Skip the first iteration as we need to compare the current zone with the previous one. + if (it == zones.begin()) { + continue; + } + + if (!shardKeyPattern.isShardKey(zone.getMinKey()) || + !shardKeyPattern.isShardKey(zone.getMaxKey())) { + inconsistencies.emplace_back(makeInconsistency( + MetadataInconsistencyTypeEnum::kCorruptedZoneShardKey, + CorruptedZoneShardKeyDetails{nss, uuid, zone.toBSON(), shardKeyPattern.toBSON()})); + } + + // As the zones are sorted by minKey, we can check if the previous zone maxKey is less than + // the current zone minKey. + const auto& minKey = zone.getMinKey(); + auto cmp = previousZone->getMaxKey().woCompare(minKey); + if (cmp > 0) { + inconsistencies.emplace_back(makeInconsistency( + MetadataInconsistencyTypeEnum::kZonesRangeOverlap, + ZonesRangeOverlapDetails{nss, uuid, previousZone->toBSON(), zone.toBSON()})); + } + + previousZone = it; + } + + return inconsistencies; +} + } // namespace metadata_consistency_util } // namespace mongo diff --git a/src/mongo/db/s/metadata_consistency_util.h b/src/mongo/db/s/metadata_consistency_util.h index 1ce7f34ff6d7f..a944220b58024 100644 --- a/src/mongo/db/s/metadata_consistency_util.h +++ b/src/mongo/db/s/metadata_consistency_util.h @@ -29,11 +29,22 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/clientcursor.h" #include "mongo/db/metadata_consistency_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_tags.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" @@ -98,5 +109,14 @@ std::vector checkChunksInconsistencies( const CollectionType& collection, const std::vector& chunks); +/** + * Check different types of inconsistencies from a given set of zones owned by a collection. + * + * The list of inconsistencies is returned as a vector of MetadataInconsistencies objects. If + * there is no inconsistency, it is returned an empty vector. 
+ */ +std::vector checkZonesInconsistencies( + OperationContext* opCtx, const CollectionType& collection, const std::vector& zones); + } // namespace metadata_consistency_util } // namespace mongo diff --git a/src/mongo/db/s/metadata_consistency_util_test.cpp b/src/mongo/db/s/metadata_consistency_util_test.cpp index 33ea8727dd7f9..6e8a2d2f35bbe 100644 --- a/src/mongo/db/s/metadata_consistency_util_test.cpp +++ b/src/mongo/db/s/metadata_consistency_util_test.cpp @@ -30,16 +30,33 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest #include "mongo/db/s/metadata_consistency_util.h" + +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/logv2/log.h" -#include "mongo/util/fail_point.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { -ChunkType generateChunk(const NamespaceString& nss, - const UUID& collUuid, +ChunkType generateChunk(const UUID& collUuid, const ShardId& shardId, const BSONObj& minKey, const BSONObj& maxKey, @@ -57,12 +74,21 @@ ChunkType generateChunk(const NamespaceString& nss, return chunkType; } +TagsType generateZone(const NamespaceString& nss, const BSONObj& minKey, const BSONObj& maxKey) { + TagsType tagType; + tagType.setTag(OID::gen().toString()); + tagType.setNS(nss); + tagType.setMinKey(minKey); + tagType.setMaxKey(maxKey); + return tagType; +} + class MetadataConsistencyTest : public ShardServerTestFixture { protected: std::string _shardName = "shard0000"; - std::string _config = "config"; const ShardId _shardId{_shardName}; - const NamespaceString _nss{"TestDB", "TestColl"}; + const NamespaceString _nss = + NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); const UUID _collUuid = UUID::gen(); const KeyPattern _keyPattern{BSON("x" << 1)}; const CollectionType _coll{ @@ -71,7 +97,6 @@ class MetadataConsistencyTest : public ShardServerTestFixture { void assertOneInconsistencyFound( const MetadataInconsistencyTypeEnum& type, const NamespaceString& nss, - const ShardId& shard, const std::vector& inconsistencies) { ASSERT_EQ(1, inconsistencies.size()); ASSERT_EQ(type, inconsistencies[0].getType()); @@ -79,15 +104,13 @@ class MetadataConsistencyTest : public ShardServerTestFixture { }; TEST_F(MetadataConsistencyTest, FindRoutingTableRangeGapInconsistency) { - const auto chunk1 = generateChunk(_nss, - _collUuid, + const auto chunk1 = generateChunk(_collUuid, _shardId, _keyPattern.globalMin(), BSON("x" << 0), {ChunkHistory(Timestamp(1, 0), _shardId)}); - const auto chunk2 = generateChunk(_nss, - _collUuid, + const auto chunk2 = generateChunk(_collUuid, _shardId, BSON("x" << 1), _keyPattern.globalMax(), @@ -97,12 +120,11 @@ TEST_F(MetadataConsistencyTest, FindRoutingTableRangeGapInconsistency) { operationContext(), _coll, {chunk1, chunk2}); assertOneInconsistencyFound( - MetadataInconsistencyTypeEnum::kRoutingTableRangeGap, _nss, _config, inconsistencies); + MetadataInconsistencyTypeEnum::kRoutingTableRangeGap, _nss, inconsistencies); } TEST_F(MetadataConsistencyTest, 
FindMissingChunkWithMaxKeyInconsistency) { - const auto chunk = generateChunk(_nss, - _collUuid, + const auto chunk = generateChunk(_collUuid, _shardId, _keyPattern.globalMin(), BSON("x" << 0), @@ -112,12 +134,11 @@ TEST_F(MetadataConsistencyTest, FindMissingChunkWithMaxKeyInconsistency) { metadata_consistency_util::checkChunksInconsistencies(operationContext(), _coll, {chunk}); assertOneInconsistencyFound( - MetadataInconsistencyTypeEnum::kRoutingTableMissingMaxKey, _nss, _config, inconsistencies); + MetadataInconsistencyTypeEnum::kRoutingTableMissingMaxKey, _nss, inconsistencies); } TEST_F(MetadataConsistencyTest, FindMissingChunkWithMinKeyInconsistency) { - const auto chunk = generateChunk(_nss, - _collUuid, + const auto chunk = generateChunk(_collUuid, _shardId, BSON("x" << 0), _keyPattern.globalMax(), @@ -127,19 +148,17 @@ TEST_F(MetadataConsistencyTest, FindMissingChunkWithMinKeyInconsistency) { metadata_consistency_util::checkChunksInconsistencies(operationContext(), _coll, {chunk}); assertOneInconsistencyFound( - MetadataInconsistencyTypeEnum::kRoutingTableMissingMinKey, _nss, _config, inconsistencies); + MetadataInconsistencyTypeEnum::kRoutingTableMissingMinKey, _nss, inconsistencies); } TEST_F(MetadataConsistencyTest, FindRoutingTableRangeOverlapInconsistency) { - const auto chunk1 = generateChunk(_nss, - _collUuid, + const auto chunk1 = generateChunk(_collUuid, _shardId, _keyPattern.globalMin(), BSON("x" << 0), {ChunkHistory(Timestamp(1, 0), _shardId)}); - const auto chunk2 = generateChunk(_nss, - _collUuid, + const auto chunk2 = generateChunk(_collUuid, _shardId, BSON("x" << -10), _keyPattern.globalMax(), @@ -149,19 +168,17 @@ TEST_F(MetadataConsistencyTest, FindRoutingTableRangeOverlapInconsistency) { operationContext(), _coll, {chunk1, chunk2}); assertOneInconsistencyFound( - MetadataInconsistencyTypeEnum::kRoutingTableRangeOverlap, _nss, _config, inconsistencies); + MetadataInconsistencyTypeEnum::kRoutingTableRangeOverlap, _nss, inconsistencies); } TEST_F(MetadataConsistencyTest, FindCorruptedChunkShardKeyInconsistency) { - const auto chunk1 = generateChunk(_nss, - _collUuid, + const auto chunk1 = generateChunk(_collUuid, _shardId, _keyPattern.globalMin(), BSON("x" << 0), {ChunkHistory(Timestamp(1, 0), _shardId)}); - const auto chunk2 = generateChunk(_nss, - _collUuid, + const auto chunk2 = generateChunk(_collUuid, _shardId, BSON("y" << 0), _keyPattern.globalMax(), @@ -175,9 +192,34 @@ TEST_F(MetadataConsistencyTest, FindCorruptedChunkShardKeyInconsistency) { ASSERT_EQ(MetadataInconsistencyTypeEnum::kRoutingTableRangeGap, inconsistencies[1].getType()); } +TEST_F(MetadataConsistencyTest, FindCorruptedZoneShardKeyInconsistency) { + const auto zone1 = generateZone(_nss, _keyPattern.globalMin(), BSON("x" << 0)); + + const auto zone2 = generateZone(_nss, BSON("y" << 0), _keyPattern.globalMax()); + + const auto inconsistencies = metadata_consistency_util::checkZonesInconsistencies( + operationContext(), _coll, {zone1, zone2}); + + assertOneInconsistencyFound( + MetadataInconsistencyTypeEnum::kCorruptedZoneShardKey, _nss, inconsistencies); +} + +TEST_F(MetadataConsistencyTest, FindZoneRangeOverlapInconsistency) { + const auto zone1 = generateZone(_nss, _keyPattern.globalMin(), BSON("x" << 0)); + + const auto zone2 = generateZone(_nss, BSON("x" << -10), _keyPattern.globalMax()); + + const auto inconsistencies = metadata_consistency_util::checkZonesInconsistencies( + operationContext(), _coll, {zone1, zone2}); + + assertOneInconsistencyFound( + 
MetadataInconsistencyTypeEnum::kZonesRangeOverlap, _nss, inconsistencies); +} + class MetadataConsistencyRandomRoutingTableTest : public ShardServerTestFixture { protected: - const NamespaceString _nss{"TestDB", "TestColl"}; + const NamespaceString _nss = + NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); const UUID _collUuid = UUID::gen(); const KeyPattern _keyPattern{BSON("x" << 1)}; const CollectionType _coll{ @@ -197,12 +239,8 @@ class MetadataConsistencyRandomRoutingTableTest : public ShardServerTestFixture auto min = nextMax == 0 ? _keyPattern.globalMin() : BSON("x" << nextMin); // set max as `MaxKey` during last iteration, otherwise next max auto max = nextMax == numChunks - 1 ? _keyPattern.globalMax() : BSON("x" << nextMax); - auto chunk = generateChunk(_nss, - _collUuid, - randomShard, - min, - max, - {ChunkHistory(Timestamp(1, 0), randomShard)}); + auto chunk = generateChunk( + _collUuid, randomShard, min, max, {ChunkHistory(Timestamp(1, 0), randomShard)}); nextMin = nextMax; chunks.push_back(chunk); } diff --git a/src/mongo/db/s/metadata_manager.cpp b/src/mongo/db/s/metadata_manager.cpp index 99f0543f7d84f..7fff9fcff1ef3 100644 --- a/src/mongo/db/s/metadata_manager.cpp +++ b/src/mongo/db/s/metadata_manager.cpp @@ -29,23 +29,25 @@ #include "mongo/db/s/metadata_manager.h" -#include "mongo/base/string_data.h" -#include "mongo/bson/util/builder.h" +#include +#include +#include + +#include +#include +#include + #include "mongo/db/s/migration_util.h" -#include "mongo/db/s/range_deleter_service.h" -#include "mongo/db/s/range_deletion_util.h" -#include "mongo/db/s/sharding_runtime_d_params_gen.h" #include "mongo/logv2/log.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/chunk_manager.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { namespace { -using TaskExecutor = executor::TaskExecutor; -using CallbackArgs = TaskExecutor::CallbackArgs; - /** * Returns whether the given metadata object has a chunk owned by this shard that overlaps the * input range. 
@@ -107,12 +109,10 @@ class RangePreserver : public ScopedCollectionDescription::Impl { MetadataManager::MetadataManager(ServiceContext* serviceContext, NamespaceString nss, - std::shared_ptr executor, CollectionMetadata initialMetadata) : _serviceContext(serviceContext), _nss(std::move(nss)), - _collectionUuid(initialMetadata.getChunkManager()->getUUID()), - _executor(std::move(executor)) { + _collectionUuid(initialMetadata.getChunkManager()->getUUID()) { _metadata.emplace_back(std::make_shared(std::move(initialMetadata))); } @@ -230,106 +230,6 @@ void MetadataManager::_retireExpiredMetadata(WithLock) { } } -void MetadataManager::append(BSONObjBuilder* builder) const { - stdx::lock_guard lg(_managerLock); - - BSONArrayBuilder arr(builder->subarrayStart("rangesToClean")); - for (auto const& [range, _] : _rangesScheduledForDeletion) { - BSONObjBuilder obj; - range.append(&obj); - arr.append(obj.done()); - } - - invariant(!_metadata.empty()); - - BSONArrayBuilder amrArr(builder->subarrayStart("activeMetadataRanges")); - for (const auto& entry : _metadata.back()->metadata->getChunks()) { - BSONObjBuilder obj; - ChunkRange r = ChunkRange(entry.first, entry.second); - r.append(&obj); - amrArr.append(obj.done()); - } - amrArr.done(); -} - -SharedSemiFuture MetadataManager::cleanUpRange(ChunkRange const& range, - bool shouldDelayBeforeDeletion) { - stdx::lock_guard lg(_managerLock); - invariant(!_metadata.empty()); - - auto* const activeMetadata = _metadata.back().get(); - auto* const overlapMetadata = _findNewestOverlappingMetadata(lg, range); - - if (overlapMetadata == activeMetadata) { - return Status{ErrorCodes::RangeOverlapConflict, - str::stream() << "Requested deletion range overlaps a live shard chunk"}; - } - - auto delayForActiveQueriesOnSecondariesToComplete = - shouldDelayBeforeDeletion ? Seconds(orphanCleanupDelaySecs.load()) : Seconds(0); - - if (overlapMetadata) { - LOGV2_OPTIONS(21989, - {logv2::LogComponent::kShardingMigration}, - "Deletion of {namespace} range {range} will be scheduled after all possibly " - "dependent queries finish", - "Deletion of the collection's specified range will be scheduled after all " - "possibly dependent queries finish", - logAttrs(_nss), - "range"_attr = redact(range.toString())); - ++overlapMetadata->numContingentRangeDeletionTasks; - // Schedule the range for deletion once the overlapping metadata object is destroyed - // (meaning no more queries can be using the range) and obtain a future which will be - // signaled when deletion is complete. - return _submitRangeForDeletion(lg, - overlapMetadata->onDestructionPromise.getFuture().semi(), - range, - delayForActiveQueriesOnSecondariesToComplete); - } else { - // No running queries can depend on this range, so queue it for deletion immediately. 
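// Illustrative sketch (not part of the upstream change): with append(), cleanUpRange() and the
// range-deletion bookkeeping removed in this hunk, MetadataManager is constructed without a
// task executor and getOngoingQueriesCompletionFuture() remains as its range-facing query. A
// hedged example of the remaining usage; serviceContext is a placeholder, while kNss and
// makeEmptyMetadata() stand in for the test-fixture helpers used in metadata_manager_test.cpp
// later in this patch.
auto manager = std::make_shared<MetadataManager>(serviceContext, kNss, makeEmptyMetadata());
auto ongoingQueries =
    manager->getOngoingQueriesCompletionFuture(ChunkRange(BSON("key" << 0), BSON("key" << 10)));
if (!ongoingQueries.isReady()) {
    // Some active query still holds a metadata snapshot that overlaps the range.
}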
- LOGV2_OPTIONS(21990, - {logv2::LogComponent::kShardingMigration}, - "Scheduling deletion of {namespace} range {range}", - "Scheduling deletion of the collection's specified range", - logAttrs(_nss), - "range"_attr = redact(range.toString())); - - return _submitRangeForDeletion( - lg, SemiFuture::makeReady(), range, delayForActiveQueriesOnSecondariesToComplete); - } -} - -size_t MetadataManager::numberOfRangesToCleanStillInUse() const { - stdx::lock_guard lg(_managerLock); - size_t count = 0; - for (auto& tracker : _metadata) { - count += tracker->numContingentRangeDeletionTasks; - } - return count; -} - -size_t MetadataManager::numberOfRangesToClean() const { - auto rangesToCleanInUse = numberOfRangesToCleanStillInUse(); - stdx::lock_guard lg(_managerLock); - return _rangesScheduledForDeletion.size() - rangesToCleanInUse; -} - -size_t MetadataManager::numberOfRangesScheduledForDeletion() const { - stdx::lock_guard lg(_managerLock); - return _rangesScheduledForDeletion.size(); -} - -SharedSemiFuture MetadataManager::trackOrphanedDataCleanup(ChunkRange const& range) const { - stdx::lock_guard lg(_managerLock); - for (const auto& [orphanRange, deletionComplete] : _rangesScheduledForDeletion) { - if (orphanRange.overlapWith(range)) { - return deletionComplete; - } - } - - return SemiFuture::makeReady().share(); -} - SharedSemiFuture MetadataManager::getOngoingQueriesCompletionFuture(ChunkRange const& range) { stdx::lock_guard lg(_managerLock); @@ -344,13 +244,7 @@ auto MetadataManager::_findNewestOverlappingMetadata(WithLock, ChunkRange const& -> CollectionMetadataTracker* { invariant(!_metadata.empty()); - auto it = _metadata.rbegin(); - if (metadataOverlapsRange((*it)->metadata, range)) { - return (*it).get(); - } - - ++it; - for (; it != _metadata.rend(); ++it) { + for (auto it = _metadata.rbegin(); it != _metadata.rend(); ++it) { auto& tracker = *it; if (tracker->usageCounter && metadataOverlapsRange(tracker->metadata, range)) { return tracker.get(); @@ -360,47 +254,4 @@ auto MetadataManager::_findNewestOverlappingMetadata(WithLock, ChunkRange const& return nullptr; } -bool MetadataManager::_overlapsInUseChunk(WithLock lk, ChunkRange const& range) { - auto* cm = _findNewestOverlappingMetadata(lk, range); - return (cm != nullptr); -} - -SharedSemiFuture MetadataManager::_submitRangeForDeletion( - const WithLock&, - SemiFuture waitForActiveQueriesToComplete, - const ChunkRange& range, - Seconds delayForActiveQueriesOnSecondariesToComplete) { - auto cleanupComplete = [&]() { - const auto collUUID = _metadata.back()->metadata->getChunkManager()->getUUID(); - - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The feature - // flag is used to turn on new range deleter on startup. - if (feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - return RangeDeleterService::get(_serviceContext) - ->getOverlappingRangeDeletionsFuture(collUUID, range); - } - - return removeDocumentsInRange(_executor, - std::move(waitForActiveQueriesToComplete), - _nss, - collUUID, - _metadata.back()->metadata->getKeyPattern().getOwned(), - range, - delayForActiveQueriesOnSecondariesToComplete); - }(); - - _rangesScheduledForDeletion.emplace_front(range, cleanupComplete); - // Attach a continuation so that once the range has been deleted, we will remove the deletion - // from the _rangesScheduledForDeletion. std::list iterators are never invalidated, which - // allows us to save the iterator pointing to the newly added element for use later when - // deleting it. 
- cleanupComplete.thenRunOn(_executor).getAsync( - [self = shared_from_this(), it = _rangesScheduledForDeletion.begin()](Status s) { - stdx::lock_guard lg(self->_managerLock); - self->_rangesScheduledForDeletion.erase(it); - }); - - return cleanupComplete; -} - } // namespace mongo diff --git a/src/mongo/db/s/metadata_manager.h b/src/mongo/db/s/metadata_manager.h index dc884aa835a21..b73df0f9b93a1 100644 --- a/src/mongo/db/s/metadata_manager.h +++ b/src/mongo/db/s/metadata_manager.h @@ -29,14 +29,30 @@ #pragma once +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include +#include #include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/scoped_collection_metadata.h" -#include "mongo/executor/task_executor.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -49,7 +65,6 @@ class MetadataManager : public std::enable_shared_from_this { public: MetadataManager(ServiceContext* serviceContext, NamespaceString nss, - std::shared_ptr executor, CollectionMetadata initialMetadata); ~MetadataManager() = default; @@ -100,50 +115,6 @@ class MetadataManager : public std::enable_shared_from_this { void setFilteringMetadata(CollectionMetadata newMetadata); - /** - * Appends information on all the chunk ranges in rangesToClean to builder. - */ - void append(BSONObjBuilder* builder) const; - - /** - * Schedules documents in `range` for cleanup after any running queries that may depend on them - * have terminated. Does not block. Fails if the range overlaps any current local shard chunk. - * - * If shouldDelayBeforeDeletion is false, deletion is scheduled immediately after the last - * dependent query completes; otherwise, deletion is postponed until after - * orphanCleanupDelaySecs after the last dependent query completes. - * - * Returns a future that will be fulfilled when the range deletion completes or fails. - */ - SharedSemiFuture cleanUpRange(ChunkRange const& range, bool shouldDelayBeforeDeletion); - - /** - * Returns the number of ranges scheduled to be cleaned, exclusive of such ranges that might - * still be in use by running queries. Outside of test drivers, the actual number may vary - * after it returns, so this is really only useful for unit tests. - */ - size_t numberOfRangesToClean() const; - - /** - * Returns the number of ranges scheduled to be cleaned once all queries that could depend on - * them have terminated. The actual number may vary after it returns, so this is really only - * useful for unit tests. - */ - size_t numberOfRangesToCleanStillInUse() const; - - /** - * Returns the number of ranges scheduled for deletion, regardless of whether they may still be - * in use by running queries. - */ - size_t numberOfRangesScheduledForDeletion() const; - - /** - * Reports whether any range still scheduled for deletion overlaps the argument range. If so, - * returns a future that will be resolved when the newest overlapping range's deletion (possibly - * the one of interest) completes or fails. 
- */ - SharedSemiFuture trackOrphanedDataCleanup(ChunkRange const& orphans) const; - /** * Returns a future marked as ready when all the ongoing queries retaining the range complete */ @@ -171,12 +142,6 @@ class MetadataManager : public std::enable_shared_from_this { boost::optional metadata; - /** - * Number of range deletion tasks waiting on this CollectionMetadataTracker to be destroyed - * before deleting documents. - */ - uint32_t numContingentRangeDeletionTasks{0}; - /** * Promise that will be signaled when this object is destroyed. * @@ -203,29 +168,11 @@ class MetadataManager : public std::enable_shared_from_this { void _setActiveMetadata(WithLock wl, CollectionMetadata newMetadata); /** - * Finds the most-recently pushed metadata that might depend on `range`, or nullptr if none. - * The result is usable until the lock is released. + * Finds the most-recently pushed metadata that depends on `range`, or nullptr if none. The + * result is usable until the lock is released. */ CollectionMetadataTracker* _findNewestOverlappingMetadata(WithLock, ChunkRange const& range); - /** - * Returns true if the specified range overlaps any chunk that might be currently in use by a - * running query. - */ - - bool _overlapsInUseChunk(WithLock, ChunkRange const& range); - - /** - * Schedule a task to delete the given range of documents once waitForActiveQueriesToComplete - * has been signaled, and store the resulting future for the task in - * _rangesScheduledForDeletion. - */ - SharedSemiFuture _submitRangeForDeletion( - const WithLock&, - SemiFuture waitForActiveQueriesToComplete, - const ChunkRange& range, - Seconds delayForActiveQueriesOnSecondariesToComplete); - // ServiceContext from which to obtain instances of global support objects ServiceContext* const _serviceContext; @@ -235,9 +182,6 @@ class MetadataManager : public std::enable_shared_from_this { // The UUID for the collection tracked by this manager object. const UUID _collectionUuid; - // The background task that deletes documents from orphaned chunk ranges. - std::shared_ptr const _executor; - // Mutex to protect the state below mutable Mutex _managerLock = MONGO_MAKE_LATCH("MetadataManager::_managerLock"); @@ -246,9 +190,6 @@ class MetadataManager : public std::enable_shared_from_this { // the most recent metadata and is what is returned to new queries. The rest are previously // active collection metadata instances still in use by active server operations or cursors. std::list> _metadata; - - // Ranges being deleted, or scheduled to be deleted, by a background task. - std::list>> _rangesScheduledForDeletion; }; } // namespace mongo diff --git a/src/mongo/db/s/metadata_manager_test.cpp b/src/mongo/db/s/metadata_manager_test.cpp index e8c2d3154ac09..6338de1c10ff3 100644 --- a/src/mongo/db/s/metadata_manager_test.cpp +++ b/src/mongo/db/s/metadata_manager_test.cpp @@ -27,29 +27,38 @@ * it in the license file. 
*/ -#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" -#include "mongo/db/jsobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/s/metadata_manager.h" -#include "mongo/db/s/range_deletion_task_gen.h" +#include "mongo/db/s/range_arithmetic.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/sharding_runtime_d_params_gen.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/db/server_options.h" -#include "mongo/db/service_context.h" -#include "mongo/db/vector_clock.h" -#include "mongo/executor/task_executor.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/s/catalog/type_chunk.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/stdx/condition_variable.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -66,8 +75,8 @@ class MetadataManagerTest : public ShardServerTestFixture { protected: void setUp() override { ShardServerTestFixture::setUp(); - _manager = std::make_shared( - getServiceContext(), kNss, executor(), makeEmptyMetadata()); + _manager = + std::make_shared(getServiceContext(), kNss, makeEmptyMetadata()); orphanCleanupDelaySecs.store(1); } @@ -196,101 +205,6 @@ class MetadataManagerTest : public ShardServerTestFixture { const int _defaultOrphanCleanupDelaySecs = orphanCleanupDelaySecs.load(); }; -// The 'pending' field must not be set in order for a range deletion task to succeed, but the -// ShardServerOpObserver will submit the task for deletion upon seeing an insert without the -// 'pending' field. The tests call removeDocumentsFromRange directly, so we want to avoid having -// the op observer also submit the task. The ShardServerOpObserver will ignore replacement -// updates on the range deletions namespace though, so we can get around the issue by inserting -// the task with the 'pending' field set, and then remove the field using a replacement update -// after. 
-RangeDeletionTask insertRangeDeletionTask(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const ChunkRange& range, - int64_t numOrphans) { - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - auto migrationId = UUID::gen(); - RangeDeletionTask t(migrationId, nss, uuid, ShardId("donor"), range, CleanWhenEnum::kDelayed); - t.setPending(true); - t.setNumOrphanDocs(numOrphans); - const auto currentTime = VectorClock::get(opCtx)->getTime(); - t.setTimestamp(currentTime.clusterTime().asTimestamp()); - store.add(opCtx, t); - - auto query = BSON(RangeDeletionTask::kIdFieldName << migrationId); - t.setPending(boost::none); - auto update = t.toBSON(); - store.update(opCtx, query, update); - - return t; -} - -TEST_F(MetadataManagerTest, TrackOrphanedDataCleanupBlocksOnScheduledRangeDeletions) { - RAIIServerParameterControllerForTest enableFeatureFlag{"featureFlagRangeDeleterService", false}; - ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); - const auto task = - insertRangeDeletionTask(operationContext(), kNss, _manager->getCollectionUuid(), cr1, 0); - - // Enable fail point to suspendRangeDeletion. - globalFailPointRegistry().find("suspendRangeDeletion")->setMode(FailPoint::alwaysOn); - - auto notifn1 = _manager->cleanUpRange(cr1, false /*delayBeforeDeleting*/); - ASSERT_FALSE(notifn1.isReady()); - ASSERT_EQ(_manager->numberOfRangesToClean(), 1UL); - - auto future = _manager->trackOrphanedDataCleanup(cr1); - ASSERT_FALSE(notifn1.isReady()); - ASSERT_FALSE(future.isReady()); - - globalFailPointRegistry().find("suspendRangeDeletion")->setMode(FailPoint::off); -} - -TEST_F(MetadataManagerTest, CleanupNotificationsAreSignaledWhenMetadataManagerIsDestroyed) { - RAIIServerParameterControllerForTest enableFeatureFlag{"featureFlagRangeDeleterService", false}; - const ChunkRange rangeToClean(BSON("key" << 20), BSON("key" << 30)); - const auto task = insertRangeDeletionTask( - operationContext(), kNss, _manager->getCollectionUuid(), rangeToClean, 0); - - - _manager->setFilteringMetadata(cloneMetadataPlusChunk( - _manager->getActiveMetadata(boost::none)->get(), {BSON("key" << 0), BSON("key" << 20)})); - - _manager->setFilteringMetadata( - cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none)->get(), rangeToClean)); - - // Optional so that it can be reset. - boost::optional cursorOnMovedMetadata{ - _manager->getActiveMetadata(boost::none)}; - - _manager->setFilteringMetadata( - cloneMetadataMinusChunk(_manager->getActiveMetadata(boost::none)->get(), rangeToClean)); - - auto notif = _manager->cleanUpRange(rangeToClean, false /*delayBeforeDeleting*/); - ASSERT(!notif.isReady()); - - auto future = _manager->trackOrphanedDataCleanup(rangeToClean); - ASSERT(!future.isReady()); - - // Reset the original shared_ptr. The cursorOnMovedMetadata will still contain its own copy of - // the shared_ptr though, so the destructor of ~MetadataManager won't yet be called. - _manager.reset(); - ASSERT(!notif.isReady()); - ASSERT(!future.isReady()); - - // Destroys the ScopedCollectionDescription object and causes the destructor of MetadataManager - // to run, which should trigger all deletion notifications. - cursorOnMovedMetadata.reset(); - - // Advance time to simulate orphanCleanupDelaySecs passing. 
- { - executor::NetworkInterfaceMock::InNetworkGuard guard(network()); - network()->advanceTime(network()->now() + Seconds{5}); - } - - notif.wait(); - future.wait(); -} - TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationSinglePending) { ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); @@ -306,7 +220,6 @@ TEST_F(MetadataManagerTest, RefreshAfterSuccessfulMigrationMultiplePending) { { _manager->setFilteringMetadata( cloneMetadataPlusChunk(_manager->getActiveMetadata(boost::none)->get(), cr1)); - ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL); ASSERT_EQ(_manager->getActiveMetadata(boost::none)->get().getChunks().size(), 1UL); } @@ -338,25 +251,6 @@ TEST_F(MetadataManagerTest, BeginReceiveWithOverlappingRange) { ChunkRange crOverlap(BSON("key" << 5), BSON("key" << 35)); } -// Tests membership functions for _rangesToClean -TEST_F(MetadataManagerTest, RangesToCleanMembership) { - RAIIServerParameterControllerForTest enableFeatureFlag{"featureFlagRangeDeleterService", false}; - ChunkRange cr(BSON("key" << 0), BSON("key" << 10)); - const auto task = - insertRangeDeletionTask(operationContext(), kNss, _manager->getCollectionUuid(), cr, 0); - - ASSERT_EQ(0UL, _manager->numberOfRangesToClean()); - - // Enable fail point to suspendRangeDeletion. - globalFailPointRegistry().find("suspendRangeDeletion")->setMode(FailPoint::alwaysOn); - - auto notifn = _manager->cleanUpRange(cr, false /*delayBeforeDeleting*/); - ASSERT(!notifn.isReady()); - ASSERT_EQ(1UL, _manager->numberOfRangesToClean()); - - globalFailPointRegistry().find("suspendRangeDeletion")->setMode(FailPoint::off); -} - TEST_F(MetadataManagerTest, ClearUnneededChunkManagerObjectsLastSnapshotInList) { ChunkRange cr1(BSON("key" << 0), BSON("key" << 10)); ChunkRange cr2(BSON("key" << 30), BSON("key" << 40)); @@ -365,7 +259,6 @@ TEST_F(MetadataManagerTest, ClearUnneededChunkManagerObjectsLastSnapshotInList) { _manager->setFilteringMetadata(cloneMetadataPlusChunk(scm1->get(), cr1)); ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 1UL); - ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL); auto scm2 = _manager->getActiveMetadata(boost::none); ASSERT_EQ(scm2->get().getChunks().size(), 1UL); @@ -392,7 +285,6 @@ TEST_F(MetadataManagerTest, ClearUnneededChunkManagerObjectSnapshotInMiddleOfLis auto scm = _manager->getActiveMetadata(boost::none); _manager->setFilteringMetadata(cloneMetadataPlusChunk(scm->get(), cr1)); ASSERT_EQ(_manager->numberOfMetadataSnapshots(), 1UL); - ASSERT_EQ(_manager->numberOfRangesToClean(), 0UL); auto scm2 = _manager->getActiveMetadata(boost::none); ASSERT_EQ(scm2->get().getChunks().size(), 1UL); diff --git a/src/mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.cpp b/src/mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.cpp index dc6403e9fdb13..7af4c31ea1414 100644 --- a/src/mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.cpp +++ b/src/mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.cpp @@ -56,6 +56,10 @@ constexpr auto kCollectionCloningTotalRemoteBatchesRetrieved = constexpr auto kCollectionCloningTotalLocalInsertTimeMillis = "collectionCloningTotalLocalInsertTimeMillis"; constexpr auto kCollectionCloningTotalLocalInserts = "collectionCloningTotalLocalInserts"; +constexpr auto kCountSameKeyStarted = "countSameKeyStarted"; +constexpr auto kCountSameKeySucceeded = "countSameKeySucceeded"; +constexpr auto kCountSameKeyFailed = 
"countSameKeyFailed"; +constexpr auto kCountSameKeyCanceled = "countSameKeyCanceled"; } // namespace StringData Provider::getForCountStarted() const { @@ -104,5 +108,17 @@ StringData Provider::getForCollectionCloningTotalLocalInsertTimeMillis() const { StringData Provider::getForCollectionCloningTotalLocalInserts() const { return kCollectionCloningTotalLocalInserts; } +StringData Provider::getForCountSameKeyStarted() const { + return kCountSameKeyStarted; +} +StringData Provider::getForCountSameKeySucceeded() const { + return kCountSameKeySucceeded; +} +StringData Provider::getForCountSameKeyFailed() const { + return kCountSameKeyFailed; +} +StringData Provider::getForCountSameKeyCanceled() const { + return kCountSameKeyCanceled; +} } // namespace mongo diff --git a/src/mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.h b/src/mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.h index e8e68b0cc8398..db11d7ff48806 100644 --- a/src/mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.h +++ b/src/mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.h @@ -53,6 +53,10 @@ class ShardingDataTransformCumulativeMetricsFieldNameProvider { StringData getForCollectionCloningTotalRemoteBatchesRetrieved() const; StringData getForCollectionCloningTotalLocalInsertTimeMillis() const; StringData getForCollectionCloningTotalLocalInserts() const; + StringData getForCountSameKeyStarted() const; + StringData getForCountSameKeySucceeded() const; + StringData getForCountSameKeyFailed() const; + StringData getForCountSameKeyCanceled() const; }; } // namespace mongo diff --git a/src/mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.cpp b/src/mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.cpp index 77c2427bc0f72..c4bfb0891b3c0 100644 --- a/src/mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.cpp +++ b/src/mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.cpp @@ -48,6 +48,10 @@ constexpr auto kAllShardsLowestRemainingOperationTimeEstimatedSecs = "allShardsLowestRemainingOperationTimeEstimatedSecs"; constexpr auto kAllShardsHighestRemainingOperationTimeEstimatedSecs = "allShardsHighestRemainingOperationTimeEstimatedSecs"; +constexpr auto kIsSameKeyResharding = "isSameKeyResharding"; +constexpr auto kIndexesToBuild = "indexesToBuild"; +constexpr auto kIndexesBuilt = "indexesBuilt"; +constexpr auto kIndexBuildTimeElapsed = "indexBuildTimeElapsedSecs"; } // namespace StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForType() const { @@ -116,4 +120,19 @@ StringData ShardingDataTransformInstanceMetricsFieldNameProvider:: getForAllShardsHighestRemainingOperationTimeEstimatedSecs() const { return kAllShardsHighestRemainingOperationTimeEstimatedSecs; } + +StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForIsSameKeyResharding() + const { + return kIsSameKeyResharding; +} +StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForIndexesToBuild() const { + return kIndexesToBuild; +} +StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForIndexesBuilt() const { + return kIndexesBuilt; +} +StringData ShardingDataTransformInstanceMetricsFieldNameProvider::getForIndexBuildTimeElapsed() + const { + return 
kIndexBuildTimeElapsed; +} } // namespace mongo diff --git a/src/mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.h b/src/mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.h index eb4c634a868fe..2fd89bb5f8e35 100644 --- a/src/mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.h +++ b/src/mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" #include "mongo/util/duration.h" @@ -58,5 +59,9 @@ class ShardingDataTransformInstanceMetricsFieldNameProvider { StringData getForCountReadsDuringCriticalSection() const; StringData getForAllShardsLowestRemainingOperationTimeEstimatedSecs() const; StringData getForAllShardsHighestRemainingOperationTimeEstimatedSecs() const; + StringData getForIsSameKeyResharding() const; + StringData getForIndexesToBuild() const; + StringData getForIndexesBuilt() const; + StringData getForIndexBuildTimeElapsed() const; }; } // namespace mongo diff --git a/src/mongo/db/s/metrics/phase_duration.cpp b/src/mongo/db/s/metrics/phase_duration.cpp index c6ca96702a44d..2fa2bdfd16086 100644 --- a/src/mongo/db/s/metrics/phase_duration.cpp +++ b/src/mongo/db/s/metrics/phase_duration.cpp @@ -29,6 +29,11 @@ #include "mongo/db/s/metrics/phase_duration.h" +#include + +#include +#include + namespace mongo { namespace { diff --git a/src/mongo/db/s/metrics/phase_duration.h b/src/mongo/db/s/metrics/phase_duration.h index 5942b918c830c..80c33cc673a3c 100644 --- a/src/mongo/db/s/metrics/phase_duration.h +++ b/src/mongo/db/s/metrics/phase_duration.h @@ -29,7 +29,12 @@ #pragma once +#include +#include + +#include "mongo/platform/atomic_word.h" #include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/s/metrics/phase_duration_test.cpp b/src/mongo/db/s/metrics/phase_duration_test.cpp index a756d033a0bba..41312d81c85fc 100644 --- a/src/mongo/db/s/metrics/phase_duration_test.cpp +++ b/src/mongo/db/s/metrics/phase_duration_test.cpp @@ -29,7 +29,12 @@ #include "mongo/db/s/metrics/phase_duration.h" -#include "mongo/unittest/unittest.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" namespace mongo { diff --git a/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.cpp b/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.cpp index a0aa0a34c8eab..279ae5b811d97 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.cpp +++ b/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.cpp @@ -28,14 +28,19 @@ */ #include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" -#include "mongo/db/s/global_index/global_index_cumulative_metrics.h" -#include "mongo/db/s/move_primary/move_primary_cumulative_metrics.h" -#include "mongo/db/s/resharding/resharding_cumulative_metrics.h" +#include #include +#include +#include + +#include "mongo/db/s/global_index/global_index_cumulative_metrics.h" +#include "mongo/db/s/resharding/resharding_cumulative_metrics.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" + namespace mongo { namespace { @@ -48,7 +53,6 @@ constexpr auto kEstimateNotAvailable = -1; struct 
Metrics { ReshardingCumulativeMetrics _resharding; global_index::GlobalIndexCumulativeMetrics _globalIndexes; - MovePrimaryCumulativeMetrics _movePrimary; }; using MetricsPtr = std::unique_ptr; const auto getMetrics = ServiceContext::declareDecoration(); @@ -71,12 +75,6 @@ ShardingDataTransformCumulativeMetrics* ShardingDataTransformCumulativeMetrics:: return &metrics->_globalIndexes; } -ShardingDataTransformCumulativeMetrics* ShardingDataTransformCumulativeMetrics::getForMovePrimary( - ServiceContext* context) { - auto& metrics = getMetrics(context); - return &metrics->_movePrimary; -} - ShardingDataTransformCumulativeMetrics::ShardingDataTransformCumulativeMetrics( const std::string& rootSectionName, std::unique_ptr fieldNameProvider) : _rootSectionName{rootSectionName}, @@ -150,6 +148,10 @@ void ShardingDataTransformCumulativeMetrics::reportForServerStatus(BSONObjBuilde root.append(_fieldNames->getForCountSucceeded(), _countSucceeded.load()); root.append(_fieldNames->getForCountFailed(), _countFailed.load()); root.append(_fieldNames->getForCountCanceled(), _countCancelled.load()); + root.append(_fieldNames->getForCountSameKeyStarted(), _countSameKeyStarted.load()); + root.append(_fieldNames->getForCountSameKeySucceeded(), _countSameKeySucceeded.load()); + root.append(_fieldNames->getForCountSameKeyFailed(), _countSameKeyFailed.load()); + root.append(_fieldNames->getForCountSameKeyCanceled(), _countSameKeyCancelled.load()); root.append(_fieldNames->getForLastOpEndingChunkImbalance(), _lastOpEndingChunkImbalance.load()); { @@ -243,20 +245,36 @@ void ShardingDataTransformCumulativeMetrics::deregisterMetrics( getMetricsSetForRole(role).erase(metricsIterator); } -void ShardingDataTransformCumulativeMetrics::onStarted() { - _countStarted.fetchAndAdd(1); +void ShardingDataTransformCumulativeMetrics::onStarted(bool isSameKeyResharding) { + if (isSameKeyResharding) { + _countSameKeyStarted.fetchAndAdd(1); + } else { + _countStarted.fetchAndAdd(1); + } } -void ShardingDataTransformCumulativeMetrics::onSuccess() { - _countSucceeded.fetchAndAdd(1); +void ShardingDataTransformCumulativeMetrics::onSuccess(bool isSameKeyResharding) { + if (isSameKeyResharding) { + _countSameKeySucceeded.fetchAndAdd(1); + } else { + _countSucceeded.fetchAndAdd(1); + } } -void ShardingDataTransformCumulativeMetrics::onFailure() { - _countFailed.fetchAndAdd(1); +void ShardingDataTransformCumulativeMetrics::onFailure(bool isSameKeyResharding) { + if (isSameKeyResharding) { + _countSameKeyFailed.fetchAndAdd(1); + } else { + _countFailed.fetchAndAdd(1); + } } -void ShardingDataTransformCumulativeMetrics::onCanceled() { - _countCancelled.fetchAndAdd(1); +void ShardingDataTransformCumulativeMetrics::onCanceled(bool isSameKeyResharding) { + if (isSameKeyResharding) { + _countSameKeyCancelled.fetchAndAdd(1); + } else { + _countCancelled.fetchAndAdd(1); + } } void ShardingDataTransformCumulativeMetrics::setLastOpEndingChunkImbalance(int64_t imbalanceCount) { diff --git a/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h b/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h index 964988faba148..6574751fe4762 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h +++ b/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h @@ -29,15 +29,27 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include + #include "mongo/bson/bsonobjbuilder.h" #include 
"mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_observer_interface.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/duration.h" #include "mongo/util/functional.h" -#include +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -96,10 +108,10 @@ class ShardingDataTransformCumulativeMetrics { size_t getObservedMetricsCount(Role role) const; void reportForServerStatus(BSONObjBuilder* bob) const; - void onStarted(); - void onSuccess(); - void onFailure(); - void onCanceled(); + void onStarted(bool isSameKeyResharding); + void onSuccess(bool isSameKeyResharding); + void onFailure(bool isSameKeyResharding); + void onCanceled(bool isSameKeyResharding); void setLastOpEndingChunkImbalance(int64_t imbalanceCount); @@ -153,6 +165,11 @@ class ShardingDataTransformCumulativeMetrics { AtomicWord _collectionCloningTotalLocalBatchInserts{0}; AtomicWord _collectionCloningTotalLocalInsertTimeMillis{0}; AtomicWord _writesToStashedCollections{0}; + + AtomicWord _countSameKeyStarted{0}; + AtomicWord _countSameKeySucceeded{0}; + AtomicWord _countSameKeyFailed{0}; + AtomicWord _countSameKeyCancelled{0}; }; } // namespace mongo diff --git a/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics_test.cpp b/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics_test.cpp index cd1eb935ee054..b0642ccc46dc4 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics_test.cpp +++ b/src/mongo/db/s/metrics/sharding_data_transform_cumulative_metrics_test.cpp @@ -27,17 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/random.h" -#include "mongo/stdx/thread.h" -#include "mongo/stdx/unordered_map.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/future.h" -#include "mongo/util/static_immortal.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -177,6 +175,7 @@ TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsTimeEstimates) { } TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsRunCount) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); using Role = ShardingDataTransformMetrics::Role; ObserverMock coordinator{Date_t::fromMillisSinceEpoch(200), 400, 300, Role::kCoordinator}; auto ignore = _cumulativeMetrics->registerInstanceMetrics(&coordinator); @@ -186,19 +185,23 @@ TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsRunCount) { _cumulativeMetrics->reportForServerStatus(&bob); auto report = bob.done(); ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countStarted"), 0); + ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSameKeyStarted"), 0); } - _cumulativeMetrics->onStarted(); + _cumulativeMetrics->onStarted(false /*isSameKeyResharding*/); + _cumulativeMetrics->onStarted(true /*isSameKeyResharding*/); { BSONObjBuilder bob; _cumulativeMetrics->reportForServerStatus(&bob); auto report = bob.done(); ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countStarted"), 1); + ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSameKeyStarted"), 1); } } TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsSucceededCount) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); using Role = ShardingDataTransformMetrics::Role; ObserverMock coordinator{Date_t::fromMillisSinceEpoch(200), 400, 300, Role::kCoordinator}; auto ignore = _cumulativeMetrics->registerInstanceMetrics(&coordinator); @@ -208,19 +211,23 @@ TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsSucceededCount) { _cumulativeMetrics->reportForServerStatus(&bob); auto report = bob.done(); ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSucceeded"), 0); + ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSameKeySucceeded"), 0); } - _cumulativeMetrics->onSuccess(); + _cumulativeMetrics->onSuccess(false /*isSameKeyResharding*/); + _cumulativeMetrics->onSuccess(true /*isSameKeyResharding*/); { BSONObjBuilder bob; _cumulativeMetrics->reportForServerStatus(&bob); auto report = bob.done(); ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSucceeded"), 1); + ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSameKeySucceeded"), 1); } } TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsFailedCount) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); using Role = ShardingDataTransformMetrics::Role; ObserverMock coordinator{Date_t::fromMillisSinceEpoch(200), 400, 300, 
Role::kCoordinator}; auto ignore = _cumulativeMetrics->registerInstanceMetrics(&coordinator); @@ -230,19 +237,23 @@ TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsFailedCount) { _cumulativeMetrics->reportForServerStatus(&bob); auto report = bob.done(); ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countFailed"), 0); + ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSameKeyFailed"), 0); } - _cumulativeMetrics->onFailure(); + _cumulativeMetrics->onFailure(false /*isSameKeyResharding*/); + _cumulativeMetrics->onFailure(true /*isSameKeyResharding*/); { BSONObjBuilder bob; _cumulativeMetrics->reportForServerStatus(&bob); auto report = bob.done(); ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countFailed"), 1); + ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSameKeyFailed"), 1); } } TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsCanceledCount) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); using Role = ShardingDataTransformMetrics::Role; ObserverMock coordinator{Date_t::fromMillisSinceEpoch(200), 400, 300, Role::kCoordinator}; auto ignore = _cumulativeMetrics->registerInstanceMetrics(&coordinator); @@ -252,15 +263,18 @@ TEST_F(ShardingDataTransformMetricsTestFixture, ReportContainsCanceledCount) { _cumulativeMetrics->reportForServerStatus(&bob); auto report = bob.done(); ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countCanceled"), 0); + ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSameKeyCanceled"), 0); } - _cumulativeMetrics->onCanceled(); + _cumulativeMetrics->onCanceled(false /*isSameKeyResharding*/); + _cumulativeMetrics->onCanceled(true /*isSameKeyResharding*/); { BSONObjBuilder bob; _cumulativeMetrics->reportForServerStatus(&bob); auto report = bob.done(); ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countCanceled"), 1); + ASSERT_EQ(report.getObjectField(kTestMetricsName).getIntField("countSameKeyCanceled"), 1); } } diff --git a/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics.cpp b/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics.cpp index 14cac5c31119d..1f0c630bd0b09 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics.cpp +++ b/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics.cpp @@ -28,8 +28,21 @@ */ #include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" + +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_observer.h" +#include "mongo/db/server_options.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { @@ -157,7 +170,7 @@ BSONObj ShardingDataTransformInstanceMetrics::reportForCurrentOp() const noexcep builder.append(_fieldNames->getForType(), "op"); builder.append(_fieldNames->getForDescription(), createOperationDescription()); builder.append(_fieldNames->getForOp(), "command"); - builder.append(_fieldNames->getForNamespace(), _sourceNs.toString()); + builder.append(_fieldNames->getForNamespace(), NamespaceStringUtil::serialize(_sourceNs)); builder.append(_fieldNames->getForOriginatingCommand(), _originalCommand); builder.append(_fieldNames->getForOpTimeElapsed(), getOperationRunningTimeSecs().count()); 
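
The cumulative-metrics hunks above split each lifecycle counter (started, succeeded, failed, canceled) into a regular and a same-key-resharding variant selected by the new `isSameKeyResharding` argument, and report both families in serverStatus. A minimal standalone sketch of that branching-counter pattern, using plain `std::atomic` in place of MongoDB's `AtomicWord` and a stream in place of `BSONObjBuilder` (the class and field names below are illustrative, not the real ones):

```cpp
#include <atomic>
#include <cstdint>
#include <iostream>

// Simplified stand-in for the cumulative metrics object: each lifecycle event
// bumps either the regular counter or the same-key counter, never both.
class CumulativeCounters {
public:
    void onStarted(bool isSameKeyResharding) {
        (isSameKeyResharding ? _sameKeyStarted : _started).fetch_add(1);
    }
    void onSuccess(bool isSameKeyResharding) {
        (isSameKeyResharding ? _sameKeySucceeded : _succeeded).fetch_add(1);
    }

    // Analogue of reportForServerStatus(): both counter families are emitted.
    void report(std::ostream& out) const {
        out << "countStarted: " << _started.load() << "\n"
            << "countSameKeyStarted: " << _sameKeyStarted.load() << "\n"
            << "countSucceeded: " << _succeeded.load() << "\n"
            << "countSameKeySucceeded: " << _sameKeySucceeded.load() << "\n";
    }

private:
    std::atomic<int64_t> _started{0};
    std::atomic<int64_t> _succeeded{0};
    std::atomic<int64_t> _sameKeyStarted{0};
    std::atomic<int64_t> _sameKeySucceeded{0};
};

int main() {
    CumulativeCounters counters;
    counters.onStarted(false);  // classic resharding operation
    counters.onStarted(true);   // same-key resharding operation
    counters.onSuccess(true);
    counters.report(std::cout);  // each family reflects only its own operations
}
```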
switch (_role) { @@ -171,6 +184,11 @@ BSONObj ShardingDataTransformInstanceMetrics::reportForCurrentOp() const noexcep _fieldNames->getForAllShardsLowestRemainingOperationTimeEstimatedSecs(), getLowEstimateRemainingTimeMillis()); builder.append(_fieldNames->getForCoordinatorState(), getStateString()); + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + builder.append(_fieldNames->getForIsSameKeyResharding(), + _isSameKeyResharding.load()); + } break; case Role::kDonor: builder.append(_fieldNames->getForDonorState(), getStateString()); @@ -192,6 +210,11 @@ BSONObj ShardingDataTransformInstanceMetrics::reportForCurrentOp() const noexcep builder.append(_fieldNames->getForCountWritesToStashCollections(), _writesToStashCollections.load()); builder.append(_fieldNames->getForDocumentsProcessed(), _documentsProcessed.load()); + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + builder.append(_fieldNames->getForIndexesToBuild(), _indexesToBuild.load()); + builder.append(_fieldNames->getForIndexesBuilt(), _indexesBuilt.load()); + } break; default: MONGO_UNREACHABLE; @@ -279,26 +302,38 @@ ClockSource* ShardingDataTransformInstanceMetrics::getClockSource() const { return _clockSource; } -void ShardingDataTransformInstanceMetrics::onStarted() { - _cumulativeMetrics->onStarted(); +void ShardingDataTransformInstanceMetrics::onStarted(bool isSameKeyResharding) { + _cumulativeMetrics->onStarted(isSameKeyResharding); } -void ShardingDataTransformInstanceMetrics::onSuccess() { - _cumulativeMetrics->onSuccess(); +void ShardingDataTransformInstanceMetrics::onSuccess(bool isSameKeyResharding) { + _cumulativeMetrics->onSuccess(isSameKeyResharding); } -void ShardingDataTransformInstanceMetrics::onFailure() { - _cumulativeMetrics->onFailure(); +void ShardingDataTransformInstanceMetrics::onFailure(bool isSameKeyResharding) { + _cumulativeMetrics->onFailure(isSameKeyResharding); } -void ShardingDataTransformInstanceMetrics::onCanceled() { - _cumulativeMetrics->onCanceled(); +void ShardingDataTransformInstanceMetrics::onCanceled(bool isSameKeyResharding) { + _cumulativeMetrics->onCanceled(isSameKeyResharding); } void ShardingDataTransformInstanceMetrics::setLastOpEndingChunkImbalance(int64_t imbalanceCount) { _cumulativeMetrics->setLastOpEndingChunkImbalance(imbalanceCount); } +void ShardingDataTransformInstanceMetrics::setIsSameKeyResharding(bool isSameKeyResharding) { + _isSameKeyResharding.store(isSameKeyResharding); +} + +void ShardingDataTransformInstanceMetrics::setIndexesToBuild(int64_t numIndexes) { + _indexesToBuild.store(numIndexes); +} + +void ShardingDataTransformInstanceMetrics::setIndexesBuilt(int64_t numIndexes) { + _indexesBuilt.store(numIndexes); +} + ShardingDataTransformInstanceMetrics::UniqueScopedObserver ShardingDataTransformInstanceMetrics::registerInstanceMetrics() { return _cumulativeMetrics->registerInstanceMetrics(_observer.get()); diff --git a/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics.h b/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics.h index 3947ffce29c56..1789e28271c2e 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics.h +++ b/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics.h @@ -29,12 +29,24 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include 
"mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.h" #include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_observer_interface.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/clock_source.h" #include "mongo/util/duration.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -72,10 +84,10 @@ class ShardingDataTransformInstanceMetrics { Date_t getStartTimestamp() const; const UUID& getInstanceId() const; - void onStarted(); - void onSuccess(); - void onFailure(); - void onCanceled(); + void onStarted(bool isSameKeyResharding); + void onSuccess(bool isSameKeyResharding); + void onFailure(bool isSameKeyResharding); + void onCanceled(bool isSameKeyResharding); void onDocumentsProcessed(int64_t documentCount, int64_t totalDocumentsSizeBytes, @@ -98,6 +110,10 @@ class ShardingDataTransformInstanceMetrics { void setLastOpEndingChunkImbalance(int64_t imbalanceCount); + void setIsSameKeyResharding(bool isSameKeyResharding); + void setIndexesToBuild(int64_t numIndexes); + void setIndexesBuilt(int64_t numIndexes); + protected: static constexpr auto kNoDate = Date_t::min(); using UniqueScopedObserver = ShardingDataTransformCumulativeMetrics::UniqueScopedObserver; @@ -151,6 +167,10 @@ class ShardingDataTransformInstanceMetrics { AtomicWord _readsDuringCriticalSection; AtomicWord _writesDuringCriticalSection; + + AtomicWord _isSameKeyResharding; + AtomicWord _indexesToBuild; + AtomicWord _indexesBuilt; }; } // namespace mongo diff --git a/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics_test.cpp b/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics_test.cpp index d6fabae7e2060..84eb51da68460 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics_test.cpp +++ b/src/mongo/db/s/metrics/sharding_data_transform_instance_metrics_test.cpp @@ -27,13 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source_mock.h" #include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -349,23 +360,51 @@ TEST_F(ShardingDataTransformInstanceMetricsTest, } TEST_F(ShardingDataTransformInstanceMetricsTest, OnStartedIncrementsCumulativeMetrics) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); + createMetricsAndAssertIncrementsCumulativeMetricsField( + [](auto metrics) { metrics->onStarted(false /*isSameKeyResharding*/); }, + Section::kRoot, + "countStarted"); createMetricsAndAssertIncrementsCumulativeMetricsField( - [](auto metrics) { metrics->onStarted(); }, Section::kRoot, "countStarted"); + [](auto metrics) { metrics->onStarted(true /*isSameKeyResharding*/); }, + Section::kRoot, + "countSameKeyStarted"); } TEST_F(ShardingDataTransformInstanceMetricsTest, OnSuccessIncrementsCumulativeMetrics) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); createMetricsAndAssertIncrementsCumulativeMetricsField( - [](auto metrics) { metrics->onSuccess(); }, Section::kRoot, "countSucceeded"); + [](auto metrics) { metrics->onSuccess(false /*isSameKeyResharding*/); }, + Section::kRoot, + "countSucceeded"); + createMetricsAndAssertIncrementsCumulativeMetricsField( + [](auto metrics) { metrics->onSuccess(true /*isSameKeyResharding*/); }, + Section::kRoot, + "countSameKeySucceeded"); } TEST_F(ShardingDataTransformInstanceMetricsTest, OnFailureIncrementsCumulativeMetrics) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); createMetricsAndAssertIncrementsCumulativeMetricsField( - [](auto metrics) { metrics->onFailure(); }, Section::kRoot, "countFailed"); + [](auto metrics) { metrics->onFailure(false /*isSameKeyResharding*/); }, + Section::kRoot, + "countFailed"); + createMetricsAndAssertIncrementsCumulativeMetricsField( + [](auto metrics) { metrics->onFailure(true /*isSameKeyResharding*/); }, + Section::kRoot, + "countSameKeyFailed"); } TEST_F(ShardingDataTransformInstanceMetricsTest, OnCanceledIncrementsCumulativeMetrics) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); createMetricsAndAssertIncrementsCumulativeMetricsField( - [](auto metrics) { metrics->onCanceled(); }, Section::kRoot, "countCanceled"); + [](auto metrics) { metrics->onCanceled(false /*isSameKeyResharding*/); }, + Section::kRoot, + "countCanceled"); + createMetricsAndAssertIncrementsCumulativeMetricsField( + [](auto metrics) { metrics->onCanceled(true /*isSameKeyResharding*/); }, + Section::kRoot, + "countSameKeyCanceled"); } TEST_F(ShardingDataTransformInstanceMetricsTest, SetChunkImbalanceIncrementsCumulativeMetrics) { diff --git a/src/mongo/db/s/metrics/sharding_data_transform_metrics.cpp b/src/mongo/db/s/metrics/sharding_data_transform_metrics.cpp index cdf5a1ba86051..30436a58be1a9 100644 --- 
a/src/mongo/db/s/metrics/sharding_data_transform_metrics.cpp +++ b/src/mongo/db/s/metrics/sharding_data_transform_metrics.cpp @@ -28,8 +28,14 @@ */ #include "mongo/db/s/metrics/sharding_data_transform_metrics.h" -#include "mongo/s/sharding_feature_flags_gen.h" + +#include + +#include +#include + #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/s/metrics/sharding_data_transform_metrics.h b/src/mongo/db/s/metrics/sharding_data_transform_metrics.h index abb8608b32828..64b1f436c38f4 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_metrics.h +++ b/src/mongo/db/s/metrics/sharding_data_transform_metrics.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/base/string_data.h" namespace mongo { diff --git a/src/mongo/db/s/metrics/sharding_data_transform_metrics_macros_test.cpp b/src/mongo/db/s/metrics/sharding_data_transform_metrics_macros_test.cpp index 96d0e1b064ff5..7c509a80d75f7 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_metrics_macros_test.cpp +++ b/src/mongo/db/s/metrics/sharding_data_transform_metrics_macros_test.cpp @@ -29,8 +29,24 @@ #include "mongo/db/s/metrics/sharding_data_transform_metrics_macros.h" +#include +#include +#include +// IWYU pragma: no_include "boost/preprocessor/detail/limits/auto_rec_256.hpp" +#include +// IWYU pragma: no_include "boost/preprocessor/repetition/detail/limits/for_256.hpp" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/s/resharding/common_types_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/metrics/sharding_data_transform_metrics_observer.cpp b/src/mongo/db/s/metrics/sharding_data_transform_metrics_observer.cpp index 61b133b2397c1..c531c8ea8747c 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_metrics_observer.cpp +++ b/src/mongo/db/s/metrics/sharding_data_transform_metrics_observer.cpp @@ -29,6 +29,8 @@ #include "mongo/db/s/metrics/sharding_data_transform_metrics_observer.h" +#include + namespace mongo { ShardingDataTransformMetricsObserver::ShardingDataTransformMetricsObserver( diff --git a/src/mongo/db/s/metrics/sharding_data_transform_metrics_observer.h b/src/mongo/db/s/metrics/sharding_data_transform_metrics_observer.h index 50193bb1af622..38964bed32abe 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_metrics_observer.h +++ b/src/mongo/db/s/metrics/sharding_data_transform_metrics_observer.h @@ -29,8 +29,13 @@ #pragma once +#include + #include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_observer_interface.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h b/src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h index 73a6fa3fe9fc5..2858cf3d519ff 100644 --- a/src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h +++ b/src/mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h @@ -359,7 +359,7 @@ class ShardingDataTransformMetricsTestFixture : public unittest::Test { : kNoSpecialBehavior; auto& done = threadPFs[i].promise; threads.emplace_back( - [=, &storage, specialBehavior = 
std::move(specialBehavior), &done] { + [=, this, &storage, specialBehavior = std::move(specialBehavior), &done] { performRandomOperations( storage, kIterations, kRemovalOdds, seed, specialBehavior); done.emplaceValue(); diff --git a/src/mongo/db/s/metrics/with_oplog_application_latency_metrics.h b/src/mongo/db/s/metrics/with_oplog_application_latency_metrics.h index 20028dce718c1..277b547c1bcba 100644 --- a/src/mongo/db/s/metrics/with_oplog_application_latency_metrics.h +++ b/src/mongo/db/s/metrics/with_oplog_application_latency_metrics.h @@ -41,6 +41,9 @@ class WithOplogApplicationLatencyMetrics : public Base { template WithOplogApplicationLatencyMetrics(Args&&... args) : Base{std::forward(args)...} {} + WithOplogApplicationLatencyMetrics(const WithOplogApplicationLatencyMetrics& other) = + default; + void onBatchRetrievedDuringOplogFetching(Milliseconds elapsed) { _oplogFetchingTotalRemoteBatchesRetrieved.fetchAndAdd(1); _oplogFetchingTotalRemoteBatchesRetrievalTimeMillis.fetchAndAdd( diff --git a/src/mongo/db/s/metrics/with_state_management_for_cumulative_metrics.h b/src/mongo/db/s/metrics/with_state_management_for_cumulative_metrics.h index 25568491835c8..fc2f64e9b63bf 100644 --- a/src/mongo/db/s/metrics/with_state_management_for_cumulative_metrics.h +++ b/src/mongo/db/s/metrics/with_state_management_for_cumulative_metrics.h @@ -49,6 +49,10 @@ class WithStateManagementForCumulativeMetrics : public Base { template WithStateManagementForCumulativeMetrics(Args&&... args) : Base{std::forward(args)...} {} + WithStateManagementForCumulativeMetrics( + const WithStateManagementForCumulativeMetrics& other) = + default; + template void onStateTransition(boost::optional before, boost::optional after) { getHolderFor().onStateTransition(before, after); diff --git a/src/mongo/db/s/migration_batch_fetcher.cpp b/src/mongo/db/s/migration_batch_fetcher.cpp index 80733c305d6d3..6f93b2324a816 100644 --- a/src/mongo/db/s/migration_batch_fetcher.cpp +++ b/src/mongo/db/s/migration_batch_fetcher.cpp @@ -28,6 +28,32 @@ */ #include "mongo/db/s/migration_batch_fetcher.h" + +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/s/migration_batch_mock_inserter.h" +#include "mongo/db/s/sharding_runtime_d_params_gen.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/grid.h" +#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/migration_batch_fetcher.h b/src/mongo/db/s/migration_batch_fetcher.h index 8d9bc13ad1365..d49089ea34f8e 100644 --- a/src/mongo/db/s/migration_batch_fetcher.h +++ b/src/mongo/db/s/migration_batch_fetcher.h @@ -27,7 +27,13 @@ * it in the license file. 
*/ +#include +#include + #include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" @@ -35,14 +41,19 @@ #include "mongo/db/s/migration_batch_inserter.h" #include "mongo/db/s/migration_batch_mock_inserter.h" #include "mongo/db/s/migration_session_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/client/shard.h" #include "mongo/s/grid.h" #include "mongo/util/cancellation.h" #include "mongo/util/concurrency/semaphore_ticketholder.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/producer_consumer_queue.h" +#include "mongo/util/uuid.h" #pragma once @@ -145,7 +156,7 @@ class MigrationBatchFetcher { // Only should be created once for the lifetime of the object. BSONObj _createMigrateCloneRequest() const { BSONObjBuilder builder; - builder.append("_migrateClone", _nss.ns()); + builder.append("_migrateClone", NamespaceStringUtil::serialize(_nss)); _sessionId.append(&builder); return builder.obj(); } @@ -161,10 +172,6 @@ class MigrationBatchFetcher { static void onCreateThread(const std::string& threadName) { Client::initThread(threadName, getGlobalServiceContext(), nullptr); - { - stdx::lock_guard lk(cc()); - cc().setSystemOperationKillableByStepdown(lk); - } } }; // namespace mongo diff --git a/src/mongo/db/s/migration_batch_fetcher_test.cpp b/src/mongo/db/s/migration_batch_fetcher_test.cpp index b47a190d00a92..cd55df7cb492b 100644 --- a/src/mongo/db/s/migration_batch_fetcher_test.cpp +++ b/src/mongo/db/s/migration_batch_fetcher_test.cpp @@ -27,31 +27,45 @@ * it in the license file. 
*/ +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/cancelable_operation_context.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/migration_batch_fetcher.h" +#include "mongo/db/s/migration_batch_mock_inserter.h" #include "mongo/db/s/migration_session_id.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/write_concern_options.h" -#include "mongo/dbtests/mock/mock_replica_set.h" -#include "mongo/executor/cancelable_executor.h" #include "mongo/executor/network_interface_mock.h" -#include "mongo/executor/thread_pool_mock.h" -#include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/grid.h" #include "mongo/stdx/future.h" -#include "mongo/stdx/thread.h" #include "mongo/unittest/assert.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/duration.h" +#include "mongo/unittest/framework.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/migration_batch_inserter.cpp b/src/mongo/db/s/migration_batch_inserter.cpp index a73d2b52eec5c..61b76d039bb81 100644 --- a/src/mongo/db/s/migration_batch_inserter.cpp +++ b/src/mongo/db/s/migration_batch_inserter.cpp @@ -29,8 +29,46 @@ #include "mongo/db/s/migration_batch_inserter.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/catalog/collection_operation_source.h" +#include "mongo/db/catalog/document_validation.h" +#include "mongo/db/client.h" +#include "mongo/db/ops/single_write_result_gen.h" +#include "mongo/db/ops/write_ops_exec.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/range_deletion_util.h" +#include "mongo/db/s/sharding_runtime_d_params_gen.h" #include "mongo/db/s/sharding_statistics.h" -#include "mongo/util/concurrency/ticketholder.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/session_catalog.h" +#include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" 
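
The migration_batch_inserter and migration_chunk_cloner_source hunks below add byte counters (`countBytesClonedOnRecipient`, `countBytesClonedOnDonor`) next to the existing document counters, so every transferred batch updates both. A minimal standalone sketch of that per-batch accounting, with plain `std::atomic` counters standing in for `ShardingStatistics` (all names in the sketch are illustrative):

```cpp
#include <atomic>
#include <cstdint>
#include <string>
#include <vector>

// Simplified stand-in for the sharding statistics touched by the migration code:
// every cloned batch bumps a document counter and a byte counter together.
struct CloneStats {
    std::atomic<int64_t> docsCloned{0};
    std::atomic<int64_t> bytesCloned{0};
};

// Record one transferred batch, mirroring how the inserter adds both the
// document count and the batch's serialized size after a successful insert.
void recordBatch(CloneStats& stats, const std::vector<std::string>& batch) {
    int64_t batchBytes = 0;
    for (const auto& doc : batch) {
        batchBytes += static_cast<int64_t>(doc.size());
    }
    stats.docsCloned.fetch_add(static_cast<int64_t>(batch.size()));
    stats.bytesCloned.fetch_add(batchBytes);
}

int main() {
    CloneStats stats;
    recordBatch(stats, {"{_id: 1}", "{_id: 2}"});
    // docsCloned == 2, bytesCloned == total serialized size of the batch
    return stats.docsCloned.load() == 2 ? 0 : 1;
}
```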
+#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingMigration @@ -87,10 +125,6 @@ void runWithoutSession(OperationContext* opCtx, Callable callable) { void MigrationBatchInserter::onCreateThread(const std::string& threadName) { Client::initThread(threadName, getGlobalServiceContext(), nullptr); - { - stdx::lock_guard lk(cc()); - cc().setSystemOperationKillableByStepdown(lk); - } } void MigrationBatchInserter::run(Status status) const try { @@ -165,9 +199,9 @@ void MigrationBatchInserter::run(Status status) const try { repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp()); ShardingStatistics::get(opCtx).countDocsClonedOnRecipient.addAndFetch(batchNumCloned); + ShardingStatistics::get(opCtx).countBytesClonedOnRecipient.addAndFetch(batchClonedBytes); LOGV2(6718408, - "Incrementing numCloned count by {batchNumCloned} and numClonedBytes by " - "{batchClonedBytes}", + "Incrementing cloned count by ", "batchNumCloned"_attr = batchNumCloned, "batchClonedBytes"_attr = batchClonedBytes); _migrationProgress->incNumCloned(batchNumCloned); diff --git a/src/mongo/db/s/migration_batch_inserter.h b/src/mongo/db/s/migration_batch_inserter.h index 7512427a488aa..2515aa62da1e3 100644 --- a/src/mongo/db/s/migration_batch_inserter.h +++ b/src/mongo/db/s/migration_batch_inserter.h @@ -29,8 +29,13 @@ #pragma once +#include #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/cancelable_operation_context.h" #include "mongo/db/catalog/document_validation.h" @@ -38,6 +43,7 @@ #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_exec.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/migration_session_id.h" @@ -46,6 +52,7 @@ #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/platform/mutex.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/grid.h" #include "mongo/util/concurrency/semaphore_ticketholder.h" diff --git a/src/mongo/db/s/migration_chunk_cloner_source.cpp b/src/mongo/db/s/migration_chunk_cloner_source.cpp index 7a313964fc693..0513226585fbc 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source.cpp @@ -29,41 +29,78 @@ #include "mongo/db/s/migration_chunk_cloner_source.h" +#include +#include +#include +#include +#include +#include #include - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/client/read_preference.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/basic_types.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/client.h" +#include 
"mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/exec/working_set_common.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/keypattern.h" #include "mongo/db/ops/write_ops_retryability.h" -#include "mongo/db/query/get_executor.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/repl/replication_process.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_source_manager.h" -#include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/shard_key_index_util.h" #include "mongo/db/s/sharding_runtime_d_params_gen.h" #include "mongo/db/s/sharding_statistics.h" #include "mongo/db/s/start_chunk_clone_request.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" #include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/balancer_configuration.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/grid.h" -#include "mongo/stdx/mutex.h" +#include "mongo/s/request_types/migration_secondary_throttle_options.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/elapsed_tracker.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" @@ -83,6 +120,7 @@ const int kMaxObjectPerChunk{250000}; const Hours kMaxWaitToCommitCloneForJumboChunk(6); MONGO_FAIL_POINT_DEFINE(failTooMuchMemoryUsed); +MONGO_FAIL_POINT_DEFINE(hangAfterProcessingDeferredXferMods); /** * Returns true if the given BSON object in the shard key value pair format is within the given @@ -107,7 +145,7 @@ BSONObj createRequestWithSessionId(StringData commandName, const MigrationSessionId& sessionId, bool waitForSteadyOrDone = false) { BSONObjBuilder builder; - builder.append(commandName, nss.ns()); + builder.append(commandName, NamespaceStringUtil::serialize(nss)); builder.append("waitForSteadyOrDone", waitForSteadyOrDone); sessionId.append(&builder); return builder.obj(); @@ -339,7 +377,7 @@ Status MigrationChunkClonerSource::startClone(OperationContext* opCtx, invariant(!opCtx->lockState()->isLocked()); if (_sessionCatalogSource) { - _sessionCatalogSource->init(opCtx); + _sessionCatalogSource->init(opCtx, lsid); // Prime up the session migration source if there are oplog entries to migrate. 
_sessionCatalogSource->fetchNextOplog(opCtx); @@ -448,7 +486,7 @@ StatusWith MigrationChunkClonerSource::commitClone(OperationContext* op auto responseStatus = _callRecipient(opCtx, [&] { BSONObjBuilder builder; - builder.append(kRecvChunkCommit, nss().ns()); + builder.append(kRecvChunkCommit, NamespaceStringUtil::serialize(nss())); // For backward compatibility with v6.0 recipients. // TODO (SERVER-67844): Remove it once 7.0 becomes LTS. builder.append("acquireCSOnRecipient", true); @@ -457,7 +495,7 @@ StatusWith MigrationChunkClonerSource::commitClone(OperationContext* op }()); if (responseStatus.isOK()) { - _cleanup(); + _cleanup(true); if (_sessionCatalogSource && _sessionCatalogSource->hasMoreOplog()) { return {ErrorCodes::SessionTransferIncomplete, @@ -496,7 +534,7 @@ void MigrationChunkClonerSource::cancelClone(OperationContext* opCtx) noexcept { [[fallthrough]]; } case kNew: - _cleanup(); + _cleanup(false); break; default: MONGO_UNREACHABLE; @@ -543,8 +581,7 @@ void MigrationChunkClonerSource::onInsertOp(OperationContext* opCtx, void MigrationChunkClonerSource::onUpdateOp(OperationContext* opCtx, boost::optional preImageDoc, const BSONObj& postImageDoc, - const repl::OpTime& opTime, - const repl::OpTime& prePostImageOpTime) { + const repl::OpTime& opTime) { dassert(opCtx->lockState()->isCollectionLockedForMode(nss(), MODE_IX)); BSONElement idElement = postImageDoc["_id"]; @@ -564,7 +601,7 @@ void MigrationChunkClonerSource::onUpdateOp(OperationContext* opCtx, // the deletion of the preImage document so that the destination chunk does not receive an // outdated version of this document. if (preImageDoc && isDocInRange(*preImageDoc, getMin(), getMax(), _shardKeyPattern)) { - onDeleteOp(opCtx, *preImageDoc, opTime, prePostImageOpTime); + onDeleteOp(opCtx, *preImageDoc, opTime); } return; } @@ -584,8 +621,7 @@ void MigrationChunkClonerSource::onUpdateOp(OperationContext* opCtx, void MigrationChunkClonerSource::onDeleteOp(OperationContext* opCtx, const BSONObj& deletedDocId, - const repl::OpTime& opTime, - const repl::OpTime&) { + const repl::OpTime& opTime) { dassert(opCtx->lockState()->isCollectionLockedForMode(nss(), MODE_IX)); BSONElement idElement = deletedDocId["_id"]; @@ -723,6 +759,7 @@ void MigrationChunkClonerSource::_nextCloneBatchFromIndexScan(OperationContext* lk.unlock(); ShardingStatistics::get(opCtx).countDocsClonedOnDonor.addAndFetch(1); + ShardingStatistics::get(opCtx).countBytesClonedOnDonor.addAndFetch(obj.objsize()); } } catch (DBException& exception) { exception.addContext("Executor error while scanning for documents belonging to chunk"); @@ -794,6 +831,7 @@ void MigrationChunkClonerSource::_nextCloneBatchFromCloneRecordIds(OperationCont arrBuilder->append(doc->value()); ShardingStatistics::get(opCtx).countDocsClonedOnDonor.addAndFetch(1); + ShardingStatistics::get(opCtx).countBytesClonedOnDonor.addAndFetch(doc->value().objsize()); } } @@ -888,6 +926,12 @@ void MigrationChunkClonerSource::_processDeferredXferMods(OperationContext* opCt CollectionMetadata::extractDocumentKey(&_shardKeyPattern, newerVersionDoc); static_cast(_processUpdateForXferMod(preImageDocKey, postImageDocKey)); } + + hangAfterProcessingDeferredXferMods.execute([&](const auto& data) { + if (!deferredReloadOrDeletePreImageDocKeys.empty()) { + hangAfterProcessingDeferredXferMods.pauseWhileSet(); + } + }); } Status MigrationChunkClonerSource::nextModsBatch(OperationContext* opCtx, BSONObjBuilder* builder) { @@ -913,6 +957,11 @@ Status MigrationChunkClonerSource::nextModsBatch(OperationContext* 
opCtx, BSONOb updateList.splice(updateList.cbegin(), _reload); } + // It's important to abandon any open snapshots before processing updates so that we are sure + // that our snapshot is at least as new as those updates. It's possible for a stale snapshot to + // still be open from reads performed by _processDeferredXferMods(), above. + opCtx->recoveryUnit()->abandonSnapshot(); + BSONArrayBuilder arrDel(builder->subarrayStart("deleted")); auto noopFn = [](BSONObj idDoc, BSONObj* fullDoc) { *fullDoc = idDoc; @@ -943,12 +992,18 @@ Status MigrationChunkClonerSource::nextModsBatch(OperationContext* opCtx, BSONOb return Status::OK(); } -void MigrationChunkClonerSource::_cleanup() { +void MigrationChunkClonerSource::_cleanup(bool wasSuccessful) { stdx::unique_lock lk(_mutex); _state = kDone; _drainAllOutstandingOperationTrackRequests(lk); + if (wasSuccessful) { + invariant(_reload.empty()); + invariant(_deleted.empty()); + invariant(_deferredReloadOrDeletePreImageDocKeys.empty()); + } + _reload.clear(); _untransferredUpsertsCounter = 0; _deleted.clear(); @@ -1014,7 +1069,7 @@ MigrationChunkClonerSource::_getIndexScanExecutor(OperationContext* opCtx, if (!shardKeyIdx) { return {ErrorCodes::IndexNotFound, str::stream() << "can't find index with prefix " << _shardKeyPattern.toBSON() - << " in storeCurrentRecordId for " << nss().ns()}; + << " in storeCurrentRecordId for " << nss().toStringForErrorMsg()}; } // Assume both min and max non-empty, append MinKey's to make them fit chosen index @@ -1040,7 +1095,8 @@ Status MigrationChunkClonerSource::_storeCurrentRecordId(OperationContext* opCtx AutoGetCollection collection(opCtx, nss(), MODE_IS); if (!collection) { return {ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << nss().ns() << " does not exist."}; + str::stream() << "Collection " << nss().toStringForErrorMsg() + << " does not exist."}; } auto swExec = _getIndexScanExecutor( @@ -1119,7 +1175,7 @@ Status MigrationChunkClonerSource::_storeCurrentRecordId(OperationContext* opCtx if (!idIdx || !idIdx->getEntry()) { return {ErrorCodes::IndexNotFound, str::stream() << "can't find index '_id' in storeCurrentRecordId for " - << nss().ns()}; + << nss().toStringForErrorMsg()}; } averageObjectIdSize = @@ -1133,7 +1189,8 @@ Status MigrationChunkClonerSource::_storeCurrentRecordId(OperationContext* opCtx << maxRecsWhenFull << ", the maximum chunk size is " << _args.getMaxChunkSizeBytes() << ", average document size is " << avgRecSize << ". Found " << recCount << " documents in chunk " - << " ns: " << nss().ns() << " " << getMin() << " -> " << getMax()}; + << " ns: " << nss().toStringForErrorMsg() << " " << getMin() << " -> " + << getMax()}; } stdx::lock_guard lk(_mutex); @@ -1262,7 +1319,8 @@ Status MigrationChunkClonerSource::_checkRecipientCloningStatus(OperationContext _args.getMaxChunkSizeBytes(); int64_t maxUntransferredSessionsSize = BSONObjMaxUserSize * _args.getMaxChunkSizeBytes() / ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes; - if (estimatedUntransferredChunkPercentage < maxCatchUpPercentageBeforeBlockingWrites && + if (estimatedUntransferredChunkPercentage < + maxCatchUpPercentageBeforeBlockingWrites.load() && estimateUntransferredSessionsSize < maxUntransferredSessionsSize) { // The recipient is sufficiently caught-up with the writes on the donor. // Block writes, so that it can drain everything. 
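The hunk above decides when the donor may enter the critical section: writes are only blocked once the data still queued for transfer is a small percentage of the configured chunk size and the pending session data fits within a budget scaled to that chunk size. Below is a rough, self-contained C++ sketch of that heuristic; the names and constants are illustrative stand-ins rather than the server's actual declarations, and the atomic threshold simply mirrors the diff's switch to reading the parameter with `.load()`.

```cpp
#include <atomic>
#include <cstdint>

namespace example {

// Illustrative stand-ins for the server parameter and constants referenced above (assumed values).
inline std::atomic<int> maxCatchUpPercentageBeforeBlockingWrites{10};
constexpr int64_t kBSONObjMaxUserSize = 16 * 1024 * 1024;         // 16 MB user document limit
constexpr int64_t kDefaultMaxChunkSizeBytes = 128 * 1024 * 1024;  // default max chunk size

// Returns true when the recipient is considered caught up enough for the donor to
// block writes and let the recipient drain the remaining mods.
inline bool recipientSufficientlyCaughtUp(int64_t untransferredModsBytes,
                                          int64_t untransferredSessionsBytes,
                                          int64_t maxChunkSizeBytes) {
    const int64_t untransferredPercentage =
        maxChunkSizeBytes > 0 ? (untransferredModsBytes * 100) / maxChunkSizeBytes : 0;

    // Allow a proportionally larger session backlog when a larger chunk size is configured.
    const int64_t maxUntransferredSessionsBytes =
        kBSONObjMaxUserSize * maxChunkSizeBytes / kDefaultMaxChunkSizeBytes;

    return untransferredPercentage < maxCatchUpPercentageBeforeBlockingWrites.load() &&
        untransferredSessionsBytes < maxUntransferredSessionsBytes;
}

}  // namespace example
```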
@@ -1294,7 +1352,7 @@ Status MigrationChunkClonerSource::_checkRecipientCloningStatus(OperationContext << migrationSessionIdStatus.getStatus().toString()}; } - if (res["ns"].str() != nss().ns() || + if (res["ns"].str() != NamespaceStringUtil::serialize(nss()) || (res.hasField("fromShardId") ? (res["fromShardId"].str() != _args.getFromShard().toString()) : (res["from"].str() != _donorConnStr.toString())) || diff --git a/src/mongo/db/s/migration_chunk_cloner_source.h b/src/mongo/db/s/migration_chunk_cloner_source.h index 0010f12c3bbbc..6c6ad3872f319 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source.h +++ b/src/mongo/db/s/migration_chunk_cloner_source.h @@ -29,27 +29,53 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include #include #include #include #include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/optime.h" #include "mongo/db/s/migration_chunk_cloner_source.h" #include "mongo/db/s/migration_session_id.h" #include "mongo/db/s/session_catalog_migration_source.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/write_concern_options.h" #include "mongo/platform/mutex.h" #include "mongo/s/request_types/move_range_request_gen.h" #include "mongo/s/shard_key_pattern.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/concurrency/notification.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -192,9 +218,7 @@ class MigrationChunkClonerSource { void onUpdateOp(OperationContext* opCtx, boost::optional preImageDoc, const BSONObj& postImageDoc, - const repl::OpTime& opTime, - const repl::OpTime& prePostImageOpTime); - + const repl::OpTime& opTime); /** * Notifies this cloner that a delete happened to the collection, which it owns. It is up to the * cloner's implementation to decide what to do with this information and it is valid for the @@ -204,8 +228,7 @@ class MigrationChunkClonerSource { */ void onDeleteOp(OperationContext* opCtx, const BSONObj& deletedDocId, - const repl::OpTime& opTime, - const repl::OpTime& preImageOpTime); + const repl::OpTime& opTime); /** * Returns the migration session id associated with this cloner, so stale sessions can be @@ -475,7 +498,7 @@ class MigrationChunkClonerSource { // // If (_recordIdsIter == _recordIds.end() && _overflowDocs.empty() && // _inProgressReads == 0) then all documents have been returned to the destination.
- RecordIdSet::size_type _inProgressReads = 0; + int64_t _inProgressReads = 0; // This condition variable allows us to wait on the following condition: // Either we're done and the above condition is satisfied, or there is some document to @@ -490,7 +513,7 @@ class MigrationChunkClonerSource { * Idempotent method, which cleans up any previously initialized state. It is safe to be called * at any time, but no methods should be called after it. */ - void _cleanup(); + void _cleanup(bool wasSuccessful); /** * Synchronously invokes the recipient shard with the specified command and either returns the diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_bm.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_bm.cpp index 23278638a1919..f2e0441dd8c0e 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_bm.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_bm.cpp @@ -28,7 +28,16 @@ */ #include +#include +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/s/migration_chunk_cloner_source.h" namespace mongo { diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp index 31ee26db25cd3..3abdd3721c974 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_commands.cpp @@ -28,21 +28,46 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/action_set.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_chunk_cloner_source.h" +#include "mongo/db/s/migration_session_id.h" #include "mongo/db/s/migration_source_manager.h" +#include "mongo/db/service_context.h" #include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/notification.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -73,9 +98,14 @@ class AutoGetActiveCloner { _autoColl.emplace(opCtx, *nss, MODE_IS); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << nss->ns() << " does not exist", + str::stream() << "Collection " << nss->toStringForErrorMsg() << " does not exist", _autoColl->getCollection()); + uassert(ErrorCodes::NotWritablePrimary, + "No longer primary when trying to acquire active migrate cloner", + 
opCtx->writesAreReplicated() && + repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, *nss)); + { const auto scopedCsr = CollectionShardingRuntime::assertCollectionLockedAndAcquireShared(opCtx, *nss); @@ -84,8 +114,8 @@ class AutoGetActiveCloner { invariant(_chunkCloner); } else { uasserted(ErrorCodes::IllegalOperation, - str::stream() - << "No active migrations were found for collection " << nss->ns()); + str::stream() << "No active migrations were found for collection " + << nss->toStringForErrorMsg()); } } @@ -145,11 +175,11 @@ class InitialCloneCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -216,11 +246,11 @@ class TransferModsCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -274,11 +304,11 @@ class MigrateSessionCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -302,7 +332,7 @@ class MigrateSessionCommand : public BasicCommand { writeConflictRetry( opCtx, "Fetching session related oplogs for migration", - NamespaceString::kRsOplogNamespace.ns(), + NamespaceString::kRsOplogNamespace, [&]() { AutoGetActiveCloner autoCloner(opCtx, migrationSessionId, false); opTime = autoCloner.getCloner()->nextSessionMigrationBatch(opCtx, arrBuilder); diff --git a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp index bceac76154cf2..61da6711162b8 100644 --- a/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp +++ b/src/mongo/db/s/migration_chunk_cloner_source_legacy_test.cpp @@ -27,22 +27,100 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/catalog/capped_visibility.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/collection_options_gen.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_chunk_cloner_source.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" +#include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/s/database_version.h" +#include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" 
+#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { namespace { @@ -50,7 +128,7 @@ namespace { using executor::RemoteCommandRequest; using unittest::assertGet; -const NamespaceString kNss("TestDB", "TestColl"); +const NamespaceString kNss = NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); const std::string kShardKey = "X"; const BSONObj kShardKeyPattern{BSON(kShardKey << 1)}; const ConnectionString kDonorConnStr = @@ -230,6 +308,15 @@ class CollectionWithFault : public Collection { MONGO_UNREACHABLE; } + bool timeseriesBucketingParametersMayHaveChanged() const override { + return _coll->timeseriesBucketingParametersMayHaveChanged(); + } + + void setTimeseriesBucketingParametersChanged(OperationContext* opCtx, + boost::optional value) override { + MONGO_UNREACHABLE; + } + bool doesTimeseriesBucketsDocContainMixedSchemaData(const BSONObj& bucketsDoc) const override { return _coll->doesTimeseriesBucketsDocContainMixedSchemaData(bucketsDoc); } @@ -436,14 +523,6 @@ class CollectionWithFault : public Collection { return _coll->getIndexFreeStorageBytes(opCtx); } - boost::optional getMinimumVisibleSnapshot() const override { - return _coll->getMinimumVisibleSnapshot(); - } - - void setMinimumVisibleSnapshot(Timestamp name) override { - MONGO_UNREACHABLE; - } - boost::optional getMinimumValidSnapshot() const override { return _coll->getMinimumValidSnapshot(); } @@ -779,9 +858,9 @@ TEST_F(MigrationChunkClonerSourceTest, CorrectDocumentsFetched) { cloner.onInsertOp(operationContext(), createCollectionDocument(151), {}); cloner.onInsertOp(operationContext(), createCollectionDocument(210), {}); - cloner.onDeleteOp(operationContext(), createCollectionDocument(80), {}, {}); - cloner.onDeleteOp(operationContext(), createCollectionDocument(199), {}, {}); - cloner.onDeleteOp(operationContext(), createCollectionDocument(220), {}, {}); + cloner.onDeleteOp(operationContext(), createCollectionDocument(80), {}); + cloner.onDeleteOp(operationContext(), createCollectionDocument(199), {}); + cloner.onDeleteOp(operationContext(), createCollectionDocument(220), {}); wuow.commit(); } @@ -876,19 +955,17 @@ TEST_F(MigrationChunkClonerSourceTest, RemoveDuplicateDocuments) { WriteUnitOfWork wuow(operationContext()); - cloner.onDeleteOp(operationContext(), createCollectionDocument(100), {}, {}); + cloner.onDeleteOp(operationContext(), createCollectionDocument(100), {}); cloner.onInsertOp(operationContext(), createCollectionDocument(100), {}); - cloner.onDeleteOp(operationContext(), createCollectionDocument(100), {}, {}); + cloner.onDeleteOp(operationContext(), createCollectionDocument(100), {}); cloner.onUpdateOp(operationContext(), createCollectionDocument(199), createCollectionDocumentForUpdate(199, 198), - {}, {}); cloner.onUpdateOp(operationContext(), createCollectionDocument(199), createCollectionDocumentForUpdate(199, 197), - {}, {}); wuow.commit(); diff --git a/src/mongo/db/s/migration_chunk_cloner_source_op_observer.cpp b/src/mongo/db/s/migration_chunk_cloner_source_op_observer.cpp new file mode 100644 index 0000000000000..291c4b95050b4 --- /dev/null +++ b/src/mongo/db/s/migration_chunk_cloner_source_op_observer.cpp @@ -0,0 +1,378 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/s/migration_chunk_cloner_source_op_observer.h" + +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection_operation_source.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/op_observer/op_observer_util.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/migration_chunk_cloner_source.h" +#include "mongo/db/s/migration_source_manager.h" +#include "mongo/db/s/sharding_write_router.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding + +namespace mongo { +namespace { + +// Used to coordinate delete operations between aboutToDelete() and onDelete(). +const auto getIsMigrating = OplogDeleteEntryArgs::declareDecoration(); + +} // namespace + +// static +void MigrationChunkClonerSourceOpObserver::assertIntersectingChunkHasNotMoved( + OperationContext* opCtx, + const CollectionMetadata& metadata, + const BSONObj& shardKey, + const LogicalTime& atClusterTime) { + // We can assume the simple collation because shard keys do not support non-simple collations. + auto cmAtTimeOfWrite = + ChunkManager::makeAtTime(*metadata.getChunkManager(), atClusterTime.asTimestamp()); + auto chunk = cmAtTimeOfWrite.findIntersectingChunkWithSimpleCollation(shardKey); + + // Throws if the chunk has moved since the timestamp of the running transaction's atClusterTime + // read concern parameter. 
+ chunk.throwIfMoved(); +} + +// static +void MigrationChunkClonerSourceOpObserver::assertNoMovePrimaryInProgress( + OperationContext* opCtx, const NamespaceString& nss) { + if (!nss.isNormalCollection() && nss.coll() != "system.views" && + !nss.isTimeseriesBucketsCollection()) { + return; + } + + // TODO SERVER-58222: evaluate whether this is safe or whether acquiring the lock can block. + AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); + Lock::DBLock dblock(opCtx, nss.dbName(), MODE_IS); + + const auto scopedDss = + DatabaseShardingState::assertDbLockedAndAcquireShared(opCtx, nss.dbName()); + if (scopedDss->isMovePrimaryInProgress()) { + LOGV2(4908600, "assertNoMovePrimaryInProgress", logAttrs(nss)); + + uasserted(ErrorCodes::MovePrimaryInProgress, + "movePrimary is in progress for namespace " + nss.toStringForErrorMsg()); + } +} + +void MigrationChunkClonerSourceOpObserver::onUnpreparedTransactionCommit( + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* const opAccumulator) { + // Return early if we are secondary or in some replication state in which we are not + // appending entries to the oplog. + if (!opCtx->writesAreReplicated()) { + return; + } + + const auto& statements = transactionOperations.getOperationsForOpObserver(); + + // It is possible that the transaction resulted in no changes. In that case, we should + // not write an empty applyOps entry. + if (statements.empty()) { + return; + } + + if (!opAccumulator) { + return; + } + + const auto& commitOpTime = opAccumulator->opTime.writeOpTime; + invariant(!commitOpTime.isNull()); + + opCtx->recoveryUnit()->registerChange( + std::make_unique( + *opCtx->getLogicalSessionId(), statements, commitOpTime)); +} + +void MigrationChunkClonerSourceOpObserver::onInserts( + OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { + // Take ownership of ShardingWriteRouter attached to the op accumulator by OpObserverImpl. + // Release upon return from this function because this resource is not needed by downstream + // OpObserver instances. + // If there's no ShardingWriteRouter instance available, it means that OpObserverImpl did not + // get far enough to require one so there's nothing to do here but return early. 
+ auto shardingWriteRouter = + std::move(shardingWriteRouterOpStateAccumulatorDecoration(opAccumulator)); + if (!shardingWriteRouter) { + return; + } + + if (defaultFromMigrate) { + return; + } + + const auto& nss = coll->ns(); + if (nss == NamespaceString::kSessionTransactionsTableNamespace) { + return; + } + + auto* const css = shardingWriteRouter->getCss(); + css->checkShardVersionOrThrow(opCtx); + DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.dbName()); + + auto* const csr = checked_cast(css); + auto metadata = csr->getCurrentMetadataIfKnown(); + if (!metadata || !metadata->isSharded()) { + MigrationChunkClonerSourceOpObserver::assertNoMovePrimaryInProgress(opCtx, nss); + return; + } + + auto txnParticipant = TransactionParticipant::get(opCtx); + const bool inMultiDocumentTransaction = + txnParticipant && opCtx->writesAreReplicated() && txnParticipant.transactionIsOpen(); + if (inMultiDocumentTransaction && !opCtx->getWriteUnitOfWork()) { + return; + } + + int index = 0; + const auto& opTimeList = opAccumulator->insertOpTimes; + for (auto it = first; it != last; it++, index++) { + auto opTime = opTimeList.empty() ? repl::OpTime() : opTimeList[index]; + + if (inMultiDocumentTransaction) { + const auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime(); + + if (atClusterTime) { + const auto shardKey = + metadata->getShardKeyPattern().extractShardKeyFromDocThrows(it->doc); + MigrationChunkClonerSourceOpObserver::assertIntersectingChunkHasNotMoved( + opCtx, *metadata, shardKey, *atClusterTime); + } + + return; + } + + auto cloner = MigrationSourceManager::getCurrentCloner(*csr); + if (cloner) { + cloner->onInsertOp(opCtx, it->doc, opTime); + } + } +} + +void MigrationChunkClonerSourceOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { + // Take ownership of ShardingWriteRouter attached to the op accumulator by OpObserverImpl. + // Release upon return from this function because this resource is not needed by downstream + // OpObserver instances. + // If there's no ShardingWriteRouter instance available, it means that OpObserverImpl did not + // get far enough to require one so there's nothing to do here but return early. 
+ auto shardingWriteRouter = + std::move(shardingWriteRouterOpStateAccumulatorDecoration(opAccumulator)); + if (!shardingWriteRouter) { + return; + } + + if (args.updateArgs->source == OperationSource::kFromMigrate) { + return; + } + + if (args.updateArgs->update.isEmpty()) { + return; + } + + const auto& nss = args.coll->ns(); + if (nss == NamespaceString::kSessionTransactionsTableNamespace) { + return; + } + + const auto& preImageDoc = args.updateArgs->preImageDoc; + const auto& postImageDoc = args.updateArgs->updatedDoc; + + auto* const css = shardingWriteRouter->getCss(); + css->checkShardVersionOrThrow(opCtx); + DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.dbName()); + + auto* const csr = checked_cast(css); + auto metadata = csr->getCurrentMetadataIfKnown(); + if (!metadata || !metadata->isSharded()) { + MigrationChunkClonerSourceOpObserver::assertNoMovePrimaryInProgress(opCtx, nss); + return; + } + + auto txnParticipant = TransactionParticipant::get(opCtx); + const bool inMultiDocumentTransaction = + txnParticipant && opCtx->writesAreReplicated() && txnParticipant.transactionIsOpen(); + if (inMultiDocumentTransaction) { + if (auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime()) { + const auto shardKey = + metadata->getShardKeyPattern().extractShardKeyFromDocThrows(postImageDoc); + MigrationChunkClonerSourceOpObserver::assertIntersectingChunkHasNotMoved( + opCtx, *metadata, shardKey, *atClusterTime); + } + + return; + } + + auto cloner = MigrationSourceManager::getCurrentCloner(*csr); + if (cloner) { + cloner->onUpdateOp(opCtx, preImageDoc, postImageDoc, opAccumulator->opTime.writeOpTime); + } +} + +void MigrationChunkClonerSourceOpObserver::aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& docToDelete, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { + const auto& nss = coll->ns(); + getIsMigrating(args) = MigrationSourceManager::isMigrating(opCtx, nss, docToDelete); +} + +void MigrationChunkClonerSourceOpObserver::onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { + if (args.fromMigrate) { + return; + } + + const auto& nss = coll->ns(); + if (nss == NamespaceString::kSessionTransactionsTableNamespace) { + return; + } + + ShardingWriteRouter shardingWriteRouter(opCtx, nss); + auto* const css = shardingWriteRouter.getCss(); + css->checkShardVersionOrThrow(opCtx); + DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.dbName()); + + auto* const csr = checked_cast(css); + auto metadata = csr->getCurrentMetadataIfKnown(); + if (!metadata || !metadata->isSharded()) { + assertNoMovePrimaryInProgress(opCtx, nss); + return; + } + + auto getShardKeyAndId = [&nss](const OplogDeleteEntryArgs& args) -> BSONObj { + auto optDocKey = documentKeyDecoration(args); + invariant(optDocKey, nss.toStringForErrorMsg()); + return optDocKey.value().getShardKeyAndId(); + }; + + auto txnParticipant = TransactionParticipant::get(opCtx); + const bool inMultiDocumentTransaction = + txnParticipant && opCtx->writesAreReplicated() && txnParticipant.transactionIsOpen(); + if (inMultiDocumentTransaction) { + const auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime(); + + if (atClusterTime) { + const auto shardKey = + metadata->getShardKeyPattern().extractShardKeyFromDocumentKeyThrows( + getShardKeyAndId(args)); + assertIntersectingChunkHasNotMoved(opCtx, *metadata, shardKey, *atClusterTime); + 
} + + return; + } + + auto cloner = MigrationSourceManager::getCurrentCloner(*csr); + if (cloner && getIsMigrating(args)) { + const auto& opTime = opAccumulator->opTime.writeOpTime; + cloner->onDeleteOp(opCtx, getShardKeyAndId(args), opTime); + } +} + +void MigrationChunkClonerSourceOpObserver::onTransactionPrepare( + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + size_t numberOfPrePostImagesToWrite, + Date_t wallClockTime) { + // Return early if we are secondary or in some replication state in which we are not + // appending entries to the oplog. + if (!opCtx->writesAreReplicated()) { + return; + } + + if (reservedSlots.empty()) { + return; + } + + const auto& prepareOpTime = reservedSlots.back(); + invariant(!prepareOpTime.isNull()); + + const auto& statements = transactionOperations.getOperationsForOpObserver(); + + opCtx->recoveryUnit()->registerChange( + std::make_unique( + *opCtx->getLogicalSessionId(), statements, prepareOpTime)); +} + +void MigrationChunkClonerSourceOpObserver::onTransactionPrepareNonPrimary( + OperationContext* opCtx, + const LogicalSessionId& lsid, + const std::vector& statements, + const repl::OpTime& prepareOpTime) { + opCtx->recoveryUnit()->registerChange( + std::make_unique( + lsid, statements, prepareOpTime)); +} + +} // namespace mongo diff --git a/src/mongo/db/s/migration_chunk_cloner_source_op_observer.h b/src/mongo/db/s/migration_chunk_cloner_source_op_observer.h new file mode 100644 index 0000000000000..7ab65a6b8330f --- /dev/null +++ b/src/mongo/db/s/migration_chunk_cloner_source_op_observer.h @@ -0,0 +1,131 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/s/collection_metadata.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/transaction/transaction_operations.h" +#include "mongo/util/time_support.h" + +namespace mongo { + +/** + * OpObserver that forwards operations during migration to the chunk cloner. + * + * Contains logic that used to reside in OpObserverImpl that was extracted in SERVER-36084 + * and placed in OpObserverShardingImpl as privately overridden functions separate from the + * OpObserver methods. + * + * This class replaces OpObserverShardingImpl without deriving directly from OpObserverImpl + * while implementing the standard OpObserver methods. OpObserverShardingImpl was removed in + * SERVER-76271. + * + * See ShardServerOpObserver. + */ +class MigrationChunkClonerSourceOpObserver final : public OpObserverNoop { +public: + /** + * Write operations do shard version checking, but if an update operation runs as part of a + * 'readConcern:snapshot' transaction, the router could have used the metadata at the snapshot + * time and yet set the latest shard version on the request. This is why the write can get + * routed to a shard which no longer owns the chunk being written to. In such cases, throw a + * MigrationConflict exception to indicate that the transaction needs to be rolled-back and + * restarted. + */ + static void assertIntersectingChunkHasNotMoved(OperationContext* opCtx, + const CollectionMetadata& metadata, + const BSONObj& shardKey, + const LogicalTime& atClusterTime); + + /** + * Ensures that there is no movePrimary operation in progress for the given namespace. 
+ */ + static void assertNoMovePrimaryInProgress(OperationContext* opCtx, const NamespaceString& nss); + + void onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& docToDelete, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onUnpreparedTransactionCommit( + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onTransactionPrepare( + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + size_t numberOfPrePostImagesToWrite, + Date_t wallClockTime) final; + + void onTransactionPrepareNonPrimary(OperationContext* opCtx, + const LogicalSessionId& lsid, + const std::vector& statements, + const repl::OpTime& prepareOpTime) final; +}; + +} // namespace mongo diff --git a/src/mongo/db/s/migration_coordinator.cpp b/src/mongo/db/s/migration_coordinator.cpp index a44be90514a75..0d8d026f30492 100644 --- a/src/mongo/db/s/migration_coordinator.cpp +++ b/src/mongo/db/s/migration_coordinator.cpp @@ -29,17 +29,38 @@ #include "mongo/db/s/migration_coordinator.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/persistent_task_store.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_util.h" #include "mongo/db/s/range_deleter_service.h" #include "mongo/db/s/range_deletion_task_gen.h" #include "mongo/db/s/range_deletion_util.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/atomic_word.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingMigration @@ -259,26 +280,6 @@ SharedSemiFuture MigrationCoordinator::_commitMigrationOnDonorAndRecipient deletionTask.setKeyPattern(*_shardKeyPattern); } - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The feature - // flag is used to turn on new range deleter on startup. 
- if (!feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - LOGV2_DEBUG(23897, - 2, - "Marking range deletion task on donor as ready for processing", - "migrationId"_attr = _migrationInfo.getId()); - migrationutil::markAsReadyRangeDeletionTaskLocally( - opCtx, _migrationInfo.getCollectionUuid(), _migrationInfo.getRange()); - - // At this point the decision cannot be changed and will be recovered in the event of a - // failover, so it is safe to schedule the deletion task after updating the persisted state. - LOGV2_DEBUG(23898, - 2, - "Scheduling range deletion task on donor", - "migrationId"_attr = _migrationInfo.getId()); - - return migrationutil::submitRangeDeletionTask(opCtx, deletionTask).share(); - } - auto waitForActiveQueriesToComplete = [&]() { AutoGetCollection autoColl(opCtx, deletionTask.getNss(), MODE_IS); diff --git a/src/mongo/db/s/migration_coordinator.h b/src/mongo/db/s/migration_coordinator.h index a819524433f4c..4b4f60bfb3d9f 100644 --- a/src/mongo/db/s/migration_coordinator.h +++ b/src/mongo/db/s/migration_coordinator.h @@ -29,10 +29,22 @@ #pragma once +#include + +#include +#include + +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/migration_coordinator_document_gen.h" +#include "mongo/db/s/migration_session_id.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" #include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { namespace migrationutil { diff --git a/src/mongo/db/s/migration_destination_manager.cpp b/src/mongo/db/s/migration_destination_manager.cpp index 81a062c24916c..bb70c6e177f63 100644 --- a/src/mongo/db/s/migration_destination_manager.cpp +++ b/src/mongo/db/s/migration_destination_manager.cpp @@ -28,33 +28,58 @@ */ -#include "mongo/db/s/migration_batch_fetcher.h" -#include "mongo/platform/basic.h" - -#include "mongo/db/s/migration_destination_manager.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include #include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/document_validation.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/delete.h" -#include "mongo/db/ops/write_ops_exec.h" +#include 
"mongo/db/ops/update_result.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/s/migration_batch_fetcher.h" +#include "mongo/db/s/migration_destination_manager.h" #include "mongo/db/s/migration_util.h" #include "mongo/db/s/move_timing_helper.h" #include "mongo/db/s/operation_sharding_state.h" @@ -68,32 +93,55 @@ #include "mongo/db/s/start_chunk_clone_request.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/storage/remove_saver.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/vector_clock.h" #include "mongo/db/write_block_bypass.h" +#include "mongo/db/write_concern.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" -#include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" #include "mongo/s/shard_key_pattern.h" #include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/stdx/chrono.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/producer_consumer_queue.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingMigration namespace mongo { namespace { +MONGO_FAIL_POINT_DEFINE(hangMigrationRecipientBeforeWaitingNoIndexBuildInProgress); + const auto getMigrationDestinationManager = ServiceContext::declareDecoration(); @@ -261,7 +309,7 @@ bool opReplicatedEnough(OperationContext* opCtx, */ BSONObj createMigrateCloneRequest(const NamespaceString& nss, const MigrationSessionId& sessionId) { BSONObjBuilder builder; - builder.append("_migrateClone", nss.ns()); + builder.append("_migrateClone", NamespaceStringUtil::serialize(nss)); sessionId.append(&builder); return builder.obj(); } @@ -273,7 +321,7 @@ BSONObj createMigrateCloneRequest(const NamespaceString& nss, const MigrationSes */ BSONObj createTransferModsRequest(const NamespaceString& nss, const MigrationSessionId& 
sessionId) { BSONObjBuilder builder; - builder.append("_transferMods", nss.ns()); + builder.append("_transferMods", NamespaceStringUtil::serialize(nss)); sessionId.append(&builder); return builder.obj(); } @@ -335,6 +383,7 @@ MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep3); MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep4); MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep5); MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep6); +MONGO_FAIL_POINT_DEFINE(migrateThreadHangAfterSteadyTransition); MONGO_FAIL_POINT_DEFINE(migrateThreadHangAtStep7); MONGO_FAIL_POINT_DEFINE(failMigrationOnRecipient); @@ -435,7 +484,7 @@ void MigrationDestinationManager::report(BSONObjBuilder& b, b.append("sessionId", _sessionId->toString()); } - b.append("ns", _nss.ns()); + b.append("ns", NamespaceStringUtil::serialize(_nss)); b.append("from", _fromShardConnString.toString()); b.append("fromShardId", _fromShard.toString()); b.append("min", _min); @@ -595,11 +644,6 @@ repl::OpTime MigrationDestinationManager::fetchAndApplyBatch( stdx::thread applicationThread{[&] { Client::initThread("batchApplier", opCtx->getServiceContext(), nullptr); - auto client = Client::getCurrent(); - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } auto executor = Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); auto applicationOpCtx = CancelableOperationContext( @@ -830,8 +874,8 @@ MigrationDestinationManager::IndexesAndIdIndex MigrationDestinationManager::getC // Do not hold any locks while issuing remote calls. invariant(!opCtx->lockState()->isLocked()); - auto cmd = nssOrUUID.nss() ? BSON("listIndexes" << nssOrUUID.nss()->coll()) - : BSON("listIndexes" << *nssOrUUID.uuid()); + auto cmd = nssOrUUID.isNamespaceString() ? BSON("listIndexes" << nssOrUUID.nss().coll()) + : BSON("listIndexes" << nssOrUUID.uuid()); if (cri) { cmd = appendShardVersion(cmd, cri->getShardVersion(fromShardId)); } @@ -843,10 +887,9 @@ MigrationDestinationManager::IndexesAndIdIndex MigrationDestinationManager::getC auto indexes = uassertStatusOK( fromShard->runExhaustiveCursorCommand(opCtx, ReadPreferenceSetting(ReadPreference::PrimaryOnly), - nssOrUUID.db().toString(), + DatabaseNameUtil::serialize(nssOrUUID.dbName()), cmd, Milliseconds(-1))); - for (auto&& spec : indexes.docs) { if (spec[IndexDescriptor::kClusteredFieldName]) { // The 'clustered' index is implicitly created upon clustered collection creation. @@ -875,9 +918,9 @@ MigrationDestinationManager::getCollectionOptions(OperationContext* opCtx, BSONObj fromOptions; - auto cmd = nssOrUUID.nss() - ? BSON("listCollections" << 1 << "filter" << BSON("name" << nssOrUUID.nss()->coll())) - : BSON("listCollections" << 1 << "filter" << BSON("info.uuid" << *nssOrUUID.uuid())); + auto cmd = nssOrUUID.isNamespaceString() + ? 
BSON("listCollections" << 1 << "filter" << BSON("name" << nssOrUUID.nss().coll())) + : BSON("listCollections" << 1 << "filter" << BSON("info.uuid" << nssOrUUID.uuid())); if (cm) { cmd = appendDbVersionIfPresent(cmd, cm->dbVersion()); } @@ -889,15 +932,15 @@ MigrationDestinationManager::getCollectionOptions(OperationContext* opCtx, auto infosRes = uassertStatusOK( fromShard->runExhaustiveCursorCommand(opCtx, ReadPreferenceSetting(ReadPreference::PrimaryOnly), - nssOrUUID.db().toString(), + DatabaseNameUtil::serialize(nssOrUUID.dbName()), cmd, Milliseconds(-1))); auto infos = infosRes.docs; uassert(ErrorCodes::NamespaceNotFound, str::stream() << "expected listCollections against the primary shard for " - << nssOrUUID.toString() << " to return 1 entry, but got " << infos.size() - << " entries", + << nssOrUUID.toStringForErrorMsg() << " to return 1 entry, but got " + << infos.size() << " entries", infos.size() == 1); @@ -918,8 +961,9 @@ MigrationDestinationManager::getCollectionOptions(OperationContext* opCtx, uassert(ErrorCodes::InvalidUUID, str::stream() << "The from shard did not return a UUID for collection " - << nssOrUUID.toString() << " as part of its listCollections response: " - << entry << ", but this node expects to see a UUID.", + << nssOrUUID.toStringForErrorMsg() + << " as part of its listCollections response: " << entry + << ", but this node expects to see a UUID.", !info["uuid"].eoo()); auto fromUUID = info["uuid"].uuid(); @@ -943,10 +987,7 @@ void MigrationDestinationManager::_dropLocalIndexesIfNecessary( if (auto optMetadata = scopedCsr->getCurrentMetadataIfKnown()) { const auto& metadata = *optMetadata; if (metadata.isSharded()) { - auto chunks = metadata.getChunks(); - if (chunks.empty()) { - return true; - } + return !metadata.currentShardHasAnyChunks(); } } return false; @@ -994,28 +1035,44 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions( // Checks that the collection's UUID matches the donor's. auto checkUUIDsMatch = [&](const Collection* collection) { uassert(ErrorCodes::NotWritablePrimary, - str::stream() << "Unable to create collection " << nss.ns() + str::stream() << "Unable to create collection " << nss.toStringForErrorMsg() << " because the node is not primary", repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); uassert(ErrorCodes::InvalidUUID, str::stream() - << "Cannot create collection " << nss.ns() + << "Cannot create collection " << nss.toStringForErrorMsg() << " because we already have an identically named collection with UUID " << collection->uuid() << ", which differs from the donor's UUID " << collectionOptionsAndIndexes.uuid << ". Manually drop the collection on this shard if it contains data from " "a previous incarnation of " - << nss.ns(), + << nss.toStringForErrorMsg(), collection->uuid() == collectionOptionsAndIndexes.uuid); }; - // Gets the missing indexes and checks if the collection is empty (auto-healing is - // possible). + bool isFirstMigration = [&] { + AutoGetCollection collection(opCtx, nss, MODE_IS); + const auto scopedCsr = + CollectionShardingRuntime::assertCollectionLockedAndAcquireShared(opCtx, nss); + if (auto optMetadata = scopedCsr->getCurrentMetadataIfKnown()) { + const auto& metadata = *optMetadata; + return metadata.isSharded() && !metadata.currentShardHasAnyChunks(); + } + return false; + }(); + + // Check if there are missing indexes on the recipient shard from the donor. + // If it is the first migration, do not consider in-progress index builds. 
Otherwise, + // consider in-progress index builds as ready. Then, if there are missing indexes and the + // collection is not empty, fail the migration. On the other hand, if the collection is + // empty, wait for index builds to finish if it is the first migration. + bool waitForInProgressIndexBuildCompletion = false; + auto checkEmptyOrGetMissingIndexesFromDonor = [&](const CollectionPtr& collection) { auto indexCatalog = collection->getIndexCatalog(); auto indexSpecs = indexCatalog->removeExistingIndexesNoChecks( - opCtx, collection, collectionOptionsAndIndexes.indexSpecs); + opCtx, collection, collectionOptionsAndIndexes.indexSpecs, !isFirstMigration); if (!indexSpecs.empty()) { // Only allow indexes to be copied if the collection does not have any documents. uassert(ErrorCodes::CannotCreateCollection, @@ -1024,6 +1081,10 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions( << "collection is not empty. Non-trivial " << "index creation should be scheduled manually", collection->isEmpty(opCtx)); + + // If it is the first migration, mark waitForInProgressIndexBuildCompletion as true + // to wait for index builds to be finished after releasing the locks. + waitForInProgressIndexBuildCompletion = isFirstMigration; } return indexSpecs; }; @@ -1041,6 +1102,19 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions( } } + // Before taking the exclusive database lock for cloning the remaining indexes, wait for + // index builds to finish if it is the first migration. + if (waitForInProgressIndexBuildCompletion) { + if (MONGO_unlikely( + hangMigrationRecipientBeforeWaitingNoIndexBuildInProgress.shouldFail())) { + LOGV2(7677900, "Hanging before waiting for in-progress index builds to finish"); + hangMigrationRecipientBeforeWaitingNoIndexBuildInProgress.pauseWhileSet(); + } + + IndexBuildsCoordinator::get(opCtx)->awaitNoIndexBuildInProgressForCollection( + opCtx, collectionOptionsAndIndexes.uuid); + } + // Take the exclusive database lock if the collection does not exist or indexes are missing // (needs auto-heal). AutoGetDb autoDb(opCtx, nss.dbName(), MODE_X); @@ -1055,16 +1129,15 @@ void MigrationDestinationManager::cloneCollectionIndexesAndOptions( opCtx, collectionOptionsAndIndexes.uuid)) { uasserted(5860300, str::stream() - << "Cannot create collection " << nss << " with UUID " - << collectionOptionsAndIndexes.uuid + << "Cannot create collection " << nss.toStringForErrorMsg() + << " with UUID " << collectionOptionsAndIndexes.uuid << " because it conflicts with the UUID of an existing collection " - << collectionByUUID->ns()); + << collectionByUUID->ns().toStringForErrorMsg()); } - // We do not have a collection by this name. Create the collection with the donor's - // options. + // We do not have a collection by this name. Create it with the donor's options. 
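Editor's note: the first-migration index handling introduced earlier in cloneCollectionIndexesAndOptions (the isFirstMigration lambda, the extra boolean passed to removeExistingIndexesNoChecks, and the later awaitNoIndexBuildInProgressForCollection call) is easier to follow in isolation. The sketch below condenses that decision flow under the patch's own names; the helper itself is hypothetical, the boolean parameter is passed positionally in the patch (its real name is not shown in the hunk), and it assumes the same includes as the surrounding file.

namespace {
// Condensed, illustrative version of the recipient-side decision flow; not part of the patch.
std::vector<BSONObj> getMissingDonorIndexes(OperationContext* opCtx,
                                            const CollectionPtr& collection,
                                            const std::vector<BSONObj>& donorIndexSpecs,
                                            bool isFirstMigration,
                                            bool* waitForInProgressIndexBuilds) {
    auto indexCatalog = collection->getIndexCatalog();
    // First migration: in-progress index builds are not considered, so matching donor specs
    // stay in the "missing" set. Later migrations: in-progress builds are considered as ready.
    auto missingIndexSpecs = indexCatalog->removeExistingIndexesNoChecks(
        opCtx, collection, donorIndexSpecs, !isFirstMigration);
    if (!missingIndexSpecs.empty()) {
        // Donor indexes are only auto-created when the local collection is empty; otherwise
        // the migration fails and index creation must be scheduled manually.
        uassert(ErrorCodes::CannotCreateCollection,
                "collection is not empty; create the missing indexes manually",
                collection->isEmpty(opCtx));
        // On the first migration, any in-progress builds are allowed to finish (via
        // IndexBuildsCoordinator::awaitNoIndexBuildInProgressForCollection) before the
        // exclusive database lock is taken to clone the remaining indexes.
        *waitForInProgressIndexBuilds = isFirstMigration;
    }
    return missingIndexSpecs;
}
}  // namespace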
OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE - unsafeCreateCollection(opCtx); + unsafeCreateCollection(opCtx, /* forceCSRAsUnknownAfterCollectionCreation */ true); WriteUnitOfWork wuow(opCtx); CollectionOptions collectionOptions = uassertStatusOK( CollectionOptions::parse(collectionOptionsAndIndexes.options, @@ -1097,11 +1170,6 @@ void MigrationDestinationManager::_migrateThread(CancellationToken cancellationT Client::initThread("migrateThread"); auto client = Client::getCurrent(); - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } - bool recovering = false; while (true) { const auto executor = @@ -1191,8 +1259,15 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, boost::optional timeInCriticalSection; if (!skipToCritSecTaken) { - timing.emplace( - outerOpCtx, "to", _nss.ns(), _min, _max, 8 /* steps */, &_errmsg, _toShard, _fromShard); + timing.emplace(outerOpCtx, + "to", + NamespaceStringUtil::serialize(_nss), + _min, + _max, + 8 /* steps */, + &_errmsg, + _toShard, + _fromShard); LOGV2( 22000, @@ -1263,13 +1338,17 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, range, rangeDeletionWaitDeadline); - if (!status.isOK()) { + if (!status.isOK() && status != ErrorCodes::ExceededTimeLimit) { _setStateFail(redact(status.toString())); return; } - uassert(ErrorCodes::ExceededTimeLimit, - "Exceeded deadline waiting for overlapping range deletion to finish", + uassert( + ErrorCodes::ExceededTimeLimit, + "Migration failed because the orphans cleanup routine didn't clear yet a portion " + "of the range being migrated that was previously owned by the recipient " + "shard.", + status != ErrorCodes::ExceededTimeLimit && outerOpCtx->getServiceContext()->getFastClockSource()->now() < rangeDeletionWaitDeadline); @@ -1295,11 +1374,6 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, outerOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); { auto newClient = outerOpCtx->getServiceContext()->makeClient("MigrationCoordinator"); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } - AlternativeClientRegion acr(newClient); auto executor = Grid::get(outerOpCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); @@ -1360,10 +1434,6 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, } auto newClient = outerOpCtx->getServiceContext()->makeClient("MigrationCoordinator"); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto executor = Grid::get(outerOpCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); @@ -1440,6 +1510,8 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, if (!_applyMigrateOp(opCtx, nextBatch)) { return true; } + ShardingStatistics::get(opCtx).countBytesClonedOnCatchUpOnRecipient.addAndFetch( + nextBatch["size"].number()); const int maxIterations = 3600 * 50; @@ -1518,6 +1590,7 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, { // 6. 
Wait for commit _setState(kSteady); + migrateThreadHangAfterSteadyTransition.pauseWhileSet(); bool transferAfterCommit = false; while (getState() == kSteady || getState() == kCommitStart) { @@ -1545,7 +1618,8 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, auto mods = res.response; - if (mods["size"].number() > 0 && _applyMigrateOp(opCtx, mods)) { + if (mods["size"].number() > 0) { + (void)_applyMigrateOp(opCtx, mods); lastOpApplied = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); continue; } @@ -1631,10 +1705,6 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, } else { outerOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); auto newClient = outerOpCtx->getServiceContext()->makeClient("MigrationCoordinator"); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto executor = Grid::get(outerOpCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); @@ -1664,10 +1734,6 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, outerOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); auto newClient = outerOpCtx->getServiceContext()->makeClient("MigrationCoordinator"); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto executor = Grid::get(outerOpCtx->getServiceContext())->getExecutorPool()->getFixedExecutor(); @@ -1695,21 +1761,24 @@ void MigrationDestinationManager::_migrateDriver(OperationContext* outerOpCtx, bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const BSONObj& xfer) { bool didAnything = false; long long changeInOrphans = 0; + long long totalDocs = 0; // Deleted documents if (xfer["deleted"].isABSONObj()) { - boost::optional rs; - if (serverGlobalParams.moveParanoia) { - rs.emplace("moveChunk", _nss.ns(), "removedDuring"); - } - BSONObjIterator i(xfer["deleted"].Obj()); while (i.more()) { - AutoGetCollection autoColl(opCtx, _nss, MODE_IX); + totalDocs++; + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(_nss, + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "Collection " << _nss.ns() + str::stream() << "Collection " << _nss.toStringForErrorMsg() << " was dropped in the middle of the migration", - autoColl.getCollection()); + collection.exists()); BSONObj id = i.next().Obj(); @@ -1724,14 +1793,9 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const } } - if (rs) { - uassertStatusOK(rs->goingToDelete(fullObj)); - } - - writeConflictRetry(opCtx, "transferModsDeletes", _nss.ns(), [&] { + writeConflictRetry(opCtx, "transferModsDeletes", _nss, [&] { deleteObjects(opCtx, - autoColl.getCollection(), - _nss, + collection, id, true /* justOne */, false /* god */, @@ -1747,11 +1811,18 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const if (xfer["reload"].isABSONObj()) { BSONObjIterator i(xfer["reload"].Obj()); while (i.more()) { - AutoGetCollection autoColl(opCtx, _nss, MODE_IX); + totalDocs++; + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(_nss, + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); 
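Editor's note: the collection acquisitions above and below follow the shard-role API pattern this patch adopts throughout _applyMigrateOp in place of AutoGetCollection. A minimal consolidated sketch of that pattern follows; it uses only names that appear in the surrounding hunks, the lambda itself is hypothetical, and it assumes the same includes as the file.

// Hypothetical consolidation of the acquire-and-verify step used for both the "deleted" and
// "reload" sections of _applyMigrateOp; not part of the patch.
auto acquireForMigrationWrite = [](OperationContext* opCtx, const NamespaceString& nss) {
    auto collection = acquireCollection(
        opCtx,
        CollectionAcquisitionRequest(nss,
                                     AcquisitionPrerequisites::kPretendUnsharded,
                                     repl::ReadConcernArgs::get(opCtx),
                                     AcquisitionPrerequisites::kWrite),
        MODE_IX);
    // The collection can be dropped concurrently with the migration; report that as a
    // conflicting operation rather than proceeding against a missing collection.
    uassert(ErrorCodes::ConflictingOperationInProgress,
            str::stream() << "Collection " << nss.toStringForErrorMsg()
                          << " was dropped in the middle of the migration",
            collection.exists());
    return collection;
};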
uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "Collection " << _nss.ns() + str::stream() << "Collection " << _nss.toStringForErrorMsg() << " was dropped in the middle of the migration", - autoColl.getCollection()); + collection.exists()); BSONObj updatedDoc = i.next().Obj(); @@ -1780,8 +1851,8 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const } // We are in write lock here, so sure we aren't killing - writeConflictRetry(opCtx, "transferModsUpdates", _nss.ns(), [&] { - auto res = Helpers::upsert(opCtx, _nss, updatedDoc, true); + writeConflictRetry(opCtx, "transferModsUpdates", _nss, [&] { + auto res = Helpers::upsert(opCtx, collection, updatedDoc, true); if (!res.upsertedId.isEmpty()) { changeInOrphans++; } @@ -1794,6 +1865,9 @@ bool MigrationDestinationManager::_applyMigrateOp(OperationContext* opCtx, const if (changeInOrphans != 0) { persistUpdatedNumOrphans(opCtx, *_collectionUuid, ChunkRange(_min, _max), changeInOrphans); } + + ShardingStatistics::get(opCtx).countDocsClonedOnCatchUpOnRecipient.addAndFetch(totalDocs); + return didAnything; } diff --git a/src/mongo/db/s/migration_destination_manager.h b/src/mongo/db/s/migration_destination_manager.h index 49d1daa0eb88d..be1fc6f137f69 100644 --- a/src/mongo/db/s/migration_destination_manager.h +++ b/src/mongo/db/s/migration_destination_manager.h @@ -29,35 +29,54 @@ #pragma once +#include +#include +#include #include #include +#include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_batch_fetcher.h" +#include "mongo/db/s/migration_batch_inserter.h" #include "mongo/db/s/migration_recipient_recovery_document_gen.h" #include "mongo/db/s/migration_session_id.h" #include "mongo/db/s/session_catalog_migration_destination.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" #include "mongo/util/cancellation.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/timer.h" +#include "mongo/util/uuid.h" namespace mongo { class OperationContext; + class StartChunkCloneRequest; class Status; struct WriteConcernOptions; diff --git a/src/mongo/db/s/migration_destination_manager_commands.cpp b/src/mongo/db/s/migration_destination_manager_commands.cpp index 6e289ccf0cd64..402db1409d6fc 100644 --- a/src/mongo/db/s/migration_destination_manager_commands.cpp +++ b/src/mongo/db/s/migration_destination_manager_commands.cpp @@ -27,24 +27,55 @@ * it in the license file. 
*/ -#include "mongo/db/auth/action_set.h" +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/chunk_move_write_concern_options.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_destination_manager.h" +#include "mongo/db/s/migration_session_id.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/start_chunk_clone_request.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" -#include "mongo/s/request_types/migration_secondary_throttle_options.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/stale_exception.h" #include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/future.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -90,11 +121,11 @@ class RecvChunkStartCommand : public ErrmsgCommandDeprecated { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -117,7 +148,7 @@ class RecvChunkStartCommand : public ErrmsgCommandDeprecated { opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); - auto nss = NamespaceString(parseNs({boost::none, dbname}, cmdObj)); + auto nss = parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), cmdObj); auto cloneRequest = uassertStatusOK(StartChunkCloneRequest::createFromCommand(nss, cmdObj)); @@ -196,11 +227,11 @@ class RecvChunkStatusCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), 
ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -245,11 +276,11 @@ class RecvChunkCommitCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -303,11 +334,11 @@ class RecvChunkAbortCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -365,11 +396,11 @@ class RecvChunkReleaseCritSecCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::internal)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/db/s/migration_destination_manager_test.cpp b/src/mongo/db/s/migration_destination_manager_test.cpp index bd2968cbe6ed7..320f4ef539221 100644 --- a/src/mongo/db/s/migration_destination_manager_test.cpp +++ b/src/mongo/db/s/migration_destination_manager_test.cpp @@ -27,11 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/s/migration_destination_manager.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/catalog_cache_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -196,7 +211,8 @@ TEST_F(MigrationDestinationManagerNetworkTest, << BSON("v" << 2 << "key" << BSON("_id" << 1) << "name" << "_id_"))}; - std::string listCollectionsNs = str::stream() << nss.db() << "$cmd.listCollections"; + std::string listCollectionsNs = str::stream() + << nss.db_forTest() << "$cmd.listCollections"; return BSON( "ok" << 1 << "cursor" << BSON("id" << 0LL << "ns" << listCollectionsNs << "firstBatch" << colls)); @@ -218,8 +234,9 @@ TEST_F(MigrationDestinationManagerNetworkTest, const std::vector indexes = {BSON("v" << 2 << "key" << BSON("_id" << 1) << "name" << "_id_")}; - return BSON("ok" << 1 << "cursor" - << BSON("id" << 0LL << "ns" << nss.ns() << "firstBatch" << indexes)); + return BSON( + "ok" << 1 << "cursor" + << BSON("id" << 0LL << "ns" << nss.ns_forTest() << "firstBatch" << indexes)); }); }); diff --git a/src/mongo/db/s/migration_session_id.cpp b/src/mongo/db/s/migration_session_id.cpp index 7049a0870cf53..c41e4af06049a 100644 --- a/src/mongo/db/s/migration_session_id.cpp +++ b/src/mongo/db/s/migration_session_id.cpp @@ -27,14 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/s/migration_session_id.h" +#include +#include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/db/s/migration_session_id.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/s/migration_session_id_test.cpp b/src/mongo/db/s/migration_session_id_test.cpp index 545c3e34bac0b..c15011b8e8c90 100644 --- a/src/mongo/db/s/migration_session_id_test.cpp +++ b/src/mongo/db/s/migration_session_id_test.cpp @@ -27,14 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/migration_session_id.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/s/migration_session_id.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/migration_source_manager.cpp b/src/mongo/db/s/migration_source_manager.cpp index e56f7effee8aa..332a404a6e60d 100644 --- a/src/mongo/db/s/migration_source_manager.cpp +++ b/src/mongo/db/s/migration_source_manager.cpp @@ -29,36 +29,84 @@ #include "mongo/db/s/migration_source_manager.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/keypattern.h" #include "mongo/db/operation_context.h" +#include "mongo/db/persistent_task_store.h" #include "mongo/db/read_concern.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/auto_split_vector.h" #include "mongo/db/s/chunk_operation_precondition_checks.h" #include "mongo/db/s/commit_chunk_migration_gen.h" #include "mongo/db/s/migration_chunk_cloner_source.h" #include "mongo/db/s/migration_coordinator.h" +#include "mongo/db/s/migration_coordinator_document_gen.h" #include "mongo/db/s/migration_util.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/shard_metadata_util.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_runtime_d_params_gen.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/db/s/sharding_state_recovery.h" #include "mongo/db/s/sharding_statistics.h" #include "mongo/db/s/type_shard_collection.h" +#include "mongo/db/s/type_shard_collection_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" -#include "mongo/db/vector_clock.h" -#include "mongo/db/vector_clock_mutable.h" +#include "mongo/db/write_concern.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/shard_key_pattern.h" +#include 
"mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingMigration @@ -143,6 +191,17 @@ std::shared_ptr MigrationSourceManager::getCurrentCl return msm->_cloneDriver; } +// static +bool MigrationSourceManager::isMigrating(OperationContext* opCtx, + NamespaceString const& nss, + BSONObj const& docToDelete) { + const auto scopedCsr = + CollectionShardingRuntime::assertCollectionLockedAndAcquireShared(opCtx, nss); + auto cloner = MigrationSourceManager::getCurrentCloner(*scopedCsr); + + return cloner && cloner->isDocumentInMigratingChunk(docToDelete); +} + MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx, ShardsvrMoveRange&& request, WriteConcernOptions&& writeConcern, @@ -160,7 +219,7 @@ MigrationSourceManager::MigrationSourceManager(OperationContext* opCtx, << _args.getToShard())), _moveTimingHelper(_opCtx, "from", - _args.getCommandParameter().ns(), + NamespaceStringUtil::serialize(_args.getCommandParameter()), _args.getMin(), _args.getMax(), 6, // Total number of steps @@ -294,7 +353,7 @@ void MigrationSourceManager::startClone() { uassertStatusOK(ShardingLogging::get(_opCtx)->logChangeChecked( _opCtx, "moveChunk.start", - nss().ns(), + NamespaceStringUtil::serialize(nss()), BSON("min" << *_args.getMin() << "max" << *_args.getMax() << "from" << _args.getFromShard() << "to" << _args.getToShard()), ShardingCatalogClient::kMajorityWriteConcern)); @@ -389,16 +448,13 @@ void MigrationSourceManager::enterCriticalSection() { if (!metadata.getChunkManager()->getVersion(_args.getToShard()).isSet()) { migrationutil::notifyChangeStreamsOnRecipientFirstChunk( _opCtx, nss(), _args.getFromShard(), _args.getToShard(), _collectionUUID); - } - // Mark the shard as running critical operation, which requires recovery on crash. - // - // NOTE: The 'migrateChunkToNewShard' oplog message written by the above call to - // 'notifyChangeStreamsOnRecipientFirstChunk' depends on this majority write to carry its local - // write to majority committed. - // TODO (SERVER-60110): Remove once 7.0 becomes last LTS. - uassertStatusOKWithContext(ShardingStateRecovery_DEPRECATED::startMetadataOp(_opCtx), - "Start metadata op"); + // Wait for the above 'migrateChunkToNewShard' oplog message to be majority acknowledged. 
+ WriteConcernResult ignoreResult; + auto latestOpTime = repl::ReplClientInfo::forClient(_opCtx->getClient()).getLastOp(); + uassertStatusOK(waitForWriteConcern( + _opCtx, latestOpTime, WriteConcerns::kMajorityWriteConcernNoTimeout, &ignoreResult)); + } LOGV2_DEBUG_OPTIONS(4817402, 2, @@ -419,7 +475,7 @@ void MigrationSourceManager::enterCriticalSection() { uassertStatusOKWithContext( shardmetadatautil::updateShardCollectionsEntry( _opCtx, - BSON(ShardCollectionType::kNssFieldName << nss().ns()), + BSON(ShardCollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss())), BSON("$inc" << BSON(ShardCollectionType::kEnterCriticalSectionCounterFieldName << 1)), false /*upsert*/), "Persist critical section signal for secondaries"); @@ -609,7 +665,7 @@ void MigrationSourceManager::commitChunkMetadataOnConfig() { ShardingLogging::get(_opCtx)->logChange( _opCtx, "moveChunk.commit", - nss().ns(), + NamespaceStringUtil::serialize(nss()), BSON("min" << *_args.getMin() << "max" << *_args.getMax() << "from" << _args.getFromShard() << "to" << _args.getToShard() << "counts" << *_recipientCloneCounts), ShardingCatalogClient::kMajorityWriteConcern); @@ -617,8 +673,8 @@ void MigrationSourceManager::commitChunkMetadataOnConfig() { const ChunkRange range(*_args.getMin(), *_args.getMax()); std::string orphanedRangeCleanUpErrMsg = str::stream() - << "Moved chunks successfully but failed to clean up " << nss() << " range " - << redact(range.toString()) << " due to: "; + << "Moved chunks successfully but failed to clean up " << nss().toStringForErrorMsg() + << " range " << redact(range.toString()) << " due to: "; if (_args.getWaitForDelete()) { LOGV2(22019, @@ -652,7 +708,7 @@ void MigrationSourceManager::_cleanupOnError() noexcept { ShardingLogging::get(_opCtx)->logChange( _opCtx, "moveChunk.error", - _args.getCommandParameter().ns(), + NamespaceStringUtil::serialize(_args.getCommandParameter()), BSON("min" << *_args.getMin() << "max" << *_args.getMax() << "from" << _args.getFromShard() << "to" << _args.getToShard()), ShardingCatalogClient::kMajorityWriteConcern); @@ -743,10 +799,6 @@ void MigrationSourceManager::_cleanup(bool completeMigration) noexcept { } auto newClient = _opCtx->getServiceContext()->makeClient("MigrationCoordinator"); - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto newOpCtxPtr = cc().makeOperationContext(); auto newOpCtx = newOpCtxPtr.get(); @@ -754,24 +806,15 @@ void MigrationSourceManager::_cleanup(bool completeMigration) noexcept { if (_state >= kCriticalSection && _state <= kCommittingOnConfig) { _stats.totalCriticalSectionTimeMillis.addAndFetch(_cloneAndCommitTimer.millis()); - // NOTE: The order of the operations below is important and the comments explain the - // reasoning behind it. - // // Wait for the updates to the cache of the routing table to be fully written to - // disk before clearing the 'minOpTime recovery' document. This way, we ensure that - // all nodes from a shard, which donated a chunk will always be at the placement - // version of the last migration it performed. + // disk. This way, we ensure that all nodes from a shard which donated a chunk will + // always be at the placement version of the last migration it performed. 
// // If the metadata is not persisted before clearing the 'inMigration' flag below, it // is possible that the persisted metadata is rolled back after step down, but the // write which cleared the 'inMigration' flag is not, a secondary node will report // itself at an older placement version. CatalogCacheLoader::get(newOpCtx).waitForCollectionFlush(newOpCtx, nss()); - - // Clear the 'minOpTime recovery' document so that the next time a node from this - // shard becomes a primary, it won't have to recover the config server optime. - // TODO (SERVER-60110): Remove once 7.0 becomes last LTS. - ShardingStateRecovery_DEPRECATED::endMetadataOp(newOpCtx); } if (completeMigration) { // This can be called on an exception path after the OperationContext has been diff --git a/src/mongo/db/s/migration_source_manager.h b/src/mongo/db/s/migration_source_manager.h index 3baf05e40c60a..276cb56f42a9d 100644 --- a/src/mongo/db/s/migration_source_manager.h +++ b/src/mongo/db/s/migration_source_manager.h @@ -29,15 +29,31 @@ #pragma once +#include +#include #include +#include +#include +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/client/connection_string.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_chunk_cloner_source.h" #include "mongo/db/s/migration_coordinator.h" #include "mongo/db/s/move_timing_helper.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/timer.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -86,6 +102,15 @@ class MigrationSourceManager { static std::shared_ptr getCurrentCloner( const CollectionShardingRuntime& csr); + /** + * Returns true if the document being deleted belongs to a chunk which, while still in the + * shard, is being migrated out. (Not to be confused with "fromMigrate", which tags + * operations that are steps in performing the migration.) + */ + static bool isMigrating(OperationContext* opCtx, + NamespaceString const& nss, + BSONObj const& docToDelete); + /** * Instantiates a new migration source manager with the specified migration parameters. Must be * called with the distributed lock acquired in advance (not asserted). @@ -176,6 +201,17 @@ class MigrationSourceManager { return _args.getCommandParameter(); } + boost::optional getMigrationId() { + if (_coordinator) { + return _coordinator->getMigrationId(); + } + return boost::none; + } + + long long getOpTimeMillis() { + return _entireOpTimer.millis(); + } + private: // Used to track the current state of the source manager. See the methods above, which have // comments explaining the various state transitions. 
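Editor's note: the isMigrating() helper declared in the header above packages up the "lock the CollectionShardingRuntime, then ask the active cloner" check described in its comment. The sketch below shows a hypothetical caller on a delete path; noteDeletionForMigration and its surrounding plumbing are invented for illustration, while the helper's contract (collection lock already held, per assertCollectionLockedAndAcquireShared in its definition) is taken from the patch.

// Hypothetical caller; not part of the patch. Assumes the usual migration_source_manager.h
// includes are available.
void noteDeletionForMigration(OperationContext* opCtx,
                              const NamespaceString& nss,
                              const BSONObj& docToDelete) {
    // isMigrating() asserts the collection lock internally, so the caller must already be
    // inside an AutoGetCollection (or equivalent) scope for 'nss'.
    dassert(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX));

    if (MigrationSourceManager::isMigrating(opCtx, nss, docToDelete)) {
        // The document belongs to a chunk that is still owned by this shard but is being
        // migrated out; a real caller would now notify the active cloner of the deletion
        // (omitted here).
    }
}

Compared with reaching into the CollectionShardingRuntime and the cloner at every call site, the static helper keeps the CSR acquisition and the null-cloner check in one place.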
diff --git a/src/mongo/db/s/migration_util.cpp b/src/mongo/db/s/migration_util.cpp index 6106752b5c5ef..868b0a56131d5 100644 --- a/src/mongo/db/s/migration_util.cpp +++ b/src/mongo/db/s/migration_util.cpp @@ -29,45 +29,85 @@ #include "mongo/db/s/migration_util.h" +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/catalog/collection_catalog_helper.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/ops/write_ops.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_coordinator.h" #include "mongo/db/s/migration_destination_manager.h" -#include "mongo/db/s/range_deletion_util.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" -#include "mongo/db/s/sharding_runtime_d_params_gen.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_statistics.h" -#include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/db/write_concern.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/ensure_chunk_version_is_greater_than_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/exit.h" -#include "mongo/util/future_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include 
"mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingMigration @@ -171,7 +211,7 @@ void sendWriteCommandToRecipient(OperationContext* opCtx, auto response = recipientShard->runCommandWithFixedRetryAttempts( opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - cmd.getDbName().toString(), + DatabaseNameUtil::serialize(cmd.getDbName()), cmdBSON, Shard::RetryPolicy::kIdempotent); @@ -220,12 +260,6 @@ void retryIdempotentWorkAsPrimaryUntilSuccessOrStepdown( try { auto newClient = opCtx->getServiceContext()->makeClient(newClientName); - - { - stdx::lock_guard lk(*newClient.get()); - newClient->setSystemOperationKillableByStepdown(lk); - } - auto newOpCtx = newClient->makeOperationContext(); AlternativeClientRegion altClient(newClient); @@ -342,7 +376,7 @@ BSONObjBuilder _makeMigrationStatusDocumentCommon(const NamespaceString& nss, builder.append(kDestinationShard, toShard.toString()); builder.append(kIsDonorShard, isDonorShard); builder.append(kChunk, BSON(ChunkType::min(min) << ChunkType::max(max))); - builder.append(kCollection, nss.ns()); + builder.append(kCollection, NamespaceStringUtil::serialize(nss)); return builder; } @@ -445,217 +479,6 @@ bool deletionTaskUuidMatchesFilteringMetadataUuid( optCollDescr->uuidMatches(deletionTask.getCollectionUuid()); } -ExecutorFuture cleanUpRange(ServiceContext* serviceContext, - const std::shared_ptr& executor, - const RangeDeletionTask& deletionTask) { - return AsyncTry([=]() mutable { - ThreadClient tc(kRangeDeletionThreadName, serviceContext); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - auto uniqueOpCtx = tc->makeOperationContext(); - auto opCtx = uniqueOpCtx.get(); - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - const auto dbName = deletionTask.getNss().dbName(); - const auto collectionUuid = deletionTask.getCollectionUuid(); - - while (true) { - boost::optional optNss; - try { - // Holding the locks while enqueueing the task protects against possible - // concurrent cleanups of the filtering metadata, that be serialized - AutoGetCollection autoColl( - opCtx, NamespaceStringOrUUID{dbName, collectionUuid}, MODE_IS); - optNss.emplace(autoColl.getNss()); - auto scopedCsr = - CollectionShardingRuntime::assertCollectionLockedAndAcquireShared( - opCtx, *optNss); - auto optCollDescr = scopedCsr->getCurrentMetadataIfKnown(); - - if (optCollDescr) { - uassert(ErrorCodes:: - RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist, - str::stream() << "Filtering metadata for " << *optNss - << (optCollDescr->isSharded() - ? " has UUID that does not match UUID of " - "the deletion task" - : " is unsharded"), - deletionTaskUuidMatchesFilteringMetadataUuid( - opCtx, optCollDescr, deletionTask)); - - LOGV2(6955500, - "Submitting range deletion task", - "deletionTask"_attr = redact(deletionTask.toBSON())); - - const auto whenToClean = - deletionTask.getWhenToClean() == CleanWhenEnum::kNow - ? 
CollectionShardingRuntime::kNow - : CollectionShardingRuntime::kDelayed; - - return scopedCsr->cleanUpRange(deletionTask.getRange(), whenToClean); - } - } catch (ExceptionFor&) { - uasserted( - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist, - str::stream() << "Collection has been dropped since enqueuing this " - "range deletion task: " - << deletionTask.toBSON()); - } - - - refreshFilteringMetadataUntilSuccess(opCtx, *optNss); - } - }) - .until([](Status status) mutable { - // Resubmit the range for deletion on a RangeOverlapConflict error. - return status != ErrorCodes::RangeOverlapConflict; - }) - .withBackoffBetweenIterations(kExponentialBackoff) - .on(executor, CancellationToken::uncancelable()); -} - -ExecutorFuture submitRangeDeletionTask(OperationContext* opCtx, - const RangeDeletionTask& deletionTask) { - const auto serviceContext = opCtx->getServiceContext(); - auto executor = getMigrationUtilExecutor(serviceContext); - return ExecutorFuture(executor) - .then([=] { - ThreadClient tc(kRangeDeletionThreadName, serviceContext); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - - uassert( - ErrorCodes::ResumableRangeDeleterDisabled, - str::stream() - << "Not submitting range deletion task " << redact(deletionTask.toBSON()) - << " because the disableResumableRangeDeleter server parameter is set to true", - !disableResumableRangeDeleter.load()); - - return AsyncTry([=]() { - return cleanUpRange(serviceContext, executor, deletionTask) - .onError([=](Status status) { - ThreadClient tc(kRangeDeletionThreadName, serviceContext); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - auto uniqueOpCtx = tc->makeOperationContext(); - uniqueOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - LOGV2(55557, - "cleanUpRange failed due to keyPattern shorter than range " - "deletion bounds. Refreshing collection metadata to retry.", - logAttrs(deletionTask.getNss()), - "status"_attr = redact(status)); - - onCollectionPlacementVersionMismatch( - uniqueOpCtx.get(), deletionTask.getNss(), boost::none); - - return status; - }); - }) - .until( - [](Status status) { return status != ErrorCodes::KeyPatternShorterThanBound; }) - .on(executor, CancellationToken::uncancelable()); - }) - .onError([=](const Status status) { - ThreadClient tc(kRangeDeletionThreadName, serviceContext); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - auto uniqueOpCtx = tc->makeOperationContext(); - auto opCtx = uniqueOpCtx.get(); - - LOGV2(22027, - "Failed to submit range deletion task", - "deletionTask"_attr = redact(deletionTask.toBSON()), - "error"_attr = redact(status), - "migrationId"_attr = deletionTask.getId()); - - if (status == ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist) { - deleteRangeDeletionTaskLocally(opCtx, - deletionTask.getCollectionUuid(), - deletionTask.getRange(), - ShardingCatalogClient::kLocalWriteConcern); - } - - // Note, we use onError and make it return its input status, because ExecutorFuture does - // not support tapError. 
- return status; - }); -} - -void submitPendingDeletions(OperationContext* opCtx) { - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - - auto query = BSON("pending" << BSON("$exists" << false)); - - store.forEach(opCtx, query, [&opCtx](const RangeDeletionTask& deletionTask) { - migrationutil::submitRangeDeletionTask(opCtx, deletionTask).getAsync([](auto) {}); - return true; - }); -} - -void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext) { - LOGV2(22028, "Starting pending deletion submission thread."); - - ExecutorFuture(getMigrationUtilExecutor(serviceContext)) - .then([serviceContext] { - ThreadClient tc("ResubmitRangeDeletions", serviceContext); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - - auto opCtx = tc->makeOperationContext(); - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - DBDirectClient client(opCtx.get()); - FindCommandRequest findCommand(NamespaceString::kRangeDeletionNamespace); - findCommand.setFilter(BSON(RangeDeletionTask::kProcessingFieldName << true)); - auto cursor = client.find(std::move(findCommand)); - - auto retFuture = ExecutorFuture(getMigrationUtilExecutor(serviceContext)); - - int rangeDeletionsMarkedAsProcessing = 0; - while (cursor->more()) { - retFuture = migrationutil::submitRangeDeletionTask( - opCtx.get(), - RangeDeletionTask::parse(IDLParserContext("rangeDeletionRecovery"), - cursor->next())); - rangeDeletionsMarkedAsProcessing++; - } - - if (rangeDeletionsMarkedAsProcessing > 1) { - LOGV2_WARNING( - 6695800, - "Rescheduling several range deletions marked as processing. Orphans count " - "may be off while they are not drained", - "numRangeDeletionsMarkedAsProcessing"_attr = rangeDeletionsMarkedAsProcessing); - } - - return retFuture; - }) - .then([serviceContext] { - ThreadClient tc("ResubmitRangeDeletions", serviceContext); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - - auto opCtx = tc->makeOperationContext(); - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - submitPendingDeletions(opCtx.get()); - }) - .getAsync([](auto) {}); -} - void persistMigrationCoordinatorLocally(OperationContext* opCtx, const MigrationCoordinatorDocument& migrationDoc) { PersistentTaskStore store( @@ -745,28 +568,27 @@ void notifyChangeStreamsOnRecipientFirstChunk(OperationContext* opCtx, // The message expected by change streams const auto o2Message = - BSON("migrateChunkToNewShard" << collNss.toString() << "fromShardId" << fromShardId - << "toShardId" << toShardId); + BSON("migrateChunkToNewShard" << NamespaceStringUtil::serialize(collNss) << "fromShardId" + << fromShardId << "toShardId" << toShardId); auto const serviceContext = opCtx->getClient()->getServiceContext(); // TODO (SERVER-71444): Fix to be interruptible or document exception. UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. 
AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); - writeConflictRetry( - opCtx, "migrateChunkToNewShard", NamespaceString::kRsOplogNamespace.ns(), [&] { - WriteUnitOfWork uow(opCtx); - serviceContext->getOpObserver()->onInternalOpMessage(opCtx, - collNss, - *collUUID, - BSON("msg" << dbgMessage), - o2Message, - boost::none, - boost::none, - boost::none, - boost::none); - uow.commit(); - }); + writeConflictRetry(opCtx, "migrateChunkToNewShard", NamespaceString::kRsOplogNamespace, [&] { + WriteUnitOfWork uow(opCtx); + serviceContext->getOpObserver()->onInternalOpMessage(opCtx, + collNss, + *collUUID, + BSON("msg" << dbgMessage), + o2Message, + boost::none, + boost::none, + boost::none, + boost::none); + uow.commit(); + }); } void notifyChangeStreamsOnDonorLastChunk(OperationContext* opCtx, @@ -775,31 +597,32 @@ void notifyChangeStreamsOnDonorLastChunk(OperationContext* opCtx, boost::optional collUUID) { const std::string oMessage = str::stream() - << "Migrate the last chunk for " << collNss << " off shard " << donorShardId; + << "Migrate the last chunk for " << collNss.toStringForErrorMsg() << " off shard " + << donorShardId; // The message expected by change streams const auto o2Message = - BSON("migrateLastChunkFromShard" << collNss.toString() << "shardId" << donorShardId); + BSON("migrateLastChunkFromShard" << NamespaceStringUtil::serialize(collNss) << "shardId" + << donorShardId); auto const serviceContext = opCtx->getClient()->getServiceContext(); // TODO (SERVER-71444): Fix to be interruptible or document exception. UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); - writeConflictRetry( - opCtx, "migrateLastChunkFromShard", NamespaceString::kRsOplogNamespace.ns(), [&] { - WriteUnitOfWork uow(opCtx); - serviceContext->getOpObserver()->onInternalOpMessage(opCtx, - collNss, - *collUUID, - BSON("msg" << oMessage), - o2Message, - boost::none, - boost::none, - boost::none, - boost::none); - uow.commit(); - }); + writeConflictRetry(opCtx, "migrateLastChunkFromShard", NamespaceString::kRsOplogNamespace, [&] { + WriteUnitOfWork uow(opCtx); + serviceContext->getOpObserver()->onInternalOpMessage(opCtx, + collNss, + *collUUID, + BSON("msg" << oMessage), + o2Message, + boost::none, + boost::none, + boost::none, + boost::none); + uow.commit(); + }); } void persistCommitDecision(OperationContext* opCtx, @@ -814,6 +637,7 @@ void persistCommitDecision(OperationContext* opCtx, store.update(opCtx, BSON(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()), migrationDoc.toBSON()); + ShardingStatistics::get(opCtx).countDonorMoveChunkCommitted.addAndFetch(1); } catch (const ExceptionFor&) { LOGV2_ERROR(6439800, "No coordination doc found on disk for migration", @@ -837,6 +661,7 @@ void persistAbortDecision(OperationContext* opCtx, store.update(opCtx, BSON(MigrationCoordinatorDocument::kIdFieldName << migrationDoc.getId()), migrationDoc.toBSON()); + ShardingStatistics::get(opCtx).countDonorMoveChunkAborted.addAndFetch(1); } catch (const ExceptionFor&) { LOGV2(6439801, "No coordination doc found on disk for migration", @@ -1042,7 +867,7 @@ void recoverMigrationCoordinations(OperationContext* opCtx, NamespaceString::kMigrationCoordinatorsNamespace); store.forEach( opCtx, - BSON(MigrationCoordinatorDocument::kNssFieldName << nss.toString()), + BSON(MigrationCoordinatorDocument::kNssFieldName << NamespaceStringUtil::serialize(nss)), [&opCtx, &nss, &migrationRecoveryCount, &cancellationToken]( const 
MigrationCoordinatorDocument& doc) { LOGV2_DEBUG(4798502, @@ -1054,7 +879,7 @@ void recoverMigrationCoordinations(OperationContext* opCtx, // namespace. invariant(++migrationRecoveryCount == 1, str::stream() << "Found more then one migration to recover for namespace '" - << nss << "'"); + << nss.toStringForErrorMsg() << "'"); // Create a MigrationCoordinator to complete the coordination. MigrationCoordinator coordinator(doc); @@ -1124,13 +949,12 @@ void recoverMigrationCoordinations(OperationContext* opCtx, "coordinatorDocumentUUID"_attr = doc.getCollectionUuid()); } - // TODO SERVER-71918 once the drop collection coordinator starts persisting the - // config time we can remove this. Since the collection has been dropped, - // persist config time inclusive of the drop collection event before deleting - // leftover migration metadata. - // This will ensure that in case of stepdown the new - // primary won't read stale data from config server and think that the sharded - // collection still exists. + // TODO SERVER-77472: remove this once we are sure all operations persist the config + // time after a collection drop. Since the collection has been dropped, persist + // config time inclusive of the drop collection event before deleting leftover + // migration metadata. This will ensure that in case of stepdown the new primary + // won't read stale data from config server and think that the sharded collection + // still exists. VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); deleteRangeDeletionTaskOnRecipient(opCtx, @@ -1175,10 +999,6 @@ ExecutorFuture launchReleaseCriticalSectionOnRecipientFuture( return ExecutorFuture(executor).then([=] { ThreadClient tc("releaseRecipientCritSec", serviceContext); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } auto uniqueOpCtx = tc->makeOperationContext(); auto opCtx = uniqueOpCtx.get(); @@ -1186,7 +1006,7 @@ ExecutorFuture launchReleaseCriticalSectionOnRecipientFuture( uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, recipientShardId)); BSONObjBuilder builder; - builder.append("_recvChunkReleaseCritSec", nss.ns()); + builder.append("_recvChunkReleaseCritSec", NamespaceStringUtil::serialize(nss)); sessionId.append(&builder); const auto commandObj = CommandHelpers::appendMajorityWriteConcern(builder.obj()); @@ -1316,10 +1136,6 @@ void asyncRecoverMigrationUntilSuccessOrStepDown(OperationContext* opCtx, ExecutorFuture{Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()} .then([svcCtx{opCtx->getServiceContext()}, nss] { ThreadClient tc{"MigrationRecovery", svcCtx}; - { - stdx::lock_guard lk{*tc.get()}; - tc->setSystemOperationKillableByStepdown(lk); - } auto uniqueOpCtx{tc->makeOperationContext()}; auto opCtx{uniqueOpCtx.get()}; diff --git a/src/mongo/db/s/migration_util.h b/src/mongo/db/s/migration_util.h index 2c1872659f0e3..3eb64a2bfbe95 100644 --- a/src/mongo/db/s/migration_util.h +++ b/src/mongo/db/s/migration_util.h @@ -29,15 +29,32 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/repl/optime.h" #include "mongo/db/s/balancer_stats_registry.h" #include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/migration_coordinator_document_gen.h" #include "mongo/db/s/migration_recipient_recovery_document_gen.h" +#include "mongo/db/s/migration_session_id.h" #include 
"mongo/db/s/range_deletion_task_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -122,31 +139,6 @@ size_t checkForConflictingDeletions(OperationContext* opCtx, const ChunkRange& range, const UUID& uuid); -/** - * Asynchronously attempts to submit the RangeDeletionTask for processing. - * - * Note that if the current filtering metadata's UUID does not match the task's UUID, the filtering - * metadata will be refreshed once. If the UUID's still don't match, the task will be deleted from - * disk. If the UUID's do match, the range will be submitted for deletion. - * - * If the range is submitted for deletion, the returned future is set when the range deletion - * completes. If the range is not submitted for deletion, the returned future is set with an error - * explaining why. - */ -ExecutorFuture submitRangeDeletionTask(OperationContext* oppCtx, - const RangeDeletionTask& deletionTask); - -/** - * Queries the rangeDeletions collection for ranges that are ready to be deleted and submits them to - * the range deleter. - */ -void submitPendingDeletions(OperationContext* opCtx); - -/** - * Asynchronously calls submitPendingDeletions using the fixed executor pool. - */ -void resubmitRangeDeletionsOnStepUp(ServiceContext* serviceContext); - /** * Writes the migration coordinator document to config.migrationCoordinators and waits for majority * write concern. diff --git a/src/mongo/db/s/migration_util_test.cpp b/src/mongo/db/s/migration_util_test.cpp index 029a7ff39fc0a..a61333246bd30 100644 --- a/src/mongo/db/s/migration_util_test.cpp +++ b/src/mongo/db/s/migration_util_test.cpp @@ -27,26 +27,28 @@ * it in the license file. 
*/ -#include "mongo/client/remote_command_targeter_factory_mock.h" -#include "mongo/client/remote_command_targeter_mock.h" +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/logical_time.h" #include "mongo/db/persistent_task_store.h" -#include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/collection_sharding_runtime_test.cpp" #include "mongo/db/s/migration_util.h" -#include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/range_deletion_util.h" -#include "mongo/db/s/shard_filtering_metadata_refresh.h" -#include "mongo/db/s/shard_server_catalog_cache_loader.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/vector_clock.h" -#include "mongo/s/catalog/sharding_catalog_client_mock.h" -#include "mongo/s/catalog/type_shard.h" -#include "mongo/s/catalog_cache_loader_mock.h" -#include "mongo/s/database_version.h" -#include "mongo/util/future.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -366,373 +368,5 @@ TEST_F(MigrationUtilsTest, TestUpdateNumberOfOrphans) { ASSERT_EQ(store.count(opCtx, rangeDeletionDoc.toBSON().removeField("timestamp")), 1); } -/** - * Fixture that uses a mocked CatalogCacheLoader and CatalogClient to allow metadata refreshes - * without using the mock network. - */ -class SubmitRangeDeletionTaskTest : public CollectionShardingRuntimeWithRangeDeleterTest { -public: - const HostAndPort kConfigHostAndPort{"dummy", 123}; - const ShardKeyPattern kShardKeyPattern = ShardKeyPattern(BSON("_id" << 1)); - const UUID kDefaultUUID = UUID::gen(); - const OID kEpoch = OID::gen(); - const Timestamp kDefaultTimestamp = Timestamp(2, 0); - const DatabaseType kDefaultDatabaseType = DatabaseType( - kTestNss.db().toString(), ShardId("0"), DatabaseVersion(kDefaultUUID, kDefaultTimestamp)); - const std::vector kShardList = {ShardType("0", "Host0:12345"), - ShardType("1", "Host1:12345")}; - - void setUp() override { - // Don't call ShardServerTestFixture::setUp so we can install a mock catalog cache loader. - ShardingMongodTestFixture::setUp(); - - replicationCoordinator()->alwaysAllowWrites(true); - serverGlobalParams.clusterRole = ClusterRole::ShardServer; - - _clusterId = OID::gen(); - ShardingState::get(getServiceContext())->setInitialized(_myShardName, _clusterId); - - auto mockLoader = std::make_unique(); - _mockCatalogCacheLoader = mockLoader.get(); - CatalogCacheLoader::set(getServiceContext(), std::move(mockLoader)); - - uassertStatusOK( - initializeGlobalShardingStateForMongodForTest(ConnectionString(kConfigHostAndPort))); - - configTargeterMock()->setFindHostReturnValue(kConfigHostAndPort); - - WaitForMajorityService::get(getServiceContext()).startup(getServiceContext()); - - // Set up 2 default shards. 
- for (const auto& shard : kShardList) { - std::unique_ptr targeter( - std::make_unique()); - HostAndPort host(shard.getHost()); - targeter->setConnectionStringReturnValue(ConnectionString(host)); - targeter->setFindHostReturnValue(host); - targeterFactory()->addTargeterToReturn(ConnectionString(host), std::move(targeter)); - } - } - - void tearDown() override { - WaitForMajorityService::get(getServiceContext()).shutDown(); - - ShardServerTestFixture::tearDown(); - } - - // Mock for the ShardingCatalogClient used to satisfy loading all shards for the ShardRegistry - // and loading all collections when a database is loaded for the first time by the CatalogCache. - class StaticCatalogClient final : public ShardingCatalogClientMock { - public: - StaticCatalogClient(std::vector shards) : _shards(std::move(shards)) {} - - StatusWith>> getAllShards( - OperationContext* opCtx, repl::ReadConcernLevel readConcern) override { - return repl::OpTimeWith>(_shards); - } - - std::vector getCollections(OperationContext* opCtx, - StringData dbName, - repl::ReadConcernLevel readConcernLevel, - const BSONObj& sort) override { - return _colls; - } - - std::pair> - getCollectionAndShardingIndexCatalogEntries( - OperationContext* opCtx, - const NamespaceString& nss, - const repl::ReadConcernArgs& readConcern) override { - if (!_coll) { - uasserted(ErrorCodes::NamespaceNotFound, "dummy errmsg"); - } - return std::make_pair(*_coll, std::vector()); - } - - void setCollections(std::vector colls) { - _colls = std::move(colls); - } - - void setCollection(boost::optional coll) { - _coll = coll; - } - - private: - const std::vector _shards; - std::vector _colls; - boost::optional _coll; - }; - - UUID createCollectionAndGetUUID(const NamespaceString& nss) { - { - OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE - unsafeCreateCollection(operationContext()); - uassertStatusOK( - createCollection(operationContext(), nss.dbName(), BSON("create" << nss.coll()))); - } - - AutoGetCollection autoColl(operationContext(), nss, MODE_IX); - return autoColl.getCollection()->uuid(); - } - - std::unique_ptr makeShardingCatalogClient() override { - auto mockCatalogClient = std::make_unique(kShardList); - // Stash a pointer to the mock so its return values can be set. 
- _mockCatalogClient = mockCatalogClient.get(); - return mockCatalogClient; - } - - CollectionType makeCollectionType(UUID uuid, OID epoch, Timestamp timestamp) { - CollectionType coll( - kTestNss, epoch, timestamp, Date_t::now(), uuid, kShardKeyPattern.getKeyPattern()); - coll.setUnique(true); - return coll; - } - - std::vector makeChangedChunks(ChunkVersion startingVersion) { - const auto uuid = UUID::gen(); - ChunkType chunk1(uuid, - {kShardKeyPattern.getKeyPattern().globalMin(), BSON("_id" << -100)}, - startingVersion, - {"0"}); - chunk1.setName(OID::gen()); - startingVersion.incMinor(); - - ChunkType chunk2(uuid, {BSON("_id" << -100), BSON("_id" << 0)}, startingVersion, {"1"}); - chunk2.setName(OID::gen()); - startingVersion.incMinor(); - - ChunkType chunk3(uuid, {BSON("_id" << 0), BSON("_id" << 100)}, startingVersion, {"0"}); - chunk3.setName(OID::gen()); - startingVersion.incMinor(); - - ChunkType chunk4(uuid, - {BSON("_id" << 100), kShardKeyPattern.getKeyPattern().globalMax()}, - startingVersion, - {"1"}); - chunk4.setName(OID::gen()); - startingVersion.incMinor(); - - return std::vector{chunk1, chunk2, chunk3, chunk4}; - } - - CatalogCacheLoaderMock* _mockCatalogCacheLoader; - StaticCatalogClient* _mockCatalogClient; - - RAIIServerParameterControllerForTest enableFeatureFlag{"featureFlagRangeDeleterService", false}; -}; - -TEST_F(SubmitRangeDeletionTaskTest, - FailsAndDeletesTaskIfFilteringMetadataIsUnknownEvenAfterRefresh) { - auto opCtx = operationContext(); - auto deletionTask = createDeletionTask(opCtx, kTestNss, kDefaultUUID, 0, 10, _myShardName); - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - - store.add(opCtx, deletionTask); - ASSERT_EQ(store.count(opCtx), 1); - migrationutil::markAsReadyRangeDeletionTaskLocally( - opCtx, deletionTask.getCollectionUuid(), deletionTask.getRange()); - - // Make the refresh triggered by submitting the task return an empty result when loading the - // database. - _mockCatalogCacheLoader->setDatabaseRefreshReturnValue( - Status(ErrorCodes::NamespaceNotFound, "dummy errmsg")); - - auto cleanupCompleteFuture = migrationutil::submitRangeDeletionTask(opCtx, deletionTask); - - // The task should not have been submitted, and the task's entry should have been removed from - // the persistent store. - ASSERT_THROWS_CODE(cleanupCompleteFuture.get(opCtx), - AssertionException, - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist); - ASSERT_EQ(store.count(opCtx), 0); -} - -TEST_F(SubmitRangeDeletionTaskTest, FailsAndDeletesTaskIfNamespaceIsUnshardedEvenAfterRefresh) { - auto opCtx = operationContext(); - - auto deletionTask = createDeletionTask(opCtx, kTestNss, kDefaultUUID, 0, 10, _myShardName); - - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - - store.add(opCtx, deletionTask); - ASSERT_EQ(store.count(opCtx), 1); - migrationutil::markAsReadyRangeDeletionTaskLocally( - opCtx, deletionTask.getCollectionUuid(), deletionTask.getRange()); - - // Make the refresh triggered by submitting the task return an empty result when loading the - // collection so it is considered unsharded. - _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType); - _mockCatalogCacheLoader->setCollectionRefreshReturnValue( - Status(ErrorCodes::NamespaceNotFound, "dummy errmsg")); - - auto cleanupCompleteFuture = migrationutil::submitRangeDeletionTask(opCtx, deletionTask); - - // The task should not have been submitted, and the task's entry should have been removed from - // the persistent store. 
- ASSERT_THROWS_CODE(cleanupCompleteFuture.get(opCtx), - AssertionException, - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist); - ASSERT_EQ(store.count(opCtx), 0); -} - -TEST_F(SubmitRangeDeletionTaskTest, - FailsAndDeletesTaskIfNamespaceIsUnshardedBeforeAndAfterRefresh) { - auto opCtx = operationContext(); - - auto deletionTask = createDeletionTask(opCtx, kTestNss, kDefaultUUID, 0, 10, _myShardName); - - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - - store.add(opCtx, deletionTask); - ASSERT_EQ(store.count(opCtx), 1); - migrationutil::markAsReadyRangeDeletionTaskLocally( - opCtx, deletionTask.getCollectionUuid(), deletionTask.getRange()); - - // Mock an empty result for the task's collection and force a refresh so the node believes the - // collection is unsharded. - _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType); - _mockCatalogCacheLoader->setCollectionRefreshReturnValue( - Status(ErrorCodes::NamespaceNotFound, "dummy errmsg")); - forceShardFilteringMetadataRefresh(opCtx, kTestNss); - - auto cleanupCompleteFuture = migrationutil::submitRangeDeletionTask(opCtx, deletionTask); - - // The task should not have been submitted, and the task's entry should have been removed from - // the persistent store. - ASSERT_THROWS_CODE(cleanupCompleteFuture.get(opCtx), - AssertionException, - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist); - ASSERT_EQ(store.count(opCtx), 0); -} - -TEST_F(SubmitRangeDeletionTaskTest, SucceedsIfFilteringMetadataUUIDMatchesTaskUUID) { - auto opCtx = operationContext(); - - auto collectionUUID = createCollectionAndGetUUID(kTestNss); - auto deletionTask = createDeletionTask(opCtx, kTestNss, collectionUUID, 0, 10, _myShardName); - - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - - store.add(opCtx, deletionTask); - ASSERT_EQ(store.count(opCtx), 1); - migrationutil::markAsReadyRangeDeletionTaskLocally( - opCtx, deletionTask.getCollectionUuid(), deletionTask.getRange()); - - // Force a metadata refresh with the task's UUID before the task is submitted. - auto coll = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp); - _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType); - _mockCatalogCacheLoader->setCollectionRefreshReturnValue(coll); - _mockCatalogCacheLoader->setChunkRefreshReturnValue( - makeChangedChunks(ChunkVersion({kEpoch, kDefaultTimestamp}, {1, 0}))); - _mockCatalogClient->setCollections({coll}); - _mockCatalogClient->setCollection(coll); - forceShardFilteringMetadataRefresh(opCtx, kTestNss); - - // The task should have been submitted successfully. - auto cleanupCompleteFuture = migrationutil::submitRangeDeletionTask(opCtx, deletionTask); - cleanupCompleteFuture.get(opCtx); -} - -TEST_F( - SubmitRangeDeletionTaskTest, - SucceedsIfFilteringMetadataInitiallyUnknownButFilteringMetadataUUIDMatchesTaskUUIDAfterRefresh) { - auto opCtx = operationContext(); - - auto collectionUUID = createCollectionAndGetUUID(kTestNss); - auto deletionTask = createDeletionTask(opCtx, kTestNss, collectionUUID, 0, 10, _myShardName); - - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - - store.add(opCtx, deletionTask); - ASSERT_EQ(store.count(opCtx), 1); - migrationutil::markAsReadyRangeDeletionTaskLocally( - opCtx, deletionTask.getCollectionUuid(), deletionTask.getRange()); - - // Make the refresh triggered by submitting the task return a UUID that matches the task's UUID. 
- auto coll = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp); - _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType); - _mockCatalogCacheLoader->setCollectionRefreshReturnValue(coll); - _mockCatalogCacheLoader->setChunkRefreshReturnValue( - makeChangedChunks(ChunkVersion({kEpoch, kDefaultTimestamp}, {1, 0}))); - _mockCatalogClient->setCollections({coll}); - - auto metadata = makeShardedMetadata(opCtx, collectionUUID); - csr()->setFilteringMetadata(opCtx, metadata); - - // The task should have been submitted successfully. - auto cleanupCompleteFuture = migrationutil::submitRangeDeletionTask(opCtx, deletionTask); - cleanupCompleteFuture.get(opCtx); -} - -TEST_F(SubmitRangeDeletionTaskTest, - SucceedsIfTaskNamespaceInitiallyUnshardedButUUIDMatchesAfterRefresh) { - auto opCtx = operationContext(); - - // Force a metadata refresh with no collection entry so the node believes the namespace is - // unsharded when the task is submitted. - _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType); - _mockCatalogCacheLoader->setCollectionRefreshReturnValue( - Status(ErrorCodes::NamespaceNotFound, "dummy errmsg")); - forceShardFilteringMetadataRefresh(opCtx, kTestNss); - - auto collectionUUID = createCollectionAndGetUUID(kTestNss); - auto deletionTask = createDeletionTask(opCtx, kTestNss, collectionUUID, 0, 10, _myShardName); - - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - - store.add(opCtx, deletionTask); - ASSERT_EQ(store.count(opCtx), 1); - migrationutil::markAsReadyRangeDeletionTaskLocally( - opCtx, deletionTask.getCollectionUuid(), deletionTask.getRange()); - - // Make the refresh triggered by submitting the task return a UUID that matches the task's UUID. - auto matchingColl = makeCollectionType(collectionUUID, kEpoch, kDefaultTimestamp); - _mockCatalogCacheLoader->setCollectionRefreshReturnValue(matchingColl); - _mockCatalogCacheLoader->setChunkRefreshReturnValue( - makeChangedChunks(ChunkVersion({kEpoch, kDefaultTimestamp}, {10, 0}))); - _mockCatalogClient->setCollections({matchingColl}); - _mockCatalogClient->setCollection({matchingColl}); - - auto metadata = makeShardedMetadata(opCtx, collectionUUID); - csr()->setFilteringMetadata(opCtx, metadata); - - // The task should have been submitted successfully. - auto cleanupCompleteFuture = migrationutil::submitRangeDeletionTask(opCtx, deletionTask); - cleanupCompleteFuture.get(opCtx); -} - -TEST_F(SubmitRangeDeletionTaskTest, - FailsAndDeletesTaskIfFilteringMetadataUUIDDifferentFromTaskUUIDEvenAfterRefresh) { - auto opCtx = operationContext(); - - auto deletionTask = createDeletionTask(opCtx, kTestNss, kDefaultUUID, 0, 10, _myShardName); - - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - - store.add(opCtx, deletionTask); - ASSERT_EQ(store.count(opCtx), 1); - migrationutil::markAsReadyRangeDeletionTaskLocally( - opCtx, deletionTask.getCollectionUuid(), deletionTask.getRange()); - - // Make the refresh triggered by submitting the task return an arbitrary UUID. 
- const auto otherEpoch = OID::gen(); - const auto otherTimestamp = Timestamp(3, 0); - auto otherColl = makeCollectionType(UUID::gen(), otherEpoch, otherTimestamp); - _mockCatalogCacheLoader->setDatabaseRefreshReturnValue(kDefaultDatabaseType); - _mockCatalogCacheLoader->setCollectionRefreshReturnValue(otherColl); - _mockCatalogCacheLoader->setChunkRefreshReturnValue( - makeChangedChunks(ChunkVersion({otherEpoch, otherTimestamp}, {1, 0}))); - _mockCatalogClient->setCollections({otherColl}); - - // The task should not have been submitted, and the task's entry should have been removed from - // the persistent store. - auto cleanupCompleteFuture = migrationutil::submitRangeDeletionTask(opCtx, deletionTask); - ASSERT_THROWS_CODE(cleanupCompleteFuture.get(opCtx), - AssertionException, - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist); - ASSERT_EQ(store.count(opCtx), 0); -} - } // namespace } // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_base_cloner.cpp b/src/mongo/db/s/move_primary/move_primary_base_cloner.cpp deleted file mode 100644 index 8b55aeff96a1f..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_base_cloner.cpp +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/s/move_primary/move_primary_base_cloner.h" - -#include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kMovePrimary - -namespace mongo { - -MovePrimaryBaseCloner::MovePrimaryBaseCloner(StringData clonerName, - MovePrimarySharedData* sharedData, - const HostAndPort& source, - DBClientConnection* client, - repl::StorageInterface* storageInterface, - ThreadPool* dbPool) - : repl::BaseCloner(clonerName, sharedData, source, client, storageInterface, dbPool) {} - -logv2::LogComponent MovePrimaryBaseCloner::getLogComponent() { - return logv2::LogComponent::kMovePrimary; -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_base_cloner.h b/src/mongo/db/s/move_primary/move_primary_base_cloner.h deleted file mode 100644 index 1de9634144f90..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_base_cloner.h +++ /dev/null @@ -1,60 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/base/checked_cast.h" -#include "mongo/db/repl/base_cloner.h" -#include "mongo/db/s/move_primary/move_primary_shared_data.h" - -namespace mongo { - -class MovePrimaryBaseCloner : public repl::BaseCloner { -public: - MovePrimaryBaseCloner(StringData clonerName, - MovePrimarySharedData* sharedData, - const HostAndPort& source, - DBClientConnection* client, - repl::StorageInterface* storageInterface, - ThreadPool* dbPool); - virtual ~MovePrimaryBaseCloner() = default; - -protected: - MovePrimarySharedData* getSharedData() const override { - return checked_cast(BaseCloner::getSharedData()); - } - -private: - /** - * Overriden to allow the BaseCloner to use the move primary log component. 
- */ - virtual logv2::LogComponent getLogComponent() final; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_cloner_test_fixture.cpp b/src/mongo/db/s/move_primary/move_primary_cloner_test_fixture.cpp deleted file mode 100644 index e4a7485010811..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_cloner_test_fixture.cpp +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/s/move_primary/move_primary_cloner_test_fixture.h" - -#include "mongo/base/checked_cast.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/repl/storage_interface.h" -#include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/platform/basic.h" - -namespace mongo { - -void MovePrimaryClonerTestFixture::setUp() { - ClonerTestFixture::setUp(); - serviceContext = getServiceContext(); - repl::StorageInterface::set(serviceContext, std::make_unique()); - _sharedData = std::make_unique(&_clock, _migrationId); - - _mockClient->setOperationTime(_operationTime); -} - -void MovePrimaryClonerTestFixture::tearDown() { - ClonerTestFixture::tearDown(); -} - -MovePrimarySharedData* MovePrimaryClonerTestFixture::getSharedData() { - return checked_cast(_sharedData.get()); -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_cloner_test_fixture.h b/src/mongo/db/s/move_primary/move_primary_cloner_test_fixture.h deleted file mode 100644 index 730a7f3706fda..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_cloner_test_fixture.h +++ /dev/null @@ -1,48 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/repl/cloner_test_fixture.h" -#include "mongo/db/s/move_primary/move_primary_shared_data.h" -#include "mongo/util/uuid.h" - -namespace mongo { - -class MovePrimaryClonerTestFixture : public repl::ClonerTestFixture { -protected: - void setUp() override; - void tearDown() override; - ServiceContext* serviceContext{nullptr}; - MovePrimarySharedData* getSharedData(); - const Timestamp _operationTime = Timestamp(12345, 67); - const UUID _migrationId = UUID::gen(); -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_collection_cloner.cpp b/src/mongo/db/s/move_primary/move_primary_collection_cloner.cpp deleted file mode 100644 index 614c10b56837e..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_collection_cloner.cpp +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/s/move_primary/move_primary_collection_cloner.h" - -#include "mongo/base/string_data.h" -#include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/commands/list_collections_filter.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/ops/write_ops_exec.h" -#include "mongo/db/repl/cloner_utils.h" -#include "mongo/db/repl/database_cloner_gen.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/s/operation_sharding_state.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" -#include "mongo/util/assert_util.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kMovePrimary - -namespace mongo { - -MovePrimaryCollectionCloner::MovePrimaryCollectionCloner(MovePrimarySharedData* sharedData, - const HostAndPort& source, - DBClientConnection* client, - repl::StorageInterface* storageInterface, - ThreadPool* dbPool) - : MovePrimaryBaseCloner( - "MovePrimaryCollectionCloner"_sd, sharedData, source, client, storageInterface, dbPool), - _countStage("count", this, &MovePrimaryCollectionCloner::countStage), - _checkIfDonorCollectionIsEmptyStage( - "checkIfDonorCollectionIsEmpty", - this, - &MovePrimaryCollectionCloner::checkIfDonorCollectionIsEmptyStage), - _listIndexesStage("listIndexes", this, &MovePrimaryCollectionCloner::listIndexesStage), - _createCollectionStage( - "createCollection", this, &MovePrimaryCollectionCloner::createCollectionStage), - _queryStage("query", this, &MovePrimaryCollectionCloner::queryStage) {} - -repl::BaseCloner::ClonerStages MovePrimaryCollectionCloner::getStages() { - return {&_countStage, - &_checkIfDonorCollectionIsEmptyStage, - &_listIndexesStage, - &_createCollectionStage, - &_queryStage}; -} - -void MovePrimaryCollectionCloner::preStage() {} - -void MovePrimaryCollectionCloner::postStage() {} - -repl::BaseCloner::AfterStageBehavior -MovePrimaryCollectionCloner::MovePrimaryCollectionClonerStage::run() { - return ClonerStage::run(); -} - -repl::BaseCloner::AfterStageBehavior MovePrimaryCollectionCloner::countStage() { - return kContinueNormally; -} - -repl::BaseCloner::AfterStageBehavior -MovePrimaryCollectionCloner::checkIfDonorCollectionIsEmptyStage() { - return kContinueNormally; -} - -repl::BaseCloner::AfterStageBehavior MovePrimaryCollectionCloner::listIndexesStage() { - return kContinueNormally; -} - -repl::BaseCloner::AfterStageBehavior MovePrimaryCollectionCloner::createCollectionStage() { - return kContinueNormally; -} - -repl::BaseCloner::AfterStageBehavior MovePrimaryCollectionCloner::queryStage() { - return kContinueNormally; -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_collection_cloner.h b/src/mongo/db/s/move_primary/move_primary_collection_cloner.h deleted file mode 100644 index e408f9fab96b6..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_collection_cloner.h +++ /dev/null @@ -1,145 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include -#include - -#include "mongo/db/repl/base_cloner.h" -#include "mongo/db/repl/task_runner.h" -#include "mongo/db/s/move_primary/move_primary_base_cloner.h" -#include "mongo/db/s/move_primary/move_primary_shared_data.h" -#include "mongo/util/progress_meter.h" - -namespace mongo { - -class MovePrimaryCollectionCloner : public MovePrimaryBaseCloner { -public: - struct Stats { - std::string ns; - Date_t start; - Date_t end; - size_t documentToCopy{0}; - size_t documentsCopied{0}; - size_t indexes{0}; - size_t insertedBatches{0}; - size_t receivedBatches{0}; - long long avgObjSize{0}; - long long approxTotalDataSize{0}; - long long approxTotalBytesCopied{0}; - }; - - MovePrimaryCollectionCloner(MovePrimarySharedData* sharedData, - const HostAndPort& source, - DBClientConnection* client, - repl::StorageInterface* storageInterface, - ThreadPool* dbPool); - - virtual ~MovePrimaryCollectionCloner() = default; - -protected: - ClonerStages getStages() final; - -private: - friend class MovePrimaryCollectionClonerTest; - friend class MovePrimaryCollectionClonerStage; - - class MovePrimaryCollectionClonerStage : public ClonerStage { - public: - MovePrimaryCollectionClonerStage(std::string name, - MovePrimaryCollectionCloner* cloner, - ClonerRunFn stageFunc) - : ClonerStage(name, cloner, stageFunc) {} - AfterStageBehavior run() override; - - bool isTransientError(const Status& status) override { - // Always abort on error. - return false; - } - }; - - /** - * The preStage sets the start time in _stats. - */ - void preStage() final; - - /** - * The postStage sets the end time in _stats. - */ - void postStage() final; - - /** - * Stage function that counts the number of documents in the collection on the source in order - * to generate progress information. - */ - AfterStageBehavior countStage(); - - /** - * Stage function that checks to see if the donor collection is empty (and therefore we may - * race with createIndexes on empty collections) before running listIndexes. - */ - AfterStageBehavior checkIfDonorCollectionIsEmptyStage(); - - /** - * Stage function that gets the index information of the collection on the source to re-create - * it. - */ - AfterStageBehavior listIndexesStage(); - - /** - * Stage function that creates the collection using the storageInterface. This stage does not - * actually contact the sync source. - */ - AfterStageBehavior createCollectionStage(); - - /** - * Stage function that executes a query to retrieve all documents in the collection. 
For each - * batch returned by the upstream node, handleNextBatch will be called with the data. This - * stage will finish when the entire query is finished or failed. - */ - AfterStageBehavior queryStage(); - - // All member variables are labeled with one of the following codes indicating the - // synchronization rules for accessing them. - // - // (R) Read-only in concurrent operation; no synchronization required. - // (S) Self-synchronizing; access according to class's own rules. - // (M) Reads and writes guarded by _mutex (defined in base class). - // (X) Access only allowed from the main flow of control called from run() or constructor. - MovePrimaryCollectionClonerStage _countStage; // (R) - MovePrimaryCollectionClonerStage _checkIfDonorCollectionIsEmptyStage; // (R) - MovePrimaryCollectionClonerStage _listIndexesStage; // (R) - MovePrimaryCollectionClonerStage _createCollectionStage; // (R) - MovePrimaryCollectionClonerStage _queryStage; // (R) - - Stats _stats; // (M) -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_collection_cloner_test.cpp b/src/mongo/db/s/move_primary/move_primary_collection_cloner_test.cpp deleted file mode 100644 index 77b4c4d137603..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_collection_cloner_test.cpp +++ /dev/null @@ -1,81 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
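The removed header above defines the collection cloner as an ordered chain of named stages with preStage/postStage hooks around them. Below is a self-contained sketch of that stage-driven pattern using stand-in types; the real machinery lives in mongo/db/repl/base_cloner.h and also handles retries and transient-error classification, which this sketch omits.

```cpp
#include <functional>
#include <iostream>
#include <string>
#include <vector>

enum class AfterStageBehavior { kContinueNormally, kSkipRemainingStages };

struct ClonerStage {
    std::string name;
    std::function<AfterStageBehavior()> run;
};

class CollectionClonerSketch {
public:
    void runAll() {
        preStage();  // e.g. record the start time in _stats
        for (const auto& stage : stages()) {
            std::cout << "running stage: " << stage.name << "\n";
            if (stage.run() != AfterStageBehavior::kContinueNormally)
                break;  // a stage asked to stop early
        }
        postStage();  // e.g. record the end time in _stats
    }

private:
    std::vector<ClonerStage> stages() {
        // Mirrors the removed getStages() chain: count, emptiness check, listIndexes,
        // createCollection, then the document query.
        return {{"count", [] { return AfterStageBehavior::kContinueNormally; }},
                {"checkIfDonorCollectionIsEmpty",
                 [] { return AfterStageBehavior::kContinueNormally; }},
                {"listIndexes", [] { return AfterStageBehavior::kContinueNormally; }},
                {"createCollection", [] { return AfterStageBehavior::kContinueNormally; }},
                {"query", [] { return AfterStageBehavior::kContinueNormally; }}};
    }

    void preStage() {}
    void postStage() {}
};

int main() {
    CollectionClonerSketch{}.runAll();
}
```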
- */ - -#include "mongo/db/s/move_primary/move_primary_cloner_test_fixture.h" - -#include "mongo/db/clientcursor.h" -#include "mongo/db/repl/storage_interface.h" -#include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/s/move_primary/move_primary_collection_cloner.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - -namespace mongo { - -class MovePrimaryCollectionClonerTest : public MovePrimaryClonerTestFixture { -public: - MovePrimaryCollectionClonerTest() {} - -protected: - void setUp() override { - MovePrimaryClonerTestFixture::setUp(); - } - - void tearDown() { - MovePrimaryClonerTestFixture::tearDown(); - } - - std::unique_ptr makeCollectionCloner( - MovePrimarySharedData* sharedData = nullptr) { - return std::make_unique(sharedData ? sharedData - : getSharedData(), - _source, - _mockClient.get(), - &_storageInterface, - _dbWorkThreadPool.get()); - } - - const std::string _dbName = "_testDb"; - const NamespaceString _nss = {"_testDb", "testcoll"}; - CollectionOptions _options; -}; - -TEST_F(MovePrimaryCollectionClonerTest, DummyTest) { - MovePrimarySharedData dummySharedData(&_clock, _migrationId, ResumePhase::kDataSync); - auto cloner = makeCollectionCloner(&dummySharedData); -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_common_metadata.idl b/src/mongo/db/s/move_primary/move_primary_common_metadata.idl deleted file mode 100644 index 0d9256b5d725d..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_common_metadata.idl +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. -# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. -# - -global: - cpp_namespace: "mongo" - -imports: - - "mongo/db/basic_types.idl" - -structs: - MovePrimaryCommonMetadata: - description: "Common metadata for movePrimary operations" - strict: false - fields: - migrationId: - description: "Unique identifier for the movePrimary operation." 
- type: uuid - databaseName: - description: "The name of the database being moved as part of movePrimary." - type: namespacestring - fromShardName: - description: "The name of the shard the database is being moved from." - type: string - toShardName: - description: "The name of the shard the database is being moved to." - type: string diff --git a/src/mongo/db/s/move_primary/move_primary_cumulative_metrics.cpp b/src/mongo/db/s/move_primary/move_primary_cumulative_metrics.cpp deleted file mode 100644 index 2f971a5971624..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_cumulative_metrics.cpp +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
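The removed IDL above declares the common movePrimary metadata fields (migrationId, databaseName, fromShardName, toShardName). A rough sketch of the BSON shape such a document would carry, built with the BSON builder API, is below; the exact encoding of the uuid and namespacestring types is produced by the IDL-generated serializers, so treat this as an approximation only.

```cpp
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/util/uuid.h"

namespace mongo {
// Approximate shape of a serialized MovePrimaryCommonMetadata document, field names taken
// from the IDL above; values here are placeholders.
BSONObj makeExampleMovePrimaryMetadata() {
    BSONObjBuilder bob;
    UUID::gen().appendToBuilder(&bob, "migrationId");  // IDL type: uuid
    bob.append("databaseName", "testDb");              // IDL type: namespacestring
    bob.append("fromShardName", "shard0");             // IDL type: string
    bob.append("toShardName", "shard1");               // IDL type: string
    return bob.obj();
}
}  // namespace mongo
```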
- */ - -#include "mongo/db/s/move_primary/move_primary_cumulative_metrics.h" - -namespace mongo { -namespace { - -constexpr auto kMovePrimary = "movePrimary"; - -const auto kReportedStateFieldNamesMap = [] { - return MovePrimaryCumulativeMetrics::StateFieldNameMap{ - {MovePrimaryDonorStateEnum::kInitializing, "countInstancesInDonorState1Initializing"}, - {MovePrimaryDonorStateEnum::kCloning, "countInstancesInDonorState2Cloning"}, - {MovePrimaryDonorStateEnum::kWaitingToBlockWrites, - "countInstancesInDonorState3WaitingToBlockWrites"}, - {MovePrimaryDonorStateEnum::kBlockingWrites, "countInstancesInDonorState4BlockingWrites"}, - {MovePrimaryDonorStateEnum::kPrepared, "countInstancesInDonorState5Prepared"}, - {MovePrimaryDonorStateEnum::kAborted, "countInstancesInDonorState6Aborted"}, - {MovePrimaryRecipientStateEnum::kCloning, "countInstancesInRecipientState1Cloning"}, - {MovePrimaryRecipientStateEnum::kApplying, "countInstancesInRecipientState2Applying"}, - {MovePrimaryRecipientStateEnum::kBlocking, "countInstancesInRecipientState3Blocking"}, - {MovePrimaryRecipientStateEnum::kPrepared, "countInstancesInRecipientState4Prepared"}, - {MovePrimaryRecipientStateEnum::kAborted, "countInstancesInRecipientState5Aborted"}, - {MovePrimaryRecipientStateEnum::kDone, "countInstancesInRecipientState6Done"}}; -}(); - -} // namespace - -MovePrimaryCumulativeMetrics::MovePrimaryCumulativeMetrics() - : move_primary_cumulative_metrics::Base( - kMovePrimary, std::make_unique()), - _fieldNames( - static_cast(getFieldNames())) {} - -void MovePrimaryCumulativeMetrics::reportActive(BSONObjBuilder* bob) const { - ShardingDataTransformCumulativeMetrics::reportActive(bob); - reportOplogApplicationCountMetrics(_fieldNames, bob); -} - -void MovePrimaryCumulativeMetrics::reportLatencies(BSONObjBuilder* bob) const { - ShardingDataTransformCumulativeMetrics::reportLatencies(bob); - reportOplogApplicationLatencyMetrics(_fieldNames, bob); -} - -void MovePrimaryCumulativeMetrics::reportCurrentInSteps(BSONObjBuilder* bob) const { - ShardingDataTransformCumulativeMetrics::reportCurrentInSteps(bob); - reportCountsForAllStates(kReportedStateFieldNamesMap, bob); -} -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_cumulative_metrics.h b/src/mongo/db/s/move_primary/move_primary_cumulative_metrics.h deleted file mode 100644 index da4f70dd426ab..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_cumulative_metrics.h +++ /dev/null @@ -1,66 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. 
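The removed metrics code above maps each donor/recipient state to a serverStatus counter field and, when reporting, emits one count per state. A standalone sketch of that mapping-and-reporting pattern follows; it uses plain std containers instead of BSONObjBuilder, and the enum and field names are illustrative only.

```cpp
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

enum class DonorState { kInitializing, kCloning, kBlockingWrites, kPrepared, kAborted };

// State-to-field-name map, in the spirit of kReportedStateFieldNamesMap above.
const std::map<DonorState, std::string> kFieldNames{
    {DonorState::kInitializing, "countInstancesInDonorState1Initializing"},
    {DonorState::kCloning, "countInstancesInDonorState2Cloning"},
    {DonorState::kBlockingWrites, "countInstancesInDonorState4BlockingWrites"},
    {DonorState::kPrepared, "countInstancesInDonorState5Prepared"},
    {DonorState::kAborted, "countInstancesInDonorState6Aborted"}};

// Stand-in for reportCountsForAllStates(): emit one counter per known state,
// defaulting to zero when no instance is currently in that state.
std::map<std::string, std::int64_t> reportCurrentInSteps(
    const std::map<DonorState, std::int64_t>& liveCounts) {
    std::map<std::string, std::int64_t> report;
    for (const auto& [state, fieldName] : kFieldNames) {
        auto it = liveCounts.find(state);
        report[fieldName] = (it == liveCounts.end()) ? 0 : it->second;
    }
    return report;
}

int main() {
    for (const auto& [name, count] : reportCurrentInSteps({{DonorState::kCloning, 2}}))
        std::cout << name << ": " << count << "\n";
}
```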
If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/s/metrics/cumulative_metrics_state_holder.h" -#include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" -#include "mongo/db/s/metrics/sharding_data_transform_metrics_macros.h" -#include "mongo/db/s/metrics/with_oplog_application_count_metrics.h" -#include "mongo/db/s/metrics/with_oplog_application_latency_metrics.h" -#include "mongo/db/s/metrics/with_state_management_for_cumulative_metrics.h" -#include "mongo/db/s/move_primary/move_primary_cumulative_metrics_field_name_provider.h" -#include "mongo/db/s/move_primary/move_primary_state_machine_gen.h" - -namespace mongo { - -namespace move_primary_cumulative_metrics { -DEFINE_IDL_ENUM_SIZE_TEMPLATE_HELPER(MovePrimaryMetrics, - MovePrimaryDonorStateEnum, - MovePrimaryRecipientStateEnum) -using Base = WithOplogApplicationLatencyMetrics>>; -} // namespace move_primary_cumulative_metrics - -class MovePrimaryCumulativeMetrics : public move_primary_cumulative_metrics::Base { -public: - MovePrimaryCumulativeMetrics(); - -private: - virtual void reportActive(BSONObjBuilder* bob) const; - virtual void reportLatencies(BSONObjBuilder* bob) const; - virtual void reportCurrentInSteps(BSONObjBuilder* bob) const; - - const MovePrimaryCumulativeMetricsFieldNameProvider* _fieldNames; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_cumulative_metrics_field_name_provider.h b/src/mongo/db/s/move_primary/move_primary_cumulative_metrics_field_name_provider.h deleted file mode 100644 index 248af1dcae1e8..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_cumulative_metrics_field_name_provider.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include "mongo/db/s/metrics/field_names/sharding_data_transform_cumulative_metrics_field_name_provider.h" -#include "mongo/db/s/metrics/field_names/with_document_copy_count_field_name_overrides.h" -#include "mongo/db/s/metrics/field_names/with_oplog_application_count_metrics_field_names.h" -#include "mongo/db/s/metrics/field_names/with_oplog_application_latency_metrics_field_names.h" - -namespace mongo { - -class MovePrimaryCumulativeMetricsFieldNameProvider - : public WithOplogApplicationLatencyMetricsFieldNames< - WithOplogApplicationCountFieldNames>> {}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_database_cloner.cpp b/src/mongo/db/s/move_primary/move_primary_database_cloner.cpp deleted file mode 100644 index 67a8d3e90160c..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_database_cloner.cpp +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/s/move_primary/move_primary_database_cloner.h" - -#include "mongo/base/string_data.h" -#include "mongo/db/commands/list_collections_filter.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/cloner_utils.h" -#include "mongo/db/repl/database_cloner_gen.h" -#include "mongo/db/s/move_primary/move_primary_collection_cloner.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/util/assert_util.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kMovePrimary - -namespace mongo { - -MovePrimaryDatabaseCloner::MovePrimaryDatabaseCloner(const std::string& dbName, - MovePrimarySharedData* sharedData, - const HostAndPort& source, - DBClientConnection* client, - repl::StorageInterface* storageInterface, - ThreadPool* dbPool) - : MovePrimaryBaseCloner( - "MovePrimaryDatabaseCloner"_sd, sharedData, source, client, storageInterface, dbPool), - _listCollectionsStage( - "listCollections", this, &MovePrimaryDatabaseCloner::listCollectionsStage), - _listExistingCollectionsStage("listExistingCollections", - this, - &MovePrimaryDatabaseCloner::listExistingCollectionsStage) { - invariant(!dbName.empty()); -} - -repl::BaseCloner::ClonerStages MovePrimaryDatabaseCloner::getStages() { - return {&_listCollectionsStage, &_listExistingCollectionsStage}; -} - -void MovePrimaryDatabaseCloner::preStage() {} - -repl::BaseCloner::AfterStageBehavior MovePrimaryDatabaseCloner::listCollectionsStage() { - return kContinueNormally; -} - -repl::BaseCloner::AfterStageBehavior MovePrimaryDatabaseCloner::listExistingCollectionsStage() { - return kContinueNormally; -} - -void MovePrimaryDatabaseCloner::postStage() {} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_database_cloner.h b/src/mongo/db/s/move_primary/move_primary_database_cloner.h deleted file mode 100644 index 1c46954c2a70a..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_database_cloner.h +++ /dev/null @@ -1,122 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include - -#include "mongo/db/repl/base_cloner.h" -#include "mongo/db/s/move_primary/move_primary_base_cloner.h" -#include "mongo/db/s/move_primary/move_primary_collection_cloner.h" -#include "mongo/db/s/move_primary/move_primary_shared_data.h" - -namespace mongo { - -class MovePrimaryDatabaseCloner final : public MovePrimaryBaseCloner { -public: - struct Stats { - std::string dbname; - Date_t start; - Date_t end; - size_t collections{0}; - size_t clonedCollections{0}; - size_t clonedCollectionsBeforeFailover{0}; - - std::vector collectionStats; - long long approxTotalBytesCopied{0}; - }; - - MovePrimaryDatabaseCloner(const std::string& dbName, - MovePrimarySharedData* sharedData, - const HostAndPort& source, - DBClientConnection* client, - repl::StorageInterface* storageInterface, - ThreadPool* dbPool); - - virtual ~MovePrimaryDatabaseCloner() = default; - -protected: - ClonerStages getStages() final; - -private: - friend class MovePrimaryDatabaseClonerTest; - - class MovePrimaryDatabaseClonerStage : public ClonerStage { - public: - MovePrimaryDatabaseClonerStage(std::string name, - MovePrimaryDatabaseCloner* cloner, - ClonerRunFn stageFunc) - : ClonerStage(name, cloner, stageFunc) {} - - bool isTransientError(const Status& status) override { - // Always abort on error. - return false; - } - }; - - /** - * Stage function that retrieves collection information from the donor. - */ - AfterStageBehavior listCollectionsStage(); - - /** - * Stage function that retrieves collection information locally for collections that are already - * cloned. - */ - AfterStageBehavior listExistingCollectionsStage(); - - /** - * The preStage sets the start time in _stats. - */ - void preStage() final; - - /** - * The postStage creates and runs the individual MovePrimaryCollectionCloners on each database - * found on the sync source, and sets the end time in _stats when done. - */ - void postStage() final; - - // All member variables are labeled with one of the following codes indicating the - // synchronization rules for accessing them. - // - // (R) Read-only in concurrent operation; no synchronization required. - // (S) Self-synchronizing; access according to class's own rules. - // (M) Reads and writes guarded by _mutex (defined in base class). - // (X) Access only allowed from the main flow of control called from run() or constructor. - // (MX) Write access with mutex from main flow of control, read access with mutex from other - // threads, read access allowed from main flow without mutex. - const std::string _dbName; // (R) - - MovePrimaryDatabaseClonerStage _listCollectionsStage; // (R) - MovePrimaryDatabaseClonerStage _listExistingCollectionsStage; // (R) - - Stats _stats; // (MX) -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_database_cloner_test.cpp b/src/mongo/db/s/move_primary/move_primary_database_cloner_test.cpp deleted file mode 100644 index 3ef8bd6eed823..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_database_cloner_test.cpp +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
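The two stages documented in the removed database cloner header, listCollections against the donor and listExistingCollections against local storage, exist so that a resumed clone only processes collections not already present locally. A standalone sketch of that set-difference idea is below; it is an illustration, not the removed implementation.

```cpp
#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>
#include <string>
#include <vector>

// Donor collections minus already-cloned local collections = remaining work for the
// per-collection cloners created in postStage().
std::vector<std::string> collectionsLeftToClone(const std::set<std::string>& donorCollections,
                                                const std::set<std::string>& alreadyCloned) {
    std::vector<std::string> remaining;
    std::set_difference(donorCollections.begin(), donorCollections.end(),
                        alreadyCloned.begin(), alreadyCloned.end(),
                        std::back_inserter(remaining));
    return remaining;
}

int main() {
    auto remaining =
        collectionsLeftToClone({"testDb.a", "testDb.b", "testDb.c"}, {"testDb.a"});
    for (const auto& nss : remaining)
        std::cout << "still to clone: " << nss << "\n";  // testDb.b, testDb.c
}
```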
See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/s/move_primary/move_primary_database_cloner.h" - -#include "mongo/db/clientcursor.h" -#include "mongo/db/repl/storage_interface.h" -#include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/s/move_primary/move_primary_cloner_test_fixture.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" -#include "mongo/util/concurrency/thread_pool.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - -namespace mongo { - -class MovePrimaryDatabaseClonerTest : public MovePrimaryClonerTestFixture { -public: - MovePrimaryDatabaseClonerTest() {} - -protected: - void setUp() override { - MovePrimaryClonerTestFixture::setUp(); - } - - void tearDown() { - MovePrimaryClonerTestFixture::tearDown(); - } - - std::unique_ptr makeDatabaseCloner( - MovePrimarySharedData* sharedData = nullptr) { - return std::make_unique(_dbName, - sharedData ? sharedData - : getSharedData(), - _source, - _mockClient.get(), - &_storageInterface, - _dbWorkThreadPool.get()); - } - - const std::string _dbName = "_testDb"; -}; - -TEST_F(MovePrimaryDatabaseClonerTest, DummyTest) { - MovePrimarySharedData dummySharedData(&_clock, _migrationId, ResumePhase::kDataSync); - auto cloner = makeDatabaseCloner(&dummySharedData); -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_donor_service.cpp b/src/mongo/db/s/move_primary/move_primary_donor_service.cpp deleted file mode 100644 index ae82cda7b079d..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_donor_service.cpp +++ /dev/null @@ -1,776 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/s/move_primary/move_primary_donor_service.h" - -#include "mongo/db/persistent_task_store.h" -#include "mongo/db/s/move_primary/move_primary_recipient_cmds_gen.h" -#include "mongo/db/s/move_primary/move_primary_server_parameters_gen.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/s/grid.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kMovePrimary - -namespace mongo { -namespace { -// Both of these failpoints have the same implementation. A single failpoint can't be active -// multiple times with different arguments, but setting up more complex scenarios sometimes requires -// multiple failpoints. -MONGO_FAIL_POINT_DEFINE(pauseDuringMovePrimaryDonorStateEnumTransition); -MONGO_FAIL_POINT_DEFINE(pauseDuringMovePrimaryDonorStateEnumTransitionAlternate); - -MONGO_FAIL_POINT_DEFINE(pauseBeforeBeginningMovePrimaryDonorWorkflow); -MONGO_FAIL_POINT_DEFINE(pauseBeforeMovePrimaryDonorPersistsBlockTimestamp); -MONGO_FAIL_POINT_DEFINE(pauseBeforeBeginningMovePrimaryDonorCleanup); - -enum StateTransitionProgress { - kBefore, // Prior to any changes for state. - kPartial, // After updating on-disk state, but before updating in-memory state. - kAfter // After updating in-memory state. -}; - -const auto kProgressArgMap = [] { - return stdx::unordered_map{ - {"before", StateTransitionProgress::kBefore}, - {"partial", StateTransitionProgress::kPartial}, - {"after", StateTransitionProgress::kAfter}}; -}(); - -boost::optional readProgressArgument(const BSONObj& data) { - auto arg = data.getStringField("progress"); - auto it = kProgressArgMap.find(arg.toString()); - if (it == kProgressArgMap.end()) { - return boost::none; - } - return it->second; -} - -boost::optional readStateArgument(const BSONObj& data) { - try { - auto arg = data.getStringField("state"); - IDLParserContext ectx("pauseDuringMovePrimaryDonorStateEnumTransition::readStateArgument"); - return MovePrimaryDonorState_parse(ectx, arg); - } catch (...) 
{ - return boost::none; - } -} - -void evaluatePauseDuringStateTransitionFailpoint(StateTransitionProgress progress, - MovePrimaryDonorStateEnum newState, - FailPoint& failpoint) { - failpoint.executeIf( - [&](const auto& data) { failpoint.pauseWhileSet(); }, - [&](const auto& data) { - auto desiredProgress = readProgressArgument(data); - auto desiredState = readStateArgument(data); - if (!desiredProgress.has_value() || !desiredState.has_value()) { - LOGV2(7306200, - "pauseDuringMovePrimaryDonorStateEnumTransition failpoint data must contain " - "progress and state arguments", - "failpoint"_attr = failpoint.getName(), - "data"_attr = data); - return false; - } - return *desiredProgress == progress && *desiredState == newState; - }); -} - -void evaluatePauseDuringStateTransitionFailpoints(StateTransitionProgress progress, - MovePrimaryDonorStateEnum newState) { - const auto fps = {std::ref(pauseDuringMovePrimaryDonorStateEnumTransition), - std::ref(pauseDuringMovePrimaryDonorStateEnumTransitionAlternate)}; - for (auto& fp : fps) { - evaluatePauseDuringStateTransitionFailpoint(progress, newState, fp); - } -} - -Status deserializeStatus(const BSONObj& bson) { - auto code = ErrorCodes::Error(bson["code"].numberInt()); - auto reason = bson["errmsg"].String(); - return Status{code, reason}; -} - -} // namespace - -MovePrimaryDonorService::MovePrimaryDonorService(ServiceContext* serviceContext) - : PrimaryOnlyService{serviceContext}, _serviceContext{serviceContext} {} - -StringData MovePrimaryDonorService::getServiceName() const { - return kServiceName; -} - -NamespaceString MovePrimaryDonorService::getStateDocumentsNS() const { - return NamespaceString::kMovePrimaryDonorNamespace; -} - -ThreadPool::Limits MovePrimaryDonorService::getThreadPoolLimits() const { - ThreadPool::Limits limits; - limits.minThreads = gMovePrimaryDonorServiceMinThreadCount; - limits.maxThreads = gMovePrimaryDonorServiceMaxThreadCount; - return limits; -} - -void MovePrimaryDonorService::checkIfConflictsWithOtherInstances( - OperationContext* opCtx, - BSONObj initialState, - const std::vector& existingInstances) { - auto initialDoc = MovePrimaryDonorDocument::parse( - IDLParserContext("MovePrimaryDonorCheckIfConflictsWithOtherInstances"), initialState); - const auto& newMetadata = initialDoc.getMetadata(); - for (const auto& instance : existingInstances) { - auto typed = checked_cast(instance); - const auto& existingMetadata = typed->getMetadata(); - uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "Existing movePrimary operation for database " - << newMetadata.getDatabaseName() << " is still ongoing", - newMetadata.getDatabaseName() != existingMetadata.getDatabaseName()); - } -} - -std::shared_ptr MovePrimaryDonorService::constructInstance( - BSONObj initialState) { - auto initialDoc = MovePrimaryDonorDocument::parse( - IDLParserContext("MovePrimaryDonorServiceConstructInstance"), initialState); - return std::make_shared(_serviceContext, - this, - initialDoc, - getInstanceCleanupExecutor(), - _makeDependencies(initialDoc)); -} - -std::vector> MovePrimaryDonorService::getAllDonorInstances( - OperationContext* opCtx) { - std::vector> result; - auto instances = getAllInstances(opCtx); - for (const auto& instance : instances) { - result.push_back(checked_pointer_cast(instance)); - } - return result; -} - -MovePrimaryDonorDependencies MovePrimaryDonorService::_makeDependencies( - const MovePrimaryDonorDocument& initialDoc) { - return {std::make_unique(initialDoc.getMetadata())}; -} - 
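
The `pauseDuringMovePrimaryDonorStateEnumTransition` failpoints defined at the top of this file accept a `progress` argument (`before`, `partial`, or `after`) plus a `state` argument naming the `MovePrimaryDonorStateEnum` value at which to pause. A minimal test-body sketch of arming one of them, modeled on the `pauseStateTransitionImpl` helper in the deleted donor service test further down; the choice of `kCloning` and the surrounding test fixture are illustrative assumptions, not part of the deleted sources:

```cpp
// Illustrative sketch of a test-body fragment (assumes the usual unit-test fixture).
auto fp = globalFailPointRegistry().find("pauseDuringMovePrimaryDonorStateEnumTransition");
auto timesEntered = fp->setMode(
    FailPoint::alwaysOn,
    0,
    fromjson(fmt::format("{{progress: '{}', state: '{}'}}",
                         "after",
                         MovePrimaryDonorState_serializer(MovePrimaryDonorStateEnum::kCloning))));

// ... create the MovePrimaryDonor instance and let its workflow run ...

fp->waitForTimesEntered(timesEntered + 1);  // donor paused after its in-memory transition to kCloning
// ... inspect the state document / reported metrics here ...
fp->setMode(FailPoint::off);                // release the failpoint so the donor continues
```

Using `MovePrimaryDonorState_serializer` to build the failpoint data keeps the `state` string in sync with whatever the IDL-generated serializer produces, which is how the deleted test fixture does it.
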
-MovePrimaryDonorCancelState::MovePrimaryDonorCancelState(const CancellationToken& stepdownToken) - : _stepdownToken{stepdownToken}, - _abortSource{stepdownToken}, - _abortToken{_abortSource.token()} {} - -const CancellationToken& MovePrimaryDonorCancelState::getStepdownToken() { - return _stepdownToken; -} - -const CancellationToken& MovePrimaryDonorCancelState::getAbortToken() { - return _abortToken; -} - -bool MovePrimaryDonorCancelState::isSteppingDown() const { - return _stepdownToken.isCanceled(); -} - -void MovePrimaryDonorCancelState::abort() { - _abortSource.cancel(); -} - -const Backoff MovePrimaryDonorRetryHelper::kBackoff{Seconds(1), Milliseconds::max()}; - -MovePrimaryDonorRetryHelper::MovePrimaryDonorRetryHelper( - const MovePrimaryCommonMetadata& metadata, - std::shared_ptr taskExecutor, - MovePrimaryDonorCancelState* cancelState) - : _metadata{metadata}, - _taskExecutor{taskExecutor}, - _markKilledExecutor{std::make_shared([] { - ThreadPool::Options options; - options.poolName = "MovePrimaryDonorRetryHelperCancelableOpCtxPool"; - options.minThreads = 1; - options.maxThreads = 1; - return options; - }())}, - _cancelState{cancelState}, - _cancelOnStepdownFactory{_cancelState->getStepdownToken(), _markKilledExecutor}, - _cancelOnAbortFactory{_cancelState->getAbortToken(), _markKilledExecutor} { - _markKilledExecutor->startup(); -} - -void MovePrimaryDonorRetryHelper::_handleTransientError(const std::string& operationName, - const Status& status) { - LOGV2(7306301, - "MovePrimaryDonor has encountered a transient error", - "operation"_attr = operationName, - "status"_attr = redact(status), - "migrationId"_attr = _metadata.getMigrationId(), - "databaseName"_attr = _metadata.getDatabaseName(), - "toShard"_attr = _metadata.getToShardName()); -} - -void MovePrimaryDonorRetryHelper::_handleUnrecoverableError(const std::string& operationName, - const Status& status) { - LOGV2(7306302, - "MovePrimaryDonor has encountered an unrecoverable error", - "operation"_attr = operationName, - "status"_attr = redact(status), - "migrationId"_attr = _metadata.getMigrationId(), - "databaseName"_attr = _metadata.getDatabaseName(), - "toShard"_attr = _metadata.getToShardName()); -} - -ExecutorFuture MovePrimaryDonorRetryHelper::_waitForMajorityOrStepdown( - const std::string& operationName) { - auto cancelToken = _cancelState->getStepdownToken(); - return _untilStepdownOrSuccess(operationName, [cancelToken](const auto& factory) { - auto opCtx = factory.makeOperationContext(&cc()); - auto client = opCtx->getClient(); - repl::ReplClientInfo::forClient(client).setLastOpToSystemLastOpTime(opCtx.get()); - auto opTime = repl::ReplClientInfo::forClient(client).getLastOp(); - return WaitForMajorityService::get(client->getServiceContext()) - .waitUntilMajority(opTime, cancelToken); - }); -} - -MovePrimaryDonorExternalState::MovePrimaryDonorExternalState( - const MovePrimaryCommonMetadata& metadata) - : _metadata{metadata} {} - - -const MovePrimaryCommonMetadata& MovePrimaryDonorExternalState::getMetadata() const { - return _metadata; -} - -ShardId MovePrimaryDonorExternalState::getRecipientShardId() const { - return ShardId{_metadata.getToShardName().toString()}; -} - -void MovePrimaryDonorExternalState::syncDataOnRecipient(OperationContext* opCtx) { - syncDataOnRecipient(opCtx, boost::none); -} - -void MovePrimaryDonorExternalState::syncDataOnRecipient(OperationContext* opCtx, - boost::optional timestamp) { - MovePrimaryRecipientSyncData request; - request.setMovePrimaryCommonMetadata(getMetadata()); - 
request.setDbName(getMetadata().getDatabaseName().db()); - if (timestamp) { - request.setReturnAfterReachingDonorTimestamp(*timestamp); - } - _runCommandOnRecipient(opCtx, request.toBSON({})); -} - -void MovePrimaryDonorExternalState::abortMigrationOnRecipient(OperationContext* opCtx) { - MovePrimaryRecipientAbortMigration request; - request.setMovePrimaryCommonMetadata(getMetadata()); - request.setDbName(getMetadata().getDatabaseName().db()); - _runCommandOnRecipient(opCtx, request.toBSON({})); -} - -void MovePrimaryDonorExternalState::forgetMigrationOnRecipient(OperationContext* opCtx) { - MovePrimaryRecipientForgetMigration request; - request.setMovePrimaryCommonMetadata(getMetadata()); - request.setDbName(getMetadata().getDatabaseName().db()); - _runCommandOnRecipient(opCtx, request.toBSON({})); -} - -void MovePrimaryDonorExternalState::_runCommandOnRecipient(OperationContext* opCtx, - const BSONObj& command) { - auto response = runCommand(opCtx, - getRecipientShardId(), - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - DatabaseName::kAdmin.toString(), - command, - Shard::RetryPolicy::kNoRetry); - uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(response)); -} - -MovePrimaryDonorExternalStateImpl::MovePrimaryDonorExternalStateImpl( - const MovePrimaryCommonMetadata& metadata) - : MovePrimaryDonorExternalState{metadata} {} - -StatusWith MovePrimaryDonorExternalStateImpl::runCommand( - OperationContext* opCtx, - const ShardId& shardId, - const ReadPreferenceSetting& readPref, - const std::string& dbName, - const BSONObj& cmdObj, - Shard::RetryPolicy retryPolicy) { - auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId)); - return shard->runCommand(opCtx, readPref, dbName, cmdObj, retryPolicy); -} - -std::shared_ptr MovePrimaryDonor::get(OperationContext* opCtx, - const DatabaseName& dbName, - const ShardId& toShard) { - auto registry = repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()); - auto service = checked_cast( - registry->lookupServiceByName(MovePrimaryDonorService::kServiceName)); - auto instances = service->getAllDonorInstances(opCtx); - for (const auto& instance : instances) { - if (_matchesArguments(instance, dbName, toShard)) { - return instance; - } - } - return nullptr; -} - -std::shared_ptr MovePrimaryDonor::create(OperationContext* opCtx, - const DatabaseName& dbName, - const ShardId& toShard) { - auto registry = repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()); - auto service = registry->lookupServiceByName(MovePrimaryDonorService::kServiceName); - MovePrimaryCommonMetadata metadata; - metadata.setMigrationId(UUID::gen()); - metadata.setDatabaseName(NamespaceString{dbName.toString()}); - metadata.setFromShardName(ShardingState::get(opCtx)->shardId()); - metadata.setToShardName(toShard.toString()); - MovePrimaryDonorDocument document; - document.setId(metadata.getMigrationId()); - document.setMetadata(std::move(metadata)); - auto donor = MovePrimaryDonor::getOrCreate(opCtx, service, document.toBSON()); - uassert(7309100, - "Unable to create MovePrimaryDonor using the following initial state: {}"_format( - redact(document.toBSON()).toString()), - donor); - return donor; -} - -bool MovePrimaryDonor::_matchesArguments(const std::shared_ptr& instance, - const DatabaseName& dbName, - const ShardId& toShard) { - const auto& metadata = instance->getMetadata(); - if (dbName != metadata.getDatabaseName().db()) { - return false; - } - if (toShard.toString() != metadata.getToShardName()) { - return 
false; - } - return true; -} - -MovePrimaryDonor::MovePrimaryDonor(ServiceContext* serviceContext, - MovePrimaryDonorService* donorService, - MovePrimaryDonorDocument initialState, - const std::shared_ptr& cleanupExecutor, - MovePrimaryDonorDependencies dependencies) - : _serviceContext{serviceContext}, - _donorService{donorService}, - _metadata{std::move(initialState.getMetadata())}, - _mutableFields{std::move(initialState.getMutableFields())}, - _metrics{MovePrimaryMetrics::initializeFrom(initialState, _serviceContext)}, - _cleanupExecutor{cleanupExecutor}, - _externalState{std::move(dependencies.externalState)} { - if (auto abortReason = _mutableFields.getAbortReason()) { - _abortReason = deserializeStatus(abortReason->getOwned()); - } - _metrics->onStateTransition(boost::none, _getCurrentState()); -} - -SemiFuture MovePrimaryDonor::run(std::shared_ptr executor, - const CancellationToken& stepdownToken) noexcept { - _initializeRun(executor, stepdownToken); - _completionPromise.setFrom( - _runDonorWorkflow().unsafeToInlineFuture().tapError([](const Status& status) { - LOGV2(7306201, "MovePrimaryDonor encountered an error", "error"_attr = redact(status)); - })); - return _completionPromise.getFuture().semi(); -} - -void MovePrimaryDonor::interrupt(Status status) {} - -boost::optional MovePrimaryDonor::reportForCurrentOp( - MongoProcessInterface::CurrentOpConnectionsMode connMode, - MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept { - return _metrics->reportForCurrentOp(); -} - -void MovePrimaryDonor::checkIfOptionsConflict(const BSONObj& stateDoc) const { - auto otherDoc = MovePrimaryDonorDocument::parse( - IDLParserContext("MovePrimaryDonorCheckIfOptionsConflict"), stateDoc); - const auto& otherMetadata = otherDoc.getMetadata(); - const auto& metadata = getMetadata(); - invariant(metadata.getMigrationId() == otherMetadata.getMigrationId()); - uassert(ErrorCodes::ConflictingOperationInProgress, - "Existing movePrimary operation exists with same id, but incompatible arguments", - metadata.getDatabaseName() == otherMetadata.getDatabaseName() && - metadata.getToShardName() == otherMetadata.getToShardName()); -} - -const MovePrimaryCommonMetadata& MovePrimaryDonor::getMetadata() const { - return _metadata; -} - -SharedSemiFuture MovePrimaryDonor::getReadyToBlockWritesFuture() const { - return _progressedToReadyToBlockWritesPromise.getFuture(); -} - -SharedSemiFuture MovePrimaryDonor::getDecisionFuture() const { - return _progressedToDecisionPromise.getFuture(); -} - -SharedSemiFuture MovePrimaryDonor::getCompletionFuture() const { - return _completionPromise.getFuture(); -} - -MovePrimaryDonorStateEnum MovePrimaryDonor::_getCurrentState() const { - stdx::unique_lock lock(_mutex); - return _mutableFields.getState(); -} - -MovePrimaryDonorMutableFields MovePrimaryDonor::_getMutableFields() const { - stdx::unique_lock lock(_mutex); - return _mutableFields; -} - -bool MovePrimaryDonor::_isAborted(WithLock) const { - return _abortReason.has_value(); -} - -boost::optional MovePrimaryDonor::_getAbortReason() const { - stdx::unique_lock lock(_mutex); - return _abortReason; -} - -Status MovePrimaryDonor::_getOperationStatus() const { - return _getAbortReason().value_or(Status::OK()); -} - -MovePrimaryDonorDocument MovePrimaryDonor::_buildCurrentStateDocument() const { - MovePrimaryDonorDocument doc; - doc.setMetadata(getMetadata()); - doc.setMutableFields(_getMutableFields()); - doc.setId(getMetadata().getMigrationId()); - return doc; -} - -void 
MovePrimaryDonor::_initializeRun(std::shared_ptr executor, - const CancellationToken& stepdownToken) { - stdx::unique_lock lock(_mutex); - _taskExecutor = executor; - _cancelState.emplace(stepdownToken); - _retry.emplace(_metadata, _taskExecutor, _cancelState.get_ptr()); - if (_isAborted(lock)) { - _cancelState->abort(); - } -} - -ExecutorFuture MovePrimaryDonor::_runDonorWorkflow() { - using State = MovePrimaryDonorStateEnum; - return _runOnTaskExecutor([] { pauseBeforeBeginningMovePrimaryDonorWorkflow.pauseWhileSet(); }) - .then([this] { return _transitionToState(State::kInitializing); }) - .then([this] { return _doInitializing(); }) - .then([this] { return _transitionToState(State::kCloning); }) - .then([this] { return _doCloning(); }) - .then([this] { return _transitionToState(State::kWaitingToBlockWrites); }) - .then([this] { return _doWaitingToBlockWrites(); }) - .then([this] { return _transitionToState(State::kBlockingWrites); }) - .then([this] { return _doBlockingWrites(); }) - .then([this] { return _transitionToState(State::kPrepared); }) - .onCompletion([this](Status result) { - if (result.isOK()) { - return _doPrepared(); - } - abort(result); - return _ensureAbortReasonSetInStateDocument() - .then([this] { return _transitionToState(State::kAborted); }) - .then([this] { return _doAbort(); }); - }) - .then([this] { return _waitForForgetThenDoCleanup(); }) - .thenRunOn(_cleanupExecutor) - .onCompletion([this, self = shared_from_this()](Status okOrStepdownError) { - bool steppingDown = - _cancelState->isSteppingDown() || (**_taskExecutor)->isShuttingDown(); - invariant(okOrStepdownError.isOK() || steppingDown); - const auto& finalResult = steppingDown ? okOrStepdownError : _getOperationStatus(); - _ensureProgressPromisesAreFulfilled(finalResult); - return finalResult; - }); -} - -bool MovePrimaryDonor::_allowedToAbortDuringStateTransition( - MovePrimaryDonorStateEnum newState) const { - switch (newState) { - case MovePrimaryDonorStateEnum::kAborted: - case MovePrimaryDonorStateEnum::kDone: - return false; - default: - return true; - } -} - -ExecutorFuture MovePrimaryDonor::_transitionToState(MovePrimaryDonorStateEnum newState) { - auto op = fmt::format("transitionToState({})", MovePrimaryDonorState_serializer(newState)); - auto action = [this, newState](const auto& factory) { - auto opCtx = factory.makeOperationContext(&cc()); - _tryTransitionToStateOnce(opCtx.get(), newState); - }; - if (_allowedToAbortDuringStateTransition(newState)) { - return _retry->untilAbortOrMajorityCommit(op, action); - } - return _retry->untilStepdownOrMajorityCommit(op, action); -} - -void MovePrimaryDonor::_tryTransitionToStateOnce(OperationContext* opCtx, - MovePrimaryDonorStateEnum newState) { - auto oldState = _getCurrentState(); - if (oldState >= newState) { - return; - } - auto newDocument = _buildCurrentStateDocument(); - newDocument.getMutableFields().setState(newState); - evaluatePauseDuringStateTransitionFailpoints(StateTransitionProgress::kBefore, newState); - - _updateOnDiskState(opCtx, newDocument); - - evaluatePauseDuringStateTransitionFailpoints(StateTransitionProgress::kPartial, newState); - - _updateInMemoryState(newDocument); - _metrics->onStateTransition(oldState, newState); - - LOGV2(7306300, - "MovePrimaryDonor transitioned state", - "oldState"_attr = MovePrimaryDonorState_serializer(oldState), - "newState"_attr = MovePrimaryDonorState_serializer(newState), - "migrationId"_attr = _metadata.getMigrationId(), - "databaseName"_attr = _metadata.getDatabaseName(), - "toShard"_attr = 
_metadata.getToShardName()); - - evaluatePauseDuringStateTransitionFailpoints(StateTransitionProgress::kAfter, newState); -} - -void MovePrimaryDonor::_updateOnDiskState(OperationContext* opCtx, - const MovePrimaryDonorDocument& newStateDocument) { - PersistentTaskStore store(_donorService->getStateDocumentsNS()); - auto oldState = _getCurrentState(); - auto newState = newStateDocument.getMutableFields().getState(); - if (oldState == MovePrimaryDonorStateEnum::kUnused) { - store.add(opCtx, newStateDocument, WriteConcerns::kLocalWriteConcern); - } else if (newState == MovePrimaryDonorStateEnum::kDone) { - store.remove(opCtx, - BSON(MovePrimaryDonorDocument::kIdFieldName << _metadata.getMigrationId()), - WriteConcerns::kLocalWriteConcern); - } else { - store.update(opCtx, - BSON(MovePrimaryDonorDocument::kIdFieldName << _metadata.getMigrationId()), - BSON("$set" << BSON(MovePrimaryDonorDocument::kMutableFieldsFieldName - << newStateDocument.getMutableFields().toBSON())), - WriteConcerns::kLocalWriteConcern); - } -} - -void MovePrimaryDonor::_updateInMemoryState(const MovePrimaryDonorDocument& newStateDocument) { - stdx::unique_lock lock(_mutex); - _mutableFields = newStateDocument.getMutableFields(); -} - -ExecutorFuture MovePrimaryDonor::_doInitializing() { - return _retry->untilAbortOrMajorityCommit("doInitializing()", [](const auto& factory) { - // TODO: SERVER-74757 - }); -} - -ExecutorFuture MovePrimaryDonor::_doNothing() { - return _runOnTaskExecutor([] {}); -} - -ExecutorFuture MovePrimaryDonor::_doCloning() { - if (_getCurrentState() > MovePrimaryDonorStateEnum::kCloning) { - return _doNothing(); - } - return _retry->untilAbortOrMajorityCommit("doCloning()", [this](const auto& factory) { - auto opCtx = factory.makeOperationContext(&cc()); - _externalState->syncDataOnRecipient(opCtx.get()); - }); -} - -ExecutorFuture MovePrimaryDonor::_doWaitingToBlockWrites() { - if (_getCurrentState() > MovePrimaryDonorStateEnum::kWaitingToBlockWrites) { - return _doNothing(); - } - return _waitUntilReadyToBlockWrites() - .then([this] { return _waitUntilCurrentlyBlockingWrites(); }) - .then([this](Timestamp blockingWritesTimestamp) { - pauseBeforeMovePrimaryDonorPersistsBlockTimestamp.pauseWhileSet(); - return _persistBlockingWritesTimestamp(blockingWritesTimestamp); - }); -} - -ExecutorFuture MovePrimaryDonor::_waitUntilReadyToBlockWrites() { - return _runOnTaskExecutor([this] { - // TODO SERVER-74933: Use commit monitor to determine when to engage critical section. 
- LOGV2(7306500, - "MovePrimaryDonor ready to block writes", - "migrationId"_attr = _metadata.getMigrationId(), - "databaseName"_attr = _metadata.getDatabaseName(), - "toShard"_attr = _metadata.getToShardName()); - - _progressedToReadyToBlockWritesPromise.setFrom(Status::OK()); - }); -} - -ExecutorFuture MovePrimaryDonor::_waitUntilCurrentlyBlockingWrites() { - return _runOnTaskExecutor([this] { - return future_util::withCancellation(_currentlyBlockingWritesPromise.getFuture(), - _cancelState->getAbortToken()); - }); -} - -void MovePrimaryDonor::onBeganBlockingWrites(StatusWith blockingWritesTimestamp) { - _currentlyBlockingWritesPromise.setFrom(blockingWritesTimestamp); -} - -void MovePrimaryDonor::onReadyToForget() { - stdx::unique_lock lock(_mutex); - if (_readyToForgetPromise.getFuture().isReady()) { - return; - } - _readyToForgetPromise.setFrom(Status::OK()); -} - -void MovePrimaryDonor::abort(Status reason) { - invariant(!reason.isOK()); - stdx::unique_lock lock(_mutex); - if (_isAborted(lock)) { - return; - } - - _abortReason = reason; - if (_cancelState) { - _cancelState->abort(); - } - - LOGV2(7306700, - "MovePrimaryDonor has received signal to abort", - "reason"_attr = redact(reason), - "migrationId"_attr = _metadata.getMigrationId(), - "databaseName"_attr = _metadata.getDatabaseName(), - "toShard"_attr = _metadata.getToShardName()); -} - -bool MovePrimaryDonor::isAborted() const { - stdx::unique_lock lock(_mutex); - return _isAborted(lock); -} - -ExecutorFuture MovePrimaryDonor::_persistBlockingWritesTimestamp( - Timestamp blockingWritesTimestamp) { - return _retry->untilAbortOrMajorityCommit( - fmt::format("persistBlockingWritesTimestamp({})", blockingWritesTimestamp.toString()), - [this, blockingWritesTimestamp](const auto& factory) { - auto opCtx = factory.makeOperationContext(&cc()); - auto newStateDocument = _buildCurrentStateDocument(); - newStateDocument.getMutableFields().setBlockingWritesTimestamp(blockingWritesTimestamp); - _updateOnDiskState(opCtx.get(), newStateDocument); - _updateInMemoryState(newStateDocument); - - LOGV2(7306501, - "MovePrimaryDonor persisted block timestamp", - "blockingWritesTimestamp"_attr = blockingWritesTimestamp, - "migrationId"_attr = _metadata.getMigrationId(), - "databaseName"_attr = _metadata.getDatabaseName(), - "toShard"_attr = _metadata.getToShardName()); - }); -} - -ExecutorFuture MovePrimaryDonor::_doBlockingWrites() { - if (_getCurrentState() > MovePrimaryDonorStateEnum::kBlockingWrites) { - return _doNothing(); - } - return _retry->untilAbortOrMajorityCommit("doBlockingWrites()", [this](const auto& factory) { - auto opCtx = factory.makeOperationContext(&cc()); - auto timestamp = _getMutableFields().getBlockingWritesTimestamp(); - invariant(timestamp); - _externalState->syncDataOnRecipient(opCtx.get(), *timestamp); - }); -} - -ExecutorFuture MovePrimaryDonor::_doPrepared() { - return _runOnTaskExecutor([this] { _progressedToDecisionPromise.setFrom(Status::OK()); }); -} - -ExecutorFuture MovePrimaryDonor::_waitForForgetThenDoCleanup() { - return _runOnTaskExecutor([this] { - return future_util::withCancellation(_readyToForgetPromise.getFuture(), - _cancelState->getStepdownToken()); - }) - .then([this] { pauseBeforeBeginningMovePrimaryDonorCleanup.pauseWhileSet(); }) - .then([this] { return _doCleanup(); }) - .then([this] { return _transitionToState(MovePrimaryDonorStateEnum::kDone); }); -} - -ExecutorFuture MovePrimaryDonor::_doCleanup() { - return _doAbortIfRequired().then([this] { return _doForget(); }); -} - -ExecutorFuture 
MovePrimaryDonor::_doAbortIfRequired() { - if (!isAborted()) { - return _doNothing(); - } - return _doAbort(); -} - -ExecutorFuture MovePrimaryDonor::_ensureAbortReasonSetInStateDocument() { - return _runOnTaskExecutor([this] { - auto doc = _buildCurrentStateDocument(); - if (doc.getMutableFields().getAbortReason()) { - return; - } - auto reason = _getAbortReason(); - invariant(reason); - BSONObjBuilder bob; - reason->serializeErrorToBSON(&bob); - doc.getMutableFields().setAbortReason(bob.obj()); - _updateInMemoryState(doc); - }); -} - -ExecutorFuture MovePrimaryDonor::_doAbort() { - return _retry->untilStepdownOrMajorityCommit("doAbort()", [this](const auto& factory) { - _ensureProgressPromisesAreFulfilled(_getOperationStatus()); - auto opCtx = factory.makeOperationContext(&cc()); - _externalState->abortMigrationOnRecipient(opCtx.get()); - }); -} - -ExecutorFuture MovePrimaryDonor::_doForget() { - return _retry->untilStepdownOrMajorityCommit("doForget()", [this](const auto& factory) { - auto opCtx = factory.makeOperationContext(&cc()); - _externalState->forgetMigrationOnRecipient(opCtx.get()); - }); -} - -void MovePrimaryDonor::_ensureProgressPromisesAreFulfilled(Status result) { - if (!_progressedToReadyToBlockWritesPromise.getFuture().isReady()) { - _progressedToReadyToBlockWritesPromise.setFrom(result); - } - if (!_progressedToDecisionPromise.getFuture().isReady()) { - _progressedToDecisionPromise.setFrom(result); - } -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_donor_service.h b/src/mongo/db/s/move_primary/move_primary_donor_service.h deleted file mode 100644 index 3236334e7b187..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_donor_service.h +++ /dev/null @@ -1,329 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/s/move_primary/move_primary_metrics.h" -#include "mongo/db/s/move_primary/move_primary_state_machine_gen.h" -#include "mongo/db/s/resharding/resharding_future_util.h" -#include "mongo/s/client/shard.h" - -namespace mongo { - -class MovePrimaryDonor; -struct MovePrimaryDonorDependencies; - -class MovePrimaryDonorService : public repl::PrimaryOnlyService { -public: - static constexpr StringData kServiceName = "MovePrimaryDonorService"_sd; - - MovePrimaryDonorService(ServiceContext* serviceContext); - - StringData getServiceName() const override; - - NamespaceString getStateDocumentsNS() const override; - - ThreadPool::Limits getThreadPoolLimits() const override; - - void checkIfConflictsWithOtherInstances( - OperationContext* opCtx, - BSONObj initialState, - const std::vector& existingInstances) override; - - std::shared_ptr constructInstance(BSONObj initialState) override; - - std::vector> getAllDonorInstances(OperationContext* opCtx); - -protected: - virtual MovePrimaryDonorDependencies _makeDependencies( - const MovePrimaryDonorDocument& initialDoc); - -private: - ServiceContext* _serviceContext; -}; - -class MovePrimaryDonorCancelState { -public: - MovePrimaryDonorCancelState(const CancellationToken& stepdownToken); - const CancellationToken& getStepdownToken(); - const CancellationToken& getAbortToken(); - bool isSteppingDown() const; - void abort(); - -private: - CancellationToken _stepdownToken; - CancellationSource _abortSource; - CancellationToken _abortToken; -}; - -// Retries indefinitely unless this node is stepping down. Intended to be used with -// resharding::RetryingCancelableOperationContextFactory for cases where an operation failure is not -// allowed to abort the task being performed by a PrimaryOnlyService (e.g. because that -// task is already aborted, or past the point where aborts are allowed). 
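
The `IndefiniteRetryProvider` template declared immediately below packages that behaviour: `until()` wraps the supplied body in an `AsyncTry` and re-runs it until the predicate accepts the result. A rough usage sketch, chained the same way `MovePrimaryDonorRetryHelper` chains its own retry pipelines; `attemptStep()`, `backoff`, `executor`, and `stepdownToken` are illustrative placeholders rather than names from the deleted header:

```cpp
// Illustrative only: keep re-running one step until it reports success, backing off
// between attempts; the pipeline gives up only when the executor/token cancels it
// (i.e. on stepdown), mirroring MovePrimaryDonorRetryHelper::_untilStepdownOrSuccess.
auto future =
    IndefiniteRetryProvider{[&] { return attemptStep(); }}          // attemptStep() returns a Status
        .until([](const Status& status) { return status.isOK(); })  // stop once the step succeeds
        .withBackoffBetweenIterations(backoff)                       // e.g. the retry helper's kBackoff
        .on(executor, stepdownToken);                                // retries end only on stepdown
```
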
-template -class [[nodiscard]] IndefiniteRetryProvider { -public: - explicit IndefiniteRetryProvider(BodyCallable&& body) : _body{std::move(body)} {} - - template - auto until(Predicate&& predicate) && { - using StatusType = - decltype(std::declval>>().getNoThrow()); - static_assert( - std::is_invocable_r_v, - "Predicate to until() must implement call operator accepting Status or StatusWith " - "type that would be returned by this class's BodyCallable, and must return bool"); - return AsyncTry(std::move(_body)) - .until([predicate = std::move(predicate)](const auto& statusLike) { - return predicate(statusLike); - }); - } - -private: - BodyCallable _body; -}; -class MovePrimaryDonorRetryHelper { -public: - MovePrimaryDonorRetryHelper(const MovePrimaryCommonMetadata& metadata, - std::shared_ptr executor, - MovePrimaryDonorCancelState* cancelState); - - template - auto untilStepdownOrMajorityCommit(const std::string& operationName, Fn&& fn) { - return _untilStepdownOrSuccess(operationName, std::forward(fn)) - .then([this, operationName] { return _waitForMajorityOrStepdown(operationName); }); - } - - template - auto untilAbortOrMajorityCommit(const std::string& operationName, Fn&& fn) { - return _untilAbortOrSuccess(operationName, std::forward(fn)) - .then([this, operationName] { return _waitForMajorityOrStepdown(operationName); }); - } - -private: - template - auto _untilStepdownOrSuccess(const std::string& operationName, Fn&& fn) { - return _cancelOnStepdownFactory - .withAutomaticRetry(std::forward(fn)) - .until([this, operationName](const auto& statusLike) { - if (!statusLike.isOK()) { - _handleTransientError(operationName, statusLike); - } - return statusLike.isOK(); - }) - .withBackoffBetweenIterations(kBackoff) - .on(**_taskExecutor, _cancelState->getStepdownToken()); - } - - template - auto _untilAbortOrSuccess(const std::string& operationName, Fn&& fn) { - using FuturizedResultType = - FutureContinuationResult; - using StatusifiedResultType = - decltype(std::declval>().getNoThrow()); - return _cancelOnAbortFactory.withAutomaticRetry(std::forward(fn)) - .onTransientError([this, operationName](const Status& status) { - _handleTransientError(operationName, status); - }) - .onUnrecoverableError([this, operationName](const Status& status) { - _handleUnrecoverableError(operationName, status); - }) - .template until( - [](const auto& statusLike) { return statusLike.isOK(); }) - .withBackoffBetweenIterations(kBackoff) - .on(**_taskExecutor, _cancelState->getAbortToken()); - } - - void _handleTransientError(const std::string& operationName, const Status& status); - void _handleUnrecoverableError(const std::string& operationName, const Status& status); - ExecutorFuture _waitForMajorityOrStepdown(const std::string& operationName); - - const static Backoff kBackoff; - - const MovePrimaryCommonMetadata _metadata; - std::shared_ptr _taskExecutor; - std::shared_ptr _markKilledExecutor; - MovePrimaryDonorCancelState* _cancelState; - resharding::RetryingCancelableOperationContextFactory _cancelOnStepdownFactory; - resharding::RetryingCancelableOperationContextFactory _cancelOnAbortFactory; -}; - -class MovePrimaryDonorExternalState { -public: - MovePrimaryDonorExternalState(const MovePrimaryCommonMetadata& metadata); - virtual ~MovePrimaryDonorExternalState() = default; - - void syncDataOnRecipient(OperationContext* opCtx); - void syncDataOnRecipient(OperationContext* opCtx, boost::optional timestamp); - void abortMigrationOnRecipient(OperationContext* opCtx); - void 
forgetMigrationOnRecipient(OperationContext* opCtx); - -protected: - virtual StatusWith runCommand(OperationContext* opCtx, - const ShardId& shardId, - const ReadPreferenceSetting& readPref, - const std::string& dbName, - const BSONObj& cmdObj, - Shard::RetryPolicy retryPolicy) = 0; - - const MovePrimaryCommonMetadata& getMetadata() const; - ShardId getRecipientShardId() const; - -private: - void _runCommandOnRecipient(OperationContext* opCtx, const BSONObj& command); - - MovePrimaryCommonMetadata _metadata; -}; - -class MovePrimaryDonorExternalStateImpl : public MovePrimaryDonorExternalState { -public: - MovePrimaryDonorExternalStateImpl(const MovePrimaryCommonMetadata& metadata); - -protected: - StatusWith runCommand(OperationContext* opCtx, - const ShardId& shardId, - const ReadPreferenceSetting& readPref, - const std::string& dbName, - const BSONObj& cmdObj, - Shard::RetryPolicy retryPolicy) override; -}; - -struct MovePrimaryDonorDependencies { - std::unique_ptr externalState; -}; - -class MovePrimaryDonor : public repl::PrimaryOnlyService::TypedInstance { -public: - static std::shared_ptr get(OperationContext* opCtx, - const DatabaseName& dbName, - const ShardId& toShard); - static std::shared_ptr create(OperationContext* opCtx, - const DatabaseName& dbName, - const ShardId& toShard); - - MovePrimaryDonor(ServiceContext* serviceContext, - MovePrimaryDonorService* donorService, - MovePrimaryDonorDocument initialState, - const std::shared_ptr& cleanupExecutor, - MovePrimaryDonorDependencies dependencies); - - SemiFuture run(std::shared_ptr executor, - const CancellationToken& stepdownToken) noexcept override; - - void interrupt(Status status) override; - - boost::optional reportForCurrentOp( - MongoProcessInterface::CurrentOpConnectionsMode connMode, - MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept override; - - void checkIfOptionsConflict(const BSONObj& stateDoc) const override; - - const MovePrimaryCommonMetadata& getMetadata() const; - - void onBeganBlockingWrites(StatusWith blockingWritesTimestamp); - void onReadyToForget(); - - void abort(Status reason); - bool isAborted() const; - - SharedSemiFuture getReadyToBlockWritesFuture() const; - SharedSemiFuture getDecisionFuture() const; - SharedSemiFuture getCompletionFuture() const; - -private: - static bool _matchesArguments(const std::shared_ptr& instance, - const DatabaseName& dbName, - const ShardId& toShard); - - MovePrimaryDonorStateEnum _getCurrentState() const; - MovePrimaryDonorMutableFields _getMutableFields() const; - bool _isAborted(WithLock) const; - boost::optional _getAbortReason() const; - Status _getOperationStatus() const; - MovePrimaryDonorDocument _buildCurrentStateDocument() const; - - void _initializeRun(std::shared_ptr executor, - const CancellationToken& stepdownToken); - ExecutorFuture _runDonorWorkflow(); - ExecutorFuture _transitionToState(MovePrimaryDonorStateEnum state); - ExecutorFuture _doNothing(); - ExecutorFuture _doInitializing(); - ExecutorFuture _doCloning(); - ExecutorFuture _doWaitingToBlockWrites(); - ExecutorFuture _doBlockingWrites(); - ExecutorFuture _waitUntilReadyToBlockWrites(); - ExecutorFuture _waitUntilCurrentlyBlockingWrites(); - ExecutorFuture _persistBlockingWritesTimestamp(Timestamp blockingWritesTimestamp); - ExecutorFuture _doPrepared(); - ExecutorFuture _waitForForgetThenDoCleanup(); - ExecutorFuture _doCleanup(); - ExecutorFuture _doAbortIfRequired(); - ExecutorFuture _ensureAbortReasonSetInStateDocument(); - ExecutorFuture _doAbort(); - ExecutorFuture 
_doForget(); - bool _allowedToAbortDuringStateTransition(MovePrimaryDonorStateEnum newState) const; - void _tryTransitionToStateOnce(OperationContext* opCtx, MovePrimaryDonorStateEnum newState); - void _updateOnDiskState(OperationContext* opCtx, - const MovePrimaryDonorDocument& newStateDocument); - void _updateInMemoryState(const MovePrimaryDonorDocument& newStateDocument); - void _ensureProgressPromisesAreFulfilled(Status result); - - template - auto _runOnTaskExecutor(Fn&& fn) { - return ExecutorFuture(**_taskExecutor).then(std::forward(fn)); - } - - mutable Mutex _mutex = MONGO_MAKE_LATCH("MovePrimaryDonor::_mutex"); - ServiceContext* _serviceContext; - MovePrimaryDonorService* const _donorService; - const MovePrimaryCommonMetadata _metadata; - - boost::optional _abortReason; - MovePrimaryDonorMutableFields _mutableFields; - std::unique_ptr _metrics; - - std::shared_ptr _cleanupExecutor; - std::shared_ptr _taskExecutor; - boost::optional _cancelState; - boost::optional _retry; - - std::unique_ptr _externalState; - - SharedPromise _progressedToReadyToBlockWritesPromise; - SharedPromise _progressedToDecisionPromise; - - SharedPromise _currentlyBlockingWritesPromise; - SharedPromise _readyToForgetPromise; - - SharedPromise _completionPromise; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_donor_service_test.cpp b/src/mongo/db/s/move_primary/move_primary_donor_service_test.cpp deleted file mode 100644 index 56442402256f6..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_donor_service_test.cpp +++ /dev/null @@ -1,969 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/primary_only_service_test_fixture.h" -#include "mongo/db/s/move_primary/move_primary_donor_service.h" -#include "mongo/db/s/move_primary/move_primary_recipient_cmds_gen.h" -#include "mongo/logv2/log.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - -namespace mongo { -namespace { - -const auto kDatabaseName = NamespaceString{"testDb"}; -constexpr auto kOldPrimaryShardName = "oldPrimaryId"; -constexpr auto kNewPrimaryShardName = "newPrimaryId"; -const StatusWith kOkResponse = - Shard::CommandResponse{boost::none, BSON("ok" << 1), Status::OK(), Status::OK()}; -const Status kRetryableError{ErrorCodes::Interrupted, "Interrupted"}; -const Status kUnrecoverableError{ErrorCodes::UnknownError, "Something bad happened"}; -const Status kAbortedError{ErrorCodes::MovePrimaryAborted, "MovePrimary aborted"}; - -struct CommandDetails { - CommandDetails(const ShardId& shardId, - const ReadPreferenceSetting& readPreference, - const std::string& databaseName, - const BSONObj& command, - Shard::RetryPolicy retryPolicy) - : shardId{shardId}, - readPreference{readPreference}, - databaseName{databaseName}, - command{command.getOwned()}, - retryPolicy{retryPolicy} {} - - ShardId shardId; - ReadPreferenceSetting readPreference; - std::string databaseName; - BSONObj command; - Shard::RetryPolicy retryPolicy; -}; - -class FakeCommandRunner { -public: - StatusWith runCommand(OperationContext* opCtx, - const ShardId& shardId, - const ReadPreferenceSetting& readPref, - const std::string& dbName, - const BSONObj& cmdObj, - Shard::RetryPolicy retryPolicy) { - stdx::unique_lock lock{_mutex}; - _commandHistory.emplace_back(shardId, readPref, dbName, cmdObj, retryPolicy); - if (_nextResponses.empty()) { - return kOkResponse; - } - auto response = _nextResponses.front(); - _nextResponses.pop_front(); - return response; - } - - boost::optional getLastCommandDetails() { - stdx::unique_lock lock{_mutex}; - if (_commandHistory.empty()) { - return boost::none; - } - return _commandHistory.back(); - } - - const std::list& getCommandHistory() const { - stdx::unique_lock lock{_mutex}; - return _commandHistory; - } - - size_t getCommandsRunCount() const { - stdx::unique_lock lock{_mutex}; - return _commandHistory.size(); - } - - void addNextResponse(StatusWith response) { - stdx::unique_lock lock{_mutex}; - _nextResponses.push_back(std::move(response)); - } - -private: - mutable Mutex _mutex; - std::list> _nextResponses; - std::list _commandHistory; -}; - -class MovePrimaryDonorExternalStateForTest : public MovePrimaryDonorExternalState { -public: - MovePrimaryDonorExternalStateForTest(const MovePrimaryCommonMetadata& metadata, - const std::shared_ptr& commandRunner) - : MovePrimaryDonorExternalState{metadata}, _commandRunner{commandRunner} {} - -protected: - StatusWith runCommand(OperationContext* opCtx, - const ShardId& shardId, - const ReadPreferenceSetting& readPref, - const std::string& dbName, - const BSONObj& cmdObj, - Shard::RetryPolicy retryPolicy) { - return _commandRunner->runCommand(opCtx, shardId, readPref, dbName, cmdObj, retryPolicy); - }; - -private: - std::shared_ptr _commandRunner; -}; - -class MovePrimaryDonorServiceForTest : public MovePrimaryDonorService { -public: - MovePrimaryDonorServiceForTest(ServiceContext* serviceContext, - const std::shared_ptr& commandRunner) - : MovePrimaryDonorService{serviceContext}, _commandRunner{commandRunner} {} - -protected: - virtual MovePrimaryDonorDependencies 
_makeDependencies( - const MovePrimaryDonorDocument& initialDoc) override { - return {std::make_unique(initialDoc.getMetadata(), - _commandRunner)}; - } - -private: - std::shared_ptr _commandRunner; -}; - -class MovePrimaryDonorServiceTest : public repl::PrimaryOnlyServiceMongoDTest { -protected: - using DonorInstance = MovePrimaryDonor; - - MovePrimaryDonorServiceTest() : _commandRunner{std::make_shared()} {} - - FakeCommandRunner& getCommandRunner() { - return *_commandRunner; - } - - auto getCurrentTimestamp() { - return Timestamp{getServiceContext()->getFastClockSource()->now()}; - } - - std::unique_ptr makeService(ServiceContext* serviceContext) override { - return std::make_unique(serviceContext, _commandRunner); - } - - MovePrimaryCommonMetadata createMetadata() const { - MovePrimaryCommonMetadata metadata; - metadata.setMigrationId(UUID::gen()); - metadata.setDatabaseName(kDatabaseName); - metadata.setFromShardName(kOldPrimaryShardName); - metadata.setToShardName(kNewPrimaryShardName); - return metadata; - } - - MovePrimaryDonorDocument createStateDocument() const { - MovePrimaryDonorDocument doc; - auto metadata = createMetadata(); - doc.setMetadata(metadata); - doc.setId(metadata.getMigrationId()); - return doc; - } - - MovePrimaryDonorDocument getStateDocumentOnDisk(OperationContext* opCtx, UUID instanceId) { - DBDirectClient client(opCtx); - auto doc = client.findOne(NamespaceString::kMovePrimaryDonorNamespace, - BSON(MovePrimaryDonorDocument::kIdFieldName << instanceId)); - IDLParserContext errCtx("MovePrimaryDonorServiceTest::getStateDocumentOnDisk()"); - return MovePrimaryDonorDocument::parse(errCtx, doc); - } - - MovePrimaryDonorDocument getStateDocumentOnDisk( - OperationContext* opCtx, const std::shared_ptr& instance) { - return getStateDocumentOnDisk(opCtx, instance->getMetadata().getMigrationId()); - } - - static constexpr auto kBefore = "before"; - static constexpr auto kPartial = "partial"; - static constexpr auto kAfter = "after"; - - auto pauseStateTransitionImpl(const std::string& progress, - MovePrimaryDonorStateEnum state, - const std::string& failpointName) { - auto fp = globalFailPointRegistry().find(failpointName); - auto count = fp->setMode(FailPoint::alwaysOn, - 0, - fromjson(fmt::format("{{progress: '{}', state: '{}'}}", - progress, - MovePrimaryDonorState_serializer(state)))); - return std::tuple{fp, count}; - } - - auto pauseStateTransition(const std::string& progress, MovePrimaryDonorStateEnum state) { - return pauseStateTransitionImpl( - progress, state, "pauseDuringMovePrimaryDonorStateEnumTransition"); - } - - auto pauseStateTransitionAlternate(const std::string& progress, - MovePrimaryDonorStateEnum state) { - return pauseStateTransitionImpl( - progress, state, "pauseDuringMovePrimaryDonorStateEnumTransitionAlternate"); - } - - auto failCrudOpsOn(NamespaceString nss, ErrorCodes::Error code) { - auto fp = globalFailPointRegistry().find("failCommand"); - auto count = - fp->setMode(FailPoint::alwaysOn, - 0, - fromjson(fmt::format("{{failCommands:['insert', 'update', 'delete'], " - "namespace: '{}', failLocalClients: true, " - "failInternalCommands: true, errorCode: {}}}", - nss.toString(), - code))); - return std::tuple{fp, count}; - } - - BSONObj getMetrics(const std::shared_ptr& instance) { - auto currentOp = instance->reportForCurrentOp( - MongoProcessInterface::CurrentOpConnectionsMode::kExcludeIdle, - MongoProcessInterface::CurrentOpSessionsMode::kExcludeIdle); - ASSERT_TRUE(currentOp); - return *currentOp; - } - - std::shared_ptr createInstance() 
{ - auto opCtx = makeOperationContext(); - return createInstance(opCtx.get()); - } - - std::shared_ptr createInstance(OperationContext* opCtx) { - auto stateDoc = createStateDocument(); - return DonorInstance::getOrCreate(opCtx, _service, stateDoc.toBSON()); - } - - std::shared_ptr getExistingInstance(OperationContext* opCtx, const UUID& id) { - auto instanceId = BSON(MovePrimaryDonorDocument::kIdFieldName << id); - auto instance = DonorInstance::lookup(opCtx, _service, instanceId); - if (!instance) { - return nullptr; - } - return *instance; - } - - bool mustBlockWritesToReachState(MovePrimaryDonorStateEnum state) { - if (state == MovePrimaryDonorStateEnum::kAborted) { - return false; - } - return state >= MovePrimaryDonorStateEnum::kBlockingWrites; - } - - bool mustForgetToReachState(MovePrimaryDonorStateEnum state) { - return state == MovePrimaryDonorStateEnum::kDone; - } - - bool mustAbortToReachState(MovePrimaryDonorStateEnum state) { - return state == MovePrimaryDonorStateEnum::kAborted; - } - - auto createInstanceBeforeOrAfterState(OperationContext* opCtx, - const std::string& beforeOrAfter, - MovePrimaryDonorStateEnum state) { - auto [fp, count] = pauseStateTransition(beforeOrAfter, state); - auto instance = createInstance(opCtx); - if (mustBlockWritesToReachState(state)) { - instance->onBeganBlockingWrites(getCurrentTimestamp()); - } - if (mustForgetToReachState(state)) { - instance->onReadyToForget(); - } - if (mustAbortToReachState(state)) { - instance->abort(kAbortedError); - } - fp->waitForTimesEntered(count + 1); - if (beforeOrAfter == kAfter) { - ASSERT_EQ(getState(instance), state); - } - return std::tuple{instance, fp}; - } - - auto createInstanceInState(OperationContext* opCtx, MovePrimaryDonorStateEnum state) { - return createInstanceBeforeOrAfterState(opCtx, kAfter, state); - } - - auto createInstanceInState(MovePrimaryDonorStateEnum state) { - auto opCtx = makeOperationContext(); - return createInstanceInState(opCtx.get(), state); - } - - auto createInstanceBeforeState(OperationContext* opCtx, MovePrimaryDonorStateEnum state) { - return createInstanceBeforeOrAfterState(opCtx, kBefore, state); - } - - auto createInstanceBeforeState(MovePrimaryDonorStateEnum state) { - auto opCtx = makeOperationContext(); - return createInstanceBeforeState(opCtx.get(), state); - } - - MovePrimaryDonorStateEnum getState(const std::shared_ptr& instance) { - auto stateString = getMetrics(instance).getStringField("state").toString(); - IDLParserContext errCtx("MovePrimaryDonorServiceTest::getState()"); - return MovePrimaryDonorState_parse(errCtx, stateString); - } - - Timestamp getBlockingWritesTimestamp(OperationContext* opCtx, - const std::shared_ptr& instance) { - auto doc = getStateDocumentOnDisk(opCtx, instance->getMetadata().getMigrationId()); - auto timestamp = doc.getMutableFields().getBlockingWritesTimestamp(); - ASSERT_TRUE(timestamp.has_value()); - return *timestamp; - } - - void makeReadyToComplete(const std::shared_ptr& instance) { - auto state = getState(instance); - if (state <= MovePrimaryDonorStateEnum::kWaitingToBlockWrites) { - instance->onBeganBlockingWrites(getCurrentTimestamp()); - } - instance->onReadyToForget(); - } - - void assertCompletesAppropriately(const std::shared_ptr& instance) { - makeReadyToComplete(instance); - auto result = instance->getCompletionFuture().getNoThrow(); - if (instance->isAborted()) { - ASSERT_NOT_OK(result); - } else { - ASSERT_OK(result); - } - } - - void unpauseAndAssertCompletesAppropriately(FailPoint* fp, - const std::shared_ptr& 
instance) { - fp->setMode(FailPoint::off); - assertCompletesAppropriately(instance); - } - - auto createInstanceInStateAndSimulateFailover(OperationContext* opCtx, - MovePrimaryDonorStateEnum state) { - boost::optional instanceId; - { - auto [instance, fp] = createInstanceInState(opCtx, state); - instanceId = instance->getMetadata().getMigrationId(); - stepDown(); - fp->setMode(FailPoint::off); - ASSERT_NOT_OK(instance->getCompletionFuture().getNoThrow()); - } - auto fp = globalFailPointRegistry().find("pauseBeforeBeginningMovePrimaryDonorWorkflow"); - auto count = fp->setMode(FailPoint::alwaysOn); - stepUp(opCtx); - auto instance = getExistingInstance(opCtx, *instanceId); - fp->waitForTimesEntered(count + 1); - return std::tuple{instance, fp}; - } - - void testStateTransitionUpdatesOnDiskState(MovePrimaryDonorStateEnum state) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = createInstanceInState(opCtx.get(), state); - - auto onDiskState = getStateDocumentOnDisk(opCtx.get(), instance); - ASSERT_EQ(onDiskState.getMutableFields().getState(), state); - - unpauseAndAssertCompletesAppropriately(fp, instance); - } - - void testStateTransitionAbortsOnUnrecoverableError(MovePrimaryDonorStateEnum state) { - auto opCtx = makeOperationContext(); - auto [instance, beforeFp] = createInstanceBeforeState(opCtx.get(), state); - auto [failCrud, crudCount] = - failCrudOpsOn(NamespaceString::kMovePrimaryDonorNamespace, kUnrecoverableError.code()); - auto [afterFp, afterCount] = - pauseStateTransitionAlternate(kAfter, MovePrimaryDonorStateEnum::kAborted); - - beforeFp->setMode(FailPoint::off); - failCrud->waitForTimesEntered(crudCount + 1); - failCrud->setMode(FailPoint::off); - afterFp->waitForTimesEntered(afterCount + 1); - - unpauseAndAssertCompletesAppropriately(afterFp, instance); - ASSERT_TRUE(instance->isAborted()); - } - - void testStateTransitionUpdatesOnDiskStateWithWriteFailure(MovePrimaryDonorStateEnum state) { - auto opCtx = makeOperationContext(); - auto [instance, beforeFp] = createInstanceBeforeState(opCtx.get(), state); - auto [afterFp, afterCount] = pauseStateTransitionAlternate(kAfter, state); - - auto [failCrud, crudCount] = - failCrudOpsOn(NamespaceString::kMovePrimaryDonorNamespace, kRetryableError.code()); - beforeFp->setMode(FailPoint::off); - failCrud->waitForTimesEntered(crudCount + 1); - failCrud->setMode(FailPoint::off); - - afterFp->waitForTimesEntered(afterCount + 1); - auto onDiskState = getStateDocumentOnDisk(opCtx.get(), instance); - ASSERT_EQ(onDiskState.getMutableFields().getState(), state); - - unpauseAndAssertCompletesAppropriately(afterFp, instance); - } - - void testStateTransitionUpdatesInMemoryState(MovePrimaryDonorStateEnum state) { - auto [instance, fp] = createInstanceInState(state); - - ASSERT_EQ(getMetrics(instance).getStringField("state"), - MovePrimaryDonorState_serializer(state)); - - unpauseAndAssertCompletesAppropriately(fp, instance); - } - - void testStepUpInState(MovePrimaryDonorStateEnum state) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = createInstanceInStateAndSimulateFailover(opCtx.get(), state); - unpauseAndAssertCompletesAppropriately(fp, instance); - } - - void assertSentRecipientAbortCommand() { - auto history = getCommandRunner().getCommandHistory(); - auto recipientAbortsSent = 0; - for (const auto& details : history) { - if (details.command.hasField(MovePrimaryRecipientAbortMigration::kCommandName)) { - recipientAbortsSent++; - } - } - ASSERT_GTE(recipientAbortsSent, 1); - } - - void 
testAbortInState(MovePrimaryDonorStateEnum state) { - auto [instance, fp] = createInstanceInState(state); - instance->abort(kAbortedError); - unpauseAndAssertCompletesAppropriately(fp, instance); - - ASSERT_TRUE(instance->isAborted()); - assertSentRecipientAbortCommand(); - } - -private: - std::shared_ptr _commandRunner; -}; - -TEST_F(MovePrimaryDonorServiceTest, GetMetadata) { - auto opCtx = makeOperationContext(); - auto stateDoc = createStateDocument(); - auto instance = DonorInstance::getOrCreate(opCtx.get(), _service, stateDoc.toBSON()); - ASSERT_BSONOBJ_EQ(stateDoc.getMetadata().toBSON(), instance->getMetadata().toBSON()); -} - -TEST_F(MovePrimaryDonorServiceTest, CannotCreateTwoInstancesForSameDb) { - auto opCtx = makeOperationContext(); - auto stateDoc = createStateDocument(); - auto instance = DonorInstance::getOrCreate(opCtx.get(), _service, stateDoc.toBSON()); - auto otherStateDoc = stateDoc; - auto otherMigrationId = UUID::gen(); - otherStateDoc.getMetadata().setMigrationId(otherMigrationId); - otherStateDoc.setId(otherMigrationId); - ASSERT_THROWS_CODE(DonorInstance::getOrCreate(opCtx.get(), _service, otherStateDoc.toBSON()), - DBException, - ErrorCodes::ConflictingOperationInProgress); -} - -TEST_F(MovePrimaryDonorServiceTest, SameUuidMustHaveSameDb) { - auto opCtx = makeOperationContext(); - auto stateDoc = createStateDocument(); - auto instance = DonorInstance::getOrCreate(opCtx.get(), _service, stateDoc.toBSON()); - auto otherStateDoc = stateDoc; - otherStateDoc.getMetadata().setDatabaseName(NamespaceString{"someOtherDb"}); - ASSERT_THROWS_CODE(DonorInstance::getOrCreate(opCtx.get(), _service, otherStateDoc.toBSON()), - DBException, - ErrorCodes::ConflictingOperationInProgress); -} - -TEST_F(MovePrimaryDonorServiceTest, SameUuidMustHaveSameRecipient) { - auto opCtx = makeOperationContext(); - auto stateDoc = createStateDocument(); - auto instance = DonorInstance::getOrCreate(opCtx.get(), _service, stateDoc.toBSON()); - auto otherStateDoc = stateDoc; - otherStateDoc.getMetadata().setToShardName("someOtherShard"); - ASSERT_THROWS_CODE(DonorInstance::getOrCreate(opCtx.get(), _service, otherStateDoc.toBSON()), - DBException, - ErrorCodes::ConflictingOperationInProgress); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentIsPersistedAfterInitializing) { - testStateTransitionUpdatesOnDiskState(MovePrimaryDonorStateEnum::kInitializing); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentIsUpdatedDuringCloning) { - testStateTransitionUpdatesOnDiskState(MovePrimaryDonorStateEnum::kCloning); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentIsUpdatedDuringWaitingToBlockWrites) { - testStateTransitionUpdatesOnDiskState(MovePrimaryDonorStateEnum::kWaitingToBlockWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentIsUpdatedDuringBlockingWrites) { - testStateTransitionUpdatesOnDiskState(MovePrimaryDonorStateEnum::kBlockingWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentIsUpdatedDuringPrepared) { - testStateTransitionUpdatesOnDiskState(MovePrimaryDonorStateEnum::kPrepared); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentIsUpdatedDuringAborted) { - testStateTransitionUpdatesOnDiskState(MovePrimaryDonorStateEnum::kAborted); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentInsertionRetriesIfWriteFails) { - testStateTransitionUpdatesOnDiskStateWithWriteFailure(MovePrimaryDonorStateEnum::kInitializing); -} - -TEST_F(MovePrimaryDonorServiceTest, TransitionToCloningRetriesIfWriteFails) { - 
testStateTransitionUpdatesOnDiskStateWithWriteFailure(MovePrimaryDonorStateEnum::kCloning); -} - -TEST_F(MovePrimaryDonorServiceTest, TransitionToWaitingToBlockWritesRetriesIfWriteFails) { - testStateTransitionUpdatesOnDiskStateWithWriteFailure( - MovePrimaryDonorStateEnum::kWaitingToBlockWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, TransitionToBlockingWritesRetriesIfWriteFails) { - testStateTransitionUpdatesOnDiskStateWithWriteFailure( - MovePrimaryDonorStateEnum::kBlockingWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, TransitionToPreparedRetriesIfWriteFails) { - testStateTransitionUpdatesOnDiskStateWithWriteFailure(MovePrimaryDonorStateEnum::kPrepared); -} - -TEST_F(MovePrimaryDonorServiceTest, TransitionToAbortedRetriesIfWriteFails) { - testStateTransitionUpdatesOnDiskStateWithWriteFailure(MovePrimaryDonorStateEnum::kAborted); -} - -TEST_F(MovePrimaryDonorServiceTest, InitializingUpdatesInMemoryState) { - testStateTransitionUpdatesInMemoryState(MovePrimaryDonorStateEnum::kInitializing); -} - -TEST_F(MovePrimaryDonorServiceTest, CloningUpdatesInMemoryState) { - testStateTransitionUpdatesInMemoryState(MovePrimaryDonorStateEnum::kCloning); -} - -TEST_F(MovePrimaryDonorServiceTest, WaitingToBlockWritesUpdatesInMemoryState) { - testStateTransitionUpdatesInMemoryState(MovePrimaryDonorStateEnum::kWaitingToBlockWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, BlockingWritesUpdatesInMemoryState) { - testStateTransitionUpdatesInMemoryState(MovePrimaryDonorStateEnum::kBlockingWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, PreparedUpdatesInMemoryState) { - testStateTransitionUpdatesInMemoryState(MovePrimaryDonorStateEnum::kPrepared); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortedUpdatesInMemoryState) { - testStateTransitionUpdatesInMemoryState(MovePrimaryDonorStateEnum::kAborted); -} - -TEST_F(MovePrimaryDonorServiceTest, StepUpInInitializing) { - testStepUpInState(MovePrimaryDonorStateEnum::kInitializing); -} - -TEST_F(MovePrimaryDonorServiceTest, StepUpInCloning) { - testStepUpInState(MovePrimaryDonorStateEnum::kCloning); -} - -TEST_F(MovePrimaryDonorServiceTest, StepUpInWaitingToBlockWrites) { - testStepUpInState(MovePrimaryDonorStateEnum::kWaitingToBlockWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, StepUpInBlockingWrites) { - testStepUpInState(MovePrimaryDonorStateEnum::kBlockingWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, StepUpInPrepared) { - testStepUpInState(MovePrimaryDonorStateEnum::kPrepared); -} - -TEST_F(MovePrimaryDonorServiceTest, StepUpInAborted) { - testStepUpInState(MovePrimaryDonorStateEnum::kAborted); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortInInitializing) { - testAbortInState(MovePrimaryDonorStateEnum::kInitializing); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortInCloning) { - testAbortInState(MovePrimaryDonorStateEnum::kCloning); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortInWaitingToBlockWrites) { - testAbortInState(MovePrimaryDonorStateEnum::kWaitingToBlockWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortInBlockingWrites) { - testAbortInState(MovePrimaryDonorStateEnum::kBlockingWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortInPrepared) { - testAbortInState(MovePrimaryDonorStateEnum::kPrepared); -} - -TEST_F(MovePrimaryDonorServiceTest, FailTransitionToInitializing) { - testStateTransitionAbortsOnUnrecoverableError(MovePrimaryDonorStateEnum::kInitializing); -} - -TEST_F(MovePrimaryDonorServiceTest, FailTransitionToCloning) { - testStateTransitionAbortsOnUnrecoverableError(MovePrimaryDonorStateEnum::kCloning); -} - 
-TEST_F(MovePrimaryDonorServiceTest, FailTransitionToWaitingToBlockWrites) { - testStateTransitionAbortsOnUnrecoverableError(MovePrimaryDonorStateEnum::kWaitingToBlockWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, FailTransitionToBlockingWrites) { - testStateTransitionAbortsOnUnrecoverableError(MovePrimaryDonorStateEnum::kBlockingWrites); -} - -TEST_F(MovePrimaryDonorServiceTest, FailTransitionToPrepared) { - testStateTransitionAbortsOnUnrecoverableError(MovePrimaryDonorStateEnum::kPrepared); -} - -TEST_F(MovePrimaryDonorServiceTest, CloningSendsSyncDataCommandWithoutTimestamp) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kWaitingToBlockWrites); - - ASSERT_GT(getCommandRunner().getCommandsRunCount(), 0); - auto details = getCommandRunner().getLastCommandDetails(); - const auto& command = details->command; - ASSERT_TRUE(command.hasField(MovePrimaryRecipientSyncData::kCommandName)); - ASSERT_FALSE(command.hasField( - MovePrimaryRecipientSyncData::kReturnAfterReachingDonorTimestampFieldName)); - - unpauseAndAssertCompletesAppropriately(fp, instance); -} - -TEST_F(MovePrimaryDonorServiceTest, CloningRetriesSyncDataCommandOnFailure) { - auto [instance, beforeCloning] = createInstanceInState(MovePrimaryDonorStateEnum::kCloning); - auto [afterCloning, afterCount] = - pauseStateTransitionAlternate(kBefore, MovePrimaryDonorStateEnum::kWaitingToBlockWrites); - - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), 0); - getCommandRunner().addNextResponse(kRetryableError); - - beforeCloning->setMode(FailPoint::off); - afterCloning->waitForTimesEntered(afterCount + 1); - - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), 2); - - unpauseAndAssertCompletesAppropriately(afterCloning, instance); -} - -TEST_F(MovePrimaryDonorServiceTest, CloningAbortsOnSyncDataCommandUnrecoverableError) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kCloning); - getCommandRunner().addNextResponse(kUnrecoverableError); - - unpauseAndAssertCompletesAppropriately(fp, instance); - ASSERT_TRUE(instance->isAborted()); -} - -TEST_F(MovePrimaryDonorServiceTest, WaitingToBlockWritesSetsReadyToBlockWritesFuture) { - auto instance = createInstance(); - ASSERT_OK(instance->getReadyToBlockWritesFuture().getNoThrow()); - ASSERT_EQ(getState(instance), MovePrimaryDonorStateEnum::kWaitingToBlockWrites); - assertCompletesAppropriately(instance); -} - -TEST_F(MovePrimaryDonorServiceTest, WaitingToBlockWritesPersistsBlockTimestamp) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = - createInstanceInState(opCtx.get(), MovePrimaryDonorStateEnum::kWaitingToBlockWrites); - fp->setMode(FailPoint::off); - - const auto timestamp = getCurrentTimestamp(); - instance->onBeganBlockingWrites(timestamp); - - ASSERT_OK(instance->getDecisionFuture().getNoThrow()); - - auto docOnDisk = getStateDocumentOnDisk(opCtx.get(), instance); - ASSERT_EQ(docOnDisk.getMutableFields().getBlockingWritesTimestamp(), timestamp); - - assertCompletesAppropriately(instance); -} - -TEST_F(MovePrimaryDonorServiceTest, BlockingWritesSendsSyncDataCommandWithTimestamp) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kWaitingToBlockWrites); - fp->setMode(FailPoint::off); - const auto timestamp = getCurrentTimestamp(); - - auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - instance->onBeganBlockingWrites(timestamp); - - ASSERT_OK(instance->getDecisionFuture().getNoThrow()); - - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount + 1); - auto 
details = getCommandRunner().getLastCommandDetails(); - const auto& command = details->command; - ASSERT_TRUE(command.hasField(MovePrimaryRecipientSyncData::kCommandName)); - ASSERT_EQ( - command.getField(MovePrimaryRecipientSyncData::kReturnAfterReachingDonorTimestampFieldName) - .timestamp(), - timestamp); - - assertCompletesAppropriately(instance); -} - -TEST_F(MovePrimaryDonorServiceTest, BlockingWritesSyncDataCommandRetriesOnFailure) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kBlockingWrites); - auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - getCommandRunner().addNextResponse(kRetryableError); - - fp->setMode(FailPoint::off); - ASSERT_OK(instance->getDecisionFuture().getNoThrow()); - - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount + 2); - assertCompletesAppropriately(instance); -} - -TEST_F(MovePrimaryDonorServiceTest, BlockingWritesAbortsOnSyncDataCommandUnrecoverableError) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kBlockingWrites); - getCommandRunner().addNextResponse(kUnrecoverableError); - - unpauseAndAssertCompletesAppropriately(fp, instance); - ASSERT_TRUE(instance->isAborted()); -} - -TEST_F(MovePrimaryDonorServiceTest, - BlockingWritesSyncDataCommandSendsProperTimestampAfterFailover) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = createInstanceInStateAndSimulateFailover( - opCtx.get(), MovePrimaryDonorStateEnum::kBlockingWrites); - - fp->setMode(FailPoint::off); - const auto timestamp = getBlockingWritesTimestamp(opCtx.get(), instance); - ASSERT_OK(instance->getDecisionFuture().getNoThrow()); - - auto details = getCommandRunner().getLastCommandDetails(); - const auto& command = details->command; - ASSERT_TRUE(command.hasField(MovePrimaryRecipientSyncData::kCommandName)); - ASSERT_EQ( - command.getField(MovePrimaryRecipientSyncData::kReturnAfterReachingDonorTimestampFieldName) - .timestamp(), - timestamp); - - assertCompletesAppropriately(instance); -} - -TEST_F(MovePrimaryDonorServiceTest, PreparedSetsDecisionFuture) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kPrepared); - ASSERT_FALSE(instance->getDecisionFuture().isReady()); - fp->setMode(FailPoint::off); - ASSERT_OK(instance->getDecisionFuture().getNoThrow()); - assertCompletesAppropriately(instance); -} - -TEST_F(MovePrimaryDonorServiceTest, StepUpAfterPreparedSendsNoAdditionalCommands) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = - createInstanceInStateAndSimulateFailover(opCtx.get(), MovePrimaryDonorStateEnum::kPrepared); - - const auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - fp->setMode(FailPoint::off); - ASSERT_OK(instance->getDecisionFuture().getNoThrow()); - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount); - - assertCompletesAppropriately(instance); -} - -TEST_F(MovePrimaryDonorServiceTest, ForgetSendsForgetToRecipient) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kPrepared); - fp->setMode(FailPoint::off); - - auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - assertCompletesAppropriately(instance); - - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount + 1); - auto details = getCommandRunner().getLastCommandDetails(); - const auto& command = details->command; - ASSERT_TRUE(command.hasField(MovePrimaryRecipientForgetMigration::kCommandName)); -} - -TEST_F(MovePrimaryDonorServiceTest, 
ForgetRetriesRecipientForgetOnAnyFailure) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kPrepared); - fp->setMode(FailPoint::off); - - auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - getCommandRunner().addNextResponse(kRetryableError); - getCommandRunner().addNextResponse(kUnrecoverableError); - - assertCompletesAppropriately(instance); - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount + 3); -} - -TEST_F(MovePrimaryDonorServiceTest, ForgetRetriesRecipientForgetAfterFailover) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = - createInstanceInStateAndSimulateFailover(opCtx.get(), MovePrimaryDonorStateEnum::kPrepared); - - auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - fp->setMode(FailPoint::off); - - assertCompletesAppropriately(instance); - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount + 1); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentRemovedAfterSuccess) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = createInstanceInState(opCtx.get(), MovePrimaryDonorStateEnum::kPrepared); - unpauseAndAssertCompletesAppropriately(fp, instance); - - DBDirectClient client(opCtx.get()); - auto doc = client.findOne(NamespaceString::kMovePrimaryDonorNamespace, BSONObj{}); - ASSERT_TRUE(doc.isEmpty()); -} - -TEST_F(MovePrimaryDonorServiceTest, ReadyToBlockWritesPromiseReturnsErrorIfAborted) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = - createInstanceInState(opCtx.get(), MovePrimaryDonorStateEnum::kInitializing); - instance->abort(kAbortedError); - fp->setMode(FailPoint::off); - ASSERT_EQ(instance->getReadyToBlockWritesFuture().getNoThrow(), kAbortedError); -} - -TEST_F(MovePrimaryDonorServiceTest, DecisionPromiseReturnsErrorIfAborted) { - auto opCtx = makeOperationContext(); - auto instance = createInstance(opCtx.get()); - instance->abort(kAbortedError); - ASSERT_EQ(instance->getDecisionFuture().getNoThrow(), kAbortedError); -} - -TEST_F(MovePrimaryDonorServiceTest, CompletionPromiseReturnsErrorIfAborted) { - auto opCtx = makeOperationContext(); - auto instance = createInstance(opCtx.get()); - instance->abort(kAbortedError); - instance->onReadyToForget(); - ASSERT_EQ(instance->getCompletionFuture().getNoThrow(), kAbortedError); -} - -TEST_F(MovePrimaryDonorServiceTest, StateDocumentRemovedAfterAbort) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = createInstanceInState(opCtx.get(), MovePrimaryDonorStateEnum::kAborted); - unpauseAndAssertCompletesAppropriately(fp, instance); - - DBDirectClient client(opCtx.get()); - auto doc = client.findOne(NamespaceString::kMovePrimaryDonorNamespace, BSONObj{}); - ASSERT_TRUE(doc.isEmpty()); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortSendsAbortToRecipient) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kAborted); - auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - fp->setMode(FailPoint::off); - - fp = globalFailPointRegistry().find("pauseBeforeBeginningMovePrimaryDonorCleanup"); - auto count = fp->setMode(FailPoint::alwaysOn); - instance->onReadyToForget(); - fp->waitForTimesEntered(count + 1); - fp->setMode(FailPoint::off); - - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount + 1); - auto details = getCommandRunner().getLastCommandDetails(); - const auto& command = details->command; - ASSERT_TRUE(command.hasField(MovePrimaryRecipientAbortMigration::kCommandName)); - - 
ASSERT_NOT_OK(instance->getCompletionFuture().getNoThrow()); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortRetriesAbortToRecipientOnAnyError) { - auto [instance, fp] = createInstanceInState(MovePrimaryDonorStateEnum::kAborted); - auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - getCommandRunner().addNextResponse(kRetryableError); - getCommandRunner().addNextResponse(kUnrecoverableError); - fp->setMode(FailPoint::off); - - fp = globalFailPointRegistry().find("pauseBeforeBeginningMovePrimaryDonorCleanup"); - auto count = fp->setMode(FailPoint::alwaysOn); - instance->onReadyToForget(); - fp->waitForTimesEntered(count + 1); - fp->setMode(FailPoint::off); - - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount + 3); - auto details = getCommandRunner().getLastCommandDetails(); - const auto& command = details->command; - ASSERT_TRUE(command.hasField(MovePrimaryRecipientAbortMigration::kCommandName)); - - ASSERT_NOT_OK(instance->getCompletionFuture().getNoThrow()); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortRetriesAbortToRecipientAfterFailover) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = - createInstanceInStateAndSimulateFailover(opCtx.get(), MovePrimaryDonorStateEnum::kAborted); - auto beforeCommandCount = getCommandRunner().getCommandsRunCount(); - fp->setMode(FailPoint::off); - - fp = globalFailPointRegistry().find("pauseBeforeBeginningMovePrimaryDonorCleanup"); - auto count = fp->setMode(FailPoint::alwaysOn); - instance->onReadyToForget(); - fp->waitForTimesEntered(count + 1); - fp->setMode(FailPoint::off); - - ASSERT_EQ(getCommandRunner().getCommandsRunCount(), beforeCommandCount + 1); - auto details = getCommandRunner().getLastCommandDetails(); - const auto& command = details->command; - ASSERT_TRUE(command.hasField(MovePrimaryRecipientAbortMigration::kCommandName)); - - ASSERT_NOT_OK(instance->getCompletionFuture().getNoThrow()); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortPersistsReason) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = createInstanceInState(opCtx.get(), MovePrimaryDonorStateEnum::kAborted); - - auto doc = getStateDocumentOnDisk(opCtx.get(), instance); - auto maybeReason = doc.getMutableFields().getAbortReason(); - ASSERT_TRUE(maybeReason); - auto reason = *maybeReason; - ASSERT_EQ(ErrorCodes::Error(reason["code"].numberInt()), kAbortedError.code()); - ASSERT_EQ(reason["errmsg"].String(), kAbortedError.reason()); - - unpauseAndAssertCompletesAppropriately(fp, instance); -} - -TEST_F(MovePrimaryDonorServiceTest, ExplicitAbortAfterDecisionSetOk) { - auto opCtx = makeOperationContext(); - auto [instance, fp] = createInstanceInState(opCtx.get(), MovePrimaryDonorStateEnum::kPrepared); - fp->setMode(FailPoint::off); - - ASSERT_OK(instance->getDecisionFuture().getNoThrow()); - instance->abort(kAbortedError); - instance->onReadyToForget(); - - ASSERT_EQ(instance->getCompletionFuture().getNoThrow(), kAbortedError); - assertSentRecipientAbortCommand(); -} - -TEST_F(MovePrimaryDonorServiceTest, AbortDuringPartialStateTransitionMaintainsAbortReason) { - auto opCtx = makeOperationContext(); - auto [fp, count] = pauseStateTransition(kPartial, MovePrimaryDonorStateEnum::kInitializing); - auto instance = createInstance(opCtx.get()); - fp->waitForTimesEntered(count + 1); - instance->abort(kAbortedError); - fp->setMode(FailPoint::off); - instance->onReadyToForget(); - ASSERT_EQ(instance->getCompletionFuture().getNoThrow(), kAbortedError); -} - - -} // namespace -} // namespace mongo diff --git 
a/src/mongo/db/s/move_primary/move_primary_metrics.cpp b/src/mongo/db/s/move_primary/move_primary_metrics.cpp deleted file mode 100644 index ec4ba48795358..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_metrics.cpp +++ /dev/null @@ -1,104 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/s/move_primary/move_primary_metrics.h" -#include "mongo/db/exec/document_value/document.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/s/move_primary/move_primary_metrics_field_name_provider.h" - -namespace mongo { - -namespace { -using TimedPhase = MovePrimaryMetrics::TimedPhase; -const auto kTimedPhaseNamesMap = [] { - return MovePrimaryMetrics::TimedPhaseNameMap{ - {TimedPhase::kPlaceholder, "placeholderPhaseDurationSecs"}}; -}(); - -BSONObj createOriginalCommand(const NamespaceString& database, const StringData& shard) { - return Document{{"movePrimary", Value{StringData{database.toString()}}}, {"to", shard}} - .toBson(); -} -} // namespace - -MovePrimaryMetrics::MovePrimaryMetrics(const MovePrimaryCommonMetadata& metadata, - Role role, - ClockSource* clockSource, - ShardingDataTransformCumulativeMetrics* cumulativeMetrics, - AnyState state) - : MovePrimaryMetrics{ - metadata.getMigrationId(), - createOriginalCommand(metadata.getDatabaseName(), metadata.getToShardName()), - metadata.getDatabaseName(), - role, - clockSource->now(), - clockSource, - cumulativeMetrics} { - setState(state); -} - -MovePrimaryMetrics::MovePrimaryMetrics(UUID instanceId, - BSONObj originalCommand, - NamespaceString nss, - Role role, - Date_t startTime, - ClockSource* clockSource, - ShardingDataTransformCumulativeMetrics* cumulativeMetrics) - : Base{std::move(instanceId), - std::move(originalCommand), - std::move(nss), - role, - startTime, - clockSource, - cumulativeMetrics, - std::make_unique()} {} - -BSONObj MovePrimaryMetrics::reportForCurrentOp() const noexcept { - BSONObjBuilder builder; - reportDurationsForAllPhases(kTimedPhaseNamesMap, getClockSource(), &builder); - builder.appendElementsUnique(Base::reportForCurrentOp()); - return builder.obj(); -} - -boost::optional MovePrimaryMetrics::getRecipientHighEstimateRemainingTimeMillis() 
- const { - return boost::none; -} - -StringData MovePrimaryMetrics::getStateString() const noexcept { - return stdx::visit(OverloadedVisitor{[](MovePrimaryRecipientStateEnum state) { - return MovePrimaryRecipientState_serializer(state); - }, - [](MovePrimaryDonorStateEnum state) { - return MovePrimaryDonorState_serializer(state); - }}, - getState()); -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_metrics.h b/src/mongo/db/s/move_primary/move_primary_metrics.h deleted file mode 100644 index dd82000330590..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_metrics.h +++ /dev/null @@ -1,114 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" -#include "mongo/db/s/metrics/with_oplog_application_count_metrics_also_updating_cumulative_metrics.h" -#include "mongo/db/s/metrics/with_oplog_application_latency_metrics_interface_updating_cumulative_metrics.h" -#include "mongo/db/s/metrics/with_phase_duration_management.h" -#include "mongo/db/s/metrics/with_state_management_for_instance_metrics.h" -#include "mongo/db/s/metrics/with_typed_cumulative_metrics_provider.h" -#include "mongo/db/s/move_primary/move_primary_cumulative_metrics.h" -#include "mongo/db/s/move_primary/move_primary_metrics_helpers.h" - -namespace mongo { -namespace move_primary_metrics { - -enum TimedPhase { kPlaceholder }; -constexpr auto kNumTimedPhase = 1; - -namespace detail { - -using PartialBase1 = WithTypedCumulativeMetricsProvider; - -using PartialBase2 = - WithStateManagementForInstanceMetrics; - -using PartialBaseFinal = WithPhaseDurationManagement; - -using Base = WithOplogApplicationLatencyMetricsInterfaceUpdatingCumulativeMetrics< - WithOplogApplicationCountMetricsAlsoUpdatingCumulativeMetrics< - WithOplogApplicationCountMetrics>>; - -} // namespace detail -} // namespace move_primary_metrics - -class MovePrimaryMetrics : public move_primary_metrics::detail::Base { -public: - using AnyState = MovePrimaryCumulativeMetrics::AnyState; - using Base = move_primary_metrics::detail::Base; - using TimedPhase = move_primary_metrics::TimedPhase; - - template - static auto initializeFrom(const T& document, ServiceContext* serviceContext) { - return initializeFrom( - document, - serviceContext->getFastClockSource(), - ShardingDataTransformCumulativeMetrics::getForMovePrimary(serviceContext)); - } - - template - static auto initializeFrom(const T& document, - ClockSource* clockSource, - ShardingDataTransformCumulativeMetrics* cumulativeMetrics) { - static_assert(move_primary_metrics::isStateDocument); - return std::make_unique( - document.getMetadata(), - move_primary_metrics::getRoleForStateDocument(), - clockSource, - cumulativeMetrics, - move_primary_metrics::getState(document)); - } - - MovePrimaryMetrics(const MovePrimaryCommonMetadata& metadata, - Role role, - ClockSource* clockSource, - ShardingDataTransformCumulativeMetrics* cumulativeMetrics, - AnyState state); - - MovePrimaryMetrics(UUID instanceId, - BSONObj originalCommand, - NamespaceString nss, - Role role, - Date_t startTime, - ClockSource* clockSource, - ShardingDataTransformCumulativeMetrics* cumulativeMetrics); - - BSONObj reportForCurrentOp() const noexcept override; - boost::optional getRecipientHighEstimateRemainingTimeMillis() const override; - -protected: - virtual StringData getStateString() const noexcept override; - -private: -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_metrics_field_name_provider.cpp b/src/mongo/db/s/move_primary/move_primary_metrics_field_name_provider.cpp deleted file mode 100644 index 4f56b4622c970..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_metrics_field_name_provider.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/s/move_primary/move_primary_metrics_field_name_provider.h" - -namespace mongo { -namespace { -constexpr auto kState = "state"; -} - -StringData MovePrimaryMetricsFieldNameProvider::getForCoordinatorState() const { - return kState; -} - -StringData MovePrimaryMetricsFieldNameProvider::getForDonorState() const { - return kState; -} - -StringData MovePrimaryMetricsFieldNameProvider::getForRecipientState() const { - return kState; -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_metrics_field_name_provider.h b/src/mongo/db/s/move_primary/move_primary_metrics_field_name_provider.h deleted file mode 100644 index 2313bb0d05eab..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_metrics_field_name_provider.h +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include "mongo/db/s/metrics/field_names/sharding_data_transform_instance_metrics_field_name_provider.h" -#include "mongo/db/s/metrics/field_names/with_document_copy_approximation_field_name_overrides.h" -#include "mongo/db/s/metrics/field_names/with_document_copy_count_field_name_overrides.h" -#include "mongo/db/s/metrics/field_names/with_oplog_application_count_metrics_field_names.h" - -namespace mongo { - -class MovePrimaryMetricsFieldNameProvider - : public WithOplogApplicationCountFieldNames< - WithDocumentCopyApproximationFieldNameOverrides>> { -public: - StringData getForCoordinatorState() const override; - StringData getForDonorState() const override; - StringData getForRecipientState() const override; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_metrics_helpers.h b/src/mongo/db/s/move_primary/move_primary_metrics_helpers.h deleted file mode 100644 index e2e72d3843f2c..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_metrics_helpers.h +++ /dev/null @@ -1,67 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include "mongo/db/s/move_primary/move_primary_state_machine_gen.h" -#include - -namespace mongo { - -namespace move_primary_metrics { - -template -inline constexpr bool isStateDocument = - std::disjunction_v, - std::is_same>; - -template -inline constexpr auto getState(const T& document) { - static_assert(isStateDocument); - if constexpr (std::is_same_v) { - return document.getMutableFields().getState(); - } else { - return document.getState(); - } -} - -template -inline constexpr ShardingDataTransformMetrics::Role getRoleForStateDocument() { - static_assert(isStateDocument); - using Role = ShardingDataTransformMetrics::Role; - if constexpr (std::is_same_v) { - return Role::kDonor; - } else { - return Role::kRecipient; - } -} - -} // namespace move_primary_metrics - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_oplog_applier_progress.idl b/src/mongo/db/s/move_primary/move_primary_oplog_applier_progress.idl deleted file mode 100644 index 162803bfd43fc..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_oplog_applier_progress.idl +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. -# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. -# - -global: - cpp_namespace: "mongo" - -imports: - - "mongo/db/basic_types.idl" - - "mongo/db/s/move_primary/move_primary_common_metadata.idl" - -structs: - MovePrimaryOplogApplierProgress: - description: "Used for storing the progress made by the movePrimary oplog applier at recipient." - strict: false - inline_chained_structs: true - chained_structs: - MovePrimaryCommonMetadata: metadata diff --git a/src/mongo/db/s/move_primary/move_primary_recipient_cmds.cpp b/src/mongo/db/s/move_primary/move_primary_recipient_cmds.cpp deleted file mode 100644 index 2a9d89a397d4b..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_recipient_cmds.cpp +++ /dev/null @@ -1,272 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/commands.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/s/move_primary/move_primary_common_metadata_gen.h" -#include "mongo/db/s/move_primary/move_primary_recipient_cmds_gen.h" -#include "mongo/db/s/move_primary/move_primary_recipient_service.h" -#include "mongo/db/s/move_primary/move_primary_state_machine_gen.h" -#include "mongo/s/catalog/type_collection.h" -#include "mongo/s/move_primary/move_primary_feature_flag_gen.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/concurrency/thread_pool.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kMovePrimary - -namespace mongo { - -namespace { - -class MovePrimaryRecipientSyncDataCmd : public TypedCommand { -public: - using Request = MovePrimaryRecipientSyncData; - - class Invocation : public InvocationBase { - public: - using InvocationBase::InvocationBase; - - void typedRun(OperationContext* opCtx) { - // (Generic FCV reference): This FCV reference should exist across LTS binary versions. 
- uassert(7249200, - "movePrimaryRecipientSyncData not available while upgrading or downgrading the " - "recipient FCV", - !serverGlobalParams.featureCompatibility.isUpgradingOrDowngrading()); - - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - const auto& cmd = request(); - - MovePrimaryRecipientDocument recipientDoc; - recipientDoc.setId(cmd.getMigrationId()); - recipientDoc.setMetadata(std::move(cmd.getMovePrimaryCommonMetadata())); - - auto registry = repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()); - auto service = registry->lookupServiceByName( - MovePrimaryRecipientService::kMovePrimaryRecipientServiceName); - - auto instance = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, service, recipientDoc.toBSON()); - - auto returnAfterReachingDonorTimestamp = cmd.getReturnAfterReachingDonorTimestamp(); - - if (!returnAfterReachingDonorTimestamp) { - instance->getDataClonedFuture().get(opCtx); - } else { - auto preparedFuture = - instance->onReceiveSyncData(returnAfterReachingDonorTimestamp.get()); - preparedFuture.get(opCtx); - } - } - - private: - void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); - } - - bool supportsWriteConcern() const override { - return false; - } - - NamespaceString ns() const override { - return NamespaceString(request().getDbName()); - } - }; - - bool skipApiVersionCheck() const override { - // Internal command (server to server). - return true; - } - - std::string help() const override { - return "Internal command sent by the movePrimary operation donor to the recipient to sync " - "data with the donor."; - } - - bool adminOnly() const override { - return true; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext* context) const override { - return Command::AllowedOnSecondary::kNever; - } -} movePrimaryRecipientSyncDataCmd; - -class MovePrimaryRecipientForgetMigrationCmd - : public TypedCommand { -public: - using Request = MovePrimaryRecipientForgetMigration; - - class Invocation : public InvocationBase { - public: - using InvocationBase::InvocationBase; - - void typedRun(OperationContext* opCtx) { - const auto& cmd = request(); - - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - auto& migrationId = cmd.getMigrationId(); - auto registry = repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()); - auto service = registry->lookupServiceByName( - MovePrimaryRecipientService::kMovePrimaryRecipientServiceName); - auto instance = MovePrimaryRecipientService::MovePrimaryRecipient::lookup( - opCtx, service, BSON("_id" << migrationId)); - - if (instance) { - auto completionFuture = (*instance)->onReceiveForgetMigration(); - completionFuture.get(opCtx); - } else { - LOGV2(7270002, - "No instance of movePrimary recipient found to forget", - "metadata"_attr = cmd.getMovePrimaryCommonMetadata()); - } - } - - private: - void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); - } - - bool supportsWriteConcern() const override { - return false; - } - - NamespaceString ns() const override { - return NamespaceString(request().getDbName()); - } - }; - - bool skipApiVersionCheck() 
const override { - // Internal command (server to server). - return true; - } - - std::string help() const override { - return "Internal command sent by the movePrimary operation donor to mark state doc garbage" - " collectable after a successful data sync."; - } - - bool adminOnly() const override { - return true; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext* context) const override { - return Command::AllowedOnSecondary::kNever; - } -} movePrimaryRecipientForgetMigrationCmd; - -class MovePrimaryRecipientAbortMigrationCmd - : public TypedCommand { -public: - using Request = MovePrimaryRecipientAbortMigration; - - class Invocation : public InvocationBase { - public: - using InvocationBase::InvocationBase; - - void typedRun(OperationContext* opCtx) { - const auto& cmd = request(); - - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - auto& migrationId = cmd.getMigrationId(); - auto registry = repl::PrimaryOnlyServiceRegistry::get(opCtx->getServiceContext()); - auto service = registry->lookupServiceByName( - MovePrimaryRecipientService::kMovePrimaryRecipientServiceName); - auto instance = MovePrimaryRecipientService::MovePrimaryRecipient::lookup( - opCtx, service, BSON("_id" << migrationId)); - - if (instance) { - instance.get()->abort(); - auto completionStatus = instance.get()->getCompletionFuture().getNoThrow(opCtx); - if (completionStatus == ErrorCodes::MovePrimaryAborted) { - return; - } - uassert(ErrorCodes::MovePrimaryRecipientPastAbortableStage, - "movePrimary operation could not be aborted", - completionStatus != Status::OK()); - uassertStatusOK(completionStatus); - } else { - LOGV2(7270003, - "No instance of movePrimary recipient found to abort", - "metadata"_attr = cmd.getMovePrimaryCommonMetadata()); - } - } - - private: - void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); - } - - bool supportsWriteConcern() const override { - return false; - } - - NamespaceString ns() const override { - return NamespaceString(request().getDbName()); - } - }; - - bool skipApiVersionCheck() const override { - // Internal command (server to server). - return true; - } - - bool adminOnly() const override { - return true; - } - - std::string help() const override { - return "Internal command sent by the movePrimary operation donor to abort the movePrimary " - "operation at the recipient."; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext* context) const override { - return Command::AllowedOnSecondary::kNever; - } -} movePrimaryRecipientAbortMigrationCmd; - -} // namespace - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_recipient_cmds.idl b/src/mongo/db/s/move_primary/move_primary_recipient_cmds.idl deleted file mode 100644 index 3e8765c2aae88..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_recipient_cmds.idl +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. 
-# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. -# - -global: - cpp_namespace: "mongo" - cpp_includes: - - "mongo/db/s/move_primary/move_primary_util.h" - - "mongo/db/repl/optime.h" - -imports: - - "mongo/db/basic_types.idl" - - "mongo/s/sharding_types.idl" - - "mongo/db/repl/replication_types.idl" - - "mongo/db/s/move_primary/move_primary_common_metadata.idl" - -commands: - _movePrimaryRecipientSyncData: - description: "Parser for the '_movePrimaryRecipientSyncData' command." - api_version: "" - command_name: _movePrimaryRecipientSyncData - cpp_name: MovePrimaryRecipientSyncData - strict: false - namespace: ignored - reply_type: OkReply - inline_chained_structs: true - chained_structs: - MovePrimaryCommonMetadata: MovePrimaryCommonMetadata - fields: - returnAfterReachingDonorTimestamp: - description: >- - If provided, the recipient should return after syncing up to this donor timestamp - i.e. once it reaches kPrepared state. Otherwise, the recipient will return - once it finishes cloning. - type: timestamp - optional: true - validator: - callback: "move_primary_util::validateTimestampNotNull" - - _movePrimaryRecipientForgetMigration: - description: "Parser for the '_movePrimaryRecipientForgetMigration' command." - api_version: "" - command_name: _movePrimaryRecipientForgetMigration - cpp_name: MovePrimaryRecipientForgetMigration - reply_type: OkReply - strict: false - namespace: ignored - inline_chained_structs: true - chained_structs: - MovePrimaryCommonMetadata: MovePrimaryCommonMetadata - - _movePrimaryRecipientAbortMigration: - description: "Parser for the '_movePrimaryRecipientAbortMigration' command." - api_version: "" - command_name: _movePrimaryRecipientAbortMigration - cpp_name: MovePrimaryRecipientAbortMigration - reply_type: OkReply - strict: false - namespace: ignored - inline_chained_structs: true - chained_structs: - MovePrimaryCommonMetadata: MovePrimaryCommonMetadata diff --git a/src/mongo/db/s/move_primary/move_primary_recipient_service.cpp b/src/mongo/db/s/move_primary/move_primary_recipient_service.cpp deleted file mode 100644 index 08b73e844bae2..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_recipient_service.cpp +++ /dev/null @@ -1,944 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. 
- * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/s/move_primary/move_primary_recipient_service.h" - -#include "mongo/db/s/database_sharding_state.h" -#include "mongo/db/s/move_primary/move_primary_util.h" -#include "mongo/db/s/sharding_recovery_service.h" -#include "mongo/executor/scoped_task_executor.h" -#include "mongo/stdx/mutex.h" -#include "mongo/util/str.h" -#include - -#include "mongo/base/string_data.h" -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog/document_validation.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/commands/list_collections_filter.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/dbhelpers.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/persistent_task_store.h" -#include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/move_primary/move_primary_oplog_applier_progress_gen.h" -#include "mongo/db/s/move_primary/move_primary_server_parameters_gen.h" -#include "mongo/db/s/move_primary/move_primary_state_machine_gen.h" -#include "mongo/db/s/resharding/resharding_data_copy_util.h" -#include "mongo/db/s/sharding_ddl_util.h" -#include "mongo/db/service_context.h" -#include "mongo/db/write_block_bypass.h" -#include "mongo/logv2/log.h" -#include "mongo/s/catalog/sharding_catalog_client_impl.h" -#include "mongo/s/grid.h" -#include "mongo/s/move_primary/move_primary_feature_flag_gen.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/future.h" -#include "mongo/util/future_util.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kMovePrimary - -namespace mongo { - -namespace { - -bool wouldSurpassBatchLimit(int currSize, int nextDocSize) { - return (currSize + nextDocSize) >= gMovePrimaryClonerMetadataCollMaxBatchSizeBytes.load(); -} - -} // anonymous namespace - -MONGO_FAIL_POINT_DEFINE(movePrimaryRecipientPauseBeforeRunning); -MONGO_FAIL_POINT_DEFINE(movePrimaryRecipientPauseAfterInsertingStateDoc); -MONGO_FAIL_POINT_DEFINE(movePrimaryRecipientPauseAfterCloningState); -MONGO_FAIL_POINT_DEFINE(movePrimaryRecipientPauseAfterApplyingState); -MONGO_FAIL_POINT_DEFINE(movePrimaryRecipientPauseAfterBlockingState); -MONGO_FAIL_POINT_DEFINE(movePrimaryRecipientPauseAfterPreparedState); -MONGO_FAIL_POINT_DEFINE(movePrimaryRecipientPauseBeforeDeletingStateDoc); -MONGO_FAIL_POINT_DEFINE(movePrimaryRecipientPauseBeforeCompletion); - 
-MovePrimaryRecipientService::MovePrimaryRecipientService(ServiceContext* serviceContext) - : repl::PrimaryOnlyService(serviceContext), _serviceContext(serviceContext) {} - -StringData MovePrimaryRecipientService::getServiceName() const { - return kMovePrimaryRecipientServiceName; -} - -ThreadPool::Limits MovePrimaryRecipientService::getThreadPoolLimits() const { - ThreadPool::Limits threadPoolLimits; - threadPoolLimits.maxThreads = gMovePrimaryRecipientServiceMaxThreadCount; - return threadPoolLimits; -} - -/** - * ShardingDDLCoordinator will serialize each movePrimary on same namespace. This is added for - * safety and testing. - */ -void MovePrimaryRecipientService::checkIfConflictsWithOtherInstances( - OperationContext* opCtx, - BSONObj initialState, - const std::vector& existingInstances) { - auto recipientDoc = MovePrimaryRecipientDocument::parse( - IDLParserContext("MovePrimaryRecipientService::checkIfConflictsWithOtherInstances"), - std::move(initialState)); - - for (const auto instance : existingInstances) { - auto typedInstance = checked_cast(instance); - auto dbName = typedInstance->getDatabaseName(); - uassert(ErrorCodes::MovePrimaryInProgress, - str::stream() << "Only one movePrimary operation is allowed on a given database", - dbName != recipientDoc.getDatabaseName()); - } -} - -std::shared_ptr MovePrimaryRecipientService::constructInstance( - BSONObj initialState) { - auto recipientStateDoc = MovePrimaryRecipientDocument::parse( - IDLParserContext("MovePrimaryRecipientService::constructInstance"), - std::move(initialState)); - - return std::make_shared( - this, - recipientStateDoc, - std::make_shared(), - _serviceContext, - std::make_unique()); -} - -MovePrimaryRecipientService::MovePrimaryRecipient::MovePrimaryRecipient( - const MovePrimaryRecipientService* service, - MovePrimaryRecipientDocument recipientDoc, - std::shared_ptr externalState, - ServiceContext* serviceContext, - std::unique_ptr cloner) - : _recipientService(service), - _metadata(recipientDoc.getMetadata()), - _movePrimaryRecipientExternalState(externalState), - _serviceContext(serviceContext), - _markKilledExecutor(std::make_shared([] { - ThreadPool::Options options; - options.poolName = "MovePrimaryRecipientServiceCancelableOpCtxPool"; - options.minThreads = 1; - options.maxThreads = 1; - return options; - }())), - _startApplyingDonorOpTime(recipientDoc.getStartApplyingDonorOpTime()), - _criticalSectionReason(BSON("reason" - << "Entering kPrepared state at MovePrimaryRecipientService" - << "operationInfo" << _metadata.toBSON())), - _resumedAfterFailover(recipientDoc.getState() > MovePrimaryRecipientStateEnum::kUnused), - _state(recipientDoc.getState()), - _cloner(std::move(cloner)){}; - -void MovePrimaryRecipientService::MovePrimaryRecipient::checkIfOptionsConflict( - const BSONObj& stateDoc) const { - auto recipientDoc = MovePrimaryRecipientDocument::parse( - IDLParserContext("movePrimaryCheckIfOptionsConflict"), stateDoc); - uassert(ErrorCodes::MovePrimaryInProgress, - str::stream() << "Found an existing movePrimary operation in progress", - recipientDoc.getDatabaseName() == getDatabaseName() && - recipientDoc.getFromShardName() == _metadata.getFromShardName()); -} - -std::vector -MovePrimaryRecipientExternalStateImpl::sendCommandToShards( - OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) { - return sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, dbName, command, shardIds, executor); -} - -SemiFuture 
MovePrimaryRecipientService::MovePrimaryRecipient::run( - std::shared_ptr executor, - const CancellationToken& stepDownToken) noexcept { - - movePrimaryRecipientPauseBeforeRunning.pauseWhileSet(); - - // We would like to abort in all cases where there is a failover and we have not yet reached - // kPrepared state to maintain correctness of movePrimary operation across upgrades/downgrades - // in binary versions with feature parity in online movePrimary implementation. The offline - // cloner is not resumable after failovers. - auto shouldAbort = [&] { - if (!_useOnlineCloner()) { - stdx::lock_guard lg(_mutex); - if (_resumedAfterFailover && _canAbort(lg)) { - return true; - } - } - return false; - }(); - - // Synchronize abort() called from a different thread before _ctHolder is initialized. - auto abortCalled = [&] { - stdx::lock_guard lg(_mutex); - _ctHolder = std::make_unique(std::move(stepDownToken)); - return _abortCalled; - }(); - - if (abortCalled || shouldAbort) { - abort(); - } - - _markKilledExecutor->startup(); - _retryingCancelableOpCtxFactory.emplace(_ctHolder->getAbortToken(), _markKilledExecutor); - - return ExecutorFuture(**executor) - .then([this, executor] { return _transitionToInitializingState(executor); }) - .then([this] { - { - stdx::lock_guard lg(_mutex); - move_primary_util::ensureFulfilledPromise( - lg, _recipientDocDurablePromise, Status::OK()); - } - auto opCtxHolder = cc().makeOperationContext(); - auto opCtx = opCtxHolder.get(); - movePrimaryRecipientPauseAfterInsertingStateDoc.pauseWhileSetAndNotCanceled( - opCtx, _ctHolder->getStepdownToken()); - }) - .then([this, executor] { return _initializeForCloningState(executor); }) - .then([this, executor] { return _transitionToCloningStateAndClone(executor); }) - .then([this, executor] { return _transitionToApplyingState(executor); }) - .then([this, executor] { - return _transitionToBlockingStateAndAcquireCriticalSection(executor); - }) - .then([this, executor] { return _transitionToPreparedState(executor); }) - .then([this, executor] { - auto forgetMigrationFuture = ([&] { - stdx::lock_guard lg(_mutex); - return _forgetMigrationPromise.getFuture(); - })(); - - return future_util::withCancellation(std::move(forgetMigrationFuture), - _ctHolder->getAbortToken()) - .thenRunOn(**executor) - .then([this, executor] { - return _transitionToDoneStateAndFinishMovePrimaryOp(executor); - }); - }) - .onError([this, executor](Status status) { - if (_ctHolder->isAborted()) { - _retryingCancelableOpCtxFactory.emplace(_ctHolder->getStepdownToken(), - _markKilledExecutor); - LOGV2(7307002, - "MovePrimaryRecipient aborting movePrimary operation", - "metadata"_attr = _metadata, - "error"_attr = status); - return _transitionToAbortedStateAndCleanupOrphanedData(executor).then( - [this, executor] { - return _transitionToDoneStateAndFinishMovePrimaryOp(executor); - }); - } - return ExecutorFuture(**executor, status); - }) - .thenRunOn(_recipientService->getInstanceCleanupExecutor()) - .onCompletion([this, self = shared_from_this()](Status status) { - if (!status.isOK()) { - LOGV2(7307003, - "MovePrimaryRecipient encountered error during movePrimary operation", - "metadata"_attr = _metadata, - "error"_attr = status); - } - _ensureUnfulfilledPromisesError(status); - movePrimaryRecipientPauseBeforeCompletion.pauseWhileSet(); - }) - .semi(); -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::abort() { - stdx::lock_guard lg(_mutex); - _abortCalled = true; - if (_ctHolder) { - LOGV2(7270000, - "MovePrimaryRecipient received abort of 
movePrimary operation", - "metadata"_attr = _metadata); - _ctHolder->abort(); - } -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_cloneDataFromDonor( - OperationContext* opCtx) { - // Enable write blocking bypass to allow cloning of catalog data even if writes are disallowed. - WriteBlockBypass::get(opCtx).set(true); - DisableDocumentValidation disableValidation(opCtx); - std::set clonedCollections; - const auto shardRegistry = Grid::get(opCtx)->shardRegistry(); - const auto fromShard = - uassertStatusOK(shardRegistry->getShard(opCtx, _metadata.getFromShardName().toString())); - uassertStatusOK(_cloner->copyDb(opCtx, - _metadata.getDatabaseName().toString(), - fromShard->getConnString().toString(), - _shardedColls, - &clonedCollections)); -} - -ExecutorFuture -MovePrimaryRecipientService::MovePrimaryRecipient::_transitionToCloningStateAndClone( - const std::shared_ptr& executor) { - return _retryingCancelableOpCtxFactory - ->withAutomaticRetry([this, executor](const auto& factory) { - if (_checkInvalidStateTransition(MovePrimaryRecipientStateEnum::kCloning)) { - return; - } - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _updateRecipientDocument( - opCtx.get(), - MovePrimaryRecipientDocument::kStateFieldName, - MovePrimaryRecipientState_serializer(MovePrimaryRecipientStateEnum::kCloning)); - _transitionStateMachine(MovePrimaryRecipientStateEnum::kCloning); - // TODO SERVER-75872: Refactor this logic after integrating online cloner. - _cloneDataFromDonor(opCtx.get()); - }) - .onTransientError([](const Status& status) { - LOGV2(7307000, - "MovePrimaryRecipient encountered transient error in _transitionToCloningState", - "error"_attr = redact(status)); - }) - .onUnrecoverableError([this](const Status& status) { - LOGV2_ERROR( - 7306911, - "MovePrimaryRecipient encountered unrecoverable error in _transitionToCloningState", - "_metadata"_attr = _metadata, - "_error"_attr = status); - abort(); - }) - .until([](const Status& status) { return status.isOK(); }) - .on(**executor, _ctHolder->getAbortToken()) - .onCompletion([this, executor](Status status) { - return _waitForMajority(executor).then([this, status] { - { - stdx::lock_guard lg(_mutex); - move_primary_util::ensureFulfilledPromise(lg, _dataClonePromise, status); - } - auto opCtxHolder = cc().makeOperationContext(); - auto opCtx = opCtxHolder.get(); - movePrimaryRecipientPauseAfterCloningState.pauseWhileSetAndNotCanceled( - opCtx, _ctHolder->getStepdownToken()); - }); - }); -} - -ExecutorFuture MovePrimaryRecipientService::MovePrimaryRecipient::_transitionToApplyingState( - const std::shared_ptr& executor) { - return _retryingCancelableOpCtxFactory - ->withAutomaticRetry([this, executor](const auto& factory) { - if (_checkInvalidStateTransition(MovePrimaryRecipientStateEnum::kApplying)) { - return ExecutorFuture(**executor); - } - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _updateRecipientDocument( - opCtx.get(), - MovePrimaryRecipientDocument::kStateFieldName, - MovePrimaryRecipientState_serializer(MovePrimaryRecipientStateEnum::kApplying)); - _transitionStateMachine(MovePrimaryRecipientStateEnum::kApplying); - return ExecutorFuture(**executor); - }) - .onTransientError([](const Status& status) {}) - .onUnrecoverableError([this](const Status& status) { - LOGV2_ERROR(7306912, - "MovePrimaryRecipient encountered unrecoverable error in " - "_transitionToApplyingState", - "_metadata"_attr = _metadata, - "_error"_attr = status); - abort(); - }) - .until([](const Status& status) { 
return status.isOK(); }) - .on(**executor, _ctHolder->getAbortToken()) - .onCompletion([this, executor](Status status) { - return _waitForMajority(executor).then([this] { - auto opCtxHolder = cc().makeOperationContext(); - auto opCtx = opCtxHolder.get(); - movePrimaryRecipientPauseAfterApplyingState.pauseWhileSetAndNotCanceled( - opCtx, _ctHolder->getStepdownToken()); - }); - }); -} - -ExecutorFuture MovePrimaryRecipientService::MovePrimaryRecipient:: - _transitionToBlockingStateAndAcquireCriticalSection( - const std::shared_ptr& executor) { - return _retryingCancelableOpCtxFactory - ->withAutomaticRetry([this, executor](const auto& factory) { - return ExecutorFuture(**executor) - .then([this, factory, executor] { - if (_checkInvalidStateTransition(MovePrimaryRecipientStateEnum::kBlocking)) { - return; - } - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _updateRecipientDocument(opCtx.get(), - MovePrimaryRecipientDocument::kStateFieldName, - MovePrimaryRecipientState_serializer( - MovePrimaryRecipientStateEnum::kBlocking)); - _transitionStateMachine(MovePrimaryRecipientStateEnum::kBlocking); - }) - .then([this, factory, executor] { - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - ShardingRecoveryService::get(opCtx.get()) - ->acquireRecoverableCriticalSectionBlockWrites( - opCtx.get(), - getDatabaseName(), - _criticalSectionReason, - ShardingCatalogClient::kLocalWriteConcern); - ShardingRecoveryService::get(opCtx.get()) - ->promoteRecoverableCriticalSectionToBlockAlsoReads( - opCtx.get(), - getDatabaseName(), - _criticalSectionReason, - ShardingCatalogClient::kLocalWriteConcern); - }); - }) - .onTransientError([](const Status& status) {}) - .onUnrecoverableError([this](const Status& status) { - LOGV2_ERROR(7306900, - "MovePrimaryRecipient encountered unrecoverable error in " - "_transitionToBlockingStateAndAcquireCriticalSection", - "_metadata"_attr = _metadata, - "_error"_attr = status); - abort(); - }) - .until([](const Status& status) { return status.isOK(); }) - .on(**executor, _ctHolder->getAbortToken()) - .onCompletion([this, executor](Status status) { - return _waitForMajority(executor).then([this] { - auto opCtxHolder = cc().makeOperationContext(); - auto opCtx = opCtxHolder.get(); - movePrimaryRecipientPauseAfterBlockingState.pauseWhileSetAndNotCanceled( - opCtx, _ctHolder->getStepdownToken()); - }); - }); -} - -ExecutorFuture MovePrimaryRecipientService::MovePrimaryRecipient::_transitionToPreparedState( - const std::shared_ptr& executor) { - return _retryingCancelableOpCtxFactory - ->withAutomaticRetry([this, executor](const auto& factory) { - if (_checkInvalidStateTransition(MovePrimaryRecipientStateEnum::kPrepared)) { - return; - } - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _updateRecipientDocument( - opCtx.get(), - MovePrimaryRecipientDocument::kStateFieldName, - MovePrimaryRecipientState_serializer(MovePrimaryRecipientStateEnum::kPrepared)); - _transitionStateMachine(MovePrimaryRecipientStateEnum::kPrepared); - }) - .onTransientError([](const Status& status) {}) - .onUnrecoverableError([this](const Status& status) { - LOGV2_ERROR(7306910, - "MovePrimaryRecipient encountered unrecoverable error in " - "_transitionToPreparedState", - "_metadata"_attr = _metadata, - "_error"_attr = status); - abort(); - }) - .until([](const Status& status) { return status.isOK(); }) - .on(**executor, _ctHolder->getAbortToken()) - .onCompletion([this, executor](Status status) { - return _waitForMajority(executor).then([this, status] 
{ - { - stdx::lock_guard lg(_mutex); - move_primary_util::ensureFulfilledPromise(lg, _preparedPromise, status); - } - - auto opCtxHolder = cc().makeOperationContext(); - auto opCtx = opCtxHolder.get(); - movePrimaryRecipientPauseAfterPreparedState.pauseWhileSetAndNotCanceled( - opCtx, _ctHolder->getStepdownToken()); - }); - }); -} - -ExecutorFuture -MovePrimaryRecipientService::MovePrimaryRecipient::_transitionToAbortedStateAndCleanupOrphanedData( - const std::shared_ptr& executor) { - return _retryingCancelableOpCtxFactory - ->withAutomaticRetry([this, executor](const auto& factory) { - return ExecutorFuture(**executor) - .then([this, factory] { - if (_checkInvalidStateTransition(MovePrimaryRecipientStateEnum::kAborted)) { - return; - } - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _updateRecipientDocument(opCtx.get(), - MovePrimaryRecipientDocument::kStateFieldName, - MovePrimaryRecipientState_serializer( - MovePrimaryRecipientStateEnum::kAborted)); - _transitionStateMachine(MovePrimaryRecipientStateEnum::kAborted); - }) - .then([this, factory, executor] { - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _cleanUpOrphanedDataOnRecipient(opCtx.get()); - }); - }) - .onTransientError([](const Status& status) {}) - .onUnrecoverableError([](const Status& status) {}) - .until([](const Status& status) { return status.isOK(); }) - .on(**executor, _ctHolder->getStepdownToken()) - .onCompletion([this, self = shared_from_this(), executor](Status status) { - return _waitForMajority(executor).then([this, executor, status] { - if (!status.isOK()) { - LOGV2(7307001, - "MovePrimaryRecipient encountered error in " - "_transitionToAbortedStateAndCleanupOrphanedData", - "error"_attr = status); - } - // Intentionally return OK status after logging as the abort is best effort. 
- return ExecutorFuture(**executor, Status::OK()); - }); - }); -} - -ExecutorFuture -MovePrimaryRecipientService::MovePrimaryRecipient::_transitionToDoneStateAndFinishMovePrimaryOp( - const std::shared_ptr& executor) { - _retryingCancelableOpCtxFactory.emplace(_ctHolder->getStepdownToken(), _markKilledExecutor); - return _retryingCancelableOpCtxFactory - ->withAutomaticRetry([this, executor](const auto& factory) { - return ExecutorFuture(**executor) - .then([this, factory, executor] { - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - if (_checkInvalidStateTransition(MovePrimaryRecipientStateEnum::kDone)) { - return; - } - - _updateRecipientDocument( - opCtx.get(), - MovePrimaryRecipientDocument::kStateFieldName, - MovePrimaryRecipientState_serializer(MovePrimaryRecipientStateEnum::kDone)); - _transitionStateMachine(MovePrimaryRecipientStateEnum::kDone); - }) - .then([this, factory, executor] { - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _cleanUpOperationMetadata(opCtx.get(), executor); - }) - .then([this, factory, executor] { - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _clearDatabaseMetadata(opCtx.get()); - - ShardingRecoveryService::get(opCtx.get()) - ->releaseRecoverableCriticalSection( - opCtx.get(), - getDatabaseName(), - _criticalSectionReason, - ShardingCatalogClient::kLocalWriteConcern); - movePrimaryRecipientPauseBeforeDeletingStateDoc.pauseWhileSetAndNotCanceled( - opCtx.get(), _ctHolder->getStepdownToken()); - _removeRecipientDocument(opCtx.get()); - }); - }) - .onTransientError([](const Status& status) {}) - .onUnrecoverableError([](const Status& status) { - LOGV2(7306901, - "MovePrimaryRecipient received unrecoverable error in " - "_transitionToDoneStateAndFinishMovePrimaryOp", - "error"_attr = status); - }) - .until([](const Status& status) { return status.isOK(); }) - .on(**executor, _ctHolder->getStepdownToken()) - .onCompletion([this, executor](Status status) { - if (_ctHolder->isAborted()) { - // Override status code to aborted after logging the original error - status = {ErrorCodes::MovePrimaryAborted, "movePrimary operation aborted"}; - } - return _waitForMajority(executor).then([this, executor, status] { - if (status.isOK()) { - stdx::lock_guard lg(_mutex); - move_primary_util::ensureFulfilledPromise(lg, _completionPromise, Status::OK()); - } - return ExecutorFuture(**executor, status); - }); - }); -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_clearDatabaseMetadata( - OperationContext* opCtx) { - auto dbName = getDatabaseName().dbName(); - AutoGetDb autoDb(opCtx, dbName, MODE_IX); - auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, dbName); - scopedDss->clearDbInfo(opCtx); -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_createMetadataCollection( - OperationContext* opCtx) { - resharding::data_copy::ensureCollectionExists(opCtx, _getCollectionsToCloneNSS(), {}); -} - -std::vector -MovePrimaryRecipientService::MovePrimaryRecipient::_getUnshardedCollections( - OperationContext* opCtx) { - const auto shardRegistry = Grid::get(opCtx)->shardRegistry(); - const auto fromShard = - uassertStatusOK(shardRegistry->getShard(opCtx, _metadata.getFromShardName().toString())); - - auto collectionsToCloneWithStatus = _cloner->getListOfCollections( - opCtx, getDatabaseName().dbName().toString(), fromShard->getConnString().toString()); - auto collectionsToClone = uassertStatusOK(collectionsToCloneWithStatus); - - const auto allCollections = [&] { - 
std::vector colls; - for (const auto& collInfo : collectionsToClone) { - std::string collName; - uassertStatusOK(bsonExtractStringField(collInfo, "name", &collName)); - const NamespaceString nss(getDatabaseName().toString(), collName); - if (!nss.isSystem() || - nss.isLegalClientSystemNS(serverGlobalParams.featureCompatibility)) { - colls.push_back(nss); - } - } - std::sort(colls.begin(), colls.end()); - return colls; - }(); - - std::vector unshardedCollections; - std::set_difference(allCollections.cbegin(), - allCollections.cend(), - _shardedColls.cbegin(), - _shardedColls.cend(), - std::back_inserter(unshardedCollections)); - - return unshardedCollections; -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_persistCollectionsToClone( - OperationContext* opCtx) { - auto collsToClone = _getUnshardedCollections(opCtx); - std::vector batch; - int i = 0; - int numBytes = 0; - auto collectionsToCloneNSS = _getCollectionsToCloneNSS(); - for (const auto& coll : collsToClone) { - auto doc = BSON("_id" << i << "nss" << coll.ns()); - ++i; - if (wouldSurpassBatchLimit(numBytes, doc.objsize())) { - resharding::data_copy::insertBatch(opCtx, collectionsToCloneNSS, batch); - batch.clear(); - numBytes = 0; - } - batch.emplace_back(InsertStatement(doc)); - numBytes += doc.objsize(); - } - if (!batch.empty()) - resharding::data_copy::insertBatch(opCtx, collectionsToCloneNSS, batch); -} - -std::vector -MovePrimaryRecipientService::MovePrimaryRecipient::_getCollectionsToClone( - OperationContext* opCtx) const { - std::vector collsToClone; - auto collectionsToCloneNSS = _getCollectionsToCloneNSS(); - AutoGetCollection autoColl(opCtx, collectionsToCloneNSS, MODE_IS); - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection '" << collectionsToCloneNSS << "' did not already exist", - autoColl); - auto cursor = autoColl->getCursor(opCtx); - while (auto record = cursor->next()) { - BSONObj obj = record->data.releaseToBson(); - NamespaceString ns( - NamespaceStringUtil::deserialize(boost::none, obj.getStringField("nss"))); - collsToClone.emplace_back(ns); - } - return collsToClone; -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_cleanUpOrphanedDataOnRecipient( - OperationContext* opCtx) { - // Drop all the collections which might have been cloned on the recipient. - auto colls = _getCollectionsToClone(opCtx); - for (const auto& coll : colls) { - resharding::data_copy::ensureCollectionDropped(opCtx, coll); - } -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_cleanUpOperationMetadata( - OperationContext* opCtx, const std::shared_ptr& executor) { - - // Drop collectionsToClone NSS - resharding::data_copy::ensureCollectionDropped(opCtx, _getCollectionsToCloneNSS()); - - // Drop temp oplog buffer - resharding::data_copy::ensureCollectionDropped( - opCtx, NamespaceString::makeMovePrimaryOplogBufferNSS(getMigrationId())); - - // Drop oplog applier progress document - PersistentTaskStore store( - NamespaceString::kMovePrimaryApplierProgressNamespace); - store.remove(opCtx, - BSON(MovePrimaryRecipientDocument::kMigrationIdFieldName << getMigrationId()), - WriteConcerns::kLocalWriteConcern); -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_removeRecipientDocument( - OperationContext* opCtx) { - // Delete state document. 
- PersistentTaskStore store( - NamespaceString::kMovePrimaryRecipientNamespace); - store.remove(opCtx, - BSON(MovePrimaryRecipientDocument::kIdFieldName << getMigrationId()), - WriteConcerns::kLocalWriteConcern); - LOGV2(7306902, - "MovePrimaryRecipient removed recipient document for movePrimary operation", - "metadata"_attr = _metadata); -} - -SharedSemiFuture -MovePrimaryRecipientService::MovePrimaryRecipient::onReceiveForgetMigration() { - stdx::lock_guard lg(_mutex); - LOGV2(7270001, - "MovePrimaryRecipient received forgetMigration for movePrimary operation", - "metadata"_attr = _metadata); - move_primary_util::ensureFulfilledPromise(lg, _forgetMigrationPromise, Status::OK()); - return _completionPromise.getFuture(); -} - -SharedSemiFuture MovePrimaryRecipientService::MovePrimaryRecipient::onReceiveSyncData( - Timestamp blockTimestamp) { - return _preparedPromise.getFuture(); -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_ensureUnfulfilledPromisesError( - Status status) { - stdx::lock_guard lg(_mutex); - if (!_recipientDocDurablePromise.getFuture().isReady()) { - _recipientDocDurablePromise.setError(status); - } - if (!_dataClonePromise.getFuture().isReady()) { - _dataClonePromise.setError(status); - } - if (!_preparedPromise.getFuture().isReady()) { - _preparedPromise.setError(status); - } - if (!_forgetMigrationPromise.getFuture().isReady()) { - _forgetMigrationPromise.setError(status); - } - if (!_completionPromise.getFuture().isReady()) { - _completionPromise.setError(status); - } -} - -void MovePrimaryRecipientService::MovePrimaryRecipient::_transitionStateMachine( - MovePrimaryRecipientStateEnum newState) { - stdx::lock_guard lg(_mutex); - invariant(newState > _state); - - std::swap(_state, newState); - LOGV2(7271201, - "Transitioned MovePrimaryRecipient state", - "oldState"_attr = MovePrimaryRecipientState_serializer(newState), - "newState"_attr = MovePrimaryRecipientState_serializer(_state), - "migrationId"_attr = getMigrationId(), - "databaseName"_attr = getDatabaseName(), - "fromShard"_attr = _metadata.getFromShardName()); -} - -ExecutorFuture -MovePrimaryRecipientService::MovePrimaryRecipient::_transitionToInitializingState( - const std::shared_ptr& executor) { - return _retryingCancelableOpCtxFactory - ->withAutomaticRetry([this, executor](const auto& factory) { - if (_resumedAfterFailover) { - return; - } - auto opCtxHolder = cc().makeOperationContext(); - auto opCtx = opCtxHolder.get(); - - MovePrimaryRecipientDocument recipientDoc; - recipientDoc.setId(getMigrationId()); - recipientDoc.setMetadata(_metadata); - recipientDoc.setState(MovePrimaryRecipientStateEnum::kInitializing); - recipientDoc.setStartAt(_serviceContext->getPreciseClockSource()->now()); - - PersistentTaskStore store( - NamespaceString::kMovePrimaryRecipientNamespace); - store.add(opCtx, recipientDoc, WriteConcerns::kLocalWriteConcern); - - _transitionStateMachine(MovePrimaryRecipientStateEnum::kInitializing); - }) - .onTransientError([](const Status& status) {}) - .onUnrecoverableError([this](const Status& status) { - LOGV2(7306800, - "MovePrimaryRecipient received unrecoverable error in " - "_transitionToInitializingState", - "error"_attr = status); - abort(); - }) - .until([](const Status& status) { return status.isOK(); }) - .on(**executor, _ctHolder->getAbortToken()) - .onCompletion([this, executor](Status status) { - return _waitForMajority(executor).then([this, status] { - if (status.isOK()) { - LOGV2(7306903, - "MovePrimaryRecipient persisted state doc", - "metadata"_attr = 
_metadata); - } - }); - }); -} - -ExecutorFuture MovePrimaryRecipientService::MovePrimaryRecipient::_initializeForCloningState( - const std::shared_ptr& executor) { - if (_checkInvalidStateTransition(MovePrimaryRecipientStateEnum::kCloning)) { - return ExecutorFuture(**executor); - } - return _retryingCancelableOpCtxFactory - ->withAutomaticRetry([this, executor](const auto& factory) { - auto opCtx = factory.makeOperationContext(Client::getCurrent()); - _shardedColls = _getShardedCollectionsFromConfigSvr(opCtx.get()); - _startApplyingDonorOpTime = _startApplyingDonorOpTime - ? _startApplyingDonorOpTime - : _getStartApplyingDonorOpTime(opCtx.get(), executor); - _updateRecipientDocument( - opCtx.get(), - MovePrimaryRecipientDocument::kStartApplyingDonorOpTimeFieldName, - _startApplyingDonorOpTime.get().toBSON()); - _createMetadataCollection(opCtx.get()); - _persistCollectionsToClone(opCtx.get()); - }) - .onTransientError([](const Status& status) {}) - .onUnrecoverableError([this](const Status& status) { - LOGV2(7306801, - "Received unrecoverable error while initializing for cloning state", - "error"_attr = status); - abort(); - }) - .until([](const Status& status) { return status.isOK(); }) - .on(**executor, _ctHolder->getAbortToken()); -} - -template -void MovePrimaryRecipientService::MovePrimaryRecipient::_updateRecipientDocument( - OperationContext* opCtx, const StringData& fieldName, T value) { - PersistentTaskStore store( - NamespaceString::kMovePrimaryRecipientNamespace); - - BSONObjBuilder updateBuilder; - { - BSONObjBuilder setBuilder(updateBuilder.subobjStart("$set")); - setBuilder.append(fieldName, value); - setBuilder.doneFast(); - } - - store.update(opCtx, - BSON(MovePrimaryRecipientDocument::kIdFieldName << getMigrationId()), - updateBuilder.done(), - WriteConcerns::kLocalWriteConcern); -} - -repl::OpTime MovePrimaryRecipientService::MovePrimaryRecipient::_getStartApplyingDonorOpTime( - OperationContext* opCtx, const std::shared_ptr& executor) { - auto oplogOpTimeFields = - BSON(repl::OplogEntry::kTimestampFieldName << 1 << repl::OplogEntry::kTermFieldName << 1); - FindCommandRequest findCmd{NamespaceString::kRsOplogNamespace}; - findCmd.setSort(BSON("$natural" << -1)); - findCmd.setProjection(oplogOpTimeFields); - findCmd.setReadConcern( - repl::ReadConcernArgs(repl::ReadConcernLevel::kMajorityReadConcern).toBSONInner()); - findCmd.setLimit(1); - - auto rawResp = _movePrimaryRecipientExternalState->sendCommandToShards( - opCtx, - "local"_sd, - findCmd.toBSON({}), - {ShardId(_metadata.getFromShardName().toString())}, - **executor); - - uassert(7356200, - "MovePrimaryRecipient unable to find majority committed OpTime at donor", - !rawResp.empty()); - auto swResp = uassertStatusOK(rawResp.front().swResponse); - BSONObj cursorObj = swResp.data["cursor"].Obj(); - BSONObj firstBatchObj = cursorObj["firstBatch"].Obj(); - auto majorityOpTime = - uassertStatusOK(repl::OpTime::parseFromOplogEntry(firstBatchObj[0].Obj())); - return majorityOpTime; -} - -std::vector -MovePrimaryRecipientService::MovePrimaryRecipient::_getShardedCollectionsFromConfigSvr( - OperationContext* opCtx) const { - auto catalogClient = Grid::get(opCtx)->catalogClient(); - auto shardedColls = - catalogClient->getAllShardedCollectionsForDb(opCtx, - getDatabaseName().toString(), - repl::ReadConcernLevel::kMajorityReadConcern, - BSON("ns" << 1)); - return shardedColls; -} - -bool MovePrimaryRecipientService::MovePrimaryRecipient::_checkInvalidStateTransition( - MovePrimaryRecipientStateEnum newState) { - 
stdx::lock_guard lg(_mutex); - return newState <= _state; -} - -ExecutorFuture MovePrimaryRecipientService::MovePrimaryRecipient::_waitForMajority( - std::shared_ptr executor) { - return ExecutorFuture(**executor).then([this] { - auto opCtxHolder = cc().makeOperationContext(); - auto opCtx = opCtxHolder.get(); - repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx); - auto clientOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - return WaitForMajorityService::get(_serviceContext) - .waitUntilMajority(std::move(clientOpTime), _ctHolder->getStepdownToken()); - }); -} - -boost::optional MovePrimaryRecipientService::MovePrimaryRecipient::reportForCurrentOp( - MongoProcessInterface::CurrentOpConnectionsMode connMode, - MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept { - return boost::none; -} - -NamespaceString MovePrimaryRecipientService::MovePrimaryRecipient::getDatabaseName() const { - return _metadata.getDatabaseName(); -} - -NamespaceString MovePrimaryRecipientService::MovePrimaryRecipient::_getCollectionsToCloneNSS() - const { - return NamespaceString::makeMovePrimaryCollectionsToCloneNSS(getMigrationId()); -} - -UUID MovePrimaryRecipientService::MovePrimaryRecipient::getMigrationId() const { - return _metadata.getMigrationId(); -} - -bool MovePrimaryRecipientService::MovePrimaryRecipient::_canAbort(WithLock) const { - return _state < MovePrimaryRecipientStateEnum::kPrepared; -} - -bool MovePrimaryRecipientService::MovePrimaryRecipient::_useOnlineCloner() const { - return false; -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_recipient_service.h b/src/mongo/db/s/move_primary/move_primary_recipient_service.h deleted file mode 100644 index 8ea0c74a790d8..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_recipient_service.h +++ /dev/null @@ -1,379 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#pragma once - -#include "mongo/base/string_data.h" -#include "mongo/bson/bsonobj.h" -#include "mongo/db/cancelable_operation_context.h" -#include "mongo/db/s/move_primary/move_primary_recipient_cmds_gen.h" -#include "mongo/db/s/resharding/resharding_future_util.h" -#include "mongo/executor/scoped_task_executor.h" -#include "mongo/s/async_requests_sender.h" -#include -#include - -#include "mongo/client/fetcher.h" -#include "mongo/db/cloner.h" -#include "mongo/db/pipeline/aggregate_command_gen.h" -#include "mongo/db/repl/oplog_fetcher.h" -#include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/s/move_primary/move_primary_state_machine_gen.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" -#include "mongo/util/time_support.h" - -namespace mongo { - -class DBClientConnection; -class OperationContext; -class ReplicaSetMonitor; -class ServiceContext; - -class MovePrimaryRecipientExternalState { -public: - virtual ~MovePrimaryRecipientExternalState() = default; - - virtual std::vector sendCommandToShards( - OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) = 0; -}; - -class MovePrimaryRecipientExternalStateImpl final : public MovePrimaryRecipientExternalState { -public: - std::vector sendCommandToShards( - OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) override; -}; - -class RecipientCancellationTokenHolder { -public: - RecipientCancellationTokenHolder(CancellationToken stepdownToken) - : _stepdownToken(stepdownToken), - _abortSource(CancellationSource(stepdownToken)), - _abortToken(_abortSource.token()) {} - - /** - * Returns whether any token has been canceled. - */ - bool isCanceled() { - return _stepdownToken.isCanceled() || _abortToken.isCanceled(); - } - - /** - * Returns true if an abort was triggered by user or if the recipient decided to abort the - * operation. - */ - bool isAborted() { - return !_stepdownToken.isCanceled() && _abortToken.isCanceled(); - } - - /** - * Returns whether the stepdownToken has been canceled, indicating that the shard's underlying - * replica set node is stepping down or shutting down. - */ - bool isSteppingOrShuttingDown() { - return _stepdownToken.isCanceled(); - } - - /** - * Cancels the source created by this class, in order to indicate to holders of the abortToken - * that the movePrimary operation has been aborted. - */ - void abort() { - _abortSource.cancel(); - } - - const CancellationToken& getStepdownToken() { - return _stepdownToken; - } - - const CancellationToken& getAbortToken() { - return _abortToken; - } - -private: - // The token passed in by the PrimaryOnlyService runner that is canceled when this shard's - // underlying replica set node is stepping down or shutting down. - CancellationToken _stepdownToken; - - // The source created by inheriting from the stepdown token. - CancellationSource _abortSource; - - // The token to wait on in cases where a user wants to wait on either a movePrimary operation - // being aborted or the replica set node stepping/shutting down. - CancellationToken _abortToken; -}; - -/** - * MovePrimaryRecipientService coordinates online movePrimary data migration on the - * recipient side. - */ -class MovePrimaryRecipientService : public repl::PrimaryOnlyService { - // Disallows copying. 
- MovePrimaryRecipientService(const MovePrimaryRecipientService&) = delete; - MovePrimaryRecipientService& operator=(const MovePrimaryRecipientService&) = delete; - -public: - static constexpr StringData kMovePrimaryRecipientServiceName = "MovePrimaryRecipientService"_sd; - - explicit MovePrimaryRecipientService(ServiceContext* serviceContext); - ~MovePrimaryRecipientService() = default; - - StringData getServiceName() const override; - - NamespaceString getStateDocumentsNS() const override { - return NamespaceString::kMovePrimaryRecipientNamespace; - } - - ThreadPool::Limits getThreadPoolLimits() const final; - - void checkIfConflictsWithOtherInstances( - OperationContext* opCtx, - BSONObj initialStateDoc, - const std::vector& existingInstances) final; - - std::shared_ptr constructInstance(BSONObj initialStateDoc); - - class MovePrimaryRecipient final - : public PrimaryOnlyService::TypedInstance { - public: - explicit MovePrimaryRecipient( - const MovePrimaryRecipientService* recipientService, - MovePrimaryRecipientDocument recipientDoc, - std::shared_ptr externalState, - ServiceContext* serviceContext, - std::unique_ptr cloner); - - SemiFuture run(std::shared_ptr executor, - const CancellationToken& token) noexcept final; - - /** - * This service relies on the stepdown token passed to run method of base class and hence - * ignores the interrupts. - */ - void interrupt(Status status) override{}; - - /** - * Aborts the ongoing movePrimary operation which may be user initiated. - */ - void abort(); - - /** - * Returns a Future that will be resolved when _recipientDocDurablePromise is fulfilled. - */ - SharedSemiFuture getRecipientDocDurableFuture() const { - return _recipientDocDurablePromise.getFuture(); - } - - /** - * Returns a Future that will be resolved when the _dataClonePromise is fulfilled. - */ - SharedSemiFuture getDataClonedFuture() const { - return _dataClonePromise.getFuture(); - } - - /** - * Returns a Future that will be resolved when the recipient instance finishes movePrimary - * op. - */ - SharedSemiFuture getCompletionFuture() const { - return _completionPromise.getFuture(); - } - - /** - * Fulfills _forgetMigrationPromise and returns future from _completionPromise. - */ - SharedSemiFuture onReceiveForgetMigration(); - - /** - * Returns Future that will be resolved when the _preparedPromise is fulfilled. - */ - SharedSemiFuture onReceiveSyncData(Timestamp blockTimestamp); - - /** - * Report MovePrimaryRecipientService Instances in currentOp(). 
- */ - boost::optional reportForCurrentOp( - MongoProcessInterface::CurrentOpConnectionsMode connMode, - MongoProcessInterface::CurrentOpSessionsMode sessionMode) noexcept final; - - void checkIfOptionsConflict(const BSONObj& stateDoc) const final; - - NamespaceString getDatabaseName() const; - - UUID getMigrationId() const; - - private: - ExecutorFuture _transitionToInitializingState( - const std::shared_ptr& executor); - - ExecutorFuture _transitionToCloningStateAndClone( - const std::shared_ptr& executor); - - ExecutorFuture _initializeForCloningState( - const std::shared_ptr& executor); - - ExecutorFuture _transitionToApplyingState( - const std::shared_ptr& executor); - - ExecutorFuture _transitionToBlockingStateAndAcquireCriticalSection( - const std::shared_ptr& executor); - - ExecutorFuture _transitionToPreparedState( - const std::shared_ptr& executor); - - ExecutorFuture _transitionToAbortedStateAndCleanupOrphanedData( - const std::shared_ptr& executor); - - ExecutorFuture _transitionToDoneStateAndFinishMovePrimaryOp( - const std::shared_ptr& executor); - - /** - * Clears cached database info on recipient shard to trigger a refresh on next request with - * DB version. This is done before releasing critical section. - */ - void _clearDatabaseMetadata(OperationContext* opCtx); - - void _createMetadataCollection(OperationContext* opCtx); - - std::vector _getUnshardedCollections(OperationContext* opCtx); - - void _persistCollectionsToClone(OperationContext* opCtx); - - std::vector _getCollectionsToClone(OperationContext* opCtx) const; - - void _cleanUpOrphanedDataOnRecipient(OperationContext* opCtx); - - void _cleanUpOperationMetadata( - OperationContext* opCtx, const std::shared_ptr& executor); - - void _removeRecipientDocument(OperationContext* opCtx); - - void _ensureUnfulfilledPromisesError(Status status); - - std::vector _getShardedCollectionsFromConfigSvr( - OperationContext* opCtx) const; - - void _transitionStateMachine(MovePrimaryRecipientStateEnum newState); - - template - void _updateRecipientDocument(OperationContext* opCtx, - const StringData& fieldName, - T value); - - repl::OpTime _getStartApplyingDonorOpTime( - OperationContext* opCtx, const std::shared_ptr& executor); - - bool _checkInvalidStateTransition(MovePrimaryRecipientStateEnum newState); - - bool _canAbort(WithLock) const; - - bool _useOnlineCloner() const; - - void _cloneDataFromDonor(OperationContext* opCtx); - - NamespaceString _getCollectionsToCloneNSS() const; - /** - * Waits for majority write concern for client's last applied opTime. Cancels on stepDown. - * This is needed after each state transition completes in future chain because disk updates - * are done with kLocalWriteConcern in the _retryingCancelableOpCtxFactory retry loops. - */ - ExecutorFuture _waitForMajority( - std::shared_ptr executor); - - const NamespaceString _stateDocumentNS = NamespaceString::kMovePrimaryRecipientNamespace; - - const MovePrimaryRecipientService* _recipientService; - - const MovePrimaryCommonMetadata _metadata; - - std::shared_ptr _movePrimaryRecipientExternalState; - - ServiceContext* _serviceContext; - - // ThreadPool used by CancelableOperationContext. - // CancelableOperationContext must have a thread that is always available to it to mark its - // opCtx as killed when the cancelToken has been cancelled. 
- const std::shared_ptr _markKilledExecutor; - boost::optional - _retryingCancelableOpCtxFactory; - - boost::optional _startApplyingDonorOpTime; - - std::vector _shardedColls; - - const BSONObj _criticalSectionReason; - - const bool _resumedAfterFailover; - - // To synchronize operations on mutable states below. - Mutex _mutex = MONGO_MAKE_LATCH("MovePrimaryRecipient::_mutex"); - - // Used to catch the case when abort is called from a different thread around the time run() - // is called. - bool _abortCalled{false}; - - // Holds the cancellation tokens relevant to the MovePrimaryRecipientService. - std::unique_ptr _ctHolder; - - MovePrimaryRecipientStateEnum _state; - - std::unique_ptr _cloner; - - // Promise that is resolved when the recipient doc is persisted on disk - SharedPromise _recipientDocDurablePromise; - - // Promise that is resolved when the recipient successfully clones documents and transitions - // to kApplying state. - SharedPromise _dataClonePromise; - - // Promise that is resolved when the recipient successfully applies oplog entries till - // blockTimestamp from donor and enters kPrepared state - SharedPromise _preparedPromise; - - // Promise that is resolved when the recipient receives movePrimaryRecipientForgetMigration. - SharedPromise _forgetMigrationPromise; - - // Promise that is resolved when all the needed work for movePrimary op is completed at the - // recipient for a successful or unsuccessful operation both. - SharedPromise _completionPromise; - }; - -protected: - static constexpr StringData movePrimaryOpLogBufferPrefix = "movePrimaryOplogBuffer"_sd; - ServiceContext* const _serviceContext; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_recipient_service_test.cpp b/src/mongo/db/s/move_primary/move_primary_recipient_service_test.cpp deleted file mode 100644 index b6822c9184fb8..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_recipient_service_test.cpp +++ /dev/null @@ -1,788 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/repl/primary_only_service_op_observer.h" -#include "mongo/db/repl/primary_only_service_test_fixture.h" -#include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/s/move_primary/move_primary_recipient_cmds_gen.h" -#include "mongo/db/s/move_primary/move_primary_recipient_service.h" -#include "mongo/db/s/move_primary/move_primary_state_machine_gen.h" -#include "mongo/db/s/resharding/resharding_service_test_helpers.h" -#include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/db/s/sharding_mongod_test_fixture.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/db/service_context.h" -#include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/executor/remote_command_request.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" -#include "mongo/s/catalog/sharding_catalog_client_mock.h" -#include "mongo/s/catalog/type_shard.h" -#include "mongo/s/config_server_catalog_cache_loader.h" -#include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/unittest/assert.h" -#include "mongo/util/duration.h" -#include "mongo/util/fail_point.h" -#include - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - -namespace mongo { - -namespace { - -using MovePrimaryRecipientStateTransitionController = - resharding_service_test_helpers::StateTransitionController; -using OpObserverForTest = resharding_service_test_helpers::StateTransitionControllerOpObserver< - MovePrimaryRecipientStateEnum, - MovePrimaryRecipientDocument>; -using PauseDuringStateTransition = - resharding_service_test_helpers::PauseDuringStateTransitions; - -const std::vector kShardList = {ShardType("shard0", "host0"), - ShardType("shard1", "host1")}; - -static HostAndPort makeHostAndPort(const ShardId& shardId) { - return HostAndPort(str::stream() << shardId << ":123"); -} -} // namespace - -class ClonerForTest : public Cloner { - Status copyDb(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost, - const std::vector& shardedColls, - std::set* clonedColls) override { - return Status::OK(); - } - - Status setupConn(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost) override { - return Status::OK(); - } - - StatusWith> getListOfCollections(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost) override { - std::vector colls; - return colls; - } -}; - -class MovePrimaryRecipientExternalStateForTest : public MovePrimaryRecipientExternalState { - - std::vector sendCommandToShards( - OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) { - auto opTimeBase = repl::OpTimeBase(Timestamp::min()); - opTimeBase.setTerm(0); - BSONArrayBuilder bab; - bab.append(opTimeBase.toBSON()); - executor::RemoteCommandResponse kOkResponse{ - BSON("ok" << 1 << "cursor" << BSON("firstBatch" << bab.done())), Microseconds(0)}; - std::vector shardResponses{ - {kShardList.front().getName(), - kOkResponse, - makeHostAndPort(kShardList.front().getName())}}; - return shardResponses; - }; -}; - -class 
MovePrimaryRecipientServiceForTest : public MovePrimaryRecipientService { -public: - static constexpr StringData kServiceName = "MovePrimaryRecipientServiceForTest"_sd; - - explicit MovePrimaryRecipientServiceForTest(ServiceContext* serviceContext) - : MovePrimaryRecipientService(serviceContext) {} - - std::shared_ptr constructInstance( - BSONObj initialStateDoc) override { - auto recipientStateDoc = MovePrimaryRecipientDocument::parse( - IDLParserContext("MovePrimaryRecipientServiceForTest::constructInstance"), - std::move(initialStateDoc)); - - return std::make_shared( - this, - recipientStateDoc, - std::make_shared(), - _serviceContext, - std::make_unique()); - } - - StringData getServiceName() const { - return kServiceName; - } -}; - -class MovePrimaryRecipientServiceTest : public ShardServerTestFixture { - - void setUp() override { - ShardServerTestFixture::setUp(); - - _serviceCtx = getServiceContext(); - WaitForMajorityService::get(_serviceCtx).startup(_serviceCtx); - - auto opCtx = operationContext(); - - for (const auto& shardType : kShardList) { - auto shardTargeter = RemoteCommandTargeterMock::get( - uassertStatusOK(shardRegistry()->getShard(opCtx, shardType.getName())) - ->getTargeter()); - shardTargeter->setFindHostReturnValue(makeHostAndPort(shardType.getName())); - } - - _controller = std::make_shared(); - - _opObserverRegistry = checked_cast(_serviceCtx->getOpObserver()); - - invariant(_opObserverRegistry); - - _opObserverRegistry->addObserver( - std::make_unique(_serviceCtx)); - - _opObserverRegistry->addObserver(std::make_unique( - _controller, - NamespaceString::kMovePrimaryRecipientNamespace, - [](const MovePrimaryRecipientDocument& stateDoc) { return stateDoc.getState(); })); - - _registry = repl::PrimaryOnlyServiceRegistry::get(_serviceCtx); - auto service = std::make_unique(_serviceCtx); - - auto serviceName = service->getServiceName(); - _registry->registerService(std::move(service)); - - _service = _registry->lookupServiceByName(serviceName); - - - _registry->onStartup(opCtx); - _stepUpPOS(); - } - - void tearDown() override { - globalFailPointRegistry().disableAllFailpoints(); - WaitForMajorityService::get(getServiceContext()).shutDown(); - _registry->onShutdown(); - - ShardServerTestFixture::tearDown(); - } - - - std::unique_ptr makeShardingCatalogClient() override { - - class StaticCatalogClient final : public ShardingCatalogClientMock { - public: - StaticCatalogClient(std::vector shards) : _shards(std::move(shards)) {} - - StatusWith>> getAllShards( - OperationContext* opCtx, repl::ReadConcernLevel readConcern) override { - return repl::OpTimeWith>(_shards); - } - - std::vector getCollections(OperationContext* opCtx, - StringData dbName, - repl::ReadConcernLevel readConcernLevel, - const BSONObj& sort) override { - return _colls; - } - - std::vector getAllShardedCollectionsForDb( - OperationContext* opCtx, - StringData dbName, - repl::ReadConcernLevel readConcern, - const BSONObj& sort = BSONObj()) override { - return _shardedColls; - } - - void setCollections(std::vector colls) { - _colls = std::move(colls); - } - - private: - const std::vector _shards; - std::vector _colls; - std::vector _shardedColls; - }; - - return std::make_unique(kShardList); - } - -protected: - MovePrimaryRecipientDocument createRecipientDoc() { - UUID migrationId = UUID::gen(); - MovePrimaryCommonMetadata metadata(migrationId, - NamespaceString{"foo"}, - kShardList.front().getName(), - kShardList.back().getName()); - - MovePrimaryRecipientDocument doc; - doc.setMetadata(metadata); - 
doc.setId(migrationId); - - return doc; - } - - MovePrimaryRecipientDocument getRecipientDoc(OperationContext* opCtx, UUID migrationId) { - DBDirectClient client(opCtx); - auto doc = client.findOne(NamespaceString::kMovePrimaryRecipientNamespace, - BSON("_id" << migrationId)); - ASSERT_FALSE(doc.isEmpty()); - return MovePrimaryRecipientDocument::parse( - IDLParserContext("MovePrimaryRecipientServiceTest::getRecipientDoc"), doc); - } - - std::shared_ptr lookupRecipient( - OperationContext* opCtx, repl::PrimaryOnlyService::InstanceID instanceId) { - auto recipientOpt = - MovePrimaryRecipientService::MovePrimaryRecipient::lookup(opCtx, _service, instanceId); - return recipientOpt ? recipientOpt.get() : nullptr; - } - - void waitUntilDocDeleted(OperationContext* opCtx, NamespaceString nss, UUID migrationId) { - DBDirectClient client(opCtx); - int cnt = 1000; - while (cnt--) { - DBDirectClient client(opCtx); - auto recipientDoc = client.findOne(nss, BSON("_id" << migrationId)); - if (recipientDoc.isEmpty()) { - return; - } - - sleepmillis(60); - } - FAIL(str::stream() << "Timed out waiting for delete of doc with migrationId: " - << migrationId); - } - - void _stepUpPOS() { - auto opCtx = operationContext(); - auto replCoord = repl::ReplicationCoordinator::get(getServiceContext()); - WriteUnitOfWork wuow{operationContext()}; - auto newOpTime = repl::getNextOpTime(operationContext()); - wuow.commit(); - ASSERT_OK(replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY)); - replCoord->setMyLastAppliedOpTimeAndWallTime({newOpTime, {}}); - - _registry->onStepUpComplete(opCtx, _term); - } - - OpObserverRegistry* _opObserverRegistry = nullptr; - repl::PrimaryOnlyServiceRegistry* _registry = nullptr; - repl::PrimaryOnlyService* _service = nullptr; - std::shared_ptr _controller; - ServiceContext* _serviceCtx = nullptr; - long long _term = 0; -}; - -TEST_F(MovePrimaryRecipientServiceTest, CanRunToCompletion) { - auto doc = createRecipientDoc(); - auto opCtx = operationContext(); - - auto recipient = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - auto future = recipient->onReceiveForgetMigration(); - future.get(opCtx); -} - -TEST_F(MovePrimaryRecipientServiceTest, TransitionsThroughEachStateInRunToCompletion) { - const std::vector recipientStates{ - MovePrimaryRecipientStateEnum::kInitializing, - MovePrimaryRecipientStateEnum::kCloning, - MovePrimaryRecipientStateEnum::kApplying, - MovePrimaryRecipientStateEnum::kBlocking, - MovePrimaryRecipientStateEnum::kPrepared, - MovePrimaryRecipientStateEnum::kDone}; - - const std::vector> stateFPNames{ - {MovePrimaryRecipientStateEnum::kInitializing, - "movePrimaryRecipientPauseAfterInsertingStateDoc"}, - {MovePrimaryRecipientStateEnum::kCloning, "movePrimaryRecipientPauseAfterCloningState"}, - {MovePrimaryRecipientStateEnum::kApplying, "movePrimaryRecipientPauseAfterApplyingState"}, - {MovePrimaryRecipientStateEnum::kBlocking, "movePrimaryRecipientPauseAfterBlockingState"}, - {MovePrimaryRecipientStateEnum::kPrepared, "movePrimaryRecipientPauseAfterPreparedState"}, - {MovePrimaryRecipientStateEnum::kDone, "movePrimaryRecipientPauseBeforeDeletingStateDoc"}}; - - std::map> stateFailPointMap; - - PauseDuringStateTransition guard(_controller.get(), recipientStates); - - for (const auto& stateFPName : stateFPNames) { - auto state = stateFPName.first; - auto fp = globalFailPointRegistry().find(stateFPName.second); - auto cnt = fp->setMode(FailPoint::alwaysOn, 0); - stateFailPointMap[state] = {fp, cnt}; - } - - auto 
movePrimaryRecipientPauseBeforeCompletion = - globalFailPointRegistry().find("movePrimaryRecipientPauseBeforeCompletion"); - auto timesEnteredPauseBeforeCompletionFailPoint = - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::alwaysOn, 0); - - auto doc = createRecipientDoc(); - auto opCtx = operationContext(); - - auto recipient = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - for (const auto& stateFPName : stateFPNames) { - auto state = stateFPName.first; - guard.wait(state); - guard.unset(state); - auto fp = stateFailPointMap[state].first; - auto cnt = stateFailPointMap[state].second; - fp->waitForTimesEntered(cnt + 1); - fp->setMode(FailPoint::off, 0); - - if (state == MovePrimaryRecipientStateEnum::kPrepared) { - (void)recipient->onReceiveForgetMigration(); - } - } - - movePrimaryRecipientPauseBeforeCompletion->waitForTimesEntered( - timesEnteredPauseBeforeCompletionFailPoint + 1); - - recipient->getCompletionFuture().get(); - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::off, 0); - waitUntilDocDeleted( - opCtx, NamespaceString::kMovePrimaryRecipientNamespace, doc.getMigrationId()); -} - -TEST_F(MovePrimaryRecipientServiceTest, PersistsStateDocument) { - auto doc = createRecipientDoc(); - - auto movePrimaryRecipientPauseAfterInsertingStateDoc = - globalFailPointRegistry().find("movePrimaryRecipientPauseAfterInsertingStateDoc"); - auto timesEnteredFailPoint = - movePrimaryRecipientPauseAfterInsertingStateDoc->setMode(FailPoint::alwaysOn, 0); - - auto opCtx = operationContext(); - auto instance = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - movePrimaryRecipientPauseAfterInsertingStateDoc->waitForTimesEntered(timesEnteredFailPoint + 1); - - ASSERT(instance.get()); - ASSERT_EQ(doc.getMigrationId(), instance->getMigrationId()); - ASSERT(instance->getRecipientDocDurableFuture().isReady()); - - auto persistedDoc = getRecipientDoc(opCtx, doc.getMigrationId()); - ASSERT_BSONOBJ_EQ(persistedDoc.getMetadata().toBSON(), doc.getMetadata().toBSON()); - - movePrimaryRecipientPauseAfterInsertingStateDoc->setMode(FailPoint::off, 0); -} - -TEST_F(MovePrimaryRecipientServiceTest, ThrowsWithConflictingOperation) { - auto doc = createRecipientDoc(); - - auto movePrimaryRecipientPauseAfterInsertingStateDoc = - globalFailPointRegistry().find("movePrimaryRecipientPauseAfterInsertingStateDoc"); - auto timesEnteredFailPoint = - movePrimaryRecipientPauseAfterInsertingStateDoc->setMode(FailPoint::alwaysOn, 0); - - auto opCtx = operationContext(); - auto instance = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - movePrimaryRecipientPauseAfterInsertingStateDoc->waitForTimesEntered(timesEnteredFailPoint + 1); - - auto conflictingDoc = createRecipientDoc(); - - ASSERT_NE(doc.getMigrationId(), conflictingDoc.getMigrationId()); - - // Asserts that a movePrimary op on same database fails with MovePrimaryInProgress - ASSERT_THROWS_CODE(MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, conflictingDoc.toBSON()), - DBException, - ErrorCodes::MovePrimaryInProgress); - - movePrimaryRecipientPauseAfterInsertingStateDoc->setMode(FailPoint::off, 0); -} - - -TEST_F(MovePrimaryRecipientServiceTest, ThrowsWithConflictingOptions) { - auto doc = createRecipientDoc(); - - auto movePrimaryRecipientPauseAfterInsertingStateDoc = - globalFailPointRegistry().find("movePrimaryRecipientPauseAfterInsertingStateDoc"); - 
auto timesEnteredFailPoint = - movePrimaryRecipientPauseAfterInsertingStateDoc->setMode(FailPoint::alwaysOn, 0); - - auto opCtx = operationContext(); - auto instance = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - movePrimaryRecipientPauseAfterInsertingStateDoc->waitForTimesEntered(timesEnteredFailPoint + 1); - - MovePrimaryCommonMetadata metadata(doc.getMigrationId(), - NamespaceString{"bar"}, - "second/localhost:27018", - "first/localhost:27019"); - MovePrimaryRecipientDocument conflictingDoc; - conflictingDoc.setId(doc.getMigrationId()); - conflictingDoc.setMetadata(metadata); - - // Asserts that a movePrimary op with a different fromShard fails with MovePrimaryInProgress - ASSERT_THROWS_CODE(MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, conflictingDoc.toBSON()), - DBException, - ErrorCodes::MovePrimaryInProgress); - - // Asserts that a movePrimary op with a different databaseName fails with MovePrimaryInProgress - ASSERT_THROWS_CODE(MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, conflictingDoc.toBSON()), - DBException, - ErrorCodes::MovePrimaryInProgress); - - movePrimaryRecipientPauseAfterInsertingStateDoc->setMode(FailPoint::off, 0); -} - -TEST_F(MovePrimaryRecipientServiceTest, CanAbortBeforePersistingStateDoc) { - auto doc = createRecipientDoc(); - - auto movePrimaryRecipientPauseBeforeRunning = - globalFailPointRegistry().find("movePrimaryRecipientPauseBeforeRunning"); - auto timesEnteredPauseBeforeRunningFailPoint = - movePrimaryRecipientPauseBeforeRunning->setMode(FailPoint::alwaysOn, 0); - - auto movePrimaryRecipientPauseBeforeCompletion = - globalFailPointRegistry().find("movePrimaryRecipientPauseBeforeCompletion"); - auto timesEnteredPauseBeforeCompletionFailPoint = - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::alwaysOn, 0); - - auto opCtx = operationContext(); - auto instance = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - movePrimaryRecipientPauseBeforeRunning->waitForTimesEntered( - timesEnteredPauseBeforeRunningFailPoint + 1); - - instance->abort(); - movePrimaryRecipientPauseBeforeRunning->setMode(FailPoint::off, 0); - - movePrimaryRecipientPauseBeforeCompletion->waitForTimesEntered( - timesEnteredPauseBeforeCompletionFailPoint + 1); - - ASSERT(instance->getCompletionFuture().isReady()); - ASSERT(!instance->getCompletionFuture().getNoThrow().isOK()); -} - -TEST_F(MovePrimaryRecipientServiceTest, FulfillsDataClonedFutureAfterCloning) { - auto movePrimaryRecipientPauseAfterCloningState = - globalFailPointRegistry().find("movePrimaryRecipientPauseAfterCloningState"); - auto timesEnteredPauseAfterCloningStateFailPoint = - movePrimaryRecipientPauseAfterCloningState->setMode(FailPoint::alwaysOn, 0); - - auto doc = createRecipientDoc(); - auto opCtx = operationContext(); - - auto recipient = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - movePrimaryRecipientPauseAfterCloningState->waitForTimesEntered( - timesEnteredPauseAfterCloningStateFailPoint + 1); - - auto dataClonedFuture = recipient->getDataClonedFuture(); - ASSERT_TRUE(dataClonedFuture.getNoThrow(opCtx).isOK()); - - movePrimaryRecipientPauseAfterCloningState->setMode(FailPoint::off, 0); - - ASSERT_TRUE(recipient->onReceiveForgetMigration().getNoThrow(opCtx).isOK()); -} - -TEST_F(MovePrimaryRecipientServiceTest, CanAbortInEachAbortableState) { - // Tests that the 
movePrimary op aborts in the states below when asked to abort. - const std::vector> stateFPNames{ - {MovePrimaryRecipientStateEnum::kInitializing, - "movePrimaryRecipientPauseAfterInsertingStateDoc"}, - {MovePrimaryRecipientStateEnum::kCloning, "movePrimaryRecipientPauseAfterCloningState"}, - {MovePrimaryRecipientStateEnum::kApplying, "movePrimaryRecipientPauseAfterApplyingState"}, - {MovePrimaryRecipientStateEnum::kBlocking, "movePrimaryRecipientPauseAfterBlockingState"}, - {MovePrimaryRecipientStateEnum::kPrepared, "movePrimaryRecipientPauseAfterPreparedState"}}; - - for (const auto& stateFPNamePair : stateFPNames) { - PauseDuringStateTransition stateTransitionsGuard{_controller.get(), - MovePrimaryRecipientStateEnum::kAborted}; - const auto& state = stateFPNamePair.first; - auto fp = globalFailPointRegistry().find(stateFPNamePair.second); - - auto cnt = fp->setMode(FailPoint::alwaysOn, 0); - - auto doc = createRecipientDoc(); - - auto opCtx = operationContext(); - - auto movePrimaryRecipientPauseBeforeCompletion = - globalFailPointRegistry().find("movePrimaryRecipientPauseBeforeCompletion"); - auto timesEnteredPauseBeforeCompletionFailPoint = - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::alwaysOn, 0); - - auto recipient = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - auto instanceId = BSON("_id" << recipient->getMigrationId()); - - ASSERT_FALSE(recipient->getCompletionFuture().isReady()); - - LOGV2(7306904, "Running CanAbortInEachAbortableState", "state"_attr = state); - - fp->waitForTimesEntered(cnt + 1); - - recipient->abort(); - - fp->setMode(FailPoint::off, 0); - - stateTransitionsGuard.wait(MovePrimaryRecipientStateEnum::kAborted); - stateTransitionsGuard.unset(MovePrimaryRecipientStateEnum::kAborted); - - movePrimaryRecipientPauseBeforeCompletion->waitForTimesEntered( - timesEnteredPauseBeforeCompletionFailPoint + 1); - - ASSERT_THROWS_CODE(recipient->getCompletionFuture().get(opCtx), - DBException, - ErrorCodes::MovePrimaryAborted); - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::off, 0); - - recipient.reset(); - - waitUntilDocDeleted( - opCtx, NamespaceString::kMovePrimaryRecipientNamespace, doc.getMigrationId()); - - _service->releaseInstance(instanceId, Status::OK()); - - LOGV2(7306905, "Finished running CanAbortInEachAbortableState", "state"_attr = state); - } -} - -TEST_F(MovePrimaryRecipientServiceTest, StepUpStepDownEachPersistedStateLifecycleFlagEnabled) { - // Tests that the movePrimary op aborts on stepdown-stepup for first four states and goes to - // completion for the rest of the states below. 
- const std::vector> stateFPNames{ - {MovePrimaryRecipientStateEnum::kInitializing, - "movePrimaryRecipientPauseAfterInsertingStateDoc"}, - {MovePrimaryRecipientStateEnum::kCloning, "movePrimaryRecipientPauseAfterCloningState"}, - {MovePrimaryRecipientStateEnum::kApplying, "movePrimaryRecipientPauseAfterApplyingState"}, - {MovePrimaryRecipientStateEnum::kBlocking, "movePrimaryRecipientPauseAfterBlockingState"}, - {MovePrimaryRecipientStateEnum::kPrepared, "movePrimaryRecipientPauseAfterPreparedState"}, - {MovePrimaryRecipientStateEnum::kDone, "movePrimaryRecipientPauseBeforeDeletingStateDoc"}}; - - for (const auto& stateFPNamePair : stateFPNames) { - PauseDuringStateTransition stateTransitionsGuard{_controller.get(), - MovePrimaryRecipientStateEnum::kAborted}; - const auto& state = stateFPNamePair.first; - auto fp = globalFailPointRegistry().find(stateFPNamePair.second); - - auto doc = createRecipientDoc(); - - auto opCtx = operationContext(); - - auto movePrimaryRecipientPauseBeforeCompletion = - globalFailPointRegistry().find("movePrimaryRecipientPauseBeforeCompletion"); - auto timesEnteredPauseBeforeCompletionFailPoint = - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::alwaysOn, 0); - - auto cnt = fp->setMode(FailPoint::alwaysOn, 0); - - auto recipient = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - auto instanceId = BSON("_id" << recipient->getMigrationId()); - - ASSERT_FALSE(recipient->getCompletionFuture().isReady()); - - LOGV2(7306906, - "Running StepUpStepDownEachPersistedStateLifecycleFlagEnabled", - "stepdown"_attr = state); - - if (state == MovePrimaryRecipientStateEnum::kDone) { - (void)recipient->onReceiveForgetMigration(); - } - - fp->waitForTimesEntered(cnt + 1); - - stepDown(_serviceCtx, _registry); - - fp->setMode(FailPoint::off, 0); - - movePrimaryRecipientPauseBeforeCompletion->waitForTimesEntered( - timesEnteredPauseBeforeCompletionFailPoint + 1); - ASSERT_EQ(recipient->getCompletionFuture().getNoThrow(), ErrorCodes::CallbackCanceled); - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::off, 0); - - stepUp(opCtx, _serviceCtx, _registry, _term); - - recipient = lookupRecipient(opCtx, instanceId); - - if (state < MovePrimaryRecipientStateEnum::kPrepared) { - stateTransitionsGuard.wait(MovePrimaryRecipientStateEnum::kAborted); - stateTransitionsGuard.unset(MovePrimaryRecipientStateEnum::kAborted); - ASSERT_THROWS_CODE(recipient->getCompletionFuture().get(opCtx), - DBException, - ErrorCodes::MovePrimaryAborted); - } else { - stateTransitionsGuard.unset(MovePrimaryRecipientStateEnum::kAborted); - (void)recipient->onReceiveForgetMigration(); - recipient->getCompletionFuture().get(opCtx); - } - - recipient.reset(); - - waitUntilDocDeleted( - opCtx, NamespaceString::kMovePrimaryRecipientNamespace, doc.getMigrationId()); - - _service->releaseInstance(instanceId, Status::OK()); - - LOGV2(7306907, - "Finished running StepUpStepDownEachPersistedStateLifecycleFlagEnabled", - "stepdown"_attr = state); - } -} - -TEST_F(MovePrimaryRecipientServiceTest, CleansUpPersistedMetadataOnCompletion) { - auto doc = createRecipientDoc(); - auto opCtx = operationContext(); - - auto recipient = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - auto future = recipient->onReceiveForgetMigration(); - future.get(opCtx); - waitUntilDocDeleted( - opCtx, NamespaceString::kMovePrimaryRecipientNamespace, doc.getMigrationId()); - waitUntilDocDeleted( - opCtx, 
NamespaceString::kMovePrimaryApplierProgressNamespace, doc.getMigrationId()); - waitUntilDocDeleted(opCtx, - NamespaceString::makeMovePrimaryOplogBufferNSS(doc.getMigrationId()), - doc.getMigrationId()); -} - -TEST_F(MovePrimaryRecipientServiceTest, OnReceiveSyncData) { - auto movePrimaryRecipientPauseAfterPreparedState = - globalFailPointRegistry().find("movePrimaryRecipientPauseAfterPreparedState"); - auto timesEnteredPauseAfterPreparedStateFailPoint = - movePrimaryRecipientPauseAfterPreparedState->setMode(FailPoint::alwaysOn, 0); - - auto doc = createRecipientDoc(); - auto opCtx = operationContext(); - - auto recipient = MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate( - opCtx, _service, doc.toBSON()); - - movePrimaryRecipientPauseAfterPreparedState->waitForTimesEntered( - timesEnteredPauseAfterPreparedStateFailPoint + 1); - - auto preparedFuture = recipient->onReceiveSyncData(Timestamp()); - ASSERT_TRUE(preparedFuture.getNoThrow(opCtx).isOK()); - - movePrimaryRecipientPauseAfterPreparedState->setMode(FailPoint::off, 0); - - ASSERT_TRUE(recipient->onReceiveForgetMigration().getNoThrow(opCtx).isOK()); -} - -TEST_F(MovePrimaryRecipientServiceTest, AbortsOnUnrecoverableClonerError) { - // Step Down to register new POS before Step Up. - stepDown(_serviceCtx, _registry); - - class FailingCloner : public ClonerForTest { - Status copyDb(OperationContext* opCtx, - const std::string& dBName, - const std::string& masterHost, - const std::vector& shardedColls, - std::set* clonedColls) override { - return Status(ErrorCodes::NamespaceExists, "namespace exists"); - } - }; - - class MovePrimaryRecipientServiceWithBadCloner : public MovePrimaryRecipientService { - public: - explicit MovePrimaryRecipientServiceWithBadCloner(ServiceContext* serviceContext) - : MovePrimaryRecipientService(serviceContext){}; - - std::shared_ptr constructInstance( - BSONObj initialStateDoc) override { - auto recipientStateDoc = MovePrimaryRecipientDocument::parse( - IDLParserContext("MovePrimaryRecipientServiceWithBadCloner::constructInstance"), - std::move(initialStateDoc)); - - return std::make_shared( - this, - recipientStateDoc, - std::make_shared(), - _serviceContext, - std::make_unique()); - } - - StringData getServiceName() const override { - return "MovePrimaryRecipientServiceWithBadCloner"_sd; - } - - NamespaceString getStateDocumentsNS() const override { - return NamespaceString::createNamespaceString_forTest( - "config.movePrimaryRecipientsWithBadCloner"); - } - }; - - auto opCtx = operationContext(); - auto service = std::make_unique(_serviceCtx); - auto serviceName = service->getServiceName(); - _registry->registerService(std::move(service)); - auto pos = _registry->lookupServiceByName(serviceName); - pos->startup(opCtx); - stepUp(opCtx, _serviceCtx, _registry, _term); - - auto doc = createRecipientDoc(); - - auto movePrimaryRecipientPauseBeforeCompletion = - globalFailPointRegistry().find("movePrimaryRecipientPauseBeforeCompletion"); - auto timesEnteredPauseBeforeCompletionFailPoint = - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::alwaysOn, 0); - - auto recipient = - MovePrimaryRecipientService::MovePrimaryRecipient::getOrCreate(opCtx, pos, doc.toBSON()); - ASSERT_THROWS_CODE( - recipient->getDataClonedFuture().get(), DBException, ErrorCodes::NamespaceExists); - - movePrimaryRecipientPauseBeforeCompletion->waitForTimesEntered( - timesEnteredPauseBeforeCompletionFailPoint + 1); - - ASSERT_THROWS_CODE( - recipient->getCompletionFuture().get(), DBException, 
ErrorCodes::MovePrimaryAborted); - movePrimaryRecipientPauseBeforeCompletion->setMode(FailPoint::off, 0); -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_server_parameters.idl b/src/mongo/db/s/move_primary/move_primary_server_parameters.idl deleted file mode 100644 index 2c1a88220da42..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_server_parameters.idl +++ /dev/null @@ -1,71 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. -# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. -# - -global: - cpp_namespace: "mongo" - -imports: - - "mongo/db/basic_types.idl" - -server_parameters: - movePrimaryDonorServiceMinThreadCount: - description: The min number of threads in the movePrimary donor's thread pool. - set_at: startup - cpp_vartype: int - cpp_varname: gMovePrimaryDonorServiceMinThreadCount - default: 0 - validator: - gte: 0 - lte: 256 - movePrimaryDonorServiceMaxThreadCount: - description: The max number of threads in the movePrimary donor's thread pool. - set_at: startup - cpp_vartype: int - cpp_varname: gMovePrimaryDonorServiceMaxThreadCount - default: 8 - validator: - gte: 1 - lte: 256 - movePrimaryRecipientServiceMaxThreadCount: - description: The max number of threads in the movePrimary recipient's thread pool. - set_at: startup - cpp_vartype: int - cpp_varname: gMovePrimaryRecipientServiceMaxThreadCount - default: 8 - validator: - gte: 1 - lte: 256 - movePrimaryClonerMetadataCollMaxBatchSizeBytes: - description: The max number of bytes of BSON documents containing collection names that can be batched together for an insert. - set_at: [startup, runtime] - cpp_vartype: AtomicWord - cpp_varname: gMovePrimaryClonerMetadataCollMaxBatchSizeBytes - default: - expr: 100 * 1024 - validator: - gte: 1 diff --git a/src/mongo/db/s/move_primary/move_primary_shared_data.cpp b/src/mongo/db/s/move_primary/move_primary_shared_data.cpp deleted file mode 100644 index 248f9a98116c3..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_shared_data.cpp +++ /dev/null @@ -1,41 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. 
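For context on the IDL file deleted just above: server parameters declared this way surface in C++ as the listed cpp_varname globals. A rough sketch of how the thread-count knobs would typically feed a thread pool follows; the generated header name and the pool wiring are assumptions, not quoted from the tree.

```cpp
// Rough sketch only: how gMovePrimaryDonorService{Min,Max}ThreadCount (declared in the
// deleted IDL above) would typically be consumed. Header name and pool name are assumed.
#include "mongo/db/s/move_primary/move_primary_server_parameters_gen.h"
#include "mongo/util/concurrency/thread_pool.h"

namespace mongo {
ThreadPool::Options makeMovePrimaryDonorThreadPoolOptions() {
    ThreadPool::Options options;
    options.poolName = "MovePrimaryDonorServiceThreadPool";  // illustrative
    options.minThreads = gMovePrimaryDonorServiceMinThreadCount;
    options.maxThreads = gMovePrimaryDonorServiceMaxThreadCount;
    return options;
}
}  // namespace mongo
```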
- * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/s/move_primary/move_primary_shared_data.h" - -namespace mongo { -void MovePrimarySharedData::setLastVisibleOpTime(WithLock, repl::OpTime opTime) { - _lastVisibleOpTime = opTime; -} - -repl::OpTime MovePrimarySharedData::getLastVisibleOpTime(WithLock) { - return _lastVisibleOpTime; -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_shared_data.h b/src/mongo/db/s/move_primary/move_primary_shared_data.h deleted file mode 100644 index 26186804df240..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_shared_data.h +++ /dev/null @@ -1,73 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
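The implementation removed above (and its header, removed just below) held per-migration state shared between the recipient's cloner and oplog applier. A minimal construction sketch using the constructors declared in the deleted header; the helper function name is illustrative.

```cpp
// Minimal sketch of constructing the removed MovePrimarySharedData. The clock source
// comes from the standard ServiceContext accessor. The two-argument constructor
// defaults the resume phase to ResumePhase::kNone; the three-argument form lets a
// failover resume at kDataSync or kOplogCatchup.
#include <memory>

#include "mongo/db/s/move_primary/move_primary_shared_data.h"  // file removed by this patch
#include "mongo/db/service_context.h"
#include "mongo/util/uuid.h"

namespace mongo {
std::shared_ptr<MovePrimarySharedData> makeMovePrimarySharedData(ServiceContext* serviceContext,
                                                                 const UUID& migrationId) {
    return std::make_shared<MovePrimarySharedData>(serviceContext->getFastClockSource(),
                                                   migrationId);
}
}  // namespace mongo
```

Note that setLastVisibleOpTime()/getLastVisibleOpTime() take a WithLock: callers must already hold the mutex inherited from ReplSyncSharedData, as the comment in the removed header states.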
- */ - -#pragma once - -#include "mongo/db/cursor_id.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/repl/optime.h" -#include "mongo/db/repl/repl_sync_shared_data.h" - -namespace mongo { - -enum class ResumePhase { kNone, kDataSync, kOplogCatchup }; - -class MovePrimarySharedData final : public repl::ReplSyncSharedData { -public: - MovePrimarySharedData(ClockSource* clock, const UUID& migrationId) - : ReplSyncSharedData(clock), _migrationId(migrationId), _resumePhase(ResumePhase::kNone) {} - MovePrimarySharedData(ClockSource* clock, const UUID& migrationId, ResumePhase resumePhase) - : ReplSyncSharedData(clock), _migrationId(migrationId), _resumePhase(resumePhase) {} - - void setLastVisibleOpTime(WithLock, repl::OpTime opTime); - - repl::OpTime getLastVisibleOpTime(WithLock); - - const mongo::UUID& getMigrationId() const { - return _migrationId; - } - - ResumePhase getResumePhase() const { - return _resumePhase; - } - -private: - // Must hold mutex (in base class) to access this. - // Represents last visible majority committed donor opTime. - repl::OpTime _lastVisibleOpTime; - - // Id of the current move primary migration. - const UUID _migrationId; - - // Indicate the phase from which the online move primary migration is resuming due to - // recipient/donor failovers. - const ResumePhase _resumePhase; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary/move_primary_state_machine.idl b/src/mongo/db/s/move_primary/move_primary_state_machine.idl deleted file mode 100644 index 70e34c3274897..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_state_machine.idl +++ /dev/null @@ -1,164 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. -# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. -# - -global: - cpp_namespace: "mongo" - cpp_includes: - - "mongo/db/feature_compatibility_version_parser.h" - -imports: - - "mongo/db/repl/replication_types.idl" - - "mongo/db/basic_types.idl" - - "mongo/db/s/move_primary/move_primary_recipient_cmds.idl" - - "mongo/db/s/move_primary/move_primary_common_metadata.idl" - -enums: - MovePrimaryDonorState: - description: "The state of a movePrimary operation on the donor side." 
- type: string - values: - kUnused: "unused" - kInitializing: "initializing" - kCloning: "cloning" - kWaitingToBlockWrites: "waitingToBlockWrites" - kBlockingWrites: "blockingWrites" - kPrepared: "prepared" - kAborted: "aborted" - kDone: "done" - - MovePrimaryRecipientState: - description: "The state of a movePrimary operation on the recipient side." - type: string - values: - kUnused: "unused" - kInitializing: "initializing" - kCloning: "cloning" - kApplying: "applying" - kBlocking: "blocking" - kPrepared: "prepared" - kAborted: "aborted" - kDone: "done" - -structs: - MovePrimaryDonorMutableFields: - description: >- - Contains fields for a move primary donor which can be updated throughout the lifetime - of the operation. - strict: false - fields: - state: - type: MovePrimaryDonorState - description: "The state of an in-progress movePrimary operation on the donor." - default: kUnused - blockingWritesTimestamp: - type: timestamp - description: >- - A timestamp after writes began being blocked on the donor node for the database - being moved. - optional: true - abortReason: - type: object_owned - description: "The error that caused the migration to abort." - optional: true - - MovePrimaryDonorDocument: - description: "Represents an in-progress move primary operation on the donor." - strict: false - inline_chained_structs: true - chained_structs: - MovePrimaryCommonMetadata: metadata - fields: - _id: - type: uuid - description: >- - Unique identifier for the movePrimary operation. - cpp_name: id - mutableFields: - type: MovePrimaryDonorMutableFields - default: true - - MovePrimaryRecipientDocument: - description: "Represents an in-progress movePrimary operation on the recipient." - strict: false - inline_chained_structs: true - chained_structs: - MovePrimaryCommonMetadata: metadata - fields: - _id: - type: uuid - description: >- - Unique identifier for the movePrimary operation. - This is sent as migrationId to the recipient by the donor. - cpp_name: id - state: - type: MovePrimaryRecipientState - description: "The state of the recipient during an in progress movePrimary operation." - default: kUnused - startApplyingDonorOpTime: - description: >- - Populated during data sync; the donor's operation time when the data - cloning starts. - type: optime - optional: true - startFetchingDonorOpTime: - description: >- - Populated during data sync; the donor's operation time of the last open - transaction when the data cloning started. - type: optime - optional: true - dataConsistentStopDonorOpTime: - description: >- - Populated during data sync; the donor's operation time when the data - cloning finishes. - type: optime - optional: true - cloneFinishedRecipientOpTime: - description: >- - Populated during data sync; the recipient operation time when the data - cloning finishes. - type: optime - optional: true - completedUpdatingTransactionsBeforeStartOpTime: - description: >- - Indicates if the recipient has finished updating transaction entries that were - committed before 'startFetchingDonorOpTime'. If true, the recipient can skip - the fetching transactions stage. - type: bool - default: false - completedFetchingRetryableWritesBeforeStartOpTime: - description: >- - Indicates if the recipient has finished fetching retryable writes oplog entries - before 'startFetchingDonorOpTime' for each retryable writes entry in - 'config.transactions' - type: bool - default: false - startAt: - type: date - description: >- - The wall-clock time at which the state machine document is initialized. 
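The recipient state document declared in the IDL being deleted here is what the removed tests round-trip with parse() and toBSON(). A small sketch of that round trip follows; the generated header name and the setState/getMigrationId accessors are the standard IDL outputs and are assumed rather than quoted from the tree.

```cpp
// Sketch of round-tripping the removed IDL-generated recipient state document,
// mirroring what the deleted tests do with doc.toBSON() and Document::parse().
#include "mongo/bson/bsonobj.h"
#include "mongo/db/s/move_primary/move_primary_state_machine_gen.h"  // assumed generated name
#include "mongo/idl/idl_parser.h"

namespace mongo {
BSONObj makeRecipientStateDoc(const MovePrimaryCommonMetadata& metadata) {
    MovePrimaryRecipientDocument doc;
    doc.setId(metadata.getMigrationId());  // the _id doubles as the migration id
    doc.setMetadata(metadata);
    doc.setState(MovePrimaryRecipientStateEnum::kUnused);  // default initial state
    return doc.toBSON();
}

MovePrimaryRecipientDocument parseRecipientStateDoc(const BSONObj& bson) {
    return MovePrimaryRecipientDocument::parse(IDLParserContext("recipientStateDoc"), bson);
}
}  // namespace mongo
```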
- optional: true diff --git a/src/mongo/db/s/move_primary/move_primary_util.h b/src/mongo/db/s/move_primary/move_primary_util.h deleted file mode 100644 index 5966bfeeea3fb..0000000000000 --- a/src/mongo/db/s/move_primary/move_primary_util.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/base/status.h" -#include "mongo/bson/timestamp.h" -#include "mongo/db/operation_context.h" -#include "mongo/util/future.h" - -namespace mongo { - -namespace move_primary_util { - -inline Status validateTimestampNotNull(const Timestamp& ts) { - return (!ts.isNull()) - ? 
Status::OK() - : Status(ErrorCodes::BadValue, str::stream() << "Timestamp can't be null"); -} - -template -void ensureFulfilledPromise(WithLock lk, SharedPromise& sp, Status status) { - auto future = sp.getFuture(); - if (!future.isReady()) { - sp.setFrom(status); - } -} - -} // namespace move_primary_util - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary_coordinator.cpp b/src/mongo/db/s/move_primary_coordinator.cpp index 929e34809da2c..7341d3d4f7bfc 100644 --- a/src/mongo/db/s/move_primary_coordinator.cpp +++ b/src/mongo/db/s/move_primary_coordinator.cpp @@ -30,36 +30,68 @@ #include "mongo/db/s/move_primary_coordinator.h" #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/client/connpool.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/list_collections_filter.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/dbdirectclient.h" #include "mongo/db/repl/change_stream_oplog_notification.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/db/write_block_bypass.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/s/move_primary/move_primary_feature_flag_gen.h" #include "mongo/s/request_types/move_primary_gen.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { -namespace { - -bool useOnlineCloner() { - return move_primary::gFeatureFlagOnlineMovePrimaryLifecycle.isEnabled( - serverGlobalParams.featureCompatibility); -} - -} // namespace - MONGO_FAIL_POINT_DEFINE(hangBeforeCloningData); MovePrimaryCoordinator::MovePrimaryCoordinator(ShardingDDLCoordinatorService* service, @@ -69,7 +101,7 @@ MovePrimaryCoordinator::MovePrimaryCoordinator(ShardingDDLCoordinatorService* se _csReason([&] { BSONObjBuilder builder; builder.append("command", "movePrimary"); - builder.append("db", _dbName.toString()); + builder.append("db", DatabaseNameUtil::serialize(_dbName)); builder.append("to", _doc.getToShardId()); 
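The ensureFulfilledPromise() helper in the header deleted just above (its template parameter list was lost in this rendering; it is templated on the promise's value type) exists to make abort and completion paths idempotent. A usage sketch, assuming the usual Latch/WithLock convention; the class and member names are illustrative.

```cpp
// Sketch of how the removed move_primary_util::ensureFulfilledPromise() is meant to
// be used: set an error on a SharedPromise only if nothing has fulfilled it yet.
#include "mongo/db/s/move_primary/move_primary_util.h"  // file removed by this patch
#include "mongo/platform/mutex.h"
#include "mongo/util/future.h"

namespace mongo {
class ExampleRecipientInstance {
public:
    void abort(Status reason) {
        stdx::lock_guard<Latch> lg(_mutex);
        // No-op if another path (e.g. normal completion) already fulfilled the promise.
        move_primary_util::ensureFulfilledPromise(lg, _completionPromise, std::move(reason));
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("ExampleRecipientInstance::_mutex");
    SharedPromise<void> _completionPromise;
};
}  // namespace mongo
```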
return builder.obj(); }()) {} @@ -110,60 +142,54 @@ void MovePrimaryCoordinator::checkIfOptionsConflict(const BSONObj& doc) const { ExecutorFuture MovePrimaryCoordinator::_runImpl( std::shared_ptr executor, const CancellationToken& token) noexcept { - return ExecutorFuture(**executor) - .then([this, executor, token, anchor = shared_from_this()] { - const auto opCtxHolder = cc().makeOperationContext(); - auto* opCtx = opCtxHolder.get(); - getForwardableOpMetadata().setOn(opCtx); - - const auto& toShardId = _doc.getToShardId(); + return ExecutorFuture(**executor).then([this, executor, anchor = shared_from_this()] { + const auto opCtxHolder = cc().makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + getForwardableOpMetadata().setOn(opCtx); - if (toShardId == ShardingState::get(opCtx)->shardId()) { - LOGV2(7120200, - "Database already on requested primary shard", - logAttrs(_dbName), - "to"_attr = toShardId); - - return ExecutorFuture(**executor); - } + const auto& toShardId = _doc.getToShardId(); - const auto toShardEntry = [&] { - const auto config = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - const auto findResponse = uassertStatusOK(config->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kMajorityReadConcern, - NamespaceString::kConfigsvrShardsNamespace, - BSON(ShardType::name() << toShardId), - BSONObj() /* No sorting */, - 1 /* Limit */)); + if (toShardId == ShardingState::get(opCtx)->shardId()) { + LOGV2(7120200, + "Database already on requested primary shard", + logAttrs(_dbName), + "to"_attr = toShardId); - uassert(ErrorCodes::ShardNotFound, - "Requested primary shard {} does not exist"_format(toShardId.toString()), - !findResponse.docs.empty()); + return ExecutorFuture(**executor); + } - return uassertStatusOK(ShardType::fromBSON(findResponse.docs.front())); - }(); + const auto toShardEntry = [&] { + const auto config = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + const auto findResponse = uassertStatusOK( + config->exhaustiveFindOnConfig(opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + repl::ReadConcernLevel::kMajorityReadConcern, + NamespaceString::kConfigsvrShardsNamespace, + BSON(ShardType::name() << toShardId), + BSONObj() /* No sorting */, + 1 /* Limit */)); uassert(ErrorCodes::ShardNotFound, - "Requested primary shard {} is draining"_format(toShardId.toString()), - !toShardEntry.getDraining()); + "Requested primary shard {} does not exist"_format(toShardId.toString()), + !findResponse.docs.empty()); - if (useOnlineCloner() && !_firstExecution) { - recoverOnlineCloner(opCtx); - } + return uassertStatusOK(ShardType::fromBSON(findResponse.docs.front())); + }(); - return runMovePrimaryWorkflow(executor, token); - }); + uassert(ErrorCodes::ShardNotFound, + "Requested primary shard {} is draining"_format(toShardId.toString()), + !toShardEntry.getDraining()); + + return runMovePrimaryWorkflow(executor); + }); } ExecutorFuture MovePrimaryCoordinator::runMovePrimaryWorkflow( - std::shared_ptr executor, - const CancellationToken& token) noexcept { + std::shared_ptr executor) noexcept { return ExecutorFuture(**executor) .then(_buildPhaseHandler( Phase::kClone, - [this, token, anchor = shared_from_this()] { + [this, anchor = shared_from_this()] { const auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); @@ -179,7 +205,7 @@ ExecutorFuture MovePrimaryCoordinator::runMovePrimaryWorkflow( uasserted( 7120202, 
"movePrimary operation on database {} failed cloning data to recipient {}"_format( - _dbName.toString(), toShardId.toString())); + _dbName.toStringForErrorMsg(), toShardId.toString())); } LOGV2(7120201, @@ -202,30 +228,19 @@ ExecutorFuture MovePrimaryCoordinator::runMovePrimaryWorkflow( hangBeforeCloningData.pauseWhileSet(opCtx); } - if (useOnlineCloner()) { - if (!_onlineCloner) { - createOnlineCloner(opCtx); - } - cloneDataUntilReadyForCatchup(opCtx, token); - } else { - cloneDataLegacy(opCtx); - } + cloneData(opCtx); // TODO (SERVER-71566): Temporary solution to cover the case of stepping down before // actually entering the `kCatchup` phase. blockWrites(opCtx); })) .then(_buildPhaseHandler(Phase::kCatchup, - [this, token, anchor = shared_from_this()] { + [this, anchor = shared_from_this()] { const auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); blockWrites(opCtx); - if (useOnlineCloner()) { - informOnlineClonerOfBlockingWrites(opCtx); - waitUntilOnlineClonerPrepared(token); - } })) .then(_buildPhaseHandler(Phase::kEnterCriticalSection, [this, executor, anchor = shared_from_this()] { @@ -233,20 +248,17 @@ ExecutorFuture MovePrimaryCoordinator::runMovePrimaryWorkflow( auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); - _updateSession(opCtx); if (!_firstExecution) { // Perform a noop write on the recipient in order to // advance the txnNumber for this coordinator's logical // session. This prevents requests with older txnNumbers // from being processed. _performNoopRetryableWriteOnAllShardsAndConfigsvr( - opCtx, getCurrentSession(), **executor); + opCtx, getNewSession(opCtx), **executor); } blockReads(opCtx); - if (!useOnlineCloner()) { - enterCriticalSectionOnRecipient(opCtx); - } + enterCriticalSectionOnRecipient(opCtx); })) .then(_buildPhaseHandler( Phase::kCommit, @@ -268,8 +280,6 @@ ExecutorFuture MovePrimaryCoordinator::runMovePrimaryWorkflow( // shutdown. VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); - clearDbMetadataOnPrimary(opCtx); - logChange(opCtx, "commit"); })) .then(_buildPhaseHandler(Phase::kClean, @@ -281,27 +291,22 @@ ExecutorFuture MovePrimaryCoordinator::runMovePrimaryWorkflow( dropStaleDataOnDonor(opCtx); })) .then(_buildPhaseHandler(Phase::kExitCriticalSection, - [this, executor, token, anchor = shared_from_this()] { + [this, executor, anchor = shared_from_this()] { const auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); - _updateSession(opCtx); if (!_firstExecution) { // Perform a noop write on the recipient in order to // advance the txnNumber for this coordinator's logical // session. This prevents requests with older txnNumbers // from being processed. 
_performNoopRetryableWriteOnAllShardsAndConfigsvr( - opCtx, getCurrentSession(), **executor); + opCtx, getNewSession(opCtx), **executor); } unblockReadsAndWrites(opCtx); - if (useOnlineCloner()) { - cleanupOnlineCloner(opCtx, token); - } else { - exitCriticalSectionOnRecipient(opCtx); - } + exitCriticalSectionOnRecipient(opCtx); LOGV2(7120206, "Completed movePrimary operation", @@ -316,8 +321,7 @@ ExecutorFuture MovePrimaryCoordinator::runMovePrimaryWorkflow( getForwardableOpMetadata().setOn(opCtx); const auto& failedPhase = _doc.getPhase(); - if (_onlineCloner || failedPhase == Phase::kClone || - status == ErrorCodes::ShardNotFound) { + if (failedPhase == Phase::kClone || status == ErrorCodes::ShardNotFound) { LOGV2_DEBUG(7392900, 1, "Triggering movePrimary cleanup", @@ -331,43 +335,7 @@ ExecutorFuture MovePrimaryCoordinator::runMovePrimaryWorkflow( }); } -bool MovePrimaryCoordinator::onlineClonerPossiblyNeverCreated() const { - // Either the first run of this service, or failed over before online cloner persisted its - // state document. - auto phase = _doc.getPhase(); - return phase <= Phase::kClone; -} - -bool MovePrimaryCoordinator::onlineClonerPossiblyCleanedUp() const { - // Could have failed over between the online cloner deleting its state document and the - // coordinator deleting its state document. - auto phase = _doc.getPhase(); - return phase == Phase::kExitCriticalSection || getAbortReason(); -} - -bool MovePrimaryCoordinator::onlineClonerAllowedToBeMissing() const { - return onlineClonerPossiblyNeverCreated() || onlineClonerPossiblyCleanedUp(); -} - -void MovePrimaryCoordinator::recoverOnlineCloner(OperationContext* opCtx) { - _onlineCloner = MovePrimaryDonor::get(opCtx, _dbName, _doc.getToShardId()); - if (_onlineCloner) { - return; - } - invariant(onlineClonerAllowedToBeMissing()); -} - -void MovePrimaryCoordinator::createOnlineCloner(OperationContext* opCtx) { - invariant(onlineClonerPossiblyNeverCreated()); - _onlineCloner = MovePrimaryDonor::create(opCtx, _dbName, _doc.getToShardId()); -} - -void MovePrimaryCoordinator::cloneDataUntilReadyForCatchup(OperationContext* opCtx, - const CancellationToken& token) { - future_util::withCancellation(_onlineCloner->getReadyToBlockWritesFuture(), token).get(); -} - -void MovePrimaryCoordinator::cloneDataLegacy(OperationContext* opCtx) { +void MovePrimaryCoordinator::cloneData(OperationContext* opCtx) { const auto& collectionsToClone = getUnshardedCollections(opCtx); assertNoOrphanedDataOnRecipient(opCtx, collectionsToClone); @@ -378,35 +346,48 @@ void MovePrimaryCoordinator::cloneDataLegacy(OperationContext* opCtx) { assertClonedData(clonedCollections); } -void MovePrimaryCoordinator::informOnlineClonerOfBlockingWrites(OperationContext* opCtx) { - auto& replClient = repl::ReplClientInfo::forClient(opCtx->getClient()); - replClient.setLastOpToSystemLastOpTime(opCtx); - const auto latestOpTime = replClient.getLastOp(); - _onlineCloner->onBeganBlockingWrites(latestOpTime.getTimestamp()); -} - -void MovePrimaryCoordinator::waitUntilOnlineClonerPrepared(const CancellationToken& token) { - future_util::withCancellation(_onlineCloner->getDecisionFuture(), token).get(); -} - ExecutorFuture MovePrimaryCoordinator::_cleanupOnAbort( std::shared_ptr executor, const CancellationToken& token, const Status& status) noexcept { return ExecutorFuture(**executor) - .then([this, executor, token, status, anchor = shared_from_this()] { + .then([this, executor, status, anchor = shared_from_this()] { const auto opCtxHolder = cc().makeOperationContext(); 
auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); - _updateSession(opCtx); _performNoopRetryableWriteOnAllShardsAndConfigsvr( - opCtx, getCurrentSession(), **executor); + opCtx, getNewSession(opCtx), **executor); - if (useOnlineCloner()) { - cleanupOnAbortWithOnlineCloner(opCtx, token, status); - } else { - cleanupOnAbortWithoutOnlineCloner(opCtx, executor); + const auto& failedPhase = _doc.getPhase(); + const auto& toShardId = _doc.getToShardId(); + + if (failedPhase <= Phase::kCommit) { + // A non-retryable error occurred before the new primary shard was actually + // committed, so any cloned data on the recipient must be dropped. + + try { + // Even if the error is `ShardNotFound`, the recipient may still be in draining + // mode, so try to drop any orphaned data anyway. + dropOrphanedDataOnRecipient(opCtx, executor); + } catch (const ExceptionFor&) { + LOGV2_INFO(7392901, + "Failed to remove orphaned data on recipient as it has been removed", + logAttrs(_dbName), + "to"_attr = toShardId); + } + } + + unblockReadsAndWrites(opCtx); + try { + // Even if the error is `ShardNotFound`, the recipient may still be in draining + // mode, so try to exit the critical section anyway. + exitCriticalSectionOnRecipient(opCtx); + } catch (const ExceptionFor&) { + LOGV2_INFO(7392902, + "Failed to exit critical section on recipient as it has been removed", + logAttrs(_dbName), + "to"_attr = toShardId); } LOGV2_ERROR(7392903, @@ -420,60 +401,6 @@ ExecutorFuture MovePrimaryCoordinator::_cleanupOnAbort( }); } -void MovePrimaryCoordinator::cleanupOnAbortWithoutOnlineCloner( - OperationContext* opCtx, std::shared_ptr executor) { - const auto& failedPhase = _doc.getPhase(); - const auto& toShardId = _doc.getToShardId(); - - if (failedPhase <= Phase::kCommit) { - // A non-retryable error occurred before the new primary shard was actually - // committed, so any cloned data on the recipient must be dropped. - - try { - // Even if the error is `ShardNotFound`, the recipient may still be in draining - // mode, so try to drop any orphaned data anyway. - dropOrphanedDataOnRecipient(opCtx, executor); - } catch (const ExceptionFor&) { - LOGV2_INFO(7392901, - "Failed to remove orphaned data on recipient as it has been removed", - logAttrs(_dbName), - "to"_attr = toShardId); - } - } - - unblockReadsAndWrites(opCtx); - try { - // Even if the error is `ShardNotFound`, the recipient may still be in draining - // mode, so try to exit the critical section anyway. 
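The catch clauses in this hunk lost their exception template arguments in this rendering. Reconstructed from the surrounding code, the ShardNotFound-tolerant cleanup now inlined into _cleanupOnAbort() reads as follows.

```cpp
// Reconstruction of the first catch block added to _cleanupOnAbort() above; the dropped
// template argument is ErrorCodes::ShardNotFound (the same applies to the
// exit-critical-section block and to the deleted helper below).
try {
    // Even if the error is `ShardNotFound`, the recipient may still be in draining
    // mode, so try to drop any orphaned data anyway.
    dropOrphanedDataOnRecipient(opCtx, executor);
} catch (const ExceptionFor<ErrorCodes::ShardNotFound>&) {
    LOGV2_INFO(7392901,
               "Failed to remove orphaned data on recipient as it has been removed",
               logAttrs(_dbName),
               "to"_attr = toShardId);
}
```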
- exitCriticalSectionOnRecipient(opCtx); - } catch (const ExceptionFor&) { - LOGV2_INFO(7392902, - "Failed to exit critical section on recipient as it has been removed", - logAttrs(_dbName), - "to"_attr = toShardId); - } -} - -void MovePrimaryCoordinator::cleanupOnlineCloner(OperationContext* opCtx, - const CancellationToken& token) { - if (!_onlineCloner) { - return; - } - _onlineCloner->onReadyToForget(); - future_util::withCancellation(_onlineCloner->getCompletionFuture(), token).wait(); -} - -void MovePrimaryCoordinator::cleanupOnAbortWithOnlineCloner(OperationContext* opCtx, - const CancellationToken& token, - const Status& status) { - unblockReadsAndWrites(opCtx); - if (!_onlineCloner) { - return; - } - _onlineCloner->abort(status); - cleanupOnlineCloner(opCtx, token); -} - void MovePrimaryCoordinator::logChange(OperationContext* opCtx, const std::string& what, const Status& status) const { @@ -484,7 +411,7 @@ void MovePrimaryCoordinator::logChange(OperationContext* opCtx, details.append("error", status.toString()); } ShardingLogging::get(opCtx)->logChange( - opCtx, "movePrimary.{}"_format(what), _dbName.toString(), details.obj()); + opCtx, "movePrimary.{}"_format(what), DatabaseNameUtil::serialize(_dbName), details.obj()); } std::vector MovePrimaryCoordinator::getUnshardedCollections( @@ -513,7 +440,9 @@ std::vector MovePrimaryCoordinator::getUnshardedCollections( const auto shardedCollections = [&] { auto colls = Grid::get(opCtx)->catalogClient()->getAllShardedCollectionsForDb( - opCtx, _dbName.toString(), repl::ReadConcernLevel::kMajorityReadConcern); + opCtx, + DatabaseNameUtil::serialize(_dbName), + repl::ReadConcernLevel::kMajorityReadConcern); std::sort(colls.begin(), colls.end()); return colls; @@ -547,7 +476,7 @@ void MovePrimaryCoordinator::assertNoOrphanedDataOnRecipient( const auto listResponse = uassertStatusOK( toShard->runExhaustiveCursorCommand(opCtx, ReadPreferenceSetting(ReadPreference::PrimaryOnly), - _dbName.toString(), + DatabaseNameUtil::serialize(_dbName), listCommand, Milliseconds(-1))); @@ -564,7 +493,7 @@ void MovePrimaryCoordinator::assertNoOrphanedDataOnRecipient( for (const auto& nss : collectionsToClone) { uassert(ErrorCodes::NamespaceExists, - "Found orphaned collection {} on recipient {}"_format(nss.toString(), + "Found orphaned collection {} on recipient {}"_format(nss.toStringForErrorMsg(), toShardId.toString()), !std::binary_search(allCollections.cbegin(), allCollections.cend(), nss)); }; @@ -584,7 +513,7 @@ std::vector MovePrimaryCoordinator::cloneDataToRecipient( const auto cloneCommand = [&] { BSONObjBuilder commandBuilder; - commandBuilder.append("_shardsvrCloneCatalogData", _dbName.toString()); + commandBuilder.append("_shardsvrCloneCatalogData", DatabaseNameUtil::serialize(_dbName)); commandBuilder.append("from", fromShard->getConnString().toString()); return CommandHelpers::appendMajorityWriteConcern(commandBuilder.obj()); }(); @@ -592,16 +521,16 @@ std::vector MovePrimaryCoordinator::cloneDataToRecipient( const auto cloneResponse = toShard->runCommand(opCtx, ReadPreferenceSetting(ReadPreference::PrimaryOnly), - DatabaseName::kAdmin.db(), + DatabaseName::kAdmin.db().toString(), cloneCommand, Shard::RetryPolicy::kNoRetry); uassertStatusOKWithContext( Shard::CommandResponse::getEffectiveStatus(cloneResponse), "movePrimary operation on database {} failed to clone data to recipient {}"_format( - _dbName.toString(), toShardId.toString())); + _dbName.toStringForErrorMsg(), toShardId.toString())); - const auto clonedCollections = [&] { + auto 
clonedCollections = [&] { std::vector colls; for (const auto& bsonElem : cloneResponse.getValue().response["clonedColls"].Obj()) { if (bsonElem.type() == String) { @@ -641,33 +570,33 @@ void MovePrimaryCoordinator::commitMetadataToConfig( const auto commitResponse = config->runCommandWithFixedRetryAttempts(opCtx, ReadPreferenceSetting(ReadPreference::PrimaryOnly), - DatabaseName::kAdmin.db(), + DatabaseName::kAdmin.db().toString(), commitCommand, Shard::RetryPolicy::kIdempotent); uassertStatusOKWithContext( Shard::CommandResponse::getEffectiveStatus(commitResponse), "movePrimary operation on database {} failed to commit metadata changes"_format( - _dbName.toString())); + _dbName.toStringForErrorMsg())); } void MovePrimaryCoordinator::assertChangedMetadataOnConfig( OperationContext* opCtx, const DatabaseVersion& preCommitDbVersion) const { const auto postCommitDbType = [&]() { const auto config = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - auto findResponse = uassertStatusOK( - config->exhaustiveFindOnConfig(opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kMajorityReadConcern, - NamespaceString::kConfigDatabasesNamespace, - BSON(DatabaseType::kNameFieldName << _dbName.toString()), - BSONObj(), - 1)); + auto findResponse = uassertStatusOK(config->exhaustiveFindOnConfig( + opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + repl::ReadConcernLevel::kMajorityReadConcern, + NamespaceString::kConfigDatabasesNamespace, + BSON(DatabaseType::kNameFieldName << DatabaseNameUtil::serialize(_dbName)), + BSONObj(), + 1)); const auto databases = std::move(findResponse.docs); uassert(ErrorCodes::IncompatibleShardingMetadata, "Tried to find version for database {}, but found no databases"_format( - _dbName.toString()), + _dbName.toStringForErrorMsg()), !databases.empty()); return DatabaseType::parse(IDLParserContext("DatabaseType"), databases.front()); @@ -691,14 +620,8 @@ void MovePrimaryCoordinator::dropStaleDataOnDonor(OperationContext* opCtx) const WriteBlockBypass::get(opCtx).set(true); DBDirectClient dbClient(opCtx); - auto unshardedCollections = [this, opCtx] { - if (useOnlineCloner()) { - return getUnshardedCollections(opCtx); - } - invariant(_doc.getCollectionsToClone()); - return *_doc.getCollectionsToClone(); - }(); - for (const auto& nss : unshardedCollections) { + invariant(_doc.getCollectionsToClone()); + for (const auto& nss : *_doc.getCollectionsToClone()) { const auto dropStatus = [&] { BSONObj dropResult; dbClient.runCommand(_dbName, BSON("drop" << nss.coll()), dropResult); @@ -722,15 +645,14 @@ void MovePrimaryCoordinator::dropOrphanedDataOnRecipient( return; } - // Make a copy of this container since `_updateSession` changes the coordinator document. + // Make a copy of this container since `getNewSession` changes the coordinator document. 
const auto collectionsToClone = *_doc.getCollectionsToClone(); for (const auto& nss : collectionsToClone) { - _updateSession(opCtx); sharding_ddl_util::sendDropCollectionParticipantCommandToShards(opCtx, nss, {_doc.getToShardId()}, **executor, - getCurrentSession(), + getNewSession(opCtx), false /* fromMigrate */); } } @@ -758,18 +680,26 @@ void MovePrimaryCoordinator::blockReads(OperationContext* opCtx) const { } void MovePrimaryCoordinator::unblockReadsAndWrites(OperationContext* opCtx) const { + // The release of the critical section will clear db metadata on secondaries + clearDbMetadataOnPrimary(opCtx); + // In case of step-down, this operation could be re-executed and trigger the invariant in case + // the new primary runs a DDL that acquires the critical section in the old primary shard ShardingRecoveryService::get(opCtx)->releaseRecoverableCriticalSection( - opCtx, NamespaceString(_dbName), _csReason, ShardingCatalogClient::kLocalWriteConcern); + opCtx, + NamespaceString(_dbName), + _csReason, + ShardingCatalogClient::kLocalWriteConcern, + false /*throwIfReasonDiffers*/); } -void MovePrimaryCoordinator::enterCriticalSectionOnRecipient(OperationContext* opCtx) const { +void MovePrimaryCoordinator::enterCriticalSectionOnRecipient(OperationContext* opCtx) { const auto enterCriticalSectionCommand = [&] { ShardsvrMovePrimaryEnterCriticalSection request(_dbName); request.setDbName(DatabaseName::kAdmin); request.setReason(_csReason); auto command = CommandHelpers::appendMajorityWriteConcern(request.toBSON({})); - return command.addFields(getCurrentSession().toBSON()); + return command.addFields(getNewSession(opCtx).toBSON()); }(); const auto& toShardId = _doc.getToShardId(); @@ -789,17 +719,17 @@ void MovePrimaryCoordinator::enterCriticalSectionOnRecipient(OperationContext* o uassertStatusOKWithContext( Shard::CommandResponse::getEffectiveStatus(enterCriticalSectionResponse), "movePrimary operation on database {} failed to block read/write operations on recipient {}"_format( - _dbName.toString(), toShardId.toString())); + _dbName.toStringForErrorMsg(), toShardId.toString())); } -void MovePrimaryCoordinator::exitCriticalSectionOnRecipient(OperationContext* opCtx) const { +void MovePrimaryCoordinator::exitCriticalSectionOnRecipient(OperationContext* opCtx) { const auto exitCriticalSectionCommand = [&] { ShardsvrMovePrimaryExitCriticalSection request(_dbName); request.setDbName(DatabaseName::kAdmin); request.setReason(_csReason); auto command = CommandHelpers::appendMajorityWriteConcern(request.toBSON({})); - return command.addFields(getCurrentSession().toBSON()); + return command.addFields(getNewSession(opCtx).toBSON()); }(); const auto& toShardId = _doc.getToShardId(); @@ -819,7 +749,7 @@ void MovePrimaryCoordinator::exitCriticalSectionOnRecipient(OperationContext* op uassertStatusOKWithContext( Shard::CommandResponse::getEffectiveStatus(exitCriticalSectionResponse), "movePrimary operation on database {} failed to unblock read/write operations on recipient {}"_format( - _dbName.toString(), toShardId.toString())); + _dbName.toStringForErrorMsg(), toShardId.toString())); } } // namespace mongo diff --git a/src/mongo/db/s/move_primary_coordinator.h b/src/mongo/db/s/move_primary_coordinator.h index 22a03c2a460a1..d9fe8ad6d218c 100644 --- a/src/mongo/db/s/move_primary_coordinator.h +++ b/src/mongo/db/s/move_primary_coordinator.h @@ -29,10 +29,30 @@ #pragma once -#include "mongo/db/s/move_primary/move_primary_donor_service.h" +#include +#include +#include +#include +#include +#include + 
+#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/s/move_primary_coordinator_document_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" #include "mongo/s/client/shard.h" +#include "mongo/s/database_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" namespace mongo { @@ -60,27 +80,12 @@ class MovePrimaryCoordinator final const Status& status) noexcept override; ExecutorFuture runMovePrimaryWorkflow( - std::shared_ptr executor, - const CancellationToken& token) noexcept; - - bool onlineClonerPossiblyNeverCreated() const; - bool onlineClonerPossiblyCleanedUp() const; - bool onlineClonerAllowedToBeMissing() const; - void recoverOnlineCloner(OperationContext* opCtx); - void createOnlineCloner(OperationContext* opCtx); - - /** - * Clone data to the recipient without using the online cloning machinery. - */ - void cloneDataLegacy(OperationContext* opCtx); + std::shared_ptr executor) noexcept; /** - * Clone data to the recipient using the online cloning machinery. + * Clone data to the recipient shard. */ - void cloneDataUntilReadyForCatchup(OperationContext* opCtx, const CancellationToken& token); - - void informOnlineClonerOfBlockingWrites(OperationContext* opCtx); - void waitUntilOnlineClonerPrepared(const CancellationToken& token); + void cloneData(OperationContext* opCtx); /** * Logs in the `config.changelog` collection a specific event for `movePrimary` operations. @@ -190,24 +195,16 @@ class MovePrimaryCoordinator final * Requests the recipient to enter the critical section on the database, causing the database * metadata refreshes to block. */ - void enterCriticalSectionOnRecipient(OperationContext* opCtx) const; + void enterCriticalSectionOnRecipient(OperationContext* opCtx); /** * Requests the recipient to exit the critical section on the database, causing the database * metadata refreshes to unblock. */ - void exitCriticalSectionOnRecipient(OperationContext* opCtx) const; - - void cleanupOnlineCloner(OperationContext* opCtx, const CancellationToken& token); - void cleanupOnAbortWithoutOnlineCloner(OperationContext* opCtx, - std::shared_ptr executor); - void cleanupOnAbortWithOnlineCloner(OperationContext* opCtx, - const CancellationToken& token, - const Status& status); + void exitCriticalSectionOnRecipient(OperationContext* opCtx); const DatabaseName _dbName; const BSONObj _csReason; - std::shared_ptr _onlineCloner; }; } // namespace mongo diff --git a/src/mongo/db/s/move_primary_coordinator_no_resilient.cpp b/src/mongo/db/s/move_primary_coordinator_no_resilient.cpp deleted file mode 100644 index c633278e6554f..0000000000000 --- a/src/mongo/db/s/move_primary_coordinator_no_resilient.cpp +++ /dev/null @@ -1,125 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - - -#include "mongo/platform/basic.h" - -#include "mongo/db/s/move_primary_coordinator_no_resilient.h" - -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/s/move_primary_source_manager.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/db/shard_id.h" -#include "mongo/db/write_block_bypass.h" -#include "mongo/logv2/log.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/grid.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - - -namespace mongo { - -void MovePrimaryCoordinatorNoResilient::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const { - stdx::lock_guard lk{_docMutex}; - cmdInfoBuilder->append("request", BSON(_doc.kToShardIdFieldName << _doc.getToShardId())); -}; - -void MovePrimaryCoordinatorNoResilient::checkIfOptionsConflict(const BSONObj& doc) const { - // If we have two shard collections on the same namespace, then the arguments must be the same. - const auto otherDoc = MovePrimaryCoordinatorDocument::parse( - IDLParserContext("MovePrimaryCoordinatorDocument"), doc); - - uassert( - ErrorCodes::ConflictingOperationInProgress, - "Another move primary with different arguments is already running for the same namespace", - _doc.getToShardId() == otherDoc.getToShardId()); -} - - -ExecutorFuture MovePrimaryCoordinatorNoResilient::_runImpl( - std::shared_ptr executor, - const CancellationToken& token) noexcept { - return ExecutorFuture(**executor) - .then([this, anchor = shared_from_this()] { - auto opCtxHolder = cc().makeOperationContext(); - auto* opCtx = opCtxHolder.get(); - getForwardableOpMetadata().setOn(opCtx); - - // Any error should terminate the coordinator, even if it is a retryable error, this way - // we have a movePrimary with a similar behavior of the previous one. - _completeOnError = true; - - auto const shardRegistry = Grid::get(opCtx)->shardRegistry(); - // Make sure we're as up-to-date as possible with shard information. This catches the - // case where we might have changed a shard's host by removing/adding a shard with the - // same name. 
- shardRegistry->reload(opCtx); - - const auto& dbName = nss().dbName().db(); - const auto& toShard = - uassertStatusOK(shardRegistry->getShard(opCtx, _doc.getToShardId())); - - const auto& selfShardId = ShardingState::get(opCtx)->shardId(); - if (selfShardId == toShard->getId()) { - LOGV2(5275803, - "Database already on the requested primary shard", - logAttrs(nss().dbName()), - "shardId"_attr = _doc.getToShardId()); - // The database primary is already the `to` shard - return; - } - - // Enable write blocking bypass to allow cloning and droping the stale collections even - // if user writes are currently disallowed. - WriteBlockBypass::get(opCtx).set(true); - - ShardMovePrimary movePrimaryRequest(nss(), _doc.getToShardId().toString()); - - auto primaryId = selfShardId; - auto toId = toShard->getId(); - MovePrimarySourceManager movePrimarySourceManager( - opCtx, movePrimaryRequest, dbName, primaryId, toId); - uassertStatusOK(movePrimarySourceManager.clone(opCtx)); - uassertStatusOK(movePrimarySourceManager.enterCriticalSection(opCtx)); - uassertStatusOK(movePrimarySourceManager.commitOnConfig(opCtx)); - uassertStatusOK(movePrimarySourceManager.cleanStaleData(opCtx)); - }) - .onError([this, anchor = shared_from_this()](const Status& status) { - LOGV2_ERROR(5275804, - "Error running move primary", - "database"_attr = nss().dbName(), - "to"_attr = _doc.getToShardId(), - "error"_attr = redact(status)); - - return status; - }); -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary_coordinator_no_resilient.h b/src/mongo/db/s/move_primary_coordinator_no_resilient.h deleted file mode 100644 index 32b26b9781d66..0000000000000 --- a/src/mongo/db/s/move_primary_coordinator_no_resilient.h +++ /dev/null @@ -1,61 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/s/move_primary_coordinator_document_gen.h" -#include "mongo/db/s/sharding_ddl_coordinator.h" -#include "mongo/util/future.h" - -namespace mongo { - -// TODO (SERVER-71309): Remove once 7.0 becomes last LTS. 
-class MovePrimaryCoordinatorNoResilient final - : public ShardingDDLCoordinatorImpl { -public: - MovePrimaryCoordinatorNoResilient(ShardingDDLCoordinatorService* service, - const BSONObj& initialState) - : ShardingDDLCoordinatorImpl(service, "MovePrimaryCoordinator", initialState) {} - - ~MovePrimaryCoordinatorNoResilient() = default; - - void checkIfOptionsConflict(const BSONObj& coorDoc) const override; - - void appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const override; - - bool canAlwaysStartWhenUserWritesAreDisabled() const override { - return true; - } - -private: - ExecutorFuture _runImpl(std::shared_ptr executor, - const CancellationToken& token) noexcept override; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary_source_manager.cpp b/src/mongo/db/s/move_primary_source_manager.cpp deleted file mode 100644 index fcf62395449f5..0000000000000 --- a/src/mongo/db/s/move_primary_source_manager.cpp +++ /dev/null @@ -1,546 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/db/s/move_primary_source_manager.h" - -#include "mongo/client/connpool.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/commands.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/change_stream_oplog_notification.h" -#include "mongo/db/s/database_sharding_state.h" -#include "mongo/db/s/shard_metadata_util.h" -#include "mongo/db/s/sharding_logging.h" -#include "mongo/db/s/sharding_state_recovery.h" -#include "mongo/db/s/sharding_statistics.h" -#include "mongo/db/s/type_shard_database.h" -#include "mongo/db/vector_clock_mutable.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/s/catalog_cache.h" -#include "mongo/s/grid.h" -#include "mongo/s/request_types/move_primary_gen.h" -#include "mongo/util/exit.h" -#include "mongo/util/scopeguard.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - -namespace mongo { - -MONGO_FAIL_POINT_DEFINE(hangInCloneStage); -MONGO_FAIL_POINT_DEFINE(hangInCleanStaleDataStage); - -MovePrimarySourceManager::MovePrimarySourceManager(OperationContext* opCtx, - ShardMovePrimary requestArgs, - StringData dbname, - ShardId& fromShard, - ShardId& toShard) - : _requestArgs(std::move(requestArgs)), - _dbname(dbname), - _fromShard(fromShard), - _toShard(toShard), - _critSecReason(BSON("command" - << "movePrimary" - << "dbName" << _dbname << "fromShard" << fromShard << "toShard" - << toShard)) {} - -MovePrimarySourceManager::~MovePrimarySourceManager() {} - -NamespaceString MovePrimarySourceManager::getNss() const { - return _requestArgs.get_shardsvrMovePrimary(); -} - -Status MovePrimarySourceManager::clone(OperationContext* opCtx) { - invariant(!opCtx->lockState()->isLocked()); - invariant(_state == kCreated); - ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); }); - - LOGV2(22042, - "Moving {db} primary from: {fromShard} to: {toShard}", - "Moving primary for database", - "db"_attr = _dbname, - "fromShard"_attr = _fromShard, - "toShard"_attr = _toShard); - - // Record start in changelog - auto logChangeCheckedStatus = ShardingLogging::get(opCtx)->logChangeChecked( - opCtx, - "movePrimary.start", - _dbname.toString(), - _buildMoveLogEntry(_dbname.toString(), _fromShard.toString(), _toShard.toString()), - ShardingCatalogClient::kMajorityWriteConcern); - - if (!logChangeCheckedStatus.isOK()) { - return logChangeCheckedStatus; - } - - { - // We use AutoGetDb::ensureDbExists() the first time just in case movePrimary was called - // before any data was inserted into the database. 
- AutoGetDb autoDb(opCtx, getNss().dbName(), MODE_X); - invariant(autoDb.ensureDbExists(opCtx), getNss().toString()); - - auto scopedDss = - DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, getNss().dbName()); - scopedDss->setMovePrimaryInProgress(opCtx); - } - - _state = kCloning; - - if (MONGO_unlikely(hangInCloneStage.shouldFail())) { - LOGV2(4908700, "Hit hangInCloneStage"); - hangInCloneStage.pauseWhileSet(opCtx); - } - - auto const shardRegistry = Grid::get(opCtx)->shardRegistry(); - auto fromShardObj = uassertStatusOK(shardRegistry->getShard(opCtx, _fromShard)); - auto toShardObj = uassertStatusOK(shardRegistry->getShard(opCtx, _toShard)); - - BSONObjBuilder cloneCatalogDataCommandBuilder; - cloneCatalogDataCommandBuilder << "_shardsvrCloneCatalogData" << _dbname << "from" - << fromShardObj->getConnString().toString(); - - - auto cloneCommandResponse = toShardObj->runCommandWithFixedRetryAttempts( - opCtx, - ReadPreferenceSetting(ReadPreference::PrimaryOnly), - "admin", - CommandHelpers::appendMajorityWriteConcern(cloneCatalogDataCommandBuilder.obj()), - Shard::RetryPolicy::kNotIdempotent); - - auto cloneCommandStatus = Shard::CommandResponse::getEffectiveStatus(cloneCommandResponse); - if (!cloneCommandStatus.isOK()) { - return cloneCommandStatus; - } - - auto clonedCollsArray = cloneCommandResponse.getValue().response["clonedColls"]; - for (const auto& elem : clonedCollsArray.Obj()) { - if (elem.type() == String) { - _clonedColls.push_back(NamespaceString(elem.String())); - } - } - - _state = kCloneCaughtUp; - scopedGuard.dismiss(); - return Status::OK(); -} - -Status MovePrimarySourceManager::enterCriticalSection(OperationContext* opCtx) { - invariant(!opCtx->lockState()->isLocked()); - invariant(_state == kCloneCaughtUp); - ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); }); - - // Mark the shard as running a critical operation that requires recovery on crash. - // TODO (SERVER-60110): Remove once 7.0 becomes last LTS. - auto startMetadataOpStatus = ShardingStateRecovery_DEPRECATED::startMetadataOp(opCtx); - if (!startMetadataOpStatus.isOK()) { - return startMetadataOpStatus; - } - - { - // The critical section must be entered with the database X lock in order to ensure there - // are no writes which could have entered and passed the database version check just before - // we entered the critical section, but will potentially complete after we left it. - AutoGetDb autoDb(opCtx, getNss().dbName(), MODE_X); - - if (!autoDb.getDb()) { - uasserted(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "The database " << getNss().toString() - << " was dropped during the movePrimary operation."); - } - - auto scopedDss = - DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, getNss().dbName()); - - // IMPORTANT: After this line, the critical section is in place and needs to be signaled - scopedDss->enterCriticalSectionCatchUpPhase(opCtx, _critSecReason); - } - - _state = kCriticalSection; - - // Persist a signal to secondaries that we've entered the critical section. This will cause - // secondaries to refresh their routing table when next accessed, which will block behind the - // critical section. This ensures causal consistency by preventing a stale mongos with a cluster - // time inclusive of the move primary config commit update from accessing secondary data. - // Note: this write must occur after the critSec flag is set, to ensure the secondary refresh - // will stall behind the flag. 
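Each phase of the deleted source manager arms a `ScopeGuard` that invokes `cleanupOnError()` and dismisses it only once the phase succeeds. A minimal standalone sketch of that dismissible-guard idiom (not MongoDB's actual `ScopeGuard` implementation):

```cpp
#include <iostream>
#include <utility>

// Simplified dismissible guard: the cleanup runs on every exit path unless
// dismiss() is called after the work succeeds.
template <typename F>
class ScopeGuard {
public:
    explicit ScopeGuard(F f) : _f(std::move(f)) {}
    ~ScopeGuard() {
        if (_armed)
            _f();
    }
    void dismiss() { _armed = false; }
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;

private:
    F _f;
    bool _armed = true;
};

bool enterCriticalSection(bool simulateFailure) {
    // Arm cleanup first, the way the phases above arm cleanupOnError().
    ScopeGuard guard([] { std::cout << "cleanupOnError: rolling back this phase\n"; });

    if (simulateFailure)
        return false;  // guard fires, cleanup runs

    guard.dismiss();  // success: keep the progress, skip cleanup
    return true;
}

int main() {
    enterCriticalSection(true);   // prints the cleanup message
    enterCriticalSection(false);  // stays silent
}
```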
- Status signalStatus = shardmetadatautil::updateShardDatabasesEntry( - opCtx, - BSON(ShardDatabaseType::kNameFieldName << getNss().toString()), - BSONObj(), - BSON(ShardDatabaseType::kEnterCriticalSectionCounterFieldName << 1), - false /*upsert*/); - if (!signalStatus.isOK()) { - return { - ErrorCodes::OperationFailed, - str::stream() << "Failed to persist critical section signal for secondaries due to: " - << signalStatus.toString()}; - } - - LOGV2(22043, "movePrimary successfully entered critical section"); - - scopedGuard.dismiss(); - - return Status::OK(); -} - -Status MovePrimarySourceManager::commitOnConfig(OperationContext* opCtx) { - invariant(!opCtx->lockState()->isLocked()); - invariant(_state == kCriticalSection); - ScopeGuard scopedGuard([&] { cleanupOnError(opCtx); }); - - boost::optional expectedDbVersion; - - { - AutoGetDb autoDb(opCtx, getNss().dbName(), MODE_X); - - if (!autoDb.getDb()) { - uasserted(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "The database " << getNss().toString() - << " was dropped during the movePrimary operation."); - } - - auto scopedDss = - DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, getNss().dbName()); - - // Read operations must begin to wait on the critical section just before we send the - // commit operation to the config server - scopedDss->enterCriticalSectionCommitPhase(opCtx, _critSecReason); - - expectedDbVersion = scopedDss->getDbVersion(opCtx); - } - - auto commitStatus = [&]() { - try { - return _commitOnConfig(opCtx, *expectedDbVersion); - } catch (const DBException& ex) { - return ex.toStatus(); - } - }(); - - if (!commitStatus.isOK()) { - // Need to get the latest optime in case the refresh request goes to a secondary -- - // otherwise the read won't wait for the write that commit on config server may have done. - LOGV2(22044, - "Error occurred while committing the movePrimary. Performing a majority write " - "against the config server to obtain its latest optime: {error}", - "Error occurred while committing the movePrimary. Performing a majority write " - "against the config server to obtain its latest optime", - "error"_attr = redact(commitStatus)); - - Status validateStatus = ShardingLogging::get(opCtx)->logChangeChecked( - opCtx, - "movePrimary.validating", - getNss().ns(), - _buildMoveLogEntry(_dbname.toString(), _fromShard.toString(), _toShard.toString()), - ShardingCatalogClient::kMajorityWriteConcern); - - if ((ErrorCodes::isInterruption(validateStatus.code()) || - ErrorCodes::isShutdownError(validateStatus.code()) || - validateStatus == ErrorCodes::CallbackCanceled) && - globalInShutdownDeprecated()) { - // Since the server is already doing a clean shutdown, this call will just join the - // previous shutdown call - shutdown(waitForShutdown()); - } - - // If we failed to get the latest config optime because we stepped down as primary, then it - // is safe to fail without crashing because the new primary will fetch the latest optime - // when it recovers the sharding state recovery document, as long as we also clear the - // metadata for this database, forcing subsequent callers to do a full refresh. Check if - // this node can accept writes for this collection as a proxy for it being primary. - if (!validateStatus.isOK()) { - // TODO (SERVER-71444): Fix to be interruptible or document exception. - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. 
- AutoGetDb autoDb(opCtx, getNss().dbName(), MODE_IX); - - if (!autoDb.getDb()) { - uasserted(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "The database " << getNss().toString() - << " was dropped during the movePrimary operation."); - } - - if (!repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, getNss())) { - auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive( - opCtx, getNss().dbName()); - scopedDss->clearDbInfo(opCtx); - uassertStatusOK(validateStatus.withContext( - str::stream() << "Unable to verify movePrimary commit for database: " - << getNss().ns() - << " because the node's replication role changed. Version " - "was cleared for: " - << getNss().ns() - << ", so it will get a full refresh when accessed again.")); - } - } - - // We would not be able to guarantee our next database refresh would pick up the write for - // the movePrimary commit (if it happened), because we were unable to get the latest config - // OpTime. - fassert(50762, - validateStatus.withContext( - str::stream() << "Failed to commit movePrimary for database " << getNss().ns() - << " due to " << redact(commitStatus) - << ". Updating the optime with a write before clearing the " - << "version also failed")); - - // If we can validate but the commit still failed, return the status. - return commitStatus; - } - - _state = kCloneCompleted; - - _cleanup(opCtx); - - uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked( - opCtx, - "movePrimary.commit", - _dbname.toString(), - _buildMoveLogEntry(_dbname.toString(), _fromShard.toString(), _toShard.toString()), - ShardingCatalogClient::kMajorityWriteConcern)); - - scopedGuard.dismiss(); - - _state = kNeedCleanStaleData; - - return Status::OK(); -} - -Status MovePrimarySourceManager::_commitOnConfig(OperationContext* opCtx, - const DatabaseVersion& expectedDbVersion) { - LOGV2_DEBUG(6854100, - 3, - "Committing movePrimary", - "db"_attr = _dbname, - "fromShard"_attr = _fromShard, - "toShard"_attr = _toShard, - "expectedDbVersion"_attr = expectedDbVersion); - - notifyChangeStreamsOnMovePrimary(opCtx, getNss().dbName(), _fromShard, _toShard); - - const auto commitStatus = [&] { - ConfigsvrCommitMovePrimary commitRequest(_dbname, expectedDbVersion, _toShard); - commitRequest.setDbName(DatabaseName::kAdmin); - - const auto commitResponse = - Grid::get(opCtx)->shardRegistry()->getConfigShard()->runCommandWithFixedRetryAttempts( - opCtx, - ReadPreferenceSetting(ReadPreference::PrimaryOnly), - DatabaseName::kAdmin.toString(), - CommandHelpers::appendMajorityWriteConcern(commitRequest.toBSON({})), - Shard::RetryPolicy::kIdempotent); - - const auto status = Shard::CommandResponse::getEffectiveStatus(commitResponse); - if (status != ErrorCodes::CommandNotFound) { - return status; - } - - LOGV2(6854101, - "_configsvrCommitMovePrimary command not found on config server, so try to update " - "the metadata document directly", - "db"_attr = _dbname); - - // The fallback logic is not synchronized with the removeShard command and simultaneous - // invocations of movePrimary and removeShard can lead to data loss. - return _fallbackCommitOnConfig(opCtx, expectedDbVersion); - }(); - - if (!commitStatus.isOK()) { - LOGV2(6854102, - "Error committing movePrimary", - "db"_attr = _dbname, - "error"_attr = redact(commitStatus)); - // Try to emit a second notification to reverse the effect of the one notified before the - // commit attempt. 
- notifyChangeStreamsOnMovePrimary(opCtx, getNss().dbName(), _toShard, _fromShard); - return commitStatus; - } - - const auto updatedDbType = [&]() { - auto findResponse = uassertStatusOK( - Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - repl::ReadConcernLevel::kMajorityReadConcern, - NamespaceString::kConfigDatabasesNamespace, - BSON(DatabaseType::kNameFieldName << _dbname), - BSON(DatabaseType::kNameFieldName << -1), - 1)); - - const auto databases = std::move(findResponse.docs); - uassert(ErrorCodes::IncompatibleShardingMetadata, - "Tried to find version for database {}, but found no databases"_format(_dbname), - !databases.empty()); - - return DatabaseType::parse(IDLParserContext("DatabaseType"), databases.front()); - }(); - tassert(6851100, - "Error committing movePrimary: database version went backwards", - updatedDbType.getVersion() > expectedDbVersion); - uassert(6851101, - "Error committing movePrimary: update of config.databases failed", - updatedDbType.getPrimary() != _fromShard); - - LOGV2_DEBUG(6854103, - 3, - "Commited movePrimary", - "db"_attr = _dbname, - "fromShard"_attr = _fromShard, - "toShard"_attr = _toShard, - "updatedDbVersion"_attr = updatedDbType.getVersion()); - - return Status::OK(); -} - -Status MovePrimarySourceManager::_fallbackCommitOnConfig(OperationContext* opCtx, - const DatabaseVersion& expectedDbVersion) { - const auto query = [&] { - BSONObjBuilder bsonBuilder; - bsonBuilder.append(DatabaseType::kNameFieldName, _dbname); - // Include the version in the update filter to be resilient to potential network retries and - // delayed messages. - for (const auto [fieldName, fieldValue] : expectedDbVersion.toBSON()) { - const auto dottedFieldName = DatabaseType::kVersionFieldName + "." + fieldName; - bsonBuilder.appendAs(fieldValue, dottedFieldName); - } - return bsonBuilder.obj(); - }(); - - const auto update = [&] { - const auto newDbVersion = expectedDbVersion.makeUpdated(); - - BSONObjBuilder bsonBuilder; - bsonBuilder.append(DatabaseType::kPrimaryFieldName, _toShard); - bsonBuilder.append(DatabaseType::kVersionFieldName, newDbVersion.toBSON()); - return BSON("$set" << bsonBuilder.obj()); - }(); - - return Grid::get(opCtx) - ->catalogClient() - ->updateConfigDocument(opCtx, - NamespaceString::kConfigDatabasesNamespace, - query, - update, - false, - ShardingCatalogClient::kMajorityWriteConcern) - .getStatus(); -} - -Status MovePrimarySourceManager::cleanStaleData(OperationContext* opCtx) { - invariant(!opCtx->lockState()->isLocked()); - invariant(_state == kNeedCleanStaleData); - - if (MONGO_unlikely(hangInCleanStaleDataStage.shouldFail())) { - LOGV2(4908701, "Hit hangInCleanStaleDataStage"); - hangInCleanStaleDataStage.pauseWhileSet(opCtx); - } - - // Only drop the cloned (unsharded) collections. 
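The fallback commit above includes the expected database version in the update filter so that delayed or retried messages cannot overwrite a newer commit. A toy illustration of that version-guarded, compare-and-set style update against an in-memory stand-in for `config.databases`:

```cpp
#include <iostream>
#include <map>
#include <string>

// Toy "config.databases" entry: primary shard plus a monotonically
// increasing version, mirroring the fields the fallback path updates.
struct DatabaseEntry {
    std::string primary;
    long long version;
};

// Version-guarded update: only applies if the stored version still matches
// the expected one, so a delayed retry of an older commit is a no-op.
bool commitIfVersionMatches(std::map<std::string, DatabaseEntry>& catalog,
                            const std::string& dbName,
                            long long expectedVersion,
                            const std::string& newPrimary) {
    auto it = catalog.find(dbName);
    if (it == catalog.end() || it->second.version != expectedVersion)
        return false;  // filter did not match: a newer commit already landed
    it->second.primary = newPrimary;
    it->second.version = expectedVersion + 1;
    return true;
}

int main() {
    std::map<std::string, DatabaseEntry> catalog{{"testDb", {"shardA", 3}}};

    std::cout << commitIfVersionMatches(catalog, "testDb", 3, "shardB") << "\n";  // 1: applied
    // A duplicate or delayed retry carrying the stale expected version is rejected.
    std::cout << commitIfVersionMatches(catalog, "testDb", 3, "shardB") << "\n";  // 0: no-op
}
```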
- DBDirectClient client(opCtx); - for (auto& coll : _clonedColls) { - BSONObj dropCollResult; - client.runCommand(_dbname, BSON("drop" << coll.coll()), dropCollResult); - Status dropStatus = getStatusFromCommandResult(dropCollResult); - if (!dropStatus.isOK()) { - LOGV2(22045, - "Failed to drop cloned collection {namespace} in movePrimary: {error}", - "Failed to drop cloned collection in movePrimary", - logAttrs(coll), - "error"_attr = redact(dropStatus)); - } - } - - _state = kDone; - return Status::OK(); -} - -void MovePrimarySourceManager::cleanupOnError(OperationContext* opCtx) { - if (_state == kDone) { - return; - } - - ShardingLogging::get(opCtx)->logChange( - opCtx, - "movePrimary.error", - _dbname.toString(), - _buildMoveLogEntry(_dbname.toString(), _fromShard.toString(), _toShard.toString()), - ShardingCatalogClient::kMajorityWriteConcern); - - try { - _cleanup(opCtx); - } catch (const ExceptionForCat& ex) { - BSONObjBuilder requestArgsBSON; - _requestArgs.serialize(&requestArgsBSON); - LOGV2_WARNING(22046, - "Failed to clean up movePrimary with request parameters {request} due to: " - "{error}", - "Failed to clean up movePrimary", - "request"_attr = redact(requestArgsBSON.obj()), - "error"_attr = redact(ex)); - } -} - -void MovePrimarySourceManager::_cleanup(OperationContext* opCtx) { - invariant(_state != kDone); - - { - // Unregister from the database's sharding state if we're still registered. - // TODO (SERVER-71444): Fix to be interruptible or document exception. - UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. - AutoGetDb autoDb(opCtx, getNss().dbName(), MODE_IX); - - auto scopedDss = - DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, getNss().dbName()); - scopedDss->unsetMovePrimaryInProgress(opCtx); - scopedDss->clearDbInfo(opCtx); - - // Leave the critical section if we're still registered. - scopedDss->exitCriticalSection(opCtx, _critSecReason); - } - - if (_state == kCriticalSection || _state == kCloneCompleted) { - // Clear the 'minOpTime recovery' document so that the next time a node from this shard - // becomes a primary, it won't have to recover the config server optime. - // TODO (SERVER-60110): Remove once 7.0 becomes last LTS. - ShardingStateRecovery_DEPRECATED::endMetadataOp(opCtx); - - // Checkpoint the vector clock to ensure causality in the event of a crash or shutdown. - VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); - } - - // If we're in the kCloneCompleted state, then we need to do the last step of cleaning up - // now-stale data on the old primary. Otherwise, indicate that we're done. - if (_state != kCloneCompleted) { - _state = kDone; - } - - return; -} - -} // namespace mongo diff --git a/src/mongo/db/s/move_primary_source_manager.h b/src/mongo/db/s/move_primary_source_manager.h deleted file mode 100644 index 2b87aa02c8c3d..0000000000000 --- a/src/mongo/db/s/move_primary_source_manager.h +++ /dev/null @@ -1,210 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. 
- * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/bson/bsonobj.h" -#include "mongo/db/s/database_sharding_state.h" -#include "mongo/db/shard_id.h" -#include "mongo/s/request_types/move_primary_gen.h" -#include "mongo/util/timer.h" - -namespace mongo { - -class OperationContext; -class Shard; -struct ShardingStatistics; -class Status; - -/** - * The donor-side movePrimary state machine. This object must be created and owned by a single - * thread, which controls its lifetime and should not be passed across threads. Unless explicitly - * indicated its methods must not be called from more than one thread and must not be called while - * any locks are held. - * - * The intended workflow is as follows: - * - Acquire a distributed lock on the database whose primary is about to be moved. - * - Instantiate a MovePrimarySourceManager on the stack. - * - Call clone to start and finish cloning of the unsharded collections. - * - Call enterCriticalSection to cause the shard to enter in 'read only' mode while the config - * server is notified of the new primary. - * - Call commitOnConfig to indicate the new primary in the config server metadata. - * - Call cleanStaleData to drop now-unused collections (and potentially databases) on the - * old primary. - * - * At any point in time it is safe to let the MovePrimarySourceManager object go out of scope in - * which case the destructor will take care of clean up based on how far we have advanced. - * - * TODO (SERVER-71309): Remove once 7.0 becomes last LTS. - */ -class MovePrimarySourceManager { - MovePrimarySourceManager(const MovePrimarySourceManager&) = delete; - MovePrimarySourceManager& operator=(const MovePrimarySourceManager&) = delete; - -public: - /** - * Instantiates a new movePrimary source manager. Must be called with the distributed lock - * acquired in advance (not asserted). - * - * May throw any exception. Known exceptions are: - * - InvalidOptions if the operation context is missing database version - * - StaleConfig if the expected database version does not match what we find it to be after - * acquiring the distributed lock - */ - MovePrimarySourceManager(OperationContext* opCtx, - ShardMovePrimary requestArgs, - StringData dbname, - ShardId& fromShard, - ShardId& toShard); - ~MovePrimarySourceManager(); - - /** - * Returns the namespace for which this source manager is active. - */ - NamespaceString getNss() const; - - /** - * Contacts the recipient shard and tells it to start cloning the specified chunk. This method - * will fail if for any reason the recipient shard fails to complete the cloning sequence. 
- * - * Expected state: kCreated - * Resulting state: kCloning on success, kDone on failure - */ - Status clone(OperationContext* opCtx); - - /** - * Once this call returns successfully, no writes will be happening on this shard until the - * movePrimary is committed. Therefore, commitMovePrimaryMetadata must be called as soon as - * possible afterwards. - * - * Expected state: kCloneCaughtUp - * Resulting state: kCriticalSection on success, kDone on failure - */ - Status enterCriticalSection(OperationContext* opCtx); - - /** - * Persists the updated DatabaseVersion on the config server and leaves the critical section. - * - * Expected state: kCriticalSection - * Resulting state: kNeedCleanStaleData - */ - Status commitOnConfig(OperationContext* opCtx); - - /** - * Clears stale collections (and potentially databases) on the old primary. - * - * Expected state: kNeedCleanStaleData - * Resulting state: kDone - */ - Status cleanStaleData(OperationContext* opCtx); - - /** - * May be called at any time. Unregisters the movePrimary source manager from the database and - * logs an error in the change log to indicate that the migration has failed. - * - * Expected state: Any - * Resulting state: kDone - */ - void cleanupOnError(OperationContext* opCtx); - -private: - static BSONObj _buildMoveLogEntry(const std::string& db, - const std::string& from, - const std::string& to) { - BSONObjBuilder details; - details.append("database", db); - details.append("from", from); - details.append("to", to); - - return details.obj(); - } - - /** - * Invokes the _configsvrCommitMovePrimary command of the config server to reassign the primary - * shard of the database. - */ - Status _commitOnConfig(OperationContext* opCtx, const DatabaseVersion& expectedDbVersion); - - /** - * Updates the config server's metadata in config.databases collection to reassign the primary - * shard of the database. - * - * This logic is not synchronized with the removeShard command and simultaneous invocations of - * movePrimary and removeShard can lead to data loss. - */ - Status _fallbackCommitOnConfig(OperationContext* opCtx, - const DatabaseVersion& expectedDbVersion); - - // Used to track the current state of the source manager. See the methods above, which have - // comments explaining the various state transitions. - enum State { - kCreated, - kCloning, - kCloneCaughtUp, - kCriticalSection, - kCloneCompleted, - kNeedCleanStaleData, - kDone - }; - - /** - * Called when any of the states fails. May only be called once and will put the migration - * manager into the kDone state. - */ - void _cleanup(OperationContext* opCtx); - - // The parameters to the movePrimary command - const ShardMovePrimary _requestArgs; - - // The database whose primary we are moving. - const StringData _dbname; - - // The donor shard - const ShardId& _fromShard; - - // The recipient shard - const ShardId& _toShard; - - // Collections that were cloned to the new primary - std::vector _clonedColls; - - // Indicates whether sharded collections exist on the old primary. - bool _shardedCollectionsExistOnDb; - - // Times the entire movePrimary operation - const Timer _entireOpTimer; - - // The current state. Used only for diagnostics and validation. - State _state{kCreated}; - - // Information about the movePrimary to be used in the critical section. 
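The header above documents an expected and resulting state for every method. A simplified standalone sketch of such a transition contract, with `invariant()` approximated by `assert()` and the intermediate states collapsed into comments:

```cpp
#include <cassert>
#include <iostream>

// Simplified version of the documented State enum; the checks mirror the
// "Expected state / Resulting state" contract of each phase.
enum class State {
    kCreated,
    kCloneCaughtUp,
    kCriticalSection,
    kNeedCleanStaleData,
    kDone
};

class MovePrimaryStateMachine {
public:
    void clone() {
        assert(_state == State::kCreated);
        _state = State::kCloneCaughtUp;  // passes through kCloning while the clone runs
    }
    void enterCriticalSection() {
        assert(_state == State::kCloneCaughtUp);
        _state = State::kCriticalSection;
    }
    void commitOnConfig() {
        assert(_state == State::kCriticalSection);
        _state = State::kNeedCleanStaleData;  // passes through kCloneCompleted on success
    }
    void cleanStaleData() {
        assert(_state == State::kNeedCleanStaleData);
        _state = State::kDone;
    }
    bool done() const { return _state == State::kDone; }

private:
    State _state = State::kCreated;
};

int main() {
    MovePrimaryStateMachine m;
    m.clone();
    m.enterCriticalSection();
    m.commitOnConfig();
    m.cleanStaleData();
    std::cout << "done: " << m.done() << "\n";
}
```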
- const BSONObj _critSecReason; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/move_timing_helper.cpp b/src/mongo/db/s/move_timing_helper.cpp index 1494da2882ecb..bbb91f2ab346b 100644 --- a/src/mongo/db/s/move_timing_helper.cpp +++ b/src/mongo/db/s/move_timing_helper.cpp @@ -28,15 +28,24 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/move_timing_helper.h" +#include +#include +#include +#include +#include #include "mongo/db/client.h" #include "mongo/db/curop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/move_timing_helper.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -45,7 +54,7 @@ namespace mongo { MoveTimingHelper::MoveTimingHelper(OperationContext* opCtx, const std::string& where, - const std::string& ns, + StringData ns, const boost::optional& min, const boost::optional& max, int totalNumSteps, diff --git a/src/mongo/db/s/move_timing_helper.h b/src/mongo/db/s/move_timing_helper.h index e7f0de9ca2a03..a34d29d096fe0 100644 --- a/src/mongo/db/s/move_timing_helper.h +++ b/src/mongo/db/s/move_timing_helper.h @@ -29,8 +29,12 @@ #pragma once +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/shard_id.h" #include "mongo/util/timer.h" @@ -44,7 +48,7 @@ class MoveTimingHelper { public: MoveTimingHelper(OperationContext* opCtx, const std::string& where, - const std::string& ns, + StringData ns, const boost::optional& min, const boost::optional& max, int totalNumSteps, diff --git a/src/mongo/db/s/namespace_metadata_change_notifications.cpp b/src/mongo/db/s/namespace_metadata_change_notifications.cpp index ecf6303910586..bb8acc17b146c 100644 --- a/src/mongo/db/s/namespace_metadata_change_notifications.cpp +++ b/src/mongo/db/s/namespace_metadata_change_notifications.cpp @@ -27,9 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include +#include #include "mongo/db/s/namespace_metadata_change_notifications.h" +#include "mongo/util/assert_util_core.h" namespace mongo { @@ -46,13 +51,38 @@ NamespaceMetadataChangeNotifications::createNotification(const NamespaceString& stdx::lock_guard lg(_mutex); - auto& notifList = _notificationsList[nss]; + auto& notifList = _notificationsList[nss].second; notifToken->itToErase = notifList.insert(notifList.end(), notifToken); return {this, std::move(notifToken)}; } -void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& nss) { +Timestamp NamespaceMetadataChangeNotifications::get(OperationContext* opCtx, + ScopedNotification& notif) { + // Wait for notification to be ready + notif.get(opCtx); + + // Get value and replace notification token under lock + auto nss = notif.getToken()->nss; + auto newToken = std::make_shared(nss); + + stdx::lock_guard lock(_mutex); + auto& [opTime, notifList] = _notificationsList[nss]; + + // Put new token in _notificationsList + newToken->itToErase = notifList.insert(notifList.end(), newToken); + + // Deregister old token from notifications list. + _unregisterNotificationToken_inlock(lock, *notif.getToken()); + + // Update scoped notification. 
+ notif.replaceToken(std::move(newToken)); + + return opTime; +} + +void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& nss, + const Timestamp& commitTime) { stdx::lock_guard lock(_mutex); auto mapIt = _notificationsList.find(nss); @@ -60,25 +90,30 @@ void NamespaceMetadataChangeNotifications::notifyChange(const NamespaceString& n return; } - for (auto& notifToken : mapIt->second) { - notifToken->notify.set(); - notifToken->itToErase.reset(); - } + auto& [opTime, notifList] = mapIt->second; + + if (commitTime <= opTime) + return; - _notificationsList.erase(mapIt); + opTime = commitTime; + for (auto& notifToken : notifList) { + if (!notifToken->notify) + notifToken->notify.set(); + } } void NamespaceMetadataChangeNotifications::_unregisterNotificationToken( - std::shared_ptr token) { + const NotificationToken& token) { stdx::lock_guard lg(_mutex); - if (!token->itToErase) { - return; - } + _unregisterNotificationToken_inlock(lg, token); +} - auto mapIt = _notificationsList.find(token->nss); - auto& notifList = mapIt->second; - notifList.erase(*token->itToErase); +void NamespaceMetadataChangeNotifications::_unregisterNotificationToken_inlock( + WithLock lk, const NotificationToken& token) { + auto mapIt = _notificationsList.find(token.nss); + auto& notifList = mapIt->second.second; + notifList.erase(*token.itToErase); if (notifList.empty()) { _notificationsList.erase(mapIt); diff --git a/src/mongo/db/s/namespace_metadata_change_notifications.h b/src/mongo/db/s/namespace_metadata_change_notifications.h index 12df62bfb95b9..8e7991fb189b4 100644 --- a/src/mongo/db/s/namespace_metadata_change_notifications.h +++ b/src/mongo/db/s/namespace_metadata_change_notifications.h @@ -29,10 +29,15 @@ #pragma once +#include +#include #include #include +#include +#include #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/platform/mutex.h" #include "mongo/util/concurrency/notification.h" @@ -73,7 +78,7 @@ class NamespaceMetadataChangeNotifications { ~ScopedNotification() { if (_token) { - _notifications->_unregisterNotificationToken(std::move(_token)); + _notifications->_unregisterNotificationToken(*_token); } } @@ -81,6 +86,15 @@ class NamespaceMetadataChangeNotifications { _token->notify.get(opCtx); } + std::shared_ptr getToken() { + return _token; + } + + void replaceToken( + std::shared_ptr newToken) { + _token = std::move(newToken); + } + private: NamespaceMetadataChangeNotifications* _notifications; @@ -93,10 +107,17 @@ class NamespaceMetadataChangeNotifications { ScopedNotification createNotification(const NamespaceString& nss); /** - * Goes through all registered notifications for this namespace signals them and removes them - * from the registry atomically. + * If the commit time is greater than the current one for this namespace, updates the + * notification commit time and signals any notifications that haven't already been notified. */ - void notifyChange(const NamespaceString& nss); + void notifyChange(const NamespaceString& nss, const Timestamp& commitTime); + + /** + * Blocks until the notification in `notif` is ready and then returns the current commitTime + * associated with the namespace and replaces the notification token so that any newer commit + * times will notify this waiter again. 
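The updated notification registry only signals waiters when the commit time actually advances, and a waiter that calls `get()` is re-armed for future commits. A toy single-key analogue using a condition variable (it skips the token re-registration the real class performs):

```cpp
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Commit-time-gated notifier: notifyChange() only wakes waiters when the
// commit time moves forward, and get() returns the latest time seen so far.
class MetadataChangeNotifier {
public:
    void notifyChange(long long commitTime) {
        std::lock_guard<std::mutex> lk(_mutex);
        if (commitTime <= _latestCommitTime)
            return;  // stale or duplicate notification: nothing to signal
        _latestCommitTime = commitTime;
        _cv.notify_all();
    }

    // Blocks until the commit time advances past lastSeen, then returns it.
    long long get(long long lastSeen) {
        std::unique_lock<std::mutex> lk(_mutex);
        _cv.wait(lk, [&] { return _latestCommitTime > lastSeen; });
        return _latestCommitTime;
    }

private:
    std::mutex _mutex;
    std::condition_variable _cv;
    long long _latestCommitTime = 0;
};

int main() {
    MetadataChangeNotifier notifier;
    std::thread waiter([&] {
        long long t = notifier.get(/*lastSeen=*/0);
        std::cout << "woke up at commit time " << t << "\n";
    });

    std::this_thread::sleep_for(std::chrono::milliseconds(10));
    notifier.notifyChange(2);  // wakes the waiter
    notifier.notifyChange(2);  // duplicate: ignored by the <= check
    waiter.join();
}
```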
+ */ + Timestamp get(OperationContext* opCtx, ScopedNotification& notif); private: using NotificationsList = std::list>; @@ -112,10 +133,13 @@ class NamespaceMetadataChangeNotifications { itToErase; }; - void _unregisterNotificationToken(std::shared_ptr token); + void _unregisterNotificationToken(const NotificationToken& token); + + void _unregisterNotificationToken_inlock(WithLock, const NotificationToken& token); Mutex _mutex = MONGO_MAKE_LATCH("NamespaceMetadataChangeNotifications::_mutex"); - std::map _notificationsList; + // The timestamp represents the latest commitTime for a given namespace seen via notifyChange. + std::map> _notificationsList; }; } // namespace mongo diff --git a/src/mongo/db/s/namespace_metadata_change_notifications_test.cpp b/src/mongo/db/s/namespace_metadata_change_notifications_test.cpp index 1d58b782ba1e0..5c2f15b8edca6 100644 --- a/src/mongo/db/s/namespace_metadata_change_notifications_test.cpp +++ b/src/mongo/db/s/namespace_metadata_change_notifications_test.cpp @@ -27,22 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include - -#include "mongo/db/operation_context_noop.h" #include "mongo/db/s/namespace_metadata_change_notifications.h" -#include "mongo/db/service_context.h" + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/client.h" #include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/stdx/thread.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/tick_source_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo { namespace { -const NamespaceString kNss("foo.bar"); +const NamespaceString kNss = NamespaceString::createNamespaceString_forTest("foo.bar"); class NamespaceMetadataChangeNotificationsTest : public ServiceContextMongoDTest { protected: @@ -58,15 +57,16 @@ TEST_F(NamespaceMetadataChangeNotificationsTest, WaitForNotify) { { auto opCtx = getClient()->makeOperationContext(); opCtx->setDeadlineAfterNowBy(Milliseconds{0}, ErrorCodes::ExceededTimeLimit); - ASSERT_THROWS_CODE( - scopedNotif.get(opCtx.get()), AssertionException, ErrorCodes::ExceededTimeLimit); + ASSERT_THROWS_CODE(notifications.get(opCtx.get(), scopedNotif), + AssertionException, + ErrorCodes::ExceededTimeLimit); } - notifications.notifyChange(kNss); + notifications.notifyChange(kNss, {Timestamp(2, 1)}); { auto opCtx = getClient()->makeOperationContext(); - scopedNotif.get(opCtx.get()); + notifications.get(opCtx.get(), scopedNotif); } } @@ -78,11 +78,12 @@ TEST_F(NamespaceMetadataChangeNotificationsTest, GiveUpWaitingForNotify) { auto opCtx = getClient()->makeOperationContext(); opCtx->setDeadlineAfterNowBy(Milliseconds{0}, ErrorCodes::ExceededTimeLimit); - ASSERT_THROWS_CODE( - scopedNotif.get(opCtx.get()), AssertionException, ErrorCodes::ExceededTimeLimit); + ASSERT_THROWS_CODE(notifications.get(opCtx.get(), scopedNotif), + AssertionException, + ErrorCodes::ExceededTimeLimit); } - notifications.notifyChange(kNss); + notifications.notifyChange(kNss, {Timestamp(2, 1)}); } TEST_F(NamespaceMetadataChangeNotificationsTest, MoveConstructionWaitForNotify) { @@ -91,18 +92,101 @@ TEST_F(NamespaceMetadataChangeNotificationsTest, MoveConstructionWaitForNotify) auto scopedNotif = notifications.createNotification(kNss); auto movedScopedNotif = std::move(scopedNotif); + { + auto opCtx = getClient()->makeOperationContext(); + opCtx->setDeadlineAfterNowBy(Milliseconds{0}, 
ErrorCodes::ExceededTimeLimit); + ASSERT_THROWS_CODE(notifications.get(opCtx.get(), movedScopedNotif), + AssertionException, + ErrorCodes::ExceededTimeLimit); + } + + notifications.notifyChange(kNss, {Timestamp(2, 1)}); + + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_EQ(notifications.get(opCtx.get(), movedScopedNotif), Timestamp(2, 1)); + } +} + +TEST_F(NamespaceMetadataChangeNotificationsTest, NotifyTwice) { + NamespaceMetadataChangeNotifications notifications; + + auto scopedNotif = notifications.createNotification(kNss); + + { + auto opCtx = getClient()->makeOperationContext(); + opCtx->setDeadlineAfterNowBy(Milliseconds{0}, ErrorCodes::ExceededTimeLimit); + ASSERT_THROWS_CODE(notifications.get(opCtx.get(), scopedNotif), + AssertionException, + ErrorCodes::ExceededTimeLimit); + } + + notifications.notifyChange(kNss, {Timestamp(2, 1)}); + notifications.notifyChange(kNss, {Timestamp(3, 1)}); + + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_EQUALS(notifications.get(opCtx.get(), scopedNotif), Timestamp(3, 1)); + } +} + +TEST_F(NamespaceMetadataChangeNotificationsTest, NotifyAndThenWaitAgain) { + NamespaceMetadataChangeNotifications notifications; + + auto scopedNotif = notifications.createNotification(kNss); + + { + auto opCtx = getClient()->makeOperationContext(); + opCtx->setDeadlineAfterNowBy(Milliseconds{0}, ErrorCodes::ExceededTimeLimit); + ASSERT_THROWS_CODE(notifications.get(opCtx.get(), scopedNotif), + AssertionException, + ErrorCodes::ExceededTimeLimit); + } + + notifications.notifyChange(kNss, {Timestamp(2, 1)}); + + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_EQUALS(notifications.get(opCtx.get(), scopedNotif), Timestamp(2, 1)); + opCtx->setDeadlineAfterNowBy(Milliseconds{0}, ErrorCodes::ExceededTimeLimit); + ASSERT_THROWS_CODE(notifications.get(opCtx.get(), scopedNotif), + AssertionException, + ErrorCodes::ExceededTimeLimit); + } + + notifications.notifyChange(kNss, {Timestamp(3, 1)}); + + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_EQUALS(notifications.get(opCtx.get(), scopedNotif), Timestamp(3, 1)); + } +} + +TEST_F(NamespaceMetadataChangeNotificationsTest, TwoWaiters) { + NamespaceMetadataChangeNotifications notifications; + + auto scopedNotif1 = notifications.createNotification(kNss); + { auto opCtx = getClient()->makeOperationContext(); opCtx->setDeadlineAfterNowBy(Milliseconds{0}, ErrorCodes::ExceededTimeLimit); ASSERT_THROWS_CODE( - movedScopedNotif.get(opCtx.get()), AssertionException, ErrorCodes::ExceededTimeLimit); + scopedNotif1.get(opCtx.get()), AssertionException, ErrorCodes::ExceededTimeLimit); } - notifications.notifyChange(kNss); + notifications.notifyChange(kNss, {Timestamp(2, 1)}); + auto scopedNotif2 = notifications.createNotification(kNss); + notifications.notifyChange(kNss, {Timestamp(3, 1)}); + + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_EQUALS(notifications.get(opCtx.get(), scopedNotif1), Timestamp(3, 1)); + } { auto opCtx = getClient()->makeOperationContext(); - movedScopedNotif.get(opCtx.get()); + scopedNotif2.get(opCtx.get()); + ASSERT_EQUALS(notifications.get(opCtx.get(), scopedNotif2), Timestamp(3, 1)); } } diff --git a/src/mongo/db/s/op_observer_sharding_impl.cpp b/src/mongo/db/s/op_observer_sharding_impl.cpp deleted file mode 100644 index ee75c265bce68..0000000000000 --- a/src/mongo/db/s/op_observer_sharding_impl.cpp +++ /dev/null @@ -1,249 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. 
- * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/s/op_observer_sharding_impl.h" - -#include "mongo/db/repl/oplog_entry.h" -#include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/database_sharding_state.h" -#include "mongo/db/s/migration_chunk_cloner_source.h" -#include "mongo/db/s/migration_source_manager.h" -#include "mongo/db/s/sharding_write_router.h" -#include "mongo/logv2/log.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - -namespace mongo { -namespace { - -const auto getIsMigrating = OperationContext::declareDecoration(); - -/** - * Write operations do shard version checking, but if an update operation runs as part of a - * 'readConcern:snapshot' transaction, the router could have used the metadata at the snapshot - * time and yet set the latest shard version on the request. This is why the write can get routed - * to a shard which no longer owns the chunk being written to. In such cases, throw a - * MigrationConflict exception to indicate that the transaction needs to be rolled-back and - * restarted. - */ -void assertIntersectingChunkHasNotMoved(OperationContext* opCtx, - const CollectionMetadata& metadata, - const BSONObj& shardKey, - const LogicalTime& atClusterTime) { - // We can assume the simple collation because shard keys do not support non-simple collations. - auto cmAtTimeOfWrite = - ChunkManager::makeAtTime(*metadata.getChunkManager(), atClusterTime.asTimestamp()); - auto chunk = cmAtTimeOfWrite.findIntersectingChunkWithSimpleCollation(shardKey); - - // Throws if the chunk has moved since the timestamp of the running transaction's atClusterTime - // read concern parameter. - chunk.throwIfMoved(); -} - -void assertNoMovePrimaryInProgress(OperationContext* opCtx, const NamespaceString& nss) { - if (!nss.isNormalCollection() && nss.coll() != "system.views" && - !nss.isTimeseriesBucketsCollection()) { - return; - } - - // TODO SERVER-58222: evaluate whether this is safe or whether acquiring the lock can block. 
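The deleted observer's snapshot check rebuilds the routing information at the transaction's `atClusterTime` and throws `MigrationConflict` if the intersecting chunk has since moved. A toy model of the same idea, using a per-chunk last-migration time in place of a historical `ChunkManager`:

```cpp
#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

// Toy chunk record: owning shard plus the time it was last migrated.
struct Chunk {
    std::string owner;
    long long lastMigrationTime;
};

// A write taken at an older cluster time must conflict if the chunk that owns
// its shard key moved after that snapshot, rather than land on the wrong shard.
void assertIntersectingChunkHasNotMoved(const std::map<int, Chunk>& chunksByLowerBound,
                                        int shardKey,
                                        long long atClusterTime) {
    // Find the chunk whose range contains shardKey (keys are range lower bounds).
    auto it = chunksByLowerBound.upper_bound(shardKey);
    if (it == chunksByLowerBound.begin())
        throw std::runtime_error("no chunk owns this key");
    --it;
    if (it->second.lastMigrationTime > atClusterTime)
        throw std::runtime_error("MigrationConflict: chunk moved after the snapshot time");
}

int main() {
    std::map<int, Chunk> chunks{{0, {"shardA", 5}}, {100, {"shardB", 20}}};

    assertIntersectingChunkHasNotMoved(chunks, 42, /*atClusterTime=*/10);  // ok: moved at t=5
    try {
        assertIntersectingChunkHasNotMoved(chunks, 150, /*atClusterTime=*/10);  // moved at t=20
    } catch (const std::exception& ex) {
        std::cout << ex.what() << "\n";
    }
}
```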
- AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); - Lock::DBLock dblock(opCtx, nss.dbName(), MODE_IS); - - const auto scopedDss = - DatabaseShardingState::assertDbLockedAndAcquireShared(opCtx, nss.dbName()); - if (scopedDss->isMovePrimaryInProgress()) { - LOGV2(4908600, "assertNoMovePrimaryInProgress", logAttrs(nss)); - - uasserted(ErrorCodes::MovePrimaryInProgress, - "movePrimary is in progress for namespace " + nss.toString()); - } -} - -} // namespace - -OpObserverShardingImpl::OpObserverShardingImpl(std::unique_ptr oplogWriter) - : OpObserverImpl(std::move(oplogWriter)) {} - -bool OpObserverShardingImpl::isMigrating(OperationContext* opCtx, - NamespaceString const& nss, - BSONObj const& docToDelete) { - const auto scopedCsr = - CollectionShardingRuntime::assertCollectionLockedAndAcquireShared(opCtx, nss); - auto cloner = MigrationSourceManager::getCurrentCloner(*scopedCsr); - - return cloner && cloner->isDocumentInMigratingChunk(docToDelete); -} - -void OpObserverShardingImpl::shardObserveAboutToDelete(OperationContext* opCtx, - NamespaceString const& nss, - BSONObj const& docToDelete) { - getIsMigrating(opCtx) = isMigrating(opCtx, nss, docToDelete); -} - -void OpObserverShardingImpl::shardObserveInsertsOp( - OperationContext* opCtx, - const NamespaceString& nss, - std::vector::const_iterator first, - std::vector::const_iterator last, - const std::vector& opTimeList, - const ShardingWriteRouter& shardingWriteRouter, - const bool fromMigrate, - const bool inMultiDocumentTransaction) { - if (nss == NamespaceString::kSessionTransactionsTableNamespace || fromMigrate) - return; - - auto* const css = shardingWriteRouter.getCss(); - css->checkShardVersionOrThrow(opCtx); - DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.db()); - - auto* const csr = checked_cast(css); - auto metadata = csr->getCurrentMetadataIfKnown(); - if (!metadata || !metadata->isSharded()) { - assertNoMovePrimaryInProgress(opCtx, nss); - return; - } - - int index = 0; - for (auto it = first; it != last; it++, index++) { - auto opTime = opTimeList.empty() ? 
repl::OpTime() : opTimeList[index]; - - if (inMultiDocumentTransaction) { - const auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime(); - - if (atClusterTime) { - const auto shardKey = - metadata->getShardKeyPattern().extractShardKeyFromDocThrows(it->doc); - assertIntersectingChunkHasNotMoved(opCtx, *metadata, shardKey, *atClusterTime); - } - - return; - } - - auto cloner = MigrationSourceManager::getCurrentCloner(*csr); - if (cloner) { - cloner->onInsertOp(opCtx, it->doc, opTime); - } - } -} - -void OpObserverShardingImpl::shardObserveUpdateOp(OperationContext* opCtx, - const NamespaceString& nss, - boost::optional preImageDoc, - const BSONObj& postImageDoc, - const repl::OpTime& opTime, - const ShardingWriteRouter& shardingWriteRouter, - const repl::OpTime& prePostImageOpTime, - const bool inMultiDocumentTransaction) { - auto* const css = shardingWriteRouter.getCss(); - css->checkShardVersionOrThrow(opCtx); - DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.db()); - - auto* const csr = checked_cast(css); - auto metadata = csr->getCurrentMetadataIfKnown(); - if (!metadata || !metadata->isSharded()) { - assertNoMovePrimaryInProgress(opCtx, nss); - return; - } - - if (inMultiDocumentTransaction) { - const auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime(); - - if (atClusterTime) { - const auto shardKey = - metadata->getShardKeyPattern().extractShardKeyFromDocThrows(postImageDoc); - assertIntersectingChunkHasNotMoved(opCtx, *metadata, shardKey, *atClusterTime); - } - - return; - } - - auto cloner = MigrationSourceManager::getCurrentCloner(*csr); - if (cloner) { - cloner->onUpdateOp(opCtx, preImageDoc, postImageDoc, opTime, prePostImageOpTime); - } -} - -void OpObserverShardingImpl::shardObserveDeleteOp(OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& documentKey, - const repl::OpTime& opTime, - const ShardingWriteRouter& shardingWriteRouter, - const repl::OpTime& preImageOpTime, - const bool inMultiDocumentTransaction) { - auto* const css = shardingWriteRouter.getCss(); - css->checkShardVersionOrThrow(opCtx); - DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.db()); - - auto* const csr = checked_cast(css); - auto metadata = csr->getCurrentMetadataIfKnown(); - if (!metadata || !metadata->isSharded()) { - assertNoMovePrimaryInProgress(opCtx, nss); - return; - } - - if (inMultiDocumentTransaction) { - const auto atClusterTime = repl::ReadConcernArgs::get(opCtx).getArgsAtClusterTime(); - - if (atClusterTime) { - const auto shardKey = - metadata->getShardKeyPattern().extractShardKeyFromDocumentKeyThrows(documentKey); - assertIntersectingChunkHasNotMoved(opCtx, *metadata, shardKey, *atClusterTime); - } - - return; - } - - auto cloner = MigrationSourceManager::getCurrentCloner(*csr); - if (cloner && getIsMigrating(opCtx)) { - cloner->onDeleteOp(opCtx, documentKey, opTime, preImageOpTime); - } -} - -void OpObserverShardingImpl::shardObserveTransactionPrepareOrUnpreparedCommit( - OperationContext* opCtx, - const std::vector& stmts, - const repl::OpTime& prepareOrCommitOptime) { - - opCtx->recoveryUnit()->registerChange( - std::make_unique( - *opCtx->getLogicalSessionId(), stmts, prepareOrCommitOptime)); -} - -void OpObserverShardingImpl::shardObserveNonPrimaryTransactionPrepare( - OperationContext* opCtx, - const std::vector& stmts, - const repl::OpTime& prepareOrCommitOptime) { - - opCtx->recoveryUnit()->registerChange( - std::make_unique( - *opCtx->getLogicalSessionId(), stmts, prepareOrCommitOptime)); -} - 
-} // namespace mongo diff --git a/src/mongo/db/s/op_observer_sharding_impl.h b/src/mongo/db/s/op_observer_sharding_impl.h deleted file mode 100644 index 2858f5b92d83b..0000000000000 --- a/src/mongo/db/s/op_observer_sharding_impl.h +++ /dev/null @@ -1,84 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/op_observer/op_observer_impl.h" - -namespace mongo { - -class OpObserverShardingImpl : public OpObserverImpl { -public: - OpObserverShardingImpl(std::unique_ptr oplogWriter); - - // True if the document being deleted belongs to a chunk which, while still in the shard, - // is being migrated out. (Not to be confused with "fromMigrate", which tags operations - // that are steps in performing the migration.) 
- static bool isMigrating(OperationContext* opCtx, - NamespaceString const& nss, - BSONObj const& docToDelete); - -protected: - void shardObserveAboutToDelete(OperationContext* opCtx, - NamespaceString const& nss, - BSONObj const& docToDelete) override; - void shardObserveInsertsOp(OperationContext* opCtx, - const NamespaceString& nss, - std::vector::const_iterator first, - std::vector::const_iterator last, - const std::vector& opTimeList, - const ShardingWriteRouter& shardingWriteRouter, - bool fromMigrate, - bool inMultiDocumentTransaction) override; - void shardObserveUpdateOp(OperationContext* opCtx, - const NamespaceString& nss, - boost::optional preImageDoc, - const BSONObj& updatedDoc, - const repl::OpTime& opTime, - const ShardingWriteRouter& shardingWriteRouter, - const repl::OpTime& prePostImageOpTime, - bool inMultiDocumentTransaction) override; - void shardObserveDeleteOp(OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& documentKey, - const repl::OpTime& opTime, - const ShardingWriteRouter& shardingWriteRouter, - const repl::OpTime& preImageOpTime, - bool inMultiDocumentTransaction) override; - void shardObserveTransactionPrepareOrUnpreparedCommit( - OperationContext* opCtx, - const std::vector& stmts, - const repl::OpTime& prepareOrCommitOptime) override; - void shardObserveNonPrimaryTransactionPrepare( - OperationContext* opCtx, - const std::vector& stmts, - const repl::OpTime& prepareOrCommitOptime) override; -}; - -} // namespace mongo diff --git a/src/mongo/db/s/op_observer_sharding_test.cpp b/src/mongo/db/s/op_observer_sharding_test.cpp index 65754edcd7199..84b885216d762 100644 --- a/src/mongo/db/s/op_observer_sharding_test.cpp +++ b/src/mongo/db/s/op_observer_sharding_test.cpp @@ -27,28 +27,72 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/op_observer_util.h" #include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/database_sharding_state.h" -#include "mongo/db/s/op_observer_sharding_impl.h" +#include "mongo/db/s/migration_chunk_cloner_source_op_observer.h" +#include "mongo/db/s/migration_source_manager.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/db/s/type_shard_identity.h" -#include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { const NamespaceString kTestNss = NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); -const NamespaceString kUnshardedNss("TestDB", "UnshardedColl"); +const NamespaceString kUnshardedNss = + NamespaceString::createNamespaceString_forTest("TestDB", "UnshardedColl"); void setCollectionFilteringMetadata(OperationContext* opCtx, CollectionMetadata metadata) { AutoGetCollection autoColl(opCtx, kTestNss, MODE_X); @@ -70,8 +114,9 @@ class DocumentKeyStateTest : public ShardServerTestFixture { auto db = databaseHolder->openDb(operationContext(), kTestNss.dbName(), &justCreated); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive( operationContext(), kTestNss.dbName()); - scopedDss->setDbInfo(operationContext(), - DatabaseType{kTestNss.dbName().db(), ShardId("this"), dbVersion1}); + scopedDss->setDbInfo( + operationContext(), + DatabaseType{kTestNss.dbName().toString_forTest(), ShardId("this"), dbVersion1}); ASSERT_TRUE(db); ASSERT_TRUE(justCreated); @@ -137,10 +182,10 @@ TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateUnsharded) { << "key2" << true); // Check that an 
order for deletion from an unsharded collection extracts just the "_id" field - ASSERT_BSONOBJ_EQ(repl::getDocumentKey(operationContext(), *autoColl, doc).getShardKeyAndId(), + ASSERT_BSONOBJ_EQ(getDocumentKey(*autoColl, doc).getShardKeyAndId(), BSON("_id" << "hello")); - ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc)); + ASSERT_FALSE(MigrationSourceManager::isMigrating(operationContext(), kTestNss, doc)); } TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithoutIdInShardKey) { @@ -164,12 +209,12 @@ TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithoutIdInShardKey) { << "key2" << true); // Verify the shard key is extracted, in correct order, followed by the "_id" field. - ASSERT_BSONOBJ_EQ(repl::getDocumentKey(operationContext(), *autoColl, doc).getShardKeyAndId(), + ASSERT_BSONOBJ_EQ(getDocumentKey(*autoColl, doc).getShardKeyAndId(), BSON("key" << 100 << "key3" << "abc" << "_id" << "hello")); - ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc)); + ASSERT_FALSE(MigrationSourceManager::isMigrating(operationContext(), kTestNss, doc)); } TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithIdInShardKey) { @@ -193,11 +238,11 @@ TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithIdInShardKey) { << "key" << 100); // Verify the shard key is extracted with "_id" in the right place. - ASSERT_BSONOBJ_EQ(repl::getDocumentKey(operationContext(), *autoColl, doc).getShardKeyAndId(), + ASSERT_BSONOBJ_EQ(getDocumentKey(*autoColl, doc).getShardKeyAndId(), BSON("key" << 100 << "_id" << "hello" << "key2" << true)); - ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc)); + ASSERT_FALSE(MigrationSourceManager::isMigrating(operationContext(), kTestNss, doc)); } TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithIdHashInShardKey) { @@ -219,16 +264,16 @@ TEST_F(DocumentKeyStateTest, MakeDocumentKeyStateShardedWithIdHashInShardKey) { << "key" << 100); // Verify the shard key is extracted with "_id" in the right place, not hashed. 
- ASSERT_BSONOBJ_EQ(repl::getDocumentKey(operationContext(), *autoColl, doc).getShardKeyAndId(), + ASSERT_BSONOBJ_EQ(getDocumentKey(*autoColl, doc).getShardKeyAndId(), BSON("_id" << "hello")); - ASSERT_FALSE(OpObserverShardingImpl::isMigrating(operationContext(), kTestNss, doc)); + ASSERT_FALSE(MigrationSourceManager::isMigrating(operationContext(), kTestNss, doc)); } TEST_F(DocumentKeyStateTest, CheckDBVersion) { OpObserverRegistry opObserver; - opObserver.addObserver( - std::make_unique(std::make_unique())); + opObserver.addObserver(std::make_unique(std::make_unique())); + opObserver.addObserver(std::make_unique()); OperationContext* opCtx = operationContext(); AutoGetCollection autoColl(opCtx, kUnshardedNss, MODE_IX); @@ -265,8 +310,9 @@ TEST_F(DocumentKeyStateTest, CheckDBVersion) { opObserver.onUpdate(opCtx, update); }; auto onDelete = [&]() { - opObserver.aboutToDelete(opCtx, *autoColl, BSON("_id" << 0)); - opObserver.onDelete(opCtx, *autoColl, kUninitializedStmtId, {}); + OplogDeleteEntryArgs args; + opObserver.aboutToDelete(opCtx, *autoColl, BSON("_id" << 0), &args); + opObserver.onDelete(opCtx, *autoColl, kUninitializedStmtId, args); }; // Using the latest dbVersion works diff --git a/src/mongo/db/s/operation_sharding_state.cpp b/src/mongo/db/s/operation_sharding_state.cpp index de05b91e4d898..a04ebe8f410e5 100644 --- a/src/mongo/db/s/operation_sharding_state.cpp +++ b/src/mongo/db/s/operation_sharding_state.cpp @@ -29,7 +29,30 @@ #include "mongo/db/s/operation_sharding_state.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/s/sharding_api_d_params_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -61,12 +84,14 @@ void OperationShardingState::setShardRole(OperationContext* opCtx, auto& oss = OperationShardingState::get(opCtx); if (shardVersion) { - auto emplaceResult = oss._shardVersions.try_emplace(nss.ns(), *shardVersion); + auto emplaceResult = + oss._shardVersions.try_emplace(NamespaceStringUtil::serialize(nss), *shardVersion); auto& tracker = emplaceResult.first->second; if (!emplaceResult.second) { uassert(640570, str::stream() << "Illegal attempt to change the expected shard version for " - << nss << " from " << tracker.v << " to " << *shardVersion, + << nss.toStringForErrorMsg() << " from " << tracker.v << " to " + << *shardVersion, tracker.v == *shardVersion); } invariant(++tracker.recursion > 0); @@ -90,7 +115,7 @@ void OperationShardingState::unsetShardRoleForLegacyDDLOperationsSentWithShardVe OperationContext* opCtx, const NamespaceString& nss) { auto& oss = OperationShardingState::get(opCtx); - auto it = oss._shardVersions.find(nss.ns()); + auto it = oss._shardVersions.find(NamespaceStringUtil::serialize(nss)); if (it != oss._shardVersions.end()) { auto& tracker = it->second; tassert(6848500, @@ -103,15 +128,16 @@ void OperationShardingState::unsetShardRoleForLegacyDDLOperationsSentWithShardVe } boost::optional OperationShardingState::getShardVersion(const NamespaceString& nss) { - const auto it = _shardVersions.find(nss.ns()); + const auto it = 
_shardVersions.find(NamespaceStringUtil::serialize(nss)); if (it != _shardVersions.end()) { return it->second.v; } return boost::none; } -boost::optional OperationShardingState::getDbVersion(StringData dbName) const { - const auto it = _databaseVersions.find(dbName); +boost::optional OperationShardingState::getDbVersion( + const DatabaseName& dbName) const { + const auto it = _databaseVersions.find(DatabaseNameUtil::serialize(dbName)); if (it != _databaseVersions.end()) { return it->second.v; } @@ -168,17 +194,19 @@ using ScopedAllowImplicitCollectionCreate_UNSAFE = OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE; ScopedAllowImplicitCollectionCreate_UNSAFE::ScopedAllowImplicitCollectionCreate_UNSAFE( - OperationContext* opCtx) + OperationContext* opCtx, bool forceCSRAsUnknownAfterCollectionCreation) : _opCtx(opCtx) { auto& oss = get(_opCtx); invariant(!oss._allowCollectionCreation); oss._allowCollectionCreation = true; + oss._forceCSRAsUnknownAfterCollectionCreation = forceCSRAsUnknownAfterCollectionCreation; } ScopedAllowImplicitCollectionCreate_UNSAFE::~ScopedAllowImplicitCollectionCreate_UNSAFE() { auto& oss = get(_opCtx); invariant(oss._allowCollectionCreation); oss._allowCollectionCreation = false; + oss._forceCSRAsUnknownAfterCollectionCreation = false; } ScopedSetShardRole::ScopedSetShardRole(OperationContext* opCtx, @@ -207,7 +235,7 @@ ScopedSetShardRole::~ScopedSetShardRole() { auto& oss = OperationShardingState::get(_opCtx); if (_shardVersion) { - auto it = oss._shardVersions.find(_nss.ns()); + auto it = oss._shardVersions.find(NamespaceStringUtil::serialize(_nss)); invariant(it != oss._shardVersions.end()); auto& tracker = it->second; invariant(--tracker.recursion >= 0); diff --git a/src/mongo/db/s/operation_sharding_state.h b/src/mongo/db/s/operation_sharding_state.h index baf7fc622148f..abbac9f9f63c2 100644 --- a/src/mongo/db/s/operation_sharding_state.h +++ b/src/mongo/db/s/operation_sharding_state.h @@ -29,8 +29,12 @@ #pragma once +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/s/database_version.h" @@ -96,11 +100,17 @@ class OperationShardingState { * * Instantiating this object on the stack indicates to the storage execution subsystem that it * is allowed to create any collection in this context and that the caller will be responsible - * for notifying the shard Sharding sybsystem of the collection creation. + * for notifying the shard Sharding subsystem of the collection creation. Note that in most + * cases the CollectionShardingRuntime associated with that nss will be set as UNSHARDED. However, + * there are some scenarios in which it is required to set it as UNKNOWN: that's the reason why + * the constructor has the 'forceCSRAsUnknownAfterCollectionCreation' parameter. You can find + * more information about how the CSR is modified in ShardServerOpObserver::onCreateCollection.
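To illustrate how a caller is expected to opt into the UNKNOWN behaviour described above, here is a minimal sketch; only the guard class and its 'forceCSRAsUnknownAfterCollectionCreation' argument come from this patch, while the surrounding function is hypothetical:

```cpp
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/s/operation_sharding_state.h"

namespace mongo {

// Hypothetical caller (not part of this patch) performing a write that may implicitly
// create 'nss' and that wants the CollectionShardingRuntime left as UNKNOWN instead of
// the default UNSHARDED once the collection exists.
void writeThatMayImplicitlyCreate(OperationContext* opCtx, const NamespaceString& nss) {
    OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE allowImplicitCreate(
        opCtx, true /* forceCSRAsUnknownAfterCollectionCreation */);

    // ... perform the insert/update that may create 'nss' while the guard is in scope ...
}  // the guard's destructor resets both flags on the OperationShardingState

}  // namespace mongo
```

Omitting the second argument keeps today's behaviour (UNSHARDED filtering metadata), so existing call sites do not need to change.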
*/ class ScopedAllowImplicitCollectionCreate_UNSAFE { public: - ScopedAllowImplicitCollectionCreate_UNSAFE(OperationContext* opCtx); + /* Please read the comment associated to this class */ + ScopedAllowImplicitCollectionCreate_UNSAFE( + OperationContext* opCtx, bool forceCSRAsUnknownAfterCollectionCreation = false); ~ScopedAllowImplicitCollectionCreate_UNSAFE(); private: @@ -135,7 +145,7 @@ class OperationShardingState { * If 'db' matches the 'db' in the namespace the client sent versions for, returns the database * version sent by the client (if any), else returns boost::none. */ - boost::optional getDbVersion(StringData dbName) const; + boost::optional getDbVersion(const DatabaseName& dbName) const; /** * This method implements a best-effort attempt to wait for the critical section to complete @@ -170,6 +180,9 @@ class OperationShardingState { // Specifies whether the request is allowed to create database/collection implicitly bool _allowCollectionCreation{false}; + // Specifies whether the CollectionShardingRuntime should be set as unknown after collection + // creation + bool _forceCSRAsUnknownAfterCollectionCreation{false}; // Stores the shard version expected for each collection that will be accessed struct ShardVersionTracker { diff --git a/src/mongo/db/s/operation_sharding_state_test.cpp b/src/mongo/db/s/operation_sharding_state_test.cpp index 7c94421d0fcbe..914ba20347243 100644 --- a/src/mongo/db/s/operation_sharding_state_test.cpp +++ b/src/mongo/db/s/operation_sharding_state_test.cpp @@ -28,14 +28,31 @@ */ #include "mongo/db/s/operation_sharding_state.h" + +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { -const NamespaceString kNss("TestDB", "TestColl"); -const NamespaceString kAnotherNss("TestDB", "AnotherColl"); +const NamespaceString kNss = NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); +const NamespaceString kAnotherNss = + NamespaceString::createNamespaceString_forTest("TestDB", "AnotherColl"); using OperationShardingStateTest = ShardServerTestFixture; @@ -44,7 +61,7 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleDbVersion) { ScopedSetShardRole scopedSetShardRole(operationContext(), kNss, boost::none, dbv); auto& oss = OperationShardingState::get(operationContext()); - ASSERT_EQ(dbv, *oss.getDbVersion(kNss.db())); + ASSERT_EQ(dbv, *oss.getDbVersion(kNss.dbName())); } TEST_F(OperationShardingStateTest, ScopedSetShardRoleShardVersion) { @@ -100,7 +117,7 @@ TEST_F(OperationShardingStateTest, ScopedSetShardRoleIgnoresFixedDbVersion) { ScopedSetShardRole scopedSetShardRole(operationContext(), kNss, boost::none, dbv); auto& oss = OperationShardingState::get(operationContext()); - ASSERT_FALSE(oss.getDbVersion(kNss.db())); + ASSERT_FALSE(oss.getDbVersion(kNss.dbName())); } TEST_F(OperationShardingStateTest, ScopedSetShardRoleAllowedShardVersionsWithFixedDbVersion) { diff --git a/src/mongo/db/s/participant_block.idl b/src/mongo/db/s/participant_block.idl index 0ccf831d17735..0517e905d357b 100644 --- a/src/mongo/db/s/participant_block.idl +++ b/src/mongo/db/s/participant_block.idl @@ -55,7 +55,4 @@ commands: reason: type: 
object optional: true - allowViews: - # WARNING: This flag can be used only by coordinators running exclusively in FCV >= 7.0 - # TODO SERVER-68084: remove this flag - type: optionalBool \ No newline at end of file + reply_type: OkReply diff --git a/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp b/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp index fb5b113216daf..a06cc45da4245 100644 --- a/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp +++ b/src/mongo/db/s/periodic_sharded_index_consistency_checker.cpp @@ -28,20 +28,47 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/periodic_sharded_index_consistency_checker.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/auth/privilege.h" +#include "mongo/db/client.h" #include "mongo/db/curop.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/config/sharding_catalog_manager.h" +#include "mongo/db/s/periodic_sharded_index_consistency_checker.h" #include "mongo/db/s/sharding_runtime_d_params_gen.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_aggregate.h" #include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -162,7 +189,8 @@ void PeriodicShardedIndexConsistencyChecker::_launchShardedIndexConsistencyCheck // Stop counting if the agg command failed for one of the collections // to avoid recording a false count. - uassertStatusOKWithContext(status, str::stream() << "nss " << nss); + uassertStatusOKWithContext( + status, str::stream() << "nss " << nss.toStringForErrorMsg()); if (!responseBuilder.obj()["cursor"]["firstBatch"].Array().empty()) { numShardedCollsWithInconsistentIndexes++; @@ -194,7 +222,9 @@ void PeriodicShardedIndexConsistencyChecker::_launchShardedIndexConsistencyCheck "error"_attr = ex.toStatus()); } }, - Milliseconds(shardedIndexConsistencyCheckIntervalMS)); + Milliseconds(shardedIndexConsistencyCheckIntervalMS), + // TODO(SERVER-74658): Please revisit if this periodic job could be made killable. 
+ false /*isKillableByStepdown*/); _shardedIndexConsistencyChecker = periodicRunner->makeJob(std::move(job)); _shardedIndexConsistencyChecker.start(); } diff --git a/src/mongo/db/s/periodic_sharded_index_consistency_checker.h b/src/mongo/db/s/periodic_sharded_index_consistency_checker.h index 5d15f1b7d80f3..6c9ac5675a0a9 100644 --- a/src/mongo/db/s/periodic_sharded_index_consistency_checker.h +++ b/src/mongo/db/s/periodic_sharded_index_consistency_checker.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/platform/mutex.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/periodic_runner.h" namespace mongo { diff --git a/src/mongo/db/s/persistent_task_queue.h b/src/mongo/db/s/persistent_task_queue.h index 2bd70140bc6f1..688aae6afafb9 100644 --- a/src/mongo/db/s/persistent_task_queue.h +++ b/src/mongo/db/s/persistent_task_queue.h @@ -29,10 +29,34 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/idl/idl_parser.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { @@ -105,7 +129,8 @@ class PersistentTaskQueue final : public BlockingTaskQueue { template PersistentTaskQueue::PersistentTaskQueue(OperationContext* opCtx, NamespaceString storageNss) - : _storageNss(std::move(storageNss)), _mutex("persistentQueueLock:" + _storageNss.toString()) { + : _storageNss(std::move(storageNss)), + _mutex("persistentQueueLock:" + NamespaceStringUtil::serialize(_storageNss)) { DBDirectClient client(opCtx); @@ -224,7 +249,8 @@ PersistentTaskQueue::_loadNextRecord(DBDirectClient& client) { if (!bson.isEmpty()) { result = typename PersistentTaskQueue::Record{ bson.getField("_id").Long(), - T::parse(IDLParserContext("PersistentTaskQueue:" + _storageNss.toString()), + T::parse(IDLParserContext("PersistentTaskQueue:" + + NamespaceStringUtil::serialize(_storageNss)), bson.getObjectField("task"))}; } diff --git a/src/mongo/db/s/persistent_task_queue_test.cpp b/src/mongo/db/s/persistent_task_queue_test.cpp index d9dd6e3562c3d..5665029e67b52 100644 --- a/src/mongo/db/s/persistent_task_queue_test.cpp +++ b/src/mongo/db/s/persistent_task_queue_test.cpp @@ -27,13 +27,27 @@ * it in the license file. 
*/ +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include + #include "mongo/bson/bsonobj.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/persistent_task_queue.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/service_context.h" +#include "mongo/stdx/future.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -288,11 +302,6 @@ TEST_F(PersistentTaskQueueTest, TestKilledOperationContextWhileWaitingOnCV) { auto result = stdx::async(stdx::launch::async, [this, &q, &barrier] { ThreadClient tc("TestKilledOperationContextWhileWaitingOnCV", getServiceContext()); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - auto opCtx = tc->makeOperationContext(); barrier.countDownAndWait(); diff --git a/src/mongo/db/s/placement_history_bm.cpp b/src/mongo/db/s/placement_history_bm.cpp index e25f54cc60751..af6e564102990 100644 --- a/src/mongo/db/s/placement_history_bm.cpp +++ b/src/mongo/db/s/placement_history_bm.cpp @@ -29,22 +29,45 @@ #include - -#include "mongo/base/init.h" +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/config_server_op_observer.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" #include "mongo/db/vector_clock.h" #include "mongo/idl/server_parameter_test_util.h" -#include +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -54,7 +77,6 @@ namespace { // _doTest has empty implementation to honor the abstract class, but it is not used in the benchmark // framework. 
class BenchmarkConfigServerTestFixture : public ConfigServerTestFixture { - public: BenchmarkConfigServerTestFixture() : ConfigServerTestFixture() { ConfigServerTestFixture::setUp(); @@ -174,11 +196,7 @@ class BenchmarkConfigServerTestFixture : public ConfigServerTestFixture { } // namespace void BM_initPlacementHistory(benchmark::State& state) { - - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; - - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; BenchmarkConfigServerTestFixture fixture; @@ -192,7 +210,8 @@ void BM_initPlacementHistory(benchmark::State& state) { for (int i = 1; i <= nCollections; i++) { const std::string collName = "coll" + std::to_string(i); - fixture.setupCollectionWithChunks(NamespaceString("db1." + collName), nChunks); + fixture.setupCollectionWithChunks( + NamespaceString::createNamespaceString_forTest("db1." + collName), nChunks); } for (auto _ : state) { diff --git a/src/mongo/db/s/query_analysis_coordinator.cpp b/src/mongo/db/s/query_analysis_coordinator.cpp index 406d5a26897e0..3f1f2285f17a1 100644 --- a/src/mongo/db/s/query_analysis_coordinator.cpp +++ b/src/mongo/db/s/query_analysis_coordinator.cpp @@ -27,15 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/query_analysis_coordinator.h" - -#include "mongo/db/catalog_shard_feature_flag_gen.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/query_analysis_coordinator.h" +#include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/analyze_shard_key_server_parameters_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -45,7 +73,7 @@ namespace analyze_shard_key { namespace { MONGO_FAIL_POINT_DEFINE(disableQueryAnalysisCoordinator); -MONGO_FAIL_POINT_DEFINE(queryAnalysisCoordinatorDistributeSampleRateEqually); +MONGO_FAIL_POINT_DEFINE(queryAnalysisCoordinatorDistributeSamplesPerSecondEqually); const auto getQueryAnalysisCoordinator = ServiceContext::declareDecoration(); @@ -65,43 +93,42 @@ QueryAnalysisCoordinator* QueryAnalysisCoordinator::get(ServiceContext* serviceC } bool QueryAnalysisCoordinator::shouldRegisterReplicaSetAwareService() const { - // This is invoked when the Register above is constructed which is before FCV is set so we need - // to ignore FCV when checking if the feature flag is enabled. 
- return supportsCoordinatingQueryAnalysis(true /* isReplEnabled */, true /* ignoreFCV */); + return supportsCoordinatingQueryAnalysis(true /* isReplEnabled */); } void QueryAnalysisCoordinator::onConfigurationInsert(const QueryAnalyzerDocument& doc) { stdx::lock_guard lk(_mutex); LOGV2(7372308, "Detected new query analyzer configuration", "configuration"_attr = doc); - if (doc.getMode() == QueryAnalyzerModeEnum::kOff) { // Do not create an entry for it if the mode is "off". return; } - auto configuration = CollectionQueryAnalyzerConfiguration{ - doc.getNs(), doc.getCollectionUuid(), *doc.getSampleRate(), doc.getStartTime()}; - - _configurations.emplace(doc.getCollectionUuid(), std::move(configuration)); + doc.getNs(), doc.getCollectionUuid(), *doc.getSamplesPerSecond(), doc.getStartTime()}; + _configurations.emplace(doc.getNs(), std::move(configuration)); } void QueryAnalysisCoordinator::onConfigurationUpdate(const QueryAnalyzerDocument& doc) { stdx::lock_guard lk(_mutex); LOGV2(7372309, "Detected a query analyzer configuration update", "configuration"_attr = doc); - if (doc.getMode() == QueryAnalyzerModeEnum::kOff) { // Remove the entry for it if the mode has been set to "off". - _configurations.erase(doc.getCollectionUuid()); + _configurations.erase(doc.getNs()); } else { - auto it = _configurations.find(doc.getCollectionUuid()); + auto it = _configurations.find(doc.getNs()); if (it == _configurations.end()) { - auto configuration = CollectionQueryAnalyzerConfiguration{ - doc.getNs(), doc.getCollectionUuid(), *doc.getSampleRate(), doc.getStartTime()}; - _configurations.emplace(doc.getCollectionUuid(), std::move(configuration)); + auto configuration = CollectionQueryAnalyzerConfiguration{doc.getNs(), + doc.getCollectionUuid(), + *doc.getSamplesPerSecond(), + doc.getStartTime()}; + _configurations.emplace(doc.getNs(), std::move(configuration)); } else { - it->second.setSampleRate(*doc.getSampleRate()); + it->second.setSamplesPerSecond(*doc.getSamplesPerSecond()); + it->second.setStartTime(doc.getStartTime()); + // The collection could have been dropped and recreated. 
+ it->second.setCollectionUuid(doc.getCollectionUuid()); } } } @@ -110,8 +137,7 @@ void QueryAnalysisCoordinator::onConfigurationDelete(const QueryAnalyzerDocument stdx::lock_guard lk(_mutex); LOGV2(7372310, "Detected a query analyzer configuration delete", "configuration"_attr = doc); - - _configurations.erase(doc.getCollectionUuid()); + _configurations.erase(doc.getNs()); } Date_t QueryAnalysisCoordinator::_getMinLastPingTime() { @@ -183,10 +209,11 @@ void QueryAnalysisCoordinator::onStartup(OperationContext* opCtx) { auto doc = QueryAnalyzerDocument::parse(IDLParserContext("QueryAnalysisCoordinator"), cursor->next()); invariant(doc.getMode() != QueryAnalyzerModeEnum::kOff); - auto configuration = CollectionQueryAnalyzerConfiguration{ - doc.getNs(), doc.getCollectionUuid(), *doc.getSampleRate(), doc.getStartTime()}; - auto [_, inserted] = - _configurations.emplace(doc.getCollectionUuid(), std::move(configuration)); + auto configuration = CollectionQueryAnalyzerConfiguration{doc.getNs(), + doc.getCollectionUuid(), + *doc.getSamplesPerSecond(), + doc.getStartTime()}; + auto [_, inserted] = _configurations.emplace(doc.getNs(), std::move(configuration)); invariant(inserted); } } @@ -284,9 +311,9 @@ QueryAnalysisCoordinator::getNewConfigurationsForSampler(OperationContext* opCtx // If the coordinator doesn't yet have a full view of the query distribution or no samplers // have executed any queries, each sampler gets an equal ratio of the sample rates. Otherwise, // the ratio is weighted based on the query distribution across samplers. - double sampleRateRatio = + double samplesPerSecRatio = ((numWeights < numActiveSamplers) || (totalWeight == 0) || - MONGO_unlikely(queryAnalysisCoordinatorDistributeSampleRateEqually.shouldFail())) + MONGO_unlikely(queryAnalysisCoordinatorDistributeSamplesPerSecondEqually.shouldFail())) ? 
(1.0 / numActiveSamplers) : (weight / totalWeight); @@ -295,7 +322,7 @@ QueryAnalysisCoordinator::getNewConfigurationsForSampler(OperationContext* opCtx for (const auto& [_, configuration] : _configurations) { configurations.emplace_back(configuration.getNs(), configuration.getCollectionUuid(), - sampleRateRatio * configuration.getSampleRate(), + samplesPerSecRatio * configuration.getSamplesPerSecond(), configuration.getStartTime()); } return configurations; diff --git a/src/mongo/db/s/query_analysis_coordinator.h b/src/mongo/db/s/query_analysis_coordinator.h index 911e00c3f0c41..f0771a779fb34 100644 --- a/src/mongo/db/s/query_analysis_coordinator.h +++ b/src/mongo/db/s/query_analysis_coordinator.h @@ -29,14 +29,26 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/catalog/type_mongos.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/periodic_runner.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" namespace mongo { namespace analyze_shard_key { @@ -53,7 +65,7 @@ namespace analyze_shard_key { class QueryAnalysisCoordinator : public ReplicaSetAwareService { public: using CollectionQueryAnalyzerConfigurationMap = - stdx::unordered_map; + stdx::unordered_map; /** * Stores the last ping time and the last exponential moving average number of queries executed * per second for a sampler. diff --git a/src/mongo/db/s/query_analysis_coordinator_test.cpp b/src/mongo/db/s/query_analysis_coordinator_test.cpp index 2b03b7e78a22e..14278e5fac8a1 100644 --- a/src/mongo/db/s/query_analysis_coordinator_test.cpp +++ b/src/mongo/db/s/query_analysis_coordinator_test.cpp @@ -29,17 +29,32 @@ #include "mongo/db/s/query_analysis_coordinator.h" +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/s/config/config_server_test_fixture.h" -#include "mongo/db/s/query_analysis_op_observer.h" +#include "mongo/db/s/query_analysis_op_observer_configsvr.h" +#include "mongo/db/service_context_d_test_fixture.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" #include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/catalog/type_mongos.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/uuid.h" #include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -56,7 +71,7 @@ class QueryAnalysisCoordinatorTest : public ConfigServerTestFixture { ConfigServerTestFixture::setupOpObservers(); auto opObserverRegistry = checked_cast(getServiceContext()->getOpObserver()); - opObserverRegistry->addObserver(std::make_unique()); + opObserverRegistry->addObserver(std::make_unique()); } protected: @@ -72,7 +87,7 @@ 
class QueryAnalysisCoordinatorTest : public ConfigServerTestFixture { const NamespaceString& nss, const UUID& collUuid, QueryAnalyzerModeEnum mode, - boost::optional sampleRate = boost::none, + boost::optional samplesPerSec = boost::none, boost::optional startTime = boost::none, boost::optional stopTime = boost::none) { QueryAnalyzerDocument doc; @@ -80,7 +95,7 @@ class QueryAnalysisCoordinatorTest : public ConfigServerTestFixture { doc.setCollectionUuid(collUuid); QueryAnalyzerConfiguration configuration; configuration.setMode(mode); - configuration.setSampleRate(sampleRate); + configuration.setSamplesPerSecond(samplesPerSec); doc.setConfiguration(configuration); doc.setStartTime(startTime ? *startTime : now()); if (mode == QueryAnalyzerModeEnum::kOff) { @@ -92,12 +107,12 @@ class QueryAnalysisCoordinatorTest : public ConfigServerTestFixture { void assertContainsConfiguration( const QueryAnalysisCoordinator::CollectionQueryAnalyzerConfigurationMap& configurations, QueryAnalyzerDocument analyzerDoc) { - auto it = configurations.find(analyzerDoc.getCollectionUuid()); + auto it = configurations.find(analyzerDoc.getNs()); ASSERT(it != configurations.end()); auto& configuration = it->second; ASSERT_EQ(configuration.getNs(), analyzerDoc.getNs()); ASSERT_EQ(configuration.getCollectionUuid(), analyzerDoc.getCollectionUuid()); - ASSERT_EQ(configuration.getSampleRate(), *analyzerDoc.getSampleRate()); + ASSERT_EQ(configuration.getSamplesPerSecond(), *analyzerDoc.getSamplesPerSecond()); ASSERT_EQ(configuration.getStartTime(), analyzerDoc.getStartTime()); } @@ -105,12 +120,12 @@ class QueryAnalysisCoordinatorTest : public ConfigServerTestFixture { std::vector& configurations, const NamespaceString& nss, const UUID& collUuid, - double sampleRate, + double samplesPerSec, Date_t startTime) { for (const auto& configuration : configurations) { if (configuration.getNs() == nss) { ASSERT_EQ(configuration.getCollectionUuid(), collUuid); - ASSERT_EQ(configuration.getSampleRate(), sampleRate); + ASSERT_EQ(configuration.getSamplesPerSecond(), samplesPerSec); ASSERT_EQ(configuration.getStartTime(), startTime); return; } @@ -173,7 +188,6 @@ class QueryAnalysisCoordinatorTest : public ConfigServerTestFixture { private: const std::shared_ptr _mockClock = std::make_shared(); - RAIIServerParameterControllerForTest _featureFlagController{"featureFlagAnalyzeShardKey", true}; RAIIServerParameterControllerForTest _inactiveThresholdController{ "queryAnalysisSamplerInActiveThresholdSecs", inActiveThresholdSecs}; }; @@ -211,7 +225,7 @@ TEST_F(QueryAnalysisCoordinatorTest, CreateConfigurationsOnInsert) { assertContainsConfiguration(configurations, analyzerDoc1); } -TEST_F(QueryAnalysisCoordinatorTest, UpdateConfigurationsOnSampleRateUpdate) { +TEST_F(QueryAnalysisCoordinatorTest, UpdateConfigurationsSameCollectionUUid) { auto coordinator = QueryAnalysisCoordinator::get(operationContext()); // There are no configurations initially. 
@@ -219,7 +233,7 @@ TEST_F(QueryAnalysisCoordinatorTest, UpdateConfigurationsOnSampleRateUpdate) { ASSERT(configurations.empty()); auto analyzerDocPreUpdate = - makeConfigQueryAnalyzersDocument(nss0, collUuid0, QueryAnalyzerModeEnum::kFull, 0.5); + makeConfigQueryAnalyzersDocument(nss0, collUuid0, QueryAnalyzerModeEnum::kFull, 0.5, now()); uassertStatusOK(insertToConfigCollection(operationContext(), NamespaceString::kConfigQueryAnalyzersNamespace, analyzerDocPreUpdate.toBSON())); @@ -228,13 +242,16 @@ TEST_F(QueryAnalysisCoordinatorTest, UpdateConfigurationsOnSampleRateUpdate) { ASSERT_EQ(configurations.size(), 1U); assertContainsConfiguration(configurations, analyzerDocPreUpdate); + advanceTime(Seconds(1)); + auto analyzerDocPostUpdate = - makeConfigQueryAnalyzersDocument(nss0, collUuid0, QueryAnalyzerModeEnum::kFull, 1.5); - uassertStatusOK(updateToConfigCollection(operationContext(), - NamespaceString::kConfigQueryAnalyzersNamespace, - BSON("_id" << collUuid0), - analyzerDocPostUpdate.toBSON(), - false /* upsert */)); + makeConfigQueryAnalyzersDocument(nss0, collUuid0, QueryAnalyzerModeEnum::kFull, 1.5, now()); + uassertStatusOK(updateToConfigCollection( + operationContext(), + NamespaceString::kConfigQueryAnalyzersNamespace, + BSON(QueryAnalyzerDocument::kNsFieldName << nss0.toString_forTest()), + analyzerDocPostUpdate.toBSON(), + false /* upsert */)); // The update should cause the configuration to have the new sample rate. configurations = coordinator->getConfigurationsForTest(); @@ -242,7 +259,42 @@ TEST_F(QueryAnalysisCoordinatorTest, UpdateConfigurationsOnSampleRateUpdate) { assertContainsConfiguration(configurations, analyzerDocPostUpdate); } -TEST_F(QueryAnalysisCoordinatorTest, UpdateOrRemoveConfigurationsOnModeUpdate) { +TEST_F(QueryAnalysisCoordinatorTest, UpdateConfigurationDifferentCollectionUUid) { + auto coordinator = QueryAnalysisCoordinator::get(operationContext()); + + // There are no configurations initially. + auto configurations = coordinator->getConfigurationsForTest(); + ASSERT(configurations.empty()); + + auto analyzerDocPreUpdate = + makeConfigQueryAnalyzersDocument(nss0, collUuid0, QueryAnalyzerModeEnum::kFull, 0.5, now()); + uassertStatusOK(insertToConfigCollection(operationContext(), + NamespaceString::kConfigQueryAnalyzersNamespace, + analyzerDocPreUpdate.toBSON())); + + configurations = coordinator->getConfigurationsForTest(); + ASSERT_EQ(configurations.size(), 1U); + assertContainsConfiguration(configurations, analyzerDocPreUpdate); + + advanceTime(Seconds(1)); + + auto analyzerDocPostUpdate = makeConfigQueryAnalyzersDocument( + nss0, UUID::gen(), QueryAnalyzerModeEnum::kFull, 1.5, now()); + uassertStatusOK(updateToConfigCollection( + operationContext(), + NamespaceString::kConfigQueryAnalyzersNamespace, + BSON(QueryAnalyzerDocument::kNsFieldName << nss0.toString_forTest()), + analyzerDocPostUpdate.toBSON(), + false /* upsert */)); + + // The update should cause the configuration to have the new collection uuid, sample rate and + // start time. + configurations = coordinator->getConfigurationsForTest(); + ASSERT_EQ(configurations.size(), 1U); + assertContainsConfiguration(configurations, analyzerDocPostUpdate); +} + +TEST_F(QueryAnalysisCoordinatorTest, RemoveOrCreateConfigurationsOnModeUpdate) { auto coordinator = QueryAnalysisCoordinator::get(operationContext()); // There are no configurations initially. 
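Both update tests above exercise the coordinator's namespace-keyed upsert path. As a standalone restatement of that logic, illustrative only and assuming the map typedef, getters and setters shown in this patch:

```cpp
#include "mongo/db/s/query_analysis_coordinator.h"
#include "mongo/s/analyze_shard_key_documents_gen.h"

namespace mongo::analyze_shard_key {

// Illustrative restatement of the namespace-keyed upsert performed by
// QueryAnalysisCoordinator::onConfigurationUpdate in the diff above.
void upsertConfiguration(
    QueryAnalysisCoordinator::CollectionQueryAnalyzerConfigurationMap& configurations,
    const QueryAnalyzerDocument& doc) {
    auto it = configurations.find(doc.getNs());
    if (it == configurations.end()) {
        // First configuration seen for this namespace: create a fresh entry.
        configurations.emplace(doc.getNs(),
                               CollectionQueryAnalyzerConfiguration{doc.getNs(),
                                                                    doc.getCollectionUuid(),
                                                                    *doc.getSamplesPerSecond(),
                                                                    doc.getStartTime()});
    } else {
        // The namespace is already tracked: refresh the rate, start time and UUID, since
        // the collection could have been dropped and recreated with a new UUID.
        it->second.setSamplesPerSecond(*doc.getSamplesPerSecond());
        it->second.setStartTime(doc.getStartTime());
        it->second.setCollectionUuid(doc.getCollectionUuid());
    }
}

}  // namespace mongo::analyze_shard_key
```

Keying by namespace rather than collection UUID is what lets a drop-and-recreate update the existing entry in place instead of leaving a stale one behind.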
@@ -261,11 +313,12 @@ TEST_F(QueryAnalysisCoordinatorTest, UpdateOrRemoveConfigurationsOnModeUpdate) { auto analyzerDocPostUpdate0 = makeConfigQueryAnalyzersDocument(nss0, collUuid0, QueryAnalyzerModeEnum::kOff); - uassertStatusOK(updateToConfigCollection(operationContext(), - NamespaceString::kConfigQueryAnalyzersNamespace, - BSON("_id" << collUuid0), - analyzerDocPostUpdate0.toBSON(), - false /* upsert */)); + uassertStatusOK(updateToConfigCollection( + operationContext(), + NamespaceString::kConfigQueryAnalyzersNamespace, + BSON(QueryAnalyzerDocument::kNsFieldName << nss0.toString_forTest()), + analyzerDocPostUpdate0.toBSON(), + false /* upsert */)); // The update to mode "off" should cause the configuration to get removed. configurations = coordinator->getConfigurationsForTest(); @@ -273,11 +326,12 @@ TEST_F(QueryAnalysisCoordinatorTest, UpdateOrRemoveConfigurationsOnModeUpdate) { auto analyzerDocPostUpdate1 = makeConfigQueryAnalyzersDocument(nss0, collUuid0, QueryAnalyzerModeEnum::kFull, 15); - uassertStatusOK(updateToConfigCollection(operationContext(), - NamespaceString::kConfigQueryAnalyzersNamespace, - BSON("_id" << collUuid0), - analyzerDocPostUpdate1.toBSON(), - false /* upsert */)); + uassertStatusOK(updateToConfigCollection( + operationContext(), + NamespaceString::kConfigQueryAnalyzersNamespace, + BSON(QueryAnalyzerDocument::kNsFieldName << nss0.toString_forTest()), + analyzerDocPostUpdate1.toBSON(), + false /* upsert */)); // The update to mode "on" should cause the configuration to get recreated. configurations = coordinator->getConfigurationsForTest(); @@ -632,12 +686,12 @@ TEST_F(QueryAnalysisCoordinatorTest, GetNewConfigurationsOneSamplerBasic) { assertContainsConfiguration(configurations, analyzerDoc0.getNs(), analyzerDoc0.getCollectionUuid(), - *analyzerDoc0.getSampleRate(), + *analyzerDoc0.getSamplesPerSecond(), startTime0); assertContainsConfiguration(configurations, analyzerDoc1.getNs(), analyzerDoc1.getCollectionUuid(), - *analyzerDoc1.getSampleRate(), + *analyzerDoc1.getSamplesPerSecond(), startTime1); } @@ -646,7 +700,7 @@ TEST_F(QueryAnalysisCoordinatorTest, GetNewConfigurationsOneSamplerOneDisabledCo auto startTime0 = now(); auto analyzerDoc0 = makeConfigQueryAnalyzersDocument( - nss0, collUuid0, QueryAnalyzerModeEnum::kOff, boost::none /* sampleRate */, startTime0); + nss0, collUuid0, QueryAnalyzerModeEnum::kOff, boost::none /* samplesPerSec */, startTime0); uassertStatusOK(insertToConfigCollection(operationContext(), NamespaceString::kConfigQueryAnalyzersNamespace, analyzerDoc0.toBSON())); @@ -667,7 +721,7 @@ TEST_F(QueryAnalysisCoordinatorTest, GetNewConfigurationsOneSamplerOneDisabledCo assertContainsConfiguration(configurations, analyzerDoc1.getNs(), analyzerDoc1.getCollectionUuid(), - *analyzerDoc1.getSampleRate(), + *analyzerDoc1.getSamplesPerSecond(), startTime1); } @@ -713,12 +767,12 @@ TEST_F(QueryAnalysisCoordinatorTest, GetNewConfigurationsMultipleSamplersBasic) assertContainsConfiguration(configurations0, analyzerDoc0.getNs(), analyzerDoc0.getCollectionUuid(), - expectedRatio0 * analyzerDoc0.getSampleRate().get(), + expectedRatio0 * analyzerDoc0.getSamplesPerSecond().get(), startTime0); assertContainsConfiguration(configurations0, analyzerDoc1.getNs(), analyzerDoc1.getCollectionUuid(), - expectedRatio0 * analyzerDoc1.getSampleRate().get(), + expectedRatio0 * analyzerDoc1.getSamplesPerSecond().get(), startTime1); // Query distribution after: [1, 4.5]. 
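To make the expected ratios in these assertions concrete: for the query distribution [1, 4.5] noted above, the total weight is 5.5, so the sampler that reported 1 query/sec receives 1/5.5 ≈ 0.18 and the one that reported 4.5 receives 4.5/5.5 ≈ 0.82 of each collection's configured samplesPerSecond. A small worked example follows; the configured rate of 100 is made up, and only the ratio formula mirrors getNewConfigurationsForSampler:

```cpp
// Worked example of the per-sampler weighting; all concrete numbers are hypothetical.
const double weight0 = 1.0;                    // smoothed queries/sec reported by sampler 0
const double weight1 = 4.5;                    // smoothed queries/sec reported by sampler 1
const double totalWeight = weight0 + weight1;  // 5.5

const double configuredSamplesPerSec = 100.0;  // samplesPerSecond from the analyzer document

const double sampler0Share = (weight0 / totalWeight) * configuredSamplesPerSec;  // ~18.2
const double sampler1Share = (weight1 / totalWeight) * configuredSamplesPerSec;  // ~81.8
```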
@@ -729,12 +783,12 @@ TEST_F(QueryAnalysisCoordinatorTest, GetNewConfigurationsMultipleSamplersBasic) assertContainsConfiguration(configurations1, analyzerDoc0.getNs(), analyzerDoc0.getCollectionUuid(), - expectedRatio1 * analyzerDoc0.getSampleRate().get(), + expectedRatio1 * analyzerDoc0.getSamplesPerSecond().get(), startTime0); assertContainsConfiguration(configurations1, analyzerDoc1.getNs(), analyzerDoc1.getCollectionUuid(), - expectedRatio1 * analyzerDoc1.getSampleRate().get(), + expectedRatio1 * analyzerDoc1.getSamplesPerSecond().get(), startTime1); // Query distribution after: [1.5, 4.5]. @@ -744,12 +798,12 @@ TEST_F(QueryAnalysisCoordinatorTest, GetNewConfigurationsMultipleSamplersBasic) assertContainsConfiguration(configurations0, analyzerDoc0.getNs(), analyzerDoc0.getCollectionUuid(), - expectedRatio0 * analyzerDoc0.getSampleRate().get(), + expectedRatio0 * analyzerDoc0.getSamplesPerSecond().get(), startTime0); assertContainsConfiguration(configurations0, analyzerDoc1.getNs(), analyzerDoc1.getCollectionUuid(), - expectedRatio0 * analyzerDoc1.getSampleRate().get(), + expectedRatio0 * analyzerDoc1.getSamplesPerSecond().get(), startTime1); // Query distribution after: [1.5, 0]. @@ -765,12 +819,12 @@ TEST_F(QueryAnalysisCoordinatorTest, GetNewConfigurationsMultipleSamplersBasic) assertContainsConfiguration(configurations0, analyzerDoc0.getNs(), analyzerDoc0.getCollectionUuid(), - expectedRatio0 * analyzerDoc0.getSampleRate().get(), + expectedRatio0 * analyzerDoc0.getSamplesPerSecond().get(), startTime0); assertContainsConfiguration(configurations0, analyzerDoc1.getNs(), analyzerDoc1.getCollectionUuid(), - expectedRatio0 * analyzerDoc1.getSampleRate().get(), + expectedRatio0 * analyzerDoc1.getSamplesPerSecond().get(), startTime1); } diff --git a/src/mongo/db/s/query_analysis_op_observer.cpp b/src/mongo/db/s/query_analysis_op_observer.cpp index 71b4c4d101b3b..887a33ff3e195 100644 --- a/src/mongo/db/s/query_analysis_op_observer.cpp +++ b/src/mongo/db/s/query_analysis_op_observer.cpp @@ -27,126 +27,70 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/bson/timestamp.h" #include "mongo/db/s/query_analysis_coordinator.h" #include "mongo/db/s/query_analysis_op_observer.h" #include "mongo/db/s/query_analysis_writer.h" -#include "mongo/logv2/log.h" -#include "mongo/s/catalog/type_mongos.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/analyze_shard_key_documents_gen.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault namespace mongo { namespace analyze_shard_key { -namespace { - -const auto docToDeleteDecoration = OperationContext::declareDecoration(); - -} // namespace - -void QueryAnalysisOpObserver::onInserts(OperationContext* opCtx, - const CollectionPtr& coll, - std::vector::const_iterator begin, - std::vector::const_iterator end, - std::vector fromMigrate, - bool defaultFromMigrate) { - if (analyze_shard_key::supportsCoordinatingQueryAnalysis(opCtx)) { - if (coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) { - for (auto it = begin; it != end; ++it) { - const auto parsedDoc = QueryAnalyzerDocument::parse( - IDLParserContext("QueryAnalysisOpObserver::onInserts"), it->doc); - opCtx->recoveryUnit()->onCommit([parsedDoc](OperationContext* opCtx, - boost::optional) { - analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onConfigurationInsert( - parsedDoc); - }); - } - } else if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - coll->ns() == MongosType::ConfigNS) { - for (auto it = begin; it != end; ++it) { - const auto parsedDoc = uassertStatusOK(MongosType::fromBSON(it->doc)); - opCtx->recoveryUnit()->onCommit( - [parsedDoc](OperationContext* opCtx, boost::optional) { - analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onSamplerInsert( - parsedDoc); - }); - } - } +void QueryAnalysisOpObserver::insertInConfigQueryAnalyzersNamespaceImpl( + OperationContext* opCtx, + std::vector::const_iterator begin, + std::vector::const_iterator end) { + for (auto it = begin; it != end; ++it) { + auto parsedDoc = QueryAnalyzerDocument::parse( + IDLParserContext("QueryAnalysisOpObserver::onInserts"), it->doc); + opCtx->recoveryUnit()->onCommit([parsedDoc = std::move(parsedDoc)]( + OperationContext* opCtx, boost::optional) { + analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onConfigurationInsert( + parsedDoc); + }); } } -void QueryAnalysisOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) { - if (analyze_shard_key::supportsCoordinatingQueryAnalysis(opCtx)) { - if (args.coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) { - const auto parsedDoc = QueryAnalyzerDocument::parse( - IDLParserContext("QueryAnalysisOpObserver::onUpdate"), args.updateArgs->updatedDoc); - opCtx->recoveryUnit()->onCommit( - [parsedDoc](OperationContext* opCtx, boost::optional) { - analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onConfigurationUpdate( - parsedDoc); - }); - } else if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - args.coll->ns() == MongosType::ConfigNS) { - const auto parsedDoc = - uassertStatusOK(MongosType::fromBSON(args.updateArgs->updatedDoc)); - opCtx->recoveryUnit()->onCommit([parsedDoc](OperationContext* opCtx, - boost::optional) { - analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onSamplerUpdate(parsedDoc); - }); - } - } - - if (analyze_shard_key::supportsPersistingSampledQueries(opCtx) && args.updateArgs->sampleId && - 
opCtx->writesAreReplicated()) { - analyze_shard_key::QueryAnalysisWriter::get(opCtx) - ->addDiff(*args.updateArgs->sampleId, - args.coll->ns(), - args.coll->uuid(), - args.updateArgs->preImageDoc, - args.updateArgs->updatedDoc) - .getAsync([](auto) {}); - } +void QueryAnalysisOpObserver::updateToConfigQueryAnalyzersNamespaceImpl( + OperationContext* opCtx, const OplogUpdateEntryArgs& args) { + auto parsedDoc = QueryAnalyzerDocument::parse( + IDLParserContext("QueryAnalysisOpObserver::onUpdate"), args.updateArgs->updatedDoc); + opCtx->recoveryUnit()->onCommit([parsedDoc = std::move(parsedDoc)](OperationContext* opCtx, + boost::optional) { + analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onConfigurationUpdate(parsedDoc); + }); } -void QueryAnalysisOpObserver::aboutToDelete(OperationContext* opCtx, - const CollectionPtr& coll, - BSONObj const& doc) { - if (analyze_shard_key::supportsCoordinatingQueryAnalysis(opCtx)) { - if (coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace || - coll->ns() == MongosType::ConfigNS) { - docToDeleteDecoration(opCtx) = doc; - } - } +void QueryAnalysisOpObserver::updateWithSampleIdImpl(OperationContext* opCtx, + const OplogUpdateEntryArgs& args) { + analyze_shard_key::QueryAnalysisWriter::get(opCtx) + ->addDiff(*args.updateArgs->sampleId, + args.coll->ns(), + args.coll->uuid(), + args.updateArgs->preImageDoc, + args.updateArgs->updatedDoc) + .getAsync([](auto) {}); } -void QueryAnalysisOpObserver::onDelete(OperationContext* opCtx, - const CollectionPtr& coll, - StmtId stmtId, - const OplogDeleteEntryArgs& args) { - if (analyze_shard_key::supportsCoordinatingQueryAnalysis(opCtx)) { - if (coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) { - auto& doc = docToDeleteDecoration(opCtx); - invariant(!doc.isEmpty()); - const auto parsedDoc = QueryAnalyzerDocument::parse( - IDLParserContext("QueryAnalysisOpObserver::onDelete"), doc); - opCtx->recoveryUnit()->onCommit( - [parsedDoc](OperationContext* opCtx, boost::optional) { - analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onConfigurationDelete( - parsedDoc); - }); - } else if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - coll->ns() == MongosType::ConfigNS) { - auto& doc = docToDeleteDecoration(opCtx); - invariant(!doc.isEmpty()); - const auto parsedDoc = uassertStatusOK(MongosType::fromBSON(doc)); - opCtx->recoveryUnit()->onCommit([parsedDoc](OperationContext* opCtx, - boost::optional) { - analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onSamplerDelete(parsedDoc); - }); - } - } +void QueryAnalysisOpObserver::deleteFromConfigQueryAnalyzersNamespaceImpl( + OperationContext* opCtx, const OplogDeleteEntryArgs& args, const BSONObj& doc) { + auto parsedDoc = + QueryAnalyzerDocument::parse(IDLParserContext("QueryAnalysisOpObserver::onDelete"), doc); + opCtx->recoveryUnit()->onCommit([parsedDoc = std::move(parsedDoc)](OperationContext* opCtx, + boost::optional) { + analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onConfigurationDelete(parsedDoc); + }); } } // namespace analyze_shard_key diff --git a/src/mongo/db/s/query_analysis_op_observer.h b/src/mongo/db/s/query_analysis_op_observer.h index 1a13e7e6134c8..d5056270514cd 100644 --- a/src/mongo/db/s/query_analysis_op_observer.h +++ b/src/mongo/db/s/query_analysis_op_observer.h @@ -29,7 +29,15 @@ #pragma once +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" 
+#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/session/logical_session_id.h" namespace mongo { namespace analyze_shard_key { @@ -37,7 +45,7 @@ namespace analyze_shard_key { /** * OpObserver for query analysis. */ -class QueryAnalysisOpObserver final : public OpObserver { +class QueryAnalysisOpObserver : public OpObserverNoop { QueryAnalysisOpObserver(const QueryAnalysisOpObserver&) = delete; QueryAnalysisOpObserver& operator=(const QueryAnalysisOpObserver&) = delete; @@ -45,211 +53,41 @@ class QueryAnalysisOpObserver final : public OpObserver { QueryAnalysisOpObserver() = default; ~QueryAnalysisOpObserver() = default; - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} - - void onInserts(OperationContext* opCtx, - const CollectionPtr& coll, - std::vector::const_iterator first, - std::vector::const_iterator last, - std::vector fromMigrate, - bool defaultFromMigrate) final; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} - - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; - - void aboutToDelete(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& doc) final; - - void onDelete(OperationContext* opCtx, - const CollectionPtr& coll, - StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} - - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) 
final {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final {} - using OpObserver::onDropCollection; - repl::OpTime onDropCollection(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid, - std::uint64_t numRecords, - CollectionDropType dropType) final { - return repl::OpTime(); - }; - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) final {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( + virtual void onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) = 0; + + virtual void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) = 0; + + virtual void aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) = 0; + + virtual void onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) = 0; + +protected: + void insertInConfigQueryAnalyzersNamespaceImpl( OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const 
TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) final{}; - -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final { - } + std::vector::const_iterator begin, + std::vector::const_iterator end); + void updateToConfigQueryAnalyzersNamespaceImpl(OperationContext* opCtx, + const OplogUpdateEntryArgs& args); + void updateWithSampleIdImpl(OperationContext* opCtx, const OplogUpdateEntryArgs& args); + void deleteFromConfigQueryAnalyzersNamespaceImpl(OperationContext* opCtx, + const OplogDeleteEntryArgs& args, + const BSONObj& doc); }; } // namespace analyze_shard_key diff --git a/src/mongo/db/s/query_analysis_op_observer_configsvr.cpp b/src/mongo/db/s/query_analysis_op_observer_configsvr.cpp new file mode 100644 index 0000000000000..7e1b5186afdc7 --- /dev/null +++ b/src/mongo/db/s/query_analysis_op_observer_configsvr.cpp @@ -0,0 +1,128 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include + +#include +#include +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/s/query_analysis_coordinator.h" +#include "mongo/db/s/query_analysis_op_observer_configsvr.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/s/catalog/type_mongos.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault + +namespace mongo { +namespace analyze_shard_key { + +namespace { + +const auto docToDeleteDecoration = OplogDeleteEntryArgs::declareDecoration(); + +} // namespace + +void QueryAnalysisOpObserverConfigSvr::onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator begin, + std::vector::const_iterator end, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { + const auto& ns = coll->ns(); + + if (ns == NamespaceString::kConfigQueryAnalyzersNamespace) { + insertInConfigQueryAnalyzersNamespaceImpl(opCtx, begin, end); + } else if (ns == MongosType::ConfigNS) { + for (auto it = begin; it != end; ++it) { + const auto parsedDoc = uassertStatusOK(MongosType::fromBSON(it->doc)); + opCtx->recoveryUnit()->onCommit([parsedDoc](OperationContext* opCtx, + boost::optional) { + analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onSamplerInsert(parsedDoc); + }); + } + } +} + +void QueryAnalysisOpObserverConfigSvr::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { + const auto& ns = args.coll->ns(); + + if (ns == NamespaceString::kConfigQueryAnalyzersNamespace) { + updateToConfigQueryAnalyzersNamespaceImpl(opCtx, args); + } else if (ns == MongosType::ConfigNS) { + const auto parsedDoc = uassertStatusOK(MongosType::fromBSON(args.updateArgs->updatedDoc)); + opCtx->recoveryUnit()->onCommit( + [parsedDoc](OperationContext* opCtx, boost::optional) { + analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onSamplerUpdate(parsedDoc); + }); + } +} + +void QueryAnalysisOpObserverConfigSvr::aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { + const auto& ns = coll->ns(); + + if (ns == NamespaceString::kConfigQueryAnalyzersNamespace || ns == MongosType::ConfigNS) { + docToDeleteDecoration(args) = doc.getOwned(); + } +} + +void QueryAnalysisOpObserverConfigSvr::onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { + const auto& ns = coll->ns(); + + if (ns == NamespaceString::kConfigQueryAnalyzersNamespace) { + auto& doc = docToDeleteDecoration(args); + invariant(!doc.isEmpty()); + deleteFromConfigQueryAnalyzersNamespaceImpl(opCtx, args, doc); + } else if (ns == MongosType::ConfigNS) { + auto& doc = docToDeleteDecoration(args); + invariant(!doc.isEmpty()); + const auto parsedDoc = uassertStatusOK(MongosType::fromBSON(doc)); + opCtx->recoveryUnit()->onCommit( + [parsedDoc](OperationContext* opCtx, boost::optional) { + analyze_shard_key::QueryAnalysisCoordinator::get(opCtx)->onSamplerDelete(parsedDoc); + }); + } +} +} // namespace analyze_shard_key +} // namespace mongo diff --git a/src/mongo/db/s/query_analysis_op_observer_configsvr.h b/src/mongo/db/s/query_analysis_op_observer_configsvr.h new file mode 100644 index 0000000000000..7844758459b54 --- /dev/null +++ 
b/src/mongo/db/s/query_analysis_op_observer_configsvr.h @@ -0,0 +1,86 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/s/query_analysis_op_observer.h" +#include "mongo/db/session/logical_session_id.h" + +namespace mongo { +namespace analyze_shard_key { + +/** + * OpObserver for query analysis on the config server. + */ +class QueryAnalysisOpObserverConfigSvr final : public QueryAnalysisOpObserver { + QueryAnalysisOpObserverConfigSvr(const QueryAnalysisOpObserverConfigSvr&) = delete; + QueryAnalysisOpObserverConfigSvr& operator=(const QueryAnalysisOpObserverConfigSvr&) = delete; + +public: + QueryAnalysisOpObserverConfigSvr() = default; + ~QueryAnalysisOpObserverConfigSvr() = default; + + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfig, NamespaceFilter::kConfig}; + } + + void onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; +}; + +} // namespace analyze_shard_key +} // namespace mongo diff --git a/src/mongo/db/s/query_analysis_op_observer_rs.cpp b/src/mongo/db/s/query_analysis_op_observer_rs.cpp new file mode 100644 index 0000000000000..7a9dd98859765 --- /dev/null +++ b/src/mongo/db/s/query_analysis_op_observer_rs.cpp @@ -0,0 +1,97 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include + +#include +#include + +#include "mongo/db/namespace_string.h" +#include "mongo/db/s/query_analysis_op_observer_rs.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault + +namespace mongo { +namespace analyze_shard_key { + +namespace { + +const auto docToDeleteDecoration = OplogDeleteEntryArgs::declareDecoration(); + +} // namespace + +void QueryAnalysisOpObserverRS::onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator begin, + std::vector::const_iterator end, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { + if (coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) { + insertInConfigQueryAnalyzersNamespaceImpl(opCtx, begin, end); + } +} + +void QueryAnalysisOpObserverRS::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { + if (args.coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) { + updateToConfigQueryAnalyzersNamespaceImpl(opCtx, args); + } + + if (args.updateArgs->sampleId && opCtx->writesAreReplicated()) { + updateWithSampleIdImpl(opCtx, args); + } +} + +void QueryAnalysisOpObserverRS::aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { + if (coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) { + docToDeleteDecoration(args) = doc.getOwned(); + } +} + +void QueryAnalysisOpObserverRS::onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { + if (coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) { + auto& doc = docToDeleteDecoration(args); + invariant(!doc.isEmpty()); + deleteFromConfigQueryAnalyzersNamespaceImpl(opCtx, args, doc); + } +} +} // namespace analyze_shard_key +} // namespace mongo diff --git a/src/mongo/db/s/query_analysis_op_observer_rs.h b/src/mongo/db/s/query_analysis_op_observer_rs.h new file mode 100644 index 
0000000000000..24a62629b5cdd --- /dev/null +++ b/src/mongo/db/s/query_analysis_op_observer_rs.h @@ -0,0 +1,86 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/s/query_analysis_op_observer.h" +#include "mongo/db/session/logical_session_id.h" + +namespace mongo { +namespace analyze_shard_key { + +/** + * OpObserver for query analysis in a replica set. + */ +class QueryAnalysisOpObserverRS final : public QueryAnalysisOpObserver { + QueryAnalysisOpObserverRS(const QueryAnalysisOpObserverRS&) = delete; + QueryAnalysisOpObserverRS& operator=(const QueryAnalysisOpObserverRS&) = delete; + +public: + QueryAnalysisOpObserverRS() = default; + ~QueryAnalysisOpObserverRS() = default; + + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kAll, NamespaceFilter::kConfig}; + } + + void onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; +}; + +} // namespace analyze_shard_key +} // namespace mongo diff --git a/src/mongo/db/s/query_analysis_op_observer_shardsvr.cpp b/src/mongo/db/s/query_analysis_op_observer_shardsvr.cpp new file mode 100644 index 0000000000000..d565a55c0ce7f --- /dev/null +++ b/src/mongo/db/s/query_analysis_op_observer_shardsvr.cpp @@ -0,0 +1,48 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include + +#include "mongo/db/s/query_analysis_op_observer_shardsvr.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault + +namespace mongo { +namespace analyze_shard_key { + +void QueryAnalysisOpObserverShardSvr::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { + if (args.updateArgs->sampleId && opCtx->writesAreReplicated()) { + updateWithSampleIdImpl(opCtx, args); + } +} + +} // namespace analyze_shard_key +} // namespace mongo diff --git a/src/mongo/db/s/query_analysis_op_observer_shardsvr.h b/src/mongo/db/s/query_analysis_op_observer_shardsvr.h new file mode 100644 index 0000000000000..fce59b2694f06 --- /dev/null +++ b/src/mongo/db/s/query_analysis_op_observer_shardsvr.h @@ -0,0 +1,92 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/s/query_analysis_op_observer.h" +#include "mongo/db/session/logical_session_id.h" + +namespace mongo { +namespace analyze_shard_key { + +/** + * OpObserver for query analysis in a shard svr. + */ +class QueryAnalysisOpObserverShardSvr final : public QueryAnalysisOpObserver { + QueryAnalysisOpObserverShardSvr(const QueryAnalysisOpObserverShardSvr&) = delete; + QueryAnalysisOpObserverShardSvr& operator=(const QueryAnalysisOpObserverShardSvr&) = delete; + +public: + QueryAnalysisOpObserverShardSvr() = default; + ~QueryAnalysisOpObserverShardSvr() = default; + + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kAll, NamespaceFilter::kNone}; + } + + void onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final { + // no-op + } + + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final { + // no-op + } + + void onDelete(OperationContext* opCtx, + const CollectionPtr& coll, + StmtId stmtId, + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final { + // no-op + } +}; + +} // namespace analyze_shard_key +} // namespace mongo diff --git a/src/mongo/db/s/query_analysis_writer.cpp b/src/mongo/db/s/query_analysis_writer.cpp index 668910cc2cbb6..923c1d7022a8f 100644 --- a/src/mongo/db/s/query_analysis_writer.cpp +++ b/src/mongo/db/s/query_analysis_writer.cpp @@ -27,31 +27,59 @@ * it in the license file. 
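Note: taken together, the header change above and the three new files convert `QueryAnalysisOpObserver` from a monolithic `OpObserver` with many empty overrides into an `OpObserverNoop`-derived base class that exposes protected `*Impl` helpers, with role-specific subclasses for the config server, plain replica sets, and shard servers. A schematic subclass is sketched below; the class name and the particular filter/forwarding choices are illustrative only (not part of this patch), and the template arguments in the iterator parameters are assumed from the surrounding OpObserver interface.

```cpp
// Illustrative sketch (not part of the patch): the common shape of the new
// QueryAnalysisOpObserver{ConfigSvr,RS,ShardSvr} subclasses.
#include "mongo/db/namespace_string.h"
#include "mongo/db/op_observer/op_observer.h"
#include "mongo/db/s/query_analysis_op_observer.h"

namespace mongo {
namespace analyze_shard_key {

class QueryAnalysisOpObserverExample final : public QueryAnalysisOpObserver {
public:
    // Advertises to the OpObserverRegistry which namespaces this observer wants
    // to see for CRUD ops and for delete ops, respectively; the real subclasses
    // return combinations of kAll, kConfig and kNone.
    NamespaceFilters getNamespaceFilters() const final {
        return {NamespaceFilter::kConfig, NamespaceFilter::kConfig};
    }

    void onInserts(OperationContext* opCtx,
                   const CollectionPtr& coll,
                   std::vector<InsertStatement>::const_iterator first,
                   std::vector<InsertStatement>::const_iterator last,
                   std::vector<bool> fromMigrate,
                   bool defaultFromMigrate,
                   OpStateAccumulator* opAccumulator = nullptr) final {
        // Role-specific namespace filtering lives in the subclass; the shared
        // bookkeeping is delegated to the protected helpers on the base class.
        if (coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) {
            insertInConfigQueryAnalyzersNamespaceImpl(opCtx, first, last);
        }
    }

    void onUpdate(OperationContext* opCtx,
                  const OplogUpdateEntryArgs& args,
                  OpStateAccumulator* opAccumulator = nullptr) final {
        if (args.coll->ns() == NamespaceString::kConfigQueryAnalyzersNamespace) {
            updateToConfigQueryAnalyzersNamespaceImpl(opCtx, args);
        }
    }

    void aboutToDelete(OperationContext* opCtx,
                       const CollectionPtr& coll,
                       const BSONObj& doc,
                       OplogDeleteEntryArgs* args,
                       OpStateAccumulator* opAccumulator = nullptr) final {
        // The ConfigSvr/RS variants stash the document on a decoration of
        // 'args' here so that onDelete() can consume it after the delete.
    }

    void onDelete(OperationContext* opCtx,
                  const CollectionPtr& coll,
                  StmtId stmtId,
                  const OplogDeleteEntryArgs& args,
                  OpStateAccumulator* opAccumulator = nullptr) final {
        // No-op in this sketch; see the new *.cpp files above for the real logic.
    }
};

}  // namespace analyze_shard_key
}  // namespace mongo
```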
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/query_analysis_writer.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/client/connpool.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/repl/replica_set_aware_service.h" -#include "mongo/db/server_options.h" +#include "mongo/db/s/query_analysis_writer.h" #include "mongo/db/service_context.h" #include "mongo/db/update/document_diff_calculator.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/analyze_shard_key_documents_gen.h" +#include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/analyze_shard_key_server_parameters_gen.h" #include "mongo/s/analyze_shard_key_util.h" #include "mongo/s/query_analysis_client.h" #include "mongo/s/query_analysis_sample_tracker.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -68,8 +96,7 @@ static ReplicaSetAwareServiceRegistry::Registerer MONGO_FAIL_POINT_DEFINE(disableQueryAnalysisWriter); MONGO_FAIL_POINT_DEFINE(disableQueryAnalysisWriterFlusher); -MONGO_FAIL_POINT_DEFINE(hangQueryAnalysisWriterBeforeWritingLocally); -MONGO_FAIL_POINT_DEFINE(hangQueryAnalysisWriterBeforeWritingRemotely); +MONGO_FAIL_POINT_DEFINE(queryAnalysisWriterSkipActiveSamplingCheck); const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max()); @@ -81,7 +108,7 @@ BSONObj createIndex(OperationContext* opCtx, const NamespaceString& nss, const B DBDirectClient client(opCtx); client.runCommand( - nss.db(), + nss.dbName(), BSON("createIndexes" << nss.coll().toString() << "indexes" << BSON_ARRAY(indexSpec)), resObj); @@ -120,7 +147,10 @@ SampledCommandRequest makeSampledReadCommand(const UUID& sampleId, * Returns a sampled update command for the update at 'opIndex' in the given update command. 
*/ SampledCommandRequest makeSampledUpdateCommandRequest( - const UUID& sampleId, const write_ops::UpdateCommandRequest& originalCmd, int opIndex) { + OperationContext* opCtx, + const UUID& sampleId, + const write_ops::UpdateCommandRequest& originalCmd, + int opIndex) { auto op = originalCmd.getUpdates()[opIndex]; if (op.getSampleId()) { tassert(ErrorCodes::IllegalOperation, @@ -132,10 +162,14 @@ SampledCommandRequest makeSampledUpdateCommandRequest( // If the initial query was a write without shard key, the two phase write protocol modifies the // query in the write phase. In order to get correct metrics, we need to reconstruct the // original query here. - if (originalCmd.getOriginalQuery()) { + if (originalCmd.getOriginalQuery() || originalCmd.getOriginalCollation()) { tassert(7406500, "Found a _clusterWithoutShardKey command with batch size > 1", originalCmd.getUpdates().size() == 1); + uassert(ErrorCodes::InvalidOptions, + "Cannot specify '$_originalQuery' or '$_originalCollation' since they are internal " + "fields", + isInternalClient(opCtx)); op.setQ(*originalCmd.getOriginalQuery()); op.setCollation(originalCmd.getOriginalCollation()); } @@ -152,7 +186,10 @@ SampledCommandRequest makeSampledUpdateCommandRequest( * Returns a sampled delete command for the delete at 'opIndex' in the given delete command. */ SampledCommandRequest makeSampledDeleteCommandRequest( - const UUID& sampleId, const write_ops::DeleteCommandRequest& originalCmd, int opIndex) { + OperationContext* opCtx, + const UUID& sampleId, + const write_ops::DeleteCommandRequest& originalCmd, + int opIndex) { auto op = originalCmd.getDeletes()[opIndex]; if (op.getSampleId()) { tassert(ErrorCodes::IllegalOperation, @@ -164,10 +201,14 @@ SampledCommandRequest makeSampledDeleteCommandRequest( // If the initial query was a write without shard key, the two phase write protocol modifies the // query in the write phase. In order to get correct metrics, we need to reconstruct the // original query here. - if (originalCmd.getOriginalQuery()) { + if (originalCmd.getOriginalQuery() || originalCmd.getOriginalCollation()) { tassert(7406501, "Found a _clusterWithoutShardKey command with batch size > 1", originalCmd.getDeletes().size() == 1); + uassert(ErrorCodes::InvalidOptions, + "Cannot specify '$_originalQuery' or '$_originalCollation' since they are internal " + "fields", + isInternalClient(opCtx)); op.setQ(*originalCmd.getOriginalQuery()); op.setCollation(originalCmd.getOriginalCollation()); } @@ -184,7 +225,9 @@ SampledCommandRequest makeSampledDeleteCommandRequest( * Returns a sampled findAndModify command for the given findAndModify command. */ SampledCommandRequest makeSampledFindAndModifyCommandRequest( - const UUID& sampleId, const write_ops::FindAndModifyCommandRequest& originalCmd) { + OperationContext* opCtx, + const UUID& sampleId, + const write_ops::FindAndModifyCommandRequest& originalCmd) { write_ops::FindAndModifyCommandRequest sampledCmd(originalCmd.getNamespace()); if (sampledCmd.getSampleId()) { tassert(ErrorCodes::IllegalOperation, @@ -196,7 +239,11 @@ SampledCommandRequest makeSampledFindAndModifyCommandRequest( // If the initial query was a write without shard key, the two phase write protocol modifies the // query in the write phase. In order to get correct metrics, we need to reconstruct the // original query here. 
- if (originalCmd.getOriginalQuery()) { + if (originalCmd.getOriginalQuery() || originalCmd.getOriginalCollation()) { + uassert(ErrorCodes::InvalidOptions, + "Cannot specify '$_originalQuery' or '$_originalCollation' since they are internal " + "fields", + isInternalClient(opCtx)); sampledCmd.setQuery(*originalCmd.getOriginalQuery()); sampledCmd.setCollation(originalCmd.getOriginalCollation()); } else { @@ -216,6 +263,22 @@ SampledCommandRequest makeSampledFindAndModifyCommandRequest( sampledCmd.toBSON(BSON("$db" << sampledCmd.getNamespace().db().toString()))}; } +/* + * Returns true if a sample for the collection with the given namespace and collection uuid should + * be persisted. If the collection does not exist (i.e. the collection uuid is none), returns false. + * If the collection has been recreated or renamed (i.e. the given collection uuid does not match + * the one in the sampling configuration), returns false. Otherwise, returns true. + */ +bool shouldPersistSample(OperationContext* opCtx, + const NamespaceString& nss, + const boost::optional& collUuid) { + if (!collUuid) { + return false; + } + return MONGO_unlikely(queryAnalysisWriterSkipActiveSamplingCheck.shouldFail()) || + QueryAnalysisSampleTracker::get(opCtx).isSamplingActive(nss, *collUuid); +} + } // namespace const std::string QueryAnalysisWriter::kSampledQueriesTTLIndexName = "SampledQueriesTTLIndex"; @@ -250,9 +313,7 @@ QueryAnalysisWriter* QueryAnalysisWriter::get(ServiceContext* serviceContext) { } bool QueryAnalysisWriter::shouldRegisterReplicaSetAwareService() const { - // This is invoked when the Register above is constructed which is before FCV is set so we need - // to ignore FCV when checking if the feature flag is enabled. - return supportsPersistingSampledQueries(true /* isReplEnabled */, true /* ignoreFCV */); + return supportsPersistingSampledQueries(true /* isReplEnabled */); } void QueryAnalysisWriter::onStartup(OperationContext* opCtx) { @@ -275,7 +336,9 @@ void QueryAnalysisWriter::onStartup(OperationContext* opCtx) { auto opCtx = client->makeOperationContext(); _flushQueries(opCtx.get()); }, - Seconds(gQueryAnalysisWriterIntervalSecs)); + Seconds(gQueryAnalysisWriterIntervalSecs), + // TODO(SERVER-74662): Please revisit if this periodic job could be made killable. + false /*isKillableByStepdown*/); _periodicQueryWriter = periodicRunner->makeJob(std::move(queryWriterJob)); _periodicQueryWriter.start(); @@ -288,7 +351,9 @@ void QueryAnalysisWriter::onStartup(OperationContext* opCtx) { auto opCtx = client->makeOperationContext(); _flushDiffs(opCtx.get()); }, - Seconds(gQueryAnalysisWriterIntervalSecs)); + Seconds(gQueryAnalysisWriterIntervalSecs), + // TODO(SERVER-74662): Please revisit if this periodic job could be made killable. + false /*isKillableByStepdown*/); _periodicDiffWriter = periodicRunner->makeJob(std::move(diffWriterJob)); _periodicDiffWriter.start(); @@ -299,6 +364,10 @@ void QueryAnalysisWriter::onStartup(OperationContext* opCtx) { threadPoolOptions.poolName = "QueryAnalysisWriterThreadPool"; threadPoolOptions.onCreateThread = [](const std::string& threadName) { Client::initThread(threadName.c_str()); + + // TODO(SERVER-74662): Please revisit if this thread could be made killable. 
+ stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); }; _executor = std::make_shared( std::make_unique(threadPoolOptions), @@ -569,8 +638,7 @@ ExecutorFuture QueryAnalysisWriter::_addReadQuery( auto collUuid = CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, sampledReadCmd.nss); - if (!collUuid) { - LOGV2_WARNING(7047301, "Found a sampled read query for non-existing collection"); + if (!shouldPersistSample(opCtx, sampledReadCmd.nss, collUuid)) { return; } @@ -604,20 +672,22 @@ ExecutorFuture QueryAnalysisWriter::_addReadQuery( } ExecutorFuture QueryAnalysisWriter::addUpdateQuery( - const UUID& sampleId, const write_ops::UpdateCommandRequest& updateCmd, int opIndex) { + OperationContext* originalOpCtx, + const UUID& sampleId, + const write_ops::UpdateCommandRequest& updateCmd, + int opIndex) { invariant(_executor); return ExecutorFuture(_executor) .then([this, - sampledUpdateCmd = makeSampledUpdateCommandRequest(sampleId, updateCmd, opIndex)]() { + sampledUpdateCmd = + makeSampledUpdateCommandRequest(originalOpCtx, sampleId, updateCmd, opIndex)]() { auto opCtxHolder = cc().makeOperationContext(); auto opCtx = opCtxHolder.get(); auto collUuid = CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, sampledUpdateCmd.nss); - if (!collUuid) { - LOGV2_WARNING(7075300, - "Found a sampled update query for a non-existing collection"); + if (!shouldPersistSample(opCtx, sampledUpdateCmd.nss, collUuid)) { return; } @@ -653,27 +723,29 @@ ExecutorFuture QueryAnalysisWriter::addUpdateQuery( } ExecutorFuture QueryAnalysisWriter::addUpdateQuery( - const write_ops::UpdateCommandRequest& updateCmd, int opIndex) { + OperationContext* opCtx, const write_ops::UpdateCommandRequest& updateCmd, int opIndex) { auto sampleId = updateCmd.getUpdates()[opIndex].getSampleId(); invariant(sampleId); - return addUpdateQuery(*sampleId, updateCmd, opIndex); + return addUpdateQuery(opCtx, *sampleId, updateCmd, opIndex); } ExecutorFuture QueryAnalysisWriter::addDeleteQuery( - const UUID& sampleId, const write_ops::DeleteCommandRequest& deleteCmd, int opIndex) { + OperationContext* originalOpCtx, + const UUID& sampleId, + const write_ops::DeleteCommandRequest& deleteCmd, + int opIndex) { invariant(_executor); return ExecutorFuture(_executor) .then([this, - sampledDeleteCmd = makeSampledDeleteCommandRequest(sampleId, deleteCmd, opIndex)]() { + sampledDeleteCmd = + makeSampledDeleteCommandRequest(originalOpCtx, sampleId, deleteCmd, opIndex)]() { auto opCtxHolder = cc().makeOperationContext(); auto opCtx = opCtxHolder.get(); auto collUuid = CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, sampledDeleteCmd.nss); - if (!collUuid) { - LOGV2_WARNING(7075302, - "Found a sampled delete query for a non-existing collection"); + if (!shouldPersistSample(opCtx, sampledDeleteCmd.nss, collUuid)) { return; } @@ -709,28 +781,28 @@ ExecutorFuture QueryAnalysisWriter::addDeleteQuery( } ExecutorFuture QueryAnalysisWriter::addDeleteQuery( - const write_ops::DeleteCommandRequest& deleteCmd, int opIndex) { + OperationContext* opCtx, const write_ops::DeleteCommandRequest& deleteCmd, int opIndex) { auto sampleId = deleteCmd.getDeletes()[opIndex].getSampleId(); invariant(sampleId); - return addDeleteQuery(*sampleId, deleteCmd, opIndex); + return addDeleteQuery(opCtx, *sampleId, deleteCmd, opIndex); } ExecutorFuture QueryAnalysisWriter::addFindAndModifyQuery( - const UUID& sampleId, const write_ops::FindAndModifyCommandRequest& findAndModifyCmd) { + OperationContext* originalOpCtx, + const UUID& sampleId, + const 
write_ops::FindAndModifyCommandRequest& findAndModifyCmd) { invariant(_executor); return ExecutorFuture(_executor) .then([this, - sampledFindAndModifyCmd = - makeSampledFindAndModifyCommandRequest(sampleId, findAndModifyCmd)]() { + sampledFindAndModifyCmd = makeSampledFindAndModifyCommandRequest( + originalOpCtx, sampleId, findAndModifyCmd)]() { auto opCtxHolder = cc().makeOperationContext(); auto opCtx = opCtxHolder.get(); auto collUuid = CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, sampledFindAndModifyCmd.nss); - if (!collUuid) { - LOGV2_WARNING(7075304, - "Found a sampled findAndModify query for a non-existing collection"); + if (!shouldPersistSample(opCtx, sampledFindAndModifyCmd.nss, collUuid)) { return; } @@ -766,10 +838,10 @@ ExecutorFuture QueryAnalysisWriter::addFindAndModifyQuery( } ExecutorFuture QueryAnalysisWriter::addFindAndModifyQuery( - const write_ops::FindAndModifyCommandRequest& findAndModifyCmd) { + OperationContext* opCtx, const write_ops::FindAndModifyCommandRequest& findAndModifyCmd) { auto sampleId = findAndModifyCmd.getSampleId(); invariant(sampleId); - return addFindAndModifyQuery(*sampleId, findAndModifyCmd); + return addFindAndModifyQuery(opCtx, *sampleId, findAndModifyCmd); } ExecutorFuture QueryAnalysisWriter::addDiff(const UUID& sampleId, @@ -793,6 +865,14 @@ ExecutorFuture QueryAnalysisWriter::addDiff(const UUID& sampleId, return; } + if (collUuid != CollectionCatalog::get(opCtx)->lookupUUIDByNSS(opCtx, nss)) { + return; + } + + if (!shouldPersistSample(opCtx, nss, collUuid)) { + return; + } + auto expireAt = opCtx->getServiceContext()->getFastClockSource()->now() + mongo::Milliseconds(gQueryAnalysisSampleExpirationSecs.load() * 1000); auto doc = diff --git a/src/mongo/db/s/query_analysis_writer.h b/src/mongo/db/s/query_analysis_writer.h index 18f980dd20f9d..34d9b72a3b3d4 100644 --- a/src/mongo/db/s/query_analysis_writer.h +++ b/src/mongo/db/s/query_analysis_writer.h @@ -29,18 +29,30 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/platform/mutex.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/util/future.h" #include "mongo/util/periodic_runner.h" - -#include -#include +#include "mongo/util/uuid.h" namespace mongo { namespace analyze_shard_key { @@ -173,22 +185,28 @@ class QueryAnalysisWriter final : public std::enable_shared_from_this addUpdateQuery(const UUID& sampleId, + ExecutorFuture addUpdateQuery(OperationContext* opCtx, + const UUID& sampleId, const write_ops::UpdateCommandRequest& updateCmd, int opIndex); - ExecutorFuture addUpdateQuery(const write_ops::UpdateCommandRequest& updateCmd, + ExecutorFuture addUpdateQuery(OperationContext* opCtx, + const write_ops::UpdateCommandRequest& updateCmd, int opIndex); - ExecutorFuture addDeleteQuery(const UUID& sampleId, + ExecutorFuture addDeleteQuery(OperationContext* opCtx, + const UUID& sampleId, const write_ops::DeleteCommandRequest& deleteCmd, int opIndex); - ExecutorFuture addDeleteQuery(const write_ops::DeleteCommandRequest& deleteCmd, + ExecutorFuture addDeleteQuery(OperationContext* opCtx, + const 
write_ops::DeleteCommandRequest& deleteCmd, int opIndex); ExecutorFuture addFindAndModifyQuery( - const UUID& sampleId, const write_ops::FindAndModifyCommandRequest& findAndModifyCmd); - ExecutorFuture addFindAndModifyQuery( + OperationContext* opCtx, + const UUID& sampleId, const write_ops::FindAndModifyCommandRequest& findAndModifyCmd); + ExecutorFuture addFindAndModifyQuery( + OperationContext* opCtx, const write_ops::FindAndModifyCommandRequest& findAndModifyCmd); ExecutorFuture addDiff(const UUID& sampleId, const NamespaceString& nss, diff --git a/src/mongo/db/s/query_analysis_writer_test.cpp b/src/mongo/db/s/query_analysis_writer_test.cpp index d6046d5f3a529..0642dec834c2b 100644 --- a/src/mongo/db/s/query_analysis_writer_test.cpp +++ b/src/mongo/db/s/query_analysis_writer_test.cpp @@ -29,17 +29,50 @@ #include "mongo/db/s/query_analysis_writer.h" +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" -#include "mongo/db/db_raii.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/update/document_diff_calculator.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" #include "mongo/platform/random.h" #include "mongo/s/analyze_shard_key_documents_gen.h" +#include "mongo/s/query_analysis_sample_tracker.h" +#include "mongo/stdx/future.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/static_immortal.h" +#include "mongo/util/synchronized_value.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -49,6 +82,7 @@ namespace { const NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("testDb", "testColl0"); const NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("testDb", "testColl1"); +const int samplesPerSecond = 100; TEST(QueryAnalysisWriterBufferTest, AddBasic) { auto buffer = QueryAnalysisWriter::Buffer(nss0); @@ -175,6 +209,13 @@ struct QueryAnalysisWriterTest : public ShardServerTestFixture { DBDirectClient client(operationContext()); client.createCollection(nss0); client.createCollection(nss1); + + auto& tracker = QueryAnalysisSampleTracker::get(operationContext()); + auto configuration0 = CollectionQueryAnalyzerConfiguration( + nss0, getCollectionUUID(nss0), samplesPerSecond, Date_t::now()); + auto configuration1 = CollectionQueryAnalyzerConfiguration( + nss1, getCollectionUUID(nss1), samplesPerSecond, Date_t::now()); + tracker.refreshConfigurations({configuration0, configuration1}); } void tearDown() { @@ -210,7 +251,7 @@ struct QueryAnalysisWriterTest : public ShardServerTestFixture { void assertTTLIndexExists(const NamespaceString& nss, 
const std::string& name) const { DBDirectClient client(operationContext()); BSONObj result; - client.runCommand(nss.db(), BSON("listIndexes" << nss.coll().toString()), result); + client.runCommand(nss.dbName(), BSON("listIndexes" << nss.coll().toString()), result); auto indexes = result.getObjectField("cursor").getField("firstBatch").Array(); auto iter = indexes.begin(); @@ -449,6 +490,11 @@ struct QueryAnalysisWriterTest : public ShardServerTestFixture { assertBsonObjEqualUnordered(parsedDiffDoc.getDiff(), expectedDiff); } + /* + * The helper for testing that samples are discarded. + */ + void assertNoSampling(const NamespaceString& nss, const UUID& collUuid); + // Test with both empty and non-empty filter and collation to verify that the // QueryAnalysisWriter doesn't require filter or collation to be non-empty. const BSONObj emptyFilter{}; @@ -476,7 +522,7 @@ struct QueryAnalysisWriterTest : public ShardServerTestFixture { int _getConfigDocumentsCount(const NamespaceString& configNss, const NamespaceString& collNss) const { DBDirectClient client(operationContext()); - return client.count(configNss, BSON("ns" << collNss.toString())); + return client.count(configNss, BSON("ns" << collNss.toString_forTest())); } /** @@ -492,7 +538,6 @@ struct QueryAnalysisWriterTest : public ShardServerTestFixture { return cursor->next(); } - RAIIServerParameterControllerForTest _featureFlagController{"featureFlagAnalyzeShardKey", true}; // This fixture manually flushes sampled queries and diffs. FailPointEnableBlock _fp{"disableQueryAnalysisWriterFlusher"}; PseudoRandom _random{SecureRandom{}.nextInt64()}; @@ -516,7 +561,7 @@ TEST_F(QueryAnalysisWriterTest, CreateTTLIndexesWhenSampledQueriesIndexExists) { FailPoint::alwaysOn, 0, BSON("failCommands" << BSON_ARRAY("createIndexes") << "namespace" - << NamespaceString::kConfigSampledQueriesNamespace.toString() + << NamespaceString::kConfigSampledQueriesNamespace.toStringForErrorMsg() << "errorCode" << ErrorCodes::IndexAlreadyExists << "failInternalCommands" << true << "failLocalClients" << true)); auto& writer = *QueryAnalysisWriter::get(operationContext()); @@ -530,13 +575,14 @@ TEST_F(QueryAnalysisWriterTest, CreateTTLIndexesWhenSampledQueriesIndexExists) { TEST_F(QueryAnalysisWriterTest, CreateTTLIndexesWhenSampledQueriesDiffIndexExists) { auto failCreateIndexes = globalFailPointRegistry().find("failCommand"); - failCreateIndexes->setMode( - FailPoint::alwaysOn, - 0, - BSON("failCommands" << BSON_ARRAY("createIndexes") << "namespace" - << NamespaceString::kConfigSampledQueriesDiffNamespace.toString() - << "errorCode" << ErrorCodes::IndexAlreadyExists - << "failInternalCommands" << true << "failLocalClients" << true)); + failCreateIndexes + ->setMode(FailPoint::alwaysOn, + 0, + BSON("failCommands" + << BSON_ARRAY("createIndexes") << "namespace" + << NamespaceString::kConfigSampledQueriesDiffNamespace.toStringForErrorMsg() + << "errorCode" << ErrorCodes::IndexAlreadyExists << "failInternalCommands" + << true << "failLocalClients" << true)); auto& writer = *QueryAnalysisWriter::get(operationContext()); auto future = writer.createTTLIndexes(operationContext()); future.get(); @@ -551,11 +597,11 @@ TEST_F(QueryAnalysisWriterTest, CreateTTLIndexesWhenAnalyzeShardKeySplitPointsIn failCreateIndexes->setMode( FailPoint::alwaysOn, 0, - BSON( - "failCommands" << BSON_ARRAY("createIndexes") << "namespace" - << NamespaceString::kConfigAnalyzeShardKeySplitPointsNamespace.toString() - << "errorCode" << ErrorCodes::IndexAlreadyExists - << "failInternalCommands" << true << 
"failLocalClients" << true)); + BSON("failCommands" + << BSON_ARRAY("createIndexes") << "namespace" + << NamespaceString::kConfigAnalyzeShardKeySplitPointsNamespace.toStringForErrorMsg() + << "errorCode" << ErrorCodes::IndexAlreadyExists << "failInternalCommands" << true + << "failLocalClients" << true)); auto& writer = *QueryAnalysisWriter::get(operationContext()); auto future = writer.createTTLIndexes(operationContext()); future.get(); @@ -726,7 +772,7 @@ TEST_F(QueryAnalysisWriterTest, AggregateQuery) { DEATH_TEST_F(QueryAnalysisWriterTest, UpdateQueryNotMarkedForSampling, "invariant") { auto& writer = *QueryAnalysisWriter::get(operationContext()); auto [originalCmd, _] = makeUpdateCommandRequest(nss0, 1, {} /* markForSampling */); - writer.addUpdateQuery(originalCmd, 0).get(); + writer.addUpdateQuery(operationContext(), originalCmd, 0).get(); } TEST_F(QueryAnalysisWriterTest, UpdateQueriesMarkedForSampling) { @@ -736,8 +782,8 @@ TEST_F(QueryAnalysisWriterTest, UpdateQueriesMarkedForSampling) { makeUpdateCommandRequest(nss0, 3, {0, 2} /* markForSampling */); ASSERT_EQ(expectedSampledCmds.size(), 2U); - writer.addUpdateQuery(originalCmd, 0).get(); - writer.addUpdateQuery(originalCmd, 2).get(); + writer.addUpdateQuery(operationContext(), originalCmd, 0).get(); + writer.addUpdateQuery(operationContext(), originalCmd, 2).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 2); writer.flushQueriesForTest(operationContext()); ASSERT_EQ(writer.getQueriesCountForTest(), 0); @@ -754,7 +800,7 @@ TEST_F(QueryAnalysisWriterTest, UpdateQueriesMarkedForSampling) { DEATH_TEST_F(QueryAnalysisWriterTest, DeleteQueryNotMarkedForSampling, "invariant") { auto& writer = *QueryAnalysisWriter::get(operationContext()); auto [originalCmd, _] = makeDeleteCommandRequest(nss0, 1, {} /* markForSampling */); - writer.addDeleteQuery(originalCmd, 0).get(); + writer.addDeleteQuery(operationContext(), originalCmd, 0).get(); } TEST_F(QueryAnalysisWriterTest, DeleteQueriesMarkedForSampling) { @@ -764,8 +810,8 @@ TEST_F(QueryAnalysisWriterTest, DeleteQueriesMarkedForSampling) { makeDeleteCommandRequest(nss0, 3, {1, 2} /* markForSampling */); ASSERT_EQ(expectedSampledCmds.size(), 2U); - writer.addDeleteQuery(originalCmd, 1).get(); - writer.addDeleteQuery(originalCmd, 2).get(); + writer.addDeleteQuery(operationContext(), originalCmd, 1).get(); + writer.addDeleteQuery(operationContext(), originalCmd, 2).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 2); writer.flushQueriesForTest(operationContext()); ASSERT_EQ(writer.getQueriesCountForTest(), 0); @@ -783,7 +829,7 @@ DEATH_TEST_F(QueryAnalysisWriterTest, FindAndModifyQueryNotMarkedForSampling, "i auto& writer = *QueryAnalysisWriter::get(operationContext()); auto [originalCmd, _] = makeFindAndModifyCommandRequest(nss0, true /* isUpdate */, false /* markForSampling */); - writer.addFindAndModifyQuery(originalCmd).get(); + writer.addFindAndModifyQuery(operationContext(), originalCmd).get(); } TEST_F(QueryAnalysisWriterTest, FindAndModifyQueryUpdateMarkedForSampling) { @@ -794,7 +840,7 @@ TEST_F(QueryAnalysisWriterTest, FindAndModifyQueryUpdateMarkedForSampling) { ASSERT_EQ(expectedSampledCmds.size(), 1U); auto [sampleId, expectedSampledCmd] = *expectedSampledCmds.begin(); - writer.addFindAndModifyQuery(originalCmd).get(); + writer.addFindAndModifyQuery(operationContext(), originalCmd).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 1); writer.flushQueriesForTest(operationContext()); ASSERT_EQ(writer.getQueriesCountForTest(), 0); @@ -814,7 +860,7 @@ 
TEST_F(QueryAnalysisWriterTest, FindAndModifyQueryRemoveMarkedForSampling) { ASSERT_EQ(expectedSampledCmds.size(), 1U); auto [sampleId, expectedSampledCmd] = *expectedSampledCmds.begin(); - writer.addFindAndModifyQuery(originalCmd).get(); + writer.addFindAndModifyQuery(operationContext(), originalCmd).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 1); writer.flushQueriesForTest(operationContext()); ASSERT_EQ(writer.getQueriesCountForTest(), 0); @@ -845,8 +891,8 @@ TEST_F(QueryAnalysisWriterTest, MultipleQueriesAndCollections) { auto originalCountFilter = makeNonEmptyFilter(); auto originalCountCollation = makeNonEmptyCollation(); - writer.addDeleteQuery(originalDeleteCmd, 1).get(); - writer.addUpdateQuery(originalUpdateCmd, 0).get(); + writer.addDeleteQuery(operationContext(), originalDeleteCmd, 1).get(); + writer.addUpdateQuery(operationContext(), originalUpdateCmd, 0).get(); writer.addCountQuery(countSampleId, nss1, originalCountFilter, originalCountCollation).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 3); writer.flushQueriesForTest(operationContext()); @@ -903,7 +949,7 @@ TEST_F(QueryAnalysisWriterTest, DuplicateQueries) { originalFindFilter, originalFindCollation); - writer.addUpdateQuery(originalUpdateCmd, 0).get(); + writer.addUpdateQuery(operationContext(), originalUpdateCmd, 0).get(); writer .addFindQuery(findSampleId, nss0, @@ -959,8 +1005,7 @@ TEST_F(QueryAnalysisWriterTest, QueriesMultipleBatches_MaxBatchSize) { } } -TEST_F(QueryAnalysisWriterTest, QueriesMultipleBatches_MaxBSONObjSize) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagAnalyzeShardKey", true); +TEST_F(QueryAnalysisWriterTest, QueriesMultipleBatchesFewQueries_MaxBSONObjSize) { auto& writer = *QueryAnalysisWriter::get(operationContext()); auto numQueries = 3; @@ -984,6 +1029,30 @@ TEST_F(QueryAnalysisWriterTest, QueriesMultipleBatches_MaxBSONObjSize) { } } +TEST_F(QueryAnalysisWriterTest, QueriesMultipleBatchesManyQueries_MaxBSONObjSize) { + auto& writer = *QueryAnalysisWriter::get(operationContext()); + + auto numQueries = 75'000; + std::vector> expectedSampledCmds; + for (auto i = 0; i < numQueries; i++) { + auto sampleId = UUID::gen(); + auto filter = makeNonEmptyFilter(); + auto collation = makeNonEmptyCollation(); + writer.addAggregateQuery(sampleId, nss0, filter, collation, boost::none /* letParameters */) + .get(); + expectedSampledCmds.push_back({sampleId, filter, collation}); + } + ASSERT_EQ(writer.getQueriesCountForTest(), numQueries); + writer.flushQueriesForTest(operationContext()); + ASSERT_EQ(writer.getQueriesCountForTest(), 0); + + ASSERT_EQ(getSampledQueryDocumentsCount(nss0), numQueries); + for (const auto& [sampleId, filter, collation] : expectedSampledCmds) { + assertSampledReadQueryDocument( + sampleId, nss0, SampledCommandNameEnum::kAggregate, filter, collation); + } +} + TEST_F(QueryAnalysisWriterTest, FlushAfterAddReadIfExceedsSizeLimit) { auto& writer = *QueryAnalysisWriter::get(operationContext()); @@ -1028,10 +1097,10 @@ TEST_F(QueryAnalysisWriterTest, FlushAfterAddUpdateIfExceedsSizeLimit) { std::string(maxMemoryUsageBytes / 2, 'a') /* filterFieldName */); ASSERT_EQ(expectedSampledCmds.size(), 2U); - writer.addUpdateQuery(originalCmd, 0).get(); + writer.addUpdateQuery(operationContext(), originalCmd, 0).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 1); // Adding the next query causes the size to exceed the limit. 
- writer.addUpdateQuery(originalCmd, 2).get(); + writer.addUpdateQuery(operationContext(), originalCmd, 2).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 0); ASSERT_EQ(getSampledQueryDocumentsCount(nss0), 2); @@ -1056,10 +1125,10 @@ TEST_F(QueryAnalysisWriterTest, FlushAfterAddDeleteIfExceedsSizeLimit) { std::string(maxMemoryUsageBytes / 2, 'a') /* filterFieldName */); ASSERT_EQ(expectedSampledCmds.size(), 2U); - writer.addDeleteQuery(originalCmd, 0).get(); + writer.addDeleteQuery(operationContext(), originalCmd, 0).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 1); // Adding the next query causes the size to exceed the limit. - writer.addDeleteQuery(originalCmd, 1).get(); + writer.addDeleteQuery(operationContext(), originalCmd, 1).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 0); ASSERT_EQ(getSampledQueryDocumentsCount(nss0), 2); @@ -1094,10 +1163,10 @@ TEST_F(QueryAnalysisWriterTest, FlushAfterAddFindAndModifyIfExceedsSizeLimit) { ASSERT_EQ(expectedSampledCmds0.size(), 1U); auto [sampleId1, expectedSampledCmd1] = *expectedSampledCmds1.begin(); - writer.addFindAndModifyQuery(originalCmd0).get(); + writer.addFindAndModifyQuery(operationContext(), originalCmd0).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 1); // Adding the next query causes the size to exceed the limit. - writer.addFindAndModifyQuery(originalCmd1).get(); + writer.addFindAndModifyQuery(operationContext(), originalCmd1).get(); ASSERT_EQ(writer.getQueriesCountForTest(), 0); ASSERT_EQ(getSampledQueryDocumentsCount(nss0), 1); @@ -1472,6 +1541,70 @@ TEST_F(QueryAnalysisWriterTest, DiffExceedsSizeLimit) { ASSERT_EQ(getDiffDocumentsCount(nss0), 0); } +void QueryAnalysisWriterTest::assertNoSampling(const NamespaceString& nss, const UUID& collUuid) { + auto& writer = *QueryAnalysisWriter::get(operationContext()); + + writer + .addFindQuery(UUID::gen() /* sampleId */, + nss, + emptyFilter, + emptyCollation, + boost::none /* letParameters */) + .get(); + ASSERT_EQ(writer.getQueriesCountForTest(), 0); + + writer + .addAggregateQuery(UUID::gen() /* sampleId */, + nss, + emptyFilter, + emptyCollation, + boost::none /* letParameters */) + .get(); + ASSERT_EQ(writer.getQueriesCountForTest(), 0); + + writer.addCountQuery(UUID::gen() /* sampleId */, nss, emptyFilter, emptyCollation).get(); + ASSERT_EQ(writer.getQueriesCountForTest(), 0); + + writer.addDistinctQuery(UUID::gen() /* sampleId */, nss, emptyFilter, emptyCollation).get(); + ASSERT_EQ(writer.getQueriesCountForTest(), 0); + + auto originalUpdateCmd = makeUpdateCommandRequest(nss, 1, {0} /* markForSampling */).first; + writer.addUpdateQuery(operationContext(), originalUpdateCmd, 0).get(); + ASSERT_EQ(writer.getQueriesCountForTest(), 0); + + auto originalDeleteCmd = makeDeleteCommandRequest(nss, 1, {0} /* markForSampling */).first; + writer.addDeleteQuery(operationContext(), originalDeleteCmd, 0).get(); + ASSERT_EQ(writer.getQueriesCountForTest(), 0); + + auto originalFindAndModifyCmd = + makeFindAndModifyCommandRequest(nss, true /* isUpdate */, true /* markForSampling */).first; + writer.addFindAndModifyQuery(operationContext(), originalFindAndModifyCmd).get(); + + writer + .addDiff(UUID::gen() /* sampleId */, + nss, + collUuid, + BSON("a" << 0) /* preImage */, + BSON("a" << 1) /* postImage */) + .get(); + ASSERT_EQ(writer.getDiffsCountForTest(), 0); +} + +TEST_F(QueryAnalysisWriterTest, DiscardSamplesIfCollectionNoLongerExists) { + DBDirectClient client(operationContext()); + auto collUuid0BeforeDrop = getCollectionUUID(nss0); + client.dropCollection(nss0); + 
assertNoSampling(nss0, collUuid0BeforeDrop); +} + +TEST_F(QueryAnalysisWriterTest, DiscardSamplesIfCollectionIsDroppedAndRecreated) { + DBDirectClient client(operationContext()); + auto collUuid0BeforeDrop = getCollectionUUID(nss0); + client.dropCollection(nss0); + client.createCollection(nss0); + assertNoSampling(nss0, collUuid0BeforeDrop); +} + } // namespace } // namespace analyze_shard_key } // namespace mongo diff --git a/src/mongo/db/s/range_arithmetic.cpp b/src/mongo/db/s/range_arithmetic.cpp index d172e1a9c5ed1..5fb3769a4417d 100644 --- a/src/mongo/db/s/range_arithmetic.cpp +++ b/src/mongo/db/s/range_arithmetic.cpp @@ -29,6 +29,9 @@ #include "mongo/db/s/range_arithmetic.h" +#include +#include + namespace mongo { namespace { diff --git a/src/mongo/db/s/range_arithmetic.h b/src/mongo/db/s/range_arithmetic.h index d1d773cb08eba..64e2815e79e87 100644 --- a/src/mongo/db/s/range_arithmetic.h +++ b/src/mongo/db/s/range_arithmetic.h @@ -30,6 +30,7 @@ #pragma once #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobj_comparator_interface.h" #include "mongo/bson/simple_bsonobj_comparator.h" namespace mongo { diff --git a/src/mongo/db/s/range_arithmetic_test.cpp b/src/mongo/db/s/range_arithmetic_test.cpp index 4495f725a8e31..270ef30fa0c91 100644 --- a/src/mongo/db/s/range_arithmetic_test.cpp +++ b/src/mongo/db/s/range_arithmetic_test.cpp @@ -27,9 +27,16 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/s/range_arithmetic.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/range_deleter_service.cpp b/src/mongo/db/s/range_deleter_service.cpp index f392cd347170c..0a5ecfa0425bc 100644 --- a/src/mongo/db/s/range_deleter_service.cpp +++ b/src/mongo/db/s/range_deleter_service.cpp @@ -29,19 +29,61 @@ #include "mongo/db/s/range_deleter_service.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/s/balancer_stats_registry.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/range_deleter_service_op_observer.h" #include "mongo/db/s/range_deletion_util.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" +#include "mongo/db/s/sharding_runtime_d_params_gen.h" +#include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/logv2/log_attr.h" 
+#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/future_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingRangeDeleter @@ -57,7 +99,9 @@ BSONObj getShardKeyPattern(OperationContext* opCtx, boost::optional optNss; { AutoGetCollection collection( - opCtx, NamespaceStringOrUUID{dbName.toString(), collectionUuid}, MODE_IS); + opCtx, + NamespaceStringOrUUID{DatabaseNameUtil::serialize(dbName), collectionUuid}, + MODE_IS); auto optMetadata = CollectionShardingRuntime::assertCollectionLockedAndAcquireShared( opCtx, collection.getNss()) @@ -88,7 +132,7 @@ RangeDeleterService* RangeDeleterService::get(OperationContext* opCtx) { RangeDeleterService::ReadyRangeDeletionsProcessor::ReadyRangeDeletionsProcessor( OperationContext* opCtx) - : _thread([this] { _runRangeDeletions(); }) {} + : _service(opCtx->getServiceContext()), _thread([this] { _runRangeDeletions(); }) {} RangeDeleterService::ReadyRangeDeletionsProcessor::~ReadyRangeDeletionsProcessor() { shutdown(); @@ -131,11 +175,7 @@ void RangeDeleterService::ReadyRangeDeletionsProcessor::_completedRangeDeletion( } void RangeDeleterService::ReadyRangeDeletionsProcessor::_runRangeDeletions() { - Client::initThread(kRangeDeletionThreadName); - { - stdx::lock_guard lk(cc()); - cc().setSystemOperationKillableByStepdown(lk); - } + ThreadClient threadClient(kRangeDeletionThreadName, _service); { stdx::lock_guard lock(_mutex); @@ -283,10 +323,7 @@ void RangeDeleterService::ReadyRangeDeletionsProcessor::_runRangeDeletions() { } void RangeDeleterService::onStartup(OperationContext* opCtx) { - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The feature - // flag is used to turn on new range deleter on startup. - if (disableResumableRangeDeleter.load() || - !feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { + if (disableResumableRangeDeleter.load()) { return; } @@ -296,12 +333,6 @@ void RangeDeleterService::onStartup(OperationContext* opCtx) { } void RangeDeleterService::onStepUpComplete(OperationContext* opCtx, long long term) { - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The feature - // flag is used to turn on new range deleter on startup. 
- if (!feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - return; - } - if (disableResumableRangeDeleter.load()) { LOGV2_INFO( 6872508, @@ -332,7 +363,6 @@ void RangeDeleterService::onStepUpComplete(OperationContext* opCtx, long long te } void RangeDeleterService::_recoverRangeDeletionsOnStepUp(OperationContext* opCtx) { - _stepUpCompletedFuture = ExecutorFuture(_executor) .then([serviceContext = opCtx->getServiceContext(), this] { @@ -517,13 +547,6 @@ SharedSemiFuture RangeDeleterService::registerTask( .share(); } - LOGV2_DEBUG(7536600, - 2, - "Registering range deletion task", - "collectionUUID"_attr = rdt.getCollectionUuid(), - "range"_attr = redact(rdt.getRange().toString()), - "pending"_attr = pending); - auto scheduleRangeDeletionChain = [&](SharedSemiFuture pendingFuture) { (void)pendingFuture.thenRunOn(_executor) .then([this, @@ -570,6 +593,13 @@ SharedSemiFuture RangeDeleterService::registerTask( auto lock = fromResubmitOnStepUp ? _acquireMutexUnconditionally() : _acquireMutexFailIfServiceNotUp(); + LOGV2_DEBUG(7536600, + 2, + "Registering range deletion task", + "collectionUUID"_attr = rdt.getCollectionUuid(), + "range"_attr = redact(rdt.getRange().toString()), + "pending"_attr = pending); + auto [registeredTask, firstRegistration] = _rangeDeletionTasks[rdt.getCollectionUuid()].insert(std::make_shared(rdt)); diff --git a/src/mongo/db/s/range_deleter_service.h b/src/mongo/db/s/range_deleter_service.h index 90745568ce1ec..d0af0de5475a0 100644 --- a/src/mongo/db/s/range_deleter_service.h +++ b/src/mongo/db/s/range_deleter_service.h @@ -28,14 +28,40 @@ */ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/s/range_deletion_task_gen.h" #include "mongo/db/s/sharding_runtime_d_params_gen.h" +#include "mongo/db/service_context.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/platform/mutex.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -140,6 +166,8 @@ class RangeDeleterService : public ReplicaSetAwareServiceShardSvr +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/cursor_manager.h" -#include "mongo/db/persistent_task_store.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/range_deleter_service.h" #include "mongo/db/s/range_deletion_task_gen.h" +#include "mongo/db/storage/recovery_unit.h" #include 
"mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingRangeDeleter namespace mongo { namespace { // Small hack used to be able to retrieve the full removed document in the `onDelete` method -const auto deletedDocumentDecoration = OperationContext::declareDecoration(); +const auto deletedDocumentDecoration = OplogDeleteEntryArgs::declareDecoration(); void registerTaskWithOngoingQueriesOnOpLogEntryCommit(OperationContext* opCtx, const RangeDeletionTask& rdt) { @@ -59,10 +83,11 @@ void registerTaskWithOngoingQueriesOnOpLogEntryCommit(OperationContext* opCtx, const auto openCursorsIds = CursorManager::get(opCtx)->getCursorIdsForNamespace(rdt.getNss()); LOGV2_INFO( - 7179200, + 6180600, "Range deletion will be scheduled after all possibly dependent queries finish", logAttrs(rdt.getNss()), - "range"_attr = rdt.getRange(), + "collectionUUID"_attr = rdt.getCollectionUuid(), + "range"_attr = redact(rdt.getRange().toString()), "cursorsDirectlyReferringTheNamespace"_attr = openCursorsIds); } (void)RangeDeleterService::get(opCtx)->registerTask( @@ -91,7 +116,8 @@ void RangeDeleterServiceOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kRangeDeletionNamespace) { for (auto it = begin; it != end; ++it) { auto deletionTask = RangeDeletionTask::parse( @@ -104,7 +130,8 @@ void RangeDeleterServiceOpObserver::onInserts(OperationContext* opCtx, } void RangeDeleterServiceOpObserver::onUpdate(OperationContext* opCtx, - const OplogUpdateEntryArgs& args) { + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.coll->ns() == NamespaceString::kRangeDeletionNamespace) { const bool pendingFieldIsRemoved = [&] { return update_oplog_entry::isFieldRemovedByUpdate( @@ -128,30 +155,35 @@ void RangeDeleterServiceOpObserver::onUpdate(OperationContext* opCtx, void RangeDeleterServiceOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kRangeDeletionNamespace) { - deletedDocumentDecoration(opCtx) = doc; + deletedDocumentDecoration(args) = doc; } } void RangeDeleterServiceOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kRangeDeletionNamespace) { - const auto& deletedDoc = deletedDocumentDecoration(opCtx); - - auto deletionTask = - RangeDeletionTask::parse(IDLParserContext("RangeDeleterServiceOpObserver"), deletedDoc); - try { - RangeDeleterService::get(opCtx)->deregisterTask(deletionTask.getCollectionUuid(), - deletionTask.getRange()); - } catch (const DBException& ex) { - dassert(ex.code() == ErrorCodes::NotYetInitialized, - str::stream() << "No error different from `NotYetInitialized` is expected " - "to be 
propagated to the range deleter observer. Got error: " - << ex.toStatus()); - } + opCtx->recoveryUnit()->onCommit([deletedDoc = std::move(deletedDocumentDecoration(args))]( + OperationContext* opCtx, boost::optional) { + auto deletionTask = RangeDeletionTask::parse( + IDLParserContext("RangeDeleterServiceOpObserver"), deletedDoc); + try { + RangeDeleterService::get(opCtx)->deregisterTask(deletionTask.getCollectionUuid(), + deletionTask.getRange()); + } catch (const DBException& ex) { + dassert(ex.code() == ErrorCodes::NotYetInitialized, + str::stream() + << "No error different from `NotYetInitialized` is expected " + "to be propagated to the range deleter observer. Got error: " + << ex.toStatus()); + } + }); } } diff --git a/src/mongo/db/s/range_deleter_service_op_observer.h b/src/mongo/db/s/range_deleter_service_op_observer.h index 4cfec3eeaaef7..b6b4c2417450d 100644 --- a/src/mongo/db/s/range_deleter_service_op_observer.h +++ b/src/mongo/db/s/range_deleter_service_op_observer.h @@ -28,7 +28,15 @@ */ #pragma once +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/session/logical_session_id.h" namespace mongo { @@ -37,7 +45,7 @@ namespace mongo { * Observes all writes to the config.rangeDeletions namespace and schedule/remove range deletion * tasks accordingly. */ -class RangeDeleterServiceOpObserver final : public OpObserver { +class RangeDeleterServiceOpObserver final : public OpObserverNoop { RangeDeleterServiceOpObserver(const RangeDeleterServiceOpObserver&) = delete; RangeDeleterServiceOpObserver& operator=(const RangeDeleterServiceOpObserver&) = delete; @@ -45,215 +53,33 @@ class RangeDeleterServiceOpObserver final : public OpObserver { RangeDeleterServiceOpObserver(); ~RangeDeleterServiceOpObserver(); + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfig, NamespaceFilter::kConfig}; + } + void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override; - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) override; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) override; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) override; - -private: - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) override {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& 
globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) override {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) override {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, - const NamespaceString& nss) override {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, - const NamespaceString& nss) override {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) override {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) override {} - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final{}; - - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) override {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) override {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) override {} - - using OpObserver::onDropCollection; - repl::OpTime onDropCollection(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid, - std::uint64_t numRecords, - CollectionDropType dropType) override { - return repl::OpTime(); - } - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) override {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override {} - - virtual void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) override {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const 
boost::optional& dropTargetUUID, - bool stayTemp) override {} - - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) override {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) override {} - - void onTransactionStart(OperationContext* opCtx) override {} - - void onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) override {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept override {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) override { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) override {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) override {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) override {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) override {} - - void _onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) override{}; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; }; } // namespace mongo diff --git a/src/mongo/db/s/range_deleter_service_op_observer_test.cpp b/src/mongo/db/s/range_deleter_service_op_observer_test.cpp index 3d7d4767bf9ba..5490f0403e96c 100644 --- a/src/mongo/db/s/range_deleter_service_op_observer_test.cpp +++ b/src/mongo/db/s/range_deleter_service_op_observer_test.cpp @@ -27,8 +27,15 @@ * it in the license file. */ -#include "mongo/db/persistent_task_store.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/s/range_deleter_service.h" #include "mongo/db/s/range_deleter_service_test.h" +#include "mongo/db/s/range_deletion_task_gen.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/s/range_deleter_service_test.cpp b/src/mongo/db/s/range_deleter_service_test.cpp index ab5513ecebb7f..a38f2f675b453 100644 --- a/src/mongo/db/s/range_deleter_service_test.cpp +++ b/src/mongo/db/s/range_deleter_service_test.cpp @@ -27,14 +27,44 @@ * it in the license file. 
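In the range_deleter_service_op_observer.cpp hunk above, onDelete no longer deregisters the range deletion task inline: aboutToDelete stashes the deleted document on the OplogDeleteEntryArgs decoration, and onDelete defers the deregistration to a recoveryUnit onCommit callback, so the side effect only happens if the delete actually commits. Below is a standalone sketch of that defer-to-commit pattern using a hypothetical CommitHooks type; it is not the RecoveryUnit API, just the shape of the idea.

    #include <functional>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Hypothetical commit-hook registry: callbacks run only on commit and are
    // dropped on rollback, so side effects never outlive an aborted write.
    class CommitHooks {
    public:
        void onCommit(std::function<void()> fn) {
            _hooks.push_back(std::move(fn));
        }

        void commit() {
            for (auto& fn : _hooks)
                fn();
            _hooks.clear();
        }

        void rollback() {
            _hooks.clear();  // deferred side effects are simply discarded
        }

    private:
        std::vector<std::function<void()>> _hooks;
    };

    int main() {
        CommitHooks hooks;
        hooks.onCommit([] { std::cout << "deregister range deletion task\n"; });

        // hooks.rollback();  // on an aborted write nothing would be printed

        hooks.commit();  // only a successful commit triggers the deferred work
    }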
*/ -#include "mongo/db/s/range_deleter_service_test.h" +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/dbdirectclient.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/range_deleter_service.h" +#include "mongo/db/s/range_deleter_service_test.h" +#include "mongo/db/s/sharding_runtime_d_params_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/random.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -588,7 +618,7 @@ TEST_F(RangeDeleterServiceTest, TotalNumOfRegisteredTasks) { } TEST_F(RangeDeleterServiceTest, RegisterTaskWithDisableResumableRangeDeleterFlagEnabled) { - RAIIServerParameterControllerForTest enableFeatureFlag{"disableResumableRangeDeleter", true}; + RAIIServerParameterControllerForTest disableRangeDeleter{"disableResumableRangeDeleter", true}; auto rds = RangeDeleterService::get(opCtx); auto taskWithOngoingQueries = rangeDeletionTask0ForCollA; @@ -615,7 +645,7 @@ TEST_F(RangeDeleterServiceTest, uuidCollA, taskWithOngoingQueries->getTask().getRange()); ASSERT(!overlappingRangeFuture.isReady()); - RAIIServerParameterControllerForTest enableFeatureFlag{"disableResumableRangeDeleter", true}; + RAIIServerParameterControllerForTest disableRangeDeleter{"disableResumableRangeDeleter", true}; auto overlappingRangeFutureWhenDisabled = rds->getOverlappingRangeDeletionsFuture( uuidCollA, taskWithOngoingQueries->getTask().getRange()); ASSERT(overlappingRangeFutureWhenDisabled.isReady()); diff --git a/src/mongo/db/s/range_deleter_service_test.h b/src/mongo/db/s/range_deleter_service_test.h index cc44ac0322507..6c73256842773 100644 --- a/src/mongo/db/s/range_deleter_service_test.h +++ b/src/mongo/db/s/range_deleter_service_test.h @@ -29,9 +29,33 @@ #pragma once #include "mongo/db/s/range_deleter_service.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/range_deletion_task_gen.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/unittest/log_test.h" +#include 
"mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -61,9 +85,11 @@ class RangeDeleterServiceTest : public ShardServerTestFixture { // Instantiate some collection UUIDs and tasks to be used for testing UUID uuidCollA = UUID::gen(); - inline static const NamespaceString nsCollA{"test", "collA"}; + inline static const NamespaceString nsCollA = + NamespaceString::createNamespaceString_forTest("test", "collA"); UUID uuidCollB = UUID::gen(); - inline static const NamespaceString nsCollB{"test", "collB"}; + inline static const NamespaceString nsCollB = + NamespaceString::createNamespaceString_forTest("test", "collB"); inline static std::map nssWithUuid{}; @@ -78,7 +104,6 @@ class RangeDeleterServiceTest : public ShardServerTestFixture { void _setFilteringMetadataByUUID(OperationContext* opCtx, const UUID& uuid); // Scoped objects - RAIIServerParameterControllerForTest enableFeatureFlag{"featureFlagRangeDeleterService", true}; unittest::MinimumLoggedSeverityGuard _severityGuard{logv2::LogComponent::kShardingRangeDeleter, logv2::LogSeverity::Debug(2)}; }; diff --git a/src/mongo/db/s/range_deleter_service_test_util.cpp b/src/mongo/db/s/range_deleter_service_test_util.cpp index 3537682d12a7e..b85abb469dcfc 100644 --- a/src/mongo/db/s/range_deleter_service_test_util.cpp +++ b/src/mongo/db/s/range_deleter_service_test_util.cpp @@ -27,10 +27,41 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/s/range_deleter_service.h" #include "mongo/db/s/range_deleter_service_test.h" +#include "mongo/db/s/range_deletion_task_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/range_deletion_util.cpp b/src/mongo/db/s/range_deletion_util.cpp index 48d606806e88d..1baed77a5bf05 100644 --- a/src/mongo/db/s/range_deletion_util.cpp +++ b/src/mongo/db/s/range_deletion_util.cpp @@ -29,38 +29,54 @@ #include "mongo/db/s/range_deletion_util.h" -#include -#include +#include +#include +#include +#include #include +#include -#include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/exec/delete_stage.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/index/index_descriptor.h" #include "mongo/db/keypattern.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/explain_options.h" 
+#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_yield_policy.h" -#include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/query_planner.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/s/balancer_stats_registry.h" -#include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/range_deleter_service.h" +#include "mongo/db/s/range_deletion_task_gen.h" +#include "mongo/db/s/shard_key_index_util.h" #include "mongo/db/s/sharding_runtime_d_params_gen.h" #include "mongo/db/s/sharding_statistics.h" -#include "mongo/db/service_context.h" -#include "mongo/db/storage/remove_saver.h" -#include "mongo/db/write_concern.h" -#include "mongo/executor/task_executor.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog/sharding_catalog_client.h" -#include "mongo/util/cancellation.h" -#include "mongo/util/future_util.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kShardingRangeDeleter @@ -81,18 +97,19 @@ MONGO_FAIL_POINT_DEFINE(throwInternalErrorInDeleteRange); * the range failed. */ StatusWith deleteNextBatch(OperationContext* opCtx, - const CollectionPtr& collection, + const CollectionAcquisition& collection, BSONObj const& keyPattern, ChunkRange const& range, int numDocsToRemovePerBatch) { - invariant(collection); + invariant(collection.exists()); - auto const nss = collection->ns(); + auto const nss = collection.nss(); + auto const uuid = collection.uuid(); // The IndexChunk has a keyPattern that may apply to more than one index - we need to // select the index and get the full index keyPattern here. - const auto shardKeyIdx = - findShardKeyPrefixedIndex(opCtx, collection, keyPattern, /*requireSingleKey=*/false); + const auto shardKeyIdx = findShardKeyPrefixedIndex( + opCtx, collection.getCollectionPtr(), keyPattern, /*requireSingleKey=*/false); if (!shardKeyIdx) { LOGV2_ERROR( 23765, "Unable to find shard key index", "keyPattern"_attr = keyPattern, logAttrs(nss)); @@ -104,10 +121,16 @@ StatusWith deleteNextBatch(OperationContext* opCtx, uasserted(ErrorCodes::IndexNotFound, str::stream() << "Unable to find shard key index" - << " for " << nss.ns() << " and key pattern `" + << " for " << nss.toStringForErrorMsg() << " and key pattern `" << keyPattern.toString() << "'"); } + const auto rangeDeleterPriority = rangeDeleterHighPriority.load() + ? 
AdmissionContext::Priority::kImmediate + : AdmissionContext::Priority::kLow; + + ScopedAdmissionPriorityForLock priority{opCtx->lockState(), rangeDeleterPriority}; + // Extend bounds to match the index we found const KeyPattern indexKeyPattern(shardKeyIdx->keyPattern()); const auto extend = [&](const auto& key) { @@ -117,27 +140,21 @@ StatusWith deleteNextBatch(OperationContext* opCtx, const auto min = extend(range.getMin()); const auto max = extend(range.getMax()); - LOGV2_DEBUG(23766, + LOGV2_DEBUG(6180601, 1, - "Begin removal of {min} to {max} in {namespace}", "Begin removal of range", - "min"_attr = min, - "max"_attr = max, - logAttrs(nss)); + logAttrs(nss), + "collectionUUID"_attr = uuid, + "range"_attr = redact(range.toString())); auto deleteStageParams = std::make_unique(); deleteStageParams->fromMigrate = true; deleteStageParams->isMulti = true; deleteStageParams->returnDeleted = true; - if (serverGlobalParams.moveParanoia) { - deleteStageParams->removeSaver = - std::make_unique("moveChunk", nss.ns(), "cleaning"); - } - auto exec = InternalPlanner::deleteWithShardKeyIndexScan(opCtx, - &collection, + collection, std::move(deleteStageParams), *shardKeyIdx, min, @@ -172,13 +189,11 @@ StatusWith deleteNextBatch(OperationContext* opCtx, auto&& explainer = exec->getPlanExplainer(); auto&& [stats, _] = explainer.getWinningPlanStats(ExplainOptions::Verbosity::kExecStats); - LOGV2_WARNING(23776, - "Cursor error while trying to delete {min} to {max} in {namespace}, " - "stats: {stats}, error: {error}", + LOGV2_WARNING(6180602, "Cursor error while trying to delete range", - "min"_attr = redact(min), - "max"_attr = redact(max), logAttrs(nss), + "collectionUUID"_attr = uuid, + "range"_attr = redact(range.toString()), "stats"_attr = redact(stats), "error"_attr = redact(ex.toStatus())); throw; @@ -189,7 +204,7 @@ StatusWith deleteNextBatch(OperationContext* opCtx, } invariant(PlanExecutor::ADVANCED == state); - ShardingStatistics::get(opCtx).countDocsDeletedOnDonor.addAndFetch(1); + ShardingStatistics::get(opCtx).countDocsDeletedByRangeDeleter.addAndFetch(1); } while (++numDeleted < numDocsToRemovePerBatch); @@ -248,59 +263,12 @@ void markRangeDeletionTaskAsProcessing(OperationContext* opCtx, } } -/** - * Delete the range in a sequence of batches until there are no more documents to delete or deletion - * returns an error. - */ -ExecutorFuture deleteRangeInBatchesWithExecutor( - const std::shared_ptr& executor, - const NamespaceString& nss, - const UUID& collectionUuid, - const BSONObj& keyPattern, - const ChunkRange& range) { - return ExecutorFuture(executor).then([=] { - return withTemporaryOperationContext( - [=](OperationContext* opCtx) { - return deleteRangeInBatches(opCtx, nss.db(), collectionUuid, keyPattern, range); - }, - nss.db(), - collectionUuid); - }); -} - -ExecutorFuture waitForDeletionsToMajorityReplicate( - const std::shared_ptr& executor, - const NamespaceString& nss, - const UUID& collectionUuid, - const ChunkRange& range) { - return withTemporaryOperationContext( - [=](OperationContext* opCtx) { - repl::ReplClientInfo::forClient(opCtx->getClient()).setLastOpToSystemLastOpTime(opCtx); - auto clientOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - - LOGV2_DEBUG(5346202, - 1, - "Waiting for majority replication of local deletions", - logAttrs(nss), - "collectionUUID"_attr = collectionUuid, - "range"_attr = redact(range.toString()), - "clientOpTime"_attr = clientOpTime); - - // Asynchronously wait for majority write concern. 
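deleteRangeInBatches keeps the behaviour its comment describes: delete the range in a sequence of bounded batches until a batch removes nothing or deletion returns an error, with each batch now running under an admission priority picked from the rangeDeleterHighPriority server parameter. A minimal standalone loop showing the until-empty shape, with assumed deleteOneBatch/drainRange names (error handling and the delay between batches are left out):

    #include <vector>

    // Hypothetical "delete up to batchSize values in [min, max)" primitive;
    // returns how many elements were removed from the container.
    int deleteOneBatch(std::vector<int>& docs, int min, int max, int batchSize) {
        int deleted = 0;
        for (auto it = docs.begin(); it != docs.end() && deleted < batchSize;) {
            if (*it >= min && *it < max) {
                it = docs.erase(it);
                ++deleted;
            } else {
                ++it;
            }
        }
        return deleted;
    }

    // Keep issuing batches until one comes back empty; the real helper also stops
    // and reports on the first error and may sleep between batches.
    int drainRange(std::vector<int>& docs, int min, int max, int batchSize) {
        int total = 0;
        while (true) {
            const int deleted = deleteOneBatch(docs, min, max, batchSize);
            total += deleted;
            if (deleted == 0)
                break;
        }
        return total;
    }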
- return WaitForMajorityService::get(opCtx->getServiceContext()) - .waitUntilMajority(clientOpTime, CancellationToken::uncancelable()) - .thenRunOn(executor); - }, - nss.db(), - collectionUuid); -} - std::vector getPersistentRangeDeletionTasks(OperationContext* opCtx, const NamespaceString& nss) { std::vector tasks; PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - auto query = BSON(RangeDeletionTask::kNssFieldName << nss.ns()); + auto query = BSON(RangeDeletionTask::kNssFieldName << NamespaceStringUtil::serialize(nss)); store.forEach(opCtx, query, [&](const RangeDeletionTask& deletionTask) { tasks.push_back(std::move(deletionTask)); @@ -346,25 +314,29 @@ Status deleteRangeInBatches(OperationContext* opCtx, int numDeleted; const auto nss = [&]() { try { - AutoGetCollection collection( - opCtx, NamespaceStringOrUUID{dbName.toString(), collectionUuid}, MODE_IX); + const auto nssOrUuid = + NamespaceStringOrUUID{DatabaseNameUtil::serialize(dbName), collectionUuid}; + const auto collection = + acquireCollection(opCtx, + {nssOrUuid, + AcquisitionPrerequisites::kPretendUnsharded, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite}, + MODE_IX); LOGV2_DEBUG(6777800, 1, "Starting batch deletion", - logAttrs(collection.getNss()), + logAttrs(collection.nss()), "collectionUUID"_attr = collectionUuid, "range"_attr = redact(range.toString()), "numDocsToRemovePerBatch"_attr = numDocsToRemovePerBatch, "delayBetweenBatches"_attr = delayBetweenBatches); - numDeleted = uassertStatusOK(deleteNextBatch(opCtx, - collection.getCollection(), - keyPattern, - range, - numDocsToRemovePerBatch)); + numDeleted = uassertStatusOK(deleteNextBatch( + opCtx, collection, keyPattern, range, numDocsToRemovePerBatch)); - return collection.getNss(); + return collection.nss(); } catch (const ExceptionFor&) { // Throw specific error code that stops range deletions in case of errors uasserted( @@ -386,7 +358,7 @@ Status deleteRangeInBatches(OperationContext* opCtx, "numDeleted"_attr = numDeleted, logAttrs(nss), "collectionUUID"_attr = collectionUuid, - "range"_attr = range.toString()); + "range"_attr = redact(range.toString())); if (numDeleted > 0) { // (SERVER-62368) The range-deleter executor is mono-threaded, so @@ -420,7 +392,8 @@ void snapshotRangeDeletionsForRename(OperationContext* opCtx, // Clear out eventual snapshots associated with the target collection: always restart from a // clean state in case of stepdown or primary killed. 
PersistentTaskStore store(NamespaceString::kRangeDeletionForRenameNamespace); - store.remove(opCtx, BSON(RangeDeletionTask::kNssFieldName << toNss.ns())); + store.remove(opCtx, + BSON(RangeDeletionTask::kNssFieldName << NamespaceStringUtil::serialize(toNss))); auto rangeDeletionTasks = getPersistentRangeDeletionTasks(opCtx, fromNss); for (auto& task : rangeDeletionTasks) { @@ -438,7 +411,8 @@ void restoreRangeDeletionTasksForRename(OperationContext* opCtx, const Namespace PersistentTaskStore rangeDeletionsStore( NamespaceString::kRangeDeletionNamespace); - const auto query = BSON(RangeDeletionTask::kNssFieldName << nss.ns()); + const auto query = + BSON(RangeDeletionTask::kNssFieldName << NamespaceStringUtil::serialize(nss)); rangeDeletionsForRenameStore.forEach(opCtx, query, [&](const RangeDeletionTask& deletionTask) { try { @@ -456,123 +430,10 @@ void deleteRangeDeletionTasksForRename(OperationContext* opCtx, // Delete already restored snapshots associated to the target collection PersistentTaskStore rangeDeletionsForRenameStore( NamespaceString::kRangeDeletionForRenameNamespace); - rangeDeletionsForRenameStore.remove(opCtx, - BSON(RangeDeletionTask::kNssFieldName << toNss.ns())); + rangeDeletionsForRenameStore.remove( + opCtx, BSON(RangeDeletionTask::kNssFieldName << NamespaceStringUtil::serialize(toNss))); } -SharedSemiFuture removeDocumentsInRange( - const std::shared_ptr& executor, - SemiFuture waitForActiveQueriesToComplete, - const NamespaceString& nss, - const UUID& collectionUuid, - const BSONObj& keyPattern, - const ChunkRange& range, - Seconds delayForActiveQueriesOnSecondariesToComplete) { - return std::move(waitForActiveQueriesToComplete) - .thenRunOn(executor) - .onError([&](Status s) { - // The code does not expect the input future to have an error set on it, so we - // invariant here to prevent future misuse (no pun intended). - invariant(s.isOK()); - }) - .then([=]() mutable { - // Wait for possibly ongoing queries on secondaries to complete. - return sleepUntil(executor, - executor->now() + delayForActiveQueriesOnSecondariesToComplete); - }) - .then([=]() mutable { - LOGV2_DEBUG(23772, - 1, - "Beginning deletion of documents", - logAttrs(nss), - "range"_attr = redact(range.toString())); - - return deleteRangeInBatchesWithExecutor( - executor, nss, collectionUuid, keyPattern, range) - .onCompletion([=](Status s) { - if (!s.isOK() && - s.code() != - ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist) { - // Propagate any errors to the onCompletion() handler below. - return ExecutorFuture(executor, s); - } - - // We wait for majority write concern even if the range deletion task document - // doesn't exist to guarantee the deletion (which must have happened earlier) is - // visible to the caller at non-local read concerns. - return waitForDeletionsToMajorityReplicate(executor, nss, collectionUuid, range) - .then([=] { - LOGV2_DEBUG(5346201, - 1, - "Finished waiting for majority for deleted batch", - logAttrs(nss), - "range"_attr = redact(range.toString())); - // Propagate any errors to the onCompletion() handler below. 
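snapshotRangeDeletionsForRename and restoreRangeDeletionTasksForRename, touched in this hunk, park the pending range deletion tasks of the source namespace in a dedicated side store before the rename, after first clearing any earlier snapshot for the target namespace so a stepdown mid-rename cannot leave duplicates, and re-insert them once the rename is done (cleanup of the snapshot happens in a separate step). A toy in-memory sketch of that snapshot-then-restore flow, with assumed Task/Store names:

    #include <map>
    #include <string>
    #include <utility>

    // Hypothetical pending task, keyed by the namespace it targets.
    struct Task {
        std::string nss;
        std::string range;
    };

    using Store = std::multimap<std::string, Task>;  // nss -> tasks

    // Before renaming fromNss -> toNss: drop any stale snapshot for toNss, then
    // copy every pending task for fromNss into the side store under toNss.
    void snapshotForRename(const Store& live, Store& snapshot,
                           const std::string& fromNss, const std::string& toNss) {
        snapshot.erase(toNss);  // restart from a clean state after a stepdown
        auto [begin, end] = live.equal_range(fromNss);
        for (auto it = begin; it != end; ++it) {
            Task copy = it->second;
            copy.nss = toNss;
            snapshot.emplace(toNss, std::move(copy));
        }
    }

    // After the rename: copy the snapshotted tasks back into the live store.
    // (A separate cleanup step would then clear the snapshot for toNss.)
    void restoreForRename(Store& live, const Store& snapshot, const std::string& toNss) {
        auto [begin, end] = snapshot.equal_range(toNss);
        for (auto it = begin; it != end; ++it)
            live.emplace(toNss, it->second);
    }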
- return s; - }); - }); - }) - .onCompletion([=](Status s) { - if (s.isOK()) { - LOGV2_DEBUG(23773, - 1, - "Completed deletion of documents in {namespace} range {range}", - "Completed deletion of documents", - logAttrs(nss), - "range"_attr = redact(range.toString())); - } else { - LOGV2(23774, - "Failed to delete documents in {namespace} range {range} due to {error}", - "Failed to delete documents", - logAttrs(nss), - "range"_attr = redact(range.toString()), - "error"_attr = redact(s)); - } - - if (s.code() == ErrorCodes::RangeDeletionAbandonedBecauseTaskDocumentDoesNotExist) { - return Status::OK(); - } - - if (!s.isOK() && - s.code() != - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist) { - // Propagate any errors to callers waiting on the result. - return s; - } - - try { - withTemporaryOperationContext( - [&](OperationContext* opCtx) { - removePersistentRangeDeletionTask(opCtx, collectionUuid, range); - }, - nss.db(), - collectionUuid); - } catch (const DBException& e) { - LOGV2_ERROR(23770, - "Failed to delete range deletion task for range {range} in collection " - "{namespace} due to {error}", - "Failed to delete range deletion task", - "range"_attr = range, - logAttrs(nss), - "error"_attr = e.what()); - - return e.toStatus(); - } - - LOGV2_DEBUG(23775, - 1, - "Completed removal of persistent range deletion task for {namespace} " - "range {range}", - "Completed removal of persistent range deletion task", - logAttrs(nss), - "range"_attr = redact(range.toString())); - - // Propagate any errors to callers waiting on the result. - return s; - }) - .semi() - .share(); -} void persistUpdatedNumOrphans(OperationContext* opCtx, const UUID& collectionUuid, @@ -585,7 +446,7 @@ void persistUpdatedNumOrphans(OperationContext* opCtx, // The DBDirectClient will not retry WriteConflictExceptions internally while holding an X // mode lock, so we need to retry at this level. writeConflictRetry( - opCtx, "updateOrphanCount", NamespaceString::kRangeDeletionNamespace.ns(), [&] { + opCtx, "updateOrphanCount", NamespaceString::kRangeDeletionNamespace, [&] { store.update(opCtx, query, BSON("$inc" << BSON(RangeDeletionTask::kNumOrphanDocsFieldName diff --git a/src/mongo/db/s/range_deletion_util.h b/src/mongo/db/s/range_deletion_util.h index 1042137a18487..390021614b0d1 100644 --- a/src/mongo/db/s/range_deletion_util.h +++ b/src/mongo/db/s/range_deletion_util.h @@ -29,43 +29,33 @@ #pragma once #include +#include #include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/range_deletion_task_gen.h" +#include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { constexpr auto kRangeDeletionThreadName = "range-deleter"_sd; -/** - * DO NOT USE - only necessary for the legacy range deleter - * - * Deletes a range of orphaned documents for the given namespace and collection UUID. Returns a - * future which will be resolved when the range has finished being deleted. 
The resulting future - * will contain an error in cases where the range could not be deleted successfully. - * - * The overall algorithm is as follows: - * 1. Wait for the all active queries which could be using the range to resolve by waiting - * for the waitForActiveQueriesToComplete future to resolve. - * 2. Waits for delayForActiveQueriesOnSecondariesToComplete seconds before deleting any documents, - * to give queries running on secondaries a chance to finish. - * 3. Delete documents in a series of batches with up to numDocsToRemovePerBatch documents per - * batch, with a delay of delayBetweenBatches milliseconds in between batches. - */ -SharedSemiFuture removeDocumentsInRange( - const std::shared_ptr& executor, - SemiFuture waitForActiveQueriesToComplete, - const NamespaceString& nss, - const UUID& collectionUuid, - const BSONObj& keyPattern, - const ChunkRange& range, - Seconds delayForActiveQueriesOnSecondariesToComplete); - /** * Delete the range in a sequence of batches until there are no more documents to delete or deletion * returns an error. @@ -130,10 +120,6 @@ auto withTemporaryOperationContext(Callable&& callable, const UUID& collectionUUID, bool writeToRangeDeletionNamespace = false) { ThreadClient tc(kRangeDeletionThreadName, getGlobalServiceContext()); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } auto uniqueOpCtx = Client::getCurrent()->makeOperationContext(); auto opCtx = uniqueOpCtx.get(); diff --git a/src/mongo/db/s/range_deletion_util_test.cpp b/src/mongo/db/s/range_deletion_util_test.cpp index c04869cb31e39..06e97933ccb42 100644 --- a/src/mongo/db/s/range_deletion_util_test.cpp +++ b/src/mongo/db/s/range_deletion_util_test.cpp @@ -27,23 +27,50 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/logical_time.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/collection_sharding_state.h" -#include "mongo/db/s/metadata_manager.h" #include "mongo/db/s/migration_util.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/range_deletion_task_gen.h" #include "mongo/db/s/range_deletion_util.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/db/s/sharding_runtime_d_params_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/db/vector_clock.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/platform/random.h" -#include "mongo/unittest/death_test.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include 
"mongo/util/duration.h" #include "mongo/util/fail_point.h" namespace mongo { @@ -142,7 +169,7 @@ class RangeDeleterTest : public ShardServerTestFixture { class RenameRangeDeletionsTest : public RangeDeleterTest { public: const NamespaceString kToNss = - NamespaceString::createNamespaceString_forTest(kNss.db(), "toColl"); + NamespaceString::createNamespaceString_forTest(kNss.db_forTest(), "toColl"); void setUp() override { RangeDeleterTest::setUp(); @@ -211,647 +238,6 @@ RangeDeletionTask insertRangeDeletionTask(OperationContext* opCtx, return insertRangeDeletionTask(opCtx, kNss, uuid, range, numOrphans); } -TEST_F(RangeDeleterTest, - RemoveDocumentsInRangeRemovesAllDocumentsInRangeWhenAllDocumentsFitInSingleBatch) { - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto queriesComplete = SemiFuture::makeReady(); - - setFilteringMetadataWithUUID(uuid()); - auto task = insertRangeDeletionTask(_opCtx, uuid(), range, 1); - DBDirectClient dbclient(_opCtx); - dbclient.insert(kNss, BSON(kShardKey << 5)); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - cleanupComplete.get(); - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 0); -} - -TEST_F(RangeDeleterTest, - RemoveDocumentsInRangeRemovesAllDocumentsInRangeWhenSeveralBatchesAreRequired) { - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - // More documents than the batch size. - const auto numDocsToInsert = 3; - auto queriesComplete = SemiFuture::makeReady(); - - // Insert documents in range. - setFilteringMetadataWithUUID(uuid()); - auto task = insertRangeDeletionTask(_opCtx, uuid(), range, numDocsToInsert); - DBDirectClient dbclient(_opCtx); - for (auto i = 0; i < numDocsToInsert; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - cleanupComplete.get(); - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 0); -} - -TEST_F(RangeDeleterTest, - RemoveDocumentsInRangeDoesNotRemoveDocumentsWithKeysLowerThanMinKeyOfRange) { - const auto numDocsToInsert = 3; - - const auto minKey = 0; - const auto range = ChunkRange(BSON(kShardKey << minKey), BSON(kShardKey << 10)); - - auto queriesComplete = SemiFuture::makeReady(); - - setFilteringMetadataWithUUID(uuid()); - auto task = insertRangeDeletionTask(_opCtx, uuid(), range, 0); - DBDirectClient dbclient(_opCtx); - // All documents below the range. - for (auto i = minKey - numDocsToInsert; i < minKey; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - cleanupComplete.get(); - // No documents should have been deleted. 
- ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), numDocsToInsert); -} - -TEST_F(RangeDeleterTest, - RemoveDocumentsInRangeDoesNotRemoveDocumentsWithKeysGreaterThanOrEqualToMaxKeyOfRange) { - const auto numDocsToInsert = 3; - - const auto maxKey = 10; - const auto range = ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << maxKey)); - - auto queriesComplete = SemiFuture::makeReady(); - - setFilteringMetadataWithUUID(uuid()); - auto task = insertRangeDeletionTask(_opCtx, uuid(), range, 0); - DBDirectClient dbclient(_opCtx); - // All documents greater than or equal to the range. - for (auto i = maxKey; i < maxKey + numDocsToInsert; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - cleanupComplete.get(); - // No documents should have been deleted. - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), numDocsToInsert); -} - -TEST_F(RangeDeleterTest, - RemoveDocumentsInRangeDoesNotRemoveDocumentsForCollectionWithSameNamespaceAndDifferentUUID) { - const auto numDocsToInsert = 3; - const auto range = ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)); - - setFilteringMetadataWithUUID(uuid()); - auto task = insertRangeDeletionTask(_opCtx, uuid(), range, numDocsToInsert); - const auto collUuidWrongTaks = UUID::gen(); - auto wrongTask = insertRangeDeletionTask(_opCtx, collUuidWrongTaks, range, numDocsToInsert); - DBDirectClient dbclient(_opCtx); - for (auto i = 0; i < numDocsToInsert; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - auto queriesComplete = SemiFuture::makeReady(); - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - // Use a different UUID from the collection UUID. 
- collUuidWrongTaks, - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - - ASSERT_THROWS_CODE(cleanupComplete.get(), - DBException, - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist); - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), numDocsToInsert); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeThrowsErrorWhenCollectionDoesNotExist) { - auto queriesComplete = SemiFuture::makeReady(); - const auto notExistingNss = - NamespaceString::createNamespaceString_forTest("someFake.namespace"); - const auto notExistingCollectionUUID = UUID::gen(); - const auto range = ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto task = - insertRangeDeletionTask(_opCtx, notExistingNss, notExistingCollectionUUID, range, 0); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - notExistingNss, - notExistingCollectionUUID, - kShardKeyPattern, - ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)), - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - - ASSERT_THROWS_CODE(cleanupComplete.get(), - DBException, - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeLeavesDocumentsWhenTaskDocumentDoesNotExist) { - auto replCoord = checked_cast( - repl::ReplicationCoordinator::get(getServiceContext())); - - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - - setFilteringMetadataWithUUID(uuid()); - DBDirectClient dbclient(_opCtx); - dbclient.insert(kNss, BSON(kShardKey << 5)); - - // We intentionally skip inserting a range deletion task document to simulate it already having - // been deleted. - - // We should wait for replication after attempting to delete the document in the range even when - // the task document doesn't exist. - const auto expectedNumTimesWaitedForReplication = 1; - int numTimesWaitedForReplication = 0; - - // Override special handler for waiting for replication to count the number of times we wait for - // replication. - replCoord->setAwaitReplicationReturnValueFunction( - [&](OperationContext* opCtx, const repl::OpTime& opTime) { - ++numTimesWaitedForReplication; - return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0)); - }); - - auto queriesComplete = SemiFuture::makeReady(); - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - cleanupComplete.get(); - - // Document should not have been deleted. - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 1); - ASSERT_EQ(numTimesWaitedForReplication, expectedNumTimesWaitedForReplication); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationAfterDeletingSingleBatch) { - auto replCoord = checked_cast( - repl::ReplicationCoordinator::get(getServiceContext())); - - const auto numDocsToInsert = 3; - const auto numDocsToRemovePerBatch = 10; - rangeDeleterBatchSize.store(numDocsToRemovePerBatch); - const auto numBatches = ceil((double)numDocsToInsert / numDocsToRemovePerBatch); - ASSERT_EQ(numBatches, 1); - // We should wait twice: Once after deleting documents in the range, and once after deleting the - // range deletion task. 
- const auto expectedNumTimesWaitedForReplication = 2; - - setFilteringMetadataWithUUID(uuid()); - DBDirectClient dbclient(_opCtx); - for (auto i = 0; i < numDocsToInsert; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - // Insert range deletion task for this collection and range. - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - int numTimesWaitedForReplication = 0; - // Override special handler for waiting for replication to count the number of times we wait for - // replication. - replCoord->setAwaitReplicationReturnValueFunction( - [&](OperationContext* opCtx, const repl::OpTime& opTime) { - ++numTimesWaitedForReplication; - return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0)); - }); - - auto queriesComplete = SemiFuture::makeReady(); - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - cleanupComplete.get(); - - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 0); - ASSERT_EQ(numTimesWaitedForReplication, expectedNumTimesWaitedForReplication); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeWaitsForReplicationOnlyOnceAfterSeveralBatches) { - auto replCoord = checked_cast( - repl::ReplicationCoordinator::get(getServiceContext())); - - const auto numDocsToInsert = 3; - const auto numDocsToRemovePerBatch = 1; - rangeDeleterBatchSize.store(numDocsToRemovePerBatch); - const auto numBatches = ceil((double)numDocsToInsert / numDocsToRemovePerBatch); - ASSERT_GTE(numBatches, 1); - - // We should wait twice: Once after deleting documents in the range, and once after deleting the - // range deletion task. - const auto expectedNumTimesWaitedForReplication = 2; - - setFilteringMetadataWithUUID(uuid()); - DBDirectClient dbclient(_opCtx); - for (auto i = 0; i < numDocsToInsert; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - // Insert range deletion task for this collection and range. - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - int numTimesWaitedForReplication = 0; - - // Set special handler for waiting for replication. - replCoord->setAwaitReplicationReturnValueFunction( - [&](OperationContext* opCtx, const repl::OpTime& opTime) { - ++numTimesWaitedForReplication; - return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0)); - }); - - auto queriesComplete = SemiFuture::makeReady(); - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - cleanupComplete.get(); - - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 0); - ASSERT_EQ(numTimesWaitedForReplication, expectedNumTimesWaitedForReplication); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeDoesNotWaitForReplicationIfErrorDuringDeletion) { - auto replCoord = checked_cast( - repl::ReplicationCoordinator::get(getServiceContext())); - - const auto numDocsToInsert = 3; - - setFilteringMetadataWithUUID(uuid()); - DBDirectClient dbclient(_opCtx); - for (auto i = 0; i < numDocsToInsert; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - // Insert range deletion task for this collection and range. 
- const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - int numTimesWaitedForReplication = 0; - // Override special handler for waiting for replication to count the number of times we wait for - // replication. - replCoord->setAwaitReplicationReturnValueFunction( - [&](OperationContext* opCtx, const repl::OpTime& opTime) { - ++numTimesWaitedForReplication; - return repl::ReplicationCoordinator::StatusAndDuration(Status::OK(), Milliseconds(0)); - }); - - // Pretend we stepped down. - replCoord->setCanAcceptNonLocalWrites(false); - std::ignore = replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY); - - auto queriesComplete = SemiFuture::makeReady(); - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - ASSERT_THROWS_CODE(cleanupComplete.get(), DBException, ErrorCodes::PrimarySteppedDown); - ASSERT_EQ(numTimesWaitedForReplication, 0); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRetriesOnWriteConflictException) { - // Enable fail point to throw WriteConflictException. - globalFailPointRegistry() - .find("throwWriteConflictExceptionInDeleteRange") - ->setMode(FailPoint::nTimes, 3 /* Throw a few times before disabling. */); - - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto queriesComplete = SemiFuture::makeReady(); - - setFilteringMetadataWithUUID(uuid()); - DBDirectClient dbclient(_opCtx); - dbclient.insert(kNss, BSON(kShardKey << 5)); - - // Insert range deletion task for this collection and range. - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - cleanupComplete.get(); - - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 0); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRetriesOnUnexpectedError) { - // Enable fail point to throw InternalError. - globalFailPointRegistry() - .find("throwInternalErrorInDeleteRange") - ->setMode(FailPoint::nTimes, 3 /* Throw a few times before disabling. */); - - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto queriesComplete = SemiFuture::makeReady(); - - setFilteringMetadataWithUUID(uuid()); - DBDirectClient dbclient(_opCtx); - dbclient.insert(kNss, BSON(kShardKey << 5)); - - // Insert range deletion task for this collection and range. - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - cleanupComplete.get(); - - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 0); -} - - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRespectsDelayInBetweenBatches) { - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - // More documents than the batch size. - const auto numDocsToInsert = 3; - const auto numDocsToRemovePerBatch = 1; - rangeDeleterBatchSize.store(numDocsToRemovePerBatch); - - auto queriesComplete = SemiFuture::makeReady(); - // Insert documents in range. 
- setFilteringMetadataWithUUID(uuid()); - auto task = insertRangeDeletionTask(_opCtx, uuid(), range, numDocsToInsert); - DBDirectClient dbclient(_opCtx); - for (auto i = 0; i < numDocsToInsert; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - // The deletion of a document in unit tests with ephemeral storage engine is usually - // extremely fast (less than 5ms), so setting the delay to 1 second ensures the test - // is relevant: it is very improbable for a deletion to last so much, even on slow - // machines. - const auto delayBetweenBatchesMS = 1000 /* 1 second */; - rangeDeleterBatchDelayMS.store(delayBetweenBatchesMS); - - auto beforeRangeDeletion = Date_t::now(); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - cleanupComplete.get(); - auto afterRangeDeletion = Date_t::now(); - auto rangeDeletionTimeMS = - afterRangeDeletion.toMillisSinceEpoch() - beforeRangeDeletion.toMillisSinceEpoch(); - ASSERT(rangeDeletionTimeMS >= delayBetweenBatchesMS * numDocsToInsert); - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 0); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRespectsOrphanCleanupDelay) { - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - // More documents than the batch size. - const auto numDocsToInsert = 3; - const auto orphanCleanupDelay = Seconds(10); - auto queriesComplete = SemiFuture::makeReady(); - - // Insert documents in range. - setFilteringMetadataWithUUID(uuid()); - auto task = insertRangeDeletionTask(_opCtx, uuid(), range, numDocsToInsert); - DBDirectClient dbclient(_opCtx); - for (auto i = 0; i < numDocsToInsert; ++i) { - dbclient.insert(kNss, BSON(kShardKey << i)); - } - - auto cleanupComplete = removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - orphanCleanupDelay); - - // A best-effort check that cleanup has not completed without advancing the clock. - sleepsecs(1); - ASSERT_FALSE(cleanupComplete.isReady()); - - // Advance the time past the delay until cleanup is complete. This cannot be made exact because - // there's no way to tell when the sleep operation gets hit exactly, so instead we incrementally - // advance time until it's ready. - while (!cleanupComplete.isReady()) { - executor::NetworkInterfaceMock::InNetworkGuard guard(network()); - network()->advanceTime(network()->now() + orphanCleanupDelay); - } - - cleanupComplete.get(); - - ASSERT_EQUALS(dbclient.count(kNss, BSONObj()), 0); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeRemovesRangeDeletionTaskOnSuccess) { - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto queriesComplete = SemiFuture::makeReady(); - - setFilteringMetadataWithUUID(uuid()); - DBDirectClient dbclient(_opCtx); - dbclient.insert(kNss, BSON(kShardKey << 5)); - - // Insert range deletion task for this collection and range. - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - cleanupComplete.get(); - // Document should have been deleted. 
- PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - ASSERT_EQUALS(countDocsInConfigRangeDeletions(store, _opCtx), 0); -} - -TEST_F(RangeDeleterTest, - RemoveDocumentsInRangeRemovesRangeDeletionTaskOnCollectionDroppedErrorWhenStillPrimary) { - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto queriesComplete = SemiFuture::makeReady(); - - DBDirectClient dbclient(_opCtx); - dbclient.insert(kNss, BSON(kShardKey << 5)); - - // Insert range deletion task for this collection and range. - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - dbclient.dropCollection(kNss); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - ASSERT_THROWS_CODE(cleanupComplete.get(), - DBException, - ErrorCodes::RangeDeletionAbandonedBecauseCollectionWithUUIDDoesNotExist); - - // Document should have been deleted. - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - ASSERT_EQUALS(countDocsInConfigRangeDeletions(store, _opCtx), 0); -} - -TEST_F(RangeDeleterTest, - RemoveDocumentsInRangeDoesNotRemoveRangeDeletionTaskOnErrorWhenNotStillPrimary) { - const ChunkRange range(BSON(kShardKey << 0), BSON(kShardKey << 10)); - auto queriesComplete = SemiFuture::makeReady(); - - setFilteringMetadataWithUUID(uuid()); - DBDirectClient dbclient(_opCtx); - dbclient.insert(kNss, BSON(kShardKey << 5)); - - // Insert range deletion task for this collection and range. - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - // Pretend we stepped down. - auto replCoord = checked_cast( - repl::ReplicationCoordinator::get(getServiceContext())); - replCoord->setCanAcceptNonLocalWrites(false); - std::ignore = replCoord->setFollowerMode(repl::MemberState::RS_SECONDARY); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - ASSERT_THROWS_CODE(cleanupComplete.get(), DBException, ErrorCodes::PrimarySteppedDown); - - // Pretend we stepped back up so we can read the task store. - replCoord->setCanAcceptNonLocalWrites(true); - std::ignore = replCoord->setFollowerMode(repl::MemberState::RS_PRIMARY); - - // Document should not have been deleted. - PersistentTaskStore store(NamespaceString::kRangeDeletionNamespace); - ASSERT_EQUALS(countDocsInConfigRangeDeletions(store, _opCtx), 1); -} - -// The input future should never have an error. -DEATH_TEST_F(RangeDeleterTest, RemoveDocumentsInRangeCrashesIfInputFutureHasError, "invariant") { - const ChunkRange range = ChunkRange(BSON(kShardKey << 0), BSON(kShardKey << 10)); - DBDirectClient dbclient(_opCtx); - dbclient.insert(kNss, BSON(kShardKey << 5)); - - // Insert range deletion task for this collection and range. - auto t = insertRangeDeletionTask(_opCtx, uuid(), range); - - auto queriesCompletePf = makePromiseFuture(); - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move((queriesCompletePf.future)).semi(), - kNss, - uuid(), - kShardKeyPattern, - range, - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete */); - - - // Should cause an invariant failure. 
- queriesCompletePf.promise.setError(Status(ErrorCodes::InternalError, "Some unexpected error")); - cleanupComplete.get(); -} - -TEST_F(RangeDeleterTest, RemoveDocumentsInRangeDoesNotCrashWhenShardKeyIndexDoesNotExist) { - auto queriesComplete = SemiFuture::makeReady(); - const std::string kNoShardKeyIndexMsg("Unable to find shard key index for"); - auto logCountBefore = countTextFormatLogLinesContaining(kNoShardKeyIndexMsg); - - auto cleanupComplete = - removeDocumentsInRange(executor(), - std::move(queriesComplete), - kNss, - uuid(), - BSON("x" << 1) /* shard key pattern */, - ChunkRange(BSON("x" << 0), BSON("x" << 10)), - Seconds(0) /* delayForActiveQueriesOnSecondariesToComplete*/); - - // Range deleter will keep on retrying when it encounters non-stepdown errors. Make it run - // a few iterations and then create the index to make it exit the retry loop. - while (countTextFormatLogLinesContaining(kNoShardKeyIndexMsg) < logCountBefore) { - sleepmicros(100); - } - - DBDirectClient client(_opCtx); - client.createIndex(kNss, BSON("x" << 1)); - - cleanupComplete.get(); -} - /** * Tests that the rename range deletion flow: * - Renames range deletions from source to target collection @@ -878,7 +264,8 @@ TEST_F(RenameRangeDeletionsTest, BasicRenameRangeDeletionsTest) { restoreRangeDeletionTasksForRename(_opCtx, kToNss); deleteRangeDeletionTasksForRename(_opCtx, kNss, kToNss); - const auto targetRangeDeletionsQuery = BSON(RangeDeletionTask::kNssFieldName << kToNss.ns()); + const auto targetRangeDeletionsQuery = + BSON(RangeDeletionTask::kNssFieldName << kToNss.ns_forTest()); // Make sure range deletions for the TO collection are found ASSERT_EQ(10, rangeDeletionsStore.count(_opCtx, targetRangeDeletionsQuery)); @@ -940,7 +327,8 @@ TEST_F(RenameRangeDeletionsTest, IdempotentRenameRangeDeletionsTest) { deleteRangeDeletionTasksForRename(_opCtx, kNss, kToNss); } - const auto targetRangeDeletionsQuery = BSON(RangeDeletionTask::kNssFieldName << kToNss.ns()); + const auto targetRangeDeletionsQuery = + BSON(RangeDeletionTask::kNssFieldName << kToNss.ns_forTest()); // Make sure range deletions for the TO collection are found ASSERT_EQ(10, rangeDeletionsStore.count(_opCtx, targetRangeDeletionsQuery)); diff --git a/src/mongo/db/s/read_only_catalog_cache_loader.cpp b/src/mongo/db/s/read_only_catalog_cache_loader.cpp index 7744a51704896..0747ba404257f 100644 --- a/src/mongo/db/s/read_only_catalog_cache_loader.cpp +++ b/src/mongo/db/s/read_only_catalog_cache_loader.cpp @@ -27,9 +27,8 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/s/read_only_catalog_cache_loader.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/s/read_only_catalog_cache_loader.h b/src/mongo/db/s/read_only_catalog_cache_loader.h index a0b10834f4c9d..af457b178af58 100644 --- a/src/mongo/db/s/read_only_catalog_cache_loader.h +++ b/src/mongo/db/s/read_only_catalog_cache_loader.h @@ -29,7 +29,14 @@ #pragma once +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/config_server_catalog_cache_loader.h" +#include "mongo/util/future.h" namespace mongo { @@ -46,8 +53,10 @@ class ReadOnlyCatalogCacheLoader final : public CatalogCacheLoader { void initializeReplicaSetRole(bool isPrimary) override {} void onStepDown() override {} void onStepUp() override {} + void onReplicationRollback() override {} void shutDown() override; - void notifyOfCollectionPlacementVersionUpdate(const NamespaceString& nss) override {} + void notifyOfCollectionRefreshEndMarkerSeen(const NamespaceString& nss, + const Timestamp& commitTime) override {} void waitForCollectionFlush(OperationContext* opCtx, const NamespaceString& nss) override; void waitForDatabaseFlush(OperationContext* opCtx, StringData dbName) override; diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp index 7c8f41ed1b163..0bd6a01adf93d 100644 --- a/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp +++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.cpp @@ -30,17 +30,48 @@ #include "mongo/db/s/refine_collection_shard_key_coordinator.h" -#include "mongo/db/catalog/collection_uuid_mismatch.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/shard_key_util.h" #include "mongo/db/s/sharding_ddl_util.h" -#include "mongo/logv2/log.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -56,30 +87,30 @@ void notifyChangeStreamsOnRefineCollectionShardKeyComplete(OperationContext* opC const UUID& collUUID) { const std::string oMessage = 
str::stream() - << "Refine shard key for collection " << collNss << " with " << shardKey.toString(); + << "Refine shard key for collection " << NamespaceStringUtil::serialize(collNss) << " with " + << shardKey.toString(); BSONObjBuilder cmdBuilder; - cmdBuilder.append("refineCollectionShardKey", collNss.ns()); + cmdBuilder.append("refineCollectionShardKey", NamespaceStringUtil::serialize(collNss)); cmdBuilder.append("shardKey", shardKey.toBSON()); cmdBuilder.append("oldShardKey", oldShardKey.toBSON()); auto const serviceContext = opCtx->getClient()->getServiceContext(); - writeConflictRetry( - opCtx, "RefineCollectionShardKey", NamespaceString::kRsOplogNamespace.ns(), [&] { - AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); - WriteUnitOfWork uow(opCtx); - serviceContext->getOpObserver()->onInternalOpMessage(opCtx, - collNss, - collUUID, - BSON("msg" << oMessage), - cmdBuilder.obj(), - boost::none, - boost::none, - boost::none, - boost::none); - uow.commit(); - }); + writeConflictRetry(opCtx, "RefineCollectionShardKey", NamespaceString::kRsOplogNamespace, [&] { + AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); + WriteUnitOfWork uow(opCtx); + serviceContext->getOpObserver()->onInternalOpMessage(opCtx, + collNss, + collUUID, + BSON("msg" << oMessage), + cmdBuilder.obj(), + boost::none, + boost::none, + boost::none, + boost::none); + uow.commit(); + }); } } // namespace @@ -157,14 +188,6 @@ ExecutorFuture RefineCollectionShardKeyCoordinator::_runImpl( uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(std::move(cmdResponse))); })) - .onError([this, anchor = shared_from_this()](const Status& status) { - LOGV2_ERROR(5277700, - "Error running refine collection shard key", - logAttrs(nss()), - "error"_attr = redact(status)); - - return status; - }) .onCompletion([this, anchor = shared_from_this()](const Status& status) { auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); diff --git a/src/mongo/db/s/refine_collection_shard_key_coordinator.h b/src/mongo/db/s/refine_collection_shard_key_coordinator.h index c461383e87622..1e1b22cd47c2d 100644 --- a/src/mongo/db/s/refine_collection_shard_key_coordinator.h +++ b/src/mongo/db/s/refine_collection_shard_key_coordinator.h @@ -29,9 +29,25 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/keypattern.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/s/refine_collection_shard_key_coordinator_document_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/refresh_query_analyzer_configuration_cmd.cpp b/src/mongo/db/s/refresh_query_analyzer_configuration_cmd.cpp index 0efae360f4814..8d8da5ae0945c 100644 --- a/src/mongo/db/s/refresh_query_analyzer_configuration_cmd.cpp +++ b/src/mongo/db/s/refresh_query_analyzer_configuration_cmd.cpp @@ -27,15 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/query_analysis_coordinator.h" -#include "mongo/logv2/log.h" -#include "mongo/s/analyze_shard_key_feature_flag_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/refresh_query_analyzer_configuration_cmd_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -66,7 +78,7 @@ class RefreshQueryAnalyzerConfigurationCmd uassert( ErrorCodes::IllegalOperation, "_refreshQueryAnalyzerConfiguration command is not supported on a shardsvr mongod", - !serverGlobalParams.clusterRole.exclusivelyHasShardRole()); + !serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer)); auto coodinator = analyze_shard_key::QueryAnalysisCoordinator::get(opCtx); auto configurations = coodinator->getNewConfigurationsForSampler( @@ -87,8 +99,9 @@ class RefreshQueryAnalyzerConfigurationCmd uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; @@ -107,10 +120,7 @@ class RefreshQueryAnalyzerConfigurationCmd std::string help() const override { return "Refreshes the query analyzer configurations for all collections."; } -}; - -MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(RefreshQueryAnalyzerConfigurationCmd, - analyze_shard_key::gFeatureFlagAnalyzeShardKey); +} refreshQueryAnalyzerConfigurationCmd; } // namespace diff --git a/src/mongo/db/s/rename_collection_coordinator.cpp b/src/mongo/db/s/rename_collection_coordinator.cpp index 8610f11aa14d8..f997e4eb78276 100644 --- a/src/mongo/db/s/rename_collection_coordinator.cpp +++ b/src/mongo/db/s/rename_collection_coordinator.cpp @@ -28,31 +28,84 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/rename_collection_coordinator.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" -#include "mongo/db/catalog/database_holder.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/ops/insert.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" 
+#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/persistent_task_store.h" -#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/query/distinct_command_gen.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/s/forwardable_operation_metadata.h" +#include "mongo/db/s/rename_collection_coordinator.h" #include "mongo/db/s/sharded_index_catalog_commands_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" #include "mongo/db/s/sharding_ddl_util.h" -#include "mongo/db/s/sharding_index_catalog_ddl_util.h" #include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/transaction/transaction_api.h" #include "mongo/db/vector_clock.h" +#include "mongo/db/vector_clock_mutable.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/async_rpc.h" +#include "mongo/executor/async_rpc_util.h" +#include "mongo/executor/task_executor.h" #include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/analyze_shard_key_documents_gen.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/s/index_version.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -84,7 +137,7 @@ boost::optional getCollectionUUID(OperationContext* opCtx, } uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << nss << " doesn't exist.", + str::stream() << "Collection " << nss.toStringForErrorMsg() << " doesn't exist.", collPtr); return collPtr->uuid(); @@ -95,18 +148,18 @@ void renameIndexMetadataInShards(OperationContext* opCtx, const RenameCollectionRequest& request, const OperationSessionInfo& osi, const std::shared_ptr& executor, - RenameCollectionCoordinatorDocument* doc) { + RenameCollectionCoordinatorDocument* doc, + const CancellationToken& token) { const auto [configTime, newIndexVersion] = [opCtx]() -> std::pair { VectorClock::VectorTime vt = VectorClock::get(opCtx)->getTime(); return {vt.configTime(), vt.clusterTime().asTimestamp()}; }(); - // Bump the index version only if there are indexes in the source - // collection. + // Bump the index version only if there are indexes in the source collection. auto optShardedCollInfo = doc->getOptShardedCollInfo(); if (optShardedCollInfo && optShardedCollInfo->getIndexVersion()) { - // Bump sharding catalog's index version on the config server if the source - // collection is sharded. 
It will be updated later on. + // Bump sharding catalog's index version on the config server if the source collection is + // sharded. It will be updated later on. optShardedCollInfo->setIndexVersion({optShardedCollInfo->getUuid(), newIndexVersion}); doc->setOptShardedCollInfo(optShardedCollInfo); } @@ -117,14 +170,354 @@ void renameIndexMetadataInShards(OperationContext* opCtx, auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); ShardsvrRenameIndexMetadata renameIndexCatalogReq( nss, toNss, {doc->getSourceUUID().value(), newIndexVersion}); - const auto renameIndexCatalogCmdObj = - CommandHelpers::appendMajorityWriteConcern(renameIndexCatalogReq.toBSON({})); - sharding_ddl_util::sendAuthenticatedCommandToShards( + renameIndexCatalogReq.setDbName(toNss.dbName()); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + async_rpc::AsyncRPCCommandHelpers::appendOSI(args, osi); + auto opts = std::make_shared>( + renameIndexCatalogReq, executor, token, args); + sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, participants); +} + +std::vector getLatestCollectionPlacementInfoFor(OperationContext* opCtx, + const NamespaceString& nss, + const UUID& uuid) { + // Use the content of config.chunks to obtain the placement of the collection being renamed. + // The request is equivalent to 'configDb.chunks.distinct("shard", {uuid:collectionUuid})'. + auto query = BSON(NamespacePlacementType::kNssFieldName << NamespaceStringUtil::serialize(nss)); + + auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + + + DistinctCommandRequest distinctRequest(ChunkType::ConfigNS); + distinctRequest.setKey(ChunkType::shard.name()); + distinctRequest.setQuery(BSON(ChunkType::collectionUUID.name() << uuid)); + auto rc = BSON(repl::ReadConcernArgs::kReadConcernFieldName << repl::ReadConcernArgs::kLocal); + + auto reply = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts( opCtx, - toNss.db(), - renameIndexCatalogCmdObj.addFields(osi.toBSON()), - participants, - executor); + ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet{}), + DatabaseName::kConfig.toString(), + distinctRequest.toBSON({rc}), + Shard::RetryPolicy::kIdempotent)); + + uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(reply)); + std::vector shardIds; + for (const auto& valueElement : reply.response.getField("values").Array()) { + shardIds.emplace_back(valueElement.String()); + } + + return shardIds; +} + +SemiFuture noOpStatement() { + BatchedCommandResponse noOpResponse; + noOpResponse.setStatus(Status::OK()); + noOpResponse.setN(0); + return SemiFuture(std::move(noOpResponse)); +} + +SemiFuture deleteShardedCollectionStatement( + const txn_api::TransactionClient& txnClient, + const NamespaceString& nss, + const boost::optional& uuid, + int stmtId) { + + if (uuid) { + const auto deleteCollectionQuery = + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss) + << CollectionType::kUuidFieldName << *uuid); + + write_ops::DeleteCommandRequest deleteOp(CollectionType::ConfigNS); + deleteOp.setDeletes({[&]() { + write_ops::DeleteOpEntry entry; + entry.setMulti(false); + entry.setQ(deleteCollectionQuery); + return entry; + }()}); + + return txnClient.runCRUDOp(deleteOp, {stmtId}); + } else { + return noOpStatement(); + } +} + +SemiFuture renameShardedCollectionStatement( + const txn_api::TransactionClient& txnClient, + const CollectionType& oldCollection, + const NamespaceString& newNss, + const Timestamp& 
timeInsertion, + int stmtId) { + auto newCollectionType = oldCollection; + newCollectionType.setNss(newNss); + newCollectionType.setTimestamp(timeInsertion); + newCollectionType.setEpoch(OID::gen()); + + // Implemented as an upsert to be idempotent + auto query = BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(newNss)); + write_ops::UpdateCommandRequest updateOp(CollectionType::ConfigNS); + updateOp.setUpdates({[&] { + write_ops::UpdateOpEntry entry; + entry.setQ(query); + entry.setU( + write_ops::UpdateModification::parseFromClassicUpdate(newCollectionType.toBSON())); + entry.setUpsert(true); + entry.setMulti(false); + return entry; + }()}); + + return txnClient.runCRUDOp(updateOp, {stmtId} /*stmtIds*/); +} + +SemiFuture insertToPlacementHistoryStatement( + const txn_api::TransactionClient& txnClient, + const NamespaceString& nss, + const boost::optional& uuid, + const Timestamp& clusterTime, + const std::vector& shards, + int stmtId, + const BatchedCommandResponse& previousOperationResult) { + + // Skip the insertion of the placement entry if the previous statement didn't change any + // document - we can deduce that the whole transaction was already committed in a previous + // attempt. + if (previousOperationResult.getN() == 0) { + return noOpStatement(); + } + + NamespacePlacementType placementInfo(NamespaceString(nss), clusterTime, shards); + if (uuid) + placementInfo.setUuid(*uuid); + write_ops::InsertCommandRequest insertPlacementEntry( + NamespaceString::kConfigsvrPlacementHistoryNamespace, {placementInfo.toBSON()}); + + return txnClient.runCRUDOp(insertPlacementEntry, {stmtId} /*stmtIds*/); +} + + +SemiFuture updateZonesStatement(const txn_api::TransactionClient& txnClient, + const NamespaceString& oldNss, + const NamespaceString& newNss) { + + const auto query = BSON(TagsType::ns(NamespaceStringUtil::serialize(oldNss))); + const auto update = BSON("$set" << BSON(TagsType::ns(NamespaceStringUtil::serialize(newNss)))); + + BatchedCommandRequest request([&] { + write_ops::UpdateCommandRequest updateOp(TagsType::ConfigNS); + updateOp.setUpdates({[&] { + write_ops::UpdateOpEntry entry; + entry.setQ(query); + entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(update)); + entry.setUpsert(false); + entry.setMulti(true); + return entry; + }()}); + return updateOp; + }()); + return txnClient.runCRUDOp(request, {-1} /*stmtIds*/); +} + +SemiFuture deleteZonesStatement(const txn_api::TransactionClient& txnClient, + const NamespaceString& nss) { + + const auto query = BSON(TagsType::ns(NamespaceStringUtil::serialize(nss))); + const auto hint = BSON(TagsType::ns() << 1 << TagsType::min() << 1); + + BatchedCommandRequest request([&] { + write_ops::DeleteCommandRequest deleteOp(TagsType::ConfigNS); + deleteOp.setDeletes({[&] { + write_ops::DeleteOpEntry entry; + entry.setQ(query); + entry.setMulti(true); + entry.setHint(hint); + return entry; + }()}); + return deleteOp; + }()); + + return txnClient.runCRUDOp(request, {-1}); +} + +SemiFuture deleteShardingIndexCatalogMetadataStatement( + const txn_api::TransactionClient& txnClient, const boost::optional& uuid) { + if (uuid) { + // delete index catalog metadata + BatchedCommandRequest request([&] { + write_ops::DeleteCommandRequest deleteOp( + NamespaceString::kConfigsvrIndexCatalogNamespace); + deleteOp.setDeletes({[&] { + write_ops::DeleteOpEntry entry; + entry.setQ(BSON(IndexCatalogType::kCollectionUUIDFieldName << *uuid)); + entry.setMulti(true); + return entry; + }()}); + return deleteOp; + }()); + + return 
txnClient.runCRUDOp(request, {-1}); + } else { + return noOpStatement(); + } +} + + +void renameCollectionMetadataInTransaction(OperationContext* opCtx, + const boost::optional& optFromCollType, + const NamespaceString& fromNss, + const NamespaceString& toNss, + const boost::optional& droppedTargetUUID, + const WriteConcernOptions& writeConcern, + const std::shared_ptr& executor, + const OperationSessionInfo& osi) { + + std::string logMsg = str::stream() << fromNss.ns() << " to " << toNss.ns(); + if (optFromCollType) { + // Case sharded FROM collection + auto fromUUID = optFromCollType->getUuid(); + + // Every statement in the transaction runs under the same clusterTime. To ensure that in the + // placementHistory the drop of the target appears earlier than the insert of the target, + // we forcibly add a tick to obtain two valid timestamps that we can use to differentiate the + // two operations. + auto now = VectorClock::get(opCtx)->getTime(); + auto nowClusterTime = now.clusterTime(); + auto timeDrop = nowClusterTime.asTimestamp(); + + nowClusterTime.addTicks(1); + auto timeInsert = nowClusterTime.asTimestamp(); + + // Retrieve the latest placement information about "FROM". + auto fromNssShards = getLatestCollectionPlacementInfoFor(opCtx, fromNss, fromUUID); + + auto transactionChain = [&](const txn_api::TransactionClient& txnClient, + ExecutorPtr txnExec) { + // Remove the config.collections entry. Query by 'ns' AND 'uuid' so that the remove can be + // resolved with an IXSCAN (thanks to the index on '_id') and is idempotent (thanks to + // the 'uuid'). Delete the TO collection if it exists. + return deleteShardedCollectionStatement(txnClient, toNss, droppedTargetUUID, 1) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& deleteCollResponse) { + uassertStatusOK(deleteCollResponse.toStatus()); + + return insertToPlacementHistoryStatement( + txnClient, toNss, droppedTargetUUID, timeDrop, {}, 2, deleteCollResponse); + }) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& response) { + uassertStatusOK(response.toStatus()); + + return deleteShardingIndexCatalogMetadataStatement(txnClient, + droppedTargetUUID); + }) + // Delete "FROM" collection + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& response) { + uassertStatusOK(response.toStatus()); + return deleteShardedCollectionStatement(txnClient, fromNss, fromUUID, 3); + }) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& deleteCollResponse) { + uassertStatusOK(deleteCollResponse.toStatus()); + + return insertToPlacementHistoryStatement( + txnClient, fromNss, fromUUID, timeDrop, {}, 4, deleteCollResponse); + }) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& deleteCollResponse) { + uassertStatusOK(deleteCollResponse.toStatus()); + // Use the modified entries to insert collection and placement entries for "TO". 
+ return renameShardedCollectionStatement( + txnClient, *optFromCollType, toNss, timeInsert, 5); + }) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& upsertCollResponse) { + uassertStatusOK(upsertCollResponse.toStatus()); + + return insertToPlacementHistoryStatement(txnClient, + toNss, + fromUUID, + timeInsert, + fromNssShards, + 6, + upsertCollResponse); + }) + // update tags and check it was successful + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& insertCollResponse) { + uassertStatusOK(insertCollResponse.toStatus()); + + return updateZonesStatement(txnClient, fromNss, toNss); + }) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& response) { + uassertStatusOK(response.toStatus()); + }) + .semi(); + }; + const bool useClusterTransaction = true; + sharding_ddl_util::runTransactionOnShardingCatalog( + opCtx, std::move(transactionChain), writeConcern, osi, useClusterTransaction, executor); + + ShardingLogging::get(opCtx)->logChange( + opCtx, + "renameCollection.metadata", + str::stream() << logMsg << ": dropped target collection and renamed source collection", + BSON("newCollMetadata" << optFromCollType->toBSON()), + ShardingCatalogClient::kMajorityWriteConcern, + Grid::get(opCtx)->shardRegistry()->getConfigShard(), + Grid::get(opCtx)->catalogClient()); + } else { + // Case unsharded FROM collection : just delete the target collection if sharded + auto now = VectorClock::get(opCtx)->getTime(); + auto newTimestamp = now.clusterTime().asTimestamp(); + + auto transactionChain = [&](const txn_api::TransactionClient& txnClient, + ExecutorPtr txnExec) { + return deleteShardedCollectionStatement(txnClient, toNss, droppedTargetUUID, 1) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& deleteCollResponse) { + uassertStatusOK(deleteCollResponse.toStatus()); + return insertToPlacementHistoryStatement(txnClient, + toNss, + droppedTargetUUID, + newTimestamp, + {}, + 2, + deleteCollResponse); + }) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& response) { + uassertStatusOK(response.toStatus()); + + return deleteShardingIndexCatalogMetadataStatement(txnClient, + droppedTargetUUID); + }) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& response) { + uassertStatusOK(response.toStatus()); + + return deleteZonesStatement(txnClient, toNss); + }) + .thenRunOn(txnExec) + .then([&](const BatchedCommandResponse& response) { + uassertStatusOK(response.toStatus()); + }) + .semi(); + }; + + const bool useClusterTransaction = true; + sharding_ddl_util::runTransactionOnShardingCatalog( + opCtx, std::move(transactionChain), writeConcern, osi, useClusterTransaction, executor); + + ShardingLogging::get(opCtx)->logChange(opCtx, + "renameCollection.metadata", + str::stream() + << logMsg << " : dropped target collection.", + BSONObj(), + ShardingCatalogClient::kMajorityWriteConcern, + Grid::get(opCtx)->shardRegistry()->getConfigShard(), + Grid::get(opCtx)->catalogClient()); + } } } // namespace @@ -141,14 +534,15 @@ void RenameCollectionCoordinator::checkIfOptionsConflict(const BSONObj& doc) con const auto& otherReq = otherDoc.getRenameCollectionRequest().toBSON(); uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "Another rename collection for namespace " << originalNss() + str::stream() << "Another rename collection for namespace " + << originalNss().toStringForErrorMsg() << " is being executed with different parameters: " << selfReq, SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq)); } 
-std::vector RenameCollectionCoordinator::_acquireAdditionalLocks( +std::set RenameCollectionCoordinator::_getAdditionalLocksToAcquire( OperationContext* opCtx) { - return {_request.getTo().ns()}; + return {_request.getTo()}; } void RenameCollectionCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBuilder) const { @@ -173,28 +567,12 @@ ExecutorFuture RenameCollectionCoordinator::_runImpl( sharding_ddl_util::getCriticalSectionReasonForRename(fromNss, toNss); try { - uassert(ErrorCodes::IllegalOperation, - "Renaming a timeseries collection is not allowed", - !fromNss.isTimeseriesBucketsCollection()); - - uassert(ErrorCodes::IllegalOperation, - "Renaming to a bucket namespace is not allowed", - !toNss.isTimeseriesBucketsCollection()); - uassert(ErrorCodes::InvalidOptions, "Cannot provide an expected collection UUID when renaming between " "databases", fromNss.db() == toNss.db() || (!_doc.getExpectedSourceUUID() && !_doc.getExpectedTargetUUID())); - uassert(ErrorCodes::IllegalOperation, - "Can't rename a collection in the config database", - !fromNss.isConfigDB()); - - uassert(ErrorCodes::IllegalOperation, - "Can't rename a collection in the admin database", - !fromNss.isAdminDB()); - { AutoGetCollection coll{ opCtx, @@ -205,15 +583,17 @@ ExecutorFuture RenameCollectionCoordinator::_runImpl( .expectedUUID(_doc.getExpectedSourceUUID())}; uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Can't rename source collection `" << fromNss - << "` because it is a view.", + str::stream() + << "Can't rename source collection `" + << fromNss.toStringForErrorMsg() << "` because it is a view.", !CollectionCatalog::get(opCtx)->lookupView(opCtx, fromNss)); checkCollectionUUIDMismatch( opCtx, fromNss, *coll, _doc.getExpectedSourceUUID()); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << fromNss << " doesn't exist.", + str::stream() << "Collection " << fromNss.toStringForErrorMsg() + << " doesn't exist.", coll.getCollection()); uassert(ErrorCodes::IllegalOperation, @@ -231,7 +611,7 @@ ExecutorFuture RenameCollectionCoordinator::_runImpl( uassert(ErrorCodes::CommandFailed, str::stream() << "Source and destination collections must be on " "the same database because " - << fromNss << " is sharded.", + << fromNss.toStringForErrorMsg() << " is sharded.", fromNss.db() == toNss.db()); _doc.setOptShardedCollInfo(optSourceCollType); } else if (fromNss.db() != toNss.db()) { @@ -264,7 +644,8 @@ ExecutorFuture RenameCollectionCoordinator::_runImpl( // Make sure the target namespace is not a view uassert(ErrorCodes::NamespaceExists, - str::stream() << "a view already exists with that name: " << toNss, + str::stream() << "a view already exists with that name: " + << toNss.toStringForErrorMsg(), !CollectionCatalog::get(opCtx)->lookupView(opCtx, toNss)); if (CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, @@ -322,128 +703,122 @@ ExecutorFuture RenameCollectionCoordinator::_runImpl( ShardingLogging::get(opCtx)->logChange( opCtx, "renameCollection.start", - fromNss.ns(), - BSON("source" << fromNss.toString() << "destination" << toNss.toString()), + NamespaceStringUtil::serialize(fromNss), + BSON("source" << NamespaceStringUtil::serialize(fromNss) << "destination" + << NamespaceStringUtil::serialize(toNss)), ShardingCatalogClient::kMajorityWriteConcern); // Block migrations on involved sharded collections if (_doc.getOptShardedCollInfo()) { - sharding_ddl_util::stopMigrations(opCtx, fromNss, _doc.getSourceUUID()); + const auto& osi = getNewSession(opCtx); + 
sharding_ddl_util::stopMigrations(opCtx, fromNss, _doc.getSourceUUID(), osi); } if (_doc.getTargetIsSharded()) { - sharding_ddl_util::stopMigrations(opCtx, toNss, _doc.getTargetUUID()); + const auto& osi = getNewSession(opCtx); + sharding_ddl_util::stopMigrations(opCtx, toNss, _doc.getTargetUUID(), osi); } })) .then(_buildPhaseHandler( Phase::kBlockCrudAndRename, - [this, executor = executor, anchor = shared_from_this()] { + [this, token, executor = executor, anchor = shared_from_this()] { auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); if (!_firstExecution) { - _updateSession(opCtx); _performNoopRetryableWriteOnAllShardsAndConfigsvr( - opCtx, getCurrentSession(), **executor); + opCtx, getNewSession(opCtx), **executor); } const auto& fromNss = nss(); - _updateSession(opCtx); - const OperationSessionInfo osi = getCurrentSession(); - // On participant shards: - // - Block CRUD on source and target collection in case at least one - // of such collections is currently sharded. + // - Block CRUD on source and target collection in case at least one of such + // collections is currently sharded // - Locally drop the target collection // - Locally rename source to target ShardsvrRenameCollectionParticipant renameCollParticipantRequest( fromNss, _doc.getSourceUUID().value()); - renameCollParticipantRequest.setDbName(fromNss.db()); + renameCollParticipantRequest.setDbName(fromNss.dbName()); renameCollParticipantRequest.setTargetUUID(_doc.getTargetUUID()); renameCollParticipantRequest.setRenameCollectionRequest(_request); - const auto cmdObj = CommandHelpers::appendMajorityWriteConcern( - renameCollParticipantRequest.toBSON({})) - .addFields(osi.toBSON()); - - // We need to send the command to all the shards because both - // movePrimary and moveChunk leave garbage behind for sharded - // collections. - // At the same time, the primary shard needs to be last participant to perfom its - // local rename operation: this will ensure that the op entries generated by the - // collections being renamed/dropped will be generated at points in time where all - // shards have a consistent view of the metadata and no concurrent writes are being - // performed. + + // We need to send the command to all the shards because both movePrimary and + // moveChunk leave garbage behind for sharded collections. At the same time, the + // primary shard needs to be the last participant to perform its local rename operation: + // this will ensure that the op entries generated by the collections being + // renamed/dropped will be generated at points in time where all shards have a + // consistent view of the metadata and no concurrent writes are being performed. 
const auto primaryShardId = ShardingState::get(opCtx)->shardId(); auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); participants.erase( std::remove(participants.begin(), participants.end(), primaryShardId), participants.end()); - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, fromNss.db(), cmdObj, participants, **executor); - - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, fromNss.db(), cmdObj, {primaryShardId}, **executor); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + async_rpc::AsyncRPCCommandHelpers::appendOSI(args, getNewSession(opCtx)); + auto opts = std::make_shared< + async_rpc::AsyncRPCOptions>( + renameCollParticipantRequest, **executor, token, args); + sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, participants); + sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, {primaryShardId}); })) .then(_buildPhaseHandler( Phase::kRenameMetadata, - [this, executor = executor, anchor = shared_from_this()] { + [this, token, executor = executor, anchor = shared_from_this()] { auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); + // Remove the query sampling configuration documents for the source and destination + // collections, if they exist. + sharding_ddl_util::removeQueryAnalyzerMetadataFromConfig( + opCtx, + BSON(analyze_shard_key::QueryAnalyzerDocument::kNsFieldName + << BSON("$in" << BSON_ARRAY( + NamespaceStringUtil::serialize(nss()) + << NamespaceStringUtil::serialize(_request.getTo()))))); + // For an unsharded collection the CSRS server can not verify the targetUUID. // Use the session ID + txnNumber to ensure no stale requests get through. 
- _updateSession(opCtx); - if (!_firstExecution) { _performNoopRetryableWriteOnAllShardsAndConfigsvr( - opCtx, getCurrentSession(), **executor); + opCtx, getNewSession(opCtx), **executor); } - if (!_isPre63Compatible() && - (_doc.getTargetIsSharded() || _doc.getOptShardedCollInfo())) { + if ((_doc.getTargetIsSharded() || _doc.getOptShardedCollInfo())) { + const auto& osi = getNewSession(opCtx); renameIndexMetadataInShards( - opCtx, nss(), _request, getCurrentSession(), **executor, &_doc); + opCtx, nss(), _request, osi, **executor, &_doc, token); } - ConfigsvrRenameCollectionMetadata req(nss(), _request.getTo()); - req.setOptFromCollection(_doc.getOptShardedCollInfo()); - const auto cmdObj = CommandHelpers::appendMajorityWriteConcern(req.toBSON({})); - const auto& configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - uassertStatusOK(Shard::CommandResponse::getEffectiveStatus( - configShard->runCommand(opCtx, - ReadPreferenceSetting(ReadPreference::PrimaryOnly), - "admin", - cmdObj.addFields(getCurrentSession().toBSON()), - Shard::RetryPolicy::kIdempotent))); - - // (SERVER-67730) Delete potential orphaned chunk entries from CSRS since - // ConfigsvrRenameCollectionMetadata is not idempotent in case of a CSRS step-down - auto uuid = _doc.getTargetUUID(); - if (uuid) { - auto query = BSON("uuid" << *uuid); - uassertStatusOK(Grid::get(opCtx)->catalogClient()->removeConfigDocuments( - opCtx, - ChunkType::ConfigNS, - query, - ShardingCatalogClient::kMajorityWriteConcern)); - } + const auto& osi = getNewSession(opCtx); + renameCollectionMetadataInTransaction(opCtx, + _doc.getOptShardedCollInfo(), + nss(), + _request.getTo(), + _doc.getTargetUUID(), + ShardingCatalogClient::kMajorityWriteConcern, + **executor, + osi); + + // Checkpoint the configTime to ensure that, in the case of a stepdown, the new + // primary will start-up from a configTime that is inclusive of the renamed + // metadata. 
+ VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); })) .then(_buildPhaseHandler( Phase::kUnblockCRUD, - [this, executor = executor, anchor = shared_from_this()] { + [this, token, executor = executor, anchor = shared_from_this()] { auto opCtxHolder = cc().makeOperationContext(); auto* opCtx = opCtxHolder.get(); getForwardableOpMetadata().setOn(opCtx); if (!_firstExecution) { - _updateSession(opCtx); _performNoopRetryableWriteOnAllShardsAndConfigsvr( - opCtx, getCurrentSession(), **executor); + opCtx, getNewSession(opCtx), **executor); } const auto& fromNss = nss(); @@ -451,52 +826,52 @@ ExecutorFuture RenameCollectionCoordinator::_runImpl( // - Unblock CRUD on participants for both source and destination collections ShardsvrRenameCollectionUnblockParticipant unblockParticipantRequest( fromNss, _doc.getSourceUUID().value()); - unblockParticipantRequest.setDbName(fromNss.db()); + unblockParticipantRequest.setDbName(fromNss.dbName()); unblockParticipantRequest.setRenameCollectionRequest(_request); - auto const cmdObj = CommandHelpers::appendMajorityWriteConcern( - unblockParticipantRequest.toBSON({})); auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); - _updateSession(opCtx); - const OperationSessionInfo osi = getCurrentSession(); - - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, fromNss.db(), cmdObj.addFields(osi.toBSON()), participants, **executor); - })) - .then(_buildPhaseHandler( - Phase::kSetResponse, - [this, anchor = shared_from_this()] { - auto opCtxHolder = cc().makeOperationContext(); - auto* opCtx = opCtxHolder.get(); - getForwardableOpMetadata().setOn(opCtx); - - // Retrieve the new collection version - const auto catalog = Grid::get(opCtx)->catalogCache(); - const auto cri = uassertStatusOK( - catalog->getCollectionRoutingInfoWithRefresh(opCtx, _request.getTo())); - _response = RenameCollectionResponse( - cri.cm.isSharded() ? cri.getCollectionVersion() : ShardVersion::UNSHARDED()); - - ShardingLogging::get(opCtx)->logChange( - opCtx, - "renameCollection.end", - nss().ns(), - BSON("source" << nss().toString() << "destination" - << _request.getTo().toString()), - ShardingCatalogClient::kMajorityWriteConcern); - LOGV2(5460504, "Collection renamed", logAttrs(nss())); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + async_rpc::AsyncRPCCommandHelpers::appendOSI(args, getNewSession(opCtx)); + auto opts = std::make_shared< + async_rpc::AsyncRPCOptions>( + unblockParticipantRequest, **executor, token, args); + sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, participants); + + // Delete chunks belonging to the previous incarnation of the target collection. + // This is performed after releasing the critical section in order to reduce stalls + // and performed outside of a transaction to prevent timeout. 
+ auto targetUUID = _doc.getTargetUUID(); + if (targetUUID) { + auto query = BSON("uuid" << *targetUUID); + uassertStatusOK(Grid::get(opCtx)->catalogClient()->removeConfigDocuments( + opCtx, + ChunkType::ConfigNS, + query, + ShardingCatalogClient::kMajorityWriteConcern)); + } })) - .onError([this, anchor = shared_from_this()](const Status& status) { - if (!status.isA() && - !status.isA()) { - LOGV2_ERROR(5460505, - "Error running rename collection", - logAttrs(nss()), - "error"_attr = redact(status)); - } - - return status; - }); + .then(_buildPhaseHandler(Phase::kSetResponse, [this, anchor = shared_from_this()] { + auto opCtxHolder = cc().makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + getForwardableOpMetadata().setOn(opCtx); + + // Retrieve the new collection version + const auto catalog = Grid::get(opCtx)->catalogCache(); + const auto cri = uassertStatusOK( + catalog->getCollectionRoutingInfoWithRefresh(opCtx, _request.getTo())); + _response = RenameCollectionResponse(cri.cm.isSharded() ? cri.getCollectionVersion() + : ShardVersion::UNSHARDED()); + + ShardingLogging::get(opCtx)->logChange( + opCtx, + "renameCollection.end", + NamespaceStringUtil::serialize(nss()), + BSON("source" << NamespaceStringUtil::serialize(nss()) << "destination" + << NamespaceStringUtil::serialize(_request.getTo())), + ShardingCatalogClient::kMajorityWriteConcern); + LOGV2(5460504, "Collection renamed", logAttrs(nss())); + })); } } // namespace mongo diff --git a/src/mongo/db/s/rename_collection_coordinator.h b/src/mongo/db/s/rename_collection_coordinator.h index ee67700a9874f..cbc2bb63beddb 100644 --- a/src/mongo/db/s/rename_collection_coordinator.h +++ b/src/mongo/db/s/rename_collection_coordinator.h @@ -29,9 +29,27 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/s/sharded_rename_collection_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" namespace mongo { @@ -67,15 +85,10 @@ class RenameCollectionCoordinator final return _doc.getPhase() >= Phase::kFreezeMigrations; }; - // TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled. 
- bool _isPre63Compatible() const { - return operationType() == DDLCoordinatorTypeEnum::kRenameCollectionPre63Compatible; - } - ExecutorFuture _runImpl(std::shared_ptr executor, const CancellationToken& token) noexcept override; - std::vector _acquireAdditionalLocks(OperationContext* opCtx) override; + std::set _getAdditionalLocksToAcquire(OperationContext* opCtx) override; boost::optional _response; const RenameCollectionRequest _request; diff --git a/src/mongo/db/s/rename_collection_participant_service.cpp b/src/mongo/db/s/rename_collection_participant_service.cpp index 4a5648833ba9d..bd91ad601a651 100644 --- a/src/mongo/db/s/rename_collection_participant_service.cpp +++ b/src/mongo/db/s/rename_collection_participant_service.cpp @@ -29,22 +29,52 @@ #include "mongo/db/s/rename_collection_participant_service.h" +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/drop_collection_coordinator.h" -#include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/range_deletion_util.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" #include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/vector_clock_mutable.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/grid.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" #include "mongo/util/future_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -86,7 +116,7 @@ void renameOrDropTarget(OperationContext* opCtx, return; } uassert(5807602, - str::stream() << "Target collection " << toNss + str::stream() << "Target collection " << toNss.toStringForErrorMsg() << " UUID does not match the provided UUID.", !targetUUID || targetCollPtr->uuid() == *targetUUID); } @@ -99,7 +129,7 @@ void renameOrDropTarget(OperationContext* opCtx, const auto sourceCollPtr = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, fromNss); uassert(ErrorCodes::CommandFailed, - str::stream() << "Source Collection " << fromNss + str::stream() << "Source Collection " << fromNss.toStringForErrorMsg() << " UUID does not match provided uuid.", !sourceCollPtr || sourceCollPtr->uuid() == sourceUUID); } @@ -143,7 +173,7 @@ bool RenameParticipantInstance::hasSameOptions(const BSONObj& participantDoc) { const auto 
otherDoc = RenameCollectionParticipantDocument::parse( IDLParserContext("RenameCollectionParticipantDocument"), participantDoc); - const auto& selfReq = _doc.getRenameCollectionRequest().toBSON(); + const auto& selfReq = _request.toBSON(); const auto& otherReq = otherDoc.getRenameCollectionRequest().toBSON(); return SimpleBSONObjComparator::kInstance.evaluate(selfReq == otherReq); @@ -161,8 +191,8 @@ boost::optional RenameParticipantInstance::reportForCurrentOp( bob.append("type", "op"); bob.append("desc", "RenameParticipantInstance"); bob.append("op", "command"); - bob.append("ns", fromNss().toString()); - bob.append("to", toNss().toString()); + bob.append("ns", NamespaceStringUtil::serialize(fromNss())); + bob.append("to", NamespaceStringUtil::serialize(toNss())); bob.append("command", cmdBob.obj()); bob.append("currentPhase", _doc.getPhase()); bob.append("active", true); @@ -199,7 +229,7 @@ void RenameParticipantInstance::_enterPhase(Phase newPhase) { } } else { store.update(opCtx.get(), - BSON(StateDoc::kFromNssFieldName << fromNss().ns()), + BSON(StateDoc::kFromNssFieldName << NamespaceStringUtil::serialize(fromNss())), newDoc.toBSON(), WriteConcerns::kMajorityWriteConcernNoTimeout); } @@ -216,7 +246,7 @@ void RenameParticipantInstance::_removeStateDocument(OperationContext* opCtx) { PersistentTaskStore store(NamespaceString::kShardingRenameParticipantsNamespace); store.remove(opCtx, - BSON(StateDoc::kFromNssFieldName << fromNss().ns()), + BSON(StateDoc::kFromNssFieldName << NamespaceStringUtil::serialize(fromNss())), WriteConcerns::kMajorityWriteConcernNoTimeout); _doc = {}; @@ -338,7 +368,7 @@ SemiFuture RenameParticipantInstance::_runImpl( Grid::get(opCtx) ->catalogClient() ->getDatabase(opCtx, - fromNss().dbName().db(), + DatabaseNameUtil::serialize(fromNss().dbName()), repl::ReadConcernLevel::kMajorityReadConcern) .getPrimary(); const auto thisShardId = ShardingState::get(opCtx)->shardId(); @@ -379,6 +409,11 @@ SemiFuture RenameParticipantInstance::_runImpl( return _canUnblockCRUDPromise.getFuture(); } + // Checkpoint the vector clock to ensure causality in the event of a crash or shutdown. + auto opCtxHolder = cc().makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); + return SemiFuture::makeReady().share(); }) .then(_buildPhaseHandler( @@ -394,10 +429,10 @@ SemiFuture RenameParticipantInstance::_runImpl( // migration. It is not needed for the source collection because no migration can // start until it first becomes sharded, which cannot happen until the DDLLock is // released. 
- const auto reason = - BSON("command" - << "rename" - << "from" << fromNss().toString() << "to" << toNss().toString()); + const auto reason = BSON("command" + << "rename" + << "from" << NamespaceStringUtil::serialize(fromNss()) + << "to" << NamespaceStringUtil::serialize(toNss())); auto service = ShardingRecoveryService::get(opCtx); service->releaseRecoverableCriticalSection( opCtx, fromNss(), reason, ShardingCatalogClient::kLocalWriteConcern); diff --git a/src/mongo/db/s/rename_collection_participant_service.h b/src/mongo/db/s/rename_collection_participant_service.h index 47627d0d5d765..3ab08bc787a55 100644 --- a/src/mongo/db/s/rename_collection_participant_service.h +++ b/src/mongo/db/s/rename_collection_participant_service.h @@ -29,9 +29,33 @@ #pragma once +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/sharded_rename_collection_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/mutex.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -86,7 +110,8 @@ class RenameParticipantInstance explicit RenameParticipantInstance(const BSONObj& participantDoc) : _doc(RenameCollectionParticipantDocument::parse( - IDLParserContext("RenameCollectionParticipantDocument"), participantDoc)) {} + IDLParserContext("RenameCollectionParticipantDocument"), participantDoc)), + _request(_doc.getRenameCollectionRequest()) {} ~RenameParticipantInstance(); @@ -140,6 +165,7 @@ class RenameParticipantInstance private: RenameCollectionParticipantDocument _doc; + const RenameCollectionRequest _request; SemiFuture run(std::shared_ptr executor, const CancellationToken& token) noexcept override final; @@ -151,7 +177,7 @@ class RenameParticipantInstance template auto _buildPhaseHandler(const Phase& newPhase, Func&& handlerFn) { - return [=] { + return [=, this] { const auto& currPhase = _doc.getPhase(); if (currPhase > newPhase) { diff --git a/src/mongo/db/s/reshard_collection_coordinator.cpp b/src/mongo/db/s/reshard_collection_coordinator.cpp index 8f59b8849555d..8e7c7f5a3f2f2 100644 --- a/src/mongo/db/s/reshard_collection_coordinator.cpp +++ b/src/mongo/db/s/reshard_collection_coordinator.cpp @@ -28,16 +28,56 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/collection_uuid_mismatch.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keypattern.h" +#include 
"mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/reshard_collection_coordinator.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/reshard_collection_gen.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -52,13 +92,14 @@ void notifyChangeStreamsOnReshardCollectionComplete(OperationContext* opCtx, const UUID& reshardUUID) { const std::string oMessage = str::stream() - << "Reshard collection " << collNss << " with shard key " << doc.getKey().toString(); + << "Reshard collection " << collNss.toStringForErrorMsg() << " with shard key " + << doc.getKey().toString(); BSONObjBuilder cmdBuilder; tassert(6590800, "Did not set old collectionUUID", doc.getOldCollectionUUID()); tassert(6590801, "Did not set old ShardKey", doc.getOldShardKey()); UUID collUUID = *doc.getOldCollectionUUID(); - cmdBuilder.append("reshardCollection", collNss.ns()); + cmdBuilder.append("reshardCollection", NamespaceStringUtil::serialize(collNss)); reshardUUID.appendToBuilder(&cmdBuilder, "reshardUUID"); cmdBuilder.append("shardKey", doc.getKey()); cmdBuilder.append("oldShardKey", *doc.getOldShardKey()); @@ -83,7 +124,7 @@ void notifyChangeStreamsOnReshardCollectionComplete(OperationContext* opCtx, const auto cmd = cmdBuilder.obj(); - writeConflictRetry(opCtx, "ReshardCollection", NamespaceString::kRsOplogNamespace.ns(), [&] { + writeConflictRetry(opCtx, "ReshardCollection", NamespaceString::kRsOplogNamespace, [&] { AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); WriteUnitOfWork uow(opCtx); serviceContext->getOpObserver()->onInternalOpMessage(opCtx, @@ -129,76 +170,81 @@ ExecutorFuture ReshardCollectionCoordinator::_runImpl( std::shared_ptr executor, const CancellationToken& token) noexcept { return ExecutorFuture(**executor) - .then(_buildPhaseHandler( - Phase::kReshard, - [this, anchor = shared_from_this()] { - auto opCtxHolder = cc().makeOperationContext(); - auto* opCtx = opCtxHolder.get(); - getForwardableOpMetadata().setOn(opCtx); - - { - AutoGetCollection coll{ - opCtx, - nss(), - MODE_IS, - AutoGetCollection::Options{} - .viewMode(auto_get_collection::ViewMode::kViewsPermitted) - .expectedUUID(_doc.getCollectionUUID())}; - } - - const auto cmOld = - uassertStatusOK( - Grid::get(opCtx) - ->catalogCache() - ->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, nss())) - .cm; - - StateDoc newDoc(_doc); - newDoc.setOldShardKey(cmOld.getShardKeyPattern().getKeyPattern().toBSON()); - newDoc.setOldCollectionUUID(cmOld.getUUID()); - _updateStateDocument(opCtx, std::move(newDoc)); - - ConfigsvrReshardCollection configsvrReshardCollection(nss(), _doc.getKey()); - configsvrReshardCollection.setDbName(nss().db()); - 
configsvrReshardCollection.setUnique(_doc.getUnique()); - configsvrReshardCollection.setCollation(_doc.getCollation()); - configsvrReshardCollection.set_presetReshardedChunks( - _doc.get_presetReshardedChunks()); - configsvrReshardCollection.setZones(_doc.getZones()); - configsvrReshardCollection.setNumInitialChunks(_doc.getNumInitialChunks()); - - const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - const auto cmdResponse = - uassertStatusOK(configShard->runCommandWithFixedRetryAttempts( - opCtx, - ReadPreferenceSetting(ReadPreference::PrimaryOnly), - DatabaseName::kAdmin.toString(), - CommandHelpers::appendMajorityWriteConcern( - configsvrReshardCollection.toBSON({}), opCtx->getWriteConcern()), - Shard::RetryPolicy::kIdempotent)); - uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(std::move(cmdResponse))); - - // Report command completion to the oplog. - const auto cm = - uassertStatusOK( - Grid::get(opCtx) - ->catalogCache() - ->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, nss())) - .cm; - - if (_doc.getOldCollectionUUID() && _doc.getOldCollectionUUID() != cm.getUUID()) { - notifyChangeStreamsOnReshardCollectionComplete( - opCtx, nss(), _doc, cm.getUUID()); - } - })) - .onError([this, anchor = shared_from_this()](const Status& status) { - LOGV2_ERROR(6206401, - "Error running reshard collection", - logAttrs(nss()), - "error"_attr = redact(status)); - return status; - }); + .then(_buildPhaseHandler(Phase::kReshard, [this, anchor = shared_from_this()] { + auto opCtxHolder = cc().makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + getForwardableOpMetadata().setOn(opCtx); + + { + AutoGetCollection coll{opCtx, + nss(), + MODE_IS, + AutoGetCollection::Options{} + .viewMode(auto_get_collection::ViewMode::kViewsPermitted) + .expectedUUID(_doc.getCollectionUUID())}; + } + + const auto cmOld = + uassertStatusOK( + Grid::get(opCtx) + ->catalogCache() + ->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, nss())) + .cm; + + StateDoc newDoc(_doc); + newDoc.setOldShardKey(cmOld.getShardKeyPattern().getKeyPattern().toBSON()); + newDoc.setOldCollectionUUID(cmOld.getUUID()); + _updateStateDocument(opCtx, std::move(newDoc)); + + ConfigsvrReshardCollection configsvrReshardCollection(nss(), _doc.getKey()); + configsvrReshardCollection.setDbName(nss().dbName()); + configsvrReshardCollection.setUnique(_doc.getUnique()); + configsvrReshardCollection.setCollation(_doc.getCollation()); + configsvrReshardCollection.set_presetReshardedChunks(_doc.get_presetReshardedChunks()); + configsvrReshardCollection.setZones(_doc.getZones()); + configsvrReshardCollection.setNumInitialChunks(_doc.getNumInitialChunks()); + + if (!resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassert( + ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject shardDistribution parameter", + !_doc.getShardDistribution().has_value()); + uassert( + ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject forceRedistribution parameter", + !_doc.getForceRedistribution().has_value()); + uassert(ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject reshardingUUID parameter", + !_doc.getReshardingUUID().has_value()); + } + configsvrReshardCollection.setShardDistribution(_doc.getShardDistribution()); + configsvrReshardCollection.setForceRedistribution(_doc.getForceRedistribution()); + configsvrReshardCollection.setReshardingUUID(_doc.getReshardingUUID()); + + 
const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + + const auto cmdResponse = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts( + opCtx, + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + DatabaseName::kAdmin.toString(), + CommandHelpers::appendMajorityWriteConcern(configsvrReshardCollection.toBSON({}), + opCtx->getWriteConcern()), + Shard::RetryPolicy::kIdempotent)); + uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(std::move(cmdResponse))); + + // Report command completion to the oplog. + const auto cm = + uassertStatusOK( + Grid::get(opCtx) + ->catalogCache() + ->getShardedCollectionRoutingInfoWithPlacementRefresh(opCtx, nss())) + .cm; + + if (_doc.getOldCollectionUUID() && _doc.getOldCollectionUUID() != cm.getUUID()) { + notifyChangeStreamsOnReshardCollectionComplete(opCtx, nss(), _doc, cm.getUUID()); + } + })); } } // namespace mongo diff --git a/src/mongo/db/s/reshard_collection_coordinator.h b/src/mongo/db/s/reshard_collection_coordinator.h index 31e2512db6cb1..a07c89f78a936 100644 --- a/src/mongo/db/s/reshard_collection_coordinator.h +++ b/src/mongo/db/s/reshard_collection_coordinator.h @@ -29,8 +29,21 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/ops/write_ops.h" #include "mongo/db/s/reshard_collection_coordinator_document_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/coordinator_document.idl b/src/mongo/db/s/resharding/coordinator_document.idl index 8dbd638d98beb..6f7114e7e24ea 100644 --- a/src/mongo/db/s/resharding/coordinator_document.idl +++ b/src/mongo/db/s/resharding/coordinator_document.idl @@ -124,6 +124,8 @@ structs: optional: true active: type: bool + # TODO(SERVER-77873): Deprecated by the resharding improvements project, and should + # be removed with that feature flag. description: >- Dummy field that always has the same value (true) to have a unique index on in order to prevent multiple resharding operations from being active at @@ -133,3 +135,15 @@ structs: type: ReshardingCoordinatorMetrics description: "Metrics related to the coordinator." optional: true + shardDistribution: + type: array + description: "The key ranges for the new shard key. This should be continuous and complete." + optional: true + forceRedistribution: + type: bool + description: "Whether to initiate reshardCollection if the shardKey doesn't change." + optional: true + quiescePeriodEnd: + type: date + description: "When to release the instance following its completion." + optional: true diff --git a/src/mongo/db/s/resharding/document_source_resharding_add_resume_id.cpp b/src/mongo/db/s/resharding/document_source_resharding_add_resume_id.cpp index a936872d0bb58..95dfd7c8d12e8 100644 --- a/src/mongo/db/s/resharding/document_source_resharding_add_resume_id.cpp +++ b/src/mongo/db/s/resharding/document_source_resharding_add_resume_id.cpp @@ -27,11 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/s/resharding/document_source_resharding_add_resume_id.h" #include "mongo/db/s/resharding/donor_oplog_id_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/document_source_resharding_add_resume_id.h b/src/mongo/db/s/resharding/document_source_resharding_add_resume_id.h index c52829524f230..a062aa5faa1d9 100644 --- a/src/mongo/db/s/resharding/document_source_resharding_add_resume_id.h +++ b/src/mongo/db/s/resharding/document_source_resharding_add_resume_id.h @@ -29,7 +29,22 @@ #pragma once +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp b/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp index 71a37bcd50708..d980400c4de32 100644 --- a/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp +++ b/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.cpp @@ -28,12 +28,27 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/document_source_resharding_iterate_transaction.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/s/resharding/document_source_resharding_iterate_transaction.h" #include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.h b/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.h index 809fecae60367..35ecb9c1defba 100644 --- a/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.h +++ b/src/mongo/db/s/resharding/document_source_resharding_iterate_transaction.h @@ -29,7 +29,31 @@ #pragma once +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include 
"mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp b/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp index dd4a900f812bb..5c3d435886197 100644 --- a/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp +++ b/src/mongo/db/s/resharding/document_source_resharding_ownership_match.cpp @@ -28,15 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/s/resharding/document_source_resharding_ownership_match.h" +#include +#include +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/s/resharding/document_source_resharding_ownership_match.h" #include "mongo/db/s/resharding/resharding_util.h" -#include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/grid.h" #include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -93,14 +104,10 @@ StageConstraints DocumentSourceReshardingOwnershipMatch::constraints( } Value DocumentSourceReshardingOwnershipMatch::serialize(SerializationOptions opts) const { - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484302); - } - return Value{Document{{kStageName, DocumentSourceReshardingOwnershipMatchSpec( _recipientShardId, _reshardingKey.getKeyPattern()) - .toBSON()}}}; + .toBSON(opts)}}}; } DepsTracker::State DocumentSourceReshardingOwnershipMatch::getDependencies( diff --git a/src/mongo/db/s/resharding/document_source_resharding_ownership_match.h b/src/mongo/db/s/resharding/document_source_resharding_ownership_match.h index d92c988b3489a..85e28ab13a70d 100644 --- a/src/mongo/db/s/resharding/document_source_resharding_ownership_match.h +++ b/src/mongo/db/s/resharding/document_source_resharding_ownership_match.h @@ -29,7 +29,23 @@ #pragma once +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" #include "mongo/db/shard_id.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/shard_key_pattern.h" diff --git a/src/mongo/db/s/resharding/recipient_document.idl b/src/mongo/db/s/resharding/recipient_document.idl index 54334cfefd46a..036bb5fff2901 100644 --- a/src/mongo/db/s/resharding/recipient_document.idl +++ b/src/mongo/db/s/resharding/recipient_document.idl @@ -55,6 +55,11 @@ structs: 
description: >- The time interval that it takes to apply oplog entries on this recipient. optional: true + indexBuildTime: + type: ReshardingMetricsTimeInterval + description: >- + The time interval that it takes to build the indexes on this recipient. + optional: true approxDocumentsToCopy: type: long optional: true diff --git a/src/mongo/db/s/resharding/resharding_agg_test.cpp b/src/mongo/db/s/resharding/resharding_agg_test.cpp index 9975d3a68c7f4..f8d23a953b43b 100644 --- a/src/mongo/db/s/resharding/resharding_agg_test.cpp +++ b/src/mongo/db/s/resharding/resharding_agg_test.cpp @@ -27,23 +27,74 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" #include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/apply_ops_gen.h" #include "mongo/db/repl/image_collection_entry_gen.h" #include "mongo/db/repl/mock_repl_coord_server_fixture.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h" #include "mongo/db/s/resharding/resharding_util.h" -#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/db/transaction/transaction_history_iterator.h" -#include "mongo/unittest/unittest.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -153,7 +204,7 @@ class MockMongoInterface final : public StubMongoProcessInterface { BSONObj getCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) override { auto optionIter = _collectionOptions.find(nss); invariant(optionIter != _collectionOptions.end(), - str::stream() << nss.ns() << " was not registered"); + str::stream() << 
nss.toStringForErrorMsg() << " was not registered"); return optionIter->second; } @@ -382,7 +433,7 @@ class ReshardingAggTest : public AggregationContextFixture { return pipeline; } - const NamespaceString _crudNss{"test.foo"}; + const NamespaceString _crudNss = NamespaceString::createNamespaceString_forTest("test.foo"); // Use a constant value so unittests can store oplog entries as extended json strings in code. const UUID _reshardingCollUUID = fassert(5074001, UUID::parse("8926ba8e-611a-42c2-bb1a-3b7819f610ed")); @@ -1430,7 +1481,7 @@ using ReshardingAggWithStorageTest = MockReplCoordServerFixture; // with no-op pre/post image oplog. TEST_F(ReshardingAggWithStorageTest, RetryableFindAndModifyWithImageLookup) { repl::OpTime opTime(Timestamp(43, 56), 1); - const NamespaceString kCrudNs("foo", "bar"); + const NamespaceString kCrudNs = NamespaceString::createNamespaceString_forTest("foo", "bar"); const UUID kCrudUUID = UUID::gen(); const ShardId kMyShardId{"shard1"}; ReshardingDonorOplogId id(opTime.getTimestamp(), opTime.getTimestamp()); @@ -1525,7 +1576,7 @@ TEST_F(ReshardingAggWithStorageTest, RetryableFindAndModifyWithImageLookup) { TEST_F(ReshardingAggWithStorageTest, RetryableFindAndModifyInsideInternalTransactionWithImageLookup) { - const NamespaceString kCrudNs("foo", "bar"); + const NamespaceString kCrudNs = NamespaceString::createNamespaceString_forTest("foo", "bar"); const UUID kCrudUUID = UUID::gen(); const ShardId kMyShardId{"shard1"}; diff --git a/src/mongo/db/s/resharding/resharding_change_event_o2_field.idl b/src/mongo/db/s/resharding/resharding_change_event_o2_field.idl index 697b3dd117650..56622eb3a0b36 100644 --- a/src/mongo/db/s/resharding/resharding_change_event_o2_field.idl +++ b/src/mongo/db/s/resharding/resharding_change_event_o2_field.idl @@ -60,4 +60,4 @@ structs: type: namespacestring reshardingUUID: type: uuid - description: "The UUID for this resharding operation." \ No newline at end of file + description: "The UUID for this resharding operation." 
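The hunks in this patch repeatedly swap direct `NamespaceString` stringification (`.ns()`, `.toString()`) for the serialization helpers that already appear in the changed includes: `NamespaceStringUtil::serialize()` for persisted fields, oplog messages, and command payloads; `toStringForErrorMsg()` for user-facing assertions; and `NamespaceString::createNamespaceString_forTest()` in unit tests. A minimal sketch of that pattern follows, assuming only the headers and helper calls named in this diff; the wrapper function, its error code, and the constant are illustrative and not part of the patch:

```cpp
#include <string>

#include "mongo/db/namespace_string.h"
#include "mongo/util/assert_util.h"
#include "mongo/util/namespace_string_util.h"
#include "mongo/util/str.h"

namespace mongo {
namespace {

// Illustrative helper (not in the patch): shows where each namespace helper is used.
std::string describeNamespaceForStorage(const NamespaceString& nss) {
    // Persisted documents, oplog messages, and command fields go through the util,
    // mirroring the StateDoc / logChange hunks above.
    std::string storedName = NamespaceStringUtil::serialize(nss);

    // User-facing error text uses toStringForErrorMsg(), as the uassert hunks do.
    uassert(9990001,  // hypothetical error code, for this sketch only
            str::stream() << "Collection " << nss.toStringForErrorMsg() << " has an empty name",
            !storedName.empty());

    return storedName;
}

// Tests construct namespaces through the _forTest factory rather than the raw
// constructor, mirroring the resharding_agg_test.cpp hunks above.
[[maybe_unused]] const NamespaceString kSketchNss =
    NamespaceString::createNamespaceString_forTest("test.foo");

}  // namespace
}  // namespace mongo
```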
diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp index 8f968f1bd793e..f137c5724f790 100644 --- a/src/mongo/db/s/resharding/resharding_collection_cloner.cpp +++ b/src/mongo/db/s/resharding/resharding_collection_cloner.cpp @@ -28,25 +28,37 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_collection_cloner.h" - +#include +#include +#include +#include +#include +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/catalog/collection.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/pipeline/aggregation_request_helper.h" -#include "mongo/db/pipeline/document_source_match.h" -#include "mongo/db/pipeline/document_source_replace_root.h" -#include "mongo/db/pipeline/sharded_agg_helpers.h" -#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/exec/document_value/value_comparator.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/document_source_resharding_ownership_match.h" +#include "mongo/db/s/resharding/resharding_collection_cloner.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_future_util.h" #include "mongo/db/s/resharding/resharding_metrics.h" @@ -54,13 +66,33 @@ #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id_helpers.h" -#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/sharding_index_catalog_cache.h" #include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/future_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -73,7 +105,8 @@ bool collectionHasSimpleCollation(OperationContext* opCtx, const NamespaceString auto [sourceChunkMgr, _] = uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, nss)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Expected collection " << nss << " to be sharded", + str::stream() << "Expected collection " << 
nss.toStringForErrorMsg() + << " to be sharded", sourceChunkMgr.isSharded()); return !sourceChunkMgr.getDefaultCollator(); @@ -108,8 +141,8 @@ ReshardingCollectionCloner::makeRawPipeline( // Assume that the config.cache.chunks collection isn't a view either. auto tempNss = resharding::constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID); - auto tempCacheChunksNss = - NamespaceString::makeGlobalConfigCollection("cache.chunks." + tempNss.ns()); + auto tempCacheChunksNss = NamespaceString::makeGlobalConfigCollection( + "cache.chunks." + NamespaceStringUtil::serialize(tempNss)); resolvedNamespaces[tempCacheChunksNss.coll()] = {tempCacheChunksNss, std::vector{}}; // Pipeline::makePipeline() ignores the collation set on the AggregationRequest (or lack @@ -218,8 +251,8 @@ std::unique_ptr ReshardingCollectionCloner::_restartP auto idToResumeFrom = [&] { AutoGetCollection outputColl(opCtx, _outputNss, MODE_IS); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Resharding collection cloner's output collection '" << _outputNss - << "' did not already exist", + str::stream() << "Resharding collection cloner's output collection '" + << _outputNss.toStringForErrorMsg() << "' did not already exist", outputColl); return resharding::data_copy::findHighestInsertedId(opCtx, *outputColl); }(); @@ -361,6 +394,12 @@ SemiFuture ReshardingCollectionCloner::run( auto client = cc().getServiceContext()->makeClient("ReshardingCollectionClonerCleanupClient"); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(client); auto opCtx = cc().makeOperationContext(); diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner.h b/src/mongo/db/s/resharding/resharding_collection_cloner.h index dac1dc5a8b54b..ea270ab0abfa5 100644 --- a/src/mongo/db/s/resharding/resharding_collection_cloner.h +++ b/src/mongo/db/s/resharding/resharding_collection_cloner.h @@ -29,18 +29,27 @@ #pragma once +#include #include +#include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" #include "mongo/db/cancelable_operation_context.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/shard_id.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/shard_key_pattern.h" #include "mongo/util/cancellation.h" #include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -52,6 +61,7 @@ class TaskExecutor; class OperationContext; class MongoProcessInterface; + class ReshardingMetrics; class ServiceContext; diff --git a/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp index 28d61bc48d381..8b44a8cde87a1 100644 --- a/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp +++ b/src/mongo/db/s/resharding/resharding_collection_cloner_test.cpp @@ -27,23 +27,56 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include #include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/hasher.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/pipeline/sharded_agg_helpers_targeting_policy.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/resharding_collection_cloner.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog_cache_mock.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -197,7 +230,7 @@ class ReshardingCollectionClonerTest : public ShardServerTestFixtureWithCatalogC const NamespaceString _sourceNss = NamespaceString::createNamespaceString_forTest("test"_sd, "collection_being_resharded"_sd); const NamespaceString tempNss = - resharding::constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID); + resharding::constructTemporaryReshardingNss(_sourceNss.db_forTest(), _sourceUUID); const UUID _sourceUUID = UUID::gen(); const ReshardingSourceId _sourceId{UUID::gen(), _myShardName}; const DatabaseVersion _sourceDbVersion{UUID::gen(), Timestamp(1, 1)}; diff --git a/src/mongo/db/s/resharding/resharding_collection_test.cpp b/src/mongo/db/s/resharding/resharding_collection_test.cpp index 8ebb428705578..154cb602cd045 100644 --- a/src/mongo/db/s/resharding/resharding_collection_test.cpp +++ b/src/mongo/db/s/resharding/resharding_collection_test.cpp @@ -28,16 +28,43 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/counter.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include 
"mongo/db/ops/write_ops_gen.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/service_context.h" #include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/unittest/unittest.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/timer.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp index 6cc7f93abadcf..b2702bd2503eb 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor.cpp @@ -28,22 +28,45 @@ */ -#include +#include +#include #include +#include +#include +#include +#include -#include "mongo/db/s/resharding/resharding_coordinator_commit_monitor.h" +#include +#include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/client/read_preference.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/s/resharding/resharding_coordinator_commit_monitor.h" #include "mongo/db/s/resharding/resharding_server_parameters_gen.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/async_requests_sender.h" #include "mongo/s/client/shard.h" #include "mongo/s/request_types/resharding_operation_time_gen.h" #include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/testing_proctor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -61,7 +84,7 @@ MONGO_FAIL_POINT_DEFINE(hangBeforeQueryingRecipients); BSONObj makeCommandObj(const NamespaceString& ns) { auto command = _shardsvrReshardingOperationTime(ns); - command.setDbName(DatabaseName(ns.tenantId(), "admin")); + command.setDbName(DatabaseNameUtil::deserialize(ns.tenantId(), DatabaseName::kAdmin.db())); return command.toBSON({}); } @@ -146,7 +169,8 @@ CoordinatorCommitMonitor::queryRemainingOperationTimeForRecipients() const { requests, ReadPreferenceSetting(ReadPreference::PrimaryOnly), Shard::RetryPolicy::kIdempotent, - nullptr /* resourceYielder */); + nullptr /* resourceYielder */, + {} /* designatedHostMap */); hangBeforeQueryingRecipients.pauseWhileSet(); @@ -158,12 +182,12 @@ CoordinatorCommitMonitor::queryRemainingOperationTimeForRecipients() const { !_cancelToken.isCanceled()); auto response = 
ars.next(); - const auto errorContext = + auto errorContext = "Failed command: {} on {}"_format(cmdObj.toString(), response.shardId.toString()); - const auto shardResponse = + auto shardResponse = uassertStatusOKWithContext(std::move(response.swResponse), errorContext); - const auto status = getStatusFromCommandResult(shardResponse.data); + auto status = getStatusFromCommandResult(shardResponse.data); uassertStatusOKWithContext(status, errorContext); const auto remainingTime = extractOperationRemainingTime(shardResponse.data); diff --git a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp index e68359f2801f5..51e838752340f 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_commit_monitor_test.cpp @@ -28,28 +28,61 @@ */ -#include -#include +#include +#include +#include +#include #include +// IWYU pragma: no_include "cxxabi.h" +#include #include +#include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/client.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/config/config_server_test_fixture.h" +#include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" #include "mongo/db/s/resharding/resharding_coordinator_commit_monitor.h" +#include "mongo/db/s/resharding/resharding_cumulative_metrics.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_server_parameters_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/thread_pool_mock.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" #include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/functional.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -100,7 +133,7 @@ class CoordinatorCommitMonitorTest : public ConfigServerTestFixture { CoordinatorCommitMonitor::RemainingOperationTimes remainingOperationTimes); private: - const NamespaceString _ns{"test.test"}; + const NamespaceString _ns = NamespaceString::createNamespaceString_forTest("test.test"); const std::vector _recipientShards = {{"shardOne"}, {"shardTwo"}}; std::shared_ptr _futureExecutor; diff --git a/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp b/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp index b77fafd84b1fc..bc01704b66285 100644 --- 
a/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_observer.cpp @@ -28,18 +28,22 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_coordinator_observer.h" - +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/s/resharding/coordinator_document_gen.h" +#include "mongo/db/s/resharding/resharding_coordinator_observer.h" #include "mongo/db/s/resharding/resharding_util.h" -#include "mongo/db/service_context.h" -#include "mongo/db/shard_id.h" -#include "mongo/logv2/log.h" -#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding diff --git a/src/mongo/db/s/resharding/resharding_coordinator_observer.h b/src/mongo/db/s/resharding/resharding_coordinator_observer.h index 3aaff4f5f7eb4..518851663eb83 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_observer.h +++ b/src/mongo/db/s/resharding/resharding_coordinator_observer.h @@ -29,6 +29,8 @@ #pragma once +#include +#include #include #include "mongo/base/status.h" @@ -37,6 +39,7 @@ #include "mongo/platform/mutex.h" #include "mongo/util/concurrency/with_lock.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp index 3a2764890a2e7..2fa6add15458a 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_observer_test.cpp @@ -28,15 +28,24 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/s/resharding/resharding_coordinator_observer.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/shard_id.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp index 45165f292fbad..aa5fd3206ff31 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_service.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_service.cpp @@ -29,37 +29,79 @@ #include "mongo/db/s/resharding/resharding_coordinator_service.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/json.h" -#include "mongo/db/auth/authorization_session_impl.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include 
"mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/repl/optime_with.h" #include "mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/balancer/balance_stats.h" #include "mongo/db/s/balancer/balancer_policy.h" #include "mongo/db/s/config/initial_split_policy.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" -#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" +#include "mongo/db/s/resharding/recipient_document_gen.h" #include "mongo/db/s/resharding/resharding_coordinator_commit_monitor.h" #include "mongo/db/s/resharding/resharding_future_util.h" +#include "mongo/db/s/resharding/resharding_metrics_helpers.h" #include "mongo/db/s/resharding/resharding_server_parameters_gen.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_logging.h" -#include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/server_options.h" #include "mongo/db/shard_id.h" -#include "mongo/db/storage/duplicate_key_error_info.h" #include "mongo/db/vector_clock.h" #include "mongo/db/write_concern_options.h" +#include "mongo/executor/async_rpc_util.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" #include "mongo/s/catalog/type_tags.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/abort_reshard_collection_gen.h" #include "mongo/s/request_types/commit_reshard_collection_gen.h" @@ -67,13 +109,23 @@ #include "mongo/s/request_types/flush_resharding_state_change_gen.h" #include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h" #include "mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/s/sharding_index_catalog_cache.h" #include "mongo/s/write_ops/batched_command_request.h" -#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" #include "mongo/util/future_util.h" -#include "mongo/util/string_map.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #include 
"mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -93,7 +145,7 @@ MONGO_FAIL_POINT_DEFINE(reshardingPauseCoordinatorBeforeCompletion); MONGO_FAIL_POINT_DEFINE(reshardingPauseCoordinatorBeforeStartingErrorFlow); MONGO_FAIL_POINT_DEFINE(reshardingPauseCoordinatorBeforePersistingStateTransition); MONGO_FAIL_POINT_DEFINE(pauseBeforeTellDonorToRefresh); -MONGO_FAIL_POINT_DEFINE(pauseBeforeInsertCoordinatorDoc); +MONGO_FAIL_POINT_DEFINE(pauseAfterInsertCoordinatorDoc); MONGO_FAIL_POINT_DEFINE(pauseBeforeCTHolderInitialization); const std::string kReshardingCoordinatorActiveIndexName = "ReshardingCoordinatorActiveIndex"; @@ -262,6 +314,12 @@ void writeToCoordinatorStateNss(OperationContext* opCtx, *approxDocumentsToCopy); } + if (auto quiescePeriodEnd = coordinatorDoc.getQuiescePeriodEnd()) { + // If the quiescePeriodEnd exists, include it in the update. + setBuilder.append(ReshardingCoordinatorDocument::kQuiescePeriodEndFieldName, + *quiescePeriodEnd); + } + buildStateDocumentMetricsForUpdate(setBuilder, nextState, timestamp); if (nextState == CoordinatorStateEnum::kPreparingToDonate) { @@ -293,7 +351,11 @@ void writeToCoordinatorStateNss(OperationContext* opCtx, assertNumDocsModifiedMatchesExpected(request, res, *expectedNumModified); } - setMeticsAfterWrite(metrics, nextState, timestamp); + // When moving from quiescing to done, we don't have metrics available. + invariant(metrics || nextState == CoordinatorStateEnum::kDone); + if (metrics) { + setMeticsAfterWrite(metrics, nextState, timestamp); + } } /** @@ -397,6 +459,7 @@ BSONObj createReshardingFieldsUpdateForOriginalNss( return BSON("$set" << setFields); } + case mongo::CoordinatorStateEnum::kQuiesced: case mongo::CoordinatorStateEnum::kDone: // Remove 'reshardingFields' from the config.collections entry return BSON( @@ -450,7 +513,8 @@ void updateConfigCollectionsForOriginalNss(OperationContext* opCtx, auto request = BatchedCommandRequest::buildUpdateOp( CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << coordinatorDoc.getSourceNss().ns()), // query + BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(coordinatorDoc.getSourceNss())), // query writeOp, false, // upsert false // multi @@ -493,7 +557,7 @@ void writeToConfigCollectionsForTempNss(OperationContext* opCtx, return BatchedCommandRequest::buildUpdateOp( CollectionType::ConfigNS, BSON(CollectionType::kNssFieldName - << coordinatorDoc.getTempReshardingNss().ns()), + << NamespaceStringUtil::serialize(coordinatorDoc.getTempReshardingNss())), BSON("$set" << BSON( "reshardingFields.state" << CoordinatorState_serializer(nextState).toString() @@ -515,7 +579,7 @@ void writeToConfigCollectionsForTempNss(OperationContext* opCtx, return BatchedCommandRequest::buildDeleteOp( CollectionType::ConfigNS, BSON(CollectionType::kNssFieldName - << coordinatorDoc.getTempReshardingNss().ns()), + << NamespaceStringUtil::serialize(coordinatorDoc.getTempReshardingNss())), false // multi ); default: { @@ -542,7 +606,7 @@ void writeToConfigCollectionsForTempNss(OperationContext* opCtx, return BatchedCommandRequest::buildUpdateOp( CollectionType::ConfigNS, BSON(CollectionType::kNssFieldName - << coordinatorDoc.getTempReshardingNss().ns()), + << NamespaceStringUtil::serialize(coordinatorDoc.getTempReshardingNss())), updateBuilder.obj(), true, // upsert false // multi @@ -664,19 +728,29 @@ void insertChunkAndTagDocsForTempNss(OperationContext* opCtx, ShardingCatalogManager::get(opCtx)->insertConfigDocuments(opCtx, 
TagsType::ConfigNS, newZones); } +void removeTagsDocs(OperationContext* opCtx, const BSONObj& tagsQuery, TxnNumber txnNumber) { + // Remove tag documents with the specified tagsQuery. + const auto tagDeleteOperationHint = BSON(TagsType::ns() << 1 << TagsType::min() << 1); + ShardingCatalogManager::get(opCtx)->writeToConfigDocumentInTxn( + opCtx, + TagsType::ConfigNS, + BatchedCommandRequest::buildDeleteOp(TagsType::ConfigNS, + tagsQuery, // query + true, // multi + tagDeleteOperationHint // hint + ), + txnNumber); +} + // Requires that there be no session information on the opCtx. void removeChunkAndTagsDocs(OperationContext* opCtx, const BSONObj& tagsQuery, const UUID& collUUID) { - // Remove all chunk documents for the original nss. We do not know how many chunk docs - // currently exist, so cannot pass a value for expectedNumModified - const auto chunksQuery = BSON(ChunkType::collectionUUID() << collUUID); - const auto tagDeleteOperationHint = BSON(TagsType::ns() << 1 << TagsType::min() << 1); + // Remove all chunk documents and specified tag documents. + resharding::removeChunkDocs(opCtx, collUUID); + const auto tagDeleteOperationHint = BSON(TagsType::ns() << 1 << TagsType::min() << 1); const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - - uassertStatusOK(catalogClient->removeConfigDocuments( - opCtx, ChunkType::ConfigNS, chunksQuery, kMajorityWriteConcern)); uassertStatusOK(catalogClient->removeConfigDocuments( opCtx, TagsType::ConfigNS, tagsQuery, kMajorityWriteConcern, tagDeleteOperationHint)); } @@ -693,12 +767,19 @@ void executeMetadataChangesInTxn( ShardingCatalogClient::kLocalWriteConcern); } -BSONObj makeFlushRoutingTableCacheUpdatesCmd(const NamespaceString& nss) { +std::shared_ptr> +makeFlushRoutingTableCacheUpdatesOptions(const NamespaceString& nss, + const std::shared_ptr& exec, + CancellationToken token, + async_rpc::GenericArgs args) { auto cmd = FlushRoutingTableCacheUpdatesWithWriteConcern(nss); cmd.setSyncFromConfig(true); - cmd.setDbName(nss.db()); - return cmd.toBSON( - BSON(WriteConcernOptions::kWriteConcernField << kMajorityWriteConcern.toBSON())); + cmd.setDbName(nss.dbName()); + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto opts = + std::make_shared>( + cmd, exec, token, args); + return opts; } } // namespace @@ -733,28 +814,14 @@ CollectionType createTempReshardingCollectionType( return collType; } -void cleanupSourceConfigCollections(OperationContext* opCtx, - const ReshardingCoordinatorDocument& coordinatorDoc) { - using Doc = Document; - using Arr = std::vector; - using V = Value; - - auto createTagFilter = [](const V value) { - return V{Doc{{"$map", - V{Doc{{"input", V{Doc{{"$objectToArray", value}}}}, - {"in", V{StringData("$$this.k")}}}}}}}; - }; - - - auto skipNewTagsFilter = Doc{ - {"$ne", - Arr{createTagFilter(V{StringData("$min")}), - createTagFilter(V{Doc{{"$literal", coordinatorDoc.getReshardingKey().toBSON()}}})}}}; - - const auto removeTagsQuery = - BSON(TagsType::ns(coordinatorDoc.getSourceNss().ns()) << "$expr" << skipNewTagsFilter); +void removeChunkDocs(OperationContext* opCtx, const UUID& collUUID) { + // Remove all chunk documents for the specified collUUID. 
We do not know how many chunk docs + // currently exist, so cannot pass a value for expectedNumModified + const auto chunksQuery = BSON(ChunkType::collectionUUID() << collUUID); + const auto catalogClient = ShardingCatalogManager::get(opCtx)->localCatalogClient(); - removeChunkAndTagsDocs(opCtx, removeTagsQuery, coordinatorDoc.getSourceUUID()); + uassertStatusOK(catalogClient->removeConfigDocuments( + opCtx, ChunkType::ConfigNS, chunksQuery, kMajorityWriteConcern)); } void writeDecisionPersistedState(OperationContext* opCtx, @@ -800,27 +867,37 @@ void writeDecisionPersistedState(OperationContext* opCtx, newCollectionTimestamp, reshardedCollectionPlacement, txnNumber); + + // Delete all of the config.tags entries for the user collection namespace. + const auto removeTagsQuery = + BSON(TagsType::ns(NamespaceStringUtil::serialize(coordinatorDoc.getSourceNss()))); + removeTagsDocs(opCtx, removeTagsQuery, txnNumber); + + // Update all of the config.tags entries for the temporary resharding namespace + // to refer to the user collection namespace. + updateTagsDocsForTempNss(opCtx, coordinatorDoc, txnNumber); }); } void updateTagsDocsForTempNss(OperationContext* opCtx, - const ReshardingCoordinatorDocument& coordinatorDoc) { + const ReshardingCoordinatorDocument& coordinatorDoc, + TxnNumber txnNumber) { auto hint = BSON("ns" << 1 << "min" << 1); auto tagsRequest = BatchedCommandRequest::buildUpdateOp( TagsType::ConfigNS, - BSON(TagsType::ns(coordinatorDoc.getTempReshardingNss().ns())), // query - BSON("$set" << BSON("ns" << coordinatorDoc.getSourceNss().ns())), // update - false, // upsert - true, // multi - hint // hint + BSON(TagsType::ns( + NamespaceStringUtil::serialize(coordinatorDoc.getTempReshardingNss()))), // query + BSON("$set" << BSON( + "ns" << NamespaceStringUtil::serialize(coordinatorDoc.getSourceNss()))), // update + false, // upsert + true, // multi + hint // hint ); // Update the 'ns' field to be the original collection namespace for all tags documents that // currently have 'ns' as the temporary collection namespace. 
- DBDirectClient client(opCtx); - BSONObj tagsRes; - client.runCommand(tagsRequest.getNS().dbName(), tagsRequest.toBSON(), tagsRes); - uassertStatusOK(getStatusFromWriteCommandReply(tagsRes)); + auto tagsRes = ShardingCatalogManager::get(opCtx)->writeToConfigDocumentInTxn( + opCtx, TagsType::ConfigNS, tagsRequest, txnNumber); } void insertCoordDocAndChangeOrigCollEntry(OperationContext* opCtx, @@ -834,11 +911,12 @@ void insertCoordDocAndChangeOrigCollEntry(OperationContext* opCtx, opCtx, CollectionType::ConfigNS, txnNumber, - BSON(CollectionType::kNssFieldName << coordinatorDoc.getSourceNss().ns())); + BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(coordinatorDoc.getSourceNss()))); uassert(5808200, str::stream() << "config.collection entry not found for " - << coordinatorDoc.getSourceNss().ns(), + << coordinatorDoc.getSourceNss().toStringForErrorMsg(), doc); CollectionType configCollDoc(*doc); @@ -866,7 +944,8 @@ void writeParticipantShardsAndTempCollInfo( std::vector initialChunks, std::vector zones, boost::optional indexVersion) { - const auto tagsQuery = BSON(TagsType::ns(updatedCoordinatorDoc.getTempReshardingNss().ns())); + const auto tagsQuery = BSON( + TagsType::ns(NamespaceStringUtil::serialize(updatedCoordinatorDoc.getTempReshardingNss()))); removeChunkAndTagsDocs(opCtx, tagsQuery, updatedCoordinatorDoc.getReshardingUUID()); insertChunkAndTagDocsForTempNss(opCtx, initialChunks, zones); @@ -934,10 +1013,12 @@ void writeStateTransitionAndCatalogUpdatesThenBumpCollectionPlacementVersions( ShardingCatalogClient::kLocalWriteConcern); } -void removeCoordinatorDocAndReshardingFields(OperationContext* opCtx, - ReshardingMetrics* metrics, - const ReshardingCoordinatorDocument& coordinatorDoc, - boost::optional abortReason) { +boost::optional +removeOrQuiesceCoordinatorDocAndRemoveReshardingFields( + OperationContext* opCtx, + ReshardingMetrics* metrics, + const ReshardingCoordinatorDocument& coordinatorDoc, + boost::optional abortReason) { // If the coordinator needs to abort and isn't in kInitializing, additional collections need to // be cleaned up in the final transaction. Otherwise, cleanup for abort and success are the // same. @@ -946,10 +1027,20 @@ void removeCoordinatorDocAndReshardingFields(OperationContext* opCtx, invariant((wasDecisionPersisted && !abortReason) || abortReason); ReshardingCoordinatorDocument updatedCoordinatorDoc = coordinatorDoc; - updatedCoordinatorDoc.setState(CoordinatorStateEnum::kDone); + // If a user resharding ID was provided, move the coordinator doc to "quiesced" rather than + // "done". + if (coordinatorDoc.getUserReshardingUUID()) { + updatedCoordinatorDoc.setState(CoordinatorStateEnum::kQuiesced); + updatedCoordinatorDoc.setQuiescePeriodEnd( + opCtx->getServiceContext()->getFastClockSource()->now() + + Milliseconds(resharding::gReshardingCoordinatorQuiescePeriodMillis)); + } else { + updatedCoordinatorDoc.setState(CoordinatorStateEnum::kDone); + } emplaceTruncatedAbortReasonIfExists(updatedCoordinatorDoc, abortReason); - const auto tagsQuery = BSON(TagsType::ns(coordinatorDoc.getTempReshardingNss().ns())); + const auto tagsQuery = + BSON(TagsType::ns(NamespaceStringUtil::serialize(coordinatorDoc.getTempReshardingNss()))); // Once the decision has been persisted, the coordinator would have modified the // config.chunks and config.collections entry. 
This means that the UUID of the // non-temp collection is now the UUID of what was previously the UUID of the temp @@ -961,7 +1052,8 @@ void removeCoordinatorDocAndReshardingFields(OperationContext* opCtx, uassertStatusOK(catalogClient->removeConfigDocuments( opCtx, CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << coordinatorDoc.getTempReshardingNss().ns()), + BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(coordinatorDoc.getTempReshardingNss())), kMajorityWriteConcern)); removeChunkAndTagsDocs(opCtx, tagsQuery, coordinatorDoc.getReshardingUUID()); @@ -980,6 +1072,9 @@ void removeCoordinatorDocAndReshardingFields(OperationContext* opCtx, ShardingCatalogClient::kLocalWriteConcern); metrics->onStateTransition(coordinatorDoc.getState(), updatedCoordinatorDoc.getState()); + return boost::optional{updatedCoordinatorDoc.getState() == + CoordinatorStateEnum::kQuiesced, + std::move(updatedCoordinatorDoc)}; } } // namespace resharding @@ -1094,17 +1189,54 @@ ReshardingCoordinatorExternalStateImpl::calculateParticipantShardsAndChunks( } } - auto initialSplitter = SamplingBasedSplitPolicy::make(opCtx, - coordinatorDoc.getSourceNss(), - shardKey, - numInitialChunks, - std::move(parsedZones)); - - // Note: The resharding initial split policy doesn't care about what is the real primary - // shard, so just pass in a random shard. - const SplitPolicyParams splitParams{coordinatorDoc.getReshardingUUID(), - *donorShardIds.begin()}; - auto splitResult = initialSplitter.createFirstChunks(opCtx, shardKey, splitParams); + InitialSplitPolicy::ShardCollectionConfig splitResult; + + // If shardDistribution is specified with min/max, use ShardDistributionSplitPolicy. + if (const auto& shardDistribution = coordinatorDoc.getShardDistribution()) { + uassert(ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, should not have " + "shardDistribution in coordinatorDoc", + resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)); + uassert(ErrorCodes::InvalidOptions, + "ShardDistribution should not be empty if provided", + shardDistribution->size() > 0); + const SplitPolicyParams splitParams{coordinatorDoc.getReshardingUUID(), + *donorShardIds.begin()}; + // If shardDistribution is specified with min/max, create chunks based on the shard + // min/max. If not, do sampling based split on limited shards. + if ((*shardDistribution)[0].getMin()) { + auto initialSplitter = ShardDistributionSplitPolicy::make( + opCtx, shardKey, *shardDistribution, std::move(parsedZones)); + splitResult = initialSplitter.createFirstChunks(opCtx, shardKey, splitParams); + } else { + std::vector availableShardIds; + for (const auto& shardDist : *shardDistribution) { + availableShardIds.emplace_back(shardDist.getShard()); + } + auto initialSplitter = SamplingBasedSplitPolicy::make(opCtx, + coordinatorDoc.getSourceNss(), + shardKey, + numInitialChunks, + std::move(parsedZones), + availableShardIds); + splitResult = initialSplitter.createFirstChunks(opCtx, shardKey, splitParams); + } + } else { + auto initialSplitter = + SamplingBasedSplitPolicy::make(opCtx, + coordinatorDoc.getSourceNss(), + shardKey, + numInitialChunks, + std::move(parsedZones), + boost::none /*availableShardIds*/); + // Note: The resharding initial split policy doesn't care about what is the real + // primary shard, so just pass in a random shard. 
+ const SplitPolicyParams splitParams{coordinatorDoc.getReshardingUUID(), + *donorShardIds.begin()}; + splitResult = initialSplitter.createFirstChunks(opCtx, shardKey, splitParams); + } + initialChunks = std::move(splitResult.chunks); for (const auto& chunk : initialChunks) { @@ -1117,13 +1249,12 @@ ReshardingCoordinatorExternalStateImpl::calculateParticipantShardsAndChunks( initialChunks}; } -void ReshardingCoordinatorExternalStateImpl::sendCommandToShards( +template +void ReshardingCoordinatorExternalState::sendCommandToShards( OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) { - sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, dbName, command, shardIds, executor); + std::shared_ptr> opts, + const std::vector& shardIds) { + sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, shardIds); } ThreadPool::Limits ReshardingCoordinatorService::getThreadPoolLimits() const { @@ -1142,6 +1273,25 @@ void ReshardingCoordinatorService::checkIfConflictsWithOtherInstances( for (const auto& instance : existingInstances) { auto typedInstance = checked_cast(instance); + // Instances which have already completed do not conflict with other instances, unless + // their user resharding UUIDs are the same. + const bool isUserReshardingUUIDSame = + typedInstance->getMetadata().getUserReshardingUUID() == + coordinatorDoc.getUserReshardingUUID(); + if (!isUserReshardingUUIDSame && typedInstance->getCompletionFuture().isReady()) { + LOGV2_DEBUG(7760400, + 1, + "Ignoring 'conflict' with completed instance of resharding", + "newNss"_attr = coordinatorDoc.getSourceNss(), + "oldNss"_attr = typedInstance->getMetadata().getSourceNss(), + "newUUID"_attr = coordinatorDoc.getReshardingUUID(), + "oldUUID"_attr = typedInstance->getMetadata().getReshardingUUID()); + continue; + } + // For resharding commands with no UUID provided by the user, we will re-connect to an + // instance with the same NS and resharding key, if that instance was originally started + // with no user-provided UUID. If a UUID is provided by the user, we will connect only + // to the original instance. 
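The comment block ending just above describes how `checkIfConflictsWithOtherInstances` now treats an existing coordinator instance: a completed instance with a different user resharding UUID is ignored, a match on namespace, resharding key, and user UUID lets the new request join the existing instance, and anything else is a genuine conflict (the implementing lines continue in the hunk below). As a rough, self-contained illustration of that decision only, here is a sketch; `InstanceKey`, `ConflictDecision`, and `decide` are invented names for this example and are not the MongoDB types used in the real check.

```cpp
#include <iostream>
#include <optional>
#include <string>

// Illustrative only: stand-in types, not the MongoDB classes referenced in the diff.
enum class ConflictDecision { kIgnoreCompleted, kJoinExistingInstance, kConflictError };

struct InstanceKey {
    std::string nss;                                // source namespace
    std::string reshardingKey;                      // serialized new shard key
    std::optional<std::string> userReshardingUUID;  // UUID supplied by the user, if any
};

ConflictDecision decide(const InstanceKey& existing,
                        bool existingHasCompleted,
                        const InstanceKey& incoming) {
    const bool sameUserUUID = existing.userReshardingUUID == incoming.userReshardingUUID;

    // A finished instance only matters if the caller asked for the exact same
    // user-provided resharding UUID (so a retry can observe the original outcome).
    if (!sameUserUUID && existingHasCompleted)
        return ConflictDecision::kIgnoreCompleted;

    // Same namespace, same key, and same user UUID: the new request simply joins
    // the instance that is already running (or quiesced).
    if (sameUserUUID && existing.nss == incoming.nss &&
        existing.reshardingKey == incoming.reshardingKey)
        return ConflictDecision::kJoinExistingInstance;

    // Anything else is a real conflict: only one resharding op per collection.
    return ConflictDecision::kConflictError;
}

int main() {
    InstanceKey running{"db.coll", "{newKey: 1}", std::nullopt};
    InstanceKey request{"db.coll", "{newKey: 1}", std::nullopt};
    std::cout << static_cast<int>(decide(running, /*existingHasCompleted=*/false, request))
              << '\n';  // prints 1 (join existing instance)
}
```

The "join" path is what allows a retried `reshardCollection` with the same user UUID to reconnect to a quiesced coordinator instead of starting a second operation.
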
const bool isNssSame = typedInstance->getMetadata().getSourceNss() == coordinatorDoc.getSourceNss(); const bool isReshardingKeySame = SimpleBSONObjComparator::kInstance.evaluate( @@ -1151,14 +1301,22 @@ void ReshardingCoordinatorService::checkIfConflictsWithOtherInstances( iassert(ErrorCodes::ConflictingOperationInProgress, str::stream() << "Only one resharding operation is allowed to be active at a " "time, aborting resharding op for " - << coordinatorDoc.getSourceNss(), - isNssSame && isReshardingKeySame); + << coordinatorDoc.getSourceNss().toStringForErrorMsg(), + isUserReshardingUUIDSame && isNssSame && isReshardingKeySame); + + std::string userReshardingIdMsg; + if (coordinatorDoc.getUserReshardingUUID()) { + userReshardingIdMsg = str::stream() + << " and user resharding UUID " << coordinatorDoc.getUserReshardingUUID(); + } iasserted(ReshardingCoordinatorServiceConflictingOperationInProgressInfo( typedInstance->shared_from_this()), str::stream() << "Found an active resharding operation for " - << coordinatorDoc.getSourceNss() << " with resharding key " - << coordinatorDoc.getReshardingKey().toString()); + << coordinatorDoc.getSourceNss().toStringForErrorMsg() + << " with resharding key " + << coordinatorDoc.getReshardingKey().toString() + << userReshardingIdMsg); } } @@ -1174,6 +1332,7 @@ std::shared_ptr ReshardingCoordinatorService ExecutorFuture ReshardingCoordinatorService::_rebuildService( std::shared_ptr executor, const CancellationToken& token) { + return AsyncTry([this] { auto nss = getStateDocumentsNS(); @@ -1182,15 +1341,30 @@ ExecutorFuture ReshardingCoordinatorService::_rebuildService( auto opCtx = opCtxHolder.get(); DBDirectClient client(opCtx); BSONObj result; - client.runCommand( - nss.dbName(), - BSON("createIndexes" - << nss.coll().toString() << "indexes" - << BSON_ARRAY(BSON("key" << BSON("active" << 1) << "name" - << kReshardingCoordinatorActiveIndexName - << "unique" << true))), - result); - uassertStatusOK(getStatusFromCommandResult(result)); + // We don't need a unique index on "active" any more since + // checkIfConflictsWithOtherInstances was implemented, and once we allow quiesced + // instances it breaks them, so don't create it. + // + // TODO(SERVER-67712): We create the collection only to make index creation during + // downgrade simpler, so we can remove all of this initialization when the flag is + // removed. 
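The `_rebuildService` change above keeps its setup inside `AsyncTry(...).until(shouldStopAttemptingToCreateIndex).withBackoffBetweenIterations(kExponentialBackoff)`. The sketch below shows the same retry-until-stop-with-backoff shape in plain synchronous C++; it is an illustration of the pattern only, since the real code is asynchronous and also stops when a cancellation token fires.

```cpp
#include <algorithm>
#include <chrono>
#include <functional>
#include <iostream>
#include <thread>

// Illustrative only: a synchronous stand-in for AsyncTry(...).until(...)
// .withBackoffBetweenIterations(...); names and defaults are invented here.
bool retryWithBackoff(const std::function<bool()>& attempt,
                      const std::function<bool(bool)>& shouldStop,
                      std::chrono::milliseconds initialDelay = std::chrono::milliseconds(10),
                      std::chrono::milliseconds maxDelay = std::chrono::seconds(5)) {
    auto delay = initialDelay;
    while (true) {
        const bool ok = attempt();           // e.g. "create the state document collection"
        if (shouldStop(ok))                  // e.g. success, or a non-retryable error
            return ok;
        std::this_thread::sleep_for(delay);  // back off before the next attempt
        delay = std::min(delay * 2, maxDelay);
    }
}

int main() {
    int tries = 0;
    bool ok = retryWithBackoff([&] { return ++tries >= 3; },           // succeeds on 3rd try
                               [](bool succeeded) { return succeeded; });
    std::cout << "succeeded after " << tries << " attempts: " << std::boolalpha << ok << '\n';
}
```
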
+ if (!resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + client.runCommand( + nss.dbName(), + BSON("createIndexes" + << nss.coll().toString() << "indexes" + << BSON_ARRAY(BSON("key" << BSON("active" << 1) << "name" + << kReshardingCoordinatorActiveIndexName + << "unique" << true))), + result); + uassertStatusOK(getStatusFromCommandResult(result)); + } else { + client.runCommand(nss.dbName(), BSON("create" << nss.coll().toString()), result); + const auto& status = getStatusFromCommandResult(result); + if (status.code() != ErrorCodes::NamespaceExists) + uassertStatusOK(status); + } }) .until([token](Status status) { return shouldStopAttemptingToCreateIndex(status, token); }) .withBackoffBetweenIterations(kExponentialBackoff) @@ -1202,8 +1376,9 @@ void ReshardingCoordinatorService::abortAllReshardCollection(OperationContext* o for (auto& instance : getAllInstances(opCtx)) { auto reshardingCoordinator = checked_pointer_cast(instance); - reshardingCoordinatorFutures.push_back(reshardingCoordinator->getCompletionFuture()); - reshardingCoordinator->abort(); + reshardingCoordinatorFutures.push_back( + reshardingCoordinator->getQuiescePeriodFinishedFuture()); + reshardingCoordinator->abort(true /* skip quiesce period */); } for (auto&& future : reshardingCoordinatorFutures) { @@ -1249,7 +1424,7 @@ void ReshardingCoordinator::installCoordinatorDoc( BSONObjBuilder bob; bob.append("newState", CoordinatorState_serializer(doc.getState())); bob.append("oldState", CoordinatorState_serializer(_coordinatorDoc.getState())); - bob.append("namespace", doc.getSourceNss().toString()); + bob.append("namespace", NamespaceStringUtil::serialize(doc.getSourceNss())); bob.append("collectionUUID", doc.getSourceUUID().toString()); bob.append("reshardingUUID", doc.getReshardingUUID().toString()); @@ -1268,37 +1443,50 @@ void ReshardingCoordinator::installCoordinatorDoc( ShardingLogging::get(opCtx)->logChange(opCtx, "resharding.coordinator.transition", - doc.getSourceNss().toString(), + NamespaceStringUtil::serialize(doc.getSourceNss()), bob.obj(), kMajorityWriteConcern); } -void markCompleted(const Status& status, ReshardingMetrics* metrics) { +void markCompleted(const Status& status, + ReshardingMetrics* metrics, + const bool isSameKeyResharding) { if (status.isOK()) { - metrics->onSuccess(); + metrics->onSuccess(isSameKeyResharding); } else if (status == ErrorCodes::ReshardCollectionAborted) { - metrics->onCanceled(); + metrics->onCanceled(isSameKeyResharding); } else { - metrics->onFailure(); + metrics->onFailure(isSameKeyResharding); } } -BSONObj createFlushReshardingStateChangeCommand(const NamespaceString& nss, - const UUID& reshardingUUID) { +std::shared_ptr> +createFlushReshardingStateChangeOptions(const NamespaceString& nss, + const UUID& reshardingUUID, + const std::shared_ptr& exec, + CancellationToken token, + async_rpc::GenericArgs args) { _flushReshardingStateChange cmd(nss); cmd.setDbName(DatabaseName::kAdmin); cmd.setReshardingUUID(reshardingUUID); - return cmd.toBSON( - BSON(WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority)); + auto opts = std::make_shared>( + cmd, exec, token, args); + return opts; } -BSONObj createShardsvrCommitReshardCollectionCmd(const NamespaceString& nss, - const UUID& reshardingUUID) { +std::shared_ptr> +createShardsvrCommitReshardCollectionOptions(const NamespaceString& nss, + const UUID& reshardingUUID, + const std::shared_ptr& exec, + CancellationToken token, + async_rpc::GenericArgs args) { 
ShardsvrCommitReshardCollection cmd(nss); cmd.setDbName(DatabaseName::kAdmin); cmd.setReshardingUUID(reshardingUUID); - return cmd.toBSON( - BSON(WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority)); + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto opts = std::make_shared>( + cmd, exec, token, args); + return opts; } ExecutorFuture ReshardingCoordinator::_tellAllParticipantsReshardingStarted( @@ -1361,7 +1549,7 @@ ExecutorFuture ReshardingCoordinator::_initializeCoordinator( return ExecutorFuture(**executor, status); } - if (_coordinatorDoc.getState() < CoordinatorStateEnum::kPreparingToDonate) { + if (_coordinatorDoc.getState() != CoordinatorStateEnum::kPreparingToDonate) { return ExecutorFuture(**executor, status); } @@ -1399,7 +1587,23 @@ ExecutorFuture ReshardingCoordinator::_initializeCoordinator( // Allow abort to continue except when stepped down. _cancelableOpCtxFactory.emplace(_ctHolder->getStepdownToken(), _markKilledExecutor); - if (_coordinatorDoc.getState() < CoordinatorStateEnum::kPreparingToDonate) { + // If we're already quiesced here it means we failed over and need to preserve the + // original abort reason. + if (_coordinatorDoc.getState() == CoordinatorStateEnum::kQuiesced) { + _originalReshardingStatus.emplace(Status::OK()); + auto originalAbortReason = _coordinatorDoc.getAbortReason(); + if (originalAbortReason) { + _originalReshardingStatus.emplace( + sharding_ddl_util_deserializeErrorStatusFromBSON( + BSON("status" << *originalAbortReason).firstElement())); + } + const bool isSameKeyResharding = _coordinatorDoc.getForceRedistribution() && + *_coordinatorDoc.getForceRedistribution(); + markCompleted(*_originalReshardingStatus, _metrics.get(), isSameKeyResharding); + // We must return status here, not _originalReshardingStatus, because the latter + // may be Status::OK() and not abort the future flow. 
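The comment directly above makes a subtle point: when a coordinator is recovered in the quiesced state after failover, the caller must see the outcome of the original run (`_originalReshardingStatus`), while the future chain itself is still unwound with the current interruption status so it actually terminates. A toy model of the "report the original outcome" half of that rule, using stand-in types rather than `mongo::Status`:

```cpp
#include <iostream>
#include <optional>
#include <string>

// Illustrative only: Outcome is a stand-in for mongo::Status in this sketch.
struct Outcome {
    bool ok;
    std::string reason;
};

// If the instance was recovered already-completed (quiesced), surface the outcome
// recorded by the previous primary; otherwise report the current status.
Outcome reportToCaller(const std::optional<Outcome>& originalOutcome, const Outcome& replayStatus) {
    return originalOutcome.value_or(replayStatus);
}

int main() {
    std::optional<Outcome> original = Outcome{false, "ReshardCollectionAborted"};
    Outcome replay{true, ""};
    std::cout << reportToCaller(original, replay).reason << '\n';  // ReshardCollectionAborted
}
```
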
+ return ExecutorFuture(**executor, status); + } else if (_coordinatorDoc.getState() < CoordinatorStateEnum::kPreparingToDonate) { return _onAbortCoordinatorOnly(executor, status); } else { return _onAbortCoordinatorAndParticipants(executor, status); @@ -1483,39 +1687,11 @@ ExecutorFuture ReshardingCoordinator::_commitAndFinishReshardOperation( const ReshardingCoordinatorDocument& updatedCoordinatorDoc) noexcept { return resharding::WithAutomaticRetry([this, executor, updatedCoordinatorDoc] { return ExecutorFuture(**executor) - .then([this, executor, updatedCoordinatorDoc] { - _commit(updatedCoordinatorDoc); - - auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); - resharding::updateTagsDocsForTempNss(opCtx.get(), updatedCoordinatorDoc); - }) - .then([this] { return _waitForMajority(_ctHolder->getStepdownToken()); }) - .thenRunOn(**executor) - .then([this, executor] { - _tellAllParticipantsToCommit(_coordinatorDoc.getSourceNss(), executor); - }) - .then([this] { _updateChunkImbalanceMetrics(_coordinatorDoc.getSourceNss()); }) - .then([this, updatedCoordinatorDoc] { - auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); - resharding::cleanupSourceConfigCollections(opCtx.get(), - updatedCoordinatorDoc); - return Status::OK(); - }) - .then([this, executor] { return _awaitAllParticipantShardsDone(executor); }) - .then([this, executor] { - _metrics->setEndFor(ReshardingMetrics::TimedPhase::kCriticalSection, - getCurrentTime()); - - // Best-effort attempt to trigger a refresh on the participant shards so - // they see the collection metadata without reshardingFields and no longer - // throw ReshardCollectionInProgress. There is no guarantee this logic ever - // runs if the config server primary steps down after having removed the - // coordinator state document. 
- return _tellAllRecipientsToRefresh(executor); - }); + .then( + [this, executor, updatedCoordinatorDoc] { _commit(updatedCoordinatorDoc); }); }) .onTransientError([](const Status& status) { - LOGV2(5093705, + LOGV2(7698801, "Resharding coordinator encountered transient error while committing", "error"_attr = status); }) @@ -1523,18 +1699,68 @@ ExecutorFuture ReshardingCoordinator::_commitAndFinishReshardOperation( .until([](const Status& status) { return status.isOK(); }) .on(**executor, _ctHolder->getStepdownToken()) .onError([this, executor](Status status) { - { - auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); - reshardingPauseCoordinatorBeforeStartingErrorFlow.pauseWhileSet(opCtx.get()); + if (status == ErrorCodes::TransactionTooLargeForCache) { + return _onAbortCoordinatorAndParticipants(executor, status); } + return ExecutorFuture(**executor, status); + }) + .then([this, executor, updatedCoordinatorDoc] { + return resharding::WithAutomaticRetry([this, executor, updatedCoordinatorDoc] { + return ExecutorFuture(**executor) + .then([this] { return _waitForMajority(_ctHolder->getStepdownToken()); }) + .thenRunOn(**executor) + .then([this, executor] { + _tellAllParticipantsToCommit(_coordinatorDoc.getSourceNss(), + executor); + }) + .then([this] { + _updateChunkImbalanceMetrics(_coordinatorDoc.getSourceNss()); + }) + .then([this, updatedCoordinatorDoc] { + auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); + resharding::removeChunkDocs(opCtx.get(), + updatedCoordinatorDoc.getSourceUUID()); + return Status::OK(); + }) + .then([this, executor] { + return _awaitAllParticipantShardsDone(executor); + }) + .then([this, executor] { + _metrics->setEndFor(ReshardingMetrics::TimedPhase::kCriticalSection, + getCurrentTime()); + + // Best-effort attempt to trigger a refresh on the participant shards + // so they see the collection metadata without reshardingFields and + // no longer throw ReshardCollectionInProgress. There is no guarantee + // this logic ever runs if the config server primary steps down after + // having removed the coordinator state document. + return _tellAllRecipientsToRefresh(executor); + }); + }) + .onTransientError([](const Status& status) { + LOGV2(5093705, + "Resharding coordinator encountered transient error while committing", + "error"_attr = status); + }) + .onUnrecoverableError([](const Status& status) {}) + .until([](const Status& status) { return status.isOK(); }) + .on(**executor, _ctHolder->getStepdownToken()) + .onError([this, executor](Status status) { + { + auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); + reshardingPauseCoordinatorBeforeStartingErrorFlow.pauseWhileSet( + opCtx.get()); + } - if (_ctHolder->isSteppingOrShuttingDown()) { - return status; - } + if (_ctHolder->isSteppingOrShuttingDown()) { + return status; + } - LOGV2_FATAL(5277000, + LOGV2_FATAL( + 5277000, "Unrecoverable error past the point resharding was guaranteed to succeed", "error"_attr = redact(status)); + }); }); } @@ -1549,6 +1775,9 @@ SemiFuture ReshardingCoordinator::run(std::shared_ptrcancelQuiescePeriod(); + } _ctHolder->abort(); } @@ -1560,6 +1789,12 @@ SemiFuture ReshardingCoordinator::run(std::shared_ptr shardKeyMatchesSW) -> ExecutorFuture { if (shardKeyMatchesSW.isOK() && shardKeyMatchesSW.getValue()) { + // If forceRedistribution is true, still do resharding. 
+ if (_coordinatorDoc.getForceRedistribution() && + *_coordinatorDoc.getForceRedistribution()) { + return _runReshardingOp(executor); + } + this->_coordinatorService->releaseInstance(this->_id, shardKeyMatchesSW.getStatus()); _coordinatorDocWrittenPromise.emplaceValue(); @@ -1574,11 +1809,59 @@ SemiFuture ReshardingCoordinator::run(std::shared_ptrinterrupt(shardKeyMatchesSW.getStatus()); return ExecutorFuture(**executor, shardKeyMatchesSW.getStatus()); } + // If this is not forced same-key resharding, set forceRedistribution to false so we can + // identify forced same-key resharding by this field later. + _coordinatorDoc.setForceRedistribution(false); return _runReshardingOp(executor); }) + .onCompletion([this, self = shared_from_this(), executor](Status status) { + _cancelableOpCtxFactory.emplace(_ctHolder->getStepdownToken(), _markKilledExecutor); + return _quiesce(executor, std::move(status)); + }) .semi(); } +ExecutorFuture ReshardingCoordinator::_quiesce( + const std::shared_ptr& executor, Status status) { + if (_coordinatorDoc.getState() == CoordinatorStateEnum::kQuiesced) { + return (*executor) + ->sleepUntil(*_coordinatorDoc.getQuiescePeriodEnd(), _ctHolder->getCancelQuiesceToken()) + .onCompletion([this, self = shared_from_this(), executor, status](Status sleepStatus) { + LOGV2_DEBUG(7760405, + 1, + "Resharding coordinator quiesce period done", + "reshardingUUID"_attr = _coordinatorDoc.getReshardingUUID()); + if (!_ctHolder->isSteppingOrShuttingDown()) { + auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); + ReshardingCoordinatorDocument updatedCoordinatorDoc = _coordinatorDoc; + updatedCoordinatorDoc.setState(CoordinatorStateEnum::kDone); + executeMetadataChangesInTxn( + opCtx.get(), + [&updatedCoordinatorDoc](OperationContext* opCtx, TxnNumber txnNumber) { + writeToCoordinatorStateNss(opCtx, + nullptr /* metrics have already been freed */ + , + updatedCoordinatorDoc, + txnNumber); + }); + LOGV2_DEBUG(7760406, + 1, + "Resharding coordinator removed state doc after quiesce", + "reshardingUUID"_attr = _coordinatorDoc.getReshardingUUID()); + } + return status; + }) + .thenRunOn(_coordinatorService->getInstanceCleanupExecutor()) + .onCompletion([this, self = shared_from_this(), executor, status](Status deleteStatus) { + _quiescePeriodFinishedPromise.emplaceValue(); + return status; + }); + } + // No quiesce period is required. + _quiescePeriodFinishedPromise.emplaceValue(); + return ExecutorFuture(**executor, status); +} + ExecutorFuture ReshardingCoordinator::_runReshardingOp( const std::shared_ptr& executor) { return _initializeCoordinator(executor) @@ -1595,22 +1878,28 @@ ExecutorFuture ReshardingCoordinator::_runReshardingOp( }, [&](const BSONObj& data) { auto ns = data.getStringField("sourceNamespace"); - return ns.empty() ? true : ns.toString() == _coordinatorDoc.getSourceNss().ns(); + return ns.empty() ? true + : ns.toString() == + NamespaceStringUtil::serialize(_coordinatorDoc.getSourceNss()); }); { auto lg = stdx::lock_guard(_fulfillmentMutex); - if (status.isOK()) { + // reportStatus is the status reported back to the caller, which may be + // different than the status if we interrupted the future chain because the + // resharding was already completed on a previous primary. 
+ auto reportStatus = _originalReshardingStatus.value_or(status); + if (reportStatus.isOK()) { _completionPromise.emplaceValue(); if (!_coordinatorDocWrittenPromise.getFuture().isReady()) { _coordinatorDocWrittenPromise.emplaceValue(); } } else { - _completionPromise.setError(status); + _completionPromise.setError(reportStatus); if (!_coordinatorDocWrittenPromise.getFuture().isReady()) { - _coordinatorDocWrittenPromise.setError(status); + _coordinatorDocWrittenPromise.setError(reportStatus); } } } @@ -1632,11 +1921,13 @@ ExecutorFuture ReshardingCoordinator::_runReshardingOp( .onCompletion([this, self = shared_from_this()](Status status) { _metrics->onStateTransition(_coordinatorDoc.getState(), boost::none); - // Destroy metrics early so it's lifetime will not be tied to the lifetime of this + // Destroy metrics early so its lifetime will not be tied to the lifetime of this // state machine. This is because we have future callbacks copy shared pointers to this // state machine that causes it to live longer than expected and potentially overlap - // with a newer instance when stepping up. + // with a newer instance when stepping up. The commit monitor also has a shared pointer + // to the metrics, so release this as well. _metrics.reset(); + _commitMonitor.reset(); if (!status.isOK()) { { @@ -1664,12 +1955,13 @@ ExecutorFuture ReshardingCoordinator::_onAbortCoordinatorOnly( auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); // Notify metrics as the operation is now complete for external observers. - markCompleted(status, _metrics.get()); + const bool isSameKeyResharding = _coordinatorDoc.getForceRedistribution() && + *_coordinatorDoc.getForceRedistribution(); + markCompleted(status, _metrics.get(), isSameKeyResharding); // The temporary collection and its corresponding entries were never created. Only // the coordinator document and reshardingFields require cleanup. - resharding::removeCoordinatorDocAndReshardingFields( - opCtx.get(), _metrics.get(), _coordinatorDoc, status); + _removeOrQuiesceCoordinatorDocAndRemoveReshardingFields(opCtx.get(), status); return status; }) .onTransientError([](const Status& retryStatus) { @@ -1730,14 +2022,18 @@ ExecutorFuture ReshardingCoordinator::_onAbortCoordinatorAndParticipants( .then([status] { return status; }); } -void ReshardingCoordinator::abort() { +void ReshardingCoordinator::abort(bool skipQuiescePeriod) { auto ctHolderInitialized = [&] { stdx::lock_guard lk(_abortCalledMutex); - _abortCalled = true; + skipQuiescePeriod = skipQuiescePeriod || _abortCalled == AbortType::kAbortSkipQuiesce; + _abortCalled = + skipQuiescePeriod ? AbortType::kAbortSkipQuiesce : AbortType::kAbortWithQuiesce; return !(_ctHolder == nullptr); }(); if (ctHolderInitialized) { + if (skipQuiescePeriod) + _ctHolder->cancelQuiescePeriod(); _ctHolder->abort(); } } @@ -1842,8 +2138,11 @@ void ReshardingCoordinator::_insertCoordDocAndChangeOrigCollEntry() { _coordinatorDocWrittenPromise.emplaceValue(); } - if (_coordinatorDoc.getState() == CoordinatorStateEnum::kAborting) { + if (_coordinatorDoc.getState() == CoordinatorStateEnum::kAborting || + _coordinatorDoc.getState() == CoordinatorStateEnum::kQuiesced) { _ctHolder->abort(); + // Force future chain to enter onError flow + uasserted(ErrorCodes::ReshardCollectionAborted, "aborted"); } return; @@ -1860,11 +2159,14 @@ void ReshardingCoordinator::_insertCoordDocAndChangeOrigCollEntry() { { // Note: don't put blocking or interruptible code in this block. 
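`ReshardingCoordinator::abort(bool skipQuiescePeriod)` (earlier in this hunk) can be called before the cancellation-token holder exists, so the request is latched under a mutex, and "skip quiesce" is sticky across repeated calls. A minimal stand-alone sketch of that latching follows; `CancellationHub` and `AbortLatch` are invented for the example, whereas the real code uses `CoordinatorCancellationTokenHolder` and `stdx::lock_guard`.

```cpp
#include <iostream>
#include <mutex>

// Illustrative only: invented stand-ins for the coordinator's cancellation plumbing.
struct CancellationHub {
    void cancelQuiescePeriod() { std::cout << "quiesce period cancelled\n"; }
    void abort() { std::cout << "operation aborted\n"; }
};

class AbortLatch {
public:
    enum class AbortType { kNoAbort, kAbortWithQuiesce, kAbortSkipQuiesce };

    void abort(CancellationHub* hub, bool skipQuiescePeriod) {
        {
            std::lock_guard<std::mutex> lk(_mutex);
            // "Skip quiesce" is sticky: once any caller asked for it, later aborts
            // keep skipping even if they did not ask explicitly.
            skipQuiescePeriod = skipQuiescePeriod || _abortCalled == AbortType::kAbortSkipQuiesce;
            _abortCalled =
                skipQuiescePeriod ? AbortType::kAbortSkipQuiesce : AbortType::kAbortWithQuiesce;
        }
        if (hub) {  // tokens may not exist yet if abort() races with startup
            if (skipQuiescePeriod)
                hub->cancelQuiescePeriod();
            hub->abort();
        }
    }

private:
    std::mutex _mutex;
    AbortType _abortCalled{AbortType::kNoAbort};
};

int main() {
    AbortLatch latch;
    latch.abort(nullptr, /*skipQuiescePeriod=*/true);  // recorded only; no tokens yet
    CancellationHub hub;
    latch.abort(&hub, /*skipQuiescePeriod=*/false);    // still skips: earlier request was sticky
}
```
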
+ const bool isSameKeyResharding = + _coordinatorDoc.getForceRedistribution() && *_coordinatorDoc.getForceRedistribution(); _coordinatorDocWrittenPromise.emplaceValue(); - _metrics->onStarted(); + _metrics->onStarted(isSameKeyResharding); + _metrics->setIsSameKeyResharding(isSameKeyResharding); } - pauseBeforeInsertCoordinatorDoc.pauseWhileSet(); + pauseAfterInsertCoordinatorDoc.pauseWhileSet(); } void ReshardingCoordinator::_calculateParticipantsAndChunksThenWriteToDisk() { @@ -1874,6 +2176,15 @@ void ReshardingCoordinator::_calculateParticipantsAndChunksThenWriteToDisk() { auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); ReshardingCoordinatorDocument updatedCoordinatorDoc = _coordinatorDoc; + // If zones is not provided by the user, we should use the existing zones for + // this resharding operation. + if (updatedCoordinatorDoc.getForceRedistribution() && + *updatedCoordinatorDoc.getForceRedistribution() && !updatedCoordinatorDoc.getZones()) { + auto zones = resharding::getZonesFromExistingCollection( + opCtx.get(), updatedCoordinatorDoc.getSourceNss()); + updatedCoordinatorDoc.setZones(std::move(zones)); + } + auto shardsAndChunks = _reshardingCoordinatorExternalState->calculateParticipantShardsAndChunks( opCtx.get(), updatedCoordinatorDoc); @@ -1883,10 +2194,12 @@ void ReshardingCoordinator::_calculateParticipantsAndChunksThenWriteToDisk() { // Remove the presetReshardedChunks and zones from the coordinator document to reduce // the possibility of the document reaching the BSONObj size constraint. + ShardKeyPattern shardKey(updatedCoordinatorDoc.getReshardingKey()); std::vector zones; if (updatedCoordinatorDoc.getZones()) { zones = resharding::buildTagsDocsFromZones(updatedCoordinatorDoc.getTempReshardingNss(), - *updatedCoordinatorDoc.getZones()); + *updatedCoordinatorDoc.getZones(), + shardKey); } updatedCoordinatorDoc.setPresetReshardedChunks(boost::none); updatedCoordinatorDoc.setZones(boost::none); @@ -2174,23 +2487,28 @@ ExecutorFuture ReshardingCoordinator::_awaitAllParticipantShardsDone( Grid::get(opCtx.get())->shardRegistry()->getAllShardIds(opCtx.get()); const auto& nss = coordinatorDoc.getSourceNss(); const auto& notMatchingThisUUID = coordinatorDoc.getReshardingUUID(); - // TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 - // is lastLTS. - const auto cmdObj = - ShardsvrDropCollectionIfUUIDNotMatchingRequest(nss, notMatchingThisUUID) - .toBSON({}); + const auto cmd = ShardsvrDropCollectionIfUUIDNotMatchingWithWriteConcernRequest( + nss, notMatchingThisUUID); + + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto opts = std::make_shared>( + cmd, **executor, _ctHolder->getStepdownToken(), args); _reshardingCoordinatorExternalState->sendCommandToShards( - opCtx.get(), nss.db(), cmdObj, allShardIds, **executor); + opCtx.get(), opts, allShardIds); } reshardingPauseCoordinatorBeforeRemovingStateDoc.pauseWhileSetAndNotCanceled( opCtx.get(), _ctHolder->getStepdownToken()); // Notify metrics as the operation is now complete for external observers. - markCompleted(abortReason ? *abortReason : Status::OK(), _metrics.get()); + const bool isSameKeyResharding = _coordinatorDoc.getForceRedistribution() && + *_coordinatorDoc.getForceRedistribution(); + markCompleted( + abortReason ? 
*abortReason : Status::OK(), _metrics.get(), isSameKeyResharding); - resharding::removeCoordinatorDocAndReshardingFields( - opCtx.get(), _metrics.get(), coordinatorDoc, abortReason); + _removeOrQuiesceCoordinatorDocAndRemoveReshardingFields(opCtx.get(), abortReason); }); } @@ -2215,8 +2533,21 @@ void ReshardingCoordinator::_updateCoordinatorDocStateAndCatalogEntries( installCoordinatorDoc(opCtx.get(), updatedCoordinatorDoc); } +void ReshardingCoordinator::_removeOrQuiesceCoordinatorDocAndRemoveReshardingFields( + OperationContext* opCtx, boost::optional abortReason) { + auto optionalDoc = resharding::removeOrQuiesceCoordinatorDocAndRemoveReshardingFields( + opCtx, _metrics.get(), _coordinatorDoc, abortReason); + + // Update in-memory coordinator doc if it wasn't deleted. + if (optionalDoc) { + installCoordinatorDoc(opCtx, *optionalDoc); + } +} + +template void ReshardingCoordinator::_sendCommandToAllParticipants( - const std::shared_ptr& executor, const BSONObj& command) { + const std::shared_ptr& executor, + std::shared_ptr> opts) { auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); auto donorShardIds = resharding::extractShardIdsFromParticipantEntries(_coordinatorDoc.getDonorShards()); @@ -2226,53 +2557,53 @@ void ReshardingCoordinator::_sendCommandToAllParticipants( participantShardIds.insert(recipientShardIds.begin(), recipientShardIds.end()); _reshardingCoordinatorExternalState->sendCommandToShards( - opCtx.get(), - DatabaseName::kAdmin.db(), - command, - {participantShardIds.begin(), participantShardIds.end()}, - **executor); + opCtx.get(), opts, {participantShardIds.begin(), participantShardIds.end()}); } +template void ReshardingCoordinator::_sendCommandToAllRecipients( - const std::shared_ptr& executor, const BSONObj& command) { + const std::shared_ptr& executor, + std::shared_ptr> opts) { auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); auto recipientShardIds = resharding::extractShardIdsFromParticipantEntries(_coordinatorDoc.getRecipientShards()); _reshardingCoordinatorExternalState->sendCommandToShards( - opCtx.get(), - DatabaseName::kAdmin.db(), - command, - {recipientShardIds.begin(), recipientShardIds.end()}, - **executor); + opCtx.get(), opts, {recipientShardIds.begin(), recipientShardIds.end()}); } +template void ReshardingCoordinator::_sendCommandToAllDonors( - const std::shared_ptr& executor, const BSONObj& command) { + const std::shared_ptr& executor, + std::shared_ptr> opts) { auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); auto donorShardIds = resharding::extractShardIdsFromParticipantEntries(_coordinatorDoc.getDonorShards()); _reshardingCoordinatorExternalState->sendCommandToShards( - opCtx.get(), - DatabaseName::kAdmin.db(), - command, - {donorShardIds.begin(), donorShardIds.end()}, - **executor); + opCtx.get(), opts, {donorShardIds.begin(), donorShardIds.end()}); } void ReshardingCoordinator::_establishAllDonorsAsParticipants( const std::shared_ptr& executor) { invariant(_coordinatorDoc.getState() == CoordinatorStateEnum::kPreparingToDonate); - auto flushCmd = makeFlushRoutingTableCacheUpdatesCmd(_coordinatorDoc.getSourceNss()); - _sendCommandToAllDonors(executor, flushCmd); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto opts = makeFlushRoutingTableCacheUpdatesOptions( + _coordinatorDoc.getSourceNss(), **executor, _ctHolder->getStepdownToken(), args); + opts->cmd.setDbName(DatabaseName::kAdmin); + _sendCommandToAllDonors(executor, opts); } void 
ReshardingCoordinator::_establishAllRecipientsAsParticipants( const std::shared_ptr& executor) { invariant(_coordinatorDoc.getState() == CoordinatorStateEnum::kPreparingToDonate); - auto flushCmd = makeFlushRoutingTableCacheUpdatesCmd(_coordinatorDoc.getTempReshardingNss()); - _sendCommandToAllRecipients(executor, flushCmd); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto opts = makeFlushRoutingTableCacheUpdatesOptions( + _coordinatorDoc.getTempReshardingNss(), **executor, _ctHolder->getStepdownToken(), args); + opts->cmd.setDbName(DatabaseName::kAdmin); + _sendCommandToAllRecipients(executor, opts); } void ReshardingCoordinator::_tellAllRecipientsToRefresh( @@ -2287,32 +2618,51 @@ void ReshardingCoordinator::_tellAllRecipientsToRefresh( nssToRefresh = _coordinatorDoc.getSourceNss(); } - auto refreshCmd = - createFlushReshardingStateChangeCommand(nssToRefresh, _coordinatorDoc.getReshardingUUID()); - _sendCommandToAllRecipients(executor, refreshCmd); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern( + args, kMajorityWriteConcern, true); + auto opts = createFlushReshardingStateChangeOptions(nssToRefresh, + _coordinatorDoc.getReshardingUUID(), + **executor, + _ctHolder->getStepdownToken(), + args); + opts->cmd.setDbName(DatabaseName::kAdmin); + _sendCommandToAllRecipients(executor, opts); } void ReshardingCoordinator::_tellAllDonorsToRefresh( const std::shared_ptr& executor) { - auto refreshCmd = createFlushReshardingStateChangeCommand(_coordinatorDoc.getSourceNss(), - _coordinatorDoc.getReshardingUUID()); - _sendCommandToAllDonors(executor, refreshCmd); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern( + args, kMajorityWriteConcern, true); + auto opts = createFlushReshardingStateChangeOptions(_coordinatorDoc.getSourceNss(), + _coordinatorDoc.getReshardingUUID(), + **executor, + _ctHolder->getStepdownToken(), + args); + opts->cmd.setDbName(DatabaseName::kAdmin); + _sendCommandToAllDonors(executor, opts); } void ReshardingCoordinator::_tellAllParticipantsToCommit( const NamespaceString& nss, const std::shared_ptr& executor) { - auto commitCmd = - createShardsvrCommitReshardCollectionCmd(nss, _coordinatorDoc.getReshardingUUID()); - _sendCommandToAllParticipants(executor, commitCmd); + auto opts = createShardsvrCommitReshardCollectionOptions( + nss, _coordinatorDoc.getReshardingUUID(), **executor, _ctHolder->getStepdownToken(), {}); + opts->cmd.setDbName(DatabaseName::kAdmin); + _sendCommandToAllParticipants(executor, opts); } void ReshardingCoordinator::_tellAllParticipantsToAbort( const std::shared_ptr& executor, bool isUserAborted) { ShardsvrAbortReshardCollection abortCmd(_coordinatorDoc.getReshardingUUID(), isUserAborted); + // Empty tenant id is acceptable here as command's tenant id will not be serialized to BSON. + // TODO SERVER-62491: Use system tenant id. 
abortCmd.setDbName(DatabaseName::kAdmin); - _sendCommandToAllParticipants(executor, - abortCmd.toBSON(BSON(WriteConcernOptions::kWriteConcernField - << WriteConcernOptions::Majority))); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto opts = std::make_shared>( + abortCmd, **executor, _ctHolder->getStepdownToken(), args); + _sendCommandToAllParticipants(executor, opts); } void ReshardingCoordinator::_updateChunkImbalanceMetrics(const NamespaceString& nss) { diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service.h b/src/mongo/db/s/resharding/resharding_coordinator_service.h index de8849c659598..dd24c3ce81e3f 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_service.h +++ b/src/mongo/db/s/resharding/resharding_coordinator_service.h @@ -29,18 +29,45 @@ #pragma once +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/resharding_coordinator_observer.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/shard_id.h" +#include "mongo/executor/async_rpc.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_tags.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" namespace mongo { namespace resharding { @@ -53,8 +80,7 @@ CollectionType createTempReshardingCollectionType( const BSONObj& collation, boost::optional indexVersion); -void cleanupSourceConfigCollections(OperationContext* opCtx, - const ReshardingCoordinatorDocument& coordinatorDoc); +void removeChunkDocs(OperationContext* opCtx, const UUID& collUUID); void writeDecisionPersistedState(OperationContext* opCtx, ReshardingMetrics* metrics, @@ -65,7 +91,8 @@ void writeDecisionPersistedState(OperationContext* opCtx, const std::vector& reshardedCollectionPlacement); void updateTagsDocsForTempNss(OperationContext* opCtx, - const ReshardingCoordinatorDocument& coordinatorDoc); + const ReshardingCoordinatorDocument& coordinatorDoc, + TxnNumber txnNumber); void insertCoordDocAndChangeOrigCollEntry(OperationContext* opCtx, ReshardingMetrics* metrics, @@ -83,10 +110,12 @@ void writeStateTransitionAndCatalogUpdatesThenBumpCollectionPlacementVersions( ReshardingMetrics* metrics, const ReshardingCoordinatorDocument& coordinatorDoc); -void removeCoordinatorDocAndReshardingFields(OperationContext* opCtx, - ReshardingMetrics* metrics, - const ReshardingCoordinatorDocument& coordinatorDoc, - boost::optional abortReason = boost::none); +boost::optional +removeOrQuiesceCoordinatorDocAndRemoveReshardingFields( + 
OperationContext* opCtx, + ReshardingMetrics* metrics, + const ReshardingCoordinatorDocument& coordinatorDoc, + boost::optional abortReason = boost::none); } // namespace resharding class ReshardingCoordinatorExternalState { @@ -111,23 +140,16 @@ class ReshardingCoordinatorExternalState { boost::optional getCatalogIndexVersionForCommit(OperationContext* opCtx, const NamespaceString& nss); - virtual void sendCommandToShards(OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) = 0; + template + void sendCommandToShards(OperationContext* opCtx, + std::shared_ptr> opts, + const std::vector& shardIds); }; class ReshardingCoordinatorExternalStateImpl final : public ReshardingCoordinatorExternalState { public: ParticipantShardsAndChunks calculateParticipantShardsAndChunks( OperationContext* opCtx, const ReshardingCoordinatorDocument& coordinatorDoc) override; - - void sendCommandToShards(OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) override; }; /** @@ -139,7 +161,8 @@ class CoordinatorCancellationTokenHolder { : _stepdownToken(stepdownToken), _abortSource(CancellationSource(stepdownToken)), _abortToken(_abortSource.token()), - _commitMonitorCancellationSource(CancellationSource(_abortToken)) {} + _commitMonitorCancellationSource(CancellationSource(_abortToken)), + _quiesceCancellationSource(CancellationSource(_stepdownToken)) {} /** * Returns whether the any token has been canceled. @@ -176,6 +199,10 @@ class CoordinatorCancellationTokenHolder { _commitMonitorCancellationSource.cancel(); } + void cancelQuiescePeriod() { + _quiesceCancellationSource.cancel(); + } + const CancellationToken& getStepdownToken() { return _stepdownToken; } @@ -188,6 +215,10 @@ class CoordinatorCancellationTokenHolder { return _commitMonitorCancellationSource.token(); } + CancellationToken getCancelQuiesceToken() { + return _quiesceCancellationSource.token(); + } + private: // The token passed in by the PrimaryOnlyService runner that is canceled when this shard's // underlying replica set node is stepping down or shutting down. @@ -203,6 +234,10 @@ class CoordinatorCancellationTokenHolder { // The source created by inheriting from the abort token. // Provides the means to cancel the commit monitor (e.g., due to receiving the commit command). CancellationSource _commitMonitorCancellationSource; + + // A source created by inheriting from the stepdown token. + // Provides the means to cancel the quiesce period. + CancellationSource _quiesceCancellationSource; }; class ReshardingCoordinator; @@ -245,6 +280,8 @@ class ReshardingCoordinatorService : public repl::PrimaryOnlyService { * between operations interrupted due to stepdown or abort. Callers who wish to confirm that * the abort successfully went through should follow up with an inspection on the resharding * coordinator docs to ensure that they are empty. + * + * This call skips quiesce periods for all aborted coordinators. */ void abortAllReshardCollection(OperationContext* opCtx); @@ -272,8 +309,9 @@ class ReshardingCoordinator final /** * Attempts to cancel the underlying resharding operation using the abort token. + * If 'skipQuiescePeriod' is set, will also skip the quiesce period used to allow retries. 
*/ - void abort(); + void abort(bool skipQuiescePeriod = false); /** * Replace in-memory representation of the CoordinatorDoc @@ -301,6 +339,14 @@ class ReshardingCoordinator final return _coordinatorDocWrittenPromise.getFuture(); } + /** + * Returns a Future that will be resolved when the service has finished its quiesce period + * and deleted the coordinator document. + */ + SharedSemiFuture getQuiescePeriodFinishedFuture() const { + return _quiescePeriodFinishedPromise.getFuture(); + } + boost::optional reportForCurrentOp( MongoProcessInterface::CurrentOpConnectionsMode, MongoProcessInterface::CurrentOpSessionsMode) noexcept override; @@ -385,6 +431,12 @@ class ReshardingCoordinator final ExecutorFuture _runReshardingOp( const std::shared_ptr& executor); + /** + * Keep the instance in a quiesced state in order to handle retries. + */ + ExecutorFuture _quiesce(const std::shared_ptr& executor, + Status status); + /** * Does the following writes: * 1. Inserts the coordinator document into config.reshardingOperations @@ -474,15 +526,27 @@ class ReshardingCoordinator final boost::optional approxCopySize = boost::none, boost::optional abortReason = boost::none); + /** + * Updates the entry for this resharding operation in config.reshardingOperations to the + * quiesced state, or removes it if quiesce isn't being done. Removes the resharding fields + * from the catalog entries. + */ + void _removeOrQuiesceCoordinatorDocAndRemoveReshardingFields( + OperationContext* opCtx, boost::optional abortReason = boost::none); + /** * Sends the command to the specified participants asynchronously. */ + template void _sendCommandToAllParticipants( - const std::shared_ptr& executor, const BSONObj& command); + const std::shared_ptr& executor, + std::shared_ptr> opts); + template void _sendCommandToAllDonors(const std::shared_ptr& executor, - const BSONObj& command); + std::shared_ptr> opts); + template void _sendCommandToAllRecipients(const std::shared_ptr& executor, - const BSONObj& command); + std::shared_ptr> opts); /** * Sends '_flushRoutingTableCacheUpdatesWithWriteConcern' to ensure donor state machine creation @@ -598,6 +662,9 @@ class ReshardingCoordinator final // Promise that is fulfilled when the chain of work kicked off by run() has completed. SharedPromise _completionPromise; + // Promise that is fulfilled when the quiesce period is finished + SharedPromise _quiescePeriodFinishedPromise; + // Callback handle for scheduled work to handle critical section timeout. boost::optional _criticalSectionTimeoutCbHandle; @@ -608,7 +675,15 @@ class ReshardingCoordinator final // Used to catch the case when an abort() is called but the cancellation source (_ctHolder) has // not been initialized. - bool _abortCalled{false}; + enum AbortType { + kNoAbort = 0, + kAbortWithQuiesce, + kAbortSkipQuiesce + } _abortCalled{AbortType::kNoAbort}; + + // If we recovered a completed resharding coordinator (quiesced) on failover, the + // resharding status when it actually ran. + boost::optional _originalReshardingStatus; }; } // namespace mongo diff --git a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp index 4eb3af4dd5a93..92fc878c5f915 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_service_test.cpp @@ -27,35 +27,67 @@ * it in the license file. 
*/ +#include #include +#include #include - +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/op_observer/op_observer_impl.h" -#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/db/repl/primary_only_service_op_observer.h" -#include "mongo/db/repl/primary_only_service_test_fixture.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" #include "mongo/db/s/resharding/resharding_op_observer.h" -#include "mongo/db/s/resharding/resharding_server_parameters_gen.h" #include "mongo/db/s/resharding/resharding_service_test_helpers.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/transaction_coordinator_service.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/executor/mock_async_rpc.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/database_version.h" #include "mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/stdx/unordered_map.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -90,12 +122,6 @@ class ExternalStateForTest : public ReshardingCoordinatorExternalState { return ParticipantShardsAndChunks( {coordinatorDoc.getDonorShards(), coordinatorDoc.getRecipientShards(), initialChunks}); } - - void sendCommandToShards(OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) override {} }; class ReshardingCoordinatorServiceForTest : public ReshardingCoordinatorService { @@ -150,6 +176,9 @@ class ReshardingCoordinatorServiceTest : public ConfigServerTestFixture { repl::createOplog(opCtx); + auto asyncRPCMock = std::make_unique(); + async_rpc::detail::AsyncRPCRunner::set(getServiceContext(), std::move(asyncRPCMock)); + 
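The CoordinatorCancellationTokenHolder above now owns chained cancellation sources: the abort source and the new quiesce source both hang off the stepdown token, while the commit-monitor source hangs off the abort token, so aborting stops the commit monitor without ending the quiesce period, and a stepdown stops everything. The self-contained sketch below models that parent/child cancellation relationship; `Source` and `Token` here are illustrative stand-ins, not MongoDB's CancellationSource/CancellationToken classes.

```cpp
#include <atomic>
#include <cassert>
#include <memory>
#include <utility>
#include <vector>

// Illustrative stand-in for a cancellation token: it is canceled if its own
// flag or the flag of any ancestor source has been set.
class Token {
public:
    explicit Token(std::vector<std::shared_ptr<std::atomic<bool>>> flags)
        : _flags(std::move(flags)) {}

    bool isCanceled() const {
        for (const auto& flag : _flags) {
            if (flag->load()) {
                return true;
            }
        }
        return false;
    }

    const std::vector<std::shared_ptr<std::atomic<bool>>>& flags() const {
        return _flags;
    }

private:
    std::vector<std::shared_ptr<std::atomic<bool>>> _flags;
};

// Illustrative stand-in for a cancellation source. Constructing one from a
// parent token means canceling the parent also cancels every token minted here.
class Source {
public:
    Source() : _flag(std::make_shared<std::atomic<bool>>(false)) {}

    explicit Source(const Token& parent)
        : _flag(std::make_shared<std::atomic<bool>>(false)), _parentFlags(parent.flags()) {}

    void cancel() {
        _flag->store(true);
    }

    Token token() const {
        auto flags = _parentFlags;
        flags.push_back(_flag);
        return Token(std::move(flags));
    }

private:
    std::shared_ptr<std::atomic<bool>> _flag;
    std::vector<std::shared_ptr<std::atomic<bool>>> _parentFlags;
};

int main() {
    Source stepdownSource;                            // owned by the PrimaryOnlyService runner
    Source abortSource(stepdownSource.token());       // abort chains off stepdown
    Source commitMonitorSource(abortSource.token());  // commit monitor chains off abort
    Source quiesceSource(stepdownSource.token());     // quiesce chains off stepdown, not abort

    // Aborting the operation cancels the commit monitor but leaves the quiesce
    // period running, so a quiesced coordinator can still absorb retries.
    abortSource.cancel();
    assert(commitMonitorSource.token().isCanceled());
    assert(!quiesceSource.token().isCanceled());

    // A stepdown or shutdown cancels everything, including the quiesce period.
    stepdownSource.cancel();
    assert(quiesceSource.token().isCanceled());
    return 0;
}
```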
_opObserverRegistry = dynamic_cast(getServiceContext()->getOpObserver()); invariant(_opObserverRegistry); @@ -403,7 +432,7 @@ class ReshardingCoordinatorServiceTest : public ConfigServerTestFixture { opCtx->getServiceContext()->getPreciseClockSource()->now()); client.insert(CollectionType::ConfigNS, originalNssCatalogEntry.toBSON()); - DatabaseType dbDoc(coordinatorDoc.getSourceNss().db().toString(), + DatabaseType dbDoc(coordinatorDoc.getSourceNss().db_forTest().toString(), coordinatorDoc.getDonorShards().front().getId(), DatabaseVersion{UUID::gen(), Timestamp(1, 1)}); client.insert(NamespaceString::kConfigDatabasesNamespace, dbDoc.toBSON()); @@ -704,9 +733,9 @@ TEST_F(ReshardingCoordinatorServiceTest, StepDownStepUpDuringInitializing) { CoordinatorStateEnum::kPreparingToDonate}; auto opCtx = operationContext(); - auto pauseBeforeInsertCoordinatorDoc = - globalFailPointRegistry().find("pauseBeforeInsertCoordinatorDoc"); - auto timesEnteredFailPoint = pauseBeforeInsertCoordinatorDoc->setMode(FailPoint::alwaysOn, 0); + auto pauseAfterInsertCoordinatorDoc = + globalFailPointRegistry().find("pauseAfterInsertCoordinatorDoc"); + auto timesEnteredFailPoint = pauseAfterInsertCoordinatorDoc->setMode(FailPoint::alwaysOn, 0); auto doc = insertStateAndCatalogEntries(CoordinatorStateEnum::kUnused, _originalEpoch); doc.setRecipientShards({}); @@ -735,11 +764,11 @@ TEST_F(ReshardingCoordinatorServiceTest, StepDownStepUpDuringInitializing) { auto instanceId = BSON(ReshardingCoordinatorDocument::kReshardingUUIDFieldName << doc.getReshardingUUID()); - pauseBeforeInsertCoordinatorDoc->waitForTimesEntered(timesEnteredFailPoint + 1); + pauseAfterInsertCoordinatorDoc->waitForTimesEntered(timesEnteredFailPoint + 1); auto coordinator = getCoordinator(opCtx, instanceId); stepDown(opCtx); - pauseBeforeInsertCoordinatorDoc->setMode(FailPoint::off, 0); + pauseAfterInsertCoordinatorDoc->setMode(FailPoint::off, 0); ASSERT_EQ(coordinator->getCompletionFuture().getNoThrow(), ErrorCodes::CallbackCanceled); coordinator.reset(); @@ -881,7 +910,7 @@ TEST_F(ReshardingCoordinatorServiceTest, StepDownStepUpEachTransition) { std::vector foundCollections; auto collection = client.findOne(CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << doc.getSourceNss().ns())); + BSON(CollectionType::kNssFieldName << doc.getSourceNss().ns_forTest())); ASSERT_EQUALS(collection.isEmpty(), false); ASSERT_EQUALS( @@ -915,7 +944,7 @@ TEST_F(ReshardingCoordinatorServiceTest, ReshardingCoordinatorFailsIfMigrationNo { DBDirectClient client(opCtx); client.update(CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << _originalNss.ns()), + BSON(CollectionType::kNssFieldName << _originalNss.ns_forTest()), BSON("$set" << BSON(CollectionType::kAllowMigrationsFieldName << false))); } @@ -925,8 +954,9 @@ TEST_F(ReshardingCoordinatorServiceTest, ReshardingCoordinatorFailsIfMigrationNo // Check that reshardCollection keeps allowMigrations setting intact. 
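The StepDownStepUpDuringInitializing test above switches from pauseBeforeInsertCoordinatorDoc to pauseAfterInsertCoordinatorDoc: the fail point now parks the coordinator just after it has persisted its state document, the test waits until the coordinator is parked there, triggers a stepdown, and only then releases it. The standalone sketch below models that park/observe/release handshake with plain C++ threads; `PausePoint` is an illustrative stand-in, not MongoDB's FailPoint class.

```cpp
#include <cassert>
#include <condition_variable>
#include <mutex>
#include <thread>

// Toy stand-in for a fail point: the worker blocks inside pauseIfEnabled()
// until the test has observed it there and turned the pause off again.
class PausePoint {
public:
    void enable() {
        std::lock_guard lk(_m);
        _enabled = true;
    }

    void disable() {
        {
            std::lock_guard lk(_m);
            _enabled = false;
        }
        _cv.notify_all();
    }

    // Called by the code under test right after the interesting step.
    void pauseIfEnabled() {
        std::unique_lock lk(_m);
        ++_timesEntered;
        _cv.notify_all();
        _cv.wait(lk, [&] { return !_enabled; });
    }

    // Called by the test to wait until the worker is parked at the pause point.
    void waitForTimesEntered(int n) {
        std::unique_lock lk(_m);
        _cv.wait(lk, [&] { return _timesEntered >= n; });
    }

private:
    std::mutex _m;
    std::condition_variable _cv;
    bool _enabled = false;
    int _timesEntered = 0;
};

int main() {
    PausePoint pauseAfterInsert;
    bool stepdownHappenedAfterInsert = false;

    pauseAfterInsert.enable();

    // The "coordinator": persists its state document, then hits the pause point.
    std::thread coordinator([&] {
        // ... insert coordinator document here ...
        pauseAfterInsert.pauseIfEnabled();
    });

    // The "test": waits until the insert has happened, simulates a stepdown,
    // and only then releases the coordinator.
    pauseAfterInsert.waitForTimesEntered(1);
    stepdownHappenedAfterInsert = true;  // stand-in for stepDown(opCtx)
    pauseAfterInsert.disable();

    coordinator.join();
    assert(stepdownHappenedAfterInsert);
    return 0;
}
```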
{ DBDirectClient client(opCtx); - CollectionType collDoc(client.findOne( - CollectionType::ConfigNS, BSON(CollectionType::kNssFieldName << _originalNss.ns()))); + CollectionType collDoc( + client.findOne(CollectionType::ConfigNS, + BSON(CollectionType::kNssFieldName << _originalNss.ns_forTest()))); ASSERT_FALSE(collDoc.getAllowMigrations()); } } diff --git a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp index ef840dc8d77b8..713973fa075ae 100644 --- a/src/mongo/db/s/resharding/resharding_coordinator_test.cpp +++ b/src/mongo/db/s/resharding/resharding_coordinator_test.cpp @@ -27,26 +27,66 @@ * it in the license file. */ +#include +#include +#include +#include #include - -#include "mongo/client/remote_command_targeter_mock.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/index_on_config.h" +#include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" #include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" +#include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/logv2/log.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -96,9 +136,14 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { } ReshardingCoordinatorDocument makeCoordinatorDoc( - CoordinatorStateEnum state, boost::optional fetchTimestamp = boost::none) { + CoordinatorStateEnum state, + bool useUserUUID = false, + boost::optional fetchTimestamp = boost::none) { CommonReshardingMetadata meta( _reshardingUUID, 
_originalNss, UUID::gen(), _tempNss, _newShardKey.toBSON()); + if (useUserUUID) { + meta.setUserReshardingUUID(_reshardingUUID); + } ReshardingCoordinatorDocument doc(state, {DonorShardEntry(ShardId("shard0000"), {})}, {RecipientShardEntry(ShardId("shard0001"), {})}); @@ -132,6 +177,7 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { collType.setReshardingFields(std::move(reshardingFields.value())); if (coordinatorDoc.getState() == CoordinatorStateEnum::kDone || + coordinatorDoc.getState() == CoordinatorStateEnum::kQuiesced || coordinatorDoc.getState() == CoordinatorStateEnum::kAborting) { collType.setAllowMigrations(true); } else if (coordinatorDoc.getState() >= CoordinatorStateEnum::kPreparingToDonate) { @@ -200,11 +246,12 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { ReshardingCoordinatorDocument insertStateAndCatalogEntries( CoordinatorStateEnum state, OID epoch, + bool useUserUUID = false, boost::optional fetchTimestamp = boost::none) { auto opCtx = operationContext(); DBDirectClient client(opCtx); - auto coordinatorDoc = makeCoordinatorDoc(state, fetchTimestamp); + auto coordinatorDoc = makeCoordinatorDoc(state, useUserUUID, fetchTimestamp); client.insert(NamespaceString::kConfigReshardingOperationsNamespace, coordinatorDoc.toBSON()); @@ -250,7 +297,7 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { OperationContext* opCtx, ReshardingCoordinatorDocument expectedCoordinatorDoc) { DBDirectClient client(opCtx); auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace, - BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns())); + BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns_forTest())); auto coordinatorDoc = ReshardingCoordinatorDocument::parse( IDLParserContext("ReshardingCoordinatorTest"), doc); @@ -332,7 +379,7 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { const ReshardingCoordinatorDocument& expectedCoordinatorDoc) { DBDirectClient client(opCtx); CollectionType onDiskEntry( - client.findOne(CollectionType::ConfigNS, BSON("_id" << _originalNss.ns()))); + client.findOne(CollectionType::ConfigNS, BSON("_id" << _originalNss.ns_forTest()))); ASSERT_EQUALS(onDiskEntry.getAllowMigrations(), expectedCollType.getAllowMigrations()); @@ -391,7 +438,7 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { void assertTemporaryCollectionCatalogEntryMatchesExpected( OperationContext* opCtx, boost::optional expectedCollType) { DBDirectClient client(opCtx); - auto doc = client.findOne(CollectionType::ConfigNS, BSON("_id" << _tempNss.ns())); + auto doc = client.findOne(CollectionType::ConfigNS, BSON("_id" << _tempNss.ns_forTest())); if (!expectedCollType) { ASSERT(doc.isEmpty()); return; @@ -465,7 +512,7 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { DBDirectClient client(opCtx); FindCommandRequest findRequest{TagsType::ConfigNS}; - findRequest.setFilter(BSON("ns" << nss.ns())); + findRequest.setFilter(BSON("ns" << nss.ns_forTest())); auto cursor = client.find(std::move(findRequest)); std::vector foundZones; @@ -555,7 +602,7 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { FindCommandRequest reshardedCollPlacementReq( NamespaceString::kConfigsvrPlacementHistoryNamespace); - reshardedCollPlacementReq.setFilter(BSON("nss" << nss.ns())); + reshardedCollPlacementReq.setFilter(BSON("nss" << nss.ns_forTest())); 
reshardedCollPlacementReq.setSort(BSON("timestamp" << -1)); const auto placementDoc = client.findOne(reshardedCollPlacementReq); ASSERT(!placementDoc.isEmpty()); @@ -622,7 +669,8 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { std::vector zones; if (expectedCoordinatorDoc.getZones()) { zones = buildTagsDocsFromZones(expectedCoordinatorDoc.getTempReshardingNss(), - *expectedCoordinatorDoc.getZones()); + *expectedCoordinatorDoc.getZones(), + _newShardKey); } expectedCoordinatorDoc.setZones(boost::none); expectedCoordinatorDoc.setPresetReshardedChunks(boost::none); @@ -670,18 +718,16 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { std::vector(reshardedCollectionPlacement.begin(), reshardedCollectionPlacement.end())); - updateTagsDocsForTempNss(operationContext(), expectedCoordinatorDoc); - // Check that config.reshardingOperations and config.collections entries are updated // correctly assertStateAndCatalogEntriesMatchExpected(opCtx, expectedCoordinatorDoc, _finalEpoch); // Check that chunks and tags under the temp namespace have been removed DBDirectClient client(opCtx); - auto chunkDoc = client.findOne(ChunkType::ConfigNS, BSON("ns" << _tempNss.ns())); + auto chunkDoc = client.findOne(ChunkType::ConfigNS, BSON("ns" << _tempNss.ns_forTest())); ASSERT(chunkDoc.isEmpty()); - auto tagDoc = client.findOne(TagsType::ConfigNS, BSON("ns" << _tempNss.ns())); + auto tagDoc = client.findOne(TagsType::ConfigNS, BSON("ns" << _tempNss.ns_forTest())); ASSERT(tagDoc.isEmpty()); assertCatalogPlacementHistoryEntryMatchExpected( @@ -692,7 +738,7 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { ReshardingCoordinatorDocument expectedCoordinatorDoc, std::vector expectedChunks, std::vector expectedZones) { - cleanupSourceConfigCollections(opCtx, expectedCoordinatorDoc); + removeChunkDocs(opCtx, expectedCoordinatorDoc.getSourceUUID()); // Check that chunks and tags entries previously under the temporary namespace have been // correctly updated to the original namespace @@ -703,7 +749,9 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { void removeCoordinatorDocAndReshardingFieldsExpectSuccess( OperationContext* opCtx, const ReshardingCoordinatorDocument& coordinatorDoc) { - removeCoordinatorDocAndReshardingFields(opCtx, _metrics.get(), coordinatorDoc); + auto optionalDoc = removeOrQuiesceCoordinatorDocAndRemoveReshardingFields( + opCtx, _metrics.get(), coordinatorDoc); + ASSERT(!optionalDoc); auto expectedCoordinatorDoc = coordinatorDoc; expectedCoordinatorDoc.setState(CoordinatorStateEnum::kDone); @@ -711,7 +759,7 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { // Check that the entry is removed from config.reshardingOperations DBDirectClient client(opCtx); auto doc = client.findOne(NamespaceString::kConfigReshardingOperationsNamespace, - BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns())); + BSON("ns" << expectedCoordinatorDoc.getSourceNss().ns_forTest())); ASSERT(doc.isEmpty()); // Check that the resharding fields are removed from the config.collections entry and @@ -725,6 +773,29 @@ class ReshardingCoordinatorPersistenceTest : public ConfigServerTestFixture { opCtx, expectedOriginalCollType, expectedCoordinatorDoc); } + void quiesceCoordinatorDocAndReshardingFieldsExpectSuccess( + OperationContext* opCtx, const ReshardingCoordinatorDocument& coordinatorDoc) { + auto optionalDoc = removeOrQuiesceCoordinatorDocAndRemoveReshardingFields( + opCtx, 
_metrics.get(), coordinatorDoc); + ASSERT(optionalDoc); + + auto expectedCoordinatorDoc = coordinatorDoc; + expectedCoordinatorDoc.setState(CoordinatorStateEnum::kQuiesced); + + // Check that the entry is marked as quiesced in config.reshardingOperations + readReshardingCoordinatorDocAndAssertMatchesExpected(opCtx, expectedCoordinatorDoc); + + // Check that the resharding fields are removed from the config.collections entry and + // allowMigrations is set back to true. + auto expectedOriginalCollType = makeOriginalCollectionCatalogEntry( + expectedCoordinatorDoc, + boost::none, + _finalEpoch, + opCtx->getServiceContext()->getPreciseClockSource()->now()); + assertOriginalCollectionCatalogEntryMatchesExpected( + opCtx, expectedOriginalCollType, expectedCoordinatorDoc); + } + void transitionToErrorExpectSuccess(ErrorCodes::Error errorCode) { auto coordinatorDoc = insertStateAndCatalogEntries(CoordinatorStateEnum::kPreparingToDonate, _originalEpoch); @@ -898,7 +969,7 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWithFetchTimestampSu TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToDecisionPersistedSucceeds) { Timestamp fetchTimestamp = Timestamp(1, 1); auto coordinatorDoc = insertStateAndCatalogEntries( - CoordinatorStateEnum::kBlockingWrites, _originalEpoch, fetchTimestamp); + CoordinatorStateEnum::kBlockingWrites, _originalEpoch, false, fetchTimestamp); auto initialChunksIds = std::vector{OID::gen(), OID::gen()}; auto tempNssChunks = makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, initialChunksIds); @@ -959,10 +1030,31 @@ TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToDoneSucceeds) { finalOriginalCollectionPlacementVersion)); } +TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionToQuiescedSucceeds) { + auto coordinatorDoc = insertStateAndCatalogEntries( + CoordinatorStateEnum::kCommitting, _finalEpoch, true /* useUserUUID */); + + // Ensure the chunks for the original namespace exist since they will be bumped as a product of + // the state transition to kDone. + makeAndInsertChunksForRecipientShard( + _reshardingUUID, _finalEpoch, _newShardKey, std::vector{OID::gen(), OID::gen()}); + + auto initialOriginalCollectionPlacementVersion = + assertGet(getCollectionPlacementVersion(operationContext(), _originalNss)); + + quiesceCoordinatorDocAndReshardingFieldsExpectSuccess(operationContext(), coordinatorDoc); + + auto finalOriginalCollectionPlacementVersion = + assertGet(getCollectionPlacementVersion(operationContext(), _originalNss)); + ASSERT_TRUE(initialOriginalCollectionPlacementVersion.isOlderThan( + finalOriginalCollectionPlacementVersion)); +} + TEST_F(ReshardingCoordinatorPersistenceTest, StateTransitionWhenCoordinatorDocDoesNotExistFails) { // Do not insert initial entry into config.reshardingOperations. Attempt to update coordinator // state documents. 
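The helpers above rely on the return value of removeOrQuiesceCoordinatorDocAndRemoveReshardingFields to tell the two outcomes apart: an empty optional means the coordinator document was removed outright, while a populated one means it was kept in the new kQuiesced state so a retried reshardCollection can find the finished operation. A minimal standalone sketch of that contract follows; `CoordinatorDoc` and the `shouldQuiesce` flag are illustrative stand-ins, not the real types.

```cpp
#include <cassert>
#include <optional>
#include <string>

enum class CoordinatorState { kCommitting, kAborting, kQuiesced, kDone };

struct CoordinatorDoc {
    std::string nss;
    CoordinatorState state;
};

// Either deletes the coordinator document (returns nullopt) or keeps it around
// in the quiesced state so a retried reshardCollection can join the finished
// operation instead of starting a new one.
std::optional<CoordinatorDoc> removeOrQuiesce(CoordinatorDoc doc, bool shouldQuiesce) {
    if (!shouldQuiesce) {
        // Modeled side effect: delete the entry from config.reshardingOperations.
        return std::nullopt;
    }
    // Modeled side effect: update the persisted document in place.
    doc.state = CoordinatorState::kQuiesced;
    return doc;
}

int main() {
    CoordinatorDoc doc{"db.coll", CoordinatorState::kCommitting};

    auto removed = removeOrQuiesce(doc, /*shouldQuiesce=*/false);
    assert(!removed);  // document gone, nothing left for a retry to find

    auto quiesced = removeOrQuiesce(doc, /*shouldQuiesce=*/true);
    assert(quiesced && quiesced->state == CoordinatorState::kQuiesced);
    return 0;
}
```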
- auto coordinatorDoc = makeCoordinatorDoc(CoordinatorStateEnum::kCloning, Timestamp(1, 1)); + auto coordinatorDoc = + makeCoordinatorDoc(CoordinatorStateEnum::kCloning, false, Timestamp(1, 1)); ASSERT_THROWS_CODE(writeStateTransitionAndCatalogUpdatesThenBumpCollectionPlacementVersions( operationContext(), _metrics.get(), coordinatorDoc), AssertionException, @@ -987,7 +1079,7 @@ TEST_F(ReshardingCoordinatorPersistenceTest, SourceCleanupBetweenTransitionsSucc Timestamp fetchTimestamp = Timestamp(1, 1); auto coordinatorDoc = insertStateAndCatalogEntries( - CoordinatorStateEnum::kBlockingWrites, _originalEpoch, fetchTimestamp); + CoordinatorStateEnum::kBlockingWrites, _originalEpoch, false, fetchTimestamp); auto initialChunksIds = std::vector{OID::gen(), OID::gen()}; auto tempNssChunks = makeChunks(_reshardingUUID, _tempEpoch, _newShardKey, initialChunksIds); diff --git a/src/mongo/db/s/resharding/resharding_cumulative_metrics.cpp b/src/mongo/db/s/resharding/resharding_cumulative_metrics.cpp index c53310cbe2b43..8beb9e64eda0d 100644 --- a/src/mongo/db/s/resharding/resharding_cumulative_metrics.cpp +++ b/src/mongo/db/s/resharding/resharding_cumulative_metrics.cpp @@ -29,6 +29,15 @@ #include "mongo/db/s/resharding/resharding_cumulative_metrics.h" +#include +#include +#include +#include +#include +#include + +#include + namespace mongo { namespace { @@ -58,11 +67,12 @@ const auto kReportedStateFieldNamesMap = [] { {RecipientStateEnum::kCreatingCollection, "countInstancesInRecipientState2CreatingCollection"}, {RecipientStateEnum::kCloning, "countInstancesInRecipientState3Cloning"}, - {RecipientStateEnum::kApplying, "countInstancesInRecipientState4Applying"}, - {RecipientStateEnum::kError, "countInstancesInRecipientState5Error"}, + {RecipientStateEnum::kBuildingIndex, "countInstancesInRecipientState4BuildingIndex"}, + {RecipientStateEnum::kApplying, "countInstancesInRecipientState5Applying"}, + {RecipientStateEnum::kError, "countInstancesInRecipientState6Error"}, {RecipientStateEnum::kStrictConsistency, - "countInstancesInRecipientState6StrictConsistency"}, - {RecipientStateEnum::kDone, "countInstancesInRecipientState7Done"}, + "countInstancesInRecipientState7StrictConsistency"}, + {RecipientStateEnum::kDone, "countInstancesInRecipientState8Done"}, }; }(); diff --git a/src/mongo/db/s/resharding/resharding_cumulative_metrics.h b/src/mongo/db/s/resharding/resharding_cumulative_metrics.h index 7b231a579b2c3..2ca4600e68d44 100644 --- a/src/mongo/db/s/resharding/resharding_cumulative_metrics.h +++ b/src/mongo/db/s/resharding/resharding_cumulative_metrics.h @@ -29,6 +29,23 @@ #pragma once +#include +#include +#include +#include +// IWYU pragma: no_include "boost/preprocessor/detail/limits/auto_rec_256.hpp" +#include +// IWYU pragma: no_include "boost/preprocessor/repetition/detail/limits/for_256.hpp" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/s/metrics/cumulative_metrics_state_holder.h" #include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_macros.h" diff --git a/src/mongo/db/s/resharding/resharding_cumulative_metrics_test.cpp b/src/mongo/db/s/resharding/resharding_cumulative_metrics_test.cpp index 245b244509a63..5c2c11d78a3a0 100644 --- a/src/mongo/db/s/resharding/resharding_cumulative_metrics_test.cpp +++ b/src/mongo/db/s/resharding/resharding_cumulative_metrics_test.cpp @@ -28,8 +28,29 @@ */ 
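The renaming in kReportedStateFieldNamesMap above falls out of the report field names embedding each state's ordinal: inserting kBuildingIndex between kCloning and kApplying shifts every later recipient state up by one, so Applying becomes state 5, Error 6, StrictConsistency 7 and Done 8. A small sketch of deriving such names from the ordinal and label follows; the enum values and helper are illustrative, not the generated IDL types.

```cpp
#include <cassert>
#include <string>

enum class RecipientState {
    kAwaitingFetchTimestamp = 1,
    kCreatingCollection,
    kCloning,
    kBuildingIndex,  // newly inserted state; every state after it shifts by one
    kApplying,
    kError,
    kStrictConsistency,
    kDone,
};

// Builds "countInstancesInRecipientState<N><Label>" from the ordinal and label,
// mirroring the naming scheme used by the cumulative metrics report.
std::string reportFieldName(RecipientState s, const std::string& label) {
    return "countInstancesInRecipientState" + std::to_string(static_cast<int>(s)) + label;
}

int main() {
    assert(reportFieldName(RecipientState::kBuildingIndex, "BuildingIndex") ==
           "countInstancesInRecipientState4BuildingIndex");
    assert(reportFieldName(RecipientState::kApplying, "Applying") ==
           "countInstancesInRecipientState5Applying");
    assert(reportFieldName(RecipientState::kDone, "Done") ==
           "countInstancesInRecipientState8Done");
    return 0;
}
```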
+#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h" #include "mongo/db/s/resharding/resharding_cumulative_metrics.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -152,6 +173,7 @@ class ReshardingCumulativeMetricsTest : public ShardingDataTransformMetricsTestF addExpectedField(RecipientStateEnum::kAwaitingFetchTimestamp); addExpectedField(RecipientStateEnum::kCreatingCollection); addExpectedField(RecipientStateEnum::kCloning); + addExpectedField(RecipientStateEnum::kBuildingIndex); addExpectedField(RecipientStateEnum::kApplying); addExpectedField(RecipientStateEnum::kError); addExpectedField(RecipientStateEnum::kStrictConsistency); diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp index d79b4d1a49637..db800439b2e94 100644 --- a/src/mongo/db/s/resharding/resharding_data_copy_util.cpp +++ b/src/mongo/db/s/resharding/resharding_data_copy_util.cpp @@ -29,28 +29,62 @@ #include "mongo/db/s/resharding/resharding_data_copy_util.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/rename_collection.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h" #include "mongo/db/s/resharding/resharding_txn_cloner_progress_gen.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/session_catalog_migration.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/redaction.h" +#include "mongo/s/index_version.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/util/clock_source.h" #include 
"mongo/util/scopeguard.h" +#include "mongo/util/str.h" namespace mongo::resharding::data_copy { @@ -60,7 +94,7 @@ void ensureCollectionExists(OperationContext* opCtx, invariant(!opCtx->lockState()->isLocked()); invariant(!opCtx->lockState()->inAWriteUnitOfWork()); - writeConflictRetry(opCtx, "resharding::data_copy::ensureCollectionExists", nss.toString(), [&] { + writeConflictRetry(opCtx, "resharding::data_copy::ensureCollectionExists", nss, [&] { AutoGetCollection coll(opCtx, nss, MODE_IX); if (coll) { return; @@ -78,20 +112,19 @@ void ensureCollectionDropped(OperationContext* opCtx, invariant(!opCtx->lockState()->isLocked()); invariant(!opCtx->lockState()->inAWriteUnitOfWork()); - writeConflictRetry( - opCtx, "resharding::data_copy::ensureCollectionDropped", nss.toString(), [&] { - AutoGetCollection coll(opCtx, nss, MODE_X); - if (!coll || (uuid && coll->uuid() != uuid)) { - // If the collection doesn't exist or exists with a different UUID, then the - // requested collection has been dropped already. - return; - } + writeConflictRetry(opCtx, "resharding::data_copy::ensureCollectionDropped", nss, [&] { + AutoGetCollection coll(opCtx, nss, MODE_X); + if (!coll || (uuid && coll->uuid() != uuid)) { + // If the collection doesn't exist or exists with a different UUID, then the + // requested collection has been dropped already. + return; + } - WriteUnitOfWork wuow(opCtx); - uassertStatusOK(coll.getDb()->dropCollectionEvenIfSystem( - opCtx, nss, {} /* dropOpTime */, true /* markFromMigrate */)); - wuow.commit(); - }); + WriteUnitOfWork wuow(opCtx); + uassertStatusOK(coll.getDb()->dropCollectionEvenIfSystem( + opCtx, nss, {} /* dropOpTime */, true /* markFromMigrate */)); + wuow.commit(); + }); } void ensureOplogCollectionsDropped(OperationContext* opCtx, @@ -237,11 +270,15 @@ std::vector fillBatchForInsert(Pipeline& pipeline, int batchSiz int insertBatch(OperationContext* opCtx, const NamespaceString& nss, std::vector& batch) { - return writeConflictRetry(opCtx, "resharding::data_copy::insertBatch", nss.ns(), [&] { - AutoGetCollection outputColl(opCtx, nss, MODE_IX); + return writeConflictRetry(opCtx, "resharding::data_copy::insertBatch", nss, [&] { + const auto outputColl = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection '" << nss << "' did not already exist", - outputColl); + str::stream() << "Collection '" << nss.toStringForErrorMsg() + << "' did not already exist", + outputColl.exists()); int numBytes = 0; WriteUnitOfWork wuow(opCtx); @@ -258,7 +295,7 @@ int insertBatch(OperationContext* opCtx, } uassertStatusOK(collection_internal::insertDocuments( - opCtx, *outputColl, batch.begin(), batch.end(), nullptr)); + opCtx, outputColl.getCollectionPtr(), batch.begin(), batch.end(), nullptr)); wuow.commit(); return numBytes; @@ -348,7 +385,7 @@ void updateSessionRecord(OperationContext* opCtx, writeConflictRetry( opCtx, "resharding::data_copy::updateSessionRecord", - NamespaceString::kSessionTransactionsTableNamespace.ns(), + NamespaceString::kSessionTransactionsTableNamespace, [&] { AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); diff --git a/src/mongo/db/s/resharding/resharding_data_copy_util.h b/src/mongo/db/s/resharding/resharding_data_copy_util.h index fcf1eeeb48cf3..fbd1cd5e5f3e0 100644 --- a/src/mongo/db/s/resharding/resharding_data_copy_util.h +++ b/src/mongo/db/s/resharding/resharding_data_copy_util.h @@ -29,19 +29,39 @@ #pragma 
once +#include +#include #include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/grid.h" #include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" #include "mongo/util/functional.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_data_replication.cpp b/src/mongo/db/s/resharding/resharding_data_replication.cpp index 9704ecc8c98db..3e4cbd20b1e50 100644 --- a/src/mongo/db/s/resharding/resharding_data_replication.cpp +++ b/src/mongo/db/s/resharding/resharding_data_replication.cpp @@ -28,23 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_data_replication.h" - -#include "mongo/db/repl/oplog_applier.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/s/resharding/resharding_collection_cloner.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" +#include "mongo/db/s/resharding/resharding_data_replication.h" +#include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h" #include "mongo/db/s/resharding/resharding_future_util.h" #include "mongo/db/s/resharding/resharding_oplog_applier.h" +#include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h" #include "mongo/db/s/resharding/resharding_oplog_fetcher.h" -#include "mongo/db/s/resharding/resharding_server_parameters_gen.h" #include "mongo/db/s/resharding/resharding_txn_cloner.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/logv2/log.h" -#include "mongo/logv2/redaction.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/future_util.h" diff --git a/src/mongo/db/s/resharding/resharding_data_replication.h b/src/mongo/db/s/resharding/resharding_data_replication.h index 8d6659b65e502..e737209d5933c 100644 --- a/src/mongo/db/s/resharding/resharding_data_replication.h +++ b/src/mongo/db/s/resharding/resharding_data_replication.h @@ 
-29,20 +29,35 @@ #pragma once +#include +#include #include +#include #include +#include #include #include "mongo/bson/timestamp.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/resharding/donor_oplog_id_gen.h" +#include "mongo/db/s/resharding/resharding_collection_cloner.h" +#include "mongo/db/s/resharding/resharding_metrics.h" +#include "mongo/db/s/resharding/resharding_oplog_applier.h" #include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" +#include "mongo/db/s/resharding/resharding_oplog_fetcher.h" +#include "mongo/db/s/resharding/resharding_txn_cloner.h" #include "mongo/db/shard_id.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/resharding/common_types_gen.h" #include "mongo/util/cancellation.h" #include "mongo/util/functional.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp index 5eabc1a378760..b34f5dcea681d 100644 --- a/src/mongo/db/s/resharding/resharding_data_replication_test.cpp +++ b/src/mongo/db/s/resharding/resharding_data_replication_test.cpp @@ -27,21 +27,64 @@ * it in the license file. */ +#include "mongo/db/s/resharding/resharding_data_replication.h" + +#include +#include +#include +#include + +#include + +#include "mongo/base/counter.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_factory_mock.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" -#include "mongo/db/s/resharding/resharding_data_replication.h" #include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_manager.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include 
"mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp index 33016bf5bf6ea..c2d9a26809d48 100644 --- a/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp +++ b/src/mongo/db/s/resharding/resharding_destined_recipient_test.cpp @@ -27,30 +27,90 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_factory_mock.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/repl/apply_ops.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_interface.h" #include "mongo/db/repl/oplog_interface_local.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/s/sharding_mongod_test_fixture.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_write_router.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/shard_id.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/catalog_cache_loader_mock.h" +#include 
"mongo/s/chunk_version.h" #include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" #include "mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -210,7 +270,7 @@ class DestinedRecipientTest : public ShardServerTestFixture { env.version = ShardVersionFactory::make(ChunkVersion(gen, {1, 0}), boost::optional(boost::none)); env.tempNss = NamespaceString::createNamespaceString_forTest( - kNss.db(), + kNss.db_forTest(), fmt::format("{}{}", NamespaceString::kTemporaryReshardingCollectionPrefix, env.sourceUuid.toString())); @@ -235,7 +295,7 @@ class DestinedRecipientTest : public ShardServerTestFixture { coll.setAllowMigrations(false); _mockCatalogCacheLoader->setDatabaseRefreshReturnValue( - DatabaseType(kNss.db().toString(), kShardList[0].getName(), env.dbVersion)); + DatabaseType(kNss.db_forTest().toString(), kShardList[0].getName(), env.dbVersion)); _mockCatalogCacheLoader->setCollectionRefreshValues( kNss, coll, @@ -253,7 +313,7 @@ class DestinedRecipientTest : public ShardServerTestFixture { "y"), boost::none); - ASSERT_OK(onDbVersionMismatchNoExcept(opCtx, kNss.db(), boost::none)); + ASSERT_OK(onDbVersionMismatchNoExcept(opCtx, kNss.db_forTest(), boost::none)); forceShardFilteringMetadataRefresh(opCtx, kNss); if (refreshTempNss) @@ -277,8 +337,11 @@ class DestinedRecipientTest : public ShardServerTestFixture { const BSONObj& filter, const BSONObj& update, const ReshardingEnv& env) { - AutoGetCollection coll(opCtx, nss, MODE_IX); - Helpers::update(opCtx, nss, filter, update); + auto coll = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + Helpers::update(opCtx, coll, filter, update); } void deleteDoc(OperationContext* opCtx, @@ -314,7 +377,7 @@ TEST_F(DestinedRecipientTest, TestGetDestinedRecipient) { AutoGetCollection coll(opCtx, kNss, MODE_IX); OperationShardingState::setShardRole(opCtx, kNss, env.version, env.dbVersion); - ShardingWriteRouter shardingWriteRouter(opCtx, kNss, Grid::get(opCtx)->catalogCache()); + ShardingWriteRouter shardingWriteRouter(opCtx, kNss); auto destShardId = shardingWriteRouter.getReshardingDestinedRecipient(BSON("x" << 2 << "y" << 10)); @@ -331,7 +394,7 @@ TEST_F(DestinedRecipientTest, TestGetDestinedRecipientThrowsOnBlockedRefresh) { OperationShardingState::setShardRole(opCtx, kNss, env.version, env.dbVersion); FailPointEnableBlock failPoint("blockCollectionCacheLookup"); - ASSERT_THROWS_WITH_CHECK(ShardingWriteRouter(opCtx, kNss, Grid::get(opCtx)->catalogCache()), + ASSERT_THROWS_WITH_CHECK(ShardingWriteRouter(opCtx, kNss), ShardCannotRefreshDueToLocksHeldException, [&](const ShardCannotRefreshDueToLocksHeldException& ex) { const auto refreshInfo = diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp index 5a9c8343fa2ee..5d3dcf8305dad 100644 --- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp +++ 
b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.cpp @@ -28,29 +28,45 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h" - +#include +#include #include #include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" -#include "mongo/bson/json.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" -#include "mongo/db/pipeline/document_source_lookup.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source_match.h" -#include "mongo/db/pipeline/document_source_replace_root.h" #include "mongo/db/pipeline/document_source_sort.h" -#include "mongo/db/pipeline/document_source_unwind.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h" #include "mongo/db/s/resharding/resharding_server_parameters_gen.h" #include "mongo/db/s/resharding/resharding_util.h" -#include "mongo/logv2/log.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/scopeguard.h" -#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h index 662277c087996..cbf1656e5a703 100644 --- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h +++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator.h @@ -29,14 +29,18 @@ #pragma once +#include #include #include "mongo/db/cancelable_operation_context.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include "mongo/executor/task_executor.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp index c75396abcdfdf..12dbcd471c6b5 100644 --- a/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_oplog_iterator_test.cpp @@ -28,20 +28,46 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include 
"mongo/db/s/resharding/resharding_donor_oplog_iterator.h" -#include "mongo/db/s/resharding/resharding_server_parameters_gen.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/thread_pool_mock.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -130,11 +156,6 @@ class ReshardingDonorOplogIterTest : public ShardServerTestFixture { executor::ThreadPoolMock::Options threadPoolOptions; threadPoolOptions.onCreateThread = [] { Client::initThread("TestReshardingDonorOplogIterator"); - auto& client = cc(); - { - stdx::lock_guard lk(client); - client.setSystemOperationKillableByStepdown(lk); - } }; auto executor = executor::makeThreadPoolTestExecutor( @@ -175,8 +196,6 @@ class ReshardingDonorOplogIterTest : public ShardServerTestFixture { ServiceContext::UniqueClient makeKillableClient() { auto client = getServiceContext()->makeClient("ReshardingDonorOplogIterator"); - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); return client; } diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp index 116d975da7c6c..119512a870085 100644 --- a/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common.cpp @@ -29,17 +29,59 @@ #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/keypattern.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/s/resharding/resharding_donor_service.h" +#include "mongo/db/s/resharding/resharding_recipient_service.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/vector_clock_mutable.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/grid.h" +#include 
"mongo/s/resharding/common_types_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/future_util.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -320,6 +362,13 @@ void processReshardingFieldsForCollection(OperationContext* opCtx, const NamespaceString& nss, const CollectionMetadata& metadata, const ReshardingFields& reshardingFields) { + // Persist the config time to ensure that in case of stepdown next filtering metadata refresh on + // the new primary will always fetch the latest information. + auto* const replCoord = repl::ReplicationCoordinator::get(opCtx); + if (!replCoord->isReplEnabled() || replCoord->getMemberState().primary()) { + VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); + } + if (reshardingFields.getState() == CoordinatorStateEnum::kAborting) { // The coordinator encountered an unrecoverable error, both donors and recipients should be // made aware. @@ -380,11 +429,6 @@ void clearFilteringMetadata(OperationContext* opCtx, AsyncTry([svcCtx = opCtx->getServiceContext(), nss] { ThreadClient tc("TriggerReshardingRecovery", svcCtx); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - auto opCtx = tc->makeOperationContext(); onCollectionPlacementVersionMismatch( opCtx.get(), nss, boost::none /* chunkVersionReceived */); diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common.h b/src/mongo/db/s/resharding/resharding_donor_recipient_common.h index 10be195c58679..6f7330f3a100b 100644 --- a/src/mongo/db/s/resharding/resharding_donor_recipient_common.h +++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common.h @@ -28,9 +28,21 @@ */ #pragma once +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/collection_metadata.h" +#include "mongo/db/s/resharding/donor_document_gen.h" +#include "mongo/db/s/resharding/recipient_document_gen.h" #include "mongo/db/s/resharding/resharding_donor_service.h" #include "mongo/db/s/resharding/resharding_recipient_service.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/uuid.h" namespace mongo { namespace resharding { diff --git a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp index eea75c8761a20..03b93de0f595a 100644 --- a/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_recipient_common_test.cpp @@ -27,21 +27,63 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/drop_database.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/persistent_task_store.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/donor_document_gen.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" +#include "mongo/db/s/resharding/resharding_donor_service.h" +#include "mongo/db/s/resharding/resharding_recipient_service.h" +#include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" -#include "mongo/unittest/death_test.h" -#include "mongo/util/fail_point.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -647,10 +689,10 @@ TEST_F(ReshardingDonorRecipientCommonInternalsTest, ClearReshardingFilteringMeta OperationContext* opCtx = operationContext(); NamespaceString sourceNss1 = NamespaceString::createNamespaceString_forTest("db", "one"); NamespaceString tempReshardingNss1 = - resharding::constructTemporaryReshardingNss(sourceNss1.db(), UUID::gen()); + resharding::constructTemporaryReshardingNss(sourceNss1.db_forTest(), UUID::gen()); NamespaceString sourceNss2 = NamespaceString::createNamespaceString_forTest("db", "two"); NamespaceString tempReshardingNss2 = - resharding::constructTemporaryReshardingNss(sourceNss2.db(), UUID::gen()); + resharding::constructTemporaryReshardingNss(sourceNss2.db_forTest(), UUID::gen()); ShardId shardId1 = ShardId{"recipient1"}; ShardId shardId2 = ShardId{"recipient2"}; ReshardingDonorDocument doc1 = diff --git a/src/mongo/db/s/resharding/resharding_donor_service.cpp b/src/mongo/db/s/resharding/resharding_donor_service.cpp index aafe846b7cb57..474d9755586b2 100644 --- a/src/mongo/db/s/resharding/resharding_donor_service.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_service.cpp @@ -30,22 +30,43 @@ #include "mongo/db/s/resharding/resharding_donor_service.h" +#include #include +#include +#include +#include #include - -#include 
"mongo/db/catalog/drop_collection.h" -#include "mongo/db/catalog/rename_collection.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/ops/delete.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/resharding_change_event_o2_field_gen.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" @@ -55,13 +76,40 @@ #include "mongo/db/s/sharding_index_catalog_ddl_util.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/write_block_bypass.h" #include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/future_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/timer.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -89,14 +137,14 @@ Date_t getCurrentTime() { Timestamp generateMinFetchTimestamp(OperationContext* opCtx, const NamespaceString& sourceNss) { // Do a no-op write and use the OpTime as the minFetchTimestamp writeConflictRetry( - opCtx, "resharding donor minFetchTimestamp", NamespaceString::kRsOplogNamespace.ns(), [&] { + opCtx, "resharding donor minFetchTimestamp", NamespaceString::kRsOplogNamespace, [&] { AutoGetDb db(opCtx, sourceNss.dbName(), MODE_IX); Lock::CollectionLock collLock(opCtx, sourceNss, MODE_S); AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); const 
std::string msg = str::stream() - << "All future oplog entries on the namespace " << sourceNss.ns() + << "All future oplog entries on the namespace " << sourceNss.toStringForErrorMsg() << " must include a 'destinedRecipient' field"; WriteUnitOfWork wuow(opCtx); opCtx->getClient()->getServiceContext()->getOpObserver()->onInternalOpMessage( @@ -233,7 +281,8 @@ ReshardingDonorService::DonorStateMachine::DonorStateMachine( }())), _critSecReason(BSON("command" << "resharding_donor" - << "collection" << _metadata.getSourceNss().toString())), + << "collection" + << NamespaceStringUtil::serialize(_metadata.getSourceNss()))), _isAlsoRecipient([&] { auto myShardId = _externalState->myShardId(_serviceContext); return std::find(_recipientShardIds.begin(), _recipientShardIds.end(), myShardId) != @@ -626,7 +675,7 @@ void ReshardingDonorService::DonorStateMachine:: auto oplog = generateOplogEntry(); writeConflictRetry( - rawOpCtx, "ReshardingBeginOplog", NamespaceString::kRsOplogNamespace.ns(), [&] { + rawOpCtx, "ReshardingBeginOplog", NamespaceString::kRsOplogNamespace, [&] { AutoGetOplog oplogWrite(rawOpCtx, OplogAccessMode::kWrite); WriteUnitOfWork wunit(rawOpCtx); const auto& oplogOpTime = repl::logOp(rawOpCtx, &oplog); @@ -725,9 +774,9 @@ void ReshardingDonorService::DonorStateMachine:: oplog.setOpType(repl::OpTypeEnum::kNoop); oplog.setUuid(_metadata.getSourceUUID()); oplog.setDestinedRecipient(destinedRecipient); - oplog.setObject( - BSON("msg" << fmt::format("Writes to {} are temporarily blocked for resharding.", - _metadata.getSourceNss().toString()))); + oplog.setObject(BSON( + "msg" << fmt::format("Writes to {} are temporarily blocked for resharding.", + NamespaceStringUtil::serialize(_metadata.getSourceNss())))); oplog.setObject2(BSON("type" << resharding::kReshardFinalOpLogType << "reshardingUUID" << _metadata.getReshardingUUID())); oplog.setOpTime(OplogSlot()); @@ -743,7 +792,7 @@ void ReshardingDonorService::DonorStateMachine:: writeConflictRetry( rawOpCtx, "ReshardingBlockWritesOplog", - NamespaceString::kRsOplogNamespace.ns(), + NamespaceString::kRsOplogNamespace, [&] { AutoGetOplog oplogWrite(rawOpCtx, OplogAccessMode::kWrite); WriteUnitOfWork wunit(rawOpCtx); @@ -995,16 +1044,22 @@ void ReshardingDonorService::DonorStateMachine::_updateDonorDocument( auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); const auto& nss = NamespaceString::kDonorReshardingOperationsNamespace; - writeConflictRetry(opCtx.get(), "DonorStateMachine::_updateDonorDocument", nss.toString(), [&] { - AutoGetCollection coll(opCtx.get(), nss, MODE_X); + writeConflictRetry(opCtx.get(), "DonorStateMachine::_updateDonorDocument", nss, [&] { + auto coll = acquireCollection( + opCtx.get(), + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx.get()), + AcquisitionPrerequisites::kWrite), + MODE_X); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << nss.toString() << " does not exist", - coll); + str::stream() << nss.toStringForErrorMsg() << " does not exist", + coll.exists()); WriteUnitOfWork wuow(opCtx.get()); Helpers::update(opCtx.get(), - nss, + coll, BSON(ReshardingDonorDocument::kReshardingUUIDFieldName << _metadata.getReshardingUUID()), BSON("$set" << BSON(ReshardingDonorDocument::kMutableStateFieldName @@ -1021,10 +1076,16 @@ void ReshardingDonorService::DonorStateMachine::_removeDonorDocument( auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); const auto& nss = 
NamespaceString::kDonorReshardingOperationsNamespace; - writeConflictRetry(opCtx.get(), "DonorStateMachine::_removeDonorDocument", nss.toString(), [&] { - AutoGetCollection coll(opCtx.get(), nss, MODE_X); - - if (!coll) { + writeConflictRetry(opCtx.get(), "DonorStateMachine::_removeDonorDocument", nss, [&] { + const auto coll = acquireCollection( + opCtx.get(), + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx.get()), + AcquisitionPrerequisites::kWrite), + MODE_X); + + if (!coll.exists()) { return; } @@ -1037,8 +1098,7 @@ void ReshardingDonorService::DonorStateMachine::_removeDonorDocument( }); deleteObjects(opCtx.get(), - *coll, - nss, + coll, BSON(ReshardingDonorDocument::kReshardingUUIDFieldName << _metadata.getReshardingUUID()), true /* justOne */); diff --git a/src/mongo/db/s/resharding/resharding_donor_service.h b/src/mongo/db/s/resharding/resharding_donor_service.h index 9bf06760f82f6..280f59c93bba2 100644 --- a/src/mongo/db/s/resharding/resharding_donor_service.h +++ b/src/mongo/db/s/resharding/resharding_donor_service.h @@ -29,12 +29,35 @@ #pragma once +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/resharding/donor_document_gen.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/s/resharding/common_types_gen.h" #include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp index 53c0044ae7514..ea6d4acbc235b 100644 --- a/src/mongo/db/s/resharding/resharding_donor_service_test.cpp +++ b/src/mongo/db/s/resharding/resharding_donor_service_test.cpp @@ -28,22 +28,36 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/db/ops/update.h" -#include "mongo/db/ops/update_request.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include 
"mongo/db/repl/primary_only_service_test_fixture.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/resharding_change_event_o2_field_gen.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_donor_service.h" @@ -51,10 +65,21 @@ #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -148,13 +173,14 @@ class ReshardingDonorServiceTest : public repl::PrimaryOnlyServiceMongoDTest { isAlsoRecipient ? donorShardId : ShardId{"recipient2"}, ShardId{"recipient3"}}); - NamespaceString sourceNss("sourcedb.sourcecollection"); + NamespaceString sourceNss = + NamespaceString::createNamespaceString_forTest("sourcedb.sourcecollection"); auto sourceUUID = UUID::gen(); auto commonMetadata = CommonReshardingMetadata( UUID::gen(), sourceNss, sourceUUID, - resharding::constructTemporaryReshardingNss(sourceNss.db(), sourceUUID), + resharding::constructTemporaryReshardingNss(sourceNss.db_forTest(), sourceUUID), BSON("newKey" << 1)); commonMetadata.setStartTime(getServiceContext()->getFastClockSource()->now()); @@ -272,9 +298,10 @@ TEST_F(ReshardingDonorServiceTest, WritesNoOpOplogEntryOnReshardingBegin) { ErrorCodes::InterruptedDueToReplStateChange); DBDirectClient client(opCtx.get()); - NamespaceString sourceNss("sourcedb", "sourcecollection"); + NamespaceString sourceNss = + NamespaceString::createNamespaceString_forTest("sourcedb", "sourcecollection"); FindCommandRequest findRequest{NamespaceString::kRsOplogNamespace}; - findRequest.setFilter(BSON("ns" << sourceNss.toString())); + findRequest.setFilter(BSON("ns" << sourceNss.toString_forTest())); auto cursor = client.find(std::move(findRequest)); ASSERT_TRUE(cursor->more()) << "Found no oplog entries for source collection"; @@ -751,13 +778,13 @@ TEST_F(ReshardingDonorServiceTest, RestoreMetricsOnKBlockingWrites) { // This acquires the critical section required by resharding donor machine when it is in // kBlockingWrites. 
ShardingRecoveryService::get(opCtx.get()) - ->acquireRecoverableCriticalSectionBlockWrites(opCtx.get(), - doc.getSourceNss(), - BSON("command" - << "resharding_donor" - << "collection" - << doc.getSourceNss().toString()), - ShardingCatalogClient::kLocalWriteConcern); + ->acquireRecoverableCriticalSectionBlockWrites( + opCtx.get(), + doc.getSourceNss(), + BSON("command" + << "resharding_donor" + << "collection" << doc.getSourceNss().toString_forTest()), + ShardingCatalogClient::kLocalWriteConcern); auto donor = DonorStateMachine::getOrCreate(opCtx.get(), _service, doc.toBSON()); notifyReshardingCommitting(opCtx.get(), *donor, doc); diff --git a/src/mongo/db/s/resharding/resharding_future_util.cpp b/src/mongo/db/s/resharding/resharding_future_util.cpp index 62c95fbaf3f82..9894570835671 100644 --- a/src/mongo/db/s/resharding/resharding_future_util.cpp +++ b/src/mongo/db/s/resharding/resharding_future_util.cpp @@ -27,9 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include + +#include #include "mongo/db/s/resharding/resharding_future_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future_impl.h" namespace mongo::resharding { diff --git a/src/mongo/db/s/resharding/resharding_future_util.h b/src/mongo/db/s/resharding/resharding_future_util.h index 30c2c359ea1a1..dda1ed6db1993 100644 --- a/src/mongo/db/s/resharding/resharding_future_util.h +++ b/src/mongo/db/s/resharding/resharding_future_util.h @@ -29,9 +29,16 @@ #pragma once +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/client/read_preference.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/cancellation.h" #include "mongo/util/functional.h" #include "mongo/util/future.h" diff --git a/src/mongo/db/s/resharding/resharding_future_util_test.cpp b/src/mongo/db/s/resharding/resharding_future_util_test.cpp index e37a13a314b7d..3f8af82462223 100644 --- a/src/mongo/db/s/resharding/resharding_future_util_test.cpp +++ b/src/mongo/db/s/resharding/resharding_future_util_test.cpp @@ -28,8 +28,20 @@ */ #include "mongo/db/s/resharding/resharding_future_util.h" -#include "mongo/unittest/unittest.h" + +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future_impl.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp index fad84d1a4e8e3..891e9929055b4 100644 --- a/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp +++ b/src/mongo/db/s/resharding/resharding_manual_cleanup.cpp @@ -30,13 +30,54 @@ #include "mongo/db/s/resharding/resharding_manual_cleanup.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include 
"mongo/db/s/resharding/resharding_donor_recipient_common.h" +#include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/cleanup_reshard_collection_gen.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -76,7 +117,7 @@ void assertResponseOK(const NamespaceString& nss, StatusWith response, ShardId shardId) { auto errorContext = "Unable to cleanup reshard collection for namespace {} on shard {}"_format( - nss.ns(), shardId.toString()); + nss.toStringForErrorMsg(), shardId.toString()); auto shardResponse = uassertStatusOKWithContext(std::move(response), errorContext); auto status = getStatusFromCommandResult(shardResponse.data); @@ -214,7 +255,8 @@ void ReshardingCoordinatorCleaner::_cleanOnParticipantShards( createShardCleanupRequests(_originalCollectionNss, _reshardingUUID, doc), ReadPreferenceSetting(ReadPreference::PrimaryOnly), Shard::RetryPolicy::kIdempotent, - nullptr /* resourceYielder */); + nullptr /* resourceYielder */, + {} /* designatedHostsMap */); while (!ars.done()) { auto arsResponse = ars.next(); @@ -239,7 +281,7 @@ bool ReshardingCoordinatorCleaner::_checkExistsTempReshardingCollection( void ReshardingCoordinatorCleaner::_dropTemporaryReshardingCollection( OperationContext* opCtx, const NamespaceString& tempReshardingNss) { ShardsvrDropCollection dropCollectionCommand(tempReshardingNss); - dropCollectionCommand.setDbName(tempReshardingNss.db()); + dropCollectionCommand.setDbName(tempReshardingNss.dbName()); const auto dbInfo = uassertStatusOK( Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, tempReshardingNss.db())); diff --git a/src/mongo/db/s/resharding/resharding_manual_cleanup.h b/src/mongo/db/s/resharding/resharding_manual_cleanup.h index 8ab2157c99a65..63f58f6d994f0 100644 --- a/src/mongo/db/s/resharding/resharding_manual_cleanup.h +++ b/src/mongo/db/s/resharding/resharding_manual_cleanup.h @@ -29,8 +29,15 @@ #pragma once +#include +#include + #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/s/resharding/coordinator_document_gen.h" +#include "mongo/db/s/resharding/donor_document_gen.h" +#include "mongo/db/s/resharding/recipient_document_gen.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" #include "mongo/db/s/resharding/resharding_donor_service.h" #include "mongo/db/s/resharding/resharding_recipient_service.h" diff --git a/src/mongo/db/s/resharding/resharding_metrics.cpp b/src/mongo/db/s/resharding/resharding_metrics.cpp index 2f1539722565a..82b4728bbab84 100644 --- a/src/mongo/db/s/resharding/resharding_metrics.cpp +++ b/src/mongo/db/s/resharding/resharding_metrics.cpp @@ 
-27,21 +27,46 @@ * it in the license file. */ #include "mongo/db/s/resharding/resharding_metrics.h" + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/resharding/resharding_util.h" -#include "mongo/util/optional_util.h" +#include "mongo/db/server_options.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace { using TimedPhase = ReshardingMetrics::TimedPhase; const auto kTimedPhaseNamesMap = [] { + return ReshardingMetrics::TimedPhaseNameMap{ + {TimedPhase::kCloning, "totalCopyTimeElapsedSecs"}, + {TimedPhase::kApplying, "totalApplyTimeElapsedSecs"}, + {TimedPhase::kCriticalSection, "totalCriticalSectionTimeElapsedSecs"}, + {TimedPhase::kBuildingIndex, "totalIndexBuildTimeElapsedSecs"}}; +}(); +const auto kTimedPhaseNamesMapWithoutReshardingImprovements = [] { return ReshardingMetrics::TimedPhaseNameMap{ {TimedPhase::kCloning, "totalCopyTimeElapsedSecs"}, {TimedPhase::kApplying, "totalApplyTimeElapsedSecs"}, {TimedPhase::kCriticalSection, "totalCriticalSectionTimeElapsedSecs"}}; }(); - inline ReshardingMetrics::State getDefaultState(ReshardingMetrics::Role role) { using Role = ReshardingMetrics::Role; switch (role) { @@ -62,7 +87,7 @@ BSONObj createOriginalCommand(const NamespaceString& nss, BSONObj shardKey) { using Arr = std::vector; using V = Value; - return Doc{{"reshardCollection", V{StringData{nss.toString()}}}, + return Doc{{"reshardCollection", V{StringData{NamespaceStringUtil::serialize(nss)}}}, {"key", std::move(shardKey)}, {"unique", V{StringData{"false"}}}, {"collation", V{Doc{{"locale", V{StringData{"simple"}}}}}}} @@ -100,7 +125,7 @@ ReshardingMetrics::ReshardingMetrics(UUID instanceId, : ReshardingMetrics{std::move(instanceId), shardKey, std::move(nss), - std::move(role), + role, std::move(startTime), clockSource, cumulativeMetrics, @@ -217,8 +242,16 @@ StringData ReshardingMetrics::getStateString() const noexcept { BSONObj ReshardingMetrics::reportForCurrentOp() const noexcept { BSONObjBuilder builder; - reportDurationsForAllPhases( - kTimedPhaseNamesMap, getClockSource(), &builder, Seconds{0}); + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + reportDurationsForAllPhases( + kTimedPhaseNamesMap, getClockSource(), &builder, Seconds{0}); + } else { + reportDurationsForAllPhases(kTimedPhaseNamesMapWithoutReshardingImprovements, + getClockSource(), + &builder, + Seconds{0}); + } if (_role == Role::kRecipient) { reportOplogApplicationCountMetrics(_reshardingFieldNames, &builder); } @@ -243,10 +276,17 @@ void ReshardingMetrics::restoreRecipientSpecificFields( restoreDocumentsProcessed(*docsCopied, *bytesCopied); } restorePhaseDurationFields(document); + restoreIndexBuildDurationFields(*metrics); } void ReshardingMetrics::restoreCoordinatorSpecificFields( const ReshardingCoordinatorDocument& document) { + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + auto isSameKeyResharding = + document.getForceRedistribution() && *document.getForceRedistribution(); + 
setIsSameKeyResharding(isSameKeyResharding); + } restorePhaseDurationFields(document); } @@ -264,4 +304,18 @@ void ReshardingMetrics::restoreExternallyTrackedRecipientFields( values.writesToStashCollections); _ableToEstimateRemainingRecipientTime.store(true); } + +void ReshardingMetrics::restoreIndexBuildDurationFields(const ReshardingRecipientMetrics& metrics) { + auto indexBuildTime = metrics.getIndexBuildTime(); + if (indexBuildTime) { + auto indexBuildBegin = indexBuildTime->getStart(); + if (indexBuildBegin) { + setStartFor(TimedPhase::kBuildingIndex, *indexBuildBegin); + } + auto indexBuildEnd = indexBuildTime->getStop(); + if (indexBuildEnd) { + setEndFor(TimedPhase::kBuildingIndex, *indexBuildEnd); + } + } +} } // namespace mongo diff --git a/src/mongo/db/s/resharding/resharding_metrics.h b/src/mongo/db/s/resharding/resharding_metrics.h index 53c0db9b1d34b..c2b62eec00f8b 100644 --- a/src/mongo/db/s/resharding/resharding_metrics.h +++ b/src/mongo/db/s/resharding/resharding_metrics.h @@ -29,27 +29,47 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/metrics/metrics_state_holder.h" +#include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" +#include "mongo/db/s/metrics/with_oplog_application_count_metrics.h" #include "mongo/db/s/metrics/with_oplog_application_count_metrics_also_updating_cumulative_metrics.h" #include "mongo/db/s/metrics/with_oplog_application_latency_metrics_interface_updating_cumulative_metrics.h" #include "mongo/db/s/metrics/with_phase_duration_management.h" +#include "mongo/db/s/metrics/with_state_management_for_cumulative_metrics.h" #include "mongo/db/s/metrics/with_state_management_for_instance_metrics.h" #include "mongo/db/s/metrics/with_typed_cumulative_metrics_provider.h" +#include "mongo/db/s/resharding/coordinator_document_gen.h" +#include "mongo/db/s/resharding/recipient_document_gen.h" #include "mongo/db/s/resharding/resharding_cumulative_metrics.h" #include "mongo/db/s/resharding/resharding_metrics_field_name_provider.h" #include "mongo/db/s/resharding/resharding_metrics_helpers.h" #include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { namespace resharding_metrics { -enum TimedPhase { kCloning, kApplying, kCriticalSection }; -constexpr auto kNumTimedPhase = 3; +enum TimedPhase { kCloning, kApplying, kCriticalSection, kBuildingIndex }; +constexpr auto kNumTimedPhase = 4; namespace detail { using PartialBase1 = WithTypedCumulativeMetricsProvider diff --git a/src/mongo/db/s/resharding/resharding_metrics_helpers.cpp b/src/mongo/db/s/resharding/resharding_metrics_helpers.cpp index 1708a43295950..7dfee57133e8b 100644 --- a/src/mongo/db/s/resharding/resharding_metrics_helpers.cpp +++ b/src/mongo/db/s/resharding/resharding_metrics_helpers.cpp @@ -29,10 +29,28 @@ #include "mongo/db/s/resharding/resharding_metrics_helpers.h" +#include + +#include +#include +#include + #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include 
"mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" +#include "mongo/db/s/resharding/resharding_donor_service.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding diff --git a/src/mongo/db/s/resharding/resharding_metrics_helpers.h b/src/mongo/db/s/resharding/resharding_metrics_helpers.h index 9ce618f13ae84..bd0e29a957ebc 100644 --- a/src/mongo/db/s/resharding/resharding_metrics_helpers.h +++ b/src/mongo/db/s/resharding/resharding_metrics_helpers.h @@ -29,11 +29,19 @@ #pragma once +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/donor_document_gen.h" #include "mongo/db/s/resharding/recipient_document_gen.h" -#include +#include "mongo/db/service_context.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_metrics_test.cpp b/src/mongo/db/s/resharding/resharding_metrics_test.cpp index 0d6f78bab7d72..841126f8c0dcb 100644 --- a/src/mongo/db/s/resharding/resharding_metrics_test.cpp +++ b/src/mongo/db/s/resharding/resharding_metrics_test.cpp @@ -28,14 +28,33 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/keypattern.h" #include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h" +#include "mongo/db/s/resharding/donor_document_gen.h" #include "mongo/db/s/resharding/resharding_metrics.h" -#include "mongo/db/s/resharding/resharding_service_test_helpers.h" #include "mongo/db/s/resharding/resharding_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source_mock.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -123,7 +142,7 @@ class ReshardingMetricsTest : public ShardingDataTransformMetricsTestFixture { kTestNamespace, getSourceCollectionId(), resharding::constructTemporaryReshardingNss( - kTestNamespace.db(), getSourceCollectionId()), + kTestNamespace.db_forTest(), getSourceCollectionId()), kShardKey}; metadata.setStartTime(getClockSource()->now() - kRunningTime); return metadata; @@ -133,7 +152,8 @@ class ReshardingMetricsTest : public ShardingDataTransformMetricsTestFixture { ASSERT_EQ(report.getStringField("type"), "op"); ASSERT_EQ(report.getStringField("op"), "command"); auto originalCommand = report.getObjectField("originatingCommand"); 
- ASSERT_EQ(originalCommand.getStringField("reshardCollection"), kTestNamespace.toString()); + ASSERT_EQ(originalCommand.getStringField("reshardCollection"), + kTestNamespace.toString_forTest()); ASSERT_EQ(originalCommand.getObjectField("key").woCompare(kShardKey), 0); ASSERT_EQ(originalCommand.getStringField("unique"), "false"); ASSERT_EQ(originalCommand.getObjectField("collation") @@ -305,6 +325,22 @@ TEST_F(ReshardingMetricsTest, RestoresFinishedApplyingTimeFromRecipientStateDocu "totalApplyTimeElapsedSecs"); } +TEST_F(ReshardingMetricsTest, RestoresOngoingBuildIndexTimeFromRecipientStateDocument) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); + doRestoreOngoingPhaseTest( + [this] { return createRecipientDocument(RecipientStateEnum::kBuildingIndex, UUID::gen()); }, + [this](auto& doc, auto interval) { doc.setIndexBuildTime(std::move(interval)); }, + "totalIndexBuildTimeElapsedSecs"); +} + +TEST_F(ReshardingMetricsTest, RestoresFinishedBuildIndexTimeFromRecipientStateDocument) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); + doRestoreCompletedPhaseTest( + [this] { return createRecipientDocument(RecipientStateEnum::kApplying, UUID::gen()); }, + [this](auto& doc, auto interval) { doc.setIndexBuildTime(std::move(interval)); }, + "totalIndexBuildTimeElapsedSecs"); +} + TEST_F(ReshardingMetricsTest, RestoresGeneralFieldsFromDonorStateDocument) { auto state = DonorStateEnum::kDonatingInitialData; auto opId = UUID::gen(); @@ -377,6 +413,7 @@ TEST_F(ReshardingMetricsTest, RestoresFinishedApplyingTimeFromCoordinatorStateDo "totalApplyTimeElapsedSecs"); } + TEST_F(ReshardingMetricsTest, OnInsertAppliedShouldIncrementInsertsApplied) { auto metrics = createInstanceMetrics(getClockSource(), UUID::gen(), Role::kRecipient); @@ -532,6 +569,20 @@ TEST_F(ReshardingMetricsTest, CurrentOpReportsCopyingTime) { }); } +TEST_F(ReshardingMetricsTest, CurrentOpReportsBuildIndexTime) { + RAIIServerParameterControllerForTest controller("featureFlagReshardingImprovements", true); + runTimeReportTest( + "CurrentOpReportsBuildIndexTime", + {Role::kRecipient}, + "totalIndexBuildTimeElapsedSecs", + [this](ReshardingMetrics* metrics) { + metrics->setStartFor(TimedPhase::kBuildingIndex, getClockSource()->now()); + }, + [this](ReshardingMetrics* metrics) { + metrics->setEndFor(TimedPhase::kBuildingIndex, getClockSource()->now()); + }); +} + TEST_F(ReshardingMetricsTest, CurrentOpReportsApplyingTime) { runTimeReportTest( "CurrentOpReportsApplyingTime", diff --git a/src/mongo/db/s/resharding/resharding_op_observer.cpp b/src/mongo/db/s/resharding/resharding_op_observer.cpp index 63ef981a2b2d1..7a8e175124603 100644 --- a/src/mongo/db/s/resharding/resharding_op_observer.cpp +++ b/src/mongo/db/s/resharding/resharding_op_observer.cpp @@ -28,16 +28,50 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_op_observer.h" - +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/member_state.h" +#include 
"mongo/db/repl/primary_only_service.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/donor_document_gen.h" +#include "mongo/db/s/resharding/resharding_coordinator_observer.h" #include "mongo/db/s/resharding/resharding_coordinator_service.h" +#include "mongo/db/s/resharding/resharding_op_observer.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -76,7 +110,7 @@ void assertCanExtractShardKeyFromDocs(OperationContext* opCtx, // A user can manually create a 'db.system.resharding.' collection that isn't guaranteed to be // sharded outside of running reshardCollection. uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Temporary resharding collection " << nss.toString() + str::stream() << "Temporary resharding collection " << nss.toStringForErrorMsg() << " is not sharded", collDesc.isSharded()); @@ -184,7 +218,8 @@ void ReshardingOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { const auto& nss = coll->ns(); if (nss == NamespaceString::kDonorReshardingOperationsNamespace) { @@ -205,7 +240,9 @@ void ReshardingOpObserver::onInserts(OperationContext* opCtx, } } -void ReshardingOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) { +void ReshardingOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.coll->ns() == NamespaceString::kDonorReshardingOperationsNamespace) { // Primaries and secondaries should execute pinning logic when observing changes to the // donor resharding document. 
@@ -252,7 +289,8 @@ void ReshardingOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEn void ReshardingOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kDonorReshardingOperationsNamespace) { _doPin(opCtx); } diff --git a/src/mongo/db/s/resharding/resharding_op_observer.h b/src/mongo/db/s/resharding/resharding_op_observer.h index 12a6d5d83f32b..21b1d3fe8bc95 100644 --- a/src/mongo/db/s/resharding/resharding_op_observer.h +++ b/src/mongo/db/s/resharding/resharding_op_observer.h @@ -29,10 +29,19 @@ #pragma once -#include "mongo/db/op_observer/op_observer.h" - #include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/storage/durable_history_pin.h" namespace mongo { @@ -53,7 +62,7 @@ class ReshardingHistoryHook : public DurableHistoryPin { * such as config.reshardingOperations, config.localReshardingOperations.donor, and * config.localReshardingOperations.recipient. */ -class ReshardingOpObserver final : public OpObserver { +class ReshardingOpObserver final : public OpObserverNoop { ReshardingOpObserver(const ReshardingOpObserver&) = delete; ReshardingOpObserver& operator=(const ReshardingOpObserver&) = delete; @@ -61,216 +70,27 @@ class ReshardingOpObserver final : public OpObserver { ReshardingOpObserver(); ~ReshardingOpObserver() override; - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) override {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) override {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) override {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, - const NamespaceString& nss) override {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, - const NamespaceString& nss) override {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) override {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) override {} + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfigAndSystem, NamespaceFilter::kConfigAndSystem}; + } void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override; - 
- void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override; - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override; - - void aboutToDelete(OperationContext* opCtx, - const CollectionPtr& coll, - const BSONObj& doc) override {} + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) override; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) override {} - - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) override {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) override {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) override {} - - using OpObserver::onDropCollection; - repl::OpTime onDropCollection(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid, - std::uint64_t numRecords, - CollectionDropType dropType) override { - return repl::OpTime(); - } - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) override {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override {} - - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) override {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override { - return repl::OpTime(); - } - - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) override {} - - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) override {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) 
override {} - - void onTransactionStart(OperationContext* opCtx) override {} - - void onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) override {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept override {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) override { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) override {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) override {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) override {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) override {} - -private: - void _onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) override {} + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; }; } // namespace mongo diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_application.cpp index 670dc1635214f..bc114d2ebd346 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_application.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_application.cpp @@ -29,28 +29,65 @@ #include "mongo/db/s/resharding/resharding_oplog_application.h" -#include "mongo/db/catalog/collection_write_path.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/index/index_access_method.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/delete.h" #include "mongo/db/ops/delete_request_gen.h" #include "mongo/db/ops/parsed_delete.h" #include "mongo/db/ops/update.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/plan_executor.h" -#include "mongo/db/repl/oplog_applier_utils.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/resharding_server_parameters_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/stats/counters.h" +#include "mongo/db/shard_role.h" +#include 
"mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction/transaction_operations.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -67,10 +104,6 @@ void runWithTransaction(OperationContext* opCtx, unique_function func) { AlternativeSessionRegion asr(opCtx); auto* const client = asr.opCtx()->getClient(); - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } asr.opCtx()->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); AuthorizationSession::get(client)->grantInternalAuthorization(client); @@ -149,69 +182,22 @@ Status ReshardingOplogApplicationRules::applyOperation( invariant(!opCtx->lockState()->inAWriteUnitOfWork()); invariant(opCtx->writesAreReplicated()); - return writeConflictRetry(opCtx, "applyOplogEntryCRUDOpResharding", op.getNss().ns(), [&] { + return writeConflictRetry(opCtx, "applyOplogEntryCRUDOpResharding", op.getNss(), [&] { try { - WriteUnitOfWork wuow(opCtx); - - AutoGetCollection autoCollOutput( - opCtx, - _outputNss, - MODE_IX, - AutoGetCollection::Options{}.deadline(getDeadline(opCtx))); - uassert( - ErrorCodes::NamespaceNotFound, - str::stream() << "Failed to apply op during resharding due to missing collection " - << _outputNss.ns(), - autoCollOutput); - - AutoGetCollection autoCollStash( - opCtx, - _myStashNss, - MODE_IX, - AutoGetCollection::Options{}.deadline(getDeadline(opCtx))); - uassert( - ErrorCodes::NamespaceNotFound, - str::stream() << "Failed to apply op during resharding due to missing collection " - << _myStashNss.ns(), - autoCollStash); - auto opType = op.getOpType(); switch (opType) { case repl::OpTypeEnum::kInsert: - _applyInsert_inlock( - opCtx, autoCollOutput.getDb(), *autoCollOutput, *autoCollStash, op); - _applierMetrics->onInsertApplied(); - - break; case repl::OpTypeEnum::kUpdate: - _applyUpdate_inlock( - opCtx, autoCollOutput.getDb(), *autoCollOutput, *autoCollStash, op); - _applierMetrics->onUpdateApplied(); + _applyInsertOrUpdate(opCtx, sii, op); break; - case repl::OpTypeEnum::kDelete: - _applyDelete_inlock( - opCtx, autoCollOutput.getDb(), *autoCollOutput, *autoCollStash, sii, op); + case repl::OpTypeEnum::kDelete: { + _applyDelete(opCtx, sii, op); _applierMetrics->onDeleteApplied(); break; + } default: MONGO_UNREACHABLE; } - - if (opCtx->recoveryUnit()->isTimestamped()) { - // Resharding oplog application does two kinds of writes: - // - // 1) The (obvious) write for applying oplog entries to documents being resharded. - // 2) An unreplicated no-op write that on a document in the output collection to - // ensure serialization of concurrent transactions. 
- // - // Some of the code paths can end up where only the second kind of write is made. In - // that case, there is no timestamp associated with the write. This results in a - // mixed-mode update chain within WT that is problematic with durable history. We - // roll back those transactions by only committing the `WriteUnitOfWork` when there - // is a timestamp set. - wuow.commit(); - } - return Status::OK(); } catch (const DBException& ex) { if (ex.code() == ErrorCodes::WriteConflict) { @@ -227,10 +213,71 @@ Status ReshardingOplogApplicationRules::applyOperation( }); } +void ReshardingOplogApplicationRules::_applyInsertOrUpdate( + OperationContext* opCtx, + const boost::optional& sii, + const repl::OplogEntry& op) const { + + WriteUnitOfWork wuow(opCtx); + + auto outputColl = opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] { + return acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, _outputNss, AcquisitionPrerequisites::kWrite), + MODE_IX); + }); + + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Failed to apply op during resharding due to missing collection " + << _outputNss.toStringForErrorMsg(), + outputColl.exists()); + + auto stashColl = opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] { + return acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, _myStashNss, AcquisitionPrerequisites::kWrite), + MODE_IX); + }); + + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Failed to apply op during resharding due to missing collection " + << _myStashNss.toStringForErrorMsg(), + stashColl.exists()); + + auto opType = op.getOpType(); + switch (opType) { + case repl::OpTypeEnum::kInsert: + _applyInsert_inlock(opCtx, outputColl, stashColl, op); + _applierMetrics->onInsertApplied(); + break; + case repl::OpTypeEnum::kUpdate: + _applyUpdate_inlock(opCtx, outputColl, stashColl, op); + _applierMetrics->onUpdateApplied(); + break; + default: + MONGO_UNREACHABLE; + } + + if (opCtx->recoveryUnit()->isTimestamped()) { + // Resharding oplog application does two kinds of writes: + // + // 1) The (obvious) write for applying oplog entries to documents being resharded. + // 2) A find on document in the output collection transformed into an unreplicated no-op + // write on the same document to ensure serialization of concurrent oplog appliers reading + // on the same doc. + // + // Some of the code paths can end up where only the second kind of write is made. In + // that case, there is no timestamp associated with the write. This results in a + // mixed-mode update chain within WT that is problematic with durable history. We + // roll back those transactions by only committing the `WriteUnitOfWork` when there + // is a timestamp set. + wuow.commit(); + } +} + void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, + CollectionAcquisition& outputColl, + CollectionAcquisition& stashColl, const repl::OplogEntry& op) const { /** * The rules to apply ordinary insert operations are as follows: @@ -263,7 +310,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt // First, query the conflict stash collection using [op _id] as the query. If a doc exists, // apply rule #1 and run a replacement update on the stash collection. 
- auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery); + auto stashCollDoc = _queryStashCollById(opCtx, stashColl.getCollectionPtr(), idQuery); if (!stashCollDoc.isEmpty()) { auto request = UpdateRequest(); request.setNamespaceString(_myStashNss); @@ -272,7 +319,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt request.setUpsert(false); request.setFromOplogApplication(true); - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, stashColl, request); invariant(ur.numMatched != 0); _applierMetrics->onWriteToStashCollections(); @@ -283,14 +330,11 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt // Query the output collection for a doc with _id == [op _id]. If a doc does not exist, apply // rule #2 and insert this doc into the output collection. BSONObj outputCollDoc; - auto foundDoc = Helpers::findByIdAndNoopUpdate(opCtx, outputColl, idQuery, outputCollDoc); + auto foundDoc = Helpers::findByIdAndNoopUpdate( + opCtx, outputColl.getCollectionPtr(), idQuery, outputCollDoc); if (!foundDoc) { - uassertStatusOK(collection_internal::insertDocument(opCtx, - outputColl, - InsertStatement(oField), - nullptr /* OpDebug */, - false /* fromMigrate */)); + uassertStatusOK(Helpers::insert(opCtx, outputColl, oField)); return; } @@ -310,7 +354,7 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt request.setUpsert(false); request.setFromOplogApplication(true); - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, outputColl, request); invariant(ur.numMatched != 0); return; @@ -318,16 +362,14 @@ void ReshardingOplogApplicationRules::_applyInsert_inlock(OperationContext* opCt // The doc does not belong to '_donorShardId' under the original shard key, so apply rule #4 // and insert the contents of 'op' to the stash collection. - uassertStatusOK(collection_internal::insertDocument( - opCtx, stashColl, InsertStatement(oField), nullptr /* OpDebug */, false /* fromMigrate */)); + uassertStatusOK(Helpers::insert(opCtx, stashColl, oField)); _applierMetrics->onWriteToStashCollections(); } void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, + CollectionAcquisition& outputColl, + CollectionAcquisition& stashColl, const repl::OplogEntry& op) const { /** * The rules to apply ordinary update operations are as follows: @@ -362,7 +404,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt // First, query the conflict stash collection using [op _id] as the query. If a doc exists, // apply rule #1 and update the doc from the stash collection. - auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery); + auto stashCollDoc = _queryStashCollById(opCtx, stashColl.getCollectionPtr(), idQuery); if (!stashCollDoc.isEmpty()) { auto request = UpdateRequest(); request.setNamespaceString(_myStashNss); @@ -370,7 +412,7 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt request.setUpdateModification(std::move(updateMod)); request.setUpsert(false); request.setFromOplogApplication(true); - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, stashColl, request); invariant(ur.numMatched != 0); @@ -381,7 +423,8 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt // Query the output collection for a doc with _id == [op _id]. 
BSONObj outputCollDoc; - auto foundDoc = Helpers::findByIdAndNoopUpdate(opCtx, outputColl, idQuery, outputCollDoc); + auto foundDoc = Helpers::findByIdAndNoopUpdate( + opCtx, outputColl.getCollectionPtr(), idQuery, outputCollDoc); if (!foundDoc || !_sourceChunkMgr.keyBelongsToShard( @@ -403,16 +446,13 @@ void ReshardingOplogApplicationRules::_applyUpdate_inlock(OperationContext* opCt request.setUpdateModification(std::move(updateMod)); request.setUpsert(false); request.setFromOplogApplication(true); - UpdateResult ur = update(opCtx, db, request); + UpdateResult ur = update(opCtx, outputColl, request); invariant(ur.numMatched != 0); } -void ReshardingOplogApplicationRules::_applyDelete_inlock( +void ReshardingOplogApplicationRules::_applyDelete( OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, const boost::optional& sii, const repl::OplogEntry& op) const { /** @@ -443,17 +483,36 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock( BSONObj idQuery = idField.wrap(); const NamespaceString outputNss = op.getNss(); + { + // First, query the conflict stash collection using [op _id] as the query. If a doc exists, + // apply rule #1 and delete the doc from the stash collection. + WriteUnitOfWork wuow(opCtx); + + const auto stashColl = + opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] { + return acquireCollection(opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, _myStashNss, AcquisitionPrerequisites::kWrite), + MODE_IX); + }); - // First, query the conflict stash collection using [op _id] as the query. If a doc exists, - // apply rule #1 and delete the doc from the stash collection. - auto stashCollDoc = _queryStashCollById(opCtx, stashColl, idQuery); - if (!stashCollDoc.isEmpty()) { - auto nDeleted = deleteObjects(opCtx, stashColl, _myStashNss, idQuery, true /* justOne */); - invariant(nDeleted != 0); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Failed to apply op during resharding due to missing collection " + << _myStashNss.toStringForErrorMsg(), + stashColl.exists()); - _applierMetrics->onWriteToStashCollections(); + auto stashCollDoc = _queryStashCollById(opCtx, stashColl.getCollectionPtr(), idQuery); + if (!stashCollDoc.isEmpty()) { + auto nDeleted = deleteObjects(opCtx, stashColl, idQuery, true /* justOne */); + invariant(nDeleted != 0); - return; + _applierMetrics->onWriteToStashCollections(); + + invariant(opCtx->recoveryUnit()->isTimestamped()); + wuow.commit(); + + return; + } } // Now run 'findByIdAndNoopUpdate' to figure out which of rules #2, #3, and #4 we must apply. @@ -461,17 +520,24 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock( // single replica set transaction that is executed if we apply rule #4, so we therefore must run // 'findByIdAndNoopUpdate' as a part of the single replica set transaction. 
runWithTransaction(opCtx, _outputNss, sii, [this, idQuery](OperationContext* opCtx) { - AutoGetCollection autoCollOutput( - opCtx, _outputNss, MODE_IX, AutoGetCollection::Options{}.deadline(getDeadline(opCtx))); + const auto outputColl = + opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] { + return acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, _outputNss, AcquisitionPrerequisites::OperationType::kWrite), + MODE_IX); + }); + uassert(ErrorCodes::NamespaceNotFound, str::stream() << "Failed to apply op during resharding due to missing collection " - << _outputNss.ns(), - autoCollOutput); + << _outputNss.toStringForErrorMsg(), + outputColl.exists()); // Query the output collection for a doc with _id == [op _id]. BSONObj outputCollDoc; - auto foundDoc = - Helpers::findByIdAndNoopUpdate(opCtx, *autoCollOutput, idQuery, outputCollDoc); + auto foundDoc = Helpers::findByIdAndNoopUpdate( + opCtx, outputColl.getCollectionPtr(), idQuery, outputCollDoc); if (!foundDoc || !_sourceChunkMgr.keyBelongsToShard( @@ -493,8 +559,7 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock( // 3. Insert the doc just deleted into the output collection // Delete from the output collection - auto nDeleted = - deleteObjects(opCtx, *autoCollOutput, _outputNss, idQuery, true /* justOne */); + auto nDeleted = deleteObjects(opCtx, outputColl, idQuery, true /* justOne */); invariant(nDeleted != 0); // Attempt to delete a doc from one of the stash collections. Once we've matched a doc in @@ -507,13 +572,20 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock( continue; } - AutoGetCollection autoCollStash( - opCtx, coll, MODE_IX, AutoGetCollection::Options{}.deadline(getDeadline(opCtx))); + const auto stashColl = + opCtx->runWithDeadline(getDeadline(opCtx), opCtx->getTimeoutError(), [&] { + return acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx( + opCtx, coll, AcquisitionPrerequisites::OperationType::kWrite), + MODE_IX); + }); + uassert( ErrorCodes::NamespaceNotFound, str::stream() << "Failed to apply op during resharding due to missing collection " - << coll.ns(), - autoCollStash); + << coll.toStringForErrorMsg(), + stashColl.exists()); auto request = DeleteRequest{}; request.setNsString(coll); @@ -521,11 +593,11 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock( request.setMulti(false); request.setReturnDeleted(true); - ParsedDelete parsedDelete(opCtx, &request); + ParsedDelete parsedDelete(opCtx, &request, stashColl.getCollectionPtr()); uassertStatusOK(parsedDelete.parseRequest()); auto exec = uassertStatusOK(getExecutorDelete(&CurOp::get(opCtx)->debug(), - &(*autoCollStash), + stashColl, &parsedDelete, boost::none /* verbosity */)); BSONObj res; @@ -546,11 +618,7 @@ void ReshardingOplogApplicationRules::_applyDelete_inlock( // Insert the doc we just deleted from one of the stash collections into the output // collection. 
if (!doc.isEmpty()) { - uassertStatusOK(collection_internal::insertDocument(opCtx, - *autoCollOutput, - InsertStatement(doc), - nullptr /* OpDebug */, - false /* fromMigrate */)); + uassertStatusOK(Helpers::insert(opCtx, outputColl, doc)); } }); } @@ -560,7 +628,8 @@ BSONObj ReshardingOplogApplicationRules::_queryStashCollById(OperationContext* o const BSONObj& idQuery) const { const IndexCatalog* indexCatalog = coll->getIndexCatalog(); uassert(4990100, - str::stream() << "Missing _id index for collection " << _myStashNss.ns(), + str::stream() << "Missing _id index for collection " + << _myStashNss.toStringForErrorMsg(), indexCatalog->haveIdIndex(opCtx)); BSONObj result; diff --git a/src/mongo/db/s/resharding/resharding_oplog_application.h b/src/mongo/db/s/resharding/resharding_oplog_application.h index 6bd35d92f6d77..92168082526e0 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_application.h +++ b/src/mongo/db/s/resharding/resharding_oplog_application.h @@ -29,6 +29,8 @@ #pragma once +#include +#include #include #include #include @@ -36,12 +38,17 @@ #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/shard_role.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/sharding_index_catalog_cache.h" @@ -76,27 +83,26 @@ class ReshardingOplogApplicationRules { const repl::OplogEntry& op) const; private: + // Applies an insert or update operation + void _applyInsertOrUpdate(OperationContext* opCtx, + const boost::optional& gii, + const repl::OplogEntry& op) const; // Applies an insert operation void _applyInsert_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, + CollectionAcquisition& outputColl, + CollectionAcquisition& stashColl, const repl::OplogEntry& op) const; // Applies an update operation void _applyUpdate_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, + CollectionAcquisition& outputColl, + CollectionAcquisition& stashColl, const repl::OplogEntry& op) const; // Applies a delete operation - void _applyDelete_inlock(OperationContext* opCtx, - Database* db, - const CollectionPtr& outputColl, - const CollectionPtr& stashColl, - const boost::optional& gii, - const repl::OplogEntry& op) const; + void _applyDelete(OperationContext* opCtx, + const boost::optional& gii, + const repl::OplogEntry& op) const; // Queries '_stashNss' using 'idQuery'. 
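The delete path shown above follows a similar shape: first try to delete the doc from this donor's own stash; otherwise, inside a single transaction, delete it from the output collection and promote a matching doc from one of the other donors' stash collections if one exists. A toy model of that flow, assuming plain `std::map` collections and treating the transactional step as an ordinary function call:

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Toy model of the delete rules; in-memory maps stand in for the real
// collections, and the "transaction" is applied unconditionally.
using Coll = std::map<int, std::string>;

void applyDelete(Coll& output, Coll& myStash, std::vector<Coll*> otherStashes,
                 int id, bool existingBelongsToThisDonor) {
    if (myStash.erase(id)) {                          // rule #1: drop from own stash
        std::cout << "rule 1: deleted " << id << " from stash\n";
        return;
    }
    auto out = output.find(id);
    if (out == output.end() || !existingBelongsToThisDonor) {
        std::cout << "rules 2/3: nothing to do for " << id << "\n";
        return;
    }
    // Rule #4: atomically delete from output and promote a stashed doc, if any.
    output.erase(out);
    for (Coll* stash : otherStashes) {
        auto it = stash->find(id);
        if (it != stash->end()) {
            output.emplace(id, it->second);
            stash->erase(it);
            std::cout << "rule 4: promoted stashed doc " << id << " into output\n";
            return;
        }
    }
    std::cout << "rule 4: deleted " << id << " from output, no stashed doc\n";
}

int main() {
    Coll output{{1, "mine"}, {2, "mine"}}, myStash{{3, "stashed"}};
    Coll otherStash{{2, "theirs"}};
    applyDelete(output, myStash, {&otherStash}, 3, false);  // rule 1
    applyDelete(output, myStash, {&otherStash}, 2, true);   // rule 4 with promotion
    applyDelete(output, myStash, {&otherStash}, 1, true);   // rule 4 without promotion
}
```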
BSONObj _queryStashCollById(OperationContext* opCtx, diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp index b81e262d0a3a6..def6e0d945ef7 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_applier.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_applier.cpp @@ -28,19 +28,46 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_oplog_applier.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h" #include "mongo/db/s/resharding/resharding_future_util.h" +#include "mongo/db/s/resharding/resharding_oplog_applier.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/future_util.h" +#include "mongo/util/timer.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -197,6 +224,12 @@ SemiFuture ReshardingOplogApplier::run( auto client = cc().getServiceContext()->makeClient("ReshardingOplogApplierCleanupClient"); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(client); auto opCtx = cc().makeOperationContext(); diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier.h b/src/mongo/db/s/resharding/resharding_oplog_applier.h index f1df65219ccc1..1d18bb7fdfaa5 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_applier.h +++ b/src/mongo/db/s/resharding/resharding_oplog_applier.h @@ -29,22 +29,34 @@ #pragma once +#include +#include #include #include +#include #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_application.h" +#include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h" #include "mongo/db/s/resharding/resharding_oplog_batch_applier.h" #include "mongo/db/s/resharding/resharding_oplog_batch_preparer.h" #include "mongo/db/s/resharding/resharding_oplog_session_application.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/task_executor.h" #include "mongo/s/chunk_manager.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp index a67f279055e87..dd95c7f2373e5 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.cpp @@ -27,10 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" +#include + namespace mongo { ReshardingOplogApplierMetrics::ReshardingOplogApplierMetrics( diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h index 5b0a6d5f8c95d..5c6f6d5f6ab36 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h +++ b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics.h @@ -29,8 +29,13 @@ #pragma once +#include + +#include + #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp index 0d09b53d3e9e5..d4913f0d3e446 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_applier_metrics_test.cpp @@ -28,11 +28,22 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" +#include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" #include "mongo/db/s/metrics/sharding_data_transform_metrics_test_fixture.h" +#include "mongo/db/s/resharding/resharding_cumulative_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source_mock.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp index 330084774b3ea..71b2dbe10d558 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_applier_test.cpp @@ -27,38 +27,90 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/auth/authorization_session.h" #include "mongo/db/cancelable_operation_context.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/oplog_applier.h" -#include "mongo/db/repl/session_update_tracker.h" -#include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h" +#include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_applier.h" -#include "mongo/db/s/resharding/resharding_server_parameters_gen.h" -#include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/sharding_mongod_test_fixture.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" #include "mongo/db/vector_clock_metadata_hook.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/catalog_cache_loader_mock.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/str.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include 
"mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -262,7 +314,7 @@ class ReshardingOplogApplierTest : public ShardingMongodTestFixture { void loadCatalogCacheValues() { _mockCatalogCacheLoader->setDatabaseRefreshReturnValue( - DatabaseType(kAppliedToNs.db().toString(), _cm->dbPrimary(), _cm->dbVersion())); + DatabaseType(kAppliedToNs.db_forTest().toString(), _cm->dbPrimary(), _cm->dbVersion())); std::vector chunks; _cm->forEachChunk([&](const auto& chunk) { chunks.emplace_back( @@ -375,11 +427,6 @@ class ReshardingOplogApplierTest : public ShardingMongodTestFixture { Client::initThread(threadName.c_str()); auto* client = Client::getCurrent(); AuthorizationSession::get(*client)->grantInternalAuthorization(client); - - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } }; auto hookList = std::make_unique(); @@ -831,11 +878,11 @@ TEST_F(ReshardingOplogApplierTest, UnsupportedCommandOpsShouldError) { repl::OpTypeEnum::kInsert, BSON("_id" << 1), boost::none)); - ops.push_back( - makeOplog(repl::OpTime(Timestamp(6, 3), 1), - repl::OpTypeEnum::kCommand, - BSON("renameCollection" << appliedToNs().ns() << "to" << stashNs().ns()), - boost::none)); + ops.push_back(makeOplog( + repl::OpTime(Timestamp(6, 3), 1), + repl::OpTypeEnum::kCommand, + BSON("renameCollection" << appliedToNs().ns_forTest() << "to" << stashNs().ns_forTest()), + boost::none)); ops.push_back(makeOplog(repl::OpTime(Timestamp(7, 3), 1), repl::OpTypeEnum::kInsert, BSON("_id" << 2), @@ -875,7 +922,7 @@ TEST_F(ReshardingOplogApplierTest, DropSourceCollectionCmdShouldError) { std::deque ops; ops.push_back(makeOplog(repl::OpTime(Timestamp(5, 3), 1), repl::OpTypeEnum::kCommand, - BSON("drop" << appliedToNs().ns()), + BSON("drop" << appliedToNs().ns_forTest()), boost::none)); auto iterator = std::make_unique(std::move(ops), 1 /* batchSize */); diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier.cpp index 085114a2dde46..671f4f8ff9bc7 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier.cpp @@ -28,20 +28,38 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_oplog_batch_applier.h" - +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/db/client.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_future_util.h" #include "mongo/db/s/resharding/resharding_oplog_application.h" +#include "mongo/db/s/resharding/resharding_oplog_batch_applier.h" #include "mongo/db/s/resharding/resharding_oplog_session_application.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_version.h" +#include 
"mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/functional.h" +#include "mongo/util/future_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier.h b/src/mongo/db/s/resharding/resharding_oplog_batch_applier.h index c5c7681c2497e..7f73e7805a0ec 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier.h +++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/cancelable_operation_context.h" #include "mongo/db/s/resharding/resharding_oplog_batch_preparer.h" #include "mongo/executor/task_executor.h" @@ -38,6 +40,7 @@ namespace mongo { class ReshardingOplogApplicationRules; + class ReshardingOplogSessionApplication; /** diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp index 35d9161f7e882..dd5a324e3aff6 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_batch_applier_test.cpp @@ -27,40 +27,105 @@ * it in the license file. */ +#include +#include +#include +#include #include +#include +#include +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/collection_options.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/s/op_observer_sharding_impl.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" +#include "mongo/db/s/migration_chunk_cloner_source_op_observer.h" +#include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include 
"mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_application.h" +#include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_batch_applier.h" #include "mongo/db/s/resharding/resharding_oplog_session_application.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/vector_clock_metadata_hook.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/idl/server_parameter_test_util.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_manager.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -97,14 +162,16 @@ class ReshardingOplogBatchApplierTest : public ServiceContextMongoDTest { LogicalSessionCache::set(serviceContext, std::make_unique()); - // OpObserverShardingImpl is required for timestamping the writes from + // OpObserverImpl is required for timestamping the writes from // ReshardingOplogApplicationRules. 
auto opObserverRegistry = dynamic_cast(serviceContext->getOpObserver()); invariant(opObserverRegistry); opObserverRegistry->addObserver( - std::make_unique(std::make_unique())); + std::make_unique(std::make_unique())); + opObserverRegistry->addObserver( + std::make_unique()); } { @@ -156,11 +223,6 @@ class ReshardingOplogBatchApplierTest : public ServiceContextMongoDTest { Client::initThread(threadName.c_str()); auto* client = Client::getCurrent(); AuthorizationSession::get(*client)->grantInternalAuthorization(client); - - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } }; auto hookList = std::make_unique(); @@ -354,14 +416,15 @@ class ReshardingOplogBatchApplierTest : public ServiceContextMongoDTest { const StringData _currentShardKey = "sk"; - const NamespaceString _sourceNss{"test_crud", "collection_being_resharded"}; + const NamespaceString _sourceNss = + NamespaceString::createNamespaceString_forTest("test_crud", "collection_being_resharded"); const UUID _sourceUUID = UUID::gen(); const ShardId _myDonorId{"myDonorId"}; const ShardId _otherDonorId{"otherDonorId"}; const NamespaceString _outputNss = - resharding::constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID); + resharding::constructTemporaryReshardingNss(_sourceNss.db_forTest(), _sourceUUID); const NamespaceString _myStashNss = resharding::getLocalConflictStashNamespace(_sourceUUID, _myDonorId); const NamespaceString _otherStashNss = diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_preparer.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_preparer.cpp index 806f2ce60905f..1e177a95ca108 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_batch_preparer.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_batch_preparer.cpp @@ -27,20 +27,33 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/s/resharding/resharding_oplog_batch_preparer.h" -#include +#include +#include + +#include +#include +#include +#include +#include +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonelement_comparator.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/ops/write_ops_retryability.h" #include "mongo/db/query/collation/collator_interface.h" -#include "mongo/db/repl/apply_ops.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/s/resharding/resharding_server_parameters_gen.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" +#include "mongo/util/murmur3.h" #include "mongo/util/str.h" namespace mongo { @@ -280,8 +293,9 @@ void ReshardingOplogBatchPreparer::_appendCrudOpToWriterVector(const OplogEntry* const size_t idHash = elementHasher.hash(op->getIdElement()); - uint32_t hash = 0; - MurmurHash3_x86_32(&idHash, sizeof(idHash), hash, &hash); + // View 'idHash' as an array of 8 bytes. 
+ ConstDataRange dataRange{reinterpret_cast(&idHash), sizeof(idHash)}; + uint32_t hash = murmur3(dataRange, 0 /*seed*/); _appendOpToWriterVector(hash, op, writerVectors); } diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_preparer.h b/src/mongo/db/s/resharding/resharding_oplog_batch_preparer.h index 27e7934b32695..3d13de1e823d2 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_batch_preparer.h +++ b/src/mongo/db/s/resharding/resharding_oplog_batch_preparer.h @@ -35,6 +35,7 @@ #include #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/session/logical_session_id_gen.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_oplog_batch_preparer_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_batch_preparer_test.cpp index 2b42c344a2e6b..09516661a7931 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_batch_preparer_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_batch_preparer_test.cpp @@ -27,15 +27,35 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/ops/write_ops_retryability.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/apply_ops_gen.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/s/resharding/resharding_oplog_batch_preparer.h" -#include "mongo/db/s/resharding/resharding_server_parameters_gen.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp index c4ddf75548954..c111d8d105cce 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_crud_application_test.cpp @@ -27,33 +27,97 @@ * it in the license file. 
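The batch preparer change above routes each CRUD op to a writer vector by hashing the op's `_id` element hash, now through the server's `murmur3` helper over a `ConstDataRange` instead of calling `MurmurHash3_x86_32` directly. A small sketch of the routing idea, using `std::hash` as a stand-in for murmur3, shows why ops touching the same document always land in the same writer vector:

```cpp
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Sketch of hashing a document _id into one of N writer vectors. std::hash is an
// assumed stand-in for the server's murmur3 wrapper; the modulo step is the point.
using Op = std::string;

void appendToWriter(std::vector<std::vector<Op>>& writers, const std::string& id, Op op) {
    const std::size_t idHash = std::hash<std::string>{}(id);
    writers[idHash % writers.size()].push_back(std::move(op));
}

int main() {
    std::vector<std::vector<Op>> writers(4);
    appendToWriter(writers, "doc-1", "insert doc-1");
    appendToWriter(writers, "doc-2", "update doc-2");
    appendToWriter(writers, "doc-1", "delete doc-1");  // lands with the first doc-1 op
    for (std::size_t i = 0; i < writers.size(); ++i)
        std::cout << "writer " << i << ": " << writers[i].size() << " op(s)\n";
}
```

Deterministic routing keeps all operations on one document ordered within a single applier thread while still spreading unrelated documents across threads.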
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/crypto/encryption_fields_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" -#include "mongo/db/repl/apply_ops.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/op_observer_sharding_impl.h" +#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" +#include "mongo/db/s/migration_chunk_cloner_source_op_observer.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_application.h" +#include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_manager.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include 
"mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -90,14 +154,16 @@ class ReshardingOplogCrudApplicationTest : public ServiceContextMongoDTest { mongoDSessionCatalog->onStepUp(opCtx.get()); LogicalSessionCache::set(serviceContext, std::make_unique()); - // OpObserverShardingImpl is required for timestamping the writes from + // OpObserverImpl is required for timestamping the writes from // ReshardingOplogApplicationRules. auto opObserverRegistry = dynamic_cast(serviceContext->getOpObserver()); invariant(opObserverRegistry); opObserverRegistry->addObserver( - std::make_unique(std::make_unique())); + std::make_unique(std::make_unique())); + opObserverRegistry->addObserver( + std::make_unique()); } { @@ -200,7 +266,8 @@ class ReshardingOplogCrudApplicationTest : public ServiceContextMongoDTest { const NamespaceString& nss, const std::vector& documents) { AutoGetCollection coll(opCtx, nss, MODE_IS); - ASSERT_TRUE(bool(coll)) << "Collection '" << nss << "' does not exist"; + ASSERT_TRUE(bool(coll)) << "Collection '" << nss.toStringForErrorMsg() + << "' does not exist"; auto exec = InternalPlanner::indexScan(opCtx, &*coll, @@ -216,13 +283,15 @@ class ReshardingOplogCrudApplicationTest : public ServiceContextMongoDTest { BSONObj obj; while (exec->getNext(&obj, nullptr) == PlanExecutor::ADVANCED) { ASSERT_LT(i, documents.size()) - << "Found extra document in collection: " << nss << ": " << obj; + << "Found extra document in collection: " << nss.toStringForErrorMsg() << ": " + << obj; ASSERT_BSONOBJ_BINARY_EQ(obj, documents[i]); ++i; } if (i < documents.size()) { - FAIL("Didn't find document in collection: ") << nss << ": " << documents[i]; + FAIL("Didn't find document in collection: ") + << nss.toStringForErrorMsg() << ": " << documents[i]; } } @@ -332,14 +401,15 @@ class ReshardingOplogCrudApplicationTest : public ServiceContextMongoDTest { const StringData _currentShardKey = "sk"; const StringData _newShardKey = "new_sk"; - const NamespaceString _sourceNss{"test_crud", "collection_being_resharded"}; + const NamespaceString _sourceNss = + NamespaceString::createNamespaceString_forTest("test_crud", "collection_being_resharded"); const UUID _sourceUUID = UUID::gen(); const ShardId _myDonorId{"myDonorId"}; const ShardId _otherDonorId{"otherDonorId"}; const NamespaceString _outputNss = - resharding::constructTemporaryReshardingNss(_sourceNss.db(), _sourceUUID); + resharding::constructTemporaryReshardingNss(_sourceNss.db_forTest(), _sourceUUID); const NamespaceString _myStashNss = resharding::getLocalConflictStashNamespace(_sourceUUID, _myDonorId); const NamespaceString _otherStashNss = diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp index 97a9788ef629f..f83270513eb65 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher.cpp @@ -29,27 +29,79 @@ #include "mongo/db/s/resharding/resharding_oplog_fetcher.h" +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include 
"mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/client/dbclient_connection.h" -#include "mongo/client/remote_command_targeter.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/dbhelpers.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/read_concern_level.h" -#include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/shard_role.h" #include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" -#include "mongo/stdx/mutex.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/string_map.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -164,6 +216,13 @@ ExecutorFuture ReshardingOplogFetcher::_reschedule( _reshardingUUID.toString(), _donorShard.toString()), _service()); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + return iterate(client.get(), factory); }) .then([executor, cancelToken](bool moreToCome) { @@ -235,7 +294,7 @@ void ReshardingOplogFetcher::_ensureCollection(Client* client, invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // Create the destination collection if necessary. 
- writeConflictRetry(opCtx, "createReshardingLocalOplogBuffer", nss.toString(), [&] { + writeConflictRetry(opCtx, "createReshardingLocalOplogBuffer", nss, [&] { const Collection* coll = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss); if (coll) { @@ -328,13 +387,27 @@ bool ReshardingOplogFetcher::consume(Client* client, auto opCtxRaii = factory.makeOperationContext(client.get()); auto opCtx = opCtxRaii.get(); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + // Noting some possible optimizations: // // * Batch more inserts into larger storage transactions. // * Parallize writing documents across multiple threads. // * Doing either of the above while still using the underlying message buffer of bson // objects. - AutoGetCollection toWriteTo(opCtx, _toWriteInto, LockMode::MODE_IX); + const auto toWriteTo = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + _toWriteInto, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + for (const BSONObj& doc : batch) { WriteUnitOfWork wuow(opCtx); auto nextOplog = uassertStatusOK(repl::OplogEntry::parse(doc)); @@ -343,8 +416,7 @@ bool ReshardingOplogFetcher::consume(Client* client, ReshardingDonorOplogId::parse(IDLParserContext{"OplogFetcherParsing"}, nextOplog.get_id()->getDocument().toBson()); Timer insertTimer; - uassertStatusOK(collection_internal::insertDocument( - opCtx, *toWriteTo, InsertStatement{doc}, nullptr)); + uassertStatusOK(Helpers::insert(opCtx, toWriteTo, doc)); wuow.commit(); _env->metrics()->onLocalInsertDuringOplogFetching( @@ -389,8 +461,7 @@ bool ReshardingOplogFetcher::consume(Client* client, oplog.setOpTime(OplogSlot()); oplog.setWallClockTime(opCtx->getServiceContext()->getFastClockSource()->now()); - uassertStatusOK(collection_internal::insertDocument( - opCtx, *toWriteTo, InsertStatement{oplog.toBSON()}, nullptr)); + uassertStatusOK(Helpers::insert(opCtx, toWriteTo, oplog.toBSON())); wuow.commit(); // Also include synthetic oplog in the fetched count so it can match up with the diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher.h b/src/mongo/db/s/resharding/resharding_oplog_fetcher.h index 5e2e5d351ef2e..fabd02237e1ab 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_fetcher.h +++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher.h @@ -28,23 +28,30 @@ */ #pragma once +#include #include +#include +#include #include "mongo/base/status_with.h" #include "mongo/client/dbclient_base.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include "mongo/db/s/resharding/resharding_donor_oplog_iterator.h" #include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" +#include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/s/client/shard.h" #include "mongo/util/background.h" #include "mongo/util/cancellation.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/time_support.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp 
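Several hunks above now pass the `NamespaceString` itself (rather than `nss.toString()`/`nss.ns()`) to `writeConflictRetry`, which in essence re-runs its callback whenever the storage layer throws a write conflict. A minimal sketch of that retry shape, with a hypothetical exception type and no backoff or logging, is:

```cpp
#include <iostream>
#include <stdexcept>

// Minimal retry-on-conflict helper in the spirit of writeConflictRetry: the callable
// is simply re-run whenever it throws the designated conflict exception. The exception
// type and the bare loop are simplifications for illustration.
struct WriteConflictException : std::runtime_error {
    WriteConflictException() : std::runtime_error("write conflict") {}
};

template <typename Fn>
auto writeConflictRetrySketch(const char* opName, Fn&& fn) {
    for (int attempt = 1;; ++attempt) {
        try {
            return fn();
        } catch (const WriteConflictException&) {
            std::cout << opName << ": retrying after write conflict (attempt "
                      << attempt << ")\n";
        }
    }
}

int main() {
    int failuresLeft = 2;
    writeConflictRetrySketch("createReshardingLocalOplogBuffer", [&] {
        if (failuresLeft-- > 0)
            throw WriteConflictException{};
        std::cout << "collection created\n";
    });
}
```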
b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp index 455bad8a52676..b1fc89153e868 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_fetcher_test.cpp @@ -27,34 +27,86 @@ * it in the license file. */ -#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/op_observer/op_observer_impl.h" -#include "mongo/db/pipeline/document_source_mock.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_fetcher.h" #include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/write_unit_of_work.h" -#include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/db/tenant_id.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/net/hostandport.h" +#include 
"mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -98,7 +150,7 @@ class ReshardingOplogFetcherTest : public ShardServerTestFixture { _metrics = ReshardingMetrics::makeInstance(_reshardingUUID, BSON("y" << 1), - NamespaceString{""}, + NamespaceString(), ReshardingMetrics::Role::kRecipient, getServiceContext()->getFastClockSource()->now(), getServiceContext()); @@ -115,10 +167,6 @@ class ReshardingOplogFetcherTest : public ShardServerTestFixture { // onStepUp() relies on the storage interface to create the config.transactions table. repl::StorageInterface::set(getServiceContext(), std::make_unique()); - MongoDSessionCatalog::set( - getServiceContext(), - std::make_unique( - std::make_unique())); auto mongoDSessionCatalog = MongoDSessionCatalog::get(operationContext()); mongoDSessionCatalog->onStepUp(operationContext()); LogicalSessionCache::set(getServiceContext(), std::make_unique()); @@ -217,7 +265,7 @@ class ReshardingOplogFetcherTest : public ShardServerTestFixture { } void create(NamespaceString nss) { - writeConflictRetry(_opCtx, "create", nss.ns(), [&] { + writeConflictRetry(_opCtx, "create", nss, [&] { AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(_opCtx->lockState()); AutoGetDb autoDb(_opCtx, nss.dbName(), LockMode::MODE_X); WriteUnitOfWork wunit(_opCtx); @@ -228,7 +276,7 @@ class ReshardingOplogFetcherTest : public ShardServerTestFixture { OperationShardingState::ScopedAllowImplicitCollectionCreate_UNSAFE unsafeCreateCollection(_opCtx); auto db = autoDb.ensureDbExists(_opCtx); - ASSERT(db->createCollection(_opCtx, nss)) << nss; + ASSERT(db->createCollection(_opCtx, nss)) << nss.toStringForErrorMsg(); wunit.commit(); }); } @@ -248,7 +296,10 @@ class ReshardingOplogFetcherTest : public ShardServerTestFixture { onCommand([&](const executor::RemoteCommandRequest& request) -> StatusWith { DBDirectClient client(cc().getOperationContext()); BSONObj result; - bool res = client.runCommand({boost::none, request.dbname}, request.cmdObj, result); + bool res = client.runCommand( + DatabaseName::createDatabaseName_forTest(boost::none, request.dbname), + request.cmdObj, + result); if (res == false || result.hasField("cursorsKilled") || result["cursor"]["id"].Long() == 0) { hasMore = false; @@ -301,7 +352,7 @@ class ReshardingOplogFetcherTest : public ShardServerTestFixture { dataColl.getCollection()->uuid(), BSON( "msg" << fmt::format("Writes to {} are temporarily blocked for resharding.", - dataColl.getCollection()->ns().toString())), + dataColl.getCollection()->ns().toString_forTest())), BSON("type" << resharding::kReshardFinalOpLogType << "reshardingUUID" << _reshardingUUID), boost::none, @@ -551,7 +602,8 @@ TEST_F(ReshardingOplogFetcherTest, TestStartAtUpdatedWithProgressMarkOplogTs) { NamespaceString::createNamespaceString_forTest("dbtests.outputCollection"); const NamespaceString dataCollectionNss = NamespaceString::createNamespaceString_forTest("dbtests.runFetchIteration"); - const NamespaceString otherCollection("dbtests.collectionNotBeingResharded"); + const NamespaceString otherCollection = + NamespaceString::createNamespaceString_forTest("dbtests.collectionNotBeingResharded"); create(outputCollectionNss); create(dataCollectionNss); diff --git a/src/mongo/db/s/resharding/resharding_oplog_session_application.cpp b/src/mongo/db/s/resharding/resharding_oplog_session_application.cpp index 8471b6586bf34..78d10839f36d6 100644 --- 
a/src/mongo/db/s/resharding/resharding_oplog_session_application.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_session_application.cpp @@ -27,19 +27,41 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_oplog_session_application.h" - +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" +#include "mongo/db/s/resharding/resharding_oplog_session_application.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/str.h" namespace mongo { @@ -83,7 +105,7 @@ boost::optional ReshardingOplogSessionApplication::_logPrePostImag return writeConflictRetry( opCtx, "ReshardingOplogSessionApplication::_logPrePostImage", - NamespaceString::kRsOplogNamespace.ns(), + NamespaceString::kRsOplogNamespace, [&] { AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); diff --git a/src/mongo/db/s/resharding/resharding_oplog_session_application.h b/src/mongo/db/s/resharding/resharding_oplog_session_application.h index a0e0b2b9791f0..e7c7b6aebd266 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_session_application.h +++ b/src/mongo/db/s/resharding/resharding_oplog_session_application.h @@ -30,7 +30,9 @@ #pragma once #include +#include +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/namespace_string.h" #include "mongo/db/repl/optime.h" #include "mongo/db/s/resharding/donor_oplog_id_gen.h" diff --git a/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp b/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp index c2d732c7da8fd..27b79bb2c7d29 100644 --- a/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp +++ b/src/mongo/db/s/resharding/resharding_oplog_session_application_test.cpp @@ -28,28 +28,69 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include #include -#include "mongo/db/catalog_raii.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include 
"mongo/db/dbdirectclient.h" #include "mongo/db/exec/document_value/document.h" -#include "mongo/db/op_observer/op_observer_noop.h" -#include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/session_update_tracker.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include "mongo/db/s/resharding/resharding_oplog_session_application.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -57,24 +98,6 @@ namespace mongo { namespace { -/** - * OpObserver for OplogApplierImpl test fixture. - */ -class ReshardingOplogSessionApplicationOpObserver : public OpObserverNoop { -public: - /** - * Called when OplogApplierImpl prepares a multi-doc transaction using the - * TransactionParticipant. - */ - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) override { - return std::make_unique(/*prepare=*/false); - } -}; - class ReshardingOplogSessionApplicationTest : public ServiceContextMongoDTest { public: void setUp() override { @@ -100,13 +123,6 @@ class ReshardingOplogSessionApplicationTest : public ServiceContextMongoDTest { mongoDSessionCatalog->onStepUp(opCtx.get()); } - // Set up an OpObserver to ensure that preparing a multi-doc transaction has - // a valid description for mapping transaction operations to applyOps entries. 
- auto opObserverRegistry = - dynamic_cast(serviceContext->getOpObserver()); - opObserverRegistry->addObserver( - std::make_unique()); - serverGlobalParams.clusterRole = ClusterRole::ShardServer; } diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.cpp b/src/mongo/db/s/resharding/resharding_recipient_service.cpp index 1a75eb2831f8a..98ec66c44addb 100644 --- a/src/mongo/db/s/resharding/resharding_recipient_service.cpp +++ b/src/mongo/db/s/resharding/resharding_recipient_service.cpp @@ -29,45 +29,91 @@ #include "mongo/db/s/resharding/resharding_recipient_service.h" +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/cancelable_operation_context.h" -#include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/commit_quorum_options.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/keypattern.h" #include "mongo/db/ops/delete.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/repl/change_stream_oplog_notification.h" -#include "mongo/db/repl/oplog_applier.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/repl_index_build_state.h" #include "mongo/db/s/migration_destination_manager.h" +#include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/resharding_change_event_o2_field_gen.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_future_util.h" #include "mongo/db/s/resharding/resharding_metrics_helpers.h" -#include "mongo/db/s/resharding/resharding_oplog_applier.h" +#include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" +#include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h" #include "mongo/db/s/resharding/resharding_recipient_service_external_state.h" #include "mongo/db/s/resharding/resharding_server_parameters_gen.h" +#include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/shard_key_util.h" -#include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" #include "mongo/db/s/sharding_recovery_service.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/write_block_bypass.h" -#include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include 
"mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/sharding_catalog_client.h" -#include "mongo/s/cluster_commands_helpers.h" -#include "mongo/s/grid.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" #include "mongo/util/future_util.h" -#include "mongo/util/optional_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -79,6 +125,7 @@ MONGO_FAIL_POINT_DEFINE(reshardingPauseRecipientDuringCloning); MONGO_FAIL_POINT_DEFINE(reshardingPauseRecipientDuringOplogApplication); MONGO_FAIL_POINT_DEFINE(reshardingOpCtxKilledWhileRestoringMetrics); MONGO_FAIL_POINT_DEFINE(reshardingRecipientFailsAfterTransitionToCloning); +MONGO_FAIL_POINT_DEFINE(reshardingPauseRecipientBeforeBuildingIndex); namespace { @@ -125,11 +172,27 @@ void buildStateDocumentCloneMetricsForUpdate(BSONObjBuilder& bob, Date_t timesta timestamp); } +void buildStateDocumentBuildingIndexMetricsForUpdate(BSONObjBuilder& bob, Date_t timestamp) { + bob.append(getIntervalEndFieldName(ReshardingRecipientMetrics::kDocumentCopyFieldName), + timestamp); + bob.append( + getIntervalStartFieldName(ReshardingRecipientMetrics::kIndexBuildTimeFieldName), + timestamp); +} + void buildStateDocumentApplyMetricsForUpdate(BSONObjBuilder& bob, ReshardingMetrics* metrics, Date_t timestamp) { - bob.append(getIntervalEndFieldName(ReshardingRecipientMetrics::kDocumentCopyFieldName), - timestamp); + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + bob.append( + getIntervalEndFieldName(ReshardingRecipientMetrics::kIndexBuildTimeFieldName), + timestamp); + } else { + bob.append( + getIntervalEndFieldName(ReshardingRecipientMetrics::kDocumentCopyFieldName), + timestamp); + } bob.append( getIntervalStartFieldName(ReshardingRecipientMetrics::kOplogApplicationFieldName), timestamp); @@ -154,6 +217,9 @@ void buildStateDocumentMetricsForUpdate(BSONObjBuilder& bob, case RecipientStateEnum::kCloning: buildStateDocumentCloneMetricsForUpdate(bob, timestamp); return; + case RecipientStateEnum::kBuildingIndex: + buildStateDocumentBuildingIndexMetricsForUpdate(bob, timestamp); + return; case RecipientStateEnum::kApplying: buildStateDocumentApplyMetricsForUpdate(bob, metrics, timestamp); return; @@ -172,8 +238,17 @@ void setMeticsAfterWrite(ReshardingMetrics* metrics, case RecipientStateEnum::kCloning: metrics->setStartFor(ReshardingMetrics::TimedPhase::kCloning, timestamp); return; - case RecipientStateEnum::kApplying: + case RecipientStateEnum::kBuildingIndex: metrics->setEndFor(ReshardingMetrics::TimedPhase::kCloning, timestamp); + metrics->setStartFor(ReshardingMetrics::TimedPhase::kBuildingIndex, timestamp); + return; + case RecipientStateEnum::kApplying: + if 
(resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + metrics->setEndFor(ReshardingMetrics::TimedPhase::kBuildingIndex, timestamp); + } else { + metrics->setEndFor(ReshardingMetrics::TimedPhase::kCloning, timestamp); + } metrics->setStartFor(ReshardingMetrics::TimedPhase::kApplying, timestamp); return; case RecipientStateEnum::kStrictConsistency: @@ -229,7 +304,8 @@ ReshardingRecipientService::RecipientStateMachine::RecipientStateMachine( _dataReplicationFactory{std::move(dataReplicationFactory)}, _critSecReason(BSON("command" << "resharding_recipient" - << "collection" << _metadata.getSourceNss().toString())), + << "collection" + << NamespaceStringUtil::serialize(_metadata.getSourceNss()))), _isAlsoDonor([&]() { auto myShardId = _externalState->myShardId(_serviceContext); return std::find_if(_donorShards.begin(), @@ -258,7 +334,10 @@ ReshardingRecipientService::RecipientStateMachine::_runUntilStrictConsistencyOrE _createTemporaryReshardingCollectionThenTransitionToCloning(factory); }) .then([this, executor, abortToken, &factory] { - return _cloneThenTransitionToApplying(executor, abortToken, factory); + return _cloneThenTransitionToBuildingIndex(executor, abortToken, factory); + }) + .then([this, executor, abortToken, &factory] { + return _buildIndexThenTransitionToApplying(executor, abortToken, factory); }) .then([this, executor, abortToken, &factory] { return _awaitAllDonorsBlockingWritesThenTransitionToStrictConsistency( @@ -532,6 +611,9 @@ void ReshardingRecipientService::RecipientStateMachine::interrupt(Status status) boost::optional ReshardingRecipientService::RecipientStateMachine::reportForCurrentOp( MongoProcessInterface::CurrentOpConnectionsMode, MongoProcessInterface::CurrentOpSessionsMode) noexcept { + if (_recipientCtx.getState() == RecipientStateEnum::kBuildingIndex) { + _fetchBuildIndexMetrics(); + } return _metrics->reportForCurrentOp(); } @@ -596,24 +678,53 @@ void ReshardingRecipientService::RecipientStateMachine:: _externalState->ensureTempReshardingCollectionExistsWithIndexes( opCtx.get(), _metadata, *_cloneTimestamp); - _externalState->withShardVersionRetry( - opCtx.get(), - _metadata.getTempReshardingNss(), - "validating shard key index for reshardCollection"_sd, - [&] { - shardkeyutil::validateShardKeyIsNotEncrypted( - opCtx.get(), - _metadata.getTempReshardingNss(), - ShardKeyPattern(_metadata.getReshardingKey())); - shardkeyutil::validateShardKeyIndexExistsOrCreateIfPossible( - opCtx.get(), - _metadata.getTempReshardingNss(), - ShardKeyPattern{_metadata.getReshardingKey()}, - CollationSpec::kSimpleSpec, - false /* unique */, - true /* enforceUniquenessCheck */, - shardkeyutil::ValidationBehaviorsShardCollection(opCtx.get())); - }); + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + _externalState->withShardVersionRetry( + opCtx.get(), + _metadata.getSourceNss(), + "validating shard key index for reshardCollection"_sd, + [&] { + shardkeyutil::validateShardKeyIsNotEncrypted( + opCtx.get(), + _metadata.getSourceNss(), + ShardKeyPattern(_metadata.getReshardingKey())); + // This behavior in this phase is only used to validate whether this resharding + // should be permitted, we need to call + // validateShardKeyIndexExistsOrCreateIfPossible again in the buildIndex phase + // to make sure we have the indexSpecs even after restart. 
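Editor's note: the hunks above insert a new `kBuildingIndex` phase between cloning and applying, both in the future chain (`_cloneThenTransitionToBuildingIndex` followed by `_buildIndexThenTransitionToApplying`) and in the metrics bookkeeping, gated on `gFeatureFlagReshardingImprovements`. A minimal standalone sketch of the resulting phase ordering and of which timed interval closes and opens at each transition, assuming the feature flag is enabled; the enum and map here are illustrative stand-ins for `RecipientStateEnum` and `ReshardingMetrics`.

```cpp
#include <iostream>
#include <map>
#include <string>

enum class RecipientState { kCloning, kBuildingIndex, kApplying };

// Sketch of the start/end bookkeeping performed when the recipient reaches
// `next`, mirroring the intent of the switch in setMeticsAfterWrite() above
// with the improvements flag on.
void onTransition(RecipientState next, std::map<std::string, std::string>& timeline) {
    switch (next) {
        case RecipientState::kCloning:
            timeline["cloning"] = "started";
            break;
        case RecipientState::kBuildingIndex:
            timeline["cloning"] = "ended";
            timeline["buildingIndex"] = "started";
            break;
        case RecipientState::kApplying:
            timeline["buildingIndex"] = "ended";
            timeline["applying"] = "started";
            break;
    }
}

int main() {
    std::map<std::string, std::string> timeline;
    for (auto s : {RecipientState::kCloning,
                   RecipientState::kBuildingIndex,
                   RecipientState::kApplying})
        onTransition(s, timeline);
    for (const auto& [phase, state] : timeline)
        std::cout << phase << ": " << state << '\n';
}
```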
+ shardkeyutil::ValidationBehaviorsReshardingBulkIndex behaviors; + behaviors.setOpCtxAndCloneTimestamp(opCtx.get(), *_cloneTimestamp); + shardkeyutil::validateShardKeyIndexExistsOrCreateIfPossible( + opCtx.get(), + _metadata.getSourceNss(), + ShardKeyPattern{_metadata.getReshardingKey()}, + CollationSpec::kSimpleSpec, + false /* unique */, + true /* enforceUniquenessCheck */, + behaviors); + }); + } else { + _externalState->withShardVersionRetry( + opCtx.get(), + _metadata.getTempReshardingNss(), + "validating shard key index for reshardCollection"_sd, + [&] { + shardkeyutil::validateShardKeyIsNotEncrypted( + opCtx.get(), + _metadata.getTempReshardingNss(), + ShardKeyPattern(_metadata.getReshardingKey())); + + shardkeyutil::validateShardKeyIndexExistsOrCreateIfPossible( + opCtx.get(), + _metadata.getTempReshardingNss(), + ShardKeyPattern{_metadata.getReshardingKey()}, + CollationSpec::kSimpleSpec, + false /* unique */, + true /* enforceUniquenessCheck */, + shardkeyutil::ValidationBehaviorsShardCollection(opCtx.get())); + }); + } // We add a fake 'shardCollection' notification here so that the C2C replicator can sync the // resharding operation to the target cluster. The only information we have is the shard @@ -699,7 +810,7 @@ void ReshardingRecipientService::RecipientStateMachine::_ensureDataReplicationSt } ExecutorFuture -ReshardingRecipientService::RecipientStateMachine::_cloneThenTransitionToApplying( +ReshardingRecipientService::RecipientStateMachine::_cloneThenTransitionToBuildingIndex( const std::shared_ptr& executor, const CancellationToken& abortToken, const CancelableOperationContextFactory& factory) { @@ -729,7 +840,92 @@ ReshardingRecipientService::RecipientStateMachine::_cloneThenTransitionToApplyin return future_util::withCancellation(_dataReplication->awaitCloningDone(), abortToken) .thenRunOn(**executor) - .then([this, &factory] { _transitionToApplying(factory); }); + .then([this, &factory] { + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + _transitionToBuildingIndex(factory); + } else { + _transitionToApplying(factory); + } + }); +} + +ExecutorFuture +ReshardingRecipientService::RecipientStateMachine::_buildIndexThenTransitionToApplying( + const std::shared_ptr& executor, + const CancellationToken& abortToken, + const CancelableOperationContextFactory& factory) { + if (_recipientCtx.getState() > RecipientStateEnum::kBuildingIndex) { + return ExecutorFuture(**executor); + } + + { + auto opCtx = factory.makeOperationContext(&cc()); + reshardingPauseRecipientBeforeBuildingIndex.pauseWhileSet(opCtx.get()); + } + + return future_util::withCancellation( + [this, &factory] { + auto opCtx = factory.makeOperationContext(&cc()); + // We call validateShardKeyIndexExistsOrCreateIfPossible again here in case if we + // restarted after creatingCollection phase, whatever indexSpec we get in that + // phase will go away. + shardkeyutil::ValidationBehaviorsReshardingBulkIndex behaviors; + behaviors.setOpCtxAndCloneTimestamp(opCtx.get(), *_cloneTimestamp); + shardkeyutil::validateShardKeyIndexExistsOrCreateIfPossible( + opCtx.get(), + _metadata.getSourceNss(), + ShardKeyPattern{_metadata.getReshardingKey()}, + CollationSpec::kSimpleSpec, + false /* unique */, + true /* enforceUniquenessCheck */, + behaviors); + + // Get all indexSpecs need to build. 
+ auto* indexBuildsCoordinator = IndexBuildsCoordinator::get(opCtx.get()); + auto [indexSpecs, _] = _externalState->getCollectionIndexes( + opCtx.get(), + _metadata.getSourceNss(), + _metadata.getSourceUUID(), + *_cloneTimestamp, + "loading indexes to create indexes on temporary resharding collection"_sd); + auto shardKeyIndexSpec = behaviors.getShardKeyIndexSpec(); + if (shardKeyIndexSpec) { + indexSpecs.push_back(*shardKeyIndexSpec); + } + // Build all the indexes. + auto buildUUID = UUID::gen(); + IndexBuildsCoordinator::IndexBuildOptions indexBuildOptions{ + CommitQuorumOptions(CommitQuorumOptions::kVotingMembers)}; + auto indexBuildFuture = indexBuildsCoordinator->startIndexBuild( + opCtx.get(), + _metadata.getTempReshardingNss().dbName(), + // When we create the collection we use the metadata resharding UUID as the + // collection UUID. + _metadata.getReshardingUUID(), + indexSpecs, + buildUUID, + IndexBuildProtocol::kTwoPhase, + indexBuildOptions); + if (indexBuildFuture.isOK()) { + return indexBuildFuture.getValue(); + } else if (indexBuildFuture == ErrorCodes::IndexBuildAlreadyInProgress || + indexBuildFuture == ErrorCodes::IndexAlreadyExists) { + // In case of failover, the index build could have been started by oplog + // applier, so we just wait those finish. + indexBuildsCoordinator->awaitNoIndexBuildInProgressForCollection( + opCtx.get(), _metadata.getReshardingUUID()); + return SharedSemiFuture( + ReplIndexBuildState::IndexCatalogStats()); + } else { + return uassertStatusOK(indexBuildFuture); + } + }(), + abortToken) + .thenRunOn(**executor) + .then([this, &factory](const ReplIndexBuildState::IndexCatalogStats& stats) { + _transitionToApplying(factory); + }); } ExecutorFuture ReshardingRecipientService::RecipientStateMachine:: @@ -809,7 +1005,7 @@ void ReshardingRecipientService::RecipientStateMachine::_writeStrictConsistencyO auto oplog = generateOplogEntry(); writeConflictRetry( - rawOpCtx, "ReshardDoneCatchUpOplog", NamespaceString::kRsOplogNamespace.ns(), [&] { + rawOpCtx, "ReshardDoneCatchUpOplog", NamespaceString::kRsOplogNamespace, [&] { AutoGetOplog oplogWrite(rawOpCtx, OplogAccessMode::kWrite); WriteUnitOfWork wunit(rawOpCtx); const auto& oplogOpTime = repl::logOp(rawOpCtx, &oplog); @@ -915,6 +1111,13 @@ void ReshardingRecipientService::RecipientStateMachine::_transitionToCloning( _transitionState(std::move(newRecipientCtx), boost::none, boost::none, factory); } +void ReshardingRecipientService::RecipientStateMachine::_transitionToBuildingIndex( + const CancelableOperationContextFactory& factory) { + auto newRecipientCtx = _recipientCtx; + newRecipientCtx.setState(RecipientStateEnum::kBuildingIndex); + _transitionState(std::move(newRecipientCtx), boost::none, boost::none, factory); +} + void ReshardingRecipientService::RecipientStateMachine::_transitionToApplying( const CancelableOperationContextFactory& factory) { auto newRecipientCtx = _recipientCtx; @@ -1119,30 +1322,34 @@ void ReshardingRecipientService::RecipientStateMachine::_removeRecipientDocument auto opCtx = factory.makeOperationContext(&cc()); const auto& nss = NamespaceString::kRecipientReshardingOperationsNamespace; - writeConflictRetry( - opCtx.get(), "RecipientStateMachine::_removeRecipientDocument", nss.toString(), [&] { - AutoGetCollection coll(opCtx.get(), nss, MODE_IX); + writeConflictRetry(opCtx.get(), "RecipientStateMachine::_removeRecipientDocument", nss, [&] { + const auto coll = acquireCollection( + opCtx.get(), + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, 
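Editor's note: `_buildIndexThenTransitionToApplying` above starts a two-phase index build and, if the coordinator reports `IndexBuildAlreadyInProgress` or `IndexAlreadyExists` (for example after a failover where the oplog applier already kicked the build off), waits for the in-flight build instead of failing. A self-contained sketch of that start-or-join pattern; `tryStartBuild` and `waitForInFlightBuilds` are hypothetical stand-ins for the `IndexBuildsCoordinator` calls.

```cpp
#include <iostream>
#include <stdexcept>

enum class StartResult { kStarted, kAlreadyInProgress, kAlreadyExists, kOtherError };

// Hypothetical stand-in for IndexBuildsCoordinator::startIndexBuild().
StartResult tryStartBuild() {
    // Pretend a previous primary's oplog applier already started this build.
    return StartResult::kAlreadyInProgress;
}

// Hypothetical stand-in for awaitNoIndexBuildInProgressForCollection().
void waitForInFlightBuilds() {
    std::cout << "waiting for the existing build to finish\n";
}

void startOrJoinIndexBuild() {
    switch (tryStartBuild()) {
        case StartResult::kStarted:
            std::cout << "started a fresh two-phase index build\n";
            break;
        case StartResult::kAlreadyInProgress:
        case StartResult::kAlreadyExists:
            // After a failover the build may already be running or finished;
            // treat that as success and simply wait for it to drain.
            waitForInFlightBuilds();
            break;
        case StartResult::kOtherError:
            throw std::runtime_error("index build failed to start");
    }
}

int main() {
    startOrJoinIndexBuild();
}
```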
ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx.get()), + AcquisitionPrerequisites::kWrite), + MODE_IX); - if (!coll) { - return; - } + if (!coll.exists()) { + return; + } - WriteUnitOfWork wuow(opCtx.get()); + WriteUnitOfWork wuow(opCtx.get()); - opCtx->recoveryUnit()->onCommit([this](OperationContext*, boost::optional) { - stdx::lock_guard lk(_mutex); - _completionPromise.emplaceValue(); - }); + opCtx->recoveryUnit()->onCommit([this](OperationContext*, boost::optional) { + stdx::lock_guard lk(_mutex); + _completionPromise.emplaceValue(); + }); - deleteObjects(opCtx.get(), - *coll, - nss, - BSON(ReshardingRecipientDocument::kReshardingUUIDFieldName - << _metadata.getReshardingUUID()), - true /* justOne */); + deleteObjects(opCtx.get(), + coll, + BSON(ReshardingRecipientDocument::kReshardingUUIDFieldName + << _metadata.getReshardingUUID()), + true /* justOne */); - wuow.commit(); - }); + wuow.commit(); + }); } ExecutorFuture ReshardingRecipientService::RecipientStateMachine::_startMetrics( @@ -1184,8 +1391,8 @@ void ReshardingRecipientService::RecipientStateMachine::_restoreMetrics( if (_recipientCtx.getState() != RecipientStateEnum::kCloning) { // Before cloning, these values are 0. After cloning these values are written to the // metrics section of the recipient state document and restored during metrics - // initialization. This is so that applied oplog entries that add or remove documents do - // not affect the cloning metrics. + // initialization. This is so that applied oplog entries that add or remove + // documents do not affect the cloning metrics. return; } externalMetrics.documentBytesCopied = tempReshardingColl->dataSize(opCtx.get()); @@ -1279,6 +1486,20 @@ CancellationToken ReshardingRecipientService::RecipientStateMachine::_initAbortS return _abortSource->token(); } +void ReshardingRecipientService::RecipientStateMachine::_fetchBuildIndexMetrics() { + auto opCtx = cc().getOperationContext(); + if (!opCtx) { + opCtx = cc().makeOperationContext().get(); + } + AutoGetCollection tempReshardingColl(opCtx, _metadata.getTempReshardingNss(), MODE_IS); + auto indexCatalog = tempReshardingColl->getIndexCatalog(); + invariant(indexCatalog, + str::stream() << "Collection is missing index catalog: " + << _metadata.getTempReshardingNss().toStringForErrorMsg()); + _metrics->setIndexesToBuild(indexCatalog->numIndexesTotal()); + _metrics->setIndexesBuilt(indexCatalog->numIndexesReady()); +} + void ReshardingRecipientService::RecipientStateMachine::abort(bool isUserCancelled) { auto abortSource = [&]() -> boost::optional { stdx::lock_guard lk(_mutex); diff --git a/src/mongo/db/s/resharding/resharding_recipient_service.h b/src/mongo/db/s/resharding/resharding_recipient_service.h index 8ccf3938ebfcd..ae27dd68bf930 100644 --- a/src/mongo/db/s/resharding/resharding_recipient_service.h +++ b/src/mongo/db/s/resharding/resharding_recipient_service.h @@ -29,6 +29,23 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/resharding/recipient_document_gen.h" #include "mongo/db/s/resharding/resharding_data_replication.h" @@ -36,9 +53,20 @@ 
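Editor's note: `_fetchBuildIndexMetrics` above reads `numIndexesTotal()` and `numIndexesReady()` from the temporary collection's index catalog so `reportForCurrentOp` can surface build progress while the recipient is in the building-index state. A tiny standalone sketch of that progress calculation, with made-up counts in place of the real index catalog.

```cpp
#include <iostream>

// Stand-in for the two index catalog counters read above.
struct IndexCatalogCounts {
    int total;  // indexes that must exist when the build phase finishes
    int ready;  // indexes already built and ready for use
};

int main() {
    IndexCatalogCounts counts{5, 2};  // hypothetical values
    std::cout << "indexesToBuild: " << counts.total << '\n'
              << "indexesBuilt:   " << counts.ready << '\n'
              << "remaining:      " << (counts.total - counts.ready) << '\n';
}
```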
#include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_oplog_applier_metrics.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/s/shard_key_util.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/s/resharding/common_types_gen.h" #include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/cancellation.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -213,7 +241,12 @@ class ReshardingRecipientService::RecipientStateMachine final void _createTemporaryReshardingCollectionThenTransitionToCloning( const CancelableOperationContextFactory& factory); - ExecutorFuture _cloneThenTransitionToApplying( + ExecutorFuture _cloneThenTransitionToBuildingIndex( + const std::shared_ptr& executor, + const CancellationToken& abortToken, + const CancelableOperationContextFactory& factory); + + ExecutorFuture _buildIndexThenTransitionToApplying( const std::shared_ptr& executor, const CancellationToken& abortToken, const CancelableOperationContextFactory& factory); @@ -246,6 +279,8 @@ class ReshardingRecipientService::RecipientStateMachine final void _transitionToCloning(const CancelableOperationContextFactory& factory); + void _transitionToBuildingIndex(const CancelableOperationContextFactory& factory); + void _transitionToApplying(const CancelableOperationContextFactory& factory); void _transitionToStrictConsistency(const CancelableOperationContextFactory& factory); @@ -293,6 +328,10 @@ class ReshardingRecipientService::RecipientStateMachine final // Should only be called once per lifetime. CancellationToken _initAbortSource(const CancellationToken& stepdownToken); + // Get indexesToBuild and indexesBuilt from the index catalog, then save them in _metrics + void _fetchBuildIndexMetrics(); + + // The primary-only service instance corresponding to the recipient instance. Not owned. 
const ReshardingRecipientService* const _recipientService; diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp index 644c5688dd68d..42ab44e9bb539 100644 --- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp +++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.cpp @@ -29,18 +29,39 @@ #include "mongo/db/s/resharding/resharding_recipient_service_external_state.h" +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" #include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" #include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/resharding/resharding_feature_flag_gen.h" #include "mongo/s/sharding_feature_flags_gen.h" #include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -82,13 +103,18 @@ void ReshardingRecipientService::RecipientStateMachineExternalState:: // Set the temporary resharding collection's UUID to the resharding UUID. Note that // BSONObj::addFields() replaces any fields that already exist. collOptions = collOptions.addFields(BSON("uuid" << metadata.getReshardingUUID())); + CollectionOptionsAndIndexes collOptionsAndIndexes{metadata.getReshardingUUID(), + std::move(indexes), + std::move(idIndex), + std::move(collOptions)}; + if (resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + // The indexSpecs are cleared here so we don't create those indexes when creating temp + // collections. These indexes will be fetched and built during building-index stage. 
+ collOptionsAndIndexes.indexSpecs = {}; + } MigrationDestinationManager::cloneCollectionIndexesAndOptions( - opCtx, - metadata.getTempReshardingNss(), - CollectionOptionsAndIndexes{metadata.getReshardingUUID(), - std::move(indexes), - std::move(idIndex), - std::move(collOptions)}); + opCtx, metadata.getTempReshardingNss(), collOptionsAndIndexes); if (feature_flags::gGlobalIndexesShardingCatalog.isEnabled( serverGlobalParams.featureCompatibility)) { diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h index dbba30a6d688d..8d17bb9f68086 100644 --- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h +++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state.h @@ -29,15 +29,24 @@ #pragma once -#include "mongo/util/functional.h" +#include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/migration_destination_manager.h" #include "mongo/db/s/resharding/resharding_recipient_service.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/chunk_manager.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/util/functional.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp index cf01d4db95328..f4781294b2e29 100644 --- a/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp +++ b/src/mongo/db/s/resharding/resharding_recipient_service_external_state_test.cpp @@ -27,25 +27,57 @@ * it in the license file. 
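Editor's note: the hunk above clears `indexSpecs` in the `CollectionOptionsAndIndexes` passed to `cloneCollectionIndexesAndOptions` when the improvements flag is on, so the temporary resharding collection is created without secondary indexes and those are built later in the new building-index phase, presumably to avoid per-insert index maintenance during cloning. A minimal sketch of that conditional, with a plain struct and boolean standing in for the real types and feature flag check.

```cpp
#include <iostream>
#include <string>
#include <vector>

struct CollOptionsAndIndexesSketch {
    std::vector<std::string> indexSpecs;  // secondary index specs to create up front
    std::string idIndexSpec;              // the _id index is always kept
};

int main() {
    bool reshardingImprovementsEnabled = true;  // stand-in for the feature flag check

    CollOptionsAndIndexesSketch opts{{"{a: 1}", "{b: -1}"}, "{_id: 1}"};
    if (reshardingImprovementsEnabled) {
        // Defer secondary index creation to the building-index phase.
        opts.indexSpecs.clear();
    }
    std::cout << "indexes created with the temp collection: " << opts.indexSpecs.size() << '\n';
}
```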
*/ +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/resharding/resharding_oplog_applier_progress_gen.h" #include "mongo/db/s/resharding/resharding_recipient_service_external_state.h" -#include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog_cache_test_fixture.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/stale_exception.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -106,7 +138,7 @@ class RecipientServiceExternalStateTest : public CatalogCacheTestFixture, const HostAndPort& expectedHost) { onCommand([&](const executor::RemoteCommandRequest& request) { ASSERT_EQ(request.cmdObj.firstElementFieldName(), "listCollections"_sd); - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); ASSERT_EQUALS(expectedHost, request.target); ASSERT_BSONOBJ_EQ(request.cmdObj["filter"].Obj(), BSON("info.uuid" << uuid)); ASSERT(request.cmdObj.hasField("databaseVersion")); @@ -115,7 +147,8 @@ class RecipientServiceExternalStateTest : public CatalogCacheTestFixture, << "local" << "afterClusterTime" << kDefaultFetchTimestamp)); - std::string listCollectionsNs = str::stream() << nss.db() << "$cmd.listCollections"; + std::string listCollectionsNs = str::stream() + << nss.db_forTest() << "$cmd.listCollections"; return BSON("ok" << 1 << "cursor" << BSON("id" << 0LL << "ns" << listCollectionsNs << "firstBatch" << collectionsDocs)); @@ -128,7 +161,7 @@ class RecipientServiceExternalStateTest : public CatalogCacheTestFixture, const HostAndPort& expectedHost) { onCommand([&](const executor::RemoteCommandRequest& request) { ASSERT_EQ(request.cmdObj.firstElementFieldName(), "listIndexes"_sd); - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), 
request.dbname); ASSERT_EQUALS(expectedHost, request.target); ASSERT_EQ(unittest::assertGet(UUID::parse(request.cmdObj.firstElement())), uuid); ASSERT(request.cmdObj.hasField("shardVersion")); @@ -137,8 +170,9 @@ class RecipientServiceExternalStateTest : public CatalogCacheTestFixture, << "local" << "afterClusterTime" << kDefaultFetchTimestamp)); - return BSON("ok" << 1 << "cursor" - << BSON("id" << 0LL << "ns" << nss.ns() << "firstBatch" << indexDocs)); + return BSON( + "ok" << 1 << "cursor" + << BSON("id" << 0LL << "ns" << nss.ns_forTest() << "firstBatch" << indexDocs)); }); } @@ -218,7 +252,7 @@ class RecipientServiceExternalStateTest : public CatalogCacheTestFixture, onCommand([&](const executor::RemoteCommandRequest& request) { ASSERT_EQ(request.cmdObj.firstElementFieldNameStringData(), expectedCmdName); return createErrorCursorResponse( - Status(StaleDbRoutingVersion(nss.db().toString(), + Status(StaleDbRoutingVersion(nss.db_forTest().toString(), DatabaseVersion(UUID::gen(), Timestamp(1, 1)), boost::none), "dummy stale db version error")); @@ -263,7 +297,7 @@ class RecipientServiceExternalStateTest : public CatalogCacheTestFixture, RecipientStateMachineExternalStateImpl externalState; externalState.ensureTempReshardingCollectionExistsWithIndexes( operationContext(), kMetadata, kDefaultFetchTimestamp); - CollectionShardingRuntime csr(getServiceContext(), kOrigNss, executor()); + CollectionShardingRuntime csr(getServiceContext(), kOrigNss); ASSERT(csr.getCurrentMetadataIfKnown() == boost::none); } }; diff --git a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp index 2ee479d5b9b06..28d4c117d4ec9 100644 --- a/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp +++ b/src/mongo/db/s/resharding/resharding_recipient_service_test.cpp @@ -28,18 +28,45 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/primary_only_service_test_fixture.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/db/s/migration_destination_manager.h" +#include "mongo/db/s/resharding/donor_oplog_id_gen.h" #include "mongo/db/s/resharding/resharding_change_event_o2_field_gen.h" #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_data_replication.h" @@ -47,11 +74,31 @@ #include 
"mongo/db/s/resharding/resharding_recipient_service.h" #include "mongo/db/s/resharding/resharding_recipient_service_external_state.h" #include "mongo/db/s/resharding/resharding_service_test_helpers.h" +#include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/service_context.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -269,7 +316,7 @@ class ReshardingRecipientServiceTest : public repl::PrimaryOnlyServiceMongoDTest UUID::gen(), sourceNss, sourceUUID, - resharding::constructTemporaryReshardingNss(sourceNss.db(), sourceUUID), + resharding::constructTemporaryReshardingNss(sourceNss.db_forTest(), sourceUUID), newShardKeyPattern()); commonMetadata.setStartTime(getServiceContext()->getFastClockSource()->now()); @@ -700,8 +747,8 @@ TEST_F(ReshardingRecipientServiceTest, WritesNoopOplogEntryOnReshardDoneCatchUp) resharding::constructTemporaryReshardingNss("sourcedb", doc.getSourceUUID()); FindCommandRequest findRequest{NamespaceString::kRsOplogNamespace}; - findRequest.setFilter( - BSON("ns" << sourceNss.toString() << "o2.reshardDoneCatchUp" << BSON("$exists" << true))); + findRequest.setFilter(BSON("ns" << sourceNss.toString_forTest() << "o2.reshardDoneCatchUp" + << BSON("$exists" << true))); auto cursor = client.find(std::move(findRequest)); ASSERT_TRUE(cursor->more()) << "Found no oplog entries for source collection"; @@ -746,8 +793,8 @@ TEST_F(ReshardingRecipientServiceTest, WritesNoopOplogEntryForImplicitShardColle resharding::constructTemporaryReshardingNss("sourcedb", doc.getSourceUUID()); FindCommandRequest findRequest{NamespaceString::kRsOplogNamespace}; - findRequest.setFilter( - BSON("ns" << sourceNss.toString() << "o2.shardCollection" << BSON("$exists" << true))); + findRequest.setFilter(BSON("ns" << sourceNss.toString_forTest() << "o2.shardCollection" + << BSON("$exists" << true))); auto cursor = client.find(std::move(findRequest)); ASSERT_TRUE(cursor->more()) << "Found no oplog entries for source collection"; @@ -762,8 +809,8 @@ TEST_F(ReshardingRecipientServiceTest, WritesNoopOplogEntryForImplicitShardColle << shardCollectionOp.getEntry(); ASSERT_FALSE(shardCollectionOp.getFromMigrate()); - auto shardCollEventExpected = - BSON("shardCollection" << sourceNss.toString() << "shardKey" << newShardKeyPattern()); + auto shardCollEventExpected = BSON("shardCollection" << sourceNss.toString_forTest() + << "shardKey" << newShardKeyPattern()); ASSERT_BSONOBJ_EQ(*shardCollectionOp.getObject2(), shardCollEventExpected); } diff --git a/src/mongo/db/s/resharding/resharding_server_parameters.idl 
b/src/mongo/db/s/resharding/resharding_server_parameters.idl index ea5703cf34d1f..d4069741a369e 100644 --- a/src/mongo/db/s/resharding/resharding_server_parameters.idl +++ b/src/mongo/db/s/resharding/resharding_server_parameters.idl @@ -175,3 +175,17 @@ server_parameters: expr: 2 * 1000 validator: gte: 0 + + reshardingCoordinatorQuiescePeriodMillis: + description: >- + Controls the amount of time a resharding operation will remain in a quiesced state on + the coordinator, after it completes or aborts, to allow a user to reconnect to it and + retrieve the result. This applies only to resharding operations where the user + provided a reshardingUUID; if no UUID is provided there is no quiesce period. + set_at: startup + cpp_vartype: int + cpp_varname: gReshardingCoordinatorQuiescePeriodMillis + default: + expr: 15 * 60 * 1000 + validator: + gte: 0 diff --git a/src/mongo/db/s/resharding/resharding_service_test_helpers.h b/src/mongo/db/s/resharding/resharding_service_test_helpers.h index e2409bd38780a..9d2e3e51665ec 100644 --- a/src/mongo/db/s/resharding/resharding_service_test_helpers.h +++ b/src/mongo/db/s/resharding/resharding_service_test_helpers.h @@ -154,7 +154,8 @@ class StateTransitionControllerOpObserver : public OpObserverNoop { std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override { if (coll->ns() != _stateDocumentNss) { return; } @@ -165,7 +166,9 @@ class StateTransitionControllerOpObserver : public OpObserverNoop { invariant(++begin == end); // No support for inserting more than one state document yet. } - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override { + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override { if (args.coll->ns() != _stateDocumentNss) { return; } @@ -179,7 +182,8 @@ class StateTransitionControllerOpObserver : public OpObserverNoop { void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) override { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override { if (coll->ns() != _stateDocumentNss) { return; } diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp index 8a015e52af582..626ba568e2116 100644 --- a/src/mongo/db/s/resharding/resharding_txn_cloner.cpp +++ b/src/mongo/db/s/resharding/resharding_txn_cloner.cpp @@ -28,20 +28,34 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/resharding/resharding_txn_cloner.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/read_preference.h" #include "mongo/db/client.h" -#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/curop.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_sort.h" #include 
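Editor's note: the IDL hunk above adds `reshardingCoordinatorQuiescePeriodMillis`, a startup-scoped knob (default `15 * 60 * 1000`, i.e. 15 minutes) controlling how long a completed or aborted resharding operation stays quiesced on the coordinator so a caller that supplied a `reshardingUUID` can reconnect and read the result. Assuming it behaves like other `set_at: startup` parameters, it could be tuned at process start with, for example, `mongod --setParameter reshardingCoordinatorQuiescePeriodMillis=60000` to keep the result around for one minute.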
"mongo/db/pipeline/process_interface/mongo_process_interface.h" @@ -52,14 +66,30 @@ #include "mongo/db/s/resharding/resharding_data_copy_util.h" #include "mongo/db/s/resharding/resharding_future_util.h" #include "mongo/db/s/resharding/resharding_server_parameters_gen.h" +#include "mongo/db/s/resharding/resharding_txn_cloner.h" #include "mongo/db/s/resharding/resharding_txn_cloner_progress_gen.h" -#include "mongo/db/s/session_catalog_migration_destination.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/future_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -307,6 +337,12 @@ SemiFuture ReshardingTxnCloner::run( auto client = cc().getServiceContext()->makeClient("ReshardingTxnClonerCleanupClient"); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(client); auto opCtx = cc().makeOperationContext(); diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner.h b/src/mongo/db/s/resharding/resharding_txn_cloner.h index 17a0f7573d135..447e0f4fe9873 100644 --- a/src/mongo/db/s/resharding/resharding_txn_cloner.h +++ b/src/mongo/db/s/resharding/resharding_txn_cloner.h @@ -29,29 +29,33 @@ #pragma once #include +#include +#include #include #include #include "mongo/bson/timestamp.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future.h" namespace mongo { - namespace executor { class TaskExecutor; } // namespace executor -class OperationContext; - /** * Transfer config.transaction information from a given source shard to this shard. 
*/ diff --git a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp index 5240e118bff77..1f1748a75df45 100644 --- a/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp +++ b/src/mongo/db/s/resharding/resharding_txn_cloner_test.cpp @@ -28,38 +28,111 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/pipeline/process_interface/shardsvr_process_interface.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/repl/wait_for_majority_service.h" -#include "mongo/db/s/resharding/resharding_server_parameters_gen.h" #include "mongo/db/s/resharding/resharding_txn_cloner.h" #include "mongo/db/s/resharding/resharding_txn_cloner_progress_gen.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/s/sharding_mongod_test_fixture.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/vector_clock_metadata_hook.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include 
"mongo/rpc/metadata/metadata_hook.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/catalog_cache_loader_mock.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/database_version.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/grid.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -108,10 +181,6 @@ class ReshardingTxnClonerTest : public ShardServerTestFixture { // onStepUp() relies on the storage interface to create the config.transactions table. repl::StorageInterface::set(getServiceContext(), std::make_unique()); - MongoDSessionCatalog::set( - getServiceContext(), - std::make_unique( - std::make_unique())); auto mongoDSessionCatalog = MongoDSessionCatalog::get(operationContext()); mongoDSessionCatalog->onStepUp(operationContext()); LogicalSessionCache::set(getServiceContext(), std::make_unique()); @@ -154,7 +223,8 @@ class ReshardingTxnClonerTest : public ShardServerTestFixture { const NamespaceString& nss, const repl::ReadConcernArgs& readConcern) override { uasserted(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << nss.ns() << " not found"); + str::stream() + << "Collection " << nss.toStringForErrorMsg() << " not found"); } private: @@ -340,11 +410,6 @@ class ReshardingTxnClonerTest : public ShardServerTestFixture { Client::initThread(threadName.c_str()); auto* client = Client::getCurrent(); AuthorizationSession::get(*client)->grantInternalAuthorization(client); - - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } }; auto hookList = std::make_unique(); diff --git a/src/mongo/db/s/resharding/resharding_util.cpp b/src/mongo/db/s/resharding/resharding_util.cpp index 5b47fc0780993..27e9e2d8bf8c4 100644 --- a/src/mongo/db/s/resharding/resharding_util.cpp +++ b/src/mongo/db/s/resharding/resharding_util.cpp @@ -29,32 +29,53 @@ #include "mongo/db/s/resharding/resharding_util.h" +#include +#include +#include +#include #include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/json.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source_add_fields.h" #include 
"mongo/db/pipeline/document_source_find_and_modify_image_lookup.h" #include "mongo/db/pipeline/document_source_match.h" -#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/resharding/document_source_resharding_add_resume_id.h" #include "mongo/db/s/resharding/document_source_resharding_iterate_transaction.h" -#include "mongo/db/s/resharding/resharding_metrics.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/write_unit_of_work.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h" -#include "mongo/s/shard_invalidated_for_targeting_exception.h" +#include "mongo/s/resharding/common_types_gen.h" #include "mongo/s/shard_key_pattern.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResharding @@ -143,7 +164,8 @@ std::set getRecipientShards(OperationContext* opCtx, auto [cm, _] = uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, tempNss)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Expected collection " << tempNss << " to be sharded", + str::stream() << "Expected collection " << tempNss.toStringForErrorMsg() + << " to be sharded", cm.isSharded()); std::set recipients; @@ -224,10 +246,13 @@ void checkForOverlappingZones(std::vector& zones) { } std::vector buildTagsDocsFromZones(const NamespaceString& tempNss, - const std::vector& zones) { + std::vector& zones, + const ShardKeyPattern& shardKey) { std::vector tags; tags.reserve(zones.size()); - for (const auto& zone : zones) { + for (auto& zone : zones) { + zone.setMin(shardKey.getKeyPattern().extendRangeBound(zone.getMin(), false)); + zone.setMax(shardKey.getKeyPattern().extendRangeBound(zone.getMax(), false)); ChunkRange range(zone.getMin(), zone.getMax()); TagsType tag(tempNss, zone.getZone().toString(), range); tags.push_back(tag.toBSON()); @@ -236,6 +261,20 @@ std::vector buildTagsDocsFromZones(const NamespaceString& tempNss, return tags; } +std::vector getZonesFromExistingCollection(OperationContext* opCtx, + const NamespaceString& sourceNss) { + std::vector zones; + const auto collectionZones = uassertStatusOK( + ShardingCatalogManager::get(opCtx)->localCatalogClient()->getTagsForCollection(opCtx, + sourceNss)); + + for (const auto& zone : collectionZones) { + ReshardingZoneType newZone(zone.getTag(), zone.getMinKey(), zone.getMaxKey()); + zones.push_back(newZone); + } + return zones; +} + std::unique_ptr createOplogFetchingPipelineForResharding( const boost::intrusive_ptr& expCtx, const ReshardingDonorOplogId& startAfter, @@ -358,10 +397,10 @@ NamespaceString getLocalConflictStashNamespace(UUID existingUUID, ShardId donorS } void doNoopWrite(OperationContext* opCtx, StringData opStr, const NamespaceString& nss) { - writeConflictRetry(opCtx, opStr, NamespaceString::kRsOplogNamespace.ns(), [&] { + writeConflictRetry(opCtx, 
opStr, NamespaceString::kRsOplogNamespace, [&] { AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); - const std::string msg = str::stream() << opStr << " on " << nss; + const std::string msg = str::stream() << opStr << " on " << nss.toStringForErrorMsg(); WriteUnitOfWork wuow(opCtx); opCtx->getClient()->getServiceContext()->getOpObserver()->onInternalOpMessage( opCtx, @@ -399,5 +438,72 @@ boost::optional estimateRemainingRecipientTime(bool applyingBegan, return {}; } +void validateShardDistribution(const std::vector& shardDistribution, + OperationContext* opCtx, + const ShardKeyPattern& keyPattern) { + boost::optional hasMinMax = boost::none; + std::vector validShards; + stdx::unordered_set shardIds; + for (const auto& shard : shardDistribution) { + uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shard.getShard())); + uassert(ErrorCodes::InvalidOptions, + "ShardKeyRange should have a pair of min/max or none of them", + !(shard.getMax().has_value() ^ shard.getMin().has_value())); + uassert(ErrorCodes::InvalidOptions, + "ShardKeyRange min should follow shard key's keyPattern", + (!shard.getMin().has_value()) || keyPattern.isShardKey(*shard.getMin())); + uassert(ErrorCodes::InvalidOptions, + "ShardKeyRange max should follow shard key's keyPattern", + (!shard.getMax().has_value()) || keyPattern.isShardKey(*shard.getMax())); + if (hasMinMax && !(*hasMinMax)) { + uassert(ErrorCodes::InvalidOptions, + "Non-explicit shardDistribution should have unique shardIds", + shardIds.find(shard.getShard()) == shardIds.end()); + } + + // Check all shardKeyRanges have min/max or none of them has min/max. + if (hasMinMax.has_value()) { + uassert(ErrorCodes::InvalidOptions, + "All ShardKeyRanges should have the same min/max pattern", + !(*hasMinMax ^ shard.getMax().has_value())); + } else { + hasMinMax = shard.getMax().has_value(); + } + + validShards.push_back(shard); + shardIds.insert(shard.getShard()); + } + + // If the shardDistribution contains min/max, validate whether they are continuous and complete. 
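+    // As an illustration of the checks below (shard names "s0"/"s1" are hypothetical): for a
+    // new shard key {x: 1}, a complete and continuous explicit distribution would be
+    //   [{shard: "s0", min: {x: MinKey}, max: {x: 0}},
+    //    {shard: "s1", min: {x: 0}, max: {x: MaxKey}}]
+    // i.e. once sorted by min, the first range starts at the key pattern's globalMin, the last
+    // range ends at globalMax, and each range's min equals the previous range's max.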
+ if (hasMinMax && *hasMinMax) { + std::sort(validShards.begin(), + validShards.end(), + [](const ShardKeyRange& a, const ShardKeyRange& b) { + return SimpleBSONObjComparator::kInstance.evaluate(*a.getMin() < *b.getMin()); + }); + + uassert( + ErrorCodes::InvalidOptions, + "ShardKeyRange must start at global min for the new shard key", + SimpleBSONObjComparator::kInstance.evaluate(validShards.front().getMin().value() == + keyPattern.getKeyPattern().globalMin())); + uassert(ErrorCodes::InvalidOptions, + "ShardKeyRange must end at global max for the new shard key", + SimpleBSONObjComparator::kInstance.evaluate( + validShards.back().getMax().value() == keyPattern.getKeyPattern().globalMax())); + + boost::optional prevMax = boost::none; + for (const auto& shard : validShards) { + if (prevMax) { + uassert(ErrorCodes::InvalidOptions, + "ShardKeyRanges must be continuous", + SimpleBSONObjComparator::kInstance.evaluate(prevMax.value() == + *shard.getMin())); + } + prevMax = *shard.getMax(); + } + } +} + } // namespace resharding } // namespace mongo diff --git a/src/mongo/db/s/resharding/resharding_util.h b/src/mongo/db/s/resharding/resharding_util.h index 1b55c70a75530..842f8da4ad17d 100644 --- a/src/mongo/db/s/resharding/resharding_util.h +++ b/src/mongo/db/s/resharding/resharding_util.h @@ -29,20 +29,46 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/resharding/coordinator_document_gen.h" #include "mongo/db/s/resharding/donor_oplog_id_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/task_executor.h" #include "mongo/s/catalog/type_tags.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/uuid.h" namespace mongo { namespace resharding { @@ -257,7 +283,15 @@ void checkForOverlappingZones(std::vector& zones); * Builds documents to insert into config.tags from zones provided to reshardCollection cmd. */ std::vector buildTagsDocsFromZones(const NamespaceString& tempNss, - const std::vector& zones); + std::vector& zones, + const ShardKeyPattern& shardKey); + +/** + * Create an array of resharding zones from the existing collection. This is used for forced + * same-key resharding. + */ +std::vector getZonesFromExistingCollection(OperationContext* opCtx, + const NamespaceString& sourceNss); /** * Creates a pipeline that can be serialized into a query for fetching oplog entries. `startAfter` @@ -321,5 +355,18 @@ std::vector> getReshardingStateMachines(OperationConte return result; } +/** + * Validate the shardDistribution parameter in reshardCollection cmd, which should satisfy the + * following properties: + * - The shardKeyRanges should be continuous and cover the full data range. 
+ * - Every shardKeyRange should be on the same key. + * - A shardKeyRange should either have no min/max or have a min/max pair. + * - All shardKeyRanges in the array should have the same min/max pattern. + * Not satisfying the rules above will cause an uassert failure. + */ +void validateShardDistribution(const std::vector& shardDistribution, + OperationContext* opCtx, + const ShardKeyPattern& keyPattern); + } // namespace resharding } // namespace mongo diff --git a/src/mongo/db/s/resharding/resharding_util_test.cpp b/src/mongo/db/s/resharding/resharding_util_test.cpp index 8de383855b771..9b5f97359b985 100644 --- a/src/mongo/db/s/resharding/resharding_util_test.cpp +++ b/src/mongo/db/s/resharding/resharding_util_test.cpp @@ -28,25 +28,40 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/json.h" -#include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/hasher.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/pipeline/aggregation_context_fixture.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_mock.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/resharding/resharding_txn_cloner.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_txn_record_gen.h" #include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -259,7 +274,8 @@ class ReshardingTxnCloningPipelineTest : public AggregationContextFixture { Timestamp fetchTimestamp, boost::optional startAfter) { // create expression context - static const NamespaceString _transactionsNss{"config.transactions"}; + static const NamespaceString _transactionsNss = + NamespaceString::createNamespaceString_forTest("config.transactions"); boost::intrusive_ptr expCtx( new ExpressionContextForTest(getOpCtx(), _transactionsNss)); expCtx->setResolvedNamespace(_transactionsNss, {_transactionsNss, {}}); diff --git a/src/mongo/db/s/resharding_test_commands.cpp b/src/mongo/db/s/resharding_test_commands.cpp index 74688928784d8..a910c463e6830 100644 --- a/src/mongo/db/s/resharding_test_commands.cpp +++ b/src/mongo/db/s/resharding_test_commands.cpp @@ -28,22 +28,40 @@ */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include +#include + +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/s/metrics/sharding_data_transform_instance_metrics.h" #include 
"mongo/db/s/resharding/resharding_collection_cloner.h" #include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding_test_commands_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/vector_clock_metadata_hook.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -72,11 +90,6 @@ class ReshardingCloneCollectionTestCommand final Client::initThread(threadName.c_str()); auto* client = Client::getCurrent(); AuthorizationSession::get(*client)->grantInternalAuthorization(client); - - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } }; auto metrics = ReshardingMetrics::makeInstance( diff --git a/src/mongo/db/s/scoped_collection_metadata.h b/src/mongo/db/s/scoped_collection_metadata.h index 25e8c28e48e22..209941d055e98 100644 --- a/src/mongo/db/s/scoped_collection_metadata.h +++ b/src/mongo/db/s/scoped_collection_metadata.h @@ -35,7 +35,7 @@ namespace mongo { /** * Contains the parts of the sharding state for a particular collection, which do not change due to - * chunk move, split and merge. The implementation is allowed to be tighly coupled with the + * chunk move, split and merge. The implementation is allowed to be tightly coupled with the * CollectionShardingState from which it was derived and because of this it must not be accessed * outside of a collection lock. 
*/ diff --git a/src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp b/src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp index 5744b229e8375..1d4bb0eaf6cc1 100644 --- a/src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp +++ b/src/mongo/db/s/scoped_operation_completion_sharding_actions.cpp @@ -30,14 +30,33 @@ #include "mongo/db/s/scoped_operation_completion_sharding_actions.h" -#include "mongo/db/curop.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/resharding_metrics_helpers.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_statistics.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/server_transaction_coordinators_metrics.cpp b/src/mongo/db/s/server_transaction_coordinators_metrics.cpp index 796bb96f75cde..5af84d1348309 100644 --- a/src/mongo/db/s/server_transaction_coordinators_metrics.cpp +++ b/src/mongo/db/s/server_transaction_coordinators_metrics.cpp @@ -27,12 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/server_transaction_coordinators_metrics.h" +#include #include "mongo/db/operation_context.h" +#include "mongo/db/s/server_transaction_coordinators_metrics.h" #include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/s/server_transaction_coordinators_metrics.h b/src/mongo/db/s/server_transaction_coordinators_metrics.h index c69f45657395f..bb22642c2df89 100644 --- a/src/mongo/db/s/server_transaction_coordinators_metrics.h +++ b/src/mongo/db/s/server_transaction_coordinators_metrics.h @@ -29,10 +29,16 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/operation_context.h" #include "mongo/db/s/transaction_coordinators_stats_gen.h" #include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/db/s/session_catalog_migration_destination.cpp b/src/mongo/db/s/session_catalog_migration_destination.cpp index 5ea8c1f5413d3..9d82b15301c29 100644 --- a/src/mongo/db/s/session_catalog_migration_destination.cpp +++ b/src/mongo/db/s/session_catalog_migration_destination.cpp @@ -28,32 +28,62 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/session_catalog_migration_destination.h" - -#include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/client/connection_string.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" #include 
"mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_retryability.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/s/migration_session_id.h" #include "mongo/db/s/session_catalog_migration.h" -#include "mongo/db/s/sharding_statistics.h" +#include "mongo/db/s/session_catalog_migration_destination.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/session/session_txn_record_gen.h" #include "mongo/db/shard_id.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/redaction.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -217,7 +247,7 @@ void SessionCatalogMigrationDestination::start(ServiceContext* service) { _state = State::Migrating; } - _thread = stdx::thread([=] { + _thread = stdx::thread([=, this] { try { _retrieveSessionStateFromSource(service); } catch (const DBException& ex) { @@ -262,12 +292,6 @@ void SessionCatalogMigrationDestination::join() { void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(ServiceContext* service) { Client::initThread( "sessionCatalogMigrationProducer-" + _migrationSessionId.toString(), service, nullptr); - auto client = Client::getCurrent(); - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } - bool oplogDrainedAfterCommiting = false; ProcessOplogResult lastResult; repl::OpTime lastOpTimeWaited; @@ -287,6 +311,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service auto uniqueCtx = CancelableOperationContext( cc().makeOperationContext(), _cancellationToken, executor); auto opCtx = uniqueCtx.get(); + opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); nextBatch = getNextSessionOplogBatch(opCtx, _fromShard, _migrationSessionId); oplogArray = BSONArray{nextBatch[kOplogField].Obj()}; @@ -367,6 +392,7 @@ void SessionCatalogMigrationDestination::_retrieveSessionStateFromSource(Service auto executor = Grid::get(service)->getExecutorPool()->getFixedExecutor(); auto uniqueOpCtx = CancelableOperationContext(cc().makeOperationContext(), _cancellationToken, executor); + uniqueOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); uassertStatusOK( waitForWriteConcern(uniqueOpCtx.get(), lastResult.oplogTime, kMajorityWC, &unusedWCResult)); @@ -436,6 +462,8 @@ SessionCatalogMigrationDestination::_processSessionOplog(const BSONObj& oplogBSO auto uniqueOpCtx = CancelableOperationContext(cc().makeOperationContext(), cancellationToken, executor); auto opCtx = uniqueOpCtx.get(); + 
opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + { auto lk = stdx::lock_guard(*opCtx->getClient()); opCtx->setLogicalSessionId(result.sessionId); @@ -496,10 +524,7 @@ SessionCatalogMigrationDestination::_processSessionOplog(const BSONObj& oplogBSO oplogEntry.setHash(boost::none); writeConflictRetry( - opCtx, - "SessionOplogMigration", - NamespaceString::kSessionTransactionsTableNamespace.ns(), - [&] { + opCtx, "SessionOplogMigration", NamespaceString::kSessionTransactionsTableNamespace, [&] { // Need to take global lock here so repl::logOp will not unlock it and trigger the // invariant that disallows unlocking global lock while inside a WUOW. Take the // transaction table db lock to ensure the same lock ordering with normal replicated diff --git a/src/mongo/db/s/session_catalog_migration_destination.h b/src/mongo/db/s/session_catalog_migration_destination.h index 4be67328ae8ab..e4af90e61bdcc 100644 --- a/src/mongo/db/s/session_catalog_migration_destination.h +++ b/src/mongo/db/s/session_catalog_migration_destination.h @@ -33,10 +33,17 @@ #include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/cluster_auth_mode.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/s/migration_session_id.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/shard_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" diff --git a/src/mongo/db/s/session_catalog_migration_destination_test.cpp b/src/mongo/db/s/session_catalog_migration_destination_test.cpp index 6e54739ffe8c5..64dd98d0cc5fb 100644 --- a/src/mongo/db/s/session_catalog_migration_destination_test.cpp +++ b/src/mongo/db/s/session_catalog_migration_destination_test.cpp @@ -27,39 +27,72 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include #include - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/initialize_operation_session_info.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_exec.h" #include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/migration_session_id.h" #include "mongo/db/s/session_catalog_migration.h" #include "mongo/db/s/session_catalog_migration_destination.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/db/s/sharding_statistics.h" -#include "mongo/db/server_options.h" -#include "mongo/db/session/initialize_operation_session_info.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_history_iterator.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/executor/remote_command_request.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -139,10 +172,6 @@ class SessionCatalogMigrationDestinationTest : public ShardServerTestFixture { // onStepUp() relies on the storage interface to create the config.transactions table. 
repl::StorageInterface::set(getServiceContext(), std::make_unique()); - MongoDSessionCatalog::set( - getServiceContext(), - std::make_unique( - std::make_unique())); auto mongoDSessionCatalog = MongoDSessionCatalog::get(operationContext()); mongoDSessionCatalog->onStepUp(operationContext()); LogicalSessionCache::set(getServiceContext(), std::make_unique()); @@ -252,7 +281,15 @@ class SessionCatalogMigrationDestinationTest : public ShardServerTestFixture { Client::initThread("test-insert-thread"); auto innerOpCtx = Client::getCurrent()->makeOperationContext(); - initializeOperationSessionInfo(innerOpCtx.get(), insertBuilder.obj(), true, true, true); + auto opMsgRequest = OpMsgRequestBuilder::create( + DatabaseName::createDatabaseName_forTest(boost::none, "test_unused_dbname"), + insertBuilder.obj(), + BSONObj()); + initializeOperationSessionInfo(innerOpCtx.get(), + opMsgRequest, + true /* requiresAuth */, + true /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */); auto mongoDSessionCatalog = MongoDSessionCatalog::get(innerOpCtx.get()); auto sessionTxnState = mongoDSessionCatalog->checkOutSession(innerOpCtx.get()); auto txnParticipant = TransactionParticipant::get(innerOpCtx.get()); diff --git a/src/mongo/db/s/session_catalog_migration_source.cpp b/src/mongo/db/s/session_catalog_migration_source.cpp index 5b2f14bc48b29..bfa3d5d041994 100644 --- a/src/mongo/db/s/session_catalog_migration_source.cpp +++ b/src/mongo/db/s/session_catalog_migration_source.cpp @@ -27,35 +27,67 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/session_catalog_migration_source.h" - +#include +#include +#include +#include #include - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/ops/write_ops_retryability.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/apply_ops_command_info.h" #include "mongo/db/repl/image_collection_entry_gen.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime_base_gen.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/s/session_catalog_migration.h" -#include "mongo/db/s/sharding_statistics.h" -#include "mongo/db/session/session.h" +#include "mongo/db/s/session_catalog_migration_source.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_history_iterator.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/write_concern.h" -#include "mongo/platform/random.h" +#include "mongo/db/write_concern_options.h" +#include 
"mongo/idl/idl_parser.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/shard_key_pattern.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -216,7 +248,10 @@ SessionCatalogMigrationSource::SessionCatalogMigrationSource(OperationContext* o _chunkRange(std::move(chunk)), _keyPattern(shardKey) {} -void SessionCatalogMigrationSource::init(OperationContext* opCtx) { +void SessionCatalogMigrationSource::init(OperationContext* opCtx, + const LogicalSessionId& migrationLsid) { + const auto migrationLsidWithoutTxnNumber = castToParentSessionId(migrationLsid); + DBDirectClient client(opCtx); FindCommandRequest findRequest{NamespaceString::kSessionTransactionsTableNamespace}; // Skip internal sessions for retryable writes with aborted or in progress transactions since @@ -254,6 +289,16 @@ void SessionCatalogMigrationSource::init(OperationContext* opCtx) { // txnNumber. continue; } + + if (parentSessionId == migrationLsidWithoutTxnNumber) { + // Skip session id matching the migration lsid as they are only for used for rejecting + // old migration source from initiating range deleter on the destination. Sending + // these sessions to the other side has a potential to deadlock as the destination + // will also try to checkout the same session for almost the entire duration of + // the migration. + continue; + } + lastTxnSession = LastTxnSession{parentSessionId, parentTxnNumber}; if (!txnRecord.getLastWriteOpTime().isNull()) { @@ -267,9 +312,10 @@ void SessionCatalogMigrationSource::init(OperationContext* opCtx) { writeConflictRetry( opCtx, "session migration initialization majority commit barrier", - NamespaceString::kRsOplogNamespace.ns(), + NamespaceString::kRsOplogNamespace, [&] { - const auto message = BSON("sessionMigrateCloneStart" << _ns.ns()); + const auto message = + BSON("sessionMigrateCloneStart" << NamespaceStringUtil::serialize(_ns)); WriteUnitOfWork wuow(opCtx); opCtx->getClient()->getServiceContext()->getOpObserver()->onInternalOpMessage( @@ -671,13 +717,21 @@ bool SessionCatalogMigrationSource::_fetchNextNewWriteOplog(OperationContext* op repl::OplogEntry::CommandType::kApplyOps); const auto sessionId = *nextNewWriteOplog.getSessionId(); - // The opTimes for transactions inside internal sessions for non-retryable writes - // should never get added to the opTime queue since those transactions are not - // retryable so there is no need to transfer their write history to the - // recipient. - invariant(!isInternalSessionForNonRetryableWrite(sessionId), - "Cannot add op time for a non-retryable internal transaction to the " - "session migration op time queue"); + if (isInternalSessionForNonRetryableWrite(sessionId)) { + dassert(0, + str::stream() << "Cannot add op time for a non-retryable " + "internal transaction to the " + "session migration op time queue - " + << "session id:" << sessionId << " oplog entry: " + << redact(nextNewWriteOplog.toBSONForLogging())); + + // Transactions inside internal sessions for non-retryable writes are not + // retryable so there is no need to transfer their write history to the + // recipient. 
+ _newWriteOpTimeList.pop_front(); + lk.unlock(); + return _fetchNextNewWriteOplog(opCtx); + } if (isInternalSessionForRetryableWrite(sessionId)) { // Derive retryable write oplog entries from this retryable internal diff --git a/src/mongo/db/s/session_catalog_migration_source.h b/src/mongo/db/s/session_catalog_migration_source.h index 71e793fa50133..c54e3785afe5b 100644 --- a/src/mongo/db/s/session_catalog_migration_source.h +++ b/src/mongo/db/s/session_catalog_migration_source.h @@ -29,14 +29,26 @@ #pragma once +#include #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_txn_record_gen.h" #include "mongo/db/transaction/transaction_history_iterator.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/shard_key_pattern.h" @@ -99,7 +111,7 @@ class SessionCatalogMigrationSource { * from the constructor to allow the member functions of the SessionCatalogMigrationSource to be * called before the initialization step is finished. */ - void init(OperationContext* opCtx); + void init(OperationContext* opCtx, const LogicalSessionId& migrationLsid); /** * Returns true if there are more oplog entries to fetch at this moment. Note that new writes diff --git a/src/mongo/db/s/session_catalog_migration_source_test.cpp b/src/mongo/db/s/session_catalog_migration_source_test.cpp index 9b9a22824bd57..e5af652d325d0 100644 --- a/src/mongo/db/s/session_catalog_migration_source_test.cpp +++ b/src/mongo/db/s/session_catalog_migration_source_test.cpp @@ -28,30 +28,54 @@ */ #include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/ops/write_ops_retryability.h" +#include "mongo/db/repl/apply_ops_gen.h" #include "mongo/db/repl/image_collection_entry_gen.h" #include "mongo/db/repl/mock_repl_coord_server_fixture.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/s/session_catalog_migration.h" #include "mongo/db/s/session_catalog_migration_source.h" -#include "mongo/db/s/sharding_statistics.h" #include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/session.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/executor/remote_command_request.h" -#include "mongo/logv2/log.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include 
"mongo/util/debug_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault namespace mongo { @@ -65,6 +89,7 @@ const KeyPattern kShardKey(BSON("x" << 1)); const ChunkRange kChunkRange(BSON("x" << 0), BSON("x" << 100)); const KeyPattern kNestedShardKey(BSON("x.y" << 1)); const ChunkRange kNestedChunkRange(BSON("x.y" << 0), BSON("x.y" << 100)); +const LogicalSessionId kMigrationLsid; class SessionCatalogMigrationSourceTest : public MockReplCoordServerFixture {}; @@ -334,7 +359,7 @@ TEST_F(SessionCatalogMigrationSourceTest, OneSessionWithTwoWrites) { client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); { @@ -399,7 +424,7 @@ TEST_F(SessionCatalogMigrationSourceTest, OneSessionWithTwoWritesMultiStmtIds) { client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); { @@ -499,7 +524,7 @@ TEST_F(SessionCatalogMigrationSourceTest, TwoSessionWithTwoWrites) { insertOplogEntry(entry2b); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); auto checkNextBatch = [this, &migrationSource](const repl::OplogEntry& firstExpectedOplog, @@ -612,7 +637,7 @@ TEST_F(SessionCatalogMigrationSourceTest, OneSessionWithFindAndModifyPreImageAnd client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); auto expectedSequence = {entry3, entry4, entry1, entry2}; @@ -697,7 +722,7 @@ TEST_F(SessionCatalogMigrationSourceTest, client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); auto expectedSequence = {entry3, entry4, entry1, entry2}; @@ -756,7 +781,7 @@ TEST_F(SessionCatalogMigrationSourceTest, ForgeImageEntriesWhenFetchingEntriesWi client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); // The next oplog entry should be the forged preImage entry. 
ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); ASSERT_TRUE(migrationSource.hasMoreOplog()); @@ -836,7 +861,7 @@ TEST_F(SessionCatalogMigrationSourceTest, OplogWithOtherNsShouldBeIgnored) { client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord2.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); ASSERT_TRUE(migrationSource.hasMoreOplog()); @@ -905,7 +930,7 @@ TEST_F(SessionCatalogMigrationSourceTest, SessionDumpWithMultipleNewWrites) { insertOplogEntry(entry3); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); migrationSource.notifyNewWriteOpTime( @@ -945,7 +970,7 @@ TEST_F(SessionCatalogMigrationSourceTest, SessionDumpWithMultipleNewWrites) { TEST_F(SessionCatalogMigrationSourceTest, ShouldAssertIfOplogCannotBeFound) { SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); migrationSource.notifyNewWriteOpTime( @@ -958,7 +983,7 @@ TEST_F(SessionCatalogMigrationSourceTest, ShouldAssertIfOplogCannotBeFound) { TEST_F(SessionCatalogMigrationSourceTest, ReturnDeadEndSentinelOplogEntryForNewCommittedNonInternalTransaction) { SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); const auto sessionId = makeLogicalSessionIdForTest(); @@ -996,10 +1021,10 @@ TEST_F(SessionCatalogMigrationSourceTest, } DEATH_TEST_F(SessionCatalogMigrationSourceTest, - ThrowUponSeeingNewCommittedForInternalTransactionForNonRetryableWrite, - "Cannot add op time for a non-retryable internal transaction") { + DiscardOplogEntriesForNewCommittedInternalTransactionForNonRetryableWrite, + "invariant") { SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); const auto sessionId = makeLogicalSessionIdWithTxnUUIDForTest(); @@ -1021,12 +1046,16 @@ DEATH_TEST_F(SessionCatalogMigrationSourceTest, ASSERT_TRUE(migrationSource.hasMoreOplog()); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_EQ(migrationSource.getSessionOplogEntriesToBeMigratedSoFar(), 0); + + // notifyNewWriteOpTime() uses dassert, so it will only invariant in debug mode. Deliberately + // crash here in non-debug mode to make the test work in both modes. 
+ invariant(kDebugBuild); } TEST_F(SessionCatalogMigrationSourceTest, DeriveOplogEntriesForNewCommittedInternalTransactionForRetryableWriteBasic) { SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); const auto sessionId = makeLogicalSessionIdWithTxnNumberAndUUIDForTest(); @@ -1101,7 +1130,7 @@ TEST_F(SessionCatalogMigrationSourceTest, TEST_F(SessionCatalogMigrationSourceTest, DeriveOplogEntriesForNewCommittedInternalTransactionForRetryableWriteFetchPrePostImage) { SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); const auto sessionId = makeLogicalSessionIdWithTxnNumberAndUUIDForTest(); @@ -1206,7 +1235,7 @@ TEST_F(SessionCatalogMigrationSourceTest, TEST_F(SessionCatalogMigrationSourceTest, DeriveOplogEntriesForNewCommittedInternalTransactionForRetryableWriteForgePrePostImage) { SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); std::vector cases{repl::RetryImageEnum::kPreImage, @@ -1299,7 +1328,7 @@ TEST_F(SessionCatalogMigrationSourceTest, ShouldBeAbleInsertNewWritesAfterBuffer const auto txnNumber = TxnNumber{1}; SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); { @@ -1411,7 +1440,7 @@ TEST_F(SessionCatalogMigrationSourceTest, ReturnsDeadEndSentinelForIncompleteHis client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); { @@ -1475,7 +1504,7 @@ TEST_F(SessionCatalogMigrationSourceTest, ShouldAssertWhenRollbackDetected) { client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); { @@ -1522,7 +1551,7 @@ TEST_F(SessionCatalogMigrationSourceTest, client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); ASSERT_TRUE(migrationSource.hasMoreOplog()); @@ -1565,7 +1594,7 @@ TEST_F(SessionCatalogMigrationSourceTest, IgnoreCommittedInternalTransactionForN client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_FALSE(migrationSource.hasMoreOplog()); @@ -1653,7 +1682,7 @@ TEST_F(SessionCatalogMigrationSourceTest, 
client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); const auto expectedSessionId = *getParentSessionId(sessionId); const auto expectedTxnNumber = *sessionId.getTxnNumber(); @@ -1779,7 +1808,7 @@ TEST_F(SessionCatalogMigrationSourceTest, client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); const auto expectedSessionId = *getParentSessionId(sessionId); const auto expectedTxnNumber = *sessionId.getTxnNumber(); @@ -1874,7 +1903,7 @@ TEST_F(SessionCatalogMigrationSourceTest, // Create a SessionCatalogMigrationSource. It should return only the oplog entry for the // internal session with the latest txnNumber. SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); ASSERT_TRUE(migrationSource.hasMoreOplog()); @@ -1914,7 +1943,7 @@ TEST_F(SessionCatalogMigrationSourceTest, // Create another SessionCatalogMigrationSource. It should still return only the oplog entry // for the internal session with the latest txnNumber. SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); ASSERT_TRUE(migrationSource.hasMoreOplog()); @@ -2026,7 +2055,7 @@ TEST_F( client.insert(NamespaceString::kConfigImagesNamespace, imageEntryForOp2.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); const auto expectedSessionId = *getParentSessionId(sessionId); const auto expectedTxnNumber = *sessionId.getTxnNumber(); @@ -2090,7 +2119,7 @@ TEST_F(SessionCatalogMigrationSourceTest, client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); ASSERT_TRUE(migrationSource.hasMoreOplog()); @@ -2133,7 +2162,7 @@ TEST_F(SessionCatalogMigrationSourceTest, IgnorePreparedInternalTransactionForNo client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_FALSE(migrationSource.hasMoreOplog()); @@ -2167,7 +2196,7 @@ TEST_F(SessionCatalogMigrationSourceTest, IgnorePreparedInternalTransactionForRe client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_FALSE(migrationSource.hasMoreOplog()); @@ -2189,7 +2218,7 @@ 
TEST_F(SessionCatalogMigrationSourceTest, IgnoreInProgressTransaction) { client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_FALSE(migrationSource.hasMoreOplog()); @@ -2241,7 +2270,7 @@ TEST_F(SessionCatalogMigrationSourceTest, IgnoreAbortedTransaction) { client.insert(NamespaceString::kSessionTransactionsTableNamespace, txnRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_FALSE(migrationSource.hasMoreOplog()); @@ -2300,7 +2329,7 @@ TEST_F(SessionCatalogMigrationSourceTest, insertOplogEntry(insertOplog); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); // Function to verify the oplog entry corresponding to the retryable write. auto checkRetryableWriteEntry = [&] { @@ -2382,7 +2411,7 @@ TEST_F(SessionCatalogMigrationSourceTest, FindAndModifyDeleteNotTouchingChunkIsI client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_EQ(migrationSource.getSessionOplogEntriesToBeMigratedSoFar(), 0); @@ -2428,7 +2457,7 @@ TEST_F(SessionCatalogMigrationSourceTest, FindAndModifyUpdatePrePostNotTouchingC client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_EQ(migrationSource.getSessionOplogEntriesToBeMigratedSoFar(), 0); @@ -2476,7 +2505,7 @@ TEST_F(SessionCatalogMigrationSourceTest, client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); auto expectedSequence = {entry1, entry2}; @@ -2537,7 +2566,7 @@ TEST_F(SessionCatalogMigrationSourceTest, client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); ASSERT_EQ(migrationSource.getSessionOplogEntriesToBeMigratedSoFar(), 0); @@ -2584,7 +2613,7 @@ TEST_F(SessionCatalogMigrationSourceTest, FindAndModifyUpdateNotTouchingChunkSho client.insert(NamespaceString::kSessionTransactionsTableNamespace, sessionRecord.toBSON()); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_FALSE(migrationSource.fetchNextOplog(opCtx())); 
ASSERT_EQ(migrationSource.getSessionOplogEntriesToBeMigratedSoFar(), 0); @@ -2667,7 +2696,7 @@ TEST_F(SessionCatalogMigrationSourceTest, TwoSessionWithTwoWritesContainingWrite insertOplogEntry(entry2b); SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.fetchNextOplog(opCtx())); auto expectedSequence = {entry1a, entry2b, entry2a}; @@ -2720,7 +2749,7 @@ TEST_F(SessionCatalogMigrationSourceTest, UntransferredDataSizeWithCommittedWrit // Check for the initial state of the SessionCatalogMigrationSource, and drain the majority // committed session writes. SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); ASSERT_TRUE(migrationSource.hasMoreOplog()); ASSERT_FALSE(migrationSource.inCatchupPhase()); migrationSource.fetchNextOplog(opCtx()); @@ -2760,7 +2789,7 @@ TEST_F(SessionCatalogMigrationSourceTest, UntransferredDataSizeWithNoCommittedWr const auto txnNumber = TxnNumber{1}; SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); auto entry = makeOplogEntry( repl::OpTime(Timestamp(52, 345), 2), // optime @@ -2832,7 +2861,7 @@ TEST_F(SessionCatalogMigrationSourceTest, FilterRewrittenOplogEntriesOutsideChun insertOplogEntry(entry); } SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); std::vector filteredEntries = {entries.at(1)}; while (migrationSource.fetchNextOplog(opCtx())) { @@ -2880,7 +2909,7 @@ TEST_F(SessionCatalogMigrationSourceTest, } SessionCatalogMigrationSource migrationSource(opCtx(), kNs, kChunkRange, kShardKey); - migrationSource.init(opCtx()); + migrationSource.init(opCtx(), kMigrationLsid); std::vector filteredEntries = {entries.at(1)}; diff --git a/src/mongo/db/s/sessions_collection_config_server.cpp b/src/mongo/db/s/sessions_collection_config_server.cpp index 4e9ebb06578dc..393d9cafba9e0 100644 --- a/src/mongo/db/s/sessions_collection_config_server.cpp +++ b/src/mongo/db/s/sessions_collection_config_server.cpp @@ -28,17 +28,50 @@ */ #include "mongo/db/s/sessions_collection_config_server.h" -#include "mongo/db/repl/replication_coordinator.h" +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/logv2/log.h" +#include "mongo/db/session/sessions_collection.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include 
"mongo/s/catalog_cache.h" #include "mongo/s/chunk_constraints.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/cluster_ddl.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" #include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -56,7 +89,8 @@ void SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext* // If we don't have any shards, we can't set up this collection yet. uassert(ErrorCodes::ShardNotFound, - str::stream() << "Failed to create " << NamespaceString::kLogicalSessionsNamespace + str::stream() << "Failed to create " + << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() << ": cannot create the collection until there are shards", Grid::get(opCtx)->shardRegistry()->getNumShards(opCtx) != 0); @@ -64,7 +98,7 @@ void SessionsCollectionConfigServer::_shardCollectionIfNeeded(OperationContext* CreateCollectionRequest requestParamsObj; requestParamsObj.setShardKey(BSON("_id" << 1)); shardsvrCollRequest.setCreateCollectionRequest(std::move(requestParamsObj)); - shardsvrCollRequest.setDbName(NamespaceString::kLogicalSessionsNamespace.db()); + shardsvrCollRequest.setDbName(NamespaceString::kLogicalSessionsNamespace.dbName()); cluster::createCollection(opCtx, shardsvrCollRequest); } @@ -99,8 +133,10 @@ void SessionsCollectionConfigServer::_generateIndexesIfNeeded(OperationContext* SessionsCollection::generateCreateIndexesCmd(), ReadPreferenceSetting(ReadPreference::PrimaryOnly), Shard::RetryPolicy::kNoRetry, - BSONObj() /* query */, - BSONObj() /* collation */); + BSONObj() /*query*/, + BSONObj() /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); }); for (auto& shardResult : shardResults) { @@ -125,7 +161,7 @@ void SessionsCollectionConfigServer::setupSessionsCollection(OperationContext* o if (const auto replCoord = repl::ReplicationCoordinator::get(opCtx); replCoord->canAcceptWritesFor(opCtx, CollectionType::ConfigNS)) { auto filterQuery = - BSON("_id" << NamespaceString::kLogicalSessionsNamespace.ns() + BSON("_id" << NamespaceStringUtil::serialize(NamespaceString::kLogicalSessionsNamespace) << CollectionType::kMaxChunkSizeBytesFieldName << BSON("$exists" << false)); auto updateQuery = BSON("$set" << BSON(CollectionType::kMaxChunkSizeBytesFieldName << logical_sessions::kMaxChunkSizeBytes)); diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.cpp b/src/mongo/db/s/set_allow_migrations_coordinator.cpp index 5aa755edf10e1..d8960565bd430 100644 --- a/src/mongo/db/s/set_allow_migrations_coordinator.cpp +++ b/src/mongo/db/s/set_allow_migrations_coordinator.cpp @@ -30,11 +30,38 @@ #include "mongo/db/s/set_allow_migrations_coordinator.h" -#include "mongo/db/commands.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/sharding_logging.h" 
-#include "mongo/logv2/log.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -73,57 +100,50 @@ void SetAllowMigrationsCoordinator::appendCommandInfo(BSONObjBuilder* cmdInfoBui ExecutorFuture SetAllowMigrationsCoordinator::_runImpl( std::shared_ptr executor, const CancellationToken& token) noexcept { - return ExecutorFuture(**executor) - .then([this, anchor = shared_from_this()] { - auto opCtxHolder = cc().makeOperationContext(); - auto* opCtx = opCtxHolder.get(); - getForwardableOpMetadata().setOn(opCtx); - - uassert(ErrorCodes::NamespaceNotSharded, - "Collection must be sharded so migrations can be blocked", - isCollectionSharded(opCtx, nss())); - - const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - BatchedCommandRequest updateRequest([&]() { - write_ops::UpdateCommandRequest updateOp(CollectionType::ConfigNS); - updateOp.setUpdates({[&] { - write_ops::UpdateOpEntry entry; - entry.setQ(BSON(CollectionType::kNssFieldName << nss().ns())); - if (_allowMigrations) { - entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(BSON( - "$unset" << BSON(CollectionType::kPermitMigrationsFieldName << true)))); - } else { - entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(BSON( - "$set" << BSON(CollectionType::kPermitMigrationsFieldName << false)))); - } - entry.setMulti(false); - return entry; - }()}); - return updateOp; - }()); - - updateRequest.setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern.toBSON()); - - auto response = configShard->runBatchWriteCommand(opCtx, - Shard::kDefaultConfigCommandTimeout, - updateRequest, - Shard::RetryPolicy::kIdempotent); - - uassertStatusOK(response.toStatus()); - - ShardingLogging::get(opCtx)->logChange(opCtx, - "setPermitMigrations", - nss().ns(), - BSON("permitMigrations" << _allowMigrations)); - }) - .onError([this, anchor = shared_from_this()](const Status& status) { - LOGV2_ERROR(5622700, - "Error running set allow migrations", - logAttrs(nss()), - "error"_attr = redact(status)); - return status; - }); + return ExecutorFuture(**executor).then([this, anchor = shared_from_this()] { + auto opCtxHolder = cc().makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + getForwardableOpMetadata().setOn(opCtx); + + uassert(ErrorCodes::NamespaceNotSharded, + "Collection must be sharded so migrations can be blocked", + isCollectionSharded(opCtx, nss())); + + const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + + BatchedCommandRequest updateRequest([&]() { + write_ops::UpdateCommandRequest updateOp(CollectionType::ConfigNS); + updateOp.setUpdates({[&] { + write_ops::UpdateOpEntry entry; + entry.setQ( + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss()))); + if (_allowMigrations) { + entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(BSON( + "$unset" << 
BSON(CollectionType::kPermitMigrationsFieldName << true)))); + } else { + entry.setU(write_ops::UpdateModification::parseFromClassicUpdate( + BSON("$set" << BSON(CollectionType::kPermitMigrationsFieldName << false)))); + } + entry.setMulti(false); + return entry; + }()}); + return updateOp; + }()); + + updateRequest.setWriteConcern(ShardingCatalogClient::kMajorityWriteConcern.toBSON()); + + auto response = configShard->runBatchWriteCommand(opCtx, + Shard::kDefaultConfigCommandTimeout, + updateRequest, + Shard::RetryPolicy::kIdempotent); + + uassertStatusOK(response.toStatus()); + + ShardingLogging::get(opCtx)->logChange(opCtx, + "setPermitMigrations", + NamespaceStringUtil::serialize(nss()), + BSON("permitMigrations" << _allowMigrations)); + }); } } // namespace mongo diff --git a/src/mongo/db/s/set_allow_migrations_coordinator.h b/src/mongo/db/s/set_allow_migrations_coordinator.h index 78d2e03696af3..57cf477599041 100644 --- a/src/mongo/db/s/set_allow_migrations_coordinator.h +++ b/src/mongo/db/s/set_allow_migrations_coordinator.h @@ -29,11 +29,20 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/operation_context.h" #include "mongo/db/s/set_allow_migrations_coordinator_document_gen.h" #include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/scoped_task_executor.h" #include "mongo/s/request_types/set_allow_migrations_gen.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp index 17474c445dd43..8a2c62a9c9d69 100644 --- a/src/mongo/db/s/shard_filtering_metadata_refresh.cpp +++ b/src/mongo/db/s/shard_filtering_metadata_refresh.cpp @@ -29,10 +29,32 @@ #include "mongo/db/s/shard_filtering_metadata_refresh.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/forwardable_operation_metadata.h" @@ -40,12 +62,33 @@ #include "mongo/db/s/migration_util.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache.h" 
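The SetAllowMigrationsCoordinator change above either `$unset`s or `$set`s the `permitMigrations` field on the collection's config.collections entry, depending on the requested state. A minimal sketch of that branch, with the two update documents shown as JSON text instead of the real `BSONObj` / `write_ops::UpdateModification` construction (the shard_filtering_metadata_refresh.cpp include hunk resumes below):

```cpp
// Illustrative only: the coordinator builds these as BSON updates; here the
// two documents are plain strings so the sketch stands alone.
#include <iostream>
#include <string>

std::string permitMigrationsUpdate(bool allowMigrations) {
    // allowMigrations == true  -> remove the flag entirely (the default
    //                             allows migrations)
    // allowMigrations == false -> persist an explicit permitMigrations: false
    return allowMigrations
        ? R"({ "$unset": { "permitMigrations": true } })"
        : R"({ "$set":   { "permitMigrations": false } })";
}

int main() {
    std::cout << permitMigrationsUpdate(true) << "\n"
              << permitMigrationsUpdate(false) << "\n";
}
```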
+#include "mongo/s/chunk_manager.h" #include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -127,8 +170,8 @@ Status refreshDbMetadata(OperationContext* opCtx, }); // Force a refresh of the cached database metadata from the config server. - const auto swDbMetadata = - Grid::get(opCtx)->catalogCache()->getDatabaseWithRefresh(opCtx, dbName.db()); + const auto swDbMetadata = Grid::get(opCtx)->catalogCache()->getDatabaseWithRefresh( + opCtx, DatabaseNameUtil::serialize(dbName)); // Before setting the database metadata, exit early if the database version received by the // config server is not newer than the cached one. This is a best-effort optimization to reduce @@ -178,11 +221,6 @@ SharedSemiFuture recoverRefreshDbVersion(OperationContext* opCtx, serviceCtx = opCtx->getServiceContext(), forwardableOpMetadata = ForwardableOperationMetadata(opCtx)] { ThreadClient tc("DbMetadataRefreshThread", serviceCtx); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - const auto opCtxHolder = CancelableOperationContext(tc->makeOperationContext(), cancellationToken, executor); auto opCtx = opCtxHolder.get(); @@ -241,11 +279,12 @@ void onDbVersionMismatch(OperationContext* opCtx, { boost::optional dbLock; - dbLock.emplace(opCtx, dbName, MODE_IS); + dbLock.emplace(opCtx, DatabaseNameUtil::deserialize(boost::none, dbName), MODE_IS); if (receivedDbVersion) { - auto scopedDss = boost::make_optional( - DatabaseShardingState::assertDbLockedAndAcquireShared(opCtx, dbName)); + auto scopedDss = + boost::make_optional(DatabaseShardingState::assertDbLockedAndAcquireShared( + opCtx, DatabaseNameUtil::deserialize(boost::none, dbName))); if (joinDbVersionOperation(opCtx, &dbLock, &scopedDss)) { // Waited for another thread to exit from the critical section or to complete an @@ -270,8 +309,9 @@ void onDbVersionMismatch(OperationContext* opCtx, return; } - auto scopedDss = boost::make_optional( - DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, dbName)); + auto scopedDss = + boost::make_optional(DatabaseShardingState::assertDbLockedAndAcquireExclusive( + opCtx, DatabaseNameUtil::deserialize(boost::none, dbName))); if (joinDbVersionOperation(opCtx, &dbLock, &scopedDss)) { // Waited for another thread to exit from the critical section or to complete an @@ -288,7 +328,9 @@ void onDbVersionMismatch(OperationContext* opCtx, CancellationToken cancellationToken = cancellationSource.token(); (*scopedDss) ->setDbMetadataRefreshFuture( - recoverRefreshDbVersion(opCtx, dbName, cancellationToken), + recoverRefreshDbVersion(opCtx, + DatabaseNameUtil::deserialize(boost::none, dbName), + cancellationToken), std::move(cancellationSource)); dbMetadataRefreshFuture = (*scopedDss)->getDbMetadataRefreshFuture(); } @@ -365,10 +407,6 @@ SharedSemiFuture recoverRefreshCollectionPlacementVersion( return ExecutorFuture(executor) .then([=] { ThreadClient tc("RecoverRefreshThread", serviceContext); - { - stdx::lock_guard lk(*tc.get()); - 
tc->setSystemOperationKillableByStepdown(lk); - } if (MONGO_unlikely(hangInRecoverRefreshThread.shouldFail())) { hangInRecoverRefreshThread.pauseWhileSet(); @@ -399,8 +437,8 @@ SharedSemiFuture recoverRefreshCollectionPlacementVersion( auto currentMetadata = forceGetCurrentMetadata(opCtx, nss); if (currentMetadata.isSharded()) { - // If migrations are disallowed for the namespace, join any migrations which may be - // executing currently + // Abort and join any ongoing migration if migrations are disallowed for the + // namespace. if (!currentMetadata.allowMigrations()) { boost::optional> waitForMigrationAbort; { @@ -410,9 +448,12 @@ SharedSemiFuture recoverRefreshCollectionPlacementVersion( const auto scopedCsr = CollectionShardingRuntime::assertCollectionLockedAndAcquireShared(opCtx, nss); - - if (auto msm = MigrationSourceManager::get(*scopedCsr)) { - waitForMigrationAbort.emplace(msm->abort()); + // There is no need to abort an ongoing migration if the refresh is + // cancelled. + if (!cancellationToken.isCanceled()) { + if (auto msm = MigrationSourceManager::get(*scopedCsr)) { + waitForMigrationAbort.emplace(msm->abort()); + } } } @@ -430,25 +471,51 @@ SharedSemiFuture recoverRefreshCollectionPlacementVersion( } } - // Only if all actions taken as part of refreshing the placement version completed - // successfully do we want to install the current metadata. - // A view can potentially be created after spawning a thread to recover nss's shard - // version. It is then ok to lock views in order to clear filtering metadata. - // - // DBLock and CollectionLock must be used in order to avoid placement version checks - Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IX); - Lock::CollectionLock collLock(opCtx, nss, MODE_IX); - auto scopedCsr = - CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, nss); - - // cancellationToken needs to be checked under the CSR lock before overwriting the - // filtering metadata to serialize with other threads calling 'clearFilteringMetadata'. - if (!cancellationToken.isCanceled()) { - scopedCsr->setFilteringMetadata(opCtx, currentMetadata); + boost::optional> waitForMigrationAbort; + { + // Only if all actions taken as part of refreshing the placement version completed + // successfully do we want to install the current metadata. A view can potentially + // be created after spawning a thread to recover nss's shard version. It is then ok + // to lock views in order to clear filtering metadata. DBLock and CollectionLock + // must be used in order to avoid placement version checks + Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IX); + Lock::CollectionLock collLock(opCtx, nss, MODE_IX); + auto scopedCsr = + CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, + nss); + + // cancellationToken needs to be checked under the CSR lock before overwriting the + // filtering metadata to serialize with other threads calling + // 'clearFilteringMetadata'. + if (!cancellationToken.isCanceled()) { + // Atomically set the new filtering metadata and check if there is a migration + // that must be aborted. + scopedCsr->setFilteringMetadata(opCtx, currentMetadata); + + if (currentMetadata.isSharded() && !currentMetadata.allowMigrations()) { + if (auto msm = MigrationSourceManager::get(*scopedCsr)) { + waitForMigrationAbort.emplace(msm->abort()); + } + } + } + } + + // Join any ongoing migration outside of the CSR lock. 
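The refresh thread above decides, while it still holds the collection sharding runtime lock, whether a migration abort must be joined, but performs the actual blocking wait only after the lock is released (the hunk continues below). A standalone sketch of that "decide under the lock, block outside the lock" shape, with `std::mutex` and `std::future` standing in for the CSR lock and the abort future:

```cpp
// Minimal sketch; none of this is MongoDB API. The mutex stands in for the
// CSR/collection lock and the future for the migration-abort SharedSemiFuture.
#include <chrono>
#include <future>
#include <iostream>
#include <mutex>
#include <optional>
#include <thread>

std::mutex csrMutex;             // stand-in for the CSR lock
bool migrationsAllowed = false;  // stand-in for allowMigrations()

std::optional<std::future<void>> maybeAbortMigration() {
    // Pretend this kicks off an asynchronous abort of an ongoing migration.
    return std::async(std::launch::async, [] {
        std::this_thread::sleep_for(std::chrono::milliseconds(50));
    });
}

int main() {
    std::optional<std::future<void>> waitForMigrationAbort;
    {
        std::lock_guard<std::mutex> lk(csrMutex);
        // Install metadata / inspect state here, and only *record* the wait.
        if (!migrationsAllowed)
            waitForMigrationAbort = maybeAbortMigration();
    }  // lock released before any blocking wait

    if (waitForMigrationAbort)
        waitForMigrationAbort->get();
    std::cout << "migration abort joined outside the lock\n";
}
```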
+ if (waitForMigrationAbort) { + waitForMigrationAbort->get(opCtx); } - scopedCsr->resetPlacementVersionRecoverRefreshFuture(); - resetRefreshFutureOnError.dismiss(); + { + // Remember to wake all waiting threads for this refresh to finish. + Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IX); + Lock::CollectionLock collLock(opCtx, nss, MODE_IX); + auto scopedCsr = + CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, + nss); + + scopedCsr->resetPlacementVersionRecoverRefreshFuture(); + resetRefreshFutureOnError.dismiss(); + } }) .onCompletion([=](Status status) { // Check the cancellation token here to ensure we throw in all cancelation events. @@ -494,6 +561,13 @@ void onCollectionPlacementVersionMismatch(OperationContext* opCtx, boost::optional> inRecoverOrRefresh; { + // The refresh threads do not perform any data reads themselves, therefore they don't + // need to synchronise with secondary oplog application or go through admission control. + ShouldNotConflictWithSecondaryBatchApplicationBlock skipParallelBatchWriterMutex( + opCtx->lockState()); + ScopedAdmissionPriorityForLock skipAdmissionControl( + opCtx->lockState(), AdmissionContext::Priority::kImmediate); + boost::optional dbLock; boost::optional collLock; dbLock.emplace(opCtx, nss.dbName(), MODE_IS); diff --git a/src/mongo/db/s/shard_filtering_metadata_refresh.h b/src/mongo/db/s/shard_filtering_metadata_refresh.h index 20a146c86f3e5..f0c22c348e7b1 100644 --- a/src/mongo/db/s/shard_filtering_metadata_refresh.h +++ b/src/mongo/db/s/shard_filtering_metadata_refresh.h @@ -29,8 +29,14 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collection_metadata.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/database_version.h" #include "mongo/s/shard_version.h" diff --git a/src/mongo/db/s/shard_identity_rollback_notifier.cpp b/src/mongo/db/s/shard_identity_rollback_notifier.cpp index 66ff55ffdc76e..c33eb95bbda0f 100644 --- a/src/mongo/db/s/shard_identity_rollback_notifier.cpp +++ b/src/mongo/db/s/shard_identity_rollback_notifier.cpp @@ -27,11 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/shard_identity_rollback_notifier.h" +#include #include "mongo/db/operation_context.h" +#include "mongo/db/s/shard_identity_rollback_notifier.h" +#include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/s/shard_key_index_util.cpp b/src/mongo/db/s/shard_key_index_util.cpp index f0b4f3da72c4e..b89c7d4985e08 100644 --- a/src/mongo/db/s/shard_key_index_util.cpp +++ b/src/mongo/db/s/shard_key_index_util.cpp @@ -28,13 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include #include "mongo/bson/simple_bsonelement_comparator.h" #include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/s/shard_key_index_util.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/shard_key_index_util.h b/src/mongo/db/s/shard_key_index_util.h index b0718fbe44f67..374cfc4106b4e 100644 --- a/src/mongo/db/s/shard_key_index_util.h +++ b/src/mongo/db/s/shard_key_index_util.h @@ -29,15 +29,19 @@ #pragma once +#include +#include + #include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/operation_context.h" namespace mongo { class Collection; class CollectionPtr; - class IndexDescriptor; class ShardKeyIndex { diff --git a/src/mongo/db/s/shard_key_index_util_test.cpp b/src/mongo/db/s/shard_key_index_util_test.cpp index e20a30b653d1b..c7947467a187f 100644 --- a/src/mongo/db/s/shard_key_index_util_test.cpp +++ b/src/mongo/db/s/shard_key_index_util_test.cpp @@ -27,15 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/index_catalog_entry_impl.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/shard_key_util.cpp b/src/mongo/db/s/shard_key_util.cpp index daa8efa68b4ac..f704af44db626 100644 --- a/src/mongo/db/s/shard_key_util.cpp +++ b/src/mongo/db/s/shard_key_util.cpp @@ -27,20 +27,56 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/shard_key_util.h" - +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/read_preference.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/encryption_fields_util.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/field_ref.h" #include "mongo/db/hasher.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/s/migration_destination_manager.h" #include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/s/shard_key_util.h" +#include "mongo/db/write_concern_options.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace shardkeyutil { @@ -50,13 +86,12 @@ constexpr StringData kCheckShardingIndexCmdName = "checkShardingIndex"_sd; constexpr StringData kKeyPatternField = "keyPattern"_sd; /** - * Constructs the BSON specification document for the create indexes command using the given - * namespace, index key and options. + * Create an index specification used for create index command. */ -BSONObj makeCreateIndexesCmd(const NamespaceString& nss, - const BSONObj& keys, - const BSONObj& collation, - bool unique) { +BSONObj makeIndexSpec(const NamespaceString& nss, + const BSONObj& keys, + const BSONObj& collation, + bool unique) { BSONObjBuilder index; // Required fields for an index. @@ -93,10 +128,22 @@ BSONObj makeCreateIndexesCmd(const NamespaceString& nss, index.appendBool("unique", unique); } + return index.obj(); +} + +/** + * Constructs the BSON specification document for the create indexes command using the given + * namespace, index key and options. + */ +BSONObj makeCreateIndexesCmd(const NamespaceString& nss, + const BSONObj& keys, + const BSONObj& collation, + bool unique) { + auto indexSpec = makeIndexSpec(nss, keys, collation, unique); // The outer createIndexes command. 
BSONObjBuilder createIndexes; createIndexes.append("createIndexes", nss.coll()); - createIndexes.append("indexes", BSON_ARRAY(index.obj())); + createIndexes.append("indexes", BSON_ARRAY(indexSpec)); createIndexes.append("writeConcern", WriteConcernOptions::Majority); return createIndexes.obj(); } @@ -118,9 +165,9 @@ bool validShardKeyIndexExists(OperationContext* opCtx, bool isUnique = idx["unique"].trueValue(); bool isPrepareUnique = idx["prepareUnique"].trueValue(); uassert(ErrorCodes::InvalidOptions, - str::stream() << "can't shard collection '" << nss.ns() << "' with unique index on " - << currentKey << " and proposed shard key " - << shardKeyPattern.toBSON() + str::stream() << "can't shard collection '" << nss.toStringForErrorMsg() + << "' with unique index on " << currentKey + << " and proposed shard key " << shardKeyPattern.toBSON() << ". Uniqueness can't be maintained unless shard key is a prefix", (!isUnique && !isPrepareUnique) || shardKeyPattern.isIndexUniquenessCompatible(currentKey)); @@ -141,7 +188,7 @@ bool validShardKeyIndexExists(OperationContext* opCtx, // Note that this means that, for sharding, we only support one hashed index // per field per collection. uassert(ErrorCodes::InvalidOptions, - str::stream() << "can't shard collection " << nss.ns() + str::stream() << "can't shard collection " << nss.toStringForErrorMsg() << " with hashed shard key " << shardKeyPattern.toBSON() << " because the hashed index uses a non-default seed of " << idx["seed"].numberInt(), @@ -166,7 +213,8 @@ bool validShardKeyIndexExists(OperationContext* opCtx, // 3. If proposed key is required to be unique, additionally check for exact match. if (hasUsefulIndexForKey && requiresUnique) { - BSONObj eqQuery = BSON("ns" << nss.ns() << "key" << shardKeyPattern.toBSON()); + BSONObj eqQuery = + BSON("ns" << NamespaceStringUtil::serialize(nss) << "key" << shardKeyPattern.toBSON()); BSONObj eqQueryResult; for (const auto& idx : indexes) { @@ -185,7 +233,7 @@ bool validShardKeyIndexExists(OperationContext* opCtx, BSONObj currKey = eqQueryResult["key"].embeddedObject(); bool isCurrentID = (currKey.firstElementFieldNameStringData() == "_id"); uassert(ErrorCodes::InvalidOptions, - str::stream() << "can't shard collection " << nss.ns() << ", " + str::stream() << "can't shard collection " << nss.toStringForErrorMsg() << ", " << shardKeyPattern.toBSON() << " index not unique, and unique index explicitly specified", isExplicitlyUnique || isCurrentID); @@ -288,10 +336,11 @@ std::vector ValidationBehaviorsShardCollection::loadIndexes( void ValidationBehaviorsShardCollection::verifyUsefulNonMultiKeyIndex( const NamespaceString& nss, const BSONObj& proposedKey) const { BSONObj res; - auto success = _localClient->runCommand( - DatabaseName(boost::none, "admin"), - BSON(kCheckShardingIndexCmdName << nss.ns() << kKeyPatternField << proposedKey), - res); + auto success = _localClient->runCommand(DatabaseName::kAdmin, + BSON(kCheckShardingIndexCmdName + << NamespaceStringUtil::serialize(nss) + << kKeyPatternField << proposedKey), + res); uassert(ErrorCodes::InvalidOptions, res["errmsg"].str(), success); } @@ -347,9 +396,9 @@ void ValidationBehaviorsRefineShardKey::verifyUsefulNonMultiKeyIndex( _opCtx, ReadPreferenceSetting(ReadPreference::PrimaryOnly), "admin", - appendShardVersion( - BSON(kCheckShardingIndexCmdName << nss.ns() << kKeyPatternField << proposedKey), - _cri.getShardVersion(_indexShard->getId())), + appendShardVersion(BSON(kCheckShardingIndexCmdName << NamespaceStringUtil::serialize(nss) + << 
kKeyPatternField << proposedKey), + _cri.getShardVersion(_indexShard->getId())), Shard::RetryPolicy::kIdempotent)); if (checkShardingIndexRes.commandStatus == ErrorCodes::UnknownError) { // CheckShardingIndex returns UnknownError if a compatible shard key index cannot be found, @@ -423,6 +472,65 @@ void ValidationBehaviorsLocalRefineShardKey::createShardKeyIndex( MONGO_UNREACHABLE; } +ValidationBehaviorsReshardingBulkIndex::ValidationBehaviorsReshardingBulkIndex() + : _opCtx(nullptr), _cloneTimestamp(), _shardKeyIndexSpec() {} + +std::vector ValidationBehaviorsReshardingBulkIndex::loadIndexes( + const NamespaceString& nss) const { + invariant(_opCtx); + auto catalogCache = Grid::get(_opCtx)->catalogCache(); + auto cri = catalogCache->getShardedCollectionRoutingInfo(_opCtx, nss); + auto [indexSpecs, _] = MigrationDestinationManager::getCollectionIndexes( + _opCtx, nss, cri.cm.getMinKeyShardIdWithSimpleCollation(), cri, _cloneTimestamp); + return indexSpecs; +} + +void ValidationBehaviorsReshardingBulkIndex::verifyUsefulNonMultiKeyIndex( + const NamespaceString& nss, const BSONObj& proposedKey) const { + invariant(_opCtx); + auto catalogCache = Grid::get(_opCtx)->catalogCache(); + auto cri = catalogCache->getShardedCollectionRoutingInfo(_opCtx, nss); + auto shard = uassertStatusOK(Grid::get(_opCtx)->shardRegistry()->getShard( + _opCtx, cri.cm.getMinKeyShardIdWithSimpleCollation())); + auto checkShardingIndexRes = uassertStatusOK( + shard->runCommand(_opCtx, + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + "admin", + appendShardVersion(BSON(kCheckShardingIndexCmdName + << nss.ns() << kKeyPatternField << proposedKey), + cri.getShardVersion(shard->getId())), + Shard::RetryPolicy::kIdempotent)); + if (checkShardingIndexRes.commandStatus == ErrorCodes::UnknownError) { + // CheckShardingIndex returns UnknownError if a compatible shard key index cannot be found, + // but we return InvalidOptions to correspond with the shardCollection behavior. + uasserted(ErrorCodes::InvalidOptions, checkShardingIndexRes.response["errmsg"].str()); + } + // Rethrow any other error to allow retries on retryable errors. + uassertStatusOK(checkShardingIndexRes.commandStatus); +} + +void ValidationBehaviorsReshardingBulkIndex::verifyCanCreateShardKeyIndex( + const NamespaceString& nss, std::string* errMsg) const {} + +void ValidationBehaviorsReshardingBulkIndex::createShardKeyIndex( + const NamespaceString& nss, + const BSONObj& proposedKey, + const boost::optional& defaultCollation, + bool unique) const { + BSONObj collation = + defaultCollation && !defaultCollation->isEmpty() ? 
CollationSpec::kSimpleSpec : BSONObj(); + _shardKeyIndexSpec = makeIndexSpec(nss, proposedKey, collation, unique); +} + +void ValidationBehaviorsReshardingBulkIndex::setOpCtxAndCloneTimestamp(OperationContext* opCtx, + Timestamp cloneTimestamp) { + _opCtx = opCtx; + _cloneTimestamp = cloneTimestamp; +} + +boost::optional ValidationBehaviorsReshardingBulkIndex::getShardKeyIndexSpec() const { + return _shardKeyIndexSpec; +} } // namespace shardkeyutil } // namespace mongo diff --git a/src/mongo/db/s/shard_key_util.h b/src/mongo/db/s/shard_key_util.h index 82dbb3ee1344d..6ae653dc36528 100644 --- a/src/mongo/db/s/shard_key_util.h +++ b/src/mongo/db/s/shard_key_util.h @@ -29,10 +29,22 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/client/shard.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_util.h" namespace mongo { @@ -140,6 +152,37 @@ class ValidationBehaviorsLocalRefineShardKey final : public ShardKeyValidationBe const CollectionPtr& _coll; }; +/** + * Implementation of steps for validating a shard key for resharding building indexes after cloning. + */ +class ValidationBehaviorsReshardingBulkIndex final : public ShardKeyValidationBehaviors { +public: + class RecipientStateMachineExternalState; + ValidationBehaviorsReshardingBulkIndex(); + + std::vector loadIndexes(const NamespaceString& nss) const override; + + void verifyUsefulNonMultiKeyIndex(const NamespaceString& nss, + const BSONObj& proposedKey) const override; + + void verifyCanCreateShardKeyIndex(const NamespaceString& nss, + std::string* errMsg) const override; + + void createShardKeyIndex(const NamespaceString& nss, + const BSONObj& proposedKey, + const boost::optional& defaultCollation, + bool unique) const override; + + void setOpCtxAndCloneTimestamp(OperationContext* opCtx, Timestamp cloneTimestamp); + + boost::optional getShardKeyIndexSpec() const; + +private: + OperationContext* _opCtx; + Timestamp _cloneTimestamp; + mutable boost::optional _shardKeyIndexSpec; +}; + /** * Compares the proposed shard key with the collection's existing indexes to ensure they are a legal * combination. 
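The new `ValidationBehaviorsReshardingBulkIndex` declared above does not build the shard key index when `createShardKeyIndex()` is invoked; it only records the spec (in a mutable member) so the resharding recipient can fetch it via `getShardKeyIndexSpec()` and build it together with the other indexes after cloning. A standalone sketch of that "record instead of act" strategy behind a common interface, with hypothetical class names rather than the real `ShardKeyValidationBehaviors` hierarchy:

```cpp
// Sketch only: "IndexBehaviors" stands in for ShardKeyValidationBehaviors;
// the two implementations mirror the immediate vs. deferred strategies.
#include <iostream>
#include <optional>
#include <string>

class IndexBehaviors {
public:
    virtual ~IndexBehaviors() = default;
    virtual void createShardKeyIndex(const std::string& spec) = 0;
};

// shardCollection-style behavior: act immediately.
class CreateNow : public IndexBehaviors {
public:
    void createShardKeyIndex(const std::string& spec) override {
        std::cout << "creating index now: " << spec << "\n";
    }
};

// Resharding bulk-index behavior: only remember the spec; the caller fetches
// it later and builds it alongside the other cloned indexes.
class RecordForLater : public IndexBehaviors {
public:
    void createShardKeyIndex(const std::string& spec) override {
        _spec = spec;
    }
    std::optional<std::string> recordedSpec() const {
        return _spec;
    }

private:
    std::optional<std::string> _spec;
};

int main() {
    CreateNow now;
    now.createShardKeyIndex(R"({ "x": 1 })");

    RecordForLater later;
    later.createShardKeyIndex(R"({ "x": 1 })");
    std::cout << "deferred spec: " << later.recordedSpec().value() << "\n";
}
```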
diff --git a/src/mongo/db/s/shard_local.cpp b/src/mongo/db/s/shard_local.cpp index 51020d825431b..5747b490ec051 100644 --- a/src/mongo/db/s/shard_local.cpp +++ b/src/mongo/db/s/shard_local.cpp @@ -28,23 +28,17 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/s/shard_local.h" +#include #include "mongo/client/remote_command_targeter.h" -#include "mongo/db/catalog/index_catalog.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/shard_local.h" #include "mongo/db/server_options.h" -#include "mongo/logv2/log.h" +#include "mongo/db/service_context.h" #include "mongo/util/assert_util.h" -#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/shard_local.h b/src/mongo/db/s/shard_local.h index 386ceba7f1332..5b9b5c88f5755 100644 --- a/src/mongo/db/s/shard_local.h +++ b/src/mongo/db/s/shard_local.h @@ -29,8 +29,30 @@ #pragma once +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/rs_local_client.h" +#include "mongo/db/shard_id.h" #include "mongo/s/client/shard.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/db/s/shard_local_test.cpp b/src/mongo/db/s/shard_local_test.cpp index 2bbd9b3d60a0b..64ad1ca6ced22 100644 --- a/src/mongo/db/s/shard_local_test.cpp +++ b/src/mongo/db/s/shard_local_test.cpp @@ -27,21 +27,36 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/read_preference.h" -#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/s/shard_local.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/storage/snapshot_manager.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/write_concern_options.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -82,7 +97,7 @@ class ShardLocalTest : public ServiceContextMongoDTest { void ShardLocalTest::setUp() { ServiceContextMongoDTest::setUp(); _opCtx = getGlobalServiceContext()->makeOperationContext(&cc()); - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; _shardLocal = std::make_unique(ShardId::kConfigServerId); const repl::ReplSettings replSettings = {}; repl::ReplicationCoordinator::set( @@ -123,7 +138,7 @@ StatusWith ShardLocalTest::runFindAndModifyRunCommand(Na return _shardLocal->runCommandWithFixedRetryAttempts( _opCtx.get(), ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - nss.db().toString(), + nss.db_forTest().toString(), findAndModifyRequest.toBSON({}), Shard::RetryPolicy::kNoRetry); } @@ -132,7 +147,7 @@ StatusWith> ShardLocalTest::getIndexes(NamespaceString nss) auto response = _shardLocal->runCommandWithFixedRetryAttempts( _opCtx.get(), ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - nss.db().toString(), + nss.db_forTest().toString(), BSON("listIndexes" << nss.coll().toString()), Shard::RetryPolicy::kIdempotent); if (!response.isOK()) { diff --git a/src/mongo/db/s/shard_metadata_util.cpp b/src/mongo/db/s/shard_metadata_util.cpp index 3a6f9f14e7830..cf0f60f304685 100644 --- a/src/mongo/db/s/shard_metadata_util.cpp +++ b/src/mongo/db/s/shard_metadata_util.cpp @@ -29,17 +29,46 @@ #include "mongo/db/s/shard_metadata_util.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/s/type_shard_collection.h" +#include "mongo/db/s/type_shard_collection_gen.h" #include "mongo/db/s/type_shard_database.h" +#include "mongo/db/s/type_shard_database_gen.h" +#include "mongo/db/tenant_id.h" #include 
"mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" #include "mongo/rpc/unique_message.h" #include "mongo/s/catalog/type_chunk.h" -#include "mongo/s/catalog/type_collection.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -68,7 +97,7 @@ Status getStatusFromWriteCommandResponse(const BSONObj& commandResult) { Status setPersistedRefreshFlags(OperationContext* opCtx, const NamespaceString& nss) { return updateShardCollectionsEntry( opCtx, - BSON(ShardCollectionType::kNssFieldName << nss.ns()), + BSON(ShardCollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), BSON("$set" << BSON(ShardCollectionType::kRefreshingFieldName << true)), false /*upsert*/); } @@ -103,10 +132,11 @@ Status unsetPersistedRefreshFlags(OperationContext* opCtx, ShardCollectionType::kLastRefreshedCollectionMajorMinorVersionFieldName, refreshedVersion.toLong()); - return updateShardCollectionsEntry(opCtx, - BSON(ShardCollectionType::kNssFieldName << nss.ns()), - BSON("$set" << updateBuilder.obj()), - false /*upsert*/); + return updateShardCollectionsEntry( + opCtx, + BSON(ShardCollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), + BSON("$set" << updateBuilder.obj()), + false /*upsert*/); } StatusWith getPersistedRefreshFlags(OperationContext* opCtx, @@ -145,26 +175,29 @@ StatusWith readShardCollectionsEntry(OperationContext* opCt try { DBDirectClient client(opCtx); FindCommandRequest findRequest{NamespaceString::kShardConfigCollectionsNamespace}; - findRequest.setFilter(BSON(ShardCollectionType::kNssFieldName << nss.ns())); + findRequest.setFilter( + BSON(ShardCollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss))); findRequest.setLimit(1); std::unique_ptr cursor = client.find(std::move(findRequest)); if (!cursor) { return Status(ErrorCodes::OperationFailed, str::stream() << "Failed to establish a cursor for reading " - << NamespaceString::kShardConfigCollectionsNamespace.ns() + << NamespaceString::kShardConfigCollectionsNamespace + .toStringForErrorMsg() << " from local storage"); } if (!cursor->more()) { // The collection has been dropped. 
return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "collection " << nss.ns() << " not found"); + str::stream() + << "collection " << nss.toStringForErrorMsg() << " not found"); } BSONObj document = cursor->nextSafe(); return ShardCollectionType(document); } catch (const DBException& ex) { - return ex.toStatus(str::stream() << "Failed to read the '" << nss.ns() + return ex.toStatus(str::stream() << "Failed to read the '" << nss.toStringForErrorMsg() << "' entry locally from config.collections"); } } @@ -179,7 +212,8 @@ StatusWith readShardDatabasesEntry(OperationContext* opCtx, S if (!cursor) { return Status(ErrorCodes::OperationFailed, str::stream() << "Failed to establish a cursor for reading " - << NamespaceString::kShardConfigDatabasesNamespace.ns() + << NamespaceString::kShardConfigDatabasesNamespace + .toStringForErrorMsg() << " from local storage"); } @@ -192,9 +226,9 @@ StatusWith readShardDatabasesEntry(OperationContext* opCtx, S BSONObj document = cursor->nextSafe(); return ShardDatabaseType(document); } catch (const DBException& ex) { - return ex.toStatus(str::stream() - << "Failed to read the '" << dbName.toString() << "' entry locally from " - << NamespaceString::kShardConfigDatabasesNamespace); + return ex.toStatus( + str::stream() << "Failed to read the '" << dbName.toString() << "' entry locally from " + << NamespaceString::kShardConfigDatabasesNamespace.toStringForErrorMsg()); } } @@ -283,7 +317,8 @@ StatusWith> readShardChunks(OperationContext* opCtx, boost::optional limit, const OID& epoch, const Timestamp& timestamp) { - const auto chunksNss = NamespaceString{ChunkType::ShardNSPrefix + nss.ns()}; + const auto chunksNss = + NamespaceString{ChunkType::ShardNSPrefix + NamespaceStringUtil::serialize(nss)}; try { DBDirectClient client(opCtx); @@ -296,8 +331,8 @@ StatusWith> readShardChunks(OperationContext* opCtx, } std::unique_ptr cursor = client.find(std::move(findRequest)); uassert(ErrorCodes::OperationFailed, - str::stream() << "Failed to establish a cursor for reading " << chunksNss.ns() - << " from local storage", + str::stream() << "Failed to establish a cursor for reading " + << chunksNss.toStringForErrorMsg() << " from local storage", cursor); std::vector chunks; @@ -324,7 +359,8 @@ Status updateShardChunks(OperationContext* opCtx, const OID& currEpoch) { invariant(!chunks.empty()); - const auto chunksNss = NamespaceString{ChunkType::ShardNSPrefix + nss.ns()}; + const auto chunksNss = + NamespaceString{ChunkType::ShardNSPrefix + NamespaceStringUtil::serialize(nss)}; try { DBDirectClient client(opCtx); @@ -402,7 +438,8 @@ Status dropChunksAndDeleteCollectionsEntry(OperationContext* opCtx, const Namesp NamespaceString::kShardConfigCollectionsNamespace); deleteOp.setDeletes({[&] { write_ops::DeleteOpEntry entry; - entry.setQ(BSON(ShardCollectionType::kNssFieldName << nss.ns())); + entry.setQ(BSON(ShardCollectionType::kNssFieldName + << NamespaceStringUtil::serialize(nss))); entry.setMulti(true); return entry; }()}); @@ -433,7 +470,8 @@ void dropChunks(OperationContext* opCtx, const NamespaceString& nss) { // Drop the 'config.cache.chunks.' collection. 
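In readShardChunks and updateShardChunks above, the persisted chunks namespace is derived by concatenating `ChunkType::ShardNSPrefix` with the serialized collection namespace, i.e. the per-collection `config.cache.chunks.<ns>` collections. A trivial standalone sketch of that naming scheme, assuming the prefix value shown in the comment above; the helper names are stand-ins, not MongoDB API (the dropChunks hunk continues below):

```cpp
// Sketch: compose the per-collection chunks cache namespace from a fixed
// prefix plus the serialized collection namespace.
#include <iostream>
#include <string>

const std::string kShardNSPrefix = "config.cache.chunks.";

std::string chunksCacheNamespace(const std::string& serializedNss) {
    return kShardNSPrefix + serializedNss;
}

int main() {
    // e.g. cached chunks for "test.foo" live in "config.cache.chunks.test.foo"
    std::cout << chunksCacheNamespace("test.foo") << "\n";
}
```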
BSONObj result; if (!client.dropCollection( - NamespaceStringUtil::deserialize(boost::none, ChunkType::ShardNSPrefix + nss.ns()), + NamespaceStringUtil::deserialize( + boost::none, ChunkType::ShardNSPrefix + NamespaceStringUtil::serialize(nss)), kLocalWriteConcern, &result)) { auto status = getStatusFromCommandResult(result); diff --git a/src/mongo/db/s/shard_metadata_util.h b/src/mongo/db/s/shard_metadata_util.h index cc9f96097ba3b..33cc9cd4b2a61 100644 --- a/src/mongo/db/s/shard_metadata_util.h +++ b/src/mongo/db/s/shard_metadata_util.h @@ -29,12 +29,16 @@ #pragma once +#include #include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/s/chunk_version.h" namespace mongo { @@ -42,6 +46,7 @@ namespace mongo { class ChunkType; class NamespaceString; class OperationContext; + class ShardCollectionType; class ShardDatabaseType; diff --git a/src/mongo/db/s/shard_metadata_util_test.cpp b/src/mongo/db/s/shard_metadata_util_test.cpp index 75b3b12382c46..cbe859615590a 100644 --- a/src/mongo/db/s/shard_metadata_util_test.cpp +++ b/src/mongo/db/s/shard_metadata_util_test.cpp @@ -27,14 +27,37 @@ * it in the license file. */ -#include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/commands.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/s/shard_metadata_util.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/type_shard_collection.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/db/s/type_shard_collection_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -59,7 +82,7 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture { ShardCollectionType setUpCollection() { ShardCollectionType shardCollectionType( BSON(ShardCollectionType::kNssFieldName - << kNss.ns() << ShardCollectionType::kEpochFieldName + << kNss.ns_forTest() << ShardCollectionType::kEpochFieldName << maxCollPlacementVersion.epoch() << ShardCollectionType::kTimestampFieldName << maxCollPlacementVersion.getTimestamp() << ShardCollectionType::kUuidFieldName << uuid << ShardCollectionType::kKeyPatternFieldName << keyPattern.toBSON() @@ -67,10 +90,11 @@ struct ShardMetadataUtilTest : public ShardServerTestFixture { << ShardCollectionType::kUniqueFieldName << kUnique)); shardCollectionType.setRefreshing(true); - ASSERT_OK(updateShardCollectionsEntry(operationContext(), - BSON(ShardCollectionType::kNssFieldName << kNss.ns()), - shardCollectionType.toBSON(), - true /*upsert*/)); + ASSERT_OK(updateShardCollectionsEntry( + operationContext(), + BSON(ShardCollectionType::kNssFieldName << kNss.ns_forTest()), + shardCollectionType.toBSON(), + true /*upsert*/)); return shardCollectionType; } @@ -196,7 +220,7 @@ 
TEST_F(ShardMetadataUtilTest, PersistedRefreshSignalStartAndFinish) { assertGet(readShardCollectionsEntry(operationContext(), kNss)); ASSERT_EQUALS(shardCollectionsEntry.getUuid(), uuid); - ASSERT_EQUALS(shardCollectionsEntry.getNss().ns(), kNss.ns()); + ASSERT_EQUALS(shardCollectionsEntry.getNss().ns_forTest(), kNss.ns_forTest()); ASSERT_EQUALS(shardCollectionsEntry.getEpoch(), maxCollPlacementVersion.epoch()); ASSERT_EQUALS(shardCollectionsEntry.getTimestamp(), maxCollPlacementVersion.getTimestamp()); ASSERT_BSONOBJ_EQ(shardCollectionsEntry.getKeyPattern().toBSON(), keyPattern.toBSON()); @@ -208,7 +232,7 @@ TEST_F(ShardMetadataUtilTest, PersistedRefreshSignalStartAndFinish) { // Signal refresh start again to make sure nothing changes ASSERT_OK(updateShardCollectionsEntry( operationContext(), - BSON(ShardCollectionType::kNssFieldName << kNss.ns()), + BSON(ShardCollectionType::kNssFieldName << kNss.ns_forTest()), BSON("$set" << BSON(ShardCollectionType::kRefreshingFieldName << true)), false)); diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp index 7d5edf3350a2b..d937d513d1d3a 100644 --- a/src/mongo/db/s/shard_server_catalog_cache_loader.cpp +++ b/src/mongo/db/s/shard_server_catalog_cache_loader.cpp @@ -29,27 +29,72 @@ #include "mongo/db/s/shard_server_catalog_cache_loader.h" +#include #include - -#include "mongo/db/catalog/rename_collection.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" #include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/db/operation_context_group.h" -#include "mongo/db/read_concern.h" -#include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/shard_metadata_util.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/type_shard_collection.h" +#include "mongo/db/s/type_shard_collection_gen.h" #include "mongo/db/s/type_shard_database.h" +#include "mongo/db/s/type_shard_database_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/stale_exception.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include 
"mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -110,11 +155,11 @@ Status persistCollectionAndChangedChunks(OperationContext* opCtx, update.setAllowMigrations(collAndChunks.allowMigrations); update.setRefreshing(true); // Mark as refreshing so secondaries are aware of it. - Status status = - updateShardCollectionsEntry(opCtx, - BSON(ShardCollectionType::kNssFieldName << nss.ns()), - update.toBSON(), - true /*upsert*/); + Status status = updateShardCollectionsEntry( + opCtx, + BSON(ShardCollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), + update.toBSON(), + true /*upsert*/); if (!status.isOK()) { return status; } @@ -184,7 +229,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac uassertStatusOKWithContext(statusWithCollection, str::stream() << "Failed to read persisted collections entry for collection '" - << nss.ns() << "'."); + << nss.toStringForErrorMsg() << "'."); auto cachedCollection = statusWithCollection.getValue(); if (cachedCollection.getRefreshing() && *cachedCollection.getRefreshing()) { @@ -208,7 +253,7 @@ ChunkVersion getPersistedMaxChunkVersion(OperationContext* opCtx, const Namespac uassertStatusOKWithContext( statusWithChunk, str::stream() << "Failed to read highest version persisted chunk for collection '" - << nss.ns() << "'."); + << nss.toStringForErrorMsg() << "'."); return statusWithChunk.getValue().empty() ? ChunkVersion::UNSHARDED() : statusWithChunk.getValue().front().getVersion(); @@ -307,29 +352,6 @@ ShardId getSelfShardId(OperationContext* opCtx) { return shardingState->shardId(); } -/** - * Sends _flushRoutingTableCacheUpdates to the primary to force it to refresh its routing table for - * collection 'nss' and then waits for the refresh to replicate to this node. - */ -void forcePrimaryCollectionRefreshAndWaitForReplication(OperationContext* opCtx, - const NamespaceString& nss) { - auto selfShard = - uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, getSelfShardId(opCtx))); - - auto cmdResponse = uassertStatusOK(selfShard->runCommandWithFixedRetryAttempts( - opCtx, - ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - "admin", - BSON("_flushRoutingTableCacheUpdates" << nss.ns()), - Seconds{30}, - Shard::RetryPolicy::kIdempotent)); - - uassertStatusOK(cmdResponse.commandStatus); - - uassertStatusOK(repl::ReplicationCoordinator::get(opCtx)->waitUntilOpTimeForRead( - opCtx, {LogicalTime::fromOperationTime(cmdResponse.response), boost::none})); -} - /** * Sends _flushDatabaseCacheUpdates to the primary to force it to refresh its routing table for * database 'dbName' and then waits for the refresh to replicate to this node. @@ -352,13 +374,6 @@ void forcePrimaryDatabaseRefreshAndWaitForReplication(OperationContext* opCtx, S opCtx, {LogicalTime::fromOperationTime(cmdResponse.response), boost::none})); } -// TODO: SERVER-74105 remove -bool shouldSkipStoringLocally() { - // Note: cannot use isExclusivelyConfigSvrRole as it ignores fcv. 
- return serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - !gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility); -} - } // namespace ShardServerCatalogCacheLoader::ShardServerCatalogCacheLoader( @@ -378,9 +393,9 @@ ShardServerCatalogCacheLoader::~ShardServerCatalogCacheLoader() { shutDown(); } -void ShardServerCatalogCacheLoader::notifyOfCollectionPlacementVersionUpdate( - const NamespaceString& nss) { - _namespaceNotifications.notifyChange(nss); +void ShardServerCatalogCacheLoader::notifyOfCollectionRefreshEndMarkerSeen( + const NamespaceString& nss, const Timestamp& commitTime) { + _namespaceNotifications.notifyChange(nss, commitTime); } void ShardServerCatalogCacheLoader::initializeReplicaSetRole(bool isPrimary) { @@ -410,6 +425,13 @@ void ShardServerCatalogCacheLoader::onStepUp() { _role = ReplicaSetRole::Primary; } +void ShardServerCatalogCacheLoader::onReplicationRollback() { + // No need to increment the term since this interruption is only to prevent the secondary + // refresh thread from getting stuck or waiting on an incorrect opTime. + stdx::lock_guard lg(_mutex); + _contexts.interrupt(ErrorCodes::Interrupted); +} + void ShardServerCatalogCacheLoader::shutDown() { { stdx::lock_guard lg(_mutex); @@ -442,7 +464,7 @@ SemiFuture ShardServerCatalogCacheLoader::getChunksS // unavailable, unnecessarily reducing availability. if (nss.isNamespaceAlwaysUnsharded()) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << nss.ns() << " not found"); + str::stream() << "Collection " << nss.toStringForErrorMsg() << " not found"); } bool isPrimary; @@ -453,10 +475,17 @@ SemiFuture ShardServerCatalogCacheLoader::getChunksS }(); return ExecutorFuture(_executor) - .then([=]() { + .then([=, this]() { ThreadClient tc("ShardServerCatalogCacheLoader::getChunksSince", getGlobalServiceContext()); auto context = _contexts.makeOperationContext(*tc); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + { // We may have missed an OperationContextGroup interrupt since this operation // began but before the OperationContext was added to the group. So we'll check @@ -498,6 +527,12 @@ SemiFuture ShardServerCatalogCacheLoader::getDatabase(StringData d getGlobalServiceContext()); auto context = _contexts.makeOperationContext(*tc); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + { // We may have missed an OperationContextGroup interrupt since this operation began // but before the OperationContext was added to the group. 
So we'll check that we're @@ -527,17 +562,11 @@ void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opC while (true) { uassert(ErrorCodes::NotWritablePrimary, - str::stream() << "Unable to wait for collection metadata flush for " << nss.ns() + str::stream() << "Unable to wait for collection metadata flush for " + << nss.toStringForErrorMsg() << " because the node's replication role changed.", _role == ReplicaSetRole::Primary && _term == initialTerm); - uassert(StaleConfigInfo(nss, - ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), - boost::none, - getSelfShardId(opCtx)), - "config server is not storing cached metadata", - !shouldSkipStoringLocally()); - auto it = _collAndChunkTaskLists.find(nss); // If there are no tasks for the specified namespace, everything must have been completed @@ -584,7 +613,7 @@ void ShardServerCatalogCacheLoader::waitForCollectionFlush(OperationContext* opC opCtx->waitForConditionOrInterrupt(*condVar, lg, [&]() { const auto it = _collAndChunkTaskLists.find(nss); return it == _collAndChunkTaskLists.end() || it->second.empty() || - it->second.front().taskNum != activeTaskNum || shouldSkipStoringLocally(); + it->second.front().taskNum != activeTaskNum; }); } } @@ -605,10 +634,6 @@ void ShardServerCatalogCacheLoader::waitForDatabaseFlush(OperationContext* opCtx << " because the node's replication role changed.", _role == ReplicaSetRole::Primary && _term == initialTerm); - uassert(StaleDbRoutingVersion(dbName.toString(), DatabaseVersion::makeFixed(), boost::none), - "config server is not storing cached metadata", - !shouldSkipStoringLocally()); - auto it = _dbTaskLists.find(dbName.toString()); // If there are no tasks for the specified namespace, everything must have been completed @@ -656,7 +681,7 @@ void ShardServerCatalogCacheLoader::waitForDatabaseFlush(OperationContext* opCtx opCtx->waitForConditionOrInterrupt(*condVar, lg, [&]() { const auto it = _dbTaskLists.find(dbName.toString()); return it == _dbTaskLists.end() || it->second.empty() || - it->second.front().taskNum != activeTaskNum || shouldSkipStoringLocally(); + it->second.front().taskNum != activeTaskNum; }); } } @@ -666,13 +691,8 @@ StatusWith ShardServerCatalogCacheLoader::_runSecond OperationContext* opCtx, const NamespaceString& nss, const ChunkVersion& catalogCacheSinceVersion) { - - if (shouldSkipStoringLocally()) { - return _configServerLoader->getChunksSince(nss, catalogCacheSinceVersion).getNoThrow(); - } - Timer t; - forcePrimaryCollectionRefreshAndWaitForReplication(opCtx, nss); + auto nssNotif = _forcePrimaryCollectionRefreshAndWaitForReplication(opCtx, nss); LOGV2_FOR_CATALOG_REFRESH(5965800, 2, "Cache loader on secondary successfully waited for primary refresh " @@ -681,14 +701,8 @@ StatusWith ShardServerCatalogCacheLoader::_runSecond "duration"_attr = Milliseconds(t.millis())); // Read the local metadata. - - // Disallow reading on an older snapshot because this relies on being able to read the - // side effects of writes during secondary replication after being signalled from the - // CollectionPlacementVersionLogOpHandler. 
- BlockSecondaryReadsDuringBatchApplication_DONT_USE secondaryReadsBlockBehindReplication(opCtx); - return _getCompletePersistedMetadataForSecondarySinceVersion( - opCtx, nss, catalogCacheSinceVersion); + opCtx, std::move(nssNotif), nss, catalogCacheSinceVersion); } StatusWith @@ -697,16 +711,8 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince( const NamespaceString& nss, const ChunkVersion& catalogCacheSinceVersion, long long termScheduled) { - const auto decidedToSkipStoringLocally = shouldSkipStoringLocally(); - // Get the max version the loader has. const auto maxLoaderVersion = [&] { - // If we are not storing locally, use the requested version to prevent fetching the entire - // routing table everytime. - if (decidedToSkipStoringLocally) { - return catalogCacheSinceVersion; - } - { stdx::lock_guard lock(_mutex); auto taskListIt = _collAndChunkTaskLists.find(nss); @@ -727,8 +733,7 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince( auto swCollectionAndChangedChunks = _configServerLoader->getChunksSince(nss, maxLoaderVersion).getNoThrow(); - if (swCollectionAndChangedChunks == ErrorCodes::NamespaceNotFound && - !decidedToSkipStoringLocally) { + if (swCollectionAndChangedChunks == ErrorCodes::NamespaceNotFound) { _ensureMajorityPrimaryAndScheduleCollAndChunksTask( opCtx, nss, @@ -755,16 +760,15 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince( {collAndChunks.epoch, collAndChunks.timestamp})) { return Status{ErrorCodes::ConflictingOperationInProgress, str::stream() - << "Invalid chunks found when reloading '" << nss.toString() + << "Invalid chunks found when reloading '" << nss.toStringForErrorMsg() << "' Previous collection timestamp was '" << collAndChunks.timestamp << "', but found a new timestamp '" << collAndChunks.changedChunks.back().getVersion().getTimestamp() << "'."}; } - if (!decidedToSkipStoringLocally && - (collAndChunks.changedChunks.back().getVersion().isNotComparableWith(maxLoaderVersion) || - maxLoaderVersion.isOlderThan(collAndChunks.changedChunks.back().getVersion()))) { + if (collAndChunks.changedChunks.back().getVersion().isNotComparableWith(maxLoaderVersion) || + maxLoaderVersion.isOlderThan(collAndChunks.changedChunks.back().getVersion())) { _ensureMajorityPrimaryAndScheduleCollAndChunksTask( opCtx, nss, @@ -783,10 +787,6 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince( "refreshedCollectionPlacementVersion"_attr = collAndChunks.changedChunks.back().getVersion()); - if (decidedToSkipStoringLocally) { - return swCollectionAndChangedChunks; - } - // Metadata was found remotely // -- otherwise we would have received NamespaceNotFound rather than Status::OK(). // Return metadata for CatalogCache that's GTE catalogCacheSinceVersion, @@ -808,12 +808,13 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince( // to attempt the refresh as secondary instead of failing the operation return Status(ErrorCodes::ConflictingOperationInProgress, str::stream() << "Replication stepdown occurred during refresh for '" - << nss.toString()); + << nss.toStringForErrorMsg()); } // After finding metadata remotely, we must have found metadata locally. 
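In the _schedulePrimaryGetChunksSince hunks above, the loader now always derives the starting point for an incremental refresh from what it already knows locally: the newest version carried by a still-enqueued task wins, otherwise the persisted cache is consulted, and only chunks newer than that are requested from the config server. A rough sketch of that version-selection step follows, with stand-in types (ChunkVersion, EnqueuedTask, readMaxPersistedVersion) rather than the real classes.

```cpp
// Illustrative sketch of how the primary picks the starting version for an
// incremental chunk refresh. Every type and value below is a stand-in.
#include <deque>
#include <iostream>

struct ChunkVersion {
    long long major = 0;
};

struct EnqueuedTask {
    ChunkVersion maxQueryVersion;  // Highest version this pending (unpersisted) task carries.
};

ChunkVersion readMaxPersistedVersion() {
    return ChunkVersion{3};  // Stand-in for reading the persisted cache on disk.
}

ChunkVersion computeMaxLoaderVersion(const std::deque<EnqueuedTask>& pendingTasks) {
    // Prefer the newest enqueued-but-unpersisted version; otherwise fall back to disk.
    if (!pendingTasks.empty())
        return pendingTasks.back().maxQueryVersion;
    return readMaxPersistedVersion();
}

int main() {
    std::deque<EnqueuedTask> pendingTasks;
    pendingTasks.push_back(EnqueuedTask{ChunkVersion{5}});

    ChunkVersion since = computeMaxLoaderVersion(pendingTasks);
    std::cout << "ask the config server for chunks with version > " << since.major << "\n";
    // The real loader then enqueues either the returned diff for persistence, or a
    // "collection dropped" task if the config server answered NamespaceNotFound.
    return 0;
}
```

Starting from the newest enqueued version avoids re-fetching chunk diffs that are already queued for persistence but not yet written.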
tassert(7032350, - str::stream() << "No chunks metadata found for collection '" << nss + str::stream() << "No chunks metadata found for collection '" + << nss.toStringForErrorMsg() << "' despite the config server returned actual information", !collAndChunks.changedChunks.empty()); @@ -823,10 +824,6 @@ ShardServerCatalogCacheLoader::_schedulePrimaryGetChunksSince( StatusWith ShardServerCatalogCacheLoader::_runSecondaryGetDatabase( OperationContext* opCtx, StringData dbName) { - if (shouldSkipStoringLocally()) { - return _configServerLoader->getDatabase(dbName).getNoThrow(); - } - Timer t; forcePrimaryDatabaseRefreshAndWaitForReplication(opCtx, dbName); LOGV2_FOR_CATALOG_REFRESH(5965801, @@ -843,7 +840,6 @@ StatusWith ShardServerCatalogCacheLoader::_runSecondaryGetDatabase DatabaseType dbt; dbt.setName(shardDatabase.getName()); dbt.setPrimary(shardDatabase.getPrimary()); - dbt.setSharded(shardDatabase.getSharded()); dbt.setVersion(shardDatabase.getVersion()); return dbt; @@ -1038,10 +1034,6 @@ void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleCollAndChun void ShardServerCatalogCacheLoader::_ensureMajorityPrimaryAndScheduleDbTask(OperationContext* opCtx, StringData dbName, DBTask task) { - if (shouldSkipStoringLocally()) { - return; - } - { stdx::lock_guard lock(_mutex); @@ -1071,6 +1063,12 @@ void ShardServerCatalogCacheLoader::_runCollAndChunksTasks(const NamespaceString getGlobalServiceContext()); auto context = _contexts.makeOperationContext(*tc); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + bool taskFinished = false; bool inShutdown = false; try { @@ -1150,6 +1148,12 @@ void ShardServerCatalogCacheLoader::_runDbTasks(StringData dbName) { ThreadClient tc("ShardServerCatalogCacheLoader::runDbTasks", getGlobalServiceContext()); auto context = _contexts.makeOperationContext(*tc); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + bool taskFinished = false; bool inShutdown = false; try { @@ -1246,8 +1250,8 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata( // The namespace was dropped. The persisted metadata for the collection must be cleared. uassertStatusOKWithContext( dropChunksAndDeleteCollectionsEntry(opCtx, nss), - str::stream() << "Failed to clear persisted chunk metadata for collection '" << nss.ns() - << "'. Will be retried."); + str::stream() << "Failed to clear persisted chunk metadata for collection '" + << nss.toStringForErrorMsg() << "'. Will be retried."); return; } @@ -1255,8 +1259,8 @@ void ShardServerCatalogCacheLoader::_updatePersistedCollAndChunksMetadata( persistCollectionAndChangedChunks( opCtx, nss, *task.collectionAndChangedChunks, task.minQueryVersion), str::stream() << "Failed to update the persisted chunk metadata for collection '" - << nss.ns() << "' from '" << task.minQueryVersion.toString() << "' to '" - << task.maxQueryVersion.toString() << "'. Will be retried."); + << nss.toStringForErrorMsg() << "' from '" << task.minQueryVersion.toString() + << "' to '" << task.maxQueryVersion.toString() << "'. 
Will be retried."); LOGV2_FOR_CATALOG_REFRESH( 24112, @@ -1304,23 +1308,52 @@ void ShardServerCatalogCacheLoader::_updatePersistedDbMetadata(OperationContext* "db"_attr = dbName.toString()); } +NamespaceMetadataChangeNotifications::ScopedNotification +ShardServerCatalogCacheLoader::_forcePrimaryCollectionRefreshAndWaitForReplication( + OperationContext* opCtx, const NamespaceString& nss) { + auto selfShard = + uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, getSelfShardId(opCtx))); + + auto notif = _namespaceNotifications.createNotification(nss); + + auto cmdResponse = uassertStatusOK(selfShard->runCommandWithFixedRetryAttempts( + opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + "admin", + BSON("_flushRoutingTableCacheUpdates" << NamespaceStringUtil::serialize(nss)), + Seconds{30}, + Shard::RetryPolicy::kIdempotent)); + + uassertStatusOK(cmdResponse.commandStatus); + + uassertStatusOK(repl::ReplicationCoordinator::get(opCtx)->waitUntilOpTimeForRead( + opCtx, {LogicalTime::fromOperationTime(cmdResponse.response), boost::none})); + return notif; +} + CollectionAndChangedChunks ShardServerCatalogCacheLoader::_getCompletePersistedMetadataForSecondarySinceVersion( - OperationContext* opCtx, const NamespaceString& nss, const ChunkVersion& version) { + OperationContext* opCtx, + NamespaceMetadataChangeNotifications::ScopedNotification&& notif, + const NamespaceString& nss, + const ChunkVersion& version) { // Keep trying to load the metadata until we get a complete view without updates being // concurrently applied. while (true) { const auto beginRefreshState = [&]() { while (true) { - auto notif = _namespaceNotifications.createNotification(nss); - auto refreshState = uassertStatusOK(getPersistedRefreshFlags(opCtx, nss)); if (!refreshState.refreshing) { return refreshState; } - notif.get(opCtx); + // Blocking call to wait for the notification, get the most recent value, and + // recreate the notification under lock so that we don't miss any notifications. + auto notificationTime = _namespaceNotifications.get(opCtx, notif); + // Wait until the local lastApplied timestamp is the one from the notification. 
+ uassertStatusOK(repl::ReplicationCoordinator::get(opCtx)->waitUntilOpTimeForRead( + opCtx, {LogicalTime(notificationTime), boost::none})); } }(); diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader.h b/src/mongo/db/s/shard_server_catalog_cache_loader.h index 446de371751a8..b1a04867f887c 100644 --- a/src/mongo/db/s/shard_server_catalog_cache_loader.h +++ b/src/mongo/db/s/shard_server_catalog_cache_loader.h @@ -29,11 +29,34 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/operation_context_group.h" #include "mongo/db/s/namespace_metadata_change_notifications.h" +#include "mongo/platform/mutex.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/chunk_version.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" namespace mongo { @@ -71,11 +94,19 @@ class ShardServerCatalogCacheLoader : public CatalogCacheLoader { void shutDown() override; + /** + * Interrupts ongoing refreshes to prevent secondaries from waiting for opTimes from wrong terms + * in case of rollback. Primaries must step down before going through rollback, so this should + * only be run on secondaries. + */ + void onReplicationRollback(); + /** * Sets any notifications waiting for this version to arrive and invalidates the catalog cache's * chunk metadata for collection 'nss' so that the next caller provokes a refresh. */ - void notifyOfCollectionPlacementVersionUpdate(const NamespaceString& nss) override; + void notifyOfCollectionRefreshEndMarkerSeen(const NamespaceString& nss, + const Timestamp& commitTime) override; SemiFuture getChunksSince(const NamespaceString& nss, ChunkVersion version) override; @@ -440,6 +471,16 @@ class ShardServerCatalogCacheLoader : public CatalogCacheLoader { void _updatePersistedDbMetadata(OperationContext* opCtx, StringData dbName); + /** + * Sends _flushRoutingTableCacheUpdates to the primary to force it to refresh its routing table + * for collection 'nss' and then waits for the refresh to replicate to this node. Returns a + * notification that can be used to wait for the refreshing flag to be set to true in the + * config.collections entry to provide a consistent view of config.chunks. + */ + NamespaceMetadataChangeNotifications::ScopedNotification + _forcePrimaryCollectionRefreshAndWaitForReplication(OperationContext* opCtx, + const NamespaceString& nss); + /** * Attempts to read the collection and chunk metadata since 'version' from the shard persisted * metadata store. Continues to retry reading the metadata until a complete diff is read @@ -450,7 +491,10 @@ class ShardServerCatalogCacheLoader : public CatalogCacheLoader { * NamespaceNotFound error means the collection does not exist. 
*/ CollectionAndChangedChunks _getCompletePersistedMetadataForSecondarySinceVersion( - OperationContext* opCtx, const NamespaceString& nss, const ChunkVersion& version); + OperationContext* opCtx, + NamespaceMetadataChangeNotifications::ScopedNotification&& notif, + const NamespaceString& nss, + const ChunkVersion& version); // Loader used by the shard primary to retrieve the authoritative routing metadata from the // config server diff --git a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp index a4cc4fce1b863..39f61eea65d30 100644 --- a/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp +++ b/src/mongo/db/s/shard_server_catalog_cache_loader_test.cpp @@ -27,16 +27,40 @@ * it in the license file. */ -#include "mongo/db/dbdirectclient.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" #include "mongo/db/s/shard_server_catalog_cache_loader.h" #include "mongo/db/s/shard_server_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache_loader_mock.h" +#include "mongo/s/database_version.h" #include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -228,7 +252,7 @@ ShardServerCatalogCacheLoaderTest::setUpChunkLoaderWithFiveChunks() { void ShardServerCatalogCacheLoaderTest::refreshDatabaseOnRemoteLoader() { DatabaseType databaseType( - kNss.db().toString(), kShardId, DatabaseVersion{UUID::gen(), Timestamp{1, 1}}); + kNss.db_forTest().toString(), kShardId, DatabaseVersion{UUID::gen(), Timestamp{1, 1}}); _remoteLoaderMock->setDatabaseRefreshReturnValue(std::move(databaseType)); } @@ -525,125 +549,5 @@ TEST_F(ShardServerCatalogCacheLoaderTest, CollAndChunkTasksConsistency) { _shardLoader->getChunksSince(kNss, ChunkVersion::UNSHARDED()).get(); } -TEST_F(ShardServerCatalogCacheLoaderTest, setFCVForGetChunks) { - const auto kOriginalRole = serverGlobalParams.clusterRole; - const auto kOriginalFCV = serverGlobalParams.featureCompatibility.getVersion(); - - ON_BLOCK_EXIT([&] { - serverGlobalParams.clusterRole = kOriginalRole; - serverGlobalParams.mutableFeatureCompatibility.setVersion(kOriginalFCV); - }); - - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; - // (Generic FCV reference): for testing only. This comment is required by linter. 
- serverGlobalParams.mutableFeatureCompatibility.setVersion(multiversion::GenericFCV::kLastLTS); - - const ChunkVersion collectionPlacementVersion({OID::gen(), Timestamp(1, 1)}, {1, 2}); - const auto collectionType = makeCollectionType(collectionPlacementVersion); - auto setRemoteLoaderMockResponse = [&]() { - vector chunks = makeFiveChunks(collectionPlacementVersion); - _remoteLoaderMock->setCollectionRefreshReturnValue(collectionType); - _remoteLoaderMock->setChunkRefreshReturnValue(chunks); - }; - - FindCommandRequest persistedCacheQuery(NamespaceString::kShardConfigCollectionsNamespace); - DBDirectClient client(operationContext()); - - { - // Pause the thread processing the pending updates on metadata - FailPointEnableBlock failPoint("hangCollectionFlush"); - - setRemoteLoaderMockResponse(); - auto getChunksFuture = _shardLoader->getChunksSince(kNss, ChunkVersion::UNSHARDED()); - - _shardLoader->onFCVChanged(); - - // Should be able to join since downgrade should interrupt ongoing refreshes. - (void)getChunksFuture.waitNoThrow(); - } - - ASSERT_THROWS_CODE(_shardLoader->waitForCollectionFlush(operationContext(), kNss), - DBException, - ErrorCodes::StaleConfig); - - auto cachedDoc = client.findOne(persistedCacheQuery); - ASSERT_TRUE(cachedDoc.isEmpty()) << cachedDoc; - - setRemoteLoaderMockResponse(); - auto getChunksFuture = _shardLoader->getChunksSince(kNss, ChunkVersion::UNSHARDED()); - auto newChunks = getChunksFuture.get().changedChunks; - ASSERT_EQ(5, newChunks.size()); - - cachedDoc = client.findOne(persistedCacheQuery); - ASSERT_TRUE(cachedDoc.isEmpty()) << cachedDoc; - - serverGlobalParams.mutableFeatureCompatibility.setVersion(kOriginalFCV); - - setRemoteLoaderMockResponse(); - auto chunkVersionFrom = newChunks[2].getVersion(); - getChunksFuture = _shardLoader->getChunksSince(kNss, chunkVersionFrom); - newChunks = getChunksFuture.get().changedChunks; - ASSERT_EQ(3, newChunks.size()); - ASSERT_EQ(chunkVersionFrom, newChunks.front().getVersion()); - - _shardLoader->waitForCollectionFlush(operationContext(), kNss); - cachedDoc = client.findOne(persistedCacheQuery); - ASSERT_FALSE(cachedDoc.isEmpty()); -} - -TEST_F(ShardServerCatalogCacheLoaderTest, setFCVForGetDatabase) { - const auto kOriginalRole = serverGlobalParams.clusterRole; - const auto kOriginalFCV = serverGlobalParams.featureCompatibility.getVersion(); - - ON_BLOCK_EXIT([&] { - serverGlobalParams.clusterRole = kOriginalRole; - serverGlobalParams.mutableFeatureCompatibility.setVersion(kOriginalFCV); - }); - - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; - // (Generic FCV reference): for testing only. This comment is required by linter. - serverGlobalParams.mutableFeatureCompatibility.setVersion(multiversion::GenericFCV::kLastLTS); - - refreshDatabaseOnRemoteLoader(); - - FindCommandRequest persistedCacheQuery(NamespaceString::kShardConfigDatabasesNamespace); - DBDirectClient client(operationContext()); - const auto kDb = kNss.db(); - - { - // Pause the thread processing the pending updates on metadata - FailPointEnableBlock failPoint("hangDatabaseFlush"); - - // Put a first task in the list of pending updates on metadata (in-memory) - auto getDbFuture = _shardLoader->getDatabase(kDb); - - _shardLoader->onFCVChanged(); - - // Should be able to join since downgrade should interrupt ongoing refreshes. 
- (void)getDbFuture.waitNoThrow(); - } - - ASSERT_THROWS_CODE(_shardLoader->waitForDatabaseFlush(operationContext(), kDb), - DBException, - ErrorCodes::StaleDbVersion); - - auto cachedDoc = client.findOne(persistedCacheQuery); - ASSERT_TRUE(cachedDoc.isEmpty()) << cachedDoc; - - auto getDbFuture = _shardLoader->getDatabase(kDb); - getDbFuture.wait(); - - cachedDoc = client.findOne(persistedCacheQuery); - ASSERT_TRUE(cachedDoc.isEmpty()) << cachedDoc; - - serverGlobalParams.mutableFeatureCompatibility.setVersion(kOriginalFCV); - - getDbFuture = _shardLoader->getDatabase(kDb); - getDbFuture.wait(); - _shardLoader->waitForDatabaseFlush(operationContext(), kDb); - cachedDoc = client.findOne(persistedCacheQuery); - ASSERT_FALSE(cachedDoc.isEmpty()); -} - } // namespace } // namespace mongo diff --git a/src/mongo/db/s/shard_server_op_observer.cpp b/src/mongo/db/s/shard_server_op_observer.cpp index e13bc8fb98556..30e129f0a187a 100644 --- a/src/mongo/db/s/shard_server_op_observer.cpp +++ b/src/mongo/db/s/shard_server_op_observer.cpp @@ -29,42 +29,70 @@ #include "mongo/db/s/shard_server_op_observer.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/op_observer/op_observer_impl.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/balancer_stats_registry.h" #include "mongo/db/s/collection_critical_section_document_gen.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/migration_source_manager.h" -#include "mongo/db/s/migration_util.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/range_deletion_task_gen.h" #include "mongo/db/s/shard_identity_rollback_notifier.h" -#include "mongo/db/s/sharding_index_catalog_ddl_util.h" #include "mongo/db/s/sharding_initialization_mongod.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_recovery_service.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/type_shard_collection.h" +#include "mongo/db/s/type_shard_collection_gen.h" #include "mongo/db/s/type_shard_database.h" +#include "mongo/db/s/type_shard_database_gen.h" #include "mongo/db/s/type_shard_identity.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/s/balancer_configuration.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/cannot_implicitly_create_collection_info.h" -#include "mongo/s/grid.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/index_version.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kSharding namespace mongo { namespace { -const auto documentIdDecoration = OperationContext::declareDecoration(); +const auto documentIdDecoration = OplogDeleteEntryArgs::declareDecoration(); bool isStandaloneOrPrimary(OperationContext* opCtx) { auto replCoord = repl::ReplicationCoordinator::get(opCtx); - return replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin.toString()); + return replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin); } /** @@ -76,10 +104,11 @@ class CollectionPlacementVersionLogOpHandler final : public RecoveryUnit::Change CollectionPlacementVersionLogOpHandler(const NamespaceString& nss, bool droppingCollection) : _nss(nss), _droppingCollection(droppingCollection) {} - void commit(OperationContext* opCtx, boost::optional) override { + void commit(OperationContext* opCtx, boost::optional commitTime) override { invariant(opCtx->lockState()->isCollectionLockedForMode(_nss, MODE_IX)); + invariant(commitTime, "Invalid commit time"); - CatalogCacheLoader::get(opCtx).notifyOfCollectionPlacementVersionUpdate(_nss); + CatalogCacheLoader::get(opCtx).notifyOfCollectionRefreshEndMarkerSeen(_nss, *commitTime); // Force subsequent uses of the namespace to refresh the filtering metadata so they can // synchronize with any work happening on the primary (e.g., migration critical section). @@ -100,31 +129,6 @@ class CollectionPlacementVersionLogOpHandler final : public RecoveryUnit::Change const bool _droppingCollection; }; -/** - * Used to submit a range deletion task once it is certain that the update/insert to - * config.rangeDeletions is committed. - */ -class SubmitRangeDeletionHandler final : public RecoveryUnit::Change { -public: - SubmitRangeDeletionHandler(OperationContext* opCtx, RangeDeletionTask task) - : _opCtx(opCtx), _task(std::move(task)) {} - - void commit(OperationContext* opCtx, boost::optional) override { - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The feature - // flag is used to turn on new range deleter on startup. - if (!feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - migrationutil::submitRangeDeletionTask(_opCtx, _task).getAsync([](auto) {}); - } - } - - void rollback(OperationContext* opCtx) override {} - -private: - OperationContext* _opCtx; - RangeDeletionTask _task; -}; - - /** * Invalidates the in-memory routing table cache when a collection is dropped, so the next caller * with routing information will provoke a routing table refresh and see the drop. 
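The handlers in this file hang their side effects off the storage transaction: CollectionPlacementVersionLogOpHandler now insists on a commit timestamp and forwards it to notifyOfCollectionRefreshEndMarkerSeen, and the deleted SubmitRangeDeletionHandler relied on the same onCommit hook. Below is a simplified, self-contained sketch of that commit-handler pattern; FakeRecoveryUnit and NotifyRefreshSeen are illustrative names, not the real RecoveryUnit::Change API.

```cpp
// Simplified sketch of the onCommit-handler pattern: work registered during a write runs
// only after the storage transaction actually commits, and receives the commit timestamp.
#include <iostream>
#include <memory>
#include <optional>
#include <string>
#include <vector>

struct Change {
    virtual ~Change() = default;
    virtual void commit(std::optional<unsigned> commitTime) = 0;
    virtual void rollback() = 0;
};

class FakeRecoveryUnit {
public:
    void registerChange(std::unique_ptr<Change> change) {
        _changes.push_back(std::move(change));
    }

    void commitUnitOfWork(unsigned commitTime) {
        for (auto& c : _changes) c->commit(commitTime);
        _changes.clear();
    }

    void abandonUnitOfWork() {
        for (auto& c : _changes) c->rollback();
        _changes.clear();
    }

private:
    std::vector<std::unique_ptr<Change>> _changes;
};

// Plays the role of CollectionPlacementVersionLogOpHandler: tell interested parties that
// the refresh end marker for a namespace reached this node, including the commit time.
class NotifyRefreshSeen final : public Change {
public:
    explicit NotifyRefreshSeen(std::string nss) : _nss(std::move(nss)) {}

    void commit(std::optional<unsigned> commitTime) override {
        std::cout << "notify refresh end marker for " << _nss << " at " << *commitTime << "\n";
    }

    void rollback() override { /* nothing to undo: we never notified */ }

private:
    std::string _nss;
};

int main() {
    FakeRecoveryUnit ru;
    ru.registerChange(std::make_unique<NotifyRefreshSeen>("config.cache.chunks.test.coll"));
    ru.commitUnitOfWork(/*commitTime=*/7);  // Only now does the notification fire.
}
```

Because the notification only fires from commit(), a rolled-back write never advertises a refresh marker that was never made durable.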
@@ -153,6 +157,11 @@ void onConfigDeleteInvalidateCachedCollectionMetadataAndNotify(OperationContext* AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); AutoGetCollection autoColl(opCtx, deletedNss, MODE_IX); + tassert(7751400, + str::stream() << "Untimestamped writes to " + << NamespaceString::kShardConfigCollectionsNamespace.toStringForErrorMsg() + << " are not allowed", + opCtx->recoveryUnit()->isTimestamped()); opCtx->recoveryUnit()->registerChange(std::make_unique( deletedNss, /* droppingCollection */ true)); } @@ -181,7 +190,8 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { const auto& nss = coll->ns(); for (auto it = begin; it != end; ++it) { @@ -219,11 +229,6 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx, auto deletionTask = RangeDeletionTask::parse(IDLParserContext("ShardServerOpObserver"), insertedDoc); - if (!deletionTask.getPending()) { - opCtx->recoveryUnit()->registerChange( - std::make_unique(opCtx, deletionTask)); - } - const auto numOrphanDocs = deletionTask.getNumOrphanDocs(); BalancerStatsRegistry::get(opCtx)->onRangeDeletionTaskInsertion( deletionTask.getCollectionUuid(), numOrphanDocs); @@ -237,7 +242,7 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx, opCtx->recoveryUnit()->onCommit( [insertedNss = collCSDoc.getNss(), reason = collCSDoc.getReason().getOwned()]( OperationContext* opCtx, boost::optional) { - if (nsIsDbOnly(insertedNss.ns())) { + if (nsIsDbOnly(NamespaceStringUtil::serialize(insertedNss))) { boost::optional lockDbIfNotPrimary; if (!isStandaloneOrPrimary(opCtx)) { lockDbIfNotPrimary.emplace(opCtx, insertedNss.dbName(), MODE_IX); @@ -270,7 +275,9 @@ void ShardServerOpObserver::onInserts(OperationContext* opCtx, } } -void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) { +void ShardServerOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { const auto& updateDoc = args.updateArgs->update; // Most of these handlers do not need to run when the update is a full document replacement. // An empty updateDoc implies a no-op update and is not a valid oplog entry. @@ -318,6 +325,12 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); AutoGetCollection autoColl(opCtx, updatedNss, MODE_IX); if (refreshingFieldNewVal.isBoolean() && !refreshingFieldNewVal.boolean()) { + tassert(7751401, + str::stream() + << "Untimestamped writes to " + << NamespaceString::kShardConfigCollectionsNamespace.toStringForErrorMsg() + << " are not allowed", + opCtx->recoveryUnit()->isTimestamped()); opCtx->recoveryUnit()->registerChange( std::make_unique( updatedNss, /* droppingCollection */ false)); @@ -361,7 +374,7 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE // block. 
AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); - DatabaseName dbName(boost::none, db); + DatabaseName dbName = DatabaseNameUtil::deserialize(boost::none, db); AutoGetDb autoDb(opCtx, dbName, MODE_X); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, dbName); @@ -369,26 +382,6 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE } } - if (needsSpecialHandling && args.coll->ns() == NamespaceString::kRangeDeletionNamespace) { - if (!isStandaloneOrPrimary(opCtx)) - return; - - const auto pendingFieldRemovedStatus = - update_oplog_entry::isFieldRemovedByUpdate(args.updateArgs->update, "pending"); - - if (pendingFieldRemovedStatus == update_oplog_entry::FieldRemovedStatus::kFieldRemoved) { - auto deletionTask = RangeDeletionTask::parse(IDLParserContext("ShardServerOpObserver"), - args.updateArgs->updatedDoc); - - if (deletionTask.getDonorShardId() != ShardingState::get(opCtx)->shardId()) { - // Range deletion tasks for moved away chunks are scheduled through the - // MigrationCoordinator, so only schedule a task for received chunks. - opCtx->recoveryUnit()->registerChange( - std::make_unique(opCtx, deletionTask)); - } - } - } - if (args.coll->ns() == NamespaceString::kCollectionCriticalSectionsNamespace && !sharding_recovery_util::inRecoveryMode(opCtx)) { const auto collCSDoc = CollectionCriticalSectionDocument::parse( @@ -398,7 +391,7 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE opCtx->recoveryUnit()->onCommit( [updatedNss = collCSDoc.getNss(), reason = collCSDoc.getReason().getOwned()]( OperationContext* opCtx, boost::optional) { - if (nsIsDbOnly(updatedNss.ns())) { + if (nsIsDbOnly(NamespaceStringUtil::serialize(updatedNss))) { boost::optional lockDbIfNotPrimary; if (!isStandaloneOrPrimary(opCtx)) { lockDbIfNotPrimary.emplace(opCtx, updatedNss.dbName(), MODE_IX); @@ -445,15 +438,17 @@ void ShardServerOpObserver::onUpdate(OperationContext* opCtx, const OplogUpdateE void ShardServerOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { if (coll->ns() == NamespaceString::kCollectionCriticalSectionsNamespace || coll->ns() == NamespaceString::kRangeDeletionNamespace) { - documentIdDecoration(opCtx) = doc; + documentIdDecoration(args) = doc; } else { // Extract the _id field from the document. If it does not have an _id, use the // document itself as the _id. - documentIdDecoration(opCtx) = doc["_id"] ? doc["_id"].wrap() : doc; + documentIdDecoration(args) = doc["_id"] ? 
doc["_id"].wrap() : doc; } } @@ -557,7 +552,7 @@ void ShardServerOpObserver::onModifyCollectionShardingIndexCatalog(OperationCont uassert(7079505, format(FMT_STRING("The critical section for collection {} must be taken in " "order to execute this command"), - renameEntry.getToNss().toString()), + renameEntry.getToNss().toStringForErrorMsg()), toCSR->getCriticalSectionSignal(opCtx, ShardingMigrationCriticalSection::kWrite)); toCSR->replaceIndexes(opCtx, fromIndexes, {*uuid, renameEntry.getLastmod()}); @@ -572,9 +567,10 @@ void ShardServerOpObserver::onModifyCollectionShardingIndexCatalog(OperationCont void ShardServerOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { const auto& nss = coll->ns(); - auto& documentId = documentIdDecoration(opCtx); + auto& documentId = documentIdDecoration(args); invariant(!documentId.isEmpty()); if (nss == NamespaceString::kShardConfigCollectionsNamespace) { @@ -596,7 +592,7 @@ void ShardServerOpObserver::onDelete(OperationContext* opCtx, // TODO SERVER-58223: evaluate whether this is safe or whether acquiring the lock can block. AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); - DatabaseName dbName(boost::none, deletedDatabase); + DatabaseName dbName = DatabaseNameUtil::deserialize(boost::none, deletedDatabase); AutoGetDb autoDb(opCtx, dbName, MODE_X); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, dbName); scopedDss->clearDbInfo(opCtx); @@ -628,7 +624,7 @@ void ShardServerOpObserver::onDelete(OperationContext* opCtx, opCtx->recoveryUnit()->onCommit( [deletedNss = collCSDoc.getNss(), reason = collCSDoc.getReason().getOwned()]( OperationContext* opCtx, boost::optional) { - if (nsIsDbOnly(deletedNss.ns())) { + if (nsIsDbOnly(NamespaceStringUtil::serialize(deletedNss))) { boost::optional lockDbIfNotPrimary; if (!isStandaloneOrPrimary(opCtx)) { lockDbIfNotPrimary.emplace(opCtx, deletedNss.dbName(), MODE_IX); @@ -705,6 +701,7 @@ void ShardServerOpObserver::onCreateCollection(OperationContext* opCtx, const OplogSlot& createOpTime, bool fromMigrate) { // Only the shard primay nodes control the collection creation and secondaries just follow + // Secondaries CSR will be the defaulted one (UNKNOWN in most of the cases) if (!opCtx->writesAreReplicated()) { return; } @@ -729,10 +726,12 @@ void ShardServerOpObserver::onCreateCollection(OperationContext* opCtx, oss._allowCollectionCreation); // If the check above passes, this means the collection doesn't exist and is being created and - // that the caller will be responsible to eventially set the proper placement version + // that the caller will be responsible to eventially set the proper placement version. 
auto scopedCsr = CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, collectionName); - if (!scopedCsr->getCurrentMetadataIfKnown()) { + if (oss._forceCSRAsUnknownAfterCollectionCreation) { + scopedCsr->clearFilteringMetadata(opCtx); + } else if (!scopedCsr->getCurrentMetadataIfKnown()) { scopedCsr->setFilteringMetadata(opCtx, CollectionMetadata()); } } @@ -741,7 +740,8 @@ repl::OpTime ShardServerOpObserver::onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { if (collectionName == NamespaceString::kServerConfigurationNamespace) { // Dropping system collections is not allowed for end users invariant(!opCtx->writesAreReplicated()); @@ -803,9 +803,20 @@ void ShardServerOpObserver::onCollMod(OperationContext* opCtx, abortOngoingMigrationIfNeeded(opCtx, nss); }; -void ShardServerOpObserver::_onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) { +void ShardServerOpObserver::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { ShardingRecoveryService::get(opCtx)->recoverStates(opCtx, rbInfo.rollbackNamespaces); + + // If writes to config.cache.collections or config.cache.* have been rolled back, interrupt the + // SSCCL to ensure secondary waits for replication do not use incorrect opTimes. + if (std::any_of(rbInfo.rollbackNamespaces.begin(), + rbInfo.rollbackNamespaces.end(), + [](const NamespaceString& nss) { + return nss == NamespaceString::kShardConfigCollectionsNamespace || + nss.isConfigDotCacheDotChunks(); + })) { + CatalogCacheLoader::get(opCtx).onReplicationRollback(); + } } diff --git a/src/mongo/db/s/shard_server_op_observer.h b/src/mongo/db/s/shard_server_op_observer.h index 9b48d35c37280..1c507fb98b2ac 100644 --- a/src/mongo/db/s/shard_server_op_observer.h +++ b/src/mongo/db/s/shard_server_op_observer.h @@ -29,7 +29,23 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -37,7 +53,7 @@ namespace mongo { * OpObserver which is installed on the op observers chain when the server is running as a shard * server (--shardsvr). 
*/ -class ShardServerOpObserver final : public OpObserver { +class ShardServerOpObserver final : public OpObserverNoop { ShardServerOpObserver(const ShardServerOpObserver&) = delete; ShardServerOpObserver& operator=(const ShardServerOpObserver&) = delete; @@ -45,20 +61,16 @@ class ShardServerOpObserver final : public OpObserver { ShardServerOpObserver(); ~ShardServerOpObserver(); + + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfigAndSystem, NamespaceFilter::kConfigAndSystem}; + } + void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, const NamespaceString& nss, const UUID& uuid, BSONObj indexDoc) override; - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - void onCreateIndex(OperationContext* opCtx, const NamespaceString& nss, const UUID& uuid, @@ -76,60 +88,29 @@ class ShardServerOpObserver final : public OpObserver { void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) override; - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) override {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) override {} - void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator begin, std::vector::const_iterator end, std::vector fromMigrate, - bool defaultFromMigrate) override; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) override; - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} - - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) override; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) override; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) override; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) override; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final{}; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) override; void onCreateCollection(OperationContext* opCtx, const CollectionPtr& coll, @@ -146,14 +127,12 @@ class ShardServerOpObserver final : public OpObserver { const CollectionOptions& oldCollOptions, boost::optional indexInfo) override; - void 
onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) override {} - - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) override; + CollectionDropType dropType, + bool markFromMigrate) override; void onDropIndex(OperationContext* opCtx, const NamespaceString& nss, @@ -161,94 +140,8 @@ class ShardServerOpObserver final : public OpObserver { const std::string& indexName, const BSONObj& indexInfo) override; - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override {} - - virtual void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) override {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) override { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) override {} - - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) override {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) override {} - - void onTransactionStart(OperationContext* opCtx) override {} - - void onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) override {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept override {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) override { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) override {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) override {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) override {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) override {} - -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo); + void onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& 
rbInfo) override; }; } // namespace mongo diff --git a/src/mongo/db/s/shard_server_test_fixture.cpp b/src/mongo/db/s/shard_server_test_fixture.cpp index 4eaaf00a76967..3e46e222005bc 100644 --- a/src/mongo/db/s/shard_server_test_fixture.cpp +++ b/src/mongo/db/s/shard_server_test_fixture.cpp @@ -27,18 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/s/shard_server_test_fixture.h" +#include +#include +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/commands.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/s/shard_server_catalog_cache_loader.h" +#include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context_d_test_fixture.h" #include "mongo/s/catalog/sharding_catalog_client_impl.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/config_server_catalog_cache_loader.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/s/shard_server_test_fixture.h b/src/mongo/db/s/shard_server_test_fixture.h index 39c79947bfc1b..705db44de21e6 100644 --- a/src/mongo/db/s/shard_server_test_fixture.h +++ b/src/mongo/db/s/shard_server_test_fixture.h @@ -29,9 +29,19 @@ #pragma once +#include +#include + +#include "mongo/bson/oid.h" +#include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/s/sharding_mongod_test_fixture.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/catalog_cache_loader_mock.h" #include "mongo/s/catalog_cache_mock.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/db/s/sharded_collmod.idl b/src/mongo/db/s/sharded_collmod.idl index 998db7303c8b3..c08a8b213e584 100644 --- a/src/mongo/db/s/sharded_collmod.idl +++ b/src/mongo/db/s/sharded_collmod.idl @@ -62,3 +62,4 @@ commands: performViewChange: type: optionalBool description: "Whether should perform view catalog change, set only for commands sent to the primary shard" + reply_type: CollModReply diff --git a/src/mongo/db/s/sharded_index_catalog_commands.idl b/src/mongo/db/s/sharded_index_catalog_commands.idl index 5e37f6c45e3fa..f95c0ed6763eb 100644 --- a/src/mongo/db/s/sharded_index_catalog_commands.idl +++ b/src/mongo/db/s/sharded_index_catalog_commands.idl @@ -109,6 +109,7 @@ commands: indexVersion: type: CollectionIndexes description: "Collection index version with uuid" + reply_type: OkReply _configsvrCommitIndex: command_name: _configsvrCommitIndex diff --git a/src/mongo/db/s/sharded_index_consistency_server_status.cpp b/src/mongo/db/s/sharded_index_consistency_server_status.cpp index 4ee7873755b18..f2dd87a4311bf 100644 --- a/src/mongo/db/s/sharded_index_consistency_server_status.cpp +++ b/src/mongo/db/s/sharded_index_consistency_server_status.cpp @@ -27,11 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/periodic_sharded_index_consistency_checker.h" #include "mongo/db/s/sharding_runtime_d_params_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/platform/atomic_word.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/sharded_rename_collection.idl b/src/mongo/db/s/sharded_rename_collection.idl index 5d0ef913d3c56..6cb56dddd6b74 100644 --- a/src/mongo/db/s/sharded_rename_collection.idl +++ b/src/mongo/db/s/sharded_rename_collection.idl @@ -80,6 +80,7 @@ commands: type: uuid description: "ID of the existing collection getting dropped." optional: true + reply_type: OkReply _shardsvrRenameCollectionParticipantUnblock: command_name: _shardsvrRenameCollectionParticipantUnblock @@ -94,6 +95,7 @@ commands: sourceUUID: type: uuid description: "ID of the collection getting renamed." + reply_type: OkReply structs: RenameCollectionCoordinatorDocument: diff --git a/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp b/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp index 23d3a49da254f..0d92a3e31ef5b 100644 --- a/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp +++ b/src/mongo/db/s/sharding_catalog_client_aggregations_test.cpp @@ -26,23 +26,53 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -116,10 +146,10 @@ class CatalogClientAggregationsTest : public ConfigServerTestFixture { return boost::optional(boost::none); } - if (nssToUuid.find(nss.toString()) == 
nssToUuid.end()) - nssToUuid.emplace(nss.toString(), UUID::gen()); + if (nssToUuid.find(nss.toString_forTest()) == nssToUuid.end()) + nssToUuid.emplace(nss.toString_forTest(), UUID::gen()); - const UUID& collUuid = nssToUuid.at(nss.toString()); + const UUID& collUuid = nssToUuid.at(nss.toString_forTest()); return boost::optional(collUuid); }(); @@ -177,8 +207,6 @@ void assertSameHistoricalPlacement(HistoricalPlacement historicalPlacement, // ######################## PlacementHistory: Query by collection ########################## TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_ShardedCollection) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory for a sharded collection should return the shards that owned the * collection at the given clusterTime*/ auto opCtx = operationContext(); @@ -253,8 +281,6 @@ TEST_F(CatalogClientAggregationsTest, } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_DifferentTimestamp) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Query the placementHistory at different timestamp should return different results*/ auto opCtx = operationContext(); @@ -295,8 +321,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_D } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_SameTimestamp) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Having different namespaces for the same timestamp should not influece the expected result*/ auto opCtx = operationContext(); @@ -328,8 +352,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_S TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_InvertedTimestampOrder) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Ordering of document insertion into config.placementHistory must not matter*/ auto opCtx = operationContext(); @@ -350,8 +372,6 @@ TEST_F(CatalogClientAggregationsTest, TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_ReturnPrimaryShardWhenNoShards) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report only the primary shard when an empty list of shards * is reported for the collection*/ auto opCtx = operationContext(); @@ -378,8 +398,6 @@ TEST_F(CatalogClientAggregationsTest, } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_AddPrimaryShard) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report the primary shard in addition to the list of shards * related to db.collection. 
Primary shards must always be returned*/ auto opCtx = operationContext(); @@ -406,8 +424,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_A TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_AddPrimaryShardAtSameTimestamp) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report the primary shard in addition to the list of shards * related to db.collection. Primary shards must always be returned*/ auto opCtx = operationContext(); @@ -428,15 +444,15 @@ TEST_F(CatalogClientAggregationsTest, } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_WithMarkers) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; auto opCtx = operationContext(); PlacementDescriptor _startFcvMarker = { Timestamp(1, 0), - NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.toString_forTest(), {"shard1", "shard2", "shard3", "shard4", "shard5"}}; PlacementDescriptor _endFcvMarker = { - Timestamp(3, 0), NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), {}}; + Timestamp(3, 0), + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.toString_forTest(), + {}}; // initialization setupConfigPlacementHistory( @@ -459,25 +475,23 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForCollAtClusterTime_W // Asking for a timestamp before the closing marker should return the shards from the first // marker of the fcv upgrade. As result, "isExact" is expected to be false auto historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime( - opCtx, NamespaceString("db.collection1"), Timestamp(2, 0)); + opCtx, NamespaceString::createNamespaceString_forTest("db.collection1"), Timestamp(2, 0)); assertSameHistoricalPlacement( historicalPlacement, {"shard1", "shard2", "shard3", "shard4", "shard5"}, false); // Asking for a timestamp after the closing marker should return the expected shards historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime( - opCtx, NamespaceString("db.collection1"), Timestamp(3, 0)); + opCtx, NamespaceString::createNamespaceString_forTest("db.collection1"), Timestamp(3, 0)); assertSameHistoricalPlacement( historicalPlacement, {"shard1", "shard2", "shard3", "shard4"}, true); historicalPlacement = catalogClient()->getShardsThatOwnDataForCollAtClusterTime( - opCtx, NamespaceString("db.collection1"), Timestamp(6, 0)); + opCtx, NamespaceString::createNamespaceString_forTest("db.collection1"), Timestamp(6, 0)); assertSameHistoricalPlacement(historicalPlacement, {"shard1"}, true); } // ######################## PlacementHistory: Query by database ############################ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_SingleDatabase) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report all the shards for every collection belonging to * the input db*/ auto opCtx = operationContext(); @@ -497,8 +511,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Sin } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_MultipleDatabases) { - 
RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report all the shards for every collection belonging to * the input db*/ auto opCtx = operationContext(); @@ -529,8 +541,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Mul } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_DifferentTimestamp) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Query the placementHistory at different timestamp should return different results*/ auto opCtx = operationContext(); @@ -571,8 +581,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Dif } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_SameTimestamp) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Having different namespaces for the same timestamp should not influece the expected result*/ auto opCtx = operationContext(); @@ -600,8 +608,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Sam TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_InvertedTimestampOrder) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Ordering of document insertion into config.placementHistory must not matter*/ auto opCtx = operationContext(); @@ -621,8 +627,6 @@ TEST_F(CatalogClientAggregationsTest, } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_NoShardsForDb) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report no shards if the list of shards belonging to every * collection and the db is empty*/ auto opCtx = operationContext(); @@ -649,8 +653,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_NoS } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_NewShardForDb) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must correctly identify a new primary for the db*/ auto opCtx = operationContext(); @@ -675,15 +677,15 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_New } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_WithMarkers) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; auto opCtx = operationContext(); PlacementDescriptor _startFcvMarker = { Timestamp(1, 0), - NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.toString_forTest(), {"shard1", "shard2", "shard3", "shard4", "shard5"}}; PlacementDescriptor _endFcvMarker = { - Timestamp(3, 0), NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), {}}; + Timestamp(3, 0), + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.toString_forTest(), + {}}; // initialization setupConfigPlacementHistory( @@ -706,25 +708,23 @@ 
TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataForDbAtClusterTime_Wit // Asking for a timestamp before the closing marker should return the shards from the first // marker of the fcv upgrade. As result, "isExact" is expected to be false auto historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime( - opCtx, NamespaceString("db"), Timestamp(2, 0)); + opCtx, NamespaceString::createNamespaceString_forTest("db"), Timestamp(2, 0)); assertSameHistoricalPlacement( historicalPlacement, {"shard1", "shard2", "shard3", "shard4", "shard5"}, false); // Asking for a timestamp after the closing marker should return the expected shards historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime( - opCtx, NamespaceString("db"), Timestamp(3, 0)); + opCtx, NamespaceString::createNamespaceString_forTest("db"), Timestamp(3, 0)); assertSameHistoricalPlacement( historicalPlacement, {"shard1", "shard2", "shard3", "shard4"}, true); historicalPlacement = catalogClient()->getShardsThatOwnDataForDbAtClusterTime( - opCtx, NamespaceString("db"), Timestamp(7, 0)); + opCtx, NamespaceString::createNamespaceString_forTest("db"), Timestamp(7, 0)); assertSameHistoricalPlacement(historicalPlacement, {"shard1", "shard2", "shard3"}, true); } // ######################## PlacementHistory: Query the entire cluster ################## TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SingleDatabase) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report all the shards for every collection and db*/ auto opCtx = operationContext(); @@ -743,8 +743,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SingleDa } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_MultipleDatabases) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report all the shards for every collection and db*/ auto opCtx = operationContext(); @@ -766,8 +764,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_Multiple } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_DifferentTimestamp) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Query the placementHistory at different timestamp should return different results*/ auto opCtx = operationContext(); @@ -808,8 +804,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_Differen } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SameTimestamp) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Having different namespaces for the same timestamp should not influence the expected * result*/ auto opCtx = operationContext(); @@ -833,8 +827,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_SameTime } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_InvertedTimestampOrder) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Ordering of document insertion into config.placementHistory must not matter*/ auto opCtx = 
operationContext(); @@ -856,8 +848,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_Inverted } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_NoShards) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Quering the placementHistory must report no shards if the list of shards belonging to * every db.collection and db is empty*/ auto opCtx = operationContext(); @@ -884,15 +874,15 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_NoShards } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_WithMarkers) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; auto opCtx = operationContext(); PlacementDescriptor _startFcvMarker = { Timestamp(1, 0), - NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.toString_forTest(), {"shard1", "shard2", "shard3", "shard4"}}; PlacementDescriptor _endFcvMarker = { - Timestamp(3, 0), NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), {}}; + Timestamp(3, 0), + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.toString_forTest(), + {}}; // initialization setupConfigPlacementHistory( @@ -930,8 +920,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_WithMark // ######################## PlacementHistory: Regex Stage ##################### TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexStage_ConfigSystem) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*The regex stage must match correctly the config.system.namespaces collection*/ auto opCtx = operationContext(); @@ -953,8 +941,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexSta } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexStage_NssWithPrefix) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*The regex stage must match correctly the input namespaces*/ auto opCtx = operationContext(); @@ -1004,8 +990,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexSta } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexStage_DbWithSymbols) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*The regex stage must correctly escape special character*/ auto opCtx = operationContext(); @@ -1041,8 +1025,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_RegexSta // ######################## PlacementHistory: EmptyHistory ##################### TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_EmptyHistory) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; // Setup a shard to perform a write into the config DB and initialize a committed OpTime // (required to perform a snapshot read of the placementHistory). 
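Editor's note on the deletions repeated through these test hunks: every removed pair of lines is the same scoped override of `featureFlagHistoricalPlacementShardingCatalog`. For readers who have not met the helper being removed, the sketch below shows the general pattern it implements; the header path and the use of this fixture are assumptions made for the example, not code from the change.

```cpp
// Illustrative sketch only: RAIIServerParameterControllerForTest overrides a named
// server parameter for its own lifetime and restores the previous value when it goes
// out of scope, which is why each of these tests used to open with it.
#include "mongo/idl/server_parameter_test_util.h"  // header path assumed
#include "mongo/unittest/assert.h"
#include "mongo/unittest/framework.h"

namespace mongo {

TEST_F(CatalogClientAggregationsTest, ExampleScopedFeatureFlagOverride) {
    // While 'flagController' is alive, flag-gated code behaves as if the flag were enabled.
    RAIIServerParameterControllerForTest flagController{
        "featureFlagHistoricalPlacementShardingCatalog", true};

    // ... exercise the flag-gated code path here ...
}  // The previous flag value is restored when 'flagController' is destroyed.

}  // namespace mongo
```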
setupShards({ShardType("shardName", "host01")}); @@ -1070,8 +1052,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_EmptyHis // ######################## PlacementHistory: InvalidOptions ##################### TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_InvalidOptions) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; /*Testing input validation*/ auto opCtx = operationContext(); @@ -1104,9 +1084,6 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_InvalidO // ######################## PlacementHistory: Clean-up ##################### TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_CleanUp) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; - auto opCtx = operationContext(); // Insert the initial content @@ -1188,16 +1165,15 @@ TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_CleanUp) } TEST_F(CatalogClientAggregationsTest, GetShardsThatOwnDataAtClusterTime_CleanUp_NewMarkers) { - RAIIServerParameterControllerForTest featureFlagHistoricalPlacementShardingCatalog{ - "featureFlagHistoricalPlacementShardingCatalog", true}; - auto opCtx = operationContext(); PlacementDescriptor startFcvMarker = { Timestamp(1, 0), - NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.toString_forTest(), {"shard1", "shard2", "shard3", "shard4"}}; PlacementDescriptor endFcvMarker = { - Timestamp(3, 0), NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(), {}}; + Timestamp(3, 0), + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker.toString_forTest(), + {}}; // initialization setupConfigPlacementHistory( diff --git a/src/mongo/db/s/sharding_config_server_parameters.idl b/src/mongo/db/s/sharding_config_server_parameters.idl index 4bd1c58597e1c..915eda6620d76 100644 --- a/src/mongo/db/s/sharding_config_server_parameters.idl +++ b/src/mongo/db/s/sharding_config_server_parameters.idl @@ -74,3 +74,25 @@ server_parameters: cpp_vartype: AtomicWord cpp_varname: balancerMigrationsThrottlingMs default: 1000 # 1 sec + + balancerChunksSelectionTimeoutMs: + description: >- + Maximum time in milliseconds the balancer will spend deciding which ranges to move in + the current balancing round. + set_at: [startup, runtime] + cpp_vartype: AtomicWord + cpp_varname: balancerChunksSelectionTimeoutMs + default: 5000 # 5 seconds + + newShardExistingClusterTimeKeysExpirationSecs: + description: >- + The amount of time in seconds that the config server should wait before removing the + key documents fetched from a replica set when it gets added to the cluster + as a shard. 
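The config-server parameters added to this IDL file are declared with an `AtomicWord` `cpp_vartype`, which the IDL compiler turns into global atomic variables. Assuming the usual `<idl file>_gen.h` generated header and the `cpp_varname`s declared in this hunk, reading them looks roughly like the sketch below; this is illustrative, not code from the change.

```cpp
// Illustrative sketch, assuming the IDL compiler emits these parameters into
// sharding_config_server_parameters_gen.h as global AtomicWord variables (the usual
// pattern for cpp_vartype: AtomicWord declarations). Because the parameters are
// set_at [startup, runtime], .load() also reflects later setParameter changes.
#include "mongo/db/s/sharding_config_server_parameters_gen.h"  // generated header, path assumed
#include "mongo/util/duration.h"

namespace mongo {

Milliseconds chunksSelectionTimeout() {
    return Milliseconds{balancerChunksSelectionTimeoutMs.load()};
}

Seconds newShardClusterTimeKeysExpiration() {
    return Seconds{gNewShardExistingClusterTimeKeysExpirationSecs.load()};
}

}  // namespace mongo
```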
+ set_at: [ startup, runtime ] + cpp_vartype: AtomicWord + cpp_varname: gNewShardExistingClusterTimeKeysExpirationSecs + default: + expr: 7 * 24 * 3600 # 1 week + validator: + gte: 1 diff --git a/src/mongo/db/s/sharding_ddl_coordinator.cpp b/src/mongo/db/s/sharding_ddl_coordinator.cpp index 0606cba0993fe..da8ccded87f0f 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator.cpp +++ b/src/mongo/db/s/sharding_ddl_coordinator.cpp @@ -28,25 +28,59 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/sharding_ddl_coordinator.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/global_user_write_block_state.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" #include "mongo/db/s/sharding_ddl_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_util.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" #include "mongo/s/sharding_feature_flags_gen.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/future_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -62,6 +96,57 @@ const Backoff kExponentialBackoff(Seconds(1), Milliseconds::max()); } // namespace +template +ExecutorFuture ShardingDDLCoordinator::_acquireLockAsync( + std::shared_ptr executor, + const CancellationToken& token, + const T& resource, + LockMode lockMode) { + return AsyncTry([this, resource, lockMode] { + auto opCtxHolder = cc().makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + + const auto coorName = DDLCoordinatorType_serializer(_coordId.getOperationType()); + + const auto lockTimeOut = [&]() -> Milliseconds { + if (auto sfp = overrideDDLLockTimeout.scoped(); MONGO_unlikely(sfp.isActive())) { + if (auto timeoutElem = sfp.getData()["timeoutMillisecs"]; timeoutElem.ok()) { + const auto timeoutMillisecs = Milliseconds(timeoutElem.safeNumberLong()); + LOGV2(6320700, + "Overriding DDL lock timeout", + "timeout"_attr = timeoutMillisecs); + return timeoutMillisecs; + } + } + return DDLLockManager::kDefaultLockTimeout; + }(); + + _scopedLocks.emplace(DDLLockManager::ScopedBaseDDLLock{opCtx, + _locker.get(), + resource, + coorName, + lockMode, + Date_t::now() + 
lockTimeOut, + false /* waitForRecovery */}); + }) + .until([this, resource, lockMode](Status status) { + if (!status.isOK()) { + LOGV2_WARNING(6819300, + "DDL lock acquisition attempt failed", + "coordinatorId"_attr = _coordId, + "resource"_attr = toStringForLogging(resource), + "mode"_attr = lockMode, + "error"_attr = redact(status)); + } + // Sharding DDL operations are not rollbackable so in case we recovered a coordinator + // from disk we need to ensure eventual completion of the DDL operation, so we must + // retry until we manage to acquire the lock. + return (!_recoveredFromDisk) || status.isOK(); + }) + .withBackoffBetweenIterations(kExponentialBackoff) + .on(**executor, token); +} + ShardingDDLCoordinatorMetadata extractShardingDDLCoordinatorMetadata(const BSONObj& coorDoc) { return ShardingDDLCoordinatorMetadata::parse(IDLParserContext("ShardingDDLCoordinatorMetadata"), coorDoc); @@ -183,49 +268,53 @@ ExecutorFuture ShardingDDLCoordinator::_translateTimeseriesNss( .on(**executor, token); } -ExecutorFuture ShardingDDLCoordinator::_acquireLockAsync( +ExecutorFuture ShardingDDLCoordinator::_acquireAllLocksAsync( + OperationContext* opCtx, std::shared_ptr executor, - const CancellationToken& token, - StringData resource) { - return AsyncTry([this, resource = resource.toString()] { - auto opCtxHolder = cc().makeOperationContext(); - auto* opCtx = opCtxHolder.get(); - auto ddlLockManager = DDLLockManager::get(opCtx); - - const auto coorName = DDLCoordinatorType_serializer(_coordId.getOperationType()); - - const auto lockTimeOut = [&]() -> Milliseconds { - if (auto sfp = overrideDDLLockTimeout.scoped(); MONGO_unlikely(sfp.isActive())) { - if (auto timeoutElem = sfp.getData()["timeoutMillisecs"]; timeoutElem.ok()) { - const auto timeoutMillisecs = Milliseconds(timeoutElem.safeNumberLong()); - LOGV2(6320700, - "Overriding DDL lock timeout", - "timeout"_attr = timeoutMillisecs); - return timeoutMillisecs; - } - } - return DDLLockManager::kDefaultLockTimeout; - }(); + const CancellationToken& token) { + + // Fetching all the locks that need to be acquired + std::set locksToAcquire = _getAdditionalLocksToAcquire(opCtx); + locksToAcquire.insert(originalNss()); + + // Acquiring all DDL locks in sorted order to avoid deadlocks + // Note that the sorted order is provided by default through the std::set container + auto futureChain = ExecutorFuture(**executor); + boost::optional lastDb; + for (const auto& lockNss : locksToAcquire) { + const bool isDbOnly = lockNss.coll().empty(); + + // Acquiring the database DDL lock + if (lastDb != lockNss.dbName()) { + const auto lockMode = [&] { + if (!feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility)) { + return MODE_X; + } + return (isDbOnly ? 
MODE_X : MODE_IX); + }(); + const auto& dbName = lockNss.dbName(); + futureChain = + std::move(futureChain) + .then([this, executor, token, dbName, lockMode, anchor = shared_from_this()] { + return _acquireLockAsync(executor, token, dbName, lockMode); + }); + lastDb = dbName; + } - _scopedLocks.emplace(ddlLockManager->lock(opCtx, resource, coorName, lockTimeOut)); - }) - .until([this, resource = resource.toString()](Status status) { - if (!status.isOK()) { - LOGV2_WARNING(6819300, - "DDL lock acquisition attempt failed", - "coordinatorId"_attr = _coordId, - "resource"_attr = resource, - "error"_attr = redact(status)); - } - // Sharding DDL operations are not rollbackable so in case we recovered a coordinator - // from disk we need to ensure eventual completion of the DDL operation, so we must - // retry until we manage to acquire the lock. - return (!_recoveredFromDisk) || status.isOK(); - }) - .withBackoffBetweenIterations(kExponentialBackoff) - .on(**executor, token); + // Acquiring the collection DDL lock + if (!isDbOnly) { + futureChain = + std::move(futureChain) + .then([this, executor, token, nss = lockNss, anchor = shared_from_this()] { + return _acquireLockAsync(executor, token, nss, MODE_X); + }); + } + } + return futureChain; } + ExecutorFuture ShardingDDLCoordinator::_cleanupOnAbort( std::shared_ptr executor, const CancellationToken& token, @@ -262,6 +351,11 @@ SemiFuture ShardingDDLCoordinator::run(std::shared_ptr(opCtx->getServiceContext()); + _locker->unsetThreadId(); + _locker->setDebugInfo(str::stream() << _coordId.toBSON()); + // Check if this coordinator is allowed to start according to the user-writes blocking // critical section. If it is not the first execution, it means it had started already // and we are recovering this coordinator. 
In this case, let it be completed even though @@ -274,7 +368,9 @@ SemiFuture ShardingDDLCoordinator::run(std::shared_ptr ShardingDDLCoordinator::run(std::shared_ptr(**executor); - }) .then([this, executor, token, anchor = shared_from_this()] { if (!_firstExecution || - // The Feature flag is disabled - !feature_flags::gImplicitDDLTimeseriesNssTranslation.isEnabled( - serverGlobalParams.featureCompatibility) || // this DDL operation operates on a DB originalNss().coll().empty() || // this DDL operation operates directly on a bucket nss @@ -313,17 +400,8 @@ SemiFuture ShardingDDLCoordinator::run(std::shared_ptr(**executor); - }) - .then([this, executor, token, anchor = shared_from_this()] { - auto opCtxHolder = cc().makeOperationContext(); - auto* opCtx = opCtxHolder.get(); - auto additionalLocks = _acquireAdditionalLocks(opCtx); - if (!additionalLocks.empty()) { - invariant(additionalLocks.size() == 1); - return _acquireLockAsync(executor, token, additionalLocks.front()); + return _acquireLockAsync( + executor, token, bucketNss.value(), MODE_X); } return ExecutorFuture(**executor); }) @@ -416,6 +494,13 @@ SemiFuture ShardingDDLCoordinator::run(std::shared_ptr +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/ddl_lock_manager.h" #include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/sharding_ddl_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/internal_session_pool.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/executor/scoped_task_executor.h" #include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/mutex.h" +#include "mongo/s/database_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -112,7 +146,7 @@ class ShardingDDLCoordinator return originalNss(); } - virtual std::vector _acquireAdditionalLocks(OperationContext* opCtx) { + virtual std::set _getAdditionalLocksToAcquire(OperationContext* opCtx) { return {}; }; @@ -148,6 +182,7 @@ class ShardingDDLCoordinator const bool _recoveredFromDisk; const boost::optional _forwardableOpMetadata; const boost::optional _databaseVersion; + boost::optional _bucketNss; bool _firstExecution{ true}; // True only when executing the coordinator for the first time (meaning it's not a @@ -173,9 +208,17 @@ class ShardingDDLCoordinator ExecutorFuture _removeDocumentUntillSuccessOrStepdown( 
std::shared_ptr executor); + ExecutorFuture _acquireAllLocksAsync( + OperationContext* opCtx, + std::shared_ptr executor, + const CancellationToken& token); + + template ExecutorFuture _acquireLockAsync(std::shared_ptr executor, const CancellationToken& token, - StringData resource); + const T& resource, + LockMode lockMode); + ExecutorFuture _translateTimeseriesNss( std::shared_ptr executor, const CancellationToken& token); @@ -185,7 +228,12 @@ class ShardingDDLCoordinator SharedPromise _constructionCompletionPromise; SharedPromise _completionPromise; - std::stack _scopedLocks; + // A Locker object works attached to an opCtx and it's destroyed once the opCtx gets out of + // scope. However, we must keep alive a unique Locker object during the whole + // ShardingDDLCoordinator life to preserve the lock state among all the executor tasks. + std::unique_ptr _locker; + + std::stack _scopedLocks; }; template @@ -228,7 +276,7 @@ class ShardingDDLCoordinatorImpl : public ShardingDDLCoordinator { // Append static info bob.append("type", "op"); - bob.append("ns", originalNss().toString()); + bob.append("ns", NamespaceStringUtil::serialize(originalNss())); bob.append("desc", _coordinatorName); bob.append("op", "command"); bob.append("active", true); @@ -238,7 +286,7 @@ class ShardingDDLCoordinatorImpl : public ShardingDDLCoordinator { stdx::lock_guard lk{_docMutex}; if (const auto& bucketNss = _doc.getBucketNss()) { // Bucket namespace is only present in case the collection is a sharded timeseries - bob.append("bucketNamespace", bucketNss.get().toString()); + bob.append("bucketNamespace", NamespaceStringUtil::serialize(bucketNss.get())); } } @@ -277,7 +325,7 @@ class RecoverableShardingDDLCoordinator : public ShardingDDLCoordinatorImpl auto _buildPhaseHandler(const Phase& newPhase, Func&& handlerFn) { - return [=] { + return [=, this] { const auto& currPhase = _doc.getPhase(); if (currPhase > newPhase) { @@ -372,6 +420,47 @@ class RecoverableShardingDDLCoordinator : public ShardingDDLCoordinatorImpl getAbortReason() const override { + const auto& status = _doc.getAbortReason(); + invariant(!status || !status->isOK(), "when persisted, status must be an error"); + return status; + } + + /** + * Persists the abort reason and throws it as an exception. This causes the coordinator to fail, + * and triggers the cleanup future chain since there is a the persisted reason. 
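A small but easy-to-miss change in this header is `_buildPhaseHandler` switching its lambda capture from `[=]` to `[=, this]`: C++20 removes the implicit capture of `this` from `[=]`, so lambdas that touch members must spell the capture out. A minimal, self-contained illustration with invented types:

```cpp
// Minimal illustration of the '[=, this]' capture: the default copy capture still
// copies locals by value, and 'this' is named explicitly so member access inside the
// lambda stays valid under C++20 rules.
#include <functional>

struct PhaseTracker {
    int _phase = 0;

    std::function<void()> makeAdvanceTo(int newPhase) {
        return [=, this] {  // copies 'newPhase', captures the object pointer explicitly
            if (_phase < newPhase) {
                _phase = newPhase;
            }
        };
    }
};
```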
+ */ + void triggerCleanup(OperationContext* opCtx, const Status& status) { + LOGV2_INFO(7418502, + "Coordinator failed, persisting abort reason", + "coordinatorId"_attr = _doc.getId(), + "phase"_attr = serializePhase(_doc.getPhase()), + "reason"_attr = redact(status)); + + auto newDoc = [&] { + stdx::lock_guard lk{_docMutex}; + return _doc; + }(); + + auto coordinatorMetadata = newDoc.getShardingDDLCoordinatorMetadata(); + coordinatorMetadata.setAbortReason(status); + newDoc.setShardingDDLCoordinatorMetadata(std::move(coordinatorMetadata)); + + _updateStateDocument(opCtx, std::move(newDoc)); + + uassertStatusOK(status); + } + +private: // lazily acquire Logical Session ID and a txn number void _updateSession(OperationContext* opCtx) { auto newDoc = [&] { @@ -408,37 +497,6 @@ class RecoverableShardingDDLCoordinator : public ShardingDDLCoordinatorImplgetTxnNumber()); return osi; } - - virtual boost::optional getAbortReason() const override { - const auto& status = _doc.getAbortReason(); - invariant(!status || !status->isOK(), "when persisted, status must be an error"); - return status; - } - - /** - * Persists the abort reason and throws it as an exception. This causes the coordinator to fail, - * and triggers the cleanup future chain since there is a the persisted reason. - */ - void triggerCleanup(OperationContext* opCtx, const Status& status) { - LOGV2_INFO(7418502, - "Coordinator failed, persisting abort reason", - "coordinatorId"_attr = _doc.getId(), - "phase"_attr = serializePhase(_doc.getPhase()), - "reason"_attr = redact(status)); - - auto newDoc = [&] { - stdx::lock_guard lk{_docMutex}; - return _doc; - }(); - - auto coordinatorMetadata = newDoc.getShardingDDLCoordinatorMetadata(); - coordinatorMetadata.setAbortReason(status); - newDoc.setShardingDDLCoordinatorMetadata(std::move(coordinatorMetadata)); - - _updateStateDocument(opCtx, std::move(newDoc)); - - uassertStatusOK(status); - } }; #undef MONGO_LOGV2_DEFAULT_COMPONENT diff --git a/src/mongo/db/s/sharding_ddl_coordinator.idl b/src/mongo/db/s/sharding_ddl_coordinator.idl index de953d9a3ca78..92129463f9a59 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator.idl +++ b/src/mongo/db/s/sharding_ddl_coordinator.idl @@ -44,8 +44,6 @@ enums: description: "Type of the sharding DDL Operation." type: string values: - # TODO (SERVER-71309): Remove once 7.0 becomes last LTS. - kMovePrimaryNoResilient: "movePrimaryNoResilient" kMovePrimary: "movePrimary" kDropDatabase: "dropDatabase_V2" # TODO SERVER-73627: Remove once 7.0 becomes last LTS. @@ -54,23 +52,14 @@ enums: # TODO SERVER-73627: Remove once 7.0 becomes last LTS. kDropCollectionPre70Compatible: "dropCollection" kRenameCollection: "renameCollection_V2" - # TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled. 
- kRenameCollectionPre63Compatible: "renameCollection" kCreateCollection: "createCollection_V3" - # TODO SERVER-68008: Remove once 7.0 becomes last LTS - kCreateCollectionPre61Compatible: "createCollection_V2" kRefineCollectionShardKey: "refineCollectionShardKey" kSetAllowMigrations: "setAllowMigrations" kCollMod: "collMod_V3" - # TODO SERVER-68008: Remove once 7.0 becomes last LTS - kCollModPre61Compatible: "collMod_V2" kReshardCollection: "reshardCollection" # Note: the V3 refers to the coordinator version, not the FLE2 protocol version kCompactStructuredEncryptionData: "compactStructuredEncryptionData_V3" - # TODO SERVER-68373: Remove once 7.0 becomes last LTS - kCompactStructuredEncryptionDataPre70Compatible: "compactStructuredEncryptionData_V2" - # TODO SERVER-68373 remove once 7.0 becomes last LTS - kCompactStructuredEncryptionDataPre61Compatible: "compactStructuredEncryptionData" + kCleanupStructuredEncryptionData: "cleanupStructuredEncryptionData" types: ForwardableOperationMetadata: diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp index 7ea139345a3ff..b642bdbeec08a 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator_service.cpp +++ b/src/mongo/db/s/sharding_ddl_coordinator_service.cpp @@ -30,17 +30,39 @@ #include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + #include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/s/cleanup_structured_encryption_data_coordinator.h" #include "mongo/db/s/collmod_coordinator.h" #include "mongo/db/s/compact_structured_encryption_data_coordinator.h" #include "mongo/db/s/create_collection_coordinator.h" #include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/ddl_lock_manager.h" #include "mongo/db/s/drop_collection_coordinator.h" #include "mongo/db/s/drop_database_coordinator.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/move_primary_coordinator.h" -#include "mongo/db/s/move_primary_coordinator_no_resilient.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/refine_collection_shard_key_coordinator.h" #include "mongo/db/s/rename_collection_coordinator.h" @@ -48,6 +70,14 @@ #include "mongo/db/s/set_allow_migrations_coordinator.h" #include "mongo/db/s/sharding_ddl_coordinator.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/database_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -55,17 +85,14 @@ namespace mongo { namespace { +MONGO_FAIL_POINT_DEFINE(pauseShardingDDLCoordinatorServiceOnRecovery); + std::shared_ptr constructShardingDDLCoordinatorInstance( ShardingDDLCoordinatorService* service, BSONObj initialState) { const auto op = extractShardingDDLCoordinatorMetadata(initialState); LOGV2( 5390510, "Constructing new sharding DDL coordinator", "coordinatorDoc"_attr = 
op.toBSON()); switch (op.getId().getOperationType()) { - // TODO (SERVER-71309): Remove once 7.0 becomes last LTS. - case DDLCoordinatorTypeEnum::kMovePrimaryNoResilient: - return std::make_shared(service, - std::move(initialState)); - break; case DDLCoordinatorTypeEnum::kMovePrimary: return std::make_shared(service, std::move(initialState)); break; @@ -80,12 +107,8 @@ std::shared_ptr constructShardingDDLCoordinatorInstance( return std::make_shared(service, std::move(initialState)); break; case DDLCoordinatorTypeEnum::kRenameCollection: - // TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled. - case DDLCoordinatorTypeEnum::kRenameCollectionPre63Compatible: return std::make_shared(service, std::move(initialState)); case DDLCoordinatorTypeEnum::kCreateCollection: - // TODO SERVER-68008 Remove the Pre61Compatible case once 7.0 becomes last LTS - case DDLCoordinatorTypeEnum::kCreateCollectionPre61Compatible: return std::make_shared(service, std::move(initialState)); break; case DDLCoordinatorTypeEnum::kRefineCollectionShardKey: @@ -97,24 +120,19 @@ std::shared_ptr constructShardingDDLCoordinatorInstance( std::move(initialState)); break; case DDLCoordinatorTypeEnum::kCollMod: - case DDLCoordinatorTypeEnum::kCollModPre61Compatible: // TODO SERVER-68008 Remove once 7.0 - // becomes last LTS return std::make_shared(service, std::move(initialState)); break; case DDLCoordinatorTypeEnum::kReshardCollection: return std::make_shared(service, std::move(initialState)); break; - case DDLCoordinatorTypeEnum::kCompactStructuredEncryptionDataPre61Compatible: - // TODO SERVER-68373 remove once 7.0 becomes last LTS - case DDLCoordinatorTypeEnum::kCompactStructuredEncryptionDataPre70Compatible: - // TODO SERVER-68373 remove once 7.0 becomes last LTS - return std::make_shared( - service, std::move(initialState)); - break; case DDLCoordinatorTypeEnum::kCompactStructuredEncryptionData: return std::make_shared( service, std::move(initialState)); break; + case DDLCoordinatorTypeEnum::kCleanupStructuredEncryptionData: + return std::make_shared( + service, std::move(initialState)); + break; default: uasserted(ErrorCodes::BadValue, str::stream() @@ -146,6 +164,8 @@ ShardingDDLCoordinatorService::constructInstance(BSONObj initialState) { } } + pauseShardingDDLCoordinatorServiceOnRecovery.pauseWhileSet(); + coord->getConstructionCompletionFuture() .thenRunOn(getInstanceCleanupExecutor()) .getAsync([this](auto status) { @@ -155,8 +175,8 @@ ShardingDDLCoordinatorService::constructInstance(BSONObj initialState) { } invariant(_numCoordinatorsToWait > 0); if (--_numCoordinatorsToWait == 0) { - _state = State::kRecovered; - _recoveredOrCoordinatorCompletedCV.notify_all(); + auto opCtx = cc().makeOperationContext(); + _transitionToRecovered(lg, opCtx.get()); } }); @@ -204,6 +224,7 @@ void ShardingDDLCoordinatorService::_afterStepDown() { stdx::lock_guard lg(_mutex); _state = State::kPaused; _numCoordinatorsToWait = 0; + DDLLockManager::get(cc().getServiceContext())->setState(DDLLockManager::State::kPaused); } size_t ShardingDDLCoordinatorService::_countCoordinatorDocs(OperationContext* opCtx) { @@ -246,13 +267,14 @@ ExecutorFuture ShardingDDLCoordinatorService::_rebuildService( "Found Sharding DDL Coordinators to rebuild", "numCoordinators"_attr = numCoordinators); } - stdx::lock_guard lg(_mutex); if (numCoordinators > 0) { + stdx::lock_guard lg(_mutex); _state = State::kRecovering; _numCoordinatorsToWait = numCoordinators; } else { - _state = State::kRecovered; - 
_recoveredOrCoordinatorCompletedCV.notify_all(); + pauseShardingDDLCoordinatorServiceOnRecovery.pauseWhileSet(); + stdx::lock_guard lg(_mutex); + _transitionToRecovered(lg, opCtx.get()); } }) .onError([this](const Status& status) { @@ -274,11 +296,11 @@ ShardingDDLCoordinatorService::getOrCreateInstance(OperationContext* opCtx, BSON if (!nss.isConfigDB() && !nss.isAdminDB()) { // Check that the operation context has a database version for this namespace - const auto clientDbVersion = OperationShardingState::get(opCtx).getDbVersion(nss.db()); + const auto clientDbVersion = OperationShardingState::get(opCtx).getDbVersion(nss.dbName()); uassert(ErrorCodes::IllegalOperation, "Request sent without attaching database version", clientDbVersion); - DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, nss.db()); + DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, nss.dbName()); coorMetadata.setDatabaseVersion(clientDbVersion); } @@ -310,4 +332,10 @@ std::shared_ptr ShardingDDLCoordinatorService::getInstan return PrimaryOnlyService::getInstanceCleanupExecutor(); } +void ShardingDDLCoordinatorService::_transitionToRecovered(WithLock lk, OperationContext* opCtx) { + _state = State::kRecovered; + DDLLockManager::get(opCtx)->setState(DDLLockManager::State::kPrimaryAndRecovered); + _recoveredOrCoordinatorCompletedCV.notify_all(); +} + } // namespace mongo diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service.h b/src/mongo/db/s/sharding_ddl_coordinator_service.h index b5e98d14fc236..43ca5bdf5a371 100644 --- a/src/mongo/db/s/sharding_ddl_coordinator_service.h +++ b/src/mongo/db/s/sharding_ddl_coordinator_service.h @@ -29,9 +29,27 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/future.h" namespace mongo { @@ -91,12 +109,16 @@ class ShardingDDLCoordinatorService final : public repl::PrimaryOnlyService { void waitForRecoveryCompletion(OperationContext* opCtx) const; private: + friend class ShardingDDLCoordinatorServiceTest; + ExecutorFuture _rebuildService(std::shared_ptr executor, const CancellationToken& token) override; void _afterStepDown() override; size_t _countCoordinatorDocs(OperationContext* opCtx); + void _transitionToRecovered(WithLock lk, OperationContext* opCtx); + mutable Mutex _mutex = MONGO_MAKE_LATCH("ShardingDDLCoordinatorService::_mutex"); // When the node stepDown the state is set to kPaused and all the new DDL operation will be diff --git a/src/mongo/db/s/sharding_ddl_coordinator_service_test.cpp b/src/mongo/db/s/sharding_ddl_coordinator_service_test.cpp new file mode 100644 index 0000000000000..026561f23ad8a --- /dev/null +++ b/src/mongo/db/s/sharding_ddl_coordinator_service_test.cpp @@ -0,0 +1,277 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/repl/primary_only_service_test_fixture.h" +#include "mongo/db/s/ddl_lock_manager.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/time_support.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +namespace mongo { + +class ShardingDDLCoordinatorServiceTest : public repl::PrimaryOnlyServiceMongoDTest { +public: + std::unique_ptr makeService(ServiceContext* serviceContext) override { + return std::make_unique(serviceContext); + } + + void setUp() override { + PrimaryOnlyServiceMongoDTest::setUp(); + _testExecutor = makeTestExecutor(); + } + + void tearDown() override { + // Ensure that even on test failures all failpoint state gets reset. 
+ globalFailPointRegistry().disableAllFailpoints(); + + _testExecutor->shutdown(); + _testExecutor->join(); + _testExecutor.reset(); + + PrimaryOnlyServiceMongoDTest::tearDown(); + } + + ShardingDDLCoordinatorService* ddlService() { + return static_cast(_service); + } + + std::shared_ptr makeTestExecutor() { + ThreadPool::Options threadPoolOptions; + threadPoolOptions.maxThreads = 1; + threadPoolOptions.threadNamePrefix = "ShardingDDLCoordinatorServiceTest-"; + threadPoolOptions.poolName = "ShardingDDLCoordinatorServiceTestThreadPool"; + threadPoolOptions.onCreateThread = [](const std::string& threadName) { + Client::initThread(threadName.c_str()); + }; + + auto executor = std::make_shared( + std::make_unique(threadPoolOptions), + executor::makeNetworkInterface( + "ShardingDDLCoordinatorServiceTestNetwork", nullptr, nullptr)); + executor->startup(); + return executor; + } + + void printState() { + std::string stateStr; + switch (ddlService()->_state) { + case ShardingDDLCoordinatorService::State::kPaused: + stateStr = "kPaused"; + break; + case ShardingDDLCoordinatorService::State::kRecovered: + stateStr = "kRecovered"; + break; + case ShardingDDLCoordinatorService::State::kRecovering: + stateStr = "kRecovering"; + break; + default: + MONGO_UNREACHABLE; + } + LOGV2(7646301, "ShardingDDLCoordinatorService::_state", "state"_attr = stateStr); + } + + void assertStateIsPaused() { + ASSERT_EQ(ShardingDDLCoordinatorService::State::kPaused, ddlService()->_state); + } + + void assertStateIsRecovered() { + ASSERT_EQ(ShardingDDLCoordinatorService::State::kRecovered, ddlService()->_state); + } + +protected: + using ScopedBaseDDLLock = DDLLockManager::ScopedBaseDDLLock; + + /** + * Acquire Database and Collection DDL locks on the given resource. + */ + std::pair acquireDbAndCollDDLLocks( + OperationContext* opCtx, + const NamespaceString& ns, + StringData reason, + LockMode mode, + Milliseconds timeout, + bool waitForRecovery = true) { + + const Date_t deadline = Date_t::now() + timeout; + return std::make_pair( + ScopedBaseDDLLock{ + opCtx, opCtx->lockState(), ns.dbName(), reason, mode, deadline, waitForRecovery}, + ScopedBaseDDLLock{ + opCtx, opCtx->lockState(), ns, reason, mode, deadline, waitForRecovery}); + } + + /** + * Acquire Database and Collection DDL locks on the given resource without waiting for recovery + * state to simulate requests coming from ShardingDDLCoordinators. 
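The tests in this new file drive the recovery path through the `pauseShardingDDLCoordinatorServiceOnRecovery` fail point defined earlier in the service. For reference, the enable / wait-for-entry / disable handshake they rely on looks like this in isolation; the sketch uses only FailPoint calls that appear in the tests below, with invented wrapper names.

```cpp
// Sketch of the fail point handshake used by the tests below: enable the fail point,
// start the code under test, block until the fail point has actually been reached
// (setMode() returns the entry count, making the wait race-free), then disable it so
// execution resumes.
#include <functional>

#include "mongo/util/fail_point.h"

namespace mongo {

void runWhileParkedOnRecovery(std::function<void()> triggerStepUp,
                              std::function<void()> assertionsWhileParked) {
    auto* fp = globalFailPointRegistry().find("pauseShardingDDLCoordinatorServiceOnRecovery");
    const auto timesEnteredBefore = fp->setMode(FailPoint::alwaysOn);

    triggerStepUp();                                  // kicks off the code that hits the fail point
    fp->waitForTimesEntered(timesEnteredBefore + 1);  // wait until recovery is parked

    assertionsWhileParked();                          // e.g. check DDL lock acquisition behavior

    fp->setMode(FailPoint::off);                      // let recovery continue
}

}  // namespace mongo
```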
+ */ + std::pair + acquireDbAndCollDDLLocksWithoutWaitingForRecovery(OperationContext* opCtx, + const NamespaceString& ns, + StringData reason, + LockMode mode, + Milliseconds timeout) { + return acquireDbAndCollDDLLocks( + opCtx, ns, reason, mode, timeout, false /*waitForRecovery*/); + } + + std::shared_ptr _testExecutor; +}; + +TEST_F(ShardingDDLCoordinatorServiceTest, StateTransitions) { + auto opCtx = makeOperationContext(); + + // Reaching a steady state to start the test + ddlService()->waitForRecoveryCompletion(opCtx.get()); + assertStateIsRecovered(); + + // State must be `kPaused` after stepping down + stepDown(); + assertStateIsPaused(); + + // Check state is `kRecovered` once the recovery finishes + stepUp(opCtx.get()); + ddlService()->waitForRecoveryCompletion(opCtx.get()); + assertStateIsRecovered(); +} + +TEST_F(ShardingDDLCoordinatorServiceTest, + DDLLocksCanOnlyBeAcquiredOnceShardingDDLCoordinatorServiceIsRecovered) { + auto opCtx = makeOperationContext(); + + // Reaching a steady state to start the test + ddlService()->waitForRecoveryCompletion(opCtx.get()); + + const std::string reason = "dummyReason"; + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.coll"); + + // 1- Stepping down + // Only DDL coordinators can acquire DDL locks after stepping down, otherwise trying to acquire + // a DDL lock will throw a LockTimeout error + stepDown(); + + ASSERT_THROWS_CODE( + acquireDbAndCollDDLLocks(opCtx.get(), nss, reason, MODE_X, Milliseconds::zero()), + DBException, + ErrorCodes::LockTimeout); + + ASSERT_DOES_NOT_THROW(acquireDbAndCollDDLLocksWithoutWaitingForRecovery( + opCtx.get(), nss, reason, MODE_X, Milliseconds::zero())); + + // 2- Stepping up and pausing on Recovery state + // Only DDL coordinators can acquire DDL locks during recovery, otherwise trying to acquire a + // DDL lock will throw a LockTimeout error + auto pauseOnRecoveryFailPoint = + globalFailPointRegistry().find("pauseShardingDDLCoordinatorServiceOnRecovery"); + const auto fpCount = pauseOnRecoveryFailPoint->setMode(FailPoint::alwaysOn); + stepUp(opCtx.get()); + pauseOnRecoveryFailPoint->waitForTimesEntered(fpCount + 1); + + ASSERT_THROWS_CODE( + acquireDbAndCollDDLLocks(opCtx.get(), nss, reason, MODE_X, Milliseconds::zero()), + DBException, + ErrorCodes::LockTimeout); + ASSERT_DOES_NOT_THROW(acquireDbAndCollDDLLocksWithoutWaitingForRecovery( + opCtx.get(), nss, reason, MODE_X, Milliseconds::zero())); + + // 3- Ending Recovery and enter on Recovered state + // Once ShardingDDLCoordinatorService is recovered, anyone can aquire a DDL lock + pauseOnRecoveryFailPoint->setMode(FailPoint::off); + ddlService()->waitForRecoveryCompletion(opCtx.get()); + + ASSERT_DOES_NOT_THROW( + acquireDbAndCollDDLLocks(opCtx.get(), nss, reason, MODE_X, Milliseconds::zero())); + ASSERT_DOES_NOT_THROW(acquireDbAndCollDDLLocksWithoutWaitingForRecovery( + opCtx.get(), nss, reason, MODE_X, Milliseconds::zero())); +} + +TEST_F(ShardingDDLCoordinatorServiceTest, DDLLockMustBeEventuallyAcquiredAfterAStepUp) { + auto opCtx = makeOperationContext(); + + // Reaching a steady state to start the test + ddlService()->waitForRecoveryCompletion(opCtx.get()); + + const std::string reason = "dummyReason"; + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.coll"); + + stepDown(); + + ASSERT_THROWS_CODE( + acquireDbAndCollDDLLocks(opCtx.get(), nss, reason, MODE_X, Milliseconds::zero()), + DBException, + ErrorCodes::LockTimeout); + + // Start an async task to step up + auto stepUpFuture 
= ExecutorFuture(_testExecutor).then([this]() { + auto pauseOnRecoveryFailPoint = + globalFailPointRegistry().find("pauseShardingDDLCoordinatorServiceOnRecovery"); + const auto fpCount = pauseOnRecoveryFailPoint->setMode(FailPoint::alwaysOn); + + + auto opCtx = makeOperationContext(); + stepUp(opCtx.get()); + + // Stay on recovery state for some time to ensure the lock is acquired before transition to + // recovered state + sleepFor(Milliseconds(30)); + pauseOnRecoveryFailPoint->waitForTimesEntered(fpCount + 1); + pauseOnRecoveryFailPoint->setMode(FailPoint::off); + }); + + ASSERT_DOES_NOT_THROW(acquireDbAndCollDDLLocks(opCtx.get(), nss, reason, MODE_X, Seconds(1))); + + // Lock should be acquired after step up conclusion + ASSERT(stepUpFuture.isReady()); +} + +} // namespace mongo diff --git a/src/mongo/db/s/sharding_ddl_util.cpp b/src/mongo/db/s/sharding_ddl_util.cpp index 9560059c02612..c1e714dfe5298 100644 --- a/src/mongo/db/s/sharding_ddl_util.cpp +++ b/src/mongo/db/s/sharding_ddl_util.cpp @@ -30,34 +30,89 @@ #include "mongo/db/s/sharding_ddl_util.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/cluster_transaction_api.h" -#include "mongo/db/commands/feature_compatibility_version.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/list_collections_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/query/distinct_command_gen.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/remove_tags_gen.h" -#include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/sharding_logging.h" -#include "mongo/db/s/sharding_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/vector_clock.h" -#include "mongo/db/write_block_bypass.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/impersonated_user_metadata.h" -#include "mongo/s/analyze_shard_key_documents_gen.h" +#include "mongo/db/write_concern.h" +#include "mongo/executor/async_rpc.h" +#include "mongo/executor/async_rpc_util.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" #include 
"mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" -#include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" -#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/set_allow_migrations_gen.h" -#include "mongo/s/write_ops/batch_write_exec.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/uuid.h" @@ -106,35 +161,8 @@ Status sharding_ddl_util_deserializeErrorStatusFromBSON(const BSONElement& bsonE namespace sharding_ddl_util { namespace { -void updateTags(OperationContext* opCtx, - Shard* configShard, - const NamespaceString& fromNss, - const NamespaceString& toNss, - const WriteConcernOptions& writeConcern) { - const auto query = BSON(TagsType::ns(fromNss.ns())); - const auto update = BSON("$set" << BSON(TagsType::ns(toNss.ns()))); - - BatchedCommandRequest request([&] { - write_ops::UpdateCommandRequest updateOp(TagsType::ConfigNS); - updateOp.setUpdates({[&] { - write_ops::UpdateOpEntry entry; - entry.setQ(query); - entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(update)); - entry.setUpsert(false); - entry.setMulti(true); - return entry; - }()}); - return updateOp; - }()); - request.setWriteConcern(writeConcern.toBSON()); - - auto response = configShard->runBatchWriteCommand( - opCtx, Milliseconds::max(), request, Shard::RetryPolicy::kIdempotentOrCursorInvalidated); - - uassertStatusOK(response.toStatus()); -} - void deleteChunks(OperationContext* opCtx, + const std::shared_ptr& configShard, const UUID& collectionUUID, const WriteConcernOptions& writeConcern) { // Remove config.chunks entries @@ -155,7 +183,6 @@ void deleteChunks(OperationContext* opCtx, request.setWriteConcern(writeConcern.toBSON()); - auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); auto response = configShard->runBatchWriteCommand( opCtx, Milliseconds::max(), request, Shard::RetryPolicy::kIdempotentOrCursorInvalidated); @@ -167,7 +194,8 @@ void deleteCollection(OperationContext* opCtx, const UUID& uuid, const WriteConcernOptions& writeConcern, const OperationSessionInfo& osi, - const std::shared_ptr& executor) { + const std::shared_ptr& executor, + bool useClusterTransaction) { /* Perform a transaction to delete the collection and append a new placement entry. * NOTE: deleteCollectionFn may be run on a separate thread than the one serving * deleteCollection(). For this reason, all the referenced parameters have to @@ -179,8 +207,9 @@ void deleteCollection(OperationContext* opCtx, // Remove config.collection entry. 
Query by 'ns' AND 'uuid' so that the remove can be // resolved with an IXSCAN (thanks to the index on '_id') and is idempotent (thanks to the // 'uuid') - const auto deleteCollectionQuery = BSON( - CollectionType::kNssFieldName << nss.ns() << CollectionType::kUuidFieldName << uuid); + const auto deleteCollectionQuery = + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss) + << CollectionType::kUuidFieldName << uuid); write_ops::DeleteCommandRequest deleteOp(CollectionType::ConfigNS); deleteOp.setDeletes({[&]() { @@ -223,10 +252,11 @@ void deleteCollection(OperationContext* opCtx, }; runTransactionOnShardingCatalog( - opCtx, std::move(transactionChain), writeConcern, osi, executor); + opCtx, std::move(transactionChain), writeConcern, osi, useClusterTransaction, executor); } void deleteShardingIndexCatalogMetadata(OperationContext* opCtx, + const std::shared_ptr& configShard, const UUID& uuid, const WriteConcernOptions& writeConcern) { BatchedCommandRequest request([&] { @@ -242,7 +272,6 @@ void deleteShardingIndexCatalogMetadata(OperationContext* opCtx, request.setWriteConcern(writeConcern.toBSON()); - auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); auto response = configShard->runBatchWriteCommand( opCtx, Milliseconds::max(), request, Shard::RetryPolicy::kIdempotentOrCursorInvalidated); @@ -268,6 +297,7 @@ write_ops::UpdateCommandRequest buildNoopWriteRequestCommand() { void setAllowMigrations(OperationContext* opCtx, const NamespaceString& nss, const boost::optional& expectedCollectionUUID, + const boost::optional& osi, bool allowMigrations) { ConfigsvrSetAllowMigrations configsvrSetAllowMigrationsCmd(nss, allowMigrations); configsvrSetAllowMigrationsCmd.setCollectionUUID(expectedCollectionUUID); @@ -277,7 +307,8 @@ void setAllowMigrations(OperationContext* opCtx, opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, DatabaseName::kAdmin.toString(), - CommandHelpers::appendMajorityWriteConcern(configsvrSetAllowMigrationsCmd.toBSON({})), + CommandHelpers::appendMajorityWriteConcern( + configsvrSetAllowMigrationsCmd.toBSON(osi ? osi->toBSON() : BSONObj())), Shard::RetryPolicy::kIdempotent // Although ConfigsvrSetAllowMigrations is not really // idempotent (because it will cause the collection // version to be bumped), it is safe to be retried. 
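In the `setAllowMigrations` hunk above, the command now serializes with the caller's `OperationSessionInfo` appended (`toBSON(osi ? osi->toBSON() : BSONObj())`), which is what gives the retried, not-strictly-idempotent config write its replay protection. A rough sketch of what ends up in the outgoing command document, using only generic BSON building; the field layout is approximated and the `OperationSessionInfo` header path is an assumption, so this is not the exact output of the helpers named above.

```cpp
// Rough illustration of the command document composition: the IDL command's own fields,
// the session info (lsid / txnNumber) that provides replay protection for retries, and
// a majority write concern similar to what appendMajorityWriteConcern attaches.
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/session/logical_session_id_gen.h"  // OperationSessionInfo, path assumed

namespace mongo {

BSONObj attachSessionAndMajorityWriteConcern(const BSONObj& serializedCmd,
                                             const OperationSessionInfo& osi) {
    BSONObjBuilder bob;
    bob.appendElements(serializedCmd);                    // the command itself
    bob.appendElements(osi.toBSON());                     // lsid / txnNumber for replay protection
    bob.append("writeConcern", BSON("w" << "majority"));  // approximate majority write concern
    return bob.obj();
}

}  // namespace mongo
```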
@@ -286,7 +317,7 @@ void setAllowMigrations(OperationContext* opCtx, uassertStatusOKWithContext( Shard::CommandResponse::getEffectiveStatus(std::move(swSetAllowMigrationsResult)), str::stream() << "Error setting allowMigrations to " << allowMigrations - << " for collection " << nss.toString()); + << " for collection " << nss.toStringForErrorMsg()); } catch (const ExceptionFor&) { // Collection no longer exists } catch (const ExceptionFor&) { @@ -303,10 +334,12 @@ void checkCollectionUUIDConsistencyAcrossShards( const std::vector& shardIds, std::shared_ptr executor) { const BSONObj filterObj = BSON("name" << nss.coll()); - BSONObj cmdObj = BSON("listCollections" << 1 << "filter" << filterObj); - - auto responses = sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, nss.db().toString(), cmdObj, shardIds, **executor); + ListCollections command; + command.setFilter(filterObj); + command.setDbName(nss.dbName()); + auto opts = std::make_shared>( + command, **executor, CancellationToken::uncancelable()); + auto responses = sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, shardIds); struct MismatchedShard { std::string shardId; @@ -333,7 +366,7 @@ void checkCollectionUUIDConsistencyAcrossShards( if (!mismatches.empty()) { std::stringstream errorMessage; - errorMessage << "The collection " << nss.toString() + errorMessage << "The collection " << nss.toStringForErrorMsg() << " with expected UUID: " << collectionUuid.toString() << " has different UUIDs on the following shards: ["; @@ -353,10 +386,12 @@ void checkTargetCollectionDoesNotExistInCluster( const std::vector& shardIds, std::shared_ptr executor) { const BSONObj filterObj = BSON("name" << toNss.coll()); - BSONObj cmdObj = BSON("listCollections" << 1 << "filter" << filterObj); - - auto responses = sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, toNss.db(), cmdObj, shardIds, **executor); + ListCollections command; + command.setFilter(filterObj); + command.setDbName(toNss.dbName()); + auto opts = std::make_shared>( + command, **executor, CancellationToken::uncancelable()); + auto responses = sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, shardIds); std::vector shardsContainingTargetCollection; for (const auto& cmdResponse : responses) { @@ -371,7 +406,7 @@ void checkTargetCollectionDoesNotExistInCluster( if (!shardsContainingTargetCollection.empty()) { std::stringstream errorMessage; - errorMessage << "The collection " << toNss.toString() + errorMessage << "The collection " << toNss.toStringForErrorMsg() << " already exists in the following shards: ["; std::move(shardsContainingTargetCollection.begin(), shardsContainingTargetCollection.end(), @@ -390,28 +425,11 @@ void linearizeCSRSReads(OperationContext* opCtx) { uassertStatusOK(ShardingLogging::get(opCtx)->logChangeChecked( opCtx, "Linearize CSRS reads", - NamespaceString::kServerConfigurationNamespace.ns(), + NamespaceStringUtil::serialize(NamespaceString::kServerConfigurationNamespace), {}, ShardingCatalogClient::kMajorityWriteConcern)); } -std::vector sendAuthenticatedCommandToShards( - OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor) { - - // The AsyncRequestsSender ignore impersonation metadata so we need to manually attach them to - // the command - BSONObjBuilder bob(command); - rpc::writeAuthDataToImpersonatedUserMetadata(opCtx, &bob); - WriteBlockBypass::get(opCtx).writeAsMetadata(&bob); - auto authenticatedCommand = bob.obj(); - return 
sharding_util::sendCommandToShards( - opCtx, dbName, authenticatedCommand, shardIds, executor); -} - void removeTagsMetadataFromConfig(OperationContext* opCtx, const NamespaceString& nss, const OperationSessionInfo& osi) { @@ -430,31 +448,18 @@ void removeTagsMetadataFromConfig(OperationContext* opCtx, uassertStatusOKWithContext( Shard::CommandResponse::getEffectiveStatus(std::move(swRemoveTagsResult)), - str::stream() << "Error removing tags for collection " << nss.toString()); + str::stream() << "Error removing tags for collection " << nss.toStringForErrorMsg()); } -void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid) { +void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx, const BSONObj& filter) { auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); write_ops::DeleteCommandRequest deleteCmd(NamespaceString::kConfigQueryAnalyzersNamespace); - if (uuid) { - deleteCmd.setDeletes({[&] { - write_ops::DeleteOpEntry entry; - entry.setQ( - BSON(analyze_shard_key::QueryAnalyzerDocument::kCollectionUuidFieldName << *uuid)); - entry.setMulti(false); - return entry; - }()}); - } else { - deleteCmd.setDeletes({[&] { - write_ops::DeleteOpEntry entry; - entry.setQ( - BSON(analyze_shard_key::QueryAnalyzerDocument::kNsFieldName << nss.toString())); - entry.setMulti(true); - return entry; - }()}); - } + deleteCmd.setDeletes({[&] { + write_ops::DeleteOpEntry entry; + entry.setQ(filter); + entry.setMulti(true); + return entry; + }()}); const auto deleteResult = configShard->runCommandWithFixedRetryAttempts( opCtx, @@ -463,45 +468,20 @@ void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx, CommandHelpers::appendMajorityWriteConcern(deleteCmd.toBSON({})), Shard::RetryPolicy::kIdempotent); - uassertStatusOKWithContext(Shard::CommandResponse::getEffectiveStatus(std::move(deleteResult)), - str::stream() - << "Error removing query analyzer configurations for collection " - << nss.toString()); -} - -void removeTagsMetadataFromConfig_notIdempotent(OperationContext* opCtx, - Shard* configShard, - const NamespaceString& nss, - const WriteConcernOptions& writeConcern) { - // Remove config.tags entries - const auto query = BSON(TagsType::ns(nss.ns())); - const auto hint = BSON(TagsType::ns() << 1 << TagsType::min() << 1); - - BatchedCommandRequest request([&] { - write_ops::DeleteCommandRequest deleteOp(TagsType::ConfigNS); - deleteOp.setDeletes({[&] { - write_ops::DeleteOpEntry entry; - entry.setQ(query); - entry.setMulti(true); - entry.setHint(hint); - return entry; - }()}); - return deleteOp; - }()); - - request.setWriteConcern(writeConcern.toBSON()); - - auto response = configShard->runBatchWriteCommand( - opCtx, Milliseconds::max(), request, Shard::RetryPolicy::kIdempotentOrCursorInvalidated); - - uassertStatusOK(response.toStatus()); + uassertStatusOKWithContext( + Shard::CommandResponse::getEffectiveStatus(std::move(deleteResult)), + str::stream() << "Failed to remove query analyzer documents that match the filter" + << filter); } void removeCollAndChunksMetadataFromConfig( OperationContext* opCtx, + const std::shared_ptr& configShard, + ShardingCatalogClient* catalogClient, const CollectionType& coll, const WriteConcernOptions& writeConcern, const OperationSessionInfo& osi, + bool useClusterTransaction, const std::shared_ptr& executor) { IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx); const auto& nss = coll.getNss(); @@ -517,162 +497,11 @@ void 
removeCollAndChunksMetadataFromConfig( config.placementHistory. In case this operation is run by a ddl coordinator, we can re-use the osi in the transaction to guarantee the replay protection. */ - deleteCollection(opCtx, nss, uuid, writeConcern, osi, executor); + deleteCollection(opCtx, nss, uuid, writeConcern, osi, executor, useClusterTransaction); - deleteChunks(opCtx, uuid, writeConcern); + deleteChunks(opCtx, configShard, uuid, writeConcern); - deleteShardingIndexCatalogMetadata(opCtx, uuid, writeConcern); -} - -bool removeCollAndChunksMetadataFromConfig_notIdempotent(OperationContext* opCtx, - ShardingCatalogClient* catalogClient, - const NamespaceString& nss, - const WriteConcernOptions& writeConcern) { - invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); - IgnoreAPIParametersBlock ignoreApiParametersBlock(opCtx); - - ON_BLOCK_EXIT( - [&] { Grid::get(opCtx)->catalogCache()->invalidateCollectionEntry_LINEARIZABLE(nss); }); - - try { - auto coll = - catalogClient->getCollection(opCtx, nss, repl::ReadConcernLevel::kLocalReadConcern); - - removeCollAndChunksMetadataFromConfig(opCtx, coll, writeConcern, {} /* osi */); - return true; - } catch (ExceptionFor&) { - // The collection is not sharded or doesn't exist - return false; - } -} - -void shardedRenameMetadata(OperationContext* opCtx, - Shard* configShard, - ShardingCatalogClient* catalogClient, - CollectionType& fromCollType, - const NamespaceString& toNss, - const WriteConcernOptions& writeConcern) { - invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); - - auto fromNss = fromCollType.getNss(); - auto fromUUID = fromCollType.getUuid(); - - // Delete eventual "TO" chunk/collection entries referring a dropped collection - try { - auto coll = - catalogClient->getCollection(opCtx, toNss, repl::ReadConcernLevel::kLocalReadConcern); - - if (coll.getUuid() == fromCollType.getUuid()) { - // shardedRenameMetadata() was already completed in a previous commit attempt. - return; - } - - // Delete "TO" chunk/collection entries referring a dropped collection - removeCollAndChunksMetadataFromConfig_notIdempotent( - opCtx, catalogClient, toNss, writeConcern); - } catch (ExceptionFor&) { - // The "TO" collection is not sharded or doesn't exist - } - - // Delete "FROM" from config.collections. - // Run Transaction 1 - delete source collection - // Note: in case of empty osi the transaction performing the deletion will use a new osi. - deleteCollection(opCtx, fromNss, fromUUID, writeConcern, {}, nullptr); - - // Update "FROM" tags to "TO". - updateTags(opCtx, configShard, fromNss, toNss, writeConcern); - - auto renamedCollPlacementInfo = [&]() { - // Retrieve the latest placement document about "FROM" prior to its deletion (which will - // have left an entry with an empty set of shards). 
- auto query = BSON(NamespacePlacementType::kNssFieldName - << fromNss.ns() << NamespacePlacementType::kShardsFieldName - << BSON("$ne" << BSONArray())); - - auto queryResponse = - uassertStatusOK(configShard->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting(ReadPreference::Nearest, TagSet{}), - repl::ReadConcernLevel::kMajorityReadConcern, - NamespaceString::kConfigsvrPlacementHistoryNamespace, - query, - BSON(NamespacePlacementType::kTimestampFieldName << -1) /*sort*/, - 1 /*limit*/)) - .docs; - - if (!queryResponse.empty()) { - return NamespacePlacementType::parse(IDLParserContext("shardedRenameMetadata"), - queryResponse.back()); - } - - // Persisted placement information may be unavailable as a consequence of FCV - // transitions. Use the content of config.chunks as a fallback. - DistinctCommandRequest distinctRequest(ChunkType::ConfigNS); - distinctRequest.setKey(ChunkType::shard.name()); - distinctRequest.setQuery(BSON(ChunkType::collectionUUID.name() << fromUUID)); - - auto reply = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts( - opCtx, - ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet{}), - DatabaseName::kConfig.toString(), - distinctRequest.toBSON({}), - Shard::RetryPolicy::kIdempotent)); - - uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(reply)); - std::vector shardIds; - for (const auto& valueElement : reply.response.getField("values").Array()) { - shardIds.emplace_back(valueElement.String()); - } - - // Compose a placement info object based on the retrieved information; the timestamp - // field may be disregarded, since it will be overwritten by the caller before being - // consumed. - NamespacePlacementType placementInfo(fromNss, Timestamp(), std::move(shardIds)); - placementInfo.setUuid(fromUUID); - return placementInfo; - }(); - - // Rename namespace and bump timestamp in the original collection and placement entries of - // "FROM". - fromCollType.setNss(toNss); - auto now = VectorClock::get(opCtx)->getTime(); - auto newTimestamp = now.clusterTime().asTimestamp(); - fromCollType.setTimestamp(newTimestamp); - fromCollType.setEpoch(OID::gen()); - - renamedCollPlacementInfo.setNss(toNss); - renamedCollPlacementInfo.setTimestamp(newTimestamp); - - // Use the modified entries to insert collection and placement entries for "TO". 
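The placement lookup shown above reduces to a single config query; a condensed sketch, assuming the `exhaustiveFindOnConfig` helper and the `NamespacePlacementType` IDL type used throughout this file, with `fromNss` and `configShard` as the values in scope (the namespace is serialized with `NamespaceStringUtil`, matching the convention adopted elsewhere in this file):

```cpp
// Most recent config.placementHistory document for 'fromNss' that still lists shards:
// sort by timestamp descending and take one.
const auto query =
    BSON(NamespacePlacementType::kNssFieldName
         << NamespaceStringUtil::serialize(fromNss)
         << NamespacePlacementType::kShardsFieldName << BSON("$ne" << BSONArray()));

const auto docs =
    uassertStatusOK(configShard->exhaustiveFindOnConfig(
                        opCtx,
                        ReadPreferenceSetting(ReadPreference::Nearest, TagSet{}),
                        repl::ReadConcernLevel::kMajorityReadConcern,
                        NamespaceString::kConfigsvrPlacementHistoryNamespace,
                        query,
                        BSON(NamespacePlacementType::kTimestampFieldName << -1) /*sort*/,
                        1 /*limit*/))
        .docs;
// 'docs' is either empty (fall back to config.chunks) or holds the latest placement entry.
```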
- auto transactionChain = [collInfo = std::move(fromCollType), - placementInfo = std::move(renamedCollPlacementInfo)]( - const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - write_ops::InsertCommandRequest insertConfigCollectionDoc(CollectionType::ConfigNS, - {collInfo.toBSON()}); - return txnClient.runCRUDOp(insertConfigCollectionDoc, {0} /*stmtIds*/) - .thenRunOn(txnExec) - .then([&](const BatchedCommandResponse& insertCollResponse) { - uassertStatusOK(insertCollResponse.toStatus()); - if (insertCollResponse.getN() == 0) { - BatchedCommandResponse noOpResponse; - noOpResponse.setStatus(Status::OK()); - noOpResponse.setN(0); - return SemiFuture(std::move(noOpResponse)); - } - write_ops::InsertCommandRequest insertPlacementEntry( - NamespaceString::kConfigsvrPlacementHistoryNamespace, {placementInfo.toBSON()}); - return txnClient.runCRUDOp(insertPlacementEntry, {1} /*stmtIds*/); - }) - .thenRunOn(txnExec) - .then([&](const BatchedCommandResponse& insertPlacementResponse) { - uassertStatusOK(insertPlacementResponse.toStatus()); - }) - .semi(); - }; - - // Run Transaction 2 - insert target collection and placement entries - // Note: in case of empty osi the transaction performing the deletion will use a new osi. - runTransactionOnShardingCatalog(opCtx, std::move(transactionChain), writeConcern, {}); + deleteShardingIndexCatalogMetadata(opCtx, configShard, uuid, writeConcern); } void checkCatalogConsistencyAcrossShardsForRename( @@ -699,7 +528,8 @@ void checkRenamePreconditions(OperationContext* opCtx, const bool dropTarget) { if (sourceIsSharded) { uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Namespace of target collection too long. Namespace: " << toNss + str::stream() << "Namespace of target collection too long. Namespace: " + << toNss.toStringForErrorMsg() << " Max: " << NamespaceString::MaxNsShardedCollectionLen, toNss.size() <= NamespaceString::MaxNsShardedCollectionLen); } @@ -711,7 +541,7 @@ void checkRenamePreconditions(OperationContext* opCtx, catalogClient->getCollection(opCtx, toNss); // If no exception is thrown, the collection exists and is sharded uasserted(ErrorCodes::NamespaceExists, - str::stream() << "Sharded target collection " << toNss.ns() + str::stream() << "Sharded target collection " << toNss.toStringForErrorMsg() << " exists but dropTarget is not set"); } catch (const DBException& ex) { auto code = ex.code(); @@ -724,7 +554,7 @@ void checkRenamePreconditions(OperationContext* opCtx, auto collectionCatalog = CollectionCatalog::get(opCtx); auto targetColl = collectionCatalog->lookupCollectionByNamespace(opCtx, toNss); uassert(ErrorCodes::NamespaceExists, - str::stream() << "Target collection " << toNss.ns() + str::stream() << "Target collection " << toNss.toStringForErrorMsg() << " exists but dropTarget is not set", !targetColl); } @@ -732,7 +562,7 @@ void checkRenamePreconditions(OperationContext* opCtx, // Check that there are no tags associated to the target collection auto tags = uassertStatusOK(catalogClient->getTagsForCollection(opCtx, toNss)); uassert(ErrorCodes::CommandFailed, - str::stream() << "Can't rename to target collection " << toNss.ns() + str::stream() << "Can't rename to target collection " << toNss.toStringForErrorMsg() << " because it must not have associated tags", tags.empty()); } @@ -771,7 +601,8 @@ boost::optional checkIfCollectionAlreadySharded( // If the collection is already sharded, fail if the deduced options in this request do not // match the options the collection was originally sharded with. 
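The two-statement chain above is the general shape used with the internal transaction API in this file: run the first CRUD op, short-circuit if it turned out to be a no-op, otherwise chain the second op. A trimmed sketch, assuming the `txn_api` types included here and two BSON documents (`collDoc`, `placementDoc`) prepared by the caller:

```cpp
auto transactionChain = [collDoc, placementDoc](const txn_api::TransactionClient& txnClient,
                                                ExecutorPtr txnExec) {
    write_ops::InsertCommandRequest insertColl(CollectionType::ConfigNS, {collDoc});
    return txnClient.runCRUDOp(insertColl, {0} /*stmtIds*/)
        .thenRunOn(txnExec)
        .then([&](const BatchedCommandResponse& insertCollResponse) {
            uassertStatusOK(insertCollResponse.toStatus());
            if (insertCollResponse.getN() == 0) {
                // Nothing was inserted (e.g. a retried attempt): skip the second statement.
                BatchedCommandResponse noOpResponse;
                noOpResponse.setStatus(Status::OK());
                noOpResponse.setN(0);
                return SemiFuture<BatchedCommandResponse>(std::move(noOpResponse));
            }
            write_ops::InsertCommandRequest insertPlacementEntry(
                NamespaceString::kConfigsvrPlacementHistoryNamespace, {placementDoc});
            return txnClient.runCRUDOp(insertPlacementEntry, {1} /*stmtIds*/);
        })
        .thenRunOn(txnExec)
        .then([&](const BatchedCommandResponse& insertPlacementResponse) {
            uassertStatusOK(insertPlacementResponse.toStatus());
        })
        .semi();
};
// The chain is then handed to runTransactionOnShardingCatalog(), which retries it as needed.
```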
uassert(ErrorCodes::AlreadyInitialized, - str::stream() << "sharding already enabled for collection " << nss, + str::stream() << "sharding already enabled for collection " + << nss.toStringForErrorMsg(), SimpleBSONObjComparator::kInstance.evaluate(cm.getShardKeyPattern().toBSON() == key) && SimpleBSONObjComparator::kInstance.evaluate(defaultCollator == collation) && cm.isUnique() == unique); @@ -783,30 +614,33 @@ boost::optional checkIfCollectionAlreadySharded( void stopMigrations(OperationContext* opCtx, const NamespaceString& nss, - const boost::optional& expectedCollectionUUID) { - setAllowMigrations(opCtx, nss, expectedCollectionUUID, false); + const boost::optional& expectedCollectionUUID, + const boost::optional& osi) { + setAllowMigrations(opCtx, nss, expectedCollectionUUID, osi, false); } void resumeMigrations(OperationContext* opCtx, const NamespaceString& nss, - const boost::optional& expectedCollectionUUID) { - setAllowMigrations(opCtx, nss, expectedCollectionUUID, true); + const boost::optional& expectedCollectionUUID, + const boost::optional& osi) { + setAllowMigrations(opCtx, nss, expectedCollectionUUID, osi, true); } bool checkAllowMigrations(OperationContext* opCtx, const NamespaceString& nss) { auto collDoc = - uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig( - opCtx, - ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet{}), - repl::ReadConcernLevel::kMajorityReadConcern, - CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), - BSONObj(), - 1)) + uassertStatusOK( + Grid::get(opCtx)->shardRegistry()->getConfigShard()->exhaustiveFindOnConfig( + opCtx, + ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet{}), + repl::ReadConcernLevel::kMajorityReadConcern, + CollectionType::ConfigNS, + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(nss)), + BSONObj(), + 1)) .docs; uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "collection " << nss.ns() << " not found", + str::stream() << "collection " << nss.toStringForErrorMsg() << " not found", !collDoc.empty()); auto coll = CollectionType(collDoc[0]); @@ -830,21 +664,20 @@ void performNoopRetryableWriteOnShards(OperationContext* opCtx, const OperationSessionInfo& osi, const std::shared_ptr& executor) { const auto updateOp = buildNoopWriteRequestCommand(); - - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, - updateOp.getDbName().db(), - CommandHelpers::appendMajorityWriteConcern(updateOp.toBSON(osi.toBSON())), - shardIds, - executor); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendOSI(args, osi); + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto opts = std::make_shared>( + updateOp, executor, CancellationToken::uncancelable(), args); + sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, shardIds); } void performNoopMajorityWriteLocally(OperationContext* opCtx) { const auto updateOp = buildNoopWriteRequestCommand(); DBDirectClient client(opCtx); - const auto commandResponse = client.runCommand( - OpMsgRequest::fromDBAndBody(updateOp.getDbName().db(), updateOp.toBSON({}))); + const auto commandResponse = + client.runCommand(OpMsgRequestBuilder::create(updateOp.getDbName(), updateOp.toBSON({}))); const auto commandReply = commandResponse->getCommandReply(); uassertStatusOK(getStatusFromWriteCommandReply(commandReply)); @@ -866,34 +699,41 @@ void sendDropCollectionParticipantCommandToShards(OperationContext* opCtx, bool fromMigrate) { 
ShardsvrDropCollectionParticipant dropCollectionParticipant(nss); dropCollectionParticipant.setFromMigrate(fromMigrate); - - const auto cmdObj = - CommandHelpers::appendMajorityWriteConcern(dropCollectionParticipant.toBSON({})); - - sharding_ddl_util::sendAuthenticatedCommandToShards( - opCtx, nss.db(), cmdObj.addFields(osi.toBSON()), shardIds, executor); + async_rpc::GenericArgs args; + async_rpc::AsyncRPCCommandHelpers::appendOSI(args, osi); + async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args); + auto opts = std::make_shared>( + dropCollectionParticipant, executor, CancellationToken::uncancelable(), args); + sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, shardIds); } BSONObj getCriticalSectionReasonForRename(const NamespaceString& from, const NamespaceString& to) { return BSON("command" << "rename" - << "from" << from.toString() << "to" << to.toString()); + << "from" << NamespaceStringUtil::serialize(from) << "to" + << NamespaceStringUtil::serialize(to)); } void runTransactionOnShardingCatalog(OperationContext* opCtx, txn_api::Callback&& transactionChain, const WriteConcernOptions& writeConcern, const OperationSessionInfo& osi, + bool useClusterTransaction, const std::shared_ptr& inputExecutor) { // The Internal Transactions API receives the write concern option and osi through the // passed Operation context. We opt for creating a new one to avoid any possible side // effects. auto newClient = opCtx->getServiceContext()->makeClient("ShardingCatalogTransaction"); + AuthorizationSession::get(newClient.get())->grantInternalAuthorization(newClient.get()); AlternativeClientRegion acr(newClient); + auto newOpCtxHolder = cc().makeOperationContext(); + auto newOpCtx = newOpCtxHolder.get(); + newOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + // if executor is provided, use it, otherwise use the fixed executor - const auto& executor = [&inputExecutor, ctx = opCtx]() { + const auto& executor = [&inputExecutor, ctx = newOpCtx]() { if (inputExecutor) return inputExecutor; @@ -901,42 +741,40 @@ void runTransactionOnShardingCatalog(OperationContext* opCtx, }(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - // if osi is provided, use it. Otherwise, use the one from the opCtx. - CancelableOperationContext newOpCtx( - cc().makeOperationContext(), opCtx->getCancellationToken(), executor); - if (osi.getSessionId()) { - newOpCtx->setLogicalSessionId(*osi.getSessionId()); - newOpCtx->setTxnNumber(*osi.getTxnNumber()); - } else if (opCtx->getLogicalSessionId()) { - newOpCtx->setLogicalSessionId(*opCtx->getLogicalSessionId()); - newOpCtx->setTxnNumber(*opCtx->getTxnNumber()); - } - newOpCtx->setWriteConcern(writeConcern); // Instantiate the right custom TXN client to ensure that the queries to the config DB will be // routed to the CSRS. auto customTxnClient = [&]() -> std::unique_ptr { - // TODO SERVER-75919: Investigate if this should always use the remote client. 
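For the shard fan-out helpers above, the session info and the majority write concern are no longer spliced into raw command BSON; they travel as generic arguments on the typed async_rpc request. A minimal sketch, assuming the `async_rpc` helpers included by this file, with `nss`, `fromMigrate`, `osi`, `shardIds` and `executor` as the values in scope (the `AsyncRPCOptions` template argument is spelled out here for clarity):

```cpp
ShardsvrDropCollectionParticipant dropCollectionParticipant(nss);
dropCollectionParticipant.setFromMigrate(fromMigrate);

// Attach lsid/txnNumber and {w: "majority"} as generic command arguments.
async_rpc::GenericArgs args;
async_rpc::AsyncRPCCommandHelpers::appendOSI(args, osi);
async_rpc::AsyncRPCCommandHelpers::appendMajorityWriteConcern(args);

auto opts = std::make_shared<async_rpc::AsyncRPCOptions<ShardsvrDropCollectionParticipant>>(
    dropCollectionParticipant, executor, CancellationToken::uncancelable(), args);

// Impersonation metadata and write-block bypass are appended inside the helper itself.
sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, shardIds);
```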
- if (serverGlobalParams.clusterRole.exclusivelyHasShardRole()) { - return std::make_unique( - newOpCtx.get(), - inlineExecutor, - sleepInlineExecutor, - std::make_unique( - newOpCtx->getServiceContext())); + if (!useClusterTransaction) { + tassert(7591900, + "Can only use local transaction client for sharding catalog operations on a " + "config server", + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); + return nullptr; } - invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); - return nullptr; + auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); + return std::make_unique( + newOpCtx, + inlineExecutor, + sleepInlineExecutor, + std::make_unique( + newOpCtx->getServiceContext())); }(); - txn_api::SyncTransactionWithRetries txn(newOpCtx.get(), - sleepInlineExecutor, + if (osi.getSessionId()) { + newOpCtx->setLogicalSessionId(*osi.getSessionId()); + newOpCtx->setTxnNumber(*osi.getTxnNumber()); + } + + newOpCtx->setWriteConcern(writeConcern); + + txn_api::SyncTransactionWithRetries txn(newOpCtx, + executor, nullptr /*resourceYielder*/, inlineExecutor, std::move(customTxnClient)); - txn.run(newOpCtx.get(), std::move(transactionChain)); + txn.run(newOpCtx, std::move(transactionChain)); } } // namespace sharding_ddl_util diff --git a/src/mongo/db/s/sharding_ddl_util.h b/src/mongo/db/s/sharding_ddl_util.h index 65da28622815c..6acfa5b077ab1 100644 --- a/src/mongo/db/s/sharding_ddl_util.h +++ b/src/mongo/db/s/sharding_ddl_util.h @@ -29,16 +29,51 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/db/transaction/transaction_api.h" +#include "mongo/db/write_block_bypass.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/async_rpc.h" +#include "mongo/executor/async_rpc_error_info.h" +#include "mongo/executor/async_rpc_targeter.h" +#include "mongo/executor/async_rpc_util.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/scoped_task_executor.h" #include "mongo/executor/task_executor.h" +#include "mongo/rpc/metadata/impersonated_user_metadata.h" #include "mongo/s/async_requests_sender.h" +#include "mongo/s/async_rpc_shard_retry_policy.h" +#include "mongo/s/async_rpc_shard_targeter.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/client/shard.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" + namespace mongo { @@ -60,12 +95,64 @@ void linearizeCSRSReads(OperationContext* opCtx); /** * Generic utility to send a command to a list of shards. Throws if one of the commands fails. 
*/ +template std::vector sendAuthenticatedCommandToShards( OperationContext* opCtx, - StringData dbName, - const BSONObj& command, - const std::vector& shardIds, - const std::shared_ptr& executor); + std::shared_ptr> originalOpts, + const std::vector& shardIds) { + if (shardIds.size() == 0) { + return {}; + } + + // AsyncRPC ignores impersonation metadata so we need to manually attach them to + // the command + if (auto meta = rpc::getAuthDataToImpersonatedUserMetadata(opCtx)) { + originalOpts->genericArgs.unstable.setDollarAudit(*meta); + } + originalOpts->genericArgs.unstable.setMayBypassWriteBlocking( + WriteBlockBypass::get(opCtx).isWriteBlockBypassEnabled()); + + std::vector>> futures; + auto indexToShardId = std::make_shared>(); + + for (size_t i = 0; i < shardIds.size(); ++i) { + ReadPreferenceSetting readPref(ReadPreference::PrimaryOnly); + std::unique_ptr targeter = + std::make_unique( + shardIds[i], opCtx, readPref, originalOpts->exec); + bool startTransaction = originalOpts->genericArgs.stable.getStartTransaction() + ? *originalOpts->genericArgs.stable.getStartTransaction() + : false; + auto retryPolicy = std::make_shared( + Shard::RetryPolicy::kIdempotentOrCursorInvalidated, startTransaction); + auto opts = + std::make_shared>(originalOpts->cmd, + originalOpts->exec, + originalOpts->token, + originalOpts->genericArgs, + retryPolicy); + futures.push_back(async_rpc::sendCommand(opts, opCtx, std::move(targeter))); + (*indexToShardId)[i] = shardIds[i]; + } + + auto responses = async_rpc::getAllResponsesOrFirstErrorWithCancellation< + AsyncRequestsSender::Response, + async_rpc::AsyncRPCResponse>( + std::move(futures), + originalOpts->token, + [indexToShardId](async_rpc::AsyncRPCResponse reply, + size_t index) -> AsyncRequestsSender::Response { + BSONObjBuilder replyBob; + reply.response.serialize(&replyBob); + reply.genericReplyFields.stable.serialize(&replyBob); + reply.genericReplyFields.unstable.serialize(&replyBob); + return AsyncRequestsSender::Response{ + (*indexToShardId)[index], + executor::RemoteCommandOnAnyResponse( + reply.targetUsed, replyBob.obj(), reply.elapsed)}; + }); + return responses.get(); +} /** * Erase tags metadata from config server for the given namespace, using the _configsvrRemoveTags @@ -75,59 +162,24 @@ void removeTagsMetadataFromConfig(OperationContext* opCtx, const NamespaceString& nss, const OperationSessionInfo& osi); -/** - * Erase tags metadata from config server for the given namespace. - */ -void removeTagsMetadataFromConfig_notIdempotent(OperationContext* opCtx, - Shard* configShard, - const NamespaceString& nss, - const WriteConcernOptions& writeConcern); - - /** * Erase collection metadata from config server and invalidate the locally cached one. * In particular remove the collection and chunks metadata associated with the given namespace. */ void removeCollAndChunksMetadataFromConfig( OperationContext* opCtx, + const std::shared_ptr& configShard, + ShardingCatalogClient* catalogClient, const CollectionType& coll, const WriteConcernOptions& writeConcern, const OperationSessionInfo& osi, + bool useClusterTransaction, const std::shared_ptr& executor = nullptr); /** - * Erase collection metadata from config server and invalidate the locally cached one. - * In particular remove the collection, chunks and index metadata associated with the given - * namespace. - * - * Returns true if the collection existed before being removed. 
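A usage sketch of the helper above, following the `listCollections` call sites earlier in this change; it assumes a scoped task executor `executor` and the shard ids to target are in scope. The options object carries the typed command, the executor and a cancellation token, and the helper returns one response per shard:

```cpp
// Fan a typed listCollections out to every shard in 'shardIds'.
ListCollections command;
command.setDbName(nss.dbName());
command.setFilter(BSON("name" << nss.coll()));

auto opts = std::make_shared<async_rpc::AsyncRPCOptions<ListCollections>>(
    command, **executor, CancellationToken::uncancelable());

// One AsyncRequestsSender::Response per shard; the helper throws on the first failure.
auto responses = sharding_ddl_util::sendAuthenticatedCommandToShards(opCtx, opts, shardIds);
```

Each per-shard request is retried with `Shard::RetryPolicy::kIdempotentOrCursorInvalidated`, so callers are expected to pass idempotent commands.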
- */ -bool removeCollAndChunksMetadataFromConfig_notIdempotent(OperationContext* opCtx, - ShardingCatalogClient* catalogClient, - const NamespaceString& nss, - const WriteConcernOptions& writeConcern); - -/** - * Delete the config query analyzer document for the given collection, if it exists. - */ -void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid); - -/** - * Rename sharded collection metadata as part of a renameCollection operation. - * - * - Update namespace associated with tags (FROM -> TO) - * - Update FROM collection entry to TO - * - * This function is idempotent and can just be invoked by the CSRS. + * Delete the query analyzer documents that match the given filter. */ -void shardedRenameMetadata(OperationContext* opCtx, - Shard* configShard, - ShardingCatalogClient* catalogClient, - CollectionType& fromCollType, - const NamespaceString& toNss, - const WriteConcernOptions& writeConcern); +void removeQueryAnalyzerMetadataFromConfig(OperationContext* opCtx, const BSONObj& filter); /** * Ensure source collection uuid is consistent on every shard @@ -181,7 +233,8 @@ boost::optional checkIfCollectionAlreadySharded( */ void stopMigrations(OperationContext* opCtx, const NamespaceString& nss, - const boost::optional& expectedCollectionUUID); + const boost::optional& expectedCollectionUUID, + const boost::optional& osi = boost::none); /** * Resume migrations and balancing rounds for the given nss. @@ -190,7 +243,8 @@ void stopMigrations(OperationContext* opCtx, */ void resumeMigrations(OperationContext* opCtx, const NamespaceString& nss, - const boost::optional& expectedCollectionUUID); + const boost::optional& expectedCollectionUUID, + const boost::optional& osi = boost::none); /** * Calls to the config server primary to get the collection document for the given nss. @@ -243,6 +297,7 @@ void runTransactionOnShardingCatalog( txn_api::Callback&& transactionChain, const WriteConcernOptions& writeConcern, const OperationSessionInfo& osi, + bool useClusterTransaction, const std::shared_ptr& inputExecutor = nullptr); } // namespace sharding_ddl_util } // namespace mongo diff --git a/src/mongo/db/s/sharding_ddl_util_test.cpp b/src/mongo/db/s/sharding_ddl_util_test.cpp index ee4c94640e6a7..b56bd828614fc 100644 --- a/src/mongo/db/s/sharding_ddl_util_test.cpp +++ b/src/mongo/db/s/sharding_ddl_util_test.cpp @@ -27,21 +27,42 @@ * it in the license file. 
*/ +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/logv2/log.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -195,117 +216,6 @@ TEST_F(ShardingDDLUtilTest, SerializeErrorStatusTooBig) { ASSERT(!deserialized.extraInfo()); } -// Test that config.collection document and config.chunks documents are properly updated from source -// to destination collection metadata -TEST_F(ShardingDDLUtilTest, ShardedRenameMetadata) { - auto opCtx = operationContext(); - DBDirectClient client(opCtx); - - const NamespaceString fromNss = NamespaceString::createNamespaceString_forTest("test.from"); - const auto fromCollQuery = BSON(CollectionType::kNssFieldName << fromNss.ns()); - - const auto toCollQuery = BSON(CollectionType::kNssFieldName << kToNss.ns()); - - const Timestamp collTimestamp(1); - const auto collUUID = UUID::gen(); - - // Initialize FROM collection chunks - const auto fromEpoch = OID::gen(); - const int nChunks = 10; - std::vector chunks; - for (int i = 0; i < nChunks; i++) { - ChunkVersion chunkVersion({fromEpoch, collTimestamp}, {1, uint32_t(i)}); - ChunkType chunk; - chunk.setName(OID::gen()); - chunk.setCollectionUUID(collUUID); - chunk.setVersion(chunkVersion); - chunk.setShard(shard0.getName()); - chunk.setOnCurrentShardSince(Timestamp(1, i)); - chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), shard0.getName())}); - chunk.setMin(BSON("a" << i)); - chunk.setMax(BSON("a" << i + 1)); - chunks.push_back(chunk); - } - - setupCollection(fromNss, KeyPattern(BSON("x" << 1)), chunks); - - // Initialize TO collection chunks - std::vector originalToChunks; - const auto toEpoch = OID::gen(); - const auto toUUID = UUID::gen(); - for (int i = 0; i < nChunks; i++) { - ChunkVersion chunkVersion({toEpoch, Timestamp(2)}, {1, uint32_t(i)}); - ChunkType chunk; - chunk.setName(OID::gen()); - chunk.setCollectionUUID(toUUID); - chunk.setVersion(chunkVersion); - chunk.setShard(shard0.getName()); - chunk.setOnCurrentShardSince(Timestamp(1, i)); - chunk.setHistory({ChunkHistory(*chunk.getOnCurrentShardSince(), shard0.getName())}); - chunk.setMin(BSON("a" << i)); - chunk.setMax(BSON("a" << i + 1)); - originalToChunks.push_back(chunk); - } - 
setupCollection(kToNss, KeyPattern(BSON("x" << 1)), originalToChunks); - - // Get FROM collection document and chunks - auto fromDoc = client.findOne(CollectionType::ConfigNS, fromCollQuery); - CollectionType fromCollection(fromDoc); - - FindCommandRequest fromChunksRequest{ChunkType::ConfigNS}; - fromChunksRequest.setFilter(BSON(ChunkType::collectionUUID << collUUID)); - fromChunksRequest.setSort(BSON("_id" << 1)); - - std::vector fromChunks; - findN(client, std::move(fromChunksRequest), nChunks, fromChunks); - - auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - auto catalogClient = Grid::get(opCtx)->catalogClient(); - auto fromCollType = catalogClient->getCollection(opCtx, fromNss); - // Perform the metadata rename - sharding_ddl_util::shardedRenameMetadata(opCtx, - configShard.get(), - catalogClient, - fromCollType, - kToNss, - ShardingCatalogClient::kMajorityWriteConcern); - - // Check that the FROM config.collections entry has been deleted - ASSERT(client.findOne(CollectionType::ConfigNS, fromCollQuery).isEmpty()); - - // Get TO collection document and chunks - auto toDoc = client.findOne(CollectionType::ConfigNS, toCollQuery); - CollectionType toCollection(toDoc); - - FindCommandRequest toChunksRequest{ChunkType::ConfigNS}; - toChunksRequest.setFilter(BSON(ChunkType::collectionUUID << collUUID)); - toChunksRequest.setSort(BSON("_id" << 1)); - - std::vector toChunks; - findN(client, std::move(toChunksRequest), nChunks, toChunks); - - // Check that original epoch/timestamp are changed in config.collections entry - ASSERT(fromCollection.getEpoch() != toCollection.getEpoch()); - ASSERT(fromCollection.getTimestamp() != toCollection.getTimestamp()); - - // Check that no other CollectionType field has been changed - auto fromUnchangedFields = fromDoc.removeField(CollectionType::kNssFieldName) - .removeField(CollectionType::kEpochFieldName) - .removeField(CollectionType::kTimestampFieldName); - auto toUnchangedFields = toDoc.removeField(CollectionType::kNssFieldName) - .removeField(CollectionType::kEpochFieldName) - .removeField(CollectionType::kTimestampFieldName); - ASSERT_EQ(fromUnchangedFields.woCompare(toUnchangedFields), 0); - - // Check that chunk documents remain unchanged - for (int i = 0; i < nChunks; i++) { - auto fromChunkDoc = fromChunks[i]; - auto toChunkDoc = toChunks[i]; - - ASSERT_EQ(fromChunkDoc.woCompare(toChunkDoc), 0); - } -} - // Test all combinations of rename acceptable preconditions: // (1) Namespace of target collection is not too long // (2) Target collection doesn't exist and doesn't have no associated tags @@ -350,7 +260,7 @@ TEST_F(ShardingDDLUtilTest, RenamePreconditionsTargetNamespaceIsTooLong) { // Check that an exception is thrown if the namespace of the target collection is too long const NamespaceString tooLongNss = - NamespaceString::createNamespaceString_forTest(longEnoughNss.ns() + 'x'); + NamespaceString::createNamespaceString_forTest(longEnoughNss.toString_forTest() + 'x'); ASSERT_THROWS_CODE(sharding_ddl_util::checkRenamePreconditions( opCtx, true /* sourceIsSharded */, tooLongNss, false /* dropTarget */), AssertionException, diff --git a/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp b/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp index 3c137b52488cc..ef5972600c293 100644 --- a/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp +++ b/src/mongo/db/s/sharding_index_catalog_ddl_util.cpp @@ -29,16 +29,42 @@ #include "mongo/db/s/sharding_index_catalog_ddl_util.h" -#include "mongo/db/catalog/collection_write_path.h" 
+#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/ops/delete.h" #include "mongo/db/ops/update.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/s/shard_authoritative_catalog_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -49,14 +75,12 @@ namespace { * Remove all indexes by uuid. */ void deleteShardingIndexCatalogEntries(OperationContext* opCtx, - const CollectionPtr& collection, + const CollectionAcquisition& collection, const UUID& uuid) { - mongo::deleteObjects(opCtx, - collection, - NamespaceString::kShardIndexCatalogNamespace, - BSON(IndexCatalogType::kCollectionUUIDFieldName << uuid), - false); + mongo::deleteObjects( + opCtx, collection, BSON(IndexCatalogType::kCollectionUUIDFieldName << uuid), false); } + } // namespace void renameCollectionShardingIndexCatalog(OperationContext* opCtx, @@ -66,24 +90,37 @@ void renameCollectionShardingIndexCatalog(OperationContext* opCtx, writeConflictRetry( opCtx, "RenameCollectionShardingIndexCatalog", - NamespaceString::kShardIndexCatalogNamespace.ns(), + NamespaceString::kShardIndexCatalogNamespace, [&]() { boost::optional toUuid; WriteUnitOfWork wunit(opCtx); AutoGetCollection fromToColl( opCtx, fromNss, MODE_IX, AutoGetCollection::Options{}.secondaryNssOrUUIDs({toNss})); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + const auto& collsColl = + acquisitions.at(NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = acquisitions.at(NamespaceString::kShardIndexCatalogNamespace); + { // First get the document to check the index version if the document already exists - const auto queryTo = - BSON(ShardAuthoritativeCollectionType::kNssFieldName << toNss.ns()); + const auto queryTo = BSON(ShardAuthoritativeCollectionType::kNssFieldName + << 
NamespaceStringUtil::serialize(toNss)); BSONObj collectionToDoc; bool docExists = - Helpers::findOne(opCtx, collsColl.getCollection(), queryTo, collectionToDoc); + Helpers::findOne(opCtx, collsColl.getCollectionPtr(), queryTo, collectionToDoc); if (docExists) { auto collectionTo = ShardAuthoritativeCollectionType::parse( IDLParserContext("RenameCollectionShardingIndexCatalogCtx"), @@ -108,42 +145,29 @@ void renameCollectionShardingIndexCatalog(OperationContext* opCtx, // Save uuid to remove the 'to' indexes later on. if (docExists) { // Remove the 'to' entry. - mongo::deleteObjects(opCtx, - collsColl.getCollection(), - NamespaceString::kShardCollectionCatalogNamespace, - queryTo, - true); + mongo::deleteObjects(opCtx, collsColl, queryTo, true); } // Replace the _id in the 'From' entry. BSONObj collectionFromDoc; - auto queryFrom = BSON(CollectionType::kNssFieldName << fromNss.ns()); + auto queryFrom = + BSON(CollectionType::kNssFieldName << NamespaceStringUtil::serialize(fromNss)); fassert(7082801, Helpers::findOne( - opCtx, collsColl.getCollection(), queryFrom, collectionFromDoc)); + opCtx, collsColl.getCollectionPtr(), queryFrom, collectionFromDoc)); auto collectionFrom = ShardAuthoritativeCollectionType::parse( IDLParserContext("RenameCollectionShardingIndexCatalogCtx"), collectionFromDoc); collectionFrom.setNss(toNss); - mongo::deleteObjects(opCtx, - collsColl.getCollection(), - NamespaceString::kShardCollectionCatalogNamespace, - queryFrom, - true); - uassertStatusOK( - collection_internal::insertDocument(opCtx, - collsColl.getCollection(), - InsertStatement(collectionFrom.toBSON()), - nullptr)); + mongo::deleteObjects(opCtx, collsColl, queryFrom, true); + uassertStatusOK(Helpers::insert(opCtx, collsColl, collectionFrom.toBSON())); } - AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX); if (toUuid) { // Remove the 'to' indexes. 
repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); mongo::deleteObjects( opCtx, - idxColl.getCollection(), - NamespaceString::kShardIndexCatalogNamespace, + idxColl, BSON(IndexCatalogType::kCollectionUUIDFieldName << toUuid.value()), false); } @@ -151,7 +175,7 @@ void renameCollectionShardingIndexCatalog(OperationContext* opCtx, opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog( opCtx, fromNss, - idxColl->uuid(), + idxColl.uuid(), ShardingIndexCatalogRenameEntry(fromNss, toNss, indexVersion).toBSON()); wunit.commit(); }); @@ -169,24 +193,35 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx, indexCatalogEntry.setIndexCollectionUUID(indexCollectionUUID); writeConflictRetry( - opCtx, "AddIndexCatalogEntry", NamespaceString::kShardIndexCatalogNamespace.ns(), [&]() { + opCtx, "AddIndexCatalogEntry", NamespaceString::kShardIndexCatalogNamespace, [&]() { WriteUnitOfWork wunit(opCtx); AutoGetCollection userColl(opCtx, userCollectionNss, MODE_IX); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + auto& collsColl = acquisitions.at(NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = acquisitions.at(NamespaceString::kShardIndexCatalogNamespace); { // First get the document to check the index version if the document already exists const auto query = BSON(ShardAuthoritativeCollectionType::kNssFieldName - << userCollectionNss.ns() + << NamespaceStringUtil::serialize(userCollectionNss) << ShardAuthoritativeCollectionType::kUuidFieldName << collectionUUID); BSONObj collectionDoc; bool docExists = - Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc); + Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc); if (docExists) { auto collection = ShardAuthoritativeCollectionType::parse( IDLParserContext("AddIndexCatalogEntry"), collectionDoc); @@ -208,32 +243,26 @@ void addShardingIndexCatalogEntryToCollection(OperationContext* opCtx, request.setQuery(query); request.setUpdateModification( BSON(ShardAuthoritativeCollectionType::kNssFieldName - << userCollectionNss.ns() + << NamespaceStringUtil::serialize(userCollectionNss) << ShardAuthoritativeCollectionType::kUuidFieldName << collectionUUID << ShardAuthoritativeCollectionType::kIndexVersionFieldName << lastmod)); request.setUpsert(true); request.setFromOplogApplication(true); - mongo::update(opCtx, collsColl.getDb(), request); + mongo::update(opCtx, collsColl, request); } - AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX); - { repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); BSONObjBuilder builder(indexCatalogEntry.toBSON()); auto idStr = format(FMT_STRING("{}_{}"), collectionUUID.toString(), name); builder.append("_id", idStr); - uassertStatusOK(collection_internal::insertDocument(opCtx, - 
idxColl.getCollection(), - InsertStatement{builder.obj()}, - nullptr, - false)); + uassertStatusOK(Helpers::insert(opCtx, idxColl, builder.obj())); } opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog( opCtx, userCollectionNss, - idxColl->uuid(), + idxColl.uuid(), ShardingIndexCatalogInsertEntry(indexCatalogEntry).toBSON()); wunit.commit(); }); @@ -247,23 +276,36 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx, writeConflictRetry( opCtx, "RemoveShardingIndexCatalogEntryFromCollection", - NamespaceString::kShardIndexCatalogNamespace.ns(), + NamespaceString::kShardIndexCatalogNamespace, [&]() { WriteUnitOfWork wunit(opCtx); AutoGetCollection userColl(opCtx, nss, MODE_IX); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + auto& collsColl = acquisitions.at(NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = acquisitions.at(NamespaceString::kShardIndexCatalogNamespace); + { // First get the document to check the index version if the document already exists const auto query = BSON(ShardAuthoritativeCollectionType::kNssFieldName - << nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid); + << NamespaceStringUtil::serialize(nss) + << ShardAuthoritativeCollectionType::kUuidFieldName << uuid); BSONObj collectionDoc; bool docExists = - Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc); + Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc); if (docExists) { auto collection = ShardAuthoritativeCollectionType::parse( IDLParserContext("RemoveIndexCatalogEntry"), collectionDoc); @@ -285,20 +327,18 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx, request.setQuery(query); request.setUpdateModification( BSON(ShardAuthoritativeCollectionType::kNssFieldName - << nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid + << NamespaceStringUtil::serialize(nss) + << ShardAuthoritativeCollectionType::kUuidFieldName << uuid << ShardAuthoritativeCollectionType::kIndexVersionFieldName << lastmod)); request.setUpsert(true); request.setFromOplogApplication(true); - mongo::update(opCtx, collsColl.getDb(), request); + mongo::update(opCtx, collsColl, request); } - AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX); - { repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); mongo::deleteObjects(opCtx, - idxColl.getCollection(), - NamespaceString::kShardIndexCatalogNamespace, + idxColl, BSON(IndexCatalogType::kCollectionUUIDFieldName << uuid << IndexCatalogType::kNameFieldName << indexName), true); @@ -307,7 +347,7 @@ void removeShardingIndexCatalogEntryFromCollection(OperationContext* opCtx, opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog( opCtx, nss, - idxColl->uuid(), + 
idxColl.uuid(), ShardingIndexCatalogRemoveEntry(indexName.toString(), uuid, lastmod).toBSON()); wunit.commit(); }); @@ -321,22 +361,35 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx, writeConflictRetry( opCtx, "ReplaceCollectionShardingIndexCatalog", - NamespaceString::kShardIndexCatalogNamespace.ns(), + NamespaceString::kShardIndexCatalogNamespace, [&]() { WriteUnitOfWork wunit(opCtx); AutoGetCollection userColl(opCtx, nss, MODE_IX); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + auto& collsColl = acquisitions.at(NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = acquisitions.at(NamespaceString::kShardIndexCatalogNamespace); + { const auto query = BSON(ShardAuthoritativeCollectionType::kNssFieldName - << nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid); + << NamespaceStringUtil::serialize(nss) + << ShardAuthoritativeCollectionType::kUuidFieldName << uuid); BSONObj collectionDoc; bool docExists = - Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc); + Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc); if (docExists) { auto collection = ShardAuthoritativeCollectionType::parse( IDLParserContext("ReplaceIndexCatalogEntry"), collectionDoc); @@ -361,18 +414,18 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx, request.setQuery(query); request.setUpdateModification(BSON( ShardAuthoritativeCollectionType::kNssFieldName - << nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid + << NamespaceStringUtil::serialize(nss) + << ShardAuthoritativeCollectionType::kUuidFieldName << uuid << ShardAuthoritativeCollectionType::kIndexVersionFieldName << indexVersion)); request.setUpsert(true); request.setFromOplogApplication(true); - mongo::update(opCtx, collsColl.getDb(), request); + mongo::update(opCtx, collsColl, request); } - AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX); { // Clear old indexes. repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); - deleteShardingIndexCatalogEntries(opCtx, idxColl.getCollection(), uuid); + deleteShardingIndexCatalogEntries(opCtx, idxColl, uuid); // Add new indexes. 
for (const auto& i : indexes) { @@ -382,19 +435,14 @@ void replaceCollectionShardingIndexCatalog(OperationContext* opCtx, auto idStr = format(FMT_STRING("{}_{}"), uuid.toString(), i.getName().toString()); builder.append("_id", idStr); - uassertStatusOK( - collection_internal::insertDocument(opCtx, - idxColl.getCollection(), - InsertStatement{builder.done()}, - nullptr, - false)); + uassertStatusOK(Helpers::insert(opCtx, idxColl, builder.done())); } } opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog( opCtx, nss, - idxColl->uuid(), + idxColl.uuid(), ShardingIndexCatalogReplaceEntry(uuid, indexVersion, indexes).toBSON()); wunit.commit(); }); @@ -404,23 +452,36 @@ void dropCollectionShardingIndexCatalog(OperationContext* opCtx, const Namespace writeConflictRetry( opCtx, "DropCollectionShardingIndexCatalog", - NamespaceString::kShardIndexCatalogNamespace.ns(), + NamespaceString::kShardIndexCatalogNamespace, [&]() { boost::optional collectionUUID; WriteUnitOfWork wunit(opCtx); Lock::DBLock dbLock(opCtx, nss.dbName(), MODE_IX); Lock::CollectionLock collLock(opCtx, nss, MODE_IX); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + const auto& collsColl = + acquisitions.at(NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = acquisitions.at(NamespaceString::kShardIndexCatalogNamespace); + { - const auto query = - BSON(ShardAuthoritativeCollectionType::kNssFieldName << nss.ns()); + const auto query = BSON(ShardAuthoritativeCollectionType::kNssFieldName + << NamespaceStringUtil::serialize(nss)); BSONObj collectionDoc; // Get the collection UUID, if nothing is found, return early. 
- if (!Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc)) { + if (!Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc)) { LOGV2_DEBUG(6712305, 1, "dropCollectionGlobalIndexesMetadata did not found collection, " @@ -432,24 +493,18 @@ void dropCollectionShardingIndexCatalog(OperationContext* opCtx, const Namespace IDLParserContext("dropCollectionShardingIndexCatalog"), collectionDoc); collectionUUID.emplace(collection.getUuid()); repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); - mongo::deleteObjects(opCtx, - collsColl.getCollection(), - NamespaceString::kShardCollectionCatalogNamespace, - query, - true); + mongo::deleteObjects(opCtx, collsColl, query, true); } - AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX); - { repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); - deleteShardingIndexCatalogEntries(opCtx, idxColl.getCollection(), *collectionUUID); + deleteShardingIndexCatalogEntries(opCtx, idxColl, *collectionUUID); } opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog( opCtx, nss, - idxColl->uuid(), + idxColl.uuid(), ShardingIndexCatalogDropEntry(*collectionUUID).toBSON()); wunit.commit(); }); @@ -461,23 +516,37 @@ void clearCollectionShardingIndexCatalog(OperationContext* opCtx, writeConflictRetry( opCtx, "ClearCollectionShardingIndexCatalog", - NamespaceString::kShardIndexCatalogNamespace.ns(), + NamespaceString::kShardIndexCatalogNamespace, [&]() { WriteUnitOfWork wunit(opCtx); AutoGetCollection userColl(opCtx, nss, MODE_IX); - AutoGetCollection collsColl(opCtx, - NamespaceString::kShardCollectionCatalogNamespace, - MODE_IX, - AutoGetCollection::Options{}.secondaryNssOrUUIDs( - {NamespaceString::kShardIndexCatalogNamespace})); + auto acquisitions = acquireCollections( + opCtx, + {CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardCollectionCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + CollectionAcquisitionRequest( + NamespaceString(NamespaceString::kShardIndexCatalogNamespace), + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite)}, + MODE_IX); + + const auto& collsColl = + acquisitions.at(NamespaceString::kShardCollectionCatalogNamespace); + const auto& idxColl = acquisitions.at(NamespaceString::kShardIndexCatalogNamespace); + { // First unset the index version. const auto query = BSON(ShardAuthoritativeCollectionType::kNssFieldName - << nss.ns() << ShardAuthoritativeCollectionType::kUuidFieldName << uuid); + << NamespaceStringUtil::serialize(nss) + << ShardAuthoritativeCollectionType::kUuidFieldName << uuid); BSONObj collectionDoc; bool docExists = - Helpers::findOne(opCtx, collsColl.getCollection(), query, collectionDoc); + Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc); // Return if there is nothing to clear. 
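The acquisition step shared by these catalog helpers can be summarized as below, assuming the shard-role API headers included at the top of this file and `nss`/`uuid` in scope; reads go through `getCollectionPtr()` on the acquisition, while `deleteObjects`, `update` and `Helpers::insert` take the acquisition itself:

```cpp
// Acquire both internal collections for write in a single call, MODE_IX.
auto acquisitions = acquireCollections(
    opCtx,
    {CollectionAcquisitionRequest(
         NamespaceString(NamespaceString::kShardCollectionCatalogNamespace),
         PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
         repl::ReadConcernArgs::get(opCtx),
         AcquisitionPrerequisites::kWrite),
     CollectionAcquisitionRequest(
         NamespaceString(NamespaceString::kShardIndexCatalogNamespace),
         PlacementConcern{boost::none, ShardVersion::UNSHARDED()},
         repl::ReadConcernArgs::get(opCtx),
         AcquisitionPrerequisites::kWrite)},
    MODE_IX);

const auto& collsColl = acquisitions.at(NamespaceString::kShardCollectionCatalogNamespace);
const auto& idxColl = acquisitions.at(NamespaceString::kShardIndexCatalogNamespace);

// Read through the CollectionPtr, write through the acquisition.
const auto query = BSON(ShardAuthoritativeCollectionType::kNssFieldName
                        << NamespaceStringUtil::serialize(nss));
BSONObj collectionDoc;
if (Helpers::findOne(opCtx, collsColl.getCollectionPtr(), query, collectionDoc)) {
    mongo::deleteObjects(opCtx, collsColl, query, true /*justOne*/);
    // Index-catalog entries for the same collection are removed through the other acquisition.
    deleteShardingIndexCatalogEntries(opCtx, idxColl, uuid);
}
```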
if (!docExists) { @@ -492,28 +561,18 @@ void clearCollectionShardingIndexCatalog(OperationContext* opCtx, } repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); - mongo::deleteObjects(opCtx, - collsColl.getCollection(), - NamespaceString::kShardCollectionCatalogNamespace, - query, - true); + mongo::deleteObjects(opCtx, collsColl, query, true); collection.setIndexVersion(boost::none); - uassertStatusOK( - collection_internal::insertDocument(opCtx, - collsColl.getCollection(), - InsertStatement(collection.toBSON()), - nullptr)); + uassertStatusOK(Helpers::insert(opCtx, collsColl, collection.toBSON())); } - AutoGetCollection idxColl(opCtx, NamespaceString::kShardIndexCatalogNamespace, MODE_IX); - { repl::UnreplicatedWritesBlock unreplicatedWritesBlock(opCtx); - deleteShardingIndexCatalogEntries(opCtx, idxColl.getCollection(), uuid); + deleteShardingIndexCatalogEntries(opCtx, idxColl, uuid); } opCtx->getServiceContext()->getOpObserver()->onModifyCollectionShardingIndexCatalog( - opCtx, nss, idxColl->uuid(), ShardingIndexCatalogClearEntry(uuid).toBSON()); + opCtx, nss, idxColl.uuid(), ShardingIndexCatalogClearEntry(uuid).toBSON()); wunit.commit(); }); } diff --git a/src/mongo/db/s/sharding_index_catalog_ddl_util.h b/src/mongo/db/s/sharding_index_catalog_ddl_util.h index bcef36eb83787..96b5c63b54490 100644 --- a/src/mongo/db/s/sharding_index_catalog_ddl_util.h +++ b/src/mongo/db/s/sharding_index_catalog_ddl_util.h @@ -29,8 +29,18 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/sharding_index_catalog_util.cpp b/src/mongo/db/s/sharding_index_catalog_util.cpp index 306c5c85ad32d..47def14ed5b48 100644 --- a/src/mongo/db/s/sharding_index_catalog_util.cpp +++ b/src/mongo/db/s/sharding_index_catalog_util.cpp @@ -31,17 +31,38 @@ #include "mongo/db/s/sharding_index_catalog_util.h" -#include "mongo/db/dbdirectclient.h" +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" #include "mongo/db/s/participant_block_gen.h" #include "mongo/db/s/sharded_index_catalog_commands_gen.h" #include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_util.h" -#include "mongo/db/transaction/transaction_api.h" -#include "mongo/db/transaction/transaction_participant_resource_yielder.h" +#include "mongo/db/shard_id.h" #include "mongo/db/vector_clock.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/functional.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" namespace mongo { namespace sharding_index_catalog_util { @@ -67,7 +88,7 @@ BSONObj getCriticalSectionReasonForIndexCommit(const NamespaceString& nss, const std::string& 
name) { return BSON("command" << "commitIndexCatalogEntry" - << "nss" << nss.toString() << IndexCatalogType::kNameFieldName << name); + << "nss" << nss.toStringForErrorMsg() << IndexCatalogType::kNameFieldName << name); } /** @@ -112,7 +133,8 @@ void coordinateIndexCatalogModificationAcrossCollectionShards( Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfoWithPlacementRefresh( opCtx, userCollectionNss)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "collection " << userCollectionNss << " is not sharded", + str::stream() << "collection " << userCollectionNss.toStringForErrorMsg() + << " is not sharded", routingInfo.isSharded()); std::set shardIdsSet; routingInfo.getAllShardIds(&shardIdsSet); diff --git a/src/mongo/db/s/sharding_index_catalog_util.h b/src/mongo/db/s/sharding_index_catalog_util.h index f12be516a9522..c91e658b1b0d0 100644 --- a/src/mongo/db/s/sharding_index_catalog_util.h +++ b/src/mongo/db/s/sharding_index_catalog_util.h @@ -28,9 +28,17 @@ */ #pragma once +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/executor/task_executor.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/sharding_initialization_mongod.cpp b/src/mongo/db/s/sharding_initialization_mongod.cpp index 5b3ff46f4c8f2..3a710f49896f8 100644 --- a/src/mongo/db/s/sharding_initialization_mongod.cpp +++ b/src/mongo/db/s/sharding_initialization_mongod.cpp @@ -28,45 +28,98 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/sharding_initialization_mongod.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/client/connection_string.h" +#include "mongo/client/connpool.h" #include "mongo/client/global_conn_pool.h" #include "mongo/client/remote_command_targeter_factory_impl.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/db/audit.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" +#include "mongo/db/client.h" #include "mongo/db/client_metadata_propagation_egress_hook.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keys_collection_client.h" #include "mongo/db/keys_collection_client_direct.h" #include "mongo/db/keys_collection_client_sharded.h" #include "mongo/db/logical_time_validator.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/update.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/config/sharding_catalog_manager.h" #include "mongo/db/s/read_only_catalog_cache_loader.h" #include "mongo/db/s/shard_local.h" #include 
"mongo/db/s/shard_server_catalog_cache_loader.h" +#include "mongo/db/s/sharding_initialization_mongod.h" +#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/transaction_coordinator_service.h" #include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/vector_clock_metadata_hook.h" #include "mongo/executor/network_interface_factory.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_impl.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/client/shard_factory.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/client/shard_remote.h" #include "mongo/s/client/sharding_connection_hook.h" #include "mongo/s/config_server_catalog_cache_loader.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/sharding_initialization.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -230,6 +283,13 @@ class ShardingReplicaSetChangeListener final ThreadClient tc("updateShardIdentityConfigString", _serviceContext); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = tc->makeOperationContext(); ShardingInitializationMongoD::updateShardIdentityConfigString(opCtx.get(), update); } catch (const ExceptionForCat& e) { @@ -316,7 +376,7 @@ bool ShardingInitializationMongoD::initializeShardingAwarenessIfNeeded(Operation // In sharded queryableBackupMode mode, we ignore the shardIdentity document on disk and instead // *require* a shardIdentity document to be passed through --overrideShardIdentity if (storageGlobalParams.queryableBackupMode) { - if (serverGlobalParams.clusterRole.exclusivelyHasShardRole()) { + if (serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer)) { uassert(ErrorCodes::InvalidOptions, "If started with --shardsvr in queryableBackupMode, a shardIdentity document " "must be provided through --overrideShardIdentity", @@ -353,7 +413,7 @@ bool ShardingInitializationMongoD::initializeShardingAwarenessIfNeeded(Operation "queryableBackupMode. 
If not in queryableBackupMode, you can edit " "the shardIdentity document by starting the server *without* " "--shardsvr, manually updating the shardIdentity document in the " - << NamespaceString::kServerConfigurationNamespace.toString() + << NamespaceString::kServerConfigurationNamespace.toStringForErrorMsg() << " collection, and restarting the server with --shardsvr.", serverGlobalParams.overrideShardIdentity.isEmpty()); @@ -430,7 +490,6 @@ void ShardingInitializationMongoD::initializeFromShardIdentity( const auto& configSvrConnStr = shardIdentity.getConfigsvrConnectionString(); auto const shardingState = ShardingState::get(opCtx); - auto const shardRegistry = Grid::get(opCtx)->shardRegistry(); hangDuringShardingInitialization.pauseWhileSet(); @@ -446,6 +505,7 @@ void ShardingInitializationMongoD::initializeFromShardIdentity( // If run on a config server, we may not know our connection string yet. if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { + auto const shardRegistry = Grid::get(opCtx)->shardRegistry(); auto prevConfigsvrConnStr = shardRegistry->getConfigServerConnectionString(); uassert( 40373, @@ -499,8 +559,14 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString( write_ops::UpdateModification::parseFromClassicUpdate(updateObj)); try { - AutoGetCollection autoColl(opCtx, NamespaceString::kServerConfigurationNamespace, MODE_IX); - auto result = update(opCtx, autoColl.ensureDbExists(opCtx), updateReq); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::kServerConfigurationNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + auto result = update(opCtx, collection, updateReq); if (result.numMatched == 0) { LOGV2_WARNING(22076, "Failed to update config server connection string of shard identity " @@ -528,10 +594,7 @@ void ShardingInitializationMongoD::updateShardIdentityConfigString( } void ShardingInitializationMongoD::onSetCurrentConfig(OperationContext* opCtx) { - if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || - !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafeAtStartup()) { - // Only config servers capable of acting as a shard set up the config shard in their shard - // registry with a real connection string. + if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return; } @@ -556,37 +619,29 @@ void initializeGlobalShardingStateForConfigServerIfNeeded(OperationContext* opCt const auto service = opCtx->getServiceContext(); - ShardingInitializationMongoD::get(opCtx)->installReplicaSetChangeListener(service); - auto configCS = []() -> boost::optional { - if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafeAtStartup()) { - // When the config server can operate as a shard, it sets up a ShardRemote for the - // config shard, which is created later after loading the local replica set config. - return boost::none; - } - return {ConnectionString::forLocal()}; + // When the config server can operate as a shard, it sets up a ShardRemote for the + // config shard, which is created later after loading the local replica set config. + return boost::none; }(); - if (gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafeAtStartup()) { - CatalogCacheLoader::set(service, - std::make_unique( - std::make_unique())); - - // This is only called in startup when there shouldn't be replication state changes, but to - // be safe we take the RSTL anyway. 
- repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX); - const auto replCoord = repl::ReplicationCoordinator::get(opCtx); - bool isReplSet = - replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet; - bool isStandaloneOrPrimary = - !isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY); - CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary); - } else { - CatalogCacheLoader::set(service, std::make_unique()); - } + CatalogCacheLoader::set(service, + std::make_unique( + std::make_unique())); + + // This is only called in startup when there shouldn't be replication state changes, but to + // be safe we take the RSTL anyway. + repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX); + const auto replCoord = repl::ReplicationCoordinator::get(opCtx); + bool isReplSet = replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet; + bool isStandaloneOrPrimary = + !isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY); + CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary); initializeGlobalShardingStateForMongoD(opCtx, configCS); + ShardingInitializationMongoD::get(opCtx)->installReplicaSetChangeListener(service); + // ShardLocal to use for explicitly local commands on the config server. auto localConfigShard = Grid::get(opCtx)->shardRegistry()->createLocalConfigShard(); auto localCatalogClient = std::make_unique(localConfigShard); @@ -692,8 +747,6 @@ void ShardingInitializationMongoD::_initializeShardingEnvironmentOnShardServer( OperationContext* opCtx, const ShardIdentity& shardIdentity) { auto const service = opCtx->getServiceContext(); - installReplicaSetChangeListener(service); - // Determine primary/secondary/standalone state in order to properly initialize sharding // components. const auto replCoord = repl::ReplicationCoordinator::get(opCtx); @@ -701,7 +754,7 @@ void ShardingInitializationMongoD::_initializeShardingEnvironmentOnShardServer( bool isStandaloneOrPrimary = !isReplSet || (replCoord->getMemberState() == repl::MemberState::RS_PRIMARY); - if (serverGlobalParams.clusterRole.exclusivelyHasShardRole()) { + if (serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer)) { // A config server added as a shard would have already set this up at startup. if (storageGlobalParams.queryableBackupMode) { CatalogCacheLoader::set(service, std::make_unique()); @@ -714,6 +767,24 @@ void ShardingInitializationMongoD::_initializeShardingEnvironmentOnShardServer( initializeGlobalShardingStateForMongoD(opCtx, {shardIdentity.getConfigsvrConnectionString()}); + installReplicaSetChangeListener(service); + + // Reset the shard registry config connection string in case it missed the replica set + // monitor notification. Config server does not need to do this since it gets the connection + // string directly from the replication coordinator.
+ auto configShardConnStr = Grid::get(opCtx->getServiceContext()) + ->shardRegistry() + ->getConfigServerConnectionString(); + if (configShardConnStr.type() == ConnectionString::ConnectionType::kReplicaSet) { + ConnectionString rsMonitorConfigConnStr( + ReplicaSetMonitor::get(configShardConnStr.getSetName())->getServerAddress(), + ConnectionString::ConnectionType::kReplicaSet); + Grid::get(opCtx->getServiceContext()) + ->shardRegistry() + ->updateReplSetHosts(rsMonitorConfigConnStr, + ShardRegistry::ConnectionStringUpdateType::kConfirmed); + } + CatalogCacheLoader::get(opCtx).initializeReplicaSetRole(isStandaloneOrPrimary); // Start transaction coordinator service only if the node is the primary of a replica set. diff --git a/src/mongo/db/s/sharding_initialization_mongod.h b/src/mongo/db/s/sharding_initialization_mongod.h index d11d6deaa3215..982b67b5666be 100644 --- a/src/mongo/db/s/sharding_initialization_mongod.h +++ b/src/mongo/db/s/sharding_initialization_mongod.h @@ -29,13 +29,22 @@ #pragma once +#include #include +#include +#include +#include #include "mongo/base/string_data.h" +#include "mongo/client/connection_string.h" #include "mongo/client/replica_set_change_notifier.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/s/add_shard_cmd_gen.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/type_shard_identity.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/s/sharding_initialization.h" namespace mongo { diff --git a/src/mongo/db/s/sharding_initialization_mongod_test.cpp b/src/mongo/db/s/sharding_initialization_mongod_test.cpp index 2389f2982678f..6dbaba55869ce 100644 --- a/src/mongo/db/s/sharding_initialization_mongod_test.cpp +++ b/src/mongo/db/s/sharding_initialization_mongod_test.cpp @@ -27,27 +27,49 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_mock.h" -#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/collection_sharding_state_factory_shard.h" #include "mongo/db/s/collection_sharding_state_factory_standalone.h" -#include "mongo/db/s/op_observer_sharding_impl.h" +#include "mongo/db/s/migration_chunk_cloner_source_op_observer.h" #include "mongo/db/s/shard_server_catalog_cache_loader.h" #include "mongo/db/s/shard_server_op_observer.h" -#include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/sharding_initialization_mongod.h" +#include "mongo/db/s/sharding_mongod_test_fixture.h" +#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/type_shard_identity.h" #include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_impl.h" +#include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/config_server_catalog_cache_loader.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace { @@ -148,7 +170,8 @@ class ScopedSetStandaloneMode { _serviceContext->setOpObserver([&] { auto opObserver = std::make_unique(); opObserver->addObserver( - std::make_unique(std::make_unique())); + std::make_unique(std::make_unique())); + opObserver->addObserver(std::make_unique()); opObserver->addObserver(std::make_unique()); return opObserver; }()); diff --git a/src/mongo/db/s/sharding_initialization_op_observer_test.cpp b/src/mongo/db/s/sharding_initialization_op_observer_test.cpp index c41e496d3e420..ecc346e05028b 100644 --- a/src/mongo/db/s/sharding_initialization_op_observer_test.cpp +++ b/src/mongo/db/s/sharding_initialization_op_observer_test.cpp @@ -27,23 +27,34 @@ * it in the license file. 
*/ -#include "mongo/client/remote_command_targeter_mock.h" +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/client/connection_string.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/op_observer/op_observer_impl.h" -#include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/s/config_server_op_observer.h" -#include "mongo/db/s/shard_server_catalog_cache_loader.h" -#include "mongo/db/s/shard_server_op_observer.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/s/add_shard_cmd_gen.h" #include "mongo/db/s/sharding_initialization_mongod.h" #include "mongo/db/s/sharding_mongod_test_fixture.h" +#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/type_shard_identity.h" #include "mongo/db/server_options.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/config_server_catalog_cache_loader.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/sharding_logging.cpp b/src/mongo/db/s/sharding_logging.cpp index 5db35d417fd7f..4a6a4324f6106 100644 --- a/src/mongo/db/s/sharding_logging.cpp +++ b/src/mongo/db/s/sharding_logging.cpp @@ -28,17 +28,38 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/s/sharding_logging.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/s/sharding_logging.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/network_interface.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/type_changelog.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -187,10 +208,10 @@ Status ShardingLogging::_log(OperationContext* opCtx, Status result = catalogClient->insertConfigDocument(opCtx, nss, changeLogBSON, writeConcern); if (!result.isOK()) { - LOGV2_ERROR(5538900, - "Error encountered while logging config change", - "changeDocument"_attr = changeLog, - "error"_attr = redact(result)); + LOGV2_WARNING(5538900, + "Error encountered while logging config change", + "changeDocument"_attr = changeLog, + "error"_attr = 
redact(result)); } return result; diff --git a/src/mongo/db/s/sharding_logging.h b/src/mongo/db/s/sharding_logging.h index b9467be2e5512..fbd495a152be6 100644 --- a/src/mongo/db/s/sharding_logging.h +++ b/src/mongo/db/s/sharding_logging.h @@ -29,7 +29,16 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/platform/atomic_word.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/client/shard.h" diff --git a/src/mongo/db/s/sharding_logging_test.cpp b/src/mongo/db/s/sharding_logging_test.cpp index 039e99b3a0b4b..0a65177290c36 100644 --- a/src/mongo/db/s/sharding_logging_test.cpp +++ b/src/mongo/db/s/sharding_logging_test.cpp @@ -28,20 +28,22 @@ */ -#include "mongo/platform/basic.h" - -#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/sharding_logging.h" -#include "mongo/executor/task_executor.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" #include "mongo/s/catalog/sharding_catalog_client.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/stdx/chrono.h" -#include "mongo/stdx/future.h" -#include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/text.h" // IWYU pragma: keep #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/sharding_migration_critical_section.cpp b/src/mongo/db/s/sharding_migration_critical_section.cpp index 9c415c0745202..e52be9a03ee7a 100644 --- a/src/mongo/db/s/sharding_migration_critical_section.cpp +++ b/src/mongo/db/s/sharding_migration_critical_section.cpp @@ -27,9 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include +#include #include "mongo/db/s/sharding_migration_critical_section.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/s/sharding_migration_critical_section.h b/src/mongo/db/s/sharding_migration_critical_section.h index 43d1b24578e4c..9327ea0ae0246 100644 --- a/src/mongo/db/s/sharding_migration_critical_section.h +++ b/src/mongo/db/s/sharding_migration_critical_section.h @@ -29,10 +29,14 @@ #pragma once +#include #include +#include +#include #include "mongo/bson/bsonobj.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.cpp b/src/mongo/db/s/sharding_mongod_test_fixture.cpp index 76dca0f5e14cf..377661c2fce01 100644 --- a/src/mongo/db/s/sharding_mongod_test_fixture.cpp +++ b/src/mongo/db/s/sharding_mongod_test_fixture.cpp @@ -27,46 +27,55 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/sharding_mongod_test_fixture.h" - -#include +#include +#include +#include #include +#include #include #include "mongo/base/checked_cast.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/replica_set_monitor.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" -#include "mongo/db/commands.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" -#include "mongo/db/query/cursor_response.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" -#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_consistency_markers_mock.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/replication_recovery_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/s/config_server_op_observer.h" -#include "mongo/db/s/op_observer_sharding_impl.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/shard_local.h" #include "mongo/db/s/shard_server_op_observer.h" +#include "mongo/db/s/sharding_mongod_test_fixture.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" #include "mongo/db/storage/snapshot_manager.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor_pool.h" +#include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/catalog/sharding_catalog_client.h" -#include "mongo/s/catalog/type_collection.h" -#include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/catalog_cache_loader.h" #include "mongo/s/client/shard_factory.h" @@ -74,8 +83,9 @@ #include "mongo/s/client/shard_remote.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_cursor_manager.h" -#include "mongo/util/clock_source_mock.h" -#include "mongo/util/tick_source_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/version/releases.h" namespace mongo { @@ -274,12 +284,14 @@ void ShardingMongodTestFixture::setUp() { void ShardingMongodTestFixture::tearDown() { ReplicaSetMonitor::cleanup(); - if (Grid::get(operationContext())->getExecutorPool() && !_executorPoolShutDown) { - Grid::get(operationContext())->getExecutorPool()->shutdownAndJoin(); - } + if (Grid::get(operationContext())->isInitialized()) { + if (Grid::get(operationContext())->getExecutorPool() && 
!_executorPoolShutDown) { + Grid::get(operationContext())->getExecutorPool()->shutdownAndJoin(); + } - if (Grid::get(operationContext())->shardRegistry()) { - Grid::get(operationContext())->shardRegistry()->shutdown(); + if (Grid::get(operationContext())->shardRegistry()) { + Grid::get(operationContext())->shardRegistry()->shutdown(); + } } CollectionShardingStateFactory::clear(getServiceContext()); @@ -334,7 +346,7 @@ void ShardingMongodTestFixture::setupOpObservers() { auto opObserverRegistry = checked_cast(getServiceContext()->getOpObserver()); opObserverRegistry->addObserver( - std::make_unique(std::make_unique())); + std::make_unique(std::make_unique())); opObserverRegistry->addObserver(std::make_unique()); } diff --git a/src/mongo/db/s/sharding_mongod_test_fixture.h b/src/mongo/db/s/sharding_mongod_test_fixture.h index 16b208ba1234d..91573c8c7f3ae 100644 --- a/src/mongo/db/s/sharding_mongod_test_fixture.h +++ b/src/mongo/db/s/sharding_mongod_test_fixture.h @@ -29,9 +29,24 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/grid.h" #include "mongo/s/sharding_test_fixture_common.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/db/s/sharding_recovery_service.cpp b/src/mongo/db/s/sharding_recovery_service.cpp index 0187558302f61..095a1ba796361 100644 --- a/src/mongo/db/s/sharding_recovery_service.cpp +++ b/src/mongo/db/s/sharding_recovery_service.cpp @@ -29,26 +29,69 @@ #include +#include #include - -#include "mongo/db/s/sharding_recovery_service.h" - -#include "mongo/db/db_raii.h" +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source_lookup.h" #include "mongo/db/pipeline/document_source_match.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_critical_section_document_gen.h" #include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/database_sharding_state.h" #include 
"mongo/db/s/shard_authoritative_catalog_gen.h" -#include "mongo/db/s/sharding_migration_critical_section.h" -#include "mongo/db/server_options.h" +#include "mongo/db/s/sharding_recovery_service.h" +#include "mongo/db/write_concern.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" #include "mongo/s/catalog/type_collection.h" -#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/catalog/type_collection_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -144,8 +187,7 @@ void ShardingRecoveryService::acquireRecoverableCriticalSectionBlockWrites( OperationContext* opCtx, const NamespaceString& nss, const BSONObj& reason, - const WriteConcernOptions& writeConcern, - bool allowViews) { + const WriteConcernOptions& writeConcern) { LOGV2_DEBUG(5656600, 3, "Acquiring recoverable critical section blocking writes", @@ -156,7 +198,7 @@ void ShardingRecoveryService::acquireRecoverableCriticalSectionBlockWrites( tassert(7032360, fmt::format("Can't acquire recoverable critical section for collection '{}' with " "reason '{}' while holding locks", - nss.toString(), + nss.toStringForErrorMsg(), reason.toString()), !opCtx->lockState()->isLocked()); @@ -164,23 +206,20 @@ void ShardingRecoveryService::acquireRecoverableCriticalSectionBlockWrites( Lock::GlobalLock lk(opCtx, MODE_IX); boost::optional dbLock; boost::optional collLock; - if (nsIsDbOnly(nss.ns())) { + if (nsIsDbOnly(NamespaceStringUtil::serialize(nss))) { dbLock.emplace(opCtx, nss.dbName(), MODE_S); } else { - // TODO SERVER-68084 add the AutoGetCollectionViewMode::kViewsPermitted parameter to - // construct collLock. collLock.emplace(opCtx, nss, MODE_S, - (allowViews ? 
AutoGetCollection::Options{}.viewMode( - auto_get_collection::ViewMode::kViewsPermitted) - : AutoGetCollection::Options{})); + AutoGetCollection::Options{}.viewMode( + auto_get_collection::ViewMode::kViewsPermitted)); } DBDirectClient dbClient(opCtx); FindCommandRequest findRequest{NamespaceString::kCollectionCriticalSectionsNamespace}; - findRequest.setFilter( - BSON(CollectionCriticalSectionDocument::kNssFieldName << nss.toString())); + findRequest.setFilter(BSON(CollectionCriticalSectionDocument::kNssFieldName + << NamespaceStringUtil::serialize(nss))); auto cursor = dbClient.find(std::move(findRequest)); // if there is a doc with the same nss -> in order to not fail it must have the same @@ -194,7 +233,7 @@ void ShardingRecoveryService::acquireRecoverableCriticalSectionBlockWrites( fmt::format("Trying to acquire a critical section blocking writes for " "namespace '{}' and reason '{}' but it is already taken by another " "operation with different reason '{}'", - nss.toString(), + nss.toStringForErrorMsg(), reason.toString(), collCSDoc.getReason().toString()), collCSDoc.getReason().woCompare(reason) == 0); @@ -233,11 +272,12 @@ void ShardingRecoveryService::acquireRecoverableCriticalSectionBlockWrites( std::string unusedErrmsg; batchedResponse.parseBSON(commandReply, &unusedErrmsg); tassert(7032369, - fmt::format("Insert did not add any doc to collection '{}' for namespace '{}' " - "and reason '{}'", - nss.toString(), - reason.toString(), - NamespaceString::kCollectionCriticalSectionsNamespace.toString()), + fmt::format( + "Insert did not add any doc to collection '{}' for namespace '{}' " + "and reason '{}'", + nss.toStringForErrorMsg(), + reason.toString(), + NamespaceString::kCollectionCriticalSectionsNamespace.toStringForErrorMsg()), batchedResponse.getN() > 0); } @@ -257,8 +297,7 @@ void ShardingRecoveryService::promoteRecoverableCriticalSectionToBlockAlsoReads( OperationContext* opCtx, const NamespaceString& nss, const BSONObj& reason, - const WriteConcernOptions& writeConcern, - bool allowViews) { + const WriteConcernOptions& writeConcern) { LOGV2_DEBUG(5656603, 3, "Promoting recoverable critical section to also block reads", @@ -269,37 +308,34 @@ void ShardingRecoveryService::promoteRecoverableCriticalSectionToBlockAlsoReads( tassert(7032364, fmt::format("Can't promote recoverable critical section for collection '{}' with " "reason '{}' while holding locks", - nss.toString(), + nss.toStringForErrorMsg(), reason.toString()), !opCtx->lockState()->isLocked()); { boost::optional dbLock; boost::optional collLock; - if (nsIsDbOnly(nss.ns())) { + if (nsIsDbOnly(NamespaceStringUtil::serialize(nss))) { dbLock.emplace(opCtx, nss.dbName(), MODE_X); } else { - // TODO SERVER-68084 add the AutoGetCollectionViewMode::kViewsPermitted parameter to - // construct collLock. collLock.emplace(opCtx, nss, MODE_X, - (allowViews ? 
AutoGetCollection::Options{}.viewMode( - auto_get_collection::ViewMode::kViewsPermitted) - : AutoGetCollection::Options{})); + AutoGetCollection::Options{}.viewMode( + auto_get_collection::ViewMode::kViewsPermitted)); } DBDirectClient dbClient(opCtx); FindCommandRequest findRequest{NamespaceString::kCollectionCriticalSectionsNamespace}; - findRequest.setFilter( - BSON(CollectionCriticalSectionDocument::kNssFieldName << nss.toString())); + findRequest.setFilter(BSON(CollectionCriticalSectionDocument::kNssFieldName + << NamespaceStringUtil::serialize(nss))); auto cursor = dbClient.find(std::move(findRequest)); tassert(7032361, fmt::format( "Trying to acquire a critical section blocking reads for namespace '{}' and " "reason '{}' but the critical section wasn't acquired first blocking writers.", - nss.toString(), + nss.toStringForErrorMsg(), reason.toString()), cursor->more()); BSONObj bsonObj = cursor->next(); @@ -311,7 +347,7 @@ void ShardingRecoveryService::promoteRecoverableCriticalSectionToBlockAlsoReads( "Trying to acquire a critical section blocking reads for namespace '{}' and " "reason " "'{}' but it is already taken by another operation with different reason '{}'", - nss.toString(), + nss.toStringForErrorMsg(), reason.toString(), collCSDoc.getReason().toString()), collCSDoc.getReason().woCompare(reason) == 0); @@ -338,9 +374,10 @@ void ShardingRecoveryService::promoteRecoverableCriticalSectionToBlockAlsoReads( // - Otherwise this call will fail and the CS won't be advanced (neither persisted nor // in-mem) auto commandResponse = dbClient.runCommand([&] { - const auto query = BSON( - CollectionCriticalSectionDocument::kNssFieldName - << nss.toString() << CollectionCriticalSectionDocument::kReasonFieldName << reason); + const auto query = + BSON(CollectionCriticalSectionDocument::kNssFieldName + << NamespaceStringUtil::serialize(nss) + << CollectionCriticalSectionDocument::kReasonFieldName << reason); const auto update = BSON( "$set" << BSON(CollectionCriticalSectionDocument::kBlockReadsFieldName << true)); @@ -359,13 +396,14 @@ void ShardingRecoveryService::promoteRecoverableCriticalSectionToBlockAlsoReads( BatchedCommandResponse batchedResponse; std::string unusedErrmsg; batchedResponse.parseBSON(commandReply, &unusedErrmsg); - tassert(7032363, - fmt::format("Update did not modify any doc from collection '{}' for namespace '{}' " - "and reason '{}'", - NamespaceString::kCollectionCriticalSectionsNamespace.toString(), - nss.toString(), - reason.toString()), - batchedResponse.getNModified() > 0); + tassert( + 7032363, + fmt::format("Update did not modify any doc from collection '{}' for namespace '{}' " + "and reason '{}'", + NamespaceString::kCollectionCriticalSectionsNamespace.toStringForErrorMsg(), + nss.toStringForErrorMsg(), + reason.toString()), + batchedResponse.getNModified() > 0); } WriteConcernResult ignoreResult; @@ -385,8 +423,7 @@ void ShardingRecoveryService::releaseRecoverableCriticalSection( const NamespaceString& nss, const BSONObj& reason, const WriteConcernOptions& writeConcern, - bool throwIfReasonDiffers, - bool allowViews) { + bool throwIfReasonDiffers) { LOGV2_DEBUG(5656606, 3, "Releasing recoverable critical section", @@ -397,30 +434,27 @@ void ShardingRecoveryService::releaseRecoverableCriticalSection( tassert(7032365, fmt::format("Can't release recoverable critical section for collection '{}' with " "reason '{}' while holding locks", - nss.toString(), + nss.toStringForErrorMsg(), reason.toString()), !opCtx->lockState()->isLocked()); { boost::optional dbLock; 
boost::optional collLock; - if (nsIsDbOnly(nss.ns())) { + if (nsIsDbOnly(NamespaceStringUtil::serialize(nss))) { dbLock.emplace(opCtx, nss.dbName(), MODE_X); } else { - // TODO SERVER-68084 add the AutoGetCollectionViewMode::kViewsPermitted parameter to - // construct collLock. collLock.emplace(opCtx, nss, MODE_X, - (allowViews ? AutoGetCollection::Options{}.viewMode( - auto_get_collection::ViewMode::kViewsPermitted) - : AutoGetCollection::Options{})); + AutoGetCollection::Options{}.viewMode( + auto_get_collection::ViewMode::kViewsPermitted)); } DBDirectClient dbClient(opCtx); - const auto queryNss = - BSON(CollectionCriticalSectionDocument::kNssFieldName << nss.toString()); + const auto queryNss = BSON(CollectionCriticalSectionDocument::kNssFieldName + << NamespaceStringUtil::serialize(nss)); FindCommandRequest findRequest{NamespaceString::kCollectionCriticalSectionsNamespace}; findRequest.setFilter(queryNss); auto cursor = dbClient.find(std::move(findRequest)); @@ -456,7 +490,7 @@ void ShardingRecoveryService::releaseRecoverableCriticalSection( tassert(7032366, fmt::format("Trying to release a critical for namespace '{}' and reason '{}' but " "it is already taken by another operation with different reason '{}'", - nss.toString(), + nss.toStringForErrorMsg(), reason.toString(), collCSDoc.getReason().toString()), !isDifferentReason); @@ -489,13 +523,14 @@ void ShardingRecoveryService::releaseRecoverableCriticalSection( BatchedCommandResponse batchedResponse; std::string unusedErrmsg; batchedResponse.parseBSON(commandReply, &unusedErrmsg); - tassert(7032367, - fmt::format("Delete did not remove any doc from collection '{}' for namespace '{}' " - "and reason '{}'", - NamespaceString::kCollectionCriticalSectionsNamespace.toString(), - nss.toString(), - reason.toString()), - batchedResponse.getN() > 0); + tassert( + 7032367, + fmt::format("Delete did not remove any doc from collection '{}' for namespace '{}' " + "and reason '{}'", + NamespaceString::kCollectionCriticalSectionsNamespace.toStringForErrorMsg(), + nss.toStringForErrorMsg(), + reason.toString()), + batchedResponse.getN() > 0); } WriteConcernResult ignoreResult; @@ -536,7 +571,7 @@ void ShardingRecoveryService::recoverRecoverableCriticalSections(OperationContex store.forEach(opCtx, BSONObj{}, [&opCtx](const CollectionCriticalSectionDocument& doc) { const auto& nss = doc.getNss(); { - if (nsIsDbOnly(nss.ns())) { + if (nsIsDbOnly(NamespaceStringUtil::serialize(nss))) { AutoGetDb dbLock(opCtx, nss.dbName(), MODE_X); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, nss.dbName()); diff --git a/src/mongo/db/s/sharding_recovery_service.h b/src/mongo/db/s/sharding_recovery_service.h index 3ba7a40f6e992..02744cbe1ee2d 100644 --- a/src/mongo/db/s/sharding_recovery_service.h +++ b/src/mongo/db/s/sharding_recovery_service.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" @@ -63,18 +67,11 @@ class ShardingRecoveryService : public ReplicaSetAwareServiceShardSvr +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/database_name.h" +#include 
"mongo/db/commands/create_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_critical_section_document_gen.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_server_op_observer.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_recovery_service.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -49,11 +74,12 @@ class ShardingRecoveryServiceTest : public ShardServerTestFixture { inline static const NamespaceString collNss = NamespaceString::createNamespaceString_forTest("TestDB", "TestCollection"); inline static const BSONObj collOpReason = - BSON("Dummy operation on collection" << collNss.ns()); + BSON("Dummy operation on collection" << collNss.ns_forTest()); inline static const NamespaceString dbName = NamespaceString::createNamespaceString_forTest("TestDB"); - inline static const BSONObj dbOpReason = BSON("Dummy operation on database" << dbName.ns()); + inline static const BSONObj dbOpReason = + BSON("Dummy operation on database" << dbName.ns_forTest()); inline static const BSONObj differentOpReason = BSON("Yet another dummy operation" << true); @@ -73,7 +99,8 @@ class ShardingRecoveryServiceTest : public ShardServerTestFixture { boost::optional readCriticalSectionDocument( const NamespaceString& nss, const BSONObj& reason) { FindCommandRequest findOp(NamespaceString::kCollectionCriticalSectionsNamespace); - findOp.setFilter(BSON(CollectionCriticalSectionDocument::kNssFieldName << nss.toString())); + findOp.setFilter( + BSON(CollectionCriticalSectionDocument::kNssFieldName << nss.toString_forTest())); DBDirectClient dbClient(opCtx()); auto cursor = dbClient.find(std::move(findOp)); @@ -109,10 +136,11 @@ class ShardingRecoveryServiceTest : public ShardServerTestFixture { ASSERT_NE(doc->getBlockReads(), blockReads); DBDirectClient dbClient(opCtx()); - dbClient.update(NamespaceString::kCollectionCriticalSectionsNamespace, - BSON(CollectionCriticalSectionDocument::kNssFieldName << nss.toString()), - BSON("$set" << BSON(CollectionCriticalSectionDocument::kBlockReadsFieldName - << blockReads))); + dbClient.update( + NamespaceString::kCollectionCriticalSectionsNamespace, + BSON(CollectionCriticalSectionDocument::kNssFieldName << nss.toString_forTest()), + BSON("$set" << BSON(CollectionCriticalSectionDocument::kBlockReadsFieldName + << blockReads))); } void deleteReadCriticalSectionDocument(const NamespaceString& nss, const BSONObj& reason) { @@ -120,13 +148,14 @@ class ShardingRecoveryServiceTest : public ShardServerTestFixture { ASSERT(readCriticalSectionDocument(nss, reason)); DBDirectClient dbClient(opCtx()); - dbClient.remove(NamespaceString::kCollectionCriticalSectionsNamespace, - BSON(CollectionCriticalSectionDocument::kNssFieldName << 
nss.toString()), - false /* removeMany */); + dbClient.remove( + NamespaceString::kCollectionCriticalSectionsNamespace, + BSON(CollectionCriticalSectionDocument::kNssFieldName << nss.toString_forTest()), + false /* removeMany */); } void assertCriticalSectionCatchUpEnteredInMemory(const NamespaceString& nss) { - if (nsIsDbOnly(nss.ns())) { + if (nsIsDbOnly(nss.ns_forTest())) { AutoGetDb db(opCtx(), nss.dbName(), MODE_IS); const auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireShared(opCtx(), nss.dbName()); @@ -144,7 +173,7 @@ class ShardingRecoveryServiceTest : public ShardServerTestFixture { } void assertCriticalSectionCommitEnteredInMemory(const NamespaceString& nss) { - if (nsIsDbOnly(nss.ns())) { + if (nsIsDbOnly(nss.ns_forTest())) { AutoGetDb db(opCtx(), nss.dbName(), MODE_IS); const auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireShared(opCtx(), nss.dbName()); @@ -161,7 +190,7 @@ class ShardingRecoveryServiceTest : public ShardServerTestFixture { } void assertCriticalSectionLeftInMemory(const NamespaceString& nss) { - if (nsIsDbOnly(nss.ns())) { + if (nsIsDbOnly(nss.ns_forTest())) { AutoGetDb db(opCtx(), nss.dbName(), MODE_IS); const auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireShared(opCtx(), nss.dbName()); @@ -618,8 +647,9 @@ TEST_F(ShardingRecoveryServiceTestOnSecondary, BlockAndUnblockOperationsOnDataba { WriteUnitOfWork wuow(opCtx()); AutoGetDb db(opCtx(), dbName.dbName(), MODE_IX); - opObserver().aboutToDelete(opCtx(), criticalSectionColl(), doc.toBSON()); - opObserver().onDelete(opCtx(), criticalSectionColl(), kUninitializedStmtId, {}); + OplogDeleteEntryArgs args; + opObserver().aboutToDelete(opCtx(), criticalSectionColl(), doc.toBSON(), &args); + opObserver().onDelete(opCtx(), criticalSectionColl(), kUninitializedStmtId, args); wuow.commit(); } @@ -686,8 +716,9 @@ TEST_F(ShardingRecoveryServiceTestOnSecondary, BlockAndUnblockOperationsOnCollec { WriteUnitOfWork wuow(opCtx()); AutoGetCollection coll(opCtx(), collNss, MODE_IX); - opObserver().aboutToDelete(opCtx(), criticalSectionColl(), doc.toBSON()); - opObserver().onDelete(opCtx(), criticalSectionColl(), kUninitializedStmtId, {}); + OplogDeleteEntryArgs args; + opObserver().aboutToDelete(opCtx(), criticalSectionColl(), doc.toBSON(), &args); + opObserver().onDelete(opCtx(), criticalSectionColl(), kUninitializedStmtId, args); wuow.commit(); } diff --git a/src/mongo/db/s/sharding_runtime_d_params.idl b/src/mongo/db/s/sharding_runtime_d_params.idl index af5722d6f6f27..424774e06e40b 100644 --- a/src/mongo/db/s/sharding_runtime_d_params.idl +++ b/src/mongo/db/s/sharding_runtime_d_params.idl @@ -68,6 +68,16 @@ server_parameters: gte: 0 default: 20 + rangeDeleterHighPriority: + description: + "By default, this parameter is set to 'false' meaning that range deletions have a + lower priority than user operations. Setting this parameter to true prioritizes + range deletions over user operations." + set_at: [ startup, runtime ] + cpp_vartype: AtomicWord + cpp_varname: rangeDeleterHighPriority + default: false + receiveChunkWaitForRangeDeleterTimeoutMS: description: >- Amount of time in milliseconds an incoming migration will wait for an intersecting range @@ -116,8 +126,8 @@ server_parameters: The maximum percentage of untrasferred chunk mods at the end of a catch up iteration that may be deferred to the next phase of the migration protocol (where new writes get blocked). 
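Server parameters declared with `cpp_vartype: AtomicWord`, such as the new `rangeDeleterHighPriority` parameter above, are surfaced to C++ as atomic variables that callers read with `load()`. A minimal sketch, not part of the patch, assuming the usual `*_gen.h` name for the IDL-generated header:

```cpp
// Hedged sketch, not part of the diff: reading an AtomicWord-backed IDL server parameter.
#include "mongo/db/s/sharding_runtime_d_params_gen.h"  // assumed generated header name

bool shouldRunRangeDeletionsAtHighPriority() {
    // rangeDeleterHighPriority is the cpp_varname declared in the IDL above; because it is
    // settable at runtime, it is re-read atomically on each use instead of being cached.
    return rangeDeleterHighPriority.load();
}
```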
- set_at: [startup] - cpp_vartype: int + set_at: [startup, runtime] + cpp_vartype: AtomicWord cpp_varname: maxCatchUpPercentageBeforeBlockingWrites validator: gte: 0 diff --git a/src/mongo/db/s/sharding_server_status.cpp b/src/mongo/db/s/sharding_server_status.cpp index 51ade66b553c5..6121f40ea9e13 100644 --- a/src/mongo/db/s/sharding_server_status.cpp +++ b/src/mongo/db/s/sharding_server_status.cpp @@ -27,26 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/db_raii.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/s/active_migrations_registry.h" -#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/metrics/sharding_data_transform_cumulative_metrics.h" #include "mongo/db/s/range_deleter_service.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_statistics.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" #include "mongo/db/vector_clock.h" #include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -128,20 +139,14 @@ class ShardingStatisticsServerStatus final : public ServerStatusSection { ShardingStatistics::get(opCtx).report(&result); catalogCache->report(&result); - // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. The - // feature flag is used to turn on new range deleter on startup. - if (mongo::feature_flags::gRangeDeleterService.isEnabledAndIgnoreFCVUnsafe()) { - auto nRangeDeletions = [&]() { - try { - return RangeDeleterService::get(opCtx)->totalNumOfRegisteredTasks(); - } catch (const ExceptionFor&) { - return 0LL; - } - }(); - result.appendNumber("rangeDeleterTasks", nRangeDeletions); - } - - CollectionShardingState::appendInfoForServerStatus(opCtx, &result); + auto nRangeDeletions = [&]() { + try { + return RangeDeleterService::get(opCtx)->totalNumOfRegisteredTasks(); + } catch (const ExceptionFor&) { + return 0LL; + } + }(); + result.appendNumber("rangeDeleterTasks", nRangeDeletions); } // To calculate the number of sharded collection we simply get the number of records from @@ -162,12 +167,10 @@ class ShardingStatisticsServerStatus final : public ServerStatusSection { void reportDataTransformMetrics(OperationContext* opCtx, BSONObjBuilder* bob) const { auto sCtx = opCtx->getServiceContext(); using Metrics = ShardingDataTransformCumulativeMetrics; + Metrics::getForResharding(sCtx)->reportForServerStatus(bob); // The serverStatus command is run before the FCV is initialized so we ignore it when - // checking whether the resharding and global index features are enabled here. 
- if (resharding::gFeatureFlagResharding.isEnabledAndIgnoreFCVUnsafeAtStartup()) { - Metrics::getForResharding(sCtx)->reportForServerStatus(bob); - } + // checking whether the global index feature is enabled here. if (gFeatureFlagGlobalIndexes.isEnabledAndIgnoreFCVUnsafeAtStartup()) { Metrics::getForGlobalIndexes(sCtx)->reportForServerStatus(bob); } diff --git a/src/mongo/db/s/sharding_state.cpp b/src/mongo/db/s/sharding_state.cpp index 77bb96d16f0e5..1bcfea3c34d5f 100644 --- a/src/mongo/db/s/sharding_state.cpp +++ b/src/mongo/db/s/sharding_state.cpp @@ -28,13 +28,22 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/s/sharding_state.h" +#include +#include "mongo/db/cluster_role.h" #include "mongo/db/operation_context.h" +#include "mongo/db/s/sharding_state.h" #include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/sharding_state.h b/src/mongo/db/s/sharding_state.h index e6d477bbc074b..e4d4b1f731278 100644 --- a/src/mongo/db/s/sharding_state.h +++ b/src/mongo/db/s/sharding_state.h @@ -29,11 +29,17 @@ #pragma once +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/bson/oid.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" namespace mongo { diff --git a/src/mongo/db/s/sharding_state_command.cpp b/src/mongo/db/s/sharding_state_command.cpp index e2e52c5efc511..43d5937c67304 100644 --- a/src/mongo/db/s/sharding_state_command.cpp +++ b/src/mongo/db/s/sharding_state_command.cpp @@ -28,15 +28,23 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -62,11 +70,12 @@ class ShardingStateCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::shardingState)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::shardingState)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/db/s/sharding_state_recovery.cpp b/src/mongo/db/s/sharding_state_recovery.cpp deleted file mode 100644 index 0a73b4a91a38c..0000000000000 
--- a/src/mongo/db/s/sharding_state_recovery.cpp +++ /dev/null @@ -1,293 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - - -#include "mongo/platform/basic.h" - -#include "mongo/db/s/sharding_state_recovery.h" - -#include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/client/connection_string.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/dbhelpers.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/ops/update.h" -#include "mongo/db/ops/update_request.h" -#include "mongo/db/repl/bson_extract_optime.h" -#include "mongo/db/repl/optime.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/s/sharding_logging.h" -#include "mongo/db/s/sharding_state.h" -#include "mongo/db/vector_clock_mutable.h" -#include "mongo/db/write_concern.h" -#include "mongo/db/write_concern_options.h" -#include "mongo/logv2/log.h" -#include "mongo/s/client/shard_registry.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - - -namespace mongo { -namespace { - -const char kRecoveryDocumentId[] = "minOpTimeRecovery"; -const char kMinOpTime[] = "minOpTime"; -const char kMinOpTimeUpdaters[] = "minOpTimeUpdaters"; - -const WriteConcernOptions kMajorityWriteConcern(WriteConcernOptions::kMajority, - WriteConcernOptions::SyncMode::UNSET, - WriteConcernOptions::kWriteConcernTimeoutSharding); - -const WriteConcernOptions kLocalWriteConcern(1, - WriteConcernOptions::SyncMode::UNSET, - Milliseconds(0)); - -/** - * Encapsulates the parsing and construction of the config server min opTime recovery document. 
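For context on the file being deleted here: the RecoveryDocument class below persisted the config server's minimum opTime in admin.system.version. A sketch of the shapes it handled, reconstructed from the removed toBSON() and createChangeObj(); the timestamp and term values are illustrative:

    // Illustrative only: the persisted recovery document, as produced by toBSON().
    const BSONObj recoveryDoc =
        BSON("_id" << "minOpTimeRecovery"
                   << "minOpTime" << BSON("ts" << Timestamp(1443820968, 1) << "t" << 11LL)
                   << "minOpTimeUpdaters" << 1);

    // Illustrative only: the update produced by createChangeObj(opTime, Increment).
    // For Clear, the $inc is replaced by a $set that zeroes minOpTimeUpdaters.
    const BSONObj incrementChange =
        BSON("$set" << BSON("minOpTime" << BSON("ts" << Timestamp(1443820968, 2) << "t" << 11LL))
                    << "$inc" << BSON("minOpTimeUpdaters" << 1));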
- */ -class RecoveryDocument { -public: - enum ChangeType : int8_t { Increment = 1, Decrement = -1, Clear = 0 }; - - static StatusWith fromBSON(const BSONObj& obj) { - RecoveryDocument recDoc; - - Status status = bsonExtractOpTimeField(obj, kMinOpTime, &recDoc._minOpTime); - if (!status.isOK()) - return status; - - status = bsonExtractIntegerField(obj, kMinOpTimeUpdaters, &recDoc._minOpTimeUpdaters); - if (!status.isOK()) - return status; - - return recDoc; - } - - static BSONObj createChangeObj(repl::OpTime minOpTime, ChangeType change) { - BSONObjBuilder cmdBuilder; - - { - BSONObjBuilder setBuilder(cmdBuilder.subobjStart("$set")); - minOpTime.append(&setBuilder, kMinOpTime); - } - - if (change == Clear) { - cmdBuilder.append("$set", BSON(kMinOpTimeUpdaters << 0)); - } else { - cmdBuilder.append("$inc", BSON(kMinOpTimeUpdaters << change)); - } - - return cmdBuilder.obj(); - } - - static BSONObj getQuery() { - return BSON("_id" << kRecoveryDocumentId); - } - - BSONObj toBSON() const { - BSONObjBuilder builder; - builder.append("_id", kRecoveryDocumentId); - builder.append(kMinOpTime, _minOpTime.toBSON()); - builder.append(kMinOpTimeUpdaters, _minOpTimeUpdaters); - - return builder.obj(); - } - - repl::OpTime getMinOpTime() const { - return _minOpTime; - } - - int64_t getMinOpTimeUpdaters() const { - return _minOpTimeUpdaters; - } - -private: - RecoveryDocument() : _minOpTimeUpdaters{0} {} - repl::OpTime _minOpTime; - long long _minOpTimeUpdaters; -}; - -/** - * This method is the main entry point for updating the sharding state recovery document. The goal - * it has is to always move the opTime forward for a currently running server. It achieves this by - * serializing the modify calls and reading the current opTime under X-lock on the admin database. 
- */ -Status modifyRecoveryDocument(OperationContext* opCtx, - RecoveryDocument::ChangeType change, - const WriteConcernOptions& writeConcern) { - try { - // Use boost::optional so we can release the locks early - boost::optional autoGetDb; - autoGetDb.emplace(opCtx, NamespaceString::kServerConfigurationNamespace.db(), MODE_X); - - const auto configOpTime = [&]() { - const auto vcTime = VectorClock::get(opCtx)->getTime(); - const auto vcConfigTimeTs = vcTime.configTime().asTimestamp(); - return mongo::repl::OpTime(vcConfigTimeTs, mongo::repl::OpTime::kUninitializedTerm); - }(); - - BSONObj updateObj = RecoveryDocument::createChangeObj(configOpTime, change); - - LOGV2_DEBUG(22083, - 1, - "Changing sharding recovery document {update}", - "Changing sharding recovery document", - "update"_attr = redact(updateObj)); - - auto updateReq = UpdateRequest(); - updateReq.setNamespaceString(NamespaceString::kServerConfigurationNamespace); - updateReq.setQuery(RecoveryDocument::getQuery()); - updateReq.setUpdateModification( - write_ops::UpdateModification::parseFromClassicUpdate(updateObj)); - updateReq.setUpsert(); - - UpdateResult result = update(opCtx, autoGetDb->ensureDbExists(opCtx), updateReq); - invariant(result.numDocsModified == 1 || !result.upsertedId.isEmpty()); - invariant(result.numMatched <= 1); - - // Wait until the majority write concern has been satisfied, but do it outside of lock - autoGetDb = boost::none; - - WriteConcernResult writeConcernResult; - return waitForWriteConcern(opCtx, - repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(), - writeConcern, - &writeConcernResult); - } catch (const DBException& ex) { - return ex.toStatus(); - } -} - -} // namespace - -Status ShardingStateRecovery_DEPRECATED::startMetadataOp(OperationContext* opCtx) { - Status upsertStatus = - modifyRecoveryDocument(opCtx, RecoveryDocument::Increment, kMajorityWriteConcern); - - if (upsertStatus == ErrorCodes::WriteConcernFailed) { - // Couldn't wait for the replication to complete, but the local write was performed. Clear - // it up fast (without any waiting for journal or replication) and still treat it as - // failure. 
- modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions()) - .transitional_ignore(); - } - - return upsertStatus; -} - -void ShardingStateRecovery_DEPRECATED::endMetadataOp(OperationContext* opCtx) { - Status status = - modifyRecoveryDocument(opCtx, RecoveryDocument::Decrement, WriteConcernOptions()); - if (!status.isOK()) { - LOGV2_WARNING(22088, - "Failed to decrement minOpTimeUpdaters due to {error}", - "Failed to decrement minOpTimeUpdaters", - "error"_attr = redact(status)); - } -} - -Status ShardingStateRecovery_DEPRECATED::recover(OperationContext* opCtx) { - ShardingState* const shardingState = ShardingState::get(opCtx); - invariant(shardingState->enabled()); - - BSONObj recoveryDocBSON; - - try { - AutoGetCollection autoColl(opCtx, NamespaceString::kServerConfigurationNamespace, MODE_IS); - if (!Helpers::findOne( - opCtx, autoColl.getCollection(), RecoveryDocument::getQuery(), recoveryDocBSON)) { - return Status::OK(); - } - } catch (const DBException& ex) { - return ex.toStatus(); - } - - const auto recoveryDocStatus = RecoveryDocument::fromBSON(recoveryDocBSON); - if (!recoveryDocStatus.isOK()) - return recoveryDocStatus.getStatus(); - - const auto recoveryDoc = std::move(recoveryDocStatus.getValue()); - - LOGV2(22084, - "Sharding state recovery process found document {recoveryDoc}", - "Sharding state recovery process found document", - "recoveryDoc"_attr = redact(recoveryDoc.toBSON())); - - if (!recoveryDoc.getMinOpTimeUpdaters()) { - LogicalTime minOpTime{recoveryDoc.getMinOpTime().getTimestamp()}; - VectorClockMutable::get(opCtx)->tickClusterTimeTo(minOpTime); - VectorClockMutable::get(opCtx)->tickConfigTimeTo(minOpTime); - return Status::OK(); - } - - LOGV2( - 22086, - "Sharding state recovery document indicates there were {inProgressMetadataOperationCount} " - "metadata change operations in flight. Contacting the config server primary in order " - "to retrieve the most recent opTime.", - "Sharding state recovery document indicates there were metadata change operations in " - "flight. Contacting the config server primary in order to retrieve the most recent opTime", - "inProgressMetadataOperationCount"_attr = recoveryDoc.getMinOpTimeUpdaters()); - - // Need to fetch the latest uptime from the config server, so do a logging write. - // - // If this node is the config server, we skip the write because we may be in stepup and cannot - // perform majority writes. The write isn't required in this case, since the node must be in a - // recent enough version where configTime guarantees are maintained via the vector clock. 
- Status status = Status::OK(); - if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { - status = ShardingLogging::get(opCtx)->logChangeChecked( - opCtx, - "Sharding minOpTime recovery", - NamespaceString::kServerConfigurationNamespace.ns(), - recoveryDocBSON, - ShardingCatalogClient::kMajorityWriteConcern); - if (!status.isOK()) - return status; - } - - LOGV2(22087, "Sharding state recovered"); - - // Finally, clear the recovery document so next time we don't need to recover - status = modifyRecoveryDocument(opCtx, RecoveryDocument::Clear, kLocalWriteConcern); - if (!status.isOK()) { - LOGV2_WARNING(22089, - "Failed to reset sharding state recovery document due to {error}", - "Failed to reset sharding state recovery document", - "error"_attr = redact(status)); - } - - return Status::OK(); -} - - -} // namespace mongo diff --git a/src/mongo/db/s/sharding_state_recovery.h b/src/mongo/db/s/sharding_state_recovery.h deleted file mode 100644 index 4049b66b229c3..0000000000000 --- a/src/mongo/db/s/sharding_state_recovery.h +++ /dev/null @@ -1,80 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -namespace mongo { - -class OperationContext; -class Status; - -/** - * Manages the persistence and recovery of the sharding config metadata's min opTime. - * - * The opTime recovery document resides in the admin.system.version collection and has the - * following format: - * - * { _id: "minOpTimeRecovery", - * minOpTime: { ts: Timestamp 1443820968000|1, t: 11 }, - * minOptimeUpdaters: 1 } - * - * This class is flagged as deprecated because the persist/recovery of the config - * time on `[start|end]MetadataOp` is managed via vector clock starting from v6.2. - * - * TODO (SERVER-60110): Remove once 7.0 becomes last LTS. - */ -class ShardingStateRecovery_DEPRECATED { -public: - /** - * Marks the beginning of a sharding metadata operation which requires recovery of the config - * server's minOpTime after node failure. It is only safe to commence the operation after this - * method returns an OK status. 
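Since this header is being removed, a brief sketch of how call sites typically bracketed a metadata change with the deprecated API; the ScopeGuard shape is an assumption about callers, not something shown in this patch:

    // Illustrative call-site shape for the removed API.
    uassertStatusOK(ShardingStateRecovery_DEPRECATED::startMetadataOp(opCtx));
    ScopeGuard endGuard([&] { ShardingStateRecovery_DEPRECATED::endMetadataOp(opCtx); });

    // ... perform the sharding metadata change that must survive a failover ...

    // On startup, recover(opCtx) reads the persisted document and, if updaters were
    // still in flight, contacts the config server for the latest opTime before
    // clearing the document.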
- */ - static Status startMetadataOp(OperationContext* opCtx); - - /** - * Marks the end of a sharding metadata operation, persisting the latest config server opTime at - * the time of the call. - */ - static void endMetadataOp(OperationContext* opCtx); - - /** - * Recovers the minimal config server opTime that the instance should be using for reading - * sharding metadata so that the instance observes all metadata modifications it did the last - * time it was active (or PRIMARY, if replica set). - * - * NOTE: This method will block until recovery completes. - * - * Returns OK if the minOpTime was successfully recovered or failure status otherwise. It is - * unsafe to read and rely on any sharding metadata before this method has returned success. - */ - static Status recover(OperationContext* opCtx); -}; - -} // namespace mongo diff --git a/src/mongo/db/s/sharding_statistics.cpp b/src/mongo/db/s/sharding_statistics.cpp index 4c3d54963a2a0..982231cbed5ab 100644 --- a/src/mongo/db/s/sharding_statistics.cpp +++ b/src/mongo/db/s/sharding_statistics.cpp @@ -29,11 +29,14 @@ #include "mongo/db/s/sharding_statistics.h" +#include + #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/feature_flag.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { @@ -54,6 +57,9 @@ void ShardingStatistics::report(BSONObjBuilder* builder) const { builder->append("countStaleConfigErrors", countStaleConfigErrors.load()); builder->append("countDonorMoveChunkStarted", countDonorMoveChunkStarted.load()); + builder->append("countDonorMoveChunkCommitted", countDonorMoveChunkCommitted.load()); + builder->append("countDonorMoveChunkAborted", countDonorMoveChunkAborted.load()); + builder->append("totalDonorMoveChunkTimeMillis", totalDonorMoveChunkTimeMillis.load()); builder->append("totalDonorChunkCloneTimeMillis", totalDonorChunkCloneTimeMillis.load()); builder->append("totalCriticalSectionCommitTimeMillis", totalCriticalSectionCommitTimeMillis.load()); @@ -61,9 +67,15 @@ void ShardingStatistics::report(BSONObjBuilder* builder) const { builder->append("totalRecipientCriticalSectionTimeMillis", totalRecipientCriticalSectionTimeMillis.load()); builder->append("countDocsClonedOnRecipient", countDocsClonedOnRecipient.load()); + builder->append("countBytesClonedOnRecipient", countBytesClonedOnRecipient.load()); + builder->append("countDocsClonedOnCatchUpOnRecipient", + countDocsClonedOnCatchUpOnRecipient.load()); + builder->append("countBytesClonedOnCatchUpOnRecipient", + countBytesClonedOnCatchUpOnRecipient.load()); builder->append("countDocsClonedOnDonor", countDocsClonedOnDonor.load()); + builder->append("countBytesClonedOnDonor", countBytesClonedOnDonor.load()); builder->append("countRecipientMoveChunkStarted", countRecipientMoveChunkStarted.load()); - builder->append("countDocsDeletedOnDonor", countDocsDeletedOnDonor.load()); + builder->append("countDocsDeletedByRangeDeleter", countDocsDeletedByRangeDeleter.load()); builder->append("countDonorMoveChunkLockTimeout", countDonorMoveChunkLockTimeout.load()); builder->append("countDonorMoveChunkAbortConflictingIndexOperation", countDonorMoveChunkAbortConflictingIndexOperation.load()); @@ -72,6 +84,12 @@ void ShardingStatistics::report(BSONObjBuilder* builder) const { // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
if (mongo::feature_flags::gConcurrencyInChunkMigration.isEnabledAndIgnoreFCVUnsafe()) builder->append("chunkMigrationConcurrency", chunkMigrationConcurrencyCnt.load()); + // The serverStatus command is run before the FCV is initialized so we ignore it when + // checking whether the direct shard operations feature flag is enabled. + if (mongo::feature_flags::gCheckForDirectShardOperations + .isEnabledAndIgnoreFCVUnsafeAtStartup()) { + builder->append("unauthorizedDirectShardOps", unauthorizedDirectShardOperations.load()); + } } } // namespace mongo diff --git a/src/mongo/db/s/sharding_statistics.h b/src/mongo/db/s/sharding_statistics.h index 91a10cc430839..541b6df1bd0dc 100644 --- a/src/mongo/db/s/sharding_statistics.h +++ b/src/mongo/db/s/sharding_statistics.h @@ -49,6 +49,12 @@ struct ShardingStatistics { // (whether they succeeded or not). AtomicWord countDonorMoveChunkStarted{0}; + // Cumulative, always-increasing counter of how many chunks this node successfully committed. + AtomicWord countDonorMoveChunkCommitted{0}; + + // Cumulative, always-increasing counter of how many move chunks this node aborted. + AtomicWord countDonorMoveChunkAborted{0}; + // Cumulative, always-increasing counter of how much time the entire move chunk operation took // (excluding range deletion). AtomicWord totalDonorMoveChunkTimeMillis{0}; @@ -61,13 +67,29 @@ struct ShardingStatistics { // recipient node. AtomicWord countDocsClonedOnRecipient{0}; + // Cumulative, always-increasing counter of how many documents have been cloned on the catch up + // phase on the recipient node. + AtomicWord countDocsClonedOnCatchUpOnRecipient{0}; + + // Cumulative, always-increasing counter of how many bytes have been cloned on the catch up + // phase on the recipient node. + AtomicWord countBytesClonedOnCatchUpOnRecipient{0}; + + // Cumulative, always-increasing counter of how many bytes have been cloned on the + // recipient node. + AtomicWord countBytesClonedOnRecipient{0}; + // Cumulative, always-increasing counter of how many documents have been cloned on the donor // node. AtomicWord countDocsClonedOnDonor{0}; - // Cumulative, always-increasing counter of how many documents have been deleted on the donor - // node by the rangeDeleter. - AtomicWord countDocsDeletedOnDonor{0}; + // Cumulative, always-increasing counter of how many bytes have been cloned on the donor + // node. + AtomicWord countBytesClonedOnDonor{0}; + + // Cumulative, always-increasing counter of how many documents have been deleted by the + // rangeDeleter. + AtomicWord countDocsDeletedByRangeDeleter{0}; // Cumulative, always-increasing counter of how many chunks this node started to receive // (whether the receiving succeeded or not) @@ -107,6 +129,11 @@ struct ShardingStatistics { // Current number for chunkMigrationConcurrency that defines concurrent fetchers and inserters // used for _migrateClone(step 4) of chunk migration AtomicWord chunkMigrationConcurrencyCnt{1}; + + // Total number of commands run directly against this shard without the directShardOperations + // role. + AtomicWord unauthorizedDirectShardOperations{0}; + /** * Obtains the per-process instance of the sharding statistics object. 
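The counters added above are plain atomic members, so call sites only need the per-process instance mentioned here. A sketch, under the assumption of a hypothetical donor-side helper, of how the new outcome counters might be recorded; the function and its parameters are illustrative, not part of this patch:

    // Illustrative only: recording the new donor move-chunk outcome counters.
    void recordDonorMoveChunkOutcome(ServiceContext* serviceContext,
                                     bool committed,
                                     Milliseconds elapsed) {
        auto& stats = ShardingStatistics::get(serviceContext);
        if (committed) {
            stats.countDonorMoveChunkCommitted.addAndFetch(1);
        } else {
            stats.countDonorMoveChunkAborted.addAndFetch(1);
        }
        stats.totalDonorMoveChunkTimeMillis.addAndFetch(elapsed.count());
    }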
*/ diff --git a/src/mongo/db/s/sharding_util.cpp b/src/mongo/db/s/sharding_util.cpp index 4cf80f20341b6..a4ed8a5be3a61 100644 --- a/src/mongo/db/s/sharding_util.cpp +++ b/src/mongo/db/s/sharding_util.cpp @@ -31,18 +31,45 @@ #include "mongo/db/s/sharding_util.h" #include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/index_spec.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/resource_yielder.h" #include "mongo/db/s/shard_authoritative_catalog_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog/type_collection.h" -#include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/request_types/flush_routing_table_cache_updates_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -58,7 +85,7 @@ void tellShardsToRefreshCollection(OperationContext* opCtx, const std::shared_ptr& executor) { auto cmd = FlushRoutingTableCacheUpdatesWithWriteConcern(nss); cmd.setSyncFromConfig(true); - cmd.setDbName(nss.db()); + cmd.setDbName(nss.dbName()); auto cmdObj = CommandHelpers::appendMajorityWriteConcern(cmd.toBSON({})); sendCommandToShards(opCtx, DatabaseName::kAdmin.db(), cmdObj, shardIds, executor); } @@ -84,24 +111,24 @@ std::vector processShardResponses( requests, ReadPreferenceSetting(ReadPreference::PrimaryOnly), Shard::RetryPolicy::kIdempotentOrCursorInvalidated, - nullptr /* resourceYielder */); + nullptr /* resourceYielder */, + {} /* designatedHostsMap */); while (!ars.done()) { // Retrieve the responses and throw at the first failure. 
auto response = ars.next(); if (throwOnError) { - const auto errorContext = - "Failed command {} for database '{}' on shard '{}'"_format( - command.toString(), dbName, StringData{response.shardId}); + auto errorContext = "Failed command {} for database '{}' on shard '{}'"_format( + command.toString(), dbName, StringData{response.shardId}); uassertStatusOKWithContext(response.swResponse.getStatus(), errorContext); const auto& respBody = response.swResponse.getValue().data; - const auto status = getStatusFromCommandResult(respBody); + auto status = getStatusFromCommandResult(respBody); uassertStatusOKWithContext(status, errorContext); - const auto wcStatus = getWriteConcernStatusFromCommandResult(respBody); + auto wcStatus = getWriteConcernStatusFromCommandResult(respBody); uassertStatusOKWithContext(wcStatus, errorContext); } @@ -155,13 +182,13 @@ Status createIndexOnCollection(OperationContext* opCtx, if (!collection) { CollectionOptions options; options.uuid = UUID::gen(); - writeConflictRetry(opCtx, "createIndexOnCollection", ns.ns(), [&] { + writeConflictRetry(opCtx, "createIndexOnCollection", ns, [&] { WriteUnitOfWork wunit(opCtx); auto db = autoColl.ensureDbExists(opCtx); collection = db->createCollection(opCtx, ns, options); invariant(collection, - str::stream() << "Failed to create collection " << ns.ns() - << " for indexes: " << keys); + str::stream() << "Failed to create collection " + << ns.toStringForErrorMsg() << " for indexes: " << keys); wunit.commit(); }); } @@ -197,7 +224,7 @@ Status createIndexOnCollection(OperationContext* opCtx, IndexBuildsCoordinator::get(opCtx)->createIndex( opCtx, collection->uuid(), indexSpec, indexConstraints, fromMigrate); } else { - writeConflictRetry(opCtx, "createIndexOnConfigCollection", ns.ns(), [&] { + writeConflictRetry(opCtx, "createIndexOnConfigCollection", ns, [&] { WriteUnitOfWork wunit(opCtx); CollectionWriter collWriter(opCtx, collection->uuid()); IndexBuildsCoordinator::get(opCtx)->createIndexesOnEmptyCollection( @@ -212,14 +239,9 @@ Status createIndexOnCollection(OperationContext* opCtx, return Status::OK(); } -Status createShardingIndexCatalogIndexes(OperationContext* opCtx) { +Status createShardingIndexCatalogIndexes(OperationContext* opCtx, + const NamespaceString& indexCatalogNamespace) { bool unique = true; - NamespaceString indexCatalogNamespace; - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { - indexCatalogNamespace = NamespaceString::kConfigsvrIndexCatalogNamespace; - } else { - indexCatalogNamespace = NamespaceString::kShardIndexCatalogNamespace; - } auto result = createIndexOnCollection(opCtx, indexCatalogNamespace, BSON(IndexCatalogType::kCollectionUUIDFieldName @@ -228,7 +250,7 @@ Status createShardingIndexCatalogIndexes(OperationContext* opCtx) { if (!result.isOK()) { return result.withContext(str::stream() << "couldn't create collectionUUID_1_lastmod_1 index on " - << indexCatalogNamespace); + << indexCatalogNamespace.toStringForErrorMsg()); } result = createIndexOnCollection(opCtx, indexCatalogNamespace, @@ -238,7 +260,7 @@ Status createShardingIndexCatalogIndexes(OperationContext* opCtx) { if (!result.isOK()) { return result.withContext(str::stream() << "couldn't create collectionUUID_1_name_1 index on " - << indexCatalogNamespace); + << indexCatalogNamespace.toStringForErrorMsg()); } return Status::OK(); } @@ -251,9 +273,9 @@ Status createShardCollectionCatalogIndexes(OperationContext* opCtx) { BSON(ShardAuthoritativeCollectionType::kUuidFieldName << 1), !unique); if (!result.isOK()) { - return 
result.withContext(str::stream() - << "couldn't create uuid_1 index on " - << NamespaceString::kShardCollectionCatalogNamespace); + return result.withContext(str::stream() << "couldn't create uuid_1 index on " + << NamespaceString::kShardCollectionCatalogNamespace + .toStringForErrorMsg()); } return Status::OK(); diff --git a/src/mongo/db/s/sharding_util.h b/src/mongo/db/s/sharding_util.h index afab57904a794..2d46346b2f459 100644 --- a/src/mongo/db/s/sharding_util.h +++ b/src/mongo/db/s/sharding_util.h @@ -29,8 +29,12 @@ #pragma once +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/shard_id.h" @@ -90,7 +94,8 @@ std::vector sendCommandToShardsWithVersion( /** * Creates the necessary indexes for the sharding index catalog collections. */ -Status createShardingIndexCatalogIndexes(OperationContext* opCtx); +Status createShardingIndexCatalogIndexes(OperationContext* opCtx, + const NamespaceString& indexCatalogNamespace); /** * Creates the necessary indexes for the collections collection. diff --git a/src/mongo/db/s/sharding_util_refresh_test.cpp b/src/mongo/db/s/sharding_util_refresh_test.cpp index 3a384b5931281..700c1380d3081 100644 --- a/src/mongo/db/s/sharding_util_refresh_test.cpp +++ b/src/mongo/db/s/sharding_util_refresh_test.cpp @@ -27,13 +27,35 @@ * it in the license file. */ +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/sharding_util.h" #include "mongo/db/shard_id.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/sharding_write_router.cpp b/src/mongo/db/s/sharding_write_router.cpp index 9cd070cbf3143..6452f6ab0b22c 100644 --- a/src/mongo/db/s/sharding_write_router.cpp +++ b/src/mongo/db/s/sharding_write_router.cpp @@ -29,11 +29,23 @@ #include "mongo/db/s/sharding_write_router.h" +#include +#include + +#include +#include + +#include "mongo/db/cluster_role.h" +#include "mongo/db/server_options.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk.h" +#include "mongo/s/grid.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/util/assert_util.h" + namespace mongo { -ShardingWriteRouter::ShardingWriteRouter(OperationContext* opCtx, - const NamespaceString& nss, - CatalogCache* catalogCache) { +ShardingWriteRouter::ShardingWriteRouter(OperationContext* opCtx, const NamespaceString& nss) { if 
(serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)) { _scopedCss.emplace(CollectionShardingState::assertCollectionLockedAndAcquire(opCtx, nss)); _collDesc = (*_scopedCss)->getCollectionDescription(opCtx); @@ -54,6 +66,8 @@ ShardingWriteRouter::ShardingWriteRouter(OperationContext* opCtx, invariant(reshardingFields); const auto& donorFields = reshardingFields->getDonorFields(); invariant(donorFields); + auto catalogCache = Grid::get(opCtx)->catalogCache(); + invariant(catalogCache); _reshardingChunkMgr = uassertStatusOK( diff --git a/src/mongo/db/s/sharding_write_router.h b/src/mongo/db/s/sharding_write_router.h index 05b9fa042bd14..0444a3cf78c7c 100644 --- a/src/mongo/db/s/sharding_write_router.h +++ b/src/mongo/db/s/sharding_write_router.h @@ -29,16 +29,24 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/shard_key_pattern.h" namespace mongo { class ShardingWriteRouter { public: - ShardingWriteRouter(OperationContext* opCtx, - const NamespaceString& nss, - CatalogCache* catalogCache); + ShardingWriteRouter(OperationContext* opCtx, const NamespaceString& nss); CollectionShardingState* getCss() const { return _scopedCss ? &(**_scopedCss) : nullptr; diff --git a/src/mongo/db/s/sharding_write_router_bm.cpp b/src/mongo/db/s/sharding_write_router_bm.cpp index 527cc992b0cb9..f0d7c40c8cdb2 100644 --- a/src/mongo/db/s/sharding_write_router_bm.cpp +++ b/src/mongo/db/s/sharding_write_router_bm.cpp @@ -28,28 +28,63 @@ */ #include +#include +#include #include +#include +#include +#include #include #include -#include "mongo/db/concurrency/locker_noop_client_observer.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/collection_sharding_state_factory_shard.h" #include "mongo/db/s/collection_sharding_state_factory_standalone.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_write_router.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" -#include "mongo/platform/random.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/sharding_catalog_client_impl.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog_cache.h" -#include "mongo/s/catalog_cache_loader_mock.h" #include "mongo/s/catalog_cache_mock.h" #include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/database_version.h" +#include "mongo/s/grid.h" +#include 
"mongo/s/index_version.h" +#include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" #include "mongo/util/assert_util.h" #include "mongo/util/processinfo.h" #include "mongo/util/str.h" @@ -58,7 +93,7 @@ namespace mongo { namespace { -const NamespaceString kNss("test", "foo"); +const NamespaceString kNss = NamespaceString::createNamespaceString_forTest("test", "foo"); ShardId pessimalShardSelector(int i, int nShards, int nChunks) { return ShardId(str::stream() << "shard" << (i % nShards)); @@ -93,11 +128,11 @@ std::pair, mongo::ChunkManager> createChunks( const auto reshardKeyPattern = KeyPattern(BSON("y" << 1)); const auto collEpoch = OID::gen(); const auto collTimestamp = Timestamp(100, 5); - const auto tempNss = - NamespaceString(kNss.db(), - fmt::format("{}{}", - NamespaceString::kTemporaryReshardingCollectionPrefix, - collIdentifier.toString())); + const auto tempNss = NamespaceString::createNamespaceString_forTest( + kNss.db(), + fmt::format("{}{}", + NamespaceString::kTemporaryReshardingCollectionPrefix, + collIdentifier.toString())); std::vector chunks; chunks.reserve(nChunks); @@ -133,39 +168,56 @@ std::pair, mongo::ChunkManager> createChunks( return std::make_pair(chunks, cm); } -std::unique_ptr createCatalogCacheMock(OperationContext* opCtx) { - const size_t nShards = 1; - const uint32_t nChunks = 60; - const auto clusterId = OID::gen(); - const auto shards = std::vector{ShardId("shard0")}; - const auto originatorShard = shards[0]; +void setupCatalogCacheMock(OperationContext* opCtx, bool withShardedCollection) { + auto catalogCache = CatalogCacheMock::make(); - const auto [chunks, chunkManager] = createChunks(nShards, nChunks, shards); + if (withShardedCollection) { + const size_t nShards = 1; + const uint32_t nChunks = 60; + const auto clusterId = OID::gen(); + const auto shards = std::vector{ShardId("shard0")}; + const auto originatorShard = shards[0]; + + const auto [chunks, chunkManager] = createChunks(nShards, nChunks, shards); + + ShardingState::get(opCtx->getServiceContext())->setInitialized(originatorShard, clusterId); + + CollectionShardingStateFactory::set( + opCtx->getServiceContext(), + std::make_unique(opCtx->getServiceContext())); + + OperationShardingState::setShardRole( + opCtx, + kNss, + ShardVersionFactory::make( + chunkManager, + originatorShard, + boost::optional(boost::none)) /* shardVersion */, + boost::none /* databaseVersion */); + + // Configuring the filtering metadata such that calls to getCollectionDescription return + // what we want. Specifically the reshardingFields are what we use. Its specified by the + // chunkManager. 
+ CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, kNss) + ->setFilteringMetadata(opCtx, CollectionMetadata(chunkManager, originatorShard)); + + catalogCache->setChunkManagerReturnValue(chunkManager); + } - ShardingState::get(opCtx->getServiceContext())->setInitialized(originatorShard, clusterId); + auto mockNetwork = std::make_unique(); - CollectionShardingStateFactory::set( - opCtx->getServiceContext(), - std::make_unique(opCtx->getServiceContext())); - - OperationShardingState::setShardRole( - opCtx, - kNss, - ShardVersionFactory::make( - chunkManager, - originatorShard, - boost::optional(boost::none)) /* shardVersion */, - boost::none /* databaseVersion */); - - // Configuring the filtering metadata such that calls to getCollectionDescription return what we - // want. Specifically the reshardingFields are what we use. Its specified by the chunkManager. - CollectionShardingRuntime::assertCollectionLockedAndAcquireExclusive(opCtx, kNss) - ->setFilteringMetadata(opCtx, CollectionMetadata(chunkManager, originatorShard)); + auto const grid = Grid::get(opCtx); + grid->init( + std::make_unique(nullptr), + std::move(catalogCache), + std::make_unique(opCtx->getServiceContext(), nullptr, boost::none), + std::make_unique(opCtx->getServiceContext()->getPreciseClockSource()), + std::make_unique(), + std::make_unique(), + mockNetwork.get()); - auto catalogCache = CatalogCacheMock::make(); - catalogCache->setChunkManagerReturnValue(chunkManager); - - return catalogCache; + // Note: mockNetwork in Grid will become a dangling pointer after this function, this + // is fine since the test shouldn't be using it. } void BM_InsertGetDestinedRecipient(benchmark::State& state) { @@ -174,12 +226,10 @@ void BM_InsertGetDestinedRecipient(benchmark::State& state) { auto serviceContext = ServiceContext::make(); const auto client = serviceContext->makeClient("test"); - serviceContext->registerClientObserver(std::make_unique()); const auto opCtx = client->makeOperationContext(); - const auto catalogCache = createCatalogCacheMock(opCtx.get()); - - ShardingWriteRouter writeRouter(opCtx.get(), kNss, catalogCache.get()); + setupCatalogCacheMock(opCtx.get(), true /* withShardedColl */); + ShardingWriteRouter writeRouter(opCtx.get(), kNss); for (auto keepRunning : state) { benchmark::ClobberMemory(); @@ -194,14 +244,13 @@ void BM_UpdateGetDestinedRecipient(benchmark::State& state) { auto serviceContext = ServiceContext::make(); const auto client = serviceContext->makeClient("test"); - serviceContext->registerClientObserver(std::make_unique()); const auto opCtx = client->makeOperationContext(); - const auto catalogCache = createCatalogCacheMock(opCtx.get()); + setupCatalogCacheMock(opCtx.get(), true /* withShardedColl */); for (auto keepRunning : state) { benchmark::ClobberMemory(); - ShardingWriteRouter writeRouter(opCtx.get(), kNss, catalogCache.get()); + ShardingWriteRouter writeRouter(opCtx.get(), kNss); auto shardId = writeRouter.getReshardingDestinedRecipient(BSON("_id" << 0)); ASSERT(shardId != boost::none); } @@ -212,18 +261,17 @@ void BM_UnshardedDestinedRecipient(benchmark::State& state) { auto serviceContext = ServiceContext::make(); const auto client = serviceContext->makeClient("test"); - serviceContext->registerClientObserver(std::make_unique()); const auto opCtx = client->makeOperationContext(); CollectionShardingStateFactory::set( opCtx->getServiceContext(), std::make_unique(opCtx->getServiceContext())); - const auto catalogCache = CatalogCacheMock::make(); + 
setupCatalogCacheMock(opCtx.get(), false /* withShardedColl */); for (auto keepRunning : state) { benchmark::ClobberMemory(); - ShardingWriteRouter writeRouter(opCtx.get(), kNss, catalogCache.get()); + ShardingWriteRouter writeRouter(opCtx.get(), kNss); auto shardId = writeRouter.getReshardingDestinedRecipient(BSON("_id" << 0)); ASSERT(shardId == boost::none); } diff --git a/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp index d4c3195f3da9c..d516673c2877c 100644 --- a/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_abort_reshard_collection_command.cpp @@ -28,19 +28,42 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" -#include "mongo/db/s/config/sharding_catalog_manager.h" -#include "mongo/db/s/resharding/resharding_coordinator_service.h" +#include "mongo/db/s/resharding/donor_document_gen.h" +#include "mongo/db/s/resharding/recipient_document_gen.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" +#include "mongo/db/s/resharding/resharding_donor_service.h" +#include "mongo/db/s/resharding/resharding_recipient_service.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/vector_clock_mutable.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/abort_reshard_collection_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -66,6 +89,10 @@ class ShardsvrAbortReshardCollectionCommand final CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, opCtx->getWriteConcern()); + // Persist the config time to ensure that in case of stepdown next filtering metadata + // refresh on the new primary will always fetch the latest information. 
+ VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); + std::vector> futuresToWait; if (auto machine = resharding::tryGetReshardingStateMachine< @@ -139,8 +166,9 @@ class ShardsvrAbortReshardCollectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp b/src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp index 3d413e49e5853..b312a43ae2243 100644 --- a/src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp +++ b/src/mongo/db/s/shardsvr_check_metadata_consistency_command.cpp @@ -28,20 +28,81 @@ */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" -#include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/metadata_consistency_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/ddl_lock_manager.h" #include "mongo/db/s/metadata_consistency_util.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/s/query/document_source_merge_cursors.h" #include "mongo/s/query/establish_cursors.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include 
"mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -70,6 +131,12 @@ std::vector getDatabasesThisShardIsPrimaryFor(OperationContext* op databases.emplace_back( DatabaseType::parseOwned(IDLParserContext("DatabaseType"), std::move(rawDb))); } + if (thisShardId == ShardId::kConfigServerId) { + // Config database + databases.emplace_back(DatabaseName::kConfig.db().toString(), + ShardId::kConfigServerId, + DatabaseVersion::makeFixed()); + } return databases; } @@ -102,33 +169,44 @@ class ShardsvrCheckMetadataConsistencyCommand final Response typedRun(OperationContext* opCtx) { uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - // This commands uses DDL locks to serialize with concurrent DDL operations. - // Since we are not using the ShardingDDLCoordinator infrastructure we need to - // explicitely wait for all DDL coordinators to be recovered and to have re-acquired - // their DDL locks before to proceed. - ShardingDDLCoordinatorService::getService(opCtx)->waitForRecoveryCompletion(opCtx); - - const auto nss = ns(); - switch (metadata_consistency_util::getCommandLevel(nss)) { - case MetadataConsistencyCommandLevelEnum::kClusterLevel: - return _runClusterLevel(opCtx, nss); - case MetadataConsistencyCommandLevelEnum::kDatabaseLevel: - return _runDatabaseLevel(opCtx, nss); - case MetadataConsistencyCommandLevelEnum::kCollectionLevel: - return _runCollectionLevel(opCtx, nss); - default: - MONGO_UNREACHABLE; + opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + { + // Ensure that opCtx will get interrupted in the event of a stepdown. + Lock::GlobalLock lk(opCtx, MODE_IX); + uassert(ErrorCodes::InterruptedDueToReplStateChange, + "Not primary while attempting to start a metadata consistency check", + repl::ReplicationCoordinator::get(opCtx)->getMemberState().primary()); } + + auto response = [&] { + const auto nss = ns(); + switch (metadata_consistency_util::getCommandLevel(nss)) { + case MetadataConsistencyCommandLevelEnum::kClusterLevel: + return _runClusterLevel(opCtx, nss); + case MetadataConsistencyCommandLevelEnum::kDatabaseLevel: + return _runDatabaseLevel(opCtx, nss); + case MetadataConsistencyCommandLevelEnum::kCollectionLevel: + return _runCollectionLevel(opCtx, nss); + default: + MONGO_UNREACHABLE; + } + }(); + + // Make sure the response gets invalidated in case of interruption + opCtx->checkForInterrupt(); + + return response; } private: Response _runClusterLevel(OperationContext* opCtx, const NamespaceString& nss) { - uassert( - ErrorCodes::InvalidNamespace, - "cluster level mode must be run against the 'admin' database with {aggregate: 1}", - nss.isCollectionlessCursorNamespace()); + uassert(ErrorCodes::InvalidNamespace, + str::stream() << Request::kCommandName + << " command on admin database can only be run without " + "collection name. 
Found unexpected collection name: " + << nss.coll(), + nss.isCollectionlessCursorNamespace()); std::vector cursors; @@ -175,27 +253,33 @@ class ShardsvrCheckMetadataConsistencyCommand final std::vector> requests; // Shard requests + const auto shardOpKey = UUID::gen(); ShardsvrCheckMetadataConsistencyParticipant participantRequest{nss}; participantRequest.setCommonFields(request().getCommonFields()); participantRequest.setPrimaryShardId(ShardingState::get(opCtx)->shardId()); participantRequest.setCursor(request().getCursor()); const auto participants = Grid::get(opCtx)->shardRegistry()->getAllShardIds(opCtx); + auto participantRequestWithOpKey = + appendOpKey(shardOpKey, participantRequest.toBSON({})); for (const auto& shardId : participants) { - requests.emplace_back(shardId, participantRequest.toBSON({})); + requests.emplace_back(shardId, participantRequestWithOpKey.getOwned()); } // Config server request + const auto configOpKey = UUID::gen(); ConfigsvrCheckMetadataConsistency configRequest{nss}; participantRequest.setCursor(request().getCursor()); - requests.emplace_back(ShardId::kConfigServerId, configRequest.toBSON({})); + requests.emplace_back(ShardId::kConfigServerId, + appendOpKey(configOpKey, configRequest.toBSON({}))); // Take a DDL lock on the database static constexpr StringData kLockReason{"checkMetadataConsistency"_sd}; - auto ddlLockManager = DDLLockManager::get(opCtx); - const auto dbDDLLock = ddlLockManager->lock( - opCtx, nss.db(), kLockReason, DDLLockManager::kDefaultLockTimeout); - - DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, nss.dbName()); + const LockMode mode = (feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility) + ? MODE_S + : MODE_X); + const DDLLockManager::ScopedDatabaseDDLLock dbDDLLock{ + opCtx, nss.dbName(), kLockReason, mode, DDLLockManager::kDefaultLockTimeout}; return establishCursors(opCtx, Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(), @@ -203,7 +287,8 @@ class ShardsvrCheckMetadataConsistencyCommand final ReadPreferenceSetting(ReadPreference::PrimaryOnly), requests, false /* allowPartialResults */, - Shard::RetryPolicy::kIdempotentOrCursorInvalidated); + Shard::RetryPolicy::kIdempotentOrCursorInvalidated, + {shardOpKey, configOpKey}); } CursorInitialReply _mergeCursors(OperationContext* opCtx, @@ -251,7 +336,8 @@ class ShardsvrCheckMetadataConsistencyCommand final repl::ReadConcernArgs::get(opCtx), ReadPreferenceSetting::get(opCtx), request().toBSON({}), - {Privilege(ResourcePattern::forClusterResource(), ActionType::internal)}}; + {Privilege(ResourcePattern::forClusterResource(nss.tenantId()), + ActionType::internal)}}; return metadata_consistency_util::createInitialCursorReplyMongod( opCtx, std::move(cursorParams), batchSize); @@ -269,8 +355,9 @@ class ShardsvrCheckMetadataConsistencyCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_check_metadata_consistency_participant_command.cpp b/src/mongo/db/s/shardsvr_check_metadata_consistency_participant_command.cpp index 49f3dfaba4037..eda79a88be944 100644 --- a/src/mongo/db/s/shardsvr_check_metadata_consistency_participant_command.cpp +++ 
b/src/mongo/db/s/shardsvr_check_metadata_consistency_participant_command.cpp @@ -28,19 +28,73 @@ */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/clientcursor.h" #include "mongo/db/commands.h" -#include "mongo/db/cursor_manager.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/metadata_consistency_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/ddl_lock_manager.h" #include "mongo/db/s/metadata_consistency_util.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_aggregate.h" +#include "mongo/s/query/cluster_client_cursor.h" +#include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/sharding_feature_flags_gen.h" #include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -50,6 +104,26 @@ namespace { std::vector checkIndexesInconsistencies( OperationContext* opCtx, const std::vector& collections) { static const auto rawPipelineStages = [] { + /** + * The following pipeline is used to check for inconsistencies in the indexes of all the + * collections across all shards in the cluster. In particular, it checks that: + * 1. All shards have the same set of indexes. + * 2. All shards have the same properties for each index. + * + * The pipeline is structured as follows: + * 1. Use the $indexStats stage to gather statistics about each index in all shards. + * 2. Group all the indexes together and collect them into an array. Also, collect the + * names of all the shards in the cluster. + * 3. Create a new document for each index in the array created by the previous stage. + * 4. Group all the indexes by name. + * 5. 
For each index, create two new fields: + * - `missingFromShards`: array of differences between all shards that are expected + * to have the index and the shards that actually contain the index. + * - `inconsistentProperties`: array of differences between the properties of each + * index across all shards. + * 6. Filter out indexes that are consistent across all shards. + * 7. Project the final result. + */ auto rawPipelineBSON = fromjson(R"({pipeline: [ {$indexStats: {}}, {$group: { @@ -100,25 +174,22 @@ std::vector checkIndexesInconsistencies( }(); static constexpr StringData kLockReason{"checkMetadataConsistency::indexCheck"_sd}; - auto ddlLockManager = DDLLockManager::get(opCtx); auto catalogCache = Grid::get(opCtx)->catalogCache(); std::vector indexIncons; for (const auto& coll : collections) { const auto& nss = coll.getNss(); - // The only sharded collection in the config database with indexes is - // config.system.sessions. Unfortunately, the code path to run aggregation - // below would currently invariant if one of the targeted shards was the config - // server itself. - if (nss.isConfigDB()) { - continue; + // TODO SERVER-77546 Remove the collection DDL lock acquisition on feature flag removal + // since the already taken DB DDL lock in S mode will be enough to serialize with other + // indexes operations + if (!feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility)) { + // Serialize with concurrent DDL operations that modify indexes + const DDLLockManager::ScopedCollectionDDLLock collectionDDLLock{ + opCtx, nss, kLockReason, MODE_X, DDLLockManager::kDefaultLockTimeout}; } - // Serialize with concurrent DDL operations that modify indexes - const auto collectionDDLLock = ddlLockManager->lock( - opCtx, nss.toString(), kLockReason, DDLLockManager::kDefaultLockTimeout); - AggregateCommandRequest aggRequest{nss, rawPipelineStages}; std::vector results; @@ -145,15 +216,27 @@ std::vector checkIndexesInconsistencies( return; } - auto cursorPin = uassertStatusOK( - CursorManager::get(opCtx)->pinCursor(opCtx, indexStatsCursor.getCursorId())); - auto exec = cursorPin->getExecutor(); + const auto authzSession = AuthorizationSession::get(opCtx->getClient()); + const auto authChecker = + [&authzSession](const boost::optional& userName) -> Status { + return authzSession->isCoauthorizedWith(userName) + ? Status::OK() + : Status(ErrorCodes::Unauthorized, "User not authorized to access cursor"); + }; + + // Check out the cursor. If the cursor is not found, all data was retrieve in the + // first batch. 
+ const auto cursorManager = Grid::get(opCtx)->getCursorManager(); + auto pinnedCursor = uassertStatusOK(cursorManager->checkOutCursor( + indexStatsCursor.getCursorId(), opCtx, authChecker)); + while (true) { + auto next = pinnedCursor->next(); + if (!next.isOK() || next.getValue().isEOF()) { + break; + } - BSONObj nextDoc; - while (!exec->isEOF()) { - auto state = exec->getNext(&nextDoc, nullptr); - if (state == PlanExecutor::ADVANCED) { - results.emplace_back(nextDoc); + if (auto data = next.getValue().getResult()) { + results.emplace_back(data.get().getOwned()); } } }); @@ -202,83 +285,20 @@ class ShardsvrCheckMetadataConsistencyParticipantCommand final const auto nss = ns(); const auto shardId = ShardingState::get(opCtx)->shardId(); const auto& primaryShardId = request().getPrimaryShardId(); + const auto commandLevel = metadata_consistency_util::getCommandLevel(nss); // Get the list of collections from configsvr sorted by namespace - const auto catalogClientCollections = [&] { - switch (metadata_consistency_util::getCommandLevel(nss)) { - case MetadataConsistencyCommandLevelEnum::kDatabaseLevel: - return Grid::get(opCtx)->catalogClient()->getCollections( - opCtx, - nss.db(), - repl::ReadConcernLevel::kMajorityReadConcern, - BSON(CollectionType::kNssFieldName << 1) /*sort*/); - case MetadataConsistencyCommandLevelEnum::kCollectionLevel: - try { - auto collectionType = - Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss); - return std::vector{std::move(collectionType)}; - } catch (const ExceptionFor&) { - // If we don't find the nss, it means that the collection is not - // sharded. - return std::vector{}; - } - default: - MONGO_UNREACHABLE; - } - }(); - - auto inconsistencies = [&] { - auto collCatalogSnapshot = [&] { - // Lock db in mode IS while taking the collection catalog snapshot to ensure - // that we serialize with non-atomic collection and index creation performed by - // the MigrationDestinationManager. - // Without this lock we could potentially acquire a snapshot in which a - // collection have been already created by the MigrationDestinationManager but - // the relative shardkey index is still missing. 
- AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IS); - return CollectionCatalog::get(opCtx); - }(); - - std::vector localCollections; - switch (metadata_consistency_util::getCommandLevel(nss)) { - case MetadataConsistencyCommandLevelEnum::kDatabaseLevel: { - for (auto it = collCatalogSnapshot->begin(opCtx, nss.dbName()); - it != collCatalogSnapshot->end(opCtx); - ++it) { - const auto coll = *it; - if (!coll || !coll->ns().isNormalCollection()) { - continue; - } - localCollections.emplace_back(CollectionPtr(coll)); - } - std::sort(localCollections.begin(), - localCollections.end(), - [](const CollectionPtr& prev, const CollectionPtr& next) { - return prev->ns() < next->ns(); - }); - break; - } - case MetadataConsistencyCommandLevelEnum::kCollectionLevel: { - if (auto coll = - collCatalogSnapshot->lookupCollectionByNamespace(opCtx, nss)) { - localCollections.emplace_back(CollectionPtr(coll)); - } - break; - } - default: - MONGO_UNREACHABLE; - } + const auto configsvrCollections = + getCollectionsListFromConfigServer(opCtx, nss, commandLevel); - // Check consistency between local metadata and configsvr metadata - return metadata_consistency_util::checkCollectionMetadataInconsistencies( - opCtx, shardId, primaryShardId, catalogClientCollections, localCollections); - }(); + auto inconsistencies = checkCollectionMetadataInconsistencies( + opCtx, nss, commandLevel, shardId, primaryShardId, configsvrCollections); // If this is the primary shard of the db coordinate index check across shards const auto& optionalCheckIndexes = request().getCommonFields().getCheckIndexes(); if (shardId == primaryShardId && optionalCheckIndexes && *optionalCheckIndexes) { auto indexInconsistencies = - checkIndexesInconsistencies(opCtx, catalogClientCollections); + checkIndexesInconsistencies(opCtx, configsvrCollections); inconsistencies.insert(inconsistencies.end(), std::make_move_iterator(indexInconsistencies.begin()), std::make_move_iterator(indexInconsistencies.end())); @@ -296,7 +316,8 @@ class ShardsvrCheckMetadataConsistencyParticipantCommand final repl::ReadConcernArgs::get(opCtx), ReadPreferenceSetting::get(opCtx), request().toBSON({}), - {Privilege(ResourcePattern::forClusterResource(), ActionType::internal)}}; + {Privilege(ResourcePattern::forClusterResource(nss.tenantId()), + ActionType::internal)}}; const auto batchSize = [&]() -> long long { const auto& cursorOpts = request().getCursor(); @@ -312,6 +333,104 @@ class ShardsvrCheckMetadataConsistencyParticipantCommand final } private: + std::vector getCollectionsListFromConfigServer( + OperationContext* opCtx, + const NamespaceString& nss, + const MetadataConsistencyCommandLevelEnum& commandLevel) { + switch (commandLevel) { + case MetadataConsistencyCommandLevelEnum::kDatabaseLevel: { + return Grid::get(opCtx)->catalogClient()->getCollections( + opCtx, + nss.db(), + repl::ReadConcernLevel::kMajorityReadConcern, + BSON(CollectionType::kNssFieldName << 1) /*sort*/); + } + case MetadataConsistencyCommandLevelEnum::kCollectionLevel: { + try { + auto collectionType = + Grid::get(opCtx)->catalogClient()->getCollection(opCtx, nss); + return {std::move(collectionType)}; + } catch (const ExceptionFor&) { + // If we don't find the nss, it means that the collection is not sharded. 
+ return {}; + } + } + default: + MONGO_UNREACHABLE; + } + } + + std::vector checkCollectionMetadataInconsistencies( + OperationContext* opCtx, + const NamespaceString& nss, + const MetadataConsistencyCommandLevelEnum& commandLevel, + const ShardId& shardId, + const ShardId& primaryShardId, + const std::vector& catalogClientCollections) { + std::vector localCollections; + auto collCatalogSnapshot = [&] { + switch (commandLevel) { + case MetadataConsistencyCommandLevelEnum::kDatabaseLevel: { + auto collCatalogSnapshot = [&] { + // Lock db in mode IS while taking the collection catalog snapshot to + // ensure that we serialize with non-atomic collection and index + // creation performed by the MigrationDestinationManager. Without this + // lock we could potentially acquire a snapshot in which a collection + // have been already created by the MigrationDestinationManager but the + // relative shardkey index is still missing. + AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IS); + return CollectionCatalog::get(opCtx); + }(); + + for (auto&& coll : collCatalogSnapshot->range(nss.dbName())) { + if (!coll) { + continue; + } + localCollections.emplace_back(CollectionPtr(coll)); + } + std::sort(localCollections.begin(), + localCollections.end(), + [](const CollectionPtr& prev, const CollectionPtr& next) { + return prev->ns() < next->ns(); + }); + + return collCatalogSnapshot; + } + case MetadataConsistencyCommandLevelEnum::kCollectionLevel: { + auto collCatalogSnapshot = [&] { + // Lock collection in mode IS while taking the collection catalog + // snapshot to ensure that we serialize with non-atomic collection and + // index creation performed by the MigrationDestinationManager. Without + // this lock we could potentially acquire a snapshot in which a + // collection have been already created by the + // MigrationDestinationManager but the relative shardkey index is still + // missing. 
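Both collection lists that end up in metadata_consistency_util::checkCollectionMetadataInconsistencies are sorted by namespace: the config-server query sorts on CollectionType::kNssFieldName, and the local list is sorted with std::sort just above. A plausible reason is that the utility can then compare the two lists in a single merge-style pass. The sketch below only illustrates that idea under invented, simplified types; it is not the actual implementation of that utility, and the difference categories it reports are assumptions for the example.

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Hypothetical, simplified stand-ins for the real catalog types used by the command.
struct CollEntry {
    std::string nss;   // namespace, e.g. "db.coll"
    std::string uuid;  // collection UUID rendered as a string
};

struct MetadataDifference {
    std::string nss;
    std::string kind;  // e.g. "uuidMismatch", "missingLocally", "unknownToConfigsvr"
};

// Illustrative merge-style walk over two lists that are both sorted by namespace.
// The real utility decides which of these situations count as inconsistencies; this
// only shows why pre-sorting both inputs is convenient.
std::vector<MetadataDifference> compareSortedCatalogs(const std::vector<CollEntry>& configsvr,
                                                      const std::vector<CollEntry>& local) {
    std::vector<MetadataDifference> diffs;
    std::size_t i = 0, j = 0;
    while (i < configsvr.size() && j < local.size()) {
        if (configsvr[i].nss == local[j].nss) {
            // Same namespace on both sides: compare per-collection properties.
            if (configsvr[i].uuid != local[j].uuid) {
                diffs.push_back({configsvr[i].nss, "uuidMismatch"});
            }
            ++i;
            ++j;
        } else if (configsvr[i].nss < local[j].nss) {
            // Tracked by the config server but not found in the local catalog.
            diffs.push_back({configsvr[i].nss, "missingLocally"});
            ++i;
        } else {
            // Present locally but not in the config server's list.
            diffs.push_back({local[j].nss, "unknownToConfigsvr"});
            ++j;
        }
    }
    for (; i < configsvr.size(); ++i) {
        diffs.push_back({configsvr[i].nss, "missingLocally"});
    }
    for (; j < local.size(); ++j) {
        diffs.push_back({local[j].nss, "unknownToConfigsvr"});
    }
    return diffs;
}
```

Pre-sorting turns the comparison into one O(n + m) pass instead of repeated lookups, which is consistent with both call sites taking care to order their inputs by namespace.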
+ AutoGetCollection coll( + opCtx, + nss, + MODE_IS, + AutoGetCollection::Options{}.viewMode( + auto_get_collection::ViewMode::kViewsPermitted)); + return CollectionCatalog::get(opCtx); + }(); + + if (auto coll = + collCatalogSnapshot->lookupCollectionByNamespace(opCtx, nss)) { + localCollections.emplace_back(CollectionPtr(coll)); + } + + return collCatalogSnapshot; + } + default: + MONGO_UNREACHABLE; + } + }(); + + // Check consistency between local metadata and configsvr metadata + return metadata_consistency_util::checkCollectionMetadataInconsistencies( + opCtx, shardId, primaryShardId, catalogClientCollections, localCollections); + } + NamespaceString ns() const override { return request().getNamespace(); } @@ -324,8 +443,9 @@ class ShardsvrCheckMetadataConsistencyParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_cleanup_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_cleanup_reshard_collection_command.cpp index a1541d396a6cd..5be36b7cf42a3 100644 --- a/src/mongo/db/s/shardsvr_cleanup_reshard_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_cleanup_reshard_collection_command.cpp @@ -28,13 +28,27 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/resharding/resharding_manual_cleanup.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/cleanup_reshard_collection_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -52,11 +66,6 @@ class ShardsvrCleanupReshardCollectionCommand final using InvocationBase::InvocationBase; void typedRun(OperationContext* opCtx) { - uassert(ErrorCodes::CommandNotSupported, - "cleanupReshardCollection command not enabled", - resharding::gFeatureFlagResharding.isEnabled( - serverGlobalParams.featureCompatibility)); - uassert(ErrorCodes::IllegalOperation, "_shardsvrCleanupReshardCollection can only be run on shard servers", serverGlobalParams.clusterRole.has(ClusterRole::ShardServer)); @@ -84,8 +93,9 @@ class ShardsvrCleanupReshardCollectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_cleanup_structured_encryption_data_command.cpp b/src/mongo/db/s/shardsvr_cleanup_structured_encryption_data_command.cpp new file mode 100644 index 0000000000000..d39ac3f5c04b5 --- 
/dev/null +++ b/src/mongo/db/s/shardsvr_cleanup_structured_encryption_data_command.cpp @@ -0,0 +1,187 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/commands.h" +#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/commands/fle2_cleanup_gen.h" +#include "mongo/db/commands/fle2_compact.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/cleanup_structured_encryption_data_coordinator.h" +#include "mongo/db/s/cleanup_structured_encryption_data_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand + + +namespace mongo { +namespace { + +class _shardsvrCleanupStructuredEncryptionDataCommand final + : public TypedCommand<_shardsvrCleanupStructuredEncryptionDataCommand> { +public: + using Request = CleanupStructuredEncryptionData; + using Reply = typename Request::Reply; + + _shardsvrCleanupStructuredEncryptionDataCommand() + : TypedCommand("_shardsvrCleanupStructuredEncryptionData"_sd) {} + + bool skipApiVersionCheck() const final { + // Internal command (server to server). + return true; + } + + std::string help() const final { + return "Internal command. Do not call directly. 
Cleans up an ECOC collection."; + } + + bool adminOnly() const final { + return false; + } + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const final { + return AllowedOnSecondary::kNever; + } + + std::set sensitiveFieldNames() const final { + return {CleanupStructuredEncryptionData::kCleanupTokensFieldName}; + } + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + Reply typedRun(OperationContext* opCtx) { + + CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; + + auto cleanupCoordinator = + [&]() -> std::shared_ptr { + FixedFCVRegion fixedFcvRegion(opCtx); + + auto cleanup = makeRequest(opCtx); + return ShardingDDLCoordinatorService::getService(opCtx)->getOrCreateInstance( + opCtx, cleanup.toBSON()); + }(); + + return checked_pointer_cast( + cleanupCoordinator) + ->getResponse(opCtx); + } + + private: + CleanupStructuredEncryptionDataState makeRequest(OperationContext* opCtx) { + const auto& req = request(); + const auto& nss = req.getNamespace(); + + AutoGetCollection baseColl(opCtx, nss, MODE_IX); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Unknown collection: " << nss.toStringForErrorMsg(), + baseColl.getCollection()); + + validateCleanupRequest(req, *(baseColl.getCollection().get())); + + auto namespaces = + uassertStatusOK(EncryptedStateCollectionsNamespaces::createFromDataCollection( + *(baseColl.getCollection().get()))); + + AutoGetCollection ecocColl(opCtx, namespaces.ecocNss, MODE_IX); + AutoGetCollection ecocTempColl(opCtx, namespaces.ecocRenameNss, MODE_IX); + AutoGetCollection escDeletesColl(opCtx, namespaces.escDeletesNss, MODE_IX); + + CleanupStructuredEncryptionDataState cleanup; + + if (ecocColl.getCollection()) { + cleanup.setEcocUuid(ecocColl->uuid()); + } + if (ecocTempColl.getCollection()) { + cleanup.setEcocRenameUuid(ecocTempColl->uuid()); + } + if (escDeletesColl.getCollection()) { + cleanup.setEscDeletesUuid(escDeletesColl->uuid()); + } + + cleanup.setShardingDDLCoordinatorMetadata( + {{nss, DDLCoordinatorTypeEnum::kCleanupStructuredEncryptionData}}); + cleanup.setEscNss(namespaces.escNss); + cleanup.setEcocNss(namespaces.ecocNss); + cleanup.setEcocRenameNss(namespaces.ecocRenameNss); + cleanup.setEscDeletesNss(namespaces.escDeletesNss); + cleanup.setCleanupTokens(req.getCleanupTokens().getOwned()); + + return cleanup; + } + + NamespaceString ns() const override { + return request().getNamespace(); + } + + bool supportsWriteConcern() const override { + return true; + } + + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); + } + }; + +} shardsvrCleanupStructuredEncryptionDataCommand; + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/s/shardsvr_collmod_command.cpp b/src/mongo/db/s/shardsvr_collmod_command.cpp index b924c43cfdfa3..e9d0b3796f6f6 100644 --- a/src/mongo/db/s/shardsvr_collmod_command.cpp +++ b/src/mongo/db/s/shardsvr_collmod_command.cpp @@ -27,18 +27,36 @@ * it in the license file. 
*/ +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/client.h" #include "mongo/db/coll_mod_gen.h" #include "mongo/db/coll_mod_reply_validation.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collmod_coordinator.h" +#include "mongo/db/s/collmod_coordinator_document_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/db/timeseries/catalog_helper.h" -#include "mongo/logv2/log.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -102,22 +120,13 @@ class ShardsvrCollModCommand final : public BasicCommandWithRequestParserraiseDbProfileLevel( CollectionCatalog::get(opCtx)->getDatabaseProfileLevel(cmd.getNamespace().dbName())); - auto collModCoordinator = [&] { - FixedFCVRegion fcvRegion(opCtx); - auto coordinatorType = DDLCoordinatorTypeEnum::kCollMod; - if (!feature_flags::gCollModCoordinatorV3.isEnabled( - serverGlobalParams.featureCompatibility)) { - // TODO SERVER-68008 Remove once 7.0 becomes last LTS - coordinatorType = DDLCoordinatorTypeEnum::kCollModPre61Compatible; - } - auto coordinatorDoc = CollModCoordinatorDocument(); - coordinatorDoc.setCollModRequest(cmd.getCollModRequest()); - coordinatorDoc.setShardingDDLCoordinatorMetadata( - {{cmd.getNamespace(), coordinatorType}}); - auto service = ShardingDDLCoordinatorService::getService(opCtx); - return checked_pointer_cast( - service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON())); - }(); + auto coordinatorDoc = CollModCoordinatorDocument(); + coordinatorDoc.setCollModRequest(cmd.getCollModRequest()); + coordinatorDoc.setShardingDDLCoordinatorMetadata( + {{cmd.getNamespace(), DDLCoordinatorTypeEnum::kCollMod}}); + auto service = ShardingDDLCoordinatorService::getService(opCtx); + auto collModCoordinator = checked_pointer_cast( + service->getOrCreateInstance(opCtx, coordinatorDoc.toBSON())); result.appendElements(collModCoordinator->getResult(opCtx)); return true; diff --git a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp index 907ca01bf465a..36adff22939c7 100644 --- a/src/mongo/db/s/shardsvr_collmod_participant_command.cpp +++ b/src/mongo/db/s/shardsvr_collmod_participant_command.cpp @@ -28,27 +28,75 @@ */ +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types.h" +#include 
"mongo/db/cancelable_operation_context.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/coll_mod_gen.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collection_sharding_runtime.h" -#include "mongo/db/s/collmod_coordinator.h" -#include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/sharded_collmod_gen.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/timeseries/catalog_helper.h" #include "mongo/db/timeseries/timeseries_collmod.h" -#include "mongo/logv2/log.h" +#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding - namespace mongo { namespace { +void releaseCriticalSectionInEmptySession(OperationContext* opCtx, + ShardingRecoveryService* service, + const NamespaceString& bucketNs, + const BSONObj& reason) { + auto txnParticipant = TransactionParticipant::get(opCtx); + if (txnParticipant) { + auto newClient = + getGlobalServiceContext()->makeClient("ShardsvrMovePrimaryExitCriticalSection"); + AlternativeClientRegion acr(newClient); + auto newOpCtx = CancelableOperationContext( + cc().makeOperationContext(), + opCtx->getCancellationToken(), + Grid::get(opCtx->getServiceContext())->getExecutorPool()->getFixedExecutor()); + newOpCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + service->releaseRecoverableCriticalSection( + newOpCtx.get(), bucketNs, reason, ShardingCatalogClient::kLocalWriteConcern); + } else { + // No need to create a new operation context if no session is checked-out + service->releaseRecoverableCriticalSection( + opCtx, bucketNs, reason, ShardingCatalogClient::kLocalWriteConcern); + } +} + class ShardSvrCollModParticipantCommand final : public TypedCommand { public: @@ -84,7 +132,6 @@ class ShardSvrCollModParticipantCommand final opCtx->getWriteConcern()); opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - // If the needsUnblock flag is set, we must have blocked the CRUD operations in the // previous phase of collMod operation for granularity updates. Unblock it now after we // have updated the granularity. @@ -111,9 +158,14 @@ class ShardSvrCollModParticipantCommand final auto service = ShardingRecoveryService::get(opCtx); const auto reason = BSON("command" << "ShardSvrParticipantBlockCommand" - << "ns" << bucketNs.toString()); - service->releaseRecoverableCriticalSection( - opCtx, bucketNs, reason, ShardingCatalogClient::kLocalWriteConcern); + << "ns" << NamespaceStringUtil::serialize(bucketNs)); + // In order to guarantee replay protection ShardsvrCollModParticipant will run + // within a retryable write. 
Any local transaction or retryable write spawned by + // this command (such as the release of the critical section) using the original + // operation context will cause a dead lock since the session has been already + // checked-out. We prevent the issue by using a new operation context with an + // empty session. + releaseCriticalSectionInEmptySession(opCtx, service, bucketNs, reason); } BSONObjBuilder builder; @@ -125,7 +177,23 @@ class ShardSvrCollModParticipantCommand final auto performViewChange = request().getPerformViewChange(); uassertStatusOK(timeseries::processCollModCommandWithTimeSeriesTranslation( opCtx, ns(), cmd, performViewChange, &builder)); - return CollModReply::parse(IDLParserContext("CollModReply"), builder.obj()); + auto collmodReply = + CollModReply::parse(IDLParserContext("CollModReply"), builder.obj()); + + // Since no write that generated a retryable write oplog entry with this sessionId + // and txnNumber happened, we need to make a dummy write so that the session gets + // durably persisted on the oplog. This must be the last operation done on this + // command. + auto txnParticipant = TransactionParticipant::get(opCtx); + if (txnParticipant) { + DBDirectClient dbClient(opCtx); + dbClient.update(NamespaceString::kServerConfigurationNamespace, + BSON("_id" << Request::kCommandName), + BSON("$inc" << BSON("count" << 1)), + true /* upsert */, + false /* multi */); + } + return collmodReply; } private: @@ -141,8 +209,9 @@ class ShardSvrCollModParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } shardsvrCollModParticipantCommand; diff --git a/src/mongo/db/s/shardsvr_commit_index_participant_command.cpp b/src/mongo/db/s/shardsvr_commit_index_participant_command.cpp index 72c3cb33d713e..c62bad09b779a 100644 --- a/src/mongo/db/s/shardsvr_commit_index_participant_command.cpp +++ b/src/mongo/db/s/shardsvr_commit_index_participant_command.cpp @@ -28,18 +28,36 @@ */ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/sharded_index_catalog_commands_gen.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" -#include "mongo/db/s/sharding_index_catalog_util.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ 
-128,8 +146,9 @@ class ShardsvrCommitIndexParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp index 9f71931571792..6c74ef8cd055e 100644 --- a/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_commit_reshard_collection_command.cpp @@ -28,19 +28,42 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/resharding/donor_document_gen.h" +#include "mongo/db/s/resharding/recipient_document_gen.h" #include "mongo/db/s/resharding/resharding_donor_recipient_common.h" +#include "mongo/db/s/resharding/resharding_donor_service.h" +#include "mongo/db/s/resharding/resharding_recipient_service.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/vector_clock_mutable.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/commit_reshard_collection_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -66,6 +89,10 @@ class ShardsvrCommitReshardCollectionCommand final CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, opCtx->getWriteConcern()); + // Persist the config time to ensure that in case of stepdown next filtering metadata + // refresh on the new primary will always fetch the latest information. 
+ VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); + std::vector> futuresToWait; { @@ -143,8 +170,9 @@ class ShardsvrCommitReshardCollectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_compact_structured_encryption_data_command.cpp b/src/mongo/db/s/shardsvr_compact_structured_encryption_data_command.cpp index bca40634ef641..05a3277cff650 100644 --- a/src/mongo/db/s/shardsvr_compact_structured_encryption_data_command.cpp +++ b/src/mongo/db/s/shardsvr_compact_structured_encryption_data_command.cpp @@ -28,18 +28,39 @@ */ +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/commands/fle2_compact.h" #include "mongo/db/commands/fle2_compact_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/compact_structured_encryption_data_coordinator.h" #include "mongo/db/s/compact_structured_encryption_data_coordinator_gen.h" -#include "mongo/db/server_feature_flags_gen.h" -#include "mongo/logv2/log.h" -#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -89,14 +110,6 @@ class _shardsvrCompactStructuredEncryptionDataCommand final [&]() -> std::shared_ptr { FixedFCVRegion fixedFcvRegion(opCtx); - // TODO: SERVER-68373 Remove once 7.0 becomes last LTS - uassert(7330300, - "The preview version of compactStructuredEncryptionData is no longer " - "supported in this binary version", - gFeatureFlagFLE2CompactForProtocolV2.isEnabled( - serverGlobalParams.featureCompatibility)); - - auto compact = makeRequest(opCtx); return ShardingDDLCoordinatorService::getService(opCtx)->getOrCreateInstance( opCtx, compact.toBSON()); @@ -114,7 +127,7 @@ class _shardsvrCompactStructuredEncryptionDataCommand final AutoGetCollection baseColl(opCtx, nss, MODE_IX); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Unknown collection: " << nss, + str::stream() << "Unknown collection: " << nss.toStringForErrorMsg(), baseColl.getCollection()); validateCompactRequest(req, *(baseColl.getCollection().get())); @@ -157,8 +170,9 @@ class _shardsvrCompactStructuredEncryptionDataCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), 
- ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_create_collection_command.cpp b/src/mongo/db/s/shardsvr_create_collection_command.cpp index 34091418ceaf9..7cee9a4055635 100644 --- a/src/mongo/db/s/shardsvr_create_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_create_collection_command.cpp @@ -28,19 +28,38 @@ */ +#include +#include +#include + +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/create_collection_coordinator.h" +#include "mongo/db/s/create_collection_coordinator_document_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_options.h" -#include "mongo/logv2/log.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -48,65 +67,6 @@ namespace mongo { namespace { -void translateToTimeseriesCollection(OperationContext* opCtx, - NamespaceString* nss, - CreateCollectionRequest* createCmdRequest) { - auto bucketsNs = nss->makeTimeseriesBucketsNamespace(); - // Hold reference to the catalog for collection lookup without locks to be safe. - auto catalog = CollectionCatalog::get(opCtx); - auto bucketsColl = catalog->lookupCollectionByNamespace(opCtx, bucketsNs); - - // If the 'system.buckets' exists or 'timeseries' parameters are passed in, we know that - // we are trying shard a timeseries collection. 
- if (bucketsColl || createCmdRequest->getTimeseries()) { - uassert(5731502, - "Sharding a timeseries collection feature is not enabled", - feature_flags::gFeatureFlagShardedTimeSeries.isEnabled( - serverGlobalParams.featureCompatibility)); - - if (bucketsColl) { - uassert(6235600, - str::stream() << "the collection '" << bucketsNs - << "' does not have 'timeseries' options", - bucketsColl->getTimeseriesOptions()); - - if (createCmdRequest->getTimeseries()) { - uassert(6235601, - str::stream() - << "the 'timeseries' spec provided must match that of exists '" << nss - << "' collection", - timeseries::optionsAreEqual(*createCmdRequest->getTimeseries(), - *bucketsColl->getTimeseriesOptions())); - } else { - createCmdRequest->setTimeseries(bucketsColl->getTimeseriesOptions()); - } - } - - auto timeField = createCmdRequest->getTimeseries()->getTimeField(); - auto metaField = createCmdRequest->getTimeseries()->getMetaField(); - BSONObjIterator iter{*createCmdRequest->getShardKey()}; - while (auto elem = iter.next()) { - if (elem.fieldNameStringData() == timeField) { - uassert(6235602, - str::stream() << "the time field '" << timeField - << "' can be only at the end of the shard key pattern", - !iter.more()); - } else { - uassert(6235603, - str::stream() << "only the time field or meta field can be " - "part of shard key pattern", - metaField && - (elem.fieldNameStringData() == *metaField || - elem.fieldNameStringData().startsWith(*metaField + "."))); - } - } - *nss = bucketsNs; - createCmdRequest->setShardKey( - uassertStatusOK(timeseries::createBucketsShardKeySpecFromTimeseriesShardKeySpec( - *createCmdRequest->getTimeseries(), *createCmdRequest->getShardKey()))); - } -} - class ShardsvrCreateCollectionCommand final : public TypedCommand { public: using Request = ShardsvrCreateCollection; @@ -146,19 +106,19 @@ class ShardsvrCreateCollectionCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_create_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_create_collection_participant_command.cpp index 560da89dceca3..54a2352b3a0b0 100644 --- a/src/mongo/db/s/shardsvr_create_collection_participant_command.cpp +++ b/src/mongo/db/s/shardsvr_create_collection_participant_command.cpp @@ -28,14 +28,32 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/migration_destination_manager.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -119,8 +137,9 @@ class 
ShardsvrCreateCollectionParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_create_global_index_command.cpp b/src/mongo/db/s/shardsvr_create_global_index_command.cpp index a5b626fea85a7..634b6e317e27e 100644 --- a/src/mongo/db/s/shardsvr_create_global_index_command.cpp +++ b/src/mongo/db/s/shardsvr_create_global_index_command.cpp @@ -27,13 +27,25 @@ * it in the license file. */ +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_index.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -97,8 +109,9 @@ class ShardsvrCreateGlobalIndexCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; }; diff --git a/src/mongo/db/s/shardsvr_delete_global_index_key_command.cpp b/src/mongo/db/s/shardsvr_delete_global_index_key_command.cpp index f543641b5bb59..90707e9deb162 100644 --- a/src/mongo/db/s/shardsvr_delete_global_index_key_command.cpp +++ b/src/mongo/db/s/shardsvr_delete_global_index_key_command.cpp @@ -29,16 +29,26 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/dbhelpers.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_index.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/global_index_crud_commands_gen.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -74,8 +84,9 @@ class ShardsvrDeleteGlobalIndexKeyCmd final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } bool supportsWriteConcern() const override { 
diff --git a/src/mongo/db/s/shardsvr_drop_collection_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_command.cpp index 8ac48003f7b38..babcba51e54bc 100644 --- a/src/mongo/db/s/shardsvr_drop_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_collection_command.cpp @@ -28,19 +28,41 @@ */ +#include +#include + +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/commands.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/drop_collection_coordinator.h" +#include "mongo/db/s/drop_collection_coordinator_document_gen.h" #include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/collection_routing_info_targeter.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -95,16 +117,6 @@ class ShardsvrDropCollectionCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } sharsvrdDropCollectionCommand; diff --git a/src/mongo/db/s/shardsvr_drop_collection_if_uuid_not_matching_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_if_uuid_not_matching_command.cpp index 26975ac7cb82e..4be3c42b2d4ab 100644 --- a/src/mongo/db/s/shardsvr_drop_collection_if_uuid_not_matching_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_collection_if_uuid_not_matching_command.cpp @@ -27,13 +27,26 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/drop_collection.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/drop_collection_if_uuid_not_matching_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -41,62 +54,6 @@ namespace mongo { namespace { -// TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS. 
-class ShardsvrDropCollectionIfUUIDNotMatchingCommand final - : public TypedCommand { -public: - bool skipApiVersionCheck() const override { - /* Internal command (server to server) */ - return true; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { - return Command::AllowedOnSecondary::kNever; - } - - std::string help() const override { - return "Internal command aimed to remove stale entries from the local collection catalog."; - } - - using Request = ShardsvrDropCollectionIfUUIDNotMatchingRequest; - - class Invocation final : public InvocationBase { - public: - using InvocationBase::InvocationBase; - - void typedRun(OperationContext* opCtx) { - uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); - - opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); - - uassertStatusOK(dropCollectionIfUUIDNotMatching( - opCtx, ns(), request().getExpectedCollectionUUID())); - - WriteConcernResult ignoreResult; - auto latestOpTime = repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); - uassertStatusOK(waitForWriteConcern( - opCtx, latestOpTime, CommandHelpers::kMajorityWriteConcern, &ignoreResult)); - } - - private: - NamespaceString ns() const override { - return request().getNamespace(); - } - - bool supportsWriteConcern() const override { - return false; - } - - void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::dropCollection)); - } - }; -} shardSvrDropCollectionIfUUIDNotMatching; - class ShardsvrDropCollectionIfUUIDNotMatchingWithWriteConcernCommand final : public TypedCommand { public: @@ -144,8 +101,9 @@ class ShardsvrDropCollectionIfUUIDNotMatchingWithWriteConcernCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::dropCollection)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::dropCollection)); } }; } shardSvrDropCollectionIfUUIDNotMatchingWithWriteConcern; diff --git a/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp index 60257a34b6a82..ac623fff10e5a 100644 --- a/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_collection_participant_command.cpp @@ -28,18 +28,34 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/drop_collection_coordinator.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/logv2/log.h" +#include "mongo/db/vector_clock_mutable.h" +#include "mongo/rpc/op_msg.h" 
#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -86,6 +102,9 @@ class ShardsvrDropCollectionParticipantCommand final opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); + // Checkpoint the vector clock to ensure causality in the event of a crash or shutdown. + VectorClockMutable::get(opCtx)->waitForDurableConfigTime().get(opCtx); + bool fromMigrate = request().getFromMigrate().value_or(false); DropCollectionCoordinator::dropCollectionLocally(opCtx, ns(), fromMigrate); @@ -113,8 +132,9 @@ class ShardsvrDropCollectionParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } shardsvrDropCollectionParticipantCommand; diff --git a/src/mongo/db/s/shardsvr_drop_database_command.cpp b/src/mongo/db/s/shardsvr_drop_database_command.cpp index 55c11b82e71d8..5770b1ecde441 100644 --- a/src/mongo/db/s/shardsvr_drop_database_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_database_command.cpp @@ -28,19 +28,42 @@ */ +#include +#include +#include + +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/commands.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/drop_database_coordinator.h" +#include "mongo/db/s/drop_database_coordinator_document_gen.h" #include "mongo/db/s/operation_sharding_state.h" +#include "mongo/db/s/sharding_ddl_coordinator.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -84,7 +107,8 @@ class ShardsvrDropDatabaseCommand final : public TypedCommandgetDatabaseProfileLevel(ns().dbName())); auto service = ShardingDDLCoordinatorService::getService(opCtx); - const auto requestVersion = OperationShardingState::get(opCtx).getDbVersion(ns().db()); + const auto requestVersion = + OperationShardingState::get(opCtx).getDbVersion(ns().dbName()); auto dropDatabaseCoordinator = [&]() { while (true) { // TODO SERVER-73627: Remove once 7.0 becomes last LTS. 
@@ -132,8 +156,9 @@ class ShardsvrDropDatabaseCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } shardsvrDropDatabaseCommand; diff --git a/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp b/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp index 5bd7f1b5225c0..a28161a93b69e 100644 --- a/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_database_participant_command.cpp @@ -28,16 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/drop_database.h" -#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" -#include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -99,8 +109,9 @@ class ShardsvrDropDatabaseParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } shardsvrDropDatabaseParticipantCommand; diff --git a/src/mongo/db/s/shardsvr_drop_global_index_command.cpp b/src/mongo/db/s/shardsvr_drop_global_index_command.cpp index e28d4e94da205..38a11ba5139c9 100644 --- a/src/mongo/db/s/shardsvr_drop_global_index_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_global_index_command.cpp @@ -27,13 +27,25 @@ * it in the license file. 
*/ +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_index.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -96,8 +108,9 @@ class ShardsvrDropGlobalIndexCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; }; diff --git a/src/mongo/db/s/shardsvr_drop_index_catalog_entry_participant_command.cpp b/src/mongo/db/s/shardsvr_drop_index_catalog_entry_participant_command.cpp index 9d65051ec8c92..8a4d004db5614 100644 --- a/src/mongo/db/s/shardsvr_drop_index_catalog_entry_participant_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_index_catalog_entry_participant_command.cpp @@ -28,18 +28,36 @@ */ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/sharded_index_catalog_commands_gen.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" -#include "mongo/db/s/sharding_index_catalog_util.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -124,8 +142,9 @@ class ShardsvrDropIndexCatalogEntryParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_drop_indexes_command.cpp b/src/mongo/db/s/shardsvr_drop_indexes_command.cpp index a50ace9a5aef5..faec71c5f5401 100644 --- a/src/mongo/db/s/shardsvr_drop_indexes_command.cpp +++ b/src/mongo/db/s/shardsvr_drop_indexes_command.cpp @@ -28,20 +28,59 @@ */ +#include +#include +#include +#include +#include 
+#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/curop.h" -#include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/database_name.h" +#include "mongo/db/drop_indexes_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/db/s/ddl_lock_manager.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/db/timeseries/catalog_helper.h" #include "mongo/db/timeseries/timeseries_commands_conversion_helper.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/sharding_feature_flags_gen.h" #include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -122,8 +161,9 @@ class ShardsvrDropIndexesCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; @@ -154,17 +194,28 @@ ShardsvrDropIndexesCommand::Invocation::Response ShardsvrDropIndexesCommand::Inv return DDLLockManager::kDefaultLockTimeout; }(); + // Acquire the DDL lock to serialize with other DDL operations. It also makes sure that we are + // targeting the primary shard for this database. static constexpr StringData lockReason{"dropIndexes"_sd}; - auto ddlLockManager = DDLLockManager::get(opCtx); - auto dbDDLLock = ddlLockManager->lock(opCtx, ns().db(), lockReason, lockTimeout); + // TODO SERVER-77546 remove db ddl lock acquisition on feature flag removal since it + // will be implicitly taken in IX mode under the collection ddl lock acquisition + boost::optional dbDDLLock; + if (!feature_flags::gMultipleGranularityDDLLocking.isEnabled( + serverGlobalParams.featureCompatibility)) { + dbDDLLock.emplace(opCtx, ns().dbName(), lockReason, MODE_X, lockTimeout); + } - // Check under the dbLock if this is still the primary shard for the database - DatabaseShardingState::assertIsPrimaryShardForDb(opCtx, ns().db()); + // Acquire the DDL lock to serialize with other DDL operations. It also makes sure that we are + // targeting the primary shard for this database. 
+ const DDLLockManager::ScopedCollectionDDLLock collDDLLock{ + opCtx, ns(), lockReason, MODE_X, lockTimeout}; auto resolvedNs = ns(); auto dropIdxBSON = dropIdxCmd.toBSON({}); + // Checking if it is a timeseries collection under the collection DDL lock + boost::optional timeseriesCollDDLLock; if (auto timeseriesOptions = timeseries::getTimeseriesOptions(opCtx, ns(), true)) { dropIdxBSON = timeseries::makeTimeseriesCommand(dropIdxBSON, @@ -173,9 +224,11 @@ ShardsvrDropIndexesCommand::Invocation::Response ShardsvrDropIndexesCommand::Inv DropIndexes::kIsTimeseriesNamespaceFieldName); resolvedNs = ns().makeTimeseriesBucketsNamespace(); - } - auto collDDLLock = ddlLockManager->lock(opCtx, resolvedNs.ns(), lockReason, lockTimeout); + // If it is a timeseries collection, we actually need to acquire the bucket + // namespace DDL lock + timeseriesCollDDLLock.emplace(opCtx, resolvedNs, lockReason, MODE_X, lockTimeout); + } StaleConfigRetryState retryState; return shardVersionRetry( @@ -201,8 +254,10 @@ ShardsvrDropIndexesCommand::Invocation::Response ShardsvrDropIndexesCommand::Inv CommandHelpers::filterCommandRequestForPassthrough(cmdToBeSent)), ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kNotIdempotent, - BSONObj() /* query */, - BSONObj() /* collation */); + BSONObj() /*query*/, + BSONObj() /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); // Append responses we've received from previous retries of this operation due to a // stale config error. diff --git a/src/mongo/db/s/shardsvr_get_stats_for_balancing_command.cpp b/src/mongo/db/s/shardsvr_get_stats_for_balancing_command.cpp index 4034d70a4eac6..970ad32e7616e 100644 --- a/src/mongo/db/s/shardsvr_get_stats_for_balancing_command.cpp +++ b/src/mongo/db/s/shardsvr_get_stats_for_balancing_command.cpp @@ -28,15 +28,32 @@ */ +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/balancer_stats_registry.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/get_stats_for_balancing_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -145,8 +162,9 @@ class ShardsvrGetStatsForBalancingCmd final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } _shardsvrGetStatsForBalancingCmd; diff --git a/src/mongo/db/s/shardsvr_index_catalog_test_commands.cpp b/src/mongo/db/s/shardsvr_index_catalog_test_commands.cpp index bdf9f28b47754..80081fb9ae159 100644 --- a/src/mongo/db/s/shardsvr_index_catalog_test_commands.cpp +++ b/src/mongo/db/s/shardsvr_index_catalog_test_commands.cpp @@ -27,22 +27,38 @@ * it in the license file. 
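Editor's note: the `_shardsvrDropIndexes` hunk above replaces the old lock-manager calls with scoped RAII DDL locks. The sketch below condenses that flow; the two `boost::optional` template arguments were lost in this rendering and are assumed here to be `DDLLockManager::ScopedDatabaseDDLLock` and `DDLLockManager::ScopedCollectionDDLLock`.

```cpp
static constexpr StringData lockReason{"dropIndexes"_sd};

// TODO SERVER-77546: once the multiple-granularity DDL locking flag is removed, the database
// DDL lock is taken implicitly (in IX mode) by the collection DDL lock below.
boost::optional<DDLLockManager::ScopedDatabaseDDLLock> dbDDLLock;  // assumed template argument
if (!feature_flags::gMultipleGranularityDDLLocking.isEnabled(
        serverGlobalParams.featureCompatibility)) {
    dbDDLLock.emplace(opCtx, ns().dbName(), lockReason, MODE_X, lockTimeout);
}

// Serializes with other DDL operations and checks that this shard is the database primary.
const DDLLockManager::ScopedCollectionDDLLock collDDLLock{
    opCtx, ns(), lockReason, MODE_X, lockTimeout};

// For time-series collections the DDL lock must also cover the buckets namespace.
boost::optional<DDLLockManager::ScopedCollectionDDLLock> timeseriesCollDDLLock;  // assumed type
if (auto timeseriesOptions = timeseries::getTimeseriesOptions(opCtx, ns(), true)) {
    timeseriesCollDDLLock.emplace(
        opCtx, ns().makeTimeseriesBucketsNamespace(), lockReason, MODE_X, lockTimeout);
}
```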
*/ +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharded_index_catalog_commands_gen.h" #include "mongo/db/s/sharding_index_catalog_util.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/internal_session_pool.h" -#include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/network_interface_thread_pool.h" -#include "mongo/executor/task_executor.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/executor/task_executor_pool.h" -#include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/grid.h" #include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/util/future_util.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -124,8 +140,9 @@ class ShardsvrRegisterIndexTestCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; }; @@ -204,8 +221,9 @@ class ShardsvrUnregisterIndexTestCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; }; diff --git a/src/mongo/db/s/shardsvr_insert_global_index_key_command.cpp b/src/mongo/db/s/shardsvr_insert_global_index_key_command.cpp index f4af6507281b9..d68d462dfafb4 100644 --- a/src/mongo/db/s/shardsvr_insert_global_index_key_command.cpp +++ b/src/mongo/db/s/shardsvr_insert_global_index_key_command.cpp @@ -29,16 +29,26 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/dbhelpers.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_index.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/global_index_crud_commands_gen.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -74,8 +84,9 @@ class 
ShardsvrInsertGlobalIndexKeyCmd final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } bool supportsWriteConcern() const override { diff --git a/src/mongo/db/s/shardsvr_join_migrations_command.cpp b/src/mongo/db/s/shardsvr_join_migrations_command.cpp index d5b2e1710383f..937d0f3cf4ed0 100644 --- a/src/mongo/db/s/shardsvr_join_migrations_command.cpp +++ b/src/mongo/db/s/shardsvr_join_migrations_command.cpp @@ -27,13 +27,27 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/shardsvr_join_migrations_request_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -94,8 +108,9 @@ class ShardsvrJoinMigrationsCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } _shardsvrJoinMigrationsCmd; diff --git a/src/mongo/db/s/shardsvr_merge_all_chunks_on_shard_command.cpp b/src/mongo/db/s/shardsvr_merge_all_chunks_on_shard_command.cpp index 1d8989cc04814..c9ec657fe9e06 100644 --- a/src/mongo/db/s/shardsvr_merge_all_chunks_on_shard_command.cpp +++ b/src/mongo/db/s/shardsvr_merge_all_chunks_on_shard_command.cpp @@ -28,12 +28,33 @@ */ +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/merge_chunk_request_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -113,8 +134,9 @@ class ShardSvrMergeAllChunksOnShardCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + 
->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } shardSvrMergeAllChunksOnShard; diff --git a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp index 4169f3235d526..7ab3a0d67cb5b 100644 --- a/src/mongo/db/s/shardsvr_merge_chunks_command.cpp +++ b/src/mongo/db/s/shardsvr_merge_chunks_command.cpp @@ -28,26 +28,53 @@ */ +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/field_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/chunk_operation_precondition_checks.h" -#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/db/vector_clock.h" -#include "mongo/logv2/log.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/merge_chunk_request_gen.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -129,11 +156,12 @@ class MergeChunksCommand : public ErrmsgCommandDeprecated { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -168,7 +196,8 @@ class MergeChunksCommand : public ErrmsgCommandDeprecated { BSONObjBuilder& result) override { uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); - const NamespaceString nss(parseNs({boost::none, dbname}, cmdObj)); + const NamespaceString nss( + parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), cmdObj)); std::vector bounds; if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) { diff --git a/src/mongo/db/s/shardsvr_move_primary_command.cpp b/src/mongo/db/s/shardsvr_move_primary_command.cpp index 
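Editor's note: the two legacy `ErrmsgCommandDeprecated` commands in this patch (mergeChunks here, splitChunk further down) get the equivalent treatment: `checkAuthForOperation` now receives the `DatabaseName` so its tenant id can scope the cluster resource, and the namespace is parsed through `DatabaseNameUtil::deserialize`. A condensed sketch of what the hunk above establishes:

```cpp
Status checkAuthForOperation(OperationContext* opCtx,
                             const DatabaseName& dbName,
                             const BSONObj&) const override {
    if (!AuthorizationSession::get(opCtx->getClient())
             ->isAuthorizedForActionsOnResource(
                 ResourcePattern::forClusterResource(dbName.tenantId()),
                 ActionType::internal)) {
        return Status(ErrorCodes::Unauthorized, "Unauthorized");
    }
    return Status::OK();
}

// In errmsgRun(): build the NamespaceString through DatabaseNameUtil rather than from a raw
// {tenantId, dbname} initializer list.
const NamespaceString nss(
    parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), cmdObj));
```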
c306297fe761a..ff49c2731554c 100644 --- a/src/mongo/db/s/shardsvr_move_primary_command.cpp +++ b/src/mongo/db/s/shardsvr_move_primary_command.cpp @@ -27,149 +27,141 @@ * it in the license file. */ +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/move_primary_coordinator.h" -#include "mongo/db/s/move_primary_coordinator_no_resilient.h" +#include "mongo/db/s/move_primary_coordinator_document_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/move_primary_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { namespace { -class MovePrimaryCommand : public BasicCommand { +class ShardsvrMovePrimaryCommand final : public TypedCommand { public: - MovePrimaryCommand() : BasicCommand("_shardsvrMovePrimary") {} - - bool skipApiVersionCheck() const override { - // Internal command (server to server). 
- return true; - } - - std::string help() const override { - return "should not be calling this directly"; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { - return AllowedOnSecondary::kNever; - } + using Request = ShardsvrMovePrimary; - bool adminOnly() const override { - return true; - } + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; - bool supportsWriteConcern(const BSONObj& cmd) const override { - return true; - } - - Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, - const BSONObj&) const override { - if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { - return Status(ErrorCodes::Unauthorized, "Unauthorized"); - } - return Status::OK(); - } - - NamespaceString parseNs(const DatabaseName& dbName, const BSONObj& cmdObj) const override { - const auto nsElt = cmdObj.firstElement(); - uassert(ErrorCodes::InvalidNamespace, - "'movePrimary' must be of type String", - nsElt.type() == BSONType::String); - return NamespaceStringUtil::parseNamespaceFromRequest(dbName.tenantId(), nsElt.str()); - } + void typedRun(OperationContext* opCtx) { + uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); + CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName, + opCtx->getWriteConcern()); - bool run(OperationContext* opCtx, - const DatabaseName&, - const BSONObj& cmdObj, - BSONObjBuilder& result) override { - uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); + const auto& dbNss = ns(); + const auto& toShardId = request().getTo(); - const auto movePrimaryRequest = - ShardMovePrimary::parse(IDLParserContext("_shardsvrMovePrimary"), cmdObj); - const auto dbName = parseNs({boost::none, ""}, cmdObj).dbName(); + uassert(ErrorCodes::InvalidNamespace, + "invalid database {}"_format(dbNss.toStringForErrorMsg()), + NamespaceString::validDBName(dbNss.dbName(), + NamespaceString::DollarInDbNameBehavior::Allow)); - const NamespaceString dbNss(dbName); - const auto toShardArg = movePrimaryRequest.getTo(); - - uassert( - ErrorCodes::InvalidNamespace, - str::stream() << "invalid db name specified: " << dbName.toStringForErrorMsg(), - NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)); - - uassert(ErrorCodes::InvalidOptions, - str::stream() << "Can't move primary for " << dbName.toStringForErrorMsg() - << " database", + uassert( + ErrorCodes::InvalidOptions, + "cannot move primary of internal database {}"_format(dbNss.toStringForErrorMsg()), !dbNss.isOnInternalDb()); - uassert(ErrorCodes::InvalidOptions, - str::stream() << "you have to specify where you want to move it", - !toShardArg.empty()); - - CommandHelpers::uassertCommandRunWithMajority(getName(), opCtx->getWriteConcern()); + ScopeGuard onBlockExit( + [&] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbNss.db()); }); - ON_BLOCK_EXIT( - [opCtx, dbNss] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbNss.db()); }); + const auto coordinatorFuture = [&] { + FixedFCVRegion fcvRegion(opCtx); - const auto coordinatorFuture = [&] { - FixedFCVRegion fcvRegion(opCtx); + auto shardRegistry = Grid::get(opCtx)->shardRegistry(); + // Ensure that the shard information is up-to-date as possible to catch the case + // where a shard with the same name, but with a different host, has been + // removed/re-added. 
+ shardRegistry->reload(opCtx); + const auto toShard = uassertStatusOKWithContext( + shardRegistry->getShard(opCtx, toShardId), + "requested primary shard {} does not exist"_format(toShardId.toString())); - // TODO (SERVER-71309): Remove once 7.0 becomes last LTS. - if (!feature_flags::gResilientMovePrimary.isEnabled( - serverGlobalParams.featureCompatibility)) { const auto coordinatorDoc = [&] { MovePrimaryCoordinatorDocument doc; doc.setShardingDDLCoordinatorMetadata( - {{dbNss, DDLCoordinatorTypeEnum::kMovePrimaryNoResilient}}); - doc.setToShardId(toShardArg.toString()); + {{dbNss, DDLCoordinatorTypeEnum::kMovePrimary}}); + doc.setToShardId(toShard->getId()); return doc.toBSON(); }(); const auto coordinator = [&] { auto service = ShardingDDLCoordinatorService::getService(opCtx); - return checked_pointer_cast( + return checked_pointer_cast( service->getOrCreateInstance(opCtx, std::move(coordinatorDoc))); }(); return coordinator->getCompletionFuture(); - } - - auto shardRegistry = Grid::get(opCtx)->shardRegistry(); - // Ensure that the shard information is up-to-date as possible to catch the case where - // a shard with the same name, but with a different host, has been removed/re-added. - shardRegistry->reload(opCtx); - const auto toShard = uassertStatusOKWithContext( - shardRegistry->getShard(opCtx, toShardArg.toString()), - "Requested primary shard {} does not exist"_format(toShardArg)); - - const auto coordinatorDoc = [&] { - MovePrimaryCoordinatorDocument doc; - doc.setShardingDDLCoordinatorMetadata( - {{dbNss, DDLCoordinatorTypeEnum::kMovePrimary}}); - doc.setToShardId(toShard->getId()); - return doc.toBSON(); }(); - const auto coordinator = [&] { - auto service = ShardingDDLCoordinatorService::getService(opCtx); - return checked_pointer_cast( - service->getOrCreateInstance(opCtx, std::move(coordinatorDoc))); - }(); + coordinatorFuture.get(opCtx); + } - return coordinator->getCompletionFuture(); - }(); + private: + NamespaceString ns() const override { + return NamespaceString(request().getCommandParameter()); + } - coordinatorFuture.get(opCtx); + bool supportsWriteConcern() const override { + return true; + } + + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); + } + }; + +private: + bool adminOnly() const override { return true; } -} movePrimaryCmd; + + bool skipApiVersionCheck() const override { + return true; + } + + AllowedOnSecondary secondaryAllowed(ServiceContext* context) const override { + return AllowedOnSecondary::kNever; + } + + std::string help() const override { + return "Internal command. Do not call directly."; + } +} shardsvrMovePrimaryCommand; } // namespace } // namespace mongo diff --git a/src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp b/src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp index 0abdf48d19e27..d20ddb1a53b36 100644 --- a/src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp +++ b/src/mongo/db/s/shardsvr_move_primary_enter_critical_section_command.cpp @@ -27,18 +27,45 @@ * it in the license file. 
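Editor's note: the `_shardsvrMovePrimary` rewrite above converts the old `BasicCommand` into a `TypedCommand` and drops the non-resilient coordinator path. A condensed sketch of the resulting invocation, abridged from the hunk (the namespace validation uasserts are omitted); the `checked_pointer_cast` template argument was lost in this rendering and is assumed to be `MovePrimaryCoordinator`.

```cpp
void typedRun(OperationContext* opCtx) {
    uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands());
    CommandHelpers::uassertCommandRunWithMajority(Request::kCommandName,
                                                  opCtx->getWriteConcern());

    const auto& dbNss = ns();
    const auto& toShardId = request().getTo();

    // The routing info for the database is purged whether or not the move succeeds.
    ScopeGuard onBlockExit(
        [&] { Grid::get(opCtx)->catalogCache()->purgeDatabase(dbNss.db()); });

    const auto coordinatorFuture = [&] {
        FixedFCVRegion fcvRegion(opCtx);

        // Reload so that a shard re-added under the same name with a different host is
        // resolved correctly before the coordinator document is created.
        auto shardRegistry = Grid::get(opCtx)->shardRegistry();
        shardRegistry->reload(opCtx);
        const auto toShard = uassertStatusOKWithContext(
            shardRegistry->getShard(opCtx, toShardId),
            "requested primary shard {} does not exist"_format(toShardId.toString()));

        MovePrimaryCoordinatorDocument doc;
        doc.setShardingDDLCoordinatorMetadata({{dbNss, DDLCoordinatorTypeEnum::kMovePrimary}});
        doc.setToShardId(toShard->getId());

        auto service = ShardingDDLCoordinatorService::getService(opCtx);
        auto coordinator = checked_pointer_cast<MovePrimaryCoordinator>(  // assumed cast target
            service->getOrCreateInstance(opCtx, doc.toBSON()));
        return coordinator->getCompletionFuture();
    }();

    coordinatorFuture.get(opCtx);
}
```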
*/ +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_recovery_service.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/move_primary_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -69,10 +96,6 @@ class ShardsvrMovePrimaryEnterCriticalSectionCommand final // cause the failure of the second operation. auto newClient = getGlobalServiceContext()->makeClient( "ShardsvrMovePrimaryEnterCriticalSection"); - { - stdx::lock_guard lk(*newClient); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto newOpCtx = CancelableOperationContext( cc().makeOperationContext(), @@ -126,8 +149,9 @@ class ShardsvrMovePrimaryEnterCriticalSectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } void waitForCriticalSectionToComplete(OperationContext* opCtx, diff --git a/src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp b/src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp index 81a0f4bd8aad7..90a212d68ecef 100644 --- a/src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp +++ b/src/mongo/db/s/shardsvr_move_primary_exit_critical_section_command.cpp @@ -27,18 +27,38 @@ * it in the license file. 
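Editor's note: several commands in this patch (both movePrimary critical-section commands, `_shardsvrMoveRange`, and `_shardsvrParticipantBlock`) drop the same small block that marked their helper `Client` as killable by stepdown. Sketched here against the enter-critical-section command, with the removed lines kept as comments for contrast; the rest of the function (the `CancelableOperationContext` setup) is unchanged and omitted.

```cpp
auto newClient =
    getGlobalServiceContext()->makeClient("ShardsvrMovePrimaryEnterCriticalSection");
// Removed by this patch:
//     {
//         stdx::lock_guard lk(*newClient);
//         newClient->setSystemOperationKillableByStepdown(lk);
//     }
AlternativeClientRegion acr(newClient);
```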
*/ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/sharding_recovery_service.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/move_primary_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -69,10 +89,6 @@ class ShardsvrMovePrimaryExitCriticalSectionCommand final // solution is to use an alternative client as well as a new operation context. auto newClient = getGlobalServiceContext()->makeClient("ShardsvrMovePrimaryExitCriticalSection"); - { - stdx::lock_guard lk(*newClient); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto newOpCtx = CancelableOperationContext( cc().makeOperationContext(), @@ -126,8 +142,9 @@ class ShardsvrMovePrimaryExitCriticalSectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_move_range_command.cpp b/src/mongo/db/s/shardsvr_move_range_command.cpp index 9fdacc8186c5c..6f3b6d9a9e6cf 100644 --- a/src/mongo/db/s/shardsvr_move_range_command.cpp +++ b/src/mongo/db/s/shardsvr_move_range_command.cpp @@ -28,19 +28,57 @@ */ +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/client/read_preference.h" +#include "mongo/client/remote_command_targeter.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/active_migrations_registry.h" #include "mongo/db/s/migration_source_manager.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_statistics.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/write_concern.h" 
+#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/redaction.h" -#include "mongo/s/commands/cluster_commands_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -109,10 +147,6 @@ class ShardsvrMoveRangeCommand final : public TypedCommand lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } auto uniqueOpCtx = Client::getCurrent()->makeOperationContext(); auto executorOpCtx = uniqueOpCtx.get(); Status status = {ErrorCodes::InternalError, "Uninitialized value"}; @@ -190,8 +224,9 @@ class ShardsvrMoveRangeCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } static void _runImpl(OperationContext* opCtx, @@ -215,6 +250,13 @@ class ShardsvrMoveRangeCommand final : public TypedCommandtoString() : "", + "totalTimeMillis"_attr = migrationSourceManager.getOpTimeMillis(), + "docsCloned"_attr = docsCloned, + "bytesCloned"_attr = bytesCloned, + "cloneTime"_attr = cloneTime); } }; diff --git a/src/mongo/db/s/shardsvr_notify_sharding_event_command.cpp b/src/mongo/db/s/shardsvr_notify_sharding_event_command.cpp index 8b05305731cbf..0e161293b803b 100644 --- a/src/mongo/db/s/shardsvr_notify_sharding_event_command.cpp +++ b/src/mongo/db/s/shardsvr_notify_sharding_event_command.cpp @@ -26,13 +26,26 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/notify_sharding_event_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/change_stream_oplog_notification.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -76,17 +89,14 @@ class ShardsvrNotifyShardingEventCommand : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_participant_block_command.cpp b/src/mongo/db/s/shardsvr_participant_block_command.cpp index ad7f003df9b2f..d966f2a643c52 100644 --- a/src/mongo/db/s/shardsvr_participant_block_command.cpp +++ b/src/mongo/db/s/shardsvr_participant_block_command.cpp @@ -28,18 +28,37 @@ */ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/cancelable_operation_context.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/participant_block_gen.h" #include "mongo/db/s/sharding_recovery_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -81,14 +100,13 @@ class ShardSvrParticipantBlockCommand final : public TypedCommandsetAlwaysInterruptAtStepDownOrUp_UNSAFE(); auto handleRecoverableCriticalSection = [this](auto opCtx) { - const auto reason = - request().getReason().get_value_or(BSON("command" - << "ShardSvrParticipantBlockCommand" - << "ns" << ns().toString())); + const auto reason = request().getReason().get_value_or( + BSON("command" + << "ShardSvrParticipantBlockCommand" + << "ns" << NamespaceStringUtil::serialize(ns()))); auto blockType = request().getBlockType().get_value_or( CriticalSectionBlockTypeEnum::kReadsAndWrites); - bool allowViews = request().getAllowViews(); auto service = ShardingRecoveryService::get(opCtx); switch (blockType) { case CriticalSectionBlockTypeEnum::kUnblock: @@ -97,30 +115,17 @@ class ShardSvrParticipantBlockCommand final : public 
TypedCommandacquireRecoverableCriticalSectionBlockWrites( - opCtx, - ns(), - reason, - ShardingCatalogClient::kLocalWriteConcern, - allowViews); + opCtx, ns(), reason, ShardingCatalogClient::kLocalWriteConcern); break; default: service->acquireRecoverableCriticalSectionBlockWrites( - opCtx, - ns(), - reason, - ShardingCatalogClient::kLocalWriteConcern, - allowViews); + opCtx, ns(), reason, ShardingCatalogClient::kLocalWriteConcern); service->promoteRecoverableCriticalSectionToBlockAlsoReads( - opCtx, - ns(), - reason, - ShardingCatalogClient::kLocalWriteConcern, - allowViews); + opCtx, ns(), reason, ShardingCatalogClient::kLocalWriteConcern); }; }; @@ -128,10 +133,6 @@ class ShardSvrParticipantBlockCommand final : public TypedCommandmakeClient("ShardSvrParticipantBlockCmdClient"); - { - stdx::lock_guard lk(*newClient); - newClient->setSystemOperationKillableByStepdown(lk); - } AlternativeClientRegion acr(newClient); auto cancelableOperationContext = CancelableOperationContext( cc().makeOperationContext(), @@ -171,8 +172,9 @@ class ShardSvrParticipantBlockCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; } shardsvrParticipantBlockCommand; diff --git a/src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp b/src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp index 22b3665b70806..f1f9e4012c5e4 100644 --- a/src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp +++ b/src/mongo/db/s/shardsvr_refine_collection_shard_key_command.cpp @@ -28,12 +28,23 @@ */ -#include "mongo/db/auth/authorization_session.h" +#include +#include + +#include "mongo/base/checked_cast.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/refine_collection_shard_key_coordinator.h" +#include "mongo/db/s/refine_collection_shard_key_coordinator_document_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/shardsvr_rename_collection_command.cpp b/src/mongo/db/s/shardsvr_rename_collection_command.cpp index 8422f94167d02..cca081d848804 100644 --- a/src/mongo/db/s/shardsvr_rename_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_rename_collection_command.cpp @@ -28,18 +28,32 @@ */ +#include +#include + +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/rename_collection.h" #include "mongo/db/commands.h" #include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/rename_collection_coordinator.h" +#include "mongo/db/s/sharded_rename_collection_gen.h" +#include 
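Editor's note: in `_shardsvrParticipantBlock` the default reason document now serializes the namespace through `NamespaceStringUtil`, and the trailing `allowViews` argument is dropped from the recovery-service calls. A condensed sketch of the critical-section entry path established above (the kUnblock branch is elided):

```cpp
const auto reason = request().getReason().get_value_or(
    BSON("command"
         << "ShardSvrParticipantBlockCommand"
         << "ns" << NamespaceStringUtil::serialize(ns())));
auto blockType =
    request().getBlockType().get_value_or(CriticalSectionBlockTypeEnum::kReadsAndWrites);

auto service = ShardingRecoveryService::get(opCtx);
// Write-blocking branch: no allowViews argument any more.
service->acquireRecoverableCriticalSectionBlockWrites(
    opCtx, ns(), reason, ShardingCatalogClient::kLocalWriteConcern);
// The reads-and-writes (default) branch additionally promotes the section.
service->promoteRecoverableCriticalSectionToBlockAlsoReads(
    opCtx, ns(), reason, ShardingCatalogClient::kLocalWriteConcern);
```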
"mongo/db/s/sharding_ddl_coordinator_gen.h" #include "mongo/db/s/sharding_ddl_coordinator_service.h" -#include "mongo/db/s/sharding_ddl_util.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" -#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -90,18 +104,21 @@ class ShardsvrRenameCollectionCommand final : public TypedCommandgetWriteConcern()); + uassert(ErrorCodes::IllegalOperation, + "Can't rename a collection in the config database", + !fromNss.isConfigDB()); + uassert(ErrorCodes::IllegalOperation, + "Can't rename a collection in the admin database", + !fromNss.isAdminDB()); + validateNamespacesForRenameCollection(opCtx, fromNss, toNss); auto renameCollectionCoordinator = [&]() { FixedFCVRegion fixedFcvRegion{opCtx}; auto coordinatorDoc = RenameCollectionCoordinatorDocument(); coordinatorDoc.setRenameCollectionRequest(req.getRenameCollectionRequest()); - // TODO SERVER-72796: Remove once gGlobalIndexesShardingCatalog is enabled. coordinatorDoc.setShardingDDLCoordinatorMetadata( - {{fromNss, - feature_flags::gGlobalIndexesShardingCatalog.isEnabled(*fixedFcvRegion) - ? DDLCoordinatorTypeEnum::kRenameCollection - : DDLCoordinatorTypeEnum::kRenameCollectionPre63Compatible}}); + {{fromNss, DDLCoordinatorTypeEnum::kRenameCollection}}); coordinatorDoc.setAllowEncryptedCollectionRename( req.getAllowEncryptedCollectionRename().value_or(false)); auto service = ShardingDDLCoordinatorService::getService(opCtx); @@ -126,8 +143,9 @@ class ShardsvrRenameCollectionCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp index b09077fee0c77..d0e6d5032b7f7 100644 --- a/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp +++ b/src/mongo/db/s/shardsvr_rename_collection_participant_command.cpp @@ -28,17 +28,37 @@ */ -#include "mongo/platform/basic.h" - +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/rename_collection_participant_service.h" #include "mongo/db/s/sharded_rename_collection_gen.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/db/write_concern.h" -#include "mongo/logv2/log.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/namespace_string_util.h" +#include 
"mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -101,7 +121,8 @@ class ShardsvrRenameCollectionParticipantCommand final RenameParticipantInstance::getOrCreate(opCtx, service, participantDocBSON); bool hasSameOptions = renameCollectionParticipant->hasSameOptions(participantDocBSON); uassert(ErrorCodes::InvalidOptions, - str::stream() << "Another rename participant for namespace " << fromNss + str::stream() << "Another rename participant for namespace " + << fromNss.toStringForErrorMsg() << "is instantiated with different parameters: `" << renameCollectionParticipant->doc() << "` vs `" << participantDocBSON << "`", @@ -133,8 +154,9 @@ class ShardsvrRenameCollectionParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; @@ -187,7 +209,7 @@ class ShardsvrRenameCollectionUnblockParticipantCommand final const auto& req = request(); const auto service = RenameCollectionParticipantService::getService(opCtx); - const auto id = BSON("_id" << fromNss.ns()); + const auto id = BSON("_id" << NamespaceStringUtil::serialize(fromNss)); const auto optRenameCollectionParticipant = RenameParticipantInstance::lookup(opCtx, service, id); if (optRenameCollectionParticipant) { @@ -223,8 +245,9 @@ class ShardsvrRenameCollectionUnblockParticipantCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_rename_index_metadata_command.cpp b/src/mongo/db/s/shardsvr_rename_index_metadata_command.cpp index 769baf15c2c1d..f37d662bc4252 100644 --- a/src/mongo/db/s/shardsvr_rename_index_metadata_command.cpp +++ b/src/mongo/db/s/shardsvr_rename_index_metadata_command.cpp @@ -27,17 +27,41 @@ * it in the license file. 
*/ +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/sharded_index_catalog_commands_gen.h" #include "mongo/db/s/sharding_index_catalog_ddl_util.h" +#include "mongo/db/s/sharding_migration_critical_section.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/index_version.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -105,7 +129,7 @@ class ShardsvrRenameIndexMetadataCommand final uassert(7079502, format(FMT_STRING("The critical section for collection {} must be taken in " "order to execute this command"), - ns().toString()), + ns().toStringForErrorMsg()), scopedCsr->getCriticalSectionSignal( opCtx, ShardingMigrationCriticalSection::kWrite)); if (scopedCsr->getIndexesInCritSec(opCtx)) { @@ -122,7 +146,7 @@ class ShardsvrRenameIndexMetadataCommand final uassert(7079503, format(FMT_STRING("The critical section for collection {} must be taken in " "order to execute this command"), - ns().toString()), + ns().toStringForErrorMsg()), scopedToCsr->getCriticalSectionSignal( opCtx, ShardingMigrationCriticalSection::kWrite)); const auto& indexMetadata = scopedToCsr->getIndexesInCritSec(opCtx); @@ -179,8 +203,9 @@ class ShardsvrRenameIndexMetadataCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_reshard_collection_command.cpp b/src/mongo/db/s/shardsvr_reshard_collection_command.cpp index 89833371745e5..b3ef60fc0b30c 100644 --- a/src/mongo/db/s/shardsvr_reshard_collection_command.cpp +++ b/src/mongo/db/s/shardsvr_reshard_collection_command.cpp @@ -28,13 +28,29 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/reshard_collection_coordinator.h" +#include "mongo/db/s/reshard_collection_coordinator_document_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_service.h" 
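Editor's note: another pattern that repeats through these files is that namespaces interpolated into error messages now use `toStringForErrorMsg()` instead of `toString()`, as in the critical-section assertion of `_shardsvrRenameIndexMetadata` shown above:

```cpp
uassert(7079502,
        format(FMT_STRING("The critical section for collection {} must be taken in "
                          "order to execute this command"),
               ns().toStringForErrorMsg()),
        scopedCsr->getCriticalSectionSignal(opCtx,
                                            ShardingMigrationCriticalSection::kWrite));
```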
#include "mongo/db/s/sharding_state.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -109,8 +125,9 @@ class ShardsvrReshardCollectionCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp b/src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp index eaaa2a43c3b10..d8d093d28187c 100644 --- a/src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp +++ b/src/mongo/db/s/shardsvr_resharding_operation_time_command.cpp @@ -27,19 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/s/metrics/sharding_data_transform_metrics.h" +#include "mongo/db/s/resharding/resharding_metrics.h" #include "mongo/db/s/resharding/resharding_recipient_service.h" +#include "mongo/db/s/resharding/resharding_util.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/resharding_operation_time_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" namespace mongo { @@ -104,8 +117,9 @@ class ShardsvrReshardingOperationTimeCmd final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } Response typedRun(OperationContext* opCtx) { diff --git a/src/mongo/db/s/shardsvr_set_allow_migrations_command.cpp b/src/mongo/db/s/shardsvr_set_allow_migrations_command.cpp index 1280bf2f5e76c..c352b0dc82ea0 100644 --- a/src/mongo/db/s/shardsvr_set_allow_migrations_command.cpp +++ b/src/mongo/db/s/shardsvr_set_allow_migrations_command.cpp @@ -28,13 +28,29 @@ */ +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/set_allow_migrations_coordinator.h" +#include "mongo/db/s/set_allow_migrations_coordinator_document_gen.h" +#include "mongo/db/s/sharding_ddl_coordinator_gen.h" #include 
"mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -104,8 +120,9 @@ class ShardsvrSetAllowMigrationsCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp b/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp index d1cb7cc9d4220..d9976b48336a5 100644 --- a/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp +++ b/src/mongo/db/s/shardsvr_set_cluster_parameter_command.cpp @@ -28,18 +28,36 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" #include "mongo/db/commands/set_cluster_parameter_invocation.h" -#include "mongo/db/commands/user_management_commands_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -111,8 +129,9 @@ class ShardsvrSetClusterParameterCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp b/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp index c79bb4ded5959..c64219cf8a003 100644 --- a/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp +++ b/src/mongo/db/s/shardsvr_set_user_write_block_mode_command.cpp @@ -28,20 +28,37 @@ */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include 
"mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/forwardable_operation_metadata.h" #include "mongo/db/s/global_user_write_block_state.h" #include "mongo/db/s/sharding_ddl_coordinator.h" #include "mongo/db/s/sharding_ddl_coordinator_service.h" #include "mongo/db/s/user_writes_recoverable_critical_section_service.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -179,8 +196,9 @@ class ShardsvrSetUserWriteBlockCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } Mutex _mutex = MONGO_MAKE_LATCH("ShardsvrSetUserWriteBlockCommand::_mutex"); diff --git a/src/mongo/db/s/shardsvr_split_chunk_command.cpp b/src/mongo/db/s/shardsvr_split_chunk_command.cpp index f547cfb95432a..8079b0a97fbb1 100644 --- a/src/mongo/db/s/shardsvr_split_chunk_command.cpp +++ b/src/mongo/db/s/shardsvr_split_chunk_command.cpp @@ -28,24 +28,47 @@ */ +#include +#include +#include #include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/chunk_operation_precondition_checks.h" -#include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/split_chunk.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/s/catalog/type_chunk.h" -#include "mongo/util/str.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -77,11 +100,12 @@ class SplitChunkCommand : public ErrmsgCommandDeprecated { } 
Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::internal)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -99,7 +123,8 @@ class SplitChunkCommand : public ErrmsgCommandDeprecated { BSONObjBuilder& result) override { uassertStatusOK(ShardingState::get(opCtx)->canAcceptShardedCommands()); - const NamespaceString nss(parseNs({boost::none, dbname}, cmdObj)); + const NamespaceString nss( + parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), cmdObj)); // Check whether parameters passed to splitChunk are sound BSONObj keyPatternObj; diff --git a/src/mongo/db/s/shardsvr_validate_shard_key_candidate.cpp b/src/mongo/db/s/shardsvr_validate_shard_key_candidate.cpp index 41ba1406a2880..c904f38723659 100644 --- a/src/mongo/db/s/shardsvr_validate_shard_key_candidate.cpp +++ b/src/mongo/db/s/shardsvr_validate_shard_key_candidate.cpp @@ -28,14 +28,30 @@ */ -#include "mongo/db/auth/authorization_session.h" +#include +#include + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/shard_key_util.h" #include "mongo/db/s/sharding_state.h" -#include "mongo/logv2/log.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/shardsvr_write_global_index_keys_command.cpp b/src/mongo/db/s/shardsvr_write_global_index_keys_command.cpp index 8eece73ffc978..84bf025e96c9d 100644 --- a/src/mongo/db/s/shardsvr_write_global_index_keys_command.cpp +++ b/src/mongo/db/s/shardsvr_write_global_index_keys_command.cpp @@ -29,11 +29,31 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_index.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/global_index_crud_commands_gen.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -58,8 +78,9 @@ class ShardsvrWriteGlobalIndexKeysCmd final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), 
- ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } bool supportsWriteConcern() const override { diff --git a/src/mongo/db/s/single_transaction_coordinator_stats.cpp b/src/mongo/db/s/single_transaction_coordinator_stats.cpp index 5acf498be5b03..6b9a8612d604b 100644 --- a/src/mongo/db/s/single_transaction_coordinator_stats.cpp +++ b/src/mongo/db/s/single_transaction_coordinator_stats.cpp @@ -27,10 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include "mongo/db/s/single_transaction_coordinator_stats.h" + +#include #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/s/single_transaction_coordinator_stats.h" #include "mongo/util/net/socket_utils.h" namespace mongo { diff --git a/src/mongo/db/s/single_transaction_coordinator_stats.h b/src/mongo/db/s/single_transaction_coordinator_stats.h index 9f7349cf3edb8..5342130dc0cd3 100644 --- a/src/mongo/db/s/single_transaction_coordinator_stats.h +++ b/src/mongo/db/s/single_transaction_coordinator_stats.h @@ -29,8 +29,18 @@ #pragma once +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/tick_source.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/s/split_chunk.cpp b/src/mongo/db/s/split_chunk.cpp index 2e86913d7958d..ac0b2d6000d55 100644 --- a/src/mongo/db/s/split_chunk.cpp +++ b/src/mongo/db/s/split_chunk.cpp @@ -29,25 +29,58 @@ #include "mongo/db/s/split_chunk.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" -#include "mongo/db/commands.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/index/index_descriptor.h" #include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/s/active_migrations_registry.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" +#include "mongo/db/s/shard_key_index_util.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/split_chunk_request_type.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include 
"mongo/s/shard_version_factory.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -118,25 +151,26 @@ bool checkMetadataForSuccessfulSplitChunk(OperationContext* opCtx, ShardVersionPlacementIgnoredNoIndexes() /* receivedVersion */, boost::none /* wantedVersion */, shardId), - str::stream() << "Collection " << nss.ns() << " needs to be recovered", + str::stream() << "Collection " << nss.toStringForErrorMsg() << " needs to be recovered", metadataAfterSplit); uassert(StaleConfigInfo(nss, ShardVersionPlacementIgnoredNoIndexes() /* receivedVersion */, ShardVersion::UNSHARDED() /* wantedVersion */, shardId), - str::stream() << "Collection " << nss.ns() << " is not sharded", + str::stream() << "Collection " << nss.toStringForErrorMsg() << " is not sharded", metadataAfterSplit->isSharded()); const auto placementVersion = metadataAfterSplit->getShardPlacementVersion(); const auto epoch = placementVersion.epoch(); - uassert(StaleConfigInfo(nss, - ShardVersionPlacementIgnoredNoIndexes() /* receivedVersion */, - ShardVersionFactory::make( - *metadataAfterSplit, - scopedCSR->getCollectionIndexes(opCtx)) /* wantedVersion */, - shardId), - str::stream() << "Collection " << nss.ns() << " changed since split start", - epoch == expectedEpoch && - (!expectedTimestamp || placementVersion.getTimestamp() == expectedTimestamp)); + uassert( + StaleConfigInfo( + nss, + ShardVersionPlacementIgnoredNoIndexes() /* receivedVersion */, + ShardVersionFactory::make(*metadataAfterSplit, + scopedCSR->getCollectionIndexes(opCtx)) /* wantedVersion */, + shardId), + str::stream() << "Collection " << nss.toStringForErrorMsg() << " changed since split start", + epoch == expectedEpoch && + (!expectedTimestamp || placementVersion.getTimestamp() == expectedTimestamp)); ChunkType nextChunk; for (auto it = splitPoints.begin(); it != splitPoints.end(); ++it) { diff --git a/src/mongo/db/s/split_chunk.h b/src/mongo/db/s/split_chunk.h index 51905a54b7d0f..f375932f5730e 100644 --- a/src/mongo/db/s/split_chunk.h +++ b/src/mongo/db/s/split_chunk.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include #include @@ -40,6 +41,7 @@ namespace mongo { class BSONObj; + class ChunkRange; class NamespaceString; class OperationContext; diff --git a/src/mongo/db/s/split_chunk_request_test.cpp b/src/mongo/db/s/split_chunk_request_test.cpp index bcf0c0f013e67..4836fe732c93f 100644 --- a/src/mongo/db/s/split_chunk_request_test.cpp +++ b/src/mongo/db/s/split_chunk_request_test.cpp @@ -27,9 +27,24 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/split_chunk_request_type.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/split_chunk_request_type.cpp b/src/mongo/db/s/split_chunk_request_type.cpp index 8bea20d353ad7..cda699c239197 100644 --- a/src/mongo/db/s/split_chunk_request_type.cpp +++ b/src/mongo/db/s/split_chunk_request_type.cpp @@ -29,9 +29,20 @@ #include "mongo/db/s/split_chunk_request_type.h" +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/write_concern_options.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -136,7 +147,7 @@ BSONObj SplitChunkRequest::toConfigCommandBSON(const BSONObj& writeConcern) { } void SplitChunkRequest::appendAsConfigCommand(BSONObjBuilder* cmdBuilder) { - cmdBuilder->append(kConfigsvrSplitChunk, _nss.ns()); + cmdBuilder->append(kConfigsvrSplitChunk, NamespaceStringUtil::serialize(_nss)); cmdBuilder->append(kCollEpoch, _epoch); _chunkRange.append(cmdBuilder); { @@ -171,8 +182,8 @@ const std::string& SplitChunkRequest::getShardName() const { Status SplitChunkRequest::_validate() { if (!getNamespace().isValid()) { return Status(ErrorCodes::InvalidNamespace, - str::stream() - << "invalid namespace '" << _nss.ns() << "' specified for request"); + str::stream() << "invalid namespace '" << _nss.toStringForErrorMsg() + << "' specified for request"); } if (getSplitPoints().empty()) { diff --git a/src/mongo/db/s/split_chunk_request_type.h b/src/mongo/db/s/split_chunk_request_type.h index e612586bcee69..f63b0feb796bd 100644 --- a/src/mongo/db/s/split_chunk_request_type.h +++ b/src/mongo/db/s/split_chunk_request_type.h @@ -29,10 +29,17 @@ #pragma once +#include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" - +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/s/catalog/type_chunk.h" diff --git a/src/mongo/db/s/split_vector.cpp b/src/mongo/db/s/split_vector.cpp index 2f5c22e16a983..27d5973d843c5 100644 --- a/src/mongo/db/s/split_vector.cpp +++ b/src/mongo/db/s/split_vector.cpp @@ -29,13 +29,39 @@ #include "mongo/db/s/split_vector.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/keypattern.h" +#include "mongo/db/operation_context.h" +#include 
"mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" #include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/s/shard_key_index_util.h" +#include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -119,9 +145,10 @@ std::vector splitVector(OperationContext* opCtx, maxChunkSizeBytes = dataSize; } - // We need a maximum size for the chunk. + // If the collection is empty, cannot use split with find or bounds option. if (!maxChunkSizeBytes || maxChunkSizeBytes.value() <= 0) { - uasserted(ErrorCodes::InvalidOptions, "need to specify the desired max chunk size"); + uasserted(ErrorCodes::InvalidOptions, + "cannot use split with find or bounds option on an empty collection"); } // If there's not enough data for more than one chunk, no point continuing. diff --git a/src/mongo/db/s/split_vector.h b/src/mongo/db/s/split_vector.h index 3b94c40e35374..25eeb8b16dfcf 100644 --- a/src/mongo/db/s/split_vector.h +++ b/src/mongo/db/s/split_vector.h @@ -30,13 +30,16 @@ #pragma once #include +#include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" namespace mongo { class BSONObj; + class OperationContext; /** diff --git a/src/mongo/db/s/split_vector_command.cpp b/src/mongo/db/s/split_vector_command.cpp index 2e2bf3cb9b39f..34fc50b4ade07 100644 --- a/src/mongo/db/s/split_vector_command.cpp +++ b/src/mongo/db/s/split_vector_command.cpp @@ -27,13 +27,33 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include - +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/split_vector.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -92,7 +112,8 @@ class SplitVector : public ErrmsgCommandDeprecated { string& errmsg, BSONObjBuilder& result) override { - const NamespaceString nss(parseNs({boost::none, dbname}, jsobj)); + const NamespaceString nss( + parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), jsobj)); BSONObj keyPattern = jsobj.getObjectField("keyPattern"); if (keyPattern.isEmpty()) { diff --git a/src/mongo/db/s/split_vector_test.cpp b/src/mongo/db/s/split_vector_test.cpp index 7d653ca3398bc..ad9f4b05fe3c2 100644 --- a/src/mongo/db/s/split_vector_test.cpp +++ b/src/mongo/db/s/split_vector_test.cpp @@ -27,13 +27,32 @@ * it in the license file. 
*/ +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/split_vector.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/start_chunk_clone_request.cpp b/src/mongo/db/s/start_chunk_clone_request.cpp index 8cdb8c823fa1b..babcce9faac83 100644 --- a/src/mongo/db/s/start_chunk_clone_request.cpp +++ b/src/mongo/db/s/start_chunk_clone_request.cpp @@ -27,14 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/s/start_chunk_clone_request.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/s/start_chunk_clone_request.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace { @@ -187,7 +196,7 @@ void StartChunkCloneRequest::appendAsCommand( invariant(nss.isValid()); invariant(fromShardConnectionString.isValid()); - builder->append(kRecvChunkStart, nss.ns()); + builder->append(kRecvChunkStart, NamespaceStringUtil::serialize(nss)); builder->append(kParallelMigration, true); migrationId.appendToBuilder(builder, kMigrationId); diff --git a/src/mongo/db/s/start_chunk_clone_request.h b/src/mongo/db/s/start_chunk_clone_request.h index a5d511dae4082..bd09630305a2a 100644 --- a/src/mongo/db/s/start_chunk_clone_request.h +++ b/src/mongo/db/s/start_chunk_clone_request.h @@ -29,14 +29,24 @@ #pragma once +#include +#include +#include #include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/connection_string.h" #include "mongo/db/namespace_string.h" #include "mongo/db/s/migration_session_id.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/shard_id.h" #include "mongo/s/request_types/migration_secondary_throttle_options.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/start_chunk_clone_request_test.cpp b/src/mongo/db/s/start_chunk_clone_request_test.cpp index ac13dcbf78c82..61fabfa59284a 100644 --- a/src/mongo/db/s/start_chunk_clone_request_test.cpp +++ b/src/mongo/db/s/start_chunk_clone_request_test.cpp @@ -27,18 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/s/start_chunk_clone_request.h" - -#include "mongo/base/status_with.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/s/start_chunk_clone_request.h" #include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/shard_id.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -78,7 +82,7 @@ TEST(StartChunkCloneRequest, CreateAsCommandComplete) { NamespaceString::createNamespaceString_forTest(cmdObj["_recvChunkStart"].String()), cmdObj)); - ASSERT_EQ("TestDB.TestColl", request.getNss().ns()); + ASSERT_EQ("TestDB.TestColl", request.getNss().ns_forTest()); ASSERT_EQ(sessionId.toString(), request.getSessionId().toString()); ASSERT_EQ(migrationId, request.getMigrationId()); ASSERT_EQ(lsid, request.getLsid()); diff --git a/src/mongo/db/s/topology_time_ticker.cpp b/src/mongo/db/s/topology_time_ticker.cpp index 9bef98d30e9c0..933fc27cbc2bf 100644 --- a/src/mongo/db/s/topology_time_ticker.cpp +++ b/src/mongo/db/s/topology_time_ticker.cpp @@ -29,13 +29,24 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/s/topology_time_ticker.h" +#include +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/s/topology_time_ticker.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/s/topology_time_ticker.h b/src/mongo/db/s/topology_time_ticker.h index f96674125fd8f..2b8b25913c704 100644 --- a/src/mongo/db/s/topology_time_ticker.h +++ b/src/mongo/db/s/topology_time_ticker.h @@ -29,10 +29,12 @@ #pragma once +#include #include #include "mongo/bson/timestamp.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/s/topology_time_ticker_test.cpp b/src/mongo/db/s/topology_time_ticker_test.cpp index d676ee35b40db..efd10f7c276c6 100644 --- a/src/mongo/db/s/topology_time_ticker_test.cpp +++ b/src/mongo/db/s/topology_time_ticker_test.cpp @@ -27,20 +27,24 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/logical_time.h" #include "mongo/db/service_context.h" -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/vector_clock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" -#include "mongo/platform/basic.h" +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest -#include "mongo/db/keys_collection_client_sharded.h" -#include "mongo/db/keys_collection_manager.h" -#include "mongo/db/logical_time_validator.h" #include "mongo/db/s/config/config_server_test_fixture.h" #include "mongo/db/s/topology_time_ticker.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/transaction_coordinator.cpp b/src/mongo/db/s/transaction_coordinator.cpp index 39892137e9ad5..b681061acec8c 100644 --- a/src/mongo/db/s/transaction_coordinator.cpp +++ b/src/mongo/db/s/transaction_coordinator.cpp @@ -28,18 +28,55 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/transaction_coordinator.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/server_transaction_coordinators_metrics.h" +#include "mongo/db/s/single_transaction_coordinator_stats.h" +#include "mongo/db/s/transaction_coordinator.h" #include "mongo/db/s/transaction_coordinator_metrics_observer.h" +#include "mongo/db/s/transaction_coordinator_util.h" #include "mongo/db/server_options.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/tick_source.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction @@ -84,6 +121,13 @@ ExecutorFuture waitForMajorityWithHangFailpoint( failpoint.pauseWhileSet(); } else { ThreadClient tc(failPointName, service); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = tc->makeOperationContext(); failpoint.pauseWhileSet(opCtx.get()); } @@ -231,6 +275,11 @@ TransactionCoordinator::TransactionCoordinator( } if (_decision->getDecision() == CommitDecision::kCommit) { + auto affectedNamespacesSet = consensus.releaseAffectedNamespaces(); + _affectedNamespaces.reserve(affectedNamespacesSet.size()); + std::move(affectedNamespacesSet.begin(), + affectedNamespacesSet.end(), + std::back_inserter(_affectedNamespaces)); LOGV2_DEBUG( 22446, 3, @@ -281,8 +330,12 @@ TransactionCoordinator::TransactionCoordinator( return Future::makeReady(repl::OpTime()); } - return txn::persistDecision( - *_scheduler, _lsid, _txnNumberAndRetryCounter, *_participants, *_decision); + return txn::persistDecision(*_scheduler, + _lsid, + _txnNumberAndRetryCounter, + *_participants, + *_decision, + _affectedNamespaces); }) .then([this](repl::OpTime opTime) { switch (_decision->getDecision()) { @@ -404,6 +457,7 @@ void TransactionCoordinator::continueCommit(const TransactionCoordinatorDocument _participantsDurable = true; _decision = std::move(doc.getDecision()); } + _affectedNamespaces = doc.getAffectedNamespaces().get_value_or({}); _kickOffCommitPromise.emplaceValue(); } diff --git a/src/mongo/db/s/transaction_coordinator.h b/src/mongo/db/s/transaction_coordinator.h index 3e09cf9e07ed1..c448abb5677b3 100644 --- a/src/mongo/db/s/transaction_coordinator.h +++ b/src/mongo/db/s/transaction_coordinator.h @@ -29,10 +29,31 @@ #pragma once +#include +#include +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/transaction_coordinator_document_gen.h" +#include "mongo/db/s/transaction_coordinator_futures_util.h" +#include "mongo/db/s/transaction_coordinator_structures.h" #include "mongo/db/s/transaction_coordinator_util.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/mutex.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -190,6 +211,10 @@ class TransactionCoordinator { // hasn't yet persisted it boost::optional _decision; + // Set when the coordinator has heard back from all the participants and reached a commit + // decision. 
+ std::vector _affectedNamespaces; + // Set when the coordinator has durably persisted `_decision` to the `config.coordinators` // collection bool _decisionDurable{false}; diff --git a/src/mongo/db/s/transaction_coordinator_catalog.cpp b/src/mongo/db/s/transaction_coordinator_catalog.cpp index 6afd1e8d00f4c..727eae80d1764 100644 --- a/src/mongo/db/s/transaction_coordinator_catalog.cpp +++ b/src/mongo/db/s/transaction_coordinator_catalog.cpp @@ -28,12 +28,31 @@ */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/s/transaction_coordinator_catalog.h" - +#include "mongo/db/s/transaction_coordinator_structures.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction diff --git a/src/mongo/db/s/transaction_coordinator_catalog.h b/src/mongo/db/s/transaction_coordinator_catalog.h index 43c80be895dd2..d673a460e2baa 100644 --- a/src/mongo/db/s/transaction_coordinator_catalog.h +++ b/src/mongo/db/s/transaction_coordinator_catalog.h @@ -29,10 +29,22 @@ #pragma once +#include #include +#include +#include #include +#include +#include +#include +#include +#include "mongo/base/status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/transaction_coordinator.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/util/concurrency/with_lock.h" diff --git a/src/mongo/db/s/transaction_coordinator_catalog_test.cpp b/src/mongo/db/s/transaction_coordinator_catalog_test.cpp index 1c28ab3f3026e..9eae24fe4f868 100644 --- a/src/mongo/db/s/transaction_coordinator_catalog_test.cpp +++ b/src/mongo/db/s/transaction_coordinator_catalog_test.cpp @@ -27,12 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/s/transaction_coordinator_catalog.h" +#include "mongo/db/s/transaction_coordinator_futures_util.h" #include "mongo/db/s/transaction_coordinator_test_fixture.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/transaction_coordinator_curop.cpp b/src/mongo/db/s/transaction_coordinator_curop.cpp index 26c22fcc9c498..b09f92da14055 100644 --- a/src/mongo/db/s/transaction_coordinator_curop.cpp +++ b/src/mongo/db/s/transaction_coordinator_curop.cpp @@ -27,11 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/transaction_coordinator_curop.h" +#include #include "mongo/base/shim.h" +#include "mongo/db/s/transaction_coordinator_curop.h" namespace mongo { diff --git a/src/mongo/db/s/transaction_coordinator_curop.h b/src/mongo/db/s/transaction_coordinator_curop.h index 8b69c916162f3..7c0931ee333c1 100644 --- a/src/mongo/db/s/transaction_coordinator_curop.h +++ b/src/mongo/db/s/transaction_coordinator_curop.h @@ -28,6 +28,10 @@ */ #pragma once +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/process_interface/mongos_process_interface.h" namespace mongo { diff --git a/src/mongo/db/s/transaction_coordinator_curop_mongod.cpp b/src/mongo/db/s/transaction_coordinator_curop_mongod.cpp index ce5644cd826ed..e78c3abfe9ced 100644 --- a/src/mongo/db/s/transaction_coordinator_curop_mongod.cpp +++ b/src/mongo/db/s/transaction_coordinator_curop_mongod.cpp @@ -26,11 +26,13 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/db/s/transaction_coordinator_curop.h" - -#include "mongo/db/s/transaction_coordinator_service.h" +#include +#include #include "mongo/base/shim.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/transaction_coordinator_service.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/transaction_coordinator_document.idl b/src/mongo/db/s/transaction_coordinator_document.idl index b3cbca2787031..14e12e7c17421 100644 --- a/src/mongo/db/s/transaction_coordinator_document.idl +++ b/src/mongo/db/s/transaction_coordinator_document.idl @@ -89,3 +89,8 @@ structs: description: "The coordinator's decision for the transaction including the decision ('commit' or 'abort') and a commit timestamp (if the decision is 'commit'). Only set if the coordinator has made a decision." + affectedNamespaces: + optional: true + type: array + description: "The list of namespaces, affected by the transaction. Each participant + returns a list in response to prepareTransaction command." diff --git a/src/mongo/db/s/transaction_coordinator_factory.cpp b/src/mongo/db/s/transaction_coordinator_factory.cpp index b4d4dbcef8b9f..b06f36217ecc6 100644 --- a/src/mongo/db/s/transaction_coordinator_factory.cpp +++ b/src/mongo/db/s/transaction_coordinator_factory.cpp @@ -27,7 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include + +#include #include "mongo/base/shim.h" #include "mongo/db/s/transaction_coordinator_factory.h" diff --git a/src/mongo/db/s/transaction_coordinator_factory.h b/src/mongo/db/s/transaction_coordinator_factory.h index e027763275099..c31e1e050f8ac 100644 --- a/src/mongo/db/s/transaction_coordinator_factory.h +++ b/src/mongo/db/s/transaction_coordinator_factory.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/operation_context.h" #include "mongo/db/session/logical_session_id.h" diff --git a/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp b/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp index cf8882bd82a80..5b6ca7ae8133f 100644 --- a/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp +++ b/src/mongo/db/s/transaction_coordinator_factory_mongod.cpp @@ -27,13 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include + +#include +#include #include "mongo/base/shim.h" -#include "mongo/db/s/transaction_coordinator_factory.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/transaction_coordinator_service.h" -#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/transaction/transaction_participant_gen.h" +#include "mongo/idl/mutable_observer_registry.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.cpp b/src/mongo/db/s/transaction_coordinator_futures_util.cpp index 881c92275c721..910e036b9ea23 100644 --- a/src/mongo/db/s/transaction_coordinator_futures_util.cpp +++ b/src/mongo/db/s/transaction_coordinator_futures_util.cpp @@ -28,18 +28,39 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/transaction_coordinator_futures_util.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbmessage.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/s/transaction_coordinator_futures_util.h" +#include "mongo/db/server_options.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/transport/service_entry_point.h" +#include "mongo/util/cancellation.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction diff --git a/src/mongo/db/s/transaction_coordinator_futures_util.h b/src/mongo/db/s/transaction_coordinator_futures_util.h index 1dd0554ee63cd..4916ecdd877a2 100644 --- a/src/mongo/db/s/transaction_coordinator_futures_util.h +++ b/src/mongo/db/s/transaction_coordinator_futures_util.h @@ -29,16 +29,37 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/read_preference.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" #include "mongo/executor/task_executor.h" #include "mongo/executor/task_executor_pool.h" +#include "mongo/platform/mutex.h" #include "mongo/s/client/shard.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/mutex.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" #include 
"mongo/util/time_support.h" namespace mongo { @@ -101,6 +122,12 @@ class AsyncWorkScheduler { ThreadClient tc("TransactionCoordinator", _serviceContext); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto uniqueOpCtxIter = [&] { stdx::lock_guard lk(_mutex); return _activeOpContexts.emplace(_activeOpContexts.begin(), diff --git a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp index 3f0b9a34eb555..976c50771b5b2 100644 --- a/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp +++ b/src/mongo/db/s/transaction_coordinator_futures_util_test.cpp @@ -27,14 +27,41 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/transaction_coordinator_futures_util.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/str.h" namespace mongo { namespace txn { @@ -421,7 +448,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledRemoteCommandRespondsOK) { kShardIds[1], ReadPreferenceSetting{ReadPreference::PrimaryOnly}, BSON("TestCommand" << 1)); ASSERT(!future.isReady()); - const auto objResponse = BSON("ok" << 1 << "responseData" << 2); + auto objResponse = BSON("ok" << 1 << "responseData" << 2); onCommand([&](const executor::RemoteCommandRequest& request) { ASSERT_BSONOBJ_EQ(BSON("TestCommand" << 1), request.cmdObj); return objResponse; @@ -439,7 +466,7 @@ TEST_F(AsyncWorkSchedulerTest, ScheduledRemoteCommandRespondsNotOK) { kShardIds[1], ReadPreferenceSetting{ReadPreference::PrimaryOnly}, BSON("TestCommand" << 2)); ASSERT(!future.isReady()); - const auto objResponse = BSON("ok" << 0 << "responseData" << 3); + auto objResponse = BSON("ok" << 0 << "responseData" << 3); onCommand([&](const executor::RemoteCommandRequest& request) { ASSERT_BSONOBJ_EQ(BSON("TestCommand" << 2), request.cmdObj); return objResponse; diff --git a/src/mongo/db/s/transaction_coordinator_metrics_observer.cpp b/src/mongo/db/s/transaction_coordinator_metrics_observer.cpp index dffdfdcba196a..b98f7f35040a9 100644 --- a/src/mongo/db/s/transaction_coordinator_metrics_observer.cpp +++ b/src/mongo/db/s/transaction_coordinator_metrics_observer.cpp @@ -27,10 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/s/transaction_coordinator_metrics_observer.h" +#include + +#include "mongo/db/s/transaction_coordinator_structures.h" + namespace mongo { using CommitDecision = txn::CommitDecision; diff --git a/src/mongo/db/s/transaction_coordinator_metrics_observer.h b/src/mongo/db/s/transaction_coordinator_metrics_observer.h index 535c865da0b82..e00d5e2253127 100644 --- a/src/mongo/db/s/transaction_coordinator_metrics_observer.h +++ b/src/mongo/db/s/transaction_coordinator_metrics_observer.h @@ -29,9 +29,15 @@ #pragma once +#include + +#include "mongo/db/client.h" #include "mongo/db/s/server_transaction_coordinators_metrics.h" #include "mongo/db/s/single_transaction_coordinator_stats.h" #include "mongo/db/s/transaction_coordinator.h" +#include "mongo/db/s/transaction_coordinator_document_gen.h" +#include "mongo/util/tick_source.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/s/transaction_coordinator_service.cpp b/src/mongo/db/s/transaction_coordinator_service.cpp index 5af1b93e1d564..ba36dc9eb2d66 100644 --- a/src/mongo/db/s/transaction_coordinator_service.cpp +++ b/src/mongo/db/s/transaction_coordinator_service.cpp @@ -28,18 +28,37 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/transaction_coordinator_service.h" - +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/s/transaction_coordinator.h" #include "mongo/db/s/transaction_coordinator_document_gen.h" #include "mongo/db/s/transaction_coordinator_params_gen.h" -#include "mongo/db/storage/flow_control.h" +#include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/s/transaction_coordinator_util.h" #include "mongo/db/transaction/transaction_participant_gen.h" #include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/mutable_observer_registry.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction diff --git a/src/mongo/db/s/transaction_coordinator_service.h b/src/mongo/db/s/transaction_coordinator_service.h index c0946bd5286dd..fdce1a0873c42 100644 --- a/src/mongo/db/s/transaction_coordinator_service.h +++ b/src/mongo/db/s/transaction_coordinator_service.h @@ -29,7 +29,27 @@ #pragma once +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/transaction_coordinator_catalog.h" +#include "mongo/db/s/transaction_coordinator_futures_util.h" +#include "mongo/db/s/transaction_coordinator_structures.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/s/transaction_coordinator_service_test.cpp 
b/src/mongo/db/s/transaction_coordinator_service_test.cpp index 2a11614d07e59..651006766418a 100644 --- a/src/mongo/db/s/transaction_coordinator_service_test.cpp +++ b/src/mongo/db/s/transaction_coordinator_service_test.cpp @@ -28,17 +28,41 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/client/remote_command_targeter_mock.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/commands/txn_two_phase_commit_cmds_gen.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/s/transaction_coordinator_document_gen.h" #include "mongo/db/s/transaction_coordinator_service.h" #include "mongo/db/s/transaction_coordinator_test_fixture.h" #include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/util/scopeguard.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/s/transaction_coordinator_structures.cpp b/src/mongo/db/s/transaction_coordinator_structures.cpp index f6cf5121b30ce..3880f09c7cae9 100644 --- a/src/mongo/db/s/transaction_coordinator_structures.cpp +++ b/src/mongo/db/s/transaction_coordinator_structures.cpp @@ -28,11 +28,10 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/db/s/transaction_coordinator_structures.h" - +#include "mongo/base/error_codes.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction diff --git a/src/mongo/db/s/transaction_coordinator_structures.h b/src/mongo/db/s/transaction_coordinator_structures.h index b86be50ad56c2..422b282094f96 100644 --- a/src/mongo/db/s/transaction_coordinator_structures.h +++ b/src/mongo/db/s/transaction_coordinator_structures.h @@ -32,6 +32,9 @@ #include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/shard_id.h" diff --git a/src/mongo/db/s/transaction_coordinator_structures_test.cpp b/src/mongo/db/s/transaction_coordinator_structures_test.cpp index 587c8d16ae048..5a2914a277bb8 100644 --- a/src/mongo/db/s/transaction_coordinator_structures_test.cpp +++ b/src/mongo/db/s/transaction_coordinator_structures_test.cpp @@ -27,10 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/s/transaction_coordinator_document_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/s/transaction_coordinator_structures.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace txn { diff --git a/src/mongo/db/s/transaction_coordinator_test.cpp b/src/mongo/db/s/transaction_coordinator_test.cpp index 48396dd4e4b04..2195a5702437d 100644 --- a/src/mongo/db/s/transaction_coordinator_test.cpp +++ b/src/mongo/db/s/transaction_coordinator_test.cpp @@ -28,22 +28,79 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/client/remote_command_targeter_mock.h" +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/client.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/commands/txn_two_phase_commit_cmds_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/s/server_transaction_coordinators_metrics.h" +#include "mongo/db/s/single_transaction_coordinator_stats.h" +#include "mongo/db/s/transaction_coordinator.h" #include "mongo/db/s/transaction_coordinator_document_gen.h" +#include "mongo/db/s/transaction_coordinator_futures_util.h" #include "mongo/db/s/transaction_coordinator_metrics_observer.h" +#include "mongo/db/s/transaction_coordinator_structures.h" #include "mongo/db/s/transaction_coordinator_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/s/transaction_coordinator_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/string_map.h" +#include "mongo/util/tick_source.h" #include 
"mongo/util/tick_source_mock.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -62,18 +119,36 @@ const StatusWith kNoSuchTransaction = << "No such transaction exists"); const StatusWith kOk = BSON("ok" << 1); const Timestamp kDummyPrepareTimestamp = Timestamp(1, 1); +const std::vector kDummyAffectedNamespaces = {NamespaceString("test.test")}; -StatusWith makePrepareOkResponse(const Timestamp& timestamp) { - return BSON("ok" << 1 << "prepareTimestamp" << timestamp); +StatusWith makePrepareOkResponse(const Timestamp& timestamp, + const std::vector& affectedNamespaces) { + BSONArrayBuilder namespaces; + for (const auto& nss : affectedNamespaces) { + namespaces << nss.ns(); + } + return BSON("ok" << 1 << "prepareTimestamp" << timestamp << "affectedNamespaces" + << namespaces.arr()); } -const StatusWith kPrepareOk = makePrepareOkResponse(kDummyPrepareTimestamp); +const StatusWith kPrepareOk = + makePrepareOkResponse(kDummyPrepareTimestamp, kDummyAffectedNamespaces); const StatusWith kPrepareOkNoTimestamp = BSON("ok" << 1); const StatusWith kTxnRetryCounterTooOld = BSON("ok" << 0 << "code" << ErrorCodes::TxnRetryCounterTooOld << "errmsg" << "txnRetryCounter is too old" << "txnRetryCounter" << 1); +template +static StringSet toStringSet(const NamespaceStringContainer& namespaces) { + StringSet set; + set.reserve(namespaces.size()); + for (const auto& nss : namespaces) { + set.emplace(nss.ns()); + } + return set; +} + /** * Searches for a client matching the name and mark the operation context as killed. */ @@ -112,7 +187,7 @@ class TransactionCoordinatorTestBase : public TransactionCoordinatorTestFixture void assertPrepareSentAndRespondWithSuccess(const Timestamp& timestamp) { assertCommandSentAndRespondWith(PrepareTransaction::kCommandName, - makePrepareOkResponse(timestamp), + makePrepareOkResponse(timestamp, kDummyAffectedNamespaces), WriteConcernOptions::Majority); } @@ -647,6 +722,34 @@ TEST_F(TransactionCoordinatorDriverTest, abortFuture.get(); } +TEST_F(TransactionCoordinatorDriverTest, SendPrepareToShardsCollectsAffectedNamespaces) { + const auto timestamp = Timestamp(1, 1); + + txn::AsyncWorkScheduler aws(getServiceContext()); + auto future = txn::sendPrepare(getServiceContext(), + aws, + _lsid, + _txnNumberAndRetryCounter, + APIParameters(), + kTwoShardIdList); + + assertCommandSentAndRespondWith( + PrepareTransaction::kCommandName, + makePrepareOkResponse(timestamp, + {NamespaceString("db1.coll1"), NamespaceString("db2.coll2")}), + WriteConcernOptions::Majority); + assertCommandSentAndRespondWith( + PrepareTransaction::kCommandName, + makePrepareOkResponse(timestamp, + {NamespaceString("db1.coll2"), NamespaceString("db2.coll1")}), + WriteConcernOptions::Majority); + + auto response = future.get(); + ASSERT_EQUALS(txn::CommitDecision::kCommit, response.decision().getDecision()); + StringSet expectedAffectedNamespaces{"db1.coll1", "db1.coll2", "db2.coll1", "db2.coll2"}; + ASSERT_EQUALS(expectedAffectedNamespaces, toStringSet(response.releaseAffectedNamespaces())); +} + class TransactionCoordinatorDriverPersistenceTest : public TransactionCoordinatorDriverTest { protected: void setUp() override { @@ -665,7 +768,8 @@ class TransactionCoordinatorDriverPersistenceTest : public TransactionCoordinato TxnNumberAndRetryCounter expectedTxnNumberAndRetryCounter, std::vector expectedParticipants, boost::optional expectedDecision = boost::none, - boost::optional expectedCommitTimestamp = boost::none) { + boost::optional 
expectedCommitTimestamp = boost::none, + boost::optional> expectedAffectedNamespaces = boost::none) { ASSERT(doc.getId().getSessionId()); ASSERT_EQUALS(*doc.getId().getSessionId(), expectedLsid); ASSERT(doc.getId().getTxnNumber()); @@ -683,6 +787,13 @@ class TransactionCoordinatorDriverPersistenceTest : public TransactionCoordinato ASSERT(!decision); } + ASSERT_EQUALS(expectedAffectedNamespaces.has_value(), + doc.getAffectedNamespaces().has_value()); + if (expectedAffectedNamespaces) { + ASSERT_EQUALS(toStringSet(*expectedAffectedNamespaces), + toStringSet(*doc.getAffectedNamespaces())); + } + if (expectedCommitTimestamp) { ASSERT(decision->getCommitTimestamp()); ASSERT_EQUALS(*expectedCommitTimestamp, *decision->getCommitTimestamp()); @@ -702,27 +813,31 @@ class TransactionCoordinatorDriverPersistenceTest : public TransactionCoordinato assertDocumentMatches(allCoordinatorDocs[0], lsid, txnNumberAndRetryCounter, participants); } - void persistDecisionExpectSuccess(OperationContext* opCtx, - LogicalSessionId lsid, - TxnNumberAndRetryCounter txnNumberAndRetryCounter, - const std::vector& participants, - const boost::optional& commitTimestamp) { - txn::persistDecision(*_aws, - lsid, - txnNumberAndRetryCounter, - participants, - [&] { - txn::CoordinatorCommitDecision decision; - if (commitTimestamp) { - decision.setDecision(txn::CommitDecision::kCommit); - decision.setCommitTimestamp(commitTimestamp); - } else { - decision.setDecision(txn::CommitDecision::kAbort); - decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, - "Test abort status")); - } - return decision; - }()) + void persistDecisionExpectSuccess( + OperationContext* opCtx, + LogicalSessionId lsid, + TxnNumberAndRetryCounter txnNumberAndRetryCounter, + const std::vector& participants, + const boost::optional& commitTimestamp, + const boost::optional>& affectedNamespaces) { + txn::persistDecision( + *_aws, + lsid, + txnNumberAndRetryCounter, + participants, + [&] { + txn::CoordinatorCommitDecision decision; + if (commitTimestamp) { + decision.setDecision(txn::CommitDecision::kCommit); + decision.setCommitTimestamp(commitTimestamp); + } else { + decision.setDecision(txn::CommitDecision::kAbort); + decision.setAbortStatus( + Status(ErrorCodes::NoSuchTransaction, "Test abort status")); + } + return decision; + }(), + kDummyAffectedNamespaces) .get(); auto allCoordinatorDocs = txn::readAllCoordinatorDocs(opCtx); @@ -733,7 +848,8 @@ class TransactionCoordinatorDriverPersistenceTest : public TransactionCoordinato txnNumberAndRetryCounter, participants, txn::CommitDecision::kCommit, - *commitTimestamp); + *commitTimestamp, + affectedNamespaces); } else { assertDocumentMatches(allCoordinatorDocs[0], lsid, @@ -839,6 +955,7 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest, _lsid, _txnNumberAndRetryCounter, _participants, + boost::none, boost::none /* abort */); } @@ -850,11 +967,13 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest, _lsid, _txnNumberAndRetryCounter, _participants, + boost::none, boost::none /* abort */); persistDecisionExpectSuccess(operationContext(), _lsid, _txnNumberAndRetryCounter, _participants, + boost::none, boost::none /* abort */); } @@ -864,11 +983,17 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest, { FailPointEnableBlock failpoint("hangBeforeWritingDecision"); - future = txn::persistDecision(*_aws, _lsid, _txnNumberAndRetryCounter, _participants, [&] { - txn::CoordinatorCommitDecision decision(txn::CommitDecision::kCommit); - decision.setCommitTimestamp(_commitTimestamp); - return decision; - 
}()); + future = txn::persistDecision( + *_aws, + _lsid, + _txnNumberAndRetryCounter, + _participants, + [&] { + txn::CoordinatorCommitDecision decision(txn::CommitDecision::kCommit); + decision.setCommitTimestamp(_commitTimestamp); + return decision; + }(), + kDummyAffectedNamespaces); failpoint->waitForTimesEntered(failpoint.initialTimesEntered() + 1); _aws->shutdown({ErrorCodes::TransactionCoordinatorSteppingDown, "Shutdown for test"}); } @@ -885,7 +1010,8 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest, _lsid, _txnNumberAndRetryCounter, _participants, - _commitTimestamp /* commit */); + _commitTimestamp, + kDummyAffectedNamespaces /* commit */); } TEST_F(TransactionCoordinatorDriverPersistenceTest, @@ -896,12 +1022,14 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest, _lsid, _txnNumberAndRetryCounter, _participants, - _commitTimestamp /* commit */); + _commitTimestamp, + kDummyAffectedNamespaces /* commit */); persistDecisionExpectSuccess(operationContext(), _lsid, _txnNumberAndRetryCounter, _participants, - _commitTimestamp /* commit */); + _commitTimestamp, + kDummyAffectedNamespaces /* commit */); } TEST_F(TransactionCoordinatorDriverPersistenceTest, DeleteCoordinatorDocWhenNoDocumentExistsFails) { @@ -954,6 +1082,7 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest, _lsid, _txnNumberAndRetryCounter, _participants, + boost::none, boost::none /* abort */); deleteCoordinatorDocExpectSuccess(operationContext(), _lsid, _txnNumberAndRetryCounter); } @@ -966,7 +1095,8 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest, _lsid, _txnNumberAndRetryCounter, _participants, - _commitTimestamp /* commit */); + _commitTimestamp, + kDummyAffectedNamespaces /* commit */); deleteCoordinatorDocExpectSuccess(operationContext(), _lsid, _txnNumberAndRetryCounter); } @@ -987,16 +1117,17 @@ TEST_F(TransactionCoordinatorDriverPersistenceTest, // Delete the document for the first transaction and check that only the second transaction's // document still exists. - txn::persistDecision(*_aws, - _lsid, - txnNumberAndRetryCounter1, - _participants, - [&] { - txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort); - decision.setAbortStatus( - Status(ErrorCodes::NoSuchTransaction, "Test abort error")); - return decision; - }()) + txn::persistDecision( + *_aws, + _lsid, + txnNumberAndRetryCounter1, + _participants, + [&] { + txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort); + decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort error")); + return decision; + }(), + kDummyAffectedNamespaces) .get(); txn::deleteCoordinatorDoc(*_aws, _lsid, txnNumberAndRetryCounter1).get(); @@ -1023,16 +1154,17 @@ TEST_F( // Delete the document for the first transaction and check that only the second transaction's // document still exists. 
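For orientation before the next hunk: every persistence test in this file now hands `txn::persistDecision` an explicit list of affected namespaces in addition to the decision. A compressed sketch of that call shape, assuming the fixture members (`_aws`, `_lsid`, `_txnNumberAndRetryCounter`, `_participants`, `kDummyAffectedNamespaces`) introduced elsewhere in this patch:

```cpp
// Sketch only: mirrors the argument order used by the updated tests.
auto decision = [&] {
    txn::CoordinatorCommitDecision d(txn::CommitDecision::kAbort);
    d.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort error"));
    return d;
}();

txn::persistDecision(*_aws,
                     _lsid,
                     _txnNumberAndRetryCounter,
                     _participants,
                     decision,
                     kDummyAffectedNamespaces)  // new trailing argument in this patch
    .get();  // the tests block on the returned future
```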
- txn::persistDecision(*_aws, - _lsid, - txnNumberAndRetryCounter1, - _participants, - [&] { - txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort); - decision.setAbortStatus( - Status(ErrorCodes::NoSuchTransaction, "Test abort error")); - return decision; - }()) + txn::persistDecision( + *_aws, + _lsid, + txnNumberAndRetryCounter1, + _participants, + [&] { + txn::CoordinatorCommitDecision decision(txn::CommitDecision::kAbort); + decision.setAbortStatus(Status(ErrorCodes::NoSuchTransaction, "Test abort error")); + return decision; + }(), + kDummyAffectedNamespaces) .get(); txn::deleteCoordinatorDoc(*_aws, _lsid, txnNumberAndRetryCounter1).get(); @@ -2756,5 +2888,6 @@ TEST_F(TransactionCoordinatorMetricsTest, ClientInformationIncludedInReportState coordinator.onCompletion().get(); } + } // namespace } // namespace mongo diff --git a/src/mongo/db/s/transaction_coordinator_test_fixture.cpp b/src/mongo/db/s/transaction_coordinator_test_fixture.cpp index a6aa349d58ebc..325b86b5e8164 100644 --- a/src/mongo/db/s/transaction_coordinator_test_fixture.cpp +++ b/src/mongo/db/s/transaction_coordinator_test_fixture.cpp @@ -28,19 +28,37 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/s/transaction_coordinator_test_fixture.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/commands/txn_cmds_gen.h" -#include "mongo/db/commands/txn_two_phase_commit_cmds_gen.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/s/transaction_coordinator_test_fixture.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/rpc/metadata/client_metadata.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/s/transaction_coordinator_test_fixture.h b/src/mongo/db/s/transaction_coordinator_test_fixture.h index 62198ae4b7349..0c6080a0d5d04 100644 --- a/src/mongo/db/s/transaction_coordinator_test_fixture.h +++ b/src/mongo/db/s/transaction_coordinator_test_fixture.h @@ -29,13 +29,24 @@ #pragma once +#include +#include #include +#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" #include "mongo/db/s/shard_server_test_fixture.h" #include "mongo/db/s/transaction_coordinator.h" +#include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/shard_id.h" +#include "mongo/s/catalog/sharding_catalog_client.h" namespace mongo { diff --git a/src/mongo/db/s/transaction_coordinator_util.cpp 
b/src/mongo/db/s/transaction_coordinator_util.cpp index 1838dc200f08e..ad04ee5e0162b 100644 --- a/src/mongo/db/s/transaction_coordinator_util.cpp +++ b/src/mongo/db/s/transaction_coordinator_util.cpp @@ -28,24 +28,64 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/transaction_coordinator_util.h" - -#include "mongo/client/remote_command_retry_scheduler.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/commands/txn_two_phase_commit_cmds_gen.h" -#include "mongo/db/curop.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/s/transaction_coordinator_futures_util.h" +#include "mongo/db/s/transaction_coordinator_util.h" #include "mongo/db/s/transaction_coordinator_worker_curop_repository.h" -#include "mongo/db/storage/flow_control.h" -#include "mongo/db/write_concern.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction @@ -218,6 +258,7 @@ void PrepareVoteConsensus::registerVote(const PrepareResponse& vote) { if (vote.vote == PrepareVote::kCommit) { ++_numCommitVotes; _maxPrepareTimestamp = std::max(_maxPrepareTimestamp, *vote.prepareTimestamp); + _affectedNamespaces.insert(vote.affectedNamespaces.begin(), vote.affectedNamespaces.end()); } else { vote.vote == PrepareVote::kAbort ? 
++_numAbortVotes : ++_numNoVotes; @@ -255,7 +296,7 @@ Future sendPrepare(ServiceContext* service, << WriteConcernOptions::Majority)); if (auto txnRetryCounter = txnNumberAndRetryCounter.getTxnRetryCounter(); txnRetryCounter && !isDefaultTxnRetryCounter(*txnRetryCounter)) { - bob.append(OperationSessionInfo::kTxnRetryCounterFieldName, *txnRetryCounter); + bob.append(OperationSessionInfoFromClient::kTxnRetryCounterFieldName, *txnRetryCounter); } apiParams.appendInfo(&bob); auto prepareObj = prepareTransaction.toBSON(bob.obj()); @@ -327,8 +368,9 @@ namespace { repl::OpTime persistDecisionBlocking(OperationContext* opCtx, const LogicalSessionId& lsid, const TxnNumberAndRetryCounter& txnNumberAndRetryCounter, - const std::vector& participantList, - const txn::CoordinatorCommitDecision& decision) { + std::vector participantList, + const txn::CoordinatorCommitDecision& decision, + std::vector affectedNamespaces) { const bool isCommit = decision.getDecision() == txn::CommitDecision::kCommit; LOGV2_DEBUG(22467, 3, @@ -387,6 +429,9 @@ repl::OpTime persistDecisionBlocking(OperationContext* opCtx, doc.setId(sessionInfo); doc.setParticipants(std::move(participantList)); doc.setDecision(decision); + if (decision.getDecision() == CommitDecision::kCommit) { + doc.setAffectedNamespaces(std::move(affectedNamespaces)); + } return doc.toBSON(); }())); @@ -433,14 +478,16 @@ Future persistDecision(txn::AsyncWorkScheduler& scheduler, const LogicalSessionId& lsid, const TxnNumberAndRetryCounter& txnNumberAndRetryCounter, const txn::ParticipantsList& participants, - const txn::CoordinatorCommitDecision& decision) { + const txn::CoordinatorCommitDecision& decision, + const std::vector& affectedNamespaces) { return txn::doWhile( scheduler, boost::none /* no need for a backoff */, [](const StatusWith& s) { return shouldRetryPersistingCoordinatorState(s); }, - [&scheduler, lsid, txnNumberAndRetryCounter, participants, decision] { + [&scheduler, lsid, txnNumberAndRetryCounter, participants, decision, affectedNamespaces] { return scheduler.scheduleWork( - [lsid, txnNumberAndRetryCounter, participants, decision](OperationContext* opCtx) { + [lsid, txnNumberAndRetryCounter, participants, decision, affectedNamespaces]( + OperationContext* opCtx) { // Do not acquire a storage ticket in order to avoid unnecessary serialization // with other prepared transactions that are holding a storage ticket // themselves; see SERVER-60682. 
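The coordinator-side bookkeeping added above (in `PrepareVoteConsensus::registerVote` and the reshaped `persistDecision`) boils down to taking the union of the namespaces reported by each committing participant. A self-contained sketch of that idea, using `std::unordered_set` and plain strings rather than the server's own container and namespace types, so it compiles on its own:

```cpp
#include <string>
#include <unordered_set>
#include <vector>

struct PrepareVoteSketch {
    bool commit;
    std::vector<std::string> affectedNamespaces;  // only meaningful for commit votes
};

// Union of the namespaces touched by all shards that voted to commit.
std::unordered_set<std::string> collectAffectedNamespaces(
    const std::vector<PrepareVoteSketch>& votes) {
    std::unordered_set<std::string> all;
    for (const auto& vote : votes) {
        if (vote.commit) {
            all.insert(vote.affectedNamespaces.begin(), vote.affectedNamespaces.end());
        }
    }
    return all;
}

int main() {
    std::vector<PrepareVoteSketch> votes{{true, {"db1.coll1", "db2.coll2"}},
                                         {true, {"db1.coll2", "db2.coll1"}}};
    // Matches the expectation in SendPrepareToShardsCollectsAffectedNamespaces:
    // four distinct namespaces across the two shard responses.
    return collectAffectedNamespaces(votes).size() == 4 ? 0 : 1;
}
```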
@@ -448,8 +495,12 @@ Future persistDecision(txn::AsyncWorkScheduler& scheduler, opCtx->lockState(), AdmissionContext::Priority::kImmediate); getTransactionCoordinatorWorkerCurOpRepository()->set( opCtx, lsid, txnNumberAndRetryCounter, CoordinatorAction::kWritingDecision); - return persistDecisionBlocking( - opCtx, lsid, txnNumberAndRetryCounter, participants, decision); + return persistDecisionBlocking(opCtx, + lsid, + txnNumberAndRetryCounter, + std::move(participants), + decision, + std::move(affectedNamespaces)); }); }); } @@ -470,7 +521,7 @@ Future sendCommit(ServiceContext* service, << WriteConcernOptions::Majority)); if (auto txnRetryCounter = txnNumberAndRetryCounter.getTxnRetryCounter(); txnRetryCounter && !isDefaultTxnRetryCounter(*txnRetryCounter)) { - bob.append(OperationSessionInfo::kTxnRetryCounterFieldName, *txnRetryCounter); + bob.append(OperationSessionInfoFromClient::kTxnRetryCounterFieldName, *txnRetryCounter); } apiParams.appendInfo(&bob); auto commitObj = commitTransaction.toBSON(bob.obj()); @@ -514,7 +565,7 @@ Future sendAbort(ServiceContext* service, << WriteConcernOptions::Majority)); if (auto txnRetryCounter = txnNumberAndRetryCounter.getTxnRetryCounter(); txnRetryCounter && !isDefaultTxnRetryCounter(*txnRetryCounter)) { - bob.append(OperationSessionInfo::kTxnRetryCounterFieldName, *txnRetryCounter); + bob.append(OperationSessionInfoFromClient::kTxnRetryCounterFieldName, *txnRetryCounter); } apiParams.appendInfo(&bob); auto abortObj = abortTransaction.toBSON(bob.obj()); @@ -720,9 +771,9 @@ Future sendPrepareToShard(ServiceContext* service, } if (status.isOK()) { - auto prepareTimestampField = response.data["prepareTimestamp"]; - if (prepareTimestampField.eoo() || - prepareTimestampField.timestamp().isNull()) { + auto reply = + PrepareReply::parse(IDLParserContext("PrepareReply"), response.data); + if (!reply.getPrepareTimestamp()) { Status abortStatus(ErrorCodes::Error(50993), str::stream() << "Coordinator shard received an OK response " @@ -739,7 +790,7 @@ Future sendPrepareToShard(ServiceContext* service, "error"_attr = redact(abortStatus)); return PrepareResponse{ - shardId, PrepareVote::kAbort, boost::none, abortStatus}; + shardId, PrepareVote::kAbort, boost::none, {}, abortStatus}; } LOGV2_DEBUG( @@ -752,12 +803,15 @@ Future sendPrepareToShard(ServiceContext* service, "sessionId"_attr = lsid, "txnNumberAndRetryCounter"_attr = txnNumberAndRetryCounter, "shardId"_attr = shardId, - "prepareTimestampField"_attr = prepareTimestampField.timestamp()); + "prepareTimestamp"_attr = reply.getPrepareTimestamp(), + "affectedNamespaces"_attr = reply.getAffectedNamespaces()); - return PrepareResponse{shardId, - PrepareVote::kCommit, - prepareTimestampField.timestamp(), - boost::none}; + return PrepareResponse{ + shardId, + PrepareVote::kCommit, + *reply.getPrepareTimestamp(), + reply.getAffectedNamespaces().value_or(std::vector{}), + boost::none}; } LOGV2_DEBUG(22479, @@ -776,6 +830,7 @@ Future sendPrepareToShard(ServiceContext* service, shardId, PrepareVote::kAbort, boost::none, + {}, status.withContext(str::stream() << "from shard " << shardId)}; } @@ -792,7 +847,7 @@ Future sendPrepareToShard(ServiceContext* service, // treat ShardNotFound as a vote to abort, which is always safe since the node // must then send abort. 
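The per-shard prepare result handled in this function now also carries the namespaces reported by that shard, and the abort and error paths pass `{}` for that position. A simplified, self-contained stand-in for its shape (the real `PrepareResponse` lives in transaction_coordinator_util.h and uses the server's own types):

```cpp
#include <optional>
#include <string>
#include <utility>
#include <vector>

enum class Vote { kCommit, kAbort };

// Simplified stand-in: the namespaces list is only populated on a commit vote.
struct PrepareResponseSketch {
    std::string shardId;
    std::optional<Vote> vote;                        // unset if the shard never answered
    std::optional<std::pair<unsigned, unsigned>> prepareTimestamp;  // (secs, inc); commit only
    std::vector<std::string> affectedNamespaces;     // commit only; new in this patch
    std::optional<std::string> abortReason;          // abort / no-vote only
};

PrepareResponseSketch makeCommitResponse(std::string shardId,
                                         std::pair<unsigned, unsigned> ts,
                                         std::vector<std::string> namespaces) {
    return {std::move(shardId), Vote::kCommit, ts, std::move(namespaces), std::nullopt};
}
```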
return Future::makeReady( - {shardId, CommitDecision::kAbort, boost::none, status}); + {shardId, CommitDecision::kAbort, boost::none, {}, status}); }); }); @@ -809,6 +864,7 @@ Future sendPrepareToShard(ServiceContext* service, return PrepareResponse{shardId, boost::none, boost::none, + {}, Status(ErrorCodes::NoSuchTransaction, status.reason())}; }); } diff --git a/src/mongo/db/s/transaction_coordinator_util.h b/src/mongo/db/s/transaction_coordinator_util.h index 1b2011ba020c1..756d91be6f82c 100644 --- a/src/mongo/db/s/transaction_coordinator_util.h +++ b/src/mongo/db/s/transaction_coordinator_util.h @@ -29,11 +29,30 @@ #pragma once +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" #include "mongo/db/s/transaction_coordinator_document_gen.h" #include "mongo/db/s/transaction_coordinator_futures_util.h" +#include "mongo/db/s/transaction_coordinator_structures.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/util/future.h" namespace mongo { namespace txn { @@ -73,6 +92,10 @@ class PrepareVoteConsensus { */ CoordinatorCommitDecision decision() const; + absl::flat_hash_set releaseAffectedNamespaces() { + return std::move(_affectedNamespaces); + } + private: int _numShards; @@ -81,7 +104,7 @@ class PrepareVoteConsensus { int _numNoVotes{0}; Timestamp _maxPrepareTimestamp; - + absl::flat_hash_set _affectedNamespaces; boost::optional _abortStatus; }; @@ -135,7 +158,8 @@ Future persistDecision(txn::AsyncWorkScheduler& scheduler, const LogicalSessionId& lsid, const TxnNumberAndRetryCounter& txnNumberAndRetryCounter, const txn::ParticipantsList& participants, - const txn::CoordinatorCommitDecision& decision); + const txn::CoordinatorCommitDecision& decision, + const std::vector& affectedNamespaces); /** * Sends commit to all shards and returns a future that will be resolved when all participants have @@ -203,6 +227,9 @@ struct PrepareResponse { // Will only be set if the vote was kCommit boost::optional prepareTimestamp; + // Will only be set if the vote was kCommit + std::vector affectedNamespaces; + // Will only be set if the vote was kAbort or no value boost::optional abortReason; }; diff --git a/src/mongo/db/s/transaction_coordinator_worker_curop_repository.cpp b/src/mongo/db/s/transaction_coordinator_worker_curop_repository.cpp index ea647fa9fd1bb..de5337a1e2da5 100644 --- a/src/mongo/db/s/transaction_coordinator_worker_curop_repository.cpp +++ b/src/mongo/db/s/transaction_coordinator_worker_curop_repository.cpp @@ -29,6 +29,8 @@ #include "mongo/db/s/transaction_coordinator_worker_curop_repository.h" +#include + #include "mongo/base/shim.h" namespace mongo { diff --git a/src/mongo/db/s/transaction_coordinator_worker_curop_repository.h b/src/mongo/db/s/transaction_coordinator_worker_curop_repository.h index 8a574a567385c..69cafb79e1155 100644 --- a/src/mongo/db/s/transaction_coordinator_worker_curop_repository.h +++ b/src/mongo/db/s/transaction_coordinator_worker_curop_repository.h @@ -28,7 +28,13 @@ */ #pragma once +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/curop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/session/logical_session_id.h" 
+#include "mongo/db/session/logical_session_id_gen.h" namespace mongo { diff --git a/src/mongo/db/s/transaction_coordinator_worker_curop_repository_mongod.cpp b/src/mongo/db/s/transaction_coordinator_worker_curop_repository_mongod.cpp index b65fcb9fddc92..8fa3725e13eaf 100644 --- a/src/mongo/db/s/transaction_coordinator_worker_curop_repository_mongod.cpp +++ b/src/mongo/db/s/transaction_coordinator_worker_curop_repository_mongod.cpp @@ -27,8 +27,27 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include +#include + #include "mongo/base/shim.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/transaction_coordinator_worker_curop_repository.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp index 3a1030d95c6ce..99245e4aba703 100644 --- a/src/mongo/db/s/txn_two_phase_commit_cmds.cpp +++ b/src/mongo/db/s/txn_two_phase_commit_cmds.cpp @@ -28,20 +28,59 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/uncommitted_catalog_updates.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/commands/txn_two_phase_commit_cmds_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/transaction_coordinator_service.h" +#include "mongo/db/s/transaction_coordinator_structures.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction @@ -67,19 +106,8 @@ class PrepareTransactionCmd : public TypedCommand { return true; } - class PrepareTimestamp { - public: - PrepareTimestamp(Timestamp timestamp) 
: _timestamp(std::move(timestamp)) {} - void serialize(BSONObjBuilder* bob) const { - bob->append("prepareTimestamp", _timestamp); - } - - private: - Timestamp _timestamp; - }; - using Request = PrepareTransaction; - using Response = PrepareTimestamp; + using Response = PrepareReply; class Invocation final : public InvocationBase { public: @@ -166,16 +194,18 @@ class PrepareTransactionCmd : public TypedCommand { uasserted(ErrorCodes::HostUnreachable, "returning network error because failpoint is on"); } - return PrepareTimestamp(prepareOpTime.getTimestamp()); + return createResponse(prepareOpTime.getTimestamp(), + txnParticipant.affectedNamespaces()); } - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx, {}); + auto [prepareTimestamp, affectedNamespaces] = + txnParticipant.prepareTransaction(opCtx, {}); if (MONGO_unlikely(participantReturnNetworkErrorForPrepareAfterExecutingPrepareLogic .shouldFail())) { uasserted(ErrorCodes::HostUnreachable, "returning network error because failpoint is on"); } - return PrepareTimestamp(std::move(prepareTimestamp)); + return createResponse(std::move(prepareTimestamp), std::move(affectedNamespaces)); } private: @@ -187,12 +217,21 @@ class PrepareTransactionCmd : public TypedCommand { return NamespaceString(request().getDbName()); } + Response createResponse(Timestamp prepareTimestamp, + std::vector affectedNamespaces) { + Response response; + response.setPrepareTimestamp(std::move(prepareTimestamp)); + response.setAffectedNamespaces(std::move(affectedNamespaces)); + return response; + } + void doCheckAuthorization(OperationContext* opCtx) const override { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::internal})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal})); } }; diff --git a/src/mongo/db/s/type_shard_collection.cpp b/src/mongo/db/s/type_shard_collection.cpp index 736b57a3970f1..88003625d067d 100644 --- a/src/mongo/db/s/type_shard_collection.cpp +++ b/src/mongo/db/s/type_shard_collection.cpp @@ -29,6 +29,17 @@ #include "mongo/db/s/type_shard_collection.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + namespace mongo { ShardCollectionType::ShardCollectionType(NamespaceString nss, diff --git a/src/mongo/db/s/type_shard_collection.h b/src/mongo/db/s/type_shard_collection.h index 7a923b08c979b..30875a3a36d59 100644 --- a/src/mongo/db/s/type_shard_collection.h +++ b/src/mongo/db/s/type_shard_collection.h @@ -29,7 +29,17 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/type_shard_collection_gen.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/s/type_shard_collection_test.cpp b/src/mongo/db/s/type_shard_collection_test.cpp index 1947f68af19ac..c22bd10c70768 100644 --- a/src/mongo/db/s/type_shard_collection_test.cpp +++ b/src/mongo/db/s/type_shard_collection_test.cpp @@ -27,12 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" #include "mongo/db/s/type_shard_collection.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/time_support.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -47,11 +58,11 @@ const BSONObj kDefaultCollation = BSON("locale" TEST(ShardCollectionType, FromBSONEmptyShardKeyFails) { ASSERT_THROWS_CODE( ShardCollectionType(BSON(ShardCollectionType::kNssFieldName - << kNss.ns() << ShardCollectionType::kEpochFieldName << OID::gen() - << ShardCollectionType::kTimestampFieldName << Timestamp(1, 1) - << ShardCollectionType::kUuidFieldName << UUID::gen() - << ShardCollectionType::kKeyPatternFieldName << BSONObj() - << ShardCollectionType::kUniqueFieldName << true)), + << kNss.ns_forTest() << ShardCollectionType::kEpochFieldName + << OID::gen() << ShardCollectionType::kTimestampFieldName + << Timestamp(1, 1) << ShardCollectionType::kUuidFieldName + << UUID::gen() << ShardCollectionType::kKeyPatternFieldName + << BSONObj() << ShardCollectionType::kUniqueFieldName << true)), DBException, ErrorCodes::ShardKeyNotFound); } @@ -63,7 +74,7 @@ TEST(ShardCollectionType, ShardCollectionType shardCollType( BSON(ShardCollectionType::kNssFieldName - << kNss.ns() << ShardCollectionType::kEpochFieldName << epoch + << kNss.ns_forTest() << ShardCollectionType::kEpochFieldName << epoch << ShardCollectionType::kTimestampFieldName << timestamp << ShardCollectionType::kUuidFieldName << UUID::gen() << ShardCollectionType::kKeyPatternFieldName << kKeyPattern diff --git a/src/mongo/db/s/type_shard_database.cpp b/src/mongo/db/s/type_shard_database.cpp index 786d00545ec8e..c2b604f8fda91 100644 --- a/src/mongo/db/s/type_shard_database.cpp +++ b/src/mongo/db/s/type_shard_database.cpp @@ -29,6 +29,8 @@ #include "mongo/db/s/type_shard_database.h" +#include "mongo/idl/idl_parser.h" + namespace mongo { ShardDatabaseType::ShardDatabaseType(const BSONObj& obj) { diff --git a/src/mongo/db/s/type_shard_database.h b/src/mongo/db/s/type_shard_database.h index 8887326dfc602..8c60c4e2bdd0f 100644 --- a/src/mongo/db/s/type_shard_database.h +++ b/src/mongo/db/s/type_shard_database.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/s/type_shard_database_gen.h" namespace mongo { @@ -42,7 +43,6 @@ class ShardDatabaseType : private ShardDatabaseTypeBase { // Make getters and setters accessible. using ShardDatabaseTypeBase::getName; using ShardDatabaseTypeBase::getPrimary; - using ShardDatabaseTypeBase::getSharded; using ShardDatabaseTypeBase::getVersion; explicit ShardDatabaseType(const BSONObj& obj); diff --git a/src/mongo/db/s/type_shard_database.idl b/src/mongo/db/s/type_shard_database.idl index f16680f85f97d..fb12a825d89de 100644 --- a/src/mongo/db/s/type_shard_database.idl +++ b/src/mongo/db/s/type_shard_database.idl @@ -54,14 +54,6 @@ structs: validator: callback: "ShardId::validate" optional: false - # The following field has been deprecated in 6.0 and should not be used - # TODO SERVER-63983 make this field optional - partitioned: - description: "Specify if it is allowed to create sharded collection on this database." 
- cpp_name: sharded - type: bool - default: false - optional: false # What follows below are fields which control DDL operations on the database object diff --git a/src/mongo/db/s/type_shard_identity.cpp b/src/mongo/db/s/type_shard_identity.cpp index 9d650fcd5bbf1..9d089066af96d 100644 --- a/src/mongo/db/s/type_shard_identity.cpp +++ b/src/mongo/db/s/type_shard_identity.cpp @@ -27,14 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/type_shard_identity.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/s/type_shard_identity.h" +#include "mongo/db/server_options.h" #include "mongo/db/shard_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/s/type_shard_identity.h b/src/mongo/db/s/type_shard_identity.h index f6ba00bfbfea7..b8ec3fb422a6e 100644 --- a/src/mongo/db/s/type_shard_identity.h +++ b/src/mongo/db/s/type_shard_identity.h @@ -31,6 +31,9 @@ #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/connection_string.h" #include "mongo/db/jsobj.h" #include "mongo/db/s/add_shard_cmd_gen.h" diff --git a/src/mongo/db/s/type_shard_identity_test.cpp b/src/mongo/db/s/type_shard_identity_test.cpp index bdf9cda055c87..4c2e248114a1e 100644 --- a/src/mongo/db/s/type_shard_identity_test.cpp +++ b/src/mongo/db/s/type_shard_identity_test.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/client/connection_string.h" #include "mongo/db/s/type_shard_identity.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp b/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp index 9a68cdb7e2be9..d2e3f5894bc34 100644 --- a/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp +++ b/src/mongo/db/s/user_writes_recoverable_critical_section_service.cpp @@ -28,18 +28,44 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/s/user_writes_recoverable_critical_section_service.h" - -#include "mongo/db/db_raii.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/global_user_write_block_state.h" #include 
"mongo/db/s/user_writes_critical_section_document_gen.h" +#include "mongo/db/s/user_writes_recoverable_critical_section_service.h" +#include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -69,8 +95,8 @@ const auto serviceDecorator = BSONObj findRecoverableCriticalSectionDoc(OperationContext* opCtx, const NamespaceString& nss) { DBDirectClient dbClient(opCtx); - const auto queryNss = - BSON(UserWriteBlockingCriticalSectionDocument::kNssFieldName << nss.toString()); + const auto queryNss = BSON(UserWriteBlockingCriticalSectionDocument::kNssFieldName + << NamespaceStringUtil::serialize(nss)); FindCommandRequest findRequest{NamespaceString::kUserWritesCriticalSectionsNamespace}; findRequest.setFilter(queryNss); return dbClient.findOne(std::move(findRequest)); @@ -83,7 +109,8 @@ void setBlockUserWritesDocumentField(OperationContext* opCtx, NamespaceString::kUserWritesCriticalSectionsNamespace); store.update( opCtx, - BSON(UserWriteBlockingCriticalSectionDocument::kNssFieldName << nss.toString()), + BSON(UserWriteBlockingCriticalSectionDocument::kNssFieldName + << NamespaceStringUtil::serialize(nss)), BSON("$set" << BSON(UserWriteBlockingCriticalSectionDocument::kBlockUserWritesFieldName << blockUserWrites)), ShardingCatalogClient::kLocalWriteConcern); @@ -344,7 +371,7 @@ void UserWritesRecoverableCriticalSectionService::releaseRecoverableCriticalSect deleteOp.setDeletes({[&] { write_ops::DeleteOpEntry entry; entry.setQ(BSON(UserWriteBlockingCriticalSectionDocument::kNssFieldName - << nss.toString())); + << NamespaceStringUtil::serialize(nss))); // At most one doc can possibly match the above query. entry.setMulti(false); return entry; diff --git a/src/mongo/db/s/user_writes_recoverable_critical_section_service.h b/src/mongo/db/s/user_writes_recoverable_critical_section_service.h index 9df6045575f57..942a75b2a59b6 100644 --- a/src/mongo/db/s/user_writes_recoverable_critical_section_service.h +++ b/src/mongo/db/s/user_writes_recoverable_critical_section_service.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" diff --git a/src/mongo/db/s/vector_clock_config_server_test.cpp b/src/mongo/db/s/vector_clock_config_server_test.cpp index e56f0c763f5d9..86bf330c7ccab 100644 --- a/src/mongo/db/s/vector_clock_config_server_test.cpp +++ b/src/mongo/db/s/vector_clock_config_server_test.cpp @@ -27,15 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/keys_collection_client_sharded.h" #include "mongo/db/keys_collection_manager.h" +#include "mongo/db/logical_time.h" #include "mongo/db/logical_time_validator.h" #include "mongo/db/s/config/config_server_test_fixture.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/s/grid.h" +#include "mongo/transport/session.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/s/vector_clock_shard_server_test.cpp b/src/mongo/db/s/vector_clock_shard_server_test.cpp index e5368ec14e2a0..ff76d0717c604 100644 --- a/src/mongo/db/s/vector_clock_shard_server_test.cpp +++ b/src/mongo/db/s/vector_clock_shard_server_test.cpp @@ -27,17 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/dbdirectclient.h" #include "mongo/db/keys_collection_client_direct.h" #include "mongo/db/keys_collection_manager.h" +#include "mongo/db/logical_time.h" #include "mongo/db/logical_time_validator.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/shard_server_test_fixture.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_document_gen.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/transport/session.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" namespace mongo { namespace { @@ -125,12 +151,11 @@ DEATH_TEST_F(VectorClockShardServerTest, CannotTickConfigTime, "Hit a MONGO_UNRE vc->tickConfigTime(1); } -// TODO SERVER-60110 re-enable the following test -// DEATH_TEST_F(VectorClockShardServerTest, CannotTickToConfigTime, "Hit a MONGO_UNREACHABLE") { -// auto sc = getServiceContext(); -// auto vc = VectorClockMutable::get(sc); -// vc->tickConfigTimeTo(LogicalTime()); -//} +DEATH_TEST_F(VectorClockShardServerTest, CannotTickToConfigTime, "Hit a MONGO_UNREACHABLE") { + auto sc = getServiceContext(); + auto vc = VectorClockMutable::get(sc); + vc->tickConfigTimeTo(LogicalTime()); +} DEATH_TEST_F(VectorClockShardServerTest, CannotTickTopologyTime, "Hit a MONGO_UNREACHABLE") { auto sc = getServiceContext(); diff --git 
a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp index 4d358859c5cd7..30efd2ec6d396 100644 --- a/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp +++ b/src/mongo/db/s/wait_for_ongoing_chunk_splits_command.cpp @@ -28,9 +28,16 @@ */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/db/server_feature_flags.idl b/src/mongo/db/server_feature_flags.idl index d7324158ccde8..c3050b0fa39d8 100644 --- a/src/mongo/db/server_feature_flags.idl +++ b/src/mongo/db/server_feature_flags.idl @@ -34,6 +34,7 @@ feature_flags: description: "Enable parsing and handling of SecurityTokens in multitenancy mode" cpp_varname: gFeatureFlagSecurityToken default: false + shouldBeFCVGated: true featureFlagRequireTenantID: # Only available with the server parameter "multitenancySupport". description: >- @@ -41,51 +42,42 @@ feature_flags: require tenantID to be a part of NamespaceString and TenantDatabase. cpp_varname: gFeatureFlagRequireTenantID default: false - featureFlagConnHealthMetrics: - description: "Enable newly added cluster connection health metrics" - cpp_varname: gFeatureFlagConnHealthMetrics - default: true - version: 6.3 + shouldBeFCVGated: true featureFlagGlobalIndexes: description: "Enable support for global indexes" cpp_varname: gFeatureFlagGlobalIndexes default: false + shouldBeFCVGated: true featureFlagAdditionalParticipants: description: "Adding additional participants to existing transactions" cpp_varname: gFeatureFlagAdditionalParticipants default: false - featureFlagUseNewCompactStructuredEncryptionDataCoordinator: - description: "Use the new 6.1 compact structured encryption data coordinator" - cpp_varname: gFeatureFlagUseNewCompactStructuredEncryptionDataCoordinator - default: true - version: 6.1 - featureFlagOIDC: - description: "Feature flag for OIDC support" - cpp_varname: gFeatureFlagOIDC - default: true - version: 7.0 + shouldBeFCVGated: true featureFlagBulkWriteCommand: description: "Support for bulkWrite command and one-shot transactions" cpp_varname: gFeatureFlagBulkWriteCommand default: false + shouldBeFCVGated: true featureFlagAuditConfigClusterParameter: description: "Enable use of new auditConfig cluster server parameter" cpp_varname: feature_flags::gFeatureFlagAuditConfigClusterParameter + default: true + version: 7.1 + shouldBeFCVGated: true + featureFlagOCSF: + description: "Provide an OCSF compatible output for audit logs" + cpp_varname: gFeatureFlagOCSF default: false + shouldBeFCVGated: false featureFlagStreams: description: "Enable support for streams" cpp_varname: gFeatureFlagStreams default: false + shouldBeFCVGated: true featureFlagUseUnreplicatedTruncatesForDeletions: description: "Feature flag to enable pre-image collection and change collection maintenance using unreplicated truncates instead of normal document deletions replicated from the primary." cpp_varname: feature_flags::gFeatureFlagUseUnreplicatedTruncatesForDeletions default: false - featureFlagConfigurableX509ClusterAuthn: - description: >- - Enable configurable parameters for detection of peer server nodes using X.509 - intracluster authentication. 
- cpp_varname: gFeatureFlagConfigurableX509ClusterAuthn - default: true - version: 7.0 + shouldBeFCVGated: false diff --git a/src/mongo/db/server_options.cpp b/src/mongo/db/server_options.cpp index 6f25e67674291..e162eed9b3ba0 100644 --- a/src/mongo/db/server_options.cpp +++ b/src/mongo/db/server_options.cpp @@ -30,6 +30,9 @@ #include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/str.h" #include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/server_options.h b/src/mongo/db/server_options.h index 36ce77d82e79a..0b7cc203da873 100644 --- a/src/mongo/db/server_options.h +++ b/src/mongo/db/server_options.h @@ -29,6 +29,18 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/cluster_role.h" @@ -36,6 +48,7 @@ #include "mongo/platform/atomic_word.h" #include "mongo/platform/process_id.h" #include "mongo/stdx/variant.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/net/cidr.h" #include "mongo/util/version/releases.h" @@ -79,7 +92,6 @@ struct ServerGlobalParams { AtomicWord slowMS{100}; // --time in ms that is "slow" AtomicWord sampleRate{1.0}; // --samplerate rate at which to sample slow queries int defaultLocalThresholdMillis = 15; // --localThreshold in ms to consider a node local - bool moveParanoia = false; // for move chunk paranoia bool noUnixSocket = false; // --nounixsocket bool doFork = false; // --fork diff --git a/src/mongo/db/server_options_base.cpp b/src/mongo/db/server_options_base.cpp index d8768dfcd02a8..03515aaa61411 100644 --- a/src/mongo/db/server_options_base.cpp +++ b/src/mongo/db/server_options_base.cpp @@ -29,16 +29,20 @@ #include "mongo/db/server_options_base.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/db/auth/cluster_auth_mode.h" +#include "mongo/db/server_options.h" #include "mongo/db/server_options_base_gen.h" #include "mongo/db/server_options_general_gen.h" #include "mongo/logv2/log_component.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_description.h" #include "mongo/util/options_parser/option_section.h" -#include "mongo/util/options_parser/startup_option_init.h" -#include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" namespace moe = mongo::optionenvironment; diff --git a/src/mongo/db/server_options_base.h b/src/mongo/db/server_options_base.h index bffd542324eb5..39c1f67571354 100644 --- a/src/mongo/db/server_options_base.h +++ b/src/mongo/db/server_options_base.h @@ -30,6 +30,7 @@ #include #include "mongo/base/status.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" namespace mongo { diff --git a/src/mongo/db/server_options_general.idl b/src/mongo/db/server_options_general.idl index a99deacdf5330..59d685bb95a7f 100644 --- a/src/mongo/db/server_options_general.idl +++ b/src/mongo/db/server_options_general.idl @@ -75,6 +75,7 @@ configs: is_constexpr: false short_name: port arg_vartype: Int + validator: { gte: 0, lte: 65535 } 'net.ipv6': description: 'Enable IPv6 support (disabled by default)' short_name: ipv6 diff --git 
a/src/mongo/db/server_options_helpers.cpp b/src/mongo/db/server_options_helpers.cpp index c1b32744cfc24..d1fa2955c04e4 100644 --- a/src/mongo/db/server_options_helpers.cpp +++ b/src/mongo/db/server_options_helpers.cpp @@ -35,28 +35,35 @@ #define SYSLOG_NAMES #include #endif -#include -#include +#include +#include +#include #include -#include -#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" #include "mongo/bson/util/builder.h" -#include "mongo/config.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/server_options.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" #include "mongo/logv2/log_component.h" #include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_format.h" #include "mongo/logv2/log_manager.h" -#include "mongo/transport/message_compressor_registry.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cmdline_utils/censor_cmdline.h" #include "mongo/util/fail_point.h" -#include "mongo/util/net/sock.h" -#include "mongo/util/net/socket_utils.h" -#include "mongo/util/net/ssl_options.h" -#include "mongo/util/options_parser/startup_options.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/server_options_helpers.h b/src/mongo/db/server_options_helpers.h index bbb362587cd05..fb335dcbd904d 100644 --- a/src/mongo/db/server_options_helpers.h +++ b/src/mongo/db/server_options_helpers.h @@ -31,11 +31,15 @@ #include #include +#include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/server_parameter.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" +#include "mongo/util/options_parser/value.h" namespace mongo { diff --git a/src/mongo/db/server_options_init.cpp b/src/mongo/db/server_options_init.cpp index 6396125dc778d..72a200a2ac525 100644 --- a/src/mongo/db/server_options_init.cpp +++ b/src/mongo/db/server_options_init.cpp @@ -27,8 +27,12 @@ * it in the license file. 
*/ -#include "mongo/base/init.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/db/server_options_server_helpers.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/server_options_server_helpers.cpp b/src/mongo/db/server_options_server_helpers.cpp index 87e11becb7cc6..6013376ede120 100644 --- a/src/mongo/db/server_options_server_helpers.cpp +++ b/src/mongo/db/server_options_server_helpers.cpp @@ -30,34 +30,45 @@ #include "mongo/db/server_options_server_helpers.h" -#include -#include +#include #include #include -#include +#include #include -#include +#include +#include +#include #include -#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/bson/util/builder.h" -#include "mongo/config.h" +#include "mongo/bson/oid.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/server_options.h" -#include "mongo/db/server_options_base.h" #include "mongo/db/server_options_helpers.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" #include "mongo/logv2/log_component.h" #include "mongo/transport/message_compressor_registry.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cmdline_utils/censor_cmdline.h" -#include "mongo/util/fail_point.h" -#include "mongo/util/net/sock.h" +#include "mongo/util/net/cidr.h" #include "mongo/util/net/socket_utils.h" -#include "mongo/util/net/ssl_options.h" -#include "mongo/util/options_parser/options_parser.h" -#include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -460,7 +471,7 @@ Status storeServerOptions(const moe::Environment& params) { } if (params.count("net.compression.compressors")) { - const auto ret = + auto ret = storeMessageCompressionOptions(params["net.compression.compressors"].as()); if (!ret.isOK()) { return ret; diff --git a/src/mongo/db/server_options_server_helpers.h b/src/mongo/db/server_options_server_helpers.h index 5a243ab5dee80..b522c67270619 100644 --- a/src/mongo/db/server_options_server_helpers.h +++ b/src/mongo/db/server_options_server_helpers.h @@ -30,6 +30,8 @@ #pragma once #include +#include +#include #include "mongo/base/status.h" #include "mongo/util/options_parser/environment.h" diff --git a/src/mongo/db/server_options_test.cpp b/src/mongo/db/server_options_test.cpp index 5f0b44e944765..ab67538ac9070 100644 --- a/src/mongo/db/server_options_test.cpp +++ b/src/mongo/db/server_options_test.cpp @@ -27,14 +27,20 @@ * it in the license file. 
*/ -#include "mongo/config.h" -#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) -#include -#endif +#include +#include +#include + +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" +#include + #ifndef _WIN32 #include -#include #include #endif @@ -42,25 +48,41 @@ #include #endif -#include -#include - -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/server_options.h" #include "mongo/db/server_options_base.h" #include "mongo/db/server_options_helpers.h" #include "mongo/db/server_options_nongeneral_gen.h" #include "mongo/db/server_options_server_helpers.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/errno_util.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/options_parser.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" + +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -934,23 +956,79 @@ TEST(SetupOptions, ForkOptionAlwaysFalseWithNoforkEnvVar) { } #endif -TEST(ClusterRole, Equality) { - ASSERT_TRUE(ClusterRole(ClusterRole::None).has(ClusterRole::None)); - ASSERT_TRUE(!ClusterRole(ClusterRole::None).has(ClusterRole::ConfigServer)); - ASSERT_TRUE(!ClusterRole(ClusterRole::None).has(ClusterRole::ShardServer)); - - ASSERT_TRUE(!ClusterRole(ClusterRole::ConfigServer).has(ClusterRole::None)); - ASSERT_TRUE(ClusterRole(ClusterRole::ConfigServer).has(ClusterRole::ConfigServer)); - ASSERT_TRUE(ClusterRole(ClusterRole::ConfigServer).has(ClusterRole::ShardServer)); - - ASSERT_TRUE(!ClusterRole(ClusterRole::ShardServer).has(ClusterRole::None)); - ASSERT_TRUE(!ClusterRole(ClusterRole::ShardServer).has(ClusterRole::ConfigServer)); - ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer).has(ClusterRole::ShardServer)); - - ASSERT_TRUE(ClusterRole(ClusterRole::ShardServer).exclusivelyHasShardRole()); - ASSERT_FALSE(ClusterRole(ClusterRole::ConfigServer).exclusivelyHasShardRole()); +TEST(ClusterRole, MonoRole) { + const ClusterRole noRole{ClusterRole::None}; + ASSERT_TRUE(noRole.has(ClusterRole::None)); + ASSERT_FALSE(noRole.has(ClusterRole::ShardServer)); + ASSERT_FALSE(noRole.has(ClusterRole::ConfigServer)); + ASSERT_FALSE(noRole.has(ClusterRole::RouterServer)); + ASSERT_TRUE(noRole.hasExclusively(ClusterRole::None)); + ASSERT_FALSE(noRole.hasExclusively(ClusterRole::ShardServer)); + ASSERT_FALSE(noRole.hasExclusively(ClusterRole::ConfigServer)); + ASSERT_FALSE(noRole.hasExclusively(ClusterRole::RouterServer)); + + const ClusterRole shardRole{ClusterRole::ShardServer}; + ASSERT_FALSE(shardRole.has(ClusterRole::None)); + 
ASSERT_TRUE(shardRole.has(ClusterRole::ShardServer)); + ASSERT_FALSE(shardRole.has(ClusterRole::ConfigServer)); + ASSERT_FALSE(shardRole.has(ClusterRole::RouterServer)); + ASSERT_FALSE(shardRole.hasExclusively(ClusterRole::None)); + ASSERT_TRUE(shardRole.hasExclusively(ClusterRole::ShardServer)); + ASSERT_FALSE(shardRole.hasExclusively(ClusterRole::ConfigServer)); + ASSERT_FALSE(shardRole.hasExclusively(ClusterRole::RouterServer)); + + const ClusterRole routerRole{ClusterRole::RouterServer}; + ASSERT_FALSE(routerRole.has(ClusterRole::None)); + ASSERT_FALSE(routerRole.has(ClusterRole::ShardServer)); + ASSERT_FALSE(routerRole.has(ClusterRole::ConfigServer)); + ASSERT_TRUE(routerRole.has(ClusterRole::RouterServer)); + ASSERT_FALSE(routerRole.hasExclusively(ClusterRole::None)); + ASSERT_FALSE(routerRole.hasExclusively(ClusterRole::ShardServer)); + ASSERT_FALSE(routerRole.hasExclusively(ClusterRole::ConfigServer)); + ASSERT_TRUE(routerRole.hasExclusively(ClusterRole::RouterServer)); +} - ASSERT_FALSE(ClusterRole(ClusterRole::ConfigServer).exclusivelyHasConfigRole()); +TEST(ClusterRole, MultiRole) { + const ClusterRole shardAndConfigRole{ClusterRole::ShardServer, ClusterRole::ConfigServer}; + ASSERT_FALSE(shardAndConfigRole.has(ClusterRole::None)); + ASSERT_TRUE(shardAndConfigRole.has(ClusterRole::ShardServer)); + ASSERT_TRUE(shardAndConfigRole.has(ClusterRole::ConfigServer)); + ASSERT_FALSE(shardAndConfigRole.has(ClusterRole::RouterServer)); + ASSERT_FALSE(shardAndConfigRole.hasExclusively(ClusterRole::None)); + ASSERT_FALSE(shardAndConfigRole.hasExclusively(ClusterRole::ShardServer)); + ASSERT_FALSE(shardAndConfigRole.hasExclusively(ClusterRole::ConfigServer)); + ASSERT_FALSE(shardAndConfigRole.hasExclusively(ClusterRole::RouterServer)); + + const ClusterRole shardAndRouterRole{ClusterRole::ShardServer, ClusterRole::RouterServer}; + ASSERT_FALSE(shardAndRouterRole.has(ClusterRole::None)); + ASSERT_TRUE(shardAndRouterRole.has(ClusterRole::ShardServer)); + ASSERT_FALSE(shardAndRouterRole.has(ClusterRole::ConfigServer)); + ASSERT_TRUE(shardAndRouterRole.has(ClusterRole::RouterServer)); + ASSERT_FALSE(shardAndRouterRole.hasExclusively(ClusterRole::None)); + ASSERT_FALSE(shardAndRouterRole.hasExclusively(ClusterRole::ShardServer)); + ASSERT_FALSE(shardAndRouterRole.hasExclusively(ClusterRole::ConfigServer)); + ASSERT_FALSE(shardAndRouterRole.hasExclusively(ClusterRole::RouterServer)); + + const ClusterRole configAndRouterRole{ClusterRole::ConfigServer, ClusterRole::RouterServer}; + ASSERT_FALSE(configAndRouterRole.has(ClusterRole::None)); + ASSERT_FALSE(configAndRouterRole.has(ClusterRole::ShardServer)); + ASSERT_TRUE(configAndRouterRole.has(ClusterRole::ConfigServer)); + ASSERT_TRUE(configAndRouterRole.has(ClusterRole::RouterServer)); + ASSERT_FALSE(configAndRouterRole.hasExclusively(ClusterRole::None)); + ASSERT_FALSE(configAndRouterRole.hasExclusively(ClusterRole::ShardServer)); + ASSERT_FALSE(configAndRouterRole.hasExclusively(ClusterRole::ConfigServer)); + ASSERT_FALSE(configAndRouterRole.hasExclusively(ClusterRole::RouterServer)); + + const ClusterRole anyRole{ + ClusterRole::ShardServer, ClusterRole::ConfigServer, ClusterRole::RouterServer}; + ASSERT_FALSE(anyRole.has(ClusterRole::None)); + ASSERT_TRUE(anyRole.has(ClusterRole::ShardServer)); + ASSERT_TRUE(anyRole.has(ClusterRole::ConfigServer)); + ASSERT_TRUE(anyRole.has(ClusterRole::RouterServer)); + ASSERT_FALSE(anyRole.hasExclusively(ClusterRole::None)); + ASSERT_FALSE(anyRole.hasExclusively(ClusterRole::ShardServer)); + 
ASSERT_FALSE(anyRole.hasExclusively(ClusterRole::ConfigServer)); + ASSERT_FALSE(anyRole.hasExclusively(ClusterRole::RouterServer)); } #if !defined(_WIN32) && !(defined(__APPLE__) && TARGET_OS_TV) diff --git a/src/mongo/db/server_parameter.cpp b/src/mongo/db/server_parameter.cpp index ba2d5db22c1b2..cb69f72f9afa9 100644 --- a/src/mongo/db/server_parameter.cpp +++ b/src/mongo/db/server_parameter.cpp @@ -29,11 +29,21 @@ #include "mongo/db/server_parameter.h" +#include #include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" #include "mongo/db/feature_flag.h" +#include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/static_immortal.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/server_parameter.h b/src/mongo/db/server_parameter.h index 47c840ff99bcf..6af2f199b4626 100644 --- a/src/mongo/db/server_parameter.h +++ b/src/mongo/db/server_parameter.h @@ -35,15 +35,32 @@ * rather parameters should be defined in .idl files. */ +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include "mongo/base/checked_cast.h" -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/parse_number.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/logical_time.h" #include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #include "mongo/util/version/releases.h" #define MONGO_SERVER_PARAMETER_REGISTER(name) \ @@ -91,6 +108,7 @@ enum class ServerParameterType { class FeatureFlag; class ServerParameterSet; + class OperationContext; template diff --git a/src/mongo/db/server_parameter_with_storage.h b/src/mongo/db/server_parameter_with_storage.h index 357e58e89a8d8..60ab98134d32f 100644 --- a/src/mongo/db/server_parameter_with_storage.h +++ b/src/mongo/db/server_parameter_with_storage.h @@ -35,18 +35,33 @@ * rather parameters should be defined in .idl files. 
*/ +#include +#include +#include #include +#include +#include #include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/parse_number.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/logical_time.h" #include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/idl_parser.h" #include "mongo/platform/atomic_proxy.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #include "mongo/util/synchronized_value.h" @@ -319,7 +334,7 @@ class IDLServerParameterWithStorage : public ServerParameter { Status validateValue(const element_type& newValue, const boost::optional& tenantId) const { for (const auto& validator : _validators) { - const auto status = validator(newValue, tenantId); + auto status = validator(newValue, tenantId); if (!status.isOK()) { return status; } diff --git a/src/mongo/db/server_recovery.cpp b/src/mongo/db/server_recovery.cpp index c44515a335803..2764cd4662c8a 100644 --- a/src/mongo/db/server_recovery.cpp +++ b/src/mongo/db/server_recovery.cpp @@ -27,11 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/server_recovery.h" +#include -#include "mongo/db/namespace_string.h" +#include "mongo/db/server_recovery.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/server_recovery.h b/src/mongo/db/server_recovery.h index 2d6d64206fd1b..99fcced44b9e2 100644 --- a/src/mongo/db/server_recovery.h +++ b/src/mongo/db/server_recovery.h @@ -34,6 +34,7 @@ #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" +#include "mongo/util/string_map.h" namespace mongo { /** diff --git a/src/mongo/db/serverless/README.md b/src/mongo/db/serverless/README.md index f9de16492e12b..597b77be14cfb 100644 --- a/src/mongo/db/serverless/README.md +++ b/src/mongo/db/serverless/README.md @@ -11,14 +11,14 @@ The following diagram illustrates the lifetime of a shard split operation: A shard is split by calling the `commitShardSplit` command, and is generally issued by a cloud component such as the atlasproxy. The shard split protocol consists of an exchange of messages between two shards: the donor and recipient. This exchange is orchestrated by the donor shard in a PrimaryOnlyService implementation, which has the following steps: 1. **Start the split operation** -The donor receives a `commitShardSplit` command with a `recipientSetName`, `recipientTagName`, and list of tenants that should be split into the recipient. The `recipientTagName` identifies recipient nodes in the donor config, and the `recipientSetName` is the setName for the recipient replica set. + The donor receives a `commitShardSplit` command with a `recipientSetName`, `recipientTagName`, and list of tenants that should be split into the recipient. The `recipientTagName` identifies recipient nodes in the donor config, and the `recipientSetName` is the setName for the recipient replica set. All active index builds for collections belonging to tenants which will be split are [aborted](https://github.com/mongodb/mongo/blob/646eed48d0da896588759030f2ec546ac6fbbd48/src/mongo/db/serverless/shard_split_donor_service.cpp#L649-L652) at the start of the split operation. 
All index builds for tenants being split will be blocked for the duration of the operation. Finally, the donor [reserves an oplog slot](https://github.com/mongodb/mongo/blob/646eed48d0da896588759030f2ec546ac6fbbd48/src/mongo/db/serverless/shard_split_donor_service.cpp#L926), called the `blockTimestamp`, after which all user requests for tenants being split will be blocked. It then durably records a state document update to the `kBlocking` state at the `blockTimestamp`, and enters the split critical section. 2. **Wait for recipient nodes to catch up** -Before proceeding with any split-specific steps, the donor must wait for all recipient nodes to catch up to the `blockTimestamp`. This wait is accomplished by calling [ReplicationCoordinator::awaitReplication with a custom tagged writeConcern](https://github.com/mongodb/mongo/blob/646eed48d0da896588759030f2ec546ac6fbbd48/src/mongo/db/serverless/shard_split_donor_service.cpp#L702), which targets nodes in the local config with the `recipientTagName`. Note that because of how replica set tags are implemented, each recipient node must have a different value for the `recipientTagName` ([learn more](https://www.mongodb.com/docs/manual/tutorial/configure-replica-set-tag-sets/#std-label-configure-custom-write-concern)). Donor nodes are guaranteed to be caught up because we [wait for majority write](https://github.com/mongodb/mongo/blob/c2a1125bc0bb729acfec94a94be924b2bb65d128/src/mongo/db/serverless/shard_split_donor_service.cpp#L663-L667) of the state document establishing the `blockTimestamp`. + Before proceeding with any split-specific steps, the donor must wait for all recipient nodes to catch up to the `blockTimestamp`. This wait is accomplished by calling [ReplicationCoordinator::awaitReplication with a custom tagged writeConcern](https://github.com/mongodb/mongo/blob/646eed48d0da896588759030f2ec546ac6fbbd48/src/mongo/db/serverless/shard_split_donor_service.cpp#L702), which targets nodes in the local config with the `recipientTagName`. Note that because of how replica set tags are implemented, each recipient node must have a different value for the `recipientTagName` ([learn more](https://www.mongodb.com/docs/manual/tutorial/configure-replica-set-tag-sets/#std-label-configure-custom-write-concern)). Donor nodes are guaranteed to be caught up because we [wait for majority write](https://github.com/mongodb/mongo/blob/c2a1125bc0bb729acfec94a94be924b2bb65d128/src/mongo/db/serverless/shard_split_donor_service.cpp#L663-L667) of the state document establishing the `blockTimestamp`. 4. **Applying the split** The donor then [prepares a "split config"](https://github.com/mongodb/mongo/blob/646eed48d0da896588759030f2ec546ac6fbbd48/src/mongo/db/serverless/shard_split_donor_service.cpp#L718-L730) which is a copy of the current config with recipient nodes removed, an increased version, and a new subdocument (`recipientConfig`) which contains the config recipient nodes will apply during split. The recipient config is a copy of the current config with donor nodes removed, recipient nodes reindexed from zero, a new set name. The donor then calls `replSetReconfig` on itself with the split config. 
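For illustration, here is a hedged sketch of roughly what such a split config could look like, built with the server's BSON helpers. Only the `recipientConfig` field name comes from the description above; the set names, hosts, member `_id`s, and version numbers are invented placeholders, not values taken from the shard split implementation:

```cpp
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {
// Hypothetical example only: a donor config with the recipient nodes removed, a bumped
// version, and an embedded `recipientConfig` that recipient nodes apply after the split.
inline BSONObj makeExampleSplitConfig() {
    return BSON("_id"
                << "donorSet"      // original set name
                << "version" << 3  // increased relative to the current donor config
                << "members"
                << BSON_ARRAY(BSON("_id" << 0 << "host"
                                         << "donor0.example.net:27017")
                              << BSON("_id" << 1 << "host"
                                            << "donor1.example.net:27017"))
                << "recipientConfig"
                << BSON("_id"
                        << "recipientSet"  // new set name for the recipient
                        << "version" << 1 << "members"
                        << BSON_ARRAY(BSON("_id" << 0  // recipient members reindexed from zero
                                                 << "host"
                                                 << "recipient0.example.net:27017"))));
}
}  // namespace mongo
```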
@@ -39,13 +39,7 @@ Before proceeding with any split-specific steps, the donor must wait for all rec `commitShardSplit` will return [TenantMigrationCommitted](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_commands.cpp#L171-L173), [CommandFailed](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_commands.cpp#L166-L169), [ConflictingServerlessOperation](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/serverless_operation_lock_registry.cpp#L52-L54), or any retryable errors encountered during the operation’s execution. On retryable error, callers are expected to retry the operation against the new donor primary. A ConflictingServerlessOperation may be retried, however the caller should do extra work to ensure the conflicting operation has completed before retrying. ### Access Blocking -During the critical section of a serverless operation the server will queue user requests for data involved in the operation, waiting to produce a response until after the critical section has completed. This process is called “blocking”, and the server provides this functionality by maintaining a [map of namespace to tenant access blocker](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/repl/tenant_migration_access_blocker_registry.h#L199). This registry is consulted when deciding to block: -* **reads** in the ServiceEntryPoint ([InvokeCommand::run](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/service_entry_point_common.cpp#L868), or [CheckoutSessionAndInvokeCommand::run](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/service_entry_point_common.cpp#L886)) -* **linearizable reads** in the [RunCommandImpl::_epilogue](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/service_entry_point_common.cpp#L1226) -* **writes** in [OpObserverImpl::onBatchedWriteCommit](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/op_observer/op_observer_impl.cpp#L2107), [OpObserverImpl::onUnpreparedTransactionCommit](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/op_observer/op_observer_impl.cpp#L2036), and the [_logOpsInner oplog helper](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/repl/oplog.cpp#L384) -* **index builds** in [ReplIndexBuildState::tryAbort](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/repl_index_build_state.cpp#L351), IndexBuildsCoordinatorMongod::_startIndexBuild ([here](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/index_builds_coordinator_mongod.cpp#L200), [here](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/index_builds_coordinator_mongod.cpp#L275)) - -Access blockers are [installed](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L155-L161) on all nodes as soon as a split operation performs its first state transition to kAbortingIndexBuilds. They are initially configured to allow all reads and writes. 
When the donor primary transitions to the kBlocking state (entering the critical section) it first instructs its access blockers to begin [blocking writes](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_service.cpp#L918), ensuring that no writes to tenant data can commit with a timestamp after the `blockTimestamp`. We begin to block reads once the kBlocking state document [update is committed](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L201). Writes begin blocking on secondaries when the kBlocking state change is [committed on the secondary](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L195), this ensures that an access blocker is already installed and blocking writes if there is donor primary failover. +[Access blockers](#access-blocking-1) are [installed](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L155-L161) on all nodes as soon as a split operation performs its first state transition to kAbortingIndexBuilds. They are initially configured to allow all reads and writes. When the donor primary transitions to the kBlocking state (entering the critical section) it first instructs its access blockers to begin [blocking writes](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_service.cpp#L918), ensuring that no writes to tenant data can commit with a timestamp after the `blockTimestamp`. We begin to block reads once the kBlocking state document [update is committed](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L201). Writes begin blocking on secondaries when the kBlocking state change is [committed on the secondary](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L195), this ensures that an access blocker is already installed and blocking writes if there is donor primary failover. Access blockers are removed when the state document backing a shard split operation is [deleted](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L437). Since garbage collection of split operation state documents is [not immediate](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_service.cpp#L1178-L1182), access blockers will continue to block reads and writes to tenant data for some time after the operation has completed its critical section. If the split operation is aborted, then access blockers will be removed as soon as the state document [records a decision and is marked garbage-collectable ](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L297-L304)(the `expireAt` field is set). Otherwise, access blockers will be removed when [the state document is deleted](https://github.com/mongodb/mongo/blob/87b60722e3c5ddaf7bc73d1ba08b31b437ef4f48/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L435-L438). 
Access blockers are removed from recipient nodes [after installing the recipient config](https://github.com/mongodb/mongo/blob/e476ee17e9258f540d97a51baf471f5496488e33/src/mongo/db/repl/replication_coordinator_impl_heartbeat.cpp#L878-L887), they are no longer donors. a @@ -58,20 +52,15 @@ Access blockers are recovered * After initial sync has completed in [InitialSyncer::_teardown](https://github.com/mongodb/mongo/blob/65154f6a1356de6ca09e04975a0acdfb1a0351ef/src/mongo/db/repl/initial_syncer.cpp#L580) * On rollback during the [RollbackImpl::_runPhaseFromAbortToReconstructPreparedTxns](https://github.com/mongodb/mongo/blob/65154f6a1356de6ca09e04975a0acdfb1a0351ef/src/mongo/db/repl/rollback_impl.cpp#L655) -### Mutual Exclusion -Of the three types of serverless operation (tenant migration, shard merge, and shard split), no new operation may start if there are any active operations of another serverless operation type. Many operations may run concurrently as long as they are of the same serverless operation type. - -This so-called “serverless operation lock” is acquired the first time a state document is inserted for a particular operation ([shard split](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L152-L153), [tenant migration donor](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp#L58-L60), [tenant migration recipient](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp#L189-L191)). Once the lock is acquired, any attempt to insert a state document of a different operation type will [result in a ConflictingServerlessOperation](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/serverless_operation_lock_registry.cpp#L52-L54). The lock is released when an operation durably recorded its decision, and marked its state document as garbage collectable ([shard split](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L261-L263), [tenant migration donor](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp#L169-L171), [tenant migration recipient](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp#L220-L222)). Serverless operation locks continue to be held even after a stepdown for the same reason access blockers do, if an election occurs later we ensure the lock is already held to prevent conflicting operations on the newly elected primary. - ### Cleanup -Once a shard slit operation has completed it will return either [CommandFailed](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_commands.cpp#L166-L169) (if the operation was aborted for any reason), or [TenantMigrationCommitted](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_commands.cpp#L171-L173) (if the operation succeeded). At this point it is the caller’s responsibility to take any necessary post-operation actions (such as updating routing tables), before calling `forgetShardSplit` on the donor primary. 
Calling this command will cause the donor primary to mark the operation garbage-collectable, by [setting the expireAt field in the operation state document](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_donor_service.cpp#L1140-L1141) to a configurable timeout called `repl::shardSplitGarbageCollectionDelayMS` with a [default value of 15 minutes](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/repl/repl_server_parameters.idl#L688-L696). The operation will wait for the delay and then [delete the state document](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_donor_service.cpp#L1186), which in turn removes access blockers installed for the operation. It is now the responsibility of the caller to remove orphaned data on the donor and recipient. +Once a shard split operation has completed it will return either [CommandFailed](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_commands.cpp#L166-L169) (if the operation was aborted for any reason), or [TenantMigrationCommitted](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_commands.cpp#L171-L173) (if the operation succeeded). At this point it is the caller’s responsibility to take any necessary post-operation actions (such as updating routing tables), before calling `forgetShardSplit` on the donor primary. Calling this command will cause the donor primary to mark the operation garbage-collectable, by [setting the expireAt field in the operation state document](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_donor_service.cpp#L1140-L1141) to a configurable timeout called `repl::shardSplitGarbageCollectionDelayMS` with a [default value of 15 minutes](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/repl/repl_server_parameters.idl#L688-L696). The operation will wait for the delay and then [delete the state document](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_donor_service.cpp#L1186), which in turn removes access blockers installed for the operation. It is now the responsibility of the caller to remove orphaned data on the donor and recipient. -## Serverless server parameter +### Serverless server parameter The [replication.serverless](https://github.com/mongodb/mongo/blob/e75a51a7dcbe842e07a24343438706d865de96dc/src/mongo/db/mongod_options_replication.idl#L77) server parameter allows starting a mongod without providing a replica set name. It cannot be used at the same time as [replication.replSet](https://github.com/mongodb/mongo/blob/e75a51a7dcbe842e07a24343438706d865de96dc/src/mongo/db/mongod_options_replication.idl#L64) or [replication.replSetName](https://github.com/mongodb/mongo/blob/e75a51a7dcbe842e07a24343438706d865de96dc/src/mongo/db/mongod_options_replication.idl#L70). When `replication.serverless` is used, the replica set name is learned through [replSetInitiate](https://www.mongodb.com/docs/manual/reference/command/replSetInitiate/) or [through a heartbeat](https://github.com/mongodb/mongo/blob/e75a51a7dcbe842e07a24343438706d865de96dc/src/mongo/db/repl/replication_coordinator_impl.cpp#L5848) from another mongod.
Mongod can only learn its replica set name once. Using `replication.serverless` also enables a node to apply a recipient config to join a new recipient set as part of a split. -## Glossary +### Glossary **recipient config** The config for the recipient replica set. @@ -80,3 +69,63 @@ A config based on the original config which excludes the recipient nodes, and in **blockTimestamp** Timestamp after which reads and writes are blocked on the donor replica set for all tenants involved until completion of the split. + +## Shard Merge +A shard merge is one of the serverless scaling primitives, allowing for scale in by migrating all tenant data from an underutilized replica set to another existing replica set. The initial replica set will be decommissioned by the cloud control plane after completion of the operation. + +The following diagram illustrates the lifetime of a shard merge operation: +![shard_merge_diagram](../../../../docs/images/shard_merge_diagram.png) + +### Protocol + +1. **Start the merge operation** + The donor primary receives the `donorStartMigration` command to begin the operation. The [TenantMigrationDonorOpObserver](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp#L82) creates a donor access blocker for each tenant and a global donor access blocker. + + All active index builds for collections belonging to tenants which will be migrated are [aborted](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/tenant_migration_donor_service.cpp#L949-L968) at the start of the merge operation. All index builds for tenants being migrated will be blocked for the duration of the operation. + + The donor then reserves an oplog slot, called the `startMigrationDonorTimestamp`. It then [durably records](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/tenant_migration_donor_service.cpp#L982) a state document update to the `kDataSync` state at the `startMigrationDonorTimestamp` and sends the `recipientSyncData` command to the recipient primary with the `startMigrationDonorTimestamp` and waits for a response. + +2. **Recipient copies donor data** + The recipient primary receives the `recipientSyncData` command and [durably persists](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/shard_merge_recipient_service.cpp#L2428) a state document used to track migration progress. The [ShardMergeRecipientOpObserver](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/shard_merge_recipient_op_observer.cpp#L163-L167) creates a recipient access blocker for each tenant. The primary then opens a backup cursor on the donor, records the checkpoint timestamp, and then inserts the list of WiredTiger files that need to be cloned into the [donated files collection](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/shard_merge_recipient_service.cpp#L1034-L1046). The backup cursor is kept alive (by periodic `getMore`s) until all recipient nodes have copied donor data. WiredTiger will not modify file data on the donor while the cursor is open. + + Additionally, the recipient primary will ensure that its majority commit timestamp is greater than the backup cursor timestamp from the donor.
We [advance](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/repl/shard_merge_recipient_service.cpp#L1787-L1789) the cluster time to `donorBackupCursorCheckpointTimestamp` and then write a majority committed noop. + + A `ShardMergeRecipientOpObserver` on each recipient node will [watch for inserts](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/shard_merge_recipient_op_observer.cpp#L198) into the donated files collection and then [clone and import](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/tenant_file_importer_service.cpp#L299-L303) all file data via the `TenantFileImporterService`. When the data is consistent and all files have been imported, the recipient replies `OK` to the `recipientSyncData` command and kills the backup cursor. + +3. **Donor enters blocking state** + Upon receiving a `recipientSyncData` response, the donor reserves an oplog slot and updates the state document to the `kBlocking` state and sets the `blockTimestamp` to prevent writes. The donor then sends a second `recipientSyncData` command to the recipient with the `returnAfterReachingDonorTimestamp` set to the `blockTimestamp` and waits for a reply. + +4. **Recipient oplog catchup** + After the cloned data is consistent, the recipient primary enters the oplog catchup phase. Here, the primary fetches and [applies](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/shard_merge_recipient_service.cpp#L2230) any donor oplog entries that were written between the backup cursor checkpoint timestamp and the `blockTimestamp`. When all entries have been majority replicated and we have [ensured](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/shard_merge_recipient_service.cpp#L599-L602) that the recipient's logical clock has advanced to at least `returnAfterReachingDonorTimestamp`, the recipient replies `OK` to the second `recipientSyncData` command. + +5. **Committing the merge** + After receiving a successful response to the `recipientSyncData` command, the Donor updates its state document to `kCommitted` and sets the `commitOrAbortOpTime`. After the commit, the Donor will respond to `donorStartMigration` with `OK`. At this point, all traffic should be re-routed to the Recipient. Finally, cloud will send `donorForgetMigration` to the Donor (which will in turn send `recipientForgetMigration` to the Recipient) to mark the migration as garbage collectable. + +## Access Blocking +During the critical section of a serverless operation the server will queue user requests for data involved in the operation, waiting to produce a response until after the critical section has completed. This process is called “blocking”, and the server provides this functionality by maintaining a [map of namespace to tenant access blocker](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/repl/tenant_migration_access_blocker_registry.h#L242-L243). 
This registry is consulted when deciding to block: +* **commands** in the ServiceEntryPoint ([InvokeCommand::run](https://github.com/mongodb/mongo/blob/bc57b7313bce890cf1a7d6cdf20f1ec25949698f/src/mongo/db/service_entry_point_common.cpp#L886-L888), or [CheckoutSessionAndInvokeCommand::run](https://github.com/mongodb/mongo/blob/bc57b7313bce890cf1a7d6cdf20f1ec25949698f/src/mongo/db/service_entry_point_common.cpp#L886-L888)) +* **linearizable reads** in the [RunCommandImpl::_epilogue](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/service_entry_point_common.cpp#L1249) +* **writes** in [OpObserverImpl::onBatchedWriteCommit](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/op_observer/op_observer_impl.cpp#L1882-L1883), [OpObserverImpl::onUnpreparedTransactionCommit](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/op_observer/op_observer_impl.cpp#L1770-L1771), and the [_logOpsInner oplog helper](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/repl/oplog.cpp#L429-L430) +* **index builds** in [ReplIndexBuildState::tryAbort](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/repl_index_build_state.cpp#L495), IndexBuildsCoordinatorMongod::_startIndexBuild ([here](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/index_builds_coordinator_mongod.cpp#L282), [here](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/index_builds_coordinator_mongod.cpp#L356-L357)) + +## Mutual Exclusion +Of the three types of serverless operation (tenant migration, shard merge, and shard split), no new operation may start if there are any active operations of another serverless operation type. The serverless operation lock allows multiple Tenant Migrations to run simultaneously, but it does not allow running operations of a different type at the same time. + +This so-called “serverless operation lock” is acquired the first time a state document is inserted for a particular operation ([shard split](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L150-L151), [tenant migration donor](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp#L58-L60), [tenant migration recipient](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp#L127-L129), [shard merge recipient](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/shard_merge_recipient_op_observer.cpp#L152-L154)). Once the lock is acquired, any attempt to insert a state document of a different operation type will [result in a ConflictingServerlessOperation](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/serverless_operation_lock_registry.cpp#L52-L54). 
The lock is released when an operation durably records its decision and marks its state document as garbage collectable ([shard split](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/serverless/shard_split_donor_op_observer.cpp#L261-L263), [tenant migration donor](https://github.com/mongodb/mongo/blob/1c4fafd4ae5c082f36a8af1442aa48174962b1b4/src/mongo/db/repl/tenant_migration_donor_op_observer.cpp#L169-L171), [tenant migration recipient](https://github.com/mongodb/mongo/blob/a723af8863c5fae1eee7b0a891066e923468e974/src/mongo/db/repl/tenant_migration_recipient_op_observer.cpp#L152-L154), [shard merge recipient](https://github.com/mongodb/mongo/blob/f05053d2cb65b84eaed4db94c25e9fe4be82d78c/src/mongo/db/repl/shard_merge_recipient_op_observer.cpp#L280-L282)). Serverless operation locks continue to be held even after a stepdown for the same reason access blockers do: if an election occurs later, the lock is already held to prevent conflicting operations on the newly elected primary. + +## Change Streams +Change Stream data for a Serverless cluster is stored in a handful of tenantId-prefixed collections: + +* change collection: `_config.system.change_collection` +* pre-images: `_config.system.preimages` +* cluster parameters: `_config.system.cluster_parameters` + +A Shard Split operation will copy these collections from donor to recipient via Initial Sync. Upon completion, these collections will be cleaned up on the donor (by the cloud control plane) along with all other tenant-specific databases. + +A Shard Merge operation will copy these collections from donor to recipient via backup cursor. For writes that take place during the oplog catchup phase, some additional handling is required in order to ensure correctness of the data written to the tenant's change collection and pre-image collection. + +We extract the 'o2' entry from a given noop oplog entry written during this phase (which will contain the original entry on the donor timeline) and write it to the tenant's change collection (see [here](https://github.com/10gen/mongo/blob/26a441e07f3885dc8b3d9ef9b564eb4f5143bded/src/mongo/db/change_stream_change_collection_manager.cpp#L133-L135) for implementation details). Change collection entries written on the recipient during oplog catchup must be written on the donor timeline so that a change stream can be resumed on the recipient after the Shard Merge. + +For pre-image support, two oplog entry fields (`donorOpTime` and `donorApplyOpsIndex`, see [here](https://github.com/10gen/mongo/blob/26a441e07f3885dc8b3d9ef9b564eb4f5143bded/src/mongo/db/repl/oplog_entry.idl#L168-L180)) were added in order to ensure that pre-image entries written on the recipient will be identical to those on the donor. These fields are conditionally set on oplog entries written during the oplog catchup phase of a Shard Merge and used to determine which timestamp and applyOps index to use when writing pre-images. See [here](https://github.com/10gen/mongo/blob/07b38e091b48acd305469d525b81aebf3aeadbf1/src/mongo/db/repl/oplog.cpp#L1237-L1268) for details.
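To make the rule described in the Mutual Exclusion section above concrete, here is a minimal standalone sketch. It is not the actual `ServerlessOperationLockRegistry` API; the class, method, and identifier names are illustrative assumptions, synchronization and persistence are omitted, and plain integer operation ids stand in for the UUIDs used by the real registry:

```cpp
#include <optional>
#include <set>
#include <stdexcept>

// Illustrative sketch only: at most one *type* of serverless operation may hold the
// lock at a time, while any number of operations of that same type may share it.
class ExampleServerlessOperationLock {
public:
    enum class OpType { kShardSplit, kShardMerge, kTenantMigration };

    // Modeled on "acquired the first time a state document is inserted": inserting a
    // state document of a conflicting type is rejected, mirroring the
    // ConflictingServerlessOperation error described above.
    void acquire(OpType type, int operationId) {
        if (_activeType && *_activeType != type)
            throw std::runtime_error("ConflictingServerlessOperation");
        _activeType = type;
        _activeOperations.insert(operationId);
    }

    // Modeled on "released when an operation durably records its decision and marks its
    // state document as garbage collectable".
    void release(int operationId) {
        _activeOperations.erase(operationId);
        if (_activeOperations.empty())
            _activeType.reset();
    }

private:
    std::optional<OpType> _activeType;
    std::set<int> _activeOperations;
};
```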
+ diff --git a/src/mongo/db/serverless/SConscript b/src/mongo/db/serverless/SConscript index b0751b45f27c1..7131fc1bf7a64 100644 --- a/src/mongo/db/serverless/SConscript +++ b/src/mongo/db/serverless/SConscript @@ -3,6 +3,13 @@ Import("env") env = env.Clone() +env.Library(target='multitenancy_check', source=[ + 'multitenancy_check.cpp', +], LIBDEPS=[ + '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/service_context', +]) + env.Library( target='serverless_types_idl', source=[ @@ -40,6 +47,9 @@ env.Library( '$BUILD_DIR/mongo/db/server_base', 'shard_split_donor_service', ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/commands', + ], ) env.Library( @@ -101,6 +111,7 @@ env.Library( env.CppUnitTest( target='db_serverless_test', source=[ + 'multitenancy_check_test.cpp', 'serverless_operation_lock_registry_test.cpp', 'shard_split_donor_op_observer_test.cpp', 'shard_split_donor_service_test.cpp', @@ -115,6 +126,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/repl/replmocks', '$BUILD_DIR/mongo/db/repl/tenant_migration_access_blocker', '$BUILD_DIR/mongo/dbtests/mocklib', + 'multitenancy_check', 'serverless_lock', 'shard_split_donor_service', 'shard_split_utils', diff --git a/src/mongo/db/serverless/multitenancy_check.cpp b/src/mongo/db/serverless/multitenancy_check.cpp new file mode 100644 index 0000000000000..752e32014fc49 --- /dev/null +++ b/src/mongo/db/serverless/multitenancy_check.cpp @@ -0,0 +1,64 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/serverless/multitenancy_check.h" + +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" + +namespace mongo { + +const ServiceContext::Decoration> MultitenancyCheck::get = + ServiceContext::declareDecoration>(); + +MultitenancyCheck::MultitenancyCheck(bool multitenancySupport) + : _multitenancySupport(multitenancySupport) {} + +void MultitenancyCheck::checkDollarTenantField(const BSONObj& body) const { + uassert(ErrorCodes::InvalidOptions, + "Multitenancy not enabled, cannot set $tenant in command body", + _multitenancySupport || !body["$tenant"_sd]); +} + +const MultitenancyCheck* MultitenancyCheck::getPtr() { + if (!hasGlobalServiceContext()) { + // globalServiceContext is not always set for unit tests + return nullptr; + } + + return MultitenancyCheck::get(getGlobalServiceContext()).get(); +} + +void setUpMultitenancyCheck(ServiceContext* serviceContext, bool multitenancySupport) { + auto& multitenancyCheck = MultitenancyCheck::get(serviceContext); + + multitenancyCheck = std::make_unique(multitenancySupport); +} + +} // namespace mongo diff --git a/src/mongo/db/serverless/multitenancy_check.h b/src/mongo/db/serverless/multitenancy_check.h new file mode 100644 index 0000000000000..9a7b943e6dc17 --- /dev/null +++ b/src/mongo/db/serverless/multitenancy_check.h @@ -0,0 +1,56 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include "mongo/db/service_context.h" + +namespace mongo { + +/* + * ServiceContext decoration only set for mongod and mongos used to run multitenancy validations. + */ +class MultitenancyCheck { +public: + MultitenancyCheck(bool multitenancySupport); + + /* Validates BSONObj msg only contain $tenant field when multitenancySupport is enabled. 
*/ + void checkDollarTenantField(const BSONObj& msg) const; + + static const ServiceContext::Decoration> get; + + static const MultitenancyCheck* getPtr(); + +private: + bool _multitenancySupport; +}; + +void setUpMultitenancyCheck(ServiceContext* serviceContext, bool multitenancySupport); + +} // namespace mongo diff --git a/src/mongo/db/serverless/multitenancy_check_test.cpp b/src/mongo/db/serverless/multitenancy_check_test.cpp new file mode 100644 index 0000000000000..25096fdef5307 --- /dev/null +++ b/src/mongo/db/serverless/multitenancy_check_test.cpp @@ -0,0 +1,79 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/serverless/multitenancy_check.h" + +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/unittest/unittest.h" + +namespace mongo { + +TEST(MultitenancyCheck, NoGlobalService) { + ASSERT_FALSE(MultitenancyCheck::getPtr()); +} + +class MultitenancyCheckTest : public ServiceContextTest {}; + +TEST_F(MultitenancyCheckTest, NoMultitenancyCheck) { + ASSERT_FALSE(MultitenancyCheck::getPtr()); +} + +TEST_F(MultitenancyCheckTest, CheckDollarTenantNoMultitenancy) { + setUpMultitenancyCheck(getGlobalServiceContext(), false); + + auto check = MultitenancyCheck::getPtr(); + ASSERT_TRUE(check); + + check->checkDollarTenantField(BSON("test" + << "ok")); + + ASSERT_THROWS_CODE(check->checkDollarTenantField(BSON("test" + << "fail" + << "$tenant" + << "x")), + DBException, + ErrorCodes::InvalidOptions); +} + +TEST_F(MultitenancyCheckTest, CheckDollarTenantMultitenancy) { + setUpMultitenancyCheck(getGlobalServiceContext(), true); + + auto check = MultitenancyCheck::getPtr(); + ASSERT_TRUE(check); + + check->checkDollarTenantField(BSON("test" + << "ok")); + + check->checkDollarTenantField(BSON("test" + << "fail" + << "$tenant" + << "x")); +} + +} // namespace mongo diff --git a/src/mongo/db/serverless/serverless_operation_lock_registry.cpp b/src/mongo/db/serverless/serverless_operation_lock_registry.cpp index 050e160ac8e46..67d4ade0fb057 100644 --- a/src/mongo/db/serverless/serverless_operation_lock_registry.cpp +++ b/src/mongo/db/serverless/serverless_operation_lock_registry.cpp @@ -28,10 +28,26 @@ */ #include "mongo/db/serverless/serverless_operation_lock_registry.h" + +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/persistent_task_store.h" #include "mongo/db/repl/tenant_migration_state_machine_gen.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration diff --git a/src/mongo/db/serverless/serverless_operation_lock_registry.h b/src/mongo/db/serverless/serverless_operation_lock_registry.h index 578214d756496..74382c0babb19 100644 --- a/src/mongo/db/serverless/serverless_operation_lock_registry.h +++ b/src/mongo/db/serverless/serverless_operation_lock_registry.h @@ -29,13 +29,16 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" #include "mongo/util/uuid.h" -#include - namespace mongo { /** diff --git a/src/mongo/db/serverless/serverless_operation_lock_registry_test.cpp b/src/mongo/db/serverless/serverless_operation_lock_registry_test.cpp index 9d95b3b7bc7de..5af28b0eb1391 100644 --- a/src/mongo/db/serverless/serverless_operation_lock_registry_test.cpp +++ b/src/mongo/db/serverless/serverless_operation_lock_registry_test.cpp @@ -28,9 +28,19 @@ */ #include "mongo/db/serverless/serverless_operation_lock_registry.h" + +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/log_test.h" -#include 
"mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/serverless/serverless_server_status.cpp b/src/mongo/db/serverless/serverless_server_status.cpp index 8d0d4658dc365..92e1f337f914f 100644 --- a/src/mongo/db/serverless/serverless_server_status.cpp +++ b/src/mongo/db/serverless/serverless_server_status.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/serverless/shard_split_commands.cpp b/src/mongo/db/serverless/shard_split_commands.cpp index 936f51405d12c..e82732593a61e 100644 --- a/src/mongo/db/serverless/shard_split_commands.cpp +++ b/src/mongo/db/serverless/shard_split_commands.cpp @@ -27,14 +27,43 @@ * it in the license file. */ -#include "mongo/client/replica_set_monitor_server_parameters.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/global_settings.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/server_options.h" #include "mongo/db/serverless/shard_split_commands_gen.h" #include "mongo/db/serverless/shard_split_donor_service.h" -#include "mongo/util/fail_point.h" +#include "mongo/db/serverless/shard_split_state_machine_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -96,8 +125,9 @@ class CommitShardSplitCmd : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::runTenantMigration)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::runTenantMigration)); } bool supportsWriteConcern() const override { @@ -174,8 +204,9 @@ class AbortShardSplitCmd : public TypedCommand { uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::runTenantMigration)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::runTenantMigration)); } bool supportsWriteConcern() const override { @@ -251,8 +282,9 @@ class ForgetShardSplitCmd : public TypedCommand { 
uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::runTenantMigration)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::runTenantMigration)); } bool supportsWriteConcern() const override { diff --git a/src/mongo/db/serverless/shard_split_donor_op_observer.cpp b/src/mongo/db/serverless/shard_split_donor_op_observer.cpp index 5652265bedcf5..998ee9209a8aa 100644 --- a/src/mongo/db/serverless/shard_split_donor_op_observer.cpp +++ b/src/mongo/db/serverless/shard_split_donor_op_observer.cpp @@ -27,14 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog_raii.h" +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/serverless/shard_split_donor_op_observer.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" #include "mongo/db/serverless/shard_split_utils.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -102,6 +120,8 @@ ShardSplitDonorDocument parseAndValidateDonorDocument(const BSONObj& doc) { doc.toString()), !donorStateDoc.getAbortReason()); break; + case ShardSplitDonorStateEnum::kRecipientCaughtUp: + break; case ShardSplitDonorStateEnum::kCommitted: uassert(ErrorCodes::BadValue, fmt::format(errmsg, @@ -156,9 +176,12 @@ void onTransitionToAbortingIndexBuilds(OperationContext* opCtx, }); auto tenantIds = *donorStateDoc.getTenantIds(); - auto mtab = std::make_shared(opCtx->getServiceContext(), - donorStateDoc.getId()); - TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()).add(tenantIds, mtab); + auto& registry = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()); + for (const auto& tenantId : tenantIds) { + registry.add(tenantId, + std::make_shared(opCtx->getServiceContext(), + donorStateDoc.getId())); + } opCtx->recoveryUnit()->onRollback([migrationId = donorStateDoc.getId()](OperationContext* opCtx) { @@ -175,21 +198,25 @@ void onTransitionToBlocking(OperationContext* opCtx, const ShardSplitDonorDocume invariant(donorStateDoc.getState() == ShardSplitDonorStateEnum::kBlocking); invariant(donorStateDoc.getBlockOpTime()); - auto mtab = tenant_migration_access_blocker::getDonorAccessBlockerForMigration( - opCtx->getServiceContext(), donorStateDoc.getId()); - invariant(mtab); + auto mtabVector = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getDonorAccessBlockersForMigration(donorStateDoc.getId()); + invariant(!mtabVector.empty()); - if (isSecondary(opCtx)) { - // A primary calls startBlockingWrites on the TenantMigrationDonorAccessBlocker before - // reserving the OpTime for the "start blocking" write, so only secondaries call - // startBlockingWrites on the TenantMigrationDonorAccessBlocker in the op observer. 
- mtab->startBlockingWrites(); - } + for (auto& mtab : mtabVector) { + invariant(mtab); - // Both primaries and secondaries call startBlockingReadsAfter in the op observer, since - // startBlockingReadsAfter just needs to be called before the "start blocking" write's oplog - // hole is filled. - mtab->startBlockingReadsAfter(donorStateDoc.getBlockOpTime()->getTimestamp()); + if (isSecondary(opCtx)) { + // A primary calls startBlockingWrites on the TenantMigrationDonorAccessBlocker before + // reserving the OpTime for the "start blocking" write, so only secondaries call + // startBlockingWrites on the TenantMigrationDonorAccessBlocker in the op observer. + mtab->startBlockingWrites(); + } + + // Both primaries and secondaries call startBlockingReadsAfter in the op observer, since + // startBlockingReadsAfter just needs to be called before the "start blocking" write's oplog + // hole is filled. + mtab->startBlockingReadsAfter(donorStateDoc.getBlockOpTime()->getTimestamp()); + } } /** @@ -200,11 +227,15 @@ void onTransitionToCommitted(OperationContext* opCtx, invariant(donorStateDoc.getState() == ShardSplitDonorStateEnum::kCommitted); invariant(donorStateDoc.getCommitOrAbortOpTime()); - auto mtab = tenant_migration_access_blocker::getDonorAccessBlockerForMigration( - opCtx->getServiceContext(), donorStateDoc.getId()); - invariant(mtab); + auto mtabVector = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getDonorAccessBlockersForMigration(donorStateDoc.getId()); + invariant(!mtabVector.empty()); + + for (auto& mtab : mtabVector) { + invariant(mtab); - mtab->setCommitOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().value()); + mtab->setCommitOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().value()); + } } /** @@ -214,17 +245,21 @@ void onTransitionToAborted(OperationContext* opCtx, const ShardSplitDonorDocumen invariant(donorStateDoc.getState() == ShardSplitDonorStateEnum::kAborted); invariant(donorStateDoc.getCommitOrAbortOpTime()); - auto mtab = tenant_migration_access_blocker::getDonorAccessBlockerForMigration( - opCtx->getServiceContext(), donorStateDoc.getId()); - if (!mtab) { - // The only case where there can be no tenants is when the instance is created by the - // abort command. In that case, no tenant migration blockers are created and the state - // will go straight to abort. - invariant(donorStateDoc.getState() == ShardSplitDonorStateEnum::kUninitialized); - return; - } + auto mtabVector = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getDonorAccessBlockersForMigration(donorStateDoc.getId()); + invariant(!mtabVector.empty()); + + for (auto& mtab : mtabVector) { + if (!mtab) { + // The only case where there can be no tenants is when the instance is created by the + // abort command. In that case, no tenant migration blockers are created and the state + // will go straight to abort. 
+ invariant(donorStateDoc.getState() == ShardSplitDonorStateEnum::kUninitialized); + continue; + } - mtab->setAbortOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().value()); + mtab->setAbortOpTime(opCtx, donorStateDoc.getCommitOrAbortOpTime().value()); + } } /** @@ -241,14 +276,15 @@ class TenantMigrationDonorCommitOrAbortHandler final : public RecoveryUnit::Chan ServerlessOperationLockRegistry::get(opCtx->getServiceContext()) .releaseLock(ServerlessOperationLockRegistry::LockType::kShardSplit, _donorStateDoc.getId()); - auto mtab = tenant_migration_access_blocker::getDonorAccessBlockerForMigration( - opCtx->getServiceContext(), _donorStateDoc.getId()); - if (!mtab) { + auto mtabVector = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getDonorAccessBlockersForMigration(_donorStateDoc.getId()); + if (mtabVector.empty()) { // The state doc and TenantMigrationDonorAccessBlocker for this // migration were removed immediately after expireAt was set. This is // unlikely to occur in production where the garbage collection delay - // should be sufficiently large. + // should be sufficiently large. All access blockers for the split should be in + // the same state. return; } @@ -265,11 +301,17 @@ class TenantMigrationDonorCommitOrAbortHandler final : public RecoveryUnit::Chan // opTime has been majority committed (guaranteed to be true since by // design the donor never marks its state doc as garbage collectable // before the migration decision is majority committed). - mtab->onMajorityCommitPointUpdate(_donorStateDoc.getCommitOrAbortOpTime().value()); + std::for_each(mtabVector.begin(), + mtabVector.end(), + [opTime = _donorStateDoc.getCommitOrAbortOpTime().value()]( + auto& mtab) { mtab->onMajorityCommitPointUpdate(opTime); }); } if (_donorStateDoc.getState() == ShardSplitDonorStateEnum::kAborted) { + std::for_each(mtabVector.begin(), mtabVector.end(), [](auto& mtab) { + invariant(mtab->inStateAborted()); + }); + // The migration durably aborted and is now marked as garbage // collectable, remove its TenantMigrationDonorAccessBlocker right away // to allow back-to-back migration retries. 
@@ -306,7 +348,8 @@ void ShardSplitDonorOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (coll->ns() != NamespaceString::kShardSplitDonorsNamespace || tenant_migration_access_blocker::inRecoveryMode(opCtx)) { return; @@ -330,7 +373,8 @@ void ShardSplitDonorOpObserver::onInserts(OperationContext* opCtx, } void ShardSplitDonorOpObserver::onUpdate(OperationContext* opCtx, - const OplogUpdateEntryArgs& args) { + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (args.coll->ns() != NamespaceString::kShardSplitDonorsNamespace || tenant_migration_access_blocker::inRecoveryMode(opCtx)) { return; @@ -338,6 +382,8 @@ void ShardSplitDonorOpObserver::onUpdate(OperationContext* opCtx, auto donorStateDoc = parseAndValidateDonorDocument(args.updateArgs->updatedDoc); switch (donorStateDoc.getState()) { + case ShardSplitDonorStateEnum::kRecipientCaughtUp: + break; case ShardSplitDonorStateEnum::kBlocking: onTransitionToBlocking(opCtx, donorStateDoc); break; @@ -355,7 +401,9 @@ void ShardSplitDonorOpObserver::onUpdate(OperationContext* opCtx, void ShardSplitDonorOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - BSONObj const& doc) { + BSONObj const& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { if (coll->ns() != NamespaceString::kShardSplitDonorsNamespace || tenant_migration_access_blocker::inRecoveryMode(opCtx)) { return; @@ -388,7 +436,8 @@ void ShardSplitDonorOpObserver::aboutToDelete(OperationContext* opCtx, void ShardSplitDonorOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { if (coll->ns() != NamespaceString::kShardSplitDonorsNamespace || !splitCleanupDetails(opCtx) || tenant_migration_access_blocker::inRecoveryMode(opCtx)) { return; @@ -416,7 +465,8 @@ repl::OpTime ShardSplitDonorOpObserver::onDropCollection(OperationContext* opCtx const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { if (collectionName == NamespaceString::kShardSplitDonorsNamespace) { opCtx->recoveryUnit()->onCommit([](OperationContext* opCtx, boost::optional) { TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) diff --git a/src/mongo/db/serverless/shard_split_donor_op_observer.h b/src/mongo/db/serverless/shard_split_donor_op_observer.h index c8e65f7b294ff..90085cb286f44 100644 --- a/src/mongo/db/serverless/shard_split_donor_op_observer.h +++ b/src/mongo/db/serverless/shard_split_donor_op_observer.h @@ -29,14 +29,27 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { /** - * OpObserver for tenant migration donor. + * OpObserver for tenant migration donor access blocker. 
*/ -class ShardSplitDonorOpObserver final : public OpObserver { +class ShardSplitDonorOpObserver final : public OpObserverNoop { ShardSplitDonorOpObserver(const ShardSplitDonorOpObserver&) = delete; ShardSplitDonorOpObserver& operator=(const ShardSplitDonorOpObserver&) = delete; @@ -44,209 +57,43 @@ class ShardSplitDonorOpObserver final : public OpObserver { ShardSplitDonorOpObserver() = default; ~ShardSplitDonorOpObserver() = default; - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfig, NamespaceFilter::kConfig}; + } void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) final; - - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} - - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} + const OplogDeleteEntryArgs& args, + OpStateAccumulator* 
opAccumulator = nullptr) final; - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) final {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - - void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final {} - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) final {} - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } - void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - - void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void 
onBatchedWriteAbort(OperationContext* opCtx) final {} + CollectionDropType dropType, + bool markFromMigrate) final; void onMajorityCommitPointUpdate(ServiceContext* service, const repl::OpTime& newCommitPoint) final; - -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final { - } }; } // namespace mongo diff --git a/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp b/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp index ebe45c4fc1ae1..5c7ee459a7c5f 100644 --- a/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp +++ b/src/mongo/db/serverless/shard_split_donor_op_observer_test.cpp @@ -27,25 +27,55 @@ * it in the license file. */ +#include +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/mongo_uri.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" +#include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/serverless/shard_split_donor_op_observer.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" #include "mongo/db/serverless/shard_split_test_utils.h" #include "mongo/db/serverless/shard_split_utils.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" #include "mongo/dbtests/mock/mock_replica_set.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { - class ShardSplitDonorOpObserverTest : public ServiceContextMongoDTest { public: void setUp() override { @@ -57,8 +87,8 @@ class ShardSplitDonorOpObserverTest : public ServiceContextMongoDTest { repl::StorageInterface::set(service, std::make_unique()); // Set up ReplicationCoordinator and create oplog. - auto coordinatorMock = - std::make_unique(service, createReplSettings()); + auto coordinatorMock = std::make_unique( + service, repl::createServerlessReplSettings()); _replicationCoordinatorMock = coordinatorMock.get(); repl::ReplicationCoordinator::set(service, std::move(coordinatorMock)); @@ -114,7 +144,7 @@ class ShardSplitDonorOpObserverTest : public ServiceContextMongoDTest { // If there's an exception, aborting without removing the access blocker will trigger an // invariant. This creates a confusing error log in the test output. 
- test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, _opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, _opCtx.get()); const auto criteria = BSON("_id" << stateDocument.getId()); auto preImageDoc = defaultStateDocument(); @@ -139,20 +169,27 @@ class ShardSplitDonorOpObserverTest : public ServiceContextMongoDTest { scopedTenants.dismiss(); } - std::shared_ptr createAccessBlockerAndStartBlockingWrites( - const UUID& migrationId, - const std::vector& tenants, - OperationContext* opCtx, - bool isSecondary = false) { - auto mtab = std::make_shared(_opCtx->getServiceContext(), - migrationId); + std::vector> + createAccessBlockerAndStartBlockingWrites(const UUID& migrationId, + const std::vector& tenants, + OperationContext* opCtx, + bool isSecondary = false) { + + std::vector> result; + for (const auto& tenantId : tenants) { + auto mtab = std::make_shared( + _opCtx->getServiceContext(), migrationId); + + if (!isSecondary) { + mtab->startBlockingWrites(); + } - if (!isSecondary) { - mtab->startBlockingWrites(); + TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .add(tenantId, mtab); + result.push_back(mtab); } - TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()).add(tenants, mtab); - return mtab; + return result; } ShardSplitDonorDocument defaultStateDocument() const { @@ -191,18 +228,9 @@ class ShardSplitDonorOpObserverTest : public ServiceContextMongoDTest { mtabVerifier(mtab); } - for (const auto& tenantId : tenants) { - TenantMigrationAccessBlockerRegistry::get(_opCtx->getServiceContext()) - .remove(tenantId, TenantMigrationAccessBlocker::BlockerType::kDonor); - } - } - // Creates a reasonable set of ReplSettings for most tests. We need to be able to - // override this to create a larger oplog. 
- virtual repl::ReplSettings createReplSettings() { - repl::ReplSettings settings; - settings.setOplogSizeBytes(5 * 1024 * 1024); - settings.setReplSetString("mySet/node1:12345"); - return settings; + TenantMigrationAccessBlockerRegistry::get(_opCtx->getServiceContext()) + .removeAccessBlockersForMigration(_uuid, + TenantMigrationAccessBlocker::BlockerType::kDonor); } }; @@ -386,8 +414,10 @@ TEST_F(ShardSplitDonorOpObserverTest, TransitionToCommit) { stateDocument.setBlockOpTime(repl::OpTime(Timestamp(1, 2), 1)); stateDocument.setCommitOrAbortOpTime(commitOpTime); - auto mtab = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); - mtab->startBlockingReadsAfter(Timestamp(1)); + auto mtabVector = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); + for (auto& mtab : mtabVector) { + mtab->startBlockingReadsAfter(Timestamp(1)); + } auto mtabVerifier = [opCtx = _opCtx.get()](std::shared_ptr mtab) { ASSERT_TRUE(mtab); @@ -418,8 +448,10 @@ TEST_F(ShardSplitDonorOpObserverTest, TransitionToAbort) { stateDocument.setCommitOrAbortOpTime(abortOpTime); stateDocument.setAbortReason(bob.obj()); - auto mtab = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); - mtab->startBlockingReadsAfter(Timestamp(1)); + auto mtabVector = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); + for (auto& mtab : mtabVector) { + mtab->startBlockingReadsAfter(Timestamp(1)); + } auto mtabVerifier = [opCtx = _opCtx.get()](std::shared_ptr mtab) { ASSERT_TRUE(mtab); @@ -450,9 +482,11 @@ TEST_F(ShardSplitDonorOpObserverTest, SetExpireAtForAbortedRemoveBlockers) { stateDocument.setAbortReason(bob.obj()); stateDocument.setExpireAt(mongo::Date_t::fromMillisSinceEpoch(1000)); - auto mtab = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); - mtab->startBlockingReadsAfter(Timestamp(1)); - mtab->setAbortOpTime(_opCtx.get(), *stateDocument.getCommitOrAbortOpTime()); + auto mtabVector = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); + for (auto& mtab : mtabVector) { + mtab->startBlockingReadsAfter(Timestamp(1)); + mtab->setAbortOpTime(_opCtx.get(), *stateDocument.getCommitOrAbortOpTime()); + } auto mtabVerifier = [opCtx = _opCtx.get()](std::shared_ptr mtab) { ASSERT_FALSE(mtab); @@ -483,17 +517,19 @@ TEST_F(ShardSplitDonorOpObserverTest, DeleteAbortedDocumentDoesNotRemoveBlockers stateDocument.setAbortReason(bob.obj()); stateDocument.setExpireAt(mongo::Date_t::fromMillisSinceEpoch(1000)); - auto mtab = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); - mtab->startBlockingReadsAfter(Timestamp(1)); - mtab->setAbortOpTime(_opCtx.get(), *stateDocument.getCommitOrAbortOpTime()); + auto mtabVector = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); + for (auto& mtab : mtabVector) { + mtab->startBlockingReadsAfter(Timestamp(1)); + mtab->setAbortOpTime(_opCtx.get(), *stateDocument.getCommitOrAbortOpTime()); + } auto bsonDoc = stateDocument.toBSON(); WriteUnitOfWork wuow(_opCtx.get()); AutoGetCollection autoColl(_opCtx.get(), NamespaceString::kShardSplitDonorsNamespace, MODE_IX); - _observer->aboutToDelete(_opCtx.get(), *autoColl, bsonDoc); - OplogDeleteEntryArgs deleteArgs; + _observer->aboutToDelete(_opCtx.get(), *autoColl, bsonDoc, &deleteArgs); + deleteArgs.deletedDoc = &bsonDoc; _observer->onDelete(_opCtx.get(), *autoColl, 0 /* stmtId */, deleteArgs); @@ -518,9 +554,11 @@ TEST_F(ShardSplitDonorOpObserverTest, 
DeleteCommittedDocumentRemovesBlockers) { stateDocument.setCommitOrAbortOpTime(commitOpTime); stateDocument.setExpireAt(mongo::Date_t::fromMillisSinceEpoch(1000)); - auto mtab = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); - mtab->startBlockingReadsAfter(Timestamp(1)); - mtab->setCommitOpTime(_opCtx.get(), *stateDocument.getCommitOrAbortOpTime()); + auto mtabVector = createAccessBlockerAndStartBlockingWrites(_uuid, _tenantIds, _opCtx.get()); + for (auto& mtab : mtabVector) { + mtab->startBlockingReadsAfter(Timestamp(1)); + mtab->setCommitOpTime(_opCtx.get(), *stateDocument.getCommitOrAbortOpTime()); + } ServerlessOperationLockRegistry::get(_opCtx->getServiceContext()) .acquireLock(ServerlessOperationLockRegistry::LockType::kShardSplit, stateDocument.getId()); @@ -529,9 +567,9 @@ TEST_F(ShardSplitDonorOpObserverTest, DeleteCommittedDocumentRemovesBlockers) { WriteUnitOfWork wuow(_opCtx.get()); AutoGetCollection autoColl(_opCtx.get(), NamespaceString::kShardSplitDonorsNamespace, MODE_IX); - _observer->aboutToDelete(_opCtx.get(), *autoColl, bsonDoc); - OplogDeleteEntryArgs deleteArgs; + _observer->aboutToDelete(_opCtx.get(), *autoColl, bsonDoc, &deleteArgs); + deleteArgs.deletedDoc = &bsonDoc; _observer->onDelete(_opCtx.get(), *autoColl, 0 /* stmtId */, deleteArgs); diff --git a/src/mongo/db/serverless/shard_split_donor_service.cpp b/src/mongo/db/serverless/shard_split_donor_service.cpp index 02db5006360b8..7cbcb27bde584 100644 --- a/src/mongo/db/serverless/shard_split_donor_service.cpp +++ b/src/mongo/db/serverless/shard_split_donor_service.cpp @@ -30,29 +30,80 @@ #include "mongo/db/serverless/shard_split_donor_service.h" -#include "mongo/client/streamable_replica_set_monitor.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/client/replica_set_monitor_stats.h" +#include "mongo/client/sdam/sdam_configuration.h" +#include "mongo/client/sdam/topology_listener.h" +#include "mongo/client/server_discovery_monitor.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/catalog/local_oplog_info.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/tenant_migration_access_blocker_util.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" +#include "mongo/db/repl/tenant_migration_donor_access_blocker.h" #include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/s/resharding/resharding_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/serverless/serverless_types_gen.h" #include 
"mongo/db/serverless/shard_split_statistics.h" #include "mongo/db/serverless/shard_split_utils.h" -#include "mongo/executor/cancelable_executor.h" -#include "mongo/executor/connection_pool.h" -#include "mongo/executor/network_interface_factory.h" -#include "mongo/executor/network_interface_thread_pool.h" -#include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/future_util.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -64,6 +115,7 @@ namespace { MONGO_FAIL_POINT_DEFINE(abortShardSplitBeforeLeavingBlockingState); MONGO_FAIL_POINT_DEFINE(pauseShardSplitBeforeBlockingState); MONGO_FAIL_POINT_DEFINE(pauseShardSplitAfterBlocking); +MONGO_FAIL_POINT_DEFINE(pauseShardSplitAfterRecipientCaughtUp); MONGO_FAIL_POINT_DEFINE(pauseShardSplitAfterDecision); MONGO_FAIL_POINT_DEFINE(skipShardSplitGarbageCollectionTimeout); MONGO_FAIL_POINT_DEFINE(skipShardSplitWaitForSplitAcceptance); @@ -344,6 +396,8 @@ SemiFuture ShardSplitDonorService::DonorStateMachine::run( return _waitForRecipientToReachBlockOpTime(executor, abortToken); }) .then([this, executor, abortToken, criticalSectionWithoutCatchupTimer] { + auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); + pauseShardSplitAfterRecipientCaughtUp.pauseWhileSet(opCtx.get()); criticalSectionWithoutCatchupTimer->reset(); return _applySplitConfigToDonor(executor, abortToken); }) @@ -532,7 +586,7 @@ ConnectionString ShardSplitDonorService::DonorStateMachine::_setupAcceptanceMoni // Always start the replica set monitor if we haven't reached a decision yet _splitAcceptancePromise.setWith([&]() { - if (_stateDoc.getState() > ShardSplitDonorStateEnum::kBlocking || + if (_stateDoc.getState() > ShardSplitDonorStateEnum::kRecipientCaughtUp || MONGO_unlikely(skipShardSplitWaitForSplitAcceptance.shouldFail())) { return Future::makeReady(StatusWith(HostAndPort{})); } @@ -660,7 +714,7 @@ ExecutorFuture ShardSplitDonorService::DonorStateMachine::_waitForRecipien checkForTokenInterrupt(abortToken); stdx::lock_guard lg(_mutex); - if (_stateDoc.getState() > ShardSplitDonorStateEnum::kBlocking || + if (_stateDoc.getState() >= ShardSplitDonorStateEnum::kRecipientCaughtUp || _hasInstalledSplitConfig(lg)) { return ExecutorFuture(**executor); } @@ -685,11 +739,27 @@ ExecutorFuture ShardSplitDonorService::DonorStateMachine::_waitForRecipien LOGV2( 6177201, "Waiting for recipient nodes to reach block timestamp.", 
"id"_attr = _migrationId); - return ExecutorFuture(**executor).then([this, blockOpTime, writeConcern]() { - auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); - auto replCoord = repl::ReplicationCoordinator::get(cc().getServiceContext()); - uassertStatusOK(replCoord->awaitReplication(opCtx.get(), blockOpTime, writeConcern).status); - }); + return ExecutorFuture(**executor) + .then([this, blockOpTime, writeConcern]() { + auto opCtx = _cancelableOpCtxFactory->makeOperationContext(&cc()); + auto replCoord = repl::ReplicationCoordinator::get(cc().getServiceContext()); + uassertStatusOK( + replCoord->awaitReplication(opCtx.get(), blockOpTime, writeConcern).status); + }) + .then([this, executor, abortToken]() { + { + stdx::lock_guard lg(_mutex); + LOGV2(8423389, + "Entering 'recipient caught up' state.", + "id"_attr = _stateDoc.getId()); + } + + return _updateStateDocument( + executor, abortToken, ShardSplitDonorStateEnum::kRecipientCaughtUp) + .then([this, self = shared_from_this(), executor, abortToken](repl::OpTime opTime) { + return _waitForMajorityWriteConcern(executor, std::move(opTime), abortToken); + }); + }); } ExecutorFuture ShardSplitDonorService::DonorStateMachine::_applySplitConfigToDonor( @@ -790,7 +860,7 @@ ShardSplitDonorService::DonorStateMachine::_waitForSplitAcceptanceAndEnterCommit checkForTokenInterrupt(abortToken); { stdx::lock_guard lg(_mutex); - if (_stateDoc.getState() > ShardSplitDonorStateEnum::kBlocking) { + if (_stateDoc.getState() > ShardSplitDonorStateEnum::kRecipientCaughtUp) { return ExecutorFuture(**executor); } } @@ -881,114 +951,124 @@ ExecutorFuture ShardSplitDonorService::DonorStateMachine::_updateS auto opCtxHolder = _cancelableOpCtxFactory->makeOperationContext(&cc()); auto opCtx = opCtxHolder.get(); - AutoGetCollection collection(opCtx, _stateDocumentsNS, MODE_IX); + auto collection = + acquireCollection(opCtx, + CollectionAcquisitionRequest( + _stateDocumentsNS, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); if (!isInsert) { uassert(ErrorCodes::NamespaceNotFound, - str::stream() << _stateDocumentsNS.ns() << " does not exist", - collection); + str::stream() + << _stateDocumentsNS.toStringForErrorMsg() << " does not exist", + collection.exists()); } - writeConflictRetry( - opCtx, "ShardSplitDonorUpdateStateDoc", _stateDocumentsNS.ns(), [&]() { - WriteUnitOfWork wuow(opCtx); - - if (nextState == ShardSplitDonorStateEnum::kBlocking) { - // Start blocking writes before getting an oplog slot to guarantee no - // writes to the tenant's data can commit with a timestamp after the - // block timestamp. - auto mtab = - tenant_migration_access_blocker::getDonorAccessBlockerForMigration( - _serviceContext, uuid); + writeConflictRetry(opCtx, "ShardSplitDonorUpdateStateDoc", _stateDocumentsNS, [&]() { + WriteUnitOfWork wuow(opCtx); + + if (nextState == ShardSplitDonorStateEnum::kBlocking) { + // Start blocking writes before getting an oplog slot to guarantee no + // writes to the tenant's data can commit with a timestamp after the + // block timestamp. + auto mtabVector = + TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) + .getDonorAccessBlockersForMigration(uuid); + invariant(!mtabVector.empty()); + + for (auto& mtab : mtabVector) { invariant(mtab); mtab->startBlockingWrites(); opCtx->recoveryUnit()->onRollback( [mtab](OperationContext*) { mtab->rollBackStartBlocking(); }); } - - // Reserve an opTime for the write. 
- auto oplogSlot = LocalOplogInfo::get(opCtx)->getNextOpTimes(opCtx, 1U)[0]; - auto updatedStateDocBson = [&]() { - stdx::lock_guard lg(_mutex); - _stateDoc.setState(nextState); - switch (nextState) { - case ShardSplitDonorStateEnum::kUninitialized: - case ShardSplitDonorStateEnum::kAbortingIndexBuilds: - break; - case ShardSplitDonorStateEnum::kBlocking: - _stateDoc.setBlockOpTime(oplogSlot); - break; - case ShardSplitDonorStateEnum::kCommitted: - _stateDoc.setCommitOrAbortOpTime(oplogSlot); - break; - case ShardSplitDonorStateEnum::kAborted: { - _stateDoc.setCommitOrAbortOpTime(oplogSlot); - - invariant(_abortReason); - BSONObjBuilder bob; - _abortReason.value().serializeErrorToBSON(&bob); - _stateDoc.setAbortReason(bob.obj()); - break; - } - default: - MONGO_UNREACHABLE; - } - if (isInsert) { - return BSON("$setOnInsert" << _stateDoc.toBSON()); + } + + // Reserve an opTime for the write. + auto oplogSlot = LocalOplogInfo::get(opCtx)->getNextOpTimes(opCtx, 1U)[0]; + auto updatedStateDocBson = [&]() { + stdx::lock_guard lg(_mutex); + _stateDoc.setState(nextState); + switch (nextState) { + case ShardSplitDonorStateEnum::kUninitialized: + case ShardSplitDonorStateEnum::kAbortingIndexBuilds: + case ShardSplitDonorStateEnum::kRecipientCaughtUp: + break; + case ShardSplitDonorStateEnum::kBlocking: + _stateDoc.setBlockOpTime(oplogSlot); + break; + case ShardSplitDonorStateEnum::kCommitted: + _stateDoc.setCommitOrAbortOpTime(oplogSlot); + break; + case ShardSplitDonorStateEnum::kAborted: { + _stateDoc.setCommitOrAbortOpTime(oplogSlot); + + invariant(_abortReason); + BSONObjBuilder bob; + _abortReason.value().serializeErrorToBSON(&bob); + _stateDoc.setAbortReason(bob.obj()); + break; } + default: + MONGO_UNREACHABLE; + } + if (isInsert) { + return BSON("$setOnInsert" << _stateDoc.toBSON()); + } - return _stateDoc.toBSON(); - }(); - - auto updateOpTime = [&]() { - if (isInsert) { - const auto filter = - BSON(ShardSplitDonorDocument::kIdFieldName << uuid); - auto updateResult = Helpers::upsert(opCtx, - _stateDocumentsNS, - filter, - updatedStateDocBson, - /*fromMigrate=*/false); - - // '$setOnInsert' update operator can never modify an existing - // on-disk state doc. - invariant(!updateResult.existing); - invariant(!updateResult.numDocsModified); - - return repl::ReplClientInfo::forClient(opCtx->getClient()) - .getLastOp(); - } + return _stateDoc.toBSON(); + }(); - const auto originalRecordId = - Helpers::findOne(opCtx, - collection.getCollection(), - BSON("_id" << originalStateDocBson["_id"])); - const auto originalSnapshot = Snapshotted( - opCtx->recoveryUnit()->getSnapshotId(), originalStateDocBson); - invariant(!originalRecordId.isNull()); - - CollectionUpdateArgs args{originalSnapshot.value()}; - args.criteria = BSON("_id" << uuid); - args.oplogSlots = {oplogSlot}; - args.update = updatedStateDocBson; - - collection_internal::updateDocument( - opCtx, - *collection, - originalRecordId, - originalSnapshot, - updatedStateDocBson, - collection_internal::kUpdateNoIndexes, - nullptr /* OpDebug* */, - &args); - - return oplogSlot; - }(); - - wuow.commit(); - return updateOpTime; - }); + auto updateOpTime = [&]() { + if (isInsert) { + const auto filter = BSON(ShardSplitDonorDocument::kIdFieldName << uuid); + auto updateResult = Helpers::upsert(opCtx, + collection, + filter, + updatedStateDocBson, + /*fromMigrate=*/false); + + // '$setOnInsert' update operator can never modify an existing + // on-disk state doc. 
+ invariant(!updateResult.existing); + invariant(!updateResult.numDocsModified); + + return repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); + } + + const auto originalRecordId = + Helpers::findOne(opCtx, + collection.getCollectionPtr(), + BSON("_id" << originalStateDocBson["_id"])); + const auto originalSnapshot = Snapshotted( + opCtx->recoveryUnit()->getSnapshotId(), originalStateDocBson); + invariant(!originalRecordId.isNull()); + + CollectionUpdateArgs args{originalSnapshot.value()}; + args.criteria = BSON("_id" << uuid); + args.oplogSlots = {oplogSlot}; + args.update = updatedStateDocBson; + + collection_internal::updateDocument(opCtx, + collection.getCollectionPtr(), + originalRecordId, + originalSnapshot, + updatedStateDocBson, + collection_internal::kUpdateNoIndexes, + nullptr /* indexesAffected */, + nullptr /* OpDebug* */, + &args); + + return oplogSlot; + }(); + + wuow.commit(); + return updateOpTime; + }); return repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp(); }) diff --git a/src/mongo/db/serverless/shard_split_donor_service.h b/src/mongo/db/serverless/shard_split_donor_service.h index aa871fe6bf086..669c50a9ac0b1 100644 --- a/src/mongo/db/serverless/shard_split_donor_service.h +++ b/src/mongo/db/serverless/shard_split_donor_service.h @@ -29,12 +29,38 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/primary_only_service.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" +#include "mongo/db/service_context.h" #include "mongo/executor/cancelable_executor.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/serverless/shard_split_donor_service_test.cpp b/src/mongo/db/serverless/shard_split_donor_service_test.cpp index 39ada1d30cd0e..b06f79bc25e52 100644 --- a/src/mongo/db/serverless/shard_split_donor_service_test.cpp +++ b/src/mongo/db/serverless/shard_split_donor_service_test.cpp @@ -28,49 +28,80 @@ */ +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" -#include "mongo/client/replica_set_monitor.h" -#include "mongo/client/sdam/server_description_builder.h" -#include "mongo/client/streamable_replica_set_monitor_for_testing.h" -#include "mongo/db/catalog/database_holder_mock.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/db/catalog_raii.h" +#include 
"mongo/db/client.h" +#include "mongo/db/commands.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/op_observer/op_observer_impl.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/primary_only_service.h" -#include "mongo/db/repl/primary_only_service_op_observer.h" #include "mongo/db/repl/primary_only_service_test_fixture.h" +#include "mongo/db/repl/repl_server_parameters_gen.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/repl/storage_interface_impl.h" -#include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" -#include "mongo/db/repl/tenant_migration_donor_access_blocker.h" -#include "mongo/db/repl/wait_for_majority_service.h" #include "mongo/db/serverless/serverless_operation_lock_registry.h" #include "mongo/db/serverless/shard_split_donor_op_observer.h" #include "mongo/db/serverless/shard_split_donor_service.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" #include "mongo/db/serverless/shard_split_test_utils.h" #include "mongo/db/serverless/shard_split_utils.h" -#include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/dbtests/mock/mock_conn_registry.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/dbtests/mock/mock_replica_set.h" -#include "mongo/executor/network_interface.h" -#include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_mock.h" -#include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -87,6 +118,7 @@ namespace mongo { * Returns 'NoMatchingDocument' error code if no document with 'shardSplitId' is found. */ namespace { + StatusWith getStateDocument(OperationContext* opCtx, const UUID& shardSplitId) { // Use kLastApplied so that we can read the state document as a secondary. 
@@ -167,25 +199,6 @@ class MockReplReconfigCommand : public Command { BSONObj _msg; } mockReplSetReconfigCmd; -namespace { -sdam::TopologyDescriptionPtr makeRecipientTopologyDescription(const MockReplicaSet& set) { - std::shared_ptr topologyDescription = - std::make_shared(sdam::SdamConfiguration( - set.getHosts(), sdam::TopologyType::kReplicaSetNoPrimary, set.getSetName())); - - for (auto& server : set.getHosts()) { - auto serverDescription = sdam::ServerDescriptionBuilder() - .withAddress(server) - .withSetName(set.getSetName()) - .instance(); - topologyDescription->installServerDescription(serverDescription); - } - - return topologyDescription; -} - -} // namespace - std::ostream& operator<<(std::ostream& builder, mongo::ShardSplitDonorStateEnum state) { switch (state) { case mongo::ShardSplitDonorStateEnum::kUninitialized: @@ -200,6 +213,9 @@ std::ostream& operator<<(std::ostream& builder, mongo::ShardSplitDonorStateEnum case mongo::ShardSplitDonorStateEnum::kBlocking: builder << "kBlocking"; break; + case mongo::ShardSplitDonorStateEnum::kRecipientCaughtUp: + builder << "kRecipientCaughtUp"; + break; case mongo::ShardSplitDonorStateEnum::kCommitted: builder << "kCommitted"; break; @@ -232,7 +248,8 @@ void fastForwardCommittedSnapshotOpTime( bool hasActiveSplitForTenants(OperationContext* opCtx, const std::vector& tenantIds) { return std::all_of(tenantIds.begin(), tenantIds.end(), [&](const auto& tenantId) { return tenant_migration_access_blocker::hasActiveTenantMigration( - opCtx, DatabaseName(tenantId.toString() + "_db")); + opCtx, + DatabaseName::createDatabaseName_forTest(boost::none, tenantId.toString() + "_db")); }); } @@ -294,8 +311,8 @@ bool processReplSetStepUpRequest(executor::NetworkInterfaceMock* net, auto noi = net->getNextReadyRequest(); auto request = noi->getRequest(); - // The command can also be `isMaster` - assertRemoteCommandIn({"replSetStepUp", "isMaster"}, request); + // The command can also be `hello` + assertRemoteCommandIn({"replSetStepUp", "hello"}, request); auto&& cmdObj = request.cmdObj; auto requestHost = request.target.toString(); @@ -394,6 +411,11 @@ class ShardSplitDonorServiceTest : public repl::PrimaryOnlyServiceMongoDTest { repl::PrimaryOnlyServiceMongoDTest::tearDown(); } + std::unique_ptr makeReplicationCoordinator() override { + return std::make_unique(getServiceContext(), + _replSettings); + } + protected: std::unique_ptr makeService(ServiceContext* serviceContext) override { return std::make_unique(serviceContext); @@ -446,15 +468,16 @@ class ShardSplitDonorServiceTest : public repl::PrimaryOnlyServiceMongoDTest { void waitForMonitorAndProcessHello() { _net->enterNetwork(); waitForReadyRequest(_net); - processIncomingRequest(_net, &_recipientSet, "isMaster"); + processIncomingRequest(_net, &_recipientSet, "hello"); waitForReadyRequest(_net); - processIncomingRequest(_net, &_recipientSet, "isMaster"); + processIncomingRequest(_net, &_recipientSet, "hello"); waitForReadyRequest(_net); - processIncomingRequest(_net, &_recipientSet, "isMaster"); + processIncomingRequest(_net, &_recipientSet, "hello"); _net->runReadyNetworkOperations(); _net->exitNetwork(); } + const repl::ReplSettings _replSettings = repl::createServerlessReplSettings(); UUID _uuid = UUID::gen(); MockReplicaSet _replSet{ "donorSetForTest", 3, true /* hasPrimary */, false /* dollarPrefixHosts */}; @@ -490,13 +513,13 @@ void mockCommandReplies(MockReplicaSet* replSet) { auto node = replSet->getNode(hostAndPort.toString()); node->setCommandReply("replSetStepUp", BSON("ok" << 1)); 
node->setCommandReply("appendOplogNote", BSON("ok" << 1)); - node->setCommandReply("isMaster", makeHelloReply(replSet->getSetName())); + node->setCommandReply("hello", makeHelloReply(replSet->getSetName())); } } TEST_F(ShardSplitDonorServiceTest, BasicShardSplitDonorServiceInstanceCreation) { auto opCtx = makeOperationContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( getServiceContext(), _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -559,7 +582,7 @@ TEST_F(ShardSplitDonorServiceTest, ShardSplitFailsWhenLockIsHeld) { TEST_F(ShardSplitDonorServiceTest, ReplSetStepUpRetryable) { auto opCtx = makeOperationContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( getServiceContext(), _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -599,7 +622,7 @@ TEST_F(ShardSplitDonorServiceTest, ShardSplitDonorServiceTimeout) { auto opCtx = makeOperationContext(); auto serviceContext = getServiceContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( serviceContext, _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -628,7 +651,7 @@ TEST_F(ShardSplitDonorServiceTest, ShardSplitDonorServiceTimeout) { TEST_F(ShardSplitDonorServiceTest, ReconfigToRemoveSplitConfig) { auto opCtx = makeOperationContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( getServiceContext(), _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -686,7 +709,7 @@ TEST_F(ShardSplitDonorServiceTest, SendReplSetStepUpToHighestLastApplied) { // by replacing the default `hello` replies (set by the MockReplicaSet) with ones that report // `lastWrite.opTime` values in a deterministic way. 
auto opCtx = makeOperationContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( getServiceContext(), _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -695,7 +718,7 @@ TEST_F(ShardSplitDonorServiceTest, SendReplSetStepUpToHighestLastApplied) { mockCommandReplies(&_recipientSet); auto recipientPrimary = _recipientSet.getNode(_recipientSet.getHosts()[1].toString()); - recipientPrimary->setCommandReply("isMaster", makeHelloReply(_recipientSetName, newerOpTime)); + recipientPrimary->setCommandReply("hello", makeHelloReply(_recipientSetName, newerOpTime)); for (auto&& recipientNodeHost : _recipientSet.getHosts()) { if (recipientNodeHost == recipientPrimary->getServerHostAndPort()) { @@ -703,7 +726,7 @@ TEST_F(ShardSplitDonorServiceTest, SendReplSetStepUpToHighestLastApplied) { } auto recipientNode = _recipientSet.getNode(recipientNodeHost.toString()); - recipientNode->setCommandReply("isMaster", makeHelloReply(_recipientSetName, olderOpTime)); + recipientNode->setCommandReply("hello", makeHelloReply(_recipientSetName, olderOpTime)); } _skipAcceptanceFP.reset(); @@ -730,7 +753,7 @@ TEST_F(ShardSplitDonorServiceTest, CreateInstanceInAbortedState) { auto opCtx = makeOperationContext(); auto serviceContext = getServiceContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( serviceContext, _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -758,7 +781,7 @@ TEST_F(ShardSplitDonorServiceTest, CreateInstanceThenAbort) { auto opCtx = makeOperationContext(); auto serviceContext = getServiceContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( serviceContext, _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -791,7 +814,7 @@ TEST_F(ShardSplitDonorServiceTest, CreateInstanceThenAbort) { TEST_F(ShardSplitDonorServiceTest, StepDownTest) { auto opCtx = makeOperationContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( getServiceContext(), _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -825,7 +848,7 @@ TEST_F(ShardSplitDonorServiceTest, DeleteStateDocMarkedGarbageCollectable) { auto opCtx = makeOperationContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( getServiceContext(), _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -858,7 +881,7 @@ TEST_F(ShardSplitDonorServiceTest, DeleteStateDocMarkedGarbageCollectable) { TEST_F(ShardSplitDonorServiceTest, AbortDueToRecipientNodesValidation) { auto opCtx = makeOperationContext(); auto serviceContext = getServiceContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); // Matching 
recipientSetName to the replSetName to fail validation and abort shard split. test::shard_split::reconfigToAddRecipientNodes( @@ -961,7 +984,7 @@ TEST(RecipientAcceptSplitListenerTest, FutureNotReadyWrongSet) { TEST_F(ShardSplitDonorServiceTest, ResumeAfterStepdownTest) { auto opCtx = makeOperationContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); test::shard_split::reconfigToAddRecipientNodes( getServiceContext(), _recipientTagName, _replSet.getHosts(), _recipientSet.getHosts()); @@ -1061,7 +1084,7 @@ class ShardSplitRecipientCleanupTest : public ShardSplitPersistenceTest { TEST_F(ShardSplitRecipientCleanupTest, ShardSplitRecipientCleanup) { auto opCtx = makeOperationContext(); - test::shard_split::ScopedTenantAccessBlocker scopedTenants(_tenantIds, opCtx.get()); + test::shard_split::ScopedTenantAccessBlocker scopedTenants(_uuid, opCtx.get()); ASSERT_OK(getStateDocument(opCtx.get(), _uuid).getStatus()); diff --git a/src/mongo/db/serverless/shard_split_state_machine.idl b/src/mongo/db/serverless/shard_split_state_machine.idl index a29750a705a1d..154221416fd89 100644 --- a/src/mongo/db/serverless/shard_split_state_machine.idl +++ b/src/mongo/db/serverless/shard_split_state_machine.idl @@ -42,6 +42,7 @@ enums: kUninitialized: "uninitialized" kAbortingIndexBuilds: "aborting index builds" kBlocking: "blocking" + kRecipientCaughtUp: "recipient caught up" kCommitted: "committed" kAborted: "aborted" diff --git a/src/mongo/db/serverless/shard_split_statistics.cpp b/src/mongo/db/serverless/shard_split_statistics.cpp index 953940465386c..4e25d41daae92 100644 --- a/src/mongo/db/serverless/shard_split_statistics.cpp +++ b/src/mongo/db/serverless/shard_split_statistics.cpp @@ -28,7 +28,15 @@ */ #include "mongo/db/serverless/shard_split_statistics.h" + +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/serverless/shard_split_statistics.h b/src/mongo/db/serverless/shard_split_statistics.h index 7cd8a0bb33615..13055c8d06370 100644 --- a/src/mongo/db/serverless/shard_split_statistics.h +++ b/src/mongo/db/serverless/shard_split_statistics.h @@ -29,8 +29,12 @@ #pragma once +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/db/serverless/shard_split_test_utils.cpp b/src/mongo/db/serverless/shard_split_test_utils.cpp index 5c459edf2289b..97a3d662c1507 100644 --- a/src/mongo/db/serverless/shard_split_test_utils.cpp +++ b/src/mongo/db/serverless/shard_split_test_utils.cpp @@ -28,28 +28,42 @@ */ #include "mongo/db/serverless/shard_split_test_utils.h" + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" -#include "mongo/db/serverless/shard_split_state_machine_gen.h" +#include 
"mongo/db/service_context.h" +#include "mongo/util/decorable.h" #include "mongo/util/uuid.h" namespace mongo { namespace test { namespace shard_split { -ScopedTenantAccessBlocker::ScopedTenantAccessBlocker(const std::vector& tenants, - OperationContext* opCtx) - : _tenants(tenants), _opCtx(opCtx) {} +ScopedTenantAccessBlocker::ScopedTenantAccessBlocker(const UUID& uuid, OperationContext* opCtx) + : _uuid(uuid), _opCtx(opCtx) {} ScopedTenantAccessBlocker::~ScopedTenantAccessBlocker() { - for (const auto& tenant : _tenants) { + if (_uuid) { TenantMigrationAccessBlockerRegistry::get(_opCtx->getServiceContext()) - .remove(tenant, TenantMigrationAccessBlocker::BlockerType::kDonor); + .removeAccessBlockersForMigration(*_uuid, + TenantMigrationAccessBlocker::BlockerType::kDonor); } } void ScopedTenantAccessBlocker::dismiss() { - _tenants.clear(); + _uuid.reset(); } void reconfigToAddRecipientNodes(ServiceContext* serviceContext, diff --git a/src/mongo/db/serverless/shard_split_test_utils.h b/src/mongo/db/serverless/shard_split_test_utils.h index 7992232627655..c3e9f10cc6a1d 100644 --- a/src/mongo/db/serverless/shard_split_test_utils.h +++ b/src/mongo/db/serverless/shard_split_test_utils.h @@ -29,8 +29,14 @@ #pragma once +#include +#include +#include +#include + #include "mongo/db/tenant_id.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { class OperationContext; @@ -44,13 +50,13 @@ namespace shard_split { // test fixture, which introduces additional errors in the test and makes debugging harder. class ScopedTenantAccessBlocker { public: - ScopedTenantAccessBlocker(const std::vector& tenants, OperationContext* opCtx); + ScopedTenantAccessBlocker(const UUID& uuid, OperationContext* opCtx); ~ScopedTenantAccessBlocker(); void dismiss(); private: - std::vector _tenants; + boost::optional _uuid; OperationContext* _opCtx; }; diff --git a/src/mongo/db/serverless/shard_split_utils.cpp b/src/mongo/db/serverless/shard_split_utils.cpp index 14fa3708cda58..55b862fe180a9 100644 --- a/src/mongo/db/serverless/shard_split_utils.cpp +++ b/src/mongo/db/serverless/shard_split_utils.cpp @@ -28,14 +28,45 @@ */ #include "mongo/db/serverless/shard_split_utils.h" -#include "mongo/db/catalog_raii.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/concurrency/lock_manager_defs.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/ops/delete.h" +#include "mongo/db/ops/update_result.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/repl_set_config.h" -#include "mongo/logv2/log_debug.h" +#include "mongo/db/repl/repl_set_tag.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -153,19 +184,26 @@ repl::ReplSetConfig makeSplitConfig(const repl::ReplSetConfig& config, Status 
insertStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& stateDoc) { const auto nss = NamespaceString::kShardSplitDonorsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); uassert(ErrorCodes::PrimarySteppedDown, str::stream() << "No longer primary while attempting to insert shard split" " state document", repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesFor(opCtx, nss)); - return writeConflictRetry(opCtx, "insertShardSplitStateDoc", nss.ns(), [&]() -> Status { + return writeConflictRetry(opCtx, "insertShardSplitStateDoc", nss, [&]() -> Status { const auto filter = BSON(ShardSplitDonorDocument::kIdFieldName << stateDoc.getId() << ShardSplitDonorDocument::kExpireAtFieldName << BSON("$exists" << false)); const auto updateMod = BSON("$setOnInsert" << stateDoc.toBSON()); - auto updateResult = Helpers::upsert(opCtx, nss, filter, updateMod, /*fromMigrate=*/false); + auto updateResult = + Helpers::upsert(opCtx, collection, filter, updateMod, /*fromMigrate=*/false); invariant(!updateResult.numDocsModified); if (updateResult.upsertedId.isEmpty()) { @@ -179,15 +217,22 @@ Status insertStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& st Status updateStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& stateDoc) { const auto nss = NamespaceString::kShardSplitDonorsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); - - if (!collection) { + auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + if (!collection.exists()) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << nss.ns() << " does not exist"); + str::stream() << nss.toStringForErrorMsg() << " does not exist"); } - return writeConflictRetry(opCtx, "updateShardSplitStateDoc", nss.ns(), [&]() -> Status { - auto updateResult = Helpers::upsert(opCtx, nss, stateDoc.toBSON(), /*fromMigrate=*/false); + return writeConflictRetry(opCtx, "updateShardSplitStateDoc", nss, [&]() -> Status { + auto updateResult = + Helpers::upsert(opCtx, collection, stateDoc.toBSON(), /*fromMigrate=*/false); if (updateResult.numMatched == 0) { return {ErrorCodes::NoSuchKey, str::stream() << "Existing shard split state document not found for id: " @@ -200,16 +245,21 @@ Status updateStateDoc(OperationContext* opCtx, const ShardSplitDonorDocument& st StatusWith deleteStateDoc(OperationContext* opCtx, const UUID& shardSplitId) { const auto nss = NamespaceString::kShardSplitDonorsNamespace; - AutoGetCollection collection(opCtx, nss, MODE_IX); - - if (!collection) { + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(nss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + if (!collection.exists()) { return Status(ErrorCodes::NamespaceNotFound, - str::stream() << nss.ns() << " does not exist"); + str::stream() << nss.toStringForErrorMsg() << " does not exist"); } auto query = BSON(ShardSplitDonorDocument::kIdFieldName << shardSplitId); - return writeConflictRetry(opCtx, "ShardSplitDonorDeleteStateDoc", nss.ns(), [&]() -> bool { - auto nDeleted = - deleteObjects(opCtx, 
collection.getCollection(), nss, query, true /* justOne */); + return writeConflictRetry(opCtx, "ShardSplitDonorDeleteStateDoc", nss, [&]() -> bool { + auto nDeleted = deleteObjects(opCtx, collection, query, true /* justOne */); return nDeleted > 0; }); } diff --git a/src/mongo/db/serverless/shard_split_utils.h b/src/mongo/db/serverless/shard_split_utils.h index db24008a0552b..87daa85cc371f 100644 --- a/src/mongo/db/serverless/shard_split_utils.h +++ b/src/mongo/db/serverless/shard_split_utils.h @@ -29,10 +29,29 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" #include "mongo/client/sdam/topology_listener.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_config.h" #include "mongo/db/repl/optime_with.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/serverless/shard_split_state_machine_gen.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { namespace serverless { diff --git a/src/mongo/db/serverless/shard_split_utils_test.cpp b/src/mongo/db/serverless/shard_split_utils_test.cpp index 9eb37e96f9df7..5df5d8fbfb6a4 100644 --- a/src/mongo/db/serverless/shard_split_utils_test.cpp +++ b/src/mongo/db/serverless/shard_split_utils_test.cpp @@ -27,9 +27,22 @@ * it in the license file. */ +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/repl/repl_set_config_test.h" #include "mongo/db/serverless/shard_split_utils.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace repl { diff --git a/src/mongo/db/service_context.cpp b/src/mongo/db/service_context.cpp index 863acd0932842..4dceb94853721 100644 --- a/src/mongo/db/service_context.cpp +++ b/src/mongo/db/service_context.cpp @@ -28,30 +28,37 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/service_context.h" - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include #include #include -#include "mongo/base/init.h" -#include "mongo/bson/bsonobj.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/db/client.h" #include "mongo/db/default_baton.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/recovery_unit_noop.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/transport/service_entry_point.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer.h" #include "mongo/util/assert_util.h" #include "mongo/util/processinfo.h" -#include "mongo/util/str.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/system_clock_source.h" #include "mongo/util/system_tick_source.h" -#include #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -293,7 +300,7 @@ ServiceContext::UniqueOperationContext ServiceContext::makeOperationContext(Clie batonGuard.dismiss(); { - stdx::lock_guard lk(_mutex); + stdx::lock_guard lk(_clientByOpIdMutex); bool clientByOperationContextInsertionSuccessful = _clientByOperationId.insert({opCtx->getOpID(), client}).second; invariant(clientByOperationContextInsertionSuccessful); @@ -317,7 +324,8 @@ void ServiceContext::OperationContextDeleter::operator()(OperationContext* opCtx } LockedClient ServiceContext::getLockedClient(OperationId id) { - stdx::lock_guard lk(_mutex); + stdx::lock_guard lk(_clientByOpIdMutex); + auto it = _clientByOperationId.find(id); if (it == _clientByOperationId.end()) { return {}; @@ -394,7 +402,7 @@ void ServiceContext::_delistOperation(OperationContext* opCtx) noexcept { // its client to prevent situations that another thread could use the service context to get a // hold of an `opCtx` that has been removed from its client. { - stdx::lock_guard lk(_mutex); + stdx::lock_guard lk(_clientByOpIdMutex); if (_clientByOperationId.erase(opCtx->getOpID()) != 1) { // Another thread has already delisted this `opCtx`. return; diff --git a/src/mongo/db/service_context.h b/src/mongo/db/service_context.h index 283112fbf6e68..83e117da5a2c3 100644 --- a/src/mongo/db/service_context.h +++ b/src/mongo/db/service_context.h @@ -30,11 +30,21 @@ #pragma once #include +#include +#include #include +#include #include #include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/db/baton.h" #include "mongo/db/operation_id.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/storage/storage_change_lock.h" @@ -42,6 +52,7 @@ #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" #include "mongo/transport/session.h" #include "mongo/util/clock_source.h" @@ -53,13 +64,12 @@ #include "mongo/util/tick_source.h" #include "mongo/util/uuid.h" -#include - namespace mongo { class AbstractMessagingPort; class Client; class OperationContext; + class OpObserver; class ServiceEntryPoint; @@ -744,6 +754,7 @@ class ServiceContext final : public Decorable { /** * Managing classes for our issued operation IDs. */ + Mutex _clientByOpIdMutex = MONGO_MAKE_LATCH("ServiceContext::_clientByOpIdMutex"); std::shared_ptr _opIdRegistry; stdx::unordered_map _clientByOperationId; diff --git a/src/mongo/db/service_context_d_test_fixture.cpp b/src/mongo/db/service_context_d_test_fixture.cpp index 9bfe876350f35..efcb562fd494b 100644 --- a/src/mongo/db/service_context_d_test_fixture.cpp +++ b/src/mongo/db/service_context_d_test_fixture.cpp @@ -27,44 +27,50 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - #include "mongo/db/service_context_d_test_fixture.h" -#include +#include -#include "mongo/base/checked_cast.h" -#include "mongo/db/catalog/catalog_control.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_impl.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_impl.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/global_settings.h" +#include "mongo/db/index_builds_coordinator.h" #include "mongo/db/index_builds_coordinator_mongod.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/repl/repl_settings.h" -#include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/collection_sharding_state_factory_shard.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_entry_point_mongod.h" #include "mongo/db/storage/control/storage_control.h" +#include "mongo/db/storage/execution_control/concurrency_adjustment_parameters_gen.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_init.h" -#include "mongo/db/storage/storage_engine_parameters_gen.h" #include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" -#include "mongo/util/assert_util.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/transport/service_entry_point.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/periodic_runner.h" #include "mongo/util/periodic_runner_factory.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { ServiceContextMongoDTest::ServiceContextMongoDTest(Options options) : _journalListener(std::move(options._journalListener)), _tempDir("service_context_d_test_fixture") { - gStorageEngineConcurrencyAdjustmentAlgorithm = ""; + gStorageEngineConcurrencyAdjustmentAlgorithm = "fixedConcurrentTransactions"; if (options._forceDisableTableLogging) { storageGlobalParams.forceDisableTableLogging = true; @@ -99,6 +105,7 @@ ServiceContextMongoDTest::ServiceContextMongoDTest(Options options) } auto const serviceContext = getServiceContext(); + if (options._useMockClock) { // Copied from dbtests.cpp. DBTests sets up a controlled mock clock while // ServiceContextMongoDTest uses the system clock. Tests moved from dbtests to unittests may @@ -128,9 +135,12 @@ ServiceContextMongoDTest::ServiceContextMongoDTest(Options options) serviceContext->setServiceEntryPoint(std::make_unique(serviceContext)); + auto observerRegistry = std::make_unique(); + serviceContext->setOpObserver(std::move(observerRegistry)); + // Set up the periodic runner to allow background job execution for tests that require it. 
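The fixture hunk above now installs an OpObserverRegistry on the ServiceContext before the periodic runner is started. Below is a minimal sketch of that wiring, assuming the registry's addObserver hook and a hypothetical TestOpObserver; it is an illustration, not the fixture code verbatim.

```cpp
// Minimal sketch, not the fixture verbatim: install a composite op observer and a
// periodic runner on a ServiceContext during test setup.
#include <memory>

#include "mongo/db/op_observer/op_observer_registry.h"
#include "mongo/db/service_context.h"
#include "mongo/util/periodic_runner_factory.h"

void wireUpServiceContext(mongo::ServiceContext* serviceContext) {
    auto observerRegistry = std::make_unique<mongo::OpObserverRegistry>();
    // observerRegistry->addObserver(std::make_unique<TestOpObserver>());  // hypothetical observer
    serviceContext->setOpObserver(std::move(observerRegistry));

    // Background jobs started by tests need a periodic runner on the same context.
    serviceContext->setPeriodicRunner(mongo::makePeriodicRunner(serviceContext));
}
```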
- auto runner = makePeriodicRunner(getServiceContext()); - getServiceContext()->setPeriodicRunner(std::move(runner)); + auto runner = makePeriodicRunner(serviceContext); + serviceContext->setPeriodicRunner(std::move(runner)); storageGlobalParams.dbpath = _tempDir.path(); @@ -147,9 +157,8 @@ ServiceContextMongoDTest::ServiceContextMongoDTest(Options options) Collection::Factory::set(serviceContext, std::make_unique()); IndexBuildsCoordinator::set(serviceContext, std::make_unique()); CollectionShardingStateFactory::set( - getServiceContext(), - std::make_unique(getServiceContext())); - getServiceContext()->getStorageEngine()->notifyStartupComplete(); + serviceContext, std::make_unique(serviceContext)); + serviceContext->getStorageEngine()->notifyStartupComplete(); if (_journalListener) { serviceContext->getStorageEngine()->setJournalListener(_journalListener.get()); diff --git a/src/mongo/db/service_context_d_test_fixture.h b/src/mongo/db/service_context_d_test_fixture.h index 763038eb95e9f..e059c2686214e 100644 --- a/src/mongo/db/service_context_d_test_fixture.h +++ b/src/mongo/db/service_context_d_test_fixture.h @@ -29,10 +29,16 @@ #pragma once +#include +#include +#include + #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/storage/journal_listener.h" #include "mongo/db/storage/storage_engine_init.h" #include "mongo/unittest/temp_dir.h" +#include "mongo/util/duration.h" +#include "mongo/util/tick_source.h" #include "mongo/util/tick_source_mock.h" namespace mongo { diff --git a/src/mongo/db/service_context_devnull_test_fixture.cpp b/src/mongo/db/service_context_devnull_test_fixture.cpp deleted file mode 100644 index c420b7eaa2c61..0000000000000 --- a/src/mongo/db/service_context_devnull_test_fixture.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */ - -#include "mongo/platform/basic.h" - -#include "mongo/db/service_context_devnull_test_fixture.h" - -namespace mongo { - -ServiceContextDevnullTestFixture::ServiceContextDevnullTestFixture() - : ServiceContextMongoDTest(Options{}.engine("devnull")) {} - -} // namespace mongo diff --git a/src/mongo/db/service_context_devnull_test_fixture.h b/src/mongo/db/service_context_devnull_test_fixture.h deleted file mode 100644 index dc77788c4d7fd..0000000000000 --- a/src/mongo/db/service_context_devnull_test_fixture.h +++ /dev/null @@ -1,44 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/service_context_d_test_fixture.h" - -namespace mongo { - -/** - * Test fixture class for mongod tests that use the "devnull" storage engine. - */ -class ServiceContextDevnullTestFixture : public ServiceContextMongoDTest { -protected: - ServiceContextDevnullTestFixture(); -}; - -} // namespace mongo diff --git a/src/mongo/db/service_context_fwd.h b/src/mongo/db/service_context_fwd.h deleted file mode 100644 index 869448d1d8d30..0000000000000 --- a/src/mongo/db/service_context_fwd.h +++ /dev/null @@ -1,36 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. 
If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -namespace mongo { - -class ServiceContext; - -} // namespace mongo diff --git a/src/mongo/db/service_context_test_fixture.cpp b/src/mongo/db/service_context_test_fixture.cpp index 1bc1e9320ced8..3a822ba5157f7 100644 --- a/src/mongo/db/service_context_test_fixture.cpp +++ b/src/mongo/db/service_context_test_fixture.cpp @@ -27,18 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/service_context_test_fixture.h" -#include +#include #include "mongo/client/replica_set_monitor_manager.h" -#include "mongo/db/client.h" -#include "mongo/db/op_observer/op_observer_registry.h" -#include "mongo/util/assert_util.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/diagnostic_info.h" namespace mongo { @@ -49,15 +43,8 @@ ScopedGlobalServiceContextForTest::ScopedGlobalServiceContextForTest() { clkSource.reset(); } - auto serviceContext = [] { - auto serviceContext = ServiceContext::make(); - auto serviceContextPtr = serviceContext.get(); - setGlobalServiceContext(std::move(serviceContext)); - return serviceContextPtr; - }(); - - auto observerRegistry = std::make_unique(); - serviceContext->setOpObserver(std::move(observerRegistry)); + auto serviceContext = ServiceContext::make(); + setGlobalServiceContext(std::move(serviceContext)); } ScopedGlobalServiceContextForTest::~ScopedGlobalServiceContextForTest() { diff --git a/src/mongo/db/service_context_test_fixture.h b/src/mongo/db/service_context_test_fixture.h index a553b3d752e6a..5379b6b46b85c 100644 --- a/src/mongo/db/service_context_test_fixture.h +++ b/src/mongo/db/service_context_test_fixture.h @@ -29,8 +29,10 @@ #pragma once +#include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/unittest.h" namespace mongo { diff --git a/src/mongo/db/service_entry_point_common.cpp b/src/mongo/db/service_entry_point_common.cpp index 3d5e515301f22..8ecf88d730ddb 100644 --- a/src/mongo/db/service_entry_point_common.cpp +++ b/src/mongo/db/service_entry_point_common.cpp @@ -30,90 +30,152 @@ #include "mongo/db/service_entry_point_common.h" +#include +#include +#include +#include +#include #include - -#include "mongo/base/checked_cast.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/document.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/client/server_discovery_monitor.h" -#include "mongo/db/audit.h" -#include "mongo/db/auth/authorization_checks.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/impersonation_session.h" #include "mongo/db/auth/ldap_cumulative_operation_stats.h" +#include 
"mongo/db/auth/ldap_operation_stats.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/security_token_authentication_guard.h" +#include "mongo/db/auth/user_acquisition_stats.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/command_can_run_here.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" #include "mongo/db/curop_metrics.h" -#include "mongo/db/cursor_manager.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/database_name.h" #include "mongo/db/error_labels.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/initialize_api_parameters.h" +#include "mongo/db/initialize_operation_session_info.h" #include "mongo/db/introspect.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/logical_time_validator.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/not_primary_error_tracker.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/ops/write_ops.h" -#include "mongo/db/ops/write_ops_exec.h" -#include "mongo/db/query/find.h" -#include "mongo/db/read_concern.h" +#include "mongo/db/query/max_time_ms_parser.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/read_concern_support_result.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/read_write_concern_defaults_gen.h" +#include "mongo/db/read_write_concern_provenance.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/speculative_majority_read_info.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/request_execution_context.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/s/resharding/resharding_metrics_helpers.h" +#include "mongo/db/s/sharding_cluster_parameters_gen.h" #include "mongo/db/s/sharding_state.h" #include "mongo/db/s/sharding_statistics.h" #include "mongo/db/s/transaction_coordinator_factory.h" #include "mongo/db/server_feature_flags_gen.h" -#include "mongo/db/service_entry_point_common.h" -#include "mongo/db/session/initialize_operation_session_info.h" -#include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_id.h" #include "mongo/db/stats/api_version_metrics.h" #include "mongo/db/stats/counters.h" #include "mongo/db/stats/resource_consumption_metrics.h" #include "mongo/db/stats/server_read_concern_metrics.h" #include 
"mongo/db/stats/top.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction_validation.h" #include "mongo/db/vector_clock.h" +#include "mongo/db/write_concern.h" +#include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/check_allowed_op_query_cmd.h" #include "mongo/rpc/factory.h" -#include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/message.h" #include "mongo/rpc/metadata.h" #include "mongo/rpc/metadata/client_metadata.h" -#include "mongo/rpc/metadata/oplog_query_metadata.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/rpc/metadata/tracking_metadata.h" #include "mongo/rpc/op_msg.h" +#include "mongo/rpc/protocol.h" #include "mongo/rpc/reply_builder_interface.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/s/analyze_shard_key_role.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" #include "mongo/s/query_analysis_sampler.h" #include "mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/would_change_owning_shard_exception.h" #include "mongo/transport/hello_metrics.h" #include "mongo/transport/service_executor.h" #include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -122,7 +184,6 @@ namespace mongo { MONGO_FAIL_POINT_DEFINE(respondWithNotPrimaryInCommandDispatch); MONGO_FAIL_POINT_DEFINE(skipCheckingForNotPrimaryInCommandDispatch); -MONGO_FAIL_POINT_DEFINE(sleepMillisAfterCommandExecutionBegins); MONGO_FAIL_POINT_DEFINE(waitAfterNewStatementBlocksBehindPrepare); MONGO_FAIL_POINT_DEFINE(waitAfterNewStatementBlocksBehindOpenInternalTransactionForRetryableWrite); MONGO_FAIL_POINT_DEFINE(waitAfterCommandFinishesExecution); @@ -200,8 +261,9 @@ struct HandleRequest { void assertValidNsString() { if (!nsString().isValid()) { - uassert( - 16257, str::stream() << "Invalid ns [" << nsString().toString() << "]", false); + uassert(16257, + str::stream() << "Invalid ns [" << nsString().toStringForErrorMsg() << "]", + false); } } @@ -498,6 +560,10 @@ void appendErrorLabelsAndTopologyVersion(OperationContext* opCtx, bool isInternalClient, const repl::OpTime& lastOpBeforeRun, const repl::OpTime& lastOpAfterRun) { + if (!code && !wcCode) { + return; + } + auto errorLabels = getErrorLabels(opCtx, sessionOptions, commandName, @@ -519,8 +585,8 @@ void appendErrorLabelsAndTopologyVersion(OperationContext* opCtx, const auto replCoord = repl::ReplicationCoordinator::get(opCtx); // NotPrimary errors always include a topologyVersion, since we increment topologyVersion on // stepdown. 
ShutdownErrors only include a topologyVersion if the server is in quiesce mode, - // since we only increment the topologyVersion at shutdown and alert waiting isMaster commands - // if the server enters quiesce mode. + // since we only increment the topologyVersion at shutdown and alert waiting isMaster/hello + // commands if the server enters quiesce mode. const auto shouldAppendTopologyVersion = (replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet && isNotPrimaryError) || @@ -538,7 +604,7 @@ void appendErrorLabelsAndTopologyVersion(OperationContext* opCtx, void appendAdditionalParticipants(OperationContext* opCtx, BSONObjBuilder* commandBodyFieldsBob, const std::string& commandName, - const std::string& ns) { + StringData ns) { // (Ignore FCV check): This feature doesn't have any upgrade/downgrade concerns. if (gFeatureFlagAdditionalParticipants.isEnabledAndIgnoreFCVUnsafe()) { std::vector shardIdsFromFpData; @@ -548,7 +614,7 @@ void appendAdditionalParticipants(OperationContext* opCtx, data.hasField("shardId")) { shardIdsFromFpData = data.getField("shardId").Array(); return ((data.getStringField("cmdName") == commandName) && - (data.getStringField("ns").toString() == ns)); + (data.getStringField("ns") == ns)); } return false; }))) { @@ -745,6 +811,10 @@ class ExecCommandDatabase { bool _refreshedDatabase = false; bool _refreshedCollection = false; bool _refreshedCatalogCache = false; + // Keep a static variable to track the last time a warning about direct shard connections was + // logged. + static Mutex _staticMutex; + static Date_t _lastDirectConnectionWarningTime; }; class RunCommandImpl { @@ -874,7 +944,7 @@ Future InvokeCommand::run() { const auto dbName = _ecd->getInvocation()->ns().dbName(); // TODO SERVER-53761: find out if we can do this more asynchronously. The client // Strand is locked to current thread in SessionWorkflow::Impl::startNewLoop(). - tenant_migration_access_blocker::checkIfCanReadOrBlock( + tenant_migration_access_blocker::checkIfCanRunCommandOrBlock( execContext->getOpCtx(), dbName, execContext->getRequest()) .get(execContext->getOpCtx()); return runCommandInvocation(_ecd->getExecutionContext(), _ecd->getInvocation()); @@ -893,7 +963,7 @@ Future CheckoutSessionAndInvokeCommand::run() { auto execContext = _ecd->getExecutionContext(); const auto dbName = _ecd->getInvocation()->ns().dbName(); // TODO SERVER-53761: find out if we can do this more asynchronously. - tenant_migration_access_blocker::checkIfCanReadOrBlock( + tenant_migration_access_blocker::checkIfCanRunCommandOrBlock( execContext->getOpCtx(), dbName, execContext->getRequest()) .get(execContext->getOpCtx()); return runCommandInvocation(_ecd->getExecutionContext(), _ecd->getInvocation()); @@ -1215,7 +1285,6 @@ void RunCommandImpl::_epilogue() { &waitAfterCommandFinishesExecution, opCtx, "waitAfterCommandFinishesExecution"); }, [&](const BSONObj& data) { - auto ns = data["ns"].valueStringDataSafe(); auto commands = data.hasField("commands") ? data["commands"].Array() : std::vector(); bool requestMatchesComment = data.hasField("comment") @@ -1224,7 +1293,8 @@ void RunCommandImpl::_epilogue() { // If 'ns', 'commands', or 'comment' is not set, block for all the namespaces, commands, // or comments respectively. 
- return (ns.empty() || _ecd->getInvocation()->ns().ns() == ns) && + const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "ns"); + return (fpNss.isEmpty() || _ecd->getInvocation()->ns() == fpNss) && (commands.empty() || std::any_of(commands.begin(), commands.end(), @@ -1438,6 +1508,9 @@ Future RunCommandAndWaitForWriteConcern::_checkWriteConcern() { return Status::OK(); } +Mutex ExecCommandDatabase::_staticMutex = MONGO_MAKE_LATCH("DirectShardConnectionTimer"); +Date_t ExecCommandDatabase::_lastDirectConnectionWarningTime = Date_t(); + void ExecCommandDatabase::_initiateCommand() { auto opCtx = _execContext->getOpCtx(); auto& request = _execContext->getRequest(); @@ -1458,7 +1531,7 @@ void ExecCommandDatabase::_initiateCommand() { _tokenAuthorizationSessionGuard.emplace(opCtx, request.validatedTenancyScope.value()); } - if (isHello()) { + if (MONGO_unlikely(isHello())) { // Preload generic ClientMetadata ahead of our first hello request. After the first // request, metaElement should always be empty. auto metaElem = request.body[kMetadataDocumentName]; @@ -1466,28 +1539,19 @@ void ExecCommandDatabase::_initiateCommand() { ClientMetadata::setFromMetadata(opCtx->getClient(), metaElem, isInternalClient); } - auto& apiParams = APIParameters::get(opCtx); - auto& apiVersionMetrics = APIVersionMetrics::get(opCtx->getServiceContext()); if (auto clientMetadata = ClientMetadata::get(client)) { + auto& apiParams = APIParameters::get(opCtx); + auto& apiVersionMetrics = APIVersionMetrics::get(opCtx->getServiceContext()); auto appName = clientMetadata->getApplicationName().toString(); apiVersionMetrics.update(appName, apiParams); } - sleepMillisAfterCommandExecutionBegins.execute([&](const BSONObj& data) { - auto numMillis = data["millis"].numberInt(); - auto commands = data["commands"].Obj().getFieldNames>(); - // Only sleep for one of the specified commands. - if (commands.find(command->getName()) != commands.end()) { - mongo::sleepmillis(numMillis); - } - }); - rpc::TrackingMetadata::get(opCtx).initWithOperName(command->getName()); auto const replCoord = repl::ReplicationCoordinator::get(opCtx); _sessionOptions = initializeOperationSessionInfo(opCtx, - request.body, + request, command->requiresAuth(), command->attachLogicalSessionsToOpCtx(), replCoord->getReplicationMode() == @@ -1499,15 +1563,17 @@ void ExecCommandDatabase::_initiateCommand() { CommandHelpers::evaluateFailCommandFailPoint(opCtx, _invocation.get()); - const auto dbname = request.getDatabase().toString(); + const auto dbName = + DatabaseNameUtil::deserialize(request.getValidatedTenantId(), request.getDatabase()); uassert(ErrorCodes::InvalidNamespace, - fmt::format("Invalid database name: '{}'", dbname), - NamespaceString::validDBName(dbname, NamespaceString::DollarInDbNameBehavior::Allow)); + fmt::format("Invalid database name: '{}'", dbName.toStringForErrorMsg()), + NamespaceString::validDBName(dbName, NamespaceString::DollarInDbNameBehavior::Allow)); + // Connections from mongod or mongos clients (i.e. initial sync, mirrored reads, etc.) should // not contribute to resource consumption metrics. 
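The waitAfterCommandFinishesExecution block above now parses the failpoint's "ns" field into a NamespaceString instead of comparing raw strings. A sketch of the executeIf shape under the same assumption (parseFailPointData as used in the hunk; myFailPoint and invocationNss are hypothetical stand-ins):

```cpp
// Sketch of the FailPoint::executeIf pattern used above. 'myFailPoint' and
// 'invocationNss' are hypothetical stand-ins; an empty "ns" in the failpoint
// data acts as a wildcard, otherwise the parsed namespace must match.
myFailPoint.executeIf(
    [&](const BSONObj&) {
        // Triggered behavior, e.g. hang until the failpoint is turned off.
    },
    [&](const BSONObj& data) {
        const auto fpNss = NamespaceStringUtil::parseFailPointData(data, "ns");
        return fpNss.isEmpty() || invocationNss == fpNss;
    });
```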
const bool collect = command->collectsResourceConsumptionMetrics() && !_isInternalClient(); - _scopedMetrics.emplace(opCtx, dbname, collect); + _scopedMetrics.emplace(opCtx, dbName, collect); const auto allowTransactionsOnConfigDatabase = (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) || @@ -1515,7 +1581,6 @@ void ExecCommandDatabase::_initiateCommand() { client->isFromSystemConnection(); const auto invocationNss = _invocation->ns(); - validateSessionOptions( _sessionOptions, command->getName(), invocationNss, allowTransactionsOnConfigDatabase); @@ -1549,7 +1614,7 @@ void ExecCommandDatabase::_initiateCommand() { topLevelFields.insert(fieldName).second); } - if (CommandHelpers::isHelpRequest(helpField)) { + if (MONGO_unlikely(CommandHelpers::isHelpRequest(helpField))) { CurOp::get(opCtx)->ensureStarted(); // We disable not-primary-error tracker for help requests due to SERVER-11492, because // config servers use help requests to determine which commands are database writes, and so @@ -1570,8 +1635,6 @@ void ExecCommandDatabase::_initiateCommand() { _invocation->checkAuthorization(opCtx, request); - const bool iAmPrimary = replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname); - if (!opCtx->getClient()->isInDirectClient() && !MONGO_unlikely(skipCheckingForNotPrimaryInCommandDispatch.shouldFail())) { const bool inMultiDocumentTransaction = (_sessionOptions.getAutocommit() == false); @@ -1588,7 +1651,7 @@ void ExecCommandDatabase::_initiateCommand() { bool couldHaveOptedIn = allowed == Command::AllowedOnSecondary::kOptIn && !inMultiDocumentTransaction; bool optedIn = couldHaveOptedIn && ReadPreferenceSetting::get(opCtx).canRunOnSecondary(); - bool canRunHere = commandCanRunHere(opCtx, dbname, command, inMultiDocumentTransaction); + bool canRunHere = commandCanRunHere(opCtx, dbName, command, inMultiDocumentTransaction); if (!canRunHere && couldHaveOptedIn) { const auto msg = client->supportsHello() ? "not primary and secondaryOk=false"_sd : "not master and slaveOk=false"_sd; @@ -1604,7 +1667,7 @@ void ExecCommandDatabase::_initiateCommand() { if (!command->maintenanceOk() && replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet && - !replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbname) && + !replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, dbName) && !replCoord->getMemberState().secondary()) { uassert(ErrorCodes::NotPrimaryOrSecondary, @@ -1628,7 +1691,7 @@ void ExecCommandDatabase::_initiateCommand() { repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX); uassert(ErrorCodes::NotWritablePrimary, "Cannot start a transaction in a non-primary state", - replCoord->canAcceptWritesForDatabase(opCtx, dbname)); + replCoord->canAcceptWritesForDatabase(opCtx, dbName)); } } @@ -1648,43 +1711,47 @@ void ExecCommandDatabase::_initiateCommand() { } } - // Parse the 'maxTimeMS' command option, and use it to set a deadline for the operation on the - // OperationContext. The 'maxTimeMS' option unfortunately has a different meaning for a getMore - // command, where it is used to communicate the maximum time to wait for new inserts on tailable - // cursors, not as a deadline for the operation. - // TODO SERVER-34277 Remove the special handling for maxTimeMS for getMores. This will require - // introducing a new 'max await time' parameter for getMore, and eventually banning maxTimeMS - // altogether on a getMore command. 
- const auto maxTimeMS = Milliseconds{uassertStatusOK(parseMaxTimeMS(cmdOptionMaxTimeMSField))}; - const auto maxTimeMSOpOnly = - Milliseconds{uassertStatusOK(parseMaxTimeMS(maxTimeMSOpOnlyField))}; - - if ((maxTimeMS > Milliseconds::zero() || maxTimeMSOpOnly > Milliseconds::zero()) && - command->getLogicalOp() != LogicalOp::opGetMore) { - uassert(40119, - "Illegal attempt to set operation deadline within DBDirectClient", - !opCtx->getClient()->isInDirectClient()); - - // The "hello" command should not inherit the deadline from the user op it is operating as a - // part of as that can interfere with replica set monitoring and host selection. - const bool ignoreMaxTimeMSOpOnly = isHello(); - - if (!ignoreMaxTimeMSOpOnly && maxTimeMSOpOnly > Milliseconds::zero() && - (maxTimeMS == Milliseconds::zero() || maxTimeMSOpOnly < maxTimeMS)) { - opCtx->storeMaxTimeMS(maxTimeMS); - opCtx->setDeadlineByDate(startedCommandExecAt + maxTimeMSOpOnly, - ErrorCodes::MaxTimeMSExpired); - } else if (maxTimeMS > Milliseconds::zero()) { - opCtx->setDeadlineByDate(startedCommandExecAt + maxTimeMS, - ErrorCodes::MaxTimeMSExpired); + if (cmdOptionMaxTimeMSField || maxTimeMSOpOnlyField) { + // Parse the 'maxTimeMS' command option, and use it to set a deadline for the operation on + // the OperationContext. The 'maxTimeMS' option unfortunately has a different meaning for a + // getMore command, where it is used to communicate the maximum time to wait for new inserts + // on tailable cursors, not as a deadline for the operation. + // + // TODO SERVER-34277 Remove the special handling for maxTimeMS for getMores. This will + // require introducing a new 'max await time' parameter for getMore, and eventually banning + // maxTimeMS altogether on a getMore command. + const auto maxTimeMS = + Milliseconds{uassertStatusOK(parseMaxTimeMS(cmdOptionMaxTimeMSField))}; + const auto maxTimeMSOpOnly = + Milliseconds{uassertStatusOK(parseMaxTimeMS(maxTimeMSOpOnlyField))}; + + if ((maxTimeMS > Milliseconds::zero() || maxTimeMSOpOnly > Milliseconds::zero()) && + command->getLogicalOp() != LogicalOp::opGetMore) { + uassert(40119, + "Illegal attempt to set operation deadline within DBDirectClient", + !opCtx->getClient()->isInDirectClient()); + + // The "hello" command should not inherit the deadline from the user op it is operating + // as a part of as that can interfere with replica set monitoring and host selection. + const bool ignoreMaxTimeMSOpOnly = isHello(); + + if (!ignoreMaxTimeMSOpOnly && maxTimeMSOpOnly > Milliseconds::zero() && + (maxTimeMS == Milliseconds::zero() || maxTimeMSOpOnly < maxTimeMS)) { + opCtx->storeMaxTimeMS(maxTimeMS); + opCtx->setDeadlineByDate(startedCommandExecAt + maxTimeMSOpOnly, + ErrorCodes::MaxTimeMSExpired); + } else if (maxTimeMS > Milliseconds::zero()) { + opCtx->setDeadlineByDate(startedCommandExecAt + maxTimeMS, + ErrorCodes::MaxTimeMSExpired); + } } } auto& readConcernArgs = repl::ReadConcernArgs::get(opCtx); - // If the parent operation runs in a transaction, we don't override the read concern. - auto skipReadConcern = - opCtx->getClient()->isInDirectClient() && opCtx->inMultiDocumentTransaction(); + // If the operation is being executed as part of DBDirectClient this means we must use the + // original read concern. 
+ auto skipReadConcern = opCtx->getClient()->isInDirectClient(); bool startTransaction = static_cast(_sessionOptions.getStartTransaction()); if (!skipReadConcern) { auto newReadConcernArgs = uassertStatusOK(_extractReadConcern( @@ -1710,28 +1777,63 @@ void ExecCommandDatabase::_initiateCommand() { if (startTransaction) { _setLockStateForTransaction(opCtx); - } - // Remember whether or not this operation is starting a transaction, in case something later in - // the execution needs to adjust its behavior based on this. - opCtx->setIsStartingMultiDocumentTransaction(startTransaction); + // Remember whether or not this operation is starting a transaction, in case something later + // in the execution needs to adjust its behavior based on this. + opCtx->setIsStartingMultiDocumentTransaction(true); + } // Once API params and txn state are set on opCtx, enforce the "requireApiVersion" setting. enforceRequireAPIVersion(opCtx, command); + // Check that the client has the directShardOperations role if this is a direct operation to a + // shard. + if (command->requiresAuth() && ShardingState::get(opCtx)->enabled() && + serverGlobalParams.featureCompatibility.isVersionInitialized() && + feature_flags::gCheckForDirectShardOperations.isEnabled( + serverGlobalParams.featureCompatibility)) { + bool clusterHasTwoOrMoreShards = [&]() { + auto* clusterParameters = ServerParameterSet::getClusterParameterSet(); + auto* clusterCardinalityParam = + clusterParameters->get>( + "shardedClusterCardinalityForDirectConns"); + return clusterCardinalityParam->getValue(boost::none).getHasTwoOrMoreShards(); + }(); + if (clusterHasTwoOrMoreShards && !command->shouldSkipDirectConnectionChecks()) { + const bool authIsEnabled = AuthorizationManager::get(opCtx->getServiceContext()) && + AuthorizationManager::get(opCtx->getServiceContext())->isAuthEnabled(); + + const bool hasDirectShardOperations = !authIsEnabled || + ((AuthorizationSession::get(opCtx->getClient()) != nullptr && + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::issueDirectShardOperations))); + + if (!hasDirectShardOperations) { + bool timeUpdated = false; + auto currentTime = opCtx->getServiceContext()->getFastClockSource()->now(); + { + stdx::lock_guard lk(_staticMutex); + if ((currentTime - _lastDirectConnectionWarningTime) > Hours(1)) { + _lastDirectConnectionWarningTime = currentTime; + timeUpdated = true; + } + } + if (timeUpdated) { + LOGV2_WARNING( + 7553700, + "Command should not be run via a direct connection to a shard without the " + "directShardOperations role. Please connect via a router.", + "command"_attr = request.getCommandName()); + } + ShardingStatistics::get(opCtx).unauthorizedDirectShardOperations.addAndFetch(1); + } + } + } + if (!opCtx->getClient()->isInDirectClient() && - readConcernArgs.getLevel() != repl::ReadConcernLevel::kAvailableReadConcern && - (iAmPrimary || (readConcernArgs.hasLevel() || readConcernArgs.getArgsAfterClusterTime()))) { - // If a timeseries collection is sharded, only the buckets collection would be sharded. We - // expect all versioned commands to be sent over 'system.buckets' namespace. But it is - // possible that a stale mongos may send the request over a view namespace. In this case, we - // initialize the 'OperationShardingState' with buckets namespace. - auto bucketNss = invocationNss.makeTimeseriesBucketsNamespace(); - // Hold reference to the catalog for collection lookup without locks to be safe. 
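The direct-shard-connection check above throttles its warning to once per hour through a static mutex-protected timestamp. The pattern in isolation, as a sketch (names mirror the hunk, but this is not the production code):

```cpp
// Sketch of the once-per-hour warning throttle used above: only the thread
// that advances the shared timestamp emits the log line.
static auto warnMutex = MONGO_MAKE_LATCH("DirectShardConnectionTimer");
static Date_t lastWarned;  // Date_t() == epoch, so the first offender always warns.

bool shouldWarn = false;
const auto now = opCtx->getServiceContext()->getFastClockSource()->now();
{
    stdx::lock_guard lk(warnMutex);
    if (now - lastWarned > Hours(1)) {
        lastWarned = now;
        shouldWarn = true;
    }
}
if (shouldWarn) {
    LOGV2_WARNING(7553700,
                  "Command run over a direct connection to a shard without the "
                  "directShardOperations role; connect via a router instead",
                  "command"_attr = request.getCommandName());
}
```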
- auto catalog = CollectionCatalog::get(opCtx); - auto coll = catalog->lookupCollectionByNamespace(opCtx, bucketNss); - auto namespaceForSharding = - (coll && coll->getTimeseriesOptions()) ? bucketNss : invocationNss; + readConcernArgs.getLevel() != repl::ReadConcernLevel::kAvailableReadConcern) { boost::optional shardVersion; if (auto shardVersionElem = request.body[ShardVersion::kShardVersionField]) { shardVersion = ShardVersion::parse(shardVersionElem); @@ -1742,8 +1844,21 @@ void ExecCommandDatabase::_initiateCommand() { databaseVersion = DatabaseVersion(databaseVersionElem.Obj()); } - OperationShardingState::setShardRole( - opCtx, namespaceForSharding, shardVersion, databaseVersion); + if (shardVersion || databaseVersion) { + // If a timeseries collection is sharded, only the buckets collection would be sharded. + // We expect all versioned commands to be sent over 'system.buckets' namespace. But it + // is possible that a stale mongos may send the request over a view namespace. In this + // case, we initialize the 'OperationShardingState' with buckets namespace. + auto bucketNss = invocationNss.makeTimeseriesBucketsNamespace(); + // Hold reference to the catalog for collection lookup without locks to be safe. + auto catalog = CollectionCatalog::get(opCtx); + auto coll = catalog->lookupCollectionByNamespace(opCtx, bucketNss); + auto namespaceForSharding = + (coll && coll->getTimeseriesOptions()) ? bucketNss : invocationNss; + + OperationShardingState::setShardRole( + opCtx, namespaceForSharding, shardVersion, databaseVersion); + } } _scoped = _execContext->behaviors->scopedOperationCompletionShardingActions(opCtx); @@ -1807,8 +1922,7 @@ Future ExecCommandDatabase::_commandExec() { .onError([this](Status s) -> Future { auto opCtx = _execContext->getOpCtx(); - if (!opCtx->getClient()->isInDirectClient() && - !serverGlobalParams.clusterRole.exclusivelyHasConfigRole() && !_refreshedDatabase) { + if (!opCtx->getClient()->isInDirectClient() && !_refreshedDatabase) { auto sce = s.extraInfo(); invariant(sce); @@ -1838,9 +1952,7 @@ Future ExecCommandDatabase::_commandExec() { auto opCtx = _execContext->getOpCtx(); ShardingStatistics::get(opCtx).countStaleConfigErrors.addAndFetch(1); - if (!opCtx->getClient()->isInDirectClient() && - !serverGlobalParams.clusterRole.exclusivelyHasConfigRole() && - !_refreshedCollection) { + if (!opCtx->getClient()->isInDirectClient() && !_refreshedCollection) { if (auto sce = s.extraInfo()) { bool inCriticalSection = sce->getCriticalSectionSignal().has_value(); bool stableLocalVersion = !inCriticalSection && sce->getVersionWanted(); @@ -1885,10 +1997,6 @@ Future ExecCommandDatabase::_commandExec() { return s; }) .onError([this](Status s) -> Future { - // This exception can never happen on the config server. Config servers can't receive - // SSV either, because they never have commands with shardVersion sent. 
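The hunk above attaches a shard role only when the incoming request actually carried a shard or database version. In isolation the guard looks like the sketch below; field lookups mirror the hunk, the database-version field name shown here is an assumption, and invocationNss stands in for the resolved command namespace.

```cpp
// Sketch of the guarded shard-role attachment: skip OperationShardingState
// entirely for unversioned requests.
boost::optional<ShardVersion> shardVersion;
if (auto elem = request.body[ShardVersion::kShardVersionField]) {
    shardVersion = ShardVersion::parse(elem);
}

boost::optional<DatabaseVersion> databaseVersion;
if (auto elem = request.body["databaseVersion"]) {  // assumed field name
    databaseVersion = DatabaseVersion(elem.Obj());
}

if (shardVersion || databaseVersion) {
    OperationShardingState::setShardRole(opCtx, invocationNss, shardVersion, databaseVersion);
}
```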
- invariant(!serverGlobalParams.clusterRole.exclusivelyHasConfigRole()); - auto opCtx = _execContext->getOpCtx(); if (!opCtx->getClient()->isInDirectClient() && !_refreshedCatalogCache) { invariant(!opCtx->lockState()->isLocked()); @@ -2094,7 +2202,7 @@ Future executeCommand(std::shared_ptr exe "Assertion while executing command '{command}' on database '{db}': {error}", "Assertion while executing command", "command"_attr = execContext->getRequest().getCommandName(), - "db"_attr = execContext->getRequest().getDatabase(), + "db"_attr = execContext->getRequest().getDatabaseNoThrow(), "error"_attr = status.toString()); }); past.emplaceValue(); @@ -2390,7 +2498,7 @@ BSONObj ServiceEntryPointCommon::getRedactedCopyForLogging(const Command* comman } void logHandleRequestFailure(const Status& status) { - LOGV2_ERROR(4879802, "Failed to handle request", "error"_attr = redact(status)); + LOGV2_INFO(4879802, "Failed to handle request", "error"_attr = redact(status)); } void onHandleRequestException(const HandleRequest& hr, const Status& status) { diff --git a/src/mongo/db/service_entry_point_common.h b/src/mongo/db/service_entry_point_common.h index a1936351181dc..f5bdc8fa170fc 100644 --- a/src/mongo/db/service_entry_point_common.h +++ b/src/mongo/db/service_entry_point_common.h @@ -29,11 +29,19 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" #include "mongo/db/dbmessage.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" #include "mongo/util/fail_point.h" #include "mongo/util/future.h" #include "mongo/util/polymorphic_scoped.h" diff --git a/src/mongo/db/service_entry_point_mongod.cpp b/src/mongo/db/service_entry_point_mongod.cpp index a09e32be199c3..94703181e66bc 100644 --- a/src/mongo/db/service_entry_point_mongod.cpp +++ b/src/mongo/db/service_entry_point_mongod.cpp @@ -27,33 +27,64 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - #include "mongo/db/service_entry_point_mongod.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/fsync_locked.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" #include "mongo/db/curop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_concern.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_client_info.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/speculative_majority_read_info.h" -#include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/resharding/resharding_metrics_helpers.h" #include "mongo/db/s/scoped_operation_completion_sharding_actions.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" -#include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_entry_point_common.h" +#include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/write_concern.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" #include "mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h" +#include "mongo/s/shard_version.h" #include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/polymorphic_scoped.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - namespace mongo { class ServiceEntryPointMongod::Hooks final : public ServiceEntryPointCommon::Hooks { diff --git a/src/mongo/db/service_entry_point_mongod.h b/src/mongo/db/service_entry_point_mongod.h index a4fc691d4086e..a59fae99f1f54 100644 --- a/src/mongo/db/service_entry_point_mongod.h +++ b/src/mongo/db/service_entry_point_mongod.h @@ -29,7 +29,11 @@ #pragma once +#include "mongo/db/client.h" +#include "mongo/db/dbmessage.h" +#include "mongo/rpc/message.h" #include "mongo/transport/service_entry_point_impl.h" +#include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/db/service_liaison.cpp b/src/mongo/db/service_liaison.cpp index 8f370ccd1bc41..f9fbd2b7464d2 100644 --- a/src/mongo/db/service_liaison.cpp +++ b/src/mongo/db/service_liaison.cpp @@ -27,12 +27,8 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/service_liaison.h" -#include "mongo/db/service_context.h" - namespace mongo { ServiceLiaison::~ServiceLiaison() = default; diff --git a/src/mongo/db/service_liaison.h b/src/mongo/db/service_liaison.h index 79e6b192da2eb..a976da2464299 100644 --- a/src/mongo/db/service_liaison.h +++ b/src/mongo/db/service_liaison.h @@ -30,7 +30,11 @@ #pragma once #include +#include +#include "mongo/base/status.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_killer.h" #include "mongo/util/periodic_runner.h" diff --git a/src/mongo/db/service_liaison_mock.cpp b/src/mongo/db/service_liaison_mock.cpp index f6c36f9eb51d4..8a28a27fbaf19 100644 --- a/src/mongo/db/service_liaison_mock.cpp +++ b/src/mongo/db/service_liaison_mock.cpp @@ -27,10 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include #include +#include + +#include +#include #include "mongo/db/service_liaison_mock.h" #include "mongo/util/periodic_runner_factory.h" diff --git a/src/mongo/db/service_liaison_mock.h b/src/mongo/db/service_liaison_mock.h index 76af1a8ebf036..93669a4b84e3c 100644 --- a/src/mongo/db/service_liaison_mock.h +++ b/src/mongo/db/service_liaison_mock.h @@ -29,12 +29,25 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/crypto/hash_block.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/service_liaison.h" +#include "mongo/db/session/kill_sessions_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/session_killer.h" #include "mongo/executor/async_timer_mock.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/duration.h" #include "mongo/util/periodic_runner.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/service_liaison_mongod.cpp b/src/mongo/db/service_liaison_mongod.cpp index b1b956c48e3eb..131a81a9d0309 100644 --- a/src/mongo/db/service_liaison_mongod.cpp +++ b/src/mongo/db/service_liaison_mongod.cpp @@ -28,15 +28,19 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/service_liaison_mongod.h" +#include +#include +#include #include "mongo/db/client.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" -#include "mongo/platform/mutex.h" +#include "mongo/db/service_liaison_mongod.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/clock_source.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/service_liaison_mongod.h b/src/mongo/db/service_liaison_mongod.h index 037cae74308fc..524e9cbdb9350 100644 --- a/src/mongo/db/service_liaison_mongod.h +++ b/src/mongo/db/service_liaison_mongod.h @@ -29,9 +29,18 @@ #pragma once +#include +#include + +#include + +#include "mongo/base/status.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_liaison.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/session_killer.h" +#include "mongo/platform/mutex.h" #include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/periodic_runner.h" #include 
"mongo/util/time_support.h" diff --git a/src/mongo/db/service_liaison_mongos.cpp b/src/mongo/db/service_liaison_mongos.cpp index 979ab179779c7..0e833baacebb1 100644 --- a/src/mongo/db/service_liaison_mongos.cpp +++ b/src/mongo/db/service_liaison_mongos.cpp @@ -28,14 +28,19 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/service_liaison_mongos.h" +#include +#include +#include +#include "mongo/db/client.h" #include "mongo/db/service_context.h" -#include "mongo/platform/mutex.h" +#include "mongo/db/service_liaison_mongos.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/clock_source.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/service_liaison_mongos.h b/src/mongo/db/service_liaison_mongos.h index 97281bcf4d0a9..ba090021dbd93 100644 --- a/src/mongo/db/service_liaison_mongos.h +++ b/src/mongo/db/service_liaison_mongos.h @@ -29,9 +29,18 @@ #pragma once +#include +#include + +#include + +#include "mongo/base/status.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_liaison.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/session_killer.h" +#include "mongo/platform/mutex.h" #include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/periodic_runner.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/session/SConscript b/src/mongo/db/session/SConscript index 32ea42a2691af..c019be29d5ff7 100644 --- a/src/mongo/db/session/SConscript +++ b/src/mongo/db/session/SConscript @@ -122,7 +122,6 @@ env.Library( env.Library( target='logical_session_cache_impl', source=[ - 'initialize_operation_session_info.cpp', 'logical_session_cache_impl.cpp', ], LIBDEPS=[ @@ -175,9 +174,14 @@ env.Library( env.Library( target='session_catalog_mongod', source=[ + 'internal_transactions_reap_service.cpp', + 'internal_transactions_reap_service.idl', 'session_catalog_mongod.cpp', 'session_txn_record.idl', ], + LIBDEPS=[ + 'session_catalog', + ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/dbdirectclient', @@ -186,6 +190,7 @@ env.Library( '$BUILD_DIR/mongo/db/internal_transactions_feature_flag', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/repl/repl_server_parameters', + '$BUILD_DIR/mongo/db/repl/replica_set_aware_service', '$BUILD_DIR/mongo/db/repl/storage_interface', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/service_context', @@ -194,6 +199,5 @@ env.Library( 'kill_sessions', 'logical_session_id', 'logical_session_id_helpers', - 'session_catalog', ], ) diff --git a/src/mongo/db/session/internal_session_pool.cpp b/src/mongo/db/session/internal_session_pool.cpp index 98611a0bad214..94fbba069add5 100644 --- a/src/mongo/db/session/internal_session_pool.cpp +++ b/src/mongo/db/session/internal_session_pool.cpp @@ -28,13 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "mongo/db/service_context.h" #include "mongo/db/session/internal_session_pool.h" -#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include 
"mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -97,7 +110,7 @@ boost::optional InternalSessionPool::_acquireSessi } InternalSessionPool::Session InternalSessionPool::acquireSystemSession() { - const InternalSessionPool::Session session = [&] { + InternalSessionPool::Session session = [&] { stdx::lock_guard lock(_mutex); const auto& systemSession = makeSystemLogicalSessionId(); @@ -116,7 +129,7 @@ InternalSessionPool::Session InternalSessionPool::acquireSystemSession() { InternalSessionPool::Session InternalSessionPool::acquireStandaloneSession( OperationContext* opCtx) { - const InternalSessionPool::Session session = [&] { + InternalSessionPool::Session session = [&] { stdx::lock_guard lock(_mutex); const auto& userDigest = getLogicalSessionUserDigestForLoggedInUser(opCtx); @@ -136,7 +149,7 @@ InternalSessionPool::Session InternalSessionPool::acquireStandaloneSession( InternalSessionPool::Session InternalSessionPool::acquireChildSession( OperationContext* opCtx, const LogicalSessionId& parentLsid) { - const InternalSessionPool::Session session = [&] { + InternalSessionPool::Session session = [&] { stdx::lock_guard lock(_mutex); auto it = _childSessions.find(parentLsid); diff --git a/src/mongo/db/session/internal_session_pool.h b/src/mongo/db/session/internal_session_pool.h index a67cc2eb60ac5..2bfd28ca8cc8f 100644 --- a/src/mongo/db/session/internal_session_pool.h +++ b/src/mongo/db/session/internal_session_pool.h @@ -29,11 +29,25 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" - -#include +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -111,7 +125,7 @@ class InternalSessionPool { LogicalSessionIdMap _childSessions; // Map partitioning the session pool by logged in user. - stdx::unordered_map, SHA256Block::Hash> _perUserSessionPool; + stdx::unordered_map> _perUserSessionPool; // Protects the internal data structures. mutable Mutex _mutex = MONGO_MAKE_LATCH("InternalSessionPool::_mutex"); diff --git a/src/mongo/db/session/internal_session_pool_test.cpp b/src/mongo/db/session/internal_session_pool_test.cpp index b0ae0c5c1dff5..1eb26665f83f5 100644 --- a/src/mongo/db/session/internal_session_pool_test.cpp +++ b/src/mongo/db/session/internal_session_pool_test.cpp @@ -27,17 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/db/service_liaison_mock.h" #include "mongo/db/session/internal_session_pool.h" -#include "mongo/db/session/logical_session_cache.h" -#include "mongo/db/session/logical_session_cache_impl.h" #include "mongo/db/session/logical_session_id_helpers.h" -#include "mongo/db/session/sessions_collection_mock.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" namespace mongo { diff --git a/src/mongo/db/transaction/internal_transactions_reap_service.cpp b/src/mongo/db/session/internal_transactions_reap_service.cpp similarity index 90% rename from src/mongo/db/transaction/internal_transactions_reap_service.cpp rename to src/mongo/db/session/internal_transactions_reap_service.cpp index 90dc23baffe17..becc5d397e452 100644 --- a/src/mongo/db/transaction/internal_transactions_reap_service.cpp +++ b/src/mongo/db/session/internal_transactions_reap_service.cpp @@ -27,11 +27,28 @@ * it in the license file. */ -#include "mongo/db/transaction/internal_transactions_reap_service.h" +#include "mongo/db/session/internal_transactions_reap_service.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/client.h" +#include "mongo/db/session/internal_transactions_reap_service_gen.h" #include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/transaction/internal_transactions_reap_service_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction @@ -114,10 +131,6 @@ void InternalTransactionsReapService::onShutdown() { void InternalTransactionsReapService::_reapInternalTransactions(ServiceContext* service) try { ThreadClient tc("reap-internal-transactions", service); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } auto uniqueOpCtx = tc->makeOperationContext(); auto opCtx = uniqueOpCtx.get(); opCtx->setAlwaysInterruptAtStepDownOrUp_UNSAFE(); diff --git a/src/mongo/db/transaction/internal_transactions_reap_service.h b/src/mongo/db/session/internal_transactions_reap_service.h similarity index 92% rename from src/mongo/db/transaction/internal_transactions_reap_service.h rename to src/mongo/db/session/internal_transactions_reap_service.h index 09da172f68764..2f279ce50b322 100644 --- a/src/mongo/db/transaction/internal_transactions_reap_service.h +++ b/src/mongo/db/session/internal_transactions_reap_service.h @@ -29,9 +29,22 @@ #pragma once +#include +#include +#include +#include + +#include +#include + #include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/platform/mutex.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" +#include 
"mongo/util/hierarchical_acquisition.h" namespace mongo { diff --git a/src/mongo/db/transaction/internal_transactions_reap_service.idl b/src/mongo/db/session/internal_transactions_reap_service.idl similarity index 100% rename from src/mongo/db/transaction/internal_transactions_reap_service.idl rename to src/mongo/db/session/internal_transactions_reap_service.idl diff --git a/src/mongo/db/transaction/internal_transactions_reap_service_test.cpp b/src/mongo/db/session/internal_transactions_reap_service_test.cpp similarity index 94% rename from src/mongo/db/transaction/internal_transactions_reap_service_test.cpp rename to src/mongo/db/session/internal_transactions_reap_service_test.cpp index 46a1c1fbc262a..84c5409192d90 100644 --- a/src/mongo/db/transaction/internal_transactions_reap_service_test.cpp +++ b/src/mongo/db/session/internal_transactions_reap_service_test.cpp @@ -27,17 +27,40 @@ * it in the license file. */ +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/repl/image_collection_entry_gen.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/internal_transactions_reap_service.h" +#include "mongo/db/session/internal_transactions_reap_service_gen.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/transaction/internal_transactions_reap_service.h" -#include "mongo/db/transaction/internal_transactions_reap_service_gen.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/session/kill_sessions.cpp b/src/mongo/db/session/kill_sessions.cpp index 99d8fd5122a0f..9a944532759e9 100644 --- a/src/mongo/db/session/kill_sessions.cpp +++ b/src/mongo/db/session/kill_sessions.cpp @@ -27,15 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/session/kill_sessions.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/db/api_parameters.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/user.h" #include "mongo/db/operation_context.h" -#include "mongo/db/service_context.h" +#include "mongo/db/session/kill_sessions.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/read_through_cache.h" namespace mongo { diff --git a/src/mongo/db/session/kill_sessions.h b/src/mongo/db/session/kill_sessions.h index d4b29313bf2d5..fbb2095fa1ece 100644 --- a/src/mongo/db/session/kill_sessions.h +++ b/src/mongo/db/session/kill_sessions.h @@ -29,13 +29,22 @@ #pragma once +#include +#include +#include #include +#include +#include "mongo/crypto/hash_block.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/api_parameters.h" #include "mongo/db/auth/role_name.h" #include "mongo/db/auth/user_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/session/kill_sessions_gen.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { diff --git a/src/mongo/db/session/kill_sessions_common.cpp b/src/mongo/db/session/kill_sessions_common.cpp index db9e7b7265367..8b175cfff5b4d 100644 --- a/src/mongo/db/session/kill_sessions_common.cpp +++ b/src/mongo/db/session/kill_sessions_common.cpp @@ -28,16 +28,27 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/session/kill_sessions_common.h" +#include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/kill_sessions_common.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_killer.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/transport/session.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/db/session/kill_sessions_common.h b/src/mongo/db/session/kill_sessions_common.h index 3f0a8ddb06deb..42e720039e87f 100644 --- a/src/mongo/db/session/kill_sessions_common.h +++ b/src/mongo/db/session/kill_sessions_common.h @@ -29,15 +29,29 @@ #pragma once -#include "mongo/db/session/kill_sessions.h" - +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/operation_context.h" +#include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/kill_sessions_gen.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/session_killer.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git 
a/src/mongo/db/session/kill_sessions_local.cpp b/src/mongo/db/session/kill_sessions_local.cpp index b40367cec278e..ecf1a75c97351 100644 --- a/src/mongo/db/session/kill_sessions_local.cpp +++ b/src/mongo/db/session/kill_sessions_local.cpp @@ -28,18 +28,32 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/db/session/kill_sessions_local.h" +#include #include "mongo/db/client.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/kill_sessions_common.h" +#include "mongo/db/session/kill_sessions_gen.h" +#include "mongo/db/session/kill_sessions_local.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -192,6 +206,13 @@ void killSessionsAbortAllPreparedTransactions(OperationContext* opCtx) { void yieldLocksForPreparedTransactions(OperationContext* opCtx) { // Create a new opCtx because we need an empty locker to refresh the locks. auto newClient = opCtx->getServiceContext()->makeClient("prepared-txns-yield-locks"); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*newClient.get()); + newClient.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(newClient); auto newOpCtx = cc().makeOperationContext(); diff --git a/src/mongo/db/session/kill_sessions_local.h b/src/mongo/db/session/kill_sessions_local.h index 46b3a72120a82..6a112ed6d2b39 100644 --- a/src/mongo/db/session/kill_sessions_local.h +++ b/src/mongo/db/session/kill_sessions_local.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/base/error_codes.h" +#include "mongo/db/operation_context.h" #include "mongo/db/session/session_killer.h" /** diff --git a/src/mongo/db/session/logical_session_cache.cpp b/src/mongo/db/session/logical_session_cache.cpp index c56e344a833a2..75093ff6373bf 100644 --- a/src/mongo/db/session/logical_session_cache.cpp +++ b/src/mongo/db/session/logical_session_cache.cpp @@ -27,12 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/logical_session_cache.h" +#include #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_cache.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/session/logical_session_cache.h b/src/mongo/db/session/logical_session_cache.h index 8150de2487c9e..aded0db66c948 100644 --- a/src/mongo/db/session/logical_session_cache.h +++ b/src/mongo/db/session/logical_session_cache.h @@ -30,12 +30,21 @@ #pragma once #include +#include +#include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_cache_gen.h" #include "mongo/db/session/logical_session_cache_stats_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/logical_session_id_helpers.h" namespace mongo { diff --git a/src/mongo/db/session/logical_session_cache_impl.cpp b/src/mongo/db/session/logical_session_cache_impl.cpp index 7bc51fd1137ac..f3e8044751432 100644 --- a/src/mongo/db/session/logical_session_cache_impl.cpp +++ b/src/mongo/db/session/logical_session_cache_impl.cpp @@ -28,21 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/logical_session_cache_impl.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/logical_session_cache_gen.h" +#include "mongo/db/session/logical_session_cache_impl.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session_killer.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_severity_suppressor.h" -#include "mongo/platform/atomic_word.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -69,14 +91,26 @@ LogicalSessionCacheImpl::LogicalSessionCacheImpl(std::unique_ptr _stats.setLastSessionsCollectionJobTimestamp(_service->now()); _stats.setLastTransactionReaperJobTimestamp(_service->now()); - if (!disableLogicalSessionCacheRefresh) { - _service->scheduleJob({"LogicalSessionCacheRefresh", - [this](Client* client) { _periodicRefresh(client); }, - Milliseconds(logicalSessionRefreshMillis)}); + // Skip initializing this background thread when using 'recoverFromOplogAsStandalone=true' as + // the server is put in read-only mode after oplog recovery. 
+ if (repl::ReplSettings::shouldRecoverFromOplogAsStandalone()) { + return; + } - _service->scheduleJob({"LogicalSessionCacheReap", - [this](Client* client) { _periodicReap(client); }, - Milliseconds(logicalSessionRefreshMillis)}); + if (!disableLogicalSessionCacheRefresh) { + _service->scheduleJob( + {"LogicalSessionCacheRefresh", + [this](Client* client) { _periodicRefresh(client); }, + Milliseconds(logicalSessionRefreshMillis), + // TODO(SERVER-74659): Please revisit if this periodic job could be made killable. + false /*isKillableByStepdown*/}); + + _service->scheduleJob( + {"LogicalSessionCacheReap", + [this](Client* client) { _periodicReap(client); }, + Milliseconds(logicalSessionRefreshMillis), + // TODO(SERVER-74659): Please revisit if this periodic job could be made killable. + false /*isKillableByStepdown*/}); } } diff --git a/src/mongo/db/session/logical_session_cache_impl.h b/src/mongo/db/session/logical_session_cache_impl.h index c23b643588299..f8027dd4b2bf0 100644 --- a/src/mongo/db/session/logical_session_cache_impl.h +++ b/src/mongo/db/session/logical_session_cache_impl.h @@ -29,12 +29,28 @@ #pragma once +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_liaison.h" #include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_cache_stats_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/sessions_collection.h" +#include "mongo/platform/mutex.h" #include "mongo/util/concurrency/with_lock.h" #include "mongo/util/functional.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/session/logical_session_cache_test.cpp b/src/mongo/db/session/logical_session_cache_test.cpp index 8d9220ebfb86b..7f0fec8ae8424 100644 --- a/src/mongo/db/session/logical_session_cache_test.cpp +++ b/src/mongo/db/session/logical_session_cache_test.cpp @@ -27,30 +27,39 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/logical_session_cache_impl.h" - +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include +#include +#include +#include -#include "mongo/bson/oid.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/auth/authorization_session_for_test.h" -#include "mongo/db/auth/authz_manager_external_state_mock.h" -#include "mongo/db/auth/authz_session_external_state_mock.h" -#include "mongo/db/auth/user_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/service_liaison_mock.h" #include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_cache_gen.h" +#include "mongo/db/session/logical_session_cache_impl.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/sessions_collection.h" #include "mongo/db/session/sessions_collection_mock.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/stdx/future.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/ensure_fcv.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/session/logical_session_id.cpp b/src/mongo/db/session/logical_session_id.cpp index d5d4d76af2c6c..5b80d7e0cd89d 100644 --- a/src/mongo/db/session/logical_session_id.cpp +++ b/src/mongo/db/session/logical_session_id.cpp @@ -27,10 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/session/logical_session_id.h" +#include +#include +#include + namespace mongo { LogicalSessionId makeLogicalSessionIdForTest() { @@ -67,4 +69,19 @@ LogicalSessionRecord makeLogicalSessionRecordForTest() { return record; } +OperationSessionInfoFromClient::OperationSessionInfoFromClient( + LogicalSessionFromClient lsidFromClient) { + setSessionId(std::move(lsidFromClient)); +} + +OperationSessionInfoFromClient::OperationSessionInfoFromClient(LogicalSessionId lsid, + boost::optional txnNumber) + : OperationSessionInfoFromClient([&] { + LogicalSessionFromClient lsidFromClient(lsid.getId()); + lsidFromClient.setUid(lsid.getUid()); + return lsidFromClient; + }()) { + setTxnNumber(std::move(txnNumber)); +} + } // namespace mongo diff --git a/src/mongo/db/session/logical_session_id.h b/src/mongo/db/session/logical_session_id.h index bb7bb148669e6..4b20d7067c658 100644 --- a/src/mongo/db/session/logical_session_id.h +++ b/src/mongo/db/session/logical_session_id.h @@ -29,12 +29,29 @@ #pragma once +#include +#include #include +#include +#include +#include +#include +#include +#include +#include #include #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/crypto/hash_block.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/duration.h" #include "mongo/util/uuid.h" namespace mongo { @@ -55,10 +72,7 @@ const TxnRetryCounter kUninitializedTxnRetryCounter = -1; class BSONObjBuilder; class OperationContext; -// The constant kLocalLogicalSessionTimeoutMinutesDefault comes from the generated -// header logical_session_id_gen.h. -constexpr Minutes kLogicalSessionDefaultTimeout = - Minutes(kLocalLogicalSessionTimeoutMinutesDefault); +constexpr Minutes kLogicalSessionDefaultTimeout{Minutes(kLocalLogicalSessionTimeoutMinutesDefault)}; inline bool operator==(const LogicalSessionId& lhs, const LogicalSessionId& rhs) { return (lhs.getId() == rhs.getId()) && (lhs.getTxnNumber() == rhs.getTxnNumber()) && @@ -108,14 +122,13 @@ struct LogicalSessionIdHash { struct LogicalSessionRecordHash { std::size_t operator()(const LogicalSessionRecord& lsid) const { - return LogicalSessionIdHash{}(lsid.getId()); + return _hasher(lsid.getId()); } private: - UUID::Hash _hasher; + LogicalSessionIdHash _hasher; }; - inline std::ostream& operator<<(std::ostream& s, const LogicalSessionId& lsid) { return (s << lsid.getId() << " - " << lsid.getUid() << " - " << (lsid.getTxnNumber() ? std::to_string(*lsid.getTxnNumber()) : "") << " - " @@ -159,9 +172,10 @@ class TxnNumberAndRetryCounter { BSONObj toBSON() const { BSONObjBuilder bob; - bob.append(OperationSessionInfo::kTxnNumberFieldName, _txnNumber); + bob.append(OperationSessionInfoFromClientBase::kTxnNumberFieldName, _txnNumber); if (_txnRetryCounter) { - bob.append(OperationSessionInfo::kTxnRetryCounterFieldName, *_txnRetryCounter); + bob.append(OperationSessionInfoFromClientBase::kTxnRetryCounterFieldName, + *_txnRetryCounter); } return bob.obj(); } @@ -195,4 +209,27 @@ inline bool operator!=(const TxnNumberAndRetryCounter& l, const TxnNumberAndRetr return !(l == r); } +/** + * Represents all the session-related state that a client can attach when invoking a command against + * the server. 
Clients are allowed to invoke commands without attaching a session in which case the + * default-constructed value means "no logical session". However, if a client sends a session, they + * must specify at least the "lsid" field. + */ +class OperationSessionInfoFromClient : public OperationSessionInfoFromClientBase { +public: + /** + * Returns a default-constructed object meaning "there is no logical session". + */ + OperationSessionInfoFromClient() = default; + OperationSessionInfoFromClient(LogicalSessionFromClient lsidFromClient); + + // TODO (SERVER-77506): This constructor performs incomplete conversion from LogicalSessionId to + // LogicalSessionFromClient which is what this class expects. The current usages are only in + // ClusterClientCursorParams. + OperationSessionInfoFromClient(LogicalSessionId lsid, boost::optional txnNumber); + + explicit OperationSessionInfoFromClient(OperationSessionInfoFromClientBase other) + : OperationSessionInfoFromClientBase(std::move(other)) {} +}; + } // namespace mongo diff --git a/src/mongo/db/session/logical_session_id.idl b/src/mongo/db/session/logical_session_id.idl index eb875e0e2a4ce..e6ea0c167fd52 100644 --- a/src/mongo/db/session/logical_session_id.idl +++ b/src/mongo/db/session/logical_session_id.idl @@ -26,9 +26,8 @@ # it in the license file. # -# This IDL file describes the BSON format for a LogicalSessionId, and -# handles the serialization to and deserialization from its BSON representation -# for that class. +# This IDL file describes the BSON format for a LogicalSessionId, and handles the serialization to +# and deserialization from its BSON representation for that class. global: cpp_namespace: "mongo" @@ -64,6 +63,7 @@ types: deserializer: "mongo::BSONElement::_numberInt" structs: + LogicalSessionId: description: "A struct representing a LogicalSessionId" strict: true @@ -131,6 +131,7 @@ structs: optional: true stability: stable + # TODO (SERVER-77506): This class is only used for OpLog persistence and should be moved to the OpLog's IDL files OperationSessionInfo: description: "Parser for serializing session transaction metadata" strict: false @@ -157,9 +158,18 @@ structs: type: bool optional: true - OperationSessionInfoFromClient: - description: "Parser for pulling out session transaction metadata from commands" + OperationSessionInfoFromClientBase: + description: "Parser for pulling out session transaction metadata from commands, as opposed to + the OperationSessionInfo class above, which is used for persistence. + + Any new fields added here must be added as optional:true so that the default + constructed value continues to mean \"no logical session was sent by the client\". + + Please refer to the implementing class OperationSessionInfoFromClient for more + information on its usage and contract. + " strict: false + unsafe_dangerous_disable_extra_field_duplicate_checks: true fields: lsid: type: LogicalSessionFromClient diff --git a/src/mongo/db/session/logical_session_id_helpers.cpp b/src/mongo/db/session/logical_session_id_helpers.cpp index 739c87f18bd17..d8fa796428182 100644 --- a/src/mongo/db/session/logical_session_id_helpers.cpp +++ b/src/mongo/db/session/logical_session_id_helpers.cpp @@ -27,16 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/logical_session_id_helpers.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/user.h" #include "mongo/db/auth/user_name.h" -#include "mongo/db/feature_compatibility_version_documentation.h" +#include "mongo/db/client.h" #include "mongo/db/operation_context.h" -#include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -158,7 +173,8 @@ LogicalSessionId makeLogicalSessionId(const LogicalSessionFromClient& fromClient return authSession->isAuthorizedForPrivilege(priv); }) || authSession->isAuthorizedForPrivilege(Privilege( - ResourcePattern::forClusterResource(), ActionType::impersonate)) || + ResourcePattern::forClusterResource(authSession->getUserTenantId()), + ActionType::impersonate)) || getLogicalSessionUserDigestForLoggedInUser(opCtx) == fromClient.getUid()); lsid.setUid(*fromClient.getUid()); diff --git a/src/mongo/db/session/logical_session_id_helpers.h b/src/mongo/db/session/logical_session_id_helpers.h index cb140d292ed14..274ff52790367 100644 --- a/src/mongo/db/session/logical_session_id_helpers.h +++ b/src/mongo/db/session/logical_session_id_helpers.h @@ -29,11 +29,19 @@ #pragma once +#include #include #include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/session/logical_session_id_test.cpp b/src/mongo/db/session/logical_session_id_test.cpp index f66be64636774..deffbc2f7a2f1 100644 --- a/src/mongo/db/session/logical_session_id_test.cpp +++ b/src/mongo/db/session/logical_session_id_test.cpp @@ -27,37 +27,60 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include #include - -#include "mongo/db/session/logical_session_id.h" - +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/crypto/mechanism_scram.h" #include "mongo/crypto/sha1_block.h" #include "mongo/crypto/sha256_block.h" -#include "mongo/db/auth/action_set.h" -#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_impl.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" -#include "mongo/db/auth/authz_session_external_state_mock.h" +#include "mongo/db/auth/restriction_environment.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/sasl_options.h" #include "mongo/db/auth/user.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/initialize_operation_session_info.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/service_liaison_mock.h" -#include "mongo/db/session/initialize_operation_session_info.h" #include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_impl.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/sessions_collection.h" #include "mongo/db/session/sessions_collection_mock.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/net/sockaddr.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -75,7 +98,11 @@ class LogicalSessionIdTest : public ServiceContextTest { session, std::make_unique(SockAddr(), SockAddr())); auto localManagerState = std::make_unique(); managerState = localManagerState.get(); - managerState->setAuthzVersion(AuthorizationManager::schemaVersion26Final); + { + auto opCtxHolder = makeOperationContext(); + auto* opCtx = opCtxHolder.get(); + managerState->setAuthzVersion(opCtx, AuthorizationManager::schemaVersion26Final); + } auto authzManager = std::make_unique( getServiceContext(), std::move(localManagerState)); authzManager->setAuthEnabled(true); @@ -276,9 +303,27 @@ TEST_F(LogicalSessionIdTest, GenWithoutAuthedUser) { ASSERT_THROWS(makeLogicalSessionId(_opCtx.get()), AssertionException); } +OperationSessionInfoFromClient initializeOpSessionInfoWithRequestBody( + OperationContext* opCtx, + const BSONObj& requestBody, + bool requiresAuth, + bool attachToOpCtx, + bool isReplSetMemberOrMongos) { + auto opMsgRequest = OpMsgRequestBuilder::create( + DatabaseName::createDatabaseName_forTest(boost::none, "test_unused_dbname"), + requestBody, + BSONObj()); + return initializeOperationSessionInfo( + opCtx, opMsgRequest, requiresAuth, 
attachToOpCtx, isReplSetMemberOrMongos); +} + TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_NoSessionIdNoTransactionNumber) { addSimpleUser(UserName("simple", "test")); - initializeOperationSessionInfo(_opCtx.get(), BSON("TestCmd" << 1), true, true, true); + initializeOpSessionInfoWithRequestBody(_opCtx.get(), + BSON("TestCmd" << 1), + true /* requiresAuth */, + true /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */); ASSERT(!_opCtx->getLogicalSessionId()); ASSERT(!_opCtx->getTxnNumber()); @@ -289,12 +334,13 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdNoTransacti LogicalSessionFromClient lsid{}; lsid.setId(UUID::gen()); - initializeOperationSessionInfo(_opCtx.get(), - BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "OtherField" - << "TestField"), - true, - true, - true); + initializeOpSessionInfoWithRequestBody(_opCtx.get(), + BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() + << "OtherField" + << "TestField"), + true /* requiresAuth */, + true /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */); ASSERT(_opCtx->getLogicalSessionId()); ASSERT_EQ(lsid.getId(), _opCtx->getLogicalSessionId()->getId()); @@ -304,15 +350,15 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdNoTransacti TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_MissingSessionIdWithTransactionNumber) { addSimpleUser(UserName("simple", "test")); - ASSERT_THROWS_CODE( - initializeOperationSessionInfo(_opCtx.get(), - BSON("TestCmd" << 1 << "txnNumber" << 100LL << "OtherField" - << "TestField"), - true, - true, - true), - AssertionException, - ErrorCodes::InvalidOptions); + ASSERT_THROWS_CODE(initializeOpSessionInfoWithRequestBody( + _opCtx.get(), + BSON("TestCmd" << 1 << "txnNumber" << 100LL << "OtherField" + << "TestField"), + true /* requiresAuth */, + true /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */), + AssertionException, + ErrorCodes::InvalidOptions); } TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdAndTransactionNumber) { @@ -320,13 +366,13 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SessionIdAndTransact LogicalSessionFromClient lsid; lsid.setId(UUID::gen()); - initializeOperationSessionInfo(_opCtx.get(), - BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" - << 100LL << "OtherField" - << "TestField"), - true, - true, - true); + initializeOpSessionInfoWithRequestBody(_opCtx.get(), + BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() + << "txnNumber" << 100LL << "OtherField" + << "TestField"), + true /* requiresAuth */, + true /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */); ASSERT(_opCtx->getLogicalSessionId()); ASSERT_EQ(lsid.getId(), _opCtx->getLogicalSessionId()->getId()); @@ -341,13 +387,13 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IsReplSetMemberOrMon lsid.setId(UUID::gen()); ASSERT_THROWS_CODE( - initializeOperationSessionInfo(_opCtx.get(), - BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" - << 100LL << "OtherField" - << "TestField"), - true, - true, - false), + initializeOpSessionInfoWithRequestBody( + _opCtx.get(), + BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField" + << "TestField"), + true /* requiresAuth */, + true /* attachToOpCtx */, + false /* isReplSetMemberOrMongos */), AssertionException, ErrorCodes::IllegalOperation); } @@ -359,13 +405,13 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IgnoresInfoIfNoCache LogicalSessionCache::set(_opCtx->getServiceContext(), 
nullptr); - auto sessionInfo = initializeOperationSessionInfo( + auto sessionInfo = initializeOpSessionInfoWithRequestBody( _opCtx.get(), BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField" << "TestField"), - true, - true, - true); + true /* requiresAuth */, + true /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */); ASSERT(sessionInfo.getSessionId() == boost::none); ASSERT(sessionInfo.getTxnNumber() == boost::none); ASSERT(sessionInfo.getStartTransaction() == boost::none); @@ -377,13 +423,13 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_IgnoresInfoIfDoNotAt LogicalSessionFromClient lsid; lsid.setId(UUID::gen()); - auto sessionInfo = initializeOperationSessionInfo( + auto sessionInfo = initializeOpSessionInfoWithRequestBody( _opCtx.get(), BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL << "OtherField" << "TestField"), - true, - false, - true); + true /* requiresAuth */, + false /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */); ASSERT(sessionInfo.getSessionId() == boost::none); ASSERT(sessionInfo.getTxnNumber() == boost::none); @@ -402,12 +448,12 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_VerifyUIDEvenIfDoNot auto invalidDigest = SHA256Block::computeHash({ConstDataRange("hacker")}); lsid.setUid(invalidDigest); - ASSERT_THROWS_CODE(initializeOperationSessionInfo( + ASSERT_THROWS_CODE(initializeOpSessionInfoWithRequestBody( _opCtx.get(), BSON("TestCmd" << 1 << "lsid" << lsid.toBSON() << "txnNumber" << 100LL), - true, - false, - true), + true /* requiresAuth */, + false /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */), AssertionException, ErrorCodes::Unauthorized); } @@ -428,7 +474,11 @@ TEST_F(LogicalSessionIdTest, InitializeOperationSessionInfo_SendingInfoFailsInDi commandBuilder.appendElements(param); ASSERT_THROWS_CODE( - initializeOperationSessionInfo(_opCtx.get(), commandBuilder.obj(), true, true, true), + initializeOpSessionInfoWithRequestBody(_opCtx.get(), + commandBuilder.obj(), + true /* requiresAuth */, + true /* attachToOpCtx */, + true /* isReplSetMemberOrMongos */), AssertionException, 50891); } diff --git a/src/mongo/db/session/session_catalog.cpp b/src/mongo/db/session/session_catalog.cpp index 0537484c4f5e6..c17afbf56b8d2 100644 --- a/src/mongo/db/session/session_catalog.cpp +++ b/src/mongo/db/session/session_catalog.cpp @@ -28,16 +28,33 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/session_catalog.h" - +#include +#include +#include +#include +#include +#include #include +#include +#include + +#include +#include +#include -#include "mongo/db/server_options.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWrite @@ -52,6 +69,16 @@ const auto operationSessionDecoration = MONGO_FAIL_POINT_DEFINE(hangAfterIncrementingNumWaitingToCheckOut); +std::string provenanceToString(SessionCatalog::Provenance provenance) { + switch (provenance) { + case SessionCatalog::Provenance::kRouter: + return "router"; + case 
SessionCatalog::Provenance::kParticipant: + return "participant"; + } + MONGO_UNREACHABLE; +} + } // namespace SessionCatalog::~SessionCatalog() { @@ -314,10 +341,11 @@ SessionCatalog::SessionRuntimeInfo* SessionCatalog::_getOrCreateSessionRuntimeIn return sri; } -void SessionCatalog::_releaseSession(SessionRuntimeInfo* sri, - Session* session, - boost::optional killToken, - boost::optional clientTxnNumberStarted) { +void SessionCatalog::_releaseSession( + SessionRuntimeInfo* sri, + Session* session, + boost::optional killToken, + boost::optional clientTxnNumberStarted) { stdx::unique_lock ul(_mutex); // Make sure we have exactly the same session on the map and that it is still associated with an @@ -340,29 +368,34 @@ void SessionCatalog::_releaseSession(SessionRuntimeInfo* sri, std::vector eagerlyReapedSessions; if (clientTxnNumberStarted.has_value()) { + auto [txnNumber, provenance] = *clientTxnNumberStarted; + // Since the given txnNumber successfully started, we know any child sessions with older // txnNumbers can be discarded. This needed to wait until a transaction started because that // can fail, e.g. if the active transaction is prepared. + auto workerFn = _makeSessionWorkerFnForEagerReap(service, txnNumber, provenance); auto numReaped = stdx::erase_if(sri->childSessions, [&](auto&& it) { ObservableSession osession(ul, sri, &it.second); - if (it.first.getTxnNumber() && *it.first.getTxnNumber() < *clientTxnNumberStarted) { - osession.markForReap(ObservableSession::ReapMode::kExclusive); - } + workerFn(osession); - bool willReap = osession._shouldBeReaped(); + bool willReap = osession._shouldBeReaped() && + (osession._reapMode == ObservableSession::ReapMode::kExclusive); if (willReap) { eagerlyReapedSessions.push_back(std::move(it.first)); } return willReap; }); + sri->lastClientTxnNumberStarted = txnNumber; + LOGV2_DEBUG(6685200, 4, "Erased child sessions", "releasedLsid"_attr = session->getSessionId(), - "clientTxnNumber"_attr = *clientTxnNumberStarted, + "clientTxnNumber"_attr = txnNumber, "childSessionsRemaining"_attr = sri->childSessions.size(), - "numReaped"_attr = numReaped); + "numReaped"_attr = numReaped, + "provenance"_attr = provenanceToString(provenance)); } invariant(ul); @@ -373,6 +406,19 @@ void SessionCatalog::_releaseSession(SessionRuntimeInfo* sri, } } +SessionCatalog::ScanSessionsCallbackFn SessionCatalog::_defaultMakeSessionWorkerFnForEagerReap( + ServiceContext* service, TxnNumber clientTxnNumberStarted, Provenance provenance) { + return [clientTxnNumberStarted](ObservableSession& osession) { + // If a higher txnNumber has been seen for a client and started a transaction, assume any + // child sessions for lower transactions have been superseded and can be reaped. + const auto& transactionSessionId = osession.getSessionId(); + if (transactionSessionId.getTxnNumber() && + *transactionSessionId.getTxnNumber() < clientTxnNumberStarted) { + osession.markForReap(ObservableSession::ReapMode::kExclusive); + } + }; +} + Session* SessionCatalog::SessionRuntimeInfo::getSession(WithLock, const LogicalSessionId& lsid) { if (isParentSessionId(lsid)) { // We should have already compared the parent lsid when we found this SRI. @@ -393,16 +439,9 @@ SessionCatalog::KillToken ObservableSession::kill(ErrorCodes::Error reason) cons ++_sri->killsRequested; if (firstKiller && hasCurrentOperation()) { - // Interrupt the current OperationContext if its running on the transaction session - // that is being killed or if we are killing the parent transaction session. 
invariant(_clientLock.owns_lock()); - const auto checkedOutLsid = _sri->checkoutOpCtx->getLogicalSessionId(); - const auto lsidToKill = getSessionId(); - const bool isKillingParentSession = isParentSessionId(lsidToKill); - if (isKillingParentSession || (checkedOutLsid == lsidToKill)) { - const auto serviceContext = _sri->checkoutOpCtx->getServiceContext(); - serviceContext->killOperation(_clientLock, _sri->checkoutOpCtx, reason); - } + const auto serviceContext = _sri->checkoutOpCtx->getServiceContext(); + serviceContext->killOperation(_clientLock, _sri->checkoutOpCtx, reason); } return SessionCatalog::KillToken(getSessionId()); @@ -520,9 +559,10 @@ void OperationContextSession::checkOut(OperationContext* opCtx) { checkedOutSession.emplace(std::move(scopedCheckedOutSession)); } -void OperationContextSession::observeNewTxnNumberStarted(OperationContext* opCtx, - const LogicalSessionId& lsid, - TxnNumber txnNumber) { +void OperationContextSession::observeNewTxnNumberStarted( + OperationContext* opCtx, + const LogicalSessionId& lsid, + SessionCatalog::TxnNumberAndProvenance txnNumberAndProvenance) { auto& checkedOutSession = operationSessionDecoration(opCtx); invariant(checkedOutSession); @@ -530,7 +570,8 @@ void OperationContextSession::observeNewTxnNumberStarted(OperationContext* opCtx 4, "Observing new retryable write number started on session", "lsid"_attr = lsid, - "txnNumber"_attr = txnNumber); + "txnNumber"_attr = txnNumberAndProvenance.first, + "provenance"_attr = txnNumberAndProvenance.second); const auto& checkedOutLsid = (*checkedOutSession)->getSessionId(); if (isParentSessionId(lsid)) { @@ -540,7 +581,7 @@ void OperationContextSession::observeNewTxnNumberStarted(OperationContext* opCtx // parent. This is safe because both share the same SessionRuntimeInfo. dassert(lsid == checkedOutLsid || lsid == *getParentSessionId(checkedOutLsid)); - checkedOutSession->observeNewClientTxnNumberStarted(txnNumber); + checkedOutSession->observeNewClientTxnNumberStarted(txnNumberAndProvenance); } else if (isInternalSessionForRetryableWrite(lsid)) { // Observing a new internal transaction on a retryable session. @@ -548,7 +589,8 @@ void OperationContextSession::observeNewTxnNumberStarted(OperationContext* opCtx // directly. 
dassert(lsid == checkedOutLsid); - checkedOutSession->observeNewClientTxnNumberStarted(*lsid.getTxnNumber()); + checkedOutSession->observeNewClientTxnNumberStarted( + {*lsid.getTxnNumber(), txnNumberAndProvenance.second}); } } diff --git a/src/mongo/db/session/session_catalog.h b/src/mongo/db/session/session_catalog.h index 2f26798ee697b..374ce73855dc2 100644 --- a/src/mongo/db/session/session_catalog.h +++ b/src/mongo/db/session/session_catalog.h @@ -29,20 +29,34 @@ #pragma once +#include #include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session.h" #include "mongo/db/session/session_killer.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/functional.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -60,8 +74,20 @@ class SessionCatalog { friend class OperationContextSession; public: + /** + * Represents which role the SessionCatalog was accessed in. The participant role for actions + * from a data bearing node (e.g. mongod servicing a local command) and router for a routing + * node (e.g. a mongos command, or mongod running a mongos command). + */ + enum class Provenance { kParticipant, kRouter }; + + using TxnNumberAndProvenance = std::pair; + + using ScanSessionsCallbackFn = std::function; using OnEagerlyReapedSessionsFn = unique_function)>; + using MakeSessionWorkerFnForEagerReap = + unique_function; class ScopedCheckedOutSession; class SessionToKill; @@ -95,8 +121,6 @@ class SessionCatalog { */ SessionToKill checkOutSessionForKill(OperationContext* opCtx, KillToken killToken); - using ScanSessionsCallbackFn = std::function; - /** * Iterates through the SessionCatalog under the SessionCatalog mutex and applies 'workerFn' to * each Session which matches the specified 'lsid' or 'matcher'. Does not support reaping. @@ -142,12 +166,15 @@ class SessionCatalog { size_t size() const; /** - * Registers a callback to run when sessions are "eagerly" reaped from the catalog, ie without - * waiting for a logical session cache refresh. + * Registers two callbacks: one to run when sessions are "eagerly" reaped from the catalog, ie + * without waiting for a logical session cache refresh, and another to override the logic that + * determines when to eagerly reap a session. */ - void setOnEagerlyReapedSessionsFn(OnEagerlyReapedSessionsFn fn) { + void setEagerReapSessionsFns(OnEagerlyReapedSessionsFn onEagerlyReapedSessionsFn, + MakeSessionWorkerFnForEagerReap makeWorkerFnForEagerReap) { invariant(!_onEagerlyReapedSessionsFn); - _onEagerlyReapedSessionsFn = std::move(fn); + _onEagerlyReapedSessionsFn = std::move(onEagerlyReapedSessionsFn); + _makeSessionWorkerFnForEagerReap = std::move(makeWorkerFnForEagerReap); } private: @@ -169,6 +196,11 @@ class SessionCatalog { Session parentSession; LogicalSessionIdMap childSessions; + // The latest client txnNumber that has successfully started running on this logical + // session. 
This is set to kUninitializedTxnNumber initially, and is updated every time an + // opCtx that starts a new client txnNumber checks this logical session back in. + TxnNumber lastClientTxnNumberStarted = kUninitializedTxnNumber; + // Signaled when the state becomes available. Uses the transaction table's mutex to protect // the state transitions. stdx::condition_variable availableCondVar; @@ -188,6 +220,12 @@ class SessionCatalog { }; using SessionRuntimeInfoMap = LogicalSessionIdMap>; + /** + * Returns a callback with the default logic used to decide if a session may be reaped early. + */ + static ScanSessionsCallbackFn _defaultMakeSessionWorkerFnForEagerReap( + ServiceContext* service, TxnNumber clientTxnNumberStarted, Provenance provenance); + /** * Blocking method, which checks-out the session with the given 'lsid'. Called inside * '_checkOutSession' and 'checkOutSessionForKill'. @@ -220,13 +258,19 @@ class SessionCatalog { void _releaseSession(SessionRuntimeInfo* sri, Session* session, boost::optional killToken, - boost::optional clientTxnNumberStarted); + boost::optional clientTxnNumberStarted); // Called when sessions are reaped from memory "eagerly" ie directly by the SessionCatalog // without waiting for a logical session cache refresh. Note this is set at process startup // before multi-threading is enabled, so no synchronization is necessary. boost::optional _onEagerlyReapedSessionsFn; + // Returns a callback used to decide if a session may be "eagerly" reaped from the session + // catalog without waiting for typical logical session expiration. May be overwritten, but only + // at process startup before multi-threading is enabled, so no synchronization is necessary. + MakeSessionWorkerFnForEagerReap _makeSessionWorkerFnForEagerReap = + _defaultMakeSessionWorkerFnForEagerReap; + // Protects the state below mutable Mutex _mutex = MONGO_MAKE_LATCH(HierarchicalAcquisitionLevel(4), "SessionCatalog::_mutex"); @@ -253,7 +297,7 @@ class SessionCatalog::ScopedCheckedOutSession { ScopedCheckedOutSession(ScopedCheckedOutSession&& other) : _catalog(other._catalog), - _clientTxnNumberStarted(other._clientTxnNumberStarted), + _clientTxnNumberStartedAndProvenance(other._clientTxnNumberStartedAndProvenance), _sri(other._sri), _session(other._session), _killToken(std::move(other._killToken)) { @@ -267,7 +311,7 @@ class SessionCatalog::ScopedCheckedOutSession { ~ScopedCheckedOutSession() { if (_sri) { _catalog._releaseSession( - _sri, _session, std::move(_killToken), _clientTxnNumberStarted); + _sri, _session, std::move(_killToken), _clientTxnNumberStartedAndProvenance); } } @@ -291,8 +335,9 @@ class SessionCatalog::ScopedCheckedOutSession { return bool(_killToken); } - void observeNewClientTxnNumberStarted(TxnNumber txnNumber) { - _clientTxnNumberStarted = txnNumber; + void observeNewClientTxnNumberStarted( + SessionCatalog::TxnNumberAndProvenance txnNumberAndProvenance) { + _clientTxnNumberStartedAndProvenance = txnNumberAndProvenance; } private: @@ -302,8 +347,9 @@ class SessionCatalog::ScopedCheckedOutSession { // If this session began a retryable write or transaction while checked out, this is set to the // "client txnNumber" of that transaction, which is the top-level txnNumber for a retryable // write or transaction sent by a client or the txnNumber in the sessionId for a retryable - // child transaction. - boost::optional _clientTxnNumberStarted; + // child transaction, and the "provenance" of the number, ie whether the number came from the + // router or participant role. 
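
The member declared just below holds that (txnNumber, provenance) value for the lifetime of the checkout, while `SessionRuntimeInfo::lastClientTxnNumberStarted` above records the latest client txnNumber seen across checkouts. Here is a simplified sketch of the check-in comparison this enables; it is not the real `_releaseSession`, and the strictly-newer rule and names are illustrative assumptions:

```cpp
#include <cstdint>
#include <optional>
#include <utility>

using TxnNumber = std::int64_t;
enum class Provenance { kParticipant, kRouter };

struct SessionRuntimeInfoSketch {
    TxnNumber lastClientTxnNumberStarted = -1;  // stands in for kUninitializedTxnNumber
};

// On check-in, advance the shared runtime info only if this checkout started a newer
// client txnNumber; the return value models "eager-reap candidates may now exist".
bool releaseAndMaybeTriggerEagerReap(
    SessionRuntimeInfoSketch& sri,
    const std::optional<std::pair<TxnNumber, Provenance>>& clientTxnNumberStarted) {
    if (!clientTxnNumberStarted ||
        clientTxnNumberStarted->first <= sri.lastClientTxnNumberStarted) {
        return false;
    }
    sri.lastClientTxnNumberStarted = clientTxnNumberStarted->first;
    return true;
}

int main() {
    SessionRuntimeInfoSketch sri;
    std::pair<TxnNumber, Provenance> first{5, Provenance::kParticipant};
    std::pair<TxnNumber, Provenance> stale{4, Provenance::kRouter};
    bool advanced = releaseAndMaybeTriggerEagerReap(sri, first);  // true: 5 is newer
    bool again = releaseAndMaybeTriggerEagerReap(sri, stale);     // false: 4 is older
    return advanced && !again ? 0 : 1;
}
```
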
+ boost::optional _clientTxnNumberStartedAndProvenance; SessionCatalog::SessionRuntimeInfo* _sri; Session* _session; @@ -364,6 +410,14 @@ class ObservableSession { return _session->_sessionId; } + /** + * The latest client txnNumber that has successfully started running on the logical session that + * this transaction session corresponds to. + */ + TxnNumber getLastClientTxnNumberStarted() const { + return _sri->lastClientTxnNumberStarted; + } + /** * Returns true if there is an operation currently running on the logical session that this * transaction session corresponds to. @@ -510,9 +564,10 @@ class OperationContextSession { * Notifies the session catalog when a new transaction/retryable write is begun on the operation * context's checked out session. */ - static void observeNewTxnNumberStarted(OperationContext* opCtx, - const LogicalSessionId& lsid, - TxnNumber txnNumber); + static void observeNewTxnNumberStarted( + OperationContext* opCtx, + const LogicalSessionId& lsid, + SessionCatalog::TxnNumberAndProvenance txnNumberAndProvenance); private: OperationContext* const _opCtx; diff --git a/src/mongo/db/session/session_catalog_mongod.cpp b/src/mongo/db/session/session_catalog_mongod.cpp index ea467cf0e67e0..bef1ddc24f7c3 100644 --- a/src/mongo/db/session/session_catalog_mongod.cpp +++ b/src/mongo/db/session/session_catalog_mongod.cpp @@ -30,29 +30,67 @@ #include "mongo/db/session/session_catalog_mongod.h" +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/create_indexes_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/internal_transactions_reap_service.h" +#include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session_killer.h" #include "mongo/db/session/session_txn_record_gen.h" #include "mongo/db/session/sessions_collection.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/transaction/transaction_participant.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/s/transaction_router.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include 
"mongo/util/concurrency/admission_context.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction @@ -110,6 +148,13 @@ void killSessionTokens(OperationContext* opCtx, invariant(status); ThreadClient tc("Kill-Sessions", service); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto uniqueOpCtx = tc->makeOperationContext(); const auto opCtx = uniqueOpCtx.get(); const auto catalog = SessionCatalog::get(opCtx); @@ -126,9 +171,10 @@ void disallowDirectWritesUnderSession(OperationContext* opCtx) { bool isReplSet = replCoord->getReplicationMode() == repl::ReplicationCoordinator::modeReplSet; if (isReplSet) { uassert(40528, - str::stream() << "Direct writes against " - << NamespaceString::kSessionTransactionsTableNamespace - << " cannot be performed using a transaction or on a session.", + str::stream() + << "Direct writes against " + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg() + << " cannot be performed using a transaction or on a session.", !opCtx->getLogicalSessionId()); } } @@ -377,11 +423,12 @@ void createTransactionTable(OperationContext* opCtx) { // empty collection. This can happen after a failover because the collection and index // creation are recorded as separate oplog entries. } else { - uassertStatusOKWithContext(createCollectionStatus, - str::stream() - << "Failed to create the " - << NamespaceString::kSessionTransactionsTableNamespace.ns() - << " collection"); + uassertStatusOKWithContext( + createCollectionStatus, + str::stream() + << "Failed to create the " + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg() + << " collection"); } auto indexSpec = MongoDSessionCatalog::getConfigTxnPartialIndexSpec(); @@ -391,7 +438,8 @@ void createTransactionTable(OperationContext* opCtx) { uassertStatusOKWithContext( createIndexStatus, str::stream() << "Failed to create partial index for the " - << NamespaceString::kSessionTransactionsTableNamespace.ns() << " collection"); + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg() + << " collection"); } void createRetryableFindAndModifyTable(OperationContext* opCtx) { @@ -404,9 +452,10 @@ void createRetryableFindAndModifyTable(OperationContext* opCtx) { } uassertStatusOKWithContext(status, - str::stream() << "Failed to create the " - << NamespaceString::kConfigImagesNamespace.ns() - << " collection"); + str::stream() + << "Failed to create the " + << NamespaceString::kConfigImagesNamespace.toStringForErrorMsg() + << " collection"); } @@ -469,6 +518,16 @@ MongoDSessionCatalog* MongoDSessionCatalog::get(ServiceContext* service) { void MongoDSessionCatalog::set(ServiceContext* service, std::unique_ptr sessionCatalog) { getMongoDSessionCatalog(service) = std::move(sessionCatalog); + + // Set mongod specific behaviors on the SessionCatalog. 
+ SessionCatalog::get(service)->setEagerReapSessionsFns( + InternalTransactionsReapService::onEagerlyReapedSessions, + [](ServiceContext* service, + TxnNumber clientTxnNumberStarted, + SessionCatalog::Provenance provenance) { + return MongoDSessionCatalog::get(service)->makeSessionWorkerFnForEagerReap( + clientTxnNumberStarted, provenance); + }); } BSONObj MongoDSessionCatalog::getConfigTxnPartialIndexSpec() { @@ -510,6 +569,12 @@ void MongoDSessionCatalog::onStepUp(OperationContext* opCtx) { { // Create a new opCtx because we need an empty locker to refresh the locks. auto newClient = opCtx->getServiceContext()->makeClient("restore-prepared-txn"); + + { + stdx::lock_guard lk(*newClient.get()); + newClient.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(newClient); for (const auto& sessionInfo : sessionsToReacquireLocks) { auto newOpCtx = cc().makeOperationContext(); @@ -548,11 +613,7 @@ void MongoDSessionCatalog::onStepUp(OperationContext* opCtx) { abortInProgressTransactions(opCtx, this, _ti.get()); createTransactionTable(opCtx); - // (Ignore FCV check): This is intentional to try creating the image_collection collection if - // the feature flag is ever enabled. - if (repl::feature_flags::gFeatureFlagRetryableFindAndModify.isEnabledAndIgnoreFCVUnsafe()) { - createRetryableFindAndModifyTable(opCtx); - } + createRetryableFindAndModifyTable(opCtx); } boost::optional MongoDSessionCatalog::getTransactionTableUUID(OperationContext* opCtx) { @@ -596,13 +657,21 @@ void MongoDSessionCatalog::observeDirectWriteToConfigTransactions(OperationConte LogicalSessionId::parse(IDLParserContext("lsid"), singleSessionDoc["_id"].Obj()); catalog->scanSession(lsid, [&, ti = _ti.get()](const ObservableSession& session) { uassert(ErrorCodes::PreparedTransactionInProgress, - str::stream() << "Cannot modify the entry for session " - << session.getSessionId().getId() + str::stream() << "Cannot modify the entry for session " << lsid.getId() << " because it is in the prepared state", !ti->isTransactionPrepared(session)); - opCtx->recoveryUnit()->registerChange( - std::make_unique(ti, session.kill())); + // Internal sessions for an old retryable write are marked as reapable as soon as a + // retryable write or transaction with a newer txnNumber starts. Therefore, when deleting + // the config.transactions doc for such internal sessions, the corresponding transaction + // sessions should not be interrupted since they are guaranteed to be performing a + // transaction or retryable write for newer txnNumber. + bool shouldRegisterKill = !isInternalSessionForRetryableWrite(lsid) || + *lsid.getTxnNumber() >= session.getLastClientTxnNumberStarted(); + if (shouldRegisterKill) { + opCtx->recoveryUnit()->registerChange(std::make_unique( + ti, session.kill(ErrorCodes::Interrupted))); + } }); } @@ -634,7 +703,7 @@ int MongoDSessionCatalog::reapSessionsOlderThan(OperationContext* opCtx, // around the fact that the logical sessions cache is not registered to listen for replication // state changes. 
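
Stepping back to the `observeDirectWriteToConfigTransactions` change earlier in this hunk: direct deletes of `config.transactions` entries now skip interrupting transaction sessions whose entry belongs to an already-superseded internal retryable-write session. A small worked example of that predicate, with plain C++ stand-ins for the MongoDB types:

```cpp
#include <cassert>
#include <cstdint>
#include <optional>

using TxnNumber = std::int64_t;

// Mirrors the shouldRegisterKill condition above: only interrupt the in-memory session
// unless the deleted entry is an internal retryable-write session that is already
// superseded by a newer client txnNumber on the same logical session.
bool shouldRegisterKill(bool isInternalSessionForRetryableWrite,
                        std::optional<TxnNumber> lsidTxnNumber,
                        TxnNumber lastClientTxnNumberStarted) {
    return !isInternalSessionForRetryableWrite ||
        *lsidTxnNumber >= lastClientTxnNumberStarted;
}

int main() {
    // Internal session for txnNumber 4, but the logical session already started 5:
    // the entry is stale, so the running transaction session is left alone.
    assert(!shouldRegisterKill(true, TxnNumber{4}, TxnNumber{5}));
    // Deleting a parent session document always registers the kill.
    assert(shouldRegisterKill(false, std::nullopt, TxnNumber{5}));
}
```
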
const auto replCoord = repl::ReplicationCoordinator::get(opCtx); - if (!replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, DatabaseName::kConfig.toString())) + if (!replCoord->canAcceptWritesForDatabase_UNSAFE(opCtx, DatabaseName::kConfig)) return 0; return removeExpiredTransactionSessionsFromDisk( @@ -681,6 +750,11 @@ void MongoDSessionCatalog::checkOutUnscopedSession(OperationContext* opCtx) { _checkOutUnscopedSession(opCtx, _ti.get()); } +SessionCatalog::ScanSessionsCallbackFn MongoDSessionCatalog::makeSessionWorkerFnForEagerReap( + TxnNumber clientTxnNumberStarted, SessionCatalog::Provenance provenance) { + return _ti->makeSessionWorkerFnForEagerReap(clientTxnNumberStarted, provenance); +} + MongoDOperationContextSession::MongoDOperationContextSession( OperationContext* opCtx, MongoDSessionCatalogTransactionInterface* ti) : _operationContextSession(opCtx), _ti(ti) { diff --git a/src/mongo/db/session/session_catalog_mongod.h b/src/mongo/db/session/session_catalog_mongod.h index 910843c07447f..4ee652a276cc9 100644 --- a/src/mongo/db/session/session_catalog_mongod.h +++ b/src/mongo/db/session/session_catalog_mongod.h @@ -29,8 +29,22 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod_transaction_interface.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -182,6 +196,13 @@ class MongoDSessionCatalog { OperationContextSession::CheckInReason reason); void checkOutUnscopedSession(OperationContext* opCtx); + /** + * Returns a function that should be used to determine when a session can be eagerly reaped from + * the SessionCatalog on a mongod. + */ + SessionCatalog::ScanSessionsCallbackFn makeSessionWorkerFnForEagerReap( + TxnNumber clientTxnNumberStarted, SessionCatalog::Provenance provenance); + private: std::unique_ptr _ti; }; diff --git a/src/mongo/db/session/session_catalog_mongod_test.cpp b/src/mongo/db/session/session_catalog_mongod_test.cpp index 62fbe5d02e2a4..5d6865abaafb7 100644 --- a/src/mongo/db/session/session_catalog_mongod_test.cpp +++ b/src/mongo/db/session/session_catalog_mongod_test.cpp @@ -27,16 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/session/sessions_collection.h" #include "mongo/db/session/sessions_collection_mock.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/session/session_catalog_mongod_transaction_interface.h b/src/mongo/db/session/session_catalog_mongod_transaction_interface.h index 25baa540d0466..21f4993a8428d 100644 --- a/src/mongo/db/session/session_catalog_mongod_transaction_interface.h +++ b/src/mongo/db/session/session_catalog_mongod_transaction_interface.h @@ -30,11 +30,10 @@ #pragma once #include "mongo/db/operation_context.h" -#include "mongo/db/session/logical_session_id.h" // for TxnNumberAndRetryCounter -#include "mongo/db/session/logical_session_id_gen.h" // for OperationSessionInfo -#include "mongo/db/session/session_catalog.h" // for ObservableSession and ScanSessionsCallbackFn -#include "mongo/db/session/session_txn_record_gen.h" // for SessionTxnRecord -#include "mongo/db/transaction/transaction_participant.h" // for SessionToKill +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/session_catalog.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/transaction/transaction_participant.h" namespace mongo { @@ -128,6 +127,13 @@ class MongoDSessionCatalogTransactionInterface { virtual ScanSessionsCallbackFn makeSessionWorkerFnForStepUp( std::vector* sessionKillTokens, std::vector* sessionsToReacquireLocks) = 0; + + /** + * Returns a function that should be used to determine when a session can be eagerly reaped from + * the SessionCatalog on a mongod. 
+ */ + virtual ScanSessionsCallbackFn makeSessionWorkerFnForEagerReap( + TxnNumber clientTxnNumberStarted, SessionCatalog::Provenance provenance) = 0; }; } // namespace mongo diff --git a/src/mongo/db/session/session_catalog_test.cpp b/src/mongo/db/session/session_catalog_test.cpp index 3d333e4c8236c..84b15714a9107 100644 --- a/src/mongo/db/session/session_catalog_test.cpp +++ b/src/mongo/db/session/session_catalog_test.cpp @@ -28,21 +28,28 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include #include +#include +#include -#include "mongo/db/cancelable_operation_context.h" +#include "mongo/base/string_data.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/session_catalog.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" #include "mongo/stdx/future.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/concurrency/thread_pool.h" -#include "mongo/util/scopeguard.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -1149,7 +1156,7 @@ TEST_F(SessionCatalogTest, KillSessionWhenChildSessionIsNotCheckedOut) { runTest(parentLsid, makeLogicalSessionIdWithTxnUUIDForTest(parentLsid)); } -TEST_F(SessionCatalogTest, KillingChildSessionDoesNotInterruptParentSession) { +TEST_F(SessionCatalogTest, KillingChildSessionInterruptsParentSession) { auto runTest = [&](const LogicalSessionId& parentLsid, const LogicalSessionId& childLsid) { auto killToken = [this, &parentLsid, &childLsid] { assertCanCheckoutSession(childLsid); @@ -1160,8 +1167,9 @@ TEST_F(SessionCatalogTest, KillingChildSessionDoesNotInterruptParentSession) { auto killToken = catalog()->killSession(childLsid); - // Make sure the owning operation context is not interrupted. - opCtx->checkForInterrupt(); + // Make sure the owning operation context is interrupted. + ASSERT_THROWS_CODE( + opCtx->checkForInterrupt(), AssertionException, ErrorCodes::Interrupted); // Make sure that the checkOutForKill call will wait for the owning operation context to // check the session back in diff --git a/src/mongo/db/session/session_killer.cpp b/src/mongo/db/session/session_killer.cpp index fa3c6adda2555..16cfcf7833a3b 100644 --- a/src/mongo/db/session/session_killer.cpp +++ b/src/mongo/db/session/session_killer.cpp @@ -27,16 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/session_killer.h" - +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session_killer.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/destructor_guard.h" -#include "mongo/util/scopeguard.h" namespace mongo { @@ -51,6 +61,12 @@ SessionKiller::SessionKiller(ServiceContext* sc, KillFunc killer) ThreadClient tc("SessionKiller", sc); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + stdx::unique_lock lk(_mutex); // While we're not in shutdown diff --git a/src/mongo/db/session/session_killer.h b/src/mongo/db/session/session_killer.h index 59e27dc7fc715..b73c058f52c77 100644 --- a/src/mongo/db/session/session_killer.h +++ b/src/mongo/db/session/session_killer.h @@ -29,17 +29,27 @@ #pragma once +#include #include +#include #include #include +#include #include #include #include "mongo/base/status_with.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/kill_sessions_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" #include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/net/hostandport.h" @@ -88,7 +98,7 @@ class SessionKiller { private: KillAllSessionsByPatternSet _patterns; LogicalSessionIdMap _lsids; - stdx::unordered_map _uids; + stdx::unordered_map _uids; const KillAllSessionsByPattern* _killAll = nullptr; }; diff --git a/src/mongo/db/session/sessions_collection.cpp b/src/mongo/db/session/sessions_collection.cpp index 111f029288fa2..81be976a99ddd 100644 --- a/src/mongo/db/session/sessions_collection.cpp +++ b/src/mongo/db/session/sessions_collection.cpp @@ -27,22 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/sessions_collection.h" - +#include +#include #include -#include #include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/client/dbclient_base.h" #include "mongo/db/create_indexes_gen.h" -#include "mongo/db/ops/write_ops.h" #include "mongo/db/repl/read_concern_args.h" #include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/sessions_collection.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/session/sessions_collection.h b/src/mongo/db/session/sessions_collection.h index 34542c9a68216..8b36a3d2a6562 100644 --- a/src/mongo/db/session/sessions_collection.h +++ b/src/mongo/db/session/sessions_collection.h @@ -30,8 +30,13 @@ #pragma once #include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" namespace mongo { diff --git a/src/mongo/db/session/sessions_collection_mock.cpp b/src/mongo/db/session/sessions_collection_mock.cpp index 793d996cd2966..9d7beb89849a7 100644 --- a/src/mongo/db/session/sessions_collection_mock.cpp +++ b/src/mongo/db/session/sessions_collection_mock.cpp @@ -28,15 +28,18 @@ */ #include +#include + +#include +#include #include "mongo/db/session/sessions_collection_mock.h" -#include "mongo/platform/basic.h" namespace mongo { MockSessionsCollectionImpl::MockSessionsCollectionImpl() - : _refresh([=](const LogicalSessionRecordSet& sessions) { _refreshSessions(sessions); }), - _remove([=](const LogicalSessionIdSet& sessions) { _removeRecords(sessions); }) {} + : _refresh([=, this](const LogicalSessionRecordSet& sessions) { _refreshSessions(sessions); }), + _remove([=, this](const LogicalSessionIdSet& sessions) { _removeRecords(sessions); }) {} void MockSessionsCollectionImpl::setRefreshHook(RefreshHook hook) { _refresh = std::move(hook); @@ -47,10 +50,10 @@ void MockSessionsCollectionImpl::setRemoveHook(RemoveHook hook) { } void MockSessionsCollectionImpl::clearHooks() { - _refresh = [=](const LogicalSessionRecordSet& sessions) { + _refresh = [=, this](const LogicalSessionRecordSet& sessions) { _refreshSessions(sessions); }; - _remove = [=](const LogicalSessionIdSet& sessions) { + _remove = [=, this](const LogicalSessionIdSet& sessions) { _removeRecords(sessions); }; } diff --git a/src/mongo/db/session/sessions_collection_mock.h b/src/mongo/db/session/sessions_collection_mock.h index cafa41e05f19d..0884fcfd7aa46 100644 --- a/src/mongo/db/session/sessions_collection_mock.h +++ b/src/mongo/db/session/sessions_collection_mock.h @@ -30,8 +30,12 @@ #pragma once #include +#include +#include +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/sessions_collection.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" diff --git a/src/mongo/db/session/sessions_collection_rs.cpp b/src/mongo/db/session/sessions_collection_rs.cpp index 037af5c01e85e..ac77c29a71741 
100644 --- a/src/mongo/db/session/sessions_collection_rs.cpp +++ b/src/mongo/db/session/sessions_collection_rs.cpp @@ -27,26 +27,36 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/sessions_collection_rs.h" - -#include +#include +#include #include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" -#include "mongo/client/authenticate.h" -#include "mongo/client/connection_string.h" +#include "mongo/client/connpool.h" +#include "mongo/client/internal_auth.h" #include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter_factory_impl.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/sessions_collection_rs.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" namespace mongo { @@ -86,7 +96,7 @@ bool SessionsCollectionRS::_isStandaloneOrPrimary(const NamespaceString& ns, auto coord = mongo::repl::ReplicationCoordinator::get(opCtx); - return coord->canAcceptWritesForDatabase(opCtx, ns.db()); + return coord->canAcceptWritesForDatabase(opCtx, ns.dbName()); } template @@ -154,7 +164,8 @@ void SessionsCollectionRS::checkSessionsCollectionExists(OperationContext* opCtx NamespaceString::kLogicalSessionsNamespace, includeBuildUUIDs, options); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << NamespaceString::kLogicalSessionsNamespace << " does not exist", + str::stream() << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() + << " does not exist", indexes.size() != 0u); auto index = std::find_if(indexes.begin(), indexes.end(), [](const BSONObj& index) { @@ -162,12 +173,12 @@ void SessionsCollectionRS::checkSessionsCollectionExists(OperationContext* opCtx }); uassert(ErrorCodes::IndexNotFound, - str::stream() << NamespaceString::kLogicalSessionsNamespace + str::stream() << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() << " does not have the required TTL index", index != indexes.end()); uassert(ErrorCodes::IndexOptionsConflict, - str::stream() << NamespaceString::kLogicalSessionsNamespace + str::stream() << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() << " currently has the incorrect timeout for the TTL index", index->hasField("expireAfterSeconds") && index->getField("expireAfterSeconds").Int() == diff --git a/src/mongo/db/session/sessions_collection_rs.h b/src/mongo/db/session/sessions_collection_rs.h index d88b32866c638..05a71dc9e8b3e 100644 --- a/src/mongo/db/session/sessions_collection_rs.h +++ b/src/mongo/db/session/sessions_collection_rs.h @@ -33,8 +33,10 @@ #include #include "mongo/client/connpool.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/session/sessions_collection.h" #include "mongo/platform/mutex.h" diff --git 
a/src/mongo/db/session/sessions_collection_standalone.cpp b/src/mongo/db/session/sessions_collection_standalone.cpp index ef63bd4b2e0c6..b1a3394b71c18 100644 --- a/src/mongo/db/session/sessions_collection_standalone.cpp +++ b/src/mongo/db/session/sessions_collection_standalone.cpp @@ -27,13 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/session/sessions_collection_standalone.h" - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/sessions_collection_standalone.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -60,9 +74,10 @@ void SessionsCollectionStandalone::setupSessionsCollection(OperationContext* opC BSONObj info; if (!client.runCommand(NamespaceString::kLogicalSessionsNamespace.dbName(), cmd, info)) { - uassertStatusOKWithContext(getStatusFromCommandResult(info), - str::stream() << "Failed to create " - << NamespaceString::kLogicalSessionsNamespace); + uassertStatusOKWithContext( + getStatusFromCommandResult(info), + str::stream() << "Failed to create " + << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg()); } } } @@ -76,7 +91,8 @@ void SessionsCollectionStandalone::checkSessionsCollectionExists(OperationContex NamespaceString::kLogicalSessionsNamespace, includeBuildUUIDs, options); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << NamespaceString::kLogicalSessionsNamespace << " does not exist", + str::stream() << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() + << " does not exist", indexes.size() != 0u); auto index = std::find_if(indexes.begin(), indexes.end(), [](const BSONObj& index) { @@ -84,12 +100,12 @@ void SessionsCollectionStandalone::checkSessionsCollectionExists(OperationContex }); uassert(ErrorCodes::IndexNotFound, - str::stream() << NamespaceString::kLogicalSessionsNamespace + str::stream() << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() << " does not have the required TTL index", index != indexes.end()); uassert(ErrorCodes::IndexOptionsConflict, - str::stream() << NamespaceString::kLogicalSessionsNamespace + str::stream() << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() << " currently has the incorrect timeout for the TTL index", index->hasField("expireAfterSeconds") && index->getField("expireAfterSeconds").Int() == diff --git a/src/mongo/db/set_change_stream_state_coordinator.cpp b/src/mongo/db/set_change_stream_state_coordinator.cpp index 2244669dd6e55..fc4ff6790f260 100644 --- a/src/mongo/db/set_change_stream_state_coordinator.cpp +++ b/src/mongo/db/set_change_stream_state_coordinator.cpp @@ -27,15 +27,37 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/set_change_stream_state_coordinator.h" - +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/change_stream_change_collection_manager.h" #include "mongo/db/change_stream_pre_images_collection_manager.h" #include "mongo/db/change_stream_state_gen.h" +#include "mongo/db/client.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/wait_for_majority_service.h" +#include "mongo/db/set_change_stream_state_coordinator.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/interruptible.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -98,7 +120,8 @@ class ChangeStreamStateCommandProcessor { auto& changeCollectionManager = ChangeStreamChangeCollectionManager::get(opCtx); changeCollectionManager.createChangeCollection(opCtx, tenantId); - ChangeStreamPreImagesCollectionManager::createPreImagesCollection(opCtx, tenantId); + ChangeStreamPreImagesCollectionManager::get(opCtx).createPreImagesCollection(opCtx, + tenantId); // Wait until the create requests are majority committed. waitForMajority(opCtx); @@ -112,7 +135,7 @@ class ChangeStreamStateCommandProcessor { auto& changeCollectionManager = ChangeStreamChangeCollectionManager::get(opCtx); changeCollectionManager.dropChangeCollection(opCtx, tenantId); - ChangeStreamPreImagesCollectionManager::dropPreImagesCollection(opCtx, tenantId); + ChangeStreamPreImagesCollectionManager::get(opCtx).dropPreImagesCollection(opCtx, tenantId); // Wait until the drop requests are majority committed. 
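
Earlier in this hunk the pre-images calls switch from static functions to an instance reached through `ChangeStreamPreImagesCollectionManager::get(opCtx)`; the `waitForMajority` call announced by the comment above continues right after this aside. A compact standalone model of that accessor pattern follows; names are illustrative, and in the server the instance lives as a `ServiceContext` decoration:

```cpp
#include <iostream>

struct ServiceContextSketch;

struct PreImagesManagerSketch {
    // Callers reach the per-service instance through get(...) instead of static functions.
    static PreImagesManagerSketch& get(ServiceContextSketch& service);
    void createPreImagesCollection() { std::cout << "create pre-images collection\n"; }
    void dropPreImagesCollection() { std::cout << "drop pre-images collection\n"; }
};

struct ServiceContextSketch {
    PreImagesManagerSketch preImagesManager;  // stands in for the decoration storage
};

PreImagesManagerSketch& PreImagesManagerSketch::get(ServiceContextSketch& service) {
    return service.preImagesManager;
}

int main() {
    ServiceContextSketch service;
    PreImagesManagerSketch::get(service).createPreImagesCollection();
    PreImagesManagerSketch::get(service).dropPreImagesCollection();
}
```
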
waitForMajority(opCtx); diff --git a/src/mongo/db/set_change_stream_state_coordinator.h b/src/mongo/db/set_change_stream_state_coordinator.h index 7ca1592dc808c..246fd3ac256bb 100644 --- a/src/mongo/db/set_change_stream_state_coordinator.h +++ b/src/mongo/db/set_change_stream_state_coordinator.h @@ -29,10 +29,24 @@ #pragma once -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/repl/primary_only_service.h" #include "mongo/db/repl/primary_only_service_util.h" +#include "mongo/db/service_context.h" #include "mongo/db/set_change_stream_state_coordinator_gen.h" +#include "mongo/executor/scoped_task_executor.h" +#include "mongo/platform/basic.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/db/shard_id.cpp b/src/mongo/db/shard_id.cpp index 39d66aa56a752..48249927f5130 100644 --- a/src/mongo/db/shard_id.cpp +++ b/src/mongo/db/shard_id.cpp @@ -29,7 +29,7 @@ #include "mongo/db/shard_id.h" -#include +#include "mongo/base/error_codes.h" namespace mongo { diff --git a/src/mongo/db/shard_id.h b/src/mongo/db/shard_id.h index 798f8e427a169..2f8803bddd174 100644 --- a/src/mongo/db/shard_id.h +++ b/src/mongo/db/shard_id.h @@ -29,9 +29,12 @@ #pragma once +#include #include #include +#include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/util/builder.h" diff --git a/src/mongo/db/shard_id_test.cpp b/src/mongo/db/shard_id_test.cpp index 6b8460c6ff431..558f010e5a968 100644 --- a/src/mongo/db/shard_id_test.cpp +++ b/src/mongo/db/shard_id_test.cpp @@ -29,9 +29,12 @@ #include "mongo/db/shard_id.h" +#include + #include "mongo/base/string_data.h" -#include "mongo/platform/basic.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/shard_role.cpp b/src/mongo/db/shard_role.cpp index c1df193e9ac4a..8220b0f434186 100644 --- a/src/mongo/db/shard_role.cpp +++ b/src/mongo/db/shard_role.cpp @@ -29,101 +29,153 @@ #include "mongo/db/shard_role.h" -#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/catalog_helper.h" -#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_uuid_mismatch.h" -#include "mongo/db/catalog/collection_uuid_mismatch_info.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/s/collection_sharding_runtime.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/s/collection_sharding_state.h" #include 
"mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/capped_snapshots.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot_helper.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" #include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { -namespace { -auto getTransactionResources = OperationContext::declareDecoration< - std::unique_ptr>(); +using TransactionResources = shard_role_details::TransactionResources; -shard_role_details::TransactionResources& getOrMakeTransactionResources(OperationContext* opCtx) { - auto& readConcern = repl::ReadConcernArgs::get(opCtx); - auto& optTransactionResources = getTransactionResources(opCtx); - if (!optTransactionResources) { - optTransactionResources = - std::make_unique(readConcern); - } +namespace { - return *optTransactionResources; -} +enum class ResolutionType { kUUID, kNamespace }; struct ResolvedNamespaceOrViewAcquisitionRequest { - // Populated in the first phase of collection(s) acquisition + // Populated in the first phase of collection(s) acquisition. AcquisitionPrerequisites prerequisites; + ResolutionType resolvedBy; - // Populated optionally in the second phase of collection(s) acquisition + // Populated only for locked acquisitions in the second phase of collection(s) acquisition. std::shared_ptr dbLock; boost::optional collLock; + + // Resources for lock free reads. + struct LockFreeReadsResources { + // If this field is set, the reader will not take the ParallelBatchWriterMode lock and + // conflict with secondary batch application. + std::shared_ptr skipPBWMLock; + std::shared_ptr lockFreeReadsBlock; + std::shared_ptr globalLock; + } lockFreeReadsResources; }; using ResolvedNamespaceOrViewAcquisitionRequestsMap = std::map; +void validateResolvedCollectionByUUID(OperationContext* opCtx, + CollectionOrViewAcquisitionRequest ar, + const Collection* coll) { + invariant(ar.nssOrUUID.isUUID()); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Namespace " << ar.nssOrUUID.dbName().toStringForErrorMsg() << ":" + << ar.nssOrUUID.uuid() << " not found", + coll); + auto shardVersion = OperationShardingState::get(opCtx).getShardVersion(coll->ns()); + uassert(ErrorCodes::IncompatibleShardingMetadata, + str::stream() << "Collection " << ar.nssOrUUID.dbName().toStringForErrorMsg() << ":" + << ar.nssOrUUID.uuid() + << " acquired by UUID has a ShardVersion attached.", + !shardVersion || shardVersion == ShardVersion::UNSHARDED()); + uassert(ErrorCodes::NamespaceNotFound, + str::stream() << "Database name mismatch for " + << ar.nssOrUUID.dbName().toStringForErrorMsg() << ":" + << ar.nssOrUUID.uuid() + << ". 
Expected: " << ar.nssOrUUID.dbName().toStringForErrorMsg() + << " Actual: " << coll->ns().dbName().toStringForErrorMsg(), + coll->ns().dbName() == ar.nssOrUUID.dbName()); +} + /** - * Takes the input acquisitions, populates the NSS and UUID parts and returns a list, sorted by NSS, - * suitable for a defined lock acquisition order. + * Takes the input acquisitions, populates the NSS and returns a map sorted by NSS, suitable for + * locking them in NSS order. */ ResolvedNamespaceOrViewAcquisitionRequestsMap resolveNamespaceOrViewAcquisitionRequests( OperationContext* opCtx, + const CollectionCatalog& catalog, const std::vector& acquisitionRequests) { - auto catalog = CollectionCatalog::get(opCtx); ResolvedNamespaceOrViewAcquisitionRequestsMap sortedAcquisitionRequests; for (const auto& ar : acquisitionRequests) { - if (ar.nss) { - auto coll = catalog->lookupCollectionByNamespace(opCtx, *ar.nss); - if (ar.uuid) { - checkCollectionUUIDMismatch(opCtx, *ar.nss, coll, *ar.uuid); - } - - AcquisitionPrerequisites prerequisites( - *ar.nss, ar.uuid, ar.placementConcern, ar.operationType, ar.viewMode); + if (ar.nssOrUUID.isNamespaceString()) { + AcquisitionPrerequisites prerequisites(ar.nssOrUUID.nss(), + ar.expectedUUID, + ar.readConcern, + ar.placementConcern, + ar.operationType, + ar.viewMode); ResolvedNamespaceOrViewAcquisitionRequest resolvedAcquisitionRequest{ - prerequisites, nullptr, boost::none}; - sortedAcquisitionRequests.emplace(ResourceId(RESOURCE_COLLECTION, *ar.nss), + prerequisites, ResolutionType::kNamespace, nullptr, boost::none}; + + sortedAcquisitionRequests.emplace(ResourceId(RESOURCE_COLLECTION, ar.nssOrUUID.nss()), std::move(resolvedAcquisitionRequest)); - } else if (ar.dbname) { - invariant(ar.uuid); - auto coll = catalog->lookupCollectionByUUID(opCtx, *ar.uuid); - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Namespace " << *ar.dbname << ":" << *ar.uuid << " not found", - coll); - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Database name mismatch for " << *ar.dbname << ":" << *ar.uuid - << ". 
Expected: " << *ar.dbname - << " Actual: " << coll->ns().dbName(), - coll->ns().dbName() == *ar.dbname); - - if (ar.nss) { - checkCollectionUUIDMismatch(opCtx, *ar.nss, coll, *ar.uuid); - } + } else if (ar.nssOrUUID.isUUID()) { + auto coll = catalog.lookupCollectionByUUID(opCtx, ar.nssOrUUID.uuid()); + + validateResolvedCollectionByUUID(opCtx, ar, coll); - AcquisitionPrerequisites prerequisites( - coll->ns(), coll->uuid(), ar.placementConcern, ar.operationType, ar.viewMode); + AcquisitionPrerequisites prerequisites(coll->ns(), + coll->uuid(), + ar.readConcern, + ar.placementConcern, + ar.operationType, + ar.viewMode); ResolvedNamespaceOrViewAcquisitionRequest resolvedAcquisitionRequest{ - prerequisites, nullptr, boost::none}; + prerequisites, ResolutionType::kUUID, nullptr, boost::none}; sortedAcquisitionRequests.emplace(ResourceId(RESOURCE_COLLECTION, coll->ns()), std::move(resolvedAcquisitionRequest)); @@ -137,7 +189,8 @@ ResolvedNamespaceOrViewAcquisitionRequestsMap resolveNamespaceOrViewAcquisitionR void verifyDbAndCollection(OperationContext* opCtx, const NamespaceString& nss, - CollectionPtr& coll) { + CollectionPtr& coll, + AcquisitionPrerequisites::OperationType operationType) { invariant(coll); // In most cases we expect modifications for system.views to upgrade MODE_IX to MODE_X @@ -149,22 +202,26 @@ void verifyDbAndCollection(OperationContext* opCtx, "Modifications to system.views must take an exclusive lock", !nss.isSystemDotViews() || opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X)); - // If we are in a transaction, we cannot yield and wait when there are pending catalog changes. - // Instead, we must return an error in such situations. We ignore this restriction for the - // oplog, since it never has pending catalog changes. - if (opCtx->inMultiDocumentTransaction() && nss != NamespaceString::kRsOplogNamespace) { - if (auto minSnapshot = coll->getMinimumVisibleSnapshot()) { - auto mySnapshot = - opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx).get_value_or( - opCtx->recoveryUnit()->getCatalogConflictingTimestamp()); - - uassert( - ErrorCodes::SnapshotUnavailable, - str::stream() << "Unable to read from a snapshot due to pending collection catalog " - "changes; please retry the operation. Snapshot timestamp is " - << mySnapshot.toString() << ". Collection minimum is " - << minSnapshot->toString(), - mySnapshot.isNull() || mySnapshot >= minSnapshot.value()); + // Verify that we are using the latest instance if we intend to perform writes. 
+ if (operationType == AcquisitionPrerequisites::OperationType::kWrite) { + auto latest = CollectionCatalog::latest(opCtx); + if (!latest->isLatestCollection(opCtx, coll.get())) { + throwWriteConflictException(str::stream() << "Unable to write to collection '" + << coll->ns().toStringForErrorMsg() + << "' due to catalog changes; please " + "retry the operation"); + } + if (opCtx->recoveryUnit()->isActive()) { + const auto mySnapshot = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); + if (mySnapshot && *mySnapshot < coll->getMinimumValidSnapshot()) { + throwWriteConflictException(str::stream() + << "Unable to write to collection '" + << coll->ns().toStringForErrorMsg() + << "' due to snapshot timestamp " << *mySnapshot + << " being older than collection minimum " + << *coll->getMinimumValidSnapshot() + << "; please retry the operation"); + } } } } @@ -174,7 +231,7 @@ void checkPlacementVersion(OperationContext* opCtx, const PlacementConcern& placementConcern) { const auto& receivedDbVersion = placementConcern.dbVersion; if (receivedDbVersion) { - DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.db(), *receivedDbVersion); + DatabaseShardingState::assertMatchingDbVersion(opCtx, nss.dbName(), *receivedDbVersion); } const auto& receivedShardVersion = placementConcern.shardVersion; @@ -185,25 +242,34 @@ void checkPlacementVersion(OperationContext* opCtx, } std::variant> acquireLocalCollectionOrView( - OperationContext* opCtx, const AcquisitionPrerequisites& prerequisites) { + OperationContext* opCtx, + const CollectionCatalog& catalog, + const AcquisitionPrerequisites& prerequisites) { const auto& nss = prerequisites.nss; - const auto catalog = CollectionCatalog::get(opCtx); + auto readTimestamp = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); + auto coll = CollectionPtr( + catalog.establishConsistentCollection(opCtx, NamespaceStringOrUUID(nss), readTimestamp)); + checkCollectionUUIDMismatch(opCtx, catalog, nss, coll, prerequisites.uuid); + + if (coll) { + verifyDbAndCollection(opCtx, nss, coll, prerequisites.operationType); + + // Ban snapshot reads on capped collections. 
+ const auto readConcernLevel = prerequisites.readConcern.getLevel(); + uassert(ErrorCodes::SnapshotUnavailable, + "Reading from capped collections with readConcern snapshot is not supported", + !coll->isCapped() || + readConcernLevel != repl::ReadConcernLevel::kSnapshotReadConcern); - if (auto coll = CollectionPtr(catalog->lookupCollectionByNamespace(opCtx, nss))) { - verifyDbAndCollection(opCtx, nss, coll); - checkCollectionUUIDMismatch(opCtx, nss, coll, prerequisites.uuid); return coll; - } else if (auto view = catalog->lookupView(opCtx, nss)) { - checkCollectionUUIDMismatch(opCtx, nss, coll, prerequisites.uuid); + } else if (auto view = catalog.lookupView(opCtx, nss)) { uassert(ErrorCodes::CommandNotSupportedOnView, - str::stream() << "Namespace " << nss << " is a view, not a collection", + str::stream() << "Namespace " << nss.toStringForErrorMsg() + << " is a view, not a collection", prerequisites.viewMode == AcquisitionPrerequisites::kCanBeView); return view; } else { - uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Namespace " << nss << " does not exist", - !prerequisites.uuid); return CollectionPtr(); } } @@ -215,20 +281,18 @@ struct SnapshotedServices { }; SnapshotedServices acquireServicesSnapshot(OperationContext* opCtx, + const CollectionCatalog& catalog, const AcquisitionPrerequisites& prerequisites) { if (stdx::holds_alternative( prerequisites.placementConcern)) { return SnapshotedServices{ - acquireLocalCollectionOrView(opCtx, prerequisites), boost::none, boost::none}; + acquireLocalCollectionOrView(opCtx, catalog, prerequisites), boost::none, boost::none}; } - const auto& nss = prerequisites.nss; const auto& placementConcern = stdx::get(prerequisites.placementConcern); - // Check placement version before acquiring the catalog snapshot - checkPlacementVersion(opCtx, nss, placementConcern); - - auto collOrView = acquireLocalCollectionOrView(opCtx, prerequisites); + auto collOrView = acquireLocalCollectionOrView(opCtx, catalog, prerequisites); + const auto& nss = prerequisites.nss; const bool isPlacementConcernVersioned = placementConcern.dbVersion || placementConcern.shardVersion; @@ -247,28 +311,31 @@ SnapshotedServices acquireServicesSnapshot(OperationContext* opCtx, *placementConcern.shardVersion)) : boost::none; - // Recheck the placement version after having acquired the catalog snapshot. If the placement - // version still matches, then the catalog we snapshoted is consistent with the placement - // concern too. - checkPlacementVersion(opCtx, nss, placementConcern); + // TODO: This will be removed when we no longer snapshot sharding state on CollectionPtr. 
+ if (std::holds_alternative(collOrView) && collectionDescription.isSharded()) { + std::get(collOrView) + .setShardKeyPattern(collectionDescription.getKeyPattern()); + } return SnapshotedServices{ std::move(collOrView), std::move(collectionDescription), std::move(optOwnershipFilter)}; } -std::vector acquireResolvedCollectionsOrViewsWithoutTakingLocks( +CollectionOrViewAcquisitions acquireResolvedCollectionsOrViewsWithoutTakingLocks( OperationContext* opCtx, + const CollectionCatalog& catalog, ResolvedNamespaceOrViewAcquisitionRequestsMap sortedAcquisitionRequests) { - std::vector acquisitions; + CollectionOrViewAcquisitions acquisitions; for (auto& acquisitionRequest : sortedAcquisitionRequests) { - tassert(7328900, - "Cannot acquire for write without locks", - acquisitionRequest.second.prerequisites.operationType == - AcquisitionPrerequisites::kRead || - acquisitionRequest.second.collLock); - auto& prerequisites = acquisitionRequest.second.prerequisites; - auto snapshotedServices = acquireServicesSnapshot(opCtx, prerequisites); + auto& txnResources = TransactionResources::get(opCtx); + + invariant(txnResources.state != shard_role_details::TransactionResources::State::YIELDED, + "Cannot make a new acquisition in the YIELDED state"); + invariant(txnResources.state != shard_role_details::TransactionResources::State::FAILED, + "Cannot make a new acquisition in the FAILED state"); + + auto snapshotedServices = acquireServicesSnapshot(opCtx, catalog, prerequisites); const bool isCollection = std::holds_alternative(snapshotedServices.collectionPtrOrView); @@ -283,118 +350,420 @@ std::vector acquireResolvedCollectionsOrViews prerequisites.uuid = collectionPtr->uuid(); } + boost::optional lockFreeReadsBlock; + if (acquisitionRequest.second.lockFreeReadsResources.lockFreeReadsBlock) { + lockFreeReadsBlock.emplace(std::move( + *acquisitionRequest.second.lockFreeReadsResources.lockFreeReadsBlock)); + } + boost::optional globalLock; + if (acquisitionRequest.second.lockFreeReadsResources.globalLock) { + globalLock.emplace( + std::move(*acquisitionRequest.second.lockFreeReadsResources.globalLock)); + } + shard_role_details::AcquiredCollection& acquiredCollection = - getOrMakeTransactionResources(opCtx).addAcquiredCollection( + txnResources.addAcquiredCollection( {prerequisites, std::move(acquisitionRequest.second.dbLock), std::move(acquisitionRequest.second.collLock), + std::move(lockFreeReadsBlock), + std::move(globalLock), std::move(snapshotedServices.collectionDescription), std::move(snapshotedServices.ownershipFilter), std::move(std::get(snapshotedServices.collectionPtrOrView))}); - ScopedCollectionAcquisition scopedAcquisition(opCtx, acquiredCollection); - acquisitions.emplace_back(std::move(scopedAcquisition)); + CollectionAcquisition acquisition(txnResources, acquiredCollection); + acquisitions.emplace(prerequisites.nss, std::move(acquisition)); } else { // It's a view. 
- const shard_role_details::AcquiredView& acquiredView = - getOrMakeTransactionResources(opCtx).addAcquiredView( - {prerequisites, - std::move(acquisitionRequest.second.dbLock), - std::move(acquisitionRequest.second.collLock), - std::move(std::get>( - snapshotedServices.collectionPtrOrView))}); - - ScopedViewAcquisition scopedAcquisition(opCtx, acquiredView); - acquisitions.emplace_back(std::move(scopedAcquisition)); + auto& acquiredView = txnResources.addAcquiredView( + {prerequisites, + std::move(acquisitionRequest.second.dbLock), + std::move(acquisitionRequest.second.collLock), + std::move(std::get>( + snapshotedServices.collectionPtrOrView))}); + + ViewAcquisition acquisition(txnResources, acquiredView); + acquisitions.emplace(prerequisites.nss, std::move(acquisition)); } } return acquisitions; } +/* + * Establish a capped snapshot if necessary on the provided namespace. + */ +void establishCappedSnapshotIfNeeded(OperationContext* opCtx, + const std::shared_ptr& catalog, + const NamespaceStringOrUUID& nsOrUUID) { + auto coll = catalog->lookupCollectionByNamespaceOrUUID(opCtx, nsOrUUID); + if (coll && coll->usesCappedSnapshots()) { + CappedSnapshots::get(opCtx).establish(opCtx, coll); + } +} + +bool haveAcquiredConsistentCatalogAndSnapshot(const CollectionCatalog* catalogBeforeSnapshot, + const CollectionCatalog* catalogAfterSnapshot, + long long replTermBeforeSnapshot, + long long replTermAfterSnapshot) { + return catalogBeforeSnapshot == catalogAfterSnapshot && + replTermBeforeSnapshot == replTermAfterSnapshot; +} + +std::shared_ptr getConsistentCatalogAndSnapshot( + OperationContext* opCtx, const std::vector& acquisitionRequests) { + while (true) { + shard_role_details::SnapshotAttempt snapshotAttempt(opCtx, acquisitionRequests); + snapshotAttempt.snapshotInitialState(); + snapshotAttempt.changeReadSourceForSecondaryReads(); + snapshotAttempt.openStorageSnapshot(); + if (auto catalog = snapshotAttempt.getConsistentCatalog()) { + return catalog; + } + } +} + +std::vector toNamespaceStringOrUUIDs( + const std::list& acquiredCollections) { + std::vector requests; + for (const auto& acquiredCollection : acquiredCollections) { + const auto& prerequisites = acquiredCollection.prerequisites; + requests.emplace_back(prerequisites.nss); + } + return requests; +} + +std::vector toNamespaceStringOrUUIDs( + const std::vector& acquisitionRequests) { + std::vector requests; + for (const auto& ar : acquisitionRequests) { + requests.emplace_back(ar.nssOrUUID); + } + return requests; +} + +void validateRequests(const std::vector& acquisitionRequests) { + for (const auto& ar : acquisitionRequests) { + if (ar.nssOrUUID.isNamespaceString()) { + uassert(ErrorCodes::InvalidNamespace, + str::stream() << "Namespace " << ar.nssOrUUID.nss().toStringForErrorMsg() + << "is not a valid collection name", + ar.nssOrUUID.nss().isValid()); + } else if (ar.nssOrUUID.isUUID()) { + uassert(ErrorCodes::InvalidNamespace, + str::stream() << "Invalid db name " + << ar.nssOrUUID.dbName().toStringForErrorMsg(), + NamespaceString::validDBName(ar.nssOrUUID.dbName(), + NamespaceString::DollarInDbNameBehavior::Allow)); + } else { + MONGO_UNREACHABLE; + } + } +} + +void checkShardingPlacement( + OperationContext* opCtx, + const std::vector& acquisitionRequests) { + for (const auto& ar : acquisitionRequests) { + // We only have to check placement for collections that come from a router, which + // will have the namespace set. 
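
Several helpers above build the consistent catalog-and-snapshot loop used by lock-free acquisitions: open a storage snapshot, then keep it only if the catalog instance and replication term did not change while doing so. A standalone model of that consistency predicate and the retry decision is sketched here; the types are stand-ins for the server's `CollectionCatalog` and replication state:

```cpp
#include <iostream>
#include <memory>

struct CatalogSketch {};

// Mirrors haveAcquiredConsistentCatalogAndSnapshot above: the freshly opened snapshot is
// only usable if neither the catalog instance nor the replication term changed meanwhile.
bool haveConsistentCatalogAndSnapshot(const CatalogSketch* catalogBefore,
                                      const CatalogSketch* catalogAfter,
                                      long long replTermBefore,
                                      long long replTermAfter) {
    return catalogBefore == catalogAfter && replTermBefore == replTermAfter;
}

int main() {
    auto catalogBefore = std::make_shared<CatalogSketch>();
    auto catalogAfter = catalogBefore;  // nothing changed while the snapshot was opened

    // getConsistentCatalogAndSnapshot keeps retrying until this predicate holds; here it
    // holds on the first attempt, so the snapshot and stashed catalog would be kept.
    if (haveConsistentCatalogAndSnapshot(catalogBefore.get(), catalogAfter.get(), 1, 1)) {
        std::cout << "catalog and snapshot are consistent; stash and proceed\n";
    } else {
        std::cout << "catalog changed underneath; abandon snapshot and retry\n";
    }
}
```
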
+ if (ar.nssOrUUID.isNamespaceString()) { + checkPlacementVersion(opCtx, ar.nssOrUUID.nss(), ar.placementConcern); + } + } +} + +const Lock::GlobalLockSkipOptions kLockFreeReadsGlobalLockOptions{[] { + Lock::GlobalLockSkipOptions options; + options.skipRSTLLock = true; + return options; +}()}; + +ResolvedNamespaceOrViewAcquisitionRequest::LockFreeReadsResources takeGlobalLock( + OperationContext* opCtx, + const std::vector& acquisitionRequests) { + std::shared_ptr skipPBWMLock; + if (!opCtx->isLockFreeReadsOp() && + opCtx->getServiceContext()->getStorageEngine()->supportsReadConcernSnapshot()) { + skipPBWMLock = std::make_shared( + opCtx->lockState()); + } + auto lockFreeReadsBlock = std::make_shared(opCtx); + auto globalLock = std::make_shared(opCtx, + MODE_IS, + Date_t::max(), + Lock::InterruptBehavior::kThrow, + kLockFreeReadsGlobalLockOptions); + return {skipPBWMLock, lockFreeReadsBlock, globalLock}; +} + +std::shared_ptr stashConsistentCatalog( + OperationContext* opCtx, + const std::vector& acquisitionRequests) { + auto requests = toNamespaceStringOrUUIDs(acquisitionRequests); + auto catalog = getConsistentCatalogAndSnapshot(opCtx, requests); + // Stash the catalog, it will be automatically unstashed when the snapshot is released. + CollectionCatalog::stash(opCtx, catalog); + return catalog; +} + +// TODO SERVER-77067 simplify conditions +bool supportsLockFreeRead(OperationContext* opCtx) { + // Lock-free reads are not supported: + // * in multi-document transactions. + // * under an IX lock (nested reads under IX lock holding operations). + // * if a storage txn is already open w/o the lock-free reads operation flag set. + return !storageGlobalParams.disableLockFreeReads && !opCtx->inMultiDocumentTransaction() && + !opCtx->lockState()->isWriteLocked() && + !(opCtx->recoveryUnit()->isActive() && !opCtx->isLockFreeReadsOp()); +} } // namespace CollectionOrViewAcquisitionRequest CollectionOrViewAcquisitionRequest::fromOpCtx( OperationContext* opCtx, - NamespaceString nss, + NamespaceStringOrUUID nssOrUUID, AcquisitionPrerequisites::OperationType operationType, AcquisitionPrerequisites::ViewMode viewMode) { auto& oss = OperationShardingState::get(opCtx); auto& readConcern = repl::ReadConcernArgs::get(opCtx); + // Acquisitions by uuid cannot possibly have a corresponding ShardVersion attached. + PlacementConcern placementConcern = nssOrUUID.isNamespaceString() + ? 
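A hypothetical call site for the new `fromOpCtx` overload that accepts a `NamespaceStringOrUUID`. This is an uncompiled sketch: `dbName` and `collectionUuid` are assumed to be a `DatabaseName` and `UUID` already in scope, and the constructor form of `NamespaceStringOrUUID` is an assumption about the surrounding codebase.

```cpp
// Acquiring by UUID carries only the database version in the placement concern,
// since a shard version cannot be attached to a UUID.
auto request = CollectionOrViewAcquisitionRequest::fromOpCtx(
    opCtx,
    NamespaceStringOrUUID(dbName, collectionUuid),
    AcquisitionPrerequisites::kRead);
```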
PlacementConcern{oss.getDbVersion(nssOrUUID.dbName()), + oss.getShardVersion(nssOrUUID.nss())} + : PlacementConcern{oss.getDbVersion(nssOrUUID.dbName()), {}}; + return CollectionOrViewAcquisitionRequest( - nss, - {oss.getDbVersion(nss.db()), oss.getShardVersion(nss)}, - readConcern, - operationType, - viewMode); + nssOrUUID, placementConcern, readConcern, operationType, viewMode); } CollectionAcquisitionRequest CollectionAcquisitionRequest::fromOpCtx( OperationContext* opCtx, NamespaceString nss, + AcquisitionPrerequisites::OperationType operationType, + boost::optional expectedUUID) { + auto& oss = OperationShardingState::get(opCtx); + auto& readConcern = repl::ReadConcernArgs::get(opCtx); + + return CollectionAcquisitionRequest(nss, + expectedUUID, + {oss.getDbVersion(nss.dbName()), oss.getShardVersion(nss)}, + readConcern, + operationType); +} + +CollectionAcquisitionRequest CollectionAcquisitionRequest::fromOpCtx( + OperationContext* opCtx, + NamespaceStringOrUUID nssOrUUID, AcquisitionPrerequisites::OperationType operationType) { auto& oss = OperationShardingState::get(opCtx); auto& readConcern = repl::ReadConcernArgs::get(opCtx); - return CollectionAcquisitionRequest( - nss, {oss.getDbVersion(nss.db()), oss.getShardVersion(nss)}, readConcern, operationType); + // Acquisitions by uuid cannot possibly have a corresponding ShardVersion attached. + PlacementConcern placementConcern = nssOrUUID.isNamespaceString() + ? PlacementConcern{oss.getDbVersion(nssOrUUID.dbName()), + oss.getShardVersion(nssOrUUID.nss())} + : PlacementConcern{oss.getDbVersion(nssOrUUID.dbName()), {}}; + + return CollectionAcquisitionRequest(nssOrUUID, placementConcern, readConcern, operationType); } -const UUID& ScopedCollectionAcquisition::uuid() const { +CollectionAcquisition::CollectionAcquisition( + shard_role_details::TransactionResources& txnResources, + shard_role_details::AcquiredCollection& acquiredCollection) + : _txnResources(&txnResources), _acquiredCollection(&acquiredCollection) { + _txnResources->collectionAcquisitionReferences++; + _acquiredCollection->refCount++; +} + +CollectionAcquisition::CollectionAcquisition(const CollectionAcquisition& other) + : _txnResources(other._txnResources), _acquiredCollection(other._acquiredCollection) { + _txnResources->collectionAcquisitionReferences++; + _acquiredCollection->refCount++; +} + +CollectionAcquisition::CollectionAcquisition(CollectionAcquisition&& other) + : _txnResources(other._txnResources), _acquiredCollection(other._acquiredCollection) { + other._txnResources = nullptr; + other._acquiredCollection = nullptr; +} + +CollectionAcquisition& CollectionAcquisition::operator=(const CollectionAcquisition& other) { + this->~CollectionAcquisition(); + _txnResources = other._txnResources; + _acquiredCollection = other._acquiredCollection; + _txnResources->collectionAcquisitionReferences++; + _acquiredCollection->refCount++; + return *this; +} + +CollectionAcquisition& CollectionAcquisition::operator=(CollectionAcquisition&& other) { + this->~CollectionAcquisition(); + _txnResources = other._txnResources; + other._txnResources = nullptr; + _acquiredCollection = other._acquiredCollection; + other._acquiredCollection = nullptr; + return *this; +} + +CollectionAcquisition::CollectionAcquisition(CollectionOrViewAcquisition&& other) { + invariant(other.isCollection()); + auto& acquisition = get(other._collectionOrViewAcquisition); + _txnResources = acquisition._txnResources; + acquisition._txnResources = nullptr; + _acquiredCollection = 
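The special members above give `CollectionAcquisition` small intrusive reference-count semantics: copies bump `refCount`, moves steal the pointers, and assignment releases the current reference before adopting the new one. A self-contained illustration of that shape with placeholder types (not the MongoDB classes):

```cpp
#include <cstdint>
#include <utility>

struct Acquired {            // stands in for shard_role_details::AcquiredCollection
    std::int64_t refCount = 0;
};

class Handle {               // stands in for CollectionAcquisition
public:
    explicit Handle(Acquired& a) : _a(&a) { ++_a->refCount; }

    Handle(const Handle& o) : _a(o._a) { ++_a->refCount; }              // copy shares
    Handle(Handle&& o) noexcept : _a(std::exchange(o._a, nullptr)) {}   // move steals

    Handle& operator=(const Handle& o) {
        if (this != &o) {
            release();
            _a = o._a;
            ++_a->refCount;
        }
        return *this;
    }
    Handle& operator=(Handle&& o) noexcept {
        if (this != &o) {
            release();
            _a = std::exchange(o._a, nullptr);
        }
        return *this;
    }

    ~Handle() { release(); }

private:
    void release() {
        if (_a && --_a->refCount == 0) {
            // Last reference: the real code erases the AcquiredCollection from
            // TransactionResources at this point.
        }
        _a = nullptr;
    }

    Acquired* _a;
};
```

One judgment call worth noting: the sketch guards against self-assignment, whereas the patch's destroy-then-adopt `operator=` does not.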
acquisition._acquiredCollection; + acquisition._acquiredCollection = nullptr; + other._collectionOrViewAcquisition = std::monostate(); +} + +CollectionAcquisition::~CollectionAcquisition() { + if (!_txnResources) { + return; + } + + auto& transactionResources = *_txnResources; + + // If the TransactionResources have failed to restore or yield we've released all the resources. + // Our reference to the acquisition is invalid and we've already removed it from the list of + // acquisitions. + if (transactionResources.state == shard_role_details::TransactionResources::State::ACTIVE) { + auto currentRefCount = --_acquiredCollection->refCount; + if (currentRefCount == 0) + transactionResources.acquiredCollections.remove_if( + [&](const shard_role_details::AcquiredCollection& txnResourceAcquiredColl) { + return &txnResourceAcquiredColl == _acquiredCollection; + }); + } + + transactionResources.collectionAcquisitionReferences--; + if (transactionResources.acquiredCollections.empty() && + transactionResources.acquiredViews.empty()) { + transactionResources.releaseAllResourcesOnCommitOrAbort(); + transactionResources.state = shard_role_details::TransactionResources::State::EMPTY; + } +} + +const NamespaceString& CollectionAcquisition::nss() const { + return _acquiredCollection->prerequisites.nss; +} + +bool CollectionAcquisition::exists() const { + return bool(_acquiredCollection->collectionPtr); +} + +UUID CollectionAcquisition::uuid() const { invariant(exists(), - str::stream() << "Collection " << nss() + str::stream() << "Collection " << nss().toStringForErrorMsg() << " doesn't exist, so its UUID cannot be obtained"); - return *_acquiredCollection.prerequisites.uuid; + return _acquiredCollection->collectionPtr->uuid(); } -const ScopedCollectionDescription& ScopedCollectionAcquisition::getShardingDescription() const { +const ScopedCollectionDescription& CollectionAcquisition::getShardingDescription() const { // The collectionDescription will only not be set if the caller as acquired the acquisition // using the kLocalCatalogOnlyWithPotentialDataLoss placement concern - invariant(_acquiredCollection.collectionDescription); - return *_acquiredCollection.collectionDescription; + invariant(_acquiredCollection->collectionDescription); + return *_acquiredCollection->collectionDescription; } -const boost::optional& ScopedCollectionAcquisition::getShardingFilter() - const { - // The collectionDescription will only not be set if the caller as acquired the acquisition +const boost::optional& CollectionAcquisition::getShardingFilter() const { + // The collectionDescription will only not be set if the caller has acquired the acquisition // using the kLocalCatalogOnlyWithPotentialDataLoss placement concern - invariant(_acquiredCollection.collectionDescription); - return _acquiredCollection.ownershipFilter; + tassert(7740800, + "Getting shard filter on non-sharded or invalid collection", + _acquiredCollection->collectionDescription && + _acquiredCollection->collectionDescription->isSharded()); + return _acquiredCollection->ownershipFilter; } -ScopedCollectionAcquisition::~ScopedCollectionAcquisition() { - if (_opCtx) { - const auto& transactionResources = getTransactionResources(_opCtx); - if (transactionResources) { - transactionResources->acquiredCollections.remove_if( - [this](const shard_role_details::AcquiredCollection& txnResourceAcquiredColl) { - return &txnResourceAcquiredColl == &(this->_acquiredCollection); - }); - } - } +const CollectionPtr& CollectionAcquisition::getCollectionPtr() const { + 
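Because `getShardingFilter()` now tasserts unless a sharding description is present and the collection is sharded, call sites that previously probed the filter unconditionally need a guard. A hedged sketch of a guarded call site (names from this patch; the surrounding code and `acquisition` variable are assumed):

```cpp
// Only consult the ownership filter when the acquisition is for a sharded collection.
if (acquisition.getShardingDescription().isSharded()) {
    const auto& shardingFilter = acquisition.getShardingFilter();
    if (shardingFilter) {
        // ... perform ownership checks against *shardingFilter ...
    }
}
```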
tassert(ErrorCodes::InternalError, + "Collection acquisition has been invalidated", + !_acquiredCollection->invalidated); + return _acquiredCollection->collectionPtr; +} + +ViewAcquisition::ViewAcquisition(shard_role_details::TransactionResources& txnResources, + const shard_role_details::AcquiredView& acquiredView) + : _txnResources(&txnResources), _acquiredView(&acquiredView) { + _txnResources->viewAcquisitionReferences++; + _acquiredView->refCount++; +} + +ViewAcquisition::ViewAcquisition(const ViewAcquisition& other) + : _txnResources(other._txnResources), _acquiredView(other._acquiredView) { + _txnResources->viewAcquisitionReferences++; + _acquiredView->refCount++; +} + +ViewAcquisition::ViewAcquisition(ViewAcquisition&& other) + : _txnResources(other._txnResources), _acquiredView(other._acquiredView) { + other._txnResources = nullptr; + other._acquiredView = nullptr; +} + +ViewAcquisition& ViewAcquisition::operator=(const ViewAcquisition& other) { + this->~ViewAcquisition(); + _txnResources = other._txnResources; + _acquiredView = other._acquiredView; + _txnResources->viewAcquisitionReferences++; + _acquiredView->refCount++; + return *this; } -ScopedViewAcquisition::~ScopedViewAcquisition() { - if (_opCtx) { - const auto& transactionResources = getTransactionResources(_opCtx); - if (transactionResources) { - transactionResources->acquiredViews.remove_if( - [this](const shard_role_details::AcquiredView& txnResourceAcquiredView) { - return &txnResourceAcquiredView == &(this->_acquiredView); +ViewAcquisition& ViewAcquisition::operator=(ViewAcquisition&& other) { + this->~ViewAcquisition(); + _txnResources = other._txnResources; + _acquiredView = other._acquiredView; + other._txnResources = nullptr; + other._acquiredView = nullptr; + return *this; +} + +ViewAcquisition::~ViewAcquisition() { + if (!_txnResources) { + return; + } + + auto& transactionResources = *_txnResources; + + // If the TransactionResources have failed to restore or yield we've released all the resources. + // Our reference to the acquisition is invalid and we've already removed it from the list of + // acquisitions. 
+ if (transactionResources.state == shard_role_details::TransactionResources::State::ACTIVE) { + auto currentRefCount = --_acquiredView->refCount; + if (currentRefCount == 0) { + transactionResources.acquiredViews.remove_if( + [&](const shard_role_details::AcquiredView& txnResourceAcquiredView) { + return &txnResourceAcquiredView == _acquiredView; }); } } + + transactionResources.viewAcquisitionReferences--; + if (transactionResources.acquiredCollections.empty() && + transactionResources.acquiredViews.empty()) { + transactionResources.releaseAllResourcesOnCommitOrAbort(); + transactionResources.state = shard_role_details::TransactionResources::State::EMPTY; + } +} + +const NamespaceString& ViewAcquisition::nss() const { + return _acquiredView->prerequisites.nss; +} + +const ViewDefinition& ViewAcquisition::getViewDefinition() const { + invariant(_acquiredView->viewDefinition); + return *_acquiredView->viewDefinition; } -ScopedCollectionAcquisition acquireCollection(OperationContext* opCtx, - CollectionAcquisitionRequest acquisitionRequest, - LockMode mode) { - return std::get( - acquireCollectionOrView(opCtx, acquisitionRequest, mode)); +CollectionAcquisition acquireCollection(OperationContext* opCtx, + CollectionAcquisitionRequest acquisitionRequest, + LockMode mode) { + return CollectionAcquisition(acquireCollectionOrView(opCtx, acquisitionRequest, mode)); } -std::vector acquireCollections( +CollectionAcquisitions acquireCollections( OperationContext* opCtx, std::vector acquisitionRequests, LockMode mode) { @@ -407,26 +776,214 @@ std::vector acquireCollections( // Acquire the collections auto acquisitions = acquireCollectionsOrViews(opCtx, namespaceOrViewAcquisitionRequests, mode); - // Transform the acquisitions to ScopedCollectionAcquisitions - std::vector collectionAcquisitions; + // Transform the acquisitions to CollectionAcquisitions + CollectionAcquisitions collectionAcquisitions; for (auto& acquisition : acquisitions) { // It must be a collection, because that's what the acquisition request stated. - invariant(std::holds_alternative(acquisition)); - - collectionAcquisitions.emplace_back( - std::move(std::get(acquisition))); + invariant(acquisition.second.isCollection()); + collectionAcquisitions.emplace(std::move(acquisition)); } return collectionAcquisitions; } -ScopedCollectionOrViewAcquisition acquireCollectionOrView( +CollectionOrViewAcquisition acquireCollectionOrView( OperationContext* opCtx, CollectionOrViewAcquisitionRequest acquisitionRequest, LockMode mode) { auto acquisition = acquireCollectionsOrViews(opCtx, {std::move(acquisitionRequest)}, mode); invariant(acquisition.size() == 1); - return std::move(acquisition.front()); + return std::move(acquisition.begin()->second); +} + +CollectionAcquisition acquireCollectionMaybeLockFree( + OperationContext* opCtx, CollectionAcquisitionRequest acquisitionRequest) { + return CollectionAcquisition(acquireCollectionOrViewMaybeLockFree(opCtx, acquisitionRequest)); +} + +CollectionOrViewAcquisition acquireCollectionOrViewMaybeLockFree( + OperationContext* opCtx, CollectionOrViewAcquisitionRequest acquisitionRequest) { + auto acquisition = + acquireCollectionsOrViewsMaybeLockFree(opCtx, {std::move(acquisitionRequest)}); + invariant(acquisition.size() == 1); + return std::move(acquisition.begin()->second); +} + +namespace shard_role_details { +void SnapshotAttempt::snapshotInitialState() { + // The read source used can change depending on replication state, so we must fetch the repl + // state beforehand, to compare with afterwards. 
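A hypothetical write-path call site for the renamed single-collection entry point. This is an uncompiled sketch; `opCtx` and `nss` are assumed to be in scope.

```cpp
// Acquire one collection for writing, inferring placement and read concern from
// the operation context, and holding the DB/collection locks in MODE_IX.
CollectionAcquisition coll = acquireCollection(
    opCtx,
    CollectionAcquisitionRequest::fromOpCtx(
        opCtx, nss, AcquisitionPrerequisites::OperationType::kWrite),
    MODE_IX);

if (coll.exists()) {
    const CollectionPtr& collPtr = coll.getCollectionPtr();
    // ... perform the write against collPtr ...
}
```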
+ _replTermBeforeSnapshot = repl::ReplicationCoordinator::get(_opCtx)->getTerm(); + + _catalogBeforeSnapshot = CollectionCatalog::get(_opCtx); +} + +void SnapshotAttempt::changeReadSourceForSecondaryReads() { + invariant(_replTermBeforeSnapshot && _catalogBeforeSnapshot); + auto catalog = *_catalogBeforeSnapshot; + + for (auto& nsOrUUID : _acquisitionRequests) { + NamespaceString nss; + try { + nss = catalog->resolveNamespaceStringOrUUID(_opCtx, nsOrUUID); + } catch (const ExceptionFor&) { + invariant(nsOrUUID.isUUID()); + + const auto readSource = _opCtx->recoveryUnit()->getTimestampReadSource(); + if (readSource == RecoveryUnit::ReadSource::kNoTimestamp || + readSource == RecoveryUnit::ReadSource::kLastApplied) { + throw; + } + } + _shouldReadAtLastApplied = SnapshotHelper::changeReadSourceIfNeeded(_opCtx, nss); + if (*_shouldReadAtLastApplied) + return; + } +} + +void SnapshotAttempt::openStorageSnapshot() { + invariant(_shouldReadAtLastApplied); + + // If the collection requires capped snapshots (i.e. it is unreplicated, capped, not the + // oplog, and not clustered), establish a capped snapshot. This must happen before opening + // the storage snapshot to ensure a reader using tailable cursors would not miss any writes. + // + // It is safe to establish the capped snapshot here, on the Collection object in the latest + // version of the catalog, even if establishConsistentCollection is eventually called to + // construct a Collection object from the durable catalog because the only way that can be + // required for a collection that uses capped snapshots (i.e. a collection that is + // unreplicated and capped) is: + // * The present read operation is reading without a timestamp (since unreplicated + // collections + // don't support timestamped reads), and + // * When opening the storage snapshot (and thus when establishing the capped snapshot), + // there + // was a DDL operation pending on the namespace or UUID requested for this read (because + // this is the only time we need to construct a Collection object from the durable + // catalog for an untimestamped read). + // + // Because DDL operations require a collection X lock, there cannot have been any ongoing + // concurrent writes to the collection while establishing the capped snapshot. This means + // that if there was a capped snapshot, it should not have contained any uncommitted writes, + // and so the _lowestUncommittedRecord must be null. + for (auto& nssOrUUID : _acquisitionRequests) { + establishCappedSnapshotIfNeeded(_opCtx, *_catalogBeforeSnapshot, nssOrUUID); + } + + // TODO SERVER-77381 call preallocateSnapshotForOplogRead() when reading from the oplog. + if (!_opCtx->recoveryUnit()->isActive()) { + _opCtx->recoveryUnit()->preallocateSnapshot(); + _openedSnapshot = true; + } +} + +std::shared_ptr SnapshotAttempt::getConsistentCatalog() { + auto catalogAfterSnapshot = CollectionCatalog::get(_opCtx); + const auto replTermAfterSnapshot = repl::ReplicationCoordinator::get(_opCtx)->getTerm(); + + if (!haveAcquiredConsistentCatalogAndSnapshot(_catalogBeforeSnapshot->get(), + catalogAfterSnapshot.get(), + *_replTermBeforeSnapshot, + replTermAfterSnapshot)) { + return nullptr; + } + _successful = true; + return catalogAfterSnapshot; +} + +SnapshotAttempt::~SnapshotAttempt() { + if (_successful) { + // We were successful, nothing to clean up. 
+ return; + } + + if (_openedSnapshot && !_opCtx->lockState()->inAWriteUnitOfWork()) { + _opCtx->recoveryUnit()->abandonSnapshot(); + } + CurOp::get(_opCtx)->yielded(); +} + +ResolvedNamespaceOrViewAcquisitionRequestsMap generateSortedAcquisitionRequests( + OperationContext* opCtx, + const CollectionCatalog& catalog, + const std::vector& acquisitionRequests, + ResolvedNamespaceOrViewAcquisitionRequest::LockFreeReadsResources&& lockFreeReadsResources) { + ResolvedNamespaceOrViewAcquisitionRequestsMap sortedAcquisitionRequests; + + auto readTimestamp = opCtx->recoveryUnit()->getPointInTimeReadTimestamp(opCtx); + + int counter = 0; + for (const auto& ar : acquisitionRequests) { + const auto resolvedBy = + ar.nssOrUUID.isNamespaceString() ? ResolutionType::kNamespace : ResolutionType::kUUID; + auto coll = catalog.establishConsistentCollection(opCtx, ar.nssOrUUID, readTimestamp); + + if (ar.nssOrUUID.isUUID()) { + validateResolvedCollectionByUUID(opCtx, ar, coll); + } + + const auto& nss = ar.nssOrUUID.isNamespaceString() ? ar.nssOrUUID.nss() : coll->ns(); + const auto& prerequisiteUUID = + ar.nssOrUUID.isUUID() ? ar.nssOrUUID.uuid() : ar.expectedUUID; + AcquisitionPrerequisites prerequisites(nss, + prerequisiteUUID, + ar.readConcern, + ar.placementConcern, + ar.operationType, + ar.viewMode); + + ResolvedNamespaceOrViewAcquisitionRequest resolvedAcquisitionRequest{ + prerequisites, resolvedBy, nullptr, boost::none, lockFreeReadsResources}; + // We don't care about ordering in this case, use a mock ResourceId as the key. + sortedAcquisitionRequests.emplace(ResourceId(RESOURCE_COLLECTION, counter++), + std::move(resolvedAcquisitionRequest)); + } + return sortedAcquisitionRequests; +} + +CollectionOrViewAcquisitions acquireCollectionsOrViewsLockFree( + OperationContext* opCtx, std::vector acquisitionRequests) { + if (acquisitionRequests.size() == 0) { + return {}; + } + + validateRequests(acquisitionRequests); + + // We shouldn't have an open snapshot unless a previous lock-free acquisition opened and + // stashed it already. + invariant(!opCtx->recoveryUnit()->isActive() || opCtx->isLockFreeReadsOp()); + + auto lockFreeReadsResources = takeGlobalLock(opCtx, acquisitionRequests); + + // Wait for a configured amount of time after acquiring locks if the failpoint is enabled + catalog_helper::setAutoGetCollectionWaitFailpointExecute( + [&](const BSONObj& data) { sleepFor(Milliseconds(data["waitForMillis"].numberInt())); }); + + // Make sure the sharding placement is correct before opening the storage snapshot, we will + // check it again after opening it to make sure it is consistent. This is specially + // important in secondaries since they can be lagging and might not be aware of the latests + // routing changes. + checkShardingPlacement(opCtx, acquisitionRequests); + + // Open a consistent catalog snapshot if needed. + bool openSnapshot = !opCtx->recoveryUnit()->isActive(); + auto catalog = openSnapshot ? stashConsistentCatalog(opCtx, acquisitionRequests) + : CollectionCatalog::get(opCtx); + + try { + // Second sharding placement check. + checkShardingPlacement(opCtx, acquisitionRequests); + + auto sortedAcquisitionRequests = shard_role_details::generateSortedAcquisitionRequests( + opCtx, *catalog, acquisitionRequests, std::move(lockFreeReadsResources)); + return acquireResolvedCollectionsOrViewsWithoutTakingLocks( + opCtx, *catalog, std::move(sortedAcquisitionRequests)); + } catch (...) 
{ + if (openSnapshot && !opCtx->lockState()->inAWriteUnitOfWork()) + opCtx->recoveryUnit()->abandonSnapshot(); + throw; + } } +} // namespace shard_role_details -std::vector acquireCollectionsOrViews( +CollectionOrViewAcquisitions acquireCollectionsOrViews( OperationContext* opCtx, std::vector acquisitionRequests, LockMode mode) { @@ -434,19 +991,20 @@ std::vector acquireCollectionsOrViews( return {}; } - // Optimistically populate the nss and uuid parts of the resolved acquisition requests and sort - // them + validateRequests(acquisitionRequests); + while (true) { - auto sortedAcquisitionRequests = - resolveNamespaceOrViewAcquisitionRequests(opCtx, acquisitionRequests); + // Optimistically populate the nss and uuid parts of the resolved acquisition requests and + // sort them + auto sortedAcquisitionRequests = resolveNamespaceOrViewAcquisitionRequests( + opCtx, *CollectionCatalog::get(opCtx), acquisitionRequests); // At this point, sortedAcquisitionRequests contains fully resolved (both nss and uuid) // namespace or view requests in sorted order. However, there is still no guarantee that the // nss <-> uuid mapping won't change from underneath. // - // Lock the collection locks in the sorted order and pass the resolved namespaces to - // acquireCollectionsOrViewsWithoutTakingLocks. If it throws CollectionUUIDMismatch, we - // need to start over. + // Lock the collection locks in the sorted order and recheck the UUIDS. If it fails, we need + // to start over. const auto& dbName = sortedAcquisitionRequests.begin()->second.prerequisites.nss.dbName(); Lock::DBLockSkipOptions dbLockOptions = [&]() { Lock::DBLockSkipOptions dbLockOptions; @@ -485,84 +1043,110 @@ std::vector acquireCollectionsOrViews( ar.second.collLock.emplace(opCtx, nss, mode); } - try { - return acquireResolvedCollectionsOrViewsWithoutTakingLocks( - opCtx, std::move(sortedAcquisitionRequests)); - } catch (const ExceptionFor&) { + // Wait for a configured amount of time after acquiring locks if the failpoint is + // enabled + catalog_helper::setAutoGetCollectionWaitFailpointExecute([&](const BSONObj& data) { + sleepFor(Milliseconds(data["waitForMillis"].numberInt())); + }); + + checkShardingPlacement(opCtx, acquisitionRequests); + + // Recheck UUIDs. We only do this for resolutions performed via UUID exclusively as + // otherwise we have the correct mapping between nss <-> uuid since the nss is already the + // user provided one. Note that multi-document transactions will get a WCE thrown later + // during the checks performed by verifyDbAndCollection if the collection metadata has + // changed. + bool hasOptimisticResolutionFailed = false; + for (auto& ar : sortedAcquisitionRequests) { + const auto& prerequisites = ar.second.prerequisites; + if (ar.second.resolvedBy != ResolutionType::kUUID) { + continue; + } + const auto& currentCatalog = CollectionCatalog::get(opCtx); + const auto coll = currentCatalog->lookupCollectionByNamespace(opCtx, prerequisites.nss); + if (prerequisites.uuid && (!coll || coll->uuid() != prerequisites.uuid)) { + hasOptimisticResolutionFailed = true; + break; + } + } + + if (MONGO_unlikely(hasOptimisticResolutionFailed)) { + // Retry optimistic resolution. continue; } - } -} -std::vector acquireCollectionsOrViewsWithoutTakingLocks( - OperationContext* opCtx, - std::initializer_list acquisitionRequests) { - while (true) { - auto sortedAcquisitionRequests = - resolveNamespaceOrViewAcquisitionRequests(opCtx, acquisitionRequests); + // Open a consistent catalog snapshot if needed. 
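The loop above resolves the requests optimistically, takes locks in a canonical sorted order, and then re-validates the nss/uuid mapping, restarting from scratch when the mapping changed underneath. A self-contained sketch of that optimistic resolve-sort-lock-validate loop with placeholder types (not MongoDB code; all helpers are assumptions):

```cpp
#include <vector>

struct Request {};
struct Resolved {};

std::vector<Resolved> resolveAndSort(const std::vector<Request>& requests);  // assumption
void lockInOrder(const std::vector<Resolved>& resolved);                     // assumption
bool mappingStillValid(const std::vector<Resolved>& resolved);               // assumption
void unlock(const std::vector<Resolved>& resolved);                          // assumption

std::vector<Resolved> acquireSketch(const std::vector<Request>& requests) {
    while (true) {
        // 1. Resolve names/UUIDs against the current catalog and sort them so that
        //    locks are always taken in the same global order (deadlock avoidance).
        auto resolved = resolveAndSort(requests);

        // 2. Take the locks in that order.
        lockInOrder(resolved);

        // 3. The mapping may have changed between resolution and locking; if so,
        //    drop everything and start over.
        if (mappingStillValid(resolved)) {
            return resolved;
        }
        unlock(resolved);
    }
}
```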
+ bool openSnapshot = !opCtx->recoveryUnit()->isActive(); + auto catalog = openSnapshot ? stashConsistentCatalog(opCtx, acquisitionRequests) + : CollectionCatalog::get(opCtx); try { return acquireResolvedCollectionsOrViewsWithoutTakingLocks( - opCtx, std::move(sortedAcquisitionRequests)); - } catch (const ExceptionFor&) { - continue; + opCtx, *catalog, std::move(sortedAcquisitionRequests)); + } catch (...) { + if (openSnapshot && !opCtx->lockState()->inAWriteUnitOfWork()) + opCtx->recoveryUnit()->abandonSnapshot(); + throw; } } } -ScopedCollectionAcquisition acquireCollectionForLocalCatalogOnlyWithPotentialDataLoss( +CollectionOrViewAcquisitions acquireCollectionsOrViewsMaybeLockFree( + OperationContext* opCtx, std::vector acquisitionRequests) { + const bool allAcquisitionsForRead = + std::all_of(acquisitionRequests.begin(), acquisitionRequests.end(), [](const auto& ar) { + return ar.operationType == AcquisitionPrerequisites::kRead; + }); + tassert(7740500, "Cannot acquire for write without locks", allAcquisitionsForRead); + + if (supportsLockFreeRead(opCtx)) { + return shard_role_details::acquireCollectionsOrViewsLockFree( + opCtx, std::move(acquisitionRequests)); + } else { + const auto lockMode = opCtx->inMultiDocumentTransaction() ? MODE_IX : MODE_IS; + return acquireCollectionsOrViews(opCtx, std::move(acquisitionRequests), lockMode); + } +} + +CollectionAcquisition acquireCollectionForLocalCatalogOnlyWithPotentialDataLoss( OperationContext* opCtx, const NamespaceString& nss, LockMode mode) { invariant(!OperationShardingState::isComingFromRouter(opCtx)); - auto& txnResources = getOrMakeTransactionResources(opCtx); + auto& txnResources = TransactionResources::get(opCtx); txnResources.assertNoAcquiredCollections(); auto dbLock = std::make_shared( opCtx, nss.dbName(), isSharedLockMode(mode) ? MODE_IS : MODE_IX); Lock::CollectionLock collLock(opCtx, nss, mode); - auto collOrView = acquireLocalCollectionOrView( - opCtx, + const auto catalog = CollectionCatalog::get(opCtx); + auto prerequisites = AcquisitionPrerequisites(nss, boost::none, + repl::ReadConcernArgs::get(opCtx), AcquisitionPrerequisites::kLocalCatalogOnlyWithPotentialDataLoss, AcquisitionPrerequisites::OperationType::kWrite, - AcquisitionPrerequisites::ViewMode::kMustBeCollection)); + AcquisitionPrerequisites::ViewMode::kMustBeCollection); + + auto collOrView = acquireLocalCollectionOrView(opCtx, *catalog, prerequisites); invariant(std::holds_alternative(collOrView)); auto& coll = std::get(collOrView); + if (coll) + prerequisites.uuid = boost::optional(coll->uuid()); shard_role_details::AcquiredCollection& acquiredCollection = txnResources.addAcquiredCollection( - {AcquisitionPrerequisites(nss, - coll ? 
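The `MaybeLockFree` variants only accept read acquisitions and choose the strategy themselves: lock-free when `supportsLockFreeRead()` allows it, otherwise a conventional MODE_IS/MODE_IX acquisition. A hypothetical read-path call site (uncompiled; `opCtx` and `nss` assumed):

```cpp
// No lock mode is passed; the helper decides between a lock-free read and a
// locked acquisition based on the operation's state.
CollectionAcquisition coll = acquireCollectionMaybeLockFree(
    opCtx,
    CollectionAcquisitionRequest::fromOpCtx(
        opCtx, nss, AcquisitionPrerequisites::OperationType::kRead));

if (coll.exists()) {
    // ... read via coll.getCollectionPtr() ...
}
```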
boost::optional(coll->uuid()) : boost::none, - AcquisitionPrerequisites::kLocalCatalogOnlyWithPotentialDataLoss, - AcquisitionPrerequisites::OperationType::kWrite, - AcquisitionPrerequisites::ViewMode::kMustBeCollection), - std::move(dbLock), - std::move(collLock), - boost::none, - boost::none, - std::move(coll)}); + {prerequisites, std::move(dbLock), std::move(collLock), std::move(coll)}); - return ScopedCollectionAcquisition(opCtx, acquiredCollection); + return CollectionAcquisition(txnResources, acquiredCollection); } ScopedLocalCatalogWriteFence::ScopedLocalCatalogWriteFence(OperationContext* opCtx, - ScopedCollectionAcquisition* acquisition) - : _opCtx(opCtx), _acquiredCollection(&acquisition->_acquiredCollection) { - // Clear the collectionPtr from the acquisition to indicate that it should not be used until the - // caller is done with the DDL modifications + CollectionAcquisition* acquisition) + : _opCtx(opCtx), _acquiredCollection(acquisition->_acquiredCollection) { + // Clear the collectionPtr from the acquisition to indicate that it should not be used until + // the caller is done with the DDL modifications _acquiredCollection->collectionPtr = CollectionPtr(); - - // OnCommit, there is nothing to do because the caller is not allowed to use the collection in - // the scope of the ScopedLocalCatalogWriteFence and the destructor will take care of updating - // the acquisition to point to the latest changed value. - opCtx->recoveryUnit()->onRollback( - [acquiredCollection = _acquiredCollection](OperationContext* opCtx) mutable { - // OnRollback, the acquired collection must be set to reference the previously - // established catalog snapshot - _updateAcquiredLocalCollection(opCtx, acquiredCollection); - }); } ScopedLocalCatalogWriteFence::~ScopedLocalCatalogWriteFence() { @@ -572,13 +1156,24 @@ ScopedLocalCatalogWriteFence::~ScopedLocalCatalogWriteFence() { void ScopedLocalCatalogWriteFence::_updateAcquiredLocalCollection( OperationContext* opCtx, shard_role_details::AcquiredCollection* acquiredCollection) { try { - auto collectionOrView = - acquireLocalCollectionOrView(opCtx, acquiredCollection->prerequisites); - invariant(std::holds_alternative(collectionOrView)); - - acquiredCollection->collectionPtr = std::move(std::get(collectionOrView)); - } catch (...) { - fassertFailedWithStatus(737661, exceptionToStatus()); + const auto catalog = CollectionCatalog::latest(opCtx); + const auto& nss = acquiredCollection->prerequisites.nss; + auto collection = + catalog->lookupCollectionByNamespace(opCtx, acquiredCollection->prerequisites.nss); + checkCollectionUUIDMismatch(opCtx, nss, collection, acquiredCollection->prerequisites.uuid); + if (!acquiredCollection->collectionPtr && collection) { + // If the uuid wasn't originally set on the prerequisites, because the collection didn't + // exist, set it now so that on restore from yield we can check we are restoring the + // same instance of the ns. 
+ acquiredCollection->prerequisites.uuid = collection->uuid(); + } + acquiredCollection->collectionPtr = CollectionPtr(collection); + } catch (const DBException& ex) { + LOGV2_DEBUG(7653800, + 1, + "Failed to update ScopedLocalCatalogWriteFence", + "ex"_attr = redact(ex.toString())); + acquiredCollection->invalidated = true; } } @@ -587,112 +1182,185 @@ YieldedTransactionResources::~YieldedTransactionResources() { } YieldedTransactionResources::YieldedTransactionResources( - std::unique_ptr&& yieldedResources) - : _yieldedResources(std::move(yieldedResources)) {} + std::unique_ptr yieldedResources, + shard_role_details::TransactionResources::State originalState) + : _yieldedResources(std::move(yieldedResources)), _originalState(originalState) {} + +void YieldedTransactionResources::transitionTransactionResourcesToFailedState( + OperationContext* opCtx) { + if (_yieldedResources) { + _yieldedResources->releaseAllResourcesOnCommitOrAbort(); + _yieldedResources->state = shard_role_details::TransactionResources::State::FAILED; + TransactionResources::attachToOpCtx(opCtx, std::move(_yieldedResources)); + } +} YieldedTransactionResources yieldTransactionResourcesFromOperationContext(OperationContext* opCtx) { - auto& transactionResources = getTransactionResources(opCtx); - if (!transactionResources) { - return YieldedTransactionResources(); - } + auto& transactionResources = TransactionResources::get(opCtx); + invariant( + !(transactionResources.yielded || + transactionResources.state == shard_role_details::TransactionResources::State::YIELDED)); - invariant(!transactionResources->yielded); + invariant(transactionResources.state == + shard_role_details::TransactionResources::State::ACTIVE || + transactionResources.state == shard_role_details::TransactionResources::State::EMPTY); - // Yielding kLocalCatalogOnlyWithPotentialDataLoss acquisitions is not allowed. - for (auto& acquisition : transactionResources->acquiredCollections) { + for (auto& acquisition : transactionResources.acquiredCollections) { + // Yielding kLocalCatalogOnlyWithPotentialDataLoss acquisitions is not allowed. invariant( !stdx::holds_alternative( acquisition.prerequisites.placementConcern), - str::stream() << "Collection " << acquisition.prerequisites.nss + str::stream() << "Collection " << acquisition.prerequisites.nss.toStringForErrorMsg() << " acquired with special placement concern and cannot be yielded"); } // Yielding view acquisitions is not supported. 
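`ScopedLocalCatalogWriteFence` clears the acquisition's `CollectionPtr` while local catalog writes are in progress and, in its destructor, re-resolves it from the latest catalog (or marks the acquisition invalidated if that fails). A hypothetical usage shape, heavily hedged since the actual DDL machinery is elided and this is not compiled against the tree:

```cpp
// Assumed context: opCtx and nss are in scope, and the caller intends a
// local-catalog-only modification (no sharding protocol involved).
auto acquisition =
    acquireCollectionForLocalCatalogOnlyWithPotentialDataLoss(opCtx, nss, MODE_X);

{
    ScopedLocalCatalogWriteFence fence(opCtx, &acquisition);
    // The acquisition's CollectionPtr must not be used inside this scope.
    // ... perform the catalog modifications here ...
}

// Once the fence is destroyed, getCollectionPtr() reflects the latest catalog,
// or the acquisition has been invalidated and getCollectionPtr() will tassert.
```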
tassert(7300502, "Yielding view acquisitions is forbidden", - transactionResources->acquiredViews.empty()); + transactionResources.acquiredViews.empty()); - invariant(!transactionResources->lockSnapshot); - transactionResources->lockSnapshot.emplace(); - opCtx->lockState()->saveLockStateAndUnlock(&(*transactionResources->lockSnapshot)); + Locker::LockSnapshot lockSnapshot; + opCtx->lockState()->saveLockStateAndUnlock(&lockSnapshot); + transactionResources.yielded.emplace( + TransactionResources::YieldedStateHolder{std::move(lockSnapshot)}); - transactionResources->yielded = true; + auto originalState = std::exchange(transactionResources.state, + shard_role_details::TransactionResources::State::YIELDED); - return YieldedTransactionResources(std::move(transactionResources)); + return YieldedTransactionResources(TransactionResources::detachFromOpCtx(opCtx), originalState); } -void restoreTransactionResourcesToOperationContext(OperationContext* opCtx, - YieldedTransactionResources&& yieldedResources) { - if (!yieldedResources._yieldedResources) { +void restoreTransactionResourcesToOperationContext( + OperationContext* opCtx, YieldedTransactionResources yieldedResourcesHolder) { + if (!yieldedResourcesHolder._yieldedResources) { // Nothing to restore. return; } + TransactionResources::attachToOpCtx(opCtx, std::move(yieldedResourcesHolder._yieldedResources)); + auto& transactionResources = TransactionResources::get(opCtx); + // On failure to restore, release the yielded resources. ScopeGuard scopeGuard([&] { - yieldedResources._yieldedResources->releaseAllResourcesOnCommitOrAbort(); - yieldedResources._yieldedResources.reset(); + transactionResources.releaseAllResourcesOnCommitOrAbort(); + transactionResources.state = shard_role_details::TransactionResources::State::FAILED; }); - // Reacquire locks. - if (yieldedResources._yieldedResources->lockSnapshot) { - opCtx->lockState()->restoreLockState(opCtx, - *yieldedResources._yieldedResources->lockSnapshot); - yieldedResources._yieldedResources->lockSnapshot.reset(); - } - - // Reacquire service snapshots. Will throw if placement concern can no longer be met. - for (auto& acquiredCollection : yieldedResources._yieldedResources->acquiredCollections) { - const auto& prerequisites = acquiredCollection.prerequisites; + auto restoreFn = [&] { + // Reacquire locks + opCtx->lockState()->restoreLockState(opCtx, transactionResources.yielded->yieldedLocker); + transactionResources.yielded.reset(); + + // Reestablish a consistent catalog snapshot (multi document transactions don't yield). + auto requests = toNamespaceStringOrUUIDs(transactionResources.acquiredCollections); + auto catalog = getConsistentCatalogAndSnapshot(opCtx, requests); + + // Reacquire service snapshots. Will throw if placement concern can no longer be met. + for (auto& acquiredCollection : transactionResources.acquiredCollections) { + const auto& prerequisites = acquiredCollection.prerequisites; + + auto uassertCollectionAppearedAfterRestore = [&] { + uasserted(743870, + str::stream() + << "Collection " << prerequisites.nss.toStringForErrorMsg() + << " appeared after a restore, which violates the semantics of " + "restore"); + }; + + if (prerequisites.operationType == AcquisitionPrerequisites::OperationType::kRead) { + // Just reacquire the CollectionPtr. Reads don't care about placement changes + // because they have already established a ScopedCollectionFilter that acts as + // RangePreserver. 
+ auto collOrView = acquireLocalCollectionOrView(opCtx, *catalog, prerequisites); + + // We do not support yielding view acquisitions. Therefore it is not possible + // that upon restore 'acquireLocalCollectionOrView' snapshoted a view -- it + // would not have met the prerequisite that the collection instance is still the + // same as the one before yielding. + invariant(std::holds_alternative(collOrView)); + if (!acquiredCollection.collectionPtr != !std::get(collOrView)) { + uassertCollectionAppearedAfterRestore(); + } + + // Update the services snapshot on TransactionResources + acquiredCollection.collectionPtr = std::move(std::get(collOrView)); + } else { + // Make sure that the placement is still correct. + if (std::holds_alternative(prerequisites.placementConcern)) { + checkPlacementVersion( + opCtx, + prerequisites.nss, + std::get(prerequisites.placementConcern)); + } + + auto reacquiredServicesSnapshot = + acquireServicesSnapshot(opCtx, *catalog, prerequisites); + + // We do not support yielding view acquisitions. Therefore it is not possible + // that upon restore 'acquireLocalCollectionOrView' snapshoted a view -- it + // would not have met the prerequisite that the collection instance is still the + // same as the one before yielding. + invariant(std::holds_alternative( + reacquiredServicesSnapshot.collectionPtrOrView)); + if (!acquiredCollection.collectionPtr != + !std::get(reacquiredServicesSnapshot.collectionPtrOrView)) { + uassertCollectionAppearedAfterRestore(); + } + + // Update the services snapshot on TransactionResources + acquiredCollection.collectionPtr = std::move( + std::get(reacquiredServicesSnapshot.collectionPtrOrView)); + acquiredCollection.collectionDescription = + std::move(reacquiredServicesSnapshot.collectionDescription); + acquiredCollection.ownershipFilter = + std::move(reacquiredServicesSnapshot.ownershipFilter); + } - auto uassertCollectionAppearedAfterRestore = [&] { - uasserted(743870, - str::stream() - << "Collection " << prerequisites.nss - << " appeared after a restore, which violates the semantics of restore"); - }; - - if (prerequisites.operationType == AcquisitionPrerequisites::OperationType::kRead) { - // Just reacquire the CollectionPtr. Reads don't care about placement changes because - // they have already established a ScopedCollectionFilter that acts as RangePreserver. - auto collOrView = acquireLocalCollectionOrView(opCtx, prerequisites); - - // We do not support yielding view acquisitions. Therefore it is not possible that upon - // restore 'acquireLocalCollectionOrView' snapshoted a view -- it would not have met the - // prerequisite that the collection instance is still the same as the one before - // yielding. - invariant(std::holds_alternative(collOrView)); - if (!acquiredCollection.collectionPtr != !std::get(collOrView)) - uassertCollectionAppearedAfterRestore(); - - // Update the services snapshot on TransactionResources - acquiredCollection.collectionPtr = std::move(std::get(collOrView)); - } else { - auto reacquiredServicesSnapshot = acquireServicesSnapshot(opCtx, prerequisites); - - // We do not support yielding view acquisitions. Therefore it is not possible that upon - // restore 'acquireLocalCollectionOrView' snapshoted a view -- it would not have met the - // prerequisite that the collection instance is still the same as the one before - // yielding. 
- invariant(std::holds_alternative( - reacquiredServicesSnapshot.collectionPtrOrView)); - if (!acquiredCollection.collectionPtr != - !std::get(reacquiredServicesSnapshot.collectionPtrOrView)) - uassertCollectionAppearedAfterRestore(); - - // Update the services snapshot on TransactionResources - acquiredCollection.collectionPtr = - std::move(std::get(reacquiredServicesSnapshot.collectionPtrOrView)); - acquiredCollection.collectionDescription = - std::move(reacquiredServicesSnapshot.collectionDescription); - acquiredCollection.ownershipFilter = - std::move(reacquiredServicesSnapshot.ownershipFilter); + // TODO: This will be removed when we no longer snapshot sharding state on CollectionPtr + invariant(acquiredCollection.collectionDescription); + if (acquiredCollection.collectionDescription->isSharded()) { + acquiredCollection.collectionPtr.setShardKeyPattern( + acquiredCollection.collectionDescription->getKeyPattern()); + } } + return catalog; + }; + + auto catalog = [&]() { + while (true) { + try { + return restoreFn(); + } catch (const ExceptionFor& ex) { + if (ShardVersion::isPlacementVersionIgnored(ex->getVersionReceived()) && + ex->getCriticalSectionSignal()) { + // If ShardVersion is IGNORED and we encountered a critical section, then yield, + // wait for the critical section to finish and then we'll resume the write from + // the point we had left. We do this to prevent large multi-writes from + // repeatedly failing due to StaleConfig and exhausting the mongos retry + // attempts. Yield the locks. + Locker::LockSnapshot lockSnapshot; + opCtx->recoveryUnit()->abandonSnapshot(); + opCtx->lockState()->saveLockStateAndUnlock(&lockSnapshot); + transactionResources.yielded.emplace( + TransactionResources::YieldedStateHolder{std::move(lockSnapshot)}); + // Wait for the critical section to finish. + OperationShardingState::waitForCriticalSectionToComplete( + opCtx, *ex->getCriticalSectionSignal()) + .ignore(); + // Try again to restore. + continue; + } + throw; + } + } + }(); + + transactionResources.state = yieldedResourcesHolder._originalState; + + if (!opCtx->inMultiDocumentTransaction()) { + CollectionCatalog::stash(opCtx, catalog); } - // Restore TransactionsResource on opCtx. 
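The restore path above retries in a loop: when restore fails with StaleConfig, the placement version is IGNORED, and a migration critical section is active, it releases the locks, waits for the critical section to finish, and tries again rather than surfacing the error. A self-contained sketch of that wait-and-retry shape with placeholder types (not MongoDB code; the helpers are assumptions):

```cpp
struct CriticalSectionActive {};        // stands in for the StaleConfig + signal case

void releaseLocksAndSnapshot();         // assumption: provided elsewhere
void waitForCriticalSectionToEnd();     // assumption: provided elsewhere

// Keep retrying the restore step; when it fails only because a migration critical
// section is in progress, wait the section out instead of failing the operation.
template <typename RestoreFn>
auto retryRestoreSketch(RestoreFn restore) {
    while (true) {
        try {
            return restore();
        } catch (const CriticalSectionActive&) {
            releaseLocksAndSnapshot();
            waitForCriticalSectionToEnd();
            // Loop and attempt the restore again.
        }
    }
}
```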
- yieldedResources._yieldedResources->yielded = false; - getTransactionResources(opCtx) = std::move(yieldedResources)._yieldedResources; scopeGuard.dismiss(); } diff --git a/src/mongo/db/shard_role.h b/src/mongo/db/shard_role.h index 754557b8f1b38..41c1852627e34 100644 --- a/src/mongo/db/shard_role.h +++ b/src/mongo/db/shard_role.h @@ -29,85 +29,90 @@ #pragma once -#include "mongo/db/catalog_raii.h" -#include "mongo/db/db_raii.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/s/collection_sharding_state.h" +#include "mongo/db/s/database_sharding_state.h" +#include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/scoped_collection_metadata.h" #include "mongo/db/transaction_resources.h" -#include "mongo/s/database_version.h" +#include "mongo/db/views/view.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/uuid.h" namespace mongo { /** - * Structure used to declare all the prerequsites that the catalog needs to meet in order for an + * Structure used to declare all the prerequisites that the catalog needs to meet in order for an * acquisition of a namespace to succeed. */ struct CollectionOrViewAcquisitionRequest { /** - * Overload, which acquires a collection by NSS, ignoring the current UUID mapping. + * Overload, which acquires a collection by NSS or DB/UUID, without imposing an expected + * relationship between NSS and UUID. */ CollectionOrViewAcquisitionRequest( - NamespaceString nss, + NamespaceStringOrUUID nssOrUUID, PlacementConcern placementConcern, repl::ReadConcernArgs readConcern, AcquisitionPrerequisites::OperationType operationType, AcquisitionPrerequisites::ViewMode viewMode = AcquisitionPrerequisites::kCanBeView) - : nss(nss), - placementConcern(placementConcern), + : nssOrUUID(std::move(nssOrUUID)), + placementConcern(std::move(placementConcern)), readConcern(readConcern), operationType(operationType), viewMode(viewMode) {} /** - * Overload, which acquires a collection by NSS/UUID combination, requiring that the UUID of the - * namespace matches exactly. + * Overload, which acquires a collection by NSS/UUID combination, requiring that, if specified, + * the UUID of the namespace matches exactly. */ CollectionOrViewAcquisitionRequest( NamespaceString nss, - UUID uuid, + boost::optional uuid, PlacementConcern placementConcern, repl::ReadConcernArgs readConcern, AcquisitionPrerequisites::OperationType operationType, AcquisitionPrerequisites::ViewMode viewMode = AcquisitionPrerequisites::kCanBeView) - : nss(nss), - uuid(uuid), + : nssOrUUID(std::move(nss)), + expectedUUID(std::move(uuid)), placementConcern(placementConcern), readConcern(readConcern), operationType(operationType), viewMode(viewMode) {} /** - * Overload, which acquires a collection by NSS or DB/UUID, without imposing an expected - * relationship between NSS and UUID. 
- */ - CollectionOrViewAcquisitionRequest( - NamespaceStringOrUUID nssOrUUID, - PlacementConcern placementConcern, - repl::ReadConcernArgs readConcern, - AcquisitionPrerequisites::OperationType operationType, - AcquisitionPrerequisites::ViewMode viewMode = AcquisitionPrerequisites::kCanBeView) - : nss(nssOrUUID.nss()), - dbname(nssOrUUID.dbName()), - uuid(nssOrUUID.uuid()), - placementConcern(placementConcern), - readConcern(readConcern), - operationType(operationType), - viewMode(viewMode) {} - - /** - * Infers the placement and read concerns from the OperationShardingState and ReadConcern values - * on the OperationContext. + * Overload, which acquires a collection or view by NSS or DB/UUID and infers the placement and + * read concerns from the OperationShardingState and ReadConcern values on the OperationContext. */ static CollectionOrViewAcquisitionRequest fromOpCtx( OperationContext* opCtx, - NamespaceString nss, + NamespaceStringOrUUID nssOrUUID, AcquisitionPrerequisites::OperationType operationType, AcquisitionPrerequisites::ViewMode viewMode = AcquisitionPrerequisites::kCanBeView); - boost::optional nss; + NamespaceStringOrUUID nssOrUUID; - boost::optional dbname; - boost::optional uuid; + // When 'nssOrUUID' is in the NamespaceString form 'expectedUUID' may contain the expected + // collection UUID for that nss. When 'nssOrUUID' is in the UUID form, then 'expectedUUID' is + // boost::none because the 'nssOrUUID' already expresses the desired UUID. + boost::optional expectedUUID; PlacementConcern placementConcern; repl::ReadConcernArgs readConcern; @@ -117,24 +122,25 @@ struct CollectionOrViewAcquisitionRequest { struct CollectionAcquisitionRequest : public CollectionOrViewAcquisitionRequest { /** - * Overload, which acquires a collection by NSS, ignoring the current UUID mapping. + * Overload, which acquires a collection by NSS or DB/UUID, without imposing an expected + * relationship between NSS and UUID. */ - CollectionAcquisitionRequest(NamespaceString nss, + CollectionAcquisitionRequest(NamespaceStringOrUUID nssOrUUID, PlacementConcern placementConcern, repl::ReadConcernArgs readConcern, AcquisitionPrerequisites::OperationType operationType) - : CollectionOrViewAcquisitionRequest(nss, + : CollectionOrViewAcquisitionRequest(nssOrUUID, placementConcern, readConcern, operationType, AcquisitionPrerequisites::kMustBeCollection) {} /** - * Overload, which acquires a collection by NSS/UUID combination, requiring that the UUID of the - * namespace matches exactly. + * Overload, which acquires a collection by NSS/UUID combination, requiring that, if specified, + * the UUID of the namespace matches exactly. */ CollectionAcquisitionRequest(NamespaceString nss, - UUID uuid, + boost::optional uuid, PlacementConcern placementConcern, repl::ReadConcernArgs readConcern, AcquisitionPrerequisites::OperationType operationType) @@ -145,20 +151,6 @@ struct CollectionAcquisitionRequest : public CollectionOrViewAcquisitionRequest operationType, AcquisitionPrerequisites::kMustBeCollection) {} - /** - * Overload, which acquires a collection by NSS or DB/UUID, without imposing an expected - * relationship between NSS and UUID. 
- */ - CollectionAcquisitionRequest(NamespaceStringOrUUID nssOrUUID, - PlacementConcern placementConcern, - repl::ReadConcernArgs readConcern, - AcquisitionPrerequisites::OperationType operationType) - : CollectionOrViewAcquisitionRequest(nssOrUUID, - placementConcern, - readConcern, - operationType, - AcquisitionPrerequisites::kMustBeCollection) {} - /** * Infers the placement and read concerns from the OperationShardingState and ReadConcern values * on the OperationContext. @@ -166,102 +158,159 @@ struct CollectionAcquisitionRequest : public CollectionOrViewAcquisitionRequest static CollectionAcquisitionRequest fromOpCtx( OperationContext* opCtx, NamespaceString nss, + AcquisitionPrerequisites::OperationType operationType, + boost::optional expectedUUID = boost::none); + + static CollectionAcquisitionRequest fromOpCtx( + OperationContext* opCtx, + NamespaceStringOrUUID nssOrUUID, AcquisitionPrerequisites::OperationType operationType); }; -class ScopedCollectionAcquisition { +class CollectionOrViewAcquisition; + +/** + * A thread-unsafe ref-counted acquisition of a collection. The underlying acquisition stored inside + * the operation's TransactionResources is managed by this class. It will be released whenever the + * last reference to it is descoped. This class can be freely copied and moved around, each copy + * will point to the same acquisition. + * + * This class cannot be transferred to other threads/OperationContext since the pointed to resources + * lifetime would be held and manipulated by another thread. + */ +class CollectionAcquisition { public: - ScopedCollectionAcquisition(const mongo::ScopedCollectionAcquisition&) = delete; + explicit CollectionAcquisition(CollectionOrViewAcquisition&& other); - ScopedCollectionAcquisition(mongo::ScopedCollectionAcquisition&& other) - : _opCtx(other._opCtx), _acquiredCollection(other._acquiredCollection) { - other._opCtx = nullptr; - } + CollectionAcquisition(shard_role_details::TransactionResources& txnResources, + shard_role_details::AcquiredCollection& acquiredCollection); - ~ScopedCollectionAcquisition(); + CollectionAcquisition(const CollectionAcquisition& other); + CollectionAcquisition(CollectionAcquisition&& other); - ScopedCollectionAcquisition(OperationContext* opCtx, - shard_role_details::AcquiredCollection& acquiredCollection) - : _opCtx(opCtx), _acquiredCollection(acquiredCollection) {} + CollectionAcquisition& operator=(const CollectionAcquisition& other); + CollectionAcquisition& operator=(CollectionAcquisition&& other); - const NamespaceString& nss() const { - return _acquiredCollection.prerequisites.nss; - } + ~CollectionAcquisition(); + + const NamespaceString& nss() const; /** * Returns whether the acquisition found a collection or the collection didn't exist. */ - bool exists() const { - return bool(_acquiredCollection.prerequisites.uuid); - } + bool exists() const; /** * Returns the UUID of the acquired collection, but this operation is only allowed if the * collection `exists()`, otherwise this method will invariant. 
*/ - const UUID& uuid() const; + UUID uuid() const; // Access to services associated with the specified collection top to bottom on the hierarchical // stack // Sharding catalog services - const ScopedCollectionDescription& getShardingDescription() const; const boost::optional& getShardingFilter() const; // Local catalog services - - const CollectionPtr& getCollectionPtr() const { - return _acquiredCollection.collectionPtr; - } + const CollectionPtr& getCollectionPtr() const; private: friend class ScopedLocalCatalogWriteFence; + // Points to the acquired resources that live on the TransactionResources opCtx decoration. The + // lifetime of these resources is tied to the lifetime of this CollectionAcquisition. + shard_role_details::TransactionResources* _txnResources; + shard_role_details::AcquiredCollection* _acquiredCollection; +}; - OperationContext* _opCtx; +/** + * A thread-unsafe ref-counted acquisition of a view. The underlying acquisition stored inside the + * operation's TransactionResources is managed by this class. It will be released whenever the last + * reference to it is descoped. This class can be freely copied and moved around, each copy will + * point to the same acquisition. + * + * This class cannot be transferred to other threads/OperationContext since the pointed to resources + * lifetime would be held and manipulated by another thread. + */ +class ViewAcquisition { +public: + ViewAcquisition(shard_role_details::TransactionResources& txnResources, + const shard_role_details::AcquiredView& acquiredView); + + ViewAcquisition(const ViewAcquisition& other); + ViewAcquisition(ViewAcquisition&& other); + + ViewAcquisition& operator=(const ViewAcquisition& other); + ViewAcquisition& operator=(ViewAcquisition&& other); + + ~ViewAcquisition(); + const NamespaceString& nss() const; + + // StorEx services + const ViewDefinition& getViewDefinition() const; + +private: // Points to the acquired resources that live on the TransactionResources opCtx decoration. The - // lifetime of these resources is tied to the lifetime of this - // ScopedCollectionOrViewAcquisition. - shard_role_details::AcquiredCollection& _acquiredCollection; + // lifetime of these resources is tied to the lifetime of this ViewAcquisition. 
+ shard_role_details::TransactionResources* _txnResources; + const shard_role_details::AcquiredView* _acquiredView; }; -class ScopedViewAcquisition { +class CollectionOrViewAcquisition { public: - ScopedViewAcquisition(const mongo::ScopedViewAcquisition&) = delete; + CollectionOrViewAcquisition(CollectionAcquisition&& collection) + : _collectionOrViewAcquisition(std::move(collection)) {} + + CollectionOrViewAcquisition(ViewAcquisition&& view) + : _collectionOrViewAcquisition(std::move(view)) {} + + const NamespaceString& nss() const { + if (isCollection()) { + return getCollection().nss(); + } else { + return getView().nss(); + } + } - ScopedViewAcquisition(mongo::ScopedViewAcquisition&& other) - : _opCtx(other._opCtx), _acquiredView(other._acquiredView) { - other._opCtx = nullptr; + bool isCollection() const { + return std::holds_alternative(_collectionOrViewAcquisition); } - ~ScopedViewAcquisition(); + bool isView() const { + return std::holds_alternative(_collectionOrViewAcquisition); + } - ScopedViewAcquisition(OperationContext* opCtx, - const shard_role_details::AcquiredView& acquiredView) - : _opCtx(opCtx), _acquiredView(acquiredView) {} + const CollectionAcquisition& getCollection() const { + invariant(isCollection()); + return std::get(_collectionOrViewAcquisition); + } - const NamespaceString& nss() const { - return _acquiredView.prerequisites.nss; + const CollectionPtr& getCollectionPtr() const { + if (isCollection()) { + return getCollection().getCollectionPtr(); + } else { + return CollectionPtr::null; + } } - // StorEx services - const ViewDefinition& getViewDefinition() const { - invariant(_acquiredView.viewDefinition); - return *(_acquiredView.viewDefinition); + const ViewAcquisition& getView() const { + invariant(isView()); + return std::get(_collectionOrViewAcquisition); } private: - OperationContext* _opCtx; + friend class CollectionAcquisition; - // Points to the acquired resources that live on the TransactionResources opCtx decoration. The - // lifetime of these resources is tied to the lifetime of this - // ScopedCollectionOrViewAcquisition. - const shard_role_details::AcquiredView& _acquiredView; + std::variant + _collectionOrViewAcquisition; }; -using ScopedCollectionOrViewAcquisition = - std::variant; +using CollectionAcquisitions = stdx::unordered_map; + +using CollectionOrViewAcquisitions = + stdx::unordered_map; /** * Takes into account the specified namespace acquisition requests and if they can be satisfied, @@ -270,35 +319,45 @@ using ScopedCollectionOrViewAcquisition = * This method will acquire and 2-phase hold all the necessary hierarchical locks (Global, DB and * Collection). 
*/ -ScopedCollectionAcquisition acquireCollection(OperationContext* opCtx, - CollectionAcquisitionRequest acquisitionRequest, - LockMode mode); +CollectionAcquisition acquireCollection(OperationContext* opCtx, + CollectionAcquisitionRequest acquisitionRequest, + LockMode mode); -std::vector acquireCollections( +CollectionAcquisitions acquireCollections( OperationContext* opCtx, std::vector acquisitionRequests, LockMode mode); -ScopedCollectionOrViewAcquisition acquireCollectionOrView( +CollectionOrViewAcquisition acquireCollectionOrView( OperationContext* opCtx, CollectionOrViewAcquisitionRequest acquisitionRequest, LockMode mode); -std::vector acquireCollectionsOrViews( +CollectionOrViewAcquisitions acquireCollectionsOrViews( OperationContext* opCtx, std::vector acquisitionRequests, LockMode mode); /** * Same semantics as `acquireCollectionsOrViews` above, but will not acquire or hold any of the - * 2-phase hierarchical locks. + * 2-phase hierarchical locks if allowed for this operation. The conditions required for the + * acquisition to be lock-free are: + * * The operation is not a multi-document transaction. + * * The global lock is not write locked. + * * No storage transaction is already open, or if it is, it has to be for a lock free operation. */ -std::vector acquireCollectionsOrViewsWithoutTakingLocks( +CollectionAcquisition acquireCollectionMaybeLockFree( + OperationContext* opCtx, CollectionAcquisitionRequest acquisitionRequest); + +CollectionOrViewAcquisition acquireCollectionOrViewMaybeLockFree( + OperationContext* opCtx, CollectionOrViewAcquisitionRequest acquisitionRequest); + +CollectionOrViewAcquisitions acquireCollectionsOrViewsMaybeLockFree( OperationContext* opCtx, std::vector acquisitionRequests); /** * Please read the comments on AcquisitionPrerequisites::kLocalCatalogOnlyWithPotentialDataLoss for * more information on the semantics of this acquisition. */ -ScopedCollectionAcquisition acquireCollectionForLocalCatalogOnlyWithPotentialDataLoss( +CollectionAcquisition acquireCollectionForLocalCatalogOnlyWithPotentialDataLoss( OperationContext* opCtx, const NamespaceString& nss, LockMode mode); /** @@ -319,12 +378,13 @@ ScopedCollectionAcquisition acquireCollectionForLocalCatalogOnlyWithPotentialDat */ class ScopedLocalCatalogWriteFence { public: - ScopedLocalCatalogWriteFence(OperationContext* opCtx, ScopedCollectionAcquisition* acquisition); - ~ScopedLocalCatalogWriteFence(); + ScopedLocalCatalogWriteFence(OperationContext* opCtx, CollectionAcquisition* acquisition); ScopedLocalCatalogWriteFence(ScopedLocalCatalogWriteFence&) = delete; ScopedLocalCatalogWriteFence(ScopedLocalCatalogWriteFence&&) = delete; + ~ScopedLocalCatalogWriteFence(); + private: static void _updateAcquiredLocalCollection( OperationContext* opCtx, shard_role_details::AcquiredCollection* acquiredCollection); @@ -336,23 +396,68 @@ class ScopedLocalCatalogWriteFence { /** * Serves as a temporary container for transaction resources which have been yielded via a call to * `yieldTransactionResources`. Must never be destroyed without having been restored and the - * transaction resources properly committed/aborted. + * transaction resources properly committed/aborted, or disposed of. 
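 *
 * Typical yield/restore sequence, shown as an illustrative sketch only (it mirrors the usage in
 * shard_role_test.cpp; the surrounding control flow is hypothetical):
 *
 *     auto yielded = yieldTransactionResourcesFromOperationContext(opCtx);
 *     opCtx->recoveryUnit()->abandonSnapshot();
 *     // ... the acquisitions' locks and storage snapshot are released while yielded ...
 *     restoreTransactionResourcesToOperationContext(opCtx, std::move(yielded));
 *
 * If the operation fails while yielded and the resources will not be restored, call
 * transitionTransactionResourcesToFailedState(opCtx) on the yielded object before destroying it.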
*/ -class YieldedTransactionResources { -public: - ~YieldedTransactionResources(); - - YieldedTransactionResources() = default; +struct YieldedTransactionResources { + YieldedTransactionResources(YieldedTransactionResources&&) = default; YieldedTransactionResources( - std::unique_ptr&& yieldedResources); + std::unique_ptr yieldedResources, + shard_role_details::TransactionResources::State originalState); + + ~YieldedTransactionResources(); + + /** + * Releases the yielded TransactionResources, transitions the resources back to the opCtx and + * marks them as FAILED. + */ + void transitionTransactionResourcesToFailedState(OperationContext* opCtx); std::unique_ptr _yieldedResources; + shard_role_details::TransactionResources::State _originalState; }; +/** + * This method puts the TransactionResources associated with the current OpCtx into the yielded + * state and then detaches them from the OpCtx, moving their ownership to the returned object. + * + * The returned object must either be properly restored by a later call to + * `restoreTransactionResourcesToOperationContext` or it must be + * `.transitionTransactionResourcesToFailedState()`d before destruction. + * + * It is not always allowed to yield the transaction resources and it is the caller's responsibility + * to verify a yield can be performed by calling Locker::canSaveLockState(). + */ YieldedTransactionResources yieldTransactionResourcesFromOperationContext(OperationContext* opCtx); -void restoreTransactionResourcesToOperationContext(OperationContext* opCtx, - YieldedTransactionResources&& yieldedResources); +void restoreTransactionResourcesToOperationContext( + OperationContext* opCtx, YieldedTransactionResources yieldedResourcesHolder); + +namespace shard_role_details { +class SnapshotAttempt { +public: + SnapshotAttempt(OperationContext* opCtx, + const std::vector& acquisitionRequests) + : _opCtx{opCtx}, _acquisitionRequests(acquisitionRequests) {} + + ~SnapshotAttempt(); + void snapshotInitialState(); + + void changeReadSourceForSecondaryReads(); + + void openStorageSnapshot(); + + [[nodiscard]] std::shared_ptr getConsistentCatalog(); + +private: + OperationContext* _opCtx; + const std::vector& _acquisitionRequests; + bool _openedSnapshot = false; + bool _successful = false; + boost::optional _replTermBeforeSnapshot; + boost::optional> _catalogBeforeSnapshot; + boost::optional _shouldReadAtLastApplied; +}; +} // namespace shard_role_details } // namespace mongo diff --git a/src/mongo/db/shard_role_test.cpp b/src/mongo/db/shard_role_test.cpp index 18332bc3fb63c..aea9c02e3ca2e 100644 --- a/src/mongo/db/shard_role_test.cpp +++ b/src/mongo/db/shard_role_test.cpp @@ -27,20 +27,68 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/s/collection_metadata.h" #include "mongo/db/s/collection_sharding_runtime.h" #include "mongo/db/s/database_sharding_state.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/shard_id.h" #include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/stale_exception.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" namespace mongo { namespace { @@ -62,9 +110,9 @@ void createTestView(OperationContext* opCtx, void installDatabaseMetadata(OperationContext* opCtx, const DatabaseName& dbName, const DatabaseVersion& dbVersion) { - AutoGetDb autoDb(opCtx, dbName, MODE_X, {}); + AutoGetDb autoDb(opCtx, dbName, MODE_X, {}, {}); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx, dbName); - scopedDss->setDbInfo(opCtx, {dbName.db(), ShardId("this"), dbVersion}); + scopedDss->setDbInfo(opCtx, {dbName.toString_forTest(), ShardId("this"), dbVersion}); } void installUnshardedCollectionMetadata(OperationContext* opCtx, const NamespaceString& nss) { @@ -133,7 +181,7 @@ class ShardRoleTest : public ServiceContextMongoDTest { const ShardId thisShardId{"this"}; - const DatabaseName dbNameTestDb{"test"}; + const DatabaseName dbNameTestDb = DatabaseName::createDatabaseName_forTest(boost::none, "test"); const DatabaseVersion dbVersionTestDb{UUID::gen(), Timestamp(1, 0)}; const NamespaceString nssUnshardedCollection1 = @@ -263,28 +311,116 @@ TEST_F(ShardRoleTest, NamespaceOrViewAcquisitionRequestWithOpCtxTakesPlacementFr } } +TEST_F(ShardRoleTest, AcquisitionWithInvalidNamespaceFails) { + const auto checkAcquisitionByNss = 
[&](const NamespaceString& nss) { + // With locks + ASSERT_THROWS_CODE( + acquireCollection(opCtx(), + {nss, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX), + DBException, + ErrorCodes::InvalidNamespace); + + // Without locks + ASSERT_THROWS_CODE( + acquireCollectionsOrViewsMaybeLockFree( + opCtx(), {{nss, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kRead}}), + DBException, + ErrorCodes::InvalidNamespace); + }; + + const auto checkAcquisitionByNssOrUUID = [&](const NamespaceStringOrUUID& nssOrUuid) { + // With locks + ASSERT_THROWS_CODE( + acquireCollection( + opCtx(), + {nssOrUuid, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX), + DBException, + ErrorCodes::InvalidNamespace); + + // Without locks + ASSERT_THROWS_CODE( + acquireCollectionsOrViewsMaybeLockFree( + opCtx(), + {{nssOrUuid, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kRead}}), + DBException, + ErrorCodes::InvalidNamespace); + }; + + const NamespaceString nssEmptyCollectionName = + NamespaceString::createNamespaceString_forTest(dbNameTestDb, ""); + checkAcquisitionByNss(nssEmptyCollectionName); + checkAcquisitionByNssOrUUID(nssEmptyCollectionName); + + const NamespaceString nssEmptyDbName = + NamespaceString::createNamespaceString_forTest("", "foo"); + checkAcquisitionByNss(nssEmptyDbName); + checkAcquisitionByNssOrUUID(nssEmptyDbName); + checkAcquisitionByNssOrUUID(NamespaceStringOrUUID("", UUID::gen())); +} + // --------------------------------------------------------------------------- // Placement checks when acquiring unsharded collections TEST_F(ShardRoleTest, AcquireUnshardedCollWithCorrectPlacementVersion) { PlacementConcern placementConcern{dbVersionTestDb, ShardVersion::UNSHARDED()}; - const auto acquisition = acquireCollection(opCtx(), - {nssUnshardedCollection1, - placementConcern, - repl::ReadConcernArgs(), - AcquisitionPrerequisites::kWrite}, - MODE_IX); - ASSERT_EQ(nssUnshardedCollection1, acquisition.nss()); - ASSERT_EQ(nssUnshardedCollection1, acquisition.getCollectionPtr()->ns()); - ASSERT_FALSE(acquisition.getShardingDescription().isSharded()); - ASSERT_FALSE(acquisition.getShardingFilter().has_value()); + auto validateAcquisition = [&](auto& acquisition) { + ASSERT_EQ(nssUnshardedCollection1, acquisition.nss()); + ASSERT_EQ(nssUnshardedCollection1, acquisition.getCollectionPtr()->ns()); + ASSERT_FALSE(acquisition.getShardingDescription().isSharded()); + }; + + // With locks. + { + const auto acquisition = acquireCollection(opCtx(), + {nssUnshardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kWrite}, + MODE_IX); + ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(dbNameTestDb, MODE_IX)); + ASSERT_TRUE( + opCtx()->lockState()->isCollectionLockedForMode(nssUnshardedCollection1, MODE_IX)); + validateAcquisition(acquisition); + } + + // Without locks. 
+ { + const auto acquisitions = + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{nssUnshardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}}); + + ASSERT_EQ(1, acquisitions.size()); + ASSERT_EQ(nssUnshardedCollection1, acquisitions.begin()->first); + ASSERT_TRUE(acquisitions.at(nssUnshardedCollection1).isCollection()); + const auto& acquisition = acquisitions.at(nssUnshardedCollection1).getCollection(); + + ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(dbNameTestDb, MODE_IS)); + ASSERT_FALSE( + opCtx()->lockState()->isCollectionLockedForMode(nssUnshardedCollection1, MODE_IS)); + validateAcquisition(acquisition); + } } TEST_F(ShardRoleTest, AcquireUnshardedCollWithIncorrectPlacementVersionThrows) { const auto incorrectDbVersion = DatabaseVersion(UUID::gen(), Timestamp(50, 0)); PlacementConcern placementConcern{incorrectDbVersion, ShardVersion::UNSHARDED()}; + + auto validateException = [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(dbNameTestDb.toString_forTest(), exInfo->getDb()); + ASSERT_EQ(incorrectDbVersion, exInfo->getVersionReceived()); + ASSERT_EQ(dbVersionTestDb, exInfo->getVersionWanted()); + ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); + }; + + // With locks. ASSERT_THROWS_WITH_CHECK(acquireCollection(opCtx(), { nssUnshardedCollection1, @@ -294,24 +430,38 @@ TEST_F(ShardRoleTest, AcquireUnshardedCollWithIncorrectPlacementVersionThrows) { }, MODE_IX), ExceptionFor, - [&](const DBException& ex) { - const auto exInfo = ex.extraInfo(); - ASSERT_EQ(dbNameTestDb.db(), exInfo->getDb()); - ASSERT_EQ(incorrectDbVersion, exInfo->getVersionReceived()); - ASSERT_EQ(dbVersionTestDb, exInfo->getVersionWanted()); - ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); - }); + validateException); + + // Without locks. 
+ ASSERT_THROWS_WITH_CHECK( + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{ + nssUnshardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead, + }}), + ExceptionFor, + validateException); } TEST_F(ShardRoleTest, AcquireUnshardedCollWhenShardDoesNotKnowThePlacementVersionThrows) { { // Clear the database metadata - AutoGetDb autoDb(opCtx(), dbNameTestDb, MODE_X, {}); + AutoGetDb autoDb(opCtx(), dbNameTestDb, MODE_X, {}, {}); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx(), dbNameTestDb); scopedDss->clearDbInfo(opCtx()); } + auto validateException = [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(dbNameTestDb.toString_forTest(), exInfo->getDb()); + ASSERT_EQ(dbVersionTestDb, exInfo->getVersionReceived()); + ASSERT_EQ(boost::none, exInfo->getVersionWanted()); + ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); + }; + PlacementConcern placementConcern{dbVersionTestDb, ShardVersion::UNSHARDED()}; ASSERT_THROWS_WITH_CHECK(acquireCollection(opCtx(), {nssUnshardedCollection1, @@ -320,20 +470,23 @@ TEST_F(ShardRoleTest, AcquireUnshardedCollWhenShardDoesNotKnowThePlacementVersio AcquisitionPrerequisites::kWrite}, MODE_IX), ExceptionFor, - [&](const DBException& ex) { - const auto exInfo = ex.extraInfo(); - ASSERT_EQ(dbNameTestDb.db(), exInfo->getDb()); - ASSERT_EQ(dbVersionTestDb, exInfo->getVersionReceived()); - ASSERT_EQ(boost::none, exInfo->getVersionWanted()); - ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); - }); + validateException); + + ASSERT_THROWS_WITH_CHECK( + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{nssUnshardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}}), + ExceptionFor, + validateException); } TEST_F(ShardRoleTest, AcquireUnshardedCollWhenCriticalSectionIsActiveThrows) { const BSONObj criticalSectionReason = BSON("reason" << 1); { // Enter critical section. 
- AutoGetDb autoDb(opCtx(), dbNameTestDb, MODE_X, {}); + AutoGetDb autoDb(opCtx(), dbNameTestDb, MODE_X, {}, {}); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx(), dbNameTestDb); scopedDss->enterCriticalSectionCatchUpPhase(opCtx(), criticalSectionReason); @@ -342,6 +495,15 @@ TEST_F(ShardRoleTest, AcquireUnshardedCollWhenCriticalSectionIsActiveThrows) { { PlacementConcern placementConcern{dbVersionTestDb, ShardVersion::UNSHARDED()}; + + auto validateException = [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(dbNameTestDb.toString_forTest(), exInfo->getDb()); + ASSERT_EQ(dbVersionTestDb, exInfo->getVersionReceived()); + ASSERT_EQ(boost::none, exInfo->getVersionWanted()); + ASSERT_TRUE(exInfo->getCriticalSectionSignal().is_initialized()); + }; + ASSERT_THROWS_WITH_CHECK(acquireCollection(opCtx(), {nssUnshardedCollection1, placementConcern, @@ -349,19 +511,20 @@ TEST_F(ShardRoleTest, AcquireUnshardedCollWhenCriticalSectionIsActiveThrows) { AcquisitionPrerequisites::kWrite}, MODE_IX), ExceptionFor, - [&](const DBException& ex) { - const auto exInfo = ex.extraInfo(); - ASSERT_EQ(dbNameTestDb.db(), exInfo->getDb()); - ASSERT_EQ(dbVersionTestDb, exInfo->getVersionReceived()); - ASSERT_EQ(boost::none, exInfo->getVersionWanted()); - ASSERT_TRUE( - exInfo->getCriticalSectionSignal().is_initialized()); - }); + validateException); + ASSERT_THROWS_WITH_CHECK( + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{nssUnshardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}}), + ExceptionFor, + validateException); } { // Exit critical section. - AutoGetDb autoDb(opCtx(), dbNameTestDb, MODE_X, {}); + AutoGetDb autoDb(opCtx(), dbNameTestDb, MODE_X, {}, {}); const BSONObj criticalSectionReason = BSON("reason" << 1); auto scopedDss = DatabaseShardingState::assertDbLockedAndAcquireExclusive(opCtx(), dbNameTestDb); @@ -370,16 +533,43 @@ TEST_F(ShardRoleTest, AcquireUnshardedCollWhenCriticalSectionIsActiveThrows) { } TEST_F(ShardRoleTest, AcquireUnshardedCollWithoutSpecifyingPlacementVersion) { - const auto acquisition = - acquireCollection(opCtx(), - CollectionAcquisitionRequest::fromOpCtx( - opCtx(), nssUnshardedCollection1, AcquisitionPrerequisites::kWrite), - MODE_IX); - ASSERT_EQ(nssUnshardedCollection1, acquisition.nss()); - ASSERT_EQ(nssUnshardedCollection1, acquisition.getCollectionPtr()->ns()); - ASSERT_FALSE(acquisition.getShardingDescription().isSharded()); - ASSERT_FALSE(acquisition.getShardingFilter().has_value()); + auto validateAcquisition = [&](auto& acquisition) { + ASSERT_EQ(nssUnshardedCollection1, acquisition.nss()); + ASSERT_EQ(nssUnshardedCollection1, acquisition.getCollectionPtr()->ns()); + ASSERT_FALSE(acquisition.getShardingDescription().isSharded()); + }; + + // With locks. + { + const auto acquisition = acquireCollection( + opCtx(), + CollectionAcquisitionRequest::fromOpCtx( + opCtx(), nssUnshardedCollection1, AcquisitionPrerequisites::kWrite), + MODE_IX); + + ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(dbNameTestDb, MODE_IX)); + ASSERT_TRUE( + opCtx()->lockState()->isCollectionLockedForMode(nssUnshardedCollection1, MODE_IX)); + validateAcquisition(acquisition); + } + + // Without locks. 
+ { + const auto acquisitions = acquireCollectionsOrViewsMaybeLockFree( + opCtx(), + {CollectionAcquisitionRequest::fromOpCtx( + opCtx(), nssUnshardedCollection1, AcquisitionPrerequisites::kRead)}); + + ASSERT_EQ(1, acquisitions.size()); + ASSERT_TRUE(acquisitions.at(nssUnshardedCollection1).isCollection()); + const auto& acquisition = acquisitions.at(nssUnshardedCollection1).getCollection(); + + ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(dbNameTestDb, MODE_IS)); + ASSERT_FALSE( + opCtx()->lockState()->isCollectionLockedForMode(nssUnshardedCollection1, MODE_IS)); + validateAcquisition(acquisition); + } } TEST_F(ShardRoleTest, AcquireLocalCatalogOnlyWithPotentialDataLossUnsharded) { @@ -407,9 +597,9 @@ DEATH_TEST_F(ShardRoleTest, (void)acquisition.getShardingDescription(); } -DEATH_TEST_F(ShardRoleTest, - AcquireLocalCatalogOnlyWithPotentialDataLossForbiddenToAccessFilter, - "Invariant failure") { +DEATH_TEST_REGEX_F(ShardRoleTest, + AcquireLocalCatalogOnlyWithPotentialDataLossForbiddenToAccessFilter, + "Tripwire assertion.*7740800") { auto acquisition = acquireCollectionForLocalCatalogOnlyWithPotentialDataLoss( opCtx(), nssUnshardedCollection1, MODE_IX); @@ -421,21 +611,60 @@ DEATH_TEST_F(ShardRoleTest, TEST_F(ShardRoleTest, AcquireShardedCollWithCorrectPlacementVersion) { PlacementConcern placementConcern{{}, shardVersionShardedCollection1}; - const auto acquisition = acquireCollection(opCtx(), - {nssShardedCollection1, - placementConcern, - repl::ReadConcernArgs(), - AcquisitionPrerequisites::kWrite}, - MODE_IX); - ASSERT_EQ(nssShardedCollection1, acquisition.nss()); - ASSERT_EQ(nssShardedCollection1, acquisition.getCollectionPtr()->ns()); - ASSERT_TRUE(acquisition.getShardingDescription().isSharded()); - ASSERT_TRUE(acquisition.getShardingFilter().has_value()); + auto validateAcquisition = [&](auto& acquisition) { + ASSERT_EQ(nssShardedCollection1, acquisition.nss()); + ASSERT_EQ(nssShardedCollection1, acquisition.getCollectionPtr()->ns()); + ASSERT_TRUE(acquisition.getShardingDescription().isSharded()); + ASSERT_TRUE(acquisition.getShardingFilter().has_value()); + }; + + // With locks. + { + const auto acquisition = acquireCollection(opCtx(), + {nssShardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kWrite}, + MODE_IX); + ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(dbNameTestDb, MODE_IX)); + ASSERT_TRUE( + opCtx()->lockState()->isCollectionLockedForMode(nssShardedCollection1, MODE_IX)); + validateAcquisition(acquisition); + } + + // Without locks. 
+ { + const auto acquisitions = + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{nssShardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}}); + + ASSERT_EQ(1, acquisitions.size()); + ASSERT_TRUE(acquisitions.at(nssShardedCollection1).isCollection()); + const auto& acquisition = acquisitions.at(nssShardedCollection1).getCollection(); + + ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(dbNameTestDb, MODE_IS)); + ASSERT_FALSE( + opCtx()->lockState()->isCollectionLockedForMode(nssShardedCollection1, MODE_IS)); + validateAcquisition(acquisition); + } } TEST_F(ShardRoleTest, AcquireShardedCollWithIncorrectPlacementVersionThrows) { PlacementConcern placementConcern{dbVersionTestDb, ShardVersion::UNSHARDED()}; + + auto validateException = [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(nssShardedCollection1, exInfo->getNss()); + ASSERT_EQ(ShardVersion::UNSHARDED(), exInfo->getVersionReceived()); + ASSERT_EQ(shardVersionShardedCollection1, exInfo->getVersionWanted()); + ASSERT_EQ(ShardId("this"), exInfo->getShardId()); + ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); + }; + ASSERT_THROWS_WITH_CHECK(acquireCollection(opCtx(), { nssShardedCollection1, @@ -445,15 +674,18 @@ TEST_F(ShardRoleTest, AcquireShardedCollWithIncorrectPlacementVersionThrows) { }, MODE_IX), ExceptionFor, - [&](const DBException& ex) { - const auto exInfo = ex.extraInfo(); - ASSERT_EQ(nssShardedCollection1, exInfo->getNss()); - ASSERT_EQ(ShardVersion::UNSHARDED(), exInfo->getVersionReceived()); - ASSERT_EQ(shardVersionShardedCollection1, - exInfo->getVersionWanted()); - ASSERT_EQ(ShardId("this"), exInfo->getShardId()); - ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); - }); + validateException); + + ASSERT_THROWS_WITH_CHECK( + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{ + nssShardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead, + }}), + ExceptionFor, + validateException); } TEST_F(ShardRoleTest, AcquireShardedCollWhenShardDoesNotKnowThePlacementVersionThrows) { @@ -466,6 +698,16 @@ TEST_F(ShardRoleTest, AcquireShardedCollWhenShardDoesNotKnowThePlacementVersionT } PlacementConcern placementConcern{{}, shardVersionShardedCollection1}; + + auto validateException = [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(nssShardedCollection1, exInfo->getNss()); + ASSERT_EQ(shardVersionShardedCollection1, exInfo->getVersionReceived()); + ASSERT_EQ(boost::none, exInfo->getVersionWanted()); + ASSERT_EQ(ShardId("this"), exInfo->getShardId()); + ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); + }; + ASSERT_THROWS_WITH_CHECK(acquireCollection(opCtx(), {nssShardedCollection1, placementConcern, @@ -473,15 +715,15 @@ TEST_F(ShardRoleTest, AcquireShardedCollWhenShardDoesNotKnowThePlacementVersionT AcquisitionPrerequisites::kWrite}, MODE_IX), ExceptionFor, - [&](const DBException& ex) { - const auto exInfo = ex.extraInfo(); - ASSERT_EQ(nssShardedCollection1, exInfo->getNss()); - ASSERT_EQ(shardVersionShardedCollection1, - exInfo->getVersionReceived()); - ASSERT_EQ(boost::none, exInfo->getVersionWanted()); - ASSERT_EQ(ShardId("this"), exInfo->getShardId()); - ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); - }); + validateException); + ASSERT_THROWS_WITH_CHECK( + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{nssShardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + 
AcquisitionPrerequisites::kRead}}), + ExceptionFor, + validateException); } TEST_F(ShardRoleTest, AcquireShardedCollWhenCriticalSectionIsActiveThrows) { @@ -497,22 +739,31 @@ TEST_F(ShardRoleTest, AcquireShardedCollWhenCriticalSectionIsActiveThrows) { { PlacementConcern placementConcern{{}, shardVersionShardedCollection1}; + + auto validateException = [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(nssShardedCollection1, exInfo->getNss()); + ASSERT_EQ(shardVersionShardedCollection1, exInfo->getVersionReceived()); + ASSERT_EQ(boost::none, exInfo->getVersionWanted()); + ASSERT_EQ(ShardId("this"), exInfo->getShardId()); + ASSERT_TRUE(exInfo->getCriticalSectionSignal().is_initialized()); + }; + ASSERT_THROWS_WITH_CHECK(acquireCollection(opCtx(), + {nssShardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kWrite}, + MODE_IX), + ExceptionFor, + validateException); ASSERT_THROWS_WITH_CHECK( - acquireCollection(opCtx(), - {nssShardedCollection1, - placementConcern, - repl::ReadConcernArgs(), - AcquisitionPrerequisites::kWrite}, - MODE_IX), + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{nssShardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}}), ExceptionFor, - [&](const DBException& ex) { - const auto exInfo = ex.extraInfo(); - ASSERT_EQ(nssShardedCollection1, exInfo->getNss()); - ASSERT_EQ(shardVersionShardedCollection1, exInfo->getVersionReceived()); - ASSERT_EQ(boost::none, exInfo->getVersionWanted()); - ASSERT_EQ(ShardId("this"), exInfo->getShardId()); - ASSERT_TRUE(exInfo->getCriticalSectionSignal().is_initialized()); - }); + validateException); } { @@ -536,7 +787,6 @@ TEST_F(ShardRoleTest, AcquireShardedCollWithoutSpecifyingPlacementVersion) { // Note that the collection is treated as unsharded because the operation is unversioned. ASSERT_FALSE(acquisition.getShardingDescription().isSharded()); - ASSERT_FALSE(acquisition.getShardingFilter().has_value()); } // --------------------------------------------------------------------------- @@ -545,14 +795,32 @@ TEST_F(ShardRoleTest, AcquireShardedCollWithoutSpecifyingPlacementVersion) { TEST_F(ShardRoleTest, AcquireCollectionNonExistentNamespace) { const NamespaceString inexistentNss = NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); - auto acquisition = - acquireCollection(opCtx(), - CollectionAcquisitionRequest::fromOpCtx( - opCtx(), inexistentNss, AcquisitionPrerequisites::kWrite), - MODE_IX); - ASSERT(!acquisition.getCollectionPtr()); - ASSERT(!acquisition.getShardingDescription().isSharded()); - ASSERT(!acquisition.getShardingFilter()); + + // With locks. + { + auto acquisition = + acquireCollection(opCtx(), + CollectionAcquisitionRequest::fromOpCtx( + opCtx(), inexistentNss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(!acquisition.getCollectionPtr()); + ASSERT(!acquisition.getShardingDescription().isSharded()); + } + + // Without locks. 
+ { + auto acquisitions = acquireCollectionsOrViewsMaybeLockFree( + opCtx(), + {CollectionAcquisitionRequest::fromOpCtx( + opCtx(), inexistentNss, AcquisitionPrerequisites::kRead)}); + + ASSERT_EQ(1, acquisitions.size()); + ASSERT_TRUE(acquisitions.at(inexistentNss).isCollection()); + const auto& acquisition = acquisitions.at(inexistentNss).getCollection(); + + ASSERT(!acquisition.getCollectionPtr()); + ASSERT(!acquisition.getShardingDescription().isSharded()); + } } TEST_F(ShardRoleTest, AcquireInexistentCollectionWithWrongPlacementThrowsBecauseWrongPlacement) { @@ -561,6 +829,14 @@ TEST_F(ShardRoleTest, AcquireInexistentCollectionWithWrongPlacementThrowsBecause NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); PlacementConcern placementConcern{incorrectDbVersion, {}}; + + auto validateException = [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(dbNameTestDb.toString_forTest(), exInfo->getDb()); + ASSERT_EQ(incorrectDbVersion, exInfo->getVersionReceived()); + ASSERT_EQ(dbVersionTestDb, exInfo->getVersionWanted()); + ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); + }; ASSERT_THROWS_WITH_CHECK(acquireCollection(opCtx(), {inexistentNss, placementConcern, @@ -568,13 +844,15 @@ TEST_F(ShardRoleTest, AcquireInexistentCollectionWithWrongPlacementThrowsBecause AcquisitionPrerequisites::kWrite}, MODE_IX), ExceptionFor, - [&](const DBException& ex) { - const auto exInfo = ex.extraInfo(); - ASSERT_EQ(dbNameTestDb.db(), exInfo->getDb()); - ASSERT_EQ(incorrectDbVersion, exInfo->getVersionReceived()); - ASSERT_EQ(dbVersionTestDb, exInfo->getVersionWanted()); - ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); - }); + validateException); + ASSERT_THROWS_WITH_CHECK( + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{inexistentNss, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}}), + ExceptionFor, + validateException); } TEST_F(ShardRoleTest, AcquireCollectionButItIsAView) { @@ -591,8 +869,8 @@ TEST_F(ShardRoleTest, AcquireCollectionButItIsAView) { opCtx(), nssView, AcquisitionPrerequisites::kWrite), MODE_IX); - ASSERT_TRUE(std::holds_alternative(acquisition)); - const ScopedViewAcquisition& viewAcquisition = std::get(acquisition); + ASSERT_TRUE(acquisition.isView()); + const ViewAcquisition& viewAcquisition = acquisition.getView(); ASSERT_EQ(nssView, viewAcquisition.nss()); ASSERT_EQ(nssUnshardedCollection1, viewAcquisition.getViewDefinition().viewOn()); @@ -602,6 +880,75 @@ TEST_F(ShardRoleTest, AcquireCollectionButItIsAView) { SimpleBSONObjComparator::kInstance.makeEqualTo())); } + +TEST_F(ShardRoleTest, WritesOnMultiDocTransactionsUseLatestCatalog) { + + { + opCtx()->setInMultiDocumentTransaction(); + opCtx()->recoveryUnit()->preallocateSnapshot(); + CollectionCatalog::stash(opCtx(), CollectionCatalog::get(opCtx())); + } + + // Drop a collection + { + auto newClient = opCtx()->getServiceContext()->makeClient("AlternativeClient"); + AlternativeClientRegion acr(newClient); + auto newOpCtx = cc().makeOperationContext(); + DBDirectClient directClient(newOpCtx.get()); + directClient.dropCollection(nssUnshardedCollection1); + } + + const auto acquireForRead = acquireCollectionOrView( + opCtx(), + CollectionOrViewAcquisitionRequest::fromOpCtx( + opCtx(), nssUnshardedCollection1, AcquisitionPrerequisites::kRead), + MODE_IX); + ASSERT_TRUE(acquireForRead.isCollection()); + + ASSERT_THROWS_CODE(acquireCollectionOrView( + opCtx(), + CollectionOrViewAcquisitionRequest::fromOpCtx( + 
opCtx(), nssUnshardedCollection1, AcquisitionPrerequisites::kWrite), + MODE_IX), + DBException, + ErrorCodes::WriteConflict); +} + +// --------------------------------------------------------------------------- +// MaybeLockFree +TEST_F(ShardRoleTest, AcquireCollectionMaybeLockFreeTakesLocksWhenInMultiDocTransaction) { + opCtx()->setInMultiDocumentTransaction(); + const auto acquisition = + acquireCollectionMaybeLockFree(opCtx(), + {nssUnshardedCollection1, + {dbVersionTestDb, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}); + ASSERT_TRUE(opCtx()->lockState()->isCollectionLockedForMode(nssUnshardedCollection1, MODE_IS)); +} + +TEST_F(ShardRoleTest, AcquireCollectionMaybeLockFreeDoesNotTakeLocksWhenNotInMultiDocTransaction) { + const auto acquisition = + acquireCollectionMaybeLockFree(opCtx(), + {nssUnshardedCollection1, + {dbVersionTestDb, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}); + ASSERT_FALSE(opCtx()->lockState()->isCollectionLockedForMode(nssUnshardedCollection1, MODE_IS)); +} + +DEATH_TEST_REGEX_F(ShardRoleTest, + AcquireCollectionMaybeLockFreeAllowedOnlyForRead, + "Tripwire assertion") { + ASSERT_THROWS_CODE(acquireCollectionMaybeLockFree(opCtx(), + {nssUnshardedCollection1, + {dbVersionTestDb, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kWrite}), + DBException, + 7740500); +} + // --------------------------------------------------------------------------- // Acquire multiple collections @@ -620,25 +967,12 @@ TEST_F(ShardRoleTest, AcquireMultipleCollectionsAllWithCorrectPlacementConcern) ASSERT_EQ(2, acquisitions.size()); - const auto& acquisitionUnshardedColl = - std::find_if(acquisitions.begin(), - acquisitions.end(), - [nss = nssUnshardedCollection1](const auto& acquisition) { - return acquisition.nss() == nss; - }); - ASSERT(acquisitionUnshardedColl != acquisitions.end()); - ASSERT_FALSE(acquisitionUnshardedColl->getShardingDescription().isSharded()); - ASSERT_FALSE(acquisitionUnshardedColl->getShardingFilter().has_value()); - - const auto& acquisitionShardedColl = - std::find_if(acquisitions.begin(), - acquisitions.end(), - [nss = nssShardedCollection1](const auto& acquisition) { - return acquisition.nss() == nss; - }); - ASSERT(acquisitionShardedColl != acquisitions.end()); - ASSERT_TRUE(acquisitionShardedColl->getShardingDescription().isSharded()); - ASSERT_TRUE(acquisitionShardedColl->getShardingFilter().has_value()); + const auto& acquisitionUnshardedColl = acquisitions.at(nssUnshardedCollection1); + ASSERT_FALSE(acquisitionUnshardedColl.getShardingDescription().isSharded()); + + const auto& acquisitionShardedColl = acquisitions.at(nssShardedCollection1); + ASSERT_TRUE(acquisitionShardedColl.getShardingDescription().isSharded()); + ASSERT_TRUE(acquisitionShardedColl.getShardingFilter().has_value()); // Assert the DB lock is held, but not recursively (i.e. only once). 
ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(dbNameTestDb, MODE_IX)); @@ -729,6 +1063,30 @@ TEST_F(ShardRoleTest, AcquireCollectionByWrongUUID) { ErrorCodes::NamespaceNotFound); } +TEST_F(ShardRoleTest, AcquireCollectionByUUIDWithShardVersionAttachedThrows) { + const auto uuid = getCollectionUUID(opCtx(), nssShardedCollection1); + const auto dbVersion = boost::none; + const auto shardVersion = shardVersionShardedCollection1; + ScopedSetShardRole setShardRole(opCtx(), nssShardedCollection1, shardVersion, dbVersion); + PlacementConcern placementConcern{dbVersionTestDb, ShardVersion::UNSHARDED()}; + ASSERT_THROWS_CODE(acquireCollection(opCtx(), + {NamespaceStringOrUUID(dbNameTestDb, uuid), + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kWrite}, + MODE_IX), + DBException, + ErrorCodes::IncompatibleShardingMetadata); + ASSERT_THROWS_CODE( + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{NamespaceStringOrUUID(dbNameTestDb, uuid), + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}}), + DBException, + ErrorCodes::IncompatibleShardingMetadata); +} + // --------------------------------------------------------------------------- // Acquire by nss and expected UUID @@ -749,19 +1107,28 @@ TEST_F(ShardRoleTest, AcquireCollectionByNssAndExpectedUUID) { TEST_F(ShardRoleTest, AcquireCollectionByNssAndWrongExpectedUUIDThrows) { const auto nss = nssUnshardedCollection1; const auto wrongUuid = UUID::gen(); + + auto validateException = [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(nss.dbName(), exInfo->dbName()); + ASSERT_EQ(wrongUuid, exInfo->collectionUUID()); + ASSERT_EQ(nss.coll(), exInfo->expectedCollection()); + ASSERT_EQ(boost::none, exInfo->actualCollection()); + }; + ASSERT_THROWS_WITH_CHECK( acquireCollection( opCtx(), {nss, wrongUuid, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, MODE_IX), ExceptionFor, - [&](const DBException& ex) { - const auto exInfo = ex.extraInfo(); - ASSERT_EQ(nss.dbName(), exInfo->dbName()); - ASSERT_EQ(wrongUuid, exInfo->collectionUUID()); - ASSERT_EQ(nss.coll(), exInfo->expectedCollection()); - ASSERT_EQ(boost::none, exInfo->actualCollection()); - }); + validateException); + ASSERT_THROWS_WITH_CHECK( + acquireCollectionsOrViewsMaybeLockFree( + opCtx(), + {{nss, wrongUuid, {}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kRead}}), + ExceptionFor, + validateException); } TEST_F(ShardRoleTest, AcquireViewWithExpectedUUIDAlwaysThrows) { @@ -805,7 +1172,7 @@ TEST_F(ShardRoleTest, AcquireCollectionOrView) { AcquisitionPrerequisites::kCanBeView, }, MODE_IX); - ASSERT_TRUE(std::holds_alternative(acquisition)); + ASSERT_TRUE(acquisition.isView()); } { @@ -818,7 +1185,7 @@ TEST_F(ShardRoleTest, AcquireCollectionOrView) { AcquisitionPrerequisites::kCanBeView, }, MODE_IX); - ASSERT_TRUE(std::holds_alternative(acquisition)); + ASSERT_TRUE(acquisition.isCollection()); } } @@ -838,21 +1205,56 @@ TEST_F(ShardRoleTest, YieldAndRestoreAcquisitionWithLocks) { }, MODE_IX); - ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.db(), MODE_IX)); + ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); ASSERT_TRUE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX)); // Yield the resources auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); - ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(nss.db(), MODE_IX)); + opCtx()->recoveryUnit()->abandonSnapshot(); + + 
ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); ASSERT_FALSE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX)); // Restore the resources restoreTransactionResourcesToOperationContext(opCtx(), std::move(yieldedTransactionResources)); - ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.db(), MODE_IX)); + ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); ASSERT_TRUE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX)); } -TEST_F(ShardRoleTest, RestoreForWriteFailsIfPlacementConcernNoLongerMet) { +TEST_F(ShardRoleTest, YieldAndRestoreAcquisitionWithoutLocks) { + const auto nss = nssUnshardedCollection1; + + PlacementConcern placementConcern{dbVersionTestDb, ShardVersion::UNSHARDED()}; + const auto acquisitions = + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{ + nss, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead, + }}); + + ASSERT_EQ(1, acquisitions.size()); + ASSERT_TRUE(acquisitions.at(nss).isCollection()); + + ASSERT_TRUE(opCtx()->lockState()->isLockHeldForMode(resourceIdGlobal, MODE_IS)); + ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_NONE)); + + // Yield the resources + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); + + ASSERT_FALSE(opCtx()->lockState()->isLockHeldForMode(resourceIdGlobal, MODE_IS)); + ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_NONE)); + + // Restore the resources + restoreTransactionResourcesToOperationContext(opCtx(), std::move(yieldedTransactionResources)); + ASSERT_TRUE(opCtx()->lockState()->isLockHeldForMode(resourceIdGlobal, MODE_IS)); + ASSERT_TRUE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_NONE)); +} + +TEST_F(ShardRoleTest, + RestoreForWriteInvalidatesAcquisitionIfPlacementConcernShardVersionNoLongerMet) { const auto nss = nssShardedCollection1; PlacementConcern placementConcern{{}, shardVersionShardedCollection1}; @@ -863,6 +1265,7 @@ TEST_F(ShardRoleTest, RestoreForWriteFailsIfPlacementConcernNoLongerMet) { // Yield the resources auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); // Placement changes const auto newShardVersion = [&]() { @@ -896,7 +1299,40 @@ TEST_F(ShardRoleTest, RestoreForWriteFailsIfPlacementConcernNoLongerMet) { ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); }); - ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(nss.db(), MODE_IX)); + ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); + ASSERT_FALSE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX)); +} + +TEST_F(ShardRoleTest, RestoreForWriteInvalidatesAcquisitionIfPlacementConcernDbVersionNoLongerMet) { + const auto nss = nssUnshardedCollection1; + + PlacementConcern placementConcern{dbVersionTestDb, {}}; + const auto acquisition = acquireCollection( + opCtx(), + {nss, placementConcern, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + + // Yield the resources + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); + + // Placement changes + const auto newDbVersion = dbVersionTestDb.makeUpdated(); + installDatabaseMetadata(opCtx(), nssUnshardedCollection1.dbName(), newDbVersion); + + // Try to restore the resources should fail because placement concern is 
no longer met. + ASSERT_THROWS_WITH_CHECK(restoreTransactionResourcesToOperationContext( + opCtx(), std::move(yieldedTransactionResources)), + ExceptionFor, + [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(nss.db_forTest(), exInfo->getDb()); + ASSERT_EQ(dbVersionTestDb, exInfo->getVersionReceived()); + ASSERT_EQ(newDbVersion, exInfo->getVersionWanted()); + ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); + }); + + ASSERT_FALSE(opCtx()->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); ASSERT_FALSE(opCtx()->lockState()->isCollectionLockedForMode(nss, MODE_IX)); } @@ -919,6 +1355,7 @@ TEST_F(ShardRoleTest, RestoreWithShardVersionIgnored) { // Yield the resources auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); // Placement changes const auto newShardVersion = [&]() { @@ -953,6 +1390,7 @@ void ShardRoleTest::testRestoreFailsIfCollectionBecomesCreated( // Yield the resources auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); // Create the collection createTestCollection(opCtx(), nss); @@ -981,6 +1419,7 @@ void ShardRoleTest::testRestoreFailsIfCollectionNoLongerExists( // Yield the resources auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); // Drop the collection { @@ -992,7 +1431,7 @@ void ShardRoleTest::testRestoreFailsIfCollectionNoLongerExists( ASSERT_THROWS_CODE(restoreTransactionResourcesToOperationContext( opCtx(), std::move(yieldedTransactionResources)), DBException, - ErrorCodes::NamespaceNotFound); + ErrorCodes::CollectionUUIDMismatch); } TEST_F(ShardRoleTest, RestoreForReadFailsIfCollectionNoLongerExists) { testRestoreFailsIfCollectionNoLongerExists(AcquisitionPrerequisites::kRead); @@ -1011,16 +1450,18 @@ void ShardRoleTest::testRestoreFailsIfCollectionRenamed( // Yield the resources auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); // Rename the collection. { DBDirectClient client(opCtx()); BSONObj info; ASSERT_TRUE(client.runCommand( - DatabaseName(boost::none, dbNameTestDb.db()), + dbNameTestDb, BSON("renameCollection" - << nss.ns() << "to" - << NamespaceString::createNamespaceString_forTest(dbNameTestDb, "foo2").ns()), + << nss.ns_forTest() << "to" + << NamespaceString::createNamespaceString_forTest(dbNameTestDb, "foo2") + .ns_forTest()), info)); } @@ -1028,7 +1469,7 @@ void ShardRoleTest::testRestoreFailsIfCollectionRenamed( ASSERT_THROWS_CODE(restoreTransactionResourcesToOperationContext( opCtx(), std::move(yieldedTransactionResources)), DBException, - ErrorCodes::NamespaceNotFound); + ErrorCodes::CollectionUUIDMismatch); } TEST_F(ShardRoleTest, RestoreForReadFailsIfCollectionRenamed) { testRestoreFailsIfCollectionRenamed(AcquisitionPrerequisites::kRead); @@ -1047,6 +1488,7 @@ void ShardRoleTest::testRestoreFailsIfCollectionDroppedAndRecreated( // Yield the resources auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); // Drop the collection and create a new one with the same nss. 
{ @@ -1089,6 +1531,7 @@ TEST_F(ShardRoleTest, RestoreForReadSucceedsEvenIfPlacementHasChanged) { // Yield the resources auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); ASSERT_FALSE(ongoingQueriesCompletionFuture.isReady()); ASSERT_TRUE(acquisition.getShardingFilter().has_value()); @@ -1151,6 +1594,9 @@ void ShardRoleTest::testRestoreFailsIfCollectionIsNowAView( // Yield the resources. auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); + + opCtx()->recoveryUnit()->abandonSnapshot(); // Drop collection and create a view in its place. { @@ -1172,48 +1618,173 @@ TEST_F(ShardRoleTest, RestoreForWriteFailsIfCollectionIsNowAView) { testRestoreFailsIfCollectionIsNowAView(AcquisitionPrerequisites::kWrite); } -// --------------------------------------------------------------------------- -// ScopedLocalCatalogWriteFence +TEST_F(ShardRoleTest, RestoreChangesReadSourceAfterStepUp) { + const auto nss = nssShardedCollection1; -TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWCommitWithinWriterScope) { - auto acquisition = acquireCollection(opCtx(), - {nssShardedCollection1, - PlacementConcern{{}, shardVersionShardedCollection1}, - repl::ReadConcernArgs(), - AcquisitionPrerequisites::kRead}, - MODE_X); - ASSERT(!acquisition.getCollectionPtr()->isTemporary()); + // Set up secondary read state. + opCtx()->getClient()->setInDirectClient(true); + ASSERT_OK(repl::ReplicationCoordinator::get(getGlobalServiceContext()) + ->setFollowerMode(repl::MemberState::RS_SECONDARY)); + + // Initially we start with kNoTimestamp as our ReadSource. + ASSERT_EQUALS(RecoveryUnit::ReadSource::kNoTimestamp, + opCtx()->recoveryUnit()->getTimestampReadSource()); + + PlacementConcern placementConcern{dbVersionTestDb, ShardVersion::UNSHARDED()}; + const auto acquisitions = + acquireCollectionsOrViewsMaybeLockFree(opCtx(), + {{ + nssUnshardedCollection1, + placementConcern, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead, + }}); + + // Our read source should have been updated to kLastApplied. + ASSERT_EQUALS(RecoveryUnit::ReadSource::kLastApplied, + opCtx()->recoveryUnit()->getTimestampReadSource()); + + // Yield the resources + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); + + // Step up. + ASSERT_OK(repl::ReplicationCoordinator::get(getGlobalServiceContext()) + ->setFollowerMode(repl::MemberState::RS_PRIMARY)); + // Restore the resources + restoreTransactionResourcesToOperationContext(opCtx(), std::move(yieldedTransactionResources)); + + // Our read source should have been updated to kNoTimestamp. 
+ ASSERT_EQUALS(RecoveryUnit::ReadSource::kNoTimestamp, + opCtx()->recoveryUnit()->getTimestampReadSource()); +} + +TEST_F(ShardRoleTest, RestoreCollectionCreatedUnderScopedLocalCatalogWriteFence) { + const auto nss = NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); + auto acquisition = acquireCollection( + opCtx(), + {nss, PlacementConcern{{}, {}}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + ASSERT_FALSE(acquisition.exists()); + + // Create the collection { WriteUnitOfWork wuow(opCtx()); - CollectionWriter localCatalogWriter(opCtx(), &acquisition); - localCatalogWriter.getWritableCollection(opCtx())->setIsTemp(opCtx(), true); + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx(), &acquisition); + createTestCollection(opCtx(), nss); wuow.commit(); } + ASSERT_TRUE(acquisition.exists()); - ASSERT(acquisition.getCollectionPtr()->isTemporary()); + // Yield + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + + // Restore works + restoreTransactionResourcesToOperationContext(opCtx(), std::move(yieldedTransactionResources)); } -TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWCommitAfterWriterScope) { - auto acquisition = acquireCollection(opCtx(), - {nssShardedCollection1, - PlacementConcern{{}, shardVersionShardedCollection1}, - repl::ReadConcernArgs(), - AcquisitionPrerequisites::kRead}, - MODE_X); - ASSERT(!acquisition.getCollectionPtr()->isTemporary()); +TEST_F(ShardRoleTest, + RestoreCollectionCreatedUnderScopedLocalCatalogWriteFenceFailsIfNoLongerExists) { + const auto nss = NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); + auto acquisition = acquireCollection( + opCtx(), + {nss, PlacementConcern{{}, {}}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + ASSERT_FALSE(acquisition.exists()); - WriteUnitOfWork wuow(opCtx()); + // Create the collection { - CollectionWriter localCatalogWriter(opCtx(), &acquisition); - localCatalogWriter.getWritableCollection(opCtx())->setIsTemp(opCtx(), true); + WriteUnitOfWork wuow(opCtx()); + ScopedLocalCatalogWriteFence scopedLocalCatalogWriteFence(opCtx(), &acquisition); + createTestCollection(opCtx(), nss); + wuow.commit(); } - ASSERT(acquisition.getCollectionPtr()->isTemporary()); - wuow.commit(); - ASSERT(acquisition.getCollectionPtr()->isTemporary()); + ASSERT_TRUE(acquisition.exists()); + + // Yield + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + + // Drop the collection + DBDirectClient client(opCtx()); + client.dropCollection(nss); + + // Restore should fail + ASSERT_THROWS_CODE(restoreTransactionResourcesToOperationContext( + opCtx(), std::move(yieldedTransactionResources)), + DBException, + ErrorCodes::CollectionUUIDMismatch); +} + +// --------------------------------------------------------------------------- +// Storage snapshot + +TEST_F(ShardRoleTest, SnapshotAttemptFailsIfReplTermChanges) { + const auto nss = nssShardedCollection1; + + PlacementConcern placementConcern = PlacementConcern{{}, shardVersionShardedCollection1}; + + std::vector requests = {{nss}}; + shard_role_details::SnapshotAttempt snapshotAttempt(opCtx(), requests); + snapshotAttempt.snapshotInitialState(); + snapshotAttempt.changeReadSourceForSecondaryReads(); + snapshotAttempt.openStorageSnapshot(); + + auto currentTerm = repl::ReplicationCoordinator::get(opCtx())->getTerm(); + ASSERT_OK(repl::ReplicationCoordinator::get(opCtx())->updateTerm(opCtx(), currentTerm 
+ 1)); + + ASSERT_FALSE(snapshotAttempt.getConsistentCatalog()); } -TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWRollbackWithinWriterScope) { +TEST_F(ShardRoleTest, SnapshotAttemptFailsIfCatalogChanges) { + const auto nss = nssShardedCollection1; + + PlacementConcern placementConcern = PlacementConcern{{}, shardVersionShardedCollection1}; + + std::vector requests = {{nss}}; + shard_role_details::SnapshotAttempt snapshotAttempt(opCtx(), requests); + snapshotAttempt.snapshotInitialState(); + snapshotAttempt.changeReadSourceForSecondaryReads(); + snapshotAttempt.openStorageSnapshot(); + + auto nss2 = NamespaceString::createNamespaceString_forTest(dbNameTestDb, "newCollection"); + createTestCollection(opCtx(), nss2); + + ASSERT_FALSE(snapshotAttempt.getConsistentCatalog()); +} + +TEST_F(ShardRoleTest, ReadSourceChangesOnSecondary) { + const auto nss = nssShardedCollection1; + + // Set up secondary read state. + opCtx()->getClient()->setInDirectClient(true); + ASSERT_OK(repl::ReplicationCoordinator::get(getGlobalServiceContext()) + ->setFollowerMode(repl::MemberState::RS_SECONDARY)); + // Don't conflict with PBWM lock, as lock free reads do. + ShouldNotConflictWithSecondaryBatchApplicationBlock skipPBWMConflict(opCtx()->lockState()); + + // Initially we start with kNoTimestamp as our ReadSource. + ASSERT_EQUALS(RecoveryUnit::ReadSource::kNoTimestamp, + opCtx()->recoveryUnit()->getTimestampReadSource()); + + PlacementConcern placementConcern = PlacementConcern{{}, shardVersionShardedCollection1}; + std::vector requests = {{nss}}; + shard_role_details::SnapshotAttempt snapshotAttempt(opCtx(), requests); + snapshotAttempt.snapshotInitialState(); + snapshotAttempt.changeReadSourceForSecondaryReads(); + + // Our read source should have been updated to kLastApplied. 
+ ASSERT_EQUALS(RecoveryUnit::ReadSource::kLastApplied, + opCtx()->recoveryUnit()->getTimestampReadSource()); + + snapshotAttempt.openStorageSnapshot(); + ASSERT_TRUE(snapshotAttempt.getConsistentCatalog()); +} + +// --------------------------------------------------------------------------- +// ScopedLocalCatalogWriteFence + +TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWCommitWithinWriterScope) { auto acquisition = acquireCollection(opCtx(), {nssShardedCollection1, PlacementConcern{{}, shardVersionShardedCollection1}, @@ -1226,11 +1797,13 @@ TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWRollbackWithinWriterScope) WriteUnitOfWork wuow(opCtx()); CollectionWriter localCatalogWriter(opCtx(), &acquisition); localCatalogWriter.getWritableCollection(opCtx())->setIsTemp(opCtx(), true); + wuow.commit(); } - ASSERT(!acquisition.getCollectionPtr()->isTemporary()); + + ASSERT(acquisition.getCollectionPtr()->isTemporary()); } -TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWRollbackAfterWriterScope) { +TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWCommitAfterWriterScope) { auto acquisition = acquireCollection(opCtx(), {nssShardedCollection1, PlacementConcern{{}, shardVersionShardedCollection1}, @@ -1239,15 +1812,14 @@ TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWRollbackAfterWriterScope) MODE_X); ASSERT(!acquisition.getCollectionPtr()->isTemporary()); + WriteUnitOfWork wuow(opCtx()); { - WriteUnitOfWork wuow(opCtx()); - { - CollectionWriter localCatalogWriter(opCtx(), &acquisition); - localCatalogWriter.getWritableCollection(opCtx())->setIsTemp(opCtx(), true); - } - ASSERT(acquisition.getCollectionPtr()->isTemporary()); + CollectionWriter localCatalogWriter(opCtx(), &acquisition); + localCatalogWriter.getWritableCollection(opCtx())->setIsTemp(opCtx(), true); } - ASSERT(!acquisition.getCollectionPtr()->isTemporary()); + ASSERT(acquisition.getCollectionPtr()->isTemporary()); + wuow.commit(); + ASSERT(acquisition.getCollectionPtr()->isTemporary()); } TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceOutsideWUOUCommit) { @@ -1291,5 +1863,137 @@ TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceOutsideWUOURollback) { ASSERT(!acquisition.getCollectionPtr()->isTemporary()); } +TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWRollbackAfterAcquisitionOutOfScope) { + // Tests that nothing breaks if ScopedLocalCatalogWriteFence's onRollback handler is executed + // when the collection acquisition has already gone out of scope. + WriteUnitOfWork wuow1(opCtx()); + { + auto acquisition = acquireCollection(opCtx(), + {nssShardedCollection1, + PlacementConcern{{}, shardVersionShardedCollection1}, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kRead}, + MODE_IX); + ScopedLocalCatalogWriteFence(opCtx(), &acquisition); + } +} + +TEST_F(ShardRoleTest, ScopedLocalCatalogWriteFenceWUOWRollbackAfterANotherClientCreatedCollection) { + const NamespaceString nss = + NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); + + // Acquire a collection that does not exist. 
+ auto acquisition = acquireCollection( + opCtx(), + {nss, PlacementConcern{{}, {}}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + ASSERT_FALSE(acquisition.exists()); + + // Another client creates the collection + { + auto newClient = opCtx()->getServiceContext()->makeClient("MigrationCoordinator"); + auto newOpCtx = newClient->makeOperationContext(); + createTestCollection(newOpCtx.get(), nss); + } + + // Acquisition still reflects that the collection does not exist. + ASSERT_FALSE(acquisition.exists()); + + // Original client attempts to create the collection, which will result in a WriteConflict and + // rollback. + { + WriteUnitOfWork wuow(opCtx()); + ScopedLocalCatalogWriteFence localCatalogWriteFence(opCtx(), &acquisition); + auto db = DatabaseHolder::get(opCtx())->openDb(opCtx(), nss.dbName()); + db->createCollection(opCtx(), nss, CollectionOptions()); + ASSERT_THROWS_CODE(wuow.commit(), DBException, ErrorCodes::WriteConflict); + } + + // Check that after rollback the acquisition has been updated to reflect the latest state of the + // catalog (i.e. the collection exists). + ASSERT_TRUE(acquisition.exists()); +} + +DEATH_TEST_F(ShardRoleTest, + CannotAcquireWhileYielded, + "Cannot obtain TransactionResources as they've been detached from the opCtx") { + const NamespaceString nss = + NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); + + // Acquire a collection + auto acquisition = acquireCollection( + opCtx(), + {nss, PlacementConcern{{}, {}}, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + + auto yielded = yieldTransactionResourcesFromOperationContext(opCtx()); + + const auto otherNss = NamespaceString::createNamespaceString_forTest(dbNameTestDb, "otherNss"); + acquireCollection(opCtx(), + {otherNss, + PlacementConcern{{}, {}}, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kWrite}, + MODE_IX); +} + +DEATH_TEST_F(ShardRoleTest, + FailedStateCannotAcceptAcquisitions, + "Cannot make a new acquisition in the FAILED state") { + const auto nss = nssShardedCollection1; + + PlacementConcern placementConcern{{}, shardVersionShardedCollection1}; + const auto acquisition = acquireCollection( + opCtx(), + {nss, placementConcern, repl::ReadConcernArgs(), AcquisitionPrerequisites::kWrite}, + MODE_IX); + + // Yield the resources + auto yieldedTransactionResources = yieldTransactionResourcesFromOperationContext(opCtx()); + opCtx()->recoveryUnit()->abandonSnapshot(); + + // Placement changes + const auto newShardVersion = [&]() { + auto newPlacementVersion = shardVersionShardedCollection1.placementVersion(); + newPlacementVersion.incMajor(); + return ShardVersionFactory::make(newPlacementVersion, + boost::optional(boost::none)); + }(); + const auto uuid = getCollectionUUID(opCtx(), nss); + installShardedCollectionMetadata( + opCtx(), + nss, + dbVersionTestDb, + {ChunkType(uuid, + ChunkRange{BSON("skey" << MINKEY), BSON("skey" << MAXKEY)}, + newShardVersion.placementVersion(), + thisShardId)}, + thisShardId); + + // Try to restore the resources should fail because placement concern is no longer met. 
+ ASSERT_THROWS_WITH_CHECK(restoreTransactionResourcesToOperationContext( + opCtx(), std::move(yieldedTransactionResources)), + ExceptionFor, + [&](const DBException& ex) { + const auto exInfo = ex.extraInfo(); + ASSERT_EQ(nssShardedCollection1, exInfo->getNss()); + ASSERT_EQ(shardVersionShardedCollection1, + exInfo->getVersionReceived()); + ASSERT_EQ(newShardVersion, exInfo->getVersionWanted()); + ASSERT_EQ(ShardId("this"), exInfo->getShardId()); + ASSERT_FALSE(exInfo->getCriticalSectionSignal().is_initialized()); + }); + + const NamespaceString otherNss = + NamespaceString::createNamespaceString_forTest(dbNameTestDb, "inexistent"); + + // Trying to acquire now should invariant and crash the server since we're in the FAILED state. + acquireCollection(opCtx(), + {otherNss, + PlacementConcern{{}, {}}, + repl::ReadConcernArgs(), + AcquisitionPrerequisites::kWrite}, + MODE_IX); +} } // namespace } // namespace mongo diff --git a/src/mongo/db/shutdown_in_progress_quiesce_info.cpp b/src/mongo/db/shutdown_in_progress_quiesce_info.cpp index 6753b14c77056..be479b0140236 100644 --- a/src/mongo/db/shutdown_in_progress_quiesce_info.cpp +++ b/src/mongo/db/shutdown_in_progress_quiesce_info.cpp @@ -27,13 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/db/shutdown_in_progress_quiesce_info.h" - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/shutdown_in_progress_quiesce_info.h b/src/mongo/db/shutdown_in_progress_quiesce_info.h index 710c397162781..07dba1f6015f5 100644 --- a/src/mongo/db/shutdown_in_progress_quiesce_info.h +++ b/src/mongo/db/shutdown_in_progress_quiesce_info.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" diff --git a/src/mongo/db/signed_logical_time.cpp b/src/mongo/db/signed_logical_time.cpp index be984f41145ee..01221097b6092 100644 --- a/src/mongo/db/signed_logical_time.cpp +++ b/src/mongo/db/signed_logical_time.cpp @@ -29,7 +29,11 @@ #include "mongo/db/signed_logical_time.h" -#include "mongo/util/str.h" +#include + +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/crypto/hash_block.h" namespace mongo { diff --git a/src/mongo/db/signed_logical_time.h b/src/mongo/db/signed_logical_time.h index 6aebc2f2433d3..55cd8d2f375bb 100644 --- a/src/mongo/db/signed_logical_time.h +++ b/src/mongo/db/signed_logical_time.h @@ -29,9 +29,14 @@ #pragma once +#include +#include + +#include +#include + #include "mongo/db/logical_time.h" #include "mongo/db/time_proof_service.h" -#include namespace mongo { diff --git a/src/mongo/db/sorter/sorter.cpp b/src/mongo/db/sorter/sorter.cpp index 92680a15971ef..c888217ce1082 100644 --- a/src/mongo/db/sorter/sorter.cpp +++ b/src/mongo/db/sorter/sorter.cpp @@ -48,23 +48,53 @@ #include "mongo/db/sorter/sorter.h" +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include -#include "mongo/base/string_data.h" -#include "mongo/config.h" -#include "mongo/db/jsobj.h" +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include 
"mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/service_context.h" +#include "mongo/db/sorter/sorter_gen.h" +#include "mongo/db/sorter/sorter_stats.h" #include "mongo/db/storage/encryption_hooks.h" -#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/atomic_word.h" -#include "mongo/platform/overflow_arithmetic.h" #include "mongo/s/is_mongos.h" #include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" #include "mongo/util/destructor_guard.h" +#include "mongo/util/murmur3.h" +#include "mongo/util/shared_buffer_fragment.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -77,10 +107,8 @@ namespace { * Calculates and returns a new murmur hash value based on the prior murmur hash and a new piece * of data. */ -uint32_t addDataToChecksum(const void* startOfData, size_t sizeOfData, uint32_t checksum) { - unsigned newChecksum; - MurmurHash3_x86_32(startOfData, sizeOfData, checksum, &newChecksum); - return newChecksum; +uint32_t addDataToChecksum(const char* startOfData, size_t sizeOfData, uint32_t checksum) { + return murmur3(ConstDataRange{startOfData, sizeOfData}, checksum); } void checkNoExternalSortOnMongos(const SortOptions& opts) { @@ -177,7 +205,15 @@ class InMemIterator : public SortIteratorInterface { return out; } - const std::pair& current() override { + Key nextWithDeferredValue() override { + MONGO_UNREACHABLE; + } + + Value getDeferredValue() override { + MONGO_UNREACHABLE; + } + + const Key& current() override { tasserted(ErrorCodes::NotImplemented, "current() not implemented for InMemIterator"); } @@ -233,35 +269,48 @@ class FileIterator : public SortIteratorInterface { } bool more() { + invariant(!_startOfNewData); if (!_done) _fillBufferIfNeeded(); // may change _done return !_done; } Data next() { + Key deserializedKey = nextWithDeferredValue(); + Value deserializedValue = getDeferredValue(); + return Data(std::move(deserializedKey), std::move(deserializedValue)); + } + + Key nextWithDeferredValue() override { invariant(!_done); + invariant(!_startOfNewData); _fillBufferIfNeeded(); - const char* startOfNewData = static_cast(_bufferReader->pos()); + _startOfNewData = static_cast(_bufferReader->pos()); // Note: calling read() on the _bufferReader buffer in the deserialize function advances the // buffer. Since Key comes before Value in the _bufferReader, and C++ makes no function // parameter evaluation order guarantees, we cannot deserialize Key and Value straight into // the Data constructor - auto first = Key::deserializeForSorter(*_bufferReader, _settings.first); - auto second = Value::deserializeForSorter(*_bufferReader, _settings.second); + return Key::deserializeForSorter(*_bufferReader, _settings.first); + } + + Value getDeferredValue() override { + invariant(!_done); + invariant(_startOfNewData); + Value deserializedValue = Value::deserializeForSorter(*_bufferReader, _settings.second); // The difference of _bufferReader's position before and after reading the data // will provide the length of the data that was just read. 
const char* endOfNewData = static_cast(_bufferReader->pos()); _afterReadChecksum = - addDataToChecksum(startOfNewData, endOfNewData - startOfNewData, _afterReadChecksum); - - return Data(std::move(first), std::move(second)); + addDataToChecksum(_startOfNewData, endOfNewData - _startOfNewData, _afterReadChecksum); + _startOfNewData = nullptr; + return deserializedValue; } - const std::pair& current() override { + const Key& current() override { tasserted(ErrorCodes::NotImplemented, "current() not implemented for FileIterator"); } @@ -366,6 +415,10 @@ class FileIterator : public SortIteratorInterface { std::streamoff _fileEndOffset; // File offset at which the sorted data range ends. boost::optional _dbName; + // Points to the beginning of a serialized key in the key-value pair currently being read, and + // used for computing the checksum value. This is set to nullptr after reading each key-value + // pair. + const char* _startOfNewData = nullptr; // Checksum value that is updated with each read of a data object from disk. We can compare // this value with _originalChecksum to check for data corruption if and only if the // FileIterator is exhausted. @@ -379,8 +432,9 @@ class FileIterator : public SortIteratorInterface { /** * Merge-sorts results from 0 or more FileIterators, all of which should be iterating over sorted - * ranges within the same file. This class is given the data source file name upon construction and - * is responsible for deleting the data source file upon destruction. + * ranges within the same file. The input iterators must implement nextWithDeferredValue() and + * getDeferredValue(). This class is given the data source file name upon construction and is + * responsible for deleting the data source file upon destruction. */ template class MergeIterator : public SortIteratorInterface { @@ -398,7 +452,8 @@ class MergeIterator : public SortIteratorInterface { for (size_t i = 0; i < iters.size(); i++) { iters[i]->openSource(); if (iters[i]->more()) { - _heap.push_back(std::make_shared(i, iters[i]->next(), iters[i])); + _heap.push_back( + std::make_shared(i, iters[i]->nextWithDeferredValue(), iters[i])); if (i > _maxFile) { _maxFile = i; } @@ -431,7 +486,8 @@ class MergeIterator : public SortIteratorInterface { void addSource(std::shared_ptr iter) { iter->openSource(); if (iter->more()) { - _heap.push_back(std::make_shared(++_maxFile, iter->next(), iter)); + _heap.push_back( + std::make_shared(++_maxFile, iter->nextWithDeferredValue(), iter)); std::push_heap(_heap.begin(), _heap.end(), _greater); if (_greater(_current, _heap.front())) { @@ -452,7 +508,7 @@ class MergeIterator : public SortIteratorInterface { return false; } - const Data& current() override { + const Key& current() override { invariant(_remaining); if (!_positioned) { @@ -464,22 +520,31 @@ class MergeIterator : public SortIteratorInterface { } Data next() { - verify(_remaining); + invariant(_remaining); _remaining--; if (_positioned) { _positioned = false; - return _current->current(); + } else { + advance(); } + Key key = _current->current(); + Value value = _current->getDeferredValue(); + return Data(std::move(key), std::move(value)); + } - advance(); - return _current->current(); + Key nextWithDeferredValue() override { + MONGO_UNREACHABLE; + } + + Value getDeferredValue() override { + MONGO_UNREACHABLE; } void advance() { if (!_current->advance()) { - verify(!_heap.empty()); + invariant(!_heap.empty()); std::pop_heap(_heap.begin(), _heap.end(), _greater); _current = _heap.back(); _heap.pop_back(); @@ 
-500,16 +565,19 @@ class MergeIterator : public SortIteratorInterface { */ class Stream { public: - Stream(size_t fileNum, const Data& first, std::shared_ptr rest) + Stream(size_t fileNum, const Key& first, std::shared_ptr rest) : fileNum(fileNum), _current(first), _rest(rest) {} ~Stream() { _rest->closeSource(); } - const Data& current() const { + const Key& current() const { return _current; } + Value getDeferredValue() { + return _rest->getDeferredValue(); + } bool more() { return _rest->more(); } @@ -517,14 +585,14 @@ class MergeIterator : public SortIteratorInterface { if (!_rest->more()) return false; - _current = _rest->next(); + _current = _rest->nextWithDeferredValue(); return true; } const size_t fileNum; private: - Data _current; + Key _current; std::shared_ptr _rest; }; @@ -535,8 +603,8 @@ class MergeIterator : public SortIteratorInterface { template bool operator()(const Ptr& lhs, const Ptr& rhs) const { // first compare data - dassertCompIsSane(_comp, lhs->current().first, rhs->current().first); - int ret = _comp(lhs->current().first, rhs->current().first); + dassertCompIsSane(_comp, lhs->current(), rhs->current()); + int ret = _comp(lhs->current(), rhs->current()); if (ret) return ret > 0; @@ -841,7 +909,7 @@ class LimitOneSorter : public Sorter { LimitOneSorter(const SortOptions& opts, const Comparator& comp) : Sorter(opts), _comp(comp), _haveData(false) { - verify(opts.limit == 1); + invariant(opts.limit == 1); } template @@ -1392,7 +1460,7 @@ void SortedFileWriter::writeChunk() { std::string compressed; snappy::Compress(outBuffer, size, &compressed); - verify(compressed.size() <= size_t(std::numeric_limits::max())); + invariant(compressed.size() <= size_t(std::numeric_limits::max())); const bool shouldCompress = compressed.size() < (size_t(_buffer.len()) / 10 * 9); if (shouldCompress) { @@ -1535,7 +1603,7 @@ BoundedSorter::getState() const { return State::kReady; // Similarly, we can return the next element from the spilled iterator if it's < _min. - if (_spillIter && compare(_spillIter->current().first, *_min) < 0) + if (_spillIter && compare(_spillIter->current(), *_min) < 0) return State::kReady; // A later call to add() may improve _min. Or in the worst case, after done() is called @@ -1568,7 +1636,7 @@ std::pair BoundedSorter::next() }; if (!_heap.empty() && _spillIter) { - if (compare(_heap.top().first, _spillIter->current().first) <= 0) { + if (compare(_heap.top().first, _spillIter->current()) <= 0) { pullFromHeap(); } else { pullFromSpilled(); diff --git a/src/mongo/db/sorter/sorter.h b/src/mongo/db/sorter/sorter.h index 36f5948f3d9a3..162c4d1c02a62 100644 --- a/src/mongo/db/sorter/sorter.h +++ b/src/mongo/db/sorter/sorter.h @@ -29,14 +29,22 @@ #pragma once -#include - #include +#include +#include +#include +#include +#include #include -#include +#include +#include // IWYU pragma: keep +#include +#include #include #include #include +#include +#include #include #include @@ -45,9 +53,11 @@ #include "mongo/db/query/serialization_options.h" #include "mongo/db/sorter/sorter_gen.h" #include "mongo/db/sorter/sorter_stats.h" +#include "mongo/logv2/log_attr.h" #include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" #include "mongo/util/bufreader.h" +#include "mongo/util/shared_buffer_fragment.h" /** * This is the public API for the Sorter (both in-memory and external) @@ -231,8 +241,22 @@ class SortIteratorInterface { // Unowned objects are only valid until next call to any method virtual bool more() = 0; + /** + * Returns the new key-value pair. 
+ */ virtual std::pair next() = 0; - virtual const std::pair& current() = 0; + + /** + * The following two methods are used together. nextWithDeferredValue() returns the next key. It + * must be followed by a call to getDeferredValue(), to return the pending deferred value, + * before calling next() or nextWithDeferredValue() again. This is intended specifically to + * avoid allocating memory for the value if the caller eventually decides to abandon the + * iterator and never consume any more values from it. + */ + virtual Key nextWithDeferredValue() = 0; + virtual Value getDeferredValue() = 0; + + virtual const Key& current() = 0; virtual ~SortIteratorInterface() {} diff --git a/src/mongo/db/sorter/sorter_stats.cpp b/src/mongo/db/sorter/sorter_stats.cpp index fefa24797bf71..b422b3341bc47 100644 --- a/src/mongo/db/sorter/sorter_stats.cpp +++ b/src/mongo/db/sorter/sorter_stats.cpp @@ -29,6 +29,8 @@ #include "mongo/db/sorter/sorter_stats.h" +#include + #include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/sorter/sorter_stats.h b/src/mongo/db/sorter/sorter_stats.h index db80dcf29a3fc..e04f8c8a9ffaa 100644 --- a/src/mongo/db/sorter/sorter_stats.h +++ b/src/mongo/db/sorter/sorter_stats.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/db/sorter/sorter_stats_test.cpp b/src/mongo/db/sorter/sorter_stats_test.cpp index 1a6889c6b4422..3d4d58ededa25 100644 --- a/src/mongo/db/sorter/sorter_stats_test.cpp +++ b/src/mongo/db/sorter/sorter_stats_test.cpp @@ -28,8 +28,13 @@ */ #include "mongo/db/sorter/sorter_stats.h" + +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/sorter/sorter_test.cpp b/src/mongo/db/sorter/sorter_test.cpp index 512cc2e082c50..5f7b84642eca7 100644 --- a/src/mongo/db/sorter/sorter_test.cpp +++ b/src/mongo/db/sorter/sorter_test.cpp @@ -27,24 +27,30 @@ * it in the license file. 
*/ -#include "mongo/db/pipeline/document_source.h" - -#include "mongo/platform/basic.h" - -#include -#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep #include +#include +#include + #include "mongo/base/data_type_endian.h" #include "mongo/base/static_assert.h" -#include "mongo/config.h" +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/sorter/sorter.h" #include "mongo/logv2/log.h" #include "mongo/platform/random.h" -#include "mongo/stdx/thread.h" +#include "mongo/stdx/thread.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" namespace mongo { @@ -148,7 +154,13 @@ class IntIterator : public IWIterator { _current += _increment; return out; } - const IWPair& current() { + IntWrapper nextWithDeferredValue() { + MONGO_UNREACHABLE; + } + IntWrapper getDeferredValue() { + MONGO_UNREACHABLE; + } + const IntWrapper& current() { MONGO_UNREACHABLE; } @@ -166,9 +178,15 @@ class EmptyIterator : public IWIterator { return false; } Data next() { - verify(false); + MONGO_UNREACHABLE; + } + IntWrapper nextWithDeferredValue() { + MONGO_UNREACHABLE; } - const Data& current() { + IntWrapper getDeferredValue() { + MONGO_UNREACHABLE; + } + const IntWrapper& current() { MONGO_UNREACHABLE; } }; @@ -177,7 +195,7 @@ class LimitIterator : public IWIterator { public: LimitIterator(long long limit, std::shared_ptr source) : _remaining(limit), _source(source) { - verify(limit > 0); + invariant(limit > 0); } void openSource() {} @@ -187,11 +205,17 @@ class LimitIterator : public IWIterator { return _remaining && _source->more(); } Data next() { - verify(more()); + invariant(more()); _remaining--; return _source->next(); } - const Data& current() { + IntWrapper nextWithDeferredValue() { + MONGO_UNREACHABLE; + } + IntWrapper getDeferredValue() { + MONGO_UNREACHABLE; + } + const IntWrapper& current() { MONGO_UNREACHABLE; } @@ -271,14 +295,43 @@ std::shared_ptr makeInMemIterator(const int (&array)[N]) { return std::make_shared>(vec); } +/** + * Spills the contents of inputIter to a file and returns a FileIterator for reading the data back. + * This is needed because the MergeIterator currently requires that it is merging from sorted spill + * file segments (as opposed to any other kind of iterator). 
+ */ +template +std::shared_ptr spillToFile(IteratorPtr inputIter, const unittest::TempDir& tempDir) { + inputIter->openSource(); + if (!inputIter->more()) { + inputIter->closeSource(); + return std::make_shared(); + } + SorterFileStats sorterFileStats(nullptr /* sorterTracker */); + const SortOptions opts = SortOptions().TempDir(tempDir.path()); + auto spillFile = std::make_shared::File>( + opts.tempDir + "/" + nextFileName(), opts.sorterFileStats); + SortedFileWriter writer(opts, spillFile); + while (inputIter->more()) { + auto pair = inputIter->next(); + writer.addAlreadySorted(pair.first, pair.second); + } + auto outputIter = std::shared_ptr(writer.done()); + inputIter->closeSource(); + return outputIter; +} + template std::shared_ptr mergeIterators(IteratorPtr (&array)[N], + const unittest::TempDir& tempDir, Direction Dir = ASC, const SortOptions& opts = SortOptions()) { invariant(!opts.extSortAllowed); std::vector> vec; - for (int i = 0; i < N; i++) - vec.push_back(std::shared_ptr(array[i])); + for (int i = 0; i < N; i++) { + // Spill iterator outputs to a file and obtain a new iterator for it. + vec.push_back(spillToFile(array[i], tempDir)); + } return std::shared_ptr(IWIterator::merge(vec, opts, IWComparator(Dir))); } @@ -316,7 +369,13 @@ class InMemIterTests { _pos++; return ret; } - const IWPair& current() { + IntWrapper nextWithDeferredValue() { + MONGO_UNREACHABLE; + } + IntWrapper getDeferredValue() { + MONGO_UNREACHABLE; + } + const IntWrapper& current() { MONGO_UNREACHABLE; } size_t _pos; @@ -388,6 +447,7 @@ class SortedFileWriterAndFileIteratorTests { class MergeIteratorTests { public: void run() { + unittest::TempDir tempDir("mergeIteratorTests"); { // test empty (no inputs) std::vector> vec; std::shared_ptr mergeIter( @@ -399,7 +459,7 @@ class MergeIteratorTests { std::make_shared(), std::make_shared()}; - ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, ASC), + ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, tempDir, ASC), std::make_shared()); } @@ -410,7 +470,7 @@ class MergeIteratorTests { std::make_shared(0, 20, 2) // 0, 2, ... 18 }; - ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, ASC), + ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, tempDir, ASC), std::make_shared(0, 20, 1)); } @@ -421,7 +481,7 @@ class MergeIteratorTests { std::make_shared(28, 0, -3), // 28, 25, ... 1 std::make_shared()}; - ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, DESC), + ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iterators, tempDir, DESC), std::make_shared(30, 0, -1)); } { // test Limit @@ -430,7 +490,7 @@ class MergeIteratorTests { std::make_shared(0, 20, 2)}; // 0, 2, ... 18 ASSERT_ITERATORS_EQUIVALENT( - mergeIterators(iterators, ASC, SortOptions().Limit(10)), + mergeIterators(iterators, tempDir, ASC, SortOptions().Limit(10)), std::make_shared(10, std::make_shared(0, 20, 1))); } @@ -443,15 +503,15 @@ class MergeIteratorTests { auto itD = std::make_shared(15, 20, 1); // 15, 16, ... 
19 std::shared_ptr iteratorsAD[] = {itD, itA}; - auto mergedAD = mergeIterators(iteratorsAD, ASC); + auto mergedAD = mergeIterators(iteratorsAD, tempDir, ASC); ASSERT_ITERATORS_EQUIVALENT_FOR_N_STEPS(mergedAD, itFull, 5); std::shared_ptr iteratorsABD[] = {mergedAD, itB}; - auto mergedABD = mergeIterators(iteratorsABD, ASC); + auto mergedABD = mergeIterators(iteratorsABD, tempDir, ASC); ASSERT_ITERATORS_EQUIVALENT_FOR_N_STEPS(mergedABD, itFull, 5); std::shared_ptr iteratorsABCD[] = {itC, mergedABD}; - auto mergedABCD = mergeIterators(iteratorsABCD, ASC); + auto mergedABCD = mergeIterators(iteratorsABCD, tempDir, ASC); ASSERT_ITERATORS_EQUIVALENT_FOR_N_STEPS(mergedABCD, itFull, 5); } } @@ -481,7 +541,7 @@ class Basic { std::make_shared()); } - const auto runTests = [this, &opts](bool assertRanges) { + const auto runTests = [this, &opts, &tempDir](bool assertRanges) { { // test all data ASC std::shared_ptr sorter = makeSorter(opts, IWComparator(ASC)); addData(sorter.get()); @@ -514,8 +574,8 @@ class Basic { std::shared_ptr iters1[] = {done(sorters[0].get()), done(sorters[1].get())}; std::shared_ptr iters2[] = {correct(), correct()}; - ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iters1, ASC), - mergeIterators(iters2, ASC)); + ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iters1, tempDir, ASC), + mergeIterators(iters2, tempDir, ASC)); if (assertRanges) { assertRangeInfo(sorters[0], opts); @@ -533,8 +593,8 @@ class Basic { std::shared_ptr iters1[] = {done(sorters[0].get()), done(sorters[1].get())}; std::shared_ptr iters2[] = {correctReverse(), correctReverse()}; - ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iters1, DESC), - mergeIterators(iters2, DESC)); + ASSERT_ITERATORS_EQUIVALENT(mergeIterators(iters1, tempDir, DESC), + mergeIterators(iters2, tempDir, DESC)); if (assertRanges) { assertRangeInfo(sorters[0], opts); @@ -1004,6 +1064,36 @@ TEST_F(SorterMakeFromExistingRangesTest, RoundTrip) { } } +TEST_F(SorterMakeFromExistingRangesTest, NextWithDeferredValues) { + unittest::TempDir tempDir(_agent.getSuiteName() + "_" + _agent.getTestName()); + auto opts = SortOptions().ExtSortAllowed().TempDir(tempDir.path()); + + IWPair pair1(1, 100); + IWPair pair2(2, 200); + auto spillFile = std::make_shared::File>( + opts.tempDir + "/" + nextFileName(), opts.sorterFileStats); + SortedFileWriter writer(opts, spillFile); + writer.addAlreadySorted(pair1.first, pair1.second); + writer.addAlreadySorted(pair2.first, pair2.second); + auto iter = std::shared_ptr(writer.done()); + iter->openSource(); + + ASSERT(iter->more()); + IntWrapper key1 = iter->nextWithDeferredValue(); + IntWrapper value1 = iter->getDeferredValue(); + ASSERT_EQUALS(pair1.first, key1); + ASSERT_EQUALS(pair1.second, value1); + + ASSERT(iter->more()); + IntWrapper key2 = iter->nextWithDeferredValue(); + IntWrapper value2 = iter->getDeferredValue(); + ASSERT_EQUALS(pair2.first, key2); + ASSERT_EQUALS(pair2.second, value2); + + ASSERT_FALSE(iter->more()); + iter->closeSource(); +} + class BoundedSorterTest : public unittest::Test { public: using Key = IntWrapper; diff --git a/src/mongo/db/startup_recovery.cpp b/src/mongo/db/startup_recovery.cpp index c95b7aeff5463..f19d3234f2577 100644 --- a/src/mongo/db/startup_recovery.cpp +++ b/src/mongo/db/startup_recovery.cpp @@ -29,15 +29,46 @@ #include "mongo/db/startup_recovery.h" -#include "mongo/db/catalog/collection_write_path.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include 
"boost/system/detail/error_code.hpp" + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/drop_collection.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/multi_index_block.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/commands/feature_compatibility_version.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/database_name.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/feature_compatibility_version_document_gen.h" #include "mongo/db/feature_compatibility_version_documentation.h" @@ -46,16 +77,32 @@ #include "mongo/db/operation_context.h" #include "mongo/db/rebuild_indexes.h" #include "mongo/db/repair.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl_set_member_in_standalone_mode.h" -#include "mongo/db/server_options.h" +#include "mongo/db/resumable_index_builds_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_repair_observer.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/timeseries_extended_range.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" -#include "mongo/util/exit.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" #include "mongo/util/fail_point.h" #include "mongo/util/quick_exit.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -76,7 +123,7 @@ bool isWriteableStorageEngine() { // Attempt to restore the featureCompatibilityVersion document if it is missing. Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx) { - NamespaceString fcvNss(NamespaceString::kServerConfigurationNamespace); + const NamespaceString fcvNss(NamespaceString::kServerConfigurationNamespace); // If the admin database, which contains the server configuration collection with the // featureCompatibilityVersion document, does not exist, create it. @@ -91,8 +138,7 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx // If the server configuration collection, which contains the FCV document, does not exist, then // create it. 
auto catalog = CollectionCatalog::get(opCtx); - if (!catalog->lookupCollectionByNamespace(opCtx, - NamespaceString::kServerConfigurationNamespace)) { + if (!catalog->lookupCollectionByNamespace(opCtx, fcvNss)) { // (Generic FCV reference): This FCV reference should exist across LTS binary versions. LOGV2(4926905, "Re-creating featureCompatibilityVersion document that was deleted. Creating new " @@ -101,14 +147,19 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx uassertStatusOK(createCollection(opCtx, fcvNss.dbName(), BSON("create" << fcvNss.coll()))); } - const CollectionPtr fcvColl(catalog->lookupCollectionByNamespace( - opCtx, NamespaceString::kServerConfigurationNamespace)); - invariant(fcvColl); + const auto fcvColl = acquireCollection( + opCtx, + CollectionAcquisitionRequest(fcvNss, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + invariant(fcvColl.exists()); // Restore the featureCompatibilityVersion document if it is missing. BSONObj featureCompatibilityVersion; if (!Helpers::findOne(opCtx, - fcvColl, + fcvColl.getCollectionPtr(), BSON("_id" << multiversion::kParameterName), featureCompatibilityVersion)) { // (Generic FCV reference): This FCV reference should exist across LTS binary versions. @@ -121,16 +172,17 @@ Status restoreMissingFeatureCompatibilityVersionDocument(OperationContext* opCtx // (Generic FCV reference): This FCV reference should exist across LTS binary versions. fcvDoc.setVersion(multiversion::GenericFCV::kLastLTS); - writeConflictRetry(opCtx, "insertFCVDocument", fcvNss.ns(), [&] { + writeConflictRetry(opCtx, "insertFCVDocument", fcvNss, [&] { WriteUnitOfWork wunit(opCtx); - uassertStatusOK(collection_internal::insertDocument( - opCtx, fcvColl, InsertStatement(fcvDoc.toBSON()), nullptr /* OpDebug */, false)); + uassertStatusOK(Helpers::insert(opCtx, fcvColl, fcvDoc.toBSON())); wunit.commit(); }); } - invariant(Helpers::findOne( - opCtx, fcvColl, BSON("_id" << multiversion::kParameterName), featureCompatibilityVersion)); + invariant(Helpers::findOne(opCtx, + fcvColl.getCollectionPtr(), + BSON("_id" << multiversion::kParameterName), + featureCompatibilityVersion)); return Status::OK(); } @@ -212,8 +264,7 @@ Status ensureCollectionProperties(OperationContext* opCtx, const DatabaseName& dbName, EnsureIndexPolicy ensureIndexPolicy) { auto catalog = CollectionCatalog::get(opCtx); - for (auto collIt = catalog->begin(opCtx, dbName); collIt != catalog->end(opCtx); ++collIt) { - auto coll = *collIt; + for (auto&& coll : catalog->range(dbName)) { if (!coll) { break; } @@ -233,7 +284,7 @@ Status ensureCollectionProperties(OperationContext* opCtx, logAttrs(*coll)); if (EnsureIndexPolicy::kBuildMissing == ensureIndexPolicy) { auto writableCollection = - catalog->lookupCollectionByUUIDForMetadataWrite(opCtx, collIt.uuid()); + catalog->lookupCollectionByUUIDForMetadataWrite(opCtx, coll->uuid()); auto status = buildMissingIdIndex(opCtx, writableCollection); if (!status.isOK()) { LOGV2_ERROR(21021, @@ -383,7 +434,7 @@ void reconcileCatalogAndRebuildUnfinishedIndexes( fassert(40590, {ErrorCodes::InternalError, str::stream() << "failed to get index spec for index " << indexName - << " in collection " << collNss.toString()}); + << " in collection " << collNss.toStringForErrorMsg()}); } auto& indexesToRebuild = swIndexSpecs.getValue(); diff --git a/src/mongo/db/startup_warnings_common.cpp b/src/mongo/db/startup_warnings_common.cpp index 
9f9ba2e1031b1..38d95fccbab62 100644 --- a/src/mongo/db/startup_warnings_common.cpp +++ b/src/mongo/db/startup_warnings_common.cpp @@ -28,20 +28,23 @@ */ -#include "mongo/platform/basic.h" +#include // IWYU pragma: keep +#include +#include -#include "mongo/db/startup_warnings_common.h" - -#include -#include - -#include "mongo/client/authenticate.h" -#include "mongo/config.h" +#include "mongo/client/internal_auth.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/server_options.h" +#include "mongo/db/startup_warnings_common.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/net/ssl_options.h" -#include "mongo/util/processinfo.h" -#include "mongo/util/version.h" + +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/startup_warnings_mongod.cpp b/src/mongo/db/startup_warnings_mongod.cpp index 2bb3b9b3119b5..c3bb7a762b303 100644 --- a/src/mongo/db/startup_warnings_mongod.cpp +++ b/src/mongo/db/startup_warnings_mongod.cpp @@ -28,17 +28,27 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include // IWYU pragma: keep -#include "mongo/db/startup_warnings_mongod.h" +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" -#include -#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/startup_warnings_mongod.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/util/errno_util.h" #ifndef _WIN32 #include #endif -#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_options.h" #include "mongo/db/startup_warnings_common.h" #include "mongo/db/storage/storage_options.h" @@ -46,7 +56,6 @@ #include "mongo/transport/service_entry_point.h" #include "mongo/util/processinfo.h" #include "mongo/util/str.h" -#include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/db/startup_warnings_mongod.h b/src/mongo/db/startup_warnings_mongod.h index 00b41f4e8c6b5..d388457f8f3d8 100644 --- a/src/mongo/db/startup_warnings_mongod.h +++ b/src/mongo/db/startup_warnings_mongod.h @@ -27,9 +27,13 @@ * it in the license file. */ +#include + #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" namespace mongo { diff --git a/src/mongo/db/startup_warnings_mongod_test.cpp b/src/mongo/db/startup_warnings_mongod_test.cpp index 9103a77d30e40..3a5846f6480a4 100644 --- a/src/mongo/db/startup_warnings_mongod_test.cpp +++ b/src/mongo/db/startup_warnings_mongod_test.cpp @@ -27,14 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include // IWYU pragma: keep -#include -#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/startup_warnings_mongod.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" namespace { diff --git a/src/mongo/db/stats/SConscript b/src/mongo/db/stats/SConscript index b0a2a98198b0a..04790ba308e6c 100644 --- a/src/mongo/db/stats/SConscript +++ b/src/mongo/db/stats/SConscript @@ -87,10 +87,14 @@ env.Library( env.Library( target='change_collection_server_status', - source=['change_collection_server_status.cpp'], + source=[ + 'change_collection_server_status.cpp', + 'change_stream_preimages_server_status.cpp', + ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/change_stream_change_collection_manager', + '$BUILD_DIR/mongo/db/change_stream_pre_images_collection_manager', '$BUILD_DIR/mongo/db/change_stream_serverless_helpers', '$BUILD_DIR/mongo/db/commands/server_status_core', '$BUILD_DIR/mongo/db/server_base', @@ -174,6 +178,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/repl/replmocks', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/db/shared_request_handling', '$BUILD_DIR/mongo/util/clock_source_mock', diff --git a/src/mongo/db/stats/api_version_metrics.cpp b/src/mongo/db/stats/api_version_metrics.cpp index 345cacaeddfb3..21738ee9373d5 100644 --- a/src/mongo/db/stats/api_version_metrics.cpp +++ b/src/mongo/db/stats/api_version_metrics.cpp @@ -28,7 +28,19 @@ */ #include "mongo/db/stats/api_version_metrics.h" -#include "mongo/db/commands/server_status.h" + +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/db/stats/api_version_metrics.h b/src/mongo/db/stats/api_version_metrics.h index 6d2b7eb6ce409..e18546c75e94d 100644 --- a/src/mongo/db/stats/api_version_metrics.h +++ b/src/mongo/db/stats/api_version_metrics.h @@ -29,14 +29,18 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/api_parameters.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/concurrency/with_lock.h" #include "mongo/util/time_support.h" -#include - namespace mongo { /** diff --git a/src/mongo/db/stats/api_version_metrics_test.cpp b/src/mongo/db/stats/api_version_metrics_test.cpp index e5f099a44c465..041d1d6d95cb8 100644 --- a/src/mongo/db/stats/api_version_metrics_test.cpp +++ b/src/mongo/db/stats/api_version_metrics_test.cpp @@ -27,11 +27,22 @@ * it in the license file. 
*/ +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/stats/api_version_metrics.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/time_support.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/stats/change_collection_server_status.cpp b/src/mongo/db/stats/change_collection_server_status.cpp index f7d7f75a75c73..e37547c7ec341 100644 --- a/src/mongo/db/stats/change_collection_server_status.cpp +++ b/src/mongo/db/stats/change_collection_server_status.cpp @@ -27,11 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/change_stream_change_collection_manager.h" #include "mongo/db/change_stream_serverless_helpers.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" namespace mongo { /** diff --git a/src/mongo/db/stats/change_stream_preimages_server_status.cpp b/src/mongo/db/stats/change_stream_preimages_server_status.cpp new file mode 100644 index 0000000000000..7c5e38bb70c5c --- /dev/null +++ b/src/mongo/db/stats/change_stream_preimages_server_status.cpp @@ -0,0 +1,64 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/change_stream_pre_images_collection_manager.h" +#include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" + +namespace mongo { +/** + * Adds a section 'changeStreamPreImages' to the serverStatus metrics that provides aggregated + * statistics for change stream pre-images. 
+ */ +class ChangeStreamPreImagesServerStatus final : public ServerStatusSection { +public: + ChangeStreamPreImagesServerStatus() : ServerStatusSection("changeStreamPreImages") {} + + bool includeByDefault() const override { + return true; + } + + void appendSection(OperationContext* opCtx, + const BSONElement& configElement, + BSONObjBuilder* result) const override { + // Append the section only when pre-images exists. + const auto& jobStats = + ChangeStreamPreImagesCollectionManager::get(opCtx).getPurgingJobStats(); + + result->append(getSectionName(), BSON("purgingJob" << jobStats.toBSON())); + } +} changeStreamPreImagesServerStatus; + +} // namespace mongo diff --git a/src/mongo/db/stats/counters.cpp b/src/mongo/db/stats/counters.cpp index e3481e4f133cd..0d44e29fcc7e6 100644 --- a/src/mongo/db/stats/counters.cpp +++ b/src/mongo/db/stats/counters.cpp @@ -30,11 +30,15 @@ #include "mongo/db/stats/counters.h" #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/client/authenticate.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/jsobj.h" -#include "mongo/logv2/log.h" +#include "mongo/db/operation_context.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -348,5 +352,14 @@ OperatorCounters operatorCountersWindowAccumulatorExpressions{ "operatorCounters.windowAccumulators."}; CounterMetric updateManyCount("query.updateManyCount"); CounterMetric deleteManyCount("query.deleteManyCount"); +CounterMetric updateOneTargetedShardedCount("query.updateOneTargetedShardedCount"); +CounterMetric deleteOneTargetedShardedCount("query.deleteOneTargetedShardedCount"); +CounterMetric findAndModifyTargetedShardedCount("query.findAndModifyTargetedShardedCount"); +CounterMetric updateOneUnshardedCount("query.updateOneUnshardedCount"); +CounterMetric deleteOneUnshardedCount("query.deleteOneUnshardedCount"); +CounterMetric findAndModifyUnshardedCount("query.findAndModifyUnshardedCount"); +CounterMetric updateOneNonTargetedShardedCount("query.updateOneNonTargetedShardedCount"); +CounterMetric deleteOneNonTargetedShardedCount("query.deleteOneNonTargetedShardedCount"); +CounterMetric findAndModifyNonTargetedShardedCount("query.findAndModifyNonTargetedShardedCount"); } // namespace mongo diff --git a/src/mongo/db/stats/counters.h b/src/mongo/db/stats/counters.h index 3eab8b591a3e8..8853869cbfe73 100644 --- a/src/mongo/db/stats/counters.h +++ b/src/mongo/db/stats/counters.h @@ -29,16 +29,31 @@ #pragma once +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/curop.h" #include "mongo/db/jsobj.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/basic.h" #include "mongo/rpc/message.h" #include "mongo/util/aligned.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/spin_lock.h" #include "mongo/util/processinfo.h" +#include "mongo/util/str.h" #include "mongo/util/string_map.h" namespace mongo { @@ -571,5 +586,23 @@ extern OperatorCounters operatorCountersWindowAccumulatorExpressions; extern CounterMetric updateManyCount; // Track the number of deleteMany calls. extern CounterMetric deleteManyCount; +// Track the number of targeted updateOne commands on sharded collections. 
+extern CounterMetric updateOneTargetedShardedCount; +// Track the number of targeted deleteOne commands on sharded collections. +extern CounterMetric deleteOneTargetedShardedCount; +// Track the number of targeted findAndModify commands on sharded collections. +extern CounterMetric findAndModifyTargetedShardedCount; +// Track the number of updateOne commands on unsharded collections. +extern CounterMetric updateOneUnshardedCount; +// Track the number of deleteOne commands on unsharded collections. +extern CounterMetric deleteOneUnshardedCount; +// Track the number of findAndModify commands on unsharded collections. +extern CounterMetric findAndModifyUnshardedCount; +// Track the number of non-targeted updateOne commands on sharded collections +extern CounterMetric updateOneNonTargetedShardedCount; +// Track the number of non-targeted deleteOne commands on sharded collections +extern CounterMetric deleteOneNonTargetedShardedCount; +// Track the number of non-targeted findAndModify commands on sharded collections +extern CounterMetric findAndModifyNonTargetedShardedCount; } // namespace mongo diff --git a/src/mongo/db/stats/fill_locker_info.cpp b/src/mongo/db/stats/fill_locker_info.cpp index 5690e0a5ee120..71f853d5401ba 100644 --- a/src/mongo/db/stats/fill_locker_info.cpp +++ b/src/mongo/db/stats/fill_locker_info.cpp @@ -31,10 +31,18 @@ #include "mongo/db/stats/fill_locker_info.h" #include +#include +#include +#include +#include +#include + +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/lock_stats.h" #include "mongo/db/concurrency/locker.h" -#include "mongo/db/jsobj.h" -#include "mongo/logv2/log.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/stats/fill_locker_info.h b/src/mongo/db/stats/fill_locker_info.h index 7f5f6d6287411..f620f44fd0172 100644 --- a/src/mongo/db/stats/fill_locker_info.h +++ b/src/mongo/db/stats/fill_locker_info.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/concurrency/locker.h" namespace mongo { diff --git a/src/mongo/db/stats/fill_locker_info_test.cpp b/src/mongo/db/stats/fill_locker_info_test.cpp index 3bc23346349d2..ae7b5ca4d9db3 100644 --- a/src/mongo/db/stats/fill_locker_info_test.cpp +++ b/src/mongo/db/stats/fill_locker_info_test.cpp @@ -27,14 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/lock_stats.h" +#include "mongo/db/database_name.h" #include "mongo/db/stats/fill_locker_info.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -84,7 +95,9 @@ DEATH_TEST(FillLockerInfo, ShouldFailIfLocksAreNotSortedAppropriately, "Invarian LockerInfo info; // The global lock is supposed to come before the database lock. 
info.locks = { - OneLock{ResourceId(RESOURCE_DATABASE, DatabaseName(boost::none, "TestDB")), MODE_X}, + OneLock{ResourceId(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB")), + MODE_X}, OneLock{resourceIdGlobal, MODE_IX}}; BSONObjBuilder infoBuilder; @@ -92,7 +105,8 @@ DEATH_TEST(FillLockerInfo, ShouldFailIfLocksAreNotSortedAppropriately, "Invarian } TEST(FillLockerInfo, DoesReportLocksHeld) { - const ResourceId dbId(RESOURCE_DATABASE, "TestDB"_sd); + const ResourceId dbId(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "TestDB"_sd)); LockerInfo info; info.locks = {OneLock{resourceIdGlobal, MODE_IX}, OneLock{dbId, MODE_IX}}; @@ -109,8 +123,10 @@ TEST(FillLockerInfo, DoesReportLocksHeld) { } TEST(FillLockerInfo, ShouldReportMaxTypeHeldForResourceType) { - const ResourceId firstDbId(RESOURCE_DATABASE, "FirstDB"_sd); - const ResourceId secondDbId(RESOURCE_DATABASE, "SecondDB"_sd); + const ResourceId firstDbId(RESOURCE_DATABASE, + DatabaseName::createDatabaseName_forTest(boost::none, "FirstDB"_sd)); + const ResourceId secondDbId( + RESOURCE_DATABASE, DatabaseName::createDatabaseName_forTest(boost::none, "SecondDB"_sd)); LockerInfo info; info.locks = {OneLock{resourceIdGlobal, MODE_IX}, OneLock{firstDbId, MODE_IX}, diff --git a/src/mongo/db/stats/latency_server_status_section.cpp b/src/mongo/db/stats/latency_server_status_section.cpp index 7df644d898ccc..15c4639c620f4 100644 --- a/src/mongo/db/stats/latency_server_status_section.cpp +++ b/src/mongo/db/stats/latency_server_status_section.cpp @@ -27,10 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" #include "mongo/db/stats/top.h" diff --git a/src/mongo/db/stats/lock_server_status_section.cpp b/src/mongo/db/stats/lock_server_status_section.cpp index 75ddc824461b3..58bd3909c796a 100644 --- a/src/mongo/db/stats/lock_server_status_section.cpp +++ b/src/mongo/db/stats/lock_server_status_section.cpp @@ -27,15 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/concurrency/lock_stats.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/stats/operation_latency_histogram.cpp b/src/mongo/db/stats/operation_latency_histogram.cpp index 829b1205fc76e..ffaccc248b28c 100644 --- a/src/mongo/db/stats/operation_latency_histogram.cpp +++ b/src/mongo/db/stats/operation_latency_histogram.cpp @@ -27,15 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/stats/operation_latency_histogram.h" - #include +#include +#include #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/namespace_string.h" +#include "mongo/db/server_options.h" +#include "mongo/db/stats/operation_latency_histogram.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/bits.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/stats/operation_latency_histogram.h b/src/mongo/db/stats/operation_latency_histogram.h index 927589688a242..68875e55d1872 100644 --- a/src/mongo/db/stats/operation_latency_histogram.h +++ b/src/mongo/db/stats/operation_latency_histogram.h @@ -29,7 +29,9 @@ #pragma once #include +#include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" namespace mongo { diff --git a/src/mongo/db/stats/operation_latency_histogram_test.cpp b/src/mongo/db/stats/operation_latency_histogram_test.cpp index fb5b5ccf40ff6..a4e29e42f148b 100644 --- a/src/mongo/db/stats/operation_latency_histogram_test.cpp +++ b/src/mongo/db/stats/operation_latency_histogram_test.cpp @@ -27,18 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/stats/operation_latency_histogram.h" - #include -#include +#include +#include +#include #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/server_options.h" +#include "mongo/db/stats/operation_latency_histogram.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/scopeguard.h" namespace mongo { diff --git a/src/mongo/db/stats/resource_consumption_metrics.cpp b/src/mongo/db/stats/resource_consumption_metrics.cpp index b67bb14135356..d2780027e2484 100644 --- a/src/mongo/db/stats/resource_consumption_metrics.cpp +++ b/src/mongo/db/stats/resource_consumption_metrics.cpp @@ -28,14 +28,20 @@ */ -#include +#include +#include -#include "mongo/db/stats/resource_consumption_metrics.h" +#include +#include "mongo/bson/bsonelement.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/stats/operation_resource_consumption_gen.h" +#include "mongo/db/stats/resource_consumption_metrics.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kResourceConsumption @@ -328,7 +334,7 @@ void ResourceConsumption::MetricsCollector::incrementOneIdxEntryWritten(StringDa } void ResourceConsumption::MetricsCollector::beginScopedCollecting(OperationContext* opCtx, - const std::string& dbName) { + const DatabaseName& dbName) { invariant(!isInScope()); _dbName = dbName; _collecting = ScopedCollectionState::kInScopeCollecting; @@ -366,7 +372,7 @@ void ResourceConsumption::MetricsCollector::incrementOneCursorSeek(StringData ur } ResourceConsumption::ScopedMetricsCollector::ScopedMetricsCollector(OperationContext* opCtx, - const std::string& dbName, + const DatabaseName& dbName, bool commandCollectsMetrics) : _opCtx(opCtx) { @@ -415,9 +421,9 @@ ResourceConsumption& ResourceConsumption::get(OperationContext* opCtx) { } void ResourceConsumption::merge(OperationContext* opCtx, - const std::string& dbName, + const DatabaseName& 
dbName, const OperationMetrics& metrics) { - invariant(!dbName.empty()); + invariant(!dbName.isEmpty()); LOGV2_DEBUG(7527700, 1, @@ -432,7 +438,7 @@ void ResourceConsumption::merge(OperationContext* opCtx, // inconsistent state is not impactful for the purposes of metrics collection, perform a // best-effort check so that we can record metrics for this operation. auto isPrimary = repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase_UNSAFE( - opCtx, DatabaseName::kAdmin.toString()); + opCtx, DatabaseName::kAdmin); AggregatedMetrics newMetrics; if (isPrimary) { @@ -446,8 +452,9 @@ void ResourceConsumption::merge(OperationContext* opCtx, } // Add all metrics into the the globally-aggregated metrics. + const auto& dbNameStr = dbName.toStringForResourceId(); stdx::lock_guard lk(_mutex); - _dbMetrics[dbName] += newMetrics; + _dbMetrics[dbNameStr] += newMetrics; _cpuTime += newMetrics.cpuNanos; } diff --git a/src/mongo/db/stats/resource_consumption_metrics.h b/src/mongo/db/stats/resource_consumption_metrics.h index 6c5bf853c4445..985fff5aa2a7a 100644 --- a/src/mongo/db/stats/resource_consumption_metrics.h +++ b/src/mongo/db/stats/resource_consumption_metrics.h @@ -29,14 +29,23 @@ #pragma once +#include +#include #include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/operation_cpu_timer.h" +#include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" namespace mongo { @@ -284,7 +293,7 @@ class ResourceConsumption { * When called, resource consumption metrics should be recorded for this operation. Clears * any metrics from previous collection periods. */ - void beginScopedCollecting(OperationContext* opCtx, const std::string& dbName); + void beginScopedCollecting(OperationContext* opCtx, const DatabaseName& dbName); /** * When called, sets state that a ScopedMetricsCollector is in scope, but is not recording @@ -319,7 +328,7 @@ class ResourceConsumption { return _hasCollectedMetrics; } - const std::string& getDbName() const { + const DatabaseName& getDbName() const { return _dbName; } @@ -328,12 +337,12 @@ class ResourceConsumption { * Metrics due to the Collector stopping without being associated with any database yet. 
*/ OperationMetrics& getMetrics() { - invariant(!_dbName.empty(), "observing Metrics before a dbName has been set"); + invariant(!_dbName.isEmpty(), "observing Metrics before a dbName has been set"); return _metrics; } const OperationMetrics& getMetrics() const { - invariant(!_dbName.empty(), "observing Metrics before a dbName has been set"); + invariant(!_dbName.isEmpty(), "observing Metrics before a dbName has been set"); return _metrics; } @@ -437,7 +446,7 @@ class ResourceConsumption { }; ScopedCollectionState _collecting = ScopedCollectionState::kInactive; bool _hasCollectedMetrics = false; - std::string _dbName; + DatabaseName _dbName; OperationMetrics _metrics; bool _paused = false; }; @@ -450,9 +459,9 @@ class ResourceConsumption { class ScopedMetricsCollector { public: ScopedMetricsCollector(OperationContext* opCtx, - const std::string& dbName, + const DatabaseName& dbName, bool commandCollectsMetrics); - ScopedMetricsCollector(OperationContext* opCtx, const std::string& dbName) + ScopedMetricsCollector(OperationContext* opCtx, const DatabaseName& dbName) : ScopedMetricsCollector(opCtx, dbName, true) {} ~ScopedMetricsCollector(); @@ -495,9 +504,9 @@ class ResourceConsumption { /** * Returns whether the database's metrics should be collected. */ - static bool shouldCollectMetricsForDatabase(StringData dbName) { - if (dbName == DatabaseName::kAdmin.db() || dbName == DatabaseName::kConfig.db() || - dbName == DatabaseName::kLocal.db()) { + static bool shouldCollectMetricsForDatabase(const DatabaseName& dbName) { + if (dbName == DatabaseName::kAdmin || dbName == DatabaseName::kConfig || + dbName == DatabaseName::kLocal) { return false; } return true; @@ -526,7 +535,9 @@ class ResourceConsumption { * * The database name must not be an empty string. */ - void merge(OperationContext* opCtx, const std::string& dbName, const OperationMetrics& metrics); + void merge(OperationContext* opCtx, + const DatabaseName& dbName, + const OperationMetrics& metrics); /** * Returns a copy of the per-database metrics map. diff --git a/src/mongo/db/stats/resource_consumption_metrics_test.cpp b/src/mongo/db/stats/resource_consumption_metrics_test.cpp index 938415b2a3023..896f133a3d3d2 100644 --- a/src/mongo/db/stats/resource_consumption_metrics_test.cpp +++ b/src/mongo/db/stats/resource_consumption_metrics_test.cpp @@ -27,13 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include +#include + +#include "mongo/bson/oid.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/db/stats/operation_resource_consumption_gen.h" #include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { @@ -41,9 +55,10 @@ namespace { ServerParameter* getServerParameter(const std::string& name) { return ServerParameterSet::getNodeParameterSet()->get(name); } + } // namespace -class ResourceConsumptionMetricsTest : public LockerNoopServiceContextTest { +class ResourceConsumptionMetricsTest : public ServiceContextTest { public: void setUp() { _opCtx = makeOperationContext(); @@ -74,7 +89,8 @@ TEST_F(ResourceConsumptionMetricsTest, Merge) { auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get()); - operationMetrics.beginScopedCollecting(_opCtx.get(), "db1"); + operationMetrics.beginScopedCollecting( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); globalResourceConsumption.merge( _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics()); globalResourceConsumption.merge( @@ -87,7 +103,8 @@ TEST_F(ResourceConsumptionMetricsTest, Merge) { ASSERT_EQ(dbMetrics.count("db3"), 0); operationMetrics.endScopedCollecting(); - operationMetrics.beginScopedCollecting(_opCtx.get(), "db2"); + operationMetrics.beginScopedCollecting( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2")); globalResourceConsumption.merge( _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics()); globalResourceConsumption.merge( @@ -106,7 +123,10 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) { // Collect { const bool collectMetrics = true; - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1", collectMetrics); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "db1"), + collectMetrics); ASSERT_TRUE(operationMetrics.isCollecting()); } @@ -118,7 +138,10 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) { // Don't collect { const bool collectMetrics = false; - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1", collectMetrics); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "db1"), + collectMetrics); ASSERT_FALSE(operationMetrics.isCollecting()); } @@ -128,13 +151,19 @@ TEST_F(ResourceConsumptionMetricsTest, ScopedMetricsCollector) { ASSERT_EQ(metricsCopy.count("db1"), 0); // Collect - { ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); } + { + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); + } metricsCopy = globalResourceConsumption.getDbMetrics(); ASSERT_EQ(metricsCopy.count("db1"), 1); // Collect on a different database - { 
ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2"); } + { + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2")); + } metricsCopy = globalResourceConsumption.getDbMetrics(); ASSERT_EQ(metricsCopy.count("db1"), 1); @@ -156,16 +185,21 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) { // Collect, nesting does not override that behavior or change the collection database. { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); ASSERT(operationMetrics.hasCollectedMetrics()); { const bool collectMetrics = false; - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2", collectMetrics); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "db2"), + collectMetrics); ASSERT_TRUE(operationMetrics.isCollecting()); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db3"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db3")); ASSERT_TRUE(operationMetrics.isCollecting()); } } @@ -181,17 +215,23 @@ TEST_F(ResourceConsumptionMetricsTest, NestedScopedMetricsCollector) { // Don't collect, nesting does not override that behavior. { const bool collectMetrics = false; - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2", collectMetrics); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "db2"), + collectMetrics); ASSERT_FALSE(operationMetrics.hasCollectedMetrics()); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db3"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db3")); ASSERT_FALSE(operationMetrics.isCollecting()); { ResourceConsumption::ScopedMetricsCollector scope( - _opCtx.get(), "db4", collectMetrics); + _opCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "db4"), + collectMetrics); ASSERT_FALSE(operationMetrics.isCollecting()); } } @@ -225,7 +265,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) { auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get()); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); operationMetrics.incrementOneDocRead("", 2); operationMetrics.incrementOneIdxEntryRead("", 8); @@ -252,7 +293,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetrics) { reset(operationMetrics); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); operationMetrics.incrementOneDocRead("", 32); operationMetrics.incrementOneIdxEntryRead("", 128); @@ -282,7 +324,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) { ->setFollowerMode(repl::MemberState::RS_SECONDARY)); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); 
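
The updated tests construct a DatabaseName with DatabaseName::createDatabaseName_forTest(boost::none, "db1") wherever they previously passed the bare string "db1". A minimal sketch of the resulting call pattern, assuming the ResourceConsumption API as it appears in this patch (the function name recordOneRead and the 2-byte read are illustrative, not part of the patch):

// Illustrative only: scope metrics collection to a tenant-less database the
// way the updated tests do. recordOneRead and the byte count are made up;
// the ResourceConsumption calls are the ones visible in this patch.
#include <boost/none.hpp>

#include "mongo/db/database_name.h"
#include "mongo/db/stats/resource_consumption_metrics.h"

namespace mongo {
void recordOneRead(OperationContext* opCtx) {
    const DatabaseName dbName =
        DatabaseName::createDatabaseName_forTest(boost::none, "db1");
    // Begins scoped collection; once 'scope' is destroyed the tests above
    // observe the collected metrics under "db1" in the global per-db map.
    ResourceConsumption::ScopedMetricsCollector scope(opCtx, dbName);
    auto& metrics = ResourceConsumption::MetricsCollector::get(opCtx);
    metrics.incrementOneDocRead("" /* uri */, 2 /* bytes */);
}
}  // namespace mongo
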
operationMetrics.incrementOneDocRead("", 2); operationMetrics.incrementOneIdxEntryRead("", 8); @@ -307,7 +350,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsSecondary) { reset(operationMetrics); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); operationMetrics.incrementOneDocRead("", 32); operationMetrics.incrementOneIdxEntryRead("", 128); @@ -336,7 +380,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) { // Start collecting metrics in the primary state, then change to secondary. Metrics should be // attributed to the secondary state, since that is the state where the operation completed. { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); operationMetrics.incrementOneDocRead("", 2); operationMetrics.incrementOneIdxEntryRead("", 8); @@ -380,7 +425,8 @@ TEST_F(ResourceConsumptionMetricsTest, IncrementReadMetricsAcrossStates) { // Start collecting metrics in the secondary state, then change to primary. Metrics should be // attributed to the primary state only. { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); operationMetrics.incrementOneDocRead("", 2); operationMetrics.incrementOneIdxEntryRead("", 8); @@ -429,7 +475,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsRead) { int expectedUnits = 0; { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); // Each of these should be counted as 1 document unit (unit size = 128). operationMetrics.incrementOneDocRead("", 2); @@ -464,7 +511,8 @@ TEST_F(ResourceConsumptionMetricsTest, DocumentUnitsWritten) { int expectedUnits = 0; { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); // Each of these should be counted as 1 document unit (unit size = 128). operationMetrics.incrementOneDocWritten("", 2); @@ -498,7 +546,8 @@ TEST_F(ResourceConsumptionMetricsTest, TotalUnitsWritten) { int expectedUnits = 0; { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); // Each of these should be counted as 1 total unit (unit size = 128). 
operationMetrics.incrementOneDocWritten("", 2); @@ -560,7 +609,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsRead) { int expectedUnits = 0; { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); gIndexEntryUnitSizeBytes = 16; @@ -609,7 +659,8 @@ TEST_F(ResourceConsumptionMetricsTest, IdxEntryUnitsWritten) { int expectedUnits = 0; { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); gIndexEntryUnitSizeBytes = 16; @@ -674,7 +725,8 @@ TEST_F(ResourceConsumptionMetricsTest, CpuNanos) { { // Ensure that the CPU timer increases relative to a single operation. - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); auto lastNanos = operationMetrics.getMetrics().cpuTimer->getElapsed(); spinFor(Milliseconds(1)); ASSERT_GT(operationMetrics.getMetrics().cpuTimer->getElapsed(), lastNanos); @@ -690,7 +742,8 @@ TEST_F(ResourceConsumptionMetricsTest, CpuNanos) { ASSERT_EQ(dbMetrics["db1"].cpuNanos, nanos); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); spinFor(Milliseconds(1)); } @@ -716,7 +769,8 @@ TEST_F(ResourceConsumptionMetricsTest, CursorSeeks) { int expectedSeeks = 0; { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); operationMetrics.incrementOneCursorSeek(""); operationMetrics.incrementOneCursorSeek(""); operationMetrics.incrementOneCursorSeek(""); @@ -733,7 +787,8 @@ TEST_F(ResourceConsumptionMetricsTest, PauseMetricsCollectorBlock) { auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get()); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); { // Metrics increase within this scope should not be applied. ResourceConsumption::PauseMetricsCollectorBlock pauseMetricsCollection(_opCtx.get()); @@ -772,7 +827,8 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) { auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get()); { - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db1"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db1")); operationMetrics.incrementOneDocRead("", 2); operationMetrics.incrementOneIdxEntryRead("", 4); @@ -795,7 +851,8 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) { // We expect this metrics collection to wipe out the metrics from the previous one. 
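
These unit tests rely on byte counts being rounded up to whole units of 128 bytes, so 2-byte and 128-byte documents each cost one unit while a 129-byte document costs two. A sketch of that rounding, stated as an assumption drawn from the "unit size = 128" comments rather than copied from the server implementation:

// Ceiling-division sketch of the assumed unit rounding; not the server code.
#include <cstdint>

std::int64_t docUnits(std::int64_t bytes, std::int64_t unitSizeBytes = 128) {
    // Every non-empty document costs at least one unit, then one more per
    // additional started 128-byte block.
    return (bytes + unitSizeBytes - 1) / unitSizeBytes;
}
// docUnits(2) == 1, docUnits(128) == 1, docUnits(129) == 2, docUnits(384) == 3
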
{ - ResourceConsumption::ScopedMetricsCollector scope(_opCtx.get(), "db2"); + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(boost::none, "db2")); operationMetrics.incrementOneDocRead("", 64); operationMetrics.incrementOneIdxEntryRead("", 128); operationMetrics.incrementKeysSorted(256); @@ -828,4 +885,119 @@ TEST_F(ResourceConsumptionMetricsTest, ResetMetricsBetweenCollection) { ASSERT_EQ(metricsCopy["db2"].primaryReadMetrics.cursorSeeks, 1); } +TEST_F(ResourceConsumptionMetricsTest, MetricsWithTenantId) { + auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext()); + auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get()); + const TenantId tenantId = TenantId(OID::gen()); + + std::string dbName1Str = str::stream() << tenantId.toString() << "_db1"; + operationMetrics.beginScopedCollecting( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db1")); + globalResourceConsumption.merge( + _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics()); + globalResourceConsumption.merge( + _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics()); + + auto dbMetrics = globalResourceConsumption.getDbMetrics(); + ASSERT_EQ(dbMetrics.count(dbName1Str), 1); + ASSERT_EQ(dbMetrics.count("db2"), 0); + operationMetrics.endScopedCollecting(); + + std::string dbName2Str = str::stream() << tenantId.toString() << "_db2"; + operationMetrics.beginScopedCollecting( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db2")); + globalResourceConsumption.merge( + _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics()); + globalResourceConsumption.merge( + _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics()); + + dbMetrics = globalResourceConsumption.getDbMetrics(); + ASSERT_EQ(dbMetrics.count(dbName1Str), 1); + ASSERT_EQ(dbMetrics.count(dbName2Str), 1); + operationMetrics.endScopedCollecting(); + + // Same '_db2' but different tenant. 
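
These tenant-aware tests expect the per-database metrics map to be keyed by the tenant id followed by an underscore and the database name, which is what merge() now derives via DatabaseName::toStringForResourceId(). A hypothetical helper, not part of the patch, that spells out the expected key format:

// Hypothetical helper: the key format these tests expect the per-database
// metrics map to use for tenant-owned databases.
#include <string>

#include "mongo/db/tenant_id.h"
#include "mongo/util/str.h"

namespace mongo {
std::string expectedMetricsKey(const TenantId& tenantId, const std::string& db) {
    return str::stream() << tenantId.toString() << "_" << db;
}
}  // namespace mongo
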
+ const TenantId otherTenantId = TenantId(OID::gen()); + dbMetrics = globalResourceConsumption.getDbMetrics(); + + std::string otherDbName2Str = str::stream() << otherTenantId.toString() << "_db2"; + operationMetrics.beginScopedCollecting( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(otherTenantId, "db2")); + globalResourceConsumption.merge( + _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics()); + globalResourceConsumption.merge( + _opCtx.get(), operationMetrics.getDbName(), operationMetrics.getMetrics()); + + dbMetrics = globalResourceConsumption.getDbMetrics(); + ASSERT_EQ(dbMetrics.count(dbName1Str), 1); + ASSERT_EQ(dbMetrics.count(dbName2Str), 1); + ASSERT_EQ(dbMetrics.count(otherDbName2Str), 1); + operationMetrics.endScopedCollecting(); +} + +TEST_F(ResourceConsumptionMetricsTest, MergeWithTenantId) { + auto& globalResourceConsumption = ResourceConsumption::get(getServiceContext()); + auto& operationMetrics = ResourceConsumption::MetricsCollector::get(_opCtx.get()); + const TenantId tenantId = TenantId(OID::gen()); + const TenantId otherTenantId = TenantId(OID::gen()); + + { + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(tenantId, "db1")); + + operationMetrics.incrementOneDocRead("", 2); + operationMetrics.incrementOneIdxEntryRead("", 4); + operationMetrics.incrementKeysSorted(8); + operationMetrics.incrementSorterSpills(16); + operationMetrics.incrementDocUnitsReturned("", makeDocUnits(32)); + operationMetrics.incrementOneCursorSeek(""); + } + + std::string dbName1Str = str::stream() << tenantId.toString() << "_db1"; + auto metricsCopy = globalResourceConsumption.getAndClearDbMetrics(); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.bytes(), 2); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.units(), 1); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 4); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.units(), 1); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.keysSorted, 8); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.sorterSpills, 16); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.bytes(), 32); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.units(), 1); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.cursorSeeks, 1); + + { + ResourceConsumption::ScopedMetricsCollector scope( + _opCtx.get(), DatabaseName::createDatabaseName_forTest(otherTenantId, "db1")); + + operationMetrics.incrementOneDocRead("", 2); + operationMetrics.incrementOneIdxEntryRead("", 4); + operationMetrics.incrementKeysSorted(8); + operationMetrics.incrementSorterSpills(16); + operationMetrics.incrementDocUnitsReturned("", makeDocUnits(32)); + operationMetrics.incrementOneCursorSeek(""); + } + + metricsCopy = globalResourceConsumption.getAndClearDbMetrics(); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.bytes(), 0); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsRead.units(), 0); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 0); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.idxEntriesRead.units(), 0); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.keysSorted, 0); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.sorterSpills, 0); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.bytes(), 0); + 
ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.docsReturned.units(), 0); + ASSERT_EQ(metricsCopy[dbName1Str].primaryReadMetrics.cursorSeeks, 0); + + std::string otherDbName1Str = str::stream() << otherTenantId.toString() << "_db1"; + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsRead.bytes(), 2); + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsRead.units(), 1); + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.idxEntriesRead.bytes(), 4); + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.idxEntriesRead.units(), 1); + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.keysSorted, 8); + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.sorterSpills, 16); + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsReturned.bytes(), 32); + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.docsReturned.units(), 1); + ASSERT_EQ(metricsCopy[otherDbName1Str].primaryReadMetrics.cursorSeeks, 1); +} + } // namespace mongo diff --git a/src/mongo/db/stats/server_read_concern_metrics.cpp b/src/mongo/db/stats/server_read_concern_metrics.cpp index 8f210c32cd2f4..c35da0bc06263 100644 --- a/src/mongo/db/stats/server_read_concern_metrics.cpp +++ b/src/mongo/db/stats/server_read_concern_metrics.cpp @@ -27,14 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/stats/server_read_concern_metrics.h" +#include +#include +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" +#include "mongo/db/read_write_concern_provenance.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/service_context.h" +#include "mongo/db/stats/server_read_concern_metrics.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/stats/server_read_concern_metrics.h b/src/mongo/db/stats/server_read_concern_metrics.h index cb936d0a7aa1d..90adf1074f88c 100644 --- a/src/mongo/db/stats/server_read_concern_metrics.h +++ b/src/mongo/db/stats/server_read_concern_metrics.h @@ -33,6 +33,7 @@ #include "mongo/db/repl/read_concern_args.h" #include "mongo/db/service_context.h" #include "mongo/db/stats/read_concern_stats_gen.h" +#include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/db/stats/server_write_concern_metrics.cpp b/src/mongo/db/stats/server_write_concern_metrics.cpp index 7dcedc40ef66a..16e0167b78f04 100644 --- a/src/mongo/db/stats/server_write_concern_metrics.cpp +++ b/src/mongo/db/stats/server_write_concern_metrics.cpp @@ -27,15 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/db/stats/server_write_concern_metrics.h" +#include +#include "mongo/bson/bsonelement.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" +#include "mongo/db/read_write_concern_provenance.h" #include "mongo/db/service_context.h" +#include "mongo/db/stats/server_write_concern_metrics.h" #include "mongo/db/stats/server_write_concern_metrics_gen.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/stats/server_write_concern_metrics.h b/src/mongo/db/stats/server_write_concern_metrics.h index 46a99bab364f9..7c683a12eb87f 100644 --- a/src/mongo/db/stats/server_write_concern_metrics.h +++ b/src/mongo/db/stats/server_write_concern_metrics.h @@ -29,9 +29,16 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/write_concern_options.h" +#include "mongo/platform/mutex.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/stats/single_transaction_stats.cpp b/src/mongo/db/stats/single_transaction_stats.cpp index 18f2d9965ac12..9f434b227ffaf 100644 --- a/src/mongo/db/stats/single_transaction_stats.cpp +++ b/src/mongo/db/stats/single_transaction_stats.cpp @@ -27,9 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include +#include #include "mongo/db/stats/single_transaction_stats.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/stats/single_transaction_stats.h b/src/mongo/db/stats/single_transaction_stats.h index fa987e08a02ed..8111d148fada5 100644 --- a/src/mongo/db/stats/single_transaction_stats.h +++ b/src/mongo/db/stats/single_transaction_stats.h @@ -29,8 +29,25 @@ #pragma once +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" #include "mongo/db/curop.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/tick_source.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/stats/storage_stats.cpp b/src/mongo/db/stats/storage_stats.cpp index f5a4d04689967..fd3dd39c5e796 100644 --- a/src/mongo/db/stats/storage_stats.cpp +++ b/src/mongo/db/stats/storage_stats.cpp @@ -28,25 +28,51 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/index_catalog.h" +#include 
"mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/s/balancer_stats_registry.h" +#include "mongo/db/server_options.h" +#include "mongo/db/stats/storage_stats.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" #include "mongo/db/timeseries/timeseries_stats.h" #include "mongo/logv2/log.h" -#include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/stdx/unordered_map.h" - -#include "mongo/db/stats/storage_stats.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kFTDC @@ -86,6 +112,7 @@ const stdx::unordered_map _mapStorageStatsField void _appendRecordStats(OperationContext* opCtx, const CollectionPtr& collection, const NamespaceString& collNss, + const SerializationContext& serializationCtx, bool isNamespaceAlwaysUnsharded, int scale, bool isTimeseries, @@ -97,7 +124,7 @@ void _appendRecordStats(OperationContext* opCtx, long long numRecords = collection->numRecords(opCtx); if (isTimeseries) { BSONObjBuilder bob(result->subobjStart("timeseries")); - bob.append("bucketsNs", NamespaceStringUtil::serialize(collNss)); + bob.append("bucketsNs", NamespaceStringUtil::serialize(collNss, serializationCtx)); bob.appendNumber("bucketCount", numRecords); if (numRecords) { bob.append("avgBucketSize", collection->averageObjectSize(opCtx)); @@ -255,14 +282,7 @@ void _appendInProgressIndexesStats(OperationContext* opCtx, } } - // Not all indexes in the collection stats may be visible or consistent with our - // snapshot. For this reason, it is unsafe to check `isReady` on the entry, which - // asserts that the index's in-memory state is consistent with our snapshot. - if (!entry->isPresentInMySnapshot(opCtx)) { - continue; - } - - if (!entry->isReadyInMySnapshot(opCtx)) { + if (!entry->isReady()) { indexBuilds.push_back(descriptor->indexName()); } } @@ -296,6 +316,7 @@ void _appendTotalSize(OperationContext* opCtx, Status appendCollectionStorageStats(OperationContext* opCtx, const NamespaceString& nss, const StorageStatsSpec& storageStatsSpec, + const SerializationContext& serializationCtx, BSONObjBuilder* result, const boost::optional& filterObj) { auto scale = storageStatsSpec.getScale().value_or(1); @@ -346,7 +367,7 @@ Status appendCollectionStorageStats(OperationContext* opCtx, result->append("indexSizes", BSONObj()); result->append("scaleFactor", scale); return {ErrorCodes::NamespaceNotFound, - "Collection [" + collNss.toString() + "] not found."}; + "Collection [" + collNss.toStringForErrorMsg() + "] not found."}; } // We will parse all 'filterObj' into different groups of data to compute. 
This groups will be @@ -382,6 +403,7 @@ Status appendCollectionStorageStats(OperationContext* opCtx, _appendRecordStats(opCtx, collection, collNss, + serializationCtx, nss.isNamespaceAlwaysUnsharded(), scale, isTimeseries, @@ -407,7 +429,7 @@ Status appendCollectionRecordCount(OperationContext* opCtx, AutoGetCollectionForReadCommandMaybeLockFree collection(opCtx, nss); if (!collection) { return {ErrorCodes::NamespaceNotFound, - str::stream() << "Collection [" << nss.toString() << "] not found."}; + str::stream() << "Collection [" << nss.toStringForErrorMsg() << "] not found."}; } result->appendNumber("count", static_cast(collection->numRecords(opCtx))); diff --git a/src/mongo/db/stats/storage_stats.h b/src/mongo/db/stats/storage_stats.h index f37b9aba612aa..31b1f34e3997c 100644 --- a/src/mongo/db/stats/storage_stats.h +++ b/src/mongo/db/stats/storage_stats.h @@ -29,12 +29,16 @@ #pragma once +#include +#include + #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/pipeline/storage_stats_spec_gen.h" +#include "mongo/util/serialization_context.h" namespace mongo { @@ -94,6 +98,7 @@ namespace mongo { Status appendCollectionStorageStats(OperationContext* opCtx, const NamespaceString& nss, const StorageStatsSpec& spec, + const SerializationContext& serializationCtx, BSONObjBuilder* builder, const boost::optional& filterObj = boost::none); diff --git a/src/mongo/db/stats/timer_stats.cpp b/src/mongo/db/stats/timer_stats.cpp index 35b1027fff10b..bafad92bf9e63 100644 --- a/src/mongo/db/stats/timer_stats.cpp +++ b/src/mongo/db/stats/timer_stats.cpp @@ -29,6 +29,8 @@ #include "mongo/db/stats/timer_stats.h" +#include "mongo/bson/bsonobjbuilder.h" + namespace mongo { TimerHolder::TimerHolder(TimerStats* stats) : _stats(stats), _recorded(false) {} diff --git a/src/mongo/db/stats/timer_stats.h b/src/mongo/db/stats/timer_stats.h index 029a238577c3d..ad4f47df23988 100644 --- a/src/mongo/db/stats/timer_stats.h +++ b/src/mongo/db/stats/timer_stats.h @@ -29,7 +29,9 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/timer.h" namespace mongo { diff --git a/src/mongo/db/stats/timer_stats_test.cpp b/src/mongo/db/stats/timer_stats_test.cpp index d96acdc178655..23962fc32e31e 100644 --- a/src/mongo/db/stats/timer_stats_test.cpp +++ b/src/mongo/db/stats/timer_stats_test.cpp @@ -27,12 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/stats/timer_stats.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/time_support.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/stats/top.cpp b/src/mongo/db/stats/top.cpp index 85ae1745e8329..5c2f0aaba01ff 100644 --- a/src/mongo/db/stats/top.cpp +++ b/src/mongo/db/stats/top.cpp @@ -28,13 +28,24 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/stats/top.h" - +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/client.h" #include "mongo/db/curop.h" -#include "mongo/db/jsobj.h" #include "mongo/db/service_context.h" +#include "mongo/db/stats/top.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -90,17 +101,18 @@ Top& Top::get(ServiceContext* service) { } void Top::record(OperationContext* opCtx, - StringData ns, + const NamespaceString& nss, LogicalOp logicalOp, LockType lockType, long long micros, bool command, Command::ReadWriteType readWriteType) { - if (ns[0] == '?') + const auto nssStr = NamespaceStringUtil::serialize(nss); + if (nssStr[0] == '?') return; - auto hashedNs = UsageMap::hasher().hashed_key(ns); - stdx::lock_guard lk(_lock); + auto hashedNs = UsageMap::hasher().hashed_key(nssStr); + stdx::lock_guard lk(_lock); CollectionData& coll = _usage[hashedNs]; _record(opCtx, coll, logicalOp, lockType, micros, readWriteType); @@ -114,7 +126,7 @@ void Top::record(OperationContext* opCtx, bool command, Command::ReadWriteType readWriteType) { for (const auto& nss : nssSet) { - record(opCtx, nss.ns(), logicalOp, lockType, micros, command, readWriteType); + record(opCtx, nss, logicalOp, lockType, micros, command, readWriteType); } } @@ -167,17 +179,13 @@ void Top::_record(OperationContext* opCtx, } void Top::collectionDropped(const NamespaceString& nss) { - stdx::lock_guard lk(_lock); - _usage.erase(nss.ns()); -} - -void Top::cloneMap(Top::UsageMap& out) const { - stdx::lock_guard lk(_lock); - out = _usage; + const auto nssStr = NamespaceStringUtil::serialize(nss); + stdx::lock_guard lk(_lock); + _usage.erase(nssStr); } void Top::append(BSONObjBuilder& b) { - stdx::lock_guard lk(_lock); + stdx::lock_guard lk(_lock); _appendToUsageMap(b, _usage); } @@ -225,11 +233,12 @@ void Top::_appendStatsEntry(BSONObjBuilder& b, const char* statsName, const Usag void Top::appendLatencyStats(const NamespaceString& nss, bool includeHistograms, BSONObjBuilder* builder) { - auto hashedNs = UsageMap::hasher().hashed_key(nss.ns()); - stdx::lock_guard lk(_lock); + const auto nssStr = NamespaceStringUtil::serialize(nss); + auto hashedNs = UsageMap::hasher().hashed_key(nssStr); + stdx::lock_guard lk(_lock); BSONObjBuilder latencyStatsBuilder; _usage[hashedNs].opLatencyHistogram.append(includeHistograms, false, &latencyStatsBuilder); - builder->append("ns", NamespaceStringUtil::serialize(nss)); + builder->append("ns", nssStr); builder->append("latencyStats", latencyStatsBuilder.obj()); } @@ -239,19 +248,19 @@ void Top::incrementGlobalLatencyStats(OperationContext* opCtx, if 
(!opCtx->shouldIncrementLatencyStats()) return; - stdx::lock_guard guard(_lock); + stdx::lock_guard guard(_lock); _incrementHistogram(opCtx, latency, &_globalHistogramStats, readWriteType); } void Top::appendGlobalLatencyStats(bool includeHistograms, bool slowMSBucketsOnly, BSONObjBuilder* builder) { - stdx::lock_guard guard(_lock); + stdx::lock_guard guard(_lock); _globalHistogramStats.append(includeHistograms, slowMSBucketsOnly, builder); } void Top::incrementGlobalTransactionLatencyStats(OperationContext* opCtx, uint64_t latency) { - stdx::lock_guard guard(_lock); + stdx::lock_guard guard(_lock); _globalHistogramStats.increment( latency, Command::ReadWriteType::kTransaction, isQuerableEncryptionOperation(opCtx)); } diff --git a/src/mongo/db/stats/top.h b/src/mongo/db/stats/top.h index 49a0da9e7b243..dfb0e3cbaa282 100644 --- a/src/mongo/db/stats/top.h +++ b/src/mongo/db/stats/top.h @@ -34,10 +34,17 @@ */ #include +#include +#include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/stats/operation_latency_histogram.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/message.h" #include "mongo/util/concurrency/mutex.h" #include "mongo/util/string_map.h" @@ -99,7 +106,7 @@ class Top { public: void record(OperationContext* opCtx, - StringData ns, + const NamespaceString& nss, LogicalOp logicalOp, LockType lockType, long long micros, @@ -166,7 +173,7 @@ class Top { OperationLatencyHistogram* histogram, Command::ReadWriteType readWriteType); - mutable SimpleMutex _lock; + Mutex _lock = MONGO_MAKE_LATCH("Top::_lock"); OperationLatencyHistogram _globalHistogramStats; UsageMap _usage; }; diff --git a/src/mongo/db/stats/top_test.cpp b/src/mongo/db/stats/top_test.cpp index c6f7b784bde67..c331028964a39 100644 --- a/src/mongo/db/stats/top_test.cpp +++ b/src/mongo/db/stats/top_test.cpp @@ -27,10 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" #include "mongo/db/stats/top.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/storage/SConscript b/src/mongo/db/storage/SConscript index b7258d7da1c1e..97c3a2afbac24 100644 --- a/src/mongo/db/storage/SConscript +++ b/src/mongo/db/storage/SConscript @@ -32,7 +32,7 @@ env.Library( 'snapshot_helper.cpp', ], LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/db/concurrency/lock_manager_defs', + '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/repl/read_concern_args', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/server_base', @@ -67,7 +67,7 @@ env.Library( env.Library( target='record_store_base', source=[ - 'collection_markers.cpp', + 'collection_truncate_markers.cpp', 'external_record_store.cpp', 'multi_bson_stream_cursor.cpp', 'named_pipe_posix.cpp' if not env.TargetOSIs('windows') else [], @@ -142,6 +142,7 @@ env.Library( '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/server_options', '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', ], ) @@ -205,6 +206,17 @@ env.Library( ], ) +env.Library( + target='concurrency_adjustment_parameters', + source=[ + 'execution_control/concurrency_adjustment_validator.cpp', + 'execution_control/concurrency_adjustment_parameters.idl', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/server_base', + ], +) + env.Library( target='storage_engine_parameters', source=[ @@ -219,6 +231,7 @@ env.Library( '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/util/concurrency/ticketholder', '$BUILD_DIR/mongo/util/processinfo', + 'concurrency_adjustment_parameters', 'storage_engine_feature_flags', ], ) @@ -231,6 +244,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/unittest/unittest', '$BUILD_DIR/mongo/util/concurrency/ticketholder', '$BUILD_DIR/mongo/util/periodic_runner', + 'concurrency_adjustment_parameters', 'storage_engine_parameters', ], ) @@ -275,11 +289,14 @@ env.Library( LIBDEPS=[ '$BUILD_DIR/mongo/db/record_id_helpers', '$BUILD_DIR/mongo/db/service_context', - '$BUILD_DIR/mongo/db/storage/storage_options', '$BUILD_DIR/mongo/unittest/unittest', 'index_entry_comparison', 'key_string', ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/concurrency/lock_manager', + 'storage_options', + ], ) env.Library( @@ -303,6 +320,9 @@ env.Library( ], LIBDEPS=[ 'sorted_data_interface_test_harness', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/concurrency/lock_manager', 'storage_options', ], ) @@ -315,6 +335,7 @@ bmEnv.Library( 'sorted_data_interface_bm_cursor.cpp', ], LIBDEPS=[ + '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/third_party/shim_benchmark', 'sorted_data_interface_test_harness', ], @@ -340,6 +361,7 @@ env.Library( LIBDEPS=[ '$BUILD_DIR/mongo/db/catalog/clustered_collection_options', '$BUILD_DIR/mongo/db/catalog/collection_options', + '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/record_id_helpers', '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/storage/record_store_base', @@ -416,16 +438,6 @@ env.Library( ], ) -env.Library( - target='historical_ident_tracker', - source=[ - 'historical_ident_tracker.cpp', - ], - LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/db/server_base', - ], -) - env.Library( target="write_unit_of_work", source=[ @@ -461,6 +473,16 @@ env.Library( ], ) +env.Library( + target='disk_space_util', + source=[ + 'disk_space_util.cpp', + ], + 
LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/util/fail_point', 'storage_options' + ], +) + env.Library( target='storage_repair_observer', source=[ @@ -540,11 +562,10 @@ env.Library( env.CppUnitTest( target='db_storage_test', source=[ - 'collection_markers_test.cpp', + 'collection_truncate_markers_test.cpp', 'external_record_store_test.cpp', 'disk_space_monitor_test.cpp', 'flow_control_test.cpp', - 'historical_ident_tracker_test.cpp', 'index_entry_comparison_test.cpp', 'key_string_test.cpp', 'kv/durable_catalog_test.cpp', @@ -568,7 +589,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/service_context_d_test_fixture', '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/storage/devnull/storage_devnull_core', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/storage_control', '$BUILD_DIR/mongo/db/storage/storage_engine_impl', '$BUILD_DIR/mongo/db/storage/storage_repair_observer', @@ -580,7 +601,6 @@ env.CppUnitTest( 'disk_space_monitor', 'flow_control', 'flow_control_parameters', - 'historical_ident_tracker', 'key_string', 'kv/kv_drop_pending_ident_reaper', 'record_store_base', @@ -591,21 +611,17 @@ env.CppUnitTest( ) env.Library( - target='durable_catalog_impl', + target='durable_catalog', source=[ - 'durable_catalog_impl.cpp', + 'durable_catalog.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/bson/util/bson_extract', - '$BUILD_DIR/mongo/db/catalog/collection_catalog', - '$BUILD_DIR/mongo/db/concurrency/lock_manager', - '$BUILD_DIR/mongo/db/index/index_access_method', 'bson_collection_catalog_entry', ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/multitenancy', - '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', 'record_store_base', 'storage_options', ], @@ -617,9 +633,10 @@ env.Library( 'storage_util.cpp', ], LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/catalog/collection_catalog', '$BUILD_DIR/mongo/db/multitenancy', '$BUILD_DIR/mongo/db/storage/storage_options', - 'durable_catalog_impl', + 'durable_catalog', 'kv/kv_drop_pending_ident_reaper', ], ) @@ -630,10 +647,8 @@ env.Library( 'disk_space_monitor.cpp', ], LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/db/commands/server_status_core', - '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/service_context', - 'storage_options', + '$BUILD_DIR/mongo/db/commands/server_status_core', '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/db/service_context', 'disk_space_util', 'storage_options' ], ) @@ -668,7 +683,6 @@ env.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/server_base', - 'historical_ident_tracker', 'storage_options', ], ) @@ -682,7 +696,7 @@ env.Library( LIBDEPS=[ '$BUILD_DIR/mongo/db/catalog/catalog_control', '$BUILD_DIR/mongo/db/shard_role', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/kv/kv_drop_pending_ident_reaper', '$BUILD_DIR/mongo/db/storage/storage_options', ], @@ -698,7 +712,6 @@ env.Library( '$BUILD_DIR/mongo/db/storage/storage_repair_observer', '$BUILD_DIR/mongo/db/vector_clock', 'backup_block', - 'historical_ident_tracker', 'storage_control', 'storage_util', 'two_phase_index_build_knobs_idl', diff --git a/src/mongo/db/storage/backup_block.cpp b/src/mongo/db/storage/backup_block.cpp index 357290caeb352..61b4207273d04 100644 --- a/src/mongo/db/storage/backup_block.cpp +++ 
b/src/mongo/db/storage/backup_block.cpp @@ -29,13 +29,13 @@ #include "mongo/db/storage/backup_block.h" -#include +#include #include +#include +#include + #include "mongo/base/string_data.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/storage/durable_catalog.h" -#include "mongo/db/storage/historical_ident_tracker.h" #include "mongo/db/storage/storage_options.h" namespace mongo { @@ -50,17 +50,19 @@ const std::set kRequiredMDBFiles = {"_mdb_catalog.wt", "sizeStorer. } // namespace BackupBlock::BackupBlock(OperationContext* opCtx, + boost::optional nss, + boost::optional uuid, std::string filePath, - const IdentToNamespaceAndUUIDMap& identToNamespaceAndUUIDMap, boost::optional checkpointTimestamp, std::uint64_t offset, std::uint64_t length, std::uint64_t fileSize) - : _filePath(filePath), _offset(offset), _length(length), _fileSize(fileSize) { - boost::filesystem::path path(filePath); - _filenameStem = path.stem().string(); - _initialize(opCtx, identToNamespaceAndUUIDMap, checkpointTimestamp); -} + : _filePath(filePath), + _offset(offset), + _length(length), + _fileSize(fileSize), + _nss(nss), + _uuid(uuid) {} bool BackupBlock::isRequired() const { // Extract the filename from the path. @@ -89,56 +91,21 @@ bool BackupBlock::isRequired() const { return true; } + if (!_nss) { + return false; + } + // Check if collection resides in an internal database (admin, local, or config). - if (_nss.isOnInternalDb()) { + if (_nss->isOnInternalDb()) { return true; } // Check if collection is 'system.views'. - if (_nss.isSystemDotViews()) { + if (_nss->isSystemDotViews()) { return true; } return false; } -void BackupBlock::_setNamespaceString(const NamespaceString& nss) { - // Remove "system.buckets." from time-series collection namespaces since it is an internal - // detail that is not intended to be visible externally. - if (nss.isTimeseriesBucketsCollection()) { - _nss = nss.getTimeseriesViewNamespace(); - return; - } - - _nss = nss; -} - -void BackupBlock::_initialize(OperationContext* opCtx, - const IdentToNamespaceAndUUIDMap& identToNamespaceAndUUIDMap, - boost::optional checkpointTimestamp) { - if (!opCtx) { - return; - } - - // Fetch the latest values for the ident. - auto it = identToNamespaceAndUUIDMap.find(_filenameStem); - if (it != identToNamespaceAndUUIDMap.end()) { - _uuid = it->second.second; - _setNamespaceString(it->second.first); - } - - if (!checkpointTimestamp) { - return; - } - - // Check if the ident had a different value at the checkpoint timestamp. If so, we want to use - // that instead as that will be the ident's value when restoring from the backup. 
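
With the ident-to-namespace map and the HistoricalIdentTracker lookup removed, callers of BackupBlock resolve the namespace and UUID themselves and hand them to the constructor. A usage sketch under that assumption; the boost::optional template arguments (NamespaceString, UUID, Timestamp) are inferred from the surrounding code, and the path, offset, length and size values are placeholders:

// Usage sketch only; concrete values below are made up for the example.
#include <boost/optional.hpp>
#include <cstdint>

#include "mongo/bson/timestamp.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/backup_block.h"
#include "mongo/util/uuid.h"

namespace mongo {
BackupBlock makeBlock(OperationContext* opCtx,
                      boost::optional<NamespaceString> nss,
                      boost::optional<UUID> uuid,
                      boost::optional<Timestamp> checkpointTimestamp,
                      std::uint64_t fileSize) {
    // The namespace and UUID arrive already resolved by the caller; the block
    // no longer consults an ident map or the historical ident tracker.
    return BackupBlock(opCtx,
                       nss,
                       uuid,
                       "/data/db/collection-0.wt",
                       checkpointTimestamp,
                       0 /* offset */,
                       0 /* length */,
                       fileSize);
}
}  // namespace mongo
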
- boost::optional> historicalEntry = - HistoricalIdentTracker::get(opCtx).lookup(_filenameStem, checkpointTimestamp.value()); - if (historicalEntry) { - _uuid = historicalEntry->second; - _setNamespaceString(historicalEntry->first); - } -} - } // namespace mongo diff --git a/src/mongo/db/storage/backup_block.h b/src/mongo/db/storage/backup_block.h index fd96b8e5e2e76..43537f226603d 100644 --- a/src/mongo/db/storage/backup_block.h +++ b/src/mongo/db/storage/backup_block.h @@ -30,6 +30,9 @@ #pragma once #include +#include +#include +#include #include #include "mongo/bson/timestamp.h" @@ -53,12 +56,10 @@ namespace mongo { */ class BackupBlock final { public: - using IdentToNamespaceAndUUIDMap = - stdx::unordered_map>; - explicit BackupBlock(OperationContext* opCtx, + boost::optional nss, + boost::optional uuid, std::string filePath, - const IdentToNamespaceAndUUIDMap& identToNamespaceAndUUIDMap, boost::optional checkpointTimestamp, std::uint64_t offset = 0, std::uint64_t length = 0, @@ -70,8 +71,8 @@ class BackupBlock final { return _filePath; } - std::string ns() const { - return _nss.toString(); + boost::optional ns() const { + return _nss; } std::uint64_t offset() const { @@ -96,27 +97,12 @@ class BackupBlock final { bool isRequired() const; private: - /** - * Sets '_nss' and '_uuid' that is representative of the ident at the checkpoint timestamp for: - * - collections - * - indexes, to the NSS/UUID of their respective collection - * - * The 'checkpointTimestamp' will be boost::none if the backup is being taken on a standalone - * node. - * A null opCtx is ignored. A null opCtx is exercised by FCBIS unit tests. - */ - void _initialize(OperationContext* opCtx, - const IdentToNamespaceAndUUIDMap& identToNamespaceAndUUIDMap, - boost::optional checkpointTimestamp); - void _setNamespaceString(const NamespaceString& nss); - const std::string _filePath; const std::uint64_t _offset; const std::uint64_t _length; const std::uint64_t _fileSize; - std::string _filenameStem; - NamespaceString _nss; + boost::optional _nss; boost::optional _uuid; }; } // namespace mongo diff --git a/src/mongo/db/storage/backup_cursor_hooks.cpp b/src/mongo/db/storage/backup_cursor_hooks.cpp index d656d26509a8d..918c135ca0f25 100644 --- a/src/mongo/db/storage/backup_cursor_hooks.cpp +++ b/src/mongo/db/storage/backup_cursor_hooks.cpp @@ -27,13 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/storage/backup_cursor_hooks.h" - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/service_context.h" +#include "mongo/db/storage/backup_cursor_hooks.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/storage/backup_cursor_hooks.h b/src/mongo/db/storage/backup_cursor_hooks.h index bb7f5b712aadd..f58461b4779f2 100644 --- a/src/mongo/db/storage/backup_cursor_hooks.h +++ b/src/mongo/db/storage/backup_cursor_hooks.h @@ -29,10 +29,17 @@ #pragma once +#include #include +#include #include +#include "mongo/bson/timestamp.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/backup_cursor_state.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/util/uuid.h" namespace mongo { class OperationContext; diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.cpp b/src/mongo/db/storage/bson_collection_catalog_entry.cpp index fe5422a1001bb..977232cb0ed02 100644 --- a/src/mongo/db/storage/bson_collection_catalog_entry.cpp +++ b/src/mongo/db/storage/bson_collection_catalog_entry.cpp @@ -29,12 +29,27 @@ #include "mongo/db/storage/bson_collection_catalog_entry.h" +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include -#include - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/field_ref.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/server_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -48,6 +63,9 @@ const size_t kMaxKeyPatternPathLength = 2048; const std::string kTimeseriesBucketsMayHaveMixedSchemaDataFieldName = "timeseriesBucketsMayHaveMixedSchemaData"; +const std::string kTimeseriesBucketingParametersHaveChanged = + "timeseriesBucketingParametersHaveChanged"; + /** * Encodes 'multikeyPaths' as binary data and appends it to 'bob'. 
* @@ -217,7 +235,7 @@ bool BSONCollectionCatalogEntry::MetaData::eraseIndex(StringData name) { BSONObj BSONCollectionCatalogEntry::MetaData::toBSON(bool hasExclusiveAccess) const { BSONObjBuilder b; - b.append("ns", nss.toStringWithTenantId()); + b.append("ns", NamespaceStringUtil::serializeForCatalog(nss)); b.append("options", options.toBSON()); { BSONArrayBuilder arr(b.subarrayStart("indexes")); @@ -261,11 +279,16 @@ BSONObj BSONCollectionCatalogEntry::MetaData::toBSON(bool hasExclusiveAccess) co *timeseriesBucketsMayHaveMixedSchemaData); } + if (timeseriesBucketingParametersHaveChanged) { + b.append(kTimeseriesBucketingParametersHaveChanged, + *timeseriesBucketingParametersHaveChanged); + } + return b.obj(); } void BSONCollectionCatalogEntry::MetaData::parse(const BSONObj& obj) { - nss = NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode( + nss = NamespaceStringUtil::parseFromStringExpectTenantIdInMultitenancyMode( obj.getStringField("ns").toString()); if (obj["options"].isABSONObj()) { @@ -310,5 +333,10 @@ void BSONCollectionCatalogEntry::MetaData::parse(const BSONObj& obj) { if (!timeseriesMixedSchemaElem.eoo() && timeseriesMixedSchemaElem.isBoolean()) { timeseriesBucketsMayHaveMixedSchemaData = timeseriesMixedSchemaElem.Bool(); } + + BSONElement tsBucketingParametersChangedElem = obj[kTimeseriesBucketingParametersHaveChanged]; + if (!tsBucketingParametersChangedElem.eoo() && tsBucketingParametersChangedElem.isBoolean()) { + timeseriesBucketingParametersHaveChanged = tsBucketingParametersChangedElem.Bool(); + } } } // namespace mongo diff --git a/src/mongo/db/storage/bson_collection_catalog_entry.h b/src/mongo/db/storage/bson_collection_catalog_entry.h index c613b03dc8875..805ba87bd2f37 100644 --- a/src/mongo/db/storage/bson_collection_catalog_entry.h +++ b/src/mongo/db/storage/bson_collection_catalog_entry.h @@ -29,11 +29,22 @@ #pragma once +#include +#include +#include #include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/index/multikey_paths.h" +#include "mongo/db/namespace_string.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -163,6 +174,12 @@ class BSONCollectionCatalogEntry { // up will have this flag set to false by default. This will be boost::none if this catalog // entry is not representing a time-series collection or if FCV < 5.2. boost::optional timeseriesBucketsMayHaveMixedSchemaData; + + // The flag will be set to false at the time of time-series collection creation. For any + // other collection type the flag will be boost::none. If a subsequent collMod operation + // changes either 'bucketRoundingSeconds' or 'bucketMaxSpanSeconds', we set the flag to + // true. 
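
The new timeseriesBucketingParametersHaveChanged flag round-trips through the catalog BSON the same way as the existing mixed-schema flag: it is appended only when set and parsed back as an optional boolean. A condensed, self-contained sketch of that pattern (the field name is taken from the patch; the free functions exist only for illustration):

// Stand-alone sketch of the optional-flag round trip; illustration only, not
// the MetaData implementation.
#include <boost/optional.hpp>

#include "mongo/bson/bsonelement.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {
BSONObj appendBucketingFlag(const boost::optional<bool>& changed) {
    BSONObjBuilder b;
    if (changed) {
        // The field is omitted entirely while the flag is boost::none.
        b.append("timeseriesBucketingParametersHaveChanged", *changed);
    }
    return b.obj();
}

boost::optional<bool> parseBucketingFlag(const BSONObj& obj) {
    BSONElement el = obj["timeseriesBucketingParametersHaveChanged"];
    if (!el.eoo() && el.isBoolean()) {
        return el.Bool();
    }
    return boost::none;
}
}  // namespace mongo
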
+ boost::optional timeseriesBucketingParametersHaveChanged; }; }; } // namespace mongo diff --git a/src/mongo/db/storage/capped_snapshots.cpp b/src/mongo/db/storage/capped_snapshots.cpp index dd7f974b91189..b1a526699cef8 100644 --- a/src/mongo/db/storage/capped_snapshots.cpp +++ b/src/mongo/db/storage/capped_snapshots.cpp @@ -28,6 +28,20 @@ */ #include "mongo/db/storage/capped_snapshots.h" + +#include +#include +#include +#include + +#include + +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" + namespace mongo { auto getCappedSnapshots = RecoveryUnit::Snapshot::declareDecoration(); diff --git a/src/mongo/db/storage/capped_snapshots.h b/src/mongo/db/storage/capped_snapshots.h index 141fee746aabf..732e65646add6 100644 --- a/src/mongo/db/storage/capped_snapshots.h +++ b/src/mongo/db/storage/capped_snapshots.h @@ -29,10 +29,15 @@ #pragma once -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" #include "mongo/db/catalog/capped_visibility.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/platform/basic.h" +#include "mongo/util/string_map.h" namespace mongo { class OperationContext; diff --git a/src/mongo/db/storage/checkpointer.cpp b/src/mongo/db/storage/checkpointer.cpp index 2595526973ed2..1b0a20cf3b3ac 100644 --- a/src/mongo/db/storage/checkpointer.cpp +++ b/src/mongo/db/storage/checkpointer.cpp @@ -28,16 +28,31 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/checkpointer.h" - +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" -#include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/checkpointer.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -73,6 +88,11 @@ void Checkpointer::run() { ThreadClient tc(name(), getGlobalServiceContext()); LOGV2_DEBUG(22307, 1, "Starting thread", "threadName"_attr = name()); + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + while (true) { auto opCtx = tc->makeOperationContext(); @@ -82,6 +102,10 @@ void Checkpointer::run() { // Wait for 'storageGlobalParams.syncdelay' seconds; or until either shutdown is // signaled or a checkpoint is triggered. 
+ LOGV2_DEBUG(7702900, + 1, + "Checkpoint thread sleeping", + "duration"_attr = static_cast(storageGlobalParams.syncdelay)); _sleepCV.wait_for( lock, stdx::chrono::seconds(static_cast(storageGlobalParams.syncdelay)), diff --git a/src/mongo/db/storage/checkpointer.h b/src/mongo/db/storage/checkpointer.h index 6db7d4f006543..f4bb002519752 100644 --- a/src/mongo/db/storage/checkpointer.h +++ b/src/mongo/db/storage/checkpointer.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/util/background.h" diff --git a/src/mongo/db/storage/collection_markers.cpp b/src/mongo/db/storage/collection_markers.cpp deleted file mode 100644 index debc696bc6039..0000000000000 --- a/src/mongo/db/storage/collection_markers.cpp +++ /dev/null @@ -1,580 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/storage/collection_markers.h" - -#include "mongo/db/operation_context.h" -#include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/logv2/log.h" -#include "mongo/util/concurrency/idle_thread_block.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - -namespace mongo { - -// TODO SERVER-74250: Change to slowCollectionSamplingReads once 7.0 is released. 
-MONGO_FAIL_POINT_DEFINE(slowOplogSamplingReads); - -CollectionTruncateMarkers::CollectionTruncateMarkers(CollectionTruncateMarkers&& other) { - stdx::lock_guard lk(other._collectionMarkersReclaimMutex); - stdx::lock_guard lk2(other._markersMutex); - - _currentRecords.store(other._currentRecords.swap(0)); - _currentBytes.store(other._currentBytes.swap(0)); - _minBytesPerMarker = other._minBytesPerMarker; - _markers = std::move(other._markers); - _isDead = other._isDead; -} - -bool CollectionTruncateMarkers::isDead() { - stdx::lock_guard lk(_collectionMarkersReclaimMutex); - return _isDead; -} - -void CollectionTruncateMarkers::kill() { - stdx::lock_guard lk(_collectionMarkersReclaimMutex); - _isDead = true; - _reclaimCv.notify_one(); -} - - -void CollectionTruncateMarkers::awaitHasExcessMarkersOrDead(OperationContext* opCtx) { - // Wait until kill() is called or there are too many collection markers. - stdx::unique_lock lock(_collectionMarkersReclaimMutex); - while (!_isDead) { - { - MONGO_IDLE_THREAD_BLOCK; - stdx::lock_guard lk(_markersMutex); - if (_hasExcessMarkers(opCtx)) { - const auto& oldestMarker = _markers.front(); - invariant(oldestMarker.lastRecord.isValid()); - - LOGV2_DEBUG(7393215, - 2, - "Collection has excess markers", - "lastRecord"_attr = oldestMarker.lastRecord, - "wallTime"_attr = oldestMarker.wallTime); - return; - } - } - _reclaimCv.wait(lock); - } -} - -boost::optional -CollectionTruncateMarkers::peekOldestMarkerIfNeeded(OperationContext* opCtx) const { - stdx::lock_guard lk(_markersMutex); - - if (!_hasExcessMarkers(opCtx)) { - return {}; - } - - return _markers.front(); -} - - -void CollectionTruncateMarkers::popOldestMarker() { - stdx::lock_guard lk(_markersMutex); - _markers.pop_front(); -} - -CollectionTruncateMarkers::Marker& CollectionTruncateMarkers::createNewMarker( - const RecordId& lastRecord, Date_t wallTime) { - return _markers.emplace_back( - _currentRecords.swap(0), _currentBytes.swap(0), lastRecord, wallTime); -} - -void CollectionTruncateMarkers::createNewMarkerIfNeeded(OperationContext* opCtx, - const RecordId& lastRecord, - Date_t wallTime) { - auto logFailedLockAcquisition = [&](const std::string& lock) { - LOGV2_DEBUG(7393214, - 2, - "Failed to acquire lock to check if a new collection marker is needed", - "lock"_attr = lock); - }; - - // Try to lock both mutexes, if we fail to lock a mutex then someone else is either already - // creating a new marker or popping the oldest one. In the latter case, we let the next insert - // trigger the new marker's creation. - stdx::unique_lock reclaimLk(_collectionMarkersReclaimMutex, stdx::try_to_lock); - if (!reclaimLk) { - logFailedLockAcquisition("_collectionMarkersReclaimMutex"); - return; - } - - stdx::unique_lock lk(_markersMutex, stdx::try_to_lock); - if (!lk) { - logFailedLockAcquisition("_markersMutex"); - return; - } - - if (_currentBytes.load() < _minBytesPerMarker) { - // Must have raced to create a new marker, someone else already triggered it. - return; - } - - if (!_markers.empty() && lastRecord < _markers.back().lastRecord) { - // Skip creating a new marker when the record's position comes before the most recently - // created marker. We likely raced with another batch of inserts that caused us to try and - // make multiples markers. 
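In the deleted `createNewMarkerIfNeeded()` above (and in its replacement later in this patch), lock acquisition uses `try_to_lock` so a writer never blocks on marker bookkeeping: if another thread is already creating or reclaiming a marker, the call bails out and a later insert retries. A compact sketch of that short-circuit with standard library types; names are illustrative.

```cpp
#include <iostream>
#include <mutex>

std::mutex markersMutex;  // stands in for _markersMutex

bool tryCreateMarker() {
    std::unique_lock<std::mutex> lk(markersMutex, std::try_to_lock);
    if (!lk.owns_lock()) {
        // Someone else got here first; the next insert triggers the check again.
        return false;
    }
    // ... re-check the byte threshold and append a marker here ...
    return true;
}

int main() {
    std::cout << std::boolalpha << tryCreateMarker() << '\n';  // prints: true
}
```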
- return; - } - - auto& marker = createNewMarker(lastRecord, wallTime); - - LOGV2_DEBUG(7393213, - 2, - "Created a new collection marker", - "lastRecord"_attr = marker.lastRecord, - "wallTime"_attr = marker.wallTime, - "numMarkers"_attr = _markers.size()); - - pokeReclaimThread(opCtx); -} - - -void CollectionTruncateMarkers::updateCurrentMarkerAfterInsertOnCommit( - OperationContext* opCtx, - int64_t bytesInserted, - const RecordId& highestInsertedRecordId, - Date_t wallTime, - int64_t countInserted) { - opCtx->recoveryUnit()->onCommit([collectionMarkers = this, - bytesInserted, - recordId = highestInsertedRecordId, - wallTime, - countInserted](OperationContext* opCtx, auto) { - invariant(bytesInserted >= 0); - invariant(recordId.isValid()); - - collectionMarkers->_currentRecords.addAndFetch(countInserted); - int64_t newCurrentBytes = collectionMarkers->_currentBytes.addAndFetch(bytesInserted); - if (wallTime != Date_t() && newCurrentBytes >= collectionMarkers->_minBytesPerMarker) { - // When other transactions commit concurrently, an uninitialized wallTime may delay - // the creation of a new marker. This delay is limited to the number of concurrently - // running transactions, so the size difference should be inconsequential. - collectionMarkers->createNewMarkerIfNeeded(opCtx, recordId, wallTime); - } - }); -} - -void CollectionTruncateMarkers::clearMarkersOnCommit(OperationContext* opCtx) { - opCtx->recoveryUnit()->onCommit([this](OperationContext*, boost::optional) { - stdx::lock_guard lk(_markersMutex); - - _currentRecords.store(0); - _currentBytes.store(0); - _markers.clear(); - }); -} - -void CollectionTruncateMarkers::updateMarkersAfterCappedTruncateAfter( - int64_t recordsRemoved, int64_t bytesRemoved, const RecordId& firstRemovedId) { - stdx::lock_guard lk(_markersMutex); - - int64_t numMarkersToRemove = 0; - int64_t recordsInMarkersToRemove = 0; - int64_t bytesInMarkersToRemove = 0; - - // Compute the number and associated sizes of the records from markers that are either fully or - // partially truncated. - for (auto it = _markers.rbegin(); it != _markers.rend(); ++it) { - if (it->lastRecord < firstRemovedId) { - break; - } - numMarkersToRemove++; - recordsInMarkersToRemove += it->records; - bytesInMarkersToRemove += it->bytes; - } - - // Remove the markers corresponding to the records that were deleted. - int64_t offset = _markers.size() - numMarkersToRemove; - _markers.erase(_markers.begin() + offset, _markers.end()); - - // Account for any remaining records from a partially truncated marker in the marker currently - // being filled. 
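Both `updateCurrentMarkerAfterInsertOnCommit()` and `clearMarkersOnCommit()` above register their bookkeeping through `RecoveryUnit::onCommit()`, so the marker counters only move if the write unit of work actually commits. A simplified stand-alone model of that deferral; `FakeRecoveryUnit` is a stand-in for the real recovery unit, not its API.

```cpp
#include <functional>
#include <iostream>
#include <vector>

// Side effects registered during a write run only on commit and are dropped
// on rollback.
class FakeRecoveryUnit {
public:
    void onCommit(std::function<void()> cb) { _callbacks.push_back(std::move(cb)); }
    void commit() {
        for (auto& cb : _callbacks) cb();
        _callbacks.clear();
    }
    void abort() { _callbacks.clear(); }

private:
    std::vector<std::function<void()>> _callbacks;
};

int main() {
    long long currentBytes = 0;

    FakeRecoveryUnit committed;
    committed.onCommit([&] { currentBytes += 512; });  // bytesInserted
    committed.commit();

    FakeRecoveryUnit rolledBack;
    rolledBack.onCommit([&] { currentBytes += 512; });
    rolledBack.abort();  // counters untouched

    std::cout << currentBytes << '\n';  // prints: 512 — only the committed insert counted
}
```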
- _currentRecords.addAndFetch(recordsInMarkersToRemove - recordsRemoved); - _currentBytes.addAndFetch(bytesInMarkersToRemove - bytesRemoved); -} - -void CollectionTruncateMarkers::setMinBytesPerMarker(int64_t size) { - invariant(size > 0); - - stdx::lock_guard lk(_markersMutex); - - _minBytesPerMarker = size; -} - - -void CollectionTruncateMarkers::pokeReclaimThread(OperationContext* opCtx) { - _reclaimCv.notify_one(); -} - -CollectionTruncateMarkers::InitialSetOfMarkers CollectionTruncateMarkers::createMarkersByScanning( - OperationContext* opCtx, - RecordStore* rs, - const NamespaceString& ns, - int64_t minBytesPerMarker, - std::function getRecordIdAndWallTime) { - auto startTime = curTimeMicros64(); - LOGV2_INFO(7393212, - "Scanning collection to determine where to place markers for truncation", - "namespace"_attr = ns); - - int64_t numRecords = 0; - int64_t dataSize = 0; - - auto cursor = rs->getCursor(opCtx, true); - - int64_t currentRecords = 0; - int64_t currentBytes = 0; - - std::deque markers; - - while (auto record = cursor->next()) { - currentRecords++; - currentBytes += record->data.size(); - if (currentBytes >= minBytesPerMarker) { - auto [rId, wallTime] = getRecordIdAndWallTime(*record); - - LOGV2_DEBUG(7393211, - 1, - "Marking entry as a potential future truncation point", - "wall"_attr = wallTime); - - markers.emplace_back( - std::exchange(currentRecords, 0), std::exchange(currentBytes, 0), rId, wallTime); - } - - numRecords++; - dataSize += record->data.size(); - } - - rs->updateStatsAfterRepair(opCtx, numRecords, dataSize); - auto endTime = curTimeMicros64(); - return CollectionTruncateMarkers::InitialSetOfMarkers{ - std::move(markers), - currentRecords, - currentBytes, - Microseconds{static_cast(endTime - startTime)}, - MarkersCreationMethod::Scanning}; -} - - -CollectionTruncateMarkers::InitialSetOfMarkers CollectionTruncateMarkers::createMarkersBySampling( - OperationContext* opCtx, - RecordStore* rs, - const NamespaceString& ns, - int64_t estimatedRecordsPerMarker, - int64_t estimatedBytesPerMarker, - std::function getRecordIdAndWallTime) { - auto startTime = curTimeMicros64(); - - LOGV2_INFO(7393210, - "Sampling the collection to determine where to place markers for truncation", - "namespace"_attr = ns); - RecordId earliestRecordId, latestRecordId; - - { - const bool forward = true; - auto cursor = rs->getCursor(opCtx, forward); - auto record = cursor->next(); - if (!record) { - // This shouldn't really happen unless the size storer values are far off from reality. - // The collection is probably empty, but fall back to scanning the collection just in - // case. - LOGV2(7393209, - "Failed to determine the earliest recordId, falling back to scanning the " - "collection", - "namespace"_attr = ns); - return CollectionTruncateMarkers::createMarkersByScanning( - opCtx, rs, ns, estimatedBytesPerMarker, std::move(getRecordIdAndWallTime)); - } - earliestRecordId = record->id; - } - - { - const bool forward = false; - auto cursor = rs->getCursor(opCtx, forward); - auto record = cursor->next(); - if (!record) { - // This shouldn't really happen unless the size storer values are far off from reality. - // The collection is probably empty, but fall back to scanning the collection just in - // case. 
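The arithmetic at the top of this hunk finishes `updateMarkersAfterCappedTruncateAfter()`: markers at or past the truncate point are erased, and `recordsInMarkersToRemove - recordsRemoved` folds the surviving tail of the newest erased marker back into the partial marker. A small worked example with illustrative record ids, counts, and a 100-byte record size.

```cpp
#include <cstdint>
#include <deque>
#include <iostream>

struct Marker {
    int64_t records, bytes, lastRecordId;
};

int main() {
    // Markers cover ids 6-10, 16-20 and 26-30; the partial marker holds ids 36 and 40.
    std::deque<Marker> markers = {{5, 500, 10}, {5, 500, 20}, {5, 500, 30}};
    int64_t currentRecords = 2, currentBytes = 200;

    // A capped truncate removes every id >= 28: ids 28, 29, 30, 36 and 40.
    const int64_t firstRemovedId = 28;
    const int64_t recordsRemoved = 5, bytesRemoved = 500;

    int64_t markersToRemove = 0, recordsInRemoved = 0, bytesInRemoved = 0;
    for (auto it = markers.rbegin(); it != markers.rend(); ++it) {
        if (it->lastRecordId < firstRemovedId)
            break;
        ++markersToRemove;
        recordsInRemoved += it->records;
        bytesInRemoved += it->bytes;
    }
    markers.erase(markers.end() - markersToRemove, markers.end());

    // Ids 26 and 27 survived but their marker is gone, so they re-enter the
    // partial marker; the partial marker's own removed ids (36, 40) are subtracted.
    currentRecords += recordsInRemoved - recordsRemoved;  // 2 + (5 - 5) = 2
    currentBytes += bytesInRemoved - bytesRemoved;        // 200 + (500 - 500) = 200

    std::cout << markers.size() << ' ' << currentRecords << ' ' << currentBytes << '\n';
    // prints: 2 2 200
}
```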
- LOGV2( - 7393208, - "Failed to determine the latest recordId, falling back to scanning the collection", - "namespace"_attr = ns); - return CollectionTruncateMarkers::createMarkersByScanning( - opCtx, rs, ns, estimatedBytesPerMarker, std::move(getRecordIdAndWallTime)); - } - latestRecordId = record->id; - } - - LOGV2(7393207, - "Sampling from the collection to determine where to place markers for truncation", - "namespace"_attr = ns, - "from"_attr = earliestRecordId, - "to"_attr = latestRecordId); - - int64_t wholeMarkers = rs->numRecords(opCtx) / estimatedRecordsPerMarker; - // We don't use the wholeMarkers variable here due to integer division not being associative. - // For example, 10 * (47500 / 28700) = 10, but (10 * 47500) / 28700 = 16. - int64_t numSamples = - (kRandomSamplesPerMarker * rs->numRecords(opCtx)) / estimatedRecordsPerMarker; - - LOGV2(7393216, - "Taking samples and assuming each collection section contains equal amounts", - "namespace"_attr = ns, - "numSamples"_attr = numSamples, - "containsNumRecords"_attr = estimatedRecordsPerMarker, - "containsNumBytes"_attr = estimatedBytesPerMarker); - - // Divide the collection into 'wholeMarkers' logical sections, with each section containing - // approximately 'estimatedRecordsPerMarker'. Do so by oversampling the collection, sorting the - // samples in order of their RecordId, and then choosing the samples expected to be near the - // right edge of each logical section. - auto cursor = rs->getRandomCursor(opCtx); - std::vector collectionEstimates; - Timer lastProgressTimer; - for (int i = 0; i < numSamples; ++i) { - auto samplingLogIntervalSeconds = gCollectionSamplingLogIntervalSeconds.load(); - slowOplogSamplingReads.execute( - [&](const BSONObj& dataObj) { sleepsecs(dataObj["delay"].numberInt()); }); - auto record = cursor->next(); - if (!record) { - // This shouldn't really happen unless the size storer values are far off from reality. - // The collection is probably empty, but fall back to scanning the collection just in - // case. - LOGV2(7393206, - "Failed to get enough random samples, falling back to scanning the collection", - "namespace"_attr = ns); - return CollectionTruncateMarkers::createMarkersByScanning( - opCtx, rs, ns, estimatedBytesPerMarker, std::move(getRecordIdAndWallTime)); - } - - collectionEstimates.emplace_back(getRecordIdAndWallTime(*record)); - - if (samplingLogIntervalSeconds > 0 && - lastProgressTimer.elapsed() >= Seconds(samplingLogIntervalSeconds)) { - LOGV2(7393217, - "Collection sampling progress", - "namespace"_attr = ns, - "completed"_attr = (i + 1), - "total"_attr = numSamples); - lastProgressTimer.reset(); - } - } - std::sort( - collectionEstimates.begin(), - collectionEstimates.end(), - [](const RecordIdAndWallTime& a, const RecordIdAndWallTime& b) { return a.id < b.id; }); - LOGV2(7393205, "Collection sampling complete", "namespace"_attr = ns); - - std::deque markers; - for (int i = 1; i <= wholeMarkers; ++i) { - // Use every (kRandomSamplesPerMarker)th sample, starting with the - // (kRandomSamplesPerMarker - 1)th, as the last record for each marker. - // If parsing "wall" fails, we crash to allow user to fix their collection. 
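The sampling code above computes `numSamples` by multiplying before dividing because, as its comment notes, integer division discards the remainder and the two orderings can differ substantially. A tiny check of the example values from that comment:

```cpp
#include <cstdint>
#include <iostream>

int main() {
    const int64_t samplesPerMarker = 10;     // kRandomSamplesPerMarker
    const int64_t numRecords = 47500;
    const int64_t recordsPerMarker = 28700;  // estimatedRecordsPerMarker

    // Dividing first truncates 47500/28700 to 1 and loses most of the samples.
    std::cout << samplesPerMarker * (numRecords / recordsPerMarker) << '\n';  // 10
    // Multiplying first keeps the precision until the final division.
    std::cout << (samplesPerMarker * numRecords) / recordsPerMarker << '\n';  // 16
}
```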
- const auto& [id, wallTime] = collectionEstimates[kRandomSamplesPerMarker * i - 1]; - - LOGV2_DEBUG(7393204, - 1, - "Marking entry as a potential future truncation point", - "namespace"_attr = ns, - "wall"_attr = wallTime, - "ts"_attr = id); - - markers.emplace_back(estimatedRecordsPerMarker, estimatedBytesPerMarker, id, wallTime); - } - - // Account for the partially filled chunk. - auto currentRecords = rs->numRecords(opCtx) - estimatedRecordsPerMarker * wholeMarkers; - auto currentBytes = rs->dataSize(opCtx) - estimatedBytesPerMarker * wholeMarkers; - return CollectionTruncateMarkers::InitialSetOfMarkers{ - std::move(markers), - currentRecords, - currentBytes, - Microseconds{static_cast(curTimeMicros64() - startTime)}, - MarkersCreationMethod::Sampling}; -} - -CollectionTruncateMarkers::InitialSetOfMarkers -CollectionTruncateMarkers::createFromExistingRecordStore( - OperationContext* opCtx, - RecordStore* rs, - const NamespaceString& ns, - int64_t minBytesPerMarker, - std::function getRecordIdAndWallTime, - boost::optional numberOfMarkersToKeepLegacy) { - - long long numRecords = rs->numRecords(opCtx); - long long dataSize = rs->dataSize(opCtx); - - LOGV2(7393203, - "The size storer reports that the collection contains", - "numRecords"_attr = numRecords, - "dataSize"_attr = dataSize); - - // Don't calculate markers if this is a new collection. This is to prevent standalones from - // attempting to get a forward scanning cursor on an explicit create of the collection. These - // values can be wrong. The assumption is that if they are both observed to be zero, there must - // be very little data in the collection; the cost of being wrong is imperceptible. - if (numRecords == 0 && dataSize == 0) { - return CollectionTruncateMarkers::InitialSetOfMarkers{ - {}, 0, 0, Microseconds{0}, MarkersCreationMethod::EmptyCollection}; - } - - // Only use sampling to estimate where to place the collection markers if the number of samples - // drawn is less than 5% of the collection. - const uint64_t kMinSampleRatioForRandCursor = 20; - - // If the collection doesn't contain enough records to make sampling more efficient, then scan - // the collection to determine where to put down markers. - // - // Unless preserving legacy behavior, compute the number of markers which would be generated - // based on the estimated data size. - auto numMarkers = numberOfMarkersToKeepLegacy ? numberOfMarkersToKeepLegacy.get() - : dataSize / minBytesPerMarker; - if (numRecords <= 0 || dataSize <= 0 || - uint64_t(numRecords) < - kMinSampleRatioForRandCursor * kRandomSamplesPerMarker * numMarkers) { - return CollectionTruncateMarkers::createMarkersByScanning( - opCtx, rs, ns, minBytesPerMarker, std::move(getRecordIdAndWallTime)); - } - - // Use the collection's average record size to estimate the number of records in each marker, - // and thus estimate the combined size of the records. 
- double avgRecordSize = double(dataSize) / double(numRecords); - double estimatedRecordsPerMarker = std::ceil(minBytesPerMarker / avgRecordSize); - double estimatedBytesPerMarker = estimatedRecordsPerMarker * avgRecordSize; - - return CollectionTruncateMarkers::createMarkersBySampling(opCtx, - rs, - ns, - (int64_t)estimatedRecordsPerMarker, - (int64_t)estimatedBytesPerMarker, - std::move(getRecordIdAndWallTime)); -} - -CollectionTruncateMarkersWithPartialExpiration::CollectionTruncateMarkersWithPartialExpiration( - CollectionTruncateMarkersWithPartialExpiration&& other) - : CollectionTruncateMarkers(std::move(other)) { - stdx::lock_guard lk3(other._lastHighestRecordMutex); - _lastHighestRecordId = std::exchange(other._lastHighestRecordId, RecordId()); - _lastHighestWallTime = std::exchange(other._lastHighestWallTime, Date_t()); -} - -void CollectionTruncateMarkersWithPartialExpiration::updateCurrentMarkerAfterInsertOnCommit( - OperationContext* opCtx, - int64_t bytesInserted, - const RecordId& highestInsertedRecordId, - Date_t wallTime, - int64_t countInserted) { - opCtx->recoveryUnit()->onCommit([collectionMarkers = this, - bytesInserted, - recordId = highestInsertedRecordId, - wallTime, - countInserted](OperationContext* opCtx, auto) { - invariant(bytesInserted >= 0); - invariant(recordId.isValid()); - - // By putting the highest marker modification first we can guarantee than in the - // event of a race condition between expiring a partial marker the metrics increase - // will happen after the marker has been created. This guarantees that the metrics - // will eventually be correct as long as the expiration criteria checks for the - // metrics and the highest marker expiration. - collectionMarkers->_replaceNewHighestMarkingIfNecessary(recordId, wallTime); - collectionMarkers->_currentRecords.addAndFetch(countInserted); - int64_t newCurrentBytes = collectionMarkers->_currentBytes.addAndFetch(bytesInserted); - if (wallTime != Date_t() && newCurrentBytes >= collectionMarkers->_minBytesPerMarker) { - // When other transactions commit concurrently, an uninitialized wallTime may delay - // the creation of a new marker. This delay is limited to the number of concurrently - // running transactions, so the size difference should be inconsequential. - collectionMarkers->createNewMarkerIfNeeded(opCtx, recordId, wallTime); - } - }); -} - -void CollectionTruncateMarkersWithPartialExpiration::createPartialMarkerIfNecessary( - OperationContext* opCtx) { - auto logFailedLockAcquisition = [&](const std::string& lock) { - LOGV2_DEBUG(7393202, - 2, - "Failed to acquire lock to check if a new partial collection marker is needed", - "lock"_attr = lock); - }; - - // Try to lock all mutexes, if we fail to lock a mutex then someone else is either already - // creating a new marker or popping the oldest one. In the latter case, we let the next check - // trigger the new partial marker's creation. - stdx::unique_lock reclaimLk(_collectionMarkersReclaimMutex, stdx::try_to_lock); - if (!reclaimLk) { - logFailedLockAcquisition("_collectionMarkersReclaimMutex"); - return; - } - - stdx::unique_lock lk(_markersMutex, stdx::try_to_lock); - if (!lk) { - logFailedLockAcquisition("_markersMutex"); - return; - } - - stdx::unique_lock markerLk(_lastHighestRecordMutex, stdx::try_to_lock); - if (!markerLk) { - logFailedLockAcquisition("_lastHighestRecordMutex"); - return; - } - - if (_currentBytes.load() == 0 && _currentRecords.load() == 0) { - // Nothing can be used for a marker. Early exit now. 
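`createFromExistingRecordStore()` above chooses between scanning and sampling: it estimates the marker count from the data size, and only samples when the `kRandomSamplesPerMarker` samples per marker would amount to at most 1/`kMinSampleRatioForRandCursor` (5%) of the records; otherwise a full scan is cheaper and exact. A worked example of that decision with illustrative collection sizes:

```cpp
#include <cstdint>
#include <iostream>

int main() {
    const int64_t kMinSampleRatioForRandCursor = 20;
    const int64_t kRandomSamplesPerMarker = 10;

    // Illustrative values only.
    const int64_t numRecords = 1'000'000;
    const int64_t dataSize = 50LL * 1024 * 1024 * 1024;     // 50 GiB
    const int64_t minBytesPerMarker = 256LL * 1024 * 1024;  // 256 MiB

    const int64_t numMarkers = dataSize / minBytesPerMarker;             // 200
    const int64_t samplesNeeded = kRandomSamplesPerMarker * numMarkers;  // 2000

    // Sample only when the samples are at most 5% of the records (here 2000 <= 50000).
    const bool useSampling =
        numRecords >= kMinSampleRatioForRandCursor * samplesNeeded;
    std::cout << (useSampling ? "sampling" : "scanning") << '\n';  // prints: sampling
}
```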
- return; - } - - if (_hasPartialMarkerExpired(opCtx)) { - auto& marker = createNewMarker(_lastHighestRecordId, _lastHighestWallTime); - - LOGV2_DEBUG(7393201, - 2, - "Created a new partial collection marker", - "lastRecord"_attr = marker.lastRecord, - "wallTime"_attr = marker.wallTime, - "numMarkers"_attr = _markers.size()); - pokeReclaimThread(opCtx); - } -} - -void CollectionTruncateMarkersWithPartialExpiration::_replaceNewHighestMarkingIfNecessary( - const RecordId& rId, Date_t wallTime) { - stdx::unique_lock lk(_lastHighestRecordMutex); - _lastHighestRecordId = std::max(_lastHighestRecordId, rId); - _lastHighestWallTime = std::max(_lastHighestWallTime, wallTime); -} -} // namespace mongo diff --git a/src/mongo/db/storage/collection_markers.h b/src/mongo/db/storage/collection_markers.h deleted file mode 100644 index 05ce25909186c..0000000000000 --- a/src/mongo/db/storage/collection_markers.h +++ /dev/null @@ -1,306 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include - -#include "mongo/db/record_id.h" -#include "mongo/db/storage/record_store.h" -#include "mongo/platform/atomic_word.h" -#include "mongo/platform/mutex.h" -#include "mongo/stdx/condition_variable.h" - -namespace mongo { - -class OperationContext; - - -// Keep "markers" against a collection to efficiently remove ranges of old records when the -// collection grows. This class is meant to be used only with collections that have the following -// requirements: -// * The collection is an insert-only collection -// * The collection has no indexes -// * If a record with RecordId=Max has to be deleted then all previous records with RecordId D such -// that Min < D <= Max should be deleted. With RecordID=Min defining a lower boundary. -// -// If these requirements hold then this class can be used to compute and maintain up-to-date markers -// for ranges of deletions. These markers will be expired and returned to the deleter whenever the -// implementation defined '_hasExcessMarkers' returns true. -class CollectionTruncateMarkers { -public: - /** Markers represent "waypoints" of the collection that contain information between the current - * marker and the previous one. 
- * - * Markers are created by the class automatically whenever there are more than X number of bytes - * between the previous marker and the latest insertion. - * - * 'partial marker' - * |___________________|......|____________________|______ - * Oldest Marker Newest Marker - * Min rid <-------------------------------------------------<------- Max rid - * - * A 'Marker' is not created until it is full or its creation is requested by a caller. A - * 'partial marker' is not of type 'Marker', but rather metadata counting incoming records and - * bytes until it can be used to construct a 'Marker'. - * - * Marker - * |__________________| - * lastRecord - */ - struct Marker { - int64_t records; // Approximate number of records between the current marker and the - // previous marker. - int64_t bytes; // Approximate size of records between the current marker and the - // previous marker. - RecordId lastRecord; // RecordId of the record that created this marker. - Date_t wallTime; // Walltime of the record that created this marker. - - Marker(int64_t records, int64_t bytes, RecordId lastRecord, Date_t wallTime) - : records(records), - bytes(bytes), - lastRecord(std::move(lastRecord)), - wallTime(wallTime) {} - }; - - - CollectionTruncateMarkers(std::deque markers, - int64_t leftoverRecordsCount, - int64_t leftoverRecordsBytes, - int64_t minBytesPerMarker) - : _minBytesPerMarker(minBytesPerMarker), - _currentRecords(leftoverRecordsCount), - _currentBytes(leftoverRecordsBytes), - _markers(std::move(markers)) {} - - /** - * Whether the instance is going to get destroyed. - */ - bool isDead(); - - /** - * Mark this instance as serving a non-existent RecordStore. This is the case if either the - * RecordStore has been deleted or we're shutting down. Doing this will mark the instance as - * ready for destruction. - */ - void kill(); - - void awaitHasExcessMarkersOrDead(OperationContext* opCtx); - - boost::optional peekOldestMarkerIfNeeded(OperationContext* opCtx) const; - - void popOldestMarker(); - - void createNewMarkerIfNeeded(OperationContext* opCtx, - const RecordId& lastRecord, - Date_t wallTime); - - // Updates the current marker with the inserted value if the operation commits the WUOW. - virtual void updateCurrentMarkerAfterInsertOnCommit(OperationContext* opCtx, - int64_t bytesInserted, - const RecordId& highestInsertedRecordId, - Date_t wallTime, - int64_t countInserted); - - // Clears all the markers of the instance whenever the current WUOW commits. - void clearMarkersOnCommit(OperationContext* opCtx); - - // Updates the metadata about the collection markers after a rollback occurs. - void updateMarkersAfterCappedTruncateAfter(int64_t recordsRemoved, - int64_t bytesRemoved, - const RecordId& firstRemovedId); - - // The method used for creating the initial set of markers. - enum class MarkersCreationMethod { EmptyCollection, Scanning, Sampling }; - // The initial set of markers to use when constructing the CollectionMarkers object. - struct InitialSetOfMarkers { - std::deque markers; - int64_t leftoverRecordsCount; - int64_t leftoverRecordsBytes; - Microseconds timeTaken; - MarkersCreationMethod methodUsed; - }; - struct RecordIdAndWallTime { - RecordId id; - Date_t wall; - - RecordIdAndWallTime(RecordId lastRecord, Date_t wallTime) - : id(std::move(lastRecord)), wall(std::move(wallTime)) {} - }; - - // Creates the initial set of markers. This will decide whether to perform a collection scan or - // sampling based on the size of the collection. 
- // - // 'numberOfMarkersToKeepLegacy' exists solely to maintain legacy behavior of - // 'OplogTruncateMarkers' previously known as 'OplogStones'. It serves as the maximum number of - // truncate markers to keep before reclaiming the oldest truncate markers. - static InitialSetOfMarkers createFromExistingRecordStore( - OperationContext* opCtx, - RecordStore* rs, - const NamespaceString& ns, - int64_t minBytesPerMarker, - std::function getRecordIdAndWallTime, - boost::optional numberOfMarkersToKeepLegacy = boost::none); - - // Creates the initial set of markers by fully scanning the collection. The set of markers - // returned will have correct metrics. - static InitialSetOfMarkers createMarkersByScanning( - OperationContext* opCtx, - RecordStore* rs, - const NamespaceString& ns, - int64_t minBytesPerMarker, - std::function getRecordIdAndWallTime); - - // Creates the initial set of markers by sampling the collection. The set of markers - // returned will have approximate metrics. The metrics of each marker will be equal and contain - // the collection's size and record count divided by the number of markers. - static InitialSetOfMarkers createMarkersBySampling( - OperationContext* opCtx, - RecordStore* rs, - const NamespaceString& ns, - int64_t estimatedRecordsPerMarker, - int64_t estimatedBytesPerMarker, - std::function getRecordIdAndWallTime); - - void setMinBytesPerMarker(int64_t size); - - // - // The following methods are public only for use in tests. - // - - size_t numMarkers() const { - stdx::lock_guard lk(_markersMutex); - return _markers.size(); - } - - int64_t currentBytes() const { - return _currentBytes.load(); - } - - int64_t currentRecords() const { - return _currentRecords.load(); - } - -private: - friend class CollectionTruncateMarkersWithPartialExpiration; - - // Used to decide whether the oldest marker has expired. Implementations are free to use - // whichever process they want to discern if there are expired markers. - // This method will get called holding the _collectionMarkersReclaimMutex and _markersMutex. - virtual bool _hasExcessMarkers(OperationContext* opCtx) const = 0; - - static constexpr uint64_t kRandomSamplesPerMarker = 10; - - Mutex _collectionMarkersReclaimMutex = - MONGO_MAKE_LATCH("CollectionTruncateMarkers::_collectionMarkersReclaimMutex"); - stdx::condition_variable _reclaimCv; - - // True if '_rs' has been destroyed, e.g. due to repairDatabase being called on the collection's - // database, and false otherwise. - bool _isDead = false; - - // Minimum number of bytes the marker being filled should contain before it gets added to the - // deque of collection markers. - int64_t _minBytesPerMarker; - - AtomicWord _currentRecords; // Number of records in the marker being filled. - AtomicWord _currentBytes; // Number of bytes in the marker being filled. - - // Protects against concurrent access to the deque of collection markers. - mutable Mutex _markersMutex = MONGO_MAKE_LATCH("CollectionTruncateMarkers::_markersMutex"); - std::deque _markers; // front = oldest, back = newest. - -protected: - CollectionTruncateMarkers(CollectionTruncateMarkers&& other); - - const std::deque& getMarkers() const { - return _markers; - } - - void pokeReclaimThread(OperationContext* opCtx); - - Marker& createNewMarker(const RecordId& lastRecord, Date_t wallTime); -}; - -/** - * An extension of CollectionTruncateMarkers that provides support for creating "partial markers". 
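Taken together, the interface removed here (and reintroduced as `collection_truncate_markers.h` later in this patch) describes a simple flow: committed inserts accumulate into a partial marker, a full marker is cut once `_minBytesPerMarker` is reached, and a reclaimer pops the oldest markers while the subclass's `_hasExcessMarkers()` policy reports an excess. A much-simplified, single-threaded model of that flow; the names and the keep-at-most-two-markers policy are illustrative only.

```cpp
#include <cstdint>
#include <deque>
#include <iostream>

struct Marker {
    int64_t records;
    int64_t bytes;
    int64_t lastRecordId;  // stands in for RecordId
};

class ToyTruncateMarkers {
public:
    explicit ToyTruncateMarkers(int64_t minBytesPerMarker)
        : _minBytesPerMarker(minBytesPerMarker) {}

    // Called after each committed insert (cf. updateCurrentMarkerAfterInsertOnCommit).
    void onInsert(int64_t bytes, int64_t recordId) {
        _currentRecords += 1;
        _currentBytes += bytes;
        if (_currentBytes >= _minBytesPerMarker) {
            _markers.push_back({_currentRecords, _currentBytes, recordId});
            _currentRecords = 0;
            _currentBytes = 0;
        }
    }

    // Stand-in for the implementation-defined _hasExcessMarkers() policy.
    bool hasExcessMarkers() const { return _markers.size() > 2; }

    // Reclaim loop: peek the oldest marker, truncate everything up to its
    // lastRecordId, then pop it (cf. peekOldestMarkerIfNeeded / popOldestMarker).
    void reclaim() {
        while (hasExcessMarkers()) {
            const Marker& oldest = _markers.front();
            std::cout << "truncate records <= " << oldest.lastRecordId << '\n';
            _markers.pop_front();
        }
    }

private:
    int64_t _minBytesPerMarker;
    int64_t _currentRecords = 0;
    int64_t _currentBytes = 0;
    std::deque<Marker> _markers;  // front = oldest, back = newest
};

int main() {
    ToyTruncateMarkers markers(100);
    for (int64_t id = 1; id <= 10; ++id) {
        markers.onInsert(40, id);  // a marker is cut every third insert
    }
    markers.reclaim();  // prints: truncate records <= 3
}
```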
- * - * Partial markers are normal markers that can be requested by the user calling - * CollectionTruncateMarkersWithPartialExpiration::createPartialMarkerIfNecessary. The - * implementation will then consider whether the current data awaiting a marker should be deleted - * according to some internal logic. This is useful in time-based expiration systems as there could - * be low activity collections containing data that should be expired but won't because there is no - * marker. - */ -class CollectionTruncateMarkersWithPartialExpiration : public CollectionTruncateMarkers { -public: - CollectionTruncateMarkersWithPartialExpiration(std::deque markers, - int64_t leftoverRecordsCount, - int64_t leftoverRecordsBytes, - int64_t minBytesPerMarker) - : CollectionTruncateMarkers( - std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker) {} - - // Creates a partially filled marker if necessary. The criteria used is whether there is data in - // the partial marker and whether the implementation's '_hasPartialMarkerExpired' returns true. - void createPartialMarkerIfNecessary(OperationContext* opCtx); - - virtual void updateCurrentMarkerAfterInsertOnCommit(OperationContext* opCtx, - int64_t bytesInserted, - const RecordId& highestInsertedRecordId, - Date_t wallTime, - int64_t countInserted) final; - -private: - // Highest marker seen during the lifetime of the class. Modifications must happen - // while holding '_lastHighestRecordMutex'. - mutable Mutex _lastHighestRecordMutex = - MONGO_MAKE_LATCH("CollectionTruncateMarkersWithPartialExpiration::_lastHighestRecordMutex"); - RecordId _lastHighestRecordId; - Date_t _lastHighestWallTime; - - // Replaces the highest marker if _isMarkerLargerThanHighest returns true. - void _replaceNewHighestMarkingIfNecessary(const RecordId& newMarkerRecordId, - Date_t newMarkerWallTime); - - // Used to decide if the current partially built marker has expired. - virtual bool _hasPartialMarkerExpired(OperationContext* opCtx) const { - return false; - } - -protected: - CollectionTruncateMarkersWithPartialExpiration( - CollectionTruncateMarkersWithPartialExpiration&& other); - - std::pair getPartialMarker() const { - return {_lastHighestRecordId, _lastHighestWallTime}; - } -}; - -} // namespace mongo diff --git a/src/mongo/db/storage/collection_markers_test.cpp b/src/mongo/db/storage/collection_markers_test.cpp deleted file mode 100644 index cd012d18865e0..0000000000000 --- a/src/mongo/db/storage/collection_markers_test.cpp +++ /dev/null @@ -1,441 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. 
You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/storage/collection_markers.h" -#include "mongo/db/storage/storage_engine_test_fixture.h" -#include "mongo/unittest/unittest.h" - -namespace mongo { - -class CollectionMarkersTest : public StorageEngineTest { -public: - explicit CollectionMarkersTest(Options options = {}) : StorageEngineTest(std::move(options)) {} - - struct RecordIdAndWall { - RecordId recordId; - Date_t wallTime; - }; - std::vector insertElementsWithCollectionMarkerUpdate( - OperationContext* opCtx, - const NamespaceString& nss, - CollectionTruncateMarkers& testMarkers, - int numElements, - int dataLength) { - std::vector records; - AutoGetCollection coll(opCtx, nss, MODE_IX); - const auto insertedData = std::string(dataLength, 'a'); - WriteUnitOfWork wuow(opCtx); - for (int i = 0; i < numElements; i++) { - auto now = Date_t::now(); - auto ts = Timestamp(now); - auto recordIdStatus = coll.getCollection()->getRecordStore()->insertRecord( - opCtx, insertedData.data(), insertedData.length(), ts); - ASSERT_OK(recordIdStatus); - auto recordId = recordIdStatus.getValue(); - testMarkers.updateCurrentMarkerAfterInsertOnCommit( - opCtx, insertedData.length(), recordId, now, 1); - records.push_back(RecordIdAndWall{std::move(recordId), std::move(now)}); - } - wuow.commit(); - return records; - } - - RecordIdAndWall insertElementWithCollectionMarkerUpdate(OperationContext* opCtx, - const NamespaceString& nss, - CollectionTruncateMarkers& testMarkers, - int dataLength) { - auto records = - insertElementsWithCollectionMarkerUpdate(opCtx, nss, testMarkers, 1, dataLength); - return records.front(); - } - - RecordId insertWithSpecificTimestampAndRecordId(OperationContext* opCtx, - const NamespaceString& nss, - CollectionTruncateMarkers& testMarkers, - int dataLength, - Timestamp timestampToUse, - const RecordId& recordId) { - AutoGetCollection coll(opCtx, nss, MODE_IX); - const auto insertedData = std::string(dataLength, 'a'); - WriteUnitOfWork wuow(opCtx); - auto recordIdStatus = coll.getCollection()->getRecordStore()->insertRecord( - opCtx, recordId, insertedData.data(), insertedData.length(), timestampToUse); - ASSERT_OK(recordIdStatus); - ASSERT_EQ(recordIdStatus.getValue(), recordId); - auto now = Date_t::fromMillisSinceEpoch(timestampToUse.asInt64()); - testMarkers.updateCurrentMarkerAfterInsertOnCommit( - opCtx, insertedData.length(), recordId, now, 1); - wuow.commit(); - return recordId; - } - - void insertElements(OperationContext* opCtx, - const NamespaceString& nss, - int dataLength, - int numElements, - Timestamp timestampToUse) { - AutoGetCollection coll(opCtx, nss, MODE_IX); - const auto insertedData = std::string(dataLength, 'a'); - WriteUnitOfWork wuow(opCtx); - for (int i = 0; i < numElements; i++) { - auto recordIdStatus = coll.getCollection()->getRecordStore()->insertRecord( - opCtx, insertedData.data(), insertedData.length(), timestampToUse); - ASSERT_OK(recordIdStatus); - } - wuow.commit(); - } -}; -class TestCollectionMarkersWithPartialExpiration final - : public 
CollectionTruncateMarkersWithPartialExpiration { -public: - TestCollectionMarkersWithPartialExpiration(int64_t leftoverRecordsCount, - int64_t leftoverRecordsBytes, - int64_t minBytesPerMarker) - : CollectionTruncateMarkersWithPartialExpiration( - {}, leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker){}; - - TestCollectionMarkersWithPartialExpiration(std::deque markers, - int64_t leftoverRecordsCount, - int64_t leftoverRecordsBytes, - int64_t minBytesPerMarker) - : CollectionTruncateMarkersWithPartialExpiration( - std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker){}; - - void setExpirePartialMarker(bool value) { - _expirePartialMarker = value; - } - -private: - bool _expirePartialMarker = false; - - virtual bool _hasExcessMarkers(OperationContext* opCtx) const override { - return !getMarkers().empty(); - } - - virtual bool _hasPartialMarkerExpired(OperationContext* opCtx) const override { - return _expirePartialMarker; - } -}; - -class TestCollectionMarkers final : public CollectionTruncateMarkers { -public: - TestCollectionMarkers(int64_t leftoverRecordsCount, - int64_t leftoverRecordsBytes, - int64_t minBytesPerMarker) - : CollectionTruncateMarkers( - {}, leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker){}; - - TestCollectionMarkers(std::deque markers, - int64_t leftoverRecordsCount, - int64_t leftoverRecordsBytes, - int64_t minBytesPerMarker) - : CollectionTruncateMarkers( - std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker){}; - -private: - virtual bool _hasExcessMarkers(OperationContext* opCtx) const override { - return !getMarkers().empty(); - } -}; - -template -void normalTest(CollectionMarkersTest* fixture, std::string collectionName) { - T testMarkers(0, 0, 0); - - auto opCtx = fixture->getClient()->makeOperationContext(); - - auto collNs = NamespaceString("test", collectionName); - ASSERT_OK(fixture->createCollection(opCtx.get(), collNs)); - - static constexpr auto dataLength = 4; - auto [insertedRecordId, now] = fixture->insertElementWithCollectionMarkerUpdate( - opCtx.get(), collNs, testMarkers, dataLength); - - auto marker = testMarkers.peekOldestMarkerIfNeeded(opCtx.get()); - ASSERT_TRUE(marker); - ASSERT_EQ(marker->lastRecord, insertedRecordId); - ASSERT_EQ(marker->bytes, dataLength); - ASSERT_EQ(marker->wallTime, now); - ASSERT_EQ(marker->records, 1); - - testMarkers.popOldestMarker(); - - ASSERT_FALSE(testMarkers.peekOldestMarkerIfNeeded(opCtx.get())); -}; - -TEST_F(CollectionMarkersTest, NormalUsage) { - normalTest(this, "coll"); - normalTest(this, "partial_coll"); -} - -TEST_F(CollectionMarkersTest, NormalCollectionPartialMarkerUsage) { - TestCollectionMarkersWithPartialExpiration testMarkers(0, 0, 100); - - auto opCtx = getClient()->makeOperationContext(); - - auto collNs = NamespaceString("test", "coll"); - ASSERT_OK(createCollection(opCtx.get(), collNs)); - - static constexpr auto dataLength = 4; - auto [insertedRecordId, now] = - insertElementWithCollectionMarkerUpdate(opCtx.get(), collNs, testMarkers, dataLength); - - ASSERT_FALSE(testMarkers.peekOldestMarkerIfNeeded(opCtx.get())); - - testMarkers.setExpirePartialMarker(false); - testMarkers.createPartialMarkerIfNecessary(opCtx.get()); - ASSERT_FALSE(testMarkers.peekOldestMarkerIfNeeded(opCtx.get())); - - testMarkers.setExpirePartialMarker(true); - testMarkers.createPartialMarkerIfNecessary(opCtx.get()); - auto marker = testMarkers.peekOldestMarkerIfNeeded(opCtx.get()); - ASSERT_TRUE(marker); - - ASSERT_EQ(marker->lastRecord, 
insertedRecordId); - ASSERT_EQ(marker->bytes, dataLength); - ASSERT_EQ(marker->wallTime, now); - ASSERT_EQ(marker->records, 1); -} - -// Insert records into a collection and verify the number of markers that are created. -template -void createNewMarkerTest(CollectionMarkersTest* fixture, std::string collectionName) { - T testMarkers(0, 0, 100); - - auto collNs = NamespaceString("test", collectionName); - { - auto opCtx = fixture->getClient()->makeOperationContext(); - ASSERT_OK(fixture->createCollection(opCtx.get(), collNs)); - } - - { - auto opCtx = fixture->getClient()->makeOperationContext(); - - ASSERT_EQ(0U, testMarkers.numMarkers()); - - // Inserting a record smaller than 'minBytesPerMarker' shouldn't create a new collection - // marker. - auto insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 99, Timestamp(1, 1), RecordId(1, 1)); - ASSERT_EQ(insertedRecordId, RecordId(1, 1)); - ASSERT_EQ(0U, testMarkers.numMarkers()); - ASSERT_EQ(1, testMarkers.currentRecords()); - ASSERT_EQ(99, testMarkers.currentBytes()); - - // Inserting another record such that their combined size exceeds 'minBytesPerMarker' should - // cause a new marker to be created. - insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 51, Timestamp(1, 2), RecordId(1, 2)); - ASSERT_EQ(insertedRecordId, RecordId(1, 2)); - ASSERT_EQ(1U, testMarkers.numMarkers()); - ASSERT_EQ(0, testMarkers.currentRecords()); - ASSERT_EQ(0, testMarkers.currentBytes()); - - // Inserting a record such that the combined size of this record and the previously inserted - // one exceed 'minBytesPerMarker' shouldn't cause a new marker to be created because we've - // started filling a new marker. - insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 50, Timestamp(1, 3), RecordId(1, 3)); - ASSERT_EQ(insertedRecordId, RecordId(1, 3)); - ASSERT_EQ(1U, testMarkers.numMarkers()); - ASSERT_EQ(1, testMarkers.currentRecords()); - ASSERT_EQ(50, testMarkers.currentBytes()); - - // Inserting a record such that the combined size of this record and the previously inserted - // one is exactly equal to 'minBytesPerMarker' should cause a new marker to be created. - insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 50, Timestamp(1, 4), RecordId(1, 4)); - ASSERT_EQ(insertedRecordId, RecordId(1, 4)); - ASSERT_EQ(2U, testMarkers.numMarkers()); - ASSERT_EQ(0, testMarkers.currentRecords()); - ASSERT_EQ(0, testMarkers.currentBytes()); - - // Inserting a single record that exceeds 'minBytesPerMarker' should cause a new marker to - // be created. - insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 101, Timestamp(1, 5), RecordId(1, 5)); - ASSERT_EQ(insertedRecordId, RecordId(1, 5)); - ASSERT_EQ(3U, testMarkers.numMarkers()); - ASSERT_EQ(0, testMarkers.currentRecords()); - ASSERT_EQ(0, testMarkers.currentBytes()); - } -} - -TEST_F(CollectionMarkersTest, CreateNewMarker) { - createNewMarkerTest(this, "coll"); - createNewMarkerTest(this, "partial_coll"); -} - -// Verify that a collection marker isn't created if it would cause the logical representation of the -// records to not be in increasing order. 
-template -void ascendingOrderTest(CollectionMarkersTest* fixture, std::string collectionName) { - T testMarkers(0, 0, 100); - - auto collNs = NamespaceString("test", collectionName); - { - auto opCtx = fixture->getClient()->makeOperationContext(); - ASSERT_OK(fixture->createCollection(opCtx.get(), collNs)); - } - - { - auto opCtx = fixture->getClient()->makeOperationContext(); - - ASSERT_EQ(0U, testMarkers.numMarkers()); - auto insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 50, Timestamp(2, 2), RecordId(2, 2)); - ASSERT_EQ(insertedRecordId, RecordId(2, 2)); - ASSERT_EQ(0U, testMarkers.numMarkers()); - ASSERT_EQ(1, testMarkers.currentRecords()); - ASSERT_EQ(50, testMarkers.currentBytes()); - - // Inserting a record that has a smaller RecordId than the previously inserted record should - // be able to create a new marker when no markers already exist. - insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 50, Timestamp(2, 1), RecordId(2, 1)); - ASSERT_EQ(insertedRecordId, RecordId(2, 1)); - ASSERT_EQ(1U, testMarkers.numMarkers()); - ASSERT_EQ(0, testMarkers.currentRecords()); - ASSERT_EQ(0, testMarkers.currentBytes()); - - // However, inserting a record that has a smaller RecordId than most recently created - // marker's last record shouldn't cause a new marker to be created, even if the size of the - // inserted record exceeds 'minBytesPerMarker'. - insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 100, Timestamp(1, 1), RecordId(1, 1)); - ASSERT_EQ(insertedRecordId, RecordId(1, 1)); - ASSERT_EQ(1U, testMarkers.numMarkers()); - ASSERT_EQ(1, testMarkers.currentRecords()); - ASSERT_EQ(100, testMarkers.currentBytes()); - - // Inserting a record that has a larger RecordId than the most recently created marker's - // last record should then cause a new marker to be created. - insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( - opCtx.get(), collNs, testMarkers, 50, Timestamp(2, 3), RecordId(2, 3)); - ASSERT_EQ(insertedRecordId, RecordId(2, 3)); - ASSERT_EQ(2U, testMarkers.numMarkers()); - ASSERT_EQ(0, testMarkers.currentRecords()); - ASSERT_EQ(0, testMarkers.currentBytes()); - } -} -TEST_F(CollectionMarkersTest, AscendingOrder) { - ascendingOrderTest(this, "coll"); - ascendingOrderTest(this, "partial_coll"); -} - -// Test that initial marker creation works as expected when performing a scanning marker creation. 
-TEST_F(CollectionMarkersTest, ScanningMarkerCreation) { - static constexpr auto kNumElements = 51; - static constexpr auto kElementSize = 15; - static constexpr auto kMinBytes = (kElementSize * 2) - 1; - - auto collNs = NamespaceString("test", "coll"); - { - auto opCtx = getClient()->makeOperationContext(); - ASSERT_OK(createCollection(opCtx.get(), collNs)); - insertElements(opCtx.get(), collNs, kElementSize, kNumElements, Timestamp(1, 0)); - } - - { - auto opCtx = getClient()->makeOperationContext(); - - AutoGetCollection coll(opCtx.get(), collNs, MODE_IS); - - auto result = CollectionTruncateMarkers::createMarkersByScanning( - opCtx.get(), coll->getRecordStore(), collNs, kMinBytes, [](const Record& record) { - return CollectionTruncateMarkers::RecordIdAndWallTime{record.id, Date_t::now()}; - }); - ASSERT_EQ(result.leftoverRecordsBytes, kElementSize); - ASSERT_EQ(result.leftoverRecordsCount, 1); - ASSERT_EQ(result.markers.size(), 51 / 2); - for (const auto& marker : result.markers) { - ASSERT_EQ(marker.bytes, kElementSize * 2); - ASSERT_EQ(marker.records, 2); - } - } -} - -// Test that initial marker creation works as expected when using sampling -TEST_F(CollectionMarkersTest, SamplingMarkerCreation) { - static constexpr auto kNumRounds = 200; - static constexpr auto kElementSize = 15; - - int totalBytes = 0; - int totalRecords = 0; - auto collNs = NamespaceString("test", "coll"); - { - auto opCtx = getClient()->makeOperationContext(); - ASSERT_OK(createCollection(opCtx.get(), collNs)); - // Add documents of various sizes - for (int round = 0; round < kNumRounds; round++) { - for (int numBytes = 0; numBytes < kElementSize; numBytes++) { - insertElements(opCtx.get(), collNs, numBytes, 1, Timestamp(1, 0)); - totalRecords++; - totalBytes += numBytes; - } - } - } - - { - auto opCtx = getClient()->makeOperationContext(); - - AutoGetCollection coll(opCtx.get(), collNs, MODE_IS); - - static constexpr auto kNumMarkers = 15; - auto kMinBytesPerMarker = totalBytes / kNumMarkers; - auto kRecordsPerMarker = totalRecords / kNumMarkers; - - auto result = CollectionTruncateMarkers::createFromExistingRecordStore( - opCtx.get(), - coll->getRecordStore(), - collNs, - kMinBytesPerMarker, - [](const Record& record) { - return CollectionTruncateMarkers::RecordIdAndWallTime{record.id, Date_t::now()}; - }); - - ASSERT_EQ(result.methodUsed, CollectionTruncateMarkers::MarkersCreationMethod::Sampling); - const auto& firstMarker = result.markers.front(); - auto recordCount = firstMarker.records; - auto recordBytes = firstMarker.bytes; - ASSERT_EQ(result.leftoverRecordsBytes, totalBytes % kMinBytesPerMarker); - ASSERT_EQ(result.leftoverRecordsCount, totalRecords % kRecordsPerMarker); - ASSERT_GT(recordCount, 0); - ASSERT_GT(recordBytes, 0); - ASSERT_EQ(result.markers.size(), kNumMarkers); - for (const auto& marker : result.markers) { - ASSERT_EQ(marker.bytes, recordBytes); - ASSERT_EQ(marker.records, recordCount); - } - - ASSERT_EQ(recordBytes * kNumMarkers + result.leftoverRecordsBytes, totalBytes); - ASSERT_EQ(recordCount * kNumMarkers + result.leftoverRecordsCount, totalRecords); - } -} -} // namespace mongo diff --git a/src/mongo/db/storage/collection_truncate_markers.cpp b/src/mongo/db/storage/collection_truncate_markers.cpp new file mode 100644 index 0000000000000..6979ecbd9f74a --- /dev/null +++ b/src/mongo/db/storage/collection_truncate_markers.cpp @@ -0,0 +1,572 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/storage/collection_truncate_markers.h" + +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/mutex.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/timer.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage + +namespace mongo { +// TODO SERVER-74250: Change to slowCollectionSamplingReads once 7.0 is released. +MONGO_FAIL_POINT_DEFINE(slowOplogSamplingReads); + +namespace { + +// Strings for MarkerCreationMethods. 
+static constexpr StringData kEmptyCollectionString = "emptyCollection"_sd; +static constexpr StringData kScanningString = "scanning"_sd; +static constexpr StringData kSamplingString = "sampling"_sd; +} // namespace + +StringData CollectionTruncateMarkers::toString( + CollectionTruncateMarkers::MarkersCreationMethod creationMethod) { + switch (creationMethod) { + case CollectionTruncateMarkers::MarkersCreationMethod::EmptyCollection: + return kEmptyCollectionString; + case CollectionTruncateMarkers::MarkersCreationMethod::Scanning: + return kScanningString; + case CollectionTruncateMarkers::MarkersCreationMethod::Sampling: + return kSamplingString; + default: + MONGO_UNREACHABLE; + } +} + +boost::optional +CollectionTruncateMarkers::peekOldestMarkerIfNeeded(OperationContext* opCtx) const { + stdx::lock_guard lk(_markersMutex); + + if (!_hasExcessMarkers(opCtx)) { + return {}; + } + + return _markers.front(); +} + +void CollectionTruncateMarkers::popOldestMarker() { + stdx::lock_guard lk(_markersMutex); + _markers.pop_front(); +} + +CollectionTruncateMarkers::Marker& CollectionTruncateMarkers::createNewMarker( + const RecordId& lastRecord, Date_t wallTime) { + return _markers.emplace_back( + _currentRecords.swap(0), _currentBytes.swap(0), lastRecord, wallTime); +} + +void CollectionTruncateMarkers::createNewMarkerIfNeeded(OperationContext* opCtx, + const RecordId& lastRecord, + Date_t wallTime) { + auto logFailedLockAcquisition = [&](const std::string& lock) { + LOGV2_DEBUG(7393214, + 2, + "Failed to acquire lock to check if a new collection marker is needed", + "lock"_attr = lock); + }; + + // Try to lock the mutex, if we fail to lock then someone else is either already creating a new + // marker or popping the oldest one. In the latter case, we let the next insert trigger the new + // marker's creation. + stdx::unique_lock lk(_markersMutex, stdx::try_to_lock); + if (!lk) { + logFailedLockAcquisition("_markersMutex"); + return; + } + + if (_currentBytes.load() < _minBytesPerMarker) { + // Must have raced to create a new marker, someone else already triggered it. + return; + } + + if (!_markers.empty() && lastRecord < _markers.back().lastRecord) { + // Skip creating a new marker when the record's position comes before the most recently + // created marker. We likely raced with another batch of inserts that caused us to try and + // make multiples markers. + return; + } + + auto& marker = createNewMarker(lastRecord, wallTime); + + LOGV2_DEBUG(7393213, + 2, + "Created a new collection marker", + "lastRecord"_attr = marker.lastRecord, + "wallTime"_attr = marker.wallTime, + "numMarkers"_attr = _markers.size()); + + _notifyNewMarkerCreation(); +} + +void CollectionTruncateMarkers::updateCurrentMarkerAfterInsertOnCommit( + OperationContext* opCtx, + int64_t bytesInserted, + const RecordId& highestInsertedRecordId, + Date_t wallTime, + int64_t countInserted) { + opCtx->recoveryUnit()->onCommit([collectionMarkers = shared_from_this(), + bytesInserted, + recordId = highestInsertedRecordId, + wallTime, + countInserted](OperationContext* opCtx, auto) { + invariant(bytesInserted >= 0); + invariant(recordId.isValid()); + + collectionMarkers->_currentRecords.addAndFetch(countInserted); + int64_t newCurrentBytes = collectionMarkers->_currentBytes.addAndFetch(bytesInserted); + if (wallTime != Date_t() && newCurrentBytes >= collectionMarkers->_minBytesPerMarker) { + // When other transactions commit concurrently, an uninitialized wallTime may delay + // the creation of a new marker. 
This delay is limited to the number of concurrently + // running transactions, so the size difference should be inconsequential. + collectionMarkers->createNewMarkerIfNeeded(opCtx, recordId, wallTime); + } + }); +} + +void CollectionTruncateMarkers::setMinBytesPerMarker(int64_t size) { + invariant(size > 0); + + stdx::lock_guard lk(_markersMutex); + + _minBytesPerMarker = size; +} + +CollectionTruncateMarkers::InitialSetOfMarkers CollectionTruncateMarkers::createMarkersByScanning( + OperationContext* opCtx, + CollectionIterator& collectionIterator, + const NamespaceString& ns, + int64_t minBytesPerMarker, + std::function getRecordIdAndWallTime) { + auto startTime = curTimeMicros64(); + LOGV2_INFO(7393212, + "Scanning collection to determine where to place markers for truncation", + "namespace"_attr = ns); + + int64_t numRecords = 0; + int64_t dataSize = 0; + int64_t currentRecords = 0; + int64_t currentBytes = 0; + + std::deque markers; + + while (auto nextRecord = collectionIterator.getNext()) { + const auto& [rId, doc] = *nextRecord; + currentRecords++; + currentBytes += doc.objsize(); + if (currentBytes >= minBytesPerMarker) { + auto [_, wallTime] = + getRecordIdAndWallTime(Record{rId, RecordData{doc.objdata(), doc.objsize()}}); + + LOGV2_DEBUG(7393211, + 1, + "Marking entry as a potential future truncation point", + "wall"_attr = wallTime); + + markers.emplace_back( + std::exchange(currentRecords, 0), std::exchange(currentBytes, 0), rId, wallTime); + } + + numRecords++; + dataSize += doc.objsize(); + } + + collectionIterator.getRecordStore()->updateStatsAfterRepair(opCtx, numRecords, dataSize); + auto endTime = curTimeMicros64(); + return CollectionTruncateMarkers::InitialSetOfMarkers{ + std::move(markers), + currentRecords, + currentBytes, + Microseconds{static_cast(endTime - startTime)}, + MarkersCreationMethod::Scanning}; +} + + +CollectionTruncateMarkers::InitialSetOfMarkers CollectionTruncateMarkers::createMarkersBySampling( + OperationContext* opCtx, + CollectionIterator& collectionIterator, + const NamespaceString& ns, + int64_t estimatedRecordsPerMarker, + int64_t estimatedBytesPerMarker, + std::function getRecordIdAndWallTime) { + auto startTime = curTimeMicros64(); + + LOGV2_INFO(7393210, + "Sampling the collection to determine where to place markers for truncation", + "namespace"_attr = ns); + RecordId earliestRecordId, latestRecordId; + + { + auto record = [&] { + const bool forward = true; + auto rs = collectionIterator.getRecordStore(); + return rs->getCursor(opCtx, forward)->next(); + }(); + if (!record) { + // This shouldn't really happen unless the size storer values are far off from reality. + // The collection is probably empty, but fall back to scanning the collection just in + // case. + LOGV2(7393209, + "Failed to determine the earliest recordId, falling back to scanning the " + "collection", + "namespace"_attr = ns); + return CollectionTruncateMarkers::createMarkersByScanning( + opCtx, + collectionIterator, + ns, + estimatedBytesPerMarker, + std::move(getRecordIdAndWallTime)); + } + earliestRecordId = record->id; + } + + { + auto record = [&] { + const bool forward = false; + auto rs = collectionIterator.getRecordStore(); + return rs->getCursor(opCtx, forward)->next(); + }(); + if (!record) { + // This shouldn't really happen unless the size storer values are far off from reality. + // The collection is probably empty, but fall back to scanning the collection just in + // case. 
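The scanning path above only ever accumulates a running record count and byte count, cutting a marker each time the byte total reaches 'minBytesPerMarker' and leaving whatever remains as the partial marker. The following is a minimal standalone sketch of that accumulation (made-up record sizes and threshold, plain containers instead of the real RecordStore/iterator API):

```cpp
// Toy model of the scanning pass: walk records in RecordId order, accumulate
// bytes, and cut a marker whenever the running total reaches the threshold.
// Sizes and threshold are invented illustration values, not real data.
#include <cstdint>
#include <deque>
#include <iostream>
#include <utility>
#include <vector>

struct SimpleMarker {
    int64_t records;
    int64_t bytes;
};

int main() {
    const std::vector<int64_t> recordSizes{10, 10, 10, 10, 10, 10, 10};  // 7 records
    const int64_t minBytesPerMarker = 30;

    std::deque<SimpleMarker> markers;
    int64_t currentRecords = 0, currentBytes = 0;

    for (int64_t size : recordSizes) {
        ++currentRecords;
        currentBytes += size;
        if (currentBytes >= minBytesPerMarker) {
            markers.push_back({std::exchange(currentRecords, int64_t{0}),
                               std::exchange(currentBytes, int64_t{0})});
        }
    }

    // With 7 x 10-byte records and a 30-byte threshold this prints two markers of
    // 3 records / 30 bytes each, leaving 1 record / 10 bytes as the partial marker.
    std::cout << "markers: " << markers.size() << ", leftover records: " << currentRecords
              << ", leftover bytes: " << currentBytes << "\n";
}
```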
+            LOGV2(
+                7393208,
+                "Failed to determine the latest recordId, falling back to scanning the collection",
+                "namespace"_attr = ns);
+            return CollectionTruncateMarkers::createMarkersByScanning(
+                opCtx,
+                collectionIterator,
+                ns,
+                estimatedBytesPerMarker,
+                std::move(getRecordIdAndWallTime));
+        }
+        latestRecordId = record->id;
+    }
+
+    LOGV2(7393207,
+          "Sampling from the collection to determine where to place markers for truncation",
+          "namespace"_attr = ns,
+          "from"_attr = earliestRecordId,
+          "to"_attr = latestRecordId);
+
+    int64_t wholeMarkers = collectionIterator.numRecords(opCtx) / estimatedRecordsPerMarker;
+    // We don't use the wholeMarkers variable here due to integer division not being associative.
+    // For example, 10 * (47500 / 28700) = 10, but (10 * 47500) / 28700 = 16.
+    int64_t numSamples = (CollectionTruncateMarkers::kRandomSamplesPerMarker *
+                          collectionIterator.numRecords(opCtx)) /
+        estimatedRecordsPerMarker;
+
+    LOGV2(7393216,
+          "Taking samples and assuming each collection section contains equal amounts",
+          "namespace"_attr = ns,
+          "numSamples"_attr = numSamples,
+          "containsNumRecords"_attr = estimatedRecordsPerMarker,
+          "containsNumBytes"_attr = estimatedBytesPerMarker);
+
+    // Divide the collection into 'wholeMarkers' logical sections, with each section containing
+    // approximately 'estimatedRecordsPerMarker'. Do so by oversampling the collection, sorting the
+    // samples in order of their RecordId, and then choosing the samples expected to be near the
+    // right edge of each logical section.
+
+    std::vector collectionEstimates;
+    Timer lastProgressTimer;
+
+    for (int i = 0; i < numSamples; ++i) {
+        auto nextRandom = collectionIterator.getNextRandom();
+        auto samplingLogIntervalSeconds = gCollectionSamplingLogIntervalSeconds.load();
+        slowOplogSamplingReads.execute(
+            [&](const BSONObj& dataObj) { sleepsecs(dataObj["delay"].numberInt()); });
+        if (!nextRandom) {
+            // This shouldn't really happen unless the size storer values are far off from reality.
+            // The collection is probably empty, but fall back to scanning the collection just in
+            // case.
+            LOGV2(7393206,
+                  "Failed to get enough random samples, falling back to scanning the collection",
+                  "namespace"_attr = ns);
+            collectionIterator.reset(opCtx);
+            return CollectionTruncateMarkers::createMarkersByScanning(
+                opCtx,
+                collectionIterator,
+                ns,
+                estimatedBytesPerMarker,
+                std::move(getRecordIdAndWallTime));
+        }
+        const auto [rId, doc] = *nextRandom;
+
+        collectionEstimates.emplace_back(
+            getRecordIdAndWallTime(Record{rId, RecordData{doc.objdata(), doc.objsize()}}));
+
+        if (samplingLogIntervalSeconds > 0 &&
+            lastProgressTimer.elapsed() >= Seconds(samplingLogIntervalSeconds)) {
+            LOGV2(7393217,
+                  "Collection sampling progress",
+                  "namespace"_attr = ns,
+                  "completed"_attr = (i + 1),
+                  "total"_attr = numSamples);
+            lastProgressTimer.reset();
+        }
+    }
+
+    std::sort(collectionEstimates.begin(),
+              collectionEstimates.end(),
+              [](const auto& a, const auto& b) { return a.id < b.id; });
+    LOGV2(7393205, "Collection sampling complete", "namespace"_attr = ns);
+
+    std::deque markers;
+    for (int i = 1; i <= wholeMarkers; ++i) {
+        // Use every (kRandomSamplesPerMarker)th sample, starting with the
+        // (kRandomSamplesPerMarker - 1)th, as the last record for each marker.
+        // If parsing "wall" fails, we crash to allow the user to fix their collection.
+ const auto& [id, wallTime] = collectionEstimates[kRandomSamplesPerMarker * i - 1]; + + LOGV2_DEBUG(7393204, + 1, + "Marking entry as a potential future truncation point", + "namespace"_attr = ns, + "wall"_attr = wallTime, + "ts"_attr = id); + + markers.emplace_back(estimatedRecordsPerMarker, estimatedBytesPerMarker, id, wallTime); + } + + // Account for the partially filled chunk. + auto currentRecords = + collectionIterator.numRecords(opCtx) - estimatedRecordsPerMarker * wholeMarkers; + auto currentBytes = collectionIterator.dataSize(opCtx) - estimatedBytesPerMarker * wholeMarkers; + return CollectionTruncateMarkers::InitialSetOfMarkers{ + std::move(markers), + currentRecords, + currentBytes, + Microseconds{static_cast(curTimeMicros64() - startTime)}, + MarkersCreationMethod::Sampling}; +} + +CollectionTruncateMarkers::MarkersCreationMethod +CollectionTruncateMarkers::computeInitialCreationMethod( + int64_t numRecords, + int64_t dataSize, + int64_t minBytesPerMarker, + boost::optional numberOfMarkersToKeepForOplog) { + // Don't calculate markers if this is a new collection. This is to prevent standalones from + // attempting to get a forward scanning cursor on an explicit create of the collection. These + // values can be wrong. The assumption is that if they are both observed to be zero, there must + // be very little data in the collection; the cost of being wrong is imperceptible. + if (numRecords == 0 && dataSize == 0) { + return MarkersCreationMethod::EmptyCollection; + } + + // Only use sampling to estimate where to place the collection markers if the number of samples + // drawn is less than 5% of the collection. + const uint64_t kMinSampleRatioForRandCursor = 20; + + // If the collection doesn't contain enough records to make sampling more efficient, then scan + // the collection to determine where to put down markers. + // + // Unless preserving legacy behavior of 'OplogTruncateMarkers', compute the number of markers + // which would be generated based on the estimated data size. + auto numMarkers = numberOfMarkersToKeepForOplog ? numberOfMarkersToKeepForOplog.get() + : dataSize / minBytesPerMarker; + if (numRecords <= 0 || dataSize <= 0 || + uint64_t(numRecords) < + kMinSampleRatioForRandCursor * kRandomSamplesPerMarker * numMarkers) { + return MarkersCreationMethod::Scanning; + } + + return MarkersCreationMethod::Sampling; +} + +CollectionTruncateMarkers::InitialSetOfMarkers +CollectionTruncateMarkers::createFromCollectionIterator( + OperationContext* opCtx, + CollectionIterator& collectionIterator, + const NamespaceString& ns, + int64_t minBytesPerMarker, + std::function getRecordIdAndWallTime, + boost::optional numberOfMarkersToKeepForOplog) { + + long long numRecords = collectionIterator.numRecords(opCtx); + long long dataSize = collectionIterator.dataSize(opCtx); + + LOGV2(7393203, + "The size storer reports that the collection contains", + "numRecords"_attr = numRecords, + "dataSize"_attr = dataSize); + + auto creationMethod = CollectionTruncateMarkers::computeInitialCreationMethod( + numRecords, dataSize, minBytesPerMarker, numberOfMarkersToKeepForOplog); + + switch (creationMethod) { + case MarkersCreationMethod::EmptyCollection: + // Don't calculate markers if this is a new collection. This is to prevent standalones + // from attempting to get a forward scanning cursor on an explicit create of the + // collection. These values can be wrong. 
The assumption is that if they are both + // observed to be zero, there must be very little data in the collection; the cost of + // being wrong is imperceptible. + return CollectionTruncateMarkers::InitialSetOfMarkers{ + {}, 0, 0, Microseconds{0}, MarkersCreationMethod::EmptyCollection}; + case MarkersCreationMethod::Scanning: + return CollectionTruncateMarkers::createMarkersByScanning( + opCtx, + collectionIterator, + ns, + minBytesPerMarker, + std::move(getRecordIdAndWallTime)); + default: { + // Use the collection's average record size to estimate the number of records in each + // marker, + // and thus estimate the combined size of the records. + double avgRecordSize = double(dataSize) / double(numRecords); + double estimatedRecordsPerMarker = std::ceil(minBytesPerMarker / avgRecordSize); + double estimatedBytesPerMarker = estimatedRecordsPerMarker * avgRecordSize; + + return CollectionTruncateMarkers::createMarkersBySampling( + opCtx, + collectionIterator, + ns, + (int64_t)estimatedRecordsPerMarker, + (int64_t)estimatedBytesPerMarker, + std::move(getRecordIdAndWallTime)); + } + } +} + +void CollectionTruncateMarkersWithPartialExpiration::updateCurrentMarkerAfterInsertOnCommit( + OperationContext* opCtx, + int64_t bytesInserted, + const RecordId& highestInsertedRecordId, + Date_t wallTime, + int64_t countInserted) { + opCtx->recoveryUnit()->onCommit( + [collectionMarkers = + std::static_pointer_cast( + shared_from_this()), + bytesInserted, + recordId = highestInsertedRecordId, + wallTime, + countInserted](OperationContext* opCtx, auto) { + invariant(bytesInserted >= 0); + invariant(recordId.isValid()); + collectionMarkers->updateCurrentMarker( + opCtx, bytesInserted, recordId, wallTime, countInserted); + }); +} + +void CollectionTruncateMarkersWithPartialExpiration::createPartialMarkerIfNecessary( + OperationContext* opCtx) { + auto logFailedLockAcquisition = [&](const std::string& lock) { + LOGV2_DEBUG(7393202, + 2, + "Failed to acquire lock to check if a new partial collection marker is needed", + "lock"_attr = lock); + }; + + // Try to lock all mutexes, if we fail to lock a mutex then someone else is either already + // creating a new marker or popping the oldest one. In the latter case, we let the next check + // trigger the new partial marker's creation. + + stdx::unique_lock lk(_markersMutex, stdx::try_to_lock); + if (!lk) { + logFailedLockAcquisition("_markersMutex"); + return; + } + + stdx::unique_lock markerLk(_lastHighestRecordMutex, stdx::try_to_lock); + if (!markerLk) { + logFailedLockAcquisition("_lastHighestRecordMutex"); + return; + } + + if (_currentBytes.load() == 0 && _currentRecords.load() == 0) { + // Nothing can be used for a marker. Early exit now. 
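As a rough, self-contained illustration of the decision and estimation logic in computeInitialCreationMethod and the Sampling branch of createFromCollectionIterator above, the sketch below re-runs the same arithmetic on invented size-storer statistics; it reproduces only the math, not the storage-engine calls:

```cpp
// Standalone sketch of the creation-method decision and per-marker estimates.
// The collection statistics are hypothetical; the constants mirror the values
// used in computeInitialCreationMethod above.
#include <cmath>
#include <cstdint>
#include <iostream>

int main() {
    const int64_t numRecords = 1'000'000;         // hypothetical size-storer record count
    const int64_t dataSize = 200'000'000;         // hypothetical total bytes (~200 B/record)
    const int64_t minBytesPerMarker = 10'000'000;
    const uint64_t kMinSampleRatioForRandCursor = 20;
    const uint64_t kRandomSamplesPerMarker = 10;

    if (numRecords == 0 && dataSize == 0) {
        std::cout << "EmptyCollection\n";
        return 0;
    }

    const int64_t numMarkers = dataSize / minBytesPerMarker;  // 20 markers
    if (uint64_t(numRecords) <
        kMinSampleRatioForRandCursor * kRandomSamplesPerMarker * uint64_t(numMarkers)) {
        std::cout << "Scanning\n";  // sampling would not beat a plain scan here
        return 0;
    }

    // Sampling: spread the size-storer totals evenly across the markers.
    const double avgRecordSize = double(dataSize) / double(numRecords);               // 200 bytes
    const double estRecordsPerMarker = std::ceil(minBytesPerMarker / avgRecordSize);  // 50'000
    const double estBytesPerMarker = estRecordsPerMarker * avgRecordSize;             // ~10 MB
    std::cout << "Sampling: " << estRecordsPerMarker << " records / " << estBytesPerMarker
              << " bytes per marker\n";
}
```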
+ return; + } + + if (_hasPartialMarkerExpired(opCtx)) { + auto& marker = createNewMarker(_lastHighestRecordId, _lastHighestWallTime); + + LOGV2_DEBUG(7393201, + 2, + "Created a new partial collection marker", + "lastRecord"_attr = marker.lastRecord, + "wallTime"_attr = marker.wallTime, + "numMarkers"_attr = _markers.size()); + _notifyNewMarkerCreation(); + } +} + +void CollectionTruncateMarkersWithPartialExpiration::_updateHighestSeenRecordIdAndWallTime( + const RecordId& rId, Date_t wallTime) { + stdx::unique_lock lk(_lastHighestRecordMutex); + if (_lastHighestRecordId < rId) { + _lastHighestRecordId = rId; + } + if (_lastHighestWallTime < wallTime) { + _lastHighestWallTime = wallTime; + } +} + +void CollectionTruncateMarkersWithPartialExpiration::updateCurrentMarker( + OperationContext* opCtx, + int64_t bytesAdded, + const RecordId& highestRecordId, + Date_t highestWallTime, + int64_t numRecordsAdded) { + // By putting the highest marker modification first we can guarantee than in the + // event of a race condition between expiring a partial marker the metrics increase + // will happen after the marker has been created. This guarantees that the metrics + // will eventually be correct as long as the expiration criteria checks for the + // metrics and the highest marker expiration. + _updateHighestSeenRecordIdAndWallTime(highestRecordId, highestWallTime); + _currentRecords.addAndFetch(numRecordsAdded); + int64_t newCurrentBytes = _currentBytes.addAndFetch(bytesAdded); + if (highestWallTime != Date_t() && highestRecordId.isValid() && + newCurrentBytes >= _minBytesPerMarker) { + createNewMarkerIfNeeded(opCtx, highestRecordId, highestWallTime); + } +} + +} // namespace mongo diff --git a/src/mongo/db/storage/collection_truncate_markers.h b/src/mongo/db/storage/collection_truncate_markers.h new file mode 100644 index 0000000000000..ce40b99fc2cfd --- /dev/null +++ b/src/mongo/db/storage/collection_truncate_markers.h @@ -0,0 +1,494 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" + +namespace mongo { + +class OperationContext; + + +// Keep "markers" against a collection to efficiently remove ranges of old records when the +// collection grows. This class is meant to be used only with collections that have the following +// requirements: +// * The collection is an insert-only collection +// * The collection has no indexes +// * If a record with RecordId=Max has to be deleted then all previous records with RecordId D such +// that Min < D <= Max should be deleted. With RecordID=Min defining a lower boundary. +// +// If these requirements hold then this class can be used to compute and maintain up-to-date markers +// for ranges of deletions. These markers will be expired and returned to the deleter whenever the +// implementation defined '_hasExcessMarkers' returns true. +class CollectionTruncateMarkers : public std::enable_shared_from_this { +public: + /** Markers represent "waypoints" of the collection that contain information between the current + * marker and the previous one. + * + * Markers are created by the class automatically whenever there are more than X number of bytes + * between the previous marker and the latest insertion. + * + * 'partial marker' + * |___________________|......|____________________|______ + * Oldest Marker Newest Marker + * Min rid <-------------------------------------------------<------- Max rid + * + * A 'Marker' is not created until it is full or its creation is requested by a caller. A + * 'partial marker' is not of type 'Marker', but rather metadata counting incoming records and + * bytes until it can be used to construct a 'Marker'. + * + * Marker + * |__________________| + * lastRecord + */ + struct Marker { + int64_t records; // Approximate number of records between the current marker and the + // previous marker. + int64_t bytes; // Approximate size of records between the current marker and the + // previous marker. + RecordId lastRecord; // RecordId of the record that created this marker. + Date_t wallTime; // Walltime of the record that created this marker. 
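To make the intended lifecycle concrete, here is a small toy model of how a deleter is expected to consume these markers, matching the requirements and waypoint diagram above: while the policy reports excess markers, truncate everything up to and including the oldest marker's lastRecord, then pop it. It uses plain integers instead of RecordIds and a vector instead of a RecordStore, so it is an illustration rather than the real API:

```cpp
// Toy model of the marker-consuming deleter loop described above.
#include <algorithm>
#include <cstdint>
#include <deque>
#include <iostream>
#include <vector>

struct ToyMarker {
    int64_t records;
    int64_t bytes;
    int64_t lastRecord;  // stand-in for RecordId
};

int main() {
    std::vector<int64_t> collection{1, 2, 3, 4, 5, 6, 7, 8};  // record ids, oldest first
    std::deque<ToyMarker> markers{{3, 300, 3}, {3, 300, 6}};  // front = oldest
    const std::size_t maxWholeMarkers = 1;                    // pretend expiry policy

    while (markers.size() > maxWholeMarkers) {                // plays the role of _hasExcessMarkers
        const ToyMarker oldest = markers.front();             // peekOldestMarkerIfNeeded
        collection.erase(                                     // range truncate up to lastRecord
            std::remove_if(collection.begin(),
                           collection.end(),
                           [&](int64_t rid) { return rid <= oldest.lastRecord; }),
            collection.end());
        markers.pop_front();                                  // popOldestMarker
    }

    std::cout << "records left: " << collection.size()        // prints 5 (ids 4..8)
              << ", markers left: " << markers.size() << "\n";  // prints 1
}
```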
+ + Marker(int64_t records, int64_t bytes, RecordId lastRecord, Date_t wallTime) + : records(records), + bytes(bytes), + lastRecord(std::move(lastRecord)), + wallTime(wallTime) {} + }; + + + CollectionTruncateMarkers(std::deque markers, + int64_t leftoverRecordsCount, + int64_t leftoverRecordsBytes, + int64_t minBytesPerMarker) + : _minBytesPerMarker(minBytesPerMarker), + _currentRecords(leftoverRecordsCount), + _currentBytes(leftoverRecordsBytes), + _markers(std::move(markers)) {} + + boost::optional peekOldestMarkerIfNeeded(OperationContext* opCtx) const; + + void popOldestMarker(); + + void createNewMarkerIfNeeded(OperationContext* opCtx, + const RecordId& lastRecord, + Date_t wallTime); + + // Updates the current marker with the inserted value if the operation commits the WUOW. + virtual void updateCurrentMarkerAfterInsertOnCommit(OperationContext* opCtx, + int64_t bytesInserted, + const RecordId& highestInsertedRecordId, + Date_t wallTime, + int64_t countInserted); + + // The method used for creating the initial set of markers. + enum class MarkersCreationMethod { EmptyCollection, Scanning, Sampling }; + + static StringData toString(MarkersCreationMethod creationMethod); + + // The initial set of markers to use when constructing the CollectionMarkers object. + struct InitialSetOfMarkers { + std::deque markers; + int64_t leftoverRecordsCount; + int64_t leftoverRecordsBytes; + Microseconds timeTaken; + MarkersCreationMethod methodUsed; + }; + struct RecordIdAndWallTime { + RecordId id; + Date_t wall; + + RecordIdAndWallTime(RecordId lastRecord, Date_t wallTime) + : id(std::move(lastRecord)), wall(std::move(wallTime)) {} + }; + + // Given the estimated collection 'dataSize' and 'numRecords', along with a target + // 'minBytesPerMarker' and the desired 'numRandomSamplesPerMarker' (if sampling is the chosen + // creation method), computes the initial creation method to try for the initialization. + // + // It's possible the initial creation method is not the actual creation method. However, it will + // be the first creation method tried. For example, if estimates of 'dataSize' and 'numRecords' + // are really far off, sampling may default back to scanning later on. + // + // 'numberOfMarkersToKeepForOplog' exists solely to maintain legacy behavior of + // 'OplogTruncateMarkers'. It serves as the maximum number of truncate markers to keep before + // reclaiming the oldest truncate markers. + static CollectionTruncateMarkers::MarkersCreationMethod computeInitialCreationMethod( + int64_t numRecords, + int64_t dataSize, + int64_t minBytesPerMarker, + boost::optional numberOfMarkersToKeepForOplog = boost::none); + + /** + * A collection iterator class meant to encapsulate how the collection is scanned/sampled. As + * the initialisation step is only concerned about getting either the next element of the + * collection or a random one, this allows the user to specify how to perform these steps. This + * allows one for example to avoid yielding and use raw cursors or to use the query framework so + * that yielding is performed and we don't affect server stability. + * + * If we were to use query framework scans here we would incur on a layering violation as the + * storage layer shouldn't have to interact with the query (higher) layer in here. + */ + class CollectionIterator { + public: + // Returns the next element in the collection. Behaviour is the same as performing a normal + // collection scan. + virtual boost::optional> getNext() = 0; + + // Returns a random document from the collection. 
+ virtual boost::optional> getNextRandom() = 0; + + virtual RecordStore* getRecordStore() const = 0; + + // Reset the iterator. This will recreate any internal cursors used by the class so that + // calling getNext* will start from the beginning again. + virtual void reset(OperationContext* opCtx) = 0; + + int64_t numRecords(OperationContext* opCtx) const { + return getRecordStore()->numRecords(opCtx); + } + + int64_t dataSize(OperationContext* opCtx) const { + return getRecordStore()->dataSize(opCtx); + } + }; + + // Creates the initial set of markers. This will decide whether to perform a collection scan or + // sampling based on the size of the collection. + // + // 'numberOfMarkersToKeepForOplog' exists solely to maintain legacy behavior of + // 'OplogTruncateMarkers'. It serves as the maximum number of truncate markers to keep before + // reclaiming the oldest truncate markers. + static InitialSetOfMarkers createFromCollectionIterator( + OperationContext* opCtx, + CollectionIterator& collIterator, + const NamespaceString& ns, + int64_t minBytesPerMarker, + std::function getRecordIdAndWallTime, + boost::optional numberOfMarkersToKeepForOplog = boost::none); + + // Creates the initial set of markers by fully scanning the collection. The set of markers + // returned will have correct metrics. + static InitialSetOfMarkers createMarkersByScanning( + OperationContext* opCtx, + CollectionIterator& collIterator, + const NamespaceString& ns, + int64_t minBytesPerMarker, + std::function getRecordIdAndWallTime); + + // Creates the initial set of markers by sampling the collection. The set of markers + // returned will have approximate metrics. The metrics of each marker will be equal and contain + // the collection's size and record count divided by the number of markers. + static InitialSetOfMarkers createMarkersBySampling( + OperationContext* opCtx, + CollectionIterator& collIterator, + const NamespaceString& ns, + int64_t estimatedRecordsPerMarker, + int64_t estimatedBytesPerMarker, + std::function getRecordIdAndWallTime); + + void setMinBytesPerMarker(int64_t size); + + static constexpr uint64_t kRandomSamplesPerMarker = 10; + + // + // The following methods are public only for use in tests. + // + + size_t numMarkers_forTest() const { + stdx::lock_guard lk(_markersMutex); + return _markers.size(); + } + + int64_t currentBytes_forTest() const { + return _currentBytes.load(); + } + + int64_t currentRecords_forTest() const { + return _currentRecords.load(); + } + + std::deque getMarkers_forTest() const { + // Return a copy of the vector. + return _markers; + } + +private: + friend class CollectionTruncateMarkersWithPartialExpiration; + + // Used to decide whether the oldest marker has expired. Implementations are free to use + // whichever process they want to discern if there are expired markers. + // This method will get called holding the _markersMutex. + virtual bool _hasExcessMarkers(OperationContext* opCtx) const = 0; + + // Method used to notify the implementation of a new marker being created. Implementations are + // free to implement this however they see fit by overriding it. By default this is a no-op. + virtual void _notifyNewMarkerCreation(){}; + + // Minimum number of bytes the marker being filled should contain before it gets added to the + // deque of collection markers. + int64_t _minBytesPerMarker; + + AtomicWord _currentRecords; // Number of records in the marker being filled. + AtomicWord _currentBytes; // Number of bytes in the marker being filled. 
+ + // Protects against concurrent access to the deque of collection markers. + mutable Mutex _markersMutex = MONGO_MAKE_LATCH("CollectionTruncateMarkers::_markersMutex"); + std::deque _markers; // front = oldest, back = newest. + +protected: + struct PartialMarkerMetrics { + AtomicWord* currentRecords; + AtomicWord* currentBytes; + }; + + template + auto modifyMarkersWith(F&& f) { + static_assert(std::is_invocable_v&>, + "Function must be of type T(std::deque&)"); + stdx::lock_guard lk(_markersMutex); + return f(_markers); + } + + template + auto checkMarkersWith(F&& f) const { + static_assert(std::is_invocable_v&>, + "Function must be of type T(const std::deque&)"); + stdx::lock_guard lk(_markersMutex); + return f(_markers); + } + + const std::deque& getMarkers() const { + return _markers; + } + + /** + * Returns whether the truncate markers instace has no markers, whether partial or whole. Note + * that this method can provide a stale result unless the caller can guarantee that no more + * markers will be created. + */ + bool isEmpty() const { + stdx::lock_guard lk(_markersMutex); + return _markers.size() == 0 && _currentBytes.load() == 0 && _currentRecords.load() == 0; + } + + Marker& createNewMarker(const RecordId& lastRecord, Date_t wallTime); + + template + auto modifyPartialMarker(F&& f) { + static_assert(std::is_invocable_v, + "Function must be of type T(PartialMarkerMetrics)"); + PartialMarkerMetrics metrics{&_currentRecords, &_currentBytes}; + return f(metrics); + } +}; + +/** + * An extension of CollectionTruncateMarkers that provides support for creating "partial markers". + * + * Partial markers are normal markers that can be requested by the user calling + * CollectionTruncateMarkersWithPartialExpiration::createPartialMarkerIfNecessary. The + * implementation will then consider whether the current data awaiting a marker should be deleted + * according to some internal logic. This is useful in time-based expiration systems as there could + * be low activity collections containing data that should be expired but won't because there is no + * marker. + */ +class CollectionTruncateMarkersWithPartialExpiration : public CollectionTruncateMarkers { +public: + CollectionTruncateMarkersWithPartialExpiration(std::deque markers, + int64_t leftoverRecordsCount, + int64_t leftoverRecordsBytes, + int64_t minBytesPerMarker) + : CollectionTruncateMarkers( + std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker) {} + + // Creates a partially filled marker if necessary. The criteria used is whether there is data in + // the partial marker and whether the implementation's '_hasPartialMarkerExpired' returns true. + void createPartialMarkerIfNecessary(OperationContext* opCtx); + + virtual void updateCurrentMarkerAfterInsertOnCommit(OperationContext* opCtx, + int64_t bytesInserted, + const RecordId& highestInsertedRecordId, + Date_t wallTime, + int64_t countInserted) final; + + std::pair getPartialMarker_forTest() const { + return {_lastHighestRecordId, _lastHighestWallTime}; + } + +private: + // Highest marker seen during the lifetime of the class. Modifications must happen + // while holding '_lastHighestRecordMutex'. + mutable Mutex _lastHighestRecordMutex = + MONGO_MAKE_LATCH("CollectionTruncateMarkersWithPartialExpiration::_lastHighestRecordMutex"); + RecordId _lastHighestRecordId; + Date_t _lastHighestWallTime; + + // Used to decide if the current partially built marker has expired. 
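As an illustration of the time-based use case described above, a hypothetical subclass (not part of this patch, written only against the interface declared in this header) could implement both expiration hooks against a retention window:

```cpp
// Hypothetical sketch: whole markers expire once their wallTime is older than a
// retention window, and the partially filled marker expires on the same
// criterion, so low-activity collections still get truncated eventually.
// 'TimeBasedTruncateMarkers' and 'retention' are illustrative names only.
#include "mongo/db/storage/collection_truncate_markers.h"
#include "mongo/util/duration.h"
#include "mongo/util/time_support.h"

namespace mongo {

class TimeBasedTruncateMarkers final : public CollectionTruncateMarkersWithPartialExpiration {
public:
    TimeBasedTruncateMarkers(std::deque<Marker> markers,
                             int64_t leftoverRecordsCount,
                             int64_t leftoverRecordsBytes,
                             int64_t minBytesPerMarker,
                             Seconds retention)
        : CollectionTruncateMarkersWithPartialExpiration(
              std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker),
          _retention(retention) {}

private:
    bool _hasExcessMarkers(OperationContext*) const override {
        // Called with '_markersMutex' held, so reading the deque directly is safe.
        const auto& markers = getMarkers();
        return !markers.empty() && markers.front().wallTime + _retention < Date_t::now();
    }

    bool _hasPartialMarkerExpired(OperationContext*) const override {
        // Expire the partial marker once the newest record it covers is old enough.
        return getPartialMarker().second + _retention < Date_t::now();
    }

    Seconds _retention;
};

}  // namespace mongo
```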
+ virtual bool _hasPartialMarkerExpired(OperationContext* opCtx) const { + return false; + } + + // Updates the highest seen RecordId and wall time if they are above the current ones. + void _updateHighestSeenRecordIdAndWallTime(const RecordId& rId, Date_t wallTime); + +protected: + std::pair getPartialMarker() const { + return {_lastHighestRecordId, _lastHighestWallTime}; + } + + void updateCurrentMarker(OperationContext* opCtx, + int64_t bytesAdded, + const RecordId& highestRecordId, + Date_t highestWallTime, + int64_t numRecordsAdded); +}; + +/** + * A Collection iterator meant to work with raw RecordStores. This iterator will not yield between + * calls to getNext()/getNextRandom(). + * + * It is only safe to use when the user is not accepting any user operation. Some examples of when + * this class can be used are during oplog initialisation, repair, recovery, etc. + */ +class UnyieldableCollectionIterator : public CollectionTruncateMarkers::CollectionIterator { +public: + UnyieldableCollectionIterator(OperationContext* opCtx, RecordStore* rs) : _rs(rs) { + reset(opCtx); + } + + virtual boost::optional> getNext() final { + auto record = _directionalCursor->next(); + if (!record) { + return boost::none; + } + return std::make_pair(std::move(record->id), record->data.releaseToBson()); + } + + virtual boost::optional> getNextRandom() final { + auto record = _randomCursor->next(); + if (!record) { + return boost::none; + } + return std::make_pair(std::move(record->id), record->data.releaseToBson()); + } + + virtual RecordStore* getRecordStore() const final { + return _rs; + } + + virtual void reset(OperationContext* opCtx) final { + _directionalCursor = _rs->getCursor(opCtx); + _randomCursor = _rs->getRandomCursor(opCtx); + } + +private: + RecordStore* _rs; + std::unique_ptr _directionalCursor; + std::unique_ptr _randomCursor; +}; + +/** + * A collection iterator that can yield between calls to getNext()/getNextRandom() + */ +class YieldableCollectionIterator : public CollectionTruncateMarkers::CollectionIterator { +public: + YieldableCollectionIterator(OperationContext* opCtx, VariantCollectionPtrOrAcquisition coll) + : _collection(coll) { + reset(opCtx); + } + + virtual boost::optional> getNext() final { + RecordId rId; + BSONObj doc; + if (_collScanExecutor->getNext(&doc, &rId) == PlanExecutor::IS_EOF) { + return boost::none; + } + return std::make_pair(std::move(rId), std::move(doc)); + } + + virtual boost::optional> getNextRandom() final { + RecordId rId; + BSONObj doc; + if (_sampleExecutor->getNext(&doc, &rId) == PlanExecutor::IS_EOF) { + return boost::none; + } + return std::make_pair(std::move(rId), std::move(doc)); + } + + virtual RecordStore* getRecordStore() const final { + return _collection.getCollectionPtr()->getRecordStore(); + } + + virtual void reset(OperationContext* opCtx) final { + _collScanExecutor = + InternalPlanner::collectionScan(opCtx, + _collection, + PlanYieldPolicy::YieldPolicy::YIELD_AUTO, + InternalPlanner::Direction::FORWARD); + _sampleExecutor = InternalPlanner::sampleCollection( + opCtx, _collection, PlanYieldPolicy::YieldPolicy::YIELD_AUTO); + } + +private: + VariantCollectionPtrOrAcquisition _collection; + std::unique_ptr _collScanExecutor; + std::unique_ptr _sampleExecutor; +}; + +} // namespace mongo diff --git a/src/mongo/db/storage/collection_truncate_markers_test.cpp b/src/mongo/db/storage/collection_truncate_markers_test.cpp new file mode 100644 index 0000000000000..5d66c24a613fd --- /dev/null +++ 
b/src/mongo/db/storage/collection_truncate_markers_test.cpp @@ -0,0 +1,578 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/storage_engine_test_fixture.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" + +namespace mongo { + +class CollectionMarkersTest : public StorageEngineTest { +public: + explicit CollectionMarkersTest(Options options = {}) : StorageEngineTest(std::move(options)) {} + + struct RecordIdAndWall { + RecordId recordId; + Date_t wallTime; + }; + std::vector insertElementsWithCollectionMarkerUpdate( + OperationContext* opCtx, + const NamespaceString& nss, + CollectionTruncateMarkers& testMarkers, + int numElements, + int dataLength) { + std::vector records; + AutoGetCollection coll(opCtx, nss, MODE_IX); + const auto insertedData = std::string(dataLength, 'a'); + WriteUnitOfWork wuow(opCtx); + for (int i = 0; i < numElements; i++) { + auto now = Date_t::now(); + auto ts = Timestamp(now); + auto recordIdStatus = coll.getCollection()->getRecordStore()->insertRecord( + opCtx, insertedData.data(), insertedData.length(), ts); + ASSERT_OK(recordIdStatus); + auto recordId = recordIdStatus.getValue(); + testMarkers.updateCurrentMarkerAfterInsertOnCommit( + opCtx, insertedData.length(), recordId, now, 1); + records.push_back(RecordIdAndWall{std::move(recordId), std::move(now)}); + } + wuow.commit(); + return records; + } + + RecordIdAndWall insertElementWithCollectionMarkerUpdate(OperationContext* opCtx, + const NamespaceString& nss, + 
CollectionTruncateMarkers& testMarkers, + int dataLength) { + auto records = + insertElementsWithCollectionMarkerUpdate(opCtx, nss, testMarkers, 1, dataLength); + return records.front(); + } + + RecordId insertWithSpecificTimestampAndRecordId(OperationContext* opCtx, + const NamespaceString& nss, + CollectionTruncateMarkers& testMarkers, + int dataLength, + Timestamp timestampToUse, + const RecordId& recordId) { + AutoGetCollection coll(opCtx, nss, MODE_IX); + const auto insertedData = std::string(dataLength, 'a'); + WriteUnitOfWork wuow(opCtx); + auto recordIdStatus = coll.getCollection()->getRecordStore()->insertRecord( + opCtx, recordId, insertedData.data(), insertedData.length(), timestampToUse); + ASSERT_OK(recordIdStatus); + ASSERT_EQ(recordIdStatus.getValue(), recordId); + auto now = Date_t::fromMillisSinceEpoch(timestampToUse.asInt64()); + testMarkers.updateCurrentMarkerAfterInsertOnCommit( + opCtx, insertedData.length(), recordId, now, 1); + wuow.commit(); + return recordId; + } + + void insertElements(OperationContext* opCtx, + const NamespaceString& nss, + int dataLength, + int numElements, + Timestamp timestampToUse) { + AutoGetCollection coll(opCtx, nss, MODE_IX); + const auto correctedSize = dataLength - + BSON("x" + << "") + .objsize(); + invariant(correctedSize >= 0); + const auto objToInsert = BSON("x" << std::string(correctedSize, 'a')); + WriteUnitOfWork wuow(opCtx); + for (int i = 0; i < numElements; i++) { + auto recordIdStatus = coll.getCollection()->getRecordStore()->insertRecord( + opCtx, objToInsert.objdata(), objToInsert.objsize(), timestampToUse); + ASSERT_OK(recordIdStatus); + } + wuow.commit(); + } +}; +class TestCollectionMarkersWithPartialExpiration final + : public CollectionTruncateMarkersWithPartialExpiration { +public: + TestCollectionMarkersWithPartialExpiration(int64_t leftoverRecordsCount, + int64_t leftoverRecordsBytes, + int64_t minBytesPerMarker) + : CollectionTruncateMarkersWithPartialExpiration( + {}, leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker){}; + + TestCollectionMarkersWithPartialExpiration(std::deque markers, + int64_t leftoverRecordsCount, + int64_t leftoverRecordsBytes, + int64_t minBytesPerMarker) + : CollectionTruncateMarkersWithPartialExpiration( + std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker){}; + + void setExpirePartialMarker(bool value) { + _expirePartialMarker = value; + } + +private: + bool _expirePartialMarker = false; + + virtual bool _hasExcessMarkers(OperationContext* opCtx) const override { + return !getMarkers().empty(); + } + + virtual bool _hasPartialMarkerExpired(OperationContext* opCtx) const override { + return _expirePartialMarker; + } +}; + +class TestCollectionMarkers final : public CollectionTruncateMarkers { +public: + TestCollectionMarkers(int64_t leftoverRecordsCount, + int64_t leftoverRecordsBytes, + int64_t minBytesPerMarker) + : CollectionTruncateMarkers( + {}, leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker){}; + + TestCollectionMarkers(std::deque markers, + int64_t leftoverRecordsCount, + int64_t leftoverRecordsBytes, + int64_t minBytesPerMarker) + : CollectionTruncateMarkers( + std::move(markers), leftoverRecordsCount, leftoverRecordsBytes, minBytesPerMarker){}; + +private: + virtual bool _hasExcessMarkers(OperationContext* opCtx) const override { + return !getMarkers().empty(); + } +}; + +template +void normalTest(CollectionMarkersTest* fixture, std::string collectionName) { + auto testMarkers = std::make_shared(0, 0, 0); + + auto opCtx = 
fixture->getClient()->makeOperationContext(); + + auto collNs = NamespaceString::createNamespaceString_forTest("test", collectionName); + ASSERT_OK(fixture->createCollection(opCtx.get(), collNs)); + + static constexpr auto dataLength = 4; + auto [insertedRecordId, now] = fixture->insertElementWithCollectionMarkerUpdate( + opCtx.get(), collNs, *testMarkers, dataLength); + + auto marker = testMarkers->peekOldestMarkerIfNeeded(opCtx.get()); + ASSERT_TRUE(marker); + ASSERT_EQ(marker->lastRecord, insertedRecordId); + ASSERT_EQ(marker->bytes, dataLength); + ASSERT_EQ(marker->wallTime, now); + ASSERT_EQ(marker->records, 1); + + testMarkers->popOldestMarker(); + + ASSERT_FALSE(testMarkers->peekOldestMarkerIfNeeded(opCtx.get())); +}; + +TEST_F(CollectionMarkersTest, NormalUsage) { + normalTest(this, "coll"); + normalTest(this, "partial_coll"); +} + +TEST_F(CollectionMarkersTest, NormalCollectionPartialMarkerUsage) { + auto testMarkers = std::make_shared(0, 0, 100); + + auto opCtx = getClient()->makeOperationContext(); + + auto collNs = NamespaceString::createNamespaceString_forTest("test", "coll"); + ASSERT_OK(createCollection(opCtx.get(), collNs)); + + static constexpr auto dataLength = 4; + auto [insertedRecordId, now] = + insertElementWithCollectionMarkerUpdate(opCtx.get(), collNs, *testMarkers, dataLength); + + ASSERT_FALSE(testMarkers->peekOldestMarkerIfNeeded(opCtx.get())); + + testMarkers->setExpirePartialMarker(false); + testMarkers->createPartialMarkerIfNecessary(opCtx.get()); + ASSERT_FALSE(testMarkers->peekOldestMarkerIfNeeded(opCtx.get())); + + testMarkers->setExpirePartialMarker(true); + testMarkers->createPartialMarkerIfNecessary(opCtx.get()); + auto marker = testMarkers->peekOldestMarkerIfNeeded(opCtx.get()); + ASSERT_TRUE(marker); + + ASSERT_EQ(marker->lastRecord, insertedRecordId); + ASSERT_EQ(marker->bytes, dataLength); + ASSERT_EQ(marker->wallTime, now); + ASSERT_EQ(marker->records, 1); +} + +// Insert records into a collection and verify the number of markers that are created. +template +void createNewMarkerTest(CollectionMarkersTest* fixture, std::string collectionName) { + auto testMarkers = std::make_shared(0, 0, 100); + + auto collNs = NamespaceString::createNamespaceString_forTest("test", collectionName); + { + auto opCtx = fixture->getClient()->makeOperationContext(); + ASSERT_OK(fixture->createCollection(opCtx.get(), collNs)); + } + + { + auto opCtx = fixture->getClient()->makeOperationContext(); + + ASSERT_EQ(0U, testMarkers->numMarkers_forTest()); + + // Inserting a record smaller than 'minBytesPerMarker' shouldn't create a new collection + // marker. + auto insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 99, Timestamp(1, 1), RecordId(1, 1)); + ASSERT_EQ(insertedRecordId, RecordId(1, 1)); + ASSERT_EQ(0U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(1, testMarkers->currentRecords_forTest()); + ASSERT_EQ(99, testMarkers->currentBytes_forTest()); + + // Inserting another record such that their combined size exceeds 'minBytesPerMarker' should + // cause a new marker to be created. 
+ insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 51, Timestamp(1, 2), RecordId(1, 2)); + ASSERT_EQ(insertedRecordId, RecordId(1, 2)); + ASSERT_EQ(1U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(0, testMarkers->currentRecords_forTest()); + ASSERT_EQ(0, testMarkers->currentBytes_forTest()); + + // Inserting a record such that the combined size of this record and the previously inserted + // one exceed 'minBytesPerMarker' shouldn't cause a new marker to be created because we've + // started filling a new marker. + insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 50, Timestamp(1, 3), RecordId(1, 3)); + ASSERT_EQ(insertedRecordId, RecordId(1, 3)); + ASSERT_EQ(1U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(1, testMarkers->currentRecords_forTest()); + ASSERT_EQ(50, testMarkers->currentBytes_forTest()); + + // Inserting a record such that the combined size of this record and the previously inserted + // one is exactly equal to 'minBytesPerMarker' should cause a new marker to be created. + insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 50, Timestamp(1, 4), RecordId(1, 4)); + ASSERT_EQ(insertedRecordId, RecordId(1, 4)); + ASSERT_EQ(2U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(0, testMarkers->currentRecords_forTest()); + ASSERT_EQ(0, testMarkers->currentBytes_forTest()); + + // Inserting a single record that exceeds 'minBytesPerMarker' should cause a new marker to + // be created. + insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 101, Timestamp(1, 5), RecordId(1, 5)); + ASSERT_EQ(insertedRecordId, RecordId(1, 5)); + ASSERT_EQ(3U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(0, testMarkers->currentRecords_forTest()); + ASSERT_EQ(0, testMarkers->currentBytes_forTest()); + } +} + +TEST_F(CollectionMarkersTest, CreateNewMarker) { + createNewMarkerTest(this, "coll"); + createNewMarkerTest(this, "partial_coll"); +} + +// Verify that a collection marker isn't created if it would cause the logical representation of the +// records to not be in increasing order. +template +void ascendingOrderTest(CollectionMarkersTest* fixture, std::string collectionName) { + auto testMarkers = std::make_shared(0, 0, 100); + + auto collNs = NamespaceString::createNamespaceString_forTest("test", collectionName); + { + auto opCtx = fixture->getClient()->makeOperationContext(); + ASSERT_OK(fixture->createCollection(opCtx.get(), collNs)); + } + + { + auto opCtx = fixture->getClient()->makeOperationContext(); + + ASSERT_EQ(0U, testMarkers->numMarkers_forTest()); + auto insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 50, Timestamp(2, 2), RecordId(2, 2)); + ASSERT_EQ(insertedRecordId, RecordId(2, 2)); + ASSERT_EQ(0U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(1, testMarkers->currentRecords_forTest()); + ASSERT_EQ(50, testMarkers->currentBytes_forTest()); + + // Inserting a record that has a smaller RecordId than the previously inserted record should + // be able to create a new marker when no markers already exist. 
+ insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 50, Timestamp(2, 1), RecordId(2, 1)); + ASSERT_EQ(insertedRecordId, RecordId(2, 1)); + ASSERT_EQ(1U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(0, testMarkers->currentRecords_forTest()); + ASSERT_EQ(0, testMarkers->currentBytes_forTest()); + + // However, inserting a record that has a smaller RecordId than most recently created + // marker's last record shouldn't cause a new marker to be created, even if the size of the + // inserted record exceeds 'minBytesPerMarker'. + insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 100, Timestamp(1, 1), RecordId(1, 1)); + ASSERT_EQ(insertedRecordId, RecordId(1, 1)); + ASSERT_EQ(1U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(1, testMarkers->currentRecords_forTest()); + ASSERT_EQ(100, testMarkers->currentBytes_forTest()); + + // Inserting a record that has a larger RecordId than the most recently created marker's + // last record should then cause a new marker to be created. + insertedRecordId = fixture->insertWithSpecificTimestampAndRecordId( + opCtx.get(), collNs, *testMarkers, 50, Timestamp(2, 3), RecordId(2, 3)); + ASSERT_EQ(insertedRecordId, RecordId(2, 3)); + ASSERT_EQ(2U, testMarkers->numMarkers_forTest()); + ASSERT_EQ(0, testMarkers->currentRecords_forTest()); + ASSERT_EQ(0, testMarkers->currentBytes_forTest()); + } +} +TEST_F(CollectionMarkersTest, AscendingOrder) { + ascendingOrderTest(this, "coll"); + ascendingOrderTest(this, "partial_coll"); +} + +// Test that initial marker creation works as expected when performing a scanning marker creation. +TEST_F(CollectionMarkersTest, ScanningMarkerCreation) { + static constexpr auto kNumElements = 51; + static constexpr auto kElementSize = 15; + static constexpr auto kMinBytes = (kElementSize * 2) - 1; + + auto collNs = NamespaceString::createNamespaceString_forTest("test", "coll"); + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_OK(createCollection(opCtx.get(), collNs)); + insertElements(opCtx.get(), collNs, kElementSize, kNumElements, Timestamp(1, 0)); + } + + { + auto opCtx = getClient()->makeOperationContext(); + + AutoGetCollection coll(opCtx.get(), collNs, MODE_IS); + + UnyieldableCollectionIterator iterator(opCtx.get(), coll->getRecordStore()); + + auto result = CollectionTruncateMarkers::createMarkersByScanning( + opCtx.get(), iterator, collNs, kMinBytes, [](const Record& record) { + return CollectionTruncateMarkers::RecordIdAndWallTime{record.id, Date_t::now()}; + }); + ASSERT_EQ(result.leftoverRecordsBytes, kElementSize); + ASSERT_EQ(result.leftoverRecordsCount, 1); + ASSERT_EQ(result.markers.size(), 51 / 2); + for (const auto& marker : result.markers) { + ASSERT_EQ(marker.bytes, kElementSize * 2); + ASSERT_EQ(marker.records, 2); + } + } +} + +// Test that initial marker creation works as expected when using sampling +TEST_F(CollectionMarkersTest, SamplingMarkerCreation) { + static constexpr auto kNumRounds = 200; + static constexpr auto kElementSize = 15; + + int totalBytes = 0; + int totalRecords = 0; + auto collNs = NamespaceString::createNamespaceString_forTest("test", "coll"); + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_OK(createCollection(opCtx.get(), collNs)); + // Add documents of various sizes + for (int round = 0; round < kNumRounds; round++) { + for (int numBytes = kElementSize; numBytes < kElementSize * 2; numBytes++) { + insertElements(opCtx.get(), collNs, numBytes, 1, 
Timestamp(1, 0)); + totalRecords++; + totalBytes += numBytes; + } + } + } + + { + auto opCtx = getClient()->makeOperationContext(); + + AutoGetCollection coll(opCtx.get(), collNs, MODE_IS); + + static constexpr auto kNumMarkers = 15; + auto kMinBytesPerMarker = totalBytes / kNumMarkers; + auto kRecordsPerMarker = totalRecords / kNumMarkers; + + UnyieldableCollectionIterator iterator(opCtx.get(), coll->getRecordStore()); + + auto result = CollectionTruncateMarkers::createFromCollectionIterator( + opCtx.get(), iterator, collNs, kMinBytesPerMarker, [](const Record& record) { + return CollectionTruncateMarkers::RecordIdAndWallTime{record.id, Date_t::now()}; + }); + + ASSERT_EQ(result.methodUsed, CollectionTruncateMarkers::MarkersCreationMethod::Sampling); + const auto& firstMarker = result.markers.front(); + auto recordCount = firstMarker.records; + auto recordBytes = firstMarker.bytes; + ASSERT_EQ(result.leftoverRecordsBytes, totalBytes % kMinBytesPerMarker); + ASSERT_EQ(result.leftoverRecordsCount, totalRecords % kRecordsPerMarker); + ASSERT_GT(recordCount, 0); + ASSERT_GT(recordBytes, 0); + ASSERT_EQ(result.markers.size(), kNumMarkers); + for (const auto& marker : result.markers) { + ASSERT_EQ(marker.bytes, recordBytes); + ASSERT_EQ(marker.records, recordCount); + } + + ASSERT_EQ(recordBytes * kNumMarkers + result.leftoverRecordsBytes, totalBytes); + ASSERT_EQ(recordCount * kNumMarkers + result.leftoverRecordsCount, totalRecords); + } +} + +// Tests that auto yielding with query plan iterators works +TEST_F(CollectionMarkersTest, ScanningAutoYieldingWorks) { + // Manually set the yielding parameters to make the yield count computation simpler. + RAIIServerParameterControllerForTest queryYieldMs("internalQueryExecYieldPeriodMS", + 1'000 * 3'600); + RAIIServerParameterControllerForTest queryYieldDocsRead("internalQueryExecYieldIterations", + 1'000); + + static constexpr auto kNumElements = 5001; + static constexpr auto kElementSize = 15; + static constexpr auto kMinBytes = (kElementSize * 2) - 1; + + auto collNs = NamespaceString::createNamespaceString_forTest("test", "coll"); + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_OK(createCollection(opCtx.get(), collNs)); + insertElements(opCtx.get(), collNs, kElementSize, kNumElements, Timestamp(1, 0)); + } + + { + auto opCtx = getClient()->makeOperationContext(); + + AutoGetCollection coll(opCtx.get(), collNs, MODE_IS); + + YieldableCollectionIterator iterator(opCtx.get(), &coll.getCollection()); + + auto result = CollectionTruncateMarkers::createMarkersByScanning( + opCtx.get(), iterator, collNs, kMinBytes, [](const Record& record) { + return CollectionTruncateMarkers::RecordIdAndWallTime{record.id, Date_t::now()}; + }); + ASSERT_EQ(result.leftoverRecordsBytes, kElementSize); + ASSERT_EQ(result.leftoverRecordsCount, 1); + ASSERT_EQ(result.markers.size(), kNumElements / 2); + for (const auto& marker : result.markers) { + ASSERT_EQ(marker.bytes, kElementSize * 2); + ASSERT_EQ(marker.records, 2); + } + + ASSERT_EQ(CurOp::get(opCtx.get())->numYields(), + kNumElements / internalQueryExecYieldIterations.load()); + } +} + +// Tests that auto yielding with query plan iterators works +TEST_F(CollectionMarkersTest, SamplingAutoYieldingWorks) { + // Manually set the yielding parameters to make the yield count computation simpler. 
+ RAIIServerParameterControllerForTest queryYieldMs("internalQueryExecYieldPeriodMS", + 1'000 * 3'600); + RAIIServerParameterControllerForTest queryYieldDocsRead("internalQueryExecYieldIterations", + 1'000); + + static constexpr auto kNumRounds = 5000; + static constexpr auto kElementSize = 15; + static constexpr auto kNumElements = kElementSize * kNumRounds; + static constexpr auto kNumElementsToSample = + kNumElements / 20; // We only sample 5% of the collection. + + int totalBytes = 0; + int totalRecords = 0; + auto collNs = NamespaceString::createNamespaceString_forTest("test", "coll"); + { + auto opCtx = getClient()->makeOperationContext(); + ASSERT_OK(createCollection(opCtx.get(), collNs)); + // Add documents of various sizes + for (int numBytes = kElementSize; numBytes < kElementSize * 2; numBytes++) { + insertElements(opCtx.get(), collNs, numBytes, kNumRounds, Timestamp(1, 0)); + totalRecords += kNumRounds; + totalBytes += numBytes * kNumRounds; + } + } + + ASSERT_EQ(totalRecords, kNumElements); + + { + auto opCtx = getClient()->makeOperationContext(); + + AutoGetCollection coll(opCtx.get(), collNs, MODE_IS); + + static constexpr auto kNumMarkers = 300; + auto kMinBytesPerMarker = totalBytes / kNumMarkers; + auto kRecordsPerMarker = totalRecords / kNumMarkers; + + YieldableCollectionIterator iterator(opCtx.get(), &coll.getCollection()); + + auto result = CollectionTruncateMarkers::createFromCollectionIterator( + opCtx.get(), iterator, collNs, kMinBytesPerMarker, [](const Record& record) { + return CollectionTruncateMarkers::RecordIdAndWallTime{record.id, Date_t::now()}; + }); + + ASSERT_EQ(result.methodUsed, CollectionTruncateMarkers::MarkersCreationMethod::Sampling); + const auto& firstMarker = result.markers.front(); + auto recordCount = firstMarker.records; + auto recordBytes = firstMarker.bytes; + ASSERT_EQ(result.leftoverRecordsBytes, totalBytes % kMinBytesPerMarker); + ASSERT_EQ(result.leftoverRecordsCount, totalRecords % kRecordsPerMarker); + ASSERT_GT(recordCount, 0); + ASSERT_GT(recordBytes, 0); + ASSERT_EQ(result.markers.size(), kNumMarkers); + for (const auto& marker : result.markers) { + ASSERT_EQ(marker.bytes, recordBytes); + ASSERT_EQ(marker.records, recordCount); + } + + ASSERT_EQ(recordBytes * kNumMarkers + result.leftoverRecordsBytes, totalBytes); + ASSERT_EQ(recordCount * kNumMarkers + result.leftoverRecordsCount, totalRecords); + + ASSERT_EQ(CurOp::get(opCtx.get())->numYields(), + kNumElementsToSample / internalQueryExecYieldIterations.load()); + } +} +} // namespace mongo diff --git a/src/mongo/db/storage/control/journal_flusher.cpp b/src/mongo/db/storage/control/journal_flusher.cpp index ebfc1c2718ae8..cab28dd8c33b7 100644 --- a/src/mongo/db/storage/control/journal_flusher.cpp +++ b/src/mongo/db/storage/control/journal_flusher.cpp @@ -28,18 +28,34 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/control/journal_flusher.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" +#include "mongo/db/storage/control/journal_flusher.h" #include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/future.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include 
"mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -80,6 +96,12 @@ void JournalFlusher::run() { ThreadClient tc(name(), getGlobalServiceContext()); LOGV2_DEBUG(4584701, 1, "starting {name} thread", "name"_attr = name()); + // TODO(SERVER-74657): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + // The thread must not run and access the service context to create an opCtx while unit test // infrastructure is still being set up and expects sole access to the service context (there is // no conurrency control on the service context during this phase). diff --git a/src/mongo/db/storage/control/journal_flusher.h b/src/mongo/db/storage/control/journal_flusher.h index d80d5035f3c0f..f838087a51bc6 100644 --- a/src/mongo/db/storage/control/journal_flusher.h +++ b/src/mongo/db/storage/control/journal_flusher.h @@ -29,10 +29,18 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/util/background.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { diff --git a/src/mongo/db/storage/control/storage_control.cpp b/src/mongo/db/storage/control/storage_control.cpp index 8cef9f2a2de7b..c2e4d30b4210f 100644 --- a/src/mongo/db/storage/control/storage_control.cpp +++ b/src/mongo/db/storage/control/storage_control.cpp @@ -28,16 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/storage/control/storage_control.h" +#include +#include "mongo/base/status.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/checkpointer.h" #include "mongo/db/storage/control/journal_flusher.h" +#include "mongo/db/storage/control/storage_control.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_options.h" -#include "mongo/logv2/log.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/deferred_drop_record_store.cpp b/src/mongo/db/storage/deferred_drop_record_store.cpp index 6abc449cb6064..296c1dd08efff 100644 --- a/src/mongo/db/storage/deferred_drop_record_store.cpp +++ b/src/mongo/db/storage/deferred_drop_record_store.cpp @@ -30,8 +30,14 @@ #include "mongo/db/storage/deferred_drop_record_store.h" +#include "mongo/base/status.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/storage/ident.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/deferred_drop_record_store.h b/src/mongo/db/storage/deferred_drop_record_store.h index 6d0ca1e799157..c8a9f7cf04927 100644 --- a/src/mongo/db/storage/deferred_drop_record_store.h +++ b/src/mongo/db/storage/deferred_drop_record_store.h @@ -29,9 
+29,12 @@ #pragma once -#include "mongo/db/storage/temporary_record_store.h" +#include +#include +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/temporary_record_store.h" namespace mongo { diff --git a/src/mongo/db/storage/devnull/SConscript b/src/mongo/db/storage/devnull/SConscript index 55f9caf314d87..1ecff9670e0d3 100644 --- a/src/mongo/db/storage/devnull/SConscript +++ b/src/mongo/db/storage/devnull/SConscript @@ -19,6 +19,7 @@ env.Library( '$BUILD_DIR/mongo/db/catalog/collection_options', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/storage/backup_block', + '$BUILD_DIR/mongo/util/namespace_string_database_name_util', ], ) @@ -28,7 +29,7 @@ env.Library( 'devnull_init.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/storage_engine_impl', 'storage_devnull_core', ], diff --git a/src/mongo/db/storage/devnull/devnull_init.cpp b/src/mongo/db/storage/devnull/devnull_init.cpp index 88cc9cac0b62b..237343d5e4359 100644 --- a/src/mongo/db/storage/devnull/devnull_init.cpp +++ b/src/mongo/db/storage/devnull/devnull_init.cpp @@ -27,11 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/devnull/devnull_kv_engine.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_impl.h" #include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/storage/storage_engine_lock_file.h" diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp index e0877397d8454..3e6a94c1cb715 100644 --- a/src/mongo/db/storage/devnull/devnull_kv_engine.cpp +++ b/src/mongo/db/storage/devnull/devnull_kv_engine.cpp @@ -27,15 +27,30 @@ * it in the license file. 
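
Two of the hunks above opt background work out of stepdown kills: the JournalFlusher thread directly, and the DiskSpaceMonitor periodic job later in this diff via its `isKillableByStepdown` flag. The thread-side pattern, as a minimal in-tree sketch with an illustrative thread name:

```cpp
// Sketch of the pattern used in JournalFlusher::run() above; the Client must be
// locked while the flag is flipped. The thread name here is illustrative.
ThreadClient tc("MyBackgroundWorker", getGlobalServiceContext());
{
    stdx::lock_guard lk(*tc.get());
    tc.get()->setSystemOperationUnkillableByStepdown(lk);
}
```
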
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/devnull/devnull_kv_engine.h" - +#include +#include #include - +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/catalog/validate_results.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/devnull/devnull_kv_engine.h" #include "mongo/db/storage/devnull/ephemeral_catalog_record_store.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -61,7 +76,7 @@ class EmptyRecordCursor final : public SeekableRecordCursor { class DevNullRecordStore : public RecordStore { public: - DevNullRecordStore(StringData ns, + DevNullRecordStore(const NamespaceString& nss, boost::optional uuid, StringData identName, const CollectionOptions& options, @@ -69,7 +84,7 @@ class DevNullRecordStore : public RecordStore { : RecordStore(uuid, identName, options.capped), _options(options), _keyFormat(keyFormat), - _ns(ns.toString()) { + _ns(nss) { _numInserts = 0; _dummy = BSON("_id" << 1); } @@ -78,7 +93,7 @@ class DevNullRecordStore : public RecordStore { return "devnull"; } - virtual std::string ns(OperationContext* opCtx) const override { + virtual NamespaceString ns(OperationContext* opCtx) const override { return _ns; } @@ -193,7 +208,7 @@ class DevNullRecordStore : public RecordStore { KeyFormat _keyFormat; long long _numInserts; BSONObj _dummy; - std::string _ns; + NamespaceString _ns; }; class DevNullSortedDataBuilderInterface : public SortedDataBuilderInterface { @@ -203,7 +218,7 @@ class DevNullSortedDataBuilderInterface : public SortedDataBuilderInterface { public: DevNullSortedDataBuilderInterface() {} - virtual Status addKey(const KeyString::Value& keyString) { + virtual Status addKey(const key_string::Value& keyString) { return Status::OK(); } }; @@ -212,7 +227,7 @@ class DevNullSortedDataInterface : public SortedDataInterface { public: DevNullSortedDataInterface(StringData identName) : SortedDataInterface(identName, - KeyString::Version::kLatestVersion, + key_string::Version::kLatestVersion, Ordering::make(BSONObj()), KeyFormat::Long) {} @@ -224,22 +239,22 @@ class DevNullSortedDataInterface : public SortedDataInterface { } virtual Status insert(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId) { return Status::OK(); } virtual void unindex(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) {} - virtual Status dupKeyCheck(OperationContext* opCtx, const KeyString::Value& keyString) { + virtual Status dupKeyCheck(OperationContext* opCtx, const key_string::Value& keyString) { return Status::OK(); } virtual boost::optional findLoc(OperationContext* opCtx, - const KeyString::Value& keyString) const override { + const key_string::Value& keyString) const override { return boost::none; } @@ -270,7 +285,7 @@ class DevNullSortedDataInterface : public SortedDataInterface { } virtual void printIndexEntryMetadata(OperationContext* opCtx, - const KeyString::Value& keyString) const {} + const 
key_string::Value& keyString) const {} virtual std::unique_ptr newCursor(OperationContext* opCtx, bool isForward) const { @@ -282,7 +297,7 @@ class DevNullSortedDataInterface : public SortedDataInterface { } void insertWithRecordIdInValue_forTest(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, RecordId rid) override { MONGO_UNREACHABLE; } @@ -290,8 +305,9 @@ class DevNullSortedDataInterface : public SortedDataInterface { DevNullKVEngine::DevNullKVEngine() { _mockBackupBlocks.push_back(BackupBlock(/*opCtx=*/nullptr, + /*nss=*/boost::none, + /*uuid=*/boost::none, "filename.wt", - /*identToNamespaceAndUUIDMap=*/{}, /*checkpointTimestamp=*/boost::none)); } @@ -301,17 +317,16 @@ std::unique_ptr DevNullKVEngine::getRecordStore(OperationContext* o const CollectionOptions& options) { if (ident == "_mdb_catalog") { return std::make_unique( - nss.ns(), options.uuid, ident, &_catalogInfo); + nss, options.uuid, ident, &_catalogInfo); } - return std::make_unique( - nss.ns(), options.uuid, ident, options, KeyFormat::Long); + return std::make_unique(nss, options.uuid, ident, options, KeyFormat::Long); } std::unique_ptr DevNullKVEngine::makeTemporaryRecordStore(OperationContext* opCtx, StringData ident, KeyFormat keyFormat) { return std::make_unique( - "" /* ns */, boost::none /* uuid */, ident, CollectionOptions(), keyFormat); + NamespaceString() /* ns */, boost::none /* uuid */, ident, CollectionOptions(), keyFormat); } std::unique_ptr DevNullKVEngine::getSortedDataInterface( @@ -339,6 +354,9 @@ class StreamingCursorImpl : public StorageEngine::StreamingCursor { return BSONObj(); } + void setCatalogEntries(const stdx::unordered_map>& + identsToNsAndUUID) {} + StatusWith> getNextBatch(OperationContext* opCtx, const std::size_t batchSize) { if (_exhaustCursor) { diff --git a/src/mongo/db/storage/devnull/devnull_kv_engine.h b/src/mongo/db/storage/devnull/devnull_kv_engine.h index c70c1ec816bca..dae1f2e1b96c8 100644 --- a/src/mongo/db/storage/devnull/devnull_kv_engine.h +++ b/src/mongo/db/storage/devnull/devnull_kv_engine.h @@ -29,11 +29,34 @@ #pragma once +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/storage/backup_block.h" +#include "mongo/db/storage/column_store.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/recovery_unit_noop.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp index 819e3cd6579c0..12d6881b0952c 100644 --- a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp +++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.cpp @@ -30,14 +30,27 @@ #include "mongo/db/storage/devnull/ephemeral_catalog_record_store.h" +#include +#include +#include +#include #include +#include +#include +#include -#include "mongo/db/jsobj.h" 
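
The DevNullRecordStore changes above follow the wider `RecordStore::ns()` signature change in this diff: callers now receive a structured `NamespaceString` instead of a raw `std::string`. A hypothetical caller sketch (the function name is illustrative):

```cpp
// Hypothetical caller: with ns() returning NamespaceString, namespace checks go
// through the type (e.g. isOplog()) instead of ad-hoc string parsing.
bool recordStoreBacksOplog(OperationContext* opCtx, const RecordStore& rs) {
    return rs.ns(opCtx).isOplog();
}
```
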
+#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/record_id_helpers.h" #include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -261,15 +274,18 @@ class EphemeralForTestRecordStore::ReverseCursor final : public SeekableRecordCu // RecordStore // -EphemeralForTestRecordStore::EphemeralForTestRecordStore(StringData ns, +EphemeralForTestRecordStore::EphemeralForTestRecordStore(const NamespaceString& ns, boost::optional uuid, StringData identName, std::shared_ptr* dataInOut, bool isCapped) : RecordStore(uuid, identName, isCapped), _isCapped(isCapped), - _data(*dataInOut ? static_cast(dataInOut->get()) - : new Data(ns, NamespaceString::oplog(ns))) { + _data(*dataInOut ? static_cast(dataInOut->get()) : new Data(ns, ns.isOplog())) { + // TODO SERVER-78731 We should remove `ns` in the line above. + // NOTE : The static_cast here assumes that `dataInOut`, which is a void pointer, contains a + // NamespaceString object. As of now, DevNullKVEngine constructs a EphemeralForTestRecordStore + // by passing `_catalogInfo` to this method. if (!*dataInOut) { dataInOut->reset(_data); // takes ownership } diff --git a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h index cf0f4d184153e..192baf4d4ffe1 100644 --- a/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h +++ b/src/mongo/db/storage/devnull/ephemeral_catalog_record_store.h @@ -29,12 +29,33 @@ #pragma once +#include +#include #include +#include +#include +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -45,7 +66,7 @@ namespace mongo { */ class EphemeralForTestRecordStore : public RecordStore { public: - explicit EphemeralForTestRecordStore(StringData ns, + explicit EphemeralForTestRecordStore(const NamespaceString& ns, boost::optional uuid, StringData identName, std::shared_ptr* dataInOut, @@ -127,7 +148,7 @@ class EphemeralForTestRecordStore : public RecordStore { std::vector* out, size_t nRecords) final{}; - std::string ns(OperationContext* opCtx) const final { + NamespaceString ns(OperationContext* opCtx) const final { return _data->ns; } @@ -176,14 +197,14 @@ class EphemeralForTestRecordStore : public RecordStore { // This is the "persistent" data. 
struct Data { - Data(StringData nss, bool isOplog) + Data(const NamespaceString& nss, bool isOplog) : dataSize(0), recordsMutex(), nextId(1), ns(nss), isOplog(isOplog) {} int64_t dataSize; stdx::recursive_mutex recordsMutex; Records records; int64_t nextId; - std::string ns; + NamespaceString ns; const bool isOplog; }; diff --git a/src/mongo/db/storage/disk_space_monitor.cpp b/src/mongo/db/storage/disk_space_monitor.cpp index b8c4e82eb6fd7..7a33f8a25d493 100644 --- a/src/mongo/db/storage/disk_space_monitor.cpp +++ b/src/mongo/db/storage/disk_space_monitor.cpp @@ -31,12 +31,23 @@ #include "mongo/db/storage/disk_space_monitor.h" +#include +#include + +#include + +#include "mongo/base/status.h" #include "mongo/db/client.h" #include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/storage/disk_space_util.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_options.h" -#include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/logv2/log.h" -#include "mongo/util/fail_point.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" namespace mongo { @@ -44,31 +55,10 @@ CounterMetric monitorPasses("diskSpaceMonitor.passes"); CounterMetric tookAction("diskSpaceMonitor.tookAction"); namespace { -MONGO_FAIL_POINT_DEFINE(simulateAvailableDiskSpace); - static const auto _decoration = ServiceContext::declareDecoration(); -int64_t getAvailableDiskSpaceBytes(const std::string& path) { - boost::filesystem::path fsPath(path); - boost::system::error_code ec; - boost::filesystem::space_info spaceInfo = boost::filesystem::space(fsPath, ec); - if (ec) { - LOGV2(7333403, - "Failed to query filesystem disk stats", - "error"_attr = ec.message(), - "errorCode"_attr = ec.value()); - // We don't want callers to take any action if we can't collect stats. - return std::numeric_limits::max(); - } - return static_cast(spaceInfo.available); -} } // namespace void DiskSpaceMonitor::start(ServiceContext* svcCtx) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (!feature_flags::gIndexBuildGracefulErrorHandling.isEnabledAndIgnoreFCVUnsafe()) { - return; - } - auto storageEngine = svcCtx->getStorageEngine(); const bool filesNotAllInSameDirectory = storageEngine->isUsingDirectoryPerDb() || storageEngine->isUsingDirectoryForIndexes(); @@ -93,8 +83,13 @@ DiskSpaceMonitor* DiskSpaceMonitor::get(ServiceContext* svcCtx) { void DiskSpaceMonitor::_start(ServiceContext* svcCtx) { LOGV2(7333401, "Starting the DiskSpaceMonitor"); invariant(!_job, "DiskSpaceMonitor is already started"); + _dbpath = storageGlobalParams.dbpath; _job = svcCtx->getPeriodicRunner()->makeJob(PeriodicRunner::PeriodicJob{ - "DiskSpaceMonitor", [this](Client* client) { _run(client); }, Seconds(1)}); + "DiskSpaceMonitor", + [this](Client* client) { _run(client); }, + Seconds(1), + // TODO(SERVER-74657): Please revisit if this periodic job could be made killable. 
+ false /*isKillableByStepdown*/}); _job.start(); } @@ -125,12 +120,7 @@ void DiskSpaceMonitor::takeAction(OperationContext* opCtx, int64_t availableByte void DiskSpaceMonitor::_run(Client* client) try { auto opCtx = client->makeOperationContext(); - const auto availableBytes = []() { - if (auto fp = simulateAvailableDiskSpace.scoped(); fp.isActive()) { - return static_cast(fp.getData()["bytes"].numberLong()); - } - return getAvailableDiskSpaceBytes(storageGlobalParams.dbpath); - }(); + const auto availableBytes = getAvailableDiskSpaceBytesInDbPath(_dbpath); LOGV2_DEBUG(7333405, 2, "Available disk space", "bytes"_attr = availableBytes); takeAction(opCtx.get(), availableBytes); monitorPasses.increment(); diff --git a/src/mongo/db/storage/disk_space_monitor.h b/src/mongo/db/storage/disk_space_monitor.h index 1045e65805489..2b29777c1013d 100644 --- a/src/mongo/db/storage/disk_space_monitor.h +++ b/src/mongo/db/storage/disk_space_monitor.h @@ -30,8 +30,14 @@ #pragma once #include +#include +#include +#include +#include +#include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/util/periodic_runner.h" namespace mongo { @@ -84,6 +90,8 @@ class DiskSpaceMonitor final { PeriodicJobAnchor _job; + // Copy of the dbpath which is always safe to access. + std::string _dbpath; // This mutex protects _actions and the entire run loop of the disk space monitor. Mutex _mutex = MONGO_MAKE_LATCH("DiskSpaceMonitor::_mutex"); std::vector> _actions; diff --git a/src/mongo/db/storage/disk_space_monitor_test.cpp b/src/mongo/db/storage/disk_space_monitor_test.cpp index 48b13c2cdfb32..18d376db809d4 100644 --- a/src/mongo/db/storage/disk_space_monitor_test.cpp +++ b/src/mongo/db/storage/disk_space_monitor_test.cpp @@ -27,10 +27,13 @@ * it in the license file. */ -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/db/storage/disk_space_monitor.h" +#include -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/storage/disk_space_monitor.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/disk_space_util.cpp b/src/mongo/db/storage/disk_space_util.cpp new file mode 100644 index 0000000000000..30fbba19016d0 --- /dev/null +++ b/src/mongo/db/storage/disk_space_util.cpp @@ -0,0 +1,76 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. 
If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include "mongo/db/storage/disk_space_util.h" + +#include + +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/fail_point.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage + +namespace mongo { + +namespace { +MONGO_FAIL_POINT_DEFINE(simulateAvailableDiskSpace); +} + +int64_t getAvailableDiskSpaceBytes(const std::string& path) { + boost::filesystem::path fsPath(path); + boost::system::error_code ec; + boost::filesystem::space_info spaceInfo = boost::filesystem::space(fsPath, ec); + if (ec) { + LOGV2(7333403, + "Failed to query filesystem disk stats", + "error"_attr = ec.message(), + "errorCode"_attr = ec.value()); + // We don't want callers to take any action if we can't collect stats. + return std::numeric_limits::max(); + } + return static_cast(spaceInfo.available); +} + +int64_t getAvailableDiskSpaceBytesInDbPath(const std::string& dbpath) { + if (auto fp = simulateAvailableDiskSpace.scoped(); fp.isActive()) { + return static_cast(fp.getData()["bytes"].numberLong()); + } + return getAvailableDiskSpaceBytes(dbpath); +} + +} // namespace mongo diff --git a/src/mongo/db/storage/disk_space_util.h b/src/mongo/db/storage/disk_space_util.h new file mode 100644 index 0000000000000..1a91f1912dfae --- /dev/null +++ b/src/mongo/db/storage/disk_space_util.h @@ -0,0 +1,44 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
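
The helper factored out into disk_space_util.cpp above boils down to a filesystem space query with conservative error handling. A standalone sketch of the same shape using std::filesystem (the patch itself uses the boost API):

```cpp
// Standalone sketch mirroring getAvailableDiskSpaceBytes() above, but written
// against std::filesystem so it compiles outside the server tree. On any error it
// reports "effectively unlimited" space so callers never take action spuriously.
#include <cstdint>
#include <filesystem>
#include <iostream>
#include <limits>
#include <string>
#include <system_error>

int64_t availableBytesOrMax(const std::string& path) {
    std::error_code ec;
    const std::filesystem::space_info info = std::filesystem::space(path, ec);
    if (ec) {
        return std::numeric_limits<int64_t>::max();
    }
    return static_cast<int64_t>(info.available);
}

int main() {
    std::cout << availableBytesOrMax(".") << " bytes available\n";
}
```
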
+ */ + +#pragma once + +#include +#include + +#include "mongo/platform/basic.h" + +namespace mongo { + +// This takes the dbpath as an input because storageGlobalParams.dbpath isn't always safe +// to access; it is up to the caller to ensure that the correct path is passed and it is +// safe to access. +int64_t getAvailableDiskSpaceBytesInDbPath(const std::string& dbpath); + +} // namespace mongo diff --git a/src/mongo/db/storage/duplicate_key_error_info.cpp b/src/mongo/db/storage/duplicate_key_error_info.cpp index f2249287e501d..dd505ae43d6b2 100644 --- a/src/mongo/db/storage/duplicate_key_error_info.cpp +++ b/src/mongo/db/storage/duplicate_key_error_info.cpp @@ -27,16 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/storage/duplicate_key_error_info.h" +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/util/assert_util.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/stdx/variant.h" #include "mongo/util/hex.h" -#include "mongo/util/overloaded_visitor.h" -#include "mongo/util/text.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/text.h" // IWYU pragma: keep namespace mongo { namespace { diff --git a/src/mongo/db/storage/duplicate_key_error_info.h b/src/mongo/db/storage/duplicate_key_error_info.h index 1047530367051..80ca8b3e25f4d 100644 --- a/src/mongo/db/storage/duplicate_key_error_info.h +++ b/src/mongo/db/storage/duplicate_key_error_info.h @@ -29,6 +29,12 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" diff --git a/src/mongo/db/storage/durable_catalog.cpp b/src/mongo/db/storage/durable_catalog.cpp new file mode 100644 index 0000000000000..7b94b87cae8c4 --- /dev/null +++ b/src/mongo/db/storage/durable_catalog.cpp @@ -0,0 +1,832 @@ +/** + * Copyright (C) 2018-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. 
If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine_interface.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage + +namespace mongo { +namespace { +// Does not escape letters, digits, '.', or '_'. +// Otherwise escapes to a '.' followed by a zero-filled 2- or 3-digit decimal number. +// Note that this escape table does not produce a 1:1 mapping to and from dbname, and +// collisions are possible. +// For example: +// "db.123", "db\0143", and "db\073" all escape to "db.123". +// {'d','b','1','2','3'} => "d" + "b" + "." 
+ "1" + "2" + "3" => "db.123" +// {'d','b','\x0c','3'} => "d" + "b" + ".12" + "3" => "db.123" +// {'d','b','\x3b'} => "d" + "b" + ".123" => "db.123" +constexpr std::array escapeTable = { + ".00"_sd, ".01"_sd, ".02"_sd, ".03"_sd, ".04"_sd, ".05"_sd, ".06"_sd, ".07"_sd, + ".08"_sd, ".09"_sd, ".10"_sd, ".11"_sd, ".12"_sd, ".13"_sd, ".14"_sd, ".15"_sd, + ".16"_sd, ".17"_sd, ".18"_sd, ".19"_sd, ".20"_sd, ".21"_sd, ".22"_sd, ".23"_sd, + ".24"_sd, ".25"_sd, ".26"_sd, ".27"_sd, ".28"_sd, ".29"_sd, ".30"_sd, ".31"_sd, + ".32"_sd, ".33"_sd, ".34"_sd, ".35"_sd, ".36"_sd, ".37"_sd, ".38"_sd, ".39"_sd, + ".40"_sd, ".41"_sd, ".42"_sd, ".43"_sd, ".44"_sd, ".45"_sd, "."_sd, ".47"_sd, + "0"_sd, "1"_sd, "2"_sd, "3"_sd, "4"_sd, "5"_sd, "6"_sd, "7"_sd, + "8"_sd, "9"_sd, ".58"_sd, ".59"_sd, ".60"_sd, ".61"_sd, ".62"_sd, ".63"_sd, + ".64"_sd, "A"_sd, "B"_sd, "C"_sd, "D"_sd, "E"_sd, "F"_sd, "G"_sd, + "H"_sd, "I"_sd, "J"_sd, "K"_sd, "L"_sd, "M"_sd, "N"_sd, "O"_sd, + "P"_sd, "Q"_sd, "R"_sd, "S"_sd, "T"_sd, "U"_sd, "V"_sd, "W"_sd, + "X"_sd, "Y"_sd, "Z"_sd, ".91"_sd, ".92"_sd, ".93"_sd, ".94"_sd, "_"_sd, + ".96"_sd, "a"_sd, "b"_sd, "c"_sd, "d"_sd, "e"_sd, "f"_sd, "g"_sd, + "h"_sd, "i"_sd, "j"_sd, "k"_sd, "l"_sd, "m"_sd, "n"_sd, "o"_sd, + "p"_sd, "q"_sd, "r"_sd, "s"_sd, "t"_sd, "u"_sd, "v"_sd, "w"_sd, + "x"_sd, "y"_sd, "z"_sd, ".123"_sd, ".124"_sd, ".125"_sd, ".126"_sd, ".127"_sd, + ".128"_sd, ".129"_sd, ".130"_sd, ".131"_sd, ".132"_sd, ".133"_sd, ".134"_sd, ".135"_sd, + ".136"_sd, ".137"_sd, ".138"_sd, ".139"_sd, ".140"_sd, ".141"_sd, ".142"_sd, ".143"_sd, + ".144"_sd, ".145"_sd, ".146"_sd, ".147"_sd, ".148"_sd, ".149"_sd, ".150"_sd, ".151"_sd, + ".152"_sd, ".153"_sd, ".154"_sd, ".155"_sd, ".156"_sd, ".157"_sd, ".158"_sd, ".159"_sd, + ".160"_sd, ".161"_sd, ".162"_sd, ".163"_sd, ".164"_sd, ".165"_sd, ".166"_sd, ".167"_sd, + ".168"_sd, ".169"_sd, ".170"_sd, ".171"_sd, ".172"_sd, ".173"_sd, ".174"_sd, ".175"_sd, + ".176"_sd, ".177"_sd, ".178"_sd, ".179"_sd, ".180"_sd, ".181"_sd, ".182"_sd, ".183"_sd, + ".184"_sd, ".185"_sd, ".186"_sd, ".187"_sd, ".188"_sd, ".189"_sd, ".190"_sd, ".191"_sd, + ".192"_sd, ".193"_sd, ".194"_sd, ".195"_sd, ".196"_sd, ".197"_sd, ".198"_sd, ".199"_sd, + ".200"_sd, ".201"_sd, ".202"_sd, ".203"_sd, ".204"_sd, ".205"_sd, ".206"_sd, ".207"_sd, + ".208"_sd, ".209"_sd, ".210"_sd, ".211"_sd, ".212"_sd, ".213"_sd, ".214"_sd, ".215"_sd, + ".216"_sd, ".217"_sd, ".218"_sd, ".219"_sd, ".220"_sd, ".221"_sd, ".222"_sd, ".223"_sd, + ".224"_sd, ".225"_sd, ".226"_sd, ".227"_sd, ".228"_sd, ".229"_sd, ".230"_sd, ".231"_sd, + ".232"_sd, ".233"_sd, ".234"_sd, ".235"_sd, ".236"_sd, ".237"_sd, ".238"_sd, ".239"_sd, + ".240"_sd, ".241"_sd, ".242"_sd, ".243"_sd, ".244"_sd, ".245"_sd, ".246"_sd, ".247"_sd, + ".248"_sd, ".249"_sd, ".250"_sd, ".251"_sd, ".252"_sd, ".253"_sd, ".254"_sd, ".255"_sd}; + +std::string escapeDbName(StringData dbname) { + std::string escaped; + escaped.reserve(dbname.size()); + for (unsigned char c : dbname) { + StringData ce = escapeTable[c]; + escaped.append(ce.begin(), ce.end()); + } + return escaped; +} + +} // namespace + +class DurableCatalog::AddIdentChange : public RecoveryUnit::Change { +public: + AddIdentChange(DurableCatalog* catalog, RecordId catalogId) + : _catalog(catalog), _catalogId(std::move(catalogId)) {} + + void commit(OperationContext* opCtx, boost::optional) {} + void rollback(OperationContext* opCtx) { + stdx::lock_guard lk(_catalog->_catalogIdToEntryMapLock); + _catalog->_catalogIdToEntryMap.erase(_catalogId); + } + +private: + DurableCatalog* const _catalog; + const 
RecordId _catalogId; +}; + +DurableCatalog::DurableCatalog(RecordStore* rs, + bool directoryPerDb, + bool directoryForIndexes, + StorageEngineInterface* engine) + : _rs(rs), + _directoryPerDb(directoryPerDb), + _directoryForIndexes(directoryForIndexes), + _rand(_newRand()), + _next(0), + _engine(engine) {} + +bool DurableCatalog::_hasEntryCollidingWithRand(WithLock) const { + stdx::lock_guard lk(_catalogIdToEntryMapLock); + for (auto it = _catalogIdToEntryMap.begin(); it != _catalogIdToEntryMap.end(); ++it) { + if (StringData(it->second.ident).endsWith(_rand)) + return true; + } + return false; +} + +std::string DurableCatalog::_newInternalIdent(StringData identStem) { + stdx::lock_guard lk(_randLock); + StringBuilder buf; + buf << _kInternalIdentPrefix; + buf << identStem; + buf << _next++ << '-' << _rand; + return buf.str(); +} + +std::string DurableCatalog::getFilesystemPathForDb(const std::string& dbName) const { + if (_directoryPerDb) { + return storageGlobalParams.dbpath + '/' + escapeDbName(dbName); + } else { + return storageGlobalParams.dbpath; + } +} + +std::string DurableCatalog::generateUniqueIdent(NamespaceString nss, const char* kind) { + // If this changes to not put _rand at the end, _hasEntryCollidingWithRand will need fixing. + stdx::lock_guard lk(_randLock); + StringBuilder buf; + if (_directoryPerDb) { + buf << escapeDbName(nss.db()) << '/'; + } + buf << kind; + buf << (_directoryForIndexes ? '/' : '-'); + buf << _next++ << '-' << _rand; + return buf.str(); +} + +void DurableCatalog::init(OperationContext* opCtx) { + // No locking needed since called single threaded. + auto cursor = _rs->getCursor(opCtx); + while (auto record = cursor->next()) { + BSONObj obj = record->data.releaseToBson(); + + // For backwards compatibility where older version have a written feature document + if (isFeatureDocument(obj)) { + continue; + } + + // No rollback since this is just loading already committed data. + auto ident = obj["ident"].String(); + auto nss = NamespaceStringUtil::parseFromStringExpectTenantIdInMultitenancyMode( + obj["ns"].String()); + _catalogIdToEntryMap[record->id] = EntryIdentifier(record->id, ident, nss); + } + + // In the unlikely event that we have used this _rand before generate a new one. + stdx::lock_guard lk(_randLock); + while (_hasEntryCollidingWithRand(lk)) { + _rand = _newRand(); + } +} + +std::vector DurableCatalog::getAllCatalogEntries( + OperationContext* opCtx) const { + std::vector ret; + + auto cursor = _rs->getCursor(opCtx); + while (auto record = cursor->next()) { + BSONObj obj = record->data.releaseToBson(); + if (isFeatureDocument(obj)) { + // Skip over the version document because it doesn't correspond to a collection. + continue; + } + auto ident = obj["ident"].String(); + auto nss = NamespaceStringUtil::parseFromStringExpectTenantIdInMultitenancyMode( + obj["ns"].String()); + + ret.emplace_back(record->id, ident, nss); + } + + return ret; +} + +boost::optional DurableCatalog::scanForCatalogEntryByNss( + OperationContext* opCtx, const NamespaceString& nss) const { + auto cursor = _rs->getCursor(opCtx); + while (auto record = cursor->next()) { + BSONObj obj = record->data.releaseToBson(); + + if (isFeatureDocument(obj)) { + // Skip over the version document because it doesn't correspond to a collection. 
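
As a concrete illustration of `generateUniqueIdent()` above: with `--directoryperdb` and `--directoryForIndexes` enabled, a database named `my-db` escapes to `my.45db` ('-' is byte 45) and a collection ident takes the shape below. The `_rand` suffix is a placeholder; the real value comes from the catalog's random stream.

```cpp
// Standalone sketch of the ident shape produced by generateUniqueIdent() above
// when both directoryPerDb and directoryForIndexes are enabled. Values are
// placeholders chosen for illustration.
#include <iostream>
#include <string>

int main() {
    const std::string escapedDb = "my.45db";         // escapeDbName("my-db")
    const std::string kind = "collection";           // or "index"
    const unsigned long long next = 7;                // per-catalog counter (_next)
    const std::string rand = "1234567890123456789";  // placeholder for _rand
    std::cout << escapedDb << '/' << kind << '/' << next << '-' << rand << '\n';
    // -> my.45db/collection/7-1234567890123456789
    // Without either directory option the separators become '-', giving the more
    // familiar collection-7-1234567890123456789 shape with no db prefix.
}
```
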
+ continue; + } + + auto entryNss = NamespaceStringUtil::parseFromStringExpectTenantIdInMultitenancyMode( + obj["ns"].String()); + if (entryNss == nss) { + return _getDurableCatalogEntry(record->id, obj); + } + } + + return boost::none; +} + +boost::optional DurableCatalog::scanForCatalogEntryByUUID( + OperationContext* opCtx, const UUID& uuid) const { + auto cursor = _rs->getCursor(opCtx); + while (auto record = cursor->next()) { + BSONObj obj = record->data.releaseToBson(); + + if (isFeatureDocument(obj)) { + // Skip over the version document because it doesn't correspond to a collection. + continue; + } + + std::shared_ptr md = _parseMetaData(obj["md"]); + if (md->options.uuid == uuid) { + return _getDurableCatalogEntry(record->id, obj); + } + } + + return boost::none; +} + +DurableCatalog::EntryIdentifier DurableCatalog::getEntry(const RecordId& catalogId) const { + stdx::lock_guard lk(_catalogIdToEntryMapLock); + auto it = _catalogIdToEntryMap.find(catalogId); + invariant(it != _catalogIdToEntryMap.end()); + return it->second; +} + +StatusWith DurableCatalog::_addEntry( + OperationContext* opCtx, NamespaceString nss, const CollectionOptions& options) { + invariant(opCtx->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); + + auto ident = generateUniqueIdent(nss, "collection"); + + BSONObj obj; + { + BSONObjBuilder b; + b.append("ns", NamespaceStringUtil::serializeForCatalog(nss)); + b.append("ident", ident); + BSONCollectionCatalogEntry::MetaData md; + md.nss = nss; + md.options = options; + + if (options.timeseries) { + // All newly created catalog entries for time-series collections will have this flag set + // to false by default as mixed-schema data is only possible in versions 5.1 and + // earlier. + md.timeseriesBucketsMayHaveMixedSchemaData = false; + } + b.append("md", md.toBSON()); + obj = b.obj(); + } + StatusWith res = _rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), Timestamp()); + if (!res.isOK()) + return res.getStatus(); + + stdx::lock_guard lk(_catalogIdToEntryMapLock); + invariant(_catalogIdToEntryMap.find(res.getValue()) == _catalogIdToEntryMap.end()); + _catalogIdToEntryMap[res.getValue()] = {res.getValue(), ident, nss}; + opCtx->recoveryUnit()->registerChange(std::make_unique(this, res.getValue())); + + LOGV2_DEBUG(22207, + 1, + "stored meta data for {nss} @ {res_getValue}", + logAttrs(nss), + "res_getValue"_attr = res.getValue()); + return {{res.getValue(), ident, nss}}; +} + +StatusWith DurableCatalog::_importEntry(OperationContext* opCtx, + NamespaceString nss, + const BSONObj& metadata) { + invariant(opCtx->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); + + auto ident = metadata["ident"].String(); + StatusWith res = + _rs->insertRecord(opCtx, metadata.objdata(), metadata.objsize(), Timestamp()); + if (!res.isOK()) + return res.getStatus(); + + stdx::lock_guard lk(_catalogIdToEntryMapLock); + invariant(_catalogIdToEntryMap.find(res.getValue()) == _catalogIdToEntryMap.end()); + _catalogIdToEntryMap[res.getValue()] = {res.getValue(), ident, nss}; + opCtx->recoveryUnit()->registerChange(std::make_unique(this, res.getValue())); + + LOGV2_DEBUG(5095101, 1, "imported meta data", logAttrs(nss), "metadata"_attr = res.getValue()); + return {{res.getValue(), ident, nss}}; +} + +std::string DurableCatalog::getIndexIdent(OperationContext* opCtx, + const RecordId& catalogId, + StringData idxName) const { + BSONObj obj = _findEntry(opCtx, catalogId); + BSONObj idxIdent = obj["idxIdent"].Obj(); + return idxIdent[idxName].String(); +} + +std::vector 
DurableCatalog::getIndexIdents(OperationContext* opCtx, + const RecordId& catalogId) const { + std::vector idents; + + BSONObj obj = _findEntry(opCtx, catalogId); + if (obj["idxIdent"].eoo()) { + // No index entries for this catalog entry. + return idents; + } + + BSONObj idxIdent = obj["idxIdent"].Obj(); + + BSONObjIterator it(idxIdent); + while (it.more()) { + BSONElement elem = it.next(); + idents.push_back(elem.String()); + } + + return idents; +} + +BSONObj DurableCatalog::_findEntry(OperationContext* opCtx, const RecordId& catalogId) const { + LOGV2_DEBUG(22208, 3, "looking up metadata for: {catalogId}", "catalogId"_attr = catalogId); + RecordData data; + if (!_rs->findRecord(opCtx, catalogId, &data)) { + return BSONObj(); + } + + return data.releaseToBson().getOwned(); +} + +boost::optional DurableCatalog::getParsedCatalogEntry( + OperationContext* opCtx, const RecordId& catalogId) const { + BSONObj obj = _findEntry(opCtx, catalogId); + if (obj.isEmpty()) { + return boost::none; + } + + // For backwards compatibility where older version have a written feature document. This + // document cannot be parsed into a DurableCatalogEntry. See SERVER-57125. + if (isFeatureDocument(obj)) { + return boost::none; + } + + return _getDurableCatalogEntry(catalogId, obj); +} + +std::shared_ptr DurableCatalog::_parseMetaData( + const BSONElement& mdElement) const { + std::shared_ptr md; + if (mdElement.isABSONObj()) { + LOGV2_DEBUG(22210, 3, "returning metadata: {mdElement}", "mdElement"_attr = mdElement); + md = std::make_shared(); + md->parse(mdElement.Obj()); + } + return md; +} + +void DurableCatalog::putMetaData(OperationContext* opCtx, + const RecordId& catalogId, + BSONCollectionCatalogEntry::MetaData& md) { + NamespaceString nss(md.nss); + BSONObj obj = _findEntry(opCtx, catalogId); + + { + // rebuilt doc + BSONObjBuilder b; + b.append("md", md.toBSON()); + + BSONObjBuilder newIdentMap; + BSONObj oldIdentMap; + if (obj["idxIdent"].isABSONObj()) + oldIdentMap = obj["idxIdent"].Obj(); + + for (size_t i = 0; i < md.indexes.size(); i++) { + const auto& index = md.indexes[i]; + if (!index.isPresent()) { + continue; + } + + auto name = index.nameStringData(); + + // All indexes with buildUUIDs must be ready:false. 
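
Putting `_addEntry()` and `putMetaData()` together, each `_mdb_catalog` record carries the namespace, the collection ident, an optional ident map for its indexes, and the parsed metadata under `md`. A rough illustration of that document shape (all values are placeholders and `md` is abbreviated):

```cpp
// Illustrative only: approximate shape of one _mdb_catalog entry as assembled by
// _addEntry() and later extended with index idents by putMetaData().
BSONObj entry = BSON("ns" << "test.coll"
                          << "ident" << "collection-7-1234567890123456789"
                          << "idxIdent" << BSON("_id_" << "index-8-1234567890123456789")
                          << "md" << BSON("ns" << "test.coll" /* options, indexes, ... */));
```
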
+ invariant(!(index.buildUUID && index.ready), str::stream() << md.toBSON(true)); + + // fix ident map + BSONElement e = oldIdentMap[name]; + if (e.type() == String) { + newIdentMap.append(e); + continue; + } + // missing, create new + newIdentMap.append(name, generateUniqueIdent(nss, "index")); + } + b.append("idxIdent", newIdentMap.obj()); + + // add whatever is left + b.appendElementsUnique(obj); + obj = b.obj(); + } + + LOGV2_DEBUG(22211, 3, "recording new metadata: {obj}", "obj"_attr = obj); + Status status = _rs->updateRecord(opCtx, catalogId, obj.objdata(), obj.objsize()); + fassert(28521, status); +} + +Status DurableCatalog::_removeEntry(OperationContext* opCtx, const RecordId& catalogId) { + stdx::lock_guard lk(_catalogIdToEntryMapLock); + const auto it = _catalogIdToEntryMap.find(catalogId); + if (it == _catalogIdToEntryMap.end()) { + return Status(ErrorCodes::NamespaceNotFound, "collection not found"); + } + + opCtx->recoveryUnit()->onRollback([this, catalogId, entry = it->second](OperationContext*) { + stdx::lock_guard lk(_catalogIdToEntryMapLock); + _catalogIdToEntryMap[catalogId] = entry; + }); + + LOGV2_DEBUG(22212, + 1, + "deleting metadata for {it_second_namespace} @ {catalogId}", + "it_second_namespace"_attr = it->second.nss, + "catalogId"_attr = catalogId); + _rs->deleteRecord(opCtx, catalogId); + _catalogIdToEntryMap.erase(it); + + return Status::OK(); +} + +std::vector DurableCatalog::getAllIdents(OperationContext* opCtx) const { + std::vector v; + + auto cursor = _rs->getCursor(opCtx); + while (auto record = cursor->next()) { + BSONObj obj = record->data.releaseToBson(); + if (isFeatureDocument(obj)) { + // Skip over the version document because it doesn't correspond to a namespace entry and + // therefore doesn't refer to any idents. + continue; + } + v.push_back(obj["ident"].String()); + + BSONElement e = obj["idxIdent"]; + if (!e.isABSONObj()) + continue; + BSONObj idxIdent = e.Obj(); + + BSONObjIterator sub(idxIdent); + while (sub.more()) { + BSONElement e = sub.next(); + v.push_back(e.String()); + } + } + + return v; +} + +StatusWith DurableCatalog::newOrphanedIdent(OperationContext* opCtx, + std::string ident, + const CollectionOptions& optionsWithUUID) { + // The collection will be named local.orphan.xxxxx. + std::string identNs = ident; + std::replace(identNs.begin(), identNs.end(), '-', '_'); + NamespaceString nss{DatabaseName::kLocal.db(), + NamespaceString::kOrphanCollectionPrefix + identNs}; + + BSONObj obj; + { + BSONObjBuilder b; + b.append("ns", NamespaceStringUtil::serializeForCatalog(nss)); + b.append("ident", ident); + BSONCollectionCatalogEntry::MetaData md; + md.nss = nss; + // Default options with newly generated UUID. 
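
The orphan namespace built just above is derived mechanically from the ident: dashes become underscores and the result lands under `local.orphan.*` (assuming `kOrphanCollectionPrefix` is `"orphan."`). A standalone sketch of that derivation:

```cpp
// Standalone sketch of the orphan collection naming in newOrphanedIdent() above.
// The "orphan." prefix is an assumption about kOrphanCollectionPrefix.
#include <algorithm>
#include <iostream>
#include <string>

int main() {
    std::string ident = "collection-7-1234567890123456789";
    std::replace(ident.begin(), ident.end(), '-', '_');
    std::cout << "local.orphan." + ident << '\n';
    // -> local.orphan.collection_7_1234567890123456789
}
```
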
+ md.options = optionsWithUUID; + b.append("md", md.toBSON()); + obj = b.obj(); + } + StatusWith res = _rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), Timestamp()); + if (!res.isOK()) + return res.getStatus(); + + stdx::lock_guard lk(_catalogIdToEntryMapLock); + invariant(_catalogIdToEntryMap.find(res.getValue()) == _catalogIdToEntryMap.end()); + _catalogIdToEntryMap[res.getValue()] = EntryIdentifier(res.getValue(), ident, nss); + opCtx->recoveryUnit()->registerChange(std::make_unique(this, res.getValue())); + + LOGV2_DEBUG(22213, + 1, + "stored meta data for orphaned collection {namespace} @ {res_getValue}", + logAttrs(nss), + "res_getValue"_attr = res.getValue()); + return {NamespaceStringUtil::serializeForCatalog(nss)}; +} + +StatusWith>> DurableCatalog::createCollection( + OperationContext* opCtx, + const NamespaceString& nss, + const CollectionOptions& options, + bool allocateDefaultSpace) { + invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX)); + invariant(nss.coll().size() > 0); + + StatusWith swEntry = _addEntry(opCtx, nss, options); + if (!swEntry.isOK()) + return swEntry.getStatus(); + EntryIdentifier& entry = swEntry.getValue(); + + const auto keyFormat = [&] { + // Clustered collections require KeyFormat::String, but the opposite is not necessarily + // true: a clustered record store that is not associated with a collection has + // KeyFormat::String and and no CollectionOptions. + if (options.clusteredIndex) { + return KeyFormat::String; + } + return KeyFormat::Long; + }(); + Status status = + _engine->getEngine()->createRecordStore(opCtx, nss, entry.ident, options, keyFormat); + if (!status.isOK()) + return status; + + auto ru = opCtx->recoveryUnit(); + opCtx->recoveryUnit()->onRollback([ru, catalog = this, ident = entry.ident](OperationContext*) { + // Intentionally ignoring failure + catalog->_engine->getEngine()->dropIdent(ru, ident).ignore(); + }); + + auto rs = _engine->getEngine()->getRecordStore(opCtx, nss, entry.ident, options); + invariant(rs); + + return std::pair>(entry.catalogId, std::move(rs)); +} + +Status DurableCatalog::createIndex(OperationContext* opCtx, + const RecordId& catalogId, + const NamespaceString& nss, + const CollectionOptions& collOptions, + const IndexDescriptor* spec) { + std::string ident = getIndexIdent(opCtx, catalogId, spec->indexName()); + + auto kvEngine = _engine->getEngine(); + Status status = spec->getIndexType() == INDEX_COLUMN + ? kvEngine->createColumnStore(opCtx, nss, collOptions, ident, spec) + : kvEngine->createSortedDataInterface(opCtx, nss, collOptions, ident, spec); + if (status.isOK()) { + opCtx->recoveryUnit()->onRollback( + [this, ident, recoveryUnit = opCtx->recoveryUnit()](OperationContext*) { + // Intentionally ignoring failure. 
+ auto kvEngine = _engine->getEngine(); + kvEngine->dropIdent(recoveryUnit, ident).ignore(); + }); + } + return status; +} + +StatusWith DurableCatalog::importCollection( + OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& metadata, + const BSONObj& storageMetadata, + const ImportOptions& importOptions) { + invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X)); + invariant(nss.coll().size() > 0); + + BSONCollectionCatalogEntry::MetaData md; + const BSONElement mdElement = metadata["md"]; + uassert(ErrorCodes::BadValue, "Malformed catalog metadata", mdElement.isABSONObj()); + md.parse(mdElement.Obj()); + + uassert(ErrorCodes::BadValue, + "Attempted to import catalog entry without an ident", + metadata.hasField("ident")); + + const auto& catalogEntry = [&] { + if (importOptions.importCollectionUUIDOption == + ImportOptions::ImportCollectionUUIDOption::kGenerateNew) { + // Generate a new UUID for the collection. + md.options.uuid = UUID::gen(); + BSONObjBuilder catalogEntryBuilder; + // Generate a new "md" field after setting the new UUID. + catalogEntryBuilder.append("md", md.toBSON()); + // Append the rest of the metadata. + catalogEntryBuilder.appendElementsUnique(metadata); + return catalogEntryBuilder.obj(); + } + return metadata; + }(); + + // Before importing the idents belonging to the collection and indexes, change '_rand' if there + // will be a conflict. + std::set indexIdents; + { + const std::string collectionIdent = catalogEntry["ident"].String(); + + if (!catalogEntry["idxIdent"].eoo()) { + for (const auto& indexIdent : catalogEntry["idxIdent"].Obj()) { + indexIdents.insert(indexIdent.String()); + } + } + + auto identsToImportConflict = [&](WithLock) -> bool { + if (StringData(collectionIdent).endsWith(_rand)) { + return true; + } + + for (const std::string& ident : indexIdents) { + if (StringData(ident).endsWith(_rand)) { + return true; + } + } + return false; + }; + + stdx::lock_guard lk(_randLock); + while (!importOptions.skipIdentCollisionCheck && + (_hasEntryCollidingWithRand(lk) || identsToImportConflict(lk))) { + _rand = _newRand(); + } + } + + StatusWith swEntry = _importEntry(opCtx, nss, catalogEntry); + if (!swEntry.isOK()) + return swEntry.getStatus(); + EntryIdentifier& entry = swEntry.getValue(); + + opCtx->recoveryUnit()->onRollback( + [catalog = this, ident = entry.ident, indexIdents = indexIdents](OperationContext* opCtx) { + catalog->_engine->getEngine()->dropIdentForImport(opCtx, ident); + for (const auto& indexIdent : indexIdents) { + catalog->_engine->getEngine()->dropIdentForImport(opCtx, indexIdent); + } + }); + + auto kvEngine = _engine->getEngine(); + Status status = kvEngine->importRecordStore(opCtx, entry.ident, storageMetadata, importOptions); + if (!status.isOK()) + return status; + + for (const std::string& indexIdent : indexIdents) { + status = + kvEngine->importSortedDataInterface(opCtx, indexIdent, storageMetadata, importOptions); + if (!status.isOK()) { + return status; + } + } + + auto rs = _engine->getEngine()->getRecordStore(opCtx, nss, entry.ident, md.options); + invariant(rs); + + return DurableCatalog::ImportResult(entry.catalogId, std::move(rs), md.options.uuid.value()); +} + +Status DurableCatalog::renameCollection(OperationContext* opCtx, + const RecordId& catalogId, + const NamespaceString& toNss, + BSONCollectionCatalogEntry::MetaData& md) { + BSONObj old = _findEntry(opCtx, catalogId).getOwned(); + { + BSONObjBuilder b; + + b.append("ns", NamespaceStringUtil::serializeForCatalog(toNss)); + 
b.append("md", md.toBSON()); + + b.appendElementsUnique(old); + + BSONObj obj = b.obj(); + Status status = _rs->updateRecord(opCtx, catalogId, obj.objdata(), obj.objsize()); + fassert(28522, status); + } + + stdx::lock_guard lk(_catalogIdToEntryMapLock); + const auto it = _catalogIdToEntryMap.find(catalogId); + invariant(it != _catalogIdToEntryMap.end()); + + NamespaceString fromName = it->second.nss; + it->second.nss = toNss; + opCtx->recoveryUnit()->onRollback([this, catalogId, fromName](OperationContext*) { + stdx::lock_guard lk(_catalogIdToEntryMapLock); + const auto it = _catalogIdToEntryMap.find(catalogId); + invariant(it != _catalogIdToEntryMap.end()); + it->second.nss = fromName; + }); + + return Status::OK(); +} + +Status DurableCatalog::dropCollection(OperationContext* opCtx, const RecordId& catalogId) { + EntryIdentifier entry; + { + stdx::lock_guard lk(_catalogIdToEntryMapLock); + entry = _catalogIdToEntryMap[catalogId]; + } + + invariant(opCtx->lockState()->isCollectionLockedForMode(entry.nss, MODE_X)); + invariant(getParsedCatalogEntry(opCtx, catalogId)->metadata->getTotalIndexCount() == 0); + + // Remove metadata from mdb_catalog + Status status = _removeEntry(opCtx, catalogId); + if (!status.isOK()) { + return status; + } + + return Status::OK(); +} + +Status DurableCatalog::dropAndRecreateIndexIdentForResume(OperationContext* opCtx, + const NamespaceString& nss, + const CollectionOptions& collOptions, + const IndexDescriptor* spec, + StringData ident) { + auto status = _engine->getEngine()->dropSortedDataInterface(opCtx, ident); + if (!status.isOK()) + return status; + + status = _engine->getEngine()->createSortedDataInterface(opCtx, nss, collOptions, ident, spec); + + return status; +} + +bool DurableCatalog::isIndexMultikey(OperationContext* opCtx, + const RecordId& catalogId, + StringData indexName, + MultikeyPaths* multikeyPaths) const { + auto catalogEntry = getParsedCatalogEntry(opCtx, catalogId); + auto md = catalogEntry->metadata; + + int offset = md->findIndexOffset(indexName); + invariant(offset >= 0, + str::stream() << "cannot get multikey for index " << indexName << " @ " << catalogId + << " : " << md->toBSON()); + + if (multikeyPaths && !md->indexes[offset].multikeyPaths.empty()) { + *multikeyPaths = md->indexes[offset].multikeyPaths; + } + + return md->indexes[offset].multikey; +} + +void DurableCatalog::getReadyIndexes(OperationContext* opCtx, + RecordId catalogId, + StringSet* names) const { + auto catalogEntry = getParsedCatalogEntry(opCtx, catalogId); + if (!catalogEntry) + return; + + auto md = catalogEntry->metadata; + for (const auto& index : md->indexes) { + if (index.ready) + names->insert(index.spec["name"].String()); + } +} + +bool DurableCatalog::isIndexPresent(OperationContext* opCtx, + const RecordId& catalogId, + StringData indexName) const { + auto catalogEntry = getParsedCatalogEntry(opCtx, catalogId); + if (!catalogEntry) + return false; + + int offset = catalogEntry->metadata->findIndexOffset(indexName); + return offset >= 0; +} + +DurableCatalogEntry DurableCatalog::_getDurableCatalogEntry(const RecordId& catalogId, + const BSONObj& obj) const { + BSONElement idxIdent = obj["idxIdent"]; + return DurableCatalogEntry{catalogId, + obj["ident"].String(), + idxIdent.eoo() ? 
BSONObj() : idxIdent.Obj().getOwned(), + _parseMetaData(obj["md"])}; +} + +} // namespace mongo diff --git a/src/mongo/db/storage/durable_catalog.h b/src/mongo/db/storage/durable_catalog.h index d44d8f3a645e4..95b6d8071c666 100644 --- a/src/mongo/db/storage/durable_catalog.h +++ b/src/mongo/db/storage/durable_catalog.h @@ -29,29 +29,54 @@ #pragma once +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/import_options.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/bson_collection_catalog_entry.h" #include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo { + +class StorageEngineInterface; + /** * An interface to modify the on-disk catalog metadata. */ -class DurableCatalog { +class DurableCatalog final { DurableCatalog(const DurableCatalog&) = delete; DurableCatalog& operator=(const DurableCatalog&) = delete; DurableCatalog(DurableCatalog&&) = delete; DurableCatalog& operator=(DurableCatalog&&) = delete; -protected: - DurableCatalog() = default; - public: static constexpr auto kIsFeatureDocumentFieldName = "isFeatureDoc"_sd; @@ -67,7 +92,12 @@ class DurableCatalog { NamespaceString nss; }; - virtual ~DurableCatalog() {} + DurableCatalog(RecordStore* rs, + bool directoryPerDb, + bool directoryForIndexes, + StorageEngineInterface* engine); + DurableCatalog() = delete; + static DurableCatalog* get(OperationContext* opCtx) { return opCtx->getServiceContext()->getStorageEngine()->getCatalog(); @@ -84,82 +114,81 @@ class DurableCatalog { return false; } - /** - * Gets the parsed namespace from a raw BSON catalog entry. - */ - static NamespaceString getNamespaceFromCatalogEntry(const BSONObj& catalogEntry) { - return NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode( - catalogEntry["ns"].checkAndGetStringData()); + static bool isUserDataIdent(StringData ident) { + // Indexes and collections are candidates for dropping when the storage engine's metadata + // does not align with the catalog metadata. + return ident.find("index-") != std::string::npos || + ident.find("index/") != std::string::npos || isCollectionIdent(ident); } - /** - * Gets the metadata as BSON from a raw BSON catalog entry. 
- */ - static BSONObj getMetadataFromCatalogEntry(const BSONObj& catalogEntry) { - return catalogEntry["md"].Obj(); + static bool isInternalIdent(StringData ident) { + return ident.find(_kInternalIdentPrefix) != std::string::npos; + } + + static bool isResumableIndexBuildIdent(StringData ident) { + invariant(isInternalIdent(ident), ident.toString()); + return ident.find(_kResumableIndexBuildIdentStem) != std::string::npos; + } + + static bool isCollectionIdent(StringData ident) { + // Internal idents prefixed "internal-" should not be considered collections, because + // they are not eligible for orphan recovery through repair. + return ident.find("collection-") != std::string::npos || + ident.find("collection/") != std::string::npos; } - virtual void init(OperationContext* opCtx) = 0; + void init(OperationContext* opCtx); - virtual std::vector getAllCatalogEntries(OperationContext* opCtx) const = 0; + std::vector getAllCatalogEntries(OperationContext* opCtx) const; /** * Scans the persisted catalog until an entry is found matching 'nss'. */ - virtual boost::optional scanForCatalogEntryByNss( - OperationContext* opCtx, const NamespaceString& nss) const = 0; + boost::optional scanForCatalogEntryByNss(OperationContext* opCtx, + const NamespaceString& nss) const; /** * Scans the persisted catalog until an entry is found matching 'uuid'. */ - virtual boost::optional scanForCatalogEntryByUUID( - OperationContext* opCtx, const UUID& uuid) const = 0; + boost::optional scanForCatalogEntryByUUID(OperationContext* opCtx, + const UUID& uuid) const; - virtual EntryIdentifier getEntry(const RecordId& catalogId) const = 0; + EntryIdentifier getEntry(const RecordId& catalogId) const; - virtual std::string getIndexIdent(OperationContext* opCtx, - const RecordId& id, - StringData idxName) const = 0; + std::string getIndexIdent(OperationContext* opCtx, + const RecordId& id, + StringData idxName) const; - virtual std::vector getIndexIdents(OperationContext* opCtx, - const RecordId& id) const = 0; + std::vector getIndexIdents(OperationContext* opCtx, const RecordId& id) const; /** * Get a raw catalog entry for catalogId as BSON. */ - virtual BSONObj getCatalogEntry(OperationContext* opCtx, const RecordId& catalogId) const = 0; - - /** - * Like 'getCatalogEntry' above but parses the catalog entry to common types. - */ - virtual boost::optional getParsedCatalogEntry( - OperationContext* opCtx, const RecordId& catalogId) const = 0; + BSONObj getCatalogEntry(OperationContext* opCtx, const RecordId& catalogId) const { + return _findEntry(opCtx, catalogId); + } /** - * Like 'getParsedCatalogEntry' above but only extracts the metadata component. + * Parses the catalog entry object at `catalogId` to common types. Returns boost::none if it + * doesn't exist or if the entry is the feature document. */ - virtual std::shared_ptr getMetaData( - OperationContext* opCtx, const RecordId& id) const = 0; + boost::optional getParsedCatalogEntry(OperationContext* opCtx, + const RecordId& catalogId) const; /** * Updates the catalog entry for the collection 'nss' with the fields specified in 'md'. If * 'md.indexes' contains a new index entry, then this method generates a new index ident and * adds it to the catalog entry. 
*/ - virtual void putMetaData(OperationContext* opCtx, - const RecordId& id, - BSONCollectionCatalogEntry::MetaData& md) = 0; - - virtual std::vector getAllIdents(OperationContext* opCtx) const = 0; - - virtual bool isUserDataIdent(StringData ident) const = 0; + void putMetaData(OperationContext* opCtx, + const RecordId& id, + BSONCollectionCatalogEntry::MetaData& md); - virtual bool isInternalIdent(StringData ident) const = 0; + std::vector getAllIdents(OperationContext* opCtx) const; - virtual bool isCollectionIdent(StringData ident) const = 0; - - - virtual RecordStore* getRecordStore() = 0; + RecordStore* getRecordStore() { + return _rs; + } /** * Create an entry in the catalog for an orphaned collection found in the @@ -167,16 +196,18 @@ class DurableCatalog { * Note that this function does not recreate the _id index on the for non-clustered collections * because it does not have access to index catalog. */ - virtual StatusWith newOrphanedIdent(OperationContext* opCtx, - std::string ident, - const CollectionOptions& optionsWithUUID) = 0; + StatusWith newOrphanedIdent(OperationContext* opCtx, + std::string ident, + const CollectionOptions& optionsWithUUID); - virtual std::string getFilesystemPathForDb(const std::string& dbName) const = 0; + std::string getFilesystemPathForDb(const std::string& dbName) const; /** * Generate an internal ident name. */ - virtual std::string newInternalIdent() = 0; + std::string newInternalIdent() { + return _newInternalIdent(""); + } /** * Generates a new unique identifier for a new "thing". @@ -186,28 +217,30 @@ class DurableCatalog { * Warning: It's only unique as far as we know without checking every file on disk, but it is * possible that this ident collides with an existing one. */ - virtual std::string generateUniqueIdent(NamespaceString nss, const char* kind) = 0; + std::string generateUniqueIdent(NamespaceString nss, const char* kind); /** * Generate an internal resumable index build ident name. */ - virtual std::string newInternalResumableIndexBuildIdent() = 0; + std::string newInternalResumableIndexBuildIdent() { + return _newInternalIdent(_kResumableIndexBuildIdentStem); + } /** * On success, returns the RecordId which identifies the new record store in the durable catalog * in addition to ownership of the new RecordStore. 
*/ - virtual StatusWith>> createCollection( + StatusWith>> createCollection( OperationContext* opCtx, const NamespaceString& nss, const CollectionOptions& options, - bool allocateDefaultSpace) = 0; + bool allocateDefaultSpace); - virtual Status createIndex(OperationContext* opCtx, - const RecordId& catalogId, - const NamespaceString& nss, - const CollectionOptions& collOptions, - const IndexDescriptor* spec) = 0; + Status createIndex(OperationContext* opCtx, + const RecordId& catalogId, + const NamespaceString& nss, + const CollectionOptions& collOptions, + const IndexDescriptor* spec); /** * Import a collection by inserting the given metadata into the durable catalog and instructing @@ -232,16 +265,16 @@ class DurableCatalog { UUID uuid; }; - virtual StatusWith importCollection(OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& metadata, - const BSONObj& storageMetadata, - const ImportOptions& importOptions) = 0; + StatusWith importCollection(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& metadata, + const BSONObj& storageMetadata, + const ImportOptions& importOptions); - virtual Status renameCollection(OperationContext* opCtx, - const RecordId& catalogId, - const NamespaceString& toNss, - BSONCollectionCatalogEntry::MetaData& md) = 0; + Status renameCollection(OperationContext* opCtx, + const RecordId& catalogId, + const NamespaceString& toNss, + BSONCollectionCatalogEntry::MetaData& md); /** * Deletes the persisted collection catalog entry identified by 'catalogId'. @@ -249,30 +282,22 @@ class DurableCatalog { * Expects (invariants) that all of the index catalog entries have been removed already via * removeIndex. */ - virtual Status dropCollection(OperationContext* opCtx, const RecordId& catalogId) = 0; + Status dropCollection(OperationContext* opCtx, const RecordId& catalogId); /** * Drops the provided ident and recreates it as empty for use in resuming an index build. */ - virtual Status dropAndRecreateIndexIdentForResume(OperationContext* opCtx, - const NamespaceString& nss, - const CollectionOptions& collOptions, - const IndexDescriptor* spec, - StringData ident) = 0; + Status dropAndRecreateIndexIdentForResume(OperationContext* opCtx, + const NamespaceString& nss, + const CollectionOptions& collOptions, + const IndexDescriptor* spec, + StringData ident); - virtual int getTotalIndexCount(OperationContext* opCtx, const RecordId& catalogId) const = 0; + void getReadyIndexes(OperationContext* opCtx, RecordId catalogId, StringSet* names) const; - virtual void getReadyIndexes(OperationContext* opCtx, - RecordId catalogId, - StringSet* names) const = 0; - - virtual bool isIndexPresent(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName) const = 0; - - virtual bool isIndexReady(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName) const = 0; + bool isIndexPresent(OperationContext* opCtx, + const RecordId& catalogId, + StringData indexName) const; /** * Returns true if the index identified by 'indexName' is multikey, and returns false otherwise. @@ -285,13 +310,74 @@ class DurableCatalog { * multikey information, then 'multikeyPaths' is initialized as a vector with size equal to the * number of elements in the index key pattern of empty sets. 
*/ - virtual bool isIndexMultikey(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName, - MultikeyPaths* multikeyPaths) const = 0; + bool isIndexMultikey(OperationContext* opCtx, + const RecordId& catalogId, + StringData indexName, + MultikeyPaths* multikeyPaths) const; + + void setRand_forTest(const std::string& rand) { + stdx::lock_guard lk(_randLock); + _rand = rand; + } + + std::string getRand_forTest() const { + stdx::lock_guard lk(_randLock); + return _rand; + } + +private: + static constexpr auto _kInternalIdentPrefix = "internal-"_sd; + static constexpr auto _kResumableIndexBuildIdentStem = "resumable-index-build-"_sd; + + class AddIdentChange; + + friend class StorageEngineImpl; + friend class DurableCatalogTest; + friend class StorageEngineTest; + + BSONObj _findEntry(OperationContext* opCtx, const RecordId& catalogId) const; + StatusWith _addEntry(OperationContext* opCtx, + NamespaceString nss, + const CollectionOptions& options); + StatusWith _importEntry(OperationContext* opCtx, + NamespaceString nss, + const BSONObj& metadata); + Status _removeEntry(OperationContext* opCtx, const RecordId& catalogId); + + std::shared_ptr _parseMetaData( + const BSONElement& mdElement) const; + + + std::string _newInternalIdent(StringData identStem); + + std::string _newRand() { + return str::stream() << SecureRandom().nextInt64(); + } + + /** + * Helper which constructs a DurableCatalogEntry given 'catalogId' and 'obj'. + */ + DurableCatalogEntry _getDurableCatalogEntry(const RecordId& catalogId, + const BSONObj& obj) const; + + /** + * The '_randLock' must be passed in. + */ + bool _hasEntryCollidingWithRand(WithLock) const; + + RecordStore* _rs; // not owned + const bool _directoryPerDb; + const bool _directoryForIndexes; + + // Protects '_rand' and '_next'. + mutable Mutex _randLock = MONGO_MAKE_LATCH("DurableCatalogImpl::_rand"); + std::string _rand; + unsigned long long _next; - virtual void setRand_forTest(const std::string& rand) = 0; + absl::flat_hash_map _catalogIdToEntryMap; + mutable Mutex _catalogIdToEntryMapLock = + MONGO_MAKE_LATCH("DurableCatalogImpl::_catalogIdToEntryMap"); - virtual std::string getRand_forTest() const = 0; + StorageEngineInterface* const _engine; }; } // namespace mongo diff --git a/src/mongo/db/storage/durable_catalog_impl.cpp b/src/mongo/db/storage/durable_catalog_impl.cpp deleted file mode 100644 index bea7ed3100c96..0000000000000 --- a/src/mongo/db/storage/durable_catalog_impl.cpp +++ /dev/null @@ -1,940 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. 
You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - - -#include "mongo/db/storage/durable_catalog_impl.h" - -#include -#include -#include - -#include "mongo/bson/util/bson_extract.h" -#include "mongo/bson/util/builder.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/server_options.h" -#include "mongo/db/storage/kv/kv_engine.h" -#include "mongo/db/storage/record_store.h" -#include "mongo/db/storage/recovery_unit.h" -#include "mongo/db/storage/storage_engine_interface.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/bits.h" -#include "mongo/platform/random.h" -#include "mongo/util/str.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - - -namespace mongo { -namespace { -// This is a global resource, which protects accesses to the catalog metadata (instance-wide). -// It is never used with KVEngines that support doc-level locking so this should never conflict -// with anything else. - -const char kNamespaceFieldName[] = "ns"; -const char kNonRepairableFeaturesFieldName[] = "nonRepairable"; -const char kRepairableFeaturesFieldName[] = "repairable"; -const char kInternalIdentPrefix[] = "internal-"; -const char kResumableIndexBuildIdentStem[] = "resumable-index-build-"; - -void appendPositionsOfBitsSet(uint64_t value, StringBuilder* sb) { - invariant(sb); - - *sb << "[ "; - bool firstIteration = true; - while (value) { - const int lowestSetBitPosition = countTrailingZeros64(value); - if (!firstIteration) { - *sb << ", "; - } - *sb << lowestSetBitPosition; - value ^= (1ULL << lowestSetBitPosition); - firstIteration = false; - } - *sb << " ]"; -} - -// Does not escape letters, digits, '.', or '_'. -// Otherwise escapes to a '.' followed by a zero-filled 2- or 3-digit decimal number. -// Note that this escape table does not produce a 1:1 mapping to and from dbname, and -// collisions are possible. -// For example: -// "db.123", "db\0143", and "db\073" all escape to "db.123". -// {'d','b','1','2','3'} => "d" + "b" + "." 
+ "1" + "2" + "3" => "db.123" -// {'d','b','\x0c','3'} => "d" + "b" + ".12" + "3" => "db.123" -// {'d','b','\x3b'} => "d" + "b" + ".123" => "db.123" -constexpr std::array escapeTable = { - ".00"_sd, ".01"_sd, ".02"_sd, ".03"_sd, ".04"_sd, ".05"_sd, ".06"_sd, ".07"_sd, - ".08"_sd, ".09"_sd, ".10"_sd, ".11"_sd, ".12"_sd, ".13"_sd, ".14"_sd, ".15"_sd, - ".16"_sd, ".17"_sd, ".18"_sd, ".19"_sd, ".20"_sd, ".21"_sd, ".22"_sd, ".23"_sd, - ".24"_sd, ".25"_sd, ".26"_sd, ".27"_sd, ".28"_sd, ".29"_sd, ".30"_sd, ".31"_sd, - ".32"_sd, ".33"_sd, ".34"_sd, ".35"_sd, ".36"_sd, ".37"_sd, ".38"_sd, ".39"_sd, - ".40"_sd, ".41"_sd, ".42"_sd, ".43"_sd, ".44"_sd, ".45"_sd, "."_sd, ".47"_sd, - "0"_sd, "1"_sd, "2"_sd, "3"_sd, "4"_sd, "5"_sd, "6"_sd, "7"_sd, - "8"_sd, "9"_sd, ".58"_sd, ".59"_sd, ".60"_sd, ".61"_sd, ".62"_sd, ".63"_sd, - ".64"_sd, "A"_sd, "B"_sd, "C"_sd, "D"_sd, "E"_sd, "F"_sd, "G"_sd, - "H"_sd, "I"_sd, "J"_sd, "K"_sd, "L"_sd, "M"_sd, "N"_sd, "O"_sd, - "P"_sd, "Q"_sd, "R"_sd, "S"_sd, "T"_sd, "U"_sd, "V"_sd, "W"_sd, - "X"_sd, "Y"_sd, "Z"_sd, ".91"_sd, ".92"_sd, ".93"_sd, ".94"_sd, "_"_sd, - ".96"_sd, "a"_sd, "b"_sd, "c"_sd, "d"_sd, "e"_sd, "f"_sd, "g"_sd, - "h"_sd, "i"_sd, "j"_sd, "k"_sd, "l"_sd, "m"_sd, "n"_sd, "o"_sd, - "p"_sd, "q"_sd, "r"_sd, "s"_sd, "t"_sd, "u"_sd, "v"_sd, "w"_sd, - "x"_sd, "y"_sd, "z"_sd, ".123"_sd, ".124"_sd, ".125"_sd, ".126"_sd, ".127"_sd, - ".128"_sd, ".129"_sd, ".130"_sd, ".131"_sd, ".132"_sd, ".133"_sd, ".134"_sd, ".135"_sd, - ".136"_sd, ".137"_sd, ".138"_sd, ".139"_sd, ".140"_sd, ".141"_sd, ".142"_sd, ".143"_sd, - ".144"_sd, ".145"_sd, ".146"_sd, ".147"_sd, ".148"_sd, ".149"_sd, ".150"_sd, ".151"_sd, - ".152"_sd, ".153"_sd, ".154"_sd, ".155"_sd, ".156"_sd, ".157"_sd, ".158"_sd, ".159"_sd, - ".160"_sd, ".161"_sd, ".162"_sd, ".163"_sd, ".164"_sd, ".165"_sd, ".166"_sd, ".167"_sd, - ".168"_sd, ".169"_sd, ".170"_sd, ".171"_sd, ".172"_sd, ".173"_sd, ".174"_sd, ".175"_sd, - ".176"_sd, ".177"_sd, ".178"_sd, ".179"_sd, ".180"_sd, ".181"_sd, ".182"_sd, ".183"_sd, - ".184"_sd, ".185"_sd, ".186"_sd, ".187"_sd, ".188"_sd, ".189"_sd, ".190"_sd, ".191"_sd, - ".192"_sd, ".193"_sd, ".194"_sd, ".195"_sd, ".196"_sd, ".197"_sd, ".198"_sd, ".199"_sd, - ".200"_sd, ".201"_sd, ".202"_sd, ".203"_sd, ".204"_sd, ".205"_sd, ".206"_sd, ".207"_sd, - ".208"_sd, ".209"_sd, ".210"_sd, ".211"_sd, ".212"_sd, ".213"_sd, ".214"_sd, ".215"_sd, - ".216"_sd, ".217"_sd, ".218"_sd, ".219"_sd, ".220"_sd, ".221"_sd, ".222"_sd, ".223"_sd, - ".224"_sd, ".225"_sd, ".226"_sd, ".227"_sd, ".228"_sd, ".229"_sd, ".230"_sd, ".231"_sd, - ".232"_sd, ".233"_sd, ".234"_sd, ".235"_sd, ".236"_sd, ".237"_sd, ".238"_sd, ".239"_sd, - ".240"_sd, ".241"_sd, ".242"_sd, ".243"_sd, ".244"_sd, ".245"_sd, ".246"_sd, ".247"_sd, - ".248"_sd, ".249"_sd, ".250"_sd, ".251"_sd, ".252"_sd, ".253"_sd, ".254"_sd, ".255"_sd}; - -std::string escapeDbName(StringData dbname) { - std::string escaped; - escaped.reserve(dbname.size()); - for (unsigned char c : dbname) { - StringData ce = escapeTable[c]; - escaped.append(ce.begin(), ce.end()); - } - return escaped; -} - -bool indexTypeSupportsPathLevelMultikeyTracking(StringData accessMethod) { - return accessMethod == IndexNames::BTREE || accessMethod == IndexNames::GEO_2DSPHERE; -} -} // namespace - -class DurableCatalogImpl::AddIdentChange : public RecoveryUnit::Change { -public: - AddIdentChange(DurableCatalogImpl* catalog, RecordId catalogId) - : _catalog(catalog), _catalogId(std::move(catalogId)) {} - - virtual void commit(OperationContext* opCtx, boost::optional) {} - virtual void 
rollback(OperationContext* opCtx) { - stdx::lock_guard lk(_catalog->_catalogIdToEntryMapLock); - _catalog->_catalogIdToEntryMap.erase(_catalogId); - } - - DurableCatalogImpl* const _catalog; - const RecordId _catalogId; -}; - -DurableCatalogImpl::DurableCatalogImpl(RecordStore* rs, - bool directoryPerDb, - bool directoryForIndexes, - StorageEngineInterface* engine) - : _rs(rs), - _directoryPerDb(directoryPerDb), - _directoryForIndexes(directoryForIndexes), - _rand(_newRand()), - _next(0), - _engine(engine) {} - -DurableCatalogImpl::~DurableCatalogImpl() { - _rs = nullptr; -} - -std::string DurableCatalogImpl::_newRand() { - return str::stream() << SecureRandom().nextInt64(); -} - -bool DurableCatalogImpl::_hasEntryCollidingWithRand(WithLock) const { - stdx::lock_guard lk(_catalogIdToEntryMapLock); - for (auto it = _catalogIdToEntryMap.begin(); it != _catalogIdToEntryMap.end(); ++it) { - if (StringData(it->second.ident).endsWith(_rand)) - return true; - } - return false; -} - -std::string DurableCatalogImpl::newInternalIdent() { - return _newInternalIdent(""); -} - -std::string DurableCatalogImpl::newInternalResumableIndexBuildIdent() { - return _newInternalIdent(kResumableIndexBuildIdentStem); -} - -std::string DurableCatalogImpl::_newInternalIdent(StringData identStem) { - stdx::lock_guard lk(_randLock); - StringBuilder buf; - buf << kInternalIdentPrefix; - buf << identStem; - buf << _next++ << '-' << _rand; - return buf.str(); -} - -std::string DurableCatalogImpl::getFilesystemPathForDb(const std::string& dbName) const { - if (_directoryPerDb) { - return storageGlobalParams.dbpath + '/' + escapeDbName(dbName); - } else { - return storageGlobalParams.dbpath; - } -} - -std::string DurableCatalogImpl::generateUniqueIdent(NamespaceString nss, const char* kind) { - // If this changes to not put _rand at the end, _hasEntryCollidingWithRand will need fixing. - stdx::lock_guard lk(_randLock); - StringBuilder buf; - if (_directoryPerDb) { - buf << escapeDbName(nss.db()) << '/'; - } - buf << kind; - buf << (_directoryForIndexes ? '/' : '-'); - buf << _next++ << '-' << _rand; - return buf.str(); -} - -void DurableCatalogImpl::init(OperationContext* opCtx) { - // No locking needed since called single threaded. - auto cursor = _rs->getCursor(opCtx); - while (auto record = cursor->next()) { - BSONObj obj = record->data.releaseToBson(); - - // For backwards compatibility where older version have a written feature document - if (isFeatureDocument(obj)) { - continue; - } - - // No rollback since this is just loading already committed data. - auto ident = obj["ident"].String(); - auto nss = - NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(obj["ns"].String()); - _catalogIdToEntryMap[record->id] = EntryIdentifier(record->id, ident, nss); - } - - // In the unlikely event that we have used this _rand before generate a new one. - stdx::lock_guard lk(_randLock); - while (_hasEntryCollidingWithRand(lk)) { - _rand = _newRand(); - } -} - -std::vector DurableCatalogImpl::getAllCatalogEntries( - OperationContext* opCtx) const { - std::vector ret; - - auto cursor = _rs->getCursor(opCtx); - while (auto record = cursor->next()) { - BSONObj obj = record->data.releaseToBson(); - if (isFeatureDocument(obj)) { - // Skip over the version document because it doesn't correspond to a collection. 
- continue; - } - auto ident = obj["ident"].String(); - auto nss = - NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(obj["ns"].String()); - - ret.emplace_back(record->id, ident, nss); - } - - return ret; -} - -boost::optional DurableCatalogImpl::scanForCatalogEntryByNss( - OperationContext* opCtx, const NamespaceString& nss) const { - auto cursor = _rs->getCursor(opCtx); - while (auto record = cursor->next()) { - BSONObj obj = record->data.releaseToBson(); - - if (isFeatureDocument(obj)) { - // Skip over the version document because it doesn't correspond to a collection. - continue; - } - - auto entryNss = - NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode(obj["ns"].String()); - if (entryNss == nss) { - BSONElement idxIdent = obj["idxIdent"]; - return DurableCatalogEntry{record->id, - obj["ident"].String(), - idxIdent.eoo() ? BSONObj() : idxIdent.Obj().getOwned(), - _parseMetaData(obj["md"])}; - } - } - - return boost::none; -} - -boost::optional DurableCatalogImpl::scanForCatalogEntryByUUID( - OperationContext* opCtx, const UUID& uuid) const { - auto cursor = _rs->getCursor(opCtx); - while (auto record = cursor->next()) { - BSONObj obj = record->data.releaseToBson(); - - if (isFeatureDocument(obj)) { - // Skip over the version document because it doesn't correspond to a collection. - continue; - } - - std::shared_ptr md = _parseMetaData(obj["md"]); - if (md->options.uuid == uuid) { - BSONElement idxIdent = obj["idxIdent"]; - return DurableCatalogEntry{record->id, - obj["ident"].String(), - idxIdent.eoo() ? BSONObj() : idxIdent.Obj().getOwned(), - md}; - } - } - - return boost::none; -} - -DurableCatalog::EntryIdentifier DurableCatalogImpl::getEntry(const RecordId& catalogId) const { - stdx::lock_guard lk(_catalogIdToEntryMapLock); - auto it = _catalogIdToEntryMap.find(catalogId); - invariant(it != _catalogIdToEntryMap.end()); - return it->second; -} - -StatusWith DurableCatalogImpl::_addEntry( - OperationContext* opCtx, NamespaceString nss, const CollectionOptions& options) { - invariant(opCtx->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); - - auto ident = generateUniqueIdent(nss, "collection"); - - BSONObj obj; - { - BSONObjBuilder b; - b.append("ns", nss.toStringWithTenantId()); - b.append("ident", ident); - BSONCollectionCatalogEntry::MetaData md; - md.nss = nss; - md.options = options; - - if (options.timeseries) { - // All newly created catalog entries for time-series collections will have this flag set - // to false by default as mixed-schema data is only possible in versions 5.1 and - // earlier. 
- md.timeseriesBucketsMayHaveMixedSchemaData = false; - } - b.append("md", md.toBSON()); - obj = b.obj(); - } - StatusWith res = _rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), Timestamp()); - if (!res.isOK()) - return res.getStatus(); - - stdx::lock_guard lk(_catalogIdToEntryMapLock); - invariant(_catalogIdToEntryMap.find(res.getValue()) == _catalogIdToEntryMap.end()); - _catalogIdToEntryMap[res.getValue()] = {res.getValue(), ident, nss}; - opCtx->recoveryUnit()->registerChange(std::make_unique(this, res.getValue())); - - LOGV2_DEBUG(22207, - 1, - "stored meta data for {nss} @ {res_getValue}", - logAttrs(nss), - "res_getValue"_attr = res.getValue()); - return {{res.getValue(), ident, nss}}; -} - -StatusWith DurableCatalogImpl::_importEntry( - OperationContext* opCtx, NamespaceString nss, const BSONObj& metadata) { - invariant(opCtx->lockState()->isDbLockedForMode(nss.dbName(), MODE_IX)); - - auto ident = metadata["ident"].String(); - StatusWith res = - _rs->insertRecord(opCtx, metadata.objdata(), metadata.objsize(), Timestamp()); - if (!res.isOK()) - return res.getStatus(); - - stdx::lock_guard lk(_catalogIdToEntryMapLock); - invariant(_catalogIdToEntryMap.find(res.getValue()) == _catalogIdToEntryMap.end()); - _catalogIdToEntryMap[res.getValue()] = {res.getValue(), ident, nss}; - opCtx->recoveryUnit()->registerChange(std::make_unique(this, res.getValue())); - - LOGV2_DEBUG(5095101, 1, "imported meta data", logAttrs(nss), "metadata"_attr = res.getValue()); - return {{res.getValue(), ident, nss}}; -} - -std::string DurableCatalogImpl::getIndexIdent(OperationContext* opCtx, - const RecordId& catalogId, - StringData idxName) const { - BSONObj obj = _findEntry(opCtx, catalogId); - BSONObj idxIdent = obj["idxIdent"].Obj(); - return idxIdent[idxName].String(); -} - -std::vector DurableCatalogImpl::getIndexIdents(OperationContext* opCtx, - const RecordId& catalogId) const { - std::vector idents; - - BSONObj obj = _findEntry(opCtx, catalogId); - if (obj["idxIdent"].eoo()) { - // No index entries for this catalog entry. - return idents; - } - - BSONObj idxIdent = obj["idxIdent"].Obj(); - - BSONObjIterator it(idxIdent); - while (it.more()) { - BSONElement elem = it.next(); - idents.push_back(elem.String()); - } - - return idents; -} - -BSONObj DurableCatalogImpl::_findEntry(OperationContext* opCtx, const RecordId& catalogId) const { - LOGV2_DEBUG(22208, 3, "looking up metadata for: {catalogId}", "catalogId"_attr = catalogId); - RecordData data; - if (!_rs->findRecord(opCtx, catalogId, &data)) { - // since the in memory meta data isn't managed with mvcc - // its possible for different transactions to see slightly - // different things, which is ok via the locking above. - return BSONObj(); - } - - return data.releaseToBson().getOwned(); -} - -boost::optional DurableCatalogImpl::getParsedCatalogEntry( - OperationContext* opCtx, const RecordId& catalogId) const { - BSONObj obj = _findEntry(opCtx, catalogId); - if (obj.isEmpty()) { - return boost::none; - } - - BSONElement idxIdent = obj["idxIdent"]; - return DurableCatalogEntry{catalogId, - obj["ident"].String(), - idxIdent.eoo() ? 
BSONObj() : idxIdent.Obj().getOwned(), - _parseMetaData(obj["md"])}; -} - -std::shared_ptr DurableCatalogImpl::getMetaData( - OperationContext* opCtx, const RecordId& catalogId) const { - BSONObj obj = _findEntry(opCtx, catalogId); - LOGV2_DEBUG(22209, 3, " fetched CCE metadata: {obj}", "obj"_attr = obj); - return _parseMetaData(obj["md"]); -} - -std::shared_ptr DurableCatalogImpl::_parseMetaData( - const BSONElement& mdElement) const { - std::shared_ptr md; - if (mdElement.isABSONObj()) { - LOGV2_DEBUG(22210, 3, "returning metadata: {mdElement}", "mdElement"_attr = mdElement); - md = std::make_shared(); - md->parse(mdElement.Obj()); - } - return md; -} - -void DurableCatalogImpl::putMetaData(OperationContext* opCtx, - const RecordId& catalogId, - BSONCollectionCatalogEntry::MetaData& md) { - NamespaceString nss(md.nss); - BSONObj obj = _findEntry(opCtx, catalogId); - - { - // rebuilt doc - BSONObjBuilder b; - b.append("md", md.toBSON()); - - BSONObjBuilder newIdentMap; - BSONObj oldIdentMap; - if (obj["idxIdent"].isABSONObj()) - oldIdentMap = obj["idxIdent"].Obj(); - - for (size_t i = 0; i < md.indexes.size(); i++) { - const auto& index = md.indexes[i]; - if (!index.isPresent()) { - continue; - } - - auto name = index.nameStringData(); - - // All indexes with buildUUIDs must be ready:false. - invariant(!(index.buildUUID && index.ready), str::stream() << md.toBSON(true)); - - // fix ident map - BSONElement e = oldIdentMap[name]; - if (e.type() == String) { - newIdentMap.append(e); - continue; - } - // missing, create new - newIdentMap.append(name, generateUniqueIdent(nss, "index")); - } - b.append("idxIdent", newIdentMap.obj()); - - // add whatever is left - b.appendElementsUnique(obj); - obj = b.obj(); - } - - LOGV2_DEBUG(22211, 3, "recording new metadata: {obj}", "obj"_attr = obj); - Status status = _rs->updateRecord(opCtx, catalogId, obj.objdata(), obj.objsize()); - fassert(28521, status); -} - -Status DurableCatalogImpl::_replaceEntry(OperationContext* opCtx, - const RecordId& catalogId, - const NamespaceString& toNss, - BSONCollectionCatalogEntry::MetaData& md) { - BSONObj old = _findEntry(opCtx, catalogId).getOwned(); - { - BSONObjBuilder b; - - b.append("ns", toNss.toStringWithTenantId()); - b.append("md", md.toBSON()); - - b.appendElementsUnique(old); - - BSONObj obj = b.obj(); - Status status = _rs->updateRecord(opCtx, catalogId, obj.objdata(), obj.objsize()); - fassert(28522, status); - } - - stdx::lock_guard lk(_catalogIdToEntryMapLock); - const auto it = _catalogIdToEntryMap.find(catalogId); - invariant(it != _catalogIdToEntryMap.end()); - - NamespaceString fromName = it->second.nss; - it->second.nss = toNss; - opCtx->recoveryUnit()->onRollback([this, catalogId, fromName](OperationContext*) { - stdx::lock_guard lk(_catalogIdToEntryMapLock); - const auto it = _catalogIdToEntryMap.find(catalogId); - invariant(it != _catalogIdToEntryMap.end()); - it->second.nss = fromName; - }); - - return Status::OK(); -} - -Status DurableCatalogImpl::_removeEntry(OperationContext* opCtx, const RecordId& catalogId) { - stdx::lock_guard lk(_catalogIdToEntryMapLock); - const auto it = _catalogIdToEntryMap.find(catalogId); - if (it == _catalogIdToEntryMap.end()) { - return Status(ErrorCodes::NamespaceNotFound, "collection not found"); - } - - opCtx->recoveryUnit()->onRollback([this, catalogId, entry = it->second](OperationContext*) { - stdx::lock_guard lk(_catalogIdToEntryMapLock); - _catalogIdToEntryMap[catalogId] = entry; - }); - - LOGV2_DEBUG(22212, - 1, - "deleting metadata for 
{it_second_namespace} @ {catalogId}", - "it_second_namespace"_attr = it->second.nss, - "catalogId"_attr = catalogId); - _rs->deleteRecord(opCtx, catalogId); - _catalogIdToEntryMap.erase(it); - - return Status::OK(); -} - -std::vector DurableCatalogImpl::getAllIdents(OperationContext* opCtx) const { - std::vector v; - - auto cursor = _rs->getCursor(opCtx); - while (auto record = cursor->next()) { - BSONObj obj = record->data.releaseToBson(); - if (isFeatureDocument(obj)) { - // Skip over the version document because it doesn't correspond to a namespace entry and - // therefore doesn't refer to any idents. - continue; - } - v.push_back(obj["ident"].String()); - - BSONElement e = obj["idxIdent"]; - if (!e.isABSONObj()) - continue; - BSONObj idxIdent = e.Obj(); - - BSONObjIterator sub(idxIdent); - while (sub.more()) { - BSONElement e = sub.next(); - v.push_back(e.String()); - } - } - - return v; -} - -bool DurableCatalogImpl::isUserDataIdent(StringData ident) const { - // Indexes and collections are candidates for dropping when the storage engine's metadata does - // not align with the catalog metadata. - return ident.find("index-") != std::string::npos || ident.find("index/") != std::string::npos || - ident.find("collection-") != std::string::npos || - ident.find("collection/") != std::string::npos; -} - -bool DurableCatalogImpl::isInternalIdent(StringData ident) const { - return ident.find(kInternalIdentPrefix) != std::string::npos; -} - -bool DurableCatalogImpl::isResumableIndexBuildIdent(StringData ident) const { - invariant(isInternalIdent(ident), ident.toString()); - return ident.find(kResumableIndexBuildIdentStem) != std::string::npos; -} - -bool DurableCatalogImpl::isCollectionIdent(StringData ident) const { - // Internal idents prefixed "internal-" should not be considered collections, because - // they are not eligible for orphan recovery through repair. - return ident.find("collection-") != std::string::npos || - ident.find("collection/") != std::string::npos; -} - -StatusWith DurableCatalogImpl::newOrphanedIdent( - OperationContext* opCtx, std::string ident, const CollectionOptions& optionsWithUUID) { - // The collection will be named local.orphan.xxxxx. - std::string identNs = ident; - std::replace(identNs.begin(), identNs.end(), '-', '_'); - NamespaceString nss(NamespaceString(NamespaceString::kOrphanCollectionDb, - NamespaceString::kOrphanCollectionPrefix + identNs)); - - BSONObj obj; - { - BSONObjBuilder b; - b.append("ns", nss.toStringWithTenantId()); - b.append("ident", ident); - BSONCollectionCatalogEntry::MetaData md; - md.nss = nss; - // Default options with newly generated UUID. 
- md.options = optionsWithUUID; - b.append("md", md.toBSON()); - obj = b.obj(); - } - StatusWith res = _rs->insertRecord(opCtx, obj.objdata(), obj.objsize(), Timestamp()); - if (!res.isOK()) - return res.getStatus(); - - stdx::lock_guard lk(_catalogIdToEntryMapLock); - invariant(_catalogIdToEntryMap.find(res.getValue()) == _catalogIdToEntryMap.end()); - _catalogIdToEntryMap[res.getValue()] = EntryIdentifier(res.getValue(), ident, nss); - opCtx->recoveryUnit()->registerChange(std::make_unique(this, res.getValue())); - - LOGV2_DEBUG(22213, - 1, - "stored meta data for orphaned collection {namespace} @ {res_getValue}", - logAttrs(nss), - "res_getValue"_attr = res.getValue()); - return {nss.toStringWithTenantId()}; -} - -StatusWith>> DurableCatalogImpl::createCollection( - OperationContext* opCtx, - const NamespaceString& nss, - const CollectionOptions& options, - bool allocateDefaultSpace) { - invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX)); - invariant(nss.coll().size() > 0); - - if (CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)) { - throwWriteConflictException(str::stream() - << "Namespace '" << nss.ns() << "' is already in use."); - } - - StatusWith swEntry = _addEntry(opCtx, nss, options); - if (!swEntry.isOK()) - return swEntry.getStatus(); - EntryIdentifier& entry = swEntry.getValue(); - - const auto keyFormat = [&] { - // Clustered collections require KeyFormat::String, but the opposite is not necessarily - // true: a clustered record store that is not associated with a collection has - // KeyFormat::String and and no CollectionOptions. - if (options.clusteredIndex) { - return KeyFormat::String; - } - return KeyFormat::Long; - }(); - Status status = - _engine->getEngine()->createRecordStore(opCtx, nss, entry.ident, options, keyFormat); - if (!status.isOK()) - return status; - - auto ru = opCtx->recoveryUnit(); - opCtx->recoveryUnit()->onRollback([ru, catalog = this, ident = entry.ident](OperationContext*) { - // Intentionally ignoring failure - catalog->_engine->getEngine()->dropIdent(ru, ident).ignore(); - }); - - auto rs = _engine->getEngine()->getRecordStore(opCtx, nss, entry.ident, options); - invariant(rs); - - return std::pair>(entry.catalogId, std::move(rs)); -} - -Status DurableCatalogImpl::createIndex(OperationContext* opCtx, - const RecordId& catalogId, - const NamespaceString& nss, - const CollectionOptions& collOptions, - const IndexDescriptor* spec) { - std::string ident = getIndexIdent(opCtx, catalogId, spec->indexName()); - - auto kvEngine = _engine->getEngine(); - Status status = spec->getIndexType() == INDEX_COLUMN - ? kvEngine->createColumnStore(opCtx, nss, collOptions, ident, spec) - : kvEngine->createSortedDataInterface(opCtx, nss, collOptions, ident, spec); - if (status.isOK()) { - opCtx->recoveryUnit()->onRollback( - [this, ident, recoveryUnit = opCtx->recoveryUnit()](OperationContext*) { - // Intentionally ignoring failure. - auto kvEngine = _engine->getEngine(); - kvEngine->dropIdent(recoveryUnit, ident).ignore(); - }); - } - return status; -} - -StatusWith DurableCatalogImpl::importCollection( - OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& metadata, - const BSONObj& storageMetadata, - const ImportOptions& importOptions) { - invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_X)); - invariant(nss.coll().size() > 0); - - uassert(ErrorCodes::NamespaceExists, - str::stream() << "Collection already exists. 
NS: " << nss.ns(), - !CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, nss)); - - BSONCollectionCatalogEntry::MetaData md; - const BSONElement mdElement = metadata["md"]; - uassert(ErrorCodes::BadValue, "Malformed catalog metadata", mdElement.isABSONObj()); - md.parse(mdElement.Obj()); - - uassert(ErrorCodes::BadValue, - "Attempted to import catalog entry without an ident", - metadata.hasField("ident")); - - const auto& catalogEntry = [&] { - if (importOptions.importCollectionUUIDOption == - ImportOptions::ImportCollectionUUIDOption::kGenerateNew) { - // Generate a new UUID for the collection. - md.options.uuid = UUID::gen(); - BSONObjBuilder catalogEntryBuilder; - // Generate a new "md" field after setting the new UUID. - catalogEntryBuilder.append("md", md.toBSON()); - // Append the rest of the metadata. - catalogEntryBuilder.appendElementsUnique(metadata); - return catalogEntryBuilder.obj(); - } - return metadata; - }(); - - // Before importing the idents belonging to the collection and indexes, change '_rand' if there - // will be a conflict. - std::set indexIdents; - { - const std::string collectionIdent = catalogEntry["ident"].String(); - - if (!catalogEntry["idxIdent"].eoo()) { - for (const auto& indexIdent : catalogEntry["idxIdent"].Obj()) { - indexIdents.insert(indexIdent.String()); - } - } - - auto identsToImportConflict = [&](WithLock) -> bool { - if (StringData(collectionIdent).endsWith(_rand)) { - return true; - } - - for (const std::string& ident : indexIdents) { - if (StringData(ident).endsWith(_rand)) { - return true; - } - } - return false; - }; - - stdx::lock_guard lk(_randLock); - while (!importOptions.skipIdentCollisionCheck && - (_hasEntryCollidingWithRand(lk) || identsToImportConflict(lk))) { - _rand = _newRand(); - } - } - - StatusWith swEntry = _importEntry(opCtx, nss, catalogEntry); - if (!swEntry.isOK()) - return swEntry.getStatus(); - EntryIdentifier& entry = swEntry.getValue(); - - opCtx->recoveryUnit()->onRollback( - [catalog = this, ident = entry.ident, indexIdents = indexIdents](OperationContext* opCtx) { - catalog->_engine->getEngine()->dropIdentForImport(opCtx, ident); - for (const auto& indexIdent : indexIdents) { - catalog->_engine->getEngine()->dropIdentForImport(opCtx, indexIdent); - } - }); - - auto kvEngine = _engine->getEngine(); - Status status = kvEngine->importRecordStore(opCtx, entry.ident, storageMetadata, importOptions); - if (!status.isOK()) - return status; - - for (const std::string& indexIdent : indexIdents) { - status = - kvEngine->importSortedDataInterface(opCtx, indexIdent, storageMetadata, importOptions); - if (!status.isOK()) { - return status; - } - } - - auto rs = _engine->getEngine()->getRecordStore(opCtx, nss, entry.ident, md.options); - invariant(rs); - - return DurableCatalog::ImportResult(entry.catalogId, std::move(rs), md.options.uuid.value()); -} - -Status DurableCatalogImpl::renameCollection(OperationContext* opCtx, - const RecordId& catalogId, - const NamespaceString& toNss, - BSONCollectionCatalogEntry::MetaData& md) { - return _replaceEntry(opCtx, catalogId, toNss, md); -} - -Status DurableCatalogImpl::dropCollection(OperationContext* opCtx, const RecordId& catalogId) { - EntryIdentifier entry; - { - stdx::lock_guard lk(_catalogIdToEntryMapLock); - entry = _catalogIdToEntryMap[catalogId]; - } - - invariant(opCtx->lockState()->isCollectionLockedForMode(entry.nss, MODE_X)); - invariant(getTotalIndexCount(opCtx, catalogId) == 0); - - // Remove metadata from mdb_catalog - Status status = _removeEntry(opCtx, 
catalogId); - if (!status.isOK()) { - return status; - } - - return Status::OK(); -} - -Status DurableCatalogImpl::dropAndRecreateIndexIdentForResume(OperationContext* opCtx, - const NamespaceString& nss, - const CollectionOptions& collOptions, - const IndexDescriptor* spec, - StringData ident) { - auto status = _engine->getEngine()->dropSortedDataInterface(opCtx, ident); - if (!status.isOK()) - return status; - - status = _engine->getEngine()->createSortedDataInterface(opCtx, nss, collOptions, ident, spec); - - return status; -} - -bool DurableCatalogImpl::isIndexMultikey(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName, - MultikeyPaths* multikeyPaths) const { - auto md = getMetaData(opCtx, catalogId); - - int offset = md->findIndexOffset(indexName); - invariant(offset >= 0, - str::stream() << "cannot get multikey for index " << indexName << " @ " << catalogId - << " : " << md->toBSON()); - - if (multikeyPaths && !md->indexes[offset].multikeyPaths.empty()) { - *multikeyPaths = md->indexes[offset].multikeyPaths; - } - - return md->indexes[offset].multikey; -} - -int DurableCatalogImpl::getTotalIndexCount(OperationContext* opCtx, - const RecordId& catalogId) const { - auto md = getMetaData(opCtx, catalogId); - if (!md) - return 0; - - return md->getTotalIndexCount(); -} - -void DurableCatalogImpl::getReadyIndexes(OperationContext* opCtx, - RecordId catalogId, - StringSet* names) const { - auto md = getMetaData(opCtx, catalogId); - - if (!md) { - return; - } - - for (const auto& index : md->indexes) { - if (index.ready) - names->insert(index.spec["name"].String()); - } -} - -bool DurableCatalogImpl::isIndexPresent(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName) const { - auto md = getMetaData(opCtx, catalogId); - if (!md) - return false; - - int offset = md->findIndexOffset(indexName); - return offset >= 0; -} - -bool DurableCatalogImpl::isIndexReady(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName) const { - auto md = getMetaData(opCtx, catalogId); - if (!md) - return false; - - int offset = md->findIndexOffset(indexName); - invariant(offset >= 0, - str::stream() << "cannot get ready status for index " << indexName << " @ " - << catalogId << " : " << md->toBSON()); - return md->indexes[offset].ready; -} - -void DurableCatalogImpl::setRand_forTest(const std::string& rand) { - stdx::lock_guard lk(_randLock); - _rand = rand; -} - -std::string DurableCatalogImpl::getRand_forTest() const { - stdx::lock_guard lk(_randLock); - return _rand; -} - -} // namespace mongo diff --git a/src/mongo/db/storage/durable_catalog_impl.h b/src/mongo/db/storage/durable_catalog_impl.h deleted file mode 100644 index 8cd9d32d07ef9..0000000000000 --- a/src/mongo/db/storage/durable_catalog_impl.h +++ /dev/null @@ -1,222 +0,0 @@ -/** - * Copyright (C) 2018-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include -#include -#include - -#include "mongo/base/string_data.h" -#include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/collection_options.h" -#include "mongo/db/record_id.h" -#include "mongo/db/storage/bson_collection_catalog_entry.h" -#include "mongo/db/storage/durable_catalog.h" -#include "mongo/platform/mutex.h" - -namespace mongo { - -class OperationContext; -class RecordStore; -class StorageEngineInterface; - -class DurableCatalogImpl : public DurableCatalog { -public: - /** - * The RecordStore must be thread-safe, in particular with concurrent calls to - * RecordStore::find, updateRecord, insertRecord, deleteRecord and dataFor. The - * DurableCatalogImpl does not utilize Cursors and those methods may omit further protection. - */ - DurableCatalogImpl(RecordStore* rs, - bool directoryPerDb, - bool directoryForIndexes, - StorageEngineInterface* engine); - ~DurableCatalogImpl(); - - void init(OperationContext* opCtx); - - std::vector getAllCatalogEntries(OperationContext* opCtx) const; - - boost::optional scanForCatalogEntryByNss(OperationContext* opCtx, - const NamespaceString& nss) const; - - boost::optional scanForCatalogEntryByUUID(OperationContext* opCtx, - const UUID& uuid) const; - - EntryIdentifier getEntry(const RecordId& catalogId) const; - - std::string getCollectionIdent(const RecordId& catalogId) const; - - std::string getIndexIdent(OperationContext* opCtx, - const RecordId& catalogId, - StringData idxName) const; - - std::vector getIndexIdents(OperationContext* opCtx, - const RecordId& catalogId) const; - - BSONObj getCatalogEntry(OperationContext* opCtx, const RecordId& catalogId) const { - return _findEntry(opCtx, catalogId); - } - - boost::optional getParsedCatalogEntry( - OperationContext* opCtx, const RecordId& catalogId) const override; - - std::shared_ptr getMetaData( - OperationContext* opCtx, const RecordId& catalogId) const; - void putMetaData(OperationContext* opCtx, - const RecordId& catalogId, - BSONCollectionCatalogEntry::MetaData& md); - - std::vector getAllIdents(OperationContext* opCtx) const; - - bool isUserDataIdent(StringData ident) const; - - bool isInternalIdent(StringData ident) const; - - bool isResumableIndexBuildIdent(StringData ident) const; - - bool isCollectionIdent(StringData ident) const; - - RecordStore* getRecordStore() { - return _rs; - } - - StatusWith newOrphanedIdent(OperationContext* opCtx, - std::string ident, - const CollectionOptions& optionsWithUUID); - - std::string getFilesystemPathForDb(const std::string& dbName) const; - - std::string newInternalIdent(); - std::string newInternalResumableIndexBuildIdent(); - - StatusWith>> createCollection( - OperationContext* opCtx, - const NamespaceString& nss, 
- const CollectionOptions& options, - bool allocateDefaultSpace); - - Status createIndex(OperationContext* opCtx, - const RecordId& catalogId, - const NamespaceString& nss, - const CollectionOptions& collOptions, - const IndexDescriptor* spec); - - StatusWith importCollection(OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& metadata, - const BSONObj& storageMetadata, - const ImportOptions& importOptions) override; - - Status renameCollection(OperationContext* opCtx, - const RecordId& catalogId, - const NamespaceString& toNss, - BSONCollectionCatalogEntry::MetaData& md); - - Status dropCollection(OperationContext* opCtx, const RecordId& catalogId); - - Status dropAndRecreateIndexIdentForResume(OperationContext* opCtx, - const NamespaceString& nss, - const CollectionOptions& collOptions, - const IndexDescriptor* spec, - StringData ident); - - bool isIndexMultikey(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName, - MultikeyPaths* multikeyPaths) const; - - int getTotalIndexCount(OperationContext* opCtx, const RecordId& catalogId) const; - - void getReadyIndexes(OperationContext* opCtx, RecordId catalogId, StringSet* names) const; - - bool isIndexPresent(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName) const; - - bool isIndexReady(OperationContext* opCtx, - const RecordId& catalogId, - StringData indexName) const; - - void setRand_forTest(const std::string& rand); - - std::string getRand_forTest() const; - - std::string generateUniqueIdent(NamespaceString nss, const char* kind); - -private: - class AddIdentChange; - - friend class StorageEngineImpl; - friend class DurableCatalogImplTest; - friend class StorageEngineTest; - - BSONObj _findEntry(OperationContext* opCtx, const RecordId& catalogId) const; - StatusWith _addEntry(OperationContext* opCtx, - NamespaceString nss, - const CollectionOptions& options); - StatusWith _importEntry(OperationContext* opCtx, - NamespaceString nss, - const BSONObj& metadata); - Status _replaceEntry(OperationContext* opCtx, - const RecordId& catalogId, - const NamespaceString& toNss, - BSONCollectionCatalogEntry::MetaData& md); - Status _removeEntry(OperationContext* opCtx, const RecordId& catalogId); - - std::shared_ptr _parseMetaData( - const BSONElement& mdElement) const; - - - std::string _newInternalIdent(StringData identStem); - - static std::string _newRand(); - - /** - * The '_randLock' must be passed in. - */ - bool _hasEntryCollidingWithRand(WithLock) const; - - RecordStore* _rs; // not owned - const bool _directoryPerDb; - const bool _directoryForIndexes; - - // Protects '_rand' and '_next'. - mutable Mutex _randLock = MONGO_MAKE_LATCH("DurableCatalogImpl::_rand"); - std::string _rand; - unsigned long long _next; - - std::map _catalogIdToEntryMap; - mutable Mutex _catalogIdToEntryMapLock = - MONGO_MAKE_LATCH("DurableCatalogImpl::_catalogIdToEntryMap"); - - StorageEngineInterface* const _engine; -}; -} // namespace mongo diff --git a/src/mongo/db/storage/durable_history_pin.cpp b/src/mongo/db/storage/durable_history_pin.cpp index 2bb962ad353cd..0bb57c7f95b28 100644 --- a/src/mongo/db/storage/durable_history_pin.cpp +++ b/src/mongo/db/storage/durable_history_pin.cpp @@ -31,15 +31,18 @@ #define LOGV2_FOR_RECOVERY(ID, DLEVEL, MESSAGE, ...) 
\ LOGV2_DEBUG_OPTIONS(ID, DLEVEL, {logv2::LogComponent::kStorageRecovery}, MESSAGE, ##__VA_ARGS__) -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/storage/durable_history_pin.h" - -#include "mongo/bson/bsonmisc.h" -#include "mongo/db/commands.h" -#include "mongo/db/db_raii.h" +#include "mongo/base/status_with.h" +#include "mongo/db/client.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/durable_history_pin.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/durable_history_pin.h b/src/mongo/db/storage/durable_history_pin.h index e50ed11944e23..08eba949d9af7 100644 --- a/src/mongo/db/storage/durable_history_pin.h +++ b/src/mongo/db/storage/durable_history_pin.h @@ -29,13 +29,14 @@ #pragma once +#include #include +#include #include -#include - #include "mongo/bson/timestamp.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/storage/encryption_hooks.cpp b/src/mongo/db/storage/encryption_hooks.cpp index 786f963e9e174..9233174e9cbde 100644 --- a/src/mongo/db/storage/encryption_hooks.cpp +++ b/src/mongo/db/storage/encryption_hooks.cpp @@ -27,16 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/encryption_hooks.h" - #include +#include #include +#include -#include "mongo/base/init.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/service_context.h" #include "mongo/db/storage/data_protector.h" +#include "mongo/db/storage/encryption_hooks.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/storage/encryption_hooks.h b/src/mongo/db/storage/encryption_hooks.h index c2c372d6533a9..7187727c9f201 100644 --- a/src/mongo/db/storage/encryption_hooks.h +++ b/src/mongo/db/storage/encryption_hooks.h @@ -29,10 +29,15 @@ #pragma once +#include +#include +#include #include #include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/db/jsobj.h" namespace boost { diff --git a/src/mongo/db/storage/execution_context.cpp b/src/mongo/db/storage/execution_context.cpp index a18cd3edc1688..fb404f1bd89fe 100644 --- a/src/mongo/db/storage/execution_context.cpp +++ b/src/mongo/db/storage/execution_context.cpp @@ -28,7 +28,10 @@ */ #include "mongo/db/storage/execution_context.h" -#include "mongo/db/storage/storage_parameters_gen.h" + +#include + +#include "mongo/util/decorable.h" namespace mongo { const OperationContext::Decoration StorageExecutionContext::get = diff --git a/src/mongo/db/storage/execution_context.h b/src/mongo/db/storage/execution_context.h index efe911cc0eb5a..878b25304e168 100644 --- a/src/mongo/db/storage/execution_context.h +++ b/src/mongo/db/storage/execution_context.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/index/column_key_generator.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/operation_context.h" diff --git a/src/mongo/db/storage/execution_control/README.md b/src/mongo/db/storage/execution_control/README.md new file mode 100644 index 0000000000000..c1524becd4bad --- /dev/null +++ b/src/mongo/db/storage/execution_control/README.md @@ -0,0 +1,79 @@ +# Execution Control + +## Throughput Probing +### Server 
Parameters
+- `throughputProbingInitialConcurrency -> gInitialConcurrency`: initial number of concurrent read and write transactions
+- `throughputProbingMinConcurrency -> gMinConcurrency`: minimum concurrent read and write transactions
+- `throughputProbingMaxConcurrency -> gMaxConcurrency`: maximum concurrent read and write transactions
+- `throughputProbingReadWriteRatio -> gReadWriteRatio`: ratio of read and write tickets where 0.5 indicates 1:1 ratio
+- `throughputProbingConcurrencyMovingAverageWeight -> gConcurrencyMovingAverageWeight`: weight of new concurrency measurement in the exponentially-decaying moving average
+- `throughputProbingStepMultiple -> gStepMultiple`: step size for throughput probing
+
+### Pseudocode
+```
+setConcurrency(concurrency)
+    ticketsAllottedToReads := clamp((concurrency * gReadWriteRatio), gMinConcurrency, gMaxConcurrency)
+    ticketsAllottedToWrites := clamp((concurrency * (1-gReadWriteRatio)), gMinConcurrency, gMaxConcurrency)
+
+getCurrentConcurrency()
+    return ticketsAllottedToReads + ticketsAllottedToWrites
+
+exponentialMovingAverage(stableConcurrency, currentConcurrency)
+    return (currentConcurrency * gConcurrencyMovingAverageWeight) + (stableConcurrency * (1 - gConcurrencyMovingAverageWeight))
+
+run()
+    currentThroughput := (# read tickets returned + # write tickets returned) / time elapsed
+
+    Case of ProbingState
+        kStable  probeStable(currentThroughput)
+        kUp      probeUp(currentThroughput)
+        kDown    probeDown(currentThroughput)
+
+probeStable(currentThroughput)
+    stableThroughput := currentThroughput
+    currentConcurrency := getCurrentConcurrency()
+    if (currentConcurrency < gMaxConcurrency && tickets exhausted)
+        setConcurrency(stableConcurrency * (1 + gStepMultiple))
+        ProbingState := kUp
+    else if (currentConcurrency > gMinConcurrency)
+        setConcurrency(stableConcurrency * (1 - gStepMultiple))
+        ProbingState := kDown
+    else (currentConcurrency == gMinConcurrency), no changes
+
+probeUp(currentThroughput)
+    if (currentThroughput > stableThroughput)
+        stableConcurrency := exponentialMovingAverage(stableConcurrency, getCurrentConcurrency())
+        stableThroughput := currentThroughput
+    setConcurrency(stableConcurrency)
+    ProbingState := kStable
+
+probeDown(currentThroughput)
+    if (currentThroughput > stableThroughput)
+        stableConcurrency := exponentialMovingAverage(stableConcurrency, getCurrentConcurrency())
+        stableThroughput := currentThroughput
+    setConcurrency(stableConcurrency)
+    ProbingState := kStable
+
+```
+
+### Diagram
+```mermaid
+flowchart TB
+A(Stable Probe) --> |at minimum and tickets not exhausted|A
+
+A --> |"(above minimum and tickets not exhausted) or at maximum"|C(Probe Down)
+subgraph
+C --> |throughput increased|F{{Decrease stable concurrency}}
+C --> |throughput did not increase|G(Go back to stable concurrency)
+end
+F --> H
+G --> H
+
+A --> |below maximum and tickets exhausted| B(Probe Up)
+subgraph
+B --> |throughput increased|D{{Increase stable concurrency}}
+B --> |throughput did not increase|E{{Go back to stable concurrency}}
+end
+D --> H(Stable Probe)
+E --> H
+```
diff --git a/src/mongo/db/storage/execution_control/concurrency_adjustment_parameters.idl b/src/mongo/db/storage/execution_control/concurrency_adjustment_parameters.idl
new file mode 100644
index 0000000000000..9c1e787b4eedd
--- /dev/null
+++ b/src/mongo/db/storage/execution_control/concurrency_adjustment_parameters.idl
@@ -0,0 +1,71 @@
+# Copyright (C) 2023-present MongoDB, Inc.
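The README's pseudocode and diagram above describe the throughput-probing state machine in the abstract. The following is a minimal, self-contained C++ sketch of that loop under stated assumptions: the struct name, the numeric defaults, and the `ticketsExhausted` input are illustrative only and do not reflect the server's actual execution control implementation.

```cpp
// Illustrative sketch only; names and default values are assumptions, not the
// server's real throughput-probing code.
#include <algorithm>
#include <cstdint>

namespace throughput_probing_sketch {

enum class ProbingState { kStable, kUp, kDown };

struct ThroughputProbing {
    // Mirrors of the server parameters described in the README (values assumed).
    double readWriteRatio = 0.5;       // throughputProbingReadWriteRatio
    double movingAverageWeight = 0.2;  // throughputProbingConcurrencyMovingAverageWeight
    double stepMultiple = 0.1;         // throughputProbingStepMultiple
    int32_t minConcurrency = 4;        // throughputProbingMinConcurrency
    int32_t maxConcurrency = 128;      // throughputProbingMaxConcurrency

    ProbingState state = ProbingState::kStable;
    double stableConcurrency = 16;     // throughputProbingInitialConcurrency
    double stableThroughput = 0;
    int32_t readTickets = 0;
    int32_t writeTickets = 0;

    ThroughputProbing() {
        setConcurrency(stableConcurrency);
    }

    void setConcurrency(double concurrency) {
        readTickets = std::clamp(static_cast<int32_t>(concurrency * readWriteRatio),
                                 minConcurrency, maxConcurrency);
        writeTickets = std::clamp(static_cast<int32_t>(concurrency * (1 - readWriteRatio)),
                                  minConcurrency, maxConcurrency);
    }

    int32_t currentConcurrency() const {
        return readTickets + writeTickets;
    }

    double exponentialMovingAverage(double stable, double current) const {
        return current * movingAverageWeight + stable * (1 - movingAverageWeight);
    }

    // Called once per probing interval with the throughput observed since the last run.
    void run(double currentThroughput, bool ticketsExhausted) {
        switch (state) {
            case ProbingState::kStable:
                probeStable(currentThroughput, ticketsExhausted);
                break;
            case ProbingState::kUp:
            case ProbingState::kDown:
                // Both probes keep the new concurrency (folded into the stable value via the
                // moving average) only if throughput improved, then return to the stable state.
                if (currentThroughput > stableThroughput) {
                    stableConcurrency =
                        exponentialMovingAverage(stableConcurrency, currentConcurrency());
                    stableThroughput = currentThroughput;
                }
                setConcurrency(stableConcurrency);
                state = ProbingState::kStable;
                break;
        }
    }

    void probeStable(double currentThroughput, bool ticketsExhausted) {
        stableThroughput = currentThroughput;
        if (currentConcurrency() < maxConcurrency && ticketsExhausted) {
            setConcurrency(stableConcurrency * (1 + stepMultiple));
            state = ProbingState::kUp;
        } else if (currentConcurrency() > minConcurrency) {
            setConcurrency(stableConcurrency * (1 - stepMultiple));
            state = ProbingState::kDown;
        }
        // Otherwise we are at the minimum with spare tickets: keep the current concurrency.
    }
};

}  // namespace throughput_probing_sketch
```

Because `probeUp` and `probeDown` in the pseudocode differ only in the direction of the step taken before entering them, the sketch folds them into a single branch of `run`.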
+# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo" + cpp_includes: + - "mongo/db/storage/execution_control/concurrency_adjustment_validator.h" + +enums: + StorageEngineConcurrencyAdjustmentAlgorithm: + description: "Algorithm for adjusting the number of concurrent storage engine transactions" + type: string + # 'kFixedConcurrentTransactions': Number of concurrent transactions are controlled by + # storageEngineConcurrentWriteTransactions/storageEngineConcurrentReadTransactions and will + # not be adjusted automatically based on overall system throughput. + # + # 'kThroughputProbing': Number of concurrent transactions are dynamically adjusted, either + # increasing or decreasing concurrency in the storage engine, based on system throughput. + values: + kFixedConcurrentTransactions: "fixedConcurrentTransactions" + kThroughputProbing: "throughputProbing" + +server_parameters: + + storageEngineConcurrencyAdjustmentAlgorithm: + description: >- + The algorithm to be used for adjusting the number of concurrent storage engine transactions. + This is gated behind featureFlagExecutionControl and will have no effect if that feature is + not enabled. + set_at: startup + cpp_vartype: std::string + cpp_varname: gStorageEngineConcurrencyAdjustmentAlgorithm + default: "throughputProbing" + validator: + callback: validateConcurrencyAdjustmentAlgorithm + + storageEngineConcurrencyAdjustmentIntervalMillis: + description: >- + The interval in milliseconds in which to run the concurrency adjustment algorithm, if it is + not set to fixedConcurrentTransactions. + set_at: startup + cpp_vartype: int32_t + cpp_varname: gStorageEngineConcurrencyAdjustmentIntervalMillis + default: 100 + validator: + gt: 0 diff --git a/src/mongo/db/storage/execution_control/concurrency_adjustment_validator.cpp b/src/mongo/db/storage/execution_control/concurrency_adjustment_validator.cpp new file mode 100644 index 0000000000000..6a90f54632e52 --- /dev/null +++ b/src/mongo/db/storage/execution_control/concurrency_adjustment_validator.cpp @@ -0,0 +1,50 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/db/storage/execution_control/concurrency_adjustment_validator.h" + +#include + +#include "mongo/base/status.h" +#include "mongo/db/storage/execution_control/concurrency_adjustment_parameters_gen.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" + +namespace mongo { + +Status validateConcurrencyAdjustmentAlgorithm(const std::string& name, + const boost::optional&) try { + StorageEngineConcurrencyAdjustmentAlgorithm_parse( + IDLParserContext{"storageEngineConcurrencyAdjustmentAlgorithm"}, name); + return Status::OK(); +} catch (const DBException& ex) { + return ex.toStatus(); +} + +} // namespace mongo diff --git a/src/mongo/db/storage/execution_control/concurrency_adjustment_validator.h b/src/mongo/db/storage/execution_control/concurrency_adjustment_validator.h new file mode 100644 index 0000000000000..b08dfd2691993 --- /dev/null +++ b/src/mongo/db/storage/execution_control/concurrency_adjustment_validator.h @@ -0,0 +1,44 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. 
If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/db/tenant_id.h" + +namespace mongo { + +Status validateConcurrencyAdjustmentAlgorithm(const std::string& name, + const boost::optional&); + +} // namespace mongo diff --git a/src/mongo/db/storage/execution_control/throughput_probing.cpp b/src/mongo/db/storage/execution_control/throughput_probing.cpp index c3bc35c606118..d54b83af805b7 100644 --- a/src/mongo/db/storage/execution_control/throughput_probing.cpp +++ b/src/mongo/db/storage/execution_control/throughput_probing.cpp @@ -28,8 +28,20 @@ */ #include "mongo/db/storage/execution_control/throughput_probing.h" + +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/storage/execution_control/throughput_probing_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/processinfo.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -87,11 +99,11 @@ ThroughputProbing::ThroughputProbing(ServiceContext* svcCtx, : TicketHolderMonitor(svcCtx, readTicketHolder, writeTicketHolder, interval), _stableConcurrency(gInitialConcurrency ? gInitialConcurrency - : std::clamp(static_cast(ProcessInfo::getNumCores()), - gMinConcurrency, - gMaxConcurrency.load())) { - _readTicketHolder->resize(_stableConcurrency); - _writeTicketHolder->resize(_stableConcurrency); + : std::clamp(static_cast(ProcessInfo::getNumCores() * 2), + gMinConcurrency * 2, + gMaxConcurrency.load() * 2)), + _timer(svcCtx->getTickSource()) { + _resetConcurrency(); } void ThroughputProbing::appendStats(BSONObjBuilder& builder) const { @@ -103,9 +115,21 @@ void ThroughputProbing::_run(Client* client) { _readTicketHolder->numFinishedProcessing() + _writeTicketHolder->numFinishedProcessing(); invariant(numFinishedProcessing >= _prevNumFinishedProcessing); - auto throughput = (numFinishedProcessing - _prevNumFinishedProcessing) / - static_cast(durationCount(_interval())); - _stats.throughput.store(throughput); + // Initialize on first iteration. + if (_prevNumFinishedProcessing < 0) { + _prevNumFinishedProcessing = numFinishedProcessing; + _timer.reset(); + return; + } + + double elapsed = _timer.micros(); + if (elapsed == 0) { + // The clock used to sleep between iterations may not be reliable, and thus the timer may + // report that no time has elapsed. If this occurs, just wait for the next iteration. + return; + } + + auto throughput = (numFinishedProcessing - _prevNumFinishedProcessing) / elapsed; switch (_state) { case ProbingState::kStable: @@ -119,9 +143,35 @@ void ThroughputProbing::_run(Client* client) { break; } - _prevNumFinishedProcessing = numFinishedProcessing; + // Reset these with fresh values after we've made our adjustment to establish a better + // cause-effect relationship. + _prevNumFinishedProcessing = + _readTicketHolder->numFinishedProcessing() + _writeTicketHolder->numFinishedProcessing(); + _timer.reset(); +} + +namespace { +// Computes the moving average by weighing 'newValue' with the provided 'weight'. 
+double expMovingAverage(double average, double newValue, double weight) { + return (newValue * weight) + (average * (1 - weight)); } +std::pair newReadWriteConcurrencies(double stableConcurrency, double step) { + auto readPct = gReadWriteRatio.load(); + auto writePct = 1 - readPct; + + auto min = gMinConcurrency; + auto max = gMaxConcurrency.load(); + + auto clamp = [&](double pct) { + return std::clamp( + static_cast(std::round(stableConcurrency * pct * step)), min, max); + }; + + return {clamp(readPct), clamp(writePct)}; +} +} // namespace + void ThroughputProbing::_probeStable(double throughput) { invariant(_state == ProbingState::kStable); @@ -130,19 +180,21 @@ void ThroughputProbing::_probeStable(double throughput) { // Record the baseline reading. _stableThroughput = throughput; - auto outof = _readTicketHolder->outof(); - auto peakUsed = std::max(_readTicketHolder->getAndResetPeakUsed(), - _writeTicketHolder->getAndResetPeakUsed()); - if (outof < gMaxConcurrency.load() && peakUsed >= outof) { + auto readTotal = _readTicketHolder->outof(); + auto writeTotal = _writeTicketHolder->outof(); + auto readPeak = _readTicketHolder->getAndResetPeakUsed(); + auto writePeak = _writeTicketHolder->getAndResetPeakUsed(); + + if ((readTotal < gMaxConcurrency.load() && readPeak >= readTotal) || + (writeTotal < gMaxConcurrency.load() && writePeak >= writeTotal)) { // At least one of the ticket pools is exhausted, so try increasing concurrency. _state = ProbingState::kUp; - _setConcurrency(std::ceil(_stableConcurrency * (1 + gStepMultiple.load()))); - } else if (_readTicketHolder->used() > gMinConcurrency || - _writeTicketHolder->used() > gMinConcurrency) { + _increaseConcurrency(); + } else if (readPeak > gMinConcurrency || writePeak > gMinConcurrency) { // Neither of the ticket pools are exhausted, so try decreasing concurrency to just below // the current level of usage. _state = ProbingState::kDown; - _setConcurrency(std::floor(peakUsed * (1 - gStepMultiple.load()))); + _decreaseConcurrency(); } } @@ -152,19 +204,24 @@ void ThroughputProbing::_probeUp(double throughput) { LOGV2_DEBUG(7346001, 3, "Throughput Probing: up", "throughput"_attr = throughput); if (throughput > _stableThroughput) { - // Increasing concurrency caused throughput to increase, so promote this new level of - // concurrency to stable. - auto concurrency = _readTicketHolder->outof(); + // Increasing concurrency caused throughput to increase, so use this information to adjust + // our stable concurrency. We don't want to leave this at the current level. Instead, we use + // this to update the moving average to avoid over-correcting on recent measurements. + auto concurrency = _readTicketHolder->outof() + _writeTicketHolder->outof(); + auto newConcurrency = expMovingAverage( + _stableConcurrency, concurrency, gConcurrencyMovingAverageWeight.load()); + _stats.timesIncreased.fetchAndAdd(1); - _stats.totalAmountIncreased.fetchAndAdd(concurrency - _stableConcurrency); + _stats.totalAmountIncreased.fetchAndAdd(newConcurrency - _stableConcurrency); _state = ProbingState::kStable; _stableThroughput = throughput; - _stableConcurrency = concurrency; - } else if (_readTicketHolder->outof() > gMinConcurrency) { - // Increasing concurrency did not cause throughput to increase, so try decreasing - // concurrency instead. 
- _state = ProbingState::kDown; - _setConcurrency(std::floor(_stableConcurrency * (1 - gStepMultiple.load()))); + _stableConcurrency = newConcurrency; + _resetConcurrency(); + } else { + // Increasing concurrency did not cause throughput to increase, so go back to stable and get + // a new baseline to compare against. + _state = ProbingState::kStable; + _resetConcurrency(); } } @@ -174,33 +231,84 @@ void ThroughputProbing::_probeDown(double throughput) { LOGV2_DEBUG(7346002, 3, "Throughput Probing: down", "throughput"_attr = throughput); if (throughput > _stableThroughput) { - // Decreasing concurrency caused throughput to increase, so promote this new level of - // concurrency to stable. - auto concurrency = _readTicketHolder->outof(); + // Decreasing concurrency caused throughput to increase, so use this information to adjust + // our stable concurrency. We don't want to leave this at the current level. Instead, we use + // this to update the moving average to avoid over-correcting on recent measurements. + auto concurrency = _readTicketHolder->outof() + _writeTicketHolder->outof(); + auto newConcurrency = expMovingAverage( + _stableConcurrency, concurrency, gConcurrencyMovingAverageWeight.load()); + _stats.timesDecreased.fetchAndAdd(1); - _stats.totalAmountDecreased.fetchAndAdd(_stableConcurrency - concurrency); + _stats.totalAmountDecreased.fetchAndAdd(_stableConcurrency - newConcurrency); _state = ProbingState::kStable; _stableThroughput = throughput; - _stableConcurrency = concurrency; + _stableConcurrency = newConcurrency; + _resetConcurrency(); } else { // Decreasing concurrency did not cause throughput to increase, so go back to stable and get // a new baseline to compare against. _state = ProbingState::kStable; - _setConcurrency(_stableConcurrency); + _resetConcurrency(); + } +} + +void ThroughputProbing::_resetConcurrency() { + auto [newReadConcurrency, newWriteConcurrency] = + newReadWriteConcurrencies(_stableConcurrency, 1); + + _readTicketHolder->resize(newReadConcurrency); + _writeTicketHolder->resize(newWriteConcurrency); + + LOGV2_DEBUG(7796900, + 3, + "Throughput Probing: reset concurrency to stable", + "readConcurrency"_attr = newReadConcurrency, + "writeConcurrency"_attr = newWriteConcurrency); +} + +void ThroughputProbing::_increaseConcurrency() { + auto [newReadConcurrency, newWriteConcurrency] = + newReadWriteConcurrencies(_stableConcurrency, 1 + gStepMultiple.load()); + + if (newReadConcurrency == _readTicketHolder->outof()) { + ++newReadConcurrency; + } + if (newWriteConcurrency == _writeTicketHolder->outof()) { + ++newWriteConcurrency; } + + _readTicketHolder->resize(newReadConcurrency); + _writeTicketHolder->resize(newWriteConcurrency); + + LOGV2_DEBUG(7796901, + 3, + "Throughput Probing: increasing concurrency", + "readConcurrency"_attr = newReadConcurrency, + "writeConcurrency"_attr = newWriteConcurrency); } -void ThroughputProbing::_setConcurrency(int32_t concurrency) { - concurrency = std::clamp(concurrency, gMinConcurrency, gMaxConcurrency.load()); - _readTicketHolder->resize(concurrency); - _writeTicketHolder->resize(concurrency); +void ThroughputProbing::_decreaseConcurrency() { + auto [newReadConcurrency, newWriteConcurrency] = + newReadWriteConcurrencies(_stableConcurrency, 1 - gStepMultiple.load()); + + if (newReadConcurrency == _readTicketHolder->outof()) { + --newReadConcurrency; + } + if (newWriteConcurrency == _writeTicketHolder->outof()) { + --newWriteConcurrency; + } + + _readTicketHolder->resize(newReadConcurrency); + 
_writeTicketHolder->resize(newWriteConcurrency); - LOGV2_DEBUG( - 7346003, 3, "Throughput Probing: set concurrency", "concurrency"_attr = concurrency); + LOGV2_DEBUG(7796902, + 3, + "Throughput Probing: decreasing concurrency", + "readConcurrency"_attr = newReadConcurrency, + "writeConcurrency"_attr = newWriteConcurrency); } void ThroughputProbing::Stats::serialize(BSONObjBuilder& builder) const { - builder.append("throughput", throughput.load()); builder.append("timesDecreased", static_cast(timesDecreased.load())); builder.append("timesIncreased", static_cast(timesIncreased.load())); builder.append("totalAmountDecreased", static_cast(totalAmountDecreased.load())); diff --git a/src/mongo/db/storage/execution_control/throughput_probing.h b/src/mongo/db/storage/execution_control/throughput_probing.h index 1d8be04a369ba..c1e40e3e89e5d 100644 --- a/src/mongo/db/storage/execution_control/throughput_probing.h +++ b/src/mongo/db/storage/execution_control/throughput_probing.h @@ -29,7 +29,20 @@ #pragma once +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/ticketholder_monitor.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/concurrency/ticketholder.h" +#include "mongo/util/duration.h" +#include "mongo/util/timer.h" namespace mongo::execution_control { namespace throughput_probing { @@ -67,18 +80,21 @@ class ThroughputProbing : public TicketHolderMonitor { void _probeUp(double throughput); void _probeDown(double throughput); - void _setConcurrency(int32_t concurrency); + void _resetConcurrency(); + void _increaseConcurrency(); + void _decreaseConcurrency(); - int32_t _stableConcurrency; + // This value is split between reads and writes based on the read/write ratio. + double _stableConcurrency; double _stableThroughput = 0; ProbingState _state = ProbingState::kStable; + Timer _timer; - int64_t _prevNumFinishedProcessing = 0; + int64_t _prevNumFinishedProcessing = -1; struct Stats { void serialize(BSONObjBuilder& builder) const; - AtomicWord throughput; AtomicWord timesDecreased; AtomicWord timesIncreased; AtomicWord totalAmountDecreased; diff --git a/src/mongo/db/storage/execution_control/throughput_probing.idl b/src/mongo/db/storage/execution_control/throughput_probing.idl index c037b50669773..a117c1637a38f 100644 --- a/src/mongo/db/storage/execution_control/throughput_probing.idl +++ b/src/mongo/db/storage/execution_control/throughput_probing.idl @@ -36,15 +36,15 @@ server_parameters: set_at: [ startup, runtime ] cpp_vartype: AtomicWord cpp_varname: gStepMultiple - default: 0.2 + default: 0.1 validator: gte: 0.1 lte: 0.5 throughputProbingInitialConcurrency: description: >- - The initial number of concurrent read/write transactions for throughput probing. The default - value of 0 means to use the number of logical CPU cores. + The initial number of TOTAL concurrent read/write transactions for throughput probing. The + default value of 0 means to use the number of logical CPU cores. set_at: startup cpp_vartype: int32_t cpp_varname: gInitialConcurrency @@ -53,7 +53,9 @@ server_parameters: callback: validateInitialConcurrency throughputProbingMinConcurrency: - description: The minimum number of concurrent read/write transactions for throughput probing. + description: >- + The minimum number of concurrent read/write transactions for throughput probing. This minimum + is applied to reads and writes separately. 
set_at: startup cpp_vartype: int32_t cpp_varname: gMinConcurrency @@ -62,10 +64,37 @@ server_parameters: callback: validateMinConcurrency throughputProbingMaxConcurrency: - description: The maximum number of concurrent read/write transactions for throughput probing. + description: >- + The maximum number of concurrent read/write transactions for throughput probing. This maximum + is applied to reads and writes separately. set_at: [ startup, runtime ] cpp_vartype: AtomicWord cpp_varname: gMaxConcurrency default: 128 validator: callback: validateMaxConcurrency + + throughputProbingReadWriteRatio: + description: >- + The ratio of reads/writes when using throughput probing. A value of 0.5 indicates a 1:1 ratio, + while a value greater than 0.5 favors reads and a value less than 0.5 favors writes. + set_at: [ startup, runtime ] + cpp_vartype: AtomicWord + cpp_varname: gReadWriteRatio + default: 0.5 + validator: + gte: 0 + lte: 1 + + throughputProbingConcurrencyMovingAverageWeight: + description: >- + How much to weigh newer concurrency measurements into the exponentially-decaying moving + average. Higher values respond faster to changes, but with more variability. Lower values + respond slower, but with less variability. + set_at: [ startup, runtime ] + cpp_vartype: AtomicWord + cpp_varname: gConcurrencyMovingAverageWeight + default: 0.2 + validator: + gt: 0 + lte: 1 diff --git a/src/mongo/db/storage/execution_control/throughput_probing_test.cpp b/src/mongo/db/storage/execution_control/throughput_probing_test.cpp index 028cbefe714f0..786e57b09a8cd 100644 --- a/src/mongo/db/storage/execution_control/throughput_probing_test.cpp +++ b/src/mongo/db/storage/execution_control/throughput_probing_test.cpp @@ -28,8 +28,24 @@ */ #include "mongo/db/storage/execution_control/throughput_probing.h" + +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/storage/execution_control/throughput_probing_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/periodic_runner.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/tick_source.h" +#include "mongo/util/tick_source_mock.h" namespace mongo::execution_control { namespace throughput_probing { @@ -69,7 +85,7 @@ class MockPeriodicJob : public PeriodicRunner::ControllableJob { void resume() override {} void stop() override {} - Milliseconds getPeriod() override { + Milliseconds getPeriod() const override { return _job.interval; } @@ -103,31 +119,53 @@ class MockPeriodicRunner : public PeriodicRunner { std::shared_ptr _job; }; +namespace { +TickSourceMock* initTickSource(ServiceContext* svcCtx) { + auto mockTickSource = std::make_unique>(); + auto tickSourcePtr = mockTickSource.get(); + svcCtx->setTickSource(std::move(mockTickSource)); + return tickSourcePtr; +} +} // namespace + class ThroughputProbingTest : public unittest::Test { protected: - explicit ThroughputProbingTest(int32_t size = 64) + explicit ThroughputProbingTest(int32_t size = 64, double readWriteRatio = 0.5) : _runner([svcCtx = _svcCtx.get()] { auto runner = std::make_unique(); auto runnerPtr = runner.get(); svcCtx->setPeriodicRunner(std::move(runner)); return runnerPtr; }()), + _tickSource(initTickSource(_svcCtx.get())), _throughputProbing([&]() -> ThroughputProbing { throughput_probing::gInitialConcurrency = size; + throughput_probing::gReadWriteRatio.store(readWriteRatio); return {_svcCtx.get(), 
&_readTicketHolder, &_writeTicketHolder, Milliseconds{1}}; - }()) {} + }()) { + // We need to advance the ticks to something other than zero, since that is used to + // determine the if we're in the first iteration or not. + _tick(); + + // First loop is a no-op and initializes state. + _run(); + } void _run() { _runner->run(_client.get()); } + void _tick() { + _tickSource->advance(Microseconds(1000)); + } + ServiceContext::UniqueServiceContext _svcCtx = ServiceContext::make(); ServiceContext::UniqueClient _client = _svcCtx->makeClient("ThroughputProbingTest"); MockPeriodicRunner* _runner; MockTicketHolder _readTicketHolder; MockTicketHolder _writeTicketHolder; - + TickSourceMock* _tickSource; ThroughputProbing _throughputProbing; }; @@ -135,20 +173,36 @@ using namespace throughput_probing; class ThroughputProbingMaxConcurrencyTest : public ThroughputProbingTest { protected: - ThroughputProbingMaxConcurrencyTest() : ThroughputProbingTest(gMaxConcurrency.load()) {} + // This input is the total initial concurrency between both ticketholders, so it will be split + // evenly between each ticketholder. We are attempting to test a limit that is per-ticketholder. + ThroughputProbingMaxConcurrencyTest() : ThroughputProbingTest(gMaxConcurrency.load() * 2) {} }; class ThroughputProbingMinConcurrencyTest : public ThroughputProbingTest { protected: - ThroughputProbingMinConcurrencyTest() : ThroughputProbingTest(gMinConcurrency) {} + // This input is the total initial concurrency between both ticketholders, so it will be split + // evenly between each ticketholder. We are attempting to test a limit that is per-ticketholder. + ThroughputProbingMinConcurrencyTest() : ThroughputProbingTest(gMinConcurrency * 2) {} +}; + +class ThroughputProbingReadHeavyTest : public ThroughputProbingTest { +protected: + ThroughputProbingReadHeavyTest() : ThroughputProbingTest(16, 0.9) {} +}; + +class ThroughputProbingWriteHeavyTest : public ThroughputProbingTest { +protected: + ThroughputProbingWriteHeavyTest() : ThroughputProbingTest(16, 0.1) {} }; TEST_F(ThroughputProbingTest, ProbeUpSucceeds) { // Tickets are exhausted. - auto size = _readTicketHolder.outof(); + auto initialSize = _readTicketHolder.outof(); + auto size = initialSize; _readTicketHolder.setUsed(size); _readTicketHolder.setUsed(size - 1); _readTicketHolder.setNumFinishedProcessing(1); + _tick(); // Stable. Probe up next since tickets are exhausted. _run(); @@ -158,49 +212,24 @@ TEST_F(ThroughputProbingTest, ProbeUpSucceeds) { // Throughput inreases. size = _readTicketHolder.outof(); _readTicketHolder.setNumFinishedProcessing(3); + _tick(); - // Probing up succeeds; the new value is promoted to stable. - _run(); - ASSERT_EQ(_readTicketHolder.outof(), size); - ASSERT_EQ(_writeTicketHolder.outof(), size); -} - -TEST_F(ThroughputProbingTest, ProbeUpFailsDownSucceeds) { - // Tickets are exhausted. - auto size = _readTicketHolder.outof(); - _readTicketHolder.setUsed(size); - _readTicketHolder.setUsed(size - 1); - _readTicketHolder.setNumFinishedProcessing(1); - - // Stable. Probe up next since tickets are exhausted. - _run(); - ASSERT_GT(_readTicketHolder.outof(), size); - ASSERT_GT(_writeTicketHolder.outof(), size); - - // Throughput does not increase. - _readTicketHolder.setNumFinishedProcessing(2); - - // Probing up fails since throughput did not increase. Probe down next. + // Probing up succeeds; the new value is somewhere between the initial value and the probed-up + // value. 
_run(); ASSERT_LT(_readTicketHolder.outof(), size); + ASSERT_GT(_readTicketHolder.outof(), initialSize); ASSERT_LT(_writeTicketHolder.outof(), size); - - // Throughput inreases. - size = _readTicketHolder.outof(); - _readTicketHolder.setNumFinishedProcessing(4); - - // Probing down succeeds; the new value is promoted to stable. - _run(); - ASSERT_EQ(_readTicketHolder.outof(), size); - ASSERT_EQ(_writeTicketHolder.outof(), size); + ASSERT_GT(_writeTicketHolder.outof(), initialSize); } -TEST_F(ThroughputProbingTest, ProbeUpFailsDownFails) { +TEST_F(ThroughputProbingTest, ProbeUpFails) { // Tickets are exhausted. auto size = _readTicketHolder.outof(); _readTicketHolder.setUsed(size); _readTicketHolder.setUsed(size - 1); _readTicketHolder.setNumFinishedProcessing(1); + _tick(); // Stable. Probe up next since tickets are exhausted. _run(); @@ -209,16 +238,9 @@ TEST_F(ThroughputProbingTest, ProbeUpFailsDownFails) { // Throughput does not increase. _readTicketHolder.setNumFinishedProcessing(2); + _tick(); - // Probing up fails since throughput did not increase. Probe down next. - _run(); - ASSERT_LT(_readTicketHolder.outof(), size); - ASSERT_LT(_writeTicketHolder.outof(), size); - - // Throughput does not increase. - _readTicketHolder.setNumFinishedProcessing(3); - - // Probing down fails since throughput did not increase. Return back to stable. + // Probing up fails since throughput did not increase. Return to stable. _run(); ASSERT_EQ(_readTicketHolder.outof(), size); ASSERT_EQ(_writeTicketHolder.outof(), size); @@ -226,9 +248,11 @@ TEST_F(ThroughputProbingTest, ProbeUpFailsDownFails) { TEST_F(ThroughputProbingTest, ProbeDownSucceeds) { // Tickets are not exhausted. - auto size = _readTicketHolder.outof(); + auto initialSize = _readTicketHolder.outof(); + auto size = initialSize; _readTicketHolder.setUsed(size - 1); _readTicketHolder.setNumFinishedProcessing(1); + _tick(); // Stable. Probe down next since tickets are not exhausted. _run(); @@ -238,11 +262,15 @@ TEST_F(ThroughputProbingTest, ProbeDownSucceeds) { // Throughput increases. size = _readTicketHolder.outof(); _readTicketHolder.setNumFinishedProcessing(3); + _tick(); - // Probing down succeeds; the new value is promoted to stable. + // Probing up succeeds; the new value is somewhere between the initial value and the probed-up + // value. _run(); - ASSERT_EQ(_readTicketHolder.outof(), size); - ASSERT_EQ(_writeTicketHolder.outof(), size); + ASSERT_LT(_readTicketHolder.outof(), initialSize); + ASSERT_GT(_readTicketHolder.outof(), size); + ASSERT_LT(_writeTicketHolder.outof(), initialSize); + ASSERT_GT(_writeTicketHolder.outof(), size); } TEST_F(ThroughputProbingTest, ProbeDownFails) { @@ -250,6 +278,7 @@ TEST_F(ThroughputProbingTest, ProbeDownFails) { auto size = _readTicketHolder.outof(); _readTicketHolder.setUsed(size - 1); _readTicketHolder.setNumFinishedProcessing(1); + _tick(); // Stable. Probe down next since tickets are not exhausted. _run(); @@ -258,6 +287,7 @@ TEST_F(ThroughputProbingTest, ProbeDownFails) { // Throughput does not increase. _readTicketHolder.setNumFinishedProcessing(2); + _tick(); // Probing down fails since throughput did not increase. Return back to stable. _run(); @@ -271,6 +301,7 @@ TEST_F(ThroughputProbingMaxConcurrencyTest, NoProbeUp) { _readTicketHolder.setUsed(size); _readTicketHolder.setUsed(size - 1); _readTicketHolder.setNumFinishedProcessing(1); + _tick(); // Stable. Probe down since concurrency is already at its maximum allowed value, even though // ticktes are exhausted. 
@@ -284,6 +315,7 @@ TEST_F(ThroughputProbingMinConcurrencyTest, NoProbeDown) { auto size = _readTicketHolder.outof(); _readTicketHolder.setUsed(size - 1); _readTicketHolder.setNumFinishedProcessing(1); + _tick(); // Stable. Do not probe in either direction since tickets are not exhausted but concurrency is // already at its minimum allowed value. @@ -294,7 +326,10 @@ TEST_F(ThroughputProbingMinConcurrencyTest, NoProbeDown) { TEST_F(ThroughputProbingMinConcurrencyTest, StepSizeNonZero) { gStepMultiple.store(0.1); - auto size = _readTicketHolder.outof(); + // This value is chosen so that it takes two iterations to increase the stable concurrency by 1. + gConcurrencyMovingAverageWeight.store(0.3); + auto initialSize = _readTicketHolder.outof(); + auto size = initialSize; // The concurrency level is low enough that the step multiple on its own is not enough to get to // the next integer. @@ -304,6 +339,7 @@ TEST_F(ThroughputProbingMinConcurrencyTest, StepSizeNonZero) { _readTicketHolder.setUsed(size); _readTicketHolder.setUsed(size - 1); _readTicketHolder.setNumFinishedProcessing(1); + _tick(); // Stable. Probe up next since tickets are exhausted. _run(); @@ -312,12 +348,170 @@ TEST_F(ThroughputProbingMinConcurrencyTest, StepSizeNonZero) { // Throughput inreases. _readTicketHolder.setNumFinishedProcessing(3); + _tick(); - // Probing up succeeds; the new value is promoted to stable. + // Probing up succeeds; the new value is not enough to increase concurrency yet. + _run(); + ASSERT_EQ(_readTicketHolder.outof(), size); + ASSERT_EQ(_writeTicketHolder.outof(), size); + + // Run another iteration. + + // Tickets are exhausted. + _readTicketHolder.setUsed(size); + _readTicketHolder.setUsed(size - 1); + _readTicketHolder.setNumFinishedProcessing(4); + _tick(); + + // Stable. Probe up next since tickets are exhausted. + _run(); + ASSERT_EQ(_readTicketHolder.outof(), size + 1); + ASSERT_EQ(_writeTicketHolder.outof(), size + 1); + + // Throughput inreases. + _readTicketHolder.setNumFinishedProcessing(6); + _tick(); + + // Probing up succeeds; the new value is finally enough to increase concurrency. _run(); ASSERT_EQ(_readTicketHolder.outof(), size + 1); ASSERT_EQ(_writeTicketHolder.outof(), size + 1); } +TEST_F(ThroughputProbingTest, ReadWriteRatio) { + gReadWriteRatio.store(0.67); // 33% of tickets for writes, 67% for reads + ON_BLOCK_EXIT([]() { gReadWriteRatio.store(0.5); }); + + auto initialReads = _readTicketHolder.outof(); + auto reads = initialReads; + auto initialWrites = _writeTicketHolder.outof(); + auto writes = initialWrites; + + // Initially these should be equal. + ASSERT_EQ(reads, writes); + + // Write tickets are exhausted + _writeTicketHolder.setUsed(writes); + _writeTicketHolder.setUsed(writes - 1); + _readTicketHolder.setNumFinishedProcessing(1); + _tick(); + + // Stable. Probe up next since tickets are exhausted. We expect write tickets to drop because + // now the ratio is being applied. Total tickets should still increase. + _run(); + ASSERT_GT(_readTicketHolder.outof(), reads); + ASSERT_LT(_writeTicketHolder.outof(), writes); + ASSERT_GT(_readTicketHolder.outof() + _writeTicketHolder.outof(), reads + writes); + + // There should be an imbalance. + ASSERT_GT(_readTicketHolder.outof(), _writeTicketHolder.outof()); + + reads = _readTicketHolder.outof(); + writes = _writeTicketHolder.outof(); + + // Throughput inreases. 
+ _readTicketHolder.setNumFinishedProcessing(3); + _tick(); + + // Probing up succeeds; the new value is somewhere between the initial value and the probed-up + // value. + _run(); + ASSERT_LT(_readTicketHolder.outof(), reads); + ASSERT_GT(_readTicketHolder.outof(), initialReads); + ASSERT_LT(_writeTicketHolder.outof(), writes); + ASSERT_LT(_writeTicketHolder.outof(), initialWrites); + ASSERT_GT(_readTicketHolder.outof() + _writeTicketHolder.outof(), initialReads + initialWrites); + + // This imbalance should still exist. + ASSERT_GT(_readTicketHolder.outof(), _writeTicketHolder.outof()); +} + +TEST_F(ThroughputProbingReadHeavyTest, StepSizeNonZeroIncreasing) { + auto reads = _readTicketHolder.outof(); + auto writes = _writeTicketHolder.outof(); + ASSERT_GT(reads, writes); + + // The concurrency level and read/write ratio are such that the step multiple on its own is not + // enough to get to the next integer for writes. + ASSERT_EQ(std::lround(writes * (1 + gStepMultiple.load())), writes); + + // Write tickets are exhausted. + _writeTicketHolder.setUsed(writes); + _writeTicketHolder.setUsed(writes - 1); + _writeTicketHolder.setNumFinishedProcessing(1); + _tick(); + + // Stable. Probe up next since tickets are exhausted. The number of write tickets should still + // go up by 1. + _run(); + ASSERT_GT(_readTicketHolder.outof(), reads); + ASSERT_EQ(_writeTicketHolder.outof(), writes + 1); +} + +TEST_F(ThroughputProbingReadHeavyTest, StepSizeNonZeroDecreasing) { + auto reads = _readTicketHolder.outof(); + auto writes = _writeTicketHolder.outof(); + ASSERT_GT(reads, writes); + + // The concurrency level and read/write ratio are such that the step multiple on its own is not + // enough to get to the next integer for writes. + ASSERT_EQ(std::lround(writes * (1 - gStepMultiple.load())), writes); + + // Tickets are not exhausted. + _readTicketHolder.setUsed(reads - 1); + _readTicketHolder.setNumFinishedProcessing(1); + _tick(); + + // Stable. Probe down next since tickets are not exhausted. The number of write tickets should + // still go down by 1. + _run(); + ASSERT_LT(_readTicketHolder.outof(), reads); + ASSERT_EQ(_writeTicketHolder.outof(), writes - 1); +} + +TEST_F(ThroughputProbingWriteHeavyTest, StepSizeNonZeroIncreasing) { + auto reads = _readTicketHolder.outof(); + auto writes = _writeTicketHolder.outof(); + ASSERT_LT(reads, writes); + + // The concurrency level and read/write ratio are such that the step multiple on its own is not + // enough to get to the next integer for reads. + ASSERT_EQ(std::lround(reads * (1 + gStepMultiple.load())), reads); + + // Read tickets are exhausted. + _readTicketHolder.setUsed(reads); + _readTicketHolder.setUsed(reads - 1); + _readTicketHolder.setNumFinishedProcessing(1); + _tick(); + + // Stable. Probe up next since tickets are exhausted. The number of read tickets should still + // go up by 1. + _run(); + ASSERT_EQ(_readTicketHolder.outof(), reads + 1); + ASSERT_GT(_writeTicketHolder.outof(), writes); +} + +TEST_F(ThroughputProbingWriteHeavyTest, StepSizeNonZeroDecreasing) { + auto reads = _readTicketHolder.outof(); + auto writes = _writeTicketHolder.outof(); + ASSERT_LT(reads, writes); + + // The concurrency level and read/write ratio are such that the step multiple on its own is not + // enough to get to the next integer for reads. + ASSERT_EQ(std::lround(reads * (1 + gStepMultiple.load())), reads); + + // Tickets are not exhausted. + _writeTicketHolder.setUsed(writes - 1); + _writeTicketHolder.setNumFinishedProcessing(1); + _tick(); + + // Stable. 
Probe down next since tickets are not exhausted. The number of read tickets should + // still go down by 1. + _run(); + ASSERT_EQ(_readTicketHolder.outof(), reads - 1); + ASSERT_LT(_writeTicketHolder.outof(), writes); +} + + } // namespace } // namespace mongo::execution_control diff --git a/src/mongo/db/storage/external_record_store.cpp b/src/mongo/db/storage/external_record_store.cpp index ecdc069bad25f..ccfd1f4e20fab 100644 --- a/src/mongo/db/storage/external_record_store.cpp +++ b/src/mongo/db/storage/external_record_store.cpp @@ -29,13 +29,18 @@ #include "mongo/db/storage/external_record_store.h" +#include + +#include + +#include "mongo/db/operation_context.h" #include "mongo/db/storage/multi_bson_stream_cursor.h" #include "mongo/db/storage/record_store.h" namespace mongo { // 'ident' is an identifer to WT table and a virtual collection does not have any persistent data // in WT. So, we set the "dummy" ident for a virtual collection. -ExternalRecordStore::ExternalRecordStore(StringData ns, +ExternalRecordStore::ExternalRecordStore(const NamespaceString& ns, boost::optional uuid, const VirtualCollectionOptions& vopts) : RecordStore(uuid, /*identName=*/"dummy"_sd, /*isCapped=*/false), _vopts(vopts), _ns(ns) {} diff --git a/src/mongo/db/storage/external_record_store.h b/src/mongo/db/storage/external_record_store.h index b02c13595df7d..b322d4bc74bf7 100644 --- a/src/mongo/db/storage/external_record_store.h +++ b/src/mongo/db/storage/external_record_store.h @@ -29,15 +29,34 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include + #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/virtual_collection_options.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" #include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { class ExternalRecordStore : public RecordStore { public: - ExternalRecordStore(StringData ns, + ExternalRecordStore(const NamespaceString& ns, boost::optional uuid, const VirtualCollectionOptions& vopts); @@ -53,7 +72,7 @@ class ExternalRecordStore : public RecordStore { return true; } - std::string ns(OperationContext* opCtx) const final { + NamespaceString ns(OperationContext* opCtx) const final { return _ns; } @@ -167,6 +186,6 @@ class ExternalRecordStore : public RecordStore { } VirtualCollectionOptions _vopts; - std::string _ns; + NamespaceString _ns; }; } // namespace mongo diff --git a/src/mongo/db/storage/external_record_store_test.cpp b/src/mongo/db/storage/external_record_store_test.cpp index e8376565327cd..1d50ac890e233 100644 --- a/src/mongo/db/storage/external_record_store_test.cpp +++ b/src/mongo/db/storage/external_record_store_test.cpp @@ -27,21 +27,40 @@ * it in the license file. 
*/ -#include -#include -#include +#include +#include +#include #include - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/virtual_collection_options.h" +#include "mongo/db/pipeline/external_data_source_option_gen.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/input_stream.h" #include "mongo/db/storage/multi_bson_stream_cursor.h" #include "mongo/db/storage/named_pipe.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/platform/mutex.h" #include "mongo/platform/random.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" -#include -#include namespace mongo { using namespace fmt::literals; diff --git a/src/mongo/db/storage/flow_control.cpp b/src/mongo/db/storage/flow_control.cpp index 1866f16af6c82..51253747d2a9c 100644 --- a/src/mongo/db/storage/flow_control.cpp +++ b/src/mongo/db/storage/flow_control.cpp @@ -28,22 +28,36 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/flow_control.h" - -#include +#include +#include #include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include +#include +#include +#include +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/flow_control_ticketholder.h" #include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/lock_stats.h" #include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_options.h" +#include "mongo/db/storage/flow_control.h" #include "mongo/db/storage/flow_control_parameters_gen.h" #include "mongo/logv2/log.h" -#include "mongo/util/background.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -150,7 +164,9 @@ FlowControl::FlowControl(ServiceContext* service, repl::ReplicationCoordinator* [this](Client* client) { FlowControlTicketholder::get(client->getServiceContext())->refreshTo(getNumTickets()); }, - Seconds(1)}); + Seconds(1), + // TODO(SERVER-74657): Please revisit if this periodic job could be made killable. 
+ false /*isKillableByStepdown*/}); _jobAnchor.start(); } diff --git a/src/mongo/db/storage/flow_control.h b/src/mongo/db/storage/flow_control.h index 96d35a349954b..e87576da6a185 100644 --- a/src/mongo/db/storage/flow_control.h +++ b/src/mongo/db/storage/flow_control.h @@ -29,15 +29,25 @@ #pragma once +#include #include +#include +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_fwd.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/util/periodic_runner.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/storage/flow_control_test.cpp b/src/mongo/db/storage/flow_control_test.cpp index cc63adb14a64e..d94edd9846749 100644 --- a/src/mongo/db/storage/flow_control_test.cpp +++ b/src/mongo/db/storage/flow_control_test.cpp @@ -28,8 +28,13 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/flow_control_ticketholder.h" #include "mongo/db/concurrency/lock_manager_defs.h" @@ -37,8 +42,9 @@ #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/storage/flow_control.h" #include "mongo/db/storage/flow_control_parameters_gen.h" -#include "mongo/logv2/log_debug.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/storage/historical_ident_tracker.cpp b/src/mongo/db/storage/historical_ident_tracker.cpp deleted file mode 100644 index 34a1a72b2a770..0000000000000 --- a/src/mongo/db/storage/historical_ident_tracker.cpp +++ /dev/null @@ -1,218 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. 
If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - - -#include "mongo/db/storage/historical_ident_tracker.h" -#include "mongo/logv2/log.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - - -namespace mongo { - -namespace { - -const auto getHistoricalIdentTracker = ServiceContext::declareDecoration(); - -} // namespace - -HistoricalIdentTracker& HistoricalIdentTracker::get(ServiceContext* svcCtx) { - return getHistoricalIdentTracker(svcCtx); -} - -HistoricalIdentTracker& HistoricalIdentTracker::get(OperationContext* opCtx) { - return get(opCtx->getServiceContext()); -} - -boost::optional> HistoricalIdentTracker::lookup( - const std::string& ident, Timestamp timestamp) const { - stdx::lock_guard lk(_mutex); - - auto mapIt = _historicalIdents.find(ident); - if (mapIt == _historicalIdents.end()) { - // No historical entries for this ident. - return boost::none; - } - - for (auto listIt = mapIt->second.begin(); listIt != mapIt->second.end(); listIt++) { - if (timestamp >= listIt->start && timestamp <= listIt->end) { - // Found the historical entry for the requested timestamp. - return std::make_pair(listIt->nss, listIt->uuid); - } - } - - // No historical entry for the requested timestamp was found. - return boost::none; -} - -void HistoricalIdentTracker::pinAtTimestamp(Timestamp timestamp) { - stdx::lock_guard lk(_mutex); - _pinnedTimestamp = timestamp; -} - -void HistoricalIdentTracker::unpin() { - stdx::lock_guard lk(_mutex); - _pinnedTimestamp = Timestamp::min(); -} - -void HistoricalIdentTracker::removeEntriesOlderThan(Timestamp timestamp) { - Timestamp removeOlderThan = - _pinnedTimestamp.isNull() ? timestamp : std::min(timestamp, _pinnedTimestamp); - - LOGV2_DEBUG( - 6321801, 2, "Removing historical entries older than", "timestamp"_attr = removeOlderThan); - - std::vector keysToRemove; - stdx::lock_guard lk(_mutex); - for (auto mapIt = _historicalIdents.begin(); mapIt != _historicalIdents.end(); mapIt++) { - - auto listIt = mapIt->second.begin(); - while (listIt != mapIt->second.end()) { - if (listIt->end < removeOlderThan) { - // This historical entry needs to be a removed, but we'll do a ranged delete later. - LOGV2_DEBUG(6321802, - 2, - "Removing historical entry", - "ident"_attr = mapIt->first, - "nss"_attr = listIt->nss, - "uuid"_attr = listIt->uuid, - "start"_attr = listIt->start, - "end"_attr = listIt->end); - listIt++; - continue; - } - - // We need to keep this and any following historical entries. We can do a ranged delete - // now for what we don't need. - mapIt->second.erase(mapIt->second.begin(), listIt); - break; - } - - if (listIt == mapIt->second.end()) { - // All of the historical entries need to be deleted for this ident. We'll erase the map - // entry outside of the loop to avoid iterator invalidation. - keysToRemove.push_back(mapIt->first); - } - } - - for (const auto& keyToRemove : keysToRemove) { - _historicalIdents.erase(keyToRemove); - } -} - -void HistoricalIdentTracker::rollbackTo(Timestamp timestamp) { - Timestamp rollbackTo = - _pinnedTimestamp.isNull() ? 
timestamp : std::max(timestamp, _pinnedTimestamp); - - LOGV2_DEBUG(6321803, 2, "Rolling back historical entries to", "timestamp"_attr = rollbackTo); - - std::vector keysToRemove; - stdx::lock_guard lk(_mutex); - for (auto mapIt = _historicalIdents.begin(); mapIt != _historicalIdents.end(); mapIt++) { - - auto listIt = mapIt->second.begin(); - while (listIt != mapIt->second.end()) { - if (listIt->end < rollbackTo) { - // This historical entry needs to be kept. - listIt++; - continue; - } - - LOGV2_DEBUG(6321804, - 2, - "Removing historical entries at and beyond", - "ident"_attr = mapIt->first, - "nss"_attr = listIt->nss, - "uuid"_attr = listIt->uuid, - "start"_attr = listIt->start, - "end"_attr = listIt->end); - - // We need to remove this and any following historical entries. We can do a ranged - // delete now for what we don't need. - mapIt->second.erase(listIt, mapIt->second.end()); - - if (mapIt->second.empty()) { - // Everything was erased. The map entry will be erased outside of the loop to avoid - // iterator invalidation. - keysToRemove.push_back(mapIt->first); - } - - break; - } - } - - for (const auto& keyToRemove : keysToRemove) { - _historicalIdents.erase(keyToRemove); - } -} - -void HistoricalIdentTracker::_addHistoricalIdent(const std::string& ident, - const NamespaceString& nss, - const UUID& uuid, - Timestamp timestamp) { - if (timestamp.isNull()) { - // Standalone nodes don't use timestamps. - return; - } - - HistoricalIdentEntry entry{nss, uuid, /*start=*/Timestamp::min(), /*end=*/timestamp - 1}; - - stdx::lock_guard lk(_mutex); - auto it = _historicalIdents.find(ident); - if (it == _historicalIdents.end()) { - // There are no historical entries for this ident yet. - LOGV2_DEBUG(6321805, - 2, - "Adding new historical entry", - "ident"_attr = ident, - "nss"_attr = entry.nss, - "uuid"_attr = entry.uuid, - "start"_attr = entry.start, - "end"_attr = entry.end); - _historicalIdents.insert({ident, {std::move(entry)}}); - return; - } - - invariant(!it->second.empty()); - - // Update the start timestamp to be the last entry's end timestamp + 1. - entry.start = it->second.back().end + 1; - - LOGV2_DEBUG(6321806, - 2, - "Adding new historical entry", - "ident"_attr = ident, - "nss"_attr = entry.nss, - "uuid"_attr = entry.uuid, - "start"_attr = entry.start, - "end"_attr = entry.end); - it->second.push_back(std::move(entry)); -} - -} // namespace mongo diff --git a/src/mongo/db/storage/historical_ident_tracker.h b/src/mongo/db/storage/historical_ident_tracker.h deleted file mode 100644 index fe14e83a04f74..0000000000000 --- a/src/mongo/db/storage/historical_ident_tracker.h +++ /dev/null @@ -1,145 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . 
- * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include -#include -#include - -#include "mongo/bson/timestamp.h" -#include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/service_context.h" -#include "mongo/platform/mutex.h" -#include "mongo/util/uuid.h" - -namespace mongo { - -/** - * Keeps track of historical ident information when a collection is renamed or dropped. - */ -class HistoricalIdentTracker final { -public: - HistoricalIdentTracker(const HistoricalIdentTracker&) = delete; - HistoricalIdentTracker(HistoricalIdentTracker&&) = delete; - - static HistoricalIdentTracker& get(ServiceContext* svcCtx); - static HistoricalIdentTracker& get(OperationContext* opCtx); - - HistoricalIdentTracker() = default; - ~HistoricalIdentTracker() = default; - - /** - * Returns the historical namespace and UUID for 'ident' at 'timestamp'. Returns boost::none if - * there was no historical namespace. - */ - boost::optional> lookup(const std::string& ident, - Timestamp timestamp) const; - - /** - * Pins the historical content to the given timestamp, preventing it from being removed. - * - * This is necessary for backup cursors, which need to report the namespace and UUID at the time - * of the checkpoint the backup is being taken on. When a backup cursor is open, it pins the - * checkpoint the backup is being taken on. Checkpoints can still be taken, which advances the - * last checkpoint timestamp and would remove historical content needed by the open backup - * cursor. This method prevents that from happening by pinning the content. - * - * The checkpoint timestamp of the backup can be earlier than the oldest timestamp, which - * prevents us from opening a snapshot at the checkpoint timestamp as history before the oldest - * timestamp is discarded. - */ - void pinAtTimestamp(Timestamp timestamp); - void unpin(); - - /** - * Records the idents namespace and UUID before it was renamed. - */ - void recordRename(const std::string& ident, - const NamespaceString& oldNss, - const UUID& uuid, - Timestamp timestamp) { - _addHistoricalIdent(ident, oldNss, uuid, timestamp); - } - - /** - * Records the idents namespace and UUID before it was dropped. - */ - void recordDrop(const std::string& ident, - const NamespaceString& nss, - const UUID& uuid, - Timestamp timestamp) { - _addHistoricalIdent(ident, nss, uuid, timestamp); - } - - /** - * Removes historical content that is no longer necessary. This is anything older than the last - * checkpoint timestamp. - * - * If there's a pinned timestamp, min(timestamp, _pinnedTimestamp) is used. - */ - void removeEntriesOlderThan(Timestamp timestamp); - - /** - * Historical content added may not be stable yet and can be rolled back. 
When rollback to - * stable runs, we need to remove any historical content that is considered current. - * - * If there's a pinned timestamp, max(timestamp, _pinnedTimestamp) is used. - */ - void rollbackTo(Timestamp timestamp); - -private: - /** - * Helper function for recordRename() and recordDrop(). - * - * Appends a new historical entry with 'nss' and 'uuid' for 'ident' in '_historicalIdents'. - * Sets the 'end' timestamp to be 'timestamp - 1'. - * Sets the 'start' timestamp to the timestamp of the last entry + 1, or Timestamp::min() if - * there was no earlier entry. - */ - void _addHistoricalIdent(const std::string& ident, - const NamespaceString& nss, - const UUID& uuid, - Timestamp timestamp); - - struct HistoricalIdentEntry { - const NamespaceString nss; - const UUID uuid; - Timestamp start; - Timestamp end; - }; - - // Protects all the member variables below. - mutable Mutex _mutex = MONGO_MAKE_LATCH("HistoricalIdentTracker::_mutex"); - stdx::unordered_map> _historicalIdents; - Timestamp _pinnedTimestamp; -}; - -} // namespace mongo diff --git a/src/mongo/db/storage/historical_ident_tracker_test.cpp b/src/mongo/db/storage/historical_ident_tracker_test.cpp deleted file mode 100644 index 072c10b443ee7..0000000000000 --- a/src/mongo/db/storage/historical_ident_tracker_test.cpp +++ /dev/null @@ -1,437 +0,0 @@ -/** - * Copyright (C) 2022-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
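The header comments above describe how a pinned timestamp interacts with cleanup and rollback: removeEntriesOlderThan() clamps downward with min(timestamp, pinned) so an open backup cursor keeps the history it needs, while rollbackTo() clamps upward with max(timestamp, pinned) so pinned content survives a rollback. The sketch below shows just that clamping; the `uint64_t` timestamps and the `0 == "not pinned"` sentinel are assumptions for the example, not the real Timestamp type.

```cpp
// Sketch of the pinned-timestamp clamping described in the removed header.
// uint64_t timestamps and the 0 == "not pinned" sentinel are assumptions made
// for the example; the real class uses Timestamp and a mutex.
#include <algorithm>
#include <cassert>
#include <cstdint>

struct Tracker {
    uint64_t pinned = 0;  // 0 means "nothing pinned"

    // removeEntriesOlderThan(ts): never discard history newer than the pin.
    uint64_t effectiveRemovalTimestamp(uint64_t ts) const {
        return pinned == 0 ? ts : std::min(ts, pinned);
    }

    // rollbackTo(ts): never roll back history the pin still needs.
    uint64_t effectiveRollbackTimestamp(uint64_t ts) const {
        return pinned == 0 ? ts : std::max(ts, pinned);
    }
};

int main() {
    Tracker t;
    t.pinned = 15;
    // A checkpoint at 100 would normally purge everything older than 100,
    // but the pin holds cleanup back to 15 for the open backup cursor.
    assert(t.effectiveRemovalTimestamp(100) == 15);
    // Rolling back to 5 would normally erase entries at and beyond 5, but
    // content the pinned checkpoint still references is preserved.
    assert(t.effectiveRollbackTimestamp(5) == 15);
}
```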
- */ - -#include "mongo/db/storage/historical_ident_tracker.h" -#include "mongo/unittest/unittest.h" - -namespace mongo { - -TEST(HistoricalIdentTracker, RecordHistoricalIdents) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(10, 10)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.b"), - UUID::gen(), - Timestamp(20, 20)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.c"), - UUID::gen(), - Timestamp(21, 21)); - tracker.recordDrop(ident, - /*nss=*/NamespaceString::createNamespaceString_forTest("test.d"), - UUID::gen(), - Timestamp(25, 25)); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(15, 15))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - ASSERT_EQ(tracker.lookup(ident, Timestamp(21, 21))->first, - NamespaceString::createNamespaceString_forTest("test.d")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(24, 24))->first, - NamespaceString::createNamespaceString_forTest("test.d")); - - ASSERT(!tracker.lookup(ident, Timestamp(25, 25))); - ASSERT(!tracker.lookup(ident, Timestamp::max())); -} - -TEST(HistoricalIdentTracker, SkipRecordingNullTimestamps) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(1)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.b"), - UUID::gen(), - Timestamp(2)); - tracker.recordDrop(ident, - /*nss=*/NamespaceString::createNamespaceString_forTest("test.c"), - UUID::gen(), - Timestamp(3)); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(1))); - ASSERT(!tracker.lookup(ident, Timestamp(2))); - ASSERT(!tracker.lookup(ident, Timestamp(3))); - ASSERT(!tracker.lookup(ident, Timestamp::max())); -} - -TEST(HistoricalIdentTracker, RemoveEntriesOlderThanSingle) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(50, 50)); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(49, 49))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - - ASSERT(!tracker.lookup(ident, Timestamp(50, 50))); - - tracker.removeEntriesOlderThan(Timestamp::min()); - tracker.removeEntriesOlderThan(Timestamp(49, 49)); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(49, 49))->first, - 
NamespaceString::createNamespaceString_forTest("test.a")); - - tracker.removeEntriesOlderThan(Timestamp(50, 50)); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(49, 49))); - ASSERT(!tracker.lookup(ident, Timestamp(50, 50))); -} - -TEST(HistoricalIdentTracker, RemoveEntriesOlderThanMultiple) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(10, 10)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.b"), - UUID::gen(), - Timestamp(20, 20)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.c"), - UUID::gen(), - Timestamp(21, 21)); - - tracker.removeEntriesOlderThan(Timestamp::min()); - tracker.removeEntriesOlderThan(Timestamp(5, 5)); - tracker.removeEntriesOlderThan(Timestamp(9, 9)); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - tracker.removeEntriesOlderThan(Timestamp(15, 15)); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(9, 9))); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - tracker.removeEntriesOlderThan(Timestamp(21, 21)); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(9, 9))); - ASSERT(!tracker.lookup(ident, Timestamp(10, 10))); - ASSERT(!tracker.lookup(ident, Timestamp(19, 19))); - ASSERT(!tracker.lookup(ident, Timestamp(20, 20))); - ASSERT(!tracker.lookup(ident, Timestamp::max())); -} - -TEST(HistoricalIdentTracker, RollbackToSingle) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(10, 10)); - - tracker.rollbackTo(Timestamp(10, 10)); - tracker.rollbackTo(Timestamp::max()); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - - tracker.rollbackTo(Timestamp(9, 9)); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(9, 9))); - ASSERT(!tracker.lookup(ident, Timestamp(10, 10))); - ASSERT(!tracker.lookup(ident, Timestamp::max())); -} - -TEST(HistoricalIdentTracker, RollbackToMultiple) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - 
/*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(10, 10)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.b"), - UUID::gen(), - Timestamp(20, 20)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.c"), - UUID::gen(), - Timestamp(21, 21)); - - tracker.rollbackTo(Timestamp::max()); - tracker.rollbackTo(Timestamp(22, 22)); - tracker.rollbackTo(Timestamp(21, 21)); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - tracker.rollbackTo(Timestamp(15, 15)); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT(!tracker.lookup(ident, Timestamp(10, 10))); - ASSERT(!tracker.lookup(ident, Timestamp(19, 19))); - ASSERT(!tracker.lookup(ident, Timestamp(20, 20))); - - tracker.rollbackTo(Timestamp::min()); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(9, 9))); - ASSERT(!tracker.lookup(ident, Timestamp(10, 10))); - ASSERT(!tracker.lookup(ident, Timestamp(19, 19))); - ASSERT(!tracker.lookup(ident, Timestamp(20, 20))); - ASSERT(!tracker.lookup(ident, Timestamp::max())); -} - -TEST(HistoricalIdentTracker, PinAndUnpinTimestamp) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(10, 10)); - - tracker.pinAtTimestamp(Timestamp(5, 5)); - tracker.removeEntriesOlderThan(Timestamp::max()); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - - tracker.pinAtTimestamp(Timestamp(9, 9)); - tracker.removeEntriesOlderThan(Timestamp::max()); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - - tracker.unpin(); - tracker.removeEntriesOlderThan(Timestamp::max()); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(9, 9))); - ASSERT(!tracker.lookup(ident, Timestamp(10, 10))); - ASSERT(!tracker.lookup(ident, Timestamp::max())); -} - -TEST(HistoricalIdentTracker, PinnedTimestampRemoveEntriesOlderThan) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(10, 10)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.b"), - UUID::gen(), 
- Timestamp(20, 20)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.c"), - UUID::gen(), - Timestamp(21, 21)); - - tracker.pinAtTimestamp(Timestamp(5, 5)); - tracker.removeEntriesOlderThan(Timestamp::max()); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - tracker.pinAtTimestamp(Timestamp(9, 9)); - tracker.removeEntriesOlderThan(Timestamp(9, 9)); - tracker.removeEntriesOlderThan(Timestamp(10, 10)); - tracker.removeEntriesOlderThan(Timestamp::max()); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - tracker.pinAtTimestamp(Timestamp(15, 15)); - tracker.removeEntriesOlderThan(Timestamp::max()); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(9, 9))); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - tracker.pinAtTimestamp(Timestamp(21, 21)); - tracker.removeEntriesOlderThan(Timestamp::max()); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(9, 9))); - ASSERT(!tracker.lookup(ident, Timestamp(10, 10))); - ASSERT(!tracker.lookup(ident, Timestamp(19, 19))); - ASSERT(!tracker.lookup(ident, Timestamp(20, 20))); -} - -TEST(HistoricalIdentTracker, PinnedTimestampRollbackTo) { - HistoricalIdentTracker tracker; - - const std::string ident = "ident"; - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.a"), - UUID::gen(), - Timestamp(10, 10)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.b"), - UUID::gen(), - Timestamp(20, 20)); - tracker.recordRename(ident, - /*oldNss=*/NamespaceString::createNamespaceString_forTest("test.c"), - UUID::gen(), - Timestamp(21, 21)); - - tracker.pinAtTimestamp(Timestamp(30, 30)); - tracker.rollbackTo(Timestamp::min()); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - 
NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - tracker.pinAtTimestamp(Timestamp(21, 21)); - tracker.rollbackTo(Timestamp::min()); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(20, 20))->first, - NamespaceString::createNamespaceString_forTest("test.c")); - - tracker.pinAtTimestamp(Timestamp(20, 20)); - tracker.rollbackTo(Timestamp::min()); - - ASSERT_EQ(tracker.lookup(ident, Timestamp::min())->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(9, 9))->first, - NamespaceString::createNamespaceString_forTest("test.a")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(10, 10))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT_EQ(tracker.lookup(ident, Timestamp(19, 19))->first, - NamespaceString::createNamespaceString_forTest("test.b")); - ASSERT(!tracker.lookup(ident, Timestamp(20, 20))); - - - tracker.pinAtTimestamp(Timestamp(5, 5)); - tracker.rollbackTo(Timestamp::min()); - - ASSERT(!tracker.lookup(ident, Timestamp::min())); - ASSERT(!tracker.lookup(ident, Timestamp(9, 9))); - ASSERT(!tracker.lookup(ident, Timestamp(10, 10))); - ASSERT(!tracker.lookup(ident, Timestamp(19, 19))); - ASSERT(!tracker.lookup(ident, Timestamp(20, 20))); -} - -} // namespace mongo diff --git a/src/mongo/db/storage/index_entry_comparison.cpp b/src/mongo/db/storage/index_entry_comparison.cpp index 9e121f79e5f3a..ccdf215473aaa 100644 --- a/src/mongo/db/storage/index_entry_comparison.cpp +++ b/src/mongo/db/storage/index_entry_comparison.cpp @@ -26,17 +26,25 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
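The include blocks rewritten below (and in the rest of this diff) carry include-what-you-use annotations such as `// IWYU pragma: keep` and `// IWYU pragma: no_include`. The snippet below is a small, generic illustration of what those two pragmas tell the IWYU tool; the particular standard headers chosen here are placeholders for the example, not files from this patch.

```cpp
// Illustration of the IWYU pragmas that appear in the rewritten include
// blocks below; the specific headers used here are only for the example.

// "keep": the tool must not flag this include as unused, even if no symbol
// from it is referenced directly (e.g. it is needed for macros/side effects).
#include <ciso646>  // IWYU pragma: keep

// "no_include": never suggest adding this header to this file, even when a
// symbol appears to be declared there (it is reached via another header).
// IWYU pragma: no_include "cxxabi.h"

#include <string>

int main() {
    std::string s = "iwyu";  // <string> is a normal, analyzable include
    return static_cast<int>(s.size());
}
```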
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/index_entry_comparison.h" - +#include +#include +#include #include -#include "mongo/db/jsobj.h" +#include +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/storage/index_entry_comparison.h" #include "mongo/db/storage/key_string.h" +#include "mongo/stdx/variant.h" #include "mongo/util/hex.h" -#include "mongo/util/text.h" +#include "mongo/util/str.h" +#include "mongo/util/text.h" // IWYU pragma: keep namespace mongo { @@ -112,13 +120,13 @@ int IndexEntryComparison::compare(const IndexKeyEntry& lhs, const IndexKeyEntry& return lhs.loc.compare(rhs.loc); // is supposed to ignore ordering } -KeyString::Value IndexEntryComparison::makeKeyStringFromSeekPointForSeek( - const IndexSeekPoint& seekPoint, KeyString::Version version, Ordering ord, bool isForward) { +key_string::Value IndexEntryComparison::makeKeyStringFromSeekPointForSeek( + const IndexSeekPoint& seekPoint, key_string::Version version, Ordering ord, bool isForward) { const bool inclusive = seekPoint.firstExclusive < 0; - const auto discriminator = isForward == inclusive ? KeyString::Discriminator::kExclusiveBefore - : KeyString::Discriminator::kExclusiveAfter; + const auto discriminator = isForward == inclusive ? key_string::Discriminator::kExclusiveBefore + : key_string::Discriminator::kExclusiveAfter; - KeyString::Builder builder(version, ord, discriminator); + key_string::Builder builder(version, ord, discriminator); // Appends keyPrefix elements to the builder. if (seekPoint.prefixLen > 0) { @@ -141,25 +149,26 @@ KeyString::Value IndexEntryComparison::makeKeyStringFromSeekPointForSeek( return builder.getValueCopy(); } -KeyString::Value IndexEntryComparison::makeKeyStringFromBSONKeyForSeek(const BSONObj& bsonKey, - KeyString::Version version, - Ordering ord, - bool isForward, - bool inclusive) { +key_string::Value IndexEntryComparison::makeKeyStringFromBSONKeyForSeek(const BSONObj& bsonKey, + key_string::Version version, + Ordering ord, + bool isForward, + bool inclusive) { return makeKeyStringFromBSONKey(bsonKey, version, ord, isForward == inclusive - ? KeyString::Discriminator::kExclusiveBefore - : KeyString::Discriminator::kExclusiveAfter); + ? key_string::Discriminator::kExclusiveBefore + : key_string::Discriminator::kExclusiveAfter); } -KeyString::Value IndexEntryComparison::makeKeyStringFromBSONKey(const BSONObj& bsonKey, - KeyString::Version version, - Ordering ord, - KeyString::Discriminator discrim) { +key_string::Value IndexEntryComparison::makeKeyStringFromBSONKey( + const BSONObj& bsonKey, + key_string::Version version, + Ordering ord, + key_string::Discriminator discrim) { BSONObj finalKey = BSONObj::stripFieldNames(bsonKey); - KeyString::Builder builder(version, finalKey, ord, discrim); + key_string::Builder builder(version, finalKey, ord, discrim); return builder.getValueCopy(); } @@ -174,7 +183,7 @@ Status buildDupKeyErrorStatus(const BSONObj& key, StringBuilder sb; sb << "E11000 duplicate key error"; - sb << " collection: " << collectionNamespace; + sb << " collection: " << collectionNamespace.toStringForErrorMsg(); if (indexName.size()) { // This helper may be used for clustered collections when there is no index for the cluster // key. 
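The seek helpers in the hunk above pick a key_string discriminator from the cursor direction and the bound's inclusivity: when `isForward == inclusive` the probe key gets kExclusiveBefore (it sorts just before any real key with the same prefix), otherwise kExclusiveAfter. The sketch below reproduces only that selection logic as a truth table; the enum and helper names are illustrative, not the library's.

```cpp
// Sketch of the discriminator selection in the seek helpers above. The enum
// and helper names are illustrative; only the isForward/inclusive logic is
// taken from the code in this hunk.
#include <cassert>

enum class Discriminator { kExclusiveBefore, kExclusiveAfter };

// A forward inclusive seek (and a backward exclusive one) wants a probe key
// that sorts immediately *before* all real keys with this prefix, so the
// cursor lands on the first matching entry. The other two combinations want
// a probe that sorts immediately *after* them.
Discriminator pickDiscriminator(bool isForward, bool inclusive) {
    return isForward == inclusive ? Discriminator::kExclusiveBefore
                                  : Discriminator::kExclusiveAfter;
}

int main() {
    assert(pickDiscriminator(true, true) == Discriminator::kExclusiveBefore);
    assert(pickDiscriminator(true, false) == Discriminator::kExclusiveAfter);
    assert(pickDiscriminator(false, true) == Discriminator::kExclusiveAfter);
    assert(pickDiscriminator(false, false) == Discriminator::kExclusiveBefore);
}
```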
@@ -252,23 +261,23 @@ Status buildDupKeyErrorStatus(const BSONObj& key, sb.str()); } -Status buildDupKeyErrorStatus(const KeyString::Value& keyString, +Status buildDupKeyErrorStatus(const key_string::Value& keyString, const NamespaceString& collectionNamespace, const std::string& indexName, const BSONObj& keyPattern, const BSONObj& indexCollation, const Ordering& ordering) { - const BSONObj key = KeyString::toBson( + const BSONObj key = key_string::toBson( keyString.getBuffer(), keyString.getSize(), ordering, keyString.getTypeBits()); return buildDupKeyErrorStatus(key, collectionNamespace, indexName, keyPattern, indexCollation); } Status buildDupKeyErrorStatus(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, const Ordering& ordering, const IndexDescriptor* desc) { - const BSONObj key = KeyString::toBson( + const BSONObj key = key_string::toBson( keyString.getBuffer(), keyString.getSize(), ordering, keyString.getTypeBits()); return buildDupKeyErrorStatus(opCtx, key, desc); } diff --git a/src/mongo/db/storage/index_entry_comparison.h b/src/mongo/db/storage/index_entry_comparison.h index 4fa2cb5d0e857..37d95c5b802d3 100644 --- a/src/mongo/db/storage/index_entry_comparison.h +++ b/src/mongo/db/storage/index_entry_comparison.h @@ -29,17 +29,33 @@ #pragma once +#include +#include +#include +#include #include +#include #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/ordering.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" #include "mongo/db/storage/duplicate_key_error_info.h" #include "mongo/db/storage/key_string.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/debug_util.h" namespace mongo { @@ -89,24 +105,24 @@ inline bool operator!=(const IndexKeyEntry& lhs, const IndexKeyEntry& rhs) { } /** - * Represents KeyString struct containing a KeyString::Value and its RecordId + * Represents KeyString struct containing a key_string::Value and its RecordId */ struct KeyStringEntry { - KeyStringEntry(KeyString::Value ks, RecordId id) : keyString(ks), loc(std::move(id)) { + KeyStringEntry(key_string::Value ks, RecordId id) : keyString(ks), loc(std::move(id)) { if (!kDebugBuild) { return; } loc.withFormat( [](RecordId::Null n) { invariant(false); }, [&](int64_t rid) { - invariant(loc == KeyString::decodeRecordIdLongAtEnd(ks.getBuffer(), ks.getSize())); + invariant(loc == key_string::decodeRecordIdLongAtEnd(ks.getBuffer(), ks.getSize())); }, [&](const char* str, int size) { - invariant(loc == KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + invariant(loc == key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); }); } - KeyString::Value keyString; + key_string::Value keyString; RecordId loc; }; @@ -217,10 +233,10 @@ class IndexEntryComparison { * entries in an index is to support storage engines that require comparators that take * arguments of the same type. 
*/ - static KeyString::Value makeKeyStringFromSeekPointForSeek(const IndexSeekPoint& seekPoint, - KeyString::Version version, - Ordering ord, - bool isForward); + static key_string::Value makeKeyStringFromSeekPointForSeek(const IndexSeekPoint& seekPoint, + key_string::Version version, + Ordering ord, + bool isForward); /** * Encodes the BSON Key into a KeyString object to pass in to SortedDataInterface::seek(). @@ -247,24 +263,24 @@ class IndexEntryComparison { * (which is less than bsonKey). WT's search_near() could land either on the previous key or the * bsonKey. WT will selectively call prev() if it's on bsonKey. */ - static KeyString::Value makeKeyStringFromBSONKeyForSeek(const BSONObj& bsonKey, - KeyString::Version version, - Ordering ord, - bool isForward, - bool inclusive); + static key_string::Value makeKeyStringFromBSONKeyForSeek(const BSONObj& bsonKey, + key_string::Version version, + Ordering ord, + bool isForward, + bool inclusive); /** * Encodes the BSON Key into a KeyString object to pass in to SortedDataInterface::seek() * or SortedDataInterface::setEndPosition(). * * This funcition is similar to IndexEntryComparison::makeKeyStringFromBSONKeyForSeek() - * but allows you to pick your own KeyString::Discriminator based on wether or not the + * but allows you to pick your own key_string::Discriminator based on wether or not the * resulting KeyString is for the start key or end key of a seek. */ - static KeyString::Value makeKeyStringFromBSONKey(const BSONObj& bsonKey, - KeyString::Version version, - Ordering ord, - KeyString::Discriminator discrim); + static key_string::Value makeKeyStringFromBSONKey(const BSONObj& bsonKey, + key_string::Version version, + Ordering ord, + key_string::Discriminator discrim); private: // Ordering is used in comparison() to compare BSONElements @@ -283,7 +299,7 @@ Status buildDupKeyErrorStatus(const BSONObj& key, DuplicateKeyErrorInfo::FoundValue&& foundValue = stdx::monostate{}, boost::optional duplicateRid = boost::none); -Status buildDupKeyErrorStatus(const KeyString::Value& keyString, +Status buildDupKeyErrorStatus(const key_string::Value& keyString, const NamespaceString& collectionNamespace, const std::string& indexName, const BSONObj& keyPattern, @@ -295,7 +311,7 @@ Status buildDupKeyErrorStatus(OperationContext* opCtx, const IndexDescriptor* desc); Status buildDupKeyErrorStatus(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, const Ordering& ordering, const IndexDescriptor* desc); diff --git a/src/mongo/db/storage/index_entry_comparison_test.cpp b/src/mongo/db/storage/index_entry_comparison_test.cpp index 0ff271789c779..2339916fdbe66 100644 --- a/src/mongo/db/storage/index_entry_comparison_test.cpp +++ b/src/mongo/db/storage/index_entry_comparison_test.cpp @@ -27,12 +27,16 @@ * it in the license file. 
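To tie the two declarations above together: makeKeyStringFromBSONKeyForSeek() chooses the discriminator for you from the direction and inclusivity, while makeKeyStringFromBSONKey() lets the caller pick one explicitly, which is what an end bound needs. The sketch below shows how a caller might build a start key and an inclusive end bound for a forward range scan; it assumes the headers from this patch and the repository build context, and the surrounding cursor setup is intentionally omitted.

```cpp
// Usage sketch for the helpers declared above, assuming the headers from this
// patch (index_entry_comparison.h / key_string.h) and an all-ascending
// ordering. Only the key construction mirrors the declarations in this diff.
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/bson/ordering.h"
#include "mongo/db/storage/index_entry_comparison.h"
#include "mongo/db/storage/key_string.h"

namespace mongo {
void buildRangeScanKeys() {
    const Ordering ord = Ordering::make(BSONObj());
    const auto version = key_string::Version::kLatestVersion;

    // Start key for a forward, inclusive scan from {"": 5}: the helper picks
    // kExclusiveBefore so the cursor lands on the first key >= 5.
    key_string::Value startKey = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek(
        BSON("" << 5), version, ord, /*isForward=*/true, /*inclusive=*/true);

    // End bound at {"": 9}, inclusive: pick the discriminator yourself so the
    // bound sorts *after* real keys equal to 9.
    key_string::Value endKey = IndexEntryComparison::makeKeyStringFromBSONKey(
        BSON("" << 9), version, ord, key_string::Discriminator::kExclusiveAfter);

    // Per the comments on these helpers, startKey would be passed to
    // SortedDataInterface::seek() and endKey to setEndPosition().
    (void)startKey;
    (void)endKey;
}
}  // namespace mongo
```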
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/bson/bsontypes.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/storage/duplicate_key_error_info.h" #include "mongo/db/storage/index_entry_comparison.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/hex.h" namespace mongo { diff --git a/src/mongo/db/storage/key_string.cpp b/src/mongo/db/storage/key_string.cpp index 7ff962f5c4588..855958923db3b 100644 --- a/src/mongo/db/storage/key_string.cpp +++ b/src/mongo/db/storage/key_string.cpp @@ -28,22 +28,38 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/key_string.h" - +#include +#include +#include #include +#include +#include +#include +#include +#include #include +#include +#include + #include "mongo/base/data_cursor.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/exec/sbe/values/value_builder.h" +#include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/bits.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/endian.h" #include "mongo/platform/strnlen.h" #include "mongo/util/decimal_counter.h" #include "mongo/util/hex.h" +#include "mongo/util/shared_buffer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -57,9 +73,9 @@ namespace mongo { using std::string; -template class StackBufBuilderBase; +template class StackBufBuilderBase; -namespace KeyString { +namespace key_string { namespace { @@ -2869,7 +2885,8 @@ int compare(const char* leftBuf, const char* rightBuf, size_t leftSize, size_t r } int Value::compareWithTypeBits(const Value& other) const { - return KeyString::compare(getBuffer(), other.getBuffer(), _buffer.size(), other._buffer.size()); + return key_string::compare( + getBuffer(), other.getBuffer(), _buffer.size(), other._buffer.size()); } bool readSBEValue(BufReader* reader, @@ -2886,7 +2903,7 @@ bool readSBEValue(BufReader* reader, // "discriminator" types are used for querying and are never stored in an index. invariant(ctype > kLess && ctype < kGreater); - const uint32_t depth = 1; // This function only gets called for a top-level KeyString::Value. + const uint32_t depth = 1; // This function only gets called for a top-level key_string::Value. toBsonValue(ctype, reader, typeBits, inverted, version, valueBuilder, depth); return true; } @@ -2900,7 +2917,7 @@ void appendSingleFieldToBSONAs( uint8_t ctype = readType(&reader, inverted); invariant(ctype != kEnd && ctype > kLess && ctype < kGreater); - const uint32_t depth = 1; // This function only gets called for a top-level KeyString::Value. + const uint32_t depth = 1; // This function only gets called for a top-level key_string::Value. // Callers discard their TypeBits. TypeBits typeBits(version); TypeBits::Reader typeBitsReader(typeBits); @@ -2917,7 +2934,7 @@ void appendToBSONArray(const char* buf, int len, BSONArrayBuilder* builder, Vers uint8_t ctype = readType(&reader, inverted); invariant(ctype != kEnd && ctype > kLess && ctype < kGreater); - // This function only gets called for a top-level KeyString::Value. + // This function only gets called for a top-level key_string::Value. 
const uint32_t depth = 1; // All users of this currently discard type bits. TypeBits typeBits(version); @@ -3053,9 +3070,9 @@ std::string explain(const char* buffer, RecordId recordId; if (keyFormat == KeyFormat::Long) { - recordId = KeyString::decodeRecordIdLongAtEnd(buffer, len); + recordId = key_string::decodeRecordIdLongAtEnd(buffer, len); } else { - recordId = KeyString::decodeRecordIdStrAtEnd(buffer, len); + recordId = key_string::decodeRecordIdStrAtEnd(buffer, len); } str << "Bytes: 0x" << hexblob::encodeLower(startPos, (len - startOff)) << "\n"; auto kfString = (*keyFormat == KeyFormat::Long) ? "Long" : "String"; @@ -3067,6 +3084,6 @@ std::string explain(const char* buffer, return str; } -} // namespace KeyString +} // namespace key_string } // namespace mongo diff --git a/src/mongo/db/storage/key_string.h b/src/mongo/db/storage/key_string.h index 95389d2a3d6d3..79cce5e07715f 100644 --- a/src/mongo/db/storage/key_string.h +++ b/src/mongo/db/storage/key_string.h @@ -29,23 +29,41 @@ #pragma once -#include - #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mongo/base/data_type_endian.h" #include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonelement_comparator_interface.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" #include "mongo/bson/ordering.h" #include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/record_id.h" #include "mongo/db/storage/key_format.h" #include "mongo/platform/decimal128.h" #include "mongo/util/assert_util.h" - -#include +#include "mongo/util/bufreader.h" +#include "mongo/util/shared_buffer_fragment.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -53,7 +71,7 @@ namespace sbe::value { class ValueBuilder; } -namespace KeyString { +namespace key_string { enum class Version : uint8_t { V0 = 0, V1 = 1, kLatestVersion = V1 }; @@ -175,7 +193,7 @@ class TypeBits { } // - // Everything below is only for use by KeyString::Builder. + // Everything below is only for use by key_string::Builder. // // Note: No space is used if all bits are 0 so the most common cases should be 0x0. @@ -354,7 +372,7 @@ class TypeBits { /** - * Value owns a buffer that corresponds to a completely generated KeyString::Builder with the + * Value owns a buffer that corresponds to a completely generated key_string::Builder with the * TypeBits appended. * * To optimize copy performance and space requirements of this structure, the buffer will contain @@ -383,7 +401,7 @@ class Value { } /** - * Compare with another KeyString::Value or Builder. + * Compare with another key_string::Value or Builder. */ template int compare(const T& other) const; @@ -391,7 +409,7 @@ class Value { int compareWithTypeBits(const Value& other) const; /** - * Compare with another KeyString::Value or Builder, ignoring the RecordId part of both. + * Compare with another key_string::Value or Builder, ignoring the RecordId part of both. */ template int compareWithoutRecordIdLong(const T& other) const; @@ -446,7 +464,7 @@ class Value { void serializeWithoutRecordIdStr(BufBuilder& buf) const; // Deserialize the Value from a serialized format. 
- static Value deserialize(BufReader& buf, KeyString::Version version) { + static Value deserialize(BufReader& buf, key_string::Version version) { const int32_t sizeOfKeystring = buf.read>(); const void* keystringPtr = buf.skip(sizeOfKeystring); @@ -485,7 +503,7 @@ class Value { // aggregate and free unused memory periodically. int memUsageForSorter() const { invariant(!_buffer.isShared(), - "Cannot obtain memory usage from shared buffer on KeyString::Value"); + "Cannot obtain memory usage from shared buffer on key_string::Value"); return sizeof(Value) + _buffer.underlyingCapacity(); } @@ -684,13 +702,13 @@ class BuilderBase { } /** - * Compare with another KeyString::Value or Builder. + * Compare with another key_string::Value or Builder. */ template int compare(const T& other) const; /** - * Compare with another KeyString::Value or Builder, ignoring the RecordId part of both. + * Compare with another key_string::Value or Builder, ignoring the RecordId part of both. */ template int compareWithoutRecordIdLong(const T& other) const; @@ -1087,18 +1105,18 @@ void appendSingleFieldToBSONAs(const char* buf, int len, StringData fieldName, BSONObjBuilder* builder, - Version version = KeyString::Version::kLatestVersion); + Version version = key_string::Version::kLatestVersion); template template int BuilderBase::compare(const T& other) const { - return KeyString::compare(getBuffer(), other.getBuffer(), getSize(), other.getSize()); + return key_string::compare(getBuffer(), other.getBuffer(), getSize(), other.getSize()); } template template int BuilderBase::compareWithoutRecordIdLong(const T& other) const { - return KeyString::compare( + return key_string::compare( getBuffer(), other.getBuffer(), !isEmpty() ? sizeWithoutRecordIdLongAtEnd(getBuffer(), getSize()) : 0, @@ -1108,7 +1126,7 @@ int BuilderBase::compareWithoutRecordIdLong(const T& other) const { template template int BuilderBase::compareWithoutRecordIdStr(const T& other) const { - return KeyString::compare( + return key_string::compare( getBuffer(), other.getBuffer(), !isEmpty() ? sizeWithoutRecordIdStrAtEnd(getBuffer(), getSize()) : 0, @@ -1117,12 +1135,12 @@ int BuilderBase::compareWithoutRecordIdStr(const T& other) const { template int Value::compare(const T& other) const { - return KeyString::compare(getBuffer(), other.getBuffer(), getSize(), other.getSize()); + return key_string::compare(getBuffer(), other.getBuffer(), getSize(), other.getSize()); } template int Value::compareWithoutRecordIdLong(const T& other) const { - return KeyString::compare( + return key_string::compare( getBuffer(), other.getBuffer(), !isEmpty() ? sizeWithoutRecordIdLongAtEnd(getBuffer(), getSize()) : 0, @@ -1131,7 +1149,7 @@ int Value::compareWithoutRecordIdLong(const T& other) const { template int Value::compareWithoutRecordIdStr(const T& other) const { - return KeyString::compare( + return key_string::compare( getBuffer(), other.getBuffer(), !isEmpty() ? 
sizeWithoutRecordIdStrAtEnd(getBuffer(), getSize()) : 0, @@ -1167,8 +1185,8 @@ std::string explain(const char* buffer, const TypeBits& typeBits, boost::optional keyFormat); -} // namespace KeyString +} // namespace key_string -using KeyStringSet = boost::container::flat_set; +using KeyStringSet = boost::container::flat_set; } // namespace mongo diff --git a/src/mongo/db/storage/key_string_bm.cpp b/src/mongo/db/storage/key_string_bm.cpp index 4e8b45c481880..bdffca249baf1 100644 --- a/src/mongo/db/storage/key_string_bm.cpp +++ b/src/mongo/db/storage/key_string_bm.cpp @@ -28,15 +28,23 @@ */ -#include "mongo/platform/basic.h" - #include +#include #include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/key_string.h" #include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" #include "mongo/util/bufreader.h" +#include "mongo/util/shared_buffer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -99,13 +107,13 @@ BSONObj generateBson(BsonValueType bsonValueType) { } static BsonsAndKeyStrings generateBsonsAndKeyStrings(BsonValueType bsonValueType, - KeyString::Version version) { + key_string::Version version) { BsonsAndKeyStrings result; result.bsonSize = 0; result.keystringSize = 0; for (int i = 0; i < kSampleSize; i++) { BSONObj bson = generateBson(bsonValueType); - KeyString::Builder ks(version, bson, ALL_ASCENDING); + key_string::Builder ks(version, bson, ALL_ASCENDING); result.bsonSize += bson.objsize(); result.keystringSize += ks.getSize(); result.bsons[i] = bson; @@ -122,13 +130,13 @@ static BsonsAndKeyStrings generateBsonsAndKeyStrings(BsonValueType bsonValueType } void BM_BSONToKeyString(benchmark::State& state, - const KeyString::Version version, + const key_string::Version version, BsonValueType bsonType) { const BsonsAndKeyStrings bsonsAndKeyStrings = generateBsonsAndKeyStrings(bsonType, version); for (auto _ : state) { benchmark::ClobberMemory(); for (const auto& bson : bsonsAndKeyStrings.bsons) { - benchmark::DoNotOptimize(KeyString::Builder(version, bson, ALL_ASCENDING)); + benchmark::DoNotOptimize(key_string::Builder(version, bson, ALL_ASCENDING)); } } state.SetBytesProcessed(state.iterations() * bsonsAndKeyStrings.bsonSize); @@ -136,7 +144,7 @@ void BM_BSONToKeyString(benchmark::State& state, } void BM_KeyStringToBSON(benchmark::State& state, - const KeyString::Version version, + const key_string::Version version, BsonValueType bsonType) { const BsonsAndKeyStrings bsonsAndKeyStrings = generateBsonsAndKeyStrings(bsonType, version); for (auto _ : state) { @@ -144,10 +152,10 @@ void BM_KeyStringToBSON(benchmark::State& state, for (size_t i = 0; i < kSampleSize; i++) { BufReader buf(bsonsAndKeyStrings.typebits[i].get(), bsonsAndKeyStrings.typebitsLens[i]); benchmark::DoNotOptimize( - KeyString::toBson(bsonsAndKeyStrings.keystrings[i].get(), - bsonsAndKeyStrings.keystringLens[i], - ALL_ASCENDING, - KeyString::TypeBits::fromBuffer(version, &buf))); + key_string::toBson(bsonsAndKeyStrings.keystrings[i].get(), + bsonsAndKeyStrings.keystringLens[i], + ALL_ASCENDING, + key_string::TypeBits::fromBuffer(version, &buf))); } } state.SetBytesProcessed(state.iterations() * bsonsAndKeyStrings.bsonSize); @@ -156,19 +164,19 @@ void BM_KeyStringToBSON(benchmark::State& state, void BM_KeyStringValueAssign(benchmark::State& state, BsonValueType 
bsonType) { // The KeyString version does not matter for this test. - const auto version = KeyString::Version::V1; + const auto version = key_string::Version::V1; const BsonsAndKeyStrings bsonsAndKeyStrings = generateBsonsAndKeyStrings(bsonType, version); // Pre-construct the values. - std::vector values; + std::vector values; for (size_t i = 0; i < kSampleSize; i++) { - KeyString::HeapBuilder builder(version, bsonsAndKeyStrings.bsons[i], ALL_ASCENDING); + key_string::HeapBuilder builder(version, bsonsAndKeyStrings.bsons[i], ALL_ASCENDING); values.emplace_back(builder.release()); } for (auto _ : state) { benchmark::ClobberMemory(); - KeyString::Value oldValue = values[0]; + key_string::Value oldValue = values[0]; for (size_t i = 1; i < kSampleSize; i++) { oldValue = values[i]; } @@ -179,13 +187,13 @@ void BM_KeyStringValueAssign(benchmark::State& state, BsonValueType bsonType) { void BM_KeyStringHeapBuilderRelease(benchmark::State& state, BsonValueType bsonType) { // The KeyString version does not matter for this test. - const auto version = KeyString::Version::V1; + const auto version = key_string::Version::V1; const BsonsAndKeyStrings bsonsAndKeyStrings = generateBsonsAndKeyStrings(bsonType, version); for (auto _ : state) { benchmark::ClobberMemory(); for (size_t i = 0; i < kSampleSize; i++) { - KeyString::HeapBuilder builder(version, bsonsAndKeyStrings.bsons[i], ALL_ASCENDING); + key_string::HeapBuilder builder(version, bsonsAndKeyStrings.bsons[i], ALL_ASCENDING); benchmark::DoNotOptimize(builder.release()); } } @@ -195,13 +203,13 @@ void BM_KeyStringHeapBuilderRelease(benchmark::State& state, BsonValueType bsonT void BM_KeyStringStackBuilderCopy(benchmark::State& state, BsonValueType bsonType) { // The KeyString version does not matter for this test. 
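For readers unfamiliar with the Google Benchmark pattern this file relies on: the BENCHMARK_CAPTURE registrations later in the file bind extra arguments (the key_string version and the BSON value type) to benchmark functions that otherwise take only `benchmark::State&`. Below is a minimal, self-contained example of that mechanism with a toy payload; the function, names, and workload are assumptions made purely for illustration.

```cpp
// Minimal Google Benchmark sketch of the BENCHMARK_CAPTURE pattern used in
// this file; the toy string workload below is an assumption for the example.
#include <benchmark/benchmark.h>

#include <cstddef>
#include <string>

// Extra arguments after benchmark::State& are supplied by BENCHMARK_CAPTURE.
static void BM_Concat(benchmark::State& state, std::size_t copies, char fill) {
    const std::string piece(copies, fill);
    for (auto _ : state) {
        benchmark::ClobberMemory();     // force pending writes to complete
        std::string out;
        for (int i = 0; i < 8; ++i)
            out += piece;
        benchmark::DoNotOptimize(out);  // keep the result observable
    }
}

// Each capture registers a named variant with its own bound arguments,
// mirroring the (version, BSON type) pairs captured in this file.
BENCHMARK_CAPTURE(BM_Concat, Small_a, 16, 'a');
BENCHMARK_CAPTURE(BM_Concat, Large_z, 4096, 'z');

BENCHMARK_MAIN();
```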
- const auto version = KeyString::Version::V1; + const auto version = key_string::Version::V1; const BsonsAndKeyStrings bsonsAndKeyStrings = generateBsonsAndKeyStrings(bsonType, version); for (auto _ : state) { benchmark::ClobberMemory(); for (size_t i = 0; i < kSampleSize; i++) { - KeyString::Builder builder(version, bsonsAndKeyStrings.bsons[i], ALL_ASCENDING); + key_string::Builder builder(version, bsonsAndKeyStrings.bsons[i], ALL_ASCENDING); benchmark::DoNotOptimize(builder.getValueCopy()); } } @@ -214,18 +222,18 @@ void BM_KeyStringRecordIdStrAppend(benchmark::State& state, const size_t size) { auto rid = RecordId(buf.c_str(), size); for (auto _ : state) { benchmark::ClobberMemory(); - benchmark::DoNotOptimize(KeyString::Builder(KeyString::Version::V1, rid)); + benchmark::DoNotOptimize(key_string::Builder(key_string::Version::V1, rid)); } } void BM_KeyStringRecordIdStrDecode(benchmark::State& state, const size_t size) { const auto buf = std::string(size, 'a'); - KeyString::Builder ks(KeyString::Version::V1, RecordId(buf.c_str(), size)); + key_string::Builder ks(key_string::Version::V1, RecordId(buf.c_str(), size)); auto ksBuf = ks.getBuffer(); auto ksSize = ks.getSize(); for (auto _ : state) { benchmark::ClobberMemory(); - benchmark::DoNotOptimize(KeyString::decodeRecordIdStrAtEnd(ksBuf, ksSize)); + benchmark::DoNotOptimize(key_string::decodeRecordIdStrAtEnd(ksBuf, ksSize)); } } @@ -247,25 +255,25 @@ BENCHMARK_CAPTURE(BM_KeyStringStackBuilderCopy, Decimal, DECIMAL); BENCHMARK_CAPTURE(BM_KeyStringStackBuilderCopy, String, STRING); BENCHMARK_CAPTURE(BM_KeyStringStackBuilderCopy, Array, ARRAY); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V0_Int, KeyString::Version::V0, INT); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_Int, KeyString::Version::V1, INT); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V0_Double, KeyString::Version::V0, DOUBLE); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_Double, KeyString::Version::V1, DOUBLE); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_Decimal, KeyString::Version::V1, DECIMAL); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V0_String, KeyString::Version::V0, STRING); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_String, KeyString::Version::V1, STRING); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V0_Array, KeyString::Version::V0, ARRAY); -BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_Array, KeyString::Version::V1, ARRAY); - -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V0_Int, KeyString::Version::V0, INT); -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_Int, KeyString::Version::V1, INT); -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V0_Double, KeyString::Version::V0, DOUBLE); -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_Double, KeyString::Version::V1, DOUBLE); -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_Decimal, KeyString::Version::V1, DECIMAL); -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V0_String, KeyString::Version::V0, STRING); -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_String, KeyString::Version::V1, STRING); -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V0_Array, KeyString::Version::V0, ARRAY); -BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_Array, KeyString::Version::V1, ARRAY); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V0_Int, key_string::Version::V0, INT); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_Int, key_string::Version::V1, INT); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V0_Double, key_string::Version::V0, DOUBLE); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_Double, key_string::Version::V1, DOUBLE); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_Decimal, key_string::Version::V1, DECIMAL); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V0_String, 
key_string::Version::V0, STRING); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_String, key_string::Version::V1, STRING); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V0_Array, key_string::Version::V0, ARRAY); +BENCHMARK_CAPTURE(BM_BSONToKeyString, V1_Array, key_string::Version::V1, ARRAY); + +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V0_Int, key_string::Version::V0, INT); +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_Int, key_string::Version::V1, INT); +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V0_Double, key_string::Version::V0, DOUBLE); +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_Double, key_string::Version::V1, DOUBLE); +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_Decimal, key_string::Version::V1, DECIMAL); +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V0_String, key_string::Version::V0, STRING); +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_String, key_string::Version::V1, STRING); +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V0_Array, key_string::Version::V0, ARRAY); +BENCHMARK_CAPTURE(BM_KeyStringToBSON, V1_Array, key_string::Version::V1, ARRAY); BENCHMARK_CAPTURE(BM_KeyStringRecordIdStrAppend, 16B, 16); BENCHMARK_CAPTURE(BM_KeyStringRecordIdStrAppend, 512B, 512); diff --git a/src/mongo/db/storage/key_string_decode.cpp b/src/mongo/db/storage/key_string_decode.cpp index 95c6bc4ee48df..673511043d813 100644 --- a/src/mongo/db/storage/key_string_decode.cpp +++ b/src/mongo/db/storage/key_string_decode.cpp @@ -27,15 +27,34 @@ * it in the license file. */ +#include +#include #include #include +#include #include "key_format.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/key_string.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" +#include "mongo/util/hex.h" #include "mongo/util/options_parser/environment.h" +#include "mongo/util/options_parser/option_description.h" #include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/options_parser.h" +#include "mongo/util/options_parser/value.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -140,35 +159,35 @@ struct KSDecodeOptions { }; void decode(const KSDecodeOptions& options) { - KeyString::TypeBits typeBits(KeyString::Version::kLatestVersion); + key_string::TypeBits typeBits(key_string::Version::kLatestVersion); if (!options.binTypeBits.empty()) { BufReader reader(options.binTypeBits.c_str(), options.binTypeBits.size()); typeBits.resetFromBuffer(&reader); } - auto builder = KeyString::HeapBuilder(KeyString::Version::kLatestVersion); + auto builder = key_string::HeapBuilder(key_string::Version::kLatestVersion); builder.resetFromBuffer(options.binKeyString.c_str(), options.binKeyString.size()); if (OutputFormat::kExplain == options.outputFormat) { - std::cout << KeyString::explain(builder.getBuffer(), - builder.getSize(), - options.keyPattern, - typeBits, - options.keyFormat); + std::cout << key_string::explain(builder.getBuffer(), + builder.getSize(), + options.keyPattern, + typeBits, + options.keyFormat); } else if (OutputFormat::kBson == options.outputFormat) { - auto bson = KeyString::toBsonSafe( + auto bson = key_string::toBsonSafe( builder.getBuffer(), builder.getSize(), Ordering::make(options.keyPattern), typeBits); - auto rehydrated = KeyString::rehydrateKey(options.keyPattern, bson); + auto rehydrated = 
key_string::rehydrateKey(options.keyPattern, bson); str::stream out; if (options.binKeyString.size() >= 2 && options.keyFormat) { BSONObjBuilder bob(rehydrated); RecordId recordId; if (*options.keyFormat == KeyFormat::Long) { - recordId = KeyString::decodeRecordIdLongAtEnd(options.binKeyString.c_str(), - options.binKeyString.size()); + recordId = key_string::decodeRecordIdLongAtEnd(options.binKeyString.c_str(), + options.binKeyString.size()); } else { - recordId = KeyString::decodeRecordIdStrAtEnd(options.binKeyString.c_str(), - options.binKeyString.size()); + recordId = key_string::decodeRecordIdStrAtEnd(options.binKeyString.c_str(), + options.binKeyString.size()); } recordId.serializeToken("$recordId", &bob); out << bob.obj(); diff --git a/src/mongo/db/storage/key_string_test.cpp b/src/mongo/db/storage/key_string_test.cpp index a15c47ac3515c..e16dd15a21d40 100644 --- a/src/mongo/db/storage/key_string_test.cpp +++ b/src/mongo/db/storage/key_string_test.cpp @@ -28,30 +28,42 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include #include -#include +#include #include #include #include -#include +#include #include +#include "mongo/base/error_codes.h" #include "mongo/base/simple_string_data_comparator.h" +#include "mongo/base/status.h" #include "mongo/bson/bson_depth.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj_comparator.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/storage/key_string.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/decimal128.h" #include "mongo/stdx/future.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/debug_util.h" #include "mongo/util/hex.h" +#include "mongo/util/shared_buffer.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -60,28 +72,28 @@ using std::string; using namespace mongo; -BSONObj toBson(const KeyString::Builder& ks, Ordering ord) { - return KeyString::toBson(ks.getBuffer(), ks.getSize(), ord, ks.getTypeBits()); +BSONObj toBson(const key_string::Builder& ks, Ordering ord) { + return key_string::toBson(ks.getBuffer(), ks.getSize(), ord, ks.getTypeBits()); } template -BSONObj toBsonAndCheckKeySize(const KeyString::BuilderBase& ks, Ordering ord) { +BSONObj toBsonAndCheckKeySize(const key_string::BuilderBase& ks, Ordering ord) { auto KeyStringBuilderSize = ks.getSize(); - // Validate size of the key in KeyString::Builder. + // Validate size of the key in key_string::Builder. 
ASSERT_EQUALS( KeyStringBuilderSize, - KeyString::getKeySize(ks.getBuffer(), KeyStringBuilderSize, ord, ks.getTypeBits())); - return KeyString::toBson(ks.getBuffer(), KeyStringBuilderSize, ord, ks.getTypeBits()); + key_string::getKeySize(ks.getBuffer(), KeyStringBuilderSize, ord, ks.getTypeBits())); + return key_string::toBson(ks.getBuffer(), KeyStringBuilderSize, ord, ks.getTypeBits()); } -BSONObj toBsonAndCheckKeySize(const KeyString::Value& ks, Ordering ord) { +BSONObj toBsonAndCheckKeySize(const key_string::Value& ks, Ordering ord) { auto KeyStringSize = ks.getSize(); - // Validate size of the key in KeyString::Value. + // Validate size of the key in key_string::Value. ASSERT_EQUALS(KeyStringSize, - KeyString::getKeySize(ks.getBuffer(), KeyStringSize, ord, ks.getTypeBits())); - return KeyString::toBson(ks.getBuffer(), KeyStringSize, ord, ks.getTypeBits()); + key_string::getKeySize(ks.getBuffer(), KeyStringSize, ord, ks.getTypeBits())); + return key_string::toBson(ks.getBuffer(), KeyStringSize, ord, ks.getTypeBits()); } Ordering ALL_ASCENDING = Ordering::make(BSONObj()); @@ -93,33 +105,33 @@ class KeyStringBuilderTest : public mongo::unittest::Test { void run() { auto base = static_cast(this); try { - version = KeyString::Version::V0; + version = key_string::Version::V0; base->run(); - version = KeyString::Version::V1; + version = key_string::Version::V1; base->run(); } catch (...) { LOGV2(22226, "exception while testing KeyStringBuilder version " "{mongo_KeyString_keyStringVersionToString_version}", "mongo_KeyString_keyStringVersionToString_version"_attr = - mongo::KeyString::keyStringVersionToString(version)); + mongo::key_string::keyStringVersionToString(version)); throw; } } protected: - KeyString::Version version; + key_string::Version version; }; template void checkSizeWhileAppendingTypeBits(int numOfBitsUsedForType, T&& appendBitsFunc) { - KeyString::TypeBits typeBits(KeyString::Version::V1); + key_string::TypeBits typeBits(key_string::Version::V1); const int kItems = 10000; // Pick an arbitrary large number. for (int i = 0; i < kItems; i++) { appendBitsFunc(typeBits); size_t currentRawSize = ((i + 1) * numOfBitsUsedForType - 1) / 8 + 1; size_t currentSize = currentRawSize; - if (currentRawSize > KeyString::TypeBits::kMaxBytesForShortEncoding) { + if (currentRawSize > key_string::TypeBits::kMaxBytesForShortEncoding) { // Case 4: plus 1 signal byte + 4 size bytes. 
currentSize += 5; ASSERT(typeBits.isLongEncoding()); @@ -165,59 +177,60 @@ TEST(InvalidKeyStringTest, FuzzedCodeWithScopeNesting) { &keyData); signed char typeBitsData[] = {0, 16, 0, 0, -127, 1}; BufReader typeBitsReader(typeBitsData, sizeof(typeBitsData)); - KeyString::TypeBits typeBits = - KeyString::TypeBits::fromBuffer(KeyString::Version::kLatestVersion, &typeBitsReader); - ASSERT_THROWS_CODE(KeyString::toBsonSafe(keyData.buf(), keyData.len(), ALL_ASCENDING, typeBits), - AssertionException, - ErrorCodes::Overflow); + key_string::TypeBits typeBits = + key_string::TypeBits::fromBuffer(key_string::Version::kLatestVersion, &typeBitsReader); + ASSERT_THROWS_CODE( + key_string::toBsonSafe(keyData.buf(), keyData.len(), ALL_ASCENDING, typeBits), + AssertionException, + ErrorCodes::Overflow); } TEST(TypeBitsTest, AppendSymbol) { checkSizeWhileAppendingTypeBits( - 1, [](KeyString::TypeBits& typeBits) -> void { typeBits.appendSymbol(); }); + 1, [](key_string::TypeBits& typeBits) -> void { typeBits.appendSymbol(); }); } TEST(TypeBitsTest, AppendString) { // The typeBits should be all zeros, so numOfBitsUsedForType is set to 0 for // passing the test although it technically uses 1 bit. checkSizeWhileAppendingTypeBits( - 0, [](KeyString::TypeBits& typeBits) -> void { typeBits.appendString(); }); + 0, [](key_string::TypeBits& typeBits) -> void { typeBits.appendString(); }); } TEST(typebitstest, appendDouble) { checkSizeWhileAppendingTypeBits( - 2, [](KeyString::TypeBits& typeBits) -> void { typeBits.appendNumberDouble(); }); + 2, [](key_string::TypeBits& typeBits) -> void { typeBits.appendNumberDouble(); }); } TEST(TypeBitsTest, AppendNumberLong) { checkSizeWhileAppendingTypeBits( - 2, [](KeyString::TypeBits& typeBits) -> void { typeBits.appendNumberLong(); }); + 2, [](key_string::TypeBits& typeBits) -> void { typeBits.appendNumberLong(); }); } TEST(TypeBitsTest, AppendNumberInt) { // The typeBits should be all zeros, so numOfBitsUsedForType is set to 0 for // passing the test although it technically uses 2 bits. 
checkSizeWhileAppendingTypeBits( - 0, [](KeyString::TypeBits& typeBits) -> void { typeBits.appendNumberInt(); }); + 0, [](key_string::TypeBits& typeBits) -> void { typeBits.appendNumberInt(); }); } TEST(TypeBitsTest, AppendNumberDecimal) { checkSizeWhileAppendingTypeBits( - 2, [](KeyString::TypeBits& typeBits) -> void { typeBits.appendNumberDecimal(); }); + 2, [](key_string::TypeBits& typeBits) -> void { typeBits.appendNumberDecimal(); }); } TEST(TypeBitsTest, AppendLongZero) { - checkSizeWhileAppendingTypeBits(2, [](KeyString::TypeBits& typeBits) -> void { - typeBits.appendZero(KeyString::TypeBits::kLong); + checkSizeWhileAppendingTypeBits(2, [](key_string::TypeBits& typeBits) -> void { + typeBits.appendZero(key_string::TypeBits::kLong); }); } TEST(TypeBitsTest, AppendDecimalZero) { - checkSizeWhileAppendingTypeBits(12 + 5, [](KeyString::TypeBits& typeBits) -> void { - typeBits.appendDecimalZero(KeyString::TypeBits::kDecimalZero1xxx); + checkSizeWhileAppendingTypeBits(12 + 5, [](key_string::TypeBits& typeBits) -> void { + typeBits.appendDecimalZero(key_string::TypeBits::kDecimalZero1xxx); }); } TEST(TypeBitsTest, AppendDecimalExponent) { checkSizeWhileAppendingTypeBits( - KeyString::TypeBits::kStoredDecimalExponentBits, - [](KeyString::TypeBits& typeBits) -> void { typeBits.appendDecimalExponent(1); }); + key_string::TypeBits::kStoredDecimalExponentBits, + [](key_string::TypeBits& typeBits) -> void { typeBits.appendDecimalExponent(1); }); } TEST(TypeBitsTest, UninitializedTypeBits) { - KeyString::TypeBits typeBits(KeyString::Version::V1); + key_string::TypeBits typeBits(key_string::Version::V1); ASSERT_EQ(typeBits.getSize(), 1u); ASSERT_EQ(typeBits.getBuffer()[0], 0); ASSERT(typeBits.isAllZeros()); @@ -227,8 +240,8 @@ TEST(TypeBitsTest, AllZerosTypeBits) { { std::string emptyBuffer = ""; BufReader reader(emptyBuffer.c_str(), 0); - KeyString::TypeBits typeBits = - KeyString::TypeBits::fromBuffer(KeyString::Version::V1, &reader); + key_string::TypeBits typeBits = + key_string::TypeBits::fromBuffer(key_string::Version::V1, &reader); ASSERT_EQ(typeBits.getSize(), 1u); ASSERT_EQ(typeBits.getBuffer()[0], 0); ASSERT(typeBits.isAllZeros()); @@ -237,8 +250,8 @@ TEST(TypeBitsTest, AllZerosTypeBits) { { char allZerosBuffer[16] = {0}; BufReader reader(allZerosBuffer, sizeof(allZerosBuffer)); - KeyString::TypeBits typeBits = - KeyString::TypeBits::fromBuffer(KeyString::Version::V1, &reader); + key_string::TypeBits typeBits = + key_string::TypeBits::fromBuffer(key_string::Version::V1, &reader); ASSERT_EQ(typeBits.getSize(), 1u); ASSERT_EQ(typeBits.getBuffer()[0], 0); ASSERT(typeBits.isAllZeros()); @@ -246,7 +259,7 @@ TEST(TypeBitsTest, AllZerosTypeBits) { } TEST(TypeBitsTest, AppendLotsOfZeroTypeBits) { - KeyString::TypeBits typeBits(KeyString::Version::V1); + key_string::TypeBits typeBits(key_string::Version::V1); for (int i = 0; i < 100000; i++) { typeBits.appendString(); } @@ -261,13 +274,13 @@ TEST_F(KeyStringBuilderTest, TooManyElementsInCompoundKey) { const char* data = "ooooooooooooooooooooooooooooooooo"; const size_t size = 33; - KeyString::Builder ks(KeyString::Version::V1); + key_string::Builder ks(key_string::Version::V1); ks.resetFromBuffer(data, size); // No exceptions should be thrown. 
- KeyString::toBsonSafe(data, size, ALL_ASCENDING, ks.getTypeBits()); - KeyString::decodeDiscriminator(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()); - KeyString::getKeySize(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()); + key_string::toBsonSafe(data, size, ALL_ASCENDING, ks.getTypeBits()); + key_string::decodeDiscriminator(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()); + key_string::getKeySize(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()); } TEST_F(KeyStringBuilderTest, MaxElementsInCompoundKey) { @@ -276,13 +289,13 @@ TEST_F(KeyStringBuilderTest, MaxElementsInCompoundKey) { const char* data = "oooooooooooooooooooooooooooooooo\x4"; const size_t size = 33; - KeyString::Builder ks(KeyString::Version::V1); + key_string::Builder ks(key_string::Version::V1); ks.resetFromBuffer(data, size); // No exceptions should be thrown. - KeyString::toBsonSafe(data, size, ALL_ASCENDING, ks.getTypeBits()); - KeyString::decodeDiscriminator(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()); - KeyString::getKeySize(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()); + key_string::toBsonSafe(data, size, ALL_ASCENDING, ks.getTypeBits()); + key_string::decodeDiscriminator(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()); + key_string::getKeySize(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()); } TEST_F(KeyStringBuilderTest, EmbeddedNullString) { @@ -290,21 +303,21 @@ TEST_F(KeyStringBuilderTest, EmbeddedNullString) { // characters and followed by \x00. const char* data = "\x3c\x00\xff\x00"; const size_t size = 4; - KeyString::TypeBits typeBits(KeyString::Version::kLatestVersion); + key_string::TypeBits typeBits(key_string::Version::kLatestVersion); // No exceptions should be thrown. - ASSERT_BSONOBJ_EQ(KeyString::toBson(data, size, ALL_ASCENDING, typeBits), + ASSERT_BSONOBJ_EQ(key_string::toBson(data, size, ALL_ASCENDING, typeBits), BSON("" << StringData("\x00", 1))); }; TEST_F(KeyStringBuilderTest, ExceededBSONDepth) { - KeyString::Builder ks(KeyString::Version::V1); + key_string::Builder ks(key_string::Version::V1); // Construct an illegal KeyString encoding with excessively nested BSON arrays '80' (P). const auto nestedArr = std::string(BSONDepth::getMaxAllowableDepth() + 1, 'P'); ks.resetFromBuffer(nestedArr.c_str(), nestedArr.size()); ASSERT_THROWS_CODE( - KeyString::toBsonSafe(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), + key_string::toBsonSafe(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), AssertionException, ErrorCodes::Overflow); @@ -321,7 +334,7 @@ TEST_F(KeyStringBuilderTest, ExceededBSONDepth) { // BSON. 
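ExceededBSONDepth relies on the decoder refusing to recurse past a fixed nesting limit rather than overflowing the stack. Below is a hedged sketch of that guard pattern; the node type, limit value, and error type are made up for illustration and are not the `BSONDepth`/`toBsonSafe` internals:

```cpp
#include <cassert>
#include <cstddef>
#include <stdexcept>
#include <vector>

// Hypothetical depth guard: recursion bails out with a typed error once the
// nesting level passes a fixed maximum, which is the behaviour the
// ExceededBSONDepth test asserts (it expects ErrorCodes::Overflow).
constexpr int kMaxAllowedDepth = 180;

struct Node {
    const Node* child = nullptr;
};

void checkDepth(const Node* n, int depth = 0) {
    if (depth > kMaxAllowedDepth)
        throw std::overflow_error("nesting exceeds the maximum allowed depth");
    if (n && n->child)
        checkDepth(n->child, depth + 1);
}

int main() {
    // Build a chain one level deeper than the limit allows.
    std::vector<Node> nodes(kMaxAllowedDepth + 2);
    for (std::size_t i = 0; i + 1 < nodes.size(); ++i)
        nodes[i].child = &nodes[i + 1];

    bool threw = false;
    try {
        checkDepth(&nodes[0]);
    } catch (const std::overflow_error&) {
        threw = true;  // graceful, typed failure instead of a stack overflow
    }
    assert(threw);
    return 0;
}
```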
ks.resetToKey(nestedObj, ALL_ASCENDING, RecordId(1)); ASSERT_THROWS_CODE( - KeyString::toBsonSafe(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), + key_string::toBsonSafe(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), AssertionException, ErrorCodes::Overflow); } @@ -332,14 +345,14 @@ TEST_F(KeyStringBuilderTest, Simple1) { ASSERT_BSONOBJ_LT(a, b); - ASSERT_LESS_THAN(KeyString::Builder(version, a, ALL_ASCENDING, RecordId(1)), - KeyString::Builder(version, b, ALL_ASCENDING, RecordId(1))); + ASSERT_LESS_THAN(key_string::Builder(version, a, ALL_ASCENDING, RecordId(1)), + key_string::Builder(version, b, ALL_ASCENDING, RecordId(1))); } #define ROUNDTRIP_ORDER(version, x, order) \ do { \ const BSONObj _orig = x; \ - const KeyString::Builder _ks(version, _orig, order); \ + const key_string::Builder _ks(version, _orig, order); \ const BSONObj _converted = toBsonAndCheckKeySize(_ks, order); \ ASSERT_BSONOBJ_EQ(_converted, _orig); \ ASSERT(_converted.binaryEqual(_orig)); \ @@ -353,8 +366,8 @@ TEST_F(KeyStringBuilderTest, Simple1) { #define COMPARES_SAME(_v, _x, _y) \ do { \ - KeyString::Builder _xKS(_v, _x, ONE_ASCENDING); \ - KeyString::Builder _yKS(_v, _y, ONE_ASCENDING); \ + key_string::Builder _xKS(_v, _x, ONE_ASCENDING); \ + key_string::Builder _yKS(_v, _y, ONE_ASCENDING); \ if (SimpleBSONObjComparator::kInstance.evaluate(_x == _y)) { \ ASSERT_EQUALS(_xKS, _yKS); \ } else if (SimpleBSONObjComparator::kInstance.evaluate(_x < _y)) { \ @@ -382,7 +395,7 @@ TEST_F(KeyStringBuilderTest, ActualBytesDouble) { // just one test like this for utter sanity BSONObj a = BSON("" << 5.5); - KeyString::Builder ks(version, a, ALL_ASCENDING); + key_string::Builder ks(version, a, ALL_ASCENDING); LOGV2(22227, "{keyStringVersionToString_version} size: {ks_getSize} hex " "[{toHex_ks_getBuffer_ks_getSize}]", @@ -392,14 +405,15 @@ TEST_F(KeyStringBuilderTest, ActualBytesDouble) { ASSERT_EQUALS(10U, ks.getSize()); - string hex = version == KeyString::Version::V0 ? "2B" // kNumericPositive1ByteInt - "0B" // (5 << 1) | 1 - "02000000000000" // fractional bytes of double - "04" // kEnd - : "2B" // kNumericPositive1ByteInt - "0B" // (5 << 1) | 1 - "80000000000000" // fractional bytes - "04"; // kEnd + string hex = version == key_string::Version::V0 + ? 
"2B" // kNumericPositive1ByteInt + "0B" // (5 << 1) | 1 + "02000000000000" // fractional bytes of double + "04" // kEnd + : "2B" // kNumericPositive1ByteInt + "0B" // (5 << 1) | 1 + "80000000000000" // fractional bytes + "04"; // kEnd ASSERT_EQUALS(hex, hexblob::encode(ks.getBuffer(), ks.getSize())); @@ -457,14 +471,14 @@ TEST_F(KeyStringBuilderTest, Array1) { ROUNDTRIP(version, BSON("" << BSON_ARRAY(1 << 2 << 3))); { - KeyString::Builder a(version, emptyArray, ALL_ASCENDING, RecordId::minLong()); - KeyString::Builder b(version, emptyArray, ALL_ASCENDING, RecordId(5)); + key_string::Builder a(version, emptyArray, ALL_ASCENDING, RecordId::minLong()); + key_string::Builder b(version, emptyArray, ALL_ASCENDING, RecordId(5)); ASSERT_LESS_THAN(a, b); } { - KeyString::Builder a(version, emptyArray, ALL_ASCENDING, RecordId(0)); - KeyString::Builder b(version, emptyArray, ALL_ASCENDING, RecordId(5)); + key_string::Builder a(version, emptyArray, ALL_ASCENDING, RecordId(0)); + key_string::Builder b(version, emptyArray, ALL_ASCENDING, RecordId(5)); ASSERT_LESS_THAN(a, b); } } @@ -519,12 +533,12 @@ TEST_F(KeyStringBuilderTest, NumbersNearInt32Max) { } TEST_F(KeyStringBuilderTest, DecimalNumbers) { - if (version == KeyString::Version::V0) { + if (version == key_string::Version::V0) { LOGV2(22228, "not testing DecimalNumbers for KeyStringBuilder V0"); return; } - const auto V1 = KeyString::Version::V1; + const auto V1 = key_string::Version::V1; // Zeros ROUNDTRIP(V1, BSON("" << Decimal128("0"))); @@ -593,22 +607,22 @@ TEST_F(KeyStringBuilderTest, DecimalNumbers) { TEST_F(KeyStringBuilderTest, KeyStringValue) { // Test that KeyStringBuilder is releasable into a Value type that is comparable. Once // released, it is reusable once reset. - KeyString::HeapBuilder ks1(KeyString::Version::V1, BSON("" << 1), ALL_ASCENDING); - KeyString::Value data1 = ks1.release(); + key_string::HeapBuilder ks1(key_string::Version::V1, BSON("" << 1), ALL_ASCENDING); + key_string::Value data1 = ks1.release(); - KeyString::HeapBuilder ks2(KeyString::Version::V1, BSON("" << 2), ALL_ASCENDING); - KeyString::Value data2 = ks2.release(); + key_string::HeapBuilder ks2(key_string::Version::V1, BSON("" << 2), ALL_ASCENDING); + key_string::Value data2 = ks2.release(); ASSERT(data2.compare(data1) > 0); ASSERT(data1.compare(data2) < 0); // Test that Value is moveable. - KeyString::Value moved = std::move(data1); + key_string::Value moved = std::move(data1); ASSERT(data2.compare(moved) > 0); ASSERT(moved.compare(data2) < 0); // Test that Value is copyable. 
- KeyString::Value dataCopy = data2; + key_string::Value dataCopy = data2; ASSERT(data2.compare(dataCopy) == 0); } @@ -625,15 +639,15 @@ TEST_F(KeyStringBuilderTest, KeyStringValueReleaseReusableTest) { BSONObj doc2 = BSON("fieldA" << 2 << "fieldB" << 3); BSONObj bson1 = BSON("" << 1 << "" << 2); BSONObj bson2 = BSON("" << 2 << "" << 3); - KeyString::HeapBuilder ks1(KeyString::Version::V1); + key_string::HeapBuilder ks1(key_string::Version::V1); ks1.appendBSONElement(doc1["fieldA"]); ks1.appendBSONElement(doc1["fieldB"]); - KeyString::Value data1 = ks1.release(); + key_string::Value data1 = ks1.release(); ks1.resetToEmpty(); ks1.appendBSONElement(doc2["fieldA"]); ks1.appendBSONElement(doc2["fieldB"]); - KeyString::Value data2 = ks1.release(); + key_string::Value data2 = ks1.release(); COMPARE_KS_BSON(data1, bson1, ALL_ASCENDING); COMPARE_KS_BSON(data2, bson2, ALL_ASCENDING); } @@ -641,10 +655,10 @@ TEST_F(KeyStringBuilderTest, KeyStringValueReleaseReusableTest) { TEST_F(KeyStringBuilderTest, KeyStringGetValueCopyTest) { // Test that KeyStringGetValueCopyTest creates a copy. BSONObj doc = BSON("fieldA" << 1); - KeyString::HeapBuilder ks(KeyString::Version::V1, ALL_ASCENDING); + key_string::HeapBuilder ks(key_string::Version::V1, ALL_ASCENDING); ks.appendBSONElement(doc["fieldA"]); - KeyString::Value data1 = ks.getValueCopy(); - KeyString::Value data2 = ks.release(); + key_string::Value data1 = ks.getValueCopy(); + key_string::Value data2 = ks.release(); // Assert that a copy was actually made and they don't share a buffer. ASSERT_NOT_EQUALS(data1.getBuffer(), data2.getBuffer()); @@ -657,19 +671,19 @@ TEST_F(KeyStringBuilderTest, KeyStringBuilderAppendBsonElement) { // Test that appendBsonElement works. { BSONObj doc = BSON("fieldA" << 1 << "fieldB" << 2); - KeyString::HeapBuilder ks(KeyString::Version::V1, ALL_ASCENDING); + key_string::HeapBuilder ks(key_string::Version::V1, ALL_ASCENDING); ks.appendBSONElement(doc["fieldA"]); ks.appendBSONElement(doc["fieldB"]); - KeyString::Value data = ks.release(); + key_string::Value data = ks.release(); COMPARE_KS_BSON(data, BSON("" << 1 << "" << 2), ALL_ASCENDING); } { BSONObj doc = BSON("fieldA" << 1 << "fieldB" << 2); - KeyString::HeapBuilder ks(KeyString::Version::V1, ONE_DESCENDING); + key_string::HeapBuilder ks(key_string::Version::V1, ONE_DESCENDING); ks.appendBSONElement(doc["fieldA"]); ks.appendBSONElement(doc["fieldB"]); - KeyString::Value data = ks.release(); + key_string::Value data = ks.release(); COMPARE_KS_BSON(data, BSON("" << 1 << "" << 2), ONE_DESCENDING); } @@ -678,10 +692,10 @@ TEST_F(KeyStringBuilderTest, KeyStringBuilderAppendBsonElement) { << "value1" << "fieldB" << "value2"); - KeyString::HeapBuilder ks(KeyString::Version::V1, ONE_DESCENDING); + key_string::HeapBuilder ks(key_string::Version::V1, ONE_DESCENDING); ks.appendBSONElement(doc["fieldA"]); ks.appendBSONElement(doc["fieldB"]); - KeyString::Value data = ks.release(); + key_string::Value data = ks.release(); COMPARE_KS_BSON(data, BSON("" << "value1" @@ -694,12 +708,12 @@ TEST_F(KeyStringBuilderTest, KeyStringBuilderAppendBsonElement) { TEST_F(KeyStringBuilderTest, KeyStringBuilderOrdering) { // Test that ordering works. 
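The KeyStringBuilderOrdering test below builds the same document ascending and descending and checks that the two buffers have equal size but different bytes, because descending fields are stored inverted so that a plain memcmp sorts them in reverse. A small sketch of that idea using made-up encoding functions (not the key_string wire format):

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

// Illustrative sketch: bit-inverting a field's bytes is what makes an
// unsigned lexicographic memcmp yield descending order.
std::vector<std::uint8_t> encodeAscending(std::uint16_t v) {
    return {static_cast<std::uint8_t>(v >> 8), static_cast<std::uint8_t>(v)};
}

std::vector<std::uint8_t> encodeDescending(std::uint16_t v) {
    auto bytes = encodeAscending(v);
    for (auto& b : bytes)
        b = static_cast<std::uint8_t>(~b);  // invert so memcmp order flips
    return bytes;
}

int cmp(const std::vector<std::uint8_t>& a, const std::vector<std::uint8_t>& b) {
    return std::memcmp(a.data(), b.data(), a.size());
}

int main() {
    // 1 < 2 ascending, but the inverted encodings compare the other way round.
    assert(cmp(encodeAscending(1), encodeAscending(2)) < 0);
    assert(cmp(encodeDescending(1), encodeDescending(2)) > 0);
    // Same value still encodes to buffers of equal size, just different bytes.
    assert(encodeAscending(7) != encodeDescending(7));
    assert(encodeAscending(7).size() == encodeDescending(7).size());
    return 0;
}
```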
BSONObj doc = BSON("fieldA" << 1); - KeyString::HeapBuilder ks1(KeyString::Version::V1, ALL_ASCENDING); + key_string::HeapBuilder ks1(key_string::Version::V1, ALL_ASCENDING); ks1.appendBSONElement(doc["fieldA"]); - KeyString::HeapBuilder ks2(KeyString::Version::V1, ONE_DESCENDING); + key_string::HeapBuilder ks2(key_string::Version::V1, ONE_DESCENDING); ks2.appendBSONElement(doc["fieldA"]); - KeyString::Value data1 = ks1.release(); - KeyString::Value data2 = ks2.release(); + key_string::Value data1 = ks1.release(); + key_string::Value data2 = ks2.release(); ASSERT_EQUALS(data1.getSize(), data2.getSize()); // Confirm that the buffers are different, indicating that the data is stored inverted in the @@ -710,11 +724,11 @@ TEST_F(KeyStringBuilderTest, KeyStringBuilderOrdering) { TEST_F(KeyStringBuilderTest, KeyStringBuilderDiscriminator) { // test that when passed in a Discriminator it gets added. BSONObj doc = BSON("fieldA" << 1 << "fieldB" << 2); - KeyString::HeapBuilder ks( - KeyString::Version::V1, ALL_ASCENDING, KeyString::Discriminator::kExclusiveBefore); + key_string::HeapBuilder ks( + key_string::Version::V1, ALL_ASCENDING, key_string::Discriminator::kExclusiveBefore); ks.appendBSONElement(doc["fieldA"]); ks.appendBSONElement(doc["fieldB"]); - KeyString::Value data = ks.release(); + key_string::Value data = ks.release(); uint8_t appendedDescriminator = (uint8_t)(*(data.getBuffer() + (data.getSize() - 2))); uint8_t end = (uint8_t)(*(data.getBuffer() + (data.getSize() - 1))); ASSERT_EQ((uint8_t)'\001', appendedDescriminator); @@ -732,41 +746,41 @@ TEST_F(KeyStringBuilderTest, DoubleInvalidIntegerPartV0) { "\x40\x00\x00\x00\x00\x00\x01"; // ((1 << 53) << 1) + 1 const size_t size = 8; - mongo::KeyString::TypeBits tb(mongo::KeyString::Version::V0); + mongo::key_string::TypeBits tb(mongo::key_string::Version::V0); tb.appendNumberDouble(); ASSERT_THROWS_CODE( - mongo::KeyString::toBsonSafe(data, size, mongo::Ordering::make(mongo::BSONObj()), tb), + mongo::key_string::toBsonSafe(data, size, mongo::Ordering::make(mongo::BSONObj()), tb), AssertionException, 31209); } TEST_F(KeyStringBuilderTest, InvalidInfinityDecimalV0) { // Encode a Decimal positive infinity in a V1 keystring. - mongo::KeyString::Builder ks( - mongo::KeyString::Version::V1, BSON("" << Decimal128::kPositiveInfinity), ALL_ASCENDING); + mongo::key_string::Builder ks( + mongo::key_string::Version::V1, BSON("" << Decimal128::kPositiveInfinity), ALL_ASCENDING); // Construct V0 type bits that indicate a NumberDecimal has been encoded. - mongo::KeyString::TypeBits tb(mongo::KeyString::Version::V0); + mongo::key_string::TypeBits tb(mongo::key_string::Version::V0); tb.appendNumberDecimal(); // The conversion to BSON will fail because Decimal positive infinity cannot be encoded with V0 // type bits. ASSERT_THROWS_CODE( - mongo::KeyString::toBsonSafe(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, tb), + mongo::key_string::toBsonSafe(ks.getBuffer(), ks.getSize(), ALL_ASCENDING, tb), AssertionException, 31231); } TEST_F(KeyStringBuilderTest, ReasonableSize) { - // Tests that KeyString::Builders do not use an excessive amount of memory for small key + // Tests that key_string::Builders do not use an excessive amount of memory for small key // generation. These upper bounds were the calculated sizes of each type at the time this // test was written. 
- KeyString::Builder stackBuilder(KeyString::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); + key_string::Builder stackBuilder(key_string::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); static_assert(sizeof(stackBuilder) <= 624); - KeyString::HeapBuilder heapBuilder( - KeyString::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); + key_string::HeapBuilder heapBuilder( + key_string::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); static_assert(sizeof(heapBuilder) <= 104); // Use a small block size to ensure we do not use more. Additionally, the minimum allocation @@ -776,24 +790,24 @@ TEST_F(KeyStringBuilderTest, ReasonableSize) { minSize, SharedBufferFragmentBuilder::DoubleGrowStrategy( SharedBufferFragmentBuilder::kDefaultMaxBlockSize)); - KeyString::PooledBuilder pooledBuilder( - fragmentBuilder, KeyString::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); + key_string::PooledBuilder pooledBuilder( + fragmentBuilder, key_string::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); static_assert(sizeof(pooledBuilder) <= 104); // Test the dynamic memory usage reported to the sorter. - KeyString::Value value1 = stackBuilder.getValueCopy(); + key_string::Value value1 = stackBuilder.getValueCopy(); ASSERT_LTE(sizeof(value1), 32); ASSERT_LTE(value1.memUsageForSorter(), 34); - KeyString::Value value2 = heapBuilder.getValueCopy(); + key_string::Value value2 = heapBuilder.getValueCopy(); ASSERT_LTE(sizeof(value2), 32); ASSERT_LTE(value2.memUsageForSorter(), 34); - KeyString::Value value3 = heapBuilder.release(); + key_string::Value value3 = heapBuilder.release(); ASSERT_LTE(sizeof(value3), 32); ASSERT_LTE(value3.memUsageForSorter(), 64); - KeyString::Value value4 = pooledBuilder.getValueCopy(); + key_string::Value value4 = pooledBuilder.getValueCopy(); ASSERT_LTE(sizeof(value4), 32); // This is safe because we are operating on a copy of the value and it is not shared elsewhere. ASSERT_LTE(value4.memUsageForSorter(), 34); @@ -802,7 +816,7 @@ TEST_F(KeyStringBuilderTest, ReasonableSize) { // For values created with the pooledBuilder, it is invalid to call memUsageForSorter(). Instead // we look at the mem usage of the builder itself. - KeyString::Value value5 = pooledBuilder.release(); + key_string::Value value5 = pooledBuilder.release(); ASSERT_LTE(sizeof(value5), 32); ASSERT_LTE(fragmentBuilder.memUsage(), 64); } @@ -811,12 +825,12 @@ TEST_F(KeyStringBuilderTest, DiscardIfNotReleased) { SharedBufferFragmentBuilder fragmentBuilder(1024); { // Intentially not released, but the data should be discarded correctly. 
- KeyString::PooledBuilder pooledBuilder( - fragmentBuilder, KeyString::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); + key_string::PooledBuilder pooledBuilder( + fragmentBuilder, key_string::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); } { - KeyString::PooledBuilder pooledBuilder( - fragmentBuilder, KeyString::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); + key_string::PooledBuilder pooledBuilder( + fragmentBuilder, key_string::Version::kLatestVersion, BSONObj(), ALL_ASCENDING); pooledBuilder.release(); } } @@ -865,11 +879,11 @@ TEST_F(KeyStringBuilderTest, LotsOfNumbers2) { TEST_F(KeyStringBuilderTest, RecordIdOrder1) { Ordering ordering = Ordering::make(BSON("a" << 1)); - KeyString::Builder a(version, BSON("" << 5), ordering, RecordId::minLong()); - KeyString::Builder b(version, BSON("" << 5), ordering, RecordId(2)); - KeyString::Builder c(version, BSON("" << 5), ordering, RecordId(3)); - KeyString::Builder d(version, BSON("" << 6), ordering, RecordId(4)); - KeyString::Builder e(version, BSON("" << 6), ordering, RecordId(1)); + key_string::Builder a(version, BSON("" << 5), ordering, RecordId::minLong()); + key_string::Builder b(version, BSON("" << 5), ordering, RecordId(2)); + key_string::Builder c(version, BSON("" << 5), ordering, RecordId(3)); + key_string::Builder d(version, BSON("" << 6), ordering, RecordId(4)); + key_string::Builder e(version, BSON("" << 6), ordering, RecordId(1)); ASSERT_LESS_THAN(a, b); ASSERT_LESS_THAN(b, c); @@ -880,10 +894,10 @@ TEST_F(KeyStringBuilderTest, RecordIdOrder1) { TEST_F(KeyStringBuilderTest, RecordIdOrder2) { Ordering ordering = Ordering::make(BSON("a" << -1 << "b" << -1)); - KeyString::Builder a(version, BSON("" << 5 << "" << 6), ordering, RecordId::minLong()); - KeyString::Builder b(version, BSON("" << 5 << "" << 6), ordering, RecordId(5)); - KeyString::Builder c(version, BSON("" << 5 << "" << 5), ordering, RecordId(4)); - KeyString::Builder d(version, BSON("" << 3 << "" << 4), ordering, RecordId(3)); + key_string::Builder a(version, BSON("" << 5 << "" << 6), ordering, RecordId::minLong()); + key_string::Builder b(version, BSON("" << 5 << "" << 6), ordering, RecordId(5)); + key_string::Builder c(version, BSON("" << 5 << "" << 5), ordering, RecordId(4)); + key_string::Builder d(version, BSON("" << 3 << "" << 4), ordering, RecordId(3)); ASSERT_LESS_THAN(a, b); ASSERT_LESS_THAN(b, c); @@ -896,9 +910,9 @@ TEST_F(KeyStringBuilderTest, RecordIdOrder2) { TEST_F(KeyStringBuilderTest, RecordIdOrder2Double) { Ordering ordering = Ordering::make(BSON("a" << -1 << "b" << -1)); - KeyString::Builder a(version, BSON("" << 5.0 << "" << 6.0), ordering, RecordId::minLong()); - KeyString::Builder b(version, BSON("" << 5.0 << "" << 6.0), ordering, RecordId(5)); - KeyString::Builder c(version, BSON("" << 3.0 << "" << 4.0), ordering, RecordId(3)); + key_string::Builder a(version, BSON("" << 5.0 << "" << 6.0), ordering, RecordId::minLong()); + key_string::Builder b(version, BSON("" << 5.0 << "" << 6.0), ordering, RecordId(5)); + key_string::Builder c(version, BSON("" << 3.0 << "" << 4.0), ordering, RecordId(3)); ASSERT_LESS_THAN(a, b); ASSERT_LESS_THAN(b, c); @@ -921,11 +935,11 @@ TEST_F(KeyStringBuilderTest, Timestamp) { ASSERT_BSONOBJ_LT(b, c); ASSERT_BSONOBJ_LT(c, d); - KeyString::Builder ka(version, a, ALL_ASCENDING); - KeyString::Builder kb(version, b, ALL_ASCENDING); - KeyString::Builder kc(version, c, ALL_ASCENDING); - KeyString::Builder kd(version, d, ALL_ASCENDING); - KeyString::Builder ke(version, e, ALL_ASCENDING); + key_string::Builder 
ka(version, a, ALL_ASCENDING); + key_string::Builder kb(version, b, ALL_ASCENDING); + key_string::Builder kc(version, c, ALL_ASCENDING); + key_string::Builder kd(version, d, ALL_ASCENDING); + key_string::Builder ke(version, e, ALL_ASCENDING); ASSERT(ka.compare(kb) < 0); ASSERT(kb.compare(kc) < 0); @@ -944,10 +958,10 @@ TEST_F(KeyStringBuilderTest, Timestamp) { ASSERT(c.woCompare(b, ALL_ASCENDING) < 0); ASSERT(b.woCompare(a, ALL_ASCENDING) < 0); - KeyString::Builder ka(version, a, ALL_ASCENDING); - KeyString::Builder kb(version, b, ALL_ASCENDING); - KeyString::Builder kc(version, c, ALL_ASCENDING); - KeyString::Builder kd(version, d, ALL_ASCENDING); + key_string::Builder ka(version, a, ALL_ASCENDING); + key_string::Builder kb(version, b, ALL_ASCENDING); + key_string::Builder kc(version, c, ALL_ASCENDING); + key_string::Builder kd(version, d, ALL_ASCENDING); ASSERT(ka.compare(kb) > 0); ASSERT(kb.compare(kc) > 0); @@ -972,7 +986,7 @@ TEST_F(KeyStringBuilderTest, AllTypesRoundtrip) { } } -const std::vector& getInterestingElements(KeyString::Version version) { +const std::vector& getInterestingElements(key_string::Version version) { static std::vector elements; elements.clear(); @@ -1040,7 +1054,7 @@ const std::vector& getInterestingElements(KeyString::Version version) { // Something that needs multiple bytes of typeBits elements.push_back(BSON("" << BSON_ARRAY("" << BSONSymbol("") << 0 << 0ll << 0.0 << -0.0))); - if (version != KeyString::Version::V0) { + if (version != key_string::Version::V0) { // Something with exceptional typeBits for Decimal elements.push_back( BSON("" << BSON_ARRAY("" << BSONSymbol("") << Decimal128::kNegativeInfinity @@ -1056,7 +1070,7 @@ const std::vector& getInterestingElements(KeyString::Version version) { elements.push_back(BSON("" << 0ll)); elements.push_back(BSON("" << 0.0)); elements.push_back(BSON("" << -0.0)); - if (version != KeyString::Version::V0) { + if (version != key_string::Version::V0) { Decimal128("0.0.0000000"); Decimal128("-0E1000"); } @@ -1114,7 +1128,7 @@ const std::vector& getInterestingElements(KeyString::Version version) { elements.push_back(BSON("" << -(dNum + -.1))); } - if (version != KeyString::Version::V0) { + if (version != key_string::Version::V0) { const Decimal128 dec(static_cast(lNum)); const Decimal128 one("1"); const Decimal128 half("0.5"); @@ -1168,7 +1182,7 @@ const std::vector& getInterestingElements(KeyString::Version version) { elements.push_back(BSON("" << closestBelow)); } - if (version != KeyString::Version::V0) { + if (version != key_string::Version::V0) { // Numbers that are hard to round to between binary and decimal. 
elements.push_back(BSON("" << 0.1)); elements.push_back(BSON("" << Decimal128("0.100000000"))); @@ -1204,18 +1218,18 @@ const std::vector& getInterestingElements(KeyString::Version version) { } // Tricky double precision number for binary/decimal conversion: very close to a decimal - if (version != KeyString::Version::V0) + if (version != key_string::Version::V0) elements.push_back(BSON("" << Decimal128("3743626360493413E-165"))); elements.push_back(BSON("" << 3743626360493413E-165)); return elements; } -void testPermutation(KeyString::Version version, +void testPermutation(key_string::Version version, const std::vector& elementsOrig, const std::vector& orderings, bool debug) { - // Since KeyString::Builders are compared using memcmp we can assume it provides a total + // Since key_string::Builders are compared using memcmp we can assume it provides a total // ordering // such // that there won't be cases where (a < b && b < c && !(a < c)). This test still needs to ensure @@ -1241,13 +1255,13 @@ void testPermutation(KeyString::Version version, LOGV2(22230, "\to1: {o1}", "o1"_attr = o1); ROUNDTRIP_ORDER(version, o1, ordering); - KeyString::Builder k1(version, o1, ordering); + key_string::Builder k1(version, o1, ordering); if (i + 1 < elements.size()) { const BSONObj& o2 = elements[i + 1]; if (debug) LOGV2(22231, "\t\t o2: {o2}", "o2"_attr = o2); - KeyString::Builder k2(version, o2, ordering); + key_string::Builder k2(version, o2, ordering); int bsonCmp = o1.woCompare(o2, ordering); invariant(bsonCmp <= 0); // We should be sorted... @@ -1304,7 +1318,7 @@ std::vector thinElements(std::vector elements, namespace { RecordId ridFromOid(const OID& oid) { - KeyString::Builder builder(KeyString::Version::kLatestVersion); + key_string::Builder builder(key_string::Version::kLatestVersion); builder.appendOID(oid); return RecordId(builder.getBuffer(), builder.getSize()); } @@ -1320,29 +1334,29 @@ TEST_F(KeyStringBuilderTest, RecordIdStr) { const RecordId rid = ridFromOid(OID::from(buf)); { // Test encoding / decoding of single RecordIds - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); invariant(ks.getSize() == 14); - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); if (rid.isValid()) { - ASSERT_GT(ks, KeyString::Builder(version, RecordId(1))); - ASSERT_GT(ks, KeyString::Builder(version, ridFromOid(OID()))); - ASSERT_LT(ks, KeyString::Builder(version, ridFromOid(OID::max()))); + ASSERT_GT(ks, key_string::Builder(version, RecordId(1))); + ASSERT_GT(ks, key_string::Builder(version, ridFromOid(OID()))); + ASSERT_LT(ks, key_string::Builder(version, ridFromOid(OID::max()))); char bufLt[kSize]; memcpy(bufLt, buf, kSize); bufLt[kSize - 1] -= 1; auto ltRid = ridFromOid(OID::from(bufLt)); ASSERT(ltRid < rid); - ASSERT_GT(ks, KeyString::Builder(version, ltRid)); + ASSERT_GT(ks, key_string::Builder(version, ltRid)); char bufGt[kSize]; memcpy(bufGt, buf, kSize); bufGt[kSize - 1] += 1; auto gtRid = ridFromOid(OID::from(bufGt)); ASSERT(gtRid > rid); - ASSERT_LT(ks, KeyString::Builder(version, gtRid)); + ASSERT_LT(ks, key_string::Builder(version, gtRid)); } } @@ -1352,13 +1366,13 @@ TEST_F(KeyStringBuilderTest, RecordIdStr) { RecordId other = ridFromOid(OID::from(otherBuf)); if (rid == other) { - ASSERT_EQ(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + ASSERT_EQ(key_string::Builder(version, rid), key_string::Builder(version, other)); } 
if (rid < other) { - ASSERT_LT(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + ASSERT_LT(key_string::Builder(version, rid), key_string::Builder(version, other)); } if (rid > other) { - ASSERT_GT(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + ASSERT_GT(key_string::Builder(version, rid), key_string::Builder(version, other)); } } } @@ -1367,7 +1381,7 @@ TEST_F(KeyStringBuilderTest, RecordIdStr) { namespace { RecordId ridFromStr(const char* str, size_t len) { - KeyString::Builder builder(KeyString::Version::kLatestVersion); + key_string::Builder builder(key_string::Version::kLatestVersion); builder.appendString(mongo::StringData(str, len)); return RecordId(builder.getBuffer(), builder.getSize()); } @@ -1380,20 +1394,20 @@ TEST_F(KeyStringBuilderTest, RecordIdStrBig1SizeSegment) { const int size = 90; const auto ridStr = std::string(size, 'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad); - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } { // Max 1-byte encoded string size is 127B: 1B CType + ridStr + string terminator const int size = 125; const auto ridStr = std::string(size, 'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad); - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } } @@ -1404,29 +1418,29 @@ TEST_F(KeyStringBuilderTest, RecordIdStrBig2SizeSegments) { const int size = 126; const auto ridStr = std::string(size, 'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad + 1); // 1 byte with continuation bit - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } { const int size = 128; const auto ridStr = std::string(size, 'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad + 1); // 1 byte with continuation bit - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } { // Max 2-byte encoded string size is 16383B: 1B CType + ridStr + string terminator const int size = 16381; const auto ridStr = std::string(size, 
'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad + 1); // 1 byte with continuation bit - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } } @@ -1437,20 +1451,20 @@ TEST_F(KeyStringBuilderTest, RecordIdStrBig3SizeSegments) { const int size = 16382; const auto ridStr = std::string(size, 'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad + 2); // 2 bytes with continuation bit - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } { // Max 3-byte encoded string size is 2097151B: 1B CType + ridStr + string terminator const int size = 2097149; const auto ridStr = std::string(size, 'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad + 2); // 2 bytes with continuation bit - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } } @@ -1461,20 +1475,20 @@ TEST_F(KeyStringBuilderTest, RecordIdStrBig4SizeSegments) { const int size = 2097150; const auto ridStr = std::string(size, 'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad + 3); // 3 bytes with continuation bit - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } { // Support up to RecordId::kBigStrMaxSize const int size = RecordId::kBigStrMaxSize - 2 /* CType + string terminator */; const auto ridStr = std::string(size, 'a'); auto rid = ridFromStr(ridStr.c_str(), size); - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_EQ(ks.getSize(), size + pad + 3); // 3 bytes with continuation bit - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); - ASSERT_EQ(0, KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(0, key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } } @@ -1486,26 +1500,26 @@ TEST_F(KeyStringBuilderTest, RecordIdStrBigSizeWithoutRecordIdStr) { const int ridStrlen = 90; const auto ridStr = 
std::string(ridStrlen, 'a'); auto rid = ridFromStr(ridStr.c_str(), ridStrlen); - KeyString::Builder ks(version); + key_string::Builder ks(version); ks.appendString(mongo::StringData(str, strlen(str))); ks.appendRecordId(rid); ASSERT_EQ(ks.getSize(), strlen(str) + padStr + ridStrlen + pad); - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); ASSERT_EQ(strlen(str) + padStr, - KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } { const int ridStrlen = 260; const auto ridStr = std::string(ridStrlen, 'a'); auto rid = ridFromStr(ridStr.c_str(), ridStrlen); - KeyString::Builder ks(version); + key_string::Builder ks(version); ks.appendString(mongo::StringData(str, strlen(str))); ks.appendRecordId(rid); ASSERT_EQ(ks.getSize(), strlen(str) + padStr + ridStrlen + pad + 1); // 1 0x80 cont byte - ASSERT_EQ(KeyString::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(key_string::decodeRecordIdStrAtEnd(ks.getBuffer(), ks.getSize()), rid); ASSERT_EQ(strlen(str) + padStr, - KeyString::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); + key_string::sizeWithoutRecordIdStrAtEnd(ks.getBuffer(), ks.getSize())); } } @@ -1606,11 +1620,11 @@ TEST_F(KeyStringBuilderTest, NaNs) { // Since we only output a single NaN, we can only do ROUNDTRIP testing for nan1. ROUNDTRIP(version, BSON("" << nan1)); - const KeyString::Builder ks1a(version, BSON("" << nan1), ONE_ASCENDING); - const KeyString::Builder ks1d(version, BSON("" << nan1), ONE_DESCENDING); + const key_string::Builder ks1a(version, BSON("" << nan1), ONE_ASCENDING); + const key_string::Builder ks1d(version, BSON("" << nan1), ONE_DESCENDING); - const KeyString::Builder ks2a(version, BSON("" << nan2), ONE_ASCENDING); - const KeyString::Builder ks2d(version, BSON("" << nan2), ONE_DESCENDING); + const key_string::Builder ks2a(version, BSON("" << nan2), ONE_ASCENDING); + const key_string::Builder ks2d(version, BSON("" << nan2), ONE_DESCENDING); ASSERT_EQ(ks1a, ks2a); ASSERT_EQ(ks1d, ks2d); @@ -1620,18 +1634,18 @@ TEST_F(KeyStringBuilderTest, NaNs) { ASSERT(std::isnan(toBson(ks1d, ONE_DESCENDING)[""].Double())); ASSERT(std::isnan(toBson(ks2d, ONE_DESCENDING)[""].Double())); - if (version == KeyString::Version::V0) + if (version == key_string::Version::V0) return; const auto nan3 = Decimal128::kPositiveNaN; const auto nan4 = Decimal128::kNegativeNaN; // Since we only output a single NaN, we can only do ROUNDTRIP testing for nan1. 
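The NaNs test depends on the encoder emitting a single canonical NaN, so keys built from different NaN payloads (or from Decimal128 positive/negative NaN) compare equal and only nan1/nan3 can be round-trip tested. Here is a standalone sketch of that canonicalization for doubles; `encodeForIndex` and `canonicalNaN` are illustrative names, not the key_string API:

```cpp
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

// Different NaN bit patterns exist, but a canonicalizing key encoder maps
// them all to one representation so any two NaN keys compare equal.
std::uint64_t bits(double d) {
    std::uint64_t b;
    std::memcpy(&b, &d, sizeof(b));
    return b;
}

double canonicalNaN() {
    return std::numeric_limits<double>::quiet_NaN();
}

double encodeForIndex(double d) {
    return std::isnan(d) ? canonicalNaN() : d;  // collapse every NaN payload
}

int main() {
    double nan1 = std::numeric_limits<double>::quiet_NaN();
    double nan2 = -std::nan("1234");  // different sign/payload, still a NaN
    assert(std::isnan(nan1) && std::isnan(nan2));
    assert(bits(nan1) != bits(nan2));                                  // distinct patterns
    assert(bits(encodeForIndex(nan1)) == bits(encodeForIndex(nan2)));  // one canonical key
    return 0;
}
```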
ROUNDTRIP(version, BSON("" << nan3)); - const KeyString::Builder ks3a(version, BSON("" << nan3), ONE_ASCENDING); - const KeyString::Builder ks3d(version, BSON("" << nan3), ONE_DESCENDING); + const key_string::Builder ks3a(version, BSON("" << nan3), ONE_ASCENDING); + const key_string::Builder ks3d(version, BSON("" << nan3), ONE_DESCENDING); - const KeyString::Builder ks4a(version, BSON("" << nan4), ONE_ASCENDING); - const KeyString::Builder ks4d(version, BSON("" << nan4), ONE_DESCENDING); + const key_string::Builder ks4a(version, BSON("" << nan4), ONE_ASCENDING); + const key_string::Builder ks4d(version, BSON("" << nan4), ONE_DESCENDING); ASSERT_EQ(ks1a, ks4a); ASSERT_EQ(ks1d, ks4d); @@ -1647,25 +1661,25 @@ TEST_F(KeyStringBuilderTest, RecordIds) { const RecordId rid = RecordId(1ll << i); { // Test encoding / decoding of single RecordIds - const KeyString::Builder ks(version, rid); + const key_string::Builder ks(version, rid); ASSERT_GTE(ks.getSize(), 2u); ASSERT_LTE(ks.getSize(), 10u); - ASSERT_EQ(KeyString::decodeRecordIdLongAtEnd(ks.getBuffer(), ks.getSize()), rid); + ASSERT_EQ(key_string::decodeRecordIdLongAtEnd(ks.getBuffer(), ks.getSize()), rid); { BufReader reader(ks.getBuffer(), ks.getSize()); - ASSERT_EQ(KeyString::decodeRecordIdLong(&reader), rid); + ASSERT_EQ(key_string::decodeRecordIdLong(&reader), rid); ASSERT(reader.atEof()); } if (rid.isValid()) { - ASSERT_GTE(ks, KeyString::Builder(version, RecordId(1))); - ASSERT_GT(ks, KeyString::Builder(version, RecordId::minLong())); - ASSERT_LT(ks, KeyString::Builder(version, RecordId::maxLong())); + ASSERT_GTE(ks, key_string::Builder(version, RecordId(1))); + ASSERT_GT(ks, key_string::Builder(version, RecordId::minLong())); + ASSERT_LT(ks, key_string::Builder(version, RecordId::maxLong())); - ASSERT_GT(ks, KeyString::Builder(version, RecordId(rid.getLong() - 1))); - ASSERT_LT(ks, KeyString::Builder(version, RecordId(rid.getLong() + 1))); + ASSERT_GT(ks, key_string::Builder(version, RecordId(rid.getLong() - 1))); + ASSERT_LT(ks, key_string::Builder(version, RecordId(rid.getLong() + 1))); } } @@ -1673,18 +1687,18 @@ TEST_F(KeyStringBuilderTest, RecordIds) { RecordId other = RecordId(1ll << j); if (rid == other) { - ASSERT_EQ(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + ASSERT_EQ(key_string::Builder(version, rid), key_string::Builder(version, other)); } if (rid < other) { - ASSERT_LT(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + ASSERT_LT(key_string::Builder(version, rid), key_string::Builder(version, other)); } if (rid > other) { - ASSERT_GT(KeyString::Builder(version, rid), KeyString::Builder(version, other)); + ASSERT_GT(key_string::Builder(version, rid), key_string::Builder(version, other)); } { // Test concatenating RecordIds like in a unique index. 
- KeyString::Builder ks(version); + key_string::Builder ks(version); ks.appendRecordId(RecordId::maxLong()); // uses all bytes ks.appendRecordId(rid); ks.appendRecordId(RecordId(0xDEADBEEF)); // uses some extra bytes @@ -1693,17 +1707,17 @@ TEST_F(KeyStringBuilderTest, RecordIds) { ks.appendRecordId(rid); ks.appendRecordId(other); - ASSERT_EQ(KeyString::decodeRecordIdLongAtEnd(ks.getBuffer(), ks.getSize()), other); + ASSERT_EQ(key_string::decodeRecordIdLongAtEnd(ks.getBuffer(), ks.getSize()), other); // forward scan BufReader reader(ks.getBuffer(), ks.getSize()); - ASSERT_EQ(KeyString::decodeRecordIdLong(&reader), RecordId::maxLong()); - ASSERT_EQ(KeyString::decodeRecordIdLong(&reader), rid); - ASSERT_EQ(KeyString::decodeRecordIdLong(&reader), RecordId(0xDEADBEEF)); - ASSERT_EQ(KeyString::decodeRecordIdLong(&reader), rid); - ASSERT_EQ(KeyString::decodeRecordIdLong(&reader), RecordId(1)); - ASSERT_EQ(KeyString::decodeRecordIdLong(&reader), rid); - ASSERT_EQ(KeyString::decodeRecordIdLong(&reader), other); + ASSERT_EQ(key_string::decodeRecordIdLong(&reader), RecordId::maxLong()); + ASSERT_EQ(key_string::decodeRecordIdLong(&reader), rid); + ASSERT_EQ(key_string::decodeRecordIdLong(&reader), RecordId(0xDEADBEEF)); + ASSERT_EQ(key_string::decodeRecordIdLong(&reader), rid); + ASSERT_EQ(key_string::decodeRecordIdLong(&reader), RecordId(1)); + ASSERT_EQ(key_string::decodeRecordIdLong(&reader), rid); + ASSERT_EQ(key_string::decodeRecordIdLong(&reader), other); ASSERT(reader.atEof()); } } @@ -1742,9 +1756,9 @@ BSONObj buildKeyWhichWillHaveNByteOfTypeBits(size_t n, bool allZeros) { return obj; } -void checkKeyWithNByteOfTypeBits(KeyString::Version version, size_t n, bool allZeros) { +void checkKeyWithNByteOfTypeBits(key_string::Version version, size_t n, bool allZeros) { const BSONObj orig = buildKeyWhichWillHaveNByteOfTypeBits(n, allZeros); - const KeyString::Builder ks(version, orig, ALL_ASCENDING); + const key_string::Builder ks(version, orig, ALL_ASCENDING); const size_t typeBitsSize = ks.getTypeBits().getSize(); if (n == 1 || allZeros) { // Case 1&2 @@ -1764,7 +1778,7 @@ void checkKeyWithNByteOfTypeBits(KeyString::Version version, size_t n, bool allZ // Also test TypeBits::fromBuffer() BufReader bufReader(ks.getTypeBits().getBuffer(), typeBitsSize); - KeyString::TypeBits newTypeBits = KeyString::TypeBits::fromBuffer(version, &bufReader); + key_string::TypeBits newTypeBits = key_string::TypeBits::fromBuffer(version, &bufReader); ASSERT_EQ(hexblob::encode(newTypeBits.getBuffer(), newTypeBits.getSize()), hexblob::encode(ks.getTypeBits().getBuffer(), ks.getTypeBits().getSize())); } @@ -1787,14 +1801,14 @@ TEST_F(KeyStringBuilderTest, VeryLargeString) { } TEST_F(KeyStringBuilderTest, ToBsonSafeShouldNotTerminate) { - KeyString::TypeBits typeBits(KeyString::Version::V1); + key_string::TypeBits typeBits(key_string::Version::V1); const char invalidString[] = { 60, // CType::kStringLike 55, // Non-null terminated }; ASSERT_THROWS_CODE( - KeyString::toBsonSafe(invalidString, sizeof(invalidString), ALL_ASCENDING, typeBits), + key_string::toBsonSafe(invalidString, sizeof(invalidString), ALL_ASCENDING, typeBits), AssertionException, 50816); @@ -1811,14 +1825,14 @@ TEST_F(KeyStringBuilderTest, ToBsonSafeShouldNotTerminate) { 0, }; ASSERT_THROWS_CODE( - KeyString::toBsonSafe(invalidNumber, sizeof(invalidNumber), ALL_ASCENDING, typeBits), + key_string::toBsonSafe(invalidNumber, sizeof(invalidNumber), ALL_ASCENDING, typeBits), AssertionException, 50810); } TEST_F(KeyStringBuilderTest, InvalidDecimalExponent) { 
const Decimal128 dec("1125899906842624.1"); - const KeyString::Builder ks(KeyString::Version::V1, BSON("" << dec), ALL_ASCENDING); + const key_string::Builder ks(key_string::Version::V1, BSON("" << dec), ALL_ASCENDING); // Overwrite the 1st byte to 0, corrupting the exponent. This is meant to reproduce // SERVER-34767. @@ -1826,14 +1840,14 @@ TEST_F(KeyStringBuilderTest, InvalidDecimalExponent) { ksBuffer[1] = 0; ASSERT_THROWS_CODE( - KeyString::toBsonSafe(ksBuffer, ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), + key_string::toBsonSafe(ksBuffer, ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), AssertionException, 50814); } TEST_F(KeyStringBuilderTest, InvalidDecimalZero) { - const KeyString::Builder ks( - KeyString::Version::V1, BSON("" << Decimal128("-0")), ALL_ASCENDING); + const key_string::Builder ks( + key_string::Version::V1, BSON("" << Decimal128("-0")), ALL_ASCENDING); char* ksBuffer = (char*)ks.getBuffer(); ksBuffer[2] = 100; @@ -1842,14 +1856,14 @@ TEST_F(KeyStringBuilderTest, InvalidDecimalZero) { typeBits[1] = 147; ASSERT_THROWS_CODE( - KeyString::toBsonSafe(ksBuffer, ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), + key_string::toBsonSafe(ksBuffer, ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), AssertionException, 50846); } TEST_F(KeyStringBuilderTest, InvalidDecimalContinuation) { auto elem = Decimal128("1.797693134862315708145274237317043E308"); - const KeyString::Builder ks(KeyString::Version::V1, BSON("" << elem), ALL_ASCENDING); + const key_string::Builder ks(key_string::Version::V1, BSON("" << elem), ALL_ASCENDING); uint8_t* ksBuffer = (uint8_t*)ks.getBuffer(); ksBuffer[2] = 239; @@ -1858,7 +1872,7 @@ TEST_F(KeyStringBuilderTest, InvalidDecimalContinuation) { typeBits[1] = 231; ASSERT_THROWS_CODE( - KeyString::toBsonSafe((char*)ksBuffer, ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), + key_string::toBsonSafe((char*)ksBuffer, ks.getSize(), ALL_ASCENDING, ks.getTypeBits()), AssertionException, 50850); } @@ -1868,9 +1882,9 @@ TEST_F(KeyStringBuilderTest, RandomizedInputsForToBsonSafe) { std::uniform_int_distribution randomNum(std::numeric_limits::min(), std::numeric_limits::max()); - const auto interestingElements = getInterestingElements(KeyString::Version::V1); + const auto interestingElements = getInterestingElements(key_string::Version::V1); for (const auto& elem : interestingElements) { - const KeyString::Builder ks(KeyString::Version::V1, elem, ALL_ASCENDING); + const key_string::Builder ks(key_string::Version::V1, elem, ALL_ASCENDING); auto ksBuffer = SharedBuffer::allocate(ks.getSize()); memcpy(ksBuffer.get(), ks.getBuffer(), ks.getSize()); @@ -1892,21 +1906,21 @@ TEST_F(KeyStringBuilderTest, RandomizedInputsForToBsonSafe) { BufReader reader(tbBuffer.get(), ks.getTypeBits().getSize()); try { - auto newTypeBits = KeyString::TypeBits::fromBuffer(KeyString::Version::V1, &reader); - KeyString::toBsonSafe(ksBuffer.get(), ks.getSize(), ALL_ASCENDING, newTypeBits); + auto newTypeBits = key_string::TypeBits::fromBuffer(key_string::Version::V1, &reader); + key_string::toBsonSafe(ksBuffer.get(), ks.getSize(), ALL_ASCENDING, newTypeBits); } catch (const AssertionException&) { // The expectation is that the randomized buffer is likely an invalid - // KeyString::Builder, + // key_string::Builder, // however attempting to decode it should fail gracefully. } // Retest with descending. 
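RandomizedInputsForToBsonSafe (above) and the key_string_to_bson_fuzzer changes later in this patch share the same contract: decoding arbitrary or corrupted bytes must either produce valid output or fail with a typed exception, never crash the process. A sketch of that harness shape with a made-up decoder and error type; only the loop structure parallels the test:

```cpp
#include <cassert>
#include <cstdint>
#include <random>
#include <stdexcept>
#include <vector>

struct CorruptKeyError : std::runtime_error {
    using std::runtime_error::runtime_error;
};

// Hypothetical decoder: rejects anything that does not start with a magic byte.
int decodeOrThrow(const std::vector<std::uint8_t>& buf) {
    if (buf.empty() || buf[0] != 0x2B)
        throw CorruptKeyError("unrecognized encoding");
    return static_cast<int>(buf.size());
}

int main() {
    std::mt19937 gen(42);
    std::uniform_int_distribution<int> byte(0, 255);
    const std::vector<std::uint8_t> key = {0x2B, 0x0B, 0x04};
    for (int i = 0; i < 1000; ++i) {
        auto corrupted = key;
        corrupted[gen() % corrupted.size()] = static_cast<std::uint8_t>(byte(gen));
        try {
            decodeOrThrow(corrupted);  // may succeed on harmless corruption
        } catch (const CorruptKeyError&) {
            // expected for invalid buffers: fail gracefully, never crash
        }
    }
    return 0;
}
```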
try { - auto newTypeBits = KeyString::TypeBits::fromBuffer(KeyString::Version::V1, &reader); - KeyString::toBsonSafe(ksBuffer.get(), ks.getSize(), ONE_DESCENDING, newTypeBits); + auto newTypeBits = key_string::TypeBits::fromBuffer(key_string::Version::V1, &reader); + key_string::toBsonSafe(ksBuffer.get(), ks.getSize(), ONE_DESCENDING, newTypeBits); } catch (const AssertionException&) { // The expectation is that the randomized buffer is likely an invalid - // KeyString::Builder, + // key_string::Builder, // however attempting to decode it should fail gracefully. } } @@ -1921,7 +1935,7 @@ typedef std::vector Numbers; * Evaluates ROUNDTRIP on all items in Numbers a sufficient number of times to take at least * kMinPerfMicros microseconds. Logs the elapsed time per ROUNDTRIP evaluation. */ -void perfTest(KeyString::Version version, const Numbers& numbers) { +void perfTest(key_string::Version version, const Numbers& numbers) { uint64_t micros = 0; uint64_t iters; // Ensure at least 16 iterations are done and at least 50 milliseconds is timed @@ -1931,10 +1945,10 @@ void perfTest(KeyString::Version version, const Numbers& numbers) { for (uint64_t i = 0; i < iters; i++) for (const auto& item : numbers) { - // Assuming there are sufficient invariants in the to/from KeyString::Builder + // Assuming there are sufficient invariants in the to/from key_string::Builder // methods // that calls will not be optimized away. - const KeyString::Builder ks(version, item, ALL_ASCENDING); + const key_string::Builder ks(version, item, ALL_ASCENDING); const BSONObj& converted = toBson(ks, ALL_ASCENDING); invariant(converted.binaryEqual(item)); } @@ -1952,7 +1966,7 @@ void perfTest(KeyString::Version version, const Numbers& numbers) { "_1E3_micros_static_cast_double_iters_numbers_size"_attr = 1E3 * micros / static_cast(iters * numbers.size()), "mongo_KeyString_keyStringVersionToString_version"_attr = - mongo::KeyString::keyStringVersionToString(version), + mongo::key_string::keyStringVersionToString(version), "kDebugBuild_DEBUG_BUILD"_attr = (kDebugBuild ? 
" (DEBUG BUILD!)" : ""), "minmax_first"_attr = (*minmax.first)[""], "minmax_second"_attr = (*minmax.second)[""]); @@ -2014,7 +2028,7 @@ TEST_F(KeyStringBuilderTest, CommonDecimalPerf) { std::mt19937 gen(newSeed()); std::exponential_distribution expReal(1e-3); - if (version == KeyString::Version::V0) + if (version == key_string::Version::V0) return; std::vector numbers; @@ -2032,7 +2046,7 @@ TEST_F(KeyStringBuilderTest, UniformDecimalPerf) { std::uniform_int_distribution uniformInt64(std::numeric_limits::min(), std::numeric_limits::max()); - if (version == KeyString::Version::V0) + if (version == key_string::Version::V0) return; std::vector numbers; @@ -2052,7 +2066,7 @@ TEST_F(KeyStringBuilderTest, DecimalFromUniformDoublePerf) { std::uniform_int_distribution uniformInt64(std::numeric_limits::min(), std::numeric_limits::max()); - if (version == KeyString::Version::V0) + if (version == key_string::Version::V0) return; // In addition to serve as a data ponit for performance, this test also generates many decimal @@ -2078,8 +2092,8 @@ DEATH_TEST(KeyStringBuilderTest, ToBsonPromotesAssertionsToTerminate, "terminate 60, // CType::kStringLike 55, // Non-null terminated }; - KeyString::TypeBits typeBits(KeyString::Version::V1); - KeyString::toBson(invalidString, sizeof(invalidString), ALL_ASCENDING, typeBits); + key_string::TypeBits typeBits(key_string::Version::V1); + key_string::toBson(invalidString, sizeof(invalidString), ALL_ASCENDING, typeBits); } // The following tests run last because they take a very long time. @@ -2091,7 +2105,7 @@ TEST_F(KeyStringBuilderTest, LotsOfNumbers3) { futures.push_back(stdx::async(stdx::launch::async, [k, this] { for (double i = -1100; i < 1100; i++) { for (double j = 0; j < 52; j++) { - const auto V1 = KeyString::Version::V1; + const auto V1 = key_string::Version::V1; Decimal128::RoundingPrecision roundingPrecisions[]{ Decimal128::kRoundTo15Digits, Decimal128::kRoundTo34Digits}; Decimal128::RoundingMode roundingModes[]{Decimal128::kRoundTowardNegative, @@ -2182,16 +2196,16 @@ TEST_F(KeyStringBuilderTest, NumberOrderLots) { Ordering ordering = Ordering::make(BSON("a" << 1)); - std::vector> KeyStringBuilders; + std::vector> KeyStringBuilders; for (size_t i = 0; i < numbers.size(); i++) { KeyStringBuilders.push_back( - std::make_unique(version, numbers[i], ordering)); + std::make_unique(version, numbers[i], ordering)); } for (size_t i = 0; i < numbers.size(); i++) { for (size_t j = 0; j < numbers.size(); j++) { - const KeyString::Builder& a = *KeyStringBuilders[i]; - const KeyString::Builder& b = *KeyStringBuilders[j]; + const key_string::Builder& a = *KeyStringBuilders[i]; + const key_string::Builder& b = *KeyStringBuilders[j]; ASSERT_EQUALS(a.compare(b), -b.compare(a)); if (a.compare(b) != diff --git a/src/mongo/db/storage/key_string_to_bson_fuzzer.cpp b/src/mongo/db/storage/key_string_to_bson_fuzzer.cpp index 3ed29c66d5ba3..c4c34dd180500 100644 --- a/src/mongo/db/storage/key_string_to_bson_fuzzer.cpp +++ b/src/mongo/db/storage/key_string_to_bson_fuzzer.cpp @@ -32,31 +32,31 @@ const mongo::Ordering kAllAscending = mongo::Ordering::make(mongo::BSONObj()); const mongo::Ordering kOneDescending = mongo::Ordering::make(BSON("a" << -1)); -const auto kV1 = mongo::KeyString::Version::V1; -const auto kV0 = mongo::KeyString::Version::V0; +const auto kV1 = mongo::key_string::Version::V1; +const auto kV0 = mongo::key_string::Version::V0; uint8_t getZeroType(char val) { switch (val % 10) { case 0: - return mongo::KeyString::TypeBits::kInt; + return 
mongo::key_string::TypeBits::kInt; case 1: - return mongo::KeyString::TypeBits::kDouble; + return mongo::key_string::TypeBits::kDouble; case 2: - return mongo::KeyString::TypeBits::kLong; + return mongo::key_string::TypeBits::kLong; case 3: - return mongo::KeyString::TypeBits::kNegativeDoubleZero; + return mongo::key_string::TypeBits::kNegativeDoubleZero; case 4: - return mongo::KeyString::TypeBits::kDecimalZero0xxx; + return mongo::key_string::TypeBits::kDecimalZero0xxx; case 5: - return mongo::KeyString::TypeBits::kDecimalZero1xxx; + return mongo::key_string::TypeBits::kDecimalZero1xxx; case 6: - return mongo::KeyString::TypeBits::kDecimalZero2xxx; + return mongo::key_string::TypeBits::kDecimalZero2xxx; case 7: - return mongo::KeyString::TypeBits::kDecimalZero3xxx; + return mongo::key_string::TypeBits::kDecimalZero3xxx; case 8: - return mongo::KeyString::TypeBits::kDecimalZero4xxx; + return mongo::key_string::TypeBits::kDecimalZero4xxx; case 9: - return mongo::KeyString::TypeBits::kDecimalZero5xxx; + return mongo::key_string::TypeBits::kDecimalZero5xxx; default: return 0x00; } @@ -69,7 +69,7 @@ extern "C" int LLVMFuzzerTestOneInput(const char* Data, size_t Size) { const auto version = Data[0] % 2 == 0 ? kV0 : kV1; const auto ord = Data[1] % 2 == 0 ? kAllAscending : kOneDescending; - mongo::KeyString::TypeBits tb(version); + mongo::key_string::TypeBits tb(version); const size_t len = Data[2]; if (len > static_cast(Size - 3)) @@ -114,7 +114,7 @@ extern "C" int LLVMFuzzerTestOneInput(const char* Data, size_t Size) { try { mongo::BSONObj obj = - mongo::KeyString::toBsonSafe(&Data[2 + len], Size - (2 + len), ord, tb); + mongo::key_string::toBsonSafe(&Data[2 + len], Size - (2 + len), ord, tb); // We want to make sure the generated BSON is valid invariant(mongo::validateBSON(obj.objdata(), obj.objsize())); } catch (const mongo::AssertionException&) { diff --git a/src/mongo/db/storage/kv/SConscript b/src/mongo/db/storage/kv/SConscript index 3c3b9237b9773..aed9228276e44 100644 --- a/src/mongo/db/storage/kv/SConscript +++ b/src/mongo/db/storage/kv/SConscript @@ -24,10 +24,12 @@ env.Library( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/catalog/catalog_impl', + '$BUILD_DIR/mongo/db/concurrency/lock_manager', + '$BUILD_DIR/mongo/db/index/index_access_method', '$BUILD_DIR/mongo/db/multitenancy', - '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/db/service_context_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/key_string', '$BUILD_DIR/mongo/db/storage/record_store_base', '$BUILD_DIR/mongo/db/storage/storage_options', diff --git a/src/mongo/db/storage/kv/durable_catalog_test.cpp b/src/mongo/db/storage/kv/durable_catalog_test.cpp index 5a29311d40b18..2d5d1f112858c 100644 --- a/src/mongo/db/storage/kv/durable_catalog_test.cpp +++ b/src/mongo/db/storage/kv/durable_catalog_test.cpp @@ -27,28 +27,79 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/index_spec.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_impl.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/import_options.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/index_names.h" #include "mongo/db/multitenancy_gen.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/db/storage/devnull/devnull_kv_engine.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" +#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/durable_catalog_entry.h" #include "mongo/db/storage/kv/kv_engine.h" -#include "mongo/db/storage/storage_engine_impl.h" -#include "mongo/db/timeseries/timeseries_options.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { namespace { @@ -114,12 +165,11 @@ class DurableCatalogTest : public CatalogTestFixture { operationContext(), nss, catalogId, - getCatalog()->getMetaData(operationContext(), catalogId), + getCatalog()->getParsedCatalogEntry(operationContext(), catalogId)->metadata, std::move(coll.second)); CollectionCatalog::write(operationContext(), [&](CollectionCatalog& catalog) { catalog.registerCollection(operationContext(), - options.uuid.value(), std::move(collection), /*ts=*/boost::none); }); @@ -145,7 +195,7 @@ class DurableCatalogTest : public CatalogTestFixture { spec.textDefaultLanguage("swedish"); } - auto desc = std::make_unique(indexType, spec.toBSON()); + auto 
desc = IndexDescriptor(indexType, spec.toBSON()); IndexCatalogEntry* entry = nullptr; auto collWriter = getCollectionWriter(); @@ -153,11 +203,10 @@ class DurableCatalogTest : public CatalogTestFixture { WriteUnitOfWork wuow(operationContext()); const bool isSecondaryBackgroundIndexBuild = false; boost::optional buildUUID(twoPhase, UUID::gen()); - ASSERT_OK(collWriter.getWritableCollection(operationContext()) - ->prepareForIndexBuild(operationContext(), - desc.get(), - buildUUID, - isSecondaryBackgroundIndexBuild)); + ASSERT_OK( + collWriter.getWritableCollection(operationContext()) + ->prepareForIndexBuild( + operationContext(), &desc, buildUUID, isSecondaryBackgroundIndexBuild)); entry = collWriter.getWritableCollection(operationContext()) ->getIndexCatalog() ->createIndexEntry(operationContext(), @@ -231,7 +280,7 @@ class ImportCollectionTest : public DurableCatalogTest { imd.spec = descriptor.infoObj(); imd.ready = true; - md = getCatalog()->getMetaData(operationContext(), catalogId); + md = getCatalog()->getParsedCatalogEntry(operationContext(), catalogId)->metadata; md->insertIndex(std::move(imd)); getCatalog()->putMetaData(operationContext(), catalogId, *md); @@ -258,6 +307,11 @@ class ImportCollectionTest : public DurableCatalogTest { Lock::DBLock dbLock(operationContext(), nss.dbName(), MODE_IX); Lock::CollectionLock collLock(operationContext(), nss, MODE_X); + uassert(ErrorCodes::NamespaceExists, + str::stream() << "Collection already exists. NS: " << nss.toStringForErrorMsg(), + !CollectionCatalog::get(operationContext()) + ->lookupCollectionByNamespace(operationContext(), nss)); + WriteUnitOfWork wuow(operationContext()); auto res = getCatalog()->importCollection( operationContext(), @@ -660,7 +714,9 @@ class ConcurrentMultikeyTest : public DurableCatalogTest { // Verify that the durable catalog has 'expected' as multikey paths for this index Lock::GlobalLock globalLock{operationContext(), MODE_IS}; - auto md = getCatalog()->getMetaData(operationContext(), collection->getCatalogId()); + auto md = getCatalog() + ->getParsedCatalogEntry(operationContext(), collection->getCatalogId()) + ->metadata; auto indexOffset = md->findIndexOffset(indexEntry->descriptor()->indexName()); assertMultikeyPathsAreEqual(md->indexes[indexOffset].multikeyPaths, expected); @@ -749,26 +805,28 @@ TEST_F(ImportCollectionTest, ImportCollection) { auto idxIdentObj = BSON("_id_" << idxIdent); // Import should fail with missing "md" field. - ASSERT_THROWS_CODE(importCollectionTest( - nss, - BSON("idxIdent" << idxIdentObj << "ns" << nss.ns() << "ident" << ident), - storageMetadata), - AssertionException, - ErrorCodes::BadValue); - - // Import should fail with missing "ident" field. ASSERT_THROWS_CODE( - importCollectionTest(nss, - BSON("md" << mdObj << "idxIdent" << idxIdentObj << "ns" << nss.ns()), - storageMetadata), + importCollectionTest( + nss, + BSON("idxIdent" << idxIdentObj << "ns" << nss.ns_forTest() << "ident" << ident), + storageMetadata), AssertionException, ErrorCodes::BadValue); + // Import should fail with missing "ident" field. + ASSERT_THROWS_CODE(importCollectionTest(nss, + BSON("md" << mdObj << "idxIdent" << idxIdentObj << "ns" + << nss.ns_forTest()), + storageMetadata), + AssertionException, + ErrorCodes::BadValue); + // Import should success with validate inputs. 
- auto swImportResult = importCollectionTest( - nss, - BSON("md" << mdObj << "idxIdent" << idxIdentObj << "ns" << nss.ns() << "ident" << ident), - storageMetadata); + auto swImportResult = + importCollectionTest(nss, + BSON("md" << mdObj << "idxIdent" << idxIdentObj << "ns" + << nss.ns_forTest() << "ident" << ident), + storageMetadata); ASSERT_OK(swImportResult.getStatus()); DurableCatalog::ImportResult importResult = std::move(swImportResult.getValue()); @@ -787,8 +845,8 @@ TEST_F(ImportCollectionTest, ImportCollection) { // match. md->options.uuid = importResult.uuid; ASSERT_BSONOBJ_EQ(getCatalog()->getCatalogEntry(operationContext(), importResult.catalogId), - BSON("md" << md->toBSON() << "idxIdent" << idxIdentObj << "ns" << nss.ns() - << "ident" << ident)); + BSON("md" << md->toBSON() << "idxIdent" << idxIdentObj << "ns" + << nss.ns_forTest() << "ident" << ident)); // Since there was not a collision, the rand should not have changed. ASSERT_EQ(rand, getCatalog()->getRand_forTest()); @@ -824,7 +882,7 @@ TEST_F(ImportCollectionTest, ImportCollectionRandConflict) { auto swImportResult = importCollectionTest(nss, BSON("md" << md->toBSON() << "idxIdent" << BSON("_id_" << idxIdent) - << "ns" << nss.ns() << "ident" << ident), + << "ns" << nss.ns_forTest() << "ident" << ident), storageMetadata); ASSERT_OK(swImportResult.getStatus()); } @@ -856,8 +914,8 @@ TEST_F(DurableCatalogTest, CheckTimeseriesBucketsMayHaveMixedSchemaDataFlagFCVLa ->lookupCollectionByNamespace(operationContext(), regularNss); RecordId catalogId = collection->getCatalogId(); ASSERT(!getCatalog() - ->getMetaData(operationContext(), catalogId) - ->timeseriesBucketsMayHaveMixedSchemaData); + ->getParsedCatalogEntry(operationContext(), catalogId) + ->metadata->timeseriesBucketsMayHaveMixedSchemaData); } { @@ -872,11 +930,11 @@ TEST_F(DurableCatalogTest, CheckTimeseriesBucketsMayHaveMixedSchemaDataFlagFCVLa ->lookupCollectionByNamespace(operationContext(), bucketsNss); RecordId catalogId = collection->getCatalogId(); ASSERT(getCatalog() - ->getMetaData(operationContext(), catalogId) - ->timeseriesBucketsMayHaveMixedSchemaData); + ->getParsedCatalogEntry(operationContext(), catalogId) + ->metadata->timeseriesBucketsMayHaveMixedSchemaData); ASSERT_FALSE(*getCatalog() - ->getMetaData(operationContext(), catalogId) - ->timeseriesBucketsMayHaveMixedSchemaData); + ->getParsedCatalogEntry(operationContext(), catalogId) + ->metadata->timeseriesBucketsMayHaveMixedSchemaData); } } @@ -895,9 +953,12 @@ TEST_F(DurableCatalogTest, CreateCollectionCatalogEntryHasCorrectTenantNamespace ASSERT_EQ(getCatalog()->getEntry(catalogId).nss, nss); Lock::GlobalLock globalLock{operationContext(), MODE_IS}; - ASSERT_EQ(getCatalog()->getMetaData(operationContext(), catalogId)->nss.tenantId(), + ASSERT_EQ(getCatalog() + ->getParsedCatalogEntry(operationContext(), catalogId) + ->metadata->nss.tenantId(), nss.tenantId()); - ASSERT_EQ(getCatalog()->getMetaData(operationContext(), catalogId)->nss, nss); + ASSERT_EQ(getCatalog()->getParsedCatalogEntry(operationContext(), catalogId)->metadata->nss, + nss); auto catalogEntry = getCatalog()->scanForCatalogEntryByNss(operationContext(), nss); @@ -938,7 +999,9 @@ TEST_F(DurableCatalogTest, ScanForCatalogEntryByNssBasic) { ASSERT(catalogEntryThird != boost::none); ASSERT_EQ(nssThird, catalogEntryThird->metadata->nss); ASSERT_EQ(catalogIdAndUUIDThird.uuid, catalogEntryThird->metadata->options.uuid); - ASSERT_EQ(getCatalog()->getMetaData(operationContext(), catalogIdAndUUIDThird.catalogId)->nss, + 
ASSERT_EQ(getCatalog() + ->getParsedCatalogEntry(operationContext(), catalogIdAndUUIDThird.catalogId) + ->metadata->nss, nssThird); ASSERT_EQ(getCatalog()->getEntry(catalogIdAndUUIDThird.catalogId).nss, nssThird); @@ -947,7 +1010,9 @@ TEST_F(DurableCatalogTest, ScanForCatalogEntryByNssBasic) { ASSERT_EQ(nssSecond, catalogEntrySecond->metadata->nss); ASSERT_EQ(catalogIdAndUUIDSecond.uuid, catalogEntrySecond->metadata->options.uuid); ASSERT(catalogEntrySecond->metadata->options.timeseries); - ASSERT_EQ(getCatalog()->getMetaData(operationContext(), catalogIdAndUUIDSecond.catalogId)->nss, + ASSERT_EQ(getCatalog() + ->getParsedCatalogEntry(operationContext(), catalogIdAndUUIDSecond.catalogId) + ->metadata->nss, nssSecond); ASSERT_EQ(getCatalog()->getEntry(catalogIdAndUUIDSecond.catalogId).nss, nssSecond); @@ -956,7 +1021,9 @@ TEST_F(DurableCatalogTest, ScanForCatalogEntryByNssBasic) { ASSERT_EQ(nssFirst, catalogEntryFirst->metadata->nss); ASSERT_EQ(catalogIdAndUUIDFirst.uuid, catalogEntryFirst->metadata->options.uuid); ASSERT_EQ(nssFirst.tenantId(), catalogEntryFirst->metadata->nss.tenantId()); - ASSERT_EQ(getCatalog()->getMetaData(operationContext(), catalogIdAndUUIDFirst.catalogId)->nss, + ASSERT_EQ(getCatalog() + ->getParsedCatalogEntry(operationContext(), catalogIdAndUUIDFirst.catalogId) + ->metadata->nss, nssFirst); ASSERT_EQ(getCatalog()->getEntry(catalogIdAndUUIDFirst.catalogId).nss, nssFirst); diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp index 4b97397043efd..b3ab0f2d3f4f4 100644 --- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp +++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.cpp @@ -28,17 +28,32 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/kv/kv_drop_pending_ident_reaper.h" - +#include +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/storage/ident.h" +#include "mongo/db/storage/kv/kv_drop_pending_ident_reaper.h" #include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -161,7 +176,7 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons for (auto& timestampAndIdentInfo : toDrop) { // Guards against catalog changes while dropping idents using KVEngine::dropIdent(). Yields // after dropping each ident. 
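Several of the durable_catalog_test.cpp hunks above replace `getCatalog()->getMetaData(opCtx, catalogId)` with `getCatalog()->getParsedCatalogEntry(opCtx, catalogId)->metadata`. The standalone sketch below models only the shape of that accessor change with hypothetical stand-in types (this is not the real DurableCatalog interface): the new call returns a parsed entry whose `metadata` member carries what the old call returned, so callers chain one extra member access and should account for a missing entry before dereferencing.

```cpp
// Stand-in types for illustration; not the real DurableCatalog API.
#include <cassert>
#include <memory>
#include <optional>
#include <string>

struct MetaData {
    std::string nss;  // stand-in for the collection namespace
};

struct ParsedCatalogEntry {
    std::shared_ptr<MetaData> metadata;  // what getMetaData() used to hand back
};

class CatalogLike {
public:
    // New-style accessor: the whole parsed entry, or nothing for an unknown id.
    std::optional<ParsedCatalogEntry> getParsedCatalogEntry(int catalogId) const {
        if (catalogId != 42)
            return std::nullopt;
        return ParsedCatalogEntry{std::make_shared<MetaData>(MetaData{"a.b"})};
    }
};

int main() {
    CatalogLike catalog;
    auto entry = catalog.getParsedCatalogEntry(42);
    assert(entry && entry->metadata);
    assert(entry->metadata->nss == "a.b");  // old code: getMetaData(...)->nss
    return 0;
}
```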
- writeConflictRetry(opCtx, "dropIdentsOlderThan", "", [&] { + writeConflictRetry(opCtx, "dropIdentsOlderThan", NamespaceString(), [&] { Lock::GlobalLock globalLock(opCtx, MODE_IX); const auto& dropTimestamp = timestampAndIdentInfo.first; @@ -239,7 +254,9 @@ void KVDropPendingIdentReaper::dropIdentsOlderThan(OperationContext* opCtx, cons } } -void KVDropPendingIdentReaper::clearDropPendingState() { +void KVDropPendingIdentReaper::clearDropPendingState(OperationContext* opCtx) { + invariant(opCtx->lockState()->isW()); + stdx::lock_guard lock(_mutex); _dropPendingIdents.clear(); _identToTimestamp.clear(); diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h index 6a4e47c912aa3..55b9b10ef9a32 100644 --- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h +++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper.h @@ -30,15 +30,22 @@ #pragma once #include +#include +#include #include +#include #include #include #include "mongo/base/string_data.h" #include "mongo/bson/timestamp.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/storage/ident.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/platform/mutex.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -118,9 +125,13 @@ class KVDropPendingIdentReaper { /** * Clears maps of drop pending idents but does not drop idents in storage engine. - * Used by rollback after recovering to a stable timestamp. + * Used by rollback before recovering to a stable timestamp. + * + * This function is called under the same critical section as rollback-to-stable, which happens + * under the global exclusive lock, and has to be called prior to re-opening the catalog, which + * can add drop pending idents. */ - void clearDropPendingState(); + void clearDropPendingState(OperationContext* opCtx); private: // Contains information identifying what collection/index data to drop as well as determining diff --git a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp index 9b36ac2afd9c2..496aad2782ff2 100644 --- a/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp +++ b/src/mongo/db/storage/kv/kv_drop_pending_ident_reaper_test.cpp @@ -27,16 +27,35 @@ * it in the license file. 
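The reaper hunks above give `clearDropPendingState()` an `OperationContext*` parameter so it can `invariant(opCtx->lockState()->isW())`, and the header comment now spells out that it runs inside the rollback-to-stable critical section, under the global exclusive lock, before the catalog is re-opened. A minimal standalone analogue of that "assert the caller's lock mode, then clear your maps under your own mutex" pattern, using hypothetical stand-in types:

```cpp
// Hypothetical stand-ins; the real code checks opCtx->lockState()->isW().
#include <cassert>
#include <map>
#include <mutex>
#include <string>

struct LockerLike {
    bool holdsGlobalExclusive = false;
    bool isW() const { return holdsGlobalExclusive; }
};

class ReaperLike {
public:
    void clearDropPendingState(const LockerLike& locker) {
        assert(locker.isW());  // precondition, as in the added invariant
        std::lock_guard<std::mutex> lk(_mutex);
        _dropPendingIdents.clear();
        _identToTimestamp.clear();
    }

private:
    std::mutex _mutex;
    std::multimap<int, std::string> _dropPendingIdents;  // drop timestamp -> ident
    std::map<std::string, int> _identToTimestamp;        // ident -> drop timestamp
};

int main() {
    ReaperLike reaper;
    LockerLike locker;
    locker.holdsGlobalExclusive = true;  // caller is inside the rollback-to-stable critical section
    reaper.clearDropPendingState(locker);
    return 0;
}
```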
*/ -#include "mongo/platform/basic.h" - -#include - -#include "mongo/db/concurrency/lock_state.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/column_store.h" #include "mongo/db/storage/ident.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/kv/kv_drop_pending_ident_reaper.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp index 3967c9b9e6fae..033136fa4aa5e 100644 --- a/src/mongo/db/storage/kv/kv_engine_test_harness.cpp +++ b/src/mongo/db/storage/kv/kv_engine_test_harness.cpp @@ -29,20 +29,52 @@ #include "mongo/db/storage/kv/kv_engine_test_harness.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_impl.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/multitenancy_gen.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/db/storage/durable_catalog_impl.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" +#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" -#include "mongo/util/clock_source_mock.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -63,7 +95,7 @@ class ClientAndCtx { ServiceContext::UniqueOperationContext _opCtx; }; -class DurableCatalogImplTest : public ServiceContextTest { +class DurableCatalogTest : public ServiceContextTest { protected: void setUp() override { 
helper = KVHarnessHelper::create(getServiceContext()); @@ -82,22 +114,20 @@ class DurableCatalogImplTest : public ServiceContextTest { RecordId newCollection(OperationContext* opCtx, const NamespaceString& ns, const CollectionOptions& options, - DurableCatalogImpl* catalog) { + DurableCatalog* catalog) { Lock::DBLock dbLk(opCtx, ns.dbName(), MODE_IX); auto swEntry = catalog->_addEntry(opCtx, ns, options); ASSERT_OK(swEntry.getStatus()); return swEntry.getValue().catalogId; } - Status dropCollection(OperationContext* opCtx, - RecordId catalogId, - DurableCatalogImpl* catalog) { + Status dropCollection(OperationContext* opCtx, RecordId catalogId, DurableCatalog* catalog) { Lock::GlobalLock globalLk(opCtx, MODE_IX); return catalog->_removeEntry(opCtx, catalogId); } void putMetaData(OperationContext* opCtx, - DurableCatalogImpl* catalog, + DurableCatalog* catalog, RecordId catalogId, BSONCollectionCatalogEntry::MetaData& md) { Lock::GlobalLock globalLk(opCtx, MODE_IX); @@ -105,7 +135,7 @@ class DurableCatalogImplTest : public ServiceContextTest { } std::string getIndexIdent(OperationContext* opCtx, - DurableCatalogImpl* catalog, + DurableCatalog* catalog, RecordId catalogId, StringData idxName) { Lock::GlobalLock globalLk(opCtx, MODE_IS); @@ -128,7 +158,6 @@ class KVEngineTestHarness : public ServiceContextTest { auto opCtx = makeOperationContext(); opCtx->setRecoveryUnit(std::unique_ptr(engine->newRecoveryUnit()), WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); - opCtx->swapLockState(std::make_unique(), WithLock::withoutLock()); return opCtx; } @@ -144,8 +173,6 @@ class KVEngineTestHarness : public ServiceContextTest { auto opCtx = client->makeOperationContext(); opCtx->setRecoveryUnit(std::unique_ptr(engine->newRecoveryUnit()), WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); - opCtx->swapLockState(std::make_unique(), WithLock::withoutLock()); - opCtxs.emplace_back(std::move(client), std::move(opCtx)); } @@ -162,9 +189,14 @@ TEST_F(KVEngineTestHarness, SimpleRS1) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -181,6 +213,7 @@ TEST_F(KVEngineTestHarness, SimpleRS1) { { auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(std::string("abc"), rs->dataFor(opCtx.get(), loc).data()); } @@ -207,9 +240,14 @@ TEST_F(KVEngineTestHarness, Restart1) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK(engine->createRecordStore( - opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -224,6 +262,7 @@ TEST_F(KVEngineTestHarness, Restart1) { { auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); 
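With the `LockerNoop` swap removed from the harness, the KV engine tests above now scope an explicit `Lock::GlobalLock` around each access (MODE_S or MODE_IS for reads, MODE_IX or MODE_X for writes). The snippet below is only a rough standalone analogue of that RAII scoping, built on `std::shared_mutex`; MongoDB's hierarchical lock manager and its intent modes are not modeled here.

```cpp
// Rough analogue with std::shared_mutex; MODE_IS/MODE_IX intent modes have no
// direct equivalent in the standard library.
#include <cassert>
#include <mutex>
#include <shared_mutex>
#include <string>

std::shared_mutex globalLock;
std::string record;  // stands in for record-store contents

void writeRecord(const std::string& data) {
    std::unique_lock<std::shared_mutex> lk(globalLock);  // like taking MODE_X for the write
    record = data;
}

std::string readRecord() {
    std::shared_lock<std::shared_mutex> lk(globalLock);  // like taking MODE_S for the read
    return record;
}

int main() {
    writeRecord("abc");
    assert(readRecord() == "abc");
    return 0;
}
```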
ASSERT_EQUALS(std::string("abc"), rs->dataFor(opCtx.get(), loc).data()); } } @@ -233,7 +272,11 @@ TEST_F(KVEngineTestHarness, Restart1) { { std::unique_ptr rs; auto opCtx = _makeOperationContext(engine); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT_EQUALS(std::string("abc"), rs->dataFor(opCtx.get(), loc).data()); } } @@ -250,6 +293,10 @@ TEST_F(KVEngineTestHarness, SimpleSorted1) { CollectionOptions options; options.uuid = UUID::gen(); + auto mdPtr = std::make_shared(); + mdPtr->nss = nss; + mdPtr->options = options; + std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); @@ -264,7 +311,7 @@ TEST_F(KVEngineTestHarness, SimpleSorted1) { auto opCtx = _makeOperationContext(engine); WriteUnitOfWork uow(opCtx.get()); collection = - std::make_unique(opCtx.get(), nss, RecordId(0), options, std::move(rs)); + std::make_unique(opCtx.get(), nss, RecordId(0), mdPtr, std::move(rs)); uow.commit(); } @@ -281,10 +328,11 @@ TEST_F(KVEngineTestHarness, SimpleSorted1) { { auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); const RecordId recordId(6, 4); - const KeyString::Value keyString = - KeyString::HeapBuilder( + const key_string::Value keyString = + key_string::HeapBuilder( sorted->getKeyStringVersion(), BSON("" << 5), sorted->getOrdering(), recordId) .release(); ASSERT_OK(sorted->insert(opCtx.get(), keyString, true)); @@ -293,6 +341,7 @@ TEST_F(KVEngineTestHarness, SimpleSorted1) { { auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -322,6 +371,7 @@ TEST_F(KVEngineTestHarness, TemporaryRecordStoreSimple) { { auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(std::string("abc"), rs->dataFor(opCtx.get(), loc).data()); std::vector all = engine->getAllIdents(opCtx.get()); @@ -347,9 +397,14 @@ TEST_F(KVEngineTestHarness, AllDurableTimestamp) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -411,15 +466,21 @@ TEST_F(KVEngineTestHarness, PinningOldestWithAnotherSession) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } auto opCtxs = _makeOperationContexts(engine, 2); auto opCtx1 = opCtxs[0].second.get(); + Lock::GlobalLock globalLk1(opCtx1, MODE_IX); WriteUnitOfWork 
uow1(opCtx1); StatusWith res = rs->insertRecord(opCtx1, "abc", 4, Timestamp(10, 10)); RecordId rid = res.getValue(); @@ -430,6 +491,7 @@ TEST_F(KVEngineTestHarness, PinningOldestWithAnotherSession) { Timestamp(15, 15)); auto opCtx2 = opCtxs[1].second.get(); + Lock::GlobalLock globalLk2(opCtx2, MODE_IX); WriteUnitOfWork uow2(opCtx2); ASSERT(rs->findRecord(opCtx1, rid, &rd)); @@ -481,9 +543,14 @@ TEST_F(KVEngineTestHarness, AllDurable) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -556,9 +623,14 @@ TEST_F(KVEngineTestHarness, BasicTimestampSingle) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -569,6 +641,7 @@ TEST_F(KVEngineTestHarness, BasicTimestampSingle) { // Start a read transaction. auto opCtx1 = opCtxs[0].second.get(); + Lock::GlobalLock globalLk(opCtx1, MODE_IS); opCtx1->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, kReadTimestamp); @@ -578,6 +651,7 @@ TEST_F(KVEngineTestHarness, BasicTimestampSingle) { RecordId rid; { auto opCtx2 = opCtxs[1].second.get(); + Lock::GlobalLock globalLk(opCtx2, MODE_IX); WriteUnitOfWork wuow(opCtx2); auto swRid = rs->insertRecord(opCtx2, "abc", 4, kInsertTimestamp); ASSERT_OK(swRid); @@ -626,9 +700,14 @@ TEST_F(KVEngineTestHarness, BasicTimestampMultiple) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -639,6 +718,7 @@ TEST_F(KVEngineTestHarness, BasicTimestampMultiple) { { // Initial insert of record. 
auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLk(opCtx.get(), MODE_X); WriteUnitOfWork wuow(opCtx.get()); auto swRid = rs->insertRecord(opCtx.get(), "abc", 4, t10); ASSERT_OK(swRid); @@ -653,6 +733,7 @@ TEST_F(KVEngineTestHarness, BasicTimestampMultiple) { RecordData rd; auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLk(opCtx.get(), MODE_S); opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, t10); ASSERT(rs->findRecord(opCtx.get(), rid, &rd)); ASSERT_EQUALS(std::string("abc"), rd.data()); @@ -683,15 +764,21 @@ DEATH_TEST_REGEX_F(KVEngineTestHarness, SnapshotHidesVisibility, ".*item not fou std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } auto opCtxs = _makeOperationContexts(engine, 2); auto opCtx1 = opCtxs[0].second.get(); + Lock::GlobalLock globalLk1(opCtx1, MODE_IX); WriteUnitOfWork uow1(opCtx1); StatusWith res = rs->insertRecord(opCtx1, "abc", 4, Timestamp(10, 10)); ASSERT_OK(res); @@ -699,10 +786,11 @@ DEATH_TEST_REGEX_F(KVEngineTestHarness, SnapshotHidesVisibility, ".*item not fou uow1.commit(); // Snapshot was taken before the insert and will not find the record even after the commit. - RecordData rd; auto opCtx2 = opCtxs[1].second.get(); + Lock::GlobalLock globalLk2(opCtx2, MODE_IX); opCtx2->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, Timestamp(9, 9)); + RecordData rd; ASSERT(!rs->findRecord(opCtx2, loc, &rd)); // Trying to write in an outdated snapshot will cause item not found. 
@@ -739,10 +827,14 @@ TEST_F(KVEngineTestHarness, SingleReadWithConflictWithOplog) { std::unique_ptr oplogRs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - collectionRs = - engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + collectionRs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(collectionRs); CollectionOptions options; @@ -781,6 +873,7 @@ TEST_F(KVEngineTestHarness, SingleReadWithConflictWithOplog) { } auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLk(opCtx.get(), MODE_S); opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, t9); ASSERT(!collectionRs->findRecord(opCtx.get(), locCollection, &rd)); ASSERT(!oplogRs->findRecord(opCtx.get(), locOplog, &rd)); @@ -814,13 +907,19 @@ TEST_F(KVEngineTestHarness, PinningOldestTimestampWithReadConflict) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLk(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); StatusWith res = rs->insertRecord(opCtx.get(), "abc", 4, Timestamp(10, 10)); RecordId rid = res.getValue(); @@ -858,9 +957,14 @@ DEATH_TEST_REGEX_F(KVEngineTestHarness, std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -899,9 +1003,14 @@ TEST_F(KVEngineTestHarness, RollingBackToLastStable) { std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -945,6 +1054,7 @@ TEST_F(KVEngineTestHarness, RollingBackToLastStable) { { // Rollback to the last stable timestamp. 
auto opCtx = _makeOperationContext(engine); + Lock::GlobalLock globalLk(opCtx.get(), MODE_X); StatusWith swTimestamp = engine->recoverToStableTimestamp(opCtx.get()); ASSERT_EQ(swTimestamp.getValue(), Timestamp(1, 1)); @@ -979,9 +1089,14 @@ DEATH_TEST_REGEX_F(KVEngineTestHarness, CommitBehindStable, "Fatal assertion.*39 std::unique_ptr rs; { auto opCtx = _makeOperationContext(engine); - ASSERT_OK( - engine->createRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions())); - rs = engine->getRecordStore(opCtx.get(), NamespaceString(ns), ns, CollectionOptions()); + ASSERT_OK(engine->createRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions())); + rs = engine->getRecordStore(opCtx.get(), + NamespaceString::createNamespaceString_forTest(ns), + ns, + CollectionOptions()); ASSERT(rs); } @@ -1006,20 +1121,25 @@ DEATH_TEST_REGEX_F(KVEngineTestHarness, CommitBehindStable, "Fatal assertion.*39 } } -TEST_F(DurableCatalogImplTest, Coll1) { +TEST_F(DurableCatalogTest, Coll1) { KVEngine* engine = helper->getEngine(); std::unique_ptr rs; - std::unique_ptr catalog; + std::unique_ptr catalog; { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - ASSERT_OK(engine->createRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions())); - rs = engine->getRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions()); - catalog = std::make_unique(rs.get(), false, false, nullptr); + ASSERT_OK( + engine->createRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions())); + rs = engine->getRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions()); + catalog = std::make_unique(rs.get(), false, false, nullptr); uow.commit(); } @@ -1028,8 +1148,10 @@ TEST_F(DurableCatalogImplTest, Coll1) { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - catalogId = - newCollection(opCtx, NamespaceString("a.b"), CollectionOptions(), catalog.get()); + catalogId = newCollection(opCtx, + NamespaceString::createNamespaceString_forTest("a.b"), + CollectionOptions(), + catalog.get()); ASSERT_NOT_EQUALS("a.b", catalog->getEntry(catalogId).ident); uow.commit(); } @@ -1041,7 +1163,7 @@ TEST_F(DurableCatalogImplTest, Coll1) { Lock::GlobalLock globalLk(opCtx, MODE_IX); WriteUnitOfWork uow(opCtx); - catalog = std::make_unique(rs.get(), false, false, nullptr); + catalog = std::make_unique(rs.get(), false, false, nullptr); catalog->init(opCtx); uow.commit(); } @@ -1053,27 +1175,34 @@ TEST_F(DurableCatalogImplTest, Coll1) { auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); dropCollection(opCtx, catalogId, catalog.get()).transitional_ignore(); - newCatalogId = - newCollection(opCtx, NamespaceString("a.b"), CollectionOptions(), catalog.get()); + newCatalogId = newCollection(opCtx, + NamespaceString::createNamespaceString_forTest("a.b"), + CollectionOptions(), + catalog.get()); uow.commit(); } ASSERT_NOT_EQUALS(ident, catalog->getEntry(newCatalogId).ident); } -TEST_F(DurableCatalogImplTest, Idx1) { +TEST_F(DurableCatalogTest, Idx1) { KVEngine* engine = helper->getEngine(); std::unique_ptr rs; - std::unique_ptr catalog; + std::unique_ptr catalog; { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - ASSERT_OK(engine->createRecordStore( - opCtx, 
NamespaceString("catalog"), "catalog", CollectionOptions())); - rs = engine->getRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions()); - catalog = std::make_unique(rs.get(), false, false, nullptr); + ASSERT_OK( + engine->createRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions())); + rs = engine->getRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions()); + catalog = std::make_unique(rs.get(), false, false, nullptr); uow.commit(); } @@ -1082,10 +1211,12 @@ TEST_F(DurableCatalogImplTest, Idx1) { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - catalogId = - newCollection(opCtx, NamespaceString("a.b"), CollectionOptions(), catalog.get()); + catalogId = newCollection(opCtx, + NamespaceString::createNamespaceString_forTest("a.b"), + CollectionOptions(), + catalog.get()); ASSERT_NOT_EQUALS("a.b", catalog->getEntry(catalogId).ident); - ASSERT_TRUE(catalog->isUserDataIdent(catalog->getEntry(catalogId).ident)); + ASSERT_TRUE(DurableCatalog::isUserDataIdent(catalog->getEntry(catalogId).ident)); uow.commit(); } @@ -1120,7 +1251,7 @@ TEST_F(DurableCatalogImplTest, Idx1) { auto opCtx = clientAndCtx.opCtx(); ASSERT_EQUALS(idxIndent, getIndexIdent(opCtx, catalog.get(), catalogId, "foo")); ASSERT_TRUE( - catalog->isUserDataIdent(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"))); + DurableCatalog::isUserDataIdent(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"))); } { @@ -1150,20 +1281,25 @@ TEST_F(DurableCatalogImplTest, Idx1) { } } -TEST_F(DurableCatalogImplTest, DirectoryPerDb1) { +TEST_F(DurableCatalogTest, DirectoryPerDb1) { KVEngine* engine = helper->getEngine(); std::unique_ptr rs; - std::unique_ptr catalog; + std::unique_ptr catalog; { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - ASSERT_OK(engine->createRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions())); - rs = engine->getRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions()); - catalog = std::make_unique(rs.get(), true, false, nullptr); + ASSERT_OK( + engine->createRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions())); + rs = engine->getRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions()); + catalog = std::make_unique(rs.get(), true, false, nullptr); uow.commit(); } @@ -1172,10 +1308,12 @@ TEST_F(DurableCatalogImplTest, DirectoryPerDb1) { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - catalogId = - newCollection(opCtx, NamespaceString("a.b"), CollectionOptions(), catalog.get()); + catalogId = newCollection(opCtx, + NamespaceString::createNamespaceString_forTest("a.b"), + CollectionOptions(), + catalog.get()); ASSERT_STRING_CONTAINS(catalog->getEntry(catalogId).ident, "a/"); - ASSERT_TRUE(catalog->isUserDataIdent(catalog->getEntry(catalogId).ident)); + ASSERT_TRUE(DurableCatalog::isUserDataIdent(catalog->getEntry(catalogId).ident)); uow.commit(); } @@ -1197,25 +1335,30 @@ TEST_F(DurableCatalogImplTest, DirectoryPerDb1) { putMetaData(opCtx, catalog.get(), catalogId, md); ASSERT_STRING_CONTAINS(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"), "a/"); ASSERT_TRUE( - catalog->isUserDataIdent(getIndexIdent(opCtx, 
catalog.get(), catalogId, "foo"))); + DurableCatalog::isUserDataIdent(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"))); uow.commit(); } } -TEST_F(DurableCatalogImplTest, Split1) { +TEST_F(DurableCatalogTest, Split1) { KVEngine* engine = helper->getEngine(); std::unique_ptr rs; - std::unique_ptr catalog; + std::unique_ptr catalog; { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - ASSERT_OK(engine->createRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions())); - rs = engine->getRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions()); - catalog = std::make_unique(rs.get(), false, true, nullptr); + ASSERT_OK( + engine->createRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions())); + rs = engine->getRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions()); + catalog = std::make_unique(rs.get(), false, true, nullptr); uow.commit(); } @@ -1224,10 +1367,12 @@ TEST_F(DurableCatalogImplTest, Split1) { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - catalogId = - newCollection(opCtx, NamespaceString("a.b"), CollectionOptions(), catalog.get()); + catalogId = newCollection(opCtx, + NamespaceString::createNamespaceString_forTest("a.b"), + CollectionOptions(), + catalog.get()); ASSERT_STRING_CONTAINS(catalog->getEntry(catalogId).ident, "collection/"); - ASSERT_TRUE(catalog->isUserDataIdent(catalog->getEntry(catalogId).ident)); + ASSERT_TRUE(DurableCatalog::isUserDataIdent(catalog->getEntry(catalogId).ident)); uow.commit(); } @@ -1249,25 +1394,30 @@ TEST_F(DurableCatalogImplTest, Split1) { putMetaData(opCtx, catalog.get(), catalogId, md); ASSERT_STRING_CONTAINS(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"), "index/"); ASSERT_TRUE( - catalog->isUserDataIdent(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"))); + DurableCatalog::isUserDataIdent(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"))); uow.commit(); } } -TEST_F(DurableCatalogImplTest, DirectoryPerAndSplit1) { +TEST_F(DurableCatalogTest, DirectoryPerAndSplit1) { KVEngine* engine = helper->getEngine(); std::unique_ptr rs; - std::unique_ptr catalog; + std::unique_ptr catalog; { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - ASSERT_OK(engine->createRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions())); - rs = engine->getRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions()); - catalog = std::make_unique(rs.get(), true, true, nullptr); + ASSERT_OK( + engine->createRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions())); + rs = engine->getRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions()); + catalog = std::make_unique(rs.get(), true, true, nullptr); uow.commit(); } @@ -1276,10 +1426,12 @@ TEST_F(DurableCatalogImplTest, DirectoryPerAndSplit1) { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - catalogId = - newCollection(opCtx, NamespaceString("a.b"), CollectionOptions(), catalog.get()); + catalogId = newCollection(opCtx, + NamespaceString::createNamespaceString_forTest("a.b"), + CollectionOptions(), + catalog.get()); 
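The test hunks above replace direct `NamespaceString("a.b")` construction with `NamespaceString::createNamespaceString_forTest("a.b")`, an explicitly test-only factory, and likewise route string access through `ns_forTest()`. The sketch below shows the general `_forTest` factory convention with a hypothetical class (it is not the real NamespaceString): production code is pushed toward vetted construction paths while tests use an entry point whose name makes its scope obvious.

```cpp
// Hypothetical sketch of the "_forTest factory" convention.
#include <cassert>
#include <string>

class Namespace {
public:
    // Test-only convenience; production callers would use validated construction paths.
    static Namespace createNamespaceString_forTest(std::string ns) {
        return Namespace(std::move(ns));
    }

    const std::string& ns_forTest() const {
        return _ns;
    }

private:
    explicit Namespace(std::string ns) : _ns(std::move(ns)) {}  // not publicly reachable
    std::string _ns;
};

int main() {
    auto nss = Namespace::createNamespaceString_forTest("a.b");
    assert(nss.ns_forTest() == "a.b");
    return 0;
}
```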
ASSERT_STRING_CONTAINS(catalog->getEntry(catalogId).ident, "a/collection/"); - ASSERT_TRUE(catalog->isUserDataIdent(catalog->getEntry(catalogId).ident)); + ASSERT_TRUE(DurableCatalog::isUserDataIdent(catalog->getEntry(catalogId).ident)); uow.commit(); } @@ -1301,12 +1453,12 @@ TEST_F(DurableCatalogImplTest, DirectoryPerAndSplit1) { putMetaData(opCtx, catalog.get(), catalogId, md); ASSERT_STRING_CONTAINS(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"), "a/index/"); ASSERT_TRUE( - catalog->isUserDataIdent(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"))); + DurableCatalog::isUserDataIdent(getIndexIdent(opCtx, catalog.get(), catalogId, "foo"))); uow.commit(); } } -TEST_F(DurableCatalogImplTest, BackupImplemented) { +TEST_F(DurableCatalogTest, BackupImplemented) { KVEngine* engine = helper->getEngine(); ASSERT(engine); @@ -1318,7 +1470,7 @@ TEST_F(DurableCatalogImplTest, BackupImplemented) { } } -DEATH_TEST_REGEX_F(DurableCatalogImplTest, +DEATH_TEST_REGEX_F(DurableCatalogTest, TerminateOnNonNumericIndexVersion, "Fatal assertion.*50942") { KVEngine* engine = helper->getEngine(); @@ -1330,13 +1482,19 @@ DEATH_TEST_REGEX_F(DurableCatalogImplTest, CollectionOptions options; options.uuid = UUID::gen(); + auto mdPtr = std::make_shared(); + mdPtr->nss = nss; + mdPtr->options = options; + std::unique_ptr rs; { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - ASSERT_OK(engine->createRecordStore(opCtx, NamespaceString("catalog"), "catalog", options)); - rs = engine->getRecordStore(opCtx, NamespaceString("catalog"), "catalog", options); + ASSERT_OK(engine->createRecordStore( + opCtx, NamespaceString::createNamespaceString_forTest("catalog"), "catalog", options)); + rs = engine->getRecordStore( + opCtx, NamespaceString::createNamespaceString_forTest("catalog"), "catalog", options); uow.commit(); } @@ -1346,7 +1504,7 @@ DEATH_TEST_REGEX_F(DurableCatalogImplTest, auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); collection = - std::make_unique(opCtx, nss, RecordId(0), options, std::move(rs)); + std::make_unique(opCtx, nss, RecordId(0), mdPtr, std::move(rs)); uow.commit(); } @@ -1364,22 +1522,27 @@ DEATH_TEST_REGEX_F(DurableCatalogImplTest, } } -TEST_F(DurableCatalogImplTest, EntryIncludesTenantIdInMultitenantEnv) { +TEST_F(DurableCatalogTest, EntryIncludesTenantIdInMultitenantEnv) { gMultitenancySupport = true; KVEngine* engine = helper->getEngine(); // Create a DurableCatalog and RecordStore std::unique_ptr rs; - std::unique_ptr catalog; + std::unique_ptr catalog; { auto clientAndCtx = makeClientAndCtx("opCtx"); auto opCtx = clientAndCtx.opCtx(); WriteUnitOfWork uow(opCtx); - ASSERT_OK(engine->createRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions())); - rs = engine->getRecordStore( - opCtx, NamespaceString("catalog"), "catalog", CollectionOptions()); - catalog = std::make_unique(rs.get(), false, false, nullptr); + ASSERT_OK( + engine->createRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions())); + rs = engine->getRecordStore(opCtx, + NamespaceString::createNamespaceString_forTest("catalog"), + "catalog", + CollectionOptions()); + catalog = std::make_unique(rs.get(), false, false, nullptr); uow.commit(); } @@ -1407,7 +1570,7 @@ TEST_F(DurableCatalogImplTest, EntryIncludesTenantIdInMultitenantEnv) { Lock::GlobalLock globalLk(opCtx, MODE_IX); WriteUnitOfWork uow(opCtx); - catalog = std::make_unique(rs.get(), false, false, 
nullptr); + catalog = std::make_unique(rs.get(), false, false, nullptr); catalog->init(opCtx); uow.commit(); } diff --git a/src/mongo/db/storage/kv/kv_engine_test_harness.h b/src/mongo/db/storage/kv/kv_engine_test_harness.h index 234c1462a6d28..8358f3b710180 100644 --- a/src/mongo/db/storage/kv/kv_engine_test_harness.h +++ b/src/mongo/db/storage/kv/kv_engine_test_harness.h @@ -32,6 +32,7 @@ #include #include +#include "mongo/db/service_context.h" #include "mongo/db/storage/kv/kv_engine.h" namespace mongo { diff --git a/src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp b/src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp index 21614b2446aba..2494587fd070d 100644 --- a/src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp +++ b/src/mongo/db/storage/kv/kv_engine_timestamps_test.cpp @@ -27,22 +27,36 @@ * it in the license file. */ -#include "mongo/db/storage/kv/kv_engine_test_harness.h" - #include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/operation_context_noop.h" -#include "mongo/db/repl/read_concern_level.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/kv/kv_engine_test_harness.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/snapshot_manager.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/kv/storage_engine_test.cpp b/src/mongo/db/storage/kv/storage_engine_test.cpp index 537e4ef899faf..23736bc8694ff 100644 --- a/src/mongo/db/storage/kv/storage_engine_test.cpp +++ b/src/mongo/db/storage/kv/storage_engine_test.cpp @@ -27,37 +27,72 @@ * it in the license file. 
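The include blocks rewritten in these storage test files drop umbrella headers such as `mongo/platform/basic.h` and `mongo/unittest/unittest.h` in favor of one direct include per used symbol, with `// IWYU pragma:` comments marking deliberate exceptions (for example `no_include "cxxabi.h"`). A tiny self-contained file written in that include-what-you-use style, using generic standard-library headers rather than MongoDB ones:

```cpp
// Every symbol used below gets a direct include; the pragma documents a header
// that is only pulled in transitively and should never be suggested directly.
// IWYU pragma: no_include "cxxabi.h"
#include <memory>   // std::unique_ptr, std::make_unique
#include <string>   // std::string
#include <vector>   // std::vector

std::vector<std::unique_ptr<std::string>> makeNames() {
    std::vector<std::unique_ptr<std::string>> names;
    names.push_back(std::make_unique<std::string>("abc"));
    return names;
}

int main() {
    return makeNames().size() == 1 ? 0 : 1;
}
```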
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include #include - -#include "mongo/base/checked_cast.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/index_names.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/index_builds.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context_noop.h" -#include "mongo/db/repl/repl_settings.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/db/startup_recovery.h" -#include "mongo/db/storage/control/storage_control.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/db/storage/devnull/devnull_kv_engine.h" +#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_impl.h" +#include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/storage/storage_engine_test_fixture.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_repair_observer.h" +#include "mongo/db/storage/temporary_record_store.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/periodic_runner.h" #include "mongo/util/periodic_runner_factory.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault - namespace mongo { namespace { @@ -321,7 +356,7 @@ TEST_F(StorageEngineTest, ReconcileUnfinishedBackgroundSecondaryIndex) { // require it to be rebuilt. ASSERT_EQUALS(1UL, reconcileResult.indexesToRebuild.size()); StorageEngine::IndexIdentifier& toRebuild = reconcileResult.indexesToRebuild[0]; - ASSERT_EQUALS(ns.ns(), toRebuild.nss.ns()); + ASSERT_EQUALS(ns.ns_forTest(), toRebuild.nss.ns_forTest()); ASSERT_EQUALS(indexName, toRebuild.indexName); // There are no two-phase builds to restart or resume. @@ -462,7 +497,7 @@ TEST_F(StorageEngineRepairTest, LoadCatalogRecoversOrphansInCatalog) { // the actual drop in storage engine. 
{ WriteUnitOfWork wuow(opCtx.get()); - ASSERT_OK(removeEntry(opCtx.get(), collNs.ns(), _storageEngine->getCatalog())); + ASSERT_OK(removeEntry(opCtx.get(), collNs.ns_forTest(), _storageEngine->getCatalog())); wuow.commit(); } @@ -496,7 +531,7 @@ TEST_F(StorageEngineTest, LoadCatalogDropsOrphans) { { AutoGetDb db(opCtx.get(), collNs.dbName(), LockMode::MODE_X); WriteUnitOfWork wuow(opCtx.get()); - ASSERT_OK(removeEntry(opCtx.get(), collNs.ns(), _storageEngine->getCatalog())); + ASSERT_OK(removeEntry(opCtx.get(), collNs.ns_forTest(), _storageEngine->getCatalog())); wuow.commit(); } ASSERT(!collectionExists(opCtx.get(), collNs)); @@ -734,5 +769,6 @@ TEST_F(StorageEngineTestNotEphemeral, UseAlternateStorageLocation) { ASSERT_TRUE(collectionExists(opCtx.get(), coll1Ns)); ASSERT_FALSE(collectionExists(opCtx.get(), coll2Ns)); } + } // namespace } // namespace mongo diff --git a/src/mongo/db/storage/multi_bson_stream_cursor.cpp b/src/mongo/db/storage/multi_bson_stream_cursor.cpp index 997c60f1a9b04..eb35ccc022699 100644 --- a/src/mongo/db/storage/multi_bson_stream_cursor.cpp +++ b/src/mongo/db/storage/multi_bson_stream_cursor.cpp @@ -29,8 +29,24 @@ #include "mongo/db/storage/multi_bson_stream_cursor.h" +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/catalog/virtual_collection_options.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" +#include "mongo/platform/compiler.h" namespace mongo { diff --git a/src/mongo/db/storage/multi_bson_stream_cursor.h b/src/mongo/db/storage/multi_bson_stream_cursor.h index d71e724032699..c982642aa9119 100644 --- a/src/mongo/db/storage/multi_bson_stream_cursor.h +++ b/src/mongo/db/storage/multi_bson_stream_cursor.h @@ -29,10 +29,24 @@ #pragma once +#include +#include +#include +#include + +#include +#include +#include +#include +#include + #include "mongo/db/catalog/virtual_collection_options.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/input_stream.h" #include "mongo/db/storage/named_pipe.h" #include "mongo/db/storage/record_store.h" +#include "mongo/util/assert_util.h" namespace mongo { class MultiBsonStreamCursor : public SeekableRecordCursor { diff --git a/src/mongo/db/storage/named_pipe_posix.cpp b/src/mongo/db/storage/named_pipe_posix.cpp index b5dfd5dd01247..3c3940385a7c0 100644 --- a/src/mongo/db/storage/named_pipe_posix.cpp +++ b/src/mongo/db/storage/named_pipe_posix.cpp @@ -29,18 +29,22 @@ #ifndef _WIN32 -#include "mongo/db/storage/named_pipe.h" - #include +#include #include #include +#include // IWYU pragma: keep #include #include -#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/storage/io_error_message.h" +#include "mongo/db/storage/named_pipe.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/stdx/thread.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp index 2388800265dab..69bbf7be8d5fc 100644 --- a/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp +++ b/src/mongo/db/storage/oplog_cap_maintainer_thread.cpp @@ -28,17 +28,29 @@ */ -#include 
"mongo/platform/basic.h" +#include +#include +#include #include "oplog_cap_maintainer_thread.h" #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/record_store.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" #include "mongo/util/exit.h" #include "mongo/util/fail_point.h" #include "mongo/util/time_support.h" @@ -113,6 +125,11 @@ void OplogCapMaintainerThread::run() { LOGV2_DEBUG(5295000, 1, "Oplog cap maintainer thread started", "threadName"_attr = _name); ThreadClient tc(_name, getGlobalServiceContext()); + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + while (!globalInShutdownDeprecated()) { if (MONGO_unlikely(hangOplogCapMaintainerThread.shouldFail())) { LOGV2(5095500, "Hanging the oplog cap maintainer thread due to fail point"); diff --git a/src/mongo/db/storage/oplog_cap_maintainer_thread.h b/src/mongo/db/storage/oplog_cap_maintainer_thread.h index e322a1e6a9e2e..f742f5f41811a 100644 --- a/src/mongo/db/storage/oplog_cap_maintainer_thread.h +++ b/src/mongo/db/storage/oplog_cap_maintainer_thread.h @@ -31,6 +31,7 @@ #include +#include "mongo/db/auth/cluster_auth_mode.h" #include "mongo/db/namespace_string.h" #include "mongo/util/background.h" @@ -62,8 +63,8 @@ class OplogCapMaintainerThread : public BackgroundJob { */ bool _deleteExcessDocuments(); - std::string _name = - std::string("OplogCapMaintainerThread-") + NamespaceString::kRsOplogNamespace.toString(); + std::string _name = std::string("OplogCapMaintainerThread-") + + toStringForLogging(NamespaceString::kRsOplogNamespace); }; } // namespace mongo diff --git a/src/mongo/db/storage/record_id_bm.cpp b/src/mongo/db/storage/record_id_bm.cpp index d1cdd27073cb4..7cfc946e68266 100644 --- a/src/mongo/db/storage/record_id_bm.cpp +++ b/src/mongo/db/storage/record_id_bm.cpp @@ -27,15 +27,18 @@ * it in the license file. */ +#include +#include +#include +#include #include +#include +#include -#include "mongo/platform/basic.h" - +#include "mongo/bson/oid.h" #include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" -#include - namespace mongo { namespace { diff --git a/src/mongo/db/storage/record_store.cpp b/src/mongo/db/storage/record_store.cpp index 1bae257ba7b44..62ab117a78cf7 100644 --- a/src/mongo/db/storage/record_store.cpp +++ b/src/mongo/db/storage/record_store.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" #include "mongo/db/storage/record_store.h" -#include "mongo/db/storage/storage_options.h" namespace mongo { namespace { @@ -88,6 +92,8 @@ Status RecordStore::rangeTruncate(OperationContext* opCtx, int64_t hintDataSizeDiff, int64_t hintNumRecordsDiff) { validateWriteAllowed(opCtx); + invariant(minRecordId != RecordId() || maxRecordId != RecordId(), + "Ranged truncate must have one bound defined"); invariant(minRecordId <= maxRecordId, "Start position cannot be after end position"); return doRangeTruncate(opCtx, minRecordId, maxRecordId, hintDataSizeDiff, hintNumRecordsDiff); } @@ -122,7 +128,7 @@ Status RecordStore::oplogDiskLocRegister(OperationContext* opCtx, // we never get here while holding an uninterruptible, read-ticketed lock. That would indicate // that we are operating with the wrong global lock semantics, and either hold too weak a lock // (e.g. IS) or that we upgraded in a way we shouldn't (e.g. IS -> IX). - invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->hasReadTicket() || + invariant(!opCtx->lockState()->hasReadTicket() || !opCtx->lockState()->uninterruptibleLocksRequested()); return oplogDiskLocRegisterImpl(opCtx, opTime, orderedCommit); @@ -134,7 +140,7 @@ void RecordStore::waitForAllEarlierOplogWritesToBeVisible(OperationContext* opCt // indicate we are holding a stronger lock than we need to, and that we could actually // contribute to ticket-exhaustion. That could prevent the write we are waiting on from // acquiring the lock it needs to update the oplog visibility. - invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->hasWriteTicket() || + invariant(!opCtx->lockState()->hasWriteTicket() || !opCtx->lockState()->uninterruptibleLocksRequested()); waitForAllEarlierOplogWritesToBeVisibleImpl(opCtx); @@ -151,13 +157,13 @@ uint64_t CappedInsertNotifier::getVersion() const { return _version; } -void CappedInsertNotifier::waitUntil(uint64_t prevVersion, Date_t deadline) const { +void CappedInsertNotifier::waitUntil(OperationContext* opCtx, + uint64_t prevVersion, + Date_t deadline) const { stdx::unique_lock lk(_mutex); - while (!_dead && prevVersion == _version) { - if (stdx::cv_status::timeout == _notifier.wait_until(lk, deadline.toSystemTimePoint())) { - return; - } - } + opCtx->waitForConditionOrInterruptUntil(_notifier, lk, deadline, [this, prevVersion]() { + return _dead || prevVersion != _version; + }); } void CappedInsertNotifier::kill() { diff --git a/src/mongo/db/storage/record_store.h b/src/mongo/db/storage/record_store.h index dd9d76be0deee..f90815d8466eb 100644 --- a/src/mongo/db/storage/record_store.h +++ b/src/mongo/db/storage/record_store.h @@ -29,18 +29,38 @@ #pragma once +#include #include +#include +#include +#include +#include #include +#include #include - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/namespace_string.h" #include "mongo/db/record_id.h" #include "mongo/db/storage/ident.h" #include "mongo/db/storage/key_format.h" #include "mongo/db/storage/record_data.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include 
"mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -48,7 +68,6 @@ class Collection; class CollectionPtr; class MAdvise; class OperationContext; - class RecordStore; struct ValidateResults; @@ -237,7 +256,7 @@ class CappedInsertNotifier { * * NOTE: Waiting threads can be signaled by calling kill or notify* methods. */ - void waitUntil(uint64_t prevVersion, Date_t deadline) const; + void waitUntil(OperationContext* opCtx, uint64_t prevVersion, Date_t deadline) const; /** * Returns the version for use as an additional wake condition when used above. @@ -320,7 +339,7 @@ class RecordStore { /** * Get the namespace this RecordStore is associated with. */ - virtual std::string ns(OperationContext* opCtx) const = 0; + virtual NamespaceString ns(OperationContext* opCtx) const = 0; /** * The key format for this RecordStore's RecordIds. @@ -410,7 +429,7 @@ class RecordStore { RecordData data; invariant(findRecord(opCtx, loc, &data), str::stream() << "Didn't find RecordId " << loc << " in record store " - << ns(opCtx)); + << ns(opCtx).toStringForErrorMsg()); return data; } diff --git a/src/mongo/db/storage/record_store_test_datafor.cpp b/src/mongo/db/storage/record_store_test_datafor.cpp index 1558f5095752a..b397f860a3b11 100644 --- a/src/mongo/db/storage/record_store_test_datafor.cpp +++ b/src/mongo/db/storage/record_store_test_datafor.cpp @@ -27,35 +27,41 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/record_store_test_harness.h" - +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" - +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { -using std::string; -using std::stringstream; -using std::unique_ptr; - // Insert a record and verify its contents by calling dataFor() // on the returned RecordId. TEST(RecordStoreTestHarness, DataFor) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string data = "record-"; + std::string data = "record-"; RecordId loc; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -77,6 +83,7 @@ TEST(RecordStoreTestHarness, DataFor) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); RecordData record = rs->dataFor(opCtx.get(), loc); ASSERT_EQUALS(data.size() + 1, static_cast(record.size())); ASSERT_EQUALS(data, record.data()); @@ -88,7 +95,7 @@ TEST(RecordStoreTestHarness, DataFor) { // on each of the returned RecordIds. 
TEST(RecordStoreTestHarness, DataForMultiple) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -100,9 +107,9 @@ TEST(RecordStoreTestHarness, DataForMultiple) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + std::stringstream ss; ss << "record----" << i; - string data = ss.str(); + std::string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -121,9 +128,10 @@ TEST(RecordStoreTestHarness, DataForMultiple) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); + std::stringstream ss; ss << "record----" << i; - string data = ss.str(); + std::string data = ss.str(); RecordData record = rs->dataFor(opCtx.get(), locs[i]); ASSERT_EQUALS(data.size() + 1, static_cast(record.size())); diff --git a/src/mongo/db/storage/record_store_test_datasize.cpp b/src/mongo/db/storage/record_store_test_datasize.cpp index a635159959bea..4353bb0f432e8 100644 --- a/src/mongo/db/storage/record_store_test_datasize.cpp +++ b/src/mongo/db/storage/record_store_test_datasize.cpp @@ -27,13 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/record_store_test_harness.h" +#include +#include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/storage/record_store_test_deleterecord.cpp b/src/mongo/db/storage/record_store_test_deleterecord.cpp index b40c215332209..621d4414b3247 100644 --- a/src/mongo/db/storage/record_store_test_deleterecord.cpp +++ b/src/mongo/db/storage/record_store_test_deleterecord.cpp @@ -27,15 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/record_store_test_harness.h" +#include +#include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/record_id.h" -#include "mongo/db/storage/record_data.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/storage/record_store_test_harness.cpp b/src/mongo/db/storage/record_store_test_harness.cpp index f026681a77d82..6bc3f8bdf4b4f 100644 --- a/src/mongo/db/storage/record_store_test_harness.cpp +++ b/src/mongo/db/storage/record_store_test_harness.cpp @@ -27,15 +27,38 @@ * it in the license file. 
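Note: the test changes above and below consistently take `Lock::GlobalLock(opCtx.get(), MODE_S)` before reading via `dataFor()` and `MODE_X` around writes. As a conceptual stand-in only (the real tests use the mongo lock manager, not `std::shared_mutex`), the pattern is the familiar shared-read / exclusive-write split:

```cpp
#include <mutex>
#include <shared_mutex>
#include <string>
#include <unordered_map>

class TinyStore {
public:
    void put(int id, std::string data) {
        std::unique_lock<std::shared_mutex> lk(_mutex);  // exclusive, like MODE_X
        _records[id] = std::move(data);
    }

    std::string get(int id) const {
        std::shared_lock<std::shared_mutex> lk(_mutex);  // shared, like MODE_S
        auto it = _records.find(id);
        return it == _records.end() ? std::string{} : it->second;
    }

private:
    mutable std::shared_mutex _mutex;
    std::unordered_map<int, std::string> _records;
};
```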
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/storage/record_store_test_harness.h" - +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -56,19 +79,16 @@ auto newRecordStoreHarnessHelper(RecordStoreHarnessHelper::Options options) namespace { -using std::string; -using std::unique_ptr; - TEST(RecordStoreTestHarness, Simple1) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string s = "eliot was here"; + std::string s = "eliot was here"; RecordId loc1; @@ -83,11 +103,13 @@ TEST(RecordStoreTestHarness, Simple1) { uow.commit(); } + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data()); } { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc1).data()); ASSERT_EQUALS(1, rs->numRecords(opCtx.get())); @@ -118,14 +140,14 @@ TEST(RecordStoreTestHarness, Simple1) { TEST(RecordStoreTestHarness, Delete1) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string s = "eliot was here"; + std::string s = "eliot was here"; RecordId loc; { @@ -140,6 +162,7 @@ TEST(RecordStoreTestHarness, Delete1) { uow.commit(); } + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc).data()); } @@ -163,14 +186,14 @@ TEST(RecordStoreTestHarness, Delete1) { TEST(RecordStoreTestHarness, Delete2) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string s = "eliot was here"; + std::string s = "eliot was here"; RecordId loc; { @@ -190,6 +213,7 @@ TEST(RecordStoreTestHarness, Delete2) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc).data()); 
ASSERT_EQUALS(2, rs->numRecords(opCtx.get())); } @@ -206,15 +230,15 @@ TEST(RecordStoreTestHarness, Delete2) { TEST(RecordStoreTestHarness, Update1) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string s1 = "eliot was here"; - string s2 = "eliot was here again"; + std::string s1 = "eliot was here"; + std::string s2 = "eliot was here again"; RecordId loc; { @@ -231,6 +255,7 @@ TEST(RecordStoreTestHarness, Update1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(s1, rs->dataFor(opCtx.get(), loc).data()); } @@ -247,6 +272,7 @@ TEST(RecordStoreTestHarness, Update1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, rs->numRecords(opCtx.get())); ASSERT_EQUALS(s2, rs->dataFor(opCtx.get(), loc).data()); } @@ -254,13 +280,13 @@ TEST(RecordStoreTestHarness, Update1) { TEST(RecordStoreTestHarness, UpdateInPlace1) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); if (!rs->updateWithDamagesSupported()) return; - string s1 = "aaa111bbb"; - string s2 = "aaa222bbb"; + std::string s1 = "aaa111bbb"; + std::string s2 = "aaa222bbb"; RecordId loc; const RecordData s1Rec(s1.c_str(), s1.size() + 1); @@ -278,6 +304,7 @@ TEST(RecordStoreTestHarness, UpdateInPlace1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(s1, rs->dataFor(opCtx.get(), loc).data()); } @@ -302,21 +329,21 @@ TEST(RecordStoreTestHarness, UpdateInPlace1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(s2, rs->dataFor(opCtx.get(), loc).data()); } } - TEST(RecordStoreTestHarness, Truncate1) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string s = "eliot was here"; + std::string s = "eliot was here"; RecordId loc; { @@ -334,6 +361,7 @@ TEST(RecordStoreTestHarness, Truncate1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(s, rs->dataFor(opCtx.get(), loc).data()); } @@ -361,7 +389,7 @@ TEST(RecordStoreTestHarness, Cursor1) { const int N = 10; const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -373,7 +401,7 @@ TEST(RecordStoreTestHarness, Cursor1) { { WriteUnitOfWork uow(opCtx.get()); for (int i = 0; i < N; i++) { - string s = str::stream() << "eliot" << i; + std::string s = str::stream() << "eliot" << i; ASSERT_OK(rs->insertRecord(opCtx.get(), s.c_str(), s.size() + 1, Timestamp()) .getStatus()); } @@ -391,7 +419,7 @@ 
TEST(RecordStoreTestHarness, Cursor1) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get()); while (auto record = cursor->next()) { - string s = str::stream() << "eliot" << x++; + std::string s = str::stream() << "eliot" << x++; ASSERT_EQUALS(s, record->data.data()); } ASSERT_EQUALS(N, x); @@ -403,7 +431,7 @@ TEST(RecordStoreTestHarness, Cursor1) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); auto cursor = rs->getCursor(opCtx.get(), false); while (auto record = cursor->next()) { - string s = str::stream() << "eliot" << --x; + std::string s = str::stream() << "eliot" << --x; ASSERT_EQUALS(s, record->data.data()); } ASSERT_EQUALS(0, x); @@ -420,6 +448,7 @@ TEST(RecordStoreTestHarness, ClusteredRecordStore) { invariant(rs->keyFormat() == KeyFormat::String); auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const int numRecords = 100; std::vector records; @@ -537,6 +566,7 @@ TEST(RecordStoreTestHarness, ClusteredRecordStoreSeekNear) { invariant(rs->keyFormat() == KeyFormat::String); auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const int numRecords = 100; std::vector records; diff --git a/src/mongo/db/storage/record_store_test_harness.h b/src/mongo/db/storage/record_store_test_harness.h index 863e292d3c8bd..65dd27f82585a 100644 --- a/src/mongo/db/storage/record_store_test_harness.h +++ b/src/mongo/db/storage/record_store_test_harness.h @@ -30,19 +30,22 @@ #pragma once #include +#include #include +#include +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_options.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/client.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/kv/kv_engine.h" #include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/test_harness_helper.h" namespace mongo { -class RecoveryUnit; - class RecordStoreHarnessHelper : public HarnessHelper { public: enum class Options { Standalone, ReplicationEnabled }; diff --git a/src/mongo/db/storage/record_store_test_insertrecord.cpp b/src/mongo/db/storage/record_store_test_insertrecord.cpp index acc6f018f81cb..d7d51f4b12107 100644 --- a/src/mongo/db/storage/record_store_test_insertrecord.cpp +++ b/src/mongo/db/storage/record_store_test_insertrecord.cpp @@ -27,13 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/record_id.h" -#include "mongo/db/storage/record_data.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/record_store_test_harness.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/record_store_test_oplog.cpp b/src/mongo/db/storage/record_store_test_oplog.cpp index 270d50473045d..0203d3f7726d5 100644 --- a/src/mongo/db/storage/record_store_test_oplog.cpp +++ b/src/mongo/db/storage/record_store_test_oplog.cpp @@ -27,9 +27,34 @@ * it in the license file. 
*/ -#include "mongo/db/catalog/capped_collection_maintenance.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/record_id_helpers.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/record_store_test_harness.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -72,7 +97,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { WriteUnitOfWork wuow(opCtx.get()); BSONObj obj = BSON("not_ts" << Timestamp(2, 1)); - ASSERT_EQ(rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(), Timestamp(2, 1)) + ASSERT_EQ(rs->insertRecord(opCtx.get(), obj.objdata(), obj.objsize(), Timestamp()) .getStatus(), ErrorCodes::BadValue); } @@ -99,6 +124,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { // Forward cursor seeks { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); WriteUnitOfWork wuow(opCtx.get()); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(RecordId(0, 1)); @@ -108,6 +134,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); WriteUnitOfWork wuow(opCtx.get()); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(RecordId(2, 1)); @@ -117,6 +144,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); WriteUnitOfWork wuow(opCtx.get()); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(RecordId(2, 2)); @@ -126,6 +154,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); WriteUnitOfWork wuow(opCtx.get()); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(RecordId(2, 3)); @@ -136,6 +165,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { // Reverse cursor seeks { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); WriteUnitOfWork wuow(opCtx.get()); auto cur = rs->getCursor(opCtx.get(), false /* forward */); auto rec = cur->seekNear(RecordId(0, 1)); @@ -145,6 +175,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); WriteUnitOfWork wuow(opCtx.get()); auto cur = rs->getCursor(opCtx.get(), false /* forward */); auto rec = cur->seekNear(RecordId(2, 1)); @@ -154,6 +185,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { 
ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); WriteUnitOfWork wuow(opCtx.get()); auto cur = rs->getCursor(opCtx.get(), false /* forward */); auto rec = cur->seekNear(RecordId(2, 2)); @@ -163,6 +195,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); WriteUnitOfWork wuow(opCtx.get()); auto cur = rs->getCursor(opCtx.get(), false /* forward */); auto rec = cur->seekNear(RecordId(2, 3)); @@ -180,6 +213,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(RecordId(2, 3)); ASSERT(rec); @@ -196,6 +230,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(RecordId(2, 3)); ASSERT(rec); @@ -212,6 +247,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(RecordId(2, 3)); ASSERT(rec); @@ -227,6 +263,7 @@ TEST(RecordStoreTestHarness, SeekNearOplog) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cur = rs->getCursor(opCtx.get()); auto rec = cur->seekNear(RecordId(2, 3)); ASSERT_FALSE(rec); @@ -261,6 +298,7 @@ TEST(RecordStoreTestHarness, SeekNearOnNonOplog) { std::unique_ptr rs(harnessHelper->newRecordStore("local.NOT_oplog.foo")); ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); BSONObj obj = BSON("ts" << Timestamp(2, -1)); { @@ -335,6 +373,7 @@ TEST(RecordStoreTestHarness, OplogOrder) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cursor = rs->getCursor(opCtx.get()); auto record = cursor->seekNear(RecordId(id1.getLong() + 1)); ASSERT(record); @@ -346,7 +385,6 @@ TEST(RecordStoreTestHarness, OplogOrder) { // now we insert 2 docs, but commit the 2nd one first. // we make sure we can't find the 2nd until the first is committed. 
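Note: the OplogOrder test that begins here checks that a later oplog entry committed out of order does not become visible while an earlier entry is still uncommitted. A conceptual model of that "no holes" visibility rule, assuming visibility is capped at the earliest still-in-flight entry (toy code, not how the storage engine actually tracks oplog visibility):

```cpp
#include <cstdint>
#include <limits>
#include <mutex>
#include <set>

class VisibilitySketch {
public:
    uint64_t begin(uint64_t id) {
        std::lock_guard<std::mutex> lk(_m);
        _inFlight.insert(id);
        return id;
    }

    void commit(uint64_t id) {
        std::lock_guard<std::mutex> lk(_m);
        _inFlight.erase(id);
    }

    // Readers may only see entries strictly before the earliest uncommitted id.
    uint64_t allVisibleBefore() const {
        std::lock_guard<std::mutex> lk(_m);
        return _inFlight.empty() ? std::numeric_limits<uint64_t>::max() : *_inFlight.begin();
    }

private:
    mutable std::mutex _m;
    std::set<uint64_t> _inFlight;
};
```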
ServiceContext::UniqueOperationContext earlyReader(harnessHelper->newOperationContext()); - auto earlyCursor = rs->getCursor(earlyReader.get()); ASSERT_EQ(earlyCursor->seekExact(id1)->id, id1); earlyCursor->save(); @@ -386,6 +424,7 @@ TEST(RecordStoreTestHarness, OplogOrder) { { auto client2 = harnessHelper->serviceContext()->makeClient("c2"); auto opCtx = harnessHelper->newOperationContext(client2.get()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cursor = rs->getCursor(opCtx.get()); auto record = cursor->seekNear(id2); ASSERT(record) << stringifyForDebug(opCtx.get(), record, cursor.get()); @@ -397,6 +436,7 @@ TEST(RecordStoreTestHarness, OplogOrder) { { auto client2 = harnessHelper->serviceContext()->makeClient("c2"); auto opCtx = harnessHelper->newOperationContext(client2.get()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cursor = rs->getCursor(opCtx.get()); auto record = cursor->seekNear(id3); ASSERT(record) << stringifyForDebug(opCtx.get(), record, cursor.get()); @@ -479,6 +519,7 @@ TEST(RecordStoreTestHarness, OplogOrder) { { auto client2 = harnessHelper->serviceContext()->makeClient("c2"); auto opCtx = harnessHelper->newOperationContext(client2.get()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cursor = rs->getCursor(opCtx.get()); auto record = cursor->seekNear(id2); ASSERT(record); @@ -490,6 +531,7 @@ TEST(RecordStoreTestHarness, OplogOrder) { { auto client2 = harnessHelper->serviceContext()->makeClient("c2"); auto opCtx = harnessHelper->newOperationContext(client2.get()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cursor = rs->getCursor(opCtx.get()); auto record = cursor->seekNear(id3); ASSERT(record); @@ -558,6 +600,7 @@ TEST(RecordStoreTestHarness, OplogVisibilityStandalone) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cursor = rs->getCursor(opCtx.get()); auto record = cursor->seekNear(RecordId(id1.getLong() + 1)); ASSERT(record); diff --git a/src/mongo/db/storage/record_store_test_randomiter.cpp b/src/mongo/db/storage/record_store_test_randomiter.cpp index cd340496423fc..09914d69bcfd6 100644 --- a/src/mongo/db/storage/record_store_test_randomiter.cpp +++ b/src/mongo/db/storage/record_store_test_randomiter.cpp @@ -27,13 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/record_id.h" -#include "mongo/db/storage/record_data.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/record_store_test_harness.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/record_store_test_recorditer.cpp b/src/mongo/db/storage/record_store_test_recorditer.cpp index c9710ee2222e6..e6eaf0eae1654 100644 --- a/src/mongo/db/storage/record_store_test_recorditer.cpp +++ b/src/mongo/db/storage/record_store_test_recorditer.cpp @@ -27,31 +27,38 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/record_store_test_harness.h" - -#include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { -using std::string; -using std::stringstream; -using std::unique_ptr; - // Insert multiple records and iterate through them in the forward direction. // When curr() or getNext() is called on an iterator positioned at EOF, // the iterator returns RecordId() and stays at EOF. TEST(RecordStoreTestHarness, IterateOverMultipleRecords) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -64,9 +71,9 @@ TEST(RecordStoreTestHarness, IterateOverMultipleRecords) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + std::stringstream ss; ss << "record " << i; - string data = ss.str(); + std::string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -101,7 +108,7 @@ TEST(RecordStoreTestHarness, IterateOverMultipleRecords) { // the iterator returns RecordId() and stays at EOF. TEST(RecordStoreTestHarness, IterateOverMultipleRecordsReversed) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -114,9 +121,9 @@ TEST(RecordStoreTestHarness, IterateOverMultipleRecordsReversed) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + std::stringstream ss; ss << "record " << i; - string data = ss.str(); + std::string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -151,7 +158,7 @@ TEST(RecordStoreTestHarness, IterateOverMultipleRecordsReversed) { // starting at an interior position. 
TEST(RecordStoreTestHarness, IterateStartFromMiddle) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -164,9 +171,9 @@ TEST(RecordStoreTestHarness, IterateStartFromMiddle) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + std::stringstream ss; ss << "record " << i; - string data = ss.str(); + std::string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -202,7 +209,7 @@ TEST(RecordStoreTestHarness, IterateStartFromMiddle) { // starting at an interior position. TEST(RecordStoreTestHarness, IterateStartFromMiddleReversed) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -215,9 +222,9 @@ TEST(RecordStoreTestHarness, IterateStartFromMiddleReversed) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + std::stringstream ss; ss << "record " << i; - string data = ss.str(); + std::string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -254,7 +261,7 @@ TEST(RecordStoreTestHarness, IterateStartFromMiddleReversed) { // that the iterator remains EOF. TEST(RecordStoreTestHarness, RecordIteratorEOF) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -269,7 +276,7 @@ TEST(RecordStoreTestHarness, RecordIteratorEOF) { { StringBuilder sb; sb << "record " << i; - string data = sb.str(); + std::string data = sb.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -306,7 +313,7 @@ TEST(RecordStoreTestHarness, RecordIteratorEOF) { StringBuilder sb; sb << "record " << nToInsert + 1; - string data = sb.str(); + std::string data = sb.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -325,7 +332,7 @@ TEST(RecordStoreTestHarness, RecordIteratorEOF) { // Test calling save and restore after each call to next TEST(RecordStoreTestHarness, RecordIteratorSaveRestore) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -340,7 +347,7 @@ TEST(RecordStoreTestHarness, RecordIteratorSaveRestore) { { StringBuilder sb; sb << "record " << i; - string data = sb.str(); + std::string data = sb.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -387,7 +394,7 @@ TEST(RecordStoreTestHarness, RecordIteratorSaveRestore) { // that next() returns the second record. 
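Note: the save/restore tests in this region rely on a cursor being able to release its position (`save()`) and later re-establish it (`restore()`) so iteration continues correctly even if records changed in between. A small sketch of that contract over a `std::map`, where re-seeking by key makes restore trivial (illustrative only, not the `SeekableRecordCursor` interface):

```cpp
#include <map>
#include <optional>
#include <string>
#include <utility>

class CursorSketch {
public:
    explicit CursorSketch(const std::map<int, std::string>& data) : _data(&data) {}

    std::optional<std::pair<int, std::string>> next() {
        auto it = _data->lower_bound(_pos);
        if (it == _data->end())
            return std::nullopt;
        _pos = it->first + 1;  // remember where to resume after this record
        return *it;
    }

    void save() { /* a real cursor releases storage-engine resources here */ }
    void restore() { /* re-seek using _pos; a std::map needs no extra work */ }

private:
    const std::map<int, std::string>* _data;
    int _pos = 0;
};
```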
TEST(RecordStoreTestHarness, SeekAfterEofAndContinue) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -397,7 +404,7 @@ TEST(RecordStoreTestHarness, SeekAfterEofAndContinue) { for (int i = 0; i < nToInsert; i++) { StringBuilder sb; sb << "record " << i; - string data = sb.str(); + std::string data = sb.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -443,6 +450,7 @@ TEST(RecordStoreTestHarness, SeekExactForMissingRecordReturnsNone) { const auto harnessHelper{newRecordStoreHarnessHelper()}; auto recordStore = harnessHelper->newRecordStore(); ServiceContext::UniqueOperationContext opCtx{harnessHelper->newOperationContext()}; + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); // Insert three records and remember their record ids. const int nToInsert = 3; @@ -450,7 +458,7 @@ TEST(RecordStoreTestHarness, SeekExactForMissingRecordReturnsNone) { for (int i = 0; i < nToInsert; ++i) { StringBuilder sb; sb << "record " << i; - string data = sb.str(); + std::string data = sb.str(); WriteUnitOfWork uow{opCtx.get()}; auto res = diff --git a/src/mongo/db/storage/record_store_test_recordstore.cpp b/src/mongo/db/storage/record_store_test_recordstore.cpp index 090a9788d9b7d..09d7322dc4cac 100644 --- a/src/mongo/db/storage/record_store_test_recordstore.cpp +++ b/src/mongo/db/storage/record_store_test_recordstore.cpp @@ -27,13 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/record_store_test_harness.h" - +#include +#include +#include "mongo/base/string_data.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/record_store_test_storagesize.cpp b/src/mongo/db/storage/record_store_test_storagesize.cpp index 4dfb3ff911783..b983a0c196130 100644 --- a/src/mongo/db/storage/record_store_test_storagesize.cpp +++ b/src/mongo/db/storage/record_store_test_storagesize.cpp @@ -27,25 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/record_store_test_harness.h" - +#include +#include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { -using std::string; -using std::stringstream; -using std::unique_ptr; - // Verify that a nonempty collection maybe takes up some space on disk. 
TEST(RecordStoreTestHarness, StorageSizeNonEmpty) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -56,9 +62,9 @@ TEST(RecordStoreTestHarness, StorageSizeNonEmpty) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + std::stringstream ss; ss << "record " << i; - string data = ss.str(); + std::string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -75,6 +81,7 @@ TEST(RecordStoreTestHarness, StorageSizeNonEmpty) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT(rs->storageSize(opCtx.get(), nullptr) >= 0); } } diff --git a/src/mongo/db/storage/record_store_test_truncate.cpp b/src/mongo/db/storage/record_store_test_truncate.cpp index a26f2a5155ed2..3391a628a9e64 100644 --- a/src/mongo/db/storage/record_store_test_truncate.cpp +++ b/src/mongo/db/storage/record_store_test_truncate.cpp @@ -27,13 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/record_store_test_harness.h" +#include +#include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -114,5 +124,15 @@ TEST(RecordStoreTestHarness, TruncateNonEmpty) { } } +DEATH_TEST(RecordStoreTestHarness, + RangeTruncateMustHaveBoundsTest, + "Ranged truncate must have one bound defined") { + const auto harnessHelper(newRecordStoreHarnessHelper()); + unique_ptr rs(harnessHelper->newRecordStore()); + + auto opCtx = harnessHelper->newOperationContext(); + + auto result = rs->rangeTruncate(opCtx.get(), RecordId(), RecordId(), 0, 0); +} } // namespace } // namespace mongo diff --git a/src/mongo/db/storage/record_store_test_updaterecord.cpp b/src/mongo/db/storage/record_store_test_updaterecord.cpp index 1f777afa91617..21569970657d1 100644 --- a/src/mongo/db/storage/record_store_test_updaterecord.cpp +++ b/src/mongo/db/storage/record_store_test_updaterecord.cpp @@ -27,32 +27,41 @@ * it in the license file. 
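Note: the `DEATH_TEST` added just above pins down the invariant introduced earlier in this patch in `RecordStore::rangeTruncate`: a ranged truncate must have at least one bound defined. A minimal illustration of that precondition, with an integer standing in for `RecordId` (0 meaning "null/unset") and `assert` playing the role of `invariant()`:

```cpp
#include <cassert>
#include <cstdint>

void rangeTruncateSketch(uint64_t minId, uint64_t maxId) {
    assert((minId != 0 || maxId != 0) && "Ranged truncate must have one bound defined");
    assert(minId <= maxId && "Start position cannot be after end position");
    // ... perform the truncation between the bounds ...
}
```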
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/record_store_test_harness.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { -using std::string; -using std::stringstream; -using std::unique_ptr; - // Insert a record and try to update it. TEST(RecordStoreTestHarness, UpdateRecord) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string data = "my record"; + std::string data = "my record"; RecordId loc; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -85,6 +94,7 @@ TEST(RecordStoreTestHarness, UpdateRecord) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); { RecordData record = rs->dataFor(opCtx.get(), loc); ASSERT_EQUALS(data.size() + 1, static_cast(record.size())); @@ -96,7 +106,7 @@ TEST(RecordStoreTestHarness, UpdateRecord) { // Insert multiple records and try to update them. 
TEST(RecordStoreTestHarness, UpdateMultipleRecords) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -108,9 +118,9 @@ TEST(RecordStoreTestHarness, UpdateMultipleRecords) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + std::stringstream ss; ss << "record " << i; - string data = ss.str(); + std::string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); StatusWith res = @@ -129,9 +139,9 @@ TEST(RecordStoreTestHarness, UpdateMultipleRecords) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { - stringstream ss; + std::stringstream ss; ss << "update record-" << i; - string data = ss.str(); + std::string data = ss.str(); WriteUnitOfWork uow(opCtx.get()); Status res = rs->updateRecord(opCtx.get(), locs[i], data.c_str(), data.size() + 1); @@ -143,10 +153,11 @@ TEST(RecordStoreTestHarness, UpdateMultipleRecords) { for (int i = 0; i < nToInsert; i++) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); { - stringstream ss; + std::stringstream ss; ss << "update record-" << i; - string data = ss.str(); + std::string data = ss.str(); RecordData record = rs->dataFor(opCtx.get(), locs[i]); ASSERT_EQUALS(data.size() + 1, static_cast(record.size())); diff --git a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp index 36bf06c2b78f4..f780c858e8876 100644 --- a/src/mongo/db/storage/record_store_test_updatewithdamages.cpp +++ b/src/mongo/db/storage/record_store_test_updatewithdamages.cpp @@ -27,29 +27,38 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/record_store_test_harness.h" - - +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update/document_diff_calculator.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/shared_buffer.h" namespace mongo { namespace { -using std::string; -using std::unique_ptr; - // Insert a record and try to perform an in-place update on it. 
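Note: the updateWithDamages tests that follow exercise partial in-place updates, where a damage vector describes which byte spans of the record to overwrite from a separate source buffer. A conceptual sketch of what applying such damages amounts to; the field names below are illustrative and not the exact `mutablebson::DamageEvent` layout:

```cpp
#include <cstddef>
#include <cstring>
#include <vector>

struct DamageSketch {
    size_t sourceOffset;  // where the replacement bytes live in the source buffer
    size_t targetOffset;  // where they land in the existing record
    size_t size;          // how many bytes to copy
};

// Only the changed spans are rewritten; the rest of the record is untouched.
void applyDamages(std::vector<char>& record,
                  const char* source,
                  const std::vector<DamageSketch>& damages) {
    for (const auto& d : damages) {
        std::memcpy(record.data() + d.targetOffset, source + d.sourceOffset, d.size);
    }
}
```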
TEST(RecordStoreTestHarness, UpdateWithDamages) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); if (!rs->updateWithDamagesSupported()) return; @@ -59,7 +68,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamages) { ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string data = "00010111"; + std::string data = "00010111"; RecordId loc; const RecordData rec(data.c_str(), data.size() + 1); { @@ -79,7 +88,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamages) { ASSERT_EQUALS(1, rs->numRecords(opCtx.get())); } - string modifiedData = "11101000"; + std::string modifiedData = "11101000"; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { @@ -107,6 +116,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamages) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); { RecordData record = rs->dataFor(opCtx.get(), loc); ASSERT_EQUALS(modifiedData, record.data()); @@ -118,7 +128,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamages) { // containing overlapping DamageEvents. TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEvents) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); if (!rs->updateWithDamagesSupported()) return; @@ -128,7 +138,7 @@ TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEvents) { ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string data = "00010111"; + std::string data = "00010111"; RecordId loc; const RecordData rec(data.c_str(), data.size() + 1); { @@ -148,7 +158,7 @@ TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEvents) { ASSERT_EQUALS(1, rs->numRecords(opCtx.get())); } - string modifiedData = "10100010"; + std::string modifiedData = "10100010"; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { @@ -172,6 +182,7 @@ TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEvents) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); { RecordData record = rs->dataFor(opCtx.get(), loc); ASSERT_EQUALS(modifiedData, record.data()); @@ -184,7 +195,7 @@ TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEvents) { // specified by the DamageVector, and not -- for instance -- by the targetOffset. 
TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); if (!rs->updateWithDamagesSupported()) return; @@ -194,7 +205,7 @@ TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed) { ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string data = "00010111"; + std::string data = "00010111"; RecordId loc; const RecordData rec(data.c_str(), data.size() + 1); { @@ -214,7 +225,7 @@ TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed) { ASSERT_EQUALS(1, rs->numRecords(opCtx.get())); } - string modifiedData = "10111010"; + std::string modifiedData = "10111010"; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); { @@ -238,6 +249,7 @@ TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); { RecordData record = rs->dataFor(opCtx.get(), loc); ASSERT_EQUALS(modifiedData, record.data()); @@ -248,7 +260,7 @@ TEST(RecordStoreTestHarness, UpdateWithOverlappingDamageEventsReversed) { // Insert a record and try to call updateWithDamages() with an empty DamageVector. TEST(RecordStoreTestHarness, UpdateWithNoDamages) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); if (!rs->updateWithDamagesSupported()) return; @@ -258,7 +270,7 @@ TEST(RecordStoreTestHarness, UpdateWithNoDamages) { ASSERT_EQUALS(0, rs->numRecords(opCtx.get())); } - string data = "my record"; + std::string data = "my record"; RecordId loc; const RecordData rec(data.c_str(), data.size() + 1); { @@ -293,6 +305,7 @@ TEST(RecordStoreTestHarness, UpdateWithNoDamages) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); { RecordData record = rs->dataFor(opCtx.get(), loc); ASSERT_EQUALS(data, record.data()); @@ -303,7 +316,7 @@ TEST(RecordStoreTestHarness, UpdateWithNoDamages) { // Insert a record and try to perform inserts and updates on it. 
TEST(RecordStoreTestHarness, UpdateWithDamagesScalar) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); if (!rs->updateWithDamagesSupported()) return; @@ -328,6 +341,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesScalar) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT(obj0.binaryEqual(rs->dataFor(opCtx.get(), loc).toBson())); } @@ -336,10 +350,9 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesScalar) { { WriteUnitOfWork uow(opCtx.get()); // {i: {c: "12", d: 2}} - auto diffOutput = doc_diff::computeOplogDiff(obj0, obj1, 0, nullptr); + auto diffOutput = doc_diff::computeOplogDiff(obj0, obj1, 0); ASSERT(diffOutput); - auto [_, damageSource, damages] = - doc_diff::computeDamages(obj0, diffOutput->diff, false); + auto [_, damageSource, damages] = doc_diff::computeDamages(obj0, *diffOutput, false); auto newRecStatus1 = rs->updateWithDamages(opCtx.get(), loc, obj0Rec, damageSource.get(), damages); ASSERT_OK(newRecStatus1.getStatus()); @@ -350,18 +363,19 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesScalar) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT(obj1.binaryEqual(rs->dataFor(opCtx.get(), loc).toBson())); } { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); // {u: {c: "123", d: 3}, i: {a: 1, e: 1}} - auto diffOutput = doc_diff::computeOplogDiff(obj1, obj2, 0, nullptr); + auto diffOutput = doc_diff::computeOplogDiff(obj1, obj2, 0); ASSERT(diffOutput); - auto [_, damageSource, damages] = - doc_diff::computeDamages(obj1, diffOutput->diff, false); + auto [_, damageSource, damages] = doc_diff::computeDamages(obj1, *diffOutput, false); auto newRecStatus2 = rs->updateWithDamages( opCtx.get(), loc, rs->dataFor(opCtx.get(), loc), damageSource.get(), damages); ASSERT_OK(newRecStatus2.getStatus()); @@ -372,6 +386,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesScalar) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT(obj2.binaryEqual(rs->dataFor(opCtx.get(), loc).toBson())); } } @@ -379,7 +394,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesScalar) { // Insert a record with nested documents and try to perform updates on it. 
TEST(RecordStoreTestHarness, UpdateWithDamagesNested) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); if (!rs->updateWithDamagesSupported()) return; @@ -411,6 +426,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesNested) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT(obj0.binaryEqual(rs->dataFor(opCtx.get(), loc).toBson())); } @@ -419,10 +435,9 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesNested) { { WriteUnitOfWork uow(opCtx.get()); // {u: {c: "3"}, sb: {i: {q: 1}}, sd: {sp: {u: {x: {j: "1"}}}}} - auto diffOutput = doc_diff::computeOplogDiff(obj0, obj1, 0, nullptr); + auto diffOutput = doc_diff::computeOplogDiff(obj0, obj1, 0); ASSERT(diffOutput); - auto [_, damageSource, damages] = - doc_diff::computeDamages(obj0, diffOutput->diff, true); + auto [_, damageSource, damages] = doc_diff::computeDamages(obj0, *diffOutput, true); auto newRecStatus1 = rs->updateWithDamages(opCtx.get(), loc, obj0Rec, damageSource.get(), damages); ASSERT_OK(newRecStatus1.getStatus()); @@ -433,6 +448,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesNested) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT(obj1.binaryEqual(rs->dataFor(opCtx.get(), loc).toBson())); } } @@ -440,7 +456,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesNested) { // Insert a record with nested arrays and try to perform updates on it. TEST(RecordStoreTestHarness, UpdateWithDamagesArray) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); if (!rs->updateWithDamagesSupported()) return; @@ -465,6 +481,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesArray) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT(obj0.binaryEqual(rs->dataFor(opCtx.get(), loc).toBson())); } @@ -473,10 +490,9 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesArray) { { WriteUnitOfWork uow(opCtx.get()); // {sfield2: {a: true, l: 6, 's3': {a: true, l: 4, 'u2': [4]}, 'u5': 6}} - auto diffOutput = doc_diff::computeOplogDiff(obj0, obj1, 0, nullptr); + auto diffOutput = doc_diff::computeOplogDiff(obj0, obj1, 0); ASSERT(diffOutput); - auto [_, damageSource, damages] = - doc_diff::computeDamages(obj0, diffOutput->diff, true); + auto [_, damageSource, damages] = doc_diff::computeDamages(obj0, *diffOutput, true); auto newRecStatus1 = rs->updateWithDamages(opCtx.get(), loc, obj0Rec, damageSource.get(), damages); ASSERT_OK(newRecStatus1.getStatus()); @@ -487,6 +503,7 @@ TEST(RecordStoreTestHarness, UpdateWithDamagesArray) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT(obj1.binaryEqual(rs->dataFor(opCtx.get(), loc).toBson())); } } diff --git a/src/mongo/db/storage/recovery_unit.cpp b/src/mongo/db/storage/recovery_unit.cpp index eecd83b0837e2..40c15d67441b3 100644 --- a/src/mongo/db/storage/recovery_unit.cpp +++ b/src/mongo/db/storage/recovery_unit.cpp @@ -28,11 +28,22 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/storage/recovery_unit.h" -#include 
"mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/util/fail_point.h" #include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" @@ -81,15 +92,14 @@ void RecoveryUnit::registerChange(std::unique_ptr change) { _changes.push_back(std::move(change)); } -void RecoveryUnit::registerChangeForCatalogVisibility(std::unique_ptr change) { +void RecoveryUnit::registerChangeForTwoPhaseDrop(std::unique_ptr change) { validateInUnitOfWork(); - invariant(!_changeForCatalogVisibility); - _changeForCatalogVisibility = std::move(change); + _changesForTwoPhaseDrop.push_back(std::move(change)); } -bool RecoveryUnit::hasRegisteredChangeForCatalogVisibility() { +void RecoveryUnit::registerChangeForCatalogVisibility(std::unique_ptr change) { validateInUnitOfWork(); - return _changeForCatalogVisibility.get(); + _changesForCatalogVisibility.push_back(std::move(change)); } void RecoveryUnit::commitRegisteredChanges(boost::optional commitTimestamp) { @@ -136,10 +146,10 @@ void RecoveryUnit::setOperationContext(OperationContext* opCtx) { void RecoveryUnit::_executeCommitHandlers(boost::optional commitTimestamp) { invariant(_opCtx); - for (auto& change : _changes) { + for (auto& change : _changesForTwoPhaseDrop) { try { // Log at higher level because commits occur far more frequently than rollbacks. - LOGV2_DEBUG(22244, + LOGV2_DEBUG(7789501, 3, "Custom commit", "changeName"_attr = redact(demangleName(typeid(*change)))); @@ -148,21 +158,33 @@ void RecoveryUnit::_executeCommitHandlers(boost::optional commitTimes std::terminate(); } } - try { - if (_changeForCatalogVisibility) { + for (auto& change : _changesForCatalogVisibility) { + try { // Log at higher level because commits occur far more frequently than rollbacks. LOGV2_DEBUG(5255701, - 2, + 3, "Custom commit", - "changeName"_attr = - redact(demangleName(typeid(*_changeForCatalogVisibility)))); - _changeForCatalogVisibility->commit(_opCtx, commitTimestamp); + "changeName"_attr = redact(demangleName(typeid(*change)))); + change->commit(_opCtx, commitTimestamp); + } catch (...) { + std::terminate(); + } + } + for (auto& change : _changes) { + try { + // Log at higher level because commits occur far more frequently than rollbacks. + LOGV2_DEBUG(22244, + 3, + "Custom commit", + "changeName"_attr = redact(demangleName(typeid(*change)))); + change->commit(_opCtx, commitTimestamp); + } catch (...) { + std::terminate(); } - } catch (...) { - std::terminate(); } _changes.clear(); - _changeForCatalogVisibility.reset(); + _changesForCatalogVisibility.clear(); + _changesForTwoPhaseDrop.clear(); } void RecoveryUnit::abortRegisteredChanges() { @@ -175,27 +197,47 @@ void RecoveryUnit::abortRegisteredChanges() { void RecoveryUnit::_executeRollbackHandlers() { // Make sure we have an OperationContext when executing rollback handlers. Unless we have no // handlers to run, which might be the case in unit tests. 
- invariant(_opCtx || (_changes.empty() && !_changeForCatalogVisibility)); + invariant(_opCtx || + (_changes.empty() && _changesForCatalogVisibility.empty() && + _changesForTwoPhaseDrop.empty())); try { - if (_changeForCatalogVisibility) { - LOGV2_DEBUG(5255702, + for (Changes::const_reverse_iterator it = _changes.rbegin(), end = _changes.rend(); + it != end; + ++it) { + Change* change = it->get(); + LOGV2_DEBUG(22245, 2, "Custom rollback", - "changeName"_attr = - redact(demangleName(typeid(*_changeForCatalogVisibility)))); - _changeForCatalogVisibility->rollback(_opCtx); + "changeName"_attr = redact(demangleName(typeid(*change)))); + change->rollback(_opCtx); } - for (Changes::const_reverse_iterator it = _changes.rbegin(), end = _changes.rend(); + + for (Changes::const_reverse_iterator it = _changesForTwoPhaseDrop.rbegin(), + end = _changesForTwoPhaseDrop.rend(); it != end; ++it) { Change* change = it->get(); - LOGV2_DEBUG(22245, + LOGV2_DEBUG(7789502, + 2, + "Custom rollback", + "changeName"_attr = redact(demangleName(typeid(*change)))); + change->rollback(_opCtx); + } + + for (Changes::const_reverse_iterator it = _changesForCatalogVisibility.rbegin(), + end = _changesForCatalogVisibility.rend(); + it != end; + ++it) { + Change* change = it->get(); + LOGV2_DEBUG(5255702, 2, "Custom rollback", "changeName"_attr = redact(demangleName(typeid(*change)))); change->rollback(_opCtx); } - _changeForCatalogVisibility.reset(); + + _changesForTwoPhaseDrop.clear(); + _changesForCatalogVisibility.clear(); _changes.clear(); } catch (...) { std::terminate(); diff --git a/src/mongo/db/storage/recovery_unit.h b/src/mongo/db/storage/recovery_unit.h index c22a18b5638f7..8011cb04830aa 100644 --- a/src/mongo/db/storage/recovery_unit.h +++ b/src/mongo/db/storage/recovery_unit.h @@ -29,20 +29,28 @@ #pragma once +#include +#include #include #include +#include +#include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/bson/timestamp.h" #include "mongo/db/repl/read_concern_level.h" #include "mongo/db/storage/snapshot.h" #include "mongo/db/storage/storage_stats.h" +#include "mongo/util/assert_util.h" #include "mongo/util/decorable.h" namespace mongo { class BSONObjBuilder; + class OperationContext; /** @@ -76,6 +84,22 @@ enum class PrepareConflictBehavior { kIgnoreConflictsAllowWrites }; +/** + * DataCorruptionDetectionMode determines how we handle the discovery of evidence of data + * corruption. + */ +enum class DataCorruptionDetectionMode { + /** + * Always throw a DataCorruptionDetected error when evidence of data corruption is detected. + */ + kThrow, + /** + * When evidence of data corruption is decected, log an entry to the health log and the server + * logs, but do not throw an error. Continue attempting to return results. + */ + kLogAndContinue, +}; + /** * A RecoveryUnit is responsible for ensuring that data is persisted. * All on-disk information must be mutated through this interface. @@ -267,16 +291,6 @@ class RecoveryUnit { */ virtual void preallocateSnapshot() {} - /** - * Like preallocateSnapshot() above but also indicates that the snapshot will be used for - * reading the oplog. - * - * StorageEngines may not implement this in which case it works like preallocateSnapshot. - */ - virtual void preallocateSnapshotForOplogRead() { - preallocateSnapshot(); - } - /** * Returns whether or not a majority commmitted snapshot is available. 
If no snapshot has yet * been marked as Majority Committed, returns a status with error code @@ -401,29 +415,6 @@ class RecoveryUnit { "This storage engine does not support prepared transactions"); } - /** - * Sets catalog conflicting timestamp. - * This can only be called while the RecoveryUnit is in an inactive state. - * - * This value must be set when both of the following conditions are true: - * - A storage engine snapshot is opened without a read timestamp - * (RecoveryUnit::ReadSource::kNoTimestamp). - * - The transaction may touch collections it does not yet have locks for. - * In this circumstance, the catalog conflicting timestamp serves as a substitute for a read - * timestamp. This value must be set to a valid (i.e: no-holes) read timestamp prior to - * acquiring a storage engine snapshot. This timestamp will be used to determine if any changes - * had happened to the in-memory catalog after a storage engine snapshot got opened for that - * transaction. - */ - virtual void setCatalogConflictingTimestamp(Timestamp timestamp) {} - - /** - * Returns the catalog conflicting timestamp. - */ - virtual Timestamp getCatalogConflictingTimestamp() const { - return {}; - } - /** * MongoDB must update documents with non-decreasing timestamp values. A storage engine is * allowed to assert when this contract is violated. An untimestamped write is a subset of these @@ -524,6 +515,7 @@ class RecoveryUnit { virtual boost::optional getOplogVisibilityTs() { return boost::none; } + virtual void setOplogVisibilityTs(boost::optional oplogVisibilityTs) {} /** * Pinning informs callers not to change the ReadSource on this RecoveryUnit. Callers are @@ -604,22 +596,6 @@ class RecoveryUnit { virtual void commit(OperationContext* opCtx, boost::optional commitTime) = 0; }; - /** - * A SnapshotChange is an action that can be registered at anytime. When a WriteUnitOfWork - * begins, the openSnapshot() callback is called for any registered snapshot changes. Similarly, - * when the snapshot is abandoned, or the WriteUnitOfWork is committed or aborted, the - * closeSnapshot() callback is called. - * - * The same rules apply here that apply to the Change class. - */ - class SnapshotChange { - public: - virtual ~SnapshotChange() {} - - virtual void openSnapshot(OperationContext* opCtx) = 0; - virtual void closeSnapshot(OperationContext* opCtx) = 0; - }; - /** * The commitUnitOfWork() method calls the commit() method of each registered change in order of * registration. The endUnitOfWork() method calls the rollback() method of each registered @@ -658,21 +634,25 @@ class RecoveryUnit { /** * Like registerChange() above but should only be used to make new state visible in the - * in-memory catalog. Only one change of this kind may be registered at a given time to ensure - * catalog updates are atomic. Change registered with this function will commit after the commit - * changes registered with registerChange and rollback will run before the rollback changes - * registered with registerChange. + * in-memory catalog. Change registered with this function will commit before the commit + * changes registered with registerChange and rollback will run after the rollback changes + * registered with registerChange. Only one change of this kind should be registered at a given + * time to ensure catalog updates are atomic, however multiple callbacks are allowed for testing + * purposes. 
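Editor's note: taken together with the recovery_unit.cpp hunks above, RecoveryUnit now keeps three separate change lists with a fixed execution order. A sketch of how a caller might observe that order, using the existing onCommit() helper (which wraps registerChange(); not shown in this hunk) and the onCommitForTwoPhaseDrop() helper added further down in this header; opCtx is assumed to be an OperationContext*. Illustrative only, not part of the patch:

```cpp
// Sketch: commit handlers fire list by list, in this order:
//   1. registerChangeForTwoPhaseDrop()      (drop-pending idents reach the reaper first)
//   2. registerChangeForCatalogVisibility()
//   3. registerChange()
// Rollback runs registerChange() handlers first (in reverse registration order),
// then the two-phase-drop handlers, then the catalog-visibility handlers.
WriteUnitOfWork wuow(opCtx);
opCtx->recoveryUnit()->onCommitForTwoPhaseDrop(
    [](OperationContext*, boost::optional<Timestamp>) { /* runs first on commit */ });
opCtx->recoveryUnit()->onCommit(
    [](OperationContext*, boost::optional<Timestamp>) { /* runs last on commit */ });
wuow.commit();
```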
* - * This separation ensures that regular Changes that can modify state are run before the Change - * to install the new state in the in-memory catalog, after which there should be no further - * changes. + * This separation ensures that regular Changes can observe changes to catalog visibility. */ void registerChangeForCatalogVisibility(std::unique_ptr change); /** - * Returns true if a change has been registered with registerChangeForCatalogVisibility() above. + * Like registerChange() above but should only be used to push idents for two phase drop to the + * reaper. This currently needs to happen before a drop is made visible in the catalog to avoid + * a window where a reader would observe the drop in the catalog but not be able to find the + * ident in the reaper. + * + * TODO SERVER-77959: Remove this. */ - bool hasRegisteredChangeForCatalogVisibility(); + void registerChangeForTwoPhaseDrop(std::unique_ptr change); /** * Registers a callback to be called if the current WriteUnitOfWork rolls back. @@ -726,6 +706,31 @@ class RecoveryUnit { registerChange(std::make_unique(std::move(callback))); } + /** + * Registers a callback to be called if the current WriteUnitOfWork commits for two phase drop. + * + * Should only be used for adding drop pending idents to the reaper! + * + * TODO SERVER-77959: Remove this. + */ + template + void onCommitForTwoPhaseDrop(Callback callback) { + class OnCommitTwoPhaseChange final : public Change { + public: + OnCommitTwoPhaseChange(Callback&& callback) : _callback(std::move(callback)) {} + void rollback(OperationContext* opCtx) final {} + void commit(OperationContext* opCtx, boost::optional commitTime) final { + _callback(opCtx, commitTime); + } + + private: + Callback _callback; + }; + + registerChangeForTwoPhaseDrop( + std::make_unique(std::move(callback))); + } + virtual void setOrderedCommit(bool orderedCommit) = 0; /** @@ -807,6 +812,14 @@ class RecoveryUnit { return _noEvictionAfterRollback; } + void setDataCorruptionDetectionMode(DataCorruptionDetectionMode mode) { + _dataCorruptionDetectionMode = mode; + } + + DataCorruptionDetectionMode getDataCorruptionDetectionMode() const { + return _dataCorruptionDetectionMode; + } + /** * Returns true if this is an instance of RecoveryUnitNoop. */ @@ -867,6 +880,8 @@ class RecoveryUnit { AbandonSnapshotMode _abandonSnapshotMode = AbandonSnapshotMode::kAbort; + DataCorruptionDetectionMode _dataCorruptionDetectionMode = DataCorruptionDetectionMode::kThrow; + private: // Sets the snapshot associated with this RecoveryUnit to a new globally unique id number. void assignNextSnapshot(); @@ -882,12 +897,11 @@ class RecoveryUnit { typedef std::vector> Changes; Changes _changes; - typedef std::vector> SnapshotChanges; - SnapshotChanges _snapshotChanges; + Changes _changesForCatalogVisibility; + Changes _changesForTwoPhaseDrop; // The Snapshot is always initialized by the RecoveryUnit constructor. We use an optional to // simplify destructing and re-constructing the Snapshot in-place. boost::optional _snapshot; - std::unique_ptr _changeForCatalogVisibility; State _state = State::kInactive; OperationContext* _opCtx = nullptr; bool _readOnly = false; @@ -913,4 +927,31 @@ class PinReadSourceBlock { RecoveryUnit* const _recoveryUnit; }; + +/** + * RAII-style class to override the oplog visible timestamp of the WiredTigerRecoveryUnit while it's + * in scope. 
+ */ +class ScopedOplogVisibleTimestamp { +public: + ScopedOplogVisibleTimestamp(RecoveryUnit* recoveryUnit, boost::optional oplogVisibleTs) + : _recoveryUnit(recoveryUnit), + _originalOplogVisibleTs(recoveryUnit->getOplogVisibilityTs()) { + _recoveryUnit->setOplogVisibilityTs(oplogVisibleTs); + } + + ScopedOplogVisibleTimestamp(const ScopedOplogVisibleTimestamp&) = delete; + ScopedOplogVisibleTimestamp& operator=(const ScopedOplogVisibleTimestamp&) = delete; + ScopedOplogVisibleTimestamp(ScopedOplogVisibleTimestamp&&) = delete; + ScopedOplogVisibleTimestamp& operator=(ScopedOplogVisibleTimestamp&&) = delete; + + ~ScopedOplogVisibleTimestamp() { + _recoveryUnit->setOplogVisibilityTs(_originalOplogVisibleTs); + } + +private: + RecoveryUnit* _recoveryUnit; + boost::optional _originalOplogVisibleTs; +}; + } // namespace mongo diff --git a/src/mongo/db/storage/recovery_unit_test_harness.cpp b/src/mongo/db/storage/recovery_unit_test_harness.cpp index f68550aa675ff..8512c949a41f3 100644 --- a/src/mongo/db/storage/recovery_unit_test_harness.cpp +++ b/src/mongo/db/storage/recovery_unit_test_harness.cpp @@ -28,13 +28,30 @@ */ #include "mongo/db/storage/recovery_unit_test_harness.h" + +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/repl/read_concern_level.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/recovery_unit.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/recovery_unit_test_harness.h b/src/mongo/db/storage/recovery_unit_test_harness.h index 7ebf8a378e67c..fdd22da772e41 100644 --- a/src/mongo/db/storage/recovery_unit_test_harness.h +++ b/src/mongo/db/storage/recovery_unit_test_harness.h @@ -29,6 +29,13 @@ #pragma once +#include +#include +#include + +#include "mongo/db/operation_context.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/test_harness_helper.h" namespace mongo { diff --git a/src/mongo/db/storage/remove_saver.cpp b/src/mongo/db/storage/remove_saver.cpp index d4d40d92239f1..6f6404948eb79 100644 --- a/src/mongo/db/storage/remove_saver.cpp +++ b/src/mongo/db/storage/remove_saver.cpp @@ -28,19 +28,29 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/remove_saver.h" - #include -#include -#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/encryption_hooks.h" +#include "mongo/db/storage/remove_saver.h" #include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" #include "mongo/util/errno_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" 
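Editor's note: the ScopedOplogVisibleTimestamp guard introduced in recovery_unit.h above pairs the new setOplogVisibilityTs() with RAII restoration. A usage sketch; the function name readOplogAsOf and the way the timestamp is obtained are hypothetical, and this is not part of the patch:

```cpp
#include "mongo/db/operation_context.h"
#include "mongo/db/storage/recovery_unit.h"

// Sketch: pin an oplog visibility timestamp for the duration of one scope.
void readOplogAsOf(OperationContext* opCtx, Timestamp visibleTs) {
    // The guard saves the current oplog visibility timestamp and overrides it; its
    // destructor restores the saved value even if the read below throws.
    ScopedOplogVisibleTimestamp scopedVisibility(opCtx->recoveryUnit(), visibleTs);
    // ... perform the oplog read with the overridden visibility timestamp ...
}
```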
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -64,7 +74,7 @@ RemoveSaver::RemoveSaver(const string& a, _root /= a; if (b.size()) _root /= b; - verify(a.size() || b.size()); + MONGO_verify(a.size() || b.size()); _file = _root; diff --git a/src/mongo/db/storage/remove_saver.h b/src/mongo/db/storage/remove_saver.h index 463fe7f84107a..63eebc3474c86 100644 --- a/src/mongo/db/storage/remove_saver.h +++ b/src/mongo/db/storage/remove_saver.h @@ -35,6 +35,7 @@ #include #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/storage/data_protector.h" diff --git a/src/mongo/db/storage/snapshot_helper.cpp b/src/mongo/db/storage/snapshot_helper.cpp index a610672c78dbf..2d6c00e043a3e 100644 --- a/src/mongo/db/storage/snapshot_helper.cpp +++ b/src/mongo/db/storage/snapshot_helper.cpp @@ -28,13 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/storage/snapshot_helper.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot_helper.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -100,7 +113,8 @@ bool shouldReadAtLastApplied(OperationContext* opCtx, // being applied and we can read from the default snapshot. If we are in a replication state // (like secondary or primary catch-up) where we are not accepting writes, we should read at // lastApplied. - if (repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, "admin")) { + if (repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase( + opCtx, DatabaseName::kAdmin)) { if (reason) { *reason = "primary"; } diff --git a/src/mongo/db/storage/snapshot_helper.h b/src/mongo/db/storage/snapshot_helper.h index 19b383017d309..f22162863550f 100644 --- a/src/mongo/db/storage/snapshot_helper.h +++ b/src/mongo/db/storage/snapshot_helper.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/storage/sorted_data_interface.h b/src/mongo/db/storage/sorted_data_interface.h index 098186f723608..133ffbf6d0ebe 100644 --- a/src/mongo/db/storage/sorted_data_interface.h +++ b/src/mongo/db/storage/sorted_data_interface.h @@ -59,7 +59,7 @@ class SortedDataInterface { * RecordStore. 
*/ SortedDataInterface(StringData ident, - KeyString::Version keyStringVersion, + key_string::Version keyStringVersion, Ordering ordering, KeyFormat rsKeyFormat) : _ident(std::make_shared(ident.toString())), @@ -96,7 +96,7 @@ class SortedDataInterface { */ virtual Status insert( OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId = IncludeDuplicateRecordId::kOff) = 0; @@ -109,7 +109,7 @@ class SortedDataInterface { * match, false otherwise */ virtual void unindex(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) = 0; /** @@ -118,7 +118,7 @@ class SortedDataInterface { * This will not accept a KeyString with a Discriminator other than kInclusive. */ virtual boost::optional findLoc(OperationContext* opCtx, - const KeyString::Value& keyString) const = 0; + const key_string::Value& keyString) const = 0; /** * Return ErrorCodes::DuplicateKey if there is more than one occurence of 'KeyString' in this @@ -127,7 +127,7 @@ class SortedDataInterface { * * @param opCtx the transaction under which this operation takes place */ - virtual Status dupKeyCheck(OperationContext* opCtx, const KeyString::Value& keyString) = 0; + virtual Status dupKeyCheck(OperationContext* opCtx, const key_string::Value& keyString) = 0; /** * Attempt to reduce the storage space used by this index via compaction. Only called if the @@ -176,7 +176,7 @@ class SortedDataInterface { * Prints any storage engine provided metadata for the index entry with key 'keyString'. */ virtual void printIndexEntryMetadata(OperationContext* opCtx, - const KeyString::Value& keyString) const = 0; + const key_string::Value& keyString) const = 0; /** * Return the number of entries in 'this' index. @@ -186,7 +186,7 @@ class SortedDataInterface { /* * Return the KeyString version for 'this' index. */ - KeyString::Version getKeyStringVersion() const { + key_string::Version getKeyStringVersion() const { return _keyStringVersion; } @@ -247,23 +247,12 @@ class SortedDataInterface { class Cursor { public: /** - * Tells methods that return an IndexKeyEntry what part of the data the caller is - * interested in. - * - * Methods returning an engaged optional will only return null RecordIds or empty - * BSONObjs if they have been explicitly left out of the request. - * - * Implementations are allowed to return more data than requested, but not less. + * Tells methods that return an IndexKeyEntry whether the caller is interested + * in including the key field. */ - enum RequestedInfo { - // Only usable part of the return is whether it is engaged or not. - kJustExistance = 0, - // Key must be filled in. - kWantKey = 1, - // Loc must be fulled in. - kWantLoc = 2, - // Both must be returned. - kKeyAndLoc = kWantKey | kWantLoc, + enum class KeyInclusion { + kExclude, + kInclude, }; virtual ~Cursor() = default; @@ -284,7 +273,8 @@ class SortedDataInterface { * Moves forward and returns the new data or boost::none if there is no more data. * If not positioned, returns boost::none. */ - virtual boost::optional next(RequestedInfo parts = kKeyAndLoc) = 0; + virtual boost::optional next( + KeyInclusion keyInclusion = KeyInclusion::kInclude) = 0; virtual boost::optional nextKeyString() = 0; // @@ -296,14 +286,15 @@ class SortedDataInterface { * The provided keyString has discriminator information encoded. 
*/ virtual boost::optional seekForKeyString( - const KeyString::Value& keyString) = 0; + const key_string::Value& keyString) = 0; /** * Seeks to the provided keyString and returns the IndexKeyEntry. * The provided keyString has discriminator information encoded. */ - virtual boost::optional seek(const KeyString::Value& keyString, - RequestedInfo parts = kKeyAndLoc) = 0; + virtual boost::optional seek( + const key_string::Value& keyString, + KeyInclusion keyInclusion = KeyInclusion::kInclude) = 0; // // Saving and restoring state @@ -405,13 +396,13 @@ class SortedDataInterface { * For testing only. */ virtual void insertWithRecordIdInValue_forTest(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, RecordId rid) = 0; protected: std::shared_ptr _ident; - const KeyString::Version _keyStringVersion; + const key_string::Version _keyStringVersion; const Ordering _ordering; const KeyFormat _rsKeyFormat; }; @@ -433,7 +424,7 @@ class SortedDataBuilderInterface { * transactionally. Other storage engines do not perform inserts transactionally and will ignore * any parent WriteUnitOfWork. */ - virtual Status addKey(const KeyString::Value& keyString) = 0; + virtual Status addKey(const key_string::Value& keyString) = 0; }; } // namespace mongo diff --git a/src/mongo/db/storage/sorted_data_interface_bm_cursor.cpp b/src/mongo/db/storage/sorted_data_interface_bm_cursor.cpp index c1acd3949b88e..4d027c82ec718 100644 --- a/src/mongo/db/storage/sorted_data_interface_bm_cursor.cpp +++ b/src/mongo/db/storage/sorted_data_interface_bm_cursor.cpp @@ -28,16 +28,25 @@ */ -#include -#include - #include +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" #include "mongo/db/storage/sorted_data_interface_test_harness.h" -#include "mongo/logv2/log_debug.h" -#include "mongo/unittest/unittest.h" - +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" namespace mongo { namespace { @@ -46,11 +55,8 @@ using Cursor = SortedDataInterface::Cursor; enum Direction { kBackward, kForward }; enum Uniqueness { kUnique, kNonUnique }; enum EndPosition { kWithEnd, kWithoutEnd }; -const auto kWantLoc = Cursor::kWantLoc; -const auto kWantKey = Cursor::kWantKey; -const auto kKeyAndLoc = Cursor::kKeyAndLoc; -const auto kJustExistance = Cursor::kJustExistance; - +const auto kRecordId = Cursor::KeyInclusion::kExclude; +const auto kRecordIdAndKey = Cursor::KeyInclusion::kInclude; struct Fixture { Fixture(Uniqueness uniqueness, Direction direction, int nToInsert) @@ -60,6 +66,7 @@ struct Fixture { harness(newSortedDataInterfaceHarnessHelper()), sorted(harness->newSortedDataInterface(uniqueness == kUnique, /*partial*/ false)), opCtx(harness->newOperationContext()), + globalLock(opCtx.get(), MODE_X), cursor(sorted->newCursor(opCtx.get(), direction == kForward)), firstKey(makeKeyStringForSeek(sorted.get(), BSON("" << (direction == kForward ? 
1 : nToInsert)), @@ -83,14 +90,15 @@ struct Fixture { std::unique_ptr harness; std::unique_ptr sorted; ServiceContext::UniqueOperationContext opCtx; + Lock::GlobalLock globalLock; std::unique_ptr cursor; - KeyString::Value firstKey; + key_string::Value firstKey; size_t itemsProcessed = 0; }; void BM_Advance(benchmark::State& state, Direction direction, - Cursor::RequestedInfo requestedInfo, + Cursor::KeyInclusion keyInclusion, Uniqueness uniqueness) { Fixture fix(uniqueness, direction, 100'000); @@ -98,7 +106,7 @@ void BM_Advance(benchmark::State& state, for (auto _ : state) { fix.cursor->seek(fix.firstKey); for (int i = 1; i < fix.nToInsert; i++) - fix.cursor->next(requestedInfo); + fix.cursor->next(keyInclusion); fix.itemsProcessed += fix.nToInsert; } ASSERT(!fix.cursor->next()); @@ -114,7 +122,7 @@ void BM_AdvanceWithEnd(benchmark::State& state, Direction direction, Uniqueness BSONObj lastKey = BSON("" << (direction == kForward ? fix.nToInsert : 1)); fix.cursor->setEndPosition(lastKey, /*inclusive*/ true); for (int i = 1; i < fix.nToInsert; i++) - fix.cursor->next(kWantLoc); + fix.cursor->next(kRecordId); fix.itemsProcessed += fix.nToInsert; } ASSERT(!fix.cursor->next()); @@ -122,19 +130,15 @@ void BM_AdvanceWithEnd(benchmark::State& state, Direction direction, Uniqueness }; -BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardLoc, kForward, kWantLoc, kNonUnique); -BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardKeyAndLoc, kForward, kKeyAndLoc, kNonUnique); -BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardLocUnique, kForward, kWantLoc, kUnique); -BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardKeyAndLocUnique, kForward, kKeyAndLoc, kUnique); - -BENCHMARK_CAPTURE(BM_Advance, AdvanceBackwardLoc, kBackward, kWantLoc, kNonUnique); -BENCHMARK_CAPTURE(BM_Advance, AdvanceBackwardKeyAndLoc, kBackward, kKeyAndLoc, kNonUnique); -BENCHMARK_CAPTURE(BM_Advance, AdvanceBackwardLocUnique, kBackward, kWantLoc, kUnique); -BENCHMARK_CAPTURE(BM_Advance, AdvanceBackwardKeyAndLocUnique, kBackward, kKeyAndLoc, kUnique); +BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardLoc, kForward, kRecordId, kNonUnique); +BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardKeyAndLoc, kForward, kRecordIdAndKey, kNonUnique); +BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardLocUnique, kForward, kRecordId, kUnique); +BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardKeyAndLocUnique, kForward, kRecordIdAndKey, kUnique); -// TODO(SERVER-72575): Remove these two cases -BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardJustExistance, kForward, kJustExistance, kNonUnique); -BENCHMARK_CAPTURE(BM_Advance, AdvanceForwardWantKey, kForward, kWantKey, kNonUnique); +BENCHMARK_CAPTURE(BM_Advance, AdvanceBackwardLoc, kBackward, kRecordId, kNonUnique); +BENCHMARK_CAPTURE(BM_Advance, AdvanceBackwardKeyAndLoc, kBackward, kRecordIdAndKey, kNonUnique); +BENCHMARK_CAPTURE(BM_Advance, AdvanceBackwardLocUnique, kBackward, kRecordId, kUnique); +BENCHMARK_CAPTURE(BM_Advance, AdvanceBackwardKeyAndLocUnique, kBackward, kRecordIdAndKey, kUnique); BENCHMARK_CAPTURE(BM_AdvanceWithEnd, AdvanceForward, kForward, kNonUnique); BENCHMARK_CAPTURE(BM_AdvanceWithEnd, AdvanceForwardUnique, kForward, kUnique); diff --git a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp index 18c0af48dfa64..c3fb52efcbf78 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_bulkbuilder.cpp @@ -27,13 +27,23 @@ * it in the license file. 
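Editor's note: the cursor benchmark above reflects the replacement of the RequestedInfo flag set by the two-valued KeyInclusion enum on SortedDataInterface::Cursor. A minimal sketch of the cursor-side usage, assuming a fixture like the benchmark's (sorted, opCtx, firstKey); illustrative only, not part of the patch:

```cpp
// Sketch: advance a cursor with and without materializing the index key.
auto cursor = sorted->newCursor(opCtx.get());
cursor->seek(firstKey);
// Only the RecordId is needed, so the key does not have to be materialized.
auto ridOnly = cursor->next(SortedDataInterface::Cursor::KeyInclusion::kExclude);
// Both key and RecordId are returned (the default).
auto keyAndRid = cursor->next(SortedDataInterface::Cursor::KeyInclusion::kInclude);
```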
*/ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -61,6 +71,7 @@ TEST(SortedDataInterface, BuilderAddKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -73,7 +84,8 @@ TEST(SortedDataInterface, BuilderAddKeyString) { const std::unique_ptr sorted( harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false)); - KeyString::Builder keyString1(sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1); + key_string::Builder keyString1( + sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1); { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -92,6 +104,7 @@ TEST(SortedDataInterface, BuilderAddKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -122,6 +135,7 @@ TEST(SortedDataInterface, BuilderAddKeyWithReservedRecordIdLong) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -149,6 +163,7 @@ TEST(SortedDataInterface, BuilderAddCompoundKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -180,6 +195,7 @@ TEST(SortedDataInterface, BuilderAddSameKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -193,9 +209,9 @@ TEST(SortedDataInterface, BuilderAddSameKeyString) { const std::unique_ptr sorted( harnessHelper->newSortedDataInterface(/*unique=*/true, /*partial=*/false)); - KeyString::Builder keyStringLoc1( + key_string::Builder keyStringLoc1( sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1); - KeyString::Builder keyStringLoc2( + key_string::Builder keyStringLoc2( sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc2); { @@ -216,6 +232,7 @@ TEST(SortedDataInterface, BuilderAddSameKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -245,6 +262,7 @@ TEST(SortedDataInterface, BuilderAddSameKeyWithDupsAllowed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); 
ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } } @@ -258,9 +276,9 @@ TEST(SortedDataInterface, BuilderAddSameKeyStringWithDupsAllowed) { const std::unique_ptr sorted( harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false)); - KeyString::Builder keyStringLoc1( + key_string::Builder keyStringLoc1( sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1); - KeyString::Builder keyStringLoc2( + key_string::Builder keyStringLoc2( sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc2); { @@ -281,6 +299,7 @@ TEST(SortedDataInterface, BuilderAddSameKeyStringWithDupsAllowed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } } @@ -310,6 +329,7 @@ TEST(SortedDataInterface, BuilderAddMultipleKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } } @@ -322,9 +342,12 @@ TEST(SortedDataInterface, BuilderAddMultipleKeyStrings) { const std::unique_ptr sorted( harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false)); - KeyString::Builder keyString1(sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1); - KeyString::Builder keyString2(sorted->getKeyStringVersion(), key2, sorted->getOrdering(), loc2); - KeyString::Builder keyString3(sorted->getKeyStringVersion(), key3, sorted->getOrdering(), loc3); + key_string::Builder keyString1( + sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1); + key_string::Builder keyString2( + sorted->getKeyStringVersion(), key2, sorted->getOrdering(), loc2); + key_string::Builder keyString3( + sorted->getKeyStringVersion(), key3, sorted->getOrdering(), loc3); { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); @@ -345,6 +368,7 @@ TEST(SortedDataInterface, BuilderAddMultipleKeyStrings) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } } @@ -376,6 +400,7 @@ TEST(SortedDataInterface, BuilderAddMultipleCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } } diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp index 6060ec9a95f71..376a792e182c2 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_cursor.cpp @@ -27,15 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - -#include +#include +#include #include - +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -53,9 +66,9 @@ TEST(SortedDataInterface, CursorIsEOFWhenEmpty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), true, true))); - // Cursor at EOF should remain at EOF when advanced ASSERT(!cursor->next()); } @@ -74,6 +87,7 @@ TEST(SortedDataInterface, CursorIsEOFWhenEmptyReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -99,6 +113,7 @@ TEST(SortedDataInterface, ExhaustCursor) { int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -110,11 +125,13 @@ TEST(SortedDataInterface, ExhaustCursor) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); for (int i = 0; i < nToInsert; i++) { auto entry = i == 0 @@ -139,15 +156,16 @@ TEST(SortedDataInterface, ExhaustKeyStringCursor) { ASSERT(sorted->isEmpty(opCtx.get())); } - std::vector keyStrings; + std::vector keyStrings; int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); RecordId loc(42, i * 2); - KeyString::Value ks = makeKeyString(sorted.get(), key, loc); + key_string::Value ks = makeKeyString(sorted.get(), key, loc); keyStrings.push_back(ks); ASSERT_OK(sorted->insert(opCtx.get(), ks, true)); uow.commit(); @@ -156,11 +174,13 @@ TEST(SortedDataInterface, ExhaustKeyStringCursor) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext 
opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); for (int i = 0; i < nToInsert; i++) { auto entry = i == 0 ? cursor->seekForKeyString( @@ -191,6 +211,7 @@ TEST(SortedDataInterface, ExhaustCursorReversed) { int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -202,11 +223,13 @@ TEST(SortedDataInterface, ExhaustCursorReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); for (int i = nToInsert - 1; i >= 0; i--) { @@ -232,15 +255,16 @@ TEST(SortedDataInterface, ExhaustKeyStringCursorReversed) { ASSERT(sorted->isEmpty(opCtx.get())); } - std::vector keyStrings; + std::vector keyStrings; int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); RecordId loc(42, i * 2); - KeyString::Value ks = makeKeyString(sorted.get(), key, loc); + key_string::Value ks = makeKeyString(sorted.get(), key, loc); keyStrings.push_back(ks); ASSERT_OK(sorted->insert(opCtx.get(), ks, true)); uow.commit(); @@ -249,11 +273,13 @@ TEST(SortedDataInterface, ExhaustKeyStringCursorReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); for (int i = nToInsert - 1; i >= 0; i--) { @@ -279,6 +305,7 @@ TEST(SortedDataInterface, CursorIterate1) { int N = 5; for (int i = 0; i < N; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -289,6 +316,7 @@ TEST(SortedDataInterface, CursorIterate1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); int n = 0; for (auto entry = cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), true, true)); @@ -309,6 +337,7 @@ TEST(SortedDataInterface, CursorIterate1WithSaveRestore) { int N = 5; for (int i = 0; i < N; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -319,6 +348,7 @@ TEST(SortedDataInterface, CursorIterate1WithSaveRestore) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock 
globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); int n = 0; for (auto entry = cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), true, true)); @@ -342,6 +372,7 @@ TEST(SortedDataInterface, CursorIterateAllDupKeysWithSaveRestore) { int N = 5; for (int i = 0; i < N; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -352,6 +383,7 @@ TEST(SortedDataInterface, CursorIterateAllDupKeysWithSaveRestore) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); int n = 0; for (auto entry = cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), true, true)); @@ -372,6 +404,7 @@ void testBoundaries(bool unique, bool forward, bool inclusive) { harnessHelper->newSortedDataInterface(unique, /*partial=*/false)); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); int nToInsert = 10; diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp index 83f9f599d2c70..44ebdcb4690fe 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_advanceto.cpp @@ -27,12 +27,24 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include +#include #include - +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -54,6 +66,7 @@ TEST(SortedDataInterface, AdvanceTo) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -69,11 +82,13 @@ TEST(SortedDataInterface, AdvanceTo) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -121,6 +136,7 @@ TEST(SortedDataInterface, AdvanceToReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); 
ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -136,12 +152,14 @@ TEST(SortedDataInterface, AdvanceToReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } { bool isForward = false; const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), isForward)); @@ -188,6 +206,7 @@ TEST(SortedDataInterface, AdvanceToKeyBeforeCursorPosition) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -198,11 +217,13 @@ TEST(SortedDataInterface, AdvanceToKeyBeforeCursorPosition) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -238,6 +259,7 @@ TEST(SortedDataInterface, AdvanceToKeyAfterCursorPositionReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -248,12 +270,14 @@ TEST(SortedDataInterface, AdvanceToKeyAfterCursorPositionReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { bool isForward = false; const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), isForward)); @@ -292,6 +316,7 @@ TEST(SortedDataInterface, AdvanceToKeyAtCursorPosition) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -301,11 +326,13 @@ TEST(SortedDataInterface, AdvanceToKeyAtCursorPosition) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -343,6 +370,7 @@ TEST(SortedDataInterface, AdvanceToKeyAtCursorPositionReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork 
uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -352,12 +380,14 @@ TEST(SortedDataInterface, AdvanceToKeyAtCursorPositionReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { bool isForward = false; const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), isForward)); @@ -395,6 +425,7 @@ TEST(SortedDataInterface, AdvanceToExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -410,11 +441,13 @@ TEST(SortedDataInterface, AdvanceToExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -461,6 +494,7 @@ TEST(SortedDataInterface, AdvanceToExclusiveReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -476,12 +510,14 @@ TEST(SortedDataInterface, AdvanceToExclusiveReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } { bool isForward = false; const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), isForward)); @@ -530,6 +566,7 @@ TEST(SortedDataInterface, AdvanceToIndirect) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -541,11 +578,13 @@ TEST(SortedDataInterface, AdvanceToIndirect) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -586,6 +625,7 @@ TEST(SortedDataInterface, AdvanceToIndirectReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), 
makeKeyString(sorted.get(), key1, loc1), true)); @@ -597,11 +637,13 @@ TEST(SortedDataInterface, AdvanceToIndirectReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -645,6 +687,7 @@ TEST(SortedDataInterface, AdvanceToIndirectExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -656,11 +699,13 @@ TEST(SortedDataInterface, AdvanceToIndirectExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -711,6 +756,7 @@ TEST(SortedDataInterface, AdvanceToIndirectExclusiveReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -722,12 +768,14 @@ TEST(SortedDataInterface, AdvanceToIndirectExclusiveReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } { bool isForward = false; const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), isForward)); @@ -775,6 +823,7 @@ TEST(SortedDataInterface, AdvanceToCompoundWithPrefixAndSuffixInclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -795,11 +844,13 @@ TEST(SortedDataInterface, AdvanceToCompoundWithPrefixAndSuffixInclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true)), @@ -857,6 +908,7 @@ TEST(SortedDataInterface, AdvanceToCompoundWithPrefixExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -875,11 +927,13 @@ TEST(SortedDataInterface, 
AdvanceToCompoundWithPrefixExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true)), @@ -936,6 +990,7 @@ TEST(SortedDataInterface, AdvanceToCompoundWithPrefixAndSuffixExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -954,11 +1009,13 @@ TEST(SortedDataInterface, AdvanceToCompoundWithPrefixAndSuffixExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true)), @@ -1015,6 +1072,7 @@ TEST(SortedDataInterface, AdvanceToCompoundWithSuffixExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -1033,11 +1091,13 @@ TEST(SortedDataInterface, AdvanceToCompoundWithSuffixExclusive) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true)), diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp index 2e0e20e8a4b4d..b6172dd7f2474 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_end_position.cpp @@ -27,19 +27,26 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/storage/index_entry_comparison.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { // Tests setEndPosition with next(). 
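The hunks in sorted_data_interface_test.cpp above all apply one mechanical rule to each scoped block: blocks that write through a WriteUnitOfWork now take the global lock in MODE_X, and blocks that only open a cursor or call numEntries() take it in MODE_S. A minimal sketch of the two shapes, illustrative only and reusing the fixtures these tests already define (harnessHelper, sorted, key1, loc1):

```cpp
// Writer block: exclusive global lock around the WriteUnitOfWork.
{
    const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
    Lock::GlobalLock globalLock(opCtx.get(), MODE_X);
    WriteUnitOfWork uow(opCtx.get());
    ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true));
    uow.commit();
}
// Reader block: a shared global lock is enough for numEntries() and cursor reads.
{
    const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
    Lock::GlobalLock globalLock(opCtx.get(), MODE_S);
    ASSERT_EQUALS(1, sorted->numEntries(opCtx.get()));
}
```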
void testSetEndPosition_Next_Forward(bool unique, bool inclusive) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -50,6 +57,9 @@ void testSetEndPosition_Next_Forward(bool unique, bool inclusive) { {key5, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + // Dup key on end point. Illegal for unique indexes. if (!unique) insertToIndex(opCtx.get(), sorted.get(), {{key3, loc2}}); @@ -84,7 +94,6 @@ TEST(SortedDataInterface, SetEndPosition_Next_Forward_Standard_Exclusive) { void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -95,6 +104,9 @@ void testSetEndPosition_Next_Reverse(bool unique, bool inclusive) { {key5, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + // Dup key on end point. Illegal for unique indexes. if (!unique) insertToIndex(opCtx.get(), sorted.get(), {{key3, loc2}}); @@ -130,7 +142,6 @@ TEST(SortedDataInterface, SetEndPosition_Next_Reverse_Standard_Exclusive) { // Tests setEndPosition with seek(). void testSetEndPosition_Seek_Forward(bool unique, bool inclusive) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -140,6 +151,9 @@ void testSetEndPosition_Seek_Forward(bool unique, bool inclusive) { {key4, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get()); cursor->setEndPosition(key3, inclusive); @@ -177,7 +191,6 @@ TEST(SortedDataInterface, SetEndPosition_Seek_Forward_Standard_Exclusive) { void testSetEndPosition_Seek_Reverse(bool unique, bool inclusive) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -187,6 +200,9 @@ void testSetEndPosition_Seek_Reverse(bool unique, bool inclusive) { {key4, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get(), false); cursor->setEndPosition(key2, inclusive); @@ -226,7 +242,6 @@ TEST(SortedDataInterface, SetEndPosition_Seek_Reverse_Standard_Exclusive) { // Test that restore never lands on the wrong side of the endPosition. void testSetEndPosition_Restore_Forward(bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -236,6 +251,9 @@ void testSetEndPosition_Restore_Forward(bool unique) { {key4, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get()); cursor->setEndPosition(key3, false); // Should never see key3 or key4. 
@@ -267,7 +285,6 @@ TEST(SortedDataInterface, SetEndPosition_Restore_Forward_Standard) { void testSetEndPosition_Restore_Reverse(bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -277,6 +294,9 @@ void testSetEndPosition_Restore_Reverse(bool unique) { {key4, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get(), false); cursor->setEndPosition(key2, false); // Should never see key1 or key2. @@ -313,7 +333,6 @@ TEST(SortedDataInterface, SetEndPosition_Restore_Reverse_Standard) { // restore end cursors would tend to fail this test. void testSetEndPosition_RestoreEndCursor_Forward(bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -321,6 +340,9 @@ void testSetEndPosition_RestoreEndCursor_Forward(bool unique) { {key4, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get()); cursor->setEndPosition(key2, true); @@ -351,7 +373,6 @@ TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Forward_Standard) { void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -359,6 +380,9 @@ void testSetEndPosition_RestoreEndCursor_Reverse(bool unique) { {key4, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get(), false); cursor->setEndPosition(key3, true); @@ -390,7 +414,6 @@ TEST(SortedDataInterface, SetEndPosition_RestoreEndCursor_Reverse_Unique) { // inclusive flag or direction. 
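Besides adding the lock, the helper rewrites above also reorder setup: the index is created and seeded first, and only afterwards does the helper create its operation context and take a MODE_X global lock for the rest of the test. A hedged sketch of the resulting shape, with a hypothetical helper name and the file's existing key/loc fixtures:

```cpp
void testSetEndPosition_Sketch(bool unique, bool inclusive) {
    const auto harnessHelper = newSortedDataInterfaceHarnessHelper();
    // 1. Build and seed the index before the test body holds any lock.
    auto sorted = harnessHelper->newSortedDataInterface(unique,
                                                        /*partial=*/false,
                                                        {
                                                            {key1, loc1},
                                                            {key4, loc1},
                                                        });
    // 2. Only then create the operation context and hold MODE_X for the cursor work.
    auto opCtx = harnessHelper->newOperationContext();
    Lock::GlobalLock globalLock(opCtx.get(), MODE_X);

    auto cursor = sorted->newCursor(opCtx.get());
    cursor->setEndPosition(key3, inclusive);
    // ... seek/next assertions as in the real helpers ...
}
```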
void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -399,6 +422,9 @@ void testSetEndPosition_Empty_Forward(bool unique, bool inclusive) { {key3, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get()); cursor->setEndPosition(BSONObj(), inclusive); @@ -423,7 +449,6 @@ TEST(SortedDataInterface, SetEndPosition_Empty_Forward_Standard_Exclusive) { void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -432,6 +457,9 @@ void testSetEndPosition_Empty_Reverse(bool unique, bool inclusive) { {key3, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get(), false); cursor->setEndPosition(BSONObj(), inclusive); @@ -456,9 +484,15 @@ TEST(SortedDataInterface, SetEndPosition_Empty_Reverse_Standard_Exclusive) { void testSetEndPosition_Character_Limits(bool unique, bool inclusive) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); + auto sorted = harnessHelper->newSortedDataInterface(unique, + /*partial=*/false, + { + {key7, loc1}, + {key8, loc1}, + }); + auto opCtx = harnessHelper->newOperationContext(); - auto sorted = harnessHelper->newSortedDataInterface( - unique, /*partial=*/false, {{key7, loc1}, {key8, loc1}}); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); auto cursor = sorted->newCursor(opCtx.get()); cursor->setEndPosition(key7, inclusive); diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp index 4fe50ffb511d1..6f0f8183d8940 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_locate.cpp @@ -27,12 +27,25 @@ * it in the license file. 
*/ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -46,6 +59,7 @@ TEST(SortedDataInterface, Locate) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true))); @@ -53,6 +67,7 @@ TEST(SortedDataInterface, Locate) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -62,6 +77,7 @@ TEST(SortedDataInterface, Locate) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -79,6 +95,7 @@ TEST(SortedDataInterface, LocateReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), key1, false, true))); @@ -86,6 +103,7 @@ TEST(SortedDataInterface, LocateReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -95,6 +113,7 @@ TEST(SortedDataInterface, LocateReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -113,12 +132,14 @@ TEST(SortedDataInterface, LocateCompoundKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true))); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -129,6 +150,7 @@ TEST(SortedDataInterface, LocateCompoundKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr 
cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true)), @@ -146,6 +168,7 @@ TEST(SortedDataInterface, LocateCompoundKeyReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, false, true))); @@ -153,6 +176,7 @@ TEST(SortedDataInterface, LocateCompoundKeyReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -163,6 +187,7 @@ TEST(SortedDataInterface, LocateCompoundKeyReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -181,12 +206,14 @@ TEST(SortedDataInterface, LocateMultiple) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true))); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -197,6 +224,7 @@ TEST(SortedDataInterface, LocateMultiple) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -207,6 +235,7 @@ TEST(SortedDataInterface, LocateMultiple) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key3, loc3), true)); @@ -216,6 +245,7 @@ TEST(SortedDataInterface, LocateMultiple) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key2, true, true)), @@ -240,6 +270,7 @@ TEST(SortedDataInterface, LocateMultipleReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), key3, true, true))); @@ -247,6 +278,7 @@ TEST(SortedDataInterface, LocateMultipleReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -257,6 +289,7 @@ TEST(SortedDataInterface, LocateMultipleReversed) { { const ServiceContext::UniqueOperationContext 
opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -268,6 +301,7 @@ TEST(SortedDataInterface, LocateMultipleReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key3, loc3), true)); @@ -277,6 +311,7 @@ TEST(SortedDataInterface, LocateMultipleReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -302,12 +337,14 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true))); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -322,6 +359,7 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true)), @@ -333,6 +371,7 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -345,6 +384,7 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true)), @@ -366,6 +406,7 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey3a, false, true))); @@ -373,6 +414,7 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -387,6 +429,7 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -399,6 +442,7 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + 
Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -411,6 +455,7 @@ TEST(SortedDataInterface, LocateMultipleCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -433,12 +478,14 @@ TEST(SortedDataInterface, LocateIndirect) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true))); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -449,6 +496,7 @@ TEST(SortedDataInterface, LocateIndirect) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, false)), @@ -458,6 +506,7 @@ TEST(SortedDataInterface, LocateIndirect) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key3, loc3), true)); @@ -467,6 +516,7 @@ TEST(SortedDataInterface, LocateIndirect) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -486,6 +536,7 @@ TEST(SortedDataInterface, LocateIndirectReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), key3, false, true))); @@ -493,6 +544,7 @@ TEST(SortedDataInterface, LocateIndirectReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -503,6 +555,7 @@ TEST(SortedDataInterface, LocateIndirectReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -513,6 +566,7 @@ TEST(SortedDataInterface, LocateIndirectReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key3, loc3), true)); @@ -522,6 +576,7 @@ TEST(SortedDataInterface, LocateIndirectReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + 
Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -542,12 +597,14 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, true))); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -562,6 +619,7 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey1a, true, false)), @@ -572,6 +630,7 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -584,6 +643,7 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey2a, true, true)), @@ -602,6 +662,7 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), compoundKey3a, false, true))); @@ -609,6 +670,7 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -623,6 +685,7 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -634,6 +697,7 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -646,6 +710,7 @@ TEST(SortedDataInterface, LocateIndirectCompoundKeysReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -671,6 +736,7 @@ TEST(SortedDataInterface, LocateEmpty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); 
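The Locate and LocateIndirect hunks in this file differ mainly in the last argument to makeKeyStringForSeek. Roughly, for an index containing {key1, loc1} and {key2, loc2} with key1 < key2, the inclusive form lands on the key itself while the exclusive form starts just past it; a hedged sketch, reusing the lock mode and fixtures used throughout this file:

```cpp
const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext());
Lock::GlobalLock globalLock(opCtx.get(), MODE_S);
const auto cursor = sorted->newCursor(opCtx.get());

// Inclusive forward seek: returns the entry for key1 itself.
ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, /*inclusive=*/true)),
          IndexKeyEntry(key1, loc1));
// Exclusive forward seek: skips key1 and returns the next entry.
ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, /*inclusive=*/false)),
          IndexKeyEntry(key2, loc2));
```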
ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), true, true))); @@ -692,6 +758,7 @@ TEST(SortedDataInterface, LocateEmptyReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); @@ -711,12 +778,14 @@ TEST(SortedDataInterface, Locate1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT(!cursor->seek(makeKeyStringForSeek(sorted.get(), key, true, true))); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key, loc), true)); @@ -726,6 +795,7 @@ TEST(SortedDataInterface, Locate1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key, true, true)), IndexKeyEntry(key, loc)); @@ -739,9 +809,9 @@ TEST(SortedDataInterface, Locate2) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); - ASSERT_OK(sorted->insert( opCtx.get(), makeKeyString(sorted.get(), BSON("" << 1), RecordId(1, 2)), true)); ASSERT_OK(sorted->insert( @@ -754,6 +824,7 @@ TEST(SortedDataInterface, Locate2) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), BSON("a" << 2), true, true)), IndexKeyEntry(BSON("" << 2), RecordId(1, 4))); @@ -770,9 +841,9 @@ TEST(SortedDataInterface, Locate2Empty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); - ASSERT_OK(sorted->insert( opCtx.get(), makeKeyString(sorted.get(), BSON("" << 1), RecordId(1, 2)), true)); ASSERT_OK(sorted->insert( @@ -785,6 +856,7 @@ TEST(SortedDataInterface, Locate2Empty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), true, true)), IndexKeyEntry(BSON("" << 1), RecordId(1, 2))); @@ -792,6 +864,7 @@ TEST(SortedDataInterface, Locate2Empty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), false, false)), @@ -811,6 +884,7 @@ TEST(SortedDataInterface, Locate3Descending) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); for (int i = 0; i < 10; i++) { if (i == 6) continue; @@ -823,6 +897,7 @@ TEST(SortedDataInterface, 
Locate3Descending) { } const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); std::unique_ptr cursor(sorted->newCursor(opCtx.get(), true)); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), BSON("" << 5), true, true)), buildEntry(5)); @@ -871,6 +946,7 @@ TEST(SortedDataInterface, Locate4) { { auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cursor = sorted->newCursor(opCtx.get()); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), BSON("a" << 1), true, true)), IndexKeyEntry(BSON("" << 1), RecordId(1, 2))); @@ -883,6 +959,7 @@ TEST(SortedDataInterface, Locate4) { { auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); auto cursor = sorted->newCursor(opCtx.get(), false); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), BSON("a" << 1), false, true)), IndexKeyEntry(BSON("" << 1), RecordId(1, 6))); diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp index c2f4398a8e2c0..a6ff21876026a 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_saverestore.cpp @@ -27,15 +27,26 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include +#include #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -50,12 +61,14 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursor) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -67,11 +80,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursor) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); int i = 0; for (auto entry = cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), true, true)); @@ -98,12 +113,14 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithEndPositio { const 
ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -115,11 +132,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithEndPositio { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); cursor->setEndPosition(BSON("" << std::numeric_limits::infinity()), true); @@ -148,12 +167,14 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -165,11 +186,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); int i = nToInsert - 1; @@ -197,12 +220,14 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorOnIdIndex) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -214,11 +239,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorOnIdIndex) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); int i = nToInsert - 1; @@ -246,12 +273,14 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorReversedOnIdIn { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } int nToInsert = 10; for (int i = 0; i < nToInsert; 
i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -263,11 +292,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorReversedOnIdIn { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); int i = nToInsert - 1; @@ -297,12 +328,14 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); RecordId loc(42, i * 2); @@ -314,11 +347,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); int i = 0; for (auto entry = cursor->seek(makeKeyStringForSeek(sorted.get(), BSONObj(), true, true)); @@ -346,12 +381,14 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeysRev { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); RecordId loc(42, i * 2); @@ -363,11 +400,13 @@ TEST(SortedDataInterface, SaveAndRestorePositionWhileIterateCursorWithDupKeysRev { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); int i = nToInsert - 1; @@ -395,11 +434,13 @@ TEST(SortedDataInterface, SavePositionWithoutRestore) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), false)); @@ 
-409,11 +450,13 @@ TEST(SortedDataInterface, SavePositionWithoutRestore) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); cursor->save(); } @@ -428,11 +471,13 @@ TEST(SortedDataInterface, SavePositionWithoutRestoreReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -442,11 +487,13 @@ TEST(SortedDataInterface, SavePositionWithoutRestoreReversed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); const std::unique_ptr cursor( sorted->newCursor(opCtx.get(), false)); cursor->save(); @@ -457,7 +504,6 @@ TEST(SortedDataInterface, SavePositionWithoutRestoreReversed) { // while saved. void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -465,6 +511,9 @@ void testSaveAndRestorePositionSeesNewInserts(bool forward, bool unique) { {key3, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get(), forward); const auto seekPoint = forward ? key1 : key3; @@ -494,7 +543,6 @@ TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInserts_Reverse_Standard) // inserted while saved and the current position removed. void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -502,6 +550,9 @@ void testSaveAndRestorePositionSeesNewInsertsAfterRemove(bool forward, bool uniq {key3, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get(), forward); const auto seekPoint = forward ? key1 : key3; @@ -537,13 +588,15 @@ TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterRemove_Revers // cursor EOF. 
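For reference, this is the behavior the testSaveAndRestorePositionSeesNewInserts helpers pin down: a cursor saved, then restored, must observe entries inserted while it was saved. The sketch below is illustrative only; insertToIndex is the harness helper used earlier in this patch, the index is assumed to start with {key1, loc1} and {key3, loc1}, and the save()/restore()/next() calls are assumed to match the cursor API the real helpers use:

```cpp
// The surrounding opCtx already holds the global lock in MODE_X.
auto cursor = sorted->newCursor(opCtx.get());
ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)),
          IndexKeyEntry(key1, loc1));

cursor->save();
insertToIndex(opCtx.get(), sorted.get(), {{key2, loc1}});  // inserted between key1 and key3
cursor->restore();

// The newly inserted key2 entry must be visible on the next advance.
ASSERT_EQ(cursor->next(), IndexKeyEntry(key2, loc1));
```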
void testSaveAndRestorePositionSeesNewInsertsAfterEOF(bool forward, bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false, { {key1, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get(), forward); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, forward, true)), @@ -579,7 +632,6 @@ TEST(SortedDataInterface, SaveAndRestorePositionSeesNewInsertsAfterEOF_Reverse_S // Make sure we restore to a RecordId at or ahead of save point if same key. TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Forward) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ false, /*partial=*/false, { @@ -588,6 +640,9 @@ TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_F {key3, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get()); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key1, true, true)), @@ -622,11 +677,17 @@ TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_F // Test that cursors over unique indices will never return the same key twice. TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_Forward) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); + auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ true, + /*partial=*/false, + { + {key1, loc1}, + {key2, loc2}, + {key3, loc2}, + {key4, loc2}, + }); + auto opCtx = harnessHelper->newOperationContext(); - auto sorted = harnessHelper->newSortedDataInterface( - /*unique*/ true, - /*partial=*/false, - {{key1, loc1}, {key2, loc2}, {key3, loc2}, {key4, loc2}}); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); auto cursor = sorted->newCursor(opCtx.get()); @@ -665,7 +726,6 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_For // Make sure we restore to a RecordId at or ahead of save point if same key on reverse cursor. TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_Reverse) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ false, /*partial=*/false, { @@ -674,6 +734,9 @@ TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_R {key2, loc2}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get(), false); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key2, false, true)), @@ -708,11 +771,17 @@ TEST(SortedDataInterface, SaveAndRestorePositionStandardIndexConsidersRecordId_R // Test that reverse cursors over unique indices will never return the same key twice. 
TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_Reverse) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); + auto sorted = harnessHelper->newSortedDataInterface(/*unique*/ true, + /*partial=*/false, + { + {key1, loc1}, + {key2, loc1}, + {key3, loc1}, + {key4, loc2}, + }); + auto opCtx = harnessHelper->newOperationContext(); - auto sorted = harnessHelper->newSortedDataInterface( - /*unique*/ true, - /*partial=*/false, - {{key1, loc1}, {key2, loc1}, {key3, loc1}, {key4, loc2}}); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); auto cursor = sorted->newCursor(opCtx.get(), false); @@ -751,7 +820,6 @@ TEST(SortedDataInterface, SaveAndRestorePositionUniqueIndexWontReturnDupKeys_Rev // Ensure that SaveUnpositioned allows later use of the cursor. TEST(SortedDataInterface, SaveUnpositionedAndRestore) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false, { @@ -760,6 +828,9 @@ TEST(SortedDataInterface, SaveUnpositionedAndRestore) { {key3, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto cursor = sorted->newCursor(opCtx.get()); ASSERT_EQ(cursor->seek(makeKeyStringForSeek(sorted.get(), key2, true, true)), diff --git a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp index ecc07e23b3092..9db61b441ed9d 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_cursor_seek_exact.cpp @@ -27,19 +27,23 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include #include +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { + // Tests findLoc when it hits something. void testFindLoc_Hit(bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -48,6 +52,9 @@ void testFindLoc_Hit(bool unique) { {key3, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); + auto loc = sorted->findLoc(opCtx.get(), makeKeyString(sorted.get(), key2)); ASSERT_EQ(loc, loc1); } @@ -61,7 +68,6 @@ TEST(SortedDataInterface, SeekExact_Hit_Standard) { // Tests findLoc when it doesn't hit the query. 
void testFindLoc_Miss(bool unique) { const auto harnessHelper = newSortedDataInterfaceHarnessHelper(); - auto opCtx = harnessHelper->newOperationContext(); auto sorted = harnessHelper->newSortedDataInterface(unique, /*partial=*/false, { @@ -70,6 +76,9 @@ void testFindLoc_Miss(bool unique) { {key3, loc1}, }); + auto opCtx = harnessHelper->newOperationContext(); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); + ASSERT_EQ(sorted->findLoc(opCtx.get(), makeKeyString(sorted.get(), key2)), boost::none); } TEST(SortedDataInterface, SeekExact_Miss_Unique) { @@ -78,5 +87,6 @@ TEST(SortedDataInterface, SeekExact_Miss_Unique) { TEST(SortedDataInterface, SeekExact_Miss_Standard) { testFindLoc_Miss(false); } + } // namespace } // namespace mongo diff --git a/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp b/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp index c7d503b50d4b2..caddf5e72e854 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_dupkeycheck.cpp @@ -27,12 +27,19 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include #include +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -47,11 +54,13 @@ TEST(SortedDataInterface, DupKeyCheckAfterInsert) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), false)); @@ -61,11 +70,13 @@ TEST(SortedDataInterface, DupKeyCheckAfterInsert) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), makeKeyString(sorted.get(), key1))); @@ -87,11 +98,13 @@ TEST(SortedDataInterface, DupKeyCheckAfterInsertKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyString1, false)); @@ -101,11 +114,13 @@ TEST(SortedDataInterface, DupKeyCheckAfterInsertKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, 
sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), keyString1WithoutRecordId)); } } @@ -119,11 +134,13 @@ TEST(SortedDataInterface, DupKeyCheckEmpty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), makeKeyString(sorted.get(), key1))); } } @@ -139,11 +156,13 @@ TEST(SortedDataInterface, DupKeyCheckEmptyKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), keyString1WithoutRecordId)); } } @@ -157,11 +176,13 @@ TEST(SortedDataInterface, DupKeyCheckWhenDiskLocBefore) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -171,11 +192,13 @@ TEST(SortedDataInterface, DupKeyCheckWhenDiskLocBefore) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), makeKeyString(sorted.get(), key1))); @@ -193,11 +216,13 @@ TEST(SortedDataInterface, DupKeyCheckWhenDiskLocAfter) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -207,11 +232,13 @@ TEST(SortedDataInterface, DupKeyCheckWhenDiskLocAfter) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), makeKeyString(sorted.get(), key1))); @@ -227,6 +254,7 @@ TEST(SortedDataInterface, DupKeyCheckWithDuplicates) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock 
globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); WriteUnitOfWork uow(opCtx.get()); @@ -237,6 +265,7 @@ TEST(SortedDataInterface, DupKeyCheckWithDuplicates) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); ASSERT_NOT_OK(sorted->dupKeyCheck(opCtx.get(), makeKeyString(sorted.get(), key1))); } @@ -253,6 +282,7 @@ TEST(SortedDataInterface, DupKeyCheckWithDuplicateKeyStrings) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); WriteUnitOfWork uow(opCtx.get()); @@ -263,6 +293,7 @@ TEST(SortedDataInterface, DupKeyCheckWithDuplicateKeyStrings) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); ASSERT_NOT_OK(sorted->dupKeyCheck(opCtx.get(), keyString1WithoutRecordId)); } @@ -275,6 +306,7 @@ TEST(SortedDataInterface, DupKeyCheckWithDeletedFirstEntry) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); WriteUnitOfWork uow(opCtx.get()); @@ -285,6 +317,7 @@ TEST(SortedDataInterface, DupKeyCheckWithDeletedFirstEntry) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true); uow.commit(); @@ -292,6 +325,7 @@ TEST(SortedDataInterface, DupKeyCheckWithDeletedFirstEntry) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), makeKeyString(sorted.get(), key1))); } @@ -304,6 +338,7 @@ TEST(SortedDataInterface, DupKeyCheckWithDeletedSecondEntry) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); WriteUnitOfWork uow(opCtx.get()); @@ -314,12 +349,14 @@ TEST(SortedDataInterface, DupKeyCheckWithDeletedSecondEntry) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc2), true); uow.commit(); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); ASSERT_OK(sorted->dupKeyCheck(opCtx.get(), makeKeyString(sorted.get(), key1))); } diff --git a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp index 79b97a96add32..ba68fc02c1b9c 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_fullvalidate.cpp @@ -27,13 +27,22 @@ * it in the license file. 
*/ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include #include -#include "mongo/db/catalog/validate_results.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -53,6 +62,7 @@ TEST(SortedDataInterface, FullValidate) { int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -64,6 +74,7 @@ TEST(SortedDataInterface, FullValidate) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } } diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp index c264134332867..d83e1594f1e6a 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_harness.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_harness.cpp @@ -30,25 +30,45 @@ #include "mongo/db/storage/sorted_data_interface_test_harness.h" #include +#include +#include +#include #include +#include +#include "mongo/bson/ordering.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util_core.h" -auto mongo::SortedDataInterfaceHarnessHelper::newSortedDataInterface( +namespace mongo { +namespace { + +std::function()> + sortedDataInterfaceHarnessFactory; + +} // namespace + +auto SortedDataInterfaceHarnessHelper::newSortedDataInterface( bool unique, bool partial, std::initializer_list toInsert) -> std::unique_ptr { invariant(std::is_sorted( toInsert.begin(), toInsert.end(), IndexEntryComparison(Ordering::make(BSONObj())))); auto index = newSortedDataInterface(unique, partial); - insertToIndex(this, index.get(), toInsert); + auto client = serviceContext()->makeClient("insertToIndex"); + auto opCtx = newOperationContext(client.get()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + insertToIndex(opCtx.get(), index.get(), toInsert); return index; } -void mongo::insertToIndex(OperationContext* opCtx, - SortedDataInterface* index, - std::initializer_list toInsert) { +void insertToIndex(OperationContext* opCtx, + SortedDataInterface* index, + std::initializer_list toInsert) { WriteUnitOfWork wuow(opCtx); for (auto&& entry : toInsert) { ASSERT_OK(index->insert(opCtx, makeKeyString(index, entry.key, entry.loc), true)); @@ -56,9 +76,9 @@ void mongo::insertToIndex(OperationContext* opCtx, wuow.commit(); } -void mongo::removeFromIndex(OperationContext* opCtx, - SortedDataInterface* index, - std::initializer_list toRemove) { +void removeFromIndex(OperationContext* 
opCtx, + SortedDataInterface* index, + std::initializer_list toRemove) { WriteUnitOfWork wuow(opCtx); for (auto&& entry : toRemove) { index->unindex(opCtx, makeKeyString(index, entry.key, entry.loc), true); @@ -66,42 +86,36 @@ void mongo::removeFromIndex(OperationContext* opCtx, wuow.commit(); } -mongo::KeyString::Value mongo::makeKeyString(SortedDataInterface* sorted, - BSONObj bsonKey, - const boost::optional& rid) { - KeyString::Builder builder(sorted->getKeyStringVersion(), bsonKey, sorted->getOrdering()); +key_string::Value makeKeyString(SortedDataInterface* sorted, + BSONObj bsonKey, + const boost::optional& rid) { + key_string::Builder builder(sorted->getKeyStringVersion(), bsonKey, sorted->getOrdering()); if (rid) { builder.appendRecordId(*rid); } return builder.getValueCopy(); } -mongo::KeyString::Value mongo::makeKeyStringForSeek(SortedDataInterface* sorted, - BSONObj bsonKey, - bool isForward, - bool inclusive) { +key_string::Value makeKeyStringForSeek(SortedDataInterface* sorted, + BSONObj bsonKey, + bool isForward, + bool inclusive) { BSONObj finalKey = BSONObj::stripFieldNames(bsonKey); - KeyString::Builder builder(sorted->getKeyStringVersion(), - finalKey, - sorted->getOrdering(), - isForward == inclusive ? KeyString::Discriminator::kExclusiveBefore - : KeyString::Discriminator::kExclusiveAfter); + key_string::Builder builder(sorted->getKeyStringVersion(), + finalKey, + sorted->getOrdering(), + isForward == inclusive + ? key_string::Discriminator::kExclusiveBefore + : key_string::Discriminator::kExclusiveAfter); return builder.getValueCopy(); } -namespace mongo { -namespace { -std::function()> - sortedDataInterfaceHarnessFactory; -} - void registerSortedDataInterfaceHarnessHelperFactory( - std::function()> factory) { + std::function()> factory) { sortedDataInterfaceHarnessFactory = std::move(factory); } -auto newSortedDataInterfaceHarnessHelper() - -> std::unique_ptr { +auto newSortedDataInterfaceHarnessHelper() -> std::unique_ptr { return sortedDataInterfaceHarnessFactory(); } diff --git a/src/mongo/db/storage/sorted_data_interface_test_harness.h b/src/mongo/db/storage/sorted_data_interface_test_harness.h index 606f621e6dc1a..e974df579c1bf 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_harness.h +++ b/src/mongo/db/storage/sorted_data_interface_test_harness.h @@ -29,13 +29,23 @@ #pragma once +#include +#include +#include #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/jsobj.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" #include "mongo/db/storage/test_harness_helper.h" @@ -111,14 +121,14 @@ void registerSortedDataInterfaceHarnessHelperFactory( std::unique_ptr newSortedDataInterfaceHarnessHelper(); -KeyString::Value makeKeyString(SortedDataInterface* sorted, - BSONObj bsonKey, - const boost::optional& rid = boost::none); +key_string::Value makeKeyString(SortedDataInterface* sorted, + BSONObj bsonKey, + const boost::optional& rid = boost::none); -KeyString::Value makeKeyStringForSeek(SortedDataInterface* sorted, - BSONObj bsonKey, - bool isForward, - bool inclusive); +key_string::Value makeKeyStringForSeek(SortedDataInterface* sorted, 
+ BSONObj bsonKey, + bool isForward, + bool inclusive); /** * Inserts all entries in toInsert into index. @@ -131,13 +141,6 @@ void insertToIndex(OperationContext* opCtx, SortedDataInterface* index, std::initializer_list toInsert); -inline void insertToIndex(HarnessHelper* harness, - SortedDataInterface* index, - std::initializer_list toInsert) { - auto client = harness->serviceContext()->makeClient("insertToIndex"); - insertToIndex(harness->newOperationContext(client.get()).get(), index, toInsert); -} - /** * Removes all entries in toRemove from index. * Always uses dupsAllowed=true. diff --git a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp index d86936fbee3a3..c9151ad716d9e 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_insert.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_insert.cpp @@ -27,14 +27,30 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace { @@ -52,6 +68,7 @@ TEST(SortedDataInterface, Insert) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -61,6 +78,7 @@ TEST(SortedDataInterface, Insert) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -84,6 +102,7 @@ TEST(SortedDataInterface, InsertKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyString1, true)); @@ -93,6 +112,7 @@ TEST(SortedDataInterface, InsertKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -109,11 +129,13 @@ TEST(SortedDataInterface, InsertCompoundKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT(sorted->isEmpty(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + 
Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -124,6 +146,7 @@ TEST(SortedDataInterface, InsertCompoundKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -143,6 +166,7 @@ TEST(SortedDataInterface, InsertSameDiskLoc) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -153,11 +177,13 @@ TEST(SortedDataInterface, InsertSameDiskLoc) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key3, loc1), true)); @@ -167,6 +193,7 @@ TEST(SortedDataInterface, InsertSameDiskLoc) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } } @@ -186,6 +213,7 @@ TEST(SortedDataInterface, InsertSameDiskLocWithDupsAllowed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), false)); @@ -197,11 +225,13 @@ TEST(SortedDataInterface, InsertSameDiskLocWithDupsAllowed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -212,6 +242,7 @@ TEST(SortedDataInterface, InsertSameDiskLocWithDupsAllowed) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } } @@ -230,6 +261,7 @@ TEST(SortedDataInterface, InsertSameKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), false)); @@ -242,6 +274,7 @@ TEST(SortedDataInterface, InsertSameKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -251,6 +284,7 @@ TEST(SortedDataInterface, InsertSameKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_NOT_OK( @@ -261,6 +295,7 
@@ TEST(SortedDataInterface, InsertSameKey) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -278,9 +313,9 @@ TEST(SortedDataInterface, InsertSameKeyString) { const std::unique_ptr sorted( harnessHelper->newSortedDataInterface(/*unique=*/true, /*partial=*/false)); - KeyString::Builder keyStringLoc1( + key_string::Builder keyStringLoc1( sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc1); - KeyString::Builder keyStringLoc2( + key_string::Builder keyStringLoc2( sorted->getKeyStringVersion(), key1, sorted->getOrdering(), loc2); { @@ -290,6 +325,7 @@ TEST(SortedDataInterface, InsertSameKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyStringLoc1.getValueCopy(), false)); @@ -300,6 +336,7 @@ TEST(SortedDataInterface, InsertSameKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -309,6 +346,7 @@ TEST(SortedDataInterface, InsertSameKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_NOT_OK(sorted->insert(opCtx.get(), keyStringLoc2.getValueCopy(), false)); @@ -318,6 +356,7 @@ TEST(SortedDataInterface, InsertSameKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -326,13 +365,13 @@ TEST(SortedDataInterface, InsertSameKeyString) { } } -namespace { - -// Insert the same key multiple times and verify that all entries exists -// in the index when duplicates are allowed. Since it is illegal to open a cursor to an unique -// index while the unique constraint is violated, this is tested by running the test 3 times, -// removing all but one loc each time and verifying the correct loc remains. -void _testInsertSameKeyWithDupsAllowed(const RecordId locs[3]) { +/** + * Insert the same key multiple times and verify that all entries exists in the index when + * duplicates are allowed. Since it is illegal to open a cursor to an unique index while the unique + * constraint is violated, this is tested by running the test 3 times, removing all but one loc each + * time and verifying the correct loc remains. 
+ */ +void testInsertSameKeyWithDupsAllowed(const RecordId locs[3]) { for (int keeper = 0; keeper < 3; keeper++) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -347,6 +386,7 @@ void _testInsertSameKeyWithDupsAllowed(const RecordId locs[3]) { { const ServiceContext::UniqueOperationContext opCtx( harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK( @@ -362,6 +402,7 @@ void _testInsertSameKeyWithDupsAllowed(const RecordId locs[3]) { { const ServiceContext::UniqueOperationContext opCtx( harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); for (int i = 0; i < 3; i++) { @@ -377,6 +418,7 @@ void _testInsertSameKeyWithDupsAllowed(const RecordId locs[3]) { { const ServiceContext::UniqueOperationContext opCtx( harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor( @@ -386,21 +428,19 @@ void _testInsertSameKeyWithDupsAllowed(const RecordId locs[3]) { } } } - -} // namespace - TEST(SortedDataInterface, InsertSameKeyWithDupsAllowedLocsAscending) { const RecordId locs[3] = {loc1, loc2, loc3}; - _testInsertSameKeyWithDupsAllowed(locs); + testInsertSameKeyWithDupsAllowed(locs); } - TEST(SortedDataInterface, InsertSameKeyWithDupsAllowedLocsDescending) { const RecordId locs[3] = {loc3, loc2, loc1}; - _testInsertSameKeyWithDupsAllowed(locs); + testInsertSameKeyWithDupsAllowed(locs); } -// Insert multiple keys and verify that the number of entries -// in the index equals the number that were inserted. +/** + * Insert multiple keys and verify that the number of entries in the index equals the number that + * were inserted. + */ TEST(SortedDataInterface, InsertMultiple) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -413,6 +453,7 @@ TEST(SortedDataInterface, InsertMultiple) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), false)); @@ -423,6 +464,7 @@ TEST(SortedDataInterface, InsertMultiple) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -434,6 +476,7 @@ TEST(SortedDataInterface, InsertMultiple) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key3, loc3), false)); @@ -443,6 +486,7 @@ TEST(SortedDataInterface, InsertMultiple) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -455,7 +499,7 @@ TEST(SortedDataInterface, InsertMultiple) { } } -/* +/** * Insert multiple KeyStrings and verify that the number of entries in the index equals the number * that were inserted. 
*/ @@ -475,6 +519,7 @@ TEST(SortedDataInterface, InsertMultipleKeyStrings) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyString1, false)); @@ -485,6 +530,7 @@ TEST(SortedDataInterface, InsertMultipleKeyStrings) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -496,6 +542,7 @@ TEST(SortedDataInterface, InsertMultipleKeyStrings) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyString3, false)); @@ -505,6 +552,7 @@ TEST(SortedDataInterface, InsertMultipleKeyStrings) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -517,7 +565,7 @@ TEST(SortedDataInterface, InsertMultipleKeyStrings) { } } -/* +/** * Insert multiple KeyStrings and seek to the inserted KeyStrings */ TEST(SortedDataInterface, InsertAndSeekKeyString) { @@ -538,6 +586,7 @@ TEST(SortedDataInterface, InsertAndSeekKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyString1, false)); @@ -548,6 +597,7 @@ TEST(SortedDataInterface, InsertAndSeekKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); const std::unique_ptr cursor(sorted->newCursor(opCtx.get())); @@ -562,7 +612,7 @@ TEST(SortedDataInterface, InsertAndSeekKeyString) { } } -/* +/** * Insert multiple KeyStrings and use findLoc on the inserted KeyStrings. */ TEST(SortedDataInterface, InsertAndSeekExactKeyString) { @@ -583,6 +633,7 @@ TEST(SortedDataInterface, InsertAndSeekExactKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyString1, false)); @@ -593,6 +644,7 @@ TEST(SortedDataInterface, InsertAndSeekExactKeyString) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); ASSERT_EQ(loc1, sorted->findLoc(opCtx.get(), keyString1WithoutRecordId)); @@ -600,8 +652,10 @@ TEST(SortedDataInterface, InsertAndSeekExactKeyString) { } } -// Insert multiple compound keys and verify that the number of entries -// in the index equals the number that were inserted. +/** + * Insert multiple compound keys and verify that the number of entries in the index equals the + * number that were inserted. 
+ */ TEST(SortedDataInterface, InsertMultipleCompoundKeys) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -614,6 +668,7 @@ TEST(SortedDataInterface, InsertMultipleCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -628,11 +683,13 @@ TEST(SortedDataInterface, InsertMultipleCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -645,6 +702,7 @@ TEST(SortedDataInterface, InsertMultipleCompoundKeys) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(5, sorted->numEntries(opCtx.get())); } } @@ -654,6 +712,8 @@ TEST(SortedDataInterface, InsertReservedRecordIdLong) { const std::unique_ptr sorted( harnessHelper->newSortedDataInterface(/*unique=*/false, /*partial=*/false)); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + ASSERT(sorted->isEmpty(opCtx.get())); WriteUnitOfWork uow(opCtx.get()); RecordId reservedLoc(record_id_helpers::reservedIdFor( @@ -673,6 +733,7 @@ TEST(SortedDataInterface, InsertReservedRecordIdIntoUniqueIndex) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); constexpr char reservation[] = { static_cast(0xFF), @@ -689,6 +750,7 @@ TEST(SortedDataInterface, InsertReservedRecordIdIntoUniqueIndex) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); // There is only one reserved RecordId, kWildcardMultikeyMetadataId. 
In order to test that @@ -717,6 +779,7 @@ TEST(SortedDataInterface, InsertWithDups1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -727,6 +790,7 @@ TEST(SortedDataInterface, InsertWithDups1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -737,6 +801,7 @@ TEST(SortedDataInterface, InsertWithDups1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } } @@ -748,6 +813,7 @@ TEST(SortedDataInterface, InsertWithDups2) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -758,6 +824,7 @@ TEST(SortedDataInterface, InsertWithDups2) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -768,6 +835,7 @@ TEST(SortedDataInterface, InsertWithDups2) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } } @@ -779,6 +847,7 @@ TEST(SortedDataInterface, InsertWithDups3AndRollback) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -789,6 +858,7 @@ TEST(SortedDataInterface, InsertWithDups3AndRollback) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -799,6 +869,7 @@ TEST(SortedDataInterface, InsertWithDups3AndRollback) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -810,6 +881,7 @@ TEST(SortedDataInterface, InsertNoDups1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -820,6 +892,7 @@ TEST(SortedDataInterface, InsertNoDups1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -830,6 +903,7 @@ TEST(SortedDataInterface, InsertNoDups1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } } @@ -841,6 +915,7 @@ TEST(SortedDataInterface, InsertNoDups2) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -851,6 
+926,7 @@ TEST(SortedDataInterface, InsertNoDups2) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_NOT_OK(sorted->insert( @@ -861,6 +937,7 @@ TEST(SortedDataInterface, InsertNoDups2) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } @@ -872,6 +949,7 @@ TEST(SortedDataInterface, InsertNoDups3) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -882,6 +960,7 @@ TEST(SortedDataInterface, InsertNoDups3) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_NOT_OK(sorted->insert( @@ -892,6 +971,7 @@ TEST(SortedDataInterface, InsertNoDups3) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } diff --git a/src/mongo/db/storage/sorted_data_interface_test_isempty.cpp b/src/mongo/db/storage/sorted_data_interface_test_isempty.cpp index 220f5661e2f6c..41ecda013a356 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_isempty.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_isempty.cpp @@ -27,12 +27,19 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include #include +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -52,6 +59,7 @@ TEST(SortedDataInterface, IsEmpty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), false)); @@ -68,6 +76,7 @@ TEST(SortedDataInterface, IsEmpty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), false); diff --git a/src/mongo/db/storage/sorted_data_interface_test_keyformat_string.cpp b/src/mongo/db/storage/sorted_data_interface_test_keyformat_string.cpp index 0cb2604d2258a..13ac2de4e6968 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_keyformat_string.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_keyformat_string.cpp @@ -27,14 +27,29 @@ * it in the license file. 
*/ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include +#include +#include +#include #include - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" -#include "mongo/db/storage/key_string.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace { @@ -44,6 +59,8 @@ TEST(SortedDataInterface, KeyFormatStringInsertDuplicates) { const std::unique_ptr sorted(harnessHelper->newSortedDataInterface( /*unique=*/false, /*partial=*/false, KeyFormat::String)); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + ASSERT(sorted->isEmpty(opCtx.get())); char buf1[12]; @@ -115,6 +132,8 @@ TEST(SortedDataInterface, KeyFormatStringUniqueInsertRemoveDuplicates) { const std::unique_ptr sorted(harnessHelper->newSortedDataInterface( /*unique=*/true, /*partial=*/false, KeyFormat::String)); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + ASSERT(sorted->isEmpty(opCtx.get())); std::string buf1(12, 0); @@ -199,6 +218,8 @@ TEST(SortedDataInterface, KeyFormatStringSetEndPosition) { const std::unique_ptr sorted(harnessHelper->newSortedDataInterface( /*unique=*/false, /*partial=*/false, KeyFormat::String)); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + ASSERT(sorted->isEmpty(opCtx.get())); char buf1[12]; @@ -268,6 +289,8 @@ TEST(SortedDataInterface, KeyFormatStringUnindex) { const std::unique_ptr sorted(harnessHelper->newSortedDataInterface( /*unique=*/false, /*partial=*/false, KeyFormat::String)); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + ASSERT(sorted->isEmpty(opCtx.get())); char buf1[12]; @@ -317,6 +340,8 @@ TEST(SortedDataInterface, KeyFormatStringUniqueUnindex) { const std::unique_ptr sorted(harnessHelper->newSortedDataInterface( /*unique=*/true, /*partial=*/false, KeyFormat::String)); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + ASSERT(sorted->isEmpty(opCtx.get())); std::string buf1(12, 0); @@ -369,6 +394,8 @@ TEST(SortedDataInterface, InsertReservedRecordIdStr) { const std::unique_ptr sorted(harnessHelper->newSortedDataInterface( /*unique=*/false, /*partial=*/false, KeyFormat::String)); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + ASSERT(sorted->isEmpty(opCtx.get())); WriteUnitOfWork uow(opCtx.get()); RecordId reservedLoc(record_id_helpers::reservedIdFor( @@ -406,6 +433,7 @@ TEST(SortedDataInterface, BuilderAddKeyWithReservedRecordIdStr) { { const 
ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } diff --git a/src/mongo/db/storage/sorted_data_interface_test_rollback.cpp b/src/mongo/db/storage/sorted_data_interface_test_rollback.cpp index a633fa1e2e07b..7157f1c58fdb3 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_rollback.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_rollback.cpp @@ -27,12 +27,19 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include #include +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -51,6 +58,7 @@ TEST(SortedDataInterface, InsertWithoutCommit) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), false)); @@ -65,6 +73,7 @@ TEST(SortedDataInterface, InsertWithoutCommit) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key2, loc1), false)); @@ -94,6 +103,7 @@ TEST(SortedDataInterface, UnindexWithoutCommit) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -104,11 +114,13 @@ TEST(SortedDataInterface, UnindexWithoutCommit) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key2, loc2), true); @@ -119,11 +131,13 @@ TEST(SortedDataInterface, UnindexWithoutCommit) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key3, loc3), true)); @@ -133,11 +147,13 @@ TEST(SortedDataInterface, UnindexWithoutCommit) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext 
opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true); @@ -150,6 +166,7 @@ TEST(SortedDataInterface, UnindexWithoutCommit) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(3, sorted->numEntries(opCtx.get())); } } diff --git a/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp b/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp index 3aeda2442f22d..51f81559f5f7e 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_spaceused.cpp @@ -27,12 +27,22 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -63,6 +73,7 @@ TEST(SortedDataInterface, GetSpaceUsedBytesNonEmpty) { int nToInsert = 10; for (int i = 0; i < nToInsert; i++) { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); BSONObj key = BSON("" << i); @@ -74,6 +85,7 @@ TEST(SortedDataInterface, GetSpaceUsedBytesNonEmpty) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(nToInsert, sorted->numEntries(opCtx.get())); } } diff --git a/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp b/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp index b866cc812d649..b4ad31a9ee053 100644 --- a/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp +++ b/src/mongo/db/storage/sorted_data_interface_test_unindex.cpp @@ -27,17 +27,28 @@ * it in the license file. */ -#include "mongo/db/storage/sorted_data_interface_test_harness.h" - +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/sorted_data_interface.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { -// Insert a key and verify that it can be unindexed. +/** + * Insert a key and verify that it can be unindexed. 
+ */ void unindex(bool partial) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -50,6 +61,7 @@ void unindex(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -59,11 +71,13 @@ void unindex(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true); @@ -77,16 +91,14 @@ void unindex(bool partial) { ASSERT(sorted->isEmpty(opCtx.get())); } } - TEST(SortedDataInterface, Unindex) { unindex(false); } - TEST(SortedDataInterface, UnindexPartial) { unindex(true); } -/* +/** * Insert a KeyString and verify that it can be unindexed. */ void unindexKeyString(bool partial) { @@ -103,6 +115,7 @@ void unindexKeyString(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyString1, true)); @@ -112,11 +125,13 @@ void unindexKeyString(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), keyString1, true); @@ -130,16 +145,16 @@ void unindexKeyString(bool partial) { ASSERT(sorted->isEmpty(opCtx.get())); } } - TEST(SortedDataInterface, UnindexKeyString) { unindexKeyString(false); } - TEST(SortedDataInterface, UnindexKeyStringPartial) { unindexKeyString(true); } -// Insert a compound key and verify that it can be unindexed. +/** + * Insert a compound key and verify that it can be unindexed. 
+ */ void unindexCompoundKey(bool partial) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -152,6 +167,7 @@ void unindexCompoundKey(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -162,11 +178,13 @@ void unindexCompoundKey(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), compoundKey1a, loc1), true); @@ -180,16 +198,16 @@ void unindexCompoundKey(bool partial) { ASSERT(sorted->isEmpty(opCtx.get())); } } - TEST(SortedDataInterface, UnindexCompoundKey) { unindexCompoundKey(false); } - TEST(SortedDataInterface, UnindexCompoundKeyPartial) { unindexCompoundKey(true); } -// Insert multiple, distinct keys and verify that they can be unindexed. +/** + * Insert multiple, distinct keys and verify that they can be unindexed. + */ void unindexMultipleDistinct(bool partial) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -202,6 +220,7 @@ void unindexMultipleDistinct(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -212,11 +231,13 @@ void unindexMultipleDistinct(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key2, loc2), true); @@ -227,11 +248,13 @@ void unindexMultipleDistinct(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key3, loc3), true)); @@ -241,11 +264,13 @@ void unindexMultipleDistinct(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true); @@ -261,16 +286,16 @@ void unindexMultipleDistinct(bool partial) { ASSERT(sorted->isEmpty(opCtx.get())); } } - TEST(SortedDataInterface, 
UnindexMultipleDistinct) { unindexMultipleDistinct(false); } - TEST(SortedDataInterface, UnindexMultipleDistinctPartial) { unindexMultipleDistinct(true); } -// Insert the same key multiple times and verify that each occurrence can be unindexed. +/** + * Insert the same key multiple times and verify that each occurrence can be unindexed. + */ void unindexMultipleSameKey(bool partial) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -283,6 +308,7 @@ void unindexMultipleSameKey(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true)); @@ -294,11 +320,13 @@ void unindexMultipleSameKey(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc2), true); @@ -309,11 +337,13 @@ void unindexMultipleSameKey(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -324,11 +354,13 @@ void unindexMultipleSameKey(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true); @@ -344,17 +376,14 @@ void unindexMultipleSameKey(bool partial) { ASSERT(sorted->isEmpty(opCtx.get())); } } - - TEST(SortedDataInterface, UnindexMultipleSameKey) { unindexMultipleSameKey(false); } - TEST(SortedDataInterface, UnindexMultipleSameKeyPartial) { unindexMultipleSameKey(true); } -/* +/** * Insert the same KeyString multiple times and verify that each occurrence can be unindexed. 
*/ void unindexMultipleSameKeyString(bool partial) { @@ -373,6 +402,7 @@ void unindexMultipleSameKeyString(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyStringLoc1, true)); @@ -383,11 +413,13 @@ void unindexMultipleSameKeyString(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), keyStringLoc2, true); @@ -398,11 +430,13 @@ void unindexMultipleSameKeyString(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert(opCtx.get(), keyStringLoc3, true /* allow duplicates */)); @@ -412,11 +446,13 @@ void unindexMultipleSameKeyString(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(2, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), keyStringLoc1, true); @@ -432,17 +468,16 @@ void unindexMultipleSameKeyString(bool partial) { ASSERT(sorted->isEmpty(opCtx.get())); } } - - TEST(SortedDataInterface, UnindexMultipleSameKeyString) { unindexMultipleSameKeyString(false); } - TEST(SortedDataInterface, UnindexMultipleSameKeyStringPartial) { unindexMultipleSameKeyString(true); } -// Call unindex() on a nonexistent key and verify the result is false. +/** + * Call unindex() on a nonexistent key and verify the result is false. + */ void unindexEmpty(bool partial) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -455,6 +490,7 @@ void unindexEmpty(bool partial) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex(opCtx.get(), makeKeyString(sorted.get(), key1, loc1), true); @@ -463,16 +499,16 @@ void unindexEmpty(bool partial) { } } } - TEST(SortedDataInterface, UnindexEmpty) { unindexEmpty(false); } - TEST(SortedDataInterface, UnindexEmptyPartial) { unindexEmpty(true); } -// Test partial indexing and unindexing. +/** + * Test partial indexing and unindexing. 
+ */ TEST(SortedDataInterface, PartialIndex) { const auto harnessHelper(newSortedDataInterfaceHarnessHelper()); const std::unique_ptr sorted( @@ -485,6 +521,7 @@ TEST(SortedDataInterface, PartialIndex) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { { WriteUnitOfWork uow(opCtx.get()); @@ -520,6 +557,7 @@ TEST(SortedDataInterface, Unindex1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -530,11 +568,13 @@ TEST(SortedDataInterface, Unindex1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex( @@ -546,11 +586,13 @@ TEST(SortedDataInterface, Unindex1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex( @@ -562,12 +604,14 @@ TEST(SortedDataInterface, Unindex1) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex( @@ -590,6 +634,7 @@ TEST(SortedDataInterface, Unindex2Rollback) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); ASSERT_OK(sorted->insert( @@ -600,11 +645,13 @@ TEST(SortedDataInterface, Unindex2Rollback) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); sorted->unindex( @@ -616,6 +663,7 @@ TEST(SortedDataInterface, Unindex2Rollback) { { const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(1, sorted->numEntries(opCtx.get())); } } diff --git a/src/mongo/db/storage/storage_change_lock.cpp b/src/mongo/db/storage/storage_change_lock.cpp index 9e7c8c19f81a2..e83a40197d679 100644 --- a/src/mongo/db/storage/storage_change_lock.cpp +++ b/src/mongo/db/storage/storage_change_lock.cpp @@ -27,10 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/db/storage/storage_change_lock.h" - +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/storage/storage_change_lock.h b/src/mongo/db/storage/storage_change_lock.h index 9663c64354287..ca500c830af91 100644 --- a/src/mongo/db/storage/storage_change_lock.h +++ b/src/mongo/db/storage/storage_change_lock.h @@ -29,6 +29,8 @@ #pragma once +#include +#include #include #include "mongo/platform/atomic_word.h" diff --git a/src/mongo/db/storage/storage_engine.h b/src/mongo/db/storage/storage_engine.h index 6e132a41eaf45..e184976cd2df2 100644 --- a/src/mongo/db/storage/storage_engine.h +++ b/src/mongo/db/storage/storage_engine.h @@ -320,6 +320,10 @@ class StorageEngine { virtual ~StreamingCursor() = default; + virtual void setCatalogEntries( + const stdx::unordered_map>& + identsToNsAndUUID) = 0; + virtual StatusWith> getNextBatch(OperationContext* opCtx, std::size_t batchSize) = 0; @@ -445,7 +449,7 @@ class StorageEngine { * Clears list of drop-pending idents in the storage engine. * Used primarily by rollback after recovering to a stable timestamp. */ - virtual void clearDropPendingState() = 0; + virtual void clearDropPendingState(OperationContext* opCtx) = 0; /** * Adds 'ident' to a list of indexes/collections whose data will be dropped when: diff --git a/src/mongo/db/storage/storage_engine_change_context.cpp b/src/mongo/db/storage/storage_engine_change_context.cpp index 812ab2d3ea366..1338802145f6a 100644 --- a/src/mongo/db/storage/storage_engine_change_context.cpp +++ b/src/mongo/db/storage/storage_engine_change_context.cpp @@ -28,14 +28,22 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/storage_engine_change_context.h" +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" -#include "mongo/db/storage/recovery_unit_noop.h" +#include "mongo/db/operation_id.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine_change_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -112,11 +120,13 @@ StorageChangeLock::Token StorageEngineChangeContext::killOpsForStorageEngineChan StorageEngineChangeOperationContextDoneNotifier::get(opCtxToKill); doneNotifier.setNotifyWhenDone(service); ++_numOpCtxtsToWaitFor; + killedOperationId = opCtxToKill->getOpID(); } LOGV2_DEBUG(5781190, 1, "Killed OpCtx for storage change", - "killedOperationId"_attr = killedOperationId); + "killedOperationId"_attr = killedOperationId, + "client"_attr = client->desc()); } } diff --git a/src/mongo/db/storage/storage_engine_change_context.h b/src/mongo/db/storage/storage_engine_change_context.h index d758044db7473..13eb1fae1afbf 100644 --- a/src/mongo/db/storage/storage_engine_change_context.h +++ b/src/mongo/db/storage/storage_engine_change_context.h @@ -29,12 +29,14 @@ #pragma once +#include #include #include "mongo/db/operation_id.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/storage_change_lock.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include 
"mongo/stdx/unordered_set.h" diff --git a/src/mongo/db/storage/storage_engine_feature_flags.idl b/src/mongo/db/storage/storage_engine_feature_flags.idl index d2ef38f45762a..b026215857a72 100644 --- a/src/mongo/db/storage/storage_engine_feature_flags.idl +++ b/src/mongo/db/storage/storage_engine_feature_flags.idl @@ -34,8 +34,4 @@ feature_flags: cpp_varname: feature_flags::gFeatureFlagExecutionControl default: true version: 7.0 - featureFlagDeprioritizeLowPriorityOperations: - description: Enables the deprioritization of low priority operations - cpp_varname: feature_flags::gFeatureFlagDeprioritizeLowPriorityOperations - default: true - version: 7.0 + shouldBeFCVGated: true diff --git a/src/mongo/db/storage/storage_engine_impl.cpp b/src/mongo/db/storage/storage_engine_impl.cpp index 8999c455a24bb..d684501530a5c 100644 --- a/src/mongo/db/storage/storage_engine_impl.cpp +++ b/src/mongo/db/storage/storage_engine_impl.cpp @@ -30,35 +30,63 @@ #include "mongo/db/storage/storage_engine_impl.h" +#include +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/audit.h" #include "mongo/db/catalog/catalog_control.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_catalog_helper.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/historical_catalogid_tracker.h" +#include "mongo/db/catalog/index_builds.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/global_settings.h" -#include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/multitenancy.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/operation_context.h" -#include "mongo/db/server_options.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/resumable_index_builds_gen.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" #include "mongo/db/storage/deferred_drop_record_store.h" -#include "mongo/db/storage/durable_catalog_impl.h" +#include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/durable_catalog_entry.h" #include "mongo/db/storage/durable_history_pin.h" -#include "mongo/db/storage/historical_ident_tracker.h" #include "mongo/db/storage/kv/kv_engine.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/storage_repair_observer.h" #include "mongo/db/storage/storage_util.h" -#include "mongo/db/storage/two_phase_index_build_knobs_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/unordered_map.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #define 
MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -90,21 +118,15 @@ StorageEngineImpl::StorageEngineImpl(OperationContext* opCtx, _minOfCheckpointAndOldestTimestampListener( TimestampMonitor::TimestampType::kMinOfCheckpointAndOldest, [this](Timestamp timestamp) { _onMinOfCheckpointAndOldestTimestampChanged(timestamp); }), - _historicalIdentTimestampListener( - TimestampMonitor::TimestampType::kCheckpoint, - [serviceContext = opCtx->getServiceContext()](Timestamp timestamp) { - HistoricalIdentTracker::get(serviceContext).removeEntriesOlderThan(timestamp); - }), _collectionCatalogCleanupTimestampListener( TimestampMonitor::TimestampType::kOldest, [serviceContext = opCtx->getServiceContext()](Timestamp timestamp) { // The global lock is held by the timestamp monitor while callbacks are executed, so // there can be no batched CollectionCatalog writer and we are thus safe to write // using the service context. - if (CollectionCatalog::latest(serviceContext) - ->needsCleanupForOldestTimestamp(timestamp)) { + if (CollectionCatalog::latest(serviceContext)->catalogIdTracker().dirty(timestamp)) { CollectionCatalog::write(serviceContext, [timestamp](CollectionCatalog& catalog) { - catalog.cleanupForOldestTimestampAdvanced(timestamp); + catalog.catalogIdTracker().cleanup(timestamp); }); } }), @@ -176,7 +198,7 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx, _dumpCatalog(opCtx); } - _catalog.reset(new DurableCatalogImpl( + _catalog.reset(new DurableCatalog( _catalogRecordStore.get(), _options.directoryPerDB, _options.directoryForIndexes, this)); _catalog->init(opCtx); @@ -214,7 +236,7 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx, // 'local.orphan.xxxxx' for it. However, in a nonrepair context, the orphaned idents // will be dropped in reconcileCatalogAndIdents(). for (const auto& ident : identsKnownToStorageEngine) { - if (_catalog->isCollectionIdent(ident)) { + if (DurableCatalog::isCollectionIdent(ident)) { bool isOrphan = !std::any_of(catalogEntries.begin(), catalogEntries.end(), [this, &ident](DurableCatalog::EntryIdentifier entry) { @@ -285,7 +307,7 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx, Timestamp minValidTs = stableTs ? *stableTs : Timestamp::min(); CollectionCatalog::write(opCtx, [&minValidTs](CollectionCatalog& catalog) { // Let the CollectionCatalog know that we are maintaining timestamps from minValidTs - catalog.cleanupForCatalogReopen(minValidTs); + catalog.catalogIdTracker().rollback(minValidTs); }); for (DurableCatalog::EntryIdentifier entry : catalogEntries) { if (_options.forRestore) { @@ -341,9 +363,9 @@ void StorageEngineImpl::loadCatalog(OperationContext* opCtx, if (_options.forRepair) { StorageRepairObserver::get(getGlobalServiceContext()) - ->invalidatingModification(str::stream() - << "Collection " << entry.nss.ns() - << " dropped: " << status.reason()); + ->invalidatingModification( + str::stream() << "Collection " << entry.nss.toStringForErrorMsg() + << " dropped: " << status.reason()); } wuow.commit(); continue; @@ -413,9 +435,11 @@ void StorageEngineImpl::_initCollection(OperationContext* opCtx, bool forRepair, Timestamp minVisibleTs, Timestamp minValidTs) { - auto md = _catalog->getMetaData(opCtx, catalogId); + const auto catalogEntry = _catalog->getParsedCatalogEntry(opCtx, catalogId); + const auto md = catalogEntry->metadata; uassert(ErrorCodes::MustDowngrade, - str::stream() << "Collection does not have UUID in KVCatalog. 
Collection: " << nss, + str::stream() << "Collection does not have UUID in KVCatalog. Collection: " + << nss.toStringForErrorMsg(), md->options.uuid); auto ident = _catalog->getEntry(catalogId).ident; @@ -432,11 +456,9 @@ void StorageEngineImpl::_initCollection(OperationContext* opCtx, auto collectionFactory = Collection::Factory::get(getGlobalServiceContext()); auto collection = collectionFactory->make(opCtx, nss, catalogId, md, std::move(rs)); - collection->setMinimumVisibleSnapshot(minVisibleTs); CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { - catalog.registerCollection( - opCtx, md->options.uuid.value(), std::move(collection), /*commitTime*/ minValidTs); + catalog.registerCollection(opCtx, std::move(collection), /*commitTime*/ minValidTs); }); } @@ -469,9 +491,10 @@ Status StorageEngineImpl::_recoverOrphanedCollection(OperationContext* opCtx, "ident"_attr = collectionIdent); WriteUnitOfWork wuow(opCtx); - const auto metadata = _catalog->getMetaData(opCtx, catalogId); + const auto catalogEntry = _catalog->getParsedCatalogEntry(opCtx, catalogId); + const auto md = catalogEntry->metadata; Status status = - _engine->recoverOrphanedIdent(opCtx, collectionName, collectionIdent, metadata->options); + _engine->recoverOrphanedIdent(opCtx, collectionName, collectionIdent, md->options); bool dataModified = status.code() == ErrorCodes::DataModifiedByRepair; if (!status.isOK() && !dataModified) { @@ -479,8 +502,9 @@ Status StorageEngineImpl::_recoverOrphanedCollection(OperationContext* opCtx, } if (dataModified) { StorageRepairObserver::get(getGlobalServiceContext()) - ->invalidatingModification(str::stream() << "Collection " << collectionName - << " recovered: " << status.reason()); + ->invalidatingModification(str::stream() + << "Collection " << collectionName.toStringForErrorMsg() + << " recovered: " << status.reason()); } wuow.commit(); return Status::OK(); @@ -513,7 +537,7 @@ bool StorageEngineImpl::_handleInternalIdent(OperationContext* opCtx, ReconcileResult* reconcileResult, std::set* internalIdentsToDrop, std::set* allInternalIdents) { - if (!_catalog->isInternalIdent(ident)) { + if (!DurableCatalog::isInternalIdent(ident)) { return false; } @@ -526,7 +550,7 @@ bool StorageEngineImpl::_handleInternalIdent(OperationContext* opCtx, return true; } - if (!_catalog->isResumableIndexBuildIdent(ident)) { + if (!DurableCatalog::isResumableIndexBuildIdent(ident)) { return false; } @@ -638,13 +662,13 @@ StatusWith StorageEngineImpl::reconcileCatalogAn continue; } - if (!_catalog->isUserDataIdent(it)) { + if (!DurableCatalog::isUserDataIdent(it)) { continue; } // In repair context, any orphaned collection idents from the engine should already be // recovered in the catalog in loadCatalog(). - invariant(!(_catalog->isCollectionIdent(it) && _options.forRepair)); + invariant(!(DurableCatalog::isCollectionIdent(it) && _options.forRepair)); // Leave drop-pending idents alone. // These idents have to be retained as long as the corresponding drops are not part of a @@ -657,11 +681,7 @@ StatusWith StorageEngineImpl::reconcileCatalogAn } const auto& toRemove = it; - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - Timestamp identDropTs = - feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe() - ? 
stableTs - : Timestamp::min(); + const Timestamp identDropTs = stableTs; LOGV2(22251, "Dropping unknown ident", "ident"_attr = toRemove, "ts"_attr = identDropTs); if (!identDropTs.isNull()) { addDropPendingIdent(identDropTs, std::make_shared(toRemove), /*onDrop=*/nullptr); @@ -688,8 +708,9 @@ StatusWith StorageEngineImpl::reconcileCatalogAn for (const DurableCatalog::EntryIdentifier& entry : catalogEntries) { if (engineIdents.find(entry.ident) == engineIdents.end()) { return {ErrorCodes::UnrecoverableRollbackError, - str::stream() << "Expected collection does not exist. Collection: " - << entry.nss.ns() << " Ident: " << entry.ident}; + str::stream() + << "Expected collection does not exist. Collection: " + << entry.nss.toStringForErrorMsg() << " Ident: " << entry.ident}; } } } @@ -700,13 +721,12 @@ StatusWith StorageEngineImpl::reconcileCatalogAn // Also, remove unfinished builds except those that were background index builds started on a // secondary. for (const DurableCatalog::EntryIdentifier& entry : catalogEntries) { - std::shared_ptr metaData = - _catalog->getMetaData(opCtx, entry.catalogId); - NamespaceString nss(metaData->nss); + const auto catalogEntry = _catalog->getParsedCatalogEntry(opCtx, entry.catalogId); + const auto md = catalogEntry->metadata; // Batch up the indexes to remove them from `metaData` outside of the iterator. std::vector indexesToDrop; - for (const auto& indexMetaData : metaData->indexes) { + for (const auto& indexMetaData : md->indexes) { auto indexName = indexMetaData.nameStringData(); auto indexIdent = _catalog->getIndexIdent(opCtx, entry.catalogId, indexName); @@ -730,7 +750,7 @@ StatusWith StorageEngineImpl::reconcileCatalogAn "the index, or rerunning with the --repair option. See " "http://dochub.mongodb.org/core/repair for more information", "index"_attr = indexName, - logAttrs(nss)); + logAttrs(md->nss)); } if (!engineIdents.count(indexIdent)) { @@ -744,7 +764,7 @@ StatusWith StorageEngineImpl::reconcileCatalogAn "Index catalog entry ident not found", "ident"_attr = indexIdent, "entry"_attr = indexMetaData.spec, - logAttrs(nss)); + logAttrs(md->nss)); } // Any index build with a UUID is an unfinished two-phase build and must be restarted. @@ -754,13 +774,13 @@ StatusWith StorageEngineImpl::reconcileCatalogAn if (indexMetaData.buildUUID) { invariant(!indexMetaData.ready); - auto collUUID = metaData->options.uuid; + auto collUUID = md->options.uuid; invariant(collUUID); auto buildUUID = *indexMetaData.buildUUID; LOGV2(22253, "Found index from unfinished build", - logAttrs(nss), + logAttrs(md->nss), "uuid"_attr = *collUUID, "index"_attr = indexName, "buildUUID"_attr = buildUUID); @@ -784,10 +804,10 @@ StatusWith StorageEngineImpl::reconcileCatalogAn LOGV2(22255, "Expected background index build did not complete, rebuilding in foreground " "- see SERVER-43097", - logAttrs(nss), + logAttrs(md->nss), "index"_attr = indexName); reconcileResult.indexesToRebuild.push_back( - {entry.catalogId, nss, indexName.toString()}); + {entry.catalogId, md->nss, indexName.toString()}); continue; } @@ -802,7 +822,10 @@ StatusWith StorageEngineImpl::reconcileCatalogAn invariant(!indexMetaData.isBackgroundSecondaryBuild); invariant(!getGlobalReplSettings().usingReplSets()); - LOGV2(22256, "Dropping unfinished index", logAttrs(nss), "index"_attr = indexName); + LOGV2(22256, + "Dropping unfinished index", + logAttrs(md->nss), + "index"_attr = indexName); // Ensure the `ident` is dropped while we have the `indexIdent` value. 
Status status = _engine->dropIdent(opCtx->recoveryUnit(), indexIdent); if (!status.isOK()) { @@ -818,9 +841,9 @@ StatusWith StorageEngineImpl::reconcileCatalogAn } for (auto&& indexName : indexesToDrop) { - invariant(metaData->eraseIndex(indexName), - str::stream() << "Index is missing. Collection: " << nss.ns() - << " Index: " << indexName); + invariant(md->eraseIndex(indexName), + str::stream() << "Index is missing. Collection: " + << md->nss.toStringForErrorMsg() << " Index: " << indexName); } if (indexesToDrop.size() > 0) { WriteUnitOfWork wuow(opCtx); @@ -828,7 +851,7 @@ StatusWith StorageEngineImpl::reconcileCatalogAn CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForMetadataWrite( opCtx, entry.nss); invariant(collection->getCatalogId() == entry.catalogId); - collection->replaceMetadata(opCtx, std::move(metaData)); + collection->replaceMetadata(opCtx, std::move(md)); wuow.commit(); } } @@ -855,7 +878,7 @@ StatusWith StorageEngineImpl::reconcileCatalogAn } std::string StorageEngineImpl::getFilesystemPathForDb(const DatabaseName& dbName) const { - return _catalog->getFilesystemPathForDb(dbName.toString()); + return _catalog->getFilesystemPathForDb(DatabaseNameUtil::serializeForCatalog(dbName)); } void StorageEngineImpl::cleanShutdown(ServiceContext* svcCtx) { @@ -887,7 +910,6 @@ void StorageEngineImpl::startTimestampMonitor() { _engine.get(), getGlobalServiceContext()->getPeriodicRunner()); _timestampMonitor->addListener(&_minOfCheckpointAndOldestTimestampListener); - _timestampMonitor->addListener(&_historicalIdentTimestampListener); _timestampMonitor->addListener(&_collectionCatalogCleanupTimestampListener); } @@ -936,22 +958,9 @@ Status StorageEngineImpl::_dropCollections(OperationContext* opCtx, for (auto& uuid : toDrop) { auto coll = collectionCatalog->lookupCollectionByUUIDForMetadataWrite(opCtx, uuid); - // No need to remove the indexes from the IndexCatalog because eliminating the Collection - // will have the same effect. - auto ii = coll->getIndexCatalog()->getIndexIterator( - opCtx, - IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished | - IndexCatalog::InclusionPolicy::kFrozen); - while (ii->more()) { - const IndexCatalogEntry* ice = ii->next(); - - audit::logDropIndex(opCtx->getClient(), ice->descriptor()->indexName(), coll->ns()); - - catalog::removeIndex(opCtx, - ice->descriptor()->indexName(), - coll, - coll->getIndexCatalog()->getEntryShared(ice->descriptor())); - } + // Drop all indexes in the collection. + coll->getIndexCatalog()->dropAllIndexes( + opCtx, coll, /*includingIdIndex=*/true, /*onDropFn=*/{}); audit::logDropCollection(opCtx->getClient(), coll->ns()); @@ -1036,8 +1045,8 @@ Status StorageEngineImpl::repairRecordStore(OperationContext* opCtx, } if (dataModified) { - repairObserver->invalidatingModification(str::stream() << "Collection " << nss << ": " - << status.reason()); + repairObserver->invalidatingModification( + str::stream() << "Collection " << nss.toStringForErrorMsg() << ": " << status.reason()); } // After repairing, re-initialize the collection with a valid RecordStore. 
@@ -1178,8 +1187,8 @@ bool StorageEngineImpl::supportsPendingDrops() const { return supportsReadConcernMajority(); } -void StorageEngineImpl::clearDropPendingState() { - _dropPendingIdentReaper.clearDropPendingState(); +void StorageEngineImpl::clearDropPendingState(OperationContext* opCtx) { + _dropPendingIdentReaper.clearDropPendingState(opCtx); } Timestamp StorageEngineImpl::getAllDurableTimestamp() const { @@ -1235,7 +1244,22 @@ void StorageEngineImpl::checkpoint(OperationContext* opCtx) { void StorageEngineImpl::_onMinOfCheckpointAndOldestTimestampChanged(const Timestamp& timestamp) { // No drop-pending idents present if getEarliestDropTimestamp() returns boost::none. if (auto earliestDropTimestamp = _dropPendingIdentReaper.getEarliestDropTimestamp()) { - if (timestamp >= *earliestDropTimestamp) { + + auto checkpoint = _engine->getCheckpointTimestamp(); + auto oldest = _engine->getOldestTimestamp(); + + // We won't try to drop anything unless we know it is both safe to drop (older than the + // oldest timestamp) and present in a checkpoint for non-ephemeral storage engines. + // Otherwise, the drop will fail, and we'll keep attempting a drop for each new `timestamp`. + // Note that this is not required for correctness and is only done to avoid unnecessary work + // and spamming the logs when we actually have nothing to do. Additionally, these values may + // have both advanced since `timestamp` was calculated, but this is not expected to be + // common and does not affect correctness. + // For ephemeral storage engines, we can always drop immediately. + const bool safeToDrop = oldest >= *earliestDropTimestamp; + const bool canDropWithoutTransientErrors = + isEphemeral() || checkpoint >= *earliestDropTimestamp; + if (safeToDrop && canDropWithoutTransientErrors) { LOGV2(22260, "Removing drop-pending idents with drop timestamps before timestamp", "timestamp"_attr = timestamp); @@ -1345,7 +1369,9 @@ void StorageEngineImpl::TimestampMonitor::_startup() { throw; } }, - Seconds(1)); + Seconds(1), + // TODO(SERVER-74657): Please revisit if this periodic job could be made killable. 
+ false /*isKillableByStepdown*/); _job = _periodicRunner->makeJob(std::move(job)); _job.start(); @@ -1392,9 +1418,8 @@ int64_t StorageEngineImpl::sizeOnDiskForDb(OperationContext* opCtx, const Databa if (opCtx->isLockFreeReadsOp()) { auto collectionCatalog = CollectionCatalog::get(opCtx); - for (auto it = collectionCatalog->begin(opCtx, dbName); it != collectionCatalog->end(opCtx); - ++it) { - perCollectionWork(*it); + for (auto&& coll : collectionCatalog->range(dbName)) { + perCollectionWork(coll); } } else { catalog::forEachCollectionFromDb(opCtx, dbName, MODE_IS, perCollectionWork); diff --git a/src/mongo/db/storage/storage_engine_impl.h b/src/mongo/db/storage/storage_engine_impl.h index 1af6b32a8b9f4..2de205f903aee 100644 --- a/src/mongo/db/storage/storage_engine_impl.h +++ b/src/mongo/db/storage/storage_engine_impl.h @@ -29,28 +29,49 @@ #pragma once +#include +#include +#include +#include +#include #include +#include #include #include +#include #include +#include +#include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/ident.h" #include "mongo/db/storage/journal_listener.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/kv/kv_drop_pending_ident_reaper.h" +#include "mongo/db/storage/kv/kv_engine.h" #include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_interface.h" #include "mongo/db/storage/temporary_record_store.h" +#include "mongo/db/tenant_id.h" #include "mongo/platform/mutex.h" #include "mongo/util/periodic_runner.h" +#include "mongo/util/uuid.h" namespace mongo { -class DurableCatalogImpl; +class DurableCatalog; class KVEngine; struct StorageEngineOptions { @@ -160,7 +181,7 @@ class StorageEngineImpl final : public StorageEngineInterface, public StorageEng bool supportsPendingDrops() const final; - void clearDropPendingState() final; + void clearDropPendingState(OperationContext* opCtx) final; SnapshotManager* getSnapshotManager() const final; @@ -335,7 +356,7 @@ class StorageEngineImpl final : public StorageEngineInterface, public StorageEng const DurableCatalog* getCatalog() const override; /** - * When loading after an unclean shutdown, this performs cleanup on the DurableCatalogImpl. + * When loading after an unclean shutdown, this performs cleanup on the DurableCatalog. */ void loadCatalog(OperationContext* opCtx, boost::optional stableTs, @@ -393,7 +414,7 @@ class StorageEngineImpl final : public StorageEngineInterface, public StorageEng /** * When called in a repair context (_options.forRepair=true), attempts to recover a collection - * whose entry is present in the DurableCatalogImpl, but missing from the KVEngine. Returns an + * whose entry is present in the DurableCatalog, but missing from the KVEngine. Returns an * error Status if called outside of a repair context or the implementation of * KVEngine::recoverOrphanedIdent returns an error other than DataModifiedByRepair. * @@ -446,17 +467,13 @@ class StorageEngineImpl final : public StorageEngineInterface, public StorageEng // Listener for min of checkpoint and oldest timestamp changes. 
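The gating added above in `StorageEngineImpl::_onMinOfCheckpointAndOldestTimestampChanged` only reaps drop-pending idents once the oldest timestamp, and for durable engines a checkpoint, cover the earliest drop timestamp. A minimal standalone sketch of that decision, using a placeholder `Ts` struct rather than the server's `Timestamp` class (an assumption for illustration only):

```cpp
#include <cstdint>
#include <iostream>

// Placeholder for the server's Timestamp type; only the comparison used below.
struct Ts {
    uint64_t v = 0;
};

bool shouldReapDropPendingIdents(Ts oldest, Ts checkpoint, Ts earliestDrop, bool isEphemeral) {
    // Safe only once the oldest timestamp has advanced past the earliest drop timestamp.
    const bool safeToDrop = oldest.v >= earliestDrop.v;
    // Durable engines additionally wait for a checkpoint to cover the drop; ephemeral
    // engines take no checkpoints and may drop as soon as it is safe.
    const bool canDropWithoutTransientErrors = isEphemeral || checkpoint.v >= earliestDrop.v;
    return safeToDrop && canDropWithoutTransientErrors;
}

int main() {
    // Oldest covers the drop but no checkpoint does yet: keep waiting.
    std::cout << shouldReapDropPendingIdents({15}, {9}, {10}, /*isEphemeral=*/false) << '\n';
    // Both cover the drop: reap now.
    std::cout << shouldReapDropPendingIdents({15}, {12}, {10}, /*isEphemeral=*/false) << '\n';
}
```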
TimestampMonitor::TimestampListener _minOfCheckpointAndOldestTimestampListener; - // Listener for checkpoint timestamp changes to remove historical ident entries older than the - // checkpoint timestamp. - TimestampMonitor::TimestampListener _historicalIdentTimestampListener; - // Listener for cleanup of CollectionCatalog when oldest timestamp advances. TimestampMonitor::TimestampListener _collectionCatalogCleanupTimestampListener; const bool _supportsCappedCollections; std::unique_ptr _catalogRecordStore; - std::unique_ptr _catalog; + std::unique_ptr _catalog; // Flag variable that states if the storage engine is in backup mode. bool _inBackupMode = false; diff --git a/src/mongo/db/storage/storage_engine_init.cpp b/src/mongo/db/storage/storage_engine_init.cpp index b6c9e87b6b408..e48af21b71881 100644 --- a/src/mongo/db/storage/storage_engine_init.cpp +++ b/src/mongo/db/storage/storage_engine_init.cpp @@ -27,40 +27,53 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - #include "mongo/db/storage/storage_engine_init.h" +#include #include -#include - -#include "mongo/base/init.h" +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/operation_context.h" #include "mongo/db/storage/control/storage_control.h" +#include "mongo/db/storage/execution_control/concurrency_adjustment_parameters_gen.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/recovery_unit_noop.h" #include "mongo/db/storage/storage_engine_change_context.h" -#include "mongo/db/storage/storage_engine_feature_flags_gen.h" #include "mongo/db/storage/storage_engine_lock_file.h" #include "mongo/db/storage/storage_engine_metadata.h" #include "mongo/db/storage/storage_engine_parameters_gen.h" #include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/storage/storage_repair_observer.h" +#include "mongo/db/storage/ticketholder_manager.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" #include "mongo/util/concurrency/priority_ticketholder.h" -#include "mongo/util/concurrency/semaphore_ticketholder.h" -#include "mongo/util/concurrency/ticketholder.h" +#include "mongo/util/concurrency/semaphore_ticketholder.h" // IWYU pragma: keep +#include "mongo/util/decorable.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - namespace mongo { - namespace { /** * Creates the lock file used to prevent concurrent processes from accessing the data files, @@ -156,48 +169,48 @@ StorageEngine::LastShutdownState initializeStorageEngine(OperationContext* opCtx // This should be set once during startup. 
if ((initFlags & StorageEngineInitFlags::kForRestart) == StorageEngineInitFlags{}) { auto readTransactions = gConcurrentReadTransactions.load(); + auto writeTransactions = gConcurrentWriteTransactions.load(); static constexpr auto DEFAULT_TICKETS_VALUE = 128; + bool userSetConcurrency = false; + + userSetConcurrency = readTransactions != 0 || writeTransactions != 0; readTransactions = readTransactions == 0 ? DEFAULT_TICKETS_VALUE : readTransactions; - auto writeTransactions = gConcurrentWriteTransactions.load(); writeTransactions = writeTransactions == 0 ? DEFAULT_TICKETS_VALUE : writeTransactions; + if (userSetConcurrency) { + // If the user manually set concurrency limits, then disable execution control + // implicitly. + gStorageEngineConcurrencyAdjustmentAlgorithm = "fixedConcurrentTransactions"; + } + auto svcCtx = opCtx->getServiceContext(); - if (feature_flags::gFeatureFlagDeprioritizeLowPriorityOperations - .isEnabledAndIgnoreFCVUnsafeAtStartup()) { - std::unique_ptr ticketHolderManager; + std::unique_ptr ticketHolderManager; #ifdef __linux__ - LOGV2_DEBUG(6902900, 1, "Using Priority Queue-based ticketing scheduler"); - - auto lowPriorityBypassThreshold = gLowPriorityAdmissionBypassThreshold.load(); - ticketHolderManager = std::make_unique( - svcCtx, - std::make_unique( - readTransactions, lowPriorityBypassThreshold, svcCtx), - std::make_unique( - writeTransactions, lowPriorityBypassThreshold, svcCtx)); + LOGV2_DEBUG(6902900, 1, "Using Priority Queue-based ticketing scheduler"); + + auto lowPriorityBypassThreshold = gLowPriorityAdmissionBypassThreshold.load(); + ticketHolderManager = std::make_unique( + svcCtx, + std::make_unique( + readTransactions, lowPriorityBypassThreshold, svcCtx), + std::make_unique( + writeTransactions, lowPriorityBypassThreshold, svcCtx)); #else - LOGV2_DEBUG(7207201, 1, "Using semaphore-based ticketing scheduler"); - - // PriorityTicketHolder is implemented using an equivalent mechanism to - // std::atomic::wait which isn't available until C++20. We've implemented it in Linux - // using futex calls. As this hasn't been implemented in non-Linux platforms we fallback - // to the existing semaphore implementation even if the feature flag is enabled. - // - // TODO SERVER-72616: Remove the ifdefs once TicketPool is implemented with atomic - // wait. - ticketHolderManager = std::make_unique( - svcCtx, - std::make_unique(readTransactions, svcCtx), - std::make_unique(writeTransactions, svcCtx)); + LOGV2_DEBUG(7207201, 1, "Using semaphore-based ticketing scheduler"); + + // PriorityTicketHolder is implemented using an equivalent mechanism to + // std::atomic::wait which isn't available until C++20. We've implemented it in Linux + // using futex calls. As this hasn't been implemented in non-Linux platforms we fallback + // to the existing semaphore implementation even if the feature flag is enabled. + // + // TODO SERVER-72616: Remove the ifdefs once TicketPool is implemented with atomic + // wait. 
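The startup path above now treats any non-zero concurrent read/write transaction setting as an explicit operator choice and, in that case, pins the concurrency adjustment algorithm to fixed concurrency. A minimal standalone sketch of that resolution logic, using plain integers and a string in place of the server parameters (assumed names, not the server's API):

```cpp
#include <cstdint>
#include <string>

constexpr int32_t kDefaultTicketsValue = 128;

// Resolves the effective read/write ticket counts; 0 means "use the default".
// Explicitly configured limits implicitly disable dynamic execution control.
void resolveConcurrency(int32_t& readTransactions,
                        int32_t& writeTransactions,
                        std::string& adjustmentAlgorithm) {
    const bool userSetConcurrency = readTransactions != 0 || writeTransactions != 0;
    if (readTransactions == 0)
        readTransactions = kDefaultTicketsValue;
    if (writeTransactions == 0)
        writeTransactions = kDefaultTicketsValue;
    if (userSetConcurrency) {
        adjustmentAlgorithm = "fixedConcurrentTransactions";
    }
}
```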
+ ticketHolderManager = std::make_unique( + svcCtx, + std::make_unique(readTransactions, svcCtx), + std::make_unique(writeTransactions, svcCtx)); #endif - TicketHolderManager::use(svcCtx, std::move(ticketHolderManager)); - } else { - auto ticketHolderManager = std::make_unique( - svcCtx, - std::make_unique(readTransactions, svcCtx), - std::make_unique(writeTransactions, svcCtx)); - TicketHolderManager::use(svcCtx, std::move(ticketHolderManager)); - } + TicketHolderManager::use(svcCtx, std::move(ticketHolderManager)); } ScopeGuard guard([&] { @@ -397,40 +410,4 @@ void appendStorageEngineList(ServiceContext* service, BSONObjBuilder* result) { result->append("storageEngines", storageEngineList(service)); } -namespace { - -class StorageClientObserver final : public ServiceContext::ClientObserver { -public: - void onCreateClient(Client* client) override{}; - void onDestroyClient(Client* client) override{}; - void onCreateOperationContext(OperationContext* opCtx) { - // Use a fully fledged lock manager even when the storage engine is not set. - opCtx->setLockState(std::make_unique(opCtx->getServiceContext())); - - auto service = opCtx->getServiceContext(); - - // There are a few cases where we don't have a storage engine available yet when creating an - // operation context. - // 1. During startup, we create an operation context to allow the storage engine - // initialization code to make use of the lock manager. - // 2. There are unit tests that create an operation context before initializing the storage - // engine. - // 3. Unit tests that use an operation context but don't require a storage engine for their - // testing purpose. - auto storageEngine = service->getStorageEngine(); - if (!storageEngine) { - return; - } - opCtx->setRecoveryUnit(std::unique_ptr(storageEngine->newRecoveryUnit()), - WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); - } - void onDestroyOperationContext(OperationContext* opCtx) {} -}; - -ServiceContext::ConstructorActionRegisterer registerStorageClientObserverConstructor{ - "RegisterStorageClientObserverConstructor", [](ServiceContext* service) { - service->registerClientObserver(std::make_unique()); - }}; - -} // namespace } // namespace mongo diff --git a/src/mongo/db/storage/storage_engine_init.h b/src/mongo/db/storage/storage_engine_init.h index 395e0446a6ab3..e1e403ccbb221 100644 --- a/src/mongo/db/storage/storage_engine_init.h +++ b/src/mongo/db/storage/storage_engine_init.h @@ -29,9 +29,13 @@ #pragma once +#include #include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/stdx/utility.h" diff --git a/src/mongo/db/storage/storage_engine_lock_file.cpp b/src/mongo/db/storage/storage_engine_lock_file.cpp index be9ee83048c08..0205ee3a854a0 100644 --- a/src/mongo/db/storage/storage_engine_lock_file.cpp +++ b/src/mongo/db/storage/storage_engine_lock_file.cpp @@ -28,11 +28,14 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/storage/storage_engine_lock_file.h" +#include +#include "mongo/db/storage/storage_engine_lock_file.h" #include "mongo/platform/process_id.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/storage_engine_lock_file.h b/src/mongo/db/storage/storage_engine_lock_file.h index bea803a1b9540..24df14d442922 
100644 --- a/src/mongo/db/storage/storage_engine_lock_file.h +++ b/src/mongo/db/storage/storage_engine_lock_file.h @@ -30,10 +30,12 @@ #pragma once #include +#include #include #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/service_context.h" namespace mongo { diff --git a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp index 41fefe73cdf57..ddb6d1b935b88 100644 --- a/src/mongo/db/storage/storage_engine_lock_file_posix.cpp +++ b/src/mongo/db/storage/storage_engine_lock_file_posix.cpp @@ -28,22 +28,37 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include -#include "mongo/db/storage/storage_engine_lock_file.h" - -#include +#ifndef _WIN32 #include -#include -#include #include #include -#include -#include +#endif +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/storage/storage_engine_lock_file.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/errno_util.h" #include "mongo/util/str.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/storage_engine_lock_file_test.cpp b/src/mongo/db/storage/storage_engine_lock_file_test.cpp index df4967e2d415f..6b274de36babb 100644 --- a/src/mongo/db/storage/storage_engine_lock_file_test.cpp +++ b/src/mongo/db/storage/storage_engine_lock_file_test.cpp @@ -27,20 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include // IWYU pragma: keep +#include -#include -#include -#include +#ifndef _WIN32 +#include +#endif +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/storage/storage_engine_lock_file.h" #include "mongo/platform/process_id.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" -#ifndef _WIN32 -#include -#include +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) #include #endif diff --git a/src/mongo/db/storage/storage_engine_metadata.cpp b/src/mongo/db/storage/storage_engine_metadata.cpp index 4008ac80d9b86..182ee176f322f 100644 --- a/src/mongo/db/storage/storage_engine_metadata.cpp +++ b/src/mongo/db/storage/storage_engine_metadata.cpp @@ -28,34 +28,47 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/storage_engine_metadata.h" - -#include -#include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include #include #ifdef __linux__ // Only needed by flushDirectory for Linux #include #include -#include -#include #endif +#include "mongo/base/data_range.h" #include "mongo/base/data_type_validated.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/bson/dotted_path_support.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/storage/storage_engine_metadata.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/object_check.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep #include "mongo/util/assert_util.h" +#include "mongo/util/errno_util.h" #include "mongo/util/file.h" #include "mongo/util/str.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/storage_engine_metadata.h b/src/mongo/db/storage/storage_engine_metadata.h index e38a6df46abed..373e8ec06a462 100644 --- a/src/mongo/db/storage/storage_engine_metadata.h +++ b/src/mongo/db/storage/storage_engine_metadata.h @@ -30,11 +30,15 @@ #pragma once #include +#include #include +#include #include #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" namespace mongo { diff --git a/src/mongo/db/storage/storage_engine_metadata_test.cpp b/src/mongo/db/storage/storage_engine_metadata_test.cpp index 3aed64f9fc8cd..7bd3526c2695c 100644 --- a/src/mongo/db/storage/storage_engine_metadata_test.cpp +++ b/src/mongo/db/storage/storage_engine_metadata_test.cpp @@ -27,19 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include // IWYU pragma: keep -#include -#include -#include -#include -#include +#include +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/json.h" +#include "mongo/bson/json.h" #include "mongo/db/storage/storage_engine_metadata.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" namespace { diff --git a/src/mongo/db/storage/storage_engine_mock.h b/src/mongo/db/storage/storage_engine_mock.h index a444b835d01ef..ce9b407f840a8 100644 --- a/src/mongo/db/storage/storage_engine_mock.h +++ b/src/mongo/db/storage/storage_engine_mock.h @@ -125,7 +125,7 @@ class StorageEngineMock : public StorageEngine { bool supportsPendingDrops() const final { return false; } - void clearDropPendingState() final {} + void clearDropPendingState(OperationContext* opCtx) final {} StatusWith recoverToStableTimestamp(OperationContext* opCtx) final { fassertFailed(40547); } diff --git a/src/mongo/db/storage/storage_engine_parameters.idl b/src/mongo/db/storage/storage_engine_parameters.idl index cefe9bc4fa8b4..2aac8288d4f33 100644 --- a/src/mongo/db/storage/storage_engine_parameters.idl +++ b/src/mongo/db/storage/storage_engine_parameters.idl @@ -31,15 +31,6 @@ global: cpp_includes: - "mongo/db/storage/ticketholder_manager.h" -enums: - # TODO (SERVER-73342): Remove this enum once there is only one algorithm. - StorageEngineConcurrencyAdjustmentAlgorithm: - description: "Algorithm for adjusting the number of concurrent storage engine transactions" - type: string - values: - kNone: "" - kThroughputProbing: "throughputProbing" - server_parameters: storageEngineConcurrentWriteTransactions: @@ -51,6 +42,8 @@ server_parameters: deprecated_name: - wiredTigerConcurrentWriteTransactions # Default value being 0 means we're allowing the underlying storage engines to use their default values. + # This parameter is only compatible with the 'kFixedConcurrentTransactions' option for + # storageEngineConcurrencyAdjustmentAlgorithm. default: 0 validator: gte: 5 @@ -64,6 +57,8 @@ server_parameters: deprecated_name: - wiredTigerConcurrentReadTransactions # Default value being 0 means we're allowing the underlying storage engines to use their default values. + # This parameter is only compatible with the 'kFixedConcurrentTransactions' option for + # storageEngineConcurrencyAdjustmentAlgorithm. default: 0 validator: gte: 5 @@ -83,16 +78,3 @@ server_parameters: default: 5000 validator: gte: 0 - - # TODO (SERVER-73342): Remove this parameter once there is only one algorithm. - storageEngineConcurrencyAdjustmentAlgorithm: - description: >- - The algorithm to be used for adjusting the number of concurrent storage engine transactions. - This is gated behind featureFlagExecutionControl and will have no effect if that feature is - not enabled. 
- set_at: startup - cpp_vartype: std::string - cpp_varname: gStorageEngineConcurrencyAdjustmentAlgorithm - default: "throughputProbing" - validator: - callback: TicketHolderManager::validateConcurrencyAdjustmentAlgorithm diff --git a/src/mongo/db/storage/storage_engine_test_fixture.h b/src/mongo/db/storage/storage_engine_test_fixture.h index d0a79882e9f8a..c10f4105651b8 100644 --- a/src/mongo/db/storage/storage_engine_test_fixture.h +++ b/src/mongo/db/storage/storage_engine_test_fixture.h @@ -40,7 +40,6 @@ #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/storage/durable_catalog.h" -#include "mongo/db/storage/durable_catalog_impl.h" #include "mongo/db/storage/kv/kv_engine.h" #include "mongo/db/storage/storage_engine_impl.h" #include "mongo/db/storage/storage_repair_observer.h" @@ -78,12 +77,11 @@ class StorageEngineTest : public ServiceContextMongoDTest { opCtx, ns, catalogId, - _storageEngine->getCatalog()->getMetaData(opCtx, catalogId), + _storageEngine->getCatalog()->getParsedCatalogEntry(opCtx, catalogId)->metadata, std::move(rs)); CollectionCatalog::write(opCtx, [&](CollectionCatalog& catalog) { - catalog.registerCollection( - opCtx, options.uuid.get(), std::move(coll), /*ts=*/boost::none); + catalog.registerCollection(opCtx, std::move(coll), /*ts=*/boost::none); }); return {{_storageEngine->getCatalog()->getEntry(catalogId)}}; @@ -101,7 +99,7 @@ class StorageEngineTest : public ServiceContextMongoDTest { * Create a collection table in the KVEngine not reflected in the DurableCatalog. */ Status createCollTable(OperationContext* opCtx, NamespaceString collName) { - const std::string identName = "collection-" + collName.ns(); + const std::string identName = "collection-" + collName.ns_forTest(); return _storageEngine->getEngine()->createRecordStore( opCtx, collName, identName, CollectionOptions()); } @@ -190,18 +188,17 @@ class StorageEngineTest : public ServiceContextMongoDTest { Collection* collection = CollectionCatalog::get(opCtx)->lookupCollectionByNamespaceForMetadataWrite(opCtx, collNs); - auto descriptor = collection->getIndexCatalog()->findIndexByName( + auto writableEntry = collection->getIndexCatalog()->getWritableEntryByName( opCtx, key, IndexCatalog::InclusionPolicy::kReady | IndexCatalog::InclusionPolicy::kUnfinished); - collection->indexBuildSuccess(opCtx, descriptor->getEntry()); + collection->indexBuildSuccess(opCtx, writableEntry); } Status removeEntry(OperationContext* opCtx, StringData collNs, DurableCatalog* catalog) { const Collection* collection = CollectionCatalog::get(opCtx)->lookupCollectionByNamespace( opCtx, NamespaceString::createNamespaceString_forTest(collNs)); - return dynamic_cast(catalog)->_removeEntry(opCtx, - collection->getCatalogId()); + return catalog->_removeEntry(opCtx, collection->getCatalogId()); } StorageEngine* _storageEngine; diff --git a/src/mongo/db/storage/storage_file_util.cpp b/src/mongo/db/storage/storage_file_util.cpp index 96d0c8e751072..23939f54f0579 100644 --- a/src/mongo/db/storage/storage_file_util.cpp +++ b/src/mongo/db/storage/storage_file_util.cpp @@ -30,19 +30,29 @@ #include "mongo/db/storage/storage_file_util.h" +#include +#include #include -#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" #ifdef __linux__ #include -#include -#include #endif -#include - +#include "mongo/base/error_codes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include 
"mongo/logv2/log_component.h" +#include "mongo/util/errno_util.h" #include "mongo/util/file.h" +#include "mongo/util/str.h" + +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/storage_file_util.h b/src/mongo/db/storage/storage_file_util.h index 71eff7b025f59..694bf3862d4c7 100644 --- a/src/mongo/db/storage/storage_file_util.h +++ b/src/mongo/db/storage/storage_file_util.h @@ -29,11 +29,11 @@ #pragma once -#include "mongo/platform/basic.h" - #include +#include #include "mongo/base/status.h" +#include "mongo/platform/basic.h" namespace mongo { diff --git a/src/mongo/db/storage/storage_init.cpp b/src/mongo/db/storage/storage_init.cpp index f1a8e72319515..407e4aee325af 100644 --- a/src/mongo/db/storage/storage_init.cpp +++ b/src/mongo/db/storage/storage_init.cpp @@ -27,8 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/client.h" #include "mongo/db/commands/server_status.h" #include "mongo/db/operation_context.h" diff --git a/src/mongo/db/storage/storage_options.cpp b/src/mongo/db/storage/storage_options.cpp index d3bd9769c657e..234bb0f9550f6 100644 --- a/src/mongo/db/storage/storage_options.cpp +++ b/src/mongo/db/storage/storage_options.cpp @@ -27,12 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/storage/storage_options.h" - #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/platform/compiler.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/storage/storage_options.h b/src/mongo/db/storage/storage_options.h index e8a8ac92ba326..8f7805a368fd2 100644 --- a/src/mongo/db/storage/storage_options.h +++ b/src/mongo/db/storage/storage_options.h @@ -87,8 +87,10 @@ struct StorageGlobalParams { bool ephemeral = false; // --journalCommitInterval + // This parameter is both a server parameter and a configuration parameter, and to resolve + // conflicts between the two the default must be set here. static constexpr int kMaxJournalCommitIntervalMs = 500; - AtomicWord journalCommitIntervalMs; + AtomicWord journalCommitIntervalMs{100}; // --notablescan // no table scans allowed @@ -105,8 +107,10 @@ struct StorageGlobalParams { // one. A value of 0 indicates that checkpointing will be skipped. // Do not set this value on production systems. // In almost every situation, you should use the default setting. + // This parameter is both a server parameter and a configuration parameter, and to resolve + // conflicts between the two the default must be set here. static constexpr double kMaxSyncdelaySecs = 60 * 60; // 1hr - AtomicDouble syncdelay; // seconds between checkpoints + AtomicDouble syncdelay{60.0}; // seconds between checkpoints // --queryableBackupMode // Prevents user-originating operations from performing writes to the server. 
Internally diff --git a/src/mongo/db/storage/storage_parameters.idl b/src/mongo/db/storage/storage_parameters.idl index 5a90e6191d97e..1dde2c90afcdd 100644 --- a/src/mongo/db/storage/storage_parameters.idl +++ b/src/mongo/db/storage/storage_parameters.idl @@ -30,6 +30,7 @@ global: cpp_namespace: "mongo" cpp_includes: - "mongo/bson/bson_depth.h" + - "mongo/db/storage/storage_options.h" server_parameters: notablescan: @@ -39,6 +40,8 @@ server_parameters: If true, MongoDB will not execute queries that require a table scan and will return an error. set_at: [ startup, runtime ] cpp_varname: 'storageGlobalParams.noTableScan' + # This is an alias for the 'storage.syncPeriodSecs' config, and the default is defined in the + # StorageGlobalParams struct to resolve a conflict between config parameter and set parameter evaluation. syncdelay: # In almost every situation you should not set this value and use the default setting. description: >- @@ -49,6 +52,8 @@ server_parameters: validator: gte: 0.0 lte: { expr: 'StorageGlobalParams::kMaxSyncdelaySecs' } + # This is an alias for the 'storage.journal.commitIntervalMs' config, and the default is defined in the + # StorageGlobalParams struct to resolve a conflict between config parameter and set parameter evaluation. journalCommitInterval: description: 'Number of milliseconds between journal commits' set_at: [ startup, runtime ] @@ -164,65 +169,86 @@ server_parameters: default: 10 validator: { gte: 0 } + allowUnsafeUntimestampedWrites: + description: >- + Allows a replica set member in standalone mode to perform unsafe untimestamped writes + set_at: [ startup ] + cpp_varname: gAllowUnsafeUntimestampedWrites + cpp_vartype: bool + default: true + feature_flags: featureFlagTimeseriesMetricIndexes: description: "When enabled, support secondary indexes on time-series measurements" cpp_varname: feature_flags::gTimeseriesMetricIndexes default: true version: 6.0 + shouldBeFCVGated: true featureFlagCollModIndexUnique: description: "When enabled, collMod supports making an index unique" cpp_varname: feature_flags::gCollModIndexUnique default: true version: 6.0 + shouldBeFCVGated: true featureFlagSelectiveBackup: description: "When enabled, support selective backups and restores on collections" cpp_varname: feature_flags::gSelectiveBackup default: true version: 6.0 - featureFlagBatchMultiDeletes: - description: "When enabled, support batching multi-document deletions" - cpp_varname: feature_flags::gBatchMultiDeletes - default: true - version: 6.1 + shouldBeFCVGated: true + featureFlagLargeBatchedOperations: + description: >- + Enable support for replicating batched operations over multiple applyOps oplog + entries. Otherwise, batched operations that do not fit within a single applyOps + oplog entry will fail with a TransactionTooLarge error. + See maxNumberOfBatchedOperationsInSingleOplogEntry and + maxSizeOfBatchedOperationsInSingleOplogEntryBytes. 
+ cpp_varname: gFeatureFlagLargeBatchedOperations + default: false + shouldBeFCVGated: true featureFlagDocumentSourceListCatalog: description: "When enabled, allow the use of the $listCatalog aggregation stage" cpp_varname: feature_flags::gDocumentSourceListCatalog default: true version: 6.0 + shouldBeFCVGated: true featureFlagDerivedMetadata: description: "When enabled, support storing derived collection and index metadata" cpp_varname: feature_flags::gDerivedMetadata default: false + shouldBeFCVGated: true featureFlagTimeseriesScalabilityImprovements: description: "Enable scalability and usability improvements for time-series collections" cpp_varname: feature_flags::gTimeseriesScalabilityImprovements default: true version: 6.3 - featureFlagExtendValidateCommand: - description: "Enable checks on more types of inconsistencies for the validate command" - cpp_varname: feature_flags::gExtendValidateCommand - default: true - version: 6.2 - featureFlagPointInTimeCatalogLookups: - description: "Support reads when in-memory catalog is out-of-sync with storage transaction" - cpp_varname: feature_flags::gPointInTimeCatalogLookups - default: true - version: 7.0 + shouldBeFCVGated: true featureFlagInternalWritesAreReplicatedTransactionally: description: Feature flag to enable internal writes to use the transactionally replicated WriteUnitOfWork API by default. cpp_varname: gFeatureFlagInternalWritesAreReplicatedTransactionally default: false + shouldBeFCVGated: true featureFlagTimeseriesDeletesSupport: description: "Enable support for arbitrary deletes on time-series collections" cpp_varname: feature_flags::gTimeseriesDeletesSupport default: true version: 7.0 + shouldBeFCVGated: true featureFlagTimeseriesUpdatesSupport: description: "Enable support for arbitrary updates on time-series collections" cpp_varname: feature_flags::gTimeseriesUpdatesSupport default: false + shouldBeFCVGated: true featureFlagIndexBuildGracefulErrorHandling: description: "Enable index build error handling improvements" cpp_varname: feature_flags::gIndexBuildGracefulErrorHandling + default: true + version: 7.1 + shouldBeFCVGated: true + featureFlagTimeseriesAlwaysUseCompressedBuckets: + description: >- + Enable support to always use the compressed time-series bucket format when inserting, + updating, or removing data into new, existing, or re-opened buckets. 
+ cpp_varname: feature_flags::gTimeseriesAlwaysUseCompressedBuckets default: false + shouldBeFCVGated: true diff --git a/src/mongo/db/storage/storage_repair_observer.cpp b/src/mongo/db/storage/storage_repair_observer.cpp index 33359954d896a..84684cdffb547 100644 --- a/src/mongo/db/storage/storage_repair_observer.cpp +++ b/src/mongo/db/storage/storage_repair_observer.cpp @@ -31,27 +31,33 @@ #include "mongo/db/storage/storage_repair_observer.h" #include -#include -#include - -#ifdef __linux__ -#include -#include -#include -#endif - #include #include - +#include +#include + +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_config_gen.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/control/journal_flusher.h" #include "mongo/db/storage/storage_file_util.h" -#include "mongo/db/storage/storage_options.h" #include "mongo/logv2/log.h" -#include "mongo/util/file.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/errno_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/storage_repair_observer.h b/src/mongo/db/storage/storage_repair_observer.h index a28cae64bb794..618b1a8e3fcfc 100644 --- a/src/mongo/db/storage/storage_repair_observer.h +++ b/src/mongo/db/storage/storage_repair_observer.h @@ -29,11 +29,14 @@ #pragma once -#include "mongo/platform/basic.h" - #include +#include +#include +#include +#include #include "mongo/db/service_context.h" +#include "mongo/platform/basic.h" namespace mongo { diff --git a/src/mongo/db/storage/storage_repair_observer_test.cpp b/src/mongo/db/storage/storage_repair_observer_test.cpp index 780a3518b9a4a..059baa79fb9e6 100644 --- a/src/mongo/db/storage/storage_repair_observer_test.cpp +++ b/src/mongo/db/storage/storage_repair_observer_test.cpp @@ -27,26 +27,43 @@ * it in the license file. 
*/ +#include +#include +#include -#include "mongo/platform/basic.h" - -#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/storage_repair_observer.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/transaction_resources.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace { @@ -72,11 +89,15 @@ class StorageRepairObserverTest : public ServiceContextMongoDTest { void createMockReplConfig(OperationContext* opCtx) { BSONObj replConfig; - Lock::DBLock dbLock(opCtx, DatabaseName(boost::none, "local"), MODE_X); - Helpers::putSingleton( + Lock::DBLock dbLock(opCtx, DatabaseName::kLocal, MODE_X); + auto coll = acquireCollection( opCtx, - NamespaceString::createNamespaceString_forTest(boost::none, "local.system.replset"), - replConfig); + CollectionAcquisitionRequest(NamespaceString::kSystemReplSetNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_X); + Helpers::putSingleton(opCtx, coll, replConfig); } void assertReplConfigValid(OperationContext* opCtx, bool valid) { @@ -94,7 +115,7 @@ class StorageRepairObserverTest : public ServiceContextMongoDTest { bool hasReplConfig(OperationContext* opCtx) { BSONObj replConfig; - Lock::DBLock dbLock(opCtx, DatabaseName(boost::none, "local"), MODE_IS); + Lock::DBLock dbLock(opCtx, DatabaseName::kLocal, MODE_IS); return Helpers::getSingleton( opCtx, NamespaceString::createNamespaceString_forTest(boost::none, "local.system.replset"), diff --git a/src/mongo/db/storage/storage_util.cpp b/src/mongo/db/storage/storage_util.cpp index 2221261408c96..b21cceb1f242a 100644 --- a/src/mongo/db/storage/storage_util.cpp +++ b/src/mongo/db/storage/storage_util.cpp @@ -28,21 +28,30 @@ */ -#include "mongo/platform/basic.h" - -#include - #include +#include +#include -#include "mongo/db/storage/storage_util.h" +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/database_name.h" +#include "mongo/db/service_context.h" #include "mongo/db/storage/durable_catalog.h" #include "mongo/db/storage/ident.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_engine.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include 
"mongo/db/storage/storage_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -59,8 +68,7 @@ auto removeEmptyDirectory = auto collectionCatalog = CollectionCatalog::latest(svcCtx); const DatabaseName& dbName = ns.dbName(); if (!storageEngine->isUsingDirectoryPerDb() || - (storageEngine->supportsPendingDrops() && - collectionCatalog->begin(nullptr, dbName) != collectionCatalog->end(nullptr))) { + (storageEngine->supportsPendingDrops() && !collectionCatalog->range(dbName).empty())) { return; } @@ -69,7 +77,7 @@ auto removeEmptyDirectory = if (!ec) { LOGV2(4888200, "Removed empty database directory", logAttrs(dbName)); - } else if (collectionCatalog->begin(nullptr, dbName) == collectionCatalog->end(nullptr)) { + } else if (collectionCatalog->range(dbName).empty()) { // It is possible for a new collection to be created in the database between when we // check whether the database is empty and actually attempting to remove the directory. // In this case, don't log that the removal failed because it is expected. However, @@ -122,9 +130,7 @@ void removeIndex(OperationContext* opCtx, const bool isTwoPhaseDrop = storageEngine->supportsPendingDrops() && dataRemoval == DataRemoval::kTwoPhase; - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe() && isTwoPhaseDrop) { + if (isTwoPhaseDrop) { invariant(entry); CollectionCatalog::get(opCtx)->dropIndex( opCtx, collection->ns(), entry, /*isDropPending=*/true); @@ -132,7 +138,7 @@ void removeIndex(OperationContext* opCtx, // Schedule the second phase of drop to delete the data when it is no longer in use, if the // first phase is successfully committed. - opCtx->recoveryUnit()->onCommit( + opCtx->recoveryUnit()->onCommitForTwoPhaseDrop( [svcCtx = opCtx->getServiceContext(), recoveryUnit, storageEngine, @@ -145,11 +151,7 @@ void removeIndex(OperationContext* opCtx, [svcCtx, storageEngine, nss, ident = ident->getIdent(), isTwoPhaseDrop] { removeEmptyDirectory(svcCtx, storageEngine, nss); - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade - // concerns. - if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe() && - isTwoPhaseDrop) { + if (isTwoPhaseDrop) { CollectionCatalog::write(svcCtx, [&](CollectionCatalog& catalog) { catalog.notifyIdentDropped(ident); }); @@ -208,18 +210,14 @@ Status dropCollection(OperationContext* opCtx, // Schedule the second phase of drop to delete the data when it is no longer in use, if the // first phase is successfully committed. - opCtx->recoveryUnit()->onCommit( + opCtx->recoveryUnit()->onCommitForTwoPhaseDrop( [svcCtx = opCtx->getServiceContext(), recoveryUnit, storageEngine, nss, ident]( OperationContext*, boost::optional commitTimestamp) { StorageEngine::DropIdentCallback onDrop = [svcCtx, storageEngine, nss, ident = ident->getIdent()] { removeEmptyDirectory(svcCtx, storageEngine, nss); - // TODO SERVER-68674: Remove feature flag check. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade - // concerns. 
- if (feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe() && - storageEngine->supportsPendingDrops()) { + if (storageEngine->supportsPendingDrops()) { CollectionCatalog::write(svcCtx, [&](CollectionCatalog& catalog) { catalog.notifyIdentDropped(ident); }); diff --git a/src/mongo/db/storage/storage_util.h b/src/mongo/db/storage/storage_util.h index 00b79048e2fb8..87f315195de39 100644 --- a/src/mongo/db/storage/storage_util.h +++ b/src/mongo/db/storage/storage_util.h @@ -29,9 +29,19 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/storage/ident.h" #include "mongo/db/storage/write_unit_of_work.h" #include "mongo/util/uuid.h" @@ -123,7 +133,7 @@ Status insertBatchAndHandleRetry(OperationContext* opCtx, // Try to insert the batch one-at-a-time because the batch failed all-at-once inserting. for (auto it = docs.cbegin(); it != docs.cend(); ++it) { - auto status = writeConflictRetry(opCtx, "batchInsertDocuments", nsOrUUID.toString(), [&] { + auto status = writeConflictRetry(opCtx, "batchInsertDocuments", nsOrUUID, [&] { auto status = insertFn(opCtx, it, it + 1); if (!status.isOK()) { return status; diff --git a/src/mongo/db/storage/test_harness_helper.h b/src/mongo/db/storage/test_harness_helper.h index 6ccd812df9c5f..8cac66188c829 100644 --- a/src/mongo/db/storage/test_harness_helper.h +++ b/src/mongo/db/storage/test_harness_helper.h @@ -32,12 +32,9 @@ #include #include #include -#include #include "mongo/db/jsobj.h" -#include "mongo/db/operation_context_noop.h" #include "mongo/db/record_id.h" -#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/storage/sorted_data_interface.h" @@ -85,21 +82,4 @@ class HarnessHelper : public ScopedGlobalServiceContextForTest { ThreadClient _threadClient; }; -namespace harness_helper_detail { -template -std::unique_ptr noexcept_ptr_conversion(std::unique_ptr&& p, Target& t) noexcept { - p.release(); - return std::unique_ptr(std::addressof(t)); -} -} // namespace harness_helper_detail - -template -std::unique_ptr dynamic_ptr_cast(std::unique_ptr&& p) { - if (!p) { - throw std::runtime_error("Must not be null."); - } - Target& target = dynamic_cast(*p); - return harness_helper_detail::noexcept_ptr_conversion(std::move(p), target); -} - } // namespace mongo diff --git a/src/mongo/db/storage/ticketholder_manager.cpp b/src/mongo/db/storage/ticketholder_manager.cpp index c8fc0fa4d8c1d..d4cb518d83d90 100644 --- a/src/mongo/db/storage/ticketholder_manager.cpp +++ b/src/mongo/db/storage/ticketholder_manager.cpp @@ -28,13 +28,25 @@ */ #include "mongo/db/storage/ticketholder_manager.h" + +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/db/client.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/storage/execution_control/concurrency_adjustment_parameters_gen.h" #include "mongo/db/storage/execution_control/throughput_probing.h" #include "mongo/db/storage/storage_engine_feature_flags_gen.h" -#include "mongo/db/storage/storage_engine_parameters_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include 
"mongo/util/concurrency/priority_ticketholder.h" -#include "mongo/util/concurrency/semaphore_ticketholder.h" #include "mongo/util/concurrency/ticketholder.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -58,11 +70,14 @@ TicketHolderManager::TicketHolderManager(ServiceContext* svcCtx, switch (StorageEngineConcurrencyAdjustmentAlgorithm_parse( IDLParserContext{"storageEngineConcurrencyAdjustmentAlgorithm"}, gStorageEngineConcurrencyAdjustmentAlgorithm)) { - case StorageEngineConcurrencyAdjustmentAlgorithmEnum::kNone: + case StorageEngineConcurrencyAdjustmentAlgorithmEnum::kFixedConcurrentTransactions: return nullptr; case StorageEngineConcurrencyAdjustmentAlgorithmEnum::kThroughputProbing: return std::make_unique( - svcCtx, _readTicketHolder.get(), _writeTicketHolder.get(), Milliseconds{100}); + svcCtx, + _readTicketHolder.get(), + _writeTicketHolder.get(), + Milliseconds{gStorageEngineConcurrencyAdjustmentIntervalMillis}); } MONGO_UNREACHABLE; }()) { @@ -72,13 +87,9 @@ TicketHolderManager::TicketHolderManager(ServiceContext* svcCtx, } Status TicketHolderManager::updateConcurrentWriteTransactions(const int32_t& newWriteTransactions) { - // (Ignore FCV check): This feature flag doesn't have upgrade/downgrade concern. - if (feature_flags::gFeatureFlagExecutionControl.isEnabledAndIgnoreFCVUnsafe() && - !gStorageEngineConcurrencyAdjustmentAlgorithm.empty()) { - return {ErrorCodes::IllegalOperation, - "Cannot modify concurrent write transactions limit when it is being dynamically " - "adjusted"}; - } + auto concurrencyAlgorithm = StorageEngineConcurrencyAdjustmentAlgorithm_parse( + IDLParserContext{"storageEngineConcurrencyAdjustmentAlgorithm"}, + gStorageEngineConcurrencyAdjustmentAlgorithm); if (auto client = Client::getCurrent()) { auto ticketHolderManager = TicketHolderManager::get(client->getServiceContext()); @@ -90,6 +101,20 @@ Status TicketHolderManager::updateConcurrentWriteTransactions(const int32_t& new "Attempting to modify write transactions limit on an instance without a " "storage engine"); } + + auto& monitor = ticketHolderManager->_monitor; + // (Ignore FCV check): This feature flag doesn't have upgrade/downgrade concern. + if (monitor && feature_flags::gFeatureFlagExecutionControl.isEnabledAndIgnoreFCVUnsafe() && + concurrencyAlgorithm == + StorageEngineConcurrencyAdjustmentAlgorithmEnum::kThroughputProbing) { + // In order to manually set the number of read/write tickets, users must set the + // storageEngineConcurrencyAdjustmentAlgorithm to 'kFixedConcurrentTransactions'. + return { + ErrorCodes::IllegalOperation, + "Cannot modify concurrent write transactions limit when it is being dynamically " + "adjusted"}; + } + auto& writer = ticketHolderManager->_writeTicketHolder; if (writer) { writer->resize(newWriteTransactions); @@ -106,13 +131,9 @@ Status TicketHolderManager::updateConcurrentWriteTransactions(const int32_t& new }; Status TicketHolderManager::updateConcurrentReadTransactions(const int32_t& newReadTransactions) { - // (Ignore FCV check): This feature flag doesn't have upgrade/downgrade concern. 
- if (feature_flags::gFeatureFlagExecutionControl.isEnabledAndIgnoreFCVUnsafe() && - !gStorageEngineConcurrencyAdjustmentAlgorithm.empty()) { - return {ErrorCodes::IllegalOperation, - "Cannot modify concurrent read transactions limit when it is being dynamically " - "adjusted"}; - } + auto concurrencyAlgorithm = StorageEngineConcurrencyAdjustmentAlgorithm_parse( + IDLParserContext{"storageEngineConcurrencyAdjustmentAlgorithm"}, + gStorageEngineConcurrencyAdjustmentAlgorithm); if (auto client = Client::getCurrent()) { auto ticketHolderManager = TicketHolderManager::get(client->getServiceContext()); @@ -124,6 +145,19 @@ Status TicketHolderManager::updateConcurrentReadTransactions(const int32_t& newR "Attempting to modify read transactions limit on an instance without a " "storage engine"); } + + auto& monitor = ticketHolderManager->_monitor; + // (Ignore FCV check): This feature flag doesn't have upgrade/downgrade concern. + if (monitor && feature_flags::gFeatureFlagExecutionControl.isEnabledAndIgnoreFCVUnsafe() && + concurrencyAlgorithm == + StorageEngineConcurrencyAdjustmentAlgorithmEnum::kThroughputProbing) { + // In order to manually set the number of read/write tickets, users must set the + // storageEngineConcurrencyAdjustmentAlgorithm to 'fixedConcurrentTransactions'. + return {ErrorCodes::IllegalOperation, + "Cannot modify concurrent read transactions limit when it is being dynamically " + "adjusted"}; + } + auto& reader = ticketHolderManager->_readTicketHolder; if (reader) { reader->resize(newReadTransactions); @@ -223,12 +257,4 @@ void TicketHolderManager::appendStats(BSONObjBuilder& b) { } } -Status TicketHolderManager::validateConcurrencyAdjustmentAlgorithm( - const std::string& name, const boost::optional&) try { - StorageEngineConcurrencyAdjustmentAlgorithm_parse( - IDLParserContext{"storageEngineConcurrencyAdjustmentAlgorithm"}, name); - return Status::OK(); -} catch (const DBException& ex) { - return ex.toStatus(); -} } // namespace mongo diff --git a/src/mongo/db/storage/ticketholder_manager.h b/src/mongo/db/storage/ticketholder_manager.h index 4e3228417414f..aab00b2c5f244 100644 --- a/src/mongo/db/storage/ticketholder_manager.h +++ b/src/mongo/db/storage/ticketholder_manager.h @@ -29,10 +29,17 @@ #pragma once +#include +#include +#include +#include + #include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/ticketholder_monitor.h" +#include "mongo/db/tenant_id.h" #include "mongo/util/concurrency/ticketholder.h" namespace mongo { diff --git a/src/mongo/db/storage/ticketholder_monitor.cpp b/src/mongo/db/storage/ticketholder_monitor.cpp index d77c6592ff7c0..ba79948bc7e5f 100644 --- a/src/mongo/db/storage/ticketholder_monitor.cpp +++ b/src/mongo/db/storage/ticketholder_monitor.cpp @@ -29,6 +29,8 @@ #include "mongo/db/storage/ticketholder_monitor.h" +#include + namespace mongo { TicketHolderMonitor::TicketHolderMonitor(ServiceContext* svcCtx, @@ -38,7 +40,11 @@ TicketHolderMonitor::TicketHolderMonitor(ServiceContext* svcCtx, : _readTicketHolder(readTicketHolder), _writeTicketHolder(writeTicketHolder), _job(svcCtx->getPeriodicRunner()->makeJob(PeriodicRunner::PeriodicJob{ - "TicketHolderMonitor", [this](Client* client) { _run(client); }, interval})) {} + "TicketHolderMonitor", + [this](Client* client) { _run(client); }, + interval, + // TODO(SERVER-74657): Please revisit if this periodic job could be made killable.
+ false /*isKillableByStepdown*/})) {} void TicketHolderMonitor::start() { _job.start(); diff --git a/src/mongo/db/storage/ticketholder_monitor.h b/src/mongo/db/storage/ticketholder_monitor.h index 5661ca2eaf80c..f4db3dce3b283 100644 --- a/src/mongo/db/storage/ticketholder_monitor.h +++ b/src/mongo/db/storage/ticketholder_monitor.h @@ -29,8 +29,11 @@ #pragma once +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/service_context.h" #include "mongo/util/concurrency/ticketholder.h" +#include "mongo/util/duration.h" #include "mongo/util/periodic_runner.h" namespace mongo { @@ -52,10 +55,6 @@ class TicketHolderMonitor { TicketHolder* _readTicketHolder; TicketHolder* _writeTicketHolder; - Milliseconds _interval() { - return _job.getPeriod(); - } - private: virtual void _run(Client*) = 0; diff --git a/src/mongo/db/storage/wiredtiger/SConscript b/src/mongo/db/storage/wiredtiger/SConscript index 0d7e779feee90..287d5b6b3cb03 100644 --- a/src/mongo/db/storage/wiredtiger/SConscript +++ b/src/mongo/db/storage/wiredtiger/SConscript @@ -104,7 +104,6 @@ wtEnv.Library( 'wiredtiger_global_options.idl', ], LIBDEPS=[ - '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/storage/storage_engine_impl', '$BUILD_DIR/mongo/db/storage/storage_engine_lock_file', '$BUILD_DIR/mongo/db/storage/storage_engine_metadata', @@ -114,6 +113,7 @@ wtEnv.Library( LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/catalog/database_holder', '$BUILD_DIR/mongo/db/commands/server_status_core', + '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/storage/storage_engine_common', '$BUILD_DIR/mongo/util/options_parser/options_parser', ], @@ -148,14 +148,13 @@ wtEnv.CppUnitTest( '$BUILD_DIR/mongo/db/index/index_access_method', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', '$BUILD_DIR/mongo/db/repl/replmocks', - '$BUILD_DIR/mongo/db/service_context', - '$BUILD_DIR/mongo/db/service_context_d', - '$BUILD_DIR/mongo/db/service_context_test_fixture', + '$BUILD_DIR/mongo/db/service_context_d_test_fixture', '$BUILD_DIR/mongo/db/snapshot_window_options', '$BUILD_DIR/mongo/db/storage/checkpointer', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/kv/kv_engine_test_harness', '$BUILD_DIR/mongo/db/storage/recovery_unit_test_harness', + '$BUILD_DIR/mongo/db/storage/storage_engine_common', '$BUILD_DIR/mongo/db/storage/storage_engine_metadata', '$BUILD_DIR/mongo/db/storage/storage_options', '$BUILD_DIR/mongo/util/clock_source_mock', @@ -172,7 +171,7 @@ wtEnv.Library( LIBDEPS=[ '$BUILD_DIR/mongo/db/repl/replmocks', '$BUILD_DIR/mongo/db/service_context_test_fixture', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/record_store_test_harness', '$BUILD_DIR/mongo/util/clock_source_mock', 'storage_wiredtiger_core', @@ -189,6 +188,9 @@ wtEnv.Library( '$BUILD_DIR/mongo/db/storage/sorted_data_interface_test_harness', 'storage_wiredtiger_core', ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/service_context_non_d', + ], ) wtEnv.Library( @@ -231,7 +233,7 @@ wtEnv.CppUnitTest( '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/multitenancy', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/sorted_data_interface_tests', 'storage_wiredtiger_core', 'wiredtiger_index_test_harness', @@ -248,7 +250,7 @@ wtEnv.Benchmark( 
'$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/multitenancy', '$BUILD_DIR/mongo/db/repl/repl_coordinator_interface', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/db/storage/sorted_data_interface_bm', 'wiredtiger_index_test_harness', ], @@ -259,9 +261,9 @@ wtEnv.Benchmark( source='wiredtiger_begin_transaction_block_bm.cpp', LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', - '$BUILD_DIR/mongo/db/service_context', - '$BUILD_DIR/mongo/db/storage/durable_catalog_impl', - '$BUILD_DIR/mongo/unittest/unittest', + '$BUILD_DIR/mongo/db/service_context_non_d', + '$BUILD_DIR/mongo/db/service_context_test_fixture', + '$BUILD_DIR/mongo/db/storage/durable_catalog', '$BUILD_DIR/mongo/util/clock_source_mock', 'storage_wiredtiger_core', ], diff --git a/src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp b/src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp index 3b296685965d0..d0d6c2e1cd758 100644 --- a/src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp +++ b/src/mongo/db/storage/wiredtiger/oplog_truncate_markers_server_status_section.cpp @@ -27,11 +27,19 @@ * it in the license file. */ +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/database_name.h" -#include "mongo/db/db_raii.h" #include "mongo/db/namespace_string.h" -#include "mongo/logv2/log.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_engine.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kFTDC diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.cpp index 5b1613af56800..26156fcc003f0 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.cpp @@ -28,15 +28,21 @@ */ -#include "mongo/platform/basic.h" - -#include #include +#include -#include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" +#include +#include +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl_set_member_in_standalone_mode.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include "mongo/util/errno_util.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -72,6 +78,14 @@ WiredTigerBeginTxnBlock::WiredTigerBeginTxnBlock( } if (allowUntimestampedWrite != RecoveryUnit::UntimestampedWriteAssertionLevel::kEnforce) { builder << "no_timestamp=true,"; + } else if (MONGO_unlikely(gAllowUnsafeUntimestampedWrites && + getReplSetMemberInStandaloneMode(getGlobalServiceContext()) && + !repl::ReplSettings::shouldRecoverFromOplogAsStandalone())) { + // We can safely ignore setting this configuration option when recovering from the oplog as + // standalone because: + // 1. Oplog entries are replayed with a timestamp.
+ // 2. The instance is put in read-only mode after oplog application has finished. + builder << "no_timestamp=true,"; } const std::string beginTxnConfigString = builder; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h b/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h index 5634c5e252060..d60aa27cf88db 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h @@ -30,7 +30,6 @@ #pragma once #include - #include #include "mongo/base/status.h" diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block_bm.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block_bm.cpp index 658baa8c51bc1..14bf1e59e2f47 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block_bm.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block_bm.cpp @@ -27,23 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include - -#include "mongo/base/checked_cast.h" -#include "mongo/db/repl/repl_settings.h" -#include "mongo/db/repl/replication_coordinator_mock.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" -#include "mongo/db/storage/recovery_unit_test_harness.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/clock_source_mock.h" namespace mongo { @@ -70,48 +78,34 @@ class WiredTigerConnection { WT_CONNECTION* _conn; }; -class WiredTigerTestHelper { +class WiredTigerTestHelper : public ScopedGlobalServiceContextForTest { public: WiredTigerTestHelper() - : _dbpath("wt_test"), - _connection(_dbpath.path(), ""), - _sessionCache(_connection.getConnection(), &_clockSource) { - _opCtx.reset(newOperationContext()); - auto ru = WiredTigerRecoveryUnit::get(_opCtx.get()); + : _threadClient(getServiceContext()), _opCtxHolder(_threadClient->makeOperationContext()) { + _opCtxHolder->setRecoveryUnit( + std::make_unique(&_sessionCache, &_oplogManager), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); + auto ru = WiredTigerRecoveryUnit::get(_opCtxHolder.get()); _wtSession = ru->getSession()->getSession(); invariant(wtRCToStatus(_wtSession->create(_wtSession, "table:mytable", nullptr), _wtSession) .isOK()); ru->abandonSnapshot(); } - WiredTigerSessionCache* getSessionCache() { - return &_sessionCache; - } - - WiredTigerOplogManager* getOplogManager() { - return &_oplogManager; - } - WT_SESSION* wtSession() const { return _wtSession; } - OperationContext* newOperationContext() { - return new OperationContextNoop( - new 
WiredTigerRecoveryUnit(getSessionCache(), &_oplogManager)); - } - - OperationContext* getOperationContext() const { - return _opCtx.get(); - } - private: - unittest::TempDir _dbpath; - WiredTigerConnection _connection; + unittest::TempDir _dbpath{"wt_test"}; + WiredTigerConnection _connection{_dbpath.path(), ""}; ClockSourceMock _clockSource; - WiredTigerSessionCache _sessionCache; + WiredTigerSessionCache _sessionCache{_connection.getConnection(), &_clockSource}; WiredTigerOplogManager _oplogManager; - std::unique_ptr _opCtx; + + ThreadClient _threadClient; + ServiceContext::UniqueOperationContext _opCtxHolder; + WT_SESSION* _wtSession; }; @@ -122,8 +116,6 @@ void BM_WiredTigerBeginTxnBlock(benchmark::State& state) { } } -using mongo::WiredTigerBeginTxnBlock; - template void BM_WiredTigerBeginTxnBlockWithArgs(benchmark::State& state) { WiredTigerTestHelper helper; @@ -136,7 +128,6 @@ void BM_WiredTigerBeginTxnBlockWithArgs(benchmark::State& state) { } } - void BM_setTimestamp(benchmark::State& state) { WiredTigerTestHelper helper; for (auto _ : state) { @@ -164,7 +155,6 @@ BENCHMARK_TEMPLATE(BM_WiredTigerBeginTxnBlockWithArgs, BENCHMARK_TEMPLATE(BM_WiredTigerBeginTxnBlockWithArgs, PrepareConflictBehavior::kIgnoreConflictsAllowWrites, RoundUpPreparedTimestamps::kRound); - BENCHMARK(BM_setTimestamp); } // namespace diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_c_api_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_c_api_test.cpp index 977f1783c3f26..b17623ab32dc3 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_c_api_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_c_api_test.cpp @@ -27,18 +27,25 @@ * it in the license file. */ -#include #include #include -#include +#include #include +#include #include #include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.cpp index 1f30be2163b3b..301fca2224921 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.cpp @@ -29,14 +29,41 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_column_store.h" -#include "mongo/db/global_settings.h" + +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/index_names.h" +#include "mongo/db/service_context.h" +#include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_cursor.h" #include "mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.h" #include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h" #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_index_cursor_generic.h" #include "mongo/db/storage/wiredtiger/wiredtiger_index_util.h" +#include 
"mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/endian.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.h index dbff3985456e4..07de90b5f8e60 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_column_store.h @@ -29,7 +29,18 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/validate_results.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/storage/column_store.h" namespace mongo { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_column_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_column_store_test.cpp index 0675b9ae37ecf..148f67dc291ef 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_column_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_column_store_test.cpp @@ -27,29 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include - -#include "mongo/base/init.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/json.h" -#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/storage/wiredtiger/wiredtiger_column_store.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/hex.h" namespace mongo { namespace { -using std::string; - TEST(WiredTigerColumnStoreTest, MakeKey) { std::string out = WiredTigerColumnStore::makeKey_ForTest("a.b", 66 /* RowId */); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp index 49346bfc671b3..9c3f412ee7e87 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.cpp @@ -28,13 +28,18 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/wiredtiger/wiredtiger_cursor.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_cursor.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include 
"mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h index 025aa88c9c450..16fbf102ab393 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor.h @@ -29,6 +29,8 @@ #pragma once +#include +#include #include #include "mongo/db/operation_context.h" diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.cpp index d63559579c06f..74b0e15943f84 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/operation_context.h" #include "mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/platform/compiler.h" #include "mongo/util/stacktrace.h" #include "mongo/util/testing_proctor.h" diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp index e8b1b1fac4b7c..855aea07015b9 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.cpp @@ -27,15 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h" - #include +#include -#include "mongo/base/init.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/string_data.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_extensions.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_extensions.cpp index 564c4bae5c6a9..029aa81fc146a 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_extensions.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_extensions.cpp @@ -27,14 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/wiredtiger/wiredtiger_extensions.h" - #include +#include #include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_extensions.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp index 47177fa405874..72e8976c91cee 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.cpp @@ -28,11 +28,12 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" - +#include "mongo/base/error_codes.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/options_parser/value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h index d3c19989bdc27..854ab0014d258 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_global_options.h @@ -29,9 +29,11 @@ #pragma once +#include #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/util/options_parser/environment.h" namespace mongo { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_import.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_import.cpp index 1dd84cb247c67..59afca71254cb 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_import.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_import.cpp @@ -30,20 +30,29 @@ #include "wiredtiger_import.h" -#include #include #include -#include "mongo/base/status.h" -#include "mongo/bson/bsonmisc.h" +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/repl/tenant_migration_access_blocker_util.h" #include "mongo/db/storage/bson_collection_catalog_entry.h" -#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" -#include "mongo/util/database_name_util.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTenantMigration @@ -181,7 +190,7 @@ std::vector wiredTigerRollbackToStableAndGetMetadata( WT_ITEM catalogValue; uassertWTOK(mdbCatalogCursor->get_value(mdbCatalogCursor, &catalogValue), session); BSONObj rawCatalogEntry(static_cast(catalogValue.data)); - NamespaceString ns(NamespaceString::parseFromStringExpectTenantIdInMultitenancyMode( + NamespaceString ns(NamespaceStringUtil::parseFromStringExpectTenantIdInMultitenancyMode( rawCatalogEntry.getStringField("ns"))); if (!shouldImport(ns, migrationId)) { LOGV2_DEBUG(6113801, 1, "Not importing donor collection", "ns"_attr = ns); @@ -215,11 +224,11 @@ std::vector 
wiredTigerRollbackToStableAndGetMetadata( for (const auto& index : catalogEntry.indexes) { uassert(6113807, "No ident for donor index '{}' in collection '{}'"_format( - index.nameStringData(), ns.toString()), + index.nameStringData(), ns.toStringForErrorMsg()), indexNameToIdent.contains(index.nameStringData())); uassert(6114302, "Index '{}' for collection '{}' isn't ready"_format(index.nameStringData(), - ns.ns()), + ns.toStringForErrorMsg()), index.ready); WTIndexImportArgs indexImportArgs; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_import.h b/src/mongo/db/storage/wiredtiger/wiredtiger_import.h index 4f9fa567a93bb..4b5f2b78f6c15 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_import.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_import.h @@ -29,13 +29,16 @@ #pragma once +#include #include #include #include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/recovery_unit.h" +#include "mongo/util/uuid.h" namespace mongo { struct WTimportArgs { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp index 1c64d45943978..f5cedf0063ee8 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.cpp @@ -27,9 +27,12 @@ * it in the license file. */ - #include "mongo/db/storage/wiredtiger/wiredtiger_index.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/health_log.h" +#include "mongo/db/catalog/health_log_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.h" #include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h" #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" @@ -37,11 +40,11 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_index_util.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/util/stacktrace.h" #include "mongo/util/testing_proctor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - #define TRACING_ENABLED 0 #define LOGV2_TRACE_CURSOR(ID, NAME, ...) \ @@ -65,8 +68,53 @@ namespace mongo { namespace { MONGO_FAIL_POINT_DEFINE(WTIndexPauseAfterSearchNear); +MONGO_FAIL_POINT_DEFINE(WTIndexUassertDuplicateRecordForIdIndex); +MONGO_FAIL_POINT_DEFINE(WTIndexUassertDuplicateRecordForKeyOnIdUnindex); static const WiredTigerItem emptyItem(nullptr, 0); + +/** + * Add a data corruption entry to the health log. + */ +void addDataCorruptionEntryToHealthLog(OperationContext* opCtx, + const NamespaceString& nss, + StringData operation, + StringData message, + const BSONObj& key, + StringData indexName, + StringData uri) { + HealthLogEntry entry; + entry.setNss(nss); + entry.setTimestamp(Date_t::now()); + entry.setSeverity(SeverityEnum::Error); + entry.setScope(ScopeEnum::Index); + entry.setOperation(operation); + entry.setMsg(message); + + BSONObjBuilder bob; + bob.append("key", key); + bob.append("indexName", indexName); + bob.append("uri", uri); + bob.appendElements(getStackTrace().getBSONRepresentation()); + entry.setData(bob.obj()); + + HealthLog::get(opCtx)->log(entry); +} + +/** + * Returns the logv2::LogOptions controlling the behaviour after logging a data corruption error. + * When the TestingProctor is enabled we will fatally assert. 
When the testing proctor is disabled + * or when 'forceUassert' is specified (for instance because a failpoint is enabled), we should log + * and throw DataCorruptionDetected. + */ +logv2::LogOptions getLogOptionsForDataCorruption(bool forceUassert = false) { + if (!TestingProctor::instance().isEnabled() || forceUassert) { + return logv2::LogOptions{logv2::UserAssertAfterLog(ErrorCodes::DataCorruptionDetected)}; + } else { + return logv2::LogOptions(logv2::LogComponent::kAutomaticDetermination); + } +} + } // namespace void WiredTigerIndex::setKey(WT_CURSOR* cursor, const WT_ITEM* item) { @@ -235,23 +283,23 @@ NamespaceString WiredTigerIndex::getCollectionNamespace(OperationContext* opCtx) } namespace { -void dassertRecordIdAtEnd(const KeyString::Value& keyString, KeyFormat keyFormat) { +void dassertRecordIdAtEnd(const key_string::Value& keyString, KeyFormat keyFormat) { if (!kDebugBuild) { return; } RecordId rid; if (keyFormat == KeyFormat::Long) { - rid = KeyString::decodeRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); + rid = key_string::decodeRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); } else { - rid = KeyString::decodeRecordIdStrAtEnd(keyString.getBuffer(), keyString.getSize()); + rid = key_string::decodeRecordIdStrAtEnd(keyString.getBuffer(), keyString.getSize()); } invariant(rid.isValid(), rid.toString()); } } // namespace Status WiredTigerIndex::insert(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId) { dassert(opCtx->lockState()->isWriteLocked()); @@ -267,7 +315,7 @@ Status WiredTigerIndex::insert(OperationContext* opCtx, } void WiredTigerIndex::unindex(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) { dassert(opCtx->lockState()->isWriteLocked()); dassertRecordIdAtEnd(keyString, _rsKeyFormat); @@ -281,10 +329,10 @@ void WiredTigerIndex::unindex(OperationContext* opCtx, } boost::optional WiredTigerIndex::findLoc(OperationContext* opCtx, - const KeyString::Value& key) const { - dassert(KeyString::decodeDiscriminator( + const key_string::Value& key) const { + dassert(key_string::decodeDiscriminator( key.getBuffer(), key.getSize(), getOrdering(), key.getTypeBits()) == - KeyString::Discriminator::kInclusive); + key_string::Discriminator::kInclusive); auto cursor = newCursor(opCtx); auto ksEntry = cursor->seekForKeyString(key); @@ -293,11 +341,11 @@ boost::optional WiredTigerIndex::findLoc(OperationContext* opCtx, } auto sizeWithoutRecordId = KeyFormat::Long == _rsKeyFormat - ? KeyString::sizeWithoutRecordIdLongAtEnd(ksEntry->keyString.getBuffer(), - ksEntry->keyString.getSize()) - : KeyString::sizeWithoutRecordIdStrAtEnd(ksEntry->keyString.getBuffer(), - ksEntry->keyString.getSize()); - if (KeyString::compare( + ? 
key_string::sizeWithoutRecordIdLongAtEnd(ksEntry->keyString.getBuffer(), + ksEntry->keyString.getSize()) + : key_string::sizeWithoutRecordIdStrAtEnd(ksEntry->keyString.getBuffer(), + ksEntry->keyString.getSize()); + if (key_string::compare( ksEntry->keyString.getBuffer(), key.getBuffer(), sizeWithoutRecordId, key.getSize()) == 0) { return ksEntry->loc; @@ -309,7 +357,6 @@ IndexValidateResults WiredTigerIndex::validate(OperationContext* opCtx, bool ful dassert(opCtx->lockState()->isReadLocked()); IndexValidateResults results; - WiredTigerUtil::validateTableLogging(opCtx, _uri, _isLogged, @@ -332,8 +379,9 @@ int64_t WiredTigerIndex::numEntries(OperationContext* opCtx) const { LOGV2_TRACE_INDEX(20094, "numEntries"); - auto requestedInfo = TRACING_ENABLED ? Cursor::kKeyAndLoc : Cursor::kJustExistance; - KeyString::Value keyStringForSeek = + auto keyInclusion = + TRACING_ENABLED ? Cursor::KeyInclusion::kInclude : Cursor::KeyInclusion::kExclude; + key_string::Value keyStringForSeek = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek(BSONObj(), getKeyStringVersion(), getOrdering(), @@ -342,7 +390,7 @@ int64_t WiredTigerIndex::numEntries(OperationContext* opCtx) const { ); auto cursor = newCursor(opCtx); - for (auto kv = cursor->seek(keyStringForSeek, requestedInfo); kv; kv = cursor->next()) { + for (auto kv = cursor->seek(keyStringForSeek, keyInclusion); kv; kv = cursor->next()) { LOGV2_TRACE_INDEX(20095, "numEntries", "kv"_attr = kv); count++; } @@ -356,7 +404,7 @@ bool WiredTigerIndex::appendCustomStats(OperationContext* opCtx, return WiredTigerIndexUtil::appendCustomStats(opCtx, output, scale, _uri); } -Status WiredTigerIndex::dupKeyCheck(OperationContext* opCtx, const KeyString::Value& key) { +Status WiredTigerIndex::dupKeyCheck(OperationContext* opCtx, const key_string::Value& key) { invariant(unique()); WiredTigerCursor curwrap(_uri, _tableId, false, opCtx); @@ -374,7 +422,7 @@ bool WiredTigerIndex::isEmpty(OperationContext* opCtx) { } void WiredTigerIndex::printIndexEntryMetadata(OperationContext* opCtx, - const KeyString::Value& keyString) const { + const key_string::Value& keyString) const { // Printing the index entry metadata requires a new session. We cannot open other cursors when // there are open history store cursors in the session. We also need to make sure that the // existing session has not written data to avoid potential deadlocks. @@ -419,7 +467,7 @@ void WiredTigerIndex::printIndexEntryMetadata(OperationContext* opCtx, &value), cursor->session); - auto indexKey = KeyString::toBson( + auto indexKey = key_string::toBson( keyString.getBuffer(), keyString.getSize(), _ordering, keyString.getTypeBits()); LOGV2(6601200, @@ -495,7 +543,7 @@ boost::optional WiredTigerIndex::_keyExists(OperationContext* opCtx, invariantWTOK(c->get_value(c, &item), c->session); BufReader reader(item.data, item.size); - return KeyString::decodeRecordIdLong(&reader); + return key_string::decodeRecordIdLong(&reader); } WT_ITEM item; @@ -527,7 +575,7 @@ boost::optional WiredTigerIndex::_keyExists(OperationContext* opCtx, boost::optional WiredTigerIndex::_keyExistsBounded(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, size_t sizeWithoutRecordId) { // Given a KeyString KS with RecordId RID appended to the end, set the: // 1. 
Lower bound (inclusive) to be KS without RID @@ -565,16 +613,16 @@ boost::optional WiredTigerIndex::_keyExistsBounded(OperationContext* o invariantWTOK(c->get_value(c, &value), c->session); BufReader reader(value.data, value.size); - return KeyString::decodeRecordIdLong(&reader); + return key_string::decodeRecordIdLong(&reader); } return _decodeRecordIdAtEnd(key.data, key.size); } void WiredTigerIndex::_setUpperBound(WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, size_t sizeWithoutRecordId) { - KeyString::Builder builder(keyString.getVersion(), _ordering); + key_string::Builder builder(keyString.getVersion(), _ordering); builder.resetFromBuffer(keyString.getBuffer(), sizeWithoutRecordId); builder.appendRecordId(record_id_helpers::maxRecordId(_rsKeyFormat)); @@ -585,14 +633,14 @@ void WiredTigerIndex::_setUpperBound(WT_CURSOR* c, StatusWith WiredTigerIndex::_checkDups(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, IncludeDuplicateRecordId includeDuplicateRecordId) { int ret; // A prefix key is KeyString of index key. It is the component of the index entry that // should be unique. auto sizeWithoutRecordId = (_rsKeyFormat == KeyFormat::Long) - ? KeyString::sizeWithoutRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()) - : KeyString::sizeWithoutRecordIdStrAtEnd(keyString.getBuffer(), keyString.getSize()); + ? key_string::sizeWithoutRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()) + : key_string::sizeWithoutRecordIdStrAtEnd(keyString.getBuffer(), keyString.getSize()); WiredTigerItem prefixKeyItem(keyString.getBuffer(), sizeWithoutRecordId); // First phase inserts the prefix key to prohibit concurrent insertions of same key @@ -603,7 +651,7 @@ StatusWith WiredTigerIndex::_checkDups(OperationContext* opCtx, // An entry with prefix key already exists. This can happen only during rolling upgrade when // both timestamp unsafe and timestamp safe index format keys could be present. 
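The _keyExistsBounded and _checkDups hunks above answer the question "does any entry with this index key already exist, regardless of which RecordId is appended to it?" by bounding the cursor between the key itself and the key with the maximum RecordId appended. The following standalone sketch illustrates that bounded prefix check, with std::map standing in for the WiredTiger table; the key encoding, separator, and helper name are illustrative only and are not the storage engine's actual API.

```cpp
#include <iostream>
#include <map>
#include <string>

// Stand-in for an index table: full keys look like "<indexKey>#<recordId>".
using IndexTable = std::map<std::string, std::string>;

// Returns true if any entry whose key starts with 'indexKeyPrefix' exists,
// mirroring the "lower bound = prefix, upper bound = prefix + max RecordId" idea.
bool prefixKeyExists(const IndexTable& table, const std::string& indexKeyPrefix) {
    // Lower bound: the prefix itself (inclusive).
    auto it = table.lower_bound(indexKeyPrefix);
    if (it == table.end())
        return false;
    // Anything still beginning with the prefix is within the bounded range.
    return it->first.compare(0, indexKeyPrefix.size(), indexKeyPrefix) == 0;
}

int main() {
    IndexTable idx;
    idx["name=alice#1"] = "";
    idx["name=bob#7"] = "";

    std::cout << std::boolalpha
              << prefixKeyExists(idx, "name=alice#") << "\n"   // true: duplicate index key
              << prefixKeyExists(idx, "name=carol#") << "\n";  // false: key is free
    return 0;
}
```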
if (ret == WT_DUPLICATE_KEY) { - auto key = KeyString::toBson( + auto key = key_string::toBson( keyString.getBuffer(), sizeWithoutRecordId, _ordering, keyString.getTypeBits()); return buildDupKeyErrorStatus( key, getCollectionNamespace(opCtx), _indexName, _keyPattern, _collation); @@ -637,7 +685,7 @@ StatusWith WiredTigerIndex::_checkDups(OperationContext* opCtx, } return buildDupKeyErrorStatus( - KeyString::toBson( + key_string::toBson( keyString.getBuffer(), sizeWithoutRecordId, _ordering, keyString.getTypeBits()), getCollectionNamespace(opCtx), _indexName, @@ -683,21 +731,21 @@ void WiredTigerIndex::_repairDataFormatVersion(OperationContext* opCtx, } } -KeyString::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx, - const std::string& uri, - StringData ident, - const IndexDescriptor* desc, - bool isLogged) { +key_string::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx, + const std::string& uri, + StringData ident, + const IndexDescriptor* desc, + bool isLogged) { auto version = WiredTigerUtil::checkApplicationMetadataFormatVersion( ctx, uri, kMinimumIndexVersion, kMaximumIndexVersion); if (!version.isOK()) { auto collectionNamespace = desc->getEntry()->getNSSFromCatalog(ctx); Status versionStatus = version.getStatus(); - Status indexVersionStatus(ErrorCodes::UnsupportedFormat, - str::stream() - << versionStatus.reason() << " Index: {name: " - << desc->indexName() << ", ns: " << collectionNamespace - << "} - version either too old or too new for this mongod."); + Status indexVersionStatus( + ErrorCodes::UnsupportedFormat, + str::stream() << versionStatus.reason() << " Index: {name: " << desc->indexName() + << ", ns: " << collectionNamespace.toStringForErrorMsg() + << "} - version either too old or too new for this mongod."); fassertFailedWithStatus(28579, indexVersionStatus); } _dataFormatVersion = version.getValue(); @@ -711,7 +759,7 @@ KeyString::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx, Status versionStatus(ErrorCodes::UnsupportedFormat, str::stream() << "Index: {name: " << desc->indexName() - << ", ns: " << collectionNamespace + << ", ns: " << collectionNamespace.toStringForErrorMsg() << "} has incompatible format version: " << _dataFormatVersion); fassertFailedWithStatusNoTrace(31179, versionStatus); } @@ -725,16 +773,16 @@ KeyString::Version WiredTigerIndex::_handleVersionInfo(OperationContext* ctx, return (_dataFormatVersion == kDataFormatV2KeyStringV1IndexVersionV2 || _dataFormatVersion == kDataFormatV4KeyStringV1UniqueIndexVersionV2 || _dataFormatVersion == kDataFormatV6KeyStringV1UniqueIndexVersionV2) - ? KeyString::Version::V1 - : KeyString::Version::V0; + ? 
key_string::Version::V1 + : key_string::Version::V0; } RecordId WiredTigerIndex::_decodeRecordIdAtEnd(const void* buffer, size_t size) { switch (_rsKeyFormat) { case KeyFormat::Long: - return KeyString::decodeRecordIdLongAtEnd(buffer, size); + return key_string::decodeRecordIdLongAtEnd(buffer, size); case KeyFormat::String: - return KeyString::decodeRecordIdStrAtEnd(buffer, size); + return key_string::decodeRecordIdStrAtEnd(buffer, size); } MONGO_UNREACHABLE; } @@ -768,14 +816,14 @@ class WiredTigerIndex::StandardBulkBuilder : public BulkBuilder { StandardBulkBuilder(WiredTigerIndex* idx, OperationContext* opCtx) : BulkBuilder(idx, opCtx), _idx(idx) {} - Status addKey(const KeyString::Value& keyString) override { + Status addKey(const key_string::Value& keyString) override { dassertRecordIdAtEnd(keyString, _idx->rsKeyFormat()); // Can't use WiredTigerCursor since we aren't using the cache. WiredTigerItem item(keyString.getBuffer(), keyString.getSize()); setKey(_cursor.get(), item.Get()); - const KeyString::TypeBits typeBits = keyString.getTypeBits(); + const key_string::TypeBits typeBits = keyString.getTypeBits(); WiredTigerItem valueItem = typeBits.isAllZeros() ? emptyItem : WiredTigerItem(typeBits.getBuffer(), typeBits.getSize()); @@ -812,7 +860,7 @@ class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder { invariant(!_idx->isIdIndex()); } - Status addKey(const KeyString::Value& newKeyString) override { + Status addKey(const key_string::Value& newKeyString) override { dassertRecordIdAtEnd(newKeyString, _idx->rsKeyFormat()); // Do a duplicate check, but only if dups aren't allowed. @@ -822,7 +870,7 @@ class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder { : newKeyString.compareWithoutRecordIdStr(_previousKeyString); if (cmp == 0) { // Duplicate found! - auto newKey = KeyString::toBson(newKeyString, _idx->_ordering); + auto newKey = key_string::toBson(newKeyString, _idx->_ordering); return buildDupKeyErrorStatus(newKey, _idx->getCollectionNamespace(_opCtx), _idx->indexName(), @@ -841,7 +889,7 @@ class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder { WiredTigerItem keyItem(newKeyString.getBuffer(), newKeyString.getSize()); setKey(_cursor.get(), keyItem.Get()); - const KeyString::TypeBits typeBits = newKeyString.getTypeBits(); + const key_string::TypeBits typeBits = newKeyString.getTypeBits(); WiredTigerItem valueItem = typeBits.isAllZeros() ? 
emptyItem : WiredTigerItem(typeBits.getBuffer(), typeBits.getSize()); @@ -863,7 +911,7 @@ class WiredTigerIndex::UniqueBulkBuilder : public BulkBuilder { private: WiredTigerIndex* _idx; const bool _dupsAllowed; - KeyString::Builder _previousKeyString; + key_string::Builder _previousKeyString; }; class WiredTigerIndex::IdBulkBuilder : public BulkBuilder { @@ -873,7 +921,7 @@ class WiredTigerIndex::IdBulkBuilder : public BulkBuilder { invariant(_idx->isIdIndex()); } - Status addKey(const KeyString::Value& newKeyString) override { + Status addKey(const key_string::Value& newKeyString) override { dassertRecordIdAtEnd(newKeyString, KeyFormat::Long); const int cmp = newKeyString.compareWithoutRecordIdLong(_previousKeyString); @@ -881,10 +929,10 @@ class WiredTigerIndex::IdBulkBuilder : public BulkBuilder { invariant(_previousKeyString.isEmpty() || cmp > 0); RecordId id = - KeyString::decodeRecordIdLongAtEnd(newKeyString.getBuffer(), newKeyString.getSize()); - KeyString::TypeBits typeBits = newKeyString.getTypeBits(); + key_string::decodeRecordIdLongAtEnd(newKeyString.getBuffer(), newKeyString.getSize()); + key_string::TypeBits typeBits = newKeyString.getTypeBits(); - KeyString::Builder value(_idx->getKeyStringVersion()); + key_string::Builder value(_idx->getKeyStringVersion()); value.appendRecordId(id); // When there is only one record, we can omit AllZeros TypeBits. Otherwise they need // to be included. @@ -892,8 +940,8 @@ class WiredTigerIndex::IdBulkBuilder : public BulkBuilder { value.appendTypeBits(typeBits); } - auto sizeWithoutRecordId = KeyString::sizeWithoutRecordIdLongAtEnd(newKeyString.getBuffer(), - newKeyString.getSize()); + auto sizeWithoutRecordId = key_string::sizeWithoutRecordIdLongAtEnd( + newKeyString.getBuffer(), newKeyString.getSize()); WiredTigerItem keyItem(newKeyString.getBuffer(), sizeWithoutRecordId); WiredTigerItem valueItem(value.getBuffer(), value.getSize()); @@ -911,7 +959,7 @@ class WiredTigerIndex::IdBulkBuilder : public BulkBuilder { private: WiredTigerIndex* _idx; - KeyString::Builder _previousKeyString; + key_string::Builder _previousKeyString; }; std::unique_ptr WiredTigerIdIndex::makeBulkBuilder( @@ -944,9 +992,9 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, _cursor.emplace(_uri, _tableId, false, _opCtx); } - boost::optional next(RequestedInfo parts) override { + boost::optional next(KeyInclusion keyInclusion) override { advanceNext(); - return curr(parts); + return curr(keyInclusion); } boost::optional nextKeyString() override { @@ -968,20 +1016,21 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, // NOTE: this uses the opposite rules as a normal seek because a forward scan should // end after the key if inclusive and before if exclusive. const auto discriminator = _forward == inclusive - ? KeyString::Discriminator::kExclusiveAfter - : KeyString::Discriminator::kExclusiveBefore; - _endPosition = std::make_unique(_version); + ? 
key_string::Discriminator::kExclusiveAfter + : key_string::Discriminator::kExclusiveBefore; + _endPosition = std::make_unique(_version); _endPosition->resetToKey(BSONObj::stripFieldNames(key), _ordering, discriminator); } - boost::optional seek(const KeyString::Value& keyString, - RequestedInfo parts = kKeyAndLoc) override { + boost::optional seek( + const key_string::Value& keyString, + KeyInclusion keyInclusion = KeyInclusion::kInclude) override { seekForKeyStringInternal(keyString); - return curr(parts); + return curr(keyInclusion); } boost::optional seekForKeyString( - const KeyString::Value& keyStringValue) override { + const key_string::Value& keyStringValue) override { seekForKeyStringInternal(keyStringValue); return getKeyStringEntry(); } @@ -1065,10 +1114,10 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, // operation effectively skipping over this key. virtual void updateIdAndTypeBits() { if (_rsKeyFormat == KeyFormat::Long) { - _id = KeyString::decodeRecordIdLongAtEnd(_key.getBuffer(), _key.getSize()); + _id = key_string::decodeRecordIdLongAtEnd(_key.getBuffer(), _key.getSize()); } else { invariant(_rsKeyFormat == KeyFormat::String); - _id = KeyString::decodeRecordIdStrAtEnd(_key.getBuffer(), _key.getSize()); + _id = key_string::decodeRecordIdStrAtEnd(_key.getBuffer(), _key.getSize()); } invariant(!_id.isNull()); @@ -1083,7 +1132,7 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, _typeBits.resetFromBuffer(&br); } - boost::optional curr(RequestedInfo parts) const { + boost::optional curr(KeyInclusion keyInclusion) const { if (_eof) return {}; @@ -1091,8 +1140,8 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, dassert(!_id.isNull()); BSONObj bson; - if (TRACING_ENABLED || (parts & kWantKey)) { - bson = KeyString::toBson(_key.getBuffer(), _key.getSize(), _ordering, _typeBits); + if (TRACING_ENABLED || keyInclusion == KeyInclusion::kInclude) { + bson = key_string::toBson(_key.getBuffer(), _key.getSize(), _ordering, _typeBits); LOGV2_TRACE_CURSOR(20000, "returning {bson} {id}", "bson"_attr = bson, "id"_attr = _id); } @@ -1124,7 +1173,7 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, // Seeks to query. Returns true on exact match. - bool seekWTCursor(const KeyString::Value& query) { + bool seekWTCursor(const key_string::Value& query) { // Ensure an active transaction is open. WiredTigerRecoveryUnit::get(_opCtx)->getSession(); @@ -1248,7 +1297,7 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, } - void seekForKeyStringInternal(const KeyString::Value& keyStringValue) { + void seekForKeyStringInternal(const key_string::Value& keyStringValue) { dassert(_opCtx->lockState()->isReadLocked()); seekWTCursor(keyStringValue); @@ -1306,10 +1355,10 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, if (_unique && (_isIdIndex || _key.getSize() == - KeyString::getKeySize(_key.getBuffer(), _key.getSize(), _ordering, _typeBits))) { + key_string::getKeySize(_key.getBuffer(), _key.getSize(), _ordering, _typeBits))) { // Create a copy of _key with a RecordId. Because _key is used during cursor restore(), // appending the RecordId would cause the cursor to be repositioned incorrectly. 
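A recurring change in the cursor hunks above is the RequestedInfo-to-KeyInclusion rename: curr() now decodes the BSON key only when the caller asked for it (or index tracing is on), instead of testing bit flags. Below is a minimal standalone sketch of that pattern; the entry type, decode step, and parameters are simplified stand-ins rather than the real SortedDataInterface types.

```cpp
#include <iostream>
#include <optional>
#include <string>

enum class KeyInclusion { kExclude, kInclude };

struct IndexEntry {
    std::string key;  // left empty when the caller excluded the key
    long long recordId;
};

// Pretend this is the comparatively expensive KeyString -> BSON conversion.
std::string decodeKey(long long recordId) {
    return "{ a: " + std::to_string(recordId) + " }";
}

// Mirrors curr(): only pay for key decoding when it was requested.
std::optional<IndexEntry> currentEntry(bool eof, long long recordId, KeyInclusion keyInclusion) {
    if (eof)
        return std::nullopt;

    IndexEntry entry;
    entry.recordId = recordId;
    if (keyInclusion == KeyInclusion::kInclude)
        entry.key = decodeKey(recordId);
    return entry;
}

int main() {
    auto withKey = currentEntry(false, 42, KeyInclusion::kInclude);
    auto noKey = currentEntry(false, 42, KeyInclusion::kExclude);
    std::cout << withKey->key << " rid=" << withKey->recordId << "\n";
    std::cout << "(key skipped) rid=" << noKey->recordId << "\n";
    return 0;
}
```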
- KeyString::Builder keyWithRecordId(_key); + key_string::Builder keyWithRecordId(_key); keyWithRecordId.appendRecordId(_id); keyWithRecordId.setTypeBits(_typeBits); @@ -1336,7 +1385,7 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, } const Ordering _ordering; - const KeyString::Version _version; + const key_string::Version _version; const KeyFormat _rsKeyFormat; const std::string _uri; const uint64_t _tableId; @@ -1347,11 +1396,11 @@ class WiredTigerIndexCursorBase : public SortedDataInterface::Cursor, // These are where this cursor instance is. They are not changed in the face of a failing // next(). - KeyString::Builder _key; - KeyString::TypeBits _typeBits; + key_string::Builder _key; + key_string::TypeBits _typeBits; RecordId _id; - std::unique_ptr _endPosition; + std::unique_ptr _endPosition; // This differs from _eof in that it always reflects the result of the most recent call to // reposition _cursor. @@ -1386,7 +1435,7 @@ class WiredTigerIndexUniqueCursor final : public WiredTigerIndexCursorBase { // id. _id indexes remain at the old format. When KeyString contains just the key, the // RecordId is in value. auto keySize = - KeyString::getKeySize(_key.getBuffer(), _key.getSize(), _ordering, _typeBits); + key_string::getKeySize(_key.getBuffer(), _key.getSize(), _ordering, _typeBits); if (_key.getSize() == keySize) { _updateIdAndTypeBitsFromValue(); @@ -1417,7 +1466,7 @@ class WiredTigerIndexUniqueCursor final : public WiredTigerIndexCursorBase { // Get the size of the prefix key auto keySize = - KeyString::getKeySize(_key.getBuffer(), _key.getSize(), _ordering, _typeBits); + key_string::getKeySize(_key.getBuffer(), _key.getSize(), _ordering, _typeBits); // This check is only to avoid returning the same key again after a restore. Keys // shorter than _key cannot have "prefix key" same as _key. 
Therefore we care only about @@ -1431,7 +1480,7 @@ class WiredTigerIndexUniqueCursor final : public WiredTigerIndexCursorBase { bool isRecordIdAtEndOfKeyString() const override { return _key.getSize() != - KeyString::getKeySize(_key.getBuffer(), _key.getSize(), _ordering, _typeBits); + key_string::getKeySize(_key.getBuffer(), _key.getSize(), _ordering, _typeBits); } private: @@ -1454,18 +1503,28 @@ class WiredTigerIndexUniqueCursor final : public WiredTigerIndexCursorBase { invariantWTOK(ret, c->session); BufReader br(item.data, item.size); - _id = KeyString::decodeRecordIdLong(&br); + _id = key_string::decodeRecordIdLong(&br); _typeBits.resetFromBuffer(&br); if (!br.atEof()) { - LOGV2_FATAL(28608, - "Unique index cursor seeing multiple records for key {key} in index " - "{index} ({uri}) belonging to collection {collection}", - "Unique index cursor seeing multiple records for key in index", - "key"_attr = redact(curr(kWantKey)->key), - "index"_attr = _indexName, - "uri"_attr = _uri, - logAttrs(getCollectionNamespace(_opCtx))); + const auto bsonKey = redact(curr(KeyInclusion::kInclude)->key); + const auto collectionNamespace = getCollectionNamespace(_opCtx); + addDataCorruptionEntryToHealthLog( + _opCtx, + collectionNamespace, + "WiredTigerIndexUniqueCursor::_updateIdAndTypeBitsFromValue", + "Unique index cursor seeing multiple records for key in index", + bsonKey, + _indexName, + _uri); + + LOGV2_ERROR_OPTIONS(7623202, + getLogOptionsForDataCorruption(), + "Unique index cursor seeing multiple records for key in index", + "key"_attr = bsonKey, + "index"_attr = _indexName, + "uri"_attr = _uri, + logAttrs(collectionNamespace)); } } }; @@ -1489,16 +1548,32 @@ class WiredTigerIdIndexCursor final : public WiredTigerIndexCursorBase { invariantWTOK(ret, c->session); BufReader br(item.data, item.size); - _id = KeyString::decodeRecordIdLong(&br); + _id = key_string::decodeRecordIdLong(&br); _typeBits.resetFromBuffer(&br); - if (!br.atEof()) { - LOGV2_FATAL(5176200, - "Index cursor seeing multiple records for key in _id index", - "key"_attr = redact(curr(kWantKey)->key), - "index"_attr = _indexName, - "uri"_attr = _uri, - logAttrs(getCollectionNamespace(_opCtx))); + const auto failWithDataCorruptionForTest = + WTIndexUassertDuplicateRecordForIdIndex.shouldFail(); + + if (!br.atEof() || MONGO_unlikely(failWithDataCorruptionForTest)) { + const auto bsonKey = redact(curr(KeyInclusion::kInclude)->key); + const auto collectionNamespace = getCollectionNamespace(_opCtx); + + addDataCorruptionEntryToHealthLog( + _opCtx, + collectionNamespace, + "WiredTigerIdIndexCursor::updateIdAndTypeBits", + "Index cursor seeing multiple records for key in _id index", + bsonKey, + _indexName, + _uri); + + LOGV2_ERROR_OPTIONS(5176200, + getLogOptionsForDataCorruption(failWithDataCorruptionForTest), + "Index cursor seeing multiple records for key in _id index", + "key"_attr = bsonKey, + "index"_attr = _indexName, + "uri"_attr = _uri, + logAttrs(collectionNamespace)); } } }; @@ -1539,7 +1614,7 @@ bool WiredTigerIndexUnique::isTimestampSafeUniqueIdx() const { bool WiredTigerIndexUnique::isDup(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& prefixKey) { + const key_string::Value& prefixKey) { // This procedure to determine duplicates is exclusive for timestamp safe unique indexes. // Check if a prefix key already exists in the index. When keyExists() returns true, the cursor // will be positioned on the first occurrence of the 'prefixKey'. 
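The unique-cursor and _id-cursor hunks above replace LOGV2_FATAL with a health-log entry plus an error logged with getLogOptionsForDataCorruption(), so that outside the testing proctor the operation uasserts DataCorruptionDetected instead of taking the server down. Here is a standalone sketch of that control flow, with plain functions standing in for the health log, the logging macros, and the testing proctor; all names are illustrative.

```cpp
#include <iostream>
#include <stdexcept>
#include <string>

// Stand-ins for the real facilities.
bool testingProctorEnabled = false;

void addHealthLogEntry(const std::string& where, const std::string& what) {
    std::cout << "[health log] " << where << ": " << what << "\n";
}

// Mirrors the intent of getLogOptionsForDataCorruption(): in production (or when
// forced by a failpoint), log and then throw so the current operation fails;
// under the testing proctor, log loudly and continue so tests can observe the state.
void reportDataCorruption(const std::string& where,
                          const std::string& what,
                          bool forceThrow = false) {
    addHealthLogEntry(where, what);
    std::cout << "[error] " << what << "\n";
    if (!testingProctorEnabled || forceThrow)
        throw std::runtime_error("DataCorruptionDetected: " + what);
}

int main() {
    try {
        reportDataCorruption("UniqueCursor::updateIdAndTypeBits",
                             "multiple records found for key in unique index");
    } catch (const std::exception& e) {
        std::cout << "operation failed: " << e.what() << "\n";
    }
    return 0;
}
```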
@@ -1568,7 +1643,7 @@ bool WiredTigerIndexUnique::isDup(OperationContext* opCtx, } void WiredTigerIndexUnique::insertWithRecordIdInValue_forTest(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, RecordId rid) { WiredTigerCursor curwrap(_uri, _tableId, false, opCtx); curwrap.assertInActiveTxn(); @@ -1577,7 +1652,7 @@ void WiredTigerIndexUnique::insertWithRecordIdInValue_forTest(OperationContext* // Now create the table key/value, the actual data record. WiredTigerItem keyItem(keyString.getBuffer(), keyString.getSize()); - KeyString::Builder valueBuilder(keyString.getVersion(), rid); + key_string::Builder valueBuilder(keyString.getVersion(), rid); valueBuilder.appendTypeBits(keyString.getTypeBits()); WiredTigerItem valueItem(valueBuilder.getBuffer(), valueBuilder.getSize()); @@ -1610,21 +1685,21 @@ std::unique_ptr WiredTigerIdIndex::newCursor(Operat Status WiredTigerIdIndex::_insert(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId) { invariant(KeyFormat::Long == _rsKeyFormat); invariant(!dupsAllowed); const RecordId id = - KeyString::decodeRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); + key_string::decodeRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); invariant(id.isValid()); auto sizeWithoutRecordId = - KeyString::sizeWithoutRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); + key_string::sizeWithoutRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); WiredTigerItem keyItem(keyString.getBuffer(), sizeWithoutRecordId); - KeyString::Builder value(getKeyStringVersion(), id); - const KeyString::TypeBits typeBits = keyString.getTypeBits(); + key_string::Builder value(getKeyStringVersion(), id); + const key_string::TypeBits typeBits = keyString.getTypeBits(); if (!typeBits.isAllZeros()) value.appendTypeBits(typeBits); @@ -1650,11 +1725,11 @@ Status WiredTigerIdIndex::_insert(OperationContext* opCtx, invariantWTOK(c->get_value(c, &foundValue), c->session); BufReader reader(foundValue.data, foundValue.size); - duplicateRecordId = KeyString::decodeRecordIdLong(&reader); + duplicateRecordId = key_string::decodeRecordIdLong(&reader); foundValueRecordId = *duplicateRecordId; } - auto key = KeyString::toBson(keyString, _ordering); + auto key = key_string::toBson(keyString, _ordering); return buildDupKeyErrorStatus(key, getCollectionNamespace(opCtx), _indexName, @@ -1666,7 +1741,7 @@ Status WiredTigerIdIndex::_insert(OperationContext* opCtx, Status WiredTigerIndexUnique::_insert(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId) { LOGV2_TRACE_INDEX( @@ -1687,7 +1762,7 @@ Status WiredTigerIndexUnique::_insert(OperationContext* opCtx, // Now create the table key/value, the actual data record. WiredTigerItem keyItem(keyString.getBuffer(), keyString.getSize()); - const KeyString::TypeBits typeBits = keyString.getTypeBits(); + const key_string::TypeBits typeBits = keyString.getTypeBits(); WiredTigerItem valueItem = typeBits.isAllZeros() ? 
emptyItem : WiredTigerItem(typeBits.getBuffer(), typeBits.getSize()); @@ -1714,21 +1789,23 @@ Status WiredTigerIndexUnique::_insert(OperationContext* opCtx, void WiredTigerIdIndex::_unindex(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) { invariant(KeyFormat::Long == _rsKeyFormat); const RecordId id = - KeyString::decodeRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); + key_string::decodeRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); invariant(id.isValid()); auto sizeWithoutRecordId = - KeyString::sizeWithoutRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); + key_string::sizeWithoutRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); WiredTigerItem keyItem(keyString.getBuffer(), sizeWithoutRecordId); setKey(c, keyItem.Get()); + const auto failWithDataCorruptionForTest = + WTIndexUassertDuplicateRecordForKeyOnIdUnindex.shouldFail(); // On the _id index, the RecordId is stored in the value of the index entry. If the dupsAllowed // flag is not set, we blindly delete using only the key without checking the RecordId. - if (!dupsAllowed) { + if (!dupsAllowed && MONGO_likely(!failWithDataCorruptionForTest)) { int ret = WT_OP_CHECK(wiredTigerCursorRemove(opCtx, c)); if (ret == WT_NOTFOUND) { return; @@ -1758,16 +1835,27 @@ void WiredTigerIdIndex::_unindex(OperationContext* opCtx, BufReader br(old.data, old.size); invariant(br.remaining()); - RecordId idInIndex = KeyString::decodeRecordIdLong(&br); - KeyString::TypeBits typeBits = KeyString::TypeBits::fromBuffer(getKeyStringVersion(), &br); - if (!br.atEof()) { - auto bsonKey = KeyString::toBson(keyString, _ordering); - LOGV2_FATAL(5176201, - "Un-index seeing multiple records for key", - "key"_attr = bsonKey, - "index"_attr = _indexName, - "uri"_attr = _uri, - logAttrs(getCollectionNamespace(opCtx))); + RecordId idInIndex = key_string::decodeRecordIdLong(&br); + key_string::TypeBits typeBits = key_string::TypeBits::fromBuffer(getKeyStringVersion(), &br); + if (!br.atEof() || MONGO_unlikely(failWithDataCorruptionForTest)) { + auto bsonKey = key_string::toBson(keyString, _ordering); + const auto collectionNamespace = getCollectionNamespace(opCtx); + + addDataCorruptionEntryToHealthLog(opCtx, + collectionNamespace, + "WiredTigerIdIndex::_unindex", + "Un-index seeing multiple records for key", + bsonKey, + _indexName, + _uri); + + LOGV2_ERROR_OPTIONS(5176201, + getLogOptionsForDataCorruption(failWithDataCorruptionForTest), + "Un-index seeing multiple records for key", + "key"_attr = bsonKey, + "index"_attr = _indexName, + "uri"_attr = _uri, + logAttrs(collectionNamespace)); } // The RecordId matches, so remove the entry. @@ -1777,7 +1865,7 @@ void WiredTigerIdIndex::_unindex(OperationContext* opCtx, return; } - auto key = KeyString::toBson(keyString, _ordering); + auto key = key_string::toBson(keyString, _ordering); LOGV2_WARNING(51797, "Associated record not found in collection while removing index entry", logAttrs(getCollectionNamespace(opCtx)), @@ -1788,7 +1876,7 @@ void WiredTigerIdIndex::_unindex(OperationContext* opCtx, void WiredTigerIndexUnique::_unindex(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) { // Note that the dupsAllowed flag asks us to check that the RecordId in the KeyString matches // before deleting any keys. 
Unique indexes store RecordIds in the keyString, so we get this @@ -1813,16 +1901,73 @@ void WiredTigerIndexUnique::_unindex(OperationContext* opCtx, return; } - // After a rolling upgrade an index can have keys from both timestamp unsafe (old) and - // timestamp safe (new) unique indexes. Old format keys just had the index key while new - // format key has index key + Record id. WT_NOTFOUND is possible if index key is in old format. - // Retry removal of key using old format. + // WT_NOTFOUND is possible if index key is in old (v4.0) format. Retry removal of key using old + // format. + _unindexTimestampUnsafe(opCtx, c, keyString, dupsAllowed); +} + +void WiredTigerIndexUnique::_unindexTimestampUnsafe(OperationContext* opCtx, + WT_CURSOR* c, + const key_string::Value& keyString, + bool dupsAllowed) { + // The old unique index format had a key-value of indexKey-RecordId. This means that the + // RecordId in an index entry might not match the indexKey+RecordId keyString passed into this + // function: an index on a field where multiple collection documents have the same field value + // but only one passes the partial index filter. + // + // The dupsAllowed flag is no longer relevant for the old unique index format. No new index + // entries are written in the old format, let alone during temporary phases of the server when + // duplicates are allowed. + + const RecordId id = + key_string::decodeRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); + invariant(id.isValid()); + auto sizeWithoutRecordId = - KeyString::sizeWithoutRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); + key_string::sizeWithoutRecordIdLongAtEnd(keyString.getBuffer(), keyString.getSize()); WiredTigerItem keyItem(keyString.getBuffer(), sizeWithoutRecordId); setKey(c, keyItem.Get()); - ret = WT_OP_CHECK(wiredTigerCursorRemove(opCtx, c)); + if (_partial) { + int ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return c->search(c); }); + if (ret == WT_NOTFOUND) { + return; + } + invariantWTOK(ret, c->session); + + WT_ITEM value; + invariantWTOK(c->get_value(c, &value), c->session); + BufReader br(value.data, value.size); + fassert(40416, br.remaining()); + + // Check that the record id matches. We may be called to unindex records that are not + // present in the index due to the partial filter expression. + bool foundRecord = [&]() { + if (key_string::decodeRecordIdLong(&br) != id) { + return false; + } + return true; + }(); + + // Ensure the index entry value is not a list of RecordIds, which should only be possible + // temporarily in v4.0 when dupsAllowed is true, not ever across upgrades or in upgraded + // versions. + key_string::TypeBits::fromBuffer(getKeyStringVersion(), &br); + if (br.remaining()) { + LOGV2_FATAL_NOTRACE( + 7592201, + "An index entry was found that contains an unexpected old format that should no " + "longer exist. 
The index should be dropped and rebuilt.", + "indexName"_attr = _indexName, + "collectionUUID"_attr = _collectionUUID); + } + + if (!foundRecord) { + return; + } + } + + int ret = WT_OP_CHECK(wiredTigerCursorRemove(opCtx, c)); if (ret == WT_NOTFOUND) { return; } @@ -1853,7 +1998,7 @@ std::unique_ptr WiredTigerIndexStandard::makeBulkBui Status WiredTigerIndexStandard::_insert(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId) { int ret; @@ -1870,7 +2015,7 @@ Status WiredTigerIndexStandard::_insert(OperationContext* opCtx, WiredTigerItem keyItem(keyString.getBuffer(), keyString.getSize()); - const KeyString::TypeBits typeBits = keyString.getTypeBits(); + const key_string::TypeBits typeBits = keyString.getTypeBits(); WiredTigerItem valueItem = typeBits.isAllZeros() ? emptyItem : WiredTigerItem(typeBits.getBuffer(), typeBits.getSize()); @@ -1896,7 +2041,7 @@ Status WiredTigerIndexStandard::_insert(OperationContext* opCtx, void WiredTigerIndexStandard::_unindex(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) { invariant(dupsAllowed); WiredTigerItem item(keyString.getBuffer(), keyString.getSize()); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h index ee031d2cc9ada..9968094e59e9a 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index.h @@ -29,14 +29,32 @@ #pragma once +#include +#include +#include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/validate_results.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/duplicate_key_error_info.h" #include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/key_string.h" #include "mongo/db/storage/sorted_data_interface.h" #include "mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -131,23 +149,23 @@ class WiredTigerIndex : public SortedDataInterface { virtual Status insert( OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId = IncludeDuplicateRecordId::kOff); virtual void unindex(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed); virtual boost::optional findLoc(OperationContext* opCtx, - const KeyString::Value& keyString) const override; + const key_string::Value& keyString) const override; virtual IndexValidateResults validate(OperationContext* opCtx, bool full) const; virtual bool appendCustomStats(OperationContext* opCtx, BSONObjBuilder* output, double scale) const; - virtual Status dupKeyCheck(OperationContext* opCtx, const KeyString::Value& keyString); + virtual Status dupKeyCheck(OperationContext* opCtx, const key_string::Value& keyString); virtual bool isEmpty(OperationContext* 
opCtx); @@ -160,7 +178,7 @@ class WiredTigerIndex : public SortedDataInterface { virtual Status initAsEmpty(OperationContext* opCtx); virtual void printIndexEntryMetadata(OperationContext* opCtx, - const KeyString::Value& keyString) const; + const key_string::Value& keyString) const; Status compact(OperationContext* opCtx) override; @@ -194,11 +212,11 @@ class WiredTigerIndex : public SortedDataInterface { virtual bool isDup(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString) = 0; + const key_string::Value& keyString) = 0; virtual bool unique() const = 0; virtual bool isTimestampSafeUniqueIdx() const = 0; void insertWithRecordIdInValue_forTest(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, RecordId rid) override { MONGO_UNREACHABLE; } @@ -207,13 +225,13 @@ class WiredTigerIndex : public SortedDataInterface { virtual Status _insert( OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId = IncludeDuplicateRecordId::kOff) = 0; virtual void _unindex(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) = 0; void setKey(WT_CURSOR* cursor, const WT_ITEM* item); @@ -235,14 +253,14 @@ class WiredTigerIndex : public SortedDataInterface { */ boost::optional _keyExistsBounded(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, size_t sizeWithoutRecordId); /** * Sets the upper bound on the passed in cursor to be the maximum value of the KeyString prefix. */ void _setUpperBound(WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, size_t sizeWithoutRecordId); /** @@ -253,18 +271,18 @@ class WiredTigerIndex : public SortedDataInterface { StatusWith _checkDups( OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, IncludeDuplicateRecordId includeDuplicateRecordId = IncludeDuplicateRecordId::kOff); /* * Determines the data format version from application metadata and verifies compatibility. * Returns the corresponding KeyString version. 
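The _handleVersionInfo member described by the comment above (and defined in the earlier .cpp hunks) checks that the on-disk data format version falls within the supported range and maps it to a key_string::Version. The sketch below shows that shape in isolation; the numeric version constants and the cutoff used in the mapping are made up for illustration and do not correspond to the real WiredTiger index format values.

```cpp
#include <iostream>
#include <stdexcept>

enum class KeyStringVersion { V0, V1 };

// Hypothetical format-version bounds for illustration only.
constexpr int kMinimumIndexVersion = 6;
constexpr int kMaximumIndexVersion = 14;

KeyStringVersion handleVersionInfo(int dataFormatVersion) {
    // Reject versions this binary does not understand (the real code fasserts
    // with an UnsupportedFormat status naming the index and namespace).
    if (dataFormatVersion < kMinimumIndexVersion || dataFormatVersion > kMaximumIndexVersion)
        throw std::runtime_error("index format version too old or too new for this mongod");

    // Newer format versions encode keys with KeyString V1; older ones with V0.
    return dataFormatVersion >= 8 ? KeyStringVersion::V1 : KeyStringVersion::V0;
}

int main() {
    std::cout << (handleVersionInfo(11) == KeyStringVersion::V1 ? "V1" : "V0") << "\n";
    try {
        handleVersionInfo(3);
    } catch (const std::exception& e) {
        std::cout << "rejected: " << e.what() << "\n";
    }
    return 0;
}
```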
*/ - KeyString::Version _handleVersionInfo(OperationContext* ctx, - const std::string& uri, - StringData ident, - const IndexDescriptor* desc, - bool isLogged); + key_string::Version _handleVersionInfo(OperationContext* ctx, + const std::string& uri, + StringData ident, + const IndexDescriptor* desc, + bool isLogged); /* * Attempts to repair the data format version in the index table metadata if there is a mismatch @@ -318,25 +336,35 @@ class WiredTigerIndexUnique : public WiredTigerIndex { bool isTimestampSafeUniqueIdx() const override; - bool isDup(OperationContext* opCtx, WT_CURSOR* c, const KeyString::Value& keyString) override; + bool isDup(OperationContext* opCtx, WT_CURSOR* c, const key_string::Value& keyString) override; void insertWithRecordIdInValue_forTest(OperationContext* opCtx, - const KeyString::Value& keyString, + const key_string::Value& keyString, RecordId rid) override; protected: Status _insert(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId = IncludeDuplicateRecordId::kOff) override; void _unindex(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) override; + /** + * This function continues to exist in order to support v4.0 unique partial index format: the + * format changed in v4.2 and onward. _unindex will call this if an index entry in the new + * format cannot be found, and this function will check for the old format. + */ + void _unindexTimestampUnsafe(OperationContext* opCtx, + WT_CURSOR* c, + const key_string::Value& keyString, + bool dupsAllowed); + private: bool _partial; }; @@ -368,7 +396,7 @@ class WiredTigerIdIndex : public WiredTigerIndex { return false; } - bool isDup(OperationContext* opCtx, WT_CURSOR* c, const KeyString::Value& keyString) override { + bool isDup(OperationContext* opCtx, WT_CURSOR* c, const key_string::Value& keyString) override { // Unimplemented by _id indexes for lack of need MONGO_UNREACHABLE; } @@ -376,14 +404,14 @@ class WiredTigerIdIndex : public WiredTigerIndex { protected: Status _insert(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId = IncludeDuplicateRecordId::kOff) override; void _unindex(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) override; /** @@ -391,7 +419,7 @@ class WiredTigerIdIndex : public WiredTigerIndex { */ Status _checkDups(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, IncludeDuplicateRecordId includeDuplicateRecordId) = delete; }; @@ -419,7 +447,7 @@ class WiredTigerIndexStandard : public WiredTigerIndex { return false; } - bool isDup(OperationContext* opCtx, WT_CURSOR* c, const KeyString::Value& keyString) override { + bool isDup(OperationContext* opCtx, WT_CURSOR* c, const key_string::Value& keyString) override { // Unimplemented by non-unique indexes MONGO_UNREACHABLE; } @@ -427,14 +455,14 @@ class WiredTigerIndexStandard : public WiredTigerIndex { protected: Status _insert(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed, IncludeDuplicateRecordId includeDuplicateRecordId = IncludeDuplicateRecordId::kOff) override; void 
_unindex(OperationContext* opCtx, WT_CURSOR* c, - const KeyString::Value& keyString, + const key_string::Value& keyString, bool dupsAllowed) override; }; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp index b5cc39473fe18..09bfc1ae1b126 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test.cpp @@ -27,23 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include - -#include "mongo/base/init.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/json.h" -#include "mongo/db/storage/sorted_data_interface_test_harness.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/json.h" #include "mongo/db/storage/wiredtiger/wiredtiger_index.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test_harness.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test_harness.cpp index f669632c374c1..c8039667ccfc2 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_test_harness.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_test_harness.cpp @@ -27,37 +27,47 @@ * it in the license file. 
*/ +#include +#include #include - -#include "mongo/base/init.h" +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection_mock.h" -#include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/json.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/sorted_data_interface.h" #include "mongo/db/storage/sorted_data_interface_test_harness.h" #include "mongo/db/storage/wiredtiger/wiredtiger_index.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" #include "mongo/util/system_clock_source.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { -using std::string; - class WiredTigerIndexHarnessHelper final : public SortedDataInterfaceHarnessHelper { public: WiredTigerIndexHarnessHelper() : _dbpath("wt_test"), _conn(nullptr) { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); - const char* config = "create,cache_size=1G,"; int ret = wiredtiger_open(_dbpath.path().c_str(), nullptr, config, &_conn); invariantWTOK(ret, nullptr); @@ -73,8 +83,9 @@ class WiredTigerIndexHarnessHelper final : public SortedDataInterfaceHarnessHelp std::unique_ptr newIdIndexSortedDataInterface() final { std::string ns = "test.wt"; - NamespaceString nss(ns); - OperationContextNoop opCtx(newRecoveryUnit().release()); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); + auto opCtxHolder{newOperationContext()}; + auto* const opCtx{opCtxHolder.get()}; BSONObj spec = BSON("key" << BSON("_id" << 1) << "name" << "_id_" @@ -90,19 +101,20 @@ class WiredTigerIndexHarnessHelper final : public SortedDataInterfaceHarnessHelp kWiredTigerEngineName, "", "", nss, desc, isLogged); ASSERT_OK(result.getStatus()); - string uri = "table:" + ns; - invariant(Status::OK() == WiredTigerIndex::create(&opCtx, uri, result.getValue())); + std::string uri = "table:" + ns; + invariant(Status::OK() == WiredTigerIndex::create(opCtx, uri, result.getValue())); return std::make_unique( - &opCtx, uri, UUID::gen(), "" /* ident */, &desc, isLogged); + opCtx, uri, UUID::gen(), "" /* ident */, &desc, isLogged); } - std::unique_ptr newSortedDataInterface(bool unique, - bool partial, - KeyFormat keyFormat) final { + std::unique_ptr newSortedDataInterface(bool unique, + bool partial, + KeyFormat keyFormat) final { std::string ns = "test.wt"; - NamespaceString nss(ns); - OperationContextNoop opCtx(newRecoveryUnit().release()); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); + auto opCtxHolder{newOperationContext()}; + auto* const 
opCtx{opCtxHolder.get()}; BSONObj spec = BSON("key" << BSON("a" << 1) << "name" << "testIndex" @@ -124,11 +136,11 @@ class WiredTigerIndexHarnessHelper final : public SortedDataInterfaceHarnessHelp kWiredTigerEngineName, "", "", nss, desc, WiredTigerUtil::useTableLogging(nss)); ASSERT_OK(result.getStatus()); - string uri = "table:" + ns; - invariant(Status::OK() == WiredTigerIndex::create(&opCtx, uri, result.getValue())); + std::string uri = "table:" + ns; + invariant(Status::OK() == WiredTigerIndex::create(opCtx, uri, result.getValue())); if (unique) { - return std::make_unique(&opCtx, + return std::make_unique(opCtx, uri, UUID::gen(), "" /* ident */, @@ -136,7 +148,7 @@ class WiredTigerIndexHarnessHelper final : public SortedDataInterfaceHarnessHelp &desc, WiredTigerUtil::useTableLogging(nss)); } - return std::make_unique(&opCtx, + return std::make_unique(opCtx, uri, UUID::gen(), "" /* ident */, @@ -158,12 +170,9 @@ class WiredTigerIndexHarnessHelper final : public SortedDataInterfaceHarnessHelp WiredTigerOplogManager _oplogManager; }; -std::unique_ptr makeWTIndexHarnessHelper() { - return std::make_unique(); -} - MONGO_INITIALIZER(RegisterSortedDataInterfaceHarnessFactory)(InitializerContext* const) { - mongo::registerSortedDataInterfaceHarnessHelperFactory(makeWTIndexHarnessHelper); + registerSortedDataInterfaceHarnessHelperFactory( + [] { return std::make_unique(); }); } } // namespace diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_index_util.cpp index 18797fe92fd0a..aefd843f91e87 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_util.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_util.cpp @@ -27,10 +27,33 @@ * it in the license file. */ - #include "mongo/db/storage/wiredtiger/wiredtiger_index_util.h" + +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_cursor.h" #include "mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_data.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -86,7 +109,17 @@ Status WiredTigerIndexUtil::compact(OperationContext* opCtx, const std::string& if (!cache->isEphemeral()) { WT_SESSION* s = WiredTigerRecoveryUnit::get(opCtx)->getSession()->getSession(); opCtx->recoveryUnit()->abandonSnapshot(); + + // Set a pointer on the WT_SESSION to the opCtx, so that WT::compact can use a callback to + // check for interrupts. 
+ SessionDataRAII sessionRaii(s, opCtx); + int ret = s->compact(s, uri.c_str(), "timeout=0"); + if (ret == WT_ERROR && !opCtx->checkForInterruptNoAssert().isOK()) { + return Status(ErrorCodes::Interrupted, + str::stream() << "Storage compaction interrupted on " << uri.c_str()); + } + if (MONGO_unlikely(WTCompactIndexEBUSY.shouldFail())) { ret = EBUSY; } @@ -96,6 +129,7 @@ Status WiredTigerIndexUtil::compact(OperationContext* opCtx, const std::string& str::stream() << "Compaction interrupted on " << uri.c_str() << " due to cache eviction pressure"); } + invariantWTOK(ret, s); } return Status::OK(); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_index_util.h b/src/mongo/db/storage/wiredtiger/wiredtiger_index_util.h index f6219f9581728..e0a5bbdc182f8 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_index_util.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_index_util.h @@ -29,6 +29,9 @@ #pragma once +#include +#include + #include "mongo/base/status.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/validate_results.h" diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp index c1e7ba4d1c6b5..c2379102ce16b 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init.cpp @@ -28,16 +28,28 @@ */ +#include +#include +#include +#include +#include +#include +#include + #if defined(__linux__) -#include +#include // IWYU pragma: keep +#include // IWYU pragma: keep #endif -#include "mongo/platform/basic.h" - -#include "mongo/base/init.h" -#include "mongo/db/catalog/collection_options.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_impl.h" #include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/storage/storage_engine_lock_file.h" @@ -46,11 +58,12 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_index.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_parameters_gen.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_server_status.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_tag.h" #include "mongo/util/processinfo.h" #if __has_feature(address_sanitizer) diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp index f53623761d144..50019fae62b9f 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_init_test.cpp @@ -27,18 +27,25 @@ * it in the license file. 
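The WiredTigerIndexUtil::compact() change above attaches the OperationContext to the WT_SESSION for the duration of the call (SessionDataRAII), so WiredTiger can poll for interrupts from a callback, and then maps a generic WT_ERROR plus a pending interrupt into ErrorCodes::Interrupted. Here is a standalone sketch of the same RAII-plus-translation pattern, using an atomic flag in place of an OperationContext and a fake long-running C-style call; all names are hypothetical.

```cpp
#include <atomic>
#include <iostream>

std::atomic<bool> interruptRequested{false};

// Per-"session" slot that the long-running call can poll; set only for the call's duration.
struct SessionInterruptScope {
    explicit SessionInterruptScope(std::atomic<bool>** slot) : _slot(slot) {
        *_slot = &interruptRequested;
    }
    ~SessionInterruptScope() {
        *_slot = nullptr;
    }
    std::atomic<bool>** _slot;
};

// Stand-in for s->compact(): returns -1 ("generic error") if the poll slot reports an interrupt.
int fakeCompact(std::atomic<bool>* pollSlot) {
    for (int step = 0; step < 1000; ++step) {
        if (pollSlot && pollSlot->load())
            return -1;
    }
    return 0;
}

int runCompact() {
    std::atomic<bool>* sessionSlot = nullptr;
    SessionInterruptScope scope(&sessionSlot);  // mirrors SessionDataRAII
    int ret = fakeCompact(sessionSlot);
    if (ret != 0 && interruptRequested.load()) {
        std::cout << "compaction interrupted\n";  // mirrors returning ErrorCodes::Interrupted
        return 1;
    }
    return ret;
}

int main() {
    interruptRequested = true;  // simulate a kill arriving while compact runs
    return runCompact() == 1 ? 0 : 2;
}
```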
*/ -#include "mongo/platform/basic.h" - - -#include "mongo/db/json.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_init.h" #include "mongo/db/storage/storage_engine_metadata.h" #include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/str.h" namespace { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp index 6f2804a72a850..3dc75b9fe1428 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.cpp @@ -34,40 +34,69 @@ LOGV2_DEBUG_OPTIONS( \ ID, DLEVEL, {logv2::LogComponent::kReplicationRollback}, MESSAGE, ##__VA_ARGS__) -#include "mongo/platform/basic.h" +#include "mongo/base/checked_cast.h" +#include "mongo/base/parse_number.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/catalog/collection_options_gen.h" +#include "mongo/db/index_names.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/storage/backup_block.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/exit_code.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" #ifdef _WIN32 #define NVALGRIND #endif -#include -#include -#include - -#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" - +#include #include -#include #include #include #include -#include +#include +#include +#include +#include #include #include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/bson/dotted_path_support.h" -#include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/client.h" -#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/concurrency/locker.h" #include "mongo/db/global_settings.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/mongod_options_storage_gen.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_options.h" @@ -87,6 +116,7 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_extensions.h" #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" #include 
"mongo/db/storage/wiredtiger/wiredtiger_index.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_parameters_gen.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" @@ -97,14 +127,10 @@ #include "mongo/platform/atomic_word.h" #include "mongo/util/background.h" #include "mongo/util/concurrency/idle_thread_block.h" -#include "mongo/util/concurrency/ticketholder.h" #include "mongo/util/debug_util.h" -#include "mongo/util/exit.h" #include "mongo/util/log_and_backoff.h" -#include "mongo/util/processinfo.h" #include "mongo/util/quick_exit.h" #include "mongo/util/scopeguard.h" -#include "mongo/util/stacktrace.h" #include "mongo/util/testing_proctor.h" #include "mongo/util/time_support.h" @@ -129,10 +155,6 @@ MONGO_FAIL_POINT_DEFINE(WTDisableFastShutDown); const std::string kPinOldestTimestampAtStartupName = "_wt_startup"; -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - #if __has_feature(address_sanitizer) constexpr bool kAddressSanitizerEnabled = true; #else @@ -251,6 +273,13 @@ class WiredTigerKVEngine::WiredTigerSessionSweeper : public BackgroundJob { virtual void run() { ThreadClient tc(name(), getGlobalServiceContext()); + + // TODO(SERVER-74657): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + LOGV2_DEBUG(22303, 1, "starting {name} thread", "name"_attr = name()); while (!_shuttingDown.load()) { @@ -397,7 +426,7 @@ WiredTigerKVEngine::WiredTigerKVEngine(OperationContext* opCtx, } ss << "),"; } - if (kAddressSanitizerEnabled || kThreadSanitizerEnabled) { + if constexpr (kAddressSanitizerEnabled || kThreadSanitizerEnabled) { // For applications using WT, advancing a cursor invalidates the data/memory that cursor was // pointing to. WT performs the optimization of managing its own memory. The unit of memory // allocation is a page. Walking a cursor from one key/value to the next often lands on the @@ -418,6 +447,12 @@ WiredTigerKVEngine::WiredTigerKVEngine(OperationContext* opCtx, // engine. ss << "debug_mode=(cursor_copy=true),"; } + if constexpr (kThreadSanitizerEnabled) { + // TSAN builds may take longer for certain operations, increase or disable the relevant + // timeouts. + ss << "cache_stuck_timeout_ms=600000,"; + ss << "generation_drain_timeout_ms=0,"; + } if (TestingProctor::instance().isEnabled()) { // Enable debug write-ahead logging for all tables when testing is enabled. // @@ -1051,6 +1086,11 @@ class StreamingCursorImpl : public StorageEngine::StreamingCursor { ~StreamingCursorImpl() = default; + void setCatalogEntries(const stdx::unordered_map>& + identsToNsAndUUID) { + _identsToNsAndUUID = std::move(identsToNsAndUUID); + } + StatusWith> getNextBatch(OperationContext* opCtx, const std::size_t batchSize) { int wtRet = 0; @@ -1112,9 +1152,11 @@ class StreamingCursorImpl : public StorageEngine::StreamingCursor { // to an entire file. Full backups cannot open an incremental cursor, even if they // are the initial incremental backup. const std::uint64_t length = options.incrementalBackup ? 
fileSize : 0; + auto nsAndUUID = _getNsAndUUID(filePath.stem().string()); backupBlocks.push_back(BackupBlock(opCtx, + nsAndUUID.first, + nsAndUUID.second, filePath.string(), - _wtBackup->identToNamespaceAndUUIDMap, _checkpointTimestamp, 0 /* offset */, length, @@ -1130,6 +1172,15 @@ class StreamingCursorImpl : public StorageEngine::StreamingCursor { } private: + std::pair, boost::optional> _getNsAndUUID( + const std::string& ident) const { + auto it = _identsToNsAndUUID.find(ident); + if (it == _identsToNsAndUUID.end()) { + return std::make_pair(boost::none, boost::none); + } + return it->second; + } + Status _getNextIncrementalBatchForFile(OperationContext* opCtx, const char* filename, boost::filesystem::path filePath, @@ -1172,9 +1223,11 @@ class StreamingCursorImpl : public StorageEngine::StreamingCursor { "offset"_attr = offset, "size"_attr = size, "type"_attr = type); + auto nsAndUUID = _getNsAndUUID(filePath.stem().string()); backupBlocks->push_back(BackupBlock(opCtx, + nsAndUUID.first, + nsAndUUID.second, filePath.string(), - _wtBackup->identToNamespaceAndUUIDMap, _checkpointTimestamp, offset, size, @@ -1184,9 +1237,11 @@ class StreamingCursorImpl : public StorageEngine::StreamingCursor { // If the file is unchanged, push a BackupBlock with offset=0 and length=0. This allows us // to distinguish between an unchanged file and a deleted file in an incremental backup. if (fileUnchangedFlag) { + auto nsAndUUID = _getNsAndUUID(filePath.stem().string()); backupBlocks->push_back(BackupBlock(opCtx, + nsAndUUID.first, + nsAndUUID.second, filePath.string(), - _wtBackup->identToNamespaceAndUUIDMap, _checkpointTimestamp, 0 /* offset */, 0 /* length */, @@ -1209,6 +1264,7 @@ class StreamingCursorImpl : public StorageEngine::StreamingCursor { WT_SESSION* _session; std::string _path; + stdx::unordered_map> _identsToNsAndUUID; boost::optional _checkpointTimestamp; WiredTigerBackup* _wtBackup; // '_wtBackup' is an out parameter. }; @@ -1268,34 +1324,10 @@ WiredTigerKVEngine::beginNonBlockingBackup(OperationContext* opCtx, invariant(_wtBackup.logFilePathsSeenByExtendBackupCursor.empty()); invariant(_wtBackup.logFilePathsSeenByGetNextBatch.empty()); - invariant(_wtBackup.identToNamespaceAndUUIDMap.empty()); - - // Fetching the catalog entries requires reading from the storage engine. During cache pressure, - // this read could be rolled back. In that case, we need to clear the map. - ScopeGuard clearGuard([&] { _wtBackup.identToNamespaceAndUUIDMap.clear(); }); - - { - Lock::GlobalLock lk(opCtx, MODE_IS); - DurableCatalog* catalog = DurableCatalog::get(opCtx); - std::vector catalogEntries = - catalog->getAllCatalogEntries(opCtx); - for (const DurableCatalog::EntryIdentifier& e : catalogEntries) { - // Populate the collection ident with its namespace and UUID. - UUID uuid = catalog->getMetaData(opCtx, e.catalogId)->options.uuid.value(); - _wtBackup.identToNamespaceAndUUIDMap.emplace(e.ident, std::make_pair(e.nss, uuid)); - - // Populate the collection's index idents with the collection's namespace and UUID. 
- std::vector idxIdents = catalog->getIndexIdents(opCtx, e.catalogId); - for (const std::string& idxIdent : idxIdents) { - _wtBackup.identToNamespaceAndUUIDMap.emplace(idxIdent, std::make_pair(e.nss, uuid)); - } - } - } auto streamingCursor = std::make_unique( session, _path, checkpointTimestamp, options, &_wtBackup); - clearGuard.dismiss(); pinOplogGuard.dismiss(); _backupSession = std::move(sessionRaii); _wtBackup.cursor = cursor; @@ -1316,7 +1348,6 @@ void WiredTigerKVEngine::endNonBlockingBackup(OperationContext* opCtx) { _wtBackup.dupCursor = nullptr; _wtBackup.logFilePathsSeenByExtendBackupCursor = {}; _wtBackup.logFilePathsSeenByGetNextBatch = {}; - _wtBackup.identToNamespaceAndUUIDMap = {}; boost::filesystem::remove(getOngoingBackupPath()); } @@ -1634,7 +1665,7 @@ Status WiredTigerKVEngine::createSortedDataInterface(OperationContext* opCtx, dps::extractElementAtPath(*storageEngineOptions, _canonicalName + ".configString") .str(); } - // Some unittests use a OperationContextNoop that can't support such lookups. + StatusWith result = WiredTigerIndex::generateCreateString(_canonicalName, _indexOptions, @@ -1902,6 +1933,10 @@ Status WiredTigerKVEngine::dropIdent(RecoveryUnit* ru, void WiredTigerKVEngine::dropIdentForImport(OperationContext* opCtx, StringData ident) { const std::string uri = _uri(ident); + WiredTigerRecoveryUnit* wtRu = checked_cast(opCtx->recoveryUnit()); + wtRu->getSessionNoTxn()->closeAllCursors(uri); + _sessionCache->closeAllCursors(uri); + WiredTigerSession session(_conn); // Don't wait for the global checkpoint lock to be obtained in WiredTiger as it can take a @@ -2720,7 +2755,13 @@ StatusWith WiredTigerKVEngine::getSanitizedStorageOptionsForSecondaryRe } void WiredTigerKVEngine::sizeStorerPeriodicFlush() { - if (_sizeStorerSyncTracker.intervalHasElapsed()) { + bool needSyncSizeInfo = false; + { + stdx::lock_guard lock(_sizeStorerSyncTrackerMutex); + needSyncSizeInfo = _sizeStorerSyncTracker.intervalHasElapsed(); + } + + if (needSyncSizeInfo) { syncSizeInfo(false); } } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h index bde533dc14eca..fcf48f0920789 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h @@ -29,24 +29,54 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include +#include #include +#include #include - -#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/ordering.h" #include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/import_options.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/storage/backup_block.h" +#include "mongo/db/storage/column_store.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/db/storage/journal_listener.h" +#include "mongo/db/storage/key_format.h" #include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot_manager.h" +#include "mongo/db/storage/sorted_data_interface.h" #include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include 
"mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/with_lock.h" #include "mongo/util/elapsed_tracker.h" @@ -54,9 +84,11 @@ namespace mongo { class ClockSource; class JournalListener; + class WiredTigerRecordStore; class WiredTigerSessionCache; class WiredTigerSizeStorer; + class WiredTigerEngineRuntimeConfigParameter; Status validateExtraDiagnostics(const std::vector& value, @@ -82,7 +114,6 @@ struct WiredTigerBackup { WT_CURSOR* dupCursor = nullptr; std::set logFilePathsSeenByExtendBackupCursor; std::set logFilePathsSeenByGetNextBatch; - BackupBlock::IdentToNamespaceAndUUIDMap identToNamespaceAndUUIDMap; // 'wtBackupCursorMutex' provides concurrency control between beginNonBlockingBackup(), // endNonBlockingBackup(), and getNextBatch() because we stream the output of the backup cursor. @@ -497,6 +528,8 @@ class WiredTigerKVEngine final : public KVEngine { std::unique_ptr _sizeStorer; std::string _sizeStorerUri; mutable ElapsedTracker _sizeStorerSyncTracker; + mutable Mutex _sizeStorerSyncTrackerMutex = + MONGO_MAKE_LATCH("WiredTigerKVEngine::_sizeStorerSyncTrackerMutex"); bool _ephemeral; // whether we are using the in-memory mode of the WT engine const bool _inRepairMode; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_no_fixture_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_no_fixture_test.cpp index 6f733c8d7a8d6..994482a10cdbd 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_no_fixture_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_no_fixture_test.cpp @@ -27,22 +27,47 @@ * it in the license file. 
*/ +#include #include -#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/service_context.h" #include "mongo/db/snapshot_window_options_gen.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" // for WiredTigerSession #include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" #include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/scopeguard.h" @@ -170,7 +195,8 @@ TEST(WiredTigerKVEngineNoFixtureTest, Basic) { CollectionOptions collectionOptions; auto keyFormat = KeyFormat::Long; ASSERT_OK(kvEngine->createRecordStore(opCtx.get(), nss, ident, collectionOptions, keyFormat)) - << fmt::format("failed to create record store with namespace {}", nss.toString()); + << fmt::format("failed to create record store with namespace {}", + nss.toStringForErrorMsg()); // Pin oldest and stable to timestamps (1,10). // The timestamps in the RollbackToStable40 in the C API test translate to MDB timestamps @@ -198,7 +224,8 @@ TEST(WiredTigerKVEngineNoFixtureTest, Basic) { // Insert 3 keys with the value A. auto rs = kvEngine->getRecordStore(opCtx.get(), nss, ident, collectionOptions); - ASSERT(rs) << fmt::format("failed to look up record store with namespace {}", nss.toString()); + ASSERT(rs) << fmt::format("failed to look up record store with namespace {}", + nss.toString_forTest()); { Lock::GlobalLock globalLock(opCtx.get(), MODE_IX); WriteUnitOfWork wuow(opCtx.get()); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp index 0d856194ec024..68f6f45630ed0 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_kv_engine_test.cpp @@ -27,37 +27,62 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/kv/kv_engine_test_harness.h" - -#include #include +#include #include -#include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/global_settings.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/storage/checkpointer.h" +#include "mongo/db/storage/kv/kv_engine_test_harness.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/storage_engine_impl.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_proxy.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace { @@ -132,7 +157,6 @@ class WiredTigerKVEngineTest : public ServiceContextTest { opCtx->setRecoveryUnit( std::unique_ptr(_helper.getEngine()->newRecoveryUnit()), WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); - opCtx->swapLockState(std::make_unique(), WithLock::withoutLock()); return opCtx; } @@ -209,6 +233,7 @@ TEST_F(WiredTigerKVEngineRepairTest, OrphanedDataFilesCanBeRecovered) { TEST_F(WiredTigerKVEngineRepairTest, UnrecoverableOrphanedDataFilesAreRebuilt) { auto opCtxPtr = _makeOperationContext(); + Lock::GlobalLock globalLk(opCtxPtr.get(), MODE_X); NamespaceString nss = NamespaceString::createNamespaceString_forTest("a.b"); std::string ident = "collection-1234"; @@ -278,34 +303,29 @@ TEST_F(WiredTigerKVEngineRepairTest, UnrecoverableOrphanedDataFilesAreRebuilt) { } TEST_F(WiredTigerKVEngineTest, TestOplogTruncation) { + // To diagnose any intermittent failures, maximize logging from WiredTigerKVEngine and friends. 
+ auto severityGuard = unittest::MinimumLoggedSeverityGuard{logv2::LogComponent::kStorage, + logv2::LogSeverity::Debug(3)}; + + // Set syncdelay before starting the checkpoint thread, otherwise it can observe the default + // checkpoint frequency of 60 seconds, causing the test to fail due to a 10 second timeout. + storageGlobalParams.syncdelay.store(1); + std::unique_ptr checkpointer = std::make_unique(); checkpointer->go(); + // If the test fails we want to ensure the checkpoint thread shuts down to avoid accessing the + // storage engine during shutdown. + ON_BLOCK_EXIT([&] { + checkpointer->shutdown({ErrorCodes::ShutdownInProgress, "Test finished"}); + }); + auto opCtxPtr = _makeOperationContext(); // The initial data timestamp has to be set to take stable checkpoints. The first stable // timestamp greater than this will also trigger a checkpoint. The following loop of the // CheckpointThread will observe the new `syncdelay` value. _helper.getWiredTigerKVEngine()->setInitialDataTimestamp(Timestamp(1, 1)); - - // Ignore data race on this variable when running with TSAN, this is only an issue in this - // unittest and not in mongod - []() -#if defined(__has_feature) -#if __has_feature(thread_sanitizer) - __attribute__((no_sanitize("thread"))) -#endif -#endif - { - storageGlobalParams.syncdelay = 1; - } - (); - - - // To diagnose any intermittent failures, maximize logging from WiredTigerKVEngine and friends. - auto severityGuard = unittest::MinimumLoggedSeverityGuard{logv2::LogComponent::kStorage, - logv2::LogSeverity::Debug(3)}; - // Simulate the callback that queries config.transactions for the oldest active transaction. boost::optional oldestActiveTxnTimestamp; AtomicWord callbackShouldFail{false}; @@ -345,10 +365,9 @@ TEST_F(WiredTigerKVEngineTest, TestOplogTruncation) { } LOGV2(22367, - "Expected the pinned oplog to advance. Expected value: {newPinned} Published value: " - "{engine_getOplogNeededForCrashRecovery}", - "newPinned"_attr = newPinned, - "engine_getOplogNeededForCrashRecovery"_attr = + "Expected the pinned oplog to advance.", + "expectedValue"_attr = newPinned, + "publishedValue"_attr = _helper.getWiredTigerKVEngine()->getOplogNeededForCrashRecovery()); FAIL(""); }; @@ -379,8 +398,6 @@ TEST_F(WiredTigerKVEngineTest, TestOplogTruncation) { _helper.getWiredTigerKVEngine()->setStableTimestamp(Timestamp(30, 1), false); callbackShouldFail.store(false); assertPinnedMovesSoon(Timestamp(40, 1)); - - checkpointer->shutdown({ErrorCodes::ShutdownInProgress, "Test finished"}); } TEST_F(WiredTigerKVEngineTest, IdentDrop) { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp index 320246d2f45d5..d4ed9b1299d9a 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.cpp @@ -27,24 +27,39 @@ * it in the license file. 
*/ +#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" -#include "mongo/platform/basic.h" - -#include - -#include "mongo/db/concurrency/lock_state.h" +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include + +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/idle_thread_block.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - namespace mongo { MONGO_FAIL_POINT_DEFINE(WTPauseOplogVisibilityUpdateLoop); @@ -56,7 +71,7 @@ const int kDelayMillis = 100; void WiredTigerOplogManager::startVisibilityThread(OperationContext* opCtx, WiredTigerRecordStore* oplogRecordStore) { - invariant(!_isRunning); + invariant(!_isRunning.loadRelaxed()); // Prime the oplog read timestamp. std::unique_ptr reverseOplogCursor = oplogRecordStore->getCursor(opCtx, false /* false = reverse cursor */); @@ -90,26 +105,33 @@ void WiredTigerOplogManager::startVisibilityThread(OperationContext* opCtx, WiredTigerRecoveryUnit::get(opCtx)->getSessionCache(), oplogRecordStore); - _isRunning = true; + _isRunning.store(true); _shuttingDown = false; } void WiredTigerOplogManager::haltVisibilityThread() { + // This is called from two places; on clean shutdown and when the record store for the + // oplog is destroyed. We will perform the actual shutdown on the first call and the + // second call will be a no-op. Calling this on clean shutdown is necessary because the + // oplog manager makes calls into WiredTiger to retrieve the all durable timestamp. Lock + // Free Reads introduced shared collections which can offset when their respective + // destructors run. This created a scenario where the oplog manager visibility loop can + // be executed after the storage engine has shutdown. + if (!_isRunning.loadRelaxed()) { + return; + } + { stdx::lock_guard lk(_oplogVisibilityStateMutex); - if (!_isRunning) { - // This is called from two places; on clean shutdown and when the record store for the - // oplog is destroyed. We will perform the actual shutdown on the first call and the - // second call will be a no-op. Calling this on clean shutdown is necessary because the - // oplog manager makes calls into WiredTiger to retrieve the all durable timestamp. Lock - // Free Reads introduced shared collections which can offset when their respective - // destructors run. This created a scenario where the oplog manager visibility loop can - // be executed after the storage engine has shutdown. 
+ + // In between when we checked '_isRunning' above and when we acquired the mutex, it's + // possible another thread modified '_isRunning', so check it again. + if (!_isRunning.loadRelaxed()) { return; } + _isRunning.store(false); _shuttingDown = true; - _isRunning = false; } if (_oplogVisibilityThread.joinable()) { @@ -128,7 +150,7 @@ void WiredTigerOplogManager::triggerOplogVisibilityUpdate() { void WiredTigerOplogManager::waitForAllEarlierOplogWritesToBeVisible( const WiredTigerRecordStore* oplogRecordStore, OperationContext* opCtx) { - invariant(opCtx->lockState()->isNoop() || !opCtx->lockState()->inAWriteUnitOfWork()); + invariant(!opCtx->lockState()->inAWriteUnitOfWork()); // In order to reliably detect rollback situations, we need to fetch the latestVisibleTimestamp // prior to querying the end of the oplog. @@ -194,6 +216,12 @@ void WiredTigerOplogManager::_updateOplogVisibilityLoop(WiredTigerSessionCache* WiredTigerRecordStore* oplogRecordStore) { Client::initThread("OplogVisibilityThread"); + // TODO(SERVER-74657): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + // This thread updates the oplog read timestamp, the timestamp used to read from the oplog with // forward cursors. The timestamp is used to hide oplog entries that might be committed but have // uncommitted entries behind them. This prevents cursors from seeing 'holes' in the oplog and diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h index 10f6c80f34352..4dc0b647089ac 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/operation_context.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" @@ -70,8 +76,7 @@ class WiredTigerOplogManager { void haltVisibilityThread(); bool isRunning() { - stdx::lock_guard lk(_oplogVisibilityStateMutex); - return _isRunning && !_shuttingDown; + return _isRunning.load(); } /** @@ -122,7 +127,8 @@ class WiredTigerOplogManager { mutable Mutex _oplogVisibilityStateMutex = MONGO_MAKE_LATCH("WiredTigerOplogManager::_oplogVisibilityStateMutex"); - bool _isRunning = false; + AtomicWord _isRunning{false}; + bool _shuttingDown = false; // Triggers an oplog visibility update -- can be delayed if no callers are waiting for an diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp index d962647e91cc3..fe15e7e3e9043 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_options_init.cpp @@ -27,15 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/util/options_parser/startup_option_init.h" - #include +#include +#include +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" #include "mongo/util/exit_code.h" +#include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" + +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif namespace moe = mongo::optionenvironment; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp index 4386ec48ef850..c93ce37789b4e 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_parameters.cpp @@ -27,11 +27,32 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/operation_context.h" #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_parameters_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp index 7ef1d0542c52b..f524ea0f794d6 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.cpp @@ -28,15 +28,35 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h" - +#include +#include #include +#include +#include +#include +#include + +#include "mongo/db/client.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/lock_stats.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" +#include "mongo/db/prepare_conflict_tracker.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/stacktrace.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -70,4 +90,90 @@ void wiredTigerPrepareConflictOplogResourceLog() { }); } +int wiredTigerPrepareConflictRetrySlow(OperationContext* opCtx, std::function func) { + if (opCtx->recoveryUnit()->isTimestamped()) { + // This transaction is holding a resource in the form of an oplog 
slot. Committed + // transactions that get a later oplog slot will be unable to replicate until this resource + // is released (in the form of this transaction committing or aborting). For this case, we + // choose to abort our transaction and retry instead of blocking. It's possible that we can + // be blocking on a prepared update that requires replication to make progress, creating a + // stall in the MDB cluster. + wiredTigerPrepareConflictOplogResourceLog(); + throwWriteConflictException("Holding a resource (oplog slot)."); + } + + auto recoveryUnit = WiredTigerRecoveryUnit::get(opCtx); + int attempts = 1; + // If we return from this function, we have either returned successfully or we've returned an + // error other than WT_PREPARE_CONFLICT. Reset PrepareConflictTracker accordingly. + ON_BLOCK_EXIT([opCtx] { PrepareConflictTracker::get(opCtx).endPrepareConflict(opCtx); }); + PrepareConflictTracker::get(opCtx).beginPrepareConflict(opCtx); + + auto client = opCtx->getClient(); + if (client->isFromSystemConnection()) { + // System (internal) connections that hit a prepare conflict should be killable to prevent + // deadlocks with prepared transactions on replica set step up and step down. + stdx::lock_guard lk(*client); + invariant(client->canKillSystemOperationInStepdown(lk)); + } + + // It is contradictory to be running into a prepare conflict when we are ignoring interruptions, + // particularly when running code inside an + // OperationContext::runWithoutInterruptionExceptAtGlobalShutdown block. Operations executed in + // this way are expected to be set to ignore prepare conflicts. + invariant(!opCtx->isIgnoringInterrupts()); + + if (MONGO_unlikely(WTPrintPrepareConflictLog.shouldFail())) { + wiredTigerPrepareConflictFailPointLog(); + } + + CurOp::get(opCtx)->debug().additiveMetrics.incrementPrepareReadConflicts(1); + wiredTigerPrepareConflictLog(attempts); + + const auto lockerInfo = opCtx->lockState()->getLockerInfo(boost::none); + invariant(lockerInfo); + for (const auto& lock : lockerInfo->locks) { + const auto type = lock.resourceId.getType(); + // If a user operation on secondaries acquires a lock in MODE_S and then blocks on a prepare + // conflict with a prepared transaction, deadlock will occur at the commit time of the + // prepared transaction when it attempts to reacquire (since locks were yielded on + // secondaries) an IX lock that conflicts with the MODE_S lock held by the user operation. + // User operations that acquire MODE_X locks and block on prepare conflicts could lead to + // the same problem. However, user operations on secondaries should never hold MODE_X locks. + // Since prepared transactions will not reacquire RESOURCE_MUTEX / RESOURCE_METADATA / + // RESOURCE_DDL_* locks at commit time, these lock types are safe. Therefore, invariant here + // that we do not get a prepare conflict while holding a global, database, or collection + // MODE_S lock (or MODE_X lock for completeness). + if (type == RESOURCE_GLOBAL || type == RESOURCE_DATABASE || type == RESOURCE_COLLECTION) + invariant(lock.mode != MODE_S && lock.mode != MODE_X, + str::stream() << lock.resourceId.toString() << " in " << modeName(lock.mode)); + } + + if (MONGO_unlikely(WTSkipPrepareConflictRetries.shouldFail())) { + // Callers of wiredTigerPrepareConflictRetry() should eventually call wtRCToStatus() via + // invariantWTOK() and have the WT_ROLLBACK error bubble up as a WriteConflictException. 
+ // Enabling the "skipWriteConflictRetries" failpoint in conjunction with the + // "WTSkipPrepareConflictRetries" failpoint prevents the higher layers from retrying the + // entire operation. + return WT_ROLLBACK; + } + + while (true) { + attempts++; + auto lastCount = recoveryUnit->getSessionCache()->getPrepareCommitOrAbortCount(); + int ret = WT_READ_CHECK(func()); + + if (ret != WT_PREPARE_CONFLICT) + return ret; + + CurOp::get(opCtx)->debug().additiveMetrics.incrementPrepareReadConflicts(1); + wiredTigerPrepareConflictLog(attempts); + + // Wait on the session cache to signal that a unit of work has been committed or aborted. + recoveryUnit->getSessionCache()->waitUntilPreparedUnitOfWorkCommitsOrAborts(opCtx, + lastCount); + } +} + + } // namespace mongo diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h index 61c911059803b..9da2fac61cda1 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h @@ -29,11 +29,17 @@ #pragma once +#include +#include +#include + #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/curop.h" +#include "mongo/db/operation_context.h" #include "mongo/db/prepare_conflict_tracker.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/fail_point.h" namespace mongo { @@ -62,94 +68,17 @@ void wiredTigerPrepareConflictOplogResourceLog(); * re-try f, so any required timeout behavior must be enforced within f. * The function f must return a WiredTiger error code. */ +int wiredTigerPrepareConflictRetrySlow(OperationContext* opCtx, std::function func); + template int wiredTigerPrepareConflictRetry(OperationContext* opCtx, F&& f) { - invariant(opCtx); + dassert(opCtx); int ret = WT_READ_CHECK(f()); if (ret != WT_PREPARE_CONFLICT) return ret; - if (opCtx->recoveryUnit()->isTimestamped()) { - // This transaction is holding a resource in the form of an oplog slot. Committed - // transactions that get a later oplog slot will be unable to replicate until this resource - // is released (in the form of this transaction committing or aborting). For this case, we - // choose to abort our transaction and retry instead of blocking. It's possible that we can - // be blocking on a prepared update that requires replication to make progress, creating a - // stall in the MDB cluster. - wiredTigerPrepareConflictOplogResourceLog(); - throwWriteConflictException("Holding a resource (oplog slot)."); - } - - auto recoveryUnit = WiredTigerRecoveryUnit::get(opCtx); - int attempts = 1; - // If we return from this function, we have either returned successfully or we've returned an - // error other than WT_PREPARE_CONFLICT. Reset PrepareConflictTracker accordingly. - ON_BLOCK_EXIT([opCtx] { PrepareConflictTracker::get(opCtx).endPrepareConflict(opCtx); }); - PrepareConflictTracker::get(opCtx).beginPrepareConflict(opCtx); - - auto client = opCtx->getClient(); - if (client->isFromSystemConnection()) { - // System (internal) connections that hit a prepare conflict should be killable to prevent - // deadlocks with prepared transactions on replica set step up and step down. 
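// --- Illustrative sketch only; not part of this patch. ---
// The change above splits wiredTigerPrepareConflictRetry() into a small inline fast path (no
// prepare conflict: return immediately) and an out-of-line wiredTigerPrepareConflictRetrySlow()
// in the .cpp, presumably so the large retry loop is compiled once rather than instantiated at
// every call site. Call sites keep the same shape; given an open WT_CURSOR* 'c' and an
// OperationContext* 'opCtx' (both assumed here), a typical use wraps one cursor operation in a
// lambda that returns the WiredTiger error code:
int ret = wiredTigerPrepareConflictRetry(opCtx, [&] { return c->search(c); });
if (ret != WT_NOTFOUND) {
    invariantWTOK(ret, c->session);
}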
- stdx::lock_guard lk(*client); - invariant(client->canKillSystemOperationInStepdown(lk)); - } - // It is contradictory to be running into a prepare conflict when we are ignoring interruptions, - // particularly when running code inside an - // OperationContext::runWithoutInterruptionExceptAtGlobalShutdown block. Operations executed in - // this way are expected to be set to ignore prepare conflicts. - invariant(!opCtx->isIgnoringInterrupts()); - - if (MONGO_unlikely(WTPrintPrepareConflictLog.shouldFail())) { - wiredTigerPrepareConflictFailPointLog(); - } - - CurOp::get(opCtx)->debug().additiveMetrics.incrementPrepareReadConflicts(1); - wiredTigerPrepareConflictLog(attempts); - - const auto lockerInfo = opCtx->lockState()->getLockerInfo(boost::none); - invariant(lockerInfo); - for (const auto& lock : lockerInfo->locks) { - const auto type = lock.resourceId.getType(); - // If a user operation on secondaries acquires a lock in MODE_S and then blocks on a prepare - // conflict with a prepared transaction, deadlock will occur at the commit time of the - // prepared transaction when it attempts to reacquire (since locks were yielded on - // secondaries) an IX lock that conflicts with the MODE_S lock held by the user operation. - // User operations that acquire MODE_X locks and block on prepare conflicts could lead to - // the same problem. However, user operations on secondaries should never hold MODE_X locks. - // Since prepared transactions will not reacquire RESOURCE_MUTEX / RESOURCE_METADATA locks - // at commit time, these lock types are safe. Therefore, invariant here that we do not get a - // prepare conflict while holding a global, database, or collection MODE_S lock (or MODE_X - // lock for completeness). - if (type == RESOURCE_GLOBAL || type == RESOURCE_DATABASE || type == RESOURCE_COLLECTION) - invariant(lock.mode != MODE_S && lock.mode != MODE_X, - str::stream() << lock.resourceId.toString() << " in " << modeName(lock.mode)); - } - - if (MONGO_unlikely(WTSkipPrepareConflictRetries.shouldFail())) { - // Callers of wiredTigerPrepareConflictRetry() should eventually call wtRCToStatus() via - // invariantWTOK() and have the WT_ROLLBACK error bubble up as a WriteConflictException. - // Enabling the "skipWriteConflictRetries" failpoint in conjunction with the - // "WTSkipPrepareConflictRetries" failpoint prevents the higher layers from retrying the - // entire operation. - return WT_ROLLBACK; - } - - while (true) { - attempts++; - auto lastCount = recoveryUnit->getSessionCache()->getPrepareCommitOrAbortCount(); - ret = WT_READ_CHECK(f()); - - if (ret != WT_PREPARE_CONFLICT) - return ret; - - CurOp::get(opCtx)->debug().additiveMetrics.incrementPrepareReadConflicts(1); - wiredTigerPrepareConflictLog(attempts); - - // Wait on the session cache to signal that a unit of work has been committed or aborted. - recoveryUnit->getSessionCache()->waitUntilPreparedUnitOfWorkCommitsOrAborts(opCtx, - lastCount); - } + return wiredTigerPrepareConflictRetrySlow(opCtx, f); } + } // namespace mongo diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp index aabbcbfc36939..cb163f4829bc8 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict_test.cpp @@ -27,11 +27,25 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/curop.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" namespace mongo { @@ -69,12 +83,6 @@ class WiredTigerPrepareConflictTest : public unittest::Test { kvEngine = makeKVEngine(serviceContext, home.path(), &cs); opCtx->setRecoveryUnit(std::unique_ptr(kvEngine->newRecoveryUnit()), WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); - - // Sets internal states to pass invariants inside 'wiredTigerPrepareConflictRetry()'. - { - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); - } } unittest::TempDir home{"temp"}; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp index af579d750b9e7..10528345f357c 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.cpp @@ -27,50 +27,79 @@ * it in the license file. */ - #define LOGV2_FOR_RECOVERY(ID, DLEVEL, MESSAGE, ...) \ LOGV2_DEBUG_OPTIONS(ID, DLEVEL, {logv2::LogComponent::kStorageRecovery}, MESSAGE, ##__VA_ARGS__) -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include #include - -#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" - +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include #include +#include +#include -#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" #include "mongo/base/static_assert.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/health_log_gen.h" #include "mongo/db/catalog/health_log_interface.h" #include "mongo/db/catalog/validate_results.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/concurrency/locker.h" -#include "mongo/db/global_settings.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/record_id_helpers.h" -#include "mongo/db/repl/repl_settings.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/server_options.h" #include "mongo/db/server_recovery.h" #include "mongo/db/service_context.h" #include "mongo/db/stats/resource_consumption_metrics.h" #include "mongo/db/storage/capped_snapshots.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/ident.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/wiredtiger/oplog_truncate_marker_parameters_gen.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" #include 
"mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.h" #include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h" #include "mongo/db/storage/wiredtiger/wiredtiger_global_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_truncate_markers.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_data.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/redaction.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/variant.h" #include "mongo/util/assert_util.h" #include "mongo/util/concurrency/idle_thread_block.h" -#include "mongo/util/fail_point.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/sockaddr.h" #include "mongo/util/scopeguard.h" #include "mongo/util/stacktrace.h" #include "mongo/util/str.h" @@ -80,12 +109,9 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - namespace mongo { using namespace fmt::literals; -using std::string; -using std::unique_ptr; namespace { @@ -146,7 +172,6 @@ boost::optional namespaceForUUID(OperationContext* opCtx, // TODO SERVER-73111: Remove the dependency on CollectionCatalog return CollectionCatalog::get(opCtx)->lookupNSSByUUID(opCtx, *uuid); } - } // namespace MONGO_FAIL_POINT_DEFINE(WTCompactRecordStoreEBUSY); @@ -156,7 +181,7 @@ MONGO_FAIL_POINT_DEFINE(WTWriteConflictExceptionForReads); const std::string kWiredTigerEngineName = "wiredTiger"; -WiredTigerRecordStore::OplogTruncateMarkers +std::shared_ptr WiredTigerRecordStore::OplogTruncateMarkers::createOplogTruncateMarkers(OperationContext* opCtx, WiredTigerRecordStore* rs, const NamespaceString& ns) { @@ -184,14 +209,19 @@ WiredTigerRecordStore::OplogTruncateMarkers::createOplogTruncateMarkers(Operatio fmt::format("Cannot create oplog of size less than {} bytes", numTruncateMarkersToKeep), minBytesPerTruncateMarker > 0); - auto initialSetOfMarkers = CollectionTruncateMarkers::createFromExistingRecordStore( + // We need to read the whole oplog, override the recoveryUnit's oplogVisibleTimestamp. + ScopedOplogVisibleTimestamp scopedOplogVisibleTimestamp(opCtx->recoveryUnit(), boost::none); + UnyieldableCollectionIterator iterator(opCtx, rs); + auto initialSetOfMarkers = CollectionTruncateMarkers::createFromCollectionIterator( opCtx, - rs, + iterator, ns, minBytesPerTruncateMarker, [](const Record& record) { BSONObj obj = record.data.toBson(); - auto wallTime = obj.hasField("wall") ? obj["wall"].Date() : obj["ts"].timestampTime(); + auto wallTime = obj.hasField(repl::DurableOplogEntry::kWallClockTimeFieldName) + ? 
obj[repl::DurableOplogEntry::kWallClockTimeFieldName].Date() + : obj[repl::DurableOplogEntry::kTimestampFieldName].timestampTime(); return RecordIdAndWallTime(record.id, wallTime); }, numTruncateMarkersToKeep); @@ -199,13 +229,14 @@ WiredTigerRecordStore::OplogTruncateMarkers::createOplogTruncateMarkers(Operatio "WiredTiger record store oplog processing took {duration}ms", "WiredTiger record store oplog processing finished", "duration"_attr = duration_cast(initialSetOfMarkers.timeTaken)); - return WiredTigerRecordStore::OplogTruncateMarkers(std::move(initialSetOfMarkers.markers), - initialSetOfMarkers.leftoverRecordsCount, - initialSetOfMarkers.leftoverRecordsBytes, - minBytesPerTruncateMarker, - initialSetOfMarkers.timeTaken, - initialSetOfMarkers.methodUsed, - rs); + return std::make_shared( + std::move(initialSetOfMarkers.markers), + initialSetOfMarkers.leftoverRecordsCount, + initialSetOfMarkers.leftoverRecordsBytes, + minBytesPerTruncateMarker, + initialSetOfMarkers.timeTaken, + initialSetOfMarkers.methodUsed, + rs); } WiredTigerRecordStore::OplogTruncateMarkers::OplogTruncateMarkers( @@ -223,6 +254,82 @@ WiredTigerRecordStore::OplogTruncateMarkers::OplogTruncateMarkers( _processBySampling(creationMethod == CollectionTruncateMarkers::MarkersCreationMethod::Sampling) {} +bool WiredTigerRecordStore::OplogTruncateMarkers::isDead() { + stdx::lock_guard lk(_reclaimMutex); + return _isDead; +} + +void WiredTigerRecordStore::OplogTruncateMarkers::kill() { + stdx::lock_guard lk(_reclaimMutex); + _isDead = true; + _reclaimCv.notify_one(); +} + +void WiredTigerRecordStore::OplogTruncateMarkers::clearMarkersOnCommit(OperationContext* opCtx) { + opCtx->recoveryUnit()->onCommit([this](OperationContext*, boost::optional) { + modifyMarkersWith([&](std::deque& markers) { + markers.clear(); + modifyPartialMarker([&](CollectionTruncateMarkers::PartialMarkerMetrics metrics) { + metrics.currentRecords->store(0); + metrics.currentBytes->store(0); + }); + }); + }); +} + +void WiredTigerRecordStore::OplogTruncateMarkers::updateMarkersAfterCappedTruncateAfter( + int64_t recordsRemoved, int64_t bytesRemoved, const RecordId& firstRemovedId) { + modifyMarkersWith([&](std::deque& markers) { + int64_t numMarkersToRemove = 0; + int64_t recordsInMarkersToRemove = 0; + int64_t bytesInMarkersToRemove = 0; + + // Compute the number and associated sizes of the records from markers that are either fully + // or partially truncated. + for (auto it = markers.rbegin(); it != markers.rend(); ++it) { + if (it->lastRecord < firstRemovedId) { + break; + } + numMarkersToRemove++; + recordsInMarkersToRemove += it->records; + bytesInMarkersToRemove += it->bytes; + } + + // Remove the markers corresponding to the records that were deleted. + int64_t offset = markers.size() - numMarkersToRemove; + markers.erase(markers.begin() + offset, markers.end()); + + // Account for any remaining records from a partially truncated marker in the marker + // currently being filled. + modifyPartialMarker([&](CollectionTruncateMarkers::PartialMarkerMetrics metrics) { + metrics.currentRecords->fetchAndAdd(recordsInMarkersToRemove - recordsRemoved); + metrics.currentBytes->fetchAndAdd(bytesInMarkersToRemove - bytesRemoved); + }); + }); +} + +void WiredTigerRecordStore::OplogTruncateMarkers::awaitHasExcessMarkersOrDead( + OperationContext* opCtx) { + // Wait until kill() is called or there are too many collection markers. 
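// --- Worked example (editor illustration; not part of this patch) for
// updateMarkersAfterCappedTruncateAfter() above. Suppose the three newest whole markers each
// account for 10 records / 1000 bytes, and a capped truncate-after removes 25 records /
// 2500 bytes starting at a record inside the oldest of those three markers. Each of the three
// has lastRecord >= firstRemovedId, so:
//     numMarkersToRemove       = 3
//     recordsInMarkersToRemove = 30      bytesInMarkersToRemove = 3000
// The three markers are erased, and the 5 records / 500 bytes of the partially truncated
// marker that survive the truncate are folded back into the in-progress partial marker:
//     currentRecords += 30 - 25 = 5      currentBytes += 3000 - 2500 = 500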
+ stdx::unique_lock lock(_reclaimMutex); + while (!_isDead) { + { + MONGO_IDLE_THREAD_BLOCK; + if (auto marker = peekOldestMarkerIfNeeded(opCtx)) { + invariant(marker->lastRecord.isValid()); + + LOGV2_DEBUG(7393215, + 2, + "Collection has excess markers", + "lastRecord"_attr = marker->lastRecord, + "wallTime"_attr = marker->wallTime); + return; + } + } + _reclaimCv.wait(lock); + } +} + bool WiredTigerRecordStore::OplogTruncateMarkers::_hasExcessMarkers(OperationContext* opCtx) const { int64_t totalBytes = 0; for (const auto& marker : getMarkers()) { @@ -272,7 +379,8 @@ void WiredTigerRecordStore::OplogTruncateMarkers::adjust(OperationContext* opCtx size_t numTruncateMarkersToKeep = std::min( kMaxTruncateMarkersToKeep, std::max(kMinTruncateMarkersToKeep, numTruncateMarkers)); setMinBytesPerMarker(maxSize / numTruncateMarkersToKeep); - pokeReclaimThread(opCtx); + // Notify the reclaimer thread as there might be an opportunity to recover space. + _reclaimCv.notify_all(); } StatusWith WiredTigerRecordStore::parseOptionsField(const BSONObj options) { @@ -532,12 +640,12 @@ WiredTigerRecordStore::WiredTigerRecordStore(WiredTigerKVEngine* kvEngine, const std::string wtTableConfig = uassertStatusOK(WiredTigerUtil::getMetadataCreate(ctx, _uri)); const bool wtTableConfigMatchesStringKeyFormat = - wtTableConfig.find("key_format=u") != string::npos; + wtTableConfig.find("key_format=u") != std::string::npos; invariant(wtTableConfigMatchesStringKeyFormat); } if (_oplogMaxSize) { - invariant(_isOplog, str::stream() << "Namespace " << params.nss); + invariant(_isOplog, str::stream() << "Namespace " << params.nss.toStringForErrorMsg()); } Status versionStatus = WiredTigerUtil::checkApplicationMetadataFormatVersion( @@ -593,12 +701,10 @@ WiredTigerRecordStore::~WiredTigerRecordStore() { } } -std::string WiredTigerRecordStore::ns(OperationContext* opCtx) const { +NamespaceString WiredTigerRecordStore::ns(OperationContext* opCtx) const { auto nss = namespaceForUUID(opCtx, _uuid); - if (!nss) - return ""; - return nss->ns(); + return nss ? *nss : NamespaceString(); } void WiredTigerRecordStore::checkSize(OperationContext* opCtx) { @@ -620,7 +726,8 @@ void WiredTigerRecordStore::checkSize(OperationContext* opCtx) { "Record store was empty; setting count metadata to zero but marking " "record store as needing size adjustment during recovery. ns: " "{isTemp_temp_ns}, ident: {ident}", - "isTemp_temp_ns"_attr = (isTemp() ? "(temp)" : ns(opCtx)), + "isTemp_temp_ns"_attr = + (isTemp() ? "(temp)" : toStringForLogging(ns(opCtx))), "ident"_attr = getIdent()); sizeRecoveryState(getGlobalServiceContext()) .markCollectionAsAlwaysNeedsSizeAdjustment(getIdent()); @@ -638,8 +745,7 @@ void WiredTigerRecordStore::postConstructorInit(OperationContext* opCtx, // OplogCapMaintainerThread does not get started in this instance. if (_isOplog && opCtx->getServiceContext()->userWritesAllowed() && !storageGlobalParams.repair) { - _oplogTruncateMarkers = std::make_shared( - OplogTruncateMarkers::createOplogTruncateMarkers(opCtx, this, ns)); + _oplogTruncateMarkers = OplogTruncateMarkers::createOplogTruncateMarkers(opCtx, this, ns); } if (_isOplog) { @@ -745,7 +851,8 @@ bool WiredTigerRecordStore::findRecord(OperationContext* opCtx, } void WiredTigerRecordStore::doDeleteRecord(OperationContext* opCtx, const RecordId& id) { - invariant(opCtx->lockState()->inAWriteUnitOfWork() || opCtx->lockState()->isNoop()); + invariant(opCtx->lockState()->inAWriteUnitOfWork()); + // SERVER-48453: Initialize the next record id counter before deleting. 
This ensures we won't // reuse record ids, which can be problematic for the _mdb_catalog. if (_keyFormat == KeyFormat::Long) { @@ -792,8 +899,7 @@ bool WiredTigerRecordStore::yieldAndAwaitOplogDeletionRequest(OperationContext* // Release any locks before waiting on the condition variable. It is illegal to access any // methods or members of this record store after this line because it could be deleted. - bool releasedAnyLocks = locker->saveLockStateAndUnlock(&snapshot); - invariant(releasedAnyLocks); + locker->saveLockStateAndUnlock(&snapshot); // The top-level locks were freed, so also release any potential low-level (storage engine) // locks that might be held. @@ -931,7 +1037,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, Record* records, const Timestamp* timestamps, size_t nRecords) { - invariant(opCtx->lockState()->inAWriteUnitOfWork() || opCtx->lockState()->isNoop()); + invariant(opCtx->lockState()->inAWriteUnitOfWork()); int64_t totalLength = 0; for (size_t i = 0; i < nRecords; i++) @@ -953,11 +1059,32 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, for (size_t i = 0; i < nRecords; i++) { auto& record = records[i]; if (_isOplog) { - StatusWith status = - record_id_helpers::extractKeyOptime(record.data.data(), record.data.size()); - if (!status.isOK()) - return status.getStatus(); - record.id = std::move(status.getValue()); + auto swRecordId = record_id_helpers::keyForOptime(timestamps[i], KeyFormat::Long); + if (!swRecordId.isOK()) + return swRecordId.getStatus(); + + // In the normal write paths, a timestamp is always set. It is only in unusual cases + // like inserting the oplog seed document where the caller does not provide a + // timestamp. + if (MONGO_unlikely(timestamps[i].isNull() || kDebugBuild)) { + auto swRecordIdFromBSON = + record_id_helpers::extractKeyOptime(record.data.data(), record.data.size()); + if (!swRecordIdFromBSON.isOK()) + return swRecordIdFromBSON.getStatus(); + + // Double-check that the 'ts' field in the oplog entry matches the assigned + // timestamp, if it was provided. + dassert(timestamps[i].isNull() || + swRecordIdFromBSON.getValue() == swRecordId.getValue(), + fmt::format( + "ts field in oplog entry {} does not equal assigned timestamp {}", + swRecordIdFromBSON.getValue().toString(), + swRecordId.getValue().toString())); + + record.id = std::move(swRecordIdFromBSON.getValue()); + } else { + record.id = std::move(swRecordId.getValue()); + } } else { // Some RecordStores, like TemporaryRecordStores, may want to set their own // RecordIds. @@ -1035,7 +1162,7 @@ Status WiredTigerRecordStore::_insertRecords(OperationContext* opCtx, if (_oplogTruncateMarkers) { auto wall = [&] { BSONObj obj = highestIdRecord.data.toBson(); - BSONElement ele = obj["wall"]; + BSONElement ele = obj[repl::DurableOplogEntry::kWallClockTimeFieldName]; if (!ele) { // This shouldn't happen in normal cases, but this is needed because some tests do // not add wall clock times. 
Note that, with this addition, it's possible that the @@ -1064,7 +1191,6 @@ StatusWith WiredTigerRecordStore::getLatestOplogTimestamp( OperationContext* opCtx) const { invariant(_isOplog); invariant(_keyFormat == KeyFormat::Long); - dassert(opCtx->lockState()->isReadLocked()); // Using this function inside a UOW is not supported because the main reason to call it is to // synchronize to the last op before waiting for write concern, so it makes little sense to do @@ -1084,10 +1210,11 @@ StatusWith WiredTigerRecordStore::getLatestOplogTimestamp( } }); - WT_CURSOR* cursor = writeConflictRetry(opCtx, "getLatestOplogTimestamp", "local.oplog.rs", [&] { - auto cachedCursor = session->getCachedCursor(_tableId, ""); - return cachedCursor ? cachedCursor : session->getNewCursor(_uri); - }); + WT_CURSOR* cursor = writeConflictRetry( + opCtx, "getLatestOplogTimestamp", NamespaceString::kRsOplogNamespace, [&] { + auto cachedCursor = session->getCachedCursor(_tableId, ""); + return cachedCursor ? cachedCursor : session->getNewCursor(_uri); + }); ON_BLOCK_EXIT([&] { session->releaseCursor(_tableId, cursor, ""); }); int ret = cursor->prev(cursor); if (ret == WT_NOTFOUND) { @@ -1113,8 +1240,8 @@ StatusWith WiredTigerRecordStore::getEarliestOplogTimestamp(Operation if (firstRecordTimestamp == Timestamp()) { WiredTigerSessionCache* cache = WiredTigerRecoveryUnit::get(opCtx)->getSessionCache(); auto sessRaii = cache->getSession(); - WT_CURSOR* cursor = - writeConflictRetry(opCtx, "getEarliestOplogTimestamp", "local.oplog.rs", [&] { + WT_CURSOR* cursor = writeConflictRetry( + opCtx, "getEarliestOplogTimestamp", NamespaceString::kRsOplogNamespace, [&] { auto cachedCursor = sessRaii->getCachedCursor(_tableId, ""); return cachedCursor ? cachedCursor : sessRaii->getNewCursor(_uri); }); @@ -1138,7 +1265,7 @@ Status WiredTigerRecordStore::doUpdateRecord(OperationContext* opCtx, const RecordId& id, const char* data, int len) { - invariant(opCtx->lockState()->inAWriteUnitOfWork() || opCtx->lockState()->isNoop()); + invariant(opCtx->lockState()->inAWriteUnitOfWork()); WiredTigerCursor curwrap(_uri, _tableId, true, opCtx); curwrap.assertInActiveTxn(); @@ -1150,8 +1277,8 @@ Status WiredTigerRecordStore::doUpdateRecord(OperationContext* opCtx, invariantWTOK(ret, c->session, - str::stream() << "Namespace: " << ns(opCtx) << "; Key: " << getKey(c) - << "; Read Timestamp: " + str::stream() << "Namespace: " << ns(opCtx).toStringForErrorMsg() + << "; Key: " << getKey(c) << "; Read Timestamp: " << opCtx->recoveryUnit() ->getPointInTimeReadTimestamp(opCtx) .value_or(Timestamp{}) @@ -1405,12 +1532,15 @@ Status WiredTigerRecordStore::doRangeTruncate(OperationContext* opCtx, return Status::OK(); } invariantWTOK(ret, start->session); + // Make sure to reset the cursor since we have to replace it with what the user provided us. + invariantWTOK(start->reset(start), start->session); boost::optional startKey; if (minRecordId != RecordId()) { - invariantWTOK(start->reset(start), start->session); startKey = makeCursorKey(minRecordId, _keyFormat); setKey(start, &(*startKey)); + } else { + start = nullptr; } WiredTigerCursor endWrap(_uri, _tableId, true, opCtx); boost::optional endKey; @@ -1438,7 +1568,18 @@ Status WiredTigerRecordStore::doCompact(OperationContext* opCtx) { if (!cache->isEphemeral()) { WT_SESSION* s = WiredTigerRecoveryUnit::get(opCtx)->getSession()->getSession(); opCtx->recoveryUnit()->abandonSnapshot(); + + // Set a pointer on the WT_SESSION to the opCtx, so that WT::compact can use a callback to + // check for interrupts. 
+ SessionDataRAII sessionRaii(s, opCtx); + int ret = s->compact(s, getURI().c_str(), "timeout=0"); + if (ret == WT_ERROR && !opCtx->checkForInterruptNoAssert().isOK()) { + return Status(ErrorCodes::Interrupted, + str::stream() + << "Storage compaction interrupted on " << getURI().c_str()); + } + if (MONGO_unlikely(WTCompactRecordStoreEBUSY.shouldFail())) { ret = EBUSY; } @@ -1639,7 +1780,7 @@ void WiredTigerRecordStore::_initNextIdIfNeeded(OperationContext* opCtx) { rollbackReason = rollbackReason ? rollbackReason : "undefined"; throwWriteConflictException( fmt::format("Rollback ocurred while performing initial write to '{}'. Reason: '{}'", - ns(opCtx), + ns(opCtx).toStringForErrorMsg(), rollbackReason)); } else if (ret != WT_NOTFOUND) { if (ret == ENOTSUP) { @@ -1682,6 +1823,12 @@ long long WiredTigerRecordStore::_reserveIdBlock(OperationContext* opCtx, size_t void WiredTigerRecordStore::_changeNumRecordsAndDataSize(OperationContext* opCtx, int64_t numRecordDiff, int64_t dataSizeDiff) { + if (numRecordDiff == 0 && dataSizeDiff == 0) { + // If there's nothing to increment/decrement this will be a no-op. Avoid all the other + // checks and early return. + return; + } + if (!_tracksSizeAdjustments) { return; } @@ -1930,39 +2077,7 @@ boost::optional WiredTigerRecordStoreCursorBase::next() { if ((_forward && _lastReturnedId >= id) || (!_forward && !_lastReturnedId.isNull() && id >= _lastReturnedId) || MONGO_unlikely(failWithOutOfOrderForTest)) { - HealthLogEntry entry; - entry.setNss(namespaceForUUID(_opCtx, _uuid)); - entry.setTimestamp(Date_t::now()); - entry.setSeverity(SeverityEnum::Error); - entry.setScope(ScopeEnum::Collection); - entry.setOperation("WT_Cursor::next"); - entry.setMsg("Cursor returned out-of-order keys"); - - BSONObjBuilder bob; - bob.append("forward", _forward); - bob.append("next", id.toString()); - bob.append("last", _lastReturnedId.toString()); - bob.append("ident", _ident); - bob.appendElements(getStackTrace().getBSONRepresentation()); - entry.setData(bob.obj()); - - HealthLogInterface::get(_opCtx)->log(entry); - - if (!failWithOutOfOrderForTest) { - // Crash when testing diagnostics are enabled and not explicitly uasserting on - // out-of-order keys. - invariant(!TestingProctor::instance().isEnabled(), "cursor returned out-of-order keys"); - } - - // uassert with 'DataCorruptionDetected' after logging. 
- LOGV2_ERROR_OPTIONS(22406, - {logv2::UserAssertAfterLog(ErrorCodes::DataCorruptionDetected)}, - "WT_Cursor::next -- returned out-of-order keys", - "forward"_attr = _forward, - "next"_attr = id, - "last"_attr = _lastReturnedId, - "ident"_attr = _ident, - "ns"_attr = namespaceForUUID(_opCtx, _uuid)); + reportOutOfOrderRead(id, failWithOutOfOrderForTest); } WT_ITEM value; @@ -1977,6 +2092,53 @@ boost::optional WiredTigerRecordStoreCursorBase::next() { return {{std::move(id), {static_cast(value.data), static_cast(value.size)}}}; } +void WiredTigerRecordStoreCursorBase::reportOutOfOrderRead(RecordId& id, + bool failWithOutOfOrderForTest) { + HealthLogEntry entry; + entry.setNss(namespaceForUUID(_opCtx, _uuid)); + entry.setTimestamp(Date_t::now()); + entry.setSeverity(SeverityEnum::Error); + entry.setScope(ScopeEnum::Collection); + entry.setOperation("WT_Cursor::next"); + entry.setMsg("Cursor returned out-of-order keys"); + + BSONObjBuilder bob; + bob.append("forward", _forward); + bob.append("next", id.toString()); + bob.append("last", _lastReturnedId.toString()); + bob.append("ident", _ident); + bob.appendElements(getStackTrace().getBSONRepresentation()); + entry.setData(bob.obj()); + + HealthLogInterface::get(_opCtx)->log(entry); + + if (!failWithOutOfOrderForTest) { + // Crash when testing diagnostics are enabled and not explicitly uasserting on + // out-of-order keys. + invariant(!TestingProctor::instance().isEnabled(), "cursor returned out-of-order keys"); + } + + auto options = [&] { + if (_opCtx->recoveryUnit()->getDataCorruptionDetectionMode() == + DataCorruptionDetectionMode::kThrow) { + // uassert with 'DataCorruptionDetected' after logging. + return logv2::LogOptions{logv2::UserAssertAfterLog(ErrorCodes::DataCorruptionDetected)}; + } else { + return logv2::LogOptions(logv2::LogComponent::kAutomaticDetermination); + } + }(); + + LOGV2_ERROR_OPTIONS(22406, + options, + "WT_Cursor::next -- returned out-of-order keys", + "forward"_attr = _forward, + "next"_attr = id, + "last"_attr = _lastReturnedId, + "ident"_attr = _ident, + "ns"_attr = namespaceForUUID(_opCtx, _uuid)); +} + + boost::optional WiredTigerRecordStoreCursorBase::seekExact(const RecordId& id) { invariant(_hasRestored); if (_readTimestampForOplog && id.getLong() > *_readTimestampForOplog) { @@ -2132,7 +2294,6 @@ bool WiredTigerRecordStoreCursorBase::isVisible(const RecordId& id) { void WiredTigerRecordStoreCursorBase::initCappedVisibility(OperationContext* opCtx) { if (_isOplog) { auto wtRu = WiredTigerRecoveryUnit::get(opCtx); - wtRu->setIsOplogReader(); if (_forward) { _oplogVisibleTs = wtRu->getOplogVisibilityTs(); } @@ -2180,9 +2341,19 @@ bool WiredTigerRecordStoreCursorBase::restore(bool tolerateCappedRepositioning) WT_CURSOR* c = _cursor->get(); auto key = makeCursorKey(_lastReturnedId, _keyFormat); setKey(c, &key); + // Use a bounded cursor to avoid unnecessarily traversing deleted records while repositioning + // the cursor. This is particularly useful in capped collections when we're making a lot of + // deletes and the cursor traverses many deleted records to reposition itself. 
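The bounded-cursor comment above is easiest to read as a small standalone pattern: set a bound on the side the cursor will walk towards, reposition with `search_near`, then clear the bound so later operations see the whole table. A minimal sketch under those assumptions (the helper name is ours; error handling is trimmed to the essentials):

```cpp
#include <wiredtiger.h>

#include "mongo/db/storage/wiredtiger/wiredtiger_util.h"

namespace mongo {

// Hypothetical helper illustrating the restore pattern used in the hunk below. The
// cursor's key is assumed to already be set to the last returned record.
int searchNearWithBound(WT_CURSOR* c, bool forward, int* cmp) {
    // With a lower (resp. upper) bound in place, search_near() cannot wander backwards
    // (resp. forwards) through a long run of deleted records while repositioning.
    invariantWTOK(c->bound(c, forward ? "bound=lower" : "bound=upper"), c->session);
    int ret = c->search_near(c, cmp);
    // Always drop the bound again so subsequent cursor operations are unbounded.
    invariantWTOK(c->bound(c, "action=clear"), c->session);
    return ret;  // May be WT_NOTFOUND; callers treat that as end-of-collection.
}

}  // namespace mongo
```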
+ if (_forward) { + invariantWTOK(c->bound(c, "bound=lower"), c->session); + } else { + invariantWTOK(c->bound(c, "bound=upper"), c->session); + } int cmp; int ret = wiredTigerPrepareConflictRetry(_opCtx, [&] { return c->search_near(c, &cmp); }); + invariantWTOK(c->bound(c, "action=clear"), c->session); + if (ret == WT_NOTFOUND) { _eof = true; @@ -2214,6 +2385,9 @@ bool WiredTigerRecordStoreCursorBase::restore(bool tolerateCappedRepositioning) _skipNextAdvance = true; } else if (!_forward && cmp < 0) { _skipNextAdvance = true; + } else { + // Check that the cursor hasn't landed before _lastReturnedId + dassert(_forward ? cmp >= 0 : cmp <= 0); } return true; @@ -2261,11 +2435,6 @@ void StandardWiredTigerRecordStore::setKey(WT_CURSOR* cursor, const CursorKey* k std::unique_ptr StandardWiredTigerRecordStore::getCursor( OperationContext* opCtx, bool forward) const { - if (_isOplog && forward) { - WiredTigerRecoveryUnit* wru = WiredTigerRecoveryUnit::get(opCtx); - wru->setIsOplogReader(); - } - return std::make_unique(opCtx, *this, forward); } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h index ac58df8db370e..3548a04b277a7 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store.h @@ -29,13 +29,34 @@ #pragma once +#include +#include +#include +#include +#include #include #include #include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/capped_visibility.h" -#include "mongo/db/storage/collection_markers.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/validate_results.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/storage/collection_truncate_markers.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_cursor.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" @@ -43,10 +64,12 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" #include "mongo/util/fail_point.h" +#include "mongo/util/uuid.h" /** * Either executes the specified operation and returns it's value or randomly throws a write @@ -227,7 +250,7 @@ class WiredTigerRecordStore : public RecordStore { return _tableId; } - std::string ns(OperationContext* opCtx) const final; + NamespaceString ns(OperationContext* opCtx) const final; /* * Check the size information for this RecordStore. 
This function opens a cursor on the @@ -447,6 +470,7 @@ class WiredTigerRecordStoreCursorBase : public SeekableRecordCursor { private: bool isVisible(const RecordId& id); void initCappedVisibility(OperationContext* opCtx); + void reportOutOfOrderRead(RecordId& id, bool failWithOutOfOrderForTest); /** * This value is used for visibility calculations on what oplog entries can be returned to a diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_truncate_markers.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_truncate_markers.h index 825c930647baa..12deb7f2b2248 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_truncate_markers.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_truncate_markers.h @@ -31,7 +31,7 @@ #include -#include "mongo/db/storage/collection_markers.h" +#include "mongo/db/storage/collection_truncate_markers.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" @@ -54,6 +54,28 @@ class WiredTigerRecordStore::OplogTruncateMarkers final : public CollectionTrunc CollectionTruncateMarkers::MarkersCreationMethod creationMethod, WiredTigerRecordStore* rs); + /** + * Whether the instance is going to get destroyed. + */ + bool isDead(); + + /** + * Mark this instance as serving a non-existent RecordStore. This is the case if either the + * RecordStore has been deleted or we're shutting down. Doing this will mark the instance as + * ready for destruction. + */ + void kill(); + + void awaitHasExcessMarkersOrDead(OperationContext* opCtx); + + // Clears all the markers of the instance whenever the current WUOW commits. + void clearMarkersOnCommit(OperationContext* opCtx); + + // Updates the metadata about the collection markers after a rollback occurs. + void updateMarkersAfterCappedTruncateAfter(int64_t recordsRemoved, + int64_t bytesRemoved, + const RecordId& firstRemovedId); + void getOplogTruncateMarkersStats(BSONObjBuilder& builder) const { builder.append("totalTimeProcessingMicros", _totalTimeProcessing.count()); builder.append("processingMethod", _processBySampling ? "sampling" : "scanning"); @@ -69,7 +91,7 @@ class WiredTigerRecordStore::OplogTruncateMarkers final : public CollectionTrunc // efficiently truncate records with WiredTiger by skipping over tombstones, etc. RecordId firstRecord; - static WiredTigerRecordStore::OplogTruncateMarkers createOplogTruncateMarkers( + static std::shared_ptr createOplogTruncateMarkers( OperationContext* opCtx, WiredTigerRecordStore* rs, const NamespaceString& ns); // // The following methods are public only for use in tests. @@ -82,6 +104,17 @@ class WiredTigerRecordStore::OplogTruncateMarkers final : public CollectionTrunc private: virtual bool _hasExcessMarkers(OperationContext* opCtx) const final; + virtual void _notifyNewMarkerCreation() final { + _reclaimCv.notify_all(); + } + + Mutex _reclaimMutex = MONGO_MAKE_LATCH("OplogTruncateMarkers::_reclaimMutex"); + stdx::condition_variable _reclaimCv; + + // True if '_rs' has been destroyed, e.g. due to repairDatabase being called on the collection's + // database, and false otherwise. 
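The new `isDead()`, `kill()`, and `awaitHasExcessMarkersOrDead()` declarations above only show the interface; the mechanism is the usual mutex plus condition-variable handshake between the oplog reclaim thread and whoever tears down the record store. A hypothetical stand-in for that handshake (names are ours; the real definitions live in the .cpp file and are not part of this diff):

```cpp
#include <functional>

#include "mongo/db/operation_context.h"
#include "mongo/platform/mutex.h"
#include "mongo/stdx/condition_variable.h"

namespace mongo {

// Hypothetical stand-in showing how '_reclaimMutex', '_reclaimCv' and '_isDead' can be
// combined so the reclaim thread sleeps until there is work to do or the store goes away.
class ReclaimSignal {
public:
    void awaitHasWorkOrDead(OperationContext* opCtx, std::function<bool()> hasExcessMarkers) {
        stdx::unique_lock<Latch> lk(_mutex);
        // Wakes up on kill() or whenever a new marker is created and notify_all() runs.
        opCtx->waitForConditionOrInterrupt(_cv, lk, [&] { return _isDead || hasExcessMarkers(); });
    }

    void kill() {
        stdx::lock_guard<Latch> lk(_mutex);
        _isDead = true;
        _cv.notify_all();
    }

    bool isDead() {
        stdx::lock_guard<Latch> lk(_mutex);
        return _isDead;
    }

private:
    Mutex _mutex = MONGO_MAKE_LATCH("ReclaimSignal::_mutex");
    stdx::condition_variable _cv;
    bool _isDead = false;
};

}  // namespace mongo
```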
+ bool _isDead = false; + WiredTigerRecordStore* _rs; Microseconds _totalTimeProcessing; // Amount of time spent scanning and/or sampling the diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp index bd2e356eb0f95..3ee7b4d4685ef 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test.cpp @@ -27,38 +27,55 @@ * it in the license file. */ -#include -#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include "mongo/base/checked_cast.h" -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/catalog/clustered_collection_util.h" -#include "mongo/db/json.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store_oplog_truncate_markers.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { -using std::string; -using std::stringstream; -using std::unique_ptr; - TEST(WiredTigerRecordStoreTest, GenerateCreateStringEmptyDocument) { BSONObj spec = fromjson("{}"); StatusWith result = WiredTigerRecordStore::parseOptionsField(spec); @@ -102,13 +119,14 @@ TEST(WiredTigerRecordStoreTest, GenerateCreateStringValidConfigStringOption) { TEST(WiredTigerRecordStoreTest, Isolation1) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); RecordId id1; RecordId id2; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); @@ -125,12 +143,17 @@ TEST(WiredTigerRecordStoreTest, Isolation1) { } { - ServiceContext::UniqueOperationContext t1(harnessHelper->newOperationContext()); + auto client1 = harnessHelper->serviceContext()->makeClient("c1"); + auto t1 = 
harnessHelper->newOperationContext(client1.get()); + Lock::GlobalLock gl1(t1.get(), MODE_IX); + auto client2 = harnessHelper->serviceContext()->makeClient("c2"); auto t2 = harnessHelper->newOperationContext(client2.get()); + boost::optional gl2; + gl2.emplace(t2.get(), MODE_IX); - unique_ptr w1(new WriteUnitOfWork(t1.get())); - unique_ptr w2(new WriteUnitOfWork(t2.get())); + std::unique_ptr w1(new WriteUnitOfWork(t1.get())); + std::unique_ptr w2(new WriteUnitOfWork(t2.get())); rs->dataFor(t1.get(), id1); rs->dataFor(t2.get(), id1); @@ -143,8 +166,9 @@ TEST(WiredTigerRecordStoreTest, Isolation1) { rs->updateRecord(t2.get(), id1, "c", 2).transitional_ignore(); ASSERT(0); } catch (WriteConflictException&) { - w2.reset(nullptr); - t2.reset(nullptr); + w2.reset(); + gl2.reset(); + t2.reset(); } w1->commit(); // this should succeed @@ -153,13 +177,14 @@ TEST(WiredTigerRecordStoreTest, Isolation1) { TEST(WiredTigerRecordStoreTest, Isolation2) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); RecordId id1; RecordId id2; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); @@ -176,9 +201,13 @@ TEST(WiredTigerRecordStoreTest, Isolation2) { } { - ServiceContext::UniqueOperationContext t1(harnessHelper->newOperationContext()); + auto client1 = harnessHelper->serviceContext()->makeClient("c1"); + auto t1 = harnessHelper->newOperationContext(client1.get()); + Lock::GlobalLock gl1(t1.get(), MODE_IX); + auto client2 = harnessHelper->serviceContext()->makeClient("c2"); auto t2 = harnessHelper->newOperationContext(client2.get()); + Lock::GlobalLock gl2(t2.get(), MODE_IX); // ensure we start transactions rs->dataFor(t1.get(), id2); @@ -192,7 +221,7 @@ TEST(WiredTigerRecordStoreTest, Isolation2) { { WriteUnitOfWork w(t2.get()); - ASSERT_EQUALS(string("a"), rs->dataFor(t2.get(), id1).data()); + ASSERT_EQUALS(std::string("a"), rs->dataFor(t2.get(), id1).data()); try { // this should fail as our version of id1 is too old rs->updateRecord(t2.get(), id1, "c", 2).transitional_ignore(); @@ -204,7 +233,7 @@ TEST(WiredTigerRecordStoreTest, Isolation2) { } StatusWith insertBSON(ServiceContext::UniqueOperationContext& opCtx, - unique_ptr& rs, + std::unique_ptr& rs, const Timestamp& opTime) { BSONObj obj = BSON("ts" << opTime); WriteUnitOfWork wuow(opCtx.get()); @@ -219,9 +248,9 @@ StatusWith insertBSON(ServiceContext::UniqueOperationContext& opCtx, return res; } -RecordId _oplogOrderInsertOplog(OperationContext* opCtx, - const unique_ptr& rs, - int inc) { +RecordId oplogOrderInsertOplog(OperationContext* opCtx, + const std::unique_ptr& rs, + int inc) { Timestamp opTime = Timestamp(5, inc); Status status = rs->oplogDiskLocRegister(opCtx, opTime, false); ASSERT_OK(status); @@ -231,20 +260,23 @@ RecordId _oplogOrderInsertOplog(OperationContext* opCtx, return res.getValue(); } -// Test that even when the oplog durability loop is paused, we can still advance the commit point as -// long as the commit for each insert comes before the next insert starts. +/** + * Test that even when the oplog durability loop is paused, we can still advance the commit point as + * long as the commit for each insert comes before the next insert starts. 
+ */ TEST(WiredTigerRecordStoreTest, OplogDurableVisibilityInOrder) { ON_BLOCK_EXIT([] { WTPauseOplogVisibilityUpdateLoop.setMode(FailPoint::off); }); WTPauseOplogVisibilityUpdateLoop.setMode(FailPoint::alwaysOn); - unique_ptr harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newOplogRecordStore()); + std::unique_ptr harnessHelper(newRecordStoreHarnessHelper()); + std::unique_ptr rs(harnessHelper->newOplogRecordStore()); auto wtrs = checked_cast(rs.get()); { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); - RecordId id = _oplogOrderInsertOplog(opCtx.get(), rs, 1); + RecordId id = oplogOrderInsertOplog(opCtx.get(), rs, 1); ASSERT(wtrs->isOpHidden_forTest(id)); uow.commit(); ASSERT(wtrs->isOpHidden_forTest(id)); @@ -252,28 +284,32 @@ TEST(WiredTigerRecordStoreTest, OplogDurableVisibilityInOrder) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); - RecordId id = _oplogOrderInsertOplog(opCtx.get(), rs, 2); + RecordId id = oplogOrderInsertOplog(opCtx.get(), rs, 2); ASSERT(wtrs->isOpHidden_forTest(id)); uow.commit(); ASSERT(wtrs->isOpHidden_forTest(id)); } } -// Test that Oplog entries inserted while there are hidden entries do not become visible until the -// op and all earlier ops are durable. +/** + * Test that Oplog entries inserted while there are hidden entries do not become visible until the + * op and all earlier ops are durable. + */ TEST(WiredTigerRecordStoreTest, OplogDurableVisibilityOutOfOrder) { ON_BLOCK_EXIT([] { WTPauseOplogVisibilityUpdateLoop.setMode(FailPoint::off); }); WTPauseOplogVisibilityUpdateLoop.setMode(FailPoint::alwaysOn); - unique_ptr harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newOplogRecordStore()); + std::unique_ptr harnessHelper(newRecordStoreHarnessHelper()); + std::unique_ptr rs(harnessHelper->newOplogRecordStore()); auto wtrs = checked_cast(rs.get()); ServiceContext::UniqueOperationContext longLivedOp(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(longLivedOp.get(), MODE_IX); WriteUnitOfWork uow(longLivedOp.get()); - RecordId id1 = _oplogOrderInsertOplog(longLivedOp.get(), rs, 1); + RecordId id1 = oplogOrderInsertOplog(longLivedOp.get(), rs, 1); ASSERT(wtrs->isOpHidden_forTest(id1)); @@ -282,8 +318,9 @@ TEST(WiredTigerRecordStoreTest, OplogDurableVisibilityOutOfOrder) { auto innerClient = harnessHelper->serviceContext()->makeClient("inner"); ServiceContext::UniqueOperationContext opCtx( harnessHelper->newOperationContext(innerClient.get())); + Lock::GlobalLock globalLock(opCtx.get(), MODE_IX); WriteUnitOfWork uow(opCtx.get()); - id2 = _oplogOrderInsertOplog(opCtx.get(), rs, 2); + id2 = oplogOrderInsertOplog(opCtx.get(), rs, 2); ASSERT(wtrs->isOpHidden_forTest(id2)); uow.commit(); } @@ -311,7 +348,7 @@ TEST(WiredTigerRecordStoreTest, OplogDurableVisibilityOutOfOrder) { TEST(WiredTigerRecordStoreTest, AppendCustomStatsMetadata) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); - unique_ptr rs(harnessHelper->newRecordStore("a.b")); + std::unique_ptr rs(harnessHelper->newRecordStore("a.b")); ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); BSONObjBuilder builder; @@ -335,7 +372,7 @@ TEST(WiredTigerRecordStoreTest, AppendCustomStatsMetadata) { TEST(WiredTigerRecordStoreTest, 
AppendCustomNumericStats) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); - unique_ptr rs(harnessHelper->newRecordStore("a.c")); + std::unique_ptr rs(harnessHelper->newRecordStore("a.c")); ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); BSONObjBuilder builder; @@ -389,7 +426,9 @@ StatusWith insertBSONWithSize(OperationContext* opCtx, return res; } -// Insert records into an oplog and verify the number of truncate markers that are created. +/** + * Insert records into an oplog and verify the number of truncate markers that are created. + */ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CreateNewMarker) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); std::unique_ptr rs(harnessHelper->newOplogRecordStore()); @@ -401,51 +440,54 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CreateNewMarker) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); - ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers()); + ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers_forTest()); // Inserting a record smaller than 'minBytesPerTruncateMarker' shouldn't create a new oplog // truncate marker. ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 1), 99), RecordId(1, 1)); - ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(99, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(99, oplogTruncateMarkers->currentBytes_forTest()); // Inserting another record such that their combined size exceeds // 'minBytesPerTruncateMarker' should cause a new truncate marker to be created. ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 2), 51), RecordId(1, 2)); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); // Inserting a record such that the combined size of this record and the previously inserted // one exceed 'minBytesPerTruncateMarker' shouldn't cause a new truncate marker to be // created because we've started filling a new truncate marker. ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 3), 50), RecordId(1, 3)); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); // Inserting a record such that the combined size of this record and the previously inserted // one is exactly equal to 'minBytesPerTruncateMarker' should cause a new truncate marker to // be created. 
ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 4), 50), RecordId(1, 4)); - ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); // Inserting a single record that exceeds 'minBytesPerTruncateMarker' should cause a new // truncate marker to // be created. ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 5), 101), RecordId(1, 5)); - ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } } -// Insert records into an oplog and try to update them. The updates shouldn't succeed if the size of -// record is changed. +/** + * Insert records into an oplog and try to update them. The updates shouldn't succeed if the size of + * record is changed. + */ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_UpdateRecord) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); std::unique_ptr rs(harnessHelper->newOplogRecordStore()); @@ -459,13 +501,14 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_UpdateRecord) { // the truncate marker currently being filled. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 1), 100), RecordId(1, 1)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 2), 50), RecordId(1, 2)); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); } // Attempts to grow the records should fail. @@ -512,14 +555,16 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_UpdateRecord) { rs->updateRecord(opCtx.get(), RecordId(1, 2), changed2.objdata(), changed2.objsize())); wuow.commit(); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); } } -// Insert multiple records and truncate the oplog using RecordStore::truncate(). The operation -// should leave no truncate markers, including the partially filled one. +/** + * Insert multiple records and truncate the oplog using RecordStore::truncate(). The operation + * should leave no truncate markers, including the partially filled one. 
+ */ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_Truncate) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); std::unique_ptr rs(harnessHelper->newOplogRecordStore()); @@ -531,18 +576,20 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_Truncate) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 1), 50), RecordId(1, 1)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 2), 50), RecordId(1, 2)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 3), 50), RecordId(1, 3)); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); } { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQ(3, rs->numRecords(opCtx.get())); ASSERT_EQ(150, rs->dataSize(opCtx.get())); @@ -553,15 +600,17 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_Truncate) { ASSERT_EQ(0, rs->dataSize(opCtx.get())); ASSERT_EQ(0, rs->numRecords(opCtx.get())); - ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } } -// Insert multiple records, truncate the oplog using RecordStore::cappedTruncateAfter(), and -// verify that the metadata for each truncate marker is updated. If a full truncate marker is -// partially truncated, then it should become the truncate marker currently being filled. +/** + * Insert multiple records, truncate the oplog using RecordStore::cappedTruncateAfter(), and verify + * that the metadata for each truncate marker is updated. If a full truncate marker is partially + * truncated, then it should become the truncate marker currently being filled. + */ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); std::unique_ptr rs(harnessHelper->newOplogRecordStore()); @@ -573,6 +622,7 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 1), 400), RecordId(1, 1)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 2), 800), RecordId(1, 2)); @@ -588,18 +638,22 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { ASSERT_EQ(9, rs->numRecords(opCtx.get())); ASSERT_EQ(2600, rs->dataSize(opCtx.get())); - ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(3, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(300, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(3, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(300, oplogTruncateMarkers->currentBytes_forTest()); } // Make sure all are visible. 
- rs->waitForAllEarlierOplogWritesToBeVisible(harnessHelper->newOperationContext().get()); + { + ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + rs->waitForAllEarlierOplogWritesToBeVisible(opCtx.get()); + } // Truncate data using an inclusive RecordId that exists inside the truncate marker currently // being filled. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); rs->cappedTruncateAfter(opCtx.get(), RecordId(1, 8), @@ -608,9 +662,9 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { ASSERT_EQ(7, rs->numRecords(opCtx.get())); ASSERT_EQ(2350, rs->dataSize(opCtx.get())); - ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); } // Truncate data using an inclusive RecordId that refers to the 'lastRecord' of a full truncate @@ -618,6 +672,7 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { // The truncate marker should become the one currently being filled. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); rs->cappedTruncateAfter(opCtx.get(), RecordId(1, 6), @@ -626,15 +681,16 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { ASSERT_EQ(5, rs->numRecords(opCtx.get())); ASSERT_EQ(1950, rs->dataSize(opCtx.get())); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(3, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(750, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(3, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(750, oplogTruncateMarkers->currentBytes_forTest()); } // Truncate data using a non-inclusive RecordId that exists inside the truncate marker currently // being filled. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); rs->cappedTruncateAfter(opCtx.get(), RecordId(1, 3), @@ -643,9 +699,9 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { ASSERT_EQ(3, rs->numRecords(opCtx.get())); ASSERT_EQ(1400, rs->dataSize(opCtx.get())); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(200, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(200, oplogTruncateMarkers->currentBytes_forTest()); } // Truncate data using a non-inclusive RecordId that refers to the 'lastRecord' of a full @@ -653,6 +709,7 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { // The truncate marker should remain intact. 
{ ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); rs->cappedTruncateAfter(opCtx.get(), RecordId(1, 2), @@ -661,15 +718,16 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { ASSERT_EQ(2, rs->numRecords(opCtx.get())); ASSERT_EQ(1200, rs->dataSize(opCtx.get())); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } // Truncate data using a non-inclusive RecordId that exists inside a full truncate marker. The // truncate marker should become the one currently being filled. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); rs->cappedTruncateAfter(opCtx.get(), RecordId(1, 1), @@ -678,13 +736,15 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_CappedTruncateAfter) { ASSERT_EQ(1, rs->numRecords(opCtx.get())); ASSERT_EQ(400, rs->dataSize(opCtx.get())); - ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(400, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(400, oplogTruncateMarkers->currentBytes_forTest()); } } -// Verify that oplog truncate markers are reclaimed when cappedMaxSize is exceeded. +/** + * Verify that oplog truncate markers are reclaimed when cappedMaxSize is exceeded. + */ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_ReclaimTruncateMarkers) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); std::unique_ptr rs(harnessHelper->newOplogRecordStore()); @@ -701,6 +761,7 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_ReclaimTruncateMarkers) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 1), 100), RecordId(1, 1)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 2), 110), RecordId(1, 2)); @@ -708,9 +769,9 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_ReclaimTruncateMarkers) { ASSERT_EQ(3, rs->numRecords(opCtx.get())); ASSERT_EQ(330, rs->dataSize(opCtx.get())); - ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } // Fail to truncate the truncate marker when cappedMaxSize is exceeded, but the persisted @@ -718,33 +779,36 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_ReclaimTruncateMarkers) { // rely on). 
{ ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); - harnessHelper->advanceStableTimestamp(Timestamp(1, 0)); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + harnessHelper->advanceStableTimestamp(Timestamp(1, 0)); wtrs->reclaimOplog(opCtx.get()); ASSERT_EQ(3, rs->numRecords(opCtx.get())); ASSERT_EQ(330, rs->dataSize(opCtx.get())); - ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } // Truncate a truncate marker when cappedMaxSize is exceeded. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); harnessHelper->advanceStableTimestamp(Timestamp(1, 3)); wtrs->reclaimOplog(opCtx.get()); ASSERT_EQ(2, rs->numRecords(opCtx.get())); ASSERT_EQ(230, rs->dataSize(opCtx.get())); - ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 4), 130), RecordId(1, 4)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 5), 140), RecordId(1, 5)); @@ -752,69 +816,74 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_ReclaimTruncateMarkers) { ASSERT_EQ(5, rs->numRecords(opCtx.get())); ASSERT_EQ(550, rs->dataSize(opCtx.get())); - ASSERT_EQ(4U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(4U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); } // Truncate multiple truncate markers if necessary. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); harnessHelper->advanceStableTimestamp(Timestamp(1, 6)); wtrs->reclaimOplog(opCtx.get()); ASSERT_EQ(2, rs->numRecords(opCtx.get())); ASSERT_EQ(190, rs->dataSize(opCtx.get())); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); } // No-op if dataSize <= cappedMaxSize. 
{ ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); harnessHelper->advanceStableTimestamp(Timestamp(1, 6)); wtrs->reclaimOplog(opCtx.get()); ASSERT_EQ(2, rs->numRecords(opCtx.get())); ASSERT_EQ(190, rs->dataSize(opCtx.get())); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); } // Don't truncate the last truncate marker before the truncate point, even if the truncate point // is ahead of it. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 7), 190), RecordId(1, 7)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 9), 120), RecordId(1, 9)); ASSERT_EQ(4, rs->numRecords(opCtx.get())); ASSERT_EQ(500, rs->dataSize(opCtx.get())); - ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); harnessHelper->advanceStableTimestamp(Timestamp(1, 8)); wtrs->reclaimOplog(opCtx.get()); ASSERT_EQ(3, rs->numRecords(opCtx.get())); ASSERT_EQ(360, rs->dataSize(opCtx.get())); - ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } // Don't truncate entire oplog. 
{ ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 10), 90), RecordId(1, 10)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 11), 210), @@ -822,53 +891,59 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_ReclaimTruncateMarkers) { ASSERT_EQ(5, rs->numRecords(opCtx.get())); ASSERT_EQ(660, rs->dataSize(opCtx.get())); - ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(3U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); harnessHelper->advanceStableTimestamp(Timestamp(1, 12)); wtrs->reclaimOplog(opCtx.get()); ASSERT_EQ(2, rs->numRecords(opCtx.get())); ASSERT_EQ(300, rs->dataSize(opCtx.get())); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } // OK to truncate all truncate markers if there are records in the oplog that are before or at // the truncate-up-to point, that have not yet created a truncate marker. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + // Use timestamp (1, 13) as we can't commit at the stable timestamp (1, 12). auto t = insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 13), 90); ASSERT_EQ(t, RecordId(1, 13)); ASSERT_EQ(3, rs->numRecords(opCtx.get())); ASSERT_EQ(390, rs->dataSize(opCtx.get())); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(90, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(90, oplogTruncateMarkers->currentBytes_forTest()); } { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); harnessHelper->advanceStableTimestamp(Timestamp(1, 13)); wtrs->reclaimOplog(opCtx.get()); ASSERT_EQ(1, rs->numRecords(opCtx.get())); ASSERT_EQ(90, rs->dataSize(opCtx.get())); - ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(90, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(90, oplogTruncateMarkers->currentBytes_forTest()); } } -// Verify that an oplog truncate marker isn't created if it would cause the logical representation -// of the records to not be in increasing order. +/** + * Verify that an oplog truncate marker isn't created if it would cause the logical representation + * of the records to not be in increasing order. 
+ */ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_AscendingOrder) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); std::unique_ptr rs(harnessHelper->newOplogRecordStore()); @@ -880,34 +955,35 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_AscendingOrder) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); - ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers()); + ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers_forTest()); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(2, 2), 50), RecordId(2, 2)); - ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(50, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(0U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(50, oplogTruncateMarkers->currentBytes_forTest()); // Inserting a record that has a smaller RecordId than the previously inserted record should // be able to create a new truncate marker when no truncate markers already exist. ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(2, 1), 50), RecordId(2, 1)); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); // However, inserting a record that has a smaller RecordId than most recently created // truncate marker's last record shouldn't cause a new truncate marker to be created, even // if the size of the inserted record exceeds 'minBytesPerTruncateMarker'. ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 1), 100), RecordId(1, 1)); - ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(1, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(100, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(1U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(1, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(100, oplogTruncateMarkers->currentBytes_forTest()); // Inserting a record that has a larger RecordId than the most recently created truncate // marker's last record should then cause a new truncate marker to be created. ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(2, 3), 50), RecordId(2, 3)); - ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers()); - ASSERT_EQ(0, oplogTruncateMarkers->currentRecords()); - ASSERT_EQ(0, oplogTruncateMarkers->currentBytes()); + ASSERT_EQ(2U, oplogTruncateMarkers->numMarkers_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentRecords_forTest()); + ASSERT_EQ(0, oplogTruncateMarkers->currentBytes_forTest()); } } @@ -915,8 +991,8 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_AscendingOrder) { // generated because the estimated 'dataSize' is smaller than the minimum size for a truncate // marker, tets that // (1) The oplog is scanned -// (2) OplogTruncateMarkers::currentBytes() reflects the actual size of the oplog instead of the -// estimated size. +// (2) OplogTruncateMarkers::currentBytes_forTest() reflects the actual size of the oplog instead +// of the estimated size. 
TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_NoMarkersGeneratedFromScanning) { std::unique_ptr harnessHelper = newRecordStoreHarnessHelper(); auto wtHarnessHelper = dynamic_cast(harnessHelper.get()); @@ -928,6 +1004,8 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_NoMarkersGeneratedFromScann int realSizePerRecord = 100; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + for (int i = 1; i <= realNumRecords; i++) { ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(i, 0), realSizePerRecord), RecordId(i, 0)); @@ -938,24 +1016,25 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_NoMarkersGeneratedFromScann auto wtKvEngine = dynamic_cast(harnessHelper->getEngine()); wtKvEngine->getOplogManager()->setOplogReadTimestamp(Timestamp(realNumRecords, 0)); - // Force the estimates of 'dataSize' and 'numRecords' to be lower than the real values. wtrs->setNumRecords(realNumRecords - 1); wtrs->setDataSize((realNumRecords - 1) * realSizePerRecord); // Initialize the truncate markers. ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + wtrs->postConstructorInit(opCtx.get(), NamespaceString::kRsOplogNamespace); auto oplogTruncateMarkers = wtrs->oplogTruncateMarkers(); ASSERT_FALSE(oplogTruncateMarkers->processedBySampling()); - auto numMarkers = oplogTruncateMarkers->numMarkers(); + auto numMarkers = oplogTruncateMarkers->numMarkers_forTest(); ASSERT_EQ(numMarkers, 0U); // A forced scan over the RecordStore should force the 'currentBytes' to be accurate in the // truncate markers as well as the RecordStore's 'numRecords' and 'dataSize'. - ASSERT_EQ(oplogTruncateMarkers->currentBytes(), realNumRecords * realSizePerRecord); + ASSERT_EQ(oplogTruncateMarkers->currentBytes_forTest(), realNumRecords * realSizePerRecord); ASSERT_EQ(wtrs->dataSize(opCtx.get()), realNumRecords * realSizePerRecord); ASSERT_EQ(wtrs->numRecords(opCtx.get()), realNumRecords); } @@ -975,6 +1054,8 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_Duplicates) { // Before initializing the RecordStore, which also starts the oplog sampling process, // populate with a few records. ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(1, 0), 100), RecordId(1, 0)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(2, 0), 100), RecordId(2, 0)); ASSERT_EQ(insertBSONWithSize(opCtx.get(), rs.get(), Timestamp(3, 0), 100), RecordId(3, 0)); @@ -998,16 +1079,18 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_Duplicates) { // Confirm that sampling occurred and that some truncate markers were generated. ASSERT(oplogTruncateMarkers->processedBySampling()); - auto truncateMarkersBefore = oplogTruncateMarkers->numMarkers(); + auto truncateMarkersBefore = oplogTruncateMarkers->numMarkers_forTest(); ASSERT_GT(truncateMarkersBefore, 0U); - ASSERT_GT(oplogTruncateMarkers->currentBytes(), 0); + ASSERT_GT(oplogTruncateMarkers->currentBytes_forTest(), 0); { // Reclaiming should do nothing because the data size is still under the maximum. 
ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + wtHarnessHelper->advanceStableTimestamp(Timestamp(4, 0)); wtrs->reclaimOplog(opCtx.get()); - ASSERT_EQ(truncateMarkersBefore, oplogTruncateMarkers->numMarkers()); + ASSERT_EQ(truncateMarkersBefore, oplogTruncateMarkers->numMarkers_forTest()); // Reduce the oplog size to ensure we create a truncate marker and truncate on the next // insert. @@ -1021,7 +1104,7 @@ TEST(WiredTigerRecordStoreTest, OplogTruncateMarkers_Duplicates) { // Ensure every truncate marker has been cleaned up except for the last one ending in 6. wtHarnessHelper->advanceStableTimestamp(Timestamp(6, 0)); wtrs->reclaimOplog(opCtx.get()); - ASSERT_EQ(1, oplogTruncateMarkers->numMarkers()); + ASSERT_EQ(1, oplogTruncateMarkers->numMarkers_forTest()); // The original oplog should have rolled over and the size and count should be accurate. ASSERT_EQ(1, wtrs->numRecords(opCtx.get())); @@ -1033,7 +1116,7 @@ void testTruncateRange(int64_t numRecordsToInsert, int64_t deletionPosBegin, int64_t deletionPosEnd) { auto harnessHelper = newRecordStoreHarnessHelper(); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); auto wtrs = checked_cast(rs.get()); @@ -1083,106 +1166,83 @@ void testTruncateRange(int64_t numRecordsToInsert, } ASSERT_EQ(expectedRemainingRecordIds, actualRemainingRecordIds); } - TEST(WiredTigerRecordStoreTest, RangeTruncateTest) { testTruncateRange(100, 3, 50); } - TEST(WiredTigerRecordStoreTest, RangeTruncateSameValueTest) { testTruncateRange(100, 3, 3); } - DEATH_TEST(WiredTigerRecordStoreTest, RangeTruncateIncorrectOrderTest, "Start position cannot be after end position") { testTruncateRange(100, 4, 3); } -TEST(WiredTigerRecordStoreTest, RangeTruncateAllTest) { - unique_ptr harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); - - auto wtrs = checked_cast(rs.get()); - - auto opCtx = harnessHelper->newOperationContext(); - - static constexpr auto kNumRecordsToInsert = 100; - for (int i = 0; i < kNumRecordsToInsert; i++) { - auto recordId = insertBSONWithSize(opCtx.get(), wtrs, Timestamp(1, 0), 100); - ASSERT_OK(recordId); - } - - auto sizePerRecord = wtrs->dataSize(opCtx.get()) / wtrs->numRecords(opCtx.get()); - - { - WriteUnitOfWork wuow(opCtx.get()); - ASSERT_OK(wtrs->rangeTruncate(opCtx.get(), - RecordId(), - RecordId(), - -(sizePerRecord * kNumRecordsToInsert), - -kNumRecordsToInsert)); - ASSERT_EQ(wtrs->dataSize(opCtx.get()), 0); - ASSERT_EQ(wtrs->numRecords(opCtx.get()), 0); - wuow.commit(); - } - - auto cursor = wtrs->getCursor(opCtx.get(), true); - ASSERT_FALSE(cursor->next()); -} - TEST(WiredTigerRecordStoreTest, GetLatestOplogTest) { - unique_ptr harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newOplogRecordStore()); + std::unique_ptr harnessHelper(newRecordStoreHarnessHelper()); + std::unique_ptr rs(harnessHelper->newOplogRecordStore()); auto wtrs = checked_cast(rs.get()); // 1) Initialize the top of oplog to "1". 
ServiceContext::UniqueOperationContext op1(harnessHelper->newOperationContext()); - op1->recoveryUnit()->beginUnitOfWork(op1->readOnly()); - Timestamp tsOne = Timestamp( - static_cast(_oplogOrderInsertOplog(op1.get(), rs, 1).getLong())); - op1->recoveryUnit()->commitUnitOfWork(); + Lock::GlobalLock gl1(op1.get(), MODE_IX); + + Timestamp tsOne = [&] { + WriteUnitOfWork op1WUOW(op1.get()); + Timestamp tsOne = Timestamp( + static_cast(oplogOrderInsertOplog(op1.get(), rs, 1).getLong())); + op1WUOW.commit(); + return tsOne; + }(); // Asserting on a recovery unit without a snapshot. ASSERT_EQ(tsOne, wtrs->getLatestOplogTimestamp(op1.get())); // 2) Open a hole at time "2". - op1->recoveryUnit()->beginUnitOfWork(op1->readOnly()); + boost::optional op1WUOW(op1.get()); // Don't save the return value because the compiler complains about unused variables. - _oplogOrderInsertOplog(op1.get(), rs, 2); + oplogOrderInsertOplog(op1.get(), rs, 2); // Store the client with an uncommitted transaction. Create a new, concurrent client. auto client1 = Client::releaseCurrent(); Client::initThread("client2"); ServiceContext::UniqueOperationContext op2(harnessHelper->newOperationContext()); + boost::optional gl2; + gl2.emplace(op2.get(), MODE_IX); // Should not see uncommited write from op1. ASSERT_EQ(tsOne, wtrs->getLatestOplogTimestamp(op2.get())); - op2->recoveryUnit()->beginUnitOfWork(op2->readOnly()); - Timestamp tsThree = Timestamp( - static_cast(_oplogOrderInsertOplog(op2.get(), rs, 3).getLong())); - op2->recoveryUnit()->commitUnitOfWork(); + Timestamp tsThree = [&] { + WriteUnitOfWork op2WUOW(op2.get()); + Timestamp tsThree = Timestamp( + static_cast(oplogOrderInsertOplog(op2.get(), rs, 3).getLong())); + op2WUOW.commit(); + return tsThree; + }(); // After committing, three is the top of oplog. ASSERT_EQ(tsThree, wtrs->getLatestOplogTimestamp(op2.get())); // Switch to client 1. + gl2.reset(); op2.reset(); auto client2 = Client::releaseCurrent(); Client::setCurrent(std::move(client1)); - op1->recoveryUnit()->commitUnitOfWork(); + op1WUOW->commit(); // Committing the write at timestamp "2" does not change the top of oplog result. A new query // with client 1 will see timestamp "3". ASSERT_EQ(tsThree, wtrs->getLatestOplogTimestamp(op1.get())); } TEST(WiredTigerRecordStoreTest, CursorInActiveTxnAfterNext) { - unique_ptr harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr harnessHelper(newRecordStoreHarnessHelper()); + std::unique_ptr rs(harnessHelper->newRecordStore()); RecordId rid1; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); StatusWith res = rs->insertRecord(opCtx.get(), "a", 2, Timestamp()); @@ -1198,6 +1258,8 @@ TEST(WiredTigerRecordStoreTest, CursorInActiveTxnAfterNext) { // Cursors should always ensure they are in an active transaction when next() is called. 
{ ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto ru = WiredTigerRecoveryUnit::get(opCtx.get()); auto cursor = rs->getCursor(opCtx.get()); @@ -1217,12 +1279,13 @@ TEST(WiredTigerRecordStoreTest, CursorInActiveTxnAfterNext) { } TEST(WiredTigerRecordStoreTest, CursorInActiveTxnAfterSeek) { - unique_ptr harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr harnessHelper(newRecordStoreHarnessHelper()); + std::unique_ptr rs(harnessHelper->newRecordStore()); RecordId rid1; { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); StatusWith res = rs->insertRecord(opCtx.get(), "a", 2, Timestamp()); @@ -1238,6 +1301,8 @@ TEST(WiredTigerRecordStoreTest, CursorInActiveTxnAfterSeek) { // Cursors should always ensure they are in an active transaction when seekExact() is called. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto ru = WiredTigerRecoveryUnit::get(opCtx.get()); auto cursor = rs->getCursor(opCtx.get()); @@ -1260,10 +1325,10 @@ TEST(WiredTigerRecordStoreTest, CursorInActiveTxnAfterSeek) { // This test case complements StorageEngineTest:TemporaryRecordStoreClustered which verifies // clustered temporary record stores. TEST(WiredTigerRecordStoreTest, ClusteredRecordStore) { - const unique_ptr harnessHelper(newRecordStoreHarnessHelper()); + const std::unique_ptr harnessHelper(newRecordStoreHarnessHelper()); const ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); - ASSERT(opCtx.get()); const std::string ns = "testRecordStore"; const NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); const std::string uri = WiredTigerKVEngine::kTableUriPrefix + ns; @@ -1336,11 +1401,13 @@ TEST(WiredTigerRecordStoreTest, ClusteredRecordStore) { // transaction deletes the same rows before we have a chance of patching up the metadata. TEST(WiredTigerRecordStoreTest, SizeInfoAccurateAfterRollbackWithDelete) { const auto harnessHelper(newRecordStoreHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr rs(harnessHelper->newRecordStore()); RecordId rid; // This record will be deleted by two transactions. ServiceContext::UniqueOperationContext ctx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(ctx.get(), MODE_IX); + { WriteUnitOfWork uow(ctx.get()); rid = rs->insertRecord(ctx.get(), "a", 2, Timestamp()).getValue(); @@ -1361,6 +1428,7 @@ TEST(WiredTigerRecordStoreTest, SizeInfoAccurateAfterRollbackWithDelete) { stdx::thread abortedThread([&harnessHelper, &rs, &rid, aborted, deleted]() { auto client = harnessHelper->serviceContext()->makeClient("c1"); auto ctx = harnessHelper->newOperationContext(client.get()); + Lock::GlobalLock globalLock(ctx.get(), MODE_IX); WriteUnitOfWork txn(ctx.get()); // Registered changes are executed in reverse order. 
rs->deleteRecord(ctx.get(), rid); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp index 742c43b626217..52004e1df00a9 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.cpp @@ -28,7 +28,33 @@ */ #include "mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status_with.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" namespace mongo { @@ -42,7 +68,6 @@ std::string _testLoggingSettings(std::string extraStrings) { WiredTigerHarnessHelper::WiredTigerHarnessHelper(Options options, StringData extraStrings) : _dbpath("wt_test"), - _lockerNoopClientObserverRegisterer(getServiceContext()), _engine(Client::getCurrent()->makeOperationContext().get(), kWiredTigerEngineName, _dbpath.path(), @@ -67,16 +92,16 @@ std::unique_ptr WiredTigerHarnessHelper::newRecordStore( WiredTigerRecoveryUnit* ru = checked_cast(opCtx->recoveryUnit()); std::string uri = WiredTigerKVEngine::kTableUriPrefix + ns; StringData ident = ns; - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); StatusWith result = WiredTigerRecordStore::generateCreateString( kWiredTigerEngineName, - NamespaceString(ns), + NamespaceString::createNamespaceString_forTest(ns), ident, collOptions, "", keyFormat, - WiredTigerUtil::useTableLogging(NamespaceString(ns))); + WiredTigerUtil::useTableLogging(NamespaceString::createNamespaceString_forTest(ns))); ASSERT_TRUE(result.isOK()); std::string config = result.getValue(); @@ -116,7 +141,7 @@ std::unique_ptr WiredTigerHarnessHelper::newOplogRecordStore() { std::unique_ptr WiredTigerHarnessHelper::newOplogRecordStoreNoInit() { ServiceContext::UniqueOperationContext opCtx(newOperationContext()); WiredTigerRecoveryUnit* ru = checked_cast(opCtx->recoveryUnit()); - std::string ident = NamespaceString::kRsOplogNamespace.ns(); + std::string ident = NamespaceString::kRsOplogNamespace.ns().toString(); std::string uri = WiredTigerKVEngine::kTableUriPrefix + ident; CollectionOptions options; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.h b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.h index 8a9ef19b16e8d..194a30bb5bc99 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.h @@ -27,11 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/record_store_test_harness.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" +#include "mongo/platform/basic.h" #include "mongo/unittest/temp_dir.h" #include "mongo/util/clock_source_mock.h" @@ -80,11 +89,6 @@ class WiredTigerHarnessHelper final : public RecordStoreHarnessHelper { private: unittest::TempDir _dbpath; ClockSourceMock _cs; - - // Since WTKVEngine starts threads that require the global service context, we load - // the client observer for LockerNoop before creating the storage engine to avoid a - // potential data race (that might be reported by a tool like TSAN). - LockerNoopClientObserverRegisterer _lockerNoopClientObserverRegisterer; WiredTigerKVEngine _engine; }; } // namespace mongo diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp index 8ff328371c40c..0a8cc303948fb 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.cpp @@ -28,28 +28,48 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include -#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/parse_number.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/server_options.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_prepare_conflict.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_stats.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/log_truncation.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" #include "mongo/util/hex.h" #include "mongo/util/stacktrace.h" +#include "mongo/util/str.h" #include "mongo/util/testing_proctor.h" -#include -#include -#include - #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -221,14 +241,14 @@ void WiredTigerRecoveryUnit::setTxnModified() { } boost::optional WiredTigerRecoveryUnit::getOplogVisibilityTs() { - if (!_isOplogReader) { - return boost::none; - } - getSession(); return _oplogVisibleTs; } +void WiredTigerRecoveryUnit::setOplogVisibilityTs(boost::optional oplogVisibleTs) { + _oplogVisibleTs = oplogVisibleTs; +} 
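Editorial aside (not part of the patch above): with the `_isOplogReader` flag removed, every `kNoTimestamp` transaction now latches the oplog visibility point when its snapshot opens, and the new `setOplogVisibilityTs()` lets callers override that bound explicitly. The sketch below shows, under those assumptions, how a forward oplog scan might stop at the visibility point; `scanVisibleOplog` and its loop body are hypothetical, not an existing MongoDB helper.

```cpp
// Hypothetical sketch only -- not part of this patch. Assumes the
// WiredTigerRecoveryUnit API shown in the hunk above and the generic
// RecordStore cursor interface.
#include <cstdint>

#include <boost/optional.hpp>

#include "mongo/db/operation_context.h"
#include "mongo/db/storage/record_store.h"
#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h"

namespace mongo {

void scanVisibleOplog(OperationContext* opCtx, RecordStore* oplogRs) {
    auto* wtRu = WiredTigerRecoveryUnit::get(opCtx);

    // getOplogVisibilityTs() opens the WT session as a side effect, which
    // records the current "no holes" point for this snapshot.
    boost::optional<std::int64_t> visibleTs = wtRu->getOplogVisibilityTs();

    auto cursor = oplogRs->getCursor(opCtx, /*forward=*/true);
    while (auto record = cursor->next()) {
        // Oplog RecordIds encode the entry's timestamp; anything newer than
        // the visibility point may still have uncommitted holes before it.
        if (visibleTs && record->id.getLong() > *visibleTs) {
            break;
        }
        // ... consume 'record' here ...
    }
}

}  // namespace mongo
```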
+ WiredTigerSession* WiredTigerRecoveryUnit::getSession() { if (!_isActive()) { _txnOpen(); @@ -257,12 +277,6 @@ void WiredTigerRecoveryUnit::preallocateSnapshot() { getSession(); } -void WiredTigerRecoveryUnit::preallocateSnapshotForOplogRead() { - // Indicate that we are an oplog reader before opening the snapshot - setIsOplogReader(); - preallocateSnapshot(); -} - void WiredTigerRecoveryUnit::_txnClose(bool commit) { invariant(_isActive(), toString(_getState())); @@ -389,9 +403,7 @@ void WiredTigerRecoveryUnit::_txnClose(bool commit) { _multiTimestampConstraintTracker = MultiTimestampConstraintTracker(); _prepareTimestamp = Timestamp(); _durableTimestamp = Timestamp(); - _catalogConflictTimestamp = Timestamp(); _roundUpPreparedTimestamps = RoundUpPreparedTimestamps::kNoRound; - _isOplogReader = false; _oplogVisibleTs = boost::none; _orderedCommit = true; // Default value is true; we assume all writes are ordered. if (_untimestampedWriteAssertionLevel != @@ -448,7 +460,7 @@ boost::optional WiredTigerRecoveryUnit::getPointInTimeReadTimestamp( } // Ensure a transaction is opened. Storage engine operations require the global lock. - invariant(opCtx->lockState()->isNoop() || opCtx->lockState()->isLocked()); + invariant(opCtx->lockState()->isLocked()); getSession(); switch (_timestampReadSource) { @@ -490,9 +502,7 @@ void WiredTigerRecoveryUnit::_txnOpen() { switch (_timestampReadSource) { case ReadSource::kNoTimestamp: { - if (_isOplogReader) { - _oplogVisibleTs = static_cast(_oplogManager->getOplogReadTimestamp()); - } + _oplogVisibleTs = static_cast(_oplogManager->getOplogReadTimestamp()); WiredTigerBeginTxnBlock(session, _prepareConflictBehavior, _roundUpPreparedTimestamps, @@ -913,25 +923,6 @@ std::unique_ptr WiredTigerRecoveryUnit::computeOperationStatistics return operationStats; } -void WiredTigerRecoveryUnit::setCatalogConflictingTimestamp(Timestamp timestamp) { - // This cannot be called after a storage snapshot is allocated. - invariant(!_isActive(), toString(_getState())); - invariant(_timestampReadSource == ReadSource::kNoTimestamp, - str::stream() << "Illegal to set catalog conflicting timestamp for a read source " - << static_cast(_timestampReadSource)); - invariant(_catalogConflictTimestamp.isNull(), - str::stream() << "Trying to set catalog conflicting timestamp to " - << timestamp.toString() << ". 
It's already set to " - << _catalogConflictTimestamp.toString()); - invariant(!timestamp.isNull()); - - _catalogConflictTimestamp = timestamp; -} - -Timestamp WiredTigerRecoveryUnit::getCatalogConflictingTimestamp() const { - return _catalogConflictTimestamp; -} - bool WiredTigerRecoveryUnit::gatherWriteContextForDebugging() const { return _gatherWriteContextForDebugging; } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h index 4b8541e4d52ff..f403575074875 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h @@ -29,24 +29,35 @@ #pragma once -#include - +#include +#include #include +#include +#include #include #include #include #include +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" #include "mongo/db/operation_context.h" #include "mongo/db/record_id.h" #include "mongo/db/repl/read_concern_level.h" #include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_stats.h" #include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_stats.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/timer.h" + namespace mongo { using RoundUpPreparedTimestamps = WiredTigerBeginTxnBlock::RoundUpPreparedTimestamps; @@ -77,7 +88,6 @@ class WiredTigerRecoveryUnit final : public RecoveryUnit { bool waitUntilUnjournaledWritesDurable(OperationContext* opCtx, bool stableCheckpoint) override; void preallocateSnapshot() override; - void preallocateSnapshotForOplogRead() override; Status majorityCommittedSnapshotAvailable() const override; @@ -109,10 +119,6 @@ class WiredTigerRecoveryUnit final : public RecoveryUnit { void setRoundUpPreparedTimestamps(bool value) override; - void setCatalogConflictingTimestamp(Timestamp timestamp) override; - - Timestamp getCatalogConflictingTimestamp() const override; - void allowOneUntimestampedWrite() override { invariant(!_isActive()); _untimestampedWriteAssertionLevel = @@ -159,13 +165,6 @@ class WiredTigerRecoveryUnit final : public RecoveryUnit { // ---- WT STUFF WiredTigerSession* getSession(); - void setIsOplogReader() { - _isOplogReader = true; - } - - bool getIsOplogReader() const { - return _isOplogReader; - } /** * Enter a period of wait or computation during which there are no WT calls. @@ -195,6 +194,7 @@ class WiredTigerRecoveryUnit final : public RecoveryUnit { void setTxnModified(); boost::optional getOplogVisibilityTs() override; + void setOplogVisibilityTs(boost::optional oplogVisibilityTs) override; static WiredTigerRecoveryUnit* get(OperationContext* opCtx) { return checked_cast(opCtx->recoveryUnit()); @@ -285,11 +285,12 @@ class WiredTigerRecoveryUnit final : public RecoveryUnit { Timestamp _prepareTimestamp; boost::optional _lastTimestampSet; Timestamp _readAtTimestamp; - Timestamp _catalogConflictTimestamp; UntimestampedWriteAssertionLevel _untimestampedWriteAssertionLevel = UntimestampedWriteAssertionLevel::kEnforce; std::unique_ptr _timer; - bool _isOplogReader = false; + // The guaranteed 'no holes' point in the oplog. 
Forward cursor oplog reads can only read up to + // this timestamp if they want to avoid missing any entries in the oplog that may not yet have + // committed ('holes'). @see WiredTigerOplogManager::getOplogReadTimestamp boost::optional _oplogVisibleTs = boost::none; bool _gatherWriteContextForDebugging = false; std::vector _writeContextForDebugging; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp index 8a576ee5e3fcf..c1d75f1e261dd 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_recovery_unit_test.cpp @@ -29,22 +29,51 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include +#include +#include +#include + +#include +#include +#include +#include + #include "mongo/base/checked_cast.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/global_settings.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/recovery_unit_test_harness.h" +#include "mongo/db/storage/snapshot_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_cursor_helpers.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp index c5e0e9b220b82..67a57fc3197b4 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.cpp @@ -28,20 +28,31 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/storage/wiredtiger/wiredtiger_server_status.h" +#include +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/status.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include 
"mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_server_status.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kFTDC diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h index 1203a311fb046..ed4788870f499 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_server_status.h @@ -29,7 +29,10 @@ #pragma once +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp index 2946b3572ba61..2d2e850499c00 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.cpp @@ -28,23 +28,31 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" - +#include +#include #include +#include + +#include +#include +#include +#include #include "mongo/base/error_codes.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/base/status.h" #include "mongo/db/global_settings.h" #include "mongo/db/repl/repl_settings.h" #include "mongo/db/storage/journal_listener.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_parameters_gen.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/thread.h" -#include "mongo/util/scopeguard.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -356,6 +364,13 @@ void WiredTigerSessionCache::waitUntilDurable(OperationContext* opCtx, if (token) { journalListener->onDurable(token.value()); } + + // The session is reset periodically so that WT doesn't consider it a rogue session and log + // about it. The session doesn't actually pin any resources that need to be released. 
+ if (_timeSinceLastDurabilitySessionReset.millis() > (5 * 60 * 1000 /* 5 minutes */)) { + _waitUntilDurableSession->reset(_waitUntilDurableSession); + _timeSinceLastDurabilitySessionReset.reset(); + } } void WiredTigerSessionCache::waitUntilPreparedUnitOfWorkCommitsOrAborts(OperationContext* opCtx, diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h index 98b5e01ed2e32..380577283f9f6 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache.h @@ -29,16 +29,24 @@ #pragma once +#include +#include #include +#include #include - +#include #include +#include "mongo/db/operation_context.h" #include "mongo/db/storage/journal_listener.h" #include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/spin_lock.h" +#include "mongo/util/time_support.h" +#include "mongo/util/timer.h" namespace mongo { @@ -401,6 +409,8 @@ class WiredTigerSessionCache { WT_SESSION* _waitUntilDurableSession = nullptr; // owned, and never explicitly closed // (uses connection close to clean up) + // Tracks the time since the last _waitUntilDurableSession reset(). + Timer _timeSinceLastDurabilitySessionReset; /** * Returns a session to the cache for later reuse. If closeAll was called between getting this diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp index de95fcce0e7b3..d8ce35323a695 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_cache_test.cpp @@ -27,17 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include #include +#include + #include "mongo/base/string_data.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_cursor.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/system_clock_source.h" namespace mongo { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_session_data.h b/src/mongo/db/storage/wiredtiger/wiredtiger_session_data.h new file mode 100644 index 0000000000000..42ce6277d1ca1 --- /dev/null +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_session_data.h @@ -0,0 +1,66 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include + +#include "mongo/db/operation_context.h" + +namespace mongo { + +void failPointPauseBeforeStorageCompactCommand(); + +/** + * Sets up a WT_SESSION to have callback data with which to check for MDB layer interrupts. + * See the 'general_handle' callback defined for wiredtiger_open() for more details. + */ +class SessionDataRAII { +public: + /** + * Allows WT operations running on this 'session' access to the MDB layer 'opCtx'. + */ + SessionDataRAII(WT_SESSION* session, OperationContext* opCtx) : _session(session) { + invariant(!_session->app_private); + _session->app_private = opCtx; + } + + /** + * Clears on exit the WT_SESSION::app_private void*. This allows the WT_SESSION to be safely + * returned to the WiredTigerSessionCache. + */ + ~SessionDataRAII() { + _session->app_private = nullptr; + } + +private: + WT_SESSION* _session; +}; + +} // namespace mongo diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp index e4b2e3935e21c..2b68a7a5f5b3e 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.cpp @@ -28,22 +28,33 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_cursor.h" #include "mongo/db/storage/wiredtiger/wiredtiger_customization_hooks.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/duration.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h index 077fbfe75e4d7..385e63a0f7809 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_size_storer.h @@ -29,14 +29,18 @@ #pragma once +#include +#include +#include #include - #include #include "mongo/base/string_data.h" 
+#include "mongo/db/operation_context.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp index 27769702efc5a..b527e6092ba01 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.cpp @@ -28,15 +28,24 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/db/server_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include "mongo/logv2/log.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h index 99daa480d34e6..37bf72b2ff960 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_snapshot_manager.h @@ -29,10 +29,13 @@ #pragma once +#include #include +#include #include #include "mongo/bson/timestamp.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/snapshot_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_begin_transaction_block.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp index e90f7ba05eec1..4bde501f79181 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_index_test.cpp @@ -27,33 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/catalog/collection_mock.h" -#include "mongo/db/catalog/index_catalog_entry.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/json.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/sorted_data_interface.h" #include "mongo/db/storage/sorted_data_interface_test_harness.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_index.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" -#include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/system_clock_source.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { -using std::string; - TEST(WiredTigerStandardIndexText, CursorInActiveTxnAfterNext) { auto harnessHelper = newSortedDataInterfaceHarnessHelper(); bool unique = false; @@ -63,6 +61,7 @@ TEST(WiredTigerStandardIndexText, CursorInActiveTxnAfterNext) { // Populate data. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); auto ks = makeKeyString(sdi.get(), BSON("" << 1), RecordId(1)); @@ -79,6 +78,8 @@ TEST(WiredTigerStandardIndexText, CursorInActiveTxnAfterNext) { // Cursors should always ensure they are in an active transaction when next() is called. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto ru = WiredTigerRecoveryUnit::get(opCtx.get()); auto cursor = sdi->newCursor(opCtx.get()); @@ -108,6 +109,7 @@ TEST(WiredTigerStandardIndexText, CursorInActiveTxnAfterSeek) { // Populate data. { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); WriteUnitOfWork uow(opCtx.get()); auto ks = makeKeyString(sdi.get(), BSON("" << 1), RecordId(1)); @@ -124,6 +126,8 @@ TEST(WiredTigerStandardIndexText, CursorInActiveTxnAfterSeek) { // Cursors should always ensure they are in an active transaction when seek() is called. 
{ ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + auto ru = WiredTigerRecoveryUnit::get(opCtx.get()); auto cursor = sdi->newCursor(opCtx.get()); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp index e29ed5b634c42..a293c029dda1c 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_standard_record_store_test.cpp @@ -27,50 +27,61 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include #include -#include #include +#include #include "mongo/base/checked_cast.h" -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/json.h" +#include "mongo/bson/mutable/damage_vector.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_record_store.h" #include "mongo/db/storage/wiredtiger/wiredtiger_record_store_test_harness.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_size_storer.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/fail_point.h" -#include "mongo/util/scopeguard.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { -using std::string; -using std::stringstream; -using std::unique_ptr; - namespace { + TEST(WiredTigerRecordStoreTest, StorageSizeStatisticsDisabled) { WiredTigerHarnessHelper harnessHelper("statistics=(none)"); - unique_ptr rs(harnessHelper.newRecordStore("a.b")); + std::unique_ptr rs(harnessHelper.newRecordStore("a.b")); ServiceContext::UniqueOperationContext opCtx(harnessHelper.newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_THROWS(rs->storageSize(opCtx.get()), AssertionException); } TEST(WiredTigerRecordStoreTest, SizeStorer1) { - unique_ptr harnessHelper(new WiredTigerHarnessHelper()); - unique_ptr rs(harnessHelper->newRecordStore()); + std::unique_ptr harnessHelper(new WiredTigerHarnessHelper()); + std::unique_ptr rs(harnessHelper->newRecordStore()); - string ident = rs->getIdent(); - string uri = checked_cast(rs.get())->getURI(); + std::string ident = rs->getIdent(); + std::string uri = checked_cast(rs.get())->getURI(); - string indexUri = WiredTigerKVEngine::kTableUriPrefix + "myindex"; + std::string indexUri = WiredTigerKVEngine::kTableUriPrefix + "myindex"; WiredTigerSizeStorer ss(harnessHelper->conn(), indexUri); checked_cast(rs.get())->setSizeStorer(&ss); @@ -78,6 +89,7 @@ TEST(WiredTigerRecordStoreTest, 
SizeStorer1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); { WriteUnitOfWork uow(opCtx.get()); for (int i = 0; i < N; i++) { @@ -90,6 +102,7 @@ TEST(WiredTigerRecordStoreTest, SizeStorer1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(N, rs->numRecords(opCtx.get())); } @@ -103,6 +116,8 @@ TEST(WiredTigerRecordStoreTest, SizeStorer1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + WiredTigerRecordStore::Params params; params.nss = NamespaceString::createNamespaceString_forTest("a.b"); params.ident = ident; @@ -123,11 +138,14 @@ TEST(WiredTigerRecordStoreTest, SizeStorer1) { { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_S); ASSERT_EQUALS(N, rs->numRecords(opCtx.get())); } { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + WiredTigerRecoveryUnit* ru = checked_cast(opCtx->recoveryUnit()); { @@ -196,6 +214,7 @@ TEST_F(SizeStorerUpdateTest, Basic) { TEST_F(SizeStorerUpdateTest, DataSizeModification) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); RecordId recordId; { @@ -252,6 +271,8 @@ TEST_F(SizeStorerUpdateTest, DataSizeModification) { // properly flushed to disk. TEST_F(SizeStorerUpdateTest, ReloadAfterRollbackAndFlush) { ServiceContext::UniqueOperationContext opCtx(harnessHelper->newOperationContext()); + Lock::GlobalLock globalLock(opCtx.get(), MODE_X); + // Do an op for which the sizeInfo is persisted, for safety so we don't check against 0. 
{ WriteUnitOfWork uow(opCtx.get()); diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_stats.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_stats.cpp index cf0faef03cc0b..ed70b4acaaa84 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_stats.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_stats.cpp @@ -29,8 +29,21 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_stats.h" +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_stats.h b/src/mongo/db/storage/wiredtiger/wiredtiger_stats.h index d35a582cd3462..2f68cba080a99 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_stats.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_stats.h @@ -29,8 +29,11 @@ #pragma once +#include +#include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/storage/storage_stats.h" namespace mongo { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_stats_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_stats_test.cpp index f29f360772c4c..aaff7a56c9a2d 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_stats_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_stats_test.cpp @@ -28,10 +28,24 @@ */ #include "mongo/db/storage/wiredtiger/wiredtiger_stats.h" + +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" -#include namespace mongo { namespace { @@ -168,7 +182,7 @@ TEST_F(WiredTigerStatsTest, EmptySession) { // Read and write statistics should be empty. Check "data" field does not exist. "wait" fields // such as the schemaLock might have some value. 
auto statsBson = WiredTigerStats{_session}.toBSON(); - ASSERT_FALSE(statsBson.hasField("data")); + ASSERT_FALSE(statsBson.hasField("data")) << statsBson; } TEST_F(WiredTigerStatsTest, SessionWithWrite) { diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp index af633d7d38a72..a4ce178ba9abc 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.cpp @@ -30,21 +30,62 @@ #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" -#include -#include - +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/simple_string_data_comparator.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/concurrency/exception_util.h" #include "mongo/db/concurrency/exception_util_gen.h" #include "mongo/db/global_settings.h" -#include "mongo/db/server_options_general_gen.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/snapshot_window_options_gen.h" +#include "mongo/db/storage/storage_options.h" #include "mongo/db/storage/wiredtiger/wiredtiger_kv_engine.h" #include "mongo/db/storage/wiredtiger/wiredtiger_parameters_gen.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" +#include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/pcre.h" #include "mongo/util/processinfo.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/static_immortal.h" +#include "mongo/util/str.h" #include "mongo/util/testing_proctor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kWiredTiger @@ -191,13 +232,13 @@ Status wtRCToStatus_slow(int retCode, WT_SESSION* session, StringData prefix) { if (reasonWasCachePressure) { if (txnTooLargeEnabled && isCacheInsufficientForTransaction(session, cacheThreshold)) { - auto s = generateContextStrStream(WT_TXN_ROLLBACK_REASON_TOO_LARGE_FOR_CACHE); - throwTransactionTooLargeForCache(s); + throwTransactionTooLargeForCache( + generateContextStrStream(WT_TXN_ROLLBACK_REASON_TOO_LARGE_FOR_CACHE)); } if (temporarilyUnavailableEnabled) { - auto s = generateContextStrStream(WT_TXN_ROLLBACK_REASON_OLDEST_FOR_EVICTION); - throwTemporarilyUnavailableException(s); + throwTemporarilyUnavailableException( + generateContextStrStream(WT_TXN_ROLLBACK_REASON_OLDEST_FOR_EVICTION)); } } @@ -808,11 +849,41 @@ int mdb_handle_progress(WT_EVENT_HANDLER* handler, return 0; } +/** + * Defines a callback function that can be passed via a WT_EVENT_HANDLER* + * (WT_EVENT_HANDLER::handle_general) into WT::wiredtiger_open() call. + * + * The void* WT_SESSION::app_private is leveraged to inject MDB state into the WT code layer. 
+ * Long running WT::compact operations will periodically use this callback function to check whether + * or not to quit early and fail the WT::compact operation. + */ +int mdb_handle_general(WT_EVENT_HANDLER* handler, + WT_CONNECTION* wt_conn, + WT_SESSION* session, + WT_EVENT_TYPE type, + void* arg) { + if (type != WT_EVENT_COMPACT_CHECK) { + return 0; + } + + OperationContext* opCtx = reinterpret_cast(session->app_private); + invariant(opCtx); + + Status status = opCtx->checkForInterruptNoAssert(); + if (!status.isOK()) { + // Returning non-zero indicates an error to WT. The precise value is irrelevant. + return -1; + } + + return 0; +} + WT_EVENT_HANDLER defaultEventHandlers() { WT_EVENT_HANDLER handlers = {}; handlers.handle_error = mdb_handle_error; handlers.handle_message = mdb_handle_message; handlers.handle_progress = mdb_handle_progress; + handlers.handle_general = mdb_handle_general; return handlers; } } // namespace @@ -825,7 +896,7 @@ WiredTigerEventHandler::WiredTigerEventHandler() { handler->handle_message = mdb_handle_message; handler->handle_progress = mdb_handle_progress; handler->handle_close = nullptr; - handler->handle_general = nullptr; + handler->handle_general = mdb_handle_general; } WT_EVENT_HANDLER* WiredTigerEventHandler::getWtEventHandler() { @@ -939,7 +1010,7 @@ bool WiredTigerUtil::useTableLogging(const NamespaceString& nss) { invariant(nss.size() > 0); // Of the replica set configurations: - if (!nss.isLocal()) { + if (!nss.isLocalDB()) { // All replicated collections are not logged. return false; } diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h index d3211d5a11d9b..dbf7fe6a71393 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util.h +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util.h @@ -29,14 +29,28 @@ #pragma once +#include +#include +#include +#include +#include #include +#include +#include +#include #include #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/import_options.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/storage/durable_catalog.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -44,6 +58,7 @@ namespace mongo { class BSONObjBuilder; class OperationContext; class WiredTigerConfigParser; + class WiredTigerKVEngine; class WiredTigerSession; class WiredTigerSessionCache; diff --git a/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp b/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp index 83851b4ba9b3d..3d283b2157988 100644 --- a/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp +++ b/src/mongo/db/storage/wiredtiger/wiredtiger_util_test.cpp @@ -27,30 +27,39 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - +#include +#include #include #include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" -#include "mongo/db/operation_context_noop.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_oplog_manager.h" #include "mongo/db/storage/wiredtiger/wiredtiger_recovery_unit.h" #include "mongo/db/storage/wiredtiger/wiredtiger_session_cache.h" #include "mongo/db/storage/wiredtiger/wiredtiger_util.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/clock_source.h" #include "mongo/util/system_clock_source.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - namespace mongo { - -using std::string; -using std::stringstream; +namespace { class WiredTigerConnection { public: @@ -61,7 +70,7 @@ class WiredTigerConnection { std::stringstream ss; ss << "create,"; ss << extraStrings; - string config = ss.str(); + std::string config = ss.str(); _fastClockSource = std::make_unique(); int ret = wiredtiger_open(dbpath.toString().c_str(), eventHandler, config.c_str(), &_conn); ASSERT_OK(wtRCToStatus(ret, nullptr)); @@ -86,13 +95,11 @@ class WiredTigerUtilHarnessHelper { public: explicit WiredTigerUtilHarnessHelper(StringData extraStrings, WiredTigerEventHandler* eventHandler = nullptr) - : _dbpath("wt_test"), - _connection(_dbpath.path(), + : _connection(_dbpath.path(), extraStrings, eventHandler == nullptr ? 
nullptr : eventHandler->getWtEventHandler()), _sessionCache(_connection.getConnection(), _connection.getClockSource()) {} - WiredTigerSessionCache* getSessionCache() { return &_sessionCache; } @@ -101,49 +108,39 @@ class WiredTigerUtilHarnessHelper { return &_oplogManager; } - OperationContext* newOperationContext() { - return new OperationContextNoop( - new WiredTigerRecoveryUnit(getSessionCache(), &_oplogManager)); - } - private: - unittest::TempDir _dbpath; + unittest::TempDir _dbpath{"wt_test"}; WiredTigerConnection _connection; WiredTigerSessionCache _sessionCache; WiredTigerOplogManager _oplogManager; }; -class WiredTigerUtilMetadataTest : public mongo::unittest::Test { -public: - virtual void setUp() { - _harnessHelper.reset(new WiredTigerUtilHarnessHelper("")); - _opCtx.reset(_harnessHelper->newOperationContext()); - } - - virtual void tearDown() { - _opCtx.reset(nullptr); - _harnessHelper.reset(nullptr); +class WiredTigerUtilMetadataTest : public ServiceContextTest { +protected: + WiredTigerUtilMetadataTest() : _harnessHelper(""), _opCtxHolder(makeOperationContext()) { + _opCtxHolder->setRecoveryUnit( + std::make_unique(_harnessHelper.getSessionCache(), + _harnessHelper.getOplogManager()), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); } -protected: const char* getURI() const { return "table:mytable"; } OperationContext* getOperationContext() const { - ASSERT(_opCtx.get()); - return _opCtx.get(); + return _opCtxHolder.get(); } void createSession(const char* config) { WT_SESSION* wtSession = - WiredTigerRecoveryUnit::get(_opCtx.get())->getSession()->getSession(); + WiredTigerRecoveryUnit::get(getOperationContext())->getSession()->getSession(); ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, getURI(), config), wtSession)); } private: - std::unique_ptr _harnessHelper; - std::unique_ptr _opCtx; + WiredTigerUtilHarnessHelper _harnessHelper; + ServiceContext::UniqueOperationContext _opCtxHolder; }; TEST_F(WiredTigerUtilMetadataTest, GetMetadataCreateInvalid) { @@ -298,13 +295,16 @@ TEST_F(WiredTigerUtilMetadataTest, CheckApplicationMetadataFormatInvalidURI) { ASSERT_EQUALS(ErrorCodes::FailedToParse, result.code()); } -TEST(WiredTigerUtilTest, GetStatisticsValueMissingTable) { +class WiredTigerUtilTest : public ServiceContextTest {}; + +TEST_F(WiredTigerUtilTest, GetStatisticsValueMissingTable) { WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)"); - WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache(), - harnessHelper.getOplogManager()); - std::unique_ptr opCtx{harnessHelper.newOperationContext()}; - recoveryUnit.setOperationContext(opCtx.get()); - WiredTigerSession* session = recoveryUnit.getSession(); + auto opCtx{makeOperationContext()}; + opCtx->setRecoveryUnit(std::make_unique( + harnessHelper.getSessionCache(), harnessHelper.getOplogManager()), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); + WiredTigerSession* session = + checked_cast(opCtx->recoveryUnit())->getSession(); auto result = WiredTigerUtil::getStatisticsValue(session->getSession(), "statistics:table:no_such_table", "statistics=(fast)", @@ -313,13 +313,14 @@ TEST(WiredTigerUtilTest, GetStatisticsValueMissingTable) { ASSERT_EQUALS(ErrorCodes::CursorNotFound, result.getStatus().code()); } -TEST(WiredTigerUtilTest, GetStatisticsValueStatisticsDisabled) { +TEST_F(WiredTigerUtilTest, GetStatisticsValueStatisticsDisabled) { WiredTigerUtilHarnessHelper harnessHelper("statistics=(none)"); - WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache(), - 
harnessHelper.getOplogManager()); - std::unique_ptr opCtx{harnessHelper.newOperationContext()}; - recoveryUnit.setOperationContext(opCtx.get()); - WiredTigerSession* session = recoveryUnit.getSession(); + auto opCtx{makeOperationContext()}; + opCtx->setRecoveryUnit(std::make_unique( + harnessHelper.getSessionCache(), harnessHelper.getOplogManager()), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); + WiredTigerSession* session = + checked_cast(opCtx->recoveryUnit())->getSession(); WT_SESSION* wtSession = session->getSession(); ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", nullptr), wtSession)); auto result = WiredTigerUtil::getStatisticsValue(session->getSession(), @@ -330,13 +331,14 @@ TEST(WiredTigerUtilTest, GetStatisticsValueStatisticsDisabled) { ASSERT_EQUALS(ErrorCodes::CursorNotFound, result.getStatus().code()); } -TEST(WiredTigerUtilTest, GetStatisticsValueInvalidKey) { +TEST_F(WiredTigerUtilTest, GetStatisticsValueInvalidKey) { WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)"); - WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache(), - harnessHelper.getOplogManager()); - std::unique_ptr opCtx{harnessHelper.newOperationContext()}; - recoveryUnit.setOperationContext(opCtx.get()); - WiredTigerSession* session = recoveryUnit.getSession(); + auto opCtx{makeOperationContext()}; + opCtx->setRecoveryUnit(std::make_unique( + harnessHelper.getSessionCache(), harnessHelper.getOplogManager()), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); + WiredTigerSession* session = + checked_cast(opCtx->recoveryUnit())->getSession(); WT_SESSION* wtSession = session->getSession(); ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", nullptr), wtSession)); // Use connection statistics key which does not apply to a table. @@ -348,13 +350,14 @@ TEST(WiredTigerUtilTest, GetStatisticsValueInvalidKey) { ASSERT_EQUALS(ErrorCodes::NoSuchKey, result.getStatus().code()); } -TEST(WiredTigerUtilTest, GetStatisticsValueValidKey) { +TEST_F(WiredTigerUtilTest, GetStatisticsValueValidKey) { WiredTigerUtilHarnessHelper harnessHelper("statistics=(all)"); - WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache(), - harnessHelper.getOplogManager()); - std::unique_ptr opCtx{harnessHelper.newOperationContext()}; - recoveryUnit.setOperationContext(opCtx.get()); - WiredTigerSession* session = recoveryUnit.getSession(); + auto opCtx{makeOperationContext()}; + opCtx->setRecoveryUnit(std::make_unique( + harnessHelper.getSessionCache(), harnessHelper.getOplogManager()), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); + WiredTigerSession* session = + checked_cast(opCtx->recoveryUnit())->getSession(); WT_SESSION* wtSession = session->getSession(); ASSERT_OK(wtRCToStatus(wtSession->create(wtSession, "table:mytable", nullptr), wtSession)); // Use connection statistics key which does not apply to a table. @@ -367,7 +370,7 @@ TEST(WiredTigerUtilTest, GetStatisticsValueValidKey) { ASSERT_EQUALS(0U, result.getValue()); } -TEST(WiredTigerUtilTest, ParseAPIMessages) { +TEST_F(WiredTigerUtilTest, ParseAPIMessages) { // Custom event handler. WiredTigerEventHandler eventHandler; @@ -379,11 +382,13 @@ TEST(WiredTigerUtilTest, ParseAPIMessages) { WiredTigerUtilHarnessHelper harnessHelper(connection_cfg.c_str(), &eventHandler); // Create a session. 
- WiredTigerRecoveryUnit recoveryUnit(harnessHelper.getSessionCache(), - harnessHelper.getOplogManager()); - std::unique_ptr<OperationContext> opCtx{harnessHelper.newOperationContext()}; - recoveryUnit.setOperationContext(opCtx.get()); - WT_SESSION* wtSession = recoveryUnit.getSession()->getSession(); + auto opCtx{makeOperationContext()}; + opCtx->setRecoveryUnit(std::make_unique<WiredTigerRecoveryUnit>( + harnessHelper.getSessionCache(), harnessHelper.getOplogManager()), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); + WiredTigerSession* session = + checked_cast<WiredTigerRecoveryUnit*>(opCtx->recoveryUnit())->getSession(); + WT_SESSION* wtSession = session->getSession(); // Perform simple WiredTiger operations while capturing the generated logs. startCapturingLogMessages(); @@ -402,7 +407,7 @@ TEST(WiredTigerUtilTest, ParseAPIMessages) { ASSERT_TRUE(foundWTMessage); } -TEST(WiredTigerUtilTest, ParseCompactMessages) { +TEST_F(WiredTigerUtilTest, ParseCompactMessages) { // Custom event handler. WiredTigerEventHandler eventHandler; @@ -439,7 +444,7 @@ TEST(WiredTigerUtilTest, ParseCompactMessages) { ASSERT_TRUE(foundWTMessage); } -TEST(WiredTigerUtilTest, GenerateVerboseConfiguration) { +TEST_F(WiredTigerUtilTest, GenerateVerboseConfiguration) { // Perform each test in their own limited scope in order to establish different // severity levels. @@ -465,7 +470,7 @@ TEST(WiredTigerUtilTest, GenerateVerboseConfiguration) { } } -TEST(WiredTigerUtilTest, RemoveEncryptionFromConfigString) { +TEST_F(WiredTigerUtilTest, RemoveEncryptionFromConfigString) { { // Found at the middle. std::string input{ "debug_mode=(table_logging=true,checkpoint_retention=4),encryption=(name=AES256-CBC," @@ -528,4 +533,6 @@ TEST(WiredTigerUtilTest, RemoveEncryptionFromConfigString) { ASSERT_EQUALS(input, expectedOutput); } } + +} // namespace } // namespace mongo diff --git a/src/mongo/db/storage/write_unit_of_work.cpp b/src/mongo/db/storage/write_unit_of_work.cpp index ea9f65909c973..b61fc3e397271 100644 --- a/src/mongo/db/storage/write_unit_of_work.cpp +++ b/src/mongo/db/storage/write_unit_of_work.cpp @@ -28,12 +28,23 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/storage/write_unit_of_work.h" +#include +#include +#include +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/op_observer/op_observer.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/storage/write_unit_of_work.h b/src/mongo/db/storage/write_unit_of_work.h index 53be2f8d2b335..bdd9e0fb11756 100644 --- a/src/mongo/db/storage/write_unit_of_work.h +++ b/src/mongo/db/storage/write_unit_of_work.h @@ -29,6 +29,7 @@ #pragma once +#include #include namespace mongo { diff --git a/src/mongo/db/system_index.cpp b/src/mongo/db/system_index.cpp index 259d3b9fbf2cc..675995efd0fde 100644 --- a/src/mongo/db/system_index.cpp +++ b/src/mongo/db/system_index.cpp @@ -28,25 +28,42 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/system_index.h" +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h"
+#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/index_spec.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/commit_quorum_options.h" +#include "mongo/db/catalog/index_builds_manager.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_key_validate.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_builds_coordinator.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/storage/storage_options.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/system_index.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -95,7 +112,7 @@ void generateSystemIndexForExistingCollection(OperationContext* opCtx, uassert(ErrorCodes::NotWritablePrimary, "Not primary while creating authorization index", replCoord->getReplicationMode() != repl::ReplicationCoordinator::modeReplSet || - replCoord->canAcceptWritesForDatabase(opCtx, ns.db())); + replCoord->canAcceptWritesForDatabase(opCtx, ns.dbName())); invariant(!opCtx->lockState()->inAWriteUnitOfWork()); diff --git a/src/mongo/db/tenant_id.cpp b/src/mongo/db/tenant_id.cpp index fae76f9dff02e..d44c532ff46d5 100644 --- a/src/mongo/db/tenant_id.cpp +++ b/src/mongo/db/tenant_id.cpp @@ -29,7 +29,14 @@ #include "mongo/db/tenant_id.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/oid.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/tenant_id.h b/src/mongo/db/tenant_id.h index 3e387a64fe055..9b95f4f8724e1 100644 --- a/src/mongo/db/tenant_id.h +++ b/src/mongo/db/tenant_id.h @@ -29,6 +29,9 @@ #pragma once +#include +#include +#include #include #include @@ -38,6 +41,8 @@ #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" #include "mongo/bson/util/builder.h" +#include "mongo/util/static_immortal.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -95,6 +100,17 @@ class TenantId { return H::combine(std::move(h), tenantId._oid); } + /** + * Hash function compatible with absl::Hash with an absl::unordered_{map,set} keyed with + * boost::optional<TenantId>. + */ + template <typename H> + friend H AbslHashValue(H h, const boost::optional<TenantId>& tenantId) { + if (tenantId) + h = H::combine(std::move(h), tenantId->_oid); + return H::combine(std::move(h), tenantId.has_value()); + } + /** * Parse tenant id from BSON. The function is used by IDL parsers.
*/ @@ -107,6 +123,9 @@ class TenantId { void serializeToBSON(BSONArrayBuilder* builder) const; private: + friend class NamespaceString; + friend class DatabaseName; + OID _oid; }; diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_collation_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_collation_node.txt index ac45c72853038..dba4b24dce838 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_collation_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_collation_node.txt @@ -31,7 +31,7 @@ PhysicalScan [{'': scan0}, collName] [4] project [s4 = getField(s1, "c")] [3] project [s3 = getField(s1, "b")] [2] project [s2 = getField(s1, "a")] -[1] scan s1 none none none none none [] @"" true false +[1] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower collation node with two fields ==== -- INPUT: @@ -64,4 +64,4 @@ PhysicalScan [{'': scan0}, collName] [4] project [s4 = getField(s1, "c")] [3] project [s3 = getField(s1, "b")] [2] project [s2 = getField(s1, "a")] -[1] scan s1 none none none none none [] @"" true false +[1] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_exchange_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_exchange_node.txt index a3a558b567baa..b1afaee3ba015 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_exchange_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_exchange_node.txt @@ -21,7 +21,7 @@ PhysicalScan [{'': scan0}, collName] [0] exchange [s2, s3] 1 hash [1] project [s3 = getField(s1, "a")] [1] project [s2 = getField(s1, "a")] -[2] scan s1 none none none none none [] @"" true false +[2] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower exchange node of type Centralized ==== -- INPUT: @@ -44,7 +44,7 @@ PhysicalScan [{'': scan0}, collName] [0] exchange [s2, s3] 1 bcast [1] project [s3 = getField(s1, "a")] [1] project [s2 = getField(s1, "a")] -[2] scan s1 none none none none none [] @"" true false +[2] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower exchange node of type RoundRobin ==== -- INPUT: @@ -67,7 +67,7 @@ PhysicalScan [{'': scan0}, collName] [0] exchange [s2, s3] 1 round [1] project [s3 = getField(s1, "a")] [1] project [s2 = getField(s1, "a")] -[2] scan s1 none none none none none [] @"" true false +[2] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower exchange node of type Replicated ==== -- INPUT: @@ -90,4 +90,4 @@ PhysicalScan [{'': scan0}, collName] [0] exchange [s2, s3] 1 bcast [1] project [s3 = getField(s1, "a")] [1] project [s2 = getField(s1, "a")] -[2] scan s1 none none none none none [] @"" true false +[2] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_filter_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_filter_node.txt index 5387036275901..8a75e32724067 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_filter_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_filter_node.txt @@ -16,7 +16,7 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [1] filter {(((getField(s1, "a") <=> 23) >= 0ll) ?: false)} -[0] scan s1 none none none none 
none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: filter for constant: true ==== -- INPUT: @@ -28,4 +28,4 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [1] cfilter {(true ?: false)} -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_group_by_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_group_by_node.txt index 197ee0d6108c5..338bc5e081010 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_group_by_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_group_by_node.txt @@ -26,7 +26,7 @@ PhysicalScan [{'': scan0}, collName] [3] project [s4 = getField(s1, "c")] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: GroupByNode multiple outputs with type Complete ==== -- INPUT: @@ -62,7 +62,7 @@ PhysicalScan [{'': scan0}, collName] [3] project [s4 = getField(s1, "c")] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: GroupByNode one output with type Local ==== -- INPUT: @@ -90,7 +90,7 @@ PhysicalScan [{'': scan0}, collName] [3] project [s4 = getField(s1, "c")] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: GroupByNode multiple outputs with type Local ==== -- INPUT: @@ -126,7 +126,7 @@ PhysicalScan [{'': scan0}, collName] [3] project [s4 = getField(s1, "c")] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: GroupByNode one output with type Global ==== -- INPUT: @@ -154,7 +154,7 @@ PhysicalScan [{'': scan0}, collName] [3] project [s4 = getField(s1, "c")] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: GroupByNode multiple outputs with type Global ==== -- INPUT: @@ -190,4 +190,4 @@ PhysicalScan [{'': scan0}, collName] [3] project [s4 = getField(s1, "c")] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_hash_join_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_hash_join_node.txt index b24016b971dec..7ce2ba53eb76d 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_hash_join_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_hash_join_node.txt @@ -38,11 +38,11 @@ PhysicalScan [{'': scan0}, collName] left [s5] [s6] [7] project [s6 = getField(s4, "other_info")] [5] project [s5 = getField(s4, "id")] - [4] scan s4 none none none none none [] @"" 
true false + [4] scan s4 none none none none none none none lowPriority [] @"" true false right [s2] [s3] [3] project [s3 = getField(s1, "info")] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Hash join with two equalities ==== @@ -101,12 +101,12 @@ PhysicalScan [{'': scan0}, collName] [11] project [s10 = getField(s7, "another")] [9] project [s9 = getField(s7, "state_id")] [8] project [s8 = getField(s7, "cityField")] - [7] scan s7 none none none none none [] @"" true false + [7] scan s7 none none none none none none none lowPriority [] @"" true false right [s2, s3] [s4, s5, s6] [6] project [s6 = getField(s1, "some_more_info")] [4] project [s5 = getField(s1, "more_info")] [3] project [s4 = getField(s1, "info")] [2] project [s3 = getField(s1, "state")] [1] project [s2 = getField(s1, "city")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_index_scan_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_index_scan_node.txt index d0ed2080b99c3..2786522ff59e8 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_index_scan_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_index_scan_node.txt @@ -5,25 +5,25 @@ IndexScan [{'': rid}, scanDefName: collName, indexDefName: index0, interval: {(Const [23], Const [35]]}] -- OUTPUT: -[0] ixseek ks(2ll, 0, 23L, 2ll) ks(2ll, 0, 35L, 2ll) none s1 none [] @"" @"" true +[0] ixseek ks(1ll, 0, 23L, 2ll) ks(1ll, 0, 35L, 2ll) none s1 none none [] @"" @"" true ==== VARIATION: Covering forward index scan with one field ==== -- INPUT: IndexScan [{' 0': proj0}, scanDefName: collName, indexDefName: index0, interval: {[Const [26], Const [35])}] -- OUTPUT: -[0] ixseek ks(2ll, 0, 26L, 1ll) ks(2ll, 0, 35L, 1ll) none none none [s1 = 0] @"" @"" true +[0] ixseek ks(1ll, 0, 26L, 1ll) ks(1ll, 0, 35L, 1ll) none none none none [s1 = 0] @"" @"" true ==== VARIATION: Basic reverse index scan with RID ==== -- INPUT: IndexScan [{'': rid}, scanDefName: collName, indexDefName: index0, interval: {[Const [27], Const [135])}, reversed] -- OUTPUT: -[0] ixseek ks(2ll, 0, 135L, 1ll) ks(2ll, 0, 27L, 1ll) none s1 none [] @"" @"" false +[0] ixseek ks(1ll, 0, 135L, 1ll) ks(1ll, 0, 27L, 1ll) none s1 none none [] @"" @"" false ==== VARIATION: Covering reverse index scan with one field ==== -- INPUT: IndexScan [{' 0': proj0}, scanDefName: collName, indexDefName: index0, interval: {[Const [29], Const [47]]}, reversed] -- OUTPUT: -[0] ixseek ks(2ll, 0, 47L, 2ll) ks(2ll, 0, 29L, 1ll) none none none [s1 = 0] @"" @"" false +[0] ixseek ks(1ll, 0, 47L, 2ll) ks(1ll, 0, 29L, 1ll) none none none none [s1 = 0] @"" @"" false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_limit_skip_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_limit_skip_node.txt index 3726b09e5c053..349497a34e0d6 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_limit_skip_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_limit_skip_node.txt @@ -7,7 +7,7 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [1] limitskip 5 0 -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== 
VARIATION: Lower single skip without limit ==== -- INPUT: @@ -16,7 +16,7 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [1] limitskip 0 4 -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower LimitSkip node with values for both limit and skip ==== -- INPUT: @@ -25,4 +25,4 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [1] limitskip 4 2 -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_merge_join_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_merge_join_node.txt index 5cb6cafeb4091..94764f684d110 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_merge_join_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_merge_join_node.txt @@ -22,10 +22,10 @@ PhysicalScan [{'': scan0}, collName] [4] mj [asc] left [s2] [] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false right [s4] [] [3] project [s4 = getField(s3, "id")] - [2] scan s3 none none none none none [] @"" true false + [2] scan s3 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower merge join with one projection (collation=Descending) ==== @@ -50,10 +50,10 @@ PhysicalScan [{'': scan0}, collName] [4] mj [desc] left [s2] [] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false right [s4] [] [3] project [s4 = getField(s3, "id")] - [2] scan s3 none none none none none [] @"" true false + [2] scan s3 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower merge join with two projections (collation=Ascending, Ascending) ==== @@ -89,11 +89,11 @@ PhysicalScan [{'': scan0}, collName] [asc, asc] left [s2, s3] [] [2] project [s3 = getField(s1, "city")] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false right [s5, s6] [] [5] project [s6 = getField(s4, "city")] [4] project [s5 = getField(s4, "id")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower merge join with two projections (collation=Ascending, Descending) ==== @@ -129,11 +129,11 @@ PhysicalScan [{'': scan0}, collName] [asc, desc] left [s2, s3] [] [2] project [s3 = getField(s1, "city")] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false right [s5, s6] [] [5] project [s6 = getField(s4, "city")] [4] project [s5 = getField(s4, "id")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower merge join with required projection (collation=Ascending) ==== @@ -176,11 +176,11 @@ PhysicalScan [{'': scan0}, collName] [asc] left [s2] [s3] [3] project [s3 = getField(s1, "city")] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none 
none none none none none lowPriority [] @"" true false right [s5] [s6] [7] project [s6 = getField(s4, "city")] [5] project [s5 = getField(s4, "id")] - [4] scan s4 none none none none none [] @"" true false + [4] scan s4 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower merge join with two projections (collation=Descending, Ascending) ==== @@ -216,11 +216,11 @@ PhysicalScan [{'': scan0}, collName] [desc, asc] left [s2, s3] [] [2] project [s3 = getField(s1, "city")] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false right [s5, s6] [] [5] project [s6 = getField(s4, "city")] [4] project [s5 = getField(s4, "id")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower merge join with two projections (collation=Descending, Descending) ==== @@ -256,11 +256,11 @@ PhysicalScan [{'': scan0}, collName] [desc, desc] left [s2, s3] [] [2] project [s3 = getField(s1, "city")] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false right [s5, s6] [] [5] project [s6 = getField(s4, "city")] [4] project [s5 = getField(s4, "id")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower merge join with required projection (collation=Descending) ==== @@ -303,9 +303,9 @@ PhysicalScan [{'': scan0}, collName] [desc] left [s2] [s3] [3] project [s3 = getField(s1, "city")] [1] project [s2 = getField(s1, "other_id")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false right [s5] [s6] [7] project [s6 = getField(s4, "city")] [5] project [s5 = getField(s4, "id")] - [4] scan s4 none none none none none [] @"" true false + [4] scan s4 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_multiple_evaluation_nodes.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_multiple_evaluation_nodes.txt index 5ad8432c3801f..ea4182dc3ecee 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_multiple_evaluation_nodes.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_multiple_evaluation_nodes.txt @@ -15,4 +15,4 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_nested_loop_join_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_nested_loop_join_node.txt index 2392dfdf8f362..452a4af951f5e 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_nested_loop_join_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_nested_loop_join_node.txt @@ -37,10 +37,10 @@ PhysicalScan [{'': scan0}, collName] left [3] project [s3 = getField(s1, "zipcode")] [1] project [s2 = getField(s1, "city")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none 
lowPriority [] @"" true false right [5] project [s5 = getField(s4, "id")] - [4] scan s4 none none none none none [] @"" true false + [4] scan s4 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Nested loop join with equality predicate (Left join) ==== @@ -80,8 +80,8 @@ PhysicalScan [{'': scan0}, collName] left [3] project [s3 = getField(s1, "zipcode")] [1] project [s2 = getField(s1, "city")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false right [5] project [s5 = getField(s4, "id")] - [4] scan s4 none none none none none [] @"" true false + [4] scan s4 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_physical_scan_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_physical_scan_node.txt index 0b689e3bdc361..8810f6c5e21c9 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_physical_scan_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_physical_scan_node.txt @@ -5,28 +5,28 @@ PhysicalScan [{'': root0}, collName] -- OUTPUT: -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Physical scan with RID projection (not parallel) ==== -- INPUT: PhysicalScan [{'': RID0}, collName] -- OUTPUT: -[0] scan none s1 none none none none [] @"" true false +[0] scan none s1 none none none none none none lowPriority [] @"" true false ==== VARIATION: Physical scan with root and RID projections (not parallel) ==== -- INPUT: PhysicalScan [{'': RID0, '': root0}, collName] -- OUTPUT: -[0] scan s2 s1 none none none none [] @"" true false +[0] scan s2 s1 none none none none none none lowPriority [] @"" true false ==== VARIATION: Physical scan with root, RID and field projections (not parallel) ==== -- INPUT: PhysicalScan [{'': RID0, '': root0, 'field': field2}, collName] -- OUTPUT: -[0] scan s2 s1 none none none none [s3 = field] @"" true false +[0] scan s2 s1 none none none none none none lowPriority [s3 = field] @"" true false ==== VARIATION: Physical scan with root projection (parallel) ==== -- INPUT: diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_seek_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_seek_node.txt index 5835285d2336e..0292de4fdbae0 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_seek_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_seek_node.txt @@ -11,8 +11,8 @@ IndexScan [{'': rid}, scanDefName: collName, indexDefName: index0, interval -- OUTPUT: [3] nlj inner [] [s1] {true} left - [0] ixseek ks(2ll, 0, 23L, 2ll) ks(2ll, 0, 35L, 2ll) none s1 none [] @"" @"" true + [0] ixseek ks(1ll, 0, 23L, 2ll) ks(1ll, 0, 35L, 2ll) none s1 none none [] @"" @"" true right [2] limitskip 1 0 - [1] seek s1 s2 none none none none none [] @"" true false + [1] seek s1 s2 none none none none none none none [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_sorted_merge_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_sorted_merge_node.txt index b983e00179044..030533a3b4705 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_sorted_merge_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_sorted_merge_node.txt @@ -19,7 +19,7 @@ 
PhysicalScan [{'': scan0}, collName] [3] smerge [s4] [asc] [ [s2] [s2] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: two sources sorted on `a` Ascending ==== @@ -50,10 +50,10 @@ PhysicalScan [{'': scan0}, collName] [6] smerge [s7] [asc] [ [s2] [s2] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5] [s5] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: five sources sorted on `a` Ascending ==== @@ -111,19 +111,19 @@ PhysicalScan [{'': scan0}, collName] [15] smerge [s16] [asc] [ [s2] [s2] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5] [s5] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false , + [3] scan s4 none none none none none none none lowPriority [] @"" true false , [s8] [s8] [8] project [s9 = getField(s7, "b")] [7] project [s8 = getField(s7, "a")] - [6] scan s7 none none none none none [] @"" true false , + [6] scan s7 none none none none none none none lowPriority [] @"" true false , [s11] [s11] [11] project [s12 = getField(s10, "b")] [10] project [s11 = getField(s10, "a")] - [9] scan s10 none none none none none [] @"" true false , + [9] scan s10 none none none none none none none lowPriority [] @"" true false , [s14] [s14] [14] project [s15 = getField(s13, "b")] [13] project [s14 = getField(s13, "a")] - [12] scan s13 none none none none none [] @"" true false + [12] scan s13 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: one source sorted on `a` Ascending and `b` Ascending ==== @@ -146,7 +146,7 @@ PhysicalScan [{'': scan0}, collName] [3] smerge [s4, s5] [asc, asc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: two sources sorted on `a` Ascending and `b` Ascending ==== @@ -178,10 +178,10 @@ PhysicalScan [{'': scan0}, collName] [6] smerge [s7, s8] [asc, asc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5, s6] [s5, s6] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: five sources sorted on `a` Ascending and `b` Ascending ==== @@ -240,19 +240,19 @@ PhysicalScan [{'': scan0}, collName] [15] smerge [s16, s17] [asc, asc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none 
lowPriority [] @"" true false , [s5, s6] [s5, s6] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false , + [3] scan s4 none none none none none none none lowPriority [] @"" true false , [s8, s9] [s8, s9] [8] project [s9 = getField(s7, "b")] [7] project [s8 = getField(s7, "a")] - [6] scan s7 none none none none none [] @"" true false , + [6] scan s7 none none none none none none none lowPriority [] @"" true false , [s11, s12] [s11, s12] [11] project [s12 = getField(s10, "b")] [10] project [s11 = getField(s10, "a")] - [9] scan s10 none none none none none [] @"" true false , + [9] scan s10 none none none none none none none lowPriority [] @"" true false , [s14, s15] [s14, s15] [14] project [s15 = getField(s13, "b")] [13] project [s14 = getField(s13, "a")] - [12] scan s13 none none none none none [] @"" true false + [12] scan s13 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: one source sorted on `a` Ascending and `b` Descending ==== @@ -275,7 +275,7 @@ PhysicalScan [{'': scan0}, collName] [3] smerge [s4, s5] [asc, desc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: two sources sorted on `a` Ascending and `b` Descending ==== @@ -307,10 +307,10 @@ PhysicalScan [{'': scan0}, collName] [6] smerge [s7, s8] [asc, desc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5, s6] [s5, s6] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: five sources sorted on `a` Ascending and `b` Descending ==== @@ -369,19 +369,19 @@ PhysicalScan [{'': scan0}, collName] [15] smerge [s16, s17] [asc, desc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5, s6] [s5, s6] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false , + [3] scan s4 none none none none none none none lowPriority [] @"" true false , [s8, s9] [s8, s9] [8] project [s9 = getField(s7, "b")] [7] project [s8 = getField(s7, "a")] - [6] scan s7 none none none none none [] @"" true false , + [6] scan s7 none none none none none none none lowPriority [] @"" true false , [s11, s12] [s11, s12] [11] project [s12 = getField(s10, "b")] [10] project [s11 = getField(s10, "a")] - [9] scan s10 none none none none none [] @"" true false , + [9] scan s10 none none none none none none none lowPriority [] @"" true false , [s14, s15] [s14, s15] [14] project [s15 = getField(s13, "b")] [13] project [s14 = getField(s13, "a")] - [12] scan s13 none none none none none [] @"" true false + [12] scan s13 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: one source sorted on `a` Descending ==== @@ -403,7 +403,7 @@ PhysicalScan [{'': scan0}, collName] [3] smerge [s4] [desc] [ [s2] [s2] [2] project [s3 = getField(s1, "b")] 
[1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: two sources sorted on `a` Descending ==== @@ -434,10 +434,10 @@ PhysicalScan [{'': scan0}, collName] [6] smerge [s7] [desc] [ [s2] [s2] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5] [s5] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: five sources sorted on `a` Descending ==== @@ -495,19 +495,19 @@ PhysicalScan [{'': scan0}, collName] [15] smerge [s16] [desc] [ [s2] [s2] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5] [s5] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false , + [3] scan s4 none none none none none none none lowPriority [] @"" true false , [s8] [s8] [8] project [s9 = getField(s7, "b")] [7] project [s8 = getField(s7, "a")] - [6] scan s7 none none none none none [] @"" true false , + [6] scan s7 none none none none none none none lowPriority [] @"" true false , [s11] [s11] [11] project [s12 = getField(s10, "b")] [10] project [s11 = getField(s10, "a")] - [9] scan s10 none none none none none [] @"" true false , + [9] scan s10 none none none none none none none lowPriority [] @"" true false , [s14] [s14] [14] project [s15 = getField(s13, "b")] [13] project [s14 = getField(s13, "a")] - [12] scan s13 none none none none none [] @"" true false + [12] scan s13 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: one source sorted on `a` Descending and `b` Ascending ==== @@ -530,7 +530,7 @@ PhysicalScan [{'': scan0}, collName] [3] smerge [s4, s5] [desc, asc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: two sources sorted on `a` Descending and `b` Ascending ==== @@ -562,10 +562,10 @@ PhysicalScan [{'': scan0}, collName] [6] smerge [s7, s8] [desc, asc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5, s6] [s5, s6] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: five sources sorted on `a` Descending and `b` Ascending ==== @@ -624,19 +624,19 @@ PhysicalScan [{'': scan0}, collName] [15] smerge [s16, s17] [desc, asc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5, s6] [s5, s6] [5] project [s6 = getField(s4, "b")] [4] project [s5 = 
getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false , + [3] scan s4 none none none none none none none lowPriority [] @"" true false , [s8, s9] [s8, s9] [8] project [s9 = getField(s7, "b")] [7] project [s8 = getField(s7, "a")] - [6] scan s7 none none none none none [] @"" true false , + [6] scan s7 none none none none none none none lowPriority [] @"" true false , [s11, s12] [s11, s12] [11] project [s12 = getField(s10, "b")] [10] project [s11 = getField(s10, "a")] - [9] scan s10 none none none none none [] @"" true false , + [9] scan s10 none none none none none none none lowPriority [] @"" true false , [s14, s15] [s14, s15] [14] project [s15 = getField(s13, "b")] [13] project [s14 = getField(s13, "a")] - [12] scan s13 none none none none none [] @"" true false + [12] scan s13 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: one source sorted on `a` Descending and `b` Descending ==== @@ -659,7 +659,7 @@ PhysicalScan [{'': scan0}, collName] [3] smerge [s4, s5] [desc, desc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: two sources sorted on `a` Descending and `b` Descending ==== @@ -691,10 +691,10 @@ PhysicalScan [{'': scan0}, collName] [6] smerge [s7, s8] [desc, desc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5, s6] [s5, s6] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ] ==== VARIATION: five sources sorted on `a` Descending and `b` Descending ==== @@ -753,17 +753,17 @@ PhysicalScan [{'': scan0}, collName] [15] smerge [s16, s17] [desc, desc] [ [s2, s3] [s2, s3] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false , + [0] scan s1 none none none none none none none lowPriority [] @"" true false , [s5, s6] [s5, s6] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false , + [3] scan s4 none none none none none none none lowPriority [] @"" true false , [s8, s9] [s8, s9] [8] project [s9 = getField(s7, "b")] [7] project [s8 = getField(s7, "a")] - [6] scan s7 none none none none none [] @"" true false , + [6] scan s7 none none none none none none none lowPriority [] @"" true false , [s11, s12] [s11, s12] [11] project [s12 = getField(s10, "b")] [10] project [s11 = getField(s10, "a")] - [9] scan s10 none none none none none [] @"" true false , + [9] scan s10 none none none none none none none lowPriority [] @"" true false , [s14, s15] [s14, s15] [14] project [s15 = getField(s13, "b")] [13] project [s14 = getField(s13, "a")] - [12] scan s13 none none none none none [] @"" true false + [12] scan s13 none none none none none none none lowPriority [] @"" true false ] diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_spool_nodes.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_spool_nodes.txt index 97565bc3e9182..314601b638aee 100644 --- 
a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_spool_nodes.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_spool_nodes.txt @@ -17,7 +17,7 @@ PhysicalScan [{'': scan0}, collName] branch0 [s2] [2] espool sp1 [s2] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false branch1 [s3] [3] cspool sp1 [s3] @@ -39,7 +39,7 @@ PhysicalScan [{'': scan0}, collName] branch0 [s2] [2] espool sp1 [s2] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false branch1 [s3] [3] sspool sp1 [s3] @@ -61,7 +61,7 @@ PhysicalScan [{'': scan0}, collName] branch0 [s2] [2] lspool sp1 [s2] {true} [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false branch1 [s3] [3] cspool sp1 [s3] @@ -83,7 +83,7 @@ PhysicalScan [{'': scan0}, collName] branch0 [s2] [2] lspool sp1 [s2] {true} [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false branch1 [s3] [3] sspool sp1 [s3] @@ -113,7 +113,7 @@ PhysicalScan [{'': scan0}, collName] branch0 [s2] [2] lspool sp1 [s2] {(((getField(s1, "b") <=> 23) >= 0ll) ?: false)} [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false branch1 [s3] [3] sspool sp1 [s3] diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_union_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_union_node.txt index 2c94235a20ec6..decca85534cd7 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_union_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_union_node.txt @@ -16,7 +16,7 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: UnionNode with two children ==== -- INPUT: @@ -45,11 +45,11 @@ PhysicalScan [{'': scan0}, collName] branch0 [s2] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false branch1 [s5] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false ==== VARIATION: UnionNode with many children ==== @@ -106,21 +106,21 @@ PhysicalScan [{'': scan0}, collName] branch0 [s2] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] - [0] scan s1 none none none none none [] @"" true false + [0] scan s1 none none none none none none none lowPriority [] @"" true false branch1 [s5] [5] project [s6 = getField(s4, "b")] [4] project [s5 = getField(s4, "a")] - [3] scan s4 none none none none none [] @"" true false + [3] scan s4 none none none none none none none lowPriority [] @"" true false branch2 [s8] [8] project [s9 = getField(s7, "b")] [7] project [s8 = getField(s7, "a")] 
- [6] scan s7 none none none none none [] @"" true false + [6] scan s7 none none none none none none none lowPriority [] @"" true false branch3 [s11] [11] project [s12 = getField(s10, "b")] [10] project [s11 = getField(s10, "a")] - [9] scan s10 none none none none none [] @"" true false + [9] scan s10 none none none none none none none lowPriority [] @"" true false branch4 [s14] [14] project [s15 = getField(s13, "b")] [13] project [s14 = getField(s13, "a")] - [12] scan s13 none none none none none [] @"" true false + [12] scan s13 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_unique_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_unique_node.txt index c9d51e2e49a9e..6b3143a8f9d88 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_unique_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_unique_node.txt @@ -12,7 +12,7 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [2] unique [s2] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower unique node with multiple keys ==== -- INPUT: @@ -36,4 +36,4 @@ PhysicalScan [{'': scan0}, collName] [3] project [s4 = getField(s1, "c")] [2] project [s3 = getField(s1, "b")] [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_unwind_node.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_unwind_node.txt index f18a29a7df0e8..18b7d6d07fc58 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_unwind_node.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_unwind_node.txt @@ -12,7 +12,7 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [2] unwind s3 s4 s2 false [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false ==== VARIATION: Lower UnwindNode keep non-arrays ==== -- INPUT: @@ -26,4 +26,4 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [2] unwind s3 s4 s2 true [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_var_expression.txt b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_var_expression.txt index 375f485c59137..2fea47bf6c722 100644 --- a/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_var_expression.txt +++ b/src/mongo/db/test_output/exec/sbe/a_b_t_plan_generation/lower_var_expression.txt @@ -10,4 +10,4 @@ PhysicalScan [{'': scan0}, collName] -- OUTPUT: [1] project [s2 = getField(s1, "a")] -[0] scan s1 none none none none none [] @"" true false +[0] scan s1 none none none none none none none lowPriority [] @"" true false diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_optimization_test/optimize_pipeline_tests.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_optimization_test/optimize_pipeline_tests.txt index b3e25db6a9c07..303ac77e9d7a1 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_optimization_test/optimize_pipeline_tests.txt +++ 
b/src/mongo/db/test_output/pipeline/abt/a_b_t_optimization_test/optimize_pipeline_tests.txt @@ -18,7 +18,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -27,7 +27,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoSubstitutionPhase @@ -65,7 +65,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -74,7 +74,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoSubstitutionPhase @@ -105,7 +105,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre MemoExplorationPhase @@ -151,7 +151,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoExplorationPhase MemoImplementationPhase @@ -187,7 +187,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre @@ -234,7 +234,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoExplorationPhase MemoImplementationPhase @@ -286,7 +286,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPost ConstEvalPre @@ -336,7 +336,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -345,7 +345,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoExplorationPhase MemoImplementationPhase @@ -379,7 +379,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 0 distribution and paths: @@ -388,7 +388,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre MemoExplorationPhase @@ -426,7 +426,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 0 distribution and paths: @@ -435,7 +435,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre MemoExplorationPhase @@ -478,7 +478,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 0 distribution and paths: @@ -487,7 +487,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre MemoExplorationPhase @@ -535,7 +535,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 0 distribution and paths: @@ -544,7 +544,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre MemoExplorationPhase @@ -600,7 +600,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 0 distribution and paths: @@ -609,7 +609,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre MemoExplorationPhase @@ -656,7 +656,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -665,7 +665,7 @@ metadata: requirements: 
collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoExplorationPhase MemoImplementationPhase @@ -701,7 +701,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -710,7 +710,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoExplorationPhase MemoImplementationPhase @@ -766,7 +766,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -775,7 +775,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoExplorationPhase MemoImplementationPhase @@ -810,7 +810,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -819,7 +819,7 @@ metadata: requirements: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPost MemoExplorationPhase @@ -854,7 +854,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPost ConstEvalPre @@ -912,7 +912,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) collB: options: distribution and paths: @@ -920,7 +920,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoExplorationPhase MemoImplementationPhase @@ -960,7 +960,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre @@ -1004,7 +1004,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPre MemoExplorationPhase @@ -1045,7 +1045,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: MemoSubstitutionPhase diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_optimization_test/partial_index.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_optimization_test/partial_index.txt index 2454082483736..e7f944ff1f376 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_optimization_test/partial_index.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_optimization_test/partial_index.txt @@ -18,7 +18,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -28,7 +28,7 @@ metadata: {{{scan_0, 'PathGet [b] PathTraverse [1] PathIdentity []', {{{=Const [2]}}}}}} collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPost ConstEvalPre @@ -77,7 +77,7 @@ metadata: PathIdentity [] collation op: Ascending - version: 2 + version: 1 ordering bits: 0 is multi-key: 1 distribution and paths: @@ -87,7 +87,7 @@ metadata: {{{scan_0, 'PathGet [b] PathTraverse [1] PathIdentity []', {{{=Const [2]}}}}}} collection exists: 1 - CE type: -1 + CE type: (empty) optimization phases: ConstEvalPost ConstEvalPre diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/and_or_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/and_or_translation.txt index f55a78c4db18b..c09a9cad76c49 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/and_or_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/and_or_translation.txt @@ -11,7 
+11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -45,7 +45,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -77,7 +77,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/computed_projection_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/computed_projection_translation.txt index 4e5ac46063f99..df1d58670c7dd 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/computed_projection_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/computed_projection_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -45,7 +45,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -85,7 +85,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -133,7 +133,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -173,7 +173,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{newRoot_0}] @@ -198,7 +198,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/elem_match_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/elem_match_translation.txt index 7bff5c24fe832..79cebdce693f7 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/elem_match_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/elem_match_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -47,7 +47,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -78,7 +78,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -131,7 +131,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/eq_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/eq_translation.txt index 0d85d73b9bb58..690b08ac347dc 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/eq_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/eq_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -38,7 +38,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -70,7 +70,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root 
[{scan_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/exists_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/exists_translation.txt index 09d46b1afa654..d2b861e89801d 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/exists_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/exists_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -37,7 +37,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -67,7 +67,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/group_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/group_translation.txt index 3b141a7308440..a63907d9a8770 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/group_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/group_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -67,7 +67,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{agg_project_0}] @@ -119,7 +119,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{agg_project_0}] @@ -164,7 +164,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{agg_project_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/in_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/in_translation.txt index 05cb5c9f224ee..ba37383391ef8 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/in_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/in_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -36,7 +36,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -63,7 +63,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -90,7 +90,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/inequality_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/inequality_translation.txt index 83c3c47cbffcf..c5e04647fc748 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/inequality_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/inequality_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -41,7 +41,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] diff --git 
a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/match_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/match_translation.txt index 6fa33dc0d1625..fedc02072d7c9 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/match_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/match_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [] @@ -52,7 +52,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [] @@ -91,7 +91,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [] @@ -124,7 +124,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [] @@ -170,7 +170,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [] @@ -217,7 +217,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [] @@ -258,7 +258,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/not_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/not_translation.txt index bf41193d80c8d..92fa56ff6f3ef 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/not_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/not_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -41,7 +41,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -72,7 +72,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/simple_projection_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/simple_projection_translation.txt index 1e8c394b67701..7fe6b01df376a 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/simple_projection_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/simple_projection_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -35,7 +35,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -65,7 +65,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] @@ -89,7 +89,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{combinedProjection_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/sort_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/sort_translation.txt index a1d136bca4b2b..abb478b9bd9d0 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/sort_translation.txt +++ 
b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/sort_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] @@ -44,7 +44,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/union_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/union_translation.txt index a35d90c74946c..5035cac90cbc9 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/union_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/union_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) collB: options: distribution and paths: @@ -19,7 +19,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{scan_0}] diff --git a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/unwind_translation.txt b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/unwind_translation.txt index 67aebdbe85239..eba06128873a3 100644 --- a/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/unwind_translation.txt +++ b/src/mongo/db/test_output/pipeline/abt/a_b_t_translation_test/unwind_translation.txt @@ -11,7 +11,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{embedProj_0}] @@ -49,7 +49,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{embedPidProj_0}] @@ -105,7 +105,7 @@ metadata: distribution paths: indexes: collection exists: 1 - CE type: -1 + CE type: (empty) -- OUTPUT: Root [{agg_project_0}] diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt index 2bad74dcb2996..e2968be4f49e1 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/check_collation_is_encoded.txt @@ -5,4 +5,4 @@ Sort: {} Proj: {} Collation: { locale: "mock_reverse_string" } -an[eqa,eqb]|||mock_reverse_string02300000|ff +an[eqa,eqb]|||mock_reverse_string02300000f diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt index 64f1be5f7487e..4e4a43d3a420a 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key.txt @@ -1,80 +1,80 @@ ==== VARIATION: query={}, sort={}, proj={} -an||||ff +an|||f ==== VARIATION: query={ $or: [ { a: 1 }, { b: 2 } ] }, sort={}, proj={} -or[eqa,eqb]||||ff +or[eqa,eqb]|||f ==== VARIATION: query={ $or: [ { a: 1 }, { b: 1 }, { c: 1 } ], d: 1 }, sort={}, proj={} -an[or[eqa,eqb,eqc],eqd]||||ff +an[or[eqa,eqb,eqc],eqd]|||f ==== VARIATION: query={ $or: [ { a: 1 }, { b: 1 } ], c: 1, d: 1 }, sort={}, proj={} -an[or[eqa,eqb],eqc,eqd]||||ff +an[or[eqa,eqb],eqc,eqd]|||f ==== VARIATION: query={ a: 1, b: 1, c: 1 }, sort={}, proj={} -an[eqa,eqb,eqc]||||ff +an[eqa,eqb,eqc]|||f ==== VARIATION: query={ a: 1, beqc: 1 }, sort={}, proj={} -an[eqa,eqbeqc]||||ff +an[eqa,eqbeqc]|||f ==== 
VARIATION: query={ ap1a: 1 }, sort={}, proj={} -eqap1a||||ff +eqap1a|||f ==== VARIATION: query={ aab: 1 }, sort={}, proj={} -eqaab||||ff +eqaab|||f ==== VARIATION: query={}, sort={ a: 1 }, proj={} -an|aa|||ff +an|aa||f ==== VARIATION: query={}, sort={ a: -1 }, proj={} -an|da|||ff +an|da||f ==== VARIATION: query={ $text: { $search: "search keywords" } }, sort={ a: { $meta: "textScore" } }, proj={ a: { $meta: "textScore" } } -te_fts|ta|||ff +te_fts|ta||f ==== VARIATION: query={ a: 1 }, sort={ b: 1 }, proj={} -eqa|ab|||ff +eqa|ab||f ==== VARIATION: query={}, sort={}, proj={ a: 1 } -an||_id-a||ff +an||_id-a|f ==== VARIATION: query={}, sort={}, proj={ a: -1 } -an||_id-a||ff +an||_id-a|f ==== VARIATION: query={}, sort={}, proj={ a: -1.0 } -an||_id-a||ff +an||_id-a|f ==== VARIATION: query={}, sort={}, proj={ a: true } -an||_id-a||ff +an||_id-a|f ==== VARIATION: query={}, sort={}, proj={ a: 0 } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: false } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: 99 } -an||_id-a||ff +an||_id-a|f ==== VARIATION: query={}, sort={}, proj={ a: "foo" } -an||_id||ff +an||_id|f ==== VARIATION: query={}, sort={}, proj={ a: { $slice: [ 3, 5 ] } } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: { $slice: [ 3, 5 ] }, b: 0 } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: { $slice: [ 3, 5 ] }, b: 1 } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: { $elemMatch: { x: 2 } } } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: { $elemMatch: { x: 2 } }, b: 0 } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: { $elemMatch: { x: 2 } }, b: 1 } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: { $slice: [ 3, 5 ] }, b: { $elemMatch: { x: 2 } } } -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={ a: ObjectId('507f191e810c19729de860ea') } -an||_id||ff +an||_id|f ==== VARIATION: query={}, sort={}, proj={ _id: 0, a: ObjectId('507f191e810c19729de860ea'), b: "foo" } -an||||ff +an|||f ==== VARIATION: query={ a: 1 }, sort={}, proj={ a.$: 1 } -eqa||||ff +eqa|||f ==== VARIATION: query={ a: 1 }, sort={}, proj={ a: 1 } -eqa||_id-a||ff +eqa||_id-a|f ==== VARIATION: query={}, sort={}, proj={ a: 1, b: 1 } -an||_id-a-b||ff +an||_id-a-b|f ==== VARIATION: query={}, sort={}, proj={ b: 1, a: 1 } -an||_id-a-b||ff +an||_id-a-b|f ==== VARIATION: query={}, sort={}, proj={ b-1: 1, a-2: 1 } -an||_id-a\-2-b\-1||ff +an||_id-a\-2-b\-1|f ==== VARIATION: query={}, sort={ x: 1 }, proj={ $sortKey: { $meta: "sortKey" } } -an|ax|||ff +an|ax||f ==== VARIATION: query={}, sort={}, proj={} -an||||ff +an|||f ==== VARIATION: query={}, sort={ x: 1 }, proj={ a: 1, $sortKey: { $meta: "sortKey" } } -an|ax|_id-a||ff +an|ax|_id-a|f ==== VARIATION: query={}, sort={}, proj={ a: 1 } -an||_id-a||ff +an||_id-a|f ==== VARIATION: query={ $or: [ { a: 1 } ] }, sort={}, proj={ _id: 0, a: 1 } -eqa||a||ff +eqa||a|f ==== VARIATION: query={ $or: [ { a: 1 } ] }, sort={}, proj={ a.$: 1 } -eqa||||ff +eqa|||f diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt index 3c7413b8cb371..0bcde6f50a113 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_escaped.txt @@ -1,8 +1,8 @@ ==== VARIATION: query={ a,[]~|-<>: 1 }, sort={}, proj={} -eqa\,\[\]~\|\-<>||||ff +eqa\,\[\]~\|\-<>|||f ==== 
VARIATION: query={}, sort={ a,[]~|-<>: 1 }, proj={} -an|aa\,\[\]~\|\-<>|||ff +an|aa\,\[\]~\|\-<>||f ==== VARIATION: query={}, sort={}, proj={ a,[]~|-<>: 1 } -an||_id-a\,\[\]~\|\-<>||ff +an||_id-a\,\[\]~\|\-<>|f ==== VARIATION: query={}, sort={}, proj={ a: "foo,[]~|-<>" } -an||_id||ff +an||_id|f diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt index c5b995b1985ad..87d2ccce79d00 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_geo_near.txt @@ -1,6 +1,6 @@ ==== VARIATION: query={ a: { $near: [ 0, 0 ], $maxDistance: 0.3 } }, sort={}, proj={} -gnanrfl||||ff +gnanrfl|||f ==== VARIATION: query={ a: { $nearSphere: [ 0, 0 ], $maxDistance: 0.31 } }, sort={}, proj={} -gnanssp||||ff +gnanssp|||f ==== VARIATION: query={ a: { $geoNear: { $geometry: { type: "Point", coordinates: [ 0, 0 ] }, $maxDistance: 100 } } }, sort={}, proj={} -gnanrsp||||ff +gnanrsp|||f diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt index 517a68b0467bd..3dbc2f51d6325 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_match_in_depends_on_presence_of_regex_and_flags.txt @@ -1,30 +1,30 @@ ==== VARIATION: query={ a: { $in: [ /foo/ ] } }, sort={}, proj={} -rea||||ff +rea|||f ==== VARIATION: query={ a: { $in: [ /foo/i ] } }, sort={}, proj={} -rea/i/||||ff +rea/i/|||f ==== VARIATION: query={ a: { $in: [ 1, "foo" ] } }, sort={}, proj={} -ina||||ff +ina|||f ==== VARIATION: query={ a: { $in: [ 1, /foo/ ] } }, sort={}, proj={} -ina_re||||ff +ina_re|||f ==== VARIATION: query={ a: { $in: [ 1, /foo/is ] } }, sort={}, proj={} -ina_re/is/||||ff +ina_re/is/|||f ==== VARIATION: query={ a: { $in: [ 1, /foo/si ] } }, sort={}, proj={} -ina_re/is/||||ff +ina_re/is/|||f ==== VARIATION: query={ a: { $in: [ 1, /foo/i, /bar/m, /baz/s ] } }, sort={}, proj={} -ina_re/ims/||||ff +ina_re/ims/|||f ==== VARIATION: query={ a: { $in: [ 1, /foo/i, /bar/m, /baz/s, /qux/i, /quux/s ] } }, sort={}, proj={} -ina_re/ims/||||ff +ina_re/ims/|||f ==== VARIATION: query={ a: { $in: [ 1, /foo/ism, /bar/msi, /baz/im, /qux/si, /quux/im ] } }, sort={}, proj={} -ina_re/ims/||||ff +ina_re/ims/|||f ==== VARIATION: query={ a: { $in: [ 1, /foo/msi, /bar/ism, /baz/is, /qux/mi, /quux/im ] } }, sort={}, proj={} -ina_re/ims/||||ff +ina_re/ims/|||f ==== VARIATION: query={ a: { $not: { $in: [ 1, "foo" ] } } }, sort={}, proj={} -nt[ina]||||ff +nt[ina]|||f ==== VARIATION: query={ a: { $not: { $in: [ 1, /foo/ ] } } }, sort={}, proj={} -nt[ina_re]||||ff +nt[ina_re]|||f ==== VARIATION: query={ a: { $not: { $in: [ 1, /foo/i, /bar/i, /baz/msi ] } } }, sort={}, proj={} -nt[ina_re/ims/]||||ff +nt[ina_re/ims/]|||f ==== VARIATION: query={ a: { $not: { $in: [ /foo/ ] } } }, sort={}, proj={} -nt[rea]||||ff +nt[rea]|||f ==== VARIATION: query={ a: { $not: { $in: [ /foo/i ] } } }, sort={}, proj={} -nt[rea/i/]||||ff +nt[rea/i/]|||f diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt 
b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt index 7805d635360db..9e6fd7c6d07dc 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_regex_depends_on_flags.txt @@ -1,24 +1,24 @@ ==== VARIATION: query={ a: /sometext/ }, sort={}, proj={} -rea||||tf +rea|||f ==== VARIATION: query={ a: /sometext/ }, sort={}, proj={} -rea||||tf +rea|||f ==== VARIATION: query={ a: /sometext/s }, sort={}, proj={} -rea/s/||||tf +rea/s/|||f ==== VARIATION: query={ a: /sometext/ms }, sort={}, proj={} -rea/ms/||||tf +rea/ms/|||f ==== VARIATION: query={ a: /sometext/im }, sort={}, proj={} -rea/im/||||tf +rea/im/|||f ==== VARIATION: query={ a: /sometext/mi }, sort={}, proj={} -rea/im/||||tf +rea/im/|||f ==== VARIATION: query={ a: /abc/mi }, sort={}, proj={} -rea/im/||||tf +rea/im/|||f ==== VARIATION: query={ a: /efg/mi }, sort={}, proj={} -rea/im/||||tf +rea/im/|||f ==== VARIATION: query={ a: //ms }, sort={}, proj={} -rea/ms/||||tf +rea/ms/|||f ==== VARIATION: query={ a: /___/ms }, sort={}, proj={} -rea/ms/||||tf +rea/ms/|||f ==== VARIATION: query={ a: { $regex: "abc", $options: "imxsu" } }, sort={}, proj={} -rea/imsx/||||tf +rea/imsx/|||f ==== VARIATION: query={ a: /abc/im }, sort={}, proj={} -rea/im/||||tf +rea/im/|||f diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_with_api_strict.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_with_api_strict.txt index 150e5b4ddb5ce..5f8411ecd6ff8 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_with_api_strict.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/compute_key_with_api_strict.txt @@ -1,7 +1,7 @@ ==== VARIATION: query={}, sort={}, proj={} -an||||ff +an|||f ==== VARIATION: query={}, sort={}, proj={} -an||||ft +an|||t ==== VARIATION: sbe, query={}, sort={}, proj={} YW4ABQAAAAB8fHx8AAAAAAAAAAAAAAAAbm5ubgUAAAAAZnw= ==== VARIATION: sbe, query={}, sort={}, proj={} diff --git a/src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt b/src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt index 284989f183efe..2736b67479a13 100644 --- a/src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt +++ b/src/mongo/db/test_output/query/canonical_query_encoder_test/encode_not_equal_null_predicates.txt @@ -1,10 +1,10 @@ ==== VARIATION: query={ a: { $not: { $eq: null } } }, sort={}, proj={ _id: 0, a: 1 } -ntnot_eq_null[eqa]||a||ff +ntnot_eq_null[eqa]||a|f ==== VARIATION: query={ a: { $not: { $eq: null } } }, sort={ a: 1 }, proj={ _id: 0, a: 1 } -ntnot_eq_null[eqa]|aa|a||ff +ntnot_eq_null[eqa]|aa|a|f ==== VARIATION: query={ a: { $not: { $gte: null } } }, sort={ a: 1 }, proj={ _id: 0, a: 1 } -ntnot_eq_null[gea]|aa|a||ff +ntnot_eq_null[gea]|aa|a|f ==== VARIATION: query={ a: { $not: { $lte: null } } }, sort={ a: 1 }, proj={ _id: 0, a: 1 } -ntnot_eq_null[lea]|aa|a||ff +ntnot_eq_null[lea]|aa|a|f ==== VARIATION: query={ a: { $not: { $eq: true } } }, sort={ a: 1 }, proj={ _id: 0, a: 1 } -nt[eqa]|aa|a||ff +nt[eqa]|aa|a|f diff --git a/src/mongo/db/thread_client_test.cpp b/src/mongo/db/thread_client_test.cpp index 45fff378380d6..a9f38f6ef80c5 100644 --- a/src/mongo/db/thread_client_test.cpp +++ b/src/mongo/db/thread_client_test.cpp @@ -27,13 +27,17 @@ * it in the 
license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" #include "mongo/db/client.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/concurrency/thread_name.h" namespace mongo { namespace { diff --git a/src/mongo/db/time_proof_service.cpp b/src/mongo/db/time_proof_service.cpp index da65e441712c3..1af901d502c47 100644 --- a/src/mongo/db/time_proof_service.cpp +++ b/src/mongo/db/time_proof_service.cpp @@ -27,13 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/time_proof_service.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/logical_time.h" +#include "mongo/db/time_proof_service.h" #include "mongo/platform/random.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/time_proof_service.h b/src/mongo/db/time_proof_service.h index 43b6d97a68157..0a62770e975cd 100644 --- a/src/mongo/db/time_proof_service.h +++ b/src/mongo/db/time_proof_service.h @@ -29,7 +29,12 @@ #pragma once +#include +#include +#include + #include "mongo/base/status.h" +#include "mongo/crypto/hash_block.h" #include "mongo/crypto/sha1_block.h" #include "mongo/db/logical_time.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/db/time_proof_service_test.cpp b/src/mongo/db/time_proof_service_test.cpp index 59bf7df8bdfca..6856d51987afa 100644 --- a/src/mongo/db/time_proof_service_test.cpp +++ b/src/mongo/db/time_proof_service_test.cpp @@ -27,12 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/timestamp.h" #include "mongo/db/logical_time.h" #include "mongo/db/time_proof_service.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/timeseries/SConscript b/src/mongo/db/timeseries/SConscript index b6fafcaaef4ec..68e46ff4ea79f 100644 --- a/src/mongo/db/timeseries/SConscript +++ b/src/mongo/db/timeseries/SConscript @@ -21,6 +21,7 @@ env.Library( 'timeseries_options.cpp', ], LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/serialization_options', '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/storage/storage_options', '$BUILD_DIR/mongo/util/processinfo', @@ -111,6 +112,7 @@ env.Library( '$BUILD_DIR/mongo/db/catalog/collection_crud', '$BUILD_DIR/mongo/db/catalog/collection_query_info', '$BUILD_DIR/mongo/db/catalog/document_validation', + '$BUILD_DIR/mongo/db/dbdirectclient', '$BUILD_DIR/mongo/db/ops/write_ops_exec_util', '$BUILD_DIR/mongo/db/query/op_metrics', '$BUILD_DIR/mongo/db/record_id_helpers', @@ -118,6 +120,8 @@ env.Library( '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/timeseries/bucket_catalog/bucket_catalog', '$BUILD_DIR/mongo/db/update/update_common', + 'bucket_compression', + 'timeseries_conversion_util', 'timeseries_options', ], ) @@ -158,3 +162,16 @@ env.CppUnitTest( 'timeseries_write_util', ], ) + +env.Library( + target='timeseries_op_observer', + source=[ + 'timeseries_op_observer.cpp', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/catalog/collection_catalog', + '$BUILD_DIR/mongo/db/op_observer/op_observer', + '$BUILD_DIR/mongo/db/timeseries/bucket_catalog/bucket_catalog', + 'timeseries_extended_range', + ], +) diff --git a/src/mongo/db/timeseries/bucket_catalog/SConscript b/src/mongo/db/timeseries/bucket_catalog/SConscript index c1547a498d4ae..033c836d58087 100644 --- a/src/mongo/db/timeseries/bucket_catalog/SConscript +++ b/src/mongo/db/timeseries/bucket_catalog/SConscript @@ -14,7 +14,6 @@ env.Library( 'bucket_catalog_server_status.cpp', 'bucket_identifiers.cpp', 'bucket_metadata.cpp', - 'bucket_state.cpp', 'bucket_state_registry.cpp', 'closed_bucket.cpp', 'execution_stats.cpp', diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket.cpp index 603391c521319..6659db67aad9a 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket.cpp @@ -29,6 +29,20 @@ #include "mongo/db/timeseries/bucket_catalog/bucket.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/assert_util_core.h" + namespace mongo::timeseries::bucket_catalog { namespace { diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket.h b/src/mongo/db/timeseries/bucket_catalog/bucket.h index 9ed284bd13250..57323bed84315 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket.h +++ b/src/mongo/db/timeseries/bucket_catalog/bucket.h @@ -30,7 +30,16 @@ #pragma once #include - +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/oid.h" #include "mongo/db/operation_id.h" #include "mongo/db/repl/optime.h" @@ -42,7 +51,10 @@ #include "mongo/db/timeseries/bucket_catalog/write_batch.h" 
#include "mongo/db/timeseries/bucket_compression.h" #include "mongo/platform/atomic_word.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/future.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" namespace mongo::timeseries::bucket_catalog { diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog.cpp index 6ee28976dea89..c38a9cf5a2b4b 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog.cpp @@ -27,28 +27,37 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/platform/basic.h" - -#include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" - -#include -#include - -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_metadata.h" +#include "mongo/db/timeseries/bucket_catalog/flat_bson.h" +#include "mongo/db/timeseries/bucket_catalog/rollover.h" #include "mongo/db/timeseries/bucket_compression.h" -#include "mongo/db/timeseries/timeseries_constants.h" -#include "mongo/db/timeseries/timeseries_options.h" -#include "mongo/logv2/log.h" -#include "mongo/platform/compiler.h" -#include "mongo/stdx/thread.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -118,17 +127,6 @@ void finishWriteBatch(WriteBatch& batch, const CommitInfo& info) { invariant(batch.commitRights.load()); batch.promise.emplaceValue(info); } - -/** - * Abandons the write batch and notifies any waiters that the bucket has been cleared. - */ -void abortWriteBatch(WriteBatch& batch, const Status& status) { - if (batch.promise.getFuture().isReady()) { - return; - } - - batch.promise.setError(status); -} } // namespace BucketCatalog& BucketCatalog::get(ServiceContext* svcCtx) { @@ -196,24 +194,22 @@ Status prepareCommit(BucketCatalog& catalog, std::shared_ptr batch) internal::waitToCommitBatch(catalog.bucketStateRegistry, stripe, batch); stdx::lock_guard stripeLock{stripe.mutex}; - Bucket* bucket = internal::useBucketAndChangeState( - catalog.bucketStateRegistry, - stripe, - stripeLock, - batch->bucketHandle.bucketId, - [](boost::optional input, std::uint64_t) -> boost::optional { - invariant(input.has_value()); - return input.value().setFlag(BucketStateFlag::kPrepared); - }); if (isWriteBatchFinished(*batch)) { // Someone may have aborted it while we were waiting. Since we have the prepared batch, we // should now be able to fully abort the bucket. 
- if (bucket) { - internal::abort(catalog, stripe, stripeLock, batch, getBatchStatus()); - } + internal::abort(catalog, stripe, stripeLock, batch, getBatchStatus()); return getBatchStatus(); - } else if (!bucket) { + } + + Bucket* bucket = + internal::useBucketAndChangePreparedState(catalog.bucketStateRegistry, + stripe, + stripeLock, + batch->bucketHandle.bucketId, + internal::BucketPrepareAction::kPrepare); + + if (!bucket) { internal::abort(catalog, stripe, stripeLock, @@ -242,15 +238,12 @@ boost::optional finish(BucketCatalog& catalog, auto& stripe = catalog.stripes[batch->bucketHandle.stripe]; stdx::lock_guard stripeLock{stripe.mutex}; - Bucket* bucket = internal::useBucketAndChangeState( - catalog.bucketStateRegistry, - stripe, - stripeLock, - batch->bucketHandle.bucketId, - [](boost::optional input, std::uint64_t) -> boost::optional { - invariant(input.has_value()); - return input.value().unsetFlag(BucketStateFlag::kPrepared); - }); + Bucket* bucket = + internal::useBucketAndChangePreparedState(catalog.bucketStateRegistry, + stripe, + stripeLock, + batch->bucketHandle.bucketId, + internal::BucketPrepareAction::kUnprepare); if (bucket) { bucket->preparedBatch.reset(); } @@ -324,51 +317,24 @@ void abort(BucketCatalog& catalog, std::shared_ptr batch, const Stat void directWriteStart(BucketStateRegistry& registry, const NamespaceString& ns, const OID& oid) { invariant(!ns.isTimeseriesBucketsCollection()); - auto result = changeBucketState( - registry, - BucketId{ns, oid}, - [](boost::optional input, std::uint64_t) -> boost::optional { - if (input.has_value()) { - if (input.value().isPrepared()) { - return input.value(); - } - return input.value().addDirectWrite(); - } - // The underlying bucket isn't tracked by the catalog, but we need to insert a state - // here so that we can conflict reopening this bucket until we've completed our write - // and the reader has refetched. - return BucketState{}.setFlag(BucketStateFlag::kUntracked).addDirectWrite(); - }); - if (result.has_value() && result.value().isPrepared()) { - hangTimeseriesDirectModificationBeforeWriteConflict.pauseWhileSet(); - throwWriteConflictException("Prepared bucket can no longer be inserted into."); - } + auto state = addDirectWrite(registry, BucketId{ns, oid}); hangTimeseriesDirectModificationAfterStart.pauseWhileSet(); + + if (stdx::holds_alternative(state)) { + // The direct write count was successfully incremented. + return; + } + + // We cannot perform direct writes on prepared buckets. + invariant(isBucketStatePrepared(state)); + hangTimeseriesDirectModificationBeforeWriteConflict.pauseWhileSet(); + throwWriteConflictException("Prepared bucket can no longer be inserted into."); } void directWriteFinish(BucketStateRegistry& registry, const NamespaceString& ns, const OID& oid) { invariant(!ns.isTimeseriesBucketsCollection()); hangTimeseriesDirectModificationBeforeFinish.pauseWhileSet(); - (void)changeBucketState( - registry, - BucketId{ns, oid}, - [](boost::optional input, std::uint64_t) -> boost::optional { - if (!input.has_value()) { - // We may have had multiple direct writes to this document in the same storage - // transaction. If so, a previous call to directWriteFinish may have cleaned up the - // state. - return boost::none; - } - - auto& modified = input.value().removeDirectWrite(); - if (!modified.isSet(BucketStateFlag::kPendingDirectWrite) && - modified.isSet(BucketStateFlag::kUntracked)) { - // The underlying bucket is no longer tracked by the catalog, so we can clean up the - // state. 
- return boost::none; - } - return modified; - }); + removeDirectWrite(registry, BucketId{ns, oid}); } void clear(BucketCatalog& catalog, ShouldClearFn&& shouldClear) { @@ -407,10 +373,9 @@ void clear(BucketCatalog& catalog, const NamespaceString& ns) { clear(catalog, [ns](const NamespaceString& bucketNs) { return bucketNs == ns; }); } -void clear(BucketCatalog& catalog, StringData dbName) { - clear(catalog, [dbName = dbName.toString()](const NamespaceString& bucketNs) { - return bucketNs.db() == dbName; - }); +void clear(BucketCatalog& catalog, const DatabaseName& dbName) { + clear(catalog, + [dbName](const NamespaceString& bucketNs) { return bucketNs.dbName() == dbName; }); } void appendExecutionStats(const BucketCatalog& catalog, diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog.h b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog.h index 90fb5d2e4efae..21057891c4c02 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog.h +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog.h @@ -31,17 +31,35 @@ #include #include +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/single_write_result_gen.h" #include "mongo/db/service_context.h" #include "mongo/db/timeseries/bucket_catalog/bucket.h" #include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" #include "mongo/db/timeseries/bucket_catalog/bucket_metadata.h" -#include "mongo/db/timeseries/bucket_catalog/bucket_state.h" #include "mongo/db/timeseries/bucket_catalog/bucket_state_registry.h" #include "mongo/db/timeseries/bucket_catalog/closed_bucket.h" #include "mongo/db/timeseries/bucket_catalog/execution_stats.h" @@ -51,9 +69,13 @@ #include "mongo/db/timeseries/bucket_catalog/write_batch.h" #include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/views/view.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" namespace mongo::timeseries::bucket_catalog { @@ -127,7 +149,9 @@ class BucketCatalog { static BucketCatalog& get(ServiceContext* svcCtx); static BucketCatalog& get(OperationContext* opCtx); - BucketCatalog() = default; + BucketCatalog() : stripes(numberOfStripes) {} + BucketCatalog(size_t numberOfStripes) + : numberOfStripes(numberOfStripes), stripes(numberOfStripes){}; BucketCatalog(const BucketCatalog&) = delete; BucketCatalog operator=(const BucketCatalog&) = delete; @@ -135,9 +159,10 @@ class BucketCatalog { BucketStateRegistry bucketStateRegistry; // The actual buckets in the catalog are distributed across a number of 'Stripe's. Each can be - // independently locked and operated on in parallel. - static constexpr std::size_t kNumberOfStripes = 32; - std::array stripes; + // independently locked and operated on in parallel. The size of the stripe vector should not be + // changed after initialization. 
+ const std::size_t numberOfStripes = 32; + std::vector stripes; // Per-namespace execution stats. This map is protected by 'mutex'. Once you complete your // lookup, you can keep the shared_ptr to an individual namespace's stats object and release the @@ -168,8 +193,9 @@ BSONObj getMetadata(BucketCatalog& catalog, const BucketHandle& bucket); /** * Tries to insert 'doc' into a suitable bucket. If an open bucket is full (or has incompatible * schema), but is otherwise suitable, we will close it and open a new bucket. If we find no bucket - * with matching data and a time range that can accomodate 'doc', we will not open a new bucket, but - * rather let the caller know to search for an archived or closed bucket that can accomodate 'doc'. + * with matching data and a time range that can accommodate 'doc', we will not open a new bucket, + * but rather let the caller know to search for an archived or closed bucket that can accommodate + * 'doc'. * * If a suitable bucket is found or opened, returns the WriteBatch into which 'doc' was inserted and * a list of any buckets that were closed to make space to insert 'doc'. Any caller who receives the @@ -178,7 +204,7 @@ BSONObj getMetadata(BucketCatalog& catalog, const BucketHandle& bucket); * * If no suitable bucket is found or opened, returns an optional bucket ID. If set, the bucket ID * corresponds to an archived bucket which should be fetched; otherwise the caller should search for - * a previously-closed bucket that can accomodate 'doc'. The caller should proceed to call 'insert' + * a previously-closed bucket that can accommodate 'doc'. The caller should proceed to call 'insert' * to insert 'doc', passing any fetched bucket. */ StatusWith tryInsert(OperationContext* opCtx, @@ -264,7 +290,7 @@ void clear(BucketCatalog& catalog, const NamespaceString& ns); * Clears the buckets for the given database by removing the bucket from the catalog asynchronously * through the BucketStateRegistry. */ -void clear(BucketCatalog& catalog, StringData dbName); +void clear(BucketCatalog& catalog, const DatabaseName& dbName); /** * Appends the execution stats for the given namespace to the builder. 
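
The bucket_catalog.h hunk above replaces the fixed `std::array` of 32 stripes with a `std::vector<Stripe>` sized by a constructor argument, and the bucket_catalog_internal.cpp hunk further down reduces `getStripeNumber(key, numberOfStripes)` to a hash modulo the runtime stripe count. A minimal standalone sketch of that mapping follows; `BucketKey` and `Stripe` here are simplified stand-ins for the real catalog types, not the definitions from this patch:

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Simplified stand-ins for the catalog types touched by this patch.
struct BucketKey {
    std::uint64_t hash;  // precomputed hash of (namespace, metadata)
};

struct Stripe {
    // In the real catalog each stripe owns a mutex plus the open buckets it shards,
    // so stripes can be locked and operated on independently.
};

// Mirrors the reworked getStripeNumber(key, numberOfStripes): the stripe count is
// now a runtime value carried by the catalog instead of a compile-time constant.
std::size_t getStripeNumber(const BucketKey& key, std::size_t numberOfStripes) {
    return key.hash % numberOfStripes;
}

int main() {
    const std::size_t numberOfStripes = 32;       // default used by the catalog
    std::vector<Stripe> stripes(numberOfStripes);  // sized once, never resized

    BucketKey key{0x9e3779b97f4a7c15ULL};
    std::cout << "bucket maps to stripe " << getStripeNumber(key, stripes.size()) << "\n";
    return 0;
}
```
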
diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.cpp index 022b9b5630d88..21e57bea24912 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.cpp @@ -29,13 +29,33 @@ #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" -#include "mongo/db/dbdirectclient.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" #include "mongo/db/timeseries/timeseries_constants.h" -#include "mongo/logv2/log.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -132,7 +152,7 @@ void normalizeObject(BSONObjBuilder* builder, const BSONObj& obj) { * Generates a match filter used to identify suitable buckets for reopening, represented by: * * {$and: - * [{"control.version":1}, + * [{"control.version":1}, // Only when gTimeseriesAlwaysUseCompressedBuckets is disabled. * {$or: [{"control.closed":{$exists:false}}, * {"control.closed":false}] * }, @@ -148,8 +168,12 @@ BSONObj generateReopeningMatchFilter(const Date_t& time, const std::string& controlMinTimePath, const std::string& maxDataTimeFieldPath, int64_t bucketMaxSpanSeconds) { - // The bucket must be uncompressed. - auto versionFilter = BSON(kControlVersionPath << kTimeseriesControlUncompressedVersion); + boost::optional versionFilter; + if (!feature_flags::gTimeseriesAlwaysUseCompressedBuckets.isEnabled( + serverGlobalParams.featureCompatibility)) { + // The bucket must be uncompressed. + versionFilter = BSON(kControlVersionPath << kTimeseriesControlUncompressedVersion); + } // The bucket cannot be closed (aka open for new measurements). auto closedFlagFilter = @@ -177,8 +201,14 @@ BSONObj generateReopeningMatchFilter(const Date_t& time, // full and we do not want to insert future measurements into it. 
auto measurementSizeFilter = BSON(maxDataTimeFieldPath << BSON("$exists" << false)); - return BSON("$and" << BSON_ARRAY(versionFilter << closedFlagFilter << timeRangeFilter - << metaFieldFilter << measurementSizeFilter)); + if (versionFilter) { + return BSON("$and" << BSON_ARRAY(*versionFilter << closedFlagFilter << timeRangeFilter + << metaFieldFilter + << measurementSizeFilter)); + } else { + return BSON("$and" << BSON_ARRAY(closedFlagFilter << timeRangeFilter << metaFieldFilter + << measurementSizeFilter)); + } } } // namespace diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h index 3f501ec08a65b..c7bb36e40371c 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h @@ -29,13 +29,26 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/base/string_data_comparator_interface.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/storage/kv/kv_engine.h" #include "mongo/db/timeseries/bucket_catalog/flat_bson.h" #include "mongo/db/timeseries/timeseries_options.h" +#include "mongo/util/time_support.h" namespace mongo::timeseries::bucket_catalog { diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers_test.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers_test.cpp index 8171fac5a4e07..57be5324d95ff 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers_test.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers_test.cpp @@ -27,17 +27,37 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/json.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo::timeseries::bucket_catalog { namespace { @@ -606,5 +626,48 @@ TEST_F(BucketCatalogHelpersTest, FindDocumentFromOID) { } } +TEST_F(BucketCatalogHelpersTest, FindSuitableCompressedBucketForMeasurement) { + RAIIServerParameterControllerForTest featureFlagController{ + "featureFlagTimeseriesAlwaysUseCompressedBuckets", true}; + + ASSERT_OK(createCollection( + operationContext(), + kNss.dbName(), + BSON("create" << kNss.coll() << "timeseries" + << BSON("timeField" << _timeField << "metaField" << _metaField)))); + + AutoGetCollection autoColl(operationContext(), kNss.makeTimeseriesBucketsNamespace(), MODE_IX); + ASSERT(autoColl->getTimeseriesOptions() && autoColl->getTimeseriesOptions()->getMetaField()); + + auto tsOptions = *autoColl->getTimeseriesOptions(); + + // control.version indicates the bucket is compressed. + BSONObj bucketDoc = mongo::fromjson(R"({ + "_id":{"$oid":"62e7e6ec27c28d338ab29200"}, + "control":{"version":2,"min":{"_id":1,"time":{"$date":"2021-08-01T11:00:00Z"},"a":1}, + "max":{"_id":3,"time":{"$date":"2021-08-01T12:00:00Z"},"a":3}, + "closed":false}, + "meta":1, + "data":{"time":{"0":{"$date":"2021-08-01T11:00:00Z"}, + "1":{"$date":"2021-08-01T11:00:00Z"}, + "2":{"$date":"2021-08-01T11:00:00Z"}}, + "a":{"0":1,"1":2,"2":3}}})"); + + // Insert bucket document into the system.buckets collection. + _insertIntoBucketColl(bucketDoc); + + auto time = dateFromISOString("2021-08-01T11:30:00Z"); + BSONObj docWithSuitableBucket = + BSON("_id" << 1 << _timeField << time.getValue() << _metaField << 1); + + // Verify that we can find a suitable bucket to insert into. 
+ auto result = _findSuitableBucket(operationContext(), + kNss.makeTimeseriesBucketsNamespace(), + tsOptions, + docWithSuitableBucket); + ASSERT_FALSE(result.isEmpty()); + ASSERT_EQ(bucketDoc["_id"].OID(), result["_id"].OID()); +} + } // namespace } // namespace mongo::timeseries::bucket_catalog diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.cpp index 95113f710cb36..4b47a4d62052d 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.cpp @@ -29,13 +29,61 @@ #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h" -#include - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bsoncolumn.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/operation_id.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/kv/kv_engine.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_metadata.h" +#include "mongo/db/timeseries/bucket_catalog/flat_bson.h" +#include "mongo/db/timeseries/bucket_compression.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_global_options.h" +#include "mongo/db/timeseries/timeseries_options.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo::timeseries::bucket_catalog::internal { namespace { @@ -43,6 +91,10 @@ MONGO_FAIL_POINT_DEFINE(alwaysUseSameBucketCatalogStripe); MONGO_FAIL_POINT_DEFINE(hangTimeseriesInsertBeforeReopeningBucket); MONGO_FAIL_POINT_DEFINE(hangWaitingForConflictingPreparedBatch); +Mutex _bucketIdGenLock = + MONGO_MAKE_LATCH(HierarchicalAcquisitionLevel(0), "bucket_catalog_internal::_bucketIdGenLock"); +PseudoRandom _bucketIdGenPRNG(SecureRandom().nextInt64()); + OperationId getOpId(OperationContext* opCtx, CombineWithInsertsFromOtherClients combine) { switch (combine) { case CombineWithInsertsFromOtherClients::kAllow: @@ -109,11 +161,11 @@ void abortWriteBatch(WriteBatch& batch, const Status& status) { } } // namespace -StripeNumber getStripeNumber(const BucketKey& key) { +StripeNumber getStripeNumber(const BucketKey& key, size_t numberOfStripes) { if (MONGO_unlikely(alwaysUseSameBucketCatalogStripe.shouldFail())) { return 0; } - return key.hash % BucketCatalog::kNumberOfStripes; + return key.hash % numberOfStripes; } StatusWith> extractBucketingParameters( @@ -159,7 +211,7 @@ const Bucket* 
findBucket(BucketStateRegistry& registry, } if (auto state = getBucketState(registry, it->second.get()); - state && !state.value().conflictsWithInsertion()) { + state && !conflictsWithInsertions(state.value())) { return it->second.get(); } } @@ -174,15 +226,17 @@ Bucket* useBucket(BucketStateRegistry& registry, return const_cast(findBucket(registry, stripe, stripeLock, bucketId, mode)); } -Bucket* useBucketAndChangeState(BucketStateRegistry& registry, - Stripe& stripe, - WithLock stripeLock, - const BucketId& bucketId, - const BucketStateRegistry::StateChangeFn& change) { +Bucket* useBucketAndChangePreparedState(BucketStateRegistry& registry, + Stripe& stripe, + WithLock stripeLock, + const BucketId& bucketId, + BucketPrepareAction prepare) { auto it = stripe.openBucketsById.find(bucketId); if (it != stripe.openBucketsById.end()) { - if (auto state = changeBucketState(registry, it->second.get(), change); - state && !state.value().conflictsWithInsertion()) { + StateChangeSucessful stateChangeResult = (prepare == BucketPrepareAction::kPrepare) + ? prepareBucketState(registry, it->second.get()->bucketId, it->second.get()) + : unprepareBucketState(registry, it->second.get()->bucketId, it->second.get()); + if (stateChangeResult == StateChangeSucessful::kYes) { return it->second.get(); } } @@ -217,7 +271,7 @@ Bucket* useBucket(BucketCatalog& catalog, } if (auto state = getBucketState(catalog.bucketStateRegistry, bucket); - state && !state.value().conflictsWithInsertion()) { + state && !conflictsWithInsertions(state.value())) { markBucketNotIdle(stripe, stripeLock, *bucket); return bucket; } @@ -263,13 +317,13 @@ Bucket* useAlternateBucket(BucketCatalog& catalog, auto state = getBucketState(catalog.bucketStateRegistry, potentialBucket); invariant(state); - if (!state.value().conflictsWithInsertion()) { + if (!conflictsWithInsertions(state.value())) { invariant(!potentialBucket->idleListEntry.has_value()); return potentialBucket; } // Clean up the bucket if it has been cleared. - if (state.value().isSet(BucketStateFlag::kCleared)) { + if (state && isBucketStateCleared(state.value())) { abort(catalog, stripe, stripeLock, @@ -412,24 +466,12 @@ StatusWith> reopenBucket(BucketCatalog& catalog, expireIdleBuckets(catalog, stripe, stripeLock, stats, closedBuckets); - // We may need to initialize the bucket's state. - bool conflicts = false; - auto initializeStateFn = - [targetEra, &conflicts](boost::optional input, - std::uint64_t currentEra) -> boost::optional { - if (targetEra < currentEra || - (input.has_value() && input.value().conflictsWithReopening())) { - conflicts = true; - return input; - } - conflicts = false; - return input.has_value() ? input.value() : BucketState{}; - }; + auto status = initializeBucketState( + catalog.bucketStateRegistry, bucket->bucketId, bucket.get(), targetEra); - auto state = - changeBucketState(catalog.bucketStateRegistry, bucket->bucketId, initializeStateFn); - if (conflicts) { - return {ErrorCodes::WriteConflict, "Bucket may be stale"}; + // Forward the WriteConflict if the bucket has been cleared or has a pending direct write. + if (!status.isOK()) { + return status; } // If this bucket was archived, we need to remove it from the set of archived buckets. @@ -490,23 +532,11 @@ StatusWith> reuseExistingBucket(BucketCatalog& ca Bucket& existingBucket, std::uint64_t targetEra) { // If we have an existing bucket, passing the Bucket* will let us check if the bucket was - // cleared as part of a set since the last time it was used. 
If we were to just check by - // OID, we may miss if e.g. there was a move chunk operation. - bool conflicts = false; - auto state = changeBucketState( - catalog.bucketStateRegistry, - &existingBucket, - [targetEra, &conflicts](boost::optional input, - std::uint64_t currentEra) -> boost::optional { - if (targetEra < currentEra || - (input.has_value() && input.value().conflictsWithReopening())) { - conflicts = true; - return input; - } - conflicts = false; - return input.has_value() ? input.value() : BucketState{}; - }); - if (state.has_value() && state.value().isSet(BucketStateFlag::kCleared)) { + // cleared as part of a set since the last time it was used. If we were to just check by OID, we + // may miss if e.g. there was a move chunk operation. + auto state = getBucketState(catalog.bucketStateRegistry, &existingBucket); + invariant(state); + if (isBucketStateCleared(state.value())) { abort(catalog, stripe, stripeLock, @@ -514,9 +544,9 @@ StatusWith> reuseExistingBucket(BucketCatalog& ca nullptr, getTimeseriesBucketClearedError(existingBucket.bucketId.ns, existingBucket.bucketId.oid)); - conflicts = true; - } - if (conflicts) { + return {ErrorCodes::WriteConflict, "Bucket may be stale"}; + } else if (conflictsWithReopening(state.value())) { + // Avoid reusing the bucket if it conflicts with reopening. return {ErrorCodes::WriteConflict, "Bucket may be stale"}; } @@ -631,7 +661,7 @@ StatusWith insert(OperationContext* opCtx, // Buckets are spread across independently-lockable stripes to improve parallelism. We map a // bucket to a stripe by hashing the BucketKey. - auto stripeNumber = getStripeNumber(key); + auto stripeNumber = getStripeNumber(key, catalog.numberOfStripes); InsertResult result; result.catalogEra = getCurrentEra(catalog.bucketStateRegistry); @@ -814,21 +844,27 @@ void removeBucket( switch (mode) { case RemovalMode::kClose: { auto state = getBucketState(catalog.bucketStateRegistry, bucket.bucketId); - invariant(state.has_value()); - invariant(state.value().isSet(BucketStateFlag::kPendingCompression)); + if (feature_flags::gTimeseriesAlwaysUseCompressedBuckets.isEnabled( + serverGlobalParams.featureCompatibility)) { + // When removing a closed bucket, the BucketStateRegistry may contain state for this + // bucket due to an untracked ongoing direct write (such as TTL delete). + if (state.has_value()) { + invariant(stdx::holds_alternative(state.value()), + bucketStateToString(*state)); + invariant(stdx::get(state.value()) < 0, + bucketStateToString(*state)); + } + } else { + // Ensure that we are in a state of pending compression (represented by a negative + // direct write counter). + invariant(state.has_value()); + invariant(stdx::holds_alternative(state.value())); + invariant(stdx::get(state.value()) < 0); + } break; } case RemovalMode::kAbort: - changeBucketState(catalog.bucketStateRegistry, - bucket.bucketId, - [](boost::optional input, - std::uint64_t) -> boost::optional { - invariant(input.has_value()); - if (input->conflictsWithReopening()) { - return input.value().setFlag(BucketStateFlag::kUntracked); - } - return boost::none; - }); + stopTrackingBucketState(catalog.bucketStateRegistry, bucket.bucketId); break; case RemovalMode::kArchive: // No state change @@ -895,21 +931,15 @@ boost::optional findArchivedCandidate(BucketCatalog& catalog, // We need to make sure our measurement can fit without violating max span. If not, we // can't use this bucket. 
if (info.time - candidateTime < Seconds(*info.options.getBucketMaxSpanSeconds())) { - auto state = getBucketState(catalog.bucketStateRegistry, candidateBucket.bucketId); - if (state && !state.value().conflictsWithReopening()) { + auto bucketState = getBucketState(catalog.bucketStateRegistry, candidateBucket.bucketId); + if (bucketState && !conflictsWithReopening(bucketState.value())) { return candidateBucket.bucketId.oid; } else { - if (state) { - changeBucketState(catalog.bucketStateRegistry, - candidateBucket.bucketId, - [](boost::optional input, - std::uint64_t) -> boost::optional { - if (!input.has_value()) { - return boost::none; - } - invariant(input.value().conflictsWithReopening()); - return input.value().setFlag(BucketStateFlag::kUntracked); - }); + if (bucketState) { + // If the bucket is represented by a state in the registry, it conflicts with + // reopening so we can mark it as untracked to drop the state once the directWrite + // finishes. + stopTrackingBucketState(catalog.bucketStateRegistry, candidateBucket.bucketId); } long long memory = marginalMemoryUsageForArchivedBucket(candidateBucket, archivedSet.size() == 1); @@ -1015,13 +1045,7 @@ void abort(BucketCatalog& catalog, if (doRemove) { removeBucket(catalog, stripe, stripeLock, bucket, RemovalMode::kAbort); } else { - changeBucketState( - catalog.bucketStateRegistry, - bucket.bucketId, - [](boost::optional input, std::uint64_t) -> boost::optional { - invariant(input.has_value()); - return input.value().setFlag(BucketStateFlag::kCleared); - }); + clearBucketState(catalog.bucketStateRegistry, bucket.bucketId); } } @@ -1056,11 +1080,11 @@ void expireIdleBuckets(BucketCatalog& catalog, Bucket* bucket = stripe.idleBuckets.back(); auto state = getBucketState(catalog.bucketStateRegistry, bucket); - if (canArchive && state && !state.value().conflictsWithInsertion()) { + if (canArchive && state && !conflictsWithInsertions(state.value())) { // Can archive a bucket if it's still eligible for insertions. archiveBucket(catalog, stripe, stripeLock, *bucket, closedBuckets); stats.incNumBucketsArchivedDueToMemoryThreshold(); - } else if (state && state.value().isSet(BucketStateFlag::kCleared)) { + } else if (state && isBucketStateCleared(state.value())) { // Bucket was cleared and just needs to be removed from catalog. removeBucket(catalog, stripe, stripeLock, *bucket, RemovalMode::kAbort); } else { @@ -1098,7 +1122,7 @@ void expireIdleBuckets(BucketCatalog& catalog, } std::pair generateBucketOID(const Date_t& time, const TimeseriesOptions& options) { - OID oid = OID::gen(); + OID oid; // We round the measurement timestamp down to the nearest minute, hour, or day depending on the // granularity. We do this for two reasons. The first is so that if measurements come in @@ -1110,28 +1134,29 @@ std::pair generateBucketOID(const Date_t& time, const TimeseriesOpt int64_t const roundedSeconds = durationCount(roundedTime.toDurationSinceEpoch()); oid.setTimestamp(roundedSeconds); - // Now, if we stopped here we could end up with bucket OID collisions. Consider the case where - // we have the granularity set to 'Hours'. This means we will round down to the nearest day, so - // any bucket generated on the same machine on the same day will have the same timestamp portion - // and unique instance portion of the OID. Only the increment will differ. Since we only use 3 - // bytes for the increment portion, we run a serious risk of overflow if we are generating lots - // of buckets. 
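(Aside on the stripe mapping used by insert() above: getStripeNumber() now takes the catalog's stripe count explicitly instead of assuming a global constant. Below is a minimal, self-contained model of the idea with stand-in types; the real function hashes a BucketKey, not a string.)

```cpp
#include <cstddef>
#include <functional>
#include <string>

// Map a key onto one of 'numberOfStripes' independently lockable stripes. Any stable hash
// works; what matters is that the same key always lands on the same stripe, so all work on
// a given bucket contends on a single stripe lock only.
std::size_t stripeFor(const std::string& bucketKey, std::size_t numberOfStripes) {
    return std::hash<std::string>{}(bucketKey) % numberOfStripes;
}
```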
+ // Now, if we used the standard OID generation method for the remaining bytes we could end up + // with lots of bucket OID collisions. Consider the case where we have the granularity set to + // 'Hours'. This means we will round down to the nearest day, so any bucket generated on the + // same machine on the same day will have the same timestamp portion and unique instance portion + // of the OID. Only the increment would differ. Since we only use 3 bytes for the increment + // portion, we run a serious risk of overflow if we are generating lots of buckets. // - // To address this, we'll take the difference between the actual timestamp and the rounded - // timestamp and add it to the instance portion of the OID to ensure we can't have a collision. - // for timestamps generated on the same machine. - // - // This leaves open the possibility that in the case of step-down/step-up, we could get a - // collision if the old primary and the new primary have unique instance bits that differ by - // less than the maximum rounding difference. This is quite unlikely though, and can be resolved - // by restarting the new primary. It remains an open question whether we can fix this in a - // better way. - // TODO (SERVER-61412): Avoid time-series bucket OID collisions after election - auto instance = oid.getInstanceUnique(); - uint32_t sum = DataView(reinterpret_cast(instance.bytes)).read(1) + - (durationCount(time.toDurationSinceEpoch()) - roundedSeconds); - DataView(reinterpret_cast(instance.bytes)).write(sum, 1); + // To address this, we'll instead use a PRNG to generate the rest of the bytes. With 8 bytes of + // randomness, we should have a pretty low chance of collisions. The limit of the birthday + // paradox converges to roughly the square root of the size of the space, so we would need a few + // billion buckets with the same timestamp to expect collisions. In the rare case that we do get + // a collision, we can (and do) simply regenerate the bucket _id at a higher level. + OID::InstanceUnique instance; + OID::Increment increment; + { + // We need to serialize access to '_bucketIdGenPRNG' since this instance is shared between + // all bucket_catalog operations, and not protected by the catalog or stripe locks. 
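(A self-contained sketch of the _id scheme the comment above describes: the first four OID bytes carry the rounded timestamp and the remaining eight bytes are random. The names and PRNG choice here are illustrative only; the real code fills OID::InstanceUnique and OID::Increment from a shared, mutex-protected PRNG.)

```cpp
#include <array>
#include <cstdint>
#include <random>

// Model of a 12-byte bucket _id: a 4-byte big-endian rounded timestamp followed by 8 random
// bytes (standing in for the 5-byte instance-unique and 3-byte increment fields of an OID).
// With 64 bits of randomness, the birthday bound puts a ~50% collision chance only around
// 2^32 (roughly four billion) buckets sharing the same rounded timestamp, matching the
// "few billion" estimate above; a rare collision is handled by regenerating the _id.
std::array<std::uint8_t, 12> makeBucketIdModel(std::uint32_t roundedSeconds,
                                               std::mt19937_64& prng) {
    std::array<std::uint8_t, 12> id{};
    for (int i = 0; i < 4; ++i) {
        id[i] = static_cast<std::uint8_t>(roundedSeconds >> (8 * (3 - i)));
    }
    std::uint64_t r = prng();
    for (int i = 0; i < 8; ++i) {
        id[4 + i] = static_cast<std::uint8_t>(r >> (8 * i));
    }
    return id;
}
```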
+ stdx::unique_lock lk{_bucketIdGenLock}; + _bucketIdGenPRNG.fill(instance.bytes, OID::kInstanceUniqueSize); + _bucketIdGenPRNG.fill(increment.bytes, OID::kIncrementSize); + } oid.setInstanceUnique(instance); + oid.setIncrement(increment); return {oid, roundedTime}; } @@ -1169,16 +1194,14 @@ Bucket& allocateBucket(BucketCatalog& catalog, Bucket* bucket = it->second.get(); stripe.openBucketsByKey[info.key].emplace(bucket); - auto state = changeBucketState( - catalog.bucketStateRegistry, - it->first, - [](boost::optional input, std::uint64_t) -> boost::optional { - invariant(!input.has_value()); - return BucketState{}; - }); - invariant(state == BucketState{}); - catalog.numberOfActiveBuckets.fetchAndAdd(1); + auto status = initializeBucketState(catalog.bucketStateRegistry, bucket->bucketId); + if (!status.isOK()) { + stripe.openBucketsByKey[info.key].erase(bucket); + stripe.openBucketsById.erase(it); + throwWriteConflictException(status.reason()); + } + catalog.numberOfActiveBuckets.fetchAndAdd(1); if (info.openedDuetoMetadata) { info.stats.incNumBucketsOpenedDueToMetadata(); } @@ -1339,8 +1362,8 @@ std::shared_ptr getExecutionStats(const BucketCatalog& catalog, Status getTimeseriesBucketClearedError(const NamespaceString& ns, const OID& oid) { return {ErrorCodes::TimeseriesBucketCleared, - str::stream() << "Time-series bucket " << oid << " for namespace " << ns - << " was cleared"}; + str::stream() << "Time-series bucket " << oid << " for namespace " + << ns.toStringForErrorMsg() << " was cleared"}; } void closeOpenBucket(BucketCatalog& catalog, @@ -1348,6 +1371,15 @@ void closeOpenBucket(BucketCatalog& catalog, WithLock stripeLock, Bucket& bucket, ClosedBuckets& closedBuckets) { + if (feature_flags::gTimeseriesAlwaysUseCompressedBuckets.isEnabled( + serverGlobalParams.featureCompatibility)) { + // Remove the bucket from the bucket state registry. + stopTrackingBucketState(catalog.bucketStateRegistry, bucket.bucketId); + + removeBucket(catalog, stripe, stripeLock, bucket, RemovalMode::kClose); + return; + } + bool error = false; try { closedBuckets.emplace_back(&catalog.bucketStateRegistry, @@ -1366,6 +1398,15 @@ void closeOpenBucket(BucketCatalog& catalog, WithLock stripeLock, Bucket& bucket, boost::optional& closedBucket) { + if (feature_flags::gTimeseriesAlwaysUseCompressedBuckets.isEnabled( + serverGlobalParams.featureCompatibility)) { + // Remove the bucket from the bucket state registry. + stopTrackingBucketState(catalog.bucketStateRegistry, bucket.bucketId); + + removeBucket(catalog, stripe, stripeLock, bucket, RemovalMode::kClose); + return; + } + bool error = false; try { closedBucket = boost::in_place(&catalog.bucketStateRegistry, @@ -1383,6 +1424,13 @@ void closeOpenBucket(BucketCatalog& catalog, void closeArchivedBucket(BucketStateRegistry& registry, ArchivedBucket& bucket, ClosedBuckets& closedBuckets) { + if (feature_flags::gTimeseriesAlwaysUseCompressedBuckets.isEnabled( + serverGlobalParams.featureCompatibility)) { + // Remove the bucket from the bucket state registry. + stopTrackingBucketState(registry, bucket.bucketId); + return; + } + try { closedBuckets.emplace_back(®istry, bucket.bucketId, bucket.timeField, boost::none); } catch (...) 
{ diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h index 4b5c62f72f089..152a041c5f288 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h @@ -29,7 +29,35 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/timeseries/bucket_catalog/bucket.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_state_registry.h" +#include "mongo/db/timeseries/bucket_catalog/closed_bucket.h" +#include "mongo/db/timeseries/bucket_catalog/execution_stats.h" +#include "mongo/db/timeseries/bucket_catalog/reopening.h" +#include "mongo/db/timeseries/bucket_catalog/rollover.h" +#include "mongo/db/timeseries/bucket_catalog/write_batch.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/time_support.h" namespace mongo::timeseries::bucket_catalog::internal { @@ -69,10 +97,15 @@ enum class RemovalMode { */ enum class IgnoreBucketState { kYes, kNo }; +/** + * Mode enum to control whether we prepare or unprepare a bucket. + */ +enum class BucketPrepareAction { kPrepare, kUnprepare }; + /** * Maps bucket key to the stripe that is responsible for it. */ -StripeNumber getStripeNumber(const BucketKey& key); +StripeNumber getStripeNumber(const BucketKey& key, size_t numberOfStripes); /** * Extracts the information from the input 'doc' that is used to map the document to a bucket. @@ -102,13 +135,13 @@ Bucket* useBucket(BucketStateRegistry& registry, IgnoreBucketState mode); /** - * Retrieve a bucket for write use, updating the state in the process. + * Retrieve a bucket for write use and prepare/unprepare the 'BucketState'. */ -Bucket* useBucketAndChangeState(BucketStateRegistry& registry, - Stripe& stripe, - WithLock stripeLock, - const BucketId& bucketId, - const BucketStateRegistry::StateChangeFn& change); +Bucket* useBucketAndChangePreparedState(BucketStateRegistry& registry, + Stripe& stripe, + WithLock stripeLock, + const BucketId& bucketId, + BucketPrepareAction prepare); /** * Retrieve the open bucket for write use if one exists. If none exists and 'mode' is set to kYes, diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_server_status.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_server_status.cpp index e747230a970a9..567d10deeb54e 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_server_status.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_server_status.cpp @@ -27,8 +27,19 @@ * it in the license file. 
*/ +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_state_registry.h" +#include "mongo/db/timeseries/bucket_catalog/execution_stats.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/unordered_map.h" namespace mongo::timeseries::bucket_catalog { namespace { diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_test.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_test.cpp index 1ef04fb4cab6c..3c09c655625f1 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_test.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_catalog_test.cpp @@ -27,22 +27,48 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/catalog/catalog_test_fixture.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_metadata.h" #include "mongo/db/timeseries/bucket_compression.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/stdx/future.h" +#include "mongo/stdx/thread.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/debug_util.h" #include "mongo/util/fail_point.h" #include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo::timeseries::bucket_catalog { namespace { @@ -109,6 +135,20 @@ class BucketCatalogWithoutMetadataTest : public BucketCatalogTest { BSONObj _makeTimeseriesOptionsForCreate() const override; }; +class BucketCatalogInMultitenancyEnv : public BucketCatalogTest { +protected: + void setUp() override; + +private: + boost::optional __multitenancyController; + +protected: + NamespaceString _tenant1Ns1 = + NamespaceString::createNamespaceString_forTest({TenantId(OID::gen())}, "db1", "coll1"); + NamespaceString _tenant2Ns1 = + NamespaceString::createNamespaceString_forTest({TenantId(OID::gen())}, "db1", "coll1"); +}; + void BucketCatalogTest::setUp() { CatalogTestFixture::setUp(); @@ -123,6 +163,21 @@ void BucketCatalogTest::setUp() { } } +void BucketCatalogInMultitenancyEnv::setUp() { + __multitenancyController.emplace("multitenancySupport", true); + CatalogTestFixture::setUp(); + + _opCtx = operationContext(); + _bucketCatalog = &BucketCatalog::get(_opCtx); + + for (const auto& ns : {_tenant1Ns1, _tenant2Ns1}) { + ASSERT_OK(createCollection( + _opCtx, + ns.dbName(), + BSON("create" << ns.coll() << "timeseries" << 
_makeTimeseriesOptionsForCreate()))); + } +} + BucketCatalogTest::RunBackgroundTaskAndWaitForFailpoint::RunBackgroundTaskAndWaitForFailpoint( const std::string& failpointName, std::function&& fn) { auto fp = globalFailPointRegistry().find(failpointName); @@ -244,7 +299,7 @@ Status BucketCatalogTest::_reopenBucket(const CollectionPtr& coll, const BSONObj const boost::optional options = coll->getTimeseriesOptions(); invariant(options, str::stream() << "Attempting to reopen a bucket for a non-timeseries collection: " - << ns); + << ns.toStringForErrorMsg()); BSONElement metadata; auto metaFieldName = options->getMetaField(); @@ -272,7 +327,7 @@ Status BucketCatalogTest::_reopenBucket(const CollectionPtr& coll, const BSONObj } auto bucket = std::move(res.getValue()); - auto stripeNumber = internal::getStripeNumber(key); + auto stripeNumber = internal::getStripeNumber(key, _bucketCatalog->numberOfStripes); // Register the reopened bucket with the catalog. auto& stripe = _bucketCatalog->stripes[stripeNumber]; @@ -395,6 +450,44 @@ TEST_F(BucketCatalogTest, InsertIntoDifferentBuckets) { } } +TEST_F(BucketCatalogTest, InsertThroughDifferentCatalogsIntoDifferentBuckets) { + BucketCatalog temporaryBucketCatalog(/*numberOfStripes=*/1); + auto result1 = insert(_opCtx, + *_bucketCatalog, + _ns1, + _getCollator(_ns1), + _getTimeseriesOptions(_ns1), + BSON(_timeField << Date_t::now()), + CombineWithInsertsFromOtherClients::kAllow); + auto batch1 = result1.getValue().batch; + auto result2 = insert(_opCtx, + temporaryBucketCatalog, + _ns1, + _getCollator(_ns1), + _getTimeseriesOptions(_ns1), + BSON(_timeField << Date_t::now()), + CombineWithInsertsFromOtherClients::kAllow); + auto batch2 = result2.getValue().batch; + + // Inserts should be into different buckets (and therefore batches) because they went through + // different bucket catalogs. + ASSERT_NE(batch1, batch2); + + // Committing one bucket should only return the one document in that bucket and should not + // affect the other bucket. + ASSERT(claimWriteBatchCommitRights(*batch1)); + ASSERT_OK(prepareCommit(*_bucketCatalog, batch1)); + ASSERT_EQ(batch1->measurements.size(), 1); + ASSERT_EQ(batch1->numPreviouslyCommittedMeasurements, 0); + finish(*_bucketCatalog, batch1, {}); + + ASSERT(claimWriteBatchCommitRights(*batch2)); + ASSERT_OK(prepareCommit(temporaryBucketCatalog, batch2)); + ASSERT_EQ(batch2->measurements.size(), 1); + ASSERT_EQ(batch2->numPreviouslyCommittedMeasurements, 0); + finish(temporaryBucketCatalog, batch2, {}); +} + TEST_F(BucketCatalogTest, InsertIntoSameBucketArray) { auto result1 = insert( _opCtx, @@ -548,13 +641,27 @@ TEST_F(BucketCatalogTest, ClearDatabaseBuckets) { _insertOneAndCommit(_ns2, 0); _insertOneAndCommit(_ns3, 0); - clear(*_bucketCatalog, _ns1.db()); + clear(*_bucketCatalog, _ns1.dbName()); _insertOneAndCommit(_ns1, 0); _insertOneAndCommit(_ns2, 0); _insertOneAndCommit(_ns3, 1); } +TEST_F(BucketCatalogInMultitenancyEnv, ClearDatabaseBuckets) { + _insertOneAndCommit(_tenant1Ns1, 0); + _insertOneAndCommit(_tenant2Ns1, 0); + + // Clear the buckets for the database of tenant1. + clear(*_bucketCatalog, _tenant1Ns1.dbName()); + _insertOneAndCommit(_tenant1Ns1, 0); + _insertOneAndCommit(_tenant2Ns1, 1); + + // Clear the buckets for the database of tenant2. 
+ clear(*_bucketCatalog, _tenant2Ns1.dbName()); + _insertOneAndCommit(_tenant2Ns1, 0); +} + TEST_F(BucketCatalogTest, InsertBetweenPrepareAndFinish) { auto batch1 = insert(_opCtx, *_bucketCatalog, @@ -833,7 +940,7 @@ TEST_F(BucketCatalogTest, PrepareCommitOnClearedBatchWithAlreadyPreparedBatch) { ASSERT_EQ(batch1->bucketHandle.bucketId, batch2->bucketHandle.bucketId); // Now clear the bucket. Since there's a prepared batch it should conflict. - clear(*_bucketCatalog, _ns1); + clearBucketState(_bucketCatalog->bucketStateRegistry, batch1->bucketHandle.bucketId); // Now try to prepare the second batch. Ensure it aborts the batch. ASSERT(claimWriteBatchCommitRights(*batch2)); @@ -1038,6 +1145,7 @@ TEST_F(BucketCatalogTest, AbortingBatchEnsuresBucketIsEventuallyClosed) { // Wait for the batch 2 task to finish preparing commit. Since batch 1 finished, batch 2 should // be unblocked. Note that after aborting batch 3, batch 2 was not in a prepared state, so we // expect the prepareCommit() call to fail. + ASSERT_NOT_OK(prepareCommit(*_bucketCatalog, batch2)); ASSERT(isWriteBatchFinished(*batch2)); // Make sure a new batch ends up in a new bucket. diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_identifiers.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_identifiers.cpp index c9d82ead0c176..293c9ff05e515 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_identifiers.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_identifiers.cpp @@ -29,6 +29,8 @@ #include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" +#include + namespace mongo::timeseries::bucket_catalog { BucketId::BucketId(const NamespaceString& n, const OID& o) diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_identifiers.h b/src/mongo/db/timeseries/bucket_catalog/bucket_identifiers.h index 8da310377bd79..4ee6707dc6188 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_identifiers.h +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_identifiers.h @@ -29,6 +29,7 @@ #pragma once +#include #include #include "mongo/bson/oid.h" diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_metadata.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_metadata.cpp index ecf07a484ed9c..8f5442165ac6e 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_metadata.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_metadata.cpp @@ -29,7 +29,12 @@ #include "mongo/db/timeseries/bucket_catalog/bucket_metadata.h" +#include + +#include + #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" namespace mongo::timeseries::bucket_catalog { diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_metadata.h b/src/mongo/db/timeseries/bucket_catalog/bucket_metadata.h index 6f68df7181911..4ea2fbb4a90ce 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_metadata.h +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_metadata.h @@ -32,6 +32,8 @@ #include #include +#include + #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_state.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_state.cpp deleted file mode 100644 index 73a3fcc86a4cd..0000000000000 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_state.cpp +++ /dev/null @@ -1,140 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. 
- * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/timeseries/bucket_catalog/bucket_state.h" - -#include "mongo/util/str.h" - -namespace mongo::timeseries::bucket_catalog { - -BucketState& BucketState::setFlag(BucketStateFlag flag) { - _state |= static_cast(flag); - return *this; -} - -BucketState& BucketState::unsetFlag(BucketStateFlag flag) { - _state &= ~static_cast(flag); - return *this; -} - -BucketState& BucketState::addDirectWrite() { - // Track the number of DirectWrites on the Bucket so we can properly unset the flag later on. - _numberOfDirectWrites++; - return setFlag(BucketStateFlag::kPendingDirectWrite); -} - -BucketState& BucketState::removeDirectWrite() { - invariant(isSet(BucketStateFlag::kPendingDirectWrite)); - - // We only unset the 'kPendingDirectWrite' flag when the number of direct writers reaches 0. - _numberOfDirectWrites--; - if (_numberOfDirectWrites > 0) { - return *this; - } - - // The last pending direct write must set the 'kCleared' flag. 
- return unsetFlag(BucketStateFlag::kPendingDirectWrite).setFlag(BucketStateFlag::kCleared); -} - -BucketState& BucketState::reset() { - _state = 0; - _numberOfDirectWrites = 0; - return *this; -} - -int32_t BucketState::getNumberOfDirectWrites() const { - return _numberOfDirectWrites; -} - -bool BucketState::isSet(BucketStateFlag flag) const { - return _state & static_cast(flag); -} - -bool BucketState::isPrepared() const { - constexpr decltype(_state) mask = static_cast(BucketStateFlag::kPrepared); - return _state & mask; -} - -bool BucketState::conflictsWithReopening() const { - constexpr decltype(_state) mask = - static_cast(BucketStateFlag::kPendingCompression) | - static_cast(BucketStateFlag::kPendingDirectWrite); - return _state & mask; -} - -bool BucketState::conflictsWithInsertion() const { - constexpr decltype(_state) mask = static_cast(BucketStateFlag::kCleared) | - static_cast(BucketStateFlag::kPendingCompression) | - static_cast(BucketStateFlag::kPendingDirectWrite); - return _state & mask; -} - -bool BucketState::operator==(const BucketState& other) const { - return _state == other._state; -} - -std::string BucketState::toString() const { - str::stream str; - str << "["; - - bool first = true; - auto output = [&first, &str](std::string name) { - if (first) { - first = false; - } else { - str << ", "; - } - str << name; - }; - - if (isSet(BucketStateFlag::kPrepared)) { - output("prepared"); - } - - if (isSet(BucketStateFlag::kCleared)) { - output("cleared"); - } - - if (isSet(BucketStateFlag::kPendingCompression)) { - output("pendingCompression"); - } - - if (isSet(BucketStateFlag::kPendingDirectWrite)) { - output("pendingDirectWrite(count=" + std::to_string(_numberOfDirectWrites) + ")"); - } - - if (isSet(BucketStateFlag::kUntracked)) { - output("untracked"); - } - - str << "]"; - return str; -} - -} // namespace mongo::timeseries::bucket_catalog diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_state.h b/src/mongo/db/timeseries/bucket_catalog/bucket_state.h deleted file mode 100644 index f8976ddac5c5f..0000000000000 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_state.h +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. 
If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include -#include - -namespace mongo::timeseries::bucket_catalog { - -enum class BucketStateFlag : std::uint8_t { - // Bucket has a prepared batch outstanding. - kPrepared = 0b00000001, - // In-memory representation of the bucket may be out of sync with on-disk data. Bucket - // should not be inserted into. - kCleared = 0b00000010, - // Bucket is effectively closed, but has an outstanding compression operation pending, so it - // is also not eligible for reopening. - kPendingCompression = 0b00000100, - // Bucket is effectively closed, but has an outstanding direct write pending, so it is also - // not eligible for reopening. - kPendingDirectWrite = 0b00001000, - // Bucket state is stored in the catalog for synchronization purposes only, but the actual - // bucket isn't stored in the catalog, nor is it archived. - kUntracked = 0b00010000, -}; - -class BucketState { -public: - BucketState& setFlag(BucketStateFlag); - BucketState& unsetFlag(BucketStateFlag); - BucketState& addDirectWrite(); - BucketState& removeDirectWrite(); - BucketState& reset(); - - int32_t getNumberOfDirectWrites() const; - - bool isSet(BucketStateFlag) const; - bool isPrepared() const; - bool conflictsWithReopening() const; - bool conflictsWithInsertion() const; - - bool operator==(const BucketState&) const; - std::string toString() const; - -private: - std::underlying_type::type _state = 0; - int32_t _numberOfDirectWrites = 0; -}; - -} // namespace mongo::timeseries::bucket_catalog diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry.cpp index 0767a7843b4ae..7234090a55189 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry.cpp @@ -29,7 +29,23 @@ #include "mongo/db/timeseries/bucket_catalog/bucket_state_registry.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/timeseries/bucket_catalog/bucket.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" namespace mongo::timeseries::bucket_catalog { @@ -87,69 +103,16 @@ bool isMemberOfClearedSet(BucketStateRegistry& registry, WithLock lock, Bucket* return false; } -boost::optional changeBucketStateHelper( - BucketStateRegistry& registry, - WithLock lock, - const BucketId& bucketId, - const BucketStateRegistry::StateChangeFn& change) { +void markIndividualBucketCleared(BucketStateRegistry& registry, + WithLock catalogLock, + const BucketId& bucketId) { auto it = registry.bucketStates.find(bucketId); - const boost::optional initial = - (it == registry.bucketStates.end()) ? boost::none : boost::make_optional(it->second); - const boost::optional target = change(initial, registry.currentEra); - - // If we are initiating or finishing a direct write, we need to advance the era. This allows us - // to synchronize with reopening attempts that do not directly observe a state with the - // kPendingDirectWrite flag set, but which nevertheless may be trying to reopen a stale bucket. 
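(The era-advancing rationale in the removed helper above carries over to the new addDirectWrite()/removeDirectWrite() further down. Below is a sketch of the handshake, assuming the bucket is not tracked in the registry when the direct write starts; the wrapper function and the concrete era values are illustrative.)

```cpp
void staleReopeningExample(BucketStateRegistry& registry, const BucketId& bucketId) {
    // 1. A reopening attempt snapshots the era before it goes to disk to fetch the bucket.
    auto reopeningEra = getCurrentEra(registry);           // say, era 5

    // 2. A direct write begins and completes on the same bucket in the meantime; starting
    //    it (on an untracked bucket) and finishing it each advance the era.
    addDirectWrite(registry, bucketId);                     // era becomes 6
    removeDirectWrite(registry, bucketId);                  // era becomes 7

    // 3. When the reopening attempt finally tries to register its bucket, the stale era is
    //    caught even though the direct-write state itself is long gone.
    auto status = initializeBucketState(
        registry, bucketId, /*bucket=*/nullptr, /*targetEra=*/reopeningEra);
    invariant(status.code() == ErrorCodes::WriteConflict);  // "Bucket may be stale"
}
```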
- if ((target.has_value() && target.value().isSet(BucketStateFlag::kPendingDirectWrite) && - (!initial.has_value() || !initial.value().isSet(BucketStateFlag::kPendingDirectWrite))) || - (initial.has_value() && initial.value().isSet(BucketStateFlag::kPendingDirectWrite) && - (!target.has_value() || !target.value().isSet(BucketStateFlag::kPendingDirectWrite)))) { - ++registry.currentEra; - } - - // If initial and target are not both set, then we are either initializing or erasing the state. - if (!target.has_value()) { - if (initial.has_value()) { - registry.bucketStates.erase(it); - } - return boost::none; - } else if (!initial.has_value()) { - registry.bucketStates.emplace(bucketId, target.value()); - return target; - } - - // At this point we can now assume that both initial and target are set. - - // We cannot prepare a bucket that isn't eligible for insertions. We expect to attempt this when - // we try to prepare a batch on a bucket that's been recently cleared. - if (!initial.value().isPrepared() && target.value().isPrepared() && - initial.value().conflictsWithInsertion()) { - return initial; + if (it == registry.bucketStates.end() || + stdx::holds_alternative(it->second)) { + return; } - - // We cannot transition from a prepared state to pending compression, as that would indicate a - // programmer error. - invariant(!initial.value().isPrepared() || - !target.value().isSet(BucketStateFlag::kPendingCompression)); - - it->second = target.value(); - - return target; -} - -boost::optional markIndividualBucketCleared(BucketStateRegistry& registry, - WithLock lock, - const BucketId& bucketId) { - return changeBucketStateHelper( - registry, - lock, - bucketId, - [](boost::optional input, std::uint64_t) -> boost::optional { - if (!input.has_value()) { - return boost::none; - } - return input.value().setFlag(BucketStateFlag::kCleared); - }); + it->second = (isBucketStatePrepared(it->second)) ? BucketState::kPreparedAndCleared + : BucketState::kCleared; } } // namespace @@ -191,39 +154,229 @@ std::uint64_t getClearedSetsCount(const BucketStateRegistry& registry) { return registry.clearedSets.size(); } -boost::optional getBucketState(BucketStateRegistry& registry, Bucket* bucket) { +boost::optional> getBucketState( + BucketStateRegistry& registry, Bucket* bucket) { stdx::lock_guard catalogLock{registry.mutex}; + // If the bucket has been cleared, we will set the bucket state accordingly to reflect that. if (isMemberOfClearedSet(registry, catalogLock, bucket)) { - return markIndividualBucketCleared(registry, catalogLock, bucket->bucketId); + markIndividualBucketCleared(registry, catalogLock, bucket->bucketId); } + auto it = registry.bucketStates.find(bucket->bucketId); - return it != registry.bucketStates.end() ? boost::make_optional(it->second) : boost::none; + if (it == registry.bucketStates.end()) { + return boost::none; + } + + return it->second; } -boost::optional getBucketState(const BucketStateRegistry& registry, - const BucketId& bucketId) { +boost::optional> getBucketState( + BucketStateRegistry& registry, const BucketId& bucketId) { stdx::lock_guard catalogLock{registry.mutex}; + auto it = registry.bucketStates.find(bucketId); - return it != registry.bucketStates.end() ? 
boost::make_optional(it->second) : boost::none; + if (it == registry.bucketStates.end()) { + return boost::none; + } + + return it->second; } -boost::optional changeBucketState(BucketStateRegistry& registry, - Bucket* bucket, - const BucketStateRegistry::StateChangeFn& change) { +bool isBucketStateCleared(stdx::variant& state) { + if (auto* bucketState = stdx::get_if(&state)) { + return *bucketState == BucketState::kCleared || + *bucketState == BucketState::kPreparedAndCleared; + } + return false; +} + +bool isBucketStatePrepared(stdx::variant& state) { + if (auto* bucketState = stdx::get_if(&state)) { + return *bucketState == BucketState::kPrepared || + *bucketState == BucketState::kPreparedAndCleared; + } + return false; +} + +bool conflictsWithReopening(stdx::variant& state) { + return stdx::holds_alternative(state); +} + +bool conflictsWithInsertions(stdx::variant& state) { + return conflictsWithReopening(state) || isBucketStateCleared(state); +} + +Status initializeBucketState(BucketStateRegistry& registry, + const BucketId& bucketId, + Bucket* bucket, + boost::optional targetEra) { stdx::lock_guard catalogLock{registry.mutex}; - if (isMemberOfClearedSet(registry, catalogLock, bucket)) { - return markIndividualBucketCleared(registry, catalogLock, bucket->bucketId); + + // Returns a WriteConflict error if the target Era is older than the registry Era or if the + // 'bucket' is cleared. + if (targetEra.has_value() && targetEra < registry.currentEra) { + return {ErrorCodes::WriteConflict, "Bucket may be stale"}; + } else if (bucket && isMemberOfClearedSet(registry, catalogLock, bucket)) { + markIndividualBucketCleared(registry, catalogLock, bucketId); + return {ErrorCodes::WriteConflict, "Bucket may be stale"}; } - return changeBucketStateHelper(registry, catalogLock, bucket->bucketId, change); + auto it = registry.bucketStates.find(bucketId); + if (it == registry.bucketStates.end()) { + registry.bucketStates.emplace(bucketId, BucketState::kNormal); + return Status::OK(); + } else if (conflictsWithReopening(it->second)) { + // If the bucket is cleared or we are currently performing direct writes on it we cannot + // initialize the bucket to a normal state. + return {ErrorCodes::WriteConflict, + "Bucket initialization failed: conflict with an exisiting bucket"}; + } + + invariant(!isBucketStatePrepared(it->second)); + it->second = BucketState::kNormal; + + return Status::OK(); } -boost::optional changeBucketState(BucketStateRegistry& registry, - const BucketId& bucketId, - const BucketStateRegistry::StateChangeFn& change) { +StateChangeSucessful prepareBucketState(BucketStateRegistry& registry, + const BucketId& bucketId, + Bucket* bucket) { stdx::lock_guard catalogLock{registry.mutex}; - return changeBucketStateHelper(registry, catalogLock, bucketId, change); + + if (bucket && isMemberOfClearedSet(registry, catalogLock, bucket)) { + markIndividualBucketCleared(registry, catalogLock, bucketId); + return StateChangeSucessful::kNo; + } + + auto it = registry.bucketStates.find(bucketId); + invariant(it != registry.bucketStates.end()); + + // We cannot update the bucket if it is in a cleared state or has a pending direct write. + if (conflictsWithInsertions(it->second)) { + return StateChangeSucessful::kNo; + } + + // We cannot prepare an already prepared bucket. 
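(For callers, the variant now returned by getBucketState() is usually consumed through the predicates defined above rather than inspected directly. A sketch of the typical check, mirroring useBucket(); the wrapper name is invented.)

```cpp
bool isBucketUsableForInsert(BucketStateRegistry& registry, Bucket* bucket) {
    auto state = getBucketState(registry, bucket);
    if (!state.has_value()) {
        // Untracked: the registry has no entry for this bucket, so it is not open for use.
        return false;
    }
    // A DirectWriteCounter alternative, or a cleared BucketState, conflicts with insertion;
    // kNormal and kPrepared are both insertable.
    return !conflictsWithInsertions(state.value());
}
```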
+ invariant(!isBucketStatePrepared(it->second)); + + it->second = BucketState::kPrepared; + return StateChangeSucessful::kYes; +} + +StateChangeSucessful unprepareBucketState(BucketStateRegistry& registry, + const BucketId& bucketId, + Bucket* bucket) { + stdx::lock_guard catalogLock{registry.mutex}; + + if (bucket && isMemberOfClearedSet(registry, catalogLock, bucket)) { + markIndividualBucketCleared(registry, catalogLock, bucketId); + return StateChangeSucessful::kNo; + } + + auto it = registry.bucketStates.find(bucketId); + invariant(it != registry.bucketStates.end() && + stdx::holds_alternative(it->second)); + invariant(isBucketStatePrepared(it->second)); + + auto bucketState = stdx::get(it->second); + // There is also a chance the state got cleared, in which case we should keep the state as + // 'kCleared'. + it->second = (bucketState == BucketState::kPreparedAndCleared) ? BucketState::kCleared + : BucketState::kNormal; + return StateChangeSucessful::kYes; +} + +stdx::variant addDirectWrite(BucketStateRegistry& registry, + const BucketId& bucketId, + bool stopTracking) { + stdx::lock_guard catalogLock{registry.mutex}; + + auto it = registry.bucketStates.find(bucketId); + DirectWriteCounter newDirectWriteCount = 1; + if (it == registry.bucketStates.end()) { + // If we are initiating a direct write, we need to advance the era. This allows us to + // synchronize with reopening attempts that do not directly observe a state with direct + // write counter, but which nevertheless may be trying to reopen a stale bucket. + ++registry.currentEra; + + // We can perform direct writes on buckets not being tracked by the registry. Tracked by a + // negative value to signify we must delete the state from the 'registry' when the counter + // reaches 0. + newDirectWriteCount *= -1; + registry.bucketStates.emplace(bucketId, newDirectWriteCount); + return newDirectWriteCount; + } else if (auto* directWriteCount = stdx::get_if(&it->second)) { + if (*directWriteCount > 0) { + newDirectWriteCount = *directWriteCount + 1; + } else { + newDirectWriteCount = *directWriteCount - 1; + } + } else if (isBucketStatePrepared(it->second)) { + // Cannot perform direct writes on prepared buckets. + return it->second; + } + + // Convert the direct write counter to a negative value so we can interpret it as an untracked + // state when the counter goes to 0. + if (stopTracking && newDirectWriteCount > 0) { + newDirectWriteCount *= -1; + } + it->second = newDirectWriteCount; + return it->second; +} + +void removeDirectWrite(BucketStateRegistry& registry, const BucketId& bucketId) { + stdx::lock_guard catalogLock{registry.mutex}; + + auto it = registry.bucketStates.find(bucketId); + invariant(it != registry.bucketStates.end() && + stdx::holds_alternative(it->second)); + + bool removingFinalDirectWrite = true; + auto directWriteCount = stdx::get(it->second); + if (directWriteCount == 1) { + it->second = BucketState::kCleared; + } else if (directWriteCount == -1) { + registry.bucketStates.erase(it); + } else { + removingFinalDirectWrite = false; + directWriteCount = (directWriteCount > 0) ? directWriteCount - 1 : directWriteCount + 1; + it->second = directWriteCount; + } + + if (removingFinalDirectWrite) { + // If we are finishing a direct write, we need to advance the era. This allows us to + // synchronize with reopening attempts that do not directly observe a state with direct + // write counter, but which nevertheless may be trying to reopen a stale bucket. 
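(A worked sequence tracing the counter arithmetic implemented above: positive counts keep the bucket tracked and end in kCleared, while negative counts erase the entry once the last direct write finishes. The starting state and call pattern are illustrative.)

```cpp
void directWriteCounterWalkthrough(BucketStateRegistry& registry, const BucketId& bucketId) {
    // Assume the bucket is currently tracked as BucketState::kNormal.
    addDirectWrite(registry, bucketId);                         // counter: +1
    addDirectWrite(registry, bucketId);                         // counter: +2
    removeDirectWrite(registry, bucketId);                      // counter: +1
    removeDirectWrite(registry, bucketId);                      // reaches 0 -> BucketState::kCleared

    // With 'stopTracking' the counter is kept negative, so draining it erases the entry
    // instead of leaving a kCleared tombstone behind.
    addDirectWrite(registry, bucketId, /*stopTracking=*/true);  // counter: -1 (entry was kCleared)
    addDirectWrite(registry, bucketId);                         // counter: -2
    removeDirectWrite(registry, bucketId);                      // counter: -1
    removeDirectWrite(registry, bucketId);                      // entry erased from the registry
}
```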
+ ++registry.currentEra; + } +} + +void clearBucketState(BucketStateRegistry& registry, const BucketId& bucketId) { + stdx::lock_guard catalogLock{registry.mutex}; + markIndividualBucketCleared(registry, catalogLock, bucketId); +} + +void stopTrackingBucketState(BucketStateRegistry& registry, const BucketId& bucketId) { + stdx::lock_guard catalogLock{registry.mutex}; + auto it = registry.bucketStates.find(bucketId); + if (it == registry.bucketStates.end()) { + return; + } + + if (conflictsWithReopening(it->second)) { + // We cannot release the bucket state of pending direct writes. + auto directWriteCount = stdx::get(it->second); + if (directWriteCount > 0) { + // A negative value signals the immediate removal of the bucket state after the + // completion of the direct writes. + directWriteCount *= -1; + } + it->second = directWriteCount; + } else { + registry.bucketStates.erase(it); + } } void appendStats(const BucketStateRegistry& registry, BSONObjBuilder& base) { @@ -239,4 +392,29 @@ void appendStats(const BucketStateRegistry& registry, BSONObjBuilder& base) { static_cast(registry.clearedSets.size())); } +std::string bucketStateToString(const stdx::variant& state) { + if (auto* directWriteCount = stdx::get_if(&state)) { + return fmt::format("{{type: DirectWrite, value: {}}}", *directWriteCount); + } + + auto bucketState = stdx::get(state); + switch (bucketState) { + case BucketState::kNormal: { + return "{{type: BucketState, value: kNormal}}"; + } + case BucketState::kPrepared: { + return "{{type: BucketState, value: kPrepared}}"; + } + case BucketState::kCleared: { + return "{{type: BucketState, value: kCleared}}"; + } + case BucketState::kPreparedAndCleared: { + return "{{type: BucketState, value: kPreparedAndCleared}}"; + } + default: { + MONGO_UNREACHABLE; + } + } +} + } // namespace mongo::timeseries::bucket_catalog diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry.h b/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry.h index 1276fe929b66d..63a3467ec35c5 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry.h +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry.h @@ -29,16 +29,72 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" #include "mongo/db/namespace_string.h" #include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" -#include "mongo/db/timeseries/bucket_catalog/bucket_state.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/hierarchical_acquisition.h" namespace mongo::timeseries::bucket_catalog { struct Bucket; +/** + * Describes if the state within the BucketStateRegistry was successfully changed. 
+ */ +enum class StateChangeSucessful { kYes, kNo }; + +/** + * State Transition Chart: + * {+ = valid transition, INV = invariants, WCE = throws WriteConflictException, nop = no-operation} + * + * | Current State | Tranistion State | + * |--------------------|:---------:|:------:|:-----:|:--------:|:------------------:| + * | | Untracked | Normal | Clear | Prepared | DirectWriteCounter | + * |--------------------|-----------|--------|-------|----------|--------------------| + * | Untracked | nop | + | nop | INV | + | + * | Normal | + | + | + | + | + | + * | Clear | + | + | + | nop | + | + * | Prepared | + | INV | + | INV | no-op | + * | PreparedAndCleared | + | WCE | + | nop | WCE | + * | DirectWriteCounter | nop | WCE | nop | nop | + | + * + * Note: we never explicitly set the 'kPreparedAndCleared' state. + */ +enum class BucketState : uint8_t { + kNormal, // Can accept inserts. + kPrepared, // Can accept inserts, and has an outstanding prepared commit. + kCleared, // Cannot accept inserts as the bucket will soon be removed from the registry. + kPreparedAndCleared // Cannot accept inserts, and has an outstanding prepared commit. This + // state will propogate WriteConflictExceptions to all writers aside from + // the writer who prepared the commit. +}; + +/** + * Writes initiated outside of the BucketCatalog are considered "direct writes" since they are + * operating directly on the 'system.buckets' collection. We must synchronize these writes with the + * BucketCatalog to ensure we don't try to insert into a bucket that is currently being written to. + * We also represent buckets undergoing compression with a DirectWriteCounter. + * + * Note: we cannot perform direct writes on prepared buckets and there can be multiple direct writes + * on the same bucket. Conflicts between multiple simultaneous direct writes are mediated by the + * storage engine. + */ +using DirectWriteCounter = std::int8_t; + /** * A helper struct to hold and synchronize both individual bucket states and global state about the * catalog era used to support asynchronous 'clear' operations. @@ -49,8 +105,6 @@ struct Bucket; struct BucketStateRegistry { using Era = std::uint64_t; using ShouldClearFn = std::function; - using StateChangeFn = - std::function(boost::optional, Era)>; mutable Mutex mutex = MONGO_MAKE_LATCH(HierarchicalAcquisitionLevel(0), "BucketStateRegistry::mutex"); @@ -63,7 +117,8 @@ struct BucketStateRegistry { std::map bucketsPerEra; // Bucket state for synchronization with direct writes. - stdx::unordered_map bucketStates; + stdx::unordered_map, BucketHasher> + bucketStates; // Registry storing 'clearSetOfBuckets' operations. Maps from era to a lambda function which // takes in information about a Bucket and returns whether the Bucket belongs to the cleared @@ -93,44 +148,168 @@ std::uint64_t getClearedSetsCount(const BucketStateRegistry& registry); * Retrieves the bucket state if it is tracked in the catalog. Modifies the bucket state if * the bucket is found to have been cleared. */ -boost::optional getBucketState(BucketStateRegistry& registry, Bucket* bucket); +boost::optional> getBucketState( + BucketStateRegistry& registry, Bucket* bucket); /** * Retrieves the bucket state if it is tracked in the catalog. */ -boost::optional getBucketState(const BucketStateRegistry& registry, - const BucketId& bucketId); +boost::optional> getBucketState( + BucketStateRegistry& registry, const BucketId& bucketId); + +/** + * Returns true if the state is cleared. 
+ */ +bool isBucketStateCleared(stdx::variant& state); + +/** + * Returns true if the state is prepared. + */ +bool isBucketStatePrepared(stdx::variant& state); + +/** + * Returns true if the state conflicts with reopening (aka a direct write). + */ +bool conflictsWithReopening(stdx::variant& state); /** - * Checks whether the bucket has been cleared before changing the bucket state as requested. - * If the bucket has been cleared, it will set the kCleared flag instead and ignore the - * requested 'change'. For more details about how the 'change' is processed, see the other - * variant of this function that takes an 'OID' parameter. + * Returns true if the state conflicts with reopening or is cleared. */ -boost::optional changeBucketState(BucketStateRegistry& registry, - Bucket* bucket, - const BucketStateRegistry::StateChangeFn& change); +bool conflictsWithInsertions(stdx::variant& state); /** - * Changes the bucket state, taking into account the current state, the requested 'change', - * and allowed state transitions. The return value, if set, is the final state of the bucket - * with the given ID. + * Initializes the state of the bucket within the registry to a state of 'kNormal'. If included, + * checks the registry Era against the 'targetEra' prior to performing the initialization to prevent + * operating on a potentially stale bucket. Returns WriteConflict if the current bucket state + * conflicts with reopening. * - * If no state is currently tracked for 'id', then the optional input state to 'change' will - * be 'none'. To initialize the state, 'change' may return a valid `BucketState', and it - * will be added to the set of tracked states. + * | Current State | Result + * |--------------------|----------- + * | Untracked | kNormal + * | Normal | kNormal + * | Clear | kNormal + * | Prepared | invariants + * | PreparedAndCleared | throws WCE + * | DirectWriteCounter | throws WCE + */ +Status initializeBucketState(BucketStateRegistry& registry, + const BucketId& bucketId, + Bucket* bucket = nullptr, + boost::optional targetEra = boost::none); + +/** + * Transitions bucket state to 'kPrepared'. If included, checks if the 'bucket' has been marked as + * cleared prior to performing transition to prevent operating on a potentially stale bucket. + * Returns enum describing if the state change was successful or not. + * + * | Current State | Result + * |--------------------|----------- + * | Untracked | invariants + * | Normal | kPrepared + * | Clear | - + * | Prepared | invariants + * | PreparedAndCleared | - + * | DirectWriteCounter | - + */ +StateChangeSucessful prepareBucketState(BucketStateRegistry& registry, + const BucketId& bucketId, + Bucket* bucket = nullptr); + +/** + * Detransition bucket state from 'kPrepared' to 'kNormal' (or 'kCleared' if the bucket was cleared + * while the bucket was in the 'kPrepared' state). If included, checks if the 'bucket' has been + * marked as cleared prior to performing transition to prevent operating on a potentially stale + * bucket. Returns enum describing if the state change was successful or not. 
+ * + * | Current State | Result + * |--------------------|----------- + * | Untracked | invariants + * | Normal | invariants + * | Clear | invariants + * | Prepared | kNormal + * | PreparedAndCleared | kCleared + * | DirectWriteCounter | invariants + */ +StateChangeSucessful unprepareBucketState(BucketStateRegistry& registry, + const BucketId& bucketId, + Bucket* bucket = nullptr); + +/** + * Tracks the bucket with a counter which is incremented every time this function is called and must + * be followed by a call to 'removeDirectWrite'. We cannot perform this transition on prepared buckets. + * If 'stopTracking' is set, we will erase the bucket from the registry upon finishing all direct + * writes, else the bucket will transition to 'kCleared'. + * + * | Current State | Result + * |--------------------|----------------- + * | Untracked | negative count + * | Normal | positive count + * | Clear | positive count + * | Prepared | - + * | PreparedAndCleared | - + * | DirectWriteCounter | increments value + */ +stdx::variant addDirectWrite(BucketStateRegistry& registry, + const BucketId& bucketId, + bool stopTracking = false); + +/** + * Requires the state to be tracked by a counter. The direct write counter can be positive or + * negative, which affects the behavior of the state when the counter reaches 0. When positive, we + * decrement the counter and transition the state to 'kCleared' when it reaches 0. When negative, we + * increment the counter and erase the state when we reach 0. + * + * | Current State | Result + * |--------------------|----------------- + * | Untracked | invariants + * | Normal | invariants + * | Clear | invariants + * | Prepared | invariants + * | PreparedAndCleared | invariants + * | DirectWriteCounter | decrements value + */ +void removeDirectWrite(BucketStateRegistry& registry, const BucketId& bucketId); + +/** + * Transitions bucket state to 'kCleared' or 'kPreparedAndCleared'. No action is required for: + * i. buckets not currently being tracked by the registry + * ii. buckets with pending direct writes (since they will either be cleared or removed from the + * registry upon finishing) + * + * | Current State | Result + * |--------------------|-------------------- + * | Untracked | - + * | Normal | kCleared + * | Clear | kCleared + * | Prepared | kPreparedAndCleared + * | PreparedAndCleared | kPreparedAndCleared + * | DirectWriteCounter | - + */ +void clearBucketState(BucketStateRegistry& registry, const BucketId& bucketId); + +/** + * Erases the bucket state from the registry. If there are ongoing direct writes, erase the state + * once the writes finish. * - * Similarly, if 'change' returns 'none', the value will be removed from the registry. To - * perform a noop (i.e. if upon inspecting the input, the change would be invalid), 'change' - * may simply return its input state unchanged. + * | Current State | Result + * |--------------------|---------------- + * | Untracked | - + * | Normal | erases entry + * | Clear | erases entry + * | Prepared | erases entry + * | PreparedAndCleared | erases entry + * | DirectWriteCounter | negative value */ -boost::optional changeBucketState(BucketStateRegistry& registry, - const BucketId& bucketId, - const BucketStateRegistry::StateChangeFn& change); +void stopTrackingBucketState(BucketStateRegistry& registry, const BucketId& bucketId); /** * Appends statistics for observability. */ void appendStats(const BucketStateRegistry& registry, BSONObjBuilder& builder); +/** + * Helper to stringify BucketState.
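(Putting the tables above together, an illustrative lifecycle for a single bucket; this is not a real test, and return values are only checked where the walk-through depends on them.)

```cpp
void bucketStateLifecycle(BucketStateRegistry& registry, const BucketId& bucketId) {
    invariant(initializeBucketState(registry, bucketId).isOK());  // Untracked -> kNormal

    prepareBucketState(registry, bucketId);    // kNormal -> kPrepared
    clearBucketState(registry, bucketId);      // kPrepared -> kPreparedAndCleared
    unprepareBucketState(registry, bucketId);  // -> kCleared (the clear arrived while prepared)

    initializeBucketState(registry, bucketId).ignore();  // kCleared -> kNormal
    stopTrackingBucketState(registry, bucketId);         // kNormal -> erased from the registry
}
```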
+ */ +std::string bucketStateToString(const stdx::variant& state); + } // namespace mongo::timeseries::bucket_catalog diff --git a/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry_test.cpp b/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry_test.cpp index cb7ea2c97e55c..7a3647cfb2bdf 100644 --- a/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry_test.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/bucket_state_registry_test.cpp @@ -27,12 +27,41 @@ * it in the license file. */ -#include "mongo/db/storage/storage_parameters_gen.h" +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/oid.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/timeseries/bucket_catalog/bucket.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h" -#include "mongo/db/timeseries/bucket_catalog/bucket_state.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_metadata.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_state_registry.h" +#include "mongo/db/timeseries/bucket_catalog/closed_bucket.h" +#include "mongo/db/timeseries/bucket_catalog/execution_stats.h" +#include "mongo/db/timeseries/bucket_catalog/rollover.h" +#include "mongo/db/timeseries/bucket_catalog/write_batch.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/bson_test_util.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/time_support.h" namespace mongo::timeseries::bucket_catalog { namespace { @@ -48,7 +77,19 @@ class BucketStateRegistryTest : public BucketCatalog, public unittest::Test { bool hasBeenCleared(Bucket& bucket) { auto state = getBucketState(bucketStateRegistry, &bucket); - return (state && state.value().isSet(BucketStateFlag::kCleared)); + if (!state.has_value()) { + return false; + } + + return stdx::visit(OverloadedVisitor{[](BucketState bucketState) { + return bucketState == BucketState::kCleared || + bucketState == + BucketState::kPreparedAndCleared; + }, + [](DirectWriteCounter dwcount) { + return false; + }}, + *state); } Bucket& createBucket(const internal::CreationInfo& info) { @@ -60,7 +101,7 @@ class BucketStateRegistryTest : public BucketCatalog, public unittest::Test { bool cannotAccessBucket(Bucket& bucket) { if (hasBeenCleared(bucket)) { internal::removeBucket(*this, - stripes[internal::getStripeNumber(bucket.key)], + stripes[internal::getStripeNumber(bucket.key, numberOfStripes)], withLock, bucket, internal::RemovalMode::kAbort); @@ -71,25 +112,48 @@ class BucketStateRegistryTest : public BucketCatalog, public unittest::Test { } void checkAndRemoveClearedBucket(Bucket& bucket) { - auto a = internal::findBucket(bucketStateRegistry, - stripes[internal::getStripeNumber(bucket.key)], - withLock, - bucket.bucketId, - internal::IgnoreBucketState::kYes); + auto a = + internal::findBucket(bucketStateRegistry, + stripes[internal::getStripeNumber(bucket.key, numberOfStripes)], + withLock, + bucket.bucketId, + internal::IgnoreBucketState::kYes); ASSERT(a == 
&bucket); - auto b = internal::findBucket(bucketStateRegistry, - stripes[internal::getStripeNumber(bucket.key)], - withLock, - bucket.bucketId, - internal::IgnoreBucketState::kNo); + auto b = + internal::findBucket(bucketStateRegistry, + stripes[internal::getStripeNumber(bucket.key, numberOfStripes)], + withLock, + bucket.bucketId, + internal::IgnoreBucketState::kNo); ASSERT(b == nullptr); internal::removeBucket(*this, - stripes[internal::getStripeNumber(bucket.key)], + stripes[internal::getStripeNumber(bucket.key, numberOfStripes)], withLock, bucket, internal::RemovalMode::kAbort); } + bool doesBucketStateMatch(const BucketId& bucketId, + boost::optional expectedBucketState) { + auto state = getBucketState(bucketStateRegistry, bucketId); + if (!state.has_value()) { + // We don't expect the bucket to be tracked within the BucketStateRegistry. + return !expectedBucketState.has_value(); + } else if (stdx::holds_alternative(*state)) { + // If the state is tracked by a direct write counter, then the states are not equal. + return false; + } + + // Interpret the variant value as BucketState and check it against the expected value. + auto bucketState = stdx::get(*state); + return bucketState == expectedBucketState.value(); + } + + bool doesBucketHaveDirectWrite(const BucketId& bucketId) { + auto state = getBucketState(bucketStateRegistry, bucketId); + return state.has_value() && stdx::holds_alternative(*state); + } + WithLock withLock = WithLock::withoutLock(); NamespaceString ns1 = NamespaceString::createNamespaceString_forTest("db.test1"); NamespaceString ns2 = NamespaceString::createNamespaceString_forTest("db.test2"); @@ -103,213 +167,255 @@ class BucketStateRegistryTest : public BucketCatalog, public unittest::Test { TimeseriesOptions options; ExecutionStatsController stats = internal::getOrInitializeExecutionStats(*this, ns1); ClosedBuckets closedBuckets; - internal::CreationInfo info1{ - bucketKey1, internal::getStripeNumber(bucketKey1), date, options, stats, &closedBuckets}; - internal::CreationInfo info2{ - bucketKey2, internal::getStripeNumber(bucketKey2), date, options, stats, &closedBuckets}; - internal::CreationInfo info3{ - bucketKey3, internal::getStripeNumber(bucketKey3), date, options, stats, &closedBuckets}; + internal::CreationInfo info1{bucketKey1, + internal::getStripeNumber(bucketKey1, numberOfStripes), + date, + options, + stats, + &closedBuckets}; + internal::CreationInfo info2{bucketKey2, + internal::getStripeNumber(bucketKey2, numberOfStripes), + date, + options, + stats, + &closedBuckets}; + internal::CreationInfo info3{bucketKey3, + internal::getStripeNumber(bucketKey3, numberOfStripes), + date, + options, + stats, + &closedBuckets}; }; -TEST_F(BucketStateRegistryTest, BucketStateSetUnsetFlag) { - BucketState state; - auto testFlags = [&state](std::initializer_list set, - std::initializer_list unset) { - for (auto flag : set) { - ASSERT_TRUE(state.isSet(flag)); - } - for (auto flag : unset) { - ASSERT_FALSE(state.isSet(flag)); - } - }; - - testFlags({}, - { - BucketStateFlag::kPrepared, - BucketStateFlag::kCleared, - BucketStateFlag::kPendingCompression, - BucketStateFlag::kPendingDirectWrite, - }); - - state.setFlag(BucketStateFlag::kPrepared); - testFlags( - { - BucketStateFlag::kPrepared, - }, - { - BucketStateFlag::kCleared, - BucketStateFlag::kPendingCompression, - BucketStateFlag::kPendingDirectWrite, - }); - - state.setFlag(BucketStateFlag::kCleared); - testFlags( - { - BucketStateFlag::kPrepared, - BucketStateFlag::kCleared, - }, - { - 
BucketStateFlag::kPendingCompression, - BucketStateFlag::kPendingDirectWrite, - }); - - state.setFlag(BucketStateFlag::kPendingCompression); - testFlags( - { - BucketStateFlag::kPrepared, - BucketStateFlag::kCleared, - BucketStateFlag::kPendingCompression, - }, - { - - BucketStateFlag::kPendingDirectWrite, - }); - - state.setFlag(BucketStateFlag::kPendingDirectWrite); - testFlags( - { - BucketStateFlag::kPrepared, - BucketStateFlag::kCleared, - BucketStateFlag::kPendingCompression, - BucketStateFlag::kPendingDirectWrite, - }, - {}); - - state.unsetFlag(BucketStateFlag::kPrepared); - testFlags( - { - BucketStateFlag::kCleared, - BucketStateFlag::kPendingCompression, - BucketStateFlag::kPendingDirectWrite, - - }, - { - BucketStateFlag::kPrepared, - }); - - state.unsetFlag(BucketStateFlag::kCleared); - testFlags( - { - BucketStateFlag::kPendingCompression, - BucketStateFlag::kPendingDirectWrite, - - }, - { - BucketStateFlag::kPrepared, - BucketStateFlag::kCleared, - }); - - state.unsetFlag(BucketStateFlag::kPendingCompression); - testFlags( - { - BucketStateFlag::kPendingDirectWrite, - }, - { - BucketStateFlag::kPrepared, - BucketStateFlag::kCleared, - BucketStateFlag::kPendingCompression, - }); - - state.unsetFlag(BucketStateFlag::kPendingDirectWrite); - testFlags({}, - { - BucketStateFlag::kPrepared, - BucketStateFlag::kCleared, - BucketStateFlag::kPendingCompression, - BucketStateFlag::kPendingDirectWrite, - }); -} - -TEST_F(BucketStateRegistryTest, BucketStateReset) { - BucketState state; - - state.setFlag(BucketStateFlag::kPrepared); - state.setFlag(BucketStateFlag::kCleared); - state.setFlag(BucketStateFlag::kPendingCompression); - state.setFlag(BucketStateFlag::kPendingDirectWrite); - ASSERT_TRUE(state.isSet(BucketStateFlag::kPrepared)); - ASSERT_TRUE(state.isSet(BucketStateFlag::kCleared)); - ASSERT_TRUE(state.isSet(BucketStateFlag::kPendingCompression)); - ASSERT_TRUE(state.isSet(BucketStateFlag::kPendingDirectWrite)); - - state.reset(); - - ASSERT_FALSE(state.isSet(BucketStateFlag::kPrepared)); - ASSERT_FALSE(state.isSet(BucketStateFlag::kCleared)); - ASSERT_FALSE(state.isSet(BucketStateFlag::kPendingCompression)); - ASSERT_FALSE(state.isSet(BucketStateFlag::kPendingDirectWrite)); +TEST_F(BucketStateRegistryTest, TransitionsFromUntrackedState) { + RAIIServerParameterControllerForTest controller{"featureFlagTimeseriesScalabilityImprovements", + true}; + // Start with an untracked bucket in the registry. + auto& bucket = createBucket(info1); + stopTrackingBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, boost::none)); + + // We expect a no-op when attempting to stop tracking an already untracked bucket. + stopTrackingBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, boost::none)); + + // We expect a no-op when clearing an untracked bucket. + clearBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, boost::none)); + + // We expect transition to 'kNormal' to succeed. + ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId, /*bucket*/ nullptr)); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kNormal)); + // Reset the state. + stopTrackingBucketState(bucketStateRegistry, bucket.bucketId); + + // We expect direct writes to succeed on untracked buckets. 
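+    // (On an untracked bucket the direct write is recorded as a negative counter, so the registry
+    // entry is simply erased, rather than transitioned to 'kCleared', once the matching
+    // 'removeDirectWrite' calls bring the counter back to 0.)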
+ addDirectWrite(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucket.bucketId)); } -TEST_F(BucketStateRegistryTest, BucketStateIsPrepared) { - BucketState state; - - ASSERT_FALSE(state.isPrepared()); - - state.setFlag(BucketStateFlag::kPrepared); - ASSERT_TRUE(state.isPrepared()); - - state.setFlag(BucketStateFlag::kCleared); - state.setFlag(BucketStateFlag::kPendingCompression); - state.setFlag(BucketStateFlag::kPendingDirectWrite); - ASSERT_TRUE(state.isPrepared()); +DEATH_TEST_F(BucketStateRegistryTest, CannotPrepareAnUntrackedBucket, "invariant") { + // Start with an untracked bucket in the registry. + auto& bucket = createBucket(info1); + stopTrackingBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, boost::none)); - state.unsetFlag(BucketStateFlag::kPrepared); - ASSERT_FALSE(state.isPrepared()); + // We expect to invariant when attempting to prepare an untracked bucket. + ASSERT_TRUE(prepareBucketState(bucketStateRegistry, bucket.bucketId) == + StateChangeSucessful::kNo); } -TEST_F(BucketStateRegistryTest, BucketStateConflictsWithInsert) { - BucketState state; - ASSERT_FALSE(state.conflictsWithInsertion()); - - // Just prepared is false - state.setFlag(BucketStateFlag::kPrepared); - ASSERT_FALSE(state.conflictsWithInsertion()); - - // Prepared and cleared is true - state.setFlag(BucketStateFlag::kCleared); - ASSERT_TRUE(state.conflictsWithInsertion()); - - // Just cleared is true - state.reset(); - state.setFlag(BucketStateFlag::kCleared); - ASSERT_TRUE(state.conflictsWithInsertion()); - - // Pending operations are true - state.reset(); - state.setFlag(BucketStateFlag::kPendingCompression); - ASSERT_TRUE(state.conflictsWithInsertion()); +TEST_F(BucketStateRegistryTest, TransitionsFromNormalState) { + RAIIServerParameterControllerForTest controller{"featureFlagTimeseriesScalabilityImprovements", + true}; + // Start with a 'kNormal' bucket in the registry. + auto& bucket = createBucket(info1); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kNormal)); + + // We expect transition to 'kNormal' to succeed. + ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId, /*bucket*/ nullptr)); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kNormal)); + + // We can stop tracking a 'kNormal' bucket. + stopTrackingBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, boost::none)); + // Reset the state. + ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId, /*bucket*/ nullptr)); + + // We expect transition to 'kPrepared' to succeed. + (void)prepareBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPrepared)); + // Reset the state. + (void)unprepareBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kNormal)); + + // We expect transition to 'kClear' to succeed. + clearBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kCleared)); + // Reset the state. + stopTrackingBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId, /*bucket*/ nullptr)); + + // We expect direct writes to succeed on 'kNormal' buckets. 
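+    // (On a tracked bucket the counter starts positive, so the state transitions to 'kCleared'
+    // once all direct writes have finished.)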
+ addDirectWrite(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucket.bucketId)); +} - state.reset(); - state.setFlag(BucketStateFlag::kPendingDirectWrite); - ASSERT_TRUE(state.conflictsWithInsertion()); +TEST_F(BucketStateRegistryTest, TransitionsFromClearedState) { + RAIIServerParameterControllerForTest controller{"featureFlagTimeseriesScalabilityImprovements", + true}; + // Start with a 'kCleared' bucket in the registry. + auto& bucket = createBucket(info1); + clearBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kCleared)); + + // We expect transition to 'kCleared' to succeed. + clearBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kCleared)); + + // We can stop tracking a 'kCleared' bucket. + stopTrackingBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, boost::none)); + // Reset the state. + ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId, /*bucket*/ nullptr)); + clearBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kCleared)); + + // We expect transition to 'kNormal' to succeed. + ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId).code()); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kNormal)); + // Reset the state. + clearBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kCleared)); + + // We expect transition to 'kPrepared' to fail. + ASSERT_TRUE(prepareBucketState(bucketStateRegistry, bucket.bucketId) == + StateChangeSucessful::kNo); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kCleared)); + + // We expect direct writes to succeed on 'kCleared' buckets. + addDirectWrite(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucket.bucketId)); } -TEST_F(BucketStateRegistryTest, BucketStateConflictsWithReopening) { - BucketState state; - ASSERT_FALSE(state.conflictsWithReopening()); +TEST_F(BucketStateRegistryTest, TransitionsFromPreparedState) { + RAIIServerParameterControllerForTest controller{"featureFlagTimeseriesScalabilityImprovements", + true}; + // Start with a 'kPrepared' bucket in the registry. + auto& bucket = createBucket(info1); + (void)prepareBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPrepared)); + + // We expect direct writes to fail and leave the state as 'kPrepared'. + (void)addDirectWrite(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPrepared)); + + // We expect unpreparing bucket will transition the bucket state to 'kNormal'. + unprepareBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kNormal)); + // Reset the state. + (void)prepareBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPrepared)); + + // We expect transition to 'kCleared' to succeed and update the state as 'kPreparedAndCleared'. 
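+    // (Per the transition table for 'clearBucketState', clearing a prepared bucket yields the
+    // combined 'kPreparedAndCleared' state rather than plain 'kCleared'.)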
+    clearBucketState(bucketStateRegistry, bucket.bucketId);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPreparedAndCleared));
+
+    // We can untrack a 'kPrepared' bucket.
+    stopTrackingBucketState(bucketStateRegistry, bucket.bucketId);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, boost::none));
+}

-    // Just prepared is false
-    state.setFlag(BucketStateFlag::kPrepared);
-    ASSERT_FALSE(state.conflictsWithReopening());
+DEATH_TEST_F(BucketStateRegistryTest, CannotInitializeAPreparedBucket, "invariant") {
+    // Start with a 'kPrepared' bucket in the registry.
+    auto& bucket = createBucket(info1);
+    ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId));
+    (void)prepareBucketState(bucketStateRegistry, bucket.bucketId);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPrepared));

-    // Prepared and cleared is false
-    state.setFlag(BucketStateFlag::kCleared);
-    ASSERT_FALSE(state.conflictsWithReopening());
+    // We expect to invariant when attempting to initialize a 'kPrepared' bucket.
+    ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId));
+}

-    // Just cleared is false
-    state.reset();
-    state.setFlag(BucketStateFlag::kCleared);
-    ASSERT_FALSE(state.conflictsWithReopening());
+DEATH_TEST_F(BucketStateRegistryTest, CannotPrepareAnAlreadyPreparedBucket, "invariant") {
+    // Start with a 'kPrepared' bucket in the registry.
+    auto& bucket = createBucket(info1);
+    ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId));
+    (void)prepareBucketState(bucketStateRegistry, bucket.bucketId);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPrepared));
+    // We expect to invariant when attempting to prepare an already prepared bucket.
+    (void)prepareBucketState(bucketStateRegistry, bucket.bucketId);
+}

-    // Pending operations are true
-    state.reset();
-    state.setFlag(BucketStateFlag::kPendingCompression);
-    ASSERT_TRUE(state.conflictsWithReopening());
+TEST_F(BucketStateRegistryTest, TransitionsFromPreparedAndClearedState) {
+    RAIIServerParameterControllerForTest controller{"featureFlagTimeseriesScalabilityImprovements",
+                                                    true};
+    // Start with a 'kPreparedAndCleared' bucket in the registry.
+    auto& bucket = createBucket(info1);
+    (void)prepareBucketState(bucketStateRegistry, bucket.bucketId);
+    clearBucketState(bucketStateRegistry, bucket.bucketId);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPreparedAndCleared));
+
+    // We expect transition to 'kPrepared' to fail.
+    ASSERT_TRUE(prepareBucketState(bucketStateRegistry, bucket.bucketId) ==
+                StateChangeSucessful::kNo);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPreparedAndCleared));
+
+    // We expect direct writes to fail and leave the state as 'kPreparedAndCleared'.
+    (void)addDirectWrite(bucketStateRegistry, bucket.bucketId);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPreparedAndCleared));
+
+    // We expect that clearing the bucket will not affect the state.
+    clearBucketState(bucketStateRegistry, bucket.bucketId);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPreparedAndCleared));
+
+    // We expect untracking 'kPreparedAndCleared' buckets to remove the state.
+    stopTrackingBucketState(bucketStateRegistry, bucket.bucketId);
+    ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, boost::none));
+    // Reset the state.
+ ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId)); + ASSERT_TRUE(prepareBucketState(bucketStateRegistry, bucket.bucketId) == + StateChangeSucessful::kYes); + clearBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kPreparedAndCleared)); + + // We expect unpreparing 'kPreparedAndCleared' buckets to transition to 'kCleared'. + unprepareBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketStateMatch(bucket.bucketId, BucketState::kCleared)); +} - state.reset(); - state.setFlag(BucketStateFlag::kPendingDirectWrite); - ASSERT_TRUE(state.conflictsWithReopening()); +TEST_F(BucketStateRegistryTest, TransitionsFromDirectWriteState) { + RAIIServerParameterControllerForTest controller{"featureFlagTimeseriesScalabilityImprovements", + true}; + // Start with a bucket with a direct write in the registry. + auto& bucket = createBucket(info1); + ASSERT_OK(initializeBucketState(bucketStateRegistry, bucket.bucketId)); + auto bucketState = addDirectWrite(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucket.bucketId)); + auto originalDirectWriteCount = stdx::get(bucketState); + + // We expect future direct writes to add-on. + bucketState = addDirectWrite(bucketStateRegistry, bucket.bucketId); + auto newDirectWriteCount = stdx::get(bucketState); + ASSERT_GT(newDirectWriteCount, originalDirectWriteCount); + + // We expect untracking to leave the state unaffected. + stopTrackingBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucket.bucketId)); + + // We expect transition to 'kNormal' to return a WriteConflict. + ASSERT_EQ(initializeBucketState(bucketStateRegistry, bucket.bucketId), + ErrorCodes::WriteConflict); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucket.bucketId)); + + // We expect transition to 'kCleared' to leave the state unaffected. + clearBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucket.bucketId)); + + // We expect transition to 'kPrepared' to leave the state unaffected. 
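+    // (While the direct write counter is non-zero, prepare, clear, and untrack requests all leave
+    // the counter in place; the state only changes again once the outstanding writes drain.)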
+ (void)prepareBucketState(bucketStateRegistry, bucket.bucketId); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucket.bucketId)); } TEST_F(BucketStateRegistryTest, EraAdvancesAsExpected) { @@ -545,8 +651,7 @@ TEST_F(BucketStateRegistryTest, ArchivingBucketPreservesState) { internal::archiveBucket( *this, stripes[info1.stripe], WithLock::withoutLock(), bucket, closedBuckets); auto state = getBucketState(bucketStateRegistry, bucketId); - ASSERT_TRUE(state.has_value()); - ASSERT_TRUE(state == BucketState{}); + ASSERT_TRUE(doesBucketStateMatch(bucketId, BucketState::kNormal)); } TEST_F(BucketStateRegistryTest, AbortingBatchRemovesBucketState) { @@ -570,14 +675,13 @@ TEST_F(BucketStateRegistryTest, ClosingBucketGoesThroughPendingCompressionState) auto& bucket = createBucket(info1); auto bucketId = bucket.bucketId; - ASSERT(getBucketState(bucketStateRegistry, bucketId).value() == BucketState{}); + ASSERT_TRUE(doesBucketStateMatch(bucketId, BucketState::kNormal)); auto stats = internal::getOrInitializeExecutionStats(*this, info1.key.ns); auto batch = std::make_shared(BucketHandle{bucketId, info1.stripe}, 0, stats); ASSERT(claimWriteBatchCommitRights(*batch)); ASSERT_OK(prepareCommit(*this, batch)); - ASSERT(getBucketState(bucketStateRegistry, bucketId).value() == - BucketState{}.setFlag(BucketStateFlag::kPrepared)); + ASSERT_TRUE(doesBucketStateMatch(bucketId, BucketState::kPrepared)); { // Fool the system by marking the bucket for closure, then finish the batch so it detects @@ -588,15 +692,13 @@ TEST_F(BucketStateRegistryTest, ClosingBucketGoesThroughPendingCompressionState) ASSERT(closedBucket.has_value()); ASSERT_EQ(closedBucket.value().bucketId.oid, bucketId.oid); - // Bucket should now be in pending compression state. - ASSERT(getBucketState(bucketStateRegistry, bucketId).has_value()); - ASSERT(getBucketState(bucketStateRegistry, bucketId).value() == - BucketState{}.setFlag(BucketStateFlag::kPendingCompression)); + // Bucket should now be in pending compression state represented by direct write. + ASSERT_TRUE(doesBucketHaveDirectWrite(bucketId)); } // Destructing the 'ClosedBucket' struct should report it compressed should remove it from the // catalog. 
- ASSERT(getBucketState(bucketStateRegistry, bucketId) == boost::none); + ASSERT_TRUE(doesBucketStateMatch(bucketId, boost::none)); } TEST_F(BucketStateRegistryTest, DirectWriteStartInitializesBucketState) { @@ -605,9 +707,7 @@ TEST_F(BucketStateRegistryTest, DirectWriteStartInitializesBucketState) { auto bucketId = BucketId{ns1, OID()}; directWriteStart(bucketStateRegistry, ns1, bucketId.oid); - auto state = getBucketState(bucketStateRegistry, bucketId); - ASSERT_TRUE(state.has_value()); - ASSERT_TRUE(state.value().isSet(BucketStateFlag::kPendingDirectWrite)); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucketId)); } TEST_F(BucketStateRegistryTest, DirectWriteFinishRemovesBucketState) { @@ -616,13 +716,10 @@ TEST_F(BucketStateRegistryTest, DirectWriteFinishRemovesBucketState) { auto bucketId = BucketId{ns1, OID()}; directWriteStart(bucketStateRegistry, ns1, bucketId.oid); - auto state = getBucketState(bucketStateRegistry, bucketId); - ASSERT_TRUE(state.has_value()); - ASSERT_TRUE(state.value().isSet(BucketStateFlag::kPendingDirectWrite)); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucketId)); directWriteFinish(bucketStateRegistry, ns1, bucketId.oid); - state = getBucketState(bucketStateRegistry, bucketId); - ASSERT_FALSE(state.has_value()); + ASSERT_TRUE(doesBucketStateMatch(bucketId, boost::none)); } TEST_F(BucketStateRegistryTest, TestDirectWriteStartCounter) { @@ -632,34 +729,30 @@ TEST_F(BucketStateRegistryTest, TestDirectWriteStartCounter) { auto bucketId = bucket.bucketId; // Under the hood, the BucketState will contain a counter on the number of ongoing DirectWrites. - int32_t dwCounter = 0; + DirectWriteCounter dwCounter = 0; // If no direct write has been initiated, the direct write counter should be 0. auto state = getBucketState(bucketStateRegistry, bucketId); ASSERT_TRUE(state.has_value()); - ASSERT_EQ(dwCounter, state.value().getNumberOfDirectWrites()); + ASSERT_TRUE(stdx::holds_alternative(*state)); // Start a direct write and ensure the counter is incremented correctly. while (dwCounter < 4) { directWriteStart(bucketStateRegistry, ns1, bucketId.oid); dwCounter++; - state = getBucketState(bucketStateRegistry, bucketId); - ASSERT_TRUE(state.value().isSet(BucketStateFlag::kPendingDirectWrite)); - ASSERT_EQ(dwCounter, state.value().getNumberOfDirectWrites()); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucketId)); } while (dwCounter > 1) { directWriteFinish(bucketStateRegistry, ns1, bucketId.oid); dwCounter--; - state = getBucketState(bucketStateRegistry, bucketId); - ASSERT_TRUE(state.value().isSet(BucketStateFlag::kPendingDirectWrite)); - ASSERT_EQ(dwCounter, state.value().getNumberOfDirectWrites()); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucketId)); } // When the number of direct writes reaches 0, we should clear the bucket. directWriteFinish(bucketStateRegistry, ns1, bucketId.oid); - state = getBucketState(bucketStateRegistry, bucketId); - ASSERT_TRUE(hasBeenCleared(bucket)); + ASSERT_FALSE(doesBucketHaveDirectWrite(bucketId)); + ASSERT_TRUE(doesBucketStateMatch(bucketId, BucketState::kCleared)); } TEST_F(BucketStateRegistryTest, ConflictingDirectWrites) { @@ -671,24 +764,17 @@ TEST_F(BucketStateRegistryTest, ConflictingDirectWrites) { // First direct write initializes state as untracked. 
directWriteStart(bucketStateRegistry, bucketId.ns, bucketId.oid); - state = getBucketState(bucketStateRegistry, bucketId); - ASSERT(state.has_value()); - ASSERT(state.value().isSet(BucketStateFlag::kPendingDirectWrite)); - ASSERT(state.value().isSet(BucketStateFlag::kUntracked)); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucketId)); directWriteStart(bucketStateRegistry, bucketId.ns, bucketId.oid); // First finish does not remove the state from the registry. directWriteFinish(bucketStateRegistry, bucketId.ns, bucketId.oid); - state = getBucketState(bucketStateRegistry, bucketId); - ASSERT(state.has_value()); - ASSERT(state.value().isSet(BucketStateFlag::kPendingDirectWrite)); - ASSERT(state.value().isSet(BucketStateFlag::kUntracked)); + ASSERT_TRUE(doesBucketHaveDirectWrite(bucketId)); // Second one removes it. directWriteFinish(bucketStateRegistry, bucketId.ns, bucketId.oid); - state = getBucketState(bucketStateRegistry, bucketId); - ASSERT_FALSE(state.has_value()); + ASSERT_TRUE(doesBucketStateMatch(bucketId, boost::none)); } } // namespace diff --git a/src/mongo/db/timeseries/bucket_catalog/closed_bucket.cpp b/src/mongo/db/timeseries/bucket_catalog/closed_bucket.cpp index 26bf33d655485..616a4b6dd010b 100644 --- a/src/mongo/db/timeseries/bucket_catalog/closed_bucket.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/closed_bucket.cpp @@ -29,19 +29,21 @@ #include "mongo/db/timeseries/bucket_catalog/closed_bucket.h" +#include + +#include +#include + +#include "mongo/db/feature_flag.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/util/assert_util_core.h" + namespace mongo::timeseries::bucket_catalog { ClosedBucket::~ClosedBucket() { if (_bucketStateRegistry) { - changeBucketState( - *_bucketStateRegistry, - bucketId, - [](boost::optional input, std::uint64_t) -> boost::optional { - uassert(7443900, - "Expected bucket to be pending compression", - input.has_value() && input->isSet(BucketStateFlag::kPendingCompression)); - return boost::none; - }); + removeDirectWrite(*_bucketStateRegistry, bucketId); } } @@ -51,15 +53,12 @@ ClosedBucket::ClosedBucket(BucketStateRegistry* bsr, boost::optional nm) : bucketId{bucketId}, timeField{tf}, numMeasurements{nm}, _bucketStateRegistry{bsr} { invariant(_bucketStateRegistry); - changeBucketState( - *_bucketStateRegistry, - bucketId, - [](boost::optional input, std::uint64_t) -> boost::optional { - uassert(7443901, - "Expected bucket to be in normal state", - input.has_value() && !input->conflictsWithInsertion()); - return input.value().setFlag(BucketStateFlag::kPendingCompression); - }); + + // When enabled, we skip constructing ClosedBuckets as we don't need to compress the bucket. 
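+    // (This constructor therefore only runs when that flag is disabled; the pending compression is
+    // registered as a direct write with 'stopTracking' set, so the registry entry is erased once
+    // the ClosedBucket is destroyed and 'removeDirectWrite' runs.)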
+ invariant(!feature_flags::gTimeseriesAlwaysUseCompressedBuckets.isEnabled( + serverGlobalParams.featureCompatibility)); + + addDirectWrite(*_bucketStateRegistry, bucketId, /*stopTracking*/ true); } ClosedBucket::ClosedBucket(ClosedBucket&& other) diff --git a/src/mongo/db/timeseries/bucket_catalog/closed_bucket.h b/src/mongo/db/timeseries/bucket_catalog/closed_bucket.h index b9209c1f11e21..c08f03da8614a 100644 --- a/src/mongo/db/timeseries/bucket_catalog/closed_bucket.h +++ b/src/mongo/db/timeseries/bucket_catalog/closed_bucket.h @@ -29,11 +29,11 @@ #pragma once +#include +#include #include #include -#include - #include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" #include "mongo/db/timeseries/bucket_catalog/bucket_state_registry.h" diff --git a/src/mongo/db/timeseries/bucket_catalog/execution_stats.cpp b/src/mongo/db/timeseries/bucket_catalog/execution_stats.cpp index 174a15fa9ec74..45c8341f9af41 100644 --- a/src/mongo/db/timeseries/bucket_catalog/execution_stats.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/execution_stats.cpp @@ -29,6 +29,8 @@ #include "mongo/db/timeseries/bucket_catalog/execution_stats.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" namespace mongo::timeseries::bucket_catalog { @@ -201,5 +203,36 @@ void appendExecutionStatsToBuilder(const ExecutionStats& stats, BSONObjBuilder& } } +void addCollectionExecutionStats(ExecutionStatsController& stats, const ExecutionStats& collStats) { + stats.incNumBucketInserts(collStats.numBucketInserts.load()); + stats.incNumBucketUpdates(collStats.numBucketUpdates.load()); + stats.incNumBucketsOpenedDueToMetadata(collStats.numBucketsOpenedDueToMetadata.load()); + stats.incNumBucketsClosedDueToCount(collStats.numBucketsClosedDueToCount.load()); + stats.incNumBucketsClosedDueToSchemaChange(collStats.numBucketsClosedDueToSchemaChange.load()); + stats.incNumBucketsClosedDueToSize(collStats.numBucketsClosedDueToSize.load()); + stats.incNumBucketsClosedDueToCachePressure( + collStats.numBucketsClosedDueToCachePressure.load()); + stats.incNumBucketsClosedDueToTimeForward(collStats.numBucketsClosedDueToTimeForward.load()); + stats.incNumBucketsClosedDueToTimeBackward(collStats.numBucketsClosedDueToTimeBackward.load()); + stats.incNumBucketsClosedDueToMemoryThreshold( + collStats.numBucketsClosedDueToMemoryThreshold.load()); + stats.incNumBucketsClosedDueToReopening(collStats.numBucketsClosedDueToReopening.load()); + stats.incNumBucketsArchivedDueToMemoryThreshold( + collStats.numBucketsArchivedDueToMemoryThreshold.load()); + stats.incNumBucketsArchivedDueToTimeBackward( + collStats.numBucketsArchivedDueToTimeBackward.load()); + stats.incNumCommits(collStats.numCommits.load()); + stats.incNumWaits(collStats.numWaits.load()); + stats.incNumMeasurementsCommitted(collStats.numMeasurementsCommitted.load()); + stats.incNumBucketsReopened(collStats.numBucketsReopened.load()); + stats.incNumBucketsKeptOpenDueToLargeMeasurements( + collStats.numBucketsKeptOpenDueToLargeMeasurements.load()); + stats.incNumBucketsFetched(collStats.numBucketsFetched.load()); + stats.incNumBucketsQueried(collStats.numBucketsQueried.load()); + stats.incNumBucketFetchesFailed(collStats.numBucketFetchesFailed.load()); + stats.incNumBucketQueriesFailed(collStats.numBucketQueriesFailed.load()); + stats.incNumBucketReopeningsFailed(collStats.numBucketReopeningsFailed.load()); + stats.incNumDuplicateBucketsReopened(collStats.numDuplicateBucketsReopened.load()); +} } // namespace 
mongo::timeseries::bucket_catalog diff --git a/src/mongo/db/timeseries/bucket_catalog/execution_stats.h b/src/mongo/db/timeseries/bucket_catalog/execution_stats.h index 5b2b00c990ab2..829b51db3dbb2 100644 --- a/src/mongo/db/timeseries/bucket_catalog/execution_stats.h +++ b/src/mongo/db/timeseries/bucket_catalog/execution_stats.h @@ -103,4 +103,10 @@ class ExecutionStatsController { void appendExecutionStatsToBuilder(const ExecutionStats& stats, BSONObjBuilder& builder); +/** + * Adds the execution stats of a collection to both the collection and global stats of an execution + * stats controller. + */ +void addCollectionExecutionStats(ExecutionStatsController& stats, const ExecutionStats& collStats); + } // namespace mongo::timeseries::bucket_catalog diff --git a/src/mongo/db/timeseries/bucket_catalog/flat_bson.cpp b/src/mongo/db/timeseries/bucket_catalog/flat_bson.cpp index 758b23e03609b..82396d6435698 100644 --- a/src/mongo/db/timeseries/bucket_catalog/flat_bson.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/flat_bson.cpp @@ -29,9 +29,22 @@ #include "mongo/db/timeseries/bucket_catalog/flat_bson.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/update/document_diff_serialization.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decimal_counter.h" +#include "mongo/util/str.h" namespace mongo::timeseries::bucket_catalog { namespace { diff --git a/src/mongo/db/timeseries/bucket_catalog/flat_bson.h b/src/mongo/db/timeseries/bucket_catalog/flat_bson.h index 658ba003c0f19..0539ff5e07827 100644 --- a/src/mongo/db/timeseries/bucket_catalog/flat_bson.h +++ b/src/mongo/db/timeseries/bucket_catalog/flat_bson.h @@ -29,14 +29,24 @@ #pragma once -#include "mongo/base/string_data.h" -#include "mongo/bson/bsonelement.h" -#include "mongo/util/string_map.h" - +#include +#include +#include +#include +#include +#include #include #include +#include +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/util/string_map.h" + namespace mongo::timeseries::bucket_catalog { /** @@ -183,6 +193,7 @@ class FlatBSONStore { * iteration, insertion and search capability for subelements. */ class Obj { + public: friend class FlatBSONStore; diff --git a/src/mongo/db/timeseries/bucket_catalog/minmax_test.cpp b/src/mongo/db/timeseries/bucket_catalog/minmax_test.cpp index e3113975604a5..898a4eddca3ee 100644 --- a/src/mongo/db/timeseries/bucket_catalog/minmax_test.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/minmax_test.cpp @@ -27,13 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include #include "mongo/base/simple_string_data_comparator.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/timeseries/bucket_catalog/flat_bson.h" -#include "mongo/unittest/unittest.h" - -#include +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo::timeseries::bucket_catalog { namespace { diff --git a/src/mongo/db/timeseries/bucket_catalog/reopening.cpp b/src/mongo/db/timeseries/bucket_catalog/reopening.cpp index 54f3a729ccf09..7476e18f7b96a 100644 --- a/src/mongo/db/timeseries/bucket_catalog/reopening.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/reopening.cpp @@ -29,7 +29,12 @@ #include "mongo/db/timeseries/bucket_catalog/reopening.h" +#include + +#include + #include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" +#include "mongo/util/time_support.h" namespace mongo::timeseries::bucket_catalog { diff --git a/src/mongo/db/timeseries/bucket_catalog/reopening.h b/src/mongo/db/timeseries/bucket_catalog/reopening.h index 65ceb05f60884..2a69a1bb82eba 100644 --- a/src/mongo/db/timeseries/bucket_catalog/reopening.h +++ b/src/mongo/db/timeseries/bucket_catalog/reopening.h @@ -29,10 +29,13 @@ #pragma once +#include #include +#include #include +#include -#include +#include #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/db/timeseries/bucket_catalog/write_batch.cpp b/src/mongo/db/timeseries/bucket_catalog/write_batch.cpp index 0226e0ba32b40..b44372a593bc6 100644 --- a/src/mongo/db/timeseries/bucket_catalog/write_batch.cpp +++ b/src/mongo/db/timeseries/bucket_catalog/write_batch.cpp @@ -29,9 +29,19 @@ #include "mongo/db/timeseries/bucket_catalog/write_batch.h" +#include +#include +#include #include +#include +#include +#include -#include "mongo/db/timeseries/bucket_catalog/bucket.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" namespace mongo::timeseries::bucket_catalog { diff --git a/src/mongo/db/timeseries/bucket_catalog/write_batch.h b/src/mongo/db/timeseries/bucket_catalog/write_batch.h index c0c39cb6da07d..25097db33c5e2 100644 --- a/src/mongo/db/timeseries/bucket_catalog/write_batch.h +++ b/src/mongo/db/timeseries/bucket_catalog/write_batch.h @@ -30,7 +30,14 @@ #pragma once #include - +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/oid.h" #include "mongo/db/operation_id.h" #include "mongo/db/repl/optime.h" @@ -39,6 +46,8 @@ #include "mongo/db/timeseries/bucket_compression.h" #include "mongo/platform/atomic_word.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/string_map.h" namespace mongo::timeseries::bucket_catalog { diff --git a/src/mongo/db/timeseries/bucket_compression.cpp b/src/mongo/db/timeseries/bucket_compression.cpp index 8d46c34a9df17..aaedb92d7731f 100644 --- a/src/mongo/db/timeseries/bucket_compression.cpp +++ b/src/mongo/db/timeseries/bucket_compression.cpp @@ -30,13 +30,37 @@ #include "mongo/db/timeseries/bucket_compression.h" +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" 
+#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bsoncolumn.h" #include "mongo/bson/util/bsoncolumnbuilder.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decimal_counter.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -212,7 +236,7 @@ CompressionResult compressBucket(const BSONObj& bucketDoc, if (!validateDecompression) return true; - BSONColumn col(binary, ""_sd); + BSONColumn col(binary); auto measurementEnd = measurements.end(); auto columnEnd = col.end(); diff --git a/src/mongo/db/timeseries/bucket_compression.h b/src/mongo/db/timeseries/bucket_compression.h index dc3363b9163ec..c4d7f495ecd6e 100644 --- a/src/mongo/db/timeseries/bucket_compression.h +++ b/src/mongo/db/timeseries/bucket_compression.h @@ -29,8 +29,11 @@ #pragma once +#include #include +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/timeseries/timeseries_gen.h" diff --git a/src/mongo/db/timeseries/bucket_compression_test.cpp b/src/mongo/db/timeseries/bucket_compression_test.cpp index 0f2387f1514ea..973de3e824a6e 100644 --- a/src/mongo/db/timeseries/bucket_compression_test.cpp +++ b/src/mongo/db/timeseries/bucket_compression_test.cpp @@ -27,11 +27,15 @@ * it in the license file. */ +#include + +#include + #include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/timeseries/bucket_compression.h" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/timeseries/catalog_helper.cpp b/src/mongo/db/timeseries/catalog_helper.cpp index ca58a5aa7a624..dbaec3e1076f1 100644 --- a/src/mongo/db/timeseries/catalog_helper.cpp +++ b/src/mongo/db/timeseries/catalog_helper.cpp @@ -27,11 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/timeseries/catalog_helper.h" +#include +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/timeseries/catalog_helper.h" namespace mongo { diff --git a/src/mongo/db/timeseries/catalog_helper.h b/src/mongo/db/timeseries/catalog_helper.h index 14cc681b4c328..2b703cded3f10 100644 --- a/src/mongo/db/timeseries/catalog_helper.h +++ b/src/mongo/db/timeseries/catalog_helper.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/db/namespace_string.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/timeseries/timeseries_gen.h" namespace mongo { diff --git a/src/mongo/db/timeseries/timeseries.idl b/src/mongo/db/timeseries/timeseries.idl index e86d7a8adf242..8cd7fd5ab9573 100644 --- a/src/mongo/db/timeseries/timeseries.idl +++ b/src/mongo/db/timeseries/timeseries.idl @@ -107,6 +107,7 @@ structs: TimeseriesOptions: description: "The options that define a time-series collection." 
strict: true + query_shape_component: true fields: timeField: description: "The name of the top-level field to be used for time. Inserted @@ -114,6 +115,9 @@ structs: datetime type (0x9)" type: string stability: stable + query_shape: anonymize + validator: + callback: "validateTimeAndMetaField" metaField: description: "The name of the top-level field describing the series. This field is used to group related data and may be of any BSON type. This may not @@ -121,11 +125,15 @@ structs: type: string optional: true stability: stable + query_shape: anonymize + validator: + callback: "validateTimeAndMetaField" granularity: description: "Describes the expected interval between subsequent measurements" type: BucketGranularity optional: true stability: stable + query_shape: parameter bucketRoundingSeconds: description: "Used to determine the minimum time boundary when opening a new bucket by rounding the first timestamp down to the next multiple of this @@ -134,12 +142,14 @@ structs: optional: true validator: { gte: 1, lte: 31536000 } stability: stable + query_shape: literal bucketMaxSpanSeconds: description: "The maximum range of time values for a bucket, in seconds" type: safeInt optional: true validator: { gte: 1, lte: 31536000 } stability: stable + query_shape: literal CollModTimeseries: description: "A type representing the adjustable options on timeseries collections" diff --git a/src/mongo/db/timeseries/timeseries_collmod.cpp b/src/mongo/db/timeseries/timeseries_collmod.cpp index c90240a569514..24644586ca338 100644 --- a/src/mongo/db/timeseries/timeseries_collmod.cpp +++ b/src/mongo/db/timeseries/timeseries_collmod.cpp @@ -30,11 +30,27 @@ #include "mongo/db/timeseries/timeseries_collmod.h" +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/coll_mod.h" +#include "mongo/db/pipeline/change_stream_pre_and_post_images_options_gen.h" #include "mongo/db/timeseries/catalog_helper.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" -#include "mongo/logv2/log.h" +#include "mongo/db/timeseries/timeseries_options.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage diff --git a/src/mongo/db/timeseries/timeseries_collmod.h b/src/mongo/db/timeseries/timeseries_collmod.h index 5bf8f3130f9c4..9813b30cfa55b 100644 --- a/src/mongo/db/timeseries/timeseries_collmod.h +++ b/src/mongo/db/timeseries/timeseries_collmod.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/coll_mod_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" namespace mongo { namespace timeseries { diff --git a/src/mongo/db/timeseries/timeseries_commands_conversion_helper.cpp b/src/mongo/db/timeseries/timeseries_commands_conversion_helper.cpp index 6481aa7bbf973..4f0368e3b2087 100644 --- a/src/mongo/db/timeseries/timeseries_commands_conversion_helper.cpp +++ b/src/mongo/db/timeseries/timeseries_commands_conversion_helper.cpp @@ -30,16 +30,38 @@ #include "mongo/db/timeseries/timeseries_commands_conversion_helper.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" 
+#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/commit_quorum_options.h" +#include "mongo/db/exec/timeseries/bucket_spec.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_names.h" -#include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" -#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" -#include "mongo/logv2/log.h" #include "mongo/logv2/redaction.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -165,7 +187,6 @@ CreateIndexesCommand makeTimeseriesCreateIndexesCommand(OperationContext* opCtx, auto [hasMetricPred, bucketPred] = BucketSpec::pushdownPredicate(expCtx, options, - collationMatchesDefault, pred, haveComputedMetaField, includeMetaField, diff --git a/src/mongo/db/timeseries/timeseries_commands_conversion_helper.h b/src/mongo/db/timeseries/timeseries_commands_conversion_helper.h index d2641a17d651d..7a948efa29ca1 100644 --- a/src/mongo/db/timeseries/timeseries_commands_conversion_helper.h +++ b/src/mongo/db/timeseries/timeseries_commands_conversion_helper.h @@ -29,11 +29,15 @@ #pragma once +#include + #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/create_indexes_gen.h" #include "mongo/db/drop_indexes_gen.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/timeseries/timeseries_gen.h" namespace mongo::timeseries { diff --git a/src/mongo/db/timeseries/timeseries_constants.h b/src/mongo/db/timeseries/timeseries_constants.h index d2e00e34a2bfc..b0d4e715e3a57 100644 --- a/src/mongo/db/timeseries/timeseries_constants.h +++ b/src/mongo/db/timeseries/timeseries_constants.h @@ -75,7 +75,8 @@ static const StringDataSet kAllowedCollectionCreationOptions{ CreateCommand::kCollationFieldName, CreateCommand::kTimeseriesFieldName, CreateCommand::kExpireAfterSecondsFieldName, - CreateCommand::kDollarTenantFieldName}; + CreateCommand::kDollarTenantFieldName, + CreateCommand::kTempFieldName}; } // namespace timeseries } // namespace mongo diff --git a/src/mongo/db/timeseries/timeseries_dotted_path_support.cpp b/src/mongo/db/timeseries/timeseries_dotted_path_support.cpp index 2672744ca0fb7..7208c22ee81da 100644 --- a/src/mongo/db/timeseries/timeseries_dotted_path_support.cpp +++ b/src/mongo/db/timeseries/timeseries_dotted_path_support.cpp @@ -27,19 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/timeseries/timeseries_dotted_path_support.h" - +#include +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/bson/bson_depth.h" #include "mongo/bson/bsonelement.h" -#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bsoncolumn.h" #include "mongo/db/timeseries/bucket_compression.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_dotted_path_support.h" +#include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" namespace mongo { diff --git a/src/mongo/db/timeseries/timeseries_dotted_path_support.h b/src/mongo/db/timeseries/timeseries_dotted_path_support.h index b9a35b8cb3e8d..6e1f96eaee1a2 100644 --- a/src/mongo/db/timeseries/timeseries_dotted_path_support.h +++ b/src/mongo/db/timeseries/timeseries_dotted_path_support.h @@ -30,12 +30,16 @@ #pragma once #include +#include #include +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement_comparator_interface.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/util/bsoncolumn.h" #include "mongo/db/index/multikey_paths.h" +#include "mongo/platform/compiler.h" namespace mongo { namespace timeseries { diff --git a/src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp b/src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp index 3a895d4afd7d2..717d39208f1fe 100644 --- a/src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp +++ b/src/mongo/db/timeseries/timeseries_dotted_path_support_test.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include "mongo/bson/json.h" -#include "mongo/platform/basic.h" +#include +#include +#include + +#include +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/timeseries/bucket_compression.h" #include "mongo/db/timeseries/timeseries_dotted_path_support.h" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/timeseries/timeseries_extended_range.cpp b/src/mongo/db/timeseries/timeseries_extended_range.cpp index 16121dc8f8b9c..74ecfb15b8278 100644 --- a/src/mongo/db/timeseries/timeseries_extended_range.cpp +++ b/src/mongo/db/timeseries/timeseries_extended_range.cpp @@ -29,7 +29,28 @@ #include "mongo/db/timeseries/timeseries_extended_range.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo::timeseries { diff --git a/src/mongo/db/timeseries/timeseries_extended_range.h b/src/mongo/db/timeseries/timeseries_extended_range.h index a1c14f341d8da..4d31906b52cfc 100644 --- 
a/src/mongo/db/timeseries/timeseries_extended_range.h +++ b/src/mongo/db/timeseries/timeseries_extended_range.h @@ -29,9 +29,14 @@ #pragma once +#include + #include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/util/time_support.h" namespace mongo::timeseries { diff --git a/src/mongo/db/timeseries/timeseries_extended_range_test.cpp b/src/mongo/db/timeseries/timeseries_extended_range_test.cpp index a6392aabd1b97..3d0411e78bdef 100644 --- a/src/mongo/db/timeseries/timeseries_extended_range_test.cpp +++ b/src/mongo/db/timeseries/timeseries_extended_range_test.cpp @@ -27,10 +27,12 @@ * it in the license file. */ +#include "mongo/base/string_data.h" #include "mongo/bson/json.h" #include "mongo/db/timeseries/timeseries_extended_range.h" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/db/timeseries/timeseries_global_options.cpp b/src/mongo/db/timeseries/timeseries_global_options.cpp index 5f4e3193c778e..3ea002987396b 100644 --- a/src/mongo/db/timeseries/timeseries_global_options.cpp +++ b/src/mongo/db/timeseries/timeseries_global_options.cpp @@ -27,6 +27,9 @@ * it in the license file. */ +#include + +#include "mongo/platform/atomic_word.h" #include "mongo/util/processinfo.h" namespace mongo { diff --git a/src/mongo/db/timeseries/timeseries_global_options.h b/src/mongo/db/timeseries/timeseries_global_options.h index 2e13432e8e97c..a2c958a10ec21 100644 --- a/src/mongo/db/timeseries/timeseries_global_options.h +++ b/src/mongo/db/timeseries/timeseries_global_options.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/status.h" #include "mongo/platform/atomic_word.h" namespace mongo { @@ -36,4 +37,15 @@ namespace mongo { extern AtomicWord gTimeseriesIdleBucketExpiryMemoryUsageThresholdBytes; uint64_t getTimeseriesIdleBucketExpiryMemoryUsageThresholdBytes(); +/** + * Checks the time or the meta field doesn't contain embedded null bytes. 
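+ * Returns Status::OK() for an ordinary field name such as "time"; a value containing a '\0' byte
+ * yields ErrorCodes::BadValue. Used as the IDL validator for 'timeField' and 'metaField'.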
+ */ +inline Status validateTimeAndMetaField(const std::string& str) { + if (str.find('\0') != std::string::npos) { + return Status(ErrorCodes::BadValue, + "The 'timeField' or the 'metaField' cannot contain embedded null bytes"); + } + return Status::OK(); +} + } // namespace mongo diff --git a/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp b/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp index ac49912d3dc2a..a9327b74ab315 100644 --- a/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp +++ b/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.cpp @@ -30,16 +30,44 @@ #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" -#include "mongo/db/catalog/index_catalog_impl.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_names.h" #include "mongo/db/matcher/expression_algo.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_gen.h" -#include "mongo/logv2/log.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -106,9 +134,10 @@ StatusWith createBucketsSpecFromTimeseriesSpec(const TimeseriesOptions& if (!elem.isNumber()) { return {ErrorCodes::BadValue, str::stream() - << "Invalid index spec for time-series collection: " - << redact(timeseriesIndexSpecBSON) - << ". Indexes on the time field must be ascending or descending " + << "Invalid " << (isShardKeySpec ? "shard key" : "index spec") + << " for time-series collection: " << redact(timeseriesIndexSpecBSON) + << ". " << (isShardKeySpec ? "Shard keys" : "Indexes") + << " on the time field must be ascending or descending " "(numbers only): " << elem}; } @@ -480,11 +509,11 @@ bool doesBucketsIndexIncludeMeasurement(OperationContext* opCtx, statusWithFilter.isOK()); auto filter = std::move(statusWithFilter.getValue()); - if (!expression::isOnlyDependentOn(*filter, - {std::string{timeseries::kBucketMetaFieldName}, - controlMinTimeField, - controlMaxTimeField, - idField})) { + if (!expression::isOnlyDependentOnConst(*filter, + {std::string{timeseries::kBucketMetaFieldName}, + controlMinTimeField, + controlMaxTimeField, + idField})) { // Partial filter expression depends on a non-time, non-metadata field. 
return true; } diff --git a/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.h b/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.h index fb9d32dbc7584..d39fde6d3d1e9 100644 --- a/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.h +++ b/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions.h @@ -29,9 +29,15 @@ #pragma once +#include +#include + #include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_options.h" /** diff --git a/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions_test.cpp b/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions_test.cpp index 823c6363c0fc3..04768ade8b0ab 100644 --- a/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions_test.cpp +++ b/src/mongo/db/timeseries/timeseries_index_schema_conversion_functions_test.cpp @@ -27,15 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/timeseries/timeseries_op_observer.cpp b/src/mongo/db/timeseries/timeseries_op_observer.cpp new file mode 100644 index 0000000000000..c669aae9f5046 --- /dev/null +++ b/src/mongo/db/timeseries/timeseries_op_observer.cpp @@ -0,0 +1,164 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/db/timeseries/timeseries_op_observer.h" + +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_operation_source.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" +#include "mongo/db/timeseries/timeseries_extended_range.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" + + +namespace mongo { + +void TimeSeriesOpObserver::onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { + const auto& nss = coll->ns(); + + if (!nss.isTimeseriesBucketsCollection()) { + return; + } + + // Check if the bucket _id is sourced from a date outside the standard range. If our writes + // end up erroring out or getting rolled back, then this flag will stay set. This is okay + // though, as it only disables some query optimizations and won't result in any correctness + // issues if the flag is set when it doesn't need to be (as opposed to NOT being set when it + // DOES need to be -- that will cause correctness issues). Additionally, if the user tried + // to insert measurements with dates outside the standard range, chances are they will do so + // again, and we will have only set the flag a little early. + invariant(opCtx->lockState()->isCollectionLockedForMode(nss, MODE_IX)); + // Hold reference to the catalog for collection lookup without locks to be safe. 
+ auto catalog = CollectionCatalog::get(opCtx); + auto bucketsColl = catalog->lookupCollectionByNamespace(opCtx, nss); + tassert(6905201, "Could not find collection for write", bucketsColl); + auto timeSeriesOptions = bucketsColl->getTimeseriesOptions(); + if (timeSeriesOptions.has_value()) { + if (auto currentSetting = bucketsColl->getRequiresTimeseriesExtendedRangeSupport(); + !currentSetting && + timeseries::bucketsHaveDateOutsideStandardRange( + timeSeriesOptions.value(), first, last)) { + bucketsColl->setRequiresTimeseriesExtendedRangeSupport(opCtx); + } + } +} + +void TimeSeriesOpObserver::onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { + const auto& nss = args.coll->ns(); + + if (!nss.isTimeseriesBucketsCollection()) { + return; + } + + if (args.updateArgs->source != OperationSource::kTimeseriesInsert) { + OID bucketId = args.updateArgs->updatedDoc["_id"].OID(); + timeseries::bucket_catalog::handleDirectWrite(opCtx, nss, bucketId); + } +} + +void TimeSeriesOpObserver::aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { + const auto& nss = coll->ns(); + + if (!nss.isTimeseriesBucketsCollection()) { + return; + } + + OID bucketId = doc["_id"].OID(); + timeseries::bucket_catalog::handleDirectWrite(opCtx, nss, bucketId); +} + +void TimeSeriesOpObserver::onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) { + auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); + timeseries::bucket_catalog::clear(bucketCatalog, dbName); +} + +repl::OpTime TimeSeriesOpObserver::onDropCollection(OperationContext* opCtx, + const NamespaceString& collectionName, + const UUID& uuid, + std::uint64_t numRecords, + CollectionDropType dropType, + bool markFromMigrate) { + if (collectionName.isTimeseriesBucketsCollection()) { + auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); + timeseries::bucket_catalog::clear(bucketCatalog, + collectionName.getTimeseriesViewNamespace()); + } + + return {}; +} + +void TimeSeriesOpObserver::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { + stdx::unordered_set timeseriesNamespaces; + for (const auto& ns : rbInfo.rollbackNamespaces) { + if (ns.isTimeseriesBucketsCollection()) { + timeseriesNamespaces.insert(ns.getTimeseriesViewNamespace()); + } + } + + if (timeseriesNamespaces.empty()) { + return; + } + + auto& bucketCatalog = timeseries::bucket_catalog::BucketCatalog::get(opCtx); + timeseries::bucket_catalog::clear( + bucketCatalog, + [timeseriesNamespaces = std::move(timeseriesNamespaces)](const NamespaceString& bucketNs) { + return timeseriesNamespaces.contains(bucketNs); + }); +} + +} // namespace mongo diff --git a/src/mongo/db/timeseries/timeseries_op_observer.h b/src/mongo/db/timeseries/timeseries_op_observer.h new file mode 100644 index 0000000000000..cf6db2fa82104 --- /dev/null +++ b/src/mongo/db/timeseries/timeseries_op_observer.h @@ -0,0 +1,94 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" +#include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/util/uuid.h" + +namespace mongo { + +/** + * OpObserver for time-series collections. Notify the Bucket Catalog of events so it can update its + * state. + */ +class TimeSeriesOpObserver final : public OpObserverNoop { + TimeSeriesOpObserver(const TimeSeriesOpObserver&) = delete; + TimeSeriesOpObserver& operator=(const TimeSeriesOpObserver&) = delete; + +public: + TimeSeriesOpObserver() = default; + ~TimeSeriesOpObserver() = default; + + NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kSystem, NamespaceFilter::kSystem}; + } + + void onInserts(OperationContext* opCtx, + const CollectionPtr& coll, + std::vector::const_iterator first, + std::vector::const_iterator last, + std::vector fromMigrate, + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void aboutToDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final; + + repl::OpTime onDropCollection(OperationContext* opCtx, + const NamespaceString& collectionName, + const UUID& uuid, + std::uint64_t numRecords, + CollectionDropType dropType, + bool markFromMigrate) final; + + void onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; +}; + +} // namespace mongo diff --git a/src/mongo/db/timeseries/timeseries_options.cpp b/src/mongo/db/timeseries/timeseries_options.cpp index d384f662a9992..8f9b82d80f8c0 100644 --- a/src/mongo/db/timeseries/timeseries_options.cpp +++ b/src/mongo/db/timeseries/timeseries_options.cpp @@ -27,14 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/timeseries/timeseries_options.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/server_options.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/timeseries/timeseries_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo { @@ -224,17 +236,24 @@ StatusWith> applyTimeseriesOptionsModificatio } BSONObj generateViewPipeline(const TimeseriesOptions& options, bool asArray) { + // TODO(SERVER-76411): Remove this and read the value directly after 'bucketMaxSpanSeconds' is + // guaranteed to be present. + // Generates the 'bucketMaxSpanSeconds' field if missing. + auto maxSpanSeconds = + options.getBucketMaxSpanSeconds().get_value_or(getMaxSpanSecondsFromGranularity( + options.getGranularity().get_value_or(BucketGranularityEnum::Seconds))); + if (options.getMetaField()) { return wrapInArrayIf( asArray, BSON("$_internalUnpackBucket" << BSON( "timeField" << options.getTimeField() << "metaField" << *options.getMetaField() - << "bucketMaxSpanSeconds" << *options.getBucketMaxSpanSeconds()))); + << "bucketMaxSpanSeconds" << maxSpanSeconds))); } return wrapInArrayIf(asArray, - BSON("$_internalUnpackBucket" << BSON( - "timeField" << options.getTimeField() << "bucketMaxSpanSeconds" - << *options.getBucketMaxSpanSeconds()))); + BSON("$_internalUnpackBucket" + << BSON("timeField" << options.getTimeField() + << "bucketMaxSpanSeconds" << maxSpanSeconds))); } bool optionsAreEqual(const TimeseriesOptions& option1, const TimeseriesOptions& option2) { diff --git a/src/mongo/db/timeseries/timeseries_options.h b/src/mongo/db/timeseries/timeseries_options.h index 9aed29244dc4c..1e63e14468ff5 100644 --- a/src/mongo/db/timeseries/timeseries_options.h +++ b/src/mongo/db/timeseries/timeseries_options.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/timeseries/timeseries_options_test.cpp b/src/mongo/db/timeseries/timeseries_options_test.cpp index bcf990d750c97..ebee92dbb261d 100644 --- a/src/mongo/db/timeseries/timeseries_options_test.cpp +++ b/src/mongo/db/timeseries/timeseries_options_test.cpp @@ -27,10 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/timeseries/timeseries_options.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/db/timeseries/timeseries_stats.cpp b/src/mongo/db/timeseries/timeseries_stats.cpp index 61aa3a968aa33..ff8af434cce2b 100644 --- a/src/mongo/db/timeseries/timeseries_stats.cpp +++ b/src/mongo/db/timeseries/timeseries_stats.cpp @@ -29,7 +29,10 @@ #include "mongo/db/timeseries/timeseries_stats.h" +#include + #include "mongo/db/catalog/collection.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/timeseries/timeseries_stats.h b/src/mongo/db/timeseries/timeseries_stats.h index df3ac8d4ec974..d93be5d1324df 100644 --- a/src/mongo/db/timeseries/timeseries_stats.h +++ b/src/mongo/db/timeseries/timeseries_stats.h @@ -31,7 +31,10 @@ #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" +#include "mongo/platform/atomic_word.h" namespace mongo { /** diff --git a/src/mongo/db/timeseries/timeseries_update_delete_util.cpp b/src/mongo/db/timeseries/timeseries_update_delete_util.cpp index 3f03e57386ebe..aae207092fb28 100644 --- a/src/mongo/db/timeseries/timeseries_update_delete_util.cpp +++ b/src/mongo/db/timeseries/timeseries_update_delete_util.cpp @@ -29,10 +29,34 @@ #include "mongo/db/timeseries/timeseries_update_delete_util.h" -#include "mongo/db/exec/bucket_unpacker.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/timeseries/bucket_spec.h" +#include "mongo/db/exec/timeseries/bucket_unpacker.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_algo.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/matcher/expression_tree.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/ops/parsed_writes_common.h" +#include "mongo/db/query/util/make_data_structure.h" #include "mongo/db/timeseries/timeseries_constants.h" namespace mongo::timeseries { @@ -52,7 +76,7 @@ static const std::unique_ptr closedBucketFilter = * Returns whether the given metaField is the first element of the dotted path in the given * field. */ -bool isMetaFieldFirstElementOfDottedPathField(StringData field, StringData metaField) { +bool isFieldFirstElementOfDottedPathField(StringData field, StringData metaField) { return field.substr(0, field.find('.')) == metaField; } @@ -74,12 +98,13 @@ void assertQueryFieldIsMetaField(bool isMetaField, StringData metaField) { isMetaField); } -void assertUpdateFieldIsMetaField(bool isMetaField, StringData metaField) { - uassert(ErrorCodes::InvalidOptions, - fmt::format("Cannot perform an update on a time-series collection which updates a " - "field that is not the metaField '{}'", - metaField), - isMetaField); +Status checkUpdateFieldIsMetaField(bool isMetaField, StringData metaField) { + return isMetaField + ? 
Status::OK() + : Status(ErrorCodes::InvalidOptions, + fmt::format("Cannot perform an update on a time-series collection which updates a " + "field that is not the metaField '{}'", + metaField)); } /** @@ -130,7 +155,7 @@ void replaceQueryMetaFieldName(mutablebson::Element elem, } if (isTopLevelField && (fieldName.empty() || fieldName[0] != '$')) { - assertQueryFieldIsMetaField(isMetaFieldFirstElementOfDottedPathField(fieldName, metaField), + assertQueryFieldIsMetaField(isFieldFirstElementOfDottedPathField(fieldName, metaField), metaField); invariantStatusOK(elem.rename(getRenamedField(fieldName))); } @@ -155,18 +180,28 @@ BSONObj translateQuery(const BSONObj& query, StringData metaField) { return queryDoc.getObject(); } -write_ops::UpdateModification translateUpdate(const write_ops::UpdateModification& updateMod, - StringData metaField) { - invariant(!metaField.empty()); +StatusWith translateUpdate( + const write_ops::UpdateModification& updateMod, boost::optional metaField) { invariant(updateMod.type() != write_ops::UpdateModification::Type::kDelta); - uassert(ErrorCodes::InvalidOptions, - "Cannot perform an update on a time-series collection using a pipeline update", - updateMod.type() != write_ops::UpdateModification::Type::kPipeline); + if (updateMod.type() == write_ops::UpdateModification::Type::kPipeline) { + return Status( + ErrorCodes::InvalidOptions, + "Cannot perform an update on a time-series collection using a pipeline update"); + } - uassert(ErrorCodes::InvalidOptions, - "Cannot perform an update on a time-series collection using a replacement document", - updateMod.type() != write_ops::UpdateModification::Type::kReplacement); + if (updateMod.type() == write_ops::UpdateModification::Type::kReplacement) { + return Status( + ErrorCodes::InvalidOptions, + "Cannot perform an update on a time-series collection using a replacement document"); + } + + // We can't translate an update without a meta field. + if (!metaField) { + return Status(ErrorCodes::InvalidOptions, + "Cannot perform an update on a time-series collection that does not have a " + "metaField"); + } const auto& document = updateMod.getUpdateModifier(); @@ -189,16 +224,23 @@ write_ops::UpdateModification translateUpdate(const write_ops::UpdateModificatio fieldValuePair = fieldValuePair.rightSibling()) { auto fieldName = fieldValuePair.getFieldName(); - assertUpdateFieldIsMetaField( - isMetaFieldFirstElementOfDottedPathField(fieldName, metaField), metaField); + auto status = checkUpdateFieldIsMetaField( + isFieldFirstElementOfDottedPathField(fieldName, *metaField), *metaField); + if (!status.isOK()) { + return status; + } invariantStatusOK(fieldValuePair.rename(getRenamedField(fieldName))); // If this is a $rename, we also need to translate the value. if (updatePair.getFieldName() == "$rename") { - assertUpdateFieldIsMetaField(fieldValuePair.isType(BSONType::String) && - isMetaFieldFirstElementOfDottedPathField( - fieldValuePair.getValueString(), metaField), - metaField); + auto status = checkUpdateFieldIsMetaField( + fieldValuePair.isType(BSONType::String) && + isFieldFirstElementOfDottedPathField(fieldValuePair.getValueString(), + *metaField), + *metaField); + if (!status.isOK()) { + return status; + } invariantStatusOK(fieldValuePair.setValueString( getRenamedField(fieldValuePair.getValueString()))); } @@ -214,57 +256,116 @@ std::function numMeasurementsForBucketCounter(StringData }; } +namespace { +/** + * Combines the given MatchExpressions into a single AndMatchExpression by $and-ing them. 
If there + * is only one non-null expression, returns that expression. If there are no non-null expressions, + * returns nullptr. + * + * Ts must be convertible to std::unique_ptr. + */ +template +std::unique_ptr andCombineMatchExpressions(Ts&&... matchExprs) { + std::vector> matchExprVector = + makeVectorIfNotNull(std::forward(matchExprs)...); + if (matchExprVector.empty()) { + return nullptr; + } + return matchExprVector.size() > 1 + ? std::make_unique(std::move(matchExprVector)) + : std::move(matchExprVector[0]); +} +} // namespace + BSONObj getBucketLevelPredicateForRouting(const BSONObj& originalQuery, const boost::intrusive_ptr& expCtx, - boost::optional metaField) { - if (!metaField) { - // In case the time-series collection does not have meta field defined, we broadcast the - // request to all shards or use two phase protocol using empty predicate. - // - // TODO SERVER-75160: Move this block into the if gTimeseriesDeletesSupport is not enabled - // block as soon as we implement SERVER-75160. + const TimeseriesOptions& tsOptions, + bool allowArbitraryWrites) { + if (originalQuery.isEmpty()) { return BSONObj(); } - if (!feature_flags::gTimeseriesDeletesSupport.isEnabled( - serverGlobalParams.featureCompatibility)) { + auto&& metaField = tsOptions.getMetaField(); + if (!allowArbitraryWrites) { + if (!metaField) { + // In case the time-series collection does not have meta field defined, we broadcast + // the request to all shards or use two phase protocol using empty predicate. + return BSONObj(); + } + // Translate the delete query into a query on the time-series collection's underlying // buckets collection. return timeseries::translateQuery(originalQuery, *metaField); } - auto swMatchExpr = + auto matchExpr = uassertStatusOK( MatchExpressionParser::parse(originalQuery, expCtx, ExtensionsCallbackNoop(), - MatchExpressionParser::kAllowAllSpecialFeatures); - uassertStatusOKWithContext(swMatchExpr.getStatus(), "Failed to parse delete query"); - auto metaFieldStr = metaField->toString(); - // Split out the bucket-level predicate from the delete query and rename the meta field to the - // internal name, 'meta'. - auto [bucketLevelPredicate, _] = expression::splitMatchExpressionBy( - std::move(swMatchExpr.getValue()), - {metaFieldStr} /*fields*/, - {{metaFieldStr, timeseries::kBucketMetaFieldName.toString()}} /*renames*/, - expression::isOnlyDependentOn); - - if (bucketLevelPredicate) { - return bucketLevelPredicate->serialize(); + MatchExpressionParser::kAllowAllSpecialFeatures)); + + // If the meta field exists, split out the meta field predicate which can be potentially used + // for bucket-level routing. + auto [metaOnlyPred, residualPred] = + BucketSpec::splitOutMetaOnlyPredicate(std::move(matchExpr), metaField); + + // Split out the time field predicate which can be potentially used for bucket-level routing. + auto timeOnlyPred = residualPred + ? expression::splitMatchExpressionBy(std::move(residualPred), + {tsOptions.getTimeField().toString()} /*fields*/, + {} /*renames*/, + expression::isOnlyDependentOn) + .first + : std::unique_ptr{}; + + // Translate the time field predicate into a predicate on the bucket-level time field. + std::unique_ptr timeBucketPred = timeOnlyPred + ? 
BucketSpec::createPredicatesOnBucketLevelField( + timeOnlyPred.get(), + BucketSpec{ + tsOptions.getTimeField().toString(), + metaField.map([](StringData s) { return s.toString(); }), + }, + *tsOptions.getBucketMaxSpanSeconds(), + expCtx, + false /*haveComputedMetaField*/, + false /*includeMetaField*/, + true /*assumeNoMixedSchemaData*/, + BucketSpec::IneligiblePredicatePolicy::kIgnore /*policy*/) + .loosePredicate + : nullptr; + + // In case the delete query does not contain any potential bucket-level routing predicate, + // target the request to all shards using an empty predicate. + if (!metaOnlyPred && !timeBucketPred) { + return BSONObj(); } - // In case that the delete query does not contain bucket-level predicate that can be split out - // and renamed, target the request to all shards using empty predicate. - return BSONObj(); + // Combine the meta field and time field predicates into a single predicate by $and-ing them + // together. + // Note: At least one of 'metaOnlyPred' or 'timeBucketPred' is non-null, so the result + // expression is guaranteed to be non-null. + return andCombineMatchExpressions(std::move(metaOnlyPred), std::move(timeBucketPred)) + ->serialize(); } -std::unique_ptr getBucketLevelPredicateForWrites( - std::unique_ptr bucketExpr) { - if (bucketExpr) { - std::vector> bucketExprs; - bucketExprs.emplace_back(std::move(bucketExpr)); - bucketExprs.emplace_back(closedBucketFilter->clone()); - return std::make_unique(std::move(bucketExprs)); - } - return closedBucketFilter->clone(); +TimeseriesWritesQueryExprs getMatchExprsForWrites( + const boost::intrusive_ptr& expCtx, + const TimeseriesOptions& tsOptions, + const BSONObj& writeQuery) { + auto [metaOnlyExpr, bucketMetricExpr, residualExpr] = + BucketSpec::getPushdownPredicates(expCtx, + tsOptions, + writeQuery, + /*haveComputedMetaField*/ false, + tsOptions.getMetaField().has_value(), + /*assumeNoMixedSchemaData*/ true, + BucketSpec::IneligiblePredicatePolicy::kIgnore); + + // Combine the closed bucket filter, the bucket metric filter, and the meta-only filter into a + // single filter by $and-ing them together. + return {._bucketExpr = andCombineMatchExpressions( + closedBucketFilter->clone(), std::move(metaOnlyExpr), std::move(bucketMetricExpr)), + ._residualExpr = std::move(residualExpr)}; } } // namespace mongo::timeseries diff --git a/src/mongo/db/timeseries/timeseries_update_delete_util.h b/src/mongo/db/timeseries/timeseries_update_delete_util.h index 252fb63396860..7038202d58a6e 100644 --- a/src/mongo/db/timeseries/timeseries_update_delete_util.h +++ b/src/mongo/db/timeseries/timeseries_update_delete_util.h @@ -29,9 +29,25 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/parsed_writes_common.h" #include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/util/assert_util.h" namespace mongo::timeseries { /** @@ -46,10 +62,11 @@ BSONObj translateQuery(const BSONObj& query, StringData metaField); * Translates the given update on the time-series collection to an update on the time-series * collection's underlying buckets collection.
Creates and returns a translated UpdateModification * where all occurrences of metaField in updateMod are replaced with the literal "meta". Requires - * that updateMod is an update document and that the given metaField is not empty. + * that updateMod is an update document and that the given metaField is not empty. Returns an + * invalid status if the update cannot be translated. */ -write_ops::UpdateModification translateUpdate(const write_ops::UpdateModification& updateMod, - StringData metaField); +StatusWith translateUpdate( + const write_ops::UpdateModification& updateMod, boost::optional metaField); /** * Returns the function to use to count the number of documents updated or deleted. @@ -62,12 +79,75 @@ std::function numMeasurementsForBucketCounter(StringData */ BSONObj getBucketLevelPredicateForRouting(const BSONObj& originalQuery, const boost::intrusive_ptr& expCtx, - boost::optional metaField); + const TimeseriesOptions& tsOptions, + bool allowArbitraryWrites); + +/** + * Returns the match expressions for the bucket and residual filters for a timeseries write + * operation. + */ +TimeseriesWritesQueryExprs getMatchExprsForWrites( + const boost::intrusive_ptr& expCtx, + const TimeseriesOptions& tsOptions, + const BSONObj& writeQuery); + +// Type requirement 1 for isTimeseries() +template +concept IsRequestableWithTimeseriesBucketNamespace = requires(const T& t) { + t.getNamespace(); + t.getIsTimeseriesNamespace(); +}; + +// Type requirement 2 for isTimeseries() +template +concept IsRequestableOnUserTimeseriesNamespace = requires(const T& t) { + t.getNsString(); +}; + +// Disjuction of type requirements for isTimeseries() +template +concept IsRequestableOnTimeseries = + IsRequestableWithTimeseriesBucketNamespace || IsRequestableOnUserTimeseriesNamespace; /** - * Get the bucket-level predicate for a time-series delete that filters out any buckets with - * 'control.closed' set. + * Returns a pair of (whether 'request' is made on a timeseries collection and the timeseries + * system bucket collection namespace if so). + * + * If the 'request' is not made on a timeseries collection, the second element of the pair is same + * as the namespace of the 'request'. */ -std::unique_ptr getBucketLevelPredicateForWrites( - std::unique_ptr bucketExpr = nullptr); +template +requires IsRequestableOnTimeseries std::pair isTimeseries( + OperationContext* opCtx, const T& request) { + const auto [nss, bucketNss] = [&] { + if constexpr (IsRequestableWithTimeseriesBucketNamespace) { + auto nss = request.getNamespace(); + uassert(5916400, + "'isTimeseriesNamespace' parameter can only be set when the request is sent on " + "system.buckets namespace", + !request.getIsTimeseriesNamespace() || nss.isTimeseriesBucketsCollection()); + return request.getIsTimeseriesNamespace() + ? std::pair{nss, nss} + : std::pair{nss, nss.makeTimeseriesBucketsNamespace()}; + } else { + auto nss = request.getNsString(); + return std::pair{ + nss, + nss.isTimeseriesBucketsCollection() ? nss : nss.makeTimeseriesBucketsNamespace()}; + } + }(); + + // If the buckets collection exists now, the time-series insert path will check for the + // existence of the buckets collection later on with a lock. + // If this check is concurrent with the creation of a time-series collection and the buckets + // collection does not yet exist, this check may return false unnecessarily. As a result, an + // insert attempt into the time-series namespace will either succeed or fail, depending on who + // wins the race. 
+ // Hold reference to the catalog for collection lookup without locks to be safe. + auto catalog = CollectionCatalog::get(opCtx); + auto coll = catalog->lookupCollectionByNamespace(opCtx, bucketNss); + bool isTimeseries = (coll && coll->getTimeseriesOptions()); + + return {isTimeseries, isTimeseries ? bucketNss : nss}; +} } // namespace mongo::timeseries diff --git a/src/mongo/db/timeseries/timeseries_update_delete_util_test.cpp b/src/mongo/db/timeseries/timeseries_update_delete_util_test.cpp index e82516fdff09d..fcbd9f58b0a73 100644 --- a/src/mongo/db/timeseries/timeseries_update_delete_util_test.cpp +++ b/src/mongo/db/timeseries/timeseries_update_delete_util_test.cpp @@ -27,16 +27,20 @@ * it in the license file. */ +#include + +#include + #include "mongo/base/error_codes.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/pipeline/document_source_merge_gen.h" -#include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" -#include "mongo/db/pipeline/pipeline.h" +#include "mongo/bson/json.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/timeseries/timeseries_update_delete_util.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -58,8 +62,9 @@ class TimeseriesUpdateDeleteUtilTest : public ServiceContextMongoDTest { } BSONObj _translateUpdate(const BSONObj& update) const { - return timeseries::translateUpdate( - write_ops::UpdateModification::parseFromClassicUpdate(update), _metaField) + return uassertStatusOK( + timeseries::translateUpdate( + write_ops::UpdateModification::parseFromClassicUpdate(update), _metaField)) .getUpdateModifier(); } diff --git a/src/mongo/db/timeseries/timeseries_write_util.cpp b/src/mongo/db/timeseries/timeseries_write_util.cpp index bfa6883076c95..1d1cb0133d4c3 100644 --- a/src/mongo/db/timeseries/timeseries_write_util.cpp +++ b/src/mongo/db/timeseries/timeseries_write_util.cpp @@ -29,17 +29,70 @@ #include "mongo/db/timeseries/timeseries_write_util.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "timeseries_index_schema_conversion_functions.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_operation_source.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/exception_util.h" #include "mongo/db/curop.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/ops/write_ops_exec_util.h" -#include "mongo/db/query/collection_query_info.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/tenant_migration_decoration.h" +#include 
"mongo/db/server_options.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/timeseries/bucket_catalog/bucket_catalog_helpers.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_catalog_internal.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_metadata.h" +#include "mongo/db/timeseries/bucket_catalog/execution_stats.h" +#include "mongo/db/timeseries/bucket_catalog/flat_bson.h" +#include "mongo/db/timeseries/bucket_catalog/reopening.h" +#include "mongo/db/timeseries/bucket_compression.h" #include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/timeseries/timeseries_options.h" #include "mongo/db/update/document_diff_applier.h" +#include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decimal_counter.h" +#include "mongo/util/decorable.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo::timeseries { namespace { @@ -116,8 +169,177 @@ BSONObj makeNewDocument(const OID& bucketId, return builder.obj(); } + +// Makes a write command request base and sets the statement Ids if provided a non-empty vector. +write_ops::WriteCommandRequestBase makeTimeseriesWriteOpBase(std::vector&& stmtIds) { + write_ops::WriteCommandRequestBase base; + + // The schema validation configured in the bucket collection is intended for direct + // operations by end users and is not applicable here. + base.setBypassDocumentValidation(true); + + if (!stmtIds.empty()) { + base.setStmtIds(std::move(stmtIds)); + } + + return base; +} + +// Builds the delta update oplog entry from a time-series insert write batch. +write_ops::UpdateOpEntry makeTimeseriesUpdateOpEntry( + OperationContext* opCtx, + std::shared_ptr batch, + const BSONObj& metadata) { + BSONObjBuilder updateBuilder; + { + if (!batch->min.isEmpty() || !batch->max.isEmpty()) { + BSONObjBuilder controlBuilder(updateBuilder.subobjStart( + str::stream() << doc_diff::kSubDiffSectionFieldPrefix << "control")); + if (!batch->min.isEmpty()) { + controlBuilder.append( + str::stream() << doc_diff::kSubDiffSectionFieldPrefix << "min", batch->min); + } + if (!batch->max.isEmpty()) { + controlBuilder.append( + str::stream() << doc_diff::kSubDiffSectionFieldPrefix << "max", batch->max); + } + } + } + { // doc_diff::kSubDiffSectionFieldPrefix + => {: ..., :} + StringDataMap dataFieldBuilders; + auto metadataElem = metadata.firstElement(); + DecimalCounter count(batch->numPreviouslyCommittedMeasurements); + for (const auto& doc : batch->measurements) { + for (const auto& elem : doc) { + auto key = elem.fieldNameStringData(); + if (metadataElem && key == metadataElem.fieldNameStringData()) { + continue; + } + auto& builder = dataFieldBuilders[key]; + builder.appendAs(elem, count); + } + ++count; + } + + // doc_diff::kSubDiffSectionFieldPrefix + + BSONObjBuilder dataBuilder(updateBuilder.subobjStart("sdata")); + BSONObjBuilder newDataFieldsBuilder; + for (auto& pair : dataFieldBuilders) { + // Existing 'data' fields with measurements require different treatment from fields + // not observed before (missing from control.min and control.max). 
+ if (batch->newFieldNamesToBeInserted.count(pair.first)) { + newDataFieldsBuilder.append(pair.first, pair.second.obj()); + } + } + auto newDataFields = newDataFieldsBuilder.obj(); + if (!newDataFields.isEmpty()) { + dataBuilder.append(doc_diff::kInsertSectionFieldName, newDataFields); + } + for (auto& pair : dataFieldBuilders) { + // Existing 'data' fields with measurements require different treatment from fields + // not observed before (missing from control.min and control.max). + if (!batch->newFieldNamesToBeInserted.count(pair.first)) { + dataBuilder.append(doc_diff::kSubDiffSectionFieldPrefix + pair.first.toString(), + BSON(doc_diff::kInsertSectionFieldName << pair.second.obj())); + } + } + } + write_ops::UpdateModification::DiffOptions options; + options.mustCheckExistenceForInsertOperations = + static_cast(repl::tenantMigrationInfo(opCtx)); + write_ops::UpdateModification u( + updateBuilder.obj(), write_ops::UpdateModification::DeltaTag{}, options); + auto oid = batch->bucketHandle.bucketId.oid; + write_ops::UpdateOpEntry update(BSON("_id" << oid), std::move(u)); + invariant(!update.getMulti(), oid.toString()); + invariant(!update.getUpsert(), oid.toString()); + return update; +} + +// Performs the storage write of an update to a time-series bucket document. +void updateTimeseriesDocument(OperationContext* opCtx, + const CollectionPtr& coll, + const write_ops::UpdateCommandRequest& op, + OpDebug* opDebug, + bool fromMigrate, + StmtId stmtId) { + invariant(op.getUpdates().size() == 1); + auto& update = op.getUpdates().front(); + + invariant(coll->isClustered()); + auto recordId = record_id_helpers::keyForOID(update.getQ()["_id"].OID()); + + auto original = coll->docFor(opCtx, recordId); + + CollectionUpdateArgs args{original.value()}; + args.criteria = update.getQ(); + args.stmtIds = {stmtId}; + if (fromMigrate) { + args.source = OperationSource::kFromMigrate; + } + + BSONObj updated; + BSONObj diffFromUpdate; + const BSONObj* diffOnIndexes = + collection_internal::kUpdateAllIndexes; // Assume all indexes are affected. 
+ if (update.getU().type() == write_ops::UpdateModification::Type::kDelta) { + diffFromUpdate = update.getU().getDiff(); + updated = doc_diff::applyDiff( + original.value(), diffFromUpdate, static_cast(repl::tenantMigrationInfo(opCtx))); + diffOnIndexes = &diffFromUpdate; + args.update = update_oplog_entry::makeDeltaOplogEntry(diffFromUpdate); + } else if (update.getU().type() == write_ops::UpdateModification::Type::kTransform) { + const auto& transform = update.getU().getTransform(); + auto transformed = transform(original.value()); + tassert(7667900, + "Could not apply transformation to time series bucket document", + transformed.has_value()); + updated = std::move(transformed.value()); + args.update = update_oplog_entry::makeReplacementOplogEntry(updated); + } else if (update.getU().type() == write_ops::UpdateModification::Type::kReplacement) { + updated = update.getU().getUpdateReplacement(); + args.update = update_oplog_entry::makeReplacementOplogEntry(updated); + } else { + invariant(false, "Unexpected update type"); + } + + collection_internal::updateDocument(opCtx, + coll, + recordId, + original, + updated, + diffOnIndexes, + nullptr /*indexesAffected*/, + opDebug, + &args); +} + +std::shared_ptr& extractFromSelf( + std::shared_ptr& batch) { + return batch; +} } // namespace +write_ops::UpdateCommandRequest buildSingleUpdateOp(const write_ops::UpdateCommandRequest& wholeOp, + size_t opIndex) { + write_ops::UpdateCommandRequest singleUpdateOp(wholeOp.getNamespace(), + {wholeOp.getUpdates()[opIndex]}); + auto commandBase = singleUpdateOp.getWriteCommandRequestBase(); + commandBase.setOrdered(wholeOp.getOrdered()); + commandBase.setBypassDocumentValidation(wholeOp.getBypassDocumentValidation()); + + return singleUpdateOp; +} + +void assertTimeseriesBucketsCollection(const Collection* bucketsColl) { + uassert(ErrorCodes::NamespaceNotFound, + "Could not find time-series buckets collection for write", + bucketsColl); + uassert(ErrorCodes::InvalidOptions, + "Time-series buckets collection is missing time-series options", + bucketsColl->getTimeseriesOptions()); +} + BSONObj makeNewDocumentForWrite(std::shared_ptr batch, const BSONObj& metadata) { StringDataMap dataBuilders; @@ -128,6 +350,29 @@ BSONObj makeNewDocumentForWrite(std::shared_ptr batc batch->bucketHandle.bucketId.oid, metadata, batch->min, batch->max, dataBuilders); } +BSONObj makeNewCompressedDocumentForWrite(std::shared_ptr batch, + const BSONObj& metadata, + const NamespaceString& nss, + StringData timeField) { + // Builds the data field of a bucket document. + StringDataMap dataBuilders; + processTimeseriesMeasurements( + {batch->measurements.begin(), batch->measurements.end()}, metadata, dataBuilders); + + BSONObj uncompressedDoc = makeNewDocument( + batch->bucketHandle.bucketId.oid, metadata, batch->min, batch->max, dataBuilders); + + const bool validateCompression = gValidateTimeseriesCompression.load(); + auto compressed = + timeseries::compressBucket(uncompressedDoc, timeField, nss, validateCompression); + if (compressed.compressedBucket) { + return *compressed.compressedBucket; + } + + // Return the uncompressed document if compression has failed. 
+ return uncompressedDoc; +} + BSONObj makeNewDocumentForWrite( const OID& bucketId, const std::vector& measurements, @@ -143,13 +388,301 @@ BSONObj makeNewDocumentForWrite( return makeNewDocument(bucketId, metadata, minmax->min(), minmax->max(), dataBuilders); } -Status performAtomicWrites(OperationContext* opCtx, - const CollectionPtr& coll, - const RecordId& recordId, - const stdx::variant& modificationOp, - bool fromMigrate, - StmtId stmtId) try { +BSONObj makeBucketDocument(const std::vector& measurements, + const NamespaceString& nss, + const TimeseriesOptions& options, + const StringData::ComparatorInterface* comparator) { + std::vector insertOps; + auto res = uassertStatusOK(bucket_catalog::internal::extractBucketingParameters( + nss, comparator, options, measurements[0])); + auto time = res.second; + auto [oid, _] = bucket_catalog::internal::generateBucketOID(time, options); + return makeNewDocumentForWrite( + oid, measurements, res.first.metadata.toBSON(), options, comparator); +} + +stdx::variant makeModificationOp( + const OID& bucketId, const CollectionPtr& coll, const std::vector& measurements) { + if (measurements.empty()) { + write_ops::DeleteOpEntry deleteEntry(BSON("_id" << bucketId), false); + write_ops::DeleteCommandRequest op(coll->ns(), {deleteEntry}); + return op; + } + auto timeseriesOptions = coll->getTimeseriesOptions(); + auto metaFieldName = timeseriesOptions->getMetaField(); + auto metadata = [&] { + if (!metaFieldName) { // Collection has no metadata field. + return BSONObj(); + } + // Look for the metadata field on this bucket and return it if present. + auto metaField = measurements[0].getField(*metaFieldName); + return metaField ? metaField.wrap() : BSONObj(); + }(); + auto replaceBucket = makeNewDocumentForWrite( + bucketId, measurements, metadata, timeseriesOptions, coll->getDefaultCollator()); + + write_ops::UpdateModification u(replaceBucket); + write_ops::UpdateOpEntry updateEntry(BSON("_id" << bucketId), std::move(u)); + write_ops::UpdateCommandRequest op(coll->ns(), {updateEntry}); + return op; +} + +write_ops::UpdateOpEntry makeTimeseriesTransformationOpEntry( + OperationContext* opCtx, + const OID& bucketId, + write_ops::UpdateModification::TransformFunc transformationFunc) { + write_ops::UpdateModification u(std::move(transformationFunc)); + write_ops::UpdateOpEntry update(BSON("_id" << bucketId), std::move(u)); + invariant(!update.getMulti(), bucketId.toString()); + invariant(!update.getUpsert(), bucketId.toString()); + return update; +} + +void getOpTimeAndElectionId(OperationContext* opCtx, + boost::optional* opTime, + boost::optional* electionId) { + auto* replCoord = repl::ReplicationCoordinator::get(opCtx->getServiceContext()); + const auto replMode = replCoord->getReplicationMode(); + + *opTime = replMode != repl::ReplicationCoordinator::modeNone + ? boost::make_optional(repl::ReplClientInfo::forClient(opCtx->getClient()).getLastOp()) + : boost::none; + *electionId = replMode == repl::ReplicationCoordinator::modeReplSet + ? 
boost::make_optional(replCoord->getElectionId()) + : boost::none; +} + +write_ops::InsertCommandRequest makeTimeseriesInsertOp( + std::shared_ptr batch, + const NamespaceString& bucketsNs, + const BSONObj& metadata, + std::vector&& stmtIds) { + write_ops::InsertCommandRequest op{bucketsNs, {makeNewDocumentForWrite(batch, metadata)}}; + op.setWriteCommandRequestBase(makeTimeseriesWriteOpBase(std::move(stmtIds))); + return op; +} + +write_ops::UpdateCommandRequest makeTimeseriesUpdateOp( + OperationContext* opCtx, + std::shared_ptr batch, + const NamespaceString& bucketsNs, + const BSONObj& metadata, + std::vector&& stmtIds) { + write_ops::UpdateCommandRequest op(bucketsNs, + {makeTimeseriesUpdateOpEntry(opCtx, batch, metadata)}); + op.setWriteCommandRequestBase(makeTimeseriesWriteOpBase(std::move(stmtIds))); + return op; +} + +write_ops::UpdateCommandRequest makeTimeseriesDecompressAndUpdateOp( + OperationContext* opCtx, + std::shared_ptr batch, + const NamespaceString& bucketsNs, + const BSONObj& metadata, + std::vector&& stmtIds) { + // Generate the diff and apply it against the previously decompressed bucket document. + const bool mustCheckExistenceForInsertOperations = + static_cast(repl::tenantMigrationInfo(opCtx)); + auto diff = makeTimeseriesUpdateOpEntry(opCtx, batch, metadata).getU().getDiff(); + auto after = doc_diff::applyDiff( + batch->decompressed.value().after, diff, mustCheckExistenceForInsertOperations); + + auto bucketDecompressionFunc = + [before = std::move(batch->decompressed.value().before), + after = std::move(after)](const BSONObj& bucketDoc) -> boost::optional { + // Make sure the document hasn't changed since we read it into the BucketCatalog. + // This should not happen, but since we can double-check it here, we can guard + // against the missed update that would result from simply replacing with 'after'. + if (!bucketDoc.binaryEqual(before)) { + throwWriteConflictException("Bucket document changed between initial read and update"); + } + return after; + }; + + write_ops::UpdateCommandRequest op( + bucketsNs, + {makeTimeseriesTransformationOpEntry( + opCtx, batch->bucketHandle.bucketId.oid, std::move(bucketDecompressionFunc))}); + op.setWriteCommandRequestBase(makeTimeseriesWriteOpBase(std::move(stmtIds))); + return op; +} + +StatusWith attemptInsertIntoBucket( + OperationContext* opCtx, + bucket_catalog::BucketCatalog& bucketCatalog, + const NamespaceString& viewNs, + const Collection* bucketsColl, + TimeseriesOptions& timeSeriesOptions, + const BSONObj& measurementDoc, + bucket_catalog::CombineWithInsertsFromOtherClients combine, + bool fromUpdates) { + StatusWith swResult = + Status{ErrorCodes::BadValue, "Uninitialized InsertResult"}; + do { + // Avoids reopening existing buckets for the inserts of the updated measurements from + // time-series user updates. + if (!fromUpdates && + feature_flags::gTimeseriesScalabilityImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + swResult = bucket_catalog::tryInsert(opCtx, + bucketCatalog, + viewNs, + bucketsColl->getDefaultCollator(), + timeSeriesOptions, + measurementDoc, + combine); + + if (swResult.isOK()) { + const auto& insertResult = swResult.getValue(); + + // If the InsertResult doesn't contain a batch, we failed to insert the + // measurement into an open bucket and need to create/reopen a bucket. 
+ if (!insertResult.batch) { + bucket_catalog::BucketFindResult bucketFindResult; + BSONObj suitableBucket; + + if (auto* bucketId = stdx::get_if(&insertResult.candidate)) { + DBDirectClient client{opCtx}; + suitableBucket = + client.findOne(bucketsColl->ns(), BSON("_id" << *bucketId)); + bucketFindResult.fetchedBucket = true; + } else if (auto* pipeline = + stdx::get_if>(&insertResult.candidate)) { + // Resort to Query-Based reopening approach. + DBDirectClient client{opCtx}; + + // Ensure we have an index on meta and time for the time-series + // collection before performing the query. Without the index we + // will perform a full collection scan which could cause us to + // take a performance hit. + if (collectionHasIndexSupportingReopeningQuery( + opCtx, bucketsColl->getIndexCatalog(), timeSeriesOptions)) { + + // Run an aggregation to find a suitable bucket to reopen. + AggregateCommandRequest aggRequest(bucketsColl->ns(), *pipeline); + + auto cursor = uassertStatusOK( + DBClientCursor::fromAggregationRequest(&client, + aggRequest, + false /* secondaryOk + */, false /* + useExhaust*/)); + + if (cursor->more()) { + suitableBucket = cursor->next(); + } + bucketFindResult.queriedBucket = true; + } + } + + boost::optional bucketToReopen = boost::none; + if (!suitableBucket.isEmpty()) { + auto validator = [&](OperationContext * opCtx, + const BSONObj& bucketDoc) -> auto { + return bucketsColl->checkValidation(opCtx, bucketDoc); + }; + auto bucketToReopen = bucket_catalog::BucketToReopen{ + suitableBucket, validator, insertResult.catalogEra}; + bucketFindResult.bucketToReopen = std::move(bucketToReopen); + } + + swResult = bucket_catalog::insert(opCtx, + bucketCatalog, + viewNs, + bucketsColl->getDefaultCollator(), + timeSeriesOptions, + measurementDoc, + combine, + std::move(bucketFindResult)); + } + } + } else { + bucket_catalog::BucketFindResult bucketFindResult; + swResult = bucket_catalog::insert(opCtx, + bucketCatalog, + viewNs, + bucketsColl->getDefaultCollator(), + timeSeriesOptions, + measurementDoc, + combine, + bucketFindResult); + } + + // If there is an era offset (between the bucket we want to reopen and the + // catalog's current era), we could hit a WriteConflict error indicating we will + // need to refetch a bucket document as it is potentially stale.
+ } while (!swResult.isOK() && (swResult.getStatus().code() == ErrorCodes::WriteConflict)); + return swResult; +} + +void makeWriteRequest(OperationContext* opCtx, + std::shared_ptr batch, + const BSONObj& metadata, + TimeseriesStmtIds& stmtIds, + const NamespaceString& bucketsNs, + std::vector* insertOps, + std::vector* updateOps) { + if (batch.get()->numPreviouslyCommittedMeasurements == 0) { + insertOps->push_back( + makeTimeseriesInsertOp(batch, + bucketsNs, + metadata, + std::move(stmtIds[batch.get()->bucketHandle.bucketId.oid]))); + return; + } + if (batch.get()->decompressed.has_value()) { + updateOps->push_back(makeTimeseriesDecompressAndUpdateOp( + opCtx, + batch, + bucketsNs, + metadata, + std::move(stmtIds[batch.get()->bucketHandle.bucketId.oid]))); + } else { + updateOps->push_back( + makeTimeseriesUpdateOp(opCtx, + batch, + bucketsNs, + metadata, + std::move(stmtIds[batch.get()->bucketHandle.bucketId.oid]))); + } +} + +TimeseriesBatches insertIntoBucketCatalogForUpdate(OperationContext* opCtx, + bucket_catalog::BucketCatalog& bucketCatalog, + const CollectionPtr& bucketsColl, + const std::vector& measurements, + const NamespaceString& bucketsNs, + TimeseriesOptions& timeSeriesOptions) { + TimeseriesBatches batches; + auto viewNs = bucketsNs.getTimeseriesViewNamespace(); + + for (const auto& measurement : measurements) { + auto insertResult = uassertStatusOK( + attemptInsertIntoBucket(opCtx, + bucketCatalog, + viewNs, + bucketsColl.get(), + timeSeriesOptions, + measurement, + bucket_catalog::CombineWithInsertsFromOtherClients::kDisallow, + /*fromUpdates=*/true)); + batches.emplace_back(std::move(insertResult.batch)); + } + + return batches; +} + +void performAtomicWrites( + OperationContext* opCtx, + const CollectionPtr& coll, + const RecordId& recordId, + const boost::optional>& modificationOp, + const std::vector& insertOps, + bool fromMigrate, + StmtId stmtId) { + tassert( + 7655102, "must specify at least one type of write", modificationOp || !insertOps.empty()); NamespaceString ns = coll->ns(); DisableDocumentValidation disableDocumentValidation{opCtx}; @@ -162,61 +695,156 @@ Status performAtomicWrites(OperationContext* opCtx, write_ops_exec::assertCanWrite_inlock(opCtx, ns); - WriteUnitOfWork wuow{opCtx}; + // Groups all operations in one or several chained oplog entries to ensure the writes are + // replicated atomically. 
+ auto groupOplogEntries = !opCtx->getTxnNumber() && !insertOps.empty() && modificationOp; + WriteUnitOfWork wuow{opCtx, groupOplogEntries}; + + if (modificationOp) { + stdx::visit( + OverloadedVisitor{[&](const write_ops::UpdateCommandRequest& updateOp) { + updateTimeseriesDocument( + opCtx, coll, updateOp, &curOp->debug(), fromMigrate, stmtId); + }, + [&](const write_ops::DeleteCommandRequest& deleteOp) { + invariant(deleteOp.getDeletes().size() == 1); + auto deleteId = record_id_helpers::keyForOID( + deleteOp.getDeletes().front().getQ()["_id"].OID()); + invariant(recordId == deleteId); + collection_internal::deleteDocument( + opCtx, coll, stmtId, recordId, &curOp->debug(), fromMigrate); + }}, + *modificationOp); + } - stdx::visit( - OverloadedVisitor{ - [&](const write_ops::UpdateCommandRequest& updateOp) { - invariant(updateOp.getUpdates().size() == 1); - auto& update = updateOp.getUpdates().front(); + if (!insertOps.empty()) { + std::vector insertStatements; + for (auto& op : insertOps) { + invariant(op.getDocuments().size() == 1); + if (modificationOp) { + insertStatements.emplace_back(op.getDocuments().front()); + } else { + // Appends the stmtId for upsert. + insertStatements.emplace_back(stmtId, op.getDocuments().front()); + } + } + uassertStatusOK(collection_internal::insertDocuments( + opCtx, coll, insertStatements.begin(), insertStatements.end(), &curOp->debug())); + } - invariant(coll->isClustered()); + wuow.commit(); - auto original = coll->docFor(opCtx, recordId); + lastOpFixer.finishedOpSuccessfully(); +} - CollectionUpdateArgs args{original.value()}; - args.criteria = update.getQ(); - args.stmtIds = {stmtId}; - if (fromMigrate) { - args.source = OperationSource::kFromMigrate; - } +void commitTimeseriesBucketsAtomically( + OperationContext* opCtx, + bucket_catalog::BucketCatalog& bucketCatalog, + const CollectionPtr& coll, + const RecordId& recordId, + const boost::optional>& modificationOp, + TimeseriesBatches* batches, + const NamespaceString& bucketsNs, + bool fromMigrate, + StmtId stmtId) { + auto batchesToCommit = determineBatchesToCommit(*batches, extractFromSelf); + if (batchesToCommit.empty()) { + return; + } - BSONObj diffFromUpdate; - const BSONObj* diffOnIndexes = - collection_internal::kUpdateAllIndexes; // Assume all indexes are affected. - - // Overwrites the original bucket. 
- invariant(update.getU().type() == - write_ops::UpdateModification::Type::kReplacement); - auto updated = update.getU().getUpdateReplacement(); - args.update = update_oplog_entry::makeReplacementOplogEntry(updated); - - collection_internal::updateDocument(opCtx, - coll, - recordId, - original, - updated, - diffOnIndexes, - &curOp->debug(), - &args); - }, - [&](const write_ops::DeleteCommandRequest& deleteOp) { - invariant(deleteOp.getDeletes().size() == 1); - auto deleteId = - record_id_helpers::keyForOID(deleteOp.getDeletes().front().getQ()["_id"].OID()); - invariant(recordId == deleteId); - collection_internal::deleteDocument( - opCtx, coll, stmtId, recordId, &curOp->debug(), fromMigrate); - }}, - modificationOp); + Status abortStatus = Status::OK(); + ScopeGuard batchGuard{[&] { + for (auto batch : batchesToCommit) { + if (batch.get()) { + abort(bucketCatalog, batch, abortStatus); + } + } + }}; + + try { + std::vector insertOps; + std::vector updateOps; + + for (auto batch : batchesToCommit) { + auto metadata = getMetadata(bucketCatalog, batch.get()->bucketHandle); + auto prepareCommitStatus = prepareCommit(bucketCatalog, batch); + if (!prepareCommitStatus.isOK()) { + abortStatus = prepareCommitStatus; + return; + } - wuow.commit(); + TimeseriesStmtIds emptyStmtIds = {}; + makeWriteRequest( + opCtx, batch, metadata, emptyStmtIds, bucketsNs, &insertOps, &updateOps); + } - lastOpFixer.finishedOpSuccessfully(); + performAtomicWrites(opCtx, coll, recordId, modificationOp, insertOps, fromMigrate, stmtId); + + boost::optional opTime; + boost::optional electionId; + getOpTimeAndElectionId(opCtx, &opTime, &electionId); - return Status::OK(); -} catch (const DBException& ex) { - return ex.toStatus(); + for (auto batch : batchesToCommit) { + finish(bucketCatalog, batch, bucket_catalog::CommitInfo{opTime, electionId}); + batch.get().reset(); + } + } catch (const DBException& ex) { + abortStatus = ex.toStatus(); + throw; + } + + batchGuard.dismiss(); } +void performAtomicWritesForDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const RecordId& recordId, + const std::vector& unchangedMeasurements, + bool fromMigrate, + StmtId stmtId) { + OID bucketId = record_id_helpers::toBSONAs(recordId, "_id")["_id"].OID(); + auto modificationOp = makeModificationOp(bucketId, coll, unchangedMeasurements); + performAtomicWrites(opCtx, coll, recordId, modificationOp, {}, fromMigrate, stmtId); +} + +void performAtomicWritesForUpdate( + OperationContext* opCtx, + const CollectionPtr& coll, + const RecordId& recordId, + const boost::optional>& unchangedMeasurements, + const std::vector& modifiedMeasurements, + bool fromMigrate, + StmtId stmtId) { + // Uses a side bucket catalog to make sure updated measurements go to new buckets. + bucket_catalog::BucketCatalog sideBucketCatalog{1}; + auto timeSeriesOptions = *coll->getTimeseriesOptions(); + auto batches = insertIntoBucketCatalogForUpdate( + opCtx, sideBucketCatalog, coll, modifiedMeasurements, coll->ns(), timeSeriesOptions); + + auto modificationRequest = unchangedMeasurements + ? boost::make_optional( + makeModificationOp(record_id_helpers::toBSONAs(recordId, "_id")["_id"].OID(), + coll, + *unchangedMeasurements)) + : boost::none; + commitTimeseriesBucketsAtomically(opCtx, + sideBucketCatalog, + coll, + recordId, + modificationRequest, + &batches, + coll->ns(), + fromMigrate, + stmtId); + + // Merges the stats of the side bucket catalog into the main one. 
+ auto& bucketCatalog = bucket_catalog::BucketCatalog::get(opCtx); + auto viewNs = coll->ns().getTimeseriesViewNamespace(); + invariant(sideBucketCatalog.executionStats.size() == 1); + bucket_catalog::ExecutionStatsController stats = + bucket_catalog::internal::getOrInitializeExecutionStats(bucketCatalog, viewNs); + const auto& collStats = *sideBucketCatalog.executionStats.begin()->second; + bucket_catalog::addCollectionExecutionStats(stats, collStats); +} } // namespace mongo::timeseries diff --git a/src/mongo/db/timeseries/timeseries_write_util.h b/src/mongo/db/timeseries/timeseries_write_util.h index ed63d9082b77a..50e801c38c6a6 100644 --- a/src/mongo/db/timeseries/timeseries_write_util.h +++ b/src/mongo/db/timeseries/timeseries_write_util.h @@ -29,18 +29,64 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_catalog.h" #include "mongo/db/timeseries/bucket_catalog/write_batch.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_options.h" +#include "mongo/stdx/unordered_map.h" namespace mongo::timeseries { + +/** + * Constructs an update request using a single update statement at position `opIndex`. + */ +write_ops::UpdateCommandRequest buildSingleUpdateOp(const write_ops::UpdateCommandRequest& wholeOp, + size_t opIndex); + + +/** + * Asserts the buckets collection exists and has valid time-series options. + * + * Assumes already holding a lock on the collection. + */ +void assertTimeseriesBucketsCollection(const Collection* bucketsColl); + /** * Returns the document for writing a new bucket with a write batch. */ BSONObj makeNewDocumentForWrite(std::shared_ptr batch, const BSONObj& metadata); +/** + * Returns a new document, compressed, with which to initialize a new bucket containing only the + * given 'batch'. If compression fails for any reason, an uncompressed document will be returned. + */ +BSONObj makeNewCompressedDocumentForWrite( + std::shared_ptr batch, + const BSONObj& metadata, + const NamespaceString& nss, + StringData timeField); + /** * Returns the document for writing a new bucket with 'measurements'. Calculates the min and max * fields while building the document. @@ -54,18 +100,162 @@ BSONObj makeNewDocumentForWrite( const boost::optional& options, const boost::optional& comparator); +/** + * Returns the document for writing a new bucket with 'measurements'. Generates the id and + * calculates the min and max fields while building the document. + * + * The measurements must already be known to fit in the same bucket. No checks will be done. + */ +BSONObj makeBucketDocument(const std::vector& measurements, + const NamespaceString& nss, + const TimeseriesOptions& options, + const StringData::ComparatorInterface* comparator); + +/** + * Returns an update request to the bucket when the 'measurements' is non-empty. Otherwise, returns + * a delete request to the bucket. 
+ */ +stdx::variant makeModificationOp( + const OID& bucketId, const CollectionPtr& coll, const std::vector& measurements); + +using TimeseriesBatches = std::vector>; +using TimeseriesStmtIds = stdx::unordered_map, OID::Hasher>; + +/** + * Builds the transform update oplog entry with a transform function. + */ +write_ops::UpdateOpEntry makeTimeseriesTransformationOpEntry( + OperationContext* opCtx, + const OID& bucketId, + write_ops::UpdateModification::TransformFunc transformationFunc); + +/** + * Retrieves the opTime and electionId according to the current replication mode. + */ +void getOpTimeAndElectionId(OperationContext* opCtx, + boost::optional* opTime, + boost::optional* electionId); + +/** + * Builds the insert command request from a time-series insert write batch. + */ +write_ops::InsertCommandRequest makeTimeseriesInsertOp( + std::shared_ptr batch, + const NamespaceString& bucketsNs, + const BSONObj& metadata, + std::vector&& stmtIds = {}); + +/** + * Builds the update command request from a time-series insert write batch. + */ +write_ops::UpdateCommandRequest makeTimeseriesUpdateOp( + OperationContext* opCtx, + std::shared_ptr batch, + const NamespaceString& bucketsNs, + const BSONObj& metadata, + std::vector&& stmtIds = {}); + +/** + * Builds the decompress and update command request from a time-series insert write batch. + */ +write_ops::UpdateCommandRequest makeTimeseriesDecompressAndUpdateOp( + OperationContext* opCtx, + std::shared_ptr batch, + const NamespaceString& bucketsNs, + const BSONObj& metadata, + std::vector&& stmtIds = {}); + +/** + * Attempts to insert a measurement doc into a bucket in the bucket catalog and retries + * automatically on certain errors. Only reopens existing buckets if the insert was initiated from a + * user insert. + * + * Returns the write batch of the insert and other information if succeeded. + */ +StatusWith attemptInsertIntoBucket( + OperationContext* opCtx, + bucket_catalog::BucketCatalog& bucketCatalog, + const NamespaceString& viewNs, + const Collection* bucketsColl, + TimeseriesOptions& timeSeriesOptions, + const BSONObj& measurementDoc, + bucket_catalog::CombineWithInsertsFromOtherClients combine, + bool fromUpdates = false); + +/** + * Prepares the final write batches needed for performing the writes to storage. + */ +template +std::vector>> +determineBatchesToCommit(T& batches, Fn&& extractElem) { + std::vector>> + batchesToCommit; + for (auto& elem : batches) { + std::shared_ptr& batch = extractElem(elem); + if (timeseries::bucket_catalog::claimWriteBatchCommitRights(*batch)) { + batchesToCommit.push_back(batch); + } + } + + // Sort by bucket so that preparing the commit for each batch cannot deadlock. + std::sort(batchesToCommit.begin(), batchesToCommit.end(), [](auto left, auto right) { + return left.get()->bucketHandle.bucketId.oid < right.get()->bucketHandle.bucketId.oid; + }); + + return batchesToCommit; +} + +/** + * Builds the insert and update requests for performing the writes to storage from the write batches + * provided. + */ +void makeWriteRequest(OperationContext* opCtx, + std::shared_ptr batch, + const BSONObj& metadata, + TimeseriesStmtIds& stmtIds, + const NamespaceString& bucketsNs, + std::vector* insertOps, + std::vector* updateOps); + /** * Performs modifications atomically for a user command on a time-series collection. + * * Replaces the bucket document for a partial bucket modification and removes the bucket for a full - * bucket modification. + * bucket modification. 
Inserts new bucket documents if provided. * - * Guarantees write atomicity per bucket document. - */ -Status performAtomicWrites(OperationContext* opCtx, - const CollectionPtr& coll, - const RecordId& recordId, - const stdx::variant& modificationOp, - bool fromMigrate, - StmtId stmtId); + * All the modifications are written and replicated atomically. + */ +void performAtomicWrites( + OperationContext* opCtx, + const CollectionPtr& coll, + const RecordId& recordId, + const boost::optional>& modificationOp, + const std::vector& insertOps, + bool fromMigrate, + StmtId stmtId); + +/** + * Constructs the write request with the provided measurements and performs the write atomically for + * a time-series user delete on one bucket. + */ +void performAtomicWritesForDelete(OperationContext* opCtx, + const CollectionPtr& coll, + const RecordId& recordId, + const std::vector& unchangedMeasurements, + bool fromMigrate, + StmtId stmtId); + +/** + * Constructs the write requests with the provided measurements and performs the writes atomically + * for a time-series user update on one bucket. + */ +void performAtomicWritesForUpdate( + OperationContext* opCtx, + const CollectionPtr& coll, + const RecordId& recordId, + const boost::optional>& unchangedMeasurements, + const std::vector& modifiedMeasurements, + bool fromMigrate, + StmtId stmtId); } // namespace mongo::timeseries diff --git a/src/mongo/db/timeseries/timeseries_write_util_test.cpp b/src/mongo/db/timeseries/timeseries_write_util_test.cpp index 790e96fbdb2b8..ee9f9cb9eb589 100644 --- a/src/mongo/db/timeseries/timeseries_write_util_test.cpp +++ b/src/mongo/db/timeseries/timeseries_write_util_test.cpp @@ -27,35 +27,65 @@ * it in the license file. */ +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/timeseries//timeseries_constants.h" +#include "mongo/db/timeseries/bucket_catalog/bucket_identifiers.h" +#include "mongo/db/timeseries/bucket_catalog/execution_stats.h" +#include "mongo/db/timeseries/bucket_compression.h" #include "mongo/db/timeseries/timeseries_write_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::timeseries { namespace { +const TimeseriesOptions kTimeseriesOptions("time"); + class TimeseriesWriteUtilTest : public CatalogTestFixture { protected: using CatalogTestFixture::setUp; -}; + std::shared_ptr generateBatch(const NamespaceString& ns) { + OID oid = OID::createFromString("629e1e680958e279dc29a517"_sd); + bucket_catalog::BucketId bucketId(ns, oid); + std::uint8_t stripe = 0; + auto opId = 0; + bucket_catalog::ExecutionStats globalStats; + auto collectionStats = std::make_shared(); + bucket_catalog::ExecutionStatsController stats(collectionStats, globalStats); + return std::make_shared( + bucket_catalog::BucketHandle{bucketId, stripe}, opId, stats); + } +}; TEST_F(TimeseriesWriteUtilTest, 
MakeNewBucketFromWriteBatch) { - NamespaceString ns{"db_timeseries_write_util_test", "MakeNewBucketFromWriteBatch"}; + NamespaceString ns = NamespaceString::createNamespaceString_forTest( + "db_timeseries_write_util_test", "MakeNewBucketFromWriteBatch"); // Builds a write batch. - OID oid = OID::createFromString("629e1e680958e279dc29a517"_sd); - bucket_catalog::BucketId bucketId(ns, oid); - std::uint8_t stripe = 0; - auto opId = 0; - bucket_catalog::ExecutionStats globalStats; - auto collectionStats = std::make_shared(); - bucket_catalog::ExecutionStatsController stats(collectionStats, globalStats); - auto batch = std::make_shared( - bucket_catalog::BucketHandle{bucketId, stripe}, opId, stats); + auto batch = generateBatch(ns); const std::vector measurements = { fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":1,"b":1})"), fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":2,"b":2})"), @@ -83,18 +113,11 @@ TEST_F(TimeseriesWriteUtilTest, MakeNewBucketFromWriteBatch) { } TEST_F(TimeseriesWriteUtilTest, MakeNewBucketFromWriteBatchWithMeta) { - NamespaceString ns{"db_timeseries_write_util_test", "MakeNewBucketFromWriteBatchWithMeta"}; + NamespaceString ns = NamespaceString::createNamespaceString_forTest( + "db_timeseries_write_util_test", "MakeNewBucketFromWriteBatchWithMeta"); // Builds a write batch. - OID oid = OID::createFromString("629e1e680958e279dc29a517"_sd); - bucket_catalog::BucketId bucketId(ns, oid); - std::uint8_t stripe = 0; - auto opId = 0; - bucket_catalog::ExecutionStats globalStats; - auto collectionStats = std::make_shared(); - bucket_catalog::ExecutionStatsController stats(collectionStats, globalStats); - auto batch = std::make_shared( - bucket_catalog::BucketHandle{bucketId, stripe}, opId, stats); + auto batch = generateBatch(ns); const std::vector measurements = { fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"meta":{"tag":1},"a":1,"b":1})"), fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"meta":{"tag":1},"a":2,"b":2})"), @@ -123,6 +146,92 @@ TEST_F(TimeseriesWriteUtilTest, MakeNewBucketFromWriteBatchWithMeta) { ASSERT_EQ(0, comparator.compare(newDoc, bucketDoc)); } +TEST_F(TimeseriesWriteUtilTest, MakeNewCompressedBucketFromWriteBatch) { + NamespaceString ns = NamespaceString::createNamespaceString_forTest( + "db_timeseries_write_util_test", "MakeNewCompressedBucketFromWriteBatch"); + + // Builds a write batch with out-of-order time to verify that bucket compression sorts by time. + auto batch = generateBatch(ns); + const std::vector measurements = { + fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":1,"b":1})"), + fromjson(R"({"time":{"$date":"2022-06-06T15:34:50.000Z"},"a":3,"b":3})"), + fromjson(R"({"time":{"$date":"2022-06-06T15:34:40.000Z"},"a":2,"b":2})")}; + batch->measurements = {measurements.begin(), measurements.end()}; + batch->min = fromjson(R"({"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":1,"b":1})"); + batch->max = fromjson(R"({"time":{"$date":"2022-06-06T15:34:50.000Z"},"a":3,"b":3})"); + + // Makes the new compressed document for write. + auto compressedDoc = timeseries::makeNewCompressedDocumentForWrite( + batch, /*metadata=*/{}, ns, kTimeseriesOptions.getTimeField()); + + // makeNewCompressedDocumentForWrite() can return the uncompressed bucket if an error was + // encountered during compression. Check that compression was successful. 
+ ASSERT_EQ(timeseries::kTimeseriesControlCompressedVersion, + compressedDoc.getObjectField(timeseries::kBucketControlFieldName) + .getIntField(timeseries::kBucketControlVersionFieldName)); + + auto decompressedDoc = decompressBucket(compressedDoc); + ASSERT(decompressedDoc); + + // Checks the measurements are stored in the bucket format. + const BSONObj bucketDoc = fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":1,"b":1}, + "max":{"time":{"$date":"2022-06-06T15:34:50.000Z"},"a":3,"b":3}}, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}, + "1":{"$date":"2022-06-06T15:34:40.000Z"}, + "2":{"$date":"2022-06-06T15:34:50.000Z"}}, + "a":{"0":1,"1":2,"2":3}, + "b":{"0":1,"1":2,"2":3}}})"); + + UnorderedFieldsBSONObjComparator comparator; + ASSERT_EQ(0, comparator.compare(*decompressedDoc, bucketDoc)); +} + +TEST_F(TimeseriesWriteUtilTest, MakeNewCompressedBucketFromWriteBatchWithMeta) { + NamespaceString ns = NamespaceString::createNamespaceString_forTest( + "db_timeseries_write_util_test", "MakeNewCompressedBucketFromWriteBatchWithMeta"); + + // Builds a write batch with out-of-order time to verify that bucket compression sorts by time. + auto batch = generateBatch(ns); + const std::vector measurements = { + fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"meta":{"tag":1},"a":1,"b":1})"), + fromjson(R"({"time":{"$date":"2022-06-06T15:34:50.000Z"},"meta":{"tag":1},"a":3,"b":3})"), + fromjson(R"({"time":{"$date":"2022-06-06T15:34:40.000Z"},"meta":{"tag":1},"a":2,"b":2})")}; + batch->measurements = {measurements.begin(), measurements.end()}; + batch->min = fromjson(R"({"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":1,"b":1})"); + batch->max = fromjson(R"({"time":{"$date":"2022-06-06T15:34:50.000Z"},"a":3,"b":3})"); + auto metadata = fromjson(R"({"meta":{"tag":1}})"); + + // Makes the new compressed document for write. + auto compressedDoc = timeseries::makeNewCompressedDocumentForWrite( + batch, metadata, ns, kTimeseriesOptions.getTimeField()); + + // makeNewCompressedDocumentForWrite() can return the uncompressed bucket if an error was + // encountered during compression. Check that compression was successful. + ASSERT_EQ(timeseries::kTimeseriesControlCompressedVersion, + compressedDoc.getObjectField(timeseries::kBucketControlFieldName) + .getIntField(timeseries::kBucketControlVersionFieldName)); + + auto decompressedDoc = decompressBucket(compressedDoc); + ASSERT(decompressedDoc); + + // Checks the measurements are stored in the bucket format. 
+ const BSONObj bucketDoc = fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":1,"b":1}, + "max":{"time":{"$date":"2022-06-06T15:34:50.000Z"},"a":3,"b":3}}, + "meta":{"tag":1}, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}, + "1":{"$date":"2022-06-06T15:34:40.000Z"}, + "2":{"$date":"2022-06-06T15:34:50.000Z"}}, + "a":{"0":1,"1":2,"2":3}, + "b":{"0":1,"1":2,"2":3}}})"); + + UnorderedFieldsBSONObjComparator comparator; + ASSERT_EQ(0, comparator.compare(*decompressedDoc, bucketDoc)); +} + TEST_F(TimeseriesWriteUtilTest, MakeNewBucketFromMeasurements) { OID oid = OID::createFromString("629e1e680958e279dc29a517"_sd); TimeseriesOptions options("time"); @@ -223,12 +332,14 @@ TEST_F(TimeseriesWriteUtilTest, PerformAtomicDelete) { op.setWriteCommandRequestBase(std::move(base)); - ASSERT_OK(performAtomicWrites(opCtx, - bucketsColl.getCollection(), - recordId, - op, - /*fromMigrate=*/false, - /*stmtId=*/kUninitializedStmtId)); + ASSERT_DOES_NOT_THROW(performAtomicWrites( + opCtx, + bucketsColl.getCollection(), + recordId, + stdx::variant{op}, + {}, + /*fromMigrate=*/false, + /*stmtId=*/kUninitializedStmtId)); } // Checks the document is removed. @@ -290,12 +401,14 @@ TEST_F(TimeseriesWriteUtilTest, PerformAtomicUpdate) { op.setWriteCommandRequestBase(std::move(base)); - ASSERT_OK(performAtomicWrites(opCtx, - bucketsColl.getCollection(), - recordId, - op, - /*fromMigrate=*/false, - /*stmtId=*/kUninitializedStmtId)); + ASSERT_DOES_NOT_THROW(performAtomicWrites( + opCtx, + bucketsColl.getCollection(), + recordId, + stdx::variant{op}, + {}, + /*fromMigrate=*/false, + /*stmtId=*/kUninitializedStmtId)); } // Checks the document is updated. @@ -309,5 +422,345 @@ TEST_F(TimeseriesWriteUtilTest, PerformAtomicUpdate) { } } +TEST_F(TimeseriesWriteUtilTest, PerformAtomicDeleteAndInsert) { + NamespaceString ns = NamespaceString::createNamespaceString_forTest( + "db_timeseries_write_util_test", "PerformAtomicDeleteAndInsert"); + auto opCtx = operationContext(); + ASSERT_OK(createCollection(opCtx, + ns.dbName(), + BSON("create" << ns.coll() << "timeseries" + << BSON("timeField" + << "time")))); + + // Inserts a bucket document. + const BSONObj bucketDoc1 = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":1,"b":1}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":3,"b":3}}, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}, + "1":{"$date":"2022-06-06T15:34:30.000Z"}, + "2":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":1,"1":2,"2":3}, + "b":{"0":1,"1":2,"2":3}}})"); + OID bucketId1 = bucketDoc1["_id"].OID(); + auto recordId1 = record_id_helpers::keyForOID(bucketId1); + + AutoGetCollection bucketsColl(opCtx, ns.makeTimeseriesBucketsNamespace(), LockMode::MODE_IX); + { + WriteUnitOfWork wunit{opCtx}; + ASSERT_OK(collection_internal::insertDocument( + opCtx, *bucketsColl, InsertStatement{bucketDoc1}, nullptr)); + wunit.commit(); + } + + // Deletes the bucket document and inserts a new bucket document. 
+ const BSONObj bucketDoc2 = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a518"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"}, + "a":10, + "b":10}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"}, + "a":30, + "b":30}}, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}, + "1":{"$date":"2022-06-06T15:34:30.000Z"}, + "2":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":10,"1":20,"2":30}, + "b":{"0":10,"1":20,"2":30}}})"); + OID bucketId2 = bucketDoc2["_id"].OID(); + auto recordId2 = record_id_helpers::keyForOID(bucketId2); + { + write_ops::DeleteOpEntry deleteEntry(BSON("_id" << bucketId1), false); + write_ops::DeleteCommandRequest deleteOp(ns.makeTimeseriesBucketsNamespace(), + {deleteEntry}); + write_ops::WriteCommandRequestBase base; + base.setBypassDocumentValidation(true); + base.setStmtIds(std::vector{kUninitializedStmtId}); + deleteOp.setWriteCommandRequestBase(base); + + write_ops::InsertCommandRequest insertOp(ns.makeTimeseriesBucketsNamespace(), {bucketDoc2}); + insertOp.setWriteCommandRequestBase(base); + + ASSERT_DOES_NOT_THROW(performAtomicWrites( + opCtx, + bucketsColl.getCollection(), + recordId1, + stdx::variant{ + deleteOp}, + {insertOp}, + /*fromMigrate=*/false, + /*stmtId=*/kUninitializedStmtId)); + } + + // Checks document1 is removed and document2 is added. + { + Snapshotted doc; + bool found = bucketsColl->findDoc(opCtx, recordId1, &doc); + ASSERT_FALSE(found); + + found = bucketsColl->findDoc(opCtx, recordId2, &doc); + ASSERT_TRUE(found); + UnorderedFieldsBSONObjComparator comparator; + ASSERT_EQ(0, comparator.compare(doc.value(), bucketDoc2)); + } +} + +TEST_F(TimeseriesWriteUtilTest, PerformAtomicUpdateAndInserts) { + NamespaceString ns = NamespaceString::createNamespaceString_forTest( + "db_timeseries_write_util_test", "PerformAtomicUpdateAndInserts"); + auto opCtx = operationContext(); + ASSERT_OK(createCollection(opCtx, + ns.dbName(), + BSON("create" << ns.coll() << "timeseries" + << BSON("timeField" + << "time")))); + + // Inserts a bucket document. + const BSONObj bucketDoc1 = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":1,"b":1}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":3,"b":3}}, + "meta":1, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}, + "1":{"$date":"2022-06-06T15:34:30.000Z"}, + "2":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":1,"1":2,"2":3}, + "b":{"0":1,"1":2,"2":3}}})"); + OID bucketId1 = bucketDoc1["_id"].OID(); + auto recordId1 = record_id_helpers::keyForOID(bucketId1); + + AutoGetCollection bucketsColl(opCtx, ns.makeTimeseriesBucketsNamespace(), LockMode::MODE_IX); + { + WriteUnitOfWork wunit{opCtx}; + ASSERT_OK(collection_internal::insertDocument( + opCtx, *bucketsColl, InsertStatement{bucketDoc1}, nullptr)); + wunit.commit(); + } + + // Updates the bucket document and inserts two new bucket documents. 
+ const BSONObj replaceDoc = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":3,"b":3}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":3,"b":3}}, + "meta":1, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":3}, + "b":{"0":3}}})"); + const BSONObj bucketDoc2 = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a518"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"}, + "a":1, + "b":1}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"}, + "a":1, + "b":1}}, + "meta":2, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":1}, + "b":{"0":1}}})"); + OID bucketId2 = bucketDoc2["_id"].OID(); + auto recordId2 = record_id_helpers::keyForOID(bucketId2); + const BSONObj bucketDoc3 = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a519"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"}, + "a":2, + "b":2}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"}, + "a":2, + "b":2}}, + "meta":3, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":2}, + "b":{"0":2}}})"); + OID bucketId3 = bucketDoc3["_id"].OID(); + auto recordId3 = record_id_helpers::keyForOID(bucketId3); + { + write_ops::UpdateModification u(replaceDoc); + write_ops::UpdateOpEntry update(BSON("_id" << bucketId1), std::move(u)); + write_ops::UpdateCommandRequest updateOp(ns.makeTimeseriesBucketsNamespace(), {update}); + write_ops::WriteCommandRequestBase base; + base.setBypassDocumentValidation(true); + base.setStmtIds(std::vector{kUninitializedStmtId}); + updateOp.setWriteCommandRequestBase(base); + + write_ops::InsertCommandRequest insertOp1(ns.makeTimeseriesBucketsNamespace(), + {bucketDoc2}); + insertOp1.setWriteCommandRequestBase(base); + write_ops::InsertCommandRequest insertOp2(ns.makeTimeseriesBucketsNamespace(), + {bucketDoc3}); + insertOp2.setWriteCommandRequestBase(base); + + ASSERT_DOES_NOT_THROW(performAtomicWrites( + opCtx, + bucketsColl.getCollection(), + recordId1, + stdx::variant{ + updateOp}, + {insertOp1, insertOp2}, + /*fromMigrate=*/false, + /*stmtId=*/kUninitializedStmtId)); + } + + // Checks document1 is updated and document2 and document3 are added. + { + Snapshotted doc; + bool found = bucketsColl->findDoc(opCtx, recordId1, &doc); + ASSERT_TRUE(found); + UnorderedFieldsBSONObjComparator comparator; + ASSERT_EQ(0, comparator.compare(doc.value(), replaceDoc)); + + found = bucketsColl->findDoc(opCtx, recordId2, &doc); + ASSERT_TRUE(found); + ASSERT_EQ(0, comparator.compare(doc.value(), bucketDoc2)); + + found = bucketsColl->findDoc(opCtx, recordId3, &doc); + ASSERT_TRUE(found); + ASSERT_EQ(0, comparator.compare(doc.value(), bucketDoc3)); + } +} + +TEST_F(TimeseriesWriteUtilTest, PerformAtomicWritesForUserDelete) { + NamespaceString ns = NamespaceString::createNamespaceString_forTest( + "db_timeseries_write_util_test", "PerformAtomicWritesForUserDelete"); + auto opCtx = operationContext(); + ASSERT_OK(createCollection(opCtx, + ns.dbName(), + BSON("create" << ns.coll() << "timeseries" + << BSON("timeField" + << "time")))); + + // Inserts a bucket document. 
+ const BSONObj bucketDoc = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":1,"b":1}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":3,"b":3}}, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}, + "1":{"$date":"2022-06-06T15:34:30.000Z"}, + "2":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":1,"1":2,"2":3}, + "b":{"0":1,"1":2,"2":3}}})"); + OID bucketId = bucketDoc["_id"].OID(); + auto recordId = record_id_helpers::keyForOID(bucketId); + + AutoGetCollection bucketsColl(opCtx, ns.makeTimeseriesBucketsNamespace(), LockMode::MODE_IX); + { + WriteUnitOfWork wunit{opCtx}; + ASSERT_OK(collection_internal::insertDocument( + opCtx, *bucketsColl, InsertStatement{bucketDoc}, nullptr)); + wunit.commit(); + } + + // Deletes two measurements from the bucket. + { + ASSERT_DOES_NOT_THROW(performAtomicWritesForDelete( + opCtx, + bucketsColl.getCollection(), + recordId, + {::mongo::fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":2,"b":2})")}, + /*fromMigrate=*/false, + /*stmtId=*/kUninitializedStmtId)); + } + + // Checks only one measurement is left in the bucket. + { + const BSONObj replaceDoc = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":2,"b":2}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":2,"b":2}}, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":2}, + "b":{"0":2}}})"); + Snapshotted doc; + bool found = bucketsColl->findDoc(opCtx, recordId, &doc); + + ASSERT_TRUE(found); + UnorderedFieldsBSONObjComparator comparator; + ASSERT_EQ(0, comparator.compare(doc.value(), replaceDoc)); + } + + // Deletes the last measurement from the bucket. + { + ASSERT_DOES_NOT_THROW(performAtomicWritesForDelete(opCtx, + bucketsColl.getCollection(), + recordId, + {}, + /*fromMigrate=*/false, + /*stmtId=*/kUninitializedStmtId)); + } + + // Checks the document is removed. + { + Snapshotted doc; + bool found = bucketsColl->findDoc(opCtx, recordId, &doc); + ASSERT_FALSE(found); + } +} + +TEST_F(TimeseriesWriteUtilTest, PerformAtomicWritesForUserUpdate) { + NamespaceString ns = NamespaceString::createNamespaceString_forTest( + "db_timeseries_write_util_test", "PerformAtomicWritesForUserUpdate"); + auto opCtx = operationContext(); + ASSERT_OK(createCollection(opCtx, + ns.dbName(), + BSON("create" << ns.coll() << "timeseries" + << BSON("timeField" + << "time")))); + + // Inserts a bucket document. + const BSONObj bucketDoc = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":1,"b":1}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":3,"b":3}}, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}, + "1":{"$date":"2022-06-06T15:34:30.000Z"}, + "2":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":1,"1":2,"2":3}, + "b":{"0":1,"1":2,"2":3}}})"); + OID bucketId = bucketDoc["_id"].OID(); + auto recordId = record_id_helpers::keyForOID(bucketId); + + AutoGetCollection bucketsColl(opCtx, ns.makeTimeseriesBucketsNamespace(), LockMode::MODE_IX); + { + WriteUnitOfWork wunit{opCtx}; + ASSERT_OK(collection_internal::insertDocument( + opCtx, *bucketsColl, InsertStatement{bucketDoc}, nullptr)); + wunit.commit(); + } + + // Updates two measurements from the bucket. 
+ { + std::vector unchangedMeasurements{ + ::mongo::fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":2,"b":2})")}; + ASSERT_DOES_NOT_THROW(performAtomicWritesForUpdate( + opCtx, + bucketsColl.getCollection(), + recordId, + unchangedMeasurements, + {::mongo::fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":10,"b":10})"), + ::mongo::fromjson(R"({"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":30,"b":30})")}, + /*fromMigrate=*/false, + /*stmtId=*/kUninitializedStmtId)); + } + + // Checks only one measurement is left in the original bucket and a new document was inserted. + { + const BSONObj replaceDoc = ::mongo::fromjson( + R"({"_id":{"$oid":"629e1e680958e279dc29a517"}, + "control":{"version":1,"min":{"time":{"$date":"2022-06-06T15:34:00.000Z"},"a":2,"b":2}, + "max":{"time":{"$date":"2022-06-06T15:34:30.000Z"},"a":2,"b":2}}, + "data":{"time":{"0":{"$date":"2022-06-06T15:34:30.000Z"}}, + "a":{"0":2}, + "b":{"0":2}}})"); + Snapshotted doc; + bool found = bucketsColl->findDoc(opCtx, recordId, &doc); + + ASSERT_TRUE(found); + UnorderedFieldsBSONObjComparator comparator; + ASSERT_EQ(0, comparator.compare(doc.value(), replaceDoc)); + + ASSERT_EQ(2, bucketsColl->numRecords(opCtx)); + } +} + } // namespace } // namespace mongo::timeseries diff --git a/src/mongo/db/traffic_reader.cpp b/src/mongo/db/traffic_reader.cpp index 0258f20e6a524..90fd1d596abf5 100644 --- a/src/mongo/db/traffic_reader.cpp +++ b/src/mongo/db/traffic_reader.cpp @@ -27,35 +27,45 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include #include #include #include -#include -#include +#include #ifdef _WIN32 #include -#else -#include #endif -#include "mongo/base/data_cursor.h" #include "mongo/base/data_range_cursor.h" #include "mongo/base/data_type_endian.h" -#include "mongo/base/data_type_validated.h" +#include "mongo/base/data_type_terminated.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/traffic_reader.h" #include "mongo/rpc/factory.h" #include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" #include "mongo/util/assert_util.h" #include "mongo/util/errno_util.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + namespace { // Taken from src/mongo/gotools/mongoreplay/util.go // Time.Unix() returns the number of seconds from the unix epoch but time's diff --git a/src/mongo/db/traffic_reader.h b/src/mongo/db/traffic_reader.h index 196a7dacc4e70..00fa8de3de3bf 100644 --- a/src/mongo/db/traffic_reader.h +++ b/src/mongo/db/traffic_reader.h @@ -27,6 +27,10 @@ * it in the license file. */ +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/rpc/op_msg.h" #pragma once diff --git a/src/mongo/db/traffic_reader_main.cpp b/src/mongo/db/traffic_reader_main.cpp index cf94cf89dcb1e..5f09101f36d7f 100644 --- a/src/mongo/db/traffic_reader_main.cpp +++ b/src/mongo/db/traffic_reader_main.cpp @@ -27,25 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include #include -#include +// IWYU pragma: no_include "boost/program_options/detail/parsers.hpp" +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep #include #include +#include #ifdef _WIN32 #include #endif +#include // IWYU pragma: keep + #include "mongo/base/initializer.h" +#include "mongo/base/status.h" #include "mongo/db/traffic_reader.h" +#include "mongo/stdx/type_traits.h" #include "mongo/util/exit_code.h" #include "mongo/util/signal_handlers.h" -#include "mongo/util/text.h" - -#include -#include +#include "mongo/util/text.h" // IWYU pragma: keep using namespace mongo; diff --git a/src/mongo/db/traffic_recorder.cpp b/src/mongo/db/traffic_recorder.cpp index 8ca593101b350..fccf9432f1efd 100644 --- a/src/mongo/db/traffic_recorder.cpp +++ b/src/mongo/db/traffic_recorder.cpp @@ -27,24 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/traffic_recorder.h" -#include "mongo/db/traffic_recorder_gen.h" - -#include -#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include +#include + +#include +#include #include "mongo/base/data_builder.h" +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/data_type_endian.h" #include "mongo/base/data_type_terminated.h" -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" -#include "mongo/rpc/factory.h" +#include "mongo/db/traffic_recorder.h" +#include "mongo/db/traffic_recorder_gen.h" #include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/producer_consumer_queue.h" -#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/traffic_recorder.h b/src/mongo/db/traffic_recorder.h index daa9d024bf704..d841942b85d1a 100644 --- a/src/mongo/db/traffic_recorder.h +++ b/src/mongo/db/traffic_recorder.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/db/service_context.h" #include "mongo/db/traffic_recorder_gen.h" @@ -37,6 +38,7 @@ #include "mongo/platform/mutex.h" #include "mongo/rpc/message.h" #include "mongo/transport/session.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/traffic_recorder_validators.cpp b/src/mongo/db/traffic_recorder_validators.cpp index c74075f77380f..3f4136b83bfdb 100644 --- a/src/mongo/db/traffic_recorder_validators.cpp +++ b/src/mongo/db/traffic_recorder_validators.cpp @@ -31,6 +31,9 @@ #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/traffic_recorder_validators.h b/src/mongo/db/traffic_recorder_validators.h index 532280e203c3e..f3dc96243f3b7 100644 --- a/src/mongo/db/traffic_recorder_validators.h +++ b/src/mongo/db/traffic_recorder_validators.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include #include "mongo/base/status.h" diff --git a/src/mongo/db/transaction/SConscript b/src/mongo/db/transaction/SConscript index 
c47c180e46f78..24e99bdd1c448 100644 --- a/src/mongo/db/transaction/SConscript +++ b/src/mongo/db/transaction/SConscript @@ -7,7 +7,6 @@ env = env.Clone() env.Library( target='transaction', source=[ - 'internal_transactions_reap_service.cpp', 'retryable_writes_stats.cpp', 'server_transactions_metrics.cpp', 'session_catalog_mongod_transaction_interface_impl.cpp', @@ -15,7 +14,6 @@ env.Library( 'transaction_metrics_observer.cpp', 'transaction_participant.cpp', 'transaction_participant_resource_yielder.cpp', - 'internal_transactions_reap_service.idl', 'transaction_participant.idl', 'transactions_stats.idl', ], @@ -28,6 +26,7 @@ env.Library( '$BUILD_DIR/mongo/db/concurrency/exception_util', '$BUILD_DIR/mongo/db/curop_failpoint_helpers', '$BUILD_DIR/mongo/db/dbdirectclient', + '$BUILD_DIR/mongo/db/dbhelpers', '$BUILD_DIR/mongo/db/index/index_access_method', '$BUILD_DIR/mongo/db/index_builds_coordinator_interface', '$BUILD_DIR/mongo/db/index_commands_idl', @@ -39,7 +38,6 @@ env.Library( '$BUILD_DIR/mongo/db/repl/repl_server_parameters', '$BUILD_DIR/mongo/db/repl/replica_set_aware_service', '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', '$BUILD_DIR/mongo/db/shard_role', '$BUILD_DIR/mongo/db/stats/fill_locker_info', @@ -92,7 +90,6 @@ env.Library( env.CppUnitTest( target='db_transaction_test', source=[ - 'internal_transactions_reap_service_test.cpp', 'transaction_api_test.cpp', 'transaction_history_iterator_test.cpp', 'transaction_operations_test.cpp', @@ -102,7 +99,6 @@ env.CppUnitTest( LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/dbhelpers', - '$BUILD_DIR/mongo/db/op_observer/op_observer', '$BUILD_DIR/mongo/db/op_observer/op_observer_impl', '$BUILD_DIR/mongo/db/op_observer/oplog_writer_impl', '$BUILD_DIR/mongo/db/repl/image_collection_entry', @@ -110,12 +106,11 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/repl/replica_set_aware_service', '$BUILD_DIR/mongo/db/repl/storage_interface_impl', '$BUILD_DIR/mongo/db/service_context_d_test_fixture', - '$BUILD_DIR/mongo/db/service_context_test_fixture', - '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/session/session_catalog_mongod', '$BUILD_DIR/mongo/db/stats/transaction_stats', '$BUILD_DIR/mongo/db/storage/storage_control', '$BUILD_DIR/mongo/executor/inline_executor', + '$BUILD_DIR/mongo/s/sharding_router_api', 'transaction', 'transaction_api', 'transaction_operations', diff --git a/src/mongo/db/transaction/internal_transaction_metrics.cpp b/src/mongo/db/transaction/internal_transaction_metrics.cpp index b2f916e3e819e..b2e73faab8e7f 100644 --- a/src/mongo/db/transaction/internal_transaction_metrics.cpp +++ b/src/mongo/db/transaction/internal_transaction_metrics.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/transaction/internal_transaction_metrics.h" +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/transaction/internal_transaction_metrics.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/db/transaction/internal_transaction_metrics.h b/src/mongo/db/transaction/internal_transaction_metrics.h index 109f292fb6eb8..e5c00dc2c079d 100644 --- a/src/mongo/db/transaction/internal_transaction_metrics.h +++ b/src/mongo/db/transaction/internal_transaction_metrics.h @@ -29,8 +29,11 @@ #pragma once +#include + #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/db/transaction/retryable_writes_stats.cpp b/src/mongo/db/transaction/retryable_writes_stats.cpp index 4235ccfa10e15..25608ee6bf435 100644 --- a/src/mongo/db/transaction/retryable_writes_stats.cpp +++ b/src/mongo/db/transaction/retryable_writes_stats.cpp @@ -27,15 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/transaction/retryable_writes_stats.h" - -#include "mongo/db/commands/server_status.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/transaction/retryable_writes_stats.h" #include "mongo/db/transaction/transactions_stats_gen.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/transaction/server_transactions_metrics.cpp b/src/mongo/db/transaction/server_transactions_metrics.cpp index c25edced5ffe9..5026d2f9ba69b 100644 --- a/src/mongo/db/transaction/server_transactions_metrics.cpp +++ b/src/mongo/db/transaction/server_transactions_metrics.cpp @@ -27,17 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/transaction/server_transactions_metrics.h" +#include +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands/server_status.h" -#include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/optime.h" #include "mongo/db/service_context.h" #include "mongo/db/transaction/retryable_writes_stats.h" +#include "mongo/db/transaction/server_transactions_metrics.h" #include "mongo/db/transaction/transactions_stats_gen.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/db/transaction/server_transactions_metrics.h b/src/mongo/db/transaction/server_transactions_metrics.h index 2c3ee8d6f07e4..2f512f7cbb627 100644 --- a/src/mongo/db/transaction/server_transactions_metrics.h +++ b/src/mongo/db/transaction/server_transactions_metrics.h @@ -29,13 +29,18 @@ #pragma once +#include +#include +#include #include +#include "mongo/bson/bsonobj.h" #include "mongo/bson/timestamp.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" #include "mongo/db/service_context.h" #include "mongo/db/transaction/transactions_stats_gen.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/util/concurrency/with_lock.h" diff --git a/src/mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.cpp b/src/mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.cpp index b7a9aedf03f47..ab7563594abfb 100644 --- a/src/mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.cpp +++ b/src/mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.cpp @@ -29,9 +29,20 @@ #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include +#include + +#include +#include +#include + +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction @@ -105,7 +116,7 @@ MongoDSessionCatalogTransactionInterface::ScanSessionsCallbackFn MongoDSessionCatalogTransactionInterfaceImpl::makeParentSessionWorkerFnForReap( TxnNumber* parentSessionActiveTxnNumber) { return [parentSessionActiveTxnNumber](ObservableSession& parentSession) { - const auto transactionSessionId = parentSession.getSessionId(); + const auto& transactionSessionId = parentSession.getSessionId(); const auto txnParticipant = TransactionParticipant::get(parentSession); const auto txnRouter = TransactionRouter::get(parentSession); @@ -127,7 +138,7 @@ MongoDSessionCatalogTransactionInterface::ScanSessionsCallbackFn MongoDSessionCatalogTransactionInterfaceImpl::makeChildSessionWorkerFnForReap( const TxnNumber& parentSessionActiveTxnNumber) { return [&parentSessionActiveTxnNumber](ObservableSession& childSession) { - const auto transactionSessionId = childSession.getSessionId(); + const auto& transactionSessionId = childSession.getSessionId(); const auto txnParticipant = TransactionParticipant::get(childSession); const auto txnRouter = TransactionRouter::get(childSession); @@ -183,4 +194,25 @@ MongoDSessionCatalogTransactionInterfaceImpl::makeSessionWorkerFnForStepUp( }; } 
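// A minimal standalone sketch of the session-scan worker factory pattern used by the
// makeSessionWorkerFn* functions in this file, including the eager-reap worker added below: the
// factory returns a callback that captures its configuration and marks eligible sessions for
// reaping. All names here are placeholders, not MongoDB APIs.
#include <functional>
#include <vector>

namespace example {
struct Session {
    long long txnNumber;
    bool internalRetryableWrite;
    bool markedForReap = false;
};

using ScanFn = std::function<void(Session&)>;

ScanFn makeEagerReapWorker(long long clientTxnNumberStarted) {
    return [clientTxnNumberStarted](Session& s) {
        // Reap only internal retryable-write sessions superseded by a newer client txnNumber.
        if (s.internalRetryableWrite && s.txnNumber < clientTxnNumberStarted)
            s.markedForReap = true;
    };
}

void scan(std::vector<Session>& sessions, const ScanFn& worker) {
    for (auto& s : sessions)
        worker(s);
}
}  // namespace example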
+MongoDSessionCatalogTransactionInterface::ScanSessionsCallbackFn +MongoDSessionCatalogTransactionInterfaceImpl::makeSessionWorkerFnForEagerReap( + TxnNumber clientTxnNumberStarted, SessionCatalog::Provenance provenance) { + return [clientTxnNumberStarted, provenance](ObservableSession& osession) { + const auto& transactionSessionId = osession.getSessionId(); + const auto txnParticipant = TransactionParticipant::get(osession); + + // If a retryable session has been used for a TransactionParticipant, it may be in the + // retryable participant catalog. A participant triggers eager reaping after clearing its + // participant catalog, but a router may trigger reaping before, so we can only eager reap + // an initialized participant if the reap came from the participant role. + if (provenance == SessionCatalog::Provenance::kParticipant || + txnParticipant.getActiveTxnNumberAndRetryCounter().getTxnNumber() == + kUninitializedTxnNumber) { + if (isInternalSessionForRetryableWrite(transactionSessionId) && + *transactionSessionId.getTxnNumber() < clientTxnNumberStarted) { + osession.markForReap(ObservableSession::ReapMode::kExclusive); + } + } + }; +} } // namespace mongo diff --git a/src/mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h b/src/mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h index aa982da3ccc91..65f3f9e68ddf6 100644 --- a/src/mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h +++ b/src/mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h @@ -29,7 +29,14 @@ #pragma once +#include + +#include "mongo/db/operation_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod_transaction_interface.h" +#include "mongo/db/session/session_txn_record_gen.h" namespace mongo { @@ -74,6 +81,9 @@ class MongoDSessionCatalogTransactionInterfaceImpl ScanSessionsCallbackFn makeSessionWorkerFnForStepUp( std::vector* sessionKillTokens, std::vector* sessionsToReacquireLocks) override; + + ScanSessionsCallbackFn makeSessionWorkerFnForEagerReap( + TxnNumber clientTxnNumberStarted, SessionCatalog::Provenance provenance) override; }; } // namespace mongo diff --git a/src/mongo/db/transaction/transaction_api.cpp b/src/mongo/db/transaction/transaction_api.cpp index 85be197c38cb8..b339afaf415e6 100644 --- a/src/mongo/db/transaction/transaction_api.cpp +++ b/src/mongo/db/transaction/transaction_api.cpp @@ -30,10 +30,27 @@ #include "mongo/db/transaction/transaction_api.h" +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/cancelable_operation_context.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/txn_cmds_gen.h" @@ -41,29 +58,44 @@ #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/operation_time_tracker.h" -#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/getmore_command_gen.h" +#include "mongo/db/read_write_concern_provenance_base_gen.h" #include "mongo/db/repl/read_concern_args.h" #include 
"mongo/db/repl/repl_client_info.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/session/internal_session_pool.h" #include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/internal_transaction_metrics.h" #include "mongo/db/transaction_validation.h" #include "mongo/db/write_concern_options.h" #include "mongo/executor/inline_executor.h" #include "mongo/executor/task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/factory.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" #include "mongo/rpc/reply_interface.h" #include "mongo/s/is_mongos.h" -#include "mongo/stdx/future.h" #include "mongo/transport/service_entry_point.h" #include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/notification.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction @@ -85,24 +117,24 @@ void runFutureInline(executor::InlineExecutor* inlineExecutor, Notification sleepableExecutor, + std::shared_ptr sleepAndCleanupExecutor, std::unique_ptr resourceYielder, std::shared_ptr inlineExecutor, std::unique_ptr txnClient) : _resourceYielder(std::move(resourceYielder)), _inlineExecutor(inlineExecutor), - _sleepExec(sleepableExecutor), + _sleepExec(inlineExecutor->getSleepableExecutor(sleepAndCleanupExecutor)), + _cleanupExecutor(sleepAndCleanupExecutor), _txn(std::make_shared( opCtx, _sleepExec, - _source.token(), + opCtx->getCancellationToken(), txnClient ? std::move(txnClient) : std::make_unique( opCtx, inlineExecutor, - sleepableExecutor, + _sleepExec, std::make_unique()))) { - // Callers should always provide a yielder when using the API with a session checked out, // otherwise commands run by the API won't be able to check out that session. invariant(!OperationContextSession::get(opCtx) || _resourceYielder); @@ -117,45 +149,50 @@ StatusWith SyncTransactionWithRetries::runNoThrow(OperationContext } Notification mayReturn; - auto txnFuture = _txn->run(opCtx, std::move(callback)) + auto txnFuture = _txn->run(std::move(callback)) .unsafeToInlineFuture() .tapAll([&](auto&&) { mayReturn.set(); }) .semi(); - runFutureInline(_inlineExecutor.get(), mayReturn); auto txnResult = txnFuture.getNoThrow(opCtx); - // Cancel the source to guarantee the transaction will terminate if our opCtx was interrupted. - _source.cancel(); - - // Wait for transaction to complete before returning so variables referenced by its callback are - // guaranteed to be in scope even if the API caller's opCtx was interrupted. - txnFuture.wait(); - // Post transaction processing, which must also happen inline. OperationTimeTracker::get(opCtx)->updateOperationTime(_txn->getOperationTime()); repl::ReplClientInfo::forClient(opCtx->getClient()) .setLastProxyWriteTimestampForward(_txn->getOperationTime().asTimestamp()); - // Run cleanup tasks after the caller has finished waiting so the caller can't be blocked. 
- // Attempt to wait for cleanup so it appears synchronous for most callers, but allow - // interruptions so we return immediately if the opCtx has been cancelled. - // - // Also schedule after getting the transaction's operation time so the best effort abort can't - // unnecessarily advance it. - Notification mayReturnFromCleanup; - auto cleanUpFuture = _txn->cleanUpIfNecessary().unsafeToInlineFuture().tapAll( - [&](auto&&) { mayReturnFromCleanup.set(); }); - - runFutureInline(_inlineExecutor.get(), mayReturnFromCleanup); - - cleanUpFuture.getNoThrow(opCtx).ignore(); + if (_txn->needsCleanup()) { + // Schedule cleanup on an out of line executor so it runs even if the transaction was + // cancelled. Attempt to wait for cleanup so it appears synchronous for most callers, but + // allow interruptions so we return immediately if the opCtx has been cancelled. + // + // Also schedule after getting the transaction's operation time so the best effort abort + // can't unnecessarily advance it. + ExecutorFuture(_cleanupExecutor) + .then([txn = _txn, inlineExecutor = _inlineExecutor]() mutable { + Notification mayReturnFromCleanup; + auto cleanUpFuture = txn->cleanUp().unsafeToInlineFuture().tapAll( + [&](auto&&) { mayReturnFromCleanup.set(); }); + runFutureInline(inlineExecutor.get(), mayReturnFromCleanup); + return cleanUpFuture; + }) + .getNoThrow(opCtx) + .ignore(); + } auto unyieldStatus = _resourceYielder ? _resourceYielder->unyieldNoThrow(opCtx) : Status::OK(); if (!txnResult.isOK()) { + if (auto interruptStatus = opCtx->checkForInterruptNoAssert(); !interruptStatus.isOK()) { + // The caller was interrupted during the transaction, so if the transaction failed, + // return the caller's interruption code instead. The transaction uses a + // CancelableOperationContext inherited from the caller's opCtx, but that type can only + // kill with an Interrupted error, so this is meant as a workaround to preserve the + // presumably more meaningful error the caller was interrupted with. + return interruptStatus; + } return txnResult; } else if (!unyieldStatus.isOK()) { return unyieldStatus; @@ -273,16 +310,34 @@ std::string errorHandlingStepToString(Transaction::ErrorHandlingStep nextStep) { MONGO_UNREACHABLE; } -void logNextStep(Transaction::ErrorHandlingStep nextStep, const BSONObj& txnInfo, int attempts) { - LOGV2(5918600, - "Chose internal transaction error handling step", - "nextStep"_attr = errorHandlingStepToString(nextStep), - "txnInfo"_attr = txnInfo, - "attempts"_attr = attempts); +void logNextStep(Transaction::ErrorHandlingStep nextStep, + const BSONObj& txnInfo, + int attempts, + const StatusWith& swResult, + StringData errorHandler) { + // DynamicAttributes doesn't allow rvalues, so make some local variables. 
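// A minimal standalone sketch of the "build attributes conditionally, then log once" pattern that
// the new logNextStep uses below; the locals it declares keep the redacted strings alive for as
// long as the attribute set references them. The types here are placeholders, not the logv2 API.
#include <iostream>
#include <string>
#include <utility>
#include <vector>

namespace example {
using Attrs = std::vector<std::pair<std::string, std::string>>;

void logOnce(const std::string& msg, const Attrs& attrs) {
    std::cout << msg;
    for (const auto& [k, v] : attrs)
        std::cout << ' ' << k << '=' << v;
    std::cout << '\n';
}

void logNextStep(bool bodyFailed, const std::string& bodyError, const std::string& commitError) {
    Attrs attrs{{"nextStep", "retry"}};
    if (bodyFailed)
        attrs.emplace_back("error", bodyError);          // The transaction body failed: log its error.
    else
        attrs.emplace_back("commitError", commitError);  // Otherwise log the commit outcome instead.
    logOnce("Chose internal transaction error handling step", attrs);
}
}  // namespace example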
+ auto nextStepString = errorHandlingStepToString(nextStep); + std::string redactedError, redactedCommitError, redactedCommitWCError; + + logv2::DynamicAttributes attr; + attr.add("nextStep", nextStepString); + attr.add("txnInfo", txnInfo); + attr.add("attempts", attempts); + if (!swResult.isOK()) { + redactedError = redact(swResult.getStatus()); + attr.add("error", redactedError); + } else { + redactedCommitError = redact(swResult.getValue().cmdStatus); + attr.add("commitError", redactedCommitError); + redactedCommitWCError = redact(swResult.getValue().wcError.toStatus()); + attr.add("commitWCError", redactedCommitWCError); + } + attr.add("errorHandler", errorHandler); + + LOGV2(5918600, "Chose internal transaction error handling step", attr); } -SemiFuture TransactionWithRetries::run(OperationContext* opCtx, - Callback callback) noexcept { +SemiFuture TransactionWithRetries::run(Callback callback) noexcept { InternalTransactionMetrics::get(_internalTxn->getParentServiceContext())->incrementStarted(); _internalTxn->setCallback(std::move(callback)); @@ -310,7 +365,8 @@ ExecutorFuture TransactionWithRetries::_runBodyHandleErrors(int bodyAttemp return _internalTxn->runCallback().thenRunOn(_executor).onError( [this, bodyAttempts](Status bodyStatus) { auto nextStep = _internalTxn->handleError(bodyStatus, bodyAttempts); - logNextStep(nextStep, _internalTxn->reportStateForLog(), bodyAttempts); + logNextStep( + nextStep, _internalTxn->reportStateForLog(), bodyAttempts, bodyStatus, "runBody"); if (nextStep == Transaction::ErrorHandlingStep::kDoNotRetry) { iassert(bodyStatus); @@ -342,7 +398,11 @@ ExecutorFuture TransactionWithRetries::_runCommitHandleErrors(int } auto nextStep = _internalTxn->handleError(swCommitResult, commitAttempts); - logNextStep(nextStep, _internalTxn->reportStateForLog(), commitAttempts); + logNextStep(nextStep, + _internalTxn->reportStateForLog(), + commitAttempts, + swCommitResult, + "runCommit"); if (nextStep == Transaction::ErrorHandlingStep::kDoNotRetry) { return ExecutorFuture(_executor, swCommitResult); @@ -383,7 +443,7 @@ ExecutorFuture TransactionWithRetries::_bestEffortAbort() { return _internalTxn->abort().thenRunOn(_executor).onError([this](Status abortStatus) { LOGV2(5875900, "Unable to abort internal transaction", - "reason"_attr = abortStatus, + "reason"_attr = redact(abortStatus), "txnInfo"_attr = _internalTxn->reportStateForLog()); }); } @@ -395,9 +455,6 @@ void primeInternalClient(Client* client) { if (as) { as->grantInternalAuthorization(client); } - - stdx::lock_guard lk(*client); - client->setSystemOperationKillableByStepdown(lk); } Future DefaultSEPTransactionClientBehaviors::handleRequest( @@ -430,9 +487,10 @@ ExecutorFuture SEPTransactionClient::_runCommand(const DatabaseName& db return _behaviors->handleRequest(cancellableOpCtx.get(), requestMessage) .thenRunOn(_executor) .then([this](DbResponse dbResponse) { + // NOTE: The API uses this method to run commit and abort, so be careful about adding + // new logic here to ensure it cannot interfere with error handling for either command. 
auto reply = rpc::makeReply(&dbResponse.response)->getCommandReply().getOwned(); _hooks->runReplyHook(reply); - uassertStatusOK(getStatusFromCommandResult(reply)); return reply; }); } @@ -453,6 +511,30 @@ SemiFuture SEPTransactionClient::runCommand(const DatabaseName& dbName, return _runCommand(dbName, cmdObj).semi(); } +ExecutorFuture SEPTransactionClient::_runCommandChecked(const DatabaseName& dbName, + BSONObj cmdObj) const { + return _runCommand(dbName, cmdObj).then([](BSONObj reply) { + uassertStatusOK(getStatusFromCommandResult(reply)); + return reply; + }); +} + +SemiFuture SEPTransactionClient::runCommandChecked(const DatabaseName& dbName, + BSONObj cmdObj) const { + return _runCommandChecked(dbName, cmdObj).semi(); +} + +BSONObj SEPTransactionClient::runCommandCheckedSync(const DatabaseName& dbName, + BSONObj cmdObj) const { + Notification mayReturn; + auto result = _runCommandChecked(dbName, cmdObj).unsafeToInlineFuture().tapAll([&](auto&&) { + mayReturn.set(); + }); + runFutureInline(_inlineExecutor.get(), mayReturn); + + return std::move(result).get(); +} + ExecutorFuture SEPTransactionClient::_runCRUDOp( const BatchedCommandRequest& cmd, std::vector stmtIds) const { invariant(!stmtIds.size() || (cmd.sizeWriteOps() == stmtIds.size()), @@ -487,7 +569,6 @@ SemiFuture SEPTransactionClient::runCRUDOp( BatchedCommandResponse SEPTransactionClient::runCRUDOpSync(const BatchedCommandRequest& cmd, std::vector stmtIds) const { - Notification mayReturn; auto result = @@ -502,6 +583,43 @@ BatchedCommandResponse SEPTransactionClient::runCRUDOpSync(const BatchedCommandR return std::move(result).get(); } +ExecutorFuture SEPTransactionClient::_runCRUDOp( + const BulkWriteCommandRequest& cmd) const { + BSONObjBuilder cmdBob(cmd.toBSON(BSONObj())); + // BulkWrite can only execute on admin DB. + return runCommand(DatabaseName::kAdmin, cmdBob.obj()) + .thenRunOn(_executor) + .then([](BSONObj reply) { + uassertStatusOK(getStatusFromWriteCommandReply(reply)); + + IDLParserContext ctx("BulkWriteCommandReplyParse"); + auto response = BulkWriteCommandReply::parse(ctx, reply); + return response; + }); +} + +SemiFuture SEPTransactionClient::runCRUDOp( + const BulkWriteCommandRequest& cmd) const { + return _runCRUDOp(cmd).semi(); +} + +BulkWriteCommandReply SEPTransactionClient::runCRUDOpSync( + const BulkWriteCommandRequest& cmd) const { + + Notification mayReturn; + + auto result = + _runCRUDOp(cmd) + .unsafeToInlineFuture() + // Use tap and tapError instead of tapAll since tapAll is not move-only type friendly + .tap([&](auto&&) { mayReturn.set(); }) + .tapError([&](auto&&) { mayReturn.set(); }); + + runFutureInline(_inlineExecutor.get(), mayReturn); + + return std::move(result).get(); +} + ExecutorFuture> SEPTransactionClient::_exhaustiveFind( const FindCommandRequest& cmd) const { return runCommand(cmd.getDbName(), cmd.toBSON({})) @@ -710,10 +828,8 @@ Transaction::ErrorHandlingStep Transaction::handleError(const StatusWith TransactionWithRetries::cleanUpIfNecessary() { - if (!_internalTxn->needsCleanup()) { - return SemiFuture(Status::OK()); - } +bool TransactionWithRetries::needsCleanup() { + return _internalTxn->needsCleanup(); +} + +SemiFuture TransactionWithRetries::cleanUp() { + tassert(7567600, "Unnecessarily cleaning up transaction", _internalTxn->needsCleanup()); return _bestEffortAbort() // Safe to inline because the continuation only holds state. 
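`runCommandCheckedSync`, `runCRUDOpSync`, and the cleanup path at the top of this file all share one shape: start the asynchronous chain, signal a `Notification` from a terminal continuation, drive the inline executor until the notification fires, then read the already-ready future. A rough standard-library-only approximation of that sync-over-async pattern (the real code uses `Notification<void>`, `unsafeToInlineFuture()`, and `runFutureInline()`, none of which are modeled here):

```cpp
#include <condition_variable>
#include <future>
#include <iostream>
#include <mutex>

// Minimal stand-in for Notification<void>: a one-shot latch.
class OneShot {
public:
    void set() {
        std::lock_guard<std::mutex> lk(_m);
        _done = true;
        _cv.notify_all();
    }
    void wait() {
        std::unique_lock<std::mutex> lk(_m);
        _cv.wait(lk, [&] { return _done; });
    }

private:
    std::mutex _m;
    std::condition_variable _cv;
    bool _done = false;
};

int main() {
    OneShot mayReturn;

    // Asynchronous work whose terminal step signals the latch, analogous to
    // .tapAll([&](auto&&) { mayReturn.set(); }) on the command's future chain.
    auto result = std::async(std::launch::async, [&mayReturn] {
        int reply = 42;   // pretend command reply
        mayReturn.set();  // signal completion whether or not the work succeeded
        return reply;
    });

    // The "...Sync" wrapper blocks here, then reads the (now ready) result.
    mayReturn.wait();
    std::cout << result.get() << '\n';
    return 0;
}
```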
@@ -799,7 +917,9 @@ void Transaction::prepareRequest(BSONObjBuilder* cmdBuilder) { !isRetryableWriteCommand( cmdBuilder->asTempObj().firstElement().fieldNameStringData()) || (cmdBuilder->hasField(write_ops::WriteCommandRequestBase::kStmtIdsFieldName) || - cmdBuilder->hasField(write_ops::WriteCommandRequestBase::kStmtIdFieldName)), + cmdBuilder->hasField(write_ops::WriteCommandRequestBase::kStmtIdFieldName)) || + (cmdBuilder->hasField(BulkWriteCommandRequest::kStmtIdFieldName) || + cmdBuilder->hasField(BulkWriteCommandRequest::kStmtIdsFieldName)), str::stream() << "In a retryable write transaction every retryable write command should have an " "explicit statement id, command: " @@ -904,7 +1024,13 @@ BSONObj Transaction::reportStateForLog() const { BSONObj Transaction::_reportStateForLog(WithLock) const { return BSON("execContext" << execContextToString(_execContext) << "sessionInfo" - << _sessionInfo.toBSON() << "state" << _state.toString()); + << _sessionInfo.toBSON() << "state" << _state.toString() + << "lastOperationTime" << _lastOperationTime.toString() + << "latestResponseHasTransientTransactionErrorLabel" + << _latestResponseHasTransientTransactionErrorLabel << "deadline" + << (_opDeadline ? _opDeadline->toString() : "none") << "writeConcern" + << _writeConcern << "readConcern" << _readConcern << "APIParameters" + << _apiParameters.toBSON() << "canceled" << _token.isCanceled()); } void Transaction::_setSessionInfo(WithLock, diff --git a/src/mongo/db/transaction/transaction_api.h b/src/mongo/db/transaction/transaction_api.h index ff5cb5aa33545..a2c04fd9cbc9c 100644 --- a/src/mongo/db/transaction/transaction_api.h +++ b/src/mongo/db/transaction/transaction_api.h @@ -29,19 +29,46 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/resource_yielder.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/executor/inline_executor.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/write_concern_error_detail.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" #include "mongo/util/concurrency/notification.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/functional.h" #include "mongo/util/future.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/time_support.h" namespace mongo::txn_api { namespace details { @@ -99,8 +126,15 @@ class TransactionClient { * Runs the given command as part of the transaction that owns this transaction client. 
*/ virtual SemiFuture runCommand(const DatabaseName& dbName, BSONObj cmd) const = 0; - virtual BSONObj runCommandSync(const DatabaseName& dbName, BSONObj cmd) const = 0; + + /** + * Same as runCommand but will assert the command status is ok. + */ + virtual SemiFuture runCommandChecked(const DatabaseName& dbName, + BSONObj cmd) const = 0; + virtual BSONObj runCommandCheckedSync(const DatabaseName& dbName, BSONObj cmd) const = 0; + /** * Helper method to run commands representable as a BatchedCommandRequest in the transaction * client's transaction. @@ -128,6 +162,13 @@ class TransactionClient { virtual BatchedCommandResponse runCRUDOpSync(const BatchedCommandRequest& cmd, std::vector stmtIds) const = 0; + /** + * Helper method to run BulkWriteCommandRequest in the transaction client's transaction. + */ + virtual SemiFuture runCRUDOp( + const BulkWriteCommandRequest& cmd) const = 0; + virtual BulkWriteCommandReply runCRUDOpSync(const BulkWriteCommandRequest& cmd) const = 0; + /** * Helper method that runs the given find in the transaction client's transaction and will * iterate and exhaust the find's cursor, returning a vector with all matching documents. @@ -170,13 +211,16 @@ class SyncTransactionWithRetries { * * Optionally accepts a custom TransactionClient and will default to a client that runs commands * against the local service entry point. + * + * Will run all tasks synchronously on the caller's thread via the InlineExecutor. Will sleep + * between retries and schedule any necessary cleanup (e.g. abortTransaction commands) using the + * sleepAndCleanupExecutor. */ - SyncTransactionWithRetries( - OperationContext* opCtx, - std::shared_ptr sleepableExecutor, - std::unique_ptr resourceYielder, - std::shared_ptr executor, - std::unique_ptr txnClient = nullptr); + SyncTransactionWithRetries(OperationContext* opCtx, + std::shared_ptr sleepAndCleanupExecutor, + std::unique_ptr resourceYielder, + std::shared_ptr executor, + std::unique_ptr txnClient = nullptr); /** * Returns a bundle with the commit command status and write concern error, if any. Any error * prior to receiving a response from commit (e.g. 
an interruption or a user assertion in the @@ -203,10 +247,10 @@ class SyncTransactionWithRetries { } private: - CancellationSource _source; std::unique_ptr _resourceYielder; std::shared_ptr _inlineExecutor; std::shared_ptr _sleepExec; + std::shared_ptr _cleanupExecutor; std::shared_ptr _txn; }; @@ -286,11 +330,19 @@ class SEPTransactionClient : public TransactionClient { virtual SemiFuture runCommand(const DatabaseName& dbName, BSONObj cmd) const override; virtual BSONObj runCommandSync(const DatabaseName& dbName, BSONObj cmd) const override; + virtual SemiFuture runCommandChecked(const DatabaseName& dbName, + BSONObj cmd) const override; + virtual BSONObj runCommandCheckedSync(const DatabaseName& dbName, BSONObj cmd) const override; + virtual SemiFuture runCRUDOp( const BatchedCommandRequest& cmd, std::vector stmtIds) const override; virtual BatchedCommandResponse runCRUDOpSync(const BatchedCommandRequest& cmd, std::vector stmtIds) const override; + virtual SemiFuture runCRUDOp( + const BulkWriteCommandRequest& cmd) const override; + virtual BulkWriteCommandReply runCRUDOpSync(const BulkWriteCommandRequest& cmd) const override; + virtual SemiFuture> exhaustiveFind( const FindCommandRequest& cmd) const override; virtual std::vector exhaustiveFindSync(const FindCommandRequest& cmd) const override; @@ -306,9 +358,13 @@ class SEPTransactionClient : public TransactionClient { private: ExecutorFuture _runCommand(const DatabaseName& dbName, BSONObj cmd) const; + ExecutorFuture _runCommandChecked(const DatabaseName& dbName, BSONObj cmd) const; + ExecutorFuture _runCRUDOp(const BatchedCommandRequest& cmd, std::vector stmtIds) const; + ExecutorFuture _runCRUDOp(const BulkWriteCommandRequest& cmd) const; + ExecutorFuture> _exhaustiveFind(const FindCommandRequest& cmd) const; private: @@ -583,7 +639,7 @@ class TransactionWithRetries : public std::enable_shared_from_this run(OperationContext* opCtx, Callback callback) noexcept; + SemiFuture run(Callback callback) noexcept; /** * Returns the latest operationTime returned by a command in this transaction. @@ -593,10 +649,15 @@ class TransactionWithRetries : public std::enable_shared_from_this cleanUpIfNecessary(); + SemiFuture cleanUp(); private: // Helper methods for running a transaction. diff --git a/src/mongo/db/transaction/transaction_api_test.cpp b/src/mongo/db/transaction/transaction_api_test.cpp index af21988333a03..54f041b2eeab0 100644 --- a/src/mongo/db/transaction/transaction_api_test.cpp +++ b/src/mongo/db/transaction/transaction_api_test.cpp @@ -27,34 +27,65 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include - -#include "mongo/config.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/api_parameters_gen.h" #include "mongo/db/commands.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/error_labels.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/internal_transaction_metrics.h" #include "mongo/db/transaction/transaction_api.h" +#include "mongo/db/write_concern_options.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/database_version.h" #include "mongo/s/is_mongos.h" +#include "mongo/s/shard_version.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/future.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/concurrency/thread_pool.h" -#include "mongo/util/executor_test_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/scopeguard.h" namespace mongo { @@ -80,6 +111,19 @@ const BSONObj kRetryableWriteConcernError = const BSONObj kResWithRetryableWriteConcernError = BSON("ok" << 1 << "writeConcernError" << kRetryableWriteConcernError); +const BSONObj kResWithTransientCommitErrorAndRetryableWriteConcernError = + BSON("ok" << 0 << "code" << ErrorCodes::LockTimeout << kErrorLabelsFieldName + << BSON_ARRAY(ErrorLabel::kTransientTransaction) << "writeConcernError" + << kRetryableWriteConcernError); + +const BSONObj kResWithNonTransientCommitErrorAndRetryableWriteConcernError = + BSON("ok" << 0 << "code" << ErrorCodes::NoSuchTransaction << "writeConcernError" + << kRetryableWriteConcernError); + +const BSONObj kResWithNonTransientCommitErrorAndNonRetryableWriteConcernError = + BSON("ok" << 0 << "code" << ErrorCodes::NoSuchTransaction << "writeConcernError" + << kWriteConcernError); + class MockResourceYielder : public ResourceYielder { public: void yield(OperationContext*) { @@ -152,17 +196,19 @@ class MockTransactionClient : public SEPTransactionClient { stdx::unique_lock ul(_mutex); [&]() { StringData cmdName = cmd.firstElementFieldNameStringData(); - if (cmdName != AbortTransaction::kCommandName) { + if (!(cmdName == AbortTransaction::kCommandName || + cmdName == CommitTransaction::kCommandName)) { // Only hang abort commands. 
return; } - if (_hangNextAbortCommand) { + if (_hangNextCommitOrAbortCommand) { // Tests that expect to hang an abort must use the barrier to synchronize since the // abort runs on a different thread. - _hitHungAbort.countDownAndWait(); + _hitHungCommitOrAbort.countDownAndWait(); } - _hangNextAbortCommandCV.wait(ul, [&] { return !_hangNextAbortCommand; }); + _hangNextCommitOrAbortCommandCV.wait(ul, + [&] { return !_hangNextCommitOrAbortCommand; }); }(); auto cmdBob = BSONObjBuilder(std::move(cmd)); @@ -194,6 +240,15 @@ class MockTransactionClient : public SEPTransactionClient { MONGO_UNREACHABLE; } + virtual BSONObj runCommandCheckedSync(const DatabaseName& dbName, BSONObj cmd) const override { + MONGO_UNREACHABLE; + } + + virtual SemiFuture runCommandChecked(const DatabaseName& dbName, + BSONObj cmd) const override { + MONGO_UNREACHABLE; + } + virtual SemiFuture runCRUDOp( const BatchedCommandRequest& cmd, std::vector stmtIds) const override { MONGO_UNREACHABLE; @@ -204,6 +259,15 @@ class MockTransactionClient : public SEPTransactionClient { MONGO_UNREACHABLE; } + virtual SemiFuture runCRUDOp( + const BulkWriteCommandRequest& cmd) const override { + MONGO_UNREACHABLE; + } + + virtual BulkWriteCommandReply runCRUDOpSync(const BulkWriteCommandRequest& cmd) const override { + MONGO_UNREACHABLE; + } + virtual bool supportsClientTransactionContext() const override { return true; } @@ -230,16 +294,16 @@ class MockTransactionClient : public SEPTransactionClient { _responses.push(res); } - void setHangNextAbortCommand(bool enable) { + void setHangNextCommitOrAbortCommand(bool enable) { stdx::lock_guard lg(_mutex); - _hangNextAbortCommand = enable; + _hangNextCommitOrAbortCommand = enable; // Wake up any waiting threads. - _hangNextAbortCommandCV.notify_all(); + _hangNextCommitOrAbortCommandCV.notify_all(); } - void waitForHungAbortWaiter() { - _hitHungAbort.countDownAndWait(); + void waitForHungCommitOrAbort() { + _hitHungCommitOrAbort.countDownAndWait(); } private: @@ -250,9 +314,9 @@ class MockTransactionClient : public SEPTransactionClient { bool _runningLocalTransaction{false}; mutable Mutex _mutex = MONGO_MAKE_LATCH("MockTransactionClient"); - mutable stdx::condition_variable _hangNextAbortCommandCV; - bool _hangNextAbortCommand{false}; - mutable unittest::Barrier _hitHungAbort{2}; + mutable stdx::condition_variable _hangNextCommitOrAbortCommandCV; + bool _hangNextCommitOrAbortCommand{false}; + mutable unittest::Barrier _hitHungCommitOrAbort{2}; }; } // namespace txn_api::details @@ -379,14 +443,12 @@ class TxnAPITest : public ServiceContextTest { _executor->startup(); _inlineExecutor = std::make_shared(); - _sleepInlineExecutor = _inlineExecutor->getSleepableExecutor(_executor); - auto mockClient = std::make_unique( - opCtx(), _inlineExecutor, _sleepInlineExecutor, nullptr); + opCtx(), _inlineExecutor, _inlineExecutor->getSleepableExecutor(_executor), nullptr); _mockClient = mockClient.get(); _txnWithRetries = std::make_unique(opCtx(), - _sleepInlineExecutor, + _executor, nullptr /* resourceYielder */, _inlineExecutor, std::move(mockClient)); @@ -426,14 +488,15 @@ class TxnAPITest : public ServiceContextTest { return *_txnWithRetries; } - void resetTxnWithRetries( - std::unique_ptr resourceYielder = nullptr, - std::shared_ptr executor = nullptr) { - auto executorToUse = _sleepInlineExecutor; - + void resetTxnWithRetries(std::unique_ptr resourceYielder = nullptr, + std::shared_ptr executor = nullptr) { + auto executorToUse = executor ? 
executor : _executor; auto mockClient = std::make_unique( - opCtx(), _inlineExecutor, executorToUse, nullptr); + opCtx(), + _inlineExecutor, + _inlineExecutor->getSleepableExecutor(executorToUse), + nullptr); _mockClient = mockClient.get(); if (resourceYielder) { _resourceYielder = resourceYielder.get(); @@ -459,7 +522,7 @@ class TxnAPITest : public ServiceContextTest { waitForAllEarlierTasksToComplete(); _txnWithRetries = nullptr; _txnWithRetries = std::make_unique( - opCtx(), _sleepInlineExecutor, nullptr, _inlineExecutor, std::move(txnClient)); + opCtx(), _executor, nullptr, _inlineExecutor, std::move(txnClient)); } void expectSentAbort(TxnNumber txnNumber, BSONObj writeConcern) { @@ -477,7 +540,6 @@ class TxnAPITest : public ServiceContextTest { std::shared_ptr _executor; std::shared_ptr _inlineExecutor; - std::shared_ptr _sleepInlineExecutor; txn_api::details::MockTransactionClient* _mockClient{nullptr}; MockResourceYielder* _resourceYielder{nullptr}; std::unique_ptr _txnWithRetries; @@ -495,6 +557,15 @@ class MockClusterOperationTransactionClient : public txn_api::TransactionClient MONGO_UNREACHABLE; } + virtual BSONObj runCommandCheckedSync(const DatabaseName& dbName, BSONObj cmd) const override { + MONGO_UNREACHABLE; + } + + virtual SemiFuture runCommandChecked(const DatabaseName& dbName, + BSONObj cmd) const override { + MONGO_UNREACHABLE; + } + virtual SemiFuture runCRUDOp( const BatchedCommandRequest& cmd, std::vector stmtIds) const override { MONGO_UNREACHABLE; @@ -505,6 +576,15 @@ class MockClusterOperationTransactionClient : public txn_api::TransactionClient MONGO_UNREACHABLE; } + virtual SemiFuture runCRUDOp( + const BulkWriteCommandRequest& cmd) const override { + MONGO_UNREACHABLE; + } + + virtual BulkWriteCommandReply runCRUDOpSync(const BulkWriteCommandRequest& cmd) const override { + MONGO_UNREACHABLE; + } + virtual SemiFuture> exhaustiveFind( const FindCommandRequest& cmd) const override { MONGO_UNREACHABLE; @@ -527,12 +607,13 @@ TEST_F(TxnAPITest, OwnSession_AttachesTxnMetadata) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata( mockClient()->getLastSentRequest(), 0 /* txnNumber */, true /* startTransaction */); @@ -540,12 +621,13 @@ TEST_F(TxnAPITest, OwnSession_AttachesTxnMetadata) { assertAPIParameters(mockClient()->getLastSentRequest(), boost::none); mockClient()->setNextCommandResponse(kOKInsertResponse); - insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. 
assertTxnMetadata(mockClient()->getLastSentRequest(), 0 /* txnNumber */, @@ -585,12 +667,13 @@ TEST_F(TxnAPITest, AttachesAPIVersion) { attempt += 1; mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata(mockClient()->getLastSentRequest(), attempt + 1 /* txnNumber */, @@ -599,12 +682,13 @@ TEST_F(TxnAPITest, AttachesAPIVersion) { assertAPIParameters(mockClient()->getLastSentRequest(), params); mockClient()->setNextCommandResponse(kOKInsertResponse); - insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata(mockClient()->getLastSentRequest(), attempt + 1 /* txnNumber */, @@ -658,7 +742,8 @@ TEST_F(TxnAPITest, OwnSession_AttachesWriteConcernOnCommit) { // No write concern on requests prior to commit/abort. mockClient()->setNextCommandResponse(kOKInsertResponse); auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest( + boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -676,7 +761,8 @@ TEST_F(TxnAPITest, OwnSession_AttachesWriteConcernOnCommit) { mockClient()->setNextCommandResponse(kOKInsertResponse); insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, + "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -738,7 +824,8 @@ TEST_F(TxnAPITest, OwnSession_AttachesWriteConcernOnAbort) { opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest( + boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -780,7 +867,8 @@ TEST_F(TxnAPITest, OwnSession_AttachesReadConcernOnStartTransaction) { attempt += 1; mockClient()->setNextCommandResponse(kOKInsertResponse); auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest( + boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -799,7 +887,8 @@ TEST_F(TxnAPITest, OwnSession_AttachesReadConcernOnStartTransaction) { // Subsequent requests shouldn't have a read concern. 
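The `assertTxnMetadata` checks in these tests encode where the transaction options get attached: `startTransaction` and the readConcern only on the first statement, the writeConcern only on `commitTransaction`/`abortTransaction`. A hedged illustration of the two request shapes in the style of the tests; it compiles only inside the server tree and every field value is a placeholder:

```cpp
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {

// First statement of the transaction: carries startTransaction and the
// readConcern, but no writeConcern. All values are placeholders.
BSONObj exampleFirstStatement() {
    return BSON("insert"
                << "foo"
                << "documents" << BSON_ARRAY(BSON("x" << 1)) << "txnNumber" << 0LL
                << "startTransaction" << true << "autocommit" << false << "readConcern"
                << BSON("level"
                        << "snapshot"));
}

// The commit carries the writeConcern; statements inside the transaction do not.
BSONObj exampleCommit() {
    return BSON("commitTransaction" << 1 << "txnNumber" << 0LL << "autocommit" << false
                                    << "writeConcern"
                                    << BSON("w"
                                            << "majority"));
}

}  // namespace mongo
```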
mockClient()->setNextCommandResponse(kOKInsertResponse); insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, + "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -843,12 +932,13 @@ TEST_F(TxnAPITest, OwnSession_AbortsOnError) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. // The best effort abort response, the client should ignore this. @@ -909,12 +999,13 @@ TEST_F(TxnAPITest, OwnSession_RetriesOnTransientError) { mockClient()->setNextCommandResponse(attempt == 0 ? kNoSuchTransactionResponse : kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); // The commit or implicit abort response. mockClient()->setNextCommandResponse(kOKCommandResponse); @@ -961,12 +1052,13 @@ TEST_F(TxnAPITest, OwnSession_RetriesOnTransientClientError) { } mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_OK(getStatusFromWriteCommandReply(insertRes)); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. @@ -1003,12 +1095,13 @@ TEST_F(TxnAPITest, OwnSession_CommitError) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_OK(getStatusFromWriteCommandReply(insertRes)); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. 
@@ -1041,18 +1134,62 @@ TEST_F(TxnAPITest, OwnSession_CommitError) { ASSERT_EQ(lastRequest.firstElementFieldNameStringData(), "commitTransaction"_sd); } +TEST_F(TxnAPITest, DoesNotRetryOnNonTransientCommitErrorWithNonRetryableCommitWCError) { + auto swResult = txnWithRetries().runNoThrow( + opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + mockClient()->setNextCommandResponse(kOKInsertResponse); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); + ASSERT_OK(getStatusFromWriteCommandReply(insertRes)); + + ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. + assertTxnMetadata( + mockClient()->getLastSentRequest(), 0 /* txnNumber */, true /* startTransaction */); + assertSessionIdMetadata(mockClient()->getLastSentRequest(), LsidAssertion::kStandalone); + + // The commit response. + mockClient()->setNextCommandResponse( + kResWithNonTransientCommitErrorAndNonRetryableWriteConcernError); + + return SemiFuture::makeReady(); + }); + ASSERT(swResult.getStatus().isOK()); + ASSERT_EQ(swResult.getValue().cmdStatus, ErrorCodes::NoSuchTransaction); + ASSERT_EQ(swResult.getValue().wcError.toStatus(), ErrorCodes::WriteConcernFailed); + ASSERT_EQ(swResult.getValue().getEffectiveStatus(), ErrorCodes::NoSuchTransaction); + + ASSERT_EQ(1, InternalTransactionMetrics::get(opCtx())->getStarted()); + ASSERT_EQ(0, InternalTransactionMetrics::get(opCtx())->getRetriedTransactions()); + ASSERT_EQ(0, InternalTransactionMetrics::get(opCtx())->getRetriedCommits()); + ASSERT_EQ(0, InternalTransactionMetrics::get(opCtx())->getSucceeded()); + + auto lastRequest = mockClient()->getLastSentRequest(); + assertTxnMetadata(lastRequest, + 0 /* txnNumber */, + boost::none /* startTransaction */, + boost::none /* readConcern */, + WriteConcernOptions().toBSON() /* writeConcern */); + ASSERT_EQ(lastRequest.firstElementFieldNameStringData(), "commitTransaction"_sd); +} + TEST_F(TxnAPITest, OwnSession_TransientCommitError) { int attempt = -1; auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { attempt += 1; mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_OK(getStatusFromWriteCommandReply(insertRes)); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. 
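Taken together, the commit-error tests in this area pin a three-way decision: a transient transaction error retries the whole transaction, a retryable commit error or retryable write-concern error retries only the commit, and anything else is handed back to the caller. A simplified standalone model of that table; the real logic lives in `Transaction::handleError`, and the `ErrorHandlingStep` values other than `kDoNotRetry` are assumed names inferred from which metrics (`retriedTransactions` vs `retriedCommits`) the tests expect to move:

```cpp
#include <iostream>

// Simplified stand-in for the interesting bits of a commit response.
struct CommitOutcome {
    bool transientTxnErrorLabel;    // e.g. LockTimeout with the TransientTransaction label
    bool commandError;              // e.g. NoSuchTransaction
    bool retryableWriteConcernErr;  // e.g. PrimarySteppedDown inside writeConcernError
};

// kDoNotRetry exists in the real ErrorHandlingStep; the other two values are
// assumed names for "retry the whole body" and "retry only the commit".
enum class ErrorHandlingStep { kDoNotRetry, kRetryTransaction, kRetryCommit };

ErrorHandlingStep handleCommitError(const CommitOutcome& o) {
    if (o.transientTxnErrorLabel)
        return ErrorHandlingStep::kRetryTransaction;  // bumps retriedTransactions
    if (o.retryableWriteConcernErr)
        return ErrorHandlingStep::kRetryCommit;       // bumps retriedCommits
    if (o.commandError)
        return ErrorHandlingStep::kDoNotRetry;        // surfaced to the caller
    return ErrorHandlingStep::kDoNotRetry;            // success: nothing to retry
}

int main() {
    // Non-transient commit error + retryable WC error -> only the commit is retried.
    std::cout << static_cast<int>(handleCommitError({false, true, true})) << '\n';   // 2
    // Transient commit error -> the whole transaction is retried.
    std::cout << static_cast<int>(handleCommitError({true, false, true})) << '\n';   // 1
    // Non-transient commit error + non-retryable WC error -> returned to the caller.
    std::cout << static_cast<int>(handleCommitError({false, true, false})) << '\n';  // 0
    return 0;
}
```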
@@ -1092,12 +1229,13 @@ TEST_F(TxnAPITest, OwnSession_RetryableCommitError) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_OK(getStatusFromWriteCommandReply(insertRes)); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. @@ -1134,12 +1272,13 @@ TEST_F(TxnAPITest, OwnSession_NonRetryableCommitWCError) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata( mockClient()->getLastSentRequest(), 0 /* txnNumber */, true /* startTransaction */); @@ -1174,12 +1313,13 @@ TEST_F(TxnAPITest, OwnSession_RetryableCommitWCError) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_OK(getStatusFromWriteCommandReply(insertRes)); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. @@ -1210,6 +1350,97 @@ TEST_F(TxnAPITest, OwnSession_RetryableCommitWCError) { ASSERT_EQ(lastRequest.firstElementFieldNameStringData(), "commitTransaction"_sd); } +TEST_F(TxnAPITest, RetriesOnNonTransientCommitWithErrorRetryableCommitWCError) { + auto swResult = txnWithRetries().runNoThrow( + opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + mockClient()->setNextCommandResponse(kOKInsertResponse); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); + ASSERT_OK(getStatusFromWriteCommandReply(insertRes)); + + ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. + assertTxnMetadata( + mockClient()->getLastSentRequest(), 0 /* txnNumber */, true /* startTransaction */); + assertSessionIdMetadata(mockClient()->getLastSentRequest(), LsidAssertion::kStandalone); + + // The commit responses. 
+ mockClient()->setNextCommandResponse( + kResWithNonTransientCommitErrorAndRetryableWriteConcernError); + mockClient()->setNextCommandResponse(kOKCommandResponse); + return SemiFuture::makeReady(); + }); + ASSERT(swResult.getStatus().isOK()); + ASSERT(swResult.getValue().getEffectiveStatus().isOK()); + + ASSERT_EQ(1, InternalTransactionMetrics::get(opCtx())->getStarted()); + ASSERT_EQ(0, InternalTransactionMetrics::get(opCtx())->getRetriedTransactions()); + ASSERT_EQ(1, InternalTransactionMetrics::get(opCtx())->getRetriedCommits()); + ASSERT_EQ(1, InternalTransactionMetrics::get(opCtx())->getSucceeded()); + + auto lastRequest = mockClient()->getLastSentRequest(); + assertTxnMetadata(lastRequest, + 0 /* txnNumber */, + boost::none /* startTransaction */, + boost::none /* readConcern */, + CommandHelpers::kMajorityWriteConcern.toBSON()); + assertSessionIdMetadata(mockClient()->getLastSentRequest(), LsidAssertion::kStandalone); + ASSERT_EQ(lastRequest.firstElementFieldNameStringData(), "commitTransaction"_sd); +} + +TEST_F(TxnAPITest, RetriesOnTransientCommitErrorWithRetryableWCError) { + int attempt = -1; + auto swResult = txnWithRetries().runNoThrow( + opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + attempt += 1; + mockClient()->setNextCommandResponse(kOKInsertResponse); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); + ASSERT_OK(getStatusFromWriteCommandReply(insertRes)); + + ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. + assertTxnMetadata(mockClient()->getLastSentRequest(), + attempt /* txnNumber */, + true /* startTransaction */); + assertSessionIdMetadata(mockClient()->getLastSentRequest(), LsidAssertion::kStandalone); + + // Set commit response. Initial commit response is a transient txn error so the + // transaction retries. 
+ if (attempt == 0) { + mockClient()->setNextCommandResponse( + kResWithTransientCommitErrorAndRetryableWriteConcernError); + } else { + mockClient()->setNextCommandResponse(kOKCommandResponse); + } + return SemiFuture::makeReady(); + }); + ASSERT(swResult.getStatus().isOK()); + ASSERT(swResult.getValue().getEffectiveStatus().isOK()); + + ASSERT_EQ(1, InternalTransactionMetrics::get(opCtx())->getStarted()); + ASSERT_EQ(1, InternalTransactionMetrics::get(opCtx())->getRetriedTransactions()); + ASSERT_EQ(0, InternalTransactionMetrics::get(opCtx())->getRetriedCommits()); + ASSERT_EQ(1, InternalTransactionMetrics::get(opCtx())->getSucceeded()); + + auto lastRequest = mockClient()->getLastSentRequest(); + assertTxnMetadata(lastRequest, + attempt /* txnNumber */, + boost::none /* startTransaction */, + boost::none /* readConcern */, + WriteConcernOptions().toBSON() /* writeConcern */); + assertSessionIdMetadata(mockClient()->getLastSentRequest(), LsidAssertion::kStandalone); + ASSERT_EQ(lastRequest.firstElementFieldNameStringData(), "commitTransaction"_sd); +} + TEST_F(TxnAPITest, RunNoErrors) { txnWithRetries().run(opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { @@ -1230,47 +1461,51 @@ TEST_F(TxnAPITest, RunThrowsOnBodyError) { TEST_F(TxnAPITest, RunThrowsOnCommitCmdError) { ASSERT_THROWS_CODE( - txnWithRetries().run( - opCtx(), - [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + txnWithRetries().run(opCtx(), + [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + mockClient()->setNextCommandResponse(kOKInsertResponse); + auto insertRes = + txnClient + .runCommand( + DatabaseName::createDatabaseName_forTest(boost::none, + "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); - // The commit response. - mockClient()->setNextCommandResponse( - BSON("ok" << 0 << "code" << ErrorCodes::InternalError)); - mockClient()->setNextCommandResponse( - kOKCommandResponse); // Best effort abort response. - return SemiFuture::makeReady(); - }), + // The commit response. + mockClient()->setNextCommandResponse( + BSON("ok" << 0 << "code" << ErrorCodes::InternalError)); + mockClient()->setNextCommandResponse( + kOKCommandResponse); // Best effort abort response. + return SemiFuture::makeReady(); + }), DBException, ErrorCodes::InternalError); } TEST_F(TxnAPITest, RunThrowsOnCommitWCError) { ASSERT_THROWS_CODE( - txnWithRetries().run( - opCtx(), - [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + txnWithRetries().run(opCtx(), + [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + mockClient()->setNextCommandResponse(kOKInsertResponse); + auto insertRes = + txnClient + .runCommand( + DatabaseName::createDatabaseName_forTest(boost::none, + "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); - // The commit response. 
- mockClient()->setNextCommandResponse(kResWithWriteConcernError); - mockClient()->setNextCommandResponse( - kOKCommandResponse); // Best effort abort response. - return SemiFuture::makeReady(); - }), + // The commit response. + mockClient()->setNextCommandResponse(kResWithWriteConcernError); + mockClient()->setNextCommandResponse( + kOKCommandResponse); // Best effort abort response. + return SemiFuture::makeReady(); + }), DBException, ErrorCodes::WriteConcernFailed); } @@ -1280,12 +1515,13 @@ TEST_F(TxnAPITest, UnyieldsAfterBodyError) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); uasserted(ErrorCodes::InternalError, "Simulated body error"); return SemiFuture::makeReady(); }); @@ -1301,12 +1537,13 @@ TEST_F(TxnAPITest, HandlesExceptionWhileYielding) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); return SemiFuture::makeReady(); }); ASSERT_EQ(swResult.getStatus(), ErrorCodes::Interrupted); @@ -1321,12 +1558,13 @@ TEST_F(TxnAPITest, HandlesExceptionWhileUnyielding) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); return SemiFuture::makeReady(); }); ASSERT_EQ(swResult.getStatus(), ErrorCodes::Interrupted); @@ -1335,48 +1573,106 @@ TEST_F(TxnAPITest, HandlesExceptionWhileUnyielding) { ASSERT_EQ(resourceYielder()->timesUnyielded(), 1); } -// TODO SERVER-75553 - with inline executors, this test runs in the wrong order -#if 0 -TEST_F(TxnAPITest, UnyieldsAfterCancellation) { +TEST_F(TxnAPITest, TransactionErrorTakesPrecedenceOverUnyieldError) { resetTxnWithRetries(std::make_unique()); + auto swResult = txnWithRetries().runNoThrow( + opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + mockClient()->setNextCommandResponse(kOKInsertResponse); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); + + resourceYielder()->throwInUnyield(ErrorCodes::Interrupted); + + uasserted(ErrorCodes::InternalError, "Mock error"); + + return SemiFuture::makeReady(); + }); + + // The transaction should fail with the error the transaction failed with instead of the + // ResourceYielder error. 
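The assertion just below, together with the two interruption tests that follow, fixes the precedence order for the sync API's result: the transaction's own error wins over an unyield failure, and a caller interruption wins over both (matching the `checkForInterruptNoAssert` branch added to `runNoThrow` earlier in this patch). A tiny standalone sketch of that ordering with an illustrative `Status` type, not the server's:

```cpp
#include <cassert>
#include <optional>
#include <string>

// Illustrative status type; in the server this would be mongo::Status.
struct Status {
    bool ok;
    std::string reason;
};

Status pickResult(const std::optional<Status>& callerInterruption,
                  const Status& txnResult,
                  const Status& unyieldStatus) {
    if (!txnResult.ok) {
        // Prefer the caller's (presumably more meaningful) interruption code.
        if (callerInterruption && !callerInterruption->ok)
            return *callerInterruption;
        return txnResult;
    }
    if (!unyieldStatus.ok)
        return unyieldStatus;
    return txnResult;
}

int main() {
    const Status ok{true, ""};

    // Transaction error beats the unyield error.
    assert(pickResult(std::nullopt, {false, "InternalError"}, {false, "Interrupted"}).reason ==
           "InternalError");
    // A caller interruption beats the transaction error.
    assert(pickResult(Status{false, "InterruptedDueToReplStateChange"},
                      {false, "InternalError"},
                      ok)
               .reason == "InterruptedDueToReplStateChange");
    // With a successful transaction, an unyield failure is still reported.
    assert(pickResult(std::nullopt, ok, {false, "Interrupted"}).reason == "Interrupted");
    return 0;
}
```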
+ ASSERT_EQ(swResult.getStatus(), ErrorCodes::InternalError); + // Yield before starting and corresponding unyield. + ASSERT_EQ(resourceYielder()->timesYielded(), 1); + ASSERT_EQ(resourceYielder()->timesUnyielded(), 1); +} + +TEST_F(TxnAPITest, TransactionObeysCallerOpCtxBeingInterrupted) { unittest::Barrier txnApiStarted(2); unittest::Barrier opCtxKilled(2); auto killerThread = stdx::thread([&txnApiStarted, &opCtxKilled, opCtx = opCtx()] { txnApiStarted.countDownAndWait(); - opCtx->markKilled(); + opCtx->markKilled(ErrorCodes::InterruptedDueToReplStateChange); opCtxKilled.countDownAndWait(); }); + // Hang commit so we know the transaction obeys the cancellation, otherwise the test would hang. + mockClient()->setHangNextCommitOrAbortCommand(true); + auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); + + txnApiStarted.countDownAndWait(); + opCtxKilled.countDownAndWait(); + + return SemiFuture::makeReady(); + }); + + // The transaction should fail with the error the caller was interrupted with. + ASSERT_EQ(swResult.getStatus(), ErrorCodes::InterruptedDueToReplStateChange); - resourceYielder()->throwInUnyield(ErrorCodes::InternalError); + killerThread.join(); +} + +TEST_F(TxnAPITest, CallerInterruptionErrorTakesPrecedenceOverTransactionError) { + unittest::Barrier txnApiStarted(2); + unittest::Barrier opCtxKilled(2); + + auto killerThread = stdx::thread([&txnApiStarted, &opCtxKilled, opCtx = opCtx()] { + txnApiStarted.countDownAndWait(); + opCtx->markKilled(ErrorCodes::InterruptedDueToReplStateChange); + opCtxKilled.countDownAndWait(); + }); + + auto swResult = txnWithRetries().runNoThrow( + opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + mockClient()->setNextCommandResponse(kOKInsertResponse); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); txnApiStarted.countDownAndWait(); opCtxKilled.countDownAndWait(); + // Fail the transaction to verify the caller interruption error is returned instead. + uasserted(ErrorCodes::InternalError, "Mock error"); + return SemiFuture::makeReady(); }); - // The transaction should fail with an Interrupted error from killing the opCtx using the - // API instead of the ResourceYielder error from within the API callback. - ASSERT_EQ(swResult.getStatus(), ErrorCodes::Interrupted); - // Yield before starting and corresponding unyield. - ASSERT_EQ(resourceYielder()->timesYielded(), 1); - ASSERT_EQ(resourceYielder()->timesUnyielded(), 1); + // The transaction should fail with the error the caller was interrupted with. 
+ ASSERT_EQ(swResult.getStatus(), ErrorCodes::InterruptedDueToReplStateChange); killerThread.join(); } -#endif TEST_F(TxnAPITest, ClientSession_UsesNonRetryableInternalSession) { opCtx()->setLogicalSessionId(makeLogicalSessionIdForTest()); @@ -1389,12 +1685,13 @@ TEST_F(TxnAPITest, ClientSession_UsesNonRetryableInternalSession) { attempt += 1; mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata(mockClient()->getLastSentRequest(), attempt /* txnNumber */, @@ -1440,16 +1737,17 @@ TEST_F(TxnAPITest, ClientRetryableWrite_UsesRetryableInternalSession) { attempt += 1; mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" - << BSON_ARRAY(BSON("x" << 1)) - // Retryable transactions must include stmtIds for - // retryable write commands. - << "stmtIds" << BSON_ARRAY(1))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" + << BSON_ARRAY(BSON("x" << 1)) + // Retryable transactions must include stmtIds for + // retryable write commands. + << "stmtIds" << BSON_ARRAY(1))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata(mockClient()->getLastSentRequest(), attempt /* txnNumber */, @@ -1461,18 +1759,19 @@ TEST_F(TxnAPITest, ClientRetryableWrite_UsesRetryableInternalSession) { // Verify a non-retryable write command does not need to include stmtIds. mockClient()->setNextCommandResponse(kOKCommandResponse); - auto findRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("find" - << "foo")) - .get(); + auto findRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("find" + << "foo")) + .get(); ASSERT(findRes["ok"]); // Verify the mocked response was returned. // Verify the alternate format for stmtIds is allowed. 
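Both statement-id spellings asserted here line up with the relaxed invariant in `Transaction::prepareRequest`, which now also accepts bulkWrite's `stmtId`/`stmtIds` fields. A hedged fragment, in the style of these tests, showing the two accepted shapes for a retryable write inside a retryable internal transaction (server-tree-only code; values are placeholders):

```cpp
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {

// Array form: one stmtId per document in the batch.
BSONObj exampleInsertWithStmtIds() {
    return BSON("insert"
                << "foo"
                << "documents" << BSON_ARRAY(BSON("x" << 1)) << "stmtIds" << BSON_ARRAY(1));
}

// Scalar form: the "alternate format" exercised just below.
BSONObj exampleInsertWithStmtId() {
    return BSON("insert"
                << "foo"
                << "documents" << BSON_ARRAY(BSON("x" << 1)) << "stmtId" << 1);
}

}  // namespace mongo
```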
mockClient()->setNextCommandResponse(kOKInsertResponse); insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)) << "stmtId" << 1)) @@ -1516,12 +1815,13 @@ DEATH_TEST_F(TxnAPITest, auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); return SemiFuture::makeReady(); }); @@ -1538,12 +1838,13 @@ TEST_F(TxnAPITest, ClientTransaction_UsesClientTransactionOptionsAndDoesNotCommi auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata(mockClient()->getLastSentRequest(), *opCtx()->getTxnNumber(), @@ -1578,12 +1879,13 @@ TEST_F(TxnAPITest, ClientTransaction_DoesNotAppendStartTransactionFields) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata(mockClient()->getLastSentRequest(), *opCtx()->getTxnNumber(), @@ -1612,12 +1914,13 @@ TEST_F(TxnAPITest, ClientTransaction_DoesNotBestEffortAbortOnFailure) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. 
assertTxnMetadata(mockClient()->getLastSentRequest(), *opCtx()->getTxnNumber(), @@ -1647,12 +1950,13 @@ TEST_F(TxnAPITest, ClientTransaction_DoesNotRetryOnTransientErrors) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata(mockClient()->getLastSentRequest(), *opCtx()->getTxnNumber(), @@ -1677,12 +1981,13 @@ TEST_F(TxnAPITest, HandleErrorRetryCommitOnNetworkError) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata( @@ -1730,12 +2035,13 @@ TEST_F(TxnAPITest, RetryCommitMultipleTimesIncludesMajorityWriteConcern) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. return SemiFuture::makeReady(); @@ -1788,12 +2094,13 @@ TEST_F(TxnAPITest, CommitAfterTransientErrorAfterRetryCommitUsesOriginalWriteCon auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. return SemiFuture::makeReady(); @@ -1965,12 +2272,13 @@ TEST_F(TxnAPITest, OwnSession_StartTransactionRetryLimitOnTransientErrors) { // Command response used for insert below and eventually abortTransaction. 
mockClient()->setNextCommandResponse(kOKCommandResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); uasserted(ErrorCodes::HostUnreachable, "Host unreachable error"); return SemiFuture::makeReady(); }); @@ -1994,12 +2302,13 @@ TEST_F(TxnAPITest, OwnSession_CommitTransactionRetryLimitOnTransientErrors) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. assertTxnMetadata( mockClient()->getLastSentRequest(), 0 /* txnNumber */, true /* startTransaction */); @@ -2045,12 +2354,13 @@ TEST_F(TxnAPITest, MaxTimeMSIsSetIfOperationContextHasDeadlineAndIgnoresDefaultR attempt += 1; mockClient()->setNextCommandResponse(kOKInsertResponse); - auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), - BSON("insert" - << "foo" - << "documents" << BSON_ARRAY(BSON("x" << 1)))) - .get(); + auto insertRes = + txnClient + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), + BSON("insert" + << "foo" + << "documents" << BSON_ARRAY(BSON("x" << 1)))) + .get(); ASSERT_EQ(insertRes["n"].Int(), 1); // Verify the mocked response was returned. 
assertTxnMetadata(mockClient()->getLastSentRequest(), attempt + 1 /* txnNumber */, @@ -2149,7 +2459,8 @@ TEST_F(TxnAPITest, FailoverAndShutdownErrorsAreFatalForLocalTransactionBodyError mockClient()->setNextCommandResponse(kOKInsertResponse); auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest( + boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -2198,7 +2509,8 @@ TEST_F(TxnAPITest, FailoverAndShutdownErrorsAreFatalForLocalTransactionCommandEr mockClient()->setNextCommandResponse(kOKInsertResponse); auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest( + boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -2246,7 +2558,8 @@ TEST_F(TxnAPITest, FailoverAndShutdownErrorsAreFatalForLocalTransactionWCError) mockClient()->setNextCommandResponse(kOKInsertResponse); auto insertRes = txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest( + boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -2284,20 +2597,18 @@ TEST_F(TxnAPITest, FailoverAndShutdownErrorsAreFatalForLocalTransactionWCError) runTest(true, Status(ErrorCodes::HostUnreachable, "mock retriable error")); } -// TODO SERVER-75553 - test needs to be aborted and assumes multiple-threads -#if 0 TEST_F(TxnAPITest, DoesNotWaitForBestEffortAbortIfCancelled) { // Start the transaction with an insert. mockClient()->setNextCommandResponse(kOKInsertResponse); // Hang the best effort abort that will be triggered after giving up on the transaction. - mockClient()->setHangNextAbortCommand(true); + mockClient()->setHangNextCommitOrAbortCommand(true); mockClient()->setNextCommandResponse(kOKCommandResponse); auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -2313,12 +2624,12 @@ TEST_F(TxnAPITest, DoesNotWaitForBestEffortAbortIfCancelled) { ASSERT_FALSE(swResult.getStatus().isOK()); // The abort should get hung and not have been processed yet. - mockClient()->waitForHungAbortWaiter(); + mockClient()->waitForHungCommitOrAbort(); auto lastRequest = mockClient()->getLastSentRequest(); ASSERT_NE(lastRequest.firstElementFieldNameStringData(), "abortTransaction"_sd); // Unblock the abort and verify it eventually runs. - mockClient()->setHangNextAbortCommand(false); + mockClient()->setHangNextCommitOrAbortCommand(false); waitForAllEarlierTasksToComplete(); expectSentAbort(0 /* txnNumber */, WriteConcernOptions().toBSON()); } @@ -2335,21 +2646,20 @@ TEST_F(TxnAPITest, WaitsForBestEffortAbortOnNonTransientErrorIfNotCancelled) { std::make_unique(std::move(options)), executor::makeNetworkInterface("TxnAPITestNetwork")); executor->startup(); - auto exec1 = executor::InlineExecutor().getSleepableExecutor(executor); - resetTxnWithRetries(nullptr /* resourceYielder */, exec1); + resetTxnWithRetries(nullptr /* resourceYielder */, executor); // Start the transaction with an insert. 
mockClient()->setNextCommandResponse(kOKInsertResponse); // Hang the best effort abort that will be triggered before retrying after we throw an error. - mockClient()->setHangNextAbortCommand(true); + mockClient()->setHangNextCommitOrAbortCommand(true); mockClient()->setNextCommandResponse(kOKCommandResponse); auto future = stdx::async(stdx::launch::async, [&] { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -2367,12 +2677,12 @@ TEST_F(TxnAPITest, WaitsForBestEffortAbortOnNonTransientErrorIfNotCancelled) { ASSERT(stdx::future_status::ready != future.wait_for(Milliseconds(10).toSystemDuration())); // The abort should get hung and not have been processed yet. - mockClient()->waitForHungAbortWaiter(); + mockClient()->waitForHungCommitOrAbort(); auto lastRequest = mockClient()->getLastSentRequest(); ASSERT_NE(lastRequest.firstElementFieldNameStringData(), "abortTransaction"_sd); // Allow the abort to finish and it should unblock the API. - mockClient()->setHangNextAbortCommand(false); + mockClient()->setHangNextCommitOrAbortCommand(false); future.get(); // After the abort finishes, the API should not have retried. @@ -2395,15 +2705,13 @@ TEST_F(TxnAPITest, WaitsForBestEffortAbortOnTransientError) { std::make_unique(std::move(options)), executor::makeNetworkInterface("TxnAPITestNetwork")); executor->startup(); - auto exec1 = executor::InlineExecutor().getSleepableExecutor(executor); - - resetTxnWithRetries(nullptr /* resourceYielder */, exec1); + resetTxnWithRetries(nullptr /* resourceYielder */, executor); // Start the transaction with an insert. mockClient()->setNextCommandResponse(kOKInsertResponse); // Hang the best effort abort that will be triggered before retrying after we throw an error. - mockClient()->setHangNextAbortCommand(true); + mockClient()->setHangNextCommitOrAbortCommand(true); mockClient()->setNextCommandResponse(kOKCommandResponse); // Second attempt's insert and successful commit response. @@ -2415,7 +2723,7 @@ TEST_F(TxnAPITest, WaitsForBestEffortAbortOnTransientError) { auto swResult = txnWithRetries().runNoThrow( opCtx(), [&](const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { txnClient - .runCommand(DatabaseName(boost::none, "user"_sd), + .runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "user"_sd), BSON("insert" << "foo" << "documents" << BSON_ARRAY(BSON("x" << 1)))) @@ -2434,12 +2742,12 @@ TEST_F(TxnAPITest, WaitsForBestEffortAbortOnTransientError) { ASSERT(stdx::future_status::ready != future.wait_for(Milliseconds(10).toSystemDuration())); // The abort should get hung and not have been processed yet. - mockClient()->waitForHungAbortWaiter(); + mockClient()->waitForHungCommitOrAbort(); auto lastRequest = mockClient()->getLastSentRequest(); ASSERT_NE(lastRequest.firstElementFieldNameStringData(), "abortTransaction"_sd); // Allow the abort to finish and it should unblock the API. - mockClient()->setHangNextAbortCommand(false); + mockClient()->setHangNextCommitOrAbortCommand(false); future.get(); // After the abort finishes, the API should retry and successfully commit. 
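The two tests above exercise the best-effort abort path by telling the mock client to hang the next commit/abort command, asserting that the API has not finished, and then releasing the hang. A minimal, standard-library-only sketch of that hang-then-unblock testing idiom (the HangableStep type below is illustrative and unrelated to the mock client's real API):

```cpp
#include <cassert>
#include <chrono>
#include <future>

// A toy "mock" whose single step can be made to hang until the test releases it.
class HangableStep {
public:
    void setHang(bool hang) {
        if (hang) {
            _gate = std::promise<void>();
            _release = _gate.get_future().share();
        } else {
            _gate.set_value();
        }
    }

    void run() {
        _started.set_value();
        if (_release.valid())
            _release.wait();  // Block here until the test unblocks the step.
    }

    void waitForHung() {
        _started.get_future().wait();
    }

private:
    std::promise<void> _started;
    std::promise<void> _gate;
    std::shared_future<void> _release;
};

int main() {
    HangableStep step;
    step.setHang(true);

    auto done = std::async(std::launch::async, [&] { step.run(); });

    // The step has been reached but must not have completed while hung.
    step.waitForHung();
    assert(done.wait_for(std::chrono::milliseconds(10)) != std::future_status::ready);

    // Unblock it and verify it eventually completes.
    step.setHang(false);
    done.get();
    return 0;
}
```

The same shape appears in the hunks above: setHangNextCommitOrAbortCommand(true), waitForHungCommitOrAbort(), assert the work is still pending, then setHangNextCommitOrAbortCommand(false) and wait for completion.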
@@ -2455,7 +2763,6 @@ TEST_F(TxnAPITest, WaitsForBestEffortAbortOnTransientError) { executor->shutdown(); executor->join(); } -#endif } // namespace } // namespace mongo diff --git a/src/mongo/db/transaction/transaction_history_iterator.cpp b/src/mongo/db/transaction/transaction_history_iterator.cpp index 09227f63a8de3..4ef75f1979aec 100644 --- a/src/mongo/db/transaction/transaction_history_iterator.cpp +++ b/src/mongo/db/transaction/transaction_history_iterator.cpp @@ -27,18 +27,44 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/stats/top.h" #include "mongo/db/transaction/transaction_history_iterator.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/transaction/transaction_history_iterator.h b/src/mongo/db/transaction/transaction_history_iterator.h index e81131131cc47..87b4ab1c27b81 100644 --- a/src/mongo/db/transaction/transaction_history_iterator.h +++ b/src/mongo/db/transaction/transaction_history_iterator.h @@ -30,6 +30,7 @@ #pragma once #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/optime.h" namespace mongo { diff --git a/src/mongo/db/transaction/transaction_history_iterator_test.cpp b/src/mongo/db/transaction/transaction_history_iterator_test.cpp index 57f67845f90db..e291d76c2eb03 100644 --- a/src/mongo/db/transaction/transaction_history_iterator_test.cpp +++ b/src/mongo/db/transaction/transaction_history_iterator_test.cpp @@ -27,26 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include -#include "mongo/base/init.h" -#include "mongo/db/client.h" -#include "mongo/db/curop.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" #include "mongo/db/repl/mock_repl_coord_server_fixture.h" -#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/service_context.h" -#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" #include "mongo/db/transaction/transaction_history_iterator.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/transaction/transaction_metrics_observer.cpp b/src/mongo/db/transaction/transaction_metrics_observer.cpp index 0324b23139690..a2ddee61995b7 100644 --- a/src/mongo/db/transaction/transaction_metrics_observer.cpp +++ b/src/mongo/db/transaction/transaction_metrics_observer.cpp @@ -27,13 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/transaction/transaction_metrics_observer.h" +#include +#include "mongo/bson/bsonobj.h" #include "mongo/db/curop.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_stats.h" #include "mongo/db/transaction/server_transactions_metrics.h" -#include "mongo/db/transaction/transaction_participant.h" +#include "mongo/db/transaction/transaction_metrics_observer.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/db/transaction/transaction_metrics_observer.h b/src/mongo/db/transaction/transaction_metrics_observer.h index 2f4b1e4b32a86..051981186b342 100644 --- a/src/mongo/db/transaction/transaction_metrics_observer.h +++ b/src/mongo/db/transaction/transaction_metrics_observer.h @@ -29,10 +29,17 @@ #pragma once +#include + +#include "mongo/bson/timestamp.h" #include "mongo/db/curop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/db/stats/single_transaction_stats.h" #include "mongo/db/stats/top.h" #include "mongo/db/transaction/server_transactions_metrics.h" +#include "mongo/util/tick_source.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/transaction/transaction_operations.cpp b/src/mongo/db/transaction/transaction_operations.cpp index 9c6b8282fc429..6f93226412d90 100644 --- a/src/mongo/db/transaction/transaction_operations.cpp +++ b/src/mongo/db/transaction/transaction_operations.cpp @@ -29,8 +29,25 @@ #include "mongo/db/transaction/transaction_operations.h" +#include +#include #include +#include +#include #include +#include +#include + 
+#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -317,7 +334,7 @@ std::size_t TransactionOperations::logOplogEntries( if (imageToWrite) { uassert(6054002, - str::stream() << NamespaceString::kConfigImagesNamespace + str::stream() << NamespaceString::kConfigImagesNamespace.toStringForErrorMsg() << " can only store the pre or post image of one " "findAndModify operation for each " "transaction", diff --git a/src/mongo/db/transaction/transaction_operations.h b/src/mongo/db/transaction/transaction_operations.h index 55c98d0df36a6..e4f30dad1b975 100644 --- a/src/mongo/db/transaction/transaction_operations.h +++ b/src/mongo/db/transaction/transaction_operations.h @@ -29,14 +29,25 @@ #pragma once +#include +#include #include +#include #include +#include +#include #include #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/repl/oplog.h" // for OplogSlot #include "mongo/db/repl/oplog_entry.h" // for ReplOperation +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/db/transaction/transaction_operations_test.cpp b/src/mongo/db/transaction/transaction_operations_test.cpp index d7ea5b8da039a..0997a17564af6 100644 --- a/src/mongo/db/transaction/transaction_operations_test.cpp +++ b/src/mongo/db/transaction/transaction_operations_test.cpp @@ -27,12 +27,34 @@ * it in the license file. 
*/ -#include - +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_operations.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/transaction/transaction_participant.cpp b/src/mongo/db/transaction/transaction_participant.cpp index fa0611f70b80f..914c4bdaaf732 100644 --- a/src/mongo/db/transaction/transaction_participant.cpp +++ b/src/mongo/db/transaction/transaction_participant.cpp @@ -32,50 +32,116 @@ #include "mongo/db/transaction/transaction_participant.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include - -#include "mongo/base/shim.h" +#include // IWYU pragma: keep +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/local_oplog_info.h" #include "mongo/db/catalog/uncommitted_catalog_updates.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" -#include "mongo/db/concurrency/lock_state.h" -#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker_impl.h" #include "mongo/db/concurrency/replication_state_transition_lock_guard.h" +#include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/database_name.h" +#include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/dbhelpers.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/ops/update.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/ops/write_ops_retryability.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/record_id.h" #include "mongo/db/repl/apply_ops_command_info.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/read_concern_level.h" +#include 
"mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/s/sharding_write_router.h" -#include "mongo/db/server_recovery.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/internal_session_pool.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog_mongod.h" +#include "mongo/db/shard_role.h" #include "mongo/db/stats/fill_locker_info.h" -#include "mongo/db/storage/flow_control.h" +#include "mongo/db/stats/resource_consumption_metrics.h" +#include "mongo/db/stats/top.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/storage_stats.h" #include "mongo/db/transaction/retryable_writes_stats.h" #include "mongo/db/transaction/server_transactions_metrics.h" #include "mongo/db/transaction/transaction_history_iterator.h" #include "mongo/db/transaction/transaction_participant_gen.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/txn_retry_counter_too_old_info.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/s/grid.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/would_change_owning_shard_exception.h" -#include "mongo/util/debugger.h" +#include "mongo/transport/session.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/log_with_sampling.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/tick_source.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -171,6 +237,10 @@ void validateTransactionHistoryApplyOpsOplogEntry(const repl::OplogEntry& oplogE template auto performReadWithNoTimestampDBDirectClient(OperationContext* opCtx, Callable&& callable) { ReadSourceScope readSourceScope(opCtx, RecoveryUnit::ReadSource::kNoTimestamp); + // ReadConcern must also be fixed for the new scope. It will get restored when exiting this. + auto originalReadConcern = + std::exchange(repl::ReadConcernArgs::get(opCtx), repl::ReadConcernArgs()); + ON_BLOCK_EXIT([&] { repl::ReadConcernArgs::get(opCtx) = std::move(originalReadConcern); }); DBDirectClient client(opCtx); return callable(&client); @@ -183,10 +253,10 @@ void rethrowPartialIndexQueryBadValueWithContext(const DBException& ex) { ex.toStatus(), str::stream() << "Failed to find partial index for " - << NamespaceString::kSessionTransactionsTableNamespace.ns() + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg() << ". 
Please create an index directly on this replica set with the specification: " << MongoDSessionCatalog::getConfigTxnPartialIndexSpec() << " or drop the " - << NamespaceString::kSessionTransactionsTableNamespace.ns() + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg() << " collection and step up a new primary."); } } @@ -209,6 +279,9 @@ ActiveTransactionHistory fetchActiveTransactionHistory(OperationContext* opCtx, result.lastTxnRecord = [&]() -> boost::optional { ReadSourceScope readSourceScope(opCtx, RecoveryUnit::ReadSource::kNoTimestamp); + auto originalReadConcern = + std::exchange(repl::ReadConcernArgs::get(opCtx), repl::ReadConcernArgs()); + ON_BLOCK_EXIT([&] { repl::ReadConcernArgs::get(opCtx) = std::move(originalReadConcern); }); AutoGetCollectionForRead autoRead(opCtx, NamespaceString::kSessionTransactionsTableNamespace); @@ -274,6 +347,9 @@ ActiveTransactionHistory fetchActiveTransactionHistory(OperationContext* opCtx, // Restore the current timestamp read source after fetching transaction history, which may // change our ReadSource. ReadSourceScope readSourceScope(opCtx, RecoveryUnit::ReadSource::kNoTimestamp); + auto originalReadConcern = + std::exchange(repl::ReadConcernArgs::get(opCtx), repl::ReadConcernArgs()); + ON_BLOCK_EXIT([&] { repl::ReadConcernArgs::get(opCtx) = std::move(originalReadConcern); }); auto it = TransactionHistoryIterator(result.lastTxnRecord->getLastWriteOpTime()); while (it.hasNext()) { @@ -316,14 +392,6 @@ ActiveTransactionHistory fetchActiveTransactionHistory(OperationContext* opCtx, continue; } - // TODO (SERVER-64172): Remove leftover upgrade/downgrade code from 4.2 in - // fetchActiveTransactionHistory. - if (entry.getCommandType() == repl::OplogEntry::CommandType::kApplyOps && - !entry.shouldPrepare() && !entry.isPartialTransaction()) { - result.lastTxnRecord->setState(DurableTxnStateEnum::kCommitted); - return result; - } - insertStmtIdsForOplogEntry(entry); } } catch (const DBException& ex) { @@ -393,39 +461,48 @@ void updateSessionEntry(OperationContext* opCtx, // TODO SERVER-58243: evaluate whether this is safe or whether acquiring the lock can block. AllowLockAcquisitionOnTimestampedUnitOfWork allowLockAcquisition(opCtx->lockState()); - AutoGetCollection collection( - opCtx, NamespaceString::kSessionTransactionsTableNamespace, MODE_IX); + const auto collection = acquireCollection( + opCtx, + CollectionAcquisitionRequest(NamespaceString::kSessionTransactionsTableNamespace, + PlacementConcern{boost::none, ShardVersion::UNSHARDED()}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); - uassert(40527, - str::stream() << "Unable to persist transaction state because the session transaction " - "collection is missing. This indicates that the " - << NamespaceString::kSessionTransactionsTableNamespace.ns() - << " collection has been manually deleted.", - collection.getCollection()); + uassert( + 40527, + str::stream() << "Unable to persist transaction state because the session transaction " + "collection is missing. 
This indicates that the " + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg() + << " collection has been manually deleted.", + collection.exists()); WriteUnitOfWork wuow(opCtx); - auto idIndex = collection->getIndexCatalog()->findIdIndex(opCtx); - - uassert(40672, - str::stream() << "Failed to fetch _id index for " - << NamespaceString::kSessionTransactionsTableNamespace.ns(), - idIndex); + auto idIndex = collection.getCollectionPtr()->getIndexCatalog()->findIdIndex(opCtx); - auto indexAccess = - collection->getIndexCatalog()->getEntry(idIndex)->accessMethod()->asSortedData(); + uassert( + 40672, + str::stream() << "Failed to fetch _id index for " + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg(), + idIndex); + + const IndexCatalogEntry* entry = + collection.getCollectionPtr()->getIndexCatalog()->getEntry(idIndex); + auto indexAccess = entry->accessMethod()->asSortedData(); // Since we are looking up a key inside the _id index, create a key object consisting of only // the _id field. auto idToFetch = updateRequest.getQuery().firstElement(); auto toUpdateIdDoc = idToFetch.wrap(); dassert(idToFetch.fieldNameStringData() == "_id"_sd); - auto recordId = indexAccess->findSingle(opCtx, *collection, toUpdateIdDoc); + auto recordId = + indexAccess->findSingle(opCtx, collection.getCollectionPtr(), entry, toUpdateIdDoc); auto startingSnapshotId = opCtx->recoveryUnit()->getSnapshotId(); if (recordId.isNull()) { // Upsert case. auto status = collection_internal::insertDocument( - opCtx, *collection, InsertStatement(updateMod), nullptr, false); + opCtx, collection.getCollectionPtr(), InsertStatement(updateMod), nullptr, false); if (status == ErrorCodes::DuplicateKey) { throwWriteConflictException( @@ -438,17 +515,20 @@ void updateSessionEntry(OperationContext* opCtx, return; } - auto originalRecordData = collection->getRecordStore()->dataFor(opCtx, recordId); + auto originalRecordData = + collection.getCollectionPtr()->getRecordStore()->dataFor(opCtx, recordId); auto originalDoc = originalRecordData.toBson(); const auto parentLsidFieldName = SessionTxnRecord::kParentSessionIdFieldName; - uassert(5875700, - str::stream() << "Cannot modify the '" << parentLsidFieldName << "' field of " - << NamespaceString::kSessionTransactionsTableNamespace << " entries", - updateMod.getObjectField(parentLsidFieldName) - .woCompare(originalDoc.getObjectField(parentLsidFieldName)) == 0); - - invariant(collection->getDefaultCollator() == nullptr); + uassert( + 5875700, + str::stream() << "Cannot modify the '" << parentLsidFieldName << "' field of " + << NamespaceString::kSessionTransactionsTableNamespace.toStringForErrorMsg() + << " entries", + updateMod.getObjectField(parentLsidFieldName) + .woCompare(originalDoc.getObjectField(parentLsidFieldName)) == 0); + + invariant(collection.getCollectionPtr()->getDefaultCollator() == nullptr); boost::intrusive_ptr expCtx( new ExpressionContext(opCtx, nullptr, updateRequest.getNamespaceString())); @@ -468,12 +548,13 @@ void updateSessionEntry(OperationContext* opCtx, // Specify kUpdateNoIndexes because the sessions collection has two indexes: {_id: 1} and // {parentLsid: 1, _id.txnNumber: 1, _id: 1}, and none of the fields are mutable. 
collection_internal::updateDocument(opCtx, - *collection, + collection.getCollectionPtr(), recordId, Snapshotted(startingSnapshotId, originalDoc), updateMod, collection_internal::kUpdateNoIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); wuow.commit(); @@ -548,10 +629,10 @@ void TransactionParticipant::performNoopWrite(OperationContext* opCtx, StringDat AutoGetOplog oplogWrite(opCtx, OplogAccessMode::kWrite); uassert(ErrorCodes::NotWritablePrimary, "Not primary when performing noop write for {}"_format(msg), - replCoord->canAcceptWritesForDatabase(opCtx, "admin")); + replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)); writeConflictRetry( - opCtx, "performNoopWrite", NamespaceString::kRsOplogNamespace.ns(), [&opCtx, &msg] { + opCtx, "performNoopWrite", NamespaceString::kRsOplogNamespace, [&opCtx, &msg] { WriteUnitOfWork wuow(opCtx); opCtx->getClient()->getServiceContext()->getOpObserver()->onOpMessage( opCtx, BSON("msg" << msg)); @@ -567,6 +648,13 @@ TransactionParticipant::getOldestActiveTimestamp(Timestamp stableTimestamp) { // the server, and it both blocks this thread from querying config.transactions and waits for // this thread to terminate. auto client = getGlobalServiceContext()->makeClient("OldestActiveTxnTimestamp"); + + // TODO(SERVER-74656): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + AlternativeClientRegion acr(client); try { @@ -934,7 +1022,7 @@ void TransactionParticipant::Participant::beginOrContinue( auto replCoord = repl::ReplicationCoordinator::get(opCtx); uassert(ErrorCodes::NotWritablePrimary, "Not primary so we cannot begin or continue a transaction", - replCoord->canAcceptWritesForDatabase(opCtx, "admin")); + replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)); // Disallow multi-statement transactions on shard servers that have // writeConcernMajorityJournalDefault=false unless enableTestCommands=true. But allow // retryable writes (autocommit == boost::none). @@ -1087,10 +1175,6 @@ TransactionParticipant::Participant::onConflictingInternalTransactionCompletion( void TransactionParticipant::Participant::_setReadSnapshot(OperationContext* opCtx, repl::ReadConcernArgs readConcernArgs) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - bool pitLookupFeatureEnabled = - feature_flags::gPointInTimeCatalogLookups.isEnabledAndIgnoreFCVUnsafe(); - if (readConcernArgs.getArgsAtClusterTime()) { // Read concern code should have already set the timestamp on the recovery unit. const auto readTimestamp = readConcernArgs.getArgsAtClusterTime()->asTimestamp(); @@ -1119,42 +1203,25 @@ void TransactionParticipant::Participant::_setReadSnapshot(OperationContext* opC // Using 'kNoTimestamp' ensures that transactions with mode 'local' are always able to read // writes from earlier transactions with mode 'local' on the same connection. opCtx->recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kNoTimestamp); - // Catalog conflicting timestamps must be set on primaries performing transactions. - // However, secondaries performing oplog application must avoid setting - // _catalogConflictTimestamp. Currently, only oplog application on secondaries can run - // inside a transaction, thus `writesAreReplicated` is a suitable proxy to single out - // transactions on primaries. 
- if (!pitLookupFeatureEnabled && opCtx->writesAreReplicated()) { - // Since this snapshot may reflect oplog holes, record the most visible timestamp before - // opening a storage transaction. This timestamp will be used later to detect any - // changes in the catalog after a storage transaction is opened. - opCtx->recoveryUnit()->setCatalogConflictingTimestamp( - opCtx->getServiceContext()->getStorageEngine()->getAllDurableTimestamp()); - } } - - if (pitLookupFeatureEnabled) { - // Allocate the snapshot together with a consistent CollectionCatalog instance. As we have - // no critical section we use optimistic concurrency control and check that there was no - // write to the CollectionCatalog while we allocated the storage snapshot. Stash the catalog - // instance so collection lookups within this transaction are consistent with the snapshot. - auto catalog = CollectionCatalog::get(opCtx); - while (true) { - opCtx->recoveryUnit()->preallocateSnapshot(); - auto after = CollectionCatalog::get(opCtx); - if (catalog == after) { - // Catalog did not change, break out of the retry loop and use this instance - break; - } - // Catalog change detected, reallocate the snapshot and try again. - opCtx->recoveryUnit()->abandonSnapshot(); - catalog = std::move(after); - } - CollectionCatalog::stash(opCtx, std::move(catalog)); - } else { + // Allocate the snapshot together with a consistent CollectionCatalog instance. As we have no + // critical section we use optimistic concurrency control and check that there was no write to + // the CollectionCatalog while we allocated the storage snapshot. Stash the catalog instance so + // collection lookups within this transaction are consistent with the snapshot. + auto catalog = CollectionCatalog::get(opCtx); + while (true) { opCtx->recoveryUnit()->preallocateSnapshot(); + auto after = CollectionCatalog::get(opCtx); + if (catalog == after) { + // Catalog did not change, break out of the retry loop and use this instance + break; + } + // Catalog change detected, reallocate the snapshot and try again. + opCtx->recoveryUnit()->abandonSnapshot(); + catalog = std::move(after); } + CollectionCatalog::stash(opCtx, std::move(catalog)); } TransactionParticipant::OplogSlotReserver::OplogSlotReserver(OperationContext* opCtx, @@ -1224,8 +1291,8 @@ TransactionParticipant::TxnResources::TxnResources(WithLock wl, // On secondaries, we yield the locks for transactions. if (stashStyle == StashStyle::kSecondary) { _lockSnapshot = std::make_unique(); - // Transactions have at least a global IX lock. Invariant that we have something to release. - invariant(_locker->releaseWriteUnitOfWorkAndUnlock(_lockSnapshot.get())); + // Transactions have at least a global IX lock. + _locker->releaseWriteUnitOfWorkAndUnlock(_lockSnapshot.get()); } // This thread must still respect the transaction lock timeout, since it can prevent the @@ -1609,7 +1676,8 @@ void TransactionParticipant::Participant::refreshLocksForPreparedTransaction( o(lk).txnResourceStash = TxnResources(lk, opCtx, stashStyle); } -Timestamp TransactionParticipant::Participant::prepareTransaction( +std::pair> +TransactionParticipant::Participant::prepareTransaction( OperationContext* opCtx, boost::optional prepareOptime) { ScopeGuard abortGuard([&] { @@ -1620,8 +1688,7 @@ Timestamp TransactionParticipant::Participant::prepareTransaction( // This shouldn't cause deadlocks with other prepared txns, because the acquisition // of RSTL lock inside abortTransaction will be no-op since we already have it. 
// This abortGuard gets dismissed before we release the RSTL while transitioning to - // prepared. - // TODO (SERVER-71610): Fix to be interruptible or document exception. + // the prepared state. UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. abortTransaction(opCtx); } catch (...) { @@ -1648,12 +1715,17 @@ Timestamp TransactionParticipant::Participant::prepareTransaction( // collection multiple times: it is a costly check. auto transactionOperationUuids = completedTransactionOperations->getCollectionUUIDs(); auto catalog = CollectionCatalog::get(opCtx); + + std::vector affectedNamespaces; + affectedNamespaces.reserve(transactionOperationUuids.size()); + for (const auto& uuid : transactionOperationUuids) { auto collection = catalog->lookupCollectionByUUID(opCtx, uuid); + affectedNamespaces.emplace_back(collection->ns()); uassert(ErrorCodes::OperationNotSupportedInTransaction, str::stream() << "prepareTransaction failed because one of the transaction " "operations was done against a temporary collection '" - << collection->ns() << "'.", + << collection->ns().toStringForErrorMsg() << "'.", !collection->isTemporary()); } @@ -1665,7 +1737,9 @@ Timestamp TransactionParticipant::Participant::prepareTransaction( // prepared) transaction is killed, but it still ends up in the prepared state opCtx->checkForInterrupt(); o(lk).txnState.transitionTo(TransactionState::kPrepared); + o(lk).affectedNamespaces = affectedNamespaces; } + std::vector reservedSlots; if (prepareOptime) { // On secondary, we just prepare the transaction and discard the buffered ops. @@ -1701,23 +1775,27 @@ Timestamp TransactionParticipant::Participant::prepareTransaction( hangAfterReservingPrepareTimestamp.pauseWhileSet(); } } + auto applyOpsOplogSlotAndOperationAssignment = completedTransactionOperations->getApplyOpsInfo( + reservedSlots, + getMaxNumberOfTransactionOperationsInSingleOplogEntry(), + getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes(), + /*prepare=*/true); + auto opObserver = opCtx->getServiceContext()->getOpObserver(); const auto wallClockTime = opCtx->getServiceContext()->getFastClockSource()->now(); - auto applyOpsOplogSlotAndOperationAssignment = opObserver->preTransactionPrepare( - opCtx, reservedSlots, *completedTransactionOperations, wallClockTime); + opObserver->preTransactionPrepare(opCtx, + *completedTransactionOperations, + applyOpsOplogSlotAndOperationAssignment, + wallClockTime); opCtx->recoveryUnit()->setPrepareTimestamp(prepareOplogSlot.getTimestamp()); opCtx->getWriteUnitOfWork()->prepare(); p().needToWriteAbortEntry = true; - tassert(6278510, - "Operation assignments to applyOps entries should be present", - applyOpsOplogSlotAndOperationAssignment); - opObserver->onTransactionPrepare(opCtx, reservedSlots, *completedTransactionOperations, - *applyOpsOplogSlotAndOperationAssignment, + applyOpsOplogSlotAndOperationAssignment, p().transactionOperations.getNumberOfPrePostImagesToWrite(), wallClockTime); @@ -1749,7 +1827,7 @@ Timestamp TransactionParticipant::Participant::prepareTransaction( const bool unlocked = opCtx->lockState()->unlockRSTLforPrepare(); invariant(unlocked); - return prepareOplogSlot.getTimestamp(); + return {prepareOplogSlot.getTimestamp(), std::move(affectedNamespaces)}; } void TransactionParticipant::Participant::setPrepareOpTimeForRecovery(OperationContext* opCtx, @@ -1818,7 +1896,24 @@ void TransactionParticipant::Participant::commitUnpreparedTransaction(OperationC auto opObserver = opCtx->getServiceContext()->getOpObserver(); invariant(opObserver); 
- opObserver->onUnpreparedTransactionCommit(opCtx, *txnOps); + // Reserve all the optimes in advance, so we only need to get the optime mutex once. We + // reserve enough entries for all statements in the transaction. + std::vector reservedSlots; + if (!txnOps->isEmpty()) { + reservedSlots = LocalOplogInfo::get(opCtx)->getNextOpTimes( + opCtx, txnOps->numOperations() + txnOps->getNumberOfPrePostImagesToWrite()); + } + + // Serialize transaction statements to BSON and determine their assignment to "applyOps" + // entries. + const auto applyOpsOplogSlotAndOperationAssignment = + txnOps->getApplyOpsInfo(reservedSlots, + getMaxNumberOfTransactionOperationsInSingleOplogEntry(), + getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes(), + /*prepare=*/false); + + opObserver->onUnpreparedTransactionCommit( + opCtx, reservedSlots, *txnOps, applyOpsOplogSlotAndOperationAssignment); // Read-only transactions with all read concerns must wait for any data they read to be majority // committed. For local read concern this is to match majority read concern. For both local and @@ -1875,7 +1970,7 @@ void TransactionParticipant::Participant::commitPreparedTransaction( if (opCtx->writesAreReplicated()) { uassert(ErrorCodes::NotWritablePrimary, "Not primary so we cannot commit a prepared transaction", - replCoord->canAcceptWritesForDatabase(opCtx, "admin")); + replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)); } uassert( @@ -1907,8 +2002,8 @@ void TransactionParticipant::Participant::commitPreparedTransaction( // We can no longer uassert without terminating. unlockGuard.dismiss(); - // Once entering "committing with prepare" we cannot throw an exception. - // TODO (SERVER-71610): Fix to be interruptible or document exception. + // Once entering "committing with prepare" we cannot throw an exception, + // and therefore our lock acquisitions cannot be interruptible. UninterruptibleLockGuard noInterrupt(opCtx->lockState()); // NOLINT. // On secondary, we generate a fake empty oplog slot, since it's not used by opObserver. @@ -1943,28 +2038,25 @@ void TransactionParticipant::Participant::commitPreparedTransaction( invariant(!o().lastWriteOpTime.isNull()); auto commitOplogSlotOpTime = commitOplogEntryOpTime.value_or(commitOplogSlot); + + // If we are a primary committing a transaction that was split into smaller prepared + // transactions, cascade the commit. auto* splitPrepareManager = repl::ReplicationCoordinator::get(opCtx)->getSplitPrepareSessionManager(); if (opCtx->writesAreReplicated() && splitPrepareManager->isSessionSplit( _sessionId(), o().activeTxnNumberAndRetryCounter.getTxnNumber())) { - // If we are a primary committing a transaction that was split into smaller prepared - // transactions, use a special commit code path. - _commitSplitPreparedTxnOnPrimary(opCtx, - splitPrepareManager, - _sessionId(), - o().activeTxnNumberAndRetryCounter.getTxnNumber(), - commitTimestamp, - commitOplogSlot.getTimestamp()); - } else { - // If commitOplogEntryOpTime is a nullopt, then we grab the OpTime from the - // commitOplogSlot which will only be set if we are primary. Otherwise, the - // commitOplogEntryOpTime must have been passed in during secondary oplog application. 
- opCtx->recoveryUnit()->setCommitTimestamp(commitTimestamp); - opCtx->recoveryUnit()->setDurableTimestamp(commitOplogSlotOpTime.getTimestamp()); - _commitStorageTransaction(opCtx); + _commitSplitPreparedTxnOnPrimary( + opCtx, splitPrepareManager, commitTimestamp, commitOplogSlot.getTimestamp()); } + // If commitOplogEntryOpTime is a nullopt, then we grab the OpTime from commitOplogSlot + // which will only be set if we are primary. Otherwise, the commitOplogEntryOpTime must + // have been passed in during secondary oplog application. + opCtx->recoveryUnit()->setCommitTimestamp(commitTimestamp); + opCtx->recoveryUnit()->setDurableTimestamp(commitOplogSlotOpTime.getTimestamp()); + _commitStorageTransaction(opCtx); + auto opObserver = opCtx->getServiceContext()->getOpObserver(); invariant(opObserver); @@ -2012,11 +2104,12 @@ void TransactionParticipant::Participant::_commitStorageTransaction(OperationCon void TransactionParticipant::Participant::_commitSplitPreparedTxnOnPrimary( OperationContext* userOpCtx, repl::SplitPrepareSessionManager* splitPrepareManager, - const LogicalSessionId& userSessionId, - const TxnNumber& userTxnNumber, const Timestamp& commitTimestamp, const Timestamp& durableTimestamp) { + const auto& userSessionId = _sessionId(); + const auto& userTxnNumber = o().activeTxnNumberAndRetryCounter.getTxnNumber(); + for (const auto& sessInfos : splitPrepareManager->getSplitSessions(userSessionId, userTxnNumber).get()) { @@ -2030,8 +2123,14 @@ void TransactionParticipant::Participant::_commitSplitPreparedTxnOnPrimary( auto splitClientOwned = userOpCtx->getServiceContext()->makeClient("tempSplitClient"); auto splitOpCtx = splitClientOwned->makeOperationContext(); - AlternativeClientRegion acr(splitClientOwned); + // TODO(SERVER-74656): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*splitClientOwned.get()); + splitClientOwned.get()->setSystemOperationUnkillableByStepdown(lk); + } + + AlternativeClientRegion acr(splitClientOwned); std::unique_ptr checkedOutSession; repl::UnreplicatedWritesBlock notReplicated(splitOpCtx.get()); @@ -2067,9 +2166,6 @@ void TransactionParticipant::Participant::_commitSplitPreparedTxnOnPrimary( } splitPrepareManager->releaseSplitSessions(userSessionId, userTxnNumber); - userOpCtx->recoveryUnit()->setCommitTimestamp(commitTimestamp); - userOpCtx->recoveryUnit()->setDurableTimestamp(durableTimestamp); - this->_commitStorageTransaction(userOpCtx); } void TransactionParticipant::Participant::_finishCommitTransaction( @@ -2161,7 +2257,7 @@ void TransactionParticipant::Participant::_abortActivePreparedTransaction(Operat auto replCoord = repl::ReplicationCoordinator::get(opCtx); uassert(ErrorCodes::NotWritablePrimary, "Not primary so we cannot abort a prepared transaction", - replCoord->canAcceptWritesForDatabase(opCtx, "admin")); + replCoord->canAcceptWritesForDatabase(opCtx, DatabaseName::kAdmin)); } _abortActiveTransaction(opCtx, TransactionState::kPrepared); @@ -2171,17 +2267,11 @@ void TransactionParticipant::Participant::_abortActiveTransaction( OperationContext* opCtx, TransactionState::StateSet expectedStates) { invariant(!o().txnResourceStash); - // If this is a split-prepared transaction, cascade the abort. 
auto* splitPrepareManager = repl::ReplicationCoordinator::get(opCtx)->getSplitPrepareSessionManager(); - if (opCtx->writesAreReplicated() && + bool isSplitPreparedTxn = opCtx->writesAreReplicated() && splitPrepareManager->isSessionSplit(_sessionId(), - o().activeTxnNumberAndRetryCounter.getTxnNumber())) { - _abortSplitPreparedTxnOnPrimary(opCtx, - splitPrepareManager, - _sessionId(), - o().activeTxnNumberAndRetryCounter.getTxnNumber()); - } + o().activeTxnNumberAndRetryCounter.getTxnNumber()); if (!o().txnState.isInRetryableWriteMode()) { stdx::lock_guard lk(*opCtx->getClient()); @@ -2199,6 +2289,12 @@ void TransactionParticipant::Participant::_abortActiveTransaction( // abort oplog entry. OplogSlotReserver oplogSlotReserver(opCtx); + // If we are a primary aborting a transaction that was split into smaller prepared + // transactions, cascade the abort. + if (isSplitPreparedTxn) { + _abortSplitPreparedTxnOnPrimary(opCtx, splitPrepareManager); + } + // Clean up the transaction resources on the opCtx even if the transaction resources on the // session were not aborted. This actually aborts the storage-transaction. _cleanUpTxnResourceOnOpCtx(opCtx, TerminationCause::kAborted); @@ -2235,6 +2331,7 @@ void TransactionParticipant::Participant::_abortActiveTransaction( // is not cleaning up some internal TransactionParticipant state, updating metrics, or // logging the end of the transaction. That will either be cleaned up in the // ServiceEntryPoint's abortGuard or when the next transaction begins. + invariant(!isSplitPreparedTxn); _cleanUpTxnResourceOnOpCtx(opCtx, TerminationCause::kAborted); opObserver->onTransactionAbort(opCtx, boost::none); _finishAbortingActiveTransaction(opCtx, expectedStates); @@ -2242,19 +2339,26 @@ void TransactionParticipant::Participant::_abortActiveTransaction( } void TransactionParticipant::Participant::_abortSplitPreparedTxnOnPrimary( - OperationContext* opCtx, - repl::SplitPrepareSessionManager* splitPrepareManager, - const LogicalSessionId& sessionId, - const TxnNumber& txnNumber) { + OperationContext* userOpCtx, repl::SplitPrepareSessionManager* splitPrepareManager) { + + const auto& userSessionId = _sessionId(); + const auto& userTxnNumber = o().activeTxnNumberAndRetryCounter.getTxnNumber(); + // If there are split prepared sessions, it must be because this transaction was prepared // via an oplog entry applied as a secondary. for (const repl::SplitSessionInfo& sessionInfo : - splitPrepareManager->getSplitSessions(sessionId, txnNumber).get()) { + splitPrepareManager->getSplitSessions(userSessionId, userTxnNumber).get()) { - auto splitClientOwned = opCtx->getServiceContext()->makeClient("tempSplitClient"); + auto splitClientOwned = userOpCtx->getServiceContext()->makeClient("tempSplitClient"); auto splitOpCtx = splitClientOwned->makeOperationContext(); - AlternativeClientRegion acr(splitClientOwned); + // TODO(SERVER-74656): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*splitClientOwned.get()); + splitClientOwned.get()->setSystemOperationUnkillableByStepdown(lk); + } + + AlternativeClientRegion acr(splitClientOwned); std::unique_ptr checkedOutSession; repl::UnreplicatedWritesBlock notReplicated(splitOpCtx.get()); @@ -2275,7 +2379,7 @@ void TransactionParticipant::Participant::_abortSplitPreparedTxnOnPrimary( checkedOutSession->checkIn(splitOpCtx.get(), OperationContextSession::CheckInReason::kDone); } - splitPrepareManager->releaseSplitSessions(_sessionId(), + splitPrepareManager->releaseSplitSessions(userSessionId, o().activeTxnNumberAndRetryCounter.getTxnNumber()); } @@ -2336,7 +2440,7 @@ void TransactionParticipant::Participant::_abortTransactionOnSession(OperationCo if (o().txnResourceStash && opCtx->recoveryUnit()->getNoEvictionAfterRollback()) { o(lk).txnResourceStash->setNoEvictionAfterRollback(); } - _resetTransactionStateAndUnlock(&lk, nextState); + _resetTransactionStateAndUnlock(&lk, opCtx, nextState); _resetRetryableWriteState(); } @@ -2870,14 +2974,16 @@ void TransactionParticipant::Participant::_setNewTxnNumberAndRetryCounter( o(lk).transactionMetricsObserver.resetSingleTransactionStats(txnNumberAndRetryCounter); // Reset the transactional state - _resetTransactionStateAndUnlock(&lk, TransactionState::kNone); + _resetTransactionStateAndUnlock(&lk, opCtx, TransactionState::kNone); invariant(!lk); if (isParentSessionId(_sessionId())) { // Only observe parent sessions because retryable transactions begin the same txnNumber on // their parent session. OperationContextSession::observeNewTxnNumberStarted( - opCtx, _sessionId(), txnNumberAndRetryCounter.getTxnNumber()); + opCtx, + _sessionId(), + {txnNumberAndRetryCounter.getTxnNumber(), SessionCatalog::Provenance::kParticipant}); } } @@ -3147,6 +3253,7 @@ void TransactionParticipant::Participant::_refreshActiveTransactionParticipantsF << parentTxnParticipant._sessionId() << " to be " << *activeRetryableWriteTxnNumber << " found a " << NamespaceString::kSessionTransactionsTableNamespace + .toStringForErrorMsg() << " entry for an internal transaction for retryable writes with " << "transaction number " << *childLsid.getTxnNumber(), *childLsid.getTxnNumber() == *activeRetryableWriteTxnNumber); @@ -3258,7 +3365,7 @@ void TransactionParticipant::Participant::_resetRetryableWriteState() { } void TransactionParticipant::Participant::_resetTransactionStateAndUnlock( - stdx::unique_lock* lk, TransactionState::StateFlag state) { + stdx::unique_lock* lk, OperationContext* opCtx, TransactionState::StateFlag state) { invariant(lk && lk->owns_lock()); // If we are transitioning to kNone, we are either starting a new transaction or aborting a @@ -3286,6 +3393,12 @@ void TransactionParticipant::Participant::_resetTransactionStateAndUnlock( boost::optional temporary; swap(o(*lk).txnResourceStash, temporary); lk->unlock(); + + // Make sure we have a valid OperationContext set in the RecoveryUnit when it is destroyed as it + // is passed to registered rollback handlers. 
+ if (temporary && temporary->recoveryUnit()) { + temporary->recoveryUnit()->setOperationContext(opCtx); + } temporary = boost::none; } @@ -3313,7 +3426,7 @@ void TransactionParticipant::Participant::invalidate(OperationContext* opCtx) { retryableWriteTxnParticipantCatalog.invalidate(); } - _resetTransactionStateAndUnlock(&lk, TransactionState::kNone); + _resetTransactionStateAndUnlock(&lk, opCtx, TransactionState::kNone); } boost::optional TransactionParticipant::Participant::checkStatementExecuted( @@ -3441,7 +3554,10 @@ void TransactionParticipant::Participant::handleWouldChangeOwningShardError( invariant(opCtx->getTxnNumber()); stdx::lock_guard lk(*opCtx->getClient()); _resetRetryableWriteState(); - } else if (_isInternalSessionForRetryableWrite()) { + } else if (_isInternalSessionForRetryableWrite() && + // (Ignore FCV check): The feature flag is fully disabled. + feature_flags::gFeatureFlagUpdateDocumentShardKeyUsingTransactionApi + .isEnabledAndIgnoreFCVUnsafe()) { // If this was a retryable transaction, add a sentinel noop to the transaction's operations // so retries can detect that a WouldChangeOwningShard error was thrown and know to throw // IncompleteTransactionHistory. @@ -3463,8 +3579,7 @@ void TransactionParticipant::Participant::handleWouldChangeOwningShardError( invariant(wouldChangeOwningShardInfo->getUuid()); operation.setNss(*wouldChangeOwningShardInfo->getNs()); operation.setUuid(*wouldChangeOwningShardInfo->getUuid()); - ShardingWriteRouter shardingWriteRouter( - opCtx, *wouldChangeOwningShardInfo->getNs(), Grid::get(opCtx)->catalogCache()); + ShardingWriteRouter shardingWriteRouter(opCtx, *wouldChangeOwningShardInfo->getNs()); operation.setDestinedRecipient(shardingWriteRouter.getReshardingDestinedRecipient( wouldChangeOwningShardInfo->getPreImage())); @@ -3527,7 +3642,9 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit( onPrimaryTransactionalWrite.execute([&](const BSONObj& data) { const auto closeConnectionElem = data["closeConnection"]; if (closeConnectionElem.eoo() || closeConnectionElem.Bool()) { - opCtx->getClient()->session()->end(); + if (auto session = opCtx->getClient()->session()) { + session->end(); + } } const auto failBeforeCommitExceptionElem = data["failBeforeCommitExceptionCode"]; @@ -3541,4 +3658,15 @@ void TransactionParticipant::Participant::_registerUpdateCacheOnCommit( }); } +std::size_t getMaxNumberOfTransactionOperationsInSingleOplogEntry() { + tassert(6278503, + "gMaxNumberOfTransactionOperationsInSingleOplogEntry should be positive number", + gMaxNumberOfTransactionOperationsInSingleOplogEntry > 0); + return static_cast(gMaxNumberOfTransactionOperationsInSingleOplogEntry); +} + +std::size_t getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes() { + return static_cast(BSONObjMaxUserSize); +} + } // namespace mongo diff --git a/src/mongo/db/transaction/transaction_participant.h b/src/mongo/db/transaction/transaction_participant.h index 533bdc4826cfb..334d23b4acbe2 100644 --- a/src/mongo/db/transaction/transaction_participant.h +++ b/src/mongo/db/transaction/transaction_participant.h @@ -29,27 +29,51 @@ #pragma once +#include #include +#include +#include +#include +#include #include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/api_parameters.h" #include 
"mongo/db/catalog/uncommitted_multikey.h" +#include "mongo/db/client.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_stats.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/multi_key_path_tracker.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/update_request.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/split_prepare_session_manager.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session.h" #include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_txn_record_gen.h" #include "mongo/db/stats/single_transaction_stats.h" #include "mongo/db/storage/recovery_unit.h" #include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/transaction_metrics_observer.h" #include "mongo/db/transaction/transaction_operations.h" #include "mongo/idl/mutable_observer_registry.h" @@ -57,7 +81,10 @@ #include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -82,7 +109,7 @@ class TransactionParticipant { struct ObservableState; /** - * Indicates the state of the current multi-document transaction, if any. If the transaction is + * Indicates the state of the current multi-document transaction, if any. If the transaction is * in any state but kInProgress, no more operations can be collected. Once the transaction is in * kPrepared, the transaction is not allowed to abort outside of an 'abortTransaction' command. * At this point, aborting the transaction must log an 'abortTransaction' oplog entry. @@ -346,6 +373,10 @@ class TransactionParticipant { return o().txnState.isInRetryableWriteMode(); } + const std::vector& affectedNamespaces() const { + return o().affectedNamespaces; + } + /** * If this session is holding stashed locks in txnResourceStash, reports the current state * of the session using the provided builder. @@ -565,12 +596,13 @@ class TransactionParticipant { void unstashTransactionResources(OperationContext* opCtx, const std::string& cmdName); /** - * Puts a transaction into a prepared state and returns the prepareTimestamp. + * Puts a transaction into a prepared state and returns the prepareTimestamp and the list of + * affected namespaces. * * On secondary, the "prepareTimestamp" will be given in the oplog. */ - Timestamp prepareTransaction(OperationContext* opCtx, - boost::optional prepareOptime); + std::pair> prepareTransaction( + OperationContext* opCtx, boost::optional prepareOptime); /** * Sets the prepare optime used for recovery. @@ -858,8 +890,6 @@ class TransactionParticipant { // `durableTimestamp`. void _commitSplitPreparedTxnOnPrimary(OperationContext* opCtx, repl::SplitPrepareSessionManager* splitPrepareManager, - const LogicalSessionId& sessionId, - const TxnNumber& txnNumber, const Timestamp& commitTimestamp, const Timestamp& durableTimestamp); @@ -876,9 +906,7 @@ class TransactionParticipant { // split the storage writes into multiple RecoveryUnits. 
This method will be invoked by a // primary such that it looks for all recovery units and aborts them. void _abortSplitPreparedTxnOnPrimary(OperationContext* opCtx, - repl::SplitPrepareSessionManager* splitPrepareManager, - const LogicalSessionId& sessionId, - const TxnNumber& txnNumber); + repl::SplitPrepareSessionManager* splitPrepareManager); // Factors out code for clarity from _abortActiveTransaction. void _finishAbortingActiveTransaction(OperationContext* opCtx, @@ -1001,6 +1029,7 @@ class TransactionParticipant { // invalidating a transaction, or starting a new transaction. It releases the Client lock // before releasing this participant's locks and aborting its storage transaction. void _resetTransactionStateAndUnlock(stdx::unique_lock* lk, + OperationContext* opCtx, TransactionState::StateFlag state); /* Releases the resources held in *o().txnResources to the operation context. @@ -1165,6 +1194,9 @@ class TransactionParticipant { // Tracks and updates transaction metrics upon the appropriate transaction event. TransactionMetricsObserver transactionMetricsObserver; + + // Contains a list of affected namespaces to be reported to transaction coordinator. + std::vector affectedNamespaces; } _o; /** @@ -1315,4 +1347,27 @@ class RetryableWriteTransactionParticipantCatalog { bool _hasSeenIncomingConflictingRetryableTransaction{false}; }; +/** + * Returns maximum number of operations to pack into a single oplog entry, + * when multi-oplog format for transactions is in use. + * + * Stop packing when either number of transaction operations is reached, or when the + * next one would make the total size of operations larger than the maximum BSON Object + * User Size. We rely on the headroom between BSONObjMaxUserSize and + * BSONObjMaxInternalSize to cover the BSON overhead and the other "applyOps" entry + * fields. But if a single operation in the set exceeds BSONObjMaxUserSize, we still fit + * it, as a single max-length operation should be able to be packed into an "applyOps" + * entry. + */ +std::size_t getMaxNumberOfTransactionOperationsInSingleOplogEntry(); + +/** + * Returns maximum size (bytes) of operations to pack into a single oplog entry, + * when multi-oplog format for transactions is in use. + * + * Refer to getMaxNumberOfTransactionOperationsInSingleOplogEntry() comments for a + * description on packing transaction operations into "applyOps" entries. 
+ */ +std::size_t getMaxSizeOfTransactionOperationsInSingleOplogEntryBytes(); + } // namespace mongo diff --git a/src/mongo/db/transaction/transaction_participant_resource_yielder.cpp b/src/mongo/db/transaction/transaction_participant_resource_yielder.cpp index fd335bb64127f..071707f7ef8b4 100644 --- a/src/mongo/db/transaction/transaction_participant_resource_yielder.cpp +++ b/src/mongo/db/transaction/transaction_participant_resource_yielder.cpp @@ -29,6 +29,7 @@ #include "mongo/db/transaction/transaction_participant_resource_yielder.h" +#include "mongo/db/session/session.h" #include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/transaction/transaction_participant.h" diff --git a/src/mongo/db/transaction/transaction_participant_resource_yielder.h b/src/mongo/db/transaction/transaction_participant_resource_yielder.h index 346463db481c8..6b99b32060262 100644 --- a/src/mongo/db/transaction/transaction_participant_resource_yielder.h +++ b/src/mongo/db/transaction/transaction_participant_resource_yielder.h @@ -29,8 +29,15 @@ #pragma once +#include +#include + +#include + +#include "mongo/base/string_data.h" #include "mongo/db/operation_context.h" #include "mongo/db/resource_yielder.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp b/src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp index c04d37bf4c4b8..cfc861447b93b 100644 --- a/src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp +++ b/src/mongo/db/transaction/transaction_participant_retryable_writes_test.cpp @@ -27,32 +27,70 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include - -#include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/mock_repl_coord_server_fixture.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session.h" +#include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" 
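The transaction_participant.h comment above describes the policy for packing a large transaction into multiple "applyOps" oplog entries: keep appending operations to an entry until the per-entry operation count is reached or the next operation would push the accumulated size past the maximum BSON user size, while still admitting a single oversized operation so it can occupy one entry by itself. A minimal standalone sketch of that greedy policy follows; the `Op` type and function name are hypothetical stand-ins, not the server's implementation.

```cpp
#include <cstddef>
#include <string>
#include <vector>

// Hypothetical stand-in for a serialized transaction operation.
struct Op {
    std::string bson;  // serialized operation payload
};

// Greedily split `ops` into batches, honoring a maximum operation count and a
// maximum total byte size per batch. A batch always receives at least one
// operation, so a single oversized operation still gets its own entry.
std::vector<std::vector<Op>> packIntoApplyOps(const std::vector<Op>& ops,
                                              std::size_t maxOpsPerEntry,
                                              std::size_t maxBytesPerEntry) {
    std::vector<std::vector<Op>> batches;
    std::vector<Op> current;
    std::size_t currentBytes = 0;

    for (const auto& op : ops) {
        const std::size_t opBytes = op.bson.size();
        const bool countFull = current.size() >= maxOpsPerEntry;
        const bool sizeFull = !current.empty() && currentBytes + opBytes > maxBytesPerEntry;
        if (countFull || sizeFull) {
            batches.push_back(std::move(current));
            current.clear();
            currentBytes = 0;
        }
        current.push_back(op);
        currentBytes += opBytes;
    }
    if (!current.empty()) {
        batches.push_back(std::move(current));
    }
    return batches;
}
```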
-#include "mongo/db/transaction/server_transactions_metrics.h" +#include "mongo/db/session/session_txn_record_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" +#include "mongo/db/transaction/transaction_operations.h" #include "mongo/db/transaction/transaction_participant.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/stdx/future.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/net/socket_utils.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -121,9 +159,12 @@ class OpObserverMock : public OpObserverNoop { }; void onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) override { + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator = nullptr) override { ASSERT_TRUE(opCtx->lockState()->inAWriteUnitOfWork()); - OpObserverNoop::onUnpreparedTransactionCommit(opCtx, transactionOperations); uassert(ErrorCodes::OperationFailed, "onUnpreparedTransactionCommit() failed", @@ -163,12 +204,12 @@ class OpObserverMock : public OpObserverNoop { preparedTransactionCommitted = true; }; - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) override { + const CollectionDropType dropType, + bool markFromMigrate) override { // If the oplog is not disabled for this namespace, then we need to reserve an op time for // the drop. if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(opCtx, collectionName)) { diff --git a/src/mongo/db/transaction/transaction_participant_test.cpp b/src/mongo/db/transaction/transaction_participant_test.cpp index 1b38eba07e9a9..5d8934af3f752 100644 --- a/src/mongo/db/transaction/transaction_participant_test.cpp +++ b/src/mongo/db/transaction/transaction_participant_test.cpp @@ -27,46 +27,105 @@ * it in the license file. 
*/ +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/local_oplog_info.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/concurrency/replication_state_transition_lock_guard.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbhelpers.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/global_settings.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" #include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/mock_repl_coord_server_fixture.h" #include "mongo/db/repl/oplog.h" #include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/session/internal_session_pool.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/session_catalog.h" #include "mongo/db/session/session_catalog_mongod.h" #include "mongo/db/session/session_txn_record_gen.h" -#include "mongo/db/stats/fill_locker_info.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/shard_role.h" #include "mongo/db/storage/durable_history_pin.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/storage_stats.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/server_transactions_metrics.h" #include "mongo/db/transaction/session_catalog_mongod_transaction_interface_impl.h" #include "mongo/db/transaction/transaction_participant.h" #include "mongo/db/transaction/transaction_participant_gen.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/txn_retry_counter_too_old_info.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/s/session_catalog_router.h" +#include "mongo/s/transaction_router.h" #include "mongo/stdx/future.h" +#include 
"mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/tick_source.h" #include "mongo/util/tick_source_mock.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -108,12 +167,6 @@ repl::OplogEntry makeOplogEntry(repl::OpTime opTime, class OpObserverMock : public OpObserverNoop { public: - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) override; - void onTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, @@ -127,8 +180,12 @@ class OpObserverMock : public OpObserverNoop { std::function onTransactionPrepareFn = []() { }; - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) override; + void onUnpreparedTransactionCommit( + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator = nullptr) override; bool onUnpreparedTransactionCommitThrowsException = false; bool unpreparedTransactionCommitted = false; std::function&)> onUnpreparedTransactionCommitFn = @@ -154,24 +211,16 @@ class OpObserverMock : public OpObserverNoop { bool onTransactionAbortThrowsException = false; bool transactionAborted = false; - using OpObserver::onDropCollection; repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) override; + CollectionDropType dropType, + bool markFromMigrate) override; const repl::OpTime dropOpTime = {Timestamp(Seconds(100), 1U), 1LL}; }; -std::unique_ptr -OpObserverMock::preTransactionPrepare(OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) { - return std::make_unique(/*prepare=*/true); -} - void OpObserverMock::onTransactionPrepare( OperationContext* opCtx, const std::vector& reservedSlots, @@ -195,15 +244,22 @@ void OpObserverMock::onTransactionPrepare( } void OpObserverMock::onUnpreparedTransactionCommit( - OperationContext* opCtx, const TransactionOperations& transactionOperations) { + OperationContext* opCtx, + const std::vector& reservedSlots, + const TransactionOperations& transactionOperations, + const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, + OpStateAccumulator* opAccumulator) { ASSERT(opCtx->lockState()->inAWriteUnitOfWork()); - OpObserverNoop::onUnpreparedTransactionCommit(opCtx, transactionOperations); - uassert(ErrorCodes::OperationFailed, "onUnpreparedTransactionCommit() failed", !onUnpreparedTransactionCommitThrowsException); + ASSERT_EQUALS(transactionOperations.numOperations() + + transactionOperations.getNumberOfPrePostImagesToWrite(), + reservedSlots.size()); + ASSERT_FALSE(applyOpsOperationAssignment.prepare); + 
unpreparedTransactionCommitted = true; const auto& statements = transactionOperations.getOperationsForOpObserver(); onUnpreparedTransactionCommitFn(statements); @@ -240,7 +296,8 @@ repl::OpTime OpObserverMock::onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - const CollectionDropType dropType) { + const CollectionDropType dropType, + bool markFromMigrate) { // If the oplog is not disabled for this namespace, then we need to reserve an op time for the // drop. if (!repl::ReplicationCoordinator::get(opCtx)->isOplogDisabledFor(opCtx, collectionName)) { @@ -460,7 +517,10 @@ TEST_F(TxnParticipantTest, TransactionThrowsLockTimeoutIfLockIsUnavailable) { auto txnParticipant = TransactionParticipant::get(opCtx()); txnParticipant.unstashTransactionResources(opCtx(), "insert"); - { Lock::DBLock dbXLock(opCtx(), DatabaseName(boost::none, dbName), MODE_X); } + { + Lock::DBLock dbXLock( + opCtx(), DatabaseName::createDatabaseName_forTest(boost::none, dbName), MODE_X); + } txnParticipant.stashTransactionResources(opCtx()); auto clientWithDatabaseXLock = Client::releaseCurrent(); @@ -494,15 +554,23 @@ TEST_F(TxnParticipantTest, TransactionThrowsLockTimeoutIfLockIsUnavailable) { newTxnParticipant.unstashTransactionResources(newOpCtx.get(), "insert"); Date_t t1 = Date_t::now(); - ASSERT_THROWS_CODE(Lock::DBLock(newOpCtx.get(), DatabaseName(boost::none, dbName), MODE_X), - AssertionException, - ErrorCodes::LockTimeout); + ASSERT_THROWS_CODE( + Lock::DBLock(newOpCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, dbName), + MODE_X), + AssertionException, + ErrorCodes::LockTimeout); Date_t t2 = Date_t::now(); int defaultMaxTransactionLockRequestTimeoutMillis = 5; ASSERT_GTE(t2 - t1, Milliseconds(defaultMaxTransactionLockRequestTimeoutMillis)); // A non-conflicting lock acquisition should work just fine. - { Lock::DBLock tempLock(newOpCtx.get(), DatabaseName(boost::none, "NewTestDB"), MODE_X); } + { + Lock::DBLock tempLock( + newOpCtx.get(), + DatabaseName::createDatabaseName_forTest(boost::none, "NewTestDB"), + MODE_X); + } } // Restore the original client so that teardown works. Client::releaseCurrent(); @@ -683,7 +751,7 @@ TEST_F(TxnParticipantTest, EmptyPreparedTransactionCommit) { // The transaction machinery cannot store an empty locker. 
{ Lock::GlobalLock lk(opCtx(), MODE_IX, Date_t::now(), Lock::InterruptBehavior::kThrow); } - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); const auto commitTS = Timestamp(prepareTimestamp.getSecs(), prepareTimestamp.getInc() + 1); txnParticipant.commitPreparedTransaction(opCtx(), commitTS, {}); txnParticipant.stashTransactionResources(opCtx()); @@ -701,7 +769,7 @@ TEST_F(TxnParticipantTest, PrepareSucceedsWithNestedLocks) { Lock::GlobalLock lk2(opCtx(), MODE_IX, Date_t::now(), Lock::InterruptBehavior::kThrow); } - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); const auto commitTS = Timestamp(prepareTimestamp.getSecs(), prepareTimestamp.getInc() + 1); txnParticipant.commitPreparedTransaction(opCtx(), commitTS, {}); txnParticipant.stashTransactionResources(opCtx()); @@ -711,7 +779,7 @@ TEST_F(TxnParticipantTest, PrepareSucceedsWithNestedLocks) { TEST_F(TxnParticipantTest, PrepareFailsOnTemporaryCollection) { NamespaceString tempCollNss = - NamespaceString::createNamespaceString_forTest(kNss.db(), "tempCollection"); + NamespaceString::createNamespaceString_forTest(kNss.db_forTest(), "tempCollection"); UUID tempCollUUID = UUID::gen(); // Create a temporary collection so that we can write to it. @@ -751,7 +819,8 @@ TEST_F(TxnParticipantTest, CommitTransactionSetsCommitTimestampOnPreparedTransac // The transaction machinery cannot store an empty locker. { Lock::GlobalLock lk(opCtx(), MODE_IX, Date_t::now(), Lock::InterruptBehavior::kThrow); } - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto prepareResponse = txnParticipant.prepareTransaction(opCtx(), {}); + const auto& prepareTimestamp = prepareResponse.first; const auto commitTS = Timestamp(prepareTimestamp.getSecs(), prepareTimestamp.getInc() + 1); auto originalFn = _opObserver->onPreparedTransactionCommitFn; @@ -852,7 +921,8 @@ TEST_F(TxnParticipantTest, // The transaction machinery cannot store an empty locker. { Lock::GlobalLock lk(opCtx(), MODE_IX, Date_t::now(), Lock::InterruptBehavior::kThrow); } - auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto prepareResponse = txnParticipant.prepareTransaction(opCtx(), {}); + const auto& prepareTimestamp = prepareResponse.first; ASSERT_THROWS_CODE(txnParticipant.commitPreparedTransaction( opCtx(), Timestamp(prepareTimestamp.getSecs() - 1, 1), {}), AssertionException, @@ -893,7 +963,8 @@ TEST_F(TxnParticipantTest, KillOpBeforeCommittingPreparedTransaction) { // Prepare the transaction. txnParticipant.unstashTransactionResources(opCtx(), "prepareTransaction"); - auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto prepareResponse = txnParticipant.prepareTransaction(opCtx(), {}); + const auto& prepareTimestamp = prepareResponse.first; opCtx()->markKilled(ErrorCodes::Interrupted); try { // The commit should throw, since the operation was killed. @@ -938,7 +1009,8 @@ TEST_F(TxnParticipantTest, KillOpBeforeAbortingPreparedTransaction) { // Prepare the transaction. 
txnParticipant.unstashTransactionResources(opCtx(), "prepareTransaction"); - auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto prepareResponse = txnParticipant.prepareTransaction(opCtx(), {}); + const auto& prepareTimestamp = prepareResponse.first; opCtx()->markKilled(ErrorCodes::Interrupted); try { // The abort should throw, since the operation was killed. @@ -1031,12 +1103,13 @@ TEST_F(TxnParticipantTest, UnstashFailsShouldLeaveTxnResourceStashUnchanged) { // Simulate the locking of an insert. { - Lock::DBLock dbLock(opCtx(), DatabaseName(boost::none, "test"), MODE_IX); + Lock::DBLock dbLock( + opCtx(), DatabaseName::createDatabaseName_forTest(boost::none, "test"), MODE_IX); Lock::CollectionLock collLock( opCtx(), NamespaceString::createNamespaceString_forTest("test.foo"), MODE_IX); } - auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); // Simulate a secondary style lock stashing such that the locks are yielded. { @@ -1093,7 +1166,7 @@ TEST_F(TxnParticipantTest, StepDownAfterPrepareDoesNotBlockThenCommit) { auto txnParticipant = TransactionParticipant::get(opCtx()); txnParticipant.unstashTransactionResources(opCtx(), "prepareTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); // Test that we can acquire the RSTL in mode X, and then immediately release it so the test can // complete successfully. @@ -1166,7 +1239,7 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitFails) { auto txnParticipant = TransactionParticipant::get(opCtx()); txnParticipant.unstashTransactionResources(opCtx(), "prepareTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); const auto commitTS = Timestamp(prepareTimestamp.getSecs(), prepareTimestamp.getInc() + 1); ASSERT_OK(repl::ReplicationCoordinator::get(opCtx())->setFollowerMode( @@ -1188,7 +1261,8 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedAbortReleasesRSTL) { // Simulate the locking of an insert. { - Lock::DBLock dbLock(opCtx(), DatabaseName(boost::none, "test"), MODE_IX); + Lock::DBLock dbLock( + opCtx(), DatabaseName::createDatabaseName_forTest(boost::none, "test"), MODE_IX); Lock::CollectionLock collLock( opCtx(), NamespaceString::createNamespaceString_forTest("test.foo"), MODE_IX); } @@ -1242,7 +1316,8 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitReleasesRSTL) { // Simulate the locking of an insert. 
{ - Lock::DBLock dbLock(opCtx(), DatabaseName(boost::none, "test"), MODE_IX); + Lock::DBLock dbLock( + opCtx(), DatabaseName::createDatabaseName_forTest(boost::none, "test"), MODE_IX); Lock::CollectionLock collLock( opCtx(), NamespaceString::createNamespaceString_forTest("test.foo"), MODE_IX); } @@ -1255,7 +1330,8 @@ TEST_F(TxnParticipantTest, StepDownDuringPreparedCommitReleasesRSTL) { txnParticipant.unstashTransactionResources(opCtx(), "prepareTransaction"); ASSERT_EQ(opCtx()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock), MODE_IX); - auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto prepareResponse = txnParticipant.prepareTransaction(opCtx(), {}); + const auto& prepareTimestamp = prepareResponse.first; ASSERT_EQ(opCtx()->lockState()->getLockMode(resourceIdReplicationStateTransitionLock), MODE_NONE); txnParticipant.stashTransactionResources(opCtx()); @@ -1430,7 +1506,7 @@ TEST_F(TxnParticipantTest, CannotStartNewTransactionWhilePreparedTransactionInPr }; // Check that prepareTimestamp gets set. - auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); ASSERT_EQ(ruPrepareTimestamp, prepareTimestamp); txnParticipant.stashTransactionResources(opCtx()); @@ -1621,6 +1697,46 @@ TEST_F(TxnParticipantTest, CorrectlyStashAPIParameters) { ASSERT_TRUE(*storedAPIParams.getAPIDeprecationErrors()); } +TEST_F(TxnParticipantTest, PrepareReturnsAListOfAffectedNamespaces) { + const std::vector kNamespaces = { + NamespaceString::createNamespaceString_forTest("TestDB1", "TestColl1"), + NamespaceString::createNamespaceString_forTest("TestDB1", "TestColl2"), + NamespaceString::createNamespaceString_forTest("TestDB2", "TestColl1")}; + + std::vector uuids; + uuids.reserve(kNamespaces.size()); + + // Create collections + for (const auto& nss : kNamespaces) { + AutoGetDb autoDb(opCtx(), nss.dbName(), MODE_X); + auto db = autoDb.ensureDbExists(opCtx()); + ASSERT_TRUE(db); + + WriteUnitOfWork wuow(opCtx()); + CollectionOptions options; + auto collection = db->createCollection(opCtx(), nss, options); + wuow.commit(); + uuids.push_back(collection->uuid()); + } + + auto sessionCheckout = checkOutSession(); + + auto txnParticipant = TransactionParticipant::get(opCtx()); + ASSERT(txnParticipant.transactionIsOpen()); + + txnParticipant.unstashTransactionResources(opCtx(), "insert"); + for (size_t collIndex = 0; collIndex < kNamespaces.size(); ++collIndex) { + auto operation = repl::DurableOplogEntry::makeInsertOperation( + kNamespaces[collIndex], uuids[collIndex], BSON("_id" << 0), BSON("_id" << 0)); + txnParticipant.addTransactionOperation(opCtx(), operation); + } + auto [timestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); + ASSERT_EQ(namespaces, txnParticipant.affectedNamespaces()); + + std::sort(namespaces.begin(), namespaces.end()); + ASSERT_EQ(namespaces, kNamespaces); +} + /** * Test fixture for testing behavior that depends on a server's cluster role. 
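The PrepareReturnsAListOfAffectedNamespaces test above sorts the namespaces returned by prepareTransaction() before comparing them to the collections it created, since the reported order is not assumed to be sorted. A tiny standalone sketch of producing a sorted, de-duplicated namespace list from a transaction's operations; the string-based representation is a hypothetical simplification:

```cpp
#include <set>
#include <string>
#include <vector>

// Hypothetical: each operation records the namespace it targets as a string.
// A std::set both de-duplicates and orders the names.
std::vector<std::string> affectedNamespacesOf(const std::vector<std::string>& opNamespaces) {
    std::set<std::string> unique(opNamespaces.begin(), opNamespaces.end());
    return {unique.begin(), unique.end()};
}
```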
* @@ -1813,7 +1929,7 @@ class ConfigTxnParticipantTest : public ShardedClusterParticipantTest { protected: void setUp() final { TxnParticipantTest::setUp(); - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; } void tearDown() final { @@ -1912,7 +2028,8 @@ TEST_F(TxnParticipantTest, ReacquireLocksForPreparedTransactionsOnStepUp) { txnParticipant.unstashTransactionResources(opCtx(), "prepareTransaction"); // Simulate the locking of an insert. { - Lock::DBLock dbLock(opCtx(), DatabaseName(boost::none, "test"), MODE_IX); + Lock::DBLock dbLock( + opCtx(), DatabaseName::createDatabaseName_forTest(boost::none, "test"), MODE_IX); Lock::CollectionLock collLock( opCtx(), NamespaceString::createNamespaceString_forTest("test.foo"), MODE_IX); } @@ -2003,7 +2120,7 @@ TEST_F(TransactionsMetricsTest, IncrementTotalPreparedThenCommitted) { auto txnParticipant = TransactionParticipant::get(opCtx()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); unsigned long long beforePreparedThenCommittedCount = ServerTransactionsMetrics::get(opCtx())->getTotalPreparedThenCommitted(); @@ -2053,7 +2170,7 @@ TEST_F(TransactionsMetricsTest, IncrementCurrentPreparedWithCommit) { auto txnParticipant = TransactionParticipant::get(opCtx()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); ASSERT_EQ(ServerTransactionsMetrics::get(opCtx())->getCurrentPrepared(), beforeCurrentPrepared + 1U); @@ -2277,7 +2394,7 @@ TEST_F(TransactionsMetricsTest, TrackCurrentActiveAndInactivePreparedTransaction auto txnParticipant = TransactionParticipant::get(opCtx()); txnParticipant.unstashTransactionResources(opCtx(), "prepareTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); ASSERT_EQ(ServerTransactionsMetrics::get(opCtx())->getCurrentActive(), beforeActivePreparedCounter + 1U); @@ -2417,7 +2534,7 @@ TEST_F(TransactionsMetricsTest, SingleTransactionStatsPreparedDurationShouldBeSe tickSource->advance(Microseconds(10)); // Prepare the transaction and extend the duration in the prepared state. - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); tickSource->advance(Microseconds(100)); @@ -2505,7 +2622,7 @@ TEST_F(TransactionsMetricsTest, { Lock::GlobalLock lk(opCtx(), MODE_IX, Date_t::now(), Lock::InterruptBehavior::kThrow); } // Prepare the transaction and extend the duration in the prepared state. - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); tickSource->advance(Microseconds(100)); // The prepared transaction's duration should have increased. @@ -3028,7 +3145,7 @@ TEST_F(TransactionsMetricsTest, ReportStashedResources) { ASSERT(opCtx()->lockState()->isLocked()); // Prepare the transaction and extend the duration in the prepared state. 
- const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); const long preparedDuration = 10; tickSource->advance(Microseconds(preparedDuration)); @@ -3724,7 +3841,7 @@ TEST_F(TransactionsMetricsTest, TestPreparedTransactionInfoForLogAfterCommit) { // Prepare the transaction and extend the duration in the prepared state. auto txnParticipant = TransactionParticipant::get(opCtx()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); tickSource->advance(Microseconds(10)); @@ -3968,7 +4085,7 @@ TEST_F(TransactionsMetricsTest, LogPreparedTransactionInfoAfterSlowCommit) { tickSource->advance(Microseconds(11 * 1000)); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); startCapturingLogMessages(); txnParticipant.commitPreparedTransaction(opCtx(), prepareTimestamp, {}); @@ -4332,7 +4449,7 @@ TEST_F(TxnParticipantTest, RollbackResetsInMemoryStateOfPreparedTransaction) { ASSERT_BSONOBJ_EQ(operation.toBSON(), txnParticipant.getTransactionOperationsForTest()[0].toBSON()); - auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); ASSERT_FALSE(txnParticipant.transactionIsAborted()); @@ -4365,7 +4482,8 @@ TEST_F(TxnParticipantTest, PrepareTransactionAsSecondarySetsThePrepareOpTime) { auto txnParticipant = TransactionParticipant::get(opCtx()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), prepareOpTime); + const auto [prepareTimestamp, namespaces] = + txnParticipant.prepareTransaction(opCtx(), prepareOpTime); ASSERT(txnParticipant.transactionIsPrepared()); ASSERT_EQ(prepareTimestamp, prepareOpTime.getTimestamp()); ASSERT_EQ(txnParticipant.getPrepareOpTime(), prepareOpTime); @@ -4390,7 +4508,8 @@ TEST_F(TxnParticipantTest, CommitPreparedTransactionAsSecondarySetsTheFinishOpTi ASSERT(!opCtx()->writesAreReplicated()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), prepareOpTime); + const auto [prepareTimestamp, namespaces] = + txnParticipant.prepareTransaction(opCtx(), prepareOpTime); ASSERT(txnParticipant.transactionIsPrepared()); ASSERT_EQ(prepareTimestamp, prepareOpTime.getTimestamp()); ASSERT_EQ(txnParticipant.getPrepareOpTime(), prepareOpTime); @@ -4414,7 +4533,8 @@ DEATH_TEST_F(TxnParticipantTest, ASSERT(!opCtx()->writesAreReplicated()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), prepareOpTime); + const auto [prepareTimestamp, namespaces] = + txnParticipant.prepareTransaction(opCtx(), prepareOpTime); ASSERT(txnParticipant.transactionIsPrepared()); ASSERT_EQ(prepareTimestamp, prepareOpTime.getTimestamp()); ASSERT_EQ(txnParticipant.getPrepareOpTime(), prepareOpTime); @@ -4432,7 +4552,8 @@ DEATH_TEST_F(TxnParticipantTest, auto txnParticipant = TransactionParticipant::get(opCtx()); 
txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), prepareOpTime); + const auto [prepareTimestamp, namespaces] = + txnParticipant.prepareTransaction(opCtx(), prepareOpTime); ASSERT(txnParticipant.transactionIsPrepared()); ASSERT_EQ(prepareTimestamp, prepareOpTime.getTimestamp()); ASSERT_EQ(txnParticipant.getPrepareOpTime(), prepareOpTime); @@ -4657,7 +4778,8 @@ TEST_F(TxnParticipantTest, ExitPreparePromiseIsFulfilledOnCommitAfterPrepare) { ASSERT_TRUE(txnParticipant.onExitPrepare().isReady()); const auto prepareOpTime = repl::OpTime({3, 2}, 0); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), prepareOpTime); + const auto [prepareTimestamp, namespaces] = + txnParticipant.prepareTransaction(opCtx(), prepareOpTime); const auto exitPrepareFuture = txnParticipant.onExitPrepare(); ASSERT_FALSE(exitPrepareFuture.isReady()); @@ -4807,7 +4929,7 @@ TEST_F(ShardTxnParticipantTest, auto txnParticipant = TransactionParticipant::get(opCtx()); ASSERT(txnParticipant.transactionIsInProgress()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); const auto commitTS = Timestamp(prepareTimestamp.getSecs(), prepareTimestamp.getInc() + 1); txnParticipant.commitPreparedTransaction(opCtx(), commitTS, {}); ASSERT_TRUE(txnParticipant.transactionIsCommitted()); @@ -5502,7 +5624,7 @@ TEST_F(ShardTxnParticipantTest, CanRetryCommittedPreparedTransactionForRetryable auto txnParticipant = TransactionParticipant::get(opCtx()); ASSERT(txnParticipant.transactionIsInProgress()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); const auto commitTS = Timestamp(prepareTimestamp.getSecs(), prepareTimestamp.getInc() + 1); txnParticipant.commitPreparedTransaction(opCtx(), commitTS, {}); ASSERT_TRUE(txnParticipant.transactionIsCommitted()); @@ -5533,7 +5655,7 @@ TEST_F(ShardTxnParticipantTest, AbortingCommittedPreparedTransactionForRetryable auto txnParticipant = TransactionParticipant::get(opCtx()); ASSERT(txnParticipant.transactionIsInProgress()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); const auto commitTS = Timestamp(prepareTimestamp.getSecs(), prepareTimestamp.getInc() + 1); txnParticipant.commitPreparedTransaction(opCtx(), commitTS, {}); ASSERT_TRUE(txnParticipant.transactionIsCommitted()); @@ -5567,7 +5689,7 @@ TEST_F(ShardTxnParticipantTest, CannotAddOperationToCommittedPreparedTransaction auto txnParticipant = TransactionParticipant::get(opCtx()); ASSERT(txnParticipant.transactionIsInProgress()); txnParticipant.unstashTransactionResources(opCtx(), "commitTransaction"); - const auto prepareTimestamp = txnParticipant.prepareTransaction(opCtx(), {}); + const auto [prepareTimestamp, namespaces] = txnParticipant.prepareTransaction(opCtx(), {}); const auto commitTS = Timestamp(prepareTimestamp.getSecs(), prepareTimestamp.getInc() + 1); txnParticipant.commitPreparedTransaction(opCtx(), commitTS, {}); 
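The call sites above consume prepareTransaction()'s new pair return value in two styles: a structured binding where both elements get names directly, and a named pair plus `.first` where only the timestamp is needed later, for instance inside a macro or lambda; one plausible reason for the second form is that structured binding names cannot appear in lambda captures before C++20. A generic illustration, unrelated to the server's types:

```cpp
#include <string>
#include <utility>

std::pair<int, std::string> makePair() {
    return {42, "hello"};
}

void consume() {
    // Structured binding: both elements are named directly.
    const auto [num, text] = makePair();

    // Named pair + .first: handy when the element must be captured by a
    // lambda, since structured binding names cannot be captured before C++20.
    const auto result = makePair();
    auto addOne = [&result] { return result.first + 1; };

    (void)num;
    (void)text;
    (void)addOne();
}
```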
ASSERT_TRUE(txnParticipant.transactionIsCommitted()); @@ -6211,9 +6333,11 @@ TEST_F(TxnParticipantTest, CommitSplitPreparedTransaction) { // Update `2` to increment its `value` to 2. This must be done in the same split session as the // insert. callUnderSplitSession(splitSessions[1].session, [nullOpDbg](OperationContext* opCtx) { - AutoGetCollection userColl(opCtx, kNss, LockMode::MODE_IX); - Helpers::update( - opCtx, userColl->ns(), BSON("_id" << 2), BSON("$inc" << BSON("value" << 1))); + auto userColl = acquireCollection( + opCtx, + CollectionAcquisitionRequest::fromOpCtx(opCtx, kNss, AcquisitionPrerequisites::kWrite), + MODE_IX); + Helpers::update(opCtx, userColl, BSON("_id" << 2), BSON("$inc" << BSON("value" << 1))); }); // Mimic the methods to call for a secondary performing a split prepare. Those are called inside @@ -6789,5 +6913,123 @@ TEST_F(ShardTxnParticipantTest, CheckingOutEagerlyReapedSessionDoesNotCrash) { ErrorCodes::TransactionTooOld); } +class TxnParticipantAndTxnRouterTest : public TxnParticipantTest { +protected: + bool doesExistInCatalog(const LogicalSessionId& lsid, SessionCatalog* sessionCatalog) { + bool existsInCatalog{false}; + sessionCatalog->scanSession( + lsid, [&](const ObservableSession& session) { existsInCatalog = true; }); + return existsInCatalog; + } + + void runRouterTransactionLeaveOpen(LogicalSessionId lsid, TxnNumber txnNumber) { + runFunctionFromDifferentOpCtx([&](OperationContext* opCtx) { + opCtx->setLogicalSessionId(lsid); + opCtx->setTxnNumber(txnNumber); + opCtx->setInMultiDocumentTransaction(); + auto opCtxSession = std::make_unique(opCtx); + + auto txnRouter = TransactionRouter::get(opCtx); + txnRouter.beginOrContinueTxn( + opCtx, *opCtx->getTxnNumber(), TransactionRouter::TransactionActions::kStart); + }); + } + + void runParticipantTransactionLeaveOpen(LogicalSessionId lsid, TxnNumber txnNumber) { + runFunctionFromDifferentOpCtx([&](OperationContext* opCtx) { + opCtx->setLogicalSessionId(lsid); + opCtx->setTxnNumber(txnNumber); + opCtx->setInMultiDocumentTransaction(); + auto opCtxSession = MongoDSessionCatalog::get(opCtx)->checkOutSession(opCtx); + + auto txnParticipant = TransactionParticipant::get(opCtx); + txnParticipant.beginOrContinue(opCtx, + {*opCtx->getTxnNumber()}, + false /* autocommit */, + true /* startTransaction */); + }); + } +}; + +TEST_F(TxnParticipantAndTxnRouterTest, SkipEagerReapingSessionUsedByParticipantFromRouter) { + auto sessionCatalog = SessionCatalog::get(getServiceContext()); + ASSERT_EQ(sessionCatalog->size(), 0); + + // Add a parent session with two retryable children. + + auto parentLsid = makeLogicalSessionIdForTest(); + auto parentTxnNumber = 0; + runRouterTransactionLeaveOpen(parentLsid, parentTxnNumber); + + auto retryableChildLsid = + makeLogicalSessionIdWithTxnNumberAndUUIDForTest(parentLsid, parentTxnNumber); + runRouterTransactionLeaveOpen(retryableChildLsid, 0); + + auto retryableChildLsidReapable = + makeLogicalSessionIdWithTxnNumberAndUUIDForTest(parentLsid, parentTxnNumber); + runRouterTransactionLeaveOpen(retryableChildLsidReapable, 0); + + ASSERT_EQ(sessionCatalog->size(), 1); + ASSERT(doesExistInCatalog(parentLsid, sessionCatalog)); + ASSERT(doesExistInCatalog(retryableChildLsid, sessionCatalog)); + ASSERT(doesExistInCatalog(retryableChildLsidReapable, sessionCatalog)); + + // Use one retryable session with a TransactionParticipant and verify this blocks the router + // role from reaping it. 
+ + runParticipantTransactionLeaveOpen(retryableChildLsid, 0); + + // Start a higher txnNumber client transaction in the router role and verify the child used with + // TransactionParticipant was not erased but the other one was. + + parentTxnNumber++; + runRouterTransactionLeaveOpen(parentLsid, parentTxnNumber); + + ASSERT_EQ(sessionCatalog->size(), 1); + ASSERT(doesExistInCatalog(parentLsid, sessionCatalog)); + ASSERT(doesExistInCatalog(retryableChildLsid, sessionCatalog)); + ASSERT_FALSE(doesExistInCatalog(retryableChildLsidReapable, sessionCatalog)); + + // Verify the participant role can reap the child. + + auto higherRetryableChildLsid = + makeLogicalSessionIdWithTxnNumberAndUUIDForTest(parentLsid, parentTxnNumber); + runParticipantTransactionLeaveOpen(higherRetryableChildLsid, 5); + + ASSERT_EQ(sessionCatalog->size(), 1); + ASSERT(doesExistInCatalog(parentLsid, sessionCatalog)); + ASSERT(doesExistInCatalog(higherRetryableChildLsid, sessionCatalog)); + ASSERT_FALSE(doesExistInCatalog(retryableChildLsid, sessionCatalog)); + ASSERT_FALSE(doesExistInCatalog(retryableChildLsidReapable, sessionCatalog)); + + // Sanity check that higher txnNumbers are reaped correctly and eager reaping only applies to + // parent and children sessions in the same "family." + + auto parentLsid2 = makeLogicalSessionIdForTest(); + auto parentTxnNumber2 = parentTxnNumber + 11; + runParticipantTransactionLeaveOpen(parentLsid2, parentTxnNumber2); + + auto retryableChildLsid2 = + makeLogicalSessionIdWithTxnNumberAndUUIDForTest(parentLsid2, parentTxnNumber2); + runRouterTransactionLeaveOpen(retryableChildLsid2, 12131); + + ASSERT_EQ(sessionCatalog->size(), 2); + ASSERT(doesExistInCatalog(parentLsid, sessionCatalog)); + ASSERT(doesExistInCatalog(higherRetryableChildLsid, sessionCatalog)); + ASSERT(doesExistInCatalog(parentLsid2, sessionCatalog)); + ASSERT(doesExistInCatalog(retryableChildLsid2, sessionCatalog)); + + parentTxnNumber2++; + runParticipantTransactionLeaveOpen(parentLsid2, parentTxnNumber2); + + // The unrelated sessions still exist and the superseded child was reaped. 
+ ASSERT_EQ(sessionCatalog->size(), 2); + ASSERT(doesExistInCatalog(parentLsid, sessionCatalog)); + ASSERT(doesExistInCatalog(higherRetryableChildLsid, sessionCatalog)); + ASSERT(doesExistInCatalog(parentLsid2, sessionCatalog)); + ASSERT_FALSE(doesExistInCatalog(retryableChildLsid2, sessionCatalog)); + ASSERT_FALSE(doesExistInCatalog(retryableChildLsid2, sessionCatalog)); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/transaction_resources.cpp b/src/mongo/db/transaction_resources.cpp index 6e0c95b9eae51..0c243fcda5b30 100644 --- a/src/mongo/db/transaction_resources.cpp +++ b/src/mongo/db/transaction_resources.cpp @@ -29,17 +29,118 @@ #include "mongo/db/transaction_resources.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/db/logical_time.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" + namespace mongo { + +const PlacementConcern AcquisitionPrerequisites::kPretendUnsharded = + PlacementConcern{boost::none, boost::none}; + namespace shard_role_details { +namespace { -TransactionResources::TransactionResources(repl::ReadConcernArgs readConcern) - : readConcern(std::move(readConcern)) {} +auto getTransactionResources = OperationContext::declareDecoration< + std::unique_ptr>(); + +/** + * This method ensures that two read concerns are equivalent for the purposes of acquiring a + * transactional snapshot. Equivalence means that they don't acquire snapshot at conflicting levels, + * such as one operation asking for local and a subsequent one for majority. Similarly, we can't + * have two subsequent acquisitions asking for snapshots at two different timestamps. + */ +void assertReadConcernsAreEquivalent(const repl::ReadConcernArgs& rc1, + const repl::ReadConcernArgs& rc2) { + tassert(771230, + str::stream() << "Acquired two different collections on the same transaction with " + "read concerns that are not equivalent (" + << rc1.toString() << " != " << rc2.toString() << ")", + rc1.getLevel() == rc2.getLevel() && + rc1.getArgsAtClusterTime() == rc2.getArgsAtClusterTime()); +} + +} // namespace + +TransactionResources::TransactionResources() = default; + +TransactionResources::~TransactionResources() { + invariant(!locker); + invariant(acquiredCollections.empty()); + invariant(acquiredViews.empty()); + invariant(collectionAcquisitionReferences == 0); + invariant(viewAcquisitionReferences == 0); + invariant(!yielded); +} + +TransactionResources& TransactionResources::get(OperationContext* opCtx) { + auto& transactionResources = getTransactionResources(opCtx); + invariant(transactionResources, + "Cannot obtain TransactionResources as they've been detached from the opCtx in order " + "to yield"); + return *transactionResources; +} + +std::unique_ptr TransactionResources::detachFromOpCtx( + OperationContext* opCtx) { + auto& transactionResources = getTransactionResources(opCtx); + invariant(transactionResources); + return std::move(transactionResources); +} + +void TransactionResources::attachToOpCtx( + OperationContext* opCtx, std::unique_ptr newTransactionResources) { + auto& transactionResources = getTransactionResources(opCtx); + invariant(!transactionResources); + transactionResources = std::move(newTransactionResources); +} + +AcquiredCollection& TransactionResources::addAcquiredCollection( + AcquiredCollection&& acquiredCollection) { + if (!readConcern) { + readConcern = 
acquiredCollection.prerequisites.readConcern; + } + + invariant(state != State::FAILED, "Cannot make a new acquisition in the FAILED state"); + invariant(state != State::YIELDED, "Cannot make a new acquisition in the YIELDED state"); + + assertReadConcernsAreEquivalent(*readConcern, acquiredCollection.prerequisites.readConcern); + + if (state == State::EMPTY) { + state = State::ACTIVE; + } + + return acquiredCollections.emplace_back(std::move(acquiredCollection)); +} + +const AcquiredView& TransactionResources::addAcquiredView(AcquiredView&& acquiredView) { + invariant(state != State::FAILED, "Cannot make a new acquisition in the FAILED state"); + invariant(state != State::YIELDED, "Cannot make a new acquisition in the YIELDED state"); + + if (state == State::EMPTY) { + state = State::ACTIVE; + } + + return acquiredViews.emplace_back(std::move(acquiredView)); +} void TransactionResources::releaseAllResourcesOnCommitOrAbort() noexcept { + readConcern.reset(); locker.reset(); - lockSnapshot.reset(); acquiredCollections.clear(); acquiredViews.clear(); + yielded.reset(); } void TransactionResources::assertNoAcquiredCollections() const { @@ -48,17 +149,10 @@ void TransactionResources::assertNoAcquiredCollections() const { std::stringstream ss("Found acquired collections:"); for (const auto& acquisition : acquiredCollections) { - ss << "\n" << acquisition.prerequisites.nss; + ss << "\n" << acquisition.prerequisites.nss.toStringForErrorMsg(); } fassertFailedWithStatus(737660, Status{ErrorCodes::InternalError, ss.str()}); } -TransactionResources::~TransactionResources() { - invariant(!locker); - invariant(!lockSnapshot); - invariant(acquiredCollections.empty()); - invariant(acquiredViews.empty()); -} - } // namespace shard_role_details } // namespace mongo diff --git a/src/mongo/db/transaction_resources.h b/src/mongo/db/transaction_resources.h index a0a03c8799c4f..373a4055112db 100644 --- a/src/mongo/db/transaction_resources.h +++ b/src/mongo/db/transaction_resources.h @@ -29,15 +29,25 @@ #pragma once +#include +#include +#include #include +#include +#include +#include #include "mongo/db/catalog/collection.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/locker.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" #include "mongo/db/s/scoped_collection_metadata.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/views/view.h" +#include "mongo/s/database_version.h" #include "mongo/s/shard_version.h" #include "mongo/util/uuid.h" @@ -49,6 +59,11 @@ struct PlacementConcern { }; struct AcquisitionPrerequisites { + // Pretends that the collection is unsharded. Acquisitions with this PlacementConcern will have + // always have UNSHARDED description and filter, even if they are sharded. Only for use in + // internal code paths that require it. Possible data loss if used incorrectly! 
+ static const PlacementConcern kPretendUnsharded; + enum PlacementConcernPlaceholder { /** * Special PlacementConcern which mimics direct connection to a shard, causing the @@ -71,11 +86,13 @@ struct AcquisitionPrerequisites { AcquisitionPrerequisites(NamespaceString nss, boost::optional uuid, + repl::ReadConcernArgs readConcern, PlacementConcernVariant placementConcern, OperationType operationType, ViewMode viewMode) : nss(std::move(nss)), uuid(std::move(uuid)), + readConcern(std::move(readConcern)), placementConcern(std::move(placementConcern)), operationType(operationType), viewMode(viewMode) {} @@ -83,6 +100,7 @@ struct AcquisitionPrerequisites { NamespaceString nss; boost::optional uuid; + repl::ReadConcernArgs readConcern; PlacementConcernVariant placementConcern; OperationType operationType; ViewMode viewMode; @@ -91,15 +109,57 @@ struct AcquisitionPrerequisites { namespace shard_role_details { struct AcquiredCollection { + AcquiredCollection(AcquisitionPrerequisites prerequisites, + std::shared_ptr dbLock, + boost::optional collectionLock, + boost::optional lockFreeReadsBlock, + boost::optional globalLock, + boost::optional collectionDescription, + boost::optional ownershipFilter, + CollectionPtr collectionPtr) + : prerequisites(std::move(prerequisites)), + dbLock(std::move(dbLock)), + collectionLock(std::move(collectionLock)), + lockFreeReadsBlock(std::move(lockFreeReadsBlock)), + globalLock(std::move(globalLock)), + collectionDescription(std::move(collectionDescription)), + ownershipFilter(std::move(ownershipFilter)), + collectionPtr(std::move(collectionPtr)), + invalidated(false) {} + + AcquiredCollection(AcquisitionPrerequisites prerequisites, + std::shared_ptr dbLock, + boost::optional collectionLock, + CollectionPtr collectionPtr) + : AcquiredCollection(std::move(prerequisites), + std::move(dbLock), + std::move(collectionLock), + boost::none, + boost::none, + boost::none, + boost::none, + std::move(collectionPtr)){}; + AcquisitionPrerequisites prerequisites; std::shared_ptr dbLock; boost::optional collectionLock; + boost::optional lockFreeReadsBlock; + boost::optional globalLock; + boost::optional collectionDescription; boost::optional ownershipFilter; CollectionPtr collectionPtr; + + // Indicates whether this acquisition has been invalidated after a ScopedLocalCatalogWriteFence + // was unable to restore it on rollback. + bool invalidated; + + // Maintains a reference count to how many references there are to this acquisition by the + // CollectionAcquisition class. + mutable int64_t refCount = 0; }; struct AcquiredView { @@ -109,6 +169,10 @@ struct AcquiredView { boost::optional collectionLock; std::shared_ptr viewDefinition; + + // Maintains a reference count to how many references there are to this acquisition by the + // ViewAcquisition class. + mutable int64_t refCount = 0; }; /** @@ -148,7 +212,7 @@ struct AcquiredView { * the read concern of the operation). 
*/ struct TransactionResources { - TransactionResources(repl::ReadConcernArgs readConcern); + TransactionResources(); TransactionResources(TransactionResources&&) = delete; TransactionResources& operator=(TransactionResources&&) = delete; @@ -158,15 +222,14 @@ struct TransactionResources { ~TransactionResources(); - AcquiredCollection& addAcquiredCollection(AcquiredCollection&& acquiredCollection) { - return acquiredCollections.emplace_back(std::move(acquiredCollection)); - } + static TransactionResources& get(OperationContext* opCtx); - void releaseCollection(UUID uuid); + static std::unique_ptr detachFromOpCtx(OperationContext* opCtx); + static void attachToOpCtx(OperationContext* opCtx, + std::unique_ptr transactionResources); - const AcquiredView& addAcquiredView(AcquiredView&& acquiredView) { - return acquiredViews.emplace_back(std::move(acquiredView)); - } + AcquiredCollection& addAcquiredCollection(AcquiredCollection&& acquiredCollection); + const AcquiredView& addAcquiredView(AcquiredView&& acquiredView); void releaseAllResourcesOnCommitOrAbort() noexcept; @@ -177,37 +240,55 @@ struct TransactionResources { */ void assertNoAcquiredCollections() const; - // The read concern with which the whole operation started. Remains the same for the duration of - // the entire operation. - repl::ReadConcernArgs readConcern; + /** + * Transaction resources can only be in one of 4 states: + * - EMPTY: This state is equivalent to a brand new constructed transaction resources which have + * never received an acquisition. + * - ACTIVE: There is at least one acquisition in use and the resources have not been yielded. + * - YIELDED: The resources are either yielded or in the process of reacquisition after a yield. + * - FAILED: The reacquisition after a yield failed, we cannot perform any new acquisitions and + * the operation must release all acquisitions. The operation must effectively cancel the + * current operation. + * + * The set of valid transitions are: + * - EMPTY <-> ACTIVE <-> YIELDED + * - YIELDED -> FAILED -> EMPTY + */ + enum class State { EMPTY, ACTIVE, YIELDED, FAILED }; - // Indicates whether yield has been performed on these resources - bool yielded{false}; + State state{State::EMPTY}; //////////////////////////////////////////////////////////////////////////////////////// // Global resources (cover all collections for the operation) + // The read concern with which the transaction runs. All acquisitions must match that read + // concern. + boost::optional readConcern; + // Set of locks acquired by the operation or nullptr if yielded. std::unique_ptr locker; - // If '_locker' has been yielded, contains a snapshot of the locks which have been yielded. - // Otherwise boost::none. - boost::optional lockSnapshot; - - // The storage engine snapshot associated with this transaction (when yielded). - struct YieldedRecoveryUnit { - std::unique_ptr recoveryUnit; - WriteUnitOfWork::RecoveryUnitState recoveryUnitState; - }; - - boost::optional yieldRecoveryUnit; - //////////////////////////////////////////////////////////////////////////////////////// // Per-collection resources // Set of all collections which are currently acquired std::list acquiredCollections; std::list acquiredViews; + + // Reference counters used for controlling how many references there are to the + // TransactionResources object. 
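The State enum above lists four states and the legal transitions EMPTY <-> ACTIVE <-> YIELDED and YIELDED -> FAILED -> EMPTY. A standalone sketch of a transition check matching that description; the names are hypothetical and this is not part of the server code:

```cpp
// Valid transitions per the comment above:
//   EMPTY <-> ACTIVE <-> YIELDED,  YIELDED -> FAILED -> EMPTY.
enum class ResourcesState { kEmpty, kActive, kYielded, kFailed };

bool isValidTransition(ResourcesState from, ResourcesState to) {
    switch (from) {
        case ResourcesState::kEmpty:
            return to == ResourcesState::kActive;
        case ResourcesState::kActive:
            return to == ResourcesState::kEmpty || to == ResourcesState::kYielded;
        case ResourcesState::kYielded:
            return to == ResourcesState::kActive || to == ResourcesState::kFailed;
        case ResourcesState::kFailed:
            return to == ResourcesState::kEmpty;
    }
    return false;
}
```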
+ int64_t collectionAcquisitionReferences = 0; + int64_t viewAcquisitionReferences = 0; + + //////////////////////////////////////////////////////////////////////////////////////// + // Yield/restore logic + + // If this value is set, indicates that yield has been performed on the owning + // TransactionResources resources and the yielded state is contained in the structure below. + struct YieldedStateHolder { + Locker::LockSnapshot yieldedLocker; + }; + boost::optional yielded; }; } // namespace shard_role_details diff --git a/src/mongo/db/transaction_resources_init_mongod.cpp b/src/mongo/db/transaction_resources_init_mongod.cpp new file mode 100644 index 0000000000000..0cd2ba4f6b3cb --- /dev/null +++ b/src/mongo/db/transaction_resources_init_mongod.cpp @@ -0,0 +1,87 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include + +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" + +namespace mongo { +namespace { + +class TransactionResourcesMongoDClientObserver : public ServiceContext::ClientObserver { +public: + TransactionResourcesMongoDClientObserver() = default; + ~TransactionResourcesMongoDClientObserver() = default; + + void onCreateClient(Client* client) final {} + + void onDestroyClient(Client* client) final {} + + void onCreateOperationContext(OperationContext* opCtx) final { + auto service = opCtx->getServiceContext(); + + shard_role_details::TransactionResources::attachToOpCtx( + opCtx, std::make_unique()); + opCtx->setLockState(std::make_unique(service)); + + // There are a few cases where we don't have a storage engine available yet when creating an + // operation context. + // 1. During startup, we create an operation context to allow the storage engine + // initialization code to make use of the lock manager. + // 2. 
There are unit tests that create an operation context before initializing the storage + // engine. + // 3. Unit tests that use an operation context but don't require a storage engine for their + // testing purpose. + auto storageEngine = service->getStorageEngine(); + if (storageEngine) { + opCtx->setRecoveryUnit(std::unique_ptr(storageEngine->newRecoveryUnit()), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); + } + } + + void onDestroyOperationContext(OperationContext* opCtx) final {} +}; + +ServiceContext::ConstructorActionRegisterer transactionResourcesConstructor{ + "TransactionResourcesConstructor", [](ServiceContext* service) { + service->registerClientObserver( + std::make_unique()); + }}; + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/transaction_resources_init_non_mongod.cpp b/src/mongo/db/transaction_resources_init_non_mongod.cpp new file mode 100644 index 0000000000000..a2f30e11c7187 --- /dev/null +++ b/src/mongo/db/transaction_resources_init_non_mongod.cpp @@ -0,0 +1,65 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include + +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" + +namespace mongo { +namespace { + +class TransactionResourcesNonMongoDClientObserver : public ServiceContext::ClientObserver { +public: + TransactionResourcesNonMongoDClientObserver() = default; + ~TransactionResourcesNonMongoDClientObserver() = default; + + void onCreateClient(Client* client) final {} + + void onDestroyClient(Client* client) final {} + + void onCreateOperationContext(OperationContext* opCtx) final { + opCtx->setLockState(std::make_unique(opCtx->getServiceContext())); + } + + void onDestroyOperationContext(OperationContext* opCtx) final {} +}; + +ServiceContext::ConstructorActionRegisterer transactionResourcesConstructor{ + "TransactionResourcesConstructor", [](ServiceContext* service) { + service->registerClientObserver( + std::make_unique()); + }}; + +} // namespace +} // namespace mongo diff --git a/src/mongo/db/transaction_validation.cpp b/src/mongo/db/transaction_validation.cpp index fb6c328ea9c53..7e5def9c0939f 100644 --- a/src/mongo/db/transaction_validation.cpp +++ b/src/mongo/db/transaction_validation.cpp @@ -27,17 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/transaction_validation.h" - #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/commands.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/commands/txn_two_phase_commit_cmds_gen.h" -#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/transaction_validation.h" #include "mongo/db/write_concern_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -111,12 +113,12 @@ void doTransactionValidationForWrites(OperationContext* opCtx, const NamespaceSt if (!opCtx->inMultiDocumentTransaction()) return; uassert(50791, - str::stream() << "Cannot write to system collection " << ns.toString() + str::stream() << "Cannot write to system collection " << ns.toStringForErrorMsg() << " within a transaction.", !ns.isSystem() || ns.isPrivilegeCollection() || ns.isTimeseriesBucketsCollection()); const auto replCoord = repl::ReplicationCoordinator::get(opCtx); uassert(50790, - str::stream() << "Cannot write to unreplicated collection " << ns.toString() + str::stream() << "Cannot write to unreplicated collection " << ns.toStringForErrorMsg() << " within a transaction.", !replCoord->isOplogDisabledFor(opCtx, ns)); } diff --git a/src/mongo/db/transaction_validation.h b/src/mongo/db/transaction_validation.h index ee81bddfc05f1..2c64df70fca52 100644 --- a/src/mongo/db/transaction_validation.h +++ b/src/mongo/db/transaction_validation.h @@ -29,9 +29,13 @@ #pragma once +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/write_concern_options.h" namespace mongo { diff --git a/src/mongo/db/ttl.cpp b/src/mongo/db/ttl.cpp index 98e7d5fe662fd..62a71054e4a1b 100644 --- a/src/mongo/db/ttl.cpp +++ b/src/mongo/db/ttl.cpp @@ -30,41 +30,102 @@ #include "mongo/db/ttl.h" +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/user_name.h" #include "mongo/db/catalog/coll_mod.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/catalog/database_holder.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/client.h" +#include "mongo/db/coll_mod_gen.h" #include "mongo/db/commands/fsync_locked.h" #include "mongo/db/commands/server_status_metric.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/exec/batched_delete_stage.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/delete_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_names.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/insert.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/record_id_bound.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/tenant_migration_access_blocker.h" #include "mongo/db/repl/tenant_migration_access_blocker_registry.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/s/shard_filtering_metadata_refresh.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/db/stats/resource_consumption_metrics.h" -#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/ttl_collection_cache.h" #include "mongo/db/ttl_gen.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/stale_exception.h" #include "mongo/util/assert_util.h" #include "mongo/util/background.h" +#include "mongo/util/concurrency/admission_context.h" #include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/log_with_sampling.h" +#include "mongo/util/scopeguard.h" 
+#include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kIndex @@ -74,11 +135,6 @@ namespace mongo { namespace { const auto getTTLMonitor = ServiceContext::declareDecoration>(); -bool isBatchingEnabled() { - return feature_flags::gBatchMultiDeletes.isEnabled(serverGlobalParams.featureCompatibility) && - ttlMonitorBatchDeletes.load(); -} - // When batching is enabled, returns BatchedDeleteStageParams that limit the amount of work done in // a delete such that it is possible not all expired documents will be removed. Returns nullptr // otherwise. @@ -351,11 +407,6 @@ void TTLMonitor::run() { ThreadClient tc(name(), getGlobalServiceContext()); AuthorizationSession::get(cc())->grantInternalAuthorization(&cc()); - { - stdx::lock_guard lk(*tc.get()); - tc.get()->setSystemOperationKillableByStepdown(lk); - } - while (true) { { auto startTime = Date_t::now(); @@ -513,13 +564,7 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx, TTLCollectionCache* ttlCollectionCache, const UUID& uuid, const TTLCollectionCache::Info& info) { - // Skip collections that have not been made visible yet. The TTLCollectionCache - // already has the index information available, so we want to avoid removing it - // until the collection is visible. auto collectionCatalog = CollectionCatalog::get(opCtx); - if (collectionCatalog->isCollectionAwaitingVisibility(uuid)) { - return false; - } // The collection was dropped. auto nss = collectionCatalog->lookupNSSByUUID(opCtx, uuid); @@ -541,26 +586,33 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx, try { uassertStatusOK(userAllowedWriteNS(opCtx, *nss)); - auto catalogCache = Grid::get(opCtx)->catalogCache(); + auto catalogCache = + Grid::get(opCtx)->isInitialized() ? Grid::get(opCtx)->catalogCache() : nullptr; auto sii = catalogCache ? uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, *nss)).sii : boost::none; // Attach IGNORED placement version to skip orphans (the range deleter will clear them up) - auto scopedRole = ScopedSetShardRole( - opCtx, - *nss, - ShardVersionFactory::make(ChunkVersion::IGNORED(), - sii ? boost::make_optional(sii->getCollectionIndexes()) - : boost::none), - boost::none); - AutoGetCollection coll(opCtx, *nss, MODE_IX); - // The collection with `uuid` might be renamed before the lock and the wrong namespace would - // be locked and looked up so we double check here. - if (!coll || coll->uuid() != uuid) + const auto shardVersion = ShardVersionFactory::make( + ChunkVersion::IGNORED(), + sii ? boost::make_optional(sii->getCollectionIndexes()) : boost::none); + auto scopedRole = ScopedSetShardRole(opCtx, *nss, shardVersion, boost::none); + const auto coll = + acquireCollection(opCtx, + CollectionAcquisitionRequest(*nss, + {boost::none, shardVersion}, + repl::ReadConcernArgs::get(opCtx), + AcquisitionPrerequisites::kWrite), + MODE_IX); + + // The collection with `uuid` might be renamed before the lock and the wrong namespace + // would be locked and looked up so we double check here. + if (!coll.exists() || coll.uuid() != uuid) return false; // Allow TTL deletion on non-capped collections, and on capped clustered collections. 
- invariant(!coll->isCapped() || (coll->isCapped() && coll->isClustered())); + const auto& collectionPtr = coll.getCollectionPtr(); + invariant(!collectionPtr->isCapped() || + (collectionPtr->isCapped() && collectionPtr->isClustered())); if (MONGO_unlikely(hangTTLMonitorWithLock.shouldFail())) { LOGV2(22534, @@ -574,28 +626,25 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx, } std::shared_ptr mtab; - if (coll.getDb() && - nullptr != + if (nullptr != (mtab = TenantMigrationAccessBlockerRegistry::get(opCtx->getServiceContext()) - .getTenantMigrationAccessBlockerForDbName(coll.getDb()->name(), + .getTenantMigrationAccessBlockerForDbName(coll.nss().dbName(), MtabType::kRecipient)) && mtab->checkIfShouldBlockTTL()) { LOGV2_DEBUG(53768, 1, "Postpone TTL of DB because of active tenant migration", "tenantMigrationAccessBlocker"_attr = mtab->getDebugInfo().jsonString(), - "database"_attr = coll.getDb()->name()); + "database"_attr = coll.nss().dbName()); return false; } - ResourceConsumption::ScopedMetricsCollector scopedMetrics(opCtx, nss->db().toString()); + ResourceConsumption::ScopedMetricsCollector scopedMetrics(opCtx, nss->dbName()); - const auto& collection = coll.getCollection(); if (info.isClustered()) { - return _deleteExpiredWithCollscan(opCtx, ttlCollectionCache, collection); + return _deleteExpiredWithCollscan(opCtx, ttlCollectionCache, coll); } else { - return _deleteExpiredWithIndex( - opCtx, ttlCollectionCache, collection, info.getIndexName()); + return _deleteExpiredWithIndex(opCtx, ttlCollectionCache, coll, info.getIndexName()); } } catch (const ExceptionForCat& ex) { // The TTL index tried to delete some information from a sharded collection @@ -613,11 +662,6 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx, ExecutorFuture(executor) .then([serviceContext = opCtx->getServiceContext(), nss, staleInfo] { ThreadClient tc("TTLShardVersionRecovery", serviceContext); - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - auto uniqueOpCtx = tc->makeOperationContext(); auto opCtx = uniqueOpCtx.get(); @@ -662,16 +706,17 @@ bool TTLMonitor::_doTTLIndexDelete(OperationContext* opCtx, bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx, TTLCollectionCache* ttlCollectionCache, - const CollectionPtr& collection, + const CollectionAcquisition& collection, std::string indexName) { - if (!collection->isIndexPresent(indexName)) { - ttlCollectionCache->deregisterTTLIndexByName(collection->uuid(), indexName); + const auto& collectionPtr = collection.getCollectionPtr(); + if (!collectionPtr->isIndexPresent(indexName)) { + ttlCollectionCache->deregisterTTLIndexByName(collection.uuid(), indexName); return false; } - BSONObj spec = collection->getIndexSpec(indexName); + BSONObj spec = collectionPtr->getIndexSpec(indexName); const IndexDescriptor* desc = - getValidTTLIndex(opCtx, ttlCollectionCache, collection, spec, indexName); + getValidTTLIndex(opCtx, ttlCollectionCache, collectionPtr, spec, indexName); if (!desc) { return false; @@ -680,13 +725,13 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx, LOGV2_DEBUG(22533, 1, "running TTL job for index", - logAttrs(collection->ns()), + logAttrs(collection.nss()), "key"_attr = desc->keyPattern(), "name"_attr = indexName); auto expireAfterSeconds = spec[IndexDescriptor::kExpireAfterSecondsFieldName].safeNumberLong(); const Date_t kDawnOfTime = Date_t::fromMillisSinceEpoch(std::numeric_limits::min()); - const auto expirationDate = safeExpirationDate(opCtx, collection, 
expireAfterSeconds); + const auto expirationDate = safeExpirationDate(opCtx, collectionPtr, expireAfterSeconds); const BSONObj startKey = BSON("" << kDawnOfTime); const BSONObj endKey = BSON("" << expirationDate); @@ -702,7 +747,7 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx, // not actually expired when our snapshot changes during deletion. const char* keyFieldName = key.firstElement().fieldName(); BSONObj query = BSON(keyFieldName << BSON("$gte" << kDawnOfTime << "$lte" << expirationDate)); - auto findCommand = std::make_unique(collection->ns()); + auto findCommand = std::make_unique(collection.nss()); findCommand->setFilter(query); auto canonicalQuery = CanonicalQuery::canonicalize(opCtx, std::move(findCommand)); invariant(canonicalQuery.getStatus()); @@ -714,11 +759,11 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx, // Maintain a consistent view of whether batching is enabled - batching depends on // parameters that can be set at runtime, and it is illegal to try to get // BatchedDeleteStageStats from a non-batched delete. - bool batchingEnabled = isBatchingEnabled(); + const bool batchingEnabled = ttlMonitorBatchDeletes.load(); Timer timer; auto exec = InternalPlanner::deleteWithIndexScan(opCtx, - &collection, + collection, std::move(params), desc, startKey, @@ -740,7 +785,7 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx, .first) { LOGV2(5479200, "Deleted expired documents using index", - logAttrs(collection->ns()), + logAttrs(collection.nss()), "index"_attr = indexName, "numDeleted"_attr = numDeleted, "duration"_attr = duration); @@ -760,25 +805,26 @@ bool TTLMonitor::_deleteExpiredWithIndex(OperationContext* opCtx, bool TTLMonitor::_deleteExpiredWithCollscan(OperationContext* opCtx, TTLCollectionCache* ttlCollectionCache, - const CollectionPtr& collection) { - const auto& collOptions = collection->getCollectionOptions(); + const CollectionAcquisition& collection) { + const auto& collectionPtr = collection.getCollectionPtr(); + const auto& collOptions = collectionPtr->getCollectionOptions(); uassert(5400701, "collection is not clustered but is described as being TTL", collOptions.clusteredIndex); - invariant(collection->isClustered()); + invariant(collectionPtr->isClustered()); auto expireAfterSeconds = collOptions.expireAfterSeconds; if (!expireAfterSeconds) { - ttlCollectionCache->deregisterTTLClusteredIndex(collection->uuid()); + ttlCollectionCache->deregisterTTLClusteredIndex(collection.uuid()); return false; } - LOGV2_DEBUG(5400704, 1, "running TTL job for clustered collection", logAttrs(collection->ns())); + LOGV2_DEBUG(5400704, 1, "running TTL job for clustered collection", logAttrs(collection.nss())); - const auto startId = makeCollScanStartBound(collection, Date_t::min()); + const auto startId = makeCollScanStartBound(collectionPtr, Date_t::min()); - const auto expirationDate = safeExpirationDate(opCtx, collection, *expireAfterSeconds); - const auto endId = makeCollScanEndBound(collection, expirationDate); + const auto expirationDate = safeExpirationDate(opCtx, collectionPtr, *expireAfterSeconds); + const auto endId = makeCollScanEndBound(collectionPtr, expirationDate); auto params = std::make_unique(); params->isMulti = true; @@ -786,14 +832,14 @@ bool TTLMonitor::_deleteExpiredWithCollscan(OperationContext* opCtx, // Maintain a consistent view of whether batching is enabled - batching depends on // parameters that can be set at runtime, and it is illegal to try to get // BatchedDeleteStageStats from a 
non-batched delete. - bool batchingEnabled = isBatchingEnabled(); + const bool batchingEnabled = ttlMonitorBatchDeletes.load(); // Deletes records using a bounded collection scan from the beginning of time to the // expiration time (inclusive). Timer timer; auto exec = InternalPlanner::deleteWithCollectionScan( opCtx, - &collection, + collection, std::move(params), PlanYieldPolicy::YieldPolicy::YIELD_AUTO, InternalPlanner::Direction::FORWARD, @@ -814,7 +860,7 @@ bool TTLMonitor::_deleteExpiredWithCollscan(OperationContext* opCtx, .first) { LOGV2(5400702, "Deleted expired documents using collection scan", - logAttrs(collection->ns()), + logAttrs(collection.nss()), "numDeleted"_attr = numDeleted, "duration"_attr = duration); } @@ -851,9 +897,6 @@ void TTLMonitor::onStepUp(OperationContext* opCtx) { auto ttlInfos = ttlCollectionCache.getTTLInfos(); for (const auto& [uuid, infos] : ttlInfos) { auto collectionCatalog = CollectionCatalog::get(opCtx); - if (collectionCatalog->isCollectionAwaitingVisibility(uuid)) { - continue; - } // The collection was dropped. auto nss = collectionCatalog->lookupNSSByUUID(opCtx, uuid); @@ -899,7 +942,7 @@ void TTLMonitor::onStepUp(OperationContext* opCtx) { // processCollModCommand() will acquire MODE_X access to the collection. BSONObjBuilder builder; uassertStatusOK( - processCollModCommand(opCtx, {nss->db(), uuid}, collModCmd, &builder)); + processCollModCommand(opCtx, {nss->dbName(), uuid}, collModCmd, &builder)); auto result = builder.obj(); LOGV2( 6847701, diff --git a/src/mongo/db/ttl.h b/src/mongo/db/ttl.h index 6aa0f137ea321..56397818a77c6 100644 --- a/src/mongo/db/ttl.h +++ b/src/mongo/db/ttl.h @@ -29,8 +29,20 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/db/ttl_collection_cache.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/background.h" +#include "mongo/util/duration.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -137,7 +149,7 @@ class TTLMonitor : public BackgroundJob { */ bool _deleteExpiredWithIndex(OperationContext* opCtx, TTLCollectionCache* ttlCollectionCache, - const CollectionPtr& collection, + const CollectionAcquisition& collection, std::string indexName); /* @@ -153,7 +165,7 @@ class TTLMonitor : public BackgroundJob { */ bool _deleteExpiredWithCollscan(OperationContext* opCtx, TTLCollectionCache* ttlCollectionCache, - const CollectionPtr& collection); + const CollectionAcquisition& collection); // Protects the state below. 
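(Aside, not part of the patch.) Both TTL delete paths above load ttlMonitorBatchDeletes exactly once into a const local before building the plan. The comment explains why: the parameter can be set at runtime, and it is illegal to ask a non-batched delete for BatchedDeleteStageStats, so every later branch must observe the same value. A minimal sketch of that pattern, with std::atomic<bool> standing in for the server parameter:

    #include <atomic>

    // Stand-in for a runtime-settable server parameter (illustration only).
    std::atomic<bool> ttlMonitorBatchDeletes{true};

    void runOneTtlDelete() {
        // Snapshot the flag once; every branch below sees the same value, so a
        // concurrent setParameter cannot flip it mid-delete and we never ask a
        // non-batched plan for batched statistics.
        const bool batchingEnabled = ttlMonitorBatchDeletes.load();

        // ... build the delete plan with or without BatchedDeleteStageParams ...

        if (batchingEnabled) {
            // ... safe to read batched-delete statistics from the executor ...
        }
    }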
mutable Mutex _stateMutex = MONGO_MAKE_LATCH("TTLMonitorStateMutex"); diff --git a/src/mongo/db/ttl_collection_cache.cpp b/src/mongo/db/ttl_collection_cache.cpp index 966700ec8624d..8041a572555cc 100644 --- a/src/mongo/db/ttl_collection_cache.cpp +++ b/src/mongo/db/ttl_collection_cache.cpp @@ -28,23 +28,22 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/ttl_collection_cache.h" - #include +#include + +#include +#include #include "mongo/db/service_context.h" -#include "mongo/logv2/log.h" -#include "mongo/util/fail_point.h" +#include "mongo/db/ttl_collection_cache.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage namespace mongo { -MONGO_FAIL_POINT_DEFINE(hangTTLCollectionCacheAfterRegisteringInfo); - namespace { const auto getTTLCollectionCache = ServiceContext::declareDecoration(); } @@ -58,11 +57,6 @@ void TTLCollectionCache::registerTTLInfo(UUID uuid, const Info& info) { stdx::lock_guard lock(_ttlInfosLock); _ttlInfos[uuid].push_back(info); } - - if (MONGO_unlikely(hangTTLCollectionCacheAfterRegisteringInfo.shouldFail())) { - LOGV2(4664000, "Hanging due to hangTTLCollectionCacheAfterRegisteringInfo fail point"); - hangTTLCollectionCacheAfterRegisteringInfo.pauseWhileSet(); - } } void TTLCollectionCache::_deregisterTTLInfo(UUID uuid, const Info& info) { diff --git a/src/mongo/db/ttl_collection_cache.h b/src/mongo/db/ttl_collection_cache.h index f1a5fe1247618..330f9a448eff8 100644 --- a/src/mongo/db/ttl_collection_cache.h +++ b/src/mongo/db/ttl_collection_cache.h @@ -29,13 +29,16 @@ #pragma once -#include "mongo/stdx/variant.h" #include +#include +#include #include #include "mongo/db/namespace_string.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/variant.h" #include "mongo/util/uuid.h" /** diff --git a/src/mongo/db/ttl_collection_cache_test.cpp b/src/mongo/db/ttl_collection_cache_test.cpp index d252faa6b7b91..1ce60a5b7484f 100644 --- a/src/mongo/db/ttl_collection_cache_test.cpp +++ b/src/mongo/db/ttl_collection_cache_test.cpp @@ -28,7 +28,14 @@ */ #include "mongo/db/ttl_collection_cache.h" -#include "mongo/unittest/unittest.h" + +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/ttl_test.cpp b/src/mongo/db/ttl_test.cpp index 24b2994d80ac8..0d88195d05a90 100644 --- a/src/mongo/db/ttl_test.cpp +++ b/src/mongo/db/ttl_test.cpp @@ -27,19 +27,36 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/catalog/create_collection.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_builds_manager.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/index_build_entry_helpers.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/service_context_d_test_fixture.h" #include "mongo/db/ttl.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -173,8 +190,6 @@ class SimpleClient { }; TEST_F(TTLTest, TTLPassSingleCollectionTwoIndexes) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagBatchMultiDeletes", - true); RAIIServerParameterControllerForTest ttlBatchDeletesController("ttlMonitorBatchDeletes", true); SimpleClient client(opCtx()); @@ -205,8 +220,6 @@ TEST_F(TTLTest, TTLPassSingleCollectionTwoIndexes) { } TEST_F(TTLTest, TTLPassMultipCollectionsPass) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagBatchMultiDeletes", - true); RAIIServerParameterControllerForTest ttlBatchDeletesController("ttlMonitorBatchDeletes", true); SimpleClient client(opCtx()); @@ -253,8 +266,6 @@ TEST_F(TTLTest, TTLPassMultipCollectionsPass) { // Demonstrate sub-pass behavior when all expired documents are drained before the sub-pass reaches // its time limit. 
TEST_F(TTLTest, TTLSingleSubPass) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagBatchMultiDeletes", - true); RAIIServerParameterControllerForTest ttlBatchDeletesController("ttlMonitorBatchDeletes", true); // Set 'ttlMonitorSubPasstargetSecs' to a day to guarantee the sub-pass target time is never @@ -300,8 +311,6 @@ TEST_F(TTLTest, TTLSingleSubPass) { } TEST_F(TTLTest, TTLSubPassesRemoveExpiredDocuments) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagBatchMultiDeletes", - true); RAIIServerParameterControllerForTest ttlBatchDeletesController("ttlMonitorBatchDeletes", true); // Set the target time for each sub-pass to 0 to test when only a single iteration of deletes is @@ -381,8 +390,6 @@ TEST_F(TTLTest, TTLSubPassesRemoveExpiredDocuments) { } TEST_F(TTLTest, TTLSubPassesRemoveExpiredDocumentsAddedBetweenSubPasses) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagBatchMultiDeletes", - true); RAIIServerParameterControllerForTest ttlBatchDeletesController("ttlMonitorBatchDeletes", true); // Set the target time for each sub-pass to 0 to test when only a single iteration of deletes is @@ -466,8 +473,6 @@ TEST_F(TTLTest, TTLSubPassesRemoveExpiredDocumentsAddedBetweenSubPasses) { // Tests that, between sub-passes, newly added TTL indexes are not ignored. TEST_F(TTLTest, TTLSubPassesStartRemovingFromNewTTLIndex) { - RAIIServerParameterControllerForTest featureFlagController("featureFlagBatchMultiDeletes", - true); RAIIServerParameterControllerForTest ttlBatchDeletesController("ttlMonitorBatchDeletes", true); // Set the target time for each sub-pass to 0 to test when only a single iteration of deletes is diff --git a/src/mongo/db/txn_retry_counter_too_old_info.cpp b/src/mongo/db/txn_retry_counter_too_old_info.cpp index ba5f01429d98d..cb760d78d0889 100644 --- a/src/mongo/db/txn_retry_counter_too_old_info.cpp +++ b/src/mongo/db/txn_retry_counter_too_old_info.cpp @@ -27,11 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/txn_retry_counter_too_old_info.h" - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" namespace mongo { diff --git a/src/mongo/db/txn_retry_counter_too_old_info.h b/src/mongo/db/txn_retry_counter_too_old_info.h index 7e0c95e0a6015..350199833b11c 100644 --- a/src/mongo/db/txn_retry_counter_too_old_info.h +++ b/src/mongo/db/txn_retry_counter_too_old_info.h @@ -29,10 +29,14 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/update/SConscript b/src/mongo/db/update/SConscript index 2b3d9973aeb28..5590570b599bf 100644 --- a/src/mongo/db/update/SConscript +++ b/src/mongo/db/update/SConscript @@ -170,6 +170,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/query/query_planner', '$BUILD_DIR/mongo/db/query/query_test_service_context', '$BUILD_DIR/mongo/db/query_expressions', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/db/vector_clock_trivial', 'update', diff --git a/src/mongo/db/update/addtoset_node.cpp b/src/mongo/db/update/addtoset_node.cpp index f7b821815d178..ddc4325fea7c2 100644 --- a/src/mongo/db/update/addtoset_node.cpp +++ b/src/mongo/db/update/addtoset_node.cpp @@ -27,12 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/update/addtoset_node.h" +#include -#include "mongo/bson/bsonelement_comparator.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement_comparator_interface.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/update/addtoset_node.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/update/addtoset_node.h b/src/mongo/db/update/addtoset_node.h index e2782e5398dc5..cfb5699d8c737 100644 --- a/src/mongo/db/update/addtoset_node.h +++ b/src/mongo/db/update/addtoset_node.h @@ -32,9 +32,21 @@ #include #include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/modifier_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" namespace mongo { diff --git a/src/mongo/db/update/addtoset_node_test.cpp b/src/mongo/db/update/addtoset_node_test.cpp index 6c031e43875f8..83cb3ac714244 100644 --- a/src/mongo/db/update/addtoset_node_test.cpp +++ b/src/mongo/db/update/addtoset_node_test.cpp @@ -27,18 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/update/addtoset_node.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/update/addtoset_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -132,8 +139,7 @@ TEST_F(AddToSetNodeTest, ApplyNonEach) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -153,8 +159,7 @@ TEST_F(AddToSetNodeTest, ApplyNonEachArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, [1]]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -173,8 +178,7 @@ TEST_F(AddToSetNodeTest, ApplyEach) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1, 2]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -193,8 +197,7 @@ TEST_F(AddToSetNodeTest, ApplyToEmptyArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 2]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -213,8 +216,7 @@ TEST_F(AddToSetNodeTest, ApplyDeduplicateElementsToAdd) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -234,8 +236,7 @@ TEST_F(AddToSetNodeTest, ApplyDoNotAddExistingElements) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1]}"), doc); 
ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -255,8 +256,7 @@ TEST_F(AddToSetNodeTest, ApplyDoNotDeduplicateExistingElements) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 0, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -275,8 +275,7 @@ TEST_F(AddToSetNodeTest, ApplyNoElementsToAdd) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -295,8 +294,7 @@ TEST_F(AddToSetNodeTest, ApplyNoNonDuplicateElementsToAdd) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -315,8 +313,7 @@ TEST_F(AddToSetNodeTest, ApplyCreateArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -335,8 +332,7 @@ TEST_F(AddToSetNodeTest, ApplyCreateEmptyArrayIsNotNoop) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -358,8 +354,7 @@ TEST_F(AddToSetNodeTest, ApplyDeduplicationOfElementsToAddRespectsCollation) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['abc', 'def']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -381,8 +376,7 @@ TEST_F(AddToSetNodeTest, ApplyComparisonToExistingElementsRespectsCollation) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['ABC', 'def']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -405,8 +399,7 @@ TEST_F(AddToSetNodeTest, ApplyRespectsCollationFromSetCollator) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - 
ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['abc', 'def']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -451,8 +444,7 @@ TEST_F(AddToSetNodeTest, ApplyNestedArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][1]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{ _id : 1, a : [1, [1]] }"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -472,8 +464,7 @@ TEST_F(AddToSetNodeTest, ApplyIndexesNotAffected) { addIndexedPath("b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); assertOplogEntry(fromjson("{$v: 2, diff: {u: {a: [0, 1]}}}")); @@ -491,8 +482,7 @@ TEST_F(AddToSetNodeTest, ApplyNoIndexDataOrLogBuilder) { setLogBuilderToNull(); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); diff --git a/src/mongo/db/update/arithmetic_node.cpp b/src/mongo/db/update/arithmetic_node.cpp index 7b8f53efacf9a..427a1b67c5f61 100644 --- a/src/mongo/db/update/arithmetic_node.cpp +++ b/src/mongo/db/update/arithmetic_node.cpp @@ -27,11 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/update/arithmetic_node.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/update/arithmetic_node.h" +#include "mongo/util/safe_num.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/update/arithmetic_node.h b/src/mongo/db/update/arithmetic_node.h index 4b25a52946c3d..11aedf465c20a 100644 --- a/src/mongo/db/update/arithmetic_node.h +++ b/src/mongo/db/update/arithmetic_node.h @@ -31,8 +31,21 @@ #include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/modifier_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/update/arithmetic_node_test.cpp b/src/mongo/db/update/arithmetic_node_test.cpp index 328ecbb8d62fc..04488e17b0b53 100644 --- a/src/mongo/db/update/arithmetic_node_test.cpp +++ b/src/mongo/db/update/arithmetic_node_test.cpp @@ -27,17 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/update/arithmetic_node.h" +#include -#include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/update/arithmetic_node.h" +#include "mongo/db/update/runtime_update_path.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -122,8 +130,7 @@ TEST_F(ArithmeticNodeTest, ApplyIncNoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 5}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -142,8 +149,7 @@ TEST_F(ArithmeticNodeTest, ApplyMulNoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 5}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -162,8 +168,7 @@ TEST_F(ArithmeticNodeTest, ApplyRoundingNoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 6.022e23}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -182,8 +187,7 @@ TEST_F(ArithmeticNodeTest, ApplyEmptyPathToCreate) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 11}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -203,8 +207,7 @@ TEST_F(ArithmeticNodeTest, ApplyCreatePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {d: 5, b: {c: 6}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -224,8 +227,7 @@ TEST_F(ArithmeticNodeTest, ApplyExtendPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {c: 1, b: 2}}"), doc); 
ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.b}"); @@ -242,8 +244,7 @@ TEST_F(ArithmeticNodeTest, ApplyCreatePathFromRoot) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{c: 5, a: {b: 6}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -263,8 +264,7 @@ TEST_F(ArithmeticNodeTest, ApplyPositional) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][1]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 7, 2]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -302,8 +302,7 @@ TEST_F(ArithmeticNodeTest, ApplyNonViablePathToCreateFromReplicationIsNoOp) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 5}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -322,8 +321,7 @@ TEST_F(ArithmeticNodeTest, ApplyNoIndexDataNoLogBuilder) { setLogBuilderToNull(); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 11}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -340,8 +338,7 @@ TEST_F(ArithmeticNodeTest, ApplyDoesNotAffectIndexes) { addIndexedPath("b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 11}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -358,8 +355,7 @@ TEST_F(ArithmeticNodeTest, IncTypePromotionIsNotANoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: NumberLong(2)}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -376,8 +372,7 @@ TEST_F(ArithmeticNodeTest, MulTypePromotionIsNotANoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: NumberLong(2)}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -394,8 +389,7 @@ TEST_F(ArithmeticNodeTest, 
TypePromotionFromIntToDecimalIsNotANoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: NumberDecimal(\"5.0\")}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -414,8 +408,7 @@ TEST_F(ArithmeticNodeTest, TypePromotionFromLongToDecimalIsNotANoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: NumberDecimal(\"5.0\")}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -434,8 +427,7 @@ TEST_F(ArithmeticNodeTest, TypePromotionFromDoubleToDecimalIsNotANoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: NumberDecimal(\"5.25\")}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -458,8 +450,7 @@ TEST_F(ArithmeticNodeTest, ApplyPromoteToFloatingPoint) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1.2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -476,8 +467,7 @@ TEST_F(ArithmeticNodeTest, IncrementedDecimalStaysDecimal) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: NumberDecimal(\"11.5\")}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -502,8 +492,7 @@ TEST_F(ArithmeticNodeTest, OverflowIntToLong) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType()); ASSERT_EQUALS(BSON("a" << static_cast(initialValue) + 1), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -523,8 +512,7 @@ TEST_F(ArithmeticNodeTest, UnderflowIntToLong) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(mongo::NumberLong, doc.root()["a"].getType()); ASSERT_EQUALS(BSON("a" << static_cast(initialValue) - 1), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -543,8 +531,7 @@ TEST_F(ArithmeticNodeTest, IncModeCanBeReused) { addIndexedPath("a"); auto result 
= node.apply(getApplyParams(doc1.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc1); ASSERT_TRUE(doc1.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -554,8 +541,7 @@ TEST_F(ArithmeticNodeTest, IncModeCanBeReused) { addIndexedPath("a"); result = node.apply(getApplyParams(doc2.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 3}"), doc2); ASSERT_TRUE(doc1.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -572,8 +558,7 @@ TEST_F(ArithmeticNodeTest, CreatedNumberHasSameTypeAsInc) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 6, a: NumberLong(5)}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -590,8 +575,7 @@ TEST_F(ArithmeticNodeTest, CreatedNumberHasSameTypeAsMul) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 6, a: NumberLong(0)}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -608,8 +592,7 @@ TEST_F(ArithmeticNodeTest, ApplyEmptyDocument) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -695,8 +678,7 @@ TEST_F(ArithmeticNodeTest, ApplyNewPath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 1, a: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -729,8 +711,7 @@ TEST_F(ArithmeticNodeTest, ApplyNoOpDottedPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b : 2}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.b}"); @@ -747,8 +728,7 @@ TEST_F(ArithmeticNodeTest, TypePromotionOnDottedPathIsNotANoOp) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), 
getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b : NumberLong(2)}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.b}"); @@ -781,8 +761,7 @@ TEST_F(ArithmeticNodeTest, ApplyInPlaceDottedPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 3}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.b}"); @@ -799,8 +778,7 @@ TEST_F(ArithmeticNodeTest, ApplyPromotionDottedPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: NumberLong(5)}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.b}"); @@ -817,8 +795,7 @@ TEST_F(ArithmeticNodeTest, ApplyDottedPathEmptyDoc) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.b}"); @@ -835,8 +812,7 @@ TEST_F(ArithmeticNodeTest, ApplyFieldWithDot) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{'a.b':4, a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.b}"); @@ -853,8 +829,7 @@ TEST_F(ArithmeticNodeTest, ApplyNoOpArrayIndex) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][2]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.2.b}"); @@ -872,8 +847,7 @@ TEST_F(ArithmeticNodeTest, TypePromotionInArrayIsNotANoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][2]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: NumberLong(2)}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.2.b}"); @@ -906,8 +880,7 @@ TEST_F(ArithmeticNodeTest, ApplyInPlaceArrayIndex) { addIndexedPath("a"); auto result = 
node.apply(getApplyParams(doc.root()["a"][2]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 3}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.2.b}"); @@ -925,8 +898,7 @@ TEST_F(ArithmeticNodeTest, ApplyAppendArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -944,8 +916,7 @@ TEST_F(ArithmeticNodeTest, ApplyPaddingArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},null,{b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -963,8 +934,7 @@ TEST_F(ArithmeticNodeTest, ApplyNumericObject) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 0, '2': {b: 2}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.2.b}"); @@ -984,8 +954,7 @@ TEST_F(ArithmeticNodeTest, ApplyNumericField) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["2"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {'2': {b: 3}}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.2.b}"); @@ -1005,8 +974,7 @@ TEST_F(ArithmeticNodeTest, ApplyExtendNumericField) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["2"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {'2': {c: 1, b: 2}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.2.b}"); @@ -1024,8 +992,7 @@ TEST_F(ArithmeticNodeTest, ApplyNumericFieldToEmptyObject) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a.2.b}"); @@ -1043,8 +1010,7 @@ TEST_F(ArithmeticNodeTest, ApplyEmptyArray) { 
addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [null, null, {b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -1114,8 +1080,7 @@ TEST_F(ArithmeticNodeTest, ApplyDeserializedDocNotNoOp) { setPathToCreate("b"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1, b: NumberInt(0)}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1136,8 +1101,7 @@ TEST_F(ArithmeticNodeTest, ApplyToDeserializedDocNoOp) { setPathTaken(makeRuntimeUpdatePathForTest("a")); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: NumberInt(2)}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -1158,8 +1122,7 @@ TEST_F(ArithmeticNodeTest, ApplyToDeserializedDocNestedNoop) { setPathTaken(makeRuntimeUpdatePathForTest("a.b")); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: NumberInt(1)}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1180,8 +1143,7 @@ TEST_F(ArithmeticNodeTest, ApplyToDeserializedDocNestedNotNoop) { setPathTaken(makeRuntimeUpdatePathForTest("a.b")); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 3}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); diff --git a/src/mongo/db/update/array_culling_node.cpp b/src/mongo/db/update/array_culling_node.cpp index 76e2dd2578022..993abfe0142e8 100644 --- a/src/mongo/db/update/array_culling_node.cpp +++ b/src/mongo/db/update/array_culling_node.cpp @@ -27,10 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/update/array_culling_node.h" #include "mongo/db/update/storage_validation.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -75,6 +80,7 @@ void ArrayCullingNode::validateUpdate(mutablebson::ConstElement updatedElement, recursionLevel, false, /* allowTopLevelDollarPrefixedFields */ false, /* should validate for storage */ + false, /* isEmbeddedInIdField */ containsDotsAndDollarsField); } diff --git a/src/mongo/db/update/array_culling_node.h b/src/mongo/db/update/array_culling_node.h index 93aa5adef5fb0..4de79d7480f83 100644 --- a/src/mongo/db/update/array_culling_node.h +++ b/src/mongo/db/update/array_culling_node.h @@ -29,9 +29,15 @@ #pragma once +#include #include #include "mongo/base/clonable_ptr.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/const_element.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/modifier_node.h" namespace mongo { diff --git a/src/mongo/db/update/bit_node.cpp b/src/mongo/db/update/bit_node.cpp index 98c91ada662ec..d3471e8450c05 100644 --- a/src/mongo/db/update/bit_node.cpp +++ b/src/mongo/db/update/bit_node.cpp @@ -27,11 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/update/bit_node.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/update/bit_node.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/update/bit_node.h b/src/mongo/db/update/bit_node.h index 8438e72b89ff4..133b7ceb2fab3 100644 --- a/src/mongo/db/update/bit_node.h +++ b/src/mongo/db/update/bit_node.h @@ -30,10 +30,24 @@ #pragma once #include +#include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/modifier_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/safe_num.h" namespace mongo { diff --git a/src/mongo/db/update/bit_node_test.cpp b/src/mongo/db/update/bit_node_test.cpp index 0e218ebc29ec1..02e2d2d9d0cc5 100644 --- a/src/mongo/db/update/bit_node_test.cpp +++ b/src/mongo/db/update/bit_node_test.cpp @@ -27,17 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/bit_node.h" - -#include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/json.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/mutable_bson_test_utils.h" // IWYU pragma: keep #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/update/bit_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/update/compare_node.cpp b/src/mongo/db/update/compare_node.cpp index 5381d2e1d7062..9f5d8d3e74b18 100644 --- a/src/mongo/db/update/compare_node.cpp +++ b/src/mongo/db/update/compare_node.cpp @@ -27,11 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/compare_node.h" +#include #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/update/compare_node.h" namespace mongo { diff --git a/src/mongo/db/update/compare_node.h b/src/mongo/db/update/compare_node.h index 210d91b3a7c3d..1430707733561 100644 --- a/src/mongo/db/update/compare_node.h +++ b/src/mongo/db/update/compare_node.h @@ -31,8 +31,21 @@ #include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/modifier_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/update/compare_node_test.cpp b/src/mongo/db/update/compare_node_test.cpp index c72d0a41fe5ff..5a877223f7408 100644 --- a/src/mongo/db/update/compare_node_test.cpp +++ b/src/mongo/db/update/compare_node_test.cpp @@ -27,18 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/update/compare_node.h" +#include +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/update/compare_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -67,8 +73,7 @@ TEST_F(CompareNodeTest, ApplyMaxSameNumber) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -86,8 +91,7 @@ TEST_F(CompareNodeTest, ApplyMinSameNumber) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -105,8 +109,7 @@ TEST_F(CompareNodeTest, ApplyMaxNumberIsLess) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -124,8 +127,7 @@ TEST_F(CompareNodeTest, ApplyMinNumberIsMore) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -143,8 +145,7 @@ TEST_F(CompareNodeTest, ApplyMaxSameValInt) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1.0}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -162,8 +163,7 @@ TEST_F(CompareNodeTest, ApplyMaxSameValIntZero) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 0.0}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -181,8 +181,7 @@ TEST_F(CompareNodeTest, 
ApplyMinSameValIntZero) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 0.0}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -200,8 +199,7 @@ TEST_F(CompareNodeTest, ApplyMissingFieldMinNumber) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 0}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -219,8 +217,7 @@ TEST_F(CompareNodeTest, ApplyExistingNumberMinNumber) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 0}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -238,8 +235,7 @@ TEST_F(CompareNodeTest, ApplyMissingFieldMaxNumber) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 0}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -257,8 +253,7 @@ TEST_F(CompareNodeTest, ApplyExistingNumberMaxNumber) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -276,8 +271,7 @@ TEST_F(CompareNodeTest, ApplyExistingDateMaxDate) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {$date: 123123123}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -295,8 +289,7 @@ TEST_F(CompareNodeTest, ApplyExistingEmbeddedDocMaxDoc) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 3}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -314,8 +307,7 @@ TEST_F(CompareNodeTest, ApplyExistingEmbeddedDocMaxNumber) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); 
ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -336,8 +328,7 @@ TEST_F(CompareNodeTest, ApplyMinRespectsCollation) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 'dba'}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -359,8 +350,7 @@ TEST_F(CompareNodeTest, ApplyMinRespectsCollationFromSetCollator) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 'dba'}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -382,8 +372,7 @@ TEST_F(CompareNodeTest, ApplyMaxRespectsCollationFromSetCollator) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 'abd'}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -427,8 +416,7 @@ TEST_F(CompareNodeTest, ApplyIndexesNotAffected) { addIndexedPath("b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -446,8 +434,7 @@ TEST_F(CompareNodeTest, ApplyNoIndexDataOrLogBuilder) { setLogBuilderToNull(); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); } diff --git a/src/mongo/db/update/current_date_node.cpp b/src/mongo/db/update/current_date_node.cpp index aa6cb3ec66323..701468b1a68ca 100644 --- a/src/mongo/db/update/current_date_node.cpp +++ b/src/mongo/db/update/current_date_node.cpp @@ -27,12 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/current_date_node.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/update/current_date_node.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/update/current_date_node.h b/src/mongo/db/update/current_date_node.h index f9ec59acb21b4..403bde69ee6b4 100644 --- a/src/mongo/db/update/current_date_node.h +++ b/src/mongo/db/update/current_date_node.h @@ -29,10 +29,21 @@ #pragma once +#include #include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/service_context.h" #include "mongo/db/update/modifier_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" namespace mongo { diff --git a/src/mongo/db/update/current_date_node_test.cpp b/src/mongo/db/update/current_date_node_test.cpp index 2643d2bd28a7e..07de19123d5f7 100644 --- a/src/mongo/db/update/current_date_node_test.cpp +++ b/src/mongo/db/update/current_date_node_test.cpp @@ -27,17 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/current_date_node.h" +#include -#include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/update/current_date_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -142,8 +146,7 @@ TEST_F(CurrentDateNodeTest, ApplyTrue) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(doc.root().countChildren(), 1U); ASSERT_TRUE(doc.root()["a"].ok()); @@ -163,8 +166,7 @@ TEST_F(CurrentDateNodeTest, ApplyFalse) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(doc.root().countChildren(), 1U); ASSERT_TRUE(doc.root()["a"].ok()); @@ -184,8 +186,7 @@ TEST_F(CurrentDateNodeTest, ApplyDate) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - 
ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(doc.root().countChildren(), 1U); ASSERT_TRUE(doc.root()["a"].ok()); @@ -205,8 +206,7 @@ TEST_F(CurrentDateNodeTest, ApplyTimestamp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(doc.root().countChildren(), 1U); ASSERT_TRUE(doc.root()["a"].ok()); @@ -226,8 +226,7 @@ TEST_F(CurrentDateNodeTest, ApplyFieldDoesNotExist) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(doc.root().countChildren(), 1U); ASSERT_TRUE(doc.root()["a"].ok()); @@ -249,8 +248,7 @@ TEST_F(CurrentDateNodeTest, ApplyIndexesNotAffected) { addIndexedPath("b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); assertOplogEntryIsUpdateOfExpectedType(getOplogEntry(), "a"); } @@ -266,8 +264,7 @@ TEST_F(CurrentDateNodeTest, ApplyNoIndexDataOrLogBuilder) { setLogBuilderToNull(); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); - ASSERT_EQUALS(result.indexesAffected, getIndexAffectedFromLogEntry()); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(doc.root().countChildren(), 1U); ASSERT_TRUE(doc.root()["a"].ok()); diff --git a/src/mongo/db/update/delta_executor.cpp b/src/mongo/db/update/delta_executor.cpp index 983a3161d90ef..c0e43e12db7f8 100644 --- a/src/mongo/db/update/delta_executor.cpp +++ b/src/mongo/db/update/delta_executor.cpp @@ -27,13 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/update/delta_executor.h" - #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update/object_replace_executor.h" -#include "mongo/db/update/update_oplog_entry_serialization.h" namespace mongo { @@ -41,14 +39,12 @@ DeltaExecutor::ApplyResult DeltaExecutor::applyUpdate( UpdateExecutor::ApplyParams applyParams) const { const auto originalDoc = applyParams.element.getDocument().getObject(); - auto applyDiffOutput = doc_diff::applyDiff( - originalDoc, _diff, applyParams.indexData, _mustCheckExistenceForInsertOperations); - const auto& postImage = applyDiffOutput.postImage; + auto postImage = + doc_diff::applyDiff(originalDoc, _diff, _mustCheckExistenceForInsertOperations); auto postImageHasId = postImage.hasField("_id"); auto result = ObjectReplaceExecutor::applyReplacementUpdate( std::move(applyParams), postImage, postImageHasId); - result.indexesAffected = applyDiffOutput.indexesAffected; result.oplogEntry = _outputOplogEntry; return result; } diff --git a/src/mongo/db/update/delta_executor.h b/src/mongo/db/update/delta_executor.h index db1ed84a7a62e..44c66020653d4 100644 --- a/src/mongo/db/update/delta_executor.h +++ b/src/mongo/db/update/delta_executor.h @@ -29,11 +29,15 @@ #pragma once -#include "mongo/db/update/update_executor.h" +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update/document_diff_serialization.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/update/delta_executor_test.cpp b/src/mongo/db/update/delta_executor_test.cpp index 1c49f4ec3cec8..f7d6b02baf452 100644 --- a/src/mongo/db/update/delta_executor_test.cpp +++ b/src/mongo/db/update/delta_executor_test.cpp @@ -28,189 +28,203 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/json.h" #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/field_ref_set.h" #include "mongo/db/update/delta_executor.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/update/document_diff_calculator.h" +#include "mongo/db/update_index_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace { +/** + * Return a bitmask where each bit indicates whether the matching position in the indexData + * argument is for an index that has been affected by the modification described by the logEntry + * argument. E.g., a return value of 0 means "no indexes are affected", a return value of 1 (2^0) + * means that the index whose UpdateIndexData descriptor is placed in the first position of the + * indexData argument is affected by the changes, a return value of 2 (2^1) points to the second + * entry in the indexData vector, a return value of 3 (1+2) indicates that both first and second + * entry in the idnexDaa vector are affected by the modification. 
+ */ +unsigned long getIndexAffectedFromLogEntry(std::vector indexData, + BSONObj logEntry) { + auto diff = update_oplog_entry::extractDiffFromOplogEntry(logEntry); + if (!diff) { + return (unsigned long)-1; + } + return mongo::doc_diff::anyIndexesMightBeAffected(*diff, indexData).to_ulong(); +} + TEST(DeltaExecutorTest, Delete) { BSONObj preImage(fromjson("{f1: {a: {b: {c: 1}, c: 1}}}")); - UpdateIndexData indexData; + UpdateIndexData indexData1, indexData2; constexpr bool mustCheckExistenceForInsertOperations = true; - indexData.addPath(FieldRef("p.a.b")); - indexData.addPath(FieldRef("f1.a.b")); + indexData1.addPath(FieldRef("p.a.b")); + indexData2.addPath(FieldRef("f1.a.b")); FieldRefSet fieldRefSet; { // When a path in the diff is a prefix of index path. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{d: {f1: false, f2: false, f3: false}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), BSONObj()); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When a path in the diff is same as index path. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{sf1: {sa: {d: {p: false, c: false, b: false}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When the index path is a prefix of a path in the diff. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {sb: {d: {c: false}}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: {}, c: 1}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // With common parent, but path diverges. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {d: {c: false}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: {c: 1}}}}")); - ASSERT(!result.indexesAffected); + ASSERT_EQ(0, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } } TEST(DeltaExecutorTest, Update) { BSONObj preImage(fromjson("{f1: {a: {b: {c: 1}, c: 1}}}")); - UpdateIndexData indexData; + UpdateIndexData indexData1, indexData2; constexpr bool mustCheckExistenceForInsertOperations = true; - indexData.addPath(FieldRef("p.a.b")); - indexData.addPath(FieldRef("f1.a.b")); + indexData1.addPath(FieldRef("p.a.b")); + indexData2.addPath(FieldRef("f1.a.b")); FieldRefSet fieldRefSet; { // When a path in the diff is a prefix of index path. 
auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{u: {f1: false, f2: false, f3: false}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: false, f2: false, f3: false}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When a path in the diff is same as index path. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {u: {p: false, c: false, b: false}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: false, c: false, p: false}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When the index path is a prefix of a path in the diff. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {sb: {u: {c: false}}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: {c: false}, c: 1}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // With common parent, but path diverges. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {u: {c: false}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: {c: 1}, c: false}}}")); - ASSERT(!result.indexesAffected); + ASSERT_EQ(0, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } } TEST(DeltaExecutorTest, Insert) { - UpdateIndexData indexData; + UpdateIndexData indexData1, indexData2; constexpr bool mustCheckExistenceForInsertOperations = true; - indexData.addPath(FieldRef("p.a.b")); + indexData1.addPath(FieldRef("p.a.b")); // 'UpdateIndexData' will canonicalize the path and remove all numeric components. So the '2' // and '33' components should not matter. - indexData.addPath(FieldRef("f1.2.a.33.b")); + indexData2.addPath(FieldRef("f1.2.a.33.b")); FieldRefSet fieldRefSet; { // When a path in the diff is a prefix of index path. auto doc = mutablebson::Document(BSONObj()); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{i: {f1: false, f2: false, f3: false}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: false, f2: false, f3: false}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When a path in the diff is same as index path. 
auto doc = mutablebson::Document(fromjson("{f1: {a: {c: true}}}}")); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {i: {p: false, c: false, b: false}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {p: false, c: false, b: false}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When the index path is a prefix of a path in the diff. auto doc = mutablebson::Document(fromjson("{f1: {a: {b: {c: {e: 1}}}}}")); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {sb: {sc: {i : {d: 2} }}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: {c: {e: 1, d: 2}}}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // With common parent, but path diverges. auto doc = mutablebson::Document(fromjson("{f1: {a: {b: {c: 1}}}}")); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {i: {c: 2}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: {c: 1}, c: 2}}}")); - ASSERT(!result.indexesAffected); + ASSERT_EQ(0, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } } @@ -226,24 +240,22 @@ TEST(DeltaExecutorTest, InsertNumericFieldNamesTopLevel) { { auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{i: {'0': false, '1': false, '2': false}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{'0': false, '1': false, '2': false}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(1, getIndexAffectedFromLogEntry({&indexData}, result.oplogEntry)); } { auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{i: {'0': false, '2': false}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{'0': false, '2': false}")); - ASSERT(!result.indexesAffected); + ASSERT_EQ(0, getIndexAffectedFromLogEntry({&indexData}, result.oplogEntry)); } } @@ -259,130 +271,121 @@ TEST(DeltaExecutorTest, InsertNumericFieldNamesNested) { { auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{sa: {i: {'0': false, '1': false, '2': false}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{a: {'0': false, '1': false, '2': false}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(1, getIndexAffectedFromLogEntry({&indexData}, result.oplogEntry)); } { auto doc = 
mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{sa: {i: {'0': false, '2': false}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{a: {'0': false, '2': false}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(1, getIndexAffectedFromLogEntry({&indexData}, result.oplogEntry)); } } TEST(DeltaExecutorTest, ArraysInIndexPath) { BSONObj preImage(fromjson("{f1: [{a: {b: {c: 1}, c: 1}}, 1]}")); - UpdateIndexData indexData; + UpdateIndexData indexData1, indexData2; constexpr bool mustCheckExistenceForInsertOperations = true; - indexData.addPath(FieldRef("p.a.b")); + indexData1.addPath(FieldRef("p.a.b")); // Numeric components will be ignored, so they should not matter. - indexData.addPath(FieldRef("f1.9.a.10.b")); + indexData2.addPath(FieldRef("f1.9.a.10.b")); FieldRefSet fieldRefSet; { // Test resize. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{sf1: {a: true, l: 1}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: [{a: {b: {c: 1}, c: 1}}]}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When the index path is a prefix of a path in the diff and also involves numeric // components along the way. The numeric components should always be ignored. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {a: true, s0: {sa: {sb: {i: {d: 1} }}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: [{a: {b: {c: 1, d: 1}, c: 1}}, 1]}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When inserting a sub-object into array, and the sub-object diverges from the index path. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {a: true, u2: {b: 1}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: [{a: {b: {c: 1}, c: 1}}, 1, {b:1}]}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // When a common array path element is updated, but the paths diverge at the last element. 
auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {a: true, s0: {sa: {d: {c: false} }}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: [{a: {b: {c: 1}}}, 1]}")); - ASSERT(!result.indexesAffected); + ASSERT_EQ(0, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } } TEST(DeltaExecutorTest, ArraysAfterIndexPath) { BSONObj preImage(fromjson("{f1: {a: {b: [{c: 1}, 2]}}}")); - UpdateIndexData indexData; + UpdateIndexData indexData1, indexData2; constexpr bool mustCheckExistenceForInsertOperations = true; - indexData.addPath(FieldRef("p.a.b")); + indexData1.addPath(FieldRef("p.a.b")); // 'UpdateIndexData' will canonicalize the path and remove all numeric components. So the '9' // and '10' components should not matter. - indexData.addPath(FieldRef("f1.9.a.10")); + indexData2.addPath(FieldRef("f1.9.a.10")); FieldRefSet fieldRefSet; { // Test resize. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; DeltaExecutor test(fromjson("{sf1: {sa: {sb: {a: true, l: 1}}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: [{c: 1}]}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // Updating a sub-array element. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {sb: {a: true, s0: {u: {c: 2}} }}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: [{c: 2}, 2]}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } { // Updating an array element. auto doc = mutablebson::Document(preImage); UpdateExecutor::ApplyParams params(doc.root(), fieldRefSet); - params.indexData = &indexData; auto test = DeltaExecutor(fromjson("{sf1: {sa: {sb: {a: true, u0: 1 }}}}"), mustCheckExistenceForInsertOperations); auto result = test.applyUpdate(params); ASSERT_BSONOBJ_BINARY_EQ(params.element.getDocument().getObject(), fromjson("{f1: {a: {b: [1, 2]}}}")); - ASSERT(result.indexesAffected); + ASSERT_EQ(2, getIndexAffectedFromLogEntry({&indexData1, &indexData2}, result.oplogEntry)); } } diff --git a/src/mongo/db/update/document_diff_applier.cpp b/src/mongo/db/update/document_diff_applier.cpp index a9349772bcb8d..246233fdbed10 100644 --- a/src/mongo/db/update/document_diff_applier.cpp +++ b/src/mongo/db/update/document_diff_applier.cpp @@ -27,14 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/field_ref.h" #include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update_index_data.h" - #include "mongo/stdx/variant.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" #include "mongo/util/string_map.h" namespace mongo::doc_diff { @@ -474,9 +493,8 @@ int32_t computeDamageOnArray(const BSONObj& preImageRoot, class DiffApplier { public: - DiffApplier(const UpdateIndexData* indexData, bool mustCheckExistenceForInsertOperations) - : _indexData(indexData), - _mustCheckExistenceForInsertOperations{mustCheckExistenceForInsertOperations} {} + DiffApplier(bool mustCheckExistenceForInsertOperations) + : _mustCheckExistenceForInsertOperations{mustCheckExistenceForInsertOperations} {} void applyDiffToObject(const BSONObj& preImage, FieldRef* path, @@ -492,7 +510,6 @@ class DiffApplier { for (auto&& elt : tables.fieldsToInsert) { builder->append(elt); FieldRef::FieldRefTempAppend tempAppend(*path, elt.fieldNameStringData()); - updateIndexesAffected(path); } return; } @@ -514,12 +531,10 @@ class DiffApplier { OverloadedVisitor{ [this, &path](Delete) { // Do not append anything. - updateIndexesAffected(path); }, [this, &path, &builder, &elt, &fieldsToSkipInserting](const Update& update) { builder->append(update.newElt); - updateIndexesAffected(path); fieldsToSkipInserting.insert(elt.fieldNameStringData()); }, @@ -548,7 +563,6 @@ class DiffApplier { // null and expect the future value to overwrite the value here. builder->appendNull(elt.fieldNameStringData()); - updateIndexesAffected(path); } // Note: There's no need to update 'fieldsToSkipInserting' here, because a @@ -577,7 +591,6 @@ class DiffApplier { if (isComponentPartOfCanonicalizedIndexPath || !alreadyDidUpdateIndexAffectedForBasePath || path->empty()) { FieldRef::FieldRefTempAppend tempAppend(*path, elt.fieldNameStringData()); - updateIndexesAffected(path); // If we checked whether the update affects indexes for a path where the tail // element is not considered part of the 'canonicalized' path (as defined by @@ -592,10 +605,6 @@ class DiffApplier { } } - bool indexesAffected() const { - return _indexesAffected; - } - private: /** * Given an (optional) member of the pre image array and a modification, apply the modification @@ -610,7 +619,6 @@ class DiffApplier { [this, &path, builder](const BSONElement& update) { invariant(!update.eoo()); builder->append(update); - updateIndexesAffected(path); }, [this, builder, &preImageValue, &path](auto reader) { if (!preImageValue) { @@ -618,7 +626,6 @@ class DiffApplier { // future oplog entry will either re-write the value of this array index // (or some parent) so we append a null and move on. builder->appendNull(); - updateIndexesAffected(path); return; } if constexpr (std::is_same_v) { @@ -639,7 +646,6 @@ class DiffApplier { // entry will either re-write the value of this array index (or some // parent) so we append a null and move on. 
builder->appendNull(); - updateIndexesAffected(path); }, }, modification); @@ -656,11 +662,6 @@ class DiffApplier { auto nextMod = reader->next(); BSONObjIterator preImageIt(arrayPreImage); - // If there is a resize of array, check if indexes are affected by the array modification. - if (resizeVal) { - updateIndexesAffected(path); - } - size_t idx = 0; for (; preImageIt.more() && (!resizeVal || idx < *resizeVal); ++idx, ++preImageIt) { auto idxAsStr = std::to_string(idx); @@ -684,7 +685,6 @@ class DiffApplier { nextMod = reader->next(); } else { // This field is not mentioned in the diff so we pad the post image with null. - updateIndexesAffected(path); builder->appendNull(); } } @@ -692,25 +692,16 @@ class DiffApplier { invariant(!resizeVal || *resizeVal == idx); } - void updateIndexesAffected(FieldRef* path) { - if (_indexData) { - _indexesAffected = _indexesAffected || _indexData->mightBeIndexed(*path); - } - } - - const UpdateIndexData* _indexData; bool _mustCheckExistenceForInsertOperations = true; - bool _indexesAffected = false; }; } // namespace -ApplyDiffOutput applyDiff(const BSONObj& pre, - const Diff& diff, - const UpdateIndexData* indexData, - bool mustCheckExistenceForInsertOperations) { +BSONObj applyDiff(const BSONObj& pre, + const Diff& diff, + bool mustCheckExistenceForInsertOperations) { DocumentDiffReader reader(diff); BSONObjBuilder out; - DiffApplier applier(indexData, mustCheckExistenceForInsertOperations); + DiffApplier applier(mustCheckExistenceForInsertOperations); FieldRef path; // Use size of pre + diff as an approximation for size needed for post object when the diff is @@ -722,7 +713,7 @@ ApplyDiffOutput applyDiff(const BSONObj& pre, out.bb().reserveBytes(estimatedSize); out.bb().claimReservedBytes(estimatedSize); applier.applyDiffToObject(pre, &path, &reader, &out); - return {out.obj(), applier.indexesAffected()}; + return out.obj(); } DamagesOutput computeDamages(const BSONObj& pre, diff --git a/src/mongo/db/update/document_diff_applier.h b/src/mongo/db/update/document_diff_applier.h index f9b760dc90847..d6b02e8827066 100644 --- a/src/mongo/db/update/document_diff_applier.h +++ b/src/mongo/db/update/document_diff_applier.h @@ -29,18 +29,16 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/damage_vector.h" #include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update_index_data.h" +#include "mongo/util/shared_buffer.h" namespace mongo { namespace doc_diff { -struct ApplyDiffOutput { - BSONObj postImage; - bool indexesAffected; -}; - struct DamagesOutput { const BSONObj preImage; SharedBuffer damageSource; @@ -48,17 +46,15 @@ struct DamagesOutput { }; /** - * Applies the diff to 'pre' and returns the post image. Throws if the diff is invalid. The - * 'indexData' parameter is optional, if provided computes whether the indexes are affected. - * The final, 'mustCheckExistenceForInsertOperations' parameter signals whether we must check if an - * inserted field already exists within a (sub)document. This should generally be set to true, - * unless the caller has knowledge of the pre-image and the diff, and can guarantee that we will not - * re-insert anything. + * Applies the diff to 'pre' and returns the post image. + * Throws if the diff is invalid. + * + * 'mustCheckExistenceForInsertOperations' parameter signals whether we must check if an inserted + * field already exists within a (sub)document. 
This should generally be set to true, unless the + * caller has knowledge of the pre-image and the diff, and can guarantee that we will not re-insert + * anything. */ -ApplyDiffOutput applyDiff(const BSONObj& pre, - const Diff& diff, - const UpdateIndexData* indexData, - bool mustCheckExistenceForInsertOperations); +BSONObj applyDiff(const BSONObj& pre, const Diff& diff, bool mustCheckExistenceForInsertOperations); /** * Computes the damage events from the diff for 'pre' and return the pre-image, damage source, and diff --git a/src/mongo/db/update/document_diff_applier_test.cpp b/src/mongo/db/update/document_diff_applier_test.cpp index f12dce43eb05d..251835ad70283 100644 --- a/src/mongo/db/update/document_diff_applier_test.cpp +++ b/src/mongo/db/update/document_diff_applier_test.cpp @@ -28,14 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/document_diff_test_helpers.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/update/document_diff_calculator.cpp b/src/mongo/db/update/document_diff_calculator.cpp index 626bcf955f56f..526b486bd4c69 100644 --- a/src/mongo/db/update/document_diff_calculator.cpp +++ b/src/mongo/db/update/document_diff_calculator.cpp @@ -27,10 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/field_ref.h" #include "mongo/db/update/document_diff_calculator.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" namespace mongo::doc_diff { namespace { @@ -211,48 +232,6 @@ class StringWrapper { StringData str; }; -template -bool anyIndexesMightBeAffected(const DiffNode* node, - const UpdateIndexData* indexData, - FieldRef* path) { - for (auto&& [field, child] : node->getChildren()) { - // The 'field' here can either be an integer or a string. 
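With index-impact tracking removed from both the calculator and the applier, the compute/apply round trip collapses to two calls. Below is a minimal sketch of the post-change surface, mirroring the round trip exercised in document_diff_test.cpp; it assumes the optional's payload is the `Diff` (`BSONObj`) alias, which is how the updated tests dereference it, and uses the padding constant those tests pass.

```cpp
// Minimal sketch of the reworked API (illustrative, not part of the patch).
#include <boost/optional.hpp>

#include "mongo/bson/json.h"
#include "mongo/db/update/document_diff_applier.h"
#include "mongo/db/update/document_diff_calculator.h"
#include "mongo/db/update/update_oplog_entry_serialization.h"

namespace mongo::doc_diff {

BSONObj roundTripExample() {
    const BSONObj pre = fromjson("{a: {b: 1}, c: 1}");
    const BSONObj post = fromjson("{a: {b: 1}, c: 2}");

    // computeOplogDiff() no longer takes an UpdateIndexData* and yields the diff
    // directly rather than a DiffResult; boost::none means no viable diff was
    // produced (e.g. it would not be smaller than the post image).
    boost::optional<Diff> diff = computeOplogDiff(
        pre, post, update_oplog_entry::kSizeOfDeltaOplogEntryMetadata);
    if (!diff) {
        return post;  // caller falls back to a whole-document replacement
    }

    // applyDiff() now returns the post image directly; whether indexes are
    // affected is no longer computed here.
    return applyDiff(pre, *diff, true /* mustCheckExistenceForInsertOperations */);
}

}  // namespace mongo::doc_diff
```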
- StringWrapper wrapper(field); - FieldRef::FieldRefTempAppend tempAppend(*path, wrapper.getStr()); - switch (child->type()) { - case diff_tree::NodeType::kDelete: - case diff_tree::NodeType::kUpdate: - case diff_tree::NodeType::kInsert: { - if (indexData && indexData->mightBeIndexed(*path)) { - return true; - } - break; - } - case diff_tree::NodeType::kDocumentSubDiff: { - if (anyIndexesMightBeAffected( - checked_cast(child.get()), - indexData, - path)) { - return true; - } - break; - } - case diff_tree::NodeType::kArray: { - auto* arrayNode = checked_cast(child.get()); - if ((arrayNode->getResize() && indexData && indexData->mightBeIndexed(*path)) || - anyIndexesMightBeAffected(arrayNode, indexData, path)) { - return true; - } - break; - } - case diff_tree::NodeType::kDocumentInsert: { - MONGO_UNREACHABLE; - } - } - } - return false; -} - /** * Appends the given element to the given BSONObjBuilder. If the element is an object, sets the * value of the most inner field(s) to 'innerValue'. Otherwise, sets the value of the field to @@ -443,17 +422,11 @@ void anyIndexesMightBeAffected(ArrayDiffReader* reader, } } // namespace -boost::optional computeOplogDiff(const BSONObj& pre, - const BSONObj& post, - size_t padding, - const UpdateIndexData* indexData) { +boost::optional computeOplogDiff(const BSONObj& pre, const BSONObj& post, size_t padding) { if (auto diffNode = computeDocDiff(pre, post, false /* ignoreSizeLimit */, padding)) { auto diff = diffNode->serialize(); if (diff.objsize() < post.objsize()) { - FieldRef path; - return DiffResult{diff, - anyIndexesMightBeAffected( - diffNode.get(), indexData, &path)}; + return diff; } } return {}; diff --git a/src/mongo/db/update/document_diff_calculator.h b/src/mongo/db/update/document_diff_calculator.h index 6eb5b603c490b..9a3f4d9bcfab5 100644 --- a/src/mongo/db/update/document_diff_calculator.h +++ b/src/mongo/db/update/document_diff_calculator.h @@ -30,6 +30,10 @@ #pragma once #include +#include +#include +#include +#include #include "mongo/bson/bsonobj.h" #include "mongo/db/update/document_diff_serialization.h" @@ -37,11 +41,6 @@ namespace mongo::doc_diff { -struct DiffResult { - Diff diff; - bool indexesAffected; // Whether the index data need to be modified if the diff is applied. -}; - /** * Returns the oplog v2 diff between the given 'pre' and 'post' images. The diff has the following * format: @@ -55,14 +54,11 @@ struct DiffResult { * } * If the size of the computed diff is larger than the 'post' image then the function returns * 'boost::none'. The 'paddingForDiff' represents the additional size that needs be added to the - * size of the diff, while comparing whether the diff is viable. If any paths in 'indexData' are - * affected by the generated diff, then the 'indexesAffected' field in the output will be set to - * true, false otherwise. + * size of the diff, while comparing whether the diff is viable. */ -boost::optional computeOplogDiff(const BSONObj& pre, - const BSONObj& post, - size_t paddingForDiff, - const UpdateIndexData* indexData); +boost::optional computeOplogDiff(const BSONObj& pre, + const BSONObj& post, + size_t paddingForDiff); /** * Returns the inline diff between the given 'pre' and 'post' images. 
The diff has the same schema diff --git a/src/mongo/db/update/document_diff_calculator_test.cpp b/src/mongo/db/update/document_diff_calculator_test.cpp index 3cf9e680ea169..8953f1a353d4a 100644 --- a/src/mongo/db/update/document_diff_calculator_test.cpp +++ b/src/mongo/db/update/document_diff_calculator_test.cpp @@ -27,18 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include + +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/update/document_diff_calculator.h" -#include "mongo/logv2/log.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -53,9 +58,9 @@ void assertBsonObjEqualUnordered(const BSONObj& lhs, const BSONObj& rhs) { TEST(DocumentDiffCalculatorTest, SameObjectsNoDiff) { auto assertDiffEmpty = [](const BSONObj& doc) { - auto oplogDiff = doc_diff::computeOplogDiff(doc, doc, 5, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(doc, doc, 5); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, BSONObj()); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, BSONObj()); auto inlineDiff = doc_diff::computeInlineDiff(doc, doc); ASSERT(inlineDiff); ASSERT_BSONOBJ_BINARY_EQ(*inlineDiff, BSONObj()); @@ -72,7 +77,7 @@ TEST(DocumentDiffCalculatorTest, SameObjectsNoDiff) { TEST(DocumentDiffCalculatorTest, EmptyObjsNoDiff) { auto preObj = BSONObj(); auto postObj = BSONObj(); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -82,9 +87,9 @@ TEST(DocumentDiffCalculatorTest, EmptyObjsNoDiff) { TEST(DocumentDiffCalculatorTest, SimpleInsert) { auto preObj = fromjson("{a: {b: 1}}"); auto postObj = fromjson("{a: {b: 1}, c: 1}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{i: {c: 1}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{i: {c: 1}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{c: 'i'}")); @@ -93,9 +98,9 @@ TEST(DocumentDiffCalculatorTest, SimpleInsert) { TEST(DocumentDiffCalculatorTest, SimpleUpdate) { auto preObj = fromjson("{a: {b: 1}, c: 1}}"); auto postObj = fromjson("{a: {b: 1}, c: 2}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{u: {c: 2}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{u: {c: 2}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{c: 'u'}")); @@ -104,9 +109,9 @@ TEST(DocumentDiffCalculatorTest, SimpleUpdate) { TEST(DocumentDiffCalculatorTest, SimpleDelete) { auto preObj = fromjson("{a: {b: 
1}, c: 1}}"); auto postObj = fromjson("{a: {b: 1}}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{d: {c: false}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{d: {c: false}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{c: 'd'}")); @@ -115,9 +120,9 @@ TEST(DocumentDiffCalculatorTest, SimpleDelete) { TEST(DocumentDiffCalculatorTest, SimpleInsertNestedSingle) { auto preObj = fromjson("{a: {}, e: 1, f: 1}"); auto postObj = fromjson("{a: {b: {c: {d: 1}}}, e: 1, f: 1}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{u: {a: {b: {c: {d: 1}}}}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{u: {a: {b: {c: {d: 1}}}}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{a: {b: {c: {d: 'i'}}}}")); @@ -126,10 +131,9 @@ TEST(DocumentDiffCalculatorTest, SimpleInsertNestedSingle) { TEST(DocumentDiffCalculatorTest, SimpleInsertNestedMultiple) { auto preObj = fromjson("{a: 1, g: 1, h: 1}"); auto postObj = fromjson("{a: {b: 2, c: [2], d: {e: 2}, f: 1}, g: 1, h: 1}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, - fromjson("{u: {a: {b: 2, c: [2], d: {e: 2}, f: 1}}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{u: {a: {b: 2, c: [2], d: {e: 2}, f: 1}}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, @@ -139,9 +143,9 @@ TEST(DocumentDiffCalculatorTest, SimpleInsertNestedMultiple) { TEST(DocumentDiffCalculatorTest, SimpleUpdateNestedObj) { auto preObj = fromjson("{a: {b: {c: {d: 1, e: 1, f: 1}}}}"); auto postObj = fromjson("{a: {b: {c: {d: 2, e: 1, f: 1}}}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sa: {sb: {sc: {u: {d: 2}}}}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sa: {sb: {sc: {u: {d: 2}}}}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{a: {b: {c: {d: 'u'}}}}")); @@ -150,9 +154,9 @@ TEST(DocumentDiffCalculatorTest, SimpleUpdateNestedObj) { TEST(DocumentDiffCalculatorTest, SimpleUpdateNestedArray) { auto preObj = fromjson("{a: {b: {c: {d: [1], e: 1, f: 1}}}}"); auto postObj = fromjson("{a: {b: {c: {d: [2], e: 1, f: 1}}}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sa: {sb: {sc: {u: {d: [2]}}}}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sa: {sb: {sc: {u: {d: [2]}}}}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{a: {b: {c: {d: 'u'}}}}")); @@ -161,9 +165,9 @@ 
TEST(DocumentDiffCalculatorTest, SimpleUpdateNestedArray) { TEST(DocumentDiffCalculatorTest, SimpleDeleteNestedObj) { auto preObj = fromjson("{a: {b: {c: {d: 1}, e: 1, f: 1}}}"); auto postObj = fromjson("{a: {b: {c: {}, e: 1, f: 1}}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sa: {sb: {u: {c: {}}}}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sa: {sb: {u: {c: {}}}}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{a: {b: {c: {d: 'd'}}}}")); @@ -172,7 +176,7 @@ TEST(DocumentDiffCalculatorTest, SimpleDeleteNestedObj) { TEST(DocumentDiffCalculatorTest, ReplaceAllFieldsLargeDelta) { auto preObj = fromjson("{a: 1, b: 2, c: 3}"); auto postObj = fromjson("{A: 1, B: 2, C: 3}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -183,7 +187,7 @@ TEST(DocumentDiffCalculatorTest, ReplaceAllFieldsLargeDelta) { TEST(DocumentDiffCalculatorTest, InsertFrontFieldLargeDelta) { auto preObj = fromjson("{b: 1, c: 1, d: 1}"); auto postObj = fromjson("{a: 1, b: 1, c: 1, d: 1}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -194,7 +198,7 @@ TEST(DocumentDiffCalculatorTest, EmptyPostObjLargeDelta) { { auto preObj = fromjson("{b: 1}"); auto postObj = BSONObj(); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -203,7 +207,7 @@ TEST(DocumentDiffCalculatorTest, EmptyPostObjLargeDelta) { { auto preObj = fromjson("{a: {b: 1}}"); auto postObj = fromjson("{}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -212,7 +216,7 @@ TEST(DocumentDiffCalculatorTest, EmptyPostObjLargeDelta) { { auto preObj = fromjson("{b: [1, 2, 3]}"); auto postObj = BSONObj(); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -221,7 +225,7 @@ TEST(DocumentDiffCalculatorTest, EmptyPostObjLargeDelta) { { auto preObj = fromjson("{b: {}}"); auto postObj = BSONObj(); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -233,7 +237,7 @@ TEST(DocumentDiffCalculatorTest, EmptyPreObjLargeDelta) { { auto preObj = BSONObj(); auto postObj = fromjson("{b: 1}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = 
doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -242,7 +246,7 @@ TEST(DocumentDiffCalculatorTest, EmptyPreObjLargeDelta) { { auto preObj = fromjson("{}"); auto postObj = fromjson("{a: {b: 1}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -251,7 +255,7 @@ TEST(DocumentDiffCalculatorTest, EmptyPreObjLargeDelta) { { auto preObj = BSONObj(); auto postObj = fromjson("{b: [1, 2, 3]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -260,7 +264,7 @@ TEST(DocumentDiffCalculatorTest, EmptyPreObjLargeDelta) { { auto preObj = BSONObj(); auto postObj = fromjson("{b: {}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -271,16 +275,16 @@ TEST(DocumentDiffCalculatorTest, EmptyPreObjLargeDelta) { TEST(DocumentDiffCalculatorTest, BSONSizeLimitLargeDelta) { auto preObj = BSON(std::string(BSONObjMaxUserSize, 'a') << 1); auto postObj = BSONObj(); - ASSERT_FALSE(doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr)); + ASSERT_FALSE(doc_diff::computeOplogDiff(preObj, postObj, 0)); ASSERT_FALSE(doc_diff::computeInlineDiff(preObj, postObj)); } TEST(DocumentDiffCalculatorTest, DeleteAndInsertFieldAtTheEnd) { auto preObj = fromjson("{a: 1, b: 'valueString', c: 3, d: 4}"); auto postObj = fromjson("{b: 'valueString', c: 3, d: 4, a: 1}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{i: {a: 1}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{i: {a: 1}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{a: 'i'}")); @@ -290,9 +294,9 @@ TEST(DocumentDiffCalculatorTest, DeletesRecordedInAscendingOrderOfFieldNames) { { auto preObj = fromjson("{b: 1, a: 2, c: 3, d: 4, e: 'valueString'}"); auto postObj = fromjson("{c: 3, d: 4, e: 'valueString'}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{d: {a: false, b: false}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{d: {a: false, b: false}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{a: 'd', b: 'd'}")); @@ -303,9 +307,9 @@ TEST(DocumentDiffCalculatorTest, DeletesRecordedInAscendingOrderOfFieldNames) { auto preObj = fromjson("{b: 1, a: 2, c: 'value', d: 'valueString', e: 'valueString', g: 1, f: 1}"); auto postObj = fromjson("{c: 'value', d: 'valueString', e: 'valueString'}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 
15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{d: {g: false, f: false, a: false, b: false}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -320,23 +324,23 @@ TEST(DocumentDiffCalculatorTest, DataTypeChangeRecorded) { fromjson("{a: 'valueString', b: 2, c: {subField1: 1, subField2: 3}, d: 4}"); const auto objWithLongValue = fromjson("{a: 'valueString', b: 2, c: {subField1: 1, subField2: NumberLong(3)}, d: 4}"); - auto oplogDiff = doc_diff::computeOplogDiff(objWithDoubleValue, objWithIntValue, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(objWithDoubleValue, objWithIntValue, 15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sc: {u: {subField2: 3}} }")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sc: {u: {subField2: 3}} }")); auto inlineDiff = doc_diff::computeInlineDiff(objWithDoubleValue, objWithIntValue); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{c: {subField2: 'u'}}")); - oplogDiff = doc_diff::computeOplogDiff(objWithIntValue, objWithLongValue, 15, nullptr); + oplogDiff = doc_diff::computeOplogDiff(objWithIntValue, objWithLongValue, 15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sc: {u: {subField2: NumberLong(3)}} }")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sc: {u: {subField2: NumberLong(3)}} }")); inlineDiff = doc_diff::computeInlineDiff(objWithIntValue, objWithLongValue); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{c: {subField2: 'u'}}")); - oplogDiff = doc_diff::computeOplogDiff(objWithLongValue, objWithDoubleValue, 15, nullptr); + oplogDiff = doc_diff::computeOplogDiff(objWithLongValue, objWithDoubleValue, 15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sc: {u: {subField2: 3.0}} }")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sc: {u: {subField2: 3.0}} }")); inlineDiff = doc_diff::computeInlineDiff(objWithLongValue, objWithDoubleValue); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{c: {subField2: 'u'}}")); @@ -346,7 +350,7 @@ TEST(DocumentDiffCalculatorTest, NullAndMissing) { { auto preObj = fromjson("{a: null}"); auto postObj = fromjson("{}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15); ASSERT_FALSE(oplogDiff); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -356,9 +360,9 @@ TEST(DocumentDiffCalculatorTest, NullAndMissing) { { auto preObj = fromjson("{a: null, b: undefined, c: null, d: 'someValueStr'}"); auto postObj = fromjson("{a: null, b: undefined, c: undefined, d: 'someValueStr'}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{u: {c: undefined}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{u: {c: undefined}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{c: 'u'}")); @@ -368,9 +372,9 @@ TEST(DocumentDiffCalculatorTest, NullAndMissing) { TEST(DocumentDiffCalculatorTest, FieldOrder) { auto preObj = fromjson("{a: 1, b: 2, c: {p: 1, q: 1, s: 1, r: 2}, d: 4}"); auto postObj = fromjson("{a: 1, b: 2, c: {p: 1, q: 1, r: 2, s: 1}, d: 4}"); - auto oplogDiff 
= doc_diff::computeOplogDiff(preObj, postObj, 10, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 10); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sc: {i: {s: 1}} }")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sc: {i: {s: 1}} }")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{c: {s: 'i'}}")); @@ -379,9 +383,9 @@ TEST(DocumentDiffCalculatorTest, FieldOrder) { TEST(DocumentDiffCalculatorTest, SimpleArrayPush) { auto preObj = fromjson("{field1: 'abcd', field2: [1, 2, 3]}"); auto postObj = fromjson("{field1: 'abcd', field2: [1, 2, 3, 4]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 5, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 5); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sfield2: {a: true, 'u3': 4}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sfield2: {a: true, 'u3': 4}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{field2: 'u'}")); @@ -391,11 +395,11 @@ TEST(DocumentDiffCalculatorTest, NestedArray) { { auto preObj = fromjson("{field1: 'abcd', field2: [1, 2, 3, [[2]]]}"); auto postObj = fromjson("{field1: 'abcd', field2: [1, 2, 3, [[4]]]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); // When the sub-array delta is larger than the size of the sub-array, we record it as an // update operation. - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sfield2: {a: true, 'u3': [[4]]}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sfield2: {a: true, 'u3': [[4]]}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{field2: 'u'}")); @@ -406,10 +410,10 @@ TEST(DocumentDiffCalculatorTest, NestedArray) { "{field1: 'abcd', field2: [1, 2, 3, [1, 'longString', [2], 4, 5, 6], 5, 5, 5]}"); auto postObj = fromjson("{field1: 'abcd', field2: [1, 2, 3, [1, 'longString', [4], 4], 5, 6]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); ASSERT_BSONOBJ_BINARY_EQ( - oplogDiff->diff, + *oplogDiff, fromjson("{sfield2: {a: true, l: 6, 's3': {a: true, l: 4, 'u2': [4]}, 'u5': 6}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -422,10 +426,10 @@ TEST(DocumentDiffCalculatorTest, SubObjHasEmptyFieldName) { fromjson("{'': '1', field2: [1, 2, 3, {'': {'': 1, padding: 'largeFieldValue'}}]}"); auto postObj = fromjson("{'': '2', field2: [1, 2, 3, {'': {'': 2, padding: 'largeFieldValue'}}]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15); ASSERT(oplogDiff); ASSERT_BSONOBJ_BINARY_EQ( - oplogDiff->diff, fromjson("{u: {'': '2'}, sfield2: {a: true, s3: {s: {u: {'': 2}}} }}")); + *oplogDiff, fromjson("{u: {'': '2'}, sfield2: {a: true, s3: {s: {u: {'': 2}}} }}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{'': 'u', field2: 'u'}")); @@ -438,10 +442,10 @@ TEST(DocumentDiffCalculatorTest, SubObjInSubArrayUpdateElements) { auto postObj = fromjson( 
"{field1: 'abcd', field2: [1, 2, 3, {'field3': " "['veryLargeStringValueveryLargeStringValue', 2, 4, 3, 5]}]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); ASSERT_BSONOBJ_BINARY_EQ( - oplogDiff->diff, + *oplogDiff, fromjson("{sfield2: {a: true, s3: {sfield3: {a: true, u2: 4, u3: 3, u4: 5}} }}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -453,11 +457,10 @@ TEST(DocumentDiffCalculatorTest, SubObjInSubArrayDeleteElements) { fromjson("{field1: 'abcd', field2: [1, 2, 3, {'field3': ['largeString', 2, 3, 4, 5]}]}"); auto postObj = fromjson("{field1: 'abcd', field2: [1, 2, 3, {'field3': ['largeString', 2, 3, 5]}]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15); ASSERT(oplogDiff); ASSERT_BSONOBJ_BINARY_EQ( - oplogDiff->diff, - fromjson("{sfield2: {a: true, 's3': {sfield3: {a: true, l: 4, 'u3': 5}} }}")); + *oplogDiff, fromjson("{sfield2: {a: true, 's3': {sfield3: {a: true, l: 4, 'u3': 5}} }}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{field2: 'u'}")); @@ -472,9 +475,9 @@ TEST(DocumentDiffCalculatorTest, NestedSubObjs) { "{level1Field1: 'abcd', level1Field2: {level2Field1: {level3Field1: {q: 1}, " "level3Field2: {q: 1}}, level2Field2: 2}, level1Field3: '3', level1Field4: " "{level2Field1: {level3Field1: {q: 1}, level3Field2: {q: 1}}} }"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{u: {level1Field3: '3'}, slevel1Field2: {slevel2Field1: {u: " "{level3Field1: {q: 1}}}}, slevel1Field4: {slevel2Field1: " "{u: {level3Field1: {q: 1}}}} }")); @@ -490,9 +493,9 @@ TEST(DocumentDiffCalculatorTest, NestedSubObjs) { TEST(DocumentDiffCalculatorTest, SubArrayInSubArrayLargeDelta) { auto preObj = fromjson("{field1: 'abcd', field2: [1, 2, 3, {field3: [2, 3, 4, 5]}]}"); auto postObj = fromjson("{field1: 'abcd', field2: [1, 2, 3, {field3: [1, 2, 3, 4, 5]}]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 15); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sfield2: {a: true, 'u3': {field3: [1, 2, 3, 4, 5]}} }")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); @@ -502,10 +505,9 @@ TEST(DocumentDiffCalculatorTest, SubArrayInSubArrayLargeDelta) { TEST(DocumentDiffCalculatorTest, SubObjInSubArrayLargeDelta) { auto preObj = fromjson("{field1: [1, 2, 3, 4, 5, 6, {a: 1, b: 2, c: 3, d: 4}, 7]}"); auto postObj = fromjson("{field1: [1, 2, 3, 4, 5, 6, {p: 1, q: 2}, 7]}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, - fromjson("{sfield1: {a: true, 'u6': {p: 1, q: 2}} }")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sfield1: {a: true, 'u6': {p: 1, q: 2}} }")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{field1: 
'u'}")); @@ -514,9 +516,9 @@ TEST(DocumentDiffCalculatorTest, SubObjInSubArrayLargeDelta) { TEST(DocumentDiffCalculatorTest, SubObjInSubObjLargeDelta) { auto preObj = fromjson("{field: {p: 'someString', q: 2, r: {a: 1, b: 2, c: 3, 'd': 4}, s: 3}}"); auto postObj = fromjson("{field: {p: 'someString', q: 2, r: {p: 1, q: 2}, s: 3}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sfield: {u: {r: {p: 1, q: 2} }} }")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sfield: {u: {r: {p: 1, q: 2} }} }")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered( @@ -526,9 +528,9 @@ TEST(DocumentDiffCalculatorTest, SubObjInSubObjLargeDelta) { TEST(DocumentDiffCalculatorTest, SubArrayInSubObjLargeDelta) { auto preObj = fromjson("{field: {p: 'someString', q: 2, r: [1, 3, 4, 5], s: 3}}"); auto postObj = fromjson("{field: {p: 'someString', q: 2, r: [1, 2, 3, 4], s: 3}}"); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{sfield: {u: {r: [1, 2, 3, 4]}} }")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{sfield: {u: {r: [1, 2, 3, 4]}} }")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{field: {r: 'u'}}")); @@ -566,9 +568,9 @@ TEST(DocumentDiffCalculatorTest, DeeplyNestObjectGenerateDiff) { auto postObj = postBob.done(); // Just deleting the deeply nested sub-object should give the post object. - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, fromjson("{d: {subObj: false}}")); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, fromjson("{d: {subObj: false}}")); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); assertBsonObjEqualUnordered(*inlineDiff, fromjson("{subObj: 'd'}")); @@ -579,9 +581,9 @@ TEST(DocumentDiffCalculatorTest, DeeplyNestObjectGenerateDiff) { auto postObj2 = postBob2.done(); // Deleting the deepest field should give the post object. 
- oplogDiff = doc_diff::computeOplogDiff(preObj, postObj2, 0, nullptr); + oplogDiff = doc_diff::computeOplogDiff(preObj, postObj2, 0); ASSERT(oplogDiff); - ASSERT_OK(validateBSON(oplogDiff->diff)); + ASSERT_OK(validateBSON(*oplogDiff)); BSONObjBuilder expectedOplogDiffBuilder; buildDeepObj(&expectedOplogDiffBuilder, "ssubObj", @@ -592,7 +594,7 @@ TEST(DocumentDiffCalculatorTest, DeeplyNestObjectGenerateDiff) { builder->append("d", BSON("subObj" << false)); } }); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, expectedOplogDiffBuilder.obj()); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, expectedOplogDiffBuilder.obj()); inlineDiff = doc_diff::computeInlineDiff(preObj, postObj2); ASSERT(inlineDiff); @@ -632,9 +634,9 @@ TEST(DocumentDiffCalculatorTest, DeepestObjectSubDiff) { auto postObj = postBob.done(); ASSERT_OK(validateBSON(postObj)); - auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0, nullptr); + auto oplogDiff = doc_diff::computeOplogDiff(preObj, postObj, 0); ASSERT(oplogDiff); - ASSERT_OK(validateBSON(oplogDiff->diff)); + ASSERT_OK(validateBSON(*oplogDiff)); BSONObjBuilder expectedOplogDiffBuilder; buildDeepObj(&expectedOplogDiffBuilder, "ssubObj", @@ -645,7 +647,7 @@ TEST(DocumentDiffCalculatorTest, DeepestObjectSubDiff) { builder->append("u", BSON("field" << 2)); } }); - ASSERT_BSONOBJ_BINARY_EQ(oplogDiff->diff, expectedOplogDiffBuilder.obj()); + ASSERT_BSONOBJ_BINARY_EQ(*oplogDiff, expectedOplogDiffBuilder.obj()); auto inlineDiff = doc_diff::computeInlineDiff(preObj, postObj); ASSERT(inlineDiff); diff --git a/src/mongo/db/update/document_diff_serialization.cpp b/src/mongo/db/update/document_diff_serialization.cpp index 55975a56feacf..cd7705a27bf0d 100644 --- a/src/mongo/db/update/document_diff_serialization.cpp +++ b/src/mongo/db/update/document_diff_serialization.cpp @@ -27,17 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/update/document_diff_serialization.h" - +#include +#include +#include #include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +// IWYU pragma: no_include "boost/move/algo/detail/set_difference.hpp" +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/update/document_diff_serialization.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" -#include "mongo/util/string_map.h" -#include -#include namespace mongo { namespace diff_tree { @@ -80,7 +95,9 @@ Node* DocumentSubDiffNode::addChild(StringData fieldName, std::unique_ptr sizeTracker.addEntry(fieldName.size(), nodePtr); auto result = children.insert({fieldName.toString(), std::move(node)}); - invariant(result.second); + uassert(7693400, + str::stream() << "Document already has a field named '" << fieldName << "'", + result.second); StringData storedFieldName = result.first->first; switch (nodePtr->type()) { case (NodeType::kArray): diff --git a/src/mongo/db/update/document_diff_serialization.h b/src/mongo/db/update/document_diff_serialization.h index 907638f413db8..3a0679dd4ce75 100644 --- a/src/mongo/db/update/document_diff_serialization.h +++ b/src/mongo/db/update/document_diff_serialization.h @@ -29,13 +29,33 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" #include "mongo/stdx/variant.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/itoa.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep +#include "mongo/util/str.h" #include "mongo/util/string_map.h" // This file contains classes for serializing document diffs to a format that can be stored in the diff --git a/src/mongo/db/update/document_diff_serialization_test.cpp b/src/mongo/db/update/document_diff_serialization_test.cpp index 73c3c6a78444f..5726872d4fe8a 100644 --- a/src/mongo/db/update/document_diff_serialization_test.cpp +++ b/src/mongo/db/update/document_diff_serialization_test.cpp @@ -28,13 +28,23 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/document_diff_test_helpers.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/update/document_diff_test.cpp b/src/mongo/db/update/document_diff_test.cpp index bd409809d04c7..6a37258637778 100644 --- a/src/mongo/db/update/document_diff_test.cpp +++ b/src/mongo/db/update/document_diff_test.cpp @@ -28,17 +28,26 @@ */ -#include 
"mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/bson/bson_depth.h" -#include "mongo/bson/json.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/update/document_diff_calculator.h" #include "mongo/db/update/document_diff_test_helpers.h" #include "mongo/db/update/update_oplog_entry_serialization.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/random.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -141,16 +150,13 @@ void runTest(TestOptions* options) { std::vector diffs; diffs.reserve(options->documents.size() - 1); for (size_t i = 1; i < options->documents.size(); ++i) { - const auto diffOutput = - computeOplogDiff(preDoc, - options->documents[i], - update_oplog_entry::kSizeOfDeltaOplogEntryMetadata, - nullptr); + const auto diffOutput = computeOplogDiff( + preDoc, options->documents[i], update_oplog_entry::kSizeOfDeltaOplogEntryMetadata); ASSERT(diffOutput); - diffs.push_back(diffOutput->diff); + diffs.push_back(*diffOutput); const auto postObj = applyDiffTestHelper( - preDoc, diffOutput->diff, options->mustCheckExistenceForInsertOperations); + preDoc, *diffOutput, options->mustCheckExistenceForInsertOperations); ASSERT_BSONOBJ_BINARY_EQ(options->documents[i], postObj); if (options->mustCheckExistenceForInsertOperations) { @@ -158,7 +164,7 @@ void runTest(TestOptions* options) { ASSERT_BSONOBJ_BINARY_EQ( postObj, applyDiffTestHelper( - postObj, diffOutput->diff, options->mustCheckExistenceForInsertOperations)); + postObj, *diffOutput, options->mustCheckExistenceForInsertOperations)); } preDoc = options->documents[i]; diff --git a/src/mongo/db/update/document_diff_test_helpers.cpp b/src/mongo/db/update/document_diff_test_helpers.cpp index be2d05ab8d63e..eb76bfdc84945 100644 --- a/src/mongo/db/update/document_diff_test_helpers.cpp +++ b/src/mongo/db/update/document_diff_test_helpers.cpp @@ -27,13 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/update/document_diff_test_helpers.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/update/document_diff_applier.h" +#include "mongo/db/update/document_diff_test_helpers.h" #include "mongo/platform/random.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo::doc_diff { @@ -121,8 +130,7 @@ BSONObj generateDoc(PseudoRandom* rng, MutableDocument* doc, int depthLevel) { BSONObj applyDiffTestHelper(BSONObj preImage, BSONObj diff, bool mustCheckExistenceForInsertOperations) { - UpdateIndexData indexData; - return applyDiff(preImage, diff, &indexData, mustCheckExistenceForInsertOperations).postImage; + return applyDiff(preImage, diff, mustCheckExistenceForInsertOperations); } } // namespace mongo::doc_diff diff --git a/src/mongo/db/update/document_diff_test_helpers.h b/src/mongo/db/update/document_diff_test_helpers.h index e5433014b9b4e..bbb14b04d8149 100644 --- a/src/mongo/db/update/document_diff_test_helpers.h +++ b/src/mongo/db/update/document_diff_test_helpers.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/bson/mutable/document.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" diff --git a/src/mongo/db/update/field_checker.h b/src/mongo/db/update/field_checker.h index 1bc75b51709bb..14a7324ab2d41 100644 --- a/src/mongo/db/update/field_checker.h +++ b/src/mongo/db/update/field_checker.h @@ -29,7 +29,10 @@ #pragma once +#include + #include "mongo/base/status.h" +#include "mongo/base/string_data.h" namespace mongo { diff --git a/src/mongo/db/update/field_checker_test.cpp b/src/mongo/db/update/field_checker_test.cpp index d95b2bc681f6a..99d13afdf4d77 100644 --- a/src/mongo/db/update/field_checker_test.cpp +++ b/src/mongo/db/update/field_checker_test.cpp @@ -29,10 +29,13 @@ #include "mongo/db/update/field_checker.h" +#include + #include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/db/field_ref.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/update/modifier_node.cpp b/src/mongo/db/update/modifier_node.cpp index d1872b411ac24..daf77468e8242 100644 --- a/src/mongo/db/update/modifier_node.cpp +++ b/src/mongo/db/update/modifier_node.cpp @@ -27,13 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/update/modifier_node.h" - +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/field_ref_set.h" +#include "mongo/db/update/modifier_node.h" #include "mongo/db/update/path_support.h" #include "mongo/db/update/storage_validation.h" +#include "mongo/db/update/update_executor.h" +#include "mongo/util/str.h" namespace mongo { @@ -192,11 +207,6 @@ UpdateExecutor::ApplyResult ModifierNode::applyToExistingElement( ApplyResult applyResult; - if (!applyParams.indexData || - !applyParams.indexData->mightBeIndexed(updateNodeApplyParams.pathTaken->fieldRef())) { - applyResult.indexesAffected = false; - } - const uint32_t recursionLevel = updateNodeApplyParams.pathTaken->size(); validateUpdate(applyParams.element, leftSibling, @@ -309,20 +319,6 @@ UpdateExecutor::ApplyResult ModifierNode::applyToNonexistentElement( } invariant(fullPathTypes.size() == fullPathFr.numParts()); - // Determine if indexes are affected. If we did not create a new element in an array, check - // whether the full path affects indexes. If we did create a new element in an array, check - // whether the array itself might affect any indexes. This is necessary because if there is - // an index {"a.b": 1}, and we set "a.1.c" and implicitly create an array element in "a", - // then we may need to add a null key to the index, even though "a.1.c" does not appear to - // affect the index. - if (!applyParams.indexData || - !applyParams.indexData->mightBeIndexed( - applyParams.element.getType() != BSONType::Array - ? 
fullPathFr - : updateNodeApplyParams.pathTaken->fieldRef())) { - applyResult.indexesAffected = false; - } - if (auto logBuilder = updateNodeApplyParams.logBuilder) { logUpdate(logBuilder, RuntimeUpdatePath(std::move(fullPathFr), std::move(fullPathTypes)), @@ -380,6 +376,7 @@ void ModifierNode::validateUpdate(mutablebson::ConstElement updatedElement, recursionLevel, false, /* allowTopLevelDollarPrefixedFields */ validateForStorage, + false, /* isEmbeddedInIdField */ containsDotsAndDollarsField); } diff --git a/src/mongo/db/update/modifier_node.h b/src/mongo/db/update/modifier_node.h index 42bb70ee336c5..4124440666f4f 100644 --- a/src/mongo/db/update/modifier_node.h +++ b/src/mongo/db/update/modifier_node.h @@ -29,6 +29,8 @@ #pragma once +#include +#include #include #include #include @@ -36,7 +38,15 @@ #include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/const_element.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/update/log_builder_interface.h" +#include "mongo/db/update/runtime_update_path.h" #include "mongo/db/update/update_leaf_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/update/modifier_table.cpp b/src/mongo/db/update/modifier_table.cpp index 9e181b40dc1b6..da5b2bb2bd585 100644 --- a/src/mongo/db/update/modifier_table.cpp +++ b/src/mongo/db/update/modifier_table.cpp @@ -33,9 +33,12 @@ #include #include -#include "mongo/base/init.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/simple_string_data_comparator.h" -#include "mongo/base/status.h" +#include "mongo/base/string_data_comparator_interface.h" #include "mongo/db/update/addtoset_node.h" #include "mongo/db/update/arithmetic_node.h" #include "mongo/db/update/bit_node.h" @@ -49,6 +52,7 @@ #include "mongo/db/update/rename_node.h" #include "mongo/db/update/set_node.h" #include "mongo/db/update/unset_node.h" +#include "mongo/db/update/update_node.h" namespace mongo { diff --git a/src/mongo/db/update/modifier_table.h b/src/mongo/db/update/modifier_table.h index 11b58d5d79c64..c8f6ac90d526e 100644 --- a/src/mongo/db/update/modifier_table.h +++ b/src/mongo/db/update/modifier_table.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/string_data.h" #include "mongo/db/update/update_leaf_node.h" namespace mongo { diff --git a/src/mongo/db/update/modifier_table_test.cpp b/src/mongo/db/update/modifier_table_test.cpp index 75c413ab33f9c..5efca5a3a1d2e 100644 --- a/src/mongo/db/update/modifier_table_test.cpp +++ b/src/mongo/db/update/modifier_table_test.cpp @@ -31,7 +31,8 @@ #include -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/db/update/object_replace_executor.cpp b/src/mongo/db/update/object_replace_executor.cpp index e2d9262e00144..166d0b21c0496 100644 --- a/src/mongo/db/update/object_replace_executor.cpp +++ b/src/mongo/db/update/object_replace_executor.cpp @@ -27,17 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/update/object_replace_executor.h" +#include +#include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/field_ref_set.h" +#include "mongo/db/logical_time.h" #include "mongo/db/service_context.h" +#include "mongo/db/update/object_replace_executor.h" #include "mongo/db/update/storage_validation.h" #include "mongo/db/update/update_oplog_entry_serialization.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/update/object_replace_executor.h b/src/mongo/db/update/object_replace_executor.h index f9c70bde91991..c61be46dc9649 100644 --- a/src/mongo/db/update/object_replace_executor.h +++ b/src/mongo/db/update/object_replace_executor.h @@ -35,6 +35,8 @@ #include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/update/update_executor.h" namespace mongo { diff --git a/src/mongo/db/update/object_replace_executor_test.cpp b/src/mongo/db/update/object_replace_executor_test.cpp index 3785aa94d5b91..9b97daf02de86 100644 --- a/src/mongo/db/update/object_replace_executor_test.cpp +++ b/src/mongo/db/update/object_replace_executor_test.cpp @@ -27,16 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/update/object_replace_executor.h" - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/update/object_replace_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -52,7 +60,6 @@ TEST_F(ObjectReplaceExecutorTest, Noop) { mutablebson::Document doc(fromjson("{a: 1, b: 2}")); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: 2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{}"), result.oplogEntry); @@ -65,7 +72,6 @@ TEST_F(ObjectReplaceExecutorTest, ShouldNotCreateIdIfNoIdExistsAndNoneIsSpecifie mutablebson::Document doc(fromjson("{c: 1, d: 2}")); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: 1, b: 2}"), result.oplogEntry); @@ -78,7 +84,6 @@ TEST_F(ObjectReplaceExecutorTest, 
ShouldPreserveIdOfExistingDocumentIfIdNotSpeci mutablebson::Document doc(fromjson("{_id: 0, c: 1, d: 2}")); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{_id: 0, a: 1, b: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{_id: 0, a: 1, b: 2}"), result.oplogEntry); @@ -92,7 +97,6 @@ TEST_F(ObjectReplaceExecutorTest, ShouldSucceedWhenImmutableIdIsNotModified) { addImmutablePath("_id"); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{_id: 0, a: 1, b: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{_id: 0, a: 1, b: 2}"), result.oplogEntry); @@ -105,7 +109,6 @@ TEST_F(ObjectReplaceExecutorTest, IdTimestampNotModified) { mutablebson::Document doc(fromjson("{}")); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{_id: Timestamp(0,0)}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{_id: Timestamp(0,0)}"), result.oplogEntry); @@ -118,7 +121,6 @@ TEST_F(ObjectReplaceExecutorTest, NonIdTimestampsModified) { mutablebson::Document doc(fromjson("{}")); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(doc.root().countChildren(), 2U); @@ -145,7 +147,6 @@ TEST_F(ObjectReplaceExecutorTest, ComplexDoc) { mutablebson::Document doc(fromjson("{a: 1, b: [0, 2, 2], e: []}")); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: [0, 1, 2], c: {d: 1}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: 1, b: [0, 1, 2], c: {d: 1}}"), result.oplogEntry); @@ -172,7 +173,6 @@ TEST_F(ObjectReplaceExecutorTest, IdFieldIsNotRemoved) { addImmutablePath("_id"); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{_id: 0, a: 1}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{_id: 0, a: 1}"), result.oplogEntry); @@ -238,7 +238,6 @@ TEST_F(ObjectReplaceExecutorTest, CanAddImmutableField) { addImmutablePath("a.b"); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: {b: 1}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: {b: 1}}"), result.oplogEntry); @@ -252,7 +251,6 @@ TEST_F(ObjectReplaceExecutorTest, CanAddImmutableId) { addImmutablePath("_id"); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{_id: 0}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{_id: 0}"), result.oplogEntry); @@ -266,7 +264,6 @@ TEST_F(ObjectReplaceExecutorTest, CanCreateDollarPrefixedNameWhenValidateForStor setValidateForStorage(false); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: {b: 1, $bad: 1}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); 
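With `indexesAffected` dropped from `ApplyResult`, these executor tests now only inspect `noop`, the mutated document, in-place mode, and `result.oplogEntry`. The fixture-style sketch below shows that reduced surface, using only helpers already present in this file (`getApplyParams()` comes from the `UpdateNodeTest` fixture); the test name is made up for illustration.

```cpp
// Fixture-style sketch of the reduced ApplyResult surface after this change:
// index impact is no longer reported by applyUpdate(), so only the fields
// below remain to assert on.
TEST_F(ObjectReplaceExecutorTest, ReducedApplyResultSurface) {
    ObjectReplaceExecutor node(fromjson("{a: 1, b: 2}"));

    mutablebson::Document doc(fromjson("{c: 1, d: 2}"));
    auto result = node.applyUpdate(getApplyParams(doc.root()));

    ASSERT_FALSE(result.noop);
    ASSERT_EQUALS(fromjson("{a: 1, b: 2}"), doc);
    ASSERT_FALSE(doc.isInPlaceModeEnabled());
    ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: 1, b: 2}"), result.oplogEntry);
}
```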
ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: {b: 1, $bad: 1}}"), result.oplogEntry); @@ -280,10 +277,80 @@ TEST_F(ObjectReplaceExecutorTest, NoLogBuilder) { setLogBuilderToNull(); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); } +TEST_F(ObjectReplaceExecutorTest, DuplicateIdFieldsCheck) { + BSONObj replacement = BSON("a" + << "1" + << "_id" << 1 << "_id" << 2 << "_id" << 3); + ObjectReplaceExecutor node(replacement); + + mutablebson::Document doc(fromjson("{a: 1, _id: 1}")); + ASSERT_THROWS_CODE_AND_WHAT(node.applyUpdate(getApplyParams(doc.root())), + AssertionException, + ErrorCodes::BadValue, + "Can't have multiple _id fields in one document"); +} + +TEST_F(ObjectReplaceExecutorTest, DuplicateIdFieldsCheckOnEmptyDoc) { + BSONObj replacement = BSON("a" + << "1" + << "_id" << 1 << "_id" << 2 << "_id" << 3); + ObjectReplaceExecutor node(replacement); + + mutablebson::Document doc(fromjson("")); + ASSERT_THROWS_CODE_AND_WHAT(node.applyUpdate(getApplyParams(doc.root())), + AssertionException, + ErrorCodes::BadValue, + "Can't have multiple _id fields in one document"); +} + + +TEST_F(ObjectReplaceExecutorTest, DuplicateIdFieldsCheckOnInvalidDoc) { + BSONObj replacement = BSON("a" + << "2" + << "_id" << 4 << "_id" << 5 << "_id" << 6); + ObjectReplaceExecutor node(replacement); + + BSONObj invalid = BSON("a" + << "1" + << "_id" << 1 << "_id" << 2 << "_id" << 3); + mutablebson::Document doc(invalid); + ASSERT_THROWS_CODE_AND_WHAT(node.applyUpdate(getApplyParams(doc.root())), + AssertionException, + ErrorCodes::BadValue, + "Can't have multiple _id fields in one document"); +} + +TEST_F(ObjectReplaceExecutorTest, DuplicateIdFieldsCheckAllowsCorrection) { + ObjectReplaceExecutor node(fromjson("{a: 4, _id: 3}")); + + BSONObj invalid = BSON("a" + << "1" + << "_id" << 1 << "_id" << 2 << "_id" << 3); + mutablebson::Document doc(invalid); + auto result = node.applyUpdate(getApplyParams(doc.root())); + ASSERT_FALSE(result.noop); + ASSERT_EQUALS(fromjson("{a: 4, _id: 3}"), doc); + ASSERT_FALSE(doc.isInPlaceModeEnabled()); + ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: 4, _id: 3}"), result.oplogEntry); +} + +TEST_F(ObjectReplaceExecutorTest, DuplicateIdFieldsCheckAllowsNoop) { + BSONObj replacement = BSON("a" + << "1" + << "_id" << 1 << "_id" << 2 << "_id" << 3); + ObjectReplaceExecutor node(replacement); + + mutablebson::Document doc(replacement); + auto result = node.applyUpdate(getApplyParams(doc.root())); + ASSERT_TRUE(result.noop); + ASSERT_EQUALS(replacement, doc); + ASSERT_TRUE(doc.isInPlaceModeEnabled()); + ASSERT_BSONOBJ_BINARY_EQ(fromjson("{}"), result.oplogEntry); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/update/object_transform_executor.cpp b/src/mongo/db/update/object_transform_executor.cpp index 597b7dbb33593..1f59c6924aa74 100644 --- a/src/mongo/db/update/object_transform_executor.cpp +++ b/src/mongo/db/update/object_transform_executor.cpp @@ -27,13 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/object_transform_executor.h" +#include +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" #include "mongo/db/update/object_replace_executor.h" -#include "mongo/db/update/storage_validation.h" +#include "mongo/db/update/object_transform_executor.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/update/object_transform_executor.h b/src/mongo/db/update/object_transform_executor.h index aee80ac80b7be..878f8be589fbf 100644 --- a/src/mongo/db/update/object_transform_executor.h +++ b/src/mongo/db/update/object_transform_executor.h @@ -29,8 +29,12 @@ #pragma once +#include +#include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/update/update_executor.h" namespace mongo { diff --git a/src/mongo/db/update/object_transform_executor_test.cpp b/src/mongo/db/update/object_transform_executor_test.cpp index 23b9d52ff6df8..0ec74a30b271b 100644 --- a/src/mongo/db/update/object_transform_executor_test.cpp +++ b/src/mongo/db/update/object_transform_executor_test.cpp @@ -27,16 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/update/object_transform_executor.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/update/object_transform_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -55,7 +63,6 @@ TEST_F(ObjectTransformExecutorTest, Noop) { mutablebson::Document doc(input); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: 2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{}"), result.oplogEntry); @@ -67,7 +74,6 @@ TEST_F(ObjectTransformExecutorTest, NoneNoop) { mutablebson::Document doc(fromjson("{a: 1, b: 2}")); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: 2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{}"), result.oplogEntry); @@ -85,7 +91,6 @@ TEST_F(ObjectTransformExecutorTest, Replace) { mutablebson::Document doc(from); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(to, doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(to, result.oplogEntry); @@ -103,7 +108,6 @@ TEST_F(ObjectTransformExecutorTest, ShouldSucceedWhenImmutableIdIsNotModified) { addImmutablePath("_id"); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(to, doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); 
ASSERT_BSONOBJ_BINARY_EQ(to, result.oplogEntry); @@ -186,7 +190,6 @@ TEST_F(ObjectTransformExecutorTest, CanAddImmutableField) { addImmutablePath("a.b"); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: {b: 1}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: {b: 1}}"), result.oplogEntry); @@ -200,7 +203,6 @@ TEST_F(ObjectTransformExecutorTest, CanAddImmutableId) { addImmutablePath("_id"); auto result = node.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{_id: 0}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{_id: 0}"), result.oplogEntry); diff --git a/src/mongo/db/update/path_support.cpp b/src/mongo/db/update/path_support.cpp index ac7e7e706572c..6681b14e8661c 100644 --- a/src/mongo/db/update/path_support.cpp +++ b/src/mongo/db/update/path_support.cpp @@ -29,7 +29,16 @@ #include "mongo/db/update/path_support.h" +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" #include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/element.h" @@ -69,14 +78,14 @@ Status maybePadTo(mutablebson::Element* elemArray, size_t sizeRequired) { } // unnamed namespace -Status findLongestPrefix(const FieldRef& prefix, - mutablebson::Element root, - FieldIndex* idxFound, - mutablebson::Element* elemFound) { +StatusWith findLongestPrefix(const FieldRef& prefix, + mutablebson::Element root, + FieldIndex* idxFound, + mutablebson::Element* elemFound) { // If root is empty or the prefix is so, there's no point in looking for a prefix. const FieldIndex prefixSize = prefix.numParts(); if (!root.hasChildren() || prefixSize == 0) { - return Status(ErrorCodes::NonExistentPath, "either the document or the path are empty"); + return false; } // Loop through prefix's parts. At each iteration, check that the part ('curr') exists @@ -122,7 +131,7 @@ Status findLongestPrefix(const FieldRef& prefix, // parts in 'prefix' exist in 'root', or (d) all parts do. In each case, we need to // figure out what index and Element pointer to return. if (i == 0) { - return Status(ErrorCodes::NonExistentPath, "cannot find path in the document"); + return false; } else if (!viable) { *idxFound = i - 1; *elemFound = prev; @@ -133,11 +142,11 @@ Status findLongestPrefix(const FieldRef& prefix, } else if (curr.ok()) { *idxFound = i - 1; *elemFound = curr; - return Status::OK(); + return true; } else { *idxFound = i - 1; *elemFound = prev; - return Status::OK(); + return true; } } @@ -269,17 +278,17 @@ Status setElementAtPath(const FieldRef& path, mutablebson::Element deepestElem(doc->end()); // Get the existing parents of this path - Status status = findLongestPrefix(path, doc->root(), &deepestElemPathPart, &deepestElem); + auto swFound = findLongestPrefix(path, doc->root(), &deepestElemPathPart, &deepestElem); // TODO: All this is pretty awkward, why not return the position immediately after the // consumed path or use a signed sentinel? Why is it a special case when we've consumed the // whole path? 
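Assuming the new return type of `findLongestPrefix` is `StatusWith<bool>` (the template argument does not survive in the text above, but the `return true;` / `return false;` bodies and the `swFound.getValue()` checks in the updated callers and tests point to it), the contract becomes: an error `Status` such as `ErrorCodes::PathNotViable` when the path cannot exist in the document, OK with `false` when no prefix of the path is present (the old `NonExistentPath` case), and OK with `true` when some prefix was found, with `*idxFound` and `*elemFound` identifying the deepest matching element. A minimal caller sketch under that assumption, not part of the patch:

```cpp
// Minimal caller sketch, assuming findLongestPrefix() now returns StatusWith<bool>.
Status demoFindLongestPrefix(const FieldRef& path, mutablebson::Document& doc) {
    FieldIndex idxFound;
    mutablebson::Element elemFound = doc.root();
    auto swFound =
        pathsupport::findLongestPrefix(path, doc.root(), &idxFound, &elemFound);
    if (!swFound.isOK()) {
        return swFound.getStatus();  // e.g. ErrorCodes::PathNotViable
    }
    if (swFound.getValue()) {
        // Some prefix exists: elemFound is the deepest matching element and
        // idxFound is its position within 'path'.
    } else {
        // No part of 'path' exists in the document (previously NonExistentPath).
    }
    return Status::OK();
}
```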
- if (!status.isOK() && status.code() != ErrorCodes::NonExistentPath) - return status; + if (!swFound.isOK()) + return swFound.getStatus(); // Inc the path by one *unless* we matched nothing - if (status.code() != ErrorCodes::NonExistentPath) { + if (swFound.getValue()) { ++deepestElemPathPart; } else { deepestElemPathPart = 0; @@ -348,24 +357,6 @@ static Status checkEqualityConflicts(const EqualityMatches& equalities, const Fi return Status(ErrorCodes::NotSingleValueField, errMsg); } -/** - * Helper function to check if path conflicts are all prefixes. - */ -static Status checkPathIsPrefixOf(const FieldRef& path, const FieldRefSet& conflictPaths) { - for (FieldRefSet::const_iterator it = conflictPaths.begin(); it != conflictPaths.end(); ++it) { - const FieldRef* conflictingPath = *it; - // Conflicts are always prefixes (or equal to) the path, or vice versa - if (path.numParts() > conflictingPath->numParts()) { - string errMsg = stream() << "field at '" << conflictingPath->dottedField() - << "' must be exactly specified, field at sub-path '" - << path.dottedField() << "'found"; - return Status(ErrorCodes::NotExactValueField, errMsg); - } - } - - return Status::OK(); -} - static Status _extractFullEqualityMatches(const MatchExpression& root, const FieldRefSet* fullPathsToExtract, EqualityMatches* equalities) { @@ -377,16 +368,18 @@ static Status _extractFullEqualityMatches(const MatchExpression& root, if (fullPathsToExtract) { FieldRefSet conflictPaths; - fullPathsToExtract->findConflicts(&path, &conflictPaths); + auto swFlag = fullPathsToExtract->checkForConflictsAndPrefix(&path); + + // Found a conflicting path that is not a prefix + if (!swFlag.isOK()) { + return swFlag.getStatus(); + } // Ignore if this path is unrelated to the full paths - if (conflictPaths.empty()) + const bool hasConflict = swFlag.getValue(); + if (!hasConflict) { return Status::OK(); - - // Make sure we're a prefix of all the conflict paths - Status status = checkPathIsPrefixOf(path, conflictPaths); - if (!status.isOK()) - return status; + } } Status status = checkEqualityConflicts(*equalities, path); diff --git a/src/mongo/db/update/path_support.h b/src/mongo/db/update/path_support.h index 5698bffd7ef80..74d268f3f4fc2 100644 --- a/src/mongo/db/update/path_support.h +++ b/src/mongo/db/update/path_support.h @@ -29,16 +29,21 @@ #pragma once +#include +#include #include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/mutable/element.h" #include "mongo/db/field_ref.h" #include "mongo/db/field_ref_set.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/util/ctype.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -49,7 +54,7 @@ namespace pathsupport { static const size_t kMaxPaddingAllowed = 1500000; // Convenience type to hold equality matches at particular paths from a MatchExpression -typedef std::map EqualityMatches; +using EqualityMatches = StringDataMap; struct cmpPathsAndArrayIndexes { // While there is a string to number parser in the codebase, we use this for performance @@ -122,10 +127,10 @@ struct cmpPathsAndArrayIndexes { * 'a.0.b' is NOT a viable path in {a: 1}, because a would have changed types * 'a.5.b' is a viable path in in {a: []} (padding would occur) */ -Status findLongestPrefix(const FieldRef& prefix, - mutablebson::Element root, - FieldIndex* idxFound, - mutablebson::Element* elemFound); +StatusWith findLongestPrefix(const FieldRef& prefix, + 
mutablebson::Element root, + FieldIndex* idxFound, + mutablebson::Element* elemFound); /** * Creates the parts 'prefix[idxRoot]', 'prefix[idxRoot+1]', ..., 'prefix[-1]' under diff --git a/src/mongo/db/update/path_support_test.cpp b/src/mongo/db/update/path_support_test.cpp index 26137bfcef0e5..887969319cabd 100644 --- a/src/mongo/db/update/path_support_test.cpp +++ b/src/mongo/db/update/path_support_test.cpp @@ -29,27 +29,37 @@ #include "mongo/db/update/path_support.h" -#include #include #include +#include #include +#include +#include + #include "mongo/base/error_codes.h" #include "mongo/base/simple_string_data_comparator.h" #include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement_comparator.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/mutable/const_element.h" #include "mongo/bson/mutable/document.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" #include "mongo/db/field_ref.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/str.h" namespace { @@ -91,8 +101,9 @@ TEST_F(EmptyDoc, EmptyPath) { FieldIndex idxFound; Element elemFound = root(); - Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound); - ASSERT_EQUALS(status, ErrorCodes::NonExistentPath); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_FALSE(swFound.getValue()); } TEST_F(EmptyDoc, NewField) { @@ -100,8 +111,9 @@ TEST_F(EmptyDoc, NewField) { FieldIndex idxFound; Element elemFound = root(); - Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound); - ASSERT_EQUALS(status, ErrorCodes::NonExistentPath); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_FALSE(swFound.getValue()); Element newElem = doc().makeElementInt("a", 1); ASSERT_TRUE(newElem.ok()); @@ -116,8 +128,9 @@ TEST_F(EmptyDoc, NewPath) { FieldIndex idxFound; Element elemFound = root(); - Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound); - ASSERT_EQUALS(status, ErrorCodes::NonExistentPath); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_FALSE(swFound.getValue()); Element newElem = doc().makeElementInt("c", 1); ASSERT_TRUE(newElem.ok()); @@ -161,8 +174,9 @@ TEST_F(SimpleDoc, EmptyPath) { FieldIndex idxFound; Element elemFound = root(); - Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound); - ASSERT_EQUALS(status, ErrorCodes::NonExistentPath); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_FALSE(swFound.getValue()); } TEST_F(SimpleDoc, SimplePath) { @@ -170,7 +184,9 @@ TEST_F(SimpleDoc, SimplePath) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = 
findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 0U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"], nullptr), 0); @@ -181,8 +197,8 @@ TEST_F(SimpleDoc, LongerPath) { FieldIndex idxFound; Element elemFound = root(); - Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound); - ASSERT_EQUALS(status, ErrorCodes::PathNotViable); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_EQUALS(swFound.getStatus().code(), ErrorCodes::PathNotViable); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 0U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"], nullptr), 0); @@ -193,8 +209,9 @@ TEST_F(SimpleDoc, NotCommonPrefix) { FieldIndex idxFound; Element elemFound = root(); - Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound); - ASSERT_EQUALS(status, ErrorCodes::NonExistentPath); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_FALSE(swFound.getValue()); // From this point on, handles the creation of the '.b' part that wasn't found. Element newElem = doc().makeElementInt("b", 1); @@ -272,7 +289,9 @@ TEST_F(NestedDoc, SimplePath) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 0U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"], nullptr), 0); @@ -283,7 +302,9 @@ TEST_F(NestedDoc, ShorterPath) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_EQUALS(idxFound, 1U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"], nullptr), 0); } @@ -293,7 +314,9 @@ TEST_F(NestedDoc, ExactPath) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 2U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"], nullptr), 0); @@ -305,8 +328,8 @@ TEST_F(NestedDoc, LongerPath) { FieldIndex idxFound; Element elemFound = root(); - Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound); - ASSERT_EQUALS(status.code(), ErrorCodes::PathNotViable); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_EQUALS(swFound.getStatus().code(), ErrorCodes::PathNotViable); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 2U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"], nullptr), 0); @@ -317,7 +340,9 @@ TEST_F(NestedDoc, NewFieldNested) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_EQUALS(idxFound, 1U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"], nullptr), 0); @@ -337,7 +362,9 @@ TEST_F(NestedDoc, NotStartingFromRoot) { FieldIndex 
idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root()["a"], &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root()["a"], &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_EQUALS(idxFound, 1U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"]["b"]["c"], nullptr), 0); } @@ -388,7 +415,9 @@ TEST_F(ArrayDoc, PathOnEmptyArray) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 0U); ASSERT_EQUALS(elemFound.compareWithElement(root()["a"], nullptr), 0); @@ -399,7 +428,9 @@ TEST_F(ArrayDoc, PathOnPopulatedArray) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 1U); ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0], nullptr), 0); @@ -410,7 +441,9 @@ TEST_F(ArrayDoc, MixedArrayAndObjectPath) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 2U); ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0]["c"], nullptr), 0); @@ -421,7 +454,9 @@ TEST_F(ArrayDoc, ExtendingExistingObject) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 1U); ASSERT_EQUALS(elemFound.compareWithElement(root()["b"][0], nullptr), 0); @@ -443,7 +478,9 @@ TEST_F(ArrayDoc, NewObjectInsideArray) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 0U); ASSERT_EQUALS(elemFound.compareWithElement(root()["b"], nullptr), 0); @@ -465,7 +502,9 @@ TEST_F(ArrayDoc, NewNestedObjectInsideArray) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 0U); ASSERT_EQUALS(elemFound.compareWithElement(root()["b"], nullptr), 0); @@ -487,7 +526,9 @@ TEST_F(ArrayDoc, ArrayPaddingNecessary) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 0U); ASSERT_EQUALS(elemFound.compareWithElement(root()["b"], nullptr), 0); @@ -512,7 +553,9 @@ TEST_F(ArrayDoc, ExcessivePaddingRequested) { 
FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); // From this point on, try to create the padded part that wasn't found. Element newElem = doc().makeElementInt("", 1); @@ -538,7 +581,9 @@ TEST_F(ArrayDoc, ExcessivePaddingNotRequestedIfArrayAlreadyPadded) { FieldIndex idxFound; Element elemFound = root(); - ASSERT_OK(findLongestPrefix(field(), root(), &idxFound, &elemFound)); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_OK(swFound); + ASSERT_TRUE(swFound.getValue()); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(countChildren(elemFound), 5u); @@ -563,8 +608,8 @@ TEST_F(ArrayDoc, NonNumericPathInArray) { FieldIndex idxFound; Element elemFound = root(); - Status status = findLongestPrefix(field(), root(), &idxFound, &elemFound); - ASSERT_EQUALS(status.code(), ErrorCodes::PathNotViable); + auto swFound = findLongestPrefix(field(), root(), &idxFound, &elemFound); + ASSERT_EQUALS(swFound.getStatus().code(), ErrorCodes::PathNotViable); ASSERT_TRUE(elemFound.ok()); ASSERT_EQUALS(idxFound, 0U); ASSERT_EQUALS(elemFound.compareWithElement(root()["b"], nullptr), 0); diff --git a/src/mongo/db/update/pattern_cmp.cpp b/src/mongo/db/update/pattern_cmp.cpp index 3dbaa6c386e68..b0327c1c1b555 100644 --- a/src/mongo/db/update/pattern_cmp.cpp +++ b/src/mongo/db/update/pattern_cmp.cpp @@ -29,11 +29,17 @@ #include "mongo/db/update/pattern_cmp.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/const_element.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value_comparator.h" #include "mongo/db/field_ref.h" -#include "mongo/db/jsobj.h" +#include "mongo/util/str.h" namespace mongo { namespace pattern_cmp { diff --git a/src/mongo/db/update/pattern_cmp.h b/src/mongo/db/update/pattern_cmp.h index fd5d5c94ac3e2..2910bd198f39b 100644 --- a/src/mongo/db/update/pattern_cmp.h +++ b/src/mongo/db/update/pattern_cmp.h @@ -29,6 +29,9 @@ #pragma once +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/element.h" #include "mongo/db/exec/document_value/value.h" diff --git a/src/mongo/db/update/pattern_cmp_test.cpp b/src/mongo/db/update/pattern_cmp_test.cpp index 83f4c940e8a82..ed0fc17593c7a 100644 --- a/src/mongo/db/update/pattern_cmp_test.cpp +++ b/src/mongo/db/update/pattern_cmp_test.cpp @@ -29,16 +29,22 @@ #include "mongo/db/update/pattern_cmp.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" #include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/element.h" #include "mongo/db/exec/document_value/document_value_test_util.h" #include "mongo/db/exec/document_value/value.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git 
a/src/mongo/db/update/pipeline_executor.cpp b/src/mongo/db/update/pipeline_executor.cpp index 47abdc0519435..bbdbd8dcaf6f6 100644 --- a/src/mongo/db/update/pipeline_executor.cpp +++ b/src/mongo/db/update/pipeline_executor.cpp @@ -27,20 +27,36 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/update/pipeline_executor.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/mutable/document.h" -#include "mongo/db/bson/dotted_path_support.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_queue.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" #include "mongo/db/pipeline/variable_validation.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/update/document_diff_calculator.h" #include "mongo/db/update/object_replace_executor.h" -#include "mongo/db/update/storage_validation.h" +#include "mongo/db/update/pipeline_executor.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" namespace mongo { @@ -117,14 +133,10 @@ UpdateExecutor::ApplyResult PipelineExecutor::applyUpdate(ApplyParams applyParam if (applyParams.logMode == ApplyParams::LogMode::kGenerateOplogEntry) { // We're allowed to generate $v: 2 log entries. The $v:2 has certain meta-fields like // '$v', 'diff'. So we pad some additional byte while computing diff. - const auto diffOutput = - doc_diff::computeOplogDiff(originalDoc, - transformedDoc, - update_oplog_entry::kSizeOfDeltaOplogEntryMetadata, - applyParams.indexData); - if (diffOutput) { - ret.oplogEntry = update_oplog_entry::makeDeltaOplogEntry(diffOutput->diff); - ret.indexesAffected = diffOutput->indexesAffected; + const auto diff = doc_diff::computeOplogDiff( + originalDoc, transformedDoc, update_oplog_entry::kSizeOfDeltaOplogEntryMetadata); + if (diff) { + ret.oplogEntry = update_oplog_entry::makeDeltaOplogEntry(*diff); return ret; } } diff --git a/src/mongo/db/update/pipeline_executor.h b/src/mongo/db/update/pipeline_executor.h index e0b16b851c23c..4f0d21cffb04f 100644 --- a/src/mongo/db/update/pipeline_executor.h +++ b/src/mongo/db/update/pipeline_executor.h @@ -29,15 +29,22 @@ #pragma once +#include +#include +#include #include #include #include #include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/update/update_executor.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/db/update/pipeline_executor_test.cpp b/src/mongo/db/update/pipeline_executor_test.cpp index 02fde0b0865ff..75a4eeb8310aa 100644 --- a/src/mongo/db/update/pipeline_executor_test.cpp +++ b/src/mongo/db/update/pipeline_executor_test.cpp @@ -27,18 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/pipeline_executor.h" +#include +#include -#include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/json.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/update/pipeline_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -52,7 +59,6 @@ TEST_F(UpdateTestFixture, Noop) { mutablebson::Document doc(fromjson("{a: 1, b: 2}")); auto result = exec.applyUpdate(getApplyParams(doc.root())); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: 2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_TRUE(result.oplogEntry.isEmpty()); @@ -69,7 +75,6 @@ TEST_F(UpdateTestFixture, ShouldNotCreateIdIfNoIdExistsAndNoneIsSpecified) { ASSERT_FALSE(result.noop); ASSERT_EQUALS(fromjson("{c: 1, d: 'largeStringValue', a: 1, b: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); - ASSERT_FALSE(result.indexesAffected); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{$v: 2, diff: {i: {a: 1, b: 2}}}"), result.oplogEntry); } @@ -83,7 +88,6 @@ TEST_F(UpdateTestFixture, ShouldPreserveIdOfExistingDocumentIfIdNotReplaced) { mutablebson::Document doc(fromjson("{_id: 0, c: 1, d: 2}")); auto result = exec.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{_id: 0, a: 1, b: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{_id: 0, a: 1, b: 2}"), result.oplogEntry); @@ -102,7 +106,6 @@ TEST_F(UpdateTestFixture, ShouldSucceedWhenImmutableIdIsNotModified) { ASSERT_EQUALS(fromjson("{_id: 0, c: 1, d: 'largeStringValue', a: 1, b: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); - ASSERT_FALSE(result.indexesAffected); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{$v: 2, diff: {i: {a: 1, b: 2 }}}"), result.oplogEntry); } @@ -118,7 +121,6 @@ TEST_F(UpdateTestFixture, ComplexDoc) { ASSERT_EQUALS(fromjson("{a: 1, b: [0, 1, 2], e: ['val1', 'val2'], c: {d: 1}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); - ASSERT_FALSE(result.indexesAffected); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{$v: 2, diff: {i: {c: {d: 1}}, sb: {a: true, u1: 1} }}"), result.oplogEntry); } @@ -149,7 +151,6 @@ TEST_F(UpdateTestFixture, IdFieldIsNotRemoved) { addImmutablePath("_id"); auto result = exec.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{_id: 0}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{_id: 0}"), result.oplogEntry); @@ -225,7 +226,6 @@ TEST_F(UpdateTestFixture, CanAddImmutableField) { addImmutablePath("a.b"); auto result = exec.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{c: 1, a: {b: 1}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); 
ASSERT_BSONOBJ_BINARY_EQ(fromjson("{c: 1, a: {b: 1}}"), result.oplogEntry); @@ -241,7 +241,6 @@ TEST_F(UpdateTestFixture, CanAddImmutableId) { addImmutablePath("_id"); auto result = exec.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{c: 1, _id: 0}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{c: 1, _id: 0}"), result.oplogEntry); @@ -264,7 +263,6 @@ TEST_F(UpdateTestFixture, NoLogBuilder) { setLogBuilderToNull(); auto result = exec.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{b: 1, a: 1}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); } @@ -317,7 +315,6 @@ TEST_F(UpdateTestFixture, CanUseConstants) { mutablebson::Document doc(fromjson("{a: 1}")); const auto result = exec.applyUpdate(getApplyParams(doc.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: 10, c : {x: 1, y: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: 1, b: 10, c : {x: 1, y: 2}}"), result.oplogEntry); @@ -335,7 +332,6 @@ TEST_F(UpdateTestFixture, CanUseConstantsAcrossMultipleUpdates) { mutablebson::Document doc1(fromjson("{a: 1}")); auto result = exec.applyUpdate(getApplyParams(doc1.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: 'foo'}"), doc1); ASSERT_FALSE(doc1.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: 1, b: 'foo'}"), result.oplogEntry); @@ -345,7 +341,6 @@ TEST_F(UpdateTestFixture, CanUseConstantsAcrossMultipleUpdates) { resetApplyParams(); result = exec.applyUpdate(getApplyParams(doc2.root())); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 2, b: 'foo'}"), doc2); ASSERT_FALSE(doc2.isInPlaceModeEnabled()); ASSERT_BSONOBJ_BINARY_EQ(fromjson("{a: 2, b: 'foo'}"), result.oplogEntry); @@ -361,7 +356,6 @@ TEST_F(UpdateTestFixture, NoopWithConstants) { mutablebson::Document doc(fromjson("{a: 1, b: 2}")); const auto result = exec.applyUpdate(getApplyParams(doc.root())); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 1, b: 2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_TRUE(result.oplogEntry.isEmpty()); @@ -384,7 +378,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithDeletes) { // Verify post-image and diff format. ASSERT_EQUALS(doc, fromjson("{paddingField: 'largeValueString'}")); ASSERT_BSONOBJ_BINARY_EQ(result.oplogEntry, fromjson("{$v: 2, diff: {d: {f1: false}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // When a path in the diff is same as index path. @@ -400,7 +394,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithDeletes) { "{f1: {a: {paddingField: 'largeValueString'}}, paddingField: 'largeValueString'}")); ASSERT_BSONOBJ_BINARY_EQ(result.oplogEntry, fromjson("{$v: 2, diff: {sf1: {sa: {d: {b: false, c: false}}}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // When the index path is a prefix of a path in the diff. 
@@ -416,7 +410,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithDeletes) { doc, fromjson("{f1: {a: {b: {paddingField: 'largeValueString'}, c: 1, paddingField: " "'largeValueString'}}, paddingField: 'largeValueString'}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // With common parent, but path diverges. @@ -432,7 +426,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithDeletes) { doc, fromjson("{f1: {a: {b: {c: 1, paddingField: 'largeValueString'}, paddingField: " "'largeValueString'}}, paddingField: 'largeValueString'}")); - ASSERT(!result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry(result.oplogEntry)); } } @@ -455,7 +449,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithUpdatesAndInserts) { ASSERT_EQUALS(doc, fromjson("{f1: true, paddingField: 'largeValueString', f2: true}")); ASSERT_BSONOBJ_BINARY_EQ(result.oplogEntry, fromjson("{$v: 2, diff: {u: {f1: true}, i: {f2: true}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // When a path in the diff is same as index path. @@ -467,7 +461,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithUpdatesAndInserts) { // Verify diff format. ASSERT_BSONOBJ_BINARY_EQ(result.oplogEntry, fromjson("{$v: 2, diff: {sf1: {sa: {i: {newField: true}}}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // When the index path is a prefix of a path in the diff. @@ -484,7 +478,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithUpdatesAndInserts) { fromjson( "{f1: {a: {b: {c: true, paddingField: 'largeValueString'}, c: 1, paddingField: " "'largeValueString'}}, paddingField: 'largeValueString'}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // With common parent, but path diverges. @@ -500,7 +494,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithUpdatesAndInserts) { doc, fromjson("{f1: {a: {b: {c: 1, paddingField: 'largeValueString'}, c: 1, paddingField: " "'largeValueString', p: true}}, paddingField: 'largeValueString'}")); - ASSERT(!result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry(result.oplogEntry)); } } @@ -530,7 +524,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAlongIndexPath) { ASSERT_BSONOBJ_BINARY_EQ( result.oplogEntry, fromjson("{$v: 2, diff: {sf1: {a: true, s1: {sa: {sb: {a: true, l: 1}}}}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // When the index path is a prefix of a path in the diff and also involves numeric @@ -552,7 +546,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAlongIndexPath) { result.oplogEntry, fromjson( "{$v: 2, diff: {sf1: {a: true, s1: {sa: {sb: {a: true, s1: {i: {d: 1} }}}}}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // When inserting a sub-object into array, and the sub-object diverges from the index path. @@ -571,7 +565,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAlongIndexPath) { "'largeValueString'}")); ASSERT_BSONOBJ_BINARY_EQ(result.oplogEntry, fromjson("{$v: 2, diff: {sf1: {a: true, u2: {newField: 1} }}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // When a common array path element is updated, but the paths diverge at the last element. 
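The assertion churn through pipeline_executor_test.cpp here and the pop/pull node tests that follow traces back to one change: the executor's `ApplyResult` no longer reports whether indexes were affected, and the tests instead recover that information from the generated `$v: 2` oplog entry through the fixture helper `getIndexAffectedFromLogEntry()` (invoked with `result.oplogEntry` in this file and with no argument in the node tests below, so the helper's exact signature is taken on faith from those call sites). A rough before/after sketch of the pattern, assuming that fixture helper:

```cpp
// Old pattern (removed): the executor reported index impact directly.
//   auto result = exec.applyUpdate(getApplyParams(doc.root()));
//   ASSERT_TRUE(result.indexesAffected);

// New pattern: index impact is derived from the $v:2 diff in the oplog entry.
auto result = exec.applyUpdate(getApplyParams(doc.root()));
ASSERT_FALSE(result.noop);
ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry));
```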
@@ -591,7 +585,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAlongIndexPath) { fromjson( "{f1: [0, {a: {b: ['someStringValue', {c: 1, paddingField: 'largeValueString'}], " "c: 2, paddingField: 'largeValueString'}}], paddingField: 'largeValueString'}")); - ASSERT(!result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry(result.oplogEntry)); } } @@ -618,7 +612,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAfterIndexPath) { "'largeValueString'}}, paddingField: 'largeValueString'}")); ASSERT_BSONOBJ_BINARY_EQ( result.oplogEntry, fromjson("{$v: 2, diff: {sf1: {sa: {sb: {sc: {a: true, l: 1}}}}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // Add an array element. @@ -636,7 +630,7 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAfterIndexPath) { ASSERT_BSONOBJ_BINARY_EQ( result.oplogEntry, fromjson("{$v: 2, diff: {sf1: {sa: {sb: {sc: {a: true, u2: {newField: 1} }}}}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } { // Updating a sub-array element. @@ -654,9 +648,49 @@ TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAfterIndexPath) { ASSERT_BSONOBJ_BINARY_EQ( result.oplogEntry, fromjson("{$v: 2, diff: {sf1: {sa: {sb: {sc: {a: true, u1: 'updatedVal'}}}}}}")); - ASSERT(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry(result.oplogEntry)); } } +/** + * Verifies the fix for SERVER-76934 in the case where the original document has a duplicate field. + */ +TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAfterIndexPath1) { + boost::intrusive_ptr expCtx(new ExpressionContextForTest()); + BSONObj preImage( + fromjson("{f1: {paddingField: 'largeValueString'}, k: {c: 1, c: 2, paddingField: " + "'largeValueString'}}")); + + auto doc = mutablebson::Document(preImage); + const std::vector pipeline{ + fromjson("{$replaceWith: {$literal: {f1: {paddingField: 'largeValueString'}, k: {c: 4, " + "paddingField: 'largeValueString'}} }}")}; + PipelineExecutor exec(expCtx, pipeline); + ASSERT_THROWS_CODE_AND_WHAT(exec.applyUpdate(getApplyParams(doc.root())), + AssertionException, + 7693400, + "Document already has a field named 'c'"); +} + +/** + * Verifies the fix for SERVER-76934 in the case where the pipeline tries to add a duplicate field. + */ +TEST_F(UpdateTestFixture, TestIndexesAffectedWithArraysAfterIndexPath2) { + boost::intrusive_ptr expCtx(new ExpressionContextForTest()); + BSONObj preImage( + fromjson("{f1: {paddingField: 'largeValueString'}, k: {c: 1, paddingField: " + "'largeValueString'}}")); + + auto doc = mutablebson::Document(preImage); + const std::vector pipeline{ + fromjson("{$replaceWith: {$literal: {f1: {paddingField: 'largeValueString'}, k: {c: 4, c: " + "5, paddingField: 'largeValueString'}} }}")}; + PipelineExecutor exec(expCtx, pipeline); + ASSERT_THROWS_CODE_AND_WHAT(exec.applyUpdate(getApplyParams(doc.root())), + AssertionException, + 7693400, + "Document already has a field named 'c'"); +} + } // namespace } // namespace mongo diff --git a/src/mongo/db/update/pop_node.cpp b/src/mongo/db/update/pop_node.cpp index c7942a32aa91e..5437abe1fd28b 100644 --- a/src/mongo/db/update/pop_node.cpp +++ b/src/mongo/db/update/pop_node.cpp @@ -27,12 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/pop_node.h" +#include -#include "mongo/db/matcher/expression_parser.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/update/pop_node.h" #include "mongo/db/update/storage_validation.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -87,6 +92,7 @@ void PopNode::validateUpdate(mutablebson::ConstElement updatedElement, recursionLevel, false, /* allowTopLevelDollarPrefixedFields */ false, /* Should validate for storage */ + false, /* isEmbeddedInIdField */ containsDotsAndDollarsField); } diff --git a/src/mongo/db/update/pop_node.h b/src/mongo/db/update/pop_node.h index d5047b59507ac..aaca5bbfdfe1e 100644 --- a/src/mongo/db/update/pop_node.h +++ b/src/mongo/db/update/pop_node.h @@ -29,10 +29,25 @@ #pragma once +#include #include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/const_element.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/modifier_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" namespace mongo { diff --git a/src/mongo/db/update/pop_node_test.cpp b/src/mongo/db/update/pop_node_test.cpp index 4159be9a475a0..c184c1fe1d583 100644 --- a/src/mongo/db/update/pop_node_test.cpp +++ b/src/mongo/db/update/pop_node_test.cpp @@ -27,16 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/pop_node.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/bson/json.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/update/pop_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/safe_num.h" namespace mongo { namespace { @@ -113,7 +120,7 @@ TEST_F(PopNodeTest, NoopWhenFirstPathComponentDoesNotExist) { addIndexedPath("a.b"); auto result = popNode.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry(result.oplogEntry)); ASSERT_EQUALS(fromjson("{b: [1, 2, 3]}"), doc); assertOplogEntryIsNoop(); @@ -132,7 +139,7 @@ TEST_F(PopNodeTest, NoopWhenPathPartiallyExists) { addIndexedPath("a.b.c"); auto result = popNode.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry(result.oplogEntry)); ASSERT_EQUALS(fromjson("{a: {}}"), doc); assertOplogEntryIsNoop(); @@ -151,7 +158,7 @@ TEST_F(PopNodeTest, NoopWhenNumericalPathComponentExceedsArrayLength) { addIndexedPath("a.0"); auto result = popNode.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry(result.oplogEntry)); ASSERT_EQUALS(fromjson("{a: []}"), doc); assertOplogEntryIsNoop(); @@ -216,7 +223,7 @@ TEST_F(PopNodeTest, NoopWhenPathContainsAnEmptyArray) { addIndexedPath("a.b"); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry(result.oplogEntry)); ASSERT_EQUALS(fromjson("{a: {b: []}}"), doc); assertOplogEntryIsNoop(); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -234,7 +241,7 @@ TEST_F(PopNodeTest, PopsSingleElementFromTheBack) { addIndexedPath("a.b"); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: []}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {sa: {u: {b: []}}}}")); @@ -253,7 +260,7 @@ TEST_F(PopNodeTest, PopsSingleElementFromTheFront) { addIndexedPath("a"); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: []}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {sa: {u: {b: []}}}}")); @@ -272,7 +279,7 @@ TEST_F(PopNodeTest, PopsFromTheBackOfMultiElementArray) { addIndexedPath("a.b.c"); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: [1, 
2]}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {sa: {u: {b: [1, 2]}}}}")); @@ -291,7 +298,7 @@ TEST_F(PopNodeTest, PopsFromTheFrontOfMultiElementArray) { addIndexedPath("a.b"); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: [2, 3]}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {sa: {u: {b: [2, 3]}}}}")); @@ -310,7 +317,7 @@ TEST_F(PopNodeTest, PopsFromTheFrontOfMultiElementArrayWithoutAffectingIndexes) addIndexedPath("unrelated.path"); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: [2, 3]}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {sa: {u: {b: [2, 3]}}}}")); @@ -328,7 +335,7 @@ TEST_F(PopNodeTest, SucceedsWithNullUpdateIndexData) { setPathTaken(makeRuntimeUpdatePathForTest("a.b")); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: [1, 2]}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {sa: {u: {b: [1, 2]}}}}")); @@ -348,7 +355,6 @@ TEST_F(PopNodeTest, SucceedsWithNullLogBuilder) { setLogBuilderToNull(); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: {b: [1, 2]}}"), doc); ASSERT_EQUALS("{a.b}", getModifiedPaths()); } @@ -421,7 +427,7 @@ TEST_F(PopNodeTest, NoopOnImmutablePathSucceeds) { addIndexedPath("a.b"); auto result = popNode.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: []}}"), doc); assertOplogEntryIsNoop(); diff --git a/src/mongo/db/update/pull_node.cpp b/src/mongo/db/update/pull_node.cpp index def1a62140284..1c6240d632627 100644 --- a/src/mongo/db/update/pull_node.cpp +++ b/src/mongo/db/update/pull_node.cpp @@ -27,12 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/update/pull_node.h" +#include +#include "mongo/base/clonable_ptr.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/const_element.h" #include "mongo/db/matcher/copyable_match_expression.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback.h" +#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/update/pull_node.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/update/pull_node.h b/src/mongo/db/update/pull_node.h index 63fe5791fabcb..0a0f3bad54895 100644 --- a/src/mongo/db/update/pull_node.h +++ b/src/mongo/db/update/pull_node.h @@ -31,8 +31,15 @@ #include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/update/array_culling_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" namespace mongo { diff --git a/src/mongo/db/update/pull_node_test.cpp b/src/mongo/db/update/pull_node_test.cpp index 85141fa3841a1..9b09627bf33c9 100644 --- a/src/mongo/db/update/pull_node_test.cpp +++ b/src/mongo/db/update/pull_node_test.cpp @@ -27,18 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/pull_node.h" +#include -#include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/update/pull_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -139,7 +145,7 @@ TEST_F(PullNodeTest, TargetNotFound) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -207,7 +213,7 @@ TEST_F(PullNodeTest, ApplyToMissingElement) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]["c"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: {c: {}}}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -225,7 +231,7 @@ TEST_F(PullNodeTest, ApplyToEmptyArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 
[]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -243,7 +249,7 @@ TEST_F(PullNodeTest, ApplyToArrayMatchingNone) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [2, 3, 4, 5]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -261,7 +267,7 @@ TEST_F(PullNodeTest, ApplyToArrayMatchingOne) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 2, 3]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -279,7 +285,7 @@ TEST_F(PullNodeTest, ApplyToArrayMatchingSeveral) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 2, 3, 4, 5]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -297,7 +303,7 @@ TEST_F(PullNodeTest, ApplyToArrayMatchingAll) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -315,7 +321,7 @@ TEST_F(PullNodeTest, ApplyToArrayWithEq) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 2, 3]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -333,7 +339,7 @@ TEST_F(PullNodeTest, ApplyNoIndexDataNoLogBuilder) { setLogBuilderToNull(); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 2, 3]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); } @@ -354,7 +360,7 @@ TEST_F(PullNodeTest, ApplyWithCollation) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['zaa', 'zbb']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -375,7 +381,7 @@ TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectNonStringMatches) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [2, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -396,7 +402,7 @@ TEST_F(PullNodeTest, ApplyWithCollationDoesNotAffectRegexMatches) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['b', 'cb']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -417,7 +423,7 @@ 
TEST_F(PullNodeTest, ApplyStringLiteralMatchWithCollation) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -438,7 +444,7 @@ TEST_F(PullNodeTest, ApplyCollationDoesNotAffectNumberLiteralMatches) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['a', 'b', 2, 'c', 'd']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -456,7 +462,7 @@ TEST_F(PullNodeTest, ApplyStringMatchAfterSetCollator) { setPathTaken(makeRuntimeUpdatePathForTest("a")); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['a', 'b', 'd']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -468,7 +474,7 @@ TEST_F(PullNodeTest, ApplyStringMatchAfterSetCollator) { setPathTaken(makeRuntimeUpdatePathForTest("a")); result = node.apply(getApplyParams(doc2.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc2); ASSERT_FALSE(doc2.isInPlaceModeEnabled()); } @@ -484,7 +490,7 @@ TEST_F(PullNodeTest, ApplyElementMatchAfterSetCollator) { setPathTaken(makeRuntimeUpdatePathForTest("a")); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['a', 'b']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -496,7 +502,7 @@ TEST_F(PullNodeTest, ApplyElementMatchAfterSetCollator) { setPathTaken(makeRuntimeUpdatePathForTest("a")); result = node.apply(getApplyParams(doc2.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc2); ASSERT_FALSE(doc2.isInPlaceModeEnabled()); } @@ -512,7 +518,7 @@ TEST_F(PullNodeTest, ApplyObjectMatchAfterSetCollator) { setPathTaken(makeRuntimeUpdatePathForTest("a")); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a : [{b: 'w'}, {b: 'x'}, {b: 'z'}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -524,7 +530,7 @@ TEST_F(PullNodeTest, ApplyObjectMatchAfterSetCollator) { setPathTaken(makeRuntimeUpdatePathForTest("a")); result = node.apply(getApplyParams(doc2.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc2); ASSERT_FALSE(doc2.isInPlaceModeEnabled()); } @@ -545,7 +551,7 @@ TEST_F(PullNodeTest, SetCollatorDoesNotAffectClone) { setPathTaken(makeRuntimeUpdatePathForTest("a")); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - 
ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -555,7 +561,7 @@ TEST_F(PullNodeTest, SetCollatorDoesNotAffectClone) { setPathTaken(makeRuntimeUpdatePathForTest("a")); result = cloneNode->apply(getApplyParams(doc2.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['a', 'b', 'd']}"), doc2); ASSERT_FALSE(doc2.isInPlaceModeEnabled()); } @@ -575,7 +581,7 @@ TEST_F(PullNodeTest, ApplyComplexDocAndMatching1) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: [{x: 1}, {x: 2}]}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -593,7 +599,7 @@ TEST_F(PullNodeTest, ApplyComplexDocAndMatching2) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: [{x: 1}, {x: 2}, {z: 'z'}]}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -611,7 +617,7 @@ TEST_F(PullNodeTest, ApplyComplexDocAndMatching3) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: [{x: 2}, {z: 'z'}]}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -633,7 +639,7 @@ TEST_F(PullNodeTest, ApplyFullPredicateWithCollation) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: []}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -651,7 +657,7 @@ TEST_F(PullNodeTest, ApplyScalarValueMod) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [2, 2, 2]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -669,7 +675,7 @@ TEST_F(PullNodeTest, ApplyObjectValueMod) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{x: 1}, {x: 1}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -688,7 +694,7 @@ TEST_F(PullNodeTest, DocumentationExample1) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["flags"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{flags: ['vme', 'de', 'pse', 'tsc', 'pae', 'mce']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -707,7 +713,7 @@ TEST_F(PullNodeTest, DocumentationExample2a) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["votes"]), getUpdateNodeApplyParams()); 
ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{votes: [3, 5, 6, 8]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -725,7 +731,7 @@ TEST_F(PullNodeTest, DocumentationExample2b) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["votes"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{votes: [3, 5, 6]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -743,7 +749,7 @@ TEST_F(PullNodeTest, ApplyPullWithObjectValueToArrayWithNonObjectValue) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [2]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -777,7 +783,7 @@ TEST_F(PullNodeTest, SERVER_3988) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["y"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{x: 1, y: [2, 3, 4, 'abc']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); diff --git a/src/mongo/db/update/pullall_node.cpp b/src/mongo/db/update/pullall_node.cpp index 3890dd854d3dc..ff554fe820c8c 100644 --- a/src/mongo/db/update/pullall_node.cpp +++ b/src/mongo/db/update/pullall_node.cpp @@ -27,12 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/update/pullall_node.h" +#include +#include "mongo/base/clonable_ptr.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/mutable/const_element.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/update/pullall_node.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/update/pullall_node.h b/src/mongo/db/update/pullall_node.h index 432c143ee52bc..703acd46f7e59 100644 --- a/src/mongo/db/update/pullall_node.h +++ b/src/mongo/db/update/pullall_node.h @@ -31,8 +31,15 @@ #include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/update/array_culling_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" namespace mongo { diff --git a/src/mongo/db/update/pullall_node_test.cpp b/src/mongo/db/update/pullall_node_test.cpp index 5286fc4d653f3..f01c82c333749 100644 --- a/src/mongo/db/update/pullall_node_test.cpp +++ b/src/mongo/db/update/pullall_node_test.cpp @@ -27,18 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/pullall_node.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/update/pullall_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -94,7 +101,7 @@ TEST_F(PullAllNodeTest, TargetNotFound) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 'a', {r: 1, b: 2}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -113,7 +120,7 @@ TEST_F(PullAllNodeTest, TargetArrayElementNotFound) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 2]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -147,7 +154,7 @@ TEST_F(PullAllNodeTest, ApplyWithSingleNumber) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['a', {r: 1, b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -165,7 +172,6 @@ TEST_F(PullAllNodeTest, ApplyNoIndexDataNoLogBuilder) { setLogBuilderToNull(); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: ['a', {r: 1, b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); } @@ -181,7 +187,7 @@ TEST_F(PullAllNodeTest, ApplyWithElementNotPresentInArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 'a', {r: 1, b: 2}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -199,7 +205,7 @@ TEST_F(PullAllNodeTest, ApplyWithWithTwoElements) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{r: 1, b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -217,7 +223,7 @@ TEST_F(PullAllNodeTest, ApplyWithAllArrayElements) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); 
ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -235,7 +241,7 @@ TEST_F(PullAllNodeTest, ApplyWithAllArrayElementsButOutOfOrder) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -253,7 +259,7 @@ TEST_F(PullAllNodeTest, ApplyWithAllArrayElementsAndThenSome) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -274,7 +280,7 @@ TEST_F(PullAllNodeTest, ApplyWithCollator) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['baz']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -303,7 +309,6 @@ TEST_F(PullAllNodeTest, ApplyAfterSetCollator) { setPathTaken(makeRuntimeUpdatePathForTest("a")); result = node.apply(getApplyParams(doc2.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: ['baz']}"), doc2); ASSERT_FALSE(doc2.isInPlaceModeEnabled()); } diff --git a/src/mongo/db/update/push_node.cpp b/src/mongo/db/update/push_node.cpp index 59146dd8c3115..48589e3ba458c 100644 --- a/src/mongo/db/update/push_node.cpp +++ b/src/mongo/db/update/push_node.cpp @@ -27,17 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/update/push_node.h" - +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/simple_string_data_comparator.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/db/update/update_internal_node.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/update/push_node.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/update/push_node.h b/src/mongo/db/update/push_node.h index 77cc6b5d125c0..0169b71e62b41 100644 --- a/src/mongo/db/update/push_node.h +++ b/src/mongo/db/update/push_node.h @@ -29,14 +29,30 @@ #pragma once +#include #include +#include +#include +#include #include #include #include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/update/log_builder_interface.h" #include "mongo/db/update/modifier_node.h" #include "mongo/db/update/pattern_cmp.h" +#include "mongo/db/update/runtime_update_path.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/update/push_node_test.cpp b/src/mongo/db/update/push_node_test.cpp index 8999a2707a1bf..fd4718a6f8b78 100644 --- a/src/mongo/db/update/push_node_test.cpp +++ b/src/mongo/db/update/push_node_test.cpp @@ -27,18 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/update/push_node.h" - +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/update/push_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -284,7 +299,7 @@ TEST_F(PushNodeTest, ApplyToEmptyArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -303,7 +318,7 @@ TEST_F(PushNodeTest, ApplyToEmptyDocument) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -322,7 +337,7 @@ TEST_F(PushNodeTest, ApplyToArrayWithOneElement) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -348,7 +363,7 @@ TEST_F(PushNodeTest, ApplyToDottedPathElement) { auto result = node.apply(getApplyParams(doc.root()["choices"]["first"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{_id: 1, " " question: 'a', " " choices: {first: {choice: 'b', votes: [1]}, " @@ -372,7 +387,7 @@ TEST_F(PushNodeTest, ApplySimpleEachToEmptyArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -391,7 +406,7 @@ TEST_F(PushNodeTest, ApplySimpleEachToEmptyDocument) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -410,7 +425,7 @@ TEST_F(PushNodeTest, ApplyMultipleEachToEmptyDocument) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + 
ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 2]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -429,7 +444,7 @@ TEST_F(PushNodeTest, ApplySimpleEachToArrayWithOneElement) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -448,7 +463,7 @@ TEST_F(PushNodeTest, ApplyMultipleEachToArrayWithOneElement) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1, 2]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -467,7 +482,7 @@ TEST_F(PushNodeTest, ApplyEmptyEachToEmptyArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -486,7 +501,7 @@ TEST_F(PushNodeTest, ApplyEmptyEachToEmptyDocument) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -505,7 +520,7 @@ TEST_F(PushNodeTest, ApplyEmptyEachToArrayWithOneElement) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -524,7 +539,7 @@ TEST_F(PushNodeTest, ApplyToArrayWithSlice) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [3]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -543,7 +558,7 @@ TEST_F(PushNodeTest, ApplyWithNumericSort) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [-1, 2, 3]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -562,7 +577,7 @@ TEST_F(PushNodeTest, ApplyWithReverseNumericSort) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [4, 3, -1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -581,7 +596,7 @@ TEST_F(PushNodeTest, ApplyWithMixedSort) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [-1, 3, 4, 't', {a: 1}, {b: 1}]}"), doc); 
ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -600,7 +615,7 @@ TEST_F(PushNodeTest, ApplyWithReverseMixedSort) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 1}, {a: 1}, 't', 4, 3, -1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -619,7 +634,7 @@ TEST_F(PushNodeTest, ApplyWithEmbeddedFieldSort) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [3, 't', {b: 1}, 4, -1, {a: 1}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -641,7 +656,7 @@ TEST_F(PushNodeTest, ApplySortWithCollator) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: ['ha', 'gb', 'fc', 'dd']}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -684,15 +699,13 @@ void checkDocumentAndResult(BSONObj updateModifier, BSONObj expectedDocument, const mutablebson::Document& actualDocument, UpdateExecutor::ApplyResult applyResult) { - if (expectedDocument == actualDocument && !applyResult.noop && !applyResult.indexesAffected) { + if (expectedDocument == actualDocument && !applyResult.noop) { // Check succeeded. } else { FAIL(str::stream() << "apply() failure for " << updateModifier << ". Expected " - << expectedDocument - << " (noop = false, indexesAffected = false) but got " - << actualDocument.toString() << " (noop = " - << (applyResult.noop ? "true" : "false") << ", indexesAffected = " - << (applyResult.indexesAffected ? "true" : "false") << ")."); + << expectedDocument << " (noop = false) but got " + << actualDocument.toString() + << " (noop = " << (applyResult.noop ? 
"true" : "false")); } } @@ -871,7 +884,7 @@ TEST_F(PushNodeTest, ApplyToEmptyArrayWithPositionZero) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -890,7 +903,7 @@ TEST_F(PushNodeTest, ApplyToEmptyArrayWithPositionOne) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -909,7 +922,7 @@ TEST_F(PushNodeTest, ApplyToEmptyArrayWithLargePosition) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -928,7 +941,7 @@ TEST_F(PushNodeTest, ApplyToSingletonArrayWithPositionZero) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 0]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -947,7 +960,7 @@ TEST_F(PushNodeTest, ApplyToSingletonArrayWithLargePosition) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -966,7 +979,7 @@ TEST_F(PushNodeTest, ApplyToEmptyArrayWithNegativePosition) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -985,7 +998,7 @@ TEST_F(PushNodeTest, ApplyToSingletonArrayWithNegativePosition) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 0]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1004,7 +1017,7 @@ TEST_F(PushNodeTest, ApplyToPopulatedArrayWithNegativePosition) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1, 2, 5, 3, 4]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1023,7 +1036,7 @@ TEST_F(PushNodeTest, ApplyToPopulatedArrayWithOutOfBoundsNegativePosition) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [5, 0, 1, 2, 3, 4]}"), doc); 
ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1042,7 +1055,7 @@ TEST_F(PushNodeTest, ApplyMultipleElementsPushWithNegativePosition) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 1, 2, 5, 6, 7, 3, 4]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1063,7 +1076,7 @@ TEST_F(PushNodeTest, PushWithMinIntAsPosition) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [5, 0, 1, 2, 3, 4]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); diff --git a/src/mongo/db/update/rename_node.cpp b/src/mongo/db/update/rename_node.cpp index 7537925851ce9..3023127e70539 100644 --- a/src/mongo/db/update/rename_node.cpp +++ b/src/mongo/db/update/rename_node.cpp @@ -27,17 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include -#include "mongo/db/update/rename_node.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/mutable/const_element.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref_set.h" #include "mongo/db/update/field_checker.h" #include "mongo/db/update/modifier_node.h" #include "mongo/db/update/path_support.h" -#include "mongo/db/update/storage_validation.h" +#include "mongo/db/update/rename_node.h" +#include "mongo/db/update/runtime_update_path.h" #include "mongo/db/update/unset_node.h" +#include "mongo/db/update/update_executor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -256,16 +270,11 @@ UpdateExecutor::ApplyResult RenameNode::apply(ApplyParams applyParams, UnsetNode unsetElement; auto unsetElementApplyResult = unsetElement.apply(unsetParams, unsetUpdateNodeApplyParams); - // Determine the final result based on the results of the $set and $unset. - ApplyResult applyResult; - applyResult.indexesAffected = - setElementApplyResult.indexesAffected || unsetElementApplyResult.indexesAffected; - // The $unset would only be a no-op if the source element did not exist, in which case we would // have exited early with a no-op result. 
invariant(!unsetElementApplyResult.noop); - return applyResult; + return {}; } } // namespace mongo diff --git a/src/mongo/db/update/rename_node.h b/src/mongo/db/update/rename_node.h index 668f42d5125f3..35804bf5951bf 100644 --- a/src/mongo/db/update/rename_node.h +++ b/src/mongo/db/update/rename_node.h @@ -35,7 +35,20 @@ #include #include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/update_leaf_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" namespace mongo { diff --git a/src/mongo/db/update/rename_node_test.cpp b/src/mongo/db/update/rename_node_test.cpp index f8b7e010ca7f3..bb3363ebc2905 100644 --- a/src/mongo/db/update/rename_node_test.cpp +++ b/src/mongo/db/update/rename_node_test.cpp @@ -27,18 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/update/rename_node.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/update/rename_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -122,7 +125,7 @@ TEST_F(RenameNodeTest, SimpleNumberAtRoot) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 2}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, i: {b: 2}}}")); @@ -140,7 +143,7 @@ TEST_F(RenameNodeTest, ToExistsAtSameLevel) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 2}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, u: {b: 2}}}")); @@ -158,7 +161,7 @@ TEST_F(RenameNodeTest, ToAndFromHaveSameValue) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 2}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, u: {b: 2}}}")); @@ -176,7 +179,7 @@ TEST_F(RenameNodeTest, RenameToFieldWithSameValueButDifferentType) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); 
ASSERT_EQUALS(fromjson("{b: 1}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, u: {b: 1}}}")); @@ -194,7 +197,7 @@ TEST_F(RenameNodeTest, FromDottedElement) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {}, b: {d: 6}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {u: {b: {d: 6}}, sa: {d: {c: false}}}}")); @@ -212,7 +215,7 @@ TEST_F(RenameNodeTest, RenameToExistingNestedFieldDoesNotReorderFields) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]["c"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: {c: 4, d: 2}}, b: 3, c: {}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {sa: {sb: {u: {c: 4}}}, sc: {d: {d: false}}}}")); @@ -231,7 +234,7 @@ TEST_F(RenameNodeTest, MissingCompleteTo) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["c"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 1, c: {r: {d: 2}}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, sc: {i: {r: {d: 2}}}}}")); @@ -249,7 +252,7 @@ TEST_F(RenameNodeTest, ToIsCompletelyMissing) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: {c: {d: 2}}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, i: {b: {c: {d: 2}}}}}")); @@ -267,7 +270,7 @@ TEST_F(RenameNodeTest, ToMissingDottedField) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: {c: {d: [{a:2, b:1}]}}}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, i: {b: {c: {d: [{a: 2, b: 1}]}}}}}")); @@ -386,7 +389,7 @@ TEST_F(RenameNodeTest, ReplaceArrayField) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 2}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, u: {b: 2}}}")); @@ -404,7 +407,7 @@ TEST_F(RenameNodeTest, ReplaceWithArrayField) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: []}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {a: false}, u: {b: []}}}")); @@ -422,7 +425,7 @@ TEST_F(RenameNodeTest, CanRenameFromInvalidFieldName) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc); assertOplogEntry(fromjson("{$v: 2, diff: {d: {$a: false}, i: {a: 2}}}")); @@ -454,7 +457,7 @@ TEST_F(RenameNodeTest, 
RenameFromNonExistentPathIsNoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 2}"), doc); assertOplogEntryIsNoop(); @@ -473,7 +476,7 @@ TEST_F(RenameNodeTest, ApplyCanRemoveRequiredPartOfDBRefIfValidateForStorageIsFa setValidateForStorage(false); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); auto updated = BSON("a" << BSON("$ref" << "c") << "b" << 0); @@ -544,7 +547,7 @@ TEST_F(RenameNodeTest, ApplyCanRemoveImmutablePathIfNoop) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: {}}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); diff --git a/src/mongo/db/update/set_node.cpp b/src/mongo/db/update/set_node.cpp index 3175cb38f6bae..c302964f497aa 100644 --- a/src/mongo/db/update/set_node.cpp +++ b/src/mongo/db/update/set_node.cpp @@ -27,11 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/set_node.h" +#include -#include "mongo/db/update/path_support.h" +#include "mongo/db/update/set_node.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/update/set_node.h b/src/mongo/db/update/set_node.h index e9920826f2933..e8b92bb60d1fb 100644 --- a/src/mongo/db/update/set_node.h +++ b/src/mongo/db/update/set_node.h @@ -31,8 +31,20 @@ #include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/modifier_node.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" namespace mongo { diff --git a/src/mongo/db/update/set_node_test.cpp b/src/mongo/db/update/set_node_test.cpp index 9c9b7733aeb9d..d5550115dd072 100644 --- a/src/mongo/db/update/set_node_test.cpp +++ b/src/mongo/db/update/set_node_test.cpp @@ -27,18 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/set_node.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/update/runtime_update_path.h" +#include "mongo/db/update/set_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -72,7 +78,7 @@ TEST_F(SetNodeTest, ApplyNoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 5}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -91,7 +97,7 @@ TEST_F(SetNodeTest, ApplyEmptyPathToCreate) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 6}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -111,7 +117,7 @@ TEST_F(SetNodeTest, ApplyCreatePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {d: 5, b: {c: 6}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -130,7 +136,7 @@ TEST_F(SetNodeTest, ApplyCreatePathFromRoot) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{c: 5, a: {b: 6}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -150,7 +156,7 @@ TEST_F(SetNodeTest, ApplyPositional) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][1]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, 6, 2]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -188,7 +194,7 @@ TEST_F(SetNodeTest, ApplyNonViablePathToCreateFromReplicationIsNoOp) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 5}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -207,7 +213,7 @@ TEST_F(SetNodeTest, ApplyNoIndexDataNoLogBuilder) { setLogBuilderToNull(); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 6}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", 
getModifiedPaths()); @@ -224,7 +230,7 @@ TEST_F(SetNodeTest, ApplyDoesNotAffectIndexes) { addIndexedPath("b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 6}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -241,7 +247,7 @@ TEST_F(SetNodeTest, TypeChangeIsNotANoop) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: NumberLong(2)}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -262,7 +268,7 @@ TEST_F(SetNodeTest, IdentityOpOnDeserializedIsNotANoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b : NumberInt(2)}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -279,7 +285,7 @@ TEST_F(SetNodeTest, ApplyEmptyDocument) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -296,7 +302,7 @@ TEST_F(SetNodeTest, ApplyInPlace) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -313,7 +319,7 @@ TEST_F(SetNodeTest, ApplyOverridePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -330,7 +336,7 @@ TEST_F(SetNodeTest, ApplyChangeType) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -347,7 +353,7 @@ TEST_F(SetNodeTest, ApplyNewPath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 1, a: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -380,7 +386,7 @@ TEST_F(SetNodeTest, ApplyNoOpDottedPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); 
+ ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b : 2}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -397,7 +403,7 @@ TEST_F(SetNodeTest, TypeChangeOnDottedPathIsNotANoOp) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b : NumberLong(2)}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -446,7 +452,7 @@ TEST_F(SetNodeTest, ApplyInPlaceDottedPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -463,7 +469,7 @@ TEST_F(SetNodeTest, ApplyChangeTypeDottedPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -480,7 +486,7 @@ TEST_F(SetNodeTest, ApplyChangePath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -498,7 +504,7 @@ TEST_F(SetNodeTest, ApplyExtendPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {c: 1, b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -515,7 +521,7 @@ TEST_F(SetNodeTest, ApplyNewDottedPath) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{c: 1, a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -532,7 +538,7 @@ TEST_F(SetNodeTest, ApplyEmptyDoc) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", getModifiedPaths()); @@ -549,7 +555,7 @@ TEST_F(SetNodeTest, ApplyFieldWithDot) { addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{'a.b':4, a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b}", 
getModifiedPaths()); @@ -566,7 +572,7 @@ TEST_F(SetNodeTest, ApplyNoOpArrayIndex) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"][2]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.2.b}", getModifiedPaths()); @@ -583,7 +589,7 @@ TEST_F(SetNodeTest, TypeChangeInArrayIsNotANoOp) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"][2]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: NumberInt(2)}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.2.b}", getModifiedPaths()); @@ -616,7 +622,7 @@ TEST_F(SetNodeTest, ApplyInPlaceArrayIndex) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"][2]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.2.b}", getModifiedPaths()); @@ -634,7 +640,7 @@ TEST_F(SetNodeTest, ApplyNormalArray) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},{b: 1},{b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -652,7 +658,7 @@ TEST_F(SetNodeTest, ApplyPaddingArray) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0},null,{b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -670,7 +676,7 @@ TEST_F(SetNodeTest, ApplyNumericObject) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 0, '2': {b: 2}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.2.b}", getModifiedPaths()); @@ -690,7 +696,7 @@ TEST_F(SetNodeTest, ApplyNumericField) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["2"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.2.b}", getModifiedPaths()); @@ -710,7 +716,7 @@ TEST_F(SetNodeTest, ApplyExtendNumericField) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"]["2"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {'2': {c: 1, b: 2}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.2.b}", getModifiedPaths()); @@ 
-728,7 +734,7 @@ TEST_F(SetNodeTest, ApplyEmptyObject) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {'2': {b: 2}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.2.b}", getModifiedPaths()); @@ -746,7 +752,7 @@ TEST_F(SetNodeTest, ApplyEmptyArray) { addIndexedPath("a.2.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [null, null, {b: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -814,7 +820,7 @@ TEST_F(SetNodeTest, ApplyNoOpComplex) { addIndexedPath("a.1.b"); auto result = node.apply(getApplyParams(doc.root()["a"][1]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: {c: 0, d: 0}}, {b: {c: 1, d: 1}}]}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.1.b}", getModifiedPaths()); @@ -831,7 +837,7 @@ TEST_F(SetNodeTest, ApplySameStructure) { addIndexedPath("a.1.b"); auto result = node.apply(getApplyParams(doc.root()["a"][1]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: {c: 0, d: 0}}, {b: {c: 1, d: 1}}]}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.1.b}", getModifiedPaths()); @@ -866,7 +872,7 @@ TEST_F(SetNodeTest, SingleFieldFromReplication) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{_id:1, a: 1}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.1.b}", getModifiedPaths()); @@ -885,7 +891,7 @@ TEST_F(SetNodeTest, SingleFieldNoIdFromReplication) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 1}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.1.b}", getModifiedPaths()); @@ -904,7 +910,7 @@ TEST_F(SetNodeTest, NestedFieldFromReplication) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["a"]["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{_id:1, a: {a: 1}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.a.1.b}", getModifiedPaths()); @@ -923,7 +929,7 @@ TEST_F(SetNodeTest, DoubleNestedFieldFromReplication) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]["c"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{_id:1, a: {b: {c: 1}}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.b.c.d}", getModifiedPaths()); @@ 
-942,7 +948,7 @@ TEST_F(SetNodeTest, NestedFieldNoIdFromReplication) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["a"]["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {a: 1}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.a.1.b}", getModifiedPaths()); @@ -961,7 +967,7 @@ TEST_F(SetNodeTest, ReplayArrayFieldNotAppendedIntermediateFromReplication) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["a"][0]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{_id: 0, a: [1, {b: [1]}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.0.b}", getModifiedPaths()); @@ -978,7 +984,7 @@ TEST_F(SetNodeTest, Set6) { addIndexedPath("r.a"); auto result = node.apply(getApplyParams(doc.root()["r"]["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{_id: 1, r: {a:2, b:2}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -998,7 +1004,7 @@ TEST_F(SetNodeTest, Set6FromRepl) { setFromOplogApplication(true); auto result = node.apply(getApplyParams(doc.root()["r"]["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{_id: 1, r: {a:2, b:2} }"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -1025,7 +1031,7 @@ TEST_F(SetNodeTest, ApplySetModToEphemeralDocument) { addIndexedPath("x"); auto result = node.apply(getApplyParams(doc.root()["x"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{ x : { a : 100, b : 2 } }"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); } @@ -1042,7 +1048,7 @@ TEST_F(SetNodeTest, ApplyCanCreateDollarPrefixedFieldNameWhenValidateForStorageI setValidateForStorage(false); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{$bad: 1}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1078,7 +1084,7 @@ TEST_F(SetNodeTest, ApplyCanPerformNoopOnImmutablePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -1131,7 +1137,7 @@ TEST_F(SetNodeTest, ApplyCanPerformNoopOnPrefixOfImmutablePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -1151,7 +1157,7 @@ TEST_F(SetNodeTest, ApplyCanOverwritePrefixToCreateImmutablePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); 
+ ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1171,7 +1177,7 @@ TEST_F(SetNodeTest, ApplyCanOverwritePrefixOfImmutablePathIfNoopOnImmutablePath) addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2, c: 3}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1207,7 +1213,7 @@ TEST_F(SetNodeTest, ApplyCanPerformNoopOnSuffixOfImmutablePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]["c"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: {c: 2}}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -1262,7 +1268,7 @@ TEST_F(SetNodeTest, ApplyCanCreateImmutablePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 2}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1282,7 +1288,7 @@ TEST_F(SetNodeTest, ApplyCanCreatePrefixOfImmutablePath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1302,7 +1308,7 @@ TEST_F(SetNodeTest, ApplySetFieldInNonExistentArrayElementAffectsIndexOnSiblingF addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0}, {c: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1322,7 +1328,7 @@ TEST_F(SetNodeTest, ApplySetFieldInExistingArrayElementDoesNotAffectIndexOnSibli addIndexedPath("a.b"); auto result = node.apply(getApplyParams(doc.root()["a"][0]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 0, c: 2}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1343,7 +1349,10 @@ TEST_F(SetNodeTest, ApplySetFieldInNonExistentNumericFieldDoesNotAffectIndexOnSi addIndexedPath("a.1.b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + // TODO: SERVER-77344 anyIndexesMightBeAffected is tricked into thinking that inserting a.1 is + // going to affect the index on a.1.b, but it doesn't see that the inserted path is actually + // a.1.c. 
+ // ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {'0': {b: 0}, '1': {c: 2}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1362,7 +1371,7 @@ TEST_F(SetNodeTest, ApplySetOnInsertIsNoopWhenInsertIsFalse) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -1383,7 +1392,6 @@ TEST_F(SetNodeTest, ApplySetOnInsertIsAppliedWhenInsertIsTrue) { setLogBuilderToNull(); // The log builder is null for inserts. auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); @@ -1402,7 +1410,6 @@ TEST_F(SetNodeTest, ApplySetOnInsertExistingPath) { setLogBuilderToNull(); // The log builder is null for inserts. auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); ASSERT_EQUALS(fromjson("{a: 2}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a}", getModifiedPaths()); diff --git a/src/mongo/db/update/storage_validation.cpp b/src/mongo/db/update/storage_validation.cpp index c02d42a0b2734..72a7a684e8eaa 100644 --- a/src/mongo/db/update/storage_validation.cpp +++ b/src/mongo/db/update/storage_validation.cpp @@ -29,12 +29,17 @@ #include "mongo/db/update/storage_validation.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/mutable/const_element.h" #include "mongo/bson/mutable/document.h" #include "mongo/db/query/dbref.h" -#include "mongo/db/query/query_feature_flags_gen.h" -#include "mongo/db/update/modifier_table.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -49,6 +54,7 @@ void scanDocumentChildren(mutablebson::ConstElement elem, std::uint32_t recursionLevel, const bool allowTopLevelDollarPrefixes, const bool shouldValidate, + const bool isEmbeddedInIdField, bool* containsDotsAndDollarsField) { if (!elem.hasChildren()) { return; @@ -61,6 +67,7 @@ void scanDocumentChildren(mutablebson::ConstElement elem, recursionLevel + 1, allowTopLevelDollarPrefixes, shouldValidate, + isEmbeddedInIdField, containsDotsAndDollarsField); curr = curr.rightSibling(); } @@ -149,6 +156,7 @@ void scanDocument(const mutablebson::Document& doc, const bool allowTopLevelDollarPrefixes, const bool shouldValidate, bool* containsDotsAndDollarsField) { + bool hasId = false; auto currElem = doc.root().leftChild(); while (currElem.ok()) { if (currElem.getFieldName() == idFieldName && shouldValidate) { @@ -160,10 +168,13 @@ void scanDocument(const mutablebson::Document& doc, 0 /* recursionLevel - forces _id fields to be treated as top-level. */, false /* Top-level _id fields cannot be $-prefixed. */, shouldValidate, + true /* Indicates the element is embedded inside an _id field. 
*/, containsDotsAndDollarsField); } else { uassertStatusOK(storageValidIdField(currElem.getValue())); } + uassert(ErrorCodes::BadValue, "Can't have multiple _id fields in one document", !hasId); + hasId = true; } else { // Validate this child element. const auto deep = true; @@ -173,6 +184,7 @@ void scanDocument(const mutablebson::Document& doc, recursionLevel, allowTopLevelDollarPrefixes, shouldValidate, + false /* Not embedded inside an _id field. */, containsDotsAndDollarsField); } @@ -185,6 +197,7 @@ void scanDocument(mutablebson::ConstElement elem, std::uint32_t recursionLevel, const bool allowTopLevelDollarPrefixes, const bool shouldValidate, + const bool isEmbeddedInIdField, bool* containsDotsAndDollarsField) { if (shouldValidate) { uassert(ErrorCodes::BadValue, "Invalid elements cannot be stored.", elem.ok()); @@ -201,8 +214,10 @@ void scanDocument(mutablebson::ConstElement elem, const bool childOfArray = parent.ok() ? (parent.getType() == BSONType::Array) : false; // Only check top-level fields if 'allowTopLevelDollarPrefixes' is false, and don't validate any - // fields for '$'-prefixes if 'allowTopLevelDollarPrefixes' is true. - const bool checkTopLevelFields = !allowTopLevelDollarPrefixes && (recursionLevel == 1); + // fields for '$'-prefixes if 'allowTopLevelDollarPrefixes' is true. If 'isEmbeddedInIdField' is + // true, check for '$'-prefixes at all the levels. + const bool checkTopLevelFields = + !allowTopLevelDollarPrefixes && (recursionLevel == 1 || isEmbeddedInIdField); auto fieldName = elem.getFieldName(); if (fieldName[0] == '$') { @@ -226,6 +241,7 @@ void scanDocument(mutablebson::ConstElement elem, recursionLevel, allowTopLevelDollarPrefixes, shouldValidate, + isEmbeddedInIdField, containsDotsAndDollarsField); } } diff --git a/src/mongo/db/update/storage_validation.h b/src/mongo/db/update/storage_validation.h index c1ce7e5a951c1..b8da041d04526 100644 --- a/src/mongo/db/update/storage_validation.h +++ b/src/mongo/db/update/storage_validation.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/mutable/element.h" namespace mongo { @@ -73,12 +77,16 @@ void scanDocument(const mutablebson::Document& doc, * * 'containsDotsAndDollarsField' is set to true if there exists any field name containing '.'/'$' * during validation. + * + * 'isEmbeddedInIdField' is set to true if the element is embedded inside an _id field. This allows + * to reject $-prefixed fields at all levels under an _id field. */ void scanDocument(mutablebson::ConstElement elem, bool deep, std::uint32_t recursionLevel, bool allowTopLevelDollarPrefixes, bool shouldValidate, + bool isEmbeddedInIdField, bool* containsDotsAndDollarsField); } // namespace storage_validation diff --git a/src/mongo/db/update/unset_node.cpp b/src/mongo/db/update/unset_node.cpp index be814cc73d12e..0252276707de3 100644 --- a/src/mongo/db/update/unset_node.cpp +++ b/src/mongo/db/update/unset_node.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/unset_node.h" +#include +#include +#include "mongo/bson/bsontypes.h" #include "mongo/db/update/storage_validation.h" +#include "mongo/db/update/unset_node.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -77,6 +81,7 @@ void UnsetNode::validateUpdate(mutablebson::ConstElement updatedElement, recursionLevelForCheck, false, /* allowTopLevelDollarPrefixedFields */ validateForStorage, + false, /* isEmbeddedInIdField */ containsDotsAndDollarsField); } @@ -86,6 +91,7 @@ void UnsetNode::validateUpdate(mutablebson::ConstElement updatedElement, recursionLevelForCheck, false, /* allowTopLevelDollarPrefixedFields */ validateForStorage, + false, /* isEmbeddedInIdField */ containsDotsAndDollarsField); } } diff --git a/src/mongo/db/update/unset_node.h b/src/mongo/db/update/unset_node.h index bd73f88d790ac..77adb5d7c7139 100644 --- a/src/mongo/db/update/unset_node.h +++ b/src/mongo/db/update/unset_node.h @@ -29,10 +29,28 @@ #pragma once +#include #include +#include +#include + +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/const_element.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/update/log_builder_interface.h" #include "mongo/db/update/modifier_node.h" +#include "mongo/db/update/runtime_update_path.h" +#include "mongo/db/update/update_node.h" +#include "mongo/db/update/update_node_visitor.h" namespace mongo { diff --git a/src/mongo/db/update/unset_node_test.cpp b/src/mongo/db/update/unset_node_test.cpp index 91cb9b9ab63a2..57be26bb5e0b8 100644 --- a/src/mongo/db/update/unset_node_test.cpp +++ b/src/mongo/db/update/unset_node_test.cpp @@ -27,18 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/update/unset_node.h" +#include -#include "mongo/bson/mutable/algorithm.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/mutable/document.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/update/unset_node.h" +#include "mongo/db/update/update_executor.h" #include "mongo/db/update/update_node_test_fixture.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -85,7 +89,7 @@ TEST_F(UnsetNodeTest, UnsetNoOp) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 5}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -105,7 +109,7 @@ TEST_F(UnsetNodeTest, UnsetNoOpDottedPath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 5}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -125,7 +129,7 @@ TEST_F(UnsetNodeTest, UnsetNoOpThroughArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a:[{b:1}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -144,7 +148,7 @@ TEST_F(UnsetNodeTest, UnsetNoOpEmptyDoc) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -163,7 +167,7 @@ TEST_F(UnsetNodeTest, UnsetTopLevelPath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -182,7 +186,7 @@ TEST_F(UnsetNodeTest, UnsetNestedPath) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]["c"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: {}}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -201,7 +205,7 @@ TEST_F(UnsetNodeTest, UnsetObject) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -220,7 +224,7 @@ TEST_F(UnsetNodeTest, UnsetArrayElement) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][0]), 
getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a:[null], b:1}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -240,7 +244,7 @@ TEST_F(UnsetNodeTest, UnsetPositional) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][1]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0, null, 2]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -259,7 +263,7 @@ TEST_F(UnsetNodeTest, UnsetEntireArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -278,7 +282,7 @@ TEST_F(UnsetNodeTest, UnsetFromObjectInArray) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][0]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a:[{}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -297,7 +301,7 @@ TEST_F(UnsetNodeTest, CanUnsetInvalidField) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"][0]["$b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{b: 1, a: [{}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -316,7 +320,6 @@ TEST_F(UnsetNodeTest, ApplyNoIndexDataNoLogBuilder) { setLogBuilderToNull(); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); ASSERT_EQUALS(fromjson("{}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS(getModifiedPaths(), "{a}"); @@ -333,7 +336,7 @@ TEST_F(UnsetNodeTest, ApplyDoesNotAffectIndexes) { addIndexedPath("b"); auto result = node.apply(getApplyParams(doc.root()["a"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -352,7 +355,7 @@ TEST_F(UnsetNodeTest, ApplyFieldWithDot) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{'a.b':4, a: {}}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -370,7 +373,7 @@ TEST_F(UnsetNodeTest, ApplyCannotRemoveRequiredPartOfDBRef) { setPathTaken(makeRuntimeUpdatePathForTest("a.$id")); auto result = node.apply(getApplyParams(doc.root()["a"]["$id"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); auto updated = BSON("a" << BSON("$ref" << "c")); ASSERT_EQUALS(updated, doc); @@ -392,7 +395,7 @@ TEST_F(UnsetNodeTest, ApplyCanRemoveRequiredPartOfDBRefIfValidateForStorageIsFal setValidateForStorage(false); auto result = node.apply(getApplyParams(doc.root()["a"]["$id"]), getUpdateNodeApplyParams()); ASSERT_FALSE(result.noop); - 
ASSERT_TRUE(result.indexesAffected); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); auto updated = BSON("a" << BSON("$ref" << "c")); ASSERT_EQUALS(updated, doc); @@ -463,7 +466,7 @@ TEST_F(UnsetNodeTest, ApplyCanRemoveImmutablePathIfNoop) { addIndexedPath("a"); auto result = node.apply(getApplyParams(doc.root()["a"]["b"]), getUpdateNodeApplyParams()); ASSERT_TRUE(result.noop); - ASSERT_FALSE(result.indexesAffected); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: {b: 1}}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); diff --git a/src/mongo/db/update/update_array_node.cpp b/src/mongo/db/update/update_array_node.cpp index c7c1ea904f63e..b39ccbcdcd183 100644 --- a/src/mongo/db/update/update_array_node.cpp +++ b/src/mongo/db/update/update_array_node.cpp @@ -154,8 +154,6 @@ UpdateExecutor::ApplyResult UpdateArrayNode::apply( auto childApplyResult = mergedChild->apply(childApplyParams, childUpdateNodeApplyParams); - applyResult.indexesAffected = - applyResult.indexesAffected || childApplyResult.indexesAffected; applyResult.noop = applyResult.noop && childApplyResult.noop; if (!childApplyResult.noop) { modifiedElement = childElement; diff --git a/src/mongo/db/update/update_array_node_test.cpp b/src/mongo/db/update/update_array_node_test.cpp index 8999a4c6c6b3d..9c77bc246948c 100644 --- a/src/mongo/db/update/update_array_node_test.cpp +++ b/src/mongo/db/update/update_array_node_test.cpp @@ -116,8 +116,8 @@ TEST_F(UpdateArrayNodeTest, UpdateIsAppliedToAllMatchingElements) { mutablebson::Document doc(fromjson("{a: [0, 1, 0]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [2, 1, 2]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -166,8 +166,8 @@ TEST_F(UpdateArrayNodeTest, UpdateForEmptyIdentifierIsAppliedToAllArrayElements) mutablebson::Document doc(fromjson("{a: [0, 0, 0]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 1, 1]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -215,8 +215,8 @@ TEST_F(UpdateArrayNodeTest, ApplyMultipleUpdatesToArrayElement) { mutablebson::Document doc(fromjson("{a: [{b: 0, c: 0, d: 0}]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 1, c: 1, d: 1}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -255,8 +255,8 @@ TEST_F(UpdateArrayNodeTest, ApplyMultipleUpdatesToArrayElementsUsingMergedChildr mutablebson::Document doc(fromjson("{a: [{b: 0, c: 0}, {b: 0, c: 0}]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 1, c: 1}, {b: 1, c: 1}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -304,8 +304,8 @@ TEST_F(UpdateArrayNodeTest, ApplyMultipleUpdatesToArrayElementsWithoutMergedChil mutablebson::Document doc(fromjson("{a: [{b: 0, c: 0, d: 1}, {b: 1, c: 0, d: 0}]}")); addIndexedPath("a"); auto result = 
root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 2, c: 2, d: 1}, {b: 1, c: 2, d: 2}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -335,8 +335,8 @@ TEST_F(UpdateArrayNodeTest, ApplyMultipleUpdatesToArrayElementWithEmptyIdentifie mutablebson::Document doc(fromjson("{a: [{b: 0, c: 0}]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: 1, c: 1}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -381,8 +381,8 @@ TEST_F(UpdateArrayNodeTest, ApplyNestedArrayUpdates) { mutablebson::Document doc(fromjson("{a: [{x: 0, b: [{c: 0, d: 0}]}]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{x: 0, b: [{c: 1, d: 1}]}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -524,8 +524,8 @@ TEST_F(UpdateArrayNodeTest, NoArrayElementsMatch) { mutablebson::Document doc(fromjson("{a: [2, 2, 2]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_TRUE(result.noop); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [2, 2, 2]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -552,8 +552,8 @@ TEST_F(UpdateArrayNodeTest, UpdatesToAllArrayElementsAreNoops) { mutablebson::Document doc(fromjson("{a: [1, 1, 1]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_TRUE(result.noop); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1, 1, 1]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -580,8 +580,10 @@ TEST_F(UpdateArrayNodeTest, NoArrayElementAffectsIndexes) { mutablebson::Document doc(fromjson("{a: [{c: 0}, {c: 0}, {c: 0}]}")); addIndexedPath("a.c"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_FALSE(result.noop); + // TODO: SERVER-77344 detection of index related changes is tricked into thinking this is + // affected. 
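The converted assertions in these test hunks all follow one pattern: the test no longer reads an `indexesAffected` flag off the `ApplyResult`; it asks the fixture whether the diff recorded in the generated oplog entry touches any path registered via `addIndexedPath`. A minimal sketch of that pattern, kept to names that appear in this patch (`addIndexedPath`, `getApplyParams`, `getUpdateNodeApplyParams`, `getIndexAffectedFromLogEntry`) and assuming an update tree `root` built earlier in the test, as in the hunks above:

    // Fragment mirroring the converted tests; not a complete TEST_F body.
    mutablebson::Document doc(fromjson("{a: [0, 1, 0]}"));
    addIndexedPath("a");  // register the indexed path with the fixture
    auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams());
    ASSERT_FALSE(result.noop);
    // Index effects are no longer reported on ApplyResult; they are recomputed
    // from the diff embedded in the oplog entry produced by the apply above.
    ASSERT_TRUE(getIndexAffectedFromLogEntry());

As the TODO above notes, deriving the answer from the oplog diff can report a false positive when the diff only names a prefix of an indexed path, which is why that assertion is commented out pending SERVER-77344.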
+ // ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{c: 0, b: 0}, {c: 0, b: 0}, {c: 0, b: 0}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -609,8 +611,8 @@ TEST_F(UpdateArrayNodeTest, WhenOneElementIsMatchedLogElementUpdateDirectly) { mutablebson::Document doc(fromjson("{a: [{c: 1}, {c: 0}, {c: 1}]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{c: 1}, {c: 0, b: 0}, {c: 1}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -637,8 +639,8 @@ TEST_F(UpdateArrayNodeTest, WhenOneElementIsModifiedLogElement) { mutablebson::Document doc(fromjson("{a: [{c: 0, b: 0}, {c: 0}, {c: 1}]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{c: 0, b: 0}, {c: 0, b: 0}, {c: 1}]}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -662,8 +664,8 @@ TEST_F(UpdateArrayNodeTest, ArrayUpdateOnEmptyArrayIsANoop) { mutablebson::Document doc(fromjson("{a: []}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_TRUE(result.noop); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: []}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -691,8 +693,8 @@ TEST_F(UpdateArrayNodeTest, ApplyPositionalInsideArrayUpdate) { addIndexedPath("a"); setMatchedField("1"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [{b: [0, 1], c: 0}]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -720,8 +722,8 @@ TEST_F(UpdateArrayNodeTest, ApplyArrayUpdateFromReplication) { addIndexedPath("a"); setFromOplogApplication(true); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_TRUE(result.noop); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [0]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -772,8 +774,8 @@ TEST_F(UpdateArrayNodeTest, ApplyArrayUpdateWithoutLogBuilderOrIndexData) { mutablebson::Document doc(fromjson("{a: [0]}")); setLogBuilderToNull(); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: [1]}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); ASSERT_EQUALS("{a.0}", getModifiedPaths()); diff --git a/src/mongo/db/update/update_driver.cpp b/src/mongo/db/update/update_driver.cpp index be832127f01cf..e5fc861ccbcd2 100644 --- a/src/mongo/db/update/update_driver.cpp +++ b/src/mongo/db/update/update_driver.cpp @@ -29,27 +29,36 @@ #include "mongo/db/update/update_driver.h" +#include +#include +#include +#include + +#include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" -#include "mongo/bson/mutable/algorithm.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/document.h" -#include "mongo/db/bson/dotted_path_support.h" 
#include "mongo/db/curop_failpoint_helpers.h" -#include "mongo/db/field_ref.h" -#include "mongo/db/matcher/expression_leaf.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" -#include "mongo/db/server_options.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/update/delta_executor.h" #include "mongo/db/update/modifier_table.h" #include "mongo/db/update/object_replace_executor.h" #include "mongo/db/update/object_transform_executor.h" #include "mongo/db/update/path_support.h" -#include "mongo/db/update/storage_validation.h" -#include "mongo/db/update/update_oplog_entry_version.h" -#include "mongo/stdx/variant.h" -#include "mongo/util/embedded_builder.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/db/update/pipeline_executor.h" +#include "mongo/db/update/update_object_node.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep #include "mongo/util/str.h" namespace mongo { @@ -205,10 +214,10 @@ Status UpdateDriver::populateDocumentWithQueryFields(OperationContext* opCtx, } std::unique_ptr cq = std::move(statusWithCQ.getValue()); - return populateDocumentWithQueryFields(*cq, immutablePaths, doc); + return populateDocumentWithQueryFields(*cq->root(), immutablePaths, doc); } -Status UpdateDriver::populateDocumentWithQueryFields(const CanonicalQuery& query, +Status UpdateDriver::populateDocumentWithQueryFields(const MatchExpression& query, const FieldRefSet& immutablePaths, mutablebson::Document& doc) const { EqualityMatches equalities; @@ -216,11 +225,10 @@ Status UpdateDriver::populateDocumentWithQueryFields(const CanonicalQuery& query if (_updateType == UpdateType::kReplacement) { // Extract only immutable fields. - status = - pathsupport::extractFullEqualityMatches(*query.root(), immutablePaths, &equalities); + status = pathsupport::extractFullEqualityMatches(query, immutablePaths, &equalities); } else { // Extract all fields from op-style update. - status = pathsupport::extractEqualityMatches(*query.root(), &equalities); + status = pathsupport::extractEqualityMatches(query, &equalities); } if (!status.isOK()) @@ -241,8 +249,6 @@ Status UpdateDriver::update(OperationContext* opCtx, FieldRefSetWithStorage* modifiedPaths) { // TODO: assert that update() is called at most once in a !_multi case. - _affectIndices = _updateType == UpdateType::kReplacement && _indexedFields != nullptr; - _logDoc.reset(); UpdateExecutor::ApplyParams applyParams(doc->root(), immutablePaths); @@ -251,7 +257,6 @@ Status UpdateDriver::update(OperationContext* opCtx, applyParams.fromOplogApplication = _fromOplogApplication; applyParams.skipDotsDollarsCheck = _skipDotsDollarsCheck; applyParams.validateForStorage = validateForStorage; - applyParams.indexData = _indexedFields; applyParams.modifiedPaths = modifiedPaths; // The supplied 'modifiedPaths' must be an empty set. 
invariant(!modifiedPaths || modifiedPaths->empty()); @@ -273,9 +278,6 @@ Status UpdateDriver::update(OperationContext* opCtx, invariant(_updateExecutor); auto applyResult = _updateExecutor->applyUpdate(applyParams); - if (applyResult.indexesAffected) { - _affectIndices = true; - } if (docWasModified) { *docWasModified = !applyResult.noop; } diff --git a/src/mongo/db/update/update_driver.h b/src/mongo/db/update/update_driver.h index 94d1f73c8d409..639797545c0fa 100644 --- a/src/mongo/db/update/update_driver.h +++ b/src/mongo/db/update/update_driver.h @@ -29,24 +29,43 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/mutable/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/field_ref_set.h" #include "mongo/db/jsobj.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/update/modifier_table.h" #include "mongo/db/update/object_replace_executor.h" #include "mongo/db/update/pipeline_executor.h" +#include "mongo/db/update/update_executor.h" +#include "mongo/db/update/update_node.h" #include "mongo/db/update/update_node_visitor.h" #include "mongo/db/update/update_object_node.h" #include "mongo/db/update/update_tree_executor.h" #include "mongo/db/update_index_data.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -87,7 +106,7 @@ class UpdateDriver { const FieldRefSet& immutablePaths, mutablebson::Document& doc) const; - Status populateDocumentWithQueryFields(const CanonicalQuery& query, + Status populateDocumentWithQueryFields(const MatchExpression& query, const FieldRefSet& immutablePaths, mutablebson::Document& doc) const; @@ -150,13 +169,6 @@ class UpdateDriver { return _updateType; } - bool modsAffectIndices() const { - return _affectIndices; - } - void refreshIndexKeys(const UpdateIndexData* indexedFields) { - _indexedFields = indexedFields; - } - bool logOp() const { return _logOp; } @@ -222,12 +234,6 @@ class UpdateDriver { std::unique_ptr _updateExecutor; - // What are the list of fields in the collection over which the update is going to be - // applied that participate in indices? - // - // NOTE: Owned by the collection's info cache!. - const UpdateIndexData* _indexedFields = nullptr; - // // mutable properties after parsing // @@ -244,10 +250,6 @@ class UpdateDriver { boost::intrusive_ptr _expCtx; - // Are any of the fields mentioned in the mods participating in any index? Is set anew - // at each call to update. - bool _affectIndices = false; - // Do any of the mods require positional match details when calling 'prepare'? 
bool _positional = false; diff --git a/src/mongo/db/update/update_driver_test.cpp b/src/mongo/db/update/update_driver_test.cpp index 7cf7888671c4a..2b1d40078f14c 100644 --- a/src/mongo/db/update/update_driver_test.cpp +++ b/src/mongo/db/update/update_driver_test.cpp @@ -29,22 +29,36 @@ #include "mongo/db/update/update_driver.h" - +#include +#include #include +#include +#include +#include +#include + +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/simple_string_data_comparator.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement_comparator.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/document.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/field_ref.h" -#include "mongo/db/json.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/db/update_index_data.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -565,8 +579,7 @@ class ModifiedPathsTestFixture : public mongo::unittest::Test { const write_ops::UpdateModification& updateSpec, StringData matchedField = StringData(), std::vector arrayFilterSpec = {}, - bool fromOplog = false, - UpdateIndexData* indexData = nullptr) { + bool fromOplog = false) { boost::intrusive_ptr expCtx(new ExpressionContextForTest()); _driver = std::make_unique(expCtx); std::map> arrayFilters; @@ -579,7 +592,6 @@ class ModifiedPathsTestFixture : public mongo::unittest::Test { } _driver->setFromOplogApplication(fromOplog); - _driver->refreshIndexKeys(indexData); _driver->parse(updateSpec, arrayFilters); const bool validateForStorage = true; @@ -681,51 +693,18 @@ TEST_F(ModifiedPathsTestFixture, ReplaceFullDocumentAlwaysAffectsIndex) { ASSERT_EQ(_modifiedPaths, "{}"); } - -TEST_F(ModifiedPathsTestFixture, PipelineUpdatesAlwaysAffectsIndex) { - BSONObj spec = fromjson("{$set: {'a.1.b': 1}}"); - mutablebson::Document doc(fromjson("{a: [{b: 0}]}")); - runUpdate(&doc, std::vector{spec}); - ASSERT(_driver->modsAffectIndices()); +TEST_F(ModifiedPathsTestFixture, NeedsMatchDetailsIsTrueForPositionalUpdate) { + BSONObj spec = fromjson("{$set: {'a.$': 1}}"); + mutablebson::Document doc(fromjson("{a: [0, 1, 2]}")); + runUpdate(&doc, makeUpdateMod(spec), "0"_sd); + ASSERT_EQ(true, _driver->needMatchDetails()); } -TEST_F(ModifiedPathsTestFixture, DeltaUpdateNotAffectingIndex) { - BSONObj spec = fromjson("{d: {a: false}}"); - mutablebson::Document doc(fromjson("{a: [{b: 0}]}")); - runUpdate(&doc, - write_ops::UpdateModification::parseFromV2Delta( - spec, write_ops::UpdateModification::DiffOptions{}), - ""_sd, - {}, - true /* fromOplog */); - ASSERT(!_driver->modsAffectIndices()); - - UpdateIndexData indexData; - indexData.addPath(FieldRef("p")); - runUpdate(&doc, - write_ops::UpdateModification::parseFromV2Delta( - spec, write_ops::UpdateModification::DiffOptions{}), - ""_sd, - {}, - true /* fromOplog */, - &indexData); - ASSERT(!_driver->modsAffectIndices()); -} - -TEST_F(ModifiedPathsTestFixture, DeltaUpdateAffectingIndex) { - BSONObj spec 
= fromjson("{u: {a: 1}}"); - mutablebson::Document doc(fromjson("{a: [{b: 0}]}")); - UpdateIndexData indexData; - indexData.addPath(FieldRef("q")); - indexData.addPath(FieldRef("a.p")); - runUpdate(&doc, - write_ops::UpdateModification::parseFromV2Delta( - spec, write_ops::UpdateModification::DiffOptions{}), - ""_sd, - {}, - true /* fromOplog */, - &indexData); - ASSERT(_driver->modsAffectIndices()); +TEST_F(ModifiedPathsTestFixture, NeedsMatchDetailsIsFalseForNonPositionalUpdate) { + BSONObj spec = fromjson("{$set: {a: 1}}"); + mutablebson::Document doc(fromjson("{a: 0}")); + runUpdate(&doc, makeUpdateMod(spec)); + ASSERT_EQ(false, _driver->needMatchDetails()); } } // namespace diff --git a/src/mongo/db/update/update_executor.h b/src/mongo/db/update/update_executor.h index 8f674ff25e09f..ae755cdd68d9b 100644 --- a/src/mongo/db/update/update_executor.h +++ b/src/mongo/db/update/update_executor.h @@ -90,9 +90,6 @@ class UpdateExecutor { // constraints. bool validateForStorage = true; - // Used to determine whether indexes are affected. - const UpdateIndexData* indexData = nullptr; - // Indicates whether/what type of oplog entry should be produced by the update executor. // If 'logMode' indicates an oplog entry should be produced but the update turns out to be // a noop, an oplog entry may not be produced. @@ -108,13 +105,11 @@ class UpdateExecutor { struct ApplyResult { static ApplyResult noopResult() { ApplyResult applyResult; - applyResult.indexesAffected = false; applyResult.noop = true; applyResult.containsDotsAndDollarsField = false; return applyResult; } - bool indexesAffected = true; bool noop = false; bool containsDotsAndDollarsField = false; diff --git a/src/mongo/db/update/update_leaf_node.cpp b/src/mongo/db/update/update_leaf_node.cpp index b09919772a2cf..c5bb9a6329a31 100644 --- a/src/mongo/db/update/update_leaf_node.cpp +++ b/src/mongo/db/update/update_leaf_node.cpp @@ -27,10 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/update/update_leaf_node.h" - +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/update/update_leaf_node.h b/src/mongo/db/update/update_leaf_node.h index 362da4ad471bd..bcd3f5b9d3dc7 100644 --- a/src/mongo/db/update/update_leaf_node.h +++ b/src/mongo/db/update/update_leaf_node.h @@ -29,6 +29,12 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/update/update_node.h" diff --git a/src/mongo/db/update/update_node.cpp b/src/mongo/db/update/update_node.cpp index 60a2dd2107169..807f601b5e2ad 100644 --- a/src/mongo/db/update/update_node.cpp +++ b/src/mongo/db/update/update_node.cpp @@ -27,13 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/db/update/update_node.h" - -#include "mongo/base/status_with.h" +#include "mongo/base/error_codes.h" #include "mongo/db/update/update_array_node.h" #include "mongo/db/update/update_object_node.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/db/update/update_node.h b/src/mongo/db/update/update_node.h index 0f0e76fde098d..b2861a86b89ac 100644 --- a/src/mongo/db/update/update_node.h +++ b/src/mongo/db/update/update_node.h @@ -36,7 +36,9 @@ #include #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" #include "mongo/db/field_ref_set.h" #include "mongo/db/update/log_builder_interface.h" #include "mongo/db/update/runtime_update_path.h" diff --git a/src/mongo/db/update/update_node_test_fixture.h b/src/mongo/db/update/update_node_test_fixture.h index 609eb1f283313..85cde681e764e 100644 --- a/src/mongo/db/update/update_node_test_fixture.h +++ b/src/mongo/db/update/update_node_test_fixture.h @@ -29,8 +29,9 @@ #pragma once -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include "mongo/bson/json.h" #include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/db/update/document_diff_calculator.h" #include "mongo/db/update/update_node.h" #include "mongo/db/update/update_oplog_entry_serialization.h" @@ -39,7 +40,7 @@ namespace mongo { -class UpdateTestFixture : public LockerNoopServiceContextTest { +class UpdateTestFixture : public ServiceContextTest { public: ~UpdateTestFixture() override = default; @@ -85,18 +86,15 @@ class UpdateTestFixture : public LockerNoopServiceContextTest { applyParams.insert = _insert; applyParams.fromOplogApplication = _fromOplogApplication; applyParams.validateForStorage = _validateForStorage; - applyParams.indexData = _indexData.get(); applyParams.modifiedPaths = &_modifiedPaths; applyParams.logMode = ApplyParams::LogMode::kGenerateOplogEntry; return applyParams; } - bool getIndexAffectedFromLogEntry() { - if (!_logBuilder || !_indexData) { + bool getIndexAffectedFromLogEntry(BSONObj logEntry) { + if (!_indexData) { return false; } - // Keep the object alive, extractDiffFromOplogEntry returns a subdocument of this document. 
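The fixture hunk here splits `getIndexAffectedFromLogEntry` into an overload that takes an explicit oplog entry and a no-argument convenience overload that reads the fixture's own log builder via `getOplogEntry()`. A minimal usage sketch, limited to the accessors shown in this patch; both calls return false when no log builder or no index data is registered:

    // Fragment: check index effects against an explicitly captured oplog entry.
    BSONObj logEntry = getOplogEntry();                // entry produced by the last apply()
    ASSERT_TRUE(getIndexAffectedFromLogEntry(logEntry));
    // Convenience form used throughout the converted tests.
    ASSERT_TRUE(getIndexAffectedFromLogEntry());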
- BSONObj logEntry = getOplogEntry(); auto diff = update_oplog_entry::extractDiffFromOplogEntry(logEntry); if (!diff) { return false; @@ -106,6 +104,13 @@ class UpdateTestFixture : public LockerNoopServiceContextTest { .any(); } + bool getIndexAffectedFromLogEntry() { + if (!_logBuilder) { + return false; + } + return getIndexAffectedFromLogEntry(getOplogEntry()); + } + UpdateNode::UpdateNodeApplyParams getUpdateNodeApplyParams() { UpdateNode::UpdateNodeApplyParams applyParams; applyParams.pathToCreate = _pathToCreate; diff --git a/src/mongo/db/update/update_object_node.cpp b/src/mongo/db/update/update_object_node.cpp index 256c79d2baecb..833b4bbcbe1c0 100644 --- a/src/mongo/db/update/update_object_node.cpp +++ b/src/mongo/db/update/update_object_node.cpp @@ -145,7 +145,6 @@ void applyChild(const UpdateNode& child, UpdateNode::UpdateNodeApplyParams childUpdateNodeApplyParams = *updateNodeApplyParams; auto childApplyResult = child.apply(childApplyParams, childUpdateNodeApplyParams); - applyResult->indexesAffected = applyResult->indexesAffected || childApplyResult.indexesAffected; applyResult->noop = applyResult->noop && childApplyResult.noop; applyResult->containsDotsAndDollarsField = applyResult->containsDotsAndDollarsField || childApplyResult.containsDotsAndDollarsField; diff --git a/src/mongo/db/update/update_object_node_test.cpp b/src/mongo/db/update/update_object_node_test.cpp index ee77ffaee1b27..f02607816066a 100644 --- a/src/mongo/db/update/update_object_node_test.cpp +++ b/src/mongo/db/update/update_object_node_test.cpp @@ -1769,8 +1769,8 @@ TEST_F(UpdateObjectNodeTest, ApplyCreateField) { mutablebson::Document doc(fromjson("{a: 5}")); addIndexedPath("b"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 5, b: 6}"), doc); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1794,8 +1794,8 @@ TEST_F(UpdateObjectNodeTest, ApplyExistingField) { mutablebson::Document doc(fromjson("{a: 5}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_EQUALS(fromjson("{a: 6}"), doc); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -1837,8 +1837,8 @@ TEST_F(UpdateObjectNodeTest, ApplyExistingAndNonexistingFields) { mutablebson::Document doc(fromjson("{a: 0, c: 0}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: 5, c: 7, b: 6, d: 8}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1880,8 +1880,8 @@ TEST_F(UpdateObjectNodeTest, ApplyExistingNestedPaths) { mutablebson::Document doc(fromjson("{a: {b: 5, c: 5}, b: {d: 5, e: 5}}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: {b: 6, c: 7}, b: {d: 8, e: 9}}"), doc.getObject()); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -1923,8 +1923,8 @@ TEST_F(UpdateObjectNodeTest, ApplyCreateNestedPaths) { mutablebson::Document doc(fromjson("{z: 0}")); addIndexedPath("a"); auto result = 
root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{z: 0, a: {b: 6, c: 7}, b: {d: 8, e: 9}}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -1960,8 +1960,8 @@ TEST_F(UpdateObjectNodeTest, ApplyCreateDeeplyNestedPaths) { mutablebson::Document doc(fromjson("{z: 0}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{z: 0, a: {b: {c: {d: 6, e: 7}}, f: 8}}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -2009,8 +2009,8 @@ TEST_F(UpdateObjectNodeTest, ChildrenShouldBeAppliedInAlphabeticalOrder) { mutablebson::Document doc(fromjson("{z: 0, a: 0}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{z: 9, a: 5, b: 8, c: 7, d: 6}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -2043,8 +2043,8 @@ TEST_F(UpdateObjectNodeTest, CollatorShouldNotAffectUpdateOrder) { mutablebson::Document doc(fromjson("{}")); addIndexedPath("abc"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{abc: 5, cba: 6}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -2081,8 +2081,8 @@ TEST_F(UpdateObjectNodeTest, ApplyNoop) { addIndexedPath("b"); addIndexedPath("c"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_TRUE(result.noop); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: 5, b: 6, c: 7}"), doc.getObject()); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -2120,8 +2120,8 @@ TEST_F(UpdateObjectNodeTest, ApplySomeChildrenNoops) { addIndexedPath("b"); addIndexedPath("c"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: 5, b: 6, c: 7}"), doc.getObject()); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -2174,8 +2174,8 @@ TEST_F(UpdateObjectNodeTest, ApplyBlockingElementFromReplication) { addIndexedPath("a"); setFromOplogApplication(true); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: 0, b: 6}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -2228,8 +2228,8 @@ TEST_F(UpdateObjectNodeTest, ApplyMergePositionalChild) { setMatchedField("0"); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: [{b: 5, c: 6}]}"), doc.getObject()); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -2272,8 +2272,8 @@ TEST_F(UpdateObjectNodeTest, ApplyOrderMergedPositionalChild) { setMatchedField("1"); 
addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: {'0': 7, '1': {b: 6, c: 8}, '2': 5}}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -2339,8 +2339,8 @@ TEST_F(UpdateObjectNodeTest, ApplyDoNotMergePositionalChild) { setMatchedField("1"); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: {'0': 5, '1': 7, '2': 6}}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -2377,8 +2377,8 @@ TEST_F(UpdateObjectNodeTest, ApplyPositionalChildLast) { setMatchedField("2"); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: {'0': 6, '1': 7, '2': 5}}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -2409,8 +2409,8 @@ TEST_F(UpdateObjectNodeTest, ApplyUseStoredMergedPositional) { setMatchedField("0"); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: [{b: 5, c: 6}]}"), doc.getObject()); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -2422,8 +2422,8 @@ TEST_F(UpdateObjectNodeTest, ApplyUseStoredMergedPositional) { setMatchedField("0"); addIndexedPath("a"); result = root.apply(getApplyParams(doc2.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: [{b: 5, c: 6}]}"), doc2.getObject()); ASSERT_TRUE(doc2.isInPlaceModeEnabled()); @@ -2460,8 +2460,8 @@ TEST_F(UpdateObjectNodeTest, ApplyDoNotUseStoredMergedPositional) { setMatchedField("0"); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: [{b: 5, c: 6}, {c: 0, d: 7}]}"), doc.getObject()); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -2474,8 +2474,8 @@ TEST_F(UpdateObjectNodeTest, ApplyDoNotUseStoredMergedPositional) { setMatchedField("1"); addIndexedPath("a"); result = root.apply(getApplyParams(doc2.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: [{b: 5, c: 0}, {c: 6, d: 7}]}"), doc2.getObject()); ASSERT_TRUE(doc2.isInPlaceModeEnabled()); @@ -2505,8 +2505,8 @@ TEST_F(UpdateObjectNodeTest, ApplyToArrayByIndexWithLeadingZero) { mutablebson::Document doc(fromjson("{a: [0, 0, 0, 0, 0]}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: [0, 0, 2, 0, 0]}"), doc.getObject()); ASSERT_TRUE(doc.isInPlaceModeEnabled()); @@ -2542,8 +2542,8 @@ TEST_F(UpdateObjectNodeTest, ApplyMultipleArrayUpdates) { 
mutablebson::Document doc(fromjson("{a: []}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ( fromjson("{a: [null, null, 2, null, null, null, null, null, null, null, 10]}"), doc.getObject()); @@ -2574,8 +2574,8 @@ TEST_F(UpdateObjectNodeTest, ApplyMultipleUpdatesToDocumentInArray) { mutablebson::Document doc(fromjson("{a: []}")); addIndexedPath("a"); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_TRUE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_TRUE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: [null, null, {b: 1, c: 1}]}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); @@ -2625,8 +2625,8 @@ TEST_F(UpdateObjectNodeTest, SetAndPopModifiersWithCommonPrefixApplySuccessfully mutablebson::Document doc(fromjson("{a: {b: 3, c: [1, 2, 3, 4]}}")); auto result = root.apply(getApplyParams(doc.root()), getUpdateNodeApplyParams()); - ASSERT_FALSE(result.indexesAffected); ASSERT_FALSE(result.noop); + ASSERT_FALSE(getIndexAffectedFromLogEntry()); ASSERT_BSONOBJ_EQ(fromjson("{a: {b: 5, c: [2, 3, 4]}}"), doc.getObject()); ASSERT_FALSE(doc.isInPlaceModeEnabled()); diff --git a/src/mongo/db/update/update_oplog_entry_serialization.cpp b/src/mongo/db/update/update_oplog_entry_serialization.cpp index 55c5d1f2d6e72..2d3a02a8730b3 100644 --- a/src/mongo/db/update/update_oplog_entry_serialization.cpp +++ b/src/mongo/db/update/update_oplog_entry_serialization.cpp @@ -29,10 +29,19 @@ #include "mongo/db/update/update_oplog_entry_serialization.h" +#include +#include #include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" using namespace fmt::literals; diff --git a/src/mongo/db/update/update_oplog_entry_serialization.h b/src/mongo/db/update/update_oplog_entry_serialization.h index 018f819071f99..5056b75546888 100644 --- a/src/mongo/db/update/update_oplog_entry_serialization.h +++ b/src/mongo/db/update/update_oplog_entry_serialization.h @@ -29,6 +29,12 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/update_oplog_entry_version.h" diff --git a/src/mongo/db/update/update_oplog_entry_serialization_test.cpp b/src/mongo/db/update/update_oplog_entry_serialization_test.cpp index b6ef9b1692e37..34904d6b1ff4a 100644 --- a/src/mongo/db/update/update_oplog_entry_serialization_test.cpp +++ b/src/mongo/db/update/update_oplog_entry_serialization_test.cpp @@ -28,12 +28,16 @@ */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/update/update_serialization_test.cpp 
b/src/mongo/db/update/update_serialization_test.cpp index 1f0d7f716fae2..149cc65361466 100644 --- a/src/mongo/db/update/update_serialization_test.cpp +++ b/src/mongo/db/update/update_serialization_test.cpp @@ -27,19 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include -#include #include #include +#include #include #include "mongo/base/string_data.h" -#include "mongo/db/json.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/update/update_driver.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/db/update/update_util.cpp b/src/mongo/db/update/update_util.cpp index 90f41da7dd1af..81bcef3a35f00 100644 --- a/src/mongo/db/update/update_util.cpp +++ b/src/mongo/db/update/update_util.cpp @@ -29,9 +29,32 @@ #include "mongo/db/update/update_util.h" +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/algorithm.h" -#include "mongo/db/s/operation_sharding_state.h" -#include "mongo/db/update/storage_validation.h" +#include "mongo/bson/mutable/const_element.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" namespace mongo { namespace update { @@ -75,7 +98,9 @@ void generateNewDocumentFromSuppliedDoc(OperationContext* opCtx, UpdateDriver replacementDriver(nullptr); // Create a new replacement-style update from the supplied document. - replacementDriver.parse(write_ops::UpdateModification::parseFromClassicUpdate(suppliedDoc), {}); + replacementDriver.parse( + write_ops::UpdateModification(suppliedDoc, write_ops::UpdateModification::ReplacementTag{}), + {}); replacementDriver.setLogOp(false); // We do not validate for storage, as we will validate the full document before inserting. @@ -98,7 +123,7 @@ void produceDocumentForUpsert(OperationContext* opCtx, // First: populate the document's immutable paths with equality predicate values from the query, // if available. This generates the pre-image document that we will run the update against. 
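For the `parseFromClassicUpdate` removal just above: the supplied upsert document is now wrapped explicitly as a replacement-style `UpdateModification`. A hedged sketch of that construction pattern, with the driver's expression-context setup elided:

```cpp
#include "mongo/bson/bsonobj.h"
#include "mongo/db/ops/write_ops_parsers.h"
#include "mongo/db/update/update_driver.h"

// Sketch of the construction pattern used above: wrap the supplied document
// directly as a replacement-style modification instead of routing it through
// the removed parseFromClassicUpdate() helper.
void parseAsReplacementSketch(mongo::UpdateDriver& driver, const mongo::BSONObj& suppliedDoc) {
    driver.parse(mongo::write_ops::UpdateModification(
                     suppliedDoc, mongo::write_ops::UpdateModification::ReplacementTag{}),
                 {} /* no array filters */);
    driver.setLogOp(false);  // this internal apply is not logged
}
```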
if (auto* cq = canonicalQuery) { - uassertStatusOK(driver->populateDocumentWithQueryFields(*cq, immutablePaths, doc)); + uassertStatusOK(driver->populateDocumentWithQueryFields(*cq->root(), immutablePaths, doc)); } else { fassert(17354, CanonicalQuery::isSimpleIdQuery(request->getQuery())); fassert(17352, doc.root().appendElement(request->getQuery()[idFieldName])); @@ -165,9 +190,7 @@ void makeUpdateRequest(OperationContext* opCtx, requestOut->setMulti(false); requestOut->setExplain(explain); - requestOut->setYieldPolicy(opCtx->inMultiDocumentTransaction() - ? PlanYieldPolicy::YieldPolicy::INTERRUPT_ONLY - : PlanYieldPolicy::YieldPolicy::YIELD_AUTO); + requestOut->setYieldPolicy(PlanYieldPolicy::YieldPolicy::YIELD_AUTO); } } // namespace update diff --git a/src/mongo/db/update/update_util.h b/src/mongo/db/update/update_util.h index edec49f6f8a12..aabe839e9af8a 100644 --- a/src/mongo/db/update/update_util.h +++ b/src/mongo/db/update/update_util.h @@ -27,14 +27,37 @@ * it in the license file. */ +#include + +#include "mongo/bson/mutable/document.h" +#include "mongo/db/field_ref_set.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/update/update_driver.h" #include "mongo/util/safe_num.h" namespace mongo { namespace update { +/** + * Generate a new document based on an update modification using an UpdateDriver. + */ +void generateNewDocumentFromUpdateOp(OperationContext* opCtx, + const FieldRefSet& immutablePaths, + UpdateDriver* driver, + mutablebson::Document& document); + +/** + * Generate a new document based on the supplied upsert document. + */ +void generateNewDocumentFromSuppliedDoc(OperationContext* opCtx, + const FieldRefSet& immutablePaths, + const UpdateRequest* request, + mutablebson::Document& document); + /** * Use an UpdateDriver and UpdateRequest to produce the document to insert. **/ diff --git a/src/mongo/db/update/v2_log_builder.cpp b/src/mongo/db/update/v2_log_builder.cpp index 08ba0adb3a28e..029484609b53f 100644 --- a/src/mongo/db/update/v2_log_builder.cpp +++ b/src/mongo/db/update/v2_log_builder.cpp @@ -27,14 +27,18 @@ * it in the license file. 
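`update_util.h` now declares `generateNewDocumentFromUpdateOp` and `generateNewDocumentFromSuppliedDoc` publicly. A usage sketch combining them is below; the boolean selector stands in for whatever logic actually chooses between the two paths and is an assumption, as is the helper name.

```cpp
#include "mongo/db/update/update_util.h"

// Hedged usage sketch of the two newly declared helpers. 'useSuppliedDoc' is a
// stand-in for the real selection logic; error handling is elided.
void buildUpsertDocumentSketch(mongo::OperationContext* opCtx,
                               const mongo::FieldRefSet& immutablePaths,
                               const mongo::UpdateRequest* request,
                               mongo::UpdateDriver* driver,
                               mongo::mutablebson::Document& doc,
                               bool useSuppliedDoc) {
    if (useSuppliedDoc) {
        // Build the new document from the caller-supplied upsert document.
        mongo::update::generateNewDocumentFromSuppliedDoc(opCtx, immutablePaths, request, doc);
    } else {
        // Let the parsed update operation produce the new document.
        mongo::update::generateNewDocumentFromUpdateOp(opCtx, immutablePaths, driver, doc);
    }
}
```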
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/update/v2_log_builder.h" - -#include +#include #include "mongo/base/checked_cast.h" +#include "mongo/db/field_ref.h" #include "mongo/db/update/update_oplog_entry_serialization.h" +#include "mongo/db/update/v2_log_builder.h" +#include "mongo/util/assert_util.h" namespace mongo::v2_log_builder { Status V2LogBuilder::logUpdatedField(const RuntimeUpdatePath& path, mutablebson::Element elt) { diff --git a/src/mongo/db/update/v2_log_builder.h b/src/mongo/db/update/v2_log_builder.h index 743a3b6b224ac..63fa2c4b1e61d 100644 --- a/src/mongo/db/update/v2_log_builder.h +++ b/src/mongo/db/update/v2_log_builder.h @@ -29,9 +29,15 @@ #pragma once +#include +#include +#include + #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" #include "mongo/db/update/document_diff_serialization.h" #include "mongo/db/update/log_builder_interface.h" #include "mongo/db/update/runtime_update_path.h" diff --git a/src/mongo/db/update/v2_log_builder_test.cpp b/src/mongo/db/update/v2_log_builder_test.cpp index 325c00dc573fc..41d09ab47b3ed 100644 --- a/src/mongo/db/update/v2_log_builder_test.cpp +++ b/src/mongo/db/update/v2_log_builder_test.cpp @@ -29,12 +29,21 @@ #include "mongo/db/update/v2_log_builder.h" -#include "mongo/base/status.h" +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/mutable/mutable_bson_test_utils.h" -#include "mongo/db/json.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/field_ref.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/safe_num.h" namespace mongo::v2_log_builder { diff --git a/src/mongo/db/update_index_data.cpp b/src/mongo/db/update_index_data.cpp index 0c5a6a4f5b8a2..512ef5473b668 100644 --- a/src/mongo/db/update_index_data.cpp +++ b/src/mongo/db/update_index_data.cpp @@ -28,7 +28,13 @@ */ #include "mongo/db/update_index_data.h" -#include "mongo/bson/util/builder.h" + +#include +#include +#include +#include + +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" namespace mongo { diff --git a/src/mongo/db/update_index_data.h b/src/mongo/db/update_index_data.h index d23f0fbea9c4f..6119f2b6c9ebe 100644 --- a/src/mongo/db/update_index_data.h +++ b/src/mongo/db/update_index_data.h @@ -29,7 +29,8 @@ #pragma once -#include +#include +#include #include "mongo/base/string_data.h" #include "mongo/db/field_ref.h" @@ -89,8 +90,8 @@ class UpdateIndexData { */ bool _startsWith(const FieldRef& a, const FieldRef& b) const; - std::set _canonicalPaths; - std::set _pathComponents; + absl::btree_set _canonicalPaths; + absl::btree_set _pathComponents; bool _allPathsIndexed; }; diff --git a/src/mongo/db/update_index_data_test.cpp b/src/mongo/db/update_index_data_test.cpp index ae230e70f3026..92696f2e9b179 100644 --- a/src/mongo/db/update_index_data_test.cpp +++ b/src/mongo/db/update_index_data_test.cpp @@ -27,9 +27,9 @@ * it in the license file. 
*/ -#include "mongo/unittest/unittest.h" - #include "mongo/db/update_index_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/db/vector_clock.cpp b/src/mongo/db/vector_clock.cpp index 076d025889a50..6d93ca42c7020 100644 --- a/src/mongo/db/vector_clock.cpp +++ b/src/mongo/db/vector_clock.cpp @@ -27,16 +27,35 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/vector_clock.h" - +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/crypto/hash_block.h" +#include "mongo/crypto/sha1_block.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/client.h" #include "mongo/db/logical_time_validator.h" -#include "mongo/db/vector_clock_document_gen.h" +#include "mongo/db/signed_logical_time.h" +#include "mongo/db/time_proof_service.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_gen.h" -#include "mongo/util/static_immortal.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/db/vector_clock.h b/src/mongo/db/vector_clock.h index eb40b36f4990e..a4065b13788c5 100644 --- a/src/mongo/db/vector_clock.h +++ b/src/mongo/db/vector_clock.h @@ -30,12 +30,23 @@ #pragma once #include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" #include "mongo/transport/session.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/db/vector_clock_metadata_hook.cpp b/src/mongo/db/vector_clock_metadata_hook.cpp index b6a04ba80663d..1e67d94dcebde 100644 --- a/src/mongo/db/vector_clock_metadata_hook.cpp +++ b/src/mongo/db/vector_clock_metadata_hook.cpp @@ -27,14 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/vector_clock_metadata_hook.h" - #include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" #include "mongo/db/operation_time_tracker.h" +#include "mongo/db/service_context.h" #include "mongo/db/vector_clock.h" +#include "mongo/db/vector_clock_metadata_hook.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/db/vector_clock_mongod.cpp b/src/mongo/db/vector_clock_mongod.cpp index bee92fd5d2daa..5603984d70334 100644 --- a/src/mongo/db/vector_clock_mongod.cpp +++ b/src/mongo/db/vector_clock_mongod.cpp @@ -28,20 +28,58 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/logical_time.h" #include "mongo/db/logical_time_validator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/persistent_task_store.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/db/repl/storage_interface.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/s/topology_time_ticker.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_document_gen.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/executor/scoped_task_executor.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" +#include "mongo/s/catalog/type_shard.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -298,6 +336,12 @@ VectorClock::VectorTime VectorClockMongoD::recoverDirect(OperationContext* opCtx _advanceTime( {newDurableTime.clusterTime(), newDurableTime.configTime(), newDurableTime.topologyTime()}); + LOGV2_DEBUG(1, + 6011000, + "Recovered persisted vector clock", + "configTime"_attr = newDurableTime.configTime(), + "topologyTime"_attr = newDurableTime.topologyTime()); + return newDurableTime; } @@ -373,12 +417,6 @@ Future VectorClockMongoD::_doWhileQueueNotEmptyOrError(ServiceContext* ser }(); ThreadClient tc("VectorClockStateOperation", service); - - { - stdx::lock_guard lk(*tc.get()); - tc->setSystemOperationKillableByStepdown(lk); - } - const auto opCtxHolder = tc->makeOperationContext(); auto* const opCtx = opCtxHolder.get(); @@ -454,10 +492,8 @@ void VectorClockMongoD::_tickTo(Component component, LogicalTime newTime) { 
return; } - if (component == Component::ConfigTime) { - // The ConfigTime is allowed to be tickTo on the ConfigServer and on the shard only when - // called from ShardingStateRecovery - // TODO SERVER-60110 re-add clusterRole == configServer condition like for TopologyTime + if (component == Component::ConfigTime && + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { _advanceComponentTimeTo(component, std::move(newTime)); return; } diff --git a/src/mongo/db/vector_clock_mongod_test.cpp b/src/mongo/db/vector_clock_mongod_test.cpp index 3b82ac93a321d..c8a679853faf7 100644 --- a/src/mongo/db/vector_clock_mongod_test.cpp +++ b/src/mongo/db/vector_clock_mongod_test.cpp @@ -27,20 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/keys_collection_client_direct.h" #include "mongo/db/keys_collection_manager.h" +#include "mongo/db/logical_time.h" #include "mongo/db/logical_time_validator.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_mock.h" -#include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/s/sharding_mongod_test_fixture.h" #include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/transport/session.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { namespace { @@ -142,12 +162,11 @@ DEATH_TEST_F(VectorClockMongoDTest, CannotTickConfigTime, "Hit a MONGO_UNREACHAB vc->tickConfigTime(1); } -// TODO SERVER-60110 re-enable the following test -// DEATH_TEST_F(VectorClockMongoDTest, CannotTickToConfigTime, "Hit a MONGO_UNREACHABLE") { -// auto sc = getServiceContext(); -// auto vc = VectorClockMutable::get(sc); -// vc->tickConfigTimeTo(LogicalTime()); -//} +DEATH_TEST_F(VectorClockMongoDTest, CannotTickToConfigTime, "Hit a MONGO_UNREACHABLE") { + auto sc = getServiceContext(); + auto vc = VectorClockMutable::get(sc); + vc->tickConfigTimeTo(LogicalTime()); +} DEATH_TEST_F(VectorClockMongoDTest, CannotTickTopologyTime, "Hit a MONGO_UNREACHABLE") { auto sc = getServiceContext(); diff --git a/src/mongo/db/vector_clock_mutable.cpp b/src/mongo/db/vector_clock_mutable.cpp index 9c68f998ccdeb..795f2cea9b579 100644 --- a/src/mongo/db/vector_clock_mutable.cpp +++ b/src/mongo/db/vector_clock_mutable.cpp @@ -28,11 +28,25 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/vector_clock_mutable.h" +#include +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/vector_clock_mutable.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/assert_util.h" 
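The `_tickTo` hunk above now gates explicit advancement of `ConfigTime` on the node holding the config-server role, and the previously commented-out `CannotTickToConfigTime` death test is restored to cover the non-config-server path. A small sketch of the role check in isolation; the helper name is illustrative and not part of the real `VectorClockMongoD` interface.

```cpp
#include "mongo/db/cluster_role.h"
#include "mongo/db/server_options.h"

// Sketch of the guard introduced in VectorClockMongoD::_tickTo(): only a node
// carrying the config-server role may explicitly advance ConfigTime; on any
// other node the request falls through to MONGO_UNREACHABLE, which is what the
// re-enabled CannotTickToConfigTime death test exercises.
bool mayTickToConfigTime() {
    return mongo::serverGlobalParams.clusterRole.has(mongo::ClusterRole::ConfigServer);
}
```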
+#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/db/vector_clock_mutable.h b/src/mongo/db/vector_clock_mutable.h index fb586f037b8fa..7021e2d63c048 100644 --- a/src/mongo/db/vector_clock_mutable.h +++ b/src/mongo/db/vector_clock_mutable.h @@ -29,7 +29,13 @@ #pragma once +#include + +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/vector_clock.h" +#include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/db/vector_clock_test.cpp b/src/mongo/db/vector_clock_test.cpp index a80481cd5a8a3..c84bd33b25b8e 100644 --- a/src/mongo/db/vector_clock_test.cpp +++ b/src/mongo/db/vector_clock_test.cpp @@ -28,20 +28,29 @@ */ -#include "mongo/platform/basic.h" - +#include #include +#include -#include "mongo/bson/bsonobj.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/timestamp.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_gen.h" #include "mongo/db/vector_clock_mutable.h" #include "mongo/db/vector_clock_test_fixture.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/clock_source_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/db/vector_clock_test_fixture.cpp b/src/mongo/db/vector_clock_test_fixture.cpp index 7ba7145ed3f1a..6f3c94fadf90a 100644 --- a/src/mongo/db/vector_clock_test_fixture.cpp +++ b/src/mongo/db/vector_clock_test_fixture.cpp @@ -27,23 +27,23 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/vector_clock_test_fixture.h" - #include +#include "mongo/base/checked_cast.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/logical_time.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/service_context.h" -#include "mongo/db/signed_logical_time.h" -#include "mongo/db/time_proof_service.h" +#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/vector_clock_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/util/clock_source_mock.h" namespace mongo { diff --git a/src/mongo/db/vector_clock_test_fixture.h b/src/mongo/db/vector_clock_test_fixture.h index 4ae782cbb7a1b..a0379193c0261 100644 --- a/src/mongo/db/vector_clock_test_fixture.h +++ b/src/mongo/db/vector_clock_test_fixture.h @@ -29,7 +29,12 @@ #pragma once +#include + +#include "mongo/db/logical_time.h" #include "mongo/db/s/sharding_mongod_test_fixture.h" +#include "mongo/util/clock_source_mock.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/vector_clock_trivial.cpp b/src/mongo/db/vector_clock_trivial.cpp index 8b8b154a72887..8c3c8c00cfa89 100644 --- a/src/mongo/db/vector_clock_trivial.cpp +++ b/src/mongo/db/vector_clock_trivial.cpp @@ -27,9 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_mutable.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/future.h" namespace mongo { namespace { diff --git a/src/mongo/db/views/SConscript b/src/mongo/db/views/SConscript index 53cfc7e4bec9a..d9f4c544e674e 100644 --- a/src/mongo/db/views/SConscript +++ b/src/mongo/db/views/SConscript @@ -7,6 +7,7 @@ env = env.Clone() env.Library( target='views', source=[ + 'util.cpp', 'view.cpp', 'view_graph.cpp', ], @@ -44,17 +45,6 @@ env.Library( ], ) -env.Library( - target='util', - source=[ - 'util.cpp', - ], - LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/util/namespace_string_database_name_util', - 'views', - ], -) - env.CppUnitTest( target='db_views_test', source=[ diff --git a/src/mongo/db/views/resolved_view.cpp b/src/mongo/db/views/resolved_view.cpp index ec3a7ae8a492c..2f2c36cbd1358 100644 --- a/src/mongo/db/views/resolved_view.cpp +++ b/src/mongo/db/views/resolved_view.cpp @@ -27,18 +27,31 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +// IWYU pragma: no_include "ext/alloc_traits.h" #include "mongo/db/views/resolved_view.h" -#include "mongo/base/init.h" +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/pipeline/document_source_coll_stats.h" #include "mongo/db/pipeline/document_source_index_stats.h" #include "mongo/db/pipeline/document_source_internal_convert_bucket_index_stats.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" namespace mongo { @@ -133,45 +146,47 @@ std::shared_ptr ResolvedView::parse(const BSONObj& cmdRepl return std::make_shared(fromBSON(cmdReply)); } -AggregateCommandRequest ResolvedView::asExpandedViewAggregation( - const AggregateCommandRequest& request) const { - // Perform the aggregation on the resolved namespace. The new pipeline consists of two parts: - // first, 'pipeline' in this ResolvedView; then, the pipeline in 'request'. - std::vector resolvedPipeline; - resolvedPipeline.reserve(_pipeline.size() + request.getPipeline().size()); - resolvedPipeline.insert(resolvedPipeline.end(), _pipeline.begin(), _pipeline.end()); - resolvedPipeline.insert( - resolvedPipeline.end(), request.getPipeline().begin(), request.getPipeline().end()); - - // $indexStats needs special handling for time-series-collections. Normally for a regular read, - // $_internalUnpackBucket unpacks the buckets entries into time-series document format and then - // passes the time-series documents on through the pipeline. Instead we need to read the buckets - // collection's index stats unmodified and then pass the results through an additional stage to - // specially convert them to the time-series collection's schema, and then onward. There is no - // need for the $_internalUnpackBucket stage with $indexStats, so we remove it. - if (resolvedPipeline.size() >= 2 && - resolvedPipeline[0][DocumentSourceInternalUnpackBucket::kStageNameInternal] && - resolvedPipeline[1][DocumentSourceIndexStats::kStageName]) { - // Clear the $_internalUnpackBucket stage. - auto unpackStage = resolvedPipeline[0]; - resolvedPipeline[0] = resolvedPipeline[1]; - - // Grab the $_internalUnpackBucket stage's time-series collection schema options and pass - // them into the $_internalConvertBucketIndexStats stage to use for schema conversion. - BSONObjBuilder builder; - for (const auto& elem : - unpackStage[DocumentSourceInternalUnpackBucket::kStageNameInternal].Obj()) { - if (elem.fieldNameStringData() == timeseries::kTimeFieldName || - elem.fieldNameStringData() == timeseries::kMetaFieldName) { +void ResolvedView::handleTimeseriesRewrites(std::vector* resolvedPipeline) const { + // Stages that are constrained to be the first stage of the pipeline ($collStats, $indexStats) + // require special handling since $_internalUnpackBucket is the first stage. 
+ if (resolvedPipeline->size() >= 2 && + (*resolvedPipeline)[0][DocumentSourceInternalUnpackBucket::kStageNameInternal] && + ((*resolvedPipeline)[1][DocumentSourceIndexStats::kStageName] || + (*resolvedPipeline)[1][DocumentSourceCollStats::kStageName])) { + // Normally for a regular read, $_internalUnpackBucket unpacks the buckets entries into + // time-series document format and then passes the time-series documents on through the + // pipeline. Instead, for $indexStats, we need to read the buckets collection's index + // stats unmodified and then pass the results through an additional stage to specially + // convert them to the time-series collection's schema, and then onward. We grab the + // $_internalUnpackBucket stage's time-series collection schema options and pass them + // into the $_internalConvertBucketIndexStats stage to use for schema conversion. + if ((*resolvedPipeline)[1][DocumentSourceIndexStats::kStageName]) { + auto unpackStage = (*resolvedPipeline)[0]; + (*resolvedPipeline)[0] = (*resolvedPipeline)[1]; + BSONObjBuilder builder; + for (const auto& elem : + unpackStage[DocumentSourceInternalUnpackBucket::kStageNameInternal].Obj()) { + if (elem.fieldNameStringData() == timeseries::kTimeFieldName || + elem.fieldNameStringData() == timeseries::kMetaFieldName) { + builder.append(elem); + } + } + (*resolvedPipeline)[1] = + BSON(DocumentSourceInternalConvertBucketIndexStats::kStageName << builder.obj()); + } else { + auto collStatsStage = (*resolvedPipeline)[1]; + BSONObjBuilder builder; + for (const auto& elem : collStatsStage[DocumentSourceCollStats::kStageName].Obj()) { builder.append(elem); } + builder.append("$_requestOnTimeseriesView", true); + (*resolvedPipeline)[1] = BSON(DocumentSourceCollStats::kStageName << builder.obj()); + // For $collStats, we directly read the collection stats from the buckets + // collection, and skip $_internalUnpackBucket. + resolvedPipeline->erase(resolvedPipeline->begin()); } - - resolvedPipeline[1] = - BSON(DocumentSourceInternalConvertBucketIndexStats::kStageName << builder.obj()); - } else if (resolvedPipeline.size() >= 1 && - resolvedPipeline[0][DocumentSourceInternalUnpackBucket::kStageNameInternal]) { - auto unpackStage = resolvedPipeline[0]; + } else { + auto unpackStage = (*resolvedPipeline)[0]; BSONObjBuilder builder; for (const auto& elem : @@ -184,11 +199,28 @@ AggregateCommandRequest ResolvedView::asExpandedViewAggregation( builder.append(DocumentSourceInternalUnpackBucket::kUsesExtendedRange, ((_timeseriesUsesExtendedRange && *_timeseriesUsesExtendedRange))); - resolvedPipeline[0] = + (*resolvedPipeline)[0] = BSON(DocumentSourceInternalUnpackBucket::kStageNameInternal << builder.obj()); } +} + +AggregateCommandRequest ResolvedView::asExpandedViewAggregation( + const AggregateCommandRequest& request) const { + // Perform the aggregation on the resolved namespace. The new pipeline consists of two parts: + // first, 'pipeline' in this ResolvedView; then, the pipeline in 'request'. 
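To make the first-stage rewrites above concrete, here is an illustrative before/after for `$collStats` over a time-series view; the field names ("t", "m") and the stats spec are assumptions for the example.

```cpp
#include <vector>

#include "mongo/bson/bsonobj.h"
#include "mongo/bson/json.h"

// Illustrative only: the shape of a $collStats pipeline over a time-series
// view before the rewrite; the post-rewrite shape is described in comments.
std::vector<mongo::BSONObj> collStatsOnTimeseriesViewBeforeRewrite() {
    return {mongo::fromjson(
                "{$_internalUnpackBucket: {timeField: 't', metaField: 'm', "
                "bucketMaxSpanSeconds: 3600}}"),
            mongo::fromjson("{$collStats: {count: {}}}")};
    // After handleTimeseriesRewrites() the unpack stage is erased and the view
    // flag is appended, so $collStats runs first, directly on the buckets
    // collection:
    //   [{$collStats: {count: {}, $_requestOnTimeseriesView: true}}]
    // For $indexStats the unpack stage is instead replaced by
    // $_internalConvertBucketIndexStats carrying the timeField/metaField
    // options, and $indexStats is hoisted to the front of the pipeline.
}
```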
+ std::vector resolvedPipeline; + resolvedPipeline.reserve(_pipeline.size() + request.getPipeline().size()); + resolvedPipeline.insert(resolvedPipeline.end(), _pipeline.begin(), _pipeline.end()); + resolvedPipeline.insert( + resolvedPipeline.end(), request.getPipeline().begin(), request.getPipeline().end()); + + if (resolvedPipeline.size() >= 1 && + resolvedPipeline[0][DocumentSourceInternalUnpackBucket::kStageNameInternal]) { + handleTimeseriesRewrites(&resolvedPipeline); + } - AggregateCommandRequest expandedRequest{_namespace, resolvedPipeline}; + AggregateCommandRequest expandedRequest{ + _namespace, std::move(resolvedPipeline), request.getSerializationContext()}; if (request.getExplain()) { expandedRequest.setExplain(request.getExplain()); diff --git a/src/mongo/db/views/resolved_view.h b/src/mongo/db/views/resolved_view.h index 82238556288e3..2b818e02d0a13 100644 --- a/src/mongo/db/views/resolved_view.h +++ b/src/mongo/db/views/resolved_view.h @@ -29,12 +29,22 @@ #pragma once +#include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/timeseries/timeseries_gen.h" namespace mongo { @@ -59,6 +69,8 @@ class ResolvedView final : public ErrorExtraInfo { static ResolvedView fromBSON(const BSONObj& commandResponseObj); + void handleTimeseriesRewrites(std::vector* resolvedPipeline) const; + /** * Convert an aggregation command on a view to the equivalent command against the view's * underlying collection. @@ -91,12 +103,12 @@ class ResolvedView final : public ErrorExtraInfo { NamespaceString _namespace; std::vector _pipeline; - // The default collation associated with this view. An empty object means that the default is - // the simple collation. + // The default collation associated with this view. An empty object means that the default + // is the simple collation. // // Currently all operations which run over a view must use the default collation. This means - // that operations on the view which do not specify a collation inherit the default. Operations - // on the view which specify any other collation fail with a user error. + // that operations on the view which do not specify a collation inherit the default. + // Operations on the view which specify any other collation fail with a user error. BSONObj _defaultCollation; boost::optional _timeseriesOptions; diff --git a/src/mongo/db/views/resolved_view_test.cpp b/src/mongo/db/views/resolved_view_test.cpp index 334c0fad08981..d2cabef99c02a 100644 --- a/src/mongo/db/views/resolved_view_test.cpp +++ b/src/mongo/db/views/resolved_view_test.cpp @@ -27,20 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include + +#include "mongo/base/status.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/exec/document_value/document.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/explain_options.h" #include "mongo/db/views/resolved_view.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/serialization_context.h" namespace mongo { namespace { @@ -180,6 +192,30 @@ TEST(ResolvedViewTest, ExpandingAggRequestPreservesDefaultCollationOfView) { << "fr_CA")); } +TEST(ResolvedViewTest, EnsureSerializationContextCopy) { + const ResolvedView resolvedView{backingNss, emptyPipeline, kSimpleCollation}; + + AggregateCommandRequest requestOnViewDefault{viewNss, emptyPipeline}; + + auto resultDefault = resolvedView.asExpandedViewAggregation(requestOnViewDefault); + ASSERT_TRUE(resultDefault.getSerializationContext() == + SerializationContext::stateCommandRequest()); + + SerializationContext scCommand = SerializationContext::stateCommandRequest(); + scCommand.setTenantIdSource(true); + scCommand.setPrefixState(true); + AggregateCommandRequest requestOnViewCommand{viewNss, emptyPipeline, scCommand}; + + auto resultCommand = resolvedView.asExpandedViewAggregation(requestOnViewCommand); + ASSERT_EQ(resultCommand.getSerializationContext().getSource(), + SerializationContext::Source::Command); + ASSERT_EQ(resultCommand.getSerializationContext().getCallerType(), + SerializationContext::CallerType::Request); + ASSERT_TRUE(resultCommand.getSerializationContext().receivedNonPrefixedTenantId()); + ASSERT_EQ(resultCommand.getSerializationContext().getPrefix(), + SerializationContext::Prefix::IncludePrefix); +} + TEST(ResolvedViewTest, FromBSONFailsIfMissingResolvedView) { BSONObj badCmdResponse = BSON("x" << 1); ASSERT_THROWS_CODE(ResolvedView::fromBSON(badCmdResponse), AssertionException, 40248); @@ -201,25 +237,26 @@ TEST(ResolvedViewTest, FromBSONFailsOnInvalidViewNsType) { } TEST(ResolvedViewTest, FromBSONFailsIfMissingPipeline) { - BSONObj badCmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns())); + BSONObj badCmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns_forTest())); ASSERT_THROWS_CODE(ResolvedView::fromBSON(badCmdResponse), AssertionException, 40251); } TEST(ResolvedViewTest, FromBSONFailsOnInvalidPipelineType) { BSONObj badCmdResponse = - BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << 7)); + BSON("resolvedView" << BSON("ns" << backingNss.ns_forTest() << "pipeline" << 7)); ASSERT_THROWS_CODE(ResolvedView::fromBSON(badCmdResponse), AssertionException, 40251); } TEST(ResolvedViewTest, FromBSONFailsOnInvalidCollationType) { - BSONObj badCmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" - << BSONArray() << "collation" << 1)); + BSONObj badCmdResponse = + BSON("resolvedView" << BSON("ns" << backingNss.ns_forTest() << "pipeline" << BSONArray() + << "collation" << 1)); ASSERT_THROWS_CODE(ResolvedView::fromBSON(badCmdResponse), 
AssertionException, 40639); } TEST(ResolvedViewTest, FromBSONSuccessfullyParsesEmptyBSONArrayIntoEmptyVector) { BSONObj cmdResponse = - BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray())); + BSON("resolvedView" << BSON("ns" << backingNss.ns_forTest() << "pipeline" << BSONArray())); const ResolvedView result = ResolvedView::fromBSON(cmdResponse); ASSERT_EQ(result.getNamespace(), backingNss); ASSERT(std::equal(emptyPipeline.begin(), @@ -229,7 +266,7 @@ TEST(ResolvedViewTest, FromBSONSuccessfullyParsesEmptyBSONArrayIntoEmptyVector) } TEST(ResolvedViewTest, FromBSONSuccessfullyParsesCollation) { - BSONObj cmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" + BSONObj cmdResponse = BSON("resolvedView" << BSON("ns" << backingNss.ns_forTest() << "pipeline" << BSONArray() << "collation" << BSON("locale" << "fil"))); @@ -268,7 +305,8 @@ TEST(ResolvedViewTest, IsResolvedViewErrorResponseDetectsKickbackErrorCodeSucces BSONObj errorResponse = BSON("ok" << 0 << "code" << ErrorCodes::CommandOnShardedViewNotSupportedOnMongod << "errmsg" << "This view is sharded and cannot be run on mongod" - << "resolvedView" << BSON("ns" << backingNss.ns() << "pipeline" << BSONArray())); + << "resolvedView" + << BSON("ns" << backingNss.ns_forTest() << "pipeline" << BSONArray())); auto status = getStatusFromCommandResult(errorResponse); ASSERT_EQ(status, ErrorCodes::CommandOnShardedViewNotSupportedOnMongod); ASSERT(status.extraInfo()); diff --git a/src/mongo/db/views/util.cpp b/src/mongo/db/views/util.cpp index ee75fcffee12d..720ac795a32b9 100644 --- a/src/mongo/db/views/util.cpp +++ b/src/mongo/db/views/util.cpp @@ -29,8 +29,16 @@ #include "mongo/db/views/util.h" -#include "mongo/db/multitenancy_gen.h" -#include "mongo/db/server_feature_flags_gen.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/namespace_string.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo::view_util { void validateViewDefinitionBSON(OperationContext* opCtx, @@ -52,7 +60,7 @@ void validateViewDefinitionBSON(OperationContext* opCtx, auto viewName = NamespaceStringUtil::deserialize(dbName.tenantId(), viewNameElem.str()); - bool viewNameIsValid = NamespaceString::validCollectionComponent(viewName.ns()) && + bool viewNameIsValid = NamespaceString::validCollectionComponent(viewName) && NamespaceString::validDBName(viewName.dbName()); valid &= viewNameIsValid; @@ -80,10 +88,12 @@ void validateViewDefinitionBSON(OperationContext* opCtx, auto timeseries = viewDefinition["timeseries"]; valid &= !timeseries || timeseries.type() == BSONType::Object; - uassert(ErrorCodes::InvalidViewDefinition, - str::stream() << "found invalid view definition " << viewDefinition["_id"] - << " while reading '" - << NamespaceString::makeSystemDotViewsNamespace(dbName) << "'", - valid); + uassert( + ErrorCodes::InvalidViewDefinition, + str::stream() << "found invalid view definition " << viewDefinition["_id"] + << " while reading '" + << NamespaceString::makeSystemDotViewsNamespace(dbName).toStringForErrorMsg() + << "'", + valid); } } // namespace mongo::view_util diff --git a/src/mongo/db/views/util.h b/src/mongo/db/views/util.h index 9c3cfcc1889d5..256d01cc158e7 100644 --- a/src/mongo/db/views/util.h +++ b/src/mongo/db/views/util.h @@ -29,6 +29,8 @@ #pragma once +#include "mongo/bson/bsonobj.h" +#include 
"mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/views/view.h" diff --git a/src/mongo/db/views/view.cpp b/src/mongo/db/views/view.cpp index e9d9ec5b7cab1..2ae583d66949e 100644 --- a/src/mongo/db/views/view.cpp +++ b/src/mongo/db/views/view.cpp @@ -27,13 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/views/view.h" - #include +#include + +#include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/views/view.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { diff --git a/src/mongo/db/views/view.h b/src/mongo/db/views/view.h index 72573d91be517..0e98e13290efd 100644 --- a/src/mongo/db/views/view.h +++ b/src/mongo/db/views/view.h @@ -34,6 +34,7 @@ #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/collation/collator_interface.h" @@ -95,8 +96,7 @@ class ViewDefinition { * time-series buckets collection. */ bool timeseries() const { - return _viewOnNss.isTimeseriesBucketsCollection() && - _viewOnNss.getTimeseriesViewNamespace() == _viewNss; + return _viewOnNss.isTimeseriesBucketsCollection(); } void setViewOn(const NamespaceString& viewOnNss); diff --git a/src/mongo/db/views/view_catalog_helpers.cpp b/src/mongo/db/views/view_catalog_helpers.cpp index 4e47c0900eb00..7d80cab226707 100644 --- a/src/mongo/db/views/view_catalog_helpers.cpp +++ b/src/mongo/db/views/view_catalog_helpers.cpp @@ -29,6 +29,23 @@ #include "mongo/db/views/view_catalog_helpers.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/curop.h" #include "mongo/db/pipeline/aggregate_command_gen.h" @@ -37,7 +54,20 @@ #include "mongo/db/pipeline/lite_parsed_pipeline.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/server_options.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/views/view_graph.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { namespace view_catalog_helpers { @@ -106,7 +136,7 @@ StatusWith> validatePipeline(OperationConte << firstPersistentStage->get()->getSourceName() << " in location " << std::distance(sources.begin(), firstPersistentStage) << " of the pipeline cannot be used in the view definition of " - << viewDef.name().ns() << " because it writes to disk", + << viewDef.name().toStringForErrorMsg() << " because it writes to disk", firstPersistentStage == sources.end()); uassert(ErrorCodes::OptionNotSupportedOnView, @@ -189,8 +219,8 @@ StatusWith resolveView(OperationContext* opCtx, if (view->timeseries()) { auto tsCollection = 
catalog->lookupCollectionByNamespace(opCtx, *resolvedNss); uassert(6067201, - str::stream() << "expected time-series buckets collection " << *resolvedNss - << " to exist", + str::stream() << "expected time-series buckets collection " + << (*resolvedNss).toStringForErrorMsg() << " to exist", tsCollection); if (tsCollection) { mixedData = tsCollection->getTimeseriesBucketsMayHaveMixedSchemaData(); diff --git a/src/mongo/db/views/view_catalog_helpers.h b/src/mongo/db/views/view_catalog_helpers.h index 02f87dd2b4e5c..498a97f96d2c5 100644 --- a/src/mongo/db/views/view_catalog_helpers.h +++ b/src/mongo/db/views/view_catalog_helpers.h @@ -30,13 +30,17 @@ #pragma once #include +#include +#include #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/views/resolved_view.h" #include "mongo/db/views/view.h" +#include "mongo/stdx/unordered_set.h" namespace mongo { namespace view_catalog_helpers { diff --git a/src/mongo/db/views/view_catalog_test.cpp b/src/mongo/db/views/view_catalog_test.cpp index 1b1232dba64b9..83d1d3621ed1b 100644 --- a/src/mongo/db/views/view_catalog_test.cpp +++ b/src/mongo/db/views/view_catalog_test.cpp @@ -27,35 +27,59 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include #include #include +#include +#include +#include + +#include +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/catalog/catalog_test_fixture.h" #include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/concurrency/resource_catalog.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" -#include "mongo/db/server_options.h" -#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/views/resolved_view.h" #include "mongo/db/views/view.h" #include "mongo/db/views/view_catalog_helpers.h" #include "mongo/db/views/view_graph.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" namespace mongo { @@ -78,7 +102,8 @@ const 
auto kTinyMatchStage = BSON("$match" << BSONObj()); class ViewCatalogFixture : public CatalogTestFixture { public: - ViewCatalogFixture() : ViewCatalogFixture(DatabaseName(boost::none, "db")) {} + ViewCatalogFixture() + : ViewCatalogFixture(DatabaseName::createDatabaseName_forTest(boost::none, "db")) {} ViewCatalogFixture(DatabaseName dbName) : _dbName(std::move(dbName)) {} @@ -86,8 +111,8 @@ class ViewCatalogFixture : public CatalogTestFixture { CatalogTestFixture::setUp(); _db = _createDatabase(_dbName); - _createDatabase({_dbName.tenantId(), "db1"}); - _createDatabase({_dbName.tenantId(), "db2"}); + _createDatabase(DatabaseName::createDatabaseName_forTest(_dbName.tenantId(), "db1")); + _createDatabase(DatabaseName::createDatabaseName_forTest(_dbName.tenantId(), "db2")); } void tearDown() override { @@ -531,7 +556,7 @@ TEST_F(ViewCatalogFixture, LookupRIDExistingView) { auto resourceID = ResourceId(RESOURCE_COLLECTION, NamespaceString::createNamespaceString_forTest(boost::none, "db.view")); - ASSERT_EQ(ResourceCatalog::get(getServiceContext()).name(resourceID), std::string{"db.view"}); + ASSERT_EQ(ResourceCatalog::get().name(resourceID), std::string{"db.view"}); } TEST_F(ViewCatalogFixture, LookupRIDExistingViewRollback) { @@ -556,7 +581,7 @@ TEST_F(ViewCatalogFixture, LookupRIDExistingViewRollback) { auto resourceID = ResourceId(RESOURCE_COLLECTION, NamespaceString::createNamespaceString_forTest(boost::none, "db.view")); - ASSERT(!ResourceCatalog::get(getServiceContext()).name(resourceID)); + ASSERT(!ResourceCatalog::get().name(resourceID)); } TEST_F(ViewCatalogFixture, LookupRIDAfterDrop) { @@ -569,7 +594,7 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterDrop) { auto resourceID = ResourceId(RESOURCE_COLLECTION, NamespaceString::createNamespaceString_forTest(boost::none, "db.view")); - ASSERT(!ResourceCatalog::get(getServiceContext()).name(resourceID)); + ASSERT(!ResourceCatalog::get().name(resourceID)); } TEST_F(ViewCatalogFixture, LookupRIDAfterDropRollback) { @@ -583,8 +608,8 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterDropRollback) { WriteUnitOfWork wunit(operationContext()); ASSERT_OK(createView(operationContext(), viewName, viewOn, emptyPipeline, emptyCollation)); wunit.commit(); - ASSERT_EQ(ResourceCatalog::get(getServiceContext()).name(resourceID).value(), - viewName.ns()); + ASSERT_EQ(ResourceCatalog::get().name(resourceID).value(), + viewName.ns_forTest().toString()); } { @@ -600,7 +625,7 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterDropRollback) { // Do not commit, rollback. } // Make sure drop was rolled back and view is still in catalog. 
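These test hunks also reflect `ResourceCatalog::get()` becoming a process-wide accessor that no longer takes a `ServiceContext`. A hedged sketch of the lookup pattern; the optional-like return type is inferred from how the assertions use it, and the function name is illustrative.

```cpp
#include <boost/none.hpp>

#include "mongo/db/concurrency/lock_manager_defs.h"
#include "mongo/db/concurrency/resource_catalog.h"
#include "mongo/db/namespace_string.h"

// Sketch of the updated lookup pattern: ResourceCatalog::get() is a
// process-wide accessor, so no ServiceContext is threaded through.
bool viewResourceIsRegisteredSketch() {
    auto resourceId = mongo::ResourceId(
        mongo::RESOURCE_COLLECTION,
        mongo::NamespaceString::createNamespaceString_forTest(boost::none, "db.view"));
    return static_cast<bool>(mongo::ResourceCatalog::get().name(resourceId));
}
```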
- ASSERT_EQ(ResourceCatalog::get(getServiceContext()).name(resourceID), viewName.ns()); + ASSERT_EQ(ResourceCatalog::get().name(resourceID), viewName.ns_forTest().toString()); } TEST_F(ViewCatalogFixture, LookupRIDAfterModify) { @@ -612,7 +637,7 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterModify) { NamespaceString::createNamespaceString_forTest(boost::none, "db.view")); ASSERT_OK(createView(operationContext(), viewName, viewOn, emptyPipeline, emptyCollation)); ASSERT_OK(modifyView(operationContext(), viewName, viewOn, emptyPipeline)); - ASSERT_EQ(ResourceCatalog::get(getServiceContext()).name(resourceID), viewName.ns()); + ASSERT_EQ(ResourceCatalog::get().name(resourceID), viewName.ns_forTest().toString()); } TEST_F(ViewCatalogFixture, LookupRIDAfterModifyRollback) { @@ -626,7 +651,7 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterModifyRollback) { WriteUnitOfWork wunit(operationContext()); ASSERT_OK(createView(operationContext(), viewName, viewOn, emptyPipeline, emptyCollation)); wunit.commit(); - ASSERT_EQ(ResourceCatalog::get(getServiceContext()).name(resourceID), viewName.ns()); + ASSERT_EQ(ResourceCatalog::get().name(resourceID), viewName.ns_forTest().toString()); } { @@ -643,11 +668,11 @@ TEST_F(ViewCatalogFixture, LookupRIDAfterModifyRollback) { viewOn, emptyPipeline, view_catalog_helpers::validatePipeline)); - ASSERT_EQ(ResourceCatalog::get(getServiceContext()).name(resourceID), viewName.ns()); + ASSERT_EQ(ResourceCatalog::get().name(resourceID), viewName.ns_forTest().toString()); // Do not commit, rollback. } // Make sure view resource is still available after rollback. - ASSERT_EQ(ResourceCatalog::get(getServiceContext()).name(resourceID), viewName.ns()); + ASSERT_EQ(ResourceCatalog::get().name(resourceID), viewName.ns_forTest().toString()); } TEST_F(ViewCatalogFixture, CreateViewThenDropAndLookup) { @@ -675,7 +700,7 @@ TEST_F(ViewCatalogFixture, Iterate) { Lock::DBLock dbLock(operationContext(), view1.dbName(), MODE_IX); getCatalog()->iterateViews( operationContext(), view1.dbName(), [&viewNames](const ViewDefinition& view) { - std::string name = view.name().toString(); + std::string name = view.name().toString_forTest(); ASSERT(viewNames.end() != viewNames.find(name)); viewNames.erase(name); return true; @@ -720,7 +745,8 @@ TEST_F(ViewCatalogFixture, ResolveViewCorrectPipeline) { } TEST_F(ViewCatalogFixture, ResolveViewOnCollectionNamespace) { - const NamespaceString collectionNamespace("db.coll"); + const NamespaceString collectionNamespace = + NamespaceString::createNamespaceString_forTest("db.coll"); Lock::DBLock dbLock(operationContext(), collectionNamespace.dbName(), MODE_IS); auto resolvedView = uassertStatusOK(view_catalog_helpers::resolveView( @@ -770,7 +796,9 @@ TEST_F(ViewCatalogFixture, ResolveViewCorrectlyExtractsDefaultCollation) { class ServerlessViewCatalogFixture : public ViewCatalogFixture { public: - ServerlessViewCatalogFixture() : ViewCatalogFixture(DatabaseName(TenantId(OID::gen()), "db")) {} + ServerlessViewCatalogFixture() + : ViewCatalogFixture(DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), "db")) { + } }; TEST_F(ServerlessViewCatalogFixture, LookupExistingViewBeforeAndAfterDropFeatureFlagOff) { diff --git a/src/mongo/db/views/view_definition_test.cpp b/src/mongo/db/views/view_definition_test.cpp index e6f71d33efe4b..d757c7ef28337 100644 --- a/src/mongo/db/views/view_definition_test.cpp +++ b/src/mongo/db/views/view_definition_test.cpp @@ -27,20 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include +#include +#include #include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/views/view.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -118,7 +128,7 @@ TEST(ViewDefinitionTest, SetViewOnSucceedsIfNewViewOnIsInSameDatabaseAsView) { } TEST(ViewDefinitionTest, SetPipelineSucceedsOnValidArrayBSONElement) { - ViewDefinition viewDef(viewNss.db(), viewNss.coll(), backingNss.coll(), BSONObj(), nullptr); + ViewDefinition viewDef(viewNss.dbName(), viewNss.coll(), backingNss.coll(), BSONObj(), nullptr); ASSERT(viewDef.pipeline().empty()); BSONObj matchStage = BSON("match" << BSON("x" << 9)); @@ -137,14 +147,7 @@ TEST(ViewDefinitionTest, SetPipelineSucceedsOnValidArrayBSONElement) { TEST(ViewDefinitionTest, ViewDefinitionCreationCorrectlySetsTimeseries) { ViewDefinition viewDef( viewNss.dbName(), viewNss.coll(), bucketsColl.coll(), samplePipeline, nullptr); - ASSERT_FALSE(viewDef.timeseries()); - - ViewDefinition timeseriesDef(timeseriesColl.dbName(), - timeseriesColl.coll(), - bucketsColl.coll(), - samplePipeline, - nullptr); - ASSERT_TRUE(timeseriesDef.timeseries()); + ASSERT(viewDef.timeseries()); } TEST(ViewDefinitionTest, ViewDefinitionCreationCorrectlyBuildsNamespaceStringsWithTenantIds) { diff --git a/src/mongo/db/views/view_graph.cpp b/src/mongo/db/views/view_graph.cpp index 66ed548450c05..691f8f2954f93 100644 --- a/src/mongo/db/views/view_graph.cpp +++ b/src/mongo/db/views/view_graph.cpp @@ -27,13 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/views/view_graph.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/views/view.h" +#include "mongo/db/views/view_graph.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" namespace mongo { @@ -192,9 +201,9 @@ Status ViewGraph::_validateParents(uint64_t currentId, int currentDepth, StatsMa !CollatorInterface::collatorsMatch(currentNode.collator.get(), parentNode.collator.get())) { return {ErrorCodes::OptionNotSupportedOnView, - str::stream() << "View " << currentNode.nss.ns() + str::stream() << "View " << currentNode.nss.toStringForErrorMsg() << " has a collation that does not match the collation of view " - << parentNode.nss.ns()}; + << parentNode.nss.toStringForErrorMsg()}; } if (!(*statsMap)[parentId].checked) { @@ -235,9 +244,9 @@ Status ViewGraph::_validateChildren(uint64_t startingId, auto errmsg = StringBuilder(); errmsg << "View cycle detected: "; - errmsg << _graph[*iterator].nss.ns(); + errmsg << _graph[*iterator].nss.toStringForErrorMsg(); for (; iterator != traversalIds->rend(); ++iterator) { - errmsg << " => " << _graph[*iterator].nss.ns(); + errmsg << " => " << _graph[*iterator].nss.toStringForErrorMsg(); } return {ErrorCodes::GraphContainsCycle, errmsg.str()}; } @@ -262,9 +271,9 @@ Status ViewGraph::_validateChildren(uint64_t startingId, !CollatorInterface::collatorsMatch(currentNode.collator.get(), childNode.collator.get())) { return {ErrorCodes::OptionNotSupportedOnView, - str::stream() << "View " << currentNode.nss.ns() + str::stream() << "View " << currentNode.nss.toStringForErrorMsg() << " has a collation that does not match the collation of view " - << childNode.nss.ns()}; + << childNode.nss.toStringForErrorMsg()}; } auto res = _validateChildren(startingId, childId, currentDepth + 1, statsMap, traversalIds); diff --git a/src/mongo/db/views/view_graph.h b/src/mongo/db/views/view_graph.h index 7b8ad7d3d30c9..de62ad469d0a4 100644 --- a/src/mongo/db/views/view_graph.h +++ b/src/mongo/db/views/view_graph.h @@ -28,7 +28,11 @@ */ #pragma once +#include #include +#include +#include +#include #include #include "mongo/base/status.h" diff --git a/src/mongo/db/views/view_graph_test.cpp b/src/mongo/db/views/view_graph_test.cpp index 5ca5fd716fbdf..587f21ba49536 100644 --- a/src/mongo/db/views/view_graph_test.cpp +++ b/src/mongo/db/views/view_graph_test.cpp @@ -27,26 +27,36 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include + +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/views/view.h" #include "mongo/db/views/view_graph.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { constexpr auto kEmptyPipelineSize = 0; -const auto kTestDb = DatabaseName(boost::none, "test"); +const auto kTestDb = DatabaseName::createDatabaseName_forTest(boost::none, "test"); constexpr auto kFooName = "foo"_sd; constexpr auto kBarName = "bar"_sd; constexpr auto kQuxName = "qux"_sd; @@ -249,8 +259,8 @@ TEST_F(ViewGraphFixture, DroppingViewPreservesNodeInGraphIfDependedOnByOtherView TEST_F(ViewGraphFixture, DifferentTenantsCanCreateViewWithConflictingNamespaces) { RAIIServerParameterControllerForTest multitenancyController("multitenancySupport", true); - DatabaseName db1(TenantId(OID::gen()), "test"); - DatabaseName db2(TenantId(OID::gen()), "test"); + DatabaseName db1 = DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), "test"); + DatabaseName db2 = DatabaseName::createDatabaseName_forTest(TenantId(OID::gen()), "test"); NamespaceString viewOn1 = NamespaceString::createNamespaceString_forTest(db1, kBarName); NamespaceString viewOn2 = NamespaceString::createNamespaceString_forTest(db2, kBarName); diff --git a/src/mongo/db/wire_version.cpp b/src/mongo/db/wire_version.cpp index 5df5930998060..328f9e03bed05 100644 --- a/src/mongo/db/wire_version.cpp +++ b/src/mongo/db/wire_version.cpp @@ -28,15 +28,23 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/db/wire_version.h" +#include +#include #include "mongo/base/error_codes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/db/wire_version.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/util/assert_util.h" #include "mongo/util/static_immortal.h" +#include "mongo/util/str.h" #include "mongo/util/thread_safety_context.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/db/wire_version.h b/src/mongo/db/wire_version.h index 0d43eff5b68c4..2be50b96fb247 100644 --- a/src/mongo/db/wire_version.h +++ b/src/mongo/db/wire_version.h @@ -30,9 +30,14 @@ #pragma once #include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/mutex.h" #include "mongo/util/assert_util.h" #include "mongo/util/version/releases.h" @@ -188,9 +193,9 @@ class WireSpec { * Appends the min and max versions in 'wireVersionInfo' to 'builder' in the format expected for * reporting information about the internal client. * - * Intended for use as part of performing the isMaster handshake with a remote node. 
When an - * internal clients make a connection to another node in the cluster, it includes internal - * client information as a parameter to the isMaster command. This parameter has the following + * Intended for use as part of performing the isMaster/hello handshake with a remote node. When + * an internal client makes a connection to another node in the cluster, it includes internal + * client information as a parameter to the hello command. This parameter has the following * format: * * internalClient: { diff --git a/src/mongo/db/wire_version_test.cpp b/src/mongo/db/wire_version_test.cpp index 615f73734a0cb..b4302425c22cd 100644 --- a/src/mongo/db/wire_version_test.cpp +++ b/src/mongo/db/wire_version_test.cpp @@ -27,11 +27,18 @@ * it in the license file. */ +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/wire_version.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/db/write_block_bypass.cpp b/src/mongo/db/write_block_bypass.cpp index 27be52471987a..df37603d9e1b3 100644 --- a/src/mongo/db/write_block_bypass.cpp +++ b/src/mongo/db/write_block_bypass.cpp @@ -27,10 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/client.h" #include "mongo/db/write_block_bypass.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { @@ -48,18 +53,19 @@ void WriteBlockBypass::setFromMetadata(OperationContext* opCtx, const BSONElemen if (opCtx->getClient()->isInDirectClient()) { return; } + auto as = AuthorizationSession::get(opCtx->getClient()); if (elem) { // If the mayBypassWriteBlocking field is set, then (after ensuring the client is // authorized) set our state from that field. - uassert(6317500, - "Client is not properly authorized to propagate mayBypassWriteBlocking", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + uassert( + 6317500, + "Client is not properly authorized to propagate mayBypassWriteBlocking", + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(as->getUserTenantId()), ActionType::internal)); set(elem.Bool()); } else { // Otherwise, set our state based on the AuthorizationSession state.
- set(AuthorizationSession::get(opCtx->getClient())->mayBypassWriteBlockingMode()); + set(as->mayBypassWriteBlockingMode()); } } diff --git a/src/mongo/db/write_concern.cpp b/src/mongo/db/write_concern.cpp index b9d04b112cd32..6eb1096bb00be 100644 --- a/src/mongo/db/write_concern.cpp +++ b/src/mongo/db/write_concern.cpp @@ -30,14 +30,27 @@ #include "mongo/db/write_concern.h" -#include "mongo/bson/util/bson_extract.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/curop.h" #include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/read_write_concern_defaults_gen.h" +#include "mongo/db/read_write_concern_provenance.h" #include "mongo/db/repl/optime.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/storage_interface.h" #include "mongo/db/server_options.h" @@ -48,7 +61,14 @@ #include "mongo/db/transaction_validation.h" #include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -228,7 +248,7 @@ void WriteConcernResult::appendTo(BSONObjBuilder* result) const { */ void waitForNoOplogHolesIfNeeded(OperationContext* opCtx) { auto const replCoord = repl::ReplicationCoordinator::get(opCtx); - if (replCoord->getConfigVotingMembers().size() == 1) { + if (replCoord->getNumConfigVotingMembers() == 1) { // It is safe for secondaries in multi-node single voter replica sets to truncate writes if // there are oplog holes. They can catch up again. repl::StorageInterface::get(opCtx)->waitForAllEarlierOplogWritesToBeVisible( diff --git a/src/mongo/db/write_concern.h b/src/mongo/db/write_concern.h index dbaebabffd291..2e32595d02835 100644 --- a/src/mongo/db/write_concern.h +++ b/src/mongo/db/write_concern.h @@ -29,7 +29,15 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/write_concern_options.h" #include "mongo/util/net/hostandport.h" diff --git a/src/mongo/db/write_concern_options.cpp b/src/mongo/db/write_concern_options.cpp index 7682f388f7d5c..88ed3b632ef83 100644 --- a/src/mongo/db/write_concern_options.cpp +++ b/src/mongo/db/write_concern_options.cpp @@ -27,16 +27,27 @@ * it in the license file. 
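The write_block_bypass.cpp hunk above caches the AuthorizationSession and passes the session's user tenant id to ResourcePattern::forClusterResource(). A minimal sketch of that check, assembled only from lines already present in the hunk:

```cpp
// Sketch only: the tenant-aware cluster-resource authorization check from the hunk above.
auto as = AuthorizationSession::get(opCtx->getClient());
uassert(6317500,
        "Client is not properly authorized to propagate mayBypassWriteBlocking",
        as->isAuthorizedForActionsOnResource(
            ResourcePattern::forClusterResource(as->getUserTenantId()), ActionType::internal));
```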
*/ -#include +#include +#include +#include +#include -#include "mongo/db/write_concern_options.h" +#include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/write_concern_options.h" #include "mongo/db/write_concern_options_gen.h" -#include "mongo/util/str.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/db/write_concern_options.h b/src/mongo/db/write_concern_options.h index e1525ec29f3b9..5fdec94f74f69 100644 --- a/src/mongo/db/write_concern_options.h +++ b/src/mongo/db/write_concern_options.h @@ -29,10 +29,22 @@ #pragma once +#include +#include #include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/jsobj.h" #include "mongo/db/read_write_concern_provenance.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/duration.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/db/write_concern_options_test.cpp b/src/mongo/db/write_concern_options_test.cpp index 00a235477b80b..67d623d71608e 100644 --- a/src/mongo/db/write_concern_options_test.cpp +++ b/src/mongo/db/write_concern_options_test.cpp @@ -27,11 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/jsobj.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/db/write_concern_options.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/dbtests/SConscript b/src/mongo/dbtests/SConscript index cb2282af6ba45..7b99f15b069ed 100644 --- a/src/mongo/dbtests/SConscript +++ b/src/mongo/dbtests/SConscript @@ -17,13 +17,10 @@ env.Library( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/catalog/catalog_impl', - '$BUILD_DIR/mongo/db/catalog/database_holder', '$BUILD_DIR/mongo/db/dbdirectclient', - '$BUILD_DIR/mongo/db/index/index_access_method', '$BUILD_DIR/mongo/db/index_builds_coordinator_mongod', '$BUILD_DIR/mongo/db/op_observer/op_observer', '$BUILD_DIR/mongo/db/server_base', - '$BUILD_DIR/mongo/db/service_context_d', '$BUILD_DIR/mongo/db/storage/flow_control_parameters', '$BUILD_DIR/mongo/db/storage/storage_control', '$BUILD_DIR/mongo/db/storage/storage_options', @@ -32,6 +29,7 @@ env.Library( ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/db/s/sharding_runtime_d', + '$BUILD_DIR/mongo/db/service_context_d', '$BUILD_DIR/mongo/util/options_parser/options_parser_init', ], ) @@ -159,9 +157,10 @@ env.Program( "$BUILD_DIR/mongo/db/repl/serveronly_repl", "$BUILD_DIR/mongo/db/repl/storage_interface_impl", "$BUILD_DIR/mongo/db/server_base", + "$BUILD_DIR/mongo/db/service_context_d", "$BUILD_DIR/mongo/db/session/sessions_collection_standalone", "$BUILD_DIR/mongo/db/shard_role", - "$BUILD_DIR/mongo/db/storage/durable_catalog_impl", + "$BUILD_DIR/mongo/db/storage/durable_catalog", "$BUILD_DIR/mongo/db/storage/storage_engine_impl", "$BUILD_DIR/mongo/db/traffic_reader", 
"$BUILD_DIR/mongo/db/transaction/transaction", diff --git a/src/mongo/dbtests/basictests.cpp b/src/mongo/dbtests/basictests.cpp index 5b5e999d45f3d..93490036f9d8c 100644 --- a/src/mongo/dbtests/basictests.cpp +++ b/src/mongo/dbtests/basictests.cpp @@ -27,17 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include +#include #include - -#include "mongo/db/client.h" -#include "mongo/dbtests/dbtests.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/queue.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep #include "mongo/util/timer.h" namespace BasicTests { diff --git a/src/mongo/dbtests/catalogtests.cpp b/src/mongo/dbtests/catalogtests.cpp index 4c80257952269..94365c2b6d179 100644 --- a/src/mongo/dbtests/catalogtests.cpp +++ b/src/mongo/dbtests/catalogtests.cpp @@ -27,18 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" -#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/uncommitted_catalog_updates.h" -#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -54,7 +60,8 @@ class ConcurrentCreateCollectionTest { void run() { auto serviceContext = getGlobalServiceContext(); - NamespaceString competingNss("test.competingCollection"); + NamespaceString competingNss = + NamespaceString::createNamespaceString_forTest("test.competingCollection"); auto client1 = serviceContext->makeClient("client1"); auto client2 = serviceContext->makeClient("client2"); diff --git a/src/mongo/dbtests/clienttests.cpp b/src/mongo/dbtests/clienttests.cpp index 262a0010a5a6c..ff4de136cb466 100644 --- a/src/mongo/dbtests/clienttests.cpp +++ b/src/mongo/dbtests/clienttests.cpp @@ -27,16 +27,39 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/client/index_spec.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" namespace ClientTests { @@ -46,12 +69,12 @@ using std::vector; class Base { public: - Base(string coll) : _nss("test." + coll) { + Base(string coll) : _nss(NamespaceString::createNamespaceString_forTest("test." + coll)) { const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; DBDirectClient db(&opCtx); - db.dropDatabase({boost::none, "test"}); + db.dropDatabase(DatabaseName::createDatabaseName_forTest(boost::none, "test")); } virtual ~Base() { @@ -66,8 +89,8 @@ class Base { return _nss; } - const std::string& ns() { - return _nss.toString(); + StringData ns() { + return _nss.ns_forTest(); } const NamespaceString _nss; @@ -161,7 +184,7 @@ class CS_10 : public Base { ASSERT_OK(dbtests::createIndex(&opCtx, ns(), BSON("a" << 1 << "b" << 1))); - FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setSort(BSON("a" << 1 << "b" << 1)); unique_ptr c = db.find(std::move(findRequest)); ASSERT_EQUALS(1111, c->itcount()); @@ -180,7 +203,7 @@ class PushBack : public Base { db.insert(nss(), BSON("i" << i)); } - FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setSort(BSON("i" << 1)); std::unique_ptr c = db.find(std::move(findRequest)); @@ -223,7 +246,8 @@ class Create : public Base { const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; DBDirectClient db(&opCtx); - const NamespaceString nss("unittests.clienttests.create"); + const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.clienttests.create"); db.createCollection(nss); BSONObj info; ASSERT(db.runCommand(nss.dbName(), diff --git a/src/mongo/dbtests/commandtests.cpp b/src/mongo/dbtests/commandtests.cpp index dfad9428cf35d..802d994a1ceba 100644 --- a/src/mongo/dbtests/commandtests.cpp +++ b/src/mongo/dbtests/commandtests.cpp @@ -28,18 +28,39 @@ */ -#include "mongo/platform/basic.h" - -#include - +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" 
+#include "mongo/bson/bsontypes.h" #include "mongo/db/client.h" -#include "mongo/db/commands.h" -#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/op_msg.h" +#include "mongo/rpc/protocol.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -52,7 +73,7 @@ TEST(CommandTests, InputDocumentSequeceWorksEndToEnd) { const auto opCtxHolder = cc().makeOperationContext(); auto opCtx = opCtxHolder.get(); - NamespaceString nss("test", "doc_seq"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest("test", "doc_seq"); DBDirectClient db(opCtx); db.dropCollection(nss); ASSERT_EQ(db.count(nss), 0u); @@ -86,10 +107,10 @@ class Base { } NamespaceString nss() { - return NamespaceString("test.testCollection"); + return NamespaceString::createNamespaceString_forTest("test.testCollection"); } DatabaseName nsDb() { - return {boost::none, "test"}; + return DatabaseName::createDatabaseName_forTest(boost::none, "test"); } const char* nsColl() { return "testCollection"; @@ -105,11 +126,12 @@ namespace FileMD5 { struct Base { Base() : db(&_opCtx) { db.dropCollection(nss()); - ASSERT_OK(dbtests::createIndex(&_opCtx, nss().ns(), BSON("files_id" << 1 << "n" << 1))); + ASSERT_OK( + dbtests::createIndex(&_opCtx, nss().ns_forTest(), BSON("files_id" << 1 << "n" << 1))); } NamespaceString nss() { - return NamespaceString("test.fs.chunks"); + return NamespaceString::createNamespaceString_forTest("test.fs.chunks"); } const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); @@ -136,7 +158,9 @@ struct Type0 : Base { } BSONObj result; - ASSERT(db.runCommand({boost::none, "test"}, BSON("filemd5" << 0), result)); + ASSERT(db.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "test"), + BSON("filemd5" << 0), + result)); ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result.getStringField("md5")); } }; @@ -160,7 +184,9 @@ struct Type2 : Base { } BSONObj result; - ASSERT(db.runCommand({boost::none, "test"}, BSON("filemd5" << 0), result)); + ASSERT(db.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "test"), + BSON("filemd5" << 0), + result)); ASSERT_EQUALS(string("5eb63bbbe01eeed093cb22bb8f5acdc3"), result.getStringField("md5")); } }; diff --git a/src/mongo/dbtests/counttests.cpp b/src/mongo/dbtests/counttests.cpp index 90c72adde342c..2dbcb0d8001e2 100644 --- a/src/mongo/dbtests/counttests.cpp +++ b/src/mongo/dbtests/counttests.cpp @@ -27,14 +27,39 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/json.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace CountTests { @@ -84,7 +109,7 @@ class Base { } static NamespaceString nss() { - return NamespaceString(ns()); + return NamespaceString::createNamespaceString_forTest(ns()); } void insert(const char* s) { diff --git a/src/mongo/dbtests/cursor_manager_test.cpp b/src/mongo/dbtests/cursor_manager_test.cpp index 6184577569565..fb2d9e58a3cb3 100644 --- a/src/mongo/dbtests/cursor_manager_test.cpp +++ b/src/mongo/dbtests/cursor_manager_test.cpp @@ -27,29 +27,63 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include - #include - +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/client.h" #include "mongo/db/clientcursor.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/cursor_server_params.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/queued_data_stage.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/exec/working_set_common.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_test_service_context.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/read_concern_level.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/stdx/unordered_set.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include 
"mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -788,8 +822,8 @@ TEST_F(CursorManagerTestCustomOpCtx, // Add a cursor for kTestNss. auto pinned = makeCursor(opCtx.get()); // Get cursors for a different NamespaceString. - auto cursorsForNamespace = - useCursorManager()->getCursorIdsForNamespace(NamespaceString("somerandom.nss")); + auto cursorsForNamespace = useCursorManager()->getCursorIdsForNamespace( + NamespaceString::createNamespaceString_forTest("somerandom.nss")); ASSERT_EQUALS(cursorsForNamespace.size(), 0ull); } diff --git a/src/mongo/dbtests/dbhelper_tests.cpp b/src/mongo/dbtests/dbhelper_tests.cpp index 6db45c253247d..06eb0aa239454 100644 --- a/src/mongo/dbtests/dbhelper_tests.cpp +++ b/src/mongo/dbtests/dbhelper_tests.cpp @@ -27,23 +27,50 @@ * it in the license file. */ +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" #include "mongo/db/global_settings.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" #include "mongo/db/op_observer/oplog_writer_impl.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/write_concern_options.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -77,7 +104,7 @@ class RemoveRange { BSONArray docs(OperationContext* opCtx) const { DBDirectClient client(opCtx); - FindCommandRequest findRequest{NamespaceString{ns}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns)}; findRequest.setHint(BSON("_id" << 1)); std::unique_ptr cursor = client.find(std::move(findRequest)); BSONArrayBuilder bab; @@ -105,7 +132,8 @@ class FindAndNoopUpdateTest { repl::ReplicationCoordinator::set( serviceContext, std::unique_ptr(coordinatorMock)); - NamespaceString nss("test.findandnoopupdate"); + 
NamespaceString nss = + NamespaceString::createNamespaceString_forTest("test.findandnoopupdate"); auto client1 = serviceContext->makeClient("client1"); auto opCtx1 = client1->makeOperationContext(); @@ -193,18 +221,21 @@ class FindAndNoopUpdateTest { WriteUnitOfWork wuow1(opCtx1); WriteUnitOfWork wuow2(opCtx2); - auto collection2 = - CollectionCatalog::get(opCtx2)->lookupCollectionByNamespace(opCtx2, nss); - ASSERT(collection2); + const auto collection2 = + acquireCollection(opCtx2, + CollectionAcquisitionRequest::fromOpCtx( + opCtx2, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection2.exists()); auto lastApplied = repl::ReplicationCoordinator::get(opCtx2->getServiceContext()) ->getMyLastAppliedOpTime() .getTimestamp(); ASSERT_OK(opCtx2->recoveryUnit()->setTimestamp(lastApplied + 1)); BSONObj res; - ASSERT_TRUE( - Helpers::findByIdAndNoopUpdate(opCtx2, CollectionPtr(collection2), idQuery, res)); + ASSERT_TRUE(Helpers::findByIdAndNoopUpdate( + opCtx2, collection2.getCollectionPtr(), idQuery, res)); - ASSERT_THROWS(Helpers::emptyCollection(opCtx1, nss), WriteConflictException); + ASSERT_THROWS(Helpers::emptyCollection(opCtx1, collection2), WriteConflictException); wuow2.commit(); } @@ -233,11 +264,18 @@ class FindAndNoopUpdateTest { const BSONObj& idQuery) { { WriteUnitOfWork wuow1(opCtx1); - auto lastApplied = repl::ReplicationCoordinator::get(opCtx1->getServiceContext()) - ->getMyLastAppliedOpTime() - .getTimestamp(); - ASSERT_OK(opCtx1->recoveryUnit()->setTimestamp(lastApplied + 1)); - Helpers::emptyCollection(opCtx1, nss); + { + const auto coll = + acquireCollection(opCtx1, + CollectionAcquisitionRequest::fromOpCtx( + opCtx1, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + auto lastApplied = repl::ReplicationCoordinator::get(opCtx1->getServiceContext()) + ->getMyLastAppliedOpTime() + .getTimestamp(); + ASSERT_OK(opCtx1->recoveryUnit()->setTimestamp(lastApplied + 1)); + Helpers::emptyCollection(opCtx1, coll); + } { WriteUnitOfWork wuow2(opCtx2); diff --git a/src/mongo/dbtests/dbtests.cpp b/src/mongo/dbtests/dbtests.cpp index 97ae6f11d6d85..0754d4e06914e 100644 --- a/src/mongo/dbtests/dbtests.cpp +++ b/src/mongo/dbtests/dbtests.cpp @@ -31,37 +31,58 @@ * Runs db unit tests. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/dbtests/dbtests.h" - +#include +#include #include +#include +#include +#include + +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_base.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/catalog/multi_index_block.h" -#include "mongo/db/commands.h" #include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/cursor_manager.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" #include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/db/service_entry_point_mongod.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/wire_version.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/dbtests/framework.h" #include "mongo/scripting/engine.h" +#include "mongo/transport/service_entry_point.h" #include "mongo/transport/transport_layer_manager.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" #include "mongo/util/quick_exit.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/signal_handlers_synchronous.h" #include "mongo/util/testing_proctor.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep +#include "mongo/util/version/releases.h" namespace mongo { namespace dbtests { @@ -100,7 +121,7 @@ Status createIndex(OperationContext* opCtx, StringData ns, const BSONObj& keys, } Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj& spec) { - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); AutoGetDb autoDb(opCtx, nss.dbName(), MODE_IX); { Lock::CollectionLock collLock(opCtx, nss, MODE_X); @@ -110,7 +131,7 @@ Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj if (!coll) { auto db = autoDb.ensureDbExists(opCtx); invariant(db); - coll = db->createCollection(opCtx, NamespaceString(ns)); + coll = db->createCollection(opCtx, NamespaceString::createNamespaceString_forTest(ns)); } invariant(coll); wunit.commit(); @@ -179,16 +200,16 @@ Status createIndexFromSpec(OperationContext* opCtx, StringData ns, const BSONObj } WriteContextForTests::WriteContextForTests(OperationContext* opCtx, StringData ns) - : _opCtx(opCtx), _nss(ns) { + : _opCtx(opCtx), _nss(NamespaceString::createNamespaceString_forTest(ns)) { // Lock the database and collection - _autoDb.emplace(opCtx, _nss.db(), MODE_IX); + _autoDb.emplace(opCtx, _nss.dbName(), MODE_IX); _collLock.emplace(opCtx, _nss, MODE_IX); const bool doShardVersionCheck = false; _clientContext.emplace(opCtx, _nss, doShardVersionCheck); auto db = _autoDb->ensureDbExists(opCtx); - invariant(db, _nss.ns()); + invariant(db, 
_nss.toStringForErrorMsg()); invariant(db == _clientContext->db()); // If the collection exists, there is no need to lock into stronger mode diff --git a/src/mongo/dbtests/dbtests.h b/src/mongo/dbtests/dbtests.h index 54c9fa33d58a5..217dd73f5d89d 100644 --- a/src/mongo/dbtests/dbtests.h +++ b/src/mongo/dbtests/dbtests.h @@ -29,7 +29,24 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/inline_auto_update.h" +#include "mongo/unittest/test_info.h" #include "mongo/unittest/unittest.h" using namespace mongo; diff --git a/src/mongo/dbtests/deferred_writer.cpp b/src/mongo/dbtests/deferred_writer.cpp index e676ec6a8f6e1..c8d37ae77cfe7 100644 --- a/src/mongo/dbtests/deferred_writer.cpp +++ b/src/mongo/dbtests/deferred_writer.cpp @@ -28,15 +28,39 @@ */ #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/deferred_writer.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/internal_plans.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/stdx/chrono.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace deferred_writer_tests { @@ -57,7 +81,8 @@ struct BSONObjCompare { }; } // namespace -static const NamespaceString kTestNamespace("unittests", "deferred_writer_tests"); +static const NamespaceString kTestNamespace = + NamespaceString::createNamespaceString_forTest("unittests", "deferred_writer_tests"); /** * For exception-safe code with DeferredWriter. diff --git a/src/mongo/dbtests/directclienttests.cpp b/src/mongo/dbtests/directclienttests.cpp index 1efd19100a288..72b8d9adb290b 100644 --- a/src/mongo/dbtests/directclienttests.cpp +++ b/src/mongo/dbtests/directclienttests.cpp @@ -27,16 +27,36 @@ * it in the license file. 
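The dbhelper_tests.cpp hunks a little further up replace direct CollectionCatalog lookups with the acquireCollection() API and pass the acquisition to Helpers::emptyCollection(). A minimal sketch of that call shape, assembled from the lines shown in those hunks:

```cpp
// Sketch only: the shard_role acquisition pattern from the dbhelper_tests.cpp hunks above.
const auto collection = acquireCollection(
    opCtx,
    CollectionAcquisitionRequest::fromOpCtx(opCtx, nss, AcquisitionPrerequisites::kWrite),
    MODE_IX);
ASSERT(collection.exists());
Helpers::emptyCollection(opCtx, collection);
```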
*/ -#include "mongo/platform/basic.h" - -#include - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/db/client.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/json.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/util/timer.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace DirectClientTests { @@ -81,7 +101,9 @@ class BadNSCmd { BSONObj result; BSONObj cmdObj = BSON("count" << ""); - ASSERT(!client.runCommand({boost::none, ""}, cmdObj, result)) << result; + ASSERT(!client.runCommand( + DatabaseName::createDatabaseName_forTest(boost::none, ""), cmdObj, result)) + << result; ASSERT_EQ(getStatusFromCommandResult(result), ErrorCodes::InvalidNamespace); } }; diff --git a/src/mongo/dbtests/documentsourcetests.cpp b/src/mongo/dbtests/documentsourcetests.cpp index 957a17c452aa4..6fa8e716179f7 100644 --- a/src/mongo/dbtests/documentsourcetests.cpp +++ b/src/mongo/dbtests/documentsourcetests.cpp @@ -31,29 +31,65 @@ * Unit tests for DocumentSource classes. */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/document_value_test_util.h" -#include "mongo/db/exec/multi_plan.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" -#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_cursor.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" -#include "mongo/db/query/mock_yield_policies.h" #include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" -#include "mongo/db/query/query_planner.h" 
-#include "mongo/db/query/stage_builder.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/scopeguard.h" namespace mongo { @@ -63,7 +99,8 @@ using boost::intrusive_ptr; using std::unique_ptr; using std::vector; -static const NamespaceString nss("unittests.documentsourcetests"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.documentsourcetests"); static const BSONObj metaTextScore = BSON("$meta" << "textScore"); @@ -92,7 +129,7 @@ class DocumentSourceCursorTest : public unittest::Test { // clean up first if this was called before _source.reset(); - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); _coll = ctx.getCollection(); auto findCommand = std::make_unique(nss); @@ -312,7 +349,7 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterTimeout) auto filter = BSON("a" << 1); auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx())); auto collectionScan = std::make_unique(ctx().get(), - readLock.getCollection(), + &readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get()); @@ -356,7 +393,7 @@ TEST_F(DocumentSourceCursorTest, NonAwaitDataCursorShouldErrorAfterTimeout) { auto filter = BSON("a" << 1); auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx())); auto collectionScan = std::make_unique(ctx().get(), - readLock.getCollection(), + &readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get()); @@ -408,7 +445,7 @@ TEST_F(DocumentSourceCursorTest, TailableAwaitDataCursorShouldErrorAfterBeingKil auto filter = BSON("a" << 1); auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx())); auto collectionScan = std::make_unique(ctx().get(), - readLock.getCollection(), + &readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get()); @@ -451,7 +488,7 @@ TEST_F(DocumentSourceCursorTest, NormalCursorShouldErrorAfterBeingKilled) { auto filter = BSON("a" << 1); auto matchExpression = uassertStatusOK(MatchExpressionParser::parse(filter, ctx())); auto collectionScan = std::make_unique(ctx().get(), - readLock.getCollection(), + &readLock.getCollection(), collScanParams, workingSet.get(), matchExpression.get()); diff --git a/src/mongo/dbtests/extensions_callback_real_test.cpp b/src/mongo/dbtests/extensions_callback_real_test.cpp index bd75718e375c7..98b0a58a1dbdd 100644 --- a/src/mongo/dbtests/extensions_callback_real_test.cpp +++ b/src/mongo/dbtests/extensions_callback_real_test.cpp @@ -27,17 +27,39 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" -#include "mongo/db/json.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/fts/fts_query.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_text.h" +#include "mongo/db/matcher/expression_text_base.h" #include "mongo/db/matcher/extensions_callback_real.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/query_knobs_gen.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -48,7 +70,9 @@ namespace { class ExtensionsCallbackRealTest : public unittest::Test { public: - ExtensionsCallbackRealTest() : _nss("unittests.extensions_callback_real_test") { + ExtensionsCallbackRealTest() + : _nss(NamespaceString::createNamespaceString_forTest( + "unittests.extensions_callback_real_test")) { _isDesugarWhereToFunctionOn = internalQueryDesugarWhereToFunction.load(); } @@ -92,7 +116,7 @@ TEST_F(ExtensionsCallbackRealTest, TextNoIndex) { TEST_F(ExtensionsCallbackRealTest, TextBasic) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique @@ -114,7 +138,7 @@ TEST_F(ExtensionsCallbackRealTest, TextBasic) { TEST_F(ExtensionsCallbackRealTest, TextLanguageError) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique @@ -128,7 +152,7 @@ TEST_F(ExtensionsCallbackRealTest, TextLanguageError) { TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveTrue) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique @@ -145,7 +169,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveTrue) { TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveFalse) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique @@ -162,7 +186,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveFalse) { TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveError) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique @@ -176,7 +200,7 @@ TEST_F(ExtensionsCallbackRealTest, TextCaseSensitiveError) { TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveTrue) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique @@ -193,7 +217,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveTrue) { TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveFalse) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + 
_nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique @@ -210,7 +234,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveFalse) { TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveError) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique @@ -224,7 +248,7 @@ TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveError) { TEST_F(ExtensionsCallbackRealTest, TextDiacriticSensitiveAndCaseSensitiveTrue) { ASSERT_OK(dbtests::createIndex(&_opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("a" << "text"), false)); // isUnique diff --git a/src/mongo/dbtests/framework.cpp b/src/mongo/dbtests/framework.cpp index e326e6292ec46..9045a5c503755 100644 --- a/src/mongo/dbtests/framework.cpp +++ b/src/mongo/dbtests/framework.cpp @@ -29,34 +29,41 @@ #include "mongo/dbtests/framework.h" +#include +#include +#include #include +#include +#include -#include "mongo/base/checked_cast.h" -#include "mongo/base/status.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_impl.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_impl.h" #include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/index_builds_coordinator.h" #include "mongo/db/index_builds_coordinator_mongod.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/collection_sharding_state_factory_shard.h" -#include "mongo/db/s/sharding_state.h" #include "mongo/db/service_context.h" #include "mongo/db/storage/control/storage_control.h" #include "mongo/db/storage/storage_engine_init.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/dbtests/framework_options.h" #include "mongo/logv2/log.h" -#include "mongo/platform/mutex.h" +#include "mongo/logv2/log_component.h" #include "mongo/scripting/dbdirectclient_factory.h" #include "mongo/scripting/engine.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/exit.h" #include "mongo/util/exit_code.h" +#include "mongo/util/periodic_runner.h" #include "mongo/util/periodic_runner_factory.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/dbtests/framework_options.cpp b/src/mongo/dbtests/framework_options.cpp index 2ab6f22d148bb..30cd29f477024 100644 --- a/src/mongo/dbtests/framework_options.cpp +++ b/src/mongo/dbtests/framework_options.cpp @@ -28,25 +28,42 @@ */ -#include "mongo/platform/basic.h" - - -#include "mongo/dbtests/framework_options.h" - #include #include +#include +#include + +#include +#include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/bson/util/builder.h" -#include "mongo/db/query/find.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" #include "mongo/db/storage/flow_control_parameters_gen.h" #include "mongo/db/storage/storage_options.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include 
"mongo/dbtests/framework_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/options_parser/environment.h" +#include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/startup_options.h" -#include "mongo/util/password.h" +#include "mongo/util/options_parser/value.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/dbtests/framework_options.h b/src/mongo/dbtests/framework_options.h index b79b4eca9051f..5c7c45432ab2f 100644 --- a/src/mongo/dbtests/framework_options.h +++ b/src/mongo/dbtests/framework_options.h @@ -33,6 +33,7 @@ #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" namespace mongo { diff --git a/src/mongo/dbtests/framework_options_init.cpp b/src/mongo/dbtests/framework_options_init.cpp index 702c28589e6a3..f052f1bd09273 100644 --- a/src/mongo/dbtests/framework_options_init.cpp +++ b/src/mongo/dbtests/framework_options_init.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" #include "mongo/dbtests/framework_options.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" #include "mongo/util/quick_exit.h" diff --git a/src/mongo/dbtests/index_access_method_test.cpp b/src/mongo/dbtests/index_access_method_test.cpp index 710358daf5887..e70ef91b7817d 100644 --- a/src/mongo/dbtests/index_access_method_test.cpp +++ b/src/mongo/dbtests/index_access_method_test.cpp @@ -27,16 +27,41 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "boost/container/detail/flat_tree.hpp" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/index/index_access_method.h" -#include "mongo/db/json.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_string.h" #include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -45,8 +70,8 @@ namespace { KeyStringSet makeKeyStringSet(std::initializer_list objs) { KeyStringSet keyStrings; for (auto& obj : objs) { - KeyString::HeapBuilder keyString( - KeyString::Version::kLatestVersion, obj, Ordering::make(BSONObj())); + key_string::HeapBuilder keyString( + key_string::Version::kLatestVersion, obj, Ordering::make(BSONObj())); keyStrings.insert(keyString.release()); } return keyStrings; @@ -212,14 +237,14 @@ TEST(IndexAccessMethodSetDifference, ShouldNotReportOverlapsFromNonDisjointSets) for (auto&& keyString : diff.first) { ASSERT(left.find(keyString) != left.end()); // Make sure it's not in the intersection. - auto obj = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto obj = key_string::toBson(keyString, Ordering::make(BSONObj())); ASSERT_BSONOBJ_NE(obj, BSON("" << 1)); ASSERT_BSONOBJ_NE(obj, BSON("" << 4)); } for (auto&& keyString : diff.second) { ASSERT(right.find(keyString) != right.end()); // Make sure it's not in the intersection. 
- auto obj = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto obj = key_string::toBson(keyString, Ordering::make(BSONObj())); ASSERT_BSONOBJ_NE(obj, BSON("" << 1)); ASSERT_BSONOBJ_NE(obj, BSON("" << 4)); } @@ -233,7 +258,7 @@ TEST(IndexAccessMethodInsertKeys, DuplicatesCheckingOnSecondaryUniqueIndexes) { auto indexName = "a_1"; auto indexSpec = BSON("name" << indexName << "key" << BSON("a" << 1) << "unique" << true << "v" << static_cast(IndexDescriptor::IndexVersion::kV2)); - ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec)); + ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns_forTest(), indexSpec)); AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X); const auto& coll = autoColl.getCollection(); @@ -241,22 +266,24 @@ TEST(IndexAccessMethodInsertKeys, DuplicatesCheckingOnSecondaryUniqueIndexes) { auto indexAccessMethod = coll->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData(); - KeyString::HeapBuilder keyString1( - KeyString::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(1)); - KeyString::HeapBuilder keyString2( - KeyString::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(2)); + key_string::HeapBuilder keyString1( + key_string::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(1)); + key_string::HeapBuilder keyString2( + key_string::Version::kLatestVersion, BSON("" << 1), Ordering::make(BSONObj()), RecordId(2)); KeyStringSet keys{keyString1.release(), keyString2.release()}; struct InsertDeleteOptions options; /* options.dupsAllowed = false */ int64_t numInserted; // Checks duplicates and returns the error code when constraints are enforced. - auto status = indexAccessMethod->insertKeys(opCtx, coll, keys, options, {}, &numInserted); + auto status = indexAccessMethod->insertKeys( + opCtx, coll, indexDescriptor->getEntry(), keys, options, {}, &numInserted); ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey); ASSERT_EQ(numInserted, 0); // Skips the check on duplicates when constraints are not enforced. 
opCtx->setEnforceConstraints(false); - ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, keys, options, {}, &numInserted)); + ASSERT_OK(indexAccessMethod->insertKeys( + opCtx, coll, indexDescriptor->getEntry(), keys, options, {}, &numInserted)); ASSERT_EQ(numInserted, 2); } @@ -270,7 +297,7 @@ TEST(IndexAccessMethodInsertKeys, InsertWhenPrepareUnique) { auto indexSpec = BSON("name" << indexName << "key" << BSON("a" << 1) << "prepareUnique" << true << "v" << static_cast(IndexDescriptor::IndexVersion::kV2)); - ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec)); + ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns_forTest(), indexSpec)); AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X); const auto& coll = autoColl.getCollection(); @@ -278,20 +305,21 @@ TEST(IndexAccessMethodInsertKeys, InsertWhenPrepareUnique) { auto indexAccessMethod = coll->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData(); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - BSON("" << 1), - Ordering::make(BSONObj()), - RecordId(1)); - KeyString::HeapBuilder keyString2(KeyString::Version::kLatestVersion, - BSON("" << 1), - Ordering::make(BSONObj()), - RecordId(2)); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + BSON("" << 1), + Ordering::make(BSONObj()), + RecordId(1)); + key_string::HeapBuilder keyString2(key_string::Version::kLatestVersion, + BSON("" << 1), + Ordering::make(BSONObj()), + RecordId(2)); KeyStringSet keys{keyString1.release(), keyString2.release()}; struct InsertDeleteOptions options; int64_t numInserted; // Disallows new duplicates in a regular index and rejects the insert. - auto status = indexAccessMethod->insertKeys(opCtx, coll, keys, options, {}, &numInserted); + auto status = indexAccessMethod->insertKeys( + opCtx, coll, indexDescriptor->getEntry(), keys, options, {}, &numInserted); ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey); ASSERT_EQ(numInserted, 0); } @@ -307,7 +335,7 @@ TEST(IndexAccessMethodUpdateKeys, UpdateWhenPrepareUnique) { auto indexSpec = BSON("name" << indexName << "key" << BSON("a" << 1) << "prepareUnique" << true << "v" << static_cast(IndexDescriptor::IndexVersion::kV2)); - ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns(), indexSpec)); + ASSERT_OK(dbtests::createIndexFromSpec(opCtx, nss.ns_forTest(), indexSpec)); AutoGetCollection autoColl(opCtx, nss, LockMode::MODE_X); const auto& coll = autoColl.getCollection(); @@ -315,18 +343,18 @@ TEST(IndexAccessMethodUpdateKeys, UpdateWhenPrepareUnique) { auto indexAccessMethod = coll->getIndexCatalog()->getEntry(indexDescriptor)->accessMethod()->asSortedData(); - KeyString::HeapBuilder keyString1(KeyString::Version::kLatestVersion, - BSON("" << 1), - Ordering::make(BSONObj()), - RecordId(1)); - KeyString::HeapBuilder keyString2_old(KeyString::Version::kLatestVersion, - BSON("" << 2), - Ordering::make(BSONObj()), - RecordId(2)); - KeyString::HeapBuilder keyString2_new(KeyString::Version::kLatestVersion, - BSON("" << 1), - Ordering::make(BSONObj()), - RecordId(2)); + key_string::HeapBuilder keyString1(key_string::Version::kLatestVersion, + BSON("" << 1), + Ordering::make(BSONObj()), + RecordId(1)); + key_string::HeapBuilder keyString2_old(key_string::Version::kLatestVersion, + BSON("" << 2), + Ordering::make(BSONObj()), + RecordId(2)); + key_string::HeapBuilder keyString2_new(key_string::Version::kLatestVersion, + BSON("" << 1), + Ordering::make(BSONObj()), + RecordId(2)); KeyStringSet key1{keyString1.release()}; KeyStringSet 
key2_old{keyString2_old.release()}; KeyStringSet key2_new{keyString2_new.release()}; @@ -336,13 +364,16 @@ TEST(IndexAccessMethodUpdateKeys, UpdateWhenPrepareUnique) { int64_t numDeleted; // Inserts two keys. - ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, key1, options, {}, &numInserted)); + ASSERT_OK(indexAccessMethod->insertKeys( + opCtx, coll, indexDescriptor->getEntry(), key1, options, {}, &numInserted)); ASSERT_EQ(numInserted, 1); - ASSERT_OK(indexAccessMethod->insertKeys(opCtx, coll, key2_old, options, {}, &numInserted)); + ASSERT_OK(indexAccessMethod->insertKeys( + opCtx, coll, indexDescriptor->getEntry(), key2_old, options, {}, &numInserted)); ASSERT_EQ(numInserted, 1); // Disallows new duplicates in a regular index and rejects the update. - auto status = indexAccessMethod->doUpdate(opCtx, coll, ticket, &numInserted, &numDeleted); + auto status = indexAccessMethod->doUpdate( + opCtx, coll, indexDescriptor->getEntry(), ticket, &numInserted, &numDeleted); ASSERT_EQ(status.code(), ErrorCodes::DuplicateKey); ASSERT_EQ(numInserted, 0); ASSERT_EQ(numDeleted, 0); diff --git a/src/mongo/dbtests/indexcatalogtests.cpp b/src/mongo/dbtests/indexcatalogtests.cpp index 092acdf75a5f8..99462f0c46a80 100644 --- a/src/mongo/dbtests/indexcatalogtests.cpp +++ b/src/mongo/dbtests/indexcatalogtests.cpp @@ -27,21 +27,42 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace IndexCatalogTests { namespace { const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; } // namespace -static const NamespaceString _nss("unittests.indexcatalog"); +static const NamespaceString _nss = + NamespaceString::createNamespaceString_forTest("unittests.indexcatalog"); class IndexCatalogTestBase { protected: @@ -81,12 +102,12 @@ class IndexIteratorTests : IndexCatalogTestBase { void run() { const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - dbtests::WriteContextForTests ctx(&opCtx, _nss.ns()); + dbtests::WriteContextForTests ctx(&opCtx, _nss.ns_forTest()); int numFinishedIndexesStart = indexCatalog(&opCtx)->numIndexesReady(); - dbtests::createIndex(&opCtx, _nss.ns(), BSON("x" << 1)).transitional_ignore(); - dbtests::createIndex(&opCtx, _nss.ns(), BSON("y" << 1)).transitional_ignore(); + dbtests::createIndex(&opCtx, _nss.ns_forTest(), BSON("x" << 1)).transitional_ignore(); + dbtests::createIndex(&opCtx, _nss.ns_forTest(), BSON("y" << 1)).transitional_ignore(); 
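The dbtests hunks above and below repeatedly swap direct `NamespaceString("db.coll")` construction and `nss.ns()` calls for the test-only factory and accessor. As a minimal illustrative sketch of that pattern, using a made-up namespace and key pattern and only the calls already visible in these hunks:

```cpp
#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/dbtests/dbtests.h"  // IWYU pragma: keep

// Sketch only: dbtests now build namespaces through the _forTest factory, and the string
// form comes from ns_forTest() instead of the removed nss.ns() accessor.
static const NamespaceString kExampleNss =
    NamespaceString::createNamespaceString_forTest("unittests.example");

void createExampleIndex(OperationContext& opCtx) {
    // Same call shape as the createIndex() uses in the hunk above; the key pattern is made up.
    dbtests::createIndex(&opCtx, kExampleNss.ns_forTest(), BSON("x" << 1)).transitional_ignore();
}
```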
ASSERT_TRUE(indexCatalog(&opCtx)->numIndexesReady() == numFinishedIndexesStart + 2); @@ -128,7 +149,7 @@ class IndexCatalogEntryDroppedTest : IndexCatalogTestBase { void run() { const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - dbtests::WriteContextForTests ctx(&opCtx, _nss.ns()); + dbtests::WriteContextForTests ctx(&opCtx, _nss.ns_forTest()); const IndexDescriptor* idDesc = indexCatalog(&opCtx)->findIdIndex(&opCtx); std::shared_ptr entry = @@ -150,8 +171,12 @@ class IndexCatalogEntryDroppedTest : IndexCatalogTestBase { WriteUnitOfWork wuow(&opCtx); ASSERT_OK(autoColl.getDb()->dropCollection(&opCtx, _nss)); wuow.commit(); - ASSERT_TRUE(entry->isDropped()); } + + // The original index entry is not marked as dropped. When dropping the collection, a + // copy-on-write is performed on the index entry and the previous index entry is left + // untouched. + ASSERT_FALSE(entry->isDropped()); } }; @@ -185,11 +210,11 @@ class RefreshEntry : IndexCatalogTestBase { void run() { const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - dbtests::WriteContextForTests ctx(&opCtx, _nss.ns()); + dbtests::WriteContextForTests ctx(&opCtx, _nss.ns_forTest()); const std::string indexName = "x_1"; ASSERT_OK(dbtests::createIndexFromSpec(&opCtx, - _nss.ns(), + _nss.ns_forTest(), BSON("name" << indexName << "key" << BSON("x" << 1) << "v" << static_cast(kIndexVersion) << "expireAfterSeconds" << 5))); diff --git a/src/mongo/dbtests/indexupdatetests.cpp b/src/mongo/dbtests/indexupdatetests.cpp index e71395a710c8f..e53a054b3a500 100644 --- a/src/mongo/dbtests/indexupdatetests.cpp +++ b/src/mongo/dbtests/indexupdatetests.cpp @@ -27,17 +27,53 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/client/index_spec.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/catalog/multi_index_block.h" #include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" #include "mongo/db/storage/storage_engine_init.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" namespace mongo { namespace IndexUpdateTests { @@ -46,7 +82,7 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; } // namespace static const char* const _ns = "unittests.indexupdate"; -static const NamespaceString _nss = NamespaceString(_ns); +static const NamespaceString _nss = NamespaceString::createNamespaceString_forTest(_ns); /** * Test fixture for a write locked test using collection _ns. 
Includes functionality to @@ -61,7 +97,7 @@ class IndexBuildBase { AutoGetCollection autoColl(_opCtx, _nss, LockMode::MODE_IX); WriteUnitOfWork wuow(_opCtx); auto db = autoColl.ensureDbExists(_opCtx); - ASSERT(db->createCollection(_opCtx, _nss)) << _nss; + ASSERT(db->createCollection(_opCtx, _nss)) << _nss.toStringForErrorMsg(); wuow.commit(); } @@ -71,7 +107,7 @@ class IndexBuildBase { AutoGetCollection autoColl(_opCtx, _nss, LockMode::MODE_X); WriteUnitOfWork wuow(_opCtx); auto db = autoColl.ensureDbExists(_opCtx); - ASSERT_OK(db->dropCollection(_opCtx, _nss, {})) << _nss; + ASSERT_OK(db->dropCollection(_opCtx, _nss, {})) << _nss.toStringForErrorMsg(); wuow.commit(); } @@ -128,7 +164,7 @@ class InsertBuildIgnoreUnique : public IndexBuildBase { void run() { AutoGetCollection autoColl(_opCtx, _nss, LockMode::MODE_X); auto db = autoColl.ensureDbExists(_opCtx); - ASSERT(db) << _nss; + ASSERT(db) << _nss.toStringForErrorMsg(); auto& coll = collection(); { WriteUnitOfWork wunit(_opCtx); @@ -154,8 +190,7 @@ class InsertBuildIgnoreUnique : public IndexBuildBase { const BSONObj spec = BSON("name" << "a" << "key" << BSON("a" << 1) << "v" - << static_cast(kIndexVersion) << "unique" << true - << "background" << background); + << static_cast(kIndexVersion) << "unique" << true); ScopeGuard abortOnExit([&] { indexer.abortIndexBuild(_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn); @@ -184,7 +219,7 @@ class InsertBuildEnforceUnique : public IndexBuildBase { { AutoGetCollection autoColl(_opCtx, _nss, LockMode::MODE_IX); auto db = autoColl.ensureDbExists(_opCtx); - ASSERT(db) << _nss; + ASSERT(db) << _nss.toStringForErrorMsg(); auto& coll = collection(); { @@ -212,8 +247,7 @@ class InsertBuildEnforceUnique : public IndexBuildBase { const BSONObj spec = BSON("name" << "a" << "key" << BSON("a" << 1) << "v" - << static_cast(kIndexVersion) << "unique" << true - << "background" << background); + << static_cast(kIndexVersion) << "unique" << true); ScopeGuard abortOnExit([&] { indexer.abortIndexBuild(_opCtx, collection(), MultiIndexBlock::kNoopOnCleanUpFn); }); @@ -244,7 +278,7 @@ class InsertBuildIndexInterrupt : public IndexBuildBase { { AutoGetCollection autoColl(_opCtx, _nss, LockMode::MODE_X); auto db = autoColl.ensureDbExists(_opCtx); - ASSERT(db) << _nss; + ASSERT(db) << _nss.toStringForErrorMsg(); auto& coll = collection(); { @@ -480,8 +514,8 @@ class SameSpecDifferentSparse : public ComplexIndex { void run() { ASSERT_OK(createIndex(BSON("name" << "super3" - << "unique" << 1 << "sparse" << false << "background" << true - << "expireAfterSeconds" << 3600 << "key" + << "unique" << 1 << "sparse" << false << "expireAfterSeconds" + << 3600 << "key" << BSON("superIdx" << "2d") << "v" << static_cast(kIndexVersion)))); @@ -658,13 +692,14 @@ class IndexingSymbolWithInheritedCollationShouldFail { DBDirectClient client(opCtx.get()); client.dropCollection(_nss); BSONObj cmdResult; - ASSERT_TRUE(client.runCommand({boost::none, "unittests"}, - BSON("create" - << "indexupdate" - << "collation" - << BSON("locale" - << "fr")), - cmdResult)); + ASSERT_TRUE( + client.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "unittests"), + BSON("create" + << "indexupdate" + << "collation" + << BSON("locale" + << "fr")), + cmdResult)); IndexSpec indexSpec; indexSpec.addKey("a"); client.createIndex(_nss, indexSpec); diff --git a/src/mongo/dbtests/insert_test.cpp b/src/mongo/dbtests/insert_test.cpp index 450eb9a18e647..7b94cf17783ec 100644 --- a/src/mongo/dbtests/insert_test.cpp +++ 
b/src/mongo/dbtests/insert_test.cpp @@ -27,13 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/insert.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/service_context.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/dbtests/jsobjtests.cpp b/src/mongo/dbtests/jsobjtests.cpp index 2eb4c94c0e8ff..566eb88c1b8e4 100644 --- a/src/mongo/dbtests/jsobjtests.cpp +++ b/src/mongo/dbtests/jsobjtests.cpp @@ -32,25 +32,53 @@ */ -#include "mongo/platform/basic.h" - #include +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_comparator_interface_base.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonelement_comparator_interface.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/bson/bson_helper.h" #include "mongo/db/bson/dotted_path_support.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/allocator.h" +#include "mongo/util/assert_util.h" #include "mongo/util/embedded_builder.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -170,7 +198,7 @@ FieldCompareResult compareDottedFieldNames(const string& l, "compareDottedFieldNames ERROR l: {l} r: {r} TOO MANY LOOPS", "l"_attr = l, "r"_attr = r); - verify(0); + MONGO_verify(0); return SAME; // will never get here } } // namespace mongo @@ -290,9 +318,9 @@ class IsPrefixOf : public Base { } { BSONObj k = BSON("x" << 1); - verify(k.isFieldNamePrefixOf(BSON("x" - << "hi"))); - verify(!k.isFieldNamePrefixOf(BSON("a" << 1))); + MONGO_verify(k.isFieldNamePrefixOf(BSON("x" + << "hi"))); + MONGO_verify(!k.isFieldNamePrefixOf(BSON("a" << 1))); } } }; diff --git a/src/mongo/dbtests/jsontests.cpp b/src/mongo/dbtests/jsontests.cpp index e10f30277c45a..edc902aae40fe 100644 --- a/src/mongo/dbtests/jsontests.cpp +++ b/src/mongo/dbtests/jsontests.cpp @@ -33,16 +33,53 @@ #include #include +#include 
+#include +#include +#include +#include #include -#include +#include // IWYU pragma: keep +#include #include +#include #include - -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/dbtests/dbtests.h" +#include +#include +#include + +#include +#include +#include +// IWYU pragma: no_include "boost/multi_index/detail/bidir_node_iterator.hpp" +#include +// IWYU pragma: no_include "boost/property_tree/detail/exception_implementation.hpp" +// IWYU pragma: no_include "boost/property_tree/detail/ptree_implementation.hpp" +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/decimal128.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/errno_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/dbtests/jstests.cpp b/src/mongo/dbtests/jstests.cpp index 7ac36d0c506fe..932e8e71e3da3 100644 --- a/src/mongo/dbtests/jstests.cpp +++ b/src/mongo/dbtests/jstests.cpp @@ -27,17 +27,46 @@ * it in the license file. */ -#include "mongo/base/parse_number.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/hasher.h" -#include "mongo/db/json.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/platform/decimal128.h" #include "mongo/scripting/engine.h" #include "mongo/shell/shell_utils.h" -#include "mongo/util/concurrency/thread_name.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/time_support.h" #include "mongo/util/timer.h" diff --git a/src/mongo/dbtests/logical_sessions_tests.cpp b/src/mongo/dbtests/logical_sessions_tests.cpp index 99ea8f08f908e..2676c171a9b0b 100644 --- a/src/mongo/dbtests/logical_sessions_tests.cpp +++ b/src/mongo/dbtests/logical_sessions_tests.cpp @@ -27,23 +27,47 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include - -#include "mongo/client/index_spec.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/sessions_collection_standalone.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { namespace { -const NamespaceString kTestNS("config.system.sessions"); +const NamespaceString kTestNS = + NamespaceString::createNamespaceString_forTest("config.system.sessions"); LogicalSessionRecord makeRecord(Date_t time = Date_t::now()) { auto record = makeLogicalSessionRecordForTest(); diff --git a/src/mongo/dbtests/matchertests.cpp b/src/mongo/dbtests/matchertests.cpp index 1ea2ba9233ec4..4b798f24ac50c 100644 --- a/src/mongo/dbtests/matchertests.cpp +++ b/src/mongo/dbtests/matchertests.cpp @@ -27,18 +27,39 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include - +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" -#include "mongo/db/json.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_real.h" +#include "mongo/db/matcher/match_details.h" #include "mongo/db/matcher/matcher.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/timer.h" namespace MatcherTests { @@ -54,7 +75,7 @@ class CollectionBase { virtual ~CollectionBase() {} }; -const NamespaceString kTestNss = NamespaceString("db.dummy"); +const NamespaceString kTestNss = NamespaceString::createNamespaceString_forTest("db.dummy"); template class Basic { @@ -226,7 +247,8 @@ class WhereSimple1 { void run() { const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - const NamespaceString nss("unittests.matchertests"); + const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.matchertests"); AutoGetCollectionForReadCommand ctx(&opCtx, nss); const boost::intrusive_ptr expCtx(new ExpressionContext( diff --git a/src/mongo/dbtests/mock/mock_conn_registry.cpp b/src/mongo/dbtests/mock/mock_conn_registry.cpp index 1508c7877e46c..a769dbdcb4c0c 100644 --- a/src/mongo/dbtests/mock/mock_conn_registry.cpp +++ b/src/mongo/dbtests/mock/mock_conn_registry.cpp @@ -27,12 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/dbtests/mock/mock_conn_registry.h" +#include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/string_data.h" +#include "mongo/dbtests/mock/mock_conn_registry.h" #include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/dbtests/mock/mock_conn_registry.h b/src/mongo/dbtests/mock/mock_conn_registry.h index 798671caeaf49..b775adf502107 100644 --- a/src/mongo/dbtests/mock/mock_conn_registry.h +++ b/src/mongo/dbtests/mock/mock_conn_registry.h @@ -29,9 +29,16 @@ #pragma once +#include +#include + #include "mongo/base/status.h" +#include "mongo/client/client_api_version_parameters_gen.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" #include "mongo/dbtests/mock/mock_dbclient_connection.h" #include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/concurrency/mutex.h" diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp index ea7c9853df784..69246fb4264b4 100644 --- a/src/mongo/dbtests/mock/mock_dbclient_connection.cpp +++ b/src/mongo/dbtests/mock/mock_dbclient_connection.cpp @@ -27,14 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/dbtests/mock/mock_dbclient_connection.h" - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/client/dbclient_mockcursor.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/util/net/socket_exception.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" using mongo::BSONObj; diff --git a/src/mongo/dbtests/mock/mock_dbclient_connection.h b/src/mongo/dbtests/mock/mock_dbclient_connection.h index 5d9787aa0ff5c..f9a908d1c7d58 100644 --- a/src/mongo/dbtests/mock/mock_dbclient_connection.h +++ b/src/mongo/dbtests/mock/mock_dbclient_connection.h @@ -29,12 +29,38 @@ #pragma once +#include +#include #include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" #include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/net/hostandport.h" +#include 
"mongo/util/net/ssl_options.h" namespace mongo { /** diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.cpp b/src/mongo/dbtests/mock/mock_remote_db_server.cpp index 9c6e880edeec6..f7c416f83f7e2 100644 --- a/src/mongo/dbtests/mock/mock_remote_db_server.cpp +++ b/src/mongo/dbtests/mock/mock_remote_db_server.cpp @@ -27,20 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/dbtests/mock/mock_remote_db_server.h" - +#include +#include #include -#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/projection_parser.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/rpc/metadata.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" #include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/rpc/reply_interface.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/net/socket_exception.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" @@ -60,7 +69,7 @@ MockRemoteDBServer::CircularBSONIterator::CircularBSONIterator( } StatusWith MockRemoteDBServer::CircularBSONIterator::next() { - verify(_iter != _replyObjs.end()); + MONGO_verify(_iter != _replyObjs.end()); StatusWith reply = _iter->isOK() ? StatusWith(_iter->getValue().copy()) : *_iter; ++_iter; @@ -127,22 +136,23 @@ void MockRemoteDBServer::setCommandReply(const string& cmdName, void MockRemoteDBServer::insert(const NamespaceString& nss, BSONObj obj) { scoped_spinlock sLock(_lock); - vector& mockCollection = _dataMgr[nss.ns()]; + vector& mockCollection = _dataMgr[nss.toString_forTest()]; mockCollection.push_back(obj.copy()); } void MockRemoteDBServer::remove(const NamespaceString& nss, const BSONObj&) { scoped_spinlock sLock(_lock); - if (_dataMgr.count(nss.ns()) == 0) { + auto ns = nss.toString_forTest(); + if (_dataMgr.count(ns) == 0) { return; } - _dataMgr.erase(nss.ns()); + _dataMgr.erase(ns); } -void MockRemoteDBServer::assignCollectionUuid(const std::string& ns, const mongo::UUID& uuid) { +void MockRemoteDBServer::assignCollectionUuid(StringData ns, const mongo::UUID& uuid) { scoped_spinlock sLock(_lock); - _uuidToNs[uuid] = ns; + _uuidToNs[uuid] = ns.toString(); } rpc::UniqueReply MockRemoteDBServer::runCommand(InstanceID id, const OpMsgRequest& request) { @@ -213,7 +223,7 @@ mongo::BSONArray MockRemoteDBServer::findImpl(InstanceID id, scoped_spinlock sLock(_lock); _queryCount++; - auto ns = nsOrUuid.uuid() ? _uuidToNs[*nsOrUuid.uuid()] : nsOrUuid.nss()->ns(); + auto ns = nsOrUuid.isUUID() ? 
_uuidToNs[nsOrUuid.uuid()] : nsOrUuid.nss().toString_forTest(); const vector& coll = _dataMgr[ns]; BSONArrayBuilder result; for (vector::const_iterator iter = coll.begin(); iter != coll.end(); ++iter) { diff --git a/src/mongo/dbtests/mock/mock_remote_db_server.h b/src/mongo/dbtests/mock/mock_remote_db_server.h index 170d1617d6bda..74b1a83b025ce 100644 --- a/src/mongo/dbtests/mock/mock_remote_db_server.h +++ b/src/mongo/dbtests/mock/mock_remote_db_server.h @@ -29,15 +29,26 @@ #pragma once +#include +#include +#include #include #include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/connection_string.h" #include "mongo/db/jsobj.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" +#include "mongo/rpc/op_msg.h" #include "mongo/rpc/unique_message.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/concurrency/spin_lock.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { namespace projection_executor { @@ -155,7 +166,7 @@ class MockRemoteDBServer { * @param ns the namespace to be associated with the uuid. * @param uuid the uuid to associate with the namespace. */ - void assignCollectionUuid(const std::string& ns, const mongo::UUID& uuid); + void assignCollectionUuid(StringData ns, const mongo::UUID& uuid); // // DBClientBase methods diff --git a/src/mongo/dbtests/mock/mock_replica_set.cpp b/src/mongo/dbtests/mock/mock_replica_set.cpp index b03ffd477edd8..e70d02320f8bb 100644 --- a/src/mongo/dbtests/mock/mock_replica_set.cpp +++ b/src/mongo/dbtests/mock/mock_replica_set.cpp @@ -29,13 +29,29 @@ #include "mongo/dbtests/mock/mock_replica_set.h" +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/client/sdam/server_description.h" #include "mongo/client/sdam/topology_description_builder.h" +#include "mongo/db/repl/member_config.h" #include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_tag.h" #include "mongo/dbtests/mock/mock_conn_registry.h" -#include "mongo/dbtests/mock/mock_dbclient_connection.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" using namespace mongo::repl; @@ -149,7 +165,7 @@ void MockReplicaSet::setPrimary(const string& hostAndPort) { _primaryHost = hostAndPort; - mockIsMasterCmd(); + mockHelloCmd(); mockReplSetGetStatusCmd(); } @@ -183,12 +199,12 @@ repl::ReplSetConfig MockReplicaSet::getReplConfig() const { void MockReplicaSet::setConfig(const repl::ReplSetConfig& newConfig) { _replConfig = newConfig; - mockIsMasterCmd(); + mockHelloCmd(); mockReplSetGetStatusCmd(); } void MockReplicaSet::kill(const string& hostAndPort) { - verify(_nodeMap.count(hostAndPort) == 1); + MONGO_verify(_nodeMap.count(hostAndPort) == 1); _nodeMap[hostAndPort]->shutdown(); } @@ -199,7 +215,7 @@ void MockReplicaSet::kill(const vector& hostList) { } void MockReplicaSet::restore(const string& hostAndPort) { - verify(_nodeMap.count(hostAndPort) == 1); + MONGO_verify(_nodeMap.count(hostAndPort) == 1); _nodeMap[hostAndPort]->reboot(); } @@ -211,14 +227,14 @@ BSONObj MockReplicaSet::mockHelloResponseFor(const MockRemoteDBServer& server) c const MemberConfig* 
member = _replConfig.findMemberByHostAndPort(hostAndPort); if (!member) { - builder.append("ismaster", false); + builder.append("isWritablePrimary", false); builder.append("secondary", false); vector hostList; builder.append("hosts", hostList); } else { const bool isPrimary = hostAndPort.toString() == getPrimary(); - builder.append("ismaster", isPrimary); + builder.append("isWritablePrimary", isPrimary); builder.append("secondary", !isPrimary); { @@ -285,14 +301,12 @@ BSONObj MockReplicaSet::mockHelloResponseFor(const MockRemoteDBServer& server) c return builder.obj(); } -void MockReplicaSet::mockIsMasterCmd() { +void MockReplicaSet::mockHelloCmd() { for (ReplNodeMap::iterator nodeIter = _nodeMap.begin(); nodeIter != _nodeMap.end(); ++nodeIter) { - auto isMaster = mockHelloResponseFor(*nodeIter->second); + auto helloReply = mockHelloResponseFor(*nodeIter->second); - // DBClientBase::isMaster() sends "ismaster", but ReplicaSetMonitor sends "isMaster". - nodeIter->second->setCommandReply("ismaster", isMaster); - nodeIter->second->setCommandReply("isMaster", isMaster); + nodeIter->second->setCommandReply("hello", helloReply); } } diff --git a/src/mongo/dbtests/mock/mock_replica_set.h b/src/mongo/dbtests/mock/mock_replica_set.h index 677d0b822a16f..374eb115f1fa8 100644 --- a/src/mongo/dbtests/mock/mock_replica_set.h +++ b/src/mongo/dbtests/mock/mock_replica_set.h @@ -29,14 +29,19 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" #include "mongo/client/sdam/sdam_datatypes.h" #include "mongo/db/repl/member_config.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/dbtests/mock/mock_remote_db_server.h" - -#include -#include -#include +#include "mongo/util/clock_source.h" +#include "mongo/util/net/hostandport.h" namespace mongo { @@ -57,7 +62,7 @@ class ClockSource; class MockReplicaSet { public: /** - * Creates a mock replica set and automatically mocks the isMaster and replSetGetStatus commands + * Creates a mock replica set and automatically mocks the hello and replSetGetStatus commands * based on the default replica set configuration. Either the first node is primary and the * others are secondaries, or all are secondaries. By default, hostnames begin with "$", which * signals to ReplicaSetMonitor and to ConnectionString::connect that these are mocked hosts. @@ -87,12 +92,11 @@ class MockReplicaSet { std::vector getSecondaries() const; /** - * Sets the configuration for this replica sets. This also has a side effect - * of mocking the ismaster and replSetGetStatus command responses based on - * the new config. + * Sets the configuration for this replica sets. This also has a side effect of mocking the + * hello and replSetGetStatus command responses based on the new config. * - * Note: does not automatically select a new primary. Can be done manually by - * calling setPrimary. + * Note: does not automatically select a new primary. Can be done manually by calling + * setPrimary. */ void setConfig(const repl::ReplSetConfig& newConfig); @@ -138,10 +142,9 @@ class MockReplicaSet { typedef std::map ReplNodeMap; /** - * Mocks the ismaster command based on the information on the current - * replica set configuration. + * Mocks the "hello" command based on the information on the current replica set configuration. */ - void mockIsMasterCmd(); + void mockHelloCmd(); /** * Mock the hello response for the given server. 
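With MockReplicaSet now registering a `hello` reply instead of `ismaster`/`isMaster`, callers observe the `isWritablePrimary` field, as the mock_dbclient_conn_test.cpp hunks below exercise. A minimal sketch of the round trip, assuming only the mock classes shown in these files; the server name, database name, and reply values are illustrative:

```cpp
#include <boost/none.hpp>

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/database_name.h"
#include "mongo/dbtests/mock/mock_dbclient_connection.h"
#include "mongo/dbtests/mock/mock_remote_db_server.h"
#include "mongo/unittest/assert.h"

// Sketch only: a mocked node now answers the "hello" command rather than "ismaster"/"isMaster".
void exampleHelloRoundTrip() {
    MockRemoteDBServer server("test");
    server.setCommandReply(
        "hello", BSON("ok" << 1 << "isWritablePrimary" << true << "secondary" << false));

    MockDBClientConnection conn(&server);
    BSONObj response;
    ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"),
                           BSON("hello" << 1),
                           response));
    // The reply reports writable-primary state via "isWritablePrimary" instead of "ismaster".
    ASSERT(response["isWritablePrimary"].trueValue());
}
```

This mirrors the updated CyclingCmd and MultipleStoredResponse tests below, which set and query the mocked `hello` reply in the same way.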
diff --git a/src/mongo/dbtests/mock_dbclient_conn_test.cpp b/src/mongo/dbtests/mock_dbclient_conn_test.cpp index 30ce38d1084d4..975f8b228a00e 100644 --- a/src/mongo/dbtests/mock_dbclient_conn_test.cpp +++ b/src/mongo/dbtests/mock_dbclient_conn_test.cpp @@ -31,18 +31,47 @@ * This file includes integration testing between the MockDBClientBase and MockRemoteDB. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/db/jsobj.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/tenant_id.h" #include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/unittest/unittest.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/net/socket_exception.h" +#include "mongo/util/net/ssl_options.h" +#include "mongo/util/time_support.h" #include "mongo/util/timer.h" -#include -#include -#include - using std::string; using std::vector; @@ -203,7 +232,7 @@ TEST(MockDBClientConnTest, InsertAndQueryTwice) { TEST(MockDBClientConnTest, QueryWithNoResults) { MockRemoteDBServer server("test"); - const NamespaceString nss("test.user"); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.user"); server.insert(nss, BSON("x" << 1)); MockDBClientConnection conn(&server); @@ -378,7 +407,7 @@ TEST(MockDBClientConnTest, MultiNSRemove) { TEST(MockDBClientConnTest, InsertAfterRemove) { MockRemoteDBServer server("test"); - const NamespaceString nss("test.user"); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.user"); { MockDBClientConnection conn(&server); @@ -420,7 +449,9 @@ TEST(MockDBClientConnTest, SetCmdReply) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand({boost::none, "foo"}, BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("serverStatus" << 1), + response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("local", response["host"].str()); @@ -431,7 +462,9 @@ TEST(MockDBClientConnTest, SetCmdReply) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand({boost::none, "foo"}, BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("serverStatus" << 1), + response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("local", response["host"].str()); @@ -441,7 +474,9 @@ TEST(MockDBClientConnTest, SetCmdReply) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand({boost::none, "foo"}, BSON("serverStatus" << 1), response)); + 
ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("serverStatus" << 1), + response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("local", response["host"].str()); @@ -453,23 +488,25 @@ TEST(MockDBClientConnTest, CyclingCmd) { MockRemoteDBServer server("test"); { - vector> isMasterSequence; - isMasterSequence.push_back(BSON("set" - << "a" - << "isMaster" << true << "ok" << 1)); - isMasterSequence.push_back(BSON("set" - << "a" - << "isMaster" << false << "ok" << 1)); - server.setCommandReply("isMaster", isMasterSequence); + vector> helloReplySequence; + helloReplySequence.push_back(BSON("set" + << "a" + << "isWritablePrimary" << true << "ok" << 1)); + helloReplySequence.push_back(BSON("set" + << "a" + << "isWritablePrimary" << false << "ok" << 1)); + server.setCommandReply("hello", helloReplySequence); } { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand({boost::none, "foo"}, BSON("isMaster" << 1), response)); + ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), + response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("a", response["set"].str()); - ASSERT(response["isMaster"].trueValue()); + ASSERT(response["isWritablePrimary"].trueValue()); ASSERT_EQUALS(1U, server.getCmdCount()); } @@ -477,10 +514,12 @@ TEST(MockDBClientConnTest, CyclingCmd) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand({boost::none, "foo"}, BSON("isMaster" << 1), response)); + ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), + response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("a", response["set"].str()); - ASSERT(!response["isMaster"].trueValue()); + ASSERT(!response["isWritablePrimary"].trueValue()); ASSERT_EQUALS(2U, server.getCmdCount()); } @@ -488,10 +527,12 @@ TEST(MockDBClientConnTest, CyclingCmd) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand({boost::none, "foo"}, BSON("isMaster" << 1), response)); + ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), + response)); ASSERT_EQUALS(1, response["ok"].numberInt()); ASSERT_EQUALS("a", response["set"].str()); - ASSERT(response["isMaster"].trueValue()); + ASSERT(response["isWritablePrimary"].trueValue()); ASSERT_EQUALS(3U, server.getCmdCount()); } @@ -500,21 +541,22 @@ TEST(MockDBClientConnTest, CyclingCmd) { TEST(MockDBClientConnTest, MultipleStoredResponse) { MockRemoteDBServer server("test"); server.setCommandReply("serverStatus", BSON("ok" << 0)); - server.setCommandReply("isMaster", BSON("ok" << 1 << "secondary" << false)); + server.setCommandReply("hello", BSON("ok" << 1 << "secondary" << false)); MockDBClientConnection conn(&server); { BSONObj response; - ASSERT(conn.runCommand({boost::none, "foo"}, - BSON("isMaster" - << "abc"), + ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), response)); ASSERT(!response["secondary"].trueValue()); } { BSONObj response; - ASSERT(!conn.runCommand({boost::none, "a"}, BSON("serverStatus" << 1), response)); + ASSERT(!conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "a"), + BSON("serverStatus" << 1), + response)); } } @@ -527,14 +569,18 @@ TEST(MockDBClientConnTest, CmdCount) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand({boost::none, "foo"}, 
BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("serverStatus" << 1), + response)); ASSERT_EQUALS(1U, server.getCmdCount()); } { MockDBClientConnection conn(&server); BSONObj response; - ASSERT(conn.runCommand({boost::none, "baz"}, BSON("serverStatus" << 1), response)); + ASSERT(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "baz"), + BSON("serverStatus" << 1), + response)); ASSERT_EQUALS(2U, server.getCmdCount()); } } @@ -558,7 +604,9 @@ TEST(MockDBClientConnTest, Shutdown) { { MockDBClientConnection conn(&server); BSONObj response; - ASSERT_THROWS(conn.runCommand({boost::none, "test"}, BSON("serverStatus" << 1), response), + ASSERT_THROWS(conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "test"), + BSON("serverStatus" << 1), + response), mongo::NetworkException); } @@ -576,7 +624,9 @@ TEST(MockDBClientConnTest, Restart) { // new instance still has it conn1.find(FindCommandRequest(NamespaceString::createNamespaceString_forTest("test.user"))); BSONObj response; - conn1.runCommand({boost::none, "test"}, BSON("serverStatus" << 1), response); + conn1.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "test"), + BSON("serverStatus" << 1), + response); server.shutdown(); ASSERT_THROWS( @@ -620,7 +670,9 @@ TEST(MockDBClientConnTest, ClearCounter) { conn.find(FindCommandRequest( FindCommandRequest(NamespaceString::createNamespaceString_forTest("test.user")))); BSONObj response; - conn.runCommand({boost::none, "test"}, BSON("serverStatus" << 1), response); + conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "test"), + BSON("serverStatus" << 1), + response); server.clearCounters(); ASSERT_EQUALS(0U, server.getQueryCount()); @@ -647,7 +699,9 @@ TEST(MockDBClientConnTest, Delay) { { mongo::Timer timer; BSONObj response; - conn.runCommand({boost::none, "x"}, BSON("serverStatus" << 1), response); + conn.runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "x"), + BSON("serverStatus" << 1), + response); const int nowInMilliSec = timer.millis(); ASSERT_GREATER_THAN_OR_EQUALS(nowInMilliSec, 130); } diff --git a/src/mongo/dbtests/mock_replica_set_test.cpp b/src/mongo/dbtests/mock_replica_set_test.cpp index 1d2630d99a59d..049f8411f10a2 100644 --- a/src/mongo/dbtests/mock_replica_set_test.cpp +++ b/src/mongo/dbtests/mock_replica_set_test.cpp @@ -27,14 +27,33 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/dbtests/mock/mock_dbclient_connection.h" -#include "mongo/dbtests/mock/mock_replica_set.h" -#include "mongo/unittest/unittest.h" - +#include +#include #include #include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/mutable/mutable_bson_test_utils.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/pipeline/expression.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_id.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/mock/mock_dbclient_connection.h" +#include "mongo/dbtests/mock/mock_remote_db_server.h" +#include "mongo/dbtests/mock/mock_replica_set.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" using mongo::BSONArrayBuilder; using mongo::BSONElement; @@ -71,7 +90,7 @@ TEST(MockReplicaSetTest, GetNode) { ASSERT(replSet.getNode("$n3:27017") == nullptr); } -TEST(MockReplicaSetTest, IsMasterNode0) { +TEST(MockReplicaSetTest, HelloNode0) { MockReplicaSet replSet("n", 3); set expectedHosts; expectedHosts.insert("$n0:27017"); @@ -81,10 +100,12 @@ TEST(MockReplicaSetTest, IsMasterNode0) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n0:27017"); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), + cmdResponse); ASSERT(ok); - ASSERT(cmdResponse["ismaster"].trueValue()); + ASSERT(cmdResponse["isWritablePrimary"].trueValue()); ASSERT(!cmdResponse["secondary"].trueValue()); ASSERT_EQUALS("$n0:27017", cmdResponse["me"].str()); ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str()); @@ -99,7 +120,7 @@ TEST(MockReplicaSetTest, IsMasterNode0) { ASSERT(expectedHosts == hostList); } -TEST(MockReplicaSetTest, IsMasterNode1) { +TEST(MockReplicaSetTest, HelloNode1) { MockReplicaSet replSet("n", 3); set expectedHosts; expectedHosts.insert("$n0:27017"); @@ -109,10 +130,12 @@ TEST(MockReplicaSetTest, IsMasterNode1) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n1:27017"); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), + cmdResponse); ASSERT(ok); - ASSERT(!cmdResponse["ismaster"].trueValue()); + ASSERT(!cmdResponse["isWritablePrimary"].trueValue()); ASSERT(cmdResponse["secondary"].trueValue()); ASSERT_EQUALS("$n1:27017", cmdResponse["me"].str()); ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str()); @@ -127,7 +150,7 @@ TEST(MockReplicaSetTest, IsMasterNode1) { ASSERT(expectedHosts == hostList); } -TEST(MockReplicaSetTest, IsMasterNode2) { +TEST(MockReplicaSetTest, HelloNode2) { MockReplicaSet replSet("n", 3); set expectedHosts; expectedHosts.insert("$n0:27017"); @@ -137,10 +160,12 @@ TEST(MockReplicaSetTest, IsMasterNode2) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n2:27017"); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse); + 
mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), + cmdResponse); ASSERT(ok); - ASSERT(!cmdResponse["ismaster"].trueValue()); + ASSERT(!cmdResponse["isWritablePrimary"].trueValue()); ASSERT(cmdResponse["secondary"].trueValue()); ASSERT_EQUALS("$n2:27017", cmdResponse["me"].str()); ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str()); @@ -165,7 +190,9 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode0) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n0:27017"); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("replSetGetStatus" << 1), + cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); @@ -198,7 +225,9 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode1) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n1:27017"); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("replSetGetStatus" << 1), + cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); @@ -233,7 +262,9 @@ TEST(MockReplicaSetTest, ReplSetGetStatusNode2) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n2:27017"); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("replSetGetStatus" << 1), + cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); @@ -290,7 +321,7 @@ ReplSetConfig _getConfigWithMemberRemoved(const ReplSetConfig& oldConfig, } } // namespace -TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) { +TEST(MockReplicaSetTest, HelloReconfigNodeRemoved) { MockReplicaSet replSet("n", 3); ReplSetConfig oldConfig = replSet.getReplConfig(); @@ -299,14 +330,16 @@ TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) { replSet.setConfig(newConfig); { - // Check isMaster for node still in set + // Check that node is still a writable primary. BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n0:27017"); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), + cmdResponse); ASSERT(ok); - ASSERT(cmdResponse["ismaster"].trueValue()); + ASSERT(cmdResponse["isWritablePrimary"].trueValue()); ASSERT(!cmdResponse["secondary"].trueValue()); ASSERT_EQUALS("$n0:27017", cmdResponse["me"].str()); ASSERT_EQUALS("$n0:27017", cmdResponse["primary"].str()); @@ -327,14 +360,16 @@ TEST(MockReplicaSetTest, IsMasterReconfigNodeRemoved) { } { - // Check isMaster for node still not in set anymore + // Check node is no longer a writable primary. 
BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode(hostToRemove); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("ismaster" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("hello" << 1), + cmdResponse); ASSERT(ok); - ASSERT(!cmdResponse["ismaster"].trueValue()); + ASSERT(!cmdResponse["isWritablePrimary"].trueValue()); ASSERT(!cmdResponse["secondary"].trueValue()); ASSERT_EQUALS(hostToRemove, cmdResponse["me"].str()); ASSERT_EQUALS("n", cmdResponse["setName"].str()); @@ -354,7 +389,9 @@ TEST(MockReplicaSetTest, replSetGetStatusReconfigNodeRemoved) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode("$n2:27017"); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("replSetGetStatus" << 1), + cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); @@ -387,7 +424,9 @@ TEST(MockReplicaSetTest, replSetGetStatusReconfigNodeRemoved) { BSONObj cmdResponse; MockRemoteDBServer* node = replSet.getNode(hostToRemove); bool ok = MockDBClientConnection(node).runCommand( - {boost::none, "foo"}, BSON("replSetGetStatus" << 1), cmdResponse); + mongo::DatabaseName::createDatabaseName_forTest(boost::none, "foo"), + BSON("replSetGetStatus" << 1), + cmdResponse); ASSERT(ok); ASSERT_EQUALS("n", cmdResponse["set"].str()); diff --git a/src/mongo/dbtests/multikey_paths_test.cpp b/src/mongo/dbtests/multikey_paths_test.cpp index 5aac5e9d65634..5e04812bf4b18 100644 --- a/src/mongo/dbtests/multikey_paths_test.cpp +++ b/src/mongo/dbtests/multikey_paths_test.cpp @@ -27,17 +27,49 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/index/index_descriptor.h" #include "mongo/db/index/multikey_paths.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/db/update/document_diff_applier.h" #include "mongo/db/update/document_diff_calculator.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/shared_buffer.h" #include "mongo/util/str.h" namespace mongo { @@ -53,7 +85,8 @@ const auto kIndexVersion = IndexDescriptor::IndexVersion::kV2; */ class MultikeyPathsTest : public unittest::Test { public: - MultikeyPathsTest() : 
_nss("unittests.multikey_paths") {} + MultikeyPathsTest() + : _nss(NamespaceString::createNamespaceString_forTest("unittests.multikey_paths")) {} void setUp() final { AutoGetCollection autoColl(_opCtx.get(), _nss, MODE_IX); @@ -85,7 +118,7 @@ class MultikeyPathsTest : public unittest::Test { Status createIndex(const CollectionPtr& collection, BSONObj indexSpec) { - return dbtests::createIndexFromSpec(_opCtx.get(), collection->ns().ns(), indexSpec); + return dbtests::createIndexFromSpec(_opCtx.get(), collection->ns().ns_forTest(), indexSpec); } void assertMultikeyPaths(const CollectionPtr& collection, @@ -260,6 +293,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdate) { oldDoc, BSON("_id" << 0 << "a" << 5 << "b" << BSON_ARRAY(1 << 2 << 3)), collection_internal::kUpdateAllIndexes, + nullptr /* indexesAffected */, opDebug, &args); wuow.commit(); @@ -297,8 +331,8 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdateWithDamages) { auto oldDoc = collection->docFor(_opCtx.get(), record->id); auto newDoc = BSON("_id" << 0 << "a" << 5 << "b" << BSON_ARRAY(1 << 2 << 3)); - auto diffResult = doc_diff::computeOplogDiff(oldDoc.value(), newDoc, 0, nullptr); - auto damagesOutput = doc_diff::computeDamages(oldDoc.value(), diffResult->diff, false); + auto diffResult = doc_diff::computeOplogDiff(oldDoc.value(), newDoc, 0); + auto damagesOutput = doc_diff::computeDamages(oldDoc.value(), *diffResult, false); { WriteUnitOfWork wuow(_opCtx.get()); OpDebug* opDebug = nullptr; @@ -311,6 +345,7 @@ TEST_F(MultikeyPathsTest, PathsUpdatedOnDocumentUpdateWithDamages) { damagesOutput.damageSource.get(), damagesOutput.damages, collection_internal::kUpdateAllIndexes, + nullptr /* indexesAffected */, opDebug, &args); ASSERT_TRUE(newDocResult.getValue().woCompare(newDoc) == 0); diff --git a/src/mongo/dbtests/pdfiletests.cpp b/src/mongo/dbtests/pdfiletests.cpp index 89f71a624f9cd..882067aa65976 100644 --- a/src/mongo/dbtests/pdfiletests.cpp +++ b/src/mongo/dbtests/pdfiletests.cpp @@ -27,12 +27,37 @@ * it in the license file. 
*/ +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" -#include "mongo/db/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/insert.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace PdfileTests { @@ -52,7 +77,7 @@ class Base { protected: static NamespaceString nss() { - return NamespaceString("unittests.pdfiletests.Insert"); + return NamespaceString::createNamespaceString_forTest("unittests.pdfiletests.Insert"); } CollectionPtr collection() { return CollectionPtr( diff --git a/src/mongo/dbtests/plan_executor_invalidation_test.cpp b/src/mongo/dbtests/plan_executor_invalidation_test.cpp index 6c552e9e3a47a..b24c836332c38 100644 --- a/src/mongo/dbtests/plan_executor_invalidation_test.cpp +++ b/src/mongo/dbtests/plan_executor_invalidation_test.cpp @@ -27,27 +27,52 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/client/dbclient_cursor.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/collection_yield_restore.h" -#include "mongo/db/catalog/database.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/json.h" -#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/internal_plans.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_planner_params.h" #include "mongo/db/service_context.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // 
IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -64,7 +89,7 @@ class PlanExecutorInvalidationTest : public unittest::Test { public: PlanExecutorInvalidationTest() : _client(&_opCtx), _expCtx(make_intrusive(&_opCtx, nullptr, nss)) { - _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns())); + _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns_forTest())); _client.dropCollection(nss); for (int i = 0; i < N(); ++i) { @@ -75,7 +100,7 @@ class PlanExecutorInvalidationTest : public unittest::Test { } /** - * Return a plan executor that is going over the collection in nss.ns(). + * Return a plan executor that is going over the collection in nss.ns_forTest(). */ std::unique_ptr getCollscan() { unique_ptr ws(new WorkingSet()); @@ -83,7 +108,7 @@ class PlanExecutorInvalidationTest : public unittest::Test { params.direction = CollectionScanParams::FORWARD; params.tailable = false; unique_ptr scan( - new CollectionScan(_expCtx.get(), collection(), params, ws.get(), nullptr)); + new CollectionScan(_expCtx.get(), &collection(), params, ws.get(), nullptr)); // Create a plan executor to hold it auto findCommand = std::make_unique(nss); @@ -124,13 +149,14 @@ class PlanExecutorInvalidationTest : public unittest::Test { } bool dropDatabase(const std::string& dbname) { - bool res = _client.dropDatabase({boost::none, dbname}); + bool res = + _client.dropDatabase(DatabaseName::createDatabaseName_forTest(boost::none, dbname)); _refreshCollection(); return res; } - bool dropCollection(const std::string& ns) { - bool res = _client.dropCollection(NamespaceString(ns)); + bool dropCollection(StringData ns) { + bool res = _client.dropCollection(NamespaceString::createNamespaceString_forTest(ns)); _refreshCollection(); return res; } @@ -148,8 +174,8 @@ class PlanExecutorInvalidationTest : public unittest::Test { void renameCollection(const std::string& to) { BSONObj info; ASSERT_TRUE(_client.runCommand( - DatabaseName(boost::none, "admin"), - BSON("renameCollection" << nss.ns() << "to" << to << "dropTarget" << true), + DatabaseName::kAdmin, + BSON("renameCollection" << nss.ns_forTest() << "to" << to << "dropTarget" << true), info)); _refreshCollection(); } @@ -252,7 +278,7 @@ TEST_F(PlanExecutorInvalidationTest, PlanExecutorThrowsOnRestoreWhenCollectionIs exec->saveState(); - dropCollection(nss.ns()); + dropCollection(nss.ns_forTest()); ASSERT_THROWS_CODE(exec->restoreState(&collection()), DBException, ErrorCodes::QueryPlanKilled); } @@ -261,7 +287,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenAllIndicesDro auto exec = getCollscan(); BSONObj obj; - ASSERT_OK(createIndex(&_opCtx, nss.ns(), BSON("foo" << 1))); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), BSON("foo" << 1))); // Read some of it. for (int i = 0; i < 10; ++i) { @@ -284,7 +310,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenOneIndexDropp auto exec = getCollscan(); BSONObj obj; - ASSERT_OK(createIndex(&_opCtx, nss.ns(), BSON("foo" << 1))); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), BSON("foo" << 1))); // Read some of it. 
for (int i = 0; i < 10; ++i) { @@ -305,10 +331,10 @@ TEST_F(PlanExecutorInvalidationTest, CollScanExecutorDoesNotDieWhenOneIndexDropp TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenAllIndexesDropped) { BSONObj keyPattern = BSON("foo" << 1); - ASSERT_OK(createIndex(&_opCtx, nss.ns(), keyPattern)); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), keyPattern)); // Create a second index which is not used by the plan executor. - ASSERT_OK(createIndex(&_opCtx, nss.ns(), BSON("bar" << 1))); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), BSON("bar" << 1))); auto exec = makeIxscanPlan(keyPattern, BSON("foo" << 0), BSON("foo" << N())); @@ -329,7 +355,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenAllIndexesDropped) { TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenIndexBeingScannedIsDropped) { BSONObj keyPattern = BSON("foo" << 1); - ASSERT_OK(createIndex(&_opCtx, nss.ns(), keyPattern)); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), keyPattern)); auto exec = makeIxscanPlan(keyPattern, BSON("foo" << 0), BSON("foo" << N())); @@ -351,8 +377,8 @@ TEST_F(PlanExecutorInvalidationTest, IxscanExecutorDiesWhenIndexBeingScannedIsDr TEST_F(PlanExecutorInvalidationTest, IxscanExecutorSurvivesWhenUnrelatedIndexIsDropped) { BSONObj keyPatternFoo = BSON("foo" << 1); BSONObj keyPatternBar = BSON("bar" << 1); - ASSERT_OK(createIndex(&_opCtx, nss.ns(), keyPatternFoo)); - ASSERT_OK(createIndex(&_opCtx, nss.ns(), keyPatternBar)); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), keyPatternFoo)); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), keyPatternBar)); auto exec = makeIxscanPlan(keyPatternFoo, BSON("foo" << 0), BSON("foo" << N())); @@ -392,7 +418,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorThrowsOnRestoreWhenDatabaseIsDroppe // requires a "global write lock." _ctx.reset(); dropDatabase("somesillydb"); - _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns())); + _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns_forTest())); exec->restoreState(&collection()); ASSERT_EQUALS(PlanExecutor::ADVANCED, exec->getNext(&obj, nullptr)); @@ -403,7 +429,7 @@ TEST_F(PlanExecutorInvalidationTest, ExecutorThrowsOnRestoreWhenDatabaseIsDroppe // Drop our DB. Once again, must give up the lock. _ctx.reset(); dropDatabase("unittests"); - _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns())); + _ctx.reset(new dbtests::WriteContextForTests(&_opCtx, nss.ns_forTest())); ASSERT_THROWS_CODE(exec->restoreState(&collection()), DBException, ErrorCodes::QueryPlanKilled); } @@ -428,7 +454,7 @@ TEST_F(PlanExecutorInvalidationTest, CollScanDiesOnCollectionRenameWithinDatabas // TODO SERVER-31695: Allow PlanExecutors to remain valid after collection rename. 
TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase) { BSONObj keyPattern = BSON("foo" << 1); - ASSERT_OK(createIndex(&_opCtx, nss.ns(), keyPattern)); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), keyPattern)); auto exec = makeIxscanPlan(keyPattern, BSON("foo" << 0), BSON("foo" << N())); @@ -448,7 +474,7 @@ TEST_F(PlanExecutorInvalidationTest, IxscanDiesOnCollectionRenameWithinDatabase) TEST_F(PlanExecutorInvalidationTest, IxscanDiesWhenTruncateCollectionDropsAllIndices) { BSONObj keyPattern = BSON("foo" << 1); - ASSERT_OK(createIndex(&_opCtx, nss.ns(), keyPattern)); + ASSERT_OK(createIndex(&_opCtx, nss.ns_forTest(), keyPattern)); auto exec = makeIxscanPlan(keyPattern, BSON("foo" << 0), BSON("foo" << N())); diff --git a/src/mongo/dbtests/plan_ranking.cpp b/src/mongo/dbtests/plan_ranking.cpp index 6cac295925dcb..c98f76a95d320 100644 --- a/src/mongo/dbtests/plan_ranking.cpp +++ b/src/mongo/dbtests/plan_ranking.cpp @@ -31,25 +31,55 @@ * This file tests db/query/plan_ranker.cpp and db/query/multi_plan_runner.cpp. */ -#include "mongo/client/dbclient_cursor.h" -#include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/multi_plan.h" -#include "mongo/db/exec/trial_period_utils.h" -#include "mongo/db/index/index_descriptor.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/mock_yield_policies.h" +#include "mongo/db/query/plan_cache.h" +#include "mongo/db/query/plan_cache_debug_info.h" +#include "mongo/db/query/plan_cache_key_factory.h" +#include "mongo/db/query/plan_ranking_decision.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_lib.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/stage_builder_util.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -64,7 +94,8 @@ extern AtomicWord internalQueryPlanEvaluationMaxResults; namespace PlanRankingTests { -static const NamespaceString nss("unittests.PlanRankingTests"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.PlanRankingTests"); class PlanRankingTestBase { public: @@ -78,7 +109,7 @@ class PlanRankingTestBase { // Ensure N is significantly larger then internalQueryPlanEvaluationWorks. 
ASSERT_GTE(N, internalQueryPlanEvaluationWorks.load() + 1000); - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); _client.dropCollection(nss); } @@ -89,12 +120,12 @@ class PlanRankingTestBase { } void insert(const BSONObj& obj) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); _client.insert(nss, obj); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns_forTest(), obj)); } /** @@ -117,16 +148,16 @@ class PlanRankingTestBase { ASSERT_GREATER_THAN_OR_EQUALS(solutions.size(), 1U); // Fill out the MPR. - _mps.reset(new MultiPlanStage(_expCtx.get(), collection.getCollection(), cq)); + _mps.reset(new MultiPlanStage(_expCtx.get(), &collection.getCollection(), cq)); std::unique_ptr ws(new WorkingSet()); // Put each solution from the planner into the MPR. for (size_t i = 0; i < solutions.size(); ++i) { auto&& root = stage_builder::buildClassicExecutableTree( - &_opCtx, collection.getCollection(), *cq, *solutions[i], ws.get()); + &_opCtx, &collection.getCollection(), *cq, *solutions[i], ws.get()); _mps->addPlan(std::move(solutions[i]), std::move(root), ws.get()); } // This is what sets a backup plan, should we test for it. - NoopYieldPolicy yieldPolicy(_opCtx.getServiceContext()->getFastClockSource()); + NoopYieldPolicy yieldPolicy(&_opCtx, _opCtx.getServiceContext()->getFastClockSource()); _mps->pickBestPlan(&yieldPolicy).transitional_ignore(); ASSERT(_mps->bestPlanChosen()); @@ -282,7 +313,7 @@ class PlanRankingIntersectOverride : public PlanRankingTestBase { auto findCommand = std::make_unique(nss); findCommand->setFilter(BSON("a" << 100 << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand)); - verify(statusWithCQ.isOK()); + MONGO_verify(statusWithCQ.isOK()); cq = std::move(statusWithCQ.getValue()); ASSERT(cq.get()); } @@ -302,7 +333,7 @@ class PlanRankingIntersectOverride : public PlanRankingTestBase { auto findCommand = std::make_unique(nss); findCommand->setFilter(BSON("a" << 100 << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand)); - verify(statusWithCQ.isOK()); + MONGO_verify(statusWithCQ.isOK()); cq = std::move(statusWithCQ.getValue()); } @@ -337,7 +368,7 @@ class PlanRankingIntersectWithBackup : public PlanRankingTestBase { auto findCommand = std::make_unique(nss); findCommand->setFilter(BSON("a" << 1 << "b" << BSON("$gt" << 1))); auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand)); - verify(statusWithCQ.isOK()); + MONGO_verify(statusWithCQ.isOK()); std::unique_ptr cq = std::move(statusWithCQ.getValue()); ASSERT(nullptr != cq.get()); @@ -486,7 +517,7 @@ class PlanRankingPreferImmediateEOF : public PlanRankingTestBase { auto findCommand = std::make_unique(nss); findCommand->setFilter(BSON("a" << N + 1 << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand)); - verify(statusWithCQ.isOK()); + MONGO_verify(statusWithCQ.isOK()); std::unique_ptr cq = std::move(statusWithCQ.getValue()); ASSERT(nullptr != cq.get()); @@ -523,7 +554,7 @@ class PlanRankingPreferImmediateEOFAgainstHashed : public PlanRankingTestBase { auto findCommand = std::make_unique(nss); findCommand->setFilter(BSON("a" << BSON("$gte" << N + 1) << "b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand)); - 
verify(statusWithCQ.isOK()); + MONGO_verify(statusWithCQ.isOK()); std::unique_ptr cq = std::move(statusWithCQ.getValue()); ASSERT(nullptr != cq.get()); @@ -584,7 +615,7 @@ class PlanRankingCollscan : public PlanRankingTestBase { auto findCommand = std::make_unique(nss); findCommand->setFilter(BSON("foo" << 2001)); auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand)); - verify(statusWithCQ.isOK()); + MONGO_verify(statusWithCQ.isOK()); std::unique_ptr cq = std::move(statusWithCQ.getValue()); ASSERT(nullptr != cq.get()); diff --git a/src/mongo/dbtests/query_plan_executor.cpp b/src/mongo/dbtests/query_plan_executor.cpp index bce758439babd..48c4bc1e4a5d1 100644 --- a/src/mongo/dbtests/query_plan_executor.cpp +++ b/src/mongo/dbtests/query_plan_executor.cpp @@ -28,34 +28,58 @@ */ -#include "mongo/platform/basic.h" - #include - +#include +#include +#include + +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" -#include "mongo/db/clientcursor.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/fetch.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/plan_stage.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/db/json.h" -#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source_cursor.h" -#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" -#include "mongo/db/pipeline/plan_executor_pipeline.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/multiple_collection_accessor.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_planner_params.h" -#include "mongo/db/query/query_solution.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { @@ -64,7 +88,8 @@ using std::shared_ptr; using std::string; using std::unique_ptr; -static const NamespaceString nss("unittests.QueryPlanExecutor"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.QueryPlanExecutor"); class PlanExecutorTest : public unittest::Test { public: @@ -75,7 +100,7 @@ class PlanExecutorTest : public 
unittest::Test { } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns_forTest(), obj)); } void insert(const BSONObj& obj) { @@ -114,11 +139,11 @@ class PlanExecutorTest : public unittest::Test { auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(findCommand)); ASSERT_OK(statusWithCQ.getStatus()); unique_ptr cq = std::move(statusWithCQ.getValue()); - verify(nullptr != cq.get()); + MONGO_verify(nullptr != cq.get()); // Make the stage. unique_ptr root( - new CollectionScan(cq->getExpCtxRaw(), *coll, csparams, ws.get(), cq.get()->root())); + new CollectionScan(cq->getExpCtxRaw(), coll, csparams, ws.get(), cq.get()->root())); // Hand the plan off to the executor. auto statusWithPlanExecutor = plan_executor_factory::make(std::move(cq), @@ -153,16 +178,15 @@ class PlanExecutorTest : public unittest::Test { ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; unique_ptr ws(new WorkingSet()); - auto ixscan = - std::make_unique(_expCtx.get(), *coll, ixparams, ws.get(), nullptr); - unique_ptr root = std::make_unique( - _expCtx.get(), ws.get(), std::move(ixscan), nullptr, *coll); + auto ixscan = std::make_unique(_expCtx.get(), coll, ixparams, ws.get(), nullptr); + unique_ptr root = + std::make_unique(_expCtx.get(), ws.get(), std::move(ixscan), nullptr, coll); auto findCommand = std::make_unique(nss); auto statusWithCQ = CanonicalQuery::canonicalize(&_opCtx, std::move(findCommand)); - verify(statusWithCQ.isOK()); + MONGO_verify(statusWithCQ.isOK()); unique_ptr cq = std::move(statusWithCQ.getValue()); - verify(nullptr != cq.get()); + MONGO_verify(nullptr != cq.get()); // Hand the plan off to the executor. auto statusWithPlanExecutor = @@ -201,7 +225,7 @@ class PlanExecutorTest : public unittest::Test { * Test dropping the collection while an agg PlanExecutor is doing an index scan. 
*/ TEST_F(PlanExecutorTest, DropIndexScanAgg) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); insert(BSON("_id" << 1 << "a" << 6)); insert(BSON("_id" << 2 << "a" << 7)); @@ -239,7 +263,7 @@ TEST_F(PlanExecutorTest, DropIndexScanAgg) { } TEST_F(PlanExecutorTest, ShouldReportErrorIfExceedsTimeLimitDuringYield) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); insert(BSON("_id" << 1)); insert(BSON("_id" << 2)); @@ -256,7 +280,7 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfExceedsTimeLimitDuringYield) { } TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYieldButIsTailableAndAwaitData) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); insert(BSON("_id" << 1)); insert(BSON("_id" << 2)); @@ -276,7 +300,7 @@ TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYieldButIsTailableAndAwa } TEST_F(PlanExecutorTest, ShouldNotSwallowExceedsTimeLimitDuringYieldButIsTailableButNotAwaitData) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); insert(BSON("_id" << 1)); insert(BSON("_id" << 2)); @@ -296,7 +320,7 @@ TEST_F(PlanExecutorTest, ShouldNotSwallowExceedsTimeLimitDuringYieldButIsTailabl } TEST_F(PlanExecutorTest, ShouldReportErrorIfKilledDuringYield) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); insert(BSON("_id" << 1)); insert(BSON("_id" << 2)); @@ -363,7 +387,7 @@ class PlanExecutorSnapshotTest : public PlanExecutorTest { * scan. */ TEST_F(PlanExecutorSnapshotTest, SnapshotControl) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); setupCollection(); BSONObj filterObj = fromjson("{a: {$gte: 2}}"); @@ -387,7 +411,7 @@ TEST_F(PlanExecutorSnapshotTest, SnapshotControl) { * index scan. 
*/ TEST_F(PlanExecutorSnapshotTest, SnapshotTest) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); setupCollection(); BSONObj indexSpec = BSON("_id" << 1); addIndex(indexSpec); diff --git a/src/mongo/dbtests/query_stage_and.cpp b/src/mongo/dbtests/query_stage_and.cpp index ce7448a5f10db..e540e189e7772 100644 --- a/src/mongo/dbtests/query_stage_and.cpp +++ b/src/mongo/dbtests/query_stage_and.cpp @@ -33,26 +33,55 @@ */ -#include "mongo/platform/basic.h" - +#include #include - -#include "mongo/client/dbclient_cursor.h" +#include +#include +#include +#include + +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/and_hash.h" #include "mongo/db/exec/and_sorted.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/fetch.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/mock_stage.h" #include "mongo/db/exec/plan_stage.h" -#include "mongo/db/json.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/str.h" namespace QueryStageAnd { @@ -149,8 +178,8 @@ class QueryStageAndBase { return BSONObj(); } - const char* ns() { - return _nss.ns().c_str(); + StringData ns() { + return _nss.ns_forTest(); } const NamespaceString& nss() { return _nss; @@ -205,12 +234,12 @@ class QueryStageAndHashDeleteDuringYield : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar >= 10. params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // 'ah' reads the first child into its hash table: foo=20, foo=19, ..., foo=0 // in that order. Read half of them. 
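The hunks above in query_stage_and.cpp (and the ones that follow) repeat one mechanical change at every call site: plan stages such as IndexScan and FetchStage are now handed the collection by address (`&coll`) instead of by value. A minimal compile-only sketch of that call-site shape, using hypothetical stand-in types rather than the real MongoDB classes (the constructor signature is an assumption made for illustration):

```cpp
// Minimal sketch of the call-site migration repeated throughout these hunks:
// stages that previously took the collection handle by value now take it by
// address. The types below are hypothetical stand-ins, not the real MongoDB
// classes.
#include <memory>

struct ExpressionContext {};
struct CollectionPtr {};
struct IndexScanParams {};
struct WorkingSet {};

struct IndexScan {
    IndexScan(ExpressionContext*,
              const CollectionPtr*,  // new shape: pointer to the collection handle
              const IndexScanParams&,
              WorkingSet*,
              void* filter) {}
};

std::unique_ptr<IndexScan> makeScan(ExpressionContext* expCtx,
                                    const CollectionPtr& coll,
                                    const IndexScanParams& params,
                                    WorkingSet* ws) {
    // Old call site:  std::make_unique<IndexScan>(expCtx, coll,  params, ws, nullptr)
    // New call site:  std::make_unique<IndexScan>(expCtx, &coll, params, ws, nullptr)
    return std::make_unique<IndexScan>(expCtx, &coll, params, ws, nullptr);
}
```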
@@ -291,13 +320,13 @@ class QueryStageAndHashDeleteLookaheadDuringYield : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar <= 19 (descending). params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 19); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // First call to work reads the first result from the children. The first result for the // first scan over foo is {foo: 20, bar: 20, baz: 20}. The first result for the second scan @@ -371,13 +400,13 @@ class QueryStageAndHashTwoLeaf : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, so our values are: // foo == 10, 11, 12, 13, 14, 15. 16, 17, 18, 19, 20 @@ -420,13 +449,13 @@ class QueryStageAndHashTwoLeafFirstChildLargeKeys : public QueryStageAndBase { makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 20 << "" << big); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); ASSERT_THROWS_CODE(countResults(ah.get()), DBException, @@ -467,13 +496,13 @@ class QueryStageAndHashTwoLeafLastChildLargeKeys : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 10 << "" << big); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, so our values are: // foo == 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20. 
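The comment that closes the hunk above reasons about the expected index intersection: with one document per value of i where foo == bar == baz == i, combining foo <= 20 with bar >= 10 should leave exactly the eleven values 10 through 20. A small standalone check of that arithmetic (the loop bound of 50 is an assumption for the sketch, not taken from the diff):

```cpp
// Standalone check of the arithmetic in the comment above: documents are
// generated with foo == bar == i, so the conjunction foo <= 20 && bar >= 10
// matches exactly i = 10..20, i.e. 11 documents.
#include <iostream>

int main() {
    int matches = 0;
    for (int i = 0; i < 50; ++i) {  // assumed data-set size, for illustration only
        const int foo = i;
        const int bar = i;
        if (foo <= 20 && bar >= 10) {
            ++matches;
        }
    }
    std::cout << matches << '\n';  // prints 11
    return 0;
}
```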
@@ -508,18 +537,18 @@ class QueryStageAndHashThreeLeaf : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // 5 <= baz <= 15 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 15); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // foo == bar == baz, and foo<=20, bar>=10, 5<=baz<=15, so our values are: // foo == 10, 11, 12, 13, 14, 15. @@ -566,18 +595,18 @@ class QueryStageAndHashThreeLeafMiddleChildLargeKeys : public QueryStageAndBase auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1 << "big" << 1), coll)); params.bounds.startKey = BSON("" << 10 << "" << big); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // 5 <= baz <= 15 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 15); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Stage execution should fail. ASSERT_THROWS_CODE(countResults(ah.get()), @@ -613,13 +642,13 @@ class QueryStageAndHashWithNothing : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar == 5. Index scan should be eof. 
params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 5); params.bounds.endKey = BSON("" << 5); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); int count = 0; int works = 0; @@ -668,7 +697,7 @@ class QueryStageAndHashProducesNothing : public QueryStageAndBase { // Foo >= 100 auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 100); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar <= 100 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); @@ -680,7 +709,7 @@ class QueryStageAndHashProducesNothing : public QueryStageAndBase { << ""); params.bounds.boundInclusion = BoundInclusion::kIncludeStartKeyOnly; params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -716,18 +745,18 @@ class QueryStageAndHashFirstChildFetched : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - auto firstScan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto firstScan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); // First child of the AND_HASH stage is a Fetch. The NULL in the // constructor means there is no filter. auto fetch = - std::make_unique(_expCtx.get(), &ws, std::move(firstScan), nullptr, coll); + std::make_unique(_expCtx.get(), &ws, std::move(firstScan), nullptr, &coll); ah->addChild(std::move(fetch)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Check that the AndHash stage returns docs {foo: 10, bar: 10} // through {foo: 20, bar: 20}. @@ -769,17 +798,17 @@ class QueryStageAndHashSecondChildFetched : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.direction = -1; - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar >= 10 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 10); - auto secondScan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto secondScan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); // Second child of the AND_HASH stage is a Fetch. The NULL in the // constructor means there is no filter. 
auto fetch = - std::make_unique(_expCtx.get(), &ws, std::move(secondScan), nullptr, coll); + std::make_unique(_expCtx.get(), &ws, std::move(secondScan), nullptr, &coll); ah->addChild(std::move(fetch)); // Check that the AndHash stage returns docs {foo: 10, bar: 10} @@ -940,13 +969,13 @@ class QueryStageAndSortedDeleteDuringYield : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Scan over bar == 1. params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Get the set of RecordIds in our collection to use later. set data; @@ -1057,19 +1086,19 @@ class QueryStageAndSortedThreeLeaf : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // bar == 1 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // baz == 1 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("baz" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); ASSERT_EQUALS(50, countResults(ah.get())); } @@ -1102,13 +1131,13 @@ class QueryStageAndSortedWithNothing : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 7); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Bar == 20, not EOF. params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSON("" << 20); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -1145,13 +1174,13 @@ class QueryStageAndSortedProducesNothing : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 7); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // bar == 20. 
params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 20); params.bounds.endKey = BSON("" << 20); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); ASSERT_EQUALS(0, countResults(ah.get())); } @@ -1184,13 +1213,13 @@ class QueryStageAndSortedByLastChild : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // Intersect with 7 <= bar < 10000 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 7); params.bounds.endKey = BSON("" << 10000); - ah->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ah->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); WorkingSetID lastId = WorkingSet::INVALID_ID; @@ -1246,19 +1275,19 @@ class QueryStageAndSortedFirstChildFetched : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - auto firstScan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto firstScan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); // First child of the AND_SORTED stage is a Fetch. The NULL in the // constructor means there is no filter. auto fetch = - std::make_unique(_expCtx.get(), &ws, std::move(firstScan), nullptr, coll); + std::make_unique(_expCtx.get(), &ws, std::move(firstScan), nullptr, &coll); as->addChild(std::move(fetch)); // bar == 1 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - as->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + as->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); for (int i = 0; i < 50; i++) { BSONObj obj = getNext(as.get(), &ws); @@ -1299,18 +1328,18 @@ class QueryStageAndSortedSecondChildFetched : public QueryStageAndBase { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("foo" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - as->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + as->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); // bar == 1 params = makeIndexScanParams(&_opCtx, coll, getIndex(BSON("bar" << 1), coll)); params.bounds.startKey = BSON("" << 1); params.bounds.endKey = BSON("" << 1); - auto secondScan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto secondScan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); // Second child of the AND_SORTED stage is a Fetch. The NULL in the // constructor means there is no filter. 
auto fetch = - std::make_unique(_expCtx.get(), &ws, std::move(secondScan), nullptr, coll); + std::make_unique(_expCtx.get(), &ws, std::move(secondScan), nullptr, &coll); as->addChild(std::move(fetch)); for (int i = 0; i < 50; i++) { diff --git a/src/mongo/dbtests/query_stage_batched_delete.cpp b/src/mongo/dbtests/query_stage_batched_delete.cpp index ada2e2af46ca8..cd580bc1939ec 100644 --- a/src/mongo/dbtests/query_stage_batched_delete.cpp +++ b/src/mongo/dbtests/query_stage_batched_delete.cpp @@ -27,28 +27,64 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/batched_delete_stage.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/delete_stage.h" -#include "mongo/db/exec/queued_data_stage.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_noop.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/record_id.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" #include "mongo/db/storage/checkpointer.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/tick_source.h" #include "mongo/util/tick_source_mock.h" +#include "mongo/util/timer.h" namespace mongo { namespace QueryStageBatchedDelete { -static const NamespaceString nss("unittests.QueryStageBatchedDelete"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.QueryStageBatchedDelete"); // For the following tests, fix the targetBatchDocs to 10 documents. 
static const int targetBatchDocs = 10; @@ -65,7 +101,9 @@ class ClockAdvancingOpObserver : public OpObserverNoop { public: void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) override { + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) override { if (docDurationMap.find(doc) != docDurationMap.end()) { tickSource->advance(docDurationMap.find(doc)->second); @@ -172,13 +210,13 @@ class QueryStageBatchedDeleteTest : public unittest::Test { params.tailable = false; std::unique_ptr scan( - new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr)); + new CollectionScan(_expCtx.get(), &collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - verify(member->hasRecordId()); + MONGO_verify(member->hasRecordId()); out->push_back(member->recordId); } } @@ -194,14 +232,14 @@ class QueryStageBatchedDeleteTest : public unittest::Test { // Uses the default _expCtx tied to the test suite. std::unique_ptr makeBatchedDeleteStage( - WorkingSet* ws, const CollectionPtr& coll, CanonicalQuery* deleteParamsFilter = nullptr) { + WorkingSet* ws, CollectionAcquisition coll, CanonicalQuery* deleteParamsFilter = nullptr) { return makeBatchedDeleteStage(ws, coll, _expCtx.get(), deleteParamsFilter); } // Defaults batch params to be test defaults for targetBatchTimeMS and targetBatchDocs. std::unique_ptr makeBatchedDeleteStage( WorkingSet* ws, - const CollectionPtr& coll, + CollectionAcquisition coll, ExpressionContext* expCtx, CanonicalQuery* deleteParamsFilter = nullptr) { @@ -214,7 +252,7 @@ class QueryStageBatchedDeleteTest : public unittest::Test { std::unique_ptr makeBatchedDeleteStage( WorkingSet* ws, - const CollectionPtr& coll, + CollectionAcquisition coll, ExpressionContext* expCtx, std::unique_ptr batchedDeleteParams, CanonicalQuery* deleteParamsFilter = nullptr) { @@ -252,12 +290,14 @@ TickSourceMock* QueryStageBatchedDeleteTest::_tickSource = nullptr // Confirms batched deletes wait until a batch meets the targetBatchDocs before deleting documents. TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchDocsBasic) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); auto nDocs = 52; prePopulateCollection(nDocs); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); WorkingSet ws; auto deleteStage = makeBatchedDeleteStage(&ws, coll); @@ -288,16 +328,18 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchDocsBasic) { // state, BatchedDeleteStage's snapshot is incremented and it can see the document has been removed // and skips over it. TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeleted) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); auto nDocs = 11; prePopulateCollection(nDocs); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); // Get the RecordIds that would be returned by an in-order scan. 
std::vector recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(coll.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); WorkingSet ws; auto deleteStage = makeBatchedDeleteStage(&ws, coll); @@ -318,11 +360,12 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeleted) { { // Delete a document that has already been added to the delete batch. deleteStage->saveState(); - BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[pauseBatchingIdx - 2]).value(); + BSONObj targetDoc = + coll.getCollectionPtr()->docFor(&_opCtx, recordIds[pauseBatchingIdx - 2]).value(); ASSERT(!targetDoc.isEmpty()); remove(targetDoc); // Increases the snapshotId. - deleteStage->restoreState(&coll); + deleteStage->restoreState(&coll.getCollectionPtr()); } while ((state = deleteStage->work(&id)) != PlanStage::IS_EOF) { @@ -355,14 +398,17 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeletedWriteConflict auto nDocs = 11; prePopulateCollection(nDocs); - CollectionPtr coll(CollectionCatalog::get(batchedDeleteOpCtx.get()) - ->lookupCollectionByNamespace(batchedDeleteOpCtx.get(), nss)); - ASSERT(coll); + const auto coll = + acquireCollection(batchedDeleteOpCtx.get(), + CollectionAcquisitionRequest::fromOpCtx( + batchedDeleteOpCtx.get(), nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); // Get the RecordIds that would be returned by an in-order scan. std::vector recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(coll.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); WorkingSet ws; @@ -382,8 +428,9 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeletedWriteConflict } // Find the document to delete with the same OpertionContext that holds the locks. - BSONObj targetDoc = - coll->docFor(batchedDeleteOpCtx.get(), recordIds[pauseBatchingIdx - 2]).value(); + BSONObj targetDoc = coll.getCollectionPtr() + ->docFor(batchedDeleteOpCtx.get(), recordIds[pauseBatchingIdx - 2]) + .value(); ASSERT(!targetDoc.isEmpty()); { @@ -417,12 +464,14 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsDeletedWriteConflict // One of the staged documents is updated and then the BatchedDeleteStage increments its snapshot // before discovering the mismatch. TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsUpdatedToNotMatch) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); auto nDocs = 11; prePopulateCollection(nDocs); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); // Only delete documents whose 'a' field is greater than or equal to 0. const BSONObj query = BSON("a" << BSON("$gte" << 0)); @@ -451,7 +500,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsUpdatedToNotMatch) { BSONObj updateObj = BSON("a" << -1); update(queryObj, updateObj); // Increases the snapshotId. 
- deleteStage->restoreState(&coll); + deleteStage->restoreState(&coll.getCollectionPtr()); } while ((state = deleteStage->work(&id)) != PlanStage::IS_EOF) { @@ -484,10 +533,13 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsUpdatedToNotMatchCli auto nDocs = 11; prePopulateCollection(nDocs); - CollectionPtr coll(CollectionCatalog::get(batchedDeleteOpCtx.get()) - ->lookupCollectionByNamespace(batchedDeleteOpCtx.get(), nss)); - ASSERT(coll); + const auto coll = + acquireCollection(batchedDeleteOpCtx.get(), + CollectionAcquisitionRequest::fromOpCtx( + batchedDeleteOpCtx.get(), nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); // Only delete documents whose 'a' field is greater than or equal to 0. const BSONObj query = BSON("a" << BSON("$gte" << 0)); @@ -541,7 +593,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteStagedDocIsUpdatedToNotMatchCli // Tests targetBatchTimeMS is enforced. TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSBasic) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); std::vector> timedBatch0{ {BSON("_id" << 1 << "a" << 1), Milliseconds(2)}, @@ -560,8 +612,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSBasic) { int batchSize1 = timedBatch1.size(); int nDocs = batchSize0 + batchSize1; - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); WorkingSet ws; auto deleteStage = makeBatchedDeleteStage(&ws, coll); @@ -606,7 +661,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSBasic) { // Tests when the total time it takes to delete targetBatchDocs exceeds targetBatchTimeMS. TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSWithTargetBatchDocs) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); std::vector> timedBatch0{ {BSON("_id" << 1 << "a" << 1), Milliseconds(1)}, @@ -639,8 +694,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSWithTargetBatc int batchSize2 = timedBatch2.size(); int nDocs = batchSize0 + batchSize1 + batchSize2; - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); WorkingSet ws; auto deleteStage = makeBatchedDeleteStage(&ws, coll); @@ -703,12 +761,15 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetBatchTimeMSWithTargetBatc } TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsBasic) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); auto nDocs = 52; prePopulateCollection(nDocs); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); WorkingSet ws; @@ -772,12 +833,15 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsBasic) { // No limits on batch targets means all deletes will be committed in a single batch, and the // 'targetPassDocs' will be ignored. 
TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsWithUnlimitedBatchTargets) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); auto nDocs = 52; prePopulateCollection(nDocs); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); WorkingSet ws; @@ -825,12 +889,15 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassDocsWithUnlimitedBatc } TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSBasic) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); auto nDocs = 52; prePopulateCollection(nDocs); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); WorkingSet ws; @@ -873,12 +940,15 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSBasic) { // Demonstrates 'targetPassTimeMS' has no impact when there are no batch limits. TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSWithUnlimitedBatchTargets) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); auto nDocs = 52; prePopulateCollection(nDocs); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); WorkingSet ws; @@ -927,7 +997,7 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSWithUnlimitedBa // Tests a more realistic scenario where both batch and pass targets are set. In this case, // 'targetPassTimeMS' is met before 'targetPassDocs' is. TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSReachedBeforeTargetPassDocs) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); // Prepare the targets such that 'targetPassTimeMS' will be reached before 'targetPassDocs'. auto targetBatchDocs = 3; @@ -976,8 +1046,11 @@ TEST_F(QueryStageBatchedDeleteTest, BatchedDeleteTargetPassTimeMSReachedBeforeTa batchedDeleteParams->targetPassTimeMS = targetPassTimeMS; batchedDeleteParams->targetPassDocs = targetPassDocs; - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(coll.exists()); WorkingSet ws; diff --git a/src/mongo/dbtests/query_stage_cached_plan.cpp b/src/mongo/dbtests/query_stage_cached_plan.cpp index 08e70dad3c9cc..ebbfec68a796e 100644 --- a/src/mongo/dbtests/query_stage_cached_plan.cpp +++ b/src/mongo/dbtests/query_stage_cached_plan.cpp @@ -27,27 +27,66 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/cached_plan.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/mock_stage.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/mock_yield_policies.h" #include "mongo/db/query/plan_cache.h" #include "mongo/db/query/plan_cache_key_factory.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner_params.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/platform/atomic_proxy.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/scopeguard.h" namespace mongo { @@ -55,7 +94,8 @@ using unittest::assertGet; namespace QueryStageCachedPlan { -static const NamespaceString nss("unittests.QueryStageCachedPlan"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.QueryStageCachedPlan"); namespace { std::unique_ptr canonicalQueryFromFilterObj(OperationContext* opCtx, @@ -81,7 +121,7 @@ class QueryStageCachedPlan : public unittest::Test { addIndex(BSON("a" << 1)); addIndex(BSON("b" << 1)); - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); CollectionPtr collection = ctx.getCollection(); ASSERT(collection); @@ -92,7 +132,7 @@ class QueryStageCachedPlan : public unittest::Test { } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns_forTest(), obj)); } void dropIndex(BSONObj keyPattern) { @@ -158,7 +198,7 @@ class QueryStageCachedPlan : public unittest::Test { } CachedPlanStage cachedPlanStage(_expCtx.get(), - collection, + &collection, &_ws, cq, plannerParams, @@ -166,7 +206,7 @@ class QueryStageCachedPlan : public unittest::Test { std::move(mockChild)); // This should succeed after triggering a replan. 
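
Aside (editorial, not from the patch): two signature changes run through the cached-plan hunks here. CachedPlanStage now receives the collection by pointer (&collection) rather than by value, and NoopYieldPolicy takes the OperationContext in addition to the fast clock source. With names taken from the surrounding fixture, the updated replan check is roughly:

    NoopYieldPolicy yieldPolicy(&_opCtx, _opCtx.getServiceContext()->getFastClockSource());
    ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy));
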
- NoopYieldPolicy yieldPolicy(_opCtx.getServiceContext()->getFastClockSource()); + NoopYieldPolicy yieldPolicy(&_opCtx, _opCtx.getServiceContext()->getFastClockSource()); ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy)); } @@ -213,7 +253,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailureMemoryLimitExceeded) { // High enough so that we shouldn't trigger a replan based on works. const size_t decisionWorks = 50; CachedPlanStage cachedPlanStage(_expCtx.get(), - collection.getCollection(), + &collection.getCollection(), &_ws, cq.get(), plannerParams, @@ -221,7 +261,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanFailureMemoryLimitExceeded) { std::move(mockChild)); // This should succeed after triggering a replan. - NoopYieldPolicy yieldPolicy(_opCtx.getServiceContext()->getFastClockSource()); + NoopYieldPolicy yieldPolicy(&_opCtx, _opCtx.getServiceContext()->getFastClockSource()); ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy)); ASSERT_EQ(getNumResultsForStage(_ws, &cachedPlanStage, cq.get()), 2U); @@ -267,7 +307,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) { } CachedPlanStage cachedPlanStage(_expCtx.get(), - collection.getCollection(), + &collection.getCollection(), &_ws, cq.get(), plannerParams, @@ -275,7 +315,7 @@ TEST_F(QueryStageCachedPlan, QueryStageCachedPlanHitMaxWorks) { std::move(mockChild)); // This should succeed after triggering a replan. - NoopYieldPolicy yieldPolicy(_opCtx.getServiceContext()->getFastClockSource()); + NoopYieldPolicy yieldPolicy(&_opCtx, _opCtx.getServiceContext()->getFastClockSource()); ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy)); ASSERT_EQ(getNumResultsForStage(_ws, &cachedPlanStage, cq.get()), 2U); @@ -487,7 +527,7 @@ TEST_F(QueryStageCachedPlan, ThrowsOnYieldRecoveryWhenIndexIsDroppedBeforePlanSe const size_t decisionWorks = 10; CachedPlanStage cachedPlanStage(_expCtx.get(), - collection, + &collection, &_ws, cq.get(), plannerParams, @@ -531,14 +571,14 @@ TEST_F(QueryStageCachedPlan, DoesNotThrowOnYieldRecoveryWhenIndexIsDroppedAferPl const size_t decisionWorks = 10; CachedPlanStage cachedPlanStage(_expCtx.get(), - collection, + &collection, &_ws, cq.get(), plannerParams, decisionWorks, std::make_unique(_expCtx.get(), &_ws)); - NoopYieldPolicy yieldPolicy(_opCtx.getServiceContext()->getFastClockSource()); + NoopYieldPolicy yieldPolicy(&_opCtx, _opCtx.getServiceContext()->getFastClockSource()); ASSERT_OK(cachedPlanStage.pickBestPlan(&yieldPolicy)); // Drop an index while the CachedPlanStage is in a saved state. We should be able to restore diff --git a/src/mongo/dbtests/query_stage_collscan.cpp b/src/mongo/dbtests/query_stage_collscan.cpp index e020823d3fc8c..1d034ed50ebea 100644 --- a/src/mongo/dbtests/query_stage_collscan.cpp +++ b/src/mongo/dbtests/query_stage_collscan.cpp @@ -31,35 +31,83 @@ * This file tests db/exec/collection_scan.cpp. 
*/ -#include - -#include "mongo/client/dbclient_cursor.h" +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/database.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/plan_stage.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_leaf.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/record_id_bound.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" -#include "mongo/db/storage/record_store.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/logv2/log.h" -#include "mongo/util/fail_point.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest namespace mongo { namespace query_stage_collection_scan { -static const NamespaceString kNss{"unittests.QueryStageCollectionScan"}; +static const NamespaceString kNss = + NamespaceString::createNamespaceString_forTest("unittests.QueryStageCollectionScan"); // // Stage-specific tests. 
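
Editorial sketch, not part of the patch: across these dbtests files, test namespaces are now built and stringified through the test-only helpers instead of the plain NamespaceString constructor and ns(). With a hypothetical namespace string standing in for the real test names, the pattern is roughly:

    static const NamespaceString nss =
        NamespaceString::createNamespaceString_forTest("unittests.SomeQueryStageTest");
    dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest());
    ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns_forTest(), BSON("a" << 1)));
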
@@ -68,7 +116,7 @@ static const NamespaceString kNss{"unittests.QueryStageCollectionScan"}; class QueryStageCollectionScanTest : public unittest::Test { public: QueryStageCollectionScanTest() : _client(&_opCtx) { - dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns_forTest()); for (int i = 0; i < numObj(); ++i) { BSONObjBuilder bob; @@ -78,7 +126,7 @@ class QueryStageCollectionScanTest : public unittest::Test { } virtual ~QueryStageCollectionScanTest() { - dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns_forTest()); _client.dropCollection(kNss); } @@ -97,13 +145,13 @@ class QueryStageCollectionScanTest : public unittest::Test { // Make the filter. StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterObj, _expCtx); - verify(statusWithMatcher.isOK()); + MONGO_verify(statusWithMatcher.isOK()); std::unique_ptr filterExpr = std::move(statusWithMatcher.getValue()); // Make a scan and have the runner own it. std::unique_ptr ws = std::make_unique(); std::unique_ptr ps = std::make_unique( - _expCtx.get(), collection.getCollection(), params, ws.get(), filterExpr.get()); + _expCtx.get(), &collection.getCollection(), params, ws.get(), filterExpr.get()); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -135,13 +183,13 @@ class QueryStageCollectionScanTest : public unittest::Test { params.tailable = false; std::unique_ptr scan( - new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr)); + new CollectionScan(_expCtx.get(), &collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - verify(member->hasRecordId()); + MONGO_verify(member->hasRecordId()); out->push_back(member->recordId); } } @@ -231,7 +279,7 @@ class QueryStageCollectionScanTest : public unittest::Test { params.maxRecord = RecordIdBound(maxRecord); WorkingSet ws; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); int count = 0; while (!scan->isEOF()) { @@ -276,7 +324,7 @@ class QueryStageCollectionScanTest : public unittest::Test { params.boundInclusion = boundInclusion; WorkingSet ws; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, filter); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, filter); int idx = 0; while (!scan->isEOF()) { @@ -337,7 +385,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanBasicBackwardWithMatch) { TEST_F(QueryStageCollectionScanTest, QueryTestCollscanStopsScanningOnFilterFailureInClusteredCollectionIfSpecified) { - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto collDeleter = createClusteredCollection(ns, false /* prePopulate */); for (int i = 1; i <= numObj(); ++i) { insertDocument(ns, BSON("_id" << i << "foo" << i)); @@ -356,7 +404,7 @@ TEST_F(QueryStageCollectionScanTest, params.shouldReturnEofOnFilterMismatch = true; WorkingSet ws; LTEMatchExpression filter{"foo"_sd, Value(threshold)}; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, &filter); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, &filter); // Scan all matching documents. 
WorkingSetID id = WorkingSet::INVALID_ID; @@ -379,7 +427,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderForward) { // Make a scan and have the runner own it. std::unique_ptr ws = std::make_unique(); std::unique_ptr ps = std::make_unique( - _expCtx.get(), collection.getCollection(), params, ws.get(), nullptr); + _expCtx.get(), &collection.getCollection(), params, ws.get(), nullptr); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -412,7 +460,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderBackward) { std::unique_ptr ws = std::make_unique(); std::unique_ptr ps = std::make_unique( - _expCtx.get(), collection.getCollection(), params, ws.get(), nullptr); + _expCtx.get(), &collection.getCollection(), params, ws.get(), nullptr); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -437,7 +485,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanObjectsInOrderBackward) { // Scan through half the objects, delete the one we're about to fetch, then expect to get the "next" // object we would have gotten after that. TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) { - dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns_forTest()); const CollectionPtr& coll = ctx.getCollection(); @@ -451,7 +499,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) { params.tailable = false; WorkingSet ws; - std::unique_ptr scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr)); + std::unique_ptr scan(new CollectionScan(_expCtx.get(), &coll, params, &ws, nullptr)); int count = 0; while (count < 10) { @@ -491,7 +539,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObject) { // Scan through half the objects, delete the one we're about to fetch, then expect to get the "next" // object we would have gotten after that. But, do it in reverse! TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackward) { - dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns_forTest()); const CollectionPtr& coll = ctx.getCollection(); // Get the RecordIds that would be returned by an in-order scan. @@ -504,7 +552,7 @@ TEST_F(QueryStageCollectionScanTest, QueryStageCollscanDeleteUpcomingObjectBackw params.tailable = false; WorkingSet ws; - std::unique_ptr scan(new CollectionScan(_expCtx.get(), coll, params, &ws, nullptr)); + std::unique_ptr scan(new CollectionScan(_expCtx.get(), &coll, params, &ws, nullptr)); int count = 0; while (count < 10) { @@ -563,7 +611,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc // Create plan stage. std::unique_ptr ws = std::make_unique(); std::unique_ptr ps = std::make_unique( - _expCtx.get(), collection.getCollection(), params, ws.get(), nullptr); + _expCtx.get(), &collection.getCollection(), params, ws.get(), nullptr); // Run the rest of the scan and verify the results. auto statusWithPlanExecutor = @@ -589,7 +637,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekSuc // Verify that if we fail to seek to the resumeAfterRecordId, the plan stage fails. 
TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFailure) { - dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, kNss.ns_forTest()); auto coll = ctx.getCollection(); // Get the RecordIds that would be returned by an in-order scan. @@ -612,7 +660,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFai // Create plan stage. std::unique_ptr ws = std::make_unique(); std::unique_ptr ps = - std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr); + std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr); WorkingSetID id = WorkingSet::INVALID_ID; @@ -621,7 +669,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanResumeAfterRecordIdSeekFai } TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMax) { - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto collDeleter = createClusteredCollection(ns); AutoGetCollectionForRead autoColl(&_opCtx, ns); const CollectionPtr& coll = autoColl.getCollection(); @@ -641,7 +689,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMax) { params.maxRecord = RecordIdBound(recordIds[recordIds.size() - 1]); WorkingSet ws; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); // Expect to see all RecordIds. int count = 0; @@ -674,7 +722,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxBoundsDateT "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); // Create a clustered collection pre-populated with RecordIds generated from type // 'objectId'. @@ -701,7 +749,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxDateTypeMat "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); // Create a clustered collection pre-populated with RecordIds generated from type // 'objectId'. @@ -734,7 +782,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredIgnoreNumericReco "Running clustered collection scan test case", "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */); int numOIDDocs = 20; @@ -770,7 +818,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxDateExclusi "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? 
"FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */); @@ -803,7 +851,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxDateExclusi WorkingSet ws; auto scan = - std::make_unique(_expCtx.get(), coll, params, &ws, filter.get()); + std::make_unique(_expCtx.get(), &coll, params, &ws, filter.get()); int count = 0; while (!scan->isEOF()) { @@ -826,7 +874,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxDateExclusi } TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredReverse) { - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto collDeleter = createClusteredCollection(ns); AutoGetCollectionForRead autoColl(&_opCtx, ns); const CollectionPtr& coll = autoColl.getCollection(); @@ -848,7 +896,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredReverse) { params.maxRecord = RecordIdBound(recordIds[0]); WorkingSet ws; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); // Expect to see all RecordIds. int count = 0; @@ -870,7 +918,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredReverse) { } TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxFullObjectIdRange) { - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto collDeleter = createClusteredCollection(ns); AutoGetCollectionForRead autoColl(&_opCtx, ns); const CollectionPtr& coll = autoColl.getCollection(); @@ -892,7 +940,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxFullObjectI params.maxRecord = RecordIdBound(record_id_helpers::keyForOID(OID::max())); WorkingSet ws; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); // Expect to see all RecordIds. 
int count = 0; @@ -914,7 +962,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredMinMaxFullObjectI } TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRange) { - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto collDeleter = createClusteredCollection(ns); AutoGetCollectionForRead autoColl(&_opCtx, ns); const CollectionPtr& coll = autoColl.getCollection(); @@ -940,7 +988,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRange) { params.maxRecord = RecordIdBound(recordIds[endOffset]); WorkingSet ws; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); int count = 0; while (!scan->isEOF()) { @@ -963,7 +1011,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRange) { } TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusiveFilter) { - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto collDeleter = createClusteredCollection(ns); AutoGetCollectionForRead autoColl(&_opCtx, ns); const CollectionPtr& coll = autoColl.getCollection(); @@ -1004,7 +1052,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusi auto filter = std::move(swMatch.getValue()); WorkingSet ws; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, filter.get()); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, filter.get()); // The expected range should not include the first or last records. std::vector expectedIds{recordIds.begin() + startOffset + 1, @@ -1028,7 +1076,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusi } TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusiveFilterReverse) { - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto collDeleter = createClusteredCollection(ns); AutoGetCollectionForRead autoColl(&_opCtx, ns); const CollectionPtr& coll = autoColl.getCollection(); @@ -1071,7 +1119,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInnerRangeExclusi auto filter = std::move(swMatch.getValue()); WorkingSet ws; - auto scan = std::make_unique(_expCtx.get(), coll, params, &ws, filter.get()); + auto scan = std::make_unique(_expCtx.get(), &coll, params, &ws, filter.get()); // The expected range should not include the first or last records. std::vector expectedIds{recordIds.begin() + startOffset + 1, @@ -1106,7 +1154,7 @@ TEST_F(QueryStageCollectionScanTest, "Running clustered collection scan test case", "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */); std::vector docs{BSON("_id" << 0), @@ -1149,7 +1197,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInclusionBoundInc "Running clustered collection scan test case", "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? 
"FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */); std::vector docs{BSON("_id" << 0), @@ -1193,7 +1241,7 @@ TEST_F(QueryStageCollectionScanTest, "Running clustered collection scan test case", "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */); std::vector docs{BSON("_id" << 0), @@ -1236,7 +1284,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInclusionBoundYie "Running clustered collection scan test case", "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */); std::vector docs{BSON("_id" << 0), @@ -1271,7 +1319,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInclusionBoundsOv "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */); std::vector docs{BSON("_id" << 0), @@ -1325,7 +1373,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInclusionBoundsHa "scanDirection"_attr = (direction == CollectionScanParams::FORWARD ? "FORWARD" : "BACKWARD")); - auto ns = NamespaceString("a.b"); + auto ns = NamespaceString::createNamespaceString_forTest("a.b"); auto scopedCollectionDeleter = createClusteredCollection(ns, false /* prePopulate */); std::vector docs{BSON("_id" << 0), @@ -1356,6 +1404,9 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanClusteredInclusionBoundsHa // Tests behavior of getLatestOplogTimestamp() method when the scanned collection is the change // collection. TEST_F(QueryStageCollectionScanTest, QueryTestCollscanChangeCollectionGetLatestOplogTimestamp) { + // This test is disabled for the reasons outlined in SERVER-76288. + // TODO SERVER-76309 reenable this test. + return; // Setup the change collection. auto collectionName = NamespaceString::makeChangeCollectionNSS(boost::none); auto scopedCollectionDeleter = @@ -1374,7 +1425,7 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanChangeCollectionGetLatestO params.direction = CollectionScanParams::FORWARD; params.shouldTrackLatestOplogTimestamp = true; WorkingSet ws; - auto scanStage = std::make_unique(_expCtx.get(), coll, params, &ws, nullptr); + auto scanStage = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); WorkingSetID id = WorkingSet::INVALID_ID; auto advance = [&]() { PlanStage::StageState state; @@ -1393,5 +1444,49 @@ TEST_F(QueryStageCollectionScanTest, QueryTestCollscanChangeCollectionGetLatestO ASSERT_EQUALS(PlanStage::IS_EOF, state) << " state: " << PlanStage::stateStr(state); ASSERT_EQUALS(Timestamp(16, 1), scanStage->getLatestOplogTimestamp()); } + +// Tests behavior of getLatestOplogTimestamp() method when the scanned collection is the change +// collection. 
+TEST_F(QueryStageCollectionScanTest, QueryTestCollscanChangeCollectionLatestTimestampIsNotGlobal) { + // This test exercises the temporary behaviour introduced in SERVER-76288. + // TODO SERVER-76309 remove this test. + // Setup the change collection. + auto collectionName = NamespaceString::makeChangeCollectionNSS(boost::none); + auto scopedCollectionDeleter = + createClusteredCollection(collectionName, false /* prePopulate */); + insertDocument(collectionName, BSON("_id" << Timestamp(15, 5) << "ts" << Timestamp(15, 5))); + + // Set the read timestamp. + _opCtx.recoveryUnit()->setTimestampReadSource(RecoveryUnit::ReadSource::kProvided, + Timestamp(16, 1)); + + // Build the collection scan stage. + AutoGetCollectionForRead autoColl(&_opCtx, collectionName); + const CollectionPtr& coll = autoColl.getCollection(); + CollectionScanParams params; + params.tailable = true; + params.direction = CollectionScanParams::FORWARD; + params.shouldTrackLatestOplogTimestamp = true; + WorkingSet ws; + auto scanStage = std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr); + WorkingSetID id = WorkingSet::INVALID_ID; + auto advance = [&]() { + PlanStage::StageState state; + while ((state = scanStage->work(&id)) == PlanStage::NEED_TIME) { + }; + return state; + }; + + // Verify that the latest oplog timestamp is equal to the 'ts' field of the retrieved document. + auto state = advance(); + ASSERT_EQUALS(PlanStage::ADVANCED, state) << " state: " << PlanStage::stateStr(state); + ASSERT_EQUALS(Timestamp(15, 5), scanStage->getLatestOplogTimestamp()); + + // Verify that on EOF, the latest oplog timestamp is still equal to the ts of the last document + // retrieved, and has NOT advanced to the read timestamp. + state = advance(); + ASSERT_EQUALS(PlanStage::IS_EOF, state) << " state: " << PlanStage::stateStr(state); + ASSERT_EQUALS(Timestamp(15, 5), scanStage->getLatestOplogTimestamp()); +} } // namespace query_stage_collection_scan } // namespace mongo diff --git a/src/mongo/dbtests/query_stage_count.cpp b/src/mongo/dbtests/query_stage_count.cpp index 0f1a3e97a3a16..97acec11a2c80 100644 --- a/src/mongo/dbtests/query_stage_count.cpp +++ b/src/mongo/dbtests/query_stage_count.cpp @@ -27,28 +27,71 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" #include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/exec/collection_scan.h" #include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/count.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/count_command_gen.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/record_id.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace QueryStageCount { const int kDocuments = 100; const int kInterjections = kDocuments; -const NamespaceString kTestNss = NamespaceString("db.dummy"); +const NamespaceString kTestNss = NamespaceString::createNamespaceString_forTest("db.dummy"); class CountStageTest { public: @@ -93,13 +136,13 @@ class CountStageTest { params.tailable = false; std::unique_ptr scan( - new CollectionScan(_expCtx.get(), _coll, params, &ws, nullptr)); + new CollectionScan(_expCtx.get(), &_coll, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - verify(member->hasRecordId()); + MONGO_verify(member->hasRecordId()); _recordIds.push_back(member->recordId); } } @@ -132,7 +175,8 @@ class CountStageTest { Snapshotted(_opCtx.recoveryUnit()->getSnapshotId(), oldDoc), newDoc, collection_internal::kUpdateAllIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); wunit.commit(); } @@ -216,14 +260,14 @@ class CountStageTest { params.direction = 1; // This child stage gets owned and freed by its parent CountStage - return new IndexScan(_expCtx.get(), _coll, params, ws, expr); + return new IndexScan(_expCtx.get(), &_coll, params, ws, expr); } CollectionScan* createCollScan(MatchExpression* expr, WorkingSet* ws) { CollectionScanParams params; // This child stage gets owned and freed by its parent 
CountStage - return new CollectionScan(_expCtx.get(), _coll, params, ws, expr); + return new CollectionScan(_expCtx.get(), &_coll, params, ws, expr); } static const char* ns() { @@ -231,7 +275,7 @@ class CountStageTest { } static NamespaceString nss() { - return NamespaceString(ns()); + return NamespaceString::createNamespaceString_forTest(ns()); } protected: @@ -247,7 +291,7 @@ class CountStageTest { class QueryStageCountNoChangeDuringYield : public CountStageTest { public: void run() { - CountCommandRequest request((NamespaceString(ns()))); + CountCommandRequest request((NamespaceString::createNamespaceString_forTest(ns()))); request.setQuery(BSON("x" << LT << kDocuments / 2)); testCount(request, kDocuments / 2); @@ -258,7 +302,7 @@ class QueryStageCountNoChangeDuringYield : public CountStageTest { class QueryStageCountYieldWithSkip : public CountStageTest { public: void run() { - CountCommandRequest request((NamespaceString(ns()))); + CountCommandRequest request((NamespaceString::createNamespaceString_forTest(ns()))); request.setQuery(BSON("x" << GTE << 0)); request.setSkip(2); @@ -270,7 +314,7 @@ class QueryStageCountYieldWithSkip : public CountStageTest { class QueryStageCountYieldWithLimit : public CountStageTest { public: void run() { - CountCommandRequest request((NamespaceString(ns()))); + CountCommandRequest request((NamespaceString::createNamespaceString_forTest(ns()))); request.setQuery(BSON("x" << GTE << 0)); request.setSkip(0); request.setLimit(2); @@ -284,7 +328,7 @@ class QueryStageCountYieldWithLimit : public CountStageTest { class QueryStageCountInsertDuringYield : public CountStageTest { public: void run() { - CountCommandRequest request((NamespaceString(ns()))); + CountCommandRequest request((NamespaceString::createNamespaceString_forTest(ns()))); request.setQuery(BSON("x" << 1)); testCount(request, kInterjections + 1); @@ -302,7 +346,7 @@ class QueryStageCountDeleteDuringYield : public CountStageTest { void run() { // expected count would be 99 but we delete the second record // after doing the first unit of work - CountCommandRequest request((NamespaceString(ns()))); + CountCommandRequest request((NamespaceString::createNamespaceString_forTest(ns()))); request.setQuery(BSON("x" << GTE << 1)); testCount(request, kDocuments - 2); @@ -326,7 +370,7 @@ class QueryStageCountDeleteDuringYield : public CountStageTest { class QueryStageCountUpdateDuringYield : public CountStageTest { public: void run() { - CountCommandRequest request((NamespaceString(ns()))); + CountCommandRequest request((NamespaceString::createNamespaceString_forTest(ns()))); request.setQuery(BSON("x" << GTE << 2)); // We call 'interject' after first unit of work that skips the first document, so it is @@ -353,7 +397,7 @@ class QueryStageCountUpdateDuringYield : public CountStageTest { class QueryStageCountMultiKeyDuringYield : public CountStageTest { public: void run() { - CountCommandRequest request((NamespaceString(ns()))); + CountCommandRequest request((NamespaceString::createNamespaceString_forTest(ns()))); request.setQuery(BSON("x" << 1)); testCount(request, kDocuments + 1, true); // only applies to indexed case } diff --git a/src/mongo/dbtests/query_stage_count_scan.cpp b/src/mongo/dbtests/query_stage_count_scan.cpp index 0b57fcefd088c..dd107041f4085 100644 --- a/src/mongo/dbtests/query_stage_count_scan.cpp +++ b/src/mongo/dbtests/query_stage_count_scan.cpp @@ -28,22 +28,35 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/client/dbclient_cursor.h" +#include + 
+#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/exec/collection_scan.h" #include "mongo/db/exec/count_scan.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/json.h" -#include "mongo/db/matcher/expression_parser.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/util/fail_point.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace QueryStageCountScan { @@ -54,12 +67,12 @@ class CountBase { CountBase() : _client(&_opCtx) {} virtual ~CountBase() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); _client.dropCollection(ns()); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_opCtx, ns().ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, ns().ns_forTest(), obj)); } void insert(const BSONObj& obj) { @@ -108,7 +121,8 @@ class CountBase { } static NamespaceString ns() { - return {"unittests", "QueryStageCountScanScan"}; + return NamespaceString::createNamespaceString_forTest("unittests", + "QueryStageCountScanScan"); } protected: @@ -129,7 +143,7 @@ class CountBase { class QueryStageCountScanDups : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert some docs insert(BSON("a" << BSON_ARRAY(5 << 7))); @@ -148,7 +162,7 @@ class QueryStageCountScanDups : public CountBase { params.endKeyInclusive = true; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(2, numCounted); @@ -161,7 +175,7 @@ class QueryStageCountScanDups : public CountBase { class QueryStageCountScanInclusiveBounds : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert some docs for (int i = 0; i < 10; ++i) { @@ -181,7 +195,7 @@ class QueryStageCountScanInclusiveBounds : public CountBase { params.endKeyInclusive = true; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(5, numCounted); @@ -194,7 +208,7 @@ class QueryStageCountScanInclusiveBounds : public CountBase { class QueryStageCountScanExclusiveBounds : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert some docs for (int i = 0; i < 10; ++i) { @@ -214,7 +228,7 @@ class QueryStageCountScanExclusiveBounds : public CountBase { params.endKeyInclusive = false; WorkingSet ws; - 
CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(3, numCounted); @@ -227,7 +241,7 @@ class QueryStageCountScanExclusiveBounds : public CountBase { class QueryStageCountScanLowerBound : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert doc, add index insert(BSON("a" << 2)); @@ -243,7 +257,7 @@ class QueryStageCountScanLowerBound : public CountBase { params.endKeyInclusive = false; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(0, numCounted); @@ -256,7 +270,7 @@ class QueryStageCountScanLowerBound : public CountBase { class QueryStageCountScanNothingInInterval : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert documents, add index insert(BSON("a" << 2)); @@ -273,7 +287,7 @@ class QueryStageCountScanNothingInInterval : public CountBase { params.endKeyInclusive = false; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(0, numCounted); @@ -287,7 +301,7 @@ class QueryStageCountScanNothingInInterval : public CountBase { class QueryStageCountScanNothingInIntervalFirstMatchTooHigh : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert some documents, add index insert(BSON("a" << 2)); @@ -304,7 +318,7 @@ class QueryStageCountScanNothingInIntervalFirstMatchTooHigh : public CountBase { params.endKeyInclusive = true; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(0, numCounted); @@ -318,7 +332,7 @@ class QueryStageCountScanNothingInIntervalFirstMatchTooHigh : public CountBase { class QueryStageCountScanNoChangeDuringYield : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert documents, add index for (int i = 0; i < 10; ++i) { @@ -336,7 +350,7 @@ class QueryStageCountScanNoChangeDuringYield : public CountBase { params.endKeyInclusive = true; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); WorkingSetID wsid; int numCounted = 0; @@ -374,7 +388,7 @@ class QueryStageCountScanNoChangeDuringYield : public CountBase { class QueryStageCountScanDeleteDuringYield : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert documents, add index for (int i = 0; i < 10; ++i) { @@ -392,7 +406,7 @@ class QueryStageCountScanDeleteDuringYield : public CountBase { params.endKeyInclusive = true; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); WorkingSetID wsid; int numCounted = 0; @@ -433,7 +447,7 @@ class QueryStageCountScanDeleteDuringYield : public CountBase { class QueryStageCountScanInsertNewDocsDuringYield : public 
CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert documents, add index for (int i = 0; i < 10; ++i) { @@ -451,7 +465,7 @@ class QueryStageCountScanInsertNewDocsDuringYield : public CountBase { params.endKeyInclusive = true; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); WorkingSetID wsid; int numCounted = 0; @@ -494,7 +508,7 @@ class QueryStageCountScanInsertNewDocsDuringYield : public CountBase { class QueryStageCountScanUnusedKeys : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert docs, add index for (int i = 0; i < 10; ++i) { @@ -517,7 +531,7 @@ class QueryStageCountScanUnusedKeys : public CountBase { params.endKeyInclusive = true; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(7, numCounted); @@ -530,7 +544,7 @@ class QueryStageCountScanUnusedKeys : public CountBase { class QueryStageCountScanUnusedEndKey : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert docs, add index for (int i = 0; i < 10; ++i) { @@ -551,7 +565,7 @@ class QueryStageCountScanUnusedEndKey : public CountBase { params.endKeyInclusive = true; // yes? WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); int numCounted = runCount(&count); ASSERT_EQUALS(9, numCounted); @@ -564,7 +578,7 @@ class QueryStageCountScanUnusedEndKey : public CountBase { class QueryStageCountScanKeyBecomesUnusedDuringYield : public CountBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, ns().ns()); + dbtests::WriteContextForTests ctx(&_opCtx, ns().ns_forTest()); // Insert documents, add index for (int i = 0; i < 10; ++i) { @@ -582,7 +596,7 @@ class QueryStageCountScanKeyBecomesUnusedDuringYield : public CountBase { params.endKeyInclusive = true; WorkingSet ws; - CountScan count(_expCtx.get(), coll, params, &ws); + CountScan count(_expCtx.get(), &coll, params, &ws); WorkingSetID wsid; int numCounted = 0; diff --git a/src/mongo/dbtests/query_stage_delete.cpp b/src/mongo/dbtests/query_stage_delete.cpp index f0b76d0c31918..dcb912182ba10 100644 --- a/src/mongo/dbtests/query_stage_delete.cpp +++ b/src/mongo/dbtests/query_stage_delete.cpp @@ -27,24 +27,53 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" #include "mongo/db/exec/delete_stage.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/queued_data_stage.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/record_id.h" #include "mongo/db/service_context.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace QueryStageDelete { -static const NamespaceString nss("unittests.QueryStageDelete"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.QueryStageDelete"); // // Stage-specific tests. @@ -53,7 +82,7 @@ static const NamespaceString nss("unittests.QueryStageDelete"); class QueryStageDeleteBase { public: QueryStageDeleteBase() : _client(&_opCtx) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); for (size_t i = 0; i < numObj(); ++i) { BSONObjBuilder bob; @@ -64,7 +93,7 @@ class QueryStageDeleteBase { } virtual ~QueryStageDeleteBase() { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); _client.dropCollection(nss); } @@ -82,13 +111,13 @@ class QueryStageDeleteBase { params.tailable = false; std::unique_ptr scan( - new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr)); + new CollectionScan(_expCtx.get(), &collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - verify(member->hasRecordId()); + MONGO_verify(member->hasRecordId()); out->push_back(member->recordId); } } @@ -122,14 +151,16 @@ class QueryStageDeleteBase { class QueryStageDeleteUpcomingObjectWasDeleted : public QueryStageDeleteBase { public: void run() { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + ASSERT(coll.exists()); // Get the RecordIds that would be returned by an in-order scan. 
std::vector recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(coll.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); // Configure the scan. CollectionScanParams collScanParams; @@ -160,10 +191,11 @@ class QueryStageDeleteUpcomingObjectWasDeleted : public QueryStageDeleteBase { // Remove recordIds[targetDocIndex]; static_cast(&deleteStage)->saveState(); - BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value(); + BSONObj targetDoc = + coll.getCollectionPtr()->docFor(&_opCtx, recordIds[targetDocIndex]).value(); ASSERT(!targetDoc.isEmpty()); remove(targetDoc); - static_cast(&deleteStage)->restoreState(&coll); + static_cast(&deleteStage)->restoreState(&coll.getCollectionPtr()); // Remove the rest. while (!deleteStage.isEOF()) { @@ -184,9 +216,12 @@ class QueryStageDeleteReturnOldDoc : public QueryStageDeleteBase { public: void run() { // Various variables we'll need. - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); + const auto coll = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + + ASSERT(coll.exists()); const int targetDocIndex = 0; const BSONObj query = BSON("foo" << BSON("$gte" << targetDocIndex)); const auto ws = std::make_unique(); @@ -194,7 +229,7 @@ class QueryStageDeleteReturnOldDoc : public QueryStageDeleteBase { // Get the RecordIds that would be returned by an in-order scan. std::vector recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(coll.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); // Configure a QueuedDataStage to pass the first object in the collection back in a // RID_AND_OBJ state. diff --git a/src/mongo/dbtests/query_stage_distinct.cpp b/src/mongo/dbtests/query_stage_distinct.cpp index 377f0f5030061..b3723c37b16a7 100644 --- a/src/mongo/dbtests/query_stage_distinct.cpp +++ b/src/mongo/dbtests/query_stage_distinct.cpp @@ -27,22 +27,41 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/client/dbclient_cursor.h" +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/distinct_scan.h" #include "mongo/db/exec/plan_stage.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/interval.h" #include "mongo/db/query/plan_executor.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" /** * This file tests db/exec/distinct.cpp @@ -50,7 +69,8 @@ namespace QueryStageDistinct { -static const NamespaceString nss{"unittests.QueryStageDistinct"}; +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.QueryStageDistinct"); class DistinctBase { public: @@ -62,7 +82,7 @@ class DistinctBase { } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(&_opCtx, nss.ns_forTest(), obj)); } void insert(const BSONObj& obj) { @@ -145,7 +165,7 @@ class QueryStageDistinctBasic : public DistinctBase { params.bounds.fields.push_back(oil); WorkingSet ws; - DistinctScan distinct(_expCtx.get(), coll, std::move(params), &ws); + DistinctScan distinct(_expCtx.get(), &coll, std::move(params), &ws); WorkingSetID wsid; // Get our first result. @@ -199,7 +219,7 @@ class QueryStageDistinctMultiKey : public DistinctBase { std::vector indexes; coll->getIndexCatalog()->findIndexesByKeyPattern( &_opCtx, BSON("a" << 1), IndexCatalog::InclusionPolicy::kReady, &indexes); - verify(indexes.size() == 1); + MONGO_verify(indexes.size() == 1); DistinctParams params{&_opCtx, coll, indexes[0]}; ASSERT_TRUE(params.isMultiKey); @@ -214,7 +234,7 @@ class QueryStageDistinctMultiKey : public DistinctBase { params.bounds.fields.push_back(oil); WorkingSet ws; - DistinctScan distinct(_expCtx.get(), coll, std::move(params), &ws); + DistinctScan distinct(_expCtx.get(), &coll, std::move(params), &ws); // We should see each number in the range [1, 6] exactly once. std::set seen; @@ -283,7 +303,7 @@ class QueryStageDistinctCompoundIndex : public DistinctBase { params.bounds.fields.push_back(bOil); WorkingSet ws; - DistinctScan distinct(_expCtx.get(), coll, std::move(params), &ws); + DistinctScan distinct(_expCtx.get(), &coll, std::move(params), &ws); WorkingSetID wsid; PlanStage::StageState state; diff --git a/src/mongo/dbtests/query_stage_fetch.cpp b/src/mongo/dbtests/query_stage_fetch.cpp index 9f55b46f1d8b6..aa6c2931254d5 100644 --- a/src/mongo/dbtests/query_stage_fetch.cpp +++ b/src/mongo/dbtests/query_stage_fetch.cpp @@ -31,22 +31,49 @@ * This file tests db/exec/fetch.cpp. 
  * Fetch goes to disk so we cannot test outside of a dbtest.
  */
 
-#include "mongo/platform/basic.h"
-
+#include
 #include
-
-#include "mongo/client/dbclient_cursor.h"
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+#include "mongo/base/string_data.h"
+#include "mongo/bson/bsonelement.h"
+#include "mongo/bson/bsonmisc.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/bsonobjbuilder.h"
 #include "mongo/db/catalog/collection.h"
+#include "mongo/db/catalog/collection_catalog.h"
 #include "mongo/db/catalog/database.h"
 #include "mongo/db/client.h"
+#include "mongo/db/concurrency/d_concurrency.h"
+#include "mongo/db/concurrency/lock_manager_defs.h"
 #include "mongo/db/db_raii.h"
 #include "mongo/db/dbdirectclient.h"
+#include "mongo/db/exec/document_value/document.h"
 #include "mongo/db/exec/fetch.h"
 #include "mongo/db/exec/plan_stage.h"
 #include "mongo/db/exec/queued_data_stage.h"
-#include "mongo/db/json.h"
+#include "mongo/db/exec/working_set.h"
+#include "mongo/db/matcher/expression.h"
 #include "mongo/db/matcher/expression_parser.h"
-#include "mongo/dbtests/dbtests.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/pipeline/expression_context.h"
+#include "mongo/db/record_id.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/storage/record_store.h"
+#include "mongo/db/storage/snapshot.h"
+#include "mongo/db/storage/write_unit_of_work.h"
+#include "mongo/dbtests/dbtests.h"  // IWYU pragma: keep
+#include "mongo/unittest/assert.h"
+#include "mongo/unittest/framework.h"
+#include "mongo/util/assert_util.h"
+#include "mongo/util/intrusive_counter.h"
 
 namespace QueryStageFetch {
 
@@ -81,7 +108,7 @@ class QueryStageFetchBase {
         return "unittests.QueryStageFetch";
     }
     static NamespaceString nss() {
-        return NamespaceString(ns());
+        return NamespaceString::createNamespaceString_forTest(ns());
     }
 
 protected:
@@ -143,7 +170,7 @@ class FetchStageAlreadyFetched : public QueryStageFetchBase {
         }
 
         auto fetchStage =
-            std::make_unique(_expCtx.get(), &ws, std::move(mockStage), nullptr, coll);
+            std::make_unique(_expCtx.get(), &ws, std::move(mockStage), nullptr, &coll);
 
         WorkingSetID id = WorkingSet::INVALID_ID;
         PlanStage::StageState state;
@@ -205,12 +232,12 @@ class FetchStageFilter : public QueryStageFetchBase {
         BSONObj filterObj = BSON("foo" << 6);
         StatusWithMatchExpression statusWithMatcher =
             MatchExpressionParser::parse(filterObj, _expCtx);
-        verify(statusWithMatcher.isOK());
+        MONGO_verify(statusWithMatcher.isOK());
         unique_ptr filterExpr = std::move(statusWithMatcher.getValue());
 
         // Matcher requires that foo==6 but we only have data with foo==5.
         auto fetchStage = std::make_unique(
-            _expCtx.get(), &ws, std::move(mockStage), filterExpr.get(), coll);
+            _expCtx.get(), &ws, std::move(mockStage), filterExpr.get(), &coll);
 
         // First call should return a fetch request as it's not in memory.
         WorkingSetID id = WorkingSet::INVALID_ID;
diff --git a/src/mongo/dbtests/query_stage_ixscan.cpp b/src/mongo/dbtests/query_stage_ixscan.cpp
index 6cde679ae98d2..d5e281fef10ad 100644
--- a/src/mongo/dbtests/query_stage_ixscan.cpp
+++ b/src/mongo/dbtests/query_stage_ixscan.cpp
@@ -27,16 +27,46 @@
  * it in the license file.
*/ +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/exec/index_scan.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/working_set.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/interval.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace QueryStageIxscan { @@ -112,7 +142,7 @@ class IndexScanTest { // This child stage gets owned and freed by the caller. MatchExpression* filter = nullptr; - return new IndexScan(_expCtx.get(), _collPtr, params, &_ws, filter); + return new IndexScan(_expCtx.get(), &_collPtr, params, &_ws, filter); } IndexScan* createIndexScan(BSONObj startKey, @@ -137,14 +167,14 @@ class IndexScanTest { params.bounds.fields.push_back(oil); MatchExpression* filter = nullptr; - return new IndexScan(_expCtx.get(), _collPtr, params, &_ws, filter); + return new IndexScan(_expCtx.get(), &_collPtr, params, &_ws, filter); } static const char* ns() { return "unittest.QueryStageIxscan"; } static NamespaceString nss() { - return NamespaceString(ns()); + return NamespaceString::createNamespaceString_forTest(ns()); } protected: diff --git a/src/mongo/dbtests/query_stage_limit_skip.cpp b/src/mongo/dbtests/query_stage_limit_skip.cpp index 8b1656a2c5b00..187a3134578eb 100644 --- a/src/mongo/dbtests/query_stage_limit_skip.cpp +++ b/src/mongo/dbtests/query_stage_limit_skip.cpp @@ -32,18 +32,34 @@ */ -#include "mongo/platform/basic.h" - +#include #include +#include +#include + +#include -#include "mongo/client/dbclient_cursor.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/limit.h" #include "mongo/db/exec/mock_stage.h" #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/skip.h" -#include "mongo/db/json.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep 
+#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" using namespace mongo; @@ -98,7 +114,9 @@ class QueryStageLimitSkipBasicTest { public: void run() { const boost::intrusive_ptr expCtx(make_intrusive( - _opCtx, std::unique_ptr(nullptr), NamespaceString("test.dummyNS"))); + _opCtx, + std::unique_ptr(nullptr), + NamespaceString::createNamespaceString_forTest("test.dummyNS"))); for (int i = 0; i < 2 * N; ++i) { WorkingSet ws; diff --git a/src/mongo/dbtests/query_stage_merge_sort.cpp b/src/mongo/dbtests/query_stage_merge_sort.cpp index c328209b1258b..0b4e4c5e906c5 100644 --- a/src/mongo/dbtests/query_stage_merge_sort.cpp +++ b/src/mongo/dbtests/query_stage_merge_sort.cpp @@ -27,27 +27,55 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include #include - -#include "mongo/client/dbclient_cursor.h" +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/fetch.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/merge_sort.h" #include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/working_set.h" #include "mongo/db/exec/working_set_common.h" -#include "mongo/db/json.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_planner_params.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" /** * This file tests db/exec/merge_sort.cpp @@ -130,7 +158,7 @@ class QueryStageMergeSortTestBase { } static NamespaceString nss() { - return NamespaceString(ns()); + return NamespaceString::createNamespaceString_forTest(ns()); } protected: @@ -179,14 +207,14 @@ class QueryStageMergeSortPrefixIndex : public QueryStageMergeSortTestBase { // a:1 auto params = makeIndexScanParams(&_opCtx, coll, getIndex(firstIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, coll, getIndex(secondIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, 
params, ws.get(), nullptr)); unique_ptr fetchStage = - make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll); + make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, &coll); // Must fetch if we want to easily pull out an obj. auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -248,13 +276,13 @@ class QueryStageMergeSortDups : public QueryStageMergeSortTestBase { // a:1 auto params = makeIndexScanParams(&_opCtx, coll, getIndex(firstIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, coll, getIndex(secondIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); unique_ptr fetchStage = - make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll); + make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, &coll); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -316,13 +344,13 @@ class QueryStageMergeSortDupsNoDedup : public QueryStageMergeSortTestBase { // a:1 auto params = makeIndexScanParams(&_opCtx, coll, getIndex(firstIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, coll, getIndex(secondIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); unique_ptr fetchStage = - make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll); + make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, &coll); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -388,15 +416,15 @@ class QueryStageMergeSortPrefixIndexReverse : public QueryStageMergeSortTestBase auto params = makeIndexScanParams(&_opCtx, coll, getIndex(firstIndex, coll)); params.bounds.startKey = objWithMaxKey(1); params.bounds.endKey = objWithMinKey(1); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, coll, getIndex(secondIndex, coll)); params.bounds.startKey = objWithMaxKey(1); params.bounds.endKey = objWithMinKey(1); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); unique_ptr fetchStage = - make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll); + make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, &coll); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -458,15 +486,15 @@ class QueryStageMergeSortOneStageEOF : public QueryStageMergeSortTestBase { // a:1 auto params = makeIndexScanParams(&_opCtx, coll, getIndex(firstIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); // b:51 (EOF) params = makeIndexScanParams(&_opCtx, coll, getIndex(secondIndex, coll)); params.bounds.startKey = BSON("" << 51 << "" << MinKey); params.bounds.endKey = BSON("" << 51 << "" << MaxKey); - ms->addChild(std::make_unique(_expCtx.get(), coll, 
params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); unique_ptr fetchStage = - make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll); + make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, &coll); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -524,10 +552,10 @@ class QueryStageMergeSortManyShort : public QueryStageMergeSortTestBase { for (int i = 0; i < numIndices; ++i) { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(indexSpec[i], coll)); ms->addChild( - std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); } unique_ptr fetchStage = - make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll); + make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, &coll); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -586,7 +614,7 @@ class QueryStageMergeSortDeletedDocument : public QueryStageMergeSortTestBase { for (int i = 0; i < numIndices; ++i) { auto params = makeIndexScanParams(&_opCtx, coll, getIndex(indexSpec[i], ctx.getCollection())); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, &ws, nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr)); } set recordIds; @@ -644,7 +672,7 @@ class QueryStageMergeSortDeletedDocument : public QueryStageMergeSortTestBase { // An attempt to fetch the WSM should show that the key is no longer present in the // index. - NamespaceString fakeNS("test", "coll"); + NamespaceString fakeNS = NamespaceString::createNamespaceString_forTest("test", "coll"); ASSERT_FALSE(WorkingSetCommon::fetch( &_opCtx, &ws, id, coll->getCursor(&_opCtx).get(), coll, fakeNS)); @@ -712,9 +740,9 @@ class QueryStageMergeSortConcurrentUpdateDedup : public QueryStageMergeSortTestB auto fetchStage = std::make_unique( _expCtx.get(), &ws, - std::make_unique(_expCtx.get(), coll, params, &ws, nullptr), + std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr), nullptr, - coll); + &coll); ms->addChild(std::move(fetchStage)); } @@ -726,9 +754,9 @@ class QueryStageMergeSortConcurrentUpdateDedup : public QueryStageMergeSortTestB auto fetchStage = std::make_unique( _expCtx.get(), &ws, - std::make_unique(_expCtx.get(), coll, params, &ws, nullptr), + std::make_unique(_expCtx.get(), &coll, params, &ws, nullptr), nullptr, - coll); + &coll); ms->addChild(std::move(fetchStage)); } @@ -812,14 +840,14 @@ class QueryStageMergeSortStringsWithNullCollation : public QueryStageMergeSortTe // a:1 auto params = makeIndexScanParams(&_opCtx, coll, getIndex(firstIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); // b:1 params = makeIndexScanParams(&_opCtx, coll, getIndex(secondIndex, coll)); - ms->addChild(std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr)); + ms->addChild(std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr)); auto fetchStage = - make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, coll); + make_unique(_expCtx.get(), ws.get(), std::move(ms), nullptr, &coll); // Must fetch if we want to easily pull out an obj. 
auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -884,19 +912,19 @@ class QueryStageMergeSortStringsRespectsCollation : public QueryStageMergeSortTe // a:1 auto params = makeIndexScanParams(&_opCtx, coll, getIndex(firstIndex, coll)); - auto idxScan = std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr); + auto idxScan = std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr); // Wrap 'idxScan' with a FETCH stage so a document is fetched and MERGE_SORT is forced to // use the provided collator 'collator'. Also, this permits easier retrieval of result // objects in the result verification code. ms->addChild( - make_unique(_expCtx.get(), ws.get(), std::move(idxScan), nullptr, coll)); + make_unique(_expCtx.get(), ws.get(), std::move(idxScan), nullptr, &coll)); // b:1 params = makeIndexScanParams(&_opCtx, coll, getIndex(secondIndex, coll)); - idxScan = std::make_unique(_expCtx.get(), coll, params, ws.get(), nullptr); + idxScan = std::make_unique(_expCtx.get(), &coll, params, ws.get(), nullptr); ms->addChild( - make_unique(_expCtx.get(), ws.get(), std::move(idxScan), nullptr, coll)); + make_unique(_expCtx.get(), ws.get(), std::move(idxScan), nullptr, &coll)); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, diff --git a/src/mongo/dbtests/query_stage_multiplan.cpp b/src/mongo/dbtests/query_stage_multiplan.cpp index 42a1488093765..f6e65c2da95e9 100644 --- a/src/mongo/dbtests/query_stage_multiplan.cpp +++ b/src/mongo/dbtests/query_stage_multiplan.cpp @@ -27,36 +27,87 @@ * it in the license file. */ +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/fetch.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/mock_stage.h" #include "mongo/db/exec/multi_plan.h" +#include "mongo/db/exec/plan_cache_util.h" #include "mongo/db/exec/plan_stage.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/plan_stats.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/classic_plan_cache.h" #include "mongo/db/query/collection_query_info.h" +#include "mongo/db/query/explain.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" +#include "mongo/db/query/index_bounds.h" #include "mongo/db/query/mock_yield_policies.h" +#include "mongo/db/query/plan_cache.h" #include "mongo/db/query/plan_cache_key_factory.h" +#include 
"mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/query/plan_explainer.h" #include "mongo/db/query/plan_summary_stats.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner.h" +#include "mongo/db/query/query_planner_params.h" #include "mongo/db/query/query_planner_test_lib.h" +#include "mongo/db/query/query_solution.h" #include "mongo/db/query/stage_builder_util.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/serialization_context.h" namespace mongo { @@ -72,7 +123,8 @@ namespace { using std::unique_ptr; using std::vector; -static const NamespaceString nss("unittests.QueryStageMultiPlan"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.QueryStageMultiPlan"); std::unique_ptr createQuerySolution() { auto soln = std::make_unique(); @@ -85,26 +137,26 @@ std::unique_ptr createQuerySolution() { class QueryStageMultiPlanTest : public unittest::Test { public: QueryStageMultiPlanTest() : _client(_opCtx.get()) { - dbtests::WriteContextForTests ctx(_opCtx.get(), nss.ns()); + dbtests::WriteContextForTests ctx(_opCtx.get(), nss.ns_forTest()); _client.dropCollection(nss); } virtual ~QueryStageMultiPlanTest() { - dbtests::WriteContextForTests ctx(_opCtx.get(), nss.ns()); + dbtests::WriteContextForTests ctx(_opCtx.get(), nss.ns_forTest()); _client.dropCollection(nss); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(_opCtx.get(), nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(_opCtx.get(), nss.ns_forTest(), obj)); } void insert(const BSONObj& obj) { - dbtests::WriteContextForTests ctx(_opCtx.get(), nss.ns()); + dbtests::WriteContextForTests ctx(_opCtx.get(), nss.ns_forTest()); _client.insert(nss, obj); } void remove(const BSONObj& obj) { - dbtests::WriteContextForTests ctx(_opCtx.get(), nss.ns()); + dbtests::WriteContextForTests ctx(_opCtx.get(), nss.ns_forTest()); _client.remove(nss, obj); } @@ -154,8 +206,8 @@ unique_ptr getIxScanPlan(ExpressionContext* expCtx, ixparams.bounds.boundInclusion = BoundInclusion::kIncludeBothStartAndEndKeys; ixparams.direction = 1; - auto ixscan = std::make_unique(expCtx, coll, ixparams, sharedWs, nullptr); - return std::make_unique(expCtx, sharedWs, std::move(ixscan), nullptr, coll); + auto ixscan = std::make_unique(expCtx, &coll, ixparams, sharedWs, nullptr); + return std::make_unique(expCtx, sharedWs, std::move(ixscan), nullptr, &coll); } unique_ptr makeMatchExpressionFromFilter(ExpressionContext* expCtx, @@ -175,7 +227,7 @@ unique_ptr getCollScanPlan(ExpressionContext* expCtx, CollectionScanParams csparams; csparams.direction = CollectionScanParams::FORWARD; - unique_ptr root(new CollectionScan(expCtx, coll, csparams, sharedWs, matchExpr)); + unique_ptr root(new CollectionScan(expCtx, 
&coll, csparams, sharedWs, matchExpr)); return root; } @@ -205,12 +257,13 @@ std::unique_ptr runMultiPlanner(ExpressionContext* expCtx, // Hand the plans off to the MPS. auto cq = makeCanonicalQuery(expCtx->opCtx, nss, BSON("foo" << desiredFooValue)); - unique_ptr mps = std::make_unique(expCtx, coll, cq.get()); + unique_ptr mps = std::make_unique(expCtx, &coll, cq.get()); mps->addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get()); mps->addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get()); // Plan 0 aka the first plan aka the index scan should be the best. - NoopYieldPolicy yieldPolicy(expCtx->opCtx->getServiceContext()->getFastClockSource()); + NoopYieldPolicy yieldPolicy(expCtx->opCtx, + expCtx->opCtx->getServiceContext()->getFastClockSource()); ASSERT_OK(mps->pickBestPlan(&yieldPolicy)); ASSERT(mps->bestPlanChosen()); ASSERT_EQUALS(getBestPlanRoot(mps.get()), ixScanRootPtr); @@ -258,7 +311,7 @@ TEST_F(QueryStageMultiPlanTest, MPSCollectionScanVsHighlySelectiveIXScan) { auto cq = makeCanonicalQuery(_opCtx.get(), nss, filterObj); unique_ptr mps = - std::make_unique(_expCtx.get(), ctx.getCollection(), cq.get()); + std::make_unique(_expCtx.get(), &ctx.getCollection(), cq.get()); mps->addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get()); mps->addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get()); @@ -392,7 +445,7 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) { findCommand->setFilter(BSON("a" << 1 << "b" << 1)); findCommand->setSort(BSON("b" << 1)); auto statusWithCQ = CanonicalQuery::canonicalize(opCtx(), std::move(findCommand)); - verify(statusWithCQ.isOK()); + MONGO_verify(statusWithCQ.isOK()); unique_ptr cq = std::move(statusWithCQ.getValue()); ASSERT(nullptr != cq.get()); auto key = plan_cache_key_factory::make(*cq, collection.getCollection()); @@ -416,17 +469,17 @@ TEST_F(QueryStageMultiPlanTest, MPSBackupPlan) { // Fill out the MultiPlanStage. unique_ptr mps( - new MultiPlanStage(_expCtx.get(), collection.getCollection(), cq.get())); + new MultiPlanStage(_expCtx.get(), &collection.getCollection(), cq.get())); unique_ptr ws(new WorkingSet()); // Put each solution from the planner into the MPR. for (size_t i = 0; i < solutions.size(); ++i) { auto&& root = stage_builder::buildClassicExecutableTree( - _opCtx.get(), collection.getCollection(), *cq, *solutions[i], ws.get()); + _opCtx.get(), &collection.getCollection(), *cq, *solutions[i], ws.get()); mps->addPlan(std::move(solutions[i]), std::move(root), ws.get()); } // This sets a backup plan. - NoopYieldPolicy yieldPolicy(_clock); + NoopYieldPolicy yieldPolicy(_expCtx->opCtx, _clock); ASSERT_OK(mps->pickBestPlan(&yieldPolicy)); ASSERT(mps->bestPlanChosen()); ASSERT(mps->hasBackupPlan()); @@ -507,7 +560,7 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) { findCommand->setFilter(BSON("x" << 1)); auto cq = uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(findCommand))); unique_ptr mps = - std::make_unique(_expCtx.get(), ctx.getCollection(), cq.get()); + std::make_unique(_expCtx.get(), &ctx.getCollection(), cq.get()); // Put each plan into the MultiPlanStage. Takes ownership of 'firstPlan' and 'secondPlan'. 
mps->addPlan(std::make_unique(), std::move(firstPlan), ws.get()); @@ -533,6 +586,7 @@ TEST_F(QueryStageMultiPlanTest, MPSExplainAllPlans) { ctx.getCollection(), ExplainOptions::Verbosity::kExecAllPlans, BSONObj(), + SerializationContext::stateCommandReply(), BSONObj(), &bob); BSONObj explained = bob.done(); @@ -636,11 +690,12 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlannin auto canonicalQuery = uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(findCommand))); MultiPlanStage multiPlanStage( - _expCtx.get(), coll.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache); + _expCtx.get(), &coll.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache); multiPlanStage.addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get()); multiPlanStage.addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get()); - AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(serviceContext()->getFastClockSource()); + AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(_expCtx->opCtx, + serviceContext()->getFastClockSource()); const auto status = multiPlanStage.pickBestPlan(&alwaysTimeOutPolicy); ASSERT_EQ(ErrorCodes::ExceededTimeLimit, status); ASSERT_STRING_CONTAINS(status.reason(), "error while multiplanner was selecting best plan"); @@ -676,11 +731,12 @@ TEST_F(QueryStageMultiPlanTest, ShouldReportErrorIfKilledDuringPlanning) { auto canonicalQuery = uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(findCommand))); MultiPlanStage multiPlanStage( - _expCtx.get(), coll.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache); + _expCtx.get(), &coll.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache); multiPlanStage.addPlan(createQuerySolution(), std::move(ixScanRoot), sharedWs.get()); multiPlanStage.addPlan(createQuerySolution(), std::move(collScanRoot), sharedWs.get()); - AlwaysPlanKilledYieldPolicy alwaysPlanKilledYieldPolicy(serviceContext()->getFastClockSource()); + AlwaysPlanKilledYieldPolicy alwaysPlanKilledYieldPolicy(_expCtx->opCtx, + serviceContext()->getFastClockSource()); ASSERT_EQ(ErrorCodes::QueryPlanKilled, multiPlanStage.pickBestPlan(&alwaysPlanKilledYieldPolicy)); } @@ -720,14 +776,14 @@ TEST_F(QueryStageMultiPlanTest, AddsContextDuringException) { auto canonicalQuery = uassertStatusOK(CanonicalQuery::canonicalize(opCtx(), std::move(findCommand))); MultiPlanStage multiPlanStage( - _expCtx.get(), ctx.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache); + _expCtx.get(), &ctx.getCollection(), canonicalQuery.get(), PlanCachingMode::NeverCache); unique_ptr sharedWs(new WorkingSet()); multiPlanStage.addPlan( createQuerySolution(), std::make_unique(_expCtx.get()), sharedWs.get()); multiPlanStage.addPlan( createQuerySolution(), std::make_unique(_expCtx.get()), sharedWs.get()); - NoopYieldPolicy yieldPolicy(_clock); + NoopYieldPolicy yieldPolicy(_expCtx->opCtx, _clock); auto status = multiPlanStage.pickBestPlan(&yieldPolicy); ASSERT_EQ(ErrorCodes::InternalError, status); ASSERT_STRING_CONTAINS(status.reason(), "error while multiplanner was selecting best plan"); diff --git a/src/mongo/dbtests/query_stage_near.cpp b/src/mongo/dbtests/query_stage_near.cpp index da0a4d4e85ee3..d8e37de3bef86 100644 --- a/src/mongo/dbtests/query_stage_near.cpp +++ b/src/mongo/dbtests/query_stage_near.cpp @@ -32,23 +32,47 @@ */ -#include "mongo/platform/basic.h" - #include +#include #include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" 
+#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/near.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/queued_data_stage.h" -#include "mongo/db/exec/working_set_common.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/stage_types.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { -const NamespaceString kTestNamespace("test.coll"); +const NamespaceString kTestNamespace = NamespaceString::createNamespaceString_forTest("test.coll"); const BSONObj kTestKeyPattern = BSON("testIndex" << 1); class QueryStageNearTest : public unittest::Test { @@ -57,7 +81,7 @@ class QueryStageNearTest : public unittest::Test { _expCtx = make_intrusive(_opCtx, nullptr, kTestNamespace); directClient.createCollection(kTestNamespace); - ASSERT_OK(dbtests::createIndex(_opCtx, kTestNamespace.ns(), kTestKeyPattern)); + ASSERT_OK(dbtests::createIndex(_opCtx, kTestNamespace.ns_forTest(), kTestKeyPattern)); _autoColl.emplace(_opCtx, kTestNamespace); const auto& coll = _autoColl->getCollection(); @@ -111,7 +135,7 @@ class MockNearStage final : public NearStage { "MOCK_DISTANCE_SEARCH_STAGE", STAGE_UNKNOWN, workingSet, - coll, + &coll, indexDescriptor), _pos(0) {} @@ -119,9 +143,8 @@ class MockNearStage final : public NearStage { _intervals.push_back(std::make_unique(data, min, max)); } - std::unique_ptr nextInterval(OperationContext* opCtx, - WorkingSet* workingSet, - const CollectionPtr& collection) final { + virtual std::unique_ptr nextInterval(OperationContext* opCtx, + WorkingSet* workingSet) final { if (_pos == static_cast(_intervals.size())) return nullptr; diff --git a/src/mongo/dbtests/query_stage_sort.cpp b/src/mongo/dbtests/query_stage_sort.cpp index 99444d70eedbe..7180ae418a038 100644 --- a/src/mongo/dbtests/query_stage_sort.cpp +++ b/src/mongo/dbtests/query_stage_sort.cpp @@ -27,28 +27,66 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include - -#include "mongo/client/dbclient_cursor.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" #include "mongo/db/catalog/collection_yield_restore.h" #include "mongo/db/catalog/database.h" #include "mongo/db/client.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/curop.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/exec/fetch.h" #include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/queued_data_stage.h" #include "mongo/db/exec/sort.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/sort_key_generator.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" #include "mongo/db/query/plan_executor_impl.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/query/query_knobs_gen.h" #include "mongo/db/query/query_planner_params.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" /** * This file tests db/exec/sort.cpp @@ -180,7 +218,7 @@ class QueryStageSortTestBase { std::move(keyGenStage)); auto fetchStage = std::make_unique( - _expCtx.get(), ws.get(), std::move(sortStage), nullptr, coll); + _expCtx.get(), ws.get(), std::move(sortStage), nullptr, &coll); // Must fetch so we can look at the doc as a BSONObj. 
auto statusWithPlanExecutor = @@ -245,7 +283,7 @@ class QueryStageSortTestBase { return "unittests.QueryStageSort"; } static NamespaceString nss() { - return NamespaceString(ns()); + return NamespaceString::createNamespaceString_forTest(ns()); } protected: @@ -408,7 +446,8 @@ class QueryStageSortMutationInvalidation : public QueryStageSortTestBase { oldDoc, newDoc(oldDoc), collection_internal::kUpdateNoIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); wuow.commit(); } @@ -433,7 +472,8 @@ class QueryStageSortMutationInvalidation : public QueryStageSortTestBase { oldDoc, newDoc(oldDoc), collection_internal::kUpdateNoIndexes, - nullptr, + nullptr /* indexesAffected */, + nullptr /* opDebug */, &args); wuow.commit(); } @@ -631,7 +671,7 @@ class QueryStageSortParallelArrays : public QueryStageSortTestBase { std::move(keyGenStage)); auto fetchStage = std::make_unique( - _expCtx.get(), ws.get(), std::move(sortStage), nullptr, coll); + _expCtx.get(), ws.get(), std::move(sortStage), nullptr, &coll); // We don't get results back since we're sorting some parallel arrays. auto statusWithPlanExecutor = diff --git a/src/mongo/dbtests/query_stage_sort_key_generator.cpp b/src/mongo/dbtests/query_stage_sort_key_generator.cpp index b1a77f75be23e..0c37c0d7f5fbe 100644 --- a/src/mongo/dbtests/query_stage_sort_key_generator.cpp +++ b/src/mongo/dbtests/query_stage_sort_key_generator.cpp @@ -27,17 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include - +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/document_value/document_value_test_util.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/queued_data_stage.h" #include "mongo/db/exec/sort_key_generator.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" #include "mongo/db/query/query_test_service_context.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -55,7 +72,7 @@ Value extractKeyFromKeyGenStage(SortKeyGeneratorStage* sortKeyGen, WorkingSet* w return wsm->metadata().getSortKey(); } -const NamespaceString kTestNss = NamespaceString("db.dummy"); +const NamespaceString kTestNss = NamespaceString::createNamespaceString_forTest("db.dummy"); /** * Given a JSON string 'sortSpec' representing a sort pattern, returns the corresponding sort key diff --git a/src/mongo/dbtests/query_stage_subplan.cpp b/src/mongo/dbtests/query_stage_subplan.cpp index 03e4082427c6c..45e803624c068 100644 --- a/src/mongo/dbtests/query_stage_subplan.cpp +++ b/src/mongo/dbtests/query_stage_subplan.cpp @@ -27,44 +27,71 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include #include - +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" -#include "mongo/db/catalog/database_holder.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/exec/plan_stage.h" #include "mongo/db/exec/subplan.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" -#include "mongo/db/pipeline/expression_context_for_test.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/get_executor.h" #include "mongo/db/query/mock_yield_policies.h" -#include "mongo/db/query/query_test_service_context.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/query/query_knobs_gen.h" +#include "mongo/db/query/query_planner_params.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { -static const NamespaceString nss("unittests.QueryStageSubplan"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.QueryStageSubplan"); class QueryStageSubplanTest : public unittest::Test { public: QueryStageSubplanTest() : _client(_opCtx.get()) {} virtual ~QueryStageSubplanTest() { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); _client.dropCollection(nss); } void addIndex(const BSONObj& obj) { - ASSERT_OK(dbtests::createIndex(opCtx(), nss.ns(), obj)); + ASSERT_OK(dbtests::createIndex(opCtx(), nss.ns_forTest(), obj)); } void dropIndex(BSONObj keyPattern) { @@ -126,7 +153,7 @@ class QueryStageSubplanTest : public unittest::Test { * back to regular planning. */ TEST_F(QueryStageSubplanTest, QueryStageSubplanGeo2dOr) { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); addIndex(BSON("a" << "2d" << "b" << 1)); @@ -151,10 +178,10 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanGeo2dOr) { WorkingSet ws; std::unique_ptr subplan( - new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get())); + new SubplanStage(_expCtx.get(), &collection, &ws, plannerParams, cq.get())); // Plan selection should succeed due to falling back on regular planning. 
- NoopYieldPolicy yieldPolicy(_clock); + NoopYieldPolicy yieldPolicy(_expCtx->opCtx, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); } @@ -190,9 +217,9 @@ void assertSubplanFromCache(QueryStageSubplanTest* test, const dbtests::WriteCon WorkingSet ws; std::unique_ptr subplan( - new SubplanStage(test->expCtx(), collection, &ws, plannerParams, cq.get())); + new SubplanStage(test->expCtx(), &collection, &ws, plannerParams, cq.get())); - NoopYieldPolicy yieldPolicy(test->serviceContext()->getFastClockSource()); + NoopYieldPolicy yieldPolicy(test->opCtx(), test->serviceContext()->getFastClockSource()); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); // Nothing is in the cache yet, so neither branch should have been planned from @@ -203,7 +230,7 @@ void assertSubplanFromCache(QueryStageSubplanTest* test, const dbtests::WriteCon // If we repeat the same query, the plan for the first branch should have come from // the cache. ws.clear(); - subplan.reset(new SubplanStage(test->expCtx(), collection, &ws, plannerParams, cq.get())); + subplan.reset(new SubplanStage(test->expCtx(), &collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -215,7 +242,7 @@ void assertSubplanFromCache(QueryStageSubplanTest* test, const dbtests::WriteCon * Test the SubplanStage's ability to plan an individual branch using the plan cache. */ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanFromCache) { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); addIndex(BSON("a" << 1)); addIndex(BSON("a" << 1 << "b" << 1)); @@ -228,7 +255,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanFromCache) { * Test that the SubplanStage can plan an individual branch from the cache using a $** index. */ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanFromCacheWithWildcardIndex) { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); addIndex(BSON("$**" << 1)); addIndex(BSON("a.$**" << 1)); @@ -240,7 +267,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanFromCacheWithWildcardIndex) { * Ensure that the subplan stage doesn't create a plan cache entry if there are no query results. */ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1)); @@ -270,9 +297,9 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) { WorkingSet ws; std::unique_ptr subplan( - new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get())); + new SubplanStage(_expCtx.get(), &collection, &ws, plannerParams, cq.get())); - NoopYieldPolicy yieldPolicy(_clock); + NoopYieldPolicy yieldPolicy(_expCtx->opCtx, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); // Nothing is in the cache yet, so neither branch should have been planned from @@ -284,7 +311,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) { // from the cache (because the first call to pickBestPlan() refrained from creating any // cache entries). 
ws.clear(); - subplan.reset(new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get())); + subplan.reset(new SubplanStage(_expCtx.get(), &collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -296,7 +323,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheZeroResults) { * Ensure that the subplan stage doesn't create a plan cache entry if the candidate plans tie. */ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheTies) { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1 << "c" << 1)); @@ -326,9 +353,9 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheTies) { WorkingSet ws; std::unique_ptr subplan( - new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get())); + new SubplanStage(_expCtx.get(), &collection, &ws, plannerParams, cq.get())); - NoopYieldPolicy yieldPolicy(_clock); + NoopYieldPolicy yieldPolicy(_expCtx->opCtx, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); // Nothing is in the cache yet, so neither branch should have been planned from @@ -340,7 +367,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanDontCacheTies) { // from the cache (because the first call to pickBestPlan() refrained from creating any // cache entries). ws.clear(); - subplan.reset(new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get())); + subplan.reset(new SubplanStage(_expCtx.get(), &collection, &ws, plannerParams, cq.get())); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); @@ -476,7 +503,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanCanUseSubplanning) { * Regression test for SERVER-19388. */ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); addIndex(BSON("a" << 1 << "b" << 1)); addIndex(BSON("a" << 1 << "c" << 1)); @@ -498,9 +525,9 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) { WorkingSet ws; std::unique_ptr subplan( - new SubplanStage(_expCtx.get(), collection, &ws, plannerParams, cq.get())); + new SubplanStage(_expCtx.get(), &collection, &ws, plannerParams, cq.get())); - NoopYieldPolicy yieldPolicy(_clock); + NoopYieldPolicy yieldPolicy(_expCtx->opCtx, _clock); ASSERT_OK(subplan->pickBestPlan(&yieldPolicy)); size_t numResults = 0; @@ -517,7 +544,7 @@ TEST_F(QueryStageSubplanTest, QueryStageSubplanPlanRootedOrNE) { } TEST_F(QueryStageSubplanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlanning) { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); // Build a query with a rooted $or. auto findCommand = std::make_unique(nss); findCommand->setFilter(BSON("$or" << BSON_ARRAY(BSON("p1" << 1) << BSON("p2" << 2)))); @@ -543,14 +570,15 @@ TEST_F(QueryStageSubplanTest, ShouldReportErrorIfExceedsTimeLimitDuringPlanning) // Create the SubplanStage. 
WorkingSet workingSet; auto coll = ctx.getCollection(); - SubplanStage subplanStage(_expCtx.get(), coll, &workingSet, params, canonicalQuery.get()); + SubplanStage subplanStage(_expCtx.get(), &coll, &workingSet, params, canonicalQuery.get()); - AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(serviceContext()->getFastClockSource()); + AlwaysTimeOutYieldPolicy alwaysTimeOutPolicy(_expCtx->opCtx, + serviceContext()->getFastClockSource()); ASSERT_EQ(ErrorCodes::ExceededTimeLimit, subplanStage.pickBestPlan(&alwaysTimeOutPolicy)); } TEST_F(QueryStageSubplanTest, ShouldReportErrorIfKilledDuringPlanning) { - dbtests::WriteContextForTests ctx(opCtx(), nss.ns()); + dbtests::WriteContextForTests ctx(opCtx(), nss.ns_forTest()); // Build a query with a rooted $or. auto findCommand = std::make_unique(nss); findCommand->setFilter(BSON("$or" << BSON_ARRAY(BSON("p1" << 1) << BSON("p2" << 2)))); @@ -568,16 +596,17 @@ TEST_F(QueryStageSubplanTest, ShouldReportErrorIfKilledDuringPlanning) { // Create the SubplanStage. WorkingSet workingSet; auto coll = ctx.getCollection(); - SubplanStage subplanStage(_expCtx.get(), coll, &workingSet, params, canonicalQuery.get()); + SubplanStage subplanStage(_expCtx.get(), &coll, &workingSet, params, canonicalQuery.get()); - AlwaysPlanKilledYieldPolicy alwaysPlanKilledYieldPolicy(serviceContext()->getFastClockSource()); + AlwaysPlanKilledYieldPolicy alwaysPlanKilledYieldPolicy(_expCtx->opCtx, + serviceContext()->getFastClockSource()); ASSERT_EQ(ErrorCodes::QueryPlanKilled, subplanStage.pickBestPlan(&alwaysPlanKilledYieldPolicy)); } TEST_F(QueryStageSubplanTest, ShouldThrowOnRestoreIfIndexDroppedBeforePlanSelection) { CollectionPtr collection; { - dbtests::WriteContextForTests ctx{opCtx(), nss.ns()}; + dbtests::WriteContextForTests ctx{opCtx(), nss.ns_forTest()}; addIndex(BSON("p1" << 1 << "opt1" << 1)); addIndex(BSON("p1" << 1 << "opt2" << 1)); addIndex(BSON("p2" << 1 << "opt1" << 1)); @@ -604,7 +633,8 @@ TEST_F(QueryStageSubplanTest, ShouldThrowOnRestoreIfIndexDroppedBeforePlanSelect // Create the SubplanStage. WorkingSet workingSet; - SubplanStage subplanStage(_expCtx.get(), collection, &workingSet, params, canonicalQuery.get()); + SubplanStage subplanStage( + _expCtx.get(), &collection, &workingSet, params, canonicalQuery.get()); // Mimic a yield by saving the state of the subplan stage. Then, drop an index not being used // while yielded. @@ -623,7 +653,7 @@ TEST_F(QueryStageSubplanTest, ShouldThrowOnRestoreIfIndexDroppedBeforePlanSelect TEST_F(QueryStageSubplanTest, ShouldNotThrowOnRestoreIfIndexDroppedAfterPlanSelection) { CollectionPtr collection; { - dbtests::WriteContextForTests ctx{opCtx(), nss.ns()}; + dbtests::WriteContextForTests ctx{opCtx(), nss.ns_forTest()}; addIndex(BSON("p1" << 1 << "opt1" << 1)); addIndex(BSON("p1" << 1 << "opt2" << 1)); addIndex(BSON("p2" << 1 << "opt1" << 1)); @@ -651,9 +681,10 @@ TEST_F(QueryStageSubplanTest, ShouldNotThrowOnRestoreIfIndexDroppedAfterPlanSele // Create the SubplanStage. WorkingSet workingSet; - SubplanStage subplanStage(_expCtx.get(), collection, &workingSet, params, canonicalQuery.get()); + SubplanStage subplanStage( + _expCtx.get(), &collection, &workingSet, params, canonicalQuery.get()); - NoopYieldPolicy yieldPolicy(serviceContext()->getFastClockSource()); + NoopYieldPolicy yieldPolicy(_expCtx->opCtx, serviceContext()->getFastClockSource()); ASSERT_OK(subplanStage.pickBestPlan(&yieldPolicy)); // Mimic a yield by saving the state of the subplan stage and dropping our lock. 
Then drop an diff --git a/src/mongo/dbtests/query_stage_tests.cpp b/src/mongo/dbtests/query_stage_tests.cpp index b08beb2045405..7d967aa3af28c 100644 --- a/src/mongo/dbtests/query_stage_tests.cpp +++ b/src/mongo/dbtests/query_stage_tests.cpp @@ -27,25 +27,48 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include - -#include "mongo/client/dbclient_cursor.h" +#include +#include +#include + +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/exec/index_scan.h" #include "mongo/db/exec/plan_stage.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/working_set.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/matcher/expression.h" #include "mongo/db/matcher/expression_parser.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/plan_executor.h" #include "mongo/db/query/plan_executor_factory.h" +#include "mongo/db/query/plan_yield_policy.h" #include "mongo/db/query/query_planner_params.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" /** * This file tests db/exec/index_scan.cpp @@ -82,16 +105,17 @@ class IndexScanBase { } int countResults(const IndexScanParams& params, BSONObj filterObj = BSONObj()) { - AutoGetCollectionForReadCommand ctx(&_opCtx, NamespaceString(ns())); + AutoGetCollectionForReadCommand ctx(&_opCtx, + NamespaceString::createNamespaceString_forTest(ns())); StatusWithMatchExpression statusWithMatcher = MatchExpressionParser::parse(filterObj, _expCtx); - verify(statusWithMatcher.isOK()); + MONGO_verify(statusWithMatcher.isOK()); unique_ptr filterExpr = std::move(statusWithMatcher.getValue()); unique_ptr ws = std::make_unique(); unique_ptr ix = std::make_unique( - _expCtx.get(), ctx.getCollection(), params, ws.get(), filterExpr.get()); + _expCtx.get(), &ctx.getCollection(), params, ws.get(), filterExpr.get()); auto statusWithPlanExecutor = plan_executor_factory::make(_expCtx, @@ -115,7 +139,8 @@ class IndexScanBase { } const IndexDescriptor* getIndex(const BSONObj& obj) { - AutoGetCollectionForReadCommand collection(&_opCtx, NamespaceString(ns())); + AutoGetCollectionForReadCommand collection( + &_opCtx, NamespaceString::createNamespaceString_forTest(ns())); std::vector indexes; collection->getIndexCatalog()->findIndexesByKeyPattern( &_opCtx, obj, IndexCatalog::InclusionPolicy::kReady, &indexes); @@ -124,7 +149,8 @@ class IndexScanBase { IndexScanParams makeIndexScanParams(OperationContext* opCtx, const IndexDescriptor* descriptor) { - AutoGetCollectionForReadCommand collection(&_opCtx, NamespaceString(ns())); + AutoGetCollectionForReadCommand collection( + &_opCtx, 
NamespaceString::createNamespaceString_forTest(ns())); IndexScanParams params(opCtx, *collection, descriptor); params.bounds.isSimpleRange = true; params.bounds.endKey = BSONObj(); @@ -140,15 +166,15 @@ class IndexScanBase { return "unittests.IndexScan"; } static NamespaceString nss() { - return NamespaceString(ns()); + return NamespaceString::createNamespaceString_forTest(ns()); } protected: const ServiceContext::UniqueOperationContext _txnPtr = cc().makeOperationContext(); OperationContext& _opCtx = *_txnPtr; - boost::intrusive_ptr _expCtx = - new ExpressionContext(&_opCtx, nullptr, NamespaceString(ns())); + boost::intrusive_ptr _expCtx = new ExpressionContext( + &_opCtx, nullptr, NamespaceString::createNamespaceString_forTest(ns())); private: DBDirectClient _client; diff --git a/src/mongo/dbtests/query_stage_trial.cpp b/src/mongo/dbtests/query_stage_trial.cpp index 72437204c7e1d..de7dab5d4b005 100644 --- a/src/mongo/dbtests/query_stage_trial.cpp +++ b/src/mongo/dbtests/query_stage_trial.cpp @@ -27,24 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include #include - +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/mock_stage.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/trial_stage.h" #include "mongo/db/exec/working_set.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/mock_yield_policies.h" -#include "mongo/db/service_context_d_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/query/plan_yield_policy.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { -const NamespaceString kTestNss = NamespaceString("db.dummy"); +const NamespaceString kTestNss = NamespaceString::createNamespaceString_forTest("db.dummy"); class TrialStageTest : public unittest::Test { public: @@ -88,7 +107,7 @@ class TrialStageTest : public unittest::Test { std::unique_ptr yieldPolicy() { return std::make_unique( - opCtx()->getServiceContext()->getFastClockSource()); + opCtx(), opCtx()->getServiceContext()->getFastClockSource()); } OperationContext* opCtx() { diff --git a/src/mongo/dbtests/query_stage_update.cpp b/src/mongo/dbtests/query_stage_update.cpp index e50510aa2c65c..859e0da6c4272 100644 --- a/src/mongo/dbtests/query_stage_update.cpp +++ b/src/mongo/dbtests/query_stage_update.cpp @@ -31,29 +31,58 @@ * This file tests the UpdateStage class */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/catalog/collection.h" -#include "mongo/db/catalog/database.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include 
"mongo/db/dbdirectclient.h" #include "mongo/db/exec/collection_scan.h" +#include "mongo/db/exec/collection_scan_common.h" +#include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/eof.h" +#include "mongo/db/exec/plan_stage.h" +#include "mongo/db/exec/plan_stats.h" #include "mongo/db/exec/queued_data_stage.h" #include "mongo/db/exec/update_stage.h" #include "mongo/db/exec/upsert_stage.h" #include "mongo/db/exec/working_set.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/db/matcher/expression_with_placeholder.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/plan_executor.h" +#include "mongo/db/record_id.h" #include "mongo/db/service_context.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/transaction_resources.h" #include "mongo/db/update/update_driver.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace QueryStageUpdate { @@ -61,18 +90,19 @@ using std::make_unique; using std::unique_ptr; using std::vector; -static const NamespaceString nss("unittests.QueryStageUpdate"); +static const NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.QueryStageUpdate"); class QueryStageUpdateBase { public: QueryStageUpdateBase() : _client(&_opCtx) { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); _client.dropCollection(nss); _client.createCollection(nss); } virtual ~QueryStageUpdateBase() { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + dbtests::WriteContextForTests ctx(&_opCtx, nss.ns_forTest()); _client.dropCollection(nss); } @@ -123,13 +153,13 @@ class QueryStageUpdateBase { params.tailable = false; unique_ptr scan( - new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr)); + new CollectionScan(_expCtx.get(), &collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - verify(member->hasObj()); + MONGO_verify(member->hasObj()); out->push_back(member->doc.value().toBson().getOwned()); } } @@ -145,13 +175,13 @@ class QueryStageUpdateBase { params.tailable = false; unique_ptr scan( - new CollectionScan(_expCtx.get(), collection, params, &ws, nullptr)); + new CollectionScan(_expCtx.get(), &collection, params, &ws, nullptr)); while (!scan->isEOF()) { WorkingSetID id = WorkingSet::INVALID_ID; PlanStage::StageState state = scan->work(&id); if (PlanStage::ADVANCED == state) { WorkingSetMember* member = ws.get(id); - verify(member->hasRecordId()); + MONGO_verify(member->hasRecordId()); out->push_back(member->recordId); } } @@ -190,12 +220,15 @@ class QueryStageUpdateUpsertEmptyColl : public QueryStageUpdateBase { void run() { // Run the update. 
{ - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto collection = + acquireCollection(&_opCtx, + CollectionAcquisitionRequest::fromOpCtx( + &_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection.exists()); CurOp& curOp = *CurOp::get(_opCtx); OpDebug* opDebug = &curOp.debug(); UpdateDriver driver(_expCtx); - CollectionPtr collection = ctx.getCollection(); - ASSERT(collection); // Collection should be empty. ASSERT_EQUALS(0U, count(BSONObj())); @@ -254,7 +287,12 @@ class QueryStageUpdateSkipDeletedDoc : public QueryStageUpdateBase { void run() { // Run the update. { - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto collection = + acquireCollection(&_opCtx, + CollectionAcquisitionRequest::fromOpCtx( + &_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection.exists()); // Populate the collection. for (int i = 0; i < 10; ++i) { @@ -265,13 +303,10 @@ class QueryStageUpdateSkipDeletedDoc : public QueryStageUpdateBase { CurOp& curOp = *CurOp::get(_opCtx); OpDebug* opDebug = &curOp.debug(); UpdateDriver driver(_expCtx); - CollectionPtr coll( - CollectionCatalog::get(&_opCtx)->lookupCollectionByNamespace(&_opCtx, nss)); - ASSERT(coll); // Get the RecordIds that would be returned by an in-order scan. vector recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(collection.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); auto request = UpdateRequest(); request.setNamespaceString(nss); @@ -304,10 +339,10 @@ class QueryStageUpdateSkipDeletedDoc : public QueryStageUpdateBase { auto ws = make_unique(); auto cs = make_unique( - _expCtx.get(), coll, collScanParams, ws.get(), cq->root()); + _expCtx.get(), collection, collScanParams, ws.get(), cq->root()); - auto updateStage = - make_unique(_expCtx.get(), updateParams, ws.get(), coll, cs.release()); + auto updateStage = make_unique( + _expCtx.get(), updateParams, ws.get(), collection, cs.release()); const UpdateStats* stats = static_cast(updateStage->getSpecificStats()); @@ -322,10 +357,12 @@ class QueryStageUpdateSkipDeletedDoc : public QueryStageUpdateBase { // Remove recordIds[targetDocIndex]; static_cast(updateStage.get())->saveState(); - BSONObj targetDoc = coll->docFor(&_opCtx, recordIds[targetDocIndex]).value(); + BSONObj targetDoc = + collection.getCollectionPtr()->docFor(&_opCtx, recordIds[targetDocIndex]).value(); ASSERT(!targetDoc.isEmpty()); remove(targetDoc); - static_cast(updateStage.get())->restoreState(&coll); + static_cast(updateStage.get()) + ->restoreState(&collection.getCollectionPtr()); // Do the remaining updates. while (!updateStage->isEOF()) { @@ -374,10 +411,12 @@ class QueryStageUpdateReturnOldDoc : public QueryStageUpdateBase { ASSERT_EQUALS(10U, count(BSONObj())); // Various variables we'll need. - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto collection = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection.exists()); OpDebug* opDebug = &CurOp::get(_opCtx)->debug(); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); auto request = UpdateRequest(); request.setNamespaceString(nss); UpdateDriver driver(_expCtx); @@ -388,7 +427,7 @@ class QueryStageUpdateReturnOldDoc : public QueryStageUpdateBase { // Get the RecordIds that would be returned by an in-order scan. 
vector recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(collection.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); // Populate the request. request.setQuery(query); @@ -419,8 +458,8 @@ class QueryStageUpdateReturnOldDoc : public QueryStageUpdateBase { UpdateStageParams updateParams(&request, &driver, opDebug); updateParams.canonicalQuery = cq.get(); - const auto updateStage = - make_unique(_expCtx.get(), updateParams, ws.get(), coll, qds.release()); + const auto updateStage = make_unique( + _expCtx.get(), updateParams, ws.get(), collection, qds.release()); // Should return advanced. id = WorkingSet::INVALID_ID; @@ -444,7 +483,7 @@ class QueryStageUpdateReturnOldDoc : public QueryStageUpdateBase { // Should have done the update. BSONObj newDoc = BSON("_id" << targetDocIndex << "foo" << targetDocIndex << "x" << 0); vector objs; - getCollContents(coll, &objs); + getCollContents(collection.getCollectionPtr(), &objs); ASSERT_BSONOBJ_EQ(objs[targetDocIndex], newDoc); // That should be it. @@ -467,10 +506,12 @@ class QueryStageUpdateReturnNewDoc : public QueryStageUpdateBase { ASSERT_EQUALS(50U, count(BSONObj())); // Various variables we'll need. - dbtests::WriteContextForTests ctx(&_opCtx, nss.ns()); + const auto collection = acquireCollection( + &_opCtx, + CollectionAcquisitionRequest::fromOpCtx(&_opCtx, nss, AcquisitionPrerequisites::kWrite), + MODE_IX); + ASSERT(collection.exists()); OpDebug* opDebug = &CurOp::get(_opCtx)->debug(); - const CollectionPtr& coll = ctx.getCollection(); - ASSERT(coll); auto request = UpdateRequest(); request.setNamespaceString(nss); UpdateDriver driver(_expCtx); @@ -481,7 +522,7 @@ class QueryStageUpdateReturnNewDoc : public QueryStageUpdateBase { // Get the RecordIds that would be returned by an in-order scan. vector recordIds; - getRecordIds(coll, CollectionScanParams::FORWARD, &recordIds); + getRecordIds(collection.getCollectionPtr(), CollectionScanParams::FORWARD, &recordIds); // Populate the request. request.setQuery(query); @@ -512,8 +553,8 @@ class QueryStageUpdateReturnNewDoc : public QueryStageUpdateBase { UpdateStageParams updateParams(&request, &driver, opDebug); updateParams.canonicalQuery = cq.get(); - auto updateStage = - make_unique(_expCtx.get(), updateParams, ws.get(), coll, qds.release()); + auto updateStage = make_unique( + _expCtx.get(), updateParams, ws.get(), collection, qds.release()); // Should return advanced. id = WorkingSet::INVALID_ID; @@ -537,7 +578,7 @@ class QueryStageUpdateReturnNewDoc : public QueryStageUpdateBase { // Should have done the update. vector objs; - getCollContents(coll, &objs); + getCollContents(collection.getCollectionPtr(), &objs); ASSERT_BSONOBJ_EQ(objs[targetDocIndex], newDoc); // That should be it. diff --git a/src/mongo/dbtests/querytests.cpp b/src/mongo/dbtests/querytests.cpp index 091357cf35d99..8b6500ad30f3e 100644 --- a/src/mongo/dbtests/querytests.cpp +++ b/src/mongo/dbtests/querytests.cpp @@ -27,34 +27,84 @@ * it in the license file. 
*/ -#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/client/index_spec.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/multi_index_block.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" #include "mongo/db/clientcursor.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/cursor_manager.h" +#include "mongo/db/database_name.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" #include "mongo/db/dbhelpers.h" -#include "mongo/db/exec/queued_data_stage.h" +#include "mongo/db/index/index_build_interceptor.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/json.h" -#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/query/find.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/service_context.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/timer.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { -void insertOplogDocument(OperationContext* opCtx, Timestamp ts, const char* ns) { - AutoGetCollection coll(opCtx, NamespaceString{ns}, MODE_IX); +void insertOplogDocument(OperationContext* opCtx, Timestamp ts, StringData ns) { + AutoGetCollection coll(opCtx, NamespaceString::createNamespaceString_forTest(ns), MODE_IX); WriteUnitOfWork wuow(opCtx); auto doc = BSON("ts" << ts); InsertStatement stmt; @@ -261,7 +311,7 @@ class BoundedKey : public ClientBase { a.appendMaxKey("$lt"); BSONObj limit = a.done(); ASSERT(!_client.findOne(_nss, BSON("a" << limit)).isEmpty()); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); FindCommandRequest findCmd{_nss}; findCmd.setFilter(BSON("a" << limit)); findCmd.setHint(BSON("a" << 1)); @@ -730,7 +780,7 @@ class OplogScanWithGtTimstampPred : public ClientBase { << 
"oplog.querytests.OplogScanWithGtTimstampPred"), info); } - const char* ns = _nss.ns().c_str(); + const auto ns = _nss.ns_forTest(); insertOplogDocument(&_opCtx, Timestamp(1000, 0), ns); insertOplogDocument(&_opCtx, Timestamp(1000, 1), ns); insertOplogDocument(&_opCtx, Timestamp(1000, 2), ns); @@ -783,7 +833,7 @@ class OplogScanGtTsExplain : public ClientBase { info); } - const char* ns = _nss.ns().c_str(); + const auto ns = _nss.ns_forTest(); insertOplogDocument(&_opCtx, Timestamp(1000, 0), ns); insertOplogDocument(&_opCtx, Timestamp(1000, 1), ns); insertOplogDocument(&_opCtx, Timestamp(1000, 2), ns); @@ -816,7 +866,7 @@ class BasicCount : public ClientBase { _client.dropCollection(_nss); } void run() { - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); count(0); insert(_nss, BSON("a" << 3)); count(0); @@ -843,7 +893,7 @@ class ArrayId : public ClientBase { _client.dropCollection(_nss); } void run() { - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("_id" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("_id" << 1))); auto response = _client.insertAcknowledged(_nss, {fromjson("{'_id':[1,2]}")}); ASSERT_NOT_OK(getStatusFromWriteCommandReply(response)); @@ -930,7 +980,7 @@ class EmbeddedNumericTypes : public ClientBase { void run() { _client.insert(_nss, BSON("a" << BSON("b" << 1))); ASSERT(!_client.findOne(_nss, BSON("a" << BSON("b" << 1.0))).isEmpty()); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); ASSERT(!_client.findOne(_nss, BSON("a" << BSON("b" << 1.0))).isEmpty()); } @@ -956,17 +1006,17 @@ class AutoResetIndexCache : public ClientBase { ASSERT_EQUALS(0u, _client.getIndexSpecs(_nss, includeBuildUUIDs, options).size()); } void checkIndex() { - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); index(); } void run() { - _client.dropDatabase({boost::none, "unittests"}); + _client.dropDatabase(DatabaseName::createDatabaseName_forTest(boost::none, "unittests")); noIndex(); checkIndex(); _client.dropCollection(_nss); noIndex(); checkIndex(); - _client.dropDatabase({boost::none, "unittests"}); + _client.dropDatabase(DatabaseName::createDatabaseName_forTest(boost::none, "unittests")); noIndex(); checkIndex(); } @@ -982,12 +1032,12 @@ class UniqueIndex : public ClientBase { _client.dropCollection(_nss); } void run() { - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1), true)); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1), true)); _client.insert(_nss, BSON("a" << 4 << "b" << 2)); _client.insert(_nss, BSON("a" << 4 << "b" << 3)); ASSERT_EQUALS(1U, _client.count(_nss, BSONObj())); _client.dropCollection(_nss); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("b" << 1), true)); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("b" << 1), true)); _client.insert(_nss, BSON("a" << 4 << "b" << 2)); _client.insert(_nss, BSON("a" << 4 << "b" << 3)); ASSERT_EQUALS(2U, _client.count(_nss, BSONObj())); @@ -1007,7 +1057,7 @@ class UniqueIndexPreexistingData : public ClientBase { _client.insert(_nss, BSON("a" << 4 << "b" << 2)); _client.insert(_nss, BSON("a" << 4 << "b" << 3)); ASSERT_EQUALS(ErrorCodes::DuplicateKey, - dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1), true)); + 
dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1), true)); } private: @@ -1038,7 +1088,7 @@ class Size : public ClientBase { } void run() { _client.insert(_nss, fromjson("{a:[1,2,3]}")); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); FindCommandRequest findRequest{_nss}; findRequest.setFilter(BSON("a" << mongo::BSIZE << 3)); findRequest.setHint(BSON("a" << 1)); @@ -1060,7 +1110,7 @@ class FullArray : public ClientBase { FindCommandRequest findRequest{_nss}; findRequest.setFilter(fromjson("{a:[1,2,3]}")); ASSERT(_client.find(findRequest)->more()); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); findRequest.setFilter(fromjson("{a:{$in:[1,[1,2,3]]}}")); findRequest.setHint(BSON("a" << 1)); ASSERT(_client.find(findRequest)->more()); @@ -1081,7 +1131,7 @@ class InsideArray : public ClientBase { void run() { _client.insert(_nss, fromjson("{a:[[1],2]}")); check("$natural"); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); check("a"); } @@ -1111,7 +1161,7 @@ class IndexInsideArrayCorrect : public ClientBase { void run() { _client.insert(_nss, fromjson("{'_id':1,a:[1]}")); _client.insert(_nss, fromjson("{'_id':2,a:[[1]]}")); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); FindCommandRequest findRequest{_nss}; findRequest.setFilter(fromjson("{a:[1]}")); findRequest.setHint(BSON("a" << 1)); @@ -1131,7 +1181,7 @@ class SubobjArr : public ClientBase { void run() { _client.insert(_nss, fromjson("{a:[{b:[1]}]}")); check("$natural"); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); check("a"); } @@ -1157,7 +1207,7 @@ class MatchCodeCodeWScope : public ClientBase { } void run() { checkMatch(); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); checkMatch(); } @@ -1196,7 +1246,7 @@ class MatchDBRefType : public ClientBase { } void run() { checkMatch(); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("a" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("a" << 1))); checkMatch(); } @@ -1224,7 +1274,7 @@ class DirectLocking : public ClientBase { OldClientContext ctx( &_opCtx, NamespaceString::createNamespaceString_forTest("unittests.DirectLocking")); _client.remove(NamespaceString::createNamespaceString_forTest("a.b"), BSONObj()); - ASSERT_EQUALS("unittests", ctx.db()->name().db()); + ASSERT_EQUALS("unittests", ctx.db()->name().toString_forTest()); } const char* ns; }; @@ -1238,7 +1288,7 @@ class FastCountIn : public ClientBase { _client.insert(_nss, BSON("i" << "a")); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("i" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("i" << 1))); ASSERT_EQUALS(1U, _client.count(_nss, fromjson("{i:{$in:['a']}}"))); } @@ -1283,7 +1333,7 @@ class DifferentNumbers : public ClientBase { std::unique_ptr cursor = _client.find(std::move(findRequest)); while (cursor->more()) { BSONObj o = cursor->next(); - verify(validateBSON(o).isOK()); + MONGO_verify(validateBSON(o).isOK()); } } void run() { @@ -1319,7 
+1369,7 @@ class DifferentNumbers : public ClientBase { } t(_nss); - ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns(), BSON("7" << 1))); + ASSERT_OK(dbtests::createIndex(&_opCtx, _nss.ns_forTest(), BSON("7" << 1))); t(_nss); } @@ -1347,8 +1397,8 @@ class CollectionBase : public ClientBase { return CursorManager::get(&_opCtx)->numCursors(); } - const char* ns() { - return _nss.ns().c_str(); + StringData ns() { + return _nss.ns_forTest(); } const NamespaceString& nss() { return _nss; @@ -1414,7 +1464,7 @@ class TailableCappedRaceCondition : public CollectionBase { CollectionOptions collectionOptions = unittest::assertGet( CollectionOptions::parse(fromjson("{ capped : true, size : 2000, max: 10000 }"), CollectionOptions::parseForCommand)); - NamespaceString nss(ns()); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns()); ASSERT(ctx.db()->userCreateNS(&_opCtx, nss, collectionOptions, false).isOK()); wunit.commit(); } @@ -1558,7 +1608,7 @@ class FindingStart : public CollectionBase { BSONObj info; // Must use local db so that the collection is not replicated, to allow autoIndexId:false. - _client.runCommand({boost::none, "local"}, + _client.runCommand(DatabaseName::kLocal, BSON("create" << "oplog.querytests.findingstart" << "capped" << true << "size" << 4096 << "autoIndexId" << false), @@ -1570,7 +1620,7 @@ class FindingStart : public CollectionBase { // To ensure we are working with a clean oplog (an oplog without entries), we resort // to truncating the oplog instead. if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) { - _client.runCommand({boost::none, "local"}, + _client.runCommand(DatabaseName::kLocal, BSON("emptycapped" << "oplog.querytests.findingstart"), info); @@ -1593,11 +1643,12 @@ class FindingStart : public CollectionBase { for (int k = 0; k < 5; ++k) { auto ts = Timestamp(1000, i++); insertOplogDocument(&_opCtx, ts, ns()); - FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setSort(BSON("$natural" << 1)); unsigned min = _client.find(findRequest)->next()["ts"].timestamp().getInc(); for (unsigned j = -1; j < i; ++j) { - FindCommandRequest findRequestInner{NamespaceString{ns()}}; + FindCommandRequest findRequestInner{ + NamespaceString::createNamespaceString_forTest(ns())}; findRequestInner.setFilter(BSON("ts" << GTE << Timestamp(1000, j))); std::unique_ptr c = _client.find(findRequestInner); ASSERT(c->more()); @@ -1627,7 +1678,7 @@ class FindingStartPartiallyFull : public CollectionBase { BSONObj info; // Must use local db so that the collection is not replicated, to allow autoIndexId:false. - _client.runCommand({boost::none, "local"}, + _client.runCommand(DatabaseName::kLocal, BSON("create" << "oplog.querytests.findingstart" << "capped" << true << "size" << 4096 << "autoIndexId" << false), @@ -1639,7 +1690,7 @@ class FindingStartPartiallyFull : public CollectionBase { // To ensure we are working with a clean oplog (an oplog without entries), we resort // to truncating the oplog instead. 
if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) { - _client.runCommand({boost::none, "local"}, + _client.runCommand(DatabaseName::kLocal, BSON("emptycapped" << "oplog.querytests.findingstart"), info); @@ -1651,11 +1702,12 @@ class FindingStartPartiallyFull : public CollectionBase { for (int k = 0; k < 5; ++k) { insertOplogDocument(&_opCtx, Timestamp(1000, i++), ns()); - FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setSort(BSON("$natural" << 1)); unsigned min = _client.find(findRequest)->next()["ts"].timestamp().getInc(); for (unsigned j = -1; j < i; ++j) { - FindCommandRequest findRequestInner{NamespaceString{ns()}}; + FindCommandRequest findRequestInner{ + NamespaceString::createNamespaceString_forTest(ns())}; findRequestInner.setFilter(BSON("ts" << GTE << Timestamp(1000, j))); std::unique_ptr c = _client.find(findRequestInner); ASSERT(c->more()); @@ -1697,7 +1749,7 @@ class FindingStartStale : public CollectionBase { BSONObj info; // Must use local db so that the collection is not replicated, to allow autoIndexId:false. - _client.runCommand({boost::none, "local"}, + _client.runCommand(DatabaseName::kLocal, BSON("create" << "oplog.querytests.findingstart" << "capped" << true << "size" << 4096 << "autoIndexId" << false), @@ -1709,14 +1761,14 @@ class FindingStartStale : public CollectionBase { // To ensure we are working with a clean oplog (an oplog without entries), we resort // to truncating the oplog instead. if (_opCtx.getServiceContext()->getStorageEngine()->supportsRecoveryTimestamp()) { - _client.runCommand({boost::none, "local"}, + _client.runCommand(DatabaseName::kLocal, BSON("emptycapped" << "oplog.querytests.findingstart"), info); } // Check oplog replay mode with empty collection. 
- FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setFilter(BSON("ts" << GTE << Timestamp(1000, 50))); std::unique_ptr c = _client.find(findRequest); ASSERT(!c->more()); @@ -1740,7 +1792,7 @@ class WhatsMyUri : public CollectionBase { WhatsMyUri() : CollectionBase("whatsmyuri") {} void run() { BSONObj result; - _client.runCommand({boost::none, "admin"}, BSON("whatsmyuri" << 1), result); + _client.runCommand(DatabaseName::kAdmin, BSON("whatsmyuri" << 1), result); ASSERT_EQUALS("", result["you"].str()); } }; @@ -1750,7 +1802,7 @@ class WhatsMySni : public CollectionBase { WhatsMySni() : CollectionBase("whatsmysni") {} void run() { BSONObj result; - _client.runCommand({boost::none, "admin"}, BSON("whatsmysni" << 1), result); + _client.runCommand(DatabaseName::kAdmin, BSON("whatsmysni" << 1), result); ASSERT_EQUALS("", result["sni"].str()); } }; @@ -1774,7 +1826,7 @@ class QueryByUuid : public CollectionBase { insert(nss(), BSON("a" << 3)); std::unique_ptr cursor = _client.find(FindCommandRequest{NamespaceStringOrUUID{"unittests", *coll_opts.uuid}}); - ASSERT_EQUALS(string(ns()), cursor->getns()); + ASSERT_EQUALS(nss(), cursor->getNamespaceString()); for (int i = 1; i <= 3; ++i) { ASSERT(cursor->more()); BSONObj obj(cursor->next()); @@ -1910,7 +1962,7 @@ class CollectionInternalBase : public CollectionBase { public: CollectionInternalBase(const char* nsLeaf) : CollectionBase(nsLeaf), - _lk(&_opCtx, DatabaseName(boost::none, "unittests"), MODE_X), + _lk(&_opCtx, DatabaseName::createDatabaseName_forTest(boost::none, "unittests"), MODE_X), _ctx(&_opCtx, nss()) {} private: @@ -1928,7 +1980,7 @@ class QueryReadsAll : public CollectionBase { { // With five results and a batch size of 5, a cursor is created since we don't know // there are no more results. - FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setBatchSize(5); std::unique_ptr c = _client.find(std::move(findRequest)); ASSERT(c->more()); @@ -1942,7 +1994,7 @@ class QueryReadsAll : public CollectionBase { { // With a batchsize of 6 we know there are no more results so we don't create a // cursor. - FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setBatchSize(6); std::unique_ptr c = _client.find(std::move(findRequest)); ASSERT(c->more()); diff --git a/src/mongo/dbtests/replica_set_tests.cpp b/src/mongo/dbtests/replica_set_tests.cpp index 18323df59478b..e48251cd58949 100644 --- a/src/mongo/dbtests/replica_set_tests.cpp +++ b/src/mongo/dbtests/replica_set_tests.cpp @@ -27,19 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/drop_pending_collection_reaper.h" #include "mongo/db/repl/last_vote.h" #include "mongo/db/repl/replication_consistency_markers_impl.h" #include "mongo/db/repl/replication_coordinator_external_state_impl.h" #include "mongo/db/repl/replication_process.h" #include "mongo/db/repl/replication_recovery.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" #include "mongo/db/service_context.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/concurrency/admission_context.h" namespace mongo { namespace { diff --git a/src/mongo/dbtests/repltests.cpp b/src/mongo/dbtests/repltests.cpp index c0746151e8613..0a58b8be4b5dc 100644 --- a/src/mongo/dbtests/repltests.cpp +++ b/src/mongo/dbtests/repltests.cpp @@ -27,26 +27,83 @@ * it in the license file. */ -#include "mongo/bson/mutable/document.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/mutable_bson_test_utils.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" #include "mongo/db/concurrency/exception_util.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/json.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/oplog_writer_impl.h" -#include "mongo/db/ops/update.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/apply_ops_command_info.h" +#include "mongo/db/repl/member_state.h" #include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/oplog_applier_impl.h" +#include "mongo/db/repl/oplog_applier_utils.h" +#include "mongo/db/repl/oplog_entry.h" +#include "mongo/db/repl/oplog_entry_gen.h" +#include "mongo/db/repl/oplog_entry_or_grouped_inserts.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/storage/storage_parameters_gen.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include 
"mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/shard_role.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" #include "mongo/transport/asio/asio_transport_layer.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -169,7 +226,7 @@ class Base { return "unittests.repltests"; } static NamespaceString nss() { - return NamespaceString(ns()); + return NamespaceString::createNamespaceString_forTest(ns()); } static const char* cllNS() { return "local.oplog.rs"; @@ -210,7 +267,7 @@ class Base { } int opCount() { return DBDirectClient(&_opCtx) - .find(FindCommandRequest{NamespaceString{cllNS()}}) + .find(FindCommandRequest{NamespaceString::createNamespaceString_forTest(cllNS())}) ->itcount(); } void applyAllOperations() { @@ -218,7 +275,8 @@ class Base { std::vector ops; { DBDirectClient db(&_opCtx); - auto cursor = db.find(FindCommandRequest{NamespaceString{cllNS()}}); + auto cursor = db.find( + FindCommandRequest{NamespaceString::createNamespaceString_forTest(cllNS())}); while (cursor->more()) { ops.push_back(cursor->nextSafe()); } @@ -240,9 +298,22 @@ class Base { // Handle the case of batched writes which generate command-type (applyOps) oplog // entries. if (entry.getOpType() == repl::OpTypeEnum::kCommand) { - uassertStatusOK(applyCommand_inlock( - &_opCtx, ApplierOperation{&entry}, getOplogApplicationMode())); + std::vector ops; + auto stmts = ApplyOps::extractOperations(entry); + for (auto& stmt : stmts) { + ops.push_back(ApplierOperation(&stmt)); + } + _opCtx.releaseAndReplaceRecoveryUnit(); + uassertStatusOK( + OplogApplierUtils::applyOplogBatchCommon(&_opCtx, + &ops, + getOplogApplicationMode(), + true, + true, + &applyOplogEntryOrGroupedInserts)); } else { + auto coll = acquireCollection( + &_opCtx, {nss(), {}, {}, AcquisitionPrerequisites::kWrite}, MODE_IX); WriteUnitOfWork wunit(&_opCtx); auto lastApplied = repl::ReplicationCoordinator::get(_opCtx.getServiceContext()) ->getMyLastAppliedOpTime() @@ -251,7 +322,7 @@ class Base { ASSERT_OK(_opCtx.recoveryUnit()->setTimestamp(nextTimestamp)); const bool dataIsConsistent = true; uassertStatusOK(applyOperation_inlock(&_opCtx, - ctx.db(), + coll, ApplierOperation{&entry}, false, getOplogApplicationMode(), @@ -262,8 +333,8 @@ class Base { } // These deletes don't get logged. void deleteAll(const char* ns) const { - ::mongo::writeConflictRetry(&_opCtx, "deleteAll", ns, [&] { - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); + ::mongo::writeConflictRetry(&_opCtx, "deleteAll", nss, [&] { Lock::GlobalWrite lk(&_opCtx); OldClientContext ctx(&_opCtx, nss); WriteUnitOfWork wunit(&_opCtx); @@ -338,15 +409,6 @@ class Base { b.appendElements(fromjson(json)); return b.obj(); } - -private: - // Disable batched deletes. 
We use batched writes for the delete in the Remove() case which - // tries to group two deletes in one applyOps. It is illegal to batch writes outside a WUOW, - // so we disable batching for this test. - // TODO SERVER-69316: When featureFlagBatchMultiDeletes is removed, we want the Remove() test - // to issue two different applyOps deletes, or wrap the applyOps in a WUOW. - RAIIServerParameterControllerForTest _featureFlagController{"featureFlagBatchMultiDeletes", - false}; }; @@ -794,7 +856,7 @@ class MultiInc : public Recovering { public: std::string s() const { StringBuilder ss; - FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setSort(BSON("_id" << 1)); std::unique_ptr cc = _client.find(std::move(findRequest)); bool first = true; diff --git a/src/mongo/dbtests/rollbacktests.cpp b/src/mongo/dbtests/rollbacktests.cpp index 70aee1d3d1ae4..0e9c23a9e4f8e 100644 --- a/src/mongo/dbtests/rollbacktests.cpp +++ b/src/mongo/dbtests/rollbacktests.cpp @@ -27,17 +27,51 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/drop_collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/rename_collection.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_descriptor.h" -#include "mongo/db/record_id.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" namespace mongo { namespace RollbackTests { @@ -57,9 +91,9 @@ void dropDatabase(OperationContext* opCtx, const NamespaceString& nss) { } } -bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, const std::string& ns) { - return (bool)CollectionCatalog::get(opCtx)->lookupCollectionByNamespace(opCtx, - NamespaceString(ns)); +bool collectionExists(OperationContext* opCtx, OldClientContext* ctx, StringData ns) { + return (bool)CollectionCatalog::get(opCtx)->lookupCollectionByNamespace( + opCtx, NamespaceString::createNamespaceString_forTest(ns)); } void createCollection(OperationContext* opCtx, const NamespaceString& nss) { @@ -67,10 +101,10 @@ void createCollection(OperationContext* opCtx, const NamespaceString& nss) { 
OldClientContext ctx(opCtx, nss); { WriteUnitOfWork uow(opCtx); - ASSERT(!collectionExists(opCtx, &ctx, nss.ns())); + ASSERT(!collectionExists(opCtx, &ctx, nss.ns_forTest())); CollectionOptions defaultCollectionOptions; ASSERT_OK(ctx.db()->userCreateNS(opCtx, nss, defaultCollectionOptions, false)); - ASSERT(collectionExists(opCtx, &ctx, nss.ns())); + ASSERT(collectionExists(opCtx, &ctx, nss.ns_forTest())); uow.commit(); } } @@ -137,9 +171,9 @@ size_t getNumIndexEntries(OperationContext* opCtx, if (desc) { auto iam = catalog->getEntry(desc)->accessMethod()->asSortedData(); auto cursor = iam->newCursor(opCtx); - KeyString::Builder keyString(iam->getSortedDataInterface()->getKeyStringVersion(), - BSONObj(), - iam->getSortedDataInterface()->getOrdering()); + key_string::Builder keyString(iam->getSortedDataInterface()->getKeyStringVersion(), + BSONObj(), + iam->getSortedDataInterface()->getOrdering()); for (auto kv = cursor->seek(keyString.getValueCopy()); kv; kv = cursor->next()) { numEntries++; } @@ -150,11 +184,12 @@ size_t getNumIndexEntries(OperationContext* opCtx, void dropIndex(OperationContext* opCtx, const NamespaceString& nss, const std::string& idxName) { CollectionWriter coll(opCtx, nss); - auto desc = - coll.getWritableCollection(opCtx)->getIndexCatalog()->findIndexByName(opCtx, idxName); - ASSERT(desc); - ASSERT_OK(coll.getWritableCollection(opCtx)->getIndexCatalog()->dropIndex( - opCtx, coll.getWritableCollection(opCtx), desc)); + auto writableEntry = + coll.getWritableCollection(opCtx)->getIndexCatalog()->getWritableEntryByName(opCtx, + idxName); + ASSERT(writableEntry); + ASSERT_OK(coll.getWritableCollection(opCtx)->getIndexCatalog()->dropIndexEntry( + opCtx, coll.getWritableCollection(opCtx), writableEntry)); } } // namespace @@ -171,7 +206,7 @@ class CreateCollection { std::string ns = "unittests.rollback_create_collection"; const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); dropDatabase(&opCtx, nss); Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X); @@ -208,7 +243,7 @@ class DropCollection { std::string ns = "unittests.rollback_drop_collection"; const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); dropDatabase(&opCtx, nss); Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X); @@ -252,8 +287,10 @@ class RenameCollection { return; } - NamespaceString source("unittests.rollback_rename_collection_src"); - NamespaceString target("unittests.rollback_rename_collection_dest"); + NamespaceString source = NamespaceString::createNamespaceString_forTest( + "unittests.rollback_rename_collection_src"); + NamespaceString target = NamespaceString::createNamespaceString_forTest( + "unittests.rollback_rename_collection_dest"); const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; @@ -265,34 +302,34 @@ class RenameCollection { { WriteUnitOfWork uow(&opCtx); - ASSERT(!collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(!collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(!collectionExists(&opCtx, &ctx, target.ns_forTest())); auto options = capped ? 
BSON("capped" << true << "size" << 1000) : BSONObj(); CollectionOptions collectionOptions = assertGet(CollectionOptions::parse(options, CollectionOptions::parseForCommand)); ASSERT_OK(ctx.db()->userCreateNS(&opCtx, source, collectionOptions, defaultIndexes)); uow.commit(); } - ASSERT(collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(!collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(!collectionExists(&opCtx, &ctx, target.ns_forTest())); // END OF SETUP / START OF TEST { WriteUnitOfWork uow(&opCtx); ASSERT_OK(renameCollection(&opCtx, source, target)); - ASSERT(!collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(collectionExists(&opCtx, &ctx, target.ns_forTest())); if (!rollback) { uow.commit(); } } if (rollback) { - ASSERT(collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(!collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(!collectionExists(&opCtx, &ctx, target.ns_forTest())); } else { - ASSERT(!collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(collectionExists(&opCtx, &ctx, target.ns_forTest())); } } }; @@ -306,8 +343,10 @@ class RenameDropTargetCollection { return; } - NamespaceString source("unittests.rollback_rename_droptarget_collection_src"); - NamespaceString target("unittests.rollback_rename_droptarget_collection_dest"); + NamespaceString source = NamespaceString::createNamespaceString_forTest( + "unittests.rollback_rename_droptarget_collection_src"); + NamespaceString target = NamespaceString::createNamespaceString_forTest( + "unittests.rollback_rename_droptarget_collection_dest"); const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; @@ -324,8 +363,8 @@ class RenameDropTargetCollection { { WriteUnitOfWork uow(&opCtx); - ASSERT(!collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(!collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(!collectionExists(&opCtx, &ctx, target.ns_forTest())); auto options = capped ? 
BSON("capped" << true << "size" << 1000) : BSONObj(); CollectionOptions collectionOptions = assertGet(CollectionOptions::parse(options, CollectionOptions::parseForCommand)); @@ -338,8 +377,8 @@ class RenameDropTargetCollection { uow.commit(); } - ASSERT(collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(collectionExists(&opCtx, &ctx, target.ns_forTest())); assertOnlyRecord(&opCtx, source, sourceDoc); assertOnlyRecord(&opCtx, target, targetDoc); @@ -353,21 +392,21 @@ class RenameDropTargetCollection { {}, DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops)); ASSERT_OK(renameCollection(&opCtx, source, target)); - ASSERT(!collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(collectionExists(&opCtx, &ctx, target.ns_forTest())); assertOnlyRecord(&opCtx, target, sourceDoc); if (!rollback) { uow.commit(); } } if (rollback) { - ASSERT(collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(collectionExists(&opCtx, &ctx, target.ns_forTest())); assertOnlyRecord(&opCtx, source, sourceDoc); assertOnlyRecord(&opCtx, target, targetDoc); } else { - ASSERT(!collectionExists(&opCtx, &ctx, source.ns())); - ASSERT(collectionExists(&opCtx, &ctx, target.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, source.ns_forTest())); + ASSERT(collectionExists(&opCtx, &ctx, target.ns_forTest())); assertOnlyRecord(&opCtx, target, sourceDoc); } } @@ -377,7 +416,8 @@ template class ReplaceCollection { public: void run() { - NamespaceString nss("unittests.rollback_replace_collection"); + NamespaceString nss = + NamespaceString::createNamespaceString_forTest("unittests.rollback_replace_collection"); const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; dropDatabase(&opCtx, nss); @@ -392,14 +432,14 @@ class ReplaceCollection { { WriteUnitOfWork uow(&opCtx); - ASSERT(!collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, nss.ns_forTest())); CollectionOptions collectionOptions = assertGet(CollectionOptions::parse(BSONObj(), CollectionOptions::parseForCommand)); ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes)); insertRecord(&opCtx, nss, oldDoc); uow.commit(); } - ASSERT(collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(collectionExists(&opCtx, &ctx, nss.ns_forTest())); assertOnlyRecord(&opCtx, nss, oldDoc); // END OF SETUP / START OF TEST @@ -411,18 +451,18 @@ class ReplaceCollection { nss, {}, DropCollectionSystemCollectionMode::kDisallowSystemCollectionDrops)); - ASSERT(!collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, nss.ns_forTest())); CollectionOptions collectionOptions = assertGet(CollectionOptions::parse(BSONObj(), CollectionOptions::parseForCommand)); ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes)); - ASSERT(collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(collectionExists(&opCtx, &ctx, nss.ns_forTest())); insertRecord(&opCtx, nss, newDoc); assertOnlyRecord(&opCtx, nss, newDoc); if (!rollback) { uow.commit(); } } - ASSERT(collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(collectionExists(&opCtx, &ctx, nss.ns_forTest())); if (rollback) { 
assertOnlyRecord(&opCtx, nss, oldDoc); } else { @@ -435,7 +475,8 @@ template class TruncateCollection { public: void run() { - NamespaceString nss("unittests.rollback_truncate_collection"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest( + "unittests.rollback_truncate_collection"); const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; dropDatabase(&opCtx, nss); @@ -446,14 +487,14 @@ class TruncateCollection { BSONObj doc = BSON("_id" << "foo"); - ASSERT(!collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, nss.ns_forTest())); { WriteUnitOfWork uow(&opCtx); CollectionOptions collectionOptions = assertGet(CollectionOptions::parse(BSONObj(), CollectionOptions::parseForCommand)); ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, defaultIndexes)); - ASSERT(collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(collectionExists(&opCtx, &ctx, nss.ns_forTest())); insertRecord(&opCtx, nss, doc); assertOnlyRecord(&opCtx, nss, doc); uow.commit(); @@ -466,14 +507,14 @@ class TruncateCollection { WriteUnitOfWork uow(&opCtx); ASSERT_OK(truncateCollection(&opCtx, nss)); - ASSERT(collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(collectionExists(&opCtx, &ctx, nss.ns_forTest())); assertEmpty(&opCtx, nss); if (!rollback) { uow.commit(); } } - ASSERT(collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(collectionExists(&opCtx, &ctx, nss.ns_forTest())); if (rollback) { assertOnlyRecord(&opCtx, nss, doc); } else { @@ -489,7 +530,7 @@ class CreateIndex { std::string ns = "unittests.rollback_create_index"; const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); dropDatabase(&opCtx, nss); createCollection(&opCtx, nss); @@ -531,7 +572,7 @@ class DropIndex { std::string ns = "unittests.rollback_drop_index"; const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); dropDatabase(&opCtx, nss); createCollection(&opCtx, nss); @@ -585,7 +626,7 @@ class CreateDropIndex { std::string ns = "unittests.rollback_create_drop_index"; const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); dropDatabase(&opCtx, nss); createCollection(&opCtx, nss); @@ -629,7 +670,7 @@ class CreateCollectionAndIndexes { std::string ns = "unittests.rollback_create_collection_and_indexes"; const ServiceContext::UniqueOperationContext opCtxPtr = cc().makeOperationContext(); OperationContext& opCtx = *opCtxPtr; - NamespaceString nss(ns); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(ns); dropDatabase(&opCtx, nss); Lock::DBLock dbXLock(&opCtx, nss.dbName(), MODE_X); @@ -649,11 +690,11 @@ class CreateCollectionAndIndexes { { WriteUnitOfWork uow(&opCtx); - ASSERT(!collectionExists(&opCtx, &ctx, nss.ns())); + ASSERT(!collectionExists(&opCtx, &ctx, nss.ns_forTest())); CollectionOptions collectionOptions = assertGet(CollectionOptions::parse(BSONObj(), CollectionOptions::parseForCommand)); ASSERT_OK(ctx.db()->userCreateNS(&opCtx, nss, collectionOptions, false)); - ASSERT(collectionExists(&opCtx, &ctx, 
nss.ns())); + ASSERT(collectionExists(&opCtx, &ctx, nss.ns_forTest())); CollectionWriter coll(&opCtx, nss); auto writableColl = coll.getWritableCollection(&opCtx); IndexCatalog* catalog = writableColl->getIndexCatalog(); diff --git a/src/mongo/dbtests/socktests.cpp b/src/mongo/dbtests/socktests.cpp index 1f06dc7109740..e2c22e324b8d1 100644 --- a/src/mongo/dbtests/socktests.cpp +++ b/src/mongo/dbtests/socktests.cpp @@ -31,10 +31,13 @@ * sock.{h,cpp} unit tests. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/repl/isself.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/base/string_data.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/net/socket_utils.h" diff --git a/src/mongo/dbtests/storage_debug_util.cpp b/src/mongo/dbtests/storage_debug_util.cpp index 89f5ca4858205..a8a59a2b25d06 100644 --- a/src/mongo/dbtests/storage_debug_util.cpp +++ b/src/mongo/dbtests/storage_debug_util.cpp @@ -28,15 +28,36 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/dbtests/storage_debug_util.h" - +#include + +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/ordering.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/validate_results.h" -#include "mongo/db/db_raii.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/storage/index_entry_comparison.h" #include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/dbtests/storage_debug_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -80,10 +101,10 @@ void printCollectionAndIndexTableEntries(OperationContext* opCtx, const Namespac auto indexCursor = iam->newCursor(opCtx, /*forward*/ true); const BSONObj& keyPattern = indexDescriptor->keyPattern(); - const KeyString::Version version = iam->getSortedDataInterface()->getKeyStringVersion(); + const key_string::Version version = iam->getSortedDataInterface()->getKeyStringVersion(); const auto ordering = Ordering::make(keyPattern); - KeyString::Builder firstKeyString( - version, BSONObj(), ordering, KeyString::Discriminator::kExclusiveBefore); + key_string::Builder firstKeyString( + version, BSONObj(), ordering, key_string::Discriminator::kExclusiveBefore); LOGV2(51810, "[Debugging] {keyPattern_str} index table entries:", @@ -92,15 +113,15 @@ void printCollectionAndIndexTableEntries(OperationContext* opCtx, const Namespac for (auto keyStringEntry = indexCursor->seekForKeyString(firstKeyString.getValueCopy()); keyStringEntry; keyStringEntry = indexCursor->nextKeyString()) { - auto keyString = KeyString::toBsonSafe(keyStringEntry->keyString.getBuffer(), - keyStringEntry->keyString.getSize(), - ordering, - keyStringEntry->keyString.getTypeBits()); - 
KeyString::logKeyString(keyStringEntry->loc, - keyStringEntry->keyString, - keyPattern, - keyString, - "[Debugging](index)"); + auto keyString = key_string::toBsonSafe(keyStringEntry->keyString.getBuffer(), + keyStringEntry->keyString.getSize(), + ordering, + keyStringEntry->keyString.getTypeBits()); + key_string::logKeyString(keyStringEntry->loc, + keyStringEntry->keyString, + keyPattern, + keyString, + "[Debugging](index)"); } } } diff --git a/src/mongo/dbtests/threadedtests.cpp b/src/mongo/dbtests/threadedtests.cpp index 9486227a1ccfd..fd990a27850b7 100644 --- a/src/mongo/dbtests/threadedtests.cpp +++ b/src/mongo/dbtests/threadedtests.cpp @@ -28,23 +28,37 @@ */ -#include "mongo/platform/basic.h" - #include -#include +#include #include - -#include "mongo/config.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/client.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/atomic_word.h" -#include "mongo/platform/bits.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/admission_context.h" +#include "mongo/util/concurrency/mutex.h" #include "mongo/util/concurrency/priority_ticketholder.h" #include "mongo/util/concurrency/semaphore_ticketholder.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/concurrency/ticketholder.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -79,7 +93,7 @@ class ThreadedTest { if (!remaining) return; - stdx::thread athread([=] { subthread(remaining); }); + stdx::thread athread([=, this] { subthread(remaining); }); launch_subthreads(remaining - 1); athread.join(); } @@ -134,7 +148,7 @@ class ThreadPoolTest { tp.startup(); for (unsigned i = 0; i < iterations; i++) { - tp.schedule([=](auto status) { + tp.schedule([=, this](auto status) { ASSERT_OK(status); increment(2); }); @@ -264,7 +278,7 @@ class TicketHolderWaits : public ThreadedTest<10> { void checkIn() { stdx::lock_guard lk(_frontDesk); _checkedIn++; - verify(_checkedIn <= _nRooms); + MONGO_verify(_checkedIn <= _nRooms); if (_checkedIn > _maxRooms) _maxRooms = _checkedIn; } @@ -272,7 +286,7 @@ class TicketHolderWaits : public ThreadedTest<10> { void checkOut() { stdx::lock_guard lk(_frontDesk); _checkedIn--; - verify(_checkedIn >= 0); + MONGO_verify(_checkedIn >= 0); } Mutex _frontDesk = MONGO_MAKE_LATCH("Hotel::_frontDesk"); @@ -313,7 +327,7 @@ class TicketHolderWaits : public ThreadedTest<10> { virtual void validate() { // This should always be true, assuming that it takes < 1 sec for the hardware to process a // check-out/check-in Time for test is then ~ #threads / _nRooms * 2 seconds - verify(_hotel._maxRooms == _hotel._nRooms); + MONGO_verify(_hotel._maxRooms == _hotel._nRooms); } protected: diff --git a/src/mongo/dbtests/updatetests.cpp b/src/mongo/dbtests/updatetests.cpp index 6222643c54943..b76224872b61c 100644 --- a/src/mongo/dbtests/updatetests.cpp +++ b/src/mongo/dbtests/updatetests.cpp @@ -31,19 +31,35 @@ * unit tests relating to update requests */ -#include "mongo/platform/basic.h" - +#include #include - 
+#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" #include "mongo/bson/mutable/mutable_bson_test_utils.h" #include "mongo/client/dbclient_cursor.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/json.h" -#include "mongo/db/ops/update.h" -#include "mongo/dbtests/dbtests.h" -#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/service_context.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace UpdateTests { @@ -159,7 +175,7 @@ class SetBase : public ClientBase { } NamespaceString nss() const { - return NamespaceString{ns()}; + return NamespaceString::createNamespaceString_forTest(ns()); } }; @@ -425,7 +441,7 @@ class MultiInc : public SetBase { public: string s() { stringstream ss; - FindCommandRequest findRequest{NamespaceString{ns()}}; + FindCommandRequest findRequest{NamespaceString::createNamespaceString_forTest(ns())}; findRequest.setSort(BSON("_id" << 1)); std::unique_ptr cc = _client.find(std::move(findRequest)); bool first = true; @@ -963,7 +979,7 @@ class PushSortBase : public ClientBase { } NamespaceString nss() const { - return NamespaceString{ns()}; + return NamespaceString::createNamespaceString_forTest(ns()); } void setParams(const BSONArray& fields, @@ -1852,7 +1868,7 @@ class Base : public ClientBase { virtual const char* ns() = 0; NamespaceString nss() { - return NamespaceString(ns()); + return NamespaceString::createNamespaceString_forTest(ns()); }; virtual void dotest() = 0; @@ -1866,7 +1882,8 @@ class Base : public ClientBase { } BSONObj findOne() { - return _client.findOne(NamespaceString{ns()}, BSONObj{} /*filter*/); + return _client.findOne(NamespaceString::createNamespaceString_forTest(ns()), + BSONObj{} /*filter*/); } void test(const char* initial, const char* mod, const char* after) { diff --git a/src/mongo/dbtests/validate_tests.cpp b/src/mongo/dbtests/validate_tests.cpp index fac9153c7895a..96055766d3bf9 100644 --- a/src/mongo/dbtests/validate_tests.cpp +++ b/src/mongo/dbtests/validate_tests.cpp @@ -27,21 +27,81 @@ * it in the license file. 
*/ +#include +#include +#include +// IWYU pragma: no_include "boost/intrusive/detail/iterator.hpp" +// IWYU pragma: no_include "boost/move/algo/detail/set_difference.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_view.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/clustered_collection_options_gen.h" #include "mongo/db/catalog/clustered_collection_util.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" #include "mongo/db/catalog/collection_validation.h" #include "mongo/db/catalog/collection_write_path.h" +#include "mongo/db/catalog/database.h" #include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" +#include "mongo/db/catalog/validate_results.h" +#include "mongo/db/catalog_raii.h" #include "mongo/db/client.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/curop.h" #include "mongo/db/db_raii.h" #include "mongo/db/index/index_access_method.h" #include "mongo/db/index/index_build_interceptor.h" #include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/bson_collection_catalog_entry.h" #include "mongo/db/storage/durable_catalog.h" -#include "mongo/db/storage/execution_context.h" -#include "mongo/dbtests/dbtests.h" +#include "mongo/db/storage/durable_catalog_entry.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/snapshot.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/dbtests/dbtests.h" // IWYU pragma: keep #include "mongo/dbtests/storage_debug_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/shared_buffer_fragment.h" +#include "mongo/util/uuid.h" namespace mongo { namespace ValidateTests { @@ -79,7 +139,11 @@ static const char* const _ns = "unittests.validate_tests"; class ValidateBase { public: explicit ValidateBase(bool full, bool background, bool clustered) - : _full(full), _background(background), _nss(_ns), _autoDb(nullptr), _db(nullptr) { + : _full(full), + _background(background), + _nss(NamespaceString::createNamespaceString_forTest(_ns)), + _autoDb(nullptr), + _db(nullptr) { CollectionOptions options; if (clustered) { @@ -90,11 +154,11 @@ class ValidateBase { AutoGetCollection autoColl(&_opCtx, _nss, MODE_IX); auto db = autoColl.ensureDbExists(&_opCtx); - ASSERT_TRUE(db) << _nss; + ASSERT_TRUE(db) << _nss.toStringForErrorMsg(); WriteUnitOfWork wuow(&_opCtx); auto coll = db->createCollection(&_opCtx, _nss, options, 
createIdIndex); - ASSERT_TRUE(coll) << _nss; + ASSERT_TRUE(coll) << _nss.toStringForErrorMsg(); wuow.commit(); _engineSupportsCheckpoints = @@ -308,12 +372,11 @@ class ValidateSecondaryIndexCount : public ValidateBase { } auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << "a" << "key" << BSON("a" << 1) << "v" - << static_cast(kIndexVersion) - << "background" << false)); + << static_cast(kIndexVersion))); ASSERT_OK(status); releaseDb(); @@ -378,12 +441,11 @@ class ValidateSecondaryIndex : public ValidateBase { } auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << "a" << "key" << BSON("a" << 1) << "v" - << static_cast(kIndexVersion) - << "background" << false)); + << static_cast(kIndexVersion))); ASSERT_OK(status); releaseDb(); @@ -531,12 +593,11 @@ class ValidateMultiKeyIndex : public ValidateBase { // Create multi-key index. auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << "multikey_index" << "key" << BSON("a.b" << 1) << "v" - << static_cast(kIndexVersion) - << "background" << false)); + << static_cast(kIndexVersion))); ASSERT_OK(status); releaseDb(); @@ -604,7 +665,7 @@ class ValidateSparseIndex : public ValidateBase { // Create a sparse index. auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << "sparse_index" << "key" << BSON("a" << 1) << "v" @@ -671,7 +732,7 @@ class ValidatePartialIndex : public ValidateBase { // Create a partial index. auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << "partial_index" << "key" << BSON("a" << 1) << "v" @@ -731,30 +792,28 @@ class ValidatePartialIndexOnCollectionWithNonIndexableFields : public ValidateBa } // Create a partial geo index that indexes the document. This should return an error. - ASSERT_NOT_OK( - dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), - BSON("name" - << "partial_index" - << "key" - << BSON("x" - << "2dsphere") - << "v" << static_cast(kIndexVersion) - << "background" << false << "partialFilterExpression" - << BSON("a" << BSON("$eq" << 2))))); + ASSERT_NOT_OK(dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" + << "partial_index" + << "key" + << BSON("x" + << "2dsphere") + << "v" << static_cast(kIndexVersion) + << "partialFilterExpression" + << BSON("a" << BSON("$eq" << 2))))); // Create a partial geo index that does not index the document. - auto status = - dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), - BSON("name" - << "partial_index" - << "key" - << BSON("x" - << "2dsphere") - << "v" << static_cast(kIndexVersion) - << "background" << false << "partialFilterExpression" - << BSON("a" << BSON("$eq" << 1)))); + auto status = dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" + << "partial_index" + << "key" + << BSON("x" + << "2dsphere") + << "v" << static_cast(kIndexVersion) + << "partialFilterExpression" + << BSON("a" << BSON("$eq" << 1)))); ASSERT_OK(status); releaseDb(); ensureValidateWorked(); @@ -808,21 +867,19 @@ class ValidateCompoundIndex : public ValidateBase { // Create two compound indexes, one forward and one reverse, to test // validate()'s index direction parsing. 
auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << "compound_index_1" << "key" << BSON("a" << 1 << "b" << -1) - << "v" << static_cast(kIndexVersion) - << "background" << false)); + << "v" << static_cast(kIndexVersion))); ASSERT_OK(status); status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << "compound_index_2" << "key" << BSON("a" << -1 << "b" << 1) << "v" - << static_cast(kIndexVersion) - << "background" << false)); + << static_cast(kIndexVersion))); ASSERT_OK(status); releaseDb(); @@ -857,7 +914,7 @@ class ValidateIndexEntry : public ValidateBase { } SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection, insert three records and check it's valid. lockDb(MODE_X); @@ -880,11 +937,11 @@ class ValidateIndexEntry : public ValidateBase { } const std::string indexName = "bad_index"; - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << BSON("a" << 1) << "v" - << static_cast(kIndexVersion) << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << BSON("a" << 1) << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); releaseDb(); @@ -910,6 +967,7 @@ class ValidateIndexEntry : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, actualKey, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -919,11 +977,12 @@ class ValidateIndexEntry : public ValidateBase { nullptr, id1); - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys( + &_opCtx, descriptor->getEntry(), {keys.begin(), keys.end()}, options, &numDeleted); auto insertStatus = iam->insert(&_opCtx, pooledBuilder, coll(), + descriptor->getEntry(), {{id1, Timestamp(), &badKey}}, options, &numInserted); @@ -945,7 +1004,7 @@ class ValidateIndexMetadata : public ValidateBase { void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create an index with bad index specs. lockDb(MODE_X); @@ -953,7 +1012,7 @@ class ValidateIndexMetadata : public ValidateBase { const std::string indexName = "bad_specs_index"; auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << BSON("a" << 1) << "v" << static_cast(kIndexVersion) << "sparse" << "false")); @@ -989,11 +1048,11 @@ class ValidateWildCardIndex : public ValidateBase { // Create a $** index. const auto indexName = "wildcardIndex"; const auto indexKey = BSON("$**" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion) - << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); // Insert non-multikey documents. 
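
The validate_tests.cpp hunks above and below repeat the same three mechanical migrations: coll()->ns().ns() becomes the test-only coll()->ns().ns_forTest() accessor, the deprecated "background" << false field is dropped from the index specs, and the KeyString namespace is now spelled key_string. A minimal sketch of the resulting spec-construction pattern, reusing the kIndexVersion constant and the coll() fixture helper from the surrounding tests (the int template argument on the cast is an assumption, since the template text is elided in this copy of the diff):

    // Sketch only: post-change index creation as used throughout these tests.
    auto status = dbtests::createIndexFromSpec(
        &_opCtx,
        coll()->ns().ns_forTest(),  // test-only namespace accessor replaces ns()
        BSON("name" << "wildcardIndex"
                    << "key" << BSON("$**" << 1)
                    << "v" << static_cast<int>(kIndexVersion)));  // no "background" flag
    ASSERT_OK(status);
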
@@ -1049,12 +1108,12 @@ class ValidateWildCardIndex : public ValidateBase { auto sortedDataInterface = accessMethod->getSortedDataInterface(); { WriteUnitOfWork wunit(&_opCtx); - const KeyString::Value indexKey = - KeyString::HeapBuilder(sortedDataInterface->getKeyStringVersion(), - BSON("" << 1 << "" - << "non_existent_path"), - sortedDataInterface->getOrdering(), - recordId) + const key_string::Value indexKey = + key_string::HeapBuilder(sortedDataInterface->getKeyStringVersion(), + BSON("" << 1 << "" + << "non_existent_path"), + sortedDataInterface->getOrdering(), + recordId) .release(); auto insertStatus = sortedDataInterface->insert(&_opCtx, indexKey, true /* dupsAllowed */); @@ -1072,12 +1131,12 @@ class ValidateWildCardIndex : public ValidateBase { lockDb(MODE_X); { WriteUnitOfWork wunit(&_opCtx); - const KeyString::Value indexKey = - KeyString::HeapBuilder(sortedDataInterface->getKeyStringVersion(), - BSON("" << 1 << "" - << "mk_1"), - sortedDataInterface->getOrdering(), - recordId) + const key_string::Value indexKey = + key_string::HeapBuilder(sortedDataInterface->getKeyStringVersion(), + BSON("" << 1 << "" + << "mk_1"), + sortedDataInterface->getOrdering(), + recordId) .release(); sortedDataInterface->unindex(&_opCtx, indexKey, true /* dupsAllowed */); wunit.commit(); @@ -1115,11 +1174,11 @@ class ValidateWildCardIndexWithProjection : public ValidateBase { // Create a $** index with a projection on "a". const auto indexName = "wildcardIndex"; const auto indexKey = BSON("a.$**" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion) - << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); // Insert documents with indexed and not-indexed paths. @@ -1177,12 +1236,12 @@ class ValidateWildCardIndexWithProjection : public ValidateBase { WriteUnitOfWork wunit(&_opCtx); RecordId recordId(record_id_helpers::reservedIdFor( record_id_helpers::ReservationId::kWildcardMultikeyMetadataId, KeyFormat::Long)); - const KeyString::Value indexKey = - KeyString::HeapBuilder(sortedDataInterface->getKeyStringVersion(), - BSON("" << 1 << "" - << "a"), - sortedDataInterface->getOrdering(), - recordId) + const key_string::Value indexKey = + key_string::HeapBuilder(sortedDataInterface->getKeyStringVersion(), + BSON("" << 1 << "" + << "a"), + sortedDataInterface->getOrdering(), + recordId) .release(); sortedDataInterface->unindex(&_opCtx, indexKey, true /* dupsAllowed */); wunit.commit(); @@ -1217,11 +1276,11 @@ class ValidateMissingAndExtraIndexEntryResults : public ValidateBase { // Create an index. const auto indexName = "a"; const auto indexKey = BSON("a" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion) - << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); // Insert documents. @@ -1298,7 +1357,7 @@ class ValidateMissingIndexEntryResults : public ValidateBase { } SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection. 
lockDb(MODE_X); @@ -1313,11 +1372,11 @@ class ValidateMissingIndexEntryResults : public ValidateBase { // Create an index. const auto indexName = "a"; const auto indexKey = BSON("a" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion) - << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); // Insert documents. @@ -1357,6 +1416,7 @@ class ValidateMissingIndexEntryResults : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, actualKey, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -1365,8 +1425,8 @@ class ValidateMissingIndexEntryResults : public ValidateBase { nullptr, nullptr, rid); - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys( + &_opCtx, descriptor->getEntry(), {keys.begin(), keys.end()}, options, &numDeleted); ASSERT_EQUALS(numDeleted, 1); ASSERT_OK(removeStatus); @@ -1429,11 +1489,11 @@ class ValidateExtraIndexEntryResults : public ValidateBase { // Create an index. const auto indexName = "a"; const auto indexKey = BSON("a" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion) - << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); // Insert documents. @@ -1518,7 +1578,7 @@ class ValidateMissingAndExtraIndexEntryRepair : public ValidateBase { const auto indexKeyA = BSON("a" << 1); ASSERT_OK( dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexNameA << "key" << indexKeyA << "v" << static_cast(kIndexVersion)))); @@ -1527,7 +1587,7 @@ class ValidateMissingAndExtraIndexEntryRepair : public ValidateBase { const auto indexKeyB = BSON("b" << 1); ASSERT_OK( dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexNameB << "key" << indexKeyB << "v" << static_cast(kIndexVersion)))); @@ -1686,7 +1746,7 @@ class ValidateMissingIndexEntryRepair : public ValidateBase { void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection. 
lockDb(MODE_X); @@ -1703,7 +1763,7 @@ class ValidateMissingIndexEntryRepair : public ValidateBase { const auto indexKey = BSON("a" << 1); auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion))); ASSERT_OK(status); @@ -1745,6 +1805,7 @@ class ValidateMissingIndexEntryRepair : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, actualKey, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -1753,8 +1814,8 @@ class ValidateMissingIndexEntryRepair : public ValidateBase { nullptr, nullptr, rid); - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys( + &_opCtx, descriptor->getEntry(), {keys.begin(), keys.end()}, options, &numDeleted); ASSERT_EQUALS(numDeleted, 1); ASSERT_OK(removeStatus); @@ -1881,7 +1942,7 @@ class ValidateExtraIndexEntryRepair : public ValidateBase { const auto indexKey = BSON("a" << 1); auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion))); ASSERT_OK(status); @@ -2020,7 +2081,7 @@ class ValidateDuplicateDocumentMissingIndexEntryRepair : public ValidateBase { void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection and insert a document. lockDb(MODE_X); @@ -2041,7 +2102,7 @@ class ValidateDuplicateDocumentMissingIndexEntryRepair : public ValidateBase { const auto indexKey = BSON("a" << 1); auto status = dbtests::createIndexFromSpec( &_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion) << "unique" << true)); ASSERT_OK(status); @@ -2091,6 +2152,7 @@ class ValidateDuplicateDocumentMissingIndexEntryRepair : public ValidateBase { KeyStringSet keys; iam->getKeys(&_opCtx, coll(), + entry, pooledBuilder, dupObj, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraints, @@ -2108,12 +2170,13 @@ class ValidateDuplicateDocumentMissingIndexEntryRepair : public ValidateBase { auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths( &_opCtx, coll(), + entry, {keys.begin(), keys.end()}, {}, MultikeyPaths{}, options, - [this, &interceptor](const KeyString::Value& duplicateKey) { - return interceptor->recordDuplicateKey(&_opCtx, duplicateKey); + [this, &entry, &interceptor](const key_string::Value& duplicateKey) { + return interceptor->recordDuplicateKey(&_opCtx, entry, duplicateKey); }, &numInserted); @@ -2123,7 +2186,7 @@ class ValidateDuplicateDocumentMissingIndexEntryRepair : public ValidateBase { wunit.commit(); } - ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx)); + ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx, entry)); } releaseDb(); @@ -2257,7 +2320,7 @@ class ValidateDoubleDuplicateDocumentMissingIndexEntryRepair : public ValidateBa void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection and insert a document. 
lockDb(MODE_X); @@ -2282,7 +2345,7 @@ class ValidateDoubleDuplicateDocumentMissingIndexEntryRepair : public ValidateBa const auto indexKeyA = BSON("a" << 1); auto status = dbtests::createIndexFromSpec( &_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexNameA << "key" << indexKeyA << "v" << static_cast(kIndexVersion) << "unique" << true)); ASSERT_OK(status); @@ -2293,7 +2356,7 @@ class ValidateDoubleDuplicateDocumentMissingIndexEntryRepair : public ValidateBa const auto indexKeyB = BSON("b" << 1); auto status = dbtests::createIndexFromSpec( &_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexNameB << "key" << indexKeyB << "v" << static_cast(kIndexVersion) << "unique" << true)); ASSERT_OK(status); @@ -2345,6 +2408,7 @@ class ValidateDoubleDuplicateDocumentMissingIndexEntryRepair : public ValidateBa KeyStringSet keys; iam->getKeys(&_opCtx, coll(), + entry, pooledBuilder, dupObj, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraints, @@ -2362,12 +2426,13 @@ class ValidateDoubleDuplicateDocumentMissingIndexEntryRepair : public ValidateBa auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths( &_opCtx, coll(), + entry, {keys.begin(), keys.end()}, {}, MultikeyPaths{}, options, - [this, &interceptor](const KeyString::Value& duplicateKey) { - return interceptor->recordDuplicateKey(&_opCtx, duplicateKey); + [this, &entry, &interceptor](const key_string::Value& duplicateKey) { + return interceptor->recordDuplicateKey(&_opCtx, entry, duplicateKey); }, &numInserted); @@ -2377,7 +2442,7 @@ class ValidateDoubleDuplicateDocumentMissingIndexEntryRepair : public ValidateBa wunit.commit(); } - ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx)); + ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx, entry)); } releaseDb(); @@ -2517,7 +2582,7 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection and insert a document. 
lockDb(MODE_X); @@ -2544,7 +2609,7 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va const auto indexKeyA = BSON("a" << 1); auto status = dbtests::createIndexFromSpec( &_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexNameA << "key" << indexKeyA << "v" << static_cast(kIndexVersion) << "unique" << true)); ASSERT_OK(status); @@ -2555,7 +2620,7 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va const auto indexKeyB = BSON("b" << 1); auto status = dbtests::createIndexFromSpec( &_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexNameB << "key" << indexKeyB << "v" << static_cast(kIndexVersion) << "unique" << true)); ASSERT_OK(status); @@ -2594,6 +2659,7 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, actualKey, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -2602,8 +2668,11 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va nullptr, nullptr, rid1); - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys(&_opCtx, + descriptor->getEntry(), + {keys.begin(), keys.end()}, + options, + &numDeleted); ASSERT_EQUALS(numDeleted, 1); ASSERT_OK(removeStatus); @@ -2646,6 +2715,7 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va KeyStringSet keys; iam->getKeys(&_opCtx, coll(), + entry, pooledBuilder, dupObj, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraints, @@ -2663,12 +2733,13 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths( &_opCtx, coll(), + entry, {keys.begin(), keys.end()}, {}, MultikeyPaths{}, options, - [this, &interceptor](const KeyString::Value& duplicateKey) { - return interceptor->recordDuplicateKey(&_opCtx, duplicateKey); + [this, &entry, &interceptor](const key_string::Value& duplicateKey) { + return interceptor->recordDuplicateKey(&_opCtx, entry, duplicateKey); }, &numInserted); @@ -2678,7 +2749,7 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va wunit.commit(); } - ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx)); + ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx, entry)); } // Insert the key on b. 
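
The duplicate-key hunks in this file all converge on the same call shape: getKeys(), insertKeysAndUpdateMultikeyPaths(), the duplicate-key callback, and checkDuplicateKeyConstraints() now receive the index catalog entry alongside the operation context, and the callback parameter follows the KeyString to key_string rename. A hedged sketch of the post-change pattern, with iam, entry, interceptor, keys, and options taken as defined in the surrounding test bodies:

    // Sketch only: duplicate-key insertion through the interceptor after the change.
    int64_t numInserted;
    auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths(
        &_opCtx,
        coll(),
        entry,                        // IndexCatalogEntry is now threaded through
        {keys.begin(), keys.end()},
        {},
        MultikeyPaths{},
        options,
        [this, &entry, &interceptor](const key_string::Value& duplicateKey) {
            // recordDuplicateKey() also takes the catalog entry now.
            return interceptor->recordDuplicateKey(&_opCtx, entry, duplicateKey);
        },
        &numInserted);
    ASSERT_OK(insertStatus);
    ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx, entry));
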
@@ -2691,6 +2762,7 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va KeyStringSet keys; iam->getKeys(&_opCtx, coll(), + entry, pooledBuilder, dupObj, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraints, @@ -2708,12 +2780,13 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths( &_opCtx, coll(), + entry, {keys.begin(), keys.end()}, {}, MultikeyPaths{}, options, - [this, &interceptor](const KeyString::Value& duplicateKey) { - return interceptor->recordDuplicateKey(&_opCtx, duplicateKey); + [this, &entry, &interceptor](const key_string::Value& duplicateKey) { + return interceptor->recordDuplicateKey(&_opCtx, entry, duplicateKey); }, &numInserted); @@ -2723,7 +2796,7 @@ class ValidateDoubleDuplicateDocumentOppositeMissingIndexEntryRepair : public Va wunit.commit(); } - ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx)); + ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx, entry)); } releaseDb(); @@ -2860,7 +2933,7 @@ class ValidateIndexWithMissingMultikeyDocRepair : public ValidateBase { void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection and insert non-multikey document. lockDb(MODE_X); @@ -2883,7 +2956,7 @@ class ValidateIndexWithMissingMultikeyDocRepair : public ValidateBase { const auto indexName = "non_mk_index"; auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << BSON("a" << 1) << "v" << static_cast(kIndexVersion))); ASSERT_OK(status); @@ -2907,6 +2980,7 @@ class ValidateIndexWithMissingMultikeyDocRepair : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, doc, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -2918,8 +2992,11 @@ class ValidateIndexWithMissingMultikeyDocRepair : public ValidateBase { ASSERT_EQ(keys.size(), 1); int64_t numDeleted; - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys(&_opCtx, + descriptor->getEntry(), + {keys.begin(), keys.end()}, + options, + &numDeleted); ASSERT_OK(removeStatus); ASSERT_EQUALS(numDeleted, 1); wunit.commit(); @@ -3040,7 +3117,7 @@ class ValidateDuplicateDocumentIndexKeySet : public ValidateBase { void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection. 
lockDb(MODE_X); @@ -3056,22 +3133,22 @@ class ValidateDuplicateDocumentIndexKeySet : public ValidateBase { { const auto indexName = "a"; const auto indexKey = BSON("a" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" - << static_cast(kIndexVersion) << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); } { const auto indexName = "b"; const auto indexKey = BSON("b" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" - << static_cast(kIndexVersion) << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); } @@ -3112,6 +3189,7 @@ class ValidateDuplicateDocumentIndexKeySet : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, actualKey, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -3120,8 +3198,8 @@ class ValidateDuplicateDocumentIndexKeySet : public ValidateBase { nullptr, nullptr, rid); - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys( + &_opCtx, descriptor->getEntry(), {keys.begin(), keys.end()}, options, &numDeleted); ASSERT_EQUALS(numDeleted, 1); ASSERT_OK(removeStatus); @@ -3149,6 +3227,7 @@ class ValidateDuplicateDocumentIndexKeySet : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, actualKey, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -3157,8 +3236,8 @@ class ValidateDuplicateDocumentIndexKeySet : public ValidateBase { nullptr, nullptr, rid); - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys( + &_opCtx, descriptor->getEntry(), {keys.begin(), keys.end()}, options, &numDeleted); ASSERT_EQUALS(numDeleted, 1); ASSERT_OK(removeStatus); @@ -3188,7 +3267,7 @@ class ValidateDuplicateKeysUniqueIndex : public ValidateBase { } SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection. 
lockDb(MODE_X); @@ -3206,10 +3285,9 @@ class ValidateDuplicateKeysUniqueIndex : public ValidateBase { const auto indexKey = BSON("a" << 1); auto status = dbtests::createIndexFromSpec( &_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << indexKey << "v" - << static_cast(kIndexVersion) << "background" << false << "unique" - << true)); + << static_cast(kIndexVersion) << "unique" << true)); ASSERT_OK(status); } @@ -3265,6 +3343,7 @@ class ValidateDuplicateKeysUniqueIndex : public ValidateBase { KeyStringSet keys; iam->getKeys(&_opCtx, coll(), + entry, pooledBuilder, dupObj, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraints, @@ -3282,12 +3361,13 @@ class ValidateDuplicateKeysUniqueIndex : public ValidateBase { auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths( &_opCtx, coll(), + entry, {keys.begin(), keys.end()}, {}, MultikeyPaths{}, options, - [this, &interceptor](const KeyString::Value& duplicateKey) { - return interceptor->recordDuplicateKey(&_opCtx, duplicateKey); + [this, &entry, &interceptor](const key_string::Value& duplicateKey) { + return interceptor->recordDuplicateKey(&_opCtx, entry, duplicateKey); }, &numInserted); @@ -3297,7 +3377,7 @@ class ValidateDuplicateKeysUniqueIndex : public ValidateBase { wunit.commit(); } - ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx)); + ASSERT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx, entry)); } // Insert the key on "a". @@ -3310,6 +3390,7 @@ class ValidateDuplicateKeysUniqueIndex : public ValidateBase { KeyStringSet keys; iam->getKeys(&_opCtx, coll(), + entry, pooledBuilder, dupObj, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraints, @@ -3327,12 +3408,13 @@ class ValidateDuplicateKeysUniqueIndex : public ValidateBase { auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths( &_opCtx, coll(), + entry, {keys.begin(), keys.end()}, {}, MultikeyPaths{}, options, - [this, &interceptor](const KeyString::Value& duplicateKey) { - return interceptor->recordDuplicateKey(&_opCtx, duplicateKey); + [this, &entry, &interceptor](const key_string::Value& duplicateKey) { + return interceptor->recordDuplicateKey(&_opCtx, entry, duplicateKey); }, &numInserted); @@ -3342,7 +3424,7 @@ class ValidateDuplicateKeysUniqueIndex : public ValidateBase { wunit.commit(); } - ASSERT_NOT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx)); + ASSERT_NOT_OK(interceptor->checkDuplicateKeyConstraints(&_opCtx, entry)); } releaseDb(); @@ -3618,7 +3700,7 @@ class ValidateIndexWithMultikeyDocRepair : public ValidateBase { void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection and insert non-multikey document. 
lockDb(MODE_X); @@ -3641,7 +3723,7 @@ class ValidateIndexWithMultikeyDocRepair : public ValidateBase { const auto indexName = "non_mk_index"; auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << BSON("a" << 1) << "v" << static_cast(kIndexVersion))); ASSERT_OK(status); @@ -3665,6 +3747,7 @@ class ValidateIndexWithMultikeyDocRepair : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, doc, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -3676,8 +3759,11 @@ class ValidateIndexWithMultikeyDocRepair : public ValidateBase { ASSERT_EQ(keys.size(), 1); int64_t numDeleted; - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys(&_opCtx, + descriptor->getEntry(), + {keys.begin(), keys.end()}, + options, + &numDeleted); ASSERT_OK(removeStatus); ASSERT_EQUALS(numDeleted, 1); wunit.commit(); @@ -3701,6 +3787,7 @@ class ValidateIndexWithMultikeyDocRepair : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, mkDoc, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -3718,6 +3805,7 @@ class ValidateIndexWithMultikeyDocRepair : public ValidateBase { auto keysIterator = keys.begin(); auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths(&_opCtx, coll(), + descriptor->getEntry(), {*keysIterator}, {}, MultikeyPaths{}, @@ -3731,6 +3819,7 @@ class ValidateIndexWithMultikeyDocRepair : public ValidateBase { numInserted = 0; insertStatus = iam->insertKeysAndUpdateMultikeyPaths(&_opCtx, coll(), + descriptor->getEntry(), {*keysIterator}, {}, MultikeyPaths{}, @@ -3841,7 +3930,7 @@ class ValidateMultikeyPathCoverageRepair : public ValidateBase { void run() { SharedBufferFragmentBuilder pooledBuilder( - KeyString::HeapBuilder::kHeapAllocatorDefaultBytes); + key_string::HeapBuilder::kHeapAllocatorDefaultBytes); // Create a new collection and insert multikey document. lockDb(MODE_X); @@ -3864,7 +3953,7 @@ class ValidateMultikeyPathCoverageRepair : public ValidateBase { // Create a multikey index. 
const auto indexName = "mk_index"; auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << BSON("a" << 1 << "b" << 1) << "v" << static_cast(kIndexVersion))); @@ -3891,6 +3980,7 @@ class ValidateMultikeyPathCoverageRepair : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, doc1, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -3902,8 +3992,11 @@ class ValidateMultikeyPathCoverageRepair : public ValidateBase { ASSERT_EQ(keys.size(), 2); int64_t numDeleted; - auto removeStatus = - iam->removeKeys(&_opCtx, {keys.begin(), keys.end()}, options, &numDeleted); + auto removeStatus = iam->removeKeys(&_opCtx, + descriptor->getEntry(), + {keys.begin(), keys.end()}, + options, + &numDeleted); ASSERT_OK(removeStatus); ASSERT_EQ(numDeleted, 2); wunit.commit(); @@ -3928,6 +4021,7 @@ class ValidateMultikeyPathCoverageRepair : public ValidateBase { iam->getKeys( &_opCtx, coll(), + descriptor->getEntry(), pooledBuilder, doc2, InsertDeleteOptions::ConstraintEnforcementMode::kRelaxConstraintsUnfiltered, @@ -3939,8 +4033,15 @@ class ValidateMultikeyPathCoverageRepair : public ValidateBase { ASSERT_EQ(keys.size(), 2); int64_t numInserted; - auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths( - &_opCtx, coll(), keys, {}, oldMultikeyPaths, options, nullptr, &numInserted); + auto insertStatus = iam->insertKeysAndUpdateMultikeyPaths(&_opCtx, + coll(), + descriptor->getEntry(), + keys, + {}, + oldMultikeyPaths, + options, + nullptr, + &numInserted); ASSERT_EQUALS(numInserted, 2); ASSERT_OK(insertStatus); @@ -4060,7 +4161,7 @@ class ValidateAddNewMultikeyPaths : public ValidateBase { const auto indexName = "mk_index"; auto status = dbtests::createIndexFromSpec(&_opCtx, - coll()->ns().ns(), + coll()->ns().ns_forTest(), BSON("name" << indexName << "key" << BSON("a" << 1 << "b" << 1) << "v" << static_cast(kIndexVersion))); @@ -4070,8 +4171,9 @@ class ValidateAddNewMultikeyPaths : public ValidateBase { // of a pre-3.4 index. { WriteUnitOfWork wunit(&_opCtx); - auto collMetadata = - DurableCatalog::get(&_opCtx)->getMetaData(&_opCtx, coll()->getCatalogId()); + auto collMetadata = DurableCatalog::get(&_opCtx) + ->getParsedCatalogEntry(&_opCtx, coll()->getCatalogId()) + ->metadata; int offset = collMetadata->findIndexOffset(indexName); ASSERT_GTE(offset, 0); @@ -4083,10 +4185,11 @@ class ValidateAddNewMultikeyPaths : public ValidateBase { } // Reload the index from the modified catalog. - auto descriptor = coll()->getIndexCatalog()->findIndexByName(&_opCtx, indexName); + const IndexDescriptor* descriptor = nullptr; { WriteUnitOfWork wunit(&_opCtx); auto writableCatalog = writer.getWritableCollection(&_opCtx)->getIndexCatalog(); + descriptor = writableCatalog->findIndexByName(&_opCtx, indexName); descriptor = writableCatalog->refreshEntry(&_opCtx, writer.getWritableCollection(&_opCtx), descriptor, @@ -4272,11 +4375,11 @@ class ValidateReportInfoOnClusteredCollection : public ValidateBase { // Create an index. 
const auto indexName = "a"; const auto indexKey = BSON("a" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion) - << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); // Insert documents. @@ -4383,11 +4486,11 @@ class ValidateRepairOnClusteredCollection : public ValidateBase { // Create an index. const auto indexName = "a"; const auto indexKey = BSON("a" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" << static_cast(kIndexVersion) - << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); // Insert documents. @@ -4546,11 +4649,11 @@ class ValidateInvalidRecordIdOnClusteredCollection : public ValidateBase { // Create index on {a: 1} const auto indexName = "a"; const auto indexKey = BSON("a" << 1); - auto status = dbtests::createIndexFromSpec( - &_opCtx, - coll()->ns().ns(), - BSON("name" << indexName << "key" << indexKey << "v" - << static_cast(kIndexVersion) << "background" << false)); + auto status = + dbtests::createIndexFromSpec(&_opCtx, + coll()->ns().ns_forTest(), + BSON("name" << indexName << "key" << indexKey << "v" + << static_cast(kIndexVersion))); ASSERT_OK(status); } diff --git a/src/mongo/dbtests/wildcard_access_method_test.cpp b/src/mongo/dbtests/wildcard_access_method_test.cpp index 0cf7c3d0a98e5..93f0d451c7797 100644 --- a/src/mongo/dbtests/wildcard_access_method_test.cpp +++ b/src/mongo/dbtests/wildcard_access_method_test.cpp @@ -27,13 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" -#include "mongo/db/index/wildcard_access_method.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/interval.h" #include "mongo/db/query/query_planner_test_lib.h" #include "mongo/db/query/wildcard_multikey_paths.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp index b3bc4faf4c3f4..777f3fd5e1f8d 100644 --- a/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp +++ b/src/mongo/dbtests/wildcard_multikey_persistence_test.cpp @@ -28,17 +28,62 @@ */ +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include - +#include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/catalog/index_catalog.h" +#include "mongo/db/catalog/index_catalog_entry.h" #include "mongo/db/catalog/multi_index_block.h" +#include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/db_raii.h" -#include "mongo/db/index/wildcard_access_method.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_metadata_access_stats.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/dependencies.h" #include "mongo/db/query/wildcard_multikey_paths.h" +#include "mongo/db/record_id.h" #include "mongo/db/record_id_helpers.h" +#include "mongo/db/repl/oplog.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/index_entry_comparison.h" +#include "mongo/db/storage/key_format.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/db/storage/snapshot.h" #include "mongo/db/storage/sorted_data_interface.h" +#include "mongo/db/storage/write_unit_of_work.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/inline_auto_update.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -110,8 +155,8 @@ class WildcardMultikeyPersistenceTestFixture : public unittest::Test { // Obtain a cursor over the index, and confirm that the keys are present in order. 
auto indexCursor = getIndexCursor(collection.getCollection(), indexName); - KeyString::Value keyStringForSeek = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek( - BSONObj(), KeyString::Version::V1, Ordering::make(BSONObj()), true, true); + key_string::Value keyStringForSeek = IndexEntryComparison::makeKeyStringFromBSONKeyForSeek( + BSONObj(), key_string::Version::V1, Ordering::make(BSONObj()), true, true); auto indexKey = indexCursor->seek(keyStringForSeek); try { @@ -151,11 +196,9 @@ class WildcardMultikeyPersistenceTestFixture : public unittest::Test { ASSERT_EQ(expectedPaths.size(), expectedFieldRefs.size()); AutoGetCollectionForRead collection(opCtx(), nss); - auto indexAccessMethod = getIndex(collection.getCollection(), indexName); + auto indexEntry = getIndexCatalogEntry(collection.getCollection(), indexName); MultikeyMetadataAccessStats stats; - auto wam = dynamic_cast(indexAccessMethod); - ASSERT(wam != nullptr); - auto multikeyPathSet = getWildcardMultikeyPathSet(wam, opCtx(), &stats); + auto multikeyPathSet = getWildcardMultikeyPathSet(opCtx(), indexEntry, &stats); ASSERT(expectedFieldRefs == multikeyPathSet); } @@ -202,7 +245,7 @@ class WildcardMultikeyPersistenceTestFixture : public unittest::Test { if (!pathProjection.isEmpty()) bob << IndexDescriptor::kWildcardProjectionFieldName << pathProjection; - auto indexSpec = (bob << "v" << kIndexVersion << "background" << background).obj(); + auto indexSpec = (bob << "v" << kIndexVersion).obj(); Lock::DBLock dbLock(opCtx(), nss.dbName(), MODE_X); AutoGetCollection autoColl(opCtx(), nss, MODE_X); @@ -241,17 +284,17 @@ class WildcardMultikeyPersistenceTestFixture : public unittest::Test { return collection->getIndexCatalog()->findIndexByName(opCtx(), indexName); } - const SortedDataIndexAccessMethod* getIndex(const CollectionPtr& collection, - const StringData indexName) { - return collection->getIndexCatalog() - ->getEntry(getIndexDesc(collection, indexName)) - ->accessMethod() - ->asSortedData(); + const IndexCatalogEntry* getIndexCatalogEntry(const CollectionPtr& collection, + const StringData indexName) { + return collection->getIndexCatalog()->getEntry(getIndexDesc(collection, indexName)); } std::unique_ptr getIndexCursor(const CollectionPtr& collection, const StringData indexName) { - return getIndex(collection, indexName)->newCursor(opCtx()); + return getIndexCatalogEntry(collection, indexName) + ->accessMethod() + ->asSortedData() + ->newCursor(opCtx()); } CollectionOptions collOptions() { diff --git a/src/mongo/embedded/SConscript b/src/mongo/embedded/SConscript index 9e18a565ec6f2..737592444cd4d 100644 --- a/src/mongo/embedded/SConscript +++ b/src/mongo/embedded/SConscript @@ -59,6 +59,7 @@ env.Library( 'embedded_auth_session.cpp', 'embedded_ismaster.cpp', 'embedded_options.cpp', + 'embedded_options.idl', 'embedded_options_init.cpp', 'embedded_options_parser_init.cpp', 'index_builds_coordinator_embedded.cpp', @@ -68,15 +69,13 @@ env.Library( 'read_write_concern_defaults_cache_lookup_embedded.cpp', 'replication_coordinator_embedded.cpp', 'service_entry_point_embedded.cpp', + 'transaction_coordinator_curop_embedded.cpp', 'transaction_coordinator_factory_embedded.cpp', 'transaction_coordinator_worker_curop_repository_embedded.cpp', - 'transaction_coordinator_curop_embedded.cpp', - 'embedded_options.idl', - ], - LIBDEPS=[ - '$BUILD_DIR/mongo/base', + 'transaction_resources_init_embedded.cpp', ], LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/audit', '$BUILD_DIR/mongo/db/auth/auth', 
'$BUILD_DIR/mongo/db/catalog/catalog_impl', @@ -103,7 +102,6 @@ env.Library( '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/service_entry_point_common', '$BUILD_DIR/mongo/db/service_liaison_mongod', - '$BUILD_DIR/mongo/db/session/logical_session_cache', '$BUILD_DIR/mongo/db/session/logical_session_cache_impl', '$BUILD_DIR/mongo/db/session/sessions_collection_standalone', '$BUILD_DIR/mongo/db/startup_recovery', @@ -116,6 +114,7 @@ env.Library( '$BUILD_DIR/mongo/db/vector_clock_trivial', '$BUILD_DIR/mongo/db/wire_version', '$BUILD_DIR/mongo/rpc/client_metadata', + '$BUILD_DIR/mongo/transport/service_executor', '$BUILD_DIR/mongo/util/latch_analyzer' if get_option('use-diagnostic-latches') == 'on' else [], '$BUILD_DIR/mongo/util/options_parser/options_parser', diff --git a/src/mongo/embedded/embedded.cpp b/src/mongo/embedded/embedded.cpp index c1776683e7ffe..407d04d2abc7f 100644 --- a/src/mongo/embedded/embedded.cpp +++ b/src/mongo/embedded/embedded.cpp @@ -27,38 +27,69 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - #include "mongo/embedded/embedded.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" #include "mongo/base/status.h" -#include "mongo/config.h" -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/catalog/collection.h" #include "mongo/db/catalog/collection_impl.h" +#include "mongo/db/catalog/database_holder.h" #include "mongo/db/catalog/database_holder_impl.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/client.h" #include "mongo/db/commands/feature_compatibility_version.h" #include "mongo/db/commands/fsync_locked.h" -#include "mongo/db/concurrency/lock_state.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include "mongo/db/dbdirectclient.h" -#include "mongo/db/global_settings.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/op_observer/op_observer.h" #include "mongo/db/op_observer/op_observer_impl.h" #include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_impl.h" +#include "mongo/db/s/collection_sharding_state.h" #include "mongo/db/s/collection_sharding_state_factory_standalone.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_liaison_mongod.h" -#include "mongo/db/session/kill_sessions_local.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_impl.h" -#include "mongo/db/session/session_killer.h" +#include "mongo/db/session/sessions_collection.h" #include "mongo/db/session/sessions_collection_standalone.h" #include "mongo/db/startup_recovery.h" #include "mongo/db/storage/control/storage_control.h" -#include "mongo/db/storage/encryption_hooks.h" +#include "mongo/db/storage/storage_engine.h" #include 
"mongo/db/storage/storage_engine_init.h" -#include "mongo/db/ttl.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/db/wire_version.h" #include "mongo/embedded/embedded_options_parser_init.h" #include "mongo/embedded/index_builds_coordinator_embedded.h" #include "mongo/embedded/oplog_writer_embedded.h" @@ -67,20 +98,23 @@ #include "mongo/embedded/replication_coordinator_embedded.h" #include "mongo/embedded/service_entry_point_embedded.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" #include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/platform/process_id.h" #include "mongo/scripting/dbdirectclient_factory.h" -#include "mongo/util/background.h" +#include "mongo/transport/service_entry_point.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" #include "mongo/util/exit.h" #include "mongo/util/exit_code.h" -#include "mongo/util/periodic_runner_factory.h" +#include "mongo/util/periodic_runner.h" #include "mongo/util/quick_exit.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" -#include - #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage - namespace mongo { namespace embedded { namespace { @@ -150,7 +184,6 @@ ServiceContext::ConstructorActionRegisterer collectionShardingStateFactoryRegist } // namespace using logv2::LogComponent; -using std::endl; void shutdown(ServiceContext* srvContext) { { @@ -158,27 +191,34 @@ void shutdown(ServiceContext* srvContext) { auto const client = Client::getCurrent(); auto const serviceContext = client->getServiceContext(); + // TODO(SERVER-74659): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + serviceContext->setKillAllOperations(); // We should always be able to acquire the global lock at shutdown. // Close all open databases, shutdown storage engine and run all deinitializers. auto shutdownOpCtx = serviceContext->makeOperationContext(client); - { - // TODO (SERVER-71610): Fix to be interruptible or document exception. - UninterruptibleLockGuard noInterrupt(shutdownOpCtx->lockState()); // NOLINT. - Lock::GlobalLock lk(shutdownOpCtx.get(), MODE_X); - auto databaseHolder = DatabaseHolder::get(shutdownOpCtx.get()); - databaseHolder->closeAll(shutdownOpCtx.get()); + // Service context is in shutdown mode, even new operation contexts are considered killed. + // Marking the opCtx as executing shutdown prevents this, and makes the opCtx ignore all + // interrupts. 
+ shutdownOpCtx->setIsExecutingShutdown(); - LogicalSessionCache::set(serviceContext, nullptr); + Lock::GlobalLock lk(shutdownOpCtx.get(), MODE_X); + auto databaseHolder = DatabaseHolder::get(shutdownOpCtx.get()); + databaseHolder->closeAll(shutdownOpCtx.get()); - repl::ReplicationCoordinator::get(serviceContext)->shutdown(shutdownOpCtx.get()); - IndexBuildsCoordinator::get(serviceContext)->shutdown(shutdownOpCtx.get()); + LogicalSessionCache::set(serviceContext, nullptr); - // Global storage engine may not be started in all cases before we exit - if (serviceContext->getStorageEngine()) { - shutdownGlobalStorageEngineCleanly(serviceContext); - } + repl::ReplicationCoordinator::get(serviceContext)->shutdown(shutdownOpCtx.get()); + IndexBuildsCoordinator::get(serviceContext)->shutdown(shutdownOpCtx.get()); + + // Global storage engine may not be started in all cases before we exit + if (serviceContext->getStorageEngine()) { + shutdownGlobalStorageEngineCleanly(serviceContext); } } setGlobalServiceContext(nullptr); @@ -202,6 +242,13 @@ ServiceContext* initialize(const char* yaml_config) { setGlobalServiceContext(ServiceContext::make()); Client::initThread("initandlisten"); + + // TODO(SERVER-74659): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + // Make sure current thread have no client set in thread_local when we leave this function ScopeGuard clientGuard([] { Client::releaseCurrent(); }); @@ -280,12 +327,12 @@ ServiceContext* initialize(const char* yaml_config) { { std::stringstream ss; - ss << endl; - ss << "*********************************************************************" << endl; - ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << endl; - ss << " Create this directory or give existing directory in --dbpath." << endl; - ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << endl; - ss << "*********************************************************************" << endl; + ss << std::endl; + ss << "*********************************************************************" << std::endl; + ss << " ERROR: dbpath (" << storageGlobalParams.dbpath << ") does not exist." << std::endl; + ss << " Create this directory or give existing directory in --dbpath." << std::endl; + ss << " See http://dochub.mongodb.org/core/startingandstoppingmongo" << std::endl; + ss << "*********************************************************************" << std::endl; uassert(50677, ss.str().c_str(), boost::filesystem::exists(storageGlobalParams.dbpath)); } diff --git a/src/mongo/embedded/embedded_auth_manager.cpp b/src/mongo/embedded/embedded_auth_manager.cpp index fd512b0e5d8fd..755c298894846 100644 --- a/src/mongo/embedded/embedded_auth_manager.cpp +++ b/src/mongo/embedded/embedded_auth_manager.cpp @@ -27,11 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include +#include #include "mongo/base/shim.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege_format.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/embedded/not_implemented.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/embedded/embedded_auth_session.cpp b/src/mongo/embedded/embedded_auth_session.cpp index 29ad5f71c1b8e..0f78f78204b13 100644 --- a/src/mongo/embedded/embedded_auth_session.cpp +++ b/src/mongo/embedded/embedded_auth_session.cpp @@ -27,13 +27,40 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include #include "mongo/base/shim.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/auth/action_set.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/auth_name.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/embedded/not_implemented.h" -#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/time_support.h" namespace mongo { namespace embedded { @@ -86,6 +113,10 @@ class AuthorizationSession : public mongo::AuthorizationSession { UASSERT_NOT_IMPLEMENTED; } + boost::optional getUserTenantId() const override { + return boost::none; + } + bool shouldIgnoreAuthChecks() override { return true; } @@ -120,8 +151,7 @@ class AuthorizationSession : public mongo::AuthorizationSession { UASSERT_NOT_IMPLEMENTED; } - StatusWith checkAuthorizedToListCollections(StringData, - const BSONObj&) override { + StatusWith checkAuthorizedToListCollections(const ListCollections&) override { return PrivilegeVector(); } @@ -206,7 +236,7 @@ class AuthorizationSession : public mongo::AuthorizationSession { return Status::OK(); } - bool isAuthorizedForAnyActionOnAnyResourceInDB(StringData) override { + bool isAuthorizedForAnyActionOnAnyResourceInDB(const DatabaseName&) override { return true; } diff --git a/src/mongo/embedded/embedded_ismaster.cpp b/src/mongo/embedded/embedded_ismaster.cpp index bcd5089523be5..915571951d1a9 100644 --- a/src/mongo/embedded/embedded_ismaster.cpp +++ b/src/mongo/embedded/embedded_ismaster.cpp @@ -27,13 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/audit.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/wire_version.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/embedded/embedded_options.cpp b/src/mongo/embedded/embedded_options.cpp index 5699e55cc25d6..bba4a461cc915 100644 --- a/src/mongo/embedded/embedded_options.cpp +++ b/src/mongo/embedded/embedded_options.cpp @@ -30,13 +30,16 @@ #include "mongo/embedded/embedded_options.h" +#include // IWYU pragma: keep +#include + +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/server_options_base.h" #include "mongo/db/server_options_helpers.h" #include "mongo/db/storage/storage_options.h" #include "mongo/embedded/embedded_options_gen.h" - -#include -#include +#include "mongo/util/options_parser/value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/embedded/embedded_options.h b/src/mongo/embedded/embedded_options.h index efd05e930951b..b8f41a39820bb 100644 --- a/src/mongo/embedded/embedded_options.h +++ b/src/mongo/embedded/embedded_options.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/base/status.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" diff --git a/src/mongo/embedded/embedded_options_init.cpp b/src/mongo/embedded/embedded_options_init.cpp index 65c11d66f795c..0c37025ecd334 100644 --- a/src/mongo/embedded/embedded_options_init.cpp +++ b/src/mongo/embedded/embedded_options_init.cpp @@ -27,8 +27,14 @@ * it in the license file. */ -#include "mongo/embedded/embedded_options.h" +#include +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/embedded/embedded_options.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" diff --git a/src/mongo/embedded/embedded_options_parser_init.cpp b/src/mongo/embedded/embedded_options_parser_init.cpp index 31a5593a3e079..4a15f8b954302 100644 --- a/src/mongo/embedded/embedded_options_parser_init.cpp +++ b/src/mongo/embedded/embedded_options_parser_init.cpp @@ -27,17 +27,14 @@ * it in the license file. 
*/ -#include "mongo/util/options_parser/startup_options.h" - -#include - #include "mongo/embedded/embedded_options_parser_init.h" -#include "mongo/util/exit_code.h" -#include "mongo/util/options_parser/option_description.h" -#include "mongo/util/options_parser/option_section.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/options_parser.h" -#include "mongo/util/options_parser/startup_option_init.h" -#include "mongo/util/quick_exit.h" +#include "mongo/util/options_parser/startup_options.h" namespace mongo { namespace optionenvironment { diff --git a/src/mongo/embedded/embedded_options_parser_init.h b/src/mongo/embedded/embedded_options_parser_init.h index 658aadc89daab..a727597eb00cd 100644 --- a/src/mongo/embedded/embedded_options_parser_init.h +++ b/src/mongo/embedded/embedded_options_parser_init.h @@ -29,15 +29,15 @@ #pragma once -#include "mongo/embedded/embedded_options.h" +#include +#include +#include +#include "mongo/embedded/embedded_options.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" #include "mongo/util/static_immortal.h" -#include -#include - namespace mongo::embedded { class EmbeddedOptionsConfig { diff --git a/src/mongo/embedded/index_builds_coordinator_embedded.cpp b/src/mongo/embedded/index_builds_coordinator_embedded.cpp index 5dd97bff40a9b..b152f16b1daf7 100644 --- a/src/mongo/embedded/index_builds_coordinator_embedded.cpp +++ b/src/mongo/embedded/index_builds_coordinator_embedded.cpp @@ -28,14 +28,16 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/embedded/index_builds_coordinator_embedded.h" - -#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/operation_context.h" -#include "mongo/db/service_context.h" -#include "mongo/util/str.h" +#include "mongo/embedded/index_builds_coordinator_embedded.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kStorage @@ -88,7 +90,7 @@ IndexBuildsCoordinatorEmbedded::resumeIndexBuild(OperationContext* opCtx, } void IndexBuildsCoordinatorEmbedded::_signalPrimaryForAbortAndWaitForExternalAbort( - OperationContext* opCtx, ReplIndexBuildState* replState, const Status& abortStatus) {} + OperationContext* opCtx, ReplIndexBuildState* replState) {} void IndexBuildsCoordinatorEmbedded::_signalPrimaryForCommitReadiness( OperationContext* opCtx, std::shared_ptr replState) {} diff --git a/src/mongo/embedded/index_builds_coordinator_embedded.h b/src/mongo/embedded/index_builds_coordinator_embedded.h index 11e77083e1194..6b9a564042740 100644 --- a/src/mongo/embedded/index_builds_coordinator_embedded.h +++ b/src/mongo/embedded/index_builds_coordinator_embedded.h @@ -29,7 +29,23 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/commit_quorum_options.h" +#include "mongo/db/database_name.h" #include "mongo/db/index_builds_coordinator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl_index_build_state.h" +#include "mongo/db/resumable_index_builds_gen.h" +#include "mongo/util/future.h" 
+#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -96,8 +112,7 @@ class IndexBuildsCoordinatorEmbedded : public IndexBuildsCoordinator { std::shared_ptr replState) override; void _signalPrimaryForAbortAndWaitForExternalAbort(OperationContext* opCtx, - ReplIndexBuildState* replState, - const Status& abortStatus) override; + ReplIndexBuildState* replState) override; void _signalPrimaryForCommitReadiness(OperationContext* opCtx, std::shared_ptr replState) override; diff --git a/src/mongo/embedded/mongo_embedded/mongo_embedded.cpp b/src/mongo/embedded/mongo_embedded/mongo_embedded.cpp index a83f3aec813fc..d3459bd8a5716 100644 --- a/src/mongo/embedded/mongo_embedded/mongo_embedded.cpp +++ b/src/mongo/embedded/mongo_embedded/mongo_embedded.cpp @@ -26,32 +26,47 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo_embedded/mongo_embedded.h" - -#include +#include #include #include -#include -#include +#include +#include +#include +#include +#include +#include #include #include "api_common.h" +#include "mongo_embedded/mongo_embedded.h" +#include +#include +// IWYU pragma: no_include "boost/log/detail/attachable_sstream_buf.hpp" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/client.h" +#include "mongo/db/dbmessage.h" #include "mongo/db/service_context.h" #include "mongo/embedded/embedded.h" #include "mongo/embedded/embedded_log_backend.h" #include "mongo/logv2/component_settings_filter.h" +#include "mongo/logv2/log_attr.h" #include "mongo/logv2/log_domain_global.h" #include "mongo/logv2/log_manager.h" #include "mongo/logv2/plain_formatter.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/message.h" -#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/type_traits.h" #include "mongo/transport/service_entry_point.h" #include "mongo/transport/transport_layer_mock.h" #include "mongo/util/assert_util.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/future.h" #include "mongo/util/shared_buffer.h" #if defined(_WIN32) diff --git a/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp b/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp index 316f424fe924f..f809677af16cc 100644 --- a/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp +++ b/src/mongo/embedded/mongo_embedded/mongo_embedded_test.cpp @@ -27,35 +27,44 @@ * it in the license file. 
*/ - -#include "mongo_embedded/mongo_embedded.h" - +#include +#include #include #include -#include +#include +#include +#include +#include "mongo_embedded/mongo_embedded.h" +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_validate.h" -#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/json.h" #include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/json.h" #include "mongo/db/server_options.h" #include "mongo/embedded/mongo_embedded/mongo_embedded_test_gen.h" #include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" -#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" #include "mongo/unittest/thread_assertion_monitor.h" -#include "mongo/unittest/unittest.h" #include "mongo/util/exit_code.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/options_parser.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/quick_exit.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/shared_buffer.h" #include "mongo/util/signal_handlers_synchronous.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep namespace moe = mongo::optionenvironment; @@ -63,7 +72,6 @@ mongo_embedded_v1_lib* global_lib_handle; namespace { - auto& getGlobalTempDir() { static std::unique_ptr globalTempDir; if (!globalTempDir) { @@ -564,6 +572,7 @@ TEST_F(MongodbCAPITest, CreateMultipleDBs) { ASSERT_EQUALS(mongo_embedded_v1_status_get_error(status.get()), MONGO_EMBEDDED_V1_ERROR_DB_MAX_OPEN); } + } // namespace // Define main function as an entry to these tests. diff --git a/src/mongo/embedded/oplog_writer_embedded.h b/src/mongo/embedded/oplog_writer_embedded.h index cd7fd0fb12af2..e04888d5a31f7 100644 --- a/src/mongo/embedded/oplog_writer_embedded.h +++ b/src/mongo/embedded/oplog_writer_embedded.h @@ -46,16 +46,14 @@ class OplogWriterEmbedded : public OplogWriter { repl::OplogLink* oplogLink, const std::vector& stmtIds) override {} - std::vector logInsertOps( - OperationContext* opCtx, - repl::MutableOplogEntry* oplogEntryTemplate, - std::vector::const_iterator begin, - std::vector::const_iterator end, - std::vector fromMigrate, - std::function(const BSONObj& doc)> getDestinedRecipientFn, - const CollectionPtr& collectionPtr) override { - return {}; - } + void logOplogRecords(OperationContext* opCtx, + const NamespaceString& nss, + std::vector* records, + const std::vector& timestamps, + const CollectionPtr& oplogCollection, + repl::OpTime finalOpTime, + Date_t wallTime, + bool isAbortIndexBuild) override {} repl::OpTime logOp(OperationContext* opCtx, repl::MutableOplogEntry* oplogEntry) override { return {}; diff --git a/src/mongo/embedded/periodic_runner_embedded.cpp b/src/mongo/embedded/periodic_runner_embedded.cpp index ac52b8535dcfd..19b99967e86f1 100644 --- a/src/mongo/embedded/periodic_runner_embedded.cpp +++ b/src/mongo/embedded/periodic_runner_embedded.cpp @@ -27,14 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/embedded/periodic_runner_embedded.h" +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include #include "mongo/db/client.h" #include "mongo/db/service_context.h" +#include "mongo/embedded/periodic_runner_embedded.h" +#include "mongo/stdx/mutex.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/clock_source.h" -#include "mongo/util/scopeguard.h" namespace mongo { @@ -192,7 +198,7 @@ void PeriodicRunnerEmbedded::PeriodicJobImpl::stop() { } } -Milliseconds PeriodicRunnerEmbedded::PeriodicJobImpl::getPeriod() { +Milliseconds PeriodicRunnerEmbedded::PeriodicJobImpl::getPeriod() const { stdx::lock_guard lk(_mutex); return _job.interval; } diff --git a/src/mongo/embedded/periodic_runner_embedded.h b/src/mongo/embedded/periodic_runner_embedded.h index caa5f46c26ba5..6ec1700897faf 100644 --- a/src/mongo/embedded/periodic_runner_embedded.h +++ b/src/mongo/embedded/periodic_runner_embedded.h @@ -32,11 +32,13 @@ #include #include -#include "mongo/db/service_context_fwd.h" +#include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" #include "mongo/util/clock_source.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/periodic_runner.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -69,7 +71,7 @@ class PeriodicRunnerEmbedded : public PeriodicRunner { void pause() override; void resume() override; void stop() override; - Milliseconds getPeriod() override; + Milliseconds getPeriod() const override; void setPeriod(Milliseconds ms) override; bool isAlive(WithLock lk); @@ -90,7 +92,7 @@ class PeriodicRunnerEmbedded : public PeriodicRunner { // The mutex is protecting _execStatus, the variable that can be accessed from other // threads. - Mutex _mutex = MONGO_MAKE_LATCH("PeriodicJobImpl::_mutex"); + mutable Mutex _mutex = MONGO_MAKE_LATCH("PeriodicJobImpl::_mutex"); // The current execution status of the job. ExecutionStatus _execStatus{ExecutionStatus::kNotScheduled}; diff --git a/src/mongo/embedded/process_interface_factory_embedded.cpp b/src/mongo/embedded/process_interface_factory_embedded.cpp index ee165457a0e10..9018becdde3f4 100644 --- a/src/mongo/embedded/process_interface_factory_embedded.cpp +++ b/src/mongo/embedded/process_interface_factory_embedded.cpp @@ -27,9 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/base/shim.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/pipeline/process_interface/standalone_process_interface.h" namespace mongo { diff --git a/src/mongo/embedded/read_concern_embedded.cpp b/src/mongo/embedded/read_concern_embedded.cpp index f96171219a74e..50f4b99262139 100644 --- a/src/mongo/embedded/read_concern_embedded.cpp +++ b/src/mongo/embedded/read_concern_embedded.cpp @@ -27,10 +27,19 @@ * it in the license file. 
*/ +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/shim.h" +#include "mongo/base/status.h" +#include "mongo/db/database_name.h" #include "mongo/db/read_concern.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/speculative_majority_read_info.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.cpp b/src/mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.cpp index 316056e684b45..9b9a89fa0fa8c 100644 --- a/src/mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.cpp +++ b/src/mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.cpp @@ -29,6 +29,10 @@ #include "mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.h" +#include + +#include + namespace mongo { boost::optional readWriteConcernDefaultsCacheLookupEmbedded( diff --git a/src/mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.h b/src/mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.h index d9fb9e2dbcacc..1df2d7f4b5401 100644 --- a/src/mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.h +++ b/src/mongo/embedded/read_write_concern_defaults_cache_lookup_embedded.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/read_write_concern_defaults_gen.h" namespace mongo { diff --git a/src/mongo/embedded/replication_coordinator_embedded.cpp b/src/mongo/embedded/replication_coordinator_embedded.cpp index 66e56bd22daae..795a1cc1764a7 100644 --- a/src/mongo/embedded/replication_coordinator_embedded.cpp +++ b/src/mongo/embedded/replication_coordinator_embedded.cpp @@ -28,13 +28,17 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/embedded/replication_coordinator_embedded.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/repl_set_config.h" #include "mongo/embedded/not_implemented.h" +#include "mongo/embedded/replication_coordinator_embedded.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kReplication @@ -78,12 +82,12 @@ bool ReplicationCoordinatorEmbedded::isWritablePrimaryForReportingPurposes() { } bool ReplicationCoordinatorEmbedded::canAcceptWritesForDatabase(OperationContext* opCtx, - StringData dbName) { + const DatabaseName& dbName) { return true; } bool ReplicationCoordinatorEmbedded::canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, - StringData dbName) { + const DatabaseName& dbName) { return true; } @@ -209,7 +213,7 @@ void ReplicationCoordinatorEmbedded::setMyHeartbeatMessage(const std::string&) { } void ReplicationCoordinatorEmbedded::setMyLastAppliedOpTimeAndWallTimeForward( - const OpTimeAndWallTime&) { + const OpTimeAndWallTime&, bool) { UASSERT_NOT_IMPLEMENTED; } @@ -337,6 +341,10 @@ std::vector ReplicationCoordinatorEmbedded::getConfigVotingMembers UASSERT_NOT_IMPLEMENTED; } +size_t ReplicationCoordinatorEmbedded::getNumConfigVotingMembers() const { + UASSERT_NOT_IMPLEMENTED; +} + std::int64_t ReplicationCoordinatorEmbedded::getConfigTerm() const { UASSERT_NOT_IMPLEMENTED; } @@ -656,5 +664,9 @@ bool ReplicationCoordinatorEmbedded::isRetryableWrite(OperationContext* opCtx) c return false; } +boost::optional 
ReplicationCoordinatorEmbedded::getInitialSyncId(OperationContext* opCtx) { + UASSERT_NOT_IMPLEMENTED; +} + } // namespace embedded } // namespace mongo diff --git a/src/mongo/embedded/replication_coordinator_embedded.h b/src/mongo/embedded/replication_coordinator_embedded.h index d01f1b770f9eb..7e688b5b8799d 100644 --- a/src/mongo/embedded/replication_coordinator_embedded.h +++ b/src/mongo/embedded/replication_coordinator_embedded.h @@ -29,7 +29,47 @@ #pragma once +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/member_config.h" +#include "mongo/db/repl/member_data.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/repl_set_config.h" +#include "mongo/db/repl/repl_set_heartbeat_response.h" +#include "mongo/db/repl/repl_settings.h" #include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/split_horizon.h" +#include "mongo/db/repl/split_prepare_session_manager.h" +#include "mongo/db/repl/sync_source_selector.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/task_executor.h" +#include "mongo/rpc/metadata/oplog_query_metadata.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace embedded { @@ -71,8 +111,9 @@ class ReplicationCoordinatorEmbedded final : public repl::ReplicationCoordinator bool isInPrimaryOrSecondaryState(OperationContext* opCtx) const override; bool isInPrimaryOrSecondaryState_UNSAFE() const override; - bool canAcceptWritesForDatabase(OperationContext* opCtx, StringData dbName) override; - bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, StringData dbName) override; + bool canAcceptWritesForDatabase(OperationContext* opCtx, const DatabaseName& dbName) override; + bool canAcceptWritesForDatabase_UNSAFE(OperationContext* opCtx, + const DatabaseName& dbName) override; bool canAcceptWritesFor(OperationContext* opCtx, const NamespaceStringOrUUID& nsOrUUID) override; @@ -129,8 +170,8 @@ class ReplicationCoordinatorEmbedded final : public repl::ReplicationCoordinator const repl::OpTimeAndWallTime& opTimeAndWallTime) override; void setMyLastDurableOpTimeAndWallTime( const repl::OpTimeAndWallTime& opTimeAndWallTime) override; - void setMyLastAppliedOpTimeAndWallTimeForward( - const repl::OpTimeAndWallTime& opTimeAndWallTime) override; + void setMyLastAppliedOpTimeAndWallTimeForward(const repl::OpTimeAndWallTime& opTimeAndWallTime, + bool advanceGlobalTime) override; void setMyLastDurableOpTimeAndWallTimeForward( const repl::OpTimeAndWallTime& opTimeAndWallTime) override; @@ -188,6 +229,8 @@ class ReplicationCoordinatorEmbedded final : public repl::ReplicationCoordinator std::vector getConfigVotingMembers() const override; + size_t getNumConfigVotingMembers() const override; + std::int64_t getConfigTerm() const override; std::int64_t getConfigVersion() const 
override; @@ -365,6 +408,8 @@ class ReplicationCoordinatorEmbedded final : public repl::ReplicationCoordinator virtual bool isRetryableWrite(OperationContext* opCtx) const override; + virtual boost::optional getInitialSyncId(OperationContext* opCtx) override; + private: // Back pointer to the ServiceContext that has started the instance. ServiceContext* const _service; diff --git a/src/mongo/embedded/service_entry_point_embedded.cpp b/src/mongo/embedded/service_entry_point_embedded.cpp index fffe65ed58686..7c970a0256dab 100644 --- a/src/mongo/embedded/service_entry_point_embedded.cpp +++ b/src/mongo/embedded/service_entry_point_embedded.cpp @@ -28,16 +28,32 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/embedded/service_entry_point_embedded.h" - +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_concern.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/speculative_majority_read_info.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_entry_point_common.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/write_concern.h" #include "mongo/embedded/not_implemented.h" #include "mongo/embedded/periodic_runner_embedded.h" +#include "mongo/embedded/service_entry_point_embedded.h" +#include "mongo/rpc/op_msg.h" #include "mongo/transport/service_executor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/polymorphic_scoped.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/embedded/service_entry_point_embedded.h b/src/mongo/embedded/service_entry_point_embedded.h index 0db5e9dc0a08a..2f419f1c008cb 100644 --- a/src/mongo/embedded/service_entry_point_embedded.h +++ b/src/mongo/embedded/service_entry_point_embedded.h @@ -29,7 +29,18 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/dbmessage.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/rpc/message.h" #include "mongo/transport/service_entry_point.h" +#include "mongo/transport/session.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/embedded/stitch_support/SConscript b/src/mongo/embedded/stitch_support/SConscript index 813f231772c46..b0d48619bdc86 100644 --- a/src/mongo/embedded/stitch_support/SConscript +++ b/src/mongo/embedded/stitch_support/SConscript @@ -57,11 +57,12 @@ env.AutoInstall( if get_option('link-model') != 'dynamic-sdk': stitchSupportTestEnv = env.Clone() unitTest = stitchSupportTestEnv.CppUnitTest( - target="stitch_support_test", + target='stitch_support_test', source=[ - "stitch_support_test.cpp", + 'stitch_support_test.cpp', ], LIBDEPS=[ + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/unittest/unittest', 'stitch_support', ], diff --git a/src/mongo/embedded/stitch_support/stitch_support.cpp b/src/mongo/embedded/stitch_support/stitch_support.cpp index b0c56833f0f73..70c572a191292 100644 --- a/src/mongo/embedded/stitch_support/stitch_support.cpp +++ b/src/mongo/embedded/stitch_support/stitch_support.cpp @@ -27,28 +27,55 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "api_common.h" #include "stitch_support/stitch_support.h" +#include +#include -#include "api_common.h" +#include "mongo/base/error_codes.h" #include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/client.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" #include "mongo/db/exec/projection_executor.h" #include "mongo/db/exec/projection_executor_builder.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/field_ref_set.h" +#include "mongo/db/matcher/expression_with_placeholder.h" +#include "mongo/db/matcher/match_details.h" #include "mongo/db/matcher/matcher.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/parsed_update_array_filters.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/projection.h" #include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" #include "mongo/db/service_context.h" #include "mongo/db/update/update_driver.h" +#include "mongo/stdx/type_traits.h" #include "mongo/util/assert_util.h" - -#include -#include -#include +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/time_support.h" #if defined(_WIN32) #define MONGO_API_CALL __cdecl @@ -124,10 +151,7 @@ ServiceContext* initialize() { Status status = mongo::runGlobalInitializers(std::vector{}); uassertStatusOKWithContext(status, "Global initialization failed"); setGlobalServiceContext(ServiceContext::make()); - auto serviceContext = getGlobalServiceContext(); - serviceContext->registerClientObserver(std::make_unique()); - - return serviceContext; + return getGlobalServiceContext(); } struct ServiceContextDestructor { @@ -336,8 +360,14 @@ stitch_support_v1_matcher* matcher_create(stitch_support_v1_lib* const lib, "is not yet initialized."}; } - return new stitch_support_v1_matcher( - lib->serviceContext->makeClient("stitch_support"), filter, collator); + auto client = lib->serviceContext->makeClient("stitch_support"); + // TODO(SERVER-74662): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + + return new stitch_support_v1_matcher(std::move(client), filter, collator); } stitch_support_v1_projection* projection_create(stitch_support_v1_lib* const lib, @@ -356,9 +386,14 @@ stitch_support_v1_projection* projection_create(stitch_support_v1_lib* const lib "Library is not yet initialized."}; } + auto client = lib->serviceContext->makeClient("stitch_support"); + // TODO(SERVER-74662): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } - return new stitch_support_v1_projection( - lib->serviceContext->makeClient("stitch_support"), spec, matcher, collator); + return new stitch_support_v1_projection(std::move(client), spec, matcher, collator); } stitch_support_v1_update* update_create(stitch_support_v1_lib* const lib, @@ -378,11 +413,15 @@ stitch_support_v1_update* update_create(stitch_support_v1_lib* const lib, "Cannot create a new udpate when the Stitch Support Library is not yet initialized."}; } - return new stitch_support_v1_update(lib->serviceContext->makeClient("stitch_support"), - updateExpr, - arrayFilters, - matcher, - collator); + auto client = lib->serviceContext->makeClient("stitch_support"); + // TODO(SERVER-74662): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + + return new stitch_support_v1_update( + std::move(client), updateExpr, arrayFilters, matcher, collator); } int capi_status_get_error(const stitch_support_v1_status* const status) noexcept { diff --git a/src/mongo/embedded/stitch_support/stitch_support_test.cpp b/src/mongo/embedded/stitch_support/stitch_support_test.cpp index f1c0ab59b4b65..6a0a60d3de120 100644 --- a/src/mongo/embedded/stitch_support/stitch_support_test.cpp +++ b/src/mongo/embedded/stitch_support/stitch_support_test.cpp @@ -28,14 +28,26 @@ */ #include +#include +#include #include +#include #include +#include #include "stitch_support/stitch_support.h" #include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" -#include "mongo/unittest/unittest.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/exit_code.h" #include "mongo/util/quick_exit.h" #include "mongo/util/scopeguard.h" @@ -98,7 +110,7 @@ class StitchSupportTest : public mongo::unittest::Test { ASSERT(matcher); ON_BLOCK_EXIT([matcher] { stitch_support_v1_matcher_destroy(matcher); }); return std::all_of( - documentsJSON.begin(), documentsJSON.end(), [=](const char* documentJSON) { + documentsJSON.begin(), documentsJSON.end(), [=, this](const char* documentJSON) { bool isMatch; stitch_support_v1_check_match( matcher, toBSONForAPI(documentJSON).first, &isMatch, nullptr); @@ -153,7 +165,7 @@ class StitchSupportTest : public mongo::unittest::Test { std::transform(documentsJSON.begin(), documentsJSON.end(), std::back_inserter(results), - [=](const char* documentJSON) { + [=, this](const char* documentJSON) { auto bson = stitch_support_v1_projection_apply( projection, toBSONForAPI(documentJSON).first, nullptr); auto result = fromBSONForAPI(bson); diff --git a/src/mongo/embedded/transaction_coordinator_curop_embedded.cpp b/src/mongo/embedded/transaction_coordinator_curop_embedded.cpp index 0a5548a24a477..8ffc8521d01c5 100644 --- a/src/mongo/embedded/transaction_coordinator_curop_embedded.cpp +++ b/src/mongo/embedded/transaction_coordinator_curop_embedded.cpp @@ -27,10 +27,12 @@ * it in the license file. 
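Editor's note: `matcher_create`, `projection_create`, and `update_create` in stitch_support.cpp above now repeat the same three steps: make a "stitch_support" client, lock it, and mark it unkillable by stepdown. If a later cleanup wanted to factor that out, a hypothetical helper (not part of this patch; the name is illustrative only) could look like:

```cpp
// Hypothetical helper sketch; not part of this change.
auto makeUnkillableStitchSupportClient(ServiceContext* serviceContext) {
    auto client = serviceContext->makeClient("stitch_support");
    // TODO(SERVER-74662): revisit whether this thread could be made killable.
    {
        stdx::lock_guard lk(*client.get());
        client.get()->setSystemOperationUnkillableByStepdown(lk);
    }
    return client;
}
```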
*/ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/base/shim.h" -#include "mongo/db/s/transaction_coordinator_curop.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" namespace mongo { namespace { diff --git a/src/mongo/embedded/transaction_coordinator_factory_embedded.cpp b/src/mongo/embedded/transaction_coordinator_factory_embedded.cpp index eec8fa5eb49fc..95a2846ead2d5 100644 --- a/src/mongo/embedded/transaction_coordinator_factory_embedded.cpp +++ b/src/mongo/embedded/transaction_coordinator_factory_embedded.cpp @@ -27,10 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include + +#include #include "mongo/base/shim.h" -#include "mongo/db/s/transaction_coordinator_factory.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/session/logical_session_id.h" namespace mongo { namespace { diff --git a/src/mongo/embedded/transaction_coordinator_worker_curop_repository_embedded.cpp b/src/mongo/embedded/transaction_coordinator_worker_curop_repository_embedded.cpp index 7db2668533802..1cef41366e187 100644 --- a/src/mongo/embedded/transaction_coordinator_worker_curop_repository_embedded.cpp +++ b/src/mongo/embedded/transaction_coordinator_worker_curop_repository_embedded.cpp @@ -27,8 +27,15 @@ * it in the license file. */ +#include +#include + #include "mongo/base/shim.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/operation_context.h" #include "mongo/db/s/transaction_coordinator_worker_curop_repository.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" namespace mongo { namespace { diff --git a/src/mongo/embedded/transaction_resources_init_embedded.cpp b/src/mongo/embedded/transaction_resources_init_embedded.cpp new file mode 100644 index 0000000000000..dab4df1568647 --- /dev/null +++ b/src/mongo/embedded/transaction_resources_init_embedded.cpp @@ -0,0 +1,87 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include + +#include "mongo/db/client.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/concurrency/locker_impl.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/storage/storage_engine.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/transaction_resources.h" + +namespace mongo { +namespace { + +class TransactionResourcesEmbeddedClientObserver : public ServiceContext::ClientObserver { +public: + TransactionResourcesEmbeddedClientObserver() = default; + ~TransactionResourcesEmbeddedClientObserver() = default; + + void onCreateClient(Client* client) final {} + + void onDestroyClient(Client* client) final {} + + void onCreateOperationContext(OperationContext* opCtx) final { + auto service = opCtx->getServiceContext(); + + shard_role_details::TransactionResources::attachToOpCtx( + opCtx, std::make_unique()); + opCtx->setLockState(std::make_unique(service)); + + // There are a few cases where we don't have a storage engine available yet when creating an + // operation context. + // 1. During startup, we create an operation context to allow the storage engine + // initialization code to make use of the lock manager. + // 2. There are unit tests that create an operation context before initializing the storage + // engine. + // 3. Unit tests that use an operation context but don't require a storage engine for their + // testing purpose. + auto storageEngine = service->getStorageEngine(); + if (storageEngine) { + opCtx->setRecoveryUnit(std::unique_ptr(storageEngine->newRecoveryUnit()), + WriteUnitOfWork::RecoveryUnitState::kNotInUnitOfWork); + } + } + + void onDestroyOperationContext(OperationContext* opCtx) final {} +}; + +ServiceContext::ConstructorActionRegisterer transactionResourcesConstructor{ + "TransactionResourcesConstructor", [](ServiceContext* service) { + service->registerClientObserver( + std::make_unique()); + }}; + +} // namespace +} // namespace mongo diff --git a/src/mongo/executor/README.md b/src/mongo/executor/README.md index 18d6ec091b8a4..f96b70362438f 100644 --- a/src/mongo/executor/README.md +++ b/src/mongo/executor/README.md @@ -41,7 +41,11 @@ when desired. execute any work scheduled on the executor. * `ScopedTaskExecutor`: wraps a `TaskExecutor` and cancels any outstanding operations on destruction. +* `PinnedConnectionTaskExecutor`: wraps a `TaskExecutor` and acts as a `ScopedTaskExecutor` that +additionally runs all RPCs/remote operations scheduled through it over the same transport connection. * `TaskExecutorCursor`: manages a remote cursor that uses an asynchronous task executor to run all -stages of the command cursor protocol (initial command, getMore, killCursors). +stages of the command cursor protocol (initial command, getMore, killCursors). Offers a `pinConnections` +option that utilizes a `PinnedConnectionTaskExecutor` to run all operations on the cursor over the +same transport connection. * `TaskExecutorPool`: represents a pool of `TaskExecutors`. Work which requires a `TaskExecutor` can ask for an executor from the pool. This allows for work to be distributed across several executors. 
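Editor's note: the new transaction_resources_init_embedded.cpp above relies on the ServiceContext ClientObserver mechanism: an observer is registered once at service-context construction, and its onCreateOperationContext hook then decorates every new OperationContext (here with TransactionResources, a LockerImpl, and, when a storage engine exists, a RecoveryUnit). A stripped-down sketch of the same hook-up follows; the counting observer is hypothetical and assumes the usual Client/OperationContext/ServiceContext and AtomicWord headers are available.

```cpp
// Minimal sketch of the ClientObserver registration pattern used above.
namespace {

class OpCtxCountingObserver : public ServiceContext::ClientObserver {
public:
    void onCreateClient(Client* client) final {}
    void onDestroyClient(Client* client) final {}

    void onCreateOperationContext(OperationContext* opCtx) final {
        _created.fetchAndAdd(1);  // runs for every opCtx made on this service context
    }
    void onDestroyOperationContext(OperationContext* opCtx) final {}

private:
    AtomicWord<long long> _created{0};
};

// Registered once, when the ServiceContext is constructed.
ServiceContext::ConstructorActionRegisterer opCtxCountingObserverRegisterer{
    "OpCtxCountingObserverRegisterer", [](ServiceContext* service) {
        service->registerClientObserver(std::make_unique<OpCtxCountingObserver>());
    }};

}  // namespace
```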
diff --git a/src/mongo/executor/SConscript b/src/mongo/executor/SConscript index b112f2c6aa319..6efeaeb87e6b9 100644 --- a/src/mongo/executor/SConscript +++ b/src/mongo/executor/SConscript @@ -37,6 +37,7 @@ env.Library( LIBDEPS=[ '$BUILD_DIR/mongo/db/api_parameters', '$BUILD_DIR/mongo/rpc/metadata', + '$BUILD_DIR/mongo/s/mongos_server_parameters', '$BUILD_DIR/mongo/util/net/network', ], ) @@ -124,6 +125,16 @@ env.Library( ], ) +env.Library( + target='connection_pool_controllers', + source=[ + 'connection_pool_controllers.cpp', + ], + LIBDEPS=[ + 'connection_pool_executor', + ], +) + env.Library( target='network_test_env', source=[ @@ -286,6 +297,7 @@ env.Library( ], LIBDEPS=[ '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/commands', '$BUILD_DIR/mongo/db/commands/kill_common', '$BUILD_DIR/mongo/idl/generic_args_with_types_idl', '$BUILD_DIR/mongo/rpc/command_status', @@ -402,6 +414,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/commands/standalone', '$BUILD_DIR/mongo/db/query/command_request_response', '$BUILD_DIR/mongo/db/repl/hello_command', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/s/mongos_server_parameters', '$BUILD_DIR/mongo/s/sharding_router_test_fixture', diff --git a/src/mongo/executor/async_multicaster.cpp b/src/mongo/executor/async_multicaster.cpp index c932d58a0dd48..74ad8bf50cbe7 100644 --- a/src/mongo/executor/async_multicaster.cpp +++ b/src/mongo/executor/async_multicaster.cpp @@ -28,14 +28,13 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/async_multicaster.h" - #include +#include +#include -#include "mongo/base/status.h" #include "mongo/db/operation_context.h" +#include "mongo/executor/async_multicaster.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/executor/async_multicaster.h b/src/mongo/executor/async_multicaster.h index 400a7371f0aad..f825b6d9580a6 100644 --- a/src/mongo/executor/async_multicaster.h +++ b/src/mongo/executor/async_multicaster.h @@ -29,12 +29,18 @@ #pragma once +#include #include +#include +#include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" #include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/executor/async_request_executor.cpp b/src/mongo/executor/async_request_executor.cpp index 53b9386b14312..e76627dd681a1 100644 --- a/src/mongo/executor/async_request_executor.cpp +++ b/src/mongo/executor/async_request_executor.cpp @@ -30,11 +30,21 @@ #include "mongo/executor/async_request_executor.h" +#include +#include + +#include +#include + #include "mongo/db/client_strand.h" #include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kExecutor diff --git a/src/mongo/executor/async_request_executor.h b/src/mongo/executor/async_request_executor.h index 7a7f2f62cac0d..be51427e465e3 100644 --- a/src/mongo/executor/async_request_executor.h +++ b/src/mongo/executor/async_request_executor.h @@ -32,6 +32,7 @@ #include 
#include +#include "mongo/base/status.h" #include "mongo/db/request_execution_context.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/future.h" diff --git a/src/mongo/executor/async_rpc.cpp b/src/mongo/executor/async_rpc.cpp index a9bb1f0e6b79e..8aaaa42cc9268 100644 --- a/src/mongo/executor/async_rpc.cpp +++ b/src/mongo/executor/async_rpc.cpp @@ -28,12 +28,23 @@ */ #include "mongo/executor/async_rpc.h" + +#include +#include +#include + +#include +#include +#include + #include "mongo/base/error_codes.h" #include "mongo/executor/remote_command_request.h" +#include "mongo/rpc/metadata.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/hostandport.h" -#include namespace mongo::async_rpc { namespace detail { @@ -48,41 +59,70 @@ class AsyncRPCRunnerImpl : public AsyncRPCRunner { * * Do not call directly - this is not part of the public API. */ - ExecutorFuture _sendCommand(StringData dbName, - BSONObj cmdBSON, - Targeter* targeter, - OperationContext* opCtx, - std::shared_ptr exec, - CancellationToken token, - BatonHandle baton) final { + ExecutorFuture _sendCommand( + StringData dbName, + BSONObj cmdBSON, + Targeter* targeter, + OperationContext* opCtx, + std::shared_ptr exec, + CancellationToken token, + BatonHandle baton, + boost::optional clientOperationKey) final { auto proxyExec = std::make_shared(baton, exec); auto targetsUsed = std::make_shared>(); return targeter->resolve(token) .thenRunOn(proxyExec) - .then([dbName, + .then([dbName = dbName.toString(), cmdBSON, opCtx, exec = std::move(exec), token, baton = std::move(baton), - targetsUsed](std::vector targets) { + targetsUsed, + clientOperationKey](std::vector targets) { invariant(targets.size(), "Successful targeting implies there are hosts to target."); *targetsUsed = targets; executor::RemoteCommandRequestOnAny executorRequest( - targets, dbName.toString(), cmdBSON, rpc::makeEmptyMetadata(), opCtx); + targets, + dbName, + cmdBSON, + rpc::makeEmptyMetadata(), + opCtx, + executor::RemoteCommandRequest::kNoTimeout, + {}, + clientOperationKey); return exec->scheduleRemoteCommandOnAny(executorRequest, token, std::move(baton)); }) - .onError([targetsUsed](Status s) -> StatusWith { - // If there was a scheduling error or other local error before the - // command was accepted by the executor. - return Status{AsyncRPCErrorInfo(s, *targetsUsed), - "Remote command execution failed"}; - }) - .then([targetsUsed](TaskExecutor::ResponseOnAnyStatus r) { + .onError( + [targetsUsed, targeter](Status s) -> StatusWith { + if (targetsUsed->size()) { + // TODO SERVER-78148 Mirrors AsyncRequestsSender in that the first target is + // used. The assumption that the first target triggers the error is wrong + // and will be changed as part of the TODO. + targeter->onRemoteCommandError((*targetsUsed)[0], s).get(); + } + // If there was a scheduling error or other local error before the + // command was accepted by the executor. + return Status{AsyncRPCErrorInfo(s, *targetsUsed), + "Remote command execution failed"}; + }) + .then([targetsUsed, targeter](TaskExecutor::ResponseOnAnyStatus r) { auto s = makeErrorIfNeeded(r, *targetsUsed); + // Update targeter for errors. 
+ if (!s.isOK() && s.code() == ErrorCodes::RemoteCommandExecutionError) { + auto extraInfo = s.extraInfo(); + if (extraInfo->isLocal()) { + targeter->onRemoteCommandError(*(r.target), extraInfo->asLocal()).get(); + } else { + targeter + ->onRemoteCommandError(*(r.target), + extraInfo->asRemote().getRemoteCommandResult()) + .get(); + } + } uassertStatusOK(s); - return AsyncRPCInternalResponse{r.data, r.target.get()}; + return AsyncRPCInternalResponse{r.data, r.target.get(), *r.elapsed}; }); } }; diff --git a/src/mongo/executor/async_rpc.h b/src/mongo/executor/async_rpc.h index 515dfb5b0a0ca..13dd77eec200e 100644 --- a/src/mongo/executor/async_rpc.h +++ b/src/mongo/executor/async_rpc.h @@ -29,13 +29,30 @@ #pragma once +#include +#include +#include #include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/client/async_remote_command_targeter_adapter.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/hedging_mode_gen.h" #include "mongo/client/mongo_uri.h" +#include "mongo/client/read_preference.h" +#include "mongo/client/remote_command_targeter.h" #include "mongo/client/remote_command_targeter_factory_impl.h" +#include "mongo/db/baton.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" #include "mongo/executor/async_rpc_error_info.h" #include "mongo/executor/async_rpc_retry_policy.h" @@ -43,17 +60,21 @@ #include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" #include "mongo/idl/generic_args_with_types_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/async_rpc_shard_targeter.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/transport/baton.h" #include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" #include "mongo/util/future_util.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/time_support.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT mongo::logv2::LogComponent::kExecutor +#include "mongo/util/uuid.h" /** * This header provides an API of `sendCommand(...)` functions that can be used to asynchronously @@ -87,6 +108,7 @@ template struct AsyncRPCResponse { CommandReplyType response; HostAndPort targetUsed; + Microseconds elapsed; GenericReplyFields genericReplyFields; }; @@ -97,12 +119,13 @@ struct AsyncRPCResponse { template <> struct AsyncRPCResponse { HostAndPort targetUsed; + Microseconds elapsed; }; template struct AsyncRPCOptions { AsyncRPCOptions(CommandType cmd, - std::shared_ptr exec, + const std::shared_ptr& exec, CancellationToken token, std::shared_ptr retryPolicy = std::make_shared(), GenericArgs genericArgs = GenericArgs(), @@ -113,6 +136,18 @@ struct AsyncRPCOptions { retryPolicy{retryPolicy}, genericArgs{genericArgs}, baton{std::move(baton)} {} + AsyncRPCOptions(CommandType cmd, + const std::shared_ptr& exec, + CancellationToken token, + GenericArgs genericArgs, + std::shared_ptr retryPolicy = std::make_shared(), + BatonHandle baton = nullptr) + : cmd{cmd}, + exec{exec}, + token{token}, + retryPolicy{retryPolicy}, + genericArgs{genericArgs}, + baton{std::move(baton)} {} CommandType cmd; 
std::shared_ptr exec; CancellationToken token; @@ -129,6 +164,7 @@ namespace detail { struct AsyncRPCInternalResponse { BSONObj response; HostAndPort targetUsed; + Microseconds elapsed; }; /** @@ -152,20 +188,24 @@ class AsyncRPCRunner { OperationContext* opCtx, std::shared_ptr exec, CancellationToken token, - BatonHandle baton) = 0; - ExecutorFuture _sendCommand(StringData dbName, - BSONObj cmdBSON, - Targeter* targeter, - OperationContext* opCtx, - std::shared_ptr exec, - CancellationToken token) { + BatonHandle baton, + boost::optional clientOperationKey) = 0; + ExecutorFuture _sendCommand( + StringData dbName, + BSONObj cmdBSON, + Targeter* targeter, + OperationContext* opCtx, + std::shared_ptr exec, + CancellationToken token, + boost::optional clientOperationKey) { return _sendCommand(std::move(dbName), std::move(cmdBSON), std::move(targeter), std::move(opCtx), std::move(exec), std::move(token), - nullptr); + nullptr, + std::move(clientOperationKey)); } static AsyncRPCRunner* get(ServiceContext* serviceContext); static void set(ServiceContext* serviceContext, std::unique_ptr theRunner); @@ -239,7 +279,8 @@ ExecutorFuture> sendCommandWithRun targeter.get(), opCtx, options->exec, - options->token); + options->token, + options->genericArgs.stable.getClientOperationKey()); }; auto resFuture = AsyncTry(std::move(tryBody)) @@ -259,7 +300,10 @@ ExecutorFuture> sendCommandWithRun IDLParserContext("AsyncRPCRunner"), r.response); auto unstableReplyFields = GenericReplyFieldsWithTypesUnstableV1::parseSharingOwnership( IDLParserContext("AsyncRPCRunner"), r.response); - return {res, r.targetUsed, GenericReplyFields{stableReplyFields, unstableReplyFields}}; + return {res, + r.targetUsed, + r.elapsed, + GenericReplyFields{stableReplyFields, unstableReplyFields}}; }) .unsafeToInlineFuture() .onError( @@ -295,6 +339,14 @@ ExecutorFuture> sendCommandWithRun } } // namespace detail +namespace { +void createOperationKeyIfNeeded(GenericArgs& genericArgs) { + if (!genericArgs.stable.getClientOperationKey()) { + genericArgs.stable.setClientOperationKey(UUID::gen()); + } +} +} // namespace + /** * Execute the command asynchronously on the given target with the provided executor. * @@ -326,6 +378,7 @@ ExecutorFuture> sendCommand( OperationContext* opCtx, std::unique_ptr targeter) { auto runner = detail::AsyncRPCRunner::get(opCtx->getServiceContext()); + createOperationKeyIfNeeded(options->genericArgs); auto genericArgs = options->genericArgs.stable.toBSON().addFields(options->genericArgs.unstable.toBSON()); auto cmdBSON = options->cmd.toBSON(genericArgs); @@ -346,6 +399,7 @@ ExecutorFuture> sendCommand( // Wrapping this function allows us to separate the CommandType parsing logic from the // implementation details of executing the remote command asynchronously. 
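Editor's note: the sendCommand overloads above all share the same caller-side shape: build an AsyncRPCOptions for a concrete IDL command type, then pass it to sendCommand together with an opCtx and a targeter. A hedged usage sketch follows; `helloCmd` (a HelloCommand from hello_gen.h), `executor`, `token`, `opCtx`, and `targeter` are assumed to be constructed elsewhere, and the exact template spellings are elided in this patch view.

```cpp
// Hedged caller-side sketch; all inputs are assumed to exist already.
auto options = std::make_shared<async_rpc::AsyncRPCOptions<HelloCommand>>(
    helloCmd, executor, token);

// Resolves to an AsyncRPCResponse for the command's reply type, which after this
// change also carries the target used, the elapsed time, and generic reply fields.
auto responseFuture = async_rpc::sendCommand(options, opCtx, std::move(targeter));
```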
auto runner = detail::AsyncRPCRunner::get(svcCtx); + createOperationKeyIfNeeded(options->genericArgs); auto genericArgs = options->genericArgs.stable.toBSON().addFields(options->genericArgs.unstable.toBSON()); auto cmdBSON = options->cmd.toBSON(genericArgs); @@ -364,7 +418,10 @@ ExecutorFuture> sendCommand( std::unique_ptr targeter = std::make_unique(shardId, opCtx, readPref, options->exec); auto runner = detail::AsyncRPCRunner::get(opCtx->getServiceContext()); - auto cmdBSON = options->cmd.toBSON({}); + createOperationKeyIfNeeded(options->genericArgs); + auto genericArgs = + options->genericArgs.stable.toBSON().addFields(options->genericArgs.unstable.toBSON()); + auto cmdBSON = options->cmd.toBSON(genericArgs); return detail::sendCommandWithRunner(cmdBSON, options, runner, opCtx, std::move(targeter)); } @@ -385,7 +442,10 @@ ExecutorFuture> sendCommand( std::make_unique(readPref, remoteCommandTargeter); auto runner = detail::AsyncRPCRunner::get(opCtx->getServiceContext()); - auto cmdBSON = options->cmd.toBSON({}); + createOperationKeyIfNeeded(options->genericArgs); + auto genericArgs = + options->genericArgs.stable.toBSON().addFields(options->genericArgs.unstable.toBSON()); + auto cmdBSON = options->cmd.toBSON(genericArgs); return detail::sendCommandWithRunner(cmdBSON, options, runner, opCtx, std::move(targeter)); } @@ -400,4 +460,3 @@ ExecutorFuture> sendCommand( } } // namespace mongo::async_rpc -#undef MONGO_LOGV2_DEFAULT_COMPONENT diff --git a/src/mongo/executor/async_rpc_error_info.cpp b/src/mongo/executor/async_rpc_error_info.cpp index 1a42d12f2e31f..06bc3d29b5c6f 100644 --- a/src/mongo/executor/async_rpc_error_info.cpp +++ b/src/mongo/executor/async_rpc_error_info.cpp @@ -29,7 +29,10 @@ #include "mongo/executor/async_rpc_error_info.h" -#include "mongo/base/init.h" + +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/bson/bsonobjbuilder.h" namespace mongo { @@ -66,6 +69,21 @@ Status unpackRPCStatus(Status status) { return out; } +Status unpackRPCStatusIgnoringWriteErrors(Status status) { + invariant(status == ErrorCodes::RemoteCommandExecutionError); + auto errorInfo = status.extraInfo(); + if (errorInfo->isLocal()) { + return errorInfo->asLocal(); + } + invariant(errorInfo->isRemote()); + auto remoteError = errorInfo->asRemote(); + Status out = remoteError.getRemoteCommandResult(); + if (out.isOK()) { + out = remoteError.getRemoteCommandWriteConcernError(); + } + return out; +} + Status unpackRPCStatusIgnoringWriteConcernAndWriteErrors(Status status) { invariant(status == ErrorCodes::RemoteCommandExecutionError); auto errorInfo = status.extraInfo(); diff --git a/src/mongo/executor/async_rpc_error_info.h b/src/mongo/executor/async_rpc_error_info.h index 9fa772014e76c..7f3f229d19639 100644 --- a/src/mongo/executor/async_rpc_error_info.h +++ b/src/mongo/executor/async_rpc_error_info.h @@ -29,12 +29,27 @@ #pragma once +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/executor/remote_command_response.h" #include "mongo/idl/generic_args_with_types_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/stdx/variant.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" namespace mongo { using 
executor::RemoteCommandOnAnyResponse; @@ -73,7 +88,8 @@ class AsyncRPCErrorInfo final : public ErrorExtraInfo { _remoteCommandResult{getStatusFromCommandResult(_error)}, _remoteCommandWriteConcernError{getWriteConcernStatusFromCommandResult(_error)}, _remoteCommandFirstWriteError{getFirstWriteErrorStatusFromCommandResult(_error)}, - _targetUsed{*rcr.target} { + _targetUsed{*rcr.target}, + _elapsed{*rcr.elapsed} { // The buffer backing the default empty BSONObj has static duration so it is effectively // owned. invariant(_error.isOwned() || _error.objdata() == BSONObj().objdata()); @@ -111,6 +127,10 @@ class AsyncRPCErrorInfo final : public ErrorExtraInfo { return _targetUsed; } + Microseconds getElapsed() const { + return _elapsed; + } + GenericReplyFields getGenericReplyFields() const { return _genericReplyFields; } @@ -122,6 +142,7 @@ class AsyncRPCErrorInfo final : public ErrorExtraInfo { Status _remoteCommandFirstWriteError; std::vector _errLabels; HostAndPort _targetUsed; + Microseconds _elapsed; GenericReplyFields _genericReplyFields; }; @@ -213,6 +234,19 @@ namespace async_rpc { */ Status unpackRPCStatus(Status status); +/** + * Converts a RemoteCommandExecutionError from the async_rpc::sendCommand API + * into the highest-priority 'underlying error' responsible for the RPC error, + * ignoring write errors returned from the remote. This means: + * (1) If there was an error on the local node that caused the failure, + * that error is returned. + * (2) If we received an {ok: 0} response from the remote node, that error + * is returned. + * (3) If we received an {ok: 1} response from the remote node, but a write + * concern error, the write concern error is returned. + */ +Status unpackRPCStatusIgnoringWriteErrors(Status status); + /** * Converts a RemoteCommandExecutionError from the async_rpc::sendCommand API * into the highest-priority 'underlying error' responsible for the RPC error, diff --git a/src/mongo/executor/async_rpc_test.cpp b/src/mongo/executor/async_rpc_test.cpp index 6f20e410871c3..b6f31eef2b033 100644 --- a/src/mongo/executor/async_rpc_test.cpp +++ b/src/mongo/executor/async_rpc_test.cpp @@ -27,33 +27,60 @@ * it in the license file. 
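From the caller's side, the new `unpackRPCStatusIgnoringWriteErrors()` helper and the elapsed-time plumbing added above look roughly like the sketch below; the control flow and variable names are illustrative, while the priority order and `AsyncRPCErrorInfo::getElapsed()` come straight from the hunks above.

```cpp
// Every failure surfaced by async_rpc::sendCommand is a RemoteCommandExecutionError;
// this peels it back to the underlying local error, remote {ok: 0} error, or write
// concern error, while deliberately ignoring per-document write errors.
try {
    auto reply = std::move(responseFuture).get();
    // ... use 'reply' ...
} catch (const DBException& ex) {
    Status wrapped = ex.toStatus();
    invariant(wrapped == ErrorCodes::RemoteCommandExecutionError);
    Status underlying = async_rpc::unpackRPCStatusIgnoringWriteErrors(wrapped);
    // The extra info now also records how long the attempt took:
    Microseconds elapsed = wrapped.extraInfo<AsyncRPCErrorInfo>()->getElapsed();
    // ... handle 'underlying', record 'elapsed' ...
}
```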
*/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/cursor_response.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/hello_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/async_rpc.h" #include "mongo/executor/async_rpc_error_info.h" #include "mongo/executor/async_rpc_retry_policy.h" #include "mongo/executor/async_rpc_targeter.h" #include "mongo/executor/async_rpc_test_fixture.h" #include "mongo/executor/async_transaction_rpc.h" -#include "mongo/executor/network_test_env.h" -#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/executor/task_executor.h" -#include "mongo/executor/task_executor_test_fixture.h" -#include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/idl/generic_args_with_types_gen.h" #include "mongo/rpc/topology_version_gen.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/transaction_router.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/thread_assertion_monitor.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/concurrency/notification.h" #include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/hostandport.h" -#include -#include "mongo/logv2/log.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork namespace mongo { @@ -177,7 +204,9 @@ TEST_F(AsyncRPCTestFixture, SuccessfulHelloWithGenericFields) { GenericReplyFieldsWithTypesUnstableV1 genericReplyUnstable; genericReplyUnstable.setOk(1); genericReplyUnstable.setDollarConfigTime(Timestamp(1, 1)); - const LogicalTime clusterTime = LogicalTime(Timestamp(2, 3)); + auto clusterTime = ClusterTime(); + clusterTime.setClusterTime(LogicalTime(Timestamp(2, 3))); + clusterTime.setSignature(ClusterTimeSignature(std::vector(), 0)); genericReplyApiV1.setDollarClusterTime(clusterTime); auto configTime = Timestamp(1, 1); genericArgsUnstable.setDollarConfigTime(configTime); @@ -205,7 +234,7 @@ TEST_F(AsyncRPCTestFixture, SuccessfulHelloWithGenericFields) { BSONObjBuilder reply = BSONObjBuilder(helloReply.toBSON()); reply.append("ok", 1); reply.append("$configTime", Timestamp(1, 1)); - clusterTime.serializeToBSON("$clusterTime", &reply); + reply.append("$clusterTime", clusterTime.toBSON()); return reply.obj(); }); @@ -417,7 +446,10 @@ TEST_F(AsyncRPCTestFixture, RemoteErrorWithGenericReplyFields) { auto resultFuture = sendCommand(options, opCtxHolder.get(), std::move(targeter)); GenericReplyFieldsWithTypesV1 stableFields; - stableFields.setDollarClusterTime(LogicalTime(Timestamp(2, 3))); + auto clusterTime = ClusterTime(); + 
clusterTime.setClusterTime(LogicalTime(Timestamp(2, 3))); + clusterTime.setSignature(ClusterTimeSignature(std::vector(), 0)); + stableFields.setDollarClusterTime(clusterTime); GenericReplyFieldsWithTypesUnstableV1 unstableFields; unstableFields.setDollarConfigTime(Timestamp(1, 1)); unstableFields.setOk(false); @@ -453,7 +485,7 @@ TEST_F(AsyncRPCTestFixture, RemoteErrorWithGenericReplyFields) { TEST_F(AsyncRPCTestFixture, SuccessfulFind) { std::unique_ptr targeter = std::make_unique(); auto opCtxHolder = makeOperationContext(); - DatabaseName testDbName = DatabaseName("testdb", boost::none); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); FindCommandRequest findCmd(nss); @@ -490,8 +522,7 @@ TEST_F(AsyncRPCTestFixture, WriteConcernError) { const BSONObj writeConcernError = BSON("code" << ErrorCodes::WriteConcernFailed << "errmsg" << "mock"); - const BSONObj resWithWriteConcernError = - BSON("ok" << 1 << "writeConcernError" << writeConcernError); + BSONObj resWithWriteConcernError = BSON("ok" << 1 << "writeConcernError" << writeConcernError); auto opCtxHolder = makeOperationContext(); auto options = std::make_shared>( @@ -533,7 +564,7 @@ TEST_F(AsyncRPCTestFixture, WriteError) { const BSONObj writeError = BSON("code" << ErrorCodes::DocumentValidationFailure << "errInfo" << writeErrorExtraInfo << "errmsg" << "Document failed validation"); - const BSONObj resWithWriteError = BSON("ok" << 1 << "writeErrors" << BSON_ARRAY(writeError)); + BSONObj resWithWriteError = BSON("ok" << 1 << "writeErrors" << BSON_ARRAY(writeError)); auto opCtxHolder = makeOperationContext(); auto options = std::make_shared>( helloCmd, getExecutorPtr(), _cancellationToken); @@ -765,7 +796,7 @@ TEST_F(AsyncRPCTestFixture, SendTxnCommandWithoutTxnRouterAppendsNoTxnFields) { auto opCtxHolder = makeOperationContext(); auto targeter = std::make_unique( shardId, opCtxHolder.get(), readPref, getExecutorPtr(), testHost); - DatabaseName testDbName = DatabaseName("testdb", boost::none); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); FindCommandRequest findCmd(nss); @@ -798,7 +829,7 @@ TEST_F(AsyncRPCTxnTestFixture, MultipleSendTxnCommand) { // Use a mock ShardIdTargeter to avoid calling into the ShardRegistry to get a target shard. auto targeter = std::make_unique( shardId, getOpCtx(), readPref, getExecutorPtr(), testHost); - DatabaseName testDbName = DatabaseName("testdb", boost::none); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); // Set up the transaction metadata. @@ -864,7 +895,7 @@ TEST_F(AsyncRPCTxnTestFixture, EnsureProcessParticipantCalledCorrectlyOnSuccess) // Use a mock ShardIdTargeter to avoid calling into the ShardRegistry to get a target shard. auto targeter = std::make_unique( shardId, getOpCtx(), readPref, getExecutorPtr(), testHost); - DatabaseName testDbName = DatabaseName("testdb", boost::none); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); // Set up the transaction metadata. 
@@ -927,7 +958,7 @@ TEST_F(AsyncRPCTxnTestFixture, EnsureProcessParticipantCalledCorrectlyOnRemoteEr // Use a mock ShardIdTargeter to avoid calling into the ShardRegistry to get a target shard. auto targeter = std::make_unique( shardId, getOpCtx(), readPref, getExecutorPtr(), testHost); - DatabaseName testDbName = DatabaseName("testdb", boost::none); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); // Set up the transaction metadata. @@ -989,7 +1020,7 @@ TEST_F(AsyncRPCTxnTestFixture, SendTxnCommandWithGenericArgs) { // Use a mock ShardIdTargeter to avoid calling into the ShardRegistry to get a target shard. auto targeter = std::make_unique( shardId, getOpCtx(), readPref, getExecutorPtr(), testHost); - DatabaseName testDbName = DatabaseName("testdb", boost::none); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); // Set up the transaction metadata. @@ -1055,7 +1086,7 @@ TEST_F(AsyncRPCTxnTestFixture, SendTxnCommandReturnsRemoteError) { // Use a mock ShardIdTargeter to avoid calling into the ShardRegistry to get a target shard. auto targeter = std::make_unique( shardId, getOpCtx(), readPref, getExecutorPtr(), testHost); - DatabaseName testDbName = DatabaseName("testdb", boost::none); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); // Set up the transaction metadata. @@ -1092,7 +1123,7 @@ TEST_F(AsyncRPCTxnTestFixture, SendTxnCommandReturnsLocalError) { // Use a mock ShardIdTargeter to avoid calling into the ShardRegistry to get a target shard. auto targeter = std::make_unique( shardId, getOpCtx(), readPref, getExecutorPtr(), testHost); - DatabaseName testDbName = DatabaseName("testdb", boost::none); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); // Set up the transaction metadata. 
@@ -1204,6 +1235,102 @@ TEST_F(AsyncRPCTestFixture, RemoteErrorAttemptedTargetMatchesActual) { ASSERT_EQ(target, targetHeardFrom); } +auto extractUUID(const BSONElement& element) { + return UUID::fromCDR(element.uuid()); +} + +auto getOpKeyFromCommand(const BSONObj& cmdObj) { + return extractUUID(cmdObj["clientOperationKey"]); +} + +TEST_F(AsyncRPCTestFixture, OperationKeyIsSetByDefault) { + std::unique_ptr targeter = std::make_unique(); + auto opCtxHolder = makeOperationContext(); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); + + FindCommandRequest findCmd(nss); + auto options = std::make_shared>( + findCmd, getExecutorPtr(), _cancellationToken); + auto future = sendCommand(options, opCtxHolder.get(), std::move(targeter)); + ASSERT_DOES_NOT_THROW([&] { + onCommand([&](const auto& request) { + (void)getOpKeyFromCommand(request.cmdObj); + return CursorResponse(nss, 0LL, {BSON("x" << 1)}) + .toBSON(CursorResponse::ResponseType::InitialResponse); + }); + }()); + auto network = getNetworkInterfaceMock(); + network->enterNetwork(); + network->runReadyNetworkOperations(); + network->exitNetwork(); + + future.get(); +} + +TEST_F(AsyncRPCTestFixture, UseOperationKeyWhenProvided) { + const auto opKey = UUID::gen(); + + std::unique_ptr targeter = std::make_unique(); + auto opCtxHolder = makeOperationContext(); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); + + FindCommandRequest findCmd(nss); + auto options = std::make_shared>( + findCmd, getExecutorPtr(), _cancellationToken); + // Set OperationKey via AsyncRPCOptions. + options->genericArgs.stable.setClientOperationKey(opKey); + auto future = sendCommand(options, opCtxHolder.get(), std::move(targeter)); + onCommand([&](const auto& request) { + ASSERT_EQ(getOpKeyFromCommand(request.cmdObj), opKey); + return CursorResponse(nss, 0LL, {BSON("x" << 1)}) + .toBSON(CursorResponse::ResponseType::InitialResponse); + }); + future.get(); +} + +/** + * Checks that if cancellation occurs after TaskExecutor receives a network response, the + * cancellation fails and the network response fulfills the final response. + */ +TEST_F(AsyncRPCTestFixture, CancelAfterNetworkResponse) { + auto pauseAfterNetworkResponseFailPoint = + globalFailPointRegistry().find("pauseTaskExecutorAfterReceivesNetworkRespones"); + pauseAfterNetworkResponseFailPoint->setMode(FailPoint::alwaysOn); + std::unique_ptr targeter = std::make_unique(); + auto opCtxHolder = makeOperationContext(); + DatabaseName testDbName = DatabaseName::createDatabaseName_forTest(boost::none, "testdb"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(testDbName); + + CancellationSource source; + CancellationToken token = source.token(); + FindCommandRequest findCmd(nss); + auto options = + std::make_shared>(findCmd, getExecutorPtr(), token); + auto future = sendCommand(options, opCtxHolder.get(), std::move(targeter)); + + // Will pause processing response after network interface. + stdx::thread worker([&] { + onCommand([&](const auto& request) { + return CursorResponse(nss, 0LL, {BSON("x" << 1)}) + .toBSON(CursorResponse::ResponseType::InitialResponse); + }); + }); + + // Cancel after network response received in the TaskExecutor. 
+ pauseAfterNetworkResponseFailPoint->waitForTimesEntered(1); + source.cancel(); + pauseAfterNetworkResponseFailPoint->setMode(FailPoint::off); + + // Canceling after network response received does not change the final response and + // does not send killOperation. + CursorInitialReply res = std::move(future).get().response; + ASSERT_BSONOBJ_EQ(res.getCursor()->getFirstBatch()[0], BSON("x" << 1)); + + worker.join(); +} + } // namespace } // namespace async_rpc } // namespace mongo diff --git a/src/mongo/executor/async_rpc_test_fixture.h b/src/mongo/executor/async_rpc_test_fixture.h index 5de2e5b303ae7..33aa439002cc1 100644 --- a/src/mongo/executor/async_rpc_test_fixture.h +++ b/src/mongo/executor/async_rpc_test_fixture.h @@ -26,10 +26,10 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ + #include #include "mongo/bson/oid.h" -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" #include "mongo/db/repl/hello_gen.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/executor/async_rpc.h" @@ -125,7 +125,7 @@ class AsyncRPCTestFixture : public ServiceContextTest { */ template void initializeCommand(CommandType& c) { - c.setDbName(DatabaseName("testdb", boost::none)); + c.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "testdb")); } /** @@ -209,6 +209,10 @@ class ShardIdTargeterForTest : public ShardIdTargeter { return SemiFuture>::makeReady(_resolvedHosts); } + SemiFuture onRemoteCommandError(HostAndPort h, Status s) override final { + return SemiFuture::makeReady(); + } + private: std::vector _resolvedHosts; }; diff --git a/src/mongo/executor/async_rpc_util.h b/src/mongo/executor/async_rpc_util.h new file mode 100644 index 0000000000000..34f8689e3bf5b --- /dev/null +++ b/src/mongo/executor/async_rpc_util.h @@ -0,0 +1,207 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/executor/async_rpc_error_info.h" +#include "mongo/executor/async_rpc_retry_policy.h" +#include "mongo/executor/async_rpc_targeter.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" +#include "mongo/idl/generic_args_with_types_gen.h" +#include "mongo/logv2/log.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_rpc_shard_targeter.h" +#include "mongo/s/transaction_router.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/future.h" +#include "mongo/util/future_util.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" + +namespace mongo::async_rpc { + +/** + * Mirrors command helper methods found in commands.h or cluster_command_helpers.h. + */ +struct AsyncRPCCommandHelpers { + // TODO SERVER-78237 Remove ignoreMinimumAcceptableWTimeout, clarify logic. + static void appendMajorityWriteConcern(GenericArgs& args, + WriteConcernOptions defaultWC = WriteConcernOptions(), + bool ignoreMinimumAcceptableWTimeout = true) { + WriteConcernOptions newWC = CommandHelpers::kMajorityWriteConcern; + if (auto wc = args.stable.getWriteConcern()) { + // The command has a writeConcern field and it's majority, so we can return it as-is. + if (wc->isMajority()) { + return; + } + + newWC = WriteConcernOptions{ + WriteConcernOptions::kMajority, WriteConcernOptions::SyncMode::UNSET, wc->wTimeout}; + } else if (!defaultWC.usedDefaultConstructedWC) { + auto minimumAcceptableWTimeout = newWC.wTimeout; + newWC = defaultWC; + newWC.w = "majority"; + + // Some code may choose to serialize write concern with timeout = 0 (no timeout). + if (!ignoreMinimumAcceptableWTimeout && + defaultWC.wTimeout < minimumAcceptableWTimeout) { + newWC.wTimeout = minimumAcceptableWTimeout; + } + } + args.stable.setWriteConcern(newWC); + } + + static void appendDbVersionIfPresent(GenericArgs& args, DatabaseVersion dbVersion) { + if (!dbVersion.isFixed()) { + args.unstable.setDatabaseVersion(dbVersion); + } + } + + static void appendOSI(GenericArgs& args, const OperationSessionInfo& osi) { + args.stable.setLsid(osi.getSessionId()); + args.stable.setTxnNumber(osi.getTxnNumber()); + args.unstable.setTxnRetryCounter(osi.getTxnRetryCounter()); + args.stable.setAutocommit(osi.getAutocommit()); + args.stable.setStartTransaction(osi.getStartTransaction()); + } +}; + +template +struct SharedResult { + SharedResult(Promise resultPromise) : resultPromise(std::move(resultPromise)) {} + AtomicWord done{false}; + Promise resultPromise; +}; + +/** + * Template to process futures into a SharedResult. + */ +template +Future processMultipleFutures(std::vector>&& futures, + ProcessSWCallable&& processStatusWith) { + auto [resultPromise, resultFuture] = makePromiseFuture(); + + auto sharedResult = std::make_shared>(std::move(resultPromise)); + + for (size_t i = 0; i < futures.size(); ++i) { + std::move(futures[i]) + .getAsync( + [index = i, sharedResult, processStatusWith](StatusOrStatusWith sw) { + processStatusWith(sw, sharedResult, index); + }); + } + return std::move(resultFuture); +} + +/** + * Given a vector of input Futures, returns a Future which holds the value + * of the first of those futures to resolve with a status, value, and index that + * satisfies the conditions in the ConditionCallable Callable. 
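A hedged usage sketch for `whenAnyThat()`, which is documented just above and defined immediately below; the template parameters are elided in this extract, so `Response`, `launchAttempts()`, and the accept predicate here are illustrative stand-ins.

```cpp
// Resolve with the first attempt that either succeeds or fails with something other
// than a retriable timeout; later completions are ignored because the shared promise
// is only fulfilled once.
std::vector<ExecutorFuture<Response>> futures = launchAttempts();  // hypothetical helper
Future<Response> first = async_rpc::whenAnyThat(
    std::move(futures),
    [](const StatusOrStatusWith<Response>& swResponse, size_t index) {
        return swResponse.isOK() ||
            swResponse.getStatus() != ErrorCodes::NetworkInterfaceExceededTimeLimit;
    });
```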
+ */ +template +Future whenAnyThat(std::vector>&& futures, + ConditionCallable&& shouldAccept) { + invariant(futures.size() > 0); + + auto processSW = [shouldAccept](StatusOrStatusWith value, + std::shared_ptr> sharedResult, + size_t index) { + if (shouldAccept(value, index)) { + // If this is the first input future to complete and satisfy the + // shouldAccept condition, change done to true and set the value on the + // promise. + if (!sharedResult->done.swap(true)) { + sharedResult->resultPromise.setFrom(std::move(value)); + } + } + }; + + return processMultipleFutures(std::move(futures), std::move(processSW)); +} + +/** + * Given a vector of input Futures and a processResponse callable, processes the responses + * from each of the futures and pushes the results onto a vector. Cancels early on error + * status. + */ +template +Future> getAllResponsesOrFirstErrorWithCancellation( + std::vector>&& futures, + CancellationToken token, + ProcessResponseCallable&& processResponse) { + + struct SharedUtil { + SharedUtil(int responsesLeft, CancellationToken token) + : responsesLeft(responsesLeft), source(token) {} + Mutex mutex = MONGO_MAKE_LATCH("SharedUtil::mutex"); + int responsesLeft; + std::vector results = std::vector(); + CancellationSource source; + }; + + auto sharedUtil = std::make_shared(futures.size(), token); + auto processWrapper = [sharedUtil, processResponse]( + StatusOrStatusWith sw, + std::shared_ptr>> sharedResult, + size_t index) { + if (sharedUtil->source.token().isCanceled()) { + return; + } + + if (!sw.isOK()) { + sharedResult->done.store(true); + sharedUtil->source.cancel(); + sharedResult->resultPromise.setError(async_rpc::unpackRPCStatus(sw.getStatus())); + return; + } + + auto reply = sw.getValue(); + auto response = processResponse(reply, index); + + stdx::unique_lock lk(sharedUtil->mutex); + invariant(sharedUtil->responsesLeft != 0); + sharedUtil->results.push_back(response); + if (--sharedUtil->responsesLeft == 0) { + sharedResult->done.store(true); + sharedResult->resultPromise.emplaceValue(sharedUtil->results); + } + }; + + return processMultipleFutures>( + std::move(futures), std::move(processWrapper)); +} + +} // namespace mongo::async_rpc diff --git a/src/mongo/executor/async_timer_mock.cpp b/src/mongo/executor/async_timer_mock.cpp index 9153cba5f0b5d..28aa6002dfa0b 100644 --- a/src/mongo/executor/async_timer_mock.cpp +++ b/src/mongo/executor/async_timer_mock.cpp @@ -29,8 +29,13 @@ #include "mongo/executor/async_timer_mock.h" +#include #include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/system_error.h" namespace mongo { diff --git a/src/mongo/executor/async_timer_mock.h b/src/mongo/executor/async_timer_mock.h index 5e3a83e3275cf..4be2f47bd0fed 100644 --- a/src/mongo/executor/async_timer_mock.h +++ b/src/mongo/executor/async_timer_mock.h @@ -29,11 +29,16 @@ #pragma once +#include +#include +#include #include #include "mongo/executor/async_timer_interface.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/cancelable_executor.h b/src/mongo/executor/cancelable_executor.h index 178d946dd44dc..d5dd760d39879 100644 --- a/src/mongo/executor/cancelable_executor.h +++ b/src/mongo/executor/cancelable_executor.h @@ -29,8 +29,13 @@ #pragma once +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/util/cancellation.h" 
+#include "mongo/util/functional.h" #include "mongo/util/out_of_line_executor.h" namespace mongo { diff --git a/src/mongo/executor/cancelable_executor_test.cpp b/src/mongo/executor/cancelable_executor_test.cpp index fa7f58209c912..4e16c744a1f38 100644 --- a/src/mongo/executor/cancelable_executor_test.cpp +++ b/src/mongo/executor/cancelable_executor_test.cpp @@ -27,16 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/executor/cancelable_executor.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/thread_assertion_monitor.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" #include "mongo/util/executor_test_util.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/connection_pool.cpp b/src/mongo/executor/connection_pool.cpp index eab05b593eb2a..91d0481823cd9 100644 --- a/src/mongo/executor/connection_pool.cpp +++ b/src/mongo/executor/connection_pool.cpp @@ -30,23 +30,40 @@ #include "mongo/executor/connection_pool.h" -#include "mongo/db/service_context.h" +#include +#include +#include +#include +#include +#include #include -#include +#include +#include #include +#include +#include +#include + +#include -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/service_context.h" #include "mongo/executor/connection_pool_stats.h" -#include "mongo/executor/remote_command_request.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_severity_suppressor.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/assert_util.h" #include "mongo/util/debug_util.h" #include "mongo/util/destructor_guard.h" +#include "mongo/util/fail_point.h" #include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/lru_cache.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kConnectionPool @@ -556,7 +573,7 @@ ConnectionPool::ConnectionPool(std::shared_ptr im _factory(std::move(impl)), _options(std::move(options)), _controller(_options.controllerFactory()), - _manager(options.egressTagCloserManager) { + _manager(_options.egressTagCloserManager) { if (_manager) { _manager->add(this); } @@ -688,8 +705,7 @@ SemiFuture ConnectionPool::_get(const HostAndP // Only count connections being checked-out for ordinary use, not lease, towards cumulative wait // time. - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
- if (gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe() && !lease) { + if (!lease) { connFuture = std::move(connFuture).tap([connRequestedAt, pool = pool](const auto& conn) { pool->recordConnectionWaitTime(connRequestedAt); }); @@ -716,10 +732,7 @@ void ConnectionPool::appendConnectionStats(ConnectionPoolStats* stats) const { pool->getOnceUsedConnections(), pool->getTotalConnUsageTime()}; - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe()) { - hostStats.acquisitionWaitTimes = pool->connectionWaitTimeStats(); - } + hostStats.acquisitionWaitTimes = pool->connectionWaitTimeStats(); stats->updateStatsForHost(_name, host, hostStats); } } @@ -959,6 +972,16 @@ void ConnectionPool::SpecificPool::finishRefresh(ConnectionInterface* connPtr, S return; } + // If the host and port were dropped, let this lapse and spawn new connections + if (!conn || conn->getGeneration() != _generation) { + LOGV2_DEBUG(22564, + kDiagnosticLogLevel, + "Dropping late refreshed connection to {hostAndPort}", + "Dropping late refreshed connection", + "hostAndPort"_attr = _hostAndPort); + return; + } + // Pass a failure on through if (!status.isOK()) { LOGV2_DEBUG(22563, @@ -971,16 +994,6 @@ void ConnectionPool::SpecificPool::finishRefresh(ConnectionInterface* connPtr, S return; } - // If the host and port were dropped, let this lapse and spawn new connections - if (!conn || conn->getGeneration() != _generation) { - LOGV2_DEBUG(22564, - kDiagnosticLogLevel, - "Dropping late refreshed connection to {hostAndPort}", - "Dropping late refreshed connection", - "hostAndPort"_attr = _hostAndPort); - return; - } - LOGV2_DEBUG(22565, kDiagnosticLogLevel, "Finishing connection refresh for {hostAndPort}", @@ -1010,7 +1023,29 @@ void ConnectionPool::SpecificPool::returnConnection(ConnectionInterface* connPtr } if (auto status = conn->getStatus(); !status.isOK()) { - // TODO: alert via some callback if the host is bad + // Our error handling here is determined by the MongoDB SDAM specification for handling + // application errors on established connections. In particular, if a network error occurs, + // we must close all idle sockets in the connection pool for the server: "if one socket is + // bad, it is likely that all are." However, if the error is just a network _timeout_ error, + // we don't drop the connections because the timeout may indicate a slow operation rather + // than an unavailable server. Additionally, if we can isolate the error to a single + // socket/connection based on it's type, we won't drop other connections/sockets. + // + // See the spec for additional details: + // https://github.com/mongodb/specifications/blob/master/source/server-discovery-and-monitoring/server-discovery-and-monitoring.rst#application-errors + bool isSingleConnectionError = status.code() == ErrorCodes::ConnectionError; + if (ErrorCodes::isNetworkError(status) && !isSingleConnectionError && + !ErrorCodes::isNetworkTimeoutError(status)) { + LOGV2_DEBUG(7719500, + kDiagnosticLogLevel, + "Connection failed to {hostAndPort} due to {error}", + "Connection failed", + "hostAndPort"_attr = _hostAndPort, + "error"_attr = redact(status)); + processFailure(status); + return; + } + // Otherwise, drop the one connection. 
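Stated on its own, the rule the block above implements is the small predicate below. This is a restatement for readability rather than additional patch code, and it is exactly what the new `FailedHostDropsConns` and `OtherErrorsDontDropConns` tests in connection_pool_test.cpp exercise.

```cpp
// Pool-wide drop only for network errors that are neither timeouts nor isolatable
// to the single connection that reported them.
bool dropsWholePool(const Status& status) {
    const bool isSingleConnectionError = status.code() == ErrorCodes::ConnectionError;
    return ErrorCodes::isNetworkError(status) && !isSingleConnectionError &&
        !ErrorCodes::isNetworkTimeoutError(status);
}
// HostUnreachable              -> true  (all idle connections to that host are dropped)
// NetworkTimeout               -> false (may just be a slow operation)
// ConnectionError              -> false (isolated to this one socket)
// InternalError (non-network)  -> false
```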
LOGV2(22566, "Ending connection to host {hostAndPort} due to bad connection status: {error}; " "{numOpenConns} connections to that host remain open", diff --git a/src/mongo/executor/connection_pool.h b/src/mongo/executor/connection_pool.h index de1adc2a5fa8c..f6dd271fbd4d4 100644 --- a/src/mongo/executor/connection_pool.h +++ b/src/mongo/executor/connection_pool.h @@ -29,20 +29,34 @@ #pragma once +#include +#include +#include +#include #include +#include #include #include #include - -#include "mongo/config.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/executor/egress_tag_closer.h" #include "mongo/executor/egress_tag_closer_manager.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer.h" #include "mongo/util/clock_source.h" #include "mongo/util/duration.h" +#include "mongo/util/functional.h" #include "mongo/util/future.h" #include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/net/hostandport.h" @@ -546,6 +560,10 @@ class ConnectionPool::ControllerInterface { return _pool; } + Options getPoolOptions() const { + return _pool->_options; + } + virtual void updateConnectionPoolStats([[maybe_unused]] ConnectionPoolStats* cps) const = 0; protected: diff --git a/src/mongo/executor/connection_pool_controllers.cpp b/src/mongo/executor/connection_pool_controllers.cpp new file mode 100644 index 0000000000000..790de44ea719c --- /dev/null +++ b/src/mongo/executor/connection_pool_controllers.cpp @@ -0,0 +1,83 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include + +#include +#include +#include + +#include "mongo/executor/connection_pool_controllers.h" +#include "mongo/util/assert_util_core.h" + +namespace mongo::executor { +namespace { +template +auto& getOrInvariant(Map&& map, const Key& key) { + auto it = map.find(key); + invariant(it != map.end(), "Unable to find key in map"); + + return it->second; +} +} // namespace + +void DynamicLimitController::init(executor::ConnectionPool* parent) { + ControllerInterface::init(parent); +} + +void DynamicLimitController::addHost(PoolId id, const HostAndPort& host) { + stdx::lock_guard lk(_mutex); + auto ret = _poolData.insert({id, {host}}); + using namespace fmt::literals; + invariant( + ret.second, + "ConnectionPool controller {} received a request to track host {} that was already being tracked."_format( + _name, host)); +} + +DynamicLimitController::HostGroupState DynamicLimitController::updateHost(PoolId id, + const HostState& stats) { + stdx::lock_guard lk(_mutex); + auto& data = getOrInvariant(_poolData, id); + data.target = + std::clamp(stats.requests + stats.active + stats.leased, _minLoader(), _maxLoader()); + return {{data.host}, stats.health.isExpired}; +} + +void DynamicLimitController::removeHost(PoolId id) { + stdx::lock_guard lk(_mutex); + invariant(_poolData.erase(id)); +} + +ConnectionPool::ConnectionControls DynamicLimitController::getControls(PoolId id) { + stdx::lock_guard lk(_mutex); + return {getPoolOptions().maxConnecting, getOrInvariant(_poolData, id).target}; +} + +} // namespace mongo::executor diff --git a/src/mongo/executor/connection_pool_controllers.h b/src/mongo/executor/connection_pool_controllers.h new file mode 100644 index 0000000000000..edb48faa44c11 --- /dev/null +++ b/src/mongo/executor/connection_pool_controllers.h @@ -0,0 +1,110 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/executor/connection_pool.h" +#include "mongo/executor/connection_pool_stats.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" + +namespace mongo::executor { +/** + * This file is intended for simple implementations of ConnectionPool::ControllerInterface that + * might be shared between different libraries. Currently, it contains only one such implementation, + * the DyamicLimitController below. + */ + +/** + * A simple controller that allows for the maximum and minimum pool size to have dynamic values. + * At construction, provide callables that return the current maximum and minimum sizes to be used + * by the pool. + * + * Currently, the callables that provide the max and min are stateless and don't inspect any data + * about the pool. The other pool parameters (maxConnecting, host/pending/refresh timeouts) are + * simply taken from the Optoins the relevant ConnectionPool was started with. However, this + * type is intended to be easily extensible to add these features in the future if needed. + */ +class DynamicLimitController final : public ConnectionPool::ControllerInterface { +public: + DynamicLimitController(std::function minLoader, + std::function maxLoader, + StringData name) + : _minLoader(std::move(minLoader)), + _maxLoader(std::move(maxLoader)), + _name(std::move(name)) {} + + void init(ConnectionPool* parent) override; + + void addHost(PoolId id, const HostAndPort& host) override; + HostGroupState updateHost(PoolId id, const HostState& stats) override; + void removeHost(PoolId id) override; + + ConnectionControls getControls(PoolId id) override; + + Milliseconds hostTimeout() const override { + return getPoolOptions().hostTimeout; + } + + Milliseconds pendingTimeout() const override { + return getPoolOptions().refreshTimeout; + } + + Milliseconds toRefreshTimeout() const override { + return getPoolOptions().refreshRequirement; + } + + StringData name() const override { + return _name; + } + + void updateConnectionPoolStats(ConnectionPoolStats* cps) const override {} + +private: + struct PoolData { + HostAndPort host; + size_t target = 0; + }; + + std::function _minLoader; + std::function _maxLoader; + std::string _name; + Mutex _mutex = MONGO_MAKE_LATCH("DynamicLimitController::_mutex"); + stdx::unordered_map _poolData; +}; +} // namespace mongo::executor diff --git a/src/mongo/executor/connection_pool_stats.cpp b/src/mongo/executor/connection_pool_stats.cpp index e4493f1008f09..23ce98662fe9c 100644 --- a/src/mongo/executor/connection_pool_stats.cpp +++ b/src/mongo/executor/connection_pool_stats.cpp @@ -27,16 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/connection_pool_stats.h" - #include -#include #include +#include +#include +#include +#include + +#include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/executor/connection_pool_stats.h" #include "mongo/util/duration.h" namespace mongo { @@ -112,22 +114,17 @@ void ConnectionPoolStats::updateStatsForHost(std::string pool, } void ConnectionPoolStats::appendToBSON(mongo::BSONObjBuilder& result, bool forFTDC) { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
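Returning to the `DynamicLimitController` added in connection_pool_controllers.h above: its constructor takes two loader callables plus a name, and a hedged wiring sketch follows. The `std::function` template arguments are elided in this extract (a `size_t()` signature is assumed from the clamp and target types), and the `gExampleMinConns`/`gExampleMaxConns` parameters and the exact `controllerFactory` return type are assumptions for illustration.

```cpp
// The loaders are re-evaluated on every updateHost(), so pointing them at mutable
// server parameters lets the per-host connection target move at runtime, while
// maxConnecting and the various timeouts still come from the pool's Options.
executor::ConnectionPool::Options poolOptions;
poolOptions.controllerFactory = [] {
    return std::make_shared<executor::DynamicLimitController>(
        [] { return static_cast<size_t>(gExampleMinConns.load()); },  // hypothetical parameter
        [] { return static_cast<size_t>(gExampleMaxConns.load()); },  // hypothetical parameter
        "ExampleDynamicLimitController");
};
```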
- const auto isCCHMEnabled = gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe(); - result.appendNumber("totalInUse", static_cast(totalInUse)); result.appendNumber("totalAvailable", static_cast(totalAvailable)); result.appendNumber("totalLeased", static_cast(totalLeased)); result.appendNumber("totalCreated", static_cast(totalCreated)); result.appendNumber("totalRefreshing", static_cast(totalRefreshing)); result.appendNumber("totalRefreshed", static_cast(totalRefreshed)); - if (isCCHMEnabled) { - result.appendNumber("totalWasNeverUsed", static_cast(totalWasNeverUsed)); - if (forFTDC) { - result.appendNumber("totalWasUsedOnce", static_cast(totalWasUsedOnce)); - result.appendNumber("totalConnUsageTimeMillis", - durationCount(totalConnUsageTime)); - } + result.appendNumber("totalWasNeverUsed", static_cast(totalWasNeverUsed)); + if (forFTDC) { + result.appendNumber("totalWasUsedOnce", static_cast(totalWasUsedOnce)); + result.appendNumber("totalConnUsageTimeMillis", + durationCount(totalConnUsageTime)); } if (forFTDC) { @@ -135,11 +132,9 @@ void ConnectionPoolStats::appendToBSON(mongo::BSONObjBuilder& result, bool forFT for (const auto& [pool, stats] : statsByPool) { BSONObjBuilder poolInfo(poolBuilder.subobjStart(pool)); poolInfo.appendNumber("poolInUse", static_cast(stats.inUse)); - if (isCCHMEnabled) { - poolInfo.appendNumber("poolWasUsedOnce", static_cast(stats.wasUsedOnce)); - poolInfo.appendNumber("poolConnUsageTimeMillis", - durationCount(stats.connUsageTime)); - } + poolInfo.appendNumber("poolWasUsedOnce", static_cast(stats.wasUsedOnce)); + poolInfo.appendNumber("poolConnUsageTimeMillis", + durationCount(stats.connUsageTime)); for (const auto& [host, stats] : stats.statsByHost) { BSONObjBuilder hostInfo(poolInfo.subobjStart(host.toString())); poolInfo.appendNumber("inUse", static_cast(stats.inUse)); @@ -166,11 +161,8 @@ void ConnectionPoolStats::appendToBSON(mongo::BSONObjBuilder& result, bool forFT poolInfo.appendNumber("poolCreated", static_cast(stats.created)); poolInfo.appendNumber("poolRefreshing", static_cast(stats.refreshing)); poolInfo.appendNumber("poolRefreshed", static_cast(stats.refreshed)); - if (isCCHMEnabled) { - poolInfo.appendNumber("poolWasNeverUsed", - static_cast(stats.wasNeverUsed)); - appendHistogram(poolInfo, stats.acquisitionWaitTimes, kAcquisitionWaitTimesKey); - } + poolInfo.appendNumber("poolWasNeverUsed", static_cast(stats.wasNeverUsed)); + appendHistogram(poolInfo, stats.acquisitionWaitTimes, kAcquisitionWaitTimesKey); for (const auto& [host, stats] : stats.statsByHost) { BSONObjBuilder hostInfo(poolInfo.subobjStart(host.toString())); @@ -180,11 +172,8 @@ void ConnectionPoolStats::appendToBSON(mongo::BSONObjBuilder& result, bool forFT hostInfo.appendNumber("created", static_cast(stats.created)); hostInfo.appendNumber("refreshing", static_cast(stats.refreshing)); hostInfo.appendNumber("refreshed", static_cast(stats.refreshed)); - if (isCCHMEnabled) { - hostInfo.appendNumber("wasNeverUsed", - static_cast(stats.wasNeverUsed)); - appendHistogram(hostInfo, stats.acquisitionWaitTimes, kAcquisitionWaitTimesKey); - } + hostInfo.appendNumber("wasNeverUsed", static_cast(stats.wasNeverUsed)); + appendHistogram(hostInfo, stats.acquisitionWaitTimes, kAcquisitionWaitTimesKey); } } } @@ -200,10 +189,8 @@ void ConnectionPoolStats::appendToBSON(mongo::BSONObjBuilder& result, bool forFT hostInfo.appendNumber("created", static_cast(stats.created)); hostInfo.appendNumber("refreshing", static_cast(stats.refreshing)); hostInfo.appendNumber("refreshed", 
static_cast(stats.refreshed)); - if (isCCHMEnabled) { - hostInfo.appendNumber("wasNeverUsed", static_cast(stats.wasNeverUsed)); - appendHistogram(hostInfo, stats.acquisitionWaitTimes, kAcquisitionWaitTimesKey); - } + hostInfo.appendNumber("wasNeverUsed", static_cast(stats.wasNeverUsed)); + appendHistogram(hostInfo, stats.acquisitionWaitTimes, kAcquisitionWaitTimesKey); } } } diff --git a/src/mongo/executor/connection_pool_stats.h b/src/mongo/executor/connection_pool_stats.h index dec837141b09d..08a326845481c 100644 --- a/src/mongo/executor/connection_pool_stats.h +++ b/src/mongo/executor/connection_pool_stats.h @@ -29,6 +29,14 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/sharding_task_executor_pool_controller.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/duration.h" diff --git a/src/mongo/executor/connection_pool_test.cpp b/src/mongo/executor/connection_pool_test.cpp index 1c657e26ec843..aec72fd3caece 100644 --- a/src/mongo/executor/connection_pool_test.cpp +++ b/src/mongo/executor/connection_pool_test.cpp @@ -27,24 +27,30 @@ * it in the license file. */ -#include "mongo/executor/connection_pool_test_fixture.h" - -#include "mongo/util/duration.h" -#include "mongo/util/net/hostandport.h" #include +#include +#include #include #include +#include +#include #include #include +#include -#include -#include +#include +#include #include "mongo/executor/connection_pool.h" #include "mongo/executor/connection_pool_stats.h" -#include "mongo/stdx/future.h" +#include "mongo/executor/connection_pool_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/thread_assertion_monitor.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/executor_test_util.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/scopeguard.h" namespace mongo { @@ -94,6 +100,12 @@ class ConnectionPoolTest : public unittest::Test { ExecutorFuture(_executor).getAsync([conn = std::move(conn)](auto) {}); } + void doneWithError(ConnectionPool::ConnectionHandle& conn, Status error) { + dynamic_cast(conn.get())->indicateFailure(error); + + ExecutorFuture(_executor).getAsync([conn = std::move(conn)](auto) {}); + } + using StatusWithConn = StatusWith; auto getId(const ConnectionPool::ConnectionHandle& conn) { @@ -439,6 +451,159 @@ TEST_F(ConnectionPoolTest, FailedConnDifferentConn) { ASSERT_NE(conn1Id, conn2Id); } +/** + * Verify that a connection returned with an error indicating the remote + * is unavailable drops current generation connections to that remote. + */ +TEST_F(ConnectionPoolTest, FailedHostDropsConns) { + auto pool = makePool(); + + ASSERT_EQ(pool->getNumConnectionsPerHost(HostAndPort()), 0U); + + constexpr size_t kSize = 100; + std::vector connections; + std::vector monitors(kSize); + + // Ensure that no matter how we leave the test, we mark any + // checked out connections as OK before implicity returning them + // to the pool by destroying the 'connections' vector. Otherwise, + // this test would cause an invariant failure instead of a normal + // test failure if it fails, which would be confusing. + auto drainConnPool = [&] { + while (!connections.empty()) { + try { + ConnectionPool::ConnectionHandle conn = std::move(connections.back()); + connections.pop_back(); + doneWith(conn); + } catch (...) 
{ + } + } + }; + const ScopeGuard guard(drainConnPool); + + auto now = Date_t::now(); + PoolImpl::setNow(now); + + // Check out kSize connections from the pool. + for (size_t i = 0; i != kSize; ++i) { + ConnectionImpl::pushSetup(Status::OK()); + auto cb = [&](StatusWith swConn) { + monitors[i].exec([&]() { + ASSERT(swConn.isOK()); + connections.push_back(std::move(swConn.getValue())); + monitors[i].notifyDone(); + }); + }; + auto timeout = Milliseconds(5000); + auto errorCode = ErrorCodes::NetworkInterfaceExceededTimeLimit; + + pool->get_forTest(HostAndPort(), timeout, errorCode, cb); + } + + for (auto& monitor : monitors) { + monitor.wait(); + } + + ASSERT_EQ(pool->getNumConnectionsPerHost(HostAndPort()), kSize); + + // Return one connection with a network error. + ConnectionPool::ConnectionHandle conn = std::move(connections.back()); + connections.pop_back(); + doneWithError(conn, {ErrorCodes::HostUnreachable, "error"}); + + // We should still have all of the connections open, minus the one we just returned with an + // error. + ASSERT_EQ(pool->getNumConnectionsPerHost(HostAndPort()), kSize - 1); + + // Put the remaining connections back. + drainConnPool(); + + // They should all be discarded since the host should be marked as down + // due to the connection returned with a network error. + ASSERT_EQ(pool->getNumConnectionsPerHost(HostAndPort()), 0); +} + +/** + * Verify that a connection returned with an error that does _not_ indicate + * the remote is unavailable does _not_ drop current generation connections to that remote. + */ +TEST_F(ConnectionPoolTest, OtherErrorsDontDropConns) { + auto pool = makePool(); + + ASSERT_EQ(pool->getNumConnectionsPerHost(HostAndPort()), 0U); + + constexpr size_t kSize = 100; + std::vector connections; + + // Ensure that no matter how we leave the test, we mark any + // checked out connections as OK before implicity returning them + // to the pool by destroying the 'connections' vector. Otherwise, + // this test would cause an invariant failure instead of a normal + // test failure if it fails, which would be confusing. + auto drainConnPool = [&] { + while (!connections.empty()) { + try { + ConnectionPool::ConnectionHandle conn = std::move(connections.back()); + connections.pop_back(); + doneWith(conn); + } catch (...) { + } + } + }; + const ScopeGuard guard(drainConnPool); + + auto now = Date_t::now(); + PoolImpl::setNow(now); + + auto checkOutConnections = [&] { + std::vector monitors(kSize); + for (size_t i = 0; i != kSize; ++i) { + ConnectionImpl::pushSetup(Status::OK()); + auto cb = [&](StatusWith swConn) { + monitors[i].exec([&]() { + ASSERT(swConn.isOK()); + connections.push_back(std::move(swConn.getValue())); + monitors[i].notifyDone(); + }); + }; + auto timeout = Milliseconds(5000); + auto errorCode = ErrorCodes::NetworkInterfaceExceededTimeLimit; + + pool->get_forTest(HostAndPort(), timeout, errorCode, cb); + } + + for (auto& monitor : monitors) { + monitor.wait(); + } + + ASSERT_EQ(pool->getNumConnectionsPerHost(HostAndPort()), kSize); + }; + + // All three types of error that shouldn't result in us dropping connections - a non-network + // error; a network timeout error, and a network error that we can isolate to a specific + // connection. + std::array errors = { + ErrorCodes::InternalError, ErrorCodes::NetworkTimeout, ErrorCodes::ConnectionError}; + for (size_t i = 0; i < errors.size(); ++i) { + // Check out kSize connections from the pool. + checkOutConnections(); + // Return one connection with a non-network error. 
+ ConnectionPool::ConnectionHandle conn = std::move(connections.back()); + connections.pop_back(); + doneWithError(conn, {errors[i], "error"}); + + // We should still have all of the connections open, minus the one we just returned with an + // error. + ASSERT_EQ(pool->getNumConnectionsPerHost(HostAndPort()), kSize - 1); + + // Put the remaining connections back. + drainConnPool(); + + // They should all still be open. + ASSERT_EQ(pool->getNumConnectionsPerHost(HostAndPort()), kSize - 1); + } +} + /** * Verify that providing different host and ports gives you different * connections. diff --git a/src/mongo/executor/connection_pool_test_fixture.cpp b/src/mongo/executor/connection_pool_test_fixture.cpp index bb98af43b5473..ea40ae040e1a1 100644 --- a/src/mongo/executor/connection_pool_test_fixture.cpp +++ b/src/mongo/executor/connection_pool_test_fixture.cpp @@ -29,9 +29,17 @@ #include "mongo/executor/connection_pool_test_fixture.h" -#include "mongo/util/assert_util.h" - +#include +#include +#include #include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/executor/connection_pool_test_fixture.h b/src/mongo/executor/connection_pool_test_fixture.h index caf1b9cdf64a6..29163eff50768 100644 --- a/src/mongo/executor/connection_pool_test_fixture.h +++ b/src/mongo/executor/connection_pool_test_fixture.h @@ -27,20 +27,31 @@ * it in the license file. */ +#include +#include #include #include #include +#include +#include "mongo/base/status.h" #include "mongo/executor/connection_pool.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" #include "mongo/util/executor_test_util.h" #include "mongo/util/functional.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/time_support.h" namespace mongo { namespace executor { namespace connection_pool_test_details { class ConnectionPoolTest; + class PoolImpl; /** diff --git a/src/mongo/executor/connection_pool_tl.cpp b/src/mongo/executor/connection_pool_tl.cpp index f64e42109b6ef..6ec1a6a8eb7c7 100644 --- a/src/mongo/executor/connection_pool_tl.cpp +++ b/src/mongo/executor/connection_pool_tl.cpp @@ -28,22 +28,59 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/connection_pool_tl.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/checked_cast.h" #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/client/authenticate.h" -#include "mongo/config.h" +#include "mongo/client/internal_auth.h" +#include "mongo/client/sasl_client_session.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/connection_health_metrics_parameter_gen.h" -#include "mongo/db/server_feature_flags_gen.h" +#include "mongo/executor/connection_pool_tl.h" #include "mongo/executor/network_interface_tl_gen.h" +#include 
"mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_severity_suppressor.h" +#include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" +#include "mongo/util/functional.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_manager.h" +#include "mongo/util/net/ssl_types.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kASIO @@ -65,11 +102,6 @@ auto makeSeveritySuppressor() { Seconds{1}, logv2::LogSeverity::Log(), logv2::LogSeverity::Debug(2)); } -bool connHealthMetricsEnabled() { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - return gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe(); -} - bool connHealthMetricsLoggingEnabled() { return gEnableDetailedConnectionHealthMetricLogLines; } @@ -89,7 +121,7 @@ void logSlowConnection(const HostAndPort& peer, const ConnectionMetrics& connMet } CounterMetric totalConnectionEstablishmentTime( - "network.totalEgressConnectionEstablishmentTimeMillis", connHealthMetricsEnabled); + "network.totalEgressConnectionEstablishmentTimeMillis"); } // namespace @@ -211,7 +243,7 @@ class TLConnectionSetupHook : public executor::NetworkConnectionHook { explicit TLConnectionSetupHook(executor::NetworkConnectionHook* hookToWrap, bool x509AuthOnly) : _wrappedHook(hookToWrap), _x509AuthOnly(x509AuthOnly) {} - BSONObj augmentIsMasterRequest(const HostAndPort& remoteHost, BSONObj cmdObj) override { + BSONObj augmentHelloRequest(const HostAndPort& remoteHost, BSONObj cmdObj) override { BSONObjBuilder bob(std::move(cmdObj)); bob.append("hangUpOnStepDown", false); auto systemUser = internalSecurity.getUser(); @@ -229,9 +261,9 @@ class TLConnectionSetupHook : public executor::NetworkConnectionHook { } Status validateHost(const HostAndPort& remoteHost, - const BSONObj& isMasterRequest, - const RemoteCommandResponse& isMasterReply) override try { - const auto& reply = isMasterReply.data; + const BSONObj& helloRequest, + const RemoteCommandResponse& helloReply) override try { + const auto& reply = helloReply.data; // X.509 auth only means we only want to use a single mechanism regards of what hello says if (_x509AuthOnly) { @@ -255,7 +287,7 @@ class TLConnectionSetupHook : public executor::NetworkConnectionHook { if (!_wrappedHook) { return Status::OK(); } else { - return _wrappedHook->validateHost(remoteHost, isMasterRequest, isMasterReply); + return _wrappedHook->validateHost(remoteHost, helloRequest, helloReply); } } catch (const DBException& e) { return e.toStatus(); @@ -371,7 +403,7 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb, std::string ins #endif // For transient connections, only use X.509 auth. 
- auto isMasterHook = std::make_shared(_onConnectHook, x509AuthOnly); + auto helloHook = std::make_shared(_onConnectHook, x509AuthOnly); AsyncDBClient::connect(_peer, _sslMode, @@ -389,29 +421,29 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb, std::string ins return Status(ErrorCodes::HostUnreachable, status.reason()); } }) - .then([this, isMasterHook, instanceName = std::move(instanceName)]( + .then([this, helloHook, instanceName = std::move(instanceName)]( AsyncDBClient::Handle client) { _client = std::move(client); - return _client->initWireVersion(instanceName, isMasterHook.get()); + return _client->initWireVersion(instanceName, helloHook.get()); }) - .then([this, isMasterHook]() -> Future { + .then([this, helloHook]() -> Future { if (_skipAuth) { return false; } - return _client->completeSpeculativeAuth(isMasterHook->getSession(), + return _client->completeSpeculativeAuth(helloHook->getSession(), auth::getInternalAuthDB(), - isMasterHook->getSpeculativeAuthenticateReply(), - isMasterHook->getSpeculativeAuthType()); + helloHook->getSpeculativeAuthenticateReply(), + helloHook->getSpeculativeAuthType()); }) - .then([this, isMasterHook, authParametersProvider](bool authenticatedDuringConnect) { + .then([this, helloHook, authParametersProvider](bool authenticatedDuringConnect) { if (_skipAuth || authenticatedDuringConnect) { return Future::makeReady(); } boost::optional mechanism; - if (!isMasterHook->saslMechsForInternalAuth().empty()) - mechanism = isMasterHook->saslMechsForInternalAuth().front(); + if (!helloHook->saslMechsForInternalAuth().empty()) + mechanism = helloHook->saslMechsForInternalAuth().front(); return _client->authenticateInternal(std::move(mechanism), authParametersProvider); }) .then([this] { @@ -438,17 +470,14 @@ void TLConnection::setup(Milliseconds timeout, SetupCallback cb, std::string ins cancelTimeout(); if (status.isOK()) { - if (connHealthMetricsEnabled()) { - totalConnectionEstablishmentTime.increment(_connMetrics.total().count()); - if (connHealthMetricsLoggingEnabled() && - _connMetrics.total() >= - Milliseconds(gSlowConnectionThresholdMillis.load())) { - logSlowConnection(_peer, _connMetrics); - } + totalConnectionEstablishmentTime.increment(_connMetrics.total().count()); + if (connHealthMetricsLoggingEnabled() && + _connMetrics.total() >= Milliseconds(gSlowConnectionThresholdMillis.load())) { + logSlowConnection(_peer, _connMetrics); } handler->promise.emplaceValue(); } else { - if (ErrorCodes::isNetworkTimeoutError(status) && connHealthMetricsEnabled() && + if (ErrorCodes::isNetworkTimeoutError(status) && connHealthMetricsLoggingEnabled()) { logSlowConnection(_peer, _connMetrics); } @@ -484,8 +513,7 @@ void TLConnection::refresh(Milliseconds timeout, RefreshCallback cb) { }); _client - ->runCommandRequest( - {_peer, std::string("admin"), BSON("isMaster" << 1), BSONObj(), nullptr}) + ->runCommandRequest({_peer, std::string("admin"), BSON("hello" << 1), BSONObj(), nullptr}) .then([](executor::RemoteCommandResponse response) { return Future::makeReady(response.status); }) diff --git a/src/mongo/executor/connection_pool_tl.h b/src/mongo/executor/connection_pool_tl.h index 88b3e02a9bd33..5d527c454a994 100644 --- a/src/mongo/executor/connection_pool_tl.h +++ b/src/mongo/executor/connection_pool_tl.h @@ -29,16 +29,30 @@ #pragma once +#include +#include #include +#include +#include #include "mongo/client/async_client.h" +#include "mongo/db/service_context.h" #include "mongo/executor/connection_metrics.h" #include 
"mongo/executor/connection_pool.h" #include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/unordered_set.h" #include "mongo/transport/ssl_connection_context.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/time_support.h" +#include "mongo/util/timer.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/egress_tag_closer_manager.cpp b/src/mongo/executor/egress_tag_closer_manager.cpp index 78311c9597c26..01a98e18ad28a 100644 --- a/src/mongo/executor/egress_tag_closer_manager.cpp +++ b/src/mongo/executor/egress_tag_closer_manager.cpp @@ -28,11 +28,13 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/egress_tag_closer_manager.h" +#include +#include +#include +#include "mongo/executor/egress_tag_closer_manager.h" +#include "mongo/util/decorable.h" #include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kExecutor diff --git a/src/mongo/executor/egress_tag_closer_manager.h b/src/mongo/executor/egress_tag_closer_manager.h index e2243ae1c6cef..787ac1d510833 100644 --- a/src/mongo/executor/egress_tag_closer_manager.h +++ b/src/mongo/executor/egress_tag_closer_manager.h @@ -31,6 +31,8 @@ #include +#include + #include "mongo/db/service_context.h" #include "mongo/executor/egress_tag_closer.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/executor/hedge_options_util.cpp b/src/mongo/executor/hedge_options_util.cpp index 598ae2d1a2075..295eb2aa132f8 100644 --- a/src/mongo/executor/hedge_options_util.cpp +++ b/src/mongo/executor/hedge_options_util.cpp @@ -29,11 +29,19 @@ #include "mongo/executor/hedge_options_util.h" +#include +#include +#include + +#include + +#include "mongo/client/hedging_mode_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/mongos_server_parameters.h" #include "mongo/s/mongos_server_parameters_gen.h" +#include "mongo/util/ctype.h" #include "mongo/util/sort.h" -#include "mongo/util/optional_util.h" - namespace mongo { MONGO_FAIL_POINT_DEFINE(hedgedReadsSendRequestsToTargetHostsInAlphabeticalOrder); namespace { diff --git a/src/mongo/executor/hedge_options_util.h b/src/mongo/executor/hedge_options_util.h index 7a6517e9f918d..8b83c89073321 100644 --- a/src/mongo/executor/hedge_options_util.h +++ b/src/mongo/executor/hedge_options_util.h @@ -28,9 +28,15 @@ */ #pragma once -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/client/read_preference.h" +#include "mongo/platform/basic.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" namespace mongo { /** Failpoint used to test hedged reads. */ diff --git a/src/mongo/executor/hedge_options_util_test.cpp b/src/mongo/executor/hedge_options_util_test.cpp index f4d53650dc295..363167627fcae 100644 --- a/src/mongo/executor/hedge_options_util_test.cpp +++ b/src/mongo/executor/hedge_options_util_test.cpp @@ -27,14 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/read_preference.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/hedge_options_util.h" -#include "mongo/s/mongos_server_parameters_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" -#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/executor/hedged_async_rpc.h b/src/mongo/executor/hedged_async_rpc.h index f7862d6a0cfb7..f37bd713276c6 100644 --- a/src/mongo/executor/hedged_async_rpc.h +++ b/src/mongo/executor/hedged_async_rpc.h @@ -29,32 +29,53 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/baton.h" #include "mongo/db/commands/kill_operations_gen.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/service_context.h" #include "mongo/executor/async_rpc.h" #include "mongo/executor/async_rpc_error_info.h" +#include "mongo/executor/async_rpc_retry_policy.h" #include "mongo/executor/async_rpc_targeter.h" +#include "mongo/executor/async_rpc_util.h" #include "mongo/executor/hedge_options_util.h" #include "mongo/executor/hedging_metrics.h" #include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" +#include "mongo/idl/generic_args_with_types_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/mongos_server_parameters_gen.h" #include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" #include "mongo/util/net/hostandport.h" -#include -#include -#include +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT mongo::logv2::LogComponent::kExecutor @@ -62,52 +83,30 @@ namespace mongo { namespace async_rpc { namespace hedging_rpc_details { -/** - * Given a vector of input Futures, whenAnyThat returns a Future which holds the value - * of the first of those futures to resolve with a status, value, and index that - * satisfies the conditions in the ConditionCallable Callable. - */ -template -Future whenAnyThat(std::vector>&& futures, - ConditionCallable&& shouldAccept) { - invariant(futures.size() > 0); - - struct SharedBlock { - SharedBlock(Promise result) : resultPromise(std::move(result)) {} - // Tracks whether or not the resultPromise has been set. - AtomicWord done{false}; - // The promise corresponding to the resulting SemiFuture returned by this function. 
- Promise resultPromise; - }; - - Promise promise{NonNullPromiseTag{}}; - auto future = promise.getFuture(); - auto sharedBlock = std::make_shared(std::move(promise)); - - for (size_t i = 0; i < futures.size(); ++i) { - std::move(futures[i]) - .getAsync( - [sharedBlock, myIndex = i, shouldAccept](StatusOrStatusWith value) { - if (shouldAccept(value, myIndex)) { - // If this is the first input future to complete and satisfy the - // shouldAccept condition, change done to true and set the value on the - // promise. - if (!sharedBlock->done.swap(true)) { - sharedBlock->resultPromise.setFrom(std::move(value)); - } - } - }); - } - - return future; -} - HedgingMetrics* getHedgingMetrics(ServiceContext* svcCtx) { auto hm = HedgingMetrics::get(svcCtx); invariant(hm); return hm; } +UUID getOrCreateOperationKey(bool isHedge, GenericArgs& genericArgs) { + // Check if the caller has provided an operation key, and hedging is not enabled. If so, + // we will attach the caller-provided key to all remote commands sent to resolved + // targets. Note that doing so may have side-effects if the operation is retried: + // cancelling the Nth attempt may impact the (N + 1)th attempt as they share `opKey`. + if (auto& opKey = genericArgs.stable.getClientOperationKey(); opKey && !isHedge) { + return *opKey; + } + + // The caller has not provided an operation key or hedging is enabled, so we generate a + // new `clientOperationKey` for each attempt. The operationKey allows cancelling remote + // operations. A new one is generated here to ensure retry attempts are isolated: + // cancelling the Nth attempt does not impact the (N + 1)th attempt. + auto opKey = UUID::gen(); + genericArgs.stable.setClientOperationKey(opKey); + return opKey; +} + /** * Schedules a remote `_killOperations` on `exec` (or `baton`) for all targets, aiming to kill any * operations identified by `opKey`. @@ -134,6 +133,7 @@ void killOperations(ServiceContext* svcCtx, .getAsync([](Status) {}); } } + } // namespace hedging_rpc_details /** @@ -158,41 +158,20 @@ SemiFuture> sendHedgedCommand( std::shared_ptr retryPolicy = std::make_shared(), ReadPreferenceSetting readPref = ReadPreferenceSetting(ReadPreference::PrimaryOnly), GenericArgs genericArgs = GenericArgs(), - BatonHandle baton = nullptr, - boost::optional clientOperationKey = boost::none) { + BatonHandle baton = nullptr) { using SingleResponse = AsyncRPCResponse; invariant(opCtx); auto svcCtx = opCtx->getServiceContext(); - if (MONGO_unlikely(clientOperationKey && !genericArgs.stable.getClientOperationKey())) { - genericArgs.stable.setClientOperationKey(*clientOperationKey); - } - // Set up cancellation token to cancel remaining hedged operations. CancellationSource hedgeCancellationToken{token}; auto targetsAttempted = std::make_shared>(); auto proxyExec = std::make_shared(baton, exec); auto tryBody = [=, targeter = std::move(targeter)]() mutable { HedgeOptions opts = getHedgeOptions(CommandType::kCommandName, readPref); - auto operationKey = [&] { - // Check if the caller has provided an operation key, and hedging is not enabled. If so, - // we will attach the caller-provided key to all remote commands sent to resolved - // targets. Note that doing so may have side-effects if the operation is retried: - // cancelling the Nth attempt may impact the (N + 1)th attempt as they share `opKey`. 
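As an aside on getOrCreateOperationKey() above: the caller-supplied clientOperationKey is reused only when hedging is off; otherwise every attempt gets a freshly generated key so that cancelling one attempt cannot affect the next. A minimal standalone sketch of that rule, with std::string standing in for UUID and a bare std::optional standing in for GenericArgs (both hypothetical stand-ins, not the real types):

```cpp
#include <iostream>
#include <optional>
#include <random>
#include <string>

// Stand-in for UUID::gen(); any unique token works for this sketch.
std::string generateKey() {
    static std::mt19937_64 rng{std::random_device{}()};
    return "opkey-" + std::to_string(rng());
}

// Mirrors the rule above: reuse the caller's key only when hedging is off,
// otherwise mint a fresh key so cancelling attempt N cannot affect attempt N + 1.
std::string getOrCreateOperationKey(bool isHedge, std::optional<std::string>& callerKey) {
    if (callerKey && !isHedge) {
        return *callerKey;
    }
    auto fresh = generateKey();
    callerKey = fresh;  // recorded so the outgoing command carries the key actually used
    return fresh;
}

int main() {
    std::optional<std::string> provided{"caller-key"};
    std::cout << getOrCreateOperationKey(false, provided) << '\n';  // reuses "caller-key"

    std::optional<std::string> hedged{"caller-key"};
    std::cout << getOrCreateOperationKey(true, hedged) << '\n';     // fresh key despite caller's
}
```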
- if (auto& opKey = genericArgs.stable.getClientOperationKey(); - opKey && !opts.isHedgeEnabled) { - return *opKey; - } - - // The caller has not provided an operation key or hedging is enabled, so we generate a - // new `clientOperationKey` for each attempt. The operationKey allows cancelling remote - // operations. A new one is generated here to ensure retry attempts are isolated: - // cancelling the Nth attempt does not impact the (N + 1)th attempt. - auto opKey = UUID::gen(); - genericArgs.stable.setClientOperationKey(opKey); - return opKey; - }(); + auto operationKey = + hedging_rpc_details::getOrCreateOperationKey(opts.isHedgeEnabled, genericArgs); return targeter->resolve(token) .thenRunOn(proxyExec) @@ -226,6 +205,7 @@ SemiFuture> sendHedgedCommand( }); } + const auto globalMaxTimeMSForHedgedReads = gMaxTimeMSForHedgedReads.load(); for (size_t i = 0; i < hostsToTarget; i++) { std::unique_ptr t = std::make_unique(targets[i]); // We explicitly pass "NeverRetryPolicy" here because the retry mechanism @@ -237,6 +217,22 @@ SemiFuture> sendHedgedCommand( hedgeCancellationToken.token(), std::make_shared(), genericArgs); + + // If the request is a hedged request, set maxTimeMSOpOnly to the smaller of + // the server parameter maxTimeMSForHedgedReads or remaining max time from the + // opCtx. + if (opts.isHedgeEnabled && i != 0) { + auto maxTimeMSOpOnly = globalMaxTimeMSForHedgedReads; + if (opCtx->hasDeadline()) { + if (auto remainingMaxTime = opCtx->getRemainingMaxTimeMillis().count(); + remainingMaxTime < maxTimeMSOpOnly) { + maxTimeMSOpOnly = remainingMaxTime; + } + } + + options->genericArgs.unstable.setMaxTimeMSOpOnly(maxTimeMSOpOnly); + } + options->baton = baton; requests.push_back( sendCommand(options, opCtx, std::move(t)).thenRunOn(proxyExec)); @@ -252,7 +248,7 @@ SemiFuture> sendHedgedCommand( * "authoritative" request. This is the codepath followed when we are not * hedging or there is only 1 target provided. */ - return hedging_rpc_details::whenAnyThat( + return whenAnyThat( std::move(requests), [&](StatusWith response, size_t index) { Status commandStatus = response.getStatus(); @@ -267,7 +263,6 @@ SemiFuture> sendHedgedCommand( return true; } - // TODO SERVER-69592 Account for interior executor shutdown invariant(commandStatus.code() == ErrorCodes::RemoteCommandExecutionError, commandStatus.toString()); diff --git a/src/mongo/executor/hedged_async_rpc_test.cpp b/src/mongo/executor/hedged_async_rpc_test.cpp index de36a80b67c8d..17cd65f1dc422 100644 --- a/src/mongo/executor/hedged_async_rpc_test.cpp +++ b/src/mongo/executor/hedged_async_rpc_test.cpp @@ -27,36 +27,60 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/async_remote_command_targeter_adapter.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/hedging_mode_gen.h" #include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/client/remote_command_targeter_rs.h" -#include "mongo/db/database_name.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/cursor_response_gen.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/repl/hello_gen.h" #include "mongo/executor/async_rpc.h" #include "mongo/executor/async_rpc_targeter.h" #include "mongo/executor/async_rpc_test_fixture.h" #include "mongo/executor/hedged_async_rpc.h" #include "mongo/executor/hedging_metrics.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface.h" -#include "mongo/executor/network_interface_integration_fixture.h" #include "mongo/executor/network_interface_mock.h" -#include "mongo/executor/network_test_env.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" +#include "mongo/rpc/topology_version_gen.h" #include "mongo/s/mongos_server_parameters_gen.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/assert_that.h" -#include "mongo/unittest/barrier.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/matcher.h" +#include "mongo/unittest/matcher_core.h" #include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" +#include "mongo/util/concurrency/notification.h" #include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" @@ -113,8 +137,7 @@ class HedgedAsyncRPCTest : public AsyncRPCTestFixture { std::vector hosts, std::shared_ptr retryPolicy = std::make_shared(), GenericArgs genericArgs = GenericArgs(), - BatonHandle bh = nullptr, - boost::optional opKey = boost::none) { + BatonHandle bh = nullptr) { // Use a readPreference that's elgible for hedging. 
ReadPreferenceSetting readPref(ReadPreference::Nearest); readPref.hedgingMode = HedgingMode(); @@ -136,8 +159,7 @@ class HedgedAsyncRPCTest : public AsyncRPCTestFixture { retryPolicy, readPref, genericArgs, - bh, - opKey); + bh); } void ignoreKillOperations() { @@ -185,6 +207,13 @@ TEST_F(HedgedAsyncRPCTest, FindHedgeRequestTwoHosts) { auto resultFuture = sendHedgedCommandWithHosts(testFindCmd, kTwoHosts); onCommand([&](const auto& request) { + // Only check maxTimeMSOpOnly on hedged requests + if (request.target != kTwoHosts[0]) { + ASSERT(request.cmdObj["maxTimeMSOpOnly"]); + ASSERT_EQ(request.cmdObj["maxTimeMSOpOnly"].Long(), gMaxTimeMSForHedgedReads.load()); + } else { + ASSERT(!request.cmdObj["maxTimeMSOpOnly"]); + } ASSERT(request.cmdObj["find"]); return CursorResponse(testNS, 0LL, {testFirstBatch}) .toBSON(CursorResponse::ResponseType::InitialResponse); @@ -216,6 +245,7 @@ TEST_F(HedgedAsyncRPCTest, HelloHedgeRequest) { auto resultFuture = sendHedgedCommandWithHosts(helloCmd, kTwoHosts); onCommand([&](const auto& request) { + ASSERT(!request.cmdObj["maxTimeMSOpOnly"]); ASSERT(request.cmdObj["hello"]); return helloReply.toBSON(); }); @@ -278,7 +308,10 @@ TEST_F(HedgedAsyncRPCTest, HelloHedgeRemoteErrorWithGenericReplyFields) { network->enterNetwork(); GenericReplyFieldsWithTypesV1 stableFields; - stableFields.setDollarClusterTime(LogicalTime(Timestamp(2, 3))); + auto clusterTime = ClusterTime(); + clusterTime.setClusterTime(LogicalTime(Timestamp(2, 3))); + clusterTime.setSignature(ClusterTimeSignature(std::vector(), 0)); + stableFields.setDollarClusterTime(clusterTime); GenericReplyFieldsWithTypesUnstableV1 unstableFields; unstableFields.setDollarConfigTime(Timestamp(1, 1)); unstableFields.setOk(false); @@ -293,7 +326,7 @@ TEST_F(HedgedAsyncRPCTest, HelloHedgeRemoteErrorWithGenericReplyFields) { remoteErrorBson = remoteErrorBson.addFields(unstableFields.toBSON()); const auto rcr = RemoteCommandResponse(remoteErrorBson, Milliseconds(1)); network->scheduleResponse(hedged, now, rcr); - network->scheduleSuccessfulResponse(authoritative, now + Milliseconds(1000), rcr); + network->scheduleSuccessfulResponse(authoritative, now + Milliseconds(100), rcr); }); network->runUntil(now + Milliseconds(1500)); @@ -401,6 +434,81 @@ TEST_F(HedgedAsyncRPCTest, ExecutorShutdown) { ASSERT(ErrorCodes::isA(extraInfo->asLocal())); } +/** + * Check that hedged commands return expiration errors if timeout is exceeded. + * A lot of the deadline behavior is test only, but we only want to check that the timeout is + * set correctly and respected here. + */ +TEST_F(HedgedAsyncRPCTest, TimeoutExceeded) { + auto resultFuture = sendHedgedCommandWithHosts(testFindCmd, kTwoHosts); + + auto network = getNetworkInterfaceMock(); + auto now = getNetworkInterfaceMock()->now(); + network->enterNetwork(); + + // Send hedged requests to exceed max time, which would fail the operation with + // NetworkInterfaceMock's fatal error. In production, "MaxTimeMSExpired" would be returned, + // and these would be considered "success" by the network interface. But because there is + // no deadline set by service entry point, we rely on NetworkInterfaceMock's timeout checks. 
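A quick sketch before the test body continues: the maxTimeMSOpOnly value asserted in these tests comes from the clamp sendHedgedCommand applies to hedged attempts, i.e. the gMaxTimeMSForHedgedReads server parameter reduced to the operation's remaining time when that is smaller. A standalone illustration with plain integers standing in for Milliseconds and the opCtx deadline (hypothetical names, not the real types):

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Hedged attempts get maxTimeMSOpOnly = min(server-wide cap, remaining operation time).
int64_t hedgedMaxTimeMs(int64_t maxTimeMsForHedgedReads, std::optional<int64_t> remainingOpMs) {
    auto maxTime = maxTimeMsForHedgedReads;
    if (remainingOpMs && *remainingOpMs < maxTime) {
        maxTime = *remainingOpMs;
    }
    return maxTime;
}

int main() {
    std::cout << hedgedMaxTimeMs(150, std::nullopt) << '\n';  // 150: no deadline on the opCtx
    std::cout << hedgedMaxTimeMs(150, 100) << '\n';           // 100: the operation expires sooner
    std::cout << hedgedMaxTimeMs(150, 500) << '\n';           // 150: the global cap still applies
}
```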
+ performAuthoritativeHedgeBehavior( + network, + [&](NetworkInterfaceMock::NetworkOperationIterator authoritative, + NetworkInterfaceMock::NetworkOperationIterator hedged) { + network->scheduleSuccessfulResponse( + authoritative, now + Milliseconds(1100), testSuccessResponse); + network->scheduleSuccessfulResponse( + hedged, now + Milliseconds(1000), testSuccessResponse); + }); + + network->runUntil(now + Milliseconds(1500)); + + auto counters = network->getCounters(); + network->exitNetwork(); + ASSERT_EQ(counters.failed, 1); + ASSERT_EQ(counters.canceled, 1); + + auto error = resultFuture.getNoThrow().getStatus(); + ASSERT_EQ(error.code(), ErrorCodes::RemoteCommandExecutionError); + + auto extraInfo = error.extraInfo(); + ASSERT(extraInfo); + + // Fails NetworkInterfaceMock's timeout check. + ASSERT(extraInfo->isLocal()); + auto localError = extraInfo->asLocal(); + ASSERT_EQ(localError, ErrorCodes::NetworkInterfaceExceededTimeLimit); +} + +/** + * Check that hedged commands send maxTimeMSOpOnly with opCtx deadline given that the deadline is + * smaller than the global maxTimeMSForHedgedReads default. + */ +TEST_F(HedgedAsyncRPCTest, OpCtxRemainingDeadline) { + const auto kDeadline = 100; + getOpCtx()->setDeadlineAfterNowBy(Milliseconds(kDeadline), ErrorCodes::MaxTimeMSExpired); + auto resultFuture = sendHedgedCommandWithHosts(testFindCmd, kTwoHosts); + + onCommand([&](const auto& request) { + // Only check deadline from opCtx here, success/fail doesn't matter. In a real system, + // this deadline would have real timeout effects on the target host. + if (request.target != kTwoHosts[0]) { + ASSERT(request.cmdObj["maxTimeMSOpOnly"]); + ASSERT_EQ(request.cmdObj["maxTimeMSOpOnly"].Long(), kDeadline); + } + ASSERT(request.cmdObj["find"]); + return CursorResponse(testNS, 0LL, {testFirstBatch}) + .toBSON(CursorResponse::ResponseType::InitialResponse); + }); + + auto network = getNetworkInterfaceMock(); + auto now = getNetworkInterfaceMock()->now(); + network->enterNetwork(); + network->runUntil(now + Milliseconds(150)); + network->exitNetwork(); + + resultFuture.wait(); +} + /** * When a hedged command is sent and one request resolves with a non-ignorable error, we propagate * that error upwards and cancel the other requests. 
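As an aside on the error-propagation comment above: the whenAnyThat helper this header no longer defines locally (it is now pulled in via the async_rpc_util.h include added earlier, judging by that include) resolves with the first result the predicate accepts, which is how ignorable errors are skipped and the first success or non-ignorable error wins. A standalone sketch of that pattern using std::promise/std::future in place of the codebase's Future types; FirstAcceptedResult is a hypothetical name for illustration only:

```cpp
#include <atomic>
#include <cstddef>
#include <future>
#include <iostream>

// The first result accepted by the predicate fulfils the shared promise; later results are dropped.
template <typename T, typename Predicate>
class FirstAcceptedResult {
public:
    explicit FirstAcceptedResult(Predicate pred) : _pred(std::move(pred)) {}

    std::future<T> getFuture() {
        return _promise.get_future();
    }

    // Called once per hedged attempt, potentially from different threads.
    void report(T value, std::size_t index) {
        if (_pred(value, index) && !_done.exchange(true)) {
            _promise.set_value(std::move(value));
        }
    }

private:
    Predicate _pred;
    std::atomic<bool> _done{false};
    std::promise<T> _promise;
};

int main() {
    auto isAcceptable = [](int status, std::size_t) { return status == 0; };  // 0 == OK
    FirstAcceptedResult<int, decltype(isAcceptable)> firstOk(isAcceptable);
    auto result = firstOk.getFuture();

    firstOk.report(5, 0);  // ignorable error: rejected by the predicate
    firstOk.report(0, 1);  // first acceptable result: fulfils the promise
    firstOk.report(0, 2);  // dropped: a result was already accepted

    std::cout << result.get() << '\n';  // prints 0
}
```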
@@ -451,7 +559,7 @@ TEST_F(HedgedAsyncRPCTest, BothCommandsFailWithIgnorableError) { NetworkInterfaceMock::NetworkOperationIterator hedged) { network->scheduleResponse(hedged, now, testIgnorableErrorResponse); network->scheduleSuccessfulResponse( - authoritative, now + Milliseconds(1000), testIgnorableErrorResponse); + authoritative, now + Milliseconds(100), testIgnorableErrorResponse); }); network->runUntil(now + Milliseconds(1500)); @@ -486,7 +594,7 @@ TEST_F(HedgedAsyncRPCTest, AllCommandsFailWithIgnorableError) { [&](NetworkInterfaceMock::NetworkOperationIterator authoritative, NetworkInterfaceMock::NetworkOperationIterator hedged1) { network->scheduleResponse( - authoritative, now + Milliseconds(1000), testIgnorableErrorResponse); + authoritative, now + Milliseconds(100), testIgnorableErrorResponse); network->scheduleResponse(hedged1, now, testIgnorableErrorResponse); }); @@ -529,7 +637,7 @@ TEST_F(HedgedAsyncRPCTest, HedgedFailsWithIgnorableErrorAuthoritativeSucceeds) { NetworkInterfaceMock::NetworkOperationIterator hedged) { network->scheduleResponse(hedged, now, testIgnorableErrorResponse); network->scheduleSuccessfulResponse( - authoritative, now + Milliseconds(1000), testSuccessResponse); + authoritative, now + Milliseconds(100), testSuccessResponse); }); network->runUntil(now + Milliseconds(1500)); @@ -570,7 +678,7 @@ TEST_F(HedgedAsyncRPCTest, AuthoritativeFailsWithIgnorableErrorHedgedCancelled) NetworkInterfaceMock::NetworkOperationIterator hedged) { network->scheduleResponse(authoritative, now, testIgnorableErrorResponse); network->scheduleSuccessfulResponse( - hedged, now + Milliseconds(1000), testSuccessResponse); + hedged, now + Milliseconds(100), testSuccessResponse); }); network->runUntil(now + Milliseconds(1500)); @@ -612,7 +720,7 @@ TEST_F(HedgedAsyncRPCTest, AuthoritativeFailsWithFatalErrorHedgedCancelled) { NetworkInterfaceMock::NetworkOperationIterator hedged) { network->scheduleResponse(authoritative, now, testFatalErrorResponse); network->scheduleSuccessfulResponse( - hedged, now + Milliseconds(1000), testSuccessResponse); + hedged, now + Milliseconds(100), testSuccessResponse); }); network->runUntil(now + Milliseconds(1500)); @@ -653,7 +761,7 @@ TEST_F(HedgedAsyncRPCTest, AuthoritativeSucceedsHedgedCancelled) { NetworkInterfaceMock::NetworkOperationIterator hedged) { network->scheduleSuccessfulResponse(authoritative, now, testSuccessResponse); network->scheduleSuccessfulResponse( - hedged, now + Milliseconds(1000), testFatalErrorResponse); + hedged, now + Milliseconds(100), testFatalErrorResponse); }); network->runUntil(now + Milliseconds(1500)); @@ -687,7 +795,7 @@ TEST_F(HedgedAsyncRPCTest, HedgedSucceedsAuthoritativeCancelled) { [&](NetworkInterfaceMock::NetworkOperationIterator authoritative, NetworkInterfaceMock::NetworkOperationIterator hedged) { network->scheduleSuccessfulResponse( - authoritative, now + Milliseconds(1000), testFatalErrorResponse); + authoritative, now + Milliseconds(100), testFatalErrorResponse); network->scheduleSuccessfulResponse(hedged, now, testSuccessResponse); }); @@ -729,7 +837,7 @@ TEST_F(HedgedAsyncRPCTest, HedgedThenAuthoritativeFailsWithIgnorableError) { [&](NetworkInterfaceMock::NetworkOperationIterator authoritative, NetworkInterfaceMock::NetworkOperationIterator hedged) { network->scheduleResponse( - authoritative, now + Milliseconds(1000), testIgnorableErrorResponse); + authoritative, now + Milliseconds(100), testIgnorableErrorResponse); network->scheduleResponse(hedged, now, testAlternateIgnorableErrorResponse); }); @@ 
-773,7 +881,7 @@ TEST_F(HedgedAsyncRPCTest, HedgedFailsWithIgnorableErrorAuthoritativeFailsWithFa [&](NetworkInterfaceMock::NetworkOperationIterator authoritative, NetworkInterfaceMock::NetworkOperationIterator hedged) { network->scheduleResponse( - authoritative, now + Milliseconds(1000), testFatalErrorResponse); + authoritative, now + Milliseconds(100), testFatalErrorResponse); network->scheduleResponse(hedged, now, testIgnorableErrorResponse); }); @@ -815,7 +923,7 @@ TEST_F(HedgedAsyncRPCTest, AuthoritativeSucceedsHedgedFailsWithIgnorableError) { network, [&](NetworkInterfaceMock::NetworkOperationIterator authoritative, NetworkInterfaceMock::NetworkOperationIterator hedged) { - network->scheduleResponse(authoritative, now + Milliseconds(1000), testSuccessResponse); + network->scheduleResponse(authoritative, now + Milliseconds(100), testSuccessResponse); network->scheduleResponse(hedged, now, testIgnorableErrorResponse); }); @@ -851,7 +959,7 @@ TEST_F(HedgedAsyncRPCTest, HedgedFailsWithFatalErrorAuthoritativeCanceled) { network, [&](NetworkInterfaceMock::NetworkOperationIterator authoritative, NetworkInterfaceMock::NetworkOperationIterator hedged) { - network->scheduleResponse(authoritative, now + Milliseconds(1000), testSuccessResponse); + network->scheduleResponse(authoritative, now + Milliseconds(100), testSuccessResponse); network->scheduleResponse(hedged, now, testFatalErrorResponse); }); @@ -977,7 +1085,7 @@ TEST_F(HedgedAsyncRPCTest, RemoteErrorAttemptedTargetsContainActual) { network, [&](NetworkInterfaceMock::NetworkOperationIterator authoritative, NetworkInterfaceMock::NetworkOperationIterator hedged) { - network->scheduleResponse(authoritative, now + Milliseconds(1000), testSuccessResponse); + network->scheduleResponse(authoritative, now + Milliseconds(100), testSuccessResponse); network->scheduleResponse(hedged, now, testFatalErrorResponse); }); @@ -1098,13 +1206,16 @@ TEST_F(HedgedAsyncRPCTest, OperationKeyIsSetByDefault) { TEST_F(HedgedAsyncRPCTest, UseOperationKeyWhenProvided) { const auto opKey = UUID::gen(); + + // Set OperationKey via GenericArgs + GenericArgs args; + args.stable.setClientOperationKey(opKey); auto future = sendHedgedCommandWithHosts(write_ops::InsertCommandRequest(testNS, {BSON("id" << 1)}), kTwoHosts, std::make_shared(), - GenericArgs(), - nullptr, - opKey); + args, + nullptr); onCommand([&](const auto& request) { ASSERT_EQ(getOpKeyFromCommand(request.cmdObj), opKey); return write_ops::InsertCommandReply().toBSON(); @@ -1114,12 +1225,10 @@ TEST_F(HedgedAsyncRPCTest, UseOperationKeyWhenProvided) { TEST_F(HedgedAsyncRPCTest, RewriteOperationKeyWhenHedging) { const auto opKey = UUID::gen(); - auto future = sendHedgedCommandWithHosts(testFindCmd, - kTwoHosts, - std::make_shared(), - GenericArgs(), - nullptr, - opKey); + GenericArgs args; + args.stable.setClientOperationKey(opKey); + auto future = sendHedgedCommandWithHosts( + testFindCmd, kTwoHosts, std::make_shared(), args, nullptr); onCommand([&](const auto& request) { ASSERT_NE(getOpKeyFromCommand(request.cmdObj), opKey); return CursorResponse(testNS, 0LL, {testFirstBatch}) diff --git a/src/mongo/executor/hedging_metrics.cpp b/src/mongo/executor/hedging_metrics.cpp index 9f42ebfe8aa5d..768c0ec32999b 100644 --- a/src/mongo/executor/hedging_metrics.cpp +++ b/src/mongo/executor/hedging_metrics.cpp @@ -29,6 +29,11 @@ #include "mongo/executor/hedging_metrics.h" +#include + +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/decorable.h" + namespace mongo { namespace { diff --git 
a/src/mongo/executor/hedging_metrics.h b/src/mongo/executor/hedging_metrics.h index fae226f342adf..c49b6b709b630 100644 --- a/src/mongo/executor/hedging_metrics.h +++ b/src/mongo/executor/hedging_metrics.h @@ -29,8 +29,10 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" namespace mongo { diff --git a/src/mongo/executor/inline_executor.cpp b/src/mongo/executor/inline_executor.cpp index f24d96c289bcf..4f89e4e94cdd5 100644 --- a/src/mongo/executor/inline_executor.cpp +++ b/src/mongo/executor/inline_executor.cpp @@ -28,12 +28,20 @@ */ #include +#include +#include -#include "mongo/executor/inline_executor.h" +#include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/executor/inline_executor.h" #include "mongo/transport/baton.h" #include "mongo/util/assert_util.h" +#include "mongo/util/functional.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" namespace mongo::executor { diff --git a/src/mongo/executor/inline_executor.h b/src/mongo/executor/inline_executor.h index b325bfc2c1ad9..852c4d8388ced 100644 --- a/src/mongo/executor/inline_executor.h +++ b/src/mongo/executor/inline_executor.h @@ -29,6 +29,7 @@ #pragma once +#include #include #include "mongo/db/baton.h" diff --git a/src/mongo/executor/inline_executor_test.cpp b/src/mongo/executor/inline_executor_test.cpp index 2a6ff28f4fc8a..00ee0e87390b1 100644 --- a/src/mongo/executor/inline_executor_test.cpp +++ b/src/mongo/executor/inline_executor_test.cpp @@ -27,22 +27,44 @@ * it in the license file. */ +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include +#include +#include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/executor/inline_executor.h" #include "mongo/executor/network_interface_mock.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/stdx/thread.h" #include "mongo/transport/baton.h" +#include "mongo/transport/session.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/thread_assertion_monitor.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/notification.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/waitable.h" namespace mongo::executor { namespace { diff --git a/src/mongo/executor/mock_async_rpc.h b/src/mongo/executor/mock_async_rpc.h index 1f6b34e307817..59e98349860a1 100644 --- a/src/mongo/executor/mock_async_rpc.h +++ b/src/mongo/executor/mock_async_rpc.h @@ -27,14 +27,52 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" #include "mongo/executor/async_rpc.h" #include "mongo/executor/async_rpc_error_info.h" #include "mongo/executor/async_rpc_targeter.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/producer_consumer_queue.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" namespace mongo::async_rpc { @@ -101,7 +139,8 @@ class SyncMockAsyncRPCRunner : public detail::AsyncRPCRunner { OperationContext* opCtx, std::shared_ptr exec, CancellationToken token, - BatonHandle) final { + BatonHandle, + boost::optional clientOperationKey) final { auto [p, f] = makePromiseFuture(); auto targetsAttempted = std::make_shared>(); return targeter->resolve(token) @@ -109,7 +148,8 @@ .onError([](Status s) -> StatusWith> { return Status{AsyncRPCErrorInfo(s), "Remote command execution failed"}; }) - .then([=, f = std::move(f), p = std::move(p)](auto&& targets) mutable { + .then([=, this, f = std::move(f), p = std::move(p), dbName = dbName.toString()]( + auto&& targets) mutable { stdx::lock_guard lg{_m}; *targetsAttempted = targets; _requests.emplace_back(cmdBSON, dbName, targets[0], std::move(p)); @@ -214,7 +254,8 @@ class AsyncMockAsyncRPCRunner : public detail::AsyncRPCRunner { OperationContext* opCtx, std::shared_ptr exec, CancellationToken token, - BatonHandle) final { + BatonHandle, + boost::optional clientOperationKey) final { auto [p, f] = makePromiseFuture(); auto targetsAttempted = std::make_shared>(); return targeter->resolve(token).thenRunOn(exec).then([this, @@ -356,4 +397,33 @@ std::ostream& operator<<(std::ostream& s, const AsyncMockAsyncRPCRunner::Expecta return s << o.name; } +/** + * The NoopMockAsyncRPCRunner is a mock implementation that silently reports success for every + * command it is asked to send. + */ +class NoopMockAsyncRPCRunner : public detail::AsyncRPCRunner { +public: + /** + * Mock implementation of the runner's core functionality: immediately resolves each request + * with a successful {ok: 1} response targeted at localhost; nothing is recorded and no + * waiters are notified.
+ */ + ExecutorFuture _sendCommand( + StringData dbName, + BSONObj cmdBSON, + Targeter* targeter, + OperationContext* opCtx, + std::shared_ptr exec, + CancellationToken token, + BatonHandle, + boost::optional) final { + + return ExecutorFuture(exec).then([] { + detail::AsyncRPCInternalResponse response; + response.response = BSON("ok" << 1); + response.targetUsed = HostAndPort("localhost", serverGlobalParams.port); + return response; + }); + } +}; + } // namespace mongo::async_rpc diff --git a/src/mongo/executor/mock_async_rpc_test.cpp b/src/mongo/executor/mock_async_rpc_test.cpp index 1ef18fbd35755..a036c96d6b2f7 100644 --- a/src/mongo/executor/mock_async_rpc_test.cpp +++ b/src/mongo/executor/mock_async_rpc_test.cpp @@ -28,17 +28,27 @@ */ #include "mongo/executor/mock_async_rpc.h" -#include +#include +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/commands.h" #include "mongo/db/repl/hello_gen.h" #include "mongo/db/service_context.h" #include "mongo/executor/async_rpc.h" +#include "mongo/executor/async_rpc_retry_policy.h" #include "mongo/executor/async_rpc_targeter.h" #include "mongo/executor/async_rpc_test_fixture.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/debugger.h" -#include "mongo/util/optional_util.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo::async_rpc { namespace { @@ -90,6 +100,14 @@ class MockAsyncRPCRunnerTestFixture : public AsyncRPCTestFixture { ServiceContext::UniqueOperationContext _opCtx = makeOperationContext(); }; +auto extractUUID(const BSONElement& element) { + return UUID::fromCDR(element.uuid()); +} + +auto getOpKeyFromCommand(const BSONObj& cmdObj) { + return extractUUID(cmdObj["clientOperationKey"]); +} + using SyncMockAsyncRPCRunnerTestFixture = MockAsyncRPCRunnerTestFixture; using AsyncMockAsyncRPCRunnerTestFixture = MockAsyncRPCRunnerTestFixture; @@ -211,7 +229,10 @@ TEST_F(SyncMockAsyncRPCRunnerTestFixture, OnCommand) { initializeCommand(hello); getMockRunner().onCommand([&](const RequestInfo& ri) { - ASSERT_BSONOBJ_EQ(hello.toBSON({}), ri._cmd); + // OperationKey not provided, so internally created OperationKey must be extracted to make + // this assertion valid + ASSERT_BSONOBJ_EQ(hello.toBSON(BSON("clientOperationKey" << getOpKeyFromCommand(ri._cmd))), + ri._cmd); return expectedResultObj; }); ASSERT_BSONOBJ_EQ(responseFut.get().response.toBSON(), helloReply.toBSON()); @@ -235,7 +256,8 @@ TEST_F(SyncMockAsyncRPCRunnerTestFixture, SyncMockAsyncRPCRunnerWithRetryPolicy) initializeCommand(hello); getMockRunner().onCommand([&](const RequestInfo& ri) { - ASSERT_BSONOBJ_EQ(hello.toBSON({}), ri._cmd); + ASSERT_BSONOBJ_EQ(hello.toBSON(BSON("clientOperationKey" << getOpKeyFromCommand(ri._cmd))), + ri._cmd); return expectedResultObj; }); auto net = getNetworkInterfaceMock(); @@ -245,7 +267,8 @@ TEST_F(SyncMockAsyncRPCRunnerTestFixture, SyncMockAsyncRPCRunnerWithRetryPolicy) } getMockRunner().onCommand([&](const RequestInfo& ri) { - ASSERT_BSONOBJ_EQ(hello.toBSON({}), ri._cmd); + ASSERT_BSONOBJ_EQ(hello.toBSON(BSON("clientOperationKey" << getOpKeyFromCommand(ri._cmd))), + ri._cmd); return expectedResultObj; }); ASSERT_BSONOBJ_EQ(responseFut.get().response.toBSON(), helloReply.toBSON()); @@ -460,7 +483,9 @@ 
TEST_F(AsyncMockAsyncRPCRunnerTestFixture, UnexpectedRequests) { ASSERT_EQ(unexpectedRequests.size(), 1); HelloCommand hello; initializeCommand(hello); - ASSERT_BSONOBJ_EQ(unexpectedRequests[0].cmdBSON, hello.toBSON({})); + ASSERT_BSONOBJ_EQ(unexpectedRequests[0].cmdBSON, + hello.toBSON(BSON("clientOperationKey" + << getOpKeyFromCommand(unexpectedRequests[0].cmdBSON)))); ASSERT_EQ(unexpectedRequests[0].dbName, "testdb"_sd); HostAndPort localhost = HostAndPort("localhost", serverGlobalParams.port); ASSERT_EQ(unexpectedRequests[0].target, localhost); diff --git a/src/mongo/executor/mock_network_fixture.cpp b/src/mongo/executor/mock_network_fixture.cpp index a53ca55146099..3e0b4bdc16cd1 100644 --- a/src/mongo/executor/mock_network_fixture.cpp +++ b/src/mongo/executor/mock_network_fixture.cpp @@ -28,13 +28,18 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/mock_network_fixture.h" +#include #include "mongo/db/matcher/matcher.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/executor/mock_network_fixture.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -61,7 +66,7 @@ bool MockNetwork::_allExpectationsSatisfied() const { }); } -void MockNetwork::_runUntilIdle() { +void MockNetwork::runUntilIdle() { executor::NetworkInterfaceMock::InNetworkGuard guard(_net); do { // The main responsibility of the mock network is to host incoming requests and scheduled @@ -130,7 +135,7 @@ void MockNetwork::runUntilExpectationsSatisfied() { // network is idle, the extra threads may be running and will schedule new requests. As a // result, the current best practice is to busy-loop to prepare for that. while (!_allExpectationsSatisfied()) { - _runUntilIdle(); + runUntilIdle(); } } @@ -144,7 +149,7 @@ void MockNetwork::runUntil(Date_t target) { _net->runUntil(target); } // Run until idle. - _runUntilIdle(); + runUntilIdle(); } LOGV2_DEBUG(5015403, 1, "mock reached time", "target"_attr = target); } diff --git a/src/mongo/executor/mock_network_fixture.h b/src/mongo/executor/mock_network_fixture.h index ce6431558c2f7..6c4cd4526656b 100644 --- a/src/mongo/executor/mock_network_fixture.h +++ b/src/mongo/executor/mock_network_fixture.h @@ -29,14 +29,29 @@ #pragma once +#include +#include +#include #include - +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { class BSONObj; + using executor::RemoteCommandResponse; namespace test { @@ -335,12 +350,14 @@ class MockNetwork { // Advance time to the target. Run network operations and process requests along the way. void runUntil(Date_t targetTime); + // Run until both the executor and the network are idle. Otherwise, it hangs forever. + void runUntilIdle(); + // Run until both the executor and the network are idle and all expectations are satisfied. // Otherwise, it hangs forever. 
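As a side note on the now-public runUntilIdle(): runUntilExpectationsSatisfied() has to busy-loop because, as the comment above explains, other threads may schedule new requests right after the network drains. A standalone sketch of that polling pattern, with std::function callbacks standing in for the fixture's _allExpectationsSatisfied() and runUntilIdle():

```cpp
#include <functional>
#include <iostream>

// Keep draining until the expectation check passes; a single drain is not enough because
// other threads may schedule new requests right after the network goes idle.
void runUntilSatisfied(const std::function<bool()>& allExpectationsSatisfied,
                       const std::function<void()>& runUntilIdle) {
    while (!allExpectationsSatisfied()) {
        runUntilIdle();
    }
}

int main() {
    int pendingExpectations = 3;
    runUntilSatisfied([&] { return pendingExpectations == 0; },
                      [&] {
                          --pendingExpectations;  // pretend one expectation was met this pass
                          std::cout << "drained; pending = " << pendingExpectations << '\n';
                      });
}
```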
void runUntilExpectationsSatisfied(); private: - void _runUntilIdle(); bool _allExpectationsSatisfied() const; std::vector> _expectations; diff --git a/src/mongo/executor/mock_network_fixture_test.cpp b/src/mongo/executor/mock_network_fixture_test.cpp index ef3632fa59e92..89131a3483bd1 100644 --- a/src/mongo/executor/mock_network_fixture_test.cpp +++ b/src/mongo/executor/mock_network_fixture_test.cpp @@ -28,16 +28,22 @@ */ -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/executor/mock_network_fixture.h" -#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_mock.h" #include "mongo/executor/network_interface_mock_test_fixture.h" -#include "mongo/executor/thread_pool_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/metadata.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/executor/network_connection_hook.h b/src/mongo/executor/network_connection_hook.h index 04de5571cb613..8510344c85a01 100644 --- a/src/mongo/executor/network_connection_hook.h +++ b/src/mongo/executor/network_connection_hook.h @@ -54,16 +54,16 @@ class NetworkConnectionHook { virtual ~NetworkConnectionHook() = default; /** - * Optionally augments the isMaster request sent while initializing the wire protocol. + * Optionally augments the "hello" request sent while initializing the wire protocol. * * By default this will just return the cmdObj passed in unaltered. */ - virtual BSONObj augmentIsMasterRequest(const HostAndPort& remoteHost, BSONObj cmdObj) { + virtual BSONObj augmentHelloRequest(const HostAndPort& remoteHost, BSONObj cmdObj) { return cmdObj; } /** - * Runs optional validation logic on an isMaster reply from a remote host. If a non-OK + * Runs optional validation logic on an "hello" reply from a remote host. If a non-OK * Status is returned, it will be propagated up to the completion handler for the command * that initiated the request that caused this connection to be created. This will * be called once for each connection that is created, even if a remote host with the @@ -77,8 +77,8 @@ class NetworkConnectionHook { * std::terminate. */ virtual Status validateHost(const HostAndPort& remoteHost, - const BSONObj& isMasterRequest, - const RemoteCommandResponse& isMasterReply) = 0; + const BSONObj& helloRequest, + const RemoteCommandResponse& helloReply) = 0; /** * Generates a command to run on the remote host immediately after connecting to it. diff --git a/src/mongo/executor/network_interface.cpp b/src/mongo/executor/network_interface.cpp index 132edf62e0292..e942759a1fb3d 100644 --- a/src/mongo/executor/network_interface.cpp +++ b/src/mongo/executor/network_interface.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/executor/network_interface.h" diff --git a/src/mongo/executor/network_interface.h b/src/mongo/executor/network_interface.h index 8547dc2d81ae0..060f0cf479f78 100644 --- a/src/mongo/executor/network_interface.h +++ b/src/mongo/executor/network_interface.h @@ -29,17 +29,32 @@ #pragma once +#include #include +#include +#include #include +#include #include +#include +#include "mongo/base/status.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/async_client.h" +#include "mongo/db/baton.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" #include "mongo/transport/baton.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/functional.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/network_interface_factory.cpp b/src/mongo/executor/network_interface_factory.cpp index 0489d9adde005..032691780bc9a 100644 --- a/src/mongo/executor/network_interface_factory.cpp +++ b/src/mongo/executor/network_interface_factory.cpp @@ -27,17 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/network_interface_factory.h" - #include +#include -#include "mongo/base/init.h" -#include "mongo/base/status.h" -#include "mongo/config.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/service_context.h" #include "mongo/executor/connection_pool.h" +#include "mongo/executor/egress_tag_closer_manager.h" #include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_tl.h" #include "mongo/rpc/metadata/metadata_hook.h" diff --git a/src/mongo/executor/network_interface_integration_fixture.cpp b/src/mongo/executor/network_interface_integration_fixture.cpp index 2ba7e8158b61c..a5469266dc514 100644 --- a/src/mongo/executor/network_interface_integration_fixture.cpp +++ b/src/mongo/executor/network_interface_integration_fixture.cpp @@ -28,10 +28,19 @@ */ -#include "mongo/platform/basic.h" - +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include #include - +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/client/connection_string.h" #include "mongo/db/wire_version.h" #include "mongo/executor/network_interface_factory.h" @@ -39,10 +48,15 @@ #include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/stdx/future.h" +#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/integration_test.h" #include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kASIO diff --git a/src/mongo/executor/network_interface_integration_fixture.h b/src/mongo/executor/network_interface_integration_fixture.h 
index 3e5eafbabda40..e7995a12abb47 100644 --- a/src/mongo/executor/network_interface_integration_fixture.h +++ b/src/mongo/executor/network_interface_integration_fixture.h @@ -28,14 +28,29 @@ */ #pragma once -#include "mongo/unittest/unittest.h" - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/connection_string.h" +#include "mongo/db/baton.h" #include "mongo/executor/connection_pool.h" #include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/unittest.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/executor/network_interface_integration_test.cpp b/src/mongo/executor/network_interface_integration_test.cpp index 3d84a640cbf13..989928546f8a6 100644 --- a/src/mongo/executor/network_interface_integration_test.cpp +++ b/src/mongo/executor/network_interface_integration_test.cpp @@ -28,28 +28,68 @@ */ -#include -#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/client/async_client.h" #include "mongo/client/connection_string.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/wire_version.h" #include "mongo/executor/connection_pool_stats.h" +#include "mongo/executor/hedge_options_util.h" #include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_integration_fixture.h" -#include "mongo/executor/test_network_connection_hook.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/factory.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/rpc/message.h" #include "mongo/rpc/topology_version_gen.h" -#include "mongo/stdx/future.h" -#include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include 
"mongo/util/net/hostandport.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -162,7 +202,7 @@ class NetworkInterfaceTest : public NetworkInterfaceIntegrationFixture { } void setUp() override { - startNet(std::make_unique(this)); + startNet(std::make_unique(this)); } // NetworkInterfaceIntegrationFixture::tearDown() shuts down the NetworkInterface. We always @@ -256,33 +296,33 @@ class NetworkInterfaceTest : public NetworkInterfaceIntegrationFixture { return ++numCurrentOpRan; } - struct IsMasterData { + struct HelloData { BSONObj request; RemoteCommandResponse response; }; - IsMasterData waitForIsMaster() { + HelloData waitForHello() { stdx::unique_lock lk(_mutex); - _isMasterCond.wait(lk, [this] { return _isMasterResult != boost::none; }); + _helloCondVar.wait(lk, [this] { return _helloResult != boost::none; }); - return std::move(*_isMasterResult); + return std::move(*_helloResult); } - bool hasIsMaster() { + bool hasHelloResult() { stdx::lock_guard lk(_mutex); - return _isMasterResult != boost::none; + return _helloResult != boost::none; } private: - class WaitForIsMasterHook : public NetworkConnectionHook { + class WaitForHelloHook : public NetworkConnectionHook { public: - explicit WaitForIsMasterHook(NetworkInterfaceTest* parent) : _parent(parent) {} + explicit WaitForHelloHook(NetworkInterfaceTest* parent) : _parent(parent) {} Status validateHost(const HostAndPort& host, const BSONObj& request, - const RemoteCommandResponse& isMasterReply) override { + const RemoteCommandResponse& helloReply) override { stdx::lock_guard lk(_parent->_mutex); - _parent->_isMasterResult = IsMasterData{request, isMasterReply}; - _parent->_isMasterCond.notify_all(); + _parent->_helloResult = HelloData{request, helloReply}; + _parent->_helloCondVar.notify_all(); return Status::OK(); } @@ -299,8 +339,8 @@ class NetworkInterfaceTest : public NetworkInterfaceIntegrationFixture { }; Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceTest::_mutex"); - stdx::condition_variable _isMasterCond; - boost::optional _isMasterResult; + stdx::condition_variable _helloCondVar; + boost::optional _helloResult; }; class NetworkInterfaceInternalClientTest : public NetworkInterfaceTest { @@ -331,7 +371,7 @@ TEST_F(NetworkInterfaceTest, CancelLocally) { auto deferred = runCommand(cbh, makeTestCommand(kMaxWait, makeEchoCmdObj())); - waitForIsMaster(); + waitForHello(); fpb->waitForTimesEntered(fpb.initialTimesEntered() + 1); @@ -532,7 +572,7 @@ TEST_F(NetworkInterfaceTest, AsyncOpTimeout) { auto request = makeTestCommand(Milliseconds{1000}, makeSleepCmdObj()); auto deferred = runCommand(cb, request); - waitForIsMaster(); + waitForHello(); auto result = deferred.get(); @@ -553,7 +593,6 @@ TEST_F(NetworkInterfaceTest, AsyncOpTimeoutWithOpCtxDeadlineSooner) { constexpr auto requestTimeout = Milliseconds{1000}; auto serviceContext = ServiceContext::make(); - serviceContext->registerClientObserver(std::make_unique()); auto client = serviceContext->makeClient("NetworkClient"); auto opCtx = client->makeOperationContext(); opCtx->setDeadlineAfterNowBy(opCtxDeadline, ErrorCodes::ExceededTimeLimit); @@ -562,7 +601,7 @@ TEST_F(NetworkInterfaceTest, AsyncOpTimeoutWithOpCtxDeadlineSooner) { auto deferred = runCommand(cb, request); - waitForIsMaster(); + waitForHello(); auto result = deferred.get(); @@ -589,7 +628,6 @@ TEST_F(NetworkInterfaceTest, AsyncOpTimeoutWithOpCtxDeadlineLater) { constexpr auto 
requestTimeout = Milliseconds{600}; auto serviceContext = ServiceContext::make(); - serviceContext->registerClientObserver(std::make_unique()); auto client = serviceContext->makeClient("NetworkClient"); auto opCtx = client->makeOperationContext(); opCtx->setDeadlineAfterNowBy(opCtxDeadline, ErrorCodes::ExceededTimeLimit); @@ -597,7 +635,7 @@ TEST_F(NetworkInterfaceTest, AsyncOpTimeoutWithOpCtxDeadlineLater) { auto deferred = runCommand(cb, request); - waitForIsMaster(); + waitForHello(); auto result = deferred.get(); @@ -756,6 +794,44 @@ TEST_F(NetworkInterfaceTest, SetAlarm) { ASSERT_FALSE(swResult.isOK()); } +TEST_F(NetworkInterfaceTest, UseOperationKeyWhenProvided) { + const auto opKey = UUID::gen(); + assertCommandOK("admin", + BSON("configureFailPoint" + << "failIfOperationKeyMismatch" + << "mode" + << "alwaysOn" + << "data" << BSON("clientOperationKey" << opKey)), + kNoTimeout); + + ON_BLOCK_EXIT([&] { + assertCommandOK("admin", + BSON("configureFailPoint" + << "failIfOperationKeyMismatch" + << "mode" + << "off"), + kNoTimeout); + }); + + RemoteCommandRequest::Options rcrOptions; + rcrOptions.hedgeOptions.isHedgeEnabled = true; + rcrOptions.hedgeOptions.hedgeCount = fixture().getServers().size(); + RemoteCommandRequestOnAny rcr(fixture().getServers(), + "admin", + makeEchoCmdObj(), + BSONObj(), + nullptr, + kNoTimeout, + std::move(rcrOptions), + opKey); + // Only internal clients can run hedged operations. + resetIsInternalClient(true); + ON_BLOCK_EXIT([&] { resetIsInternalClient(false); }); + auto cbh = makeCallbackHandle(); + auto fut = runCommand(cbh, std::move(rcr)); + fut.get(); +} + class HedgeCancellationTest : public NetworkInterfaceTest { public: enum class CancellationMode { kAfterCompletion, kAfterScheduling }; @@ -911,13 +987,13 @@ TEST_F(HedgeCancellationTest, CancelAfterCompletion) { } TEST_F(NetworkInterfaceInternalClientTest, - IsMasterRequestContainsOutgoingWireVersionInternalClientInfo) { + HelloRequestContainsOutgoingWireVersionInternalClientInfo) { auto deferred = runCommand(makeCallbackHandle(), makeTestCommand(kNoTimeout, makeEchoCmdObj())); - auto isMasterHandshake = waitForIsMaster(); + auto helloHandshake = waitForHello(); - // Verify that the isMaster reply has the expected internalClient data. + // Verify that the "hello" reply has the expected internalClient data. auto wireSpec = WireSpec::instance().get(); - auto internalClientElem = isMasterHandshake.request["internalClient"]; + auto internalClientElem = helloHandshake.request["internalClient"]; ASSERT_EQ(internalClientElem.type(), BSONType::Object); auto minWireVersionElem = internalClientElem.Obj()["minWireVersion"]; auto maxWireVersionElem = internalClientElem.Obj()["maxWireVersion"]; @@ -932,14 +1008,14 @@ TEST_F(NetworkInterfaceInternalClientTest, assertNumOps(0u, 0u, 0u, 1u); } -TEST_F(NetworkInterfaceTest, IsMasterRequestMissingInternalClientInfoWhenNotInternalClient) { +TEST_F(NetworkInterfaceTest, HelloRequestMissingInternalClientInfoWhenNotInternalClient) { resetIsInternalClient(false); auto deferred = runCommand(makeCallbackHandle(), makeTestCommand(kNoTimeout, makeEchoCmdObj())); - auto isMasterHandshake = waitForIsMaster(); + auto helloHandshake = waitForHello(); - // Verify that the isMaster reply has the expected internalClient data. - ASSERT_FALSE(isMasterHandshake.request["internalClient"]); + // Verify that the "hello" reply has the expected internalClient data. + ASSERT_FALSE(helloHandshake.request["internalClient"]); // Verify that the ping op is counted as a success. 
auto res = deferred.get(); ASSERT(res.elapsed); @@ -1079,6 +1155,36 @@ TEST_F(NetworkInterfaceTest, StartExhaustCommandShouldStopOnFailure) { } } +TEST_F(NetworkInterfaceTest, ExhaustCommandCancelRunsOutOfLine) { + thread_local bool inCancellationContext = false; + auto pf = makePromiseFuture(); + auto cbh = makeCallbackHandle(); + auto callback = [&](auto&&) mutable { + pf.promise.emplaceValue(inCancellationContext); + }; + + auto deferred = [&] { + FailPointEnableBlock fpb("networkInterfaceHangCommandsAfterAcquireConn"); + + auto deferred = startExhaustCommand( + cbh, makeTestCommand(kMaxWait, makeEchoCmdObj()), std::move(callback)); + + waitForHello(); + + fpb->waitForTimesEntered(fpb.initialTimesEntered() + 1); + + inCancellationContext = true; + net().cancelCommand(cbh); + inCancellationContext = false; + return deferred; + }(); + + auto result = deferred.getNoThrow(); + ASSERT_EQ(ErrorCodes::CallbackCanceled, result); + bool cancellationRanInline = pf.future.get(); + ASSERT_FALSE(cancellationRanInline); +} + TEST_F(NetworkInterfaceTest, TearDownWaitsForInProgress) { boost::optional tearDownThread; auto tearDownPF = makePromiseFuture(); diff --git a/src/mongo/executor/network_interface_mock.cpp b/src/mongo/executor/network_interface_mock.cpp index f56f5161b7094..e7dbfdf710db8 100644 --- a/src/mongo/executor/network_interface_mock.cpp +++ b/src/mongo/executor/network_interface_mock.cpp @@ -28,17 +28,27 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/network_interface_mock.h" - +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" #include #include #include +#include -#include "mongo/executor/connection_pool_stats.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/executor/network_interface_mock.h b/src/mongo/executor/network_interface_mock.h index cad3b0b690036..600a673b3feb4 100644 --- a/src/mongo/executor/network_interface_mock.h +++ b/src/mongo/executor/network_interface_mock.h @@ -29,21 +29,43 @@ #pragma once +#include +#include +#include +#include +#include #include #include +#include #include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/metadata/metadata_hook.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/unordered_map.h" #include "mongo/stdx/unordered_set.h" #include "mongo/transport/mock_session.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util.h" #include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" #include 
"mongo/util/time_support.h" namespace mongo { @@ -287,9 +309,9 @@ class NetworkInterfaceMock : public NetworkInterface { void runReadyNetworkOperations(); /** - * Sets the reply of the 'isMaster' handshake for a specific host. This reply will only + * Sets the reply of the 'hello' handshake for a specific host. This reply will only * be given to the 'validateHost' method of the ConnectionHook set on this object - NOT - * to the completion handlers of any 'isMaster' commands scheduled with 'startCommand'. + * to the completion handlers of any 'hello' commands scheduled with 'startCommand'. * * This reply will persist until it is changed again using this method. * diff --git a/src/mongo/executor/network_interface_mock_test.cpp b/src/mongo/executor/network_interface_mock_test.cpp index c3f419a391f3a..67e302e5002fb 100644 --- a/src/mongo/executor/network_interface_mock_test.cpp +++ b/src/mongo/executor/network_interface_mock_test.cpp @@ -27,17 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include #include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/executor/network_connection_hook.h" -#include "mongo/executor/network_interface.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/executor/network_interface_mock_test_fixture.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/test_network_connection_hook.h" +#include "mongo/rpc/metadata.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace executor { @@ -70,23 +90,23 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHook) { Milliseconds(30)}; // need to copy as it will be moved - auto isMasterReplyData = BSON("iamyour" - << "father"); + auto helloReplyData = BSON("iamyour" + << "father"); - RemoteCommandResponse isMasterReply{isMasterReplyData.copy(), Milliseconds(20)}; + RemoteCommandResponse helloReply{helloReplyData.copy(), Milliseconds(20)}; - net().setHandshakeReplyForHost(testHost(), std::move(isMasterReply)); + net().setHandshakeReplyForHost(testHost(), std::move(helloReply)); // Since the contract of these methods is that they do not throw, we run the ASSERTs in // the test scope. 
net().setConnectionHook(makeTestHook( [&](const HostAndPort& remoteHost, const BSONObj&, - const RemoteCommandResponse& isMasterReply) { + const RemoteCommandResponse& helloReply) { validateCalled = true; hostCorrectForValidate = (remoteHost == testHost()); - replyCorrectForValidate = SimpleBSONObjComparator::kInstance.evaluate( - isMasterReply.data == isMasterReplyData); + replyCorrectForValidate = + SimpleBSONObjComparator::kInstance.evaluate(helloReply.data == helloReplyData); return Status::OK(); }, [&](const HostAndPort& remoteHost) { @@ -169,9 +189,8 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHook) { TEST_F(NetworkInterfaceMockTest, ConnectionHookFailedValidation) { net().setConnectionHook(makeTestHook( - [&](const HostAndPort& remoteHost, - const BSONObj&, - const RemoteCommandResponse& isMasterReply) -> Status { + [&](const HostAndPort& remoteHost, const BSONObj&, const RemoteCommandResponse& helloReply) + -> Status { // We just need some obscure non-OK code. return {ErrorCodes::ConflictingOperationInProgress, "blah"}; }, @@ -199,7 +218,7 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHookFailedValidation) { { net().enterNetwork(); // We should have short-circuited the network and immediately called the callback. - // If we change isMaster replies to go through the normal network mechanism, + // If we change "hello" replies to go through the normal network mechanism, // this test will need to change. ASSERT(!net().hasReadyRequests()); net().exitNetwork(); @@ -212,9 +231,8 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHookFailedValidation) { TEST_F(NetworkInterfaceMockTest, ConnectionHookNoRequest) { bool makeRequestCalled = false; net().setConnectionHook(makeTestHook( - [&](const HostAndPort& remoteHost, - const BSONObj&, - const RemoteCommandResponse& isMasterReply) -> Status { return Status::OK(); }, + [&](const HostAndPort& remoteHost, const BSONObj&, const RemoteCommandResponse& helloReply) + -> Status { return Status::OK(); }, [&](const HostAndPort& remoteHost) -> StatusWith> { makeRequestCalled = true; return {boost::none}; @@ -248,9 +266,8 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHookNoRequest) { TEST_F(NetworkInterfaceMockTest, ConnectionHookMakeRequestFails) { bool makeRequestCalled = false; net().setConnectionHook(makeTestHook( - [&](const HostAndPort& remoteHost, - const BSONObj&, - const RemoteCommandResponse& isMasterReply) -> Status { return Status::OK(); }, + [&](const HostAndPort& remoteHost, const BSONObj&, const RemoteCommandResponse& helloReply) + -> Status { return Status::OK(); }, [&](const HostAndPort& remoteHost) -> StatusWith> { makeRequestCalled = true; return {ErrorCodes::InvalidSyncSource, "blah"}; @@ -285,9 +302,8 @@ TEST_F(NetworkInterfaceMockTest, ConnectionHookMakeRequestFails) { TEST_F(NetworkInterfaceMockTest, ConnectionHookHandleReplyFails) { bool handleReplyCalled = false; net().setConnectionHook(makeTestHook( - [&](const HostAndPort& remoteHost, - const BSONObj&, - const RemoteCommandResponse& isMasterReply) -> Status { return Status::OK(); }, + [&](const HostAndPort& remoteHost, const BSONObj&, const RemoteCommandResponse& helloReply) + -> Status { return Status::OK(); }, [&](const HostAndPort& remoteHost) -> StatusWith> { return boost::make_optional({}); }, diff --git a/src/mongo/executor/network_interface_mock_test_fixture.cpp b/src/mongo/executor/network_interface_mock_test_fixture.cpp index c44bd190e25b2..bf1bf8ee275cc 100644 --- a/src/mongo/executor/network_interface_mock_test_fixture.cpp +++ 
b/src/mongo/executor/network_interface_mock_test_fixture.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/executor/network_interface_mock_test_fixture.h" namespace mongo { diff --git a/src/mongo/executor/network_interface_mock_test_fixture.h b/src/mongo/executor/network_interface_mock_test_fixture.h index 2c127eac264fe..3e1dfd87e43ae 100644 --- a/src/mongo/executor/network_interface_mock_test_fixture.h +++ b/src/mongo/executor/network_interface_mock_test_fixture.h @@ -29,9 +29,18 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/thread_pool_mock.h" +#include "mongo/rpc/metadata.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/network_interface_thread_pool.cpp b/src/mongo/executor/network_interface_thread_pool.cpp index 151970b5fff2f..8a00159f4ef7e 100644 --- a/src/mongo/executor/network_interface_thread_pool.cpp +++ b/src/mongo/executor/network_interface_thread_pool.cpp @@ -28,14 +28,19 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/network_interface_thread_pool.h" +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/executor/network_interface.h" +#include "mongo/executor/network_interface_thread_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/destructor_guard.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/functional.h" #define MONGO_LOGV2_DEFAULT_COMPONENT mongo::logv2::LogComponent::kExecutor diff --git a/src/mongo/executor/network_interface_thread_pool.h b/src/mongo/executor/network_interface_thread_pool.h index 6cf058cac50b5..44f22d1dfffe6 100644 --- a/src/mongo/executor/network_interface_thread_pool.h +++ b/src/mongo/executor/network_interface_thread_pool.h @@ -29,13 +29,16 @@ #pragma once +#include #include +#include #include #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/util/concurrency/thread_pool_interface.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/out_of_line_executor.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/network_interface_tl.cpp b/src/mongo/executor/network_interface_tl.cpp index 0092648472e16..9a158a95b2d5e 100644 --- a/src/mongo/executor/network_interface_tl.cpp +++ b/src/mongo/executor/network_interface_tl.cpp @@ -30,15 +30,30 @@ #include "mongo/executor/network_interface_tl.h" +#include +#include +#include +#include +#include +#include +#include #include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include #include "mongo/base/checked_cast.h" -#include "mongo/config.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/connection_health_metrics_parameter_gen.h" -#include "mongo/db/server_feature_flags_gen.h" -#include 
"mongo/db/server_options.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/wire_version.h" #include "mongo/executor/connection_pool_tl.h" #include "mongo/executor/hedge_options_util.h" @@ -46,10 +61,20 @@ #include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_tl_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/transport/ssl_connection_context.h" #include "mongo/transport/transport_layer_manager.h" -#include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/fail_point.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/testing_proctor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kASIO @@ -64,15 +89,9 @@ namespace { MONGO_FAIL_POINT_DEFINE(triggerSendRequestNetworkTimeout); MONGO_FAIL_POINT_DEFINE(forceConnectionNetworkTimeout); -bool connHealthMetricsEnabled() { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - return gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe(); -} - -CounterMetric numConnectionNetworkTimeouts("operation.numConnectionNetworkTimeouts", - connHealthMetricsEnabled); +CounterMetric numConnectionNetworkTimeouts("operation.numConnectionNetworkTimeouts"); CounterMetric timeSpentWaitingBeforeConnectionTimeoutMillis( - "operation.totalTimeWaitingBeforeConnectionTimeoutMillis", connHealthMetricsEnabled); + "operation.totalTimeWaitingBeforeConnectionTimeoutMillis"); Status appendMetadata(RemoteCommandRequestOnAny* request, const std::unique_ptr& hook) { @@ -248,12 +267,7 @@ std::string NetworkInterfaceTL::getDiagnosticString() { } void NetworkInterfaceTL::appendConnectionStats(ConnectionPoolStats* stats) const { - auto pool = [&] { - stdx::lock_guard lk(_mutex); - return _pool.get(); - }(); - if (pool) - pool->appendConnectionStats(stats); + _pool->appendConnectionStats(stats); } void NetworkInterfaceTL::appendStats(BSONObjBuilder& bob) const { @@ -271,8 +285,6 @@ std::string NetworkInterfaceTL::getHostName() { } void NetworkInterfaceTL::startup() { - stdx::lock_guard lk(_mutex); - _ioThread = stdx::thread([this] { setThreadName(_instanceName); _run(); @@ -362,24 +374,19 @@ bool NetworkInterfaceTL::inShutdown() const { } void NetworkInterfaceTL::waitForWork() { - stdx::unique_lock lk(_mutex); - MONGO_IDLE_THREAD_BLOCK; - _workReadyCond.wait(lk, [this] { return _isExecutorRunnable; }); + // waitForWork should only be used by network-mocking code and should not be reachable in the + // NetworkInterfaceTL. + MONGO_UNREACHABLE; } void NetworkInterfaceTL::waitForWorkUntil(Date_t when) { - stdx::unique_lock lk(_mutex); - MONGO_IDLE_THREAD_BLOCK; - _workReadyCond.wait_until(lk, when.toSystemTimePoint(), [this] { return _isExecutorRunnable; }); + // waitForWorkUntil should only be used by network-mocking code and should not be reachable in + // the NetworkInterfaceTL. 
+ MONGO_UNREACHABLE; } -void NetworkInterfaceTL::signalWorkAvailable() { - stdx::unique_lock lk(_mutex); - if (!_isExecutorRunnable) { - _isExecutorRunnable = true; - _workReadyCond.notify_one(); - } -} +// This is a no-op in the NetworkInterfaceTL since the waitForWork API is unreachable here. +void NetworkInterfaceTL::signalWorkAvailable() {} Date_t NetworkInterfaceTL::now() { // TODO This check is because we set up NetworkInterfaces in MONGO_INITIALIZERS and then expect @@ -1056,12 +1063,14 @@ NetworkInterfaceTL::ExhaustCommandState::ExhaustCommandState( auto NetworkInterfaceTL::ExhaustCommandState::make(NetworkInterfaceTL* interface, RemoteCommandRequestOnAny request, const TaskExecutor::CallbackHandle& cbHandle, - RemoteCommandOnReplyFn&& onReply) { + RemoteCommandOnReplyFn&& onReply, + const BatonHandle& baton) { auto state = std::make_shared( interface, std::move(request), cbHandle, std::move(onReply)); auto [promise, future] = makePromiseFuture(); state->promise = std::move(promise); std::move(future) + .thenRunOn(makeGuaranteedExecutor(baton, interface->_reactor)) .onError([state](Status error) { stdx::lock_guard lk(state->stopwatchMutex); state->onReplyFn(RemoteCommandOnAnyResponse( @@ -1190,7 +1199,7 @@ Status NetworkInterfaceTL::startExhaustCommand(const TaskExecutor::CallbackHandl return status; } - auto cmdState = ExhaustCommandState::make(this, request, cbHandle, std::move(onReply)); + auto cmdState = ExhaustCommandState::make(this, request, cbHandle, std::move(onReply), baton); if (cmdState->requestOnAny.timeout != cmdState->requestOnAny.kNoTimeout) { cmdState->deadline = cmdState->stopwatch.start() + cmdState->requestOnAny.timeout; } diff --git a/src/mongo/executor/network_interface_tl.h b/src/mongo/executor/network_interface_tl.h index 880a8f46e21a4..3b9696893ee16 100644 --- a/src/mongo/executor/network_interface_tl.h +++ b/src/mongo/executor/network_interface_tl.h @@ -29,24 +29,53 @@ #pragma once -#include - +#include +#include #include - +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/async_client.h" +#include "mongo/db/baton.h" #include "mongo/db/service_context.h" #include "mongo/executor/connection_pool.h" #include "mongo/executor/connection_pool_tl.h" +#include "mongo/executor/hedge_options_util.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" #include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" #include "mongo/stdx/unordered_map.h" #include "mongo/transport/baton.h" #include "mongo/transport/transport_layer.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/strong_weak_finish_line.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -258,7 +287,8 @@ class NetworkInterfaceTL : public NetworkInterface { static 
auto make(NetworkInterfaceTL* interface, RemoteCommandRequestOnAny request, const TaskExecutor::CallbackHandle& cbHandle, - RemoteCommandOnReplyFn&& onReply); + RemoteCommandOnReplyFn&& onReply, + const BatonHandle& baton); Future sendRequest( std::shared_ptr requestState) override; @@ -395,10 +425,6 @@ class NetworkInterfaceTL : public NetworkInterface { std::unique_ptr _ownedTransportLayer; transport::ReactorHandle _reactor; - // TODO SERVER-75830: This Mutex used to be at hierarcichal acquisition level 3. We temporary - // removed the level because it is sometimes acquired as part of task-scheduling when - // lower-level mutexes (like the ConnectionPool's) are held. - mutable Mutex _mutex = MONGO_MAKE_LATCH("NetworkInterfaceTL::_mutex"); const ConnectionPool::Options _connPoolOpts; std::unique_ptr _onConnectHook; std::shared_ptr _pool; diff --git a/src/mongo/executor/network_interface_tl.idl b/src/mongo/executor/network_interface_tl.idl index 03decc33c969d..11226a554b92b 100644 --- a/src/mongo/executor/network_interface_tl.idl +++ b/src/mongo/executor/network_interface_tl.idl @@ -34,6 +34,7 @@ feature_flags: description: Suppress network interface transport layer exceptions cpp_varname: gSuppressNetworkInterfaceTransportLayerExceptions default: false + shouldBeFCVGated: true server_parameters: opportunisticSecondaryTargeting: diff --git a/src/mongo/executor/network_test_env.cpp b/src/mongo/executor/network_test_env.cpp index a8a746acaa941..e02fc201a1153 100644 --- a/src/mongo/executor/network_test_env.cpp +++ b/src/mongo/executor/network_test_env.cpp @@ -27,13 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/executor/network_test_env.h" +#include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/executor/network_test_env.h" namespace mongo { @@ -109,8 +114,8 @@ void NetworkTestEnv::onFindCommand(OnFindCommandFunction func) { arr.append(obj); } - const NamespaceString nss = - NamespaceString(request.dbname, request.cmdObj.firstElement().String()); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + request.dbname, request.cmdObj.firstElement().String()); BSONObjBuilder result; appendCursorResponseObject(0LL, nss, arr.arr(), boost::none, &result); @@ -135,8 +140,8 @@ void NetworkTestEnv::onFindWithMetadataCommand(OnFindCommandWithMetadataFunction arr.append(obj); } - const NamespaceString nss = - NamespaceString(request.dbname, request.cmdObj.firstElement().String()); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + request.dbname, request.cmdObj.firstElement().String()); BSONObjBuilder resultBuilder(std::move(metadata)); appendCursorResponseObject(0LL, nss, arr.arr(), boost::none, &resultBuilder); diff --git a/src/mongo/executor/network_test_env.h b/src/mongo/executor/network_test_env.h index da3b9300fdad9..788b3e115141f 100644 --- a/src/mongo/executor/network_test_env.h +++ b/src/mongo/executor/network_test_env.h @@ -29,16 +29,28 @@ #pragma once +#include +#include +#include #include +#include #include #include +#include #include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include 
"mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" #include "mongo/stdx/future.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/executor/pinned_connection_task_executor.cpp b/src/mongo/executor/pinned_connection_task_executor.cpp index eb3083b81d7e0..c7fb9d033d11d 100644 --- a/src/mongo/executor/pinned_connection_task_executor.cpp +++ b/src/mongo/executor/pinned_connection_task_executor.cpp @@ -28,9 +28,28 @@ */ #include "pinned_connection_task_executor.h" + +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/client/async_client.h" #include "mongo/executor/network_interface.h" -#include "mongo/executor/thread_pool_task_executor.h" -#include "mongo/util/scoped_unlock.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scoped_unlock.h" // IWYU pragma: keep namespace mongo::executor { /** @@ -170,7 +189,7 @@ StatusWith PinnedConnectionTaskExecutor::scheduleR "RPCs scheduled through PinnedConnectionTaskExecutor can only target a single host."); RemoteCommandRequest req = RemoteCommandRequest(requestOnAny, 0); auto state = PinnedConnectionTaskExecutor::CallbackState::make(cb, baton); - _requestQueue.push_front({req, state}); + _requestQueue.push_back({req, state}); CallbackHandle cbHandle; setCallbackForHandle(&cbHandle, state); @@ -304,6 +323,21 @@ void PinnedConnectionTaskExecutor::_doNetworking(stdx::unique_lock&& lk) .getAsync([req, this, self = shared_from_this()](StatusWith result) { stdx::unique_lock lk{_mutex}; _inProgressRequest.reset(); + // If we used the _stream, update it accordingly. + if (req.second->startedNetworking) { + if (auto status = result.getStatus(); status.isOK()) { + _stream->indicateUsed(); + _stream->indicateSuccess(); + } else { + // We didn't get a response from the remote. + // We assume the stream is broken and therefore can do no more work. Notify the + // stream of the failure, destroy it, and shutdown. + _stream->indicateFailure(status); + _stream.reset(); + _shutdown(lk); + } + } + // Now run the completion callback for the command. if (auto& state = req.second->state; MONGO_unlikely(state == CallbackState::State::kCanceled)) { CallbackState::runCallbackCanceled(lk, req, this); @@ -322,19 +356,6 @@ void PinnedConnectionTaskExecutor::_doNetworking(stdx::unique_lock&& lk) } CallbackState::runCallbackFinished(lk, req, this, result, target); } - // If we used the _stream, update it accordingly. - if (req.second->startedNetworking) { - if (auto status = result.getStatus(); status.isOK()) { - _stream->indicateUsed(); - _stream->indicateSuccess(); - } else { - // We didn't get a response from the remote. - // We assume the stream is broken and therefore can do no more work. Notify the - // stream of the failure, and shutdown. - _stream->indicateFailure(status); - _shutdown(lk); - } - } // If we weren't able to acquire a stream, shut-down. 
if (!_stream) { _shutdown(lk); diff --git a/src/mongo/executor/pinned_connection_task_executor.h b/src/mongo/executor/pinned_connection_task_executor.h index 9d423931327e1..46f56504eeda6 100644 --- a/src/mongo/executor/pinned_connection_task_executor.h +++ b/src/mongo/executor/pinned_connection_task_executor.h @@ -28,11 +28,31 @@ */ #pragma once +#include #include +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" #include "mongo/executor/network_interface.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/scoped_task_executor.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo::executor { diff --git a/src/mongo/executor/pinned_connection_task_executor_factory.cpp b/src/mongo/executor/pinned_connection_task_executor_factory.cpp index e5871adbaa0d8..3369db5843d9d 100644 --- a/src/mongo/executor/pinned_connection_task_executor_factory.cpp +++ b/src/mongo/executor/pinned_connection_task_executor_factory.cpp @@ -28,11 +28,14 @@ */ #include +#include -#include "mongo/executor/pinned_connection_task_executor_factory.h" +#include #include "mongo/executor/pinned_connection_task_executor.h" +#include "mongo/executor/pinned_connection_task_executor_factory.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/pinned_connection_task_executor_test.cpp b/src/mongo/executor/pinned_connection_task_executor_test.cpp index d5629c898ab6e..6dcea6e62e524 100644 --- a/src/mongo/executor/pinned_connection_task_executor_test.cpp +++ b/src/mongo/executor/pinned_connection_task_executor_test.cpp @@ -27,12 +27,44 @@ * it in the license file. 
*/ +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include + +#include "pinned_connection_task_executor_test_fixture.h" + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/pinned_connection_task_executor.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" #include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" - -#include "pinned_connection_task_executor_test_fixture.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/thread_assertion_monitor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo::executor { namespace { @@ -377,5 +409,54 @@ TEST_F(PinnedConnectionTaskExecutorTest, StreamFailureShutsDownAndCancels) { ASSERT_EQ(_indicateFailureCalls.load(), 1); pinnedTE->join(); } + +/** + * We want to test the following sequence: + * (1) A command is scheduled. + * (2) The command fails due to a network error. + * (3) The command is notified of the failure (its onResponse callback is invoked). + * + * We want to ensure that the stream used by PCTE is destroyed _before_ the command is + * notified of the failure. This allows the underlying NetworkInterface to + * observe the failure on the initial stream & correctly update internally before it might + * be asked to provide another stream to i.e. retry the command. + */ +TEST_F(PinnedConnectionTaskExecutorTest, EnsureStreamDestroyedBeforeCommandCompleted) { + auto pinnedTE = makePinnedConnTaskExecutor(); + HostAndPort remote("mock"); + + auto rcr = makeRCR(remote, BSON("forTest" << 0)); + auto pf = makePromiseFuture(); + ASSERT_EQ(_streamDestroyedCalls.load(), 0); + unittest::ThreadAssertionMonitor monitor; + auto completionCallback = [&](const TaskExecutor::RemoteCommandCallbackArgs& args) { + monitor.exec([&]() { + // Ensure the stream was destroyed before we are notified of the command completing. + ASSERT_EQ(_streamDestroyedCalls.load(), 1); + pf.promise.setWith([&] { return args.response.status; }); + monitor.notifyDone(); + }); + }; + + ASSERT_OK(pinnedTE->scheduleRemoteCommand(rcr, std::move(completionCallback))); + + int32_t responseToId; + expectSinkMessage([&](Message m) { + responseToId = m.header().getId(); + assertMessageBodyAndDBName(m, BSON("hello" << 1), BSON("forTest" << 0), "admin"); + return Status::OK(); + }); + + // Fail the first request + Status testFailure{ErrorCodes::BadValue, "test failure"}; + expectSourceMessage([&]() { return testFailure; }); + + // Ensure we ran the completion callback. 
+ monitor.wait(); + + auto localErr = pf.future.getNoThrow(); + ASSERT_EQ(localErr, testFailure); +} + } // namespace } // namespace mongo::executor diff --git a/src/mongo/executor/pinned_connection_task_executor_test_fixture.h b/src/mongo/executor/pinned_connection_task_executor_test_fixture.h index 8e7c3c503d10c..11f1a622deefe 100644 --- a/src/mongo/executor/pinned_connection_task_executor_test_fixture.h +++ b/src/mongo/executor/pinned_connection_task_executor_test_fixture.h @@ -202,6 +202,9 @@ class PinnedConnectionTaskExecutorTest : public ThreadPoolExecutorTest { invariant(session); _client = std::make_shared(hp, std::move(session), nullptr); } + ~LeasedStream() { + _fixture->_streamDestroyedCalls.fetchAndAdd(1); + } AsyncDBClient* getClient() override { return _client.get(); } @@ -221,11 +224,12 @@ class PinnedConnectionTaskExecutorTest : public ThreadPoolExecutorTest { }; protected: - // Track the success/used/failure calls across LeasedStreams created via this fixture. - // Accessible to children so tests can read them directly. + // Track the success/used/failure/destruction calls across LeasedStreams created via this + // fixture. Accessible to children so tests can read them directly. AtomicWord _indicateSuccessCalls{0}; AtomicWord _indicateUsedCalls{0}; AtomicWord _indicateFailureCalls{0}; + AtomicWord _streamDestroyedCalls{0}; }; diff --git a/src/mongo/executor/remote_command_request.cpp b/src/mongo/executor/remote_command_request.cpp index b3fa3fb687f5d..319626288ae02 100644 --- a/src/mongo/executor/remote_command_request.cpp +++ b/src/mongo/executor/remote_command_request.cpp @@ -27,17 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/remote_command_request.h" - +#include +#include #include +#include + +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/api_parameters.h" #include "mongo/db/operation_context.h" -#include "mongo/db/query/query_request_helper.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" #include "mongo/util/str.h" using namespace fmt::literals; @@ -60,12 +71,14 @@ RemoteCommandRequestBase::RemoteCommandRequestBase(RequestId requestId, const BSONObj& metadataObj, OperationContext* opCtx, Milliseconds timeoutMillis, - Options options) + Options options, + boost::optional opKey) : id(requestId), dbname(theDbName), metadata(metadataObj), opCtx(opCtx), options(options), + operationKey(opKey), timeout(timeoutMillis) { // If there is a comment associated with the current operation, append it to the command that we // are about to dispatch to the shards. @@ -73,14 +86,18 @@ RemoteCommandRequestBase::RemoteCommandRequestBase(RequestId requestId, ? theCmdObj.addField(*opCtx->getComment()) : cmdObj = theCmdObj; - // maxTimeMSOpOnly is set in the network interface based on the remaining max time attached to - // the OpCtx. It should never be specified explicitly. 
- uassert(4924403, - str::stream() << "Command request object should not manually specify " - << query_request_helper::kMaxTimeMSOpOnlyField, - !cmdObj.hasField(query_request_helper::kMaxTimeMSOpOnlyField)); + // For hedged requests, adjust timeout + if (cmdObj.hasField("maxTimeMSOpOnly")) { + int maxTimeField = cmdObj["maxTimeMSOpOnly"].Number(); + if (auto maxTimeMSOpOnly = Milliseconds(maxTimeField); + timeout == executor::RemoteCommandRequest::kNoTimeout || maxTimeMSOpOnly < timeout) { + timeout = maxTimeMSOpOnly; + } + } - if (options.hedgeOptions.isHedgeEnabled) { + // Assumes if argument for opKey is not empty, cmdObj already contains serialized version + // of the opKey. + if (operationKey == boost::none && options.hedgeOptions.isHedgeEnabled) { operationKey.emplace(UUID::gen()); cmdObj = cmdObj.addField(BSON("clientOperationKey" << operationKey.value()).firstElement()); } @@ -128,9 +145,16 @@ RemoteCommandRequestImpl::RemoteCommandRequestImpl(RequestId requestId, const BSONObj& metadataObj, OperationContext* opCtx, Milliseconds timeoutMillis, - Options options) - : RemoteCommandRequestBase( - requestId, theDbName, theCmdObj, metadataObj, opCtx, timeoutMillis, options), + Options options, + boost::optional operationKey) + : RemoteCommandRequestBase(requestId, + theDbName, + theCmdObj, + metadataObj, + opCtx, + timeoutMillis, + options, + operationKey), target(theTarget) { if constexpr (std::is_same_v>) { invariant(!theTarget.empty()); @@ -144,7 +168,8 @@ RemoteCommandRequestImpl::RemoteCommandRequestImpl(const T& theTarget, const BSONObj& metadataObj, OperationContext* opCtx, Milliseconds timeoutMillis, - Options options) + Options options, + boost::optional operationKey) : RemoteCommandRequestImpl(requestIdCounter.addAndFetch(1), theTarget, theDbName, @@ -152,7 +177,8 @@ RemoteCommandRequestImpl::RemoteCommandRequestImpl(const T& theTarget, metadataObj, opCtx, timeoutMillis, - options) {} + options, + operationKey) {} template std::string RemoteCommandRequestImpl::toString() const { diff --git a/src/mongo/executor/remote_command_request.h b/src/mongo/executor/remote_command_request.h index 5d8e8fbf8c5aa..5363213ca4178 100644 --- a/src/mongo/executor/remote_command_request.h +++ b/src/mongo/executor/remote_command_request.h @@ -29,18 +29,28 @@ #pragma once +#include +#include +#include +#include +#include #include #include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/jsobj.h" +#include "mongo/db/operation_context.h" #include "mongo/executor/hedge_options_util.h" #include "mongo/rpc/metadata.h" #include "mongo/transport/transport_layer.h" -#include "mongo/util/concepts.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace executor { @@ -70,7 +80,8 @@ struct RemoteCommandRequestBase { const BSONObj& metadataObj, OperationContext* opCtx, Milliseconds timeoutMillis, - Options options); + Options options, + boost::optional operationKey = boost::none); // Internal id of this request. Not interpreted and used for tracing purposes only. 
RequestId id; @@ -127,15 +138,16 @@ struct RemoteCommandRequestImpl : RemoteCommandRequestBase { RemoteCommandRequestImpl(); // Allow implicit conversion from RemoteCommandRequest to RemoteCommandRequestOnAny - REQUIRES_FOR_NON_TEMPLATE(std::is_same_v>) - RemoteCommandRequestImpl(const RemoteCommandRequestImpl& other) + template + requires std::is_same_v> RemoteCommandRequestImpl( + const RemoteCommandRequestImpl& other) : RemoteCommandRequestBase(other), target({other.target}) {} // Allow conversion from RemoteCommandRequestOnAny to RemoteCommandRequest with the index of a // particular host - REQUIRES_FOR_NON_TEMPLATE(std::is_same_v) - RemoteCommandRequestImpl(const RemoteCommandRequestImpl>& other, - size_t idx) + template + requires std::is_same_v RemoteCommandRequestImpl( + const RemoteCommandRequestImpl>& other, size_t idx) : RemoteCommandRequestBase(other), target(other.target[idx]) {} RemoteCommandRequestImpl(RequestId requestId, @@ -145,7 +157,8 @@ struct RemoteCommandRequestImpl : RemoteCommandRequestBase { const BSONObj& metadataObj, OperationContext* opCtx, Milliseconds timeoutMillis = kNoTimeout, - Options options = {}); + Options options = {}, + boost::optional operationKey = boost::none); RemoteCommandRequestImpl(const Target& theTarget, const std::string& theDbName, @@ -153,16 +166,24 @@ struct RemoteCommandRequestImpl : RemoteCommandRequestBase { const BSONObj& metadataObj, OperationContext* opCtx, Milliseconds timeoutMillis = kNoTimeout, - Options options = {}); + Options options = {}, + boost::optional operationKey = boost::none); RemoteCommandRequestImpl(const Target& theTarget, const std::string& theDbName, const BSONObj& theCmdObj, const BSONObj& metadataObj, OperationContext* opCtx, - Options options) - : RemoteCommandRequestImpl( - theTarget, theDbName, theCmdObj, metadataObj, opCtx, kNoTimeout, options) {} + Options options, + boost::optional operationKey = boost::none) + : RemoteCommandRequestImpl(theTarget, + theDbName, + theCmdObj, + metadataObj, + opCtx, + kNoTimeout, + options, + operationKey) {} RemoteCommandRequestImpl(const Target& theTarget, @@ -170,14 +191,16 @@ struct RemoteCommandRequestImpl : RemoteCommandRequestBase { const BSONObj& theCmdObj, OperationContext* opCtx, Milliseconds timeoutMillis = kNoTimeout, - Options options = {}) + Options options = {}, + boost::optional operationKey = boost::none) : RemoteCommandRequestImpl(theTarget, theDbName, theCmdObj, rpc::makeEmptyMetadata(), opCtx, timeoutMillis, - options) {} + options, + operationKey) {} std::string toString() const; diff --git a/src/mongo/executor/remote_command_response.cpp b/src/mongo/executor/remote_command_response.cpp index 8a751665a5566..7ade7ca6b2780 100644 --- a/src/mongo/executor/remote_command_response.cpp +++ b/src/mongo/executor/remote_command_response.cpp @@ -27,16 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/executor/remote_command_response.h" - +#include +#include #include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/reply_interface.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/duration.h" -#include "mongo/util/str.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/remote_command_response.h b/src/mongo/executor/remote_command_response.h index 53c785ab874ac..540fda7f06dcd 100644 --- a/src/mongo/executor/remote_command_response.h +++ b/src/mongo/executor/remote_command_response.h @@ -29,14 +29,19 @@ #pragma once +#include #include +#include #include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/rpc/message.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/executor/scoped_task_executor.cpp b/src/mongo/executor/scoped_task_executor.cpp index 392f77a116b9e..3d647464bc9c9 100644 --- a/src/mongo/executor/scoped_task_executor.cpp +++ b/src/mongo/executor/scoped_task_executor.cpp @@ -27,10 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/scoped_task_executor.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/functional.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/scoped_task_executor_test.cpp b/src/mongo/executor/scoped_task_executor_test.cpp index e02c8fa37d804..6a9b43f9525ad 100644 --- a/src/mongo/executor/scoped_task_executor_test.cpp +++ b/src/mongo/executor/scoped_task_executor_test.cpp @@ -27,16 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/executor/scoped_task_executor.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/scoped_task_executor.h" #include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/split_timer.h b/src/mongo/executor/split_timer.h index 32f956ed8dcfd..94246977ebe2a 100644 --- a/src/mongo/executor/split_timer.h +++ b/src/mongo/executor/split_timer.h @@ -30,12 +30,23 @@ #pragma once #include +#include #include +#include +#include +#include #include +#include +#include +#include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/duration.h" #include "mongo/util/timer.h" diff --git a/src/mongo/executor/split_timer_test.cpp b/src/mongo/executor/split_timer_test.cpp index b0146911d15f6..39f091bb0e595 100644 --- a/src/mongo/executor/split_timer_test.cpp +++ b/src/mongo/executor/split_timer_test.cpp @@ -30,13 +30,18 @@ #include "mongo/executor/split_timer.h" #include -#include #include +#include #include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/assert_that.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/matcher.h" +#include "mongo/unittest/matcher_core.h" #include "mongo/util/duration.h" +#include "mongo/util/tick_source.h" #include "mongo/util/tick_source_mock.h" namespace mongo { diff --git a/src/mongo/executor/task_executor.cpp b/src/mongo/executor/task_executor.cpp index a5da182f044b7..7d1221ccdf242 100644 --- a/src/mongo/executor/task_executor.cpp +++ b/src/mongo/executor/task_executor.cpp @@ -26,18 +26,29 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" -#include "mongo/platform/mutex.h" +#include +#include +#include +#include + +#include #include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" namespace mongo { namespace executor { namespace { +MONGO_FAIL_POINT_DEFINE(pauseTaskExecutorAfterReceivesNetworkRespones); MONGO_FAIL_POINT_DEFINE(pauseScheduleCallWithCancelTokenUntilCanceled); +} // namespace + /** * Provides exclusive access to an underlying Promise at set-time, guaranteeing that the Promise * will be set at most one time globally. 
This prevents races between completion and cancellation, @@ -73,11 +84,11 @@ class ExclusivePromiseAccess { AtomicWord _completed; }; -template +namespace { + Status wrapCallbackHandleWithCancelToken( const std::shared_ptr executor, const StatusWith swCallbackHandle, - std::shared_ptr> promise, const CancellationToken& token) { if (!swCallbackHandle.isOK()) { return swCallbackHandle.getStatus(); @@ -85,11 +96,9 @@ Status wrapCallbackHandleWithCancelToken( token.onCancel() .unsafeToInlineFuture() - .then( - [executor, promise, callbackHandle = std::move(swCallbackHandle.getValue())]() mutable { - executor->cancel(callbackHandle); - promise->setError(TaskExecutor::kCallbackCanceledErrorStatus); - }) + .then([executor, callbackHandle = std::move(swCallbackHandle.getValue())]() mutable { + executor->cancel(callbackHandle); + }) .getAsync([](auto) {}); return Status::OK(); } @@ -132,6 +141,7 @@ ExecutorFuture wrapScheduleCallWithCancelTokenAndFuture( // canceled). Errors from the remote host will be contained in the response. exclusivePromiseAccess->setError(status); } + pauseTaskExecutorAfterReceivesNetworkRespones.pauseWhileSet(); } }; @@ -148,7 +158,6 @@ ExecutorFuture wrapScheduleCallWithCancelTokenAndFuture( auto scheduleStatus = wrapCallbackHandleWithCancelToken( executor, std::forward(schedule)(request, std::move(signalPromiseOnCompletion), baton), - exclusivePromiseAccess, token); if (!scheduleStatus.isOK()) { @@ -213,8 +222,8 @@ ExecutorFuture TaskExecutor::sleepUntil(Date_t when, const CancellationTok when, [alarmState](const auto& args) mutable { alarmState->signal(args.status); }); // Handle cancellation via the input CancellationToken. - auto scheduleStatus = wrapCallbackHandleWithCancelToken( - shared_from_this(), std::move(cbHandle), exclusivePromiseAccess, token); + auto scheduleStatus = + wrapCallbackHandleWithCancelToken(shared_from_this(), std::move(cbHandle), token); if (!scheduleStatus.isOK()) { // If scheduleStatus is not okay, then the callback passed to scheduleWorkAt should never diff --git a/src/mongo/executor/task_executor.h b/src/mongo/executor/task_executor.h index 492dd4c7fedf4..a30dfdd068d8a 100644 --- a/src/mongo/executor/task_executor.h +++ b/src/mongo/executor/task_executor.h @@ -29,19 +29,30 @@ #pragma once +#include +#include #include #include #include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" #include "mongo/stdx/condition_variable.h" #include "mongo/transport/baton.h" #include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" #include "mongo/util/future.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/out_of_line_executor.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/executor/task_executor_cursor.cpp b/src/mongo/executor/task_executor_cursor.cpp index dcbe6e1a9451f..a7fe6b0a472c7 100644 --- a/src/mongo/executor/task_executor_cursor.cpp +++ b/src/mongo/executor/task_executor_cursor.cpp @@ -27,16 +27,28 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/executor/task_executor_cursor.h" +#include +#include +#include +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/query/getmore_command_gen.h" #include "mongo/db/query/kill_cursors_gen.h" +#include "mongo/db/service_context.h" #include "mongo/executor/pinned_connection_task_executor_factory.h" +#include "mongo/executor/task_executor_cursor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/fail_point.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -49,7 +61,7 @@ MONGO_FAIL_POINT_DEFINE(blockBeforePinnedExecutorIsDestroyedOnUnderlying); TaskExecutorCursor::TaskExecutorCursor(std::shared_ptr executor, const RemoteCommandRequest& rcr, - Options&& options) + Options options) : _rcr(rcr), _options(std::move(options)), _batchIter(_batch.end()) { if (rcr.opCtx) { @@ -254,16 +266,44 @@ void TaskExecutorCursor::_processResponse(OperationContext* opCtx, CursorRespons _batch = response.releaseBatch(); _batchIter = _batch.begin(); - // If we got a cursor id back, pre-fetch the next batch - if (_cursorId) { - GetMoreCommandRequest getMoreRequest(_cursorId, _ns.coll().toString()); - getMoreRequest.setBatchSize(_options.batchSize); + // If the previous response contained a cursorId and pre-fetching is enabled, schedule the + // getMore. + if ((_cursorId != kClosedCursorId) && _options.preFetchNextBatch) { + _scheduleGetMore(opCtx); + } +} + +void TaskExecutorCursor::_scheduleGetMore(OperationContext* opCtx) { + // The previous response must have returned an open cursor ID. + invariant(_cursorId >= kMinLegalCursorId); + // There cannot be an existing in-flight request. + invariant(!_cmdState); + GetMoreCommandRequest getMoreRequest(_cursorId, _ns.coll().toString()); + getMoreRequest.setBatchSize(_options.batchSize); + + if (_options.getMoreAugmentationWriter) { + // Prefetching must be disabled to use the augmenting functionality. + invariant(!_options.preFetchNextBatch); + BSONObjBuilder getMoreBob; + getMoreRequest.serialize({}, &getMoreBob); + _options.getMoreAugmentationWriter(getMoreBob); + _runRemoteCommand(_createRequest(opCtx, getMoreBob.obj())); + } else { _runRemoteCommand(_createRequest(opCtx, getMoreRequest.toBSON({}))); } } void TaskExecutorCursor::_getNextBatch(OperationContext* opCtx) { - invariant(_cmdState, "_getNextBatch() requires an async request to have already been sent."); + // If we don't have an in-flight request, schedule one. This will occur when the + // 'preFetchNextBatch' option is false. + if (!_cmdState) { + invariant(!_options.preFetchNextBatch); + _scheduleGetMore(opCtx); + } + + // There should be an in-flight request at this point, either sent asynchronously when we + // processed the previous response or just scheduled.
+ invariant(_cmdState); invariant(_cursorId != kClosedCursorId); auto clock = opCtx->getServiceContext()->getPreciseClockSource(); @@ -291,12 +331,15 @@ void TaskExecutorCursor::_getNextBatch(OperationContext* opCtx) { tassert(6253100, "Expected at least one response for cursor", cursorResponses.size() > 0); CursorResponse cr = uassertStatusOK(std::move(cursorResponses[0])); _processResponse(opCtx, std::move(cr)); + // If we have more responses, build them into cursors then hold them until a caller accesses // them. Skip the first response, we used it to populate this cursor. // Ensure we update the RCR we give to each 'child cursor' with the current opCtx. auto freshRcr = _createRequest(opCtx, _rcr.cmdObj); auto copyOptions = [&] { TaskExecutorCursor::Options options; + // In the case that pinConnection is true, we need to ensure that additional cursors also + // pin their connection to the same socket as the original cursor. options.pinConnection = _options.pinConnection; return options; }; diff --git a/src/mongo/executor/task_executor_cursor.h b/src/mongo/executor/task_executor_cursor.h index 458e010f976f7..7d205109e856e 100644 --- a/src/mongo/executor/task_executor_cursor.h +++ b/src/mongo/executor/task_executor_cursor.h @@ -29,21 +29,35 @@ #pragma once +#include +#include #include +#include +#include +#include #include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/cursor_id.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/executor/task_executor_cursor_parameters_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" namespace mongo { namespace executor { @@ -54,8 +68,8 @@ namespace executor { * * The main differentiator for this type over DBClientCursor is the use of a task executor (which * provides access to a different connection pool, as well as interruptibility) and the ability to - * overlap getMores. This starts fetching the next batch as soon as one is exhausted (rather than - * on a call to getNext()). + * overlap getMores. This starts fetching the next batch as soon as the previous one is received + * (rather than on a call to 'getNext()'). */ class TaskExecutorCursor { public: @@ -70,20 +84,32 @@ class TaskExecutorCursor { struct Options { boost::optional batchSize; bool pinConnection{gPinTaskExecCursorConns.load()}; + // If true, we will fetch the next batch as soon as the current one is recieved. + // If false, we will fetch the next batch when the current batch is exhausted and + // 'getNext()' is invoked. + bool preFetchNextBatch{true}; + + // This function, if specified, may modify a getMore request to include additional + // information. + std::function getMoreAugmentationWriter; + Options() {} }; /** - * Construct the cursor with a RemoteCommandRequest wrapping the initial command + * Construct the cursor with a RemoteCommandRequest wrapping the initial command. + * + * Doesn't retry the command if we fail to establish the cursor. 
To create a TaskExecutorCursor + with the option to retry the initial command, see `makeTaskExecutorCursor` below. * * One value is carried over in successive calls to getMore/killCursor: * * opCtx - The Logical Session Id from the initial command is carried over in all later stages. * NOTE - the actual command must not include the lsid */ - explicit TaskExecutorCursor(std::shared_ptr executor, - const RemoteCommandRequest& rcr, - Options&& options = {}); + TaskExecutorCursor(std::shared_ptr executor, + const RemoteCommandRequest& rcr, + Options options = {}); /** * Construct the cursor from a cursor response from a previously executed RemoteCommandRequest. @@ -171,14 +197,13 @@ class TaskExecutorCursor { void _runRemoteCommand(const RemoteCommandRequest& rcr); /** - * Gets the next batch with interruptibility via the opCtx + * Gets the next batch with interruptibility via the opCtx. */ void _getNextBatch(OperationContext* opCtx); /** * Helper for '_getNextBatch' that handles the reading of the 'CursorResponse' object and - * storing of relevant values. This is also responsible for issuing a getMore request if it - * is required to populate the next batch. + * storing of relevant values. */ void _processResponse(OperationContext* opCtx, CursorResponse&& response); @@ -187,6 +212,14 @@ class TaskExecutorCursor { */ const RemoteCommandRequest& _createRequest(OperationContext* opCtx, const BSONObj& cmd); + /** + * Schedules a 'GetMore' request to run asynchronously. + * This function can only be invoked when: + * - There is no in-flight request ('_cmdState' is null). + * - We have an open '_cursorId'. + */ + void _scheduleGetMore(OperationContext* opCtx); + std::shared_ptr _executor; // If we are pinning connections, we need to keep a separate reference to the // non-pinning, normal executor, so that we can shut down the pinned executor @@ -239,5 +272,29 @@ class TaskExecutorCursor { std::vector _additionalCursors; }; +// Make a new TaskExecutorCursor using the provided executor, RCR, and options. If we fail to create +// the cursor, the retryPolicy can inspect the error and make a decision as to whether we should +// retry. If we do retry, the error is swallowed and another attempt is made. If we don't retry, +// this function throws the error we failed with. +inline TaskExecutorCursor makeTaskExecutorCursor( + OperationContext* opCtx, + std::shared_ptr executor, + const RemoteCommandRequest& rcr, + TaskExecutorCursor::Options options = {}, + std::function retryPolicy = nullptr) { + for (;;) { + try { + TaskExecutorCursor tec(executor, rcr, options); + tec.populateCursor(opCtx); + return tec; + } catch (const DBException& ex) { + bool shouldRetry = retryPolicy && retryPolicy(ex.toStatus()); + if (!shouldRetry) { + throw; + } + } + } +} + } // namespace executor } // namespace mongo diff --git a/src/mongo/executor/task_executor_cursor_integration_test.cpp b/src/mongo/executor/task_executor_cursor_integration_test.cpp index c3dcfce091a48..7548f8362ef8f 100644 --- a/src/mongo/executor/task_executor_cursor_integration_test.cpp +++ b/src/mongo/executor/task_executor_cursor_integration_test.cpp @@ -27,21 +27,46 @@ * it in the license file.
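The new Options fields and the makeTaskExecutorCursor() helper above are easiest to see from a call site. A hedged usage sketch follows: the executor and opCtx are assumed to be provided by the caller, and the host, command, extra getMore field, and retry predicate are illustrative values, not taken from the patch.

```cpp
// Sketch only (assumes the mongo::executor namespace); literals are illustrative.
TaskExecutorCursor openCursorWithRetry(OperationContext* opCtx,
                                       std::shared_ptr<TaskExecutor> executor) {
    RemoteCommandRequest rcr(
        HostAndPort("localhost", 27017), "testdb", BSON("find" << "coll"), opCtx);

    TaskExecutorCursor::Options opts;
    opts.batchSize = 100;
    opts.preFetchNextBatch = false;  // Fetch each batch lazily, on getNext(), not eagerly.
    opts.getMoreAugmentationWriter = [](BSONObjBuilder& bob) {
        // Only legal when preFetchNextBatch is false (see the invariant in _scheduleGetMore).
        bob.append("comment", "field added by the caller");
    };

    // The predicate decides whether a failure to establish the cursor is retried or rethrown.
    return makeTaskExecutorCursor(opCtx, executor, rcr, std::move(opts), [](const Status& s) {
        return s == ErrorCodes::HostUnreachable || s == ErrorCodes::NetworkTimeout;
    });
}
```

Because preFetchNextBatch is false here, the first getMore is only scheduled when getNext() exhausts the initial batch, which is the behaviour the NoPrefetchGetMore test below pins down.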
*/ -#include "mongo/platform/basic.h" - -#include "mongo/executor/task_executor_cursor.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" +#include "mongo/db/client.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/executor/connection_pool_stats.h" +#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" -#include "mongo/executor/pinned_connection_task_executor.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_cursor.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -49,11 +74,9 @@ namespace mongo { namespace executor { namespace { -class TaskExecutorCursorFixture : public mongo::unittest::Test { +class TaskExecutorCursorFixture : public ServiceContextTest { public: - TaskExecutorCursorFixture() { - _serviceCtx->registerClientObserver(std::make_unique()); - } + TaskExecutorCursorFixture() = default; void setUp() override { _ni = makeNetworkInterface("TaskExecutorCursorTest"); diff --git a/src/mongo/executor/task_executor_cursor_test.cpp b/src/mongo/executor/task_executor_cursor_test.cpp index eaece63fd6c4a..af83f15fdd69b 100644 --- a/src/mongo/executor/task_executor_cursor_test.cpp +++ b/src/mongo/executor/task_executor_cursor_test.cpp @@ -28,16 +28,35 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/executor/pinned_connection_task_executor.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/executor/pinned_connection_task_executor_test_fixture.h" #include "mongo/executor/task_executor_cursor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" #include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/thread_assertion_monitor.h" +#include "mongo/util/net/hostandport.h" +#include 
"mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -492,6 +511,80 @@ class TaskExecutorCursorTestFixture : public Base { th.join(); } + /** + * Test that if 'preFetchNextBatch' is false, the TaskExecutorCursor does not request GetMores + * until the current batch is exhausted and 'getNext()' is invoked. + */ + void NoPrefetchGetMore() { + unittest::threadAssertionMonitoredTest([&](auto& monitor) { + CursorId cursorId = 1; + RemoteCommandRequest rcr(HostAndPort("localhost"), + "test", + BSON("search" + << "foo"), + opCtx.get()); + + // The lambda that will be used to augment the getMore request sent below is passed into + // the TEC constructor. + auto augmentGetMore = [](BSONObjBuilder& bob) { + bob.append("test", 1); + }; + + // Construction of the TaskExecutorCursor enqueues a request in the + // NetworkInterfaceMock. + TaskExecutorCursor tec = makeTec(rcr, [&augmentGetMore] { + TaskExecutorCursor::Options opts; + opts.batchSize = 2; + opts.preFetchNextBatch = false; + opts.getMoreAugmentationWriter = augmentGetMore; + return opts; + }()); + + // Mock the response for the first batch. + scheduleSuccessfulCursorResponse("firstBatch", 1, 2, cursorId); + + // Exhaust the first batch. + ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 1); + ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 2); + + // Assert that the TaskExecutorCursor has not requested a GetMore. This enforces that + // 'preFetchNextBatch' works as expected. + ASSERT_FALSE(hasReadyRequests()); + + // As soon as 'getNext()' is invoked, the TaskExecutorCursor will try to send a GetMore + // and that will block this thread in the NetworkInterfaceMock until there is a + // scheduled response. However, we cannot schedule the cursor response on the main + // thread before we call 'getNext()' as that will cause the NetworkInterfaceMock to + // block until there is request enqueued ('getNext()' is the function which will enqueue + // such as request). To avoid this deadlock, we start a new thread which will schedule a + // response on the NetworkInterfaceMock. + auto responseSchedulerThread = monitor.spawn([&] { + auto recievedGetMoreCmd = scheduleSuccessfulCursorResponse("nextBatch", 3, 4, 0); + + // Assert that the command processed for the above response matches with the + // lambda to augment the getMore command used during construction of the TEC + // above. + const auto expectedGetMoreCmd = BSON("getMore" << 1LL << "collection" + << "test" + << "batchSize" << 2 << "test" << 1); + ASSERT_BSONOBJ_EQ(expectedGetMoreCmd, recievedGetMoreCmd); + }); + + // Schedules the GetMore request and exhausts the cursor. + ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 3); + ASSERT_EQUALS(tec.getNext(opCtx.get()).value()["x"].Int(), 4); + + // Joining the thread which schedules the cursor response for the GetMore here forces + // the destructor of NetworkInterfaceMock::InNetworkGuard to run, which ensures that the + // 'NetworkInterfaceMock' stops executing as the network thread. This is required before + // we invoke 'hasReadyRequests()' which enters the network again. + responseSchedulerThread.join(); + + // Assert no GetMore is requested. 
+ ASSERT_FALSE(hasReadyRequests()); + }); + } + ServiceContext::UniqueServiceContext serviceCtx = ServiceContext::make(); ServiceContext::UniqueClient client; ServiceContext::UniqueOperationContext opCtx; @@ -505,10 +598,10 @@ class NonPinningTaskExecutorCursorTestFixture launchExecutorThread(); } - BSONObj scheduleSuccessfulCursorResponse(StringData fieldName, - size_t start, - size_t end, - size_t cursorId) { + virtual BSONObj scheduleSuccessfulCursorResponse(StringData fieldName, + size_t start, + size_t end, + size_t cursorId) { NetworkInterfaceMock::InNetworkGuard ing(getNet()); @@ -653,10 +746,51 @@ class PinnedConnTaskExecutorCursorTestFixture } }; +class NoPrefetchTaskExecutorCursorTestFixture : public NonPinningTaskExecutorCursorTestFixture { +public: + TaskExecutorCursor makeTec(RemoteCommandRequest rcr, + TaskExecutorCursor::Options&& options = {}) { + options.preFetchNextBatch = false; + return TaskExecutorCursor(getExecutorPtr(), rcr, std::move(options)); + } + + BSONObj scheduleSuccessfulCursorResponse(StringData fieldName, + size_t start, + size_t end, + size_t cursorId) { + NetworkInterfaceMock::InNetworkGuard ing(getNet()); + // Don't assert that the network has requests like we do in other classes. This is to enable + // the test in 'NoPrefetchGetMore'. + auto rcr = + ing->scheduleSuccessfulResponse(buildCursorResponse(fieldName, start, end, cursorId)); + ing->runReadyNetworkOperations(); + return rcr.cmdObj.getOwned(); + } +}; + +class NoPrefetchPinnedTaskExecutorCursorTestFixture + : public PinnedConnTaskExecutorCursorTestFixture { +public: + TaskExecutorCursor makeTec(RemoteCommandRequest rcr, + TaskExecutorCursor::Options&& options = {}) { + options.preFetchNextBatch = false; + options.pinConnection = true; + return TaskExecutorCursor(getExecutorPtr(), rcr, std::move(options)); + } +}; + TEST_F(NonPinningTaskExecutorCursorTestFixture, SingleBatchWorks) { SingleBatchWorksTest(); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, SingleBatchWorks) { + SingleBatchWorksTest(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, SingleBatchWorks) { + SingleBatchWorksTest(); +} + TEST_F(PinnedConnTaskExecutorCursorTestFixture, SingleBatchWorks) { SingleBatchWorksTest(); } @@ -665,6 +799,14 @@ TEST_F(NonPinningTaskExecutorCursorTestFixture, MultipleCursorsSingleBatchSuccee MultipleCursorsSingleBatchSucceedsTest(); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, MultipleCursorsSingleBatchSucceeds) { + MultipleCursorsSingleBatchSucceedsTest(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, MultipleCursorsSingleBatchSucceeds) { + MultipleCursorsSingleBatchSucceedsTest(); +} + TEST_F(PinnedConnTaskExecutorCursorTestFixture, MultipleCursorsSingleBatchSucceeds) { MultipleCursorsSingleBatchSucceedsTest(); } @@ -674,14 +816,33 @@ TEST_F(NonPinningTaskExecutorCursorTestFixture, ChildTaskExecutorCursorsAreSafeIfOriginalOpCtxDestructedTest(); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, + ChildTaskExecutorCursorsAreSafeIfOriginalOpCtxDestructed) { + ChildTaskExecutorCursorsAreSafeIfOriginalOpCtxDestructedTest(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, + ChildTaskExecutorCursorsAreSafeIfOriginalOpCtxDestructed) { + ChildTaskExecutorCursorsAreSafeIfOriginalOpCtxDestructedTest(); +} + TEST_F(PinnedConnTaskExecutorCursorTestFixture, ChildTaskExecutorCursorsAreSafeIfOriginalOpCtxDestructed) { ChildTaskExecutorCursorsAreSafeIfOriginalOpCtxDestructedTest(); } + TEST_F(NonPinningTaskExecutorCursorTestFixture, MultipleCursorsGetMoreWorks) { 
MultipleCursorsGetMoreWorksTest(); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, MultipleCursorsGetMoreWorks) { + MultipleCursorsGetMoreWorksTest(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, MultipleCursorsGetMoreWorks) { + MultipleCursorsGetMoreWorksTest(); +} + TEST_F(PinnedConnTaskExecutorCursorTestFixture, MultipleCursorsGetMoreWorks) { MultipleCursorsGetMoreWorksTest(); } @@ -690,13 +851,21 @@ TEST_F(NonPinningTaskExecutorCursorTestFixture, FailureInFind) { FailureInFindTest(); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, FailureInFind) { + FailureInFindTest(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, FailureInFind) { + FailureInFindTest(); +} + TEST_F(PinnedConnTaskExecutorCursorTestFixture, FailureInFind) { FailureInFindTest(); } /** * Ensure early termination of the cursor calls killCursor (if we know about the cursor id) - * Only applicapble to the unpinned case - if the connection is pinned, and a getMore is + * Only applicable to the unpinned case - if the connection is pinned, and a getMore is * in progress and/or fails, the most we can do is kill the connection. We can't re-use * the connection to send killCursors. */ @@ -730,6 +899,14 @@ TEST_F(NonPinningTaskExecutorCursorTestFixture, MultipleBatchesWorks) { MultipleBatchesWorksTest(); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, MultipleBatchesWorks) { + MultipleBatchesWorksTest(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, MultipleBatchesWorks) { + MultipleBatchesWorksTest(); +} + TEST_F(PinnedConnTaskExecutorCursorTestFixture, MultipleBatchesWorks) { MultipleBatchesWorksTest(); } @@ -738,6 +915,14 @@ TEST_F(NonPinningTaskExecutorCursorTestFixture, EmptyFirstBatch) { EmptyFirstBatchTest(); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, EmptyFirstBatch) { + EmptyFirstBatchTest(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, EmptyFirstBatch) { + EmptyFirstBatchTest(); +} + TEST_F(PinnedConnTaskExecutorCursorTestFixture, EmptyFirstBatch) { EmptyFirstBatchTest(); } @@ -746,6 +931,14 @@ TEST_F(NonPinningTaskExecutorCursorTestFixture, EmptyNonInitialBatch) { EmptyNonInitialBatchTest(); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, EmptyNonInitialBatch) { + EmptyNonInitialBatchTest(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, EmptyNonInitialBatch) { + EmptyNonInitialBatchTest(); +} + TEST_F(PinnedConnTaskExecutorCursorTestFixture, EmptyNonInitialBatch) { EmptyNonInitialBatchTest(); } @@ -797,6 +990,14 @@ TEST_F(NonPinningTaskExecutorCursorTestFixture, LsidIsPassed) { ASSERT_FALSE(hasReadyRequests()); } +TEST_F(NoPrefetchTaskExecutorCursorTestFixture, NoPrefetchGetMore) { + NoPrefetchGetMore(); +} + +TEST_F(NoPrefetchPinnedTaskExecutorCursorTestFixture, NoPrefetchWithPinning) { + NoPrefetchGetMore(); +} + } // namespace } // namespace executor } // namespace mongo diff --git a/src/mongo/executor/task_executor_pool.cpp b/src/mongo/executor/task_executor_pool.cpp index b6869489793f6..89a47a29eaebe 100644 --- a/src/mongo/executor/task_executor_pool.cpp +++ b/src/mongo/executor/task_executor_pool.cpp @@ -27,14 +27,14 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include #include "mongo/executor/task_executor_pool.h" - -#include - -#include "mongo/executor/task_executor_pool_parameters_gen.h" -#include "mongo/util/processinfo.h" +#include "mongo/executor/task_executor_pool_parameters_gen.h" // IWYU pragma: keep +#include "mongo/util/assert_util_core.h" +#include "mongo/util/processinfo.h" // IWYU pragma: keep namespace mongo { namespace executor { diff --git a/src/mongo/executor/task_executor_pool.h b/src/mongo/executor/task_executor_pool.h index b5bbbd72352c6..dc52fcfe140bc 100644 --- a/src/mongo/executor/task_executor_pool.h +++ b/src/mongo/executor/task_executor_pool.h @@ -29,8 +29,11 @@ #pragma once +#include +#include #include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/executor/connection_pool_stats.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/atomic_word.h" diff --git a/src/mongo/executor/task_executor_test_common.cpp b/src/mongo/executor/task_executor_test_common.cpp index b54c5a6adfb55..fb199ec05eeea 100644 --- a/src/mongo/executor/task_executor_test_common.cpp +++ b/src/mongo/executor/task_executor_test_common.cpp @@ -28,25 +28,49 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/task_executor_test_common.h" - +#include +#include +#include +#include +#include #include - -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/db/operation_context.h" -#include "mongo/executor/network_interface.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_test_common.h" #include "mongo/executor/task_executor_test_fixture.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" #include "mongo/stdx/unordered_map.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/cancellation.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -287,7 +311,8 @@ EventChainAndWaitingTest::~EventChainAndWaitingTest() { } void EventChainAndWaitingTest::run() { - executor->onEvent(goEvent, [=](const TaskExecutor::CallbackArgs& cbData) { onGo(cbData); }) + executor + ->onEvent(goEvent, [=, this](const TaskExecutor::CallbackArgs& cbData) { onGo(cbData); }) .status_with_transitional_ignore(); executor->signalEvent(goEvent); executor->waitForEvent(goEvent); @@ -340,8 +365,9 @@ void EventChainAndWaitingTest::onGo(const TaskExecutor::CallbackArgs& cbData) { return; } - cbHandle = executor->onEvent( - goEvent, [=](const TaskExecutor::CallbackArgs& cbData) { onGoAfterTriggered(cbData); }); + cbHandle = 
executor->onEvent(goEvent, [=, this](const TaskExecutor::CallbackArgs& cbData) { + onGoAfterTriggered(cbData); + }); if (!cbHandle.isOK()) { status1 = cbHandle.getStatus(); executor->shutdown(); @@ -1104,7 +1130,7 @@ COMMON_EXECUTOR_TEST(ScheduleExhaustRemoteCommandFutureIsResolvedWithErrorOnCanc net->exitNetwork(); // Response should be cancelled. ASSERT_EQUALS(responseFuture.getNoThrow().getStatus().code(), ErrorCodes::CallbackCanceled); - ASSERT_EQUALS(numTimesCallbackCalled, 1); + ASSERT_EQUALS(numTimesCallbackCalled, 2); shutdownExecutorThread(); joinExecutorThread(); diff --git a/src/mongo/executor/task_executor_test_fixture.cpp b/src/mongo/executor/task_executor_test_fixture.cpp index 7f52fc06c1958..7ff8c568af0ba 100644 --- a/src/mongo/executor/task_executor_test_fixture.cpp +++ b/src/mongo/executor/task_executor_test_fixture.cpp @@ -27,15 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/task_executor_test_fixture.h" - #include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/executor/network_interface_mock.h" #include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" +#include "mongo/executor/task_executor_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/executor/task_executor_test_fixture.h b/src/mongo/executor/task_executor_test_fixture.h index ffa1c5dc7f1e0..fc9c9c514bf68 100644 --- a/src/mongo/executor/task_executor_test_fixture.h +++ b/src/mongo/executor/task_executor_test_fixture.h @@ -31,8 +31,10 @@ #include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/executor/remote_command_request.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/unittest.h" namespace mongo { diff --git a/src/mongo/executor/test_network_connection_hook.h b/src/mongo/executor/test_network_connection_hook.h index 5a22ac24f5490..c825903c7dcec 100644 --- a/src/mongo/executor/test_network_connection_hook.h +++ b/src/mongo/executor/test_network_connection_hook.h @@ -53,8 +53,8 @@ class TestConnectionHook final : public NetworkConnectionHook { Status validateHost(const HostAndPort& remoteHost, const BSONObj& request, - const RemoteCommandResponse& isMasterReply) override { - return _validateFunc(remoteHost, request, isMasterReply); + const RemoteCommandResponse& helloReply) override { + return _validateFunc(remoteHost, request, helloReply); } StatusWith> makeRequest(const HostAndPort& remoteHost) { diff --git a/src/mongo/executor/thread_pool_mock.cpp b/src/mongo/executor/thread_pool_mock.cpp index e39520487adde..eddaf9578e27b 100644 --- a/src/mongo/executor/thread_pool_mock.cpp +++ b/src/mongo/executor/thread_pool_mock.cpp @@ -28,12 +28,19 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/thread_pool_mock.h" +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/thread_pool_mock.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/functional.h" #define MONGO_LOGV2_DEFAULT_COMPONENT mongo::logv2::LogComponent::kExecutor diff --git a/src/mongo/executor/thread_pool_mock.h b/src/mongo/executor/thread_pool_mock.h index 
c73bcfdeea303..d5e171024cc68 100644 --- a/src/mongo/executor/thread_pool_mock.h +++ b/src/mongo/executor/thread_pool_mock.h @@ -31,12 +31,14 @@ #include #include +#include #include #include "mongo/platform/mutex.h" #include "mongo/platform/random.h" #include "mongo/stdx/thread.h" #include "mongo/util/concurrency/thread_pool_interface.h" +#include "mongo/util/out_of_line_executor.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/thread_pool_task_executor.cpp b/src/mongo/executor/thread_pool_task_executor.cpp index 1a1a56ba6ec62..a6ed53fa9235a 100644 --- a/src/mongo/executor/thread_pool_task_executor.cpp +++ b/src/mongo/executor/thread_pool_task_executor.cpp @@ -28,24 +28,38 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/thread_pool_task_executor.h" - -#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include #include +#include +#include #include +#include #include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/operation_context.h" #include "mongo/executor/connection_pool_stats.h" #include "mongo/executor/network_interface.h" +#include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/platform/atomic_word.h" -#include "mongo/transport/baton.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool_interface.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT mongo::logv2::LogComponent::kExecutor @@ -90,8 +104,9 @@ class ThreadPoolTaskExecutor::CallbackState : public TaskExecutor::CallbackState } // All fields except for "canceled" are guarded by the owning task executor's _mutex. The - // "canceled" field may be observed without holding _mutex, but may only be set while holding - // _mutex. + // "canceled" field may be observed without holding _mutex only if we are checking if the value + // is true. This is because once "canceled" stores true, we never set it back to false. The + // "canceled" field may only be set while holding _mutex. CallbackFn callback; AtomicWord canceled{0U}; @@ -731,35 +746,23 @@ StatusWith ThreadPoolTaskExecutor::scheduleExhaust return; } - if (cbState->canceled.load()) { - // Release any resources the callback function is holding - TaskExecutor::CallbackFn callback = [](const CallbackArgs&) { - }; - std::swap(cbState->callback, callback); - - _networkInProgressQueue.erase(cbState->iter); - cbState->exhaustErased.store(1); - - if (cbState->exhaustIter) { - _poolInProgressQueue.erase(cbState->exhaustIter.value()); - cbState->exhaustIter = boost::none; - } - - return; - } - // Swap the callback function with the new one CallbackFn newCb = [cb, scheduledRequest, response](const CallbackArgs& cbData) { remoteCommandFinished(cbData, cb, scheduledRequest, response); }; swap(cbState->callback, newCb); - // If this is the last response, invoke the non-exhaust path. This will mark cbState as - // finished and remove the task from _networkInProgressQueue - if (!response.moreToCome) { + // If this is the last response, or command was cancelled, invoke the non-exhaust path. 
+ // This will mark cbState as finished and remove the task from _networkInProgressQueue. + if (!response.moreToCome || cbState->canceled.load()) { _networkInProgressQueue.erase(cbState->iter); cbState->exhaustErased.store(1); + if (cbState->canceled.load() && cbState->exhaustIter) { + _poolInProgressQueue.erase(cbState->exhaustIter.value()); + cbState->exhaustIter = boost::none; + } + WorkQueue result; result.emplace_front(cbState); result.front()->iter = result.begin(); @@ -828,19 +831,19 @@ void ThreadPoolTaskExecutor::runCallbackExhaust(std::shared_ptr c std::move(cbHandle), cbState->canceled.load() ? kCallbackCanceledErrorStatus : Status::OK()); - if (!cbState->isFinished.load()) { + if (auto lk = stdx::unique_lock(_mutex); !cbState->isFinished.load()) { TaskExecutor::CallbackFn callback = [](const CallbackArgs&) { }; { - auto lk = stdx::lock_guard(_mutex); std::swap(cbState->callback, callback); + lk.unlock(); } callback(std::move(args)); + lk.lock(); // Leave the empty callback function if the request has been marked canceled or finished // while running the callback to avoid leaking resources. if (!cbState->canceled.load() && !cbState->isFinished.load()) { - auto lk = stdx::lock_guard(_mutex); std::swap(callback, cbState->callback); } } diff --git a/src/mongo/executor/thread_pool_task_executor.h b/src/mongo/executor/thread_pool_task_executor.h index 900ed7508c8f8..c3a60364d307d 100644 --- a/src/mongo/executor/thread_pool_task_executor.h +++ b/src/mongo/executor/thread_pool_task_executor.h @@ -31,14 +31,24 @@ #include #include +#include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" #include "mongo/transport/baton.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/executor/thread_pool_task_executor_integration_test.cpp b/src/mongo/executor/thread_pool_task_executor_integration_test.cpp index 4be1d0739942d..e001e08c442d9 100644 --- a/src/mongo/executor/thread_pool_task_executor_integration_test.cpp +++ b/src/mongo/executor/thread_pool_task_executor_integration_test.cpp @@ -27,18 +27,41 @@ * it in the license file. 
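The reworded comment on CallbackState::canceled above captures a subtle rule: the flag only ever transitions from false to true, and is only written under _mutex, so an unlocked read that observes true is conclusive while acting on an observed false still requires the lock. Here is a small standalone sketch of that idiom in plain C++; the class and member names are illustrative, not from the patch.

```cpp
#include <atomic>
#include <mutex>

// One-way ("monotonic") flag: set() is only called under the mutex and is never undone.
class OneWayFlag {
public:
    void set() {
        std::lock_guard<std::mutex> lk(_mutex);
        _flag.store(true);
    }

    // Safe without the lock: a stale read can only under-report, it can never claim 'true'
    // before set() has actually happened.
    bool definitelySet() const {
        return _flag.load();
    }

    // Deciding to proceed because the flag is *not* set must happen under the mutex,
    // otherwise a concurrent set() could slip in between the check and the work.
    template <typename Work>
    void runIfNotSet(Work&& work) {
        std::lock_guard<std::mutex> lk(_mutex);
        if (!_flag.load()) {
            work();
        }
    }

private:
    mutable std::mutex _mutex;
    std::atomic<bool> _flag{false};
};
```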
*/ -#include "mongo/platform/basic.h" - -#include "mongo/executor/task_executor.h" - -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/db/namespace_string.h" +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/client.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/topology_version_gen.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -47,12 +70,9 @@ namespace mongo { namespace executor { namespace { -class TaskExecutorFixture : public mongo::unittest::Test { +class TaskExecutorFixture : public ServiceContextTest { public: - TaskExecutorFixture() { - _serviceCtx = ServiceContext::make(); - _serviceCtx->registerClientObserver(std::make_unique()); - } + TaskExecutorFixture() = default; void setUp() override { std::shared_ptr net = makeNetworkInterface("TaskExecutorTest"); @@ -81,7 +101,7 @@ class TaskExecutorFixture : public mongo::unittest::Test { return false; } - ServiceContext::UniqueServiceContext _serviceCtx; + ServiceContext::UniqueServiceContext _serviceCtx{ServiceContext::make()}; std::shared_ptr _executor; }; diff --git a/src/mongo/executor/thread_pool_task_executor_test.cpp b/src/mongo/executor/thread_pool_task_executor_test.cpp index c8a68e9cc6136..57bcf5e02d8d1 100644 --- a/src/mongo/executor/thread_pool_task_executor_test.cpp +++ b/src/mongo/executor/thread_pool_task_executor_test.cpp @@ -27,23 +27,27 @@ * it in the license file. 
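Both executor integration-test fixtures in this patch switch from unittest::Test plus a hand-registered LockerNoopClientObserver to ServiceContextTest, which provides the service context and test client. A hedged sketch of the resulting fixture shape; the wiring mirrors the fixtures above, while the class name and instance string are illustrative.

```cpp
class MyExecutorFixture : public ServiceContextTest {
public:
    void setUp() override {
        // Same wiring the integration tests use: a real NetworkInterface driving a
        // ThreadPoolTaskExecutor.
        std::shared_ptr<executor::NetworkInterface> net =
            executor::makeNetworkInterface("MyExecutorFixture");
        auto pool = std::make_unique<executor::NetworkInterfaceThreadPool>(net.get());
        _executor =
            std::make_shared<executor::ThreadPoolTaskExecutor>(std::move(pool), std::move(net));
        _executor->startup();
    }

    void tearDown() override {
        _executor->shutdown();
        _executor->join();
    }

protected:
    std::shared_ptr<executor::TaskExecutor> _executor;
};
```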
*/ -#include "mongo/platform/basic.h" - +#include #include +#include + +#include -#include "mongo/base/checked_cast.h" -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/status.h" -#include "mongo/executor/network_interface.h" +#include "mongo/base/string_data.h" #include "mongo/executor/network_interface_mock.h" #include "mongo/executor/task_executor_test_common.h" -#include "mongo/executor/task_executor_test_fixture.h" -#include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/thread_pool_task_executor_test_fixture.cpp b/src/mongo/executor/thread_pool_task_executor_test_fixture.cpp index 1f19aeeefe65e..673ae8800e3e6 100644 --- a/src/mongo/executor/thread_pool_task_executor_test_fixture.cpp +++ b/src/mongo/executor/thread_pool_task_executor_test_fixture.cpp @@ -27,13 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" - #include +#include #include "mongo/executor/thread_pool_mock.h" +#include "mongo/executor/thread_pool_task_executor_test_fixture.h" namespace mongo { namespace executor { diff --git a/src/mongo/executor/thread_pool_task_executor_test_fixture.h b/src/mongo/executor/thread_pool_task_executor_test_fixture.h index 47862bde8aefd..8bb8d813c6c29 100644 --- a/src/mongo/executor/thread_pool_task_executor_test_fixture.h +++ b/src/mongo/executor/thread_pool_task_executor_test_fixture.h @@ -32,6 +32,7 @@ #include #include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor.h" diff --git a/src/mongo/idl/SConscript b/src/mongo/idl/SConscript index 7fd0e80b436a1..cee26a04e5cba 100644 --- a/src/mongo/idl/SConscript +++ b/src/mongo/idl/SConscript @@ -102,6 +102,7 @@ env.CppUnitTest( 'cluster_server_parameter_common_test.cpp', 'cluster_server_parameter_initializer_test.cpp', 'cluster_server_parameter_op_observer_test.cpp', + 'cluster_server_parameter_refresher_test.cpp', 'cluster_server_parameter_test_util.cpp', 'cluster_server_parameter_test.idl', ], @@ -123,6 +124,7 @@ env.CppUnitTest( 'cluster_server_parameter_common', 'cluster_server_parameter_initializer', 'cluster_server_parameter_op_observer', + 'cluster_server_parameter_refresher', ], ) @@ -138,6 +140,7 @@ env.Library( '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/session/logical_session_id', '$BUILD_DIR/mongo/rpc/client_metadata', + '$BUILD_DIR/mongo/rpc/metadata_impersonated_user', '$BUILD_DIR/mongo/s/common_s', ], ) diff --git a/src/mongo/idl/cluster_parameter_synchronization_helpers.cpp b/src/mongo/idl/cluster_parameter_synchronization_helpers.cpp index 9e9346ce738b9..711015d505143 100644 --- a/src/mongo/idl/cluster_parameter_synchronization_helpers.cpp +++ b/src/mongo/idl/cluster_parameter_synchronization_helpers.cpp @@ -29,11 +29,23 @@ #include "mongo/idl/cluster_parameter_synchronization_helpers.h" 
+#include +#include +#include +#include + +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/audit.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -43,6 +55,19 @@ constexpr auto kIdField = "_id"_sd; constexpr auto kCPTField = "clusterParameterTime"_sd; constexpr auto kOplog = "oplog"_sd; +void validateParameter(OperationContext* opCtx, + BSONObj doc, + const boost::optional& tenantId) { + auto nameElem = doc[kIdField]; + uassert(ErrorCodes::OperationFailed, + "Validate with invalid parameter name", + nameElem.type() == String); + auto name = nameElem.valueStringData(); + auto* sp = ServerParameterSet::getClusterParameterSet()->getIfExists(name); + uassert(ErrorCodes::OperationFailed, "Validate on unknown cluster parameter", sp); + uassertStatusOK(sp->validate(doc, tenantId)); +} + void updateParameter(OperationContext* opCtx, BSONObj doc, StringData mode, @@ -82,6 +107,8 @@ void updateParameter(OperationContext* opCtx, return; } + uassertStatusOK(sp->validate(doc, tenantId)); + BSONObjBuilder oldValueBob; sp->append(opCtx, &oldValueBob, name.toString(), tenantId); audit::logUpdateCachedClusterParameter(opCtx->getClient(), oldValueBob.obj(), doc, tenantId); diff --git a/src/mongo/idl/cluster_parameter_synchronization_helpers.h b/src/mongo/idl/cluster_parameter_synchronization_helpers.h index 9bba0c02faf03..6f3d3841f7add 100644 --- a/src/mongo/idl/cluster_parameter_synchronization_helpers.h +++ b/src/mongo/idl/cluster_parameter_synchronization_helpers.h @@ -29,12 +29,37 @@ #pragma once +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/catalog/collection.h" #include "mongo/db/db_raii.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/storage/record_data.h" +#include "mongo/db/storage/record_store.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace cluster_parameters { +void validateParameter(OperationContext* opCtx, + BSONObj doc, + const boost::optional& tenantId); + void updateParameter(OperationContext* opCtx, BSONObj doc, StringData mode, @@ -86,7 +111,9 @@ void doLoadAllTenantParametersFromDisk(OperationContext* opCtx, auto cursor = coll->getCursor(opCtx); for (auto doc = cursor->next(); doc; doc = cursor->next()) { try { - onEntry(opCtx, doc.get().data.toBson(), mode, tenantId); + auto data = doc.get().data.toBson(); + validateParameter(opCtx, data, tenantId); + onEntry(opCtx, data, mode, tenantId); } catch (const DBException& ex) { failures.push_back(ex.toStatus()); } diff --git a/src/mongo/idl/cluster_server_parameter.idl b/src/mongo/idl/cluster_server_parameter.idl index 8991e4f47f904..25224c7bab7d8 100644 --- a/src/mongo/idl/cluster_server_parameter.idl +++ b/src/mongo/idl/cluster_server_parameter.idl @@ -87,15 +87,11 @@ structs: default: 0 
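Stepping back to the cluster_parameter_synchronization_helpers changes above: validateParameter() now runs before any onCommit hook is registered, so a malformed document is rejected up front. A hedged sketch of what a call looks like, reusing testIntClusterParameter from the test IDL in this patch; the payload field names and values are assumptions made for illustration.

```cpp
void exampleValidate(OperationContext* opCtx) {
    // Illustrative document; the exact storage fields of testIntClusterParameter are assumed.
    BSONObj doc = BSON("_id" << "testIntClusterParameter"
                             << "clusterParameterTime" << Timestamp(12345, 1)
                             << "intParameter" << 42);

    // uasserts if "_id" is missing or not a string, if no cluster server parameter is
    // registered under that name, or if the parameter's own validate() rejects the payload.
    cluster_parameters::validateParameter(opCtx, doc, boost::none /* no tenant */);

    // Only once this succeeds do the op observer and disk-load paths register the onCommit
    // hook that applies the value to the in-memory ServerParameter (see updateParameter).
}
```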
feature_flags: - featureFlagClusterWideConfigM2: - description: Mechanism for cluster-wide configuration options, milestone 2 - cpp_varname: gFeatureFlagClusterWideConfigM2 - default: true - version: 6.1 featureFlagClusterWideToaster: description: Feature flag for testing use of feature flags with CW server params cpp_varname: gFeatureFlagClusterWideToaster default: false + shouldBeFCVGated: true server_parameters: testIntClusterParameter: diff --git a/src/mongo/idl/cluster_server_parameter_common.cpp b/src/mongo/idl/cluster_server_parameter_common.cpp index 7a6660122be4e..0e366452dd4bc 100644 --- a/src/mongo/idl/cluster_server_parameter_common.cpp +++ b/src/mongo/idl/cluster_server_parameter_common.cpp @@ -27,15 +27,35 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include -#include "mongo/idl/cluster_server_parameter_common.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter.h" #include "mongo/db/commands/list_databases_for_all_tenants_gen.h" -#include "mongo/db/dbdirectclient.h" +#include "mongo/db/database_name.h" #include "mongo/db/multitenancy_gen.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/cluster_server_parameter_common.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/idl/cluster_server_parameter_common.h b/src/mongo/idl/cluster_server_parameter_common.h index 5437513978c98..0e1dc9ba51a90 100644 --- a/src/mongo/idl/cluster_server_parameter_common.h +++ b/src/mongo/idl/cluster_server_parameter_common.h @@ -28,8 +28,13 @@ */ #pragma once +#include +#include + +#include "mongo/base/status_with.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/task_executor.h" #include "mongo/s/client/shard.h" diff --git a/src/mongo/idl/cluster_server_parameter_common_test.cpp b/src/mongo/idl/cluster_server_parameter_common_test.cpp index fb22d6ee5d13b..d5b62af7a196d 100644 --- a/src/mongo/idl/cluster_server_parameter_common_test.cpp +++ b/src/mongo/idl/cluster_server_parameter_common_test.cpp @@ -27,10 +27,25 @@ * it in the license file. 
*/ +#include + +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/db/catalog/create_collection.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/s/shard_local.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" #include "mongo/idl/cluster_server_parameter_common.h" #include "mongo/idl/cluster_server_parameter_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -40,7 +55,7 @@ class ClusterServerParameterCommonTest : public ClusterServerParameterTestBase { public: void setUp() override { ClusterServerParameterTestBase::setUp(); - serverGlobalParams.clusterRole = ClusterRole::ConfigServer; + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; _shardLocal = std::make_unique(ShardId::kConfigServerId); } diff --git a/src/mongo/idl/cluster_server_parameter_initializer.cpp b/src/mongo/idl/cluster_server_parameter_initializer.cpp index ec769da2ebba5..7da3cd05b0865 100644 --- a/src/mongo/idl/cluster_server_parameter_initializer.cpp +++ b/src/mongo/idl/cluster_server_parameter_initializer.cpp @@ -29,12 +29,25 @@ #include "mongo/idl/cluster_server_parameter_initializer.h" +#include +#include +#include + +#include +#include + #include "mongo/base/string_data.h" -#include "mongo/db/catalog_raii.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/multitenancy_gen.h" #include "mongo/db/repl/replica_set_aware_service.h" #include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/cluster_parameter_synchronization_helpers.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/idl/cluster_server_parameter_initializer.h b/src/mongo/idl/cluster_server_parameter_initializer.h index 94e48bdacf5d7..31f5aadc8641c 100644 --- a/src/mongo/idl/cluster_server_parameter_initializer.h +++ b/src/mongo/idl/cluster_server_parameter_initializer.h @@ -29,10 +29,13 @@ #pragma once -#include "mongo/platform/basic.h" +#include #include "mongo/db/db_raii.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/replica_set_aware_service.h" +#include "mongo/db/service_context.h" +#include "mongo/platform/basic.h" namespace mongo { diff --git a/src/mongo/idl/cluster_server_parameter_initializer_test.cpp b/src/mongo/idl/cluster_server_parameter_initializer_test.cpp index 85cd10864c18c..726e89ba3b9fa 100644 --- a/src/mongo/idl/cluster_server_parameter_initializer_test.cpp +++ b/src/mongo/idl/cluster_server_parameter_initializer_test.cpp @@ -27,18 +27,29 @@ * it in the license file. 
*/ +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/create_collection.h" -#include "mongo/db/change_stream_options_manager.h" -#include "mongo/db/dbdirectclient.h" -#include "mongo/db/repl/replication_coordinator_mock.h" -#include "mongo/db/repl/storage_interface_mock.h" +#include "mongo/db/client.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/server_parameter.h" -#include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/cluster_parameter_synchronization_helpers.h" #include "mongo/idl/cluster_server_parameter_initializer.h" +#include "mongo/idl/cluster_server_parameter_test_gen.h" #include "mongo/idl/cluster_server_parameter_test_util.h" -#include "mongo/logv2/log.h" -#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/idl/cluster_server_parameter_op_observer.cpp b/src/mongo/idl/cluster_server_parameter_op_observer.cpp index 7ecdc6a974895..bf31d75c3e536 100644 --- a/src/mongo/idl/cluster_server_parameter_op_observer.cpp +++ b/src/mongo/idl/cluster_server_parameter_op_observer.cpp @@ -29,9 +29,24 @@ #include "mongo/idl/cluster_server_parameter_op_observer.h" -#include "mongo/db/dbdirectclient.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/storage/recovery_unit.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/cluster_parameter_synchronization_helpers.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -46,8 +61,8 @@ constexpr auto kOplog = "oplog"_sd; * associated. This is used in the aboutToDelte/onDelete handlers since the document is not * necessarily available in the latter. 
*/ -const auto aboutToDeleteDoc = OperationContext::declareDecoration(); -const auto tenantIdToDelete = OperationContext::declareDecoration>(); +const auto aboutToDeleteDoc = OplogDeleteEntryArgs::declareDecoration(); +const auto tenantIdToDelete = OplogDeleteEntryArgs::declareDecoration>(); bool isConfigNamespace(const NamespaceString& nss) { return nss == NamespaceString::makeClusterParametersNSS(nss.dbName().tenantId()); @@ -60,40 +75,49 @@ void ClusterServerParameterOpObserver::onInserts(OperationContext* opCtx, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) { + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator) { if (!isConfigNamespace(coll->ns())) { return; } for (auto it = first; it != last; ++it) { - opCtx->recoveryUnit()->onCommit([doc = it->doc, tenantId = coll->ns().dbName().tenantId()]( - OperationContext* opCtx, boost::optional) { - cluster_parameters::updateParameter(opCtx, doc, kOplog, tenantId); - }); + auto& doc = it->doc; + auto tenantId = coll->ns().dbName().tenantId(); + cluster_parameters::validateParameter(opCtx, doc, tenantId); + opCtx->recoveryUnit()->onCommit( + [doc, tenantId](OperationContext* opCtx, boost::optional) { + cluster_parameters::updateParameter(opCtx, doc, kOplog, tenantId); + }); } } void ClusterServerParameterOpObserver::onUpdate(OperationContext* opCtx, - const OplogUpdateEntryArgs& args) { + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator) { auto updatedDoc = args.updateArgs->updatedDoc; if (!isConfigNamespace(args.coll->ns()) || args.updateArgs->update.isEmpty()) { return; } - opCtx->recoveryUnit()->onCommit([updatedDoc, tenantId = args.coll->ns().dbName().tenantId()]( - OperationContext* opCtx, boost::optional) { - cluster_parameters::updateParameter(opCtx, updatedDoc, kOplog, tenantId); - }); + auto tenantId = args.coll->ns().dbName().tenantId(); + cluster_parameters::validateParameter(opCtx, updatedDoc, tenantId); + opCtx->recoveryUnit()->onCommit( + [updatedDoc, tenantId](OperationContext* opCtx, boost::optional) { + cluster_parameters::updateParameter(opCtx, updatedDoc, kOplog, tenantId); + }); } void ClusterServerParameterOpObserver::aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) { + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator) { std::string docBeingDeleted; if (isConfigNamespace(coll->ns())) { // Store the tenantId associated with the doc to be deleted. - tenantIdToDelete(opCtx) = coll->ns().dbName().tenantId(); + tenantIdToDelete(args) = coll->ns().dbName().tenantId(); auto elem = doc[kIdField]; if (elem.type() == String) { docBeingDeleted = elem.str(); @@ -111,16 +135,17 @@ void ClusterServerParameterOpObserver::aboutToDelete(OperationContext* opCtx, // Stash the name of the config doc being deleted (if any) // in an opCtx decoration for use in the onDelete() hook below // since OpLogDeleteEntryArgs isn't guaranteed to have the deleted doc. 
- aboutToDeleteDoc(opCtx) = std::move(docBeingDeleted); + aboutToDeleteDoc(args) = std::move(docBeingDeleted); } void ClusterServerParameterOpObserver::onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) { - const auto& docName = aboutToDeleteDoc(opCtx); + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator) { + const auto& docName = aboutToDeleteDoc(args); if (!docName.empty()) { - opCtx->recoveryUnit()->onCommit([docName, tenantId = tenantIdToDelete(opCtx)]( + opCtx->recoveryUnit()->onCommit([docName, tenantId = tenantIdToDelete(args)]( OperationContext* opCtx, boost::optional) { cluster_parameters::clearParameter(opCtx, docName, tenantId); }); @@ -129,7 +154,7 @@ void ClusterServerParameterOpObserver::onDelete(OperationContext* opCtx, void ClusterServerParameterOpObserver::onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) { - if (dbName.db() == DatabaseName::kConfig.db()) { + if (dbName.isConfigDB()) { // Entire config DB deleted, reset to default state. opCtx->recoveryUnit()->onCommit( [tenantId = dbName.tenantId()](OperationContext* opCtx, boost::optional) { @@ -143,7 +168,8 @@ repl::OpTime ClusterServerParameterOpObserver::onDropCollection( const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) { + CollectionDropType dropType, + bool markFromMigrate) { if (isConfigNamespace(collectionName)) { // Entire collection deleted, reset to default state. opCtx->recoveryUnit()->onCommit([tenantId = collectionName.dbName().tenantId()]( @@ -155,8 +181,8 @@ repl::OpTime ClusterServerParameterOpObserver::onDropCollection( return {}; } -void ClusterServerParameterOpObserver::_onReplicationRollback(OperationContext* opCtx, - const RollbackObserverInfo& rbInfo) { +void ClusterServerParameterOpObserver::onReplicationRollback(OperationContext* opCtx, + const RollbackObserverInfo& rbInfo) { for (const auto& nss : rbInfo.rollbackNamespaces) { if (isConfigNamespace(nss)) { // We can call resynchronize directly because onReplicationRollback is guaranteed to be diff --git a/src/mongo/idl/cluster_server_parameter_op_observer.h b/src/mongo/idl/cluster_server_parameter_op_observer.h index a06f4dbde1787..d4e533050e72c 100644 --- a/src/mongo/idl/cluster_server_parameter_op_observer.h +++ b/src/mongo/idl/cluster_server_parameter_op_observer.h @@ -27,225 +27,66 @@ * it in the license file. */ -#include +#include #include -#include "mongo/base/status.h" -#include "mongo/base/string_data.h" -#include "mongo/db/client.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/op_observer/op_observer.h" -#include "mongo/db/op_observer/op_observer_registry.h" +#include "mongo/db/op_observer/op_observer_noop.h" #include "mongo/db/operation_context.h" -#include "mongo/db/service_context.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/util/uuid.h" namespace mongo { /** * Update in-memory cluster server parameters on insert/update/remove. */ -class ClusterServerParameterOpObserver final : public OpObserver { +class ClusterServerParameterOpObserver final : public OpObserverNoop { public: - // Interface methods. 
+ NamespaceFilters getNamespaceFilters() const final { + return {NamespaceFilter::kConfig, NamespaceFilter::kConfig}; + } void onInserts(OperationContext* opCtx, const CollectionPtr& coll, std::vector::const_iterator first, std::vector::const_iterator last, std::vector fromMigrate, - bool defaultFromMigrate) final; - void onInsertGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final{}; - void onDeleteGlobalIndexKey(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUuid, - const BSONObj& key, - const BSONObj& docKey) final {} - void onUpdate(OperationContext* opCtx, const OplogUpdateEntryArgs& args) final; + bool defaultFromMigrate, + OpStateAccumulator* opAccumulator = nullptr) final; + + void onUpdate(OperationContext* opCtx, + const OplogUpdateEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + void aboutToDelete(OperationContext* opCtx, const CollectionPtr& coll, - const BSONObj& doc) final; + const BSONObj& doc, + OplogDeleteEntryArgs* args, + OpStateAccumulator* opAccumulator = nullptr) final; + void onDelete(OperationContext* opCtx, const CollectionPtr& coll, StmtId stmtId, - const OplogDeleteEntryArgs& args) final; + const OplogDeleteEntryArgs& args, + OpStateAccumulator* opAccumulator = nullptr) final; + void onDropDatabase(OperationContext* opCtx, const DatabaseName& dbName) final; - using OpObserver::onDropCollection; + repl::OpTime onDropCollection(OperationContext* opCtx, const NamespaceString& collectionName, const UUID& uuid, std::uint64_t numRecords, - CollectionDropType dropType) final; - -private: - void _onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; - -public: - // Remainder of operations are ignorable. 
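The header change above derives the observer from OpObserverNoop and advertises a config-namespace filter, which is presumably why the wall of empty overrides below can be deleted: the registry only dispatches matching events, and the no-op base supplies defaults for the rest. A rough, hand-rolled illustration of that dispatch shape (illustrative names, not the real OpObserverRegistry or NamespaceFilters semantics):

    #include <iostream>
    #include <memory>
    #include <string>
    #include <vector>

    struct Observer {
        virtual ~Observer() = default;
        virtual bool wantsNamespace(const std::string& db) const { return true; }
        virtual void onInsert(const std::string& db) {}   // no-op default, like OpObserverNoop
    };

    struct ConfigOnlyObserver : Observer {
        bool wantsNamespace(const std::string& db) const override { return db == "config"; }
        void onInsert(const std::string& db) override { std::cout << "handled " << db << '\n'; }
    };

    int main() {
        std::vector<std::unique_ptr<Observer>> registry;
        registry.push_back(std::make_unique<ConfigOnlyObserver>());
        for (const auto& db : {std::string("test"), std::string("config")})
            for (auto& o : registry)
                if (o->wantsNamespace(db)) o->onInsert(db);   // only "config" is dispatched
    }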
- void postRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - bool stayTemp) final {} - void onImportCollection(OperationContext* opCtx, - const UUID& importUUID, - const NamespaceString& nss, - long long numRecords, - long long dataSize, - const BSONObj& catalogEntry, - const BSONObj& storageMetadata, - bool isDryRun) final {} - - void onCreateGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID) final{}; - - void onModifyCollectionShardingIndexCatalog(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc) final {} - - void onDropGlobalIndex(OperationContext* opCtx, - const NamespaceString& globalIndexNss, - const UUID& globalIndexUUID, - long long numKeys) final{}; - - void onCreateIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - BSONObj indexDoc, - bool fromMigrate) final {} - - void onStartIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onStartIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onAbortIndexBuildSinglePhase(OperationContext* opCtx, const NamespaceString& nss) final {} - - void onCommitIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - bool fromMigrate) final {} - - void onAbortIndexBuild(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& collUUID, - const UUID& indexBuildUUID, - const std::vector& indexes, - const Status& cause, - bool fromMigrate) final {} - - void onInternalOpMessage(OperationContext* opCtx, - const NamespaceString& nss, - const boost::optional& uuid, - const BSONObj& msgObj, - const boost::optional o2MsgObj, - const boost::optional preImageOpTime, - const boost::optional postImageOpTime, - const boost::optional prevWriteOpTimeInTransaction, - const boost::optional slot) final {} - - void onCreateCollection(OperationContext* opCtx, - const CollectionPtr& coll, - const NamespaceString& collectionName, - const CollectionOptions& options, - const BSONObj& idIndex, - const OplogSlot& createOpTime, - bool fromMigrate) final {} - - void onCollMod(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const BSONObj& collModCmd, - const CollectionOptions& oldCollOptions, - boost::optional indexInfo) final {} - - void onDropIndex(OperationContext* opCtx, - const NamespaceString& nss, - const UUID& uuid, - const std::string& indexName, - const BSONObj& indexInfo) final {} - - using OpObserver::preRenameCollection; - repl::OpTime preRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final { - return repl::OpTime(); - } - - using OpObserver::onRenameCollection; - void onRenameCollection(OperationContext* opCtx, - const NamespaceString& fromCollection, - const NamespaceString& toCollection, - const UUID& uuid, - const boost::optional& dropTargetUUID, - std::uint64_t numRecords, - bool stayTemp) final {} - - void onApplyOps(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& applyOpCmd) final {} - 
- void onEmptyCapped(OperationContext* opCtx, - const NamespaceString& collectionName, - const UUID& uuid) final {} - - void onTransactionStart(OperationContext* opCtx) final {} - - void onUnpreparedTransactionCommit(OperationContext* opCtx, - const TransactionOperations& transactionOperations) final {} - - void onBatchedWriteStart(OperationContext* opCtx) final {} - - void onBatchedWriteCommit(OperationContext* opCtx) final {} - - void onBatchedWriteAbort(OperationContext* opCtx) final {} - - void onPreparedTransactionCommit( - OperationContext* opCtx, - OplogSlot commitOplogEntryOpTime, - Timestamp commitTimestamp, - const std::vector& statements) noexcept final {} - - std::unique_ptr preTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - Date_t wallClockTime) final { - return nullptr; - } - - void onTransactionPrepare( - OperationContext* opCtx, - const std::vector& reservedSlots, - const TransactionOperations& transactionOperations, - const ApplyOpsOplogSlotAndOperationAssignment& applyOpsOperationAssignment, - size_t numberOfPrePostImagesToWrite, - Date_t wallClockTime) final {} - - void onTransactionPrepareNonPrimary(OperationContext* opCtx, - const std::vector& statements, - const repl::OpTime& prepareOpTime) final {} - - void onTransactionAbort(OperationContext* opCtx, - boost::optional abortOplogEntryOpTime) final {} + CollectionDropType dropType, + bool markFromMigrate) final; - void onMajorityCommitPointUpdate(ServiceContext* service, - const repl::OpTime& newCommitPoint) final {} + void onReplicationRollback(OperationContext* opCtx, const RollbackObserverInfo& rbInfo) final; }; } // namespace mongo diff --git a/src/mongo/idl/cluster_server_parameter_op_observer_test.cpp b/src/mongo/idl/cluster_server_parameter_op_observer_test.cpp index 05280fcd82b55..449fbd05a9f0a 100644 --- a/src/mongo/idl/cluster_server_parameter_op_observer_test.cpp +++ b/src/mongo/idl/cluster_server_parameter_op_observer_test.cpp @@ -27,11 +27,44 @@ * it in the license file. 
*/ +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/catalog/create_collection.h" #include "mongo/db/catalog_raii.h" +#include "mongo/db/client.h" +#include "mongo/db/commands/create_gen.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/server_parameter_with_storage.h" +#include "mongo/db/storage/write_unit_of_work.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/cluster_server_parameter_gen.h" #include "mongo/idl/cluster_server_parameter_op_observer.h" +#include "mongo/idl/cluster_server_parameter_test_gen.h" #include "mongo/idl/cluster_server_parameter_test_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -107,8 +140,8 @@ class ClusterServerParameterOpObserverTest : public ClusterServerParameterTestBa auto opCtx = cc().makeOperationContext(); WriteUnitOfWork wuow(opCtx.get()); AutoGetCollection autoColl(opCtx.get(), nss, MODE_IX); - observer.aboutToDelete(opCtx.get(), *autoColl, deletedDoc); OplogDeleteEntryArgs args; + observer.aboutToDelete(opCtx.get(), *autoColl, deletedDoc, &args); args.deletedDoc = includeDeletedDoc ? &deletedDoc : nullptr; observer.onDelete(opCtx.get(), *autoColl, 1 /* StmtId */, args); if (commit) @@ -151,6 +184,31 @@ class ClusterServerParameterOpObserverTest : public ClusterServerParameterTestBa ASSERT_EQ(finalCspTest.getStrValue(), initialCspTest.getStrValue()); } + // Asserts that this action causes a failure, and state does not change. 
+ template + void assertFailure(const NamespaceString& nss, + F fn, + const boost::optional& tenantId) { + auto* sp = + ServerParameterSet::getClusterParameterSet()->get(kCSPTest); + ASSERT(sp != nullptr); + + const auto initialCPTime = sp->getClusterParameterTime(tenantId); + ClusterServerParameterTest initialCspTest = sp->getValue(tenantId); + bool failed = false; + try { + fn(nss); + } catch (const DBException&) { + failed = true; + } + ASSERT(failed); + ClusterServerParameterTest finalCspTest = sp->getValue(tenantId); + + ASSERT_EQ(sp->getClusterParameterTime(tenantId), initialCPTime); + ASSERT_EQ(finalCspTest.getIntValue(), initialCspTest.getIntValue()); + ASSERT_EQ(finalCspTest.getStrValue(), initialCspTest.getStrValue()); + } + std::pair initializeState() { Timestamp now(time(nullptr)); const auto doc = @@ -196,6 +254,13 @@ class ClusterServerParameterOpObserverTest : public ClusterServerParameterTestBa assertIgnored(NamespaceString::makeClusterParametersNSS(kTenantId), fn, tenantId); } + template + void assertFailsOnlyCPNamespace(F fn, const boost::optional& tenantId) { + assertIgnoredOtherNamespaces(fn, tenantId); + assertFailure(NamespaceString::makeClusterParametersNSS(boost::none), fn, tenantId); + assertFailure(NamespaceString::makeClusterParametersNSS(kTenantId), fn, tenantId); + } + void assertParameterState(int line, const boost::optional& tenantId, int intVal, @@ -247,12 +312,12 @@ TEST_F(ClusterServerParameterOpObserverTest, OnInsertRecord) { const auto multiStrValue = "OnInsertRecord.multi"; ASSERT_LT(singleLogicalTime, multiLogicalTime); - doInserts(NamespaceString::kClusterParametersNamespace, - { - BSON(ClusterServerParameter::k_idFieldName << "ignored"), - makeClusterParametersDoc(multiLogicalTime, multiIntValue, multiStrValue), - BSON(ClusterServerParameter::k_idFieldName << "alsoIgnored"), - }); + doInserts( + NamespaceString::kClusterParametersNamespace, + { + makeClusterParametersDoc(multiLogicalTime, multiIntValue, multiStrValue, "cspTest2"), + makeClusterParametersDoc(multiLogicalTime, multiIntValue, multiStrValue), + }); ASSERT_PARAMETER_STATE(boost::none, multiIntValue, multiStrValue, multiLogicalTime); ASSERT_PARAMETER_STATE(kTenantId, kInitialTenantIntValue, kInitialTenantStrValue); @@ -273,22 +338,22 @@ TEST_F(ClusterServerParameterOpObserverTest, OnInsertRecord) { }, boost::none); - // Unknown CSP record ignored on all namespaces. - assertIgnoredAlways( + // Unknown CSP record fails + assertFailsOnlyCPNamespace( [this](const auto& nss) { doInserts(nss, {BSON("_id" << "ignored")}); }, boost::none); - // Unknown CSP, multi-insert. - assertIgnoredAlways( + + // Unknown CSP and not unknown CSP fails, multi-insert. + assertFailsOnlyCPNamespace( [this](const auto& nss) { doInserts(nss, - {BSON("_id" - << "ignored"), + {makeClusterParametersDoc(LogicalTime(), 456, "yellow"), BSON("_id" - << "also-ingored")}); + << "ignored")}); }, boost::none); @@ -329,8 +394,8 @@ TEST_F(ClusterServerParameterOpObserverTest, OnUpdateRecord) { }, boost::none); - // Non cluster parameter doc. - assertIgnoredAlways( + // Non cluster parameter doc fails. + assertFailsOnlyCPNamespace( [this](const auto& nss) { doUpdate(nss, BSON(ClusterServerParameter::k_idFieldName << "ignored")); }, @@ -419,7 +484,7 @@ TEST_F(ClusterServerParameterOpObserverTest, onDropDatabase) { // Reinitialize and drop the other tenant's config DB. 
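The new assertFailure() helper above runs an action that is expected to throw a DBException and then verifies the cached parameter state is unchanged. A minimal standalone analogue of that test pattern, with a global int standing in for the cached cluster parameter:

    #include <cassert>
    #include <stdexcept>

    int gParamValue = 42;   // stand-in for the cached cluster parameter state

    template <typename F>
    void assertFailureLeavesStateUntouched(F fn) {
        const int before = gParamValue;
        bool failed = false;
        try {
            fn();
        } catch (const std::exception&) {
            failed = true;
        }
        assert(failed);                 // the action must have thrown
        assert(gParamValue == before);  // and must not have mutated the cache
    }

    int main() {
        assertFailureLeavesStateUntouched([] {
            throw std::runtime_error("unknown cluster server parameter");  // validation rejects it
        });
    }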
initializeState(); - doDropDatabase(DatabaseName(kTenantId, kConfigDB)); + doDropDatabase(DatabaseName::createDatabaseName_forTest(kTenantId, kConfigDB)); ASSERT_PARAMETER_STATE(boost::none, kInitialIntValue, kInitialStrValue); ASSERT_PARAMETER_STATE(kTenantId, kDefaultIntValue, kDefaultStrValue); @@ -498,8 +563,10 @@ TEST_F(ClusterServerParameterOpObserverTest, abortsAfterObservation) { ASSERT_PARAMETER_STATE(boost::none, kInitialIntValue, kInitialStrValue); ASSERT_PARAMETER_STATE(kTenantId, kInitialTenantIntValue, kInitialTenantStrValue); - doDropDatabase(DatabaseName(boost::none, kConfigDB), false /* commit */); - doDropDatabase(DatabaseName(kTenantId, kConfigDB), false /* commit */); + doDropDatabase(DatabaseName::createDatabaseName_forTest(boost::none, kConfigDB), + false /* commit */); + doDropDatabase(DatabaseName::createDatabaseName_forTest(kTenantId, kConfigDB), + false /* commit */); ASSERT_PARAMETER_STATE(boost::none, kInitialIntValue, kInitialStrValue); ASSERT_PARAMETER_STATE(kTenantId, kInitialTenantIntValue, kInitialTenantStrValue); diff --git a/src/mongo/idl/cluster_server_parameter_refresher.cpp b/src/mongo/idl/cluster_server_parameter_refresher.cpp index 6b35914cb57d6..50d21bd48c9c5 100644 --- a/src/mongo/idl/cluster_server_parameter_refresher.cpp +++ b/src/mongo/idl/cluster_server_parameter_refresher.cpp @@ -27,27 +27,69 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/idl/cluster_server_parameter_refresher.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/audit.h" -#include "mongo/db/commands/list_databases_for_all_tenants_gen.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/client.h" #include "mongo/db/feature_compatibility_version_parser.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/transaction/transaction_api.h" -#include "mongo/db/vector_clock.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/idl/cluster_server_parameter_common.h" +#include "mongo/idl/cluster_server_parameter_refresher.h" #include "mongo/idl/cluster_server_parameter_refresher_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" -#include "mongo/util/stacktrace.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl 
MONGO_FAIL_POINT_DEFINE(skipClusterParameterRefresh); +MONGO_FAIL_POINT_DEFINE(blockAndFailClusterParameterRefresh); +MONGO_FAIL_POINT_DEFINE(blockAndSucceedClusterParameterRefresh); +MONGO_FAIL_POINT_DEFINE(countPromiseWaitersClusterParameterRefresh); + namespace mongo { namespace { @@ -64,6 +106,11 @@ getFCVAndClusterParametersFromConfigServer() { // Use an alternative client region, because we call refreshParameters both from the internal // refresher process and from getClusterParameter. auto altClient = getGlobalServiceContext()->makeClient("clusterParameterRefreshTransaction"); + // TODO(SERVER-74660): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*altClient.get()); + altClient.get()->setSystemOperationUnkillableByStepdown(lk); + } AlternativeClientRegion clientRegion(altClient); auto opCtx = cc().makeOperationContext(); auto as = AuthorizationSession::get(cc()); @@ -144,9 +191,7 @@ getFCVAndClusterParametersFromConfigServer() { auto executor = Grid::get(opCtx.get())->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - txn_api::SyncTransactionWithRetries txn( - opCtx.get(), sleepInlineExecutor, nullptr, inlineExecutor); + txn_api::SyncTransactionWithRetries txn(opCtx.get(), executor, nullptr, inlineExecutor); txn.run(opCtx.get(), doFetch); return {*fcv, *allDocs}; } @@ -181,6 +226,49 @@ void ClusterServerParameterRefresher::setPeriod(Milliseconds period) { } Status ClusterServerParameterRefresher::refreshParameters(OperationContext* opCtx) { + stdx::unique_lock lk(_mutex); + if (_refreshPromise) { + // We expect the future to never be ready here, because we complete the promise and then + // delete it under a lock, meaning new futures taken out on the current promise under a lock + // are always on active promises. If the future is ready here, the below logic will still + // work, but this is unexpected. + auto future = _refreshPromise->getFuture(); + if (MONGO_unlikely(future.isReady())) { + LOGV2_DEBUG(7782200, + 3, + "Cluster parameter refresh request unexpectedly joining on " + "already-fulfilled refresh call"); + } + countPromiseWaitersClusterParameterRefresh.shouldFail(); + // Wait for the job to finish and return its result with getNoThrow. + lk.unlock(); + return future.getNoThrow(); + } + // No active job; make a new promise and run the job ourselves. + _refreshPromise = std::make_unique>(); + lk.unlock(); + // Run _refreshParameters unlocked to allow new futures to be gotten from our promise. + Status status = _refreshParameters(opCtx); + lk.lock(); + // Complete the promise and detach it from the object, allowing a new job to be created the + // next time refreshParameters is run. Note that the futures of this promise hold references to + // it which will still be valid after we detach it from the object. 
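The refreshParameters() change above coalesces concurrent refresh requests: the first caller creates a promise, runs the refresh unlocked, and fulfills it, while later callers just wait on a future of the in-flight job. A simplified model of that shape using std::promise/std::shared_future instead of the server's SharedPromise (illustrative, not the real class):

    #include <chrono>
    #include <future>
    #include <iostream>
    #include <memory>
    #include <mutex>
    #include <thread>

    class Refresher {
    public:
        int refresh() {
            std::unique_lock<std::mutex> lk(_mutex);
            if (_inflight) {                       // someone else is already refreshing
                auto fut = *_inflight;
                lk.unlock();
                return fut.get();                  // wait for their result
            }
            std::promise<int> p;
            _inflight = std::make_shared<std::shared_future<int>>(p.get_future().share());
            lk.unlock();
            int status = doRefresh();              // run unlocked so waiters can attach
            lk.lock();
            p.set_value(status);
            _inflight.reset();                     // next call starts a fresh refresh
            return status;
        }
    private:
        int doRefresh() {
            std::this_thread::sleep_for(std::chrono::milliseconds(50));
            return 0;                              // "Status::OK"
        }
        std::mutex _mutex;
        std::shared_ptr<std::shared_future<int>> _inflight;
    };

    int main() {
        Refresher r;
        int s1 = -1, s2 = -1;
        std::thread t1([&] { s1 = r.refresh(); });
        std::thread t2([&] { s2 = r.refresh(); });
        t1.join();
        t2.join();
        std::cout << s1 << " " << s2 << '\n';      // both callers observe the same status
    }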
+ _refreshPromise->setFrom(status); + _refreshPromise = nullptr; + return status; +} + +Status ClusterServerParameterRefresher::_refreshParameters(OperationContext* opCtx) { + if (MONGO_unlikely(blockAndFailClusterParameterRefresh.shouldFail())) { + blockAndFailClusterParameterRefresh.pauseWhileSet(); + return Status(ErrorCodes::FailPointEnabled, "failClusterParameterRefresh was enabled"); + } + + if (MONGO_unlikely(blockAndSucceedClusterParameterRefresh.shouldFail())) { + blockAndSucceedClusterParameterRefresh.pauseWhileSet(); + return Status::OK(); + } + invariant(isMongos()); multiversion::FeatureCompatibilityVersion fcv; TenantIdMap> clusterParameterDocs; @@ -300,7 +388,9 @@ void ClusterServerParameterRefresher::start(ServiceContext* serviceCtx, Operatio PeriodicRunner::PeriodicJob job( "ClusterServerParameterRefresher", [serviceCtx](Client* client) { getClusterServerParameterRefresher(serviceCtx)->run(); }, - loadInterval()); + loadInterval(), + // TODO(SERVER-74659): Please revisit if this periodic job could be made killable. + false /*isKillableByStepdown*/); refresher->_job = std::make_unique(periodicRunner->makeJob(std::move(job))); @@ -322,4 +412,12 @@ void ClusterServerParameterRefresher::run() { } } +void ClusterServerParameterRefresher::onShutdown(ServiceContext* serviceCtx) { + // Make sure that we finish the possibly running transaction and don't start any more. + auto& refresher = getClusterServerParameterRefresher(serviceCtx); + if (refresher && refresher->_job && refresher->_job->isValid()) { + refresher->_job->pause(); + } +} + } // namespace mongo diff --git a/src/mongo/idl/cluster_server_parameter_refresher.h b/src/mongo/idl/cluster_server_parameter_refresher.h index 5eaeb5e6557d5..0c1546107c1b4 100644 --- a/src/mongo/idl/cluster_server_parameter_refresher.h +++ b/src/mongo/idl/cluster_server_parameter_refresher.h @@ -28,8 +28,19 @@ */ #pragma once +#include + +#include + +#include "mongo/base/status.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/periodic_runner.h" +#include "mongo/util/version/releases.h" namespace mongo { @@ -50,6 +61,12 @@ class ClusterServerParameterRefresher { */ static void start(ServiceContext* serviceCtx, OperationContext* opCtx); + /** + * Callback to be called when the mongos is shutting down. Will stop the currently running + * refresh, if one is running, and stop running periodically. + */ + static void onShutdown(ServiceContext* serviceCtx); + /** * Refreshes all cluster server parameters from the config servers. Called periodically in the * run method, which executes in a background thread. Also called in-line during @@ -57,17 +74,24 @@ class ClusterServerParameterRefresher { */ Status refreshParameters(OperationContext* opCtx); + // What the actual refresh job runs to do a refresh. + Status _refreshParameters(OperationContext* opCtx); + /** * Set the period of the background job. This should only be used internally (by the * setParameter). */ void setPeriod(Milliseconds period); + // Public for testing. 
+ std::unique_ptr> _refreshPromise; + private: void run(); std::unique_ptr _job; multiversion::FeatureCompatibilityVersion _lastFcv; + Mutex _mutex = MONGO_MAKE_LATCH("ClusterServerParameterRefresher::_mutex"); }; Status clusterServerParameterRefreshIntervalSecsNotify(const int& newValue); diff --git a/src/mongo/idl/cluster_server_parameter_refresher_test.cpp b/src/mongo/idl/cluster_server_parameter_refresher_test.cpp new file mode 100644 index 0000000000000..8933a76e8d1e7 --- /dev/null +++ b/src/mongo/idl/cluster_server_parameter_refresher_test.cpp @@ -0,0 +1,134 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/idl/cluster_server_parameter_refresher.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/fail_point.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +namespace mongo { +namespace { +class ClusterServerParameterRefresherTest : public ServiceContextTest { +public: + void setUp() override { + _opCtx = getClient()->makeOperationContext(); + _refresher = std::make_unique(); + } + + OperationContext* opCtx() { + return _opCtx.get(); + } + + ServiceContext::UniqueOperationContext _opCtx; + std::unique_ptr _refresher; +}; + +TEST_F(ClusterServerParameterRefresherTest, testRefresherConcurrency) { + auto runRefreshTestIteration = [&](StringData blockingFPName, Status expectedStatus) { + ASSERT_EQ(_refresher->_refreshPromise, nullptr); + // Set up failpoints and get their initial times entered. 
+ auto blockFp = globalFailPointRegistry().find(blockingFPName); + auto waiterCountFp = + globalFailPointRegistry().find("countPromiseWaitersClusterParameterRefresh"); + auto initBlockFpTE = blockFp->setMode(FailPoint::alwaysOn); + auto initWaiterFpTE = waiterCountFp->setMode(FailPoint::alwaysOn); + + // Since there should be no active job at this point, we expect thread 1 to create the + // promise, run _refreshParameters, and block on the failpoint. + stdx::thread firstRun([&]() { + Status status = _refresher->refreshParameters(opCtx()); + ASSERT_EQ(status, expectedStatus); + }); + + // Wait for thread 1 to reach the blocking failpoint. Note that each time we enter the + // blocking failpoint, we increment timesEntered by 2, because we first check for shouldFail + // and then call pauseWhileSet. + blockFp->waitForTimesEntered(initBlockFpTE + 2); + ASSERT(_refresher->_refreshPromise && !_refresher->_refreshPromise->getFuture().isReady()); + + // Toggle the countPromiseWaiters failpoint to get the times entered. This count should not + // have changed from the initial count as we have no futures waiting on the promise yet. + waiterCountFp->setMode(FailPoint::off); + auto waiterCountAfterBlock = waiterCountFp->setMode(FailPoint::alwaysOn); + ASSERT_EQ(waiterCountAfterBlock, initWaiterFpTE); + + // Threads 2 and 3 should both see that there is an active promise and take out a future on + // it, not entering _refreshParameters themselves. + stdx::thread secondRun([&]() { + Status status = _refresher->refreshParameters(opCtx()); + ASSERT_EQ(status, expectedStatus); + }); + stdx::thread thirdRun([&]() { + Status status = _refresher->refreshParameters(opCtx()); + ASSERT_EQ(status, expectedStatus); + }); + + // Allow both new threads to hit the future wait before unblocking the first thread + waiterCountFp->waitForTimesEntered(initWaiterFpTE + 2); + waiterCountFp->setMode(FailPoint::off); + + // We expect that neither of threads 2 and 3 entered _refreshParameters, so neither should + // have hit the blocking failpoint; assert its count is the same as before. + auto afterSleepTE = blockFp->setMode(FailPoint::off); + ASSERT_EQ(afterSleepTE, initBlockFpTE + 2); + + // The first thread should now finish, setting the job to ready and notifying threads 2 and + // 3, which should finish. + firstRun.join(); + ASSERT_EQ(_refresher->_refreshPromise, nullptr); + + secondRun.join(); + thirdRun.join(); + }; + + const int major_iters = 3; + const int minor_iters = 2; + for (int i = 0; i < major_iters; i++) { + // Interlace testing of the OK and failure cases to ensure that we are never getting a stale + // status. + for (int j = 0; j < minor_iters; j++) { + runRefreshTestIteration("blockAndSucceedClusterParameterRefresh", Status::OK()); + } + for (int j = 0; j < minor_iters; j++) { + // Note that status comparison only cares about error code. 
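The concurrency test above choreographs threads with fail points: waitForTimesEntered() tells the test a thread is parked inside the refresh, and turning the fail point off releases it. A standalone analogue of that synchronization primitive, with a "blocking point" that counts entries and holds a thread until released (illustrative only, not the FailPoint API):

    #include <condition_variable>
    #include <iostream>
    #include <mutex>
    #include <thread>

    class BlockingPoint {
    public:
        void enterAndPause() {
            std::unique_lock<std::mutex> lk(_m);
            ++_timesEntered;
            _cv.notify_all();
            _cv.wait(lk, [&] { return _released; });
        }
        void waitForTimesEntered(int n) {
            std::unique_lock<std::mutex> lk(_m);
            _cv.wait(lk, [&] { return _timesEntered >= n; });
        }
        void release() {
            std::lock_guard<std::mutex> lk(_m);
            _released = true;
            _cv.notify_all();
        }
    private:
        std::mutex _m;
        std::condition_variable _cv;
        int _timesEntered = 0;
        bool _released = false;
    };

    int main() {
        BlockingPoint fp;
        std::thread worker([&] { fp.enterAndPause(); std::cout << "refresh finished\n"; });
        fp.waitForTimesEntered(1);   // the test now knows the worker is parked in the refresh
        fp.release();                // let it complete
        worker.join();
    }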
+ runRefreshTestIteration("blockAndFailClusterParameterRefresh", + Status(ErrorCodes::FailPointEnabled, "...")); + } + } +} +} // namespace +} // namespace mongo diff --git a/src/mongo/idl/cluster_server_parameter_test.idl b/src/mongo/idl/cluster_server_parameter_test.idl index c92ea485d5bac..e9143b7642e49 100644 --- a/src/mongo/idl/cluster_server_parameter_test.idl +++ b/src/mongo/idl/cluster_server_parameter_test.idl @@ -57,3 +57,9 @@ server_parameters: description: "Cluster server parameter OpObserver test param" cpp_vartype: ClusterServerParameterTest cpp_varname: cspTestStorage + + cspTest2: + set_at: cluster + description: "Cluster server parameter OpObserver test param #2" + cpp_vartype: ClusterServerParameterTest + cpp_varname: cspTest2Storage diff --git a/src/mongo/idl/cluster_server_parameter_test_util.cpp b/src/mongo/idl/cluster_server_parameter_test_util.cpp index c9a47100db6ae..e06caf171c481 100644 --- a/src/mongo/idl/cluster_server_parameter_test_util.cpp +++ b/src/mongo/idl/cluster_server_parameter_test_util.cpp @@ -29,6 +29,30 @@ #include "mongo/idl/cluster_server_parameter_test_util.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbdirectclient.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/idl/cluster_server_parameter_gen.h" +#include "mongo/idl/cluster_server_parameter_test_gen.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + namespace mongo { namespace cluster_server_parameter_test_util { @@ -44,19 +68,21 @@ void upsert(BSONObj doc, const boost::optional& tenantId) { DBDirectClient client(opCtx); - auto opMsgRequest = OpMsgRequestBuilder::create(DatabaseName(tenantId, "config"), [&] { - write_ops::UpdateCommandRequest updateOp( - NamespaceString::makeClusterParametersNSS(tenantId)); - updateOp.setUpdates({[&] { - write_ops::UpdateOpEntry entry; - entry.setQ(BSON(ClusterServerParameter::k_idFieldName << kCSPTest)); - entry.setU(write_ops::UpdateModification::parseFromClassicUpdate(BSON("$set" << doc))); - entry.setMulti(false); - entry.setUpsert(true); - return entry; - }()}); - return updateOp.toBSON(kMajorityWriteConcern); - }()); + auto opMsgRequest = OpMsgRequestBuilder::create( + DatabaseName::createDatabaseName_forTest(tenantId, "config"), [&] { + write_ops::UpdateCommandRequest updateOp( + NamespaceString::makeClusterParametersNSS(tenantId)); + updateOp.setUpdates({[&] { + write_ops::UpdateOpEntry entry; + entry.setQ(BSON(ClusterServerParameter::k_idFieldName << kCSPTest)); + entry.setU( + write_ops::UpdateModification::parseFromClassicUpdate(BSON("$set" << doc))); + entry.setMulti(false); + entry.setUpsert(true); + return entry; + }()}); + return updateOp.toBSON(kMajorityWriteConcern); + }()); auto res = client.runCommand(opMsgRequest)->getCommandReply(); @@ -74,17 +100,18 @@ void remove(const boost::optional& tenantId) { auto uniqueOpCtx = cc().makeOperationContext(); auto* opCtx = uniqueOpCtx.get(); - auto opMsgRequest = OpMsgRequestBuilder::create(DatabaseName(tenantId, "config"), [&] { - write_ops::DeleteCommandRequest deleteOp( - NamespaceString::makeClusterParametersNSS(tenantId)); 
- deleteOp.setDeletes({[] { - write_ops::DeleteOpEntry entry; - entry.setQ(BSON(ClusterServerParameter::k_idFieldName << kCSPTest)); - entry.setMulti(true); - return entry; - }()}); - return deleteOp.toBSON({}); - }()); + auto opMsgRequest = OpMsgRequestBuilder::create( + DatabaseName::createDatabaseName_forTest(tenantId, "config"), [&] { + write_ops::DeleteCommandRequest deleteOp( + NamespaceString::makeClusterParametersNSS(tenantId)); + deleteOp.setDeletes({[] { + write_ops::DeleteOpEntry entry; + entry.setQ(BSON(ClusterServerParameter::k_idFieldName << kCSPTest)); + entry.setMulti(true); + return entry; + }()}); + return deleteOp.toBSON({}); + }()); auto res = DBDirectClient(opCtx).runCommand(opMsgRequest)->getCommandReply(); @@ -97,9 +124,12 @@ void remove(const boost::optional& tenantId) { uassertStatusOK(response.toStatus()); } -BSONObj makeClusterParametersDoc(const LogicalTime& cpTime, int intValue, StringData strValue) { +BSONObj makeClusterParametersDoc(const LogicalTime& cpTime, + int intValue, + StringData strValue, + StringData parameterName) { ClusterServerParameter csp; - csp.set_id(kCSPTest); + csp.set_id(parameterName); csp.setClusterParameterTime(cpTime); ClusterServerParameterTest cspt; diff --git a/src/mongo/idl/cluster_server_parameter_test_util.h b/src/mongo/idl/cluster_server_parameter_test_util.h index bb412dd877dad..2b1b034db5a46 100644 --- a/src/mongo/idl/cluster_server_parameter_test_util.h +++ b/src/mongo/idl/cluster_server_parameter_test_util.h @@ -29,15 +29,30 @@ #pragma once +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/change_stream_options_manager.h" +#include "mongo/db/client.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/logical_time.h" #include "mongo/db/multitenancy_gen.h" +#include "mongo/db/repl/member_state.h" +#include "mongo/db/repl/oplog.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/repl/storage_interface.h" #include "mongo/db/repl/storage_interface_mock.h" #include "mongo/db/service_context_d_test_fixture.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/cluster_server_parameter_gen.h" #include "mongo/idl/cluster_server_parameter_test_gen.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/unittest.h" namespace mongo { @@ -92,7 +107,10 @@ class ClusterServerParameterTestBase : public ServiceContextMongoDTest { void upsert(BSONObj doc, const boost::optional& tenantId = boost::none); void remove(const boost::optional& tenantId = boost::none); -BSONObj makeClusterParametersDoc(const LogicalTime& cpTime, int intValue, StringData strValue); +BSONObj makeClusterParametersDoc(const LogicalTime& cpTime, + int intValue, + StringData strValue, + StringData parameterName = kCSPTest); } // namespace cluster_server_parameter_test_util } // namespace mongo diff --git a/src/mongo/idl/command_generic_argument.cpp b/src/mongo/idl/command_generic_argument.cpp index 83532622893e0..bbcdde9277d92 100644 --- a/src/mongo/idl/command_generic_argument.cpp +++ b/src/mongo/idl/command_generic_argument.cpp @@ -27,8 +27,7 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" #include "mongo/idl/generic_argument_gen.h" namespace mongo { diff --git a/src/mongo/idl/command_generic_argument_test.cpp b/src/mongo/idl/command_generic_argument_test.cpp index 74be68aa13baa..9327ae0efc854 100644 --- a/src/mongo/idl/command_generic_argument_test.cpp +++ b/src/mongo/idl/command_generic_argument_test.cpp @@ -27,10 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include + +#include #include "mongo/idl/command_generic_argument.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace test { diff --git a/src/mongo/idl/config_option_test.cpp b/src/mongo/idl/config_option_test.cpp index cdc56cb0138ca..363f4caf189c0 100644 --- a/src/mongo/idl/config_option_test.cpp +++ b/src/mongo/idl/config_option_test.cpp @@ -27,15 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/idl/config_option_no_init_test_gen.h" +#include "mongo/idl/config_option_test.h" #include "mongo/idl/config_option_test_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cmdline_utils/censor_cmdline.h" #include "mongo/util/cmdline_utils/censor_cmdline_test.h" +#include "mongo/util/options_parser/environment.h" +#include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/options_parser.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" namespace mongo { namespace test { diff --git a/src/mongo/idl/feature_flag_test.cpp b/src/mongo/idl/feature_flag_test.cpp index f0b43850b781f..916a0fbcb8995 100644 --- a/src/mongo/idl/feature_flag_test.cpp +++ b/src/mongo/idl/feature_flag_test.cpp @@ -27,13 +27,25 @@ * it in the license file. 
*/ -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/platform/basic.h" +#include + +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/feature_flag_test_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/version/releases.h" namespace mongo { @@ -51,6 +63,7 @@ class FeatureFlagTest : public unittest::Test { protected: ServerParameter* _featureFlagBlender{nullptr}; ServerParameter* _featureFlagSpoon{nullptr}; + ServerParameter* _featureFlagFork{nullptr}; }; @@ -199,6 +212,28 @@ TEST_F(FeatureFlagTest, RAIIFeatureFlagController) { feature_flags::gFeatureFlagBlender.isEnabled(serverGlobalParams.featureCompatibility)); } +// Test feature flags that should not be FCV Gated +TEST_F(FeatureFlagTest, ShouldBeFCVGatedFalse) { + // Test that feature flag that is enabled and not FCV gated will return true for isEnabled. + // Test newest version + // (Generic FCV reference): feature flag test + serverGlobalParams.mutableFeatureCompatibility.setVersion(multiversion::GenericFCV::kLatest); + + ASSERT_TRUE(feature_flags::gFeatureFlagFork.isEnabled(serverGlobalParams.featureCompatibility)); + + // Test oldest version + // (Generic FCV reference): feature flag test + serverGlobalParams.mutableFeatureCompatibility.setVersion(multiversion::GenericFCV::kLastLTS); + + ASSERT_TRUE(feature_flags::gFeatureFlagFork.isEnabled(serverGlobalParams.featureCompatibility)); + + // Test uninitialized FCV + serverGlobalParams.mutableFeatureCompatibility.setVersion( + multiversion::FeatureCompatibilityVersion::kUnsetDefaultLastLTSBehavior); + + ASSERT_TRUE(feature_flags::gFeatureFlagFork.isEnabled(serverGlobalParams.featureCompatibility)); +} + } // namespace } // namespace mongo diff --git a/src/mongo/idl/generic_args_with_types.idl b/src/mongo/idl/generic_args_with_types.idl index 57e659434a23d..d9965344a8776 100644 --- a/src/mongo/idl/generic_args_with_types.idl +++ b/src/mongo/idl/generic_args_with_types.idl @@ -41,6 +41,7 @@ imports: - "mongo/db/s/forwardable_operation_metadata.idl" - "mongo/db/write_concern_options.idl" - "mongo/rpc/metadata/client_metadata.idl" + - "mongo/rpc/metadata/impersonated_user_metadata.idl" - "mongo/s/sharding_types.idl" # This file lists the generic arguments accepted by all commands, and generic @@ -139,7 +140,7 @@ structs: is_generic_cmd_list: "arg" fields: $audit: - type: AuthenticationMetadata + type: ImpersonatedUserMetadata cpp_name: "dollarAudit" forward_to_shards: false optional: true @@ -217,6 +218,32 @@ structs: forward_to_shards: true optional: true + ClusterTimeSignature: + description: "Structure used in '$clusterTime'." + strict: false + fields: + hash: + type: bindata_generic + forward_from_shards: false + optional: false + keyId: + type: safeInt64 + forward_from_shards: false + optional: false + + ClusterTime: + description: "Field under '$clusterTime' that appears in GenericReplyFields. 
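The ShouldBeFCVGatedFalse test above checks that a flag which is not FCV-gated reports enabled for the latest, last-LTS, and even unset FCV. A hand-rolled sketch of that behaviour, with a gated flag shown for contrast (the types and version numbers here are assumptions, not the server's FeatureFlag class):

    #include <cassert>
    #include <optional>

    struct FeatureFlag {
        bool enabled;
        bool fcvGated;
        int requiredFcv;    // only meaningful when fcvGated is true
        bool isEnabled(std::optional<int> currentFcv) const {
            if (!enabled) return false;
            if (!fcvGated) return true;                    // ignores FCV entirely
            return currentFcv && *currentFcv >= requiredFcv;
        }
    };

    int main() {
        FeatureFlag fork{true, /*fcvGated=*/false, 0};     // like featureFlagFork in the test
        FeatureFlag blender{true, /*fcvGated=*/true, 70};
        assert(fork.isEnabled(std::nullopt));              // enabled even with FCV unset
        assert(!blender.isEnabled(60) && blender.isEnabled(70));
    }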
Matches VectorClock::SignedComponentFormat" + strict: false + fields: + clusterTime: + type: logicalTime + forward_from_shards: false + optional: true + signature: + type: ClusterTimeSignature + forward_from_shards: false + optional: true + GenericReplyFieldsWithTypesV1: # This struct is meant to mirror 'generic_reply_fields_api_v1' in '../idl/generic_argument.idl'. @@ -225,7 +252,7 @@ structs: is_generic_cmd_list: "reply" fields: $clusterTime: - type: logicalTime + type: ClusterTime cpp_name: "dollarClusterTime" forward_from_shards: false optional: true diff --git a/src/mongo/idl/idl_parser.cpp b/src/mongo/idl/idl_parser.cpp index b847cd7e90d45..529237e2e5a9b 100644 --- a/src/mongo/idl/idl_parser.cpp +++ b/src/mongo/idl/idl_parser.cpp @@ -27,14 +27,19 @@ * it in the license file. */ +#include +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include +#include +#include #include #include -#include "mongo/idl/idl_parser.h" - +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/idl/command_generic_argument.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/str.h" namespace mongo { @@ -240,9 +245,8 @@ void IDLParserContext::throwAPIStrictErrorIfApplicable(StringData fieldName) con !_apiStrict); } -NamespaceString IDLParserContext::parseNSCollectionRequired(const DatabaseName& dbName, - const BSONElement& element, - bool allowGlobalCollectionName) { +StringData IDLParserContext::checkAndAssertCollectionName(const BSONElement& element, + bool allowGlobalCollectionName) { const bool isUUID = (element.canonicalType() == canonicalizeBSONType(mongo::BinData) && element.binDataType() == BinDataType::newUUID); uassert(ErrorCodes::BadValue, @@ -255,29 +259,23 @@ NamespaceString IDLParserContext::parseNSCollectionRequired(const DatabaseName& str::stream() << "Invalid command format: the '" << element.fieldNameStringData() << "' field must specify a collection name or 1", element.number() == 1); - return NamespaceString(dbName, collectionlessAggregateCursorCol); + return collectionlessAggregateCursorCol; } uassert(ErrorCodes::BadValue, str::stream() << "collection name has invalid type " << typeName(element.type()), element.canonicalType() == canonicalizeBSONType(mongo::String)); - const NamespaceString nss(dbName, element.valueStringData()); - - uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace specified '" << nss.ns() << "'", - nss.isValid()); - - return nss; + return element.valueStringData(); } -NamespaceStringOrUUID IDLParserContext::parseNsOrUUID(const DatabaseName& dbName, - const BSONElement& element) { +stdx::variant IDLParserContext::checkAndAssertCollectionNameOrUUID( + const BSONElement& element) { if (element.type() == BinData && element.binDataType() == BinDataType::newUUID) { - return {dbName, uassertStatusOK(UUID::parse(element))}; + return uassertStatusOK(UUID::parse(element)); } else { // Ensure collection identifier is not a Command - return {parseNSCollectionRequired(dbName, element, false)}; + return checkAndAssertCollectionName(element, false); } } diff --git a/src/mongo/idl/idl_parser.h b/src/mongo/idl/idl_parser.h index f568fbf05aeb6..d6e950ecee194 100644 --- a/src/mongo/idl/idl_parser.h +++ b/src/mongo/idl/idl_parser.h @@ -29,17 +29,34 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include "mongo/base/data_range.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" 
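The idl_parser.cpp change above replaces parseNsOrUUID(), which built a NamespaceStringOrUUID from a database name, with checkAndAssertCollectionNameOrUUID(), which only classifies the command's first element and returns a variant for the caller to combine with the db name. A compact sketch of that parse result using std::variant (stand-in types, not mongo::UUID or BSONElement):

    #include <cassert>
    #include <iostream>
    #include <stdexcept>
    #include <string>
    #include <variant>

    struct Uuid { std::string bytes; };                     // stand-in for mongo::UUID

    using CollectionNameOrUuid = std::variant<Uuid, std::string>;

    CollectionNameOrUuid checkAndAssertCollectionNameOrUuid(bool isUuid, std::string value) {
        if (isUuid)
            return Uuid{std::move(value)};
        if (value.empty())
            throw std::invalid_argument("collection name has invalid type");  // mirrors the uassert
        return value;
    }

    int main() {
        auto byName = checkAndAssertCollectionNameOrUuid(false, "coll1");
        auto byUuid = checkAndAssertCollectionNameOrUuid(true, "0123456789abcdef");
        assert(std::holds_alternative<std::string>(byName));
        assert(std::holds_alternative<Uuid>(byUuid));
        std::cout << "parsed " << std::get<std::string>(byName) << '\n';
    }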
#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/compiler.h" #include "mongo/stdx/type_traits.h" +#include "mongo/util/assert_util.h" #include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" +#include "mongo/util/version/releases.h" namespace mongo { @@ -362,18 +379,18 @@ class IDLParserContext { void throwAPIStrictErrorIfApplicable(BSONElement fieldName) const; /** - * Equivalent to CommandHelpers::parseNsCollectionRequired. - * 'allowGlobalCollectionName' allows use of global collection name, e.g. {aggregate: 1}. + * Check that the collection name in 'element' is valid. Throws an exception if not valid. + * Returns the collection name otherwise. */ - static NamespaceString parseNSCollectionRequired(const DatabaseName& dbname, - const BSONElement& element, - bool allowGlobalCollectionName); + static StringData checkAndAssertCollectionName(const BSONElement& element, + bool allowGlobalCollectionName); /** - * Equivalent to CommandHelpers::parseNsOrUUID + * Check that the collection name or UUID in 'element' is valid. Throws an exception if not + * valid. Returns either the collection name or UUID otherwise. */ - static NamespaceStringOrUUID parseNsOrUUID(const DatabaseName& dbname, - const BSONElement& element); + static stdx::variant checkAndAssertCollectionNameOrUUID( + const BSONElement& element); /** * Take all the well known command generic arguments from commandPassthroughFields, but ignore diff --git a/src/mongo/idl/idl_test.cpp b/src/mongo/idl/idl_test.cpp index c70ca8adb59e0..34f2400749536 100644 --- a/src/mongo/idl/idl_test.cpp +++ b/src/mongo/idl/idl_test.cpp @@ -27,24 +27,72 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/static_assert.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" #include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/auth/access_checks_gen.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_contract.h" +#include "mongo/db/auth/privilege.h" #include "mongo/db/auth/resource_pattern.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/write_concern_options_gen.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/idl/idl_test.h" +#include "mongo/idl/idl_test_types.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/idl/unittest_gen.h" +#include "mongo/idl/unittest_import_gen.h" +#include "mongo/platform/decimal128.h" #include "mongo/rpc/op_msg.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/serialization_context.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" using namespace mongo::idl::test; using namespace mongo::idl::import; @@ -2470,6 +2518,12 @@ TEST(IDLEnum, ExtraDataEnum) { ASSERT_BSONOBJ_EQ(s2Data, s2Expected); } +TEST(IDLEnum, NonContiguousIntEnum) { + ASSERT_EQ(static_cast(NonContiguousIntEnum::one), 1); + ASSERT_EQ(static_cast(NonContiguousIntEnum::five), 5); + ASSERT_EQ(static_cast(NonContiguousIntEnum::ten), 10); +} + OpMsgRequest makeOMR(BSONObj obj) { OpMsgRequest request; request.body = obj; @@ -2545,24 +2599,32 @@ TEST(IDLCommand, TestConcatentateWithDb_WithTenant) { .append(BasicConcatenateWithDbCommand::kCommandName, "coll1") .append("field1", 3) .append("field2", "five") + .append("expectPrefix", true) .append("$db", prefixedDb) .obj(); + auto targetDoc = BSONObjBuilder{} + .append(BasicConcatenateWithDbCommand::kCommandName, "coll1") + .append("field1", 3) + .append("field2", "five") + .append("$db", prefixedDb) + .obj(); + auto testStruct = BasicConcatenateWithDbCommand::parse(ctxt, makeOMRWithTenant(testDoc, tenantId)); - ASSERT_EQUALS(testStruct.getDbName(), DatabaseName(tenantId, "db")); + ASSERT_EQUALS(testStruct.getDbName(), DatabaseName::createDatabaseName_forTest(tenantId, "db")); ASSERT_EQUALS(testStruct.getNamespace(), NamespaceString::createNamespaceString_forTest(tenantId, "db.coll1")); assert_same_types(); // Positive: Test we can roundtrip from the just parsed document - ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(testStruct)); + ASSERT_BSONOBJ_EQ(targetDoc, serializeCmd(testStruct)); } TEST(IDLCommand, TestConcatentateWithDb_TestConstructor) { 
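The tenant-aware IDL tests below send "$db" spelled as "<tenantId>_db" together with an expectPrefix hint and then check that the parsed command reports the tenant and the bare db name separately. A rough, illustrative sketch of that prefix handling (not the real serializer; the field layout is an assumption based on the test documents):

    #include <cassert>
    #include <optional>
    #include <string>

    struct ParsedDb {
        std::optional<std::string> tenantId;
        std::string db;
    };

    ParsedDb parseDollarDb(const std::string& dollarDb, bool expectPrefix) {
        if (!expectPrefix)
            return {std::nullopt, dollarDb};
        auto sep = dollarDb.find('_');                 // "<tenantId>_<db>"
        return {dollarDb.substr(0, sep), dollarDb.substr(sep + 1)};
    }

    int main() {
        auto parsed = parseDollarDb("6491a2f1c3bfa4e5d7aa1111_db", /*expectPrefix=*/true);
        assert(parsed.tenantId && parsed.db == "db");
    }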
const auto tenantId = TenantId(OID::gen()); - const DatabaseName dbName(tenantId, "db"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(tenantId, "db"); const NamespaceString nss = NamespaceString::createNamespaceString_forTest(dbName, "coll1"); BasicConcatenateWithDbCommand testRequest(nss); @@ -2648,7 +2710,7 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestNSS) { auto testStruct = BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMR(testDoc)); ASSERT_EQUALS(testStruct.getField1(), 3); ASSERT_EQUALS(testStruct.getField2(), "five"); - ASSERT_EQUALS(testStruct.getNamespaceOrUUID().nss().value(), + ASSERT_EQUALS(testStruct.getNamespaceOrUUID().nss(), NamespaceString::createNamespaceString_forTest("db.coll1")); assert_same_types(); @@ -2695,19 +2757,27 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestNSS_WithTenant) { .append(BasicConcatenateWithDbOrUUIDCommand::kCommandName, "coll1") .append("field1", 3) .append("field2", "five") + .append("expectPrefix", true) .append("$db", prefixedDb) .obj(); + auto targetDoc = BSONObjBuilder{} + .append(BasicConcatenateWithDbOrUUIDCommand::kCommandName, "coll1") + .append("field1", 3) + .append("field2", "five") + .append("$db", prefixedDb) + .obj(); + auto testStruct = BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMRWithTenant(testDoc, tenantId)); - ASSERT_EQUALS(testStruct.getDbName(), DatabaseName(tenantId, "db")); - ASSERT_EQUALS(testStruct.getNamespaceOrUUID().nss().value(), + ASSERT_EQUALS(testStruct.getDbName(), DatabaseName::createDatabaseName_forTest(tenantId, "db")); + ASSERT_EQUALS(testStruct.getNamespaceOrUUID().nss(), NamespaceString::createNamespaceString_forTest(tenantId, "db.coll1")); assert_same_types(); // Positive: Test we can roundtrip from the just parsed document - ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(testStruct)); + ASSERT_BSONOBJ_EQ(targetDoc, serializeCmd(testStruct)); } // Positive: demonstrate a command with concatenate with db or uuid - test UUID @@ -2725,7 +2795,7 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestUUID) { auto testStruct = BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMR(testDoc)); ASSERT_EQUALS(testStruct.getField1(), 3); ASSERT_EQUALS(testStruct.getField2(), "five"); - ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid().value(), uuid); + ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid(), uuid); assert_same_types(); @@ -2766,6 +2836,15 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestUUID_WithTenant) { const auto prefixedDb = std::string(str::stream() << tenantId.toString() << "_db"); auto testDoc = + BSONObjBuilder{} + .appendElements(BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << uuid)) + .append("field1", 3) + .append("field2", "five") + .append("expectPrefix", true) + .append("$db", prefixedDb) + .obj(); + + auto targetDoc = BSONObjBuilder{} .appendElements(BSON(BasicConcatenateWithDbOrUUIDCommand::kCommandName << uuid)) .append("field1", 3) @@ -2775,19 +2854,20 @@ TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestUUID_WithTenant) { auto testStruct = BasicConcatenateWithDbOrUUIDCommand::parse(ctxt, makeOMRWithTenant(testDoc, tenantId)); - ASSERT_EQUALS(testStruct.getDbName(), DatabaseName(tenantId, "db")); - ASSERT_EQUALS(testStruct.getNamespaceOrUUID().dbName().value(), DatabaseName(tenantId, "db")); + ASSERT_EQUALS(testStruct.getDbName(), DatabaseName::createDatabaseName_forTest(tenantId, "db")); + ASSERT_EQUALS(testStruct.getNamespaceOrUUID().dbName(), + DatabaseName::createDatabaseName_forTest(tenantId, "db")); 
assert_same_types(); // Positive: Test we can roundtrip from the just parsed document - ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(testStruct)); + ASSERT_BSONOBJ_EQ(targetDoc, serializeCmd(testStruct)); } TEST(IDLCommand, TestConcatentateWithDbOrUUID_TestConstructor) { const UUID uuid = UUID::gen(); const auto tenantId = TenantId(OID::gen()); - const DatabaseName dbName(tenantId, "db"); + const DatabaseName dbName = DatabaseName::createDatabaseName_forTest(tenantId, "db"); const NamespaceStringOrUUID withUUID(dbName, uuid); BasicConcatenateWithDbOrUUIDCommand testRequest1(withUUID); @@ -2877,7 +2957,7 @@ TEST(IDLCommand, TestIgnore) { BasicIgnoredCommand one_new; one_new.setField1(3); one_new.setField2("five"); - one_new.setDbName(DatabaseName(boost::none, "admin")); + one_new.setDbName(DatabaseName::kAdmin); ASSERT_BSONOBJ_EQ(testDocWithDB, serializeCmd(one_new)); } } @@ -2941,7 +3021,7 @@ void TestLoopbackCommandTypeVariant(TestT test_value) { // Test the constructor. CommandT constructed(test_value); - constructed.setDbName(DatabaseName(boost::none, "db")); + constructed.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "db")); if constexpr (std::is_same_v) { ASSERT_BSONOBJ_EQ(stdx::get(parsed.getValue()), test_value); } else { @@ -3798,7 +3878,7 @@ TEST(IDLTypeCommand, TestString) { BSONObjBuilder builder; CommandTypeStringCommand one_new("foo"); one_new.setField1(3); - one_new.setDbName(DatabaseName(boost::none, "db")); + one_new.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "db")); one_new.serialize(BSONObj(), &builder); auto serializedDoc = builder.obj(); @@ -3809,7 +3889,7 @@ TEST(IDLTypeCommand, TestString) { { CommandTypeStringCommand one_new("foo"); one_new.setField1(3); - one_new.setDbName(DatabaseName(boost::none, "db")); + one_new.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "db")); OpMsgRequest reply = one_new.serialize(BSONObj()); ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(one_new)); } @@ -3839,7 +3919,7 @@ TEST(IDLTypeCommand, TestArrayObject) { vec.emplace_back(BSON("sample" << "doc")); CommandTypeArrayObjectCommand one_new(vec); - one_new.setDbName(DatabaseName(boost::none, "db")); + one_new.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "db")); ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(one_new)); } } @@ -3875,7 +3955,7 @@ TEST(IDLTypeCommand, TestStruct) { One_string os; os.setValue("sample"); CommandTypeStructCommand one_new(os); - one_new.setDbName(DatabaseName(boost::none, "db")); + one_new.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "db")); ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(one_new)); } } @@ -3905,7 +3985,7 @@ TEST(IDLTypeCommand, TestStructArray) { os.setValue("sample"); vec.push_back(os); CommandTypeArrayStructCommand one_new(vec); - one_new.setDbName(DatabaseName(boost::none, "db")); + one_new.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "db")); ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(one_new)); } } @@ -3935,7 +4015,7 @@ TEST(IDLTypeCommand, TestUnderscoreCommand) { BSONObjBuilder builder; WellNamedCommand one_new("foo"); one_new.setField1(3); - one_new.setDbName(DatabaseName(boost::none, "db")); + one_new.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "db")); one_new.serialize(BSONObj(), &builder); auto serializedDoc = builder.obj(); @@ -3946,7 +4026,7 @@ TEST(IDLTypeCommand, TestUnderscoreCommand) { { WellNamedCommand one_new("foo"); one_new.setField1(3); - one_new.setDbName(DatabaseName(boost::none, "db")); + 
one_new.setDbName(DatabaseName::createDatabaseName_forTest(boost::none, "db")); ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(one_new)); } } @@ -4048,7 +4128,7 @@ TEST(IDLCommand, BasicNamespaceConstGetterCommand_TestNonConstGetterGeneration) auto testStruct = BasicNamespaceConstGetterCommand::parse(ctxt, makeOMR(testDoc)); ASSERT_EQUALS(testStruct.getField1(), 3); - ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid().value(), uuid); + ASSERT_EQUALS(testStruct.getNamespaceOrUUID().uuid(), uuid); // Verify that both const and non-const getters are generated. assert_same_types< @@ -4060,22 +4140,6 @@ TEST(IDLCommand, BasicNamespaceConstGetterCommand_TestNonConstGetterGeneration) // Test we can roundtrip from the just parsed document. ASSERT_BSONOBJ_EQ(testDoc, serializeCmd(testStruct)); - - // Test mutable getter modifies the command object. - { - auto& nssOrUuid = testStruct.getNamespaceOrUUID(); - const auto nss = NamespaceString::createNamespaceString_forTest("test.coll"); - nssOrUuid.setNss(nss); - nssOrUuid.preferNssForSerialization(); - - BSONObjBuilder builder; - testStruct.serialize(BSONObj(), &builder); - - // Verify that nss was used for serialization over uuid. - ASSERT_BSONOBJ_EQ(builder.obj(), - BSON(BasicNamespaceConstGetterCommand::kCommandName << "coll" - << "field1" << 3)); - } } TEST(IDLTypeCommand, TestCommandWithIDLAnyTypeOwnedField) { @@ -4176,7 +4240,8 @@ TEST(IDLCommand, const auto tenantId = TenantId(OID::gen()); auto testStruct = CommandTypeNamespaceCommand::parse(ctxt, makeOMRWithTenant(testDoc, tenantId)); - ASSERT_EQUALS(testStruct.getDbName(), DatabaseName(tenantId, "admin")); + ASSERT_EQUALS(testStruct.getDbName(), + DatabaseName::createDatabaseName_forTest(tenantId, "admin")); ASSERT_EQUALS(testStruct.getCommandParameter(), NamespaceString::createNamespaceString_forTest(tenantId, "db.coll1")); assert_same_types(); @@ -4200,7 +4265,8 @@ TEST(IDLCommand, TestCommandTypeNamespaceCommand_WithMultitenancySupportOn) { auto testStruct = CommandTypeNamespaceCommand::parse(ctxt, makeOMR(testDoc)); - ASSERT_EQUALS(testStruct.getDbName(), DatabaseName(tenantId, "admin")); + ASSERT_EQUALS(testStruct.getDbName(), + DatabaseName::createDatabaseName_forTest(tenantId, "admin")); // Deserialize called from parse correctly sets the tenantId field. 
ASSERT_EQUALS(testStruct.getCommandParameter(), NamespaceString::createNamespaceString_forTest(tenantId, "db.coll1")); @@ -4324,10 +4390,10 @@ TEST(IDLTypeCommand, TestCommandWithBypassAndNamespaceMember_Parse) { featureFlag); IDLParserContext ctxt("root"); - const char* ns1 = "db.coll1"; - const char* ns2 = "a.b"; - const char* ns3 = "c.d"; - auto nsInfoStructBSON = [&](const char* ns) { + const std::string ns1 = "db.coll1"; + const std::string ns2 = "a.b"; + const std::string ns3 = "c.d"; + auto nsInfoStructBSON = [&](StringData ns) { BSONObjBuilder builder; builder.append("ns", ns); return builder.obj(); @@ -4356,33 +4422,33 @@ TEST(IDLTypeCommand, TestCommandWithBypassAndNamespaceMember_Parse) { auto testStruct = CommandWithBypassAndNamespaceStruct::parse(ctxt, request); auto serializationContextCommand = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextCommand._source, + ASSERT_EQUALS(serializationContextCommand.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextCommand._callerType, + ASSERT_EQUALS(serializationContextCommand.getCallerType(), SerializationContext::CallerType::Request); auto bypassStruct = testStruct.getField1(); auto serializationContextBypass = bypassStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._source, + ASSERT_EQUALS(serializationContextBypass.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextBypass._callerType, + ASSERT_EQUALS(serializationContextBypass.getCallerType(), SerializationContext::CallerType::Request); auto nsInfoStruct = bypassStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._source, + ASSERT_EQUALS(serializationContextNsInfo.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextNsInfo._callerType, + ASSERT_EQUALS(serializationContextNsInfo.getCallerType(), SerializationContext::CallerType::Request); auto nsInfoArray = bypassStruct.getField2(); for (const auto& nsInfo : nsInfoArray) { auto serializationContextNsInfoArr = nsInfo.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfoArr._source, + ASSERT_EQUALS(serializationContextNsInfoArr.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextNsInfoArr._callerType, + ASSERT_EQUALS(serializationContextNsInfoArr.getCallerType(), SerializationContext::CallerType::Request); } } @@ -4400,10 +4466,10 @@ TEST(IDLTypeCommand, TestStructWithBypassAndNamespaceMember_Parse) { multitenancySupport ? 
boost::make_optional(TenantId(OID::gen())) : boost::none; IDLParserContext ctxt("root", false, tenantId); - const char* ns1 = "db.coll1"; - const char* ns2 = "a.b"; - const char* ns3 = "c.d"; - auto nsInfoStructBSON = [&](const char* ns) { + const std::string ns1 = "db.coll1"; + const std::string ns2 = "a.b"; + const std::string ns3 = "c.d"; + auto nsInfoStructBSON = [&](StringData ns) { BSONObjBuilder builder; builder.append("ns", ns); return builder.obj(); @@ -4421,24 +4487,24 @@ TEST(IDLTypeCommand, TestStructWithBypassAndNamespaceMember_Parse) { auto testStruct = BypassStruct::parse(ctxt, testDoc); auto serializationContextBypass = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._source, + ASSERT_EQUALS(serializationContextBypass.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextBypass._callerType, + ASSERT_EQUALS(serializationContextBypass.getCallerType(), SerializationContext::CallerType::None); auto nsInfoStruct = testStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._source, + ASSERT_EQUALS(serializationContextNsInfo.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextNsInfo._callerType, + ASSERT_EQUALS(serializationContextNsInfo.getCallerType(), SerializationContext::CallerType::None); auto nsInfoArray = testStruct.getField2(); for (const auto& nsInfo : nsInfoArray) { auto serializationContextNsInfoArr = nsInfo.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfoArr._source, + ASSERT_EQUALS(serializationContextNsInfoArr.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextNsInfoArr._callerType, + ASSERT_EQUALS(serializationContextNsInfoArr.getCallerType(), SerializationContext::CallerType::None); } } @@ -4456,10 +4522,10 @@ TEST(IDLTypeCommand, TestStructWithBypassReplyAndNamespaceMember_Parse) { multitenancySupport ? 
boost::make_optional(TenantId(OID::gen())) : boost::none; IDLParserContext ctxt("root", false, tenantId); - const char* ns1 = "db.coll1"; - const char* ns2 = "a.b"; - const char* ns3 = "c.d"; - auto nsInfoStructBSON = [&](const char* ns) { + const std::string ns1 = "db.coll1"; + const std::string ns2 = "a.b"; + const std::string ns3 = "c.d"; + auto nsInfoStructBSON = [&](StringData ns) { BSONObjBuilder builder; builder.append("ns", ns); return builder.obj(); @@ -4477,24 +4543,24 @@ TEST(IDLTypeCommand, TestStructWithBypassReplyAndNamespaceMember_Parse) { auto testStruct = BypassReplyStruct::parse(ctxt, testDoc); auto serializationContextBypass = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._source, + ASSERT_EQUALS(serializationContextBypass.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextBypass._callerType, + ASSERT_EQUALS(serializationContextBypass.getCallerType(), SerializationContext::CallerType::Reply); auto nsInfoStruct = testStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._source, + ASSERT_EQUALS(serializationContextNsInfo.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextNsInfo._callerType, + ASSERT_EQUALS(serializationContextNsInfo.getCallerType(), SerializationContext::CallerType::Reply); auto nsInfoArray = testStruct.getField2(); for (const auto& nsInfo : nsInfoArray) { auto serializationContextNsInfoArr = nsInfo.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfoArr._source, + ASSERT_EQUALS(serializationContextNsInfoArr.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextNsInfoArr._callerType, + ASSERT_EQUALS(serializationContextNsInfoArr.getCallerType(), SerializationContext::CallerType::Reply); } } @@ -4512,23 +4578,23 @@ TEST(IDLTypeCommand, TestCommandWithBypassAndNamespaceMember_EmptyConstruct) { auto testStruct = CommandWithBypassAndNamespaceStruct(); auto serializationContextCommand = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextCommand._source, + ASSERT_EQUALS(serializationContextCommand.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextCommand._callerType, + ASSERT_EQUALS(serializationContextCommand.getCallerType(), SerializationContext::CallerType::Request); auto bypassStruct = testStruct.getField1(); auto serializationContextBypass = bypassStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._source, + ASSERT_EQUALS(serializationContextBypass.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextBypass._callerType, + ASSERT_EQUALS(serializationContextBypass.getCallerType(), SerializationContext::CallerType::Request); auto nsInfoStruct = bypassStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._source, + ASSERT_EQUALS(serializationContextNsInfo.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextNsInfo._callerType, + ASSERT_EQUALS(serializationContextNsInfo.getCallerType(), SerializationContext::CallerType::Request); // the vector container is empty, which means that the SerializationContext obj's will @@ -4551,16 +4617,16 @@ TEST(IDLTypeCommand, TestStructWithBypassAndNamespaceMember_EmptyConstruct) { auto testStruct = BypassStruct(); auto serializationContextBypass = 
testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._source, + ASSERT_EQUALS(serializationContextBypass.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextBypass._callerType, + ASSERT_EQUALS(serializationContextBypass.getCallerType(), SerializationContext::CallerType::None); auto nsInfoStruct = testStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._source, + ASSERT_EQUALS(serializationContextNsInfo.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextNsInfo._callerType, + ASSERT_EQUALS(serializationContextNsInfo.getCallerType(), SerializationContext::CallerType::None); // the vector container is empty, which means that the SerializationContext obj's will @@ -4583,16 +4649,16 @@ TEST(IDLTypeCommand, TestStructWithBypassReplyAndNamespaceMember_EmptyConstruct) auto testStruct = BypassReplyStruct(); auto serializationContextBypass = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._source, + ASSERT_EQUALS(serializationContextBypass.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextBypass._callerType, + ASSERT_EQUALS(serializationContextBypass.getCallerType(), SerializationContext::CallerType::Reply); auto nsInfoStruct = testStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._source, + ASSERT_EQUALS(serializationContextNsInfo.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextNsInfo._callerType, + ASSERT_EQUALS(serializationContextNsInfo.getCallerType(), SerializationContext::CallerType::Reply); // the vector container is empty, which means that the SerializationContext obj's will @@ -4614,7 +4680,7 @@ TEST(IDLTypeCommand, TestCommandWithBypassAndNamespaceMember_ConstructWithArgsNo boost::optional tenantId = multitenancySupport ? 
boost::make_optional(TenantId(OID::gen())) : boost::none; - const char* ns1 = "db.coll1"; + const std::string ns1 = "db.coll1"; NamespaceInfoStruct nsArg( NamespaceString::createNamespaceString_forTest(tenantId, ns1)); @@ -4626,34 +4692,34 @@ TEST(IDLTypeCommand, TestCommandWithBypassAndNamespaceMember_ConstructWithArgsNo auto testStruct = CommandWithBypassAndNamespaceStruct(bypassArg); auto serializationContextCommand = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextCommand._source, + ASSERT_EQUALS(serializationContextCommand.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextCommand._callerType, + ASSERT_EQUALS(serializationContextCommand.getCallerType(), SerializationContext::CallerType::Request); // bypassArg was NOT passed in any SerializationContext flags so its flags are the // default auto bypassStruct = testStruct.getField1(); auto serializationContextBypass = bypassStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._source, + ASSERT_EQUALS(serializationContextBypass.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextBypass._callerType, + ASSERT_EQUALS(serializationContextBypass.getCallerType(), SerializationContext::CallerType::None); // nsArg was NOT passed in any SerializationContext flags so its flags are the default auto nsInfoStruct = bypassStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._source, + ASSERT_EQUALS(serializationContextNsInfo.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextNsInfo._callerType, + ASSERT_EQUALS(serializationContextNsInfo.getCallerType(), SerializationContext::CallerType::None); auto nsInfoArray = bypassStruct.getField2(); for (const auto& nsInfo : nsInfoArray) { auto serializationContextNsInfoArr = nsInfo.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfoArr._source, + ASSERT_EQUALS(serializationContextNsInfoArr.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextNsInfoArr._callerType, + ASSERT_EQUALS(serializationContextNsInfoArr.getCallerType(), SerializationContext::CallerType::None); } } @@ -4670,7 +4736,7 @@ TEST(IDLTypeCommand, TestCommandWithBypassAndNamespaceMember_ConstructWithArgs) boost::optional tenantId = multitenancySupport ? 
boost::make_optional(TenantId(OID::gen())) : boost::none; - const char* ns1 = "db.coll1"; + const std::string ns1 = "db.coll1"; NamespaceInfoStruct nsArg(NamespaceString::createNamespaceString_forTest(tenantId, ns1), SerializationContext::stateCommandRequest()); @@ -4680,35 +4746,35 @@ TEST(IDLTypeCommand, TestCommandWithBypassAndNamespaceMember_ConstructWithArgs) auto testStruct = CommandWithBypassAndNamespaceStruct(bypassArg); auto serializationContextCommand = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextCommand._source, + ASSERT_EQUALS(serializationContextCommand.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextCommand._callerType, + ASSERT_EQUALS(serializationContextCommand.getCallerType(), SerializationContext::CallerType::Request); // bypassArg was NOT passed in any SerializationContext flags so its flags are the // default auto bypassStruct = testStruct.getField1(); auto serializationContextBypass = bypassStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._source, + ASSERT_EQUALS(serializationContextBypass.getSource(), SerializationContext::Source::Default); - ASSERT_EQUALS(serializationContextBypass._callerType, + ASSERT_EQUALS(serializationContextBypass.getCallerType(), SerializationContext::CallerType::None); // ...but we can still get the correct SerializationContext state if the state is // manually passed into nested structs auto nsInfoStruct = bypassStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._source, + ASSERT_EQUALS(serializationContextNsInfo.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextNsInfo._callerType, + ASSERT_EQUALS(serializationContextNsInfo.getCallerType(), SerializationContext::CallerType::Request); auto nsInfoArray = bypassStruct.getField2(); for (const auto& nsInfo : nsInfoArray) { auto serializationContextNsInfoArr = nsInfo.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfoArr._source, + ASSERT_EQUALS(serializationContextNsInfoArr.getSource(), SerializationContext::Source::Command); - ASSERT_EQUALS(serializationContextNsInfoArr._callerType, + ASSERT_EQUALS(serializationContextNsInfoArr.getCallerType(), SerializationContext::CallerType::Request); } } @@ -4721,10 +4787,10 @@ TEST(IDLTypeCommand, TestCommandParseExpectPrefix_MissingExpectPrefix) { IDLParserContext ctxt("root"); - const char* ns1 = "db.coll1"; - const char* ns2 = "a.b"; - const char* ns3 = "c.d"; - auto nsInfoStructBSON = [&](const char* ns) { + const std::string ns1 = "db.coll1"; + const std::string ns2 = "a.b"; + const std::string ns3 = "c.d"; + auto nsInfoStructBSON = [&](StringData ns) { BSONObjBuilder builder; builder.append("ns", ns); return builder.obj(); @@ -4749,20 +4815,20 @@ TEST(IDLTypeCommand, TestCommandParseExpectPrefix_MissingExpectPrefix) { auto testStruct = CommandWithBypassAndNamespaceStruct::parse(ctxt, request); auto serializationContextCommand = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextCommand._prefixState, SerializationContext::Prefix::Default); + ASSERT_EQUALS(serializationContextCommand.getPrefix(), SerializationContext::Prefix::Default); auto bypassStruct = testStruct.getField1(); auto serializationContextBypass = bypassStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._prefixState, SerializationContext::Prefix::Default); + 
ASSERT_EQUALS(serializationContextBypass.getPrefix(), SerializationContext::Prefix::Default); auto nsInfoStruct = bypassStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._prefixState, SerializationContext::Prefix::Default); + ASSERT_EQUALS(serializationContextNsInfo.getPrefix(), SerializationContext::Prefix::Default); auto nsInfoArray = bypassStruct.getField2(); for (const auto& nsInfo : nsInfoArray) { auto serializationContextNsInfoArr = nsInfo.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfoArr._prefixState, + ASSERT_EQUALS(serializationContextNsInfoArr.getPrefix(), SerializationContext::Prefix::Default); } } @@ -4774,10 +4840,15 @@ TEST(IDLTypeCommand, TestCommandParseExpectPrefix) { for (bool expectPrefix : {false, true}) { IDLParserContext ctxt("root"); - const char* ns1 = "db.coll1"; - const char* ns2 = "a.b"; - const char* ns3 = "c.d"; - auto nsInfoStructBSON = [&](const char* ns) { + const auto tenantId = TenantId(OID::gen()); + std::string prefix = ""; + if (expectPrefix) + prefix = str::stream() << tenantId.toString() << "_"; + + const std::string ns1 = prefix + "db.coll1"; + const std::string ns2 = prefix + "a.b"; + const std::string ns3 = prefix + "c.d"; + auto nsInfoStructBSON = [&](StringData ns) { BSONObjBuilder builder; builder.append("ns", ns); return builder.obj(); @@ -4793,36 +4864,37 @@ TEST(IDLTypeCommand, TestCommandParseExpectPrefix) { .append("CommandWithBypassAndNamespaceMember", 1) .append("field1", bypassStructBSON()) .append("expectPrefix", expectPrefix) - .append("$db", "admin") + .append("$db", prefix + "admin") .obj(); + std::cout << "expectPrefix: " << (expectPrefix ? "true" : "false") << std::endl; OpMsgRequest request; - const auto tenantId = TenantId(OID::gen()); request = makeOMRWithTenant(testDoc, tenantId); + std::cout << "request.body: " << request.body << std::endl; auto testStruct = CommandWithBypassAndNamespaceStruct::parse(ctxt, request); auto serializationContextCommand = testStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextCommand._prefixState, + ASSERT_EQUALS(serializationContextCommand.getPrefix(), expectPrefix ? SerializationContext::Prefix::IncludePrefix : SerializationContext::Prefix::ExcludePrefix); auto bypassStruct = testStruct.getField1(); auto serializationContextBypass = bypassStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextBypass._prefixState, + ASSERT_EQUALS(serializationContextBypass.getPrefix(), expectPrefix ? SerializationContext::Prefix::IncludePrefix : SerializationContext::Prefix::ExcludePrefix); auto nsInfoStruct = bypassStruct.getField1(); auto serializationContextNsInfo = nsInfoStruct.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfo._prefixState, + ASSERT_EQUALS(serializationContextNsInfo.getPrefix(), expectPrefix ? SerializationContext::Prefix::IncludePrefix : SerializationContext::Prefix::ExcludePrefix); auto nsInfoArray = bypassStruct.getField2(); for (const auto& nsInfo : nsInfoArray) { auto serializationContextNsInfoArr = nsInfo.getSerializationContext(); - ASSERT_EQUALS(serializationContextNsInfoArr._prefixState, + ASSERT_EQUALS(serializationContextNsInfoArr.getPrefix(), expectPrefix ? 
SerializationContext::Prefix::IncludePrefix : SerializationContext::Prefix::ExcludePrefix); } @@ -4837,10 +4909,10 @@ TEST(IDLTypeCommand, TestCommandParseDuplicateExpectPrefix) { featureFlag); IDLParserContext ctxt("root"); - const char* ns1 = "db.coll1"; - const char* ns2 = "a.b"; - const char* ns3 = "c.d"; - auto nsInfoStructBSON = [&](const char* ns) { + const std::string ns1 = "db.coll1"; + const std::string ns2 = "a.b"; + const std::string ns3 = "c.d"; + auto nsInfoStructBSON = [&](StringData ns) { BSONObjBuilder builder; builder.append("ns", ns); return builder.obj(); @@ -4889,20 +4961,27 @@ TEST(IDLAccessCheck, TestSimpleAccessCheck) { TEST(IDLAccessCheck, TestSimplePrivilegeAccessCheck) { AuthorizationContract ac; - ac.addPrivilege(Privilege(ResourcePattern::forClusterResource(), ActionType::addShard)); - ac.addPrivilege(Privilege(ResourcePattern::forClusterResource(), ActionType::serverStatus)); + ac.addPrivilege( + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::addShard)); + ac.addPrivilege( + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::serverStatus)); verifyContract(ac, AccessCheckSimplePrivilege::kAuthorizationContract); } TEST(IDLAccessCheck, TestComplexAccessCheck) { + const auto kTestDB = DatabaseName::createDatabaseName_forTest(boost::none, "test"_sd); AuthorizationContract ac; - ac.addPrivilege(Privilege(ResourcePattern::forClusterResource(), ActionType::addShard)); - ac.addPrivilege(Privilege(ResourcePattern::forClusterResource(), ActionType::serverStatus)); + ac.addPrivilege( + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::addShard)); + ac.addPrivilege( + Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::serverStatus)); - ac.addPrivilege(Privilege(ResourcePattern::forDatabaseName("test"), ActionType::trafficRecord)); + ac.addPrivilege( + Privilege(ResourcePattern::forDatabaseName(kTestDB), ActionType::trafficRecord)); - ac.addPrivilege(Privilege(ResourcePattern::forAnyResource(), ActionType::splitVector)); + ac.addPrivilege( + Privilege(ResourcePattern::forAnyResource(boost::none), ActionType::splitVector)); ac.addAccessCheck(AccessCheckEnum::kIsAuthenticated); ac.addAccessCheck(AccessCheckEnum::kIsAuthorizedToParseNamespaceElement); @@ -4978,6 +5057,7 @@ TEST(IDLFieldTests, TenantOverrideField) { auto obj = BasicIgnoredCommand::parse(IDLParserContext{"nil"}, mkdoc(boost::none)); auto tenant = obj.getDollarTenant(); ASSERT(tenant == boost::none); + ASSERT_FALSE(obj.getSerializationContext().receivedNonPrefixedTenantId()); } // Test passing an tenant id (acting on behalf of a specific tenant) @@ -4986,6 +5066,7 @@ TEST(IDLFieldTests, TenantOverrideField) { auto obj = BasicIgnoredCommand::parse(IDLParserContext{"oid"}, mkdoc(id)); auto tenant = obj.getDollarTenant(); ASSERT(tenant == id); + ASSERT_TRUE(obj.getSerializationContext().receivedNonPrefixedTenantId()); } } @@ -5046,5 +5127,43 @@ TEST(IDLOwnershipTests, ParseSharingOwnershipTmpIDLStruct) { // accessing free'd memory which should error on ASAN and debug builds. 
ASSERT_BSONOBJ_EQ(bson["value"].Obj(), BSON("x" << 42)); } + + +TEST(IDLDangerousIgnoreChecks, ValidateDuplicateChecking) { + IDLParserContext ctxt("root"); + + // Positive: non-strict + { + auto testDoc = BSON("field1" + << "abc" + << "field0" + << "def" + << "extra" << 1); + Struct_with_ignore_extra_duplicates::parse(ctxt, testDoc); + } + + // Positive: duplicate extra + { + auto testDoc = BSON("extra" << 2 << "field1" + << "abc" + << "field0" + << "def" + << "extra" << 1); + Struct_with_ignore_extra_duplicates::parse(ctxt, testDoc); + } + + // Negative: duplicate required field + { + auto testDoc = BSON("field0" + << "ghi" + << "field1" + << "abc" + << "field0" + << "def" + << "extra" << 1); + ASSERT_THROWS(Struct_with_ignore_extra_duplicates::parse(ctxt, testDoc), + AssertionException); + } +} } // namespace } // namespace mongo diff --git a/src/mongo/idl/idl_test.h b/src/mongo/idl/idl_test.h index 9720ed2ab7a22..d73e0fc9a00b7 100644 --- a/src/mongo/idl/idl_test.h +++ b/src/mongo/idl/idl_test.h @@ -27,11 +27,19 @@ * it in the license file. */ +#pragma once + +#include +#include +#include #include #include +// IWYU pragma: no_include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/idl/unittest_import_gen.h" +#include "mongo/util/ctype.h" // IWYU pragma: keep namespace mongo { namespace idl { diff --git a/src/mongo/idl/server_parameter_specialized_test.cpp b/src/mongo/idl/server_parameter_specialized_test.cpp index a8c2cea06c05f..4b16138a6ee15 100644 --- a/src/mongo/idl/server_parameter_specialized_test.cpp +++ b/src/mongo/idl/server_parameter_specialized_test.cpp @@ -27,12 +27,39 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/bson/unordered_fields_bsonobj_comparator.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/server_parameter_specialized_test.h" #include "mongo/idl/server_parameter_specialized_test_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/assert_that.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/matcher.h" +#include "mongo/unittest/matcher_core.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { namespace test { diff --git a/src/mongo/idl/server_parameter_specialized_test.h b/src/mongo/idl/server_parameter_specialized_test.h index eb807fff0829a..418a3c5c4500c 100644 --- a/src/mongo/idl/server_parameter_specialized_test.h +++ b/src/mongo/idl/server_parameter_specialized_test.h @@ -29,11 +29,20 @@ #pragma once -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/logical_time.h" +#include "mongo/platform/basic.h" +#include "mongo/platform/mutex.h" #include "mongo/stdx/mutex.h" namespace mongo { diff --git 
a/src/mongo/idl/server_parameter_test_util.h b/src/mongo/idl/server_parameter_test_util.h index 0a61033d22c8e..0d45d2a30000f 100644 --- a/src/mongo/idl/server_parameter_test_util.h +++ b/src/mongo/idl/server_parameter_test_util.h @@ -26,6 +26,7 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ +#pragma once #include diff --git a/src/mongo/idl/server_parameter_with_storage_test.cpp b/src/mongo/idl/server_parameter_with_storage_test.cpp index b95f8bb16f3a2..a6711cff621c9 100644 --- a/src/mongo/idl/server_parameter_with_storage_test.cpp +++ b/src/mongo/idl/server_parameter_with_storage_test.cpp @@ -28,9 +28,28 @@ */ #include "mongo/db/server_parameter_with_storage.h" + +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/idl/cluster_server_parameter_gen.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/idl/server_parameter_with_storage_test.h" #include "mongo/idl/server_parameter_with_storage_test_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/idl/server_parameter_with_storage_test_structs_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/time_support.h" namespace mongo { AtomicWord test::gStdIntPreallocated; diff --git a/src/mongo/idl/server_parameter_with_storage_test.h b/src/mongo/idl/server_parameter_with_storage_test.h index 98c04d95c7126..5c6a2b360590d 100644 --- a/src/mongo/idl/server_parameter_with_storage_test.h +++ b/src/mongo/idl/server_parameter_with_storage_test.h @@ -29,10 +29,17 @@ #pragma once -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_with_storage_test_structs_gen.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/basic.h" namespace mongo { namespace test { diff --git a/src/mongo/idl/unittest.idl b/src/mongo/idl/unittest.idl index 7f99e3d58c45b..3cad3129f2cdd 100644 --- a/src/mongo/idl/unittest.idl +++ b/src/mongo/idl/unittest.idl @@ -146,6 +146,15 @@ enums: - { bar: "baz" } baz: '"qu\\nx"' + NonContiguousIntEnum: + description: "An enum with non-sequential int values" + type: int + values: + one: 1 + five: 5 + ten: 10 + + ################################################################################################## # # Unit test structs for a single value to ensure type validation works correctly @@ -693,7 +702,7 @@ structs: strict: false fields: update: string - + one_variant: description: UnitTest for a single variant which accepts int or string strict: false @@ -984,6 +993,20 @@ structs: unix: unixEpoch ecma: millisEpoch +################################################################################################## +# +# Test unsafe_dangerous_disable_extra_field_duplicate_checks ignores extra fields +# +################################################################################################## + + struct_with_ignore_extra_duplicates: + description: A struct expecting a unix epoch and a millis epoch. 
+ unsafe_dangerous_disable_extra_field_duplicate_checks: true + strict: false + fields: + field0: string + field1: string + ################################################################################################## # # Test commands diff --git a/src/mongo/logv2/SConscript b/src/mongo/logv2/SConscript index 34f91574b7f74..d17c02baa5722 100644 --- a/src/mongo/logv2/SConscript +++ b/src/mongo/logv2/SConscript @@ -14,7 +14,6 @@ env.CppUnitTest( ], LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/security_token', - '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/util/clock_source_mock', ], ) diff --git a/src/mongo/logv2/bson_formatter.cpp b/src/mongo/logv2/bson_formatter.cpp index edf69478e20b9..e4bae619ffa00 100644 --- a/src/mongo/logv2/bson_formatter.cpp +++ b/src/mongo/logv2/bson_formatter.cpp @@ -31,20 +31,31 @@ #include #include - +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/tenant_id.h" #include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/attributes.h" #include "mongo/logv2/constants.h" #include "mongo/logv2/log_component.h" #include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_tag.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" -#include - namespace mongo::logv2 { namespace { @@ -57,22 +68,28 @@ struct BSONValueExtractor { } void operator()(StringData name, CustomAttributeValue const& val) { - // Try to format as BSON first if available. Prefer BSONAppend if available as we might only - // want the value and not the whole element. - if (val.BSONAppend) { - val.BSONAppend(_builder, name); - } else if (val.BSONSerialize) { - BSONObjBuilder subObjBuilder = _builder.subobjStart(name); - val.BSONSerialize(subObjBuilder); - subObjBuilder.done(); - } else if (val.toBSONArray) { - _builder.append(name, val.toBSONArray()); - } else if (val.stringSerialize) { - fmt::memory_buffer buffer; - val.stringSerialize(buffer); - _builder.append(name, fmt::to_string(buffer)); - } else { - _builder.append(name, val.toString()); + try { + // Try to format as BSON first if available. Prefer BSONAppend if available as we might + // only want the value and not the whole element. + if (val.BSONAppend) { + val.BSONAppend(_builder, name); + } else if (val.BSONSerialize) { + BSONObjBuilder subObjBuilder = _builder.subobjStart(name); + val.BSONSerialize(subObjBuilder); + subObjBuilder.done(); + } else if (val.toBSONArray) { + _builder.append(name, val.toBSONArray()); + } else if (val.stringSerialize) { + fmt::memory_buffer buffer; + val.stringSerialize(buffer); + _builder.append(name, fmt::to_string(buffer)); + } else { + _builder.append(name, val.toString()); + } + } catch (...) 
{ + Status s = exceptionToStatus(); + _builder.append(name, + std::string("Failed to serialize due to exception") + s.toString()); } } @@ -124,8 +141,9 @@ void BSONFormatter::operator()(boost::log::record_view const& rec, BSONObjBuilde builder.append(constants::kComponentFieldName, extract(attributes::component(), rec).get().getNameForLog()); builder.append(constants::kIdFieldName, extract(attributes::id(), rec).get()); - if (auto ptr = extract(attributes::tenant(), rec).get_ptr()) { - builder.append(constants::kTenantFieldName, ptr->toString()); + const auto& tenant = extract(attributes::tenant(), rec); + if (!tenant.empty()) { + builder.append(constants::kTenantFieldName, tenant.get()); } builder.append(constants::kContextFieldName, extract(attributes::threadName(), rec).get()); diff --git a/src/mongo/logv2/bson_formatter.h b/src/mongo/logv2/bson_formatter.h index e099658c38fef..e9d6914152785 100644 --- a/src/mongo/logv2/bson_formatter.h +++ b/src/mongo/logv2/bson_formatter.h @@ -31,9 +31,12 @@ #include #include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/logv2/constants.h" +#include "mongo/platform/atomic_word.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/console.cpp b/src/mongo/logv2/console.cpp index 4691b332d4786..59cfc6e538208 100644 --- a/src/mongo/logv2/console.cpp +++ b/src/mongo/logv2/console.cpp @@ -27,14 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/logv2/console.h" - #include -#include "mongo/base/init.h" -#include "mongo/config.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/logv2/console.h" #ifdef _WIN32 #include diff --git a/src/mongo/logv2/file_rotate_sink.cpp b/src/mongo/logv2/file_rotate_sink.cpp index aa1038b618d11..6ce2bb3fdb0a0 100644 --- a/src/mongo/logv2/file_rotate_sink.cpp +++ b/src/mongo/logv2/file_rotate_sink.cpp @@ -29,21 +29,46 @@ #include "mongo/logv2/file_rotate_sink.h" +#include +#include +#include #include +#include #include #include +#include #include -#include +#include +#include +#include +#include +#include +#include #include -#include +#include // IWYU pragma: keep +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/errc.hpp" +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/json_formatter.h" -#include "mongo/logv2/log_detail.h" -#include "mongo/logv2/shared_access_fstream.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/log_truncation.h" +#include "mongo/logv2/shared_access_fstream.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" #include "mongo/util/exit_code.h" #include "mongo/util/quick_exit.h" #include "mongo/util/stacktrace.h" #include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" namespace mongo::logv2 { namespace { @@ -196,7 +221,7 @@ void FileRotateSink::consume(const boost::log::record_view& rec, "Writing to log file failed, aborting application", TypeErasedAttributeStorage(attrs), LogTag::kNone, - nullptr /* tenantID */, + std::string() /* tenantID */, LogTruncation::Disabled); // Commented out log line below to get validation of the log id with the errorcodes // linter 
LOGV2(4522200, "Writing to log file failed, aborting application"); diff --git a/src/mongo/logv2/file_rotate_sink.h b/src/mongo/logv2/file_rotate_sink.h index f4c492fa41782..439806649f500 100644 --- a/src/mongo/logv2/file_rotate_sink.h +++ b/src/mongo/logv2/file_rotate_sink.h @@ -30,10 +30,14 @@ #pragma once #include +#include #include #include +#include + #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/logv2/log_format.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/json_formatter.cpp b/src/mongo/logv2/json_formatter.cpp index 4a815d257ad16..4afe63d72e6e3 100644 --- a/src/mongo/logv2/json_formatter.cpp +++ b/src/mongo/logv2/json_formatter.cpp @@ -30,18 +30,34 @@ #include "mongo/logv2/json_formatter.h" #include -#include #include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" #include "mongo/logv2/attributes.h" #include "mongo/logv2/constants.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" #include "mongo/util/str_escape.h" -#include -#include - namespace mongo::logv2 { namespace { @@ -50,52 +66,58 @@ struct JSONValueExtractor { : _buffer(buffer), _attributeMaxSize(attributeMaxSize) {} void operator()(const char* name, CustomAttributeValue const& val) { - // Try to format as BSON first if available. Prefer BSONAppend if available as we might only - // want the value and not the whole element. - if (val.BSONAppend) { - BSONObjBuilder builder; - val.BSONAppend(builder, name); - // This is a JSON subobject, no quotes needed - storeUnquoted(name); - BSONElement element = builder.done().getField(name); - BSONObj truncated = element.jsonStringBuffer(JsonStringFormat::ExtendedRelaxedV2_0_0, - false, + try { + // Try to format as BSON first if available. Prefer BSONAppend if available as we might + // only want the value and not the whole element. 
+ if (val.BSONAppend) { + BSONObjBuilder builder; + val.BSONAppend(builder, name); + // This is a JSON subobject, no quotes needed + storeUnquoted(name); + BSONElement element = builder.done().getField(name); + BSONObj truncated = + element.jsonStringBuffer(JsonStringFormat::ExtendedRelaxedV2_0_0, + false, + false, + 0, + _buffer, + bufferSizeToTriggerTruncation()); + addTruncationReport(name, truncated, element.size()); + } else if (val.BSONSerialize) { + // This is a JSON subobject, no quotes needed + BSONObjBuilder builder; + val.BSONSerialize(builder); + BSONObj obj = builder.done(); + storeUnquoted(name); + BSONObj truncated = obj.jsonStringBuffer(JsonStringFormat::ExtendedRelaxedV2_0_0, + 0, false, + _buffer, + bufferSizeToTriggerTruncation()); + addTruncationReport(name, truncated, builder.done().objsize()); + + } else if (val.toBSONArray) { + // This is a JSON subarray, no quotes needed + BSONArray arr = val.toBSONArray(); + storeUnquoted(name); + BSONObj truncated = arr.jsonStringBuffer(JsonStringFormat::ExtendedRelaxedV2_0_0, 0, + true, _buffer, bufferSizeToTriggerTruncation()); - addTruncationReport(name, truncated, element.size()); - } else if (val.BSONSerialize) { - // This is a JSON subobject, no quotes needed - storeUnquoted(name); - BSONObjBuilder builder; - val.BSONSerialize(builder); - BSONObj obj = builder.done(); - BSONObj truncated = obj.jsonStringBuffer(JsonStringFormat::ExtendedRelaxedV2_0_0, - 0, - false, - _buffer, - bufferSizeToTriggerTruncation()); - addTruncationReport(name, truncated, builder.done().objsize()); - - } else if (val.toBSONArray) { - // This is a JSON subarray, no quotes needed - storeUnquoted(name); - BSONArray arr = val.toBSONArray(); - BSONObj truncated = arr.jsonStringBuffer(JsonStringFormat::ExtendedRelaxedV2_0_0, - 0, - true, - _buffer, - bufferSizeToTriggerTruncation()); - addTruncationReport(name, truncated, arr.objsize()); - - } else if (val.stringSerialize) { - fmt::memory_buffer intermediate; - val.stringSerialize(intermediate); - storeQuoted(name, StringData(intermediate.data(), intermediate.size())); - } else { - // This is a string, surround value with quotes - storeQuoted(name, val.toString()); + addTruncationReport(name, truncated, arr.objsize()); + + } else if (val.stringSerialize) { + fmt::memory_buffer intermediate; + val.stringSerialize(intermediate); + storeQuoted(name, StringData(intermediate.data(), intermediate.size())); + } else { + // This is a string, surround value with quotes + storeQuoted(name, val.toString()); + } + } catch (...) 
{ + Status s = exceptionToStatus(); + storeQuoted(name, std::string("Failed to serialize due to exception: ") + s.toString()); } } @@ -228,7 +250,7 @@ void JSONFormatter::format(fmt::memory_buffer& buffer, StringData message, const TypeErasedAttributeStorage& attrs, LogTag tags, - const TenantId* tenant, + const std::string& tenant, LogTruncation truncation) const { namespace c = constants; static constexpr auto kFmt = JsonStringFormat::ExtendedRelaxedV2_0_0; @@ -339,8 +361,8 @@ void JSONFormatter::format(fmt::memory_buffer& buffer, field(top, c::kSeverityFieldName, padNextComma(top, 5, quote(strFn(severityString)))); field(top, c::kComponentFieldName, padNextComma(top, 11, quote(strFn(componentString)))); field(top, c::kIdFieldName, padNextComma(top, 8, intFn(id))); - if (tenant) { - field(top, c::kTenantFieldName, quote(strFn(tenant->toString()))); + if (!tenant.empty()) { + field(top, c::kTenantFieldName, quote(strFn(tenant))); } field(top, c::kContextFieldName, quote(strFn(context))); field(top, c::kMessageFieldName, quote(escFn(message))); @@ -366,6 +388,7 @@ void JSONFormatter::operator()(boost::log::record_view const& rec, fmt::memory_buffer buffer; + const auto& tenant = extract(attributes::tenant(), rec); format(buffer, extract(attributes::severity(), rec).get(), extract(attributes::component(), rec).get(), @@ -375,7 +398,7 @@ void JSONFormatter::operator()(boost::log::record_view const& rec, extract(attributes::message(), rec).get(), extract(attributes::attributes(), rec).get(), extract(attributes::tags(), rec).get(), - extract(attributes::tenant(), rec).get_ptr(), + !tenant.empty() ? tenant.get() : std::string(), extract(attributes::truncation(), rec).get()); // Write final JSON object to output stream diff --git a/src/mongo/logv2/json_formatter.h b/src/mongo/logv2/json_formatter.h index 44d9b7ea66d15..931342387546e 100644 --- a/src/mongo/logv2/json_formatter.h +++ b/src/mongo/logv2/json_formatter.h @@ -31,8 +31,11 @@ #include #include +#include +#include +#include -#include "mongo/db/tenant_id.h" +#include "mongo/base/string_data.h" #include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/constants.h" #include "mongo/logv2/log_component.h" @@ -40,6 +43,7 @@ #include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_tag.h" #include "mongo/logv2/log_truncation.h" +#include "mongo/platform/atomic_word.h" #include "mongo/util/time_support.h" namespace mongo::logv2 { @@ -59,7 +63,7 @@ class JSONFormatter { StringData message, const TypeErasedAttributeStorage& attrs, LogTag tags, - const TenantId* tenant, + const std::string& tenant, LogTruncation truncation) const; void operator()(boost::log::record_view const& rec, boost::log::formatting_ostream& strm) const; diff --git a/src/mongo/logv2/log_component.cpp b/src/mongo/logv2/log_component.cpp index ced2b349a7200..878da995c4610 100644 --- a/src/mongo/logv2/log_component.cpp +++ b/src/mongo/logv2/log_component.cpp @@ -27,12 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/logv2/log_component.h" +#include #include "mongo/base/static_assert.h" -#include "mongo/util/assert_util.h" +#include "mongo/logv2/log_component.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/log_component.h b/src/mongo/logv2/log_component.h index 3c5b49c0b9d1b..a484ed7bf8f45 100644 --- a/src/mongo/logv2/log_component.h +++ b/src/mongo/logv2/log_component.h @@ -33,7 +33,7 @@ #include #include "mongo/base/string_data.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep namespace mongo::logv2 { @@ -73,7 +73,6 @@ namespace mongo::logv2 { X(kShardingMigration, , "migration" , "MIGRATE" , kSharding) \ X(kResharding, , "reshard" , "RESHARD" , kSharding) \ X(kShardMigrationPerf, , "migrationPerf" , "MIG_PERF", kSharding) \ - X(kMovePrimary, , "movePrimary" , "MVPRIMRY", kSharding) \ X(kStorage, , "storage" , "STORAGE" , kDefault) \ X(kStorageRecovery, , "recovery" , "RECOVERY", kStorage) \ X(kJournal, , "journal" , "JOURNAL" , kStorage) \ diff --git a/src/mongo/logv2/log_component_settings.cpp b/src/mongo/logv2/log_component_settings.cpp index 122a845d0cab8..1932da7ed199a 100644 --- a/src/mongo/logv2/log_component_settings.cpp +++ b/src/mongo/logv2/log_component_settings.cpp @@ -27,11 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/logv2/log_component_settings.h" +#include +#include "mongo/logv2/log_component_settings.h" #include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/log_detail.cpp b/src/mongo/logv2/log_detail.cpp index 5f476a400127b..a36a698cf197a 100644 --- a/src/mongo/logv2/log_detail.cpp +++ b/src/mongo/logv2/log_detail.cpp @@ -28,24 +28,59 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" + #ifdef _WIN32 #include #endif -#include "mongo/db/tenant_id.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/attributes.h" -#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_detail.h" #include "mongo/logv2/log_domain.h" #include "mongo/logv2/log_domain_internal.h" #include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_source.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/errno_util.h" #include "mongo/util/scopeguard.h" #include "mongo/util/static_immortal.h" #include "mongo/util/testing_proctor.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl @@ -218,12 +253,12 @@ void _doLogImpl(int32_t id, attrs))); if (auto fn = getTenantID()) { - if (auto tenant = fn()) { + auto tenant = fn(); + if (!tenant.empty()) { record.attribute_values().insert( attributes::tenant(), boost::log::attribute_value( - new boost::log::attributes::attribute_value_impl( - tenant.value()))); + new 
boost::log::attributes::attribute_value_impl(tenant))); } } diff --git a/src/mongo/logv2/log_detail.h b/src/mongo/logv2/log_detail.h index 7717fbe08fe0e..6677d7ad3d18a 100644 --- a/src/mongo/logv2/log_detail.h +++ b/src/mongo/logv2/log_detail.h @@ -30,10 +30,16 @@ #pragma once #include +#include +#include +#include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/util/builder.h" -#include "mongo/db/tenant_id.h" #include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log_attr.h" #include "mongo/logv2/log_component.h" @@ -51,7 +57,7 @@ bool loggingInProgress(); void signalSafeWriteToStderr(StringData message); namespace detail { -using GetTenantIDFn = std::function()>; +using GetTenantIDFn = std::function; void setGetTenantIDCallback(GetTenantIDFn&& fn); void doLogImpl(int32_t id, diff --git a/src/mongo/logv2/log_domain.h b/src/mongo/logv2/log_domain.h index 914a794a98b77..958d075c0d832 100644 --- a/src/mongo/logv2/log_domain.h +++ b/src/mongo/logv2/log_domain.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/logv2/log_component.h" #include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_tag.h" diff --git a/src/mongo/logv2/log_domain_global.cpp b/src/mongo/logv2/log_domain_global.cpp index c00d8f8cc9d22..8eb379ff2b4ee 100644 --- a/src/mongo/logv2/log_domain_global.cpp +++ b/src/mongo/logv2/log_domain_global.cpp @@ -30,24 +30,49 @@ #include "log_domain_global.h" -#include "mongo/config.h" +#include +#include +#include +#include +#include +#include + +#include +#include +// IWYU pragma: no_include "boost/log/detail/attachable_sstream_buf.hpp" +// IWYU pragma: no_include "boost/log/detail/locking_ptr.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/logv2/attributes.h" #include "mongo/logv2/component_settings_filter.h" #include "mongo/logv2/composite_backend.h" #include "mongo/logv2/console.h" #include "mongo/logv2/file_rotate_sink.h" #include "mongo/logv2/json_formatter.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_source.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/plain_formatter.h" +#include "mongo/logv2/ramlog.h" #include "mongo/logv2/ramlog_sink.h" -#include "mongo/logv2/shared_access_fstream.h" #include "mongo/logv2/tagged_severity_filter.h" -#include "mongo/logv2/text_formatter.h" #include "mongo/logv2/uassert_sink.h" - -#include -#include -#include -#include +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -144,7 +169,7 @@ Status LogDomainGlobal::Impl::configure(LogDomainGlobal::ConfigurationOptions co mapping[LogSeverity::Debug(3)] = boost::log::sinks::syslog::debug; mapping[LogSeverity::Debug(2)] = boost::log::sinks::syslog::debug; mapping[LogSeverity::Debug(1)] = boost::log::sinks::syslog::debug; - mapping[LogSeverity::Log()] = boost::log::sinks::syslog::debug; + mapping[LogSeverity::Log()] = boost::log::sinks::syslog::info; mapping[LogSeverity::Info()] = boost::log::sinks::syslog::info; mapping[LogSeverity::Warning()] = boost::log::sinks::syslog::warning; mapping[LogSeverity::Error()] = boost::log::sinks::syslog::critical; diff --git a/src/mongo/logv2/log_domain_global.h b/src/mongo/logv2/log_domain_global.h index 
ff2918dc90733..5b964a815b6c6 100644 --- a/src/mongo/logv2/log_domain_global.h +++ b/src/mongo/logv2/log_domain_global.h @@ -29,9 +29,20 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/logv2/constants.h" +#include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_domain.h" #include "mongo/logv2/log_domain_internal.h" #include "mongo/logv2/log_format.h" +#include "mongo/logv2/log_source.h" +#include "mongo/platform/atomic_word.h" namespace mongo::logv2 { class LogDomainGlobal : public LogDomain::Internal { diff --git a/src/mongo/logv2/log_manager.cpp b/src/mongo/logv2/log_manager.cpp index 68f97592f46f8..86ee06a5ab8df 100644 --- a/src/mongo/logv2/log_manager.cpp +++ b/src/mongo/logv2/log_manager.cpp @@ -27,18 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include - -#include "mongo/logv2/log_manager.h" - -#include - -#include "mongo/base/init.h" -#include "mongo/logv2/log.h" -#include "mongo/logv2/log_detail.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/logv2/log_component_settings.h" #include "mongo/logv2/log_domain.h" #include "mongo/logv2/log_domain_global.h" +#include "mongo/logv2/log_domain_internal.h" +#include "mongo/logv2/log_manager.h" #include "mongo/logv2/log_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/logv2/log_manager.h b/src/mongo/logv2/log_manager.h index 245a4e4cb7bb6..4967d37f402ec 100644 --- a/src/mongo/logv2/log_manager.h +++ b/src/mongo/logv2/log_manager.h @@ -29,11 +29,11 @@ #pragma once -#include "mongo/logv2/log_format.h" - #include #include +#include "mongo/logv2/log_format.h" + namespace mongo::logv2 { class LogDomain; diff --git a/src/mongo/logv2/log_options.h b/src/mongo/logv2/log_options.h index d0a4316e3da43..6f45892782335 100644 --- a/src/mongo/logv2/log_options.h +++ b/src/mongo/logv2/log_options.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/logv2/constants.h" #include "mongo/logv2/log_component.h" #include "mongo/logv2/log_manager.h" #include "mongo/logv2/log_tag.h" diff --git a/src/mongo/logv2/log_severity.cpp b/src/mongo/logv2/log_severity.cpp index 0d1f30edea7e0..6f9deac447286 100644 --- a/src/mongo/logv2/log_severity.cpp +++ b/src/mongo/logv2/log_severity.cpp @@ -27,12 +27,8 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/logv2/log_severity.h" -#include - namespace mongo::logv2 { namespace { diff --git a/src/mongo/logv2/log_severity_suppressor.h b/src/mongo/logv2/log_severity_suppressor.h index d5e489e315d7f..a97d2903845d2 100644 --- a/src/mongo/logv2/log_severity_suppressor.h +++ b/src/mongo/logv2/log_severity_suppressor.h @@ -35,12 +35,16 @@ #include #include #include +// IWYU pragma: no_include "boost/multi_index/detail/adl_swap.hpp" +#include #include +#include #include "mongo/logv2/log_severity.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/log_severity_suppressor_test.cpp b/src/mongo/logv2/log_severity_suppressor_test.cpp index f8b3d94e11785..392a300d3eb0e 100644 --- a/src/mongo/logv2/log_severity_suppressor_test.cpp +++ b/src/mongo/logv2/log_severity_suppressor_test.cpp @@ -29,7 +29,11 @@ #include "mongo/logv2/log_severity_suppressor.h" -#include "mongo/unittest/unittest.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/duration.h" diff --git a/src/mongo/logv2/log_tag.cpp b/src/mongo/logv2/log_tag.cpp index 6fdd543a721d7..6d18df60bc238 100644 --- a/src/mongo/logv2/log_tag.cpp +++ b/src/mongo/logv2/log_tag.cpp @@ -29,6 +29,7 @@ #include "mongo/logv2/log_tag.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/log_tag.h b/src/mongo/logv2/log_tag.h index 9959fff61c889..090557a974a33 100644 --- a/src/mongo/logv2/log_tag.h +++ b/src/mongo/logv2/log_tag.h @@ -29,11 +29,11 @@ #pragma once -#include "mongo/bson/bsonobj.h" - #include #include +#include "mongo/bson/bsonobj.h" + namespace mongo::logv2 { class LogTag { diff --git a/src/mongo/logv2/log_util.cpp b/src/mongo/logv2/log_util.cpp index ba87e3f8a2b12..78162fa3201cb 100644 --- a/src/mongo/logv2/log_util.cpp +++ b/src/mongo/logv2/log_util.cpp @@ -30,13 +30,20 @@ #include "mongo/logv2/log_util.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/atomic_word.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" -#include -#include - #define MONGO_LOGV2_DEFAULT_COMPONENT mongo::logv2::LogComponent::kControl diff --git a/src/mongo/logv2/log_util.h b/src/mongo/logv2/log_util.h index ffa24897a1fa3..704195bac9d4e 100644 --- a/src/mongo/logv2/log_util.h +++ b/src/mongo/logv2/log_util.h @@ -29,9 +29,9 @@ #pragma once -#include - #include +#include +#include #include "mongo/base/status.h" #include "mongo/base/string_data.h" diff --git a/src/mongo/logv2/logv2_bm.cpp b/src/mongo/logv2/logv2_bm.cpp index 595ad2a128ef3..dbd189ed29826 100644 --- a/src/mongo/logv2/logv2_bm.cpp +++ b/src/mongo/logv2/logv2_bm.cpp @@ -28,19 +28,38 @@ */ -#include "mongo/logv2/component_settings_filter.h" -#include "mongo/logv2/log.h" -#include "mongo/logv2/log_domain_global.h" -#include "mongo/logv2/text_formatter.h" -#include "mongo/platform/basic.h" - #include #include #include #include #include -#include #include +#include + +#include +#include +#include +// IWYU pragma: no_include "boost/iostreams/detail/error.hpp" +// IWYU pragma: no_include 
"boost/iostreams/detail/streambuf/indirect_streambuf.hpp" +// IWYU pragma: no_include "boost/iostreams/detail/wrap_unwrap.hpp" +#include +#include +// IWYU pragma: no_include "boost/log/detail/attachable_sstream_buf.hpp" +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/logv2/component_settings_filter.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_domain_global.h" +#include "mongo/logv2/log_manager.h" +#include "mongo/logv2/text_formatter.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/logv2/logv2_component_test.cpp b/src/mongo/logv2/logv2_component_test.cpp index 1d40d1c675348..9ceb96d010f3d 100644 --- a/src/mongo/logv2/logv2_component_test.cpp +++ b/src/mongo/logv2/logv2_component_test.cpp @@ -27,9 +27,14 @@ * it in the license file. */ +#include + +#include "mongo/base/string_data.h" #include "mongo/logv2/log_component.h" #include "mongo/logv2/log_component_settings.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/logv2_options.cpp b/src/mongo/logv2/logv2_options.cpp index 7d5cc14eea744..4aeae6c5428ac 100644 --- a/src/mongo/logv2/logv2_options.cpp +++ b/src/mongo/logv2/logv2_options.cpp @@ -27,14 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log_util.h" #include "mongo/logv2/logv2_options_gen.h" -#include "mongo/util/options_parser/option_section.h" -#include "mongo/util/options_parser/startup_option_init.h" -#include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/logv2/logv2_test.cpp b/src/mongo/logv2/logv2_test.cpp index 27872a8b3ec55..84fd6d4bc7e4e 100644 --- a/src/mongo/logv2/logv2_test.cpp +++ b/src/mongo/logv2/logv2_test.cpp @@ -28,51 +28,117 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +#include +#include +#include +#include #include #include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/log/detail/attachable_sstream_buf.hpp" +// IWYU pragma: no_include "boost/log/detail/locking_ptr.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/multi_index/detail/bidir_node_iterator.hpp" +#include +#include +#include +#include +// IWYU pragma: no_include "boost/property_tree/detail/exception_implementation.hpp" +// IWYU pragma: no_include "boost/property_tree/detail/ptree_implementation.hpp" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include 
"mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" #include "mongo/bson/oid.h" -#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/tenant_id.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/bson_formatter.h" #include "mongo/logv2/component_settings_filter.h" #include "mongo/logv2/composite_backend.h" #include "mongo/logv2/constants.h" #include "mongo/logv2/json_formatter.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" #include "mongo/logv2/log_capture_backend.h" #include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_debug.h" +#include "mongo/logv2/log_detail.h" #include "mongo/logv2/log_domain.h" #include "mongo/logv2/log_domain_global.h" #include "mongo/logv2/log_domain_internal.h" #include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/log_source.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/log_truncation.h" #include "mongo/logv2/plain_formatter.h" +#include "mongo/logv2/ramlog.h" #include "mongo/logv2/ramlog_sink.h" #include "mongo/logv2/text_formatter.h" #include "mongo/logv2/uassert_sink.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/decimal128.h" #include "mongo/stdx/thread.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/duration.h" #include "mongo/util/exit_code.h" #include "mongo/util/str_escape.h" #include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" -#include -#include -#include -#include - #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -462,8 +528,7 @@ class LogV2TypesTest : public LogV2Test { public: using LogV2Test::LogV2Test; LogV2TypesTest() : LogV2Test() { - detail::setGetTenantIDCallback( - [this]() -> boost::optional { return this->tenant; }); + detail::setGetTenantIDCallback([this]() -> std::string { return this->tenant.toString(); }); } ~LogV2TypesTest() { detail::setGetTenantIDCallback(nullptr); @@ -481,7 +546,7 @@ class LogV2TypesTest : public LogV2Test { } auto lastBSONElement() { - ASSERT_EQUALS(BSONObj(bson.back().data()).getField(kTenantFieldName).str(), + ASSERT_EQUALS(BSONObj(bson.back().data()).getField(kTenantFieldName).String(), tenant.toString()); return BSONObj(bson.back().data()).getField(kAttributesFieldName).Obj().getField("name"_sd); } @@ -828,6 +893,46 @@ TEST_F(LogV2TypesTest, Duration) { ms.count()); } +void exceptionThrower() { + uasserted(7733401, "exception in logger"); +} + +template +void testExceptionHandling(T arg, LogV2Test::LineCapture& text, LogV2Test::LineCapture& json) { + LOGV2(7733402, "test1 {a1}", "a1"_attr = arg); + + ASSERT_EQ( + mongo::fromjson(json.back()).getField(kAttributesFieldName).Obj().getField("a1").String(), + "Failed to serialize due to exception: Location7733401: exception in logger"); + + ASSERT_EQ(text.back(), + "test1 Failed to serialize due to exception: Location7733401: exception in logger"); +} + +// Throw an exception in a BSON serialization method 
+TEST_F(LogV2TypesTest, AttrExceptionBSONSerialize) { + struct TypeWithBSONSerialize { + void serialize(BSONObjBuilder*) const { + exceptionThrower(); + } + }; + + testExceptionHandling(TypeWithBSONSerialize(), text, json); +} + +// Throw an exception in a BSON Array serialization method +TEST_F(LogV2TypesTest, AttrExceptionBSONToARray) { + struct TypeToArray { + BSONArray toBSONArray() const { + exceptionThrower(); + return {}; + } + }; + + testExceptionHandling(TypeToArray(), text, json); +} + + TEST_F(LogV2Test, TextFormat) { auto lines = makeLineCapture(TextFormatter()); diff --git a/src/mongo/logv2/plain_formatter.cpp b/src/mongo/logv2/plain_formatter.cpp index ba8e24ba22df4..fb8dcd7256614 100644 --- a/src/mongo/logv2/plain_formatter.cpp +++ b/src/mongo/logv2/plain_formatter.cpp @@ -29,44 +29,65 @@ #include "mongo/logv2/plain_formatter.h" -#include "mongo/bson/bsonobj.h" -#include "mongo/logv2/attribute_storage.h" -#include "mongo/logv2/attributes.h" -#include "mongo/logv2/constants.h" -#include "mongo/stdx/variant.h" -#include "mongo/util/str_escape.h" - -#include +#include +#include #include -#include #include - -#include +#include #include #include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/logv2/attribute_storage.h" +#include "mongo/logv2/attributes.h" +#include "mongo/logv2/constants.h" +#include "mongo/logv2/log_truncation.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" namespace mongo::logv2 { namespace { struct TextValueExtractor { void operator()(const char* name, CustomAttributeValue const& val) { - if (val.stringSerialize) { - fmt::memory_buffer buffer; - val.stringSerialize(buffer); - _addString(name, fmt::to_string(buffer)); - } else if (val.toString) { - _addString(name, val.toString()); - } else if (val.BSONAppend) { - BSONObjBuilder builder; - val.BSONAppend(builder, name); - BSONElement element = builder.done().getField(name); - _addString(name, element.toString(false)); - } else if (val.BSONSerialize) { - BSONObjBuilder builder; - val.BSONSerialize(builder); - operator()(name, builder.done()); - } else if (val.toBSONArray) { - operator()(name, val.toBSONArray()); + try { + if (val.stringSerialize) { + fmt::memory_buffer buffer; + val.stringSerialize(buffer); + _addString(name, fmt::to_string(buffer)); + } else if (val.toString) { + _addString(name, val.toString()); + } else if (val.BSONAppend) { + BSONObjBuilder builder; + val.BSONAppend(builder, name); + BSONElement element = builder.done().getField(name); + _addString(name, element.toString(false)); + } else if (val.BSONSerialize) { + BSONObjBuilder builder; + val.BSONSerialize(builder); + operator()(name, builder.done()); + } else if (val.toBSONArray) { + operator()(name, val.toBSONArray()); + } + } catch (...) 
{ + Status s = exceptionToStatus(); + _addString(name, std::string("Failed to serialize due to exception: ") + s.toString()); } } diff --git a/src/mongo/logv2/plain_formatter.h b/src/mongo/logv2/plain_formatter.h index bc74dd0a81075..a61a77628057d 100644 --- a/src/mongo/logv2/plain_formatter.h +++ b/src/mongo/logv2/plain_formatter.h @@ -31,9 +31,12 @@ #include #include +#include +#include #include "mongo/logv2/constants.h" #include "mongo/logv2/log_format.h" +#include "mongo/platform/atomic_word.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/ramlog.cpp b/src/mongo/logv2/ramlog.cpp index 8e9b6e151f259..0db661fe42d03 100644 --- a/src/mongo/logv2/ramlog.cpp +++ b/src/mongo/logv2/ramlog.cpp @@ -27,15 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/logv2/ramlog.h" - #include +#include -#include "mongo/base/init.h" -#include "mongo/base/status.h" -#include "mongo/util/str.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/logv2/ramlog.h" +#include "mongo/util/assert_util.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/ramlog.h b/src/mongo/logv2/ramlog.h index 5afd8a2e48dd8..578699e73e7e4 100644 --- a/src/mongo/logv2/ramlog.h +++ b/src/mongo/logv2/ramlog.h @@ -29,6 +29,9 @@ #pragma once +#include +#include +#include #include #include diff --git a/src/mongo/logv2/redaction.cpp b/src/mongo/logv2/redaction.cpp index f3ab4e4173005..13a1dd100b82c 100644 --- a/src/mongo/logv2/redaction.cpp +++ b/src/mongo/logv2/redaction.cpp @@ -28,14 +28,15 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/logv2/redaction.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/logv2/log_util.h" -#include "mongo/logv2/logv2_options_gen.h" +#include "mongo/logv2/redaction.h" #include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/logv2/redaction_test.cpp b/src/mongo/logv2/redaction_test.cpp index 5572e4b6c806c..b97417e1c69fa 100644 --- a/src/mongo/logv2/redaction_test.cpp +++ b/src/mongo/logv2/redaction_test.cpp @@ -30,12 +30,22 @@ #include "mongo/logv2/redaction.h" -#include "mongo/base/error_extra_info.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/bsontypes.h" -#include "mongo/db/jsobj.h" #include "mongo/logv2/log_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/logv2/shared_access_fstream.cpp b/src/mongo/logv2/shared_access_fstream.cpp index 2179e27fe6c19..3a6f86174f952 100644 --- a/src/mongo/logv2/shared_access_fstream.cpp +++ b/src/mongo/logv2/shared_access_fstream.cpp @@ -27,8 +27,6 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/logv2/shared_access_fstream.h" #if defined(_WIN32) && defined(_MSC_VER) @@ -37,7 +35,7 @@ #include #include -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep namespace mongo { FILE* Win32SharedAccessFileDescriptor::_open(const wchar_t* filename, diff --git a/src/mongo/logv2/shared_access_fstream.h b/src/mongo/logv2/shared_access_fstream.h index 6157705a5bebd..ec227d9538185 100644 --- a/src/mongo/logv2/shared_access_fstream.h +++ b/src/mongo/logv2/shared_access_fstream.h @@ -31,7 +31,7 @@ #if defined(_WIN32) && defined(_MSC_VER) -#include +#include // IWYU pragma: keep #include "mongo/base/string_data.h" diff --git a/src/mongo/logv2/text_formatter.cpp b/src/mongo/logv2/text_formatter.cpp index b0421abf8e92e..a63a6ab0d8c93 100644 --- a/src/mongo/logv2/text_formatter.cpp +++ b/src/mongo/logv2/text_formatter.cpp @@ -29,17 +29,21 @@ #include "mongo/logv2/text_formatter.h" +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/logv2/attributes.h" #include "mongo/logv2/log_component.h" #include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_tag.h" #include "mongo/util/time_support.h" -#include -#include - -#include - namespace mongo::logv2 { void TextFormatter::operator()(boost::log::record_view const& rec, diff --git a/src/mongo/logv2/text_formatter.h b/src/mongo/logv2/text_formatter.h index fbf49be999fb6..c93b66b3c1422 100644 --- a/src/mongo/logv2/text_formatter.h +++ b/src/mongo/logv2/text_formatter.h @@ -29,7 +29,14 @@ #pragma once +#include + +#include +#include + +#include "mongo/logv2/log_format.h" #include "mongo/logv2/plain_formatter.h" +#include "mongo/platform/atomic_word.h" namespace mongo::logv2 { diff --git a/src/mongo/logv2/uassert_sink.h b/src/mongo/logv2/uassert_sink.h index f0fe425570809..c4ad7b8069cbb 100644 --- a/src/mongo/logv2/uassert_sink.h +++ b/src/mongo/logv2/uassert_sink.h @@ -35,6 +35,7 @@ #include #include "mongo/logv2/bson_formatter.h" +#include "mongo/logv2/plain_formatter.h" #include "mongo/util/assert_util.h" namespace mongo::logv2 { diff --git a/src/mongo/platform/atomic_proxy.h b/src/mongo/platform/atomic_proxy.h index 2d408c4b5e35d..01c14f168f6e0 100644 --- a/src/mongo/platform/atomic_proxy.h +++ b/src/mongo/platform/atomic_proxy.h @@ -35,7 +35,7 @@ #include #include "mongo/base/static_assert.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/platform/atomic_proxy_test.cpp b/src/mongo/platform/atomic_proxy_test.cpp index fce9a962cc385..699044cb3c80b 100644 --- a/src/mongo/platform/atomic_proxy_test.cpp +++ b/src/mongo/platform/atomic_proxy_test.cpp @@ -27,12 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include "mongo/base/string_data.h" #include "mongo/platform/atomic_proxy.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/platform/atomic_word.h b/src/mongo/platform/atomic_word.h index c277497b15cfe..5a7d38ccca7eb 100644 --- a/src/mongo/platform/atomic_word.h +++ b/src/mongo/platform/atomic_word.h @@ -87,9 +87,7 @@ class Base { } /** - * Gets the current value of this AtomicWord. - * - * Has relaxed semantics. + * Gets the current value of this AtomicWord using relaxed memory order. 
*/ WordType loadRelaxed() const { return _value.load(std::memory_order_relaxed); @@ -102,6 +100,13 @@ class Base { _value.store(newValue); } + /** + * Sets the value of this AtomicWord to "newValue" using relaxed memory order. + */ + void storeRelaxed(WordType newValue) { + _value.store(newValue, std::memory_order_relaxed); + } + /** * Atomically swaps the current value of this with "newValue". * diff --git a/src/mongo/platform/atomic_word_test.cpp b/src/mongo/platform/atomic_word_test.cpp index 6b30c70eca09a..78839a28f6d6c 100644 --- a/src/mongo/platform/atomic_word_test.cpp +++ b/src/mongo/platform/atomic_word_test.cpp @@ -27,13 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include +#include +#include +#include "mongo/base/string_data.h" #include "mongo/platform/atomic_word.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" namespace mongo { namespace { diff --git a/src/mongo/platform/bits_test.cpp b/src/mongo/platform/bits_test.cpp index f62346606823a..8f1b7f24439b1 100644 --- a/src/mongo/platform/bits_test.cpp +++ b/src/mongo/platform/bits_test.cpp @@ -29,7 +29,9 @@ #include "mongo/platform/bits.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/platform/compiler.h b/src/mongo/platform/compiler.h index ba74feb862d1e..c68373f3e75d5 100644 --- a/src/mongo/platform/compiler.h +++ b/src/mongo/platform/compiler.h @@ -189,9 +189,15 @@ #if defined(_MSC_VER) -#include "mongo/platform/compiler_msvc.h" +#include "mongo/platform/compiler_msvc.h" // IWYU pragma: export #elif defined(__GNUC__) -#include "mongo/platform/compiler_gcc.h" +#include "mongo/platform/compiler_gcc.h" // IWYU pragma: export #else #error "Unsupported compiler family" #endif + +// Define clang's has_feature macro for other compilers +// See https://clang.llvm.org/docs/LanguageExtensions.html#has-feature-and-has-extension +#if !defined(__has_feature) +#define __has_feature(x) 0 +#endif diff --git a/src/mongo/platform/decimal128.cpp b/src/mongo/platform/decimal128.cpp index 957bf335ed782..d27d8fea3e8ef 100644 --- a/src/mongo/platform/decimal128.cpp +++ b/src/mongo/platform/decimal128.cpp @@ -29,25 +29,27 @@ #include "mongo/platform/decimal128.h" -#include "mongo/platform/basic.h" #include #include -#include #include -#include #include -#include + +#include + // The Intel C library typedefs wchar_t, but it is a distinct fundamental type // in C++, so we #define _WCHAR_T here to prevent the library from trying to typedef. 
#define _WCHAR_T #include #include + #undef _WCHAR_T +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" #include "mongo/base/static_assert.h" #include "mongo/base/string_data.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/platform/endian.h" #include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" diff --git a/src/mongo/platform/decimal128.h b/src/mongo/platform/decimal128.h index 6e788f75d8b04..21fed8353418b 100644 --- a/src/mongo/platform/decimal128.h +++ b/src/mongo/platform/decimal128.h @@ -30,16 +30,20 @@ #pragma once #include +#include #include +#include #include #include #include #include #include -#include "mongo/config.h" - #include "mongo/base/data_type.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/platform/decimal128_bson_test.cpp b/src/mongo/platform/decimal128_bson_test.cpp index d9cc6a931818f..d1e5bfd5ab3cf 100644 --- a/src/mongo/platform/decimal128_bson_test.cpp +++ b/src/mongo/platform/decimal128_bson_test.cpp @@ -28,24 +28,25 @@ */ -#include "mongo/platform/basic.h" - #include -#include -#include -#include +#include #include #include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/config.h" -#include "mongo/db/json.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/logv2/log.h" -#include "mongo/platform/decimal128.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/hex.h" +#include "mongo/util/shared_buffer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/platform/decimal128_test.cpp b/src/mongo/platform/decimal128_test.cpp index f933de4a48498..bab692f73cb98 100644 --- a/src/mongo/platform/decimal128_test.cpp +++ b/src/mongo/platform/decimal128_test.cpp @@ -30,14 +30,17 @@ #include "mongo/platform/decimal128.h" #include +#include #include #include +#include #include #include -#include -#include "mongo/config.h" -#include "mongo/unittest/unittest.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/platform/endian_bm.cpp b/src/mongo/platform/endian_bm.cpp index e38ad01e67039..1277fd620e0fb 100644 --- a/src/mongo/platform/endian_bm.cpp +++ b/src/mongo/platform/endian_bm.cpp @@ -28,19 +28,11 @@ */ -#include "mongo/platform/basic.h" - -#include +#include #include -#include #include -#include -#include #include -#include - -#include "mongo/base/string_data.h" #include "mongo/platform/endian.h" namespace mongo { diff --git a/src/mongo/platform/endian_test.cpp b/src/mongo/platform/endian_test.cpp index c26eab902cb7d..4952db53e9456 100644 --- a/src/mongo/platform/endian_test.cpp +++ b/src/mongo/platform/endian_test.cpp @@ -31,8 +31,10 @@ #include -#include "mongo/config.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/unittest/assert.h" +#include 
"mongo/unittest/framework.h" namespace mongo::endian { namespace { diff --git a/src/mongo/platform/int128_test.cpp b/src/mongo/platform/int128_test.cpp index be36cd3c4ed4d..b22fefccfbaf2 100644 --- a/src/mongo/platform/int128_test.cpp +++ b/src/mongo/platform/int128_test.cpp @@ -28,7 +28,15 @@ */ #include "mongo/platform/int128.h" -#include "mongo/unittest/unittest.h" + +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" using namespace mongo; diff --git a/src/mongo/platform/mutex.cpp b/src/mongo/platform/mutex.cpp index 8766b3dc20b41..7e58bfb9cc3ef 100644 --- a/src/mongo/platform/mutex.cpp +++ b/src/mongo/platform/mutex.cpp @@ -29,7 +29,11 @@ #include "mongo/platform/mutex.h" -#include "mongo/base/init.h" +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/bson/bsonobjbuilder.h" namespace mongo::latch_detail { diff --git a/src/mongo/platform/mutex.h b/src/mongo/platform/mutex.h index 94361dd938e36..203c3e8987fc8 100644 --- a/src/mongo/platform/mutex.h +++ b/src/mongo/platform/mutex.h @@ -29,19 +29,28 @@ #pragma once +#include +#include #include +#include +#include +#include +#include #include +#include +#include #include +#include #include #include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" -#include "mongo/config.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/platform/atomic_word.h" #include "mongo/platform/source_location.h" #include "mongo/stdx/mutex.h" #include "mongo/util/assert_util.h" -#include "mongo/util/concepts.h" #include "mongo/util/decorable.h" #include "mongo/util/duration.h" #include "mongo/util/hierarchical_acquisition.h" @@ -196,8 +205,8 @@ inline auto& getDiagnosticListenerState() noexcept { * DiagnosticListeners subclass, please provide the switch on that subclass to noop its * functions. It is only safe to add a DiagnosticListener during a MONGO_INITIALIZER. */ -TEMPLATE(typename ListenerT) -REQUIRES(std::is_base_of_v) +template +requires std::is_base_of_v void installDiagnosticListener() { auto& state = getDiagnosticListenerState(); diff --git a/src/mongo/platform/mutex_test.cpp b/src/mongo/platform/mutex_test.cpp index 122dd58b29996..feefac1686eee 100644 --- a/src/mongo/platform/mutex_test.cpp +++ b/src/mongo/platform/mutex_test.cpp @@ -27,10 +27,10 @@ * it in the license file. */ -#include "mongo/unittest/unittest.h" - -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/platform/mutex.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { TEST(MutexTest, BasicSingleThread) { diff --git a/src/mongo/platform/overflow_arithmetic_test.cpp b/src/mongo/platform/overflow_arithmetic_test.cpp index 0d4aa13cbab4d..7dc5398ed933b 100644 --- a/src/mongo/platform/overflow_arithmetic_test.cpp +++ b/src/mongo/platform/overflow_arithmetic_test.cpp @@ -27,13 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include +#include +#include "mongo/base/string_data.h" #include "mongo/platform/overflow_arithmetic.h" #include "mongo/stdx/type_traits.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/platform/posix_fadvise.cpp b/src/mongo/platform/posix_fadvise.cpp index a9b605dc185c9..c187cfef760ed 100644 --- a/src/mongo/platform/posix_fadvise.cpp +++ b/src/mongo/platform/posix_fadvise.cpp @@ -33,7 +33,7 @@ #include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" namespace mongo { diff --git a/src/mongo/platform/process_id.cpp b/src/mongo/platform/process_id.cpp index eff158ca41ed1..a4f705f11f46d 100644 --- a/src/mongo/platform/process_id.cpp +++ b/src/mongo/platform/process_id.cpp @@ -27,29 +27,28 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/platform/process_id.h" + +// IWYU pragma: no_include "syscall.h" + #ifndef _WIN32 -#include +#include // IWYU pragma: keep #endif #if defined(__linux__) -#include -#include +#include // IWYU pragma: keep +#include // IWYU pragma: keep #endif - #ifdef __FreeBSD__ #include #endif -#include #include -#include +#include // IWYU pragma: keep #include "mongo/base/static_assert.h" -#include "mongo/util/assert_util.h" +#include "mongo/util/assert_util.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/platform/process_id_test.cpp b/src/mongo/platform/process_id_test.cpp index de81f389d6202..d129dd9031f25 100644 --- a/src/mongo/platform/process_id_test.cpp +++ b/src/mongo/platform/process_id_test.cpp @@ -27,10 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/string_data.h" #include "mongo/platform/process_id.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/platform/random.cpp b/src/mongo/platform/random.cpp index 1b57d92b4828f..d885a92ee5ebe 100644 --- a/src/mongo/platform/random.cpp +++ b/src/mongo/platform/random.cpp @@ -28,11 +28,10 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/platform/random.h" - +#include #include +#include // IWYU pragma: keep +#include #ifdef _WIN32 #include @@ -42,17 +41,18 @@ #endif #define _CRT_RAND_S -#include -#include -#include -#include -#include -#include -#include +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" #include "mongo/util/assert_util.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/platform/random.h b/src/mongo/platform/random.h index def8ddf906780..75b8c7b5b2afe 100644 --- a/src/mongo/platform/random.h +++ b/src/mongo/platform/random.h @@ -35,6 +35,7 @@ #include #include #include +#include namespace mongo { diff --git a/src/mongo/platform/random_test.cpp b/src/mongo/platform/random_test.cpp index ac368a5dd3ee2..5fb34b78c0027 100644 --- a/src/mongo/platform/random_test.cpp +++ b/src/mongo/platform/random_test.cpp @@ -28,13 +28,22 @@ */ +#include #include +#include #include -#include "mongo/platform/random.h" +#include +#include "mongo/base/string_data.h" #include "mongo/logv2/log.h" -#include 
"mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/platform/shared_library.cpp b/src/mongo/platform/shared_library.cpp index eefd7635311ac..f6508b551f939 100644 --- a/src/mongo/platform/shared_library.cpp +++ b/src/mongo/platform/shared_library.cpp @@ -27,7 +27,7 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include #include "mongo/platform/shared_library.h" diff --git a/src/mongo/platform/shared_library.h b/src/mongo/platform/shared_library.h index 9aeeeadcad8e6..ebcec39fd2359 100644 --- a/src/mongo/platform/shared_library.h +++ b/src/mongo/platform/shared_library.h @@ -29,9 +29,11 @@ #pragma once #include +#include #include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" namespace mongo { diff --git a/src/mongo/platform/shared_library_posix.cpp b/src/mongo/platform/shared_library_posix.cpp index 4d5499e4916e0..a20662a7136c1 100644 --- a/src/mongo/platform/shared_library_posix.cpp +++ b/src/mongo/platform/shared_library_posix.cpp @@ -27,16 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/platform/shared_library.h" - -#include #include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/logv2/log.h" -#include "mongo/util/assert_util.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/shared_library.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kControl diff --git a/src/mongo/platform/source_location.h b/src/mongo/platform/source_location.h index 1909ab1c4364e..798918109279a 100644 --- a/src/mongo/platform/source_location.h +++ b/src/mongo/platform/source_location.h @@ -32,6 +32,7 @@ #include #include #include +#include #if !defined(_MSC_VER) && !defined(__clang__) // not windows or clang #include diff --git a/src/mongo/platform/source_location_test.cpp b/src/mongo/platform/source_location_test.cpp index 53bf3c444b34d..961a8876c0351 100644 --- a/src/mongo/platform/source_location_test.cpp +++ b/src/mongo/platform/source_location_test.cpp @@ -28,10 +28,15 @@ */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/source_location_test.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/platform/source_location_test.h b/src/mongo/platform/source_location_test.h index 4b97c72777eeb..f462125b5cd21 100644 --- a/src/mongo/platform/source_location_test.h +++ b/src/mongo/platform/source_location_test.h @@ -29,10 +29,10 @@ #pragma once -#include "mongo/unittest/unittest.h" - #include "mongo/platform/source_location.h" +#include "mongo/unittest/unittest.h" + namespace mongo { inline bool operator==(const SourceLocationHolder& lhs, const SourceLocationHolder& rhs) { return lhs.line() == rhs.line() // diff --git a/src/mongo/platform/stack_locator.cpp 
b/src/mongo/platform/stack_locator.cpp index d29428a2def93..da6244d933817 100644 --- a/src/mongo/platform/stack_locator.cpp +++ b/src/mongo/platform/stack_locator.cpp @@ -27,10 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/platform/stack_locator.h" +#include +#include "mongo/platform/stack_locator.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/platform/stack_locator.h b/src/mongo/platform/stack_locator.h index 37ee279aaa613..706034f3c22e2 100644 --- a/src/mongo/platform/stack_locator.h +++ b/src/mongo/platform/stack_locator.h @@ -28,6 +28,7 @@ */ #include +#include #include namespace mongo { diff --git a/src/mongo/platform/stack_locator_test.cpp b/src/mongo/platform/stack_locator_test.cpp index 2ed4aa95bc259..9a162461e2f14 100644 --- a/src/mongo/platform/stack_locator_test.cpp +++ b/src/mongo/platform/stack_locator_test.cpp @@ -27,11 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#ifndef _WIN32 +#include +#endif + +#include "mongo/base/string_data.h" #include "mongo/platform/stack_locator.h" #include "mongo/stdx/thread.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/platform/strcasestr.cpp b/src/mongo/platform/strcasestr.cpp index 0a1cd683b8cb8..8c7b370b5f4aa 100644 --- a/src/mongo/platform/strcasestr.cpp +++ b/src/mongo/platform/strcasestr.cpp @@ -32,7 +32,7 @@ #if defined(__sun) #include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" #endif diff --git a/src/mongo/platform/strnlen.cpp b/src/mongo/platform/strnlen.cpp index 755d76f335f2f..7e22840bc54e8 100644 --- a/src/mongo/platform/strnlen.cpp +++ b/src/mongo/platform/strnlen.cpp @@ -28,7 +28,7 @@ */ #include "mongo/platform/strnlen.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #ifndef MONGO_CONFIG_HAVE_STRNLEN diff --git a/src/mongo/platform/strnlen.h b/src/mongo/platform/strnlen.h index 21e987f21b67e..95e18110a405c 100644 --- a/src/mongo/platform/strnlen.h +++ b/src/mongo/platform/strnlen.h @@ -29,11 +29,11 @@ #pragma once -#include "mongo/config.h" - #include #include +#include "mongo/config.h" // IWYU pragma: keep + namespace mongo { #ifdef MONGO_CONFIG_HAVE_STRNLEN diff --git a/src/mongo/platform/visibility_test1.cpp b/src/mongo/platform/visibility_test1.cpp index f52253eedd79b..b0ed313748b93 100644 --- a/src/mongo/platform/visibility_test1.cpp +++ b/src/mongo/platform/visibility_test1.cpp @@ -27,13 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/platform/visibility_test_lib1.h" #include "mongo/util/exit_code.h" -#include - int main(int argc, char* argv[]) { mongo::visibility_test_lib1::Base b("hello"); return (b.name() == "hello") ? static_cast(mongo::ExitCode::clean) diff --git a/src/mongo/platform/visibility_test2.cpp b/src/mongo/platform/visibility_test2.cpp index 56621f7b16762..10793368c2b96 100644 --- a/src/mongo/platform/visibility_test2.cpp +++ b/src/mongo/platform/visibility_test2.cpp @@ -27,13 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include #include "mongo/platform/visibility_test_lib2.h" #include "mongo/util/exit_code.h" -#include - int main(int argc, char* argv[]) { mongo::visibility_test_lib2::Derived d("hello", argc); return (d.value() == argc) ? 
static_cast(mongo::ExitCode::clean) diff --git a/src/mongo/platform/visibility_test_lib1.cpp b/src/mongo/platform/visibility_test_lib1.cpp index 073f743c2af77..ac2c666c12147 100644 --- a/src/mongo/platform/visibility_test_lib1.cpp +++ b/src/mongo/platform/visibility_test_lib1.cpp @@ -27,12 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/platform/visibility_test_lib1.h" - #include +#include "mongo/platform/visibility_test_lib1.h" #include "mongo/platform/visibility_test_libcommon.h" namespace mongo { diff --git a/src/mongo/platform/visibility_test_lib1.h b/src/mongo/platform/visibility_test_lib1.h index d35ff10e4e6db..4b01683258c39 100644 --- a/src/mongo/platform/visibility_test_lib1.h +++ b/src/mongo/platform/visibility_test_lib1.h @@ -26,6 +26,7 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ +#pragma once #include #include diff --git a/src/mongo/platform/visibility_test_lib2.cpp b/src/mongo/platform/visibility_test_lib2.cpp index 0fc9d9d9f65f1..801eaf765b886 100644 --- a/src/mongo/platform/visibility_test_lib2.cpp +++ b/src/mongo/platform/visibility_test_lib2.cpp @@ -27,12 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/platform/visibility_test_lib2.h" - #include +#include "mongo/platform/visibility_test_lib2.h" #include "mongo/platform/visibility_test_libcommon.h" namespace mongo { diff --git a/src/mongo/platform/visibility_test_lib2.h b/src/mongo/platform/visibility_test_lib2.h index 94143460cec2a..89caac11824d5 100644 --- a/src/mongo/platform/visibility_test_lib2.h +++ b/src/mongo/platform/visibility_test_lib2.h @@ -28,6 +28,8 @@ */ +#include + #include "mongo/platform/visibility.h" #include "mongo/platform/visibility_test_lib1.h" diff --git a/src/mongo/platform/visibility_test_libcommon.cpp b/src/mongo/platform/visibility_test_libcommon.cpp index 112554a4a9966..3b1b28b09f068 100644 --- a/src/mongo/platform/visibility_test_libcommon.cpp +++ b/src/mongo/platform/visibility_test_libcommon.cpp @@ -27,12 +27,10 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/platform/visibility_test_libcommon.h" -#include - namespace mongo { namespace visibility_test_libcommon { diff --git a/src/mongo/rpc/SConscript b/src/mongo/rpc/SConscript index 5f1acff99a63c..6727ce333c5ef 100644 --- a/src/mongo/rpc/SConscript +++ b/src/mongo/rpc/SConscript @@ -44,6 +44,7 @@ protoEnv.Library( '$BUILD_DIR/mongo/db/auth/security_token', '$BUILD_DIR/mongo/db/bson/dotted_path_support', '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/db/serverless/multitenancy_check', '$BUILD_DIR/mongo/util/namespace_string_database_name_util', '$BUILD_DIR/third_party/wiredtiger/wiredtiger_checksum' if wiredtiger else [], ], @@ -190,6 +191,7 @@ if wiredtiger: '$BUILD_DIR/mongo/db/auth/auth', '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/server_base', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/third_party/wiredtiger/wiredtiger_checksum', 'client_metadata', diff --git a/src/mongo/rpc/check_allowed_op_query_cmd.cpp b/src/mongo/rpc/check_allowed_op_query_cmd.cpp index 165240f57c3b0..51f3ef170530f 100644 --- a/src/mongo/rpc/check_allowed_op_query_cmd.cpp +++ b/src/mongo/rpc/check_allowed_op_query_cmd.cpp @@ -28,13 +28,14 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/db/stats/counters.h" #include "mongo/rpc/check_allowed_op_query_cmd.h" - -#include -#include +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/rpc/check_allowed_op_query_cmd.h b/src/mongo/rpc/check_allowed_op_query_cmd.h index d6b45f377d016..b44068b39ea51 100644 --- a/src/mongo/rpc/check_allowed_op_query_cmd.h +++ b/src/mongo/rpc/check_allowed_op_query_cmd.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/string_data.h" #include "mongo/db/client.h" namespace mongo { diff --git a/src/mongo/rpc/factory.cpp b/src/mongo/rpc/factory.cpp index 7e001edc0af78..02b6cbc62b2cc 100644 --- a/src/mongo/rpc/factory.cpp +++ b/src/mongo/rpc/factory.cpp @@ -27,17 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/factory.h" - #include +#include "mongo/base/error_codes.h" +#include "mongo/rpc/factory.h" #include "mongo/rpc/legacy_reply.h" #include "mongo/rpc/legacy_reply_builder.h" #include "mongo/rpc/legacy_request.h" #include "mongo/rpc/message.h" #include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/rpc/reply_interface.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/rpc/factory.h b/src/mongo/rpc/factory.h index 514e7bd73c7b3..abab59fec0df0 100644 --- a/src/mongo/rpc/factory.h +++ b/src/mongo/rpc/factory.h @@ -29,12 +29,13 @@ #pragma once +#include + #include "mongo/db/client.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" #include "mongo/rpc/protocol.h" -#include - /** * Utilities to construct the correct concrete rpc class based on what the remote server * supports, and what the client has been configured to do. diff --git a/src/mongo/rpc/get_status_from_command_result.cpp b/src/mongo/rpc/get_status_from_command_result.cpp index 315d4ef313585..32a9562d0c99c 100644 --- a/src/mongo/rpc/get_status_from_command_result.cpp +++ b/src/mongo/rpc/get_status_from_command_result.cpp @@ -27,14 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/get_status_from_command_result.h" +#include #include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" +#include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/write_concern_error_detail.h" #include "mongo/util/str.h" diff --git a/src/mongo/rpc/get_status_from_command_result_test.cpp b/src/mongo/rpc/get_status_from_command_result_test.cpp index b44e3a6fe22c9..c0e0ce172916f 100644 --- a/src/mongo/rpc/get_status_from_command_result_test.cpp +++ b/src/mongo/rpc/get_status_from_command_result_test.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/json.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/rpc/legacy_reply.cpp b/src/mongo/rpc/legacy_reply.cpp index affdadbd38c48..f853f35a44d2a 100644 --- a/src/mongo/rpc/legacy_reply.cpp +++ b/src/mongo/rpc/legacy_reply.cpp @@ -27,17 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/rpc/legacy_reply.h" - -#include -#include +#include -#include "mongo/bson/bson_validate.h" -#include "mongo/rpc/legacy_reply_builder.h" -#include "mongo/rpc/metadata.h" -#include "mongo/rpc/object_check.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/dbmessage.h" +#include "mongo/rpc/legacy_reply.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/rpc/legacy_reply.h b/src/mongo/rpc/legacy_reply.h index 62cd1e2799530..49fa546fbb158 100644 --- a/src/mongo/rpc/legacy_reply.h +++ b/src/mongo/rpc/legacy_reply.h @@ -29,8 +29,10 @@ #pragma once +#include "mongo/bson/bsonobj.h" #include "mongo/db/dbmessage.h" #include "mongo/db/jsobj.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/protocol.h" #include "mongo/rpc/reply_interface.h" diff --git a/src/mongo/rpc/legacy_reply_builder.cpp b/src/mongo/rpc/legacy_reply_builder.cpp index c9e970707dea8..4b0866db21c28 100644 --- a/src/mongo/rpc/legacy_reply_builder.cpp +++ b/src/mongo/rpc/legacy_reply_builder.cpp @@ -27,19 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/rpc/legacy_reply_builder.h" - -#include -#include +#include #include "mongo/db/dbmessage.h" -#include "mongo/db/jsobj.h" -#include "mongo/rpc/metadata.h" -#include "mongo/s/stale_exception.h" +#include "mongo/rpc/legacy_reply_builder.h" #include "mongo/util/assert_util.h" -#include "mongo/util/str.h" namespace mongo { namespace rpc { diff --git a/src/mongo/rpc/legacy_reply_builder.h b/src/mongo/rpc/legacy_reply_builder.h index 22a9c4566f6a7..0391871bf531d 100644 --- a/src/mongo/rpc/legacy_reply_builder.h +++ b/src/mongo/rpc/legacy_reply_builder.h @@ -29,9 +29,12 @@ #pragma once +#include #include #include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" #include "mongo/rpc/message.h" #include "mongo/rpc/protocol.h" diff --git a/src/mongo/rpc/legacy_request.cpp b/src/mongo/rpc/legacy_request.cpp index 79b0060e494bc..80cd9a1c77f49 100644 --- a/src/mongo/rpc/legacy_request.cpp +++ b/src/mongo/rpc/legacy_request.cpp @@ -27,15 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include - +#include "mongo/rpc/legacy_request.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/dbmessage.h" #include "mongo/db/namespace_string.h" -#include "mongo/rpc/legacy_request.h" #include "mongo/rpc/metadata.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace rpc { @@ -48,7 +47,7 @@ OpMsgRequest opMsgRequestFromLegacyRequest(const Message& message) { if (qm.queryOptions & QueryOption_Exhaust) { uasserted(18527, str::stream() << "The 'exhaust' OP_QUERY flag is invalid for commands: " - << ns.ns() << " " << qm.query.toString()); + << ns.toStringForErrorMsg() << " " << qm.query.toString()); } uassert(40473, diff --git a/src/mongo/rpc/message.cpp b/src/mongo/rpc/message.cpp index 69c8972f9d667..d5187012533a8 100644 --- a/src/mongo/rpc/message.cpp +++ b/src/mongo/rpc/message.cpp @@ -27,13 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/message.h" - #include +#include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/platform/atomic_word.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" namespace mongo { diff --git a/src/mongo/rpc/message.h b/src/mongo/rpc/message.h index 20db670d6a106..2ceb9f23cbd61 100644 --- a/src/mongo/rpc/message.h +++ b/src/mongo/rpc/message.h @@ -30,11 +30,17 @@ #pragma once #include +#include +#include +#include #include "mongo/base/data_type_endian.h" #include "mongo/base/data_view.h" #include "mongo/base/encoded_value_storage.h" #include "mongo/base/static_assert.h" +#include "mongo/bson/util/builder.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/shared_buffer.h" #include "mongo/util/str.h" namespace mongo { @@ -389,7 +395,7 @@ class Message { explicit Message(SharedBuffer data) : _buf(std::move(data)) {} MsgData::View header() const { - verify(!empty()); + MONGO_verify(!empty()); return _buf.get(); } @@ -431,7 +437,7 @@ class Message { // use to set first buffer if empty void setData(SharedBuffer buf) { - verify(empty()); + MONGO_verify(empty()); _buf = std::move(buf); } diff --git a/src/mongo/rpc/metadata.cpp b/src/mongo/rpc/metadata.cpp index 1586556a0f483..7db358c5482bd 100644 --- a/src/mongo/rpc/metadata.cpp +++ b/src/mongo/rpc/metadata.cpp @@ -27,27 +27,42 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/metadata.h" - +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/auth/validated_tenancy_scope.h" #include "mongo/db/dbmessage.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/logical_time_validator.h" -#include "mongo/db/multitenancy.h" +#include "mongo/db/operation_context.h" #include "mongo/db/vector_clock.h" #include "mongo/db/write_block_bypass.h" +#include "mongo/rpc/metadata.h" #include "mongo/rpc/metadata/client_metadata.h" #include "mongo/rpc/metadata/impersonated_user_metadata.h" #include "mongo/rpc/metadata/tracking_metadata.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" #include "mongo/util/string_map.h" #include "mongo/util/testing_proctor.h" +#include "mongo/util/uuid.h" namespace mongo { namespace rpc { +MONGO_FAIL_POINT_DEFINE(failIfOperationKeyMismatch); BSONObj makeEmptyMetadata() { return BSONObj(); @@ -83,10 +98,17 @@ void readRequestMetadata(OperationContext* opCtx, const OpMsg& opMsg, bool cmdRe if (clientOperationKeyElem && (TestingProctor::instance().isEnabled() || - authSession->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal))) { + authSession->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(opMsg.getValidatedTenantId()), + ActionType::internal))) { auto opKey = uassertStatusOK(UUID::parse(clientOperationKeyElem)); opCtx->setOperationKey(std::move(opKey)); + failIfOperationKeyMismatch.execute([&](const BSONObj& data) { + tassert(7446600, + "OperationKey in request does not match test provided OperationKey", + data["clientOperationKey"].String() == + opCtx->getOperationKey()->toBSON()["uuid"].String()); + }); } if (readPreferenceElem) { diff --git a/src/mongo/rpc/metadata.h b/src/mongo/rpc/metadata.h index e042f0f0ebb1d..cb09bbb8d9ae9 100644 --- a/src/mongo/rpc/metadata.h +++ b/src/mongo/rpc/metadata.h @@ -32,7 +32,13 @@ #include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/database_name.h" #include "mongo/rpc/op_msg.h" namespace mongo { diff --git a/src/mongo/rpc/metadata/client_metadata.cpp b/src/mongo/rpc/metadata/client_metadata.cpp index 24971fbdadcad..9244bd6101760 100644 --- a/src/mongo/rpc/metadata/client_metadata.cpp +++ b/src/mongo/rpc/metadata/client_metadata.cpp @@ -28,21 +28,33 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/metadata/client_metadata.h" - +#include +#include +#include +#include +#include #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include 
"mongo/platform/process_id.h" +#include "mongo/rpc/metadata/client_metadata.h" #include "mongo/s/is_mongos.h" -#include "mongo/util/debug_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/processinfo.h" #include "mongo/util/str.h" diff --git a/src/mongo/rpc/metadata/client_metadata.h b/src/mongo/rpc/metadata/client_metadata.h index 713fab5f819eb..1ce8a0481e7fe 100644 --- a/src/mongo/rpc/metadata/client_metadata.h +++ b/src/mongo/rpc/metadata/client_metadata.h @@ -29,11 +29,14 @@ #pragma once +#include #include +#include #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" diff --git a/src/mongo/rpc/metadata/client_metadata_test.cpp b/src/mongo/rpc/metadata/client_metadata_test.cpp index b50a9a69185e9..1d5942dbb031b 100644 --- a/src/mongo/rpc/metadata/client_metadata_test.cpp +++ b/src/mongo/rpc/metadata/client_metadata_test.cpp @@ -27,26 +27,25 @@ * it in the license file. */ +#include +#include -#include "mongo/platform/basic.h" - -#include "mongo/rpc/metadata/client_metadata.h" - -#include -#include +#include +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/json.h" #include "mongo/db/client.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/platform/process_id.h" +#include "mongo/rpc/metadata/client_metadata.h" #include "mongo/s/is_mongos.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/processinfo.h" -#include "mongo/util/scopeguard.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/testing_proctor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -351,7 +350,6 @@ TEST(ClientMetadataTest, TestMongoSAppend) { TEST(ClientMetadataTest, TestInvalidDocWhileSettingOpCtxMetadata) { auto svcCtx = ServiceContext::make(); - svcCtx->registerClientObserver(std::make_unique()); auto client = svcCtx->makeClient("ClientMetadataTest"); auto opCtx = client->makeOperationContext(); // metadataElem is of BSON type int @@ -371,7 +369,6 @@ TEST(ClientMetadataTest, TestInvalidDocWhileSettingOpCtxMetadata) { TEST(ClientMetadataTest, TestEooElemAsValueToSetOpCtxMetadata) { auto svcCtx = ServiceContext::make(); - svcCtx->registerClientObserver(std::make_unique()); auto client = svcCtx->makeClient("ClientMetadataTest"); auto opCtx = client->makeOperationContext(); // metadataElem is of BSON type eoo @@ -389,7 +386,6 @@ TEST(ClientMetadataTest, TestEooElemAsValueToSetOpCtxMetadata) { TEST(ClientMetadataTest, InternalClientLimit) { auto svcCtx = ServiceContext::make(); - svcCtx->registerClientObserver(std::make_unique()); auto client = svcCtx->makeClient("ClientMetadataTest"); std::string tooLargeValue(600, 'x'); diff --git a/src/mongo/rpc/metadata/egress_metadata_hook_list.cpp b/src/mongo/rpc/metadata/egress_metadata_hook_list.cpp index 9a09573955742..d8135c9c62623 100644 --- a/src/mongo/rpc/metadata/egress_metadata_hook_list.cpp +++ b/src/mongo/rpc/metadata/egress_metadata_hook_list.cpp @@ -27,12 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/rpc/metadata/egress_metadata_hook_list.h" namespace mongo { namespace rpc { diff --git a/src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp b/src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp index 65b9e7427585e..fa288d968c87c 100644 --- a/src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp +++ b/src/mongo/rpc/metadata/egress_metadata_hook_list_test.cpp @@ -27,15 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include + +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/net/hostandport.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" using std::string; diff --git a/src/mongo/rpc/metadata/impersonated_user_metadata.cpp b/src/mongo/rpc/metadata/impersonated_user_metadata.cpp index 944f98088e011..2cc5f88d9a978 100644 --- a/src/mongo/rpc/metadata/impersonated_user_metadata.cpp +++ b/src/mongo/rpc/metadata/impersonated_user_metadata.cpp @@ -27,12 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/metadata/impersonated_user_metadata.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/auth/auth_name.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/server_options.h" +#include "mongo/db/auth/role_name.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/metadata/impersonated_user_metadata.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/synchronized_value.h" namespace mongo { namespace rpc { @@ -82,10 +97,11 @@ void readImpersonatedUserMetadata(const BSONElement& elem, OperationContext* opC *getForOpCtx(opCtx) = std::move(newData); } -void writeAuthDataToImpersonatedUserMetadata(OperationContext* opCtx, BSONObjBuilder* out) { +boost::optional getAuthDataToImpersonatedUserMetadata( + OperationContext* opCtx) { // If we have no opCtx, which does appear to happen, don't do anything. 
if (!opCtx) { - return; + return {}; } // Otherwise construct a metadata section from the list of authenticated users/roles @@ -99,27 +115,68 @@ void writeAuthDataToImpersonatedUserMetadata(OperationContext* opCtx, BSONObjBui // If there are no users/roles being impersonated just exit if (!userName && !roleNames.more()) { - return; + return {}; } ImpersonatedUserMetadata metadata; - if (serverGlobalParams.featureCompatibility.isLessThanOrEqualTo( - multiversion::FeatureCompatibilityVersion::kVersion_6_2)) { - if (userName) { - metadata.setUsers({{userName.value()}}); - } else { - metadata.setUsers({}); - } - } else { - if (userName) { - metadata.setUser(userName.value()); - } + if (userName) { + metadata.setUser(userName.value()); } metadata.setRoles(roleNameIteratorToContainer>(roleNames)); + return metadata; +} + +void writeAuthDataToImpersonatedUserMetadata(OperationContext* opCtx, BSONObjBuilder* out) { + if (auto meta = getAuthDataToImpersonatedUserMetadata(opCtx)) { + BSONObjBuilder section(out->subobjStart(kImpersonationMetadataSectionName)); + meta->serialize(§ion); + } +} + +std::size_t estimateImpersonatedUserMetadataSize(OperationContext* opCtx) { + if (!opCtx) { + return 0; + } + + // Otherwise construct a metadata section from the list of authenticated users/roles + auto authSession = AuthorizationSession::get(opCtx->getClient()); + auto userName = authSession->getImpersonatedUserName(); + auto roleNames = authSession->getImpersonatedRoleNames(); + if (!userName && !roleNames.more()) { + userName = authSession->getAuthenticatedUserName(); + roleNames = authSession->getAuthenticatedRoleNames(); + } + + // If there are no users/roles being impersonated just exit + if (!userName && !roleNames.more()) { + return 0; + } + + std::size_t ret = 4 + // BSONObj size + 1 + kImpersonationMetadataSectionName.size() + 1 + // "$audit" sub-object key + 4; // $audit object length + + if (userName) { + // BSONObjType + "impersonatedUser" + NULL + UserName object. + ret += 1 + ImpersonatedUserMetadata::kUserFieldName.size() + 1 + userName->getBSONObjSize(); + } + + // BSONArrayType + "impersonatedRoles" + NULL + BSONArray Length + ret += 1 + ImpersonatedUserMetadata::kRolesFieldName.size() + 1 + 4; + for (std::size_t i = 0; roleNames.more(); roleNames.next(), ++i) { + // BSONType::Object + strlen(indexId) + NULL byte + // to_string(i).size() will be log10(i) plus some rounding and fuzzing. + // Increment prior to taking the log so that we never take log10(0) which is NAN. + // This estimates one extra byte every time we reach (i % 10) == 9. 
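        // [Editor's illustrative note, not part of the patch] A quick check of the
        // "1.1 + log10(i + 1)" estimate used on the next line, assuming the cast
        // truncates toward zero (e.g. static_cast<std::size_t>, the exact target type is
        // elided above):
        //   i = 0  -> trunc(1.1 + log10(1))   = 1 byte; key "0"  is 1 character
        //   i = 9  -> trunc(1.1 + log10(10))  = 2 bytes; key "9"  is 1 character (+1 slack)
        //   i = 42 -> trunc(1.1 + log10(43))  = 2 bytes; key "42" is 2 characters
        //   i = 99 -> trunc(1.1 + log10(100)) = 3 bytes; key "99" is 2 characters (+1 slack)
        // So the estimate never undercounts the decimal array-index key and overshoots by
        // at most one byte around each power of ten, which is the "rounding and fuzzing"
        // the comment above refers to.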
+ ret += 1 + static_cast(1.1 + log10(i + 1)) + 1; + ret += roleNames.get().getBSONObjSize(); + } + + // EOD terminators for: impersonatedRoles, $audit, and metadata + ret += 1 + 1 + 1; - BSONObjBuilder section(out->subobjStart(kImpersonationMetadataSectionName)); - metadata.serialize(§ion); + return ret; } } // namespace rpc diff --git a/src/mongo/rpc/metadata/impersonated_user_metadata.h b/src/mongo/rpc/metadata/impersonated_user_metadata.h index fb03cd740d6f8..428ba8acb08be 100644 --- a/src/mongo/rpc/metadata/impersonated_user_metadata.h +++ b/src/mongo/rpc/metadata/impersonated_user_metadata.h @@ -29,8 +29,14 @@ #pragma once +#include #include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/operation_context.h" #include "mongo/rpc/metadata/impersonated_user_metadata_gen.h" @@ -65,10 +71,22 @@ MaybeImpersonatedUserMetadata getImpersonatedUserMetadata(OperationContext* opCt */ void readImpersonatedUserMetadata(const BSONElement& elem, OperationContext* opCtx); +/* + * Get impersonation metadata off the opCtx + */ +boost::optional getAuthDataToImpersonatedUserMetadata( + OperationContext* opCtx); + /* * Writes the current impersonation metadata off the opCtx and into a BSONObjBuilder */ void writeAuthDataToImpersonatedUserMetadata(OperationContext* opCtx, BSONObjBuilder* out); +/* + * Estimates the size of impersonation metadata which will be written by + * writeAuthDataToImpersonatedUserMetadata. + */ +std::size_t estimateImpersonatedUserMetadataSize(OperationContext* opCtx); + } // namespace rpc } // namespace mongo diff --git a/src/mongo/rpc/metadata/oplog_query_metadata.cpp b/src/mongo/rpc/metadata/oplog_query_metadata.cpp index 577f7296f7a7a..606d7f144a0d1 100644 --- a/src/mongo/rpc/metadata/oplog_query_metadata.cpp +++ b/src/mongo/rpc/metadata/oplog_query_metadata.cpp @@ -29,11 +29,17 @@ #include "mongo/rpc/metadata/oplog_query_metadata.h" -#include "mongo/bson/util/bson_check.h" +#include + +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/bson_extract_optime.h" -#include "mongo/rpc/metadata.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace rpc { diff --git a/src/mongo/rpc/metadata/oplog_query_metadata.h b/src/mongo/rpc/metadata/oplog_query_metadata.h index a2e42519ec541..5fcab13452444 100644 --- a/src/mongo/rpc/metadata/oplog_query_metadata.h +++ b/src/mongo/rpc/metadata/oplog_query_metadata.h @@ -29,6 +29,11 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/oid.h" #include "mongo/db/repl/optime.h" diff --git a/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp b/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp index f0515ac100fdf..3505a2a06389a 100644 --- a/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp +++ b/src/mongo/rpc/metadata/oplog_query_metadata_test.cpp @@ -27,11 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/rpc/metadata/oplog_query_metadata.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { namespace rpc { diff --git a/src/mongo/rpc/metadata/repl_set_metadata.cpp b/src/mongo/rpc/metadata/repl_set_metadata.cpp index d68e1f2d028b9..237cff5b7c154 100644 --- a/src/mongo/rpc/metadata/repl_set_metadata.cpp +++ b/src/mongo/rpc/metadata/repl_set_metadata.cpp @@ -29,11 +29,18 @@ #include "mongo/rpc/metadata/repl_set_metadata.h" -#include "mongo/bson/util/bson_check.h" +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" #include "mongo/db/repl/bson_extract_optime.h" -#include "mongo/rpc/metadata.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace rpc { diff --git a/src/mongo/rpc/metadata/repl_set_metadata.h b/src/mongo/rpc/metadata/repl_set_metadata.h index 88aabbc4d1480..ee21bd4f36381 100644 --- a/src/mongo/rpc/metadata/repl_set_metadata.h +++ b/src/mongo/rpc/metadata/repl_set_metadata.h @@ -29,6 +29,11 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/oid.h" #include "mongo/db/repl/optime.h" diff --git a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp index 16b0722f3fe09..9b7f80bf46e5c 100644 --- a/src/mongo/rpc/metadata/repl_set_metadata_test.cpp +++ b/src/mongo/rpc/metadata/repl_set_metadata_test.cpp @@ -27,11 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/rpc/metadata/repl_set_metadata.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { namespace rpc { diff --git a/src/mongo/rpc/metadata/security_token_metadata_test.cpp b/src/mongo/rpc/metadata/security_token_metadata_test.cpp index 3e4599c1a7433..ec8dc077e6756 100644 --- a/src/mongo/rpc/metadata/security_token_metadata_test.cpp +++ b/src/mongo/rpc/metadata/security_token_metadata_test.cpp @@ -27,19 +27,35 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/oid.h" -#include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/security_token_gen.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/auth/validated_tenancy_scope.h" -#include "mongo/db/client.h" -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/rpc/op_msg.h" #include "mongo/rpc/op_msg_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace rpc { @@ -58,7 +74,7 @@ BSONObj makeSecurityToken(const UserName& userName) { .getOwned(); } -class SecurityTokenMetadataTest : public LockerNoopServiceContextTest { +class SecurityTokenMetadataTest : public ServiceContextTest { protected: void setUp() final { client = getServiceContext()->makeClient("test"); diff --git a/src/mongo/rpc/metadata/tracking_metadata.cpp b/src/mongo/rpc/metadata/tracking_metadata.cpp index ba2fedb5d4d09..3a24de240b0b1 100644 --- a/src/mongo/rpc/metadata/tracking_metadata.cpp +++ b/src/mongo/rpc/metadata/tracking_metadata.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/rpc/metadata/tracking_metadata.h" +#include +#include -#include "mongo/bson/util/bson_check.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/jsobj.h" -#include "mongo/rpc/metadata.h" +#include "mongo/rpc/metadata/tracking_metadata.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace rpc { diff --git a/src/mongo/rpc/metadata/tracking_metadata.h b/src/mongo/rpc/metadata/tracking_metadata.h index dada8332874e0..914397d2434fd 100644 --- a/src/mongo/rpc/metadata/tracking_metadata.h +++ b/src/mongo/rpc/metadata/tracking_metadata.h @@ -29,6 +29,16 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/jsobj.h" #include "mongo/db/operation_context.h" diff --git a/src/mongo/rpc/metadata/tracking_metadata_test.cpp b/src/mongo/rpc/metadata/tracking_metadata_test.cpp index 2244483dfb525..124b41a78e3c3 100644 --- a/src/mongo/rpc/metadata/tracking_metadata_test.cpp +++ b/src/mongo/rpc/metadata/tracking_metadata_test.cpp @@ -27,14 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/repl/optime.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/rpc/metadata/tracking_metadata.h" -#include "mongo/stdx/chrono.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/rpc/metadata_test.cpp b/src/mongo/rpc/metadata_test.cpp index 6084a3b731f88..6c7e4299a340c 100644 --- a/src/mongo/rpc/metadata_test.cpp +++ b/src/mongo/rpc/metadata_test.cpp @@ -27,15 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include #include +#include + +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/json.h" -#include "mongo/db/dbmessage.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/tenant_id.h" #include "mongo/rpc/metadata.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/shared_buffer.h" namespace { using namespace mongo; @@ -50,7 +62,9 @@ void checkUpconvert(const BSONObj& legacyCommand, const int legacyQueryFlags, BSONObj upconvertedCommand) { upconvertedCommand = addDollarDB(std::move(upconvertedCommand), "db"); - auto converted = upconvertRequest({boost::none, "db"}, legacyCommand, legacyQueryFlags); + auto converted = upconvertRequest(DatabaseName::createDatabaseName_forTest(boost::none, "db"), + legacyCommand, + legacyQueryFlags); // We don't care about the order of the fields in the metadata object const auto sorted = [](const BSONObj& obj) { @@ -99,9 +113,11 @@ TEST(Metadata, UpconvertDuplicateReadPreference) { bob.append("$queryOptions", BSON("$readPreference" << secondaryReadPref)); bob.append("$readPreference", nearestReadPref); - ASSERT_THROWS_CODE(rpc::upconvertRequest({boost::none, "db"}, bob.obj(), 0), - AssertionException, - ErrorCodes::InvalidOptions); + ASSERT_THROWS_CODE( + rpc::upconvertRequest( + DatabaseName::createDatabaseName_forTest(boost::none, "db"), bob.obj(), 0), + AssertionException, + ErrorCodes::InvalidOptions); } TEST(Metadata, UpconvertUsesDocumentSequecesCorrectly) { @@ -123,7 +139,8 @@ TEST(Metadata, UpconvertUsesDocumentSequecesCorrectly) { }; for (const auto& cmd : valid) { - const auto converted = rpc::upconvertRequest({boost::none, "db"}, cmd, 0); + const auto converted = rpc::upconvertRequest( + DatabaseName::createDatabaseName_forTest(boost::none, "db"), cmd, 0); ASSERT_BSONOBJ_EQ(converted.body, fromjson("{insert: 'coll', $db: 'db'}")); ASSERT_EQ(converted.sequences.size(), 1u); ASSERT_EQ(converted.sequences[0].name, "documents"); @@ -145,7 +162,8 @@ TEST(Metadata, UpconvertUsesDocumentSequecesCorrectly) { } for (const auto& cmd : invalid) { - const auto converted = rpc::upconvertRequest({boost::none, "db"}, cmd, 0); + const auto converted = rpc::upconvertRequest( + DatabaseName::createDatabaseName_forTest(boost::none, "db"), cmd, 0); ASSERT_BSONOBJ_EQ(converted.body, addDollarDB(cmd, "db")); ASSERT_EQ(converted.sequences.size(), 0u); } diff --git a/src/mongo/rpc/object_check.cpp b/src/mongo/rpc/object_check.cpp index 2109d96776d65..b6c71981470b3 100644 --- 
a/src/mongo/rpc/object_check.cpp +++ b/src/mongo/rpc/object_check.cpp @@ -26,10 +26,7 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/object_check.h" - +#include "mongo/rpc/object_check.h" // IWYU pragma: keep #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/rpc/object_check.h b/src/mongo/rpc/object_check.h index ac9ddb899d8be..840dc9e8d51e9 100644 --- a/src/mongo/rpc/object_check.h +++ b/src/mongo/rpc/object_check.h @@ -30,12 +30,19 @@ #pragma once #include +#include +#include #include "mongo/base/data_type_validated.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsontypes.h" #include "mongo/db/server_options.h" #include "mongo/logv2/redaction.h" +#include "mongo/util/assert_util.h" #include "mongo/util/hex.h" // We do not use the rpc namespace here so we can specialize Validator. @@ -64,7 +71,7 @@ struct Validator { ", length: " + std::to_string(length) + // Using std::min with hex dump length, too, to ensure we do not throw in hexdump() // because of exceeded length and miss out on the core dump of the fassert below. - ", hex dump: " + hexdump(ptr, std::min(length, (size_t)(1000000 - 1))); + ", hex dump: " + hexdump(ptr, std::min(length, kHexDumpMaxSize - 1)); Status builtStatus(ErrorCodes::InvalidBSON, redact(msg)); fassertFailedWithStatus(50761, builtStatus); } diff --git a/src/mongo/rpc/object_check_test.cpp b/src/mongo/rpc/object_check_test.cpp index 6cb6120255189..7846c0e408fc9 100644 --- a/src/mongo/rpc/object_check_test.cpp +++ b/src/mongo/rpc/object_check_test.cpp @@ -27,16 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +#include #include "mongo/base/data_range_cursor.h" -#include "mongo/db/jsobj.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/server_options.h" -#include "mongo/rpc/object_check.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/scopeguard.h" namespace { diff --git a/src/mongo/rpc/op_legacy_integration_test.cpp b/src/mongo/rpc/op_legacy_integration_test.cpp index 4c8b1e67e5ca7..12f08587ae3e2 100644 --- a/src/mongo/rpc/op_legacy_integration_test.cpp +++ b/src/mongo/rpc/op_legacy_integration_test.cpp @@ -28,12 +28,40 @@ */ +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbmessage.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/tenant_id.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/bufreader.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -129,7 +157,7 @@ TEST(OpLegacy, GetLastError) { static const auto getLastErrorCommand = fromjson(R"({"getlasterror": 1})"); BSONObj replyObj; - conn->runCommand({boost::none, "admin"}, getLastErrorCommand, replyObj); + conn->runCommand(DatabaseName::kAdmin, getLastErrorCommand, replyObj); // 'getLastError' command is no longer supported and will always fail. auto status = getStatusFromCommandResult(replyObj); @@ -181,7 +209,9 @@ TEST(OpLegacy, UnsupportedReadOps) { documents: [ {a: 1},{a: 2},{a: 3},{a: 4},{a: 5},{a: 6},{a: 7} ] })"); BSONObj ignoreResponse; - ASSERT(conn->runCommand({boost::none, "testOpLegacy"}, insert, ignoreResponse)); + ASSERT(conn->runCommand(DatabaseName::createDatabaseName_forTest(boost::none, "testOpLegacy"), + insert, + ignoreResponse)); // Issue the unsupported requests. They all should fail one way or another. Message opQueryRequest = makeUnsupportedOpQueryMessage(ns, @@ -241,7 +271,7 @@ void testAllowedCommand(const char* command, auto serverStatusCmd = fromjson("{serverStatus: 1}"); BSONObj serverStatus; - ASSERT(conn->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatus)); + ASSERT(conn->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatus)); auto opCountersPrior = serverStatus["opcounters"]["deprecated"]; const auto queryCountPrior = opCountersPrior ? 
opCountersPrior["query"].Long() : 0; @@ -253,7 +283,7 @@ void testAllowedCommand(const char* command, auto status = getStatusFromCommandResult(obj); ASSERT_EQ(status.code(), code); - ASSERT(conn->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatus)); + ASSERT(conn->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatus)); auto opCounters = serverStatus["opcounters"]["deprecated"]; const auto queryCount = opCounters ? opCounters["query"].Long() : 0; diff --git a/src/mongo/rpc/op_msg.cpp b/src/mongo/rpc/op_msg.cpp index 88b040e77a401..e4fb3c8bb68e5 100644 --- a/src/mongo/rpc/op_msg.cpp +++ b/src/mongo/rpc/op_msg.cpp @@ -28,24 +28,39 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/rpc/op_msg.h" - #include +#include +#include +#include #include +#include +#include +#include +#include + #include "mongo/base/data_type_endian.h" -#include "mongo/config.h" -#include "mongo/db/auth/security_token_gen.h" +#include "mongo/base/data_type_validated.h" +#include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/bson/dotted_path_support.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/multitenancy_gen.h" #include "mongo/db/server_feature_flags_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/serverless/multitenancy_check.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/object_check.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/rpc/op_msg.h" #include "mongo/util/bufreader.h" #include "mongo/util/database_name_util.h" +#include "mongo/util/debug_util.h" #include "mongo/util/hex.h" +#include "mongo/util/str.h" #ifdef MONGO_CONFIG_WIREDTIGER_ENABLED #include @@ -73,8 +88,8 @@ enum class Section : uint8_t { constexpr int kCrc32Size = 4; #ifdef MONGO_CONFIG_WIREDTIGER_ENABLED -// All fields including size, requestId, and responseTo must already be set. The size must already -// include the final 4-byte checksum. +// All fields including size, requestId, and responseTo must already be set. The size must +// already include the final 4-byte checksum. uint32_t calculateChecksum(const Message& message) { if (message.operation() != dbMsg) { return 0; @@ -158,7 +173,8 @@ OpMsg OpMsg::parse(const Message& message, Client* client) try { // The sections begin after the flags and before the checksum (if present). BufReader sectionsBuf(message.singleData().data() + sizeof(flags), dataSize); - // TODO some validation may make more sense in the IDL parser. I've tagged them with comments. + // TODO some validation may make more sense in the IDL parser. I've tagged them with + // comments. bool haveBody = false; OpMsg msg; BSONObj securityToken; @@ -170,16 +186,17 @@ OpMsg OpMsg::parse(const Message& message, Client* client) try { haveBody = true; msg.body = sectionsBuf.read>(); - uassert(ErrorCodes::InvalidOptions, - "Multitenancy not enabled, cannot set $tenant in command body", - gMultitenancySupport || !msg.body["$tenant"_sd]); + if (auto* multitenancyCheck = MultitenancyCheck::getPtr()) { + multitenancyCheck->checkDollarTenantField(msg.body); + } break; } case Section::kDocSequence: { - // We use an O(N^2) algorithm here and an O(N*M) algorithm below. These are fastest - // for the current small values of N, but would be problematic if it is large. - // If we need more document sequences, raise the limit and use a better algorithm. 
+ // We use an O(N^2) algorithm here and an O(N*M) algorithm below. These are + // fastest for the current small values of N, but would be problematic if it is + // large. If we need more document sequences, raise the limit and use a better + // algorithm. uassert(ErrorCodes::TooManyDocumentSequences, "Too many document sequences in OP_MSG", msg.sequences.size() < 2); // Limit is <=2 since we are about to add one. @@ -248,13 +265,17 @@ OpMsg OpMsg::parse(const Message& message, Client* client) try { "invalid message: {ex_code} {ex} -- {hexdump_message_singleData_view2ptr_message_size}", "ex_code"_attr = ex.code(), "ex"_attr = redact(ex), + // Using std::min to reduce the size of the output and ensure we do not throw in hexdump() + // because of the exceeded length. "hexdump_message_singleData_view2ptr_message_size"_attr = - redact(hexdump(message.singleData().view2ptr(), message.size()))); + redact(hexdump(message.singleData().view2ptr(), + std::min(static_cast(message.size()), kHexDumpMaxSize - 1)))); throw; } OpMsgRequest OpMsgRequest::fromDBAndBody(StringData db, BSONObj body, const BSONObj& extraFields) { - return OpMsgRequestBuilder::create({boost::none, db}, std::move(body), extraFields); + return OpMsgRequestBuilder::create( + DatabaseNameUtil::deserialize(boost::none, db), std::move(body), extraFields); } boost::optional parseDollarTenant(const BSONObj body) { @@ -278,8 +299,7 @@ bool appendDollarTenant(BSONObjBuilder& builder, } if (gMultitenancySupport) { - if (serverGlobalParams.featureCompatibility.isVersionInitialized() && - gFeatureFlagRequireTenantID.isEnabled(serverGlobalParams.featureCompatibility)) { + if (gFeatureFlagRequireTenantID.isEnabled(serverGlobalParams.featureCompatibility)) { tenant.serializeToBSON("$tenant", &builder); return true; } @@ -292,7 +312,7 @@ void appendDollarDbAndTenant(BSONObjBuilder& builder, boost::optional existingDollarTenant = boost::none) { if (!dbName.tenantId() || appendDollarTenant(builder, dbName.tenantId().value(), existingDollarTenant)) { - builder.append("$db", dbName.db()); + builder.append("$db", dbName.serializeWithoutTenantPrefix()); } else { builder.append("$db", DatabaseNameUtil::serialize(dbName)); } @@ -334,10 +354,36 @@ OpMsgRequest OpMsgRequestBuilder::createWithValidatedTenancyScope( request.validatedTenancyScope = validatedTenancyScope; return request; } +namespace { +std::string compactStr(const std::string& input) { + if (input.length() > 2024) { + return input.substr(0, 1000) + " ... " + input.substr(input.length() - 1000); + } + return input; +} +} // namespace OpMsgRequest OpMsgRequestBuilder::create(const DatabaseName& dbName, BSONObj body, const BSONObj& extraFields) { + int bodySize = body.objsize(); + int extraFieldsSize = extraFields.objsize(); + + // Log a warning if the sum of the sizes of 'body' and 'extraFields' exceeds + // 'BSONObjMaxInternalSize'. 
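    // [Editor's illustrative note, not part of the patch] compactStr() above keeps this
    // warning readable for huge requests: any string of 2024 characters or fewer is logged
    // verbatim, while, for example, a 50,000-character body.toString() is logged as its first
    // 1,000 characters, the literal " ... ", and its last 1,000 characters (2,005 characters
    // in total).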
+ if (bodySize + extraFieldsSize > BSONObjMaxInternalSize) { + LOGV2_WARNING( + 6491800, + "Request body exceeded limit with body.objsize() = {bodySize} bytes, " + "extraFields.objsize() = {extraFieldsSize} bytes, body.toString() = {body}, db = " + "{db}, extraFields.toString() = {extraFields}", + "bodySize"_attr = bodySize, + "extraFieldsSize"_attr = extraFieldsSize, + "body"_attr = compactStr(body.toString()), + "db"_attr = dbName.toStringForErrorMsg(), + "extraFields"_attr = compactStr(extraFields.toString())); + } + auto dollarTenant = parseDollarTenant(body); BSONObjBuilder bodyBuilder(std::move(body)); bodyBuilder.appendElements(extraFields); diff --git a/src/mongo/rpc/op_msg.h b/src/mongo/rpc/op_msg.h index 6563bbe747361..217ffd5b27e1a 100644 --- a/src/mongo/rpc/op_msg.h +++ b/src/mongo/rpc/op_msg.h @@ -30,15 +30,31 @@ #pragma once #include +#include +#include #include +#include +#include +#include +#include #include +#include #include #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/database_name.h" #include "mongo/db/jsobj.h" +#include "mongo/db/tenant_id.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/message.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/shared_buffer.h" namespace mongo { @@ -190,6 +206,17 @@ struct OpMsgRequest : public OpMsg { static OpMsgRequest fromDBAndBody(StringData db, BSONObj body, const BSONObj& extraFields = {}); + // There are no valid reasons for which a database name should not be a string, but + // some autogenerated tests can create invalid entries, and some areas of the codebase + // are required never to throw an exception; these functions should use this version + // of the getDatabase API. 
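    // [Editor's illustrative sketch, not part of the patch] The behavioural difference between
    // the two accessors, assuming a hand-built request whose "$db" field is not a string:
    //
    //     OpMsgRequest req;
    //     req.body = BSON("ping" << 1 << "$db" << 1);  // $db is an int here, not a string
    //     req.getDatabaseNoThrow();  // returns ""_sd and never throws
    //     req.getDatabase();         // uasserts, since checkAndGetStringData() rejects
    //                                // non-string elements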
+ StringData getDatabaseNoThrow() const noexcept { + if (auto elem = body["$db"]; elem.type() == mongo::String) { + return elem.valueStringData(); + } + return ""_sd; + } + StringData getDatabase() const { if (auto elem = body["$db"]) return elem.checkAndGetStringData(); diff --git a/src/mongo/rpc/op_msg_integration_test.cpp b/src/mongo/rpc/op_msg_integration_test.cpp index 30946582a76d6..59e1ad7acc308 100644 --- a/src/mongo/rpc/op_msg_integration_test.cpp +++ b/src/mongo/rpc/op_msg_integration_test.cpp @@ -28,21 +28,66 @@ */ +#include +#include #include - -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/client/dbclient_rs.h" +#include "mongo/client/mongo_uri.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/getmore_command_gen.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_interface.h" +#include "mongo/rpc/unique_message.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/integration_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_options.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -112,7 +157,7 @@ TEST(OpMsg, UnknownOptionalFlagIsIgnored) { TEST(OpMsg, FireAndForgetInsertWorks) { auto conn = getIntegrationTestConnection(); - conn->dropCollection(NamespaceString("test.collection")); + conn->dropCollection(NamespaceString::createNamespaceString_forTest("test.collection")); conn->runFireAndForgetCommand(OpMsgRequest::fromDBAndBody("test", fromjson(R"({ insert: "collection", @@ -128,7 +173,7 @@ TEST(OpMsg, FireAndForgetInsertWorks) { TEST(OpMsg, DocumentSequenceLargeDocumentMultiInsertWorks) { auto conn = getIntegrationTestConnection(); - conn->dropCollection(NamespaceString("test.collection")); + conn->dropCollection(NamespaceString::createNamespaceString_forTest("test.collection")); OpMsgBuilder msgBuilder; @@ -159,7 +204,7 @@ TEST(OpMsg, DocumentSequenceLargeDocumentMultiInsertWorks) { TEST(OpMsg, DocumentSequenceMaxWriteBatchWorks) { auto conn = getIntegrationTestConnection(); - conn->dropCollection(NamespaceString("test.collection")); + 
conn->dropCollection(NamespaceString::createNamespaceString_forTest("test.collection")); OpMsgBuilder msgBuilder; @@ -240,7 +285,7 @@ TEST(OpMsg, CloseConnectionOnFireAndForgetNotWritablePrimaryError) { // Disable eager checking of primary to simulate a stepdown occurring after the check. This // should respect w:0. BSONObj output; - ASSERT(conn.runCommand({boost::none, "admin"}, + ASSERT(conn.runCommand(DatabaseName::kAdmin, fromjson(R"({ configureFailPoint: 'skipCheckingForNotPrimaryInCommandDispatch', mode: 'alwaysOn' @@ -249,7 +294,7 @@ TEST(OpMsg, CloseConnectionOnFireAndForgetNotWritablePrimaryError) { << output; ON_BLOCK_EXIT([&] { uassertStatusOK(conn.connect(host, "integration_test-cleanup", boost::none)); - ASSERT(conn.runCommand({boost::none, "admin"}, + ASSERT(conn.runCommand(DatabaseName::kAdmin, fromjson(R"({ configureFailPoint: 'skipCheckingForNotPrimaryInCommandDispatch', @@ -344,7 +389,7 @@ void exhaustGetMoreTest(bool enableChecksum) { // Issue a find request to open a cursor but return 0 documents. Specify a sort in order to // guarantee their return order. auto findCmd = BSON("find" << nss.coll() << "batchSize" << 0 << "sort" << BSON("_id" << 1)); - auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), findCmd); + auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), findCmd); auto request = opMsgRequest.serialize(); Message reply; @@ -361,7 +406,7 @@ void exhaustGetMoreTest(bool enableChecksum) { int batchSize = 2; GetMoreCommandRequest getMoreRequest(cursorId, nss.coll().toString()); getMoreRequest.setBatchSize(batchSize); - opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), getMoreRequest.toBSON({})); + opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), getMoreRequest.toBSON({})); request = opMsgRequest.serialize(); OpMsg::setFlag(&request, OpMsg::kExhaustSupported); @@ -430,7 +475,7 @@ TEST(OpMsg, FindIgnoresExhaust) { // Issue a find request with exhaust flag. Returns 0 documents. auto findCmd = BSON("find" << nss.coll() << "batchSize" << 0); - auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), findCmd); + auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), findCmd); auto request = opMsgRequest.serialize(); OpMsg::setFlag(&request, OpMsg::kExhaustSupported); @@ -462,7 +507,7 @@ TEST(OpMsg, ServerDoesNotSetMoreToComeOnErrorInGetMore) { // Issue a find request to open a cursor but return 0 documents. auto findCmd = BSON("find" << nss.coll() << "batchSize" << 0); - auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), findCmd); + auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), findCmd); auto request = opMsgRequest.serialize(); Message reply; @@ -479,7 +524,7 @@ TEST(OpMsg, ServerDoesNotSetMoreToComeOnErrorInGetMore) { int batchSize = 2; GetMoreCommandRequest getMoreRequest(cursorId, nss.coll().toString()); getMoreRequest.setBatchSize(batchSize); - opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), getMoreRequest.toBSON({})); + opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), getMoreRequest.toBSON({})); request = opMsgRequest.serialize(); OpMsg::setFlag(&request, OpMsg::kExhaustSupported); @@ -510,7 +555,7 @@ TEST(OpMsg, MongosIgnoresExhaustForGetMore) { // Issue a find request to open a cursor but return 0 documents. Specify a sort in order to // guarantee their return order. 
auto findCmd = BSON("find" << nss.coll() << "batchSize" << 0 << "sort" << BSON("_id" << 1)); - auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), findCmd); + auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), findCmd); auto request = opMsgRequest.serialize(); Message reply; @@ -524,7 +569,7 @@ TEST(OpMsg, MongosIgnoresExhaustForGetMore) { int batchSize = 2; GetMoreCommandRequest getMoreRequest(cursorId, nss.coll().toString()); getMoreRequest.setBatchSize(batchSize); - opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), getMoreRequest.toBSON({})); + opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), getMoreRequest.toBSON({})); request = opMsgRequest.serialize(); OpMsg::setFlag(&request, OpMsg::kExhaustSupported); @@ -563,7 +608,7 @@ TEST(OpMsg, ExhaustWorksForAggCursor) { // guarantee their return order. auto aggCmd = BSON("aggregate" << nss.coll() << "cursor" << BSON("batchSize" << 0) << "pipeline" << BSON_ARRAY(BSON("$sort" << BSON("_id" << 1)))); - auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), aggCmd); + auto opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), aggCmd); auto request = opMsgRequest.serialize(); Message reply; @@ -578,7 +623,7 @@ TEST(OpMsg, ExhaustWorksForAggCursor) { int batchSize = 2; GetMoreCommandRequest getMoreRequest(cursorId, nss.coll().toString()); getMoreRequest.setBatchSize(batchSize); - opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db(), getMoreRequest.toBSON({})); + opMsgRequest = OpMsgRequest::fromDBAndBody(nss.db_forTest(), getMoreRequest.toBSON({})); request = opMsgRequest.serialize(); OpMsg::setFlag(&request, OpMsg::kExhaustSupported); @@ -780,7 +825,7 @@ void serverStatusCorrectlyShowsExhaustMetrics(std::string commandName) { ASSERT(waitForCondition([&] { auto serverStatusCmd = BSON("serverStatus" << 1); BSONObj serverStatusReply; - ASSERT(conn->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); return serverStatusReply["connections"]["exhaustIsMaster"].numberInt() == 0 && serverStatusReply["connections"]["exhaustHello"].numberInt() == 0; })); @@ -815,7 +860,7 @@ void serverStatusCorrectlyShowsExhaustMetrics(std::string commandName) { auto serverStatusCmd = BSON("serverStatus" << 1); BSONObj serverStatusReply; - ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); if (useLegacyCommandName) { ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); @@ -854,7 +899,7 @@ void exhaustMetricSwitchingCommandNames(bool useLegacyCommandNameAtStart) { ASSERT(waitForCondition([&] { auto serverStatusCmd = BSON("serverStatus" << 1); BSONObj serverStatusReply; - ASSERT(conn1->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn1->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); return serverStatusReply["connections"]["exhaustIsMaster"].numberInt() == 0 && serverStatusReply["connections"]["exhaustHello"].numberInt() == 0; })); @@ -901,7 +946,7 @@ void exhaustMetricSwitchingCommandNames(bool useLegacyCommandNameAtStart) { auto serverStatusCmd = BSON("serverStatus" << 1); BSONObj serverStatusReply; - ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, 
serverStatusCmd, serverStatusReply)); if (useLegacyCommandNameAtStart) { ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); @@ -928,7 +973,7 @@ void exhaustMetricSwitchingCommandNames(bool useLegacyCommandNameAtStart) { })); // Terminating the exhaust stream should not decrement the number of exhaust connections. - ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); if (useLegacyCommandNameAtStart) { ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); @@ -956,7 +1001,7 @@ void exhaustMetricSwitchingCommandNames(bool useLegacyCommandNameAtStart) { // exhaust metric should decrease for the exhaust type that was closed, and increase for the // exhaust type that was just opened. - ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); if (useLegacyCommandNameAtStart) { ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustHello"].numberInt()); @@ -991,7 +1036,7 @@ void exhaustMetricDecrementsOnNewOpAfterTerminatingExhaustStream(bool useLegacyC ASSERT(waitForCondition([&] { auto serverStatusCmd = BSON("serverStatus" << 1); BSONObj serverStatusReply; - ASSERT(conn1->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn1->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); return serverStatusReply["connections"]["exhaustIsMaster"].numberInt() == 0 && serverStatusReply["connections"]["exhaustHello"].numberInt() == 0; })); @@ -1037,7 +1082,7 @@ void exhaustMetricDecrementsOnNewOpAfterTerminatingExhaustStream(bool useLegacyC auto serverStatusCmd = BSON("serverStatus" << 1); BSONObj serverStatusReply; - ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); if (useLegacyCommandName) { ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); @@ -1064,7 +1109,7 @@ void exhaustMetricDecrementsOnNewOpAfterTerminatingExhaustStream(bool useLegacyC })); // Terminating the exhaust stream should not decrement the number of exhaust connections. - ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); if (useLegacyCommandName) { ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); @@ -1075,7 +1120,7 @@ void exhaustMetricDecrementsOnNewOpAfterTerminatingExhaustStream(bool useLegacyC // exhaust metric should now decrement after calling serverStatus on the connection that used // to have the exhaust stream. 
- ASSERT(conn1->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn1->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); } @@ -1104,7 +1149,7 @@ void exhaustMetricOnNewExhaustAfterTerminatingExhaustStream(bool useLegacyComman ASSERT(waitForCondition([&] { auto serverStatusCmd = BSON("serverStatus" << 1); BSONObj serverStatusReply; - ASSERT(conn1->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn1->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); return serverStatusReply["connections"]["exhaustIsMaster"].numberInt() == 0 && serverStatusReply["connections"]["exhaustHello"].numberInt() == 0; })); @@ -1150,7 +1195,7 @@ void exhaustMetricOnNewExhaustAfterTerminatingExhaustStream(bool useLegacyComman auto serverStatusCmd = BSON("serverStatus" << 1); BSONObj serverStatusReply; - ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); if (useLegacyCommandName) { ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); @@ -1177,7 +1222,7 @@ void exhaustMetricOnNewExhaustAfterTerminatingExhaustStream(bool useLegacyComman })); // Terminating the exhaust stream should not decrement the number of exhaust connections. - ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); if (useLegacyCommandName) { ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); @@ -1197,7 +1242,7 @@ void exhaustMetricOnNewExhaustAfterTerminatingExhaustStream(bool useLegacyComman ASSERT_OK(getStatusFromCommandResult(res)); // exhaust metric should not increment or decrement after initiating a new exhaust stream. 
- ASSERT(conn2->runCommand({boost::none, "admin"}, serverStatusCmd, serverStatusReply)); + ASSERT(conn2->runCommand(DatabaseName::kAdmin, serverStatusCmd, serverStatusReply)); if (useLegacyCommandName) { ASSERT_EQUALS(1, serverStatusReply["connections"]["exhaustIsMaster"].numberInt()); ASSERT_EQUALS(0, serverStatusReply["connections"]["exhaustHello"].numberInt()); diff --git a/src/mongo/rpc/op_msg_test.cpp b/src/mongo/rpc/op_msg_test.cpp index 01b1f91728d34..3e687f40c2fc6 100644 --- a/src/mongo/rpc/op_msg_test.cpp +++ b/src/mongo/rpc/op_msg_test.cpp @@ -28,26 +28,56 @@ */ -#include "mongo/rpc/op_msg_test.h" - -#include "mongo/platform/basic.h" - -#include "mongo/base/static_assert.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/auth_name.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_manager_impl.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/authorization_session_impl.h" #include "mongo/db/auth/authz_manager_external_state_mock.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/role_name.h" #include "mongo/db/auth/security_token_gen.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/auth/validated_tenancy_scope.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/multitenancy_gen.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/tenant_id.h" #include "mongo/idl/server_parameter_test_util.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/death_test.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/rpc/op_msg_test.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" #include "mongo/util/hex.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -63,7 +93,7 @@ class AuthorizationSessionImplTestHelper { static void grantUseTenant(Client& client) { User user(UserRequest(UserName("useTenant", "admin"), boost::none)); user.setPrivileges( - {Privilege(ResourcePattern::forClusterResource(), ActionType::useTenant)}); + {Privilege(ResourcePattern::forClusterResource(boost::none), ActionType::useTenant)}); auto* as = dynamic_cast(AuthorizationSession::get(client)); if (as->_authenticatedUser != boost::none) { as->logoutAllDatabases(&client, "AuthorizationSessionImplTestHelper"_sd); @@ -943,7 +973,8 @@ TEST(OpMsgRequestBuilder, WithTenantInDatabaseName) { true); const TenantId tenantId(OID::gen()); auto const body = fromjson("{ping: 1}"); - OpMsgRequest msg = OpMsgRequestBuilder::create({tenantId, "testDb"}, body); + OpMsgRequest msg = OpMsgRequestBuilder::create( + DatabaseName::createDatabaseName_forTest(tenantId, "testDb"), body); ASSERT_EQ(msg.body.getField("$tenant").eoo(), false); 
ASSERT_EQ(TenantId::parseFromBSON(msg.body.getField("$tenant")), tenantId); ASSERT_EQ(msg.getDatabase(), "testDb"); @@ -955,7 +986,8 @@ TEST(OpMsgRequestBuilder, WithTenantInDatabaseName_FeatureFlagOff) { false); const TenantId tenantId(OID::gen()); auto const body = fromjson("{ping: 1}"); - OpMsgRequest msg = OpMsgRequestBuilder::create({tenantId, "testDb"}, body); + OpMsgRequest msg = OpMsgRequestBuilder::create( + DatabaseName::createDatabaseName_forTest(tenantId, "testDb"), body); ASSERT(msg.body.getField("$tenant").eoo()); ASSERT_EQ(msg.getDatabase(), tenantId.toString() + "_testDb"); } @@ -966,7 +998,8 @@ TEST(OpMsgRequestBuilder, WithSameTenantInBody) { true); const TenantId tenantId(OID::gen()); auto const body = BSON("ping" << 1 << "$tenant" << tenantId); - OpMsgRequest msg = OpMsgRequestBuilder::create({tenantId, "testDb"}, body); + OpMsgRequest msg = OpMsgRequestBuilder::create( + DatabaseName::createDatabaseName_forTest(tenantId, "testDb"), body); ASSERT_EQ(msg.body.getField("$tenant").eoo(), false); ASSERT_EQ(TenantId::parseFromBSON(msg.body.getField("$tenant")), tenantId); } @@ -980,8 +1013,8 @@ TEST(OpMsgRequestBuilder, WithVTS) { using VTS = auth::ValidatedTenancyScope; VTS vts = VTS(tenantId, VTS::TenantForTestingTag{}); - OpMsgRequest msg = - OpMsgRequestBuilder::createWithValidatedTenancyScope({tenantId, "testDb"}, vts, body); + OpMsgRequest msg = OpMsgRequestBuilder::createWithValidatedTenancyScope( + DatabaseName::createDatabaseName_forTest(tenantId, "testDb"), vts, body); ASSERT(msg.validatedTenancyScope); ASSERT_EQ(msg.validatedTenancyScope->tenantId(), tenantId); // Verify $tenant is added to the msg body, as the vts does not come from security token. @@ -998,8 +1031,10 @@ TEST(OpMsgRequestBuilder, FailWithDiffTenantInBody) { const TenantId otherTenantId(OID::gen()); auto const body = BSON("ping" << 1 << "$tenant" << tenantId); - ASSERT_THROWS_CODE( - OpMsgRequestBuilder::create({otherTenantId, "testDb"}, body), DBException, 8423373); + ASSERT_THROWS_CODE(OpMsgRequestBuilder::create( + DatabaseName::createDatabaseName_forTest(otherTenantId, "testDb"), body), + DBException, + 8423373); } TEST(OpMsgRequestBuilder, CreateDoesNotCopy) { @@ -1009,7 +1044,8 @@ TEST(OpMsgRequestBuilder, CreateDoesNotCopy) { const TenantId tenantId(OID::gen()); auto body = fromjson("{ping: 1}"); const void* const bodyPtr = body.objdata(); - auto msg = OpMsgRequestBuilder::create({tenantId, "db"}, std::move(body)); + auto msg = OpMsgRequestBuilder::create(DatabaseName::createDatabaseName_forTest(tenantId, "db"), + std::move(body)); auto const newBody = BSON("ping" << 1 << "$tenant" << tenantId << "$db" << "db"); diff --git a/src/mongo/rpc/op_msg_test.h b/src/mongo/rpc/op_msg_test.h index ea556e3d4a89b..c74e021dbf2b8 100644 --- a/src/mongo/rpc/op_msg_test.h +++ b/src/mongo/rpc/op_msg_test.h @@ -29,12 +29,21 @@ #pragma once +#include +#include #include #include +#include +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/util/builder.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" +#include "mongo/util/shared_buffer.h" namespace mongo { diff --git a/src/mongo/rpc/reply_builder_interface.cpp b/src/mongo/rpc/reply_builder_interface.cpp index ac4e3626ec23d..6a0f9abfa1dbc 100644 --- a/src/mongo/rpc/reply_builder_interface.cpp +++ b/src/mongo/rpc/reply_builder_interface.cpp @@ -29,12 +29,19 @@ #include 
"mongo/rpc/reply_builder_interface.h" +#include +#include #include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/basic_types_gen.h" #include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/jsobj.h" +#include "mongo/idl/idl_parser.h" namespace mongo { namespace rpc { diff --git a/src/mongo/rpc/reply_builder_interface.h b/src/mongo/rpc/reply_builder_interface.h index bd87aef468fea..90e6f1bb698be 100644 --- a/src/mongo/rpc/reply_builder_interface.h +++ b/src/mongo/rpc/reply_builder_interface.h @@ -29,12 +29,21 @@ #pragma once +#include +#include #include +#include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" #include "mongo/rpc/protocol.h" +#include "mongo/util/assert_util.h" namespace mongo { class BSONObj; diff --git a/src/mongo/rpc/reply_builder_test.cpp b/src/mongo/rpc/reply_builder_test.cpp index 63bfbf4738690..549066b5e510a 100644 --- a/src/mongo/rpc/reply_builder_test.cpp +++ b/src/mongo/rpc/reply_builder_test.cpp @@ -27,17 +27,33 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/bson/util/builder.h" -#include "mongo/db/jsobj.h" -#include "mongo/db/json.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/legacy_reply.h" #include "mongo/rpc/legacy_reply_builder.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" #include "mongo/rpc/op_msg_rpc_impls.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace { diff --git a/src/mongo/rpc/rewrite_state_change_errors.cpp b/src/mongo/rpc/rewrite_state_change_errors.cpp index 7f09b36806ee0..4a6b4d0a9cf74 100644 --- a/src/mongo/rpc/rewrite_state_change_errors.cpp +++ b/src/mongo/rpc/rewrite_state_change_errors.cpp @@ -30,26 +30,34 @@ #include "mongo/rpc/rewrite_state_change_errors.h" -#include "mongo/platform/basic.h" - #include +#include +#include +#include +#include +#include #include +#include -#include -#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/mutable/document.h" #include "mongo/bson/mutable/element.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/platform/atomic_word.h" -#include "mongo/rpc/message.h" -#include "mongo/rpc/op_msg.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include 
"mongo/rpc/rewrite_state_change_errors_server_parameter_gen.h" #include "mongo/s/is_mongos.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/pcre.h" #include "mongo/util/static_immortal.h" diff --git a/src/mongo/rpc/rewrite_state_change_errors.h b/src/mongo/rpc/rewrite_state_change_errors.h index c9aea2af4f435..4c0d4eaa66d40 100644 --- a/src/mongo/rpc/rewrite_state_change_errors.h +++ b/src/mongo/rpc/rewrite_state_change_errors.h @@ -30,7 +30,9 @@ #pragma once #include +#include +#include "mongo/bson/bsonobj.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/rpc/message.h" diff --git a/src/mongo/rpc/rewrite_state_change_errors_test.cpp b/src/mongo/rpc/rewrite_state_change_errors_test.cpp index c86217d719149..5ecda666a03eb 100644 --- a/src/mongo/rpc/rewrite_state_change_errors_test.cpp +++ b/src/mongo/rpc/rewrite_state_change_errors_test.cpp @@ -27,17 +27,21 @@ * it in the license file. */ -#include "mongo/rpc/rewrite_state_change_errors.h" +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" #include "mongo/db/service_context.h" -#include "mongo/rpc/message.h" -#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/rewrite_state_change_errors.h" #include "mongo/s/is_mongos.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo::rpc { namespace { @@ -46,7 +50,6 @@ class RewriteStateChangeErrorsTest : public unittest::Test { public: RewriteStateChangeErrorsTest() { sc = ServiceContext::make(); - sc->registerClientObserver(std::make_unique()); cc = sc->makeClient("test", nullptr); opCtx = sc->makeOperationContext(cc.get()); } diff --git a/src/mongo/rpc/write_concern_error_detail.cpp b/src/mongo/rpc/write_concern_error_detail.cpp index b46962857b494..b0e2f514c62ce 100644 --- a/src/mongo/rpc/write_concern_error_detail.cpp +++ b/src/mongo/rpc/write_concern_error_detail.cpp @@ -29,8 +29,19 @@ #include "mongo/rpc/write_concern_error_detail.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/write_concern_error_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/rpc/write_concern_error_detail.h b/src/mongo/rpc/write_concern_error_detail.h index 090b77c306119..09cfd6aa147f4 100644 --- a/src/mongo/rpc/write_concern_error_detail.h +++ b/src/mongo/rpc/write_concern_error_detail.h @@ -29,10 +29,15 @@ #pragma once +#include #include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" namespace mongo { diff --git a/src/mongo/s/SConscript b/src/mongo/s/SConscript index b5480f1460b5c..797551d83958f 100644 --- a/src/mongo/s/SConscript +++ b/src/mongo/s/SConscript @@ -39,14 +39,17 @@ env.Library( 'write_ops/write_without_shard_key_util.cpp', ], LIBDEPS=[ + '$BUILD_DIR/mongo/db/commands/bulk_write_common', '$BUILD_DIR/mongo/db/commands/server_status_core', 
'$BUILD_DIR/mongo/db/fle_crud', '$BUILD_DIR/mongo/db/not_primary_error_tracker', '$BUILD_DIR/mongo/db/ops/parsed_update', '$BUILD_DIR/mongo/db/pipeline/pipeline', '$BUILD_DIR/mongo/db/pipeline/process_interface/mongos_process_interface', + '$BUILD_DIR/mongo/db/shard_role_api', '$BUILD_DIR/mongo/db/timeseries/timeseries_conversion_util', '$BUILD_DIR/mongo/db/timeseries/timeseries_options', + '$BUILD_DIR/mongo/db/timeseries/timeseries_write_util', 'query/cluster_query', 'sharding_router_api', 'write_ops/batch_write_types', @@ -173,7 +176,6 @@ env.Library( source=[ 'analyze_shard_key_common.idl', 'analyze_shard_key_documents.idl', - 'analyze_shard_key_feature_flag.idl', 'analyze_shard_key_server_parameters.idl', 'analyze_shard_key_role.cpp', 'query_analysis_sample_tracker.cpp', @@ -229,7 +231,6 @@ env.Library( 'index_version.cpp', 'index_version.idl', 'mongod_and_mongos_server_parameters.idl', - 'move_primary/move_primary_feature_flag.idl', 'refresh_query_analyzer_configuration_cmd.idl', 'request_types/abort_reshard_collection.idl', 'request_types/add_shard_request_type.cpp', @@ -247,19 +248,19 @@ env.Library( 'request_types/flush_resharding_state_change.idl', 'request_types/flush_routing_table_cache_updates.idl', 'request_types/get_database_version.idl', - 'request_types/get_historical_placement_info.idl', 'request_types/get_stats_for_balancing.idl', 'request_types/merge_chunk_request.idl', 'request_types/migration_secondary_throttle_options.cpp', 'request_types/move_primary.idl', 'request_types/move_range_request.idl', + 'request_types/placement_history_commands.idl', 'request_types/remove_shard_from_zone_request_type.cpp', 'request_types/reshard_collection.idl', 'request_types/resharding_operation_time.idl', 'request_types/set_allow_migrations.idl', 'request_types/sharded_ddl_commands.idl', 'request_types/shardsvr_join_migrations_request.idl', - 'request_types/transition_to_catalog_shard.idl', + 'request_types/transition_from_dedicated_config_server.idl', 'request_types/transition_to_dedicated_config_server.idl', 'request_types/update_zone_key_range_request_type.cpp', 'request_types/wait_for_fail_point.idl', @@ -287,6 +288,8 @@ env.Library( '$BUILD_DIR/mongo/db/commands/set_user_write_block_mode_idl', '$BUILD_DIR/mongo/db/common', '$BUILD_DIR/mongo/db/index_commands_idl', + '$BUILD_DIR/mongo/db/serialization_options', + '$BUILD_DIR/mongo/executor/async_rpc_error_info', '$BUILD_DIR/mongo/rpc/message', '$BUILD_DIR/mongo/util/caching', 'analyze_shard_key_common', @@ -323,6 +326,7 @@ env.Library( source=[ 'balancer_configuration.cpp', 'catalog_cache.cpp', + 'client/config_shard_wrapper.cpp', 'client/shard_factory.cpp', 'client/shard_registry.cpp', 'sharding_index_catalog_cache.cpp', @@ -409,9 +413,11 @@ env.Library( source=[ 'sessions_collection_sharded.cpp', ], - LIBDEPS_PRIVATE=[ - '$BUILD_DIR/mongo/db/session/logical_session_id', + LIBDEPS=[ '$BUILD_DIR/mongo/db/session/sessions_collection', + ], + LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/db/server_base', 'sharding_api', ], ) @@ -470,9 +476,8 @@ env.Library( '$BUILD_DIR/mongo/db/read_write_concern_defaults', '$BUILD_DIR/mongo/db/server_options', '$BUILD_DIR/mongo/db/server_options_base', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_liaison_mongos', - '$BUILD_DIR/mongo/db/session/logical_session_cache', - '$BUILD_DIR/mongo/db/session/logical_session_cache_impl', '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/startup_warnings_common', '$BUILD_DIR/mongo/db/stats/counters', @@ -536,9 +541,12 @@ 
env.Library( '$BUILD_DIR/mongo/db/commands/rwc_defaults_commands', '$BUILD_DIR/mongo/db/ftdc/ftdc_mongos', '$BUILD_DIR/mongo/db/process_health/fault_manager', + '$BUILD_DIR/mongo/db/query/query_settings_manager', '$BUILD_DIR/mongo/db/read_write_concern_defaults', '$BUILD_DIR/mongo/db/serverinit', + '$BUILD_DIR/mongo/db/serverless/multitenancy_check', '$BUILD_DIR/mongo/db/service_liaison_mongos', + '$BUILD_DIR/mongo/db/session/logical_session_cache_impl', '$BUILD_DIR/mongo/db/session/session_catalog', '$BUILD_DIR/mongo/db/startup_warnings_common', '$BUILD_DIR/mongo/idl/cluster_server_parameter_refresher', @@ -616,6 +624,7 @@ env.Library( ], LIBDEPS=[ '$BUILD_DIR/mongo/client/remote_command_targeter_mock', + '$BUILD_DIR/mongo/db/concurrency/lock_manager', '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/executor/network_interface_mock', '$BUILD_DIR/mongo/executor/network_test_env', @@ -662,10 +671,12 @@ env.CppUnitTest( 'catalog/type_tags_test.cpp', 'catalog_cache_refresh_test.cpp', 'catalog_cache_test.cpp', + 'chunks_test_util.cpp', 'chunk_manager_query_test.cpp', 'chunk_map_test.cpp', 'chunk_test.cpp', 'chunk_version_test.cpp', + 'client/config_shard_wrapper_test.cpp', 'client/shard_remote_test.cpp', 'cluster_identity_loader_test.cpp', 'collection_routing_info_targeter_test.cpp', @@ -698,6 +709,7 @@ env.CppUnitTest( 'write_ops/batch_write_op_test.cpp', 'write_ops/batched_command_request_test.cpp', 'write_ops/batched_command_response_test.cpp', + 'write_ops/bulk_write_command_modifier_test.cpp', 'write_ops/bulk_write_exec_test.cpp', 'write_ops/write_op_test.cpp', 'write_ops/write_without_shard_key_util_test.cpp', @@ -710,6 +722,7 @@ env.CppUnitTest( '$BUILD_DIR/mongo/db/pipeline/process_interface/mongos_process_interface_factory', '$BUILD_DIR/mongo/db/query/query_test_service_context', '$BUILD_DIR/mongo/db/repl/replmocks', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/timeseries/timeseries_conversion_util', '$BUILD_DIR/mongo/db/timeseries/timeseries_options', '$BUILD_DIR/mongo/dbtests/mocklib', diff --git a/src/mongo/s/analyze_shard_key_cmd.idl b/src/mongo/s/analyze_shard_key_cmd.idl index b579adab48542..351d006510b78 100644 --- a/src/mongo/s/analyze_shard_key_cmd.idl +++ b/src/mongo/s/analyze_shard_key_cmd.idl @@ -79,37 +79,40 @@ structs: description: "The metrics about the characteristics of a shard key." strict: false fields: - numDocs: + numDocsTotal: type: long - description: "The number of the documents in the collection." + description: "The number of documents in the collection." + validator: { gt: 0 } + numOrphanDocs: + type: long + description: "The number of orphan documents in the collection." validator: { gte: 0 } optional: true + avgDocSizeBytes: + type: long + description: "The average document size in bytes." + validator: { gt: 0 } + numDocsSampled: + type: long + description: "The number of sampled documents when calculating the metrics about the + characteristics of the shard key." + validator: { gt: 0 } isUnique: type: bool description: "Whether the shard key index enforces a uniqueness constraint." - optional: true numDistinctValues: type: long description: "The number of distinct shard key values in the collection." - validator: { gte: 0 } - optional: true + validator: { gt: 0 } mostCommonValues: type: array description: "The value and frequency of the most common shard key values." - optional: true monotonicity: type: MonotonicityMetrics description: "The monotonicity metrics for the shard key." 
- optional: true - avgDocSizeBytes: - type: long - description: "The average document size in bytes." - validator: { gte: 0 } - optional: true - numOrphanDocs: - type: long - description: "The number of the orphan documents in the collection." - validator: { gte: 0 } + note: + description: "The note about how to interpret the metrics." + type: string optional: true ReadSampleSize: @@ -289,20 +292,16 @@ structs: analyzeShardKeyResponse: description: "The response for the 'analyzeShardKey' command." strict: false - inline_chained_structs: true - chained_structs: - KeyCharacteristicsMetrics: keyCharacteristics fields: + keyCharacteristics: + type: KeyCharacteristicsMetrics + optional: true readDistribution: type: ReadDistributionMetrics optional: true writeDistribution: type: WriteDistributionMetrics optional: true - note: - description: "The note about how to interpret the metrics." - type: string - optional: true commands: analyzeShardKey: @@ -318,6 +317,34 @@ commands: description: "The shard key to evaluate." validator: callback: validateShardKeyPattern + keyCharacteristics: + type: bool + cpp_name: analyzeKeyCharacteristics + description: "The boolean specifying whether the command should calculate the metrics about + the characteristics of the shard key." + default: true + readWriteDistribution: + type: bool + cpp_name: analyzeReadWriteDistribution + description: "The boolean specifying whether the command should calculate the metrics about + the read and write distribution." + default: true + sampleRate: + description: "The proportion of the documents in the collection, in the range (0,1] to + sample when calculating the metrics about the characteristics of the shard + key." + type: safeDouble + validator: + gt: 0 + lte: 1 + optional: true + sampleSize: + description: "The number of documents to sample when calculating the metrics about the + characteristics of the shard key." + type: safeInt64 + validator: + gt: 0 + optional: true $readPreference: type: readPreference cpp_name: readPreference diff --git a/src/mongo/s/analyze_shard_key_common.idl b/src/mongo/s/analyze_shard_key_common.idl index d4eb0c0ef0fe1..9fdd5741212ab 100644 --- a/src/mongo/s/analyze_shard_key_common.idl +++ b/src/mongo/s/analyze_shard_key_common.idl @@ -62,8 +62,8 @@ structs: mode: type: QueryAnalyzerMode description: "The query analyzer mode." - sampleRate: - type: double + samplesPerSecond: + type: safeDouble description: "The maximum number of queries to sample per second, in total across the cluster (not per mongos or mongod)." optional: true @@ -79,8 +79,8 @@ structs: collectionUuid: type: uuid description: "The UUID of the collection." - sampleRate: - type: double + samplesPerSecond: + type: safeDouble description: "The maximum number of queries to sample per second." startTime: type: date @@ -102,8 +102,8 @@ structs: type: uuid description: "The UUID of the collection." optional: true - sampleRate: - type: double + samplesPerSecond: + type: safeDouble description: "The maximum number of queries to sample per second. Only reported by mongos." optional: true startTime: diff --git a/src/mongo/s/analyze_shard_key_documents.idl b/src/mongo/s/analyze_shard_key_documents.idl index e460d8b2e3e95..0627ecb959de6 100644 --- a/src/mongo/s/analyze_shard_key_documents.idl +++ b/src/mongo/s/analyze_shard_key_documents.idl @@ -41,12 +41,13 @@ structs: strict: false fields: _id: + type: namespacestring + description: "The namespace of the collection." 
+ cpp_name: ns + collUuid: type: uuid description: "The UUID of the collection." cpp_name: collectionUuid - ns: - type: namespacestring - description: "The namespace of the collection." startTime: type: date description: "The time at which query sampling began." @@ -123,7 +124,7 @@ structs: by an analyzeShardKey command." strict: false fields: - commandId: + analyzeShardKeyId: type: uuid description: "The unique id for the command that generated this split point." splitPointId: diff --git a/src/mongo/s/analyze_shard_key_feature_flag.idl b/src/mongo/s/analyze_shard_key_feature_flag.idl index 4e92dcac66711..b995fe0430f46 100644 --- a/src/mongo/s/analyze_shard_key_feature_flag.idl +++ b/src/mongo/s/analyze_shard_key_feature_flag.idl @@ -40,3 +40,4 @@ feature_flags: cpp_varname: gFeatureFlagAnalyzeShardKey default: true version: 7.0 + shouldBeFCVGated: true diff --git a/src/mongo/s/analyze_shard_key_role.cpp b/src/mongo/s/analyze_shard_key_role.cpp index f3b2ef327aa85..d8ecd074da0cc 100644 --- a/src/mongo/s/analyze_shard_key_role.cpp +++ b/src/mongo/s/analyze_shard_key_role.cpp @@ -29,9 +29,10 @@ #include "mongo/s/analyze_shard_key_role.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/multitenancy_gen.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/s/analyze_shard_key_feature_flag_gen.h" +#include "mongo/db/server_options.h" #include "mongo/s/is_mongos.h" namespace mongo { @@ -49,21 +50,7 @@ bool isReplEnabled(ServiceContext* serviceContext) { } // namespace -bool isFeatureFlagEnabled(bool ignoreFCV) { - if (ignoreFCV) { - // (Ignore FCV check): In the following cases, ignoreFCV is set to true. - // 1. The call is before FCV initialization. - // 2. We want to stop QueryAnalysisSampler regardless of FCV. - return gFeatureFlagAnalyzeShardKey.isEnabledAndIgnoreFCVUnsafe(); - } - return serverGlobalParams.featureCompatibility.isVersionInitialized() && - gFeatureFlagAnalyzeShardKey.isEnabled(serverGlobalParams.featureCompatibility); -} - -bool supportsCoordinatingQueryAnalysis(bool isReplEnabled, bool ignoreFCV) { - if (!isFeatureFlagEnabled(ignoreFCV)) { - return false; - } +bool supportsCoordinatingQueryAnalysis(bool isReplEnabled) { if (isMongos()) { return false; } @@ -72,14 +59,11 @@ bool supportsCoordinatingQueryAnalysis(bool isReplEnabled, bool ignoreFCV) { serverGlobalParams.clusterRole.has(ClusterRole::None)); } -bool supportsCoordinatingQueryAnalysis(OperationContext* opCtx, bool ignoreFCV) { - return supportsCoordinatingQueryAnalysis(isReplEnabled(opCtx->getServiceContext()), ignoreFCV); +bool supportsCoordinatingQueryAnalysis(OperationContext* opCtx) { + return supportsCoordinatingQueryAnalysis(isReplEnabled(opCtx->getServiceContext())); } -bool supportsPersistingSampledQueries(bool isReplEnabled, bool ignoreFCV) { - if (!isFeatureFlagEnabled(ignoreFCV)) { - return false; - } +bool supportsPersistingSampledQueries(bool isReplEnabled) { if (isMongos()) { return false; } @@ -88,14 +72,11 @@ bool supportsPersistingSampledQueries(bool isReplEnabled, bool ignoreFCV) { serverGlobalParams.clusterRole.has(ClusterRole::None)); } -bool supportsPersistingSampledQueries(OperationContext* opCtx, bool ignoreFCV) { - return supportsPersistingSampledQueries(isReplEnabled(opCtx->getServiceContext()), ignoreFCV); +bool supportsPersistingSampledQueries(OperationContext* opCtx) { + return supportsPersistingSampledQueries(isReplEnabled(opCtx->getServiceContext())); } -bool supportsSamplingQueries(bool isReplEnabled, bool ignoreFCV) { - if (!isFeatureFlagEnabled(ignoreFCV)) { 
- return false; - } +bool supportsSamplingQueries(bool isReplEnabled) { if (isMongos()) { return true; } @@ -104,12 +85,12 @@ bool supportsSamplingQueries(bool isReplEnabled, bool ignoreFCV) { serverGlobalParams.clusterRole.has(ClusterRole::None)); } -bool supportsSamplingQueries(ServiceContext* serviceContext, bool ignoreFCV) { - return supportsSamplingQueries(isReplEnabled(serviceContext), ignoreFCV); +bool supportsSamplingQueries(ServiceContext* serviceContext) { + return supportsSamplingQueries(isReplEnabled(serviceContext)); } -bool supportsSamplingQueries(OperationContext* opCtx, bool ignoreFCV) { - return supportsSamplingQueries(opCtx->getServiceContext(), ignoreFCV); +bool supportsSamplingQueries(OperationContext* opCtx) { + return supportsSamplingQueries(opCtx->getServiceContext()); } } // namespace analyze_shard_key diff --git a/src/mongo/s/analyze_shard_key_role.h b/src/mongo/s/analyze_shard_key_role.h index 2c6ed7b3143b5..f6be83183ec97 100644 --- a/src/mongo/s/analyze_shard_key_role.h +++ b/src/mongo/s/analyze_shard_key_role.h @@ -30,6 +30,7 @@ #pragma once #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/platform/basic.h" namespace mongo { @@ -37,15 +38,15 @@ namespace analyze_shard_key { bool isFeatureFlagEnabled(bool ignoreFCV = false); -bool supportsCoordinatingQueryAnalysis(bool isReplEnabled, bool ignoreFCV = false); -bool supportsCoordinatingQueryAnalysis(OperationContext* opCtx, bool ignoreFCV = false); +bool supportsCoordinatingQueryAnalysis(bool isReplEnabled); +bool supportsCoordinatingQueryAnalysis(OperationContext* opCtx); -bool supportsPersistingSampledQueries(bool isReplEnabled, bool ignoreFCV = false); -bool supportsPersistingSampledQueries(OperationContext* opCtx, bool ignoreFCV = false); +bool supportsPersistingSampledQueries(bool isReplEnabled); +bool supportsPersistingSampledQueries(OperationContext* opCtx); -bool supportsSamplingQueries(bool isReplEnabled, bool ignoreFCV = false); -bool supportsSamplingQueries(OperationContext* opCtx, bool ignoreFCV = false); -bool supportsSamplingQueries(ServiceContext* serviceContext, bool ignoreFCV = false); +bool supportsSamplingQueries(bool isReplEnabled); +bool supportsSamplingQueries(OperationContext* opCtx); +bool supportsSamplingQueries(ServiceContext* serviceContext); } // namespace analyze_shard_key } // namespace mongo diff --git a/src/mongo/s/analyze_shard_key_server_parameters.idl b/src/mongo/s/analyze_shard_key_server_parameters.idl index fe441c20673c1..13cdb4cbd05ae 100644 --- a/src/mongo/s/analyze_shard_key_server_parameters.idl +++ b/src/mongo/s/analyze_shard_key_server_parameters.idl @@ -30,6 +30,8 @@ global: cpp_namespace: "mongo::analyze_shard_key" + cpp_includes: + - "mongo/db/ops/write_ops.h" imports: - "mongo/db/basic_types.idl" @@ -80,7 +82,17 @@ server_parameters: cpp_vartype: AtomicWord cpp_varname: gAnalyzeShardKeySplitPointExpirationSecs default: - expr: 15 * 60 + expr: 5 * 60 + validator: + gt: 0 + analyzeShardKeyCharacteristicsDefaultSampleSize: + description: The default number of documents to sample when calculating the metrics about + the characteristics of the shard key if both 'sampleRate' and 'sampleSize' + are not specified. 
+ set_at: [startup, runtime] + cpp_vartype: AtomicWord + cpp_varname: gKeyCharacteristicsDefaultSampleSize + default: 10000000 # 10 million validator: gt: 0 @@ -111,7 +123,7 @@ server_parameters: set_at: startup cpp_vartype: int cpp_varname: gQueryAnalysisSamplerConfigurationRefreshSecs - default: 5 + default: 10 validator: gt: 0 queryAnalysisSamplerBurstMultiplier: @@ -147,9 +159,12 @@ server_parameters: set_at: [startup, runtime] cpp_vartype: AtomicWord cpp_varname: gQueryAnalysisWriterMaxBatchSize - default: 100000 + default: + expr: write_ops::kMaxWriteBatchSize validator: gt: 0 + lte: + expr: write_ops::kMaxWriteBatchSize queryAnalysisWriterMinThreadPoolSize: description: The minimum number of threads in the writer's thread pool. set_at: startup diff --git a/src/mongo/s/analyze_shard_key_util.cpp b/src/mongo/s/analyze_shard_key_util.cpp index 528948c3ff82e..0f43a1ed1f523 100644 --- a/src/mongo/s/analyze_shard_key_util.cpp +++ b/src/mongo/s/analyze_shard_key_util.cpp @@ -29,8 +29,26 @@ #include "mongo/s/analyze_shard_key_util.h" +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/catalog/collection.h" +#include "mongo/db/catalog/collection_catalog.h" +#include "mongo/db/catalog/collection_options.h" +#include "mongo/db/client.h" #include "mongo/db/db_raii.h" -#include "mongo/logv2/log.h" +#include "mongo/db/read_write_concern_provenance_base_gen.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -53,21 +71,30 @@ Status validateNamespace(const NamespaceString& nss) { return Status::OK(); } -StatusWith validateCollectionOptionsLocally(OperationContext* opCtx, - const NamespaceString& nss) { - if (CollectionCatalog::get(opCtx)->lookupView(opCtx, nss)) { - return Status{ErrorCodes::CommandNotSupportedOnView, "The namespace corresponds to a view"}; - } +StatusWith validateCollectionOptions(OperationContext* opCtx, const NamespaceString& nss) { + AutoGetCollectionForReadMaybeLockFree collection( + opCtx, + nss, + AutoGetCollection::Options{}.viewMode(auto_get_collection::ViewMode::kViewsPermitted)); - AutoGetCollectionForReadCommandMaybeLockFree collection(opCtx, nss); + if (auto view = collection.getView()) { + if (view->timeseries()) { + return Status{ErrorCodes::IllegalOperation, + "Operation not supported for a timeseries collection"}; + } + return Status{ErrorCodes::CommandNotSupportedOnView, "Operation not supported for a view"}; + } if (!collection) { return Status{ErrorCodes::NamespaceNotFound, str::stream() << "The namespace does not exist"}; } if (collection->getCollectionOptions().encryptedFieldConfig.has_value()) { - return Status{ErrorCodes::IllegalOperation, - str::stream() << "The collection has queryable encryption enabled"}; + return Status{ + ErrorCodes::IllegalOperation, + str::stream() + << "Operation not supported for a collection with queryable encryption enabled"}; } + return collection->uuid(); } @@ -101,5 +128,10 @@ double calculatePercentage(double part, double whole) { return round(part / whole * 100, kMaxNumDecimalPlaces); } +bool isInternalClient(OperationContext* opCtx) { + return !opCtx->getClient()->session() || + (opCtx->getClient()->session()->getTags() & transport::Session::kInternalClient); +} + } // namespace analyze_shard_key } // namespace mongo 
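The two batching changes around this point (capping queryAnalysisWriterMaxBatchSize at write_ops::kMaxWriteBatchSize above, and widening the per-insert padding to 2MB in the analyze_shard_key_util.h change that follows) rest on the same arithmetic. The sketch below is illustrative only and is not part of the patch; it assumes the commonly cited 16MB BSONObjMaxUserSize and the 100,000-document insert batch limit that the header comment itself quotes, and simply restates that overhead calculation as runnable code.

```cpp
// Illustrative sketch only -- not part of this patch. Restates the padding
// arithmetic behind kMaxBSONObjSizePerInsertBatch using assumed, well-known
// constants (16MB BSON user-object limit, 100'000-document insert batch limit).
#include <cstdint>
#include <iostream>

int main() {
    const int64_t kBSONObjMaxUserSize = 16 * 1024 * 1024;  // 16MB user BSON limit
    const int64_t kMaxWriteBatchSize = 100'000;            // max documents per insert command

    // A BSON array is stored as {"0": <doc>, "1": <doc>, ...}: at most 5 bytes of
    // field name ("99999"), 1 byte for its null terminator, and 1 byte for the type tag.
    const int64_t perElementOverhead = 5 + 1 + 1;
    const int64_t documentsArrayOverhead = kMaxWriteBatchSize * perElementOverhead;  // 700'000 bytes

    const int64_t padding = 2 * 1024 * 1024;  // the 2MB reserved by the patch
    const int64_t documentsBudget = kBSONObjMaxUserSize - padding;

    std::cout << "array bookkeeping overhead ~" << documentsArrayOverhead << " bytes\n"
              << "budget left for sampled documents: " << documentsBudget << " bytes\n"
              << "padding left for other command fields: " << (padding - documentsArrayOverhead)
              << " bytes\n";
    return 0;
}
```

Roughly 700kB of the 2MB pad goes to array bookkeeping, leaving well over 1MB for the remaining fields of the insert command, which matches the reasoning spelled out in the header comment below.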
diff --git a/src/mongo/s/analyze_shard_key_util.h b/src/mongo/s/analyze_shard_key_util.h index c6971501f0fd7..9e9b7567dc71d 100644 --- a/src/mongo/s/analyze_shard_key_util.h +++ b/src/mongo/s/analyze_shard_key_util.h @@ -29,11 +29,16 @@ #pragma once -#include "mongo/platform/basic.h" - +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/platform/basic.h" #include "mongo/s/shard_key_pattern.h" +#include "mongo/util/uuid.h" namespace mongo { namespace analyze_shard_key { @@ -41,9 +46,15 @@ namespace analyze_shard_key { // The maximum number of decimal places for the metrics returned by the analyzeShardKey command. const int kMaxNumDecimalPlaces = 10; -// The size limit for the documents to an insert in a single batch. Leave some padding for other +// The size limit for the documents to insert in a single command. The 2MB padding is to account +// for the size of the fields other than the "documents" field, and the fact that BSON stores an +// array as {'0': , '1': , ...}. The math is as follows. The limit for the number +// of documents that can be included in a single insert command is 100'000. So the size of the +// largest field name is 5 bytes (since the max index is 99999). There is 1 byte per document for the field +// name's null terminator and 1 byte per document for the type. So the upper bound for the overhead +// for the "documents" field is 700kB. The remaining > 1MB should be more than enough for the other // fields in the insert command. -constexpr int kMaxBSONObjSizePerInsertBatch = BSONObjMaxUserSize - 100 * 1024; +constexpr int kMaxBSONObjSizePerInsertBatch = BSONObjMaxUserSize - 2 * 1024 * 1024; // // The helpers used for validation within the analyzeShardKey or configureQueryAnalyzer command. @@ -55,14 +66,13 @@ constexpr int kMaxBSONObjSizePerInsertBatch = BSONObjMaxUserSize - 100 * 1024; Status validateNamespace(const NamespaceString& nss); /* - * If the namespace doesn't exist locally, returns a NamespaceNotFound error. If the namespace - * corresponds to a view, returns a CommandNotSupportedOnView error. If the collection has - * queryable encryption enabled, returns an IllegalOperation error. Throws DBException on any error - * that occurs during the validation. If the validation passed, returns an OK status and the - * collection UUID for the collection when the validation occurred. + * If the namespace doesn't exist locally, returns a NamespaceNotFound error. If the collection is a + * timeseries collection or has queryable encryption enabled, returns an IllegalOperation error. If + * the namespace corresponds to a view, returns a CommandNotSupportedOnView error. Throws + * DBException on any error that occurs during the validation. If the validation passed, returns an + * OK status and the collection UUID for the collection when the validation occurred. */ -StatusWith validateCollectionOptionsLocally(OperationContext* opCtx, - const NamespaceString& nss); +StatusWith validateCollectionOptions(OperationContext* opCtx, const NamespaceString& nss); /* * If the shard key is invalid, returns a BadValue error. Otherwise, returns an OK status. This @@ -106,5 +116,10 @@ double round(double val, int n); */ double calculatePercentage(double part, double whole); +/* + * Returns true if the client is internal. 
+ */ +bool isInternalClient(OperationContext* opCtx); + } // namespace analyze_shard_key } // namespace mongo diff --git a/src/mongo/s/append_raw_responses_test.cpp b/src/mongo/s/append_raw_responses_test.cpp index 7124c2ea4f875..8e5961ff94022 100644 --- a/src/mongo/s/append_raw_responses_test.cpp +++ b/src/mongo/s/append_raw_responses_test.cpp @@ -27,16 +27,57 @@ * it in the license file. */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/write_concern_error_detail.h" #include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/stale_exception.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/s/async_requests_sender.cpp b/src/mongo/s/async_requests_sender.cpp index 990358f7e4297..4508c2e17f74e 100644 --- a/src/mongo/s/async_requests_sender.cpp +++ b/src/mongo/s/async_requests_sender.cpp @@ -28,25 +28,35 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/async_requests_sender.h" - +#include +#include #include #include +#include +#include -#include "mongo/client/remote_command_targeter.h" +#include +#include + +#include "mongo/bson/bsonelement.h" #include "mongo/db/curop.h" +#include "mongo/db/service_context.h" #include "mongo/executor/hedge_options_util.h" #include "mongo/executor/remote_command_request.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/transport/baton.h" -#include "mongo/transport/transport_layer.h" #include "mongo/util/assert_util.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kQuery @@ -70,7 +80,8 @@ AsyncRequestsSender::AsyncRequestsSender(OperationContext* opCtx, const std::vector& requests, const ReadPreferenceSetting& readPreference, Shard::RetryPolicy retryPolicy, - std::unique_ptr resourceYielder) + std::unique_ptr resourceYielder, + const ShardHostMap& designatedHostsMap) : _opCtx(opCtx), _db(dbName.toString()), _readPreference(readPreference), @@ -87,7 +98,12 @@ AsyncRequestsSender::AsyncRequestsSender(OperationContext* opCtx, _remotes.reserve(requests.size()); for (const auto& request : requests) { // Kick off requests immediately. - _remotes.emplace_back(this, request.shardId, request.cmdObj).executeRequest(); + auto designatedHostIter = designatedHostsMap.find(request.shardId); + auto designatedHost = designatedHostIter != designatedHostsMap.end() + ? designatedHostIter->second + : HostAndPort(); + _remotes.emplace_back(this, request.shardId, request.cmdObj, std::move(designatedHost)) + .executeRequest(); } CurOp::get(_opCtx)->ensureRecordRemoteOpWait(); @@ -184,8 +200,12 @@ AsyncRequestsSender::Request::Request(ShardId shardId, BSONObj cmdObj) AsyncRequestsSender::RemoteData::RemoteData(AsyncRequestsSender* ars, ShardId shardId, - BSONObj cmdObj) - : _ars(ars), _shardId(std::move(shardId)), _cmdObj(std::move(cmdObj)) {} + BSONObj cmdObj, + HostAndPort designatedHostAndPort) + : _ars(ars), + _shardId(std::move(shardId)), + _cmdObj(std::move(cmdObj)), + _designatedHostAndPort(std::move(designatedHostAndPort)) {} SemiFuture> AsyncRequestsSender::RemoteData::getShard() noexcept { return Grid::get(getGlobalServiceContext()) @@ -212,7 +232,17 @@ auto AsyncRequestsSender::RemoteData::scheduleRequest() -> SemiFuture { return getShard() .thenRunOn(*_ars->_subBaton) - .then([this](auto&& shard) { + .then([this](auto&& shard) -> SemiFuture> { + if (!_designatedHostAndPort.empty()) { + const auto& connStr = shard->getTargeter()->connectionString(); + const auto& servers = connStr.getServers(); + uassert(ErrorCodes::HostNotFound, + str::stream() << "Host " << _designatedHostAndPort + << " is not a host in shard " << shard->getId(), + std::find(servers.begin(), servers.end(), _designatedHostAndPort) != + servers.end()); + return std::vector{_designatedHostAndPort}; + } return shard->getTargeter()->findHosts(_ars->_readPreference, CancellationToken::uncancelable()); }) diff --git a/src/mongo/s/async_requests_sender.h b/src/mongo/s/async_requests_sender.h index f8952a8a3c00c..731d08344bb40 100644 --- a/src/mongo/s/async_requests_sender.h +++ b/src/mongo/s/async_requests_sender.h @@ -29,19 +29,29 @@ #pragma once +#include #include +#include +#include +#include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/client/read_preference.h" #include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" #include "mongo/db/resource_yielder.h" #include "mongo/db/shard_id.h" #include "mongo/executor/remote_command_response.h" #include "mongo/executor/scoped_task_executor.h" #include "mongo/executor/task_executor.h" #include "mongo/s/client/shard.h" +#include "mongo/util/future.h" #include "mongo/util/interruptible.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/producer_consumer_queue.h" @@ -127,9 +137,14 @@ class AsyncRequestsSender { boost::optional shardHostAndPort; }; + typedef stdx::unordered_map ShardHostMap; + /** * Constructs a new AsyncRequestsSender. 
The OperationContext* and TaskExecutor* must remain * valid for the lifetime of the ARS. + * + * The designatedHostsMap overrides the read preference for the shards specified, and requires + * those shards target only the host in the map. */ AsyncRequestsSender(OperationContext* opCtx, std::shared_ptr executor, @@ -137,7 +152,8 @@ class AsyncRequestsSender { const std::vector& requests, const ReadPreferenceSetting& readPreference, Shard::RetryPolicy retryPolicy, - std::unique_ptr resourceYielder); + std::unique_ptr resourceYielder, + const ShardHostMap& designatedHostsMap); /** * Returns true if responses for all requests have been returned via next(). @@ -176,7 +192,10 @@ class AsyncRequestsSender { /** * Creates a new uninitialized remote state with a command to send. */ - RemoteData(AsyncRequestsSender* ars, ShardId shardId, BSONObj cmdObj); + RemoteData(AsyncRequestsSender* ars, + ShardId shardId, + BSONObj cmdObj, + HostAndPort designatedHost); /** * Returns a SemiFuture containing a shard object associated with this remote. @@ -252,6 +271,9 @@ class AsyncRequestsSender { // The command object to send to the remote host. BSONObj _cmdObj; + // The designated host and port to send the command to, if provided. Otherwise is empty(). + HostAndPort _designatedHostAndPort; + // The exact host on which the remote command was run. Is unset until a request has been // sent. boost::optional _shardHostAndPort; diff --git a/src/mongo/s/async_requests_sender_test.cpp b/src/mongo/s/async_requests_sender_test.cpp index b27d9b5644ea5..60229ee0385d0 100644 --- a/src/mongo/s/async_requests_sender_test.cpp +++ b/src/mongo/s/async_requests_sender_test.cpp @@ -27,15 +27,27 @@ * it in the license file. */ +#include +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/executor/network_test_env.h" #include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -69,6 +81,8 @@ class AsyncRequestsSenderTest : public ShardingTestFixture { std::unique_ptr targeter( std::make_unique()); + _targeters.push_back(targeter.get()); + targeter->setConnectionStringReturnValue(ConnectionString(kTestShardHosts[i])); targeter->setFindHostReturnValue(kTestShardHosts[i]); @@ -78,6 +92,9 @@ class AsyncRequestsSenderTest : public ShardingTestFixture { setupShards(shards); } + +protected: + std::vector _targeters; // Targeters are owned by the factory. 
}; TEST_F(AsyncRequestsSenderTest, HandlesExceptionWhenYielding) { @@ -108,11 +125,12 @@ TEST_F(AsyncRequestsSenderTest, HandlesExceptionWhenYielding) { auto ars = AsyncRequestsSender(operationContext(), executor(), - kTestNss.db(), + kTestNss.db_forTest(), requests, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, Shard::RetryPolicy::kNoRetry, - std::make_unique()); + std::make_unique(), + {} /* designatedHostsMap */); // Issue blocking waits on a different thread. auto future = launchAsync([&]() { @@ -169,11 +187,12 @@ TEST_F(AsyncRequestsSenderTest, HandlesExceptionWhenUnyielding) { auto ars = AsyncRequestsSender(operationContext(), executor(), - kTestNss.db(), + kTestNss.db_forTest(), requests, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, Shard::RetryPolicy::kNoRetry, - std::make_unique()); + std::make_unique(), + {} /* designatedHostsMap */); auto firstResponseProcessed = unittest::Barrier(2); @@ -238,11 +257,12 @@ TEST_F(AsyncRequestsSenderTest, ExceptionWhileWaitingDoesNotSkipUnyield) { auto yielderPointer = yielder.get(); auto ars = AsyncRequestsSender(operationContext(), executor(), - kTestNss.db(), + kTestNss.db_forTest(), requests, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, Shard::RetryPolicy::kNoRetry, - std::move(yielder)); + std::move(yielder), + {} /* designatedHostsMap */); // Issue blocking wait on a different thread. auto future = launchAsync([&]() { @@ -260,5 +280,102 @@ TEST_F(AsyncRequestsSenderTest, ExceptionWhileWaitingDoesNotSkipUnyield) { ASSERT_EQ(yielderPointer->timesYielded, 1); ASSERT_EQ(yielderPointer->timesUnyielded, 1); } + +TEST_F(AsyncRequestsSenderTest, DesignatedHostChosen) { + std::vector requests; + requests.emplace_back(kTestShardIds[0], + BSON("find" + << "bar")); + requests.emplace_back(kTestShardIds[1], + BSON("find" + << "bar")); + requests.emplace_back(kTestShardIds[2], + BSON("find" + << "bar")); + + AsyncRequestsSender::ShardHostMap designatedHosts; + + auto shard1Secondary = HostAndPort("SecondaryHostShard1", 12345); + _targeters[1]->setConnectionStringReturnValue( + ConnectionString::forReplicaSet("shard1_rs"_sd, {kTestShardHosts[1], shard1Secondary})); + designatedHosts[kTestShardIds[1]] = shard1Secondary; + auto ars = AsyncRequestsSender(operationContext(), + executor(), + kTestNss.db(), + requests, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + Shard::RetryPolicy::kNoRetry, + nullptr /* no yielder */, + designatedHosts); + + auto future = launchAsync([&]() { + auto response = ars.next(); + ASSERT(response.swResponse.getStatus().isOK()); + ASSERT_EQ(response.shardId, kTestShardIds[0]); + ASSERT_EQ(response.shardHostAndPort, kTestShardHosts[0]); + + response = ars.next(); + ASSERT(response.swResponse.getStatus().isOK()); + ASSERT_EQ(response.shardId, kTestShardIds[1]); + ASSERT_EQ(response.shardHostAndPort, shard1Secondary); + + response = ars.next(); + ASSERT(response.swResponse.getStatus().isOK()); + ASSERT_EQ(response.shardId, kTestShardIds[2]); + ASSERT_EQ(response.shardHostAndPort, kTestShardHosts[2]); + }); + + onCommand([&](const auto& request) { + ASSERT(request.cmdObj["find"]); + ASSERT_EQ(request.target, kTestShardHosts[0]); + return CursorResponse(kTestNss, 0LL, {BSON("x" << 1)}) + .toBSON(CursorResponse::ResponseType::InitialResponse); + }); + + onCommand([&](const auto& request) { + ASSERT(request.cmdObj["find"]); + ASSERT_EQ(request.target, shard1Secondary); + return CursorResponse(kTestNss, 0LL, {BSON("x" << 2)}) + .toBSON(CursorResponse::ResponseType::InitialResponse); + }); + + onCommand([&](const 
auto& request) { + ASSERT(request.cmdObj["find"]); + ASSERT_EQ(request.target, kTestShardHosts[2]); + return CursorResponse(kTestNss, 0LL, {BSON("x" << 3)}) + .toBSON(CursorResponse::ResponseType::InitialResponse); + }); + future.default_timed_get(); +} + +TEST_F(AsyncRequestsSenderTest, DesignatedHostMustBeInShard) { + std::vector requests; + requests.emplace_back(kTestShardIds[0], + BSON("find" + << "bar")); + requests.emplace_back(kTestShardIds[1], + BSON("find" + << "bar")); + requests.emplace_back(kTestShardIds[2], + BSON("find" + << "bar")); + + AsyncRequestsSender::ShardHostMap designatedHosts; + designatedHosts[kTestShardIds[1]] = HostAndPort("HostNotInShard", 12345); + auto ars = AsyncRequestsSender(operationContext(), + executor(), + kTestNss.db(), + requests, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + Shard::RetryPolicy::kNoRetry, + nullptr /* no yielder */, + designatedHosts); + + // We see the error immediately, because it happens in construction. + auto response = ars.next(); + ASSERT_EQ(response.swResponse.getStatus(), ErrorCodes::HostNotFound); + ASSERT_EQ(response.shardId, kTestShardIds[1]); +} + } // namespace } // namespace mongo diff --git a/src/mongo/s/async_rpc_shard_retry_policy.h b/src/mongo/s/async_rpc_shard_retry_policy.h new file mode 100644 index 0000000000000..77df4fd8771d4 --- /dev/null +++ b/src/mongo/s/async_rpc_shard_retry_policy.h @@ -0,0 +1,112 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#pragma once + +#include "mongo/base/error_codes.h" +#include "mongo/executor/async_rpc_error_info.h" +#include "mongo/executor/async_rpc_retry_policy.h" +#include "mongo/s/client/shard.h" + +namespace mongo { +namespace async_rpc { + +namespace { +// See ARS::kMaxNumFailedHostRetryAttempts and Shard::kOnErrorNumRetries +const int kOnErrorNumRetries = 3; +} // namespace + +class ShardRetryPolicy : public RetryPolicy { +public: + ShardRetryPolicy(Shard::RetryPolicy shardInternalRetryPolicy) + : _shardInternalRetryPolicy(shardInternalRetryPolicy) {} + + virtual bool shouldRetry(Status s, int retryCount) { + if (_retryCount >= kOnErrorNumRetries) { + return false; + } + + if (s.isOK() || s.code() != ErrorCodes::RemoteCommandExecutionError) { + return false; + } + + auto extraInfo = s.extraInfo(); + if (extraInfo->isLocal()) { + return false; + } + return Shard::remoteIsRetriableError(extraInfo->asRemote().getRemoteCommandResult().code(), + _shardInternalRetryPolicy); + } + + bool recordAndEvaluateRetry(Status s) override { + bool shouldRetryVal = shouldRetry(s, _retryCount); + + if (shouldRetryVal) { + ++_retryCount; + } + return shouldRetryVal; + } + + Milliseconds getNextRetryDelay() override final { + return Milliseconds::zero(); + } + + BSONObj toBSON() const override final { + return BSON("retryPolicyType" + << "shardRetryPolicy"); + } + +private: + // The internal retry policy used by the Shard class. + Shard::RetryPolicy _shardInternalRetryPolicy; + + // The number of retries that have been attempted by the corresponding async_rpc runner. + int _retryCount = 0; +}; + +class ShardRetryPolicyWithIsStartingTransaction : public ShardRetryPolicy { +public: + ShardRetryPolicyWithIsStartingTransaction(Shard::RetryPolicy shardInternalRetryPolicy, + bool isStartingTransaction) + : ShardRetryPolicy(shardInternalRetryPolicy), + _isStartingTransaction(isStartingTransaction) {} + + bool shouldRetry(Status s, int retryCount) override { + if (_isStartingTransaction) { + return false; + } + return ShardRetryPolicy::shouldRetry(s, retryCount); + } + +private: + bool _isStartingTransaction; +}; + +} // namespace async_rpc +} // namespace mongo diff --git a/src/mongo/s/async_rpc_shard_targeter.h b/src/mongo/s/async_rpc_shard_targeter.h index 5fa5a133f2d64..751642e3e9026 100644 --- a/src/mongo/s/async_rpc_shard_targeter.h +++ b/src/mongo/s/async_rpc_shard_targeter.h @@ -29,21 +29,34 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/client/read_preference.h" +#include "mongo/client/remote_command_targeter.h" #include "mongo/db/operation_context.h" #include "mongo/db/s/operation_sharding_state.h" #include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" #include "mongo/executor/async_rpc_targeter.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/util/assert_util.h" #include "mongo/util/assert_util_core.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/out_of_line_executor.h" -#include -#include namespace mongo { @@ -70,7 +83,7 @@ class ShardIdTargeter : public Targeter { /** * Update underlying shard targeter's view of topology on error. 
*/ - SemiFuture onRemoteCommandError(HostAndPort h, Status s) override final { + SemiFuture onRemoteCommandError(HostAndPort h, Status s) override { invariant(_shardFromLastResolve, "Cannot propagate a remote command error to a ShardTargeter before calling " "resolve and obtaining a shard."); diff --git a/src/mongo/s/async_rpc_shard_targeter_test.cpp b/src/mongo/s/async_rpc_shard_targeter_test.cpp index 1d3c4262ed067..1d5ecbd227a5d 100644 --- a/src/mongo/s/async_rpc_shard_targeter_test.cpp +++ b/src/mongo/s/async_rpc_shard_targeter_test.cpp @@ -27,25 +27,42 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include + #include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/client/read_preference.h" -#include "mongo/db/operation_context.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/cursor_response_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/shard_id.h" #include "mongo/executor/async_rpc.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/async_rpc_shard_targeter.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/sharding_initialization.h" #include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/s/sharding_test_fixture_common.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" #include "mongo/util/cancellation.h" #include "mongo/util/future.h" #include "mongo/util/net/hostandport.h" -#include -#include namespace mongo { namespace async_rpc { @@ -298,11 +315,12 @@ DEATH_TEST_F(AsyncRPCShardingTestFixture, CannotCallOnRemoteErrorBeforeResolve, * Test ShardId overload version of 'sendCommand'. 
*/ TEST_F(AsyncRPCShardingTestFixture, ShardIdOverload) { - const NamespaceString testNS = NamespaceString("testdb", "testcoll"); + const NamespaceString testNS = + NamespaceString::createNamespaceString_forTest("testdb", "testcoll"); const BSONObj testFirstBatch = BSON("x" << 1); const FindCommandRequest findCmd = FindCommandRequest(testNS); - const BSONObj findReply = CursorResponse(testNS, 0LL, {testFirstBatch}) - .toBSON(CursorResponse::ResponseType::InitialResponse); + BSONObj findReply = CursorResponse(testNS, 0LL, {testFirstBatch}) + .toBSON(CursorResponse::ResponseType::InitialResponse); auto options = std::make_shared>( findCmd, executor(), CancellationToken::uncancelable()); diff --git a/src/mongo/s/balancer_configuration.cpp b/src/mongo/s/balancer_configuration.cpp index 9375fae3338f1..3d150080c67ed 100644 --- a/src/mongo/s/balancer_configuration.cpp +++ b/src/mongo/s/balancer_configuration.cpp @@ -28,23 +28,47 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/balancer_configuration.h" - -#include -#include -#include #include - +// IWYU pragma: no_include "boost/date_time/gregorian_calendar.ipp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/balancer_configuration.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -83,8 +107,7 @@ const char kAttemptToBalanceJumboChunks[] = "attemptToBalanceJumboChunks"; } // namespace const char BalancerSettingsType::kKey[] = "balancer"; -// TODO SERVER-75757: get rid of legacy `autoSplitOnly` mode -const char* BalancerSettingsType::kBalancerModes[] = {"full", "autoSplitOnly", "off"}; +const char* BalancerSettingsType::kBalancerModes[] = {"full", "off"}; const char ChunkSizeSettingsType::kKey[] = "chunksize"; const uint64_t ChunkSizeSettingsType::kDefaultMaxChunkSizeBytes{128 * 1024 * 1024}; @@ -306,10 +329,22 @@ StatusWith BalancerSettingsType::fromBSON(const BSONObj& o return status; auto it = std::find(std::begin(kBalancerModes), std::end(kBalancerModes), modeStr); if (it == std::end(kBalancerModes)) { - return Status(ErrorCodes::BadValue, "Invalid balancer mode"); + std::vector supportedModes; + std::transform(std::begin(kBalancerModes), + std::end(kBalancerModes), + std::back_inserter(supportedModes), + [](const char* supportedMode) -> std::string { + return std::string(supportedMode); + }); + LOGV2_WARNING( + 7575700, + "Balancer turned off because currently set balancing mode is not valid", + "currentMode"_attr = modeStr, + "supportedModes"_attr = supportedModes); + settings._mode = kOff; + } else { + settings._mode = static_cast(it - std::begin(kBalancerModes)); } - - settings._mode = static_cast(it - std::begin(kBalancerModes)); } } diff --git 
a/src/mongo/s/balancer_configuration.h b/src/mongo/s/balancer_configuration.h index 2a46669956d62..319e36630fbdc 100644 --- a/src/mongo/s/balancer_configuration.h +++ b/src/mongo/s/balancer_configuration.h @@ -30,9 +30,14 @@ #pragma once #include +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/s/request_types/migration_secondary_throttle_options.h" @@ -51,7 +56,7 @@ class StatusWith; * * balancer: { * stopped: , - * mode: , // Only consulted if "stopped" is missing or + * mode: , // Only consulted if "stopped" is missing or * false activeWindow: { start: "", stop: "" } * } */ @@ -59,9 +64,8 @@ class BalancerSettingsType { public: // Supported balancer modes enum BalancerMode { - kFull, // Balancer will always try to keep the cluster even - kAutoSplitOnly, // Only balance on auto splits - kOff, // Balancer is completely off + kFull, // Balancer will always try to keep the cluster even + kOff, // Balancer is completely off }; // The key under which this setting is stored on the config server diff --git a/src/mongo/s/balancer_configuration_test.cpp b/src/mongo/s/balancer_configuration_test.cpp index d06d8ba489772..4d18175763691 100644 --- a/src/mongo/s/balancer_configuration_test.cpp +++ b/src/mongo/s/balancer_configuration_test.cpp @@ -27,23 +27,40 @@ * it in the license file. */ -#include -#include #include -#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/vector_clock.h" +#include "mongo/executor/network_test_env.h" #include "mongo/executor/remote_command_request.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/rpc/metadata/tracking_metadata.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/net/hostandport.h" namespace mongo { @@ -80,7 +97,8 @@ class BalancerConfigurationTestFixture : public ShardingTestFixture { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto findCommand = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(findCommand->getNamespaceOrUUID().nss()->ns(), "config.settings"); + ASSERT_EQ(findCommand->getNamespaceOrUUID().nss(), + NamespaceString::kConfigSettingsNamespace); ASSERT_BSONOBJ_EQ(findCommand->getFilter(), BSON("_id" << key)); checkReadConcern(request.cmdObj, @@ -184,11 +202,15 @@ TEST(BalancerSettingsType, AllValidBalancerModeOptions) { } TEST(BalancerSettingsType, InvalidBalancerModeOption) { - ASSERT_EQ(ErrorCodes::BadValue, - BalancerSettingsType::fromBSON(BSON("mode" - << "BAD")) - .getStatus() - .code()); + startCapturingLogMessages(); + ASSERT_EQ(BalancerSettingsType::kOff, + assertGet(BalancerSettingsType::fromBSON(BSON("mode" + << 
"BAD"))) + .getMode()); + stopCapturingLogMessages(); + ASSERT_EQ(1, + countTextFormatLogLinesContaining( + "Balancer turned off because currently set balancing mode is not valid")); } TEST(BalancerSettingsType, BalancingWindowStartLessThanStop) { diff --git a/src/mongo/s/cannot_implicitly_create_collection_info.cpp b/src/mongo/s/cannot_implicitly_create_collection_info.cpp index dffacc5483e9d..753c7a6d61ef0 100644 --- a/src/mongo/s/cannot_implicitly_create_collection_info.cpp +++ b/src/mongo/s/cannot_implicitly_create_collection_info.cpp @@ -27,13 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/s/cannot_implicitly_create_collection_info.h" - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace { @@ -43,7 +41,7 @@ MONGO_INIT_REGISTER_ERROR_EXTRA_INFO(CannotImplicitlyCreateCollectionInfo); } // namespace void CannotImplicitlyCreateCollectionInfo::serialize(BSONObjBuilder* bob) const { - bob->append("ns", _nss.ns()); + bob->append("ns", NamespaceStringUtil::serialize(_nss)); } std::shared_ptr CannotImplicitlyCreateCollectionInfo::parse( diff --git a/src/mongo/s/cannot_implicitly_create_collection_info.h b/src/mongo/s/cannot_implicitly_create_collection_info.h index 7a082c1abda6c..97db698e7c6bb 100644 --- a/src/mongo/s/cannot_implicitly_create_collection_info.h +++ b/src/mongo/s/cannot_implicitly_create_collection_info.h @@ -29,7 +29,13 @@ #pragma once +#include +#include + #include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" namespace mongo { diff --git a/src/mongo/s/catalog/sharding_catalog_client.cpp b/src/mongo/s/catalog/sharding_catalog_client.cpp index 9383160429a00..240f0c3e7af3f 100644 --- a/src/mongo/s/catalog/sharding_catalog_client.cpp +++ b/src/mongo/s/catalog/sharding_catalog_client.cpp @@ -27,10 +27,7 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/s/catalog/sharding_catalog_client.h" - #include "mongo/db/write_concern_options.h" namespace mongo { @@ -46,4 +43,8 @@ const WriteConcernOptions ShardingCatalogClient::kMajorityWriteConcern( const WriteConcernOptions ShardingCatalogClient::kLocalWriteConcern( 1, WriteConcernOptions::SyncMode::UNSET, Seconds(0)); +// An empty namespace is used as a reserved value to persist initialization metadata of +// config.placementHistory. 
+const NamespaceString ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker{}; + } // namespace mongo diff --git a/src/mongo/s/catalog/sharding_catalog_client.h b/src/mongo/s/catalog/sharding_catalog_client.h index 0167c53eb3093..e725fef971e6c 100644 --- a/src/mongo/s/catalog/sharding_catalog_client.h +++ b/src/mongo/s/catalog/sharding_catalog_client.h @@ -29,21 +29,44 @@ #pragma once +#include #include +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/read_preference.h" #include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/optime_with.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/shard_id.h" #include "mongo/db/write_concern_options.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/catalog/type_namespace_placement_gen.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog/type_tags.h" #include "mongo/s/chunk_version.h" #include "mongo/s/client/shard.h" #include "mongo/s/index_version.h" -#include "mongo/s/request_types/get_historical_placement_info_gen.h" +#include "mongo/s/request_types/placement_history_commands_gen.h" +#include "mongo/util/duration.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -52,13 +75,16 @@ class BSONArrayBuilder; class BSONObj; class BSONObjBuilder; class ChunkType; + class CollectionType; class ConnectionString; + class DatabaseType; class LogicalTime; class OperationContext; class ShardKeyPattern; class TagsType; + class VersionType; namespace executor { @@ -87,6 +113,9 @@ class ShardingCatalogClient { // Constant to use for configuration data local writes static const WriteConcernOptions kLocalWriteConcern; + // Identifier for the "initialization metadata" documents of config.placementHistory + static const NamespaceString kConfigPlacementHistoryInitializationMarker; + virtual ~ShardingCatalogClient() = default; virtual std::vector runCatalogAggregation( @@ -283,14 +312,21 @@ repl::ReadConcernLevel readConcern) = 0; /** - * Returns keys for the given purpose and with an expiresAt value greater than newerThanThis. + * Returns internal keys for the given purpose that have an expiresAt value greater than + * newerThanThis. */ - virtual StatusWith> getNewKeys( + virtual StatusWith> getNewInternalKeys( OperationContext* opCtx, StringData purpose, const LogicalTime& newerThanThis, repl::ReadConcernLevel readConcernLevel) = 0; + /** + * Returns all external (i.e. validation-only) keys for the given purpose. + */ + virtual StatusWith> getAllExternalKeys( + OperationContext* opCtx, StringData purpose, repl::ReadConcernLevel readConcernLevel) = 0; + /** * Directly inserts a document in the specified namespace on the config server. The document * must have an _id index. Must only be used for insertions in the 'config' database. 
@@ -349,24 +385,35 @@ class ShardingCatalogClient { boost::optional hint = boost::none) = 0; /** - * Returns the list of active shards that still contains data for the specified collection or - * that used to contain data for the specified collection at clusterTime >= input clusterTime - * based on placementHistory + * Returns shard-placement information for the collection named 'collName' at the requested + * point in time 'clusterTime'. + * - When an exact response can be computed, it is composed of the shards hosting data of + * collName plus the primary shard of the parent db. + * - Otherwise, an approximate response is generated based on a past snapshot of config.shards. + * References to shards that aren't currently part of the cluster may be included in the + * response. */ virtual HistoricalPlacement getShardsThatOwnDataForCollAtClusterTime( OperationContext* opCtx, const NamespaceString& collName, const Timestamp& clusterTime) = 0; /** - * Returns the list of active shards that still contains data for the specified database or - * that used to contain data for the specified database at clusterTime >= input clusterTime - * based on placementHistory + * Returns shard-placement information for the database named 'dbName' at the requested point + * in time 'clusterTime'. + * When no exact response can be computed, an approximate one is generated based on a past + * snapshot of config.shards. + * References to shards that aren't currently part of the cluster may be included in the + * response. */ virtual HistoricalPlacement getShardsThatOwnDataForDbAtClusterTime( OperationContext* opCtx, const NamespaceString& dbName, const Timestamp& clusterTime) = 0; /** - * Returns the list of active shards that still contains data or that used to contain data - * at clusterTime >= input clusterTime based on placementHistory + * Returns shard-placement information for the whole cluster at the requested point in time + * 'clusterTime'. + * When no exact response can be computed, an approximate one is generated based on a past + * snapshot of config.shards. + * References to shards that aren't currently part of the cluster may be included in the + * response.
*/ virtual HistoricalPlacement getShardsThatOwnDataAtClusterTime(OperationContext* opCtx, const Timestamp& clusterTime) = 0; diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp index cc7349c4cf7f4..40aae7920dae5 100644 --- a/src/mongo/s/catalog/sharding_catalog_client_impl.cpp +++ b/src/mongo/s/catalog/sharding_catalog_client_impl.cpp @@ -29,57 +29,80 @@ #include "mongo/s/catalog/sharding_catalog_client_impl.h" +#include +#include +#include +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/client/read_preference.h" -#include "mongo/client/remote_command_targeter.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/aggregate_command_gen.h" -#include "mongo/db/pipeline/document_source_add_fields.h" #include "mongo/db/pipeline/document_source_facet.h" #include "mongo/db/pipeline/document_source_group.h" +#include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_lookup.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_sort.h" #include "mongo/db/pipeline/document_source_union_with.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/db/session/logical_session_cache.h" +#include "mongo/db/server_options.h" #include "mongo/db/vector_clock.h" -#include "mongo/executor/network_interface.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/catalog/type_config_version.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_namespace_placement_gen.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" #include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/client/shard_remote_gen.h" #include "mongo/s/database_version.h" #include "mongo/s/grid.h" -#include "mongo/s/shard_key_pattern.h" -#include "mongo/s/shard_util.h" -#include "mongo/s/write_ops/batch_write_op.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/stdx/variant.h" #include "mongo/util/assert_util.h" -#include "mongo/util/net/hostandport.h" -#include "mongo/util/pcre.h" +#include "mongo/util/intrusive_counter.h" 
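The reworded comments above describe the best-effort semantics of the placement-history accessors. A minimal, hypothetical caller sketch follows, assuming the IDL-generated HistoricalPlacement type exposes the shard list via a getShards() accessor (not shown in this patch):

```cpp
// Hypothetical helper; the accessor name getShards() is an assumption about the generated type.
std::vector<ShardId> shardsOwningCollectionAt(OperationContext* opCtx,
                                              ShardingCatalogClient* catalogClient,
                                              const NamespaceString& nss,
                                              const Timestamp& clusterTime) {
    // The answer may be approximate (derived from a past snapshot of config.shards), so the
    // result can still name shards that have since been removed from the cluster.
    HistoricalPlacement placement =
        catalogClient->getShardsThatOwnDataForCollAtClusterTime(opCtx, nss, clusterTime);
    return placement.getShards();
}
```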
+#include "mongo/util/namespace_string_util.h" #include "mongo/util/pcre_util.h" #include "mongo/util/str.h" -#include "mongo/util/time_support.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -122,7 +145,8 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC // } // } stages.emplace_back(DocumentSourceMatch::create( - Doc{{CollectionType::kNssFieldName, nss.toString()}}.toBson(), expCtx)); + Doc{{CollectionType::kNssFieldName, NamespaceStringUtil::serialize(nss)}}.toBson(), + expCtx)); // 2. Two $unionWith stages guarded by a mutually exclusive condition on whether the refresh is // incremental ('lastmodEpoch' matches sinceVersion.epoch), so that only a single one of them @@ -246,7 +270,9 @@ AggregateCommandRequest makeCollectionAndChunksAggregation(OperationContext* opC return Doc{ {"coll", CollectionType::ConfigNS.coll()}, {"pipeline", - Arr{Value{Doc{{"$match", Doc{{CollectionType::kNssFieldName, nss.toString()}}}}}, + Arr{Value{Doc{ + {"$match", + Doc{{CollectionType::kNssFieldName, NamespaceStringUtil::serialize(nss)}}}}}, Value{Doc{{"$match", Doc{{CollectionType::kEpochFieldName, lastmodEpochMatch}}}}}, Value{Doc{{"$lookup", lookupPipeline}}}, Value{Doc{{"$unwind", Doc{{"path", "$" + chunksLookupOutputFieldName}}}}}, @@ -290,7 +316,8 @@ AggregateCommandRequest makeCollectionAndIndexesAggregation(OperationContext* op // } // } stages.emplace_back(DocumentSourceMatch::create( - Doc{{CollectionType::kNssFieldName, nss.toString()}}.toBson(), expCtx)); + Doc{{CollectionType::kNssFieldName, NamespaceStringUtil::serialize(nss)}}.toBson(), + expCtx)); // 2. Retrieve config.csrs.indexes entries with the same uuid as the one from the // config.collections document. @@ -319,6 +346,45 @@ AggregateCommandRequest makeCollectionAndIndexesAggregation(OperationContext* op return AggregateCommandRequest(CollectionType::ConfigNS, std::move(serializedPipeline)); } +/** + * Returns keys for the given purpose and have an expiresAt value greater than newerThanThis on the + * given shard. + */ +template +StatusWith> _getNewKeys(OperationContext* opCtx, + std::shared_ptr shard, + const NamespaceString& nss, + StringData purpose, + const LogicalTime& newerThanThis, + repl::ReadConcernLevel readConcernLevel) { + BSONObjBuilder queryBuilder; + queryBuilder.append("purpose", purpose); + queryBuilder.append("expiresAt", BSON("$gt" << newerThanThis.asTimestamp())); + + auto findStatus = shard->exhaustiveFindOnConfig(opCtx, + kConfigReadSelector, + readConcernLevel, + nss, + queryBuilder.obj(), + BSON("expiresAt" << 1), + boost::none); + if (!findStatus.isOK()) { + return findStatus.getStatus(); + } + const auto& objs = findStatus.getValue().docs; + + std::vector keyDocs; + keyDocs.reserve(objs.size()); + for (auto&& obj : objs) { + try { + keyDocs.push_back(KeyDocumentType::parse(IDLParserContext("keyDoc"), obj)); + } catch (...) { + return exceptionToStatus(); + } + } + return keyDocs; +} + } // namespace ShardingCatalogClientImpl::ShardingCatalogClientImpl(std::shared_ptr overrideConfigShard) @@ -453,14 +519,6 @@ std::vector ShardingCatalogClientImpl::runCatalogAggregation( aggRequest.setWriteConcern(WriteConcernOptions()); const auto readPref = [&]() -> ReadPreferenceSetting { - // (Ignore FCV check): Config servers always use ShardRemote for themselves in 7.0. 
- if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()) { - // When the feature flag is on, the config server may read from any node in its replica - // set, so we should use the typical config server read preference. - return {}; - } - const auto vcTime = VectorClock::get(opCtx)->getTime(); ReadPreferenceSetting readPref{kConfigReadSelector}; readPref.minClusterTime = vcTime.configTime().asTimestamp(); @@ -508,12 +566,13 @@ CollectionType ShardingCatalogClientImpl::getCollection(OperationContext* opCtx, kConfigReadSelector, readConcernLevel, CollectionType::ConfigNS, - BSON(CollectionType::kNssFieldName << nss.ns()), + BSON(CollectionType::kNssFieldName + << NamespaceStringUtil::serialize(nss)), BSONObj(), 1)) .value; uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "collection " << nss.ns() << " not found", + str::stream() << "collection " << nss.toStringForErrorMsg() << " not found", !collDoc.empty()); return CollectionType(collDoc[0]); @@ -619,12 +678,14 @@ StatusWith ShardingCatalogClientImpl::getConfigVersion( if (queryResults.size() > 1) { return {ErrorCodes::TooManyMatchingDocuments, - str::stream() << "should only have 1 document in " << VersionType::ConfigNS.ns()}; + str::stream() << "should only have 1 document in " + << VersionType::ConfigNS.toStringForErrorMsg()}; } if (queryResults.empty()) { return {ErrorCodes::NoMatchingDocument, - str::stream() << "No documents found in " << VersionType::ConfigNS.ns()}; + str::stream() << "No documents found in " + << VersionType::ConfigNS.toStringForErrorMsg()}; } BSONObj versionDoc = queryResults.front(); @@ -726,7 +787,7 @@ std::pair> ShardingCatalogClientImpl::get opCtx, aggRequest, readConcern, Milliseconds(gFindChunksOnConfigTimeoutMS.load())); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << nss.ns() << " not found", + str::stream() << "Collection " << nss.toStringForErrorMsg() << " not found", !aggResult.empty()); @@ -765,7 +826,8 @@ std::pair> ShardingCatalogClientImpl::get } uassert(ErrorCodes::ConflictingOperationInProgress, - str::stream() << "No chunks were found for the collection " << nss, + str::stream() << "No chunks were found for the collection " + << nss.toStringForErrorMsg(), !chunks.empty()); } @@ -781,11 +843,12 @@ ShardingCatalogClientImpl::getCollectionAndShardingIndexCatalogEntries( std::vector aggResult = runCatalogAggregation(opCtx, aggRequest, readConcern); uassert(ErrorCodes::NamespaceNotFound, - str::stream() << "Collection " << nss.ns() << " not found", + str::stream() << "Collection " << nss.toStringForErrorMsg() << " not found", !aggResult.empty()); uassert(6958800, - str::stream() << "More than one collection for ns " << nss.ns() << " found", + str::stream() << "More than one collection for ns " << nss.toStringForErrorMsg() + << " found", aggResult.size() == 1); boost::optional coll; @@ -804,13 +867,14 @@ ShardingCatalogClientImpl::getCollectionAndShardingIndexCatalogEntries( StatusWith> ShardingCatalogClientImpl::getTagsForCollection( OperationContext* opCtx, const NamespaceString& nss) { - auto findStatus = _exhaustiveFindOnConfig(opCtx, - kConfigReadSelector, - repl::ReadConcernLevel::kMajorityReadConcern, - TagsType::ConfigNS, - BSON(TagsType::ns(nss.ns())), - BSON(TagsType::min() << 1), - boost::none); // no limit + auto findStatus = + _exhaustiveFindOnConfig(opCtx, + kConfigReadSelector, + repl::ReadConcernLevel::kMajorityReadConcern, + TagsType::ConfigNS, + 
BSON(TagsType::ns(NamespaceStringUtil::serialize(nss))), + BSON(TagsType::min() << 1), + boost::none); // no limit if (!findStatus.isOK()) { return findStatus.getStatus().withContext("Failed to load tags"); } @@ -871,16 +935,8 @@ std::vector ShardingCatalogClientImpl::getAllNssThatHaveZonesFo // Run the aggregation const auto readConcern = [&]() -> repl::ReadConcernArgs { - // (Ignore FCV check): Config servers always use ShardRemote for themselves in 7.0. - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()) { - // When the feature flag is on, the config server may read from a secondary which may - // need to wait for replication, so we should use afterClusterTime. - return {repl::ReadConcernLevel::kMajorityReadConcern}; - } else { - const auto time = VectorClock::get(opCtx)->getTime(); - return {time.configTime(), repl::ReadConcernLevel::kMajorityReadConcern}; - } + const auto time = VectorClock::get(opCtx)->getTime(); + return {time.configTime(), repl::ReadConcernLevel::kMajorityReadConcern}; }(); auto aggResult = runCatalogAggregation(opCtx, aggRequest, readConcern); @@ -1170,7 +1226,7 @@ Status ShardingCatalogClientImpl::removeConfigDocuments(OperationContext* opCtx, const BSONObj& query, const WriteConcernOptions& writeConcern, boost::optional hint) { - invariant(nss.db() == DatabaseName::kConfig.db()); + invariant(nss.isConfigDB()); BatchedCommandRequest request([&] { write_ops::DeleteCommandRequest deleteOp(nss); @@ -1211,42 +1267,31 @@ ShardingCatalogClientImpl::_exhaustiveFindOnConfig(OperationContext* opCtx, response.getValue().opTime); } -StatusWith> ShardingCatalogClientImpl::getNewKeys( +StatusWith> ShardingCatalogClientImpl::getNewInternalKeys( OperationContext* opCtx, StringData purpose, const LogicalTime& newerThanThis, repl::ReadConcernLevel readConcernLevel) { - BSONObjBuilder queryBuilder; - queryBuilder.append("purpose", purpose); - queryBuilder.append("expiresAt", BSON("$gt" << newerThanThis.asTimestamp())); - - auto findStatus = - _getConfigShard(opCtx)->exhaustiveFindOnConfig(opCtx, - kConfigReadSelector, - readConcernLevel, - NamespaceString::kKeysCollectionNamespace, - queryBuilder.obj(), - BSON("expiresAt" << 1), - boost::none); - - if (!findStatus.isOK()) { - return findStatus.getStatus(); - } - - const auto& keyDocs = findStatus.getValue().docs; - std::vector keys; - keys.reserve(keyDocs.size()); - for (auto&& keyDoc : keyDocs) { - try { - keys.push_back(KeysCollectionDocument::parse(IDLParserContext("keyDoc"), keyDoc)); - } catch (...) 
{ - return exceptionToStatus(); - } - } - - return keys; + return _getNewKeys(opCtx, + _getConfigShard(opCtx), + NamespaceString::kKeysCollectionNamespace, + purpose, + newerThanThis, + readConcernLevel); } +StatusWith> +ShardingCatalogClientImpl::getAllExternalKeys(OperationContext* opCtx, + StringData purpose, + repl::ReadConcernLevel readConcernLevel) { + return _getNewKeys( + opCtx, + _getConfigShard(opCtx), + NamespaceString::kExternalKeysCollectionNamespace, + purpose, + LogicalTime(), + readConcernLevel); +} HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataForCollAtClusterTime( OperationContext* opCtx, const NamespaceString& collName, const Timestamp& clusterTime) { @@ -1268,7 +1313,7 @@ HistoricalPlacement ShardingCatalogClientImpl::getShardsThatOwnDataForDbAtCluste uassert(ErrorCodes::InvalidOptions, "A full db namespace must be specified", - dbName.coll().empty() && !dbName.db().empty()); + dbName.coll().empty() && !dbName.isEmpty()); if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { return getHistoricalPlacement(opCtx, clusterTime, dbName); @@ -1394,7 +1439,7 @@ HistoricalPlacement ShardingCatalogClientImpl::getHistoricalPlacement( "timestamp": { "$lte": }, - "nss": kConfigsvrPlacementHistoryFcvMarkerNamespace + "nss": kConfigPlacementHistoryInitializationMarker } }, { @@ -1425,7 +1470,8 @@ HistoricalPlacement ShardingCatalogClientImpl::getHistoricalPlacement( // Build the pipeline for the exact placement data. // 1. Get all the history entries prior to the requested time concerning either the collection // or the parent database. - const auto& kMarkerNss = NamespaceString::kConfigsvrPlacementHistoryFcvMarkerNamespace.ns(); + const auto& kMarkerNss = NamespaceStringUtil::serialize( + ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker); auto matchStage = [&]() { bool isClusterSearch = !nss.has_value(); if (isClusterSearch) @@ -1512,16 +1558,8 @@ HistoricalPlacement ShardingCatalogClientImpl::getHistoricalPlacement( // Run the aggregation const auto readConcern = [&]() -> repl::ReadConcernArgs { - // (Ignore FCV check): Config servers always use ShardRemote for themselves in 7.0. - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()) { - // When the feature flag is on, the config server may read from a secondary which may - // need to wait for replication, so we should use afterClusterTime. 
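The renamed kConfigPlacementHistoryInitializationMarker (an empty namespace) bounds every placement-history query: the pipeline always matches the marker document alongside the requested namespace so that requests predating the history's initialization can be detected and answered approximately. Below is a simplified, illustrative sketch of that match shape only; it assumes the IDL-generated NamespacePlacementType field-name constants and omits the grouping, sorting, and $lookup stages of the real pipeline.

```cpp
// Simplified match filter; illustrative only.
BSONObj makePlacementHistoryMatch(const Timestamp& clusterTime,
                                  const boost::optional<NamespaceString>& nss) {
    const auto markerNss = NamespaceStringUtil::serialize(
        ShardingCatalogClient::kConfigPlacementHistoryInitializationMarker);

    BSONArrayBuilder namespaces;
    namespaces.append(markerNss);  // the initialization marker is always considered
    if (nss) {
        namespaces.append(NamespaceStringUtil::serialize(*nss));
    }

    return BSON(NamespacePlacementType::kTimestampFieldName
                << BSON("$lte" << clusterTime) << NamespacePlacementType::kNssFieldName
                << BSON("$in" << namespaces.arr()));
}
```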
- return {repl::ReadConcernLevel::kSnapshotReadConcern}; - } else { - const auto vcTime = VectorClock::get(opCtx)->getTime(); - return {vcTime.configTime(), repl::ReadConcernLevel::kSnapshotReadConcern}; - } + const auto vcTime = VectorClock::get(opCtx)->getTime(); + return {vcTime.configTime(), repl::ReadConcernLevel::kSnapshotReadConcern}; }(); auto aggrResult = runCatalogAggregation(opCtx, aggRequest, readConcern); diff --git a/src/mongo/s/catalog/sharding_catalog_client_impl.h b/src/mongo/s/catalog/sharding_catalog_client_impl.h index 63c120c6b65ab..92e5b341ce315 100644 --- a/src/mongo/s/catalog/sharding_catalog_client_impl.h +++ b/src/mongo/s/catalog/sharding_catalog_client_impl.h @@ -29,11 +29,46 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" #include "mongo/platform/mutex.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" +#include "mongo/s/request_types/placement_history_commands_gen.h" +#include "mongo/util/duration.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -172,43 +207,27 @@ class ShardingCatalogClientImpl final : public ShardingCatalogClient { const WriteConcernOptions& writeConcern, boost::optional hint = boost::none) override; - StatusWith> getNewKeys( + StatusWith> getNewInternalKeys( OperationContext* opCtx, StringData purpose, const LogicalTime& newerThanThis, repl::ReadConcernLevel readConcernLevel) override; + StatusWith> getAllExternalKeys( + OperationContext* opCtx, + StringData purpose, + repl::ReadConcernLevel readConcernLevel) override; - /* - * Return all shards that used to own data for the collection at the given clusterTime. - * The result should be either: - 1. The list of shards if the collection was sharded - 2. A list 1 element containing only the primary shard if the collection was unsharded, - dropped or renamed. - 3. An empty array if the collection and the database are not found - * In case at least one of the shard is no longer active, a SnapshotTooOld error is thrown. - */ HistoricalPlacement getShardsThatOwnDataForCollAtClusterTime( OperationContext* opCtx, const NamespaceString& collName, const Timestamp& clusterTime) override; - /* - * Return all shards that used to own data for the database at the given clusterTime. - * The result is - 1. a vector of unique shardids - 2. 
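With the catalogShard feature-flag branches removed, every catalog read in this file is now pinned to the configTime this node has already observed, both in the read preference and in the read concern. A condensed sketch of that pattern, mirroring the lambdas above (the helper name is illustrative):

```cpp
// Read settings for a causally consistent config read; mirrors the lambdas in this file.
std::pair<ReadPreferenceSetting, repl::ReadConcernArgs> makeConfigReadSettings(
    OperationContext* opCtx) {
    const auto vcTime = VectorClock::get(opCtx)->getTime();

    // Any config-server member is eligible, but not one whose cluster time lags the
    // configTime already observed by this node.
    ReadPreferenceSetting readPref{kConfigReadSelector};
    readPref.minClusterTime = vcTime.configTime().asTimestamp();

    // Pair the read with an afterClusterTime read concern at that same configTime.
    repl::ReadConcernArgs readConcern{vcTime.configTime(),
                                      repl::ReadConcernLevel::kMajorityReadConcern};

    return {std::move(readPref), std::move(readConcern)};
}
```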
An empty array if the collection and the database are not found - * In case at least one of the shard is no longer active, a SnapshotTooOld error is thrown. - */ HistoricalPlacement getShardsThatOwnDataForDbAtClusterTime( OperationContext* opCtx, const NamespaceString& dbName, const Timestamp& clusterTime) override; - /** - * Returns the list of active shards that still contains data or that used to contain data - * at clusterTime >= input clusterTime based on placementHistory - */ HistoricalPlacement getShardsThatOwnDataAtClusterTime(OperationContext* opCtx, const Timestamp& clusterTime) override; diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp index f8abd73c81bbb..b4266f74247eb 100644 --- a/src/mongo/s/catalog/sharding_catalog_client_mock.cpp +++ b/src/mongo/s/catalog/sharding_catalog_client_mock.cpp @@ -27,10 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/s/catalog/sharding_catalog_client_mock.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/s/catalog/sharding_catalog_client_mock.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_config_version.h" @@ -38,6 +40,7 @@ #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" #include "mongo/s/client/shard.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -199,7 +202,7 @@ Status ShardingCatalogClientMock::createDatabase(OperationContext* opCtx, return {ErrorCodes::InternalError, "Method not implemented"}; } -StatusWith> ShardingCatalogClientMock::getNewKeys( +StatusWith> ShardingCatalogClientMock::getNewInternalKeys( OperationContext* opCtx, StringData purpose, const LogicalTime& newerThanThis, @@ -207,6 +210,13 @@ StatusWith> ShardingCatalogClientMock::getNe return {ErrorCodes::InternalError, "Method not implemented"}; } +StatusWith> +ShardingCatalogClientMock::getAllExternalKeys(OperationContext* opCtx, + StringData purpose, + repl::ReadConcernLevel readConcernLevel) { + return {ErrorCodes::InternalError, "Method not implemented"}; +} + StatusWith>> ShardingCatalogClientMock::_exhaustiveFindOnConfig(OperationContext* opCtx, const ReadPreferenceSetting& readPref, diff --git a/src/mongo/s/catalog/sharding_catalog_client_mock.h b/src/mongo/s/catalog/sharding_catalog_client_mock.h index 9d7f895525b36..7f94d392e6388 100644 --- a/src/mongo/s/catalog/sharding_catalog_client_mock.h +++ b/src/mongo/s/catalog/sharding_catalog_client_mock.h @@ -29,7 +29,40 @@ #pragma once +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/keys_collection_document_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/write_concern_options.h" #include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include 
"mongo/s/catalog/type_namespace_placement_gen.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/duration.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -150,12 +183,17 @@ class ShardingCatalogClientMock : public ShardingCatalogClient { Status createDatabase(OperationContext* opCtx, StringData dbName, ShardId primaryShard); - StatusWith> getNewKeys( + StatusWith> getNewInternalKeys( OperationContext* opCtx, StringData purpose, const LogicalTime& newerThanThis, repl::ReadConcernLevel readConcernLevel) override; + StatusWith> getAllExternalKeys( + OperationContext* opCtx, + StringData purpose, + repl::ReadConcernLevel readConcernLevel) override; + HistoricalPlacement getShardsThatOwnDataForCollAtClusterTime( OperationContext* opCtx, const NamespaceString& collName, diff --git a/src/mongo/s/catalog/sharding_catalog_client_test.cpp b/src/mongo/s/catalog/sharding_catalog_client_test.cpp index 3e4a021016438..fedabf26a5a7e 100644 --- a/src/mongo/s/catalog/sharding_catalog_client_test.cpp +++ b/src/mongo/s/catalog/sharding_catalog_client_test.cpp @@ -27,29 +27,58 @@ * it in the license file. */ +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/json.h" #include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/commands.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_request_helper.h" -#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/time_proof_service.h" #include "mongo/db/vector_clock.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/rpc/metadata/tracking_metadata.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" -#include "mongo/s/client/shard_registry.h" #include "mongo/s/database_version.h" #include "mongo/s/sharding_router_test_fixture.h" #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/stdx/future.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -104,10 +133,10 @@ TEST_F(ShardingCatalogClientTest, GetCollectionExisting) { auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); // Ensure the query is correct - 
ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - CollectionType::ConfigNS); - ASSERT_BSONOBJ_EQ(query->getFilter(), - BSON(CollectionType::kNssFieldName << expectedColl.getNss().ns())); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), CollectionType::ConfigNS); + ASSERT_BSONOBJ_EQ( + query->getFilter(), + BSON(CollectionType::kNssFieldName << expectedColl.getNss().ns_forTest())); ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj()); ASSERT_EQ(query->getLimit().value(), 1); @@ -178,8 +207,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabaseExisting) { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - NamespaceString::kConfigDatabasesNamespace); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), NamespaceString::kConfigDatabasesNamespace); ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(DatabaseType::kNameFieldName << expectedDb.getName())); ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj()); @@ -310,8 +338,7 @@ TEST_F(ShardingCatalogClientTest, GetAllShardsValid) { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - NamespaceString::kConfigsvrShardsNamespace); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), NamespaceString::kConfigsvrShardsNamespace); ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj()); ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj()); ASSERT_FALSE(query->getLimit().has_value()); @@ -391,7 +418,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) { auto future = launchAsync([this, &chunksQuery, newOpTime, &collEpoch, &collTimestamp] { OpTime opTime; - const auto chunks = + auto chunks = assertGet(catalogClient()->getChunks(operationContext(), chunksQuery, BSON(ChunkType::lastmod() << -1), @@ -414,8 +441,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForNSWithSortAndLimit) { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - ChunkType::ConfigNS); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), ChunkType::ConfigNS); ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery); ASSERT_BSONOBJ_EQ(query->getSort(), BSON(ChunkType::lastmod() << -1)); ASSERT_EQ(query->getLimit().value(), 1); @@ -459,7 +485,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForUUIDNoSortNoLimit) { << BSON("$gte" << static_cast(queryChunkVersion.toLong())))); auto future = launchAsync([this, &chunksQuery, &collEpoch, &collTimestamp] { - const auto chunks = + auto chunks = assertGet(catalogClient()->getChunks(operationContext(), chunksQuery, BSONObj(), @@ -480,8 +506,7 @@ TEST_F(ShardingCatalogClientTest, GetChunksForUUIDNoSortNoLimit) { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - ChunkType::ConfigNS); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), ChunkType::ConfigNS); ASSERT_BSONOBJ_EQ(query->getFilter(), chunksQuery); ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj()); ASSERT_FALSE(query->getLimit().has_value()); @@ -801,8 +826,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsNoDb) { 
auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - CollectionType::ConfigNS); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), CollectionType::ConfigNS); ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj()); ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj()); @@ -859,8 +883,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsValidResultsWithDb) { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - CollectionType::ConfigNS); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), CollectionType::ConfigNS); { BSONObjBuilder b; b.appendRegex(CollectionType::kNssFieldName, "^test\\."); @@ -902,8 +925,7 @@ TEST_F(ShardingCatalogClientTest, GetCollectionsInvalidCollectionType) { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - CollectionType::ConfigNS); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), CollectionType::ConfigNS); { BSONObjBuilder b; b.appendRegex(CollectionType::kNssFieldName, "^test\\."); @@ -941,8 +963,7 @@ TEST_F(ShardingCatalogClientTest, GetDatabasesForShardValid) { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - NamespaceString::kConfigDatabasesNamespace); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), NamespaceString::kConfigDatabasesNamespace); ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(DatabaseType::kPrimaryFieldName << dbt1.getPrimary())); ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj()); @@ -1012,8 +1033,7 @@ TEST_F(ShardingCatalogClientTest, GetTagsForCollection) { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss().value_or(NamespaceString()), - TagsType::ConfigNS); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), TagsType::ConfigNS); ASSERT_BSONOBJ_EQ(query->getFilter(), BSON(TagsType::ns("TestDB.TestColl"))); ASSERT_BSONOBJ_EQ(query->getSort(), BSON(TagsType::min() << 1)); @@ -1209,10 +1229,10 @@ TEST_F(ShardingCatalogClientTest, GetNewKeys) { repl::ReadConcernLevel readConcernLevel(repl::ReadConcernLevel::kMajorityReadConcern); auto future = launchAsync([this, purpose, currentTime, readConcernLevel] { - auto status = - catalogClient()->getNewKeys(operationContext(), purpose, currentTime, readConcernLevel); - ASSERT_OK(status.getStatus()); - return status.getValue(); + auto swKeys = catalogClient()->getNewInternalKeys( + operationContext(), purpose, currentTime, readConcernLevel); + ASSERT_OK(swKeys.getStatus()); + return swKeys.getValue(); }); LogicalTime dummyTime(Timestamp(9876, 5432)); @@ -1237,8 +1257,7 @@ TEST_F(ShardingCatalogClientTest, GetNewKeys) { fromjson("{purpose: 'none'," "expiresAt: {$gt: {$timestamp: {t: 1234, i: 5678}}}}")); - ASSERT_EQ(NamespaceString::kKeysCollectionNamespace, - query->getNamespaceOrUUID().nss().value_or(NamespaceString())); + ASSERT_EQ(NamespaceString::kKeysCollectionNamespace, query->getNamespaceOrUUID().nss()); 
ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter()); ASSERT_BSONOBJ_EQ(BSON("expiresAt" << 1), query->getSort()); ASSERT_FALSE(query->getLimit().has_value()); @@ -1274,10 +1293,10 @@ TEST_F(ShardingCatalogClientTest, GetNewKeysWithEmptyCollection) { repl::ReadConcernLevel readConcernLevel(repl::ReadConcernLevel::kMajorityReadConcern); auto future = launchAsync([this, purpose, currentTime, readConcernLevel] { - auto status = - catalogClient()->getNewKeys(operationContext(), purpose, currentTime, readConcernLevel); - ASSERT_OK(status.getStatus()); - return status.getValue(); + auto swKeys = catalogClient()->getNewInternalKeys( + operationContext(), purpose, currentTime, readConcernLevel); + ASSERT_OK(swKeys.getStatus()); + return swKeys.getValue(); }); onFindCommand([this](const RemoteCommandRequest& request) { @@ -1291,8 +1310,7 @@ TEST_F(ShardingCatalogClientTest, GetNewKeysWithEmptyCollection) { fromjson("{purpose: 'none'," "expiresAt: {$gt: {$timestamp: {t: 1234, i: 5678}}}}")); - ASSERT_EQ(NamespaceString::kKeysCollectionNamespace, - query->getNamespaceOrUUID().nss().value_or(NamespaceString())); + ASSERT_EQ(NamespaceString::kKeysCollectionNamespace, query->getNamespaceOrUUID().nss()); ASSERT_BSONOBJ_EQ(expectedQuery, query->getFilter()); ASSERT_BSONOBJ_EQ(BSON("expiresAt" << 1), query->getSort()); ASSERT_FALSE(query->getLimit().has_value()); diff --git a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp index 1bee8df06e4ae..13495f7574177 100644 --- a/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp +++ b/src/mongo/s/catalog/sharding_catalog_write_retry_test.cpp @@ -28,31 +28,52 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include #include #include +#include +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_request_helper.h" +#include "mongo/db/record_id.h" #include "mongo/db/storage/duplicate_key_error_info.h" #include "mongo/db/write_concern.h" -#include "mongo/executor/task_executor.h" -#include "mongo/rpc/metadata/repl_set_metadata.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/write_concern_error_detail.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_impl.h" -#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" -#include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/grid.h" #include "mongo/s/sharding_router_test_fixture.h" #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/stdx/future.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include 
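The GetNewKeys tests above only exercise the renamed getNewInternalKeys(); a companion test for getAllExternalKeys() could follow the same launchAsync/onFindCommand pattern. This is an illustrative sketch only, not part of the patch; the test name and the empty mocked response are assumptions:

```cpp
TEST_F(ShardingCatalogClientTest, GetAllExternalKeysTargetsExternalKeysCollection) {
    auto future = launchAsync([this] {
        auto swKeys = catalogClient()->getAllExternalKeys(
            operationContext(), "none", repl::ReadConcernLevel::kMajorityReadConcern);
        ASSERT_OK(swKeys.getStatus());
        return swKeys.getValue();
    });

    onFindCommand([this](const RemoteCommandRequest& request) {
        auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj);
        auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body);

        // External keys are always read in full from the external keys collection,
        // sorted by expiry.
        ASSERT_EQ(NamespaceString::kExternalKeysCollectionNamespace,
                  query->getNamespaceOrUUID().nss());
        ASSERT_BSONOBJ_EQ(BSON("expiresAt" << 1), query->getSort());

        return std::vector<BSONObj>{};
    });

    ASSERT_EQ(0U, future.default_timed_get().size());
}
```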
"mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -72,7 +93,8 @@ using unittest::assertGet; using InsertRetryTest = ShardingTestFixture; using UpdateRetryTest = ShardingTestFixture; -const NamespaceString kTestNamespace("config.TestColl"); +const NamespaceString kTestNamespace = + NamespaceString::createNamespaceString_forTest("config.TestColl"); const HostAndPort kTestHosts[] = { HostAndPort("TestHost1:12345"), HostAndPort("TestHost2:12345"), HostAndPort("TestHost3:12345")}; diff --git a/src/mongo/s/catalog/type_changelog.cpp b/src/mongo/s/catalog/type_changelog.cpp index e5ffe2ea4197a..f5fc3fec553bc 100644 --- a/src/mongo/s/catalog/type_changelog.cpp +++ b/src/mongo/s/catalog/type_changelog.cpp @@ -27,14 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/s/catalog/type_changelog.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/s/catalog/type_changelog.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/s/catalog/type_changelog.h b/src/mongo/s/catalog/type_changelog.h index 668f94dfc7b44..9961364f36d58 100644 --- a/src/mongo/s/catalog/type_changelog.h +++ b/src/mongo/s/catalog/type_changelog.h @@ -29,9 +29,15 @@ #pragma once +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/s/catalog/type_changelog_test.cpp b/src/mongo/s/catalog/type_changelog_test.cpp index 3142901d06aaf..c6d914d8cf7dc 100644 --- a/src/mongo/s/catalog/type_changelog_test.cpp +++ b/src/mongo/s/catalog/type_changelog_test.cpp @@ -27,12 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/catalog/type_changelog.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/time_support.h" namespace { diff --git a/src/mongo/s/catalog/type_chunk.cpp b/src/mongo/s/catalog/type_chunk.cpp index 97ba7178b0cd5..2d47a0f397fee 100644 --- a/src/mongo/s/catalog/type_chunk.cpp +++ b/src/mongo/s/catalog/type_chunk.cpp @@ -29,13 +29,23 @@ #include "mongo/s/catalog/type_chunk.h" +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/server_options.h" -#include "mongo/logv2/log.h" +#include "mongo/idl/idl_parser.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" @@ -631,8 +641,9 @@ Status ChunkType::validate() const { if (!_history.empty()) { if (_history.front().getShard() != *_shard) { return {ErrorCodes::BadValue, - str::stream() << "History contains an invalid shard " - << _history.front().getShard()}; + str::stream() << "Latest entry of chunk history refer to shard " + << _history.front().getShard() + << " that does not match the current shard " << *_shard}; } if (_onCurrentShardSince.has_value() && _history.front().getValidAfter() != *_onCurrentShardSince) { diff --git a/src/mongo/s/catalog/type_chunk.h b/src/mongo/s/catalog/type_chunk.h index 06768cc40cb89..d65e6c09bed03 100644 --- a/src/mongo/s/catalog/type_chunk.h +++ b/src/mongo/s/catalog/type_chunk.h @@ -29,16 +29,35 @@ #pragma once +#include #include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk_base_gen.h" #include "mongo/s/chunk_version.h" #include "mongo/s/shard_key_pattern.h" #include "mongo/stdx/type_traits.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/s/catalog/type_chunk_test.cpp b/src/mongo/s/catalog/type_chunk_test.cpp index b7ad47a97fbc7..abab6f63550b2 100644 --- a/src/mongo/s/catalog/type_chunk_test.cpp +++ b/src/mongo/s/catalog/type_chunk_test.cpp @@ -27,11 +27,19 @@ * it in the license file. 
*/ +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/s/catalog/type_chunk.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/time_support.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/s/catalog/type_collection.cpp b/src/mongo/s/catalog/type_collection.cpp index 0dc49ef37c76f..aafee8fe01212 100644 --- a/src/mongo/s/catalog/type_collection.cpp +++ b/src/mongo/s/catalog/type_collection.cpp @@ -27,16 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/s/catalog/type_collection.h" +#include +#include +#include +#include -#include "mongo/base/status_with.h" +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobj.h" -#include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/s/balancer_configuration.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -62,7 +63,7 @@ CollectionType::CollectionType(const BSONObj& obj) { CollectionType::parseProtected(IDLParserContext("CollectionType"), obj); invariant(getTimestamp() != Timestamp(0, 0)); uassert(ErrorCodes::BadValue, - str::stream() << "Invalid namespace " << getNss(), + str::stream() << "Invalid namespace " << getNss().toStringForErrorMsg(), getNss().isValid()); if (!getPre22CompatibleEpoch()) { setPre22CompatibleEpoch(OID()); diff --git a/src/mongo/s/catalog/type_collection.h b/src/mongo/s/catalog/type_collection.h index e5866d88e3081..f9bfc1eb6e1f7 100644 --- a/src/mongo/s/catalog/type_collection.h +++ b/src/mongo/s/catalog/type_collection.h @@ -29,9 +29,26 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" #include "mongo/s/catalog/type_collection_gen.h" #include "mongo/s/chunk_version.h" #include "mongo/s/index_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/s/catalog/type_collection_test.cpp b/src/mongo/s/catalog/type_collection_test.cpp index 2688d7106b63b..e179333ca5f5e 100644 --- a/src/mongo/s/catalog/type_collection_test.cpp +++ b/src/mongo/s/catalog/type_collection_test.cpp @@ -27,10 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/catalog/type_collection.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/resharding/common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/s/catalog/type_config_version.cpp b/src/mongo/s/catalog/type_config_version.cpp index b0fbcc98fa03c..5c999d385c815 100644 --- a/src/mongo/s/catalog/type_config_version.cpp +++ b/src/mongo/s/catalog/type_config_version.cpp @@ -29,12 +29,17 @@ #include "mongo/s/catalog/type_config_version.h" +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/s/catalog/type_config_version.h b/src/mongo/s/catalog/type_config_version.h index ba64965686038..00967d33cf922 100644 --- a/src/mongo/s/catalog/type_config_version.h +++ b/src/mongo/s/catalog/type_config_version.h @@ -30,8 +30,14 @@ #pragma once #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/db/namespace_string.h" namespace mongo { diff --git a/src/mongo/s/catalog/type_config_version_test.cpp b/src/mongo/s/catalog/type_config_version_test.cpp index 81e604ba8d1fd..51ebd344b1e65 100644 --- a/src/mongo/s/catalog/type_config_version_test.cpp +++ b/src/mongo/s/catalog/type_config_version_test.cpp @@ -27,12 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/catalog/type_config_version.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" /** * Basic tests for config version parsing. diff --git a/src/mongo/s/catalog/type_database.idl b/src/mongo/s/catalog/type_database.idl index f608bb9bc6aa3..76200806f7454 100644 --- a/src/mongo/s/catalog/type_database.idl +++ b/src/mongo/s/catalog/type_database.idl @@ -54,14 +54,6 @@ structs: validator: callback: "ShardId::validate" optional: false - # The following field has been deprecated in 6.0 and should not be used - # TODO SERVER-63983 make this field optional - partitioned: - description: "Specify if it is allowed to create sharded collection on this database." - cpp_name: sharded - type: bool - default: false - optional: false version: description: "Version of the database." type: database_version diff --git a/src/mongo/s/catalog/type_database_test.cpp b/src/mongo/s/catalog/type_database_test.cpp index f81016bae22ff..ba5e90fadde85 100644 --- a/src/mongo/s/catalog/type_database_test.cpp +++ b/src/mongo/s/catalog/type_database_test.cpp @@ -27,12 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/base/status_with.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/catalog/type_database_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/database_version.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/uuid.h" namespace { @@ -49,16 +58,14 @@ TEST(DatabaseType, Empty) { TEST(DatabaseType, Basic) { UUID uuid = UUID::gen(); Timestamp timestamp = Timestamp(1, 1); - const auto dbObj = - BSON(DatabaseType::kNameFieldName - << "mydb" << DatabaseType::kPrimaryFieldName << "shard" - << DatabaseType::kShardedFieldName << true << DatabaseType::kVersionFieldName - << BSON("uuid" << uuid << "lastMod" << 0 << "timestamp" << timestamp)); + const auto dbObj = BSON(DatabaseType::kNameFieldName + << "mydb" << DatabaseType::kPrimaryFieldName << "shard" + << DatabaseType::kVersionFieldName + << BSON("uuid" << uuid << "lastMod" << 0 << "timestamp" << timestamp)); const auto db = DatabaseType::parse(IDLParserContext("DatabaseType"), dbObj); ASSERT_EQUALS(db.getName(), "mydb"); ASSERT_EQUALS(db.getPrimary(), "shard"); - ASSERT_TRUE(db.getSharded()); ASSERT_EQUALS(db.getVersion().getUuid(), uuid); ASSERT_EQUALS(db.getVersion().getLastMod(), 0); } diff --git a/src/mongo/s/catalog/type_index_catalog.h b/src/mongo/s/catalog/type_index_catalog.h index b3b9a6daf0b9d..27b5896e17ef8 100644 --- a/src/mongo/s/catalog/type_index_catalog.h +++ b/src/mongo/s/catalog/type_index_catalog.h @@ -29,7 +29,13 @@ #pragma once +#include +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" #include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/s/catalog/type_mongos.cpp b/src/mongo/s/catalog/type_mongos.cpp index b20c7b6f51e22..1ff162c1a67ac 100644 --- a/src/mongo/s/catalog/type_mongos.cpp +++ b/src/mongo/s/catalog/type_mongos.cpp @@ -28,9 +28,17 @@ */ #include "mongo/s/catalog/type_mongos.h" +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/s/catalog/type_mongos.h b/src/mongo/s/catalog/type_mongos.h index 154abd5da6722..54df48803e13c 100644 --- a/src/mongo/s/catalog/type_mongos.h +++ b/src/mongo/s/catalog/type_mongos.h @@ -29,10 +29,16 @@ #pragma once +#include #include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/s/catalog/type_mongos_test.cpp b/src/mongo/s/catalog/type_mongos_test.cpp index 327482acad49c..e16009a6469ac 100644 --- a/src/mongo/s/catalog/type_mongos_test.cpp +++ b/src/mongo/s/catalog/type_mongos_test.cpp @@ -27,12 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/db/jsobj.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/catalog/type_mongos.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/time_support.h" namespace { diff --git a/src/mongo/s/catalog/type_shard.cpp b/src/mongo/s/catalog/type_shard.cpp index a619ffd9f1b30..3e1241344a72b 100644 --- a/src/mongo/s/catalog/type_shard.cpp +++ b/src/mongo/s/catalog/type_shard.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/s/catalog/type_shard.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/s/catalog/type_shard.h" #include "mongo/util/assert_util.h" #include "mongo/util/str.h" diff --git a/src/mongo/s/catalog/type_shard.h b/src/mongo/s/catalog/type_shard.h index 4f75dabad7456..4de224b9019b4 100644 --- a/src/mongo/s/catalog/type_shard.h +++ b/src/mongo/s/catalog/type_shard.h @@ -29,10 +29,17 @@ #pragma once +#include #include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/shard_id.h" diff --git a/src/mongo/s/catalog/type_shard_test.cpp b/src/mongo/s/catalog/type_shard_test.cpp index 05e009ff3d5c8..2e8ab62c7cf03 100644 --- a/src/mongo/s/catalog/type_shard_test.cpp +++ b/src/mongo/s/catalog/type_shard_test.cpp @@ -27,13 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/s/catalog/type_shard.h" - #include "mongo/base/status_with.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/s/catalog/type_tags.cpp b/src/mongo/s/catalog/type_tags.cpp index b46f890e0ff48..89f00a4b610ef 100644 --- a/src/mongo/s/catalog/type_tags.cpp +++ b/src/mongo/s/catalog/type_tags.cpp @@ -27,16 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/s/catalog/type_tags.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_tags.h" #include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/str.h" namespace mongo { @@ -136,7 +144,7 @@ BSONObj TagsType::toBSON() const { BSONObjBuilder builder; if (_ns) - builder.append(ns.name(), getNS().ns()); + builder.append(ns.name(), NamespaceStringUtil::serialize(getNS())); if (_tag) builder.append(tag.name(), getTag()); if (_minKey) diff --git a/src/mongo/s/catalog/type_tags.h b/src/mongo/s/catalog/type_tags.h index d69d9eeb057be..1433a7eae278c 100644 --- a/src/mongo/s/catalog/type_tags.h +++ b/src/mongo/s/catalog/type_tags.h @@ -29,9 +29,15 @@ #pragma once +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/s/catalog/type_chunk.h" diff --git a/src/mongo/s/catalog/type_tags_test.cpp b/src/mongo/s/catalog/type_tags_test.cpp index 1cd8ed6d276a2..5f9514faba20d 100644 --- a/src/mongo/s/catalog/type_tags_test.cpp +++ b/src/mongo/s/catalog/type_tags_test.cpp @@ -27,13 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/s/catalog/type_tags.h" - +#include "mongo/base/error_codes.h" #include "mongo/base/status_with.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace { @@ -51,7 +54,7 @@ TEST(TagsType, Valid) { TagsType tag = status.getValue(); - ASSERT_EQUALS(tag.getNS().ns(), "test.mycol"); + ASSERT_EQUALS(tag.getNS().ns_forTest(), "test.mycol"); ASSERT_EQUALS(tag.getTag(), "tag"); ASSERT_BSONOBJ_EQ(tag.getMinKey(), BSON("a" << 10)); ASSERT_BSONOBJ_EQ(tag.getMaxKey(), BSON("a" << 20)); diff --git a/src/mongo/s/catalog_cache.cpp b/src/mongo/s/catalog_cache.cpp index 351dc74f93402..a1044addc2905 100644 --- a/src/mongo/s/catalog_cache.cpp +++ b/src/mongo/s/catalog_cache.cpp @@ -29,27 +29,62 @@ #include "mongo/s/catalog_cache.h" +#include +#include +#include +#include +#include #include - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/locker.h" #include "mongo/db/curop.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/repl/optime_with.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include 
"mongo/db/server_options.h" #include "mongo/db/vector_clock.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/s/is_mongos.h" +#include "mongo/s/index_version.h" #include "mongo/s/mongod_and_mongos_server_parameters_gen.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" #include "mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/s/stale_exception.h" -#include "mongo/util/concurrency/with_lock.h" -#include "mongo/util/scopeguard.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/invalidating_lru_cache.h" +#include "mongo/util/str.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -84,7 +119,7 @@ std::shared_ptr createUpdatedRoutingTableHistory( tassert(7032310, fmt::format("allowMigrations field of collection '{}' changed without changing the " "collection placement version {}. Old value: {}, new value: {}", - nss.toString(), + nss.toStringForErrorMsg(), existingHistory->optRt->getVersion().toString(), existingHistory->optRt->allowMigrations(), collectionAndChunks.allowMigrations), @@ -95,7 +130,7 @@ std::shared_ptr createUpdatedRoutingTableHistory( tassert(7032311, fmt::format("reshardingFields field of collection '{}' changed without changing " "the collection placement version {}. Old value: {}, new value: {}", - nss.toString(), + nss.toStringForErrorMsg(), existingHistory->optRt->getVersion().toString(), oldReshardingFields->toBSON().toString(), newReshardingFields->toBSON().toString()), @@ -314,7 +349,7 @@ StatusWith CatalogCache::_getCollectionPlacementInfoAt( "Do not hold a lock while refreshing the catalog cache. Doing so would potentially " "hold the lock during a network call, and can lead to a deadlock as described in " "SERVER-37398.", - allowLocks || !opCtx->lockState() || !opCtx->lockState()->isLocked()); + allowLocks || !opCtx->lockState()->isLocked()); try { const auto swDbInfo = getDatabase(opCtx, nss.db(), allowLocks); @@ -409,6 +444,11 @@ StatusWith CatalogCache::getCollectionRoutingInfoAt( OperationContext* opCtx, const NamespaceString& nss, Timestamp atClusterTime) { try { auto cm = uassertStatusOK(_getCollectionPlacementInfoAt(opCtx, nss, atClusterTime)); + if (!cm.isSharded()) { + // If the collection is unsharded, it cannot have global indexes so there is no need to + // fetch the index information. 
+ return CollectionRoutingInfo{std::move(cm), boost::none}; + } auto sii = _getCollectionIndexInfoAt(opCtx, nss); return retryUntilConsistentRoutingInfo(opCtx, nss, std::move(cm), std::move(sii)); } catch (const DBException& ex) { @@ -422,6 +462,11 @@ StatusWith CatalogCache::getCollectionRoutingInfo(Operati try { auto cm = uassertStatusOK(_getCollectionPlacementInfoAt(opCtx, nss, boost::none, allowLocks)); + if (!cm.isSharded()) { + // If the collection is unsharded, it cannot have global indexes so there is no need to + // fetch the index information. + return CollectionRoutingInfo{std::move(cm), boost::none}; + } auto sii = _getCollectionIndexInfoAt(opCtx, nss, allowLocks); return retryUntilConsistentRoutingInfo(opCtx, nss, std::move(cm), std::move(sii)); } catch (const DBException& ex) { @@ -429,6 +474,17 @@ StatusWith CatalogCache::getCollectionRoutingInfo(Operati } } +StatusWith CatalogCache::_getCollectionRoutingInfoWithoutOptimization( + OperationContext* opCtx, const NamespaceString& nss) { + try { + auto cm = uassertStatusOK(_getCollectionPlacementInfoAt(opCtx, nss, boost::none)); + auto sii = _getCollectionIndexInfoAt(opCtx, nss); + return retryUntilConsistentRoutingInfo(opCtx, nss, std::move(cm), std::move(sii)); + } catch (const DBException& ex) { + return ex.toStatus(); + } +} + boost::optional CatalogCache::_getCollectionIndexInfoAt( OperationContext* opCtx, const NamespaceString& nss, bool allowLocks) { @@ -504,11 +560,6 @@ boost::optional CatalogCache::_getCollectionIndexIn if (acquireTries == kMaxInconsistentCollectionRefreshAttempts) { throw; } - - // TODO (SERVER-71278) Remove this handling of SnapshotUnavailable - if (ex.code() == ErrorCodes::SnapshotUnavailable) { - sleepmillis(100); - } } indexEntryFuture = _indexCache.acquireAsync(nss, CacheCausalConsistency::kLatestKnown); @@ -541,7 +592,7 @@ StatusWith CatalogCache::getCollectionRoutingInfoWithRefr try { _triggerPlacementVersionRefresh(opCtx, nss); _triggerIndexVersionRefresh(opCtx, nss); - return getCollectionRoutingInfo(opCtx, nss, false); + return _getCollectionRoutingInfoWithoutOptimization(opCtx, nss); } catch (const DBException& ex) { return ex.toStatus(); } @@ -561,7 +612,7 @@ StatusWith CatalogCache::getCollectionRoutingInfoWithInde OperationContext* opCtx, const NamespaceString& nss) { try { _triggerIndexVersionRefresh(opCtx, nss); - return getCollectionRoutingInfo(opCtx, nss, false); + return _getCollectionRoutingInfoWithoutOptimization(opCtx, nss); } catch (const DBException& ex) { return ex.toStatus(); } @@ -571,7 +622,8 @@ CollectionRoutingInfo CatalogCache::getShardedCollectionRoutingInfo(OperationCon const NamespaceString& nss) { auto cri = uassertStatusOK(getCollectionRoutingInfo(opCtx, nss)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Expected collection " << nss << " to be sharded", + str::stream() << "Expected collection " << nss.toStringForErrorMsg() + << " to be sharded", cri.cm.isSharded()); return cri; } @@ -581,7 +633,8 @@ StatusWith CatalogCache::getShardedCollectionRoutingInfoW try { auto cri = uassertStatusOK(getCollectionRoutingInfoWithRefresh(opCtx, nss)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Expected collection " << nss << " to be sharded", + str::stream() << "Expected collection " << nss.toStringForErrorMsg() + << " to be sharded", cri.cm.isSharded()); return cri; } catch (const DBException& ex) { @@ -594,7 +647,8 @@ StatusWith CatalogCache::getShardedCollectionRoutingInfoW try { auto cri = 
uassertStatusOK(getCollectionRoutingInfoWithPlacementRefresh(opCtx, nss)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Expected collection " << nss << " to be sharded", + str::stream() << "Expected collection " << nss.toStringForErrorMsg() + << " to be sharded", cri.cm.isSharded()); return cri; } catch (const DBException& ex) { @@ -851,7 +905,7 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look OperationContext* opCtx, const NamespaceString& nss, const RoutingTableHistoryValueHandle& existingHistory, - const ComparableChunkVersion& previousVersion) { + const ComparableChunkVersion& timeInStore) { const bool isIncremental(existingHistory && existingHistory->optRt); _updateRefreshesStats(isIncremental, true); blockCollectionCacheLookup.pauseWhileSet(opCtx); @@ -870,7 +924,7 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look "Refreshing cached collection", logAttrs(nss), "lookupSinceVersion"_attr = lookupVersion, - "timeInStore"_attr = previousVersion); + "timeInStore"_attr = timeInStore); auto collectionAndChunks = _catalogCacheLoader.getChunksSince(nss, lookupVersion).get(); @@ -885,21 +939,24 @@ CatalogCache::CollectionCache::LookupResult CatalogCache::CollectionCache::_look newRoutingHistory->getAllShardIds(&shardIds); for (const auto& shardId : shardIds) { uassertStatusOKWithContext(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, shardId), - str::stream() << "Collection " << nss + str::stream() << "Collection " << nss.toStringForErrorMsg() << " references shard which does not exist"); } const ChunkVersion newVersion = newRoutingHistory->getVersion(); newComparableVersion.setChunkVersion(newVersion); - LOGV2_FOR_CATALOG_REFRESH(4619901, - isIncremental || newComparableVersion != previousVersion ? 0 : 1, - "Refreshed cached collection", - logAttrs(nss), - "lookupSinceVersion"_attr = lookupVersion, - "newVersion"_attr = newComparableVersion, - "timeInStore"_attr = previousVersion, - "duration"_attr = Milliseconds(t.millis())); + // The log below is logged at debug(0) (equivalent to info level) only if the new placement + // version is different than the one we already had (if any). + LOGV2_FOR_CATALOG_REFRESH( + 4619901, + (!isIncremental || newVersion != existingHistory->optRt->getVersion()) ? 0 : 1, + "Refreshed cached collection", + logAttrs(nss), + "lookupSinceVersion"_attr = lookupVersion, + "newVersion"_attr = newComparableVersion, + "timeInStore"_attr = timeInStore, + "duration"_attr = Milliseconds(t.millis())); _updateRefreshesStats(isIncremental, false); return LookupResult(OptionalRoutingTableHistory(std::move(newRoutingHistory)), @@ -964,16 +1021,8 @@ CatalogCache::IndexCache::LookupResult CatalogCache::IndexCache::_lookupIndexes( "timeInStore"_attr = previousVersion); const auto readConcern = [&]() -> repl::ReadConcernArgs { - // (Ignore FCV check): This is in mongos so we expect to ignore FCV. - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()) { - // When the feature flag is on, the config server may read from a secondary which - // may need to wait for replication, so we should use afterClusterTime. 
- return {repl::ReadConcernLevel::kSnapshotReadConcern}; - } else { - const auto vcTime = VectorClock::get(opCtx)->getTime(); - return {vcTime.configTime(), repl::ReadConcernLevel::kSnapshotReadConcern}; - } + const auto vcTime = VectorClock::get(opCtx)->getTime(); + return {vcTime.configTime(), repl::ReadConcernLevel::kSnapshotReadConcern}; }(); auto collAndIndexes = Grid::get(opCtx)->catalogClient()->getCollectionAndShardingIndexCatalogEntries( diff --git a/src/mongo/s/catalog_cache.h b/src/mongo/s/catalog_cache.h index 2048ac9d4bab4..79b1fd5ff29b5 100644 --- a/src/mongo/s/catalog_cache.h +++ b/src/mongo/s/catalog_cache.h @@ -29,17 +29,36 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_index_catalog.h" #include "mongo/s/catalog_cache_loader.h" #include "mongo/s/chunk_manager.h" +#include "mongo/s/database_version.h" #include "mongo/s/shard_version.h" #include "mongo/s/sharding_index_catalog_cache.h" #include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/thread_pool_interface.h" #include "mongo/util/read_through_cache.h" namespace mongo { @@ -399,6 +418,12 @@ class CatalogCache { void _triggerIndexVersionRefresh(OperationContext* opCtx, const NamespaceString& nss); + // Same as getCollectionRoutingInfo but will fetch the index information from the cache even if + // the placement information is not sharded. Used internally when a refresh is requested for + // the index component. + StatusWith _getCollectionRoutingInfoWithoutOptimization( + OperationContext* opCtx, const NamespaceString& nss); + // Interface from which chunks will be retrieved CatalogCacheLoader& _cacheLoader; diff --git a/src/mongo/s/catalog_cache_loader.cpp b/src/mongo/s/catalog_cache_loader.cpp index 876845b874377..2bf2bb7cbc168 100644 --- a/src/mongo/s/catalog_cache_loader.cpp +++ b/src/mongo/s/catalog_cache_loader.cpp @@ -27,9 +27,15 @@ * it in the license file.
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include +#include #include "mongo/s/catalog_cache_loader.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/s/catalog_cache_loader.h b/src/mongo/s/catalog_cache_loader.h index 053e72b417530..fd1f5dc4425cf 100644 --- a/src/mongo/s/catalog_cache_loader.h +++ b/src/mongo/s/catalog_cache_loader.h @@ -29,17 +29,29 @@ #pragma once +#include +#include +#include #include #include #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/logv2/log.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/chunk_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" #include "mongo/s/type_collection_common_types_gen.h" #include "mongo/util/concurrency/notification.h" +#include "mongo/util/future.h" #include "mongo/util/uuid.h" namespace mongo { @@ -119,6 +131,11 @@ class CatalogCacheLoader { */ virtual void onStepUp() = 0; + /** + * Interrupts ongoing refreshes on rollback. + */ + virtual void onReplicationRollback() = 0; + /** * Transitions into shut down and cleans up state. Once this transitions to shut down, should * not be able to transition back to normal. Should be safe to be called more than once. @@ -127,9 +144,11 @@ class CatalogCacheLoader { /** * Notifies the loader that the persisted collection placement version for 'nss' has been - * updated. + * updated. `commitTime` represents the commit time of the update to config.collections for + * `nss` setting the refreshing flag to false. */ - virtual void notifyOfCollectionPlacementVersionUpdate(const NamespaceString& nss) = 0; + virtual void notifyOfCollectionRefreshEndMarkerSeen(const NamespaceString& nss, + const Timestamp& commitTime) = 0; /** * Non-blocking call, which returns the chunks changed since the specified version to be diff --git a/src/mongo/s/catalog_cache_loader_mock.cpp b/src/mongo/s/catalog_cache_loader_mock.cpp index 2c190ce65305f..4f3977bccf782 100644 --- a/src/mongo/s/catalog_cache_loader_mock.cpp +++ b/src/mongo/s/catalog_cache_loader_mock.cpp @@ -27,14 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/s/catalog_cache_loader_mock.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/keypattern.h" #include "mongo/db/operation_context.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" -#include "mongo/stdx/thread.h" +#include "mongo/s/catalog_cache_loader_mock.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/assert_util.h" namespace mongo { @@ -61,9 +67,14 @@ void CatalogCacheLoaderMock::onStepUp() { MONGO_UNREACHABLE; } +void CatalogCacheLoaderMock::onReplicationRollback() { + MONGO_UNREACHABLE; +} + void CatalogCacheLoaderMock::shutDown() {} -void CatalogCacheLoaderMock::notifyOfCollectionPlacementVersionUpdate(const NamespaceString& nss) { +void CatalogCacheLoaderMock::notifyOfCollectionRefreshEndMarkerSeen(const NamespaceString& nss, + const Timestamp& commitTime) { MONGO_UNREACHABLE; } diff --git a/src/mongo/s/catalog_cache_loader_mock.h b/src/mongo/s/catalog_cache_loader_mock.h index b846b4caf9470..9eb1a89245dbe 100644 --- a/src/mongo/s/catalog_cache_loader_mock.h +++ b/src/mongo/s/catalog_cache_loader_mock.h @@ -29,8 +29,26 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" namespace mongo { @@ -52,8 +70,10 @@ class CatalogCacheLoaderMock final : public CatalogCacheLoader { void initializeReplicaSetRole(bool isPrimary) override; void onStepDown() override; void onStepUp() override; + void onReplicationRollback() override; void shutDown() override; - void notifyOfCollectionPlacementVersionUpdate(const NamespaceString& nss) override; + void notifyOfCollectionRefreshEndMarkerSeen(const NamespaceString& nss, + const Timestamp& commitTime) override; void waitForCollectionFlush(OperationContext* opCtx, const NamespaceString& nss) override; void waitForDatabaseFlush(OperationContext* opCtx, StringData dbName) override; diff --git a/src/mongo/s/catalog_cache_mock.cpp b/src/mongo/s/catalog_cache_mock.cpp index 56e4a825b5136..ab393b473f00c 100644 --- a/src/mongo/s/catalog_cache_mock.cpp +++ b/src/mongo/s/catalog_cache_mock.cpp @@ -28,8 +28,17 @@ */ #include "mongo/s/catalog_cache_mock.h" + +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/service_context.h" #include "mongo/s/catalog_cache_loader_mock.h" +#include "mongo/s/sharding_index_catalog_cache.h" namespace mongo { diff --git a/src/mongo/s/catalog_cache_mock.h b/src/mongo/s/catalog_cache_mock.h index d57fb4f56605c..010642378acaa 100644 --- a/src/mongo/s/catalog_cache_mock.h +++ b/src/mongo/s/catalog_cache_mock.h @@ -29,8 +29,18 @@ #pragma once +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/catalog_cache_loader_mock.h" 
+#include "mongo/s/chunk_manager.h" namespace mongo { diff --git a/src/mongo/s/catalog_cache_refresh_test.cpp b/src/mongo/s/catalog_cache_refresh_test.cpp index 4078a32323b10..b32e199feb43c 100644 --- a/src/mongo/s/catalog_cache_refresh_test.cpp +++ b/src/mongo/s/catalog_cache_refresh_test.cpp @@ -27,16 +27,59 @@ * it in the license file. */ -#include "mongo/db/concurrency/locker_noop.h" +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/concurrency/d_concurrency.h" +#include "mongo/db/concurrency/lock_manager_defs.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/catalog_cache_test_fixture.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -59,7 +102,7 @@ class CatalogCacheRefreshTest : public CatalogCacheTestFixture { void expectGetDatabase() { expectFindSendBSONObjVector(kConfigHostAndPort, [&]() { DatabaseType db( - kNss.db().toString(), {"0"}, DatabaseVersion(UUID::gen(), Timestamp(1, 1))); + kNss.db_forTest().toString(), {"0"}, DatabaseVersion(UUID::gen(), Timestamp(1, 1))); return std::vector{db.toBSON()}; }()); } @@ -163,18 +206,12 @@ TEST_F(CatalogCacheRefreshTest, NoLoadIfShardNotMarkedStaleInOperationContext) { ASSERT_EQ(2, cri.cm.numChunks()); } -class MockLockerAlwaysReportsToBeLocked : public LockerNoop { -public: - using LockerNoop::LockerNoop; - - bool isLocked() const final { - return true; - } -}; - -DEATH_TEST_F(CatalogCacheRefreshTest, ShouldFailToRefreshWhenLocksAreHeld, "Invariant") { - operationContext()->setLockState(std::make_unique()); - scheduleRoutingInfoUnforcedRefresh(kNss); +DEATH_TEST_REGEX_F(CatalogCacheRefreshTest, + ShouldFailToRefreshWhenLocksAreHeld, + "Tripwire assertion.*7032314") { + Lock::GlobalLock globalLock(operationContext(), MODE_X); + auto future = scheduleRoutingInfoUnforcedRefresh(kNss); + ASSERT_THROWS_CODE(future.default_timed_get(), DBException, 7032314); } TEST_F(CatalogCacheRefreshTest, DatabaseNotFound) { @@ -221,13 +258,6 @@ TEST_F(CatalogCacheRefreshTest, CollectionNotFound) { // 
Return an empty collection expectFindSendBSONObjVector(kConfigHostAndPort, {}); - onCommand([&](const executor::RemoteCommandRequest& request) { - ASSERT_EQ(request.target, kConfigHostAndPort); - ASSERT_EQ(request.dbname, "config"); - return CursorResponse(CollectionType::ConfigNS, CursorId{0}, {}) - .toBSON(CursorResponse::ResponseType::InitialResponse); - }); - auto cri = *future.default_timed_get(); ASSERT(!cri.cm.isSharded()); ASSERT_EQ(ShardId{"0"}, cri.cm.dbPrimary()); diff --git a/src/mongo/s/catalog_cache_test.cpp b/src/mongo/s/catalog_cache_test.cpp index fe6bc8c6f1c22..6c5ee784f366f 100644 --- a/src/mongo/s/catalog_cache_test.cpp +++ b/src/mongo/s/catalog_cache_test.cpp @@ -27,15 +27,47 @@ * it in the license file. */ +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/catalog_cache_loader_mock.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/s/stale_exception.h" #include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -132,7 +164,7 @@ class CatalogCacheTest : public ShardingTestFixture { } CollectionType loadCollection(const ShardVersion& version) { - const auto coll = makeCollectionType(version); + auto coll = makeCollectionType(version); const auto scopedCollProv = scopedCollectionProvider(coll); const auto scopedChunksProv = scopedChunksProvider(makeChunks(version.placementVersion())); auto future = launchAsync([&] { @@ -275,11 +307,12 @@ TEST_F(CatalogCacheTest, InvalidateSingleDbOnShardRemoval) { TEST_F(CatalogCacheTest, OnStaleDatabaseVersionNoVersion) { // onStaleDatabaseVesrsion must invalidate the database entry if invoked with no version const auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1)); - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); + loadDatabases({DatabaseType(kNss.db_forTest().toString(), kShards[0], dbVersion)}); - _catalogCache->onStaleDatabaseVersion(kNss.db(), boost::none); + _catalogCache->onStaleDatabaseVersion(kNss.db_forTest(), boost::none); - const auto status = _catalogCache->getDatabase(operationContext(), kNss.db()).getStatus(); + const auto status = + _catalogCache->getDatabase(operationContext(), kNss.db_forTest()).getStatus(); ASSERT(status == ErrorCodes::InternalError); } @@ -289,7 +322,7 @@ 
TEST_F(CatalogCacheTest, OnStaleShardVersionWithSameVersion) { const auto cachedCollVersion = ShardVersionFactory::make( ChunkVersion(gen, {1, 0}), boost::optional(boost::none)); - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); + loadDatabases({DatabaseType(kNss.db_forTest().toString(), kShards[0], dbVersion)}); loadCollection(cachedCollVersion); _catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( kNss, cachedCollVersion, kShards[0]); @@ -302,7 +335,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithNoVersion) { const auto cachedCollVersion = ShardVersionFactory::make( ChunkVersion(gen, {1, 0}), boost::optional(boost::none)); - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); + loadDatabases({DatabaseType(kNss.db_forTest().toString(), kShards[0], dbVersion)}); loadCollection(cachedCollVersion); _catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( kNss, boost::none, kShards[0]); @@ -319,7 +352,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithGreaterPlacementVersion) { const auto wantedCollVersion = ShardVersionFactory::make( ChunkVersion(gen, {2, 0}), boost::optional(boost::none)); - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); + loadDatabases({DatabaseType(kNss.db_forTest().toString(), kShards[0], dbVersion)}); loadCollection(cachedCollVersion); _catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( kNss, wantedCollVersion, kShards[0]); @@ -334,7 +367,7 @@ TEST_F(CatalogCacheTest, TimeseriesFieldsAreProperlyPropagatedOnCC) { const auto version = ShardVersionFactory::make(ChunkVersion(gen, {1, 0}), boost::optional(boost::none)); - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); + loadDatabases({DatabaseType(kNss.db_forTest().toString(), kShards[0], dbVersion)}); auto coll = makeCollectionType(version); auto chunks = makeChunks(version.placementVersion()); @@ -404,7 +437,7 @@ TEST_F(CatalogCacheTest, LookupCollectionWithInvalidOptions) { const auto version = ShardVersionFactory::make(ChunkVersion(gen, {1, 0}), boost::optional(boost::none)); - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); + loadDatabases({DatabaseType(kNss.db_forTest().toString(), kShards[0], dbVersion)}); auto coll = makeCollectionType(version); @@ -427,7 +460,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionWithGreaterIndexVersion) { const auto wantedCollVersion = ShardVersionFactory::make( ChunkVersion(gen, {1, 0}), CollectionIndexes(kUUID, Timestamp(1, 0))); - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); + loadDatabases({DatabaseType(kNss.db_forTest().toString(), kShards[0], dbVersion)}); CollectionType coll = loadCollection(cachedCollVersion); _catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( kNss, wantedCollVersion, kShards[0]); @@ -454,7 +487,7 @@ TEST_F(CatalogCacheTest, OnStaleShardVersionIndexVersionBumpNotNone) { const auto wantedCollVersion = ShardVersionFactory::make( ChunkVersion(gen, {1, 0}), CollectionIndexes(kUUID, Timestamp(2, 0))); - loadDatabases({DatabaseType(kNss.db().toString(), kShards[0], dbVersion)}); + loadDatabases({DatabaseType(kNss.db_forTest().toString(), kShards[0], dbVersion)}); CollectionType coll = loadCollection(cachedCollVersion); _catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( kNss, wantedCollVersion, kShards[0]); diff --git a/src/mongo/s/catalog_cache_test_fixture.cpp 
b/src/mongo/s/catalog_cache_test_fixture.cpp index d2a36500d79f1..545ed9cc4d6b5 100644 --- a/src/mongo/s/catalog_cache_test_fixture.cpp +++ b/src/mongo/s/catalog_cache_test_fixture.cpp @@ -29,20 +29,48 @@ #include "mongo/s/catalog_cache_test_fixture.h" +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/client.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_factory_mock.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_version.h" #include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/scopeguard.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/s/catalog_cache_test_fixture.h b/src/mongo/s/catalog_cache_test_fixture.h index efdd93686f856..5aec467dedc5e 100644 --- a/src/mongo/s/catalog_cache_test_fixture.h +++ b/src/mongo/s/catalog_cache_test_fixture.h @@ -30,11 +30,27 @@ #pragma once #include +#include #include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/s/chunk.cpp b/src/mongo/s/chunk.cpp index 61287e85ea163..bb2cc9063d4e3 100644 --- a/src/mongo/s/chunk.cpp +++ b/src/mongo/s/chunk.cpp @@ -29,6 +29,17 @@ #include "mongo/s/chunk.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bson_field.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -105,7 +116,7 @@ bool ChunkInfo::containsKey(const BSONObj& shardKey) const { std::string ChunkInfo::toString() const { return str::stream() << ChunkType::shard() << ": " << _shardId << ", " << ChunkType::lastmod() - << ": " << _lastmod.toString() << 
", " << _range.toString(); + << ": " << _lastmod.toString() << ", range: " << _range.toString(); } void ChunkInfo::markAsJumbo() { diff --git a/src/mongo/s/chunk.h b/src/mongo/s/chunk.h index 7c17c0b84be1f..8aad6aa0ac695 100644 --- a/src/mongo/s/chunk.h +++ b/src/mongo/s/chunk.h @@ -29,7 +29,16 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_version.h" namespace mongo { diff --git a/src/mongo/s/chunk_manager.cpp b/src/mongo/s/chunk_manager.cpp index 181fdd63b8911..ea641b7b6104a 100644 --- a/src/mongo/s/chunk_manager.cpp +++ b/src/mongo/s/chunk_manager.cpp @@ -29,12 +29,32 @@ #include "mongo/s/chunk_manager.h" +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/query/collation/collation_index_key.h" -#include "mongo/db/storage/key_string.h" -#include "mongo/logv2/log.h" #include "mongo/s/mongod_and_mongos_server_parameters_gen.h" #include "mongo/s/shard_invalidated_for_targeting_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -131,29 +151,31 @@ ShardPlacementVersionMap ChunkMap::constructShardPlacementVersionMap() const { // Tracks the max placement version for the shard on which the current range will reside auto placementVersionIt = placementVersions.find(currentRangeShardId); if (placementVersionIt == placementVersions.end()) { - placementVersionIt = - placementVersions - .emplace(std::piecewise_construct, - std::forward_as_tuple(currentRangeShardId), - std::forward_as_tuple(_collectionPlacementVersion.epoch(), - _collectionPlacementVersion.getTimestamp())) - .first; + placementVersionIt = placementVersions + .emplace(std::piecewise_construct, + std::forward_as_tuple(currentRangeShardId), + std::forward_as_tuple(CollectionGeneration{ + _collectionPlacementVersion.epoch(), + _collectionPlacementVersion.getTimestamp()})) + .first; } auto& maxPlacementVersion = placementVersionIt->second.placementVersion; + auto& maxValidAfter = placementVersionIt->second.validAfter; - current = - std::find_if(current, - _chunkMap.cend(), - [¤tRangeShardId, &maxPlacementVersion](const auto& currentChunk) { - if (currentChunk->getShardIdAt(boost::none) != currentRangeShardId) - return true; + current = std::find_if(current, _chunkMap.cend(), [&](const auto& currentChunk) { + if (currentChunk->getShardIdAt(boost::none) != currentRangeShardId) + return true; - if (maxPlacementVersion.isOlderThan(currentChunk->getLastmod())) - maxPlacementVersion = currentChunk->getLastmod(); + if (maxPlacementVersion.isOlderThan(currentChunk->getLastmod())) + maxPlacementVersion = currentChunk->getLastmod(); - return false; - }); + if (auto& history = currentChunk->getHistory(); + !history.empty() && maxValidAfter < history.front().getValidAfter()) + maxValidAfter = history.front().getValidAfter(); + + return false; + }); const auto rangeLast = *std::prev(current); @@ -264,6 +286,19 @@ BSONObj ChunkMap::toBSON() const { return 
builder.obj(); } +std::string ChunkMap::toString() const { + StringBuilder sb; + + sb << "Chunks (" << size() << "):\n"; + for (const auto& chunkInfoPtr : _chunkMap) { + sb << "\t" << chunkInfoPtr->toString() << '\n'; + } + + sb << "Collection placement version:" << _collectionPlacementVersion.toString() << '\n'; + + return sb.str(); +} + bool ChunkMap::allElementsAreOfType(BSONType type, const BSONObj& obj) { for (auto&& elem : obj) { if (elem.type() != type) { @@ -305,9 +340,8 @@ ChunkMap::_overlappingBounds(const BSONObj& min, const BSONObj& max, bool isMaxI return {itMin, itMax}; } -PlacementVersionTargetingInfo::PlacementVersionTargetingInfo(const OID& epoch, - const Timestamp& timestamp) - : placementVersion({epoch, timestamp}, {0, 0}) {} +PlacementVersionTargetingInfo::PlacementVersionTargetingInfo(const CollectionGeneration& generation) + : placementVersion(generation, {0, 0}) {} RoutingTableHistory::RoutingTableHistory( NamespaceString nss, @@ -350,6 +384,8 @@ void RoutingTableHistory::setAllShardsRefreshed() { Chunk ChunkManager::findIntersectingChunk(const BSONObj& shardKey, const BSONObj& collation, bool bypassIsFieldHashedCheck) const { + tassert(7626418, "Expected routing table to be initialized", _rt->optRt); + const bool hasSimpleCollation = (collation.isEmpty() && !_rt->optRt->getDefaultCollator()) || SimpleBSONObjComparator::kInstance.evaluate(collation == CollationSpec::kSimpleSpec); if (!hasSimpleCollation) { @@ -367,7 +403,7 @@ Chunk ChunkManager::findIntersectingChunk(const BSONObj& shardKey, uassert(ErrorCodes::ShardKeyNotFound, str::stream() << "Cannot target single shard due to collation of key " << elt.fieldNameStringData() << " for namespace " - << _rt->optRt->nss(), + << _rt->optRt->nss().toStringForErrorMsg(), !CollationIndexKey::isCollatableType(elt.type()) && (!isFieldHashed || bypassIsFieldHashedCheck)); } @@ -377,13 +413,15 @@ Chunk ChunkManager::findIntersectingChunk(const BSONObj& shardKey, uassert(ErrorCodes::ShardKeyNotFound, str::stream() << "Cannot target single shard using key " << shardKey - << " for namespace " << _rt->optRt->nss(), + << " for namespace " << _rt->optRt->nss().toStringForErrorMsg(), chunkInfo && chunkInfo->containsKey(shardKey)); return Chunk(*chunkInfo, _clusterTime); } bool ChunkManager::keyBelongsToShard(const BSONObj& shardKey, const ShardId& shardId) const { + tassert(7626419, "Expected routing table to be initialized", _rt->optRt); + if (shardKey.isEmpty()) return false; @@ -400,6 +438,8 @@ void ChunkManager::getShardIdsForRange(const BSONObj& min, const BSONObj& max, std::set* shardIds, std::set* chunkRanges) const { + tassert(7626420, "Expected routing table to be initialized", _rt->optRt); + // If our range is [MinKey, MaxKey], we can simply return all shard ids right away. However, // this optimization does not apply when we are reading from a snapshot because // _placementVersions contains shards with chunks and is built based on the last refresh. 
@@ -433,6 +473,8 @@ void ChunkManager::getShardIdsForRange(const BSONObj& min, } bool ChunkManager::rangeOverlapsShard(const ChunkRange& range, const ShardId& shardId) const { + tassert(7626421, "Expected routing table to be initialized", _rt->optRt); + bool overlapFound = false; _rt->optRt->forEachOverlappingChunk( @@ -450,6 +492,8 @@ bool ChunkManager::rangeOverlapsShard(const ChunkRange& range, const ShardId& sh boost::optional ChunkManager::getNextChunkOnShard(const BSONObj& shardKey, const ShardId& shardId) const { + tassert(7626422, "Expected routing table to be initialized", _rt->optRt); + boost::optional optChunk; forEachChunk( [&](const Chunk& chunk) { @@ -465,6 +509,8 @@ boost::optional ChunkManager::getNextChunkOnShard(const BSONObj& shardKey } ShardId ChunkManager::getMinKeyShardIdWithSimpleCollation() const { + tassert(7626423, "Expected routing table to be initialized", _rt->optRt); + auto minKey = getShardKeyPattern().getKeyPattern().globalMin(); return findIntersectingChunkWithSimpleCollation(minKey).getShardId(); } @@ -499,22 +545,15 @@ std::string ChunkManager::toString() const { return _rt->optRt ? _rt->optRt->toString() : "UNSHARDED"; } -bool RoutingTableHistory::compatibleWith(const RoutingTableHistory& other, - const ShardId& shardName) const { - // Return true if the placement version is the same in the two chunk managers - // TODO: This doesn't need to be so strong, just major vs - return other.getVersion(shardName) == getVersion(shardName); -} - -ChunkVersion RoutingTableHistory::_getVersion(const ShardId& shardName, - bool throwOnStaleShard) const { +PlacementVersionTargetingInfo RoutingTableHistory::_getVersion(const ShardId& shardName, + bool throwOnStaleShard) const { auto it = _placementVersions.find(shardName); if (it == _placementVersions.end()) { // Shards without explicitly tracked placement versions (meaning they have no chunks) always - // have a version of (0, 0, epoch, timestamp) - const auto collPlacementVersion = _chunkMap.getVersion(); - return ChunkVersion({collPlacementVersion.epoch(), collPlacementVersion.getTimestamp()}, - {0, 0}); + // have a version of (epoch, timestamp, 0, 0) + auto collPlacementVersion = _chunkMap.getVersion(); + return PlacementVersionTargetingInfo(ChunkVersion(collPlacementVersion, {0, 0}), + Timestamp(0, 0)); } if (throwOnStaleShard && gEnableFinerGrainedCatalogCacheRefresh) { @@ -523,30 +562,22 @@ ChunkVersion RoutingTableHistory::_getVersion(const ShardId& shardName, !it->second.isStale.load()); } - return it->second.placementVersion; -} - -ChunkVersion RoutingTableHistory::getVersion(const ShardId& shardName) const { - return _getVersion(shardName, true); -} - -ChunkVersion RoutingTableHistory::getVersionForLogging(const ShardId& shardName) const { - return _getVersion(shardName, false); + const auto& placementVersionTargetingInfo = it->second; + return PlacementVersionTargetingInfo(placementVersionTargetingInfo.placementVersion, + placementVersionTargetingInfo.validAfter); } std::string RoutingTableHistory::toString() const { StringBuilder sb; - sb << "RoutingTableHistory: " << _nss.ns() << " key: " << _shardKeyPattern.toString() << '\n'; + sb << "RoutingTableHistory: " << toStringForLogging(_nss) + << " key: " << _shardKeyPattern.toString() << '\n'; - sb << "Chunks:\n"; - _chunkMap.forEach([&sb](const auto& chunk) { - sb << "\t" << chunk->toString() << '\n'; - return true; - }); + sb << _chunkMap.toString(); sb << "Shard placement versions:\n"; for (const auto& entry : _placementVersions) { - sb << "\t" << 
entry.first << ": " << entry.second.placementVersion.toString() << '\n'; + sb << "\t" << entry.first << ": " << entry.second.placementVersion.toString() << " @ " + << entry.second.validAfter.toString() << '\n'; } return sb.str(); diff --git a/src/mongo/s/chunk_manager.h b/src/mongo/s/chunk_manager.h index 2f5ca4c4aed7f..98bcd48d25413 100644 --- a/src/mongo/s/chunk_manager.h +++ b/src/mongo/s/chunk_manager.h @@ -29,13 +29,30 @@ #pragma once +#include +#include +#include +#include +#include +#include #include #include +#include #include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/query/collation/collation_spec.h" #include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/database_version.h" #include "mongo/s/resharding/type_collection_fields_gen.h" #include "mongo/s/shard_key_pattern.h" @@ -43,19 +60,32 @@ #include "mongo/s/type_collection_common_types_gen.h" #include "mongo/stdx/unordered_map.h" #include "mongo/util/read_through_cache.h" +#include "mongo/util/uuid.h" namespace mongo { class ChunkManager; struct PlacementVersionTargetingInfo { - // Indicates whether the shard is stale and thus needs a catalog cache refresh - AtomicWord isStale{false}; + /** + * Constructs a placement information for a collection with the specified generation, starting + * at placementVersion {0, 0} and maxValidAfter of Timestamp{0, 0}. The expectation is that the + * incremental refresh algorithm will increment these values as it processes the incoming + * chunks. + */ + explicit PlacementVersionTargetingInfo(const CollectionGeneration& generation); + PlacementVersionTargetingInfo(ChunkVersion placementVersion, Timestamp validAfter) + : placementVersion(std::move(placementVersion)), validAfter(std::move(validAfter)) {} - // Max chunk version for the shard + // Max chunk version for the shard, effectively this is the shard placement version. ChunkVersion placementVersion; - PlacementVersionTargetingInfo(const OID& epoch, const Timestamp& timestamp); + // Max validAfter for the shard, effectively this is the timestamp of the latest placement + // change that occurred on a particular shard. + Timestamp validAfter; + + // Indicates whether the shard is stale and thus needs a catalog cache refresh + AtomicWord isStale{false}; }; // Map from a shard to a struct indicating both the max chunk version on that shard and whether the @@ -107,6 +137,7 @@ class ChunkMap { } ShardPlacementVersionMap constructShardPlacementVersionMap() const; + std::shared_ptr findIntersectingChunk(const BSONObj& shardKey) const; void appendChunk(const std::shared_ptr& chunk); @@ -115,6 +146,8 @@ class ChunkMap { BSONObj toBSON() const; + std::string toString() const; + static bool allElementsAreOfType(BSONType type, const BSONObj& obj); private: @@ -215,6 +248,10 @@ class RoutingTableHistory { */ void setAllShardsRefreshed(); + /** + * Returns the maximum version across all shards (also known as the "collection placement + * version"). + */ ChunkVersion getVersion() const { return _chunkMap.getVersion(); } @@ -223,14 +260,26 @@ class RoutingTableHistory { * Retrieves the placement version for the given shard. 
Will throw a * ShardInvalidatedForTargeting exception if the shard is marked as stale. */ - ChunkVersion getVersion(const ShardId& shardId) const; + ChunkVersion getVersion(const ShardId& shardId) const { + return _getVersion(shardId, true).placementVersion; + } /** * Retrieves the placement version for the given shard. Will not throw if the shard is marked as * stale. Only use when logging the given chunk version -- if the caller must execute logic * based on the returned version, use getVersion() instead. */ - ChunkVersion getVersionForLogging(const ShardId& shardId) const; + ChunkVersion getVersionForLogging(const ShardId& shardId) const { + return _getVersion(shardId, false).placementVersion; + } + + /** + * Retrieves the maximum validAfter timestamp for the given shard. Will throw a + * ShardInvalidatedForTargeting exception if the shard is marked as stale. + */ + Timestamp getMaxValidAfter(const ShardId& shardId) const { + return _getVersion(shardId, true).validAfter; + } size_t numChunks() const { return _chunkMap.size(); @@ -271,11 +320,6 @@ class RoutingTableHistory { return _placementVersions.size(); } - /** - * Returns true if, for this shard, the chunks are identical in both chunk managers - */ - bool compatibleWith(const RoutingTableHistory& other, const ShardId& shard) const; - std::string toString() const; bool uuidMatches(const UUID& uuid) const { @@ -311,7 +355,7 @@ class RoutingTableHistory { bool allowMigrations, ChunkMap chunkMap); - ChunkVersion _getVersion(const ShardId& shardName, bool throwOnStaleShard) const; + PlacementVersionTargetingInfo _getVersion(const ShardId& shardId, bool throwOnStaleShard) const; // Namespace to which this routing information corresponds NamespaceString _nss; @@ -530,31 +574,56 @@ class ChunkManager { // Methods only supported on sharded collections (caller must check isSharded()) const ShardKeyPattern& getShardKeyPattern() const { + tassert(7626400, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getShardKeyPattern(); } const CollatorInterface* getDefaultCollator() const { + tassert(7626401, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getDefaultCollator(); } bool isUnique() const { + tassert(7626402, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->isUnique(); } ChunkVersion getVersion() const { + tassert(7626403, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getVersion(); } + /** + * Retrieves the placement version for the given shard. Will throw a + * ShardInvalidatedForTargeting exception if the shard is marked as stale. + */ ChunkVersion getVersion(const ShardId& shardId) const { + tassert(7626404, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getVersion(shardId); } + /** + * Retrieves the maximum validAfter timestamp for the given shard. Will throw a + * ShardInvalidatedForTargeting exception if the shard is marked as stale. + */ + Timestamp getMaxValidAfter(const ShardId& shardId) const { + tassert(7626405, "Expected routing table to be initialized", _rt->optRt); + return _rt->optRt->getMaxValidAfter(shardId); + } + + /** + * Retrieves the placement version for the given shard. Will not throw if the shard is marked as + * stale. Only use when logging the given chunk version -- if the caller must execute logic + * based on the returned version, use getVersion() instead. 
+ */ ChunkVersion getVersionForLogging(const ShardId& shardId) const { + tassert(7626406, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getVersionForLogging(shardId); } template void forEachChunk(Callable&& handler, const BSONObj& shardKey = BSONObj()) const { + tassert(7626407, "Expected routing table to be initialized", _rt->optRt); _rt->optRt->forEachChunk( [this, handler = std::forward(handler)](const auto& chunkInfo) mutable { if (!handler(Chunk{*chunkInfo, _clusterTime})) @@ -604,6 +673,7 @@ class ChunkManager { * Same as findIntersectingChunk, but assumes the simple collation. */ Chunk findIntersectingChunkWithSimpleCollation(const BSONObj& shardKey) const { + tassert(7626408, "Expected routing table to be initialized", _rt->optRt); return findIntersectingChunk(shardKey, CollationSpec::kSimpleSpec); } @@ -628,6 +698,7 @@ class ChunkManager { * Returns the ids of all shards on which the collection has any chunks. */ void getAllShardIds(std::set* all) const { + tassert(7626409, "Expected routing table to be initialized", _rt->optRt); _rt->optRt->getAllShardIds(all); } @@ -635,6 +706,7 @@ class ChunkManager { * Returns the chunk ranges of all shards on which the collection has any chunks. */ void getAllChunkRanges(std::set* all) const { + tassert(7626410, "Expected routing table to be initialized", _rt->optRt); _rt->optRt->getAllChunkRanges(all); } @@ -642,6 +714,7 @@ class ChunkManager { * Returns the number of shards on which the collection has any chunks */ size_t getNShardsOwningChunks() const { + tassert(7626411, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getNShardsOwningChunks(); } @@ -651,34 +724,33 @@ class ChunkManager { */ static ChunkManager makeAtTime(const ChunkManager& cm, Timestamp clusterTime); - /** - * Returns true if, for this shard, the chunks are identical in both chunk managers - */ - bool compatibleWith(const ChunkManager& other, const ShardId& shard) const { - return _rt->optRt->compatibleWith(*other._rt->optRt, shard); - } - bool uuidMatches(const UUID& uuid) const { + tassert(7626412, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->uuidMatches(uuid); } const UUID& getUUID() const { + tassert(7626413, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getUUID(); } const NamespaceString& getNss() const { + tassert(7626414, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->nss(); } const boost::optional& getTimeseriesFields() const { + tassert(7626415, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getTimeseriesFields(); } const boost::optional& getReshardingFields() const { + tassert(7626416, "Expected routing table to be initialized", _rt->optRt); return _rt->optRt->getReshardingFields(); } const RoutingTableHistory& getRoutingTableHistory_ForTest() const { + tassert(7626417, "Expected routing table to be initialized", _rt->optRt); return *_rt->optRt; } diff --git a/src/mongo/s/chunk_manager_query_test.cpp b/src/mongo/s/chunk_manager_query_test.cpp index 76e9e8b5a6949..3f4de2c1a461c 100644 --- a/src/mongo/s/chunk_manager_query_test.cpp +++ b/src/mongo/s/chunk_manager_query_test.cpp @@ -27,16 +27,47 @@ * it in the license file. 
*/ +#include #include - -#include "mongo/db/catalog/catalog_test_fixture.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/collation/collator_interface_mock.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/catalog_cache_test_fixture.h" #include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/s/chunk_map_test.cpp b/src/mongo/s/chunk_map_test.cpp index f0e71e7aaaacd..70524ef0b5016 100644 --- a/src/mongo/s/chunk_map_test.cpp +++ b/src/mongo/s/chunk_map_test.cpp @@ -27,15 +27,84 @@ * it in the license file. */ +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/random.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk.h" #include "mongo/s/chunk_manager.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/chunks_test_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest namespace mongo { + +using chunks_test_util::assertEqualChunkInfo; +using chunks_test_util::calculateCollVersion; +using chunks_test_util::calculateIntermediateShardKey; +using chunks_test_util::genChunkVector; +using chunks_test_util::performRandomChunkOperations; + namespace { -const NamespaceString kNss = NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); +PseudoRandom _random{SecureRandom().nextInt64()}; + const ShardId kThisShard("testShard"); +ShardPlacementVersionMap getShardVersionMap(const ChunkMap& chunkMap) { + return chunkMap.constructShardPlacementVersionMap(); +} + +std::map calculateShardVersions( + const std::vector>& chunkVector) { + std::map svMap; + for (const auto& chunk : chunkVector) { + auto mapIt = 
svMap.find(chunk->getShardId()); + if (mapIt == svMap.end()) { + svMap.emplace(chunk->getShardId(), chunk->getLastmod()); + continue; + } + if (mapIt->second.isOlderThan(chunk->getLastmod())) { + mapIt->second = chunk->getLastmod(); + } + } + return svMap; +} + +std::vector> toChunkInfoPtrVector( + const std::vector& chunkTypes) { + std::vector> chunkPtrs; + chunkPtrs.reserve(chunkTypes.size()); + for (const auto& chunkType : chunkTypes) { + chunkPtrs.push_back(std::make_shared(chunkType)); + } + return chunkPtrs; +} + class ChunkMapTest : public unittest::Test { public: const KeyPattern& getShardKeyPattern() const { @@ -46,16 +115,33 @@ class ChunkMapTest : public unittest::Test { return _uuid; } + const OID& collEpoch() const { + return _epoch; + } + + const Timestamp& collTimestamp() const { + return _collTimestamp; + } + + ChunkMap makeChunkMap(const std::vector>& chunks) const { + return ChunkMap{collEpoch(), collTimestamp()}.createMerged(chunks); + } + + std::vector genRandomChunkVector(size_t maxNumChunks = 30, + size_t minNumChunks = 1) const { + return chunks_test_util::genRandomChunkVector( + _uuid, _epoch, _collTimestamp, maxNumChunks, minNumChunks); + } + private: - KeyPattern _shardKeyPattern{BSON("a" << 1)}; + KeyPattern _shardKeyPattern{chunks_test_util::kShardKeyPattern}; const UUID _uuid = UUID::gen(); + const OID _epoch{OID::gen()}; + const Timestamp _collTimestamp{1, 1}; }; -} // namespace - TEST_F(ChunkMapTest, TestAddChunk) { - const OID epoch = OID::gen(); - ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0}); + ChunkVersion version({collEpoch(), collTimestamp()}, {1, 0}); auto chunk = std::make_shared( ChunkType{uuid(), @@ -63,18 +149,192 @@ TEST_F(ChunkMapTest, TestAddChunk) { version, kThisShard}); - ChunkMap chunkMap{epoch, Timestamp(1, 1)}; - auto newChunkMap = chunkMap.createMerged({chunk}); + auto newChunkMap = makeChunkMap({chunk}); ASSERT_EQ(newChunkMap.size(), 1); } +TEST_F(ChunkMapTest, ConstructChunkMapRandom) { + auto chunkVector = toChunkInfoPtrVector(genRandomChunkVector()); + + const auto expectedShardVersions = calculateShardVersions(chunkVector); + const auto expectedCollVersion = calculateCollVersion(expectedShardVersions); + + const auto chunkMap = makeChunkMap(chunkVector); + + // Check that it contains all the chunks + ASSERT_EQ(chunkVector.size(), chunkMap.size()); + // Check collection version + ASSERT_EQ(expectedCollVersion, chunkMap.getVersion()); + + size_t i = 0; + chunkMap.forEach([&](const auto& chunkPtr) { + const auto& expectedChunkPtr = chunkVector[i++]; + assertEqualChunkInfo(*expectedChunkPtr, *chunkPtr); + return true; + }); + + // Validate all shard versions + const auto shardVersions = getShardVersionMap(chunkMap); + ASSERT_EQ(expectedShardVersions.size(), shardVersions.size()); + for (const auto& mapIt : shardVersions) { + ASSERT_EQ(expectedShardVersions.at(mapIt.first), mapIt.second.placementVersion); + } +} + +TEST_F(ChunkMapTest, ConstructChunkMapRandomAllChunksSameVersion) { + auto chunkVector = genRandomChunkVector(); + auto commonVersion = chunkVector.front().getVersion(); + + // Set same version on all chunks + for (auto& chunk : chunkVector) { + chunk.setVersion(commonVersion); + } + + auto chunkInfoVector = toChunkInfoPtrVector(chunkVector); + const auto expectedShardVersions = calculateShardVersions(chunkInfoVector); + const auto expectedCollVersion = calculateCollVersion(expectedShardVersions); + + ASSERT_EQ(commonVersion, expectedCollVersion); + + const auto chunkMap = makeChunkMap(chunkInfoVector); + + // Check 
that it contains all the chunks + ASSERT_EQ(chunkInfoVector.size(), chunkMap.size()); + // Check collection version + ASSERT_EQ(expectedCollVersion, chunkMap.getVersion()); + + size_t i = 0; + chunkMap.forEach([&](const auto& chunkPtr) { + const auto& expectedChunkPtr = chunkInfoVector[i++]; + assertEqualChunkInfo(*expectedChunkPtr, *chunkPtr); + return true; + }); + + // Validate all shard versions + const auto shardVersions = getShardVersionMap(chunkMap); + ASSERT_EQ(expectedShardVersions.size(), shardVersions.size()); + for (const auto& mapIt : shardVersions) { + ASSERT_EQ(expectedShardVersions.at(mapIt.first), mapIt.second.placementVersion); + } +} + +/* + * Check that constructing a ChunkMap with chunks that have mismatching timestamp fails. + */ +TEST_F(ChunkMapTest, ConstructChunkMapMismatchingTimestamp) { + auto chunkVector = toChunkInfoPtrVector(genRandomChunkVector()); + + // Set a different timestamp in one of the chunks + const Timestamp wrongTimestamp{Date_t::now()}; + ASSERT_NE(wrongTimestamp, collTimestamp()); + const auto wrongChunkIdx = _random.nextInt32(chunkVector.size()); + const auto oldChunk = chunkVector.at(wrongChunkIdx); + const auto oldVersion = oldChunk->getLastmod(); + const ChunkVersion wrongVersion{{collEpoch(), wrongTimestamp}, + {oldVersion.majorVersion(), oldVersion.minorVersion()}}; + chunkVector[wrongChunkIdx] = std::make_shared( + ChunkType{uuid(), oldChunk->getRange(), wrongVersion, oldChunk->getShardId()}); + + ASSERT_THROWS_CODE( + makeChunkMap(chunkVector), AssertionException, ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Check that updating a ChunkMap with chunks that have mismatching timestamp fails. + */ +TEST_F(ChunkMapTest, UpdateChunkMapMismatchingTimestamp) { + auto chunkVector = toChunkInfoPtrVector(genRandomChunkVector()); + + auto chunkMap = makeChunkMap(chunkVector); + auto collVersion = chunkMap.getVersion(); + + // Set a different timestamp in one of the chunks + const Timestamp wrongTimestamp{Date_t::now()}; + const auto wrongChunkIdx = _random.nextInt32(chunkVector.size()); + const auto oldChunk = chunkVector.at(wrongChunkIdx); + const ChunkVersion wrongVersion{{collEpoch(), wrongTimestamp}, + {collVersion.majorVersion(), collVersion.minorVersion()}}; + auto updateChunk = std::make_shared( + ChunkType{uuid(), oldChunk->getRange(), wrongVersion, oldChunk->getShardId()}); + + ASSERT_THROWS_CODE(chunkMap.createMerged({updateChunk}), + AssertionException, + ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Check that updating a ChunkMap with chunks that have lower version fails.
+ */ +TEST_F(ChunkMapTest, UpdateChunkMapLowerVersion) { + auto chunkVector = toChunkInfoPtrVector(genRandomChunkVector()); + + auto chunkMap = makeChunkMap(chunkVector); + + const auto wrongChunkIdx = _random.nextInt32(chunkVector.size()); + const auto oldChunk = chunkVector.at(wrongChunkIdx); + const ChunkVersion wrongVersion{{collEpoch(), collTimestamp()}, {0, 1}}; + auto updateChunk = std::make_shared( + ChunkType{uuid(), oldChunk->getRange(), wrongVersion, oldChunk->getShardId()}); + + ASSERT_THROWS_CODE(chunkMap.createMerged({updateChunk}), AssertionException, 626840); +} +/* + * Test update of ChunkMap with random chunk manipulation (splits/merges/moves). + */ +TEST_F(ChunkMapTest, UpdateChunkMapRandom) { + auto initialChunks = genRandomChunkVector(); + auto initialChunksInfo = toChunkInfoPtrVector(initialChunks); + + const auto initialChunkMap = makeChunkMap(initialChunksInfo); + + const auto initialShardVersions = calculateShardVersions(initialChunksInfo); + const auto initialCollVersion = calculateCollVersion(initialShardVersions); + + auto chunks = initialChunks; + + const auto maxNumChunkOps = 2 * initialChunks.size(); + const auto numChunkOps = _random.nextInt32(maxNumChunkOps); + performRandomChunkOperations(&chunks, numChunkOps); + + auto chunksInfo = toChunkInfoPtrVector(chunks); + + std::vector> updatedChunksInfo; + for (auto& chunkPtr : chunksInfo) { + if (!chunkPtr->getLastmod().isOlderOrEqualThan(initialCollVersion)) { + updatedChunksInfo.push_back(std::make_shared(ChunkType{ + uuid(), chunkPtr->getRange(), chunkPtr->getLastmod(), chunkPtr->getShardId()})); + } + } + + const auto expectedShardVersions = calculateShardVersions(chunksInfo); + const auto expectedCollVersion = calculateCollVersion(expectedShardVersions); + auto chunkMap = initialChunkMap.createMerged(updatedChunksInfo); + + // Check that it contains all the chunks + ASSERT_EQ(chunksInfo.size(), chunkMap.size()); + // Check collection version + ASSERT_EQ(expectedCollVersion, chunkMap.getVersion()); + + size_t i = 0; + chunkMap.forEach([&](const auto& chunkPtr) { + const auto& expectedChunkPtr = chunksInfo[i++]; + assertEqualChunkInfo(*expectedChunkPtr, *chunkPtr); + return true; + }); + + // Validate all shard versions + const auto shardVersions = getShardVersionMap(chunkMap); + ASSERT_EQ(expectedShardVersions.size(), shardVersions.size()); + for (const auto& mapIt : shardVersions) { + ASSERT_EQ(expectedShardVersions.at(mapIt.first), mapIt.second.placementVersion); + } +} + TEST_F(ChunkMapTest, TestEnumerateAllChunks) { - const OID epoch = OID::gen(); - ChunkMap chunkMap{epoch, Timestamp(1, 1)}; - ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0}); + ChunkVersion version{{collEpoch(), collTimestamp()}, {1, 0}}; - auto newChunkMap = chunkMap.createMerged( + auto newChunkMap = makeChunkMap( {std::make_shared( ChunkType{uuid(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, @@ -105,11 +365,9 @@ TEST_F(ChunkMapTest, TestEnumerateAllChunks) { } TEST_F(ChunkMapTest, TestIntersectingChunk) { - const OID epoch = OID::gen(); - ChunkMap chunkMap{epoch, Timestamp(1, 1)}; - ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0}); + ChunkVersion version{{collEpoch(), collTimestamp()}, {1, 0}}; - auto newChunkMap = chunkMap.createMerged( + auto newChunkMap = makeChunkMap( {std::make_shared( ChunkType{uuid(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, @@ -134,12 +392,23 @@ TEST_F(ChunkMapTest, TestIntersectingChunk) { BSON("a" << 100))); } +TEST_F(ChunkMapTest,
TestIntersectingChunkRandom) { + auto chunks = toChunkInfoPtrVector(genRandomChunkVector()); + + const auto chunkMap = makeChunkMap(chunks); + + auto targetChunkIt = chunks.begin() + _random.nextInt64(chunks.size()); + auto intermediateKey = calculateIntermediateShardKey( + (*targetChunkIt)->getMin(), (*targetChunkIt)->getMax(), 0.2 /* minKeyProb */); + + auto intersectingChunkPtr = chunkMap.findIntersectingChunk(intermediateKey); + assertEqualChunkInfo(**(targetChunkIt), *intersectingChunkPtr); +} + TEST_F(ChunkMapTest, TestEnumerateOverlappingChunks) { - const OID epoch = OID::gen(); - ChunkMap chunkMap{epoch, Timestamp(1, 1)}; - ChunkVersion version({epoch, Timestamp(1, 1)}, {1, 0}); + ChunkVersion version{{collEpoch(), collTimestamp()}, {1, 0}}; - auto newChunkMap = chunkMap.createMerged( + auto newChunkMap = makeChunkMap( {std::make_shared( ChunkType{uuid(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, @@ -167,4 +436,27 @@ TEST_F(ChunkMapTest, TestEnumerateOverlappingChunks) { ASSERT_EQ(count, 3); } +TEST_F(ChunkMapTest, TestEnumerateOverlappingChunksRandom) { + auto chunks = toChunkInfoPtrVector(genRandomChunkVector()); + + const auto chunkMap = makeChunkMap(chunks); + + auto firstChunkIt = chunks.begin() + _random.nextInt64(chunks.size()); + auto lastChunkIt = firstChunkIt + _random.nextInt64(std::distance(firstChunkIt, chunks.end())); + + auto minBound = calculateIntermediateShardKey( + (*firstChunkIt)->getMin(), (*firstChunkIt)->getMax(), 0.2 /* minKeyProb */); + auto maxBound = calculateIntermediateShardKey( + (*lastChunkIt)->getMin(), (*lastChunkIt)->getMax(), 0.2 /* minKeyProb */); + + auto it = firstChunkIt; + chunkMap.forEachOverlappingChunk(minBound, maxBound, true, [&](const auto& chunkInfoPtr) { + assertEqualChunkInfo(**(it++), *chunkInfoPtr); + return true; + }); + ASSERT_EQ(0, std::distance(it, std::next(lastChunkIt))); +} + +} // namespace + } // namespace mongo diff --git a/src/mongo/s/chunk_test.cpp b/src/mongo/s/chunk_test.cpp index 9ecdc310306e4..b9633455ab0ea 100644 --- a/src/mongo/s/chunk_test.cpp +++ b/src/mongo/s/chunk_test.cpp @@ -28,7 +28,21 @@ */ #include "mongo/s/chunk.h" -#include "mongo/unittest/unittest.h" + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/namespace_string.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/s/chunk_version.cpp b/src/mongo/s/chunk_version.cpp index fa3931160ea71..c8bde7119dcd8 100644 --- a/src/mongo/s/chunk_version.cpp +++ b/src/mongo/s/chunk_version.cpp @@ -29,6 +29,9 @@ #include "mongo/s/chunk_version.h" +#include + +#include "mongo/idl/idl_parser.h" #include "mongo/s/chunk_version_gen.h" #include "mongo/util/str.h" @@ -76,4 +79,12 @@ std::string ChunkVersion::toString() const { << _timestamp.toString(); } +BSONObj ChunkVersion::toBSON() const { + BSONObjBuilder builder; + builder.append("majorVersion", static_cast(majorVersion())); + builder.append("minorVersion", static_cast(minorVersion())); + builder.append("epoch", _epoch); + builder.append("timestamp", _timestamp); + return builder.obj(); +} } // namespace mongo diff --git a/src/mongo/s/chunk_version.h b/src/mongo/s/chunk_version.h index 158caea2699b2..8c71f55a2ade5 100644 --- 
a/src/mongo/s/chunk_version.h +++ b/src/mongo/s/chunk_version.h @@ -29,9 +29,22 @@ #pragma once +#include +#include +#include +#include + #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/db/jsobj.h" #include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -227,6 +240,8 @@ class ChunkVersion : public CollectionGeneration, public CollectionPlacement { void serialize(StringData field, BSONObjBuilder* builder) const; std::string toString() const; + + BSONObj toBSON() const; }; inline std::ostream& operator<<(std::ostream& s, const ChunkVersion& v) { diff --git a/src/mongo/s/chunk_version_test.cpp b/src/mongo/s/chunk_version_test.cpp index e92bec19aa83f..cf6d5ff46d649 100644 --- a/src/mongo/s/chunk_version_test.cpp +++ b/src/mongo/s/chunk_version_test.cpp @@ -30,7 +30,8 @@ #include #include "mongo/s/chunk_version.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/s/chunks_test_util.cpp b/src/mongo/s/chunks_test_util.cpp new file mode 100644 index 0000000000000..63fb6f230ec1e --- /dev/null +++ b/src/mongo/s/chunks_test_util.cpp @@ -0,0 +1,358 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +#include "mongo/s/chunks_test_util.h" + +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/random.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" + +namespace mongo::chunks_test_util { +namespace { + +PseudoRandom _random{SecureRandom().nextInt64()}; + +std::vector genChunkHistory(const ShardId& currentShard, + const Timestamp& onCurrentShardSince, + size_t numShards, + size_t maxLenght) { + std::vector history; + const auto historyLength = _random.nextInt64(maxLenght); + auto lastTime = onCurrentShardSince; + for (int64_t i = 0; i < historyLength; i++) { + auto shard = i == 0 ? currentShard : getShardId(_random.nextInt64(numShards)); + history.emplace_back(onCurrentShardSince, shard); + lastTime = lastTime - 1 - _random.nextInt64(10000); + } + return history; +} + +} // namespace + +void assertEqualChunkInfo(const ChunkInfo& x, const ChunkInfo& y) { + ASSERT_BSONOBJ_EQ(x.getMin(), y.getMin()); + ASSERT_BSONOBJ_EQ(x.getMax(), y.getMax()); + ASSERT_EQ(x.getMaxKeyString(), y.getMaxKeyString()); + ASSERT_EQ(x.getShardId(), y.getShardId()); + ASSERT_EQ(x.getLastmod(), y.getLastmod()); + ASSERT_EQ(x.isJumbo(), y.isJumbo()); + ASSERT_EQ(x.getHistory(), y.getHistory()); +} + +ShardId getShardId(int shardIdx) { + return {std::string(str::stream() << "shard_" << shardIdx)}; +} + +std::vector genRandomSplitPoints(size_t numChunks) { + std::vector splitPoints; + splitPoints.reserve(numChunks + 1); + splitPoints.emplace_back(kShardKeyPattern.globalMin()); + int nextSplit{-1000}; + for (size_t i = 0; i < numChunks - 1; ++i) { + nextSplit += i * 10 * (_random.nextInt32(10) + 1); + splitPoints.emplace_back(BSON(kSKey << nextSplit)); + } + splitPoints.emplace_back(kShardKeyPattern.globalMax()); + return splitPoints; +} + +std::vector genRandomVersions(size_t num, const ChunkVersion& initialVersion) { + std::vector versions; + versions.reserve(num); + auto major = initialVersion.majorVersion(); + auto minor = initialVersion.minorVersion(); + + for (size_t i = 0; i < num; ++i) { + if (_random.nextInt32(2)) { + ++major; + minor = 0; + } else { + ++minor; + } + versions.emplace_back( + CollectionGeneration(initialVersion.epoch(), initialVersion.getTimestamp()), + CollectionPlacement(major, minor)); + } + std::shuffle(versions.begin(), versions.end(), _random.urbg()); + return versions; +} + +std::vector genChunkVector(const UUID& uuid, + const std::vector& splitPoints, + const ChunkVersion& initialVersion, + size_t numShards) { + + return genChunkVector( + uuid, splitPoints, genRandomVersions(splitPoints.size() - 1, initialVersion), numShards); +} + +std::vector genChunkVector(const UUID& uuid, + const std::vector& splitPoints, + const std::vector& versions, + size_t numShards) { + + invariant(SimpleBSONObjComparator::kInstance.evaluate(splitPoints.front() == + kShardKeyPattern.globalMin())); + invariant(SimpleBSONObjComparator::kInstance.evaluate(splitPoints.back() == + kShardKeyPattern.globalMax())); + const auto numChunks = splitPoints.size() - 1; + invariant(numChunks == versions.size()); + + std::vector chunks; + chunks.reserve(numChunks); + auto minKey = 
splitPoints.front(); + for (size_t i = 0; i < numChunks; ++i) { + auto maxKey = splitPoints.at(i + 1); + const auto shard = getShardId(_random.nextInt64(numShards)); + const auto version = versions.at(i); + ChunkType chunk{uuid, ChunkRange{minKey, maxKey}, version, shard}; + chunk.setHistory( + genChunkHistory(shard, Timestamp{Date_t::now()}, numShards, 10 /* maxLenght */)); + chunks.emplace_back(std::move(chunk)); + minKey = std::move(maxKey); + } + return chunks; +} + +std::map calculateShardsMaxValidAfter( + const std::vector& chunkVector) { + + std::map vaMap; + for (const auto& chunk : chunkVector) { + if (chunk.getHistory().empty()) + continue; + + const auto& chunkMaxValidAfter = chunk.getHistory().front().getValidAfter(); + auto mapIt = vaMap.find(chunk.getShard()); + if (mapIt == vaMap.end()) { + vaMap.emplace(chunk.getShard(), chunkMaxValidAfter); + continue; + } + if (chunkMaxValidAfter > mapIt->second) { + mapIt->second = chunkMaxValidAfter; + } + } + return vaMap; +} + +ChunkVersion calculateCollVersion(const std::map& shardVersions) { + return std::max_element(shardVersions.begin(), + shardVersions.end(), + [](const std::pair& p1, + const std::pair& p2) { + return p1.second.isOlderThan(p2.second); + }) + ->second; +} + +std::map calculateShardVersions(const std::vector& chunkVector) { + std::map svMap; + for (const auto& chunk : chunkVector) { + auto mapIt = svMap.find(chunk.getShard()); + if (mapIt == svMap.end()) { + svMap.emplace(chunk.getShard(), chunk.getVersion()); + continue; + } + if (mapIt->second.isOlderThan(chunk.getVersion())) { + mapIt->second = chunk.getVersion(); + } + } + return svMap; +} + +std::vector genRandomChunkVector(const UUID& uuid, + const OID& epoch, + const Timestamp& timestamp, + size_t maxNumChunks, + size_t minNumChunks) { + invariant(minNumChunks <= maxNumChunks); + const auto numChunks = minNumChunks + _random.nextInt32((maxNumChunks - minNumChunks) + 1); + const auto numShards = _random.nextInt32(numChunks) + 1; + const ChunkVersion initialVersion{{epoch, timestamp}, {1, 0}}; + + LOGV2(7162700, + "Generating random chunk vector", + "numChunks"_attr = numChunks, + "numShards"_attr = numShards); + + return genChunkVector(uuid, genRandomSplitPoints(numChunks), initialVersion, numShards); +} + +BSONObj calculateIntermediateShardKey(const BSONObj& leftKey, + const BSONObj& rightKey, + double minKeyProb) { + invariant(0 <= minKeyProb && minKeyProb <= 1, "minKeyProb out of range [0, 1]"); + + if (_random.nextInt32(100) < minKeyProb * 100) { + return leftKey; + } + + const auto isMinKey = leftKey.woCompare(kShardKeyPattern.globalMin()) == 0; + const auto isMaxKey = rightKey.woCompare(kShardKeyPattern.globalMax()) == 0; + + int splitPoint; + if (isMinKey && isMaxKey) { + // [min, max] -> split at 0 + splitPoint = 0; + } else if (!isMinKey && !isMaxKey) { + // [x, y] -> split in the middle + auto min = leftKey.firstElement().numberInt(); + auto max = rightKey.firstElement().numberInt(); + invariant(min + 1 < max, + str::stream() << "Can't split range [" << min << ", " << max << "]"); + splitPoint = min + ((max - min) / 2); + } else if (isMaxKey) { + // [x, maxKey] -> split at x*2; + auto prevBound = leftKey.firstElement().numberInt(); + auto increment = prevBound ? prevBound : _random.nextInt32(100) + 1; + splitPoint = prevBound + std::abs(increment); + } else if (isMinKey) { + // [minKey, x] -> split at x*2; + auto prevBound = rightKey.firstElement().numberInt(); + auto increment = prevBound ? 
prevBound : _random.nextInt32(100) + 1; + splitPoint = prevBound - std::abs(increment); + } else { + MONGO_UNREACHABLE; + } + + return BSON(kSKey << splitPoint); +} + +void performRandomChunkOperations(std::vector* chunksPtr, size_t numOperations) { + auto& chunks = *chunksPtr; + auto collVersion = calculateCollVersion(calculateShardVersions(chunks)); + + auto moveChunk = [&] { + auto& chunkToMigrate = chunks[_random.nextInt32(chunks.size())]; + collVersion.incMajor(); + + auto controlChunkIt = std::find_if(chunks.begin(), chunks.end(), [&](const auto& chunk) { + return chunk.getShard() == chunkToMigrate.getShard() && + !chunk.getRange().overlaps(chunkToMigrate.getRange()); + }); + if (controlChunkIt != chunks.end()) { + controlChunkIt->setVersion(collVersion); + collVersion.incMinor(); + } + auto newShard = getShardId(_random.nextInt64(chunks.size())); + chunkToMigrate.setShard(newShard); + chunkToMigrate.setVersion(collVersion); + chunkToMigrate.setHistory([&] { + auto history = chunkToMigrate.getHistory(); + history.emplace(history.begin(), Timestamp{Date_t::now()}, newShard); + return history; + }()); + }; + + auto splitChunk = [&] { + auto chunkToSplitIt = chunks.begin() + _random.nextInt32(chunks.size()); + while (chunkToSplitIt != chunks.begin() && chunkToSplitIt != std::prev(chunks.end()) && + (chunkToSplitIt->getMax().firstElement().numberInt() - + chunkToSplitIt->getMin().firstElement().numberInt()) < 2) { + // If the chunk is unsplittable select another one + chunkToSplitIt = chunks.begin() + _random.nextInt32(chunks.size()); + } + + const auto& chunkToSplit = *chunkToSplitIt; + + auto splitKey = calculateIntermediateShardKey(chunkToSplit.getMin(), chunkToSplit.getMax()); + + collVersion.incMinor(); + const ChunkRange leftRange{chunkToSplit.getMin(), splitKey}; + ChunkType leftChunk{ + chunkToSplit.getCollectionUUID(), leftRange, collVersion, chunkToSplit.getShard()}; + + collVersion.incMinor(); + const ChunkRange rightRange{splitKey, chunkToSplit.getMax()}; + ChunkType rightChunk{ + chunkToSplit.getCollectionUUID(), rightRange, collVersion, chunkToSplit.getShard()}; + + auto it = chunks.erase(chunkToSplitIt); + it = chunks.insert(it, std::move(rightChunk)); + it = chunks.insert(it, std::move(leftChunk)); + }; + + auto mergeChunks = [&] { + const auto firstChunkIt = chunks.begin() + _random.nextInt32(chunks.size()); + const auto& shardId = firstChunkIt->getShard(); + auto lastChunkIt = std::find_if(firstChunkIt, chunks.end(), [&](const auto& chunk) { + return chunk.getShard() != shardId; + }); + const auto numContiguosChunks = std::distance(firstChunkIt, lastChunkIt); + if (numContiguosChunks < 2) { + // nothing to merge + return; + } + const auto numChunkToMerge = _random.nextInt32(numContiguosChunks - 1) + 2; + lastChunkIt = firstChunkIt + numChunkToMerge; + const auto& firstChunk = *firstChunkIt; + collVersion.incMinor(); + const ChunkRange mergedRange{firstChunk.getMin(), std::prev(lastChunkIt)->getMax()}; + ChunkType mergedChunk{ + firstChunk.getCollectionUUID(), mergedRange, collVersion, firstChunk.getShard()}; + + auto it = chunks.erase(firstChunkIt, lastChunkIt); + it = chunks.insert(it, mergedChunk); + }; + + for (size_t i = 0; i < numOperations; i++) { + switch (_random.nextInt32(3)) { + case 0: + moveChunk(); + break; + case 1: + splitChunk(); + break; + case 2: + mergeChunks(); + break; + default: + MONGO_UNREACHABLE; + break; + } + } +} + +} // namespace mongo::chunks_test_util diff --git a/src/mongo/s/chunks_test_util.h b/src/mongo/s/chunks_test_util.h new file 
mode 100644 index 0000000000000..56e87abae69fa --- /dev/null +++ b/src/mongo/s/chunks_test_util.h @@ -0,0 +1,130 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/basic.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_version.h" +#include "mongo/util/uuid.h" + +namespace mongo::chunks_test_util { + +static const std::string kSKey{"a"}; +static const KeyPattern kShardKeyPattern{BSON(kSKey << 1)}; + +/* + * Assert that all the fields contained in the provided ChunkInfo are equal. + * + * This is needed since the ChunkInfo class does not provide an equality operator. + */ +void assertEqualChunkInfo(const ChunkInfo& x, const ChunkInfo& y); + +ShardId getShardId(int shardIdx); + +/** + * Return a vector of randomly generated split points + * covering the entire shard key space, including the boundaries [minKey, maxKey). + * + * e.g. {"a": } + */ +std::vector genRandomSplitPoints(size_t numChunks); + +/* + * Generate a shuffled list of random chunk versions. + * + * The generated versions are all strictly greater than the provided initialVersion. + */ +std::vector genRandomVersions(size_t num, const ChunkVersion& initialVersion); + +/* + * Generate a vector of chunks whose boundaries are defined by the provided split points and random + * chunk versions. + */ +std::vector genChunkVector(const UUID& uuid, + const std::vector& splitPoints, + const ChunkVersion& initialVersion, + size_t numShards); + +/* + * Generate a vector of chunks. + */ +std::vector genChunkVector(const UUID& uuid, + const std::vector& splitPoints, + const std::vector& versions, + size_t numShards); + +/* + * Return a randomly generated vector of chunks that are properly sorted based on their min value + * and cover the full space from [MinKey, MaxKey].
+ */ +std::vector genRandomChunkVector(const UUID& uuid, + const OID& epoch, + const Timestamp& timestamp, + size_t maxNumChunks, + size_t minNumChunks = 1); + +std::map calculateShardVersions(const std::vector& chunkVector); + +std::map calculateShardsMaxValidAfter( + const std::vector& chunkVector); + +ChunkVersion calculateCollVersion(const std::map& shardVersions); + +/* + * Return a shardkey that is in between the given range [leftKey, rightKey] + */ +BSONObj calculateIntermediateShardKey(const BSONObj& leftKey, + const BSONObj& rightKey, + double minKeyProb = 0.0); + +/* + * Perform a series of random operations on the given list of chunks. + * + * The operations performed resemble all possible operations that could happen to a routing table in + * a production cluster (move, merge, split, etc...) + * + * @chunks: list of chunks ordered by minKey + * @numOperations: number of operations to perform + */ +void performRandomChunkOperations(std::vector* chunks, size_t numOperations); + +} // namespace mongo::chunks_test_util diff --git a/src/mongo/s/client/config_shard_wrapper.cpp b/src/mongo/s/client/config_shard_wrapper.cpp new file mode 100644 index 0000000000000..fd61fbde8c1c8 --- /dev/null +++ b/src/mongo/s/client/config_shard_wrapper.cpp @@ -0,0 +1,127 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ +#include "mongo/s/client/config_shard_wrapper.h" + +#include +#include + +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/vector_clock.h" +#include "mongo/util/assert_util_core.h" + +namespace mongo { + +ConfigShardWrapper::ConfigShardWrapper(std::shared_ptr configShard) + : Shard(configShard->getId()), _configShard(std::move(configShard)) { + invariant(_configShard->isConfig()); +} + +ConnectionString ConfigShardWrapper::getConnString() const { + return _configShard->getConnString(); +} + +std::shared_ptr ConfigShardWrapper::getTargeter() const { + return _configShard->getTargeter(); +}; + +void ConfigShardWrapper::updateReplSetMonitor(const HostAndPort& remoteHost, + const Status& remoteCommandStatus) { + return _configShard->updateReplSetMonitor(remoteHost, remoteCommandStatus); +} + +std::string ConfigShardWrapper::toString() const { + return _configShard->toString(); +} + +bool ConfigShardWrapper::isRetriableError(ErrorCodes::Error code, RetryPolicy options) { + return _configShard->isRetriableError(code, options); +} + +void ConfigShardWrapper::runFireAndForgetCommand(OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + const std::string& dbName, + const BSONObj& cmdObj) { + const auto readPrefWithConfigTime = _attachConfigTimeToMinClusterTime(opCtx, readPref, dbName); + return _configShard->runFireAndForgetCommand(opCtx, readPrefWithConfigTime, dbName, cmdObj); +} + +Status ConfigShardWrapper::runAggregation( + OperationContext* opCtx, + const AggregateCommandRequest& aggRequest, + std::function& batch, + const boost::optional& postBatchResumeToken)> callback) { + return _configShard->runAggregation(opCtx, aggRequest, std::move(callback)); +} + +StatusWith ConfigShardWrapper::_runCommand( + OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + StringData dbName, + Milliseconds maxTimeMSOverrideUnused, + const BSONObj& cmdObj) { + const auto readPrefWithConfigTime = + _attachConfigTimeToMinClusterTime(opCtx, readPref, dbName.toString()); + return _configShard->_runCommand( + opCtx, readPrefWithConfigTime, dbName, maxTimeMSOverrideUnused, cmdObj); +} + +StatusWith ConfigShardWrapper::_runExhaustiveCursorCommand( + OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + StringData dbName, + Milliseconds maxTimeMSOverride, + const BSONObj& cmdObj) { + return _configShard->_runExhaustiveCursorCommand( + opCtx, readPref, dbName, maxTimeMSOverride, cmdObj); +} + +StatusWith ConfigShardWrapper::_exhaustiveFindOnConfig( + OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + const repl::ReadConcernLevel& readConcernLevel, + const NamespaceString& nss, + const BSONObj& query, + const BSONObj& sort, + boost::optional limit, + const boost::optional& hint) { + return _configShard->_exhaustiveFindOnConfig( + opCtx, readPref, readConcernLevel, nss, query, sort, limit, hint); +} + +ReadPreferenceSetting ConfigShardWrapper::_attachConfigTimeToMinClusterTime( + OperationContext* opCtx, const ReadPreferenceSetting& readPref, const StringData& dbName) { + const auto vcTime = VectorClock::get(opCtx)->getTime(); + ReadPreferenceSetting readPrefToReturn{readPref}; + readPrefToReturn.minClusterTime = vcTime.configTime().asTimestamp(); + return readPrefToReturn; +} +} // namespace mongo diff --git a/src/mongo/s/client/config_shard_wrapper.h b/src/mongo/s/client/config_shard_wrapper.h new file mode 100644 index 0000000000000..9e6af5bdfe013 --- /dev/null +++ 
b/src/mongo/s/client/config_shard_wrapper.h @@ -0,0 +1,128 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/s/client/shard.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" + +namespace mongo { + +/** + * Wraps/decorates a Shard object (representing the config server) to attach extra config server + * specific logic to the member functions in the Shard object. + * + * The ConfigShardWrapper wrapper should be used every time you are specifically targeting a config + * server (such as when running commands to modify catalog data).
This wrapper is automatically + created when a config shard is retrieved through ShardRegistry::getConfigShard() and + ShardRegistry::createLocalConfigShard(). + */ +class ConfigShardWrapper : public Shard { + ConfigShardWrapper(const ConfigShardWrapper&) = delete; + ConfigShardWrapper& operator=(const ConfigShardWrapper&) = delete; + +public: + ConfigShardWrapper(std::shared_ptr configShard); + + ~ConfigShardWrapper() = default; + + ConnectionString getConnString() const override; + + std::shared_ptr getTargeter() const override; + + void updateReplSetMonitor(const HostAndPort& remoteHost, + const Status& remoteCommandStatus) override; + + std::string toString() const override; + + bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final; + + void runFireAndForgetCommand(OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + const std::string& dbName, + const BSONObj& cmdObj) final; + + Status runAggregation( + OperationContext* opCtx, + const AggregateCommandRequest& aggRequest, + std::function& batch, + const boost::optional& postBatchResumeToken)> callback); + +private: + StatusWith _runCommand(OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + StringData dbName, + Milliseconds maxTimeMSOverride, + const BSONObj& cmdObj) final; + + StatusWith _runExhaustiveCursorCommand( + OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + StringData dbName, + Milliseconds maxTimeMSOverride, + const BSONObj& cmdObj) final; + + StatusWith _exhaustiveFindOnConfig( + OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + const repl::ReadConcernLevel& readConcernLevel, + const NamespaceString& nss, + const BSONObj& query, + const BSONObj& sort, + boost::optional limit, + const boost::optional& hint = boost::none) final; + + ReadPreferenceSetting _attachConfigTimeToMinClusterTime(OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + const StringData& dbName); + + const std::shared_ptr _configShard; +}; + +} // namespace mongo diff --git a/src/mongo/s/client/config_shard_wrapper_test.cpp b/src/mongo/s/client/config_shard_wrapper_test.cpp new file mode 100644 index 0000000000000..7266be67ce4f2 --- /dev/null +++ b/src/mongo/s/client/config_shard_wrapper_test.cpp @@ -0,0 +1,188 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so.
If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/remote_command_targeter.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/vector_clock.h" +#include "mongo/s/client/config_shard_wrapper.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + +namespace mongo { +namespace { + +class MockShard : public Shard { + MockShard(const MockShard&) = delete; + MockShard& operator=(const MockShard&) = delete; + +public: + explicit MockShard(const ShardId& id) : Shard(id) {} + ~MockShard() = default; + + ConnectionString getConnString() const override { + const HostAndPort configHost{"configHost1"}; + ConnectionString configCS{ConnectionString::forReplicaSet("configReplSet", {configHost})}; + return configCS; + } + + std::shared_ptr getTargeter() const override { + return std::make_shared(); + } + + void updateReplSetMonitor(const HostAndPort& remoteHost, + const Status& remoteCommandStatus) override {} + + std::string toString() const override { + return getId().toString(); + } + + bool isRetriableError(ErrorCodes::Error code, RetryPolicy options) final { + return false; + } + + void runFireAndForgetCommand(OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + const std::string& dbName, + const BSONObj& cmdObj) override { + lastReadPref = readPref; + } + Status runAggregation( + OperationContext* opCtx, + const AggregateCommandRequest& aggRequest, + std::function& batch, + const boost::optional& postBatchResumeToken)> callback) { + return Status::OK(); + } + + ReadPreferenceSetting lastReadPref; + +private: + StatusWith _runCommand(OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + StringData dbName, + Milliseconds maxTimeMSOverride, + const BSONObj& cmdObj) final { + lastReadPref = readPref; + return Shard::CommandResponse{boost::none, BSON("ok" << 1), Status::OK(), Status::OK()}; + } + + StatusWith _runExhaustiveCursorCommand( + OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + StringData dbName, + Milliseconds maxTimeMSOverride, + const BSONObj& cmdObj) final { + lastReadPref = readPref; + return Shard::QueryResponse{std::vector{}, repl::OpTime::max()}; + } + + StatusWith _exhaustiveFindOnConfig( + OperationContext* opCtx, + const ReadPreferenceSetting& readPref, + const repl::ReadConcernLevel& readConcernLevel, + const NamespaceString& nss, + const BSONObj& query, + const BSONObj& sort, + boost::optional limit, + const boost::optional& hint = boost::none) final { + lastReadPref = readPref; + return Shard::QueryResponse{std::vector{}, repl::OpTime::max()}; + } +}; + +class ConfigShardWrapperTest : public ShardingTestFixture { +protected: + std::shared_ptr _mockConfigShard; + std::unique_ptr _configShardWrapper; + + void setUp() override { + serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; + + ShardingTestFixture::setUp(); + + _mockConfigShard = 
std::make_shared(ShardId(ShardId::kConfigServerId)); + _configShardWrapper = std::make_unique(_mockConfigShard); + } + + void tearDown() override { + ShardingTestFixture::tearDown(); + } +}; + +TEST_F(ConfigShardWrapperTest, RunCommandAttachesMinClusterTime) { + const auto vcTime = VectorClock::get(operationContext())->getTime(); + auto expectedMinClusterTime = vcTime.configTime(); + expectedMinClusterTime.addTicks(10); + VectorClock::get(operationContext())->advanceConfigTime_forTest(expectedMinClusterTime); + + auto result = _configShardWrapper->runCommand(operationContext(), + ReadPreferenceSetting{}, + DatabaseName::kConfig.db().toString(), + BSONObj{}, + Shard::RetryPolicy::kNoRetry); + + ASSERT_EQ(_mockConfigShard->lastReadPref.minClusterTime, expectedMinClusterTime.asTimestamp()); +} + +TEST_F(ConfigShardWrapperTest, RunFireAndForgetCommandAttachesMinClusterTime) { + const auto vcTime = VectorClock::get(operationContext())->getTime(); + auto expectedMinClusterTime = vcTime.configTime(); + expectedMinClusterTime.addTicks(10); + VectorClock::get(operationContext())->advanceConfigTime_forTest(expectedMinClusterTime); + + _configShardWrapper->runFireAndForgetCommand(operationContext(), + ReadPreferenceSetting{}, + DatabaseName::kConfig.db().toString(), + BSONObj{}); + + ASSERT_EQ(_mockConfigShard->lastReadPref.minClusterTime, expectedMinClusterTime.asTimestamp()); +} + +TEST_F(ConfigShardWrapperTest, GetConfigShardReturnsConfigShardWrapper) { + ASSERT_EQ(typeid(*shardRegistry()->getConfigShard()).name(), typeid(ConfigShardWrapper).name()); +} + +} // namespace +} // namespace mongo diff --git a/src/mongo/s/client/num_hosts_targeted_metrics.cpp b/src/mongo/s/client/num_hosts_targeted_metrics.cpp index 82188f4f30033..1c21352fa2dea 100644 --- a/src/mongo/s/client/num_hosts_targeted_metrics.cpp +++ b/src/mongo/s/client/num_hosts_targeted_metrics.cpp @@ -29,12 +29,14 @@ #include "mongo/s/client/num_hosts_targeted_metrics.h" -#include "mongo/base/init.h" +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/curop.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" -#include "mongo/s/grid.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/s/client/shard.cpp b/src/mongo/s/client/shard.cpp index aa83f7e5b893a..812ff9357dfdc 100644 --- a/src/mongo/s/client/shard.cpp +++ b/src/mongo/s/client/shard.cpp @@ -28,14 +28,22 @@ */ -#include "mongo/platform/basic.h" +#include + +#include +#include #include "mongo/client/remote_command_retry_scheduler.h" #include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/s/client/shard.h" -#include "mongo/s/client/shard_registry.h" #include "mongo/s/client/shard_remote_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/client/shard.h b/src/mongo/s/client/shard.h index 73a612ff5ba52..065adce680836 100644 --- a/src/mongo/s/client/shard.h +++ b/src/mongo/s/client/shard.h @@ -29,24 +29,42 @@ #pragma once +#include +#include #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" 
#include "mongo/bson/bsonobj.h" #include "mongo/client/connection_string.h" #include "mongo/client/read_preference.h" #include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/shard_id.h" #include "mongo/executor/remote_command_response.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" namespace mongo { class OperationContext; + class RemoteCommandTargeter; /** @@ -304,6 +322,8 @@ class Shard { * Identifier of the shard as obtained from the configuration data (i.e. shard0000). */ const ShardId _id; + + friend class ConfigShardWrapper; }; } // namespace mongo diff --git a/src/mongo/s/client/shard_factory.cpp b/src/mongo/s/client/shard_factory.cpp index d789aadf03ce5..82a9f94288085 100644 --- a/src/mongo/s/client/shard_factory.cpp +++ b/src/mongo/s/client/shard_factory.cpp @@ -28,15 +28,13 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/client/shard_factory.h" - #include +#include + +#include -#include "mongo/base/status_with.h" #include "mongo/client/connection_string.h" -#include "mongo/client/remote_command_targeter.h" +#include "mongo/s/client/shard_factory.h" #include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/client/shard_factory.h b/src/mongo/s/client/shard_factory.h index bbd07b061f9cf..b02a8bdaa0581 100644 --- a/src/mongo/s/client/shard_factory.h +++ b/src/mongo/s/client/shard_factory.h @@ -36,6 +36,7 @@ #include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_factory.h" +#include "mongo/db/shard_id.h" #include "mongo/s/client/shard.h" namespace mongo { diff --git a/src/mongo/s/client/shard_registry.cpp b/src/mongo/s/client/shard_registry.cpp index e68c1985a9aa3..0aff2f8a902f0 100644 --- a/src/mongo/s/client/shard_registry.cpp +++ b/src/mongo/s/client/shard_registry.cpp @@ -29,22 +29,58 @@ #include "mongo/s/client/shard_registry.h" +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" #include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/server_options.h" #include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_metadata_hook.h" +#include "mongo/executor/network_connection_hook.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include 
"mongo/logv2/redaction.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/config_shard_wrapper.h" #include "mongo/s/grid.h" +#include "mongo/util/cancellation.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/invalidating_lru_cache.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -87,9 +123,6 @@ ShardRegistry::ShardRegistry(ServiceContext* service, if (_initConfigServerCS) { invariant(_initConfigServerCS->isValid()); - } else { - // (Ignore FCV check): This is in mongos so we expect to ignore FCV. - invariant(gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()); } _threadPool.startup(); @@ -190,7 +223,7 @@ ShardRegistry::Cache::LookupResult ShardRegistry::_lookup(OperationContext* opCt auto name = shard->getConnString().getSetName(); if (shardId != ShardId::kConfigServerId) { - // Don't remove the catalog shard's RSM because it is used to target the config server. + // Don't remove the config shard's RSM because it is used to target the config server. ReplicaSetMonitor::remove(name); } _removeReplicaSet(name); @@ -279,7 +312,7 @@ std::shared_ptr ShardRegistry::getConfigShard() const { auto configShard = _configShardData.findShard(ShardId::kConfigServerId); // Note this should only throw if the local node has not learned its replica set config yet. uassert(ErrorCodes::NotYetInitialized, "Config shard has not been set up yet", configShard); - return configShard; + return std::make_shared(configShard); } StatusWith> ShardRegistry::getShard(OperationContext* opCtx, @@ -426,7 +459,9 @@ std::unique_ptr ShardRegistry::createConnection(const ConnectionString& c std::shared_ptr ShardRegistry::createLocalConfigShard() const { invariant(serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); - return _shardFactory->createShard(ShardId::kConfigServerId, ConnectionString::forLocal()); + std::shared_ptr configShard = + _shardFactory->createShard(ShardId::kConfigServerId, ConnectionString::forLocal()); + return std::make_shared(configShard); } void ShardRegistry::toBSON(BSONObjBuilder* result) const { @@ -462,6 +497,12 @@ void ShardRegistry::updateReplicaSetOnConfigServer(ServiceContext* serviceContex const ConnectionString& connStr) noexcept { ThreadClient tc("UpdateReplicaSetOnConfigServer", serviceContext); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = tc->makeOperationContext(); auto const grid = Grid::get(opCtx.get()); auto sr = grid->shardRegistry(); diff --git a/src/mongo/s/client/shard_registry.h b/src/mongo/s/client/shard_registry.h index 13daa987df857..87acedb7cb550 100644 --- a/src/mongo/s/client/shard_registry.h +++ b/src/mongo/s/client/shard_registry.h @@ -29,20 +29,39 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include #include +#include #include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_factory.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/future.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/read_through_cache.h" namespace mongo { @@ -156,10 +175,30 @@ class ShardRegistryData { }; /** - * Maintains the set of all shards known to the instance and their connections and exposes - * functionality to run commands against shards. All commands which this registry executes are - * retried on NotPrimary class of errors and in addition all read commands are retried on network - * errors automatically as well. + * Each node (router, shard server, config server, primaries and secondaries) has one instance of + * this object. It is an in-memory cache mirroring the `config.shards` collection on the config + * server, whose causal consistency is driven by the `topologyTime` component of the vector clock. + * The collection (and thus the cache) contains an entry for each shard in the cluster. Each entry + * contains the connection string for that shard. + * + * Retrieving a shard from the registry returns a `Shard` object. Using that object, one can access + * more information about a shard and run commands against that shard. A `Shard` object can be + * retrieved from the registry by using any of: + * - The shard's name + * - The replica set's name + * - The HostAndPort object + * - The connection string + * + * REFRESHES: The shard registry refreshes itself in these scenarios: + * - Upon the node's start-up + * - Upon completion of a background job that runs every thirty seconds + * - Upon an attempt to retrieve a shard that doesn’t have a matching entry in the cache + * - Upon calling the ShardRegistry’s reload function (ShardRegistry::reload()) + * - After an operation has gossipped-in a higher `topologyTime` + * + * The shard registry makes updates to the `config.shards` collection in one case. If the shard + * registry discovers an updated connection string for another shard via a replica set topology + * change, it will persist that update to `config.shards`. 
*/ class ShardRegistry { ShardRegistry(const ShardRegistry&) = delete; diff --git a/src/mongo/s/client/shard_remote.cpp b/src/mongo/s/client/shard_remote.cpp index 8f7e7443e238c..5bfc00d5690d9 100644 --- a/src/mongo/s/client/shard_remote.cpp +++ b/src/mongo/s/client/shard_remote.cpp @@ -31,27 +31,53 @@ #include "mongo/s/client/shard_remote.h" #include +#include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/dbclient_base.h" #include "mongo/client/fetcher.h" #include "mongo/client/read_preference.h" -#include "mongo/client/remote_command_retry_scheduler.h" #include "mongo/client/remote_command_targeter.h" -#include "mongo/client/replica_set_monitor.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/server_options.h" #include "mongo/db/vector_clock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/rpc/metadata/tracking_metadata.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/client/shard_remote_gen.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" -#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -503,20 +529,7 @@ StatusWith ShardRemote::_scheduleCommand( const BSONObj& cmdObj, const TaskExecutor::RemoteCommandCallbackFn& cb) { - const auto readPrefWithConfigTime = [&]() -> ReadPreferenceSetting { - // TODO SERVER-74281: Append this higher up when we know we're targeting the config to read - // metadata or use a better filter to avoid matching logical sessions collection. - if (isConfig() && (dbName == DatabaseName::kConfig || dbName == DatabaseName::kAdmin)) { - const auto vcTime = VectorClock::get(opCtx)->getTime(); - ReadPreferenceSetting readPrefToReturn{readPref}; - readPrefToReturn.minClusterTime = vcTime.configTime().asTimestamp(); - return readPrefToReturn; - } else { - return {readPref}; - } - }(); - - const auto swHost = _targeter->findHost(opCtx, readPrefWithConfigTime); + const auto swHost = _targeter->findHost(opCtx, readPref); if (!swHost.isOK()) { return swHost.getStatus(); } @@ -531,7 +544,7 @@ StatusWith ShardRemote::_scheduleCommand( asyncHandle.hostTargetted, dbName.toString(), appendMaxTimeToCmdObj(requestTimeout, cmdObj), - _appendMetadataForCommand(opCtx, readPrefWithConfigTime), + _appendMetadataForCommand(opCtx, readPref), opCtx, requestTimeout < Milliseconds::max() ? 
requestTimeout : RemoteCommandRequest::kNoTimeout); diff --git a/src/mongo/s/client/shard_remote.h b/src/mongo/s/client/shard_remote.h index 7262b05ce5fab..b75684e0188e9 100644 --- a/src/mongo/s/client/shard_remote.h +++ b/src/mongo/s/client/shard_remote.h @@ -29,12 +29,30 @@ #pragma once +#include +#include +#include +#include #include - -#include "mongo/s/client/shard.h" - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" +#include "mongo/s/client/shard.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/s/client/shard_remote_test.cpp b/src/mongo/s/client/shard_remote_test.cpp index 0b4d8f1116b81..f7827c1698bb0 100644 --- a/src/mongo/s/client/shard_remote_test.cpp +++ b/src/mongo/s/client/shard_remote_test.cpp @@ -27,16 +27,28 @@ * it in the license file. */ +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/connection_string.h" -#include "mongo/db/logical_time.h" -#include "mongo/db/query/cursor_response.h" -#include "mongo/db/vector_clock.h" +#include "mongo/client/remote_command_targeter_factory_mock.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_factory.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/client/shard_remote.h" -#include "mongo/s/query/establish_cursors.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/s/client/sharding_connection_hook.cpp b/src/mongo/s/client/sharding_connection_hook.cpp index be6498ccc6f71..a79089a371989 100644 --- a/src/mongo/s/client/sharding_connection_hook.cpp +++ b/src/mongo/s/client/sharding_connection_hook.cpp @@ -28,17 +28,27 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/client/sharding_connection_hook.h" - #include - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/client/authenticate.h" -#include "mongo/db/client.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/internal_auth.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/client/sharding_connection_hook.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kSharding @@ -75,15 +85,14 @@ void ShardingConnectionHook::onCreate(DBClientBase* conn) { if (conn->type() == ConnectionString::ConnectionType::kStandalone) { - BSONObj isMasterResponse; - if (!conn->runCommand( - DatabaseName(boost::none, "admin"), BSON("ismaster" << 1), isMasterResponse)) { - uassertStatusOK(getStatusFromCommandResult(isMasterResponse)); + BSONObj helloResponse; + if (!conn->runCommand(DatabaseName::kAdmin, BSON("hello" << 1), helloResponse)) { + uassertStatusOK(getStatusFromCommandResult(helloResponse)); } long long configServerModeNumber; Status status = - bsonExtractIntegerField(isMasterResponse, "configsvr", &configServerModeNumber); + bsonExtractIntegerField(helloResponse, "configsvr", &configServerModeNumber); if (status == ErrorCodes::NoSuchKey) { // This isn't a config server we're talking to. diff --git a/src/mongo/s/client/sharding_connection_hook.h b/src/mongo/s/client/sharding_connection_hook.h index 7ef8cd3925f24..2a49e15025af9 100644 --- a/src/mongo/s/client/sharding_connection_hook.h +++ b/src/mongo/s/client/sharding_connection_hook.h @@ -29,7 +29,10 @@ #pragma once +#include + #include "mongo/client/connpool.h" +#include "mongo/client/dbclient_base.h" #include "mongo/rpc/metadata.h" #include "mongo/rpc/metadata/metadata_hook.h" diff --git a/src/mongo/s/client/sharding_network_connection_hook.cpp b/src/mongo/s/client/sharding_network_connection_hook.cpp index fda37ac9f5ba8..3bdf19d03a6f0 100644 --- a/src/mongo/s/client/sharding_network_connection_hook.cpp +++ b/src/mongo/s/client/sharding_network_connection_hook.cpp @@ -29,17 +29,23 @@ #include "mongo/s/client/sharding_network_connection_hook.h" +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/server_options.h" -#include "mongo/db/wire_version.h" +#include "mongo/db/service_context.h" #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/str.h" @@ -48,12 +54,12 @@ namespace mongo { Status ShardingNetworkConnectionHook::validateHost( const HostAndPort& remoteHost, const BSONObj&, - const executor::RemoteCommandResponse& isMasterReply) { - return validateHostImpl(remoteHost, isMasterReply); + const executor::RemoteCommandResponse& helloReply) { + return validateHostImpl(remoteHost, helloReply); } Status ShardingNetworkConnectionHook::validateHostImpl( - const HostAndPort& remoteHost, const executor::RemoteCommandResponse& isMasterReply) { + const HostAndPort& remoteHost, const executor::RemoteCommandResponse& helloReply) { auto shard = Grid::get(getGlobalServiceContext())->shardRegistry()->getShardForHostNoReload(remoteHost); if (!shard) { @@ -62,11 +68,11 @@ Status ShardingNetworkConnectionHook::validateHostImpl( } long long configServerModeNumber; - auto status = bsonExtractIntegerField(isMasterReply.data, "configsvr", &configServerModeNumber); + auto status = bsonExtractIntegerField(helloReply.data, "configsvr", &configServerModeNumber); switch (status.code()) { case ErrorCodes::OK: { - // The ismaster response indicates remoteHost is a config server. 
+ // The hello response indicates remoteHost is a config server. if (!shard->isConfig()) { return {ErrorCodes::InvalidOptions, str::stream() << "Surprised to discover that " << remoteHost.toString() @@ -75,7 +81,7 @@ Status ShardingNetworkConnectionHook::validateHostImpl( return Status::OK(); } case ErrorCodes::NoSuchKey: { - // The ismaster response indicates that remoteHost is not a config server, or that + // The hello response indicates that remoteHost is not a config server, or that // the config server is running a version prior to the 3.1 development series. if (!shard->isConfig()) { return Status::OK(); @@ -86,7 +92,7 @@ Status ShardingNetworkConnectionHook::validateHostImpl( << " does not believe it is a config server"}; } default: - // The ismaster response was malformed. + // The hello response was malformed. return status; } } diff --git a/src/mongo/s/client/sharding_network_connection_hook.h b/src/mongo/s/client/sharding_network_connection_hook.h index c77428e56d906..d39e342ffba90 100644 --- a/src/mongo/s/client/sharding_network_connection_hook.h +++ b/src/mongo/s/client/sharding_network_connection_hook.h @@ -29,7 +29,14 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/util/net/hostandport.h" namespace mongo { @@ -45,18 +52,18 @@ class ShardingNetworkConnectionHook final : public executor::NetworkConnectionHo /** * Checks that the given host is valid to be used in this sharded cluster, based on its - * isMaster response. + * "hello" response. */ Status validateHost(const HostAndPort& remoteHost, const BSONObj& request, - const executor::RemoteCommandResponse& isMasterReply) override; + const executor::RemoteCommandResponse& helloReply) override; /** * Implementation of validateHost can be called without a ShardingNetworkConnectionHook * instance. */ static Status validateHostImpl(const HostAndPort& remoteHost, - const executor::RemoteCommandResponse& isMasterReply); + const executor::RemoteCommandResponse& helloReply); /** * Makes a SetShardVersion request for initializing sharding information on the new connection. 
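For context on the isMaster to hello rename above, a small illustrative sketch of how the hook classifies the reply; the function below is not part of the patch, but the "configsvr" field name and the error-code handling mirror validateHostImpl().

```cpp
// Illustrative only; mirrors the classification performed by validateHostImpl().
#include "mongo/base/status.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/util/bson_extract.h"

namespace mongo {
bool helloReplySaysConfigServer(const BSONObj& helloReply) {
    long long configServerModeNumber;
    Status status = bsonExtractIntegerField(helloReply, "configsvr", &configServerModeNumber);
    // OK        -> the remote reports itself as a config server.
    // NoSuchKey -> an ordinary shard or standalone (or a pre-3.1 config server).
    // Any other code is treated by the hook as a malformed reply.
    return status.isOK();
}
}  // namespace mongo
```

For example, a reply carrying a `configsvr` field (such as `{ ok: 1, configsvr: 2 }`) classifies the host as a config server, while a plain `{ ok: 1 }` does not.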
diff --git a/src/mongo/s/cluster_commands_helpers.cpp b/src/mongo/s/cluster_commands_helpers.cpp index 345c4eae3f7ba..02633dcb1a2aa 100644 --- a/src/mongo/s/cluster_commands_helpers.cpp +++ b/src/mongo/s/cluster_commands_helpers.cpp @@ -29,36 +29,65 @@ #include "mongo/s/cluster_commands_helpers.h" -#include - -#include "mongo/bson/util/bson_extract.h" +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/client/connection_string.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" #include "mongo/db/commands.h" #include "mongo/db/curop.h" #include "mongo/db/error_labels.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/process_interface/mongo_process_interface.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/read_concern_support_result.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/write_concern_error_detail.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/client/shard_registry.h" -#include "mongo/s/collection_routing_info_targeter.h" #include "mongo/s/grid.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/s/query_analysis_sampler_util.h" -#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" -#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand +using mongo::repl::ReadConcernArgs; +using mongo::repl::ReadConcernLevel; + namespace mongo { void appendWriteConcernErrorDetailToCmdResponse(const ShardId& shardId, @@ -132,7 +161,7 @@ namespace { * sample id for it. 
*/ std::vector buildVersionedRequestsForTargetedShards( - OperationContext* opCtx, + boost::intrusive_ptr expCtx, const NamespaceString& nss, const CollectionRoutingInfo& cri, const std::set& shardsToSkip, @@ -140,6 +169,7 @@ std::vector buildVersionedRequestsForTargetedShard const BSONObj& query, const BSONObj& collation, bool eligibleForSampling = false) { + auto opCtx = expCtx->opCtx; const auto& cm = cri.cm; @@ -180,7 +210,6 @@ std::vector buildVersionedRequestsForTargetedShard CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation)); } - auto expCtx = make_intrusive(opCtx, std::move(collator), nss); getShardIdsForQuery(expCtx, query, collation, cm, &shardIds, nullptr /* info */); const auto targetedSampleId = eligibleForSampling @@ -214,7 +243,7 @@ std::vector gatherResponsesImpl( MultiStatementTransactionRequestsSender ars( opCtx, Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), - dbName, + DatabaseNameUtil::deserialize(boost::none, dbName), requests, readPref, retryPolicy); @@ -422,11 +451,37 @@ std::vector scatterGatherVersionedTargetByRouting Shard::RetryPolicy retryPolicy, const BSONObj& query, const BSONObj& collation, + const boost::optional& letParameters, + const boost::optional& runtimeConstants, bool eligibleForSampling) { - const auto requests = buildVersionedRequestsForTargetedShards( - opCtx, nss, cri, {} /* shardsToSkip */, cmdObj, query, collation, eligibleForSampling); + auto expCtx = makeExpressionContextWithDefaultsForTargeter( + opCtx, nss, collation, boost::none /*explainVerbosity*/, letParameters, runtimeConstants); + return scatterGatherVersionedTargetByRoutingTable(expCtx, + dbName, + nss, + cri, + cmdObj, + readPref, + retryPolicy, + query, + collation, + eligibleForSampling); +} - return gatherResponses(opCtx, dbName, readPref, retryPolicy, requests); +[[nodiscard]] std::vector scatterGatherVersionedTargetByRoutingTable( + boost::intrusive_ptr expCtx, + StringData dbName, + const NamespaceString& nss, + const CollectionRoutingInfo& cri, + const BSONObj& cmdObj, + const ReadPreferenceSetting& readPref, + Shard::RetryPolicy retryPolicy, + const BSONObj& query, + const BSONObj& collation, + bool eligibleForSampling) { + const auto requests = buildVersionedRequestsForTargetedShards( + expCtx, nss, cri, {} /* shardsToSkip */, cmdObj, query, collation, eligibleForSampling); + return gatherResponses(expCtx->opCtx, dbName, readPref, retryPolicy, requests); } std::vector @@ -440,9 +495,13 @@ scatterGatherVersionedTargetByRoutingTableNoThrowOnStaleShardVersionErrors( const ReadPreferenceSetting& readPref, Shard::RetryPolicy retryPolicy, const BSONObj& query, - const BSONObj& collation) { + const BSONObj& collation, + const boost::optional& letParameters, + const boost::optional& runtimeConstants) { + auto expCtx = makeExpressionContextWithDefaultsForTargeter( + opCtx, nss, collation, boost::none /*explainVerbosity*/, letParameters, runtimeConstants); const auto requests = buildVersionedRequestsForTargetedShards( - opCtx, nss, cri, shardsToSkip, cmdObj, query, collation); + expCtx, nss, cri, shardsToSkip, cmdObj, query, collation); return gatherResponsesNoThrowOnStaleShardVersionErrors( opCtx, dbName, readPref, retryPolicy, requests); @@ -477,6 +536,12 @@ AsyncRequestsSender::Response executeCommandAgainstShardWithMinKeyChunk( const BSONObj& cmdObj, const ReadPreferenceSetting& readPref, Shard::RetryPolicy retryPolicy) { + auto expCtx = makeExpressionContextWithDefaultsForTargeter(opCtx, + nss, + BSONObj() /*collation*/, + 
boost::none /*explainVerbosity*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); const auto query = cri.cm.isSharded() ? cri.cm.getShardKeyPattern().getKeyPattern().globalMin() : BSONObj(); @@ -487,10 +552,11 @@ AsyncRequestsSender::Response executeCommandAgainstShardWithMinKeyChunk( readPref, retryPolicy, buildVersionedRequestsForTargetedShards( - opCtx, nss, cri, {} /* shardsToSkip */, cmdObj, query, BSONObj() /* collation */)); + expCtx, nss, cri, {} /* shardsToSkip */, cmdObj, query, BSONObj() /* collation */)); return std::move(responses.front()); } +// TODO SERVER-74478: Rewrite function to process AsyncRPC responses RawResponsesResult appendRawResponses( OperationContext* opCtx, std::string* errmsg, @@ -670,10 +736,14 @@ std::vector> getVersionedRequestsForTargetedShards( const CollectionRoutingInfo& cri, const BSONObj& cmdObj, const BSONObj& query, - const BSONObj& collation) { + const BSONObj& collation, + const boost::optional& letParameters, + const boost::optional& runtimeConstants) { + auto expCtx = makeExpressionContextWithDefaultsForTargeter( + opCtx, nss, collation, boost::none /*explainVerbosity*/, letParameters, runtimeConstants); std::vector> requests; auto ars_requests = buildVersionedRequestsForTargetedShards( - opCtx, nss, cri, {} /* shardsToSkip */, cmdObj, query, collation); + expCtx, nss, cri, {} /* shardsToSkip */, cmdObj, query, collation); std::transform(std::make_move_iterator(ars_requests.begin()), std::make_move_iterator(ars_requests.end()), std::back_inserter(requests), @@ -697,13 +767,14 @@ StatusWith getCollectionRoutingInfoForTxnCmd(OperationCon // Return the latest routing table if not running in a transaction with snapshot level read // concern. - auto txnRouter = TransactionRouter::get(opCtx); - if (!txnRouter || !txnRouter.mustUseAtClusterTime()) { - return catalogCache->getCollectionRoutingInfo(opCtx, nss); + if (auto txnRouter = TransactionRouter::get(opCtx)) { + if (auto atClusterTime = txnRouter.getSelectedAtClusterTime()) { + return catalogCache->getCollectionRoutingInfoAt( + opCtx, nss, atClusterTime->asTimestamp()); + } } - auto atClusterTime = txnRouter.getSelectedAtClusterTime(); - return catalogCache->getCollectionRoutingInfoAt(opCtx, nss, atClusterTime.asTimestamp()); + return catalogCache->getCollectionRoutingInfo(opCtx, nss); } StatusWith loadIndexesFromAuthoritativeShard(OperationContext* opCtx, @@ -715,6 +786,13 @@ StatusWith loadIndexesFromAuthoritativeShard(OperationCont const auto& [cm, sii] = cri; auto cmdNoVersion = applyReadWriteConcern( opCtx, true /* appendRC */, false /* appendWC */, BSON("listIndexes" << nss.coll())); + + // force the read concern level to "local" as other values are not supported for listIndexes + BSONObjBuilder bob(cmdNoVersion.removeField(ReadConcernArgs::kReadConcernFieldName)); + bob.append(ReadConcernArgs::kReadConcernFieldName, + BSON(ReadConcernArgs::kLevelFieldName << repl::readConcernLevels::kLocalName)); + cmdNoVersion = bob.obj(); + if (cm.isSharded()) { // For a sharded collection we must load indexes from a shard with chunks. For // consistency with cluster listIndexes, load from the shard that owns the minKey chunk. 
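The read-concern override added above is small but easy to misread, so here is a standalone sketch of the same BSON rewrite; the helper is hypothetical and the literal field names stand in for the `ReadConcernArgs` constants used in the patch.

```cpp
// Hypothetical standalone equivalent of the readConcern rewrite above:
//   in : { listIndexes: "coll", readConcern: { level: "majority" } }
//   out: { listIndexes: "coll", readConcern: { level: "local" } }
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {
BSONObj forceLocalReadConcern(const BSONObj& cmd) {
    // Drop any caller-supplied readConcern, then append {level: "local"},
    // because other levels are not supported for listIndexes.
    BSONObjBuilder bob(cmd.removeField("readConcern"));
    bob.append("readConcern", BSON("level" << "local"));
    return bob.obj();
}
}  // namespace mongo
```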
diff --git a/src/mongo/s/cluster_commands_helpers.h b/src/mongo/s/cluster_commands_helpers.h index 549e91b529ca8..de0359011d78b 100644 --- a/src/mongo/s/cluster_commands_helpers.h +++ b/src/mongo/s/cluster_commands_helpers.h @@ -29,18 +29,37 @@ #pragma once +#include +#include +#include #include +#include #include +#include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/commands.h" #include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/shard_id.h" #include "mongo/rpc/write_concern_error_detail.h" #include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" namespace mongo { @@ -183,8 +202,24 @@ std::vector scatterGatherUnversionedTargetAllShar Shard::RetryPolicy retryPolicy, const BSONObj& query, const BSONObj& collation, + const boost::optional& letParameters, + const boost::optional& runtimeConstants, + bool eligibleForSampling = false); +/** + * This overload is for callers which already have a fully initialized 'ExpressionContext' (e.g. + * callers from the aggregation framework). Most callers should prefer the overload above. + */ +[[nodiscard]] std::vector scatterGatherVersionedTargetByRoutingTable( + boost::intrusive_ptr expCtx, + StringData dbName, + const NamespaceString& nss, + const CollectionRoutingInfo& cri, + const BSONObj& cmdObj, + const ReadPreferenceSetting& readPref, + Shard::RetryPolicy retryPolicy, + const BSONObj& query, + const BSONObj& collation, bool eligibleForSampling = false); - /** * Utility for dispatching versioned commands on a namespace, deciding which shards to @@ -205,7 +240,9 @@ scatterGatherVersionedTargetByRoutingTableNoThrowOnStaleShardVersionErrors( const ReadPreferenceSetting& readPref, Shard::RetryPolicy retryPolicy, const BSONObj& query, - const BSONObj& collation); + const BSONObj& collation, + const boost::optional& letParameters, + const boost::optional& runtimeConstants); /** * Utility for dispatching commands against the primary of a database and attaching the appropriate @@ -297,7 +334,9 @@ std::vector> getVersionedRequestsForTargetedShards( const CollectionRoutingInfo& cri, const BSONObj& cmdObj, const BSONObj& query, - const BSONObj& collation); + const BSONObj& collation, + const boost::optional& letParameters, + const boost::optional& runtimeConstants); /** * If the command is running in a transaction, returns the proper routing table to use for targeting diff --git a/src/mongo/s/cluster_cursor_stats.cpp b/src/mongo/s/cluster_cursor_stats.cpp index 46ae32f88cda8..15fdacc79d5c4 100644 --- a/src/mongo/s/cluster_cursor_stats.cpp +++ b/src/mongo/s/cluster_cursor_stats.cpp @@ -27,7 +27,12 @@ * it in the license file. 
*/ +#include +#include + +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/service_context.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_cursor_manager.h" @@ -52,6 +57,7 @@ class ClusterCursorStats final : public ServerStatusMetric { auto stats = grid->getCursorManager()->stats(); openBob.append("multiTarget", static_cast(stats.cursorsMultiTarget)); openBob.append("singleTarget", static_cast(stats.cursorsSingleTarget)); + openBob.append("queuedData", static_cast(stats.cursorsQueuedData)); openBob.append("pinned", static_cast(stats.cursorsPinned)); openBob.append( "total", diff --git a/src/mongo/s/cluster_ddl.cpp b/src/mongo/s/cluster_ddl.cpp index 551e2bcaec008..4608f9a1d4a74 100644 --- a/src/mongo/s/cluster_ddl.cpp +++ b/src/mongo/s/cluster_ddl.cpp @@ -28,12 +28,37 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/cluster_ddl.h" - +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/cluster_ddl.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/cluster_ddl.h b/src/mongo/s/cluster_ddl.h index 6f15aa4e73f85..4c135570edc57 100644 --- a/src/mongo/s/cluster_ddl.h +++ b/src/mongo/s/cluster_ddl.h @@ -29,6 +29,12 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" diff --git a/src/mongo/s/cluster_identity_loader.cpp b/src/mongo/s/cluster_identity_loader.cpp index 25c6a665dce65..e421e5f81594c 100644 --- a/src/mongo/s/cluster_identity_loader.cpp +++ b/src/mongo/s/cluster_identity_loader.cpp @@ -29,10 +29,20 @@ #include "mongo/s/cluster_identity_loader.h" +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include + #include "mongo/base/status_with.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/s/catalog/type_config_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -52,6 +62,11 @@ ClusterIdentityLoader* ClusterIdentityLoader::get(OperationContext* operationCon } OID ClusterIdentityLoader::getClusterId() { + // TODO SERVER-78051: Re-evaluate use of ClusterIdentityLoader for shards. 
+ tassert(7800000, + "Unexpectedly tried to get cluster id on a non config server node", + serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)); + stdx::unique_lock lk(_mutex); invariant(_initializationState == InitializationState::kInitialized && _lastLoadResult.isOK()); return _lastLoadResult.getValue(); diff --git a/src/mongo/s/cluster_identity_loader.h b/src/mongo/s/cluster_identity_loader.h index 43e51b9dc1739..4f0b7b285d0bb 100644 --- a/src/mongo/s/cluster_identity_loader.h +++ b/src/mongo/s/cluster_identity_loader.h @@ -29,10 +29,17 @@ #pragma once +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/oid.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" #include "mongo/platform/mutex.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/stdx/condition_variable.h" diff --git a/src/mongo/s/cluster_identity_loader_test.cpp b/src/mongo/s/cluster_identity_loader_test.cpp index 1a06260d13492..6d7ff26481f7e 100644 --- a/src/mongo/s/cluster_identity_loader_test.cpp +++ b/src/mongo/s/cluster_identity_loader_test.cpp @@ -28,23 +28,37 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/commands.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_request_helper.h" -#include "mongo/db/service_context.h" +#include "mongo/db/server_options.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/rpc/metadata/tracking_metadata.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog/type_config_version.h" -#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_identity_loader.h" #include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/stdx/future.h" -#include "mongo/util/str.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -67,6 +81,9 @@ BSONObj getReplSecondaryOkMetadata() { class ClusterIdentityTest : public ShardingTestFixture { public: void setUp() { + // TODO SERVER-78051: Remove once shards can access the loaded cluster id. 
+ serverGlobalParams.clusterRole = {ClusterRole::ShardServer, ClusterRole::ConfigServer}; + ShardingTestFixture::setUp(); configTargeter()->setFindHostReturnValue(configHost); } @@ -80,7 +97,7 @@ class ClusterIdentityTest : public ShardingTestFixture { auto opMsg = OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj); auto query = query_request_helper::makeFromFindCommandForTests(opMsg.body); - ASSERT_EQ(query->getNamespaceOrUUID().nss()->ns(), "config.version"); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), NamespaceString::kConfigVersionNamespace); ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj()); ASSERT_FALSE(query->getLimit().has_value()); diff --git a/src/mongo/s/cluster_write.cpp b/src/mongo/s/cluster_write.cpp index 4fba9bc05934e..f6b3c85115882 100644 --- a/src/mongo/s/cluster_write.cpp +++ b/src/mongo/s/cluster_write.cpp @@ -28,16 +28,20 @@ */ -#include "mongo/logv2/log.h" - -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/s/cluster_write.h" +#include #include "mongo/db/fle_crud.h" #include "mongo/db/not_primary_error_tracker.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/cluster_write.h" #include "mongo/s/collection_routing_info_targeter.h" -#include "mongo/s/grid.h" +#include "mongo/s/ns_targeter.h" +#include "mongo/s/write_ops/bulk_write_exec.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -47,6 +51,7 @@ namespace cluster { void write(OperationContext* opCtx, const BatchedCommandRequest& request, + NamespaceString* nss, BatchWriteExecStats* stats, BatchedCommandResponse* response, boost::optional targetEpoch) { @@ -63,6 +68,9 @@ void write(OperationContext* opCtx, &NotPrimaryErrorTracker::get(opCtx->getClient())); CollectionRoutingInfoTargeter targeter(opCtx, request.getNS(), targetEpoch); + if (nss) { + *nss = targeter.getNS(); + } LOGV2_DEBUG_OPTIONS( 4817400, 2, {logv2::LogComponent::kShardMigrationPerf}, "Starting batch write"); diff --git a/src/mongo/s/cluster_write.h b/src/mongo/s/cluster_write.h index 2bc816aad7fe8..aeea7a3537944 100644 --- a/src/mongo/s/cluster_write.h +++ b/src/mongo/s/cluster_write.h @@ -29,7 +29,18 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/oid.h" +#include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/commands/bulk_write_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/s/write_ops/batch_write_exec.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/s/write_ops/bulk_write_exec.h" namespace mongo { @@ -41,10 +52,16 @@ namespace cluster { */ void write(OperationContext* opCtx, const BatchedCommandRequest& request, + NamespaceString* nss, BatchWriteExecStats* stats, BatchedCommandResponse* response, boost::optional targetEpoch = boost::none); +/** + * Execute a bulkWrite request as a router. + * + * Note: Caller is responsible for passing in a valid BulkWriteCommandRequest. 
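Because the `cluster::write()` signature change above touches every caller, a hedged sketch of the expected call shape follows; the surrounding function is hypothetical, and the argument order matches the new declaration in cluster_write.h.

```cpp
// Hypothetical caller of the patched cluster::write(); not part of this change.
#include "mongo/s/cluster_write.h"

namespace mongo {
void runRoutedBatchWrite(OperationContext* opCtx, const BatchedCommandRequest& request) {
    BatchWriteExecStats stats;
    BatchedCommandResponse response;
    NamespaceString resolvedNss;

    // The new third argument reports the namespace the targeter actually used
    // (targeter.getNS()); callers that do not need it can pass nullptr instead.
    cluster::write(opCtx, request, &resolvedNss, &stats, &response);
}
}  // namespace mongo
```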
+ */ std::vector bulkWrite(OperationContext* opCtx, const BulkWriteCommandRequest& request); diff --git a/src/mongo/s/collection_routing_info_targeter.cpp b/src/mongo/s/collection_routing_info_targeter.cpp index 6d4befd609e29..aae8262d7490c 100644 --- a/src/mongo/s/collection_routing_info_targeter.cpp +++ b/src/mongo/s/collection_routing_info_targeter.cpp @@ -29,30 +29,59 @@ #include "mongo/s/collection_routing_info_targeter.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/db/commands/server_status_metric.h" -#include "mongo/db/curop.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" -#include "mongo/db/pipeline/pipeline.h" -#include "mongo/db/pipeline/process_interface/mongos_process_interface.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collation/collation_index_key.h" -#include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/query_planner.h" -#include "mongo/db/query/query_planner_common.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/server_options.h" #include "mongo/db/stats/counters.h" #include "mongo/db/storage/storage_parameters_gen.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_options.h" #include "mongo/db/timeseries/timeseries_update_delete_util.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/chunk.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/cluster_ddl.h" #include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" #include "mongo/s/shard_key_pattern_query_util.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/util/assert_util.h" #include "mongo/util/intrusive_counter.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -82,8 +111,8 @@ CounterMetric updateOneOpStyleBroadcastWithExactIDCount( * or * coll.update({x: 1}, [{$addFields: {y: 2}}]) */ -void validateUpdateDoc(const write_ops::UpdateOpEntry& updateDoc) { - const auto& updateMod = updateDoc.getU(); +void validateUpdateDoc(const UpdateRef updateRef) { + const auto& updateMod = updateRef.getUpdateMods(); if (updateMod.type() == write_ops::UpdateModification::Type::kPipeline) { return; } @@ -108,7 +137,7 @@ void validateUpdateDoc(const write_ops::UpdateOpEntry& updateDoc) { uassert(ErrorCodes::InvalidOptions, "Replacement-style updates cannot be {multi:true}", - updateType == UpdateType::kModifier || !updateDoc.getMulti()); + updateType == UpdateType::kModifier || !updateRef.getMulti()); } /** @@ -385,14 +414,18 @@ std::vector 
CollectionRoutingInfoTargeter::targetUpdate( // to have a missing '_id', and if the '_id' exists in the query, it will be emplaced in the // replacement document for targeting purposes. - const auto& updateOp = itemRef.getUpdate(); + const auto& updateOp = itemRef.getUpdateRef(); + const bool isMulti = updateOp.getMulti(); - if (updateOp.getMulti()) { + if (isMulti) { updateManyCount.increment(1); } // If the collection is not sharded, forward the update to the primary shard. if (!_cri.cm.isSharded()) { + if (!isMulti) { + updateOneUnshardedCount.increment(1); + } // TODO (SERVER-51070): Remove the boost::none when the config server can support // shardVersion in commands return std::vector{ShardEndpoint( @@ -412,56 +445,66 @@ std::vector CollectionRoutingInfoTargeter::targetUpdate( itemRef.getLegacyRuntimeConstants()); const bool isUpsert = updateOp.getUpsert(); - auto query = updateOp.getQ(); + auto query = updateOp.getFilter(); if (_isRequestOnTimeseriesViewNamespace) { - uassert(ErrorCodes::NotImplemented, - str::stream() << "Updates are disallowed on sharded timeseries collections.", - feature_flags::gFeatureFlagShardedTimeSeriesUpdateDelete.isEnabled( - serverGlobalParams.featureCompatibility)); uassert(ErrorCodes::InvalidOptions, str::stream() << "A {multi:false} update on a sharded timeseries collection is disallowed.", - updateOp.getMulti()); + feature_flags::gTimeseriesUpdatesSupport.isEnabled( + serverGlobalParams.featureCompatibility) || + isMulti); uassert(ErrorCodes::InvalidOptions, str::stream() << "An {upsert:true} update on a sharded timeseries collection is disallowed.", - !isUpsert); + feature_flags::gTimeseriesUpdatesSupport.isEnabled( + serverGlobalParams.featureCompatibility) || + !isUpsert); - // Since this is a timeseries query, we may need to rename the metaField. - if (auto metaField = _cri.cm.getTimeseriesFields().value().getMetaField()) { - query = timeseries::translateQuery(query, *metaField); - } else { - // We want to avoid targeting the query incorrectly if no metaField is defined on the - // timeseries collection, since we only allow queries on the metaField for timeseries - // updates. Note: any non-empty query should fail to update once it reaches the shards - // because there is no metaField for it to query for, but we don't want to validate this - // during routing. - query = BSONObj(); - } + // Translate the update query on a timeseries collection into the bucket-level predicate + // so that we can target the request to the correct shard or broadcast the request if + // the bucket-level predicate is empty. + // + // Note: The query returned would match a super set of the documents matched by the + // original query. + query = timeseries::getBucketLevelPredicateForRouting( + query, + expCtx, + _cri.cm.getTimeseriesFields()->getTimeseriesOptions(), + feature_flags::gTimeseriesUpdatesSupport.isEnabled( + serverGlobalParams.featureCompatibility)); } validateUpdateDoc(updateOp); const auto updateExpr = - getUpdateExprForTargeting(expCtx, shardKeyPattern, query, updateOp.getU()); + getUpdateExprForTargeting(expCtx, shardKeyPattern, query, updateOp.getUpdateMods()); // Utility function to target an update by shard key, and to handle any potential error results. 
- auto targetByShardKey = [this, &collation, &chunkRanges](StatusWith swShardKey, - std::string msg) { + auto targetByShardKey = [this, &collation, &chunkRanges, isUpsert, isMulti]( + StatusWith swShardKey, std::string msg) { const auto& shardKey = uassertStatusOKWithContext(std::move(swShardKey), msg); - uassert(ErrorCodes::ShardKeyNotFound, - str::stream() << msg << " :: could not extract exact shard key", - !shardKey.isEmpty()); - return std::vector{ - uassertStatusOKWithContext(_targetShardKey(shardKey, collation, chunkRanges), msg)}; + if (shardKey.isEmpty()) { + if (isUpsert && !isMulti) { // Single upsert + updateOneNonTargetedShardedCount.increment(1); + } + uasserted(ErrorCodes::ShardKeyNotFound, + str::stream() << msg << " :: could not extract exact shard key"); + } else { + if (isUpsert && !isMulti) { // Single upsert + updateOneTargetedShardedCount.increment(1); + } + return std::vector{ + uassertStatusOKWithContext(_targetShardKey(shardKey, collation, chunkRanges), msg)}; + } }; // With the introduction of PM-1632, we can use the two phase write protocol to successfully // target an upsert without the full shard key. Else, the the query must contain an exact match // on the shard key. If we were to target based on the replacement doc, it could result in an // insertion even if a document matching the query exists on another shard. - if (!feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( - serverGlobalParams.featureCompatibility) && + if ((!feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( + serverGlobalParams.featureCompatibility) || + updateOp.getMulti()) && isUpsert) { return targetByShardKey( extractShardKeyFromBasicQueryWithContext(expCtx, shardKeyPattern, query), @@ -472,19 +515,23 @@ std::vector CollectionRoutingInfoTargeter::targetUpdate( // or upsert to a single shard, so return immediately if we are able to target a single shard. auto endPoints = uassertStatusOK(_targetQuery(expCtx, query, collation, chunkRanges)); if (endPoints.size() == 1) { + updateOneTargetedShardedCount.increment(1); return endPoints; } + auto isShardedTimeseriesCollection = isShardedTimeSeriesBucketsNamespace(); + // Targeting by replacement document is no longer necessary when an updateOne without shard key // is allowed, since we're able to decisively select a document to modify with the two phase // write without shard key protocol. if (!feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( serverGlobalParams.featureCompatibility) || - isExactIdQuery(opCtx, _nss, query, collation, _cri.cm)) { + (isExactIdQuery(opCtx, _nss, query, collation, _cri.cm) && + !isShardedTimeseriesCollection)) { // Replacement-style updates must always target a single shard. If we were unable to do so // using the query, we attempt to extract the shard key from the replacement and target // based on it. - if (updateOp.getU().type() == write_ops::UpdateModification::Type::kReplacement) { + if (updateOp.getUpdateMods().type() == write_ops::UpdateModification::Type::kReplacement) { if (chunkRanges) { chunkRanges->clear(); } @@ -494,7 +541,8 @@ std::vector CollectionRoutingInfoTargeter::targetUpdate( } // If we are here then this is an op-style update and we were not able to target a single shard. - // Non-multi updates must target a single shard or an exact _id. + // Non-multi updates must target a single shard or an exact _id. Time-series single updates must + // target a single shard. 
uassert(ErrorCodes::InvalidOptions, str::stream() << "A {multi:false} update on a sharded collection must contain an " @@ -502,13 +550,19 @@ std::vector CollectionRoutingInfoTargeter::targetUpdate( "single shard (and have the simple collation), but this update targeted " << endPoints.size() << " shards. Update request: " << updateOp.toBSON() << ", shard key pattern: " << shardKeyPattern.toString(), - updateOp.getMulti() || isExactIdQuery(opCtx, _nss, query, collation, _cri.cm) || + isMulti || + (isExactIdQuery(opCtx, _nss, query, collation, _cri.cm) && + !isShardedTimeseriesCollection) || feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( serverGlobalParams.featureCompatibility)); - // If the request is {multi:false}, then this is a single op-style update which we are - // broadcasting to multiple shards by exact _id. Record this event in our serverStatus metrics. - if (!updateOp.getMulti()) { + // If the request is {multi:false} and it's not a write without shard key, then this is a single + // op-style update which we are broadcasting to multiple shards by exact _id. Record this event + // in our serverStatus metrics. + if (!isMulti && + (isExactIdQuery(opCtx, _nss, query, collation, _cri.cm) && + !isShardedTimeseriesCollection)) { + updateOneTargetedShardedCount.increment(1); updateOneOpStyleBroadcastWithExactIDCount.increment(1); } @@ -517,7 +571,7 @@ std::vector CollectionRoutingInfoTargeter::targetUpdate( std::vector CollectionRoutingInfoTargeter::targetDelete( OperationContext* opCtx, const BatchItemRef& itemRef, std::set* chunkRanges) const { - const auto& deleteOp = itemRef.getDelete(); + const auto& deleteOp = itemRef.getDeleteRef(); const auto collation = write_ops::collationOf(deleteOp); auto expCtx = makeExpressionContextWithDefaultsForTargeter(opCtx, @@ -531,15 +585,10 @@ std::vector CollectionRoutingInfoTargeter::targetDelete( deleteManyCount.increment(1); } - BSONObj deleteQuery = deleteOp.getQ(); + BSONObj deleteQuery = deleteOp.getFilter(); BSONObj shardKey; if (_cri.cm.isSharded()) { if (_isRequestOnTimeseriesViewNamespace) { - uassert(ErrorCodes::NotImplemented, - "Deletes on sharded time-series collections feature is not enabled", - feature_flags::gFeatureFlagShardedTimeSeriesUpdateDelete.isEnabled( - serverGlobalParams.featureCompatibility)); - uassert(ErrorCodes::IllegalOperation, "Cannot perform a non-multi delete on a time-series collection", feature_flags::gTimeseriesDeletesSupport.isEnabled( @@ -556,7 +605,11 @@ std::vector CollectionRoutingInfoTargeter::targetDelete( // Note: The query returned would match a super set of the documents matched by the // original query. 
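To make the time-series targeting rules above concrete, a worked example with made-up values; it assumes a collection whose metaField is `tag` and whose buckets collection is sharded on the meta field, which is a typical setup but not something this patch itself states.

```cpp
// Worked example only; the collection layout and values are assumptions.
#include "mongo/bson/bsonmisc.h"
#include "mongo/bson/bsonobj.h"
#include "mongo/bson/bsonobjbuilder.h"

namespace mongo {
inline void timeseriesDeleteTargetingExample() {
    // Filter on the metaField ("tag"). The bucket-level rewrite above can turn
    // this into a predicate on the buckets' meta field, so the delete can be
    // sent to a single shard and deleteOneTargetedShardedCount is incremented.
    BSONObj metaFilter = BSON("tag" << "sensorA");

    // Filter on a measurement field. The derived bucket-level predicate cannot
    // pin down the shard key, so a {multi:false} delete is rejected with
    // ShardKeyNotFound unless featureFlagUpdateOneWithoutShardKey is enabled
    // (the two-phase write without shard key protocol).
    BSONObj measurementFilter = BSON("temperature" << BSON("$gt" << 30));

    (void)metaFilter;
    (void)measurementFilter;
}
}  // namespace mongo
```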
deleteQuery = timeseries::getBucketLevelPredicateForRouting( - deleteQuery, expCtx, tsFields->getMetaField()); + deleteQuery, + expCtx, + tsFields->getTimeseriesOptions(), + feature_flags::gTimeseriesDeletesSupport.isEnabled( + serverGlobalParams.featureCompatibility)); } // Sharded collections have the following further requirements for targeting: @@ -570,6 +623,7 @@ std::vector CollectionRoutingInfoTargeter::targetDelete( if (!shardKey.isEmpty()) { auto swEndpoint = _targetShardKey(shardKey, collation, chunkRanges); if (swEndpoint.isOK()) { + deleteOneTargetedShardedCount.increment(1); return std::vector{std::move(swEndpoint.getValue())}; } } @@ -591,21 +645,33 @@ std::vector CollectionRoutingInfoTargeter::targetDelete( MatchExpressionParser::kAllowAllSpecialFeatures), str::stream() << "Could not parse delete query " << deleteQuery); - // Single deletes must target a single shard or be exact-ID. + // Regular single deletes must target a single shard or be exact-ID. + // Time-series single deletes must target a single shard. + auto isShardedTimeseriesCollection = isShardedTimeSeriesBucketsNamespace(); uassert(ErrorCodes::ShardKeyNotFound, - str::stream() << "A single delete on a sharded collection must contain an exact match " - "on _id (and have the collection default collation) or contain the " - "shard key (and have the simple collation). Delete request: " - << deleteOp.toBSON() - << ", shard key pattern: " << _cri.cm.getShardKeyPattern().toString(), + fmt::format("A single delete on a sharded {} contain the shard key (and have the " + "simple collation). Delete request: {}, shard key pattern: {}", + isShardedTimeseriesCollection + ? "time-series collection must" + : "collection must contain an exact match on _id (and have the " + "collection default collation) or", + deleteOp.toBSON().toString(), + _cri.cm.getShardKeyPattern().toString()), !_cri.cm.isSharded() || deleteOp.getMulti() || - (isExactIdQuery(opCtx, *cq, _cri.cm) && !isShardedTimeSeriesBucketsNamespace()) || + (isExactIdQuery(opCtx, *cq, _cri.cm) && !isShardedTimeseriesCollection) || feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( serverGlobalParams.featureCompatibility)); if (chunkRanges) { chunkRanges->clear(); } + + if (!_cri.cm.isSharded()) { + deleteOneUnshardedCount.increment(1); + } else if (isExactIdQuery(opCtx, *cq, _cri.cm)) { + deleteOneTargetedShardedCount.increment(1); + } + return uassertStatusOK(_targetQuery(expCtx, deleteQuery, collation, chunkRanges)); } diff --git a/src/mongo/s/collection_routing_info_targeter.h b/src/mongo/s/collection_routing_info_targeter.h index 8dc910063f602..e9524337a3cd9 100644 --- a/src/mongo/s/collection_routing_info_targeter.h +++ b/src/mongo/s/collection_routing_info_targeter.h @@ -31,16 +31,32 @@ #include #include +#include +#include +#include +#include +#include + +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/oid.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/ns_targeter.h" +#include "mongo/s/shard_key_pattern.h" +#include 
"mongo/s/stale_exception.h" +#include "mongo/s/write_ops/batched_command_request.h" namespace mongo { diff --git a/src/mongo/s/collection_routing_info_targeter_test.cpp b/src/mongo/s/collection_routing_info_targeter_test.cpp index 1404a5a795dca..bfee5fa75b68c 100644 --- a/src/mongo/s/collection_routing_info_targeter_test.cpp +++ b/src/mongo/s/collection_routing_info_targeter_test.cpp @@ -27,20 +27,47 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/hasher.h" -#include "mongo/db/pipeline/expression_context_for_test.h" -#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/timeseries/timeseries_constants.h" #include "mongo/db/timeseries/timeseries_index_schema_conversion_functions.h" #include "mongo/db/timeseries/timeseries_options.h" -#include "mongo/logv2/log.h" +#include "mongo/idl/server_parameter_test_util.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/catalog_cache_test_fixture.h" +#include "mongo/s/chunk.h" #include "mongo/s/collection_routing_info_targeter.h" -#include "mongo/s/session_catalog_router.h" -#include "mongo/s/transaction_router.h" +#include "mongo/s/database_version.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/s/type_collection_common_types_gen.h" #include "mongo/s/write_ops/batched_command_request.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -94,6 +121,10 @@ class CollectionRoutingInfoTargeterTest : public CatalogCacheTestFixture { void testTargetDeleteWithRangePrefixHashedShardKey(); void testTargetDeleteWithHashedPrefixHashedShardKey(); void testTargetDeleteWithExactId(); + + // The tests using this fixture expects that a write without shard key is not allowed. 
+ RAIIServerParameterControllerForTest _featureFlagController{ + "featureFlagUpdateOneWithoutShardKey", false}; }; class CollectionRoutingInfoTargeterWithChunkRangesTest : public CollectionRoutingInfoTargeterTest { diff --git a/src/mongo/s/collection_uuid_mismatch.cpp b/src/mongo/s/collection_uuid_mismatch.cpp index 4df6f92067ee0..12e8528450fe9 100644 --- a/src/mongo/s/collection_uuid_mismatch.cpp +++ b/src/mongo/s/collection_uuid_mismatch.cpp @@ -29,11 +29,36 @@ #include "mongo/s/collection_uuid_mismatch.h" +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/client/read_preference.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" +#include "mongo/db/client.h" #include "mongo/db/list_collections_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { Status populateCollectionUUIDMismatch(OperationContext* opCtx, @@ -50,12 +75,19 @@ Status populateCollectionUUIDMismatch(OperationContext* opCtx, // The listCollections command cannot be run in multi-document transactions, so run it using an // alternative client. auto client = opCtx->getServiceContext()->makeClient("populateCollectionUUIDMismatch"); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
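The comment above explains why an alternative client is needed (listCollections cannot run inside the caller's multi-document transaction), and the hunk that follows marks that client unkillable by stepdown. A compact sketch of the whole pattern, with a hypothetical helper name:

```cpp
// Sketch of the alternative-client pattern used in populateCollectionUUIDMismatch;
// the helper name and the omitted command are placeholders.
#include "mongo/db/client.h"
#include "mongo/db/operation_context.h"
#include "mongo/db/service_context.h"
#include "mongo/stdx/mutex.h"

namespace mongo {
void runOutsideCallersTransaction(OperationContext* opCtx) {
    // A fresh Client, so the command runs outside the caller's transaction.
    auto client = opCtx->getServiceContext()->makeClient("exampleAlternativeClient");

    // Per the TODO(SERVER-74658) above, this thread is (for now) opted out of
    // being killed by stepdown.
    {
        stdx::lock_guard<Client> lk(*client.get());
        client.get()->setSystemOperationUnkillableByStepdown(lk);
    }

    auto altOpCtx = client->makeOperationContext();
    AlternativeClientRegion acr{client};

    // ... run the out-of-transaction command using altOpCtx.get() ...
}
}  // namespace mongo
```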
+ { + stdx::lock_guard lk(*client.get()); + client.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto alternativeOpCtx = client->makeOperationContext(); opCtx = alternativeOpCtx.get(); AlternativeClientRegion acr{client}; - auto swDbInfo = - Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, info->dbName().toStringWithTenantId()); + auto swDbInfo = Grid::get(opCtx)->catalogCache()->getDatabase( + opCtx, DatabaseNameUtil::serializeForCatalog(info->dbName())); if (!swDbInfo.isOK()) { return swDbInfo.getStatus(); } @@ -67,7 +99,7 @@ Status populateCollectionUUIDMismatch(OperationContext* opCtx, auto response = executeCommandAgainstDatabasePrimary(opCtx, - info->dbName().db(), + DatabaseNameUtil::serialize(info->dbName()), swDbInfo.getValue(), listCollections.toBSON({}), ReadPreferenceSetting{ReadPreference::PrimaryOnly}, diff --git a/src/mongo/s/collection_uuid_mismatch.h b/src/mongo/s/collection_uuid_mismatch.h index 6eaee37841a7e..8ff04227f0a9b 100644 --- a/src/mongo/s/collection_uuid_mismatch.h +++ b/src/mongo/s/collection_uuid_mismatch.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/status.h" #include "mongo/db/operation_context.h" namespace mongo { diff --git a/src/mongo/s/commands/SConscript b/src/mongo/s/commands/SConscript index ae3982f6d75b4..d85b801ce1e69 100644 --- a/src/mongo/s/commands/SConscript +++ b/src/mongo/s/commands/SConscript @@ -33,7 +33,7 @@ env.Library( 'cluster_abort_transaction_cmd_s.cpp', 'cluster_analyze_cmd.cpp', 'cluster_build_info.cpp', - 'cluster_bulk_write_cmd.cpp', + 'cluster_bulk_write_cmd_s.cpp', 'cluster_cleanup_reshard_collection_cmd.cpp', 'cluster_coll_stats_cmd.cpp', 'cluster_collection_mod_cmd.cpp', @@ -58,6 +58,7 @@ env.Library( 'cluster_filemd5_cmd.cpp', 'cluster_find_and_modify_cmd.cpp', 'cluster_find_cmd_s.cpp', + 'cluster_fle2_cleanup_cmd.cpp', 'cluster_fle2_compact_cmd.cpp', 'cluster_fle2_get_count_info_cmd.cpp', 'cluster_fsync_cmd.cpp', @@ -87,6 +88,7 @@ env.Library( 'cluster_rename_collection_cmd.cpp', 'cluster_repair_sharded_collection_chunks_history_cmd.cpp', 'cluster_repl_set_get_status_cmd.cpp', + 'cluster_reset_placement_history_cmd.cpp', 'cluster_reshard_collection_cmd.cpp', 'cluster_rwc_defaults_commands.cpp', 'cluster_set_allow_migrations_cmd.cpp', @@ -97,7 +99,7 @@ env.Library( 'cluster_set_user_write_block_mode_command.cpp', 'cluster_shard_collection_cmd.cpp', 'cluster_shutdown_cmd.cpp', - 'cluster_transition_to_catalog_shard_cmd.cpp', + 'cluster_transition_from_dedicated_config_server_cmd.cpp', 'cluster_transition_to_dedicated_config_server_cmd.cpp', 'cluster_validate_cmd.cpp', 'cluster_validate_db_metadata_cmd.cpp', @@ -115,6 +117,7 @@ env.Library( '$BUILD_DIR/mongo/db/auth/auth_checks', '$BUILD_DIR/mongo/db/catalog/collection_uuid_mismatch_info', '$BUILD_DIR/mongo/db/change_stream_options_manager', + '$BUILD_DIR/mongo/db/commands/bulk_write_common', '$BUILD_DIR/mongo/db/commands/bulk_write_parser', '$BUILD_DIR/mongo/db/commands/cluster_server_parameter_cmds_idl', '$BUILD_DIR/mongo/db/commands/cluster_server_parameter_commands_invocation', @@ -144,6 +147,7 @@ env.Library( '$BUILD_DIR/mongo/db/query/command_request_response', '$BUILD_DIR/mongo/db/query/cursor_response_idl', '$BUILD_DIR/mongo/db/query/map_reduce_output_format', + '$BUILD_DIR/mongo/db/query/query_shape', '$BUILD_DIR/mongo/db/read_write_concern_defaults', '$BUILD_DIR/mongo/db/repl/hello_auth', '$BUILD_DIR/mongo/db/repl/hello_command', @@ -201,6 +205,7 @@ env.Library( '$BUILD_DIR/mongo/s/sharding_api', '$BUILD_DIR/mongo/s/sharding_router_api', 
'$BUILD_DIR/mongo/transport/message_compressor', + '$BUILD_DIR/mongo/transport/service_executor', '$BUILD_DIR/mongo/transport/transport_layer_common', ], ) @@ -256,27 +261,30 @@ env.Library( ) env.CppUnitTest( - target="s_commands_test", + target='s_commands_test', source=[ - "cluster_aggregate_test.cpp", - "cluster_command_test_fixture.cpp", - "cluster_delete_test.cpp", - "cluster_distinct_test.cpp", - "cluster_find_and_modify_test.cpp", - "cluster_find_test.cpp", - "cluster_insert_test.cpp", - "cluster_update_test.cpp", - "cluster_validate_db_metadata_cmd_test.cpp", - "document_shard_key_update_test.cpp", + 'cluster_aggregate_test.cpp', + 'cluster_bulk_write_test.cpp', + 'cluster_command_test_fixture.cpp', + 'cluster_delete_test.cpp', + 'cluster_distinct_test.cpp', + 'cluster_find_and_modify_test.cpp', + 'cluster_find_test.cpp', + 'cluster_insert_test.cpp', + 'cluster_update_test.cpp', + 'cluster_validate_db_metadata_cmd_test.cpp', + 'document_shard_key_update_test.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/auth/saslauth', '$BUILD_DIR/mongo/db/pipeline/process_interface/mongos_process_interface_factory', '$BUILD_DIR/mongo/db/read_write_concern_defaults_mock', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/s/query/cluster_aggregate', '$BUILD_DIR/mongo/s/sharding_router_test_fixture', '$BUILD_DIR/mongo/s/vector_clock_mongos', + '$BUILD_DIR/mongo/transport/service_executor', 'cluster_commands', 'cluster_commands_common', ], diff --git a/src/mongo/s/commands/cluster_abort_reshard_collection_cmd.cpp b/src/mongo/s/commands/cluster_abort_reshard_collection_cmd.cpp index 78d181db9f5b6..29fbe13b2d68e 100644 --- a/src/mongo/s/commands/cluster_abort_reshard_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_abort_reshard_collection_cmd.cpp @@ -28,14 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/abort_reshard_collection_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -100,10 +112,7 @@ class AbortReshardCollectionCommand : public TypedCommand +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/s/commands/cluster_abort_transaction_cmd.h" namespace mongo { @@ -42,7 +51,7 @@ struct ClusterAbortTransactionCmdS { return kApiVersions1; } - static Status checkAuthForOperation(OperationContext* opCtx) { + static Status checkAuthForOperation(OperationContext*, const DatabaseName&, const BSONObj&) { return Status::OK(); } diff --git a/src/mongo/s/commands/cluster_add_shard_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_cmd.cpp index 9c059c51e592b..6a56ab9a04b78 100644 --- a/src/mongo/s/commands/cluster_add_shard_cmd.cpp 
+++ b/src/mongo/s/commands/cluster_add_shard_cmd.cpp @@ -28,14 +28,27 @@ */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/add_shard_request_type.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -67,11 +80,11 @@ class AddShardCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::addShard)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::addShard)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp index 83628cd10f9b7..9346567c9223e 100644 --- a/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp +++ b/src/mongo/s/commands/cluster_add_shard_to_zone_cmd.cpp @@ -28,15 +28,30 @@ */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/write_concern_options.h" -#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/add_shard_to_zone_request_type.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -81,12 +96,13 @@ class AddShardToZoneCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::enableSharding)) { + if (as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::enableSharding)) { return Status::OK(); } diff --git a/src/mongo/s/commands/cluster_aggregate_test.cpp b/src/mongo/s/commands/cluster_aggregate_test.cpp index 2f9c9a137b8b2..d1022358e7a7d 100644 --- 
a/src/mongo/s/commands/cluster_aggregate_test.cpp +++ b/src/mongo/s/commands/cluster_aggregate_test.cpp @@ -28,11 +28,33 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/commands/cluster_command_test_fixture.h" #include "mongo/s/query/cluster_aggregate.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/s/commands/cluster_analyze_cmd.cpp b/src/mongo/s/commands/cluster_analyze_cmd.cpp index ea0ccdb84a7ba..346146eb3661c 100644 --- a/src/mongo/s/commands/cluster_analyze_cmd.cpp +++ b/src/mongo/s/commands/cluster_analyze_cmd.cpp @@ -27,16 +27,40 @@ * it in the license file. */ +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/db/query/analyze_command_gen.h" #include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/server_options.h" #include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -72,9 +96,8 @@ class ClusterAnalyzeCmd final : public TypedCommand { void typedRun(OperationContext* opCtx) { uassert(6765500, "Analyze command requires common query framework feature flag to be enabled", - serverGlobalParams.featureCompatibility.isVersionInitialized() && - feature_flags::gFeatureFlagCommonQueryFramework.isEnabled( - serverGlobalParams.featureCompatibility)); + feature_flags::gFeatureFlagCommonQueryFramework.isEnabled( + serverGlobalParams.featureCompatibility)); const NamespaceString& nss = ns(); @@ -91,8 +114,10 @@ class ClusterAnalyzeCmd final : public TypedCommand { CommandHelpers::filterCommandRequestForPassthrough(unparsedRequest().body)), ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, - BSONObj() /* query */, - BSONObj() /* collation */); + BSONObj() /*query*/, + BSONObj() /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); for (const auto& shardResult : shardResponses) { const auto& shardResponse = 
uassertStatusOK(std::move(shardResult.swResponse)); @@ -109,7 +134,8 @@ class ClusterAnalyzeCmd final : public TypedCommand { const NamespaceString& ns = request().getNamespace(); uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to call analyze on collection " << ns, + str::stream() << "Not authorized to call analyze on collection " + << ns.toStringForErrorMsg(), authzSession->isAuthorizedForActionsOnNamespace(ns, ActionType::analyze)); } }; diff --git a/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp index c04ad8cd926ad..142a5b664acbe 100644 --- a/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp +++ b/src/mongo/s/commands/cluster_analyze_shard_key_cmd.cpp @@ -27,17 +27,48 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/platform/random.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/analyze_shard_key_cmd_gen.h" -#include "mongo/s/analyze_shard_key_feature_flag_gen.h" #include "mongo/s/analyze_shard_key_util.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -76,7 +107,7 @@ class AnalyzeShardKeyCmd : public TypedCommand { const auto& catalogCache = Grid::get(opCtx)->catalogCache(); const auto cri = uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, nss)); - const auto primaryShardId = cri.cm.dbPrimary(); + auto primaryShardId = cri.cm.dbPrimary(); std::set candidateShardIds; if (cri.cm.isSharded()) { @@ -194,10 +225,7 @@ class AnalyzeShardKeyCmd : public TypedCommand { std::string help() const override { return "Returns metrics for evaluating a shard key for a collection."; } -}; - -MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(AnalyzeShardKeyCmd, - analyze_shard_key::gFeatureFlagAnalyzeShardKey); +} analyzeShardKeyCmd; } // namespace diff --git a/src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp b/src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp index f0903d3894c9c..edf9a227c4b25 100644 --- a/src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp +++ b/src/mongo/s/commands/cluster_balancer_collection_status_cmd.cpp @@ -28,21 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" 
+#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/repl_client_info.h" +#include "mongo/db/service_context.h" #include "mongo/idl/idl_parser.h" -#include "mongo/s/catalog_cache_loader.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/balancer_collection_status_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/commands/cluster_build_info.cpp b/src/mongo/s/commands/cluster_build_info.cpp index 86661722ff849..0592c6d75832e 100644 --- a/src/mongo/s/commands/cluster_build_info.cpp +++ b/src/mongo/s/commands/cluster_build_info.cpp @@ -28,11 +28,24 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/request_execution_context.h" +#include "mongo/db/service_context.h" #include "mongo/executor/async_request_executor.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/util/decorable.h" #include "mongo/util/future.h" #include "mongo/util/version.h" diff --git a/src/mongo/s/commands/cluster_bulk_write_cmd.cpp b/src/mongo/s/commands/cluster_bulk_write_cmd.cpp deleted file mode 100644 index 7591ee50d63e7..0000000000000 --- a/src/mongo/s/commands/cluster_bulk_write_cmd.cpp +++ /dev/null @@ -1,120 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. 
- */
-
-#include "mongo/platform/basic.h"
-
-#include
-#include
-
-#include "mongo/db/auth/action_set.h"
-#include "mongo/db/auth/action_type.h"
-#include "mongo/db/auth/authorization_session.h"
-#include "mongo/db/catalog/collection_operation_source.h"
-#include "mongo/db/commands.h"
-#include "mongo/db/commands/bulk_write_gen.h"
-#include "mongo/db/query/query_knobs_gen.h"
-#include "mongo/db/server_feature_flags_gen.h"
-#include "mongo/db/server_options.h"
-#include "mongo/db/service_context.h"
-#include "mongo/s/cluster_write.h"
-
-namespace mongo {
-namespace {
-
-class ClusterBulkWriteCmd : public BulkWriteCmdVersion1Gen<ClusterBulkWriteCmd> {
-public:
-    bool adminOnly() const final {
-        return true;
-    }
-
-    AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
-        return AllowedOnSecondary::kNever;
-    }
-
-    bool supportsRetryableWrite() const final {
-        return true;
-    }
-
-    bool allowedInTransactions() const final {
-        return true;
-    }
-
-    ReadWriteType getReadWriteType() const final {
-        return Command::ReadWriteType::kWrite;
-    }
-
-    bool collectsResourceConsumptionMetrics() const final {
-        return true;
-    }
-
-    bool shouldAffectCommandCounter() const final {
-        return false;
-    }
-
-    std::string help() const override {
-        return "command to apply inserts, updates and deletes in bulk";
-    }
-
-    class Invocation final : public InvocationBaseGen {
-    public:
-        using InvocationBaseGen::InvocationBaseGen;
-
-        bool supportsWriteConcern() const final {
-            return true;
-        }
-
-        NamespaceString ns() const final {
-            return NamespaceString(request().getDbName());
-        }
-
-        Reply typedRun(OperationContext* opCtx) final {
-            uassert(ErrorCodes::CommandNotSupported,
-                    "BulkWrite on mongos is not currently supported.",
-                    false);
-
-            uassert(
-                ErrorCodes::CommandNotSupported,
-                "BulkWrite may not be run without featureFlagBulkWriteCommand enabled",
-                gFeatureFlagBulkWriteCommand.isEnabled(serverGlobalParams.featureCompatibility));
-
-            auto replyItems = cluster::bulkWrite(opCtx, request());
-
-            auto reply = Reply();
-            // TODO(SERVER-72794): Support cursor response for bulkWrite on mongos.
-            reply.setCursor(BulkWriteCommandResponseCursor(0, replyItems));
-            return reply;
-        }
-
-        void doCheckAuthorization(OperationContext* opCtx) const final {}
-    };
-
-} clusterBulkWriteCmd;
-
-} // namespace
-} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_bulk_write_cmd.h b/src/mongo/s/commands/cluster_bulk_write_cmd.h
new file mode 100644
index 0000000000000..fa79ff3c64056
--- /dev/null
+++ b/src/mongo/s/commands/cluster_bulk_write_cmd.h
@@ -0,0 +1,303 @@
+/**
+ * Copyright (C) 2023-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+
+#include "mongo/base/error_codes.h"
+#include "mongo/base/status_with.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/client/read_preference.h"
+#include "mongo/db/api_parameters.h"
+#include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/privilege.h"
+#include "mongo/db/basic_types_gen.h"
+#include "mongo/db/commands.h"
+#include "mongo/db/commands/bulk_write_common.h"
+#include "mongo/db/commands/bulk_write_gen.h"
+#include "mongo/db/commands/bulk_write_parser.h"
+#include "mongo/db/curop.h"
+#include "mongo/db/feature_flag.h"
+#include "mongo/db/namespace_string.h"
+#include "mongo/db/not_primary_error_tracker.h"
+#include "mongo/db/operation_context.h"
+#include "mongo/db/query/find_common.h"
+#include "mongo/db/repl/read_concern_args.h"
+#include "mongo/db/server_feature_flags_gen.h"
+#include "mongo/db/server_options.h"
+#include "mongo/db/service_context.h"
+#include "mongo/db/session/logical_session_id_gen.h"
+#include "mongo/rpc/op_msg.h"
+#include "mongo/s/cluster_write.h"
+#include "mongo/s/grid.h"
+#include "mongo/s/query/cluster_client_cursor.h"
+#include "mongo/s/query/cluster_client_cursor_guard.h"
+#include "mongo/s/query/cluster_client_cursor_impl.h"
+#include "mongo/s/query/cluster_client_cursor_params.h"
+#include "mongo/s/query/cluster_cursor_manager.h"
+#include "mongo/s/query/cluster_query_result.h"
+#include "mongo/s/query/router_exec_stage.h"
+#include "mongo/s/query/router_stage_queued_data.h"
+#include "mongo/util/assert_util.h"
+#include "mongo/util/decorable.h"
+
+namespace mongo {
+
+template <typename Impl>
+class ClusterBulkWriteCmd : public Command {
+public:
+    ClusterBulkWriteCmd(StringData name) : Command(name) {}
+
+    bool adminOnly() const final {
+        return true;
+    }
+
+    const std::set<std::string>& apiVersions() const {
+        return Impl::getApiVersions();
+    }
+
+    std::unique_ptr<CommandInvocation> parse(OperationContext* opCtx,
+                                             const OpMsgRequest& request) final {
+        auto parsedRequest =
+            BulkWriteCommandRequest::parse(IDLParserContext{"clusterBulkWriteParse"}, request);
+        return std::make_unique<Invocation>(this, request, std::move(parsedRequest));
+    }
+
+    AllowedOnSecondary secondaryAllowed(ServiceContext*) const override {
+        return AllowedOnSecondary::kNever;
+    }
+
+    bool supportsRetryableWrite() const final {
+        return true;
+    }
+
+    bool allowedInTransactions() const final {
+        return true;
+    }
+
+    ReadWriteType getReadWriteType() const final {
+        return Command::ReadWriteType::kWrite;
+    }
+
+    bool collectsResourceConsumptionMetrics() const final {
+        return true;
+    }
+
+    bool shouldAffectCommandCounter() const final {
+        return false;
+    }
+
+    std::string help() const override {
+        return "command to apply inserts, updates and deletes in bulk";
+    }
+
+    class Invocation : public CommandInvocation {
+    public:
+        Invocation(const ClusterBulkWriteCmd* command,
+                   const OpMsgRequest& request,
+                   BulkWriteCommandRequest bulkRequest)
+            : CommandInvocation(command),
+              _opMsgRequest{&request},
+              _request{std::move(bulkRequest)} {}
+
+        const BulkWriteCommandRequest& getBulkRequest() const {
+            return _request;
+        }
+
+        bool getBypass() const {
+            return _request.getBypassDocumentValidation();
+        }
+
+    private:
+        void preRunImplHook(OperationContext* opCtx) const {
+            Impl::checkCanRunHere(opCtx);
+        }
+
+        void doCheckAuthorizationHook(AuthorizationSession* authzSession) const {
+            Impl::doCheckAuthorization(authzSession, getBypass(), getBulkRequest());
+        }
+
+        NamespaceString ns() const final {
+            return NamespaceString(_request.getDbName());
+        }
+
+        bool supportsWriteConcern() const override {
+            return true;
+        }
+
+        void doCheckAuthorization(OperationContext* opCtx) const final {
+            try {
+                doCheckAuthorizationHook(AuthorizationSession::get(opCtx->getClient()));
+            } catch (const DBException& e) {
+                NotPrimaryErrorTracker::get(opCtx->getClient()).recordError(e.code());
+                throw;
+            }
+        }
+
+        const ClusterBulkWriteCmd* command() const {
+            return static_cast<const ClusterBulkWriteCmd*>(definition());
+        }
+
+        BulkWriteCommandReply _populateCursorReply(
+            OperationContext* opCtx,
+            BulkWriteCommandRequest& bulkRequest,
+            const OpMsgRequest& unparsedRequest,
+            std::vector<BulkWriteReplyItem> replyItems) const {
+            const auto& req = bulkRequest;
+            auto reqObj = unparsedRequest.body;
+
+            const NamespaceString cursorNss =
+                NamespaceString::makeBulkWriteNSS(req.getDollarTenant());
+            ClusterClientCursorParams params(cursorNss,
+                                             APIParameters::get(opCtx),
+                                             ReadPreferenceSetting::get(opCtx),
+                                             repl::ReadConcernArgs::get(opCtx),
+                                             [&] {
+                                                 if (!opCtx->getLogicalSessionId())
+                                                     return OperationSessionInfoFromClient();
+                                                 // TODO (SERVER-77506): This code path does not
+                                                 // clear the setAutocommit field on the presence of
+                                                 // TransactionRouter::get
+                                                 return OperationSessionInfoFromClient(
+                                                     *opCtx->getLogicalSessionId(),
+                                                     opCtx->getTxnNumber());
+                                             }());
+
+            long long batchSize = std::numeric_limits<long long>::max();
+            if (req.getCursor() && req.getCursor()->getBatchSize()) {
+                params.batchSize = req.getCursor()->getBatchSize();
+                batchSize = *req.getCursor()->getBatchSize();
+            }
+            params.originatingCommandObj = reqObj.getOwned();
+            params.originatingPrivileges = bulk_write_common::getPrivileges(req);
+
+            auto queuedDataStage = std::make_unique<RouterStageQueuedData>(opCtx);
+            for (auto& replyItem : replyItems) {
+                queuedDataStage->queueResult(replyItem.toBSON());
+            }
+
+            auto ccc =
+                ClusterClientCursorImpl::make(opCtx, std::move(queuedDataStage), std::move(params));
+
+            size_t numRepliesInFirstBatch = 0;
+            FindCommon::BSONArrayResponseSizeTracker responseSizeTracker;
+            for (long long objCount = 0; objCount < batchSize; objCount++) {
+                auto next = uassertStatusOK(ccc->next());
+
+                if (next.isEOF()) {
+                    break;
+                }
+
+                auto nextObj = *next.getResult();
+                if (!responseSizeTracker.haveSpaceForNext(nextObj)) {
+                    ccc->queueResult(nextObj);
+                    break;
+                }
+
+                numRepliesInFirstBatch++;
+                responseSizeTracker.add(nextObj);
+            }
+            CurOp::get(opCtx)->setEndOfOpMetrics(numRepliesInFirstBatch);
+            if (numRepliesInFirstBatch == replyItems.size()) {
+                return BulkWriteCommandReply(
+                    BulkWriteCommandResponseCursor(
+                        0, std::vector<BulkWriteReplyItem>(std::move(replyItems))),
+                    0 /* TODO SERVER-76267: correctly populate numErrors */);
+            }
+
+            ccc->detachFromOperationContext();
+            ccc->incNBatches();
+
+            auto authUser =
+                AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserName();
+            auto cursorId = uassertStatusOK(Grid::get(opCtx)->getCursorManager()->registerCursor(
+                opCtx,
+                ccc.releaseCursor(),
+                cursorNss,
+                ClusterCursorManager::CursorType::QueuedData,
+                ClusterCursorManager::CursorLifetime::Mortal,
+                authUser));
+
+            // Record the cursorID in CurOp.
+            CurOp::get(opCtx)->debug().cursorid = cursorId;
+
+            replyItems.resize(numRepliesInFirstBatch);
+            return BulkWriteCommandReply(
+                BulkWriteCommandResponseCursor(
+                    cursorId, std::vector<BulkWriteReplyItem>(std::move(replyItems))),
+                0 /* TODO SERVER-76267: correctly populate numErrors */);
+        }
+
+        bool runImpl(OperationContext* opCtx,
+                     const OpMsgRequest& request,
+                     BulkWriteCommandRequest& bulkRequest,
+                     BSONObjBuilder& result) const {
+            BulkWriteCommandReply response;
+
+            uassert(
+                ErrorCodes::CommandNotSupported,
+                "BulkWrite may not be run without featureFlagBulkWriteCommand enabled",
+                gFeatureFlagBulkWriteCommand.isEnabled(serverGlobalParams.featureCompatibility));
+
+            bulk_write_common::validateRequest(bulkRequest, opCtx->isRetryableWrite());
+
+            auto replyItems = cluster::bulkWrite(opCtx, bulkRequest);
+            response = _populateCursorReply(opCtx, bulkRequest, request, std::move(replyItems));
+            result.appendElements(response.toBSON());
+            return true;
+        }
+
+        void run(OperationContext* opCtx, rpc::ReplyBuilderInterface* result) {
+            preRunImplHook(opCtx);
+
+            BSONObjBuilder bob = result->getBodyBuilder();
+            bool ok = runImpl(opCtx, *_opMsgRequest, _request, bob);
+            if (!ok) {
+                CommandHelpers::appendSimpleCommandStatus(bob, ok);
+            }
+        }
+
+        const OpMsgRequest* _opMsgRequest;
+        BulkWriteCommandRequest _request;
+    };
+};
+
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_bulk_write_cmd_s.cpp b/src/mongo/s/commands/cluster_bulk_write_cmd_s.cpp
new file mode 100644
index 0000000000000..3cd3176e85c4d
--- /dev/null
+++ b/src/mongo/s/commands/cluster_bulk_write_cmd_s.cpp
@@ -0,0 +1,59 @@
+/**
+ * Copyright (C) 2023-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "mongo/s/commands/cluster_bulk_write_cmd.h"
+
+#include "mongo/db/commands/bulk_write_common.h"
+
+namespace mongo {
+namespace {
+
+struct ClusterBulkWriteCmdS {
+    static constexpr StringData kName = "bulkWrite"_sd;
+
+    static const std::set<std::string>& getApiVersions() {
+        return kApiVersions1;
+    }
+
+    static void doCheckAuthorization(AuthorizationSession* authzSession,
+                                     bool bypass,
+                                     const BulkWriteCommandRequest& op) {
+        uassert(ErrorCodes::Unauthorized,
+                "unauthorized",
+                authzSession->isAuthorizedForPrivileges(bulk_write_common::getPrivileges(op)));
+    }
+
+    static void checkCanRunHere(OperationContext* opCtx) {
+        // Can always run on a mongos.
+    }
+};
+ClusterBulkWriteCmd<ClusterBulkWriteCmdS> clusterBulkWriteCmdS{ClusterBulkWriteCmdS::kName};
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_bulk_write_test.cpp b/src/mongo/s/commands/cluster_bulk_write_test.cpp
new file mode 100644
index 0000000000000..ef57710e467e7
--- /dev/null
+++ b/src/mongo/s/commands/cluster_bulk_write_test.cpp
@@ -0,0 +1,106 @@
+/**
+ * Copyright (C) 2018-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * <http://www.mongodb.com/licensing/server-side-public-license>.
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+
+#include "mongo/platform/basic.h"
+
+#include "mongo/db/commands.h"
+#include "mongo/db/commands/bulk_write_gen.h"
+#include "mongo/idl/server_parameter_test_util.h"
+#include "mongo/s/commands/cluster_command_test_fixture.h"
+
+#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
+
+
+namespace mongo {
+namespace {
+
+class ClusterBulkWriteTest : public ClusterCommandTestFixture {
+protected:
+    const BSONObj kBulkWriteCmdTargeted{fromjson(
+        "{bulkWrite: 1, ops: [{insert: 0, document: {'_id': -1}}], nsInfo: [{ns: 'test.coll'}]}")};
+
+    const BSONObj kBulkWriteCmdScatterGather{
+        fromjson("{bulkWrite: 1, ops: [{insert: 0, document: {'_id': -1}}, {insert: 0, document: "
+                 "{'_id': 1}}], nsInfo: [{ns: 'test.coll'}]}")};
+
+    void expectInspectRequest(int shardIndex, InspectionCallback cb) override {
+        onCommandForPoolExecutor([&](const executor::RemoteCommandRequest& request) {
+            ASSERT_EQ(1, request.cmdObj.firstElement().Int());
+            cb(request);
+
+            BSONObjBuilder bob;
+            std::vector<BulkWriteReplyItem> replyItems;
+            BulkWriteReplyItem item{0};
+            item.setN(1);
+            replyItems.push_back(item);
+            auto cursor = BulkWriteCommandResponseCursor(0, replyItems);
+            bob.append("cursor", cursor.toBSON());
+            bob.append("numErrors", 0);
+            appendTxnResponseMetadata(bob);
+            return bob.obj();
+        });
+    }
+
+    void expectReturnsSuccess(int shardIndex) override {
+        onCommandForPoolExecutor([this, shardIndex](const executor::RemoteCommandRequest& request) {
+            ASSERT_EQ(1, request.cmdObj.firstElement().Int());
+
+            BSONObjBuilder bob;
+            std::vector<BulkWriteReplyItem> replyItems;
+            BulkWriteReplyItem item{0};
+            item.setN(1);
+            replyItems.push_back(item);
+            auto cursor = BulkWriteCommandResponseCursor(0, replyItems);
+            bob.append("cursor", cursor.toBSON());
+            bob.append("numErrors", 0);
+            appendTxnResponseMetadata(bob);
+            return bob.obj();
+        });
+    }
+};
+
+TEST_F(ClusterBulkWriteTest, NoErrors) {
+    RAIIServerParameterControllerForTest controller("featureFlagBulkWriteCommand", true);
+    testNoErrors(kBulkWriteCmdTargeted, kBulkWriteCmdScatterGather);
+}
+
+TEST_F(ClusterBulkWriteTest, AttachesAtClusterTimeForSnapshotReadConcern) {
+    RAIIServerParameterControllerForTest controller("featureFlagBulkWriteCommand", true);
+    testAttachesAtClusterTimeForSnapshotReadConcern(kBulkWriteCmdTargeted,
+                                                    kBulkWriteCmdScatterGather);
+}
+
+TEST_F(ClusterBulkWriteTest, SnapshotReadConcernWithAfterClusterTime) {
+    RAIIServerParameterControllerForTest controller("featureFlagBulkWriteCommand", true);
+    testSnapshotReadConcernWithAfterClusterTime(kBulkWriteCmdTargeted, kBulkWriteCmdScatterGather);
+}
+
+} // namespace
+} // namespace mongo
diff --git a/src/mongo/s/commands/cluster_check_metadata_consistency_cmd.cpp b/src/mongo/s/commands/cluster_check_metadata_consistency_cmd.cpp
index 477e51f2d16fd..fdcd80f132943 100644
--- a/src/mongo/s/commands/cluster_check_metadata_consistency_cmd.cpp
+++ b/src/mongo/s/commands/cluster_check_metadata_consistency_cmd.cpp
@@ -28,19 +28,70 @@
 */
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+
+#include "mongo/base/error_codes.h"
+#include "mongo/base/status_with.h"
+#include "mongo/base/string_data.h"
+#include "mongo/bson/bsonelement.h"
+#include "mongo/bson/bsonobj.h"
+#include "mongo/bson/json.h"
+#include "mongo/client/read_preference.h"
+#include "mongo/db/api_parameters.h"
+#include "mongo/db/auth/action_type.h"
 #include "mongo/db/auth/authorization_session.h"
+#include "mongo/db/auth/resource_pattern.h"
+#include "mongo/db/basic_types_gen.h"
 #include
"mongo/db/commands.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/metadata_consistency_types_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/cursor_response_gen.h" #include "mongo/db/query/find_common.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/check_metadata_consistency_gen.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/query/async_results_merger_params_gen.h" +#include "mongo/s/query/cluster_client_cursor.h" +#include "mongo/s/query/cluster_client_cursor_guard.h" #include "mongo/s/query/cluster_client_cursor_impl.h" +#include "mongo/s/query/cluster_client_cursor_params.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/establish_cursors.h" -#include "mongo/s/query/store_possible_cursor.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_version.h" #include "mongo/s/sharding_feature_flags_gen.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -59,10 +110,12 @@ stdx::unordered_set getAllDbPrimaryShards(OperationContext* opCtx) { opCtx, aggRequest, {repl::ReadConcernLevel::kMajorityReadConcern}); stdx::unordered_set shardIds; - shardIds.reserve(aggResponse.size()); + shardIds.reserve(aggResponse.size() + 1); for (auto&& responseEntry : aggResponse) { shardIds.insert(responseEntry.firstElement().str()); } + // The config server is authoritative for config database + shardIds.insert(ShardId::kConfigServerId); return shardIds; } @@ -111,10 +164,12 @@ class CheckMetadataConsistencyCmd final : public TypedCommand> requests; ShardsvrCheckMetadataConsistency shardsvrRequest{nss}; @@ -122,17 +177,21 @@ class CheckMetadataConsistencyCmd final : public TypedCommandcatalogCache()->getDatabase( - opCtx, nss.dbName().toStringWithTenantId())); + opCtx, DatabaseNameUtil::serializeForCatalog(nss.dbName()))); ShardsvrCheckMetadataConsistency shardsvrRequest{nss}; shardsvrRequest.setDbName(nss.dbName()); @@ -166,12 +225,15 @@ class CheckMetadataConsistencyCmd final : public TypedCommand>& requests) { + const std::vector>& requests, + std::vector opKeys = {}) { ClusterClientCursorParams params( nss, APIParameters::get(opCtx), - ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly())); + ReadPreferenceSetting(ReadPreference::PrimaryOnly, TagSet::primaryOnly()), + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()); // Establish the cursors with a consistent shardVersion across shards. 
params.remotes = establishCursors( @@ -180,7 +242,9 @@ class CheckMetadataConsistencyCmd final : public TypedCommanddetachFromOperationContext(); + auto&& opDebug = CurOp::get(opCtx)->debug(); + opDebug.nShards = ccc->getNumRemotes(); + opDebug.additiveMetrics.nBatches = 1; + opDebug.additiveMetrics.nreturned = firstBatch.size(); + if (cursorState == ClusterCursorManager::CursorState::Exhausted) { + opDebug.cursorExhausted = true; + CursorInitialReply resp; InitialResponseCursor initRespCursor{std::move(firstBatch)}; initRespCursor.setResponseCursorBase({0LL /* cursorId */, nss}); @@ -256,6 +327,9 @@ class CheckMetadataConsistencyCmd final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(resourcePattern, - ActionType::checkMetadataConsistency); + auto* as = AuthorizationSession::get(opCtx->getClient()); + const auto isAuthorizedOnResource = [as](const ResourcePattern& resourcePattern) { + return as->isAuthorizedForActionsOnResource(resourcePattern, + ActionType::checkMetadataConsistency); }; const auto nss = ns(); @@ -283,22 +357,26 @@ class CheckMetadataConsistencyCmd final : public TypedCommand +#include +#include "mongo/base/error_codes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/cleanup_reshard_collection_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -99,10 +111,7 @@ class CleanupReshardCollectionCmd : public TypedCommand #include #include -#include "mongo/db/auth/action_set.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/commands/cluster_commands_gen.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/cluster_coll_stats_cmd.cpp b/src/mongo/s/commands/cluster_coll_stats_cmd.cpp index d830e04415cff..23511514baae1 100644 --- a/src/mongo/s/commands/cluster_coll_stats_cmd.cpp +++ b/src/mongo/s/commands/cluster_coll_stats_cmd.cpp @@ -28,16 +28,51 @@ */ -#include "mongo/platform/basic.h" - +#include +#include 
+#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/db/timeseries/timeseries_commands_conversion_helper.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/collection_routing_info_targeter.h" -#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -247,8 +282,10 @@ class CollectionStats : public BasicCommand { opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObjToSend)), ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, - {}, - {}); + {} /*query*/, + {} /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); BSONObjBuilder shardStats; std::map counts; @@ -346,7 +383,7 @@ class CollectionStats : public BasicCommand { shardStats.append(shardId.toString(), scaleIndividualShardStatistics(res, scale)); } - result.append("ns", nss.ns()); + result.append("ns", NamespaceStringUtil::serialize(nss)); for (const auto& countEntry : counts) { if (fieldIsAnyOf(countEntry.first, diff --git a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp index a930d04bcb5a9..9eb69025e261d 100644 --- a/src/mongo/s/commands/cluster_collection_mod_cmd.cpp +++ b/src/mongo/s/commands/cluster_collection_mod_cmd.cpp @@ -28,18 +28,52 @@ */ -#include "mongo/platform/basic.h" - +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" +#include "mongo/db/client.h" #include "mongo/db/coll_mod_gen.h" #include "mongo/db/coll_mod_reply_validation.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include "mongo/db/service_context.h" +#include 
"mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -98,7 +132,7 @@ class CollectionModCmd : public BasicCommandWithRequestParser "command"_attr = redact(cmdObj)); auto swDbInfo = Grid::get(opCtx)->catalogCache()->getDatabase( - opCtx, cmd.getDbName().toStringWithTenantId()); + opCtx, DatabaseNameUtil::serializeForCatalog(cmd.getDbName())); if (swDbInfo == ErrorCodes::NamespaceNotFound) { uassert( CollectionUUIDMismatchInfo( @@ -112,7 +146,8 @@ class CollectionModCmd : public BasicCommandWithRequestParser // Check for config.settings in the user command since a validator is allowed // internally on this collection but the user may not modify the validator. uassert(ErrorCodes::InvalidOptions, - str::stream() << "Document validators not allowed on system collection " << nss, + str::stream() << "Document validators not allowed on system collection " + << nss.toStringForErrorMsg(), nss != NamespaceString::kConfigSettingsNamespace); } @@ -122,7 +157,7 @@ class CollectionModCmd : public BasicCommandWithRequestParser auto cmdResponse = uassertStatusOK(executeCommandAgainstDatabasePrimary( opCtx, - dbName.toStringWithTenantId(), + DatabaseNameUtil::serialize(dbName), dbInfo, CommandHelpers::appendMajorityWriteConcern( collModCommand.toBSON({}), opCtx->getWriteConcern()), diff --git a/src/mongo/s/commands/cluster_command_test_fixture.cpp b/src/mongo/s/commands/cluster_command_test_fixture.cpp index 171c1772e6222..80c0d85bff76e 100644 --- a/src/mongo/s/commands/cluster_command_test_fixture.cpp +++ b/src/mongo/s/commands/cluster_command_test_fixture.cpp @@ -28,21 +28,44 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/commands/cluster_command_test_fixture.h" - +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include + +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/keys_collection_client_sharded.h" #include "mongo/db/keys_collection_manager.h" +#include "mongo/db/keys_collection_manager_gen.h" #include "mongo/db/logical_time_validator.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/request_execution_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/vector_clock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/commands/cluster_command_test_fixture.h" #include "mongo/s/commands/strategy.h" +#include 
"mongo/s/grid.h" +#include "mongo/transport/service_executor.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future.h" #include "mongo/util/options_parser/startup_option_init.h" +#include "mongo/util/tick_source.h" #include "mongo/util/tick_source_mock.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -129,7 +152,14 @@ DbResponse ClusterCommandTestFixture::runCommand(BSONObj cmd) { transport::ServiceExecutorContext::set(client.get(), std::move(seCtx)); } - const auto opMsgRequest = OpMsgRequest::fromDBAndBody(kNss.db(), cmd); + OpMsgRequest opMsgRequest; + + // If bulkWrite then append adminDB. + if (cmd.firstElementFieldNameStringData() == "bulkWrite") { + opMsgRequest = OpMsgRequest::fromDBAndBody(DatabaseName::kAdmin.db(), cmd); + } else { + opMsgRequest = OpMsgRequest::fromDBAndBody(kNss.db(), cmd); + } AlternativeClientRegion acr(client); auto rec = std::make_shared(opCtx.get(), opMsgRequest.serialize()); diff --git a/src/mongo/s/commands/cluster_command_test_fixture.h b/src/mongo/s/commands/cluster_command_test_fixture.h index 6b0fedc80fb0b..9b0e18001584d 100644 --- a/src/mongo/s/commands/cluster_command_test_fixture.h +++ b/src/mongo/s/commands/cluster_command_test_fixture.h @@ -29,10 +29,22 @@ #pragma once -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/dbmessage.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/read_write_concern_defaults_cache_lookup_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/platform/basic.h" #include "mongo/s/catalog_cache_test_fixture.h" +#include "mongo/util/fail_point.h" namespace mongo { diff --git a/src/mongo/s/commands/cluster_commit_reshard_collection_cmd.cpp b/src/mongo/s/commands/cluster_commit_reshard_collection_cmd.cpp index e6727fa53797a..32166d0d55b46 100644 --- a/src/mongo/s/commands/cluster_commit_reshard_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_commit_reshard_collection_cmd.cpp @@ -28,18 +28,26 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/s/catalog_cache.h" -#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/commit_reshard_collection_gen.h" -#include "mongo/s/request_types/sharded_ddl_commands_gen.h" -#include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -103,10 +111,7 @@ class CommitReshardCollectionCmd : public TypedCommand +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include 
"mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/s/commands/cluster_commit_transaction_cmd.h" namespace mongo { @@ -42,7 +51,9 @@ struct ClusterCommitTransactionCmdS { return kApiVersions1; } - static Status checkAuthForOperation(OperationContext* opCtx) { + static Status checkAuthForOperation(OperationContext* opCtx, + const DatabaseName&, + const BSONObj&) { return Status::OK(); } diff --git a/src/mongo/s/commands/cluster_compact_cmd.cpp b/src/mongo/s/commands/cluster_compact_cmd.cpp index 51f9602b777dd..bd33b891b3806 100644 --- a/src/mongo/s/commands/cluster_compact_cmd.cpp +++ b/src/mongo/s/commands/cluster_compact_cmd.cpp @@ -27,10 +27,18 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_configure_collection_balancing.cpp b/src/mongo/s/commands/cluster_configure_collection_balancing.cpp index f7f429176eee6..6b4e2fc0d052d 100644 --- a/src/mongo/s/commands/cluster_configure_collection_balancing.cpp +++ b/src/mongo/s/commands/cluster_configure_collection_balancing.cpp @@ -28,21 +28,29 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" -#include "mongo/db/catalog_raii.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/repl_client_info.h" -#include "mongo/idl/idl_parser.h" -#include "mongo/s/catalog_cache_loader.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/configure_collection_balancing_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/commands/cluster_configure_query_analyzer_cmd.cpp b/src/mongo/s/commands/cluster_configure_query_analyzer_cmd.cpp index 1a511206f046b..4930ac971bf31 100644 --- a/src/mongo/s/commands/cluster_configure_query_analyzer_cmd.cpp +++ b/src/mongo/s/commands/cluster_configure_query_analyzer_cmd.cpp @@ -27,15 +27,34 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/logv2/log.h" -#include "mongo/s/analyze_shard_key_feature_flag_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/analyze_shard_key_util.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/configure_query_analyzer_cmd_gen.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -45,6 +64,19 @@ namespace analyze_shard_key { namespace { +/** + * Returns a new command object with shard version and database version appended to it based on + * the given routing info. + */ +BSONObj makeVersionedCmdObj(const CollectionRoutingInfo& cri, + const BSONObj& unversionedCmdObj, + ShardId shardId) { + auto versionedCmdObj = appendShardVersion(unversionedCmdObj, + cri.cm.isSharded() ? cri.getShardVersion(shardId) + : ShardVersion::UNSHARDED()); + return appendDbVersionIfPresent(versionedCmdObj, cri.cm.dbVersion()); +} + class ConfigureQueryAnalyzerCmd : public TypedCommand { public: using Request = ConfigureQueryAnalyzer; @@ -58,13 +90,21 @@ class ConfigureQueryAnalyzerCmd : public TypedCommand const auto& nss = ns(); uassertStatusOK(validateNamespace(nss)); - const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - auto swResponse = configShard->runCommandWithFixedRetryAttempts( + const auto cri = uassertStatusOK( + Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss)); + const auto primaryShardId = cri.cm.dbPrimary(); + + auto shard = + uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, primaryShardId)); + auto versionedCmdObj = makeVersionedCmdObj( + cri, + CommandHelpers::filterCommandRequestForPassthrough(request().toBSON({})), + primaryShardId); + auto swResponse = shard->runCommandWithFixedRetryAttempts( opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, DatabaseName::kAdmin.toString(), - request().toBSON({}), + versionedCmdObj, Shard::RetryPolicy::kIdempotent); uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(swResponse)); @@ -103,10 +143,7 @@ class ConfigureQueryAnalyzerCmd : public TypedCommand return "Starts or stops collecting metrics about read and write queries against " "collection."; } -}; - -MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(ConfigureQueryAnalyzerCmd, - analyze_shard_key::gFeatureFlagAnalyzeShardKey); +} configureQueryAnalyzerCmd; } // namespace diff --git a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp index b92bff5ca16d8..7d1fef3e44eea 100644 --- a/src/mongo/s/commands/cluster_control_balancer_cmd.cpp +++ b/src/mongo/s/commands/cluster_control_balancer_cmd.cpp @@ -28,16 +28,33 @@ */ -#include "mongo/platform/basic.h" - -#include 
"mongo/base/init.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/commands/cluster_convert_to_capped_cmd.cpp b/src/mongo/s/commands/cluster_convert_to_capped_cmd.cpp index 6fa1d62d5d75e..004620b79218d 100644 --- a/src/mongo/s/commands/cluster_convert_to_capped_cmd.cpp +++ b/src/mongo/s/commands/cluster_convert_to_capped_cmd.cpp @@ -28,12 +28,39 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -54,14 +81,16 @@ bool nonShardedCollectionCommandPassthrough(OperationContext* opCtx, !cri.cm.isSharded()); auto responses = scatterGatherVersionedTargetByRoutingTable(opCtx, - dbName.toStringWithTenantId(), + DatabaseNameUtil::serialize(dbName), nss, cri, cmdObj, ReadPreferenceSetting::get(opCtx), retryPolicy, - {}, - {}); + {} /*query*/, + {} /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); invariant(responses.size() == 1); const auto cmdResponse = uassertStatusOK(std::move(responses.front().swResponse)); diff --git a/src/mongo/s/commands/cluster_coordinate_commit_txn.cpp b/src/mongo/s/commands/cluster_coordinate_commit_txn.cpp index 62abf79ce99a7..1e5925e5b29b7 100644 --- a/src/mongo/s/commands/cluster_coordinate_commit_txn.cpp +++ b/src/mongo/s/commands/cluster_coordinate_commit_txn.cpp @@ -27,9 +27,15 @@ * it in the license file. 
*/ +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/commands/txn_two_phase_commit_cmds_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/s/commands/cluster_count_cmd.h b/src/mongo/s/commands/cluster_count_cmd.h index 1325c89a21eec..cde790171c82a 100644 --- a/src/mongo/s/commands/cluster_count_cmd.h +++ b/src/mongo/s/commands/cluster_count_cmd.h @@ -96,7 +96,7 @@ class ClusterCountCmdBase final : public ErrmsgCommandDeprecated { return {ErrorCodes::Unauthorized, "unauthorized"}; } - return Impl::checkAuthForOperation(opCtx); + return Impl::checkAuthForOperation(opCtx, dbName, cmdObj); } bool errmsgRun(OperationContext* opCtx, @@ -107,9 +107,11 @@ class ClusterCountCmdBase final : public ErrmsgCommandDeprecated { Impl::checkCanRunHere(opCtx); CommandHelpers::handleMarkKillOnClientDisconnect(opCtx); - const NamespaceString nss(parseNs({boost::none, dbname}, cmdObj)); + const NamespaceString nss( + parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), cmdObj)); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace specified '" << nss.ns() << "'", + str::stream() << "Invalid namespace specified '" << nss.toStringForErrorMsg() + << "'", nss.isValid()); std::vector shardResponses; @@ -152,7 +154,9 @@ class ClusterCountCmdBase final : public ErrmsgCommandDeprecated { Shard::RetryPolicy::kIdempotent, countRequest.getQuery(), collation, - true /* eligibleForSampling */); + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/, + true /*eligibleForSampling*/); } catch (const ExceptionFor& ex) { // Rewrite the count command as an aggregation. auto countRequest = CountCommandRequest::parse(IDLParserContext("count"), cmdObj); @@ -227,9 +231,12 @@ class ClusterCountCmdBase final : public ErrmsgCommandDeprecated { return exceptionToStatus(); } - const NamespaceString nss(parseNs(request.getDatabase(), cmdObj)); + const NamespaceString nss = parseNs( + DatabaseNameUtil::deserialize(request.getValidatedTenantId(), request.getDatabase()), + cmdObj); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid namespace specified '" << nss.ns() << "'", + str::stream() << "Invalid namespace specified '" << nss.toStringForErrorMsg() + << "'", nss.isValid()); // If the command has encryptionInformation, rewrite the query as necessary. @@ -260,7 +267,9 @@ class ClusterCountCmdBase final : public ErrmsgCommandDeprecated { ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, targetingQuery, - targetingCollation); + targetingCollation, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); } catch (const ExceptionFor& ex) { CountCommandRequest countRequest(NamespaceStringOrUUID(NamespaceString{})); try { diff --git a/src/mongo/s/commands/cluster_count_cmd_s.cpp b/src/mongo/s/commands/cluster_count_cmd_s.cpp index 3bdf689385b08..fcf7d9d061350 100644 --- a/src/mongo/s/commands/cluster_count_cmd_s.cpp +++ b/src/mongo/s/commands/cluster_count_cmd_s.cpp @@ -27,7 +27,18 @@ * it in the license file. 
*/ +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/s/commands/cluster_count_cmd.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -42,7 +53,7 @@ struct ClusterCountCmdS { return kApiVersions1; } - static Status checkAuthForOperation(OperationContext*) { + static Status checkAuthForOperation(OperationContext*, const DatabaseName&, const BSONObj&) { // No additional required privileges on a mongos. return Status::OK(); } diff --git a/src/mongo/s/commands/cluster_create_cmd.cpp b/src/mongo/s/commands/cluster_create_cmd.cpp index d147861fff306..45a692f77e1d1 100644 --- a/src/mongo/s/commands/cluster_create_cmd.cpp +++ b/src/mongo/s/commands/cluster_create_cmd.cpp @@ -27,17 +27,33 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" #include "mongo/db/commands/create_gen.h" -#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/cluster_ddl.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" namespace mongo { namespace { @@ -76,7 +92,7 @@ class CreateCmd final : public CreateCmdVersion1Gen { CreateCommandReply typedRun(OperationContext* opCtx) final { auto cmd = request(); auto dbName = cmd.getDbName(); - cluster::createDatabase(opCtx, dbName.toStringWithTenantId()); + cluster::createDatabase(opCtx, DatabaseNameUtil::serialize(dbName)); uassert(ErrorCodes::InvalidOptions, "specify size: when capped is true", @@ -87,11 +103,11 @@ class CreateCmd final : public CreateCmdVersion1Gen { // Manually forward the create collection command to the primary shard. 
const auto dbInfo = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase( - opCtx, dbName.toStringWithTenantId())); + opCtx, DatabaseNameUtil::serializeForCatalog(dbName))); auto response = uassertStatusOK( executeCommandAgainstDatabasePrimary( opCtx, - dbName.db(), + DatabaseNameUtil::serialize(dbName), dbInfo, applyReadWriteConcern( opCtx, diff --git a/src/mongo/s/commands/cluster_create_indexes_cmd.cpp b/src/mongo/s/commands/cluster_create_indexes_cmd.cpp index e07aa1700e061..88511208b552b 100644 --- a/src/mongo/s/commands/cluster_create_indexes_cmd.cpp +++ b/src/mongo/s/commands/cluster_create_indexes_cmd.cpp @@ -28,18 +28,44 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/commands.h" #include "mongo/db/create_indexes_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include "mongo/db/service_context.h" #include "mongo/db/timeseries/timeseries_commands_conversion_helper.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/cluster_ddl.h" #include "mongo/s/collection_routing_info_targeter.h" -#include "mongo/s/grid.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -102,7 +128,7 @@ class CreateIndexesCmd : public BasicCommandWithRequestParser "command"_attr = redact(cmdObj)); // TODO SERVER-67798 Change cluster::createDatabase to use DatabaseName - cluster::createDatabase(opCtx, dbName.toStringWithTenantId()); + cluster::createDatabase(opCtx, DatabaseNameUtil::serialize(dbName)); auto targeter = CollectionRoutingInfoTargeter(opCtx, nss); auto routingInfo = targeter.getRoutingInfo(); @@ -121,8 +147,10 @@ class CreateIndexesCmd : public BasicCommandWithRequestParser applyReadWriteConcern(opCtx, this, cmdToBeSent)), ReadPreferenceSetting(ReadPreference::PrimaryOnly), Shard::RetryPolicy::kNoRetry, - BSONObj() /* query */, - BSONObj() /* collation */); + BSONObj() /*query*/, + BSONObj() /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); std::string errmsg; const bool ok = diff --git a/src/mongo/s/commands/cluster_current_op.cpp b/src/mongo/s/commands/cluster_current_op.cpp index 040c43bfb5da2..2947ba8ee2ff1 100644 --- a/src/mongo/s/commands/cluster_current_op.cpp +++ b/src/mongo/s/commands/cluster_current_op.cpp @@ -27,17 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands/current_op_common.h" - -#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" -#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/commands/current_op_common.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/s/query/cluster_aggregate.h" namespace mongo { @@ -51,11 +58,12 @@ class ClusterCurrentOpCommand final : public CurrentOpCommandBase { ClusterCurrentOpCommand() = default; Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { - bool isAuthorized = AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::inprog); + bool isAuthorized = + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::inprog); return isAuthorized ? Status::OK() : Status(ErrorCodes::Unauthorized, "Unauthorized"); } @@ -82,7 +90,7 @@ class ClusterCurrentOpCommand final : public CurrentOpCommandBase { ClusterAggregate::Namespaces{nss, nss}, request, {request}, - {Privilege(ResourcePattern::forClusterResource(), ActionType::inprog)}, + {Privilege(ResourcePattern::forClusterResource(nss.tenantId()), ActionType::inprog)}, &responseBuilder); if (!status.isOK()) { diff --git a/src/mongo/s/commands/cluster_data_size_cmd.cpp b/src/mongo/s/commands/cluster_data_size_cmd.cpp index dda5ed02fcfc1..800181aa4a2ea 100644 --- a/src/mongo/s/commands/cluster_data_size_cmd.cpp +++ b/src/mongo/s/commands/cluster_data_size_cmd.cpp @@ -28,14 +28,38 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/dbcommands_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -88,8 +112,10 @@ class DataSizeCmd : public TypedCommand { CommandHelpers::filterCommandRequestForPassthrough(cmd.toBSON({}))), ReadPreferenceSetting::get(opCtx), 
Shard::RetryPolicy::kIdempotent, - {}, - {}); + {} /*query*/, + {} /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); std::int64_t size = 0; std::int64_t numObjects = 0; diff --git a/src/mongo/s/commands/cluster_db_stats_cmd.cpp b/src/mongo/s/commands/cluster_db_stats_cmd.cpp index cbfa95d019216..42987b64872b8 100644 --- a/src/mongo/s/commands/cluster_db_stats_cmd.cpp +++ b/src/mongo/s/commands/cluster_db_stats_cmd.cpp @@ -27,16 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/dbcommands_gen.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" -#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { @@ -116,7 +134,7 @@ class CmdDBStats final : public BasicCommandWithRequestParser { const DatabaseName& dbname, const BSONObj&) const final { auto as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname.db()), + if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forDatabaseName(dbname), ActionType::dbStats)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -133,7 +151,7 @@ class CmdDBStats final : public BasicCommandWithRequestParser { auto shardResponses = scatterGatherUnversionedTargetAllShards( opCtx, - dbName.db(), + DatabaseNameUtil::serialize(dbName), applyReadWriteConcern( opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)), ReadPreferenceSetting::get(opCtx), @@ -143,7 +161,7 @@ class CmdDBStats final : public BasicCommandWithRequestParser { uasserted(ErrorCodes::OperationFailed, errmsg); } - output.append("db", dbName.db()); + output.append("db", DatabaseNameUtil::serialize(dbName)); aggregateResults(cmd.getScale(), shardResponses, output); return true; } diff --git a/src/mongo/s/commands/cluster_delete_test.cpp b/src/mongo/s/commands/cluster_delete_test.cpp index aa8974aa84e6f..6f6d523ac6d76 100644 --- a/src/mongo/s/commands/cluster_delete_test.cpp +++ b/src/mongo/s/commands/cluster_delete_test.cpp @@ -28,9 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/commands/cluster_command_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/s/commands/cluster_distinct_cmd.cpp 
b/src/mongo/s/commands/cluster_distinct_cmd.cpp index 3c9ff302f7a81..26ec49009d590 100644 --- a/src/mongo/s/commands/cluster_distinct_cmd.cpp +++ b/src/mongo/s/commands/cluster_distinct_cmd.cpp @@ -28,23 +28,71 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobj_comparator.h" +#include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/parsed_distinct.h" #include "mongo/db/query/view_response_formatter.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/views/resolved_view.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/commands/cluster_explain.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_aggregate.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" #include "mongo/util/decimal_counter.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -111,7 +159,10 @@ class DistinctCmd : public BasicCommand { ExplainOptions::Verbosity verbosity, rpc::ReplyBuilderInterface* result) const override { const BSONObj& cmdObj = opMsgRequest.body; - const NamespaceString nss(parseNs(opMsgRequest.getDatabase(), cmdObj)); + const NamespaceString nss( + parseNs(DatabaseNameUtil::deserialize(opMsgRequest.getValidatedTenantId(), + opMsgRequest.getDatabase()), + cmdObj)); auto parsedDistinctCmd = ParsedDistinct::parse(opCtx, nss, cmdObj, ExtensionsCallbackNoop(), true); @@ -146,7 +197,9 @@ class DistinctCmd : public BasicCommand { ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, targetingQuery, - targetingCollation); + targetingCollation, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); } catch (const ExceptionFor& ex) { auto 
parsedDistinct = ParsedDistinct::parse( opCtx, ex->getNamespace(), cmdObj, ExtensionsCallbackNoop(), true); @@ -234,6 +287,8 @@ class DistinctCmd : public BasicCommand { Shard::RetryPolicy::kIdempotent, query, collation, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/, true /* eligibleForSampling */); } catch (const ExceptionFor& ex) { auto parsedDistinct = ParsedDistinct::parse( @@ -260,7 +315,7 @@ class DistinctCmd : public BasicCommand { } BSONObj aggResult = CommandHelpers::runCommandDirectly( - opCtx, OpMsgRequest::fromDBAndBody(dbName.db(), std::move(resolvedAggCmd))); + opCtx, OpMsgRequestBuilder::create(dbName, std::move(resolvedAggCmd))); ViewResponseFormatter formatter(aggResult); auto formatStatus = formatter.appendAsDistinctResponse(&result, boost::none); diff --git a/src/mongo/s/commands/cluster_distinct_test.cpp b/src/mongo/s/commands/cluster_distinct_test.cpp index 55ec1fd0ab571..c11a64337df5c 100644 --- a/src/mongo/s/commands/cluster_distinct_test.cpp +++ b/src/mongo/s/commands/cluster_distinct_test.cpp @@ -28,9 +28,21 @@ */ -#include "mongo/platform/basic.h" - +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/commands/cluster_command_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/s/commands/cluster_drop_collection_cmd.cpp b/src/mongo/s/commands/cluster_drop_collection_cmd.cpp index efbfe0f26ac52..2792078665e54 100644 --- a/src/mongo/s/commands/cluster_drop_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_drop_collection_cmd.cpp @@ -28,21 +28,42 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/drop_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog_cache.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -72,7 +93,8 @@ class DropCmd : public DropCmdVersion1Gen { void doCheckAuthorization(OperationContext* opCtx) const final { auto ns = request().getNamespace(); uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to drop collection '" << ns << "'", + 
str::stream() << "Not authorized to drop collection '" + << ns.toStringForErrorMsg() << "'", AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnNamespace(ns, ActionType::dropCollection)); } @@ -100,7 +122,7 @@ class DropCmd : public DropCmdVersion1Gen { // Send it to the primary shard ShardsvrDropCollection dropCollectionCommand(nss); - dropCollectionCommand.setDbName(nss.db()); + dropCollectionCommand.setDbName(nss.dbName()); dropCollectionCommand.setCollectionUUID(request().getCollectionUUID()); auto cmdResponse = executeCommandAgainstDatabasePrimary( diff --git a/src/mongo/s/commands/cluster_drop_database_cmd.cpp b/src/mongo/s/commands/cluster_drop_database_cmd.cpp index cbba68c13c361..9ac13353af662 100644 --- a/src/mongo/s/commands/cluster_drop_database_cmd.cpp +++ b/src/mongo/s/commands/cluster_drop_database_cmd.cpp @@ -28,19 +28,33 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/drop_database_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -64,8 +78,8 @@ class DropDatabaseCmd : public DropDatabaseCmdVersion1Gen { } void doCheckAuthorization(OperationContext* opCtx) const final { uassert(ErrorCodes::Unauthorized, - str::stream() << "Not authorized to drop database '" << request().getDbName() - << "'", + str::stream() << "Not authorized to drop database '" + << request().getDbName().toStringForErrorMsg() << "'", AuthorizationSession::get(opCtx->getClient()) ->isAuthorizedForActionsOnNamespace(ns(), ActionType::dropDatabase)); } @@ -85,12 +99,13 @@ class DropDatabaseCmd : public DropDatabaseCmdVersion1Gen { try { const CachedDatabaseInfo dbInfo = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase( - opCtx, dbName.toStringWithTenantId())); + opCtx, DatabaseNameUtil::serializeForCatalog(dbName))); // Invalidate the database metadata so the next access kicks off a full reload, even // if sending the command to the config server fails due to e.g. a NetworkError. 
ON_BLOCK_EXIT([opCtx, dbName] { - Grid::get(opCtx)->catalogCache()->purgeDatabase(dbName.toStringWithTenantId()); + Grid::get(opCtx)->catalogCache()->purgeDatabase( + DatabaseNameUtil::serializeForCatalog(dbName)); }); // Send it to the primary shard @@ -99,7 +114,7 @@ class DropDatabaseCmd : public DropDatabaseCmdVersion1Gen { auto cmdResponse = executeCommandAgainstDatabasePrimary( opCtx, - dbName.db(), + DatabaseNameUtil::serialize(dbName), dbInfo, CommandHelpers::appendMajorityWriteConcern(dropDatabaseCommand.toBSON({}), opCtx->getWriteConcern()), diff --git a/src/mongo/s/commands/cluster_drop_indexes_cmd.cpp b/src/mongo/s/commands/cluster_drop_indexes_cmd.cpp index ca48f94522569..77701c561f5e9 100644 --- a/src/mongo/s/commands/cluster_drop_indexes_cmd.cpp +++ b/src/mongo/s/commands/cluster_drop_indexes_cmd.cpp @@ -27,13 +27,40 @@ * it in the license file. */ +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/drop_indexes_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -119,13 +146,14 @@ class DropIndexesCmd : public BasicCommandWithRequestParser { shardsvrDropIndexCmd.setDropIndexesRequest(requestParser.request().getDropIndexesRequest()); // TODO SERVER-67797 Change CatalogCache to use DatabaseName object - const CachedDatabaseInfo dbInfo = uassertStatusOK( - Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbName.toStringWithTenantId())); + const CachedDatabaseInfo dbInfo = + uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase( + opCtx, DatabaseNameUtil::serializeForCatalog(dbName))); // TODO SERVER-67411 change executeCommandAgainstDatabasePrimary to take in DatabaseName auto cmdResponse = executeCommandAgainstDatabasePrimary( opCtx, - dbName.toStringWithTenantId(), + DatabaseNameUtil::serialize(dbName), dbInfo, CommandHelpers::appendMajorityWriteConcern(shardsvrDropIndexCmd.toBSON({}), opCtx->getWriteConcern()), diff --git a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp index ffe1e870fd110..c2b820fc07fbe 100644 --- a/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp +++ b/src/mongo/s/commands/cluster_enable_sharding_cmd.cpp @@ -28,14 +28,35 @@ */ -#include "mongo/db/auth/action_set.h" +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include 
"mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/commands/cluster_commands_gen.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -70,9 +91,10 @@ class EnableShardingCmd final : public TypedCommand { const auto dbName = getDbName(); auto catalogCache = Grid::get(opCtx)->catalogCache(); - ScopeGuard purgeDatabaseOnExit([&] { catalogCache->purgeDatabase(dbName); }); + ScopeGuard purgeDatabaseOnExit( + [&] { catalogCache->purgeDatabase(DatabaseNameUtil::serialize(dbName)); }); - ConfigsvrCreateDatabase configsvrCreateDatabase{dbName.toString()}; + ConfigsvrCreateDatabase configsvrCreateDatabase{DatabaseNameUtil::serialize(dbName)}; configsvrCreateDatabase.setDbName(DatabaseName::kAdmin); configsvrCreateDatabase.setPrimaryShardId(request().getPrimaryShard()); @@ -91,16 +113,19 @@ class EnableShardingCmd final : public TypedCommand { auto createDbResponse = ConfigsvrCreateDatabaseResponse::parse( IDLParserContext("configsvrCreateDatabaseResponse"), response.response); - catalogCache->onStaleDatabaseVersion(dbName, createDbResponse.getDatabaseVersion()); + catalogCache->onStaleDatabaseVersion(DatabaseNameUtil::serialize(dbName), + createDbResponse.getDatabaseVersion()); purgeDatabaseOnExit.dismiss(); } private: - StringData getDbName() const { - return request().getCommandParameter(); + DatabaseName getDbName() const { + const auto& cmd = request(); + return DatabaseNameUtil::deserialize(cmd.getDbName().tenantId(), + cmd.getCommandParameter()); } NamespaceString ns() const override { - return {getDbName(), ""}; + return NamespaceString(getDbName()); } bool supportsWriteConcern() const override { diff --git a/src/mongo/s/commands/cluster_explain.cpp b/src/mongo/s/commands/cluster_explain.cpp index 1fdb00914afb3..c674e78f4257a 100644 --- a/src/mongo/s/commands/cluster_explain.cpp +++ b/src/mongo/s/commands/cluster_explain.cpp @@ -27,16 +27,30 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/connection_string.h" #include "mongo/db/commands.h" #include "mongo/db/query/explain_common.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/idl/command_generic_argument.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/commands/cluster_explain.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/s/commands/cluster_explain.h b/src/mongo/s/commands/cluster_explain.h index f1ff6716de8ce..005ac6494907b 100644 --- a/src/mongo/s/commands/cluster_explain.h +++ b/src/mongo/s/commands/cluster_explain.h @@ -29,8 +29,14 @@ #pragma once +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/explain_options.h" #include "mongo/s/async_requests_sender.h" diff --git a/src/mongo/s/commands/cluster_explain_cmd.cpp b/src/mongo/s/commands/cluster_explain_cmd.cpp index 8b8f03db4074f..f1cef19c4f6d9 100644 --- a/src/mongo/s/commands/cluster_explain_cmd.cpp +++ b/src/mongo/s/commands/cluster_explain_cmd.cpp @@ -27,13 +27,38 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/explain_gen.h" -#include "mongo/db/query/explain.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/service_context.h" #include "mongo/idl/command_generic_argument.h" -#include "mongo/s/query/cluster_find.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -170,7 +195,7 @@ std::unique_ptr ClusterExplainCmd::parse(OperationContext* op IDLParserContext(ExplainCommandRequest::kCommandName, APIParameters::get(opCtx).getAPIStrict().value_or(false)), request.body); - std::string dbName = cmdObj.getDbName().toString(); + std::string dbName = DatabaseNameUtil::serialize(cmdObj.getDbName()); ExplainOptions::Verbosity verbosity = cmdObj.getVerbosity(); // This is the nested command which we are explaining. 
We need to propagate generic // arguments into the inner command since it is what is passed to the virtual diff --git a/src/mongo/s/commands/cluster_filemd5_cmd.cpp b/src/mongo/s/commands/cluster_filemd5_cmd.cpp index 63b5a2a5033ce..5901f65fbbed5 100644 --- a/src/mongo/s/commands/cluster_filemd5_cmd.cpp +++ b/src/mongo/s/commands/cluster_filemd5_cmd.cpp @@ -28,13 +28,46 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -107,7 +140,9 @@ class FileMD5Cmd : public BasicCommand { ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, routingQuery, - CollationSpec::kSimpleSpec); + CollationSpec::kSimpleSpec, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); invariant(shardResults.size() == 1); const auto shardResponse = uassertStatusOK(std::move(shardResults[0].swResponse)); uassertStatusOK(shardResponse.status); @@ -170,7 +205,7 @@ class FileMD5Cmd : public BasicCommand { BSON("files_id" << cmdObj.firstElement() << "n" << numGridFSChunksProcessed)); uassert(16246, - str::stream() << "Shard for database " << nss.db() + str::stream() << "Shard for database " << nss.dbName().toStringForErrorMsg() << " is too old to support GridFS sharded by {files_id:1, n:1}", res.hasField("md5state")); diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp index f994cbd030693..71f7a76281772 100644 --- a/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp +++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.cpp @@ -29,43 +29,89 @@ #include "mongo/s/commands/cluster_find_and_modify_cmd.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_extra_info.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" +#include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" #include 
"mongo/db/auth/authorization_session.h" #include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/document_validation.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/commands/update_metrics.h" +#include "mongo/db/curop.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/fle_crud.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" #include "mongo/db/ops/write_ops_gen.h" -#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/server_options.h" +#include "mongo/db/stats/counters.h" #include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/timeseries/timeseries_update_delete_util.h" #include "mongo/db/transaction/transaction_api.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor_pool.h" -#include "mongo/logv2/log.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/s/balancer_configuration.h" +#include "mongo/rpc/write_concern_error_detail.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/cluster_ddl.h" #include "mongo/s/commands/cluster_explain.h" #include "mongo/s/commands/document_shard_key_update_util.h" -#include "mongo/s/commands/strategy.h" #include "mongo/s/grid.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/s/query_analysis_sampler_util.h" #include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" #include "mongo/s/session_catalog_router.h" #include "mongo/s/shard_key_pattern_query_util.h" -#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" #include "mongo/s/transaction_router_resource_yielder.h" +#include "mongo/s/type_collection_common_types_gen.h" #include "mongo/s/would_change_owning_shard_exception.h" #include "mongo/s/write_ops/write_without_shard_key_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -130,19 +176,34 @@ boost::optional getLegacyRuntimeConstants(const BSONObj& return boost::none; } -BSONObj getShardKey(OperationContext* opCtx, - const ChunkManager& chunkMgr, - const NamespaceString& nss, - const BSONObj& query, - const BSONObj& collation, - const boost::optional verbosity, - const boost::optional& let, - const boost::optional& runtimeConstants) { - auto expCtx = makeExpressionContextWithDefaultsForTargeter( - opCtx, nss, collation, verbosity, let, runtimeConstants); +namespace { +BSONObj getQueryForShardKey(boost::intrusive_ptr expCtx, + const ChunkManager& cm, + const 
BSONObj& query) { + if (auto tsFields = cm.getTimeseriesFields(); cm.isSharded() && tsFields) { + // Note: The useTwoPhaseProtocol() uses the shard key extractor to decide whether it should + // use the two phase protocol and the shard key extractor is only based on the equality + // query. But we still should be able to route the query to the correct shard from a range + // query on shard keys (not always) and unfortunately, even an equality query on the time + // field for timeseries collections would be translated into a range query on + // control.min.time and control.max.time. So, with this, we can support targeted + // findAndModify based on the time field. + // + // If the collection is a sharded timeseries collection, rewrite the query into a + // bucket-level query. + return timeseries::getBucketLevelPredicateForRouting( + query, expCtx, tsFields->getTimeseriesOptions(), /* allowArbitraryWrites */ true); + } + + return query; +} +} // namespace - BSONObj shardKey = uassertStatusOK( - extractShardKeyFromBasicQueryWithContext(expCtx, chunkMgr.getShardKeyPattern(), query)); +BSONObj getShardKey(boost::intrusive_ptr expCtx, + const ChunkManager& chunkMgr, + const BSONObj& query) { + BSONObj shardKey = uassertStatusOK(extractShardKeyFromBasicQueryWithContext( + expCtx, chunkMgr.getShardKeyPattern(), getQueryForShardKey(expCtx, chunkMgr, query))); uassert(ErrorCodes::ShardKeyNotFound, "Query for sharded findAndModify must contain the shard key", !shardKey.isEmpty()); @@ -156,11 +217,10 @@ void handleWouldChangeOwningShardErrorNonTransaction( const write_ops::FindAndModifyCommandRequest& request, BSONObjBuilder* result) { auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor( - Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()); + auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto txn = txn_api::SyncTransactionWithRetries( - opCtx, sleepInlineExecutor, nullptr /* resourceYielder */, inlineExecutor); + opCtx, executor, nullptr /* resourceYielder */, inlineExecutor); // Shared state for the transaction API use below. struct SharedBlock { @@ -219,11 +279,18 @@ void updateReplyOnWouldChangeOwningShardSuccess(bool matchedDocOrUpserted, } lastErrorObjBuilder.doneFast(); + // For timeseries collections, the 'postMeasurementImage' is returned back through + // WouldChangeOwningShardInfo from the old shard as well and it should be returned to the user + // instead of the post-image. + auto postImage = [&] { + return changeInfo.getUserPostImage() ? *changeInfo.getUserPostImage() + : changeInfo.getPostImage(); + }(); + if (updatedExistingDocument) { - result->append( - "value", shouldReturnPostImage ? changeInfo.getPostImage() : changeInfo.getPreImage()); + result->append("value", shouldReturnPostImage ? 
postImage : changeInfo.getPreImage()); } else if (upserted && shouldReturnPostImage) { - result->append("value", changeInfo.getPostImage()); + result->append("value", postImage); } else { result->appendNull("value"); } @@ -257,11 +324,10 @@ void handleWouldChangeOwningShardErrorTransaction( try { auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); auto txn = txn_api::SyncTransactionWithRetries( opCtx, - sleepInlineExecutor, + executor, TransactionRouterResourceYielder::makeForLocalHandoff(), inlineExecutor); @@ -399,7 +465,7 @@ Status FindAndModifyCmd::checkAuthForOperation(OperationContext* opCtx, } auto nss = CommandHelpers::parseNsFromCommand(dbName, cmdObj); - ResourcePattern resource(CommandHelpers::resourcePatternForNamespace(nss.ns())); + ResourcePattern resource(CommandHelpers::resourcePatternForNamespace(nss)); uassert(17137, "Invalid target namespace " + resource.toString(), resource.isExactNamespacePattern()); @@ -412,11 +478,119 @@ Status FindAndModifyCmd::checkAuthForOperation(OperationContext* opCtx, return Status::OK(); } +namespace { +/** + * Replaces the target namespace in the 'cmdObj' by 'bucketNss'. Also sets the + * 'isTimeseriesNamespace' flag. + */ +BSONObj replaceNamespaceByBucketNss(const BSONObj& cmdObj, const NamespaceString& bucketNss) { + BSONObjBuilder bob; + for (const auto& elem : cmdObj) { + const auto name = elem.fieldNameStringData(); + if (name == write_ops::FindAndModifyCommandRequest::kCommandName) { + bob.append(write_ops::FindAndModifyCommandRequest::kCommandName, bucketNss.coll()); + } else { + bob.append(elem); + } + } + bob.append(write_ops::FindAndModifyCommandRequest::kIsTimeseriesNamespaceFieldName, true); + + return bob.obj(); +} + +/** + * Returns CollectionRoutingInfo for 'maybeTsNss' namespace. If 'maybeTsNss' is a timeseries + * collection, returns CollectionRoutingInfo for the corresponding timeseries buckets collection. + */ +CollectionRoutingInfo getCollectionRoutingInfo(OperationContext* opCtx, + const BSONObj& cmdObj, + const NamespaceString& maybeTsNss) { + // Apparently, we should return the CollectionRoutingInfo for the original namespace if we're + // not writing to a timeseries collection. + auto cri = uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, maybeTsNss)); + + // Note: We try to get CollectionRoutingInfo for the timeseries buckets collection only when the + // timeseries deletes or updates feature flag is enabled. + const bool arbitraryTimeseriesWritesEnabled = + feature_flags::gTimeseriesDeletesSupport.isEnabled( + serverGlobalParams.featureCompatibility) || + feature_flags::gTimeseriesUpdatesSupport.isEnabled(serverGlobalParams.featureCompatibility); + if (!arbitraryTimeseriesWritesEnabled || cri.cm.isSharded() || + maybeTsNss.isTimeseriesBucketsCollection()) { + return cri; + } + + // If the 'maybeTsNss' namespace is not a timeseries buckets collection and not sharded, try + // to get the CollectionRoutingInfo for the corresponding timeseries buckets collection to + // see if it's sharded and it really is a timeseries buckets collection. We should do this to + // figure out whether we need to use the two phase write protocol or not on timeseries buckets + // collections. 
+ auto bucketCollNss = maybeTsNss.makeTimeseriesBucketsNamespace(); + auto bucketCollCri = uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, bucketCollNss)); + if (!bucketCollCri.cm.isSharded() || !bucketCollCri.cm.getTimeseriesFields()) { + return cri; + } + + uassert(ErrorCodes::InvalidOptions, + "Cannot perform findAndModify with sort on a sharded timeseries collection", + !cmdObj.hasField("sort")); + + return bucketCollCri; +} + +boost::intrusive_ptr makeExpCtx( + OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& collation, + const boost::optional verbosity, + const boost::optional& let, + const boost::optional& runtimeConstants) { + return makeExpressionContextWithDefaultsForTargeter( + opCtx, nss, collation, verbosity, let, runtimeConstants); +} + +/** + * Returns the shard id if the 'query' can be targeted to a single shard. Otherwise, returns + * boost::none. + */ +boost::optional targetPotentiallySingleShard( + boost::intrusive_ptr expCtx, + const ChunkManager& cm, + const BSONObj& query, + const BSONObj& collation) { + // Special case: there's only one shard owning all the chunks. + if (cm.getNShardsOwningChunks() == 1) { + std::set shardIds; + cm.getAllShardIds(&shardIds); + return *shardIds.begin(); + } + + std::set shardIds; + getShardIdsForQuery(expCtx, getQueryForShardKey(expCtx, cm, query), collation, cm, &shardIds); + + if (shardIds.size() == 1) { + // If we can find a single shard to target, we can skip the two phase write protocol. + return *shardIds.begin(); + } + + return boost::none; +} + +BSONObj makeExplainCmd(OperationContext* opCtx, + const BSONObj& cmdObj, + ExplainOptions::Verbosity verbosity) { + return ClusterExplain::wrapAsExplain(appendLegacyRuntimeConstantsToCommandObject(opCtx, cmdObj), + verbosity); +} +} // namespace + Status FindAndModifyCmd::explain(OperationContext* opCtx, const OpMsgRequest& request, ExplainOptions::Verbosity verbosity, rpc::ReplyBuilderInterface* result) const { - const DatabaseName dbName(request.getValidatedTenantId(), request.getDatabase()); + const DatabaseName dbName = + DatabaseNameUtil::deserialize(request.getValidatedTenantId(), request.getDatabase()); + auto bodyBuilder = result->getBodyBuilder(); const BSONObj& cmdObj = [&]() { // Check whether the query portion needs to be rewritten for FLE. auto findAndModifyRequest = write_ops::FindAndModifyCommandRequest::parse( @@ -430,66 +604,82 @@ Status FindAndModifyCmd::explain(OperationContext* opCtx, return request.body; } }(); - const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj)); + NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj)); - const auto cri = - uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss)); + const auto cri = getCollectionRoutingInfo(opCtx, cmdObj, nss); const auto& cm = cri.cm; - - std::shared_ptr shard; + auto isShardedTimeseries = cm.isSharded() && cm.getTimeseriesFields(); + if (isShardedTimeseries) { + nss = std::move(cm.getNss()); + } + // Note: at this point, 'nss' should be the timeseries buckets collection namespace if we're + // writing to a sharded timeseries collection. 
+ + boost::optional shardId; + const BSONObj query = cmdObj.getObjectField("query"); + const BSONObj collation = getCollation(cmdObj); + const auto isUpsert = cmdObj.getBoolField("upsert"); + const auto let = getLet(cmdObj); + const auto rc = getLegacyRuntimeConstants(cmdObj); if (cm.isSharded()) { - const BSONObj query = cmdObj.getObjectField("query"); - const BSONObj collation = getCollation(cmdObj); - const auto let = getLet(cmdObj); - const auto rc = getLegacyRuntimeConstants(cmdObj); - const BSONObj shardKey = getShardKey(opCtx, cm, nss, query, collation, verbosity, let, rc); - const auto chunk = cm.findIntersectingChunk(shardKey, collation); - - shard = - uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, chunk.getShardId())); + auto expCtx = makeExpCtx(opCtx, nss, collation, boost::none, let, rc); + if (write_without_shard_key::useTwoPhaseProtocol( + opCtx, nss, false /* isUpdateOrDelete */, isUpsert, query, collation, let, rc)) { + shardId = targetPotentiallySingleShard(expCtx, cm, query, collation); + } else { + const BSONObj shardKey = getShardKey(expCtx, cm, query); + const auto chunk = cm.findIntersectingChunk(shardKey, collation); + shardId = chunk.getShardId(); + } } else { - shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, cm.dbPrimary())); + shardId = cm.dbPrimary(); } - const auto explainCmd = ClusterExplain::wrapAsExplain( - appendLegacyRuntimeConstantsToCommandObject(opCtx, cmdObj), verbosity); - // Time how long it takes to run the explain command on the shard. Timer timer; BSONObjBuilder bob; - - if (cm.isSharded()) { - _runCommand(opCtx, - shard->getId(), - cri.getShardVersion(shard->getId()), - boost::none, - nss, - applyReadWriteConcern(opCtx, false, false, explainCmd), - true /* isExplain */, - boost::none /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */, - &bob); - } else { - _runCommand(opCtx, - shard->getId(), - boost::make_optional(!cm.dbVersion().isFixed(), ShardVersion::UNSHARDED()), - cm.dbVersion(), - nss, - applyReadWriteConcern(opCtx, false, false, explainCmd), - true /* isExplain */, - boost::none /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */, - &bob); + if (!shardId) { + // The two phase write protocol requires that the target namespace in the command is sharded + // and we shard the underlying timeseries buckets collection. So, if we're writing to a + // sharded timeseries collection, replace the target namespace in the command by 'nss' which + // is the buckets namespace. + _runExplainWithoutShardKey( + opCtx, + nss, + makeExplainCmd(opCtx, + isShardedTimeseries ? replaceNamespaceByBucketNss(cmdObj, nss) : cmdObj, + verbosity), + verbosity, + &bob); + bodyBuilder.appendElementsUnique(bob.obj()); + return Status::OK(); } + auto shardVersion = cm.isSharded() + ? boost::make_optional(cri.getShardVersion(*shardId)) + : boost::make_optional(!cm.dbVersion().isFixed(), ShardVersion::UNSHARDED()); + + _runCommand( + opCtx, + *shardId, + shardVersion, + cm.dbVersion(), + nss, + applyReadWriteConcern(opCtx, false, false, makeExplainCmd(opCtx, cmdObj, verbosity)), + true /* isExplain */, + boost::none /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */, + &bob); + const auto millisElapsed = timer.millis(); executor::RemoteCommandResponse response(bob.obj(), Milliseconds(millisElapsed)); // We fetch an arbitrary host from the ConnectionString, since // ClusterExplain::buildExplainResult() doesn't use the given HostAndPort. 
+ auto shard = uassertStatusOK(Grid::get(opCtx)->shardRegistry()->getShard(opCtx, *shardId)); AsyncRequestsSender::Response arsResponse{ - shard->getId(), response, shard->getConnString().getServers().front()}; + *shardId, response, shard->getConnString().getServers().front()}; - auto bodyBuilder = result->getBodyBuilder(); return ClusterExplain::buildExplainResult( opCtx, {arsResponse}, ClusterExplain::kSingleShard, millisElapsed, cmdObj, &bodyBuilder); } @@ -498,7 +688,7 @@ bool FindAndModifyCmd::run(OperationContext* opCtx, const DatabaseName& dbName, const BSONObj& cmdObj, BSONObjBuilder& result) { - const NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj)); + NamespaceString nss(CommandHelpers::parseNsCollectionRequired(dbName, cmdObj)); if (processFLEFindAndModify(opCtx, cmdObj, result) == FLEBatchResult::kProcessed) { return true; @@ -511,55 +701,71 @@ bool FindAndModifyCmd::run(OperationContext* opCtx, // would require that the parsing be pulled into this function. cluster::createDatabase(opCtx, nss.db()); + auto cri = getCollectionRoutingInfo(opCtx, cmdObj, nss); + const auto& cm = cri.cm; + auto isShardedTimeseries = cm.isSharded() && cm.getTimeseriesFields(); + if (isShardedTimeseries) { + nss = std::move(cm.getNss()); + } + // Note: at this point, 'nss' should be the timeseries buckets collection namespace if we're + // writing to a sharded timeseries collection. + // Append mongoS' runtime constants to the command object before forwarding it to the shard. auto cmdObjForShard = appendLegacyRuntimeConstantsToCommandObject(opCtx, cmdObj); - - const auto cri = uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, nss)); - const auto& cm = cri.cm; if (cm.isSharded()) { - const BSONObj query = cmdObjForShard.getObjectField("query"); + BSONObj query = cmdObjForShard.getObjectField("query"); const bool isUpsert = cmdObjForShard.getBoolField("upsert"); const BSONObj collation = getCollation(cmdObjForShard); - if (write_without_shard_key::useTwoPhaseProtocol( - opCtx, nss, false /* isUpdateOrDelete */, isUpsert, query, collation)) { + const auto letParams = getLet(cmdObjForShard); + const auto runtimeConstants = getLegacyRuntimeConstants(cmdObjForShard); + auto expCtx = makeExpCtx(opCtx, nss, collation, boost::none, letParams, runtimeConstants); + + if (write_without_shard_key::useTwoPhaseProtocol(opCtx, + nss, + false /* isUpdateOrDelete */, + isUpsert, + query, + collation, + letParams, + runtimeConstants)) { + findAndModifyNonTargetedShardedCount.increment(1); auto allowShardKeyUpdatesWithoutFullShardKeyInQuery = opCtx->isRetryableWrite() || opCtx->inMultiDocumentTransaction(); - if (cm.getNShardsOwningChunks() == 1) { - std::set allShardsContainingChunksForNs; - cm.getAllShardIds(&allShardsContainingChunksForNs); - auto shardId = *allShardsContainingChunksForNs.begin(); - - // If we can find a single shard to target, we can skip the two - // phase write protocol. + if (auto shardId = targetPotentiallySingleShard(expCtx, cm, query, collation)) { + // If we can find a single shard to target, we can skip the two phase write + // protocol. 
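This decision is the heart of the optimization: if the filter (or the fact that a single shard owns every chunk) pins the write to exactly one shard, mongos sends the command directly; otherwise it falls back to the two-phase write protocol. A self-contained sketch of that decision, with shard ids as plain strings (the names here are illustrative, not the server's types):

    #include <iostream>
    #include <optional>
    #include <set>
    #include <string>

    // Returns the single owning shard if the query is targetable to exactly one
    // shard, otherwise nullopt, in which case the two-phase protocol is used.
    std::optional<std::string> targetSingleShard(const std::set<std::string>& candidateShards) {
        if (candidateShards.size() == 1) {
            return *candidateShards.begin();
        }
        return std::nullopt;
    }

    int main() {
        std::set<std::string> oneShard{"shard0"};
        std::set<std::string> twoShards{"shard0", "shard1"};

        if (auto shard = targetSingleShard(oneShard)) {
            std::cout << "direct write to " << *shard << "\n";  // skip the two-phase protocol
        }
        if (!targetSingleShard(twoShards)) {
            std::cout << "fall back to the two-phase write protocol\n";
        }
    }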
_runCommand(opCtx, - shardId, - cri.getShardVersion(shardId), + *shardId, + cri.getShardVersion(*shardId), boost::none, nss, applyReadWriteConcern(opCtx, this, cmdObjForShard), false /* isExplain */, allowShardKeyUpdatesWithoutFullShardKeyInQuery, &result); - } else { - _runCommandWithoutShardKey(opCtx, - nss, - applyReadWriteConcern(opCtx, this, cmdObjForShard), - false /* isExplain */, - allowShardKeyUpdatesWithoutFullShardKeyInQuery, - &result); + // The two phase write protocol requires that the target namespace in the command is + // sharded and we shard the underlying timeseries buckets collection. So, if we're + // writing to a sharded timeseries collection, replace the target namespace in the + // command by 'nss' which is the buckets namespace. + if (isShardedTimeseries) { + cmdObjForShard = replaceNamespaceByBucketNss(cmdObjForShard, nss); + } + + _runCommandWithoutShardKey( + opCtx, nss, applyReadWriteConcern(opCtx, this, cmdObjForShard), &result); } } else { - const auto let = getLet(cmdObjForShard); - const auto rc = getLegacyRuntimeConstants(cmdObjForShard); - const BSONObj shardKey = - getShardKey(opCtx, cm, nss, query, collation, boost::none, let, rc); + findAndModifyTargetedShardedCount.increment(1); + + const BSONObj shardKey = getShardKey(expCtx, cm, query); // For now, set bypassIsFieldHashedCheck to be true in order to skip the // isFieldHashedCheck in the special case where _id is hashed and used as the shard // key. This means that we always assume that a findAndModify request using _id is // targetable to a single shard. auto chunk = cm.findIntersectingChunk(shardKey, collation, true); + _runCommand(opCtx, chunk.getShardId(), cri.getShardVersion(chunk.getShardId()), @@ -571,6 +777,7 @@ bool FindAndModifyCmd::run(OperationContext* opCtx, &result); } } else { + findAndModifyUnshardedCount.increment(1); _runCommand(opCtx, cm.dbPrimary(), boost::make_optional(!cm.dbVersion().isFixed(), ShardVersion::UNSHARDED()), @@ -654,19 +861,18 @@ void FindAndModifyCmd::_constructResult(OperationContext* opCtx, } // Two-phase protocol to run a findAndModify command without a shard key or _id. -void FindAndModifyCmd::_runCommandWithoutShardKey( - OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& cmdObj, - bool isExplain, - boost::optional allowShardKeyUpdatesWithoutFullShardKeyInQuery, - BSONObjBuilder* result) { +void FindAndModifyCmd::_runCommandWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + BSONObjBuilder* result) { + auto allowShardKeyUpdatesWithoutFullShardKeyInQuery = + opCtx->isRetryableWrite() || opCtx->inMultiDocumentTransaction(); auto cmdObjForPassthrough = prepareCmdObjForPassthrough(opCtx, cmdObj, nss, - isExplain, + false /* isExplain */, boost::none /* dbVersion */, boost::none /* shardVersion */, allowShardKeyUpdatesWithoutFullShardKeyInQuery); @@ -708,6 +914,50 @@ void FindAndModifyCmd::_runCommandWithoutShardKey( result); } +// Two-phase protocol to run an explain for a findAndModify command without a shard key or _id. 
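Because explain cannot run inside the internal transaction the protocol normally uses, the function below runs the query phase and the write phase as two separate explains, feeds the query phase's targetShardId and a dummy _id target document into the write phase, and composes the two replies. A sketch of that composition with string maps standing in for the BSON replies; apart from targetShardId, the field names and the naive merge are illustrative stand-ins for generateExplainResponseForTwoPhaseWriteProtocol:

    #include <iostream>
    #include <map>
    #include <string>

    using Doc = std::map<std::string, std::string>;

    // Stand-in for the queryWithoutShardKey explain (it reports which shard was chosen).
    Doc explainQueryPhase() {
        return {{"targetShardId", "shard0"}, {"queryPlan", "IXSCAN"}};
    }

    // Stand-in for the writeWithoutShardKey explain; explain never produces a real
    // matching document, so a dummy _id target is used for this phase.
    Doc explainWritePhase(const std::string& targetShardId) {
        return {{"shard", targetShardId}, {"writePlan", "UPDATE"}};
    }

    // Compose both phases into one reply; later fields do not override earlier ones.
    Doc composeExplain(const Doc& queryPhase, const Doc& writePhase) {
        Doc out = queryPhase;
        out.insert(writePhase.begin(), writePhase.end());  // insert() keeps existing keys
        return out;
    }

    int main() {
        auto queryRes = explainQueryPhase();
        auto writeRes = explainWritePhase(queryRes.at("targetShardId"));
        for (const auto& [field, value] : composeExplain(queryRes, writeRes)) {
            std::cout << field << ": " << value << "\n";
        }
    }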
+void FindAndModifyCmd::_runExplainWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + ExplainOptions::Verbosity verbosity, + BSONObjBuilder* result) { + auto cmdObjForPassthrough = prepareCmdObjForPassthrough( + opCtx, + cmdObj, + nss, + true /* isExplain */, + boost::none /* dbVersion */, + boost::none /* shardVersion */, + boost::none /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */); + + // Explain currently cannot be run within a transaction, so each command is instead run + // separately outside of a transaction, and we compose the results at the end. + auto clusterQueryWithoutShardKeyExplainRes = [&] { + ClusterQueryWithoutShardKey clusterQueryWithoutShardKeyCommand(cmdObjForPassthrough); + const auto explainClusterQueryWithoutShardKeyCmd = + ClusterExplain::wrapAsExplain(clusterQueryWithoutShardKeyCommand.toBSON({}), verbosity); + auto opMsg = OpMsgRequest::fromDBAndBody(nss.db(), explainClusterQueryWithoutShardKeyCmd); + return CommandHelpers::runCommandDirectly(opCtx, opMsg).getOwned(); + }(); + + auto clusterWriteWithoutShardKeyExplainRes = [&] { + // Since 'explain' does not return the results of the query, we do not have an _id + // document to target by from the 'Read Phase'. We instead will use a dummy _id + // target document for the 'Write Phase'. + ClusterWriteWithoutShardKey clusterWriteWithoutShardKeyCommand( + cmdObjForPassthrough, + clusterQueryWithoutShardKeyExplainRes.getStringField("targetShardId").toString(), + write_without_shard_key::targetDocForExplain); + const auto explainClusterWriteWithoutShardKeyCmd = + ClusterExplain::wrapAsExplain(clusterWriteWithoutShardKeyCommand.toBSON({}), verbosity); + auto opMsg = OpMsgRequest::fromDBAndBody(nss.db(), explainClusterWriteWithoutShardKeyCmd); + return CommandHelpers::runCommandDirectly(opCtx, opMsg).getOwned(); + }(); + + auto output = write_without_shard_key::generateExplainResponseForTwoPhaseWriteProtocol( + clusterQueryWithoutShardKeyExplainRes, clusterWriteWithoutShardKeyExplainRes); + result->appendElementsUnique(output); +} + // Command invocation to be used if a shard key is specified or the collection is unsharded. 
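Whether the targeted or the unsharded path is taken, both explain() and run() attach version information to the forwarded command the same way: the routing table's shard version when the collection is sharded, otherwise an UNSHARDED marker that is omitted when the database version is fixed. A small sketch of that selection, with strings in place of ShardVersion and the helper name invented for illustration:

    #include <iostream>
    #include <optional>
    #include <string>

    // Pick the versioning information to attach to the forwarded command.
    std::optional<std::string> versionToAttach(bool isSharded,
                                               bool dbVersionIsFixed,
                                               const std::string& shardVersion) {
        if (isSharded) {
            return shardVersion;          // real shard version from the routing info
        }
        if (dbVersionIsFixed) {
            return std::nullopt;          // fixed database version: attach nothing
        }
        return std::string("UNSHARDED");  // unsharded collection on the db primary
    }

    int main() {
        std::cout << versionToAttach(true, false, "5|3").value_or("<none>") << "\n";
        std::cout << versionToAttach(false, false, "").value_or("<none>") << "\n";
        std::cout << versionToAttach(false, true, "").value_or("<none>") << "\n";
    }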
void FindAndModifyCmd::_runCommand( OperationContext* opCtx, @@ -787,15 +1037,14 @@ void FindAndModifyCmd::_handleWouldChangeOwningShardErrorRetryableWriteLegacy( false /* isUpdateOrDelete */, cmdObj.getBoolField("upsert"), cmdObj.getObjectField("query"), - getCollation(cmdObj))) { - _runCommandWithoutShardKey(opCtx, - nss, - stripWriteConcern(cmdObj), - false /* isExplain */, - true /* allowShardKeyUpdatesWithoutFullShardKeyInQuery */, - result); + getCollation(cmdObj), + getLet(cmdObj), + getLegacyRuntimeConstants(cmdObj))) { + findAndModifyNonTargetedShardedCount.increment(1); + _runCommandWithoutShardKey(opCtx, nss, stripWriteConcern(cmdObj), result); } else { + findAndModifyTargetedShardedCount.increment(1); _runCommand(opCtx, shardId, shardVersion, diff --git a/src/mongo/s/commands/cluster_find_and_modify_cmd.h b/src/mongo/s/commands/cluster_find_and_modify_cmd.h index ca0ec63fff31d..bb1e0ad524e32 100644 --- a/src/mongo/s/commands/cluster_find_and_modify_cmd.h +++ b/src/mongo/s/commands/cluster_find_and_modify_cmd.h @@ -29,7 +29,17 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/auth/action_set.h" #include "mongo/db/auth/action_type.h" @@ -38,15 +48,25 @@ #include "mongo/db/catalog/document_validation.h" #include "mongo/db/commands.h" #include "mongo/db/commands/update_metrics.h" +#include "mongo/db/database_name.h" #include "mongo/db/fle_crud.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/db/storage/duplicate_key_error_info.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/client/shard_registry.h" @@ -55,12 +75,14 @@ #include "mongo/s/commands/cluster_explain.h" #include "mongo/s/commands/document_shard_key_update_util.h" #include "mongo/s/commands/strategy.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/s/query_analysis_sampler_util.h" #include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" #include "mongo/s/session_catalog_router.h" #include "mongo/s/shard_key_pattern_query_util.h" +#include "mongo/s/shard_version.h" #include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" #include "mongo/s/transaction_router_resource_yielder.h" @@ -156,13 +178,17 @@ class FindAndModifyCmd : public BasicCommand { BSONObjBuilder* result); // Two-phase protocol to run a findAndModify command without a shard key or _id. 
- static void _runCommandWithoutShardKey( - OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj& cmdObj, - bool isExplain, - boost::optional allowShardKeyUpdatesWithoutFullShardKeyInQuery, - BSONObjBuilder* result); + static void _runCommandWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + BSONObjBuilder* result); + + // Two-phase protocol to run an explain for a findAndModify command without a shard key or _id. + static void _runExplainWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + const BSONObj& cmdObj, + ExplainOptions::Verbosity verbosity, + BSONObjBuilder* result); // Command invocation to be used if a shard key is specified or the collection is unsharded. static void _runCommand(OperationContext* opCtx, diff --git a/src/mongo/s/commands/cluster_find_and_modify_test.cpp b/src/mongo/s/commands/cluster_find_and_modify_test.cpp index be0d97ddb4954..71d92d6c46c75 100644 --- a/src/mongo/s/commands/cluster_find_and_modify_test.cpp +++ b/src/mongo/s/commands/cluster_find_and_modify_test.cpp @@ -28,9 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/commands/cluster_command_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/s/commands/cluster_find_cmd.h b/src/mongo/s/commands/cluster_find_cmd.h index e8ed843575af4..a5673d31ad810 100644 --- a/src/mongo/s/commands/cluster_find_cmd.h +++ b/src/mongo/s/commands/cluster_find_cmd.h @@ -38,7 +38,9 @@ #include "mongo/db/fle_crud.h" #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/query/cursor_response.h" -#include "mongo/db/query/telemetry.h" +#include "mongo/db/query/query_shape.h" +#include "mongo/db/query/query_stats.h" +#include "mongo/db/query/query_stats_find_key_generator.h" #include "mongo/db/stats/counters.h" #include "mongo/db/views/resolved_view.h" #include "mongo/rpc/get_status_from_command_result.h" @@ -105,7 +107,8 @@ class ClusterFindCmdBase final : public Command { Invocation(const ClusterFindCmdBase* definition, const OpMsgRequest& request) : CommandInvocation(definition), _request(request), - _dbName(request.getValidatedTenantId(), request.getDatabase()) {} + _dbName(DatabaseNameUtil::deserialize(request.getValidatedTenantId(), + request.getDatabase())) {} private: bool supportsWriteConcern() const override { @@ -151,17 +154,19 @@ class ClusterFindCmdBase final : public Command { Timer timer; const auto cri = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo( - opCtx, *findCommand->getNamespaceOrUUID().nss())); + opCtx, findCommand->getNamespaceOrUUID().nss())); shardResponses = scatterGatherVersionedTargetByRoutingTable( opCtx, - findCommand->getNamespaceOrUUID().nss()->db(), - *findCommand->getNamespaceOrUUID().nss(), + findCommand->getNamespaceOrUUID().nss().db(), + findCommand->getNamespaceOrUUID().nss(), cri, explainCmd, ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, findCommand->getFilter(), - findCommand->getCollation()); + findCommand->getCollation(), + findCommand->getLet(), + findCommand->getLegacyRuntimeConstants()); 
millisElapsed = timer.millis(); const char* mongosStageName = @@ -182,7 +187,7 @@ class ClusterFindCmdBase final : public Command { auto aggCmdOnView = uassertStatusOK(query_request_helper::asAggregationCommand(*findCommand)); auto viewAggregationCommand = - OpMsgRequest::fromDBAndBody(_dbName.db(), aggCmdOnView).body; + OpMsgRequestBuilder::create(_dbName, aggCmdOnView).body; auto aggRequestOnView = aggregation_request_helper::parseFromBSON( opCtx, @@ -209,21 +214,25 @@ class ClusterFindCmdBase final : public Command { Impl::checkCanRunHere(opCtx); - auto findCommand = _parseCmdObjectToFindCommandRequest(opCtx, ns(), _request.body); - - const boost::intrusive_ptr expCtx; - auto cq = uassertStatusOK( - CanonicalQuery::canonicalize(opCtx, - std::move(findCommand), - false, /* isExplain */ - expCtx, - ExtensionsCallbackNoop(), - MatchExpressionParser::kAllowAllSpecialFeatures)); + auto&& parsedFindResult = uassertStatusOK(parsed_find_command::parse( + opCtx, + _parseCmdObjectToFindCommandRequest(opCtx, ns(), _request.body), + ExtensionsCallbackNoop(), + MatchExpressionParser::kAllowAllSpecialFeatures)); + auto& expCtx = parsedFindResult.first; + auto& parsedFind = parsedFindResult.second; if (!_didDoFLERewrite) { - telemetry::registerFindRequest( - cq->getFindCommandRequest(), cq->nss(), opCtx, cq->getExpCtx()); + BSONObj queryShape = query_shape::extractQueryShape( + *parsedFind, + SerializationOptions::kRepresentativeQueryShapeSerializeOptions, + expCtx); + query_stats::registerRequest(opCtx, expCtx->ns, [&]() { + return std::make_unique( + expCtx, *parsedFind, std::move(queryShape)); + }); } + auto cq = uassertStatusOK(CanonicalQuery::canonicalize(expCtx, std::move(parsedFind))); try { // Do the work to generate the first batch of results. This blocks waiting to get @@ -252,7 +261,7 @@ class ClusterFindCmdBase final : public Command { auto aggCmdOnView = uassertStatusOK( query_request_helper::asAggregationCommand(cq->getFindCommandRequest())); auto viewAggregationCommand = - OpMsgRequest::fromDBAndBody(_dbName.db(), aggCmdOnView).body; + OpMsgRequestBuilder::create(_dbName, aggCmdOnView).body; auto aggRequestOnView = aggregation_request_helper::parseFromBSON( opCtx, @@ -299,11 +308,11 @@ class ClusterFindCmdBase final : public Command { !findCommand->getLegacyRuntimeConstants()); if (shouldDoFLERewrite(findCommand)) { - invariant(findCommand->getNamespaceOrUUID().nss()); + invariant(findCommand->getNamespaceOrUUID().isNamespaceString()); if (!findCommand->getEncryptionInformation()->getCrudProcessed().value_or(false)) { processFLEFindS( - opCtx, findCommand->getNamespaceOrUUID().nss().get(), findCommand.get()); + opCtx, findCommand->getNamespaceOrUUID().nss(), findCommand.get()); _didDoFLERewrite = true; } diff --git a/src/mongo/s/commands/cluster_find_cmd_s.cpp b/src/mongo/s/commands/cluster_find_cmd_s.cpp index a46cedaf47ea7..cde53f6e75717 100644 --- a/src/mongo/s/commands/cluster_find_cmd_s.cpp +++ b/src/mongo/s/commands/cluster_find_cmd_s.cpp @@ -27,7 +27,22 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/auth/authorization_checks.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/parsed_find_command.h" #include "mongo/s/commands/cluster_find_cmd.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_find_test.cpp b/src/mongo/s/commands/cluster_find_test.cpp index 8d0dc6792d464..f96484766b519 100644 --- a/src/mongo/s/commands/cluster_find_test.cpp +++ b/src/mongo/s/commands/cluster_find_test.cpp @@ -27,10 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/commands/cluster_command_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_fle2_cleanup_cmd.cpp b/src/mongo/s/commands/cluster_fle2_cleanup_cmd.cpp new file mode 100644 index 0000000000000..d202ff8708964 --- /dev/null +++ b/src/mongo/s/commands/cluster_fle2_cleanup_cmd.cpp @@ -0,0 +1,147 @@ +/** + * Copyright (C) 2022-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/commands/fle2_cleanup_gen.h" +#include "mongo/db/curop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" + +namespace mongo { +namespace { + +class ClusterCleanupStructuredEncryptionDataCmd final + : public TypedCommand { +public: + using Request = CleanupStructuredEncryptionData; + using Reply = CleanupStructuredEncryptionData::Reply; + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + Reply typedRun(OperationContext* opCtx); + + private: + bool supportsWriteConcern() const final { + return false; + } + + void doCheckAuthorization(OperationContext* opCtx) const final { + auto* as = AuthorizationSession::get(opCtx->getClient()); + uassert(ErrorCodes::Unauthorized, + "Not authorized to cleanup structured encryption data", + as->isAuthorizedForActionsOnResource( + ResourcePattern::forExactNamespace(request().getNamespace()), + ActionType::cleanupStructuredEncryptionData)); + } + + NamespaceString ns() const final { + return request().getNamespace(); + } + }; + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const final { + return BasicCommand::AllowedOnSecondary::kNever; + } + + bool adminOnly() const final { + return false; + } + + std::set sensitiveFieldNames() const final { + return {CleanupStructuredEncryptionData::kCleanupTokensFieldName}; + } +} clusterCleanupStructuredEncryptionDataCmd; + +using Cmd = ClusterCleanupStructuredEncryptionDataCmd; +Cmd::Reply Cmd::Invocation::typedRun(OperationContext* opCtx) { + CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation = true; + + auto nss = request().getNamespace(); + const auto dbInfo = + uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, nss.db())); + + // Rewrite command verb to _shardSvrCleanupStructuredEnccryptionData. 
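The loop that follows copies every element of the original command and renames only the command field itself, turning cleanupStructuredEncryptionData into _shardsvrCleanupStructuredEncryptionData before forwarding it to the database primary. The same rename-and-copy pattern, sketched with string pairs instead of BSON elements (the placeholder values are illustrative):

    #include <iostream>
    #include <string>
    #include <utility>
    #include <vector>

    using Field = std::pair<std::string, std::string>;

    // Copy every field of the original command, renaming only the command verb.
    std::vector<Field> rewriteVerb(const std::vector<Field>& cmd,
                                   const std::string& oldVerb,
                                   const std::string& newVerb) {
        std::vector<Field> out;
        for (const auto& [name, value] : cmd) {
            out.emplace_back(name == oldVerb ? newVerb : name, value);
        }
        return out;
    }

    int main() {
        std::vector<Field> cmd{{"cleanupStructuredEncryptionData", "coll"},
                               {"cleanupTokens", "<tokens>"}};
        for (const auto& [name, value] : rewriteVerb(cmd,
                                                     "cleanupStructuredEncryptionData",
                                                     "_shardsvrCleanupStructuredEncryptionData")) {
            std::cout << name << ": " << value << "\n";
        }
    }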
+ auto cmd = request().toBSON({}); + BSONObjBuilder req; + for (const auto& elem : cmd) { + if (elem.fieldNameStringData() == Request::kCommandName) { + req.appendAs(elem, "_shardsvrCleanupStructuredEncryptionData"); + } else { + req.append(elem); + } + } + + auto response = uassertStatusOK( + executeCommandAgainstDatabasePrimary( + opCtx, + nss.db(), + dbInfo, + CommandHelpers::appendMajorityWriteConcern(req.obj(), opCtx->getWriteConcern()), + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + Shard::RetryPolicy::kIdempotent) + .swResponse); + + BSONObjBuilder result; + CommandHelpers::filterCommandReplyForPassthrough(response.data, &result); + + auto reply = result.obj(); + uassertStatusOK(getStatusFromCommandResult(reply)); + return Reply::parse(IDLParserContext{Request::kCommandName}, reply.removeField("ok"_sd)); +} + +} // namespace +} // namespace mongo diff --git a/src/mongo/s/commands/cluster_fle2_compact_cmd.cpp b/src/mongo/s/commands/cluster_fle2_compact_cmd.cpp index 9fba98c5d5614..b9e9d182cda72 100644 --- a/src/mongo/s/commands/cluster_fle2_compact_cmd.cpp +++ b/src/mongo/s/commands/cluster_fle2_compact_cmd.cpp @@ -27,13 +27,37 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/fle2_compact_gen.h" +#include "mongo/db/curop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_fle2_get_count_info_cmd.cpp b/src/mongo/s/commands/cluster_fle2_get_count_info_cmd.cpp index 91b6aa8cbfc8c..5fa33f2627692 100644 --- a/src/mongo/s/commands/cluster_fle2_get_count_info_cmd.cpp +++ b/src/mongo/s/commands/cluster_fle2_get_count_info_cmd.cpp @@ -27,13 +27,39 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/fle2_get_count_info_command_gen.h" +#include "mongo/db/curop.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/read_concern_support_result.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -86,8 +112,9 @@ class ClusterGetQueryableEncryptionCountInfoCmd final auto* as = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, "Not authorized to read tags", - as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } NamespaceString ns() const final { diff --git a/src/mongo/s/commands/cluster_fsync_cmd.cpp b/src/mongo/s/commands/cluster_fsync_cmd.cpp index b79193f2b65e5..c299faa72ed06 100644 --- a/src/mongo/s/commands/cluster_fsync_cmd.cpp +++ b/src/mongo/s/commands/cluster_fsync_cmd.cpp @@ -27,15 +27,32 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/read_preference.h" -#include "mongo/client/remote_command_targeter.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -61,11 +78,11 @@ class FsyncCommand : public ErrmsgCommandDeprecated { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::fsync)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::fsync)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/s/commands/cluster_ftdc_commands.cpp b/src/mongo/s/commands/cluster_ftdc_commands.cpp index 31fe274fb5ec3..611df112da3aa 100644 --- a/src/mongo/s/commands/cluster_ftdc_commands.cpp +++ b/src/mongo/s/commands/cluster_ftdc_commands.cpp @@ -27,16 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/base/init.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/ftdc/controller.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" namespace mongo { namespace { @@ -66,22 +74,15 @@ class GetDiagnosticDataCommand final : public ErrmsgCommandDeprecated { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::serverStatus)) { - return Status(ErrorCodes::Unauthorized, "Unauthorized"); - } - - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::replSetGetStatus)) { - return Status(ErrorCodes::Unauthorized, "Unauthorized"); - } - - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::connPoolStats)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + {ActionType::serverStatus, + ActionType::replSetGetStatus, + ActionType::connPoolStats})) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } diff --git a/src/mongo/s/commands/cluster_get_cluster_parameter_cmd.cpp b/src/mongo/s/commands/cluster_get_cluster_parameter_cmd.cpp index d0539898b961c..c4071790b1956 100644 --- a/src/mongo/s/commands/cluster_get_cluster_parameter_cmd.cpp +++ b/src/mongo/s/commands/cluster_get_cluster_parameter_cmd.cpp @@ -28,19 +28,25 @@ */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/db/audit.h" +#include "mongo/base/error_codes.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" #include "mongo/db/commands/get_cluster_parameter_invocation.h" -#include "mongo/idl/cluster_server_parameter_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" #include "mongo/idl/cluster_server_parameter_refresher.h" -#include "mongo/logv2/log.h" -#include "mongo/s/cluster_commands_helpers.h" -#include "mongo/s/grid.h" -#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -73,17 +79,11 @@ class GetClusterParameterCmd final : public TypedCommand Reply typedRun(OperationContext* opCtx) { GetClusterParameterInvocation invocation; - if (gFeatureFlagClusterWideConfigM2.isEnabled( - serverGlobalParams.featureCompatibility)) { - // Refresh cached cluster server parameters via a majority read from the 
config - // servers. - uassertStatusOK( - ClusterServerParameterRefresher::get(opCtx)->refreshParameters(opCtx)); + // Refresh cached cluster server parameters via a majority read from the config + // servers. + uassertStatusOK(ClusterServerParameterRefresher::get(opCtx)->refreshParameters(opCtx)); - return invocation.getCachedParameters(opCtx, request()); - } - - return invocation.getDurableParameters(opCtx, request()); + return invocation.getCachedParameters(opCtx, request()); } private: @@ -96,7 +96,8 @@ class GetClusterParameterCmd final : public TypedCommand uassert(ErrorCodes::Unauthorized, "Not authorized to retrieve cluster parameters", authzSession->isAuthorizedForPrivilege(Privilege{ - ResourcePattern::forClusterResource(), ActionType::getClusterParameter})); + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::getClusterParameter})); } NamespaceString ns() const override { diff --git a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp index 01f790218111b..d278a2426eaa4 100644 --- a/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp +++ b/src/mongo/s/commands/cluster_get_shard_version_cmd.cpp @@ -28,15 +28,44 @@ */ -#include "mongo/db/auth/action_set.h" +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache.h" -#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/sharding_index_catalog_cache.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -95,22 +124,24 @@ class GetShardVersion : public BasicCommand { if (nss.coll().empty()) { // Return the database's information. - auto cachedDbInfo = uassertStatusOK(catalogCache->getDatabase(opCtx, nss.ns())); + auto cachedDbInfo = uassertStatusOK( + catalogCache->getDatabase(opCtx, NamespaceStringUtil::serialize(nss))); result.append("primaryShard", cachedDbInfo->getPrimary().toString()); - result.append("shardingEnabled", cachedDbInfo->getSharded()); result.append("version", cachedDbInfo->getVersion().toBSON()); } else { // Return the collection's information. 
const auto [cm, sii] = uassertStatusOK(catalogCache->getCollectionRoutingInfo(opCtx, nss)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Collection " << nss.ns() << " is not sharded.", + str::stream() << "Collection " << nss.toStringForErrorMsg() + << " is not sharded.", cm.isSharded()); result.appendTimestamp("version", cm.getVersion().toLong()); result.append("versionEpoch", cm.getVersion().epoch()); result.append("versionTimestamp", cm.getVersion().getTimestamp()); - + // Added to the result bson if the max bson size is exceeded + BSONObjBuilder exceededSizeElt(BSON("exceededSize" << true)); if (sii) { result.append("indexVersion", sii->getCollectionIndexes().indexVersion()); @@ -131,7 +162,8 @@ class GetShardVersion : public BasicCommand { chunkBB.append(chunk.getMin()); chunkBB.append(chunk.getMax()); chunkBB.done(); - if (chunksArrBuilder.len() + result.len() > BSONObjMaxUserSize) { + if (chunksArrBuilder.len() + result.len() + exceededSizeElt.len() > + BSONObjMaxUserSize) { exceedsSizeLimit = true; } } @@ -146,7 +178,8 @@ class GetShardVersion : public BasicCommand { BSONArrayBuilder indexesArrBuilder; sii->forEachIndex([&](const auto& index) { BSONObjBuilder indexB(index.toBSON()); - if (result.len() + indexesArrBuilder.len() + indexB.len() > + if (result.len() + exceededSizeElt.len() + indexesArrBuilder.len() + + indexB.len() > BSONObjMaxUserSize) { exceedsSizeLimit = true; } else { @@ -159,6 +192,10 @@ class GetShardVersion : public BasicCommand { result.append("indexes", indexesArrBuilder.arr()); } } + + if (exceedsSizeLimit) { + result.appendElements(exceededSizeElt.done()); + } } } diff --git a/src/mongo/s/commands/cluster_getmore_cmd_s.cpp b/src/mongo/s/commands/cluster_getmore_cmd_s.cpp index 63a04722d93f5..c8d6879271541 100644 --- a/src/mongo/s/commands/cluster_getmore_cmd_s.cpp +++ b/src/mongo/s/commands/cluster_getmore_cmd_s.cpp @@ -27,7 +27,18 @@ * it in the license file. */ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/auth/authorization_checks.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/s/commands/cluster_getmore_cmd.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_hello_cmd.cpp b/src/mongo/s/commands/cluster_hello_cmd.cpp index e5f7ceabb2807..7e7d8c9c1ac42 100644 --- a/src/mongo/s/commands/cluster_hello_cmd.cpp +++ b/src/mongo/s/commands/cluster_hello_cmd.cpp @@ -27,30 +27,68 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/audit.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/read_concern_support_result.h" #include "mongo/db/repl/hello_auth.h" #include "mongo/db/repl/hello_gen.h" -#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/db/wire_version.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/rpc/reply_builder_interface.h" #include "mongo/rpc/rewrite_state_change_errors.h" #include "mongo/rpc/topology_version_gen.h" #include "mongo/s/load_balancer_support.h" +#include "mongo/s/mongos_hello_response.h" #include "mongo/s/mongos_topology_coordinator.h" +#include "mongo/transport/hello_metrics.h" #include "mongo/transport/message_compressor_manager.h" -#include "mongo/util/net/socket_utils.h" -#include "mongo/util/version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/string_map.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/cluster_index_filter_cmd.cpp b/src/mongo/s/commands/cluster_index_filter_cmd.cpp index f2a1bf18dba35..28811d0c37664 100644 --- a/src/mongo/s/commands/cluster_index_filter_cmd.cpp +++ b/src/mongo/s/commands/cluster_index_filter_cmd.cpp @@ -27,12 +27,42 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -101,7 +131,9 @@ class ClusterIndexFilterCmd : public BasicCommand { ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, query, - CollationSpec::kSimpleSpec); + CollationSpec::kSimpleSpec, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); // Sort shard responses by shard id. std::sort(shardResponses.begin(), diff --git a/src/mongo/s/commands/cluster_insert_test.cpp b/src/mongo/s/commands/cluster_insert_test.cpp index 3284df2aa54c4..87d06925e5660 100644 --- a/src/mongo/s/commands/cluster_insert_test.cpp +++ b/src/mongo/s/commands/cluster_insert_test.cpp @@ -28,10 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/commands.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/commands/cluster_command_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp b/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp index d962e62e5102a..16a0663871786 100644 --- a/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp +++ b/src/mongo/s/commands/cluster_is_db_grid_cmd.cpp @@ -27,9 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/util/net/socket_utils.h" namespace mongo { diff --git a/src/mongo/s/commands/cluster_kill_op.cpp b/src/mongo/s/commands/cluster_kill_op.cpp index be1e4c5e0f754..7b9b4195ca5aa 100644 --- a/src/mongo/s/commands/cluster_kill_op.cpp +++ b/src/mongo/s/commands/cluster_kill_op.cpp @@ -27,23 +27,33 @@ * it in the license file. 
*/ +#include #include +#include "mongo/base/parse_number.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/bson/bsontypes.h" #include "mongo/client/connpool.h" +#include "mongo/client/dbclient_base.h" #include "mongo/db/api_parameters.h" -#include "mongo/db/audit.h" -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/commands.h" #include "mongo/db/commands/kill_op_cmd_base.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/metadata.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/cluster_killcursors_cmd.cpp b/src/mongo/s/commands/cluster_killcursors_cmd.cpp index 31faadc80f228..9c2cf828fe570 100644 --- a/src/mongo/s/commands/cluster_killcursors_cmd.cpp +++ b/src/mongo/s/commands/cluster_killcursors_cmd.cpp @@ -27,14 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" #include "mongo/db/auth/authorization_checks.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/commands.h" #include "mongo/db/commands/killcursors_common.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/kill_cursors_gen.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_cursor_manager.h" -#include "mongo/s/transaction_router.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_killoperations_cmd.cpp b/src/mongo/s/commands/cluster_killoperations_cmd.cpp index e1923c71bf450..6dd69bcebd8c0 100644 --- a/src/mongo/s/commands/cluster_killoperations_cmd.cpp +++ b/src/mongo/s/commands/cluster_killoperations_cmd.cpp @@ -27,13 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/db/commands.h" #include "mongo/db/commands/killoperations_common.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_cursor_manager.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/cluster_list_collections_cmd.cpp b/src/mongo/s/commands/cluster_list_collections_cmd.cpp index e89a3e7c70a69..e3088257a6151 100644 --- a/src/mongo/s/commands/cluster_list_collections_cmd.cpp +++ b/src/mongo/s/commands/cluster_list_collections_cmd.cpp @@ -28,16 +28,57 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" #include "mongo/bson/mutable/algorithm.h" #include "mongo/bson/mutable/document.h" +#include "mongo/bson/mutable/element.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/user.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/test_commands_enabled.h" +#include "mongo/db/database_name.h" #include "mongo/db/list_collections_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/grid.h" #include "mongo/s/query/store_possible_cursor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -57,7 +98,7 @@ bool cursorCommandPassthroughPrimaryShard(OperationContext* opCtx, // TODO SERVER-67411 change executeCommandAgainstDatabasePrimary to take in DatabaseName auto response = executeCommandAgainstDatabasePrimary( opCtx, - dbName.toStringWithTenantId(), + DatabaseNameUtil::serialize(dbName), dbInfo, CommandHelpers::filterCommandRequestForPassthrough(cmdObj), ReadPreferenceSetting::get(opCtx), @@ -110,7 +151,7 @@ BSONObj rewriteCommandForListingOwnCollections(OperationContext* opCtx, // DB resource grants all non-system collections, so filter out system collections. This is done // inside the $or, since some system collections might be granted specific privileges. 
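The branches that follow assemble an $or filter describing what the caller may see: db-wide privileges expose every non-system collection (the ^(?!system\.) regex), system-buckets privileges expose system.buckets.*, and exact-namespace privileges add individual collection names. A standalone sketch of the resulting visibility rule, simplified to prefix checks instead of regexes (all identifiers are illustrative):

    #include <iostream>
    #include <set>
    #include <string>

    // Decide whether a collection name passes the rewritten "own collections" filter.
    bool visibleToUser(const std::string& coll,
                       bool hasDbWideAccess,
                       bool hasSystemBucketsAccess,
                       const std::set<std::string>& explicitlyPrivileged) {
        if (hasDbWideAccess && coll.rfind("system.", 0) != 0) {
            return true;  // db-wide access grants all non-system collections
        }
        if (hasSystemBucketsAccess && coll.rfind("system.buckets.", 0) == 0) {
            return true;  // system-buckets access grants system.buckets.*
        }
        return explicitlyPrivileged.count(coll) > 0;  // specific grants always count
    }

    int main() {
        std::set<std::string> priv{"system.js"};
        std::cout << visibleToUser("orders", true, false, priv) << "\n";             // 1
        std::cout << visibleToUser("system.views", true, false, priv) << "\n";       // 0
        std::cout << visibleToUser("system.buckets.temps", false, true, priv) << "\n";  // 1
        std::cout << visibleToUser("system.js", false, false, priv) << "\n";         // 1
    }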
if (authzSession->isAuthorizedForAnyActionOnResource( - ResourcePattern::forDatabaseName(dbName.toStringWithTenantId()))) { + ResourcePattern::forDatabaseName(dbName))) { mutablebson::Element systemCollectionsFilter = rewrittenCmdObj.makeElementObject( "", BSON("name" << BSON("$regex" << BSONRegEx("^(?!system\\.)")))); uassertStatusOK(newFilterOr.pushBack(systemCollectionsFilter)); @@ -119,8 +160,9 @@ BSONObj rewriteCommandForListingOwnCollections(OperationContext* opCtx, // system_buckets DB resource grants all system_buckets.* collections so create a filter to // include them if (authzSession->isAuthorizedForAnyActionOnResource( - ResourcePattern::forAnySystemBucketsInDatabase(dbName.toStringWithTenantId())) || - authzSession->isAuthorizedForAnyActionOnResource(ResourcePattern::forAnySystemBuckets())) { + ResourcePattern::forAnySystemBucketsInDatabase(dbName)) || + authzSession->isAuthorizedForAnyActionOnResource( + ResourcePattern::forAnySystemBuckets(dbName.tenantId()))) { mutablebson::Element systemCollectionsFilter = rewrittenCmdObj.makeElementObject( "", BSON("name" << BSON("$regex" << BSONRegEx("^system\\.buckets\\.")))); uassertStatusOK(newFilterOr.pushBack(systemCollectionsFilter)); @@ -131,14 +173,12 @@ BSONObj rewriteCommandForListingOwnCollections(OperationContext* opCtx, if (auto authUser = authzSession->getAuthenticatedUser()) { for (const auto& [resource, privilege] : authUser.value()->getPrivileges()) { if (resource.isCollectionPattern() || - (resource.isExactNamespacePattern() && - resource.databaseToMatch() == dbName.toStringWithTenantId())) { + (resource.isExactNamespacePattern() && resource.dbNameToMatch() == dbName)) { collectionNames.emplace(resource.collectionToMatch().toString()); } if (resource.isAnySystemBucketsCollectionInAnyDB() || - (resource.isExactSystemBucketsCollection() && - resource.databaseToMatch() == dbName.toStringWithTenantId())) { + (resource.isExactSystemBucketsCollection() && resource.dbNameToMatch() == dbName)) { collectionNames.emplace(systemBucketsDot + resource.collectionToMatch().toString()); } } @@ -213,7 +253,10 @@ class CmdListCollections : public BasicCommandWithRequestParsergetClient()); - return authzSession->checkAuthorizedToListCollections(dbName.db(), cmdObj).getStatus(); + const bool apiStrict = APIParameters::get(opCtx).getAPIStrict().value_or(false); + IDLParserContext ctxt("ListCollection", apiStrict, dbName.tenantId()); + auto request = ListCollections::parse(ctxt, cmdObj); + return authzSession->checkAuthorizedToListCollections(request).getStatus(); } bool runWithRequestParser(OperationContext* opCtx, @@ -234,8 +277,8 @@ class CmdListCollections : public BasicCommandWithRequestParsercatalogCache()->getDatabase(opCtx, dbName.toStringWithTenantId()); + auto dbInfoStatus = Grid::get(opCtx)->catalogCache()->getDatabase( + opCtx, DatabaseNameUtil::serializeForCatalog(dbName)); if (!dbInfoStatus.isOK()) { appendEmptyResultSet(opCtx, output, dbInfoStatus.getStatus(), nss); return true; @@ -250,9 +293,8 @@ class CmdListCollections : public BasicCommandWithRequestParsergetClient()) - ->checkAuthorizedToListCollections(dbName.toStringWithTenantId(), cmdObj))); + uassertStatusOK(AuthorizationSession::get(opCtx->getClient()) + ->checkAuthorizedToListCollections(requestParser.request()))); } void validateResult(const BSONObj& result) final { diff --git a/src/mongo/s/commands/cluster_list_databases_cmd.cpp b/src/mongo/s/commands/cluster_list_databases_cmd.cpp index 66448312301ec..838952bb57bc4 100644 --- 
a/src/mongo/s/commands/cluster_list_databases_cmd.cpp +++ b/src/mongo/s/commands/cluster_list_databases_cmd.cpp @@ -27,22 +27,45 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include #include +#include +#include +#include +#include #include -#include "mongo/bson/util/bson_extract.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/read_preference.h" -#include "mongo/client/remote_command_targeter.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/list_databases_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" -#include "mongo/s/commands/strategy.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -84,20 +107,21 @@ class ListDatabasesCmd final : public ListDatabasesCmdVersion1Gen& authDB) { - const bool mayListAllDatabases = as->isAuthorizedForActionsOnResource( - ResourcePattern::forClusterResource(), ActionType::listDatabases); - if (authDB) { - uassert(ErrorCodes::Unauthorized, - "Insufficient permissions to list all databases", - authDB.value() || mayListAllDatabases); - return authDB.value(); - } + const bool authorizedDatabases = + ([as, tenantId = cmd.getDbName().tenantId()](const boost::optional& authDB) { + const bool mayListAllDatabases = as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(tenantId), ActionType::listDatabases); + if (authDB) { + uassert(ErrorCodes::Unauthorized, + "Insufficient permissions to list all databases", + authDB.value() || mayListAllDatabases); + return authDB.value(); + } - // By default, list all databases if we can, otherwise - // only those we're allowed to find on. - return !mayListAllDatabases; - })(cmd.getAuthorizedDatabases()); + // By default, list all databases if we can, otherwise + // only those we're allowed to find on. + return !mayListAllDatabases; + })(cmd.getAuthorizedDatabases()); auto const shardRegistry = Grid::get(opCtx)->shardRegistry(); @@ -166,27 +190,29 @@ class ListDatabasesCmd final : public ListDatabasesCmdVersion1Gen items; + const auto& tenantId = cmd.getDbName().tenantId(); for (const auto& sizeEntry : sizes) { - const auto& name = sizeEntry.first; + const auto dbname = DatabaseNameUtil::deserialize(tenantId, sizeEntry.first); const long long size = sizeEntry.second; // Skip the local database, since all shards have their own independent local - if (name == DatabaseName::kLocal.db()) + if (dbname.isLocalDB()) { continue; + } - if (authorizedDatabases && !as->isAuthorizedForAnyActionOnAnyResourceInDB(name)) { + if (authorizedDatabases && !as->isAuthorizedForAnyActionOnAnyResourceInDB(dbname)) { // We don't have listDatabases on the cluser or find on this database. 
continue; } - ListDatabasesReplyItem item(name); + ListDatabasesReplyItem item(sizeEntry.first); if (!nameOnly) { item.setSizeOnDisk(size); item.setEmpty(size == 1); - item.setShards(dbShardInfo[name]->obj()); + item.setShards(dbShardInfo[sizeEntry.first]->obj()); uassert(ErrorCodes::BadValue, - str::stream() << "Found negative 'sizeOnDisk' in: " << name, + str::stream() << "Found negative 'sizeOnDisk' in: " << dbname, size >= 0); totalSize += size; diff --git a/src/mongo/s/commands/cluster_list_indexes_cmd.cpp b/src/mongo/s/commands/cluster_list_indexes_cmd.cpp index 214d8f7b614c4..ae7a7b09c9d3d 100644 --- a/src/mongo/s/commands/cluster_list_indexes_cmd.cpp +++ b/src/mongo/s/commands/cluster_list_indexes_cmd.cpp @@ -28,16 +28,44 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/list_indexes_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/timeseries/timeseries_commands_conversion_helper.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/collection_routing_info_targeter.h" +#include "mongo/s/grid.h" #include "mongo/s/query/store_possible_cursor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -98,17 +126,17 @@ class CmdListIndexes final : public ListIndexesCmdVersion1Gen { } NamespaceString ns() const final { - const auto& nss = request().getNamespaceOrUUID().nss(); - uassert( - ErrorCodes::BadValue, "Mongos requires a namespace for listIndexes command", nss); - return nss.value(); + uassert(ErrorCodes::BadValue, + "Mongos requires a namespace for listIndexes command", + request().getNamespaceOrUUID().isNamespaceString()); + return request().getNamespaceOrUUID().nss(); } void doCheckAuthorization(OperationContext* opCtx) const final { AuthorizationSession* authzSession = AuthorizationSession::get(opCtx->getClient()); uassert(ErrorCodes::Unauthorized, str::stream() << "Not authorized to list indexes on collection:" - << ns().toString(), + << ns().toStringForErrorMsg(), authzSession->isAuthorizedForActionsOnResource( ResourcePattern::forExactNamespace(ns()), ActionType::listIndexes)); } diff --git a/src/mongo/s/commands/cluster_list_shards_cmd.cpp b/src/mongo/s/commands/cluster_list_shards_cmd.cpp index e7b8462696377..a013fe4908bcc 100644 --- a/src/mongo/s/commands/cluster_list_shards_cmd.cpp +++ b/src/mongo/s/commands/cluster_list_shards_cmd.cpp @@ -27,12 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -58,11 +73,11 @@ class ListShardsCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::listShards)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::listShards)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/s/commands/cluster_map_reduce_agg.cpp b/src/mongo/s/commands/cluster_map_reduce_agg.cpp index 05622808c78a4..67ae1a305052f 100644 --- a/src/mongo/s/commands/cluster_map_reduce_agg.cpp +++ b/src/mongo/s/commands/cluster_map_reduce_agg.cpp @@ -29,27 +29,60 @@ #include "mongo/s/commands/cluster_map_reduce_agg.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" #include "mongo/db/catalog/document_validation.h" -#include "mongo/db/client.h" #include "mongo/db/commands.h" -#include "mongo/db/commands/map_reduce_agg.h" #include "mongo/db/commands/map_reduce_gen.h" +#include "mongo/db/commands/map_reduce_global_variable_scope.h" +#include "mongo/db/commands/map_reduce_out_options.h" #include "mongo/db/commands/mr_common.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/mongos_process_interface.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" +#include "mongo/db/pipeline/variables.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/explain_common.h" #include "mongo/db/query/map_reduce_output_format.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/catalog_cache.h" -#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/chunk_manager.h" 
+#include "mongo/s/grid.h" +#include "mongo/s/query/cluster_aggregate.h" #include "mongo/s/query/cluster_aggregation_planner.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/debug_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/cluster_map_reduce_agg.h b/src/mongo/s/commands/cluster_map_reduce_agg.h index ea0745991c7f6..481d37f58de74 100644 --- a/src/mongo/s/commands/cluster_map_reduce_agg.h +++ b/src/mongo/s/commands/cluster_map_reduce_agg.h @@ -27,8 +27,11 @@ * it in the license file. */ +#include + #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/explain_options.h" diff --git a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp index ac95b4d825d0b..b906649719e73 100644 --- a/src/mongo/s/commands/cluster_map_reduce_cmd.cpp +++ b/src/mongo/s/commands/cluster_map_reduce_cmd.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands.h" #include "mongo/db/commands/map_reduce_command_base.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/service_context.h" #include "mongo/s/commands/cluster_map_reduce_agg.h" namespace mongo { diff --git a/src/mongo/s/commands/cluster_merge_all_chunks_on_shard_cmd.cpp b/src/mongo/s/commands/cluster_merge_all_chunks_on_shard_cmd.cpp index 4fe9865a22bcd..3f1b14dfe3511 100644 --- a/src/mongo/s/commands/cluster_merge_all_chunks_on_shard_cmd.cpp +++ b/src/mongo/s/commands/cluster_merge_all_chunks_on_shard_cmd.cpp @@ -28,11 +28,30 @@ */ +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/merge_chunk_request_gen.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp index 4812db80d8ced..335b764cdbe16 100644 --- a/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp +++ b/src/mongo/s/commands/cluster_merge_chunks_cmd.cpp @@ -27,14 +27,45 @@ * it in the license file. 
*/ +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/field_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -94,7 +125,8 @@ class ClusterMergeChunksCommand : public ErrmsgCommandDeprecated { const BSONObj& cmdObj, std::string& errmsg, BSONObjBuilder& result) override { - const NamespaceString nss(parseNs({boost::none, dbname}, cmdObj)); + const NamespaceString nss( + parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), cmdObj)); std::vector bounds; if (!FieldParser::extract(cmdObj, boundsField, &bounds, &errmsg)) { diff --git a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp index b22bd76fe0b31..fea0abe3c33fe 100644 --- a/src/mongo/s/commands/cluster_move_chunk_cmd.cpp +++ b/src/mongo/s/commands/cluster_move_chunk_cmd.cpp @@ -27,23 +27,51 @@ * it in the license file. 
*/ -#include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types.h" #include "mongo/db/commands.h" -#include "mongo/db/write_concern_options.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/s/balancer_configuration.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" -#include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/commands/cluster_commands_gen.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/migration_secondary_throttle_options.h" #include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/cluster_move_primary_cmd.cpp b/src/mongo/s/commands/cluster_move_primary_cmd.cpp index e401f4811ea18..f38e4bcd55b20 100644 --- a/src/mongo/s/commands/cluster_move_primary_cmd.cpp +++ b/src/mongo/s/commands/cluster_move_primary_cmd.cpp @@ -27,104 +27,113 @@ * it in the license file. 
*/ +#include +#include -#include "mongo/platform/basic.h" - +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/logv2/log.h" -#include "mongo/s/catalog/type_shard.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/move_primary_gen.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - namespace mongo { - -using std::string; - namespace { -class MoveDatabasePrimaryCommand : public BasicCommand { +class MovePrimaryCommand final : public TypedCommand { public: - MoveDatabasePrimaryCommand() : BasicCommand("movePrimary", "moveprimary") {} - - AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { - return AllowedOnSecondary::kAlways; - } - - virtual bool adminOnly() const { - return true; - } + using Request = MovePrimary; + + MovePrimaryCommand() : TypedCommand(MovePrimary::kCommandName, MovePrimary::kCommandAlias) {} + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + void typedRun(OperationContext* opCtx) { + const auto& dbNss = ns(); + const auto& toShardId = request().getTo(); + + ScopeGuard onBlockExit([&] { + // Invalidate the routing table cache entry for this database in order to reload it + // at the next access, even if sending the command to the primary shard fails (e.g., + // NetworkError). 
+ Grid::get(opCtx)->catalogCache()->purgeDatabase(dbNss.db()); + }); + + const auto dbInfo = + uassertStatusOK(Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, dbNss.db())); + + ShardsvrMovePrimary shardsvrRequest{dbNss.dbName()}; + shardsvrRequest.setDbName(DatabaseName::kAdmin); + shardsvrRequest.getMovePrimaryRequestBase().setTo(toShardId); + + const auto commandResponse = executeCommandAgainstDatabasePrimary( + opCtx, + DatabaseName::kAdmin.toString(), + dbInfo, + CommandHelpers::appendMajorityWriteConcern(shardsvrRequest.toBSON({}), + opCtx->getWriteConcern()), + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + Shard::RetryPolicy::kIdempotent); + + const auto remoteResponse = uassertStatusOK(commandResponse.swResponse); + uassertStatusOK(getStatusFromCommandResult(remoteResponse.data)); + } - virtual bool supportsWriteConcern(const BSONObj& cmd) const override { - return true; - } + private: + NamespaceString ns() const override { + return NamespaceString(request().getCommandParameter()); + } - std::string help() const override { - return " example: { moveprimary : 'foo' , to : 'localhost:9999' }"; - } + bool supportsWriteConcern() const override { + return true; + } - Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& cmdObj) const { - if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource( - ResourcePattern::forDatabaseName(parseNs(dbName, cmdObj).db()), - ActionType::moveChunk)) { - return Status(ErrorCodes::Unauthorized, "Unauthorized"); + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert( + ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forDatabaseName(ns().dbName()), ActionType::moveChunk)); } + }; - return Status::OK(); +private: + bool adminOnly() const override { + return true; } - NamespaceString parseNs(const DatabaseName& dbName, const BSONObj& cmdObj) const override { - const auto nsElt = cmdObj.firstElement(); - uassert(ErrorCodes::InvalidNamespace, - "'movePrimary' must be of type String", - nsElt.type() == BSONType::String); - return NamespaceStringUtil::parseNamespaceFromRequest(dbName.tenantId(), nsElt.str()); + bool skipApiVersionCheck() const override { + return true; } - virtual bool run(OperationContext* opCtx, - const DatabaseName& dbName, - const BSONObj& cmdObj, - BSONObjBuilder& result) { - auto request = MovePrimary::parse(IDLParserContext("MovePrimary"), cmdObj); - const string db = parseNs(dbName, cmdObj).dbName().db(); - const StringData toShard(request.getTo()); - - // Invalidate the routing table cache entry for this database so that we reload the - // collection the next time it's accessed, even if we receive a failure, e.g. NetworkError. 
- ON_BLOCK_EXIT([opCtx, db] { Grid::get(opCtx)->catalogCache()->purgeDatabase(db); }); - - ShardMovePrimary movePrimaryRequest; - movePrimaryRequest.set_shardsvrMovePrimary(NamespaceString(db)); - movePrimaryRequest.setTo(toShard); - - auto catalogCache = Grid::get(opCtx)->catalogCache(); - const auto dbInfo = uassertStatusOK(catalogCache->getDatabase(opCtx, db)); - - auto cmdResponse = executeCommandAgainstDatabasePrimary( - opCtx, - "admin", - dbInfo, - CommandHelpers::appendMajorityWriteConcern( - CommandHelpers::appendGenericCommandArgs(cmdObj, movePrimaryRequest.toBSON())), - ReadPreferenceSetting(ReadPreference::PrimaryOnly), - Shard::RetryPolicy::kIdempotent); - - const auto remoteResponse = uassertStatusOK(cmdResponse.swResponse); - CommandHelpers::filterCommandReplyForPassthrough(remoteResponse.data, &result); - return true; + AllowedOnSecondary secondaryAllowed(ServiceContext* context) const override { + return AllowedOnSecondary::kNever; } -} clusterMovePrimaryCmd; + std::string help() const override { + return "Reassigns the primary shard holding all un-sharded collections in the database"; + } +} movePrimaryCommand; } // namespace } // namespace mongo diff --git a/src/mongo/s/commands/cluster_move_range_cmd.cpp b/src/mongo/s/commands/cluster_move_range_cmd.cpp index d5f432a8141fc..f9bb9dfbf0c78 100644 --- a/src/mongo/s/commands/cluster_move_range_cmd.cpp +++ b/src/mongo/s/commands/cluster_move_range_cmd.cpp @@ -28,11 +28,31 @@ */ +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/move_range_request_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -103,7 +123,7 @@ class ClusterMoveRangeCommand final : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), + ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns()), ActionType::moveChunk)); } }; diff --git a/src/mongo/s/commands/cluster_multicast_cmd.cpp b/src/mongo/s/commands/cluster_multicast_cmd.cpp index 0d82a3d85a105..92efd34ba3908 100644 --- a/src/mongo/s/commands/cluster_multicast_cmd.cpp +++ b/src/mongo/s/commands/cluster_multicast_cmd.cpp @@ -27,19 +27,39 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include #include -#include "mongo/base/init.h" +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/executor/async_multicaster.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor_pool.h" -#include "mongo/s/catalog/type_shard.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/commands/cluster_commands_gen.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" #include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_netstat_cmd.cpp b/src/mongo/s/commands/cluster_netstat_cmd.cpp index ca740074eeaa5..4cb4881bd2a28 100644 --- a/src/mongo/s/commands/cluster_netstat_cmd.cpp +++ b/src/mongo/s/commands/cluster_netstat_cmd.cpp @@ -27,8 +27,21 @@ * it in the license file. */ +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" @@ -56,11 +69,11 @@ class NetStatCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::netstat)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::netstat)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/s/commands/cluster_oplog_note_cmd.cpp b/src/mongo/s/commands/cluster_oplog_note_cmd.cpp index 4ba10bd0dcaa5..f97eb2af43a08 100644 --- a/src/mongo/s/commands/cluster_oplog_note_cmd.cpp +++ b/src/mongo/s/commands/cluster_oplog_note_cmd.cpp @@ -28,20 +28,28 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/commands.h" - -#include "mongo/bson/util/bson_extract.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/auth/resource_pattern.h" -#include "mongo/db/concurrency/d_concurrency.h" -#include "mongo/db/curop.h" -#include 
"mongo/db/namespace_string.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -73,11 +81,12 @@ class AppendOplogNoteCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::appendOplogNote)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::appendOplogNote)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -90,7 +99,7 @@ class AppendOplogNoteCmd : public BasicCommand { auto shardResponses = scatterGatherUnversionedTargetAllShards( opCtx, - dbName.toStringWithTenantId(), + DatabaseNameUtil::serialize(dbName), applyReadWriteConcern( opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)), ReadPreferenceSetting::get(opCtx), diff --git a/src/mongo/s/commands/cluster_pipeline_cmd.h b/src/mongo/s/commands/cluster_pipeline_cmd.h index 8ac60460ad8eb..ad6126e9e9574 100644 --- a/src/mongo/s/commands/cluster_pipeline_cmd.h +++ b/src/mongo/s/commands/cluster_pipeline_cmd.h @@ -161,7 +161,7 @@ class ClusterPipelineCommandBase final : public Command { } void doCheckAuthorization(OperationContext* opCtx) const override { - Impl::doCheckAuthorization(opCtx, _privileges); + Impl::doCheckAuthorization(opCtx, _request, _privileges); } NamespaceString ns() const override { diff --git a/src/mongo/s/commands/cluster_pipeline_cmd_s.cpp b/src/mongo/s/commands/cluster_pipeline_cmd_s.cpp index 8930e532fa6e0..01adeb16c8377 100644 --- a/src/mongo/s/commands/cluster_pipeline_cmd_s.cpp +++ b/src/mongo/s/commands/cluster_pipeline_cmd_s.cpp @@ -27,7 +27,27 @@ * it in the license file. 
*/ +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/commands.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/commands/cluster_pipeline_cmd.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" namespace mongo { namespace { @@ -42,7 +62,9 @@ struct ClusterPipelineCommandS { return kApiVersions1; } - static void doCheckAuthorization(OperationContext* opCtx, const PrivilegeVector& privileges) { + static void doCheckAuthorization(OperationContext* opCtx, + const OpMsgRequest&, + const PrivilegeVector& privileges) { uassert( ErrorCodes::Unauthorized, "unauthorized", @@ -64,7 +86,8 @@ struct ClusterPipelineCommandS { bool apiStrict) { return aggregation_request_helper::parseFromBSON( opCtx, - DatabaseName(opMsgRequest.getValidatedTenantId(), opMsgRequest.getDatabase()), + DatabaseNameUtil::deserialize(opMsgRequest.getValidatedTenantId(), + opMsgRequest.getDatabase()), opMsgRequest.body, explainVerbosity, apiStrict); diff --git a/src/mongo/s/commands/cluster_plan_cache_clear_cmd.cpp b/src/mongo/s/commands/cluster_plan_cache_clear_cmd.cpp index b1e9ff0106266..8333da16f6baa 100644 --- a/src/mongo/s/commands/cluster_plan_cache_clear_cmd.cpp +++ b/src/mongo/s/commands/cluster_plan_cache_clear_cmd.cpp @@ -27,15 +27,42 @@ * it in the license file. */ +#include +#include +#include + +#include +#include + #include "mongo/base/error_codes.h" -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" -#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -105,7 +132,9 @@ bool ClusterPlanCacheClearCmd::run(OperationContext* opCtx, ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, query, - CollationSpec::kSimpleSpec); + CollationSpec::kSimpleSpec, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); // Sort shard responses by shard id. 
std::sort(shardResponses.begin(), diff --git a/src/mongo/s/commands/cluster_profile_cmd.cpp b/src/mongo/s/commands/cluster_profile_cmd.cpp index 8a58b49745196..da7acd3fc988d 100644 --- a/src/mongo/s/commands/cluster_profile_cmd.cpp +++ b/src/mongo/s/commands/cluster_profile_cmd.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/catalog/collection_catalog.h" -#include "mongo/db/commands.h" #include "mongo/db/commands/profile_common.h" #include "mongo/db/commands/profile_gen.h" #include "mongo/db/commands/set_profiling_filter_globally_cmd.h" +#include "mongo/db/concurrency/locker.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" #include "mongo/db/profile_filter_impl.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -67,7 +74,7 @@ class ProfileCmd : public ProfileCmdBase { "values", profilingLevel == -1 || profilingLevel == 0); - const auto oldSettings = CollectionCatalog::get(opCtx)->getDatabaseProfileSettings(dbName); + auto oldSettings = CollectionCatalog::get(opCtx)->getDatabaseProfileSettings(dbName); if (auto filterOrUnset = request.getFilter()) { auto newSettings = oldSettings; diff --git a/src/mongo/s/commands/cluster_query_without_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_query_without_shard_key_cmd.cpp index 28aaaab81040a..473ed37f390e4 100644 --- a/src/mongo/s/commands/cluster_query_without_shard_key_cmd.cpp +++ b/src/mongo/s/commands/cluster_query_without_shard_key_cmd.cpp @@ -26,34 +26,97 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/explain_gen.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" -#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_project.h" #include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include 
"mongo/db/timeseries/timeseries_options.h" #include "mongo/db/update/update_util.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/commands/cluster_explain.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" +#include "mongo/s/query/async_results_merger_params_gen.h" +#include "mongo/s/query/cluster_query_result.h" +#include "mongo/s/query/router_exec_stage.h" #include "mongo/s/query/router_stage_merge.h" #include "mongo/s/query/router_stage_remove_metadata_fields.h" #include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" #include "mongo/s/shard_key_pattern_query_util.h" -#include "mongo/s/write_ops/batch_write_op.h" +#include "mongo/s/type_collection_common_types_gen.h" #include "mongo/s/write_ops/write_without_shard_key_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand namespace mongo { namespace { +MONGO_FAIL_POINT_DEFINE(hangBeforeMetadataRefreshClusterQuery); constexpr auto kIdFieldName = "_id"_sd; struct ParsedCommandInfo { @@ -63,6 +126,7 @@ struct ParsedCommandInfo { bool upsert = false; int stmtId = kUninitializedStmtId; boost::optional updateRequest; + boost::optional hint; }; struct AsyncRequestSenderResponseData { @@ -73,14 +137,26 @@ struct AsyncRequestSenderResponseData { : shardId(shardId), cursorResponse(std::move(cursorResponse)) {} }; +// Computes the final sort pattern if necessary metadata is needed. +BSONObj parseSortPattern(OperationContext* opCtx, + NamespaceString nss, + const ParsedCommandInfo& parsedInfo) { + std::unique_ptr collator; + if (!parsedInfo.collation.isEmpty()) { + collator = uassertStatusOK(CollatorFactoryInterface::get(opCtx->getServiceContext()) + ->makeFromBSON(parsedInfo.collation)); + } + auto expCtx = make_intrusive(opCtx, std::move(collator), nss); + auto sortPattern = SortPattern(parsedInfo.sort.value_or(BSONObj()), expCtx); + return sortPattern.serialize(SortPattern::SortKeySerialization::kForSortKeyMerging).toBson(); +} + std::set getShardsToTarget(OperationContext* opCtx, const ChunkManager& cm, NamespaceString nss, const ParsedCommandInfo& parsedInfo) { std::set allShardsContainingChunksForNs; - uassert(ErrorCodes::InvalidOptions, - "_clusterQueryWithoutShardKey can only be run against sharded collections", - cm.isSharded()); + uassert(ErrorCodes::NamespaceNotSharded, "The collection was dropped.", cm.isSharded()); auto query = parsedInfo.query; auto collation = parsedInfo.collation; @@ -100,6 +176,38 @@ std::set getShardsToTarget(OperationContext* opCtx, return allShardsContainingChunksForNs; } +// TODO: SERVER-75760 Remove this validation since mongos should be doing this upfront. 
+void validateFindAndModifyCommand(const write_ops::FindAndModifyCommandRequest& request) { + uassert(ErrorCodes::FailedToParse, + "Either an update or remove=true must be specified", + request.getRemove().value_or(false) || request.getUpdate()); + if (request.getRemove().value_or(false)) { + uassert(ErrorCodes::FailedToParse, + "Cannot specify both an 'update' and 'remove'=true", + !request.getUpdate()); + + uassert(ErrorCodes::FailedToParse, + "Cannot specify both 'upsert'=true and 'remove'=true ", + !request.getUpsert() || !*request.getUpsert()); + + uassert( + ErrorCodes::FailedToParse, + "Cannot specify both 'new'=true and 'remove'=true; 'remove' always returns the deleted " + "document", + !request.getNew() || !*request.getNew()); + + uassert(ErrorCodes::FailedToParse, + "Cannot specify 'arrayFilters' and 'remove'=true", + !request.getArrayFilters()); + } + + if (request.getUpdate() && + request.getUpdate()->type() == write_ops::UpdateModification::Type::kPipeline && + request.getArrayFilters()) { + uasserted(ErrorCodes::FailedToParse, "Cannot specify 'arrayFilters' and a pipeline update"); + } +} + BSONObj createAggregateCmdObj( OperationContext* opCtx, const ParsedCommandInfo& parsedInfo, @@ -119,6 +227,10 @@ BSONObj createAggregateCmdObj( aggregate.setStmtId(parsedInfo.stmtId); } + if (parsedInfo.hint) { + aggregate.setHint(parsedInfo.hint); + } + aggregate.setPipeline([&]() { std::vector pipeline; if (timeseriesFields) { @@ -130,8 +242,6 @@ BSONObj createAggregateCmdObj( } pipeline.emplace_back(BSON(DocumentSourceMatch::kStageName << parsedInfo.query)); if (parsedInfo.sort) { - // TODO (SERVER-73083): skip the sort option for 'findAndModify' calls on time-series - // collections. pipeline.emplace_back(BSON(DocumentSourceSort::kStageName << *parsedInfo.sort)); } pipeline.emplace_back(BSON(DocumentSourceLimit::kStageName << 1)); @@ -142,14 +252,14 @@ BSONObj createAggregateCmdObj( return aggregate.toBSON({}); } -ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, - StringData commandName, - const BSONObj& writeCmdObj) { +ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, const BSONObj& writeCmdObj) { + auto commandName = writeCmdObj.firstElementFieldNameStringData(); ParsedCommandInfo parsedInfo; if (commandName == write_ops::UpdateCommandRequest::kCommandName) { auto updateRequest = write_ops::UpdateCommandRequest::parse( IDLParserContext("_clusterQueryWithoutShardKeyForUpdate"), writeCmdObj); parsedInfo.query = updateRequest.getUpdates().front().getQ(); + parsedInfo.hint = updateRequest.getUpdates().front().getHint(); // In the batch write path, when the request is reconstructed to be passed to // the two phase write protocol, only the stmtIds field is used. 
@@ -159,6 +269,7 @@ ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, if ((parsedInfo.upsert = updateRequest.getUpdates().front().getUpsert())) { parsedInfo.updateRequest = updateRequest.getUpdates().front(); + parsedInfo.updateRequest->setNamespaceString(updateRequest.getNamespace()); } if (auto parsedCollation = updateRequest.getUpdates().front().getCollation()) { @@ -168,6 +279,7 @@ ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, auto deleteRequest = write_ops::DeleteCommandRequest::parse( IDLParserContext("_clusterQueryWithoutShardKeyForDelete"), writeCmdObj); parsedInfo.query = deleteRequest.getDeletes().front().getQ(); + parsedInfo.hint = deleteRequest.getDeletes().front().getHint(); // In the batch write path, when the request is reconstructed to be passed to // the two phase write protocol, only the stmtIds field is used. @@ -182,9 +294,15 @@ ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, commandName == write_ops::FindAndModifyCommandRequest::kCommandAlias) { auto findAndModifyRequest = write_ops::FindAndModifyCommandRequest::parse( IDLParserContext("_clusterQueryWithoutShardKeyFindAndModify"), writeCmdObj); + validateFindAndModifyCommand(findAndModifyRequest); + parsedInfo.query = findAndModifyRequest.getQuery(); parsedInfo.stmtId = findAndModifyRequest.getStmtId().value_or(kUninitializedStmtId); - parsedInfo.sort = findAndModifyRequest.getSort(); + parsedInfo.hint = findAndModifyRequest.getHint(); + parsedInfo.sort = + findAndModifyRequest.getSort() && !findAndModifyRequest.getSort()->isEmpty() + ? findAndModifyRequest.getSort() + : boost::none; if ((parsedInfo.upsert = findAndModifyRequest.getUpsert().get_value_or(false))) { parsedInfo.updateRequest = UpdateRequest{}; @@ -197,7 +315,8 @@ ParsedCommandInfo parseWriteCommand(OperationContext* opCtx, parsedInfo.collation = *parsedCollation; } } else { - uasserted(ErrorCodes::InvalidOptions, "Not a supported write command"); + uasserted(ErrorCodes::InvalidOptions, + str::stream() << commandName << " is not a supported batch write command"); } return parsedInfo; } @@ -220,24 +339,33 @@ class ClusterQueryWithoutShardKeyCmd : public TypedCommandgetSpec().toBSON(); + } + } + const auto& timeseriesFields = (cri.cm.isSharded() && cri.cm.getTimeseriesFields().has_value()) ? 
cri.cm.getTimeseriesFields() @@ -307,7 +435,12 @@ class ClusterQueryWithoutShardKeyCmd : public TypedCommand root = std::make_unique( opCtx, @@ -316,7 +449,7 @@ class ClusterQueryWithoutShardKeyCmd : public TypedCommand( - opCtx, std::move(root), StringDataSet{AsyncResultsMerger::kSortKeyField}); + opCtx, std::move(root), Document::allMetadataFieldNames); } if (auto nextResponse = uassertStatusOK(root->next()); !nextResponse.isEOF()) { @@ -327,15 +460,93 @@ class ClusterQueryWithoutShardKeyCmd : public TypedCommandgetTimeseriesOptions()) + : boost::none, + cri.cm.getDefaultCollator()); + res.setTargetDoc(upsertDoc); res.setUpsertRequired(true); + + if (timeseriesFields) { + res.setUserUpsertDocForTimeseries(userUpsertDoc); + } } return res; } private: + void explain(OperationContext* opCtx, + ExplainOptions::Verbosity verbosity, + rpc::ReplyBuilderInterface* result) override { + const auto writeCmdObj = [&] { + const auto explainCmdObj = request().getWriteCmd(); + const auto opMsgRequestExplainCmd = + OpMsgRequest::fromDBAndBody(ns().db(), explainCmdObj); + auto explainRequest = ExplainCommandRequest::parse( + IDLParserContext("_clusterQueryWithoutShardKeyExplain"), + opMsgRequestExplainCmd.body); + return explainRequest.getCommandParameter().getOwned(); + }(); + + // Get all shard ids for shards that have chunks in the desired namespace. + const NamespaceString nss = + CommandHelpers::parseNsCollectionRequired(ns().dbName(), writeCmdObj); + const auto cri = uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, nss)); + + // Parse into OpMsgRequest to append the $db field, which is required for command + // parsing. + const auto opMsgRequestWriteCmd = OpMsgRequest::fromDBAndBody(ns().db(), writeCmdObj); + auto parsedInfoFromRequest = parseWriteCommand(opCtx, opMsgRequestWriteCmd.body); + + auto allShardsContainingChunksForNs = + getShardsToTarget(opCtx, cri.cm, nss, parsedInfoFromRequest); + auto cmdObj = createAggregateCmdObj(opCtx, parsedInfoFromRequest, nss, boost::none); + + const auto aggExplainCmdObj = ClusterExplain::wrapAsExplain(cmdObj, verbosity); + + std::vector requests; + for (const auto& shardId : allShardsContainingChunksForNs) { + requests.emplace_back( + shardId, appendShardVersion(aggExplainCmdObj, cri.getShardVersion(shardId))); + } + + Timer timer; + MultiStatementTransactionRequestsSender ars( + opCtx, + Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), + request().getDbName(), + requests, + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + Shard::RetryPolicy::kNoRetry); + + ShardId shardId; + std::vector responses; + + while (!ars.done()) { + auto response = ars.next(); + uassertStatusOK(response.swResponse); + responses.push_back(response); + shardId = response.shardId; + } + + const auto millisElapsed = timer.millis(); + + auto bodyBuilder = result->getBodyBuilder(); + uassertStatusOK(ClusterExplain::buildExplainResult( + opCtx, + responses, + parsedInfoFromRequest.sort ? 
ClusterExplain::kMergeSortFromShards + : ClusterExplain::kMergeFromShards, + millisElapsed, + writeCmdObj, + &bodyBuilder)); + bodyBuilder.append("targetShardId", shardId); + } + NamespaceString ns() const override { return NamespaceString(request().getDbName()); } @@ -348,8 +559,9 @@ class ClusterQueryWithoutShardKeyCmd : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; diff --git a/src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp index 78578a16e1585..f5f0ed4d8ce1c 100644 --- a/src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp +++ b/src/mongo/s/commands/cluster_refine_collection_shard_key_cmd.cpp @@ -28,15 +28,38 @@ */ +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/commands/refine_collection_shard_key_gen.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp index 28b3e549d2202..a5d44bd72a8d2 100644 --- a/src/mongo/s/commands/cluster_remove_shard_cmd.cpp +++ b/src/mongo/s/commands/cluster_remove_shard_cmd.cpp @@ -28,15 +28,30 @@ */ -#include "mongo/platform/basic.h" - +#include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -68,8 +83,8 @@ class RemoveShardCmd : public BasicCommand { const DatabaseName& dbName, const BSONObj& cmdObj) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if 
(!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::removeShard)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::removeShard)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp index 38b9fd4f22e53..1d2380ff3e94e 100644 --- a/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp +++ b/src/mongo/s/commands/cluster_remove_shard_from_zone_cmd.cpp @@ -27,16 +27,31 @@ * it in the license file. */ -#include - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/write_concern_options.h" -#include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/remove_shard_from_zone_request_type.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -80,12 +95,13 @@ class RemoveShardFromZoneCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::enableSharding)) { + if (as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::enableSharding)) { return Status::OK(); } diff --git a/src/mongo/s/commands/cluster_rename_collection_cmd.cpp b/src/mongo/s/commands/cluster_rename_collection_cmd.cpp index 11c3de553c7b3..b8158dad767d0 100644 --- a/src/mongo/s/commands/cluster_rename_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_rename_collection_cmd.cpp @@ -28,16 +28,43 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" #include "mongo/db/commands.h" #include "mongo/db/commands/rename_collection_common.h" #include "mongo/db/commands/rename_collection_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/type_database_gen.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include 
"mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/shard_version.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/read_through_cache.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -66,17 +93,32 @@ class RenameCollectionCmd final : public TypedCommand { auto toNss = request().getTo(); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid source namespace: " << fromNss.ns(), + str::stream() << "Invalid source namespace: " << fromNss.toStringForErrorMsg(), fromNss.isValid()); uassert(ErrorCodes::InvalidNamespace, - str::stream() << "Invalid target namespace: " << toNss.ns(), + str::stream() << "Invalid target namespace: " << toNss.toStringForErrorMsg(), toNss.isValid()); uassert(ErrorCodes::IllegalOperation, "Can't rename a collection to itself", fromNss != toNss); + if (fromNss.isTimeseriesBucketsCollection()) { + uassert(ErrorCodes::IllegalOperation, + "Renaming system.buckets collections is not allowed", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(fromNss.tenantId()), + ActionType::setUserWriteBlockMode)); + + uassert(ErrorCodes::IllegalOperation, + str::stream() + << "Cannot rename time-series buckets collection {" << fromNss.ns() + << "} to a non-time-series buckets namespace {" << toNss.ns() << "}", + toNss.isTimeseriesBucketsCollection()); + } + RenameCollectionRequest renameCollReq(request().getTo()); renameCollReq.setStayTemp(request().getStayTemp()); renameCollReq.setExpectedSourceUUID(request().getCollectionUUID()); @@ -91,12 +133,13 @@ class RenameCollectionCmd final : public TypedCommand { request().getDropTarget()); ShardsvrRenameCollection renameCollRequest(fromNss); - renameCollRequest.setDbName(fromNss.db()); + renameCollRequest.setDbName(fromNss.dbName()); renameCollRequest.setRenameCollectionRequest(renameCollReq); renameCollRequest.setAllowEncryptedCollectionRename( AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::setUserWriteBlockMode)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(fromNss.tenantId()), + ActionType::setUserWriteBlockMode)); auto catalogCache = Grid::get(opCtx)->catalogCache(); auto swDbInfo = Grid::get(opCtx)->catalogCache()->getDatabase(opCtx, fromNss.db()); @@ -139,7 +182,7 @@ class RenameCollectionCmd final : public TypedCommand { void doCheckAuthorization(OperationContext* opCtx) const override { uassertStatusOK(rename_collection::checkAuthForRenameCollectionCommand( - opCtx->getClient(), ns().db().toString(), request().toBSON(BSONObj()))); + opCtx->getClient(), request())); } bool supportsWriteConcern() const override { diff --git a/src/mongo/s/commands/cluster_repair_sharded_collection_chunks_history_cmd.cpp b/src/mongo/s/commands/cluster_repair_sharded_collection_chunks_history_cmd.cpp index 5daff9c7d5ce1..cf88722941c7d 100644 --- a/src/mongo/s/commands/cluster_repair_sharded_collection_chunks_history_cmd.cpp +++ b/src/mongo/s/commands/cluster_repair_sharded_collection_chunks_history_cmd.cpp @@ -29,16 +29,30 @@ #define MONGO_LOG_DEFAULT_COMPONENT ::mongo::logger::LogComponent::kSharding -#include "mongo/platform/basic.h" - -#include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" 
+#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_manager.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace { @@ -97,8 +111,8 @@ class RepairShardedCollectionChunksHistoryCommand : public BasicCommand { BSONObjBuilder& result) override { const NamespaceString nss{parseNs(dbName, cmdObj)}; - BSONObjBuilder cmdBuilder( - BSON("_configsvrRepairShardedCollectionChunksHistory" << nss.ns())); + BSONObjBuilder cmdBuilder(BSON("_configsvrRepairShardedCollectionChunksHistory" + << NamespaceStringUtil::serialize(nss))); if (cmdObj["force"].booleanSafe()) cmdBuilder.appendBool("force", true); diff --git a/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp b/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp index 96c9d4e73f3bc..5981027a57a9b 100644 --- a/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp +++ b/src/mongo/s/commands/cluster_repl_set_get_status_cmd.cpp @@ -27,11 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/not_primary_error_tracker.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_reset_placement_history_cmd.cpp b/src/mongo/s/commands/cluster_reset_placement_history_cmd.cpp new file mode 100644 index 0000000000000..3dc3cbd973b84 --- /dev/null +++ b/src/mongo/s/commands/cluster_reset_placement_history_cmd.cpp @@ -0,0 +1,120 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. 
If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/grid.h" +#include "mongo/s/request_types/placement_history_commands_gen.h" +#include "mongo/util/assert_util.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand + + +namespace mongo { +namespace { + +class ClusterResetPlacementHistoryCommand final + : public TypedCommand { +public: + using Request = ClusterResetPlacementHistory; + + std::string help() const override { + return "Invoke adminCommand({resetPlacementHistory: 1}) to reset the log of namespace " + "placement changes. Meant to be used only under the guidance of tech support."; + } + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kNever; + } + + bool adminOnly() const override { + return true; + } + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + void typedRun(OperationContext* opCtx) { + ConfigsvrResetPlacementHistory configsvrRequest; + configsvrRequest.setDbName(DatabaseName::kAdmin); + + auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + const auto commandResponse = uassertStatusOK(configShard->runCommand( + opCtx, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + DatabaseName::kAdmin.toString(), + configsvrRequest.toBSON( + BSON(WriteConcernOptions::kWriteConcernField << WriteConcernOptions::Majority)), + Shard::RetryPolicy::kIdempotent)); + uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(commandResponse)); + } + + private: + NamespaceString ns() const override { + return NamespaceString(request().getDbName()); + } + + bool supportsWriteConcern() const override { + return true; + } + + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource(ResourcePattern::forExactNamespace(ns()), + ActionType::moveChunk)); + } + }; +} resetPlacementHistory; + +} // namespace +} // namespace mongo diff --git a/src/mongo/s/commands/cluster_reshard_collection_cmd.cpp b/src/mongo/s/commands/cluster_reshard_collection_cmd.cpp index 3a21438243e43..daaf401cc72c4 100644 --- a/src/mongo/s/commands/cluster_reshard_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_reshard_collection_cmd.cpp @@ -28,18 +28,41 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" 
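Both of the newly added router commands in this change (resetPlacementHistory above, and transitionFromDedicatedConfigServer further down) follow the same forwarding shape: build the `_configsvr*` request, attach a majority write concern, run it against the config shard primary, and collapse the response into a single status with `Shard::CommandResponse::getEffectiveStatus`. A condensed usage sketch, assuming a hypothetical wrapper function named `runOnConfigsvrPrimary`:

```cpp
// Condensed sketch (hypothetical wrapper) of the config server forwarding pattern
// used by the new router commands in this diff; 'cmdWithMajorityWC' is assumed to
// already carry the majority write concern, as the commands above append it.
void runOnConfigsvrPrimary(OperationContext* opCtx, const BSONObj& cmdWithMajorityWC) {
    auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard();
    auto commandResponse = uassertStatusOK(
        configShard->runCommand(opCtx,
                                ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                DatabaseName::kAdmin.toString(),
                                cmdWithMajorityWC,
                                Shard::RetryPolicy::kIdempotent));
    // getEffectiveStatus surfaces either the command error or a writeConcernError
    // as one Status, so a single check covers both failure modes.
    uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(commandResponse));
}
```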
+#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/logv2/log.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/reshard_collection_gen.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/s/resharding/common_types_gen.h" #include "mongo/s/resharding/resharding_feature_flag_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -71,6 +94,24 @@ class ReshardCollectionCmd : public TypedCommand { reshardCollectionRequest.setNumInitialChunks(request().getNumInitialChunks()); reshardCollectionRequest.setCollectionUUID(request().getCollectionUUID()); + if (!resharding::gFeatureFlagReshardingImprovements.isEnabled( + serverGlobalParams.featureCompatibility)) { + uassert( + ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject shardDistribution parameter", + !request().getShardDistribution().has_value()); + uassert( + ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject forceRedistribution parameter", + !request().getForceRedistribution().has_value()); + uassert(ErrorCodes::InvalidOptions, + "Resharding improvements is not enabled, reject reshardingUUID parameter", + !request().getReshardingUUID().has_value()); + } + reshardCollectionRequest.setShardDistribution(request().getShardDistribution()); + reshardCollectionRequest.setForceRedistribution(request().getForceRedistribution()); + reshardCollectionRequest.setReshardingUUID(request().getReshardingUUID()); + shardsvrReshardCollection.setReshardCollectionRequest( std::move(reshardCollectionRequest)); @@ -119,9 +160,7 @@ class ReshardCollectionCmd : public TypedCommand { std::string help() const override { return "Reshard an already sharded collection on a new shard key."; } -}; - -MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(ReshardCollectionCmd, resharding::gFeatureFlagResharding); +} reshardCollectionCmd; } // namespace } // namespace mongo diff --git a/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp b/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp index 180d03b0d08c7..be7fcac46d016 100644 --- a/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp +++ b/src/mongo/s/commands/cluster_rwc_defaults_commands.cpp @@ -28,16 +28,44 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include 
"mongo/db/commands.h" #include "mongo/db/commands/rwc_defaults_commands_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/read_write_concern_defaults_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/stdx/variant.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -94,11 +122,12 @@ class ClusterSetDefaultRWConcernCommand : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::setDefaultRWConcern})) { + ->isAuthorizedForPrivilege( + Privilege{ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::setDefaultRWConcern})) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); @@ -167,8 +196,9 @@ class ClusterGetDefaultRWConcernCommand final uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::getDefaultRWConcern})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::getDefaultRWConcern})); } NamespaceString ns() const override { diff --git a/src/mongo/s/commands/cluster_set_allow_migrations_cmd.cpp b/src/mongo/s/commands/cluster_set_allow_migrations_cmd.cpp index 1f06a6b000885..cf0e12f6bf242 100644 --- a/src/mongo/s/commands/cluster_set_allow_migrations_cmd.cpp +++ b/src/mongo/s/commands/cluster_set_allow_migrations_cmd.cpp @@ -28,14 +28,29 @@ */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/set_allow_migrations_gen.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp b/src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp index 3e26d8101e3a7..f3d8997bc2c17 100644 --- a/src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp +++ 
b/src/mongo/s/commands/cluster_set_cluster_parameter_cmd.cpp @@ -28,14 +28,30 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/cluster_server_parameter_cmds_gen.h" -#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -66,7 +82,7 @@ class SetClusterParameterCmd final : public TypedCommand void typedRun(OperationContext* opCtx) { ConfigsvrSetClusterParameter configsvrSetClusterParameter( request().getCommandParameter()); - configsvrSetClusterParameter.setDbName(ns().db()); + configsvrSetClusterParameter.setDbName(ns().dbName()); const auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); @@ -93,8 +109,9 @@ class SetClusterParameterCmd final : public TypedCommand uassert(ErrorCodes::Unauthorized, "Unauthorized", AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::setClusterParameter})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::setClusterParameter})); } }; } setClusterParameterCmd; diff --git a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp index 2f88401dc3097..3320a739c30c4 100644 --- a/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp +++ b/src/mongo/s/commands/cluster_set_feature_compatibility_version_cmd.cpp @@ -27,17 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" #include "mongo/db/commands/set_feature_compatibility_version_gen.h" +#include "mongo/db/database_name.h" #include "mongo/db/feature_compatibility_version_documentation.h" -#include "mongo/db/feature_compatibility_version_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" #include "mongo/util/version/releases.h" namespace mongo { @@ -99,7 +111,7 @@ class SetFeatureCompatibilityVersionCmd final auto response = uassertStatusOK(configShard->runCommandWithFixedRetryAttempts( opCtx, ReadPreferenceSetting{ReadPreference::PrimaryOnly}, - cmd.getDbName().db(), + DatabaseNameUtil::serialize(cmd.getDbName()), CommandHelpers::appendMajorityWriteConcern(cmd.toBSON({}), opCtx->getWriteConcern()), Shard::RetryPolicy::kIdempotent)); @@ -111,12 +123,12 @@ class SetFeatureCompatibilityVersionCmd final } void doCheckAuthorization(OperationContext* opCtx) const override { - uassert( - ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::setFeatureCompatibilityVersion)); + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::setFeatureCompatibilityVersion)); } bool supportsWriteConcern() const override { diff --git a/src/mongo/s/commands/cluster_set_free_monitoring_cmd.cpp b/src/mongo/s/commands/cluster_set_free_monitoring_cmd.cpp index 0e5853aa42b3f..8bb5514a3f77c 100644 --- a/src/mongo/s/commands/cluster_set_free_monitoring_cmd.cpp +++ b/src/mongo/s/commands/cluster_set_free_monitoring_cmd.cpp @@ -52,11 +52,12 @@ class ClusterSetFreeMonitoring : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { if (!AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::setFreeMonitoring)) { + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::setFreeMonitoring)) { return Status(ErrorCodes::Unauthorized, "Unauthorized"); } return Status::OK(); diff --git a/src/mongo/s/commands/cluster_set_index_commit_quorum_cmd.cpp b/src/mongo/s/commands/cluster_set_index_commit_quorum_cmd.cpp index 3cd1e3983253d..253b72f8be4a6 100644 --- a/src/mongo/s/commands/cluster_set_index_commit_quorum_cmd.cpp +++ b/src/mongo/s/commands/cluster_set_index_commit_quorum_cmd.cpp @@ -28,16 +28,38 @@ */ -#include "mongo/platform/basic.h" - #include #include - +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include 
"mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -116,8 +138,10 @@ class SetIndexCommitQuorumCommand : public BasicCommand { opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)), ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kNotIdempotent, - BSONObj() /* query */, - BSONObj() /* collation */); + BSONObj() /*query*/, + BSONObj() /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); std::string errmsg; const bool ok = diff --git a/src/mongo/s/commands/cluster_set_user_write_block_mode_command.cpp b/src/mongo/s/commands/cluster_set_user_write_block_mode_command.cpp index 60cfe9f972aa3..19bd2abd1712b 100644 --- a/src/mongo/s/commands/cluster_set_user_write_block_mode_command.cpp +++ b/src/mongo/s/commands/cluster_set_user_write_block_mode_command.cpp @@ -28,13 +28,27 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/logv2/log.h" +#include "mongo/db/commands/set_user_write_block_mode_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kAccessControl @@ -91,8 +105,9 @@ class SetUserWriteBlockModeCommand final : public TypedCommandgetClient()) - ->isAuthorizedForPrivilege(Privilege{ResourcePattern::forClusterResource(), - ActionType::setUserWriteBlockMode})); + ->isAuthorizedForPrivilege(Privilege{ + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::setUserWriteBlockMode})); } }; } setUserWriteBlockModeCommand; diff --git a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp index cab047e6ade7d..1707e06f15a2b 100644 --- a/src/mongo/s/commands/cluster_shard_collection_cmd.cpp +++ b/src/mongo/s/commands/cluster_shard_collection_cmd.cpp @@ -28,17 +28,34 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/db/audit.h" -#include "mongo/db/auth/action_set.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include 
"mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/client.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" -#include "mongo/logv2/log.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/timeseries/timeseries_gen.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/cluster_ddl.h" #include "mongo/s/commands/shard_collection_gen.h" +#include "mongo/s/request_types/sharded_ddl_commands_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -112,13 +129,13 @@ class ShardCollectionCmd : public BasicCommand { requestParamsObj.setImplicitlyCreateIndex(shardCollRequest.getImplicitlyCreateIndex()); requestParamsObj.setEnforceUniquenessCheck(shardCollRequest.getEnforceUniquenessCheck()); shardsvrCollRequest.setCreateCollectionRequest(std::move(requestParamsObj)); - shardsvrCollRequest.setDbName(nss.db()); + shardsvrCollRequest.setDbName(nss.dbName()); cluster::createCollection(opCtx, shardsvrCollRequest); // Add only collectionsharded as a response parameter and remove the version to maintain the // same format as before. - result.append("collectionsharded", nss.toString()); + result.append("collectionsharded", NamespaceStringUtil::serialize(nss)); return true; } diff --git a/src/mongo/s/commands/cluster_shutdown_cmd.cpp b/src/mongo/s/commands/cluster_shutdown_cmd.cpp index 0b5633d390fca..32fb70b3957b6 100644 --- a/src/mongo/s/commands/cluster_shutdown_cmd.cpp +++ b/src/mongo/s/commands/cluster_shutdown_cmd.cpp @@ -27,10 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/db/commands.h" #include "mongo/db/commands/shutdown.h" +#include "mongo/db/operation_context.h" +#include "mongo/util/duration.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_split_cmd.cpp b/src/mongo/s/commands/cluster_split_cmd.cpp index adeed2b44cffc..de75ad985c5d1 100644 --- a/src/mongo/s/commands/cluster_split_cmd.cpp +++ b/src/mongo/s/commands/cluster_split_cmd.cpp @@ -27,22 +27,52 @@ * it in the license file. 
*/ +#include #include -#include -#include "mongo/db/auth/action_set.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/field_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/tenant_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" -#include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" #include "mongo/s/shard_util.h" -#include "mongo/s/shard_version_factory.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -60,7 +90,7 @@ BSONObj selectMedianKey(OperationContext* opCtx, const CollectionRoutingInfo& cri, const ChunkRange& chunkRange) { BSONObjBuilder cmd; - cmd.append("splitVector", nss.ns()); + cmd.append("splitVector", NamespaceStringUtil::serialize(nss)); cmd.append("keyPattern", shardKeyPattern.toBSON()); chunkRange.append(&cmd); cmd.appendBool("force", true); @@ -132,7 +162,8 @@ class SplitCollectionCmd : public ErrmsgCommandDeprecated { const BSONObj& cmdObj, std::string& errmsg, BSONObjBuilder& result) override { - const NamespaceString nss(parseNs({boost::none, dbname}, cmdObj)); + const NamespaceString nss( + parseNs(DatabaseNameUtil::deserialize(boost::none, dbname), cmdObj)); const auto cri = uassertStatusOK( Grid::get(opCtx)->catalogCache()->getShardedCollectionRoutingInfoWithRefresh(opCtx, diff --git a/src/mongo/s/commands/cluster_split_vector_cmd.cpp b/src/mongo/s/commands/cluster_split_vector_cmd.cpp index 2a4ccd15cb04e..6896b5cec071e 100644 --- a/src/mongo/s/commands/cluster_split_vector_cmd.cpp +++ b/src/mongo/s/commands/cluster_split_vector_cmd.cpp @@ -28,12 +28,36 @@ */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/s/catalog_cache.h" +#include 
"mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -97,7 +121,7 @@ class SplitVectorCmd : public BasicCommand { auto commandResponse = uassertStatusOK(shard->runCommandWithFixedRetryAttempts( opCtx, ReadPreferenceSetting::get(opCtx), - dbName.toStringWithTenantId(), + DatabaseNameUtil::serialize(dbName), cm.dbVersion().isFixed() ? filteredCmdObj : filteredCmdObjWithVersion, Shard::RetryPolicy::kIdempotent)); diff --git a/src/mongo/s/commands/cluster_transition_from_dedicated_config_server_cmd.cpp b/src/mongo/s/commands/cluster_transition_from_dedicated_config_server_cmd.cpp new file mode 100644 index 0000000000000..215a56358f046 --- /dev/null +++ b/src/mongo/s/commands/cluster_transition_from_dedicated_config_server_cmd.cpp @@ -0,0 +1,127 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/catalog_shard_feature_flag_gen.h" +#include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" +#include "mongo/s/grid.h" +#include "mongo/s/request_types/transition_from_dedicated_config_server_gen.h" +#include "mongo/util/assert_util.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand + +namespace mongo { +namespace { + +const ReadPreferenceSetting kPrimaryOnlyReadPreference{ReadPreference::PrimaryOnly}; + +class TransitionFromDedicatedConfigServerCommand + : public TypedCommand { +public: + using Request = TransitionFromDedicatedConfigServer; + + std::string help() const override { + return "transition from dedicated config server to config shard"; + } + + AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { + return AllowedOnSecondary::kNever; + } + + bool adminOnly() const override { + return true; + } + + class Invocation final : public InvocationBase { + public: + using InvocationBase::InvocationBase; + + void typedRun(OperationContext* opCtx) { + + ConfigsvrTransitionFromDedicatedConfigServer cmdToSend; + cmdToSend.setDbName(DatabaseName::kAdmin); + + auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); + + // Force a reload of this node's shard list cache at the end of this command. + auto cmdResponseWithStatus = configShard->runCommandWithFixedRetryAttempts( + opCtx, + kPrimaryOnlyReadPreference, + "admin", + CommandHelpers::appendMajorityWriteConcern(cmdToSend.toBSON({}), + opCtx->getWriteConcern()), + Shard::RetryPolicy::kIdempotent); + + Grid::get(opCtx)->shardRegistry()->reload(opCtx); + + uassertStatusOK(cmdResponseWithStatus); + uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(cmdResponseWithStatus)); + } + + private: + NamespaceString ns() const override { + return {}; + } + + bool supportsWriteConcern() const override { + return true; + } + + void doCheckAuthorization(OperationContext* opCtx) const override { + uassert(ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::transitionFromDedicatedConfigServer)); + } + }; +}; + +MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(TransitionFromDedicatedConfigServerCommand, + gFeatureFlagTransitionToCatalogShard); + +} // namespace +} // namespace mongo diff --git a/src/mongo/s/commands/cluster_transition_to_catalog_shard_cmd.cpp b/src/mongo/s/commands/cluster_transition_to_catalog_shard_cmd.cpp deleted file mode 100644 index 10ed514dfddc7..0000000000000 --- a/src/mongo/s/commands/cluster_transition_to_catalog_shard_cmd.cpp +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Copyright (C) 2023-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. 
- * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/commands.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/grid.h" -#include "mongo/s/request_types/transition_to_catalog_shard_gen.h" - -#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand - -namespace mongo { -namespace { - -const ReadPreferenceSetting kPrimaryOnlyReadPreference{ReadPreference::PrimaryOnly}; - -class TransitionToCatalogShardCommand : public TypedCommand { -public: - using Request = TransitionToCatalogShard; - - std::string help() const override { - return "transition from dedicated config server to catalog shard"; - } - - AllowedOnSecondary secondaryAllowed(ServiceContext*) const override { - return AllowedOnSecondary::kNever; - } - - bool adminOnly() const override { - return true; - } - - class Invocation final : public InvocationBase { - public: - using InvocationBase::InvocationBase; - - void typedRun(OperationContext* opCtx) { - uassert(7467201, - "The catalog shard feature is disabled", - gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility)); - - ConfigsvrTransitionToCatalogShard cmdToSend; - cmdToSend.setDbName({"admin"}); - - auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); - - // Force a reload of this node's shard list cache at the end of this command. 
- auto cmdResponseWithStatus = configShard->runCommandWithFixedRetryAttempts( - opCtx, - kPrimaryOnlyReadPreference, - "admin", - CommandHelpers::appendMajorityWriteConcern(cmdToSend.toBSON({}), - opCtx->getWriteConcern()), - Shard::RetryPolicy::kIdempotent); - - Grid::get(opCtx)->shardRegistry()->reload(opCtx); - - uassertStatusOK(cmdResponseWithStatus); - uassertStatusOK(Shard::CommandResponse::getEffectiveStatus(cmdResponseWithStatus)); - } - - private: - NamespaceString ns() const override { - return {}; - } - - bool supportsWriteConcern() const override { - return true; - } - - void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::transitionToCatalogShard)); - } - }; -}; - -MONGO_REGISTER_FEATURE_FLAGGED_COMMAND(TransitionToCatalogShardCommand, - gFeatureFlagTransitionToCatalogShard); - -} // namespace -} // namespace mongo diff --git a/src/mongo/s/commands/cluster_transition_to_dedicated_config_server_cmd.cpp b/src/mongo/s/commands/cluster_transition_to_dedicated_config_server_cmd.cpp index dc5001cf5b5f2..6c453f7bbbff3 100644 --- a/src/mongo/s/commands/cluster_transition_to_dedicated_config_server_cmd.cpp +++ b/src/mongo/s/commands/cluster_transition_to_dedicated_config_server_cmd.cpp @@ -27,14 +27,31 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/transition_to_dedicated_config_server_gen.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -64,11 +81,12 @@ class TransitionToDedicatedConfigServerCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::transitionToDedicatedConfigServer)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::transitionToDedicatedConfigServer)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } @@ -81,16 +99,13 @@ class TransitionToDedicatedConfigServerCmd : public BasicCommand { BSONObjBuilder& result) override { // (Ignore FCV check): TODO(SERVER-75389): add why FCV is ignored here. 
uassert(7368401, - "The transition to catalog shard feature is disabled", + "The transition to config shard feature is disabled", gFeatureFlagTransitionToCatalogShard.isEnabledAndIgnoreFCVUnsafe()); - uassert(7467200, - "The catalog shard feature is disabled", - gFeatureFlagCatalogShard.isEnabled(serverGlobalParams.featureCompatibility)); auto configShard = Grid::get(opCtx)->shardRegistry()->getConfigShard(); ConfigsvrTransitionToDedicatedConfig transitionToDedicatedConfigServer; - transitionToDedicatedConfigServer.setDbName({"admin"}); + transitionToDedicatedConfigServer.setDbName(DatabaseName::kAdmin); // Force a reload of this node's shard list cache at the end of this command. auto cmdResponseWithStatus = configShard->runCommandWithFixedRetryAttempts( diff --git a/src/mongo/s/commands/cluster_update_test.cpp b/src/mongo/s/commands/cluster_update_test.cpp index c413f0e96e513..905c0296c1ca7 100644 --- a/src/mongo/s/commands/cluster_update_test.cpp +++ b/src/mongo/s/commands/cluster_update_test.cpp @@ -28,9 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/commands/cluster_command_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp index f3570edf82df3..8cfdaf535e34b 100644 --- a/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp +++ b/src/mongo/s/commands/cluster_update_zone_key_range_cmd.cpp @@ -27,17 +27,31 @@ * it in the license file. 
*/ -#include - -#include "mongo/bson/util/bson_extract.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/write_concern_options.h" -#include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog/type_tags.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/update_zone_key_range_request_type.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -83,12 +97,13 @@ class UpdateZoneKeyRangeCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const final { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::enableSharding)) { + if (as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::enableSharding)) { return Status::OK(); } diff --git a/src/mongo/s/commands/cluster_user_management_commands.cpp b/src/mongo/s/commands/cluster_user_management_commands.cpp index 42512a64b3e61..7d3fc8ec2f1d6 100644 --- a/src/mongo/s/commands/cluster_user_management_commands.cpp +++ b/src/mongo/s/commands/cluster_user_management_commands.cpp @@ -28,25 +28,42 @@ */ -#include "mongo/platform/basic.h" - #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/bson/mutable/document.h" -#include "mongo/config.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/authorization_manager.h" -#include "mongo/db/auth/user_management_commands_parser.h" +#include "mongo/db/auth/user_name.h" #include "mongo/db/commands.h" #include "mongo/db/commands/user_management_commands_common.h" #include "mongo/db/commands/user_management_commands_gen.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/rpc/write_concern_error_detail.h" -#include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kAccessControl @@ -139,7 +156,7 @@ class CmdUMCPassthrough : public TypedCommandcatalogClient()->runUserManagementWriteCommand( opCtx, Request::kCommandName, - cmd.getDbName().db(), + DatabaseNameUtil::serialize(cmd.getDbName()), applyReadWriteConcern( opCtx, this, @@ -240,7 +257,7 @@ class CmdUMCInfo : public TypedCommand> { BSONObjBuilder builder; const bool ok = Grid::get(opCtx)->catalogClient()->runUserManagementReadCommand( opCtx, - cmd.getDbName().toString(), + DatabaseNameUtil::serialize(cmd.getDbName()), applyReadWriteConcern( opCtx, this, diff --git a/src/mongo/s/commands/cluster_validate_cmd.cpp b/src/mongo/s/commands/cluster_validate_cmd.cpp index 3d9e62e8e4532..5c1163371c52b 100644 --- a/src/mongo/s/commands/cluster_validate_cmd.cpp +++ b/src/mongo/s/commands/cluster_validate_cmd.cpp @@ -28,13 +28,38 @@ */ -#include "mongo/platform/basic.h" - +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -91,8 +116,10 @@ class ValidateCmd : public BasicCommand { opCtx, this, CommandHelpers::filterCommandRequestForPassthrough(cmdObj)), ReadPreferenceSetting::get(opCtx), Shard::RetryPolicy::kIdempotent, - {}, - {}); + {} /*query*/, + {} /*collation*/, + boost::none /*letParameters*/, + boost::none /*runtimeConstants*/); Status firstFailedShardStatus = Status::OK(); bool isValid = true; diff --git a/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp b/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp index 33b4584390ad1..dc4494de2bd7e 100644 --- a/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp +++ b/src/mongo/s/commands/cluster_validate_db_metadata_cmd.cpp @@ -28,15 +28,36 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/read_preference.h" #include "mongo/db/commands.h" #include "mongo/db/commands/validate_db_metadata_common.h" #include "mongo/db/commands/validate_db_metadata_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/idl/idl_parser.h" #include "mongo/rpc/get_status_from_command_result.h" +#include 
"mongo/rpc/op_msg.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" -#include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -79,7 +100,7 @@ class ValidateDBMetadataCmd : public TypedCommand { Reply typedRun(OperationContext* opCtx) { auto shardResponses = scatterGatherUnversionedTargetAllShards( opCtx, - request().getDbName().db(), + DatabaseNameUtil::serialize(request().getDbName()), applyReadWriteConcern( opCtx, this, diff --git a/src/mongo/s/commands/cluster_validate_db_metadata_cmd_test.cpp b/src/mongo/s/commands/cluster_validate_db_metadata_cmd_test.cpp index dc8df15db0394..9450e32a3c2e2 100644 --- a/src/mongo/s/commands/cluster_validate_db_metadata_cmd_test.cpp +++ b/src/mongo/s/commands/cluster_validate_db_metadata_cmd_test.cpp @@ -28,9 +28,25 @@ */ -#include "mongo/platform/basic.h" - +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/dbmessage.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/commands/cluster_command_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp b/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp index 6191134aec8b3..2cb677bfc2613 100644 --- a/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp +++ b/src/mongo/s/commands/cluster_whats_my_uri_cmd.cpp @@ -27,10 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace { diff --git a/src/mongo/s/commands/cluster_write_cmd.cpp b/src/mongo/s/commands/cluster_write_cmd.cpp index 169fcc0ca7497..0ddffa5c32e64 100644 --- a/src/mongo/s/commands/cluster_write_cmd.cpp +++ b/src/mongo/s/commands/cluster_write_cmd.cpp @@ -30,31 +30,65 @@ #include "mongo/s/commands/cluster_write_cmd.h" +#include +#include +#include + +#include +#include +#include +#include + #include "mongo/base/error_codes.h" -#include "mongo/client/remote_command_targeter.h" -#include "mongo/db/catalog/document_validation.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/client/read_preference.h" #include "mongo/db/curop.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/fle_crud.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/server_options.h" #include "mongo/db/stats/counters.h" #include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/db/transaction/transaction_api.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/write_concern_error_detail.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/client/num_hosts_targeted_metrics.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/cluster_write.h" #include "mongo/s/collection_routing_info_targeter.h" #include "mongo/s/commands/cluster_explain.h" #include "mongo/s/commands/document_shard_key_update_util.h" #include "mongo/s/grid.h" +#include "mongo/s/multi_statement_transaction_requests_sender.h" +#include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" #include "mongo/s/session_catalog_router.h" #include "mongo/s/transaction_router.h" #include "mongo/s/transaction_router_resource_yielder.h" #include "mongo/s/would_change_owning_shard_exception.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/s/write_ops/batched_upsert_detail.h" +#include "mongo/s/write_ops/write_without_shard_key_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" #include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -163,10 +197,9 @@ void handleWouldChangeOwningShardErrorNonTransaction(OperationContext* opCtx, auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = 
std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); auto txn = txn_api::SyncTransactionWithRetries( - opCtx, sleepInlineExecutor, nullptr /* resourceYielder */, inlineExecutor); + opCtx, executor, nullptr /* resourceYielder */, inlineExecutor); // Shared state for the transaction API use below. struct SharedBlock { @@ -229,6 +262,7 @@ struct UpdateShardKeyResult { UpdateShardKeyResult handleWouldChangeOwningShardErrorTransaction( OperationContext* opCtx, BatchedCommandRequest* request, + const NamespaceString& nss, BatchedCommandResponse* response, const WouldChangeOwningShardInfo& changeInfo) { // Shared state for the transaction API use below. @@ -240,15 +274,11 @@ UpdateShardKeyResult handleWouldChangeOwningShardErrorTransaction( NamespaceString nss; bool updatedShardKey{false}; }; - auto sharedBlock = std::make_shared(changeInfo, request->getNS()); + auto sharedBlock = std::make_shared(changeInfo, nss); auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - auto txn = - txn_api::SyncTransactionWithRetries(opCtx, - sleepInlineExecutor, - TransactionRouterResourceYielder::makeForLocalHandoff(), - inlineExecutor); + auto txn = txn_api::SyncTransactionWithRetries( + opCtx, executor, TransactionRouterResourceYielder::makeForLocalHandoff(), inlineExecutor); try { txn.run(opCtx, @@ -305,6 +335,7 @@ void updateHostsTargetedMetrics(OperationContext* opCtx, bool ClusterWriteCmd::handleWouldChangeOwningShardError(OperationContext* opCtx, BatchedCommandRequest* request, + const NamespaceString& nss, BatchedCommandResponse* response, BatchWriteExecStats stats) { auto txnRouter = TransactionRouter::get(opCtx); @@ -322,7 +353,7 @@ bool ClusterWriteCmd::handleWouldChangeOwningShardError(OperationContext* opCtx, serverGlobalParams.featureCompatibility)) { if (txnRouter) { auto updateResult = handleWouldChangeOwningShardErrorTransaction( - opCtx, request, response, *wouldChangeOwningShardErrorInfo); + opCtx, request, nss, response, *wouldChangeOwningShardErrorInfo); updatedShardKey = updateResult.updatedShardKey; upsertedId = std::move(updateResult.upsertedId); } else { @@ -363,7 +394,7 @@ bool ClusterWriteCmd::handleWouldChangeOwningShardError(OperationContext* opCtx, // Clear the error details from the response object before sending the write again response->unsetErrDetails(); - cluster::write(opCtx, *request, &stats, response); + cluster::write(opCtx, *request, nullptr /* nss */, &stats, response); wouldChangeOwningShardErrorInfo = getWouldChangeOwningShardErrorInfo( opCtx, *request, response, !isRetryableWrite); if (!wouldChangeOwningShardErrorInfo) @@ -374,11 +405,19 @@ bool ClusterWriteCmd::handleWouldChangeOwningShardError(OperationContext* opCtx, // insert a new one. updatedShardKey = wouldChangeOwningShardErrorInfo && documentShardKeyUpdateUtil::updateShardKeyForDocumentLegacy( - opCtx, request->getNS(), *wouldChangeOwningShardErrorInfo); + opCtx, nss, *wouldChangeOwningShardErrorInfo); // If the operation was an upsert, record the _id of the new document. if (updatedShardKey && wouldChangeOwningShardErrorInfo->getShouldUpsert()) { - upsertedId = wouldChangeOwningShardErrorInfo->getPostImage()["_id"].wrap(); + // For timeseries collections, the 'userPostImage' is returned back + // through WouldChangeOwningShardInfo from the old shard as well and it should + // be returned to the user instead of the post-image. 
+ auto postImage = [&] { + return wouldChangeOwningShardErrorInfo->getUserPostImage() + ? *wouldChangeOwningShardErrorInfo->getUserPostImage() + : wouldChangeOwningShardErrorInfo->getPostImage(); + }(); + upsertedId = postImage["_id"].wrap(); } // Commit the transaction @@ -419,11 +458,20 @@ bool ClusterWriteCmd::handleWouldChangeOwningShardError(OperationContext* opCtx, try { // Delete the original document and insert the new one updatedShardKey = documentShardKeyUpdateUtil::updateShardKeyForDocumentLegacy( - opCtx, request->getNS(), *wouldChangeOwningShardErrorInfo); + opCtx, nss, *wouldChangeOwningShardErrorInfo); // If the operation was an upsert, record the _id of the new document. if (updatedShardKey && wouldChangeOwningShardErrorInfo->getShouldUpsert()) { - upsertedId = wouldChangeOwningShardErrorInfo->getPostImage()["_id"].wrap(); + // For timeseries collections, the 'userPostImage' is returned back + // through WouldChangeOwningShardInfo from the old shard as well and it should + // be returned to the user instead of the post-image. + auto postImage = [&] { + return wouldChangeOwningShardErrorInfo->getUserPostImage() + ? *wouldChangeOwningShardErrorInfo->getUserPostImage() + : wouldChangeOwningShardErrorInfo->getPostImage(); + }(); + + upsertedId = postImage["_id"].wrap(); } } catch (const ExceptionFor& ex) { Status status = ex->getKeyPattern().hasField("_id") @@ -534,12 +582,15 @@ bool ClusterWriteCmd::InvocationBase::runImpl(OperationContext* opCtx, batchedRequest.unsetWriteConcern(); } - cluster::write(opCtx, batchedRequest, &stats, &response); + // Record the namespace that the write must be run on. It may differ from the request if this is + // a timeseries collection. + NamespaceString nss = batchedRequest.getNS(); + cluster::write(opCtx, batchedRequest, &nss, &stats, &response); bool updatedShardKey = false; if (_batchedRequest.getBatchType() == BatchedCommandRequest::BatchType_Update) { updatedShardKey = - handleWouldChangeOwningShardError(opCtx, &batchedRequest, &response, stats); + handleWouldChangeOwningShardError(opCtx, &batchedRequest, nss, &response, stats); } // Populate the 'NotPrimaryErrorTracker' object based on the write response @@ -637,6 +688,78 @@ void ClusterWriteCmd::InvocationBase::run(OperationContext* opCtx, CommandHelpers::appendSimpleCommandStatus(bob, ok); } +bool ClusterWriteCmd::InvocationBase::_runExplainWithoutShardKey( + OperationContext* opCtx, + const NamespaceString& nss, + ExplainOptions::Verbosity verbosity, + BSONObjBuilder* result) { + if (_batchedRequest.getBatchType() == BatchedCommandRequest::BatchType_Delete || + _batchedRequest.getBatchType() == BatchedCommandRequest::BatchType_Update) { + bool isMultiWrite = false; + BSONObj query; + BSONObj collation; + bool isUpsert = false; + if (_batchedRequest.getBatchType() == BatchedCommandRequest::BatchType_Update) { + auto updateOp = _batchedRequest.getUpdateRequest().getUpdates().begin(); + isMultiWrite = updateOp->getMulti(); + query = updateOp->getQ(); + collation = updateOp->getCollation().value_or(BSONObj()); + isUpsert = updateOp->getUpsert(); + } else { + auto deleteOp = _batchedRequest.getDeleteRequest().getDeletes().begin(); + isMultiWrite = deleteOp->getMulti(); + query = deleteOp->getQ(); + collation = deleteOp->getCollation().value_or(BSONObj()); + } + + if (!isMultiWrite && + write_without_shard_key::useTwoPhaseProtocol( + opCtx, + nss, + true /* isUpdateOrDelete */, + isUpsert, + query, + collation, + _batchedRequest.getLet(), + _batchedRequest.getLegacyRuntimeConstants())) { + 
// Explain currently cannot be run within a transaction, so each command is instead run + // separately outside of a transaction, and we compose the results at the end. + auto clusterQueryWithoutShardKeyExplainRes = [&] { + ClusterQueryWithoutShardKey clusterQueryWithoutShardKeyCommand( + ClusterExplain::wrapAsExplain(_batchedRequest.toBSON(), verbosity)); + const auto explainClusterQueryWithoutShardKeyCmd = ClusterExplain::wrapAsExplain( + clusterQueryWithoutShardKeyCommand.toBSON({}), verbosity); + auto opMsg = + OpMsgRequest::fromDBAndBody(nss.db(), explainClusterQueryWithoutShardKeyCmd); + return CommandHelpers::runCommandDirectly(opCtx, opMsg).getOwned(); + }(); + + // Since 'explain' does not return the results of the query, we do not have an _id + // document to target by from the 'Read Phase'. We instead will use a dummy _id target + // document 'Write Phase'. + auto clusterWriteWithoutShardKeyExplainRes = [&] { + ClusterWriteWithoutShardKey clusterWriteWithoutShardKeyCommand( + ClusterExplain::wrapAsExplain(_batchedRequest.toBSON(), verbosity), + clusterQueryWithoutShardKeyExplainRes.getStringField("targetShardId") + .toString(), + write_without_shard_key::targetDocForExplain); + const auto explainClusterWriteWithoutShardKeyCmd = ClusterExplain::wrapAsExplain( + clusterWriteWithoutShardKeyCommand.toBSON({}), verbosity); + auto opMsg = + OpMsgRequest::fromDBAndBody(nss.db(), explainClusterWriteWithoutShardKeyCmd); + return CommandHelpers::runCommandDirectly(opCtx, opMsg).getOwned(); + }(); + + auto output = write_without_shard_key::generateExplainResponseForTwoPhaseWriteProtocol( + clusterQueryWithoutShardKeyExplainRes, clusterWriteWithoutShardKeyExplainRes); + result->appendElementsUnique(output); + return true; + } + return false; + } + return false; +} + void ClusterWriteCmd::InvocationBase::explain(OperationContext* opCtx, ExplainOptions::Verbosity verbosity, rpc::ReplyBuilderInterface* result) { @@ -657,6 +780,13 @@ void ClusterWriteCmd::InvocationBase::explain(OperationContext* opCtx, auto nss = req ? req->getNS() : _batchedRequest.getNS(); auto requestBSON = req ? req->toBSON() : _request->body; auto requestPtr = req ? req.get() : &_batchedRequest; + auto bodyBuilder = result->getBodyBuilder(); + + // If we aren't running an explain for updateOne or deleteOne without shard key, continue and + // run the original explain path. 
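The explain path added above runs the two phases of the write-without-shard-key protocol as separate explain sub-commands (the read phase to pick a target shard, then the write phase against that shard) and composes their outputs into a single response. The self-contained sketch below illustrates only that composition idea; the `Doc` map, `appendElementsUnique` helper, and runner callbacks are stand-ins assumed for illustration, not the BSONObj/CommandHelpers APIs used in the hunk.

```cpp
#include <functional>
#include <iostream>
#include <map>
#include <string>

// Stand-in for a BSON document: field name -> rendered value.
using Doc = std::map<std::string, std::string>;

// Merge fields from 'extra' into 'out', keeping existing fields on conflict
// (mirrors an appendElementsUnique-style merge when composing the output).
void appendElementsUnique(Doc& out, const Doc& extra) {
    for (const auto& [k, v] : extra)
        out.emplace(k, v);  // no-op if the key already exists
}

// Hypothetical runner for one explain sub-command.
using ExplainRunner = std::function<Doc(const Doc&)>;

// Two-phase explain: run the read-phase explain, feed its target shard into
// the write-phase explain, then compose one response for the client.
Doc explainWithoutShardKey(const Doc& writeCmd,
                           const ExplainRunner& runQueryPhase,
                           const ExplainRunner& runWritePhase) {
    Doc queryPhase = runQueryPhase(writeCmd);

    Doc writePhaseCmd = writeCmd;
    writePhaseCmd["targetShardId"] = queryPhase.at("targetShardId");
    Doc writePhase = runWritePhase(writePhaseCmd);

    Doc response;
    response["queryPhase.plan"] = queryPhase.at("plan");
    appendElementsUnique(response, writePhase);
    return response;
}

int main() {
    auto queryPhase = [](const Doc&) {
        return Doc{{"targetShardId", "shard0001"}, {"plan", "SHARD_MERGE"}};
    };
    auto writePhase = [](const Doc& cmd) {
        return Doc{{"plan", "UPDATE on " + cmd.at("targetShardId")}};
    };
    for (const auto& [k, v] : explainWithoutShardKey({{"update", "coll"}}, queryPhase, writePhase))
        std::cout << k << " = " << v << "\n";
}
```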
+ if (_runExplainWithoutShardKey(opCtx, nss, verbosity, &bodyBuilder)) { + return; + } const auto explainCmd = ClusterExplain::wrapAsExplain(requestBSON, verbosity); @@ -667,7 +797,6 @@ void ClusterWriteCmd::InvocationBase::explain(OperationContext* opCtx, BatchItemRef targetingBatchItem(requestPtr, 0); std::vector shardResponses; _commandOpWrite(opCtx, nss, explainCmd, targetingBatchItem, &shardResponses); - auto bodyBuilder = result->getBodyBuilder(); uassertStatusOK(ClusterExplain::buildExplainResult(opCtx, shardResponses, ClusterExplain::kWriteOnShards, diff --git a/src/mongo/s/commands/cluster_write_cmd.h b/src/mongo/s/commands/cluster_write_cmd.h index f29f418280856..e439bbc626302 100644 --- a/src/mongo/s/commands/cluster_write_cmd.h +++ b/src/mongo/s/commands/cluster_write_cmd.h @@ -29,14 +29,37 @@ #pragma once +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" #include "mongo/db/commands/update_metrics.h" #include "mongo/db/commands/write_commands_common.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/not_primary_error_tracker.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/explain_verbosity_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/s/write_ops/batch_write_exec.h" #include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { @@ -77,6 +100,7 @@ class ClusterWriteCmd : public Command { */ static bool handleWouldChangeOwningShardError(OperationContext* opCtx, BatchedCommandRequest* request, + const NamespaceString& nss, BatchedCommandResponse* response, BatchWriteExecStats stats); @@ -155,6 +179,15 @@ class ClusterWriteCmd::InvocationBase : public CommandInvocation { return static_cast(definition()); } + /** + * Runs a two-phase protocol to explain an updateOne/deleteOne without a shard key or _id. + * Returns true if we successfully ran the protocol, false otherwise. + */ + bool _runExplainWithoutShardKey(OperationContext* opCtx, + const NamespaceString& nss, + ExplainOptions::Verbosity verbosity, + BSONObjBuilder* result); + const OpMsgRequest* _request; BatchedCommandRequest _batchedRequest; diff --git a/src/mongo/s/commands/cluster_write_cmd_s.cpp b/src/mongo/s/commands/cluster_write_cmd_s.cpp index c7c5781f8c0e8..9469152794543 100644 --- a/src/mongo/s/commands/cluster_write_cmd_s.cpp +++ b/src/mongo/s/commands/cluster_write_cmd_s.cpp @@ -27,6 +27,16 @@ * it in the license file. 
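The cluster_write_cmd changes above also thread the executed-against namespace explicitly: runImpl records it before calling cluster::write, which now takes a namespace pointer it may rewrite (for example for time-series collections), and then hands it on to the WouldChangeOwningShard handling. Below is a minimal sketch of that out-parameter pattern with plain string namespaces; the buckets-translation rule in `routeWrite` is an assumption for illustration, not the real routing logic.

```cpp
#include <iostream>
#include <string>

// Hypothetical routing step: a write against a time-series view actually runs
// against its backing buckets collection, so the caller's namespace may be
// rewritten through the out-parameter.
void routeWrite(const std::string& requestNs, std::string* actualNs, bool isTimeseriesView) {
    if (actualNs) {
        auto dot = requestNs.find('.');
        *actualNs = isTimeseriesView
            ? requestNs.substr(0, dot + 1) + "system.buckets." + requestNs.substr(dot + 1)
            : requestNs;
    }
    // ... dispatch the batch to the targeted shards ...
}

// Later stages (e.g. shard key change handling) receive the namespace the
// write actually ran on, not the one named in the request.
void handleFollowUp(const std::string& nss) {
    std::cout << "follow-up targets " << nss << "\n";
}

int main() {
    std::string requestNs = "db.weather";
    std::string nss = requestNs;         // record the namespace before the write
    routeWrite(requestNs, &nss, true);   // may rewrite it for time-series
    handleFollowUp(nss);                 // prints: db.system.buckets.weather
}
```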
*/ +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/auth/authorization_session.h" +#include "mongo/db/commands.h" +#include "mongo/db/commands/write_commands_common.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/s/commands/cluster_write_cmd.h" namespace mongo { diff --git a/src/mongo/s/commands/cluster_write_without_shard_key_cmd.cpp b/src/mongo/s/commands/cluster_write_without_shard_key_cmd.cpp index 8672e30cee927..d5a3cdbaa91f7 100644 --- a/src/mongo/s/commands/cluster_write_without_shard_key_cmd.cpp +++ b/src/mongo/s/commands/cluster_write_without_shard_key_cmd.cpp @@ -27,26 +27,120 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/explain_gen.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" -#include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/matcher/expression_parser.h" +#include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/parsed_update.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/projection.h" +#include "mongo/db/query/projection_parser.h" +#include "mongo/db/query/projection_policies.h" +#include "mongo/db/service_context.h" #include "mongo/db/shard_id.h" +#include "mongo/db/update/update_driver.h" +#include "mongo/db/update/update_util.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/cluster_commands_helpers.h" -#include "mongo/s/commands/cluster_find_and_modify_cmd.h" -#include "mongo/s/commands/cluster_write_cmd.h" +#include "mongo/s/commands/cluster_explain.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" -#include "mongo/s/write_ops/batch_write_op.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand namespace mongo { namespace { +// Returns true if the update or 
projection in the query requires information from the original +// query. +bool requiresOriginalQuery(OperationContext* opCtx, + const boost::optional& updateRequest, + const boost::optional nss = boost::none, + const boost::optional& query = boost::none, + const boost::optional& projection = boost::none) { + if (updateRequest) { + ParsedUpdateForMongos parsedUpdate(opCtx, &updateRequest.get()); + uassertStatusOK(parsedUpdate.parseRequest()); + if (parsedUpdate.getDriver()->needMatchDetails()) { + return true; + } + } + + // Only findAndModify can specify a projection for the pre/post image via the 'fields' + // parameter. + if (projection && !projection->isEmpty()) { + auto findCommand = FindCommandRequest(*nss); + findCommand.setFilter(*query); + findCommand.setProjection(*projection); + + // We can ignore the collation for this check since we're only checking if the field name in + // the projection requires extra information from the query. + auto expCtx = + make_intrusive(opCtx, findCommand, nullptr /* collator */, false); + auto res = MatchExpressionParser::parse(findCommand.getFilter(), + expCtx, + ExtensionsCallbackNoop(), + MatchExpressionParser::kAllowAllSpecialFeatures); + auto proj = projection_ast::parseAndAnalyze(expCtx, + *projection, + res.getValue().get(), + *query, + ProjectionPolicies::findProjectionPolicies(), + false /* shouldOptimize */); + return proj.requiresMatchDetails() || + proj.metadataDeps().test(DocumentMetadataFields::MetaType::kTextScore); + } + return false; +} + BSONObj _createCmdObj(OperationContext* opCtx, const ShardId& shardId, const NamespaceString& nss, @@ -54,7 +148,7 @@ BSONObj _createCmdObj(OperationContext* opCtx, const BSONObj& writeCmd, const BSONObj& targetDocId) { const auto cri = uassertStatusOK(getCollectionRoutingInfoForTxnCmd(opCtx, nss)); - uassert(ErrorCodes::InvalidOptions, + uassert(ErrorCodes::NamespaceNotSharded, "_clusterWriteWithoutShardKey can only be run against sharded collections.", cri.cm.isSharded()); const auto shardVersion = cri.getShardVersion(shardId); @@ -73,11 +167,6 @@ BSONObj _createCmdObj(OperationContext* opCtx, auto updateRequest = write_ops::UpdateCommandRequest::parse( IDLParserContext("_clusterWriteWithoutShardKeyForUpdate"), opMsgRequest.body); - // The targeted query constructed should contain the targetDocId and the original query in - // case the original query has importance in terms of the operation being applied, such as - // using the positional operator ($) to modify an inner array element. - queryBuilder.appendElementsUnique(updateRequest.getUpdates().front().getQ()); - // The original query and collation are sent along with the modified command for the // purposes of query sampling. if (updateRequest.getUpdates().front().getSampleId()) { @@ -88,6 +177,19 @@ BSONObj _createCmdObj(OperationContext* opCtx, updateRequest.getUpdates().front().getCollation()); updateRequest.setWriteCommandRequestBase(writeCommandRequestBase); } + + // If the original query contains either a positional operator ($) or targets a time-series + // collection, include the original query alongside the target doc. + auto updateOpWithNamespace = UpdateRequest(updateRequest.getUpdates().front()); + updateOpWithNamespace.setNamespaceString(updateRequest.getNamespace()); + if (requiresOriginalQuery(opCtx, updateOpWithNamespace) || + nss.isTimeseriesBucketsCollection()) { + queryBuilder.appendElementsUnique(updateRequest.getUpdates().front().getQ()); + } else { + // Unset the collation because targeting by _id uses default collation. 
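The requiresOriginalQuery helper and the update branch above decide whether the targeted command keeps the user's original filter next to the _id of the chosen document (needed when the update relies on match details such as the positional `$` operator, or when the namespace is a time-series buckets collection), or instead targets purely by _id with the default collation. The sketch below shows the shape of that decision; the positional-path scan is a deliberately simplified stand-in for parsing the update driver, so treat it as an assumption rather than the real check.

```cpp
#include <iostream>
#include <map>
#include <string>
#include <vector>

// Stand-in filter: field -> value.
using Filter = std::map<std::string, std::string>;

// Illustrative stand-in for "the update needs match details": true when any
// modified path uses the positional operator, e.g. "grades.$.score".
bool needsMatchDetails(const std::vector<std::string>& updatePaths) {
    for (const auto& path : updatePaths)
        if (path.find(".$") != std::string::npos)
            return true;
    return false;
}

// Build the query actually sent to the shard: always target by _id, and only
// keep the original predicate when it still matters for applying the update.
Filter buildTargetedQuery(const std::string& targetId,
                          const Filter& originalQuery,
                          const std::vector<std::string>& updatePaths,
                          bool isTimeseriesBuckets) {
    Filter targeted{{"_id", targetId}};
    if (needsMatchDetails(updatePaths) || isTimeseriesBuckets) {
        // appendElementsUnique-style merge: the _id predicate wins on conflict.
        for (const auto& [k, v] : originalQuery)
            targeted.emplace(k, v);
    }
    return targeted;
}

int main() {
    Filter original{{"grades.score", "80"}};
    auto q1 = buildTargetedQuery("oid123", original, {"grades.$.score"}, false);
    auto q2 = buildTargetedQuery("oid123", original, {"status"}, false);
    std::cout << "positional update keeps " << q1.size() << " predicates\n";  // 2
    std::cout << "plain update keeps " << q2.size() << " predicates\n";       // 1
}
```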
+ updateRequest.getUpdates().front().setCollation(boost::none); + } + updateRequest.getUpdates().front().setQ(queryBuilder.obj()); auto batchedCommandRequest = BatchedCommandRequest(updateRequest); @@ -97,11 +199,6 @@ BSONObj _createCmdObj(OperationContext* opCtx, auto deleteRequest = write_ops::DeleteCommandRequest::parse( IDLParserContext("_clusterWriteWithoutShardKeyForDelete"), opMsgRequest.body); - // The targeted query constructed should contain the targetDocId and the original query in - // case the original query has importance in terms of the operation being applied, such as - // using the positional operator ($) to modify an inner array element. - queryBuilder.appendElementsUnique(deleteRequest.getDeletes().front().getQ()); - // The original query and collation are sent along with the modified command for the // purposes of query sampling. if (deleteRequest.getDeletes().front().getSampleId()) { @@ -113,8 +210,16 @@ BSONObj _createCmdObj(OperationContext* opCtx, deleteRequest.setWriteCommandRequestBase(writeCommandRequestBase); } - deleteRequest.getDeletes().front().setQ(queryBuilder.obj()); + // If the query targets a time-series collection, include the original query alongside the + // target doc. + if (nss.isTimeseriesBucketsCollection()) { + queryBuilder.appendElementsUnique(deleteRequest.getDeletes().front().getQ()); + } else { + // Unset the collation because targeting by _id uses default collation. + deleteRequest.getDeletes().front().setCollation(boost::none); + } + deleteRequest.getDeletes().front().setQ(queryBuilder.obj()); auto batchedCommandRequest = BatchedCommandRequest(deleteRequest); batchedCommandRequest.setShardVersion(shardVersion); @@ -124,17 +229,51 @@ BSONObj _createCmdObj(OperationContext* opCtx, auto findAndModifyRequest = write_ops::FindAndModifyCommandRequest::parse( IDLParserContext("_clusterWriteWithoutShardKeyForFindAndModify"), opMsgRequest.body); - // The targeted query constructed should contain the targetDocId and the original query in - // case the original query has importance in terms of the operation being applied, such as - // using the positional operator ($) to modify an inner array element. - queryBuilder.appendElementsUnique(findAndModifyRequest.getQuery()); - // The original query and collation are sent along with the modified command for the // purposes of query sampling. if (findAndModifyRequest.getSampleId()) { findAndModifyRequest.setOriginalQuery(findAndModifyRequest.getQuery()); findAndModifyRequest.setOriginalCollation(findAndModifyRequest.getCollation()); } + + if (findAndModifyRequest.getUpdate()) { + auto updateRequest = UpdateRequest{}; + updateRequest.setNamespaceString(findAndModifyRequest.getNamespace()); + update::makeUpdateRequest(opCtx, findAndModifyRequest, boost::none, &updateRequest); + + // If the original query contains either a positional operator ($) or targets a + // time-series collection, include the original query alongside the target doc. + if (requiresOriginalQuery(opCtx, + updateRequest, + findAndModifyRequest.getNamespace(), + findAndModifyRequest.getQuery(), + findAndModifyRequest.getFields().value_or(BSONObj())) || + nss.isTimeseriesBucketsCollection()) { + queryBuilder.appendElementsUnique(findAndModifyRequest.getQuery()); + } else { + // Unset the collation and sort because targeting by _id uses default collation and + // we should uniquely target a single document by _id. 
+ findAndModifyRequest.setCollation(boost::none); + findAndModifyRequest.setSort(boost::none); + } + } else { + // If the original query includes a positional operator ($) or targets a time-series + // collection, include the original query alongside the target doc. + if (requiresOriginalQuery(opCtx, + boost::none, + findAndModifyRequest.getNamespace(), + findAndModifyRequest.getQuery(), + findAndModifyRequest.getFields().value_or(BSONObj())) || + nss.isTimeseriesBucketsCollection()) { + queryBuilder.appendElementsUnique(findAndModifyRequest.getQuery()); + } else { + // Unset the collation and sort because targeting by _id uses default collation and + // we should uniquely target a single document by _id. + findAndModifyRequest.setCollation(boost::none); + findAndModifyRequest.setSort(boost::none); + } + } + findAndModifyRequest.setQuery(queryBuilder.obj()); // Drop the writeConcern as it cannot be specified for commands run in internal @@ -202,6 +341,56 @@ class ClusterWriteWithoutShardKeyCmd : public TypedCommand arsRequestVector({arsRequest}); + + Timer timer; + MultiStatementTransactionRequestsSender ars( + opCtx, + Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), + request().getDbName(), + std::move(arsRequestVector), + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + Shard::RetryPolicy::kNoRetry); + + auto response = ars.next(); + uassertStatusOK(response.swResponse); + + const auto millisElapsed = timer.millis(); + + auto bodyBuilder = result->getBodyBuilder(); + uassertStatusOK(ClusterExplain::buildExplainResult(opCtx, + {response}, + ClusterExplain::kWriteOnShards, + millisElapsed, + writeCmdObj, + &bodyBuilder)); + } + NamespaceString ns() const override { return NamespaceString(request().getDbName()); } @@ -214,8 +403,9 @@ class ClusterWriteWithoutShardKeyCmd : public TypedCommandgetClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(request().getDbName().tenantId()), + ActionType::internal)); } }; @@ -233,9 +423,9 @@ class ClusterWriteWithoutShardKeyCmd : public TypedCommand& apiVersions() const { return kApiVersions1; } diff --git a/src/mongo/s/commands/document_shard_key_update_test.cpp b/src/mongo/s/commands/document_shard_key_update_test.cpp index 56884e5892340..f8567ffdd8c5d 100644 --- a/src/mongo/s/commands/document_shard_key_update_test.cpp +++ b/src/mongo/s/commands/document_shard_key_update_test.cpp @@ -28,13 +28,17 @@ */ -#include "mongo/platform/basic.h" - -#include - +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" #include "mongo/s/commands/document_shard_key_update_util.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/s/commands/document_shard_key_update_util.cpp b/src/mongo/s/commands/document_shard_key_update_util.cpp index 66784a600af8f..d8698d9fac7e4 100644 --- a/src/mongo/s/commands/document_shard_key_update_util.cpp +++ b/src/mongo/s/commands/document_shard_key_update_util.cpp @@ -26,19 +26,37 @@ * exception statement from all source files in the program, then also delete * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/s/commands/document_shard_key_update_util.h" - -#include "mongo/base/status_with.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/cluster_write.h" +#include "mongo/s/commands/document_shard_key_update_util.h" +#include "mongo/s/transaction_router.h" #include "mongo/s/would_change_owning_shard_exception.h" +#include "mongo/s/write_ops/batch_write_exec.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" -#include "mongo/util/str.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -64,7 +82,7 @@ bool executeOperationsAsPartOfShardKeyUpdate(OperationContext* opCtx, BatchedCommandResponse deleteResponse; BatchWriteExecStats deleteStats; - cluster::write(opCtx, deleteRequest, &deleteStats, &deleteResponse); + cluster::write(opCtx, deleteRequest, nullptr /* nss */, &deleteStats, &deleteResponse); uassertStatusOKWithContext(deleteResponse.toStatus(), "During delete stage of updating a shard key"); @@ -90,13 +108,13 @@ bool executeOperationsAsPartOfShardKeyUpdate(OperationContext* opCtx, BatchedCommandResponse insertResponse; BatchWriteExecStats insertStats; - cluster::write(opCtx, insertRequest, &insertStats, &insertResponse); + cluster::write(opCtx, insertRequest, nullptr, &insertStats, &insertResponse); uassertStatusOKWithContext(insertResponse.toStatus(), "During insert stage of updating a shard key"); uassert(ErrorCodes::NamespaceNotFound, "Document not successfully inserted while changing shard key for namespace " + - insertRequest.getNS().toString(), + insertRequest.getNS().toStringForErrorMsg(), insertResponse.getN() == 1); return true; @@ -149,7 +167,11 @@ bool updateShardKeyForDocumentLegacy(OperationContext* opCtx, auto updatePreImage = documentKeyChangeInfo.getPreImage().getOwned(); auto updatePostImage = documentKeyChangeInfo.getPostImage().getOwned(); - auto deleteCmdObj = constructShardKeyDeleteCmdObj(nss, updatePreImage); + // If the WouldChangeOwningShard error happens for a timeseries collection, the pre-image is + // a measurement to be deleted and so the delete command should be sent to the timeseries view. + auto deleteCmdObj = constructShardKeyDeleteCmdObj( + nss.isTimeseriesBucketsCollection() ? nss.getTimeseriesViewNamespace() : nss, + updatePreImage); auto insertCmdObj = constructShardKeyInsertCmdObj(nss, updatePostImage, fleCrudProcessed); return executeOperationsAsPartOfShardKeyUpdate( @@ -190,8 +212,11 @@ SemiFuture updateShardKeyForDocument(const txn_api::TransactionClient& txn const NamespaceString& nss, const WouldChangeOwningShardInfo& changeInfo, bool fleCrudProcessed) { + // If the WouldChangeOwningShard error happens for a timeseries collection, the pre-image is + // a measurement to be deleted and so the delete command should be sent to the timeseries view. 
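Both shard-key-update paths in this file now send the pre-image delete to the time-series view namespace rather than the buckets namespace, since the pre-image is a user-level measurement rather than a bucket document. The sketch below mimics that routing with string namespaces; the prefix-stripping helper is a simplified assumption based on the `system.buckets` naming convention and is not the NamespaceString API.

```cpp
#include <cassert>
#include <iostream>
#include <string>

// Buckets collections follow the "<db>.system.buckets.<coll>" naming scheme.
constexpr const char* kBucketsInfix = ".system.buckets.";

bool isTimeseriesBucketsCollection(const std::string& ns) {
    return ns.find(kBucketsInfix) != std::string::npos;
}

// Simplified: "db.system.buckets.weather" -> "db.weather".
std::string getTimeseriesViewNamespace(const std::string& bucketsNs) {
    auto pos = bucketsNs.find(kBucketsInfix);
    assert(pos != std::string::npos);
    return bucketsNs.substr(0, pos) + "." +
        bucketsNs.substr(pos + std::string(kBucketsInfix).size());
}

// The delete of the pre-image targets the view for time-series collections,
// while other namespaces are used as-is.
std::string namespaceForPreImageDelete(const std::string& ns) {
    return isTimeseriesBucketsCollection(ns) ? getTimeseriesViewNamespace(ns) : ns;
}

int main() {
    std::cout << namespaceForPreImageDelete("db.system.buckets.weather") << "\n";  // db.weather
    std::cout << namespaceForPreImageDelete("db.orders") << "\n";                  // db.orders
}
```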
auto deleteCmdObj = documentShardKeyUpdateUtil::constructShardKeyDeleteCmdObj( - nss, changeInfo.getPreImage().getOwned()); + nss.isTimeseriesBucketsCollection() ? nss.getTimeseriesViewNamespace() : nss, + changeInfo.getPreImage().getOwned()); auto deleteOpMsg = OpMsgRequest::fromDBAndBody(nss.db(), std::move(deleteCmdObj)); auto deleteRequest = BatchedCommandRequest::parseDelete(std::move(deleteOpMsg)); @@ -238,7 +263,7 @@ SemiFuture updateShardKeyForDocument(const txn_api::TransactionClient& txn uassert(ErrorCodes::NamespaceNotFound, "Document not successfully inserted while changing shard key for namespace " + - nss.ns(), + nss.toStringForErrorMsg(), insertResponse.getN() == 1); return true; diff --git a/src/mongo/s/commands/document_shard_key_update_util.h b/src/mongo/s/commands/document_shard_key_update_util.h index 7c87457f9a6bf..766676f778714 100644 --- a/src/mongo/s/commands/document_shard_key_update_util.h +++ b/src/mongo/s/commands/document_shard_key_update_util.h @@ -33,11 +33,18 @@ #include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/future.h" +#include "mongo/util/out_of_line_executor.h" namespace mongo { diff --git a/src/mongo/s/commands/flush_router_config_cmd.cpp b/src/mongo/s/commands/flush_router_config_cmd.cpp index 85bf2a8cd07f4..3e02745b03874 100644 --- a/src/mongo/s/commands/flush_router_config_cmd.cpp +++ b/src/mongo/s/commands/flush_router_config_cmd.cpp @@ -28,13 +28,28 @@ */ -#include "mongo/platform/basic.h" - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/grid.h" -#include "mongo/s/is_mongos.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -69,11 +84,12 @@ class FlushRouterConfigCmd : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::flushRouterConfig)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), + ActionType::flushRouterConfig)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/s/commands/get_shard_map_cmd.cpp b/src/mongo/s/commands/get_shard_map_cmd.cpp index f59d59717f4a5..43a6ae2fc42f5 100644 --- a/src/mongo/s/commands/get_shard_map_cmd.cpp +++ b/src/mongo/s/commands/get_shard_map_cmd.cpp @@ -27,14 
+27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/db/auth/action_set.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -60,11 +69,11 @@ class CmdGetShardMap : public BasicCommand { } Status checkAuthForOperation(OperationContext* opCtx, - const DatabaseName&, + const DatabaseName& dbName, const BSONObj&) const override { auto* as = AuthorizationSession::get(opCtx->getClient()); - if (!as->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::getShardMap)) { + if (!as->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(dbName.tenantId()), ActionType::getShardMap)) { return {ErrorCodes::Unauthorized, "unauthorized"}; } diff --git a/src/mongo/s/commands/internal_transactions_test_command.h b/src/mongo/s/commands/internal_transactions_test_command.h index ac1252beab9e1..eac194f1e742c 100644 --- a/src/mongo/s/commands/internal_transactions_test_command.h +++ b/src/mongo/s/commands/internal_transactions_test_command.h @@ -30,7 +30,7 @@ #include "mongo/db/auth/authorization_session.h" #include "mongo/db/commands.h" #include "mongo/db/commands/internal_transactions_test_command_gen.h" -#include "mongo/db/query/find_command_gen.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/transaction/transaction_api.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/thread_pool_task_executor.h" @@ -102,7 +102,8 @@ class InternalTransactionsTestCommandBase : public TypedCommand { continue; } - const auto res = txnClient.runCommandSync(dbName, command); + const auto res = txnClient.runCommandSync( + DatabaseName::createDatabaseName_forTest(boost::none, dbName), command); sharedBlock->responses.emplace_back( CommandHelpers::filterCommandReplyForPassthrough( @@ -133,11 +134,13 @@ class InternalTransactionsTestCommandBase : public TypedCommand { } void doCheckAuthorization(OperationContext* opCtx) const override { - uassert(ErrorCodes::Unauthorized, - "Unauthorized", - AuthorizationSession::get(opCtx->getClient()) - ->isAuthorizedForActionsOnResource(ResourcePattern::forClusterResource(), - ActionType::internal)); + uassert( + ErrorCodes::Unauthorized, + "Unauthorized", + AuthorizationSession::get(opCtx->getClient()) + ->isAuthorizedForActionsOnResource( + ResourcePattern::forClusterResource(Base::request().getDbName().tenantId()), + ActionType::internal)); } std::shared_ptr getTransactionExecutor() { diff --git a/src/mongo/s/commands/internal_transactions_test_command_s.cpp b/src/mongo/s/commands/internal_transactions_test_command_s.cpp index 5ea97bc1367b1..a9aa77aba5045 100644 --- a/src/mongo/s/commands/internal_transactions_test_command_s.cpp +++ b/src/mongo/s/commands/internal_transactions_test_command_s.cpp @@ -27,7 +27,20 @@ * it in the license file. 
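Several authorization checks in these hunks now build the cluster resource pattern from the request's tenant id instead of a tenant-less singleton. The toy types below illustrate what a tenant-scoped cluster-resource check looks like; they are assumptions for illustration and not the AuthorizationSession or ResourcePattern interfaces.

```cpp
#include <iostream>
#include <optional>
#include <set>
#include <string>
#include <utility>

// A cluster-wide resource, optionally scoped to a single tenant.
struct ClusterResource {
    std::optional<std::string> tenantId;
    bool operator<(const ClusterResource& o) const { return tenantId < o.tenantId; }
};

// Toy authorization session: a set of (resource, action) grants.
struct AuthSession {
    std::set<std::pair<ClusterResource, std::string>> grants;
    bool isAuthorized(const ClusterResource& r, const std::string& action) const {
        return grants.count({r, action}) > 0;
    }
};

int main() {
    AuthSession as;
    as.grants.insert({ClusterResource{std::string("tenantA")}, "flushRouterConfig"});

    // A request carrying tenantA's database name is authorized...
    std::cout << as.isAuthorized({std::string("tenantA")}, "flushRouterConfig") << "\n";  // 1
    // ...but the same action against another tenant's cluster resource is not.
    std::cout << as.isAuthorized({std::string("tenantB")}, "flushRouterConfig") << "\n";  // 0
}
```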
*/ +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/db/commands.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/transaction/transaction_api.h" #include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/commands/internal_transactions_test_command.h" #include "mongo/s/transaction_router_resource_yielder.h" @@ -43,12 +56,10 @@ class InternalTransactionsTestCommandS StringData commandName, bool useClusterClient) { auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor( - Grid::get(opCtx)->getExecutorPool()->getFixedExecutor()); return txn_api::SyncTransactionWithRetries( opCtx, - sleepInlineExecutor, + executor, TransactionRouterResourceYielder::makeForLocalHandoff(), inlineExecutor); } diff --git a/src/mongo/s/commands/kill_sessions_remote.cpp b/src/mongo/s/commands/kill_sessions_remote.cpp index 0070b2c649366..5c2032c9750f5 100644 --- a/src/mongo/s/commands/kill_sessions_remote.cpp +++ b/src/mongo/s/commands/kill_sessions_remote.cpp @@ -28,20 +28,33 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/commands/kill_sessions_remote.h" - -#include "mongo/db/client.h" +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/session/kill_sessions.h" #include "mongo/db/session/kill_sessions_common.h" +#include "mongo/db/session/kill_sessions_gen.h" #include "mongo/executor/async_multicaster.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/commands/cluster_commands_gen.h" +#include "mongo/s/commands/kill_sessions_remote.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/commands/kill_sessions_remote.h b/src/mongo/s/commands/kill_sessions_remote.h index 04db421294602..dfbd78acde8b5 100644 --- a/src/mongo/s/commands/kill_sessions_remote.h +++ b/src/mongo/s/commands/kill_sessions_remote.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/db/operation_context.h" #include "mongo/db/session/session_killer.h" namespace mongo { diff --git a/src/mongo/s/commands/s_read_write_concern_defaults_server_status.cpp b/src/mongo/s/commands/s_read_write_concern_defaults_server_status.cpp index 4a012777bdc9b..9a6cdf42e1494 100644 --- a/src/mongo/s/commands/s_read_write_concern_defaults_server_status.cpp +++ b/src/mongo/s/commands/s_read_write_concern_defaults_server_status.cpp @@ -27,10 +27,13 @@ * it in the license file. 
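The transaction-API call sites in these hunks now hand the fixed executor straight to SyncTransactionWithRetries alongside an inline executor, dropping the intermediate sleepable wrapper. As background only, here is a minimal sketch of the difference between an executor that runs work immediately on the calling thread and one that queues it for later; the interfaces are illustrative stand-ins, not the mongo::executor types.

```cpp
#include <functional>
#include <iostream>
#include <queue>

// A minimal "out-of-line executor" interface.
struct Executor {
    virtual ~Executor() = default;
    virtual void schedule(std::function<void()> task) = 0;
};

// Runs work immediately on the caller's thread: no thread hop, useful when the
// caller already sits on an appropriate execution context (e.g. in tests).
struct InlineExecutor : Executor {
    void schedule(std::function<void()> task) override {
        task();
    }
};

// Queues work and runs it when the owner drains the queue, standing in for a
// pool-backed executor that runs tasks elsewhere.
struct QueueExecutor : Executor {
    std::queue<std::function<void()>> pending;
    void schedule(std::function<void()> task) override {
        pending.push(std::move(task));
    }
    void drain() {
        while (!pending.empty()) {
            pending.front()();
            pending.pop();
        }
    }
};

int main() {
    InlineExecutor inlineExec;
    inlineExec.schedule([] { std::cout << "ran inline\n"; });

    QueueExecutor queued;
    queued.schedule([] { std::cout << "ran later\n"; });
    std::cout << "scheduled\n";
    queued.drain();
}
```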
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/rwc_defaults_commands_gen.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" namespace mongo { diff --git a/src/mongo/s/commands/sharding_expressions.cpp b/src/mongo/s/commands/sharding_expressions.cpp index e20683d251eaa..c1252fe900fc2 100644 --- a/src/mongo/s/commands/sharding_expressions.cpp +++ b/src/mongo/s/commands/sharding_expressions.cpp @@ -29,26 +29,66 @@ #include "mongo/s/commands/sharding_expressions.h" -#include "mongo/client/index_spec.h" +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/ordering.h" +#include "mongo/bson/util/builder.h" #include "mongo/db/catalog/index_key_validate.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" +#include "mongo/db/fts/fts_spec.h" +#include "mongo/db/hasher.h" #include "mongo/db/index/2d_common.h" #include "mongo/db/index/btree_key_generator.h" #include "mongo/db/index/expression_keys_private.h" #include "mongo/db/index/expression_params.h" +#include "mongo/db/index/index_access_method.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" #include "mongo/db/index/s2_common.h" #include "mongo/db/index/wildcard_key_generator.h" #include "mongo/db/index_names.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/expression.h" #include "mongo/db/pipeline/expression_context.h" -#include "mongo/db/pipeline/expression_visitor.h" #include "mongo/db/pipeline/variables.h" #include "mongo/db/query/collation/collator_factory_interface.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/record_id.h" #include "mongo/db/s/sharding_state.h" +#include "mongo/db/shard_id.h" +#include "mongo/db/storage/key_string.h" +#include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" -#include "mongo/s/shard_version_factory.h" +#include "mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/shared_buffer_fragment.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -273,7 +313,7 @@ class IndexKeysObjectsGenerator { // This helper accepts the key string 'keyString' and returns a 'BSONObj' that maps a field // names to its index key. auto buildObjectFromKeyString = [&](const auto& keyString) { - auto keyStringObj = KeyString::toBson(keyString, Ordering::make(BSONObj())); + auto keyStringObj = key_string::toBson(keyString, Ordering::make(BSONObj())); BSONObjBuilder keyObjectBuilder; switch (_indexDescriptor->getIndexType()) { @@ -353,7 +393,7 @@ class IndexKeysObjectsGenerator { std::unique_ptr _collatorInterface; // The key string version to be used for generating the key strings. 
- const KeyString::Version _keyStringVersion = KeyString::Version::kLatestVersion; + const key_string::Version _keyStringVersion = key_string::Version::kLatestVersion; // The ordering to be used for generating the key strings. const Ordering _ordering = Ordering::allAscending(); @@ -393,13 +433,13 @@ Value ExpressionInternalOwningShard::evaluate(const Document& root, Variables* v // Invalidate catalog cache if the chunk manager version is stale. if (cri.cm.getVersion().isOlderThan(shardVersion.placementVersion())) { - uasserted(StaleConfigInfo(ns, - cri.getCollectionVersion(), - boost::none /* wanted */, - ShardingState::get(opCtx)->shardId()), + catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( + ns, boost::none /* wanted */, ShardId()); + + uasserted(ShardCannotRefreshDueToLocksHeldInfo(ns), str::stream() - << "Sharding information of collection " << ns - << " is currently stale and needs to be recovered from the config server"); + << "Routing information for collection " << ns.toStringForErrorMsg() + << " is currently stale and needs to be refreshed from the config server"); } // Retrieve the shard id for the given shard key value. @@ -483,8 +523,8 @@ Value ExpressionInternalIndexKey::evaluate(const Document& root, Variables* vari auto specObj = _spec->evaluate(root, variables).getDocument().toBson(); // Parse and validate the index spec and then create the index descriptor object from it. - auto indexSpec = - index_key_validate::parseAndValidateIndexSpecs(getExpressionContext()->opCtx, specObj); + auto indexSpec = index_key_validate::parseAndValidateIndexSpecs( + getExpressionContext()->opCtx, specObj, false /* checkFCV */); BSONObj keyPattern = indexSpec.getObjectField(kIndexSpecKeyField); auto indexDescriptor = std::make_unique(IndexNames::findPluginName(keyPattern), indexSpec); diff --git a/src/mongo/s/commands/sharding_expressions.h b/src/mongo/s/commands/sharding_expressions.h index e02ddc323fc88..5580c6120b210 100644 --- a/src/mongo/s/commands/sharding_expressions.h +++ b/src/mongo/s/commands/sharding_expressions.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/db/exec/document_value/value.h" #include "mongo/db/index/index_descriptor.h" @@ -36,6 +40,8 @@ #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_visitor.h" #include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/s/commands/strategy.cpp b/src/mongo/s/commands/strategy.cpp index 5534cc42dca68..11f4654b168fd 100644 --- a/src/mongo/s/commands/strategy.cpp +++ b/src/mongo/s/commands/strategy.cpp @@ -28,63 +28,85 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/commands/strategy.h" - +#include +#include #include - -#include "mongo/base/data_cursor.h" -#include "mongo/base/init.h" +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/status.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/bson/util/builder.h" -#include "mongo/db/audit.h" -#include "mongo/db/auth/action_type.h" -#include "mongo/db/auth/authorization_checks.h" -#include "mongo/db/auth/authorization_session.h" +#include "mongo/base/string_data.h" +#include 
"mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/curop.h" +#include "mongo/db/database_name.h" #include "mongo/db/error_labels.h" #include "mongo/db/initialize_api_parameters.h" +#include "mongo/db/initialize_operation_session_info.h" +#include "mongo/db/logical_time.h" #include "mongo/db/logical_time_validator.h" -#include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/namespace_string.h" #include "mongo/db/not_primary_error_tracker.h" +#include "mongo/db/operation_context.h" #include "mongo/db/operation_time_tracker.h" -#include "mongo/db/ops/write_ops.h" -#include "mongo/db/query/find_common.h" -#include "mongo/db/query/getmore_command_gen.h" +#include "mongo/db/query/max_time_ms_parser.h" #include "mongo/db/query/query_request_helper.h" +#include "mongo/db/read_concern_support_result.h" #include "mongo/db/read_write_concern_defaults.h" -#include "mongo/db/repl/repl_server_parameters_gen.h" -#include "mongo/db/session/initialize_operation_session_info.h" -#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/read_write_concern_defaults_gen.h" +#include "mongo/db/read_write_concern_provenance.h" +#include "mongo/db/repl/optime.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/stats/api_version_metrics.h" #include "mongo/db/stats/counters.h" #include "mongo/db/transaction_validation.h" #include "mongo/db/vector_clock.h" -#include "mongo/db/views/resolved_view.h" #include "mongo/db/write_concern_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/check_allowed_op_query_cmd.h" #include "mongo/rpc/factory.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/metadata.h" #include "mongo/rpc/metadata/client_metadata.h" #include "mongo/rpc/metadata/tracking_metadata.h" #include "mongo/rpc/op_msg.h" -#include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/rpc/protocol.h" +#include "mongo/rpc/reply_builder_interface.h" #include "mongo/rpc/rewrite_state_change_errors.h" +#include "mongo/rpc/topology_version_gen.h" +#include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/catalog_cache.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/cluster_commands_helpers.h" -#include "mongo/s/commands/cluster_explain.h" +#include "mongo/s/commands/strategy.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" #include "mongo/s/load_balancer_support.h" #include "mongo/s/mongos_topology_coordinator.h" -#include "mongo/s/query/cluster_cursor_manager.h" -#include "mongo/s/query/cluster_find.h" #include "mongo/s/query_analysis_sampler.h" #include "mongo/s/session_catalog_router.h" #include "mongo/s/shard_invalidated_for_targeting_exception.h" @@ -93,16 +115,21 @@ #include "mongo/transport/hello_metrics.h" #include "mongo/transport/service_executor.h" #include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include 
"mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/future_impl.h" #include "mongo/util/future_util.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" -#include "mongo/util/str.h" -#include "mongo/util/timer.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { namespace { + MONGO_FAIL_POINT_DEFINE(hangBeforeCheckingMongosShutdownInterrupt); const auto kOperationTime = "operationTime"_sd; @@ -416,7 +443,7 @@ class ParseAndRunCommand final { std::shared_ptr _invocation; boost::optional _ns; - boost::optional _osi; + OperationSessionInfoFromClient _osi; boost::optional _wc; boost::optional _isHello; }; @@ -481,30 +508,28 @@ void ParseAndRunCommand::_updateStatsAndApplyErrorLabels(const Status& status) { auto opCtx = _rec->getOpCtx(); const auto command = _rec->getCommand(); - if (command) - command->incrementCommandsFailed(); NotPrimaryErrorTracker::get(opCtx->getClient()).recordError(status.code()); + + if (!command) + return; + + command->incrementCommandsFailed(); + // WriteConcern error (wcCode) is set to boost::none because: // 1. TransientTransaction error label handling for commitTransaction command in mongos is // delegated to the shards. Mongos simply propagates the shard's response up to the client. // 2. For other commands in a transaction, they shouldn't get a writeConcern error so this // setting doesn't apply. - - if (_osi.has_value()) { - - auto errorLabels = getErrorLabels(opCtx, - *_osi, - command->getName(), - status.code(), - boost::none, - false /* isInternalClient */, - true /* isMongos */, - repl::OpTime{}, - repl::OpTime{}); - - - _errorBuilder->appendElements(errorLabels); - } + auto errorLabels = getErrorLabels(opCtx, + _osi, + command->getName(), + status.code(), + boost::none, + false /* isInternalClient */, + true /* isMongos */, + repl::OpTime{}, + repl::OpTime{}); + _errorBuilder->appendElements(errorLabels); } void ParseAndRunCommand::_parseCommand() { auto opCtx = _rec->getOpCtx(); @@ -573,21 +598,18 @@ void ParseAndRunCommand::_parseCommand() { // Set the logical optype, command object and namespace as soon as we identify the command. If // the command does not define a fully-qualified namespace, set CurOp to the generic command // namespace db.$cmd. - _ns.emplace(_invocation->ns().toString()); + _ns.emplace(NamespaceStringUtil::serialize(_invocation->ns())); auto nss = (request.getDatabase() == *_ns ? NamespaceString(*_ns, "$cmd") : NamespaceString(*_ns)); // Fill out all currentOp details. 
CurOp::get(opCtx)->setGenericOpRequestDetails(nss, command, request.body, _opType); - _osi.emplace(initializeOperationSessionInfo(opCtx, - request.body, - command->requiresAuth(), - command->attachLogicalSessionsToOpCtx(), - true)); + _osi = initializeOperationSessionInfo( + opCtx, request, command->requiresAuth(), command->attachLogicalSessionsToOpCtx(), true); auto allowTransactionsOnConfigDatabase = !isMongos() || client->isFromSystemConnection(); - validateSessionOptions(*_osi, command->getName(), nss, allowTransactionsOnConfigDatabase); + validateSessionOptions(_osi, command->getName(), nss, allowTransactionsOnConfigDatabase); _wc.emplace(uassertStatusOK(WriteConcernOptions::extractWCFromCommand(request.body))); @@ -664,7 +686,7 @@ Status ParseAndRunCommand::RunInvocation::_setup() { CommandHelpers::evaluateFailCommandFailPoint(opCtx, invocation.get()); bool startTransaction = false; - if (_parc->_osi->getAutocommit()) { + if (_parc->_osi.getAutocommit()) { _routerSession.emplace(opCtx); load_balancer_support::setMruSession(opCtx->getClient(), *opCtx->getLogicalSessionId()); @@ -676,7 +698,7 @@ Status ParseAndRunCommand::RunInvocation::_setup() { invariant(txnNumber); auto transactionAction = ([&] { - auto startTxnSetting = _parc->_osi->getStartTransaction(); + auto startTxnSetting = _parc->_osi.getStartTransaction(); if (startTxnSetting && *startTxnSetting) { return TransactionRouter::TransactionActions::kStart; } @@ -915,7 +937,7 @@ void ParseAndRunCommand::RunAndRetry::_setup() { // Re-parse before retrying in case the process of run()-ning the invocation could // affect the parsed result. _parc->_invocation = command->parse(opCtx, request); - invariant(_parc->_invocation->ns().toString() == _parc->_ns, + invariant(NamespaceStringUtil::serialize(_parc->_invocation->ns()) == _parc->_ns, "unexpected change of namespace when retrying"); } @@ -1156,8 +1178,6 @@ Future ParseAndRunCommand::run() { }); } -} // namespace - // Maintains the state required to execute client commands, and provides the interface to construct // a future-chain that runs the command against the database. class ClientCommand final { @@ -1315,6 +1335,8 @@ Future ClientCommand::run() { .then([this] { return _produceResponse(); }); } +} // namespace + Future Strategy::clientCommand(std::shared_ptr rec) { return future_util::makeState(std::move(rec)).thenWithState([](auto* runner) { return runner->run(); diff --git a/src/mongo/s/commands/strategy.h b/src/mongo/s/commands/strategy.h index 73916229f7f03..ad276da5345dd 100644 --- a/src/mongo/s/commands/strategy.h +++ b/src/mongo/s/commands/strategy.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/db/dbmessage.h" #include "mongo/db/request_execution_context.h" +#include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/s/comparable_chunk_version_test.cpp b/src/mongo/s/comparable_chunk_version_test.cpp index 63f6ca4a59c13..4e73f981a60ec 100644 --- a/src/mongo/s/comparable_chunk_version_test.cpp +++ b/src/mongo/s/comparable_chunk_version_test.cpp @@ -27,8 +27,15 @@ * it in the license file. 
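The retry loop in strategy.cpp above re-parses the invocation before each retry and asserts that the serialized namespace has not changed between attempts. Below is a small stand-alone sketch of that re-derive-and-revalidate-before-retry pattern; the trivial string parser stands in for Command::parse and is an assumption for illustration only.

```cpp
#include <cassert>
#include <iostream>
#include <stdexcept>
#include <string>

struct Invocation {
    std::string ns;
};

// Stand-in parser: derive the invocation (here, just its namespace) from the
// raw request body.
Invocation parse(const std::string& requestBody) {
    return Invocation{requestBody.substr(0, requestBody.find(' '))};
}

// Run 'attempt' with retries; re-parse before each retry and verify the
// namespace is stable, mirroring the invariant in the retry loop above.
template <typename Attempt>
void runWithRetries(const std::string& requestBody, int maxAttempts, Attempt attempt) {
    Invocation invocation = parse(requestBody);
    const std::string originalNs = invocation.ns;
    for (int i = 1; i <= maxAttempts; ++i) {
        try {
            attempt(invocation);
            return;
        } catch (const std::exception&) {
            if (i == maxAttempts)
                throw;
            invocation = parse(requestBody);  // re-parse before retrying
            assert(invocation.ns == originalNs && "unexpected change of namespace when retrying");
        }
    }
}

int main() {
    int failures = 2;
    runWithRetries("test.coll update", 3, [&](const Invocation& inv) {
        if (failures-- > 0)
            throw std::runtime_error("transient");
        std::cout << "ran against " << inv.ns << "\n";
    });
}
```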
*/ +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/s/chunk_manager.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/s/comparable_database_version_test.cpp b/src/mongo/s/comparable_database_version_test.cpp index 8a91faf5d9f87..440e7d6d21209 100644 --- a/src/mongo/s/comparable_database_version_test.cpp +++ b/src/mongo/s/comparable_database_version_test.cpp @@ -27,11 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/database_version.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/s/comparable_index_version_test.cpp b/src/mongo/s/comparable_index_version_test.cpp index 2d7e7e7c94a89..380ccb47758d6 100644 --- a/src/mongo/s/comparable_index_version_test.cpp +++ b/src/mongo/s/comparable_index_version_test.cpp @@ -27,8 +27,16 @@ * it in the license file. */ +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/s/sharding_index_catalog_cache.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/s/concurrency/locker_mongos.h b/src/mongo/s/concurrency/locker_mongos.h deleted file mode 100644 index 754486a734c75..0000000000000 --- a/src/mongo/s/concurrency/locker_mongos.h +++ /dev/null @@ -1,255 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/concurrency/locker.h" - -namespace mongo { - -/** - * Locker for mongos. Based on, but not inherited from, LockerNoop. 
- */ -class LockerMongos : public Locker { -public: - LockerMongos() {} - - // TODO(SERVER-60229): Return false when mongos has a working lock manager. - bool isNoop() const final { - return true; - } - - ClientState getClientState() const override { - MONGO_UNREACHABLE; - } - - LockerId getId() const override { - MONGO_UNREACHABLE; - } - - stdx::thread::id getThreadId() const override { - MONGO_UNREACHABLE; - } - - void updateThreadIdToCurrentThread() override { - MONGO_UNREACHABLE; - } - - void unsetThreadId() override { - MONGO_UNREACHABLE; - } - - void setSharedLocksShouldTwoPhaseLock(bool sharedLocksShouldTwoPhaseLock) override { - MONGO_UNREACHABLE; - } - - void setMaxLockTimeout(Milliseconds maxTimeout) override { - MONGO_UNREACHABLE; - } - - bool hasMaxLockTimeout() override { - MONGO_UNREACHABLE; - } - - void unsetMaxLockTimeout() override { - MONGO_UNREACHABLE; - } - - void lockGlobal(OperationContext* opCtx, LockMode mode, Date_t deadline) override { - MONGO_UNREACHABLE; - } - - bool unlockGlobal() override { - MONGO_UNREACHABLE; - } - - void beginWriteUnitOfWork() override {} - - void endWriteUnitOfWork() override {} - - bool inAWriteUnitOfWork() const override { - return false; - } - - bool wasGlobalLockTakenForWrite() const override { - return false; - } - - bool wasGlobalLockTakenInModeConflictingWithWrites() const override { - return false; - } - - bool wasGlobalLockTaken() const override { - return false; - } - - void setGlobalLockTakenInMode(LockMode mode) override {} - - LockResult lockRSTLBegin(OperationContext* opCtx, LockMode mode) override { - MONGO_UNREACHABLE; - } - - void lockRSTLComplete(OperationContext* opCtx, - LockMode mode, - Date_t deadline, - const LockTimeoutCallback& onTimeout) override { - MONGO_UNREACHABLE; - } - - bool unlockRSTLforPrepare() override { - MONGO_UNREACHABLE; - } - - void lock(OperationContext* opCtx, ResourceId resId, LockMode mode, Date_t deadline) override {} - - void lock(ResourceId resId, LockMode mode, Date_t deadline) override {} - - void downgrade(ResourceId resId, LockMode newMode) override { - MONGO_UNREACHABLE; - } - - bool unlock(ResourceId resId) override { - return true; - } - - LockMode getLockMode(ResourceId resId) const override { - MONGO_UNREACHABLE; - } - - bool isLockHeldForMode(ResourceId resId, LockMode mode) const override { - return true; - } - - bool isDbLockedForMode(const DatabaseName& dbName, LockMode mode) const override { - return true; - } - - bool isCollectionLockedForMode(const NamespaceString& nss, LockMode mode) const override { - return true; - } - - ResourceId getWaitingResource() const override { - MONGO_UNREACHABLE; - } - - void getLockerInfo(LockerInfo* lockerInfo, - boost::optional lockStatsBase) const override { - MONGO_UNREACHABLE; - } - - boost::optional getLockerInfo( - boost::optional lockStatsBase) const override { - return boost::none; - } - - bool saveLockStateAndUnlock(LockSnapshot* stateOut) override { - MONGO_UNREACHABLE; - } - - void restoreLockState(OperationContext* opCtx, const LockSnapshot& stateToRestore) override { - MONGO_UNREACHABLE; - } - - bool releaseWriteUnitOfWorkAndUnlock(LockSnapshot* stateOut) override { - MONGO_UNREACHABLE; - } - - void restoreWriteUnitOfWorkAndLock(OperationContext* opCtx, - const LockSnapshot& stateToRestore) override { - MONGO_UNREACHABLE; - }; - - void releaseWriteUnitOfWork(WUOWLockSnapshot* stateOut) override { - MONGO_UNREACHABLE; - } - - void restoreWriteUnitOfWork(const WUOWLockSnapshot& stateToRestore) override { - MONGO_UNREACHABLE; - }; - - 
void releaseTicket() override { - MONGO_UNREACHABLE; - } - - void reacquireTicket(OperationContext* opCtx) override { - MONGO_UNREACHABLE; - } - - virtual bool hasReadTicket() const { - MONGO_UNREACHABLE; - } - - virtual bool hasWriteTicket() const { - MONGO_UNREACHABLE; - } - - void dump() const override { - MONGO_UNREACHABLE; - } - - bool isW() const override { - return false; - } - - bool isR() const override { - MONGO_UNREACHABLE; - } - - bool isLocked() const override { - // This is necessary because replication makes decisions based on the answer to this, and - // we wrote unit tests to test the behavior specifically when this returns "false". - return false; - } - - bool isWriteLocked() const override { - return true; - } - - bool isReadLocked() const override { - return true; - } - - bool isRSTLExclusive() const override { - return true; - } - - bool isRSTLLocked() const override { - return true; - } - - bool hasLockPending() const override { - MONGO_UNREACHABLE; - } - - bool isGlobalLockedRecursively() override { - return false; - } -}; - -} // namespace mongo diff --git a/src/mongo/s/concurrency/locker_mongos_client_observer.h b/src/mongo/s/concurrency/locker_mongos_client_observer.h deleted file mode 100644 index d1774b6f3d5ce..0000000000000 --- a/src/mongo/s/concurrency/locker_mongos_client_observer.h +++ /dev/null @@ -1,57 +0,0 @@ -/** - * Copyright (C) 2021-present MongoDB, Inc. - * - * This program is free software: you can redistribute it and/or modify - * it under the terms of the Server Side Public License, version 1, - * as published by MongoDB, Inc. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * Server Side Public License for more details. - * - * You should have received a copy of the Server Side Public License - * along with this program. If not, see - * . - * - * As a special exception, the copyright holders give permission to link the - * code of portions of this program with the OpenSSL library under certain - * conditions as described in each individual source file and distribute - * linked combinations including the program with the OpenSSL library. You - * must comply with the Server Side Public License in all respects for - * all of the code used other than as permitted herein. If you modify file(s) - * with this exception, you may extend this exception to your version of the - * file(s), but you are not obligated to do so. If you do not wish to do so, - * delete this exception statement from your version. If you delete this - * exception statement from all source files in the program, then also delete - * it in the license file. - */ - -#pragma once - -#include "mongo/db/service_context.h" -#include "mongo/s/concurrency/locker_mongos.h" - -namespace mongo { - -/** - * ServiceContext hook that ensures OperationContexts are created with a valid - * Locker instance. Intended for use in mongos. 
- */ -class LockerMongosClientObserver : public ServiceContext::ClientObserver { -public: - LockerMongosClientObserver() = default; - ~LockerMongosClientObserver() = default; - - void onCreateClient(Client* client) final {} - - void onDestroyClient(Client* client) final {} - - void onCreateOperationContext(OperationContext* opCtx) final { - opCtx->setLockState(std::make_unique()); - } - - void onDestroyOperationContext(OperationContext* opCtx) final {} -}; - -} // namespace mongo diff --git a/src/mongo/s/config_server_catalog_cache_loader.cpp b/src/mongo/s/config_server_catalog_cache_loader.cpp index a9706b68a6724..6f87e1437ec2d 100644 --- a/src/mongo/s/config_server_catalog_cache_loader.cpp +++ b/src/mongo/s/config_server_catalog_cache_loader.cpp @@ -29,13 +29,34 @@ #include "mongo/s/config_server_catalog_cache_loader.h" +#include +#include +#include +#include +#include + +#include + #include "mongo/db/catalog_shard_feature_flag_gen.h" #include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/keypattern.h" +#include "mongo/db/logical_time.h" #include "mongo/db/operation_context.h" -#include "mongo/db/repl/replication_coordinator.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/vector_clock.h" -#include "mongo/logv2/log.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/grid.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future_impl.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -51,16 +72,8 @@ CollectionAndChangedChunks getChangedChunks(OperationContext* opCtx, const NamespaceString& nss, ChunkVersion sinceVersion) { const auto readConcern = [&]() -> repl::ReadConcernArgs { - // (Ignore FCV check): This is in mongos so we expect to ignore FCV. - if (serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) && - !gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()) { - // When the feature flag is on, the config server may read from a secondary which may - // need to wait for replication, so we should use afterClusterTime. 
- return {repl::ReadConcernLevel::kSnapshotReadConcern}; - } else { - const auto vcTime = VectorClock::get(opCtx)->getTime(); - return {vcTime.configTime(), repl::ReadConcernLevel::kSnapshotReadConcern}; - } + const auto vcTime = VectorClock::get(opCtx)->getTime(); + return {vcTime.configTime(), repl::ReadConcernLevel::kSnapshotReadConcern}; }(); auto collAndChunks = Grid::get(opCtx)->catalogClient()->getCollectionAndChunks( @@ -104,13 +117,17 @@ void ConfigServerCatalogCacheLoader::onStepUp() { MONGO_UNREACHABLE; } +void ConfigServerCatalogCacheLoader::onReplicationRollback() { + MONGO_UNREACHABLE; +} + void ConfigServerCatalogCacheLoader::shutDown() { _executor->shutdown(); _executor->join(); } -void ConfigServerCatalogCacheLoader::notifyOfCollectionPlacementVersionUpdate( - const NamespaceString& nss) { +void ConfigServerCatalogCacheLoader::notifyOfCollectionRefreshEndMarkerSeen( + const NamespaceString& nss, const Timestamp& commitTime) { MONGO_UNREACHABLE; } @@ -131,6 +148,13 @@ SemiFuture ConfigServerCatalogCacheLoader::getChunks .then([=]() { ThreadClient tc("ConfigServerCatalogCacheLoader::getChunksSince", getGlobalServiceContext()); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = tc->makeOperationContext(); return getChangedChunks(opCtx.get(), nss, version); @@ -143,6 +167,13 @@ SemiFuture ConfigServerCatalogCacheLoader::getDatabase(StringData .then([name = dbName.toString()] { ThreadClient tc("ConfigServerCatalogCacheLoader::getDatabase", getGlobalServiceContext()); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto opCtx = tc->makeOperationContext(); return Grid::get(opCtx.get()) ->catalogClient() diff --git a/src/mongo/s/config_server_catalog_cache_loader.h b/src/mongo/s/config_server_catalog_cache_loader.h index 04c3471e77e77..3482eaa0cf46f 100644 --- a/src/mongo/s/config_server_catalog_cache_loader.h +++ b/src/mongo/s/config_server_catalog_cache_loader.h @@ -29,8 +29,16 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache_loader.h" +#include "mongo/s/chunk_version.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/future.h" namespace mongo { @@ -46,8 +54,10 @@ class ConfigServerCatalogCacheLoader final : public CatalogCacheLoader { void initializeReplicaSetRole(bool isPrimary) override; void onStepDown() override; void onStepUp() override; + void onReplicationRollback() override; void shutDown() override; - void notifyOfCollectionPlacementVersionUpdate(const NamespaceString& nss) override; + void notifyOfCollectionRefreshEndMarkerSeen(const NamespaceString& nss, + const Timestamp& commitTime) override; void waitForCollectionFlush(OperationContext* opCtx, const NamespaceString& nss) override; void waitForDatabaseFlush(OperationContext* opCtx, StringData dbName) override; diff --git a/src/mongo/s/database_version.cpp b/src/mongo/s/database_version.cpp index df160cd1edaa5..c7b4875674f60 100644 --- a/src/mongo/s/database_version.cpp +++ b/src/mongo/s/database_version.cpp @@ -27,9 +27,9 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/s/database_version.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/s/database_version.h b/src/mongo/s/database_version.h index 3868c3b370b0b..734d7902a871f 100644 --- a/src/mongo/s/database_version.h +++ b/src/mongo/s/database_version.h @@ -29,7 +29,19 @@ #pragma once +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/database_version_gen.h" +#include "mongo/util/uuid.h" namespace mongo { diff --git a/src/mongo/s/grid.cpp b/src/mongo/s/grid.cpp index 87c585a02c014..7f37b082b235a 100644 --- a/src/mongo/s/grid.cpp +++ b/src/mongo/s/grid.cpp @@ -30,9 +30,17 @@ #include "mongo/s/grid.h" +#include +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/operation_context.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -78,6 +86,12 @@ void Grid::init(std::unique_ptr catalogClient, _network = network; _shardRegistry->init(); + + _isGridInitialized.store(true); +} + +bool Grid::isInitialized() const { + return _isGridInitialized.load(); } bool Grid::isShardingInitialized() const { @@ -114,6 +128,7 @@ void Grid::clearForUnitTests() { _balancerConfig.reset(); _executorPool.reset(); _network = nullptr; + _isGridInitialized.store(false); } } // namespace mongo diff --git a/src/mongo/s/grid.h b/src/mongo/s/grid.h index 41bc64b894299..41dd6f0860da2 100644 --- a/src/mongo/s/grid.h +++ b/src/mongo/s/grid.h @@ -30,12 +30,21 @@ #pragma once #include +#include +#include +#include + +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/connection_pool_stats.h" #include "mongo/executor/task_executor_pool.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/client/shard_registry.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/hierarchical_acquisition.h" namespace mongo { @@ -95,6 +104,11 @@ class Grid { */ void setShardingInitialized(); + /** + * Returns true if init() has successfully completed. + */ + bool isInitialized() const; + /** * If the instance as which this sharding component is running (config/shard/mongos) uses * additional connection pools other than the default, this function will be present and can be @@ -103,31 +117,41 @@ class Grid { CustomConnectionPoolStatsFn getCustomConnectionPoolStatsFn() const; void setCustomConnectionPoolStatsFn(CustomConnectionPoolStatsFn statsFn); + /** + * These getter methods are safe to run only when Grid::init has been called. 
+ */ ShardingCatalogClient* catalogClient() const { + dassert(_isGridInitialized.load()); return _catalogClient.get(); } CatalogCache* catalogCache() const { + dassert(_isGridInitialized.load()); return _catalogCache.get(); } ShardRegistry* shardRegistry() const { + dassert(_isGridInitialized.load()); return _shardRegistry.get(); } ClusterCursorManager* getCursorManager() const { + dassert(_isGridInitialized.load()); return _cursorManager.get(); } executor::TaskExecutorPool* getExecutorPool() const { + dassert(_isGridInitialized.load()); return _executorPool.get(); } executor::NetworkInterface* getNetwork() { + dassert(_isGridInitialized.load()); return _network; } BalancerConfiguration* getBalancerConfiguration() const { + dassert(_isGridInitialized.load()); return _balancerConfig.get(); } @@ -158,6 +182,7 @@ class Grid { executor::NetworkInterface* _network{nullptr}; AtomicWord _shardingInitialized{false}; + AtomicWord _isGridInitialized{false}; mutable Mutex _mutex = MONGO_MAKE_LATCH(HierarchicalAcquisitionLevel(0), "Grid::_mutex"); diff --git a/src/mongo/s/index_version.cpp b/src/mongo/s/index_version.cpp index 044552e6325be..13670d86939a5 100644 --- a/src/mongo/s/index_version.cpp +++ b/src/mongo/s/index_version.cpp @@ -28,7 +28,12 @@ */ #include "mongo/s/index_version.h" + +#include + +#include "mongo/idl/idl_parser.h" #include "mongo/s/index_version_gen.h" + namespace mongo { CollectionIndexes CollectionIndexes::parse(const BSONElement& element) { diff --git a/src/mongo/s/index_version.h b/src/mongo/s/index_version.h index df4bb48c6efb6..fd3b0f0ffec94 100644 --- a/src/mongo/s/index_version.h +++ b/src/mongo/s/index_version.h @@ -29,6 +29,15 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/s/chunk_version.h" #include "mongo/util/uuid.h" diff --git a/src/mongo/s/initialize_tenant_to_shard_cache.cpp b/src/mongo/s/initialize_tenant_to_shard_cache.cpp index 735b54dfa7832..e3935364ccec9 100644 --- a/src/mongo/s/initialize_tenant_to_shard_cache.cpp +++ b/src/mongo/s/initialize_tenant_to_shard_cache.cpp @@ -28,6 +28,11 @@ */ #include "mongo/s/initialize_tenant_to_shard_cache.h" + +#include + +#include + #include "mongo/db/service_context.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/s/is_mongos.cpp b/src/mongo/s/is_mongos.cpp index 6c85241db1851..b7400cc987784 100644 --- a/src/mongo/s/is_mongos.cpp +++ b/src/mongo/s/is_mongos.cpp @@ -27,10 +27,8 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/s/is_mongos.h" - +#include "mongo/db/cluster_role.h" #include "mongo/db/server_options.h" namespace mongo { diff --git a/src/mongo/s/load_balancer_support.cpp b/src/mongo/s/load_balancer_support.cpp index 77bcec56c9c4f..bf38737bebbf7 100644 --- a/src/mongo/s/load_balancer_support.cpp +++ b/src/mongo/s/load_balancer_support.cpp @@ -28,16 +28,26 @@ */ #include "mongo/s/load_balancer_support.h" +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/oid.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/hello_gen.h" #include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/s/mongos_server_parameters_gen.h" +#include "mongo/transport/session.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" namespace mongo::load_balancer_support { diff --git a/src/mongo/s/load_balancer_support.h b/src/mongo/s/load_balancer_support.h index 4a59cb35c7a99..107be2d297592 100644 --- a/src/mongo/s/load_balancer_support.h +++ b/src/mongo/s/load_balancer_support.h @@ -27,10 +27,13 @@ * it in the license file. */ +#include + #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" namespace mongo::load_balancer_support { diff --git a/src/mongo/s/load_balancer_support_test.cpp b/src/mongo/s/load_balancer_support_test.cpp index 33ea1497868de..c7b65029e24fc 100644 --- a/src/mongo/s/load_balancer_support_test.cpp +++ b/src/mongo/s/load_balancer_support_test.cpp @@ -27,19 +27,27 @@ * it in the license file. */ -#include "mongo/s/load_balancer_support.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/logv2/log.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" +#include "mongo/s/load_balancer_support.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/assert_that.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/matcher.h" +#include "mongo/unittest/matcher_core.h" +#include "mongo/util/assert_util.h" #include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace { @@ -47,10 +55,7 @@ using namespace unittest::match; class LoadBalancerSupportTest : public ServiceContextTest { public: - LoadBalancerSupportTest() { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); - } + LoadBalancerSupportTest() = default; using ServiceContextTest::ServiceContextTest; diff --git a/src/mongo/s/mock_ns_targeter.cpp b/src/mongo/s/mock_ns_targeter.cpp index c8ea5669b5b70..a8e50036da789 100644 --- a/src/mongo/s/mock_ns_targeter.cpp +++ b/src/mongo/s/mock_ns_targeter.cpp @@ -27,9 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/mock_ns_targeter.h" +#include "mongo/s/shard_version.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/s/mock_ns_targeter.h b/src/mongo/s/mock_ns_targeter.h index 21cdd3d13a9a6..91a2f69a864fa 100644 --- a/src/mongo/s/mock_ns_targeter.h +++ b/src/mongo/s/mock_ns_targeter.h @@ -29,8 +29,18 @@ #pragma once +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/ns_targeter.h" +#include "mongo/s/stale_exception.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/unittest.h" namespace mongo { @@ -82,7 +92,7 @@ class MockNSTargeter : public NSTargeter { OperationContext* opCtx, const BatchItemRef& itemRef, std::set* chunkRanges = nullptr) const override { - return _targetQuery(itemRef.getUpdate().getQ(), chunkRanges); + return _targetQuery(itemRef.getUpdateRef().getFilter(), chunkRanges); } /** @@ -94,7 +104,7 @@ class MockNSTargeter : public NSTargeter { OperationContext* opCtx, const BatchItemRef& itemRef, std::set* chunkRanges = nullptr) const override { - return _targetQuery(itemRef.getDelete().getQ(), chunkRanges); + return _targetQuery(itemRef.getDeleteRef().getFilter(), chunkRanges); } std::vector targetAllShards( @@ -134,7 +144,11 @@ class MockNSTargeter : public NSTargeter { } bool isShardedTimeSeriesBucketsNamespace() const override { - return false; + return _isShardedTimeSeriesBucketsNamespace; + } + + void setIsShardedTimeSeriesBucketsNamespace(bool isShardedTimeSeriesBucketsNamespace) { + _isShardedTimeSeriesBucketsNamespace = isShardedTimeSeriesBucketsNamespace; } private: @@ -149,6 +163,8 @@ class MockNSTargeter : public NSTargeter { NamespaceString _nss; std::vector _mockRanges; + + bool _isShardedTimeSeriesBucketsNamespace = false; }; void assertEndpointsEqual(const ShardEndpoint& endpointA, const ShardEndpoint& endpointB); diff --git a/src/mongo/s/mongos.cpp b/src/mongo/s/mongos.cpp index 131cbd4ee7369..35ba5d3548619 100644 --- a/src/mongo/s/mongos.cpp +++ b/src/mongo/s/mongos.cpp @@ -27,11 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/s/mongos_main.h" #include "mongo/util/exit.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep #if defined(_WIN32) // In Windows, wmain() is an alternate entry point for main(), and receives the same parameters diff --git a/src/mongo/s/mongos_core_options_stub.cpp b/src/mongo/s/mongos_core_options_stub.cpp index 2de5990ece190..a3080e5c08cde 100644 --- a/src/mongo/s/mongos_core_options_stub.cpp +++ b/src/mongo/s/mongos_core_options_stub.cpp @@ -27,8 +27,10 @@ * it in the license file. 
*/ -#include "mongo/base/init.h" -#include "mongo/base/status.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" namespace mongo { namespace { diff --git a/src/mongo/s/mongos_hello_response.cpp b/src/mongo/s/mongos_hello_response.cpp index a58369cd3e472..6c3b244c2bce1 100644 --- a/src/mongo/s/mongos_hello_response.cpp +++ b/src/mongo/s/mongos_hello_response.cpp @@ -28,8 +28,8 @@ */ #include "mongo/s/mongos_hello_response.h" + #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/s/mongos_hello_response.h b/src/mongo/s/mongos_hello_response.h index 4d640b1a9443f..df27fc77ff199 100644 --- a/src/mongo/s/mongos_hello_response.h +++ b/src/mongo/s/mongos_hello_response.h @@ -29,10 +29,13 @@ #pragma once -#include "mongo/rpc/topology_version_gen.h" #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/rpc/topology_version_gen.h" + namespace mongo { class BSONObj; diff --git a/src/mongo/s/mongos_main.cpp b/src/mongo/s/mongos_main.cpp index fbe6d16c75b81..8dd77f3c682c7 100644 --- a/src/mongo/s/mongos_main.cpp +++ b/src/mongo/s/mongos_main.cpp @@ -29,54 +29,93 @@ #include "mongo/s/mongos_main.h" +#include +#include +#include #include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/connpool.h" #include "mongo/client/dbclient_rs.h" #include "mongo/client/global_conn_pool.h" #include "mongo/client/remote_command_targeter_factory_impl.h" +#include "mongo/client/replica_set_change_notifier.h" #include "mongo/client/replica_set_monitor.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/audit.h" #include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/authz_manager_external_state.h" #include "mongo/db/auth/authz_manager_external_state_s.h" #include "mongo/db/auth/user_cache_invalidator_job.h" #include "mongo/db/change_stream_options_manager.h" #include "mongo/db/client.h" #include "mongo/db/client_metadata_propagation_egress_hook.h" #include "mongo/db/commands.h" -#include "mongo/db/dbdirectclient.h" #include "mongo/db/ftdc/ftdc_mongos.h" #include "mongo/db/initialize_server_global_state.h" +#include "mongo/db/keys_collection_client.h" #include "mongo/db/keys_collection_client_sharded.h" -#include "mongo/db/log_process_details.h" #include "mongo/db/logical_time_validator.h" #include "mongo/db/operation_context.h" #include "mongo/db/process_health/fault_manager.h" +#include "mongo/db/query/query_settings_manager.h" +#include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/server_options.h" +#include "mongo/db/serverless/multitenancy_check.h" #include "mongo/db/service_context.h" #include "mongo/db/service_liaison_mongos.h" #include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_impl.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/session.h" +#include "mongo/db/session/session_catalog.h" #include 
"mongo/db/session/session_killer.h" +#include "mongo/db/shard_id.h" #include "mongo/db/startup_warnings_common.h" #include "mongo/db/vector_clock_metadata_hook.h" #include "mongo/db/wire_version.h" +#include "mongo/executor/task_executor.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/idl/cluster_server_parameter_refresher.h" #include "mongo/logv2/log.h" -#include "mongo/platform/process_id.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" #include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/client/shard_factory.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/client/shard_remote.h" #include "mongo/s/client/sharding_connection_hook.h" #include "mongo/s/commands/kill_sessions_remote.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" #include "mongo/s/config_server_catalog_cache_loader.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" @@ -95,36 +134,41 @@ #include "mongo/s/sharding_uptime_reporter.h" #include "mongo/s/transaction_router.h" #include "mongo/s/version_mongos.h" -#include "mongo/scripting/dbdirectclient_factory.h" #include "mongo/scripting/engine.h" -#include "mongo/stdx/thread.h" +#include "mongo/stdx/unordered_map.h" #include "mongo/transport/ingress_handshake_metrics.h" +#include "mongo/transport/service_entry_point.h" +#include "mongo/transport/transport_layer.h" #include "mongo/transport/transport_layer_manager.h" -#include "mongo/util/admin_access.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/background.h" +#include "mongo/util/clock_source.h" #include "mongo/util/cmdline_utils/censor_cmdline.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/concurrency/thread_name.h" -#include "mongo/util/exception_filter_win32.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/debugger.h" +#include "mongo/util/duration.h" #include "mongo/util/exit.h" #include "mongo/util/exit_code.h" +#include "mongo/util/fail_point.h" #include "mongo/util/fast_clock_source_factory.h" +#include "mongo/util/future.h" #include "mongo/util/latch_analyzer.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/net/ocsp/ocsp_manager.h" #include "mongo/util/net/private/ssl_expiration.h" -#include "mongo/util/net/socket_exception.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/net/ssl_manager.h" -#include "mongo/util/ntservice.h" -#include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/ntservice.h" // IWYU pragma: keep +#include "mongo/util/options_parser/startup_options.h" // IWYU pragma: keep #include "mongo/util/periodic_runner.h" #include "mongo/util/periodic_runner_factory.h" -#include "mongo/util/processinfo.h" #include "mongo/util/quick_exit.h" #include "mongo/util/signal_handlers.h" -#include "mongo/util/stacktrace.h" -#include "mongo/util/str.h" -#include "mongo/util/text.h" -#include "mongo/util/version.h" +#include "mongo/util/text.h" // IWYU pragma: keep +#include "mongo/util/time_support.h" +#include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kSharding @@ -132,10 +176,6 @@ namespace mongo { using logv2::LogComponent; -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - // Failpoint for disabling replicaSetChangeConfigServerUpdateHook calls on signaled mongos. MONGO_FAIL_POINT_DEFINE(failReplicaSetChangeConfigServerUpdateHook); @@ -152,6 +192,180 @@ constexpr auto kSignKeysRetryInterval = Seconds{1}; boost::optional shardingUptimeReporter; +class ShardingReplicaSetChangeListener final + : public ReplicaSetChangeNotifier::Listener, + public std::enable_shared_from_this { +public: + ShardingReplicaSetChangeListener(ServiceContext* serviceContext) + : _serviceContext(serviceContext) {} + ~ShardingReplicaSetChangeListener() final = default; + + void onFoundSet(const Key& key) noexcept final {} + + void onConfirmedSet(const State& state) noexcept final { + const auto& connStr = state.connStr; + const auto& setName = connStr.getSetName(); + + try { + LOGV2(471693, + "Updating the shard registry with confirmed replica set", + "connectionString"_attr = connStr); + Grid::get(_serviceContext) + ->shardRegistry() + ->updateReplSetHosts(connStr, + ShardRegistry::ConnectionStringUpdateType::kConfirmed); + } catch (const ExceptionForCat& e) { + LOGV2(471694, + "Unable to update the shard registry with confirmed replica set", + "error"_attr = e); + } + + bool updateInProgress = false; + { + stdx::lock_guard lock(_mutex); + if (!_hasUpdateState(lock, setName)) { + _updateStates.emplace(std::piecewise_construct, + std::forward_as_tuple(setName), + std::forward_as_tuple()); + } + auto& updateState = _updateStates.at(setName); + updateState.nextUpdateToSend = connStr; + updateInProgress = updateState.updateInProgress; + } + + if (!updateInProgress) { + _scheduleUpdateConfigServer(setName); + } + } + + void onPossibleSet(const State& state) noexcept final { + try { + Grid::get(_serviceContext) + ->shardRegistry() + ->updateReplSetHosts(state.connStr, + ShardRegistry::ConnectionStringUpdateType::kPossible); + } catch (const DBException& ex) { + LOGV2_DEBUG(22849, + 2, + "Unable to update sharding state with possible replica set due to {error}", + "Unable to update sharding state with possible replica set", + "error"_attr = ex); + } + } + + void onDroppedSet(const Key& key) noexcept final {} + +private: + // Schedules updates for replica set 'setName' on the config server. Loosely preserves ordering + // of update execution. Newer updates will not be overwritten by older updates in config.shards.
+ void _scheduleUpdateConfigServer(const std::string& setName) { + ConnectionString updatedConnectionString; + { + stdx::lock_guard lock(_mutex); + if (!_hasUpdateState(lock, setName)) { + return; + } + auto& updateState = _updateStates.at(setName); + if (updateState.updateInProgress) { + return; + } + updateState.updateInProgress = true; + updatedConnectionString = updateState.nextUpdateToSend.value(); + updateState.nextUpdateToSend = boost::none; + } + + auto executor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor(); + auto schedStatus = + executor + ->scheduleWork([self = shared_from_this(), + setName, + update = std::move(updatedConnectionString)](const auto& args) { + self->_updateConfigServer(args.status, setName, update); + }) + .getStatus(); + if (ErrorCodes::isCancellationError(schedStatus.code())) { + LOGV2_DEBUG(22848, + 2, + "Unable to schedule updating sharding state with confirmed replica set due" + " to {error}", + "Unable to schedule updating sharding state with confirmed replica set", + "error"_attr = schedStatus); + return; + } + uassertStatusOK(schedStatus); + } + + void _updateConfigServer(const Status& status, + const std::string& setName, + const ConnectionString& update) { + if (ErrorCodes::isCancellationError(status.code())) { + stdx::lock_guard lock(_mutex); + _updateStates.erase(setName); + return; + } + + if (MONGO_unlikely(failReplicaSetChangeConfigServerUpdateHook.shouldFail())) { + _endUpdateConfigServer(setName, update); + return; + } + + try { + LOGV2(22846, + "Updating sharding state with confirmed replica set", + "connectionString"_attr = update); + ShardRegistry::updateReplicaSetOnConfigServer(_serviceContext, update); + } catch (const ExceptionForCat& e) { + LOGV2(22847, + "Unable to update sharding state with confirmed replica set", + "error"_attr = e); + } catch (...) { + _endUpdateConfigServer(setName, update); + throw; + } + _endUpdateConfigServer(setName, update); + } + + void _endUpdateConfigServer(const std::string& setName, const ConnectionString& update) { + bool moreUpdates = false; + { + stdx::lock_guard lock(_mutex); + invariant(_hasUpdateState(lock, setName)); + auto& updateState = _updateStates.at(setName); + updateState.updateInProgress = false; + moreUpdates = (updateState.nextUpdateToSend != boost::none); + if (!moreUpdates) { + _updateStates.erase(setName); + } + } + if (moreUpdates) { + auto executor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor(); + executor->schedule([self = shared_from_this(), setName](const auto& _) { + self->_scheduleUpdateConfigServer(setName); + }); + } + } + + // Returns true if a ReplSetConfigUpdateState exists for replica set setName. + bool _hasUpdateState(WithLock, const std::string& setName) { + return (_updateStates.find(setName) != _updateStates.end()); + } + + ServiceContext* _serviceContext; + + mutable Mutex _mutex = MONGO_MAKE_LATCH("ShardingReplicaSetChangeListenerMongod::mutex"); + + struct ReplSetConfigUpdateState { + ReplSetConfigUpdateState() = default; + ReplSetConfigUpdateState(const ReplSetConfigUpdateState&) = delete; + ReplSetConfigUpdateState& operator=(const ReplSetConfigUpdateState&) = delete; + + // True when an update to the config.shards is in progress. 
+ bool updateInProgress = false; + boost::optional nextUpdateToSend; + }; + stdx::unordered_map _updateStates; +}; + Status waitForSigningKeys(OperationContext* opCtx) { auto const shardRegistry = Grid::get(opCtx)->shardRegistry(); @@ -193,7 +407,6 @@ Status waitForSigningKeys(OperationContext* opCtx) { } } - /** * Abort all active transactions in the catalog that has not yet been committed. * @@ -230,6 +443,11 @@ void implicitlyAbortAllTransactions(OperationContext* opCtx) { }); auto newClient = opCtx->getServiceContext()->makeClient("ImplicitlyAbortTxnAtShutdown"); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*newClient.get()); + newClient.get()->setSystemOperationUnkillableByStepdown(lk); + } AlternativeClientRegion acr(newClient); Status shutDownStatus(ErrorCodes::InterruptedAtShutdown, @@ -265,8 +483,15 @@ void cleanupTask(const ShutdownTaskArgs& shutdownArgs) { { // This client initiation pattern is only to be used here, with plans to eliminate this // pattern down the line. - if (!haveClient()) + if (!haveClient()) { Client::initThread(getThreadName()); + + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + } Client& client = cc(); ServiceContext::UniqueOperationContext uniqueTxn; @@ -297,6 +522,12 @@ void cleanupTask(const ShutdownTaskArgs& shutdownArgs) { tl->shutdown(); } + if (audit::shutdownSynchronizeJob) { + audit::shutdownSynchronizeJob(); + } + + ClusterServerParameterRefresher::onShutdown(serviceContext); + try { // Abort transactions while we can still send remote commands. implicitlyAbortAllTransactions(opCtx); @@ -311,11 +542,8 @@ void cleanupTask(const ShutdownTaskArgs& shutdownArgs) { lsc->joinOnShutDown(); } - if (analyze_shard_key::isFeatureFlagEnabled()) { - LOGV2_OPTIONS( - 6973901, {LogComponent::kDefault}, "Shutting down the QueryAnalysisSampler"); - analyze_shard_key::QueryAnalysisSampler::get(serviceContext).onShutdown(); - } + LOGV2_OPTIONS(6973901, {LogComponent::kDefault}, "Shutting down the QueryAnalysisSampler"); + analyze_shard_key::QueryAnalysisSampler::get(serviceContext).onShutdown(); ReplicaSetMonitor::shutdown(); @@ -351,6 +579,7 @@ void cleanupTask(const ShutdownTaskArgs& shutdownArgs) { } if (auto pool = Grid::get(opCtx)->getExecutorPool()) { + LOGV2_OPTIONS(7698300, {LogComponent::kSharding}, "Shutting down the ExecutorPool"); pool->shutdownAndJoin(); } @@ -359,6 +588,13 @@ void cleanupTask(const ShutdownTaskArgs& shutdownArgs) { } if (Grid::get(serviceContext)->isShardingInitialized()) { + // The CatalogCache must be shut down before shutting down the CatalogCacheLoader as + // the CatalogCache may try to schedule work on CatalogCacheLoader and fail.
+ LOGV2_OPTIONS(7698301, {LogComponent::kSharding}, "Shutting down the CatalogCache"); + Grid::get(serviceContext)->catalogCache()->shutDownAndJoin(); + + LOGV2_OPTIONS( + 7698302, {LogComponent::kSharding}, "Shutting down the CatalogCacheLoader"); CatalogCacheLoader::get(serviceContext).shutDown(); } @@ -386,7 +622,9 @@ void cleanupTask(const ShutdownTaskArgs& shutdownArgs) { #endif } -Status initializeSharding(OperationContext* opCtx) { +Status initializeSharding( + OperationContext* opCtx, + std::shared_ptr* replicaSetChangeListener) { auto targeterFactory = std::make_unique(); auto targeterFactoryPtr = targeterFactory.get(); @@ -455,6 +693,24 @@ Status initializeSharding(OperationContext* opCtx) { return status; } + *replicaSetChangeListener = + ReplicaSetMonitor::getNotifier().makeListener( + opCtx->getServiceContext()); + + // Reset the shard register config connection string in case it missed the replica set monitor + // notification. + auto configShardConnStr = + Grid::get(opCtx->getServiceContext())->shardRegistry()->getConfigServerConnectionString(); + if (configShardConnStr.type() == ConnectionString::ConnectionType::kReplicaSet) { + ConnectionString rsMonitorConfigConnStr( + ReplicaSetMonitor::get(configShardConnStr.getSetName())->getServerAddress(), + ConnectionString::ConnectionType::kReplicaSet); + Grid::get(opCtx->getServiceContext()) + ->shardRegistry() + ->updateReplSetHosts(rsMonitorConfigConnStr, + ShardRegistry::ConnectionStringUpdateType::kConfirmed); + } + status = loadGlobalSettingsFromConfigServer(opCtx, Grid::get(opCtx)->catalogClient()); if (!status.isOK()) { return status; @@ -494,183 +750,15 @@ MONGO_INITIALIZER_WITH_PREREQUISITES(WireSpec, ("EndStartupOptionHandling"))(Ini WireSpec::instance().initialize(std::move(spec)); } -class ShardingReplicaSetChangeListener final - : public ReplicaSetChangeNotifier::Listener, - public std::enable_shared_from_this { -public: - ShardingReplicaSetChangeListener(ServiceContext* serviceContext) - : _serviceContext(serviceContext) {} - ~ShardingReplicaSetChangeListener() final = default; - - void onFoundSet(const Key& key) noexcept final {} - - void onConfirmedSet(const State& state) noexcept final { - const auto& connStr = state.connStr; - const auto& setName = connStr.getSetName(); - - try { - LOGV2(471693, - "Updating the shard registry with confirmed replica set", - "connectionString"_attr = connStr); - Grid::get(_serviceContext) - ->shardRegistry() - ->updateReplSetHosts(connStr, - ShardRegistry::ConnectionStringUpdateType::kConfirmed); - } catch (const ExceptionForCat& e) { - LOGV2(471694, - "Unable to update the shard registry with confirmed replica set", - "error"_attr = e); - } - - bool updateInProgress = false; - { - stdx::lock_guard lock(_mutex); - if (!_hasUpdateState(lock, setName)) { - _updateStates.emplace(std::piecewise_construct, - std::forward_as_tuple(setName), - std::forward_as_tuple()); - } - auto& updateState = _updateStates.at(setName); - updateState.nextUpdateToSend = connStr; - updateInProgress = updateState.updateInProgress; - } - - if (!updateInProgress) { - _scheduleUpdateConfigServer(setName); - } - } - - void onPossibleSet(const State& state) noexcept final { - try { - Grid::get(_serviceContext) - ->shardRegistry() - ->updateReplSetHosts(state.connStr, - ShardRegistry::ConnectionStringUpdateType::kPossible); - } catch (const DBException& ex) { - LOGV2_DEBUG(22849, - 2, - "Unable to update sharding state with possible replica set due to {error}", - "Unable to update sharding state with possible 
replica set", - "error"_attr = ex); - } - } - - void onDroppedSet(const Key& key) noexcept final {} - -private: - // Schedules updates for replica set 'setName' on the config server. Loosly preserves ordering - // of update execution. Newer updates will not be overwritten by older updates in config.shards. - void _scheduleUpdateConfigServer(const std::string& setName) { - ConnectionString updatedConnectionString; - { - stdx::lock_guard lock(_mutex); - if (!_hasUpdateState(lock, setName)) { - return; - } - auto& updateState = _updateStates.at(setName); - if (updateState.updateInProgress) { - return; - } - updateState.updateInProgress = true; - updatedConnectionString = updateState.nextUpdateToSend.value(); - updateState.nextUpdateToSend = boost::none; - } - - auto executor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor(); - auto schedStatus = - executor - ->scheduleWork([self = shared_from_this(), - setName, - update = std::move(updatedConnectionString)](const auto& args) { - self->_updateConfigServer(args.status, setName, update); - }) - .getStatus(); - if (ErrorCodes::isCancellationError(schedStatus.code())) { - LOGV2_DEBUG(22848, - 2, - "Unable to schedule updating sharding state with confirmed replica set due" - " to {error}", - "Unable to schedule updating sharding state with confirmed replica set", - "error"_attr = schedStatus); - return; - } - uassertStatusOK(schedStatus); - } - - void _updateConfigServer(const Status& status, - const std::string& setName, - const ConnectionString& update) { - if (ErrorCodes::isCancellationError(status.code())) { - stdx::lock_guard lock(_mutex); - _updateStates.erase(setName); - return; - } - - if (MONGO_unlikely(failReplicaSetChangeConfigServerUpdateHook.shouldFail())) { - _endUpdateConfigServer(setName, update); - return; - } - - try { - LOGV2(22846, - "Updating sharding state with confirmed replica set", - "connectionString"_attr = update); - ShardRegistry::updateReplicaSetOnConfigServer(_serviceContext, update); - } catch (const ExceptionForCat& e) { - LOGV2(22847, - "Unable to update sharding state with confirmed replica set", - "error"_attr = e); - } catch (...) { - _endUpdateConfigServer(setName, update); - throw; - } - _endUpdateConfigServer(setName, update); - } - - void _endUpdateConfigServer(const std::string& setName, const ConnectionString& update) { - bool moreUpdates = false; - { - stdx::lock_guard lock(_mutex); - invariant(_hasUpdateState(lock, setName)); - auto& updateState = _updateStates.at(setName); - updateState.updateInProgress = false; - moreUpdates = (updateState.nextUpdateToSend != boost::none); - if (!moreUpdates) { - _updateStates.erase(setName); - } - } - if (moreUpdates) { - auto executor = Grid::get(_serviceContext)->getExecutorPool()->getFixedExecutor(); - executor->schedule([self = shared_from_this(), setName](const auto& _) { - self->_scheduleUpdateConfigServer(setName); - }); - } - } - - // Returns true if a ReplSetConfigUpdateState exists for replica set setName. 
- bool _hasUpdateState(WithLock, const std::string& setName) { - return (_updateStates.find(setName) != _updateStates.end()); - } - - ServiceContext* _serviceContext; - - mutable Mutex _mutex = MONGO_MAKE_LATCH("ShardingReplicaSetChangeListenerMongod::mutex"); - - struct ReplSetConfigUpdateState { - ReplSetConfigUpdateState() = default; - ReplSetConfigUpdateState(const ReplSetConfigUpdateState&) = delete; - ReplSetConfigUpdateState& operator=(const ReplSetConfigUpdateState&) = delete; - - // True when an update to the config.shards is in progress. - bool updateInProgress = false; - boost::optional nextUpdateToSend; - }; - stdx::unordered_map _updateStates; -}; - ExitCode runMongosServer(ServiceContext* serviceContext) { ThreadClient tc("mongosMain", serviceContext); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + logMongosVersionInfo(nullptr); // Set up the periodic runner for background job execution @@ -713,12 +801,6 @@ ExitCode runMongosServer(ServiceContext* serviceContext) { // Add sharding hooks to both connection pools - ShardingConnectionHook includes auth hooks globalConnPool.addHook(new ShardingConnectionHook(std::move(unshardedHookList))); - // Hook up a Listener for changes from the ReplicaSetMonitor - // This will last for the scope of this function. i.e. until shutdown finishes - auto shardingRSCL = - ReplicaSetMonitor::getNotifier().makeListener( - serviceContext); - // Mongos connection pools already takes care of authenticating new connections so the // replica set connection shouldn't need to. DBClientReplicaSet::setAuthPooledSecondaryConn(false); @@ -729,12 +811,16 @@ ExitCode runMongosServer(ServiceContext* serviceContext) { ReadWriteConcernDefaults::create(serviceContext, readWriteConcernDefaultsCacheLookupMongoS); ChangeStreamOptionsManager::create(serviceContext); + query_settings::QuerySettingsManager::create(serviceContext); auto opCtxHolder = tc->makeOperationContext(); auto const opCtx = opCtxHolder.get(); + // Keep listener alive until shutdown. + std::shared_ptr replicaSetChangeListener; + try { - uassertStatusOK(initializeSharding(opCtx)); + uassertStatusOK(initializeSharding(opCtx, &replicaSetChangeListener)); } catch (const DBException& ex) { if (ex.code() == ErrorCodes::CallbackCanceled) { invariant(globalInShutdownDeprecated()); @@ -788,9 +874,8 @@ ExitCode runMongosServer(ServiceContext* serviceContext) { clusterCursorCleanupJob.go(); UserCacheInvalidator::start(serviceContext, opCtx); - if (gFeatureFlagClusterWideConfigM2.isEnabled(serverGlobalParams.featureCompatibility)) { - ClusterServerParameterRefresher::start(serviceContext, opCtx); - } + + ClusterServerParameterRefresher::start(serviceContext, opCtx); if (audit::initializeSynchronizeJob) { audit::initializeSynchronizeJob(serviceContext); @@ -944,6 +1029,7 @@ ExitCode mongos_main(int argc, char* argv[]) { if (argc < 1) return ExitCode::badOptions; + waitForDebugger(); setupSignalHandlers(); @@ -959,10 +1045,7 @@ ExitCode mongos_main(int argc, char* argv[]) { } try { - auto serviceContextHolder = ServiceContext::make(); - serviceContextHolder->registerClientObserver( - std::make_unique()); - setGlobalServiceContext(std::move(serviceContextHolder)); + setGlobalServiceContext(ServiceContext::make()); } catch (...) 
{ auto cause = exceptionToStatus(); LOGV2_FATAL_OPTIONS( @@ -997,6 +1080,8 @@ ExitCode mongos_main(int argc, char* argv[]) { logCommonStartupWarnings(serverGlobalParams); + setUpMultitenancyCheck(service, gMultitenancySupport); + try { if (!initialize_server_global_state::checkSocketPath()) return ExitCode::abrupt; diff --git a/src/mongo/s/mongos_options.cpp b/src/mongo/s/mongos_options.cpp index f0dcd5968776b..f1d6e1052a253 100644 --- a/src/mongo/s/mongos_options.cpp +++ b/src/mongo/s/mongos_options.cpp @@ -28,22 +28,27 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/mongos_options.h" - +// IWYU pragma: no_include "ext/alloc_traits.h" #include +#include #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" -#include "mongo/bson/util/builder.h" -#include "mongo/config.h" -#include "mongo/db/server_options_base.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/server_options.h" #include "mongo/db/server_options_server_helpers.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/s/mongos_options.h" #include "mongo/s/version_mongos.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/options_parser/startup_options.h" #include "mongo/util/str.h" @@ -117,13 +122,6 @@ Status storeMongosOptions(const moe::Environment& params) { return ret; } - if (params.count("net.port")) { - int port = params["net.port"].as(); - if (port <= 0 || port > 65535) { - return Status(ErrorCodes::BadValue, "error: port number must be between 1 and 65535"); - } - } - if (params.count("security.javascriptEnabled")) { mongosGlobalParams.scriptingEnabled = params["security.javascriptEnabled"].as(); } diff --git a/src/mongo/s/mongos_options.h b/src/mongo/s/mongos_options.h index 538f82bd965c5..b025707314600 100644 --- a/src/mongo/s/mongos_options.h +++ b/src/mongo/s/mongos_options.h @@ -29,12 +29,16 @@ #pragma once +#include +#include + #include "mongo/base/status.h" #include "mongo/client/connection_string.h" #include "mongo/db/server_options.h" #include "mongo/s/is_mongos.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" +#include "mongo/util/options_parser/value.h" namespace mongo { diff --git a/src/mongo/s/mongos_options_init.cpp b/src/mongo/s/mongos_options_init.cpp index 6a1a50693d780..3f60c3876ac57 100644 --- a/src/mongo/s/mongos_options_init.cpp +++ b/src/mongo/s/mongos_options_init.cpp @@ -27,17 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/s/mongos_options.h" - #include +#include +#include +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" #include "mongo/db/cluster_auth_mode_option_gen.h" #include "mongo/db/keyfile_option_gen.h" #include "mongo/db/server_options_base.h" #include "mongo/db/server_options_nongeneral_gen.h" +#include "mongo/s/mongos_options.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" #include "mongo/util/quick_exit.h" diff --git a/src/mongo/s/mongos_server_parameters.cpp b/src/mongo/s/mongos_server_parameters.cpp index bb31d272b5ffc..372ec7adc8fcb 100644 --- a/src/mongo/s/mongos_server_parameters.cpp +++ b/src/mongo/s/mongos_server_parameters.cpp @@ -29,7 +29,16 @@ #include "mongo/s/mongos_server_parameters.h" +#include + +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/tenant_id.h" #include "mongo/s/mongos_server_parameters_gen.h" #include "mongo/util/str.h" diff --git a/src/mongo/s/mongos_topology_coordinator.cpp b/src/mongo/s/mongos_topology_coordinator.cpp index c1f048e6221bc..a7b1d76cc097b 100644 --- a/src/mongo/s/mongos_topology_coordinator.cpp +++ b/src/mongo/s/mongos_topology_coordinator.cpp @@ -28,13 +28,39 @@ */ -#include "mongo/logv2/log.h" - +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/db/client.h" #include "mongo/db/service_context.h" #include "mongo/db/shutdown_in_progress_quiesce_info.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" #include "mongo/s/mongos_topology_coordinator.h" +#include "mongo/transport/hello_metrics.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand diff --git a/src/mongo/s/mongos_topology_coordinator.h b/src/mongo/s/mongos_topology_coordinator.h index b01cd6ed23b41..f344b1973120b 100644 --- a/src/mongo/s/mongos_topology_coordinator.h +++ b/src/mongo/s/mongos_topology_coordinator.h @@ -29,13 +29,21 @@ #pragma once +#include +#include +#include +#include + #include "mongo/db/operation_context.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/topology_version_gen.h" #include "mongo/s/mongos_hello_response.h" #include "mongo/transport/hello_metrics.h" #include "mongo/util/concurrency/with_lock.h" -#include +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/s/mongos_topology_coordinator_test.cpp b/src/mongo/s/mongos_topology_coordinator_test.cpp index 
12488d9137995..02b6c4ce0e8dc 100644 --- a/src/mongo/s/mongos_topology_coordinator_test.cpp +++ b/src/mongo/s/mongos_topology_coordinator_test.cpp @@ -27,33 +27,41 @@ * it in the license file. */ - -#include "mongo/logv2/log.h" - +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/platform/basic.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" +#include "mongo/platform/atomic_word.h" #include "mongo/s/mongos_topology_coordinator.h" +#include "mongo/stdx/thread.h" #include "mongo/transport/hello_metrics.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - -using std::unique_ptr; - namespace mongo { namespace { class MongosTopoCoordTest : public ServiceContextTest { public: - MongosTopoCoordTest() { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); - } + MongosTopoCoordTest() = default; virtual void setUp() { _topo = std::make_unique(); diff --git a/src/mongo/s/move_primary/move_primary_feature_flag.idl b/src/mongo/s/move_primary/move_primary_feature_flag.idl deleted file mode 100644 index 61099c5cbdd52..0000000000000 --- a/src/mongo/s/move_primary/move_primary_feature_flag.idl +++ /dev/null @@ -1,42 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. -# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. -# - -# Feature flag for movePrimary. - -global: - cpp_namespace: "mongo::move_primary" - -imports: - - "mongo/db/basic_types.idl" - -feature_flags: - featureFlagOnlineMovePrimaryLifecycle: - description: When enabled, uses improved machinery for movePrimary operation. 
- cpp_varname: gFeatureFlagOnlineMovePrimaryLifecycle - default: true - version: 7.0 diff --git a/src/mongo/s/multi_statement_transaction_requests_sender.cpp b/src/mongo/s/multi_statement_transaction_requests_sender.cpp index 64426321ef526..7878a38d87a7d 100644 --- a/src/mongo/s/multi_statement_transaction_requests_sender.cpp +++ b/src/mongo/s/multi_statement_transaction_requests_sender.cpp @@ -27,13 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/s/multi_statement_transaction_requests_sender.h" +#include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/db/baton.h" #include "mongo/db/operation_context.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/s/transaction_router.h" #include "mongo/s/transaction_router_resource_yielder.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/database_name_util.h" namespace mongo { @@ -80,16 +87,18 @@ MultiStatementTransactionRequestsSender::MultiStatementTransactionRequestsSender const DatabaseName& dbName, const std::vector& requests, const ReadPreferenceSetting& readPreference, - Shard::RetryPolicy retryPolicy) + Shard::RetryPolicy retryPolicy, + AsyncRequestsSender::ShardHostMap designatedHostsMap) : _opCtx(opCtx), _ars(std::make_unique( opCtx, std::move(executor), - dbName.db(), + DatabaseNameUtil::serialize(dbName), attachTxnDetails(opCtx, requests), readPreference, retryPolicy, - TransactionRouterResourceYielder::makeForRemoteCommand())) {} + TransactionRouterResourceYielder::makeForRemoteCommand(), + designatedHostsMap)) {} MultiStatementTransactionRequestsSender::~MultiStatementTransactionRequestsSender() { invariant(_opCtx); @@ -106,7 +115,7 @@ bool MultiStatementTransactionRequestsSender::done() { } AsyncRequestsSender::Response MultiStatementTransactionRequestsSender::next() { - const auto response = _ars->next(); + auto response = _ars->next(); processReplyMetadata(_opCtx, response); return response; } diff --git a/src/mongo/s/multi_statement_transaction_requests_sender.h b/src/mongo/s/multi_statement_transaction_requests_sender.h index 1508e254c7914..daefe6825a154 100644 --- a/src/mongo/s/multi_statement_transaction_requests_sender.h +++ b/src/mongo/s/multi_statement_transaction_requests_sender.h @@ -30,8 +30,14 @@ #pragma once #include +#include +#include "mongo/client/read_preference.h" +#include "mongo/db/database_name.h" +#include "mongo/db/operation_context.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/async_requests_sender.h" +#include "mongo/s/client/shard.h" namespace mongo { @@ -53,7 +59,8 @@ class MultiStatementTransactionRequestsSender { const DatabaseName& dbName, const std::vector& requests, const ReadPreferenceSetting& readPreference, - Shard::RetryPolicy retryPolicy); + Shard::RetryPolicy retryPolicy, + AsyncRequestsSender::ShardHostMap designatedHostsMap = {}); ~MultiStatementTransactionRequestsSender(); diff --git a/src/mongo/s/query/SConscript b/src/mongo/s/query/SConscript index 1664b79209d09..dc37df7d0a16c 100644 --- a/src/mongo/s/query/SConscript +++ b/src/mongo/s/query/SConscript @@ -8,6 +8,7 @@ env.Library( target='cluster_query', source=[ 'cluster_client_cursor_impl.cpp', + 'cluster_client_cursor_params.cpp', 'cluster_find.cpp', 'cluster_query_knobs.idl', 'store_possible_cursor.cpp', @@ -41,6 +42,7 @@ env.Library( '$BUILD_DIR/mongo/db/pipeline/pipeline', 
'$BUILD_DIR/mongo/db/pipeline/process_interface/mongos_process_interface', '$BUILD_DIR/mongo/db/pipeline/sharded_agg_helpers', + '$BUILD_DIR/mongo/db/query/query_shape', '$BUILD_DIR/mongo/db/shared_request_handling', '$BUILD_DIR/mongo/db/views/view_catalog_helpers', '$BUILD_DIR/mongo/db/views/views', @@ -62,6 +64,7 @@ env.Library( 'router_stage_limit.cpp', 'router_stage_mock.cpp', 'router_stage_pipeline.cpp', + 'router_stage_queued_data.cpp', 'router_stage_remove_metadata_fields.cpp', 'router_stage_skip.cpp', ], @@ -104,14 +107,15 @@ env.Library( ) env.Library( - target="cluster_cursor_manager", + target='cluster_cursor_manager', source=[ - "cluster_cursor_manager.cpp", + 'cluster_cursor_manager.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/base', '$BUILD_DIR/mongo/db/auth/auth', '$BUILD_DIR/mongo/db/auth/authprivilege', + '$BUILD_DIR/mongo/db/commands', '$BUILD_DIR/mongo/db/generic_cursor', '$BUILD_DIR/mongo/db/query/op_metrics', '$BUILD_DIR/mongo/db/query/query_knobs', @@ -154,6 +158,7 @@ env.CppUnitTest( "$BUILD_DIR/mongo/db/auth/saslauth", "$BUILD_DIR/mongo/db/query/query_request", "$BUILD_DIR/mongo/db/query/query_test_service_context", + "$BUILD_DIR/mongo/db/service_context_non_d", "$BUILD_DIR/mongo/db/session/logical_session_id", "$BUILD_DIR/mongo/db/shared_request_handling", "$BUILD_DIR/mongo/executor/thread_pool_task_executor_test_fixture", diff --git a/src/mongo/s/query/async_results_merger.cpp b/src/mongo/s/query/async_results_merger.cpp index f27c9e154c882..456cf31714ed9 100644 --- a/src/mongo/s/query/async_results_merger.cpp +++ b/src/mongo/s/query/async_results_merger.cpp @@ -28,22 +28,33 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/query/async_results_merger.h" - +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/simple_bsonobj_comparator.h" -#include "mongo/client/remote_command_targeter.h" #include "mongo/db/pipeline/change_stream_constants.h" #include "mongo/db/pipeline/change_stream_invalidation_info.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/getmore_command_gen.h" #include "mongo/db/query/kill_cursors_gen.h" -#include "mongo/db/query/query_feature_flags_gen.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/executor/remote_command_request.h" -#include "mongo/executor/remote_command_response.h" -#include "mongo/s/catalog/type_shard.h" +#include "mongo/rpc/metadata.h" +#include "mongo/s/query/async_results_merger.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -458,12 +469,14 @@ Status AsyncResultsMerger::_askForNextBatch(WithLock, size_t remoteIndex) { if (_params.getSessionId()) { BSONObjBuilder newCmdBob(std::move(cmdObj)); - BSONObjBuilder lsidBob(newCmdBob.subobjStart(OperationSessionInfo::kSessionIdFieldName)); + BSONObjBuilder lsidBob( + newCmdBob.subobjStart(OperationSessionInfoFromClient::kSessionIdFieldName)); _params.getSessionId()->serialize(&lsidBob); lsidBob.doneFast(); if (_params.getTxnNumber()) { - newCmdBob.append(OperationSessionInfo::kTxnNumberFieldName, *_params.getTxnNumber()); + newCmdBob.append(OperationSessionInfoFromClient::kTxnNumberFieldName, + *_params.getTxnNumber()); } if (_params.getAutocommit()) { @@ -502,7 +515,7 @@ Status 
AsyncResultsMerger::_scheduleGetMores(WithLock lk) { // Reveal opCtx errors (such as MaxTimeMSExpired) and reflect them in the remote status. invariant(_opCtx, "Cannot schedule a getMore without an OperationContext"); - const auto interruptStatus = _opCtx->checkForInterruptNoAssert(); + auto interruptStatus = _opCtx->checkForInterruptNoAssert(); if (!interruptStatus.isOK()) { for (size_t i = 0; i < _remotes.size(); ++i) { if (!_remotes[i].exhausted()) { diff --git a/src/mongo/s/query/async_results_merger.h b/src/mongo/s/query/async_results_merger.h index a96785822a2b0..9b68ad6fdb00b 100644 --- a/src/mongo/s/query/async_results_merger.h +++ b/src/mongo/s/query/async_results_merger.h @@ -29,19 +29,34 @@ #pragma once +#include #include +#include +#include +#include +#include #include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/db/cursor_id.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/s/query/cluster_query_result.h" #include "mongo/stdx/future.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/s/query/async_results_merger_params.idl b/src/mongo/s/query/async_results_merger_params.idl index 42074b7b62eee..e6d5db960abbf 100644 --- a/src/mongo/s/query/async_results_merger_params.idl +++ b/src/mongo/s/query/async_results_merger_params.idl @@ -51,51 +51,66 @@ types: structs: RemoteCursor: description: A description of a cursor opened on a remote server. + query_shape_component: true fields: shardId: type: string description: The shardId of the shard on which the cursor resides. + query_shape: anonymize hostAndPort: type: HostAndPort description: The exact host (within the shard) on which the cursor resides. + query_shape: anonymize cursorResponse: type: CursorResponse + query_shape: literal description: The response after establishing a cursor on the remote shard, including the first batch. AsyncResultsMergerParams: description: The parameters needed to establish an AsyncResultsMerger. chained_structs: - OperationSessionInfoFromClient : OperationSessionInfo + OperationSessionInfoFromClientBase : OperationSessionInfo + query_shape_component: true fields: sort: type: object description: The sort requested on the merging operation. Empty if there is no sort. optional: true + query_shape: literal compareWholeSortKey: type: bool default: false + query_shape: literal description: >- When 'compareWholeSortKey' is true, $sortKey is a scalar value, rather than an object. We extract the sort key {$sortKey: }. The sort key pattern is verified to be {$sortKey: 1}. - remotes: array + remotes: + type: array + query_shape: literal tailableMode: type: TailableMode optional: true description: If set, the tailability mode of this cursor. + query_shape: parameter batchSize: type: safeInt64 optional: true description: The batch size for this cursor. - nss: namespacestring + query_shape: literal + nss: + type: namespacestring + query_shape: custom allowPartialResults: type: bool default: false description: If set, error responses are ignored. 
+ query_shape: parameter recordRemoteOpWaitTime: type: bool default: false + query_shape: parameter description: >- This parameter is not used anymore but should stay for a while for backward compatibility. diff --git a/src/mongo/s/query/async_results_merger_test.cpp b/src/mongo/s/query/async_results_merger_test.cpp index f92195f292594..8585024fb6335 100644 --- a/src/mongo/s/query/async_results_merger_test.cpp +++ b/src/mongo/s/query/async_results_merger_test.cpp @@ -27,26 +27,51 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/s/query/async_results_merger.h" - +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include #include - -#include "mongo/db/json.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/client.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/change_stream_constants.h" #include "mongo/db/pipeline/resume_token.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/getmore_command_gen.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/client/shard_registry.h" +#include "mongo/s/query/async_results_merger.h" #include "mongo/s/query/results_merger_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { - namespace { LogicalSessionId parseSessionIdFromCmd(BSONObj cmdObj) { @@ -2119,20 +2144,6 @@ TEST_F(AsyncResultsMergerTest, GetMoresShouldIncludeLSIDAndTxnNumIfSpecified) { }); } -DEATH_TEST_REGEX_F(AsyncResultsMergerTest, - ConstructingARMWithTxnNumAndNoLSIDShouldCrash, - R"#(Invariant failure.*params.getSessionId\(\))#") { - AsyncResultsMergerParams params; - - OperationSessionInfoFromClient sessionInfo; - sessionInfo.setTxnNumber(5); - params.setOperationSessionInfo(sessionInfo); - - // This should trigger an invariant. - ASSERT_FALSE( - std::make_unique(operationContext(), executor(), std::move(params))); -} - DEATH_TEST_F(AsyncResultsMergerTest, ShouldFailIfAskedToPerformGetMoresWithoutAnOpCtx, "Cannot schedule a getMore without an OperationContext") { diff --git a/src/mongo/s/query/blocking_results_merger.cpp b/src/mongo/s/query/blocking_results_merger.cpp index eccbf67b8c804..52bb278ed1aa9 100644 --- a/src/mongo/s/query/blocking_results_merger.cpp +++ b/src/mongo/s/query/blocking_results_merger.cpp @@ -27,12 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/db/curop.h" #include "mongo/db/query/find_common.h" -#include "mongo/db/session/session_catalog_mongod.h" -#include "mongo/db/transaction/transaction_participant.h" #include "mongo/s/query/blocking_results_merger.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/scopeguard.h" namespace mongo { diff --git a/src/mongo/s/query/blocking_results_merger.h b/src/mongo/s/query/blocking_results_merger.h index c05cecc5da8a8..bbdfbf9acaa99 100644 --- a/src/mongo/s/query/blocking_results_merger.h +++ b/src/mongo/s/query/blocking_results_merger.h @@ -29,10 +29,25 @@ #pragma once +#include +#include #include - +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/executor/task_executor.h" #include "mongo/s/query/async_results_merger.h" +#include "mongo/s/query/async_results_merger_params_gen.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/router_exec_stage.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/s/query/blocking_results_merger_test.cpp b/src/mongo/s/query/blocking_results_merger_test.cpp index 5871d49d4ac2b..047834e648bf2 100644 --- a/src/mongo/s/query/blocking_results_merger_test.cpp +++ b/src/mongo/s/query/blocking_results_merger_test.cpp @@ -27,12 +27,35 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/query/find_common.h" +#include "mongo/db/service_context.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/platform/mutex.h" #include "mongo/s/query/blocking_results_merger.h" #include "mongo/s/query/results_merger_test_fixture.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/clock_source_mock.h" +#include "mongo/util/decorable.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/s/query/cluster_aggregate.cpp b/src/mongo/s/query/cluster_aggregate.cpp index 02a41e64b1711..89ff8b3be0bfe 100644 --- a/src/mongo/s/query/cluster_aggregate.cpp +++ b/src/mongo/s/query/cluster_aggregate.cpp @@ -28,63 +28,77 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/query/cluster_aggregate.h" - -#include - -#include "mongo/db/api_parameters.h" -#include "mongo/db/auth/authorization_session.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include 
"mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/basic_types_gen.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" #include "mongo/db/catalog_shard_feature_flag_gen.h" -#include "mongo/db/client.h" -#include "mongo/db/commands.h" #include "mongo/db/curop.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/fle_crud.h" #include "mongo/db/operation_context.h" -#include "mongo/db/pipeline/document_source_change_stream.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" #include "mongo/db/pipeline/document_source_internal_unpack_bucket.h" -#include "mongo/db/pipeline/document_source_out.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" #include "mongo/db/pipeline/pipeline.h" #include "mongo/db/pipeline/process_interface/mongos_process_interface.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/collation/collator_interface.h" #include "mongo/db/query/explain_common.h" -#include "mongo/db/query/find_common.h" -#include "mongo/db/query/fle/server_rewrite.h" -#include "mongo/db/query/telemetry.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/query/query_stats.h" +#include "mongo/db/query/query_stats_aggregate_key_generator.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" #include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_options.h" #include "mongo/db/views/resolved_view.h" -#include "mongo/db/views/view.h" -#include "mongo/db/write_concern_options.h" #include "mongo/executor/task_executor_pool.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/rpc/op_msg_rpc_impls.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/client/num_hosts_targeted_metrics.h" -#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" -#include "mongo/s/multi_statement_transaction_requests_sender.h" +#include "mongo/s/query/cluster_aggregate.h" #include "mongo/s/query/cluster_aggregation_planner.h" -#include "mongo/s/query/cluster_client_cursor_impl.h" -#include "mongo/s/query/cluster_client_cursor_params.h" -#include "mongo/s/query/cluster_cursor_manager.h" -#include "mongo/s/query/cluster_query_knobs_gen.h" -#include "mongo/s/query/document_source_merge_cursors.h" -#include "mongo/s/query/establish_cursors.h" -#include "mongo/s/query/owned_remote_cursor.h" -#include "mongo/s/query/router_stage_pipeline.h" -#include "mongo/s/query/store_possible_cursor.h" -#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/str.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kCommand @@ -100,7 +114,7 @@ namespace { // definition. 
It's okay that this is incorrect, we will repopulate the real namespace map on the // mongod. Note that this function must be called before forwarding an aggregation command on an // unsharded collection, in order to verify that the involved namespaces are allowed to be sharded. -auto resolveInvolvedNamespaces(stdx::unordered_set involvedNamespaces) { +auto resolveInvolvedNamespaces(const stdx::unordered_set& involvedNamespaces) { StringMap resolvedNamespaces; for (auto&& nss : involvedNamespaces) { resolvedNamespaces.try_emplace(nss.coll(), nss, std::vector{}); @@ -139,16 +153,6 @@ boost::intrusive_ptr makeExpressionContext( mergeCtx->inMongos = true; - // If the request explicity specified NOT to use v2 resume tokens for change streams, set this - // on the expCtx. We only ever expect to see an explicit value during testing. - if (request.getGenerateV2ResumeTokens().has_value()) { - // If $_generateV2ResumeTokens was specified, we must be testing and it must be false. - uassert(6528201, - "Invalid request for v2 resume tokens", - getTestCommandsEnabled() && !request.getGenerateV2ResumeTokens()); - mergeCtx->changeStreamTokenVersion = 1; - } - // Serialize the 'AggregateCommandRequest' and save it so that the original command can be // reconstructed for dispatch to a new shard, which is sometimes necessary for change streams // pipelines. @@ -278,6 +282,58 @@ std::vector rebuildPipelineWithTimeSeriesGranularity( return newPipeline; } +/** + * Builds an expCtx with which to parse the request's pipeline, then parses the pipeline and + * registers the pre-optimized pipeline with query stats collection. + */ +std::unique_ptr parsePipelineAndRegisterQueryStats( + OperationContext* opCtx, + const stdx::unordered_set& involvedNamespaces, + const NamespaceString& executionNss, + AggregateCommandRequest& request, + boost::optional cri, + bool hasChangeStream, + bool shouldDoFLERewrite) { + // Populate the collection UUID and the appropriate collation to use. + auto [collationObj, uuid] = [&]() -> std::pair> { + // If this is a change stream, take the user-defined collation if one exists, or an + // empty BSONObj otherwise. Change streams never inherit the collection's default + // collation, and since collectionless aggregations generally run on the 'admin' + // database, the standard logic would attempt to resolve its non-existent UUID and + // collation by sending a specious 'listCollections' command to the config servers. + if (hasChangeStream) { + return {request.getCollation().value_or(BSONObj()), boost::none}; + } + + return cluster_aggregation_planner::getCollationAndUUID( + opCtx, + cri ? boost::make_optional(cri->cm) : boost::none, + executionNss, + request.getCollation().value_or(BSONObj())); + }(); + + // Build an ExpressionContext for the pipeline. This instantiates an appropriate collator, + // includes all involved namespaces, and creates a shared MongoProcessInterface for use by + // the pipeline's stages. + boost::intrusive_ptr expCtx = + makeExpressionContext(opCtx, + request, + collationObj, + uuid, + resolveInvolvedNamespaces(involvedNamespaces), + hasChangeStream); + + auto pipeline = Pipeline::parse(request.getPipeline(), expCtx); + // Skip query stats recording for queryable encryption queries. 
+ if (!shouldDoFLERewrite) { + query_stats::registerRequest(opCtx, executionNss, [&]() { + return std::make_unique( + request, *pipeline, expCtx, involvedNamespaces, executionNss); + }); + } + return pipeline; +} + } // namespace Status ClusterAggregate::runAggregate(OperationContext* opCtx, @@ -319,6 +375,9 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx, << AggregateCommandRequest::kFromMongosFieldName << "] cannot be set to 'true' when sent to mongos", !request.getNeedsMerge() && !request.getFromMongos()); + uassert(ErrorCodes::BadValue, + "Aggregate queries on mongoS may not request or provide a resume token", + !request.getRequestResumeToken() && !request.getResumeAfter()); const auto isSharded = [](OperationContext* opCtx, const NamespaceString& nss) { const auto [resolvedNsCM, _] = @@ -333,10 +392,6 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx, auto shouldDoFLERewrite = ::mongo::shouldDoFLERewrite(request); auto startsWithDocuments = liteParsedPipeline.startsWithDocuments(); - if (!shouldDoFLERewrite) { - telemetry::registerAggRequest(request, opCtx); - } - // If the routing table is not already taken by the higher level, fill it now. if (!cri) { // If the routing table is valid, we obtain a reference to it. If the table is not valid, @@ -376,36 +431,14 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx, boost::intrusive_ptr expCtx; const auto pipelineBuilder = [&]() { - // Populate the collection UUID and the appropriate collation to use. - auto [collationObj, uuid] = [&]() -> std::pair> { - // If this is a change stream, take the user-defined collation if one exists, or an - // empty BSONObj otherwise. Change streams never inherit the collection's default - // collation, and since collectionless aggregations generally run on the 'admin' - // database, the standard logic would attempt to resolve its non-existent UUID and - // collation by sending a specious 'listCollections' command to the config servers. - if (hasChangeStream) { - return {request.getCollation().value_or(BSONObj()), boost::none}; - } - - return cluster_aggregation_planner::getCollationAndUUID( - opCtx, - cri ? boost::make_optional(cri->cm) : boost::none, - namespaces.executionNss, - request.getCollation().value_or(BSONObj())); - }(); - - // Build an ExpressionContext for the pipeline. This instantiates an appropriate collator, - // resolves all involved namespaces, and creates a shared MongoProcessInterface for use by - // the pipeline's stages. - expCtx = makeExpressionContext(opCtx, - request, - collationObj, - uuid, - resolveInvolvedNamespaces(involvedNamespaces), - hasChangeStream); - - // Parse and optimize the full pipeline. - auto pipeline = Pipeline::parse(request.getPipeline(), expCtx); + auto pipeline = parsePipelineAndRegisterQueryStats(opCtx, + involvedNamespaces, + namespaces.executionNss, + request, + cri, + hasChangeStream, + shouldDoFLERewrite); + expCtx = pipeline->getContext(); // If the aggregate command supports encrypted collections, do rewrites of the pipeline to // support querying against encrypted fields. @@ -454,15 +487,30 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx, cluster_aggregation_planner::AggregationTargeter::TargetingPolicy::kMongosRequired); if (!expCtx) { - // When the AggregationTargeter chooses a "passthrough" policy, it does not call the - // 'pipelineBuilder' function, so we never get an expression context. Because this is a - // passthrough, we only need a bare minimum expression context anyway. 
+ // When the AggregationTargeter chooses a "passthrough" or "specific shard only" + // policy, it does not call the 'pipelineBuilder' function, so we've yet to construct an + // expression context or register query stats. Because this is a passthrough, we only need a + // bare minimum expression context on mongos. invariant(targeter.policy == cluster_aggregation_planner::AggregationTargeter::kPassthrough || targeter.policy == cluster_aggregation_planner::AggregationTargeter::kSpecificShardOnly); + expCtx = make_intrusive( opCtx, nullptr, namespaces.executionNss, boost::none, request.getLet()); + expCtx->addResolvedNamespaces(involvedNamespaces); + + // Skip query stats recording for queryable encryption queries. + if (!shouldDoFLERewrite) { + // We want to hold off parsing the pipeline until it's clear we must. Because of that, + // we wait to parse the pipeline until this callback is invoked within + // query_stats::registerRequest. + query_stats::registerRequest(opCtx, namespaces.executionNss, [&]() { + auto pipeline = Pipeline::parse(request.getPipeline(), expCtx); + return std::make_unique( + request, *pipeline, expCtx, involvedNamespaces, namespaces.executionNss); + }); + } } if (request.getExplain()) { @@ -541,11 +589,7 @@ Status ClusterAggregate::runAggregate(OperationContext* opCtx, "passthrough from mongos", nonChangeStreamLite.allowedToPassthroughFromMongos()); ShardId shardId(std::string(request.getPassthroughToShard()->getShard())); - // (Ignore FCV check): This is in mongos so we expect to ignore FCV. - uassert(6273803, - "$_passthroughToShard not supported for queries against config replica set", - shardId != ShardId::kConfigServerId || - gFeatureFlagCatalogShard.isEnabledAndIgnoreFCVUnsafe()); + // This is an aggregation pipeline started internally, so it is not eligible for // sampling. 
const bool eligibleForSampling = false; diff --git a/src/mongo/s/query/cluster_aggregate.h b/src/mongo/s/query/cluster_aggregate.h index d9e122070401e..b74b4ebf96e82 100644 --- a/src/mongo/s/query/cluster_aggregate.h +++ b/src/mongo/s/query/cluster_aggregate.h @@ -29,9 +29,15 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/privilege.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/lite_parsed_pipeline.h" diff --git a/src/mongo/s/query/cluster_aggregation_planner.cpp b/src/mongo/s/query/cluster_aggregation_planner.cpp index e10721b2915a4..b5d2361b7855d 100644 --- a/src/mongo/s/query/cluster_aggregation_planner.cpp +++ b/src/mongo/s/query/cluster_aggregation_planner.cpp @@ -29,36 +29,101 @@ #include "mongo/s/query/cluster_aggregation_planner.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/client/connpool.h" +#include "mongo/client/dbclient_base.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/catalog_shard_feature_flag_gen.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/client.h" +#include "mongo/db/commands.h" +#include "mongo/db/curop.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/pipeline/aggregate_command_gen.h" #include "mongo/db/pipeline/change_stream_constants.h" #include "mongo/db/pipeline/change_stream_invalidation_info.h" +#include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_skip.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/collation/collation_spec.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/query/find_common.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/shard_id.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/random.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/s/analyze_shard_key_common_gen.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" 
+#include "mongo/s/index_version.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" +#include "mongo/s/query/async_results_merger_params_gen.h" +#include "mongo/s/query/cluster_client_cursor.h" +#include "mongo/s/query/cluster_client_cursor_impl.h" #include "mongo/s/query/cluster_cursor_manager.h" #include "mongo/s/query/cluster_query_knobs_gen.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/document_source_merge_cursors.h" +#include "mongo/s/query/establish_cursors.h" +#include "mongo/s/query/owned_remote_cursor.h" +#include "mongo/s/query/router_exec_stage.h" #include "mongo/s/query/router_stage_limit.h" #include "mongo/s/query/router_stage_pipeline.h" #include "mongo/s/query/router_stage_remove_metadata_fields.h" #include "mongo/s/query/router_stage_skip.h" #include "mongo/s/query/store_possible_cursor.h" #include "mongo/s/query_analysis_sampler_util.h" -#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/sharding_index_catalog_cache.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -104,7 +169,7 @@ AsyncRequestsSender::Response establishMergingShardCursor(OperationContext* opCt {{mergingShardId, mergeCmdObj}}, ReadPreferenceSetting::get(opCtx), sharded_agg_helpers::getDesiredRetryPolicy(opCtx)); - const auto response = ars.next(); + auto response = ars.next(); tassert(6273807, "requested and received data from just one shard, but results are still pending", ars.done()); @@ -266,11 +331,21 @@ BSONObj establishMergingMongosCursor(OperationContext* opCtx, const NamespaceString& requestedNss, std::unique_ptr pipelineForMerging, const PrivilegeVector& privileges) { - ClusterClientCursorParams params(requestedNss, APIParameters::get(opCtx), ReadPreferenceSetting::get(opCtx), - repl::ReadConcernArgs::get(opCtx)); + repl::ReadConcernArgs::get(opCtx), + [&] { + if (!opCtx->getLogicalSessionId()) + return OperationSessionInfoFromClient(); + + OperationSessionInfoFromClient osi{ + *opCtx->getLogicalSessionId(), opCtx->getTxnNumber()}; + if (TransactionRouter::get(opCtx)) { + osi.setAutocommit(false); + } + return osi; + }()); params.originatingCommandObj = CurOp::get(opCtx)->opDescription().getOwned(); params.tailableMode = pipelineForMerging->getContext()->tailableMode; @@ -278,14 +353,8 @@ BSONObj establishMergingMongosCursor(OperationContext* opCtx, // size we pass here is used for getMores, so do not specify a batch size if the initial request // had a batch size of 0. params.batchSize = batchSize == 0 ? boost::none : boost::make_optional(batchSize); - params.lsid = opCtx->getLogicalSessionId(); - params.txnNumber = opCtx->getTxnNumber(); params.originatingPrivileges = privileges; - if (TransactionRouter::get(opCtx)) { - params.isAutoCommit = false; - } - auto ccc = cluster_aggregation_planner::buildClusterCursor( opCtx, std::move(pipelineForMerging), std::move(params)); @@ -360,15 +429,16 @@ BSONObj establishMergingMongosCursor(OperationContext* opCtx, int nShards = ccc->getNumRemotes(); auto&& opDebug = CurOp::get(opCtx)->debug(); - // Fill out the aggregation metrics in CurOp, and record telemetry metrics, before detaching the - // cursor from its opCtx. 
+ // Fill out the aggregation metrics in CurOp, and record queryStats metrics, before detaching + // the cursor from its opCtx. opDebug.nShards = std::max(opDebug.nShards, nShards); opDebug.cursorExhausted = exhausted; opDebug.additiveMetrics.nBatches = 1; + CurOp::get(opCtx)->setEndOfOpMetrics(responseBuilder.numDocs()); if (exhausted) { - collectTelemetryMongos(opCtx, ccc->getOriginatingCommand(), responseBuilder.numDocs()); + collectQueryStatsMongos(opCtx, ccc->getKeyGenerator()); } else { - collectTelemetryMongos(opCtx, ccc, responseBuilder.numDocs()); + collectQueryStatsMongos(opCtx, ccc); } ccc->detachFromOperationContext(); @@ -569,7 +639,7 @@ bool isMergeSkipOrLimit(const boost::intrusive_ptr& stage) { } bool isAllLimitsAndSkips(Pipeline* pipeline) { - const auto stages = pipeline->getSources(); + const auto& stages = pipeline->getSources(); return std::all_of( stages.begin(), stages.end(), [](const auto& stage) { return isMergeSkipOrLimit(stage); }); } diff --git a/src/mongo/s/query/cluster_aggregation_planner.h b/src/mongo/s/query/cluster_aggregation_planner.h index 22a4ec0e82ba5..4f32e1aa9884b 100644 --- a/src/mongo/s/query/cluster_aggregation_planner.h +++ b/src/mongo/s/query/cluster_aggregation_planner.h @@ -29,14 +29,33 @@ #pragma once +#include +#include +#include +#include #include +#include +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/query/explain_options.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/database_version.h" #include "mongo/s/query/cluster_aggregate.h" #include "mongo/s/query/cluster_client_cursor_guard.h" #include "mongo/s/query/cluster_client_cursor_impl.h" #include "mongo/s/query/cluster_client_cursor_params.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/uuid.h" namespace mongo { namespace cluster_aggregation_planner { diff --git a/src/mongo/s/query/cluster_client_cursor.h b/src/mongo/s/query/cluster_client_cursor.h index c92dab456a324..15e26aa2881df 100644 --- a/src/mongo/s/query/cluster_client_cursor.h +++ b/src/mongo/s/query/cluster_client_cursor.h @@ -157,6 +157,13 @@ class ClusterClientCursor { */ virtual bool remotesExhausted() = 0; + /** + * Returns whether or not the cursor has been killed. Repeated calls to kill() can occur in + * ~ClusterClientCursorGuard() if the cursor was killed while the cursor was checked out or in + * use with the guard. + */ + virtual bool hasBeenKilled() = 0; + /** * Sets the maxTimeMS value that the cursor should forward with any internally issued getMore * requests. @@ -227,6 +234,9 @@ class ClusterClientCursor { void incrementCursorMetrics(OpDebug::AdditiveMetrics newMetrics) { _metrics.add(newMetrics); + if (!_firstResponseExecutionTime) { + _firstResponseExecutionTime = _metrics.executionTime; + } } // @@ -259,11 +269,20 @@ class ClusterClientCursor { */ virtual bool shouldOmitDiagnosticInformation() const = 0; + /** + * Returns and releases ownership of the KeyGenerator associated with the request this + * cursor is handling. + */ + virtual std::unique_ptr getKeyGenerator() = 0; + protected: // Metrics that are accumulated over the lifetime of the cursor, incremented with each getMore. 
- // Useful for diagnostics like telemetry. + // Useful for diagnostics like queryStats. OpDebug::AdditiveMetrics _metrics; + // The execution time collected from the initial operation prior to any getMore requests. + boost::optional _firstResponseExecutionTime; + private: // Unused maxTime budget for this cursor. Microseconds _leftoverMaxTimeMicros = Microseconds::max(); diff --git a/src/mongo/s/query/cluster_client_cursor_guard.h b/src/mongo/s/query/cluster_client_cursor_guard.h index 4a135cb716a34..84036717e6f81 100644 --- a/src/mongo/s/query/cluster_client_cursor_guard.h +++ b/src/mongo/s/query/cluster_client_cursor_guard.h @@ -53,9 +53,12 @@ class ClusterClientCursorGuard final { * necessary. May block waiting for remote cursor cleanup. * * If no cursor is owned, does nothing. + * + * If the cursor has been killed by a previous command, does nothing. hasBeenKilled() will be + * true if the cursor was killed while the cursor was checked out or in use with a Guard. */ ~ClusterClientCursorGuard() { - if (_ccc && !_ccc->remotesExhausted()) { + if (_ccc && !_ccc->remotesExhausted() && !_ccc->hasBeenKilled()) { _ccc->kill(_opCtx); } } diff --git a/src/mongo/s/query/cluster_client_cursor_impl.cpp b/src/mongo/s/query/cluster_client_cursor_impl.cpp index 40e3df899b2a5..ec42cc85069a9 100644 --- a/src/mongo/s/query/cluster_client_cursor_impl.cpp +++ b/src/mongo/s/query/cluster_client_cursor_impl.cpp @@ -29,15 +29,28 @@ #include "mongo/s/query/cluster_client_cursor_impl.h" +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/commands/server_status_metric.h" #include "mongo/db/curop.h" -#include "mongo/db/query/telemetry.h" -#include "mongo/logv2/log.h" +#include "mongo/db/query/query_stats.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/s/query/async_results_merger.h" #include "mongo/s/query/router_stage_limit.h" #include "mongo/s/query/router_stage_merge.h" #include "mongo/s/query/router_stage_remove_metadata_fields.h" #include "mongo/s/query/router_stage_skip.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/string_map.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -75,7 +88,8 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(OperationContext* opCtx, _lastUseDate(_createdDate), _queryHash(CurOp::get(opCtx)->debug().queryHash), _shouldOmitDiagnosticInformation(CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation), - _telemetryStoreKey(CurOp::get(opCtx)->debug().telemetryStoreKey) { + _queryStatsStoreKeyHash(CurOp::get(opCtx)->debug().queryStatsStoreKeyHash), + _queryStatsKeyGenerator(std::move(CurOp::get(opCtx)->debug().queryStatsKeyGenerator)) { dassert(!_params.compareWholeSortKeyOnRouter || SimpleBSONObjComparator::kInstance.evaluate( _params.sortToApplyOnRouter == AsyncResultsMerger::kWholeSortKeySortPattern)); @@ -93,7 +107,9 @@ ClusterClientCursorImpl::ClusterClientCursorImpl(OperationContext* opCtx, _createdDate(opCtx->getServiceContext()->getPreciseClockSource()->now()), _lastUseDate(_createdDate), _queryHash(CurOp::get(opCtx)->debug().queryHash), - _shouldOmitDiagnosticInformation(CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation) { + _shouldOmitDiagnosticInformation(CurOp::get(opCtx)->debug().shouldOmitDiagnosticInformation), + _queryStatsStoreKeyHash(CurOp::get(opCtx)->debug().queryStatsStoreKeyHash), + 
_queryStatsKeyGenerator(std::move(CurOp::get(opCtx)->debug().queryStatsKeyGenerator)) { dassert(!_params.compareWholeSortKeyOnRouter || SimpleBSONObjComparator::kInstance.evaluate( _params.sortToApplyOnRouter == AsyncResultsMerger::kWholeSortKeySortPattern)); @@ -131,20 +147,17 @@ StatusWith ClusterClientCursorImpl::next() { } void ClusterClientCursorImpl::kill(OperationContext* opCtx) { - if (_hasBeenKilled) { - LOGV2_DEBUG(7372700, - 3, - "Kill called on cluster client cursor after cursor has already been killed, so " - "ignoring"); - return; - } - - if (_telemetryStoreKey && opCtx) { - telemetry::writeTelemetry(opCtx, - _telemetryStoreKey, - getOriginatingCommand(), - _metrics.executionTime.value_or(Microseconds{0}).count(), - _metrics.nreturned.value_or(0)); + tassert(7448200, + "Cannot kill a cluster client cursor that has already been killed", + !_hasBeenKilled); + + if (_queryStatsStoreKeyHash && opCtx) { + query_stats::writeQueryStats(opCtx, + _queryStatsStoreKeyHash, + std::move(_queryStatsKeyGenerator), + _metrics.executionTime.value_or(Microseconds{0}).count(), + _firstResponseExecutionTime.value_or(Microseconds{0}).count(), + _metrics.nreturned.value_or(0)); } _root->kill(opCtx); @@ -210,6 +223,10 @@ bool ClusterClientCursorImpl::remotesExhausted() { return _root->remotesExhausted(); } +bool ClusterClientCursorImpl::hasBeenKilled() { + return _hasBeenKilled; +} + Status ClusterClientCursorImpl::setAwaitDataTimeout(Milliseconds awaitDataTimeout) { return _root->setAwaitDataTimeout(awaitDataTimeout); } @@ -219,7 +236,7 @@ boost::optional ClusterClientCursorImpl::getLsid() const { } boost::optional ClusterClientCursorImpl::getTxnNumber() const { - return _params.txnNumber; + return _params.osi.getTxnNumber(); } Date_t ClusterClientCursorImpl::getCreatedDate() const { @@ -282,4 +299,8 @@ bool ClusterClientCursorImpl::shouldOmitDiagnosticInformation() const { return _shouldOmitDiagnosticInformation; } +std::unique_ptr ClusterClientCursorImpl::getKeyGenerator() { + return std::move(_queryStatsKeyGenerator); +} + } // namespace mongo diff --git a/src/mongo/s/query/cluster_client_cursor_impl.h b/src/mongo/s/query/cluster_client_cursor_impl.h index 4f9d988acbb0e..641a28a343612 100644 --- a/src/mongo/s/query/cluster_client_cursor_impl.h +++ b/src/mongo/s/query/cluster_client_cursor_impl.h @@ -29,16 +29,33 @@ #pragma once +#include +#include +#include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/executor/task_executor.h" #include "mongo/s/query/cluster_client_cursor.h" #include "mongo/s/query/cluster_client_cursor_guard.h" #include "mongo/s/query/cluster_client_cursor_params.h" #include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/router_exec_stage.h" +#include "mongo/util/duration.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -96,6 +113,8 @@ class ClusterClientCursorImpl final : public ClusterClientCursor { bool remotesExhausted() final; + bool hasBeenKilled() final; + Status setAwaitDataTimeout(Milliseconds awaitDataTimeout) final; boost::optional getLsid() 
const final; @@ -118,6 +137,8 @@ class ClusterClientCursorImpl final : public ClusterClientCursor { bool shouldOmitDiagnosticInformation() const final; + std::unique_ptr getKeyGenerator() final; + public: /** * Constructs a CCC whose result set is generated by a mock execution stage. @@ -181,12 +202,12 @@ class ClusterClientCursorImpl final : public ClusterClientCursor { bool _shouldOmitDiagnosticInformation = false; // If boost::none, telemetry should not be collected for this cursor. - boost::optional _telemetryStoreKey; + boost::optional _queryStatsStoreKeyHash; + // The KeyGenerator used by query stats to generate the query stats store key. + + std::unique_ptr _queryStatsKeyGenerator; - // Tracks if kill() has been called on the cursor. Multiple calls to kill() are treated as a - // noop. - // TODO SERVER-74482 investigate where kill() is called multiple times and remove unnecessary - // calls + // Tracks if kill() has been called on the cursor. Multiple calls to kill() is an error. bool _hasBeenKilled = false; }; diff --git a/src/mongo/s/query/cluster_client_cursor_impl_test.cpp b/src/mongo/s/query/cluster_client_cursor_impl_test.cpp index be09e027337a1..f0389f431c1e8 100644 --- a/src/mongo/s/query/cluster_client_cursor_impl_test.cpp +++ b/src/mongo/s/query/cluster_client_cursor_impl_test.cpp @@ -27,28 +27,34 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/s/query/cluster_client_cursor_impl.h" - -#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" +#include "mongo/s/query/cluster_client_cursor_impl.h" #include "mongo/s/query/router_stage_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { - namespace { class ClusterClientCursorImplTest : public ServiceContextTest { protected: ClusterClientCursorImplTest() { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); _opCtx = makeOperationContext(); } @@ -64,8 +70,11 @@ TEST_F(ClusterClientCursorImplTest, NumReturnedSoFar) { ClusterClientCursorImpl cursor( _opCtx.get(), std::move(mockStage), - ClusterClientCursorParams( - NamespaceString::createNamespaceString_forTest("unused"), APIParameters(), {}), + ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()), boost::none); ASSERT_EQ(cursor.getNumReturnedSoFar(), 0); @@ -91,7 +100,11 @@ TEST_F(ClusterClientCursorImplTest, QueueResult) { ClusterClientCursorImpl cursor( _opCtx.get(), std::move(mockStage), - ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), {}), + ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()), boost::none); auto firstResult = cursor.next(); @@ -133,7 +146,11 @@ TEST_F(ClusterClientCursorImplTest, 
RemotesExhausted) { ClusterClientCursorImpl cursor( _opCtx.get(), std::move(mockStage), - ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), {}), + ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()), boost::none); ASSERT_TRUE(cursor.remotesExhausted()); @@ -166,7 +183,11 @@ TEST_F(ClusterClientCursorImplTest, RemoteTimeoutPartialResultsDisallowed) { ClusterClientCursorImpl cursor( _opCtx.get(), std::move(mockStage), - ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), {}), + ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()), boost::none); ASSERT_TRUE(cursor.remotesExhausted()); @@ -190,7 +211,11 @@ TEST_F(ClusterClientCursorImplTest, RemoteTimeoutPartialResultsAllowed) { mockStage->markRemotesExhausted(); auto params = - ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), {}); + ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()); params.isAllowPartialResults = true; ClusterClientCursorImpl cursor( @@ -218,7 +243,11 @@ TEST_F(ClusterClientCursorImplTest, ForwardsAwaitDataTimeout) { ClusterClientCursorImpl cursor( _opCtx.get(), std::move(mockStage), - ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), {}), + ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()), boost::none); ASSERT_OK(cursor.setAwaitDataTimeout(Milliseconds(789))); @@ -236,7 +265,11 @@ TEST_F(ClusterClientCursorImplTest, ChecksForInterrupt) { ClusterClientCursorImpl cursor( _opCtx.get(), std::move(mockStage), - ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), {}), + ClusterClientCursorParams(NamespaceString::createNamespaceString_forTest("unused"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()), boost::none); // Pull one result out of the cursor. 
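The test updates in this file all follow from the same signature change: ClusterClientCursorParams no longer defaults its read preference, read concern, or session fields, and the former lsid/txnNumber/isAutoCommit members are folded into a single OperationSessionInfoFromClient argument. A minimal sketch of how a caller might assemble those arguments from an OperationContext, mirroring the lambda used in establishMergingMongosCursor and in these tests; the helper name makeCursorParamsFromOpCtx is hypothetical and not part of this patch.

#include "mongo/db/api_parameters.h"
#include "mongo/db/namespace_string.h"
#include "mongo/db/operation_context.h"
#include "mongo/s/query/cluster_client_cursor_params.h"
#include "mongo/s/transaction_router.h"

namespace mongo {

ClusterClientCursorParams makeCursorParamsFromOpCtx(OperationContext* opCtx, NamespaceString nss) {
    return ClusterClientCursorParams(
        std::move(nss),
        APIParameters::get(opCtx),
        boost::none /* ReadPreferenceSetting */,
        boost::none /* repl::ReadConcernArgs */,
        [&] {
            // Only forward session details when the operation actually runs in a session.
            if (!opCtx->getLogicalSessionId())
                return OperationSessionInfoFromClient();
            OperationSessionInfoFromClient osi{*opCtx->getLogicalSessionId(),
                                               opCtx->getTxnNumber()};
            if (TransactionRouter::get(opCtx)) {
                // Multi-statement transactions run with autocommit=false.
                osi.setAutocommit(false);
            }
            return osi;
        }());
}

}  // namespace mongo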
@@ -259,14 +292,22 @@ TEST_F(ClusterClientCursorImplTest, ChecksForInterrupt) { TEST_F(ClusterClientCursorImplTest, LogicalSessionIdsOnCursors) { // Make a cursor with no lsid auto mockStage = std::make_unique(_opCtx.get()); - ClusterClientCursorParams params(NamespaceString::createNamespaceString_forTest("test"), {}); + ClusterClientCursorParams params(NamespaceString::createNamespaceString_forTest("test"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()); ClusterClientCursorImpl cursor{ _opCtx.get(), std::move(mockStage), std::move(params), boost::none}; ASSERT(!cursor.getLsid()); // Make a cursor with an lsid auto mockStage2 = std::make_unique(_opCtx.get()); - ClusterClientCursorParams params2(NamespaceString::createNamespaceString_forTest("test"), {}); + ClusterClientCursorParams params2(NamespaceString::createNamespaceString_forTest("test"), + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()); auto lsid = makeLogicalSessionIdForTest(); ClusterClientCursorImpl cursor2{_opCtx.get(), std::move(mockStage2), std::move(params2), lsid}; ASSERT(*(cursor2.getLsid()) == lsid); @@ -278,9 +319,16 @@ TEST_F(ClusterClientCursorImplTest, ShouldStoreLSIDIfSetOnOpCtx) { { // Make a cursor with no lsid or txnNumber. ClusterClientCursorParams params(NamespaceString::createNamespaceString_forTest("test"), - {}); - params.lsid = _opCtx->getLogicalSessionId(); - params.txnNumber = _opCtx->getTxnNumber(); + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + [&] { + if (!_opCtx->getLogicalSessionId()) + return OperationSessionInfoFromClient(); + return OperationSessionInfoFromClient{ + *_opCtx->getLogicalSessionId(), + _opCtx->getTxnNumber()}; + }()); auto cursor = ClusterClientCursorImpl::make(_opCtx.get(), nullExecutor, std::move(params)); ASSERT_FALSE(cursor->getLsid()); @@ -293,9 +341,16 @@ TEST_F(ClusterClientCursorImplTest, ShouldStoreLSIDIfSetOnOpCtx) { { // Make a cursor with an lsid and no txnNumber. ClusterClientCursorParams params(NamespaceString::createNamespaceString_forTest("test"), - {}); - params.lsid = _opCtx->getLogicalSessionId(); - params.txnNumber = _opCtx->getTxnNumber(); + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + [&] { + if (!_opCtx->getLogicalSessionId()) + return OperationSessionInfoFromClient(); + return OperationSessionInfoFromClient{ + *_opCtx->getLogicalSessionId(), + _opCtx->getTxnNumber()}; + }()); auto cursor = ClusterClientCursorImpl::make(_opCtx.get(), nullExecutor, std::move(params)); ASSERT_EQ(*cursor->getLsid(), lsid); @@ -308,9 +363,16 @@ TEST_F(ClusterClientCursorImplTest, ShouldStoreLSIDIfSetOnOpCtx) { { // Make a cursor with an lsid and txnNumber. 
ClusterClientCursorParams params(NamespaceString::createNamespaceString_forTest("test"), - {}); - params.lsid = _opCtx->getLogicalSessionId(); - params.txnNumber = _opCtx->getTxnNumber(); + APIParameters(), + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + [&] { + if (!_opCtx->getLogicalSessionId()) + return OperationSessionInfoFromClient(); + return OperationSessionInfoFromClient{ + *_opCtx->getLogicalSessionId(), + _opCtx->getTxnNumber()}; + }()); auto cursor = ClusterClientCursorImpl::make(_opCtx.get(), nullExecutor, std::move(params)); ASSERT_EQ(*cursor->getLsid(), lsid); @@ -326,8 +388,11 @@ TEST_F(ClusterClientCursorImplTest, ShouldStoreAPIParameters) { apiParams.setAPIStrict(true); apiParams.setAPIDeprecationErrors(true); - ClusterClientCursorParams params( - NamespaceString::createNamespaceString_forTest("test"), apiParams, {}); + ClusterClientCursorParams params(NamespaceString::createNamespaceString_forTest("test"), + apiParams, + boost::none /* ReadPreferenceSetting */, + boost::none /* repl::ReadConcernArgs */, + OperationSessionInfoFromClient()); ClusterClientCursorImpl cursor( _opCtx.get(), std::move(mockStage), std::move(params), boost::none); @@ -338,5 +403,4 @@ TEST_F(ClusterClientCursorImplTest, ShouldStoreAPIParameters) { } } // namespace - } // namespace mongo diff --git a/src/mongo/s/query/cluster_client_cursor_mock.cpp b/src/mongo/s/query/cluster_client_cursor_mock.cpp index 33a51ffa3b679..8063f890d7446 100644 --- a/src/mongo/s/query/cluster_client_cursor_mock.cpp +++ b/src/mongo/s/query/cluster_client_cursor_mock.cpp @@ -28,10 +28,14 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/s/query/cluster_client_cursor_mock.h" +#include +#include +#include "mongo/s/query/cluster_client_cursor_mock.h" #include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -130,6 +134,10 @@ bool ClusterClientCursorMock::remotesExhausted() { return _remotesExhausted; } +bool ClusterClientCursorMock::hasBeenKilled() { + return _killed; +} + void ClusterClientCursorMock::queueError(Status status) { _resultsQueue.push({status}); } @@ -166,4 +174,8 @@ bool ClusterClientCursorMock::shouldOmitDiagnosticInformation() const { return false; } +std::unique_ptr ClusterClientCursorMock::getKeyGenerator() { + return nullptr; +} + } // namespace mongo diff --git a/src/mongo/s/query/cluster_client_cursor_mock.h b/src/mongo/s/query/cluster_client_cursor_mock.h index 2693901193dbc..5562c3748bcc2 100644 --- a/src/mongo/s/query/cluster_client_cursor_mock.h +++ b/src/mongo/s/query/cluster_client_cursor_mock.h @@ -29,12 +29,30 @@ #pragma once +#include #include +#include +#include +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/repl/read_concern_args.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/s/query/cluster_client_cursor.h" +#include "mongo/s/query/cluster_query_result.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -111,6 +129,8 @@ class ClusterClientCursorMock final : public ClusterClientCursor { */ bool remotesExhausted() final; + 
bool hasBeenKilled() final; + /** * Queues an error response. */ @@ -118,6 +138,8 @@ class ClusterClientCursorMock final : public ClusterClientCursor { bool shouldOmitDiagnosticInformation() const final; + std::unique_ptr getKeyGenerator() final; + private: bool _killed = false; std::queue> _resultsQueue; diff --git a/src/mongo/s/query/cluster_client_cursor_params.cpp b/src/mongo/s/query/cluster_client_cursor_params.cpp new file mode 100644 index 0000000000000..a783605bdfef4 --- /dev/null +++ b/src/mongo/s/query/cluster_client_cursor_params.cpp @@ -0,0 +1,62 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/s/query/cluster_client_cursor_params.h" + +namespace mongo { + +ClusterClientCursorParams::ClusterClientCursorParams( + NamespaceString nss, + APIParameters apiParameters, + boost::optional readPreference, + boost::optional readConcern, + OperationSessionInfoFromClient osi) + : nsString(std::move(nss)), + apiParameters(std::move(apiParameters)), + readPreference(std::move(readPreference)), + readConcern(std::move(readConcern)), + osi(std::move(osi)) {} + +AsyncResultsMergerParams ClusterClientCursorParams::extractARMParams() { + AsyncResultsMergerParams armParams; + if (!sortToApplyOnRouter.isEmpty()) { + armParams.setSort(sortToApplyOnRouter); + } + armParams.setCompareWholeSortKey(compareWholeSortKeyOnRouter); + armParams.setRemotes(std::move(remotes)); + armParams.setTailableMode(tailableMode); + armParams.setBatchSize(batchSize); + armParams.setNss(nsString); + armParams.setAllowPartialResults(isAllowPartialResults); + armParams.setOperationSessionInfo(osi); + + return armParams; +} + +} // namespace mongo diff --git a/src/mongo/s/query/cluster_client_cursor_params.h b/src/mongo/s/query/cluster_client_cursor_params.h index a1fbd0fca04e3..0e60b10d2b30d 100644 --- a/src/mongo/s/query/cluster_client_cursor_params.h +++ b/src/mongo/s/query/cluster_client_cursor_params.h @@ -31,7 +31,6 @@ #include #include -#include #include #include "mongo/bson/bsonobj.h" @@ -45,15 +44,13 @@ #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/tailable_mode.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id.h" #include "mongo/s/client/shard.h" #include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/util/net/hostandport.h" namespace mongo { -class OperationContext; -class RouterExecStage; - /** * The resulting ClusterClientCursor will take ownership of the existing remote cursor, generating * results based on the cursor's current state. @@ -65,48 +62,15 @@ class RouterExecStage; struct ClusterClientCursorParams { ClusterClientCursorParams(NamespaceString nss, APIParameters apiParameters, - boost::optional readPref = boost::none, - boost::optional readConcernArgs = boost::none) - : nsString(std::move(nss)), apiParameters(std::move(apiParameters)) { - if (readPref) { - readPreference = std::move(readPref.get()); - } - if (readConcernArgs) { - readConcern = std::move(readConcernArgs.get()); - } - } + boost::optional readPreference, + boost::optional readConcern, + OperationSessionInfoFromClient osi); /** * Extracts the subset of fields here needed by the AsyncResultsMerger. The returned * AsyncResultsMergerParams will assume ownership of 'remotes'. 
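The replacement constructor above takes every field explicitly (API parameters, read preference, read concern, and the session information that previously lived in the separate lsid/txnNumber/isAutoCommit members). A minimal sketch of the resulting calling pattern, mirroring the lambda that runQueryWithoutRetrying() uses later in this patch; makeCursorParams and its arguments are illustrative names, not part of the patch:

    // Illustrative helper (not from the patch): build cursor params for the current operation.
    ClusterClientCursorParams makeCursorParams(OperationContext* opCtx,
                                               NamespaceString nss,
                                               boost::optional<ReadPreferenceSetting> readPref) {
        return ClusterClientCursorParams(
            std::move(nss),
            APIParameters::get(opCtx),
            std::move(readPref),
            repl::ReadConcernArgs::get(opCtx),
            [&] {
                // Forward session/transaction state only when the op actually runs in a session.
                if (!opCtx->getLogicalSessionId())
                    return OperationSessionInfoFromClient();
                OperationSessionInfoFromClient osi{*opCtx->getLogicalSessionId(),
                                                   opCtx->getTxnNumber()};
                if (TransactionRouter::get(opCtx))
                    osi.setAutocommit(false);  // multi-statement transaction
                return osi;
            }());
    }

extractARMParams() then forwards that OperationSessionInfoFromClient to the AsyncResultsMerger unchanged, which previously required rebuilding it from the lsid/txnNumber/isAutoCommit fields.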
*/ - AsyncResultsMergerParams extractARMParams() { - AsyncResultsMergerParams armParams; - if (!sortToApplyOnRouter.isEmpty()) { - armParams.setSort(sortToApplyOnRouter); - } - armParams.setCompareWholeSortKey(compareWholeSortKeyOnRouter); - armParams.setRemotes(std::move(remotes)); - armParams.setTailableMode(tailableMode); - armParams.setBatchSize(batchSize); - armParams.setNss(nsString); - armParams.setAllowPartialResults(isAllowPartialResults); - - OperationSessionInfoFromClient sessionInfo; - boost::optional lsidFromClient; - - if (lsid) { - lsidFromClient.emplace(lsid->getId()); - lsidFromClient->setUid(lsid->getUid()); - } - - sessionInfo.setSessionId(lsidFromClient); - sessionInfo.setTxnNumber(txnNumber); - sessionInfo.setAutocommit(isAutoCommit); - armParams.setOperationSessionInfo(sessionInfo); - - return armParams; - } + AsyncResultsMergerParams extractARMParams(); // Namespace against which the cursors exist. NamespaceString nsString; @@ -153,18 +117,13 @@ struct ClusterClientCursorParams { // Set if a readConcern must be respected throughout the lifetime of the cursor. boost::optional readConcern; + // Session/transaction information to attach with this request (if run under a session or + // transaction) + OperationSessionInfoFromClient osi; + // Whether the client indicated that it is willing to receive partial results in the case of an // unreachable host. bool isAllowPartialResults = false; - - // The logical session id of the command that created the cursor. - boost::optional lsid; - - // The transaction number of the command that created the cursor. - boost::optional txnNumber; - - // Set to false for multi statement transactions. - boost::optional isAutoCommit; }; } // namespace mongo diff --git a/src/mongo/s/query/cluster_cursor_cleanup_job.cpp b/src/mongo/s/query/cluster_cursor_cleanup_job.cpp index 005ad8908b87e..42570bc8c7cdb 100644 --- a/src/mongo/s/query/cluster_cursor_cleanup_job.cpp +++ b/src/mongo/s/query/cluster_cursor_cleanup_job.cpp @@ -27,15 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/s/query/cluster_cursor_cleanup_job.h" +#include +#include "mongo/base/string_data.h" #include "mongo/db/client.h" #include "mongo/db/cursor_server_params.h" +#include "mongo/db/service_context.h" #include "mongo/s/grid.h" +#include "mongo/s/query/cluster_cursor_cleanup_job.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/duration.h" #include "mongo/util/exit.h" #include "mongo/util/time_support.h" @@ -50,6 +56,12 @@ std::string ClusterCursorCleanupJob::name() const { void ClusterCursorCleanupJob::run() { ThreadClient tc(name(), getGlobalServiceContext()); + // TODO(SERVER-74662): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(*tc.get()); + tc.get()->setSystemOperationUnkillableByStepdown(lk); + } + auto* const client = Client::getCurrent(); auto* const manager = Grid::get(client->getServiceContext())->getCursorManager(); invariant(manager); diff --git a/src/mongo/s/query/cluster_cursor_cleanup_job.h b/src/mongo/s/query/cluster_cursor_cleanup_job.h index e5e629aab8f55..97913f82c7fa2 100644 --- a/src/mongo/s/query/cluster_cursor_cleanup_job.h +++ b/src/mongo/s/query/cluster_cursor_cleanup_job.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/util/background.h" diff --git a/src/mongo/s/query/cluster_cursor_manager.cpp b/src/mongo/s/query/cluster_cursor_manager.cpp index 9d6e3a49f7fdb..fc0608bbbda18 100644 --- a/src/mongo/s/query/cluster_cursor_manager.cpp +++ b/src/mongo/s/query/cluster_cursor_manager.cpp @@ -28,21 +28,35 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/query/cluster_client_cursor_impl.h" -#include "mongo/s/query/cluster_cursor_manager.h" - -#include - +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/allocate_cursor_id.h" +#include "mongo/db/auth/authorization_manager.h" +#include "mongo/db/auth/authorization_session.h" #include "mongo/db/curop.h" #include "mongo/db/query/query_knobs_gen.h" -#include "mongo/db/query/telemetry.h" +#include "mongo/db/query/query_stats.h" #include "mongo/db/session/kill_sessions_common.h" #include "mongo/db/session/logical_session_cache.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/util/assert_util.h" #include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -455,6 +469,9 @@ ClusterCursorManager::Stats ClusterCursorManager::stats() const { case CursorType::MultiTarget: ++stats.cursorsMultiTarget; break; + case CursorType::QueuedData: + ++stats.cursorsQueuedData; + break; } } @@ -588,37 +605,26 @@ StatusWith ClusterCursorManager::_detachCursor(WithLoc return std::move(cursor); } -void collectTelemetryMongos(OperationContext* opCtx, - const BSONObj& originatingCommand, - long long nreturned) { - auto curOp = CurOp::get(opCtx); - telemetry::collectMetricsOnOpDebug(curOp, nreturned); - +void collectQueryStatsMongos(OperationContext* opCtx, + std::unique_ptr keyGenerator) { // If we haven't registered a cursor to prepare for getMore requests, we record - // telemetry directly. + // queryStats directly. 
auto&& opDebug = CurOp::get(opCtx)->debug(); - telemetry::writeTelemetry( - opCtx, - opDebug.telemetryStoreKey, - originatingCommand, - opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(), - opDebug.additiveMetrics.nreturned.value_or(0)); -} - -void collectTelemetryMongos(OperationContext* opCtx, - ClusterClientCursorGuard& cursor, - long long nreturned) { - auto curOp = CurOp::get(opCtx); - telemetry::collectMetricsOnOpDebug(curOp, nreturned); - cursor->incrementCursorMetrics(curOp->debug().additiveMetrics); -} - -void collectTelemetryMongos(OperationContext* opCtx, - ClusterCursorManager::PinnedCursor& cursor, - long long nreturned) { - auto curOp = CurOp::get(opCtx); - telemetry::collectMetricsOnOpDebug(curOp, nreturned); - cursor->incrementCursorMetrics(curOp->debug().additiveMetrics); + int64_t execTime = opDebug.additiveMetrics.executionTime.value_or(Microseconds{0}).count(); + query_stats::writeQueryStats(opCtx, + opDebug.queryStatsStoreKeyHash, + std::move(keyGenerator), + execTime, + execTime, + opDebug.additiveMetrics.nreturned.value_or(0)); +} + +void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor) { + cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics); +} + +void collectQueryStatsMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor) { + cursor->incrementCursorMetrics(CurOp::get(opCtx)->debug().additiveMetrics); } } // namespace mongo diff --git a/src/mongo/s/query/cluster_cursor_manager.h b/src/mongo/s/query/cluster_cursor_manager.h index 8666b262c5de8..9652096fd4cda 100644 --- a/src/mongo/s/query/cluster_cursor_manager.h +++ b/src/mongo/s/query/cluster_cursor_manager.h @@ -29,14 +29,32 @@ #pragma once +#include +#include +#include +#include +#include +#include #include +#include #include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/client.h" #include "mongo/db/cursor_id.h" #include "mongo/db/generic_cursor.h" +#include "mongo/db/generic_cursor_gen.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/kill_sessions.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_killer.h" #include "mongo/platform/mutex.h" #include "mongo/platform/random.h" @@ -44,8 +62,12 @@ #include "mongo/s/query/cluster_client_cursor_guard.h" #include "mongo/s/query/cluster_client_cursor_params.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/with_lock.h" #include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -85,6 +107,9 @@ class ClusterCursorManager { // Represents a cursor retrieving data from multiple remote sources. MultiTarget, + + // Represents a cursor retrieving data queued in memory on the router. + QueuedData, }; enum class CursorLifetime { @@ -110,6 +135,9 @@ class ClusterCursorManager { // Count of open cursors registered with CursorType::SingleTarget. size_t cursorsSingleTarget = 0; + // Count of open cursors registered with CursorType::QueuedData. + size_t cursorsQueuedData = 0; + // Count of pinned cursors. 
size_t cursorsPinned = 0; }; @@ -594,19 +622,19 @@ class ClusterCursorManager { }; /** - * Record metrics for the current operation on opDebug and aggregates those metrics for telemetry + * Record metrics for the current operation on opDebug and aggregates those metrics for queryStats * use. If a cursor is provided (via ClusterClientCursorGuard or * ClusterCursorManager::PinnedCursor), metrics are aggregated on the cursor; otherwise, metrics are * written directly to the telemetry store. + * NOTE: Metrics are taken from opDebug.additiveMetrics, so CurOp::setEndOfOpMetrics must be called + * *prior* to calling these. + * + * Currently, telemetry is only collected for find and aggregate requests (and their subsequent + * getMore requests), so these should only be called from those request paths. */ -void collectTelemetryMongos(OperationContext* opCtx, - const BSONObj& originatingCommand, - long long nreturned); -void collectTelemetryMongos(OperationContext* opCtx, - ClusterClientCursorGuard& cursor, - long long nreturned); -void collectTelemetryMongos(OperationContext* opCtx, - ClusterCursorManager::PinnedCursor& cursor, - long long nreturned); +void collectQueryStatsMongos(OperationContext* opCtx, + std::unique_ptr keyGenerator); +void collectQueryStatsMongos(OperationContext* opCtx, ClusterClientCursorGuard& cursor); +void collectQueryStatsMongos(OperationContext* opCtx, ClusterCursorManager::PinnedCursor& cursor); } // namespace mongo diff --git a/src/mongo/s/query/cluster_cursor_manager_test.cpp b/src/mongo/s/query/cluster_cursor_manager_test.cpp index b343d34baf17c..bb134a9043c70 100644 --- a/src/mongo/s/query/cluster_cursor_manager_test.cpp +++ b/src/mongo/s/query/cluster_cursor_manager_test.cpp @@ -27,31 +27,48 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/session/logical_session_cache.h" #include "mongo/db/session/logical_session_cache_noop.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" #include "mongo/s/query/cluster_client_cursor_mock.h" #include "mongo/s/query/cluster_cursor_manager.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/query/cluster_query_result.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" namespace mongo { namespace { using unittest::assertGet; + const NamespaceString nss = NamespaceString::createNamespaceString_forTest("test.collection"); class ClusterCursorManagerTest : public ServiceContextTest { protected: ClusterCursorManagerTest() { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); _opCtx = makeOperationContext(); LogicalSessionCache::set(getServiceContext(), std::make_unique()); } diff --git a/src/mongo/s/query/cluster_exchange_test.cpp b/src/mongo/s/query/cluster_exchange_test.cpp index 0b4b7a3ca2bd2..8053d6cbe68ae 100644 --- a/src/mongo/s/query/cluster_exchange_test.cpp +++ b/src/mongo/s/query/cluster_exchange_test.cpp @@ -27,20 +27,57 @@ * it in the license file. 
*/ -#include "mongo/client/remote_command_targeter_factory_mock.h" -#include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/pipeline/document_source_group.h" +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/document_source_limit.h" #include "mongo/db/pipeline/document_source_match.h" #include "mongo/db/pipeline/document_source_merge.h" #include "mongo/db/pipeline/document_source_out.h" -#include "mongo/db/pipeline/document_source_project.h" -#include "mongo/db/pipeline/document_source_sort.h" +#include "mongo/db/pipeline/exchange_spec_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/field_path.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/process_interface/stub_mongo_process_interface.h" #include "mongo/db/pipeline/sharded_agg_helpers.h" -#include "mongo/s/catalog/type_shard.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/query/sharded_agg_test_fixture.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/sharding_feature_flags_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/s/query/cluster_find.cpp b/src/mongo/s/query/cluster_find.cpp index 7b472f79ff9df..f2d491d492030 100644 --- a/src/mongo/s/query/cluster_find.cpp +++ b/src/mongo/s/query/cluster_find.cpp @@ -29,46 +29,107 @@ #include "mongo/s/query/cluster_find.h" +#include +#include +#include +#include #include - +#include +#include #include +#include +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" -#include "mongo/bson/util/bson_extract.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" #include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/auth/action_type.h" #include "mongo/db/auth/authorization_session.h" +#include "mongo/db/auth/privilege.h" +#include "mongo/db/auth/resource_pattern.h" +#include "mongo/db/auth/user_name.h" +#include "mongo/db/basic_types.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" +#include "mongo/db/client.h" #include "mongo/db/commands.h" #include "mongo/db/curop.h" #include "mongo/db/curop_failpoint_helpers.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/logical_time.h" +#include 
"mongo/db/matcher/expression.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/pipeline/change_stream_invalidation_info.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/canonical_query_encoder.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/find_common.h" #include "mongo/db/query/getmore_command_gen.h" #include "mongo/db/query/query_planner_common.h" -#include "mongo/db/query/telemetry.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/sort_pattern.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/platform/overflow_arithmetic.h" -#include "mongo/s/analyze_shard_key_cmd_gen.h" +#include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/client/num_hosts_targeted_metrics.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/collection_uuid_mismatch.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" #include "mongo/s/query/async_results_merger.h" +#include "mongo/s/query/async_results_merger_params_gen.h" +#include "mongo/s/query/cluster_client_cursor.h" +#include "mongo/s/query/cluster_client_cursor_guard.h" #include "mongo/s/query/cluster_client_cursor_impl.h" +#include "mongo/s/query/cluster_client_cursor_params.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/establish_cursors.h" -#include "mongo/s/query/store_possible_cursor.h" #include "mongo/s/query_analysis_sampler_util.h" +#include "mongo/s/shard_version.h" #include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" +#include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -192,7 +253,8 @@ std::vector> constructRequestsForShards( } if (opCtx->getTxnNumber()) { - cmdBuilder.append(OperationSessionInfo::kTxnNumberFieldName, *opCtx->getTxnNumber()); + cmdBuilder.append(OperationSessionInfoFromClient::kTxnNumberFieldName, + *opCtx->getTxnNumber()); } if (shardId == sampleShardId) { analyze_shard_key::appendSampleId(&cmdBuilder, *sampleId); @@ -235,20 +297,24 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx, // Construct the query and parameters. Defer setting skip and limit here until // we determine if the query is targeting multi-shards or a single shard below. 
ClusterClientCursorParams params( - query.nss(), APIParameters::get(opCtx), readPref, repl::ReadConcernArgs::get(opCtx)); + query.nss(), APIParameters::get(opCtx), readPref, repl::ReadConcernArgs::get(opCtx), [&] { + if (!opCtx->getLogicalSessionId()) + return OperationSessionInfoFromClient(); + + OperationSessionInfoFromClient osi{*opCtx->getLogicalSessionId(), + opCtx->getTxnNumber()}; + if (TransactionRouter::get(opCtx)) { + osi.setAutocommit(false); + } + return osi; + }()); params.originatingCommandObj = CurOp::get(opCtx)->opDescription().getOwned(); params.batchSize = findCommand.getBatchSize(); params.tailableMode = query_request_helper::getTailableMode(findCommand); params.isAllowPartialResults = findCommand.getAllowPartialResults(); - params.lsid = opCtx->getLogicalSessionId(); - params.txnNumber = opCtx->getTxnNumber(); params.originatingPrivileges = { Privilege(ResourcePattern::forExactNamespace(query.nss()), ActionType::find)}; - if (TransactionRouter::get(opCtx)) { - params.isAutoCommit = false; - } - // This is the batchSize passed to each subsequent getMore command issued by the cursor. We // usually use the batchSize associated with the initial find, but as it is illegal to send a // getMore with a batchSize of 0, we set it to use the default batchSize logic. @@ -435,6 +501,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx, *partialResultsReturned = ccc->partialResultsReturned(); } + CurOp::get(opCtx)->setEndOfOpMetrics(results->size()); // If the cursor is exhausted, then there are no more results to return and we don't need to // allocate a cursor id. if (cursorState == ClusterCursorManager::CursorState::Exhausted) { @@ -443,7 +510,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx, if (shardIds.size() > 0) { updateNumHostsTargetedMetrics(opCtx, cm, shardIds.size()); } - collectTelemetryMongos(opCtx, ccc->getOriginatingCommand(), results->size()); + collectQueryStatsMongos(opCtx, ccc->getKeyGenerator()); return CursorId(0); } @@ -454,7 +521,7 @@ CursorId runQueryWithoutRetrying(OperationContext* opCtx, ? ClusterCursorManager::CursorLifetime::Immortal : ClusterCursorManager::CursorLifetime::Mortal; auto authUser = AuthorizationSession::get(opCtx->getClient())->getAuthenticatedUserName(); - collectTelemetryMongos(opCtx, ccc, results->size()); + collectQueryStatsMongos(opCtx, ccc); auto cursorId = uassertStatusOK(cursorManager->registerCursor( opCtx, ccc.releaseCursor(), query.nss(), cursorType, cursorLifetime, authUser)); @@ -918,10 +985,11 @@ StatusWith ClusterFind::runGetMore(OperationContext* opCtx, // Set nReturned and whether the cursor has been exhausted. opDebug.cursorExhausted = (idToReturn == 0); opDebug.additiveMetrics.nBatches = 1; + CurOp::get(opCtx)->setEndOfOpMetrics(batch.size()); const bool partialResultsReturned = pinnedCursor.getValue()->partialResultsReturned(); pinnedCursor.getValue()->setLeftoverMaxTimeMicros(opCtx->getRemainingMaxTimeMicros()); - collectTelemetryMongos(opCtx, pinnedCursor.getValue(), batch.size()); + collectQueryStatsMongos(opCtx, pinnedCursor.getValue()); // Upon successful completion, transfer ownership of the cursor back to the cursor manager. If // the cursor has been exhausted, the cursor manager will clean it up for us. 
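With the telemetry hooks replaced by collectQueryStatsMongos(), the metrics come from opDebug.additiveMetrics, so CurOp::setEndOfOpMetrics() has to run first (this is the NOTE added to cluster_cursor_manager.h). The find-path hunks above condense into the following ordering sketch; it is not a drop-in function, and 'ccc', 'results' and 'cursorState' are the locals of runQueryWithoutRetrying():

    // Ordering sketch, condensed from runQueryWithoutRetrying() in this patch.
    CurOp::get(opCtx)->setEndOfOpMetrics(results->size());  // must precede collectQueryStatsMongos()

    if (cursorState == ClusterCursorManager::CursorState::Exhausted) {
        // No cursor will be registered: write the metrics straight to the query stats store,
        // keyed by the key generator the cursor carries.
        collectQueryStatsMongos(opCtx, ccc->getKeyGenerator());
        return CursorId(0);
    }

    // A getMore may follow: aggregate this operation's metrics on the cursor instead and let
    // the final getMore write them out.
    collectQueryStatsMongos(opCtx, ccc);

The getMore path in runGetMore() follows the same order, calling setEndOfOpMetrics(batch.size()) before collectQueryStatsMongos(opCtx, pinnedCursor.getValue()).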
diff --git a/src/mongo/s/query/cluster_find.h b/src/mongo/s/query/cluster_find.h index 4f03e55991f41..a1643f3e341ab 100644 --- a/src/mongo/s/query/cluster_find.h +++ b/src/mongo/s/query/cluster_find.h @@ -29,10 +29,15 @@ #pragma once +#include #include +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" #include "mongo/db/cursor_id.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/explain_options.h" #include "mongo/db/query/getmore_command_gen.h" diff --git a/src/mongo/s/query/cqf_utils.cpp b/src/mongo/s/query/cqf_utils.cpp index 680e8d14767a1..eb4a3dda8534f 100644 --- a/src/mongo/s/query/cqf_utils.cpp +++ b/src/mongo/s/query/cqf_utils.cpp @@ -27,11 +27,15 @@ * it in the license file. */ +#include + +#include "mongo/base/error_codes.h" #include "mongo/db/pipeline/abt/document_source_visitor.h" #include "mongo/db/pipeline/visitors/document_source_visitor_registry_mongos.h" #include "mongo/db/query/cqf_command_utils.h" #include "mongo/db/service_context.h" #include "mongo/s/query/document_source_merge_cursors.h" +#include "mongo/util/assert_util.h" namespace mongo::optimizer { diff --git a/src/mongo/s/query/document_source_merge_cursors.cpp b/src/mongo/s/query/document_source_merge_cursors.cpp index 9a35057412aab..84200c86bc0e3 100644 --- a/src/mongo/s/query/document_source_merge_cursors.cpp +++ b/src/mongo/s/query/document_source_merge_cursors.cpp @@ -27,14 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/s/query/document_source_merge_cursors.h" -#include "mongo/db/pipeline/document_source_sort.h" -#include "mongo/db/query/find_common.h" -#include "mongo/s/grid.h" -#include "mongo/s/query/establish_cursors.h" +#include +#include +#include +#include +#include + +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/pipeline/lite_parsed_document_source.h" +#include "mongo/db/pipeline/process_interface/mongo_process_interface.h" +#include "mongo/db/query/allowed_contexts.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/query/cluster_query_result.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { @@ -117,11 +126,7 @@ DocumentSource::GetNextResult DocumentSourceMergeCursors::doGetNext() { Value DocumentSourceMergeCursors::serialize(SerializationOptions opts) const { invariant(!_blockingResultsMerger); invariant(_armParams); - if (opts.redactIdentifiers || opts.replacementForLiteralArgs) { - MONGO_UNIMPLEMENTED_TASSERT(7484301); - } - - return Value(Document{{kStageName, _armParams->toBSON()}}); + return Value(Document{{kStageName, _armParams->toBSON(opts)}}); } boost::intrusive_ptr DocumentSourceMergeCursors::createFromBson( diff --git a/src/mongo/s/query/document_source_merge_cursors.h b/src/mongo/s/query/document_source_merge_cursors.h index 3abf4a78a15c4..e370b51831a32 100644 --- a/src/mongo/s/query/document_source_merge_cursors.h +++ b/src/mongo/s/query/document_source_merge_cursors.h @@ -29,12 +29,37 @@ #pragma once +#include +#include +#include +#include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/value.h" +#include "mongo/db/namespace_string.h" +#include 
"mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/pipeline/pipeline.h" +#include "mongo/db/pipeline/stage_constraints.h" +#include "mongo/db/pipeline/variables.h" +#include "mongo/db/query/serialization_options.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/task_executor.h" +#include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/s/query/blocking_results_merger.h" #include "mongo/s/query/router_stage_merge.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/s/query/establish_cursors.cpp b/src/mongo/s/query/establish_cursors.cpp index d829d4ad6e8c9..815d1159c215f 100644 --- a/src/mongo/s/query/establish_cursors.cpp +++ b/src/mongo/s/query/establish_cursors.cpp @@ -30,10 +30,27 @@ #include "mongo/s/query/establish_cursors.h" #include - +#include +#include + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_retry_scheduler.h" -#include "mongo/client/remote_command_targeter.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" +#include "mongo/db/client.h" #include "mongo/db/cursor_id.h" #include "mongo/db/query/cursor_response.h" #include "mongo/db/query/kill_cursors_gen.h" @@ -42,10 +59,20 @@ #include "mongo/executor/remote_command_request.h" #include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/string_map.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -54,6 +81,8 @@ namespace mongo { namespace { +constexpr StringData kOperationKeyField = "clientOperationKey"_sd; + /** * This class wraps logic for establishing cursors using a MultiStatementTransactionRequestsSender. */ @@ -62,12 +91,16 @@ class CursorEstablisher { CursorEstablisher(OperationContext* opCtx, std::shared_ptr executor, const NamespaceString& nss, - bool allowPartialResults) + bool allowPartialResults, + std::vector providedOpKeys, + AsyncRequestsSender::ShardHostMap designatedHostsMap) : _opCtx(opCtx), _executor{std::move(executor)}, _nss(nss), _allowPartialResults(allowPartialResults), - _opKey{UUID::gen()} {} + _defaultOpKey{UUID::gen()}, + _providedOpKeys(std::move(providedOpKeys)), + _designatedHostsMap(std::move(designatedHostsMap)) {} /** * Make a RequestSender and thus send requests. 
@@ -105,7 +138,7 @@ class CursorEstablisher { static void killOpOnShards(ServiceContext* srvCtx, std::shared_ptr executor, - OperationKey opKey, + std::vector opKeys, std::set remotes) noexcept; private: @@ -122,26 +155,20 @@ class CursorEstablisher { const NamespaceString _nss; const bool _allowPartialResults; - const OperationKey _opKey; + // Callers may provide an array of OperationKeys already included in the given requests and + // those will be used to clean up cursors on failure, otherwise one key will be generated and + // appended to all requests. + const OperationKey _defaultOpKey; + const std::vector _providedOpKeys; boost::optional _ars; boost::optional _maybeFailure; std::vector _remoteCursors; std::vector _remotesToClean; + AsyncRequestsSender::ShardHostMap _designatedHostsMap; }; -// Attach our OperationKey to a request. This will allow us to kill any outstanding -// requests in case we're interrupted or one of the remotes returns an error. Note that although -// the opCtx may have an OperationKey set on it already, do not inherit it here because we may -// target ourselves which implies the same node receiving multiple operations with the same -// opKey. -BSONObj appendOpKey(const OperationKey& opKey, const BSONObj& request) { - BSONObjBuilder newCmd(request); - opKey.appendToBuilder(&newCmd, "clientOperationKey"); - return newCmd.obj(); -} - void CursorEstablisher::sendRequests(const ReadPreferenceSetting& readPref, const std::vector>& remotes, Shard::RetryPolicy retryPolicy) { @@ -150,18 +177,39 @@ void CursorEstablisher::sendRequests(const ReadPreferenceSetting& readPref, // TODO SERVER-47261 management of the opKey should move to the ARS. for (const auto& remote : remotes) { - requests.emplace_back(remote.first, appendOpKey(_opKey, remote.second)); + if (_providedOpKeys.size()) { + // Caller provided their own keys so skip appending the default key. 
+ dassert(remote.second.hasField(kOperationKeyField)); + requests.emplace_back(remote.first, remote.second); + } else { + requests.emplace_back(remote.first, appendOpKey(_defaultOpKey, remote.second)); + } } - LOGV2_DEBUG(4625502, - 3, - "Establishing cursors on remotes", - "opId"_attr = _opCtx->getOpID(), - "numRemotes"_attr = remotes.size(), - "opKey"_attr = _opKey); + if (shouldLog(MONGO_LOGV2_DEFAULT_COMPONENT, logv2::LogSeverity::Debug(3))) { + logv2::DynamicAttributes attrs; + attrs.add("opId", _opCtx->getOpID()); + attrs.add("numRemotes", remotes.size()); + if (_providedOpKeys.size()) { + BSONArrayBuilder bab; + for (auto&& opKey : _providedOpKeys) { + opKey.appendToArrayBuilder(&bab); + } + attrs.add("providedOpKeys", bab.arr()); + } else { + attrs.add("defaultOpKey", _defaultOpKey); + } + LOGV2_DEBUG(4625502, 3, "Establishing cursors on remotes", attrs); + } // Send the requests - _ars.emplace(_opCtx, _executor, _nss.dbName(), std::move(requests), readPref, retryPolicy); + _ars.emplace(_opCtx, + _executor, + _nss.dbName(), + std::move(requests), + readPref, + retryPolicy, + _designatedHostsMap); } void CursorEstablisher::waitForResponse() noexcept { @@ -208,11 +256,11 @@ void CursorEstablisher::waitForResponse() noexcept { StatusWith scheduleCursorCleanup( std::shared_ptr executor, ServiceContext* svcCtx, - OperationKey opKey, + std::vector opKeys, std::set&& remotesToClean) { return executor->scheduleWork([svcCtx = svcCtx, executor = executor, - opKey = opKey, + opKeys = std::move(opKeys), remotesToClean = std::move(remotesToClean)]( const executor::TaskExecutor::CallbackArgs& args) mutable { if (!args.status.isOK()) { @@ -221,7 +269,7 @@ StatusWith scheduleCursorCleanup( return; } CursorEstablisher::killOpOnShards( - svcCtx, std::move(executor), std::move(opKey), std::move(remotesToClean)); + svcCtx, std::move(executor), std::move(opKeys), std::move(remotesToClean)); }); } @@ -244,8 +292,11 @@ void CursorEstablisher::checkForFailedRequests() { // Filter out duplicate hosts. auto remotes = std::set(_remotesToClean.begin(), _remotesToClean.end()); - uassertStatusOK( - scheduleCursorCleanup(_executor, _opCtx->getServiceContext(), _opKey, std::move(remotes))); + uassertStatusOK(scheduleCursorCleanup( + _executor, + _opCtx->getServiceContext(), + _providedOpKeys.size() ? _providedOpKeys : std::vector{_defaultOpKey}, + std::move(remotes))); // Throw our failure. uassertStatusOK(*_maybeFailure); @@ -310,18 +361,23 @@ void CursorEstablisher::_handleFailure(const AsyncRequestsSender::Response& resp void CursorEstablisher::killOpOnShards(ServiceContext* srvCtx, std::shared_ptr executor, - OperationKey opKey, + std::vector opKeys, std::set remotes) noexcept try { ThreadClient tc("establishCursors cleanup", srvCtx); auto opCtx = tc->makeOperationContext(); for (auto&& host : remotes) { + BSONArrayBuilder opKeyArrayBuilder; + for (auto&& opKey : opKeys) { + opKey.appendToArrayBuilder(&opKeyArrayBuilder); + } + executor::RemoteCommandRequest::Options options; options.fireAndForget = true; executor::RemoteCommandRequest request( host, "admin", - BSON("_killOperations" << 1 << "operationKeys" << BSON_ARRAY(opKey)), + BSON("_killOperations" << 1 << "operationKeys" << opKeyArrayBuilder.arr()), opCtx.get(), executor::RemoteCommandRequestBase::kNoTimeout, options); @@ -355,14 +411,32 @@ BSONObj appendReadPreferenceNearest(BSONObj cmdObj) { } // namespace +// Attach our OperationKey to a request. 
This will allow us to kill any outstanding +// requests in case we're interrupted or one of the remotes returns an error. Note that although +// the opCtx may have an OperationKey set on it already, do not inherit it here because we may +// target ourselves which implies the same node receiving multiple operations with the same +// opKey. +BSONObj appendOpKey(const OperationKey& opKey, const BSONObj& request) { + BSONObjBuilder newCmd(request); + opKey.appendToBuilder(&newCmd, kOperationKeyField); + return newCmd.obj(); +} + std::vector establishCursors(OperationContext* opCtx, std::shared_ptr executor, const NamespaceString& nss, const ReadPreferenceSetting readPref, const std::vector>& remotes, bool allowPartialResults, - Shard::RetryPolicy retryPolicy) { - auto establisher = CursorEstablisher(opCtx, executor, nss, allowPartialResults); + Shard::RetryPolicy retryPolicy, + std::vector providedOpKeys, + AsyncRequestsSender::ShardHostMap designatedHostsMap) { + auto establisher = CursorEstablisher(opCtx, + executor, + nss, + allowPartialResults, + std::move(providedOpKeys), + std::move(designatedHostsMap)); establisher.sendRequests(readPref, remotes, retryPolicy); establisher.waitForResponses(); establisher.checkForFailedRequests(); @@ -472,7 +546,7 @@ std::vector establishCursorsOnAllHosts( if (!remotesToClean.empty()) { uassertStatusOK(scheduleCursorCleanup( - executor, opCtx->getServiceContext(), opKey, std::move(remotesToClean))); + executor, opCtx->getServiceContext(), {opKey}, std::move(remotesToClean))); } uassertStatusOK(failure.value()); diff --git a/src/mongo/s/query/establish_cursors.h b/src/mongo/s/query/establish_cursors.h index cd19af7eea996..99fb22140f11a 100644 --- a/src/mongo/s/query/establish_cursors.h +++ b/src/mongo/s/query/establish_cursors.h @@ -31,13 +31,21 @@ #include #include +#include +#include #include #include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" #include "mongo/db/cursor_id.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" +#include "mongo/s/async_requests_sender.h" #include "mongo/s/client/shard.h" #include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/util/net/hostandport.h" @@ -45,8 +53,6 @@ namespace mongo { -class CursorResponse; - /** * Establishes cursors on the remote shards by issuing requests in parallel, using the readPref to * select a host within each shard. @@ -59,9 +65,16 @@ class CursorResponse; * On success, the ownership of the cursors is transferred to the caller. This means the caller is * now responsible for either exhausting the cursors or sending killCursors to them. * + * If providedOpKeys are given, this assumes all requests have been given an operation key and will + * use the provided keys to kill operations on failure. Otherwise a unique operation key is + * generated and attached to all requests. + * * @param allowPartialResults: If true, unreachable hosts are ignored, and only cursors established * on reachable hosts are returned. * + * @param designatedHostsMap: A map of hosts to be targeted for particular shards, overriding + * the read preference setting. 
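The designated-hosts override is exercised end to end by the new SingleRemoteRespondsWithDesignatedHost test further down; its essence, as a sketch (the shard id and host values are the ones that test uses):

    // Pin shard 0's request to a specific secondary, overriding the read preference.
    AsyncRequestsSender::ShardHostMap designatedHosts;
    designatedHosts[kTestShardIds[0]] = HostAndPort("SecondaryHostShard0", 12345);

    auto cursors = establishCursors(opCtx,
                                    executor,
                                    nss,
                                    ReadPreferenceSetting{ReadPreference::PrimaryOnly},
                                    remotes,
                                    false /* allowPartialResults */,
                                    Shard::RetryPolicy::kIdempotent,
                                    {} /* providedOpKeys */,
                                    designatedHosts);
    // The returned cursor for that shard reports the designated host it was established on.
    ASSERT_EQUALS(cursors[0].getHostAndPort(), HostAndPort("SecondaryHostShard0", 12345));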
+ * */ std::vector establishCursors( OperationContext* opCtx, @@ -70,7 +83,9 @@ std::vector establishCursors( ReadPreferenceSetting readPref, const std::vector>& remotes, bool allowPartialResults, - Shard::RetryPolicy retryPolicy = Shard::RetryPolicy::kIdempotent); + Shard::RetryPolicy retryPolicy = Shard::RetryPolicy::kIdempotent, + std::vector providedOpKeys = {}, + AsyncRequestsSender::ShardHostMap designatedHostsMap = {}); /** * Establishes cursors on every host in the remote shards by issuing requests in parallel with the @@ -103,4 +118,9 @@ void killRemoteCursor(OperationContext* opCtx, RemoteCursor&& cursor, const NamespaceString& nss); +/** + * Appends the given operation key to the given request. + */ +BSONObj appendOpKey(const OperationKey& opKey, const BSONObj& request); + } // namespace mongo diff --git a/src/mongo/s/query/establish_cursors_test.cpp b/src/mongo/s/query/establish_cursors_test.cpp index be90e23e1dcb4..0fd5f581d86bb 100644 --- a/src/mongo/s/query/establish_cursors_test.cpp +++ b/src/mongo/s/query/establish_cursors_test.cpp @@ -27,20 +27,43 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/json.h" +#include "mongo/db/client.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" #include "mongo/s/query/establish_cursors.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -56,9 +79,21 @@ const std::vector kTestShardHosts = {HostAndPort("FakeShard1Host", HostAndPort("FakeShard2Host", 12345), HostAndPort("FakeShard3Host", 12345)}; +std::vector extractOperationKeys(BSONObj obj) { + ASSERT_TRUE(obj.hasField("operationKeys")) << obj; + + std::vector opKeys; + for (auto&& elem : obj["operationKeys"].Array()) { + auto opKey = unittest::assertGet(UUID::parse(elem)); + opKeys.push_back(std::move(opKey)); + } + return opKeys; +} + class EstablishCursorsTest : public ShardingTestFixture { public: - EstablishCursorsTest() : _nss("testdb.testcoll") {} + EstablishCursorsTest() + : _nss(NamespaceString::createNamespaceString_forTest("testdb.testcoll")) {} void setUp() override { ShardingTestFixture::setUp(); @@ -76,6 +111,8 @@ class EstablishCursorsTest : public ShardingTestFixture { std::unique_ptr targeter( std::make_unique()); + _targeters.push_back(targeter.get()); + targeter->setConnectionStringReturnValue(ConnectionString(kTestShardHosts[i])); targeter->setFindHostReturnValue(kTestShardHosts[i]); @@ -89,11 +126,23 @@ class EstablishCursorsTest : 
public ShardingTestFixture { /** * Mock a response for a killOperations command. */ - void expectKillOperations(size_t expected) { + void expectKillOperations(size_t expected, std::vector expectedOpKeys = {}) { for (size_t i = 0; i < expected; i++) { - onCommand([this](const RemoteCommandRequest& request) { + onCommand([&](const RemoteCommandRequest& request) { ASSERT_EQ("admin", request.dbname) << request; ASSERT_TRUE(request.cmdObj.hasField("_killOperations")) << request; + + ASSERT_TRUE(request.cmdObj.hasField("operationKeys")) << request; + if (expectedOpKeys.size()) { + auto sentOpKeys = extractOperationKeys(request.cmdObj); + ASSERT_EQ(expectedOpKeys.size(), sentOpKeys.size()); + std::sort(expectedOpKeys.begin(), expectedOpKeys.end()); + std::sort(sentOpKeys.begin(), sentOpKeys.end()); + for (size_t i = 0; i < expectedOpKeys.size(); i++) { + ASSERT_EQ(expectedOpKeys[i], sentOpKeys[i]); + } + } + return BSON("ok" << 1); }); } @@ -101,6 +150,7 @@ class EstablishCursorsTest : public ShardingTestFixture { protected: const NamespaceString _nss; + std::vector _targeters; // Targeters are owned by the factory. }; TEST_F(EstablishCursorsTest, NoRemotes) { @@ -142,6 +192,41 @@ TEST_F(EstablishCursorsTest, SingleRemoteRespondsWithSuccess) { future.default_timed_get(); } +TEST_F(EstablishCursorsTest, SingleRemoteRespondsWithDesignatedHost) { + BSONObj cmdObj = fromjson("{find: 'testcoll'}"); + std::vector> remotes{{kTestShardIds[0], cmdObj}}; + + AsyncRequestsSender::ShardHostMap designatedHosts; + auto shard0Secondary = HostAndPort("SecondaryHostShard0", 12345); + _targeters[0]->setConnectionStringReturnValue( + ConnectionString::forReplicaSet("shard0_rs"_sd, {kTestShardHosts[0], shard0Secondary})); + designatedHosts[kTestShardIds[0]] = shard0Secondary; + auto future = launchAsync([&] { + auto cursors = establishCursors(operationContext(), + executor(), + _nss, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + remotes, + false, // allowPartialResults + Shard::RetryPolicy::kIdempotent, + {}, // providedOpKeys + designatedHosts); + ASSERT_EQUALS(remotes.size(), cursors.size()); + ASSERT_EQUALS(cursors[0].getHostAndPort(), shard0Secondary); + }); + + // Remote responds. + onCommand([this](const RemoteCommandRequest& request) { + ASSERT_EQ(_nss.coll(), request.cmdObj.firstElement().valueStringData()); + + std::vector batch = {fromjson("{_id: 1}"), fromjson("{_id: 2}")}; + CursorResponse cursorResponse(_nss, CursorId(123), batch); + return cursorResponse.toBSON(CursorResponse::ResponseType::InitialResponse); + }); + + future.default_timed_get(); +} + TEST_F(EstablishCursorsTest, SingleRemoteRespondsWithNonretriableError) { BSONObj cmdObj = fromjson("{find: 'testcoll'}"); std::vector> remotes{{kTestShardIds[0], cmdObj}}; @@ -182,9 +267,13 @@ TEST_F(EstablishCursorsTest, SingleRemoteInterruptedWhileCommandInFlight) { barrier->countDownAndWait(); }); + auto seenOpKey = UUID::gen(); onCommand([&](const RemoteCommandRequest& request) { ASSERT_EQ(_nss.coll(), request.cmdObj.firstElement().valueStringData()); + ASSERT_TRUE(request.cmdObj.hasField("clientOperationKey")) << request; + seenOpKey = unittest::assertGet(UUID::parse(request.cmdObj["clientOperationKey"])); + // Now that our "remote" has received the request, interrupt the opCtx which the cursor is // running under. { @@ -201,7 +290,7 @@ TEST_F(EstablishCursorsTest, SingleRemoteInterruptedWhileCommandInFlight) { }); // We were interrupted so establishCursors is forced to send a killOperations out of paranoia. 
- expectKillOperations(1); + expectKillOperations(1, {seenOpKey}); future.default_timed_get(); } @@ -400,17 +489,27 @@ TEST_F(EstablishCursorsTest, MultipleRemotesOneRemoteRespondsWithNonretriableErr }); // First remote responds with success. + auto seenOpKey = UUID::gen(); onCommand([&](const RemoteCommandRequest& request) { ASSERT_EQ(_nss.coll(), request.cmdObj.firstElement().valueStringData()); + ASSERT_TRUE(request.cmdObj.hasField("clientOperationKey")) << request; + seenOpKey = unittest::assertGet(UUID::parse(request.cmdObj["clientOperationKey"])); + std::vector batch = {fromjson("{_id: 1}"), fromjson("{_id: 2}")}; CursorResponse cursorResponse(_nss, CursorId(123), batch); return cursorResponse.toBSON(CursorResponse::ResponseType::InitialResponse); }); // Second remote responds with a non-retriable error. - onCommand([this](const RemoteCommandRequest& request) { + onCommand([&](const RemoteCommandRequest& request) { ASSERT_EQ(_nss.coll(), request.cmdObj.firstElement().valueStringData()); + + // All commands receive the same opKey. + ASSERT_TRUE(request.cmdObj.hasField("clientOperationKey")) << request; + auto opKey = unittest::assertGet(UUID::parse(request.cmdObj["clientOperationKey"])); + ASSERT_EQ(seenOpKey, opKey); + return createErrorCursorResponse(Status(ErrorCodes::FailedToParse, "failed to parse")); }); @@ -418,13 +517,94 @@ TEST_F(EstablishCursorsTest, MultipleRemotesOneRemoteRespondsWithNonretriableErr onCommand([&](const RemoteCommandRequest& request) { ASSERT_EQ(_nss.coll(), request.cmdObj.firstElement().valueStringData()); + // All commands receive the same opKey. + ASSERT_TRUE(request.cmdObj.hasField("clientOperationKey")) << request; + auto opKey = unittest::assertGet(UUID::parse(request.cmdObj["clientOperationKey"])); + ASSERT_EQ(seenOpKey, opKey); + std::vector batch = {fromjson("{_id: 1}"), fromjson("{_id: 2}")}; CursorResponse cursorResponse(_nss, CursorId(123), batch); return cursorResponse.toBSON(CursorResponse::ResponseType::InitialResponse); }); // Expect two killOperation commands, one for each remote which responded with a cursor. - expectKillOperations(2); + expectKillOperations(2, {seenOpKey}); + + future.default_timed_get(); +} + +TEST_F(EstablishCursorsTest, AcceptsCustomOpKeys) { + std::vector providedOpKeys = {UUID::gen(), UUID::gen()}; + auto cmdObj0 = BSON("find" + << "testcoll" + << "clientOperationKey" << providedOpKeys[0]); + auto cmdObj1 = BSON("find" + << "testcoll" + << "clientOperationKey" << providedOpKeys[1]); + auto cmdObj2 = BSON("find" + << "testcoll" + << "clientOperationKey" << providedOpKeys[1]); + std::vector> remotes{ + {kTestShardIds[0], cmdObj0}, {kTestShardIds[1], cmdObj1}, {kTestShardIds[2], cmdObj2}}; + + auto future = launchAsync([&] { + ASSERT_THROWS(establishCursors(operationContext(), + executor(), + _nss, + ReadPreferenceSetting{ReadPreference::PrimaryOnly}, + remotes, + false, // allowPartialResults + Shard::RetryPolicy::kIdempotent, + providedOpKeys), + ExceptionFor); + }); + + // First remote responds with success. + onCommand([&](const RemoteCommandRequest& request) { + ASSERT_EQ(_nss.coll(), request.cmdObj.firstElement().valueStringData()); + + // All commands use the opKey they were given. 
+ ASSERT_TRUE(request.cmdObj.hasField("clientOperationKey")) << request; + auto opKey = unittest::assertGet(UUID::parse(request.cmdObj["clientOperationKey"])); + ASSERT_TRUE(std::find(providedOpKeys.begin(), providedOpKeys.end(), opKey) != + providedOpKeys.end()); + + std::vector batch = {fromjson("{_id: 1}"), fromjson("{_id: 2}")}; + CursorResponse cursorResponse(_nss, CursorId(123), batch); + return cursorResponse.toBSON(CursorResponse::ResponseType::InitialResponse); + }); + + // Second remote responds with a non-retriable error. + onCommand([&](const RemoteCommandRequest& request) { + ASSERT_EQ(_nss.coll(), request.cmdObj.firstElement().valueStringData()); + + // All commands use the opKey they were given. + ASSERT_TRUE(request.cmdObj.hasField("clientOperationKey")) << request; + auto opKey = unittest::assertGet(UUID::parse(request.cmdObj["clientOperationKey"])); + ASSERT_TRUE(std::find(providedOpKeys.begin(), providedOpKeys.end(), opKey) != + providedOpKeys.end()); + + return createErrorCursorResponse(Status(ErrorCodes::FailedToParse, "failed to parse")); + }); + + // Third remote responds with success (must give some response to mock network for each remote). + onCommand([&](const RemoteCommandRequest& request) { + ASSERT_EQ(_nss.coll(), request.cmdObj.firstElement().valueStringData()); + + // All commands use the opKey they were given. + ASSERT_TRUE(request.cmdObj.hasField("clientOperationKey")) << request; + auto opKey = unittest::assertGet(UUID::parse(request.cmdObj["clientOperationKey"])); + ASSERT_TRUE(std::find(providedOpKeys.begin(), providedOpKeys.end(), opKey) != + providedOpKeys.end()); + + std::vector batch = {fromjson("{_id: 1}"), fromjson("{_id: 2}")}; + CursorResponse cursorResponse(_nss, CursorId(123), batch); + return cursorResponse.toBSON(CursorResponse::ResponseType::InitialResponse); + }); + + // Expect two killOperation commands, one for each remote which responded with a cursor. Both + // should include all provided opKeys. + expectKillOperations(2, providedOpKeys); future.default_timed_get(); } diff --git a/src/mongo/s/query/owned_remote_cursor.h b/src/mongo/s/query/owned_remote_cursor.h index 0e4e03103fef2..1dcd25da5eaee 100644 --- a/src/mongo/s/query/owned_remote_cursor.h +++ b/src/mongo/s/query/owned_remote_cursor.h @@ -31,7 +31,6 @@ #include "mongo/executor/task_executor_pool.h" #include "mongo/s/grid.h" -#include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/s/query/establish_cursors.h" namespace mongo { diff --git a/src/mongo/s/query/results_merger_test_fixture.cpp b/src/mongo/s/query/results_merger_test_fixture.cpp index 637c879e87b76..0ae5793af7845 100644 --- a/src/mongo/s/query/results_merger_test_fixture.cpp +++ b/src/mongo/s/query/results_merger_test_fixture.cpp @@ -27,14 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include "mongo/s/query/results_merger_test_fixture.h" + +#include +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/curop.h" -#include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/query/results_merger_test_fixture.h" namespace mongo { const HostAndPort ResultsMergerTestFixture::kTestConfigShardHost = diff --git a/src/mongo/s/query/results_merger_test_fixture.h b/src/mongo/s/query/results_merger_test_fixture.h index 30b72c341adb3..ba1b3520c009f 100644 --- a/src/mongo/s/query/results_merger_test_fixture.h +++ b/src/mongo/s/query/results_merger_test_fixture.h @@ -29,9 +29,52 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/query_request_helper.h" +#include "mongo/db/query/tailable_mode_gen.h" +#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/executor/remote_command_response.h" +#include "mongo/executor/task_executor.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/query/async_results_merger.h" +#include "mongo/s/query/async_results_merger_params_gen.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" namespace mongo { @@ -90,17 +133,15 @@ class ResultsMergerTestFixture : public ShardingTestFixture { params.setAllowPartialResults(findCommand->getAllowPartialResults()); } - OperationSessionInfoFromClient sessionInfo; - boost::optional lsidFromClient; - if (auto lsid = operationContext()->getLogicalSessionId()) { - lsidFromClient.emplace(lsid->getId()); - lsidFromClient->setUid(lsid->getUid()); + OperationSessionInfoFromClient sessionInfo([&] { + LogicalSessionFromClient lsidFromClient(lsid->getId()); + lsidFromClient.setUid(lsid->getUid()); + return lsidFromClient; + }()); + sessionInfo.setTxnNumber(operationContext()->getTxnNumber()); + params.setOperationSessionInfo(sessionInfo); } - - sessionInfo.setSessionId(lsidFromClient); - sessionInfo.setTxnNumber(operationContext()->getTxnNumber()); - params.setOperationSessionInfo(sessionInfo); return params; } /** diff --git a/src/mongo/s/query/router_stage_limit.cpp b/src/mongo/s/query/router_stage_limit.cpp index e419e9a98740c..e5810a14d295c 100644 --- a/src/mongo/s/query/router_stage_limit.cpp +++ b/src/mongo/s/query/router_stage_limit.cpp @@ -28,9 +28,13 @@ */ -#include "mongo/platform/basic.h" +#include + +#include +#include #include "mongo/s/query/router_stage_limit.h" +#include "mongo/util/assert_util_core.h" 
#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/s/query/router_stage_limit.h b/src/mongo/s/query/router_stage_limit.h index d392a4902dc31..b83dd8b91014b 100644 --- a/src/mongo/s/query/router_stage_limit.h +++ b/src/mongo/s/query/router_stage_limit.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/base/status_with.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/router_exec_stage.h" namespace mongo { diff --git a/src/mongo/s/query/router_stage_limit_test.cpp b/src/mongo/s/query/router_stage_limit_test.cpp index 6ad6120e82615..9f5886522cc53 100644 --- a/src/mongo/s/query/router_stage_limit_test.cpp +++ b/src/mongo/s/query/router_stage_limit_test.cpp @@ -27,16 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/s/query/router_stage_limit.h" - #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/s/query/router_stage_limit.h" #include "mongo/s/query/router_stage_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/s/query/router_stage_mock.cpp b/src/mongo/s/query/router_stage_mock.cpp index 8993489755649..e4ef82a59f200 100644 --- a/src/mongo/s/query/router_stage_mock.cpp +++ b/src/mongo/s/query/router_stage_mock.cpp @@ -28,10 +28,13 @@ */ -#include "mongo/platform/basic.h" - #include "mongo/s/query/router_stage_mock.h" +#include +#include + +#include "mongo/base/error_codes.h" + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/s/query/router_stage_mock.h b/src/mongo/s/query/router_stage_mock.h index 426489d004439..a2f0c17ee5e10 100644 --- a/src/mongo/s/query/router_stage_mock.h +++ b/src/mongo/s/query/router_stage_mock.h @@ -29,11 +29,16 @@ #pragma once +#include #include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/router_exec_stage.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/s/query/router_stage_pipeline.cpp b/src/mongo/s/query/router_stage_pipeline.cpp index 253f28d84ee8c..9c8c7386be7e4 100644 --- a/src/mongo/s/query/router_stage_pipeline.cpp +++ b/src/mongo/s/query/router_stage_pipeline.cpp @@ -27,15 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/s/query/router_stage_pipeline.h" - +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/exec/document_value/document_metadata_fields.h" +#include "mongo/db/exec/document_value/value.h" #include "mongo/db/pipeline/document_source.h" -#include "mongo/db/pipeline/document_source_change_stream.h" -#include "mongo/db/pipeline/document_source_list_local_sessions.h" #include "mongo/db/pipeline/expression_context.h" #include "mongo/s/query/document_source_merge_cursors.h" +#include "mongo/s/query/router_stage_pipeline.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/s/query/router_stage_pipeline.h b/src/mongo/s/query/router_stage_pipeline.h index a853f5caa4154..ea064102e35ec 100644 --- a/src/mongo/s/query/router_stage_pipeline.h +++ b/src/mongo/s/query/router_stage_pipeline.h @@ -29,11 +29,22 @@ #pragma once -#include "mongo/s/query/router_exec_stage.h" - +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/exec/document_value/document.h" +#include "mongo/db/operation_context.h" #include "mongo/db/pipeline/document_source.h" #include "mongo/db/pipeline/pipeline.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/document_source_merge_cursors.h" +#include "mongo/s/query/router_exec_stage.h" +#include "mongo/util/duration.h" +#include "mongo/util/intrusive_counter.h" namespace mongo { diff --git a/src/mongo/s/query/router_stage_queued_data.cpp b/src/mongo/s/query/router_stage_queued_data.cpp new file mode 100644 index 0000000000000..d6bd0dbd6dd31 --- /dev/null +++ b/src/mongo/s/query/router_stage_queued_data.cpp @@ -0,0 +1,80 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + + +#include +#include + +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/s/query/router_stage_queued_data.h" +#include "mongo/util/assert_util_core.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery + + +namespace mongo { + +void RouterStageQueuedData::queueResult(const ClusterQueryResult& result) { + auto resultObj = result.getResult(); + if (resultObj) { + invariant(resultObj->isOwned()); + } + _resultsQueue.push({result}); +} + +void RouterStageQueuedData::queueError(Status status) { + _resultsQueue.push({status}); +} + +StatusWith RouterStageQueuedData::next() { + if (_resultsQueue.empty()) { + return {ClusterQueryResult()}; + } + + auto out = _resultsQueue.front(); + _resultsQueue.pop(); + return out; +} + +void RouterStageQueuedData::kill(OperationContext* opCtx) { + // No child to kill. +} + +bool RouterStageQueuedData::remotesExhausted() { + // No underlying remote cursor. + return true; +} + +std::size_t RouterStageQueuedData::getNumRemotes() const { + return 0; +} + +} // namespace mongo diff --git a/src/mongo/s/query/router_stage_queued_data.h b/src/mongo/s/query/router_stage_queued_data.h new file mode 100644 index 0000000000000..96c61311740d2 --- /dev/null +++ b/src/mongo/s/query/router_stage_queued_data.h @@ -0,0 +1,75 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/s/query/cluster_query_result.h" +#include "mongo/s/query/router_exec_stage.h" + +namespace mongo { + +/** + * Initialized by adding results to its results queue, it then passes through the results in its + * queue until the queue is empty. + */ +class RouterStageQueuedData final : public RouterExecStage { +public: + RouterStageQueuedData(OperationContext* opCtx) : RouterExecStage(opCtx) {} + ~RouterStageQueuedData() final {} + + StatusWith next() final; + + void kill(OperationContext* opCtx) final; + + bool remotesExhausted() final; + + std::size_t getNumRemotes() const final; + + /** + * Queues a BSONObj to be returned. 
+ */ + void queueResult(const ClusterQueryResult& result); + + /** + * Queues an error response. + */ + void queueError(Status status); + +private: + std::queue> _resultsQueue; +}; + +} // namespace mongo diff --git a/src/mongo/s/query/router_stage_remove_metadata_fields.cpp b/src/mongo/s/query/router_stage_remove_metadata_fields.cpp index 11b04a8b1281a..9d8797e13f360 100644 --- a/src/mongo/s/query/router_stage_remove_metadata_fields.cpp +++ b/src/mongo/s/query/router_stage_remove_metadata_fields.cpp @@ -27,14 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include - -#include "mongo/s/query/router_stage_remove_metadata_fields.h" +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/db/exec/document_value/document.h" +#include "mongo/bson/util/builder.h" +#include "mongo/s/query/router_stage_remove_metadata_fields.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/s/query/router_stage_remove_metadata_fields.h b/src/mongo/s/query/router_stage_remove_metadata_fields.h index b966a4f1aec3c..6f2fa74b7f356 100644 --- a/src/mongo/s/query/router_stage_remove_metadata_fields.h +++ b/src/mongo/s/query/router_stage_remove_metadata_fields.h @@ -29,8 +29,11 @@ #pragma once +#include #include +#include "mongo/base/status_with.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/router_exec_stage.h" #include "mongo/util/string_map.h" diff --git a/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp b/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp index 9fa199a2ac9c8..26b2cd7932073 100644 --- a/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp +++ b/src/mongo/s/query/router_stage_remove_metadata_fields_test.cpp @@ -27,17 +27,26 @@ * it in the license file. 
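The new RouterStageQueuedData above is essentially a FIFO of pre-computed results: callers queue results (or an error) up front and the stage drains them in order, returning an end-of-stream marker once the queue is empty. A stand-alone sketch of the same pattern, with simplified types in place of StatusWith and ClusterQueryResult:

```cpp
#include <optional>
#include <queue>
#include <string>

// Simplified stand-in for StatusWith<ClusterQueryResult>: either a document or an error message.
struct QueuedItem {
    bool isError;
    std::string payload;
};

class QueuedStage {
public:
    void queueResult(std::string doc) {
        _queue.push(QueuedItem{false, std::move(doc)});
    }
    void queueError(std::string message) {
        _queue.push(QueuedItem{true, std::move(message)});
    }

    // Drains the queue in FIFO order; an empty optional plays the role of the empty
    // ClusterQueryResult (end-of-stream) returned by the real stage.
    std::optional<QueuedItem> next() {
        if (_queue.empty()) {
            return std::nullopt;
        }
        QueuedItem out = std::move(_queue.front());
        _queue.pop();
        return out;
    }

private:
    std::queue<QueuedItem> _queue;
};
```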
*/ -#include "mongo/platform/basic.h" - -#include "mongo/s/query/router_stage_remove_metadata_fields.h" - #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/exec/document_value/document.h" #include "mongo/s/query/router_stage_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/query/router_stage_remove_metadata_fields.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/s/query/router_stage_skip.cpp b/src/mongo/s/query/router_stage_skip.cpp index 1fb09faaa63be..014e7761bc73f 100644 --- a/src/mongo/s/query/router_stage_skip.cpp +++ b/src/mongo/s/query/router_stage_skip.cpp @@ -28,9 +28,13 @@ */ -#include "mongo/platform/basic.h" +#include + +#include +#include #include "mongo/s/query/router_stage_skip.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/s/query/router_stage_skip.h b/src/mongo/s/query/router_stage_skip.h index 25accb7426a22..41621acff9fa2 100644 --- a/src/mongo/s/query/router_stage_skip.h +++ b/src/mongo/s/query/router_stage_skip.h @@ -29,6 +29,10 @@ #pragma once +#include + +#include "mongo/base/status_with.h" +#include "mongo/s/query/cluster_query_result.h" #include "mongo/s/query/router_exec_stage.h" namespace mongo { diff --git a/src/mongo/s/query/router_stage_skip_test.cpp b/src/mongo/s/query/router_stage_skip_test.cpp index 0264731b34c6e..cd09365fdcdca 100644 --- a/src/mongo/s/query/router_stage_skip_test.cpp +++ b/src/mongo/s/query/router_stage_skip_test.cpp @@ -27,16 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/s/query/router_stage_skip.h" - #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/query/router_stage_mock.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/query/router_stage_skip.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/s/query/store_possible_cursor.cpp b/src/mongo/s/query/store_possible_cursor.cpp index eb24db3e7cd10..376c214390522 100644 --- a/src/mongo/s/query/store_possible_cursor.cpp +++ b/src/mongo/s/query/store_possible_cursor.cpp @@ -27,22 +27,39 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/s/query/store_possible_cursor.h" +#include +#include #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" #include "mongo/db/auth/authorization_session.h" #include "mongo/db/curop.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/query_stats_key_generator.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/shard_id.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/s/grid.h" +#include "mongo/s/query/async_results_merger_params_gen.h" +#include "mongo/s/query/cluster_client_cursor.h" +#include "mongo/s/query/cluster_client_cursor_guard.h" #include "mongo/s/query/cluster_client_cursor_impl.h" #include "mongo/s/query/cluster_client_cursor_params.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/s/query/store_possible_cursor.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/decorable.h" namespace mongo { @@ -94,19 +111,29 @@ StatusWith storePossibleCursor(OperationContext* opCtx, // a split aggregation pipeline, and the shards half of that pipeline may have targeted multiple // shards. In that case, leave the current value as-is. opDebug.nShards = std::max(opDebug.nShards, 1); + CurOp::get(opCtx)->setEndOfOpMetrics(incomingCursorResponse.getValue().getBatch().size()); if (incomingCursorResponse.getValue().getCursorId() == CursorId(0)) { opDebug.cursorExhausted = true; - collectTelemetryMongos(opCtx, - CurOp::get(opCtx)->opDescription(), - incomingCursorResponse.getValue().getBatch().size()); + collectQueryStatsMongos(opCtx, std::move(opDebug.queryStatsKeyGenerator)); return cmdResult; } ClusterClientCursorParams params(incomingCursorResponse.getValue().getNSS(), APIParameters::get(opCtx), - boost::none, - repl::ReadConcernArgs::get(opCtx)); + boost::none /* ReadPreferenceSetting */, + repl::ReadConcernArgs::get(opCtx), + [&] { + if (!opCtx->getLogicalSessionId()) + return OperationSessionInfoFromClient(); + + OperationSessionInfoFromClient osi{ + *opCtx->getLogicalSessionId(), opCtx->getTxnNumber()}; + if (TransactionRouter::get(opCtx)) { + osi.setAutocommit(false); + } + return osi; + }()); params.remotes.emplace_back(); auto& remoteCursor = params.remotes.back(); remoteCursor.setShardId(shardId.toString()); @@ -119,19 +146,13 @@ StatusWith storePossibleCursor(OperationContext* opCtx, incomingCursorResponse.getValue().getPostBatchResumeToken())); params.originatingCommandObj = CurOp::get(opCtx)->opDescription().getOwned(); params.tailableMode = tailableMode; - params.lsid = opCtx->getLogicalSessionId(); - params.txnNumber = opCtx->getTxnNumber(); params.originatingPrivileges = std::move(privileges); if (routerSort) { params.sortToApplyOnRouter = *routerSort; } - if (TransactionRouter::get(opCtx)) { - params.isAutoCommit = false; - } - auto ccc = ClusterClientCursorImpl::make(opCtx, std::move(executor), std::move(params)); - collectTelemetryMongos(opCtx, ccc, incomingCursorResponse.getValue().getBatch().size()); + collectQueryStatsMongos(opCtx, ccc); // We don't expect to use this cursor until a subsequent getMore, so detach from the current // OperationContext until then. 
ccc->detachFromOperationContext(); diff --git a/src/mongo/s/query/store_possible_cursor.h b/src/mongo/s/query/store_possible_cursor.h index 8695ea9c15048..b95e4ce309d69 100644 --- a/src/mongo/s/query/store_possible_cursor.h +++ b/src/mongo/s/query/store_possible_cursor.h @@ -29,12 +29,22 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/privilege.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/query/tailable_mode.h" +#include "mongo/db/query/tailable_mode_gen.h" #include "mongo/db/shard_id.h" +#include "mongo/executor/task_executor.h" +#include "mongo/s/grid.h" #include "mongo/s/query/owned_remote_cursor.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/s/query/store_possible_cursor_test.cpp b/src/mongo/s/query/store_possible_cursor_test.cpp index ed92a642bc4f4..2429ee5976bfc 100644 --- a/src/mongo/s/query/store_possible_cursor_test.cpp +++ b/src/mongo/s/query/store_possible_cursor_test.cpp @@ -27,14 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" #include "mongo/s/query/cluster_cursor_manager.h" #include "mongo/s/query/store_possible_cursor.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/net/hostandport.h" @@ -47,11 +56,7 @@ const ShardId shardId("testshard"); class StorePossibleCursorTest : public ServiceContextTest { protected: - StorePossibleCursorTest() : _manager(&_clockSourceMock) { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); - _opCtx = makeOperationContext(); - } + StorePossibleCursorTest() : _opCtx(makeOperationContext()), _manager(&_clockSourceMock) {} OperationContext* opCtx() const { return _opCtx.get(); @@ -85,7 +90,8 @@ TEST_F(StorePossibleCursorTest, ReturnsValidCursorResponse) { auto parsedOutgoingResponse = CursorResponse::parseFromBSON(outgoingCursorResponse.getValue()); ASSERT_OK(parsedOutgoingResponse.getStatus()); - ASSERT_EQ(nss.toString(), parsedOutgoingResponse.getValue().getNSS().toString()); + ASSERT_EQ(nss.toString_forTest(), + parsedOutgoingResponse.getValue().getNSS().toString_forTest()); ASSERT_EQ(0U, parsedOutgoingResponse.getValue().getCursorId()); ASSERT_EQ(2U, parsedOutgoingResponse.getValue().getBatch().size()); ASSERT_BSONOBJ_EQ(fromjson("{_id: 1}"), parsedOutgoingResponse.getValue().getBatch()[0]); diff --git a/src/mongo/s/query_analysis_client.cpp b/src/mongo/s/query_analysis_client.cpp index cd66f1beb7dbd..7e4c6a309a1ae 100644 --- a/src/mongo/s/query_analysis_client.cpp +++ b/src/mongo/s/query_analysis_client.cpp @@ -29,10 +29,32 @@ #include "mongo/s/query_analysis_client.h" +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/concurrency/lock_manager_defs.h" #include 
"mongo/db/concurrency/replication_state_transition_lock_guard.h" #include "mongo/db/dbdirectclient.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/repl/replication_coordinator.h" -#include "mongo/logv2/log.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/net/hostandport.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -72,8 +94,8 @@ void QueryAnalysisClient::setTaskExecutor(ServiceContext* service, bool QueryAnalysisClient::_canAcceptWrites(OperationContext* opCtx, const DatabaseName& dbName) { repl::ReplicationStateTransitionLockGuard rstl(opCtx, MODE_IX); - return mongo::repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase( - opCtx, dbName.toString()); + return mongo::repl::ReplicationCoordinator::get(opCtx)->canAcceptWritesForDatabase(opCtx, + dbName); } BSONObj QueryAnalysisClient::_executeCommandOnPrimaryLocal( @@ -103,7 +125,7 @@ BSONObj QueryAnalysisClient::_executeCommandOnPrimaryRemote( invariant(executor, "Failed to run command since the executor has not been initialized"); executor::RemoteCommandRequest request( - std::move(hostAndPort), dbName.toString(), cmdObj, opCtx); + std::move(hostAndPort), DatabaseNameUtil::serialize(dbName), cmdObj, opCtx); auto [promise, future] = makePromiseFuture(); auto promisePtr = std::make_shared>( std::move(promise)); @@ -169,7 +191,7 @@ void QueryAnalysisClient::insert(OperationContext* opCtx, auto insertCmdObj = insertCmd.toBSON( {BSON(WriteConcernOptions::kWriteConcernField << kMajorityWriteConcern.toBSON())}); - executeCommandOnPrimary(opCtx, nss.db(), std::move(insertCmdObj), uassertCmdStatusFn); + executeCommandOnPrimary(opCtx, nss.dbName(), std::move(insertCmdObj), uassertCmdStatusFn); } } // namespace analyze_shard_key diff --git a/src/mongo/s/query_analysis_client.h b/src/mongo/s/query_analysis_client.h index a8ebb97f3e4f3..9eabfee01eebf 100644 --- a/src/mongo/s/query_analysis_client.h +++ b/src/mongo/s/query_analysis_client.h @@ -29,11 +29,17 @@ #pragma once -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/basic.h" namespace mongo { namespace analyze_shard_key { diff --git a/src/mongo/s/query_analysis_sample_tracker.cpp b/src/mongo/s/query_analysis_sample_tracker.cpp index 2d598fb576c29..7f7d023fd4b0f 100644 --- a/src/mongo/s/query_analysis_sample_tracker.cpp +++ b/src/mongo/s/query_analysis_sample_tracker.cpp @@ -29,8 +29,22 @@ #include "mongo/s/query_analysis_sample_tracker.h" +#include +#include +#include +#include + +#include + +#include "mongo/db/cluster_role.h" +#include "mongo/db/server_options.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/is_mongos.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { namespace analyze_shard_key { @@ -59,14 +73,16 @@ void 
QueryAnalysisSampleTracker::refreshConfigurations( auto it = _trackers.find(configuration.getNs()); if (it == _trackers.end() || it->second->getCollUuid() != configuration.getCollectionUuid()) { + // There is no existing CollectionSampleTracker for the collection with this specific + // collection uuid so create one for it. newTrackers.emplace(std::make_pair( configuration.getNs(), std::make_shared(configuration.getNs(), configuration.getCollectionUuid(), - configuration.getSampleRate(), + configuration.getSamplesPerSecond(), configuration.getStartTime()))); } else { - it->second->setSampleRate(configuration.getSampleRate()); + it->second->setSamplesPerSecond(configuration.getSamplesPerSecond()); it->second->setStartTime(configuration.getStartTime()); newTrackers.emplace(std::make_pair(configuration.getNs(), it->second)); } @@ -116,7 +132,7 @@ QueryAnalysisSampleTracker::_getOrCreateCollectionSampleTracker( .emplace(std::make_pair( nss, std::make_shared( - nss, *collUuid, 0 /* sampleRate */, startTime))) + nss, *collUuid, 0 /* samplesPerSec */, startTime))) .first; _sampledNamespaces.insert(nss); } @@ -141,7 +157,7 @@ BSONObj QueryAnalysisSampleTracker::CollectionSampleTracker::reportForCurrentOp( report.setSampledWritesBytes(_sampledWritesBytes); } if (isMongos() || serverGlobalParams.clusterRole.has(ClusterRole::None)) { - report.setSampleRate(_sampleRate); + report.setSamplesPerSecond(_samplesPerSec); } report.setStartTime(_startTime); @@ -163,5 +179,17 @@ BSONObj QueryAnalysisSampleTracker::reportForServerStatus() const { return res.toBSON(); } +bool QueryAnalysisSampleTracker::isSamplingActive(const NamespaceString& nss, + const UUID& collUuid) { + stdx::lock_guard lk(_mutex); + + auto it = _trackers.find(nss); + if (it == _trackers.end()) { + return false; + } + auto& tracker = it->second; + return tracker->getCollUuid() == collUuid; +} + } // namespace analyze_shard_key } // namespace mongo diff --git a/src/mongo/s/query_analysis_sample_tracker.h b/src/mongo/s/query_analysis_sample_tracker.h index f431601e1e869..1cdd0e6ed8a5c 100644 --- a/src/mongo/s/query_analysis_sample_tracker.h +++ b/src/mongo/s/query_analysis_sample_tracker.h @@ -29,6 +29,16 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" @@ -36,12 +46,9 @@ #include "mongo/platform/mutex.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/time_support.h" #include "mongo/util/uuid.h" -#include -#include -#include - namespace mongo { namespace analyze_shard_key { @@ -57,9 +64,12 @@ class QueryAnalysisSampleTracker { public: CollectionSampleTracker(const NamespaceString& nss, const UUID& collUuid, - double sampleRate, + double samplesPerSec, const Date_t& startTime) - : _nss(nss), _collUuid(collUuid), _sampleRate(sampleRate), _startTime(startTime){}; + : _nss(nss), + _collUuid(collUuid), + _samplesPerSec(samplesPerSec), + _startTime(startTime){}; NamespaceString getNs() const { return _nss; @@ -69,8 +79,8 @@ class QueryAnalysisSampleTracker { return _collUuid; } - void setSampleRate(double sampleRate) { - _sampleRate = sampleRate; + void setSamplesPerSecond(double samplesPerSec) { + _samplesPerSec = samplesPerSec; } void setStartTime(Date_t startTime) { @@ -106,7 +116,7 @@ class QueryAnalysisSampleTracker { int64_t _sampledReadsBytes = 0; int64_t _sampledWritesCount 
= 0; int64_t _sampledWritesBytes = 0; - double _sampleRate; + double _samplesPerSec; Date_t _startTime; }; @@ -145,6 +155,12 @@ class QueryAnalysisSampleTracker { */ BSONObj reportForServerStatus() const; + /** + * Returns true if query sampling is active for the collection with the given namespace and + * collection UUID. + */ + bool isSamplingActive(const NamespaceString& nss, const UUID& collUuid); + private: std::shared_ptr _getOrCreateCollectionSampleTracker( WithLock, diff --git a/src/mongo/s/query_analysis_sample_tracker_test.cpp b/src/mongo/s/query_analysis_sample_tracker_test.cpp index 0465e3a5bb5f7..ffe604b418a45 100644 --- a/src/mongo/s/query_analysis_sample_tracker_test.cpp +++ b/src/mongo/s/query_analysis_sample_tracker_test.cpp @@ -29,23 +29,26 @@ #include "mongo/s/query_analysis_sample_tracker.h" +#include +#include +#include + +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/service_context_test_fixture.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_options.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/is_mongos.h" -#include "mongo/s/sharding_router_test_fixture.h" #include "mongo/s/sharding_test_fixture_common.h" -#include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/tick_source_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/uuid.h" -#include -#include -#include - #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest namespace mongo { @@ -84,23 +87,20 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { (serverGlobalParams.clusterRole.has(ClusterRole::ShardServer) || serverGlobalParams.clusterRole.has(ClusterRole::None)); - const double sampleRate0 = 0.0; - const double sampleRate1Before = 0.0000000001; - const double sampleRate1After = 222.2; - // The mock size for each sampled read or write query, in bytes. const int64_t sampledQueryDocSizeBytes = 10; auto opCtx = operationContext(); QueryAnalysisSampleTracker& tracker = QueryAnalysisSampleTracker::get(opCtx); - // Add first configuration and refresh. + // Add the configuration for collection0 and refresh. std::vector configurationsV1; - auto startTime0 = now(); - configurationsV1.emplace_back(nss0, collUuid0, sampleRate0, startTime0); + const double samplesPerSec0Before = 0.0; + const auto startTime0Before = now(); + configurationsV1.emplace_back(nss0, collUuid0, samplesPerSec0Before, startTime0Before); tracker.refreshConfigurations(configurationsV1); - // Verify currentOp, one configuration. + // Verify currentOp, one active collection. 
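On the new QueryAnalysisSampleTracker::isSamplingActive() above: it only answers true while the tracked collection UUID still matches, so a caller can use it to bail out when sampling has been turned off or the collection was dropped and recreated under the same namespace. A hypothetical usage sketch; persistSampledQuery() is an invented placeholder declared here purely for illustration, not a real MongoDB function:

```cpp
#include "mongo/s/query_analysis_sample_tracker.h"

namespace mongo::analyze_shard_key {

// Invented helper, declared only so the sketch is complete.
void persistSampledQuery(OperationContext* opCtx,
                         const NamespaceString& nss,
                         const UUID& collUuid,
                         const BSONObj& sampledQueryDoc);

// Hypothetical caller: skip persisting a sampled query once sampling is no longer active
// for this exact collection incarnation (namespace + UUID).
void maybePersistSampledQuery(OperationContext* opCtx,
                              const NamespaceString& nss,
                              const UUID& collUuid,
                              const BSONObj& sampledQueryDoc) {
    auto& tracker = QueryAnalysisSampleTracker::get(opCtx);
    if (!tracker.isSamplingActive(nss, collUuid)) {
        return;
    }
    persistSampledQuery(opCtx, nss, collUuid, sampledQueryDoc);
}

}  // namespace mongo::analyze_shard_key
```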
std::vector ops; tracker.reportForCurrentOp(&ops); ASSERT_EQ(1, ops.size()); @@ -111,9 +111,9 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(parsedOp.getNs(), nss0); ASSERT_EQ(parsedOp.getCollUuid(), collUuid0); if (supportsSampling) { - ASSERT_EQ(parsedOp.getSampleRate(), sampleRate0); + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec0Before); } - ASSERT_EQ(parsedOp.getStartTime(), startTime0); + ASSERT_EQ(parsedOp.getStartTime(), startTime0Before); ASSERT_EQ(parsedOp.getSampledReadsCount(), 0); ASSERT_EQ(parsedOp.getSampledWritesCount(), 0); if (supportsPersisting) { @@ -121,7 +121,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*(parsedOp.getSampledWritesBytes()), 0L); } - // Verify server status, one configuration. + // Verify server status, one active collection. BSONObj serverStatus; serverStatus = tracker.reportForServerStatus(); auto parsedServerStatus = QueryAnalysisServerStatus::parse( @@ -136,14 +136,15 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*parsedServerStatus.getTotalSampledWritesBytes(), 0L); } - // Add second configuration and refresh. + // Add the configuration for collection1 and refresh. std::vector configurationsV2; - configurationsV2.emplace_back(nss0, collUuid0, sampleRate0, startTime0); - auto startTime1Before = now(); - configurationsV2.emplace_back(nss1, collUuid1, sampleRate1Before, startTime1Before); + configurationsV2.emplace_back(nss0, collUuid0, samplesPerSec0Before, startTime0Before); + const double samplesPerSec1Before = 0.0000000001; + const auto startTime1Before = now(); + configurationsV2.emplace_back(nss1, collUuid1, samplesPerSec1Before, startTime1Before); tracker.refreshConfigurations(configurationsV2); - // Verify currentOp, two configurations. + // Verify currentOp, two active collections. ops.clear(); tracker.reportForCurrentOp(&ops); ASSERT_EQ(2, ops.size()); @@ -154,9 +155,9 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(parsedOp.getNs(), nss0); ASSERT_EQ(parsedOp.getCollUuid(), collUuid0); if (supportsSampling) { - ASSERT_EQ(parsedOp.getSampleRate(), sampleRate0); + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec0Before); } - ASSERT_EQ(parsedOp.getStartTime(), startTime0); + ASSERT_EQ(parsedOp.getStartTime(), startTime0Before); ASSERT_EQ(parsedOp.getSampledReadsCount(), 0); ASSERT_EQ(parsedOp.getSampledWritesCount(), 0); if (supportsPersisting) { @@ -169,7 +170,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(parsedOp.getNs(), nss1); ASSERT_EQ(parsedOp.getCollUuid(), collUuid1); if (supportsSampling) { - ASSERT_EQ(parsedOp.getSampleRate(), sampleRate1Before); + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec1Before); } ASSERT_EQ(parsedOp.getStartTime(), startTime1Before); ASSERT_EQ(parsedOp.getSampledReadsCount(), 0); @@ -179,7 +180,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*(parsedOp.getSampledWritesBytes()), 0L); } - // Verify server status, two configurations. + // Verify server status, two active collections. 
serverStatus = tracker.reportForServerStatus(); parsedServerStatus = QueryAnalysisServerStatus::parse( IDLParserContext("QueryAnalysisSampleTrackerTest.RefreshConfigIncrementAndReport_TEST"), @@ -193,10 +194,10 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*parsedServerStatus.getTotalSampledWritesBytes(), 0L); } - // Modify second configuration and refresh. + // Update the sample rate for collection1 and refresh. std::vector configurationsV3; - configurationsV3.emplace_back(nss0, collUuid0, sampleRate0, startTime0); - configurationsV3.emplace_back(nss1, collUuid1, sampleRate1After, startTime1Before); + configurationsV3.emplace_back(nss0, collUuid0, samplesPerSec0Before, startTime0Before); + configurationsV3.emplace_back(nss1, collUuid1, samplesPerSec1Before, startTime1Before); tracker.refreshConfigurations(configurationsV3); // Increment read and write counters. @@ -214,7 +215,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { tracker.incrementWrites(opCtx, nss1, collUuid1); } - // Verify currentOp, two configurations, updated sample rate. + // Verify currentOp, two active collections. ops.clear(); tracker.reportForCurrentOp(&ops); ASSERT_EQ(2, ops.size()); @@ -224,9 +225,9 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(parsedOp.getNs(), nss0); ASSERT_EQ(parsedOp.getCollUuid(), collUuid0); if (supportsSampling) { - ASSERT_EQ(parsedOp.getSampleRate(), sampleRate0); + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec0Before); } - ASSERT_EQ(parsedOp.getStartTime(), startTime0); + ASSERT_EQ(parsedOp.getStartTime(), startTime0Before); ASSERT_EQ(parsedOp.getSampledReadsCount(), 1); ASSERT_EQ(parsedOp.getSampledWritesCount(), 1); if (supportsPersisting) { @@ -239,7 +240,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(parsedOp.getNs(), nss1); ASSERT_EQ(parsedOp.getCollUuid(), collUuid1); if (supportsSampling) { - ASSERT_EQ(parsedOp.getSampleRate(), sampleRate1After); + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec1Before); } ASSERT_EQ(parsedOp.getStartTime(), startTime1Before); ASSERT_EQ(parsedOp.getSampledReadsCount(), 2); @@ -249,7 +250,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*(parsedOp.getSampledWritesBytes()), sampledQueryDocSizeBytes); } - // Verify server status, two configurations. + // Verify server status, two active collections. serverStatus = tracker.reportForServerStatus(); parsedServerStatus = QueryAnalysisServerStatus::parse( IDLParserContext("QueryAnalysisSampleTrackerTest.RefreshConfigIncrementAndReport_TEST"), @@ -263,12 +264,12 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*parsedServerStatus.getTotalSampledWritesBytes(), 2 * sampledQueryDocSizeBytes); } - // Second configuration becomes inactive. + // Remove the configuration for collection1. std::vector configurationsV4; - configurationsV4.emplace_back(nss0, collUuid0, sampleRate0, startTime0); + configurationsV4.emplace_back(nss0, collUuid0, samplesPerSec0Before, startTime0Before); tracker.refreshConfigurations(configurationsV4); - // Verify currentOp, one remaining configuration. + // Verify currentOp, one active configuration. 
ops.clear(); tracker.reportForCurrentOp(&ops); ASSERT_EQ(1, ops.size()); @@ -278,9 +279,9 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(parsedOp.getNs(), nss0); ASSERT_EQ(parsedOp.getCollUuid(), collUuid0); if (supportsSampling) { - ASSERT_EQ(parsedOp.getSampleRate(), sampleRate0); + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec0Before); } - ASSERT_EQ(parsedOp.getStartTime(), startTime0); + ASSERT_EQ(parsedOp.getStartTime(), startTime0Before); ASSERT_EQ(parsedOp.getSampledReadsCount(), 1); ASSERT_EQ(parsedOp.getSampledWritesCount(), 1); if (supportsPersisting) { @@ -288,7 +289,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*(parsedOp.getSampledWritesBytes()), sampledQueryDocSizeBytes); } - // Verify server status, one remaining configuration. + // Verify server status, one active configuration. serverStatus = tracker.reportForServerStatus(); parsedServerStatus = QueryAnalysisServerStatus::parse( IDLParserContext("QueryAnalysisSampleTrackerTest.RefreshConfigIncrementAndReport_TEST"), @@ -302,14 +303,15 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*parsedServerStatus.getTotalSampledWritesBytes(), 2 * sampledQueryDocSizeBytes); } - // Second configuration becomes active again + // Add new configuration for collection1 (same collection uuid). std::vector configurationsV5; - configurationsV5.emplace_back(nss0, collUuid0, sampleRate0, startTime0); - auto startTime1After = now(); - configurationsV5.emplace_back(nss1, collUuid1, sampleRate1After, startTime1After); + configurationsV5.emplace_back(nss0, collUuid0, samplesPerSec0Before, startTime0Before); + const double samplesPerSec1After = 222.2; + const auto startTime1After = now(); + configurationsV5.emplace_back(nss1, collUuid1, samplesPerSec1After, startTime1After); tracker.refreshConfigurations(configurationsV5); - // Verify currentOp, two configurations, updated sample rate. + // Verify currentOp, two active collections. ops.clear(); tracker.reportForCurrentOp(&ops); ASSERT_EQ(2, ops.size()); @@ -320,9 +322,9 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(parsedOp.getNs(), nss0); ASSERT_EQ(parsedOp.getCollUuid(), collUuid0); if (supportsSampling) { - ASSERT_EQ(parsedOp.getSampleRate(), sampleRate0); + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec0Before); } - ASSERT_EQ(parsedOp.getStartTime(), startTime0); + ASSERT_EQ(parsedOp.getStartTime(), startTime0Before); ASSERT_EQ(parsedOp.getSampledReadsCount(), 1); ASSERT_EQ(parsedOp.getSampledWritesCount(), 1); if (supportsPersisting) { @@ -336,7 +338,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(parsedOp.getNs(), nss1); ASSERT_EQ(parsedOp.getCollUuid(), collUuid1); if (supportsSampling) { - ASSERT_EQ(parsedOp.getSampleRate(), sampleRate1After); + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec1After); } ASSERT_EQ(parsedOp.getStartTime(), startTime1After); ASSERT_EQ(parsedOp.getSampledReadsCount(), 0); @@ -346,7 +348,7 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*(parsedOp.getSampledWritesBytes()), 0L); } - // Verify server status, two configurations. + // Verify server status, two active collections. 
serverStatus = tracker.reportForServerStatus(); parsedServerStatus = QueryAnalysisServerStatus::parse( IDLParserContext("QueryAnalysisSampleTrackerTest.RefreshConfigIncrementAndReport_TEST"), @@ -359,6 +361,50 @@ void QueryAnalysisSampleTrackerTest::testRefreshConfigIncrementAndReport() { ASSERT_EQ(*parsedServerStatus.getTotalSampledReadsBytes(), 3 * sampledQueryDocSizeBytes); ASSERT_EQ(*parsedServerStatus.getTotalSampledWritesBytes(), 2 * sampledQueryDocSizeBytes); } + + // Remove the configuration for collection1, and make the configuration for collection0 have + // a different collection uuid. + std::vector configurationsV6; + const auto collUuid0After = UUID::gen(); + const auto samplesPerSec0After = 1.5; + const auto startTime0After = now(); + configurationsV6.emplace_back(nss0, collUuid0After, samplesPerSec0After, startTime0After); + tracker.refreshConfigurations(configurationsV6); + + // Verify currentOp, one active collection. The counters should have been reset. + ops.clear(); + tracker.reportForCurrentOp(&ops); + ASSERT_EQ(1, ops.size()); + parsedOp = CollectionSampleCountersCurrentOp::parse( + IDLParserContext("QueryAnalysisSampleTrackerTest.RefreshConfigIncrementAndReport_TEST"), + ops[0]); + ASSERT_EQ(parsedOp.getDesc(), kCurrentOpDescFieldValue); + ASSERT_EQ(parsedOp.getNs(), nss0); + ASSERT_EQ(parsedOp.getCollUuid(), collUuid0After); + if (supportsSampling) { + ASSERT_EQ(parsedOp.getSamplesPerSecond(), samplesPerSec0After); + } + ASSERT_EQ(parsedOp.getStartTime(), startTime0After); + ASSERT_EQ(parsedOp.getSampledReadsCount(), 0); + ASSERT_EQ(parsedOp.getSampledWritesCount(), 0); + if (supportsPersisting) { + ASSERT_EQ(*parsedOp.getSampledReadsBytes(), 0); + ASSERT_EQ(*parsedOp.getSampledWritesBytes(), 0); + } + + // Verify server status, one active collection. + serverStatus = tracker.reportForServerStatus(); + parsedServerStatus = QueryAnalysisServerStatus::parse( + IDLParserContext("QueryAnalysisSampleTrackerTest.RefreshConfigIncrementAndReport_TEST"), + serverStatus); + ASSERT_EQ(parsedServerStatus.getActiveCollections(), 1); + ASSERT_EQ(parsedServerStatus.getTotalCollections(), 2); + ASSERT_EQ(parsedServerStatus.getTotalSampledReadsCount(), 3); + ASSERT_EQ(parsedServerStatus.getTotalSampledWritesCount(), 2); + if (supportsPersisting) { + ASSERT_EQ(*parsedServerStatus.getTotalSampledReadsBytes(), 3 * sampledQueryDocSizeBytes); + ASSERT_EQ(*parsedServerStatus.getTotalSampledWritesBytes(), 2 * sampledQueryDocSizeBytes); + } } TEST_F(QueryAnalysisSampleTrackerTest, RefreshConfigIncrementAndReportMongos) { @@ -395,6 +441,17 @@ TEST_F(QueryAnalysisSampleTrackerTest, RefreshConfigIncrementAndReportReplSetMon testRefreshConfigIncrementAndReport(); } +TEST_F(QueryAnalysisSampleTrackerTest, IsSamplingActive) { + auto& tracker = QueryAnalysisSampleTracker::get(operationContext()); + + ASSERT_FALSE(tracker.isSamplingActive(nss0, collUuid0)); + auto configuration0 = + CollectionQueryAnalyzerConfiguration(nss0, collUuid0, 100 /* samplesPerSec */, now()); + tracker.refreshConfigurations({configuration0}); + ASSERT(tracker.isSamplingActive(nss0, collUuid0)); + ASSERT_FALSE(tracker.isSamplingActive(nss0, UUID::gen())); +} + } // namespace } // namespace analyze_shard_key } // namespace mongo diff --git a/src/mongo/s/query_analysis_sampler.cpp b/src/mongo/s/query_analysis_sampler.cpp index def42f0b9d500..21e8d41d9edf8 100644 --- a/src/mongo/s/query_analysis_sampler.cpp +++ b/src/mongo/s/query_analysis_sampler.cpp @@ -27,18 +27,51 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/s/query_analysis_sampler.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/database_name.h" +#include "mongo/db/server_options.h" #include "mongo/db/stats/counters.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/s/analyze_shard_key_util.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/analyze_shard_key_role.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" #include "mongo/s/query_analysis_client.h" #include "mongo/s/query_analysis_sample_tracker.h" +#include "mongo/s/query_analysis_sampler.h" #include "mongo/s/refresh_query_analyzer_configuration_cmd_gen.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/fail_point.h" #include "mongo/util/net/socket_utils.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -108,7 +141,7 @@ QueryAnalysisSampler& QueryAnalysisSampler::get(OperationContext* opCtx) { } QueryAnalysisSampler& QueryAnalysisSampler::get(ServiceContext* serviceContext) { - invariant(analyze_shard_key::supportsSamplingQueries(serviceContext, true /* ignoreFCV */)); + invariant(analyze_shard_key::supportsSamplingQueries(serviceContext)); return getQueryAnalysisSampler(serviceContext); } @@ -122,7 +155,9 @@ void QueryAnalysisSampler::onStartup() { PeriodicRunner::PeriodicJob queryStatsRefresherJob( "QueryAnalysisQueryStatsRefresher", [this](Client* client) { _refreshQueryStats(); }, - Seconds(1)); + Seconds(1), + // TODO(SERVER-74662): Please revisit if this periodic job could be made killable. + false /*isKillableByStepdown*/); _periodicQueryStatsRefresher = periodicRunner->makeJob(std::move(queryStatsRefresherJob)); _periodicQueryStatsRefresher.start(); @@ -139,7 +174,9 @@ void QueryAnalysisSampler::onStartup() { "error"_attr = redact(ex)); } }, - Seconds(gQueryAnalysisSamplerConfigurationRefreshSecs)); + Seconds(gQueryAnalysisSamplerConfigurationRefreshSecs), + // TODO(SERVER-74662): Please revisit if this periodic job could be made killable. + false /*isKillableByStepdown*/); _periodicConfigurationsRefresher = periodicRunner->makeJob(std::move(configurationsRefresherJob)); _periodicConfigurationsRefresher.start(); @@ -266,7 +303,7 @@ bool QueryAnalysisSampler::SampleRateLimiter::tryConsume() { return false; } -void QueryAnalysisSampler::SampleRateLimiter::refreshRate(double numTokensPerSecond) { +void QueryAnalysisSampler::SampleRateLimiter::refreshSamplesPerSecond(double numTokensPerSecond) { // Fill the bucket with tokens created by the previous rate before setting a new rate. 
_refill(_numTokensPerSecond, _getBurstCapacity(numTokensPerSecond)); _numTokensPerSecond = numTokensPerSecond; @@ -328,15 +365,10 @@ void QueryAnalysisSampler::_refreshConfigurations(OperationContext* opCtx) { SampleRateLimiter{opCtx->getServiceContext(), configuration.getNs(), configuration.getCollectionUuid(), - configuration.getSampleRate()}); + configuration.getSamplesPerSecond()}); } else { auto rateLimiter = it->second; - if (it->second.getNss() != configuration.getNs()) { - // Nss changed due to collection rename. - // TODO SERVER-73990: Test collection renaming during query sampling - it->second.setNss(configuration.getNs()); - } - rateLimiter.refreshRate(configuration.getSampleRate()); + rateLimiter.refreshSamplesPerSecond(configuration.getSamplesPerSecond()); sampleRateLimiters.emplace(configuration.getNs(), std::move(rateLimiter)); } } diff --git a/src/mongo/s/query_analysis_sampler.h b/src/mongo/s/query_analysis_sampler.h index b27969afa8af9..30f82c3ac310f 100644 --- a/src/mongo/s/query_analysis_sampler.h +++ b/src/mongo/s/query_analysis_sampler.h @@ -29,12 +29,23 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/analyze_shard_key_server_parameters_gen.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/periodic_runner.h" +#include "mongo/util/tick_source.h" +#include "mongo/util/uuid.h" namespace mongo { namespace analyze_shard_key { @@ -133,7 +144,7 @@ class QueryAnalysisSampler final { return _collUuid; } - double getRate() const { + double getSamplesPerSecond() const { return _numTokensPerSecond; } @@ -153,7 +164,7 @@ class QueryAnalysisSampler final { * Sets a new rate. Causes the bucket to be refilled with tokens created since last refill * time according to the previous rate. 
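To make the comment above concrete, here is a self-contained token-bucket sketch of the SampleRateLimiter behaviour it describes: on a rate refresh, tokens earned under the old rate are credited first (bounded by the new burst capacity), and only then does the new rate take effect. The burst-capacity formula max(1, multiplier * rate) and the initially empty bucket are inferred from the tests that follow; treat the whole thing as a simplification, not the real implementation.

```cpp
#include <algorithm>
#include <chrono>

class TokenBucket {
public:
    TokenBucket(double ratePerSec, double burstMultiplier)
        : _rate(ratePerSec),
          _burstMultiplier(burstMultiplier),
          _lastRefill(std::chrono::steady_clock::now()) {}

    // Consume one token if available; the bucket starts out empty.
    bool tryConsume() {
        _refill(_rate, _capacityFor(_rate));
        if (_tokens < 1.0) {
            return false;
        }
        _tokens -= 1.0;
        return true;
    }

    // Credit tokens generated under the *old* rate (up to the new burst capacity),
    // then switch to the new rate, mirroring refreshSamplesPerSecond().
    void refreshRate(double newRatePerSec) {
        _refill(_rate, _capacityFor(newRatePerSec));
        _rate = newRatePerSec;
    }

private:
    double _capacityFor(double rate) const {
        return std::max(1.0, _burstMultiplier * rate);
    }

    void _refill(double rate, double capacity) {
        auto now = std::chrono::steady_clock::now();
        double elapsedSecs = std::chrono::duration<double>(now - _lastRefill).count();
        _tokens = std::min(capacity, _tokens + elapsedSecs * rate);
        _lastRefill = now;
    }

    double _rate;
    double _burstMultiplier;
    double _tokens = 0.0;
    std::chrono::steady_clock::time_point _lastRefill;
};
```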
*/ - void refreshRate(double numTokensPerSecond); + void refreshSamplesPerSecond(double numTokensPerSecond); private: /** diff --git a/src/mongo/s/query_analysis_sampler_test.cpp b/src/mongo/s/query_analysis_sampler_test.cpp index 9a5eadf213c4d..b6fd3c45b23b4 100644 --- a/src/mongo/s/query_analysis_sampler_test.cpp +++ b/src/mongo/s/query_analysis_sampler_test.cpp @@ -29,20 +29,47 @@ #include "mongo/s/query_analysis_sampler.h" +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/repl/repl_settings.h" +#include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/repl/replication_coordinator_mock.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/db/stats/counters.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/idl/idl_parser.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/is_mongos.h" #include "mongo/s/refresh_query_analyzer_configuration_cmd_gen.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/stdx/future.h" +#include "mongo/transport/session.h" #include "mongo/transport/transport_layer_mock.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/periodic_runner_factory.h" #include "mongo/util/tick_source_mock.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -83,19 +110,19 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, BurstMultiplierEqualToOne) { // multiplier * rate > 1 auto rateLimiter0 = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 5); - ASSERT_EQ(rateLimiter0.getRate(), 5); + ASSERT_EQ(rateLimiter0.getSamplesPerSecond(), 5); ASSERT_EQ(rateLimiter0.getBurstCapacity(), 5); // multiplier * rate = 1 auto rateLimiter1 = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1); - ASSERT_EQ(rateLimiter1.getRate(), 1); + ASSERT_EQ(rateLimiter1.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter1.getBurstCapacity(), 1); // multiplier * rate < 1 auto rateLimiter2 = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 0.1); - ASSERT_EQ(rateLimiter2.getRate(), 0.1); + ASSERT_EQ(rateLimiter2.getSamplesPerSecond(), 0.1); ASSERT_EQ(rateLimiter2.getBurstCapacity(), 1); } @@ -106,19 +133,19 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, BurstMultiplierGreaterThanOne) { // multiplier * rate > 1 auto rateLimiter0 = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 5); - ASSERT_EQ(rateLimiter0.getRate(), 5); + ASSERT_EQ(rateLimiter0.getSamplesPerSecond(), 5); ASSERT_EQ(rateLimiter0.getBurstCapacity(), 12.5); // multiplier * rate = 1 auto rateLimiter1 = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 0.4); - ASSERT_EQ(rateLimiter1.getRate(), 0.4); + ASSERT_EQ(rateLimiter1.getSamplesPerSecond(), 0.4); ASSERT_EQ(rateLimiter1.getBurstCapacity(), 1); // multiplier * rate < 1 auto 
rateLimiter2 = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 0.1); - ASSERT_EQ(rateLimiter2.getRate(), 0.1); + ASSERT_EQ(rateLimiter2.getSamplesPerSecond(), 0.1); ASSERT_EQ(rateLimiter2.getBurstCapacity(), 1); } @@ -128,7 +155,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAfterOneSecond) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 2); - ASSERT_EQ(rateLimiter.getRate(), 2); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 2); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -146,7 +173,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAfterLessThanOneSecond) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 4); - ASSERT_EQ(rateLimiter.getRate(), 4); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 4); ASSERT_EQ(rateLimiter.getBurstCapacity(), 4); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -164,7 +191,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAfterMoreThanOneSecond) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 0.5); - ASSERT_EQ(rateLimiter.getRate(), 0.5); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 0.5); ASSERT_EQ(rateLimiter.getBurstCapacity(), 1); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -181,7 +208,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeEpsilonAbove) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1); - ASSERT_EQ(rateLimiter.getRate(), 1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 1); ASSERT_GTE(QueryAnalysisSampler::SampleRateLimiter::kEpsilon, 0.001); // There are no token available in the bucket initially. @@ -199,7 +226,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeRemainingTokens) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 2); - ASSERT_EQ(rateLimiter.getRate(), 2); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 2); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -221,7 +248,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeBurstCapacity) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1); - ASSERT_EQ(rateLimiter.getRate(), 1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -239,7 +266,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAboveBurstCapacity) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1); - ASSERT_EQ(rateLimiter.getRate(), 1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); // There are no token available in the bucket initially. 
ASSERT_FALSE(rateLimiter.tryConsume()); @@ -257,7 +284,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeBelowBurstCapacity) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1); - ASSERT_EQ(rateLimiter.getRate(), 1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -278,7 +305,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAfterRefresh_RateIncreased) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 0.1); - ASSERT_EQ(rateLimiter.getRate(), 0.1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 0.1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 1); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -286,8 +313,8 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAfterRefresh_RateIncreased) { advanceTime(Milliseconds(20000)); // The number of tokens available in the bucket right after the refill is 2 (note that this is // greater than the pre-refresh capacity). - rateLimiter.refreshRate(1); - ASSERT_EQ(rateLimiter.getRate(), 1); + rateLimiter.refreshSamplesPerSecond(1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); ASSERT(rateLimiter.tryConsume()); ASSERT(rateLimiter.tryConsume()); @@ -307,7 +334,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAfterRefresh_RateDecreased) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1); - ASSERT_EQ(rateLimiter.getRate(), 1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -315,8 +342,8 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAfterRefresh_RateDecreased) { advanceTime(Milliseconds(2000)); // The number of tokens available in the bucket right after the refill is 1 (note that this is // less than the pre-refresh capacity). - rateLimiter.refreshRate(0.1); - ASSERT_EQ(rateLimiter.getRate(), 0.1); + rateLimiter.refreshSamplesPerSecond(0.1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 0.1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 1); ASSERT(rateLimiter.tryConsume()); ASSERT_FALSE(rateLimiter.tryConsume()); @@ -338,15 +365,15 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, ConsumeAfterRefresh_RateUnchanged) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1); - ASSERT_EQ(rateLimiter.getRate(), 1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); advanceTime(Milliseconds(1000)); // The number of tokens available in the bucket right after the refill is 1. 
- rateLimiter.refreshRate(1); - ASSERT_EQ(rateLimiter.getRate(), 1); + rateLimiter.refreshSamplesPerSecond(1); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1); ASSERT_EQ(rateLimiter.getBurstCapacity(), 2); advanceTime(Milliseconds(1000)); @@ -362,7 +389,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, MicrosecondResolution) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1e6); - ASSERT_EQ(rateLimiter.getRate(), 1e6); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1e6); ASSERT_EQ(rateLimiter.getBurstCapacity(), 1e6); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -379,7 +406,7 @@ TEST_F(QueryAnalysisSamplerRateLimiterTest, NanosecondsResolution) { auto rateLimiter = QueryAnalysisSampler::SampleRateLimiter(getServiceContext(), nss, collUuid, 1e9); - ASSERT_EQ(rateLimiter.getRate(), 1e9); + ASSERT_EQ(rateLimiter.getSamplesPerSecond(), 1e9); ASSERT_EQ(rateLimiter.getBurstCapacity(), 1e9); // There are no token available in the bucket initially. ASSERT_FALSE(rateLimiter.tryConsume()); @@ -416,9 +443,9 @@ class QueryAnalysisSamplerTest : public ShardingTestFixture { serverGlobalParams.clusterRole = ClusterRole::None; } - void setUpRole(ClusterRole role, bool isReplEnabled = true) { + void setUpRole(std::initializer_list roles, bool isReplEnabled = true) { setMongos(false); - serverGlobalParams.clusterRole = role; + serverGlobalParams.clusterRole = roles; auto replCoord = [&] { if (isReplEnabled) { @@ -503,33 +530,26 @@ class QueryAnalysisSamplerTest : public ShardingTestFixture { const UUID collUuid2 = UUID::gen(); private: - RAIIServerParameterControllerForTest _featureFlagController{"featureFlagAnalyzeShardKey", true}; bool _originalIsMongos; }; TEST_F(QueryAnalysisSamplerTest, CanGetOnShardServer) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); QueryAnalysisSampler::get(operationContext()); } TEST_F(QueryAnalysisSamplerTest, CanGetOnStandaloneReplicaSet) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); QueryAnalysisSampler::get(operationContext()); } TEST_F(QueryAnalysisSamplerTest, CanGetOnConfigServer) { - setUpRole(ClusterRole::ConfigServer); + setUpRole({ClusterRole::ShardServer, ClusterRole::ConfigServer}); QueryAnalysisSampler::get(operationContext()); } DEATH_TEST_F(QueryAnalysisSamplerTest, CannotGetOnStandaloneMongod, "invariant") { - setUpRole(ClusterRole::None, false /* isReplEnabled */); - QueryAnalysisSampler::get(operationContext()); -} - -DEATH_TEST_F(QueryAnalysisSamplerTest, CannotGetIfFeatureFlagNotEnabled, "invariant") { - RAIIServerParameterControllerForTest _featureFlagController{"featureFlagAnalyzeShardKey", - false}; + setUpRole({ClusterRole::None}, false /* isReplEnabled */); QueryAnalysisSampler::get(operationContext()); } @@ -721,113 +741,113 @@ TEST_F(QueryAnalysisSamplerQueryStatsTest, TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_NotCountInsertsTrackedByOpCounters) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testInsertsTrackedByOpCounters(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_CountUpdatesTrackedByOpCounters) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testUpdatesTrackedByOpCounters(true /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_CountFindAndModify) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); 
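
Editorial note: the `setUpRole` changes in the surrounding tests replace a single `ClusterRole` argument with an initializer list, and a config server is now set up as `{ClusterRole::ShardServer, ClusterRole::ConfigServer}`; elsewhere in this patch the role is queried with `clusterRole.has(...)`. In other words, the cluster role behaves like a set of roles a node can hold simultaneously. A minimal sketch of that pattern (illustrative only, not the `ClusterRole` class itself) might look like this:

```cpp
#include <cstdint>
#include <initializer_list>
#include <iostream>

// Illustrative role-set sketch; names and bit values are assumptions.
enum class Role : uint8_t { None = 1 << 0, ShardServer = 1 << 1, ConfigServer = 1 << 2 };

class RoleSet {
public:
    RoleSet(std::initializer_list<Role> roles) {
        for (auto r : roles)
            _mask |= static_cast<uint8_t>(r);
    }

    // True if this node holds the given role (a node may hold several at once).
    bool has(Role r) const {
        return (_mask & static_cast<uint8_t>(r)) != 0;
    }

private:
    uint8_t _mask = 0;
};

int main() {
    // A config server that also acts as a shard server.
    RoleSet role{Role::ShardServer, Role::ConfigServer};
    std::cout << role.has(Role::ShardServer) << ' '
              << role.has(Role::ConfigServer) << ' '
              << role.has(Role::None) << '\n';  // prints: 1 1 0
}
```
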
testFindAndModify(true /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_CountDeletesTrackedByOpCounters) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testDeletesTrackedByOpCounters(true /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_CountQueriesTrackedByOpCounters) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testQueriesTrackedByOpCounters(true /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_NotCountCommandsTrackedByOpCounters) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testCommandsTrackedByOpCounters(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_CountAggregates) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testAggregates(true /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_CountCounts) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testCounts(true /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_CountDistincts) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testDistincts(true /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsReplSetMongod_NotCountNestedAggregatesTrackedByOpCounters) { - setUpRole(ClusterRole::None); + setUpRole({ClusterRole::None}); testNestedAggregates(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_NotCountInsertsTrackedByOpCounters) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testInsertsTrackedByOpCounters(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_NotCountUpdatesTrackedByOpCounters) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testUpdatesTrackedByOpCounters(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_NotCountDeletesTrackedByOpCounters) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testDeletesTrackedByOpCounters(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_NotCountFindAndModify) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testFindAndModify(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_NotCountQueriesTrackedByOpCounters) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testQueriesTrackedByOpCounters(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_NotCountCommandsTrackedByOpCounters) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testCommandsTrackedByOpCounters(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_NotCountAggregates) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testAggregates(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_NotCountCounts) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testCounts(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, 
RefreshQueryStatsShardSvrMongod_NotCountDistincts) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testDistincts(false /* shouldCount */); } TEST_F(QueryAnalysisSamplerQueryStatsTest, RefreshQueryStatsShardSvrMongod_CountNestedAggregatesTrackedByOpCounters) { - setUpRole(ClusterRole::ShardServer); + setUpRole({ClusterRole::ShardServer}); testNestedAggregates(true /* shouldCount */); } @@ -876,12 +896,12 @@ TEST_F(QueryAnalysisSamplerTest, RefreshQueryStatsAndConfigurations) { auto it0 = rateLimiters1.find(refreshedConfigurations1[0].getNs()); ASSERT(it0 != rateLimiters1.end()); ASSERT_EQ(it0->second.getCollectionUuid(), refreshedConfigurations1[0].getCollectionUuid()); - ASSERT_EQ(it0->second.getRate(), refreshedConfigurations1[0].getSampleRate()); + ASSERT_EQ(it0->second.getSamplesPerSecond(), refreshedConfigurations1[0].getSamplesPerSecond()); auto it1 = rateLimiters1.find(refreshedConfigurations1[1].getNs()); ASSERT(it1 != rateLimiters1.end()); ASSERT_EQ(it1->second.getCollectionUuid(), refreshedConfigurations1[1].getCollectionUuid()); - ASSERT_EQ(it1->second.getRate(), refreshedConfigurations1[1].getSampleRate()); + ASSERT_EQ(it1->second.getSamplesPerSecond(), refreshedConfigurations1[1].getSamplesPerSecond()); // The per-second counts after: [0, 2]. globalOpCounters.gotUpdate(); @@ -913,7 +933,7 @@ TEST_F(QueryAnalysisSamplerTest, RefreshQueryStatsAndConfigurations) { auto it = rateLimiters2.find(refreshedConfigurations2[0].getNs()); ASSERT(it != rateLimiters2.end()); ASSERT_EQ(it->second.getCollectionUuid(), refreshedConfigurations2[0].getCollectionUuid()); - ASSERT_EQ(it->second.getRate(), refreshedConfigurations2[0].getSampleRate()); + ASSERT_EQ(it->second.getSamplesPerSecond(), refreshedConfigurations2[0].getSamplesPerSecond()); // The per-second counts after: [0, 2, 5]. globalOpCounters.gotQuery(); @@ -1055,7 +1075,7 @@ TEST_F(QueryAnalysisSamplerTest, RefreshConfigurationsNewCollectionUuid) { auto oldIt = oldRateLimiters.find(oldConfigurations[0].getNs()); ASSERT(oldIt != oldRateLimiters.end()); ASSERT_EQ(oldIt->second.getCollectionUuid(), oldConfigurations[0].getCollectionUuid()); - ASSERT_EQ(oldIt->second.getRate(), oldConfigurations[0].getSampleRate()); + ASSERT_EQ(oldIt->second.getSamplesPerSecond(), oldConfigurations[0].getSamplesPerSecond()); advanceTime(Milliseconds(1000)); // The number of tokens available in the bucket right after the refill is 0 + 2. @@ -1079,7 +1099,7 @@ TEST_F(QueryAnalysisSamplerTest, RefreshConfigurationsNewCollectionUuid) { auto newIt = newRateLimiters.find(newConfigurations[0].getNs()); ASSERT(newIt != newRateLimiters.end()); ASSERT_EQ(newIt->second.getCollectionUuid(), newConfigurations[0].getCollectionUuid()); - ASSERT_EQ(newIt->second.getRate(), newConfigurations[0].getSampleRate()); + ASSERT_EQ(newIt->second.getSamplesPerSecond(), newConfigurations[0].getSamplesPerSecond()); // Cannot sample if time has not elapsed. 
There should be no tokens available in the bucket // right after the refill unless the one token from the previous configurations was diff --git a/src/mongo/s/query_analysis_sampler_util.cpp b/src/mongo/s/query_analysis_sampler_util.cpp index 66bfae64df497..17f534b490fea 100644 --- a/src/mongo/s/query_analysis_sampler_util.cpp +++ b/src/mongo/s/query_analysis_sampler_util.cpp @@ -29,6 +29,14 @@ #include "mongo/s/query_analysis_sampler_util.h" +#include +#include +#include + +#include +#include +#include + #include "mongo/idl/idl_parser.h" #include "mongo/platform/random.h" #include "mongo/util/static_immortal.h" diff --git a/src/mongo/s/query_analysis_sampler_util.h b/src/mongo/s/query_analysis_sampler_util.h index 2ae61dfb58352..7b3ebc811e32e 100644 --- a/src/mongo/s/query_analysis_sampler_util.h +++ b/src/mongo/s/query_analysis_sampler_util.h @@ -29,14 +29,32 @@ #pragma once -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/cluster_role.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/basic.h" #include "mongo/s/analyze_shard_key_common_gen.h" #include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/query_analysis_sampler.h" #include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/testing_proctor.h" #include "mongo/util/uuid.h" namespace mongo { @@ -125,13 +143,14 @@ boost::optional getOrGenerateSampleId(OperationContext* opCtx, const auto isInternalClient = !opCtx->getClient()->session() || (opCtx->getClient()->session()->getTags() & transport::Session::kInternalClient); uassert(ErrorCodes::InvalidOptions, - "Cannot specify 'sampleRate' since it is an internal field", - !request.getSampleId() || (isInternalClient || getTestCommandsEnabled())); + "Cannot specify 'sampleId' since it is an internal field", + !request.getSampleId() || isInternalClient || + TestingProctor::instance().isEnabled()); return request.getSampleId(); } if (serverGlobalParams.clusterRole.has(ClusterRole::None)) { uassert(ErrorCodes::InvalidOptions, - "Cannot specify 'sampleRate' since it is an internal field", + "Cannot specify 'sampleId' since it is an internal field", !request.getSampleId()); return QueryAnalysisSampler::get(opCtx).tryGenerateSampleId(opCtx, nss, cmdName); } diff --git a/src/mongo/s/query_analysis_server_status.cpp b/src/mongo/s/query_analysis_server_status.cpp index 13a04ea7c8e32..28a53839ed404 100644 --- a/src/mongo/s/query_analysis_server_status.cpp +++ b/src/mongo/s/query_analysis_server_status.cpp @@ -27,9 +27,13 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/query_analysis_sample_tracker.h" diff --git a/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp b/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp index 6fcf446537698..e1596c196fce1 100644 --- a/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp +++ b/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.cpp @@ -27,12 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/s/read_write_concern_defaults_cache_lookup_mongos.h" +#include +#include "mongo/client/read_preference.h" #include "mongo/db/commands/rwc_defaults_commands_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" +#include "mongo/s/read_write_concern_defaults_cache_lookup_mongos.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.h b/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.h index d99b076e6f7ee..d3b9b64b29dba 100644 --- a/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.h +++ b/src/mongo/s/read_write_concern_defaults_cache_lookup_mongos.h @@ -29,7 +29,11 @@ #pragma once +#include + +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" +#include "mongo/db/read_write_concern_defaults_gen.h" namespace mongo { diff --git a/src/mongo/s/request_types/abort_reshard_collection.idl b/src/mongo/s/request_types/abort_reshard_collection.idl index 99e7a3277c155..d48b89ebd05bc 100644 --- a/src/mongo/s/request_types/abort_reshard_collection.idl +++ b/src/mongo/s/request_types/abort_reshard_collection.idl @@ -60,6 +60,7 @@ commands: namespace: type api_version: "" type: uuid + reply_type: OkReply fields: userCanceled: type: bool diff --git a/src/mongo/s/request_types/add_shard_request_test.cpp b/src/mongo/s/request_types/add_shard_request_test.cpp index 58b1a1f497465..112fcad0e09d9 100644 --- a/src/mongo/s/request_types/add_shard_request_test.cpp +++ b/src/mongo/s/request_types/add_shard_request_test.cpp @@ -27,11 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/s/request_types/add_shard_request_type.h" - -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/s/request_types/add_shard_request_type.cpp b/src/mongo/s/request_types/add_shard_request_type.cpp index 23c918f9883e7..00911fa1daeee 100644 --- a/src/mongo/s/request_types/add_shard_request_type.cpp +++ b/src/mongo/s/request_types/add_shard_request_type.cpp @@ -27,12 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/s/request_types/add_shard_request_type.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/server_options.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/s/request_types/add_shard_request_type.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/str.h" namespace mongo { @@ -40,9 +53,6 @@ namespace mongo { using std::string; using str::stream; -class BSONObj; -template -class StatusWith; const BSONField AddShardRequest::mongosAddShard("addShard"); const BSONField AddShardRequest::mongosAddShardDeprecated("addshard"); diff --git a/src/mongo/s/request_types/add_shard_request_type.h b/src/mongo/s/request_types/add_shard_request_type.h index a4491dc8f5213..f98d7f300a43f 100644 --- a/src/mongo/s/request_types/add_shard_request_type.h +++ b/src/mongo/s/request_types/add_shard_request_type.h @@ -29,11 +29,19 @@ #pragma once +#include #include +#include +#include +#include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/connection_string.h" #include "mongo/db/operation_context.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp index 7a9b2b8141e47..74e2c6037ad4c 100644 --- a/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp +++ b/src/mongo/s/request_types/add_shard_to_zone_request_test.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/request_types/add_shard_to_zone_request_type.h" - -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/s/request_types/add_shard_to_zone_request_type.cpp b/src/mongo/s/request_types/add_shard_to_zone_request_type.cpp index 4e96143085e8c..eeeb813c5df1e 100644 --- a/src/mongo/s/request_types/add_shard_to_zone_request_type.cpp +++ b/src/mongo/s/request_types/add_shard_to_zone_request_type.cpp @@ -27,13 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/s/request_types/add_shard_to_zone_request_type.h" +#include -#include "mongo/bson/bson_field.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/write_concern_options.h" +#include "mongo/s/request_types/add_shard_to_zone_request_type.h" namespace mongo { diff --git a/src/mongo/s/request_types/add_shard_to_zone_request_type.h b/src/mongo/s/request_types/add_shard_to_zone_request_type.h index 2fabeddb00ab4..15fe1abddfe1a 100644 --- a/src/mongo/s/request_types/add_shard_to_zone_request_type.h +++ b/src/mongo/s/request_types/add_shard_to_zone_request_type.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/error_extra_info.h" #include "mongo/base/status_with.h" namespace mongo { diff --git a/src/mongo/s/request_types/cluster_commands_without_shard_key.idl b/src/mongo/s/request_types/cluster_commands_without_shard_key.idl index 9ec89c04de862..d7c33b13beba4 100644 --- a/src/mongo/s/request_types/cluster_commands_without_shard_key.idl +++ b/src/mongo/s/request_types/cluster_commands_without_shard_key.idl @@ -33,19 +33,26 @@ imports: - "mongo/db/basic_types.idl" - "mongo/s/sharding_types.idl" structs: - clusterQueryWithoutShardKeyResponse: + clusterQueryWithoutShardKeyResponse: description: "The response for the '_clusterQueryWithoutShardKeyFind' command." is_command_reply: true strict: false fields: targetDoc: description: >- - "Contains _id or the constructed upsert document if upsert:true and no matches were + "Contains _id or the constructed upsert document if upsert:true and no matches were found." type: object optional: true stability: internal - shardId: + userUpsertDocForTimeseries: + description: >- + "Contains the user-level document if we upsert the document into a timeseries + collection." + type: object + optional: true + stability: internal + shardId: description: "The shard id of the target shard." type: string optional: true @@ -55,7 +62,7 @@ structs: type: bool default: false stability: internal - clusterWriteWithoutShardKeyResponse: + clusterWriteWithoutShardKeyResponse: description: "The response for the '_clusterWriteWithoutShardKeyFind' command." is_command_reply: true strict: false @@ -72,18 +79,18 @@ commands: _clusterQueryWithoutShardKey: command_name: _clusterQueryWithoutShardKey cpp_name: ClusterQueryWithoutShardKey - description: >- + description: >- "An internal command used to broadcast a query to all shards for updates/deletes /findAndModifies that do not provide a shard key." namespace: ignored - api_version: "1" # This is an internal command that is run as a sub-operation to an - # update/delete/findAndModify without shard key. In the current implementation - # of the Stable API, sub-operations run under a command in the Stable API where - # a client specifies {apiStrict: true} are expected to also be Stable API - # compliant, when they technically should not be. To satisfy this requirement, - # this command is marked as part of the Stable API, but is not truly a part of + api_version: "1" # This is an internal command that is run as a sub-operation to an + # update/delete/findAndModify without shard key. In the current implementation + # of the Stable API, sub-operations run under a command in the Stable API where + # a client specifies {apiStrict: true} are expected to also be Stable API + # compliant, when they technically should not be. 
To satisfy this requirement, + # this command is marked as part of the Stable API, but is not truly a part of # it, since it is an internal-only command. - access_check: + access_check: none: true reply_type: clusterQueryWithoutShardKeyResponse fields: @@ -94,16 +101,16 @@ commands: _clusterWriteWithoutShardKey: command_name: _clusterWriteWithoutShardKey cpp_name: ClusterWriteWithoutShardKey - description: >- - "An internal command used to target an update/delete/findAndModify command to a + description: >- + "An internal command used to target an update/delete/findAndModify command to a specific shard using a shardId." namespace: ignored - api_version: "1" # This is an internal command that is run as a sub-operation to an - # update/delete/findAndModify without shard key. In the current implementation - # of the Stable API, sub-operations run under a command in the Stable API where - # a client specifies {apiStrict: true} are expected to also be Stable API - # compliant, when they technically should not be. To satisfy this requirement, - # this command is marked as part of the Stable API, but is not truly a part of + api_version: "1" # This is an internal command that is run as a sub-operation to an + # update/delete/findAndModify without shard key. In the current implementation + # of the Stable API, sub-operations run under a command in the Stable API where + # a client specifies {apiStrict: true} are expected to also be Stable API + # compliant, when they technically should not be. To satisfy this requirement, + # this command is marked as part of the Stable API, but is not truly a part of # it, since it is an internal-only command. access_check: none: true diff --git a/src/mongo/s/request_types/commit_reshard_collection.idl b/src/mongo/s/request_types/commit_reshard_collection.idl index ce8e9e98ca999..ba5f511728989 100644 --- a/src/mongo/s/request_types/commit_reshard_collection.idl +++ b/src/mongo/s/request_types/commit_reshard_collection.idl @@ -60,6 +60,7 @@ commands: namespace: type api_version: "" type: namespacestring + reply_type: OkReply fields: reshardingUUID: type: uuid diff --git a/src/mongo/s/request_types/drop_collection_if_uuid_not_matching.idl b/src/mongo/s/request_types/drop_collection_if_uuid_not_matching.idl index e298282a6ab1e..e4bebcf34b4c3 100644 --- a/src/mongo/s/request_types/drop_collection_if_uuid_not_matching.idl +++ b/src/mongo/s/request_types/drop_collection_if_uuid_not_matching.idl @@ -26,9 +26,7 @@ # it in the license file. # -# This IDL file describes the BSON format for _shardsvrDropCollectionIfUUIDNotMatching and -# _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern command. -# TODO SERVER-74324: deprecate _shardsvrDropCollectionIfUUIDNotMatching after 7.0 is lastLTS. +# This IDL file describes the BSON format for _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern command. global: cpp_namespace: "mongo" @@ -38,19 +36,6 @@ imports: commands: - _shardsvrDropCollectionIfUUIDNotMatching: - command_name: _shardsvrDropCollectionIfUUIDNotMatching - cpp_name: ShardsvrDropCollectionIfUUIDNotMatchingRequest - description: "Internal dropCollectionIfUUIDNotMatching request." - strict: false - namespace: concatenate_with_db - api_version: "" - fields: - expectedCollectionUUID: - type: uuid - description: "The expected collection UUID: if the local catalog has a different uuid - associated to the namespace, the collection will be dropped." 
- _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern: command_name: _shardsvrDropCollectionIfUUIDNotMatchingWithWriteConcern cpp_name: ShardsvrDropCollectionIfUUIDNotMatchingWithWriteConcernRequest @@ -63,3 +48,4 @@ commands: type: uuid description: "The expected collection UUID: if the local catalog has a different uuid associated to the namespace, the collection will be dropped." + reply_type: OkReply diff --git a/src/mongo/s/request_types/flush_database_cache_updates.idl b/src/mongo/s/request_types/flush_database_cache_updates.idl index ae4bfc63529d9..1773275ab27fd 100644 --- a/src/mongo/s/request_types/flush_database_cache_updates.idl +++ b/src/mongo/s/request_types/flush_database_cache_updates.idl @@ -62,3 +62,4 @@ commands: type: bool description: "If true, forces a refresh of the cache entry from the config server before waiting for updates to be persist" default: true + reply_type: OkReply diff --git a/src/mongo/s/request_types/flush_resharding_state_change.idl b/src/mongo/s/request_types/flush_resharding_state_change.idl index 7a56c05d0c64b..c56d624a442b4 100644 --- a/src/mongo/s/request_types/flush_resharding_state_change.idl +++ b/src/mongo/s/request_types/flush_resharding_state_change.idl @@ -42,6 +42,7 @@ commands: namespace: type api_version: "" type: namespacestring + reply_type: OkReply fields: reshardingUUID: type: uuid diff --git a/src/mongo/s/request_types/flush_routing_table_cache_updates.idl b/src/mongo/s/request_types/flush_routing_table_cache_updates.idl index 134a197ae4ee7..55ed4a76e42da 100644 --- a/src/mongo/s/request_types/flush_routing_table_cache_updates.idl +++ b/src/mongo/s/request_types/flush_routing_table_cache_updates.idl @@ -59,6 +59,7 @@ commands: namespace: type api_version: "" type: namespacestring + reply_type: OkReply fields: syncFromConfig: type: bool diff --git a/src/mongo/s/request_types/get_historical_placement_info.idl b/src/mongo/s/request_types/get_historical_placement_info.idl deleted file mode 100644 index c2e6026e93e0e..0000000000000 --- a/src/mongo/s/request_types/get_historical_placement_info.idl +++ /dev/null @@ -1,65 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. -# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. 
- -# TODO (SERVER-73029): remove this file and the class implementing _configsvrGetHistoricalPlacement - -global: - cpp_namespace: "mongo" - -imports: - - "mongo/db/basic_types.idl" - - "mongo/s/sharding_types.idl" - - "mongo/s/catalog/type_namespace_placement.idl" - -structs: - ConfigsvrGetHistoricalPlacementResponse: - description: "Response for the _configsvrGetHistoricalPlacement command" - strict: false - is_command_reply: true - fields: - historicalPlacement: HistoricalPlacement -commands: - - _configsvrGetHistoricalPlacement: - command_name: _configsvrGetHistoricalPlacement - cpp_name: ConfigsvrGetHistoricalPlacement - description: "Internal command to retrieve the list of shard IDs hosting data for the - namespace/cluster being targeted at a specific point in time" - namespace: type - api_version: "" - type: namespacestring - strict: false - fields: - at: - type: timestamp - description: "The requested point in time" - optional: false - targetWholeCluster: - type: bool - description: "When true, the command retrieves placement information concerning - the whole cluster (ignoring the namespace parameter)" - default: false diff --git a/src/mongo/s/request_types/merge_chunks_request_test.cpp b/src/mongo/s/request_types/merge_chunks_request_test.cpp index 368fd33d127a4..fffe1a300020f 100644 --- a/src/mongo/s/request_types/merge_chunks_request_test.cpp +++ b/src/mongo/s/request_types/merge_chunks_request_test.cpp @@ -27,12 +27,23 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/shard_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/request_types/merge_chunk_request_gen.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options.cpp b/src/mongo/s/request_types/migration_secondary_throttle_options.cpp index 931cb63ad4397..e136c4c366991 100644 --- a/src/mongo/s/request_types/migration_secondary_throttle_options.cpp +++ b/src/mongo/s/request_types/migration_secondary_throttle_options.cpp @@ -27,13 +27,25 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include -#include "mongo/s/request_types/migration_secondary_throttle_options.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/db/write_concern_options.h" +#include "mongo/s/request_types/migration_secondary_throttle_options.h" +#include "mongo/stdx/variant.h" namespace mongo { namespace { diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options.h b/src/mongo/s/request_types/migration_secondary_throttle_options.h index 943598567c2bf..c5c56d99c3afa 100644 --- a/src/mongo/s/request_types/migration_secondary_throttle_options.h +++ b/src/mongo/s/request_types/migration_secondary_throttle_options.h @@ -29,9 +29,13 @@ #pragma once +#include #include +#include +#include "mongo/base/status_with.h" #include "mongo/bson/bsonobj.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp index 4d42b3cc8d58a..3bd401c329874 100644 --- a/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp +++ b/src/mongo/s/request_types/migration_secondary_throttle_options_test.cpp @@ -27,12 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/write_concern_options.h" #include "mongo/s/request_types/migration_secondary_throttle_options.h" -#include "mongo/unittest/unittest.h" +#include "mongo/stdx/variant.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/s/request_types/move_primary.idl b/src/mongo/s/request_types/move_primary.idl index e21c8a0e5f6cf..ef4052c633e2a 100644 --- a/src/mongo/s/request_types/move_primary.idl +++ b/src/mongo/s/request_types/move_primary.idl @@ -36,35 +36,40 @@ imports: - "mongo/s/sharding_types.idl" structs: - movePrimary: - description: "The public movePrimary command on mongos" + MovePrimaryRequestBase: + description: The movePrimary request fields shared by several commands strict: false fields: - movePrimary: - type: namespacestring - description: "The namespace of the database whose primary shard is to be reassigned." - optional: true - moveprimary: - type: namespacestring - description: "The deprecated version of this command's name." - optional: true to: - type: string - description: "The shard serving as the destination for un-sharded collections." 
+ type: shard_id + description: Shard serving as the destination for un-sharded collections + validator: + callback: "ShardId::validate" + +commands: + movePrimary: + command_name: movePrimary + command_alias: moveprimary + description: The public movePrimary command on mongos + cpp_name: MovePrimary + namespace: type + type: database_name + api_version: + strict: false + chained_structs: + MovePrimaryRequestBase: MovePrimaryRequestBase - ShardMovePrimary: - description: "The internal movePrimary command on a primary shard" - generate_comparison_operators: true + _shardsvrMovePrimary: + command_name: _shardsvrMovePrimary + description: The internal movePrimary command on a primary shard + cpp_name: ShardsvrMovePrimary + namespace: type + type: database_name + api_version: strict: false - fields: - _shardsvrMovePrimary: - type: namespacestring - description: "The namespace of the database whose primary shard is to be reassigned." - to: - type: string - description: "The shard serving as the destination for un-sharded collections." + chained_structs: + MovePrimaryRequestBase: MovePrimaryRequestBase -commands: _shardsvrMovePrimaryEnterCriticalSection: command_name: _shardsvrMovePrimaryEnterCriticalSection description: Block CRUD operations on the given database by entering the critical section. diff --git a/src/mongo/s/request_types/placement_history_commands.idl b/src/mongo/s/request_types/placement_history_commands.idl new file mode 100644 index 0000000000000..b91822925df19 --- /dev/null +++ b/src/mongo/s/request_types/placement_history_commands.idl @@ -0,0 +1,82 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. 
+ +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + - "mongo/s/sharding_types.idl" + - "mongo/s/catalog/type_namespace_placement.idl" + +structs: + ConfigsvrGetHistoricalPlacementResponse: + description: "Response for the _configsvrGetHistoricalPlacement command" + strict: false + is_command_reply: true + fields: + historicalPlacement: HistoricalPlacement +commands: + + clusterResetPlacementHistory: + command_name: resetPlacementHistory + cpp_name: ClusterResetPlacementHistory + description: "Admin command to reinitialize the content of config.placementHistory + based on the current state of the Sharding catalog." + namespace: ignored + api_version: "" + strict: false + + _configsvrResetPlacementHistory: + command_name: _configsvrResetPlacementHistory + cpp_name: ConfigsvrResetPlacementHistory + description: "Definition of the resetPlacementHistory command called + from routers on the config server." + namespace: ignored + api_version: "" + strict: false + + # TODO (SERVER-73029): remove the _configsvrGetHistoricalPlacement and the code supporting it. + _configsvrGetHistoricalPlacement: + command_name: _configsvrGetHistoricalPlacement + cpp_name: ConfigsvrGetHistoricalPlacement + description: "Internal command to retrieve the list of shard IDs hosting data for the + namespace/cluster being targeted at a specific point in time" + namespace: type + api_version: "" + type: namespacestring + strict: false + fields: + at: + type: timestamp + description: "The requested point in time" + optional: false + targetWholeCluster: + type: bool + description: "When true, the command retrieves placement information concerning + the whole cluster (ignoring the namespace parameter)" + default: false diff --git a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp index 67981bd7f67d8..13dbd3b3c44a3 100644 --- a/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp +++ b/src/mongo/s/request_types/remove_shard_from_zone_request_test.cpp @@ -27,12 +27,17 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/request_types/remove_shard_from_zone_request_type.h" - -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/s/request_types/remove_shard_from_zone_request_type.cpp b/src/mongo/s/request_types/remove_shard_from_zone_request_type.cpp index 45c4d16afdfab..c6a83e048a373 100644 --- a/src/mongo/s/request_types/remove_shard_from_zone_request_type.cpp +++ b/src/mongo/s/request_types/remove_shard_from_zone_request_type.cpp @@ -27,13 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/s/request_types/remove_shard_from_zone_request_type.h" +#include -#include "mongo/bson/bson_field.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/write_concern_options.h" +#include "mongo/s/request_types/remove_shard_from_zone_request_type.h" namespace mongo { diff --git a/src/mongo/s/request_types/remove_shard_from_zone_request_type.h b/src/mongo/s/request_types/remove_shard_from_zone_request_type.h index c489f95b335f4..23377de7ad442 100644 --- a/src/mongo/s/request_types/remove_shard_from_zone_request_type.h +++ b/src/mongo/s/request_types/remove_shard_from_zone_request_type.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/base/error_extra_info.h" #include "mongo/base/status_with.h" namespace mongo { diff --git a/src/mongo/s/request_types/reset_placement_history.idl b/src/mongo/s/request_types/reset_placement_history.idl new file mode 100644 index 0000000000000..c05b22e7f8321 --- /dev/null +++ b/src/mongo/s/request_types/reset_placement_history.idl @@ -0,0 +1,52 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + +commands: + clusterResetPlacementHistory: + command_name: resetPlacementHistory + cpp_name: ClusterResetPlacementHistory + description: "Admin command to reinitialize the content of config.placementHistory + based on the current state of the Sharding catalog." + namespace: ignored + api_version: "" + strict: false + + _configsvrResetPlacementHistory: + command_name: _configsvrResetPlacementHistory + cpp_name: ConfigsvrResetPlacementHistory + description: "Definition of the resetPlacementHistory command called + from routers on the config server." 
+ namespace: ignored + api_version: "" + strict: false diff --git a/src/mongo/s/request_types/reshard_collection.idl b/src/mongo/s/request_types/reshard_collection.idl index 63b9544a91e3f..1dcf20e037f74 100644 --- a/src/mongo/s/request_types/reshard_collection.idl +++ b/src/mongo/s/request_types/reshard_collection.idl @@ -72,6 +72,20 @@ commands: type: uuid description: "The expected UUID of the collection." optional: true + shardDistribution: + type: array + description: "The key ranges for the new shard key. This should be continuous and complete." + optional: true + forceRedistribution: + type: bool + description: "Whether initiate reshardCollection if the shardKey doesn't change." + optional: true + reshardingUUID: + type: uuid + description: >- + "A user-provided identifier used to identify this resharding operation for + retryability purposes." + optional: true _configsvrReshardCollection: command_name: _configsvrReshardCollection @@ -108,3 +122,17 @@ commands: "Mapping of chunk ranges to be used as the initial split output. This is only for testing purposes." optional: true + shardDistribution: + type: array + description: "The key ranges for the new shard key. This should be continuous and complete." + optional: true + forceRedistribution: + type: bool + description: "Whether initiate reshardCollection if the shardKey doesn't change." + optional: true + reshardingUUID: + type: uuid + description: >- + "A user-provided identifier used to identify this resharding operation for + retryability purposes." + optional: true diff --git a/src/mongo/s/request_types/sharded_ddl_commands.idl b/src/mongo/s/request_types/sharded_ddl_commands.idl index bc0b3f9e9c058..61d184fc60829 100644 --- a/src/mongo/s/request_types/sharded_ddl_commands.idl +++ b/src/mongo/s/request_types/sharded_ddl_commands.idl @@ -232,6 +232,20 @@ structs: type: uuid description: "The expected UUID of the collection." optional: true + shardDistribution: + type: array + description: "The key ranges for the new shard key. This should be continuous and complete." + optional: true + forceRedistribution: + type: bool + description: "Whether initiate reshardCollection if the shardKey doesn't change." + optional: true + reshardingUUID: + type: uuid + description: >- + "A user-provided identifier used to identify this resharding operation for + retryability purposes." + optional: true SetAllowMigrationsRequest: description: "Parameters sent for the set allow migrations command" @@ -310,6 +324,7 @@ commands: api_version: "" cpp_name: ShardsvrDropDatabaseParticipant strict: false + reply_type: OkReply _shardsvrDropCollection: description: "Parser for the _shardsvrDropCollection command" @@ -336,6 +351,7 @@ commands: type: bool description: "Whether the drop comes as a result of an interrupted migration process." optional: true + reply_type: OkReply _shardsvrRenameCollection: command_name: _shardsvrRenameCollection @@ -352,6 +368,7 @@ commands: C2C needs to do the renames to replicate create collection." 
type: bool optional: true + reply_type: OkReply _shardsvrSetAllowMigrations: command_name: _shardsvrSetAllowMigrations @@ -363,22 +380,6 @@ commands: chained_structs: SetAllowMigrationsRequest: SetAllowMigrationsRequest - _configsvrRenameCollectionMetadata: - command_name: _configsvrRenameCollectionMetadata - cpp_name: ConfigsvrRenameCollectionMetadata - description: "Internal command for renaming collection metadata on the CSRS" - strict: false - namespace: concatenate_with_db - api_version: "" - fields: - to: - type: namespacestring - description: "The new namespace for the collection being renamed." - optFromCollection: - description: "Information of the source collection to rename, used only for sharded collection." - type: CollectionType - optional: true - _shardsvrReshardCollection: command_name: _shardsvrReshardCollection cpp_name: ShardsvrReshardCollection diff --git a/src/mongo/s/request_types/transition_from_dedicated_config_server.idl b/src/mongo/s/request_types/transition_from_dedicated_config_server.idl new file mode 100644 index 0000000000000..3af24a672e461 --- /dev/null +++ b/src/mongo/s/request_types/transition_from_dedicated_config_server.idl @@ -0,0 +1,51 @@ +# Copyright (C) 2023-present MongoDB, Inc. +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the Server Side Public License, version 1, +# as published by MongoDB, Inc. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# Server Side Public License for more details. +# +# You should have received a copy of the Server Side Public License +# along with this program. If not, see +# . +# +# As a special exception, the copyright holders give permission to link the +# code of portions of this program with the OpenSSL library under certain +# conditions as described in each individual source file and distribute +# linked combinations including the program with the OpenSSL library. You +# must comply with the Server Side Public License in all respects for +# all of the code used other than as permitted herein. If you modify file(s) +# with this exception, you may extend this exception to your version of the +# file(s), but you are not obligated to do so. If you do not wish to do so, +# delete this exception statement from your version. If you delete this +# exception statement from all source files in the program, then also delete +# it in the license file. +# + +# transitionFromDedicatedConfigServer IDL file + +global: + cpp_namespace: "mongo" + +imports: + - "mongo/db/basic_types.idl" + +commands: + transitionFromDedicatedConfigServer: + description: "The public transitionFromDedicatedConfigServer command on mongos." + command_name: transitionFromDedicatedConfigServer + strict: true + namespace: ignored + api_version: "" + + _configsvrTransitionFromDedicatedConfigServer: + description: "The transitionFromDedicatedConfigServer command for config server." 
+ command_name: _configsvrTransitionFromDedicatedConfigServer + cpp_name: ConfigsvrTransitionFromDedicatedConfigServer + strict: false + namespace: ignored + api_version: "" diff --git a/src/mongo/s/request_types/transition_to_catalog_shard.idl b/src/mongo/s/request_types/transition_to_catalog_shard.idl deleted file mode 100644 index e2f4258816ee9..0000000000000 --- a/src/mongo/s/request_types/transition_to_catalog_shard.idl +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright (C) 2023-present MongoDB, Inc. -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the Server Side Public License, version 1, -# as published by MongoDB, Inc. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# Server Side Public License for more details. -# -# You should have received a copy of the Server Side Public License -# along with this program. If not, see -# . -# -# As a special exception, the copyright holders give permission to link the -# code of portions of this program with the OpenSSL library under certain -# conditions as described in each individual source file and distribute -# linked combinations including the program with the OpenSSL library. You -# must comply with the Server Side Public License in all respects for -# all of the code used other than as permitted herein. If you modify file(s) -# with this exception, you may extend this exception to your version of the -# file(s), but you are not obligated to do so. If you do not wish to do so, -# delete this exception statement from your version. If you delete this -# exception statement from all source files in the program, then also delete -# it in the license file. -# - -# transitionToCatalogShard IDL file - -global: - cpp_namespace: "mongo" - -imports: - - "mongo/db/basic_types.idl" - -commands: - transitionToCatalogShard: - description: "The public transitionToCatalogShard command on mongos." - command_name: transitionToCatalogShard - strict: true - namespace: ignored - api_version: "" - - _configsvrTransitionToCatalogShard: - description: "The transitionToCatalogShard command for config server." - command_name: _configsvrTransitionToCatalogShard - cpp_name: ConfigsvrTransitionToCatalogShard - strict: false - namespace: ignored - api_version: "" diff --git a/src/mongo/s/request_types/update_zone_key_range_request_test.cpp b/src/mongo/s/request_types/update_zone_key_range_request_test.cpp index 95ef2c8984f4a..b164674831b76 100644 --- a/src/mongo/s/request_types/update_zone_key_range_request_test.cpp +++ b/src/mongo/s/request_types/update_zone_key_range_request_test.cpp @@ -27,13 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/s/request_types/update_zone_key_range_request_type.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/json.h" -#include "mongo/db/jsobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/db/namespace_string.h" +#include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/request_types/update_zone_key_range_request_type.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { @@ -49,7 +56,7 @@ TEST(UpdateZoneKeyRangeRequest, BasicValidMongosAssignCommand) { ASSERT_OK(requestStatus.getStatus()); auto request = requestStatus.getValue(); - ASSERT_EQ("foo.bar", request.getNS().ns()); + ASSERT_EQ("foo.bar", request.getNS().ns_forTest()); ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getRange().getMin()); ASSERT_BSONOBJ_EQ(BSON("x" << 100), request.getRange().getMax()); ASSERT_FALSE(request.isRemove()); @@ -66,7 +73,7 @@ TEST(UpdateZoneKeyRangeRequest, BasicValidMongosRemoveCommand) { ASSERT_OK(requestStatus.getStatus()); auto request = requestStatus.getValue(); - ASSERT_EQ("foo.bar", request.getNS().ns()); + ASSERT_EQ("foo.bar", request.getNS().ns_forTest()); ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getRange().getMin()); ASSERT_BSONOBJ_EQ(BSON("x" << 100), request.getRange().getMax()); ASSERT_TRUE(request.isRemove()); @@ -227,7 +234,7 @@ TEST(CfgAssignKeyRangeToZoneRequest, BasicValidMongosAssignCommand) { ASSERT_OK(requestStatus.getStatus()); auto request = requestStatus.getValue(); - ASSERT_EQ("foo.bar", request.getNS().ns()); + ASSERT_EQ("foo.bar", request.getNS().ns_forTest()); ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getRange().getMin()); ASSERT_BSONOBJ_EQ(BSON("x" << 100), request.getRange().getMax()); ASSERT_FALSE(request.isRemove()); @@ -244,7 +251,7 @@ TEST(CfgAssignKeyRangeToZoneRequest, BasicValidMongosRemoveCommand) { ASSERT_OK(requestStatus.getStatus()); auto request = requestStatus.getValue(); - ASSERT_EQ("foo.bar", request.getNS().ns()); + ASSERT_EQ("foo.bar", request.getNS().ns_forTest()); ASSERT_BSONOBJ_EQ(BSON("x" << 1), request.getRange().getMin()); ASSERT_BSONOBJ_EQ(BSON("x" << 100), request.getRange().getMax()); ASSERT_TRUE(request.isRemove()); diff --git a/src/mongo/s/request_types/update_zone_key_range_request_type.cpp b/src/mongo/s/request_types/update_zone_key_range_request_type.cpp index cfbce85948391..a48141f3632b0 100644 --- a/src/mongo/s/request_types/update_zone_key_range_request_type.cpp +++ b/src/mongo/s/request_types/update_zone_key_range_request_type.cpp @@ -27,13 +27,21 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/s/request_types/update_zone_key_range_request_type.h" +#include +#include -#include "mongo/bson/bson_field.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bson_extract.h" -#include "mongo/db/write_concern_options.h" +#include "mongo/s/request_types/update_zone_key_range_request_type.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { @@ -58,7 +66,7 @@ StatusWith UpdateZoneKeyRangeRequest::parseFromConfig } void UpdateZoneKeyRangeRequest::appendAsConfigCommand(BSONObjBuilder* cmdBuilder) { - cmdBuilder->append(kConfigsvrUpdateZoneKeyRange, _ns.ns()); + cmdBuilder->append(kConfigsvrUpdateZoneKeyRange, NamespaceStringUtil::serialize(_ns)); _range.append(cmdBuilder); if (_isRemove) { diff --git a/src/mongo/s/request_types/update_zone_key_range_request_type.h b/src/mongo/s/request_types/update_zone_key_range_request_type.h index db143941c5069..5509120dbcb55 100644 --- a/src/mongo/s/request_types/update_zone_key_range_request_type.h +++ b/src/mongo/s/request_types/update_zone_key_range_request_type.h @@ -29,7 +29,11 @@ #pragma once +#include + #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/jsobj.h" #include "mongo/db/namespace_string.h" #include "mongo/s/catalog/type_chunk.h" diff --git a/src/mongo/s/resharding/common_types.idl b/src/mongo/s/resharding/common_types.idl index b28385881d2d9..5009a445cc1be 100644 --- a/src/mongo/s/resharding/common_types.idl +++ b/src/mongo/s/resharding/common_types.idl @@ -49,6 +49,7 @@ enums: kBlockingWrites: "blocking-writes" kAborting: "aborting" kCommitting: "committing" + kQuiesced: "quiesced" kDone: "done" DonorState: @@ -72,6 +73,7 @@ enums: kAwaitingFetchTimestamp: "awaiting-fetch-timestamp" kCreatingCollection: "creating-collection" kCloning: "cloning" + kBuildingIndex: "building-index" kApplying: "applying" kError: "error" kStrictConsistency: "strict-consistency" @@ -120,6 +122,11 @@ structs: type: date description: "Operation start time." optional: true + userReshardingUUID: + type: uuid + optional: true + description: >- + The resharding UUID provided by the user, used to identify retries. AbortReason: description: "Not meant to be used directly. Only use internal fields." @@ -271,10 +278,27 @@ structs: description: "A struct representing the information needed for a resharding pipeline to determine which documents belong to a particular shard." strict: true + query_shape_component: true fields: recipientShardId: type: shard_id description: "The id of the recipient shard." + query_shape: anonymize reshardingKey: type: KeyPattern description: "The index specification document to use as the new shard key." + query_shape: custom + ShardKeyRange: + description: "A struct to represent the key ranges of shards." + fields: + min: + type: object_owned + description: "The min key of this shard key range." + optional: true + max: + type: object_owned + description: "The max key of this shard key range." + optional: true + shard: + type: shard_id + description: "Id of the shard for this shard key range." 
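
Editorial note: the `ShardKeyRange` struct added to common_types.idl above (optional `min` and `max` bounds plus a `shard` id) backs the `shardDistribution` arrays that this patch adds to the reshardCollection requests, described there as a continuous and complete set of key ranges. IDL structs compile into C++ classes with typed accessors and parse/serialize helpers; the snippet below is only a rough, hand-written approximation of such a type and of how a distribution across two shards might be expressed, with string placeholders standing in for the real BSON bounds.

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Hand-written approximation of an IDL-style struct; the generated class uses
// BSONObj bounds, a ShardId type, and parse/serialize helpers instead.
struct ShardKeyRange {
    std::optional<std::string> min;  // lower bound of the range (absent = open)
    std::optional<std::string> max;  // upper bound of the range (absent = open)
    std::string shard;               // shard that should own this range
};

int main() {
    // A complete, contiguous distribution across two shards, as a
    // shardDistribution array for reshardCollection might describe it.
    std::vector<ShardKeyRange> distribution{
        {std::nullopt, std::string{"{x: 0}"}, "shard0"},
        {std::string{"{x: 0}"}, std::nullopt, "shard1"},
    };
    for (const auto& range : distribution)
        std::cout << range.shard << ": [" << range.min.value_or("MinKey") << ", "
                  << range.max.value_or("MaxKey") << ")\n";
}
```
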
diff --git a/src/mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.cpp b/src/mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.cpp index 06ca0d9d6dcee..5b2c47f802a70 100644 --- a/src/mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.cpp +++ b/src/mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.cpp @@ -29,7 +29,9 @@ #include "mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.h" -#include "mongo/base/init.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.h b/src/mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.h index 2342b48943f9e..b330047d8f1ba 100644 --- a/src/mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.h +++ b/src/mongo/s/resharding/resharding_coordinator_service_conflicting_op_in_progress_info.h @@ -29,7 +29,12 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/repl/primary_only_service.h" namespace mongo { diff --git a/src/mongo/s/resharding/resharding_feature_flag.idl b/src/mongo/s/resharding/resharding_feature_flag.idl index afdad3d093ea1..3d8567f0ef581 100644 --- a/src/mongo/s/resharding/resharding_feature_flag.idl +++ b/src/mongo/s/resharding/resharding_feature_flag.idl @@ -35,8 +35,9 @@ imports: - "mongo/db/basic_types.idl" feature_flags: - featureFlagResharding: - description: When enabled, allows users to reshard their sharded collections. - cpp_varname: gFeatureFlagResharding - default: true - version: 5.0 + featureFlagReshardingImprovements: + description: "Feature flag for applying improvements on resharding to make it faster and + support same-key resharding." 
+ cpp_varname: gFeatureFlagReshardingImprovements + default: false + shouldBeFCVGated: true diff --git a/src/mongo/s/router_role.cpp b/src/mongo/s/router_role.cpp index 618b5db67f949..c03853a6137ef 100644 --- a/src/mongo/s/router_role.cpp +++ b/src/mongo/s/router_role.cpp @@ -30,9 +30,22 @@ #include "mongo/s/router_role.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" #include "mongo/s/grid.h" +#include "mongo/s/shard_version.h" #include "mongo/s/stale_exception.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -135,10 +148,10 @@ void CollectionRouter::_onException(RouteContext* context, Status s) { catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( si->getNss(), si->getVersionWanted(), si->getShardId()); } else if (s == ErrorCodes::StaleEpoch) { - auto si = s.extraInfo(); - tassert(6375905, "StaleEpoch must have extra info", si); - catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( - si->getNss(), si->getVersionWanted(), ShardId()); + if (auto si = s.extraInfo()) { + catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( + si->getNss(), si->getVersionWanted(), ShardId()); + } } else { uassertStatusOK(s); } diff --git a/src/mongo/s/router_role.h b/src/mongo/s/router_role.h index 2004f377aa8ff..c58821fe0c4df 100644 --- a/src/mongo/s/router_role.h +++ b/src/mongo/s/router_role.h @@ -29,10 +29,19 @@ #pragma once +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/database_version.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace sharding { diff --git a/src/mongo/s/router_transactions_metrics.cpp b/src/mongo/s/router_transactions_metrics.cpp index b3cad2b1edc7a..fb3a82a905b23 100644 --- a/src/mongo/s/router_transactions_metrics.cpp +++ b/src/mongo/s/router_transactions_metrics.cpp @@ -27,14 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/s/router_transactions_metrics.h" +#include +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/s/router_transactions_metrics.h" #include "mongo/s/router_transactions_stats_gen.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/s/router_transactions_metrics.h b/src/mongo/s/router_transactions_metrics.h index f7f6b1577355a..ce5c2f07fb976 100644 --- a/src/mongo/s/router_transactions_metrics.h +++ b/src/mongo/s/router_transactions_metrics.h @@ -29,10 +29,19 @@ #pragma once +#include +#include +#include + +#include + #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" #include "mongo/s/router_transactions_stats_gen.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/duration.h" #include "mongo/util/hierarchical_acquisition.h" namespace mongo { diff --git a/src/mongo/s/router_transactions_server_status.cpp b/src/mongo/s/router_transactions_server_status.cpp index 8b81b0afbc0a5..2d4bff4561c0a 100644 --- a/src/mongo/s/router_transactions_server_status.cpp +++ b/src/mongo/s/router_transactions_server_status.cpp @@ -27,12 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/operation_context.h" #include "mongo/s/router_transactions_metrics.h" #include "mongo/s/router_transactions_stats_gen.h" -#include "mongo/s/transaction_router.h" namespace mongo { namespace { diff --git a/src/mongo/s/routing_table_history_test.cpp b/src/mongo/s/routing_table_history_test.cpp index a067f030c82d3..baa14b1abf3bf 100644 --- a/src/mongo/s/routing_table_history_test.cpp +++ b/src/mongo/s/routing_table_history_test.cpp @@ -27,19 +27,65 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/keypattern.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/service_context.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/shard_id.h" +#include "mongo/platform/random.h" #include "mongo/s/catalog/type_chunk.h" +#include "mongo/s/chunk.h" #include "mongo/s/chunk_manager.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/chunks_test_util.h" +#include "mongo/s/resharding/type_collection_fields_gen.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { + +using chunks_test_util::assertEqualChunkInfo; +using chunks_test_util::calculateCollVersion; +using chunks_test_util::calculateIntermediateShardKey; +using chunks_test_util::calculateShardsMaxValidAfter; +using chunks_test_util::calculateShardVersions; +using chunks_test_util::genChunkVector; +using chunks_test_util::genRandomChunkVector; +using chunks_test_util::getShardId; +using chunks_test_util::performRandomChunkOperations; + namespace { +PseudoRandom _random{SecureRandom().nextInt64()}; + const ShardId kThisShard("thisShard"); const NamespaceString kNss = NamespaceString::createNamespaceString_forTest("TestDB", "TestColl"); @@ -106,66 +152,69 @@ ChunkInfo* getChunkToSplit(const RoutingTableHistory& rt, const BSONObj& min, co return firstOverlappingChunk.get(); } -/** - * Test fixture for tests that need to start with a fresh routing table with - * only a single chunk in it, with bytes already written to that chunk object. 
- */ class RoutingTableHistoryTest : public unittest::Test { public: - void setUp() override { - const UUID uuid = UUID::gen(); - const OID epoch = OID::gen(); - const Timestamp timestamp(1); - ChunkVersion version({epoch, timestamp}, {1, 0}); - - auto initChunk = - ChunkType{uuid, - ChunkRange{_shardKeyPattern.globalMin(), _shardKeyPattern.globalMax()}, - version, - kThisShard}; - - _rt.emplace(RoutingTableHistory::makeNew(kNss, - uuid, - _shardKeyPattern, - nullptr, - false, - epoch, - timestamp, - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - {initChunk})); - ASSERT_EQ(_rt->numChunks(), 1ull); - } - const KeyPattern& getShardKeyPattern() const { return _shardKeyPattern; } - const RoutingTableHistory& getInitialRoutingTable() const { - return *_rt; + const OID& collEpoch() const { + return _epoch; } -private: - boost::optional _rt; + const Timestamp& collTimestamp() const { + return _collTimestamp; + } + + const UUID& collUUID() const { + return _collUUID; + } - KeyPattern _shardKeyPattern{BSON("a" << 1)}; + std::vector genRandomChunkVector(size_t minNumChunks = 1, + size_t maxNumChunks = 30) const { + return chunks_test_util::genRandomChunkVector( + _collUUID, _epoch, _collTimestamp, maxNumChunks, minNumChunks); + } + + RoutingTableHistory makeNewRt(const std::vector& chunks) const { + return RoutingTableHistory::makeNew(kNss, + _collUUID, + _shardKeyPattern, + nullptr, + false, + _epoch, + _collTimestamp, + boost::none /* timeseriesFields */, + boost::none /* reshardingFields */, + true, + chunks); + } + +protected: + KeyPattern _shardKeyPattern{chunks_test_util::kShardKeyPattern}; + const OID _epoch{OID::gen()}; + const Timestamp _collTimestamp{1, 1}; + const UUID _collUUID{UUID::gen()}; }; /** - * Test fixture for tests that need to start with three chunks in it, with the - * same number of bytes written to every chunk object. 
+ * Test fixture for tests that need to start with three chunks in it */ class RoutingTableHistoryTestThreeInitialChunks : public RoutingTableHistoryTest { public: void setUp() override { RoutingTableHistoryTest::setUp(); + _initialChunkBoundaryPoints = {getShardKeyPattern().globalMin(), BSON("a" << 10), BSON("a" << 20), getShardKeyPattern().globalMax()}; - _rt.emplace(splitChunk(RoutingTableHistoryTest::getInitialRoutingTable(), - _initialChunkBoundaryPoints)); + ChunkVersion version{{collEpoch(), collTimestamp()}, {1, 0}}; + auto chunks = + genChunkVector(collUUID(), _initialChunkBoundaryPoints, version, 1 /* numShards */); + + _rt.emplace(makeNewRt(chunks)); + ASSERT_EQ(_rt->numChunks(), 3ull); } @@ -173,114 +222,497 @@ class RoutingTableHistoryTestThreeInitialChunks : public RoutingTableHistoryTest return *_rt; } + uint64_t getBytesInOriginalChunk() const { + return _bytesInOriginalChunk; + } + std::vector getInitialChunkBoundaryPoints() { return _initialChunkBoundaryPoints; } private: + uint64_t _bytesInOriginalChunk{4ull}; + boost::optional _rt; std::vector _initialChunkBoundaryPoints; }; +/* + * Test creation of a Routing Table with randomly generated chunks + */ +TEST_F(RoutingTableHistoryTest, RandomCreateBasic) { + const auto chunks = genRandomChunkVector(); + const auto expectedShardVersions = calculateShardVersions(chunks); + const auto expectedCollVersion = calculateCollVersion(expectedShardVersions); + + // Create a new routing table from the randomly generated chunks + auto rt = makeNewRt(chunks); + + // Checks basic getter of routing table return correct values + ASSERT_EQ(kNss, rt.nss()); + ASSERT_EQ(ShardKeyPattern(getShardKeyPattern()).toString(), rt.getShardKeyPattern().toString()); + ASSERT_EQ(chunks.size(), rt.numChunks()); + + // Check that chunks have correct info + size_t i = 0; + rt.forEachChunk([&](const auto& chunkInfo) { + assertEqualChunkInfo(ChunkInfo{chunks[i++]}, *chunkInfo); + return true; + }); + ASSERT_EQ(i, chunks.size()); + + // Checks collection version is correct + ASSERT_EQ(expectedCollVersion, rt.getVersion()); + + // Checks version for each chunk + for (const auto& [shardId, shardVersion] : expectedShardVersions) { + ASSERT_EQ(shardVersion, rt.getVersion(shardId)); + } + + ASSERT_EQ(expectedShardVersions.size(), rt.getNShardsOwningChunks()); + + std::set expectedShardIds; + for (const auto& [shardId, shardVersion] : expectedShardVersions) { + expectedShardIds.insert(shardId); + } + std::set shardIds; + rt.getAllShardIds(&shardIds); + ASSERT(expectedShardIds == shardIds); + + // Validate all shard maxValidAfter + const auto expectedShardsMaxValidAfter = calculateShardsMaxValidAfter(chunks); + const auto expectedMaxValidAfter = [&](const ShardId& shard) { + auto it = expectedShardsMaxValidAfter.find(shard); + return it != expectedShardsMaxValidAfter.end() ? it->second : Timestamp{0, 0}; + }; + for (const auto& [shardId, _] : expectedShardVersions) { + ASSERT_EQ(rt.getMaxValidAfter(shardId), expectedMaxValidAfter(shardId)); + } + ASSERT_EQ(rt.getMaxValidAfter(ShardId{"shard-without-chunks"}), (Timestamp{0, 0})); +} + +/* + * Test that creation of Routing Table with chunks that do not cover the entire shard key space + * fails. + * + * The gap is produced by removing a random chunks from the randomly generated chunk list. Thus it + * also cover the case for which min/max key is missing. 
+ */ +TEST_F(RoutingTableHistoryTest, RandomCreateWithMissingChunkFail) { + auto chunks = genRandomChunkVector(2 /*minNumChunks*/); + + { + // Chunks gap are detected only if the gap is between chunks belonging to different shards, + // thus associate each chunk to a different shard. + // TODO SERVER-77090: stop forcing chunks on different shards + for (size_t i = 0; i < chunks.size(); i++) { + chunks[i].setShard(getShardId(i)); + chunks[i].setHistory({}); + } + } + + // Remove one random chunk to simulate a gap in the shardkey + chunks.erase(chunks.begin() + _random.nextInt64(chunks.size())); + + // Create a new routing table from the randomly generated chunks + ASSERT_THROWS_CODE(makeNewRt(chunks), DBException, ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Test that creation of Routing Table with chunks that do not cover the entire shard key space + * fails. + * + * The gap is produced by shrinking the range of a random chunk. + */ +TEST_F(RoutingTableHistoryTest, RandomCreateWithChunkGapFail) { + auto chunks = genRandomChunkVector(2 /*minNumChunks*/); + + { + // Chunks gap are detected only if the gap is between chunks belonging to different shards, + // thus associate each chunk to a different shard. + // TODO SERVER-77090: stop forcing chunks on different shards + for (size_t i = 0; i < chunks.size(); i++) { + chunks[i].setShard(getShardId(i)); + chunks[i].setHistory({}); + } + } + + auto& shrinkedChunk = chunks.at(_random.nextInt64(chunks.size())); + auto intermediateKey = + calculateIntermediateShardKey(shrinkedChunk.getMin(), shrinkedChunk.getMax()); + if (_random.nextInt64(2)) { + // Shrink right bound + shrinkedChunk.setMax(intermediateKey); + } else { + // Shrink left bound + shrinkedChunk.setMin(intermediateKey); + } + + // Create a new routing table from the randomly generated chunks + ASSERT_THROWS_CODE(makeNewRt(chunks), DBException, ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Updating ChunkMap with gaps must fail + */ +TEST_F(RoutingTableHistoryTest, RandomUpdateWithChunkGapFail) { + auto chunks = genRandomChunkVector(); + + { + // Chunks gap are detected only if the gap is between chunks belonging to different shards, + // thus associate each chunk to a different shard. + // TODO SERVER-77090: stop forcing chunks on different shards + for (size_t i = 0; i < chunks.size(); i++) { + chunks[i].setShard(getShardId(i)); + chunks[i].setHistory({}); + } + } + + // Create a new routing table from the randomly generated chunks + auto rt = makeNewRt(chunks); + auto collVersion = rt.getVersion(); + + auto shrinkedChunk = chunks.at(_random.nextInt64(chunks.size())); + auto intermediateKey = + calculateIntermediateShardKey(shrinkedChunk.getMin(), shrinkedChunk.getMax()); + if (_random.nextInt64(2)) { + // Shrink right bound + shrinkedChunk.setMax(intermediateKey); + } else { + // Shrink left bound + shrinkedChunk.setMin(intermediateKey); + } + + // Bump chunk version + collVersion.incMajor(); + shrinkedChunk.setVersion(collVersion); + + ASSERT_THROWS_CODE(rt.makeUpdated(boost::none /* timeseriesFields */, + boost::none /* reshardingFields */, + true, + {std::move(shrinkedChunk)}), + AssertionException, + ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Creating a Routing Table with overlapping chunks must fail. 
+ */ +TEST_F(RoutingTableHistoryTest, RandomCreateWithChunkOverlapFail) { + auto chunks = genRandomChunkVector(2 /* minNumChunks */); + + { + // Chunks overlaps are detected only if the overlap is between chunks belonging to different + // shards, thus associate each chunk to a different shard. + // TODO SERVER-77090: stop forcing chunks on different shards + for (size_t i = 0; i < chunks.size(); i++) { + chunks[i].setShard(getShardId(i)); + chunks[i].setHistory({}); + } + } + + auto chunkToExtendIt = chunks.begin() + _random.nextInt64(chunks.size()); + + // Current implementation does not fail if the overlap between two chunks is complete + // (e.g. [0, 5] and [0, 10]) + // thus we need to generate a semi overlap between two chunks + // (e.g. [0, 5] and [3, 10]) + // TODO SERVER-77090: extend check to cover for complete overlaps + + const auto canExtendLeft = chunkToExtendIt > chunks.begin(); + const auto extendRight = + !canExtendLeft || ((chunkToExtendIt < std::prev(chunks.end())) && _random.nextInt64(2)); + const auto extendLeft = !extendRight; + if (extendRight) { + // extend right bound + chunkToExtendIt->setMax(calculateIntermediateShardKey( + chunkToExtendIt->getMax(), std::next(chunkToExtendIt)->getMax())); + } + + if (extendLeft) { + invariant(canExtendLeft); + // extend left bound + chunkToExtendIt->setMin(calculateIntermediateShardKey(std::prev(chunkToExtendIt)->getMin(), + chunkToExtendIt->getMin())); + } + + // Create a new routing table from the randomly generated chunks + ASSERT_THROWS_CODE(makeNewRt(chunks), DBException, ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Updating a ChunkMap with overlapping chunks must fail. + */ +TEST_F(RoutingTableHistoryTest, RandomUpdateWithChunkOverlapFail) { + auto chunks = genRandomChunkVector(2 /* minNumChunks */); + + { + // Chunks overlaps are detected only if the overlap is between chunks belonging to different + // shards, thus associate each chunk to a different shard. + // TODO SERVER-77090: stop forcing chunks on different shards + for (size_t i = 0; i < chunks.size(); i++) { + chunks[i].setShard(getShardId(i)); + chunks[i].setHistory({}); + } + } + // Create a new routing table from the randomly generated chunks + auto rt = makeNewRt(chunks); + auto collVersion = rt.getVersion(); + + auto chunkToExtendIt = chunks.begin() + _random.nextInt64(chunks.size()); + + // Current implementation does not fail if the overlap between two chunks is complete + // (e.g. [0, 5] and [0, 10]) + // thus we need to generate a semi overlap between two chunks + // (e.g. 
[0, 5] and [3, 10]) + // TODO SERVER-77090: extend check to cover for complete overlaps + + const auto canExtendLeft = chunkToExtendIt > chunks.begin(); + const auto extendRight = + !canExtendLeft || (chunkToExtendIt < std::prev(chunks.end()) && _random.nextInt64(2)); + const auto extendLeft = !extendRight; + if (extendRight) { + // extend right bound + chunkToExtendIt->setMax(calculateIntermediateShardKey( + chunkToExtendIt->getMax(), std::next(chunkToExtendIt)->getMax())); + } + + if (extendLeft) { + invariant(canExtendLeft); + // extend left bound + chunkToExtendIt->setMin(calculateIntermediateShardKey(std::prev(chunkToExtendIt)->getMin(), + chunkToExtendIt->getMin())); + } + + // Bump chunk version + collVersion.incMajor(); + chunkToExtendIt->setVersion(collVersion); + + ASSERT_THROWS_CODE(rt.makeUpdated(boost::none /* timeseriesFields */, + boost::none /* reshardingFields */, + true, + {*chunkToExtendIt}), + AssertionException, + ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Creating a Routing Table with wrong min key must fail. + */ +TEST_F(RoutingTableHistoryTest, RandomCreateWrongMinFail) { + auto chunks = genRandomChunkVector(); + + chunks.begin()->setMin(BSON("a" << std::numeric_limits::min())); + + // Create a new routing table from the randomly generated chunks + ASSERT_THROWS_CODE(makeNewRt(chunks), DBException, ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Creating a Routing Table with wrong max key must fail. + */ +TEST_F(RoutingTableHistoryTest, RandomCreateWrongMaxFail) { + auto chunks = genRandomChunkVector(); + + chunks.begin()->setMax(BSON("a" << std::numeric_limits::max())); + + // Create a new routing table from the randomly generated chunks + ASSERT_THROWS_CODE(makeNewRt(chunks), DBException, ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Creating a Routing Table with mismatching epoch must fail. + */ +TEST_F(RoutingTableHistoryTest, RandomCreateMismatchingTimestampFail) { + auto chunks = genRandomChunkVector(); + + // Change epoch on a random chunk + auto chunkIt = chunks.begin() + _random.nextInt64(chunks.size()); + const auto& oldVersion = chunkIt->getVersion(); + + const Timestamp wrongTimestamp{Date_t::now()}; + ChunkVersion newVersion{{collEpoch(), wrongTimestamp}, + {oldVersion.majorVersion(), oldVersion.minorVersion()}}; + chunkIt->setVersion(newVersion); + + // Create a new routing table from the randomly generated chunks + ASSERT_THROWS_CODE(makeNewRt(chunks), DBException, ErrorCodes::ConflictingOperationInProgress); +} + +/* + * Updating a Routing Table with mismatching Timestamp must fail. + */ +TEST_F(RoutingTableHistoryTest, RandomUpdateMismatchingTimestampFail) { + auto chunks = genRandomChunkVector(); + + // Create a new routing table from the randomly generated chunks + auto rt = makeNewRt(chunks); + + // Change epoch on a random chunk + auto chunkIt = chunks.begin() + _random.nextInt64(chunks.size()); + const auto& oldVersion = chunkIt->getVersion(); + const Timestamp wrongTimestamp{Date_t::now()}; + ChunkVersion newVersion{{collEpoch(), wrongTimestamp}, + {oldVersion.majorVersion(), oldVersion.minorVersion()}}; + chunkIt->setVersion(newVersion); + + ASSERT_THROWS_CODE(rt.makeUpdated(boost::none /* timeseriesFields */, + boost::none /* reshardingFields */, + true, + {*chunkIt}), + AssertionException, + ErrorCodes::ConflictingOperationInProgress); +} + + +/* + * Test update of the Routing Table with randomly generated changed chunks. 
+ */ +TEST_F(RoutingTableHistoryTest, RandomUpdate) { + auto initialChunks = genRandomChunkVector(); + + const auto initialShardVersions = calculateShardVersions(initialChunks); + const auto initialCollVersion = calculateCollVersion(initialShardVersions); + + // Create a new routing table from the randomly generated initialChunks + auto initialRt = makeNewRt(initialChunks); + + auto chunks = initialChunks; + const auto maxNumChunkOps = 2 * initialChunks.size(); + const auto numChunkOps = _random.nextInt32(maxNumChunkOps); + + performRandomChunkOperations(&chunks, numChunkOps); + + std::vector updatedChunks; + for (const auto& chunk : chunks) { + if (!chunk.getVersion().isOlderOrEqualThan(initialCollVersion)) { + updatedChunks.push_back(chunk); + } + } + + const auto expectedShardVersions = calculateShardVersions(chunks); + const auto expectedCollVersion = calculateCollVersion(expectedShardVersions); + + auto rt = initialRt.makeUpdated(boost::none /* timeseriesFields */, + boost::none /* reshardingFields */, + true, + updatedChunks); + + // Checks basic getter of routing table return correct values + ASSERT_EQ(kNss, rt.nss()); + ASSERT_EQ(ShardKeyPattern(getShardKeyPattern()).toString(), rt.getShardKeyPattern().toString()); + ASSERT_EQ(chunks.size(), rt.numChunks()); + + // Check that chunks have correct info + size_t i = 0; + rt.forEachChunk([&](const auto& chunkInfo) { + assertEqualChunkInfo(ChunkInfo{chunks[i++]}, *chunkInfo); + return true; + }); + ASSERT_EQ(i, chunks.size()); + + // Checks collection version is correct + ASSERT_EQ(expectedCollVersion, rt.getVersion()); + + // Checks version for each shard + for (const auto& [shardId, shardVersion] : expectedShardVersions) { + ASSERT_EQ(shardVersion, rt.getVersion(shardId)); + } + + ASSERT_EQ(expectedShardVersions.size(), rt.getNShardsOwningChunks()); + + std::set expectedShardIds; + for (const auto& [shardId, shardVersion] : expectedShardVersions) { + expectedShardIds.insert(shardId); + } + std::set shardIds; + rt.getAllShardIds(&shardIds); + ASSERT(expectedShardIds == shardIds); + + // Validate all shard maxValidAfter + const auto expectedShardsMaxValidAfter = calculateShardsMaxValidAfter(chunks); + const auto expectedMaxValidAfter = [&](const ShardId& shard) { + auto it = expectedShardsMaxValidAfter.find(shard); + return it != expectedShardsMaxValidAfter.end() ? 
it->second : Timestamp{0, 0}; + }; + for (const auto& [shardId, _] : expectedShardVersions) { + ASSERT_EQ(rt.getMaxValidAfter(shardId), expectedMaxValidAfter(shardId)) + << "For shardid " << shardId; + } + ASSERT_EQ(rt.getMaxValidAfter(ShardId{"shard-without-chunks"}), (Timestamp{0, 0})); +} + TEST_F(RoutingTableHistoryTest, TestSplits) { - const UUID uuid = UUID::gen(); - const OID epoch = OID::gen(); - const Timestamp timestamp(1); - ChunkVersion version({epoch, timestamp}, {1, 0}); + ChunkVersion version{{collEpoch(), collTimestamp()}, {1, 0}}; auto chunkAll = - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()}, version, kThisShard}; - auto rt = RoutingTableHistory::makeNew(kNss, - uuid, - getShardKeyPattern(), - nullptr, - false, - epoch, - timestamp, - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - {chunkAll}); + auto rt = makeNewRt({chunkAll}); std::vector chunks1 = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, - ChunkVersion({epoch, timestamp}, {2, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 1}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion{{collEpoch(), collTimestamp()}, {2, 2}}, kThisShard}}; - auto rt1 = rt.makeUpdated( - boost::none /* timeseriesFields */, boost::none /* reshardingFields */, true, chunks1); - auto v1 = ChunkVersion({epoch, timestamp}, {2, 2}); + auto rt1 = rt.makeUpdated(boost::none /* timeseriesFields */, boost::none, true, chunks1); + auto v1 = ChunkVersion{{collEpoch(), collTimestamp()}, {2, 2}}; ASSERT_EQ(v1, rt1.getVersion(kThisShard)); std::vector chunks2 = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -1)}, - ChunkVersion({epoch, timestamp}, {3, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {3, 1}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << -1), BSON("a" << 0)}, - ChunkVersion({epoch, timestamp}, {3, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {3, 2}), kThisShard}}; auto rt2 = rt1.makeUpdated( boost::none /* timeseriesFields */, boost::none /* reshardingFields */, true, chunks2); - auto v2 = ChunkVersion({epoch, timestamp}, {3, 2}); + auto v2 = ChunkVersion({collEpoch(), collTimestamp()}, {3, 2}); ASSERT_EQ(v2, rt2.getVersion(kThisShard)); } TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) { - const UUID uuid = UUID::gen(); - const OID epoch = OID::gen(); - const Timestamp timestamp(1); - std::vector initialChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {1, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {1, 0}), kThisShard}}; - auto rt = RoutingTableHistory::makeNew(kNss, - uuid, - getShardKeyPattern(), - nullptr, - false, - epoch, - timestamp, - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - initialChunks); + auto rt = makeNewRt(initialChunks); ASSERT_EQ(rt.numChunks(), 1); std::vector changedChunks = { - ChunkType{uuid, + ChunkType{collUUID(), 
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, - ChunkVersion({epoch, timestamp}, {2, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 1}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}), kThisShard}}; auto rt1 = rt.makeUpdated(boost::none /* timeseriesFields */, boost::none /* reshardingFields */, true, changedChunks); - auto v1 = ChunkVersion({epoch, timestamp}, {2, 2}); + auto v1 = ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}); ASSERT_EQ(v1, rt1.getVersion(kThisShard)); ASSERT_EQ(rt1.numChunks(), 2); @@ -299,261 +731,192 @@ TEST_F(RoutingTableHistoryTest, TestReplaceEmptyChunk) { } TEST_F(RoutingTableHistoryTest, TestUseLatestVersions) { - const UUID uuid = UUID::gen(); - const OID epoch = OID::gen(); - const Timestamp timestamp(1); - std::vector initialChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {1, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {1, 0}), kThisShard}}; - auto rt = RoutingTableHistory::makeNew(kNss, - uuid, - getShardKeyPattern(), - nullptr, - false, - epoch, - timestamp, - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - initialChunks); + auto rt = makeNewRt(initialChunks); ASSERT_EQ(rt.numChunks(), 1); std::vector changedChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {1, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {1, 0}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, - ChunkVersion({epoch, timestamp}, {2, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 1}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}), kThisShard}}; auto rt1 = rt.makeUpdated(boost::none /* timeseriesFields */, boost::none /* reshardingFields */, true, changedChunks); - auto v1 = ChunkVersion({epoch, timestamp}, {2, 2}); + auto v1 = ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}); ASSERT_EQ(v1, rt1.getVersion(kThisShard)); ASSERT_EQ(rt1.numChunks(), 2); } TEST_F(RoutingTableHistoryTest, TestOutOfOrderVersion) { - const UUID uuid = UUID::gen(); - const OID epoch = OID::gen(); - const Timestamp timestamp(1); - std::vector initialChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, - ChunkVersion({epoch, timestamp}, {2, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 1}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 0), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}), kThisShard}}; - auto rt = RoutingTableHistory::makeNew(kNss, - uuid, - getShardKeyPattern(), - nullptr, - false, - epoch, - timestamp, - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - initialChunks); + auto rt = makeNewRt(initialChunks); ASSERT_EQ(rt.numChunks(), 2); std::vector changedChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 0), 
getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {3, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {3, 0}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, - ChunkVersion({epoch, timestamp}, {3, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {3, 1}), kThisShard}}; auto rt1 = rt.makeUpdated(boost::none /* timeseriesFields */, boost::none /* reshardingFields */, true, changedChunks); - auto v1 = ChunkVersion({epoch, timestamp}, {3, 1}); + auto v1 = ChunkVersion({collEpoch(), collTimestamp()}, {3, 1}); ASSERT_EQ(v1, rt1.getVersion(kThisShard)); ASSERT_EQ(rt1.numChunks(), 2); auto chunk1 = rt1.findIntersectingChunk(BSON("a" << 0)); - ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {3, 0})); + ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({collEpoch(), collTimestamp()}, {3, 0})); ASSERT_EQ(chunk1->getMin().woCompare(BSON("a" << 0)), 0); ASSERT_EQ(chunk1->getMax().woCompare(getShardKeyPattern().globalMax()), 0); } TEST_F(RoutingTableHistoryTest, TestMergeChunks) { - const UUID uuid = UUID::gen(); - const OID epoch = OID::gen(); - const Timestamp timestamp(1); - std::vector initialChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 0), BSON("a" << 10)}, - ChunkVersion({epoch, timestamp}, {2, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 0}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 0)}, - ChunkVersion({epoch, timestamp}, {2, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 1}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}), kThisShard}}; - auto rt = RoutingTableHistory::makeNew(kNss, - uuid, - getShardKeyPattern(), - nullptr, - false, - epoch, - timestamp, - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - initialChunks); + auto rt = makeNewRt(initialChunks); + ASSERT_EQ(rt.numChunks(), 3); - ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {2, 2})); + ASSERT_EQ(rt.getVersion(), ChunkVersion({collEpoch(), collTimestamp()}, {2, 2})); std::vector changedChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {3, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {3, 0}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)}, - ChunkVersion({epoch, timestamp}, {3, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {3, 1}), kThisShard}}; auto rt1 = rt.makeUpdated(boost::none /* timeseriesFields */, boost::none /* reshardingFields */, true, changedChunks); - auto v1 = ChunkVersion({epoch, timestamp}, {3, 1}); + auto v1 = ChunkVersion({collEpoch(), collTimestamp()}, {3, 1}); ASSERT_EQ(v1, rt1.getVersion(kThisShard)); ASSERT_EQ(rt1.numChunks(), 2); } TEST_F(RoutingTableHistoryTest, TestMergeChunksOrdering) { - const UUID uuid = UUID::gen(); - const OID epoch = OID::gen(); - const Timestamp timestamp(1); - std::vector initialChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << -10), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {2, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 0}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), 
ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -500)}, - ChunkVersion({epoch, timestamp}, {2, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 1}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << -500), BSON("a" << -10)}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}), kThisShard}}; - auto rt = RoutingTableHistory::makeNew(kNss, - uuid, - getShardKeyPattern(), - nullptr, - false, - epoch, - timestamp, - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - initialChunks); + auto rt = makeNewRt(initialChunks); ASSERT_EQ(rt.numChunks(), 3); - ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {2, 2})); + ASSERT_EQ(rt.getVersion(), ChunkVersion({collEpoch(), collTimestamp()}, {2, 2})); std::vector changedChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << -500), BSON("a" << -10)}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << -10)}, - ChunkVersion({epoch, timestamp}, {3, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {3, 1}), kThisShard}}; auto rt1 = rt.makeUpdated(boost::none /* timeseriesFields */, boost::none /* reshardingFields */, true, changedChunks); - auto v1 = ChunkVersion({epoch, timestamp}, {3, 1}); + auto v1 = ChunkVersion({collEpoch(), collTimestamp()}, {3, 1}); ASSERT_EQ(v1, rt1.getVersion(kThisShard)); ASSERT_EQ(rt1.numChunks(), 2); auto chunk1 = rt1.findIntersectingChunk(BSON("a" << -500)); - ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {3, 1})); + ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({collEpoch(), collTimestamp()}, {3, 1})); ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0); ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << -10)), 0); } TEST_F(RoutingTableHistoryTest, TestFlatten) { - const UUID uuid = UUID::gen(); - const OID epoch = OID::gen(); - const Timestamp timestamp(1); - std::vector initialChunks = { - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)}, - ChunkVersion({epoch, timestamp}, {2, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 0}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 10), BSON("a" << 20)}, - ChunkVersion({epoch, timestamp}, {2, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 1}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 20), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {2, 2}), + ChunkVersion({collEpoch(), collTimestamp()}, {2, 2}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {3, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {3, 0}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{getShardKeyPattern().globalMin(), BSON("a" << 10)}, - ChunkVersion({epoch, timestamp}, {4, 0}), + ChunkVersion({collEpoch(), collTimestamp()}, {4, 0}), kThisShard}, - ChunkType{uuid, + ChunkType{collUUID(), ChunkRange{BSON("a" << 10), getShardKeyPattern().globalMax()}, - ChunkVersion({epoch, timestamp}, {4, 1}), + ChunkVersion({collEpoch(), collTimestamp()}, {4, 1}), kThisShard}, }; - auto rt = RoutingTableHistory::makeNew(kNss, - UUID::gen(), - getShardKeyPattern(), - nullptr, - false, 
- epoch, - timestamp, - boost::none /* timeseriesFields */, - boost::none /* reshardingFields */, - true, - initialChunks); + auto rt = makeNewRt(initialChunks); ASSERT_EQ(rt.numChunks(), 2); - ASSERT_EQ(rt.getVersion(), ChunkVersion({epoch, timestamp}, {4, 1})); + ASSERT_EQ(rt.getVersion(), ChunkVersion({collEpoch(), collTimestamp()}, {4, 1})); auto chunk1 = rt.findIntersectingChunk(BSON("a" << 0)); - ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({epoch, timestamp}, {4, 0})); + ASSERT_EQ(chunk1->getLastmod(), ChunkVersion({collEpoch(), collTimestamp()}, {4, 0})); ASSERT_EQ(chunk1->getMin().woCompare(getShardKeyPattern().globalMin()), 0); ASSERT_EQ(chunk1->getMax().woCompare(BSON("a" << 10)), 0); } diff --git a/src/mongo/s/s_sharding_server_status.cpp b/src/mongo/s/s_sharding_server_status.cpp index 791d40aa0fbb1..558f39b408d66 100644 --- a/src/mongo/s/s_sharding_server_status.cpp +++ b/src/mongo/s/s_sharding_server_status.cpp @@ -27,10 +27,16 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" #include "mongo/db/commands/server_status.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/vector_clock.h" #include "mongo/executor/hedging_metrics.h" #include "mongo/s/balancer_configuration.h" diff --git a/src/mongo/s/service_entry_point_mongos.cpp b/src/mongo/s/service_entry_point_mongos.cpp index 0dcabb98682f7..f021e8dc1c21a 100644 --- a/src/mongo/s/service_entry_point_mongos.cpp +++ b/src/mongo/s/service_entry_point_mongos.cpp @@ -28,32 +28,54 @@ */ +#include +#include +#include #include - -#include "mongo/platform/basic.h" - -#include "mongo/s/service_entry_point_mongos.h" - -#include "mongo/client/server_discovery_monitor.h" +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/auth/authorization_session.h" -#include "mongo/db/commands.h" +#include "mongo/db/catalog/collection_catalog.h" #include "mongo/db/curop.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/dbmessage.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/not_primary_error_tracker.h" #include "mongo/db/operation_context.h" #include "mongo/db/request_execution_context.h" -#include "mongo/db/service_context.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/session.h" #include "mongo/db/session/session_catalog.h" -#include "mongo/db/stats/counters.h" #include "mongo/db/stats/top.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/check_allowed_op_query_cmd.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/rpc/message.h" #include "mongo/s/commands/strategy.h" #include "mongo/s/grid.h" #include "mongo/s/load_balancer_support.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/s/service_entry_point_mongos.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork 
diff --git a/src/mongo/s/service_entry_point_mongos.h b/src/mongo/s/service_entry_point_mongos.h index 42645ac236fe0..b6be0b4c828d5 100644 --- a/src/mongo/s/service_entry_point_mongos.h +++ b/src/mongo/s/service_entry_point_mongos.h @@ -31,7 +31,13 @@ #include +#include "mongo/base/counter.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/dbmessage.h" +#include "mongo/rpc/message.h" #include "mongo/transport/service_entry_point_impl.h" +#include "mongo/util/future.h" namespace mongo { diff --git a/src/mongo/s/session_catalog_router.cpp b/src/mongo/s/session_catalog_router.cpp index e89d8f8b8609e..3eab918070624 100644 --- a/src/mongo/s/session_catalog_router.cpp +++ b/src/mongo/s/session_catalog_router.cpp @@ -28,12 +28,15 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/session_catalog_router.h" +#include +#include +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/db/session/sessions_collection.h" +#include "mongo/s/session_catalog_router.h" #include "mongo/s/transaction_router.h" +#include "mongo/util/assert_util_core.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/session_catalog_router.h b/src/mongo/s/session_catalog_router.h index 37b5734bc02eb..0044eec612a57 100644 --- a/src/mongo/s/session_catalog_router.h +++ b/src/mongo/s/session_catalog_router.h @@ -29,7 +29,9 @@ #pragma once +#include "mongo/db/operation_context.h" #include "mongo/db/session/session_catalog.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/s/sessions_collection_sharded.cpp b/src/mongo/s/sessions_collection_sharded.cpp index 17902d7ace1c7..5bdcd32a42456 100644 --- a/src/mongo/s/sessions_collection_sharded.cpp +++ b/src/mongo/s/sessions_collection_sharded.cpp @@ -27,28 +27,49 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/s/sessions_collection_sharded.h" - +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/api_parameters.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_request_helper.h" -#include "mongo/db/session/sessions_collection_rs.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/db/shard_id.h" #include "mongo/rpc/op_msg.h" #include "mongo/rpc/op_msg_rpc_impls.h" #include "mongo/s/catalog_cache.h" -#include "mongo/s/client/shard.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/chunk.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/cluster_write.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_find.h" +#include "mongo/s/sessions_collection_sharded.h" #include "mongo/s/write_ops/batch_write_exec.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -64,7 +85,8 @@ std::vector SessionsCollectionSharded::_groupSessionIdsByOwnin const auto [cm, _] = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo( opCtx, NamespaceString::kLogicalSessionsNamespace)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Collection " << NamespaceString::kLogicalSessionsNamespace + str::stream() << "Collection " + << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() << " is not sharded", cm.isSharded()); @@ -89,7 +111,8 @@ std::vector SessionsCollectionSharded::_groupSessionRecord const auto [cm, _] = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo( opCtx, NamespaceString::kLogicalSessionsNamespace)); uassert(ErrorCodes::NamespaceNotSharded, - str::stream() << "Collection " << NamespaceString::kLogicalSessionsNamespace + str::stream() << "Collection " + << NamespaceString::kLogicalSessionsNamespace.toStringForErrorMsg() << " is not sharded", cm.isSharded()); @@ -134,7 +157,7 @@ void SessionsCollectionSharded::refreshSessions(OperationContext* opCtx, BatchedCommandResponse response; BatchWriteExecStats stats; - cluster::write(opCtx, request, &stats, &response); + cluster::write(opCtx, request, nullptr /* nss */, &stats, &response); uassertStatusOK(response.toStatus()); }; @@ -153,7 +176,7 @@ void SessionsCollectionSharded::removeRecords(OperationContext* opCtx, BatchedCommandResponse response; BatchWriteExecStats stats; - cluster::write(opCtx, request, &stats, &response); + cluster::write(opCtx, request, nullptr, &stats, &response); uassertStatusOK(response.toStatus()); }; diff --git a/src/mongo/s/sessions_collection_sharded.h b/src/mongo/s/sessions_collection_sharded.h index 1bb383e25f98d..621c005b76d4f 100644 --- a/src/mongo/s/sessions_collection_sharded.h +++ 
b/src/mongo/s/sessions_collection_sharded.h @@ -30,8 +30,10 @@ #pragma once #include +#include #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/sessions_collection.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/s/sessions_collection_sharded_test.cpp b/src/mongo/s/sessions_collection_sharded_test.cpp index 023ccf627bec4..4c43e3f52cb8b 100644 --- a/src/mongo/s/sessions_collection_sharded_test.cpp +++ b/src/mongo/s/sessions_collection_sharded_test.cpp @@ -27,24 +27,33 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" -#include "mongo/client/remote_command_targeter_factory_mock.h" -#include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/commands.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/cursor_response.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog_cache_test_fixture.h" -#include "mongo/s/client/shard_registry.h" -#include "mongo/s/session_catalog_router.h" #include "mongo/s/sessions_collection_sharded.h" -#include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/s/stale_exception.h" -#include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/concurrency/thread_pool.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { namespace { diff --git a/src/mongo/s/shard_cannot_refresh_due_to_locks_held_exception.cpp b/src/mongo/s/shard_cannot_refresh_due_to_locks_held_exception.cpp index 6e338f06d1932..e1f067259c65c 100644 --- a/src/mongo/s/shard_cannot_refresh_due_to_locks_held_exception.cpp +++ b/src/mongo/s/shard_cannot_refresh_due_to_locks_held_exception.cpp @@ -27,12 +27,11 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h" - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace { @@ -42,7 +41,7 @@ MONGO_INIT_REGISTER_ERROR_EXTRA_INFO(ShardCannotRefreshDueToLocksHeldInfo); } // namespace void ShardCannotRefreshDueToLocksHeldInfo::serialize(BSONObjBuilder* bob) const { - bob->append(kNssFieldName, _nss.ns()); + bob->append(kNssFieldName, NamespaceStringUtil::serialize(_nss)); } std::shared_ptr ShardCannotRefreshDueToLocksHeldInfo::parse( diff --git a/src/mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h b/src/mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h index a7e4d33aad3b2..3d694e9df6920 100644 --- a/src/mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h +++ b/src/mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h @@ -29,8 +29,16 @@ #pragma once +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/s/shard_invalidated_for_targeting_exception.cpp b/src/mongo/s/shard_invalidated_for_targeting_exception.cpp index 52a52dfef147f..0a16dedc03b53 100644 --- a/src/mongo/s/shard_invalidated_for_targeting_exception.cpp +++ b/src/mongo/s/shard_invalidated_for_targeting_exception.cpp @@ -27,12 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/s/shard_invalidated_for_targeting_exception.h" - -#include "mongo/base/init.h" -#include "mongo/util/assert_util.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/util/namespace_string_util.h" namespace mongo { namespace { @@ -44,7 +43,7 @@ constexpr StringData kNss = "nss"_sd; } // namespace void ShardInvalidatedForTargetingInfo::serialize(BSONObjBuilder* bob) const { - bob->append(kNss, _nss.ns()); + bob->append(kNss, NamespaceStringUtil::serialize(_nss)); } std::shared_ptr ShardInvalidatedForTargetingInfo::parse(const BSONObj& obj) { diff --git a/src/mongo/s/shard_invalidated_for_targeting_exception.h b/src/mongo/s/shard_invalidated_for_targeting_exception.h index 16d848cd3728a..a4de18a24affe 100644 --- a/src/mongo/s/shard_invalidated_for_targeting_exception.h +++ b/src/mongo/s/shard_invalidated_for_targeting_exception.h @@ -29,10 +29,14 @@ #pragma once +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" +#include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/s/shard_key_pattern.cpp b/src/mongo/s/shard_key_pattern.cpp index 5be0af5b3f9e5..7adf9e823c0df 100644 --- a/src/mongo/s/shard_key_pattern.cpp +++ b/src/mongo/s/shard_key_pattern.cpp @@ -29,12 +29,20 @@ #include "mongo/s/shard_key_pattern.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/ordering.h" #include "mongo/bson/simple_bsonelement_comparator.h" -#include 
"mongo/db/field_ref_set.h" #include "mongo/db/hasher.h" #include "mongo/db/index_names.h" #include "mongo/db/matcher/path_internal.h" #include "mongo/db/storage/key_string.h" +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { @@ -72,15 +80,20 @@ std::vector> parseShardKeyPattern(const BSONObj& keyPa // Empty parts of the path, ".."? for (size_t i = 0; i < newFieldRef->numParts(); ++i) { + const StringData part = newFieldRef->getPart(i); + uassert(ErrorCodes::BadValue, str::stream() << "Field " << patternEl.fieldNameStringData() << " contains empty parts", - !newFieldRef->getPart(i).empty()); + !part.empty()); + // Reject a shard key that has a field name that starts with '$' or contains parts that + // start with '$' unless the part is a DBRef (i.e. is equal to '$id', '$db' or '$ref'). uassert(ErrorCodes::BadValue, str::stream() << "Field " << patternEl.fieldNameStringData() << " contains parts that start with '$'", - !newFieldRef->getPart(i).startsWith("$")); + !part.startsWith("$") || + (i != 0 && (part == "$db" || part == "$id" || part == "$ref"))); } // Numeric and ascending (1.0), or "hashed" with exactly hashed field. @@ -217,7 +230,7 @@ std::string ShardKeyPattern::toString() const { } std::string ShardKeyPattern::toKeyString(const BSONObj& shardKey) { - KeyString::Builder ks(KeyString::Version::V1, Ordering::allAscending()); + key_string::Builder ks(key_string::Version::V1, Ordering::allAscending()); BSONObjIterator it(shardKey); while (auto elem = it.next()) { diff --git a/src/mongo/s/shard_key_pattern.h b/src/mongo/s/shard_key_pattern.h index 93dad3c5f2657..2ae2610b54e31 100644 --- a/src/mongo/s/shard_key_pattern.h +++ b/src/mongo/s/shard_key_pattern.h @@ -29,9 +29,16 @@ #pragma once +#include +#include +#include +#include #include +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/field_ref.h" #include "mongo/db/jsobj.h" #include "mongo/db/keypattern.h" diff --git a/src/mongo/s/shard_key_pattern_query_util.cpp b/src/mongo/s/shard_key_pattern_query_util.cpp index 37d27d23e1074..327c1ab30e439 100644 --- a/src/mongo/s/shard_key_pattern_query_util.cpp +++ b/src/mongo/s/shard_key_pattern_query_util.cpp @@ -29,14 +29,56 @@ #include "mongo/s/shard_key_pattern_query_util.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/db/basic_types_gen.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/field_ref_set.h" +#include "mongo/db/hasher.h" +#include "mongo/db/index/index_descriptor.h" +#include "mongo/db/index/multikey_paths.h" +#include "mongo/db/index_names.h" +#include "mongo/db/matcher/expression.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" #include "mongo/db/matcher/path_internal.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/index_bounds_builder.h" +#include "mongo/db/query/index_entry.h" +#include "mongo/db/query/interval.h" #include "mongo/db/query/query_planner.h" #include "mongo/db/query/query_planner_common.h" +#include "mongo/db/query/query_planner_params.h" 
+#include "mongo/db/query/query_solution.h" +#include "mongo/db/query/stage_types.h" #include "mongo/db/update/path_support.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/chunk.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/intrusive_counter.h" #include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/shard_key_pattern_query_util.h b/src/mongo/s/shard_key_pattern_query_util.h index 5696f444e20c6..be960efc4d06b 100644 --- a/src/mongo/s/shard_key_pattern_query_util.h +++ b/src/mongo/s/shard_key_pattern_query_util.h @@ -29,8 +29,19 @@ #pragma once +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/query/canonical_query.h" #include "mongo/db/query/index_bounds.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/chunk_manager.h" #include "mongo/s/shard_key_pattern.h" diff --git a/src/mongo/s/shard_key_pattern_query_util_index_bounds_test.cpp b/src/mongo/s/shard_key_pattern_query_util_index_bounds_test.cpp index c498d7ea8e668..fa23493a0a503 100644 --- a/src/mongo/s/shard_key_pattern_query_util_index_bounds_test.cpp +++ b/src/mongo/s/shard_key_pattern_query_util_index_bounds_test.cpp @@ -27,14 +27,41 @@ * it in the license file. */ +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" #include "mongo/db/hasher.h" -#include "mongo/db/json.h" +#include "mongo/db/matcher/expression_parser.h" #include "mongo/db/matcher/extensions_callback_noop.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/pipeline/expression_context.h" #include "mongo/db/pipeline/expression_context_for_test.h" #include "mongo/db/query/canonical_query.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/query/index_bounds.h" +#include "mongo/db/query/interval.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/intrusive_counter.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/s/shard_key_pattern_test.cpp b/src/mongo/s/shard_key_pattern_test.cpp index 0d8f96d697e6a..9ab4f04c4b747 100644 --- a/src/mongo/s/shard_key_pattern_test.cpp +++ b/src/mongo/s/shard_key_pattern_test.cpp @@ -27,13 +27,32 @@ * it in the license file. 
*/ +#include +#include + +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/json.h" #include "mongo/db/bson/dotted_path_support.h" #include "mongo/db/hasher.h" -#include "mongo/db/json.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/shard_key_pattern_query_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { @@ -41,8 +60,6 @@ namespace { class ShardKeyPatternTest : public ServiceContextTest { protected: ShardKeyPatternTest() { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); _opCtxHolder = makeOperationContext(); _opCtx = _opCtxHolder.get(); } @@ -82,12 +99,18 @@ TEST_F(ShardKeyPatternTest, SingleFieldShardKeyPatternsValidityCheck) { ASSERT_THROWS(ShardKeyPattern(BSON("$" << 1)), DBException); ASSERT_THROWS(ShardKeyPattern(BSON("$a" << 1)), DBException); ASSERT_THROWS(ShardKeyPattern(BSON("$**" << 1)), DBException); + ASSERT_THROWS(ShardKeyPattern(BSON("$id" << 1)), DBException); + ASSERT_THROWS(ShardKeyPattern(BSON("$db" << 1)), DBException); + ASSERT_THROWS(ShardKeyPattern(BSON("$ref" << 1)), DBException); } TEST_F(ShardKeyPatternTest, CompositeShardKeyPatternsValidityCheck) { ShardKeyPattern s1(BSON("a" << 1 << "b" << 1)); ShardKeyPattern s2(BSON("a" << 1.0f << "b" << 1.0)); ShardKeyPattern s3(BSON("a" << 1 << "b" << 1.0 << "c" << 1.0f)); + ShardKeyPattern s4(BSON("a.$id" << 1)); + ShardKeyPattern s5(BSON("a.$db" << 1)); + ShardKeyPattern s6(BSON("a.$ref" << 1)); ASSERT_THROWS(ShardKeyPattern(BSON("a" << 1 << "b" << -1)), DBException); ASSERT_THROWS(ShardKeyPattern(BSON("a" << 1 << "b" diff --git a/src/mongo/s/shard_util.cpp b/src/mongo/s/shard_util.cpp index 6d58aab032321..d344a16d32e3b 100644 --- a/src/mongo/s/shard_util.cpp +++ b/src/mongo/s/shard_util.cpp @@ -28,21 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/shard_util.h" - +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/bson/util/bson_extract.h" #include "mongo/client/read_preference.h" -#include "mongo/client/remote_command_targeter.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/s/auto_split_vector.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/auto_split_vector_gen.h" #include "mongo/s/shard_key_pattern.h" +#include 
"mongo/s/shard_util.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/namespace_string_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -102,7 +124,8 @@ StatusWith retrieveCollectionShardSize(OperationContext* opCtx, } const Minutes maxTimeMSOverride{10}; - const auto cmdObj = BSON("dataSize" << ns.ns() << "estimate" << estimate); + const auto cmdObj = + BSON("dataSize" << NamespaceStringUtil::serialize(ns) << "estimate" << estimate); auto statStatus = shardStatus.getValue()->runCommandWithFixedRetryAttempts( opCtx, ReadPreferenceSetting{ReadPreference::PrimaryPreferred}, @@ -204,7 +227,7 @@ StatusWith> splitChunkAtMultiplePoints( } BSONObjBuilder cmd; - cmd.append("splitChunk", nss.ns()); + cmd.append("splitChunk", NamespaceStringUtil::serialize(nss)); cmd.append("from", shardId.toString()); cmd.append("keyPattern", shardKeyPattern.toBSON()); cmd.append("epoch", epoch); diff --git a/src/mongo/s/shard_util.h b/src/mongo/s/shard_util.h index 95d614902f776..8fce3d114f1c4 100644 --- a/src/mongo/s/shard_util.h +++ b/src/mongo/s/shard_util.h @@ -29,12 +29,23 @@ #pragma once +#include #include +#include +#include #include #include +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/shard_id.h" #include "mongo/s/catalog/type_chunk.h" #include "mongo/s/client/shard.h" +#include "mongo/s/shard_key_pattern.h" namespace mongo { diff --git a/src/mongo/s/shard_version.cpp b/src/mongo/s/shard_version.cpp index 2403ea0bc8a5c..dcff641237783 100644 --- a/src/mongo/s/shard_version.cpp +++ b/src/mongo/s/shard_version.cpp @@ -29,6 +29,11 @@ #include "mongo/s/shard_version.h" +#include +#include + +#include "mongo/idl/idl_parser.h" +#include "mongo/s/index_version_gen.h" #include "mongo/s/shard_version_gen.h" namespace mongo { @@ -56,4 +61,12 @@ std::string ShardVersion::toString() const { return (_indexVersion ? 
_indexVersion->toString() : "") + "||" + placementVersion().toString(); } +BSONObj ShardVersion::toBSON() const { + BSONObjBuilder builder; + if (_indexVersion) { + builder.append("indexVersion", *_indexVersion); + } + builder.append("placementVersion", placementVersion().toBSON()); + return builder.obj(); +} } // namespace mongo diff --git a/src/mongo/s/shard_version.h b/src/mongo/s/shard_version.h index 2cbb4c62b8502..2e4be95c0a9d8 100644 --- a/src/mongo/s/shard_version.h +++ b/src/mongo/s/shard_version.h @@ -28,6 +28,20 @@ */ #pragma once +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/s/chunk_version.h" #include "mongo/s/index_version.h" @@ -86,6 +100,8 @@ class ShardVersion { std::string toString() const; + BSONObj toBSON() const; + private: ShardVersion(const ChunkVersion& chunkVersion, const boost::optional& collectionIndexes) diff --git a/src/mongo/s/shard_version_factory.cpp b/src/mongo/s/shard_version_factory.cpp index c99d5d0f03fd6..5442c51c7723c 100644 --- a/src/mongo/s/shard_version_factory.cpp +++ b/src/mongo/s/shard_version_factory.cpp @@ -29,6 +29,14 @@ #include "mongo/s/shard_version_factory.h" +#include + +#include + +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" + namespace mongo { ShardVersion ShardVersionFactory::make( diff --git a/src/mongo/s/shard_version_factory.h b/src/mongo/s/shard_version_factory.h index 1b3884c1810a2..ed453ce340c3f 100644 --- a/src/mongo/s/shard_version_factory.h +++ b/src/mongo/s/shard_version_factory.h @@ -28,7 +28,13 @@ */ #pragma once +#include + #include "mongo/db/s/collection_metadata.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/shard_version.h" namespace mongo { diff --git a/src/mongo/s/shard_version_test.cpp b/src/mongo/s/shard_version_test.cpp index 9c40bddaf5bf7..1d7e44738ae03 100644 --- a/src/mongo/s/shard_version_test.cpp +++ b/src/mongo/s/shard_version_test.cpp @@ -27,8 +27,12 @@ * it in the license file. 
*/ #include "mongo/s/shard_version.h" + +#include "mongo/bson/oid.h" #include "mongo/s/shard_version_factory.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { diff --git a/src/mongo/s/sharding_feature_flags.idl b/src/mongo/s/sharding_feature_flags.idl index 36929ede96504..858187dc40731 100644 --- a/src/mongo/s/sharding_feature_flags.idl +++ b/src/mongo/s/sharding_feature_flags.idl @@ -36,86 +36,53 @@ feature_flags: description: "Feature flag for enabling sharding catalog features for global indexes" cpp_varname: feature_flags::gGlobalIndexesShardingCatalog default: false - featureFlagRangeDeleterService: - description: "Feature flag protecting instantiation and usages of the range deleter service" - cpp_varname: feature_flags::gRangeDeleterService - default: true - version: 6.2 - featureFlagCollModCoordinatorV3: - description: "Feature for enabling new coll mod coordinator v3" - cpp_varname: feature_flags::gCollModCoordinatorV3 - default: true - version: 6.1 - featureFlagCreateCollectionCoordinatorV3: - description: "Feature for enabling new createCollection coordinator v3" - cpp_varname: feature_flags::gCreateCollectionCoordinatorV3 - default: true - version: 6.2 - # TODO SERVER-68217 remove once 7.0 becomes last LTS - featureFlagHistoricalPlacementShardingCatalog: - description: "Feature flag for enabling the storage and access to historical placement data at shards granularity through the Sharding Catalog" - cpp_varname: feature_flags::gHistoricalPlacementShardingCatalog - default: true - version: 7.0 - featureFlagImplicitDDLTimeseriesNssTranslation: - description: "When enabled, the logic to evaluate whether a DDL is targeting a Timeseries operation - will always be executed by the DDL Coordinator (VS doing the evaluation when the command is received by the primary shard). - The feature increases causal consistency guarantees, but it is not backwards-compatible" - cpp_varname: feature_flags::gImplicitDDLTimeseriesNssTranslation - default: true - version: 6.1 + shouldBeFCVGated: true featureFlagConcurrencyInChunkMigration: description: "Feature flag for enabling concurrency within a chunk migration" cpp_varname: feature_flags::gConcurrencyInChunkMigration default: true version: 6.3 + shouldBeFCVGated: true featureFlagStopUsingConfigVersion: # TODO SERVER-68889 remove once 7.0 becomes last LTS description: "Stop using deprecated config version fields to check metadata compatibility between different version" cpp_varname: feature_flags::gStopUsingConfigVersion default: true version: 6.2 - # TODO (SERVER-71309): Remove once 7.0 becomes last LTS. 
- featureFlagResilientMovePrimary: - description: "Enable the resilient coordinator for the movePrimary command in order to improve - the tolerance in case of a failure on donor and recipient nodes" - cpp_varname: feature_flags::gResilientMovePrimary - default: true - version: 7.0 - featureFlagConfigSettingsSchema: - description: "Feature flag for adding schema to config.settings collection" - cpp_varname: feature_flags::gConfigSettingsSchema - default: true - version: 6.2 - featureFlagAutoMerger: - description: "Feature flag for enabling auto-merging of contiguous chunks belonging to the same shard" - cpp_varname: feature_flags::gAutoMerger - default: true - version: 7.0 + shouldBeFCVGated: true # TODO (SERVER-70396): Remove once 7.0 becomes last LTS featureFlagCheckMetadataConsistency: description: "Feature flag for checking metadata consistency in the cluster, database or collection" cpp_varname: feature_flags::gCheckMetadataConsistency default: true version: 7.0 + shouldBeFCVGated: true # TODO SERVER-73627: Remove once 7.0 becomes last LTS. featureFlagDropCollectionHoldingCriticalSection: description: "Feature flag for enabling the new implementation of the dropCollection DDL operation." cpp_varname: feature_flags::gDropCollectionHoldingCriticalSection default: true version: 7.0 - # TODO (SERVER-74477): Remove once 7.0 becomes last LTS. - featureFlagAllowMigrationsRefreshToAll: - description: "Feature flag for sending flushRoutingTableCacheUpdates to all shards in setAllowMigrations" - cpp_varname: feature_flags::gAllowMigrationsRefreshToAll - default: true - version: 7.0 + shouldBeFCVGated: true featureFlagCheckForDirectShardOperations: description: "Feature flag for checking for direct shard operations." cpp_varname: feature_flags::gCheckForDirectShardOperations - default: false - featureFlagClusterCardinalityParameter: - description: "Feature flag for enabling the cluster parameter tracking cluster cardinality." - cpp_varname: feature_flags::gClusterCardinalityParameter default: true - version: 7.0 + version: 7.1 + shouldBeFCVGated: true + featureFlagMultipleGranularityDDLLocking: + description: "Feature flag for enabling multiple granularity DDL locking" + cpp_varname: feature_flags::gMultipleGranularityDDLLocking + default: false + shouldBeFCVGated: false + # TODO (SERVER-73632): Remove once 8.0 becomes last LTS. 
+ featureFlagCohostedRouter: + description: "Feature flag to enable shard to double as router" + cpp_varname: feature_flags::gCohostedRouter + default: false + shouldBeFCVGated: true + featureFlagClusterFsyncLock: + description: "Feature flag to enable locking of a cluster through mongos" + cpp_varname: feature_flags::gClusterFsyncLock + default: false + shouldBeFCVGated: true diff --git a/src/mongo/s/sharding_index_catalog_cache.cpp b/src/mongo/s/sharding_index_catalog_cache.cpp index df57b0b5f488e..b52fa9ceaae5b 100644 --- a/src/mongo/s/sharding_index_catalog_cache.cpp +++ b/src/mongo/s/sharding_index_catalog_cache.cpp @@ -29,6 +29,21 @@ #include "mongo/s/sharding_index_catalog_cache.h" +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { diff --git a/src/mongo/s/sharding_index_catalog_cache.h b/src/mongo/s/sharding_index_catalog_cache.h index 1214a057f4709..f74605cfa67c1 100644 --- a/src/mongo/s/sharding_index_catalog_cache.h +++ b/src/mongo/s/sharding_index_catalog_cache.h @@ -29,10 +29,24 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/namespace_string.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/atomic_word.h" #include "mongo/s/catalog/type_index_catalog.h" +#include "mongo/s/catalog/type_index_catalog_gen.h" #include "mongo/s/index_version.h" #include "mongo/util/read_through_cache.h" +#include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/s/sharding_index_catalog_cache_test.cpp b/src/mongo/s/sharding_index_catalog_cache_test.cpp index 2abfe0b9da904..a32d49fc4c42c 100644 --- a/src/mongo/s/sharding_index_catalog_cache_test.cpp +++ b/src/mongo/s/sharding_index_catalog_cache_test.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/logv2/log.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/s/sharding_index_catalog_cache.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/sharding_initialization.cpp b/src/mongo/s/sharding_initialization.cpp index 26b214a03d030..e239c3eb5f18f 100644 --- a/src/mongo/s/sharding_initialization.cpp +++ b/src/mongo/s/sharding_initialization.cpp @@ -28,25 +28,36 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/sharding_initialization.h" - +#include +#include #include #include +#include +#include + +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/db/audit.h" -#include "mongo/db/keys_collection_client_sharded.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/client/connection_string.h" +#include "mongo/db/cluster_role.h" #include "mongo/db/keys_collection_manager.h" +#include "mongo/db/keys_collection_manager_gen.h" #include "mongo/db/logical_time_validator.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime_with.h" +#include "mongo/db/repl/read_concern_level.h" #include "mongo/db/repl/replication_coordinator.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" -#include "mongo/db/time_proof_service.h" #include "mongo/executor/async_multicaster.h" #include "mongo/executor/connection_pool.h" -#include "mongo/executor/connection_pool_stats.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface.h" #include "mongo/executor/network_interface_factory.h" #include "mongo/executor/network_interface_thread_pool.h" #include "mongo/executor/scoped_task_executor.h" @@ -54,13 +65,17 @@ #include "mongo/executor/task_executor_pool.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/logv2/log.h" -#include "mongo/rpc/metadata/metadata_hook.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/analyze_shard_key_role.h" #include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/sharding_catalog_client_impl.h" +#include "mongo/s/catalog/type_database_gen.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/catalog_cache.h" #include "mongo/s/client/num_hosts_targeted_metrics.h" -#include "mongo/s/client/shard_factory.h" #include "mongo/s/client/sharding_network_connection_hook.h" #include "mongo/s/cluster_identity_loader.h" #include "mongo/s/grid.h" @@ -69,14 +84,16 @@ #include "mongo/s/query/cluster_cursor_manager.h" #include "mongo/s/query_analysis_client.h" #include "mongo/s/query_analysis_sampler.h" +#include "mongo/s/sharding_initialization.h" #include "mongo/s/sharding_task_executor.h" #include "mongo/s/sharding_task_executor_pool_controller.h" -#include "mongo/s/sharding_task_executor_pool_gen.h" -#include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/thread_pool.h" +#include 
"mongo/util/duration.h" #include "mongo/util/exit.h" -#include "mongo/util/net/socket_utils.h" -#include "mongo/util/str.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -203,6 +220,7 @@ Status initializeGlobalShardingState( // The shard registry must be started once the grid is initialized grid->shardRegistry()->startupPeriodicReloader(opCtx); + // Start up the cluster time keys manager with a sharded keys client. auto keysCollectionClient = initKeysClient(grid->catalogClient()); auto keyManager = std::make_shared(KeysCollectionManager::kKeyManagerPurposeString, @@ -213,13 +231,9 @@ Status initializeGlobalShardingState( LogicalTimeValidator::set(service, std::make_unique(keyManager)); initializeTenantToShardCache(service); - // The checks below ignore the FCV because FCV is not initialized until after the replica set - // is initiated. - if (analyze_shard_key::isFeatureFlagEnabled(true /* ignoreFCV */)) { - analyze_shard_key::QueryAnalysisClient::get(opCtx).setTaskExecutor( - service, Grid::get(service)->getExecutorPool()->getFixedExecutor()); - } - if (analyze_shard_key::supportsSamplingQueries(service, true /* ignoreFCV */)) { + analyze_shard_key::QueryAnalysisClient::get(opCtx).setTaskExecutor( + service, Grid::get(service)->getExecutorPool()->getFixedExecutor()); + if (analyze_shard_key::supportsSamplingQueries(service)) { analyze_shard_key::QueryAnalysisSampler::get(service).onStartup(); } @@ -227,7 +241,7 @@ Status initializeGlobalShardingState( } void loadCWWCFromConfigServerForReplication(OperationContext* opCtx) { - if (!serverGlobalParams.clusterRole.exclusivelyHasShardRole()) { + if (!serverGlobalParams.clusterRole.hasExclusively(ClusterRole::ShardServer)) { // Cluster wide read/write concern in a sharded cluster lives on the config server, so a // config server node's local cache will be correct and explicitly checking for a default // write concern via remote command is unnecessary. @@ -246,17 +260,20 @@ Status loadGlobalSettingsFromConfigServer(OperationContext* opCtx, } try { - // It's safe to use local read concern on a config server because we'll read from the - // local node, and we only enter here if we found a shardIdentity document, which could - // only exist locally if we already inserted the cluster identity document. Between - // inserting a cluster id and adding a shard, there is at least one majority write on - // the added shard (dropping the sessions collection), so we should be guaranteed the - // cluster id cannot roll back. - auto readConcern = serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer) - ? repl::ReadConcernLevel::kLocalReadConcern - : repl::ReadConcernLevel::kMajorityReadConcern; - uassertStatusOK(ClusterIdentityLoader::get(opCtx)->loadClusterId( - opCtx, catalogClient, readConcern)); + // TODO SERVER-78051: Re-evaluate use of ClusterIdentityLoader. + // + // Skip loading the cluster id on config servers to avoid an issue where a failed + // initial sync may lead the config server to transiently have a shard identity document + // but no cluster id, which would trigger infinite retries. + // + // To match the shard behavior, the config server should load the cluster id, but + // currently shards never use the loaded cluster id, so skipping the load is safe. Only + // the config server uses it when adding a new shard, and each config server node will + // load this on its first step up to primary. 
+ if (!serverGlobalParams.clusterRole.has(ClusterRole::ConfigServer)) { + uassertStatusOK(ClusterIdentityLoader::get(opCtx)->loadClusterId( + opCtx, catalogClient, repl::ReadConcernLevel::kMajorityReadConcern)); + } // Assert will be raised on failure to talk to config server. loadCWWCFromConfigServerForReplication(opCtx); diff --git a/src/mongo/s/sharding_initialization.h b/src/mongo/s/sharding_initialization.h index d9c7d8c126bc9..f4adcf6e6aa48 100644 --- a/src/mongo/s/sharding_initialization.h +++ b/src/mongo/s/sharding_initialization.h @@ -30,12 +30,18 @@ #pragma once #include +#include +#include #include #include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/oid.h" #include "mongo/db/keys_collection_client.h" +#include "mongo/db/operation_context.h" +#include "mongo/executor/task_executor.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/client/shard_registry.h" namespace mongo { @@ -54,6 +60,7 @@ class TaskExecutor; namespace rpc { class EgressMetadataHook; + using ShardingEgressMetadataHookBuilder = std::function()>; } // namespace rpc diff --git a/src/mongo/s/sharding_router_test_fixture.cpp b/src/mongo/s/sharding_router_test_fixture.cpp index 37d7ac1f9e5c1..9b973e496d92f 100644 --- a/src/mongo/s/sharding_router_test_fixture.cpp +++ b/src/mongo/s/sharding_router_test_fixture.cpp @@ -27,49 +27,79 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/s/sharding_router_test_fixture.h" - +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" #include +#include +#include #include +#include +#include +#include #include -#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/connection_string.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/db/client.h" #include "mongo/db/client_metadata_propagation_egress_hook.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" +#include "mongo/db/logical_time.h" #include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/collation/collator_factory_interface.h" #include "mongo/db/query/collation/collator_factory_mock.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/query_request_helper.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/service_context.h" #include "mongo/db/vector_clock.h" #include "mongo/db/vector_clock_metadata_hook.h" +#include "mongo/executor/network_connection_hook.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor_pool.h" +#include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" #include "mongo/rpc/metadata/egress_metadata_hook_list.h" +#include "mongo/rpc/metadata/metadata_hook.h" #include "mongo/rpc/metadata/repl_set_metadata.h" #include "mongo/rpc/metadata/tracking_metadata.h" +#include "mongo/rpc/op_msg.h" #include "mongo/s/balancer_configuration.h" #include "mongo/s/catalog/sharding_catalog_client_impl.h" #include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog/type_collection_gen.h" #include 
"mongo/s/catalog/type_shard.h" #include "mongo/s/catalog_cache.h" +#include "mongo/s/catalog_cache_loader.h" #include "mongo/s/client/num_hosts_targeted_metrics.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_factory.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/client/shard_remote.h" #include "mongo/s/config_server_catalog_cache_loader.h" #include "mongo/s/grid.h" #include "mongo/s/query/cluster_cursor_manager.h" +#include "mongo/s/sharding_router_test_fixture.h" #include "mongo/s/sharding_task_executor.h" #include "mongo/s/write_ops/batched_command_response.h" #include "mongo/transport/mock_session.h" -#include "mongo/transport/transport_layer_mock.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/net/sockaddr.h" +#include "mongo/util/tick_source.h" #include "mongo/util/tick_source_mock.h" namespace mongo { @@ -249,13 +279,14 @@ void ShardingTestFixture::setupShards(const std::vector& shards) { void ShardingTestFixture::expectGetShards(const std::vector& shards) { onFindCommand([this, &shards](const RemoteCommandRequest& request) { - const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String()); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + request.dbname, request.cmdObj.firstElement().String()); ASSERT_EQ(nss, NamespaceString::kConfigsvrShardsNamespace); // If there is no '$db', append it. auto cmd = OpMsgRequest::fromDBAndBody(nss.db(), request.cmdObj).body; auto query = query_request_helper::makeFromFindCommandForTests(cmd, nss); - ASSERT_EQ(*query->getNamespaceOrUUID().nss(), NamespaceString::kConfigsvrShardsNamespace); + ASSERT_EQ(query->getNamespaceOrUUID().nss(), NamespaceString::kConfigsvrShardsNamespace); ASSERT_BSONOBJ_EQ(query->getFilter(), BSONObj()); ASSERT_BSONOBJ_EQ(query->getSort(), BSONObj()); @@ -321,7 +352,7 @@ void ShardingTestFixture::expectUpdateCollection(const HostAndPort& expectedHost const auto& update = updates.front(); ASSERT_EQ(expectUpsert, update.getUpsert()); ASSERT(!update.getMulti()); - ASSERT_BSONOBJ_EQ(BSON(CollectionType::kNssFieldName << coll.getNss().toString()), + ASSERT_BSONOBJ_EQ(BSON(CollectionType::kNssFieldName << coll.getNss().toString_forTest()), update.getQ()); const auto& updateBSON = update.getU().type() == write_ops::UpdateModification::Type::kReplacement @@ -345,8 +376,9 @@ void ShardingTestFixture::expectCount(const HostAndPort& configHost, ASSERT_EQUALS(configHost, request.target); const std::string cmdName(request.cmdObj.firstElement().fieldName()); ASSERT_EQUALS("count", cmdName); - const NamespaceString nss(request.dbname, request.cmdObj.firstElement().String()); - ASSERT_EQUALS(expectedNs.toString(), nss.toString()); + const NamespaceString nss = NamespaceString::createNamespaceString_forTest( + request.dbname, request.cmdObj.firstElement().String()); + ASSERT_EQUALS(expectedNs.toString_forTest(), nss.toString_forTest()); if (expectedQuery.isEmpty()) { auto queryElem = request.cmdObj["query"]; diff --git a/src/mongo/s/sharding_router_test_fixture.h b/src/mongo/s/sharding_router_test_fixture.h index c93a6d4a9a868..cb2005c59ce11 100644 --- a/src/mongo/s/sharding_router_test_fixture.h +++ b/src/mongo/s/sharding_router_test_fixture.h @@ -29,8 +29,29 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" 
+#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/task_executor.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/catalog/type_collection.h" #include "mongo/s/catalog/type_shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/sharding_test_fixture_common.h" +#include "mongo/transport/session.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/s/sharding_task_executor.cpp b/src/mongo/s/sharding_task_executor.cpp index fd6b4e02f12ed..93bec3fc8032c 100644 --- a/src/mongo/s/sharding_task_executor.cpp +++ b/src/mongo/s/sharding_task_executor.cpp @@ -28,21 +28,37 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/s/sharding_task_executor.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/status_with.h" -#include "mongo/bson/timestamp.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/crypto/hash_block.h" #include "mongo/db/logical_time.h" #include "mongo/db/operation_time_tracker.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/executor/connection_pool_stats.h" #include "mongo/executor/thread_pool_task_executor.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/s/client/shard.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/grid.h" #include "mongo/s/is_mongos.h" -#include "mongo/s/transaction_router.h" +#include "mongo/s/sharding_task_executor.h" +#include "mongo/util/assert_util.h" #include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/sharding_task_executor.h b/src/mongo/s/sharding_task_executor.h index 3db773c5f42ee..d0a3c8c3d682d 100644 --- a/src/mongo/s/sharding_task_executor.h +++ b/src/mongo/s/sharding_task_executor.h @@ -33,9 +33,17 @@ #include #include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" +#include "mongo/util/future.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace executor { diff --git a/src/mongo/s/sharding_task_executor_pool_controller.cpp b/src/mongo/s/sharding_task_executor_pool_controller.cpp index 826145e33397a..1d706d38c927f 100644 --- a/src/mongo/s/sharding_task_executor_pool_controller.cpp +++ b/src/mongo/s/sharding_task_executor_pool_controller.cpp @@ -28,14 +28,27 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/error_codes.h" +#include "mongo/client/connection_string.h" #include "mongo/client/replica_set_monitor.h" #include "mongo/executor/connection_pool_stats.h" #include "mongo/logv2/log.h" +#include 
"mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/client/shard_registry.h" #include "mongo/s/is_mongos.h" #include "mongo/s/sharding_task_executor_pool_controller.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kConnectionPool diff --git a/src/mongo/s/sharding_task_executor_pool_controller.h b/src/mongo/s/sharding_task_executor_pool_controller.h index 353da591ab631..b2934e64d90a0 100644 --- a/src/mongo/s/sharding_task_executor_pool_controller.h +++ b/src/mongo/s/sharding_task_executor_pool_controller.h @@ -30,13 +30,27 @@ #pragma once #include +#include +#include +#include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/client/replica_set_change_notifier.h" +#include "mongo/db/tenant_id.h" #include "mongo/executor/connection_pool.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/synchronized_value.h" namespace mongo { diff --git a/src/mongo/s/sharding_task_executor_test.cpp b/src/mongo/s/sharding_task_executor_test.cpp index ac9ef0ad86ef2..1480a35dedac8 100644 --- a/src/mongo/s/sharding_task_executor_test.cpp +++ b/src/mongo/s/sharding_task_executor_test.cpp @@ -28,16 +28,32 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include + +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/remote_command_targeter_mock.h" -#include "mongo/executor/task_executor_test_common.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/executor/network_interface_mock.h" #include "mongo/executor/task_executor_test_fixture.h" #include "mongo/executor/thread_pool_mock.h" #include "mongo/executor/thread_pool_task_executor.h" #include "mongo/executor/thread_pool_task_executor_test_fixture.h" +#include "mongo/idl/idl_parser.h" #include "mongo/s/sharding_router_test_fixture.h" #include "mongo/s/sharding_task_executor.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kExecutor diff --git a/src/mongo/s/sharding_test_fixture_common.cpp b/src/mongo/s/sharding_test_fixture_common.cpp index d891353ba3587..623ad4e94ff59 100644 --- a/src/mongo/s/sharding_test_fixture_common.cpp +++ b/src/mongo/s/sharding_test_fixture_common.cpp @@ -28,29 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/sharding_test_fixture_common.h" - -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/logv2/log.h" +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/db/client.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/storage/storage_options.h" +#include "mongo/executor/network_interface.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/rpc/op_msg.h" #include 
"mongo/s/catalog/type_changelog.h" +#include "mongo/s/grid.h" +#include "mongo/s/sharding_test_fixture_common.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault - namespace mongo { using executor::NetworkTestEnv; using executor::RemoteCommandRequest; using unittest::assertGet; -ShardingTestFixtureCommon::ShardingTestFixtureCommon() { - auto service = getServiceContext(); - service->registerClientObserver( - std::make_unique()); +ShardingTestFixtureCommon::ShardingTestFixtureCommon() : _tempDir("sharding_test_fixture_common") { + storageGlobalParams.dbpath = _tempDir.path(); } ShardingTestFixtureCommon::~ShardingTestFixtureCommon() { diff --git a/src/mongo/s/sharding_test_fixture_common.h b/src/mongo/s/sharding_test_fixture_common.h index e58624c9db2f5..089486e6475f3 100644 --- a/src/mongo/s/sharding_test_fixture_common.h +++ b/src/mongo/s/sharding_test_fixture_common.h @@ -29,12 +29,29 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/client/remote_command_targeter_factory_mock.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/executor/network_interface_mock.h" #include "mongo/executor/network_test_env.h" +#include "mongo/s/catalog/sharding_catalog_client.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/grid.h" #include "mongo/transport/session.h" +#include "mongo/unittest/temp_dir.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -134,6 +151,9 @@ class ShardingTestFixtureCommon : public virtual ServiceContextTest { private: // Keeps the lifetime of the operation context ServiceContext::UniqueOperationContext _opCtxHolder; + + // The temporary dbpath for the tests in this fixture. 
+ unittest::TempDir _tempDir; }; } // namespace mongo diff --git a/src/mongo/s/sharding_uptime_reporter.cpp b/src/mongo/s/sharding_uptime_reporter.cpp index 011a1c6f74d58..66d4ee936e036 100644 --- a/src/mongo/s/sharding_uptime_reporter.cpp +++ b/src/mongo/s/sharding_uptime_reporter.cpp @@ -28,22 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/sharding_uptime_reporter.h" - +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" +#include "mongo/db/operation_context.h" #include "mongo/db/read_write_concern_defaults.h" #include "mongo/db/server_options.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" #include "mongo/s/balancer_configuration.h" +#include "mongo/s/catalog/sharding_catalog_client.h" #include "mongo/s/catalog/type_mongos.h" #include "mongo/s/grid.h" +#include "mongo/s/sharding_uptime_reporter.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/idle_thread_block.h" +#include "mongo/util/duration.h" #include "mongo/util/exit.h" +#include "mongo/util/fail_point.h" #include "mongo/util/net/hostname_canonicalization.h" #include "mongo/util/net/socket_utils.h" #include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -119,6 +140,12 @@ void ShardingUptimeReporter::startPeriodicThread() { _thread = stdx::thread([created] { Client::initThread("Uptime-reporter"); + // TODO(SERVER-74658): Please revisit if this thread could be made killable. 
+ { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + const std::string hostName(getHostNameCached()); const std::string instanceId(constructInstanceIdString(hostName)); const Timer upTimeTimer; diff --git a/src/mongo/s/stale_exception.cpp b/src/mongo/s/stale_exception.cpp index 7d3e3e43b1c2c..0c5e0c909a93a 100644 --- a/src/mongo/s/stale_exception.cpp +++ b/src/mongo/s/stale_exception.cpp @@ -29,9 +29,17 @@ #include "mongo/s/stale_exception.h" -#include "mongo/base/init.h" +#include + +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/bson/bsonelement.h" #include "mongo/s/shard_version.h" #include "mongo/util/assert_util.h" +#include "mongo/util/namespace_string_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { @@ -43,7 +51,7 @@ MONGO_INIT_REGISTER_ERROR_EXTRA_INFO(StaleDbRoutingVersion); } // namespace void StaleConfigInfo::serialize(BSONObjBuilder* bob) const { - bob->append("ns", _nss.ns()); + bob->append("ns", NamespaceStringUtil::serialize(_nss)); _received.serialize("vReceived", bob); if (_wanted) _wanted->serialize("vWanted", bob); @@ -68,11 +76,9 @@ std::shared_ptr StaleConfigInfo::parse(const BSONObj& obj) } void StaleEpochInfo::serialize(BSONObjBuilder* bob) const { - bob->append("ns", _nss.ns()); - if (_received) - _received->serialize("vReceived", bob); - if (_wanted) - _wanted->serialize("vWanted", bob); + bob->append("ns", NamespaceStringUtil::serialize(_nss)); + _received.serialize("vReceived", bob); + _wanted.serialize("vWanted", bob); } std::shared_ptr StaleEpochInfo::parse(const BSONObj& obj) { @@ -85,15 +91,13 @@ std::shared_ptr StaleEpochInfo::parse(const BSONObj& obj) wanted = ShardVersion::parse(vWantedElem); uassert(6375907, - str::stream() << "Either both vReceived (" << received << ")" - << " and vWanted (" << wanted << ") must be present or none", - received.is_initialized() == wanted.is_initialized()); - - if (received) - return std::make_shared( - NamespaceString(obj["ns"].String()), *received, *wanted); - else - return std::make_shared(NamespaceString(obj["ns"].String())); + str::stream() << "Both vReceived (" << received << ")" + << " and vWanted (" << wanted << ") must be present", + received && wanted); + + + return std::make_shared( + NamespaceString(obj["ns"].String()), *received, *wanted); } void StaleDbRoutingVersion::serialize(BSONObjBuilder* bob) const { diff --git a/src/mongo/s/stale_exception.h b/src/mongo/s/stale_exception.h index 3da30934803af..50f68e6b553d8 100644 --- a/src/mongo/s/stale_exception.h +++ b/src/mongo/s/stale_exception.h @@ -29,11 +29,23 @@ #pragma once +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/shard_id.h" #include "mongo/s/database_version.h" #include "mongo/s/shard_version.h" #include "mongo/util/concurrency/notification.h" +#include "mongo/util/future.h" namespace mongo { @@ -93,8 +105,8 @@ class StaleConfigInfo final : public ErrorExtraInfo { boost::optional _duringOperationType; }; -// TODO (SERVER-74380): Rename the StaleEpoch code to StaleDownstreamRouter and the info to -// StaleDownstreamRouterInfo +// TODO (SERVER-75888): Rename the StaleEpoch code to StaleUpstreamRouter and the info to +// StaleUpstreamRouterInfo class StaleEpochInfo final : public ErrorExtraInfo { public: static constexpr auto code = 
ErrorCodes::StaleEpoch; @@ -102,9 +114,6 @@ class StaleEpochInfo final : public ErrorExtraInfo { StaleEpochInfo(NamespaceString nss, ShardVersion received, ShardVersion wanted) : _nss(std::move(nss)), _received(received), _wanted(wanted) {} - // TODO (SERVER-74380): Remove this constructor - StaleEpochInfo(NamespaceString nss) : _nss(std::move(nss)) {} - const auto& getNss() const { return _nss; } @@ -123,10 +132,8 @@ class StaleEpochInfo final : public ErrorExtraInfo { private: NamespaceString _nss; - // TODO (SERVER-74380): These two fields are boost::optional for backwards compatibility. Either - // both of them are boost::none or both are set. - boost::optional _received; - boost::optional _wanted; + ShardVersion _received; + ShardVersion _wanted; }; class StaleDbRoutingVersion final : public ErrorExtraInfo { diff --git a/src/mongo/s/stale_exception_test.cpp b/src/mongo/s/stale_exception_test.cpp index 83e5f91e31a09..904823da0cd05 100644 --- a/src/mongo/s/stale_exception_test.cpp +++ b/src/mongo/s/stale_exception_test.cpp @@ -28,7 +28,13 @@ */ #include "mongo/s/stale_exception.h" -#include "mongo/unittest/unittest.h" + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -54,22 +60,6 @@ TEST(StaleExceptionTest, StaleConfigInfoSerializationTest) { ASSERT_EQUALS(deserializedInfo->getShardId(), kShardId); } -TEST(StaleExceptionTest, StaleEpochInfoLegacySerializationTest) { - StaleEpochInfo info(kNss); - - // Serialize - BSONObjBuilder bob; - info.serialize(&bob); - - // Deserialize - auto deserializedInfo = - std::static_pointer_cast(StaleEpochInfo::parse(bob.obj())); - - ASSERT_EQUALS(deserializedInfo->getNss(), kNss); - ASSERT(!deserializedInfo->getVersionReceived()); - ASSERT(!deserializedInfo->getVersionWanted()); -} - TEST(StaleExceptionTest, StaleEpochInfoSerializationTest) { StaleEpochInfo info(kNss, ShardVersion::UNSHARDED(), ShardVersion::UNSHARDED()); diff --git a/src/mongo/s/stale_shard_version_helpers.cpp b/src/mongo/s/stale_shard_version_helpers.cpp index aad9bf091adaa..65e90d9099177 100644 --- a/src/mongo/s/stale_shard_version_helpers.cpp +++ b/src/mongo/s/stale_shard_version_helpers.cpp @@ -28,11 +28,16 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/stale_shard_version_helpers.h" +#include +#include "mongo/base/error_codes.h" +#include "mongo/db/database_name.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/stale_exception.h" +#include "mongo/s/stale_shard_version_helpers.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -82,8 +87,9 @@ void checkErrorStatusAndMaxRetries(const Status& status, // epoch refresh. If no shard is provided, then the epoch is stale and we must refresh. if (auto staleInfo = status.extraInfo()) { invariant(staleInfo->getNss() == nss, - str::stream() << "StaleConfig error on unexpected namespace. Expected " << nss - << ", received " << staleInfo->getNss()); + str::stream() << "StaleConfig error on unexpected namespace. 
Expected " + << nss.toStringForErrorMsg() << ", received " + << staleInfo->getNss().toStringForErrorMsg()); catalogCache->invalidateShardOrEntireCollectionEntryForShardedCollection( nss, staleInfo->getVersionWanted(), staleInfo->getShardId()); } else { diff --git a/src/mongo/s/stale_shard_version_helpers.h b/src/mongo/s/stale_shard_version_helpers.h index 59228149cfd9b..932962e4aa6f1 100644 --- a/src/mongo/s/stale_shard_version_helpers.h +++ b/src/mongo/s/stale_shard_version_helpers.h @@ -29,10 +29,30 @@ #pragma once +#include +#include +#include +#include +#include +#include + #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/db/cancelable_operation_context.h" +#include "mongo/db/client.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/pipeline/aggregation_request_helper.h" +#include "mongo/db/service_context.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/catalog/type_tags.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/grid.h" #include "mongo/s/stale_exception.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/cancellation.h" #include "mongo/util/future_util.h" #include "mongo/util/out_of_line_executor.h" diff --git a/src/mongo/s/stale_shard_version_helpers_test.cpp b/src/mongo/s/stale_shard_version_helpers_test.cpp index 980977d2e7447..03530b5168125 100644 --- a/src/mongo/s/stale_shard_version_helpers_test.cpp +++ b/src/mongo/s/stale_shard_version_helpers_test.cpp @@ -27,12 +27,32 @@ * it in the license file. */ -#include "mongo/logv2/log.h" +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/task_executor.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/stale_shard_version_helpers.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -123,7 +143,7 @@ TEST_F(AsyncShardVersionRetry, ExhaustedRetriesShouldThrowOriginalException) { auto future = shardVersionRetry( service(), nss(), catalogCache, desc(), getExecutor(), token, [&](OperationContext*) { if (++tries < 2 * kMaxNumStaleVersionRetries) { - uassert(StaleDbRoutingVersion(nss().db().toString(), + uassert(StaleDbRoutingVersion(nss().db_forTest().toString(), DatabaseVersion(UUID::gen(), Timestamp(2, 3)), DatabaseVersion(UUID::gen(), Timestamp(5, 3))), "testX", diff --git a/src/mongo/s/transaction_router.cpp b/src/mongo/s/transaction_router.cpp index d160847d40cea..36024842fe8c8 100644 --- a/src/mongo/s/transaction_router.cpp +++ b/src/mongo/s/transaction_router.cpp @@ -27,42 +27,70 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include #include - -#include "mongo/s/transaction_router.h" - +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/client/read_preference.h" #include "mongo/client/remote_command_retry_scheduler.h" -#include "mongo/db/commands.h" #include "mongo/db/commands/txn_cmds_gen.h" #include "mongo/db/commands/txn_two_phase_commit_cmds_gen.h" -#include "mongo/db/jsobj.h" +#include "mongo/db/database_name.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/server_options.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/session/session.h" #include "mongo/db/transaction_validation.h" -#include "mongo/db/txn_retry_counter_too_old_info.h" #include "mongo/db/vector_clock.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/attribute_storage.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/async_requests_sender.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/client/shard_registry.h" #include "mongo/s/cluster_commands_helpers.h" #include "mongo/s/grid.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/s/router_transactions_metrics.h" #include "mongo/s/shard_cannot_refresh_due_to_locks_held_exception.h" +#include "mongo/s/stale_exception.h" +#include "mongo/s/transaction_router.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #include "mongo/util/exit.h" #include "mongo/util/fail_point.h" #include "mongo/util/log_with_sampling.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction - namespace mongo { namespace { @@ -77,78 +105,6 @@ const char kReadConcernLevelSnapshotName[] = "snapshot"; const auto getTransactionRouter = Session::declareDecoration(); -/** - * Attaches the given atClusterTime to the readConcern object in the given command object, removing - * afterClusterTime if present. Assumes the given command object has a readConcern field and has - * readConcern level snapshot. - */ -BSONObj appendAtClusterTimeToReadConcern(BSONObj cmdObj, LogicalTime atClusterTime) { - dassert(cmdObj.hasField(repl::ReadConcernArgs::kReadConcernFieldName)); - - BSONObjBuilder cmdAtClusterTimeBob; - for (auto&& elem : cmdObj) { - if (elem.fieldNameStringData() == repl::ReadConcernArgs::kReadConcernFieldName) { - BSONObjBuilder readConcernBob = - cmdAtClusterTimeBob.subobjStart(repl::ReadConcernArgs::kReadConcernFieldName); - for (auto&& rcElem : elem.Obj()) { - // afterClusterTime cannot be specified with atClusterTime. 
- if (rcElem.fieldNameStringData() != - repl::ReadConcernArgs::kAfterClusterTimeFieldName) { - readConcernBob.append(rcElem); - } - } - - dassert(readConcernBob.hasField(repl::ReadConcernArgs::kLevelFieldName) && - readConcernBob.asTempObj()[repl::ReadConcernArgs::kLevelFieldName].String() == - kReadConcernLevelSnapshotName); - - readConcernBob.append(repl::ReadConcernArgs::kAtClusterTimeFieldName, - atClusterTime.asTimestamp()); - } else { - cmdAtClusterTimeBob.append(elem); - } - } - - return cmdAtClusterTimeBob.obj(); -} - -BSONObj appendReadConcernForTxn(BSONObj cmd, - repl::ReadConcernArgs readConcernArgs, - boost::optional atClusterTime) { - // Check for an existing read concern. The first statement in a transaction may already have - // one, in which case its level should always match the level of the transaction's readConcern. - if (cmd.hasField(repl::ReadConcernArgs::kReadConcernFieldName)) { - repl::ReadConcernArgs existingReadConcernArgs; - dassert(existingReadConcernArgs.initialize(cmd)); - dassert(existingReadConcernArgs.getLevel() == readConcernArgs.getLevel()); - - return atClusterTime ? appendAtClusterTimeToReadConcern(std::move(cmd), *atClusterTime) - : cmd; - } - - BSONObjBuilder bob(std::move(cmd)); - readConcernArgs.appendInfo(&bob); - - return atClusterTime ? appendAtClusterTimeToReadConcern(bob.asTempObj(), *atClusterTime) - : bob.obj(); -} - -BSONObjBuilder appendFieldsForStartTransaction(BSONObj cmd, - repl::ReadConcernArgs readConcernArgs, - boost::optional atClusterTime, - bool doAppendStartTransaction) { - // startTransaction: true always requires readConcern, even if it's empty. - auto cmdWithReadConcern = - appendReadConcernForTxn(std::move(cmd), readConcernArgs, atClusterTime); - - BSONObjBuilder bob(std::move(cmdWithReadConcern)); - if (doAppendStartTransaction) { - bob.append(OperationSessionInfoFromClient::kStartTransactionFieldName, true); - } - - return bob; -} - // Commands that are idempotent in a transaction context and can be blindly retried in the middle of // a transaction. Writing aggregates (e.g. with a $out or $merge) is disallowed in a transaction, so // aggregates must be read operations. 
Note: aggregate and find do have the side-effect of creating @@ -260,7 +216,7 @@ std::string actionTypeToString(TransactionRouter::TransactionActions action) { MONGO_UNREACHABLE; } -} // unnamed namespace +} // namespace TransactionRouter::TransactionRouter() = default; @@ -433,7 +389,8 @@ BSONObj TransactionRouter::Participant::attachTxnFieldsIfNeeded( } else if (OperationSessionInfoFromClient::kAutocommitFieldName == elem.fieldNameStringData()) { hasAutoCommit = true; - } else if (OperationSessionInfo::kTxnNumberFieldName == elem.fieldNameStringData()) { + } else if (OperationSessionInfoFromClient::kTxnNumberFieldName == + elem.fieldNameStringData()) { hasTxnNum = true; } } @@ -469,7 +426,7 @@ BSONObj TransactionRouter::Participant::attachTxnFieldsIfNeeded( } if (!hasTxnNum) { - newCmd.append(OperationSessionInfo::kTxnNumberFieldName, + newCmd.append(OperationSessionInfoFromClient::kTxnNumberFieldName, sharedOptions.txnNumberAndRetryCounter.getTxnNumber()); } else { auto osi = OperationSessionInfoFromClient::parse(IDLParserContext{"OperationSessionInfo"}, @@ -603,13 +560,8 @@ bool TransactionRouter::AtClusterTime::canChange(StmtId currentStmtId) const { return !_stmtIdSelectedAt || *_stmtIdSelectedAt == currentStmtId; } -bool TransactionRouter::Router::mustUseAtClusterTime() const { - return o().atClusterTime.has_value(); -} - -LogicalTime TransactionRouter::Router::getSelectedAtClusterTime() const { - invariant(o().atClusterTime); - return o().atClusterTime->getTime(); +boost::optional TransactionRouter::Router::getSelectedAtClusterTime() const { + return o().atClusterTime ? boost::make_optional(o().atClusterTime->getTime()) : boost::none; } const boost::optional& TransactionRouter::Router::getCoordinatorId() const { @@ -908,10 +860,8 @@ void TransactionRouter::Router::onSnapshotError(OperationContext* opCtx, const S invariant(o().participants.empty()); invariant(!o().coordinatorId); - stdx::lock_guard lk(*opCtx->getClient()); - // Reset the global snapshot timestamp so the retry will select a new one. - o(lk).atClusterTime.reset(); + stdx::lock_guard lk(*opCtx->getClient()); o(lk).atClusterTime.emplace(); } @@ -1488,7 +1438,9 @@ void TransactionRouter::Router::_resetRouterState( } OperationContextSession::observeNewTxnNumberStarted( - opCtx, _sessionId(), txnNumberAndRetryCounter.getTxnNumber()); + opCtx, + _sessionId(), + {txnNumberAndRetryCounter.getTxnNumber(), SessionCatalog::Provenance::kRouter}); }; void TransactionRouter::Router::_resetRouterStateForStartTransaction( @@ -1506,11 +1458,10 @@ void TransactionRouter::Router::_resetRouterStateForStartTransaction( stdx::lock_guard lk(*opCtx->getClient()); o(lk).apiParameters = APIParameters::get(opCtx); o(lk).readConcernArgs = readConcernArgs; - } - if (o().readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern) { - stdx::lock_guard lk(*opCtx->getClient()); - o(lk).atClusterTime.emplace(); + if (o().readConcernArgs.getLevel() == repl::ReadConcernLevel::kSnapshotReadConcern) { + o(lk).atClusterTime.emplace(); + } } LOGV2_DEBUG(22889, @@ -1574,7 +1525,8 @@ void TransactionRouter::Router::_logSlowTransaction(OperationContext* opCtx, attrs.add("parameters", parametersBuilder.obj()); - + // Since DynamicAttributes (attrs) binds by reference, it is important that the lifetime of this + // variable lasts until the LOGV2 call at the end of this function. 
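The hunk above collapses the old `mustUseAtClusterTime()` / `getSelectedAtClusterTime()` pair into a single getter returning `boost::optional<LogicalTime>`, so callers no longer need a separate precondition check before reading the timestamp. A minimal caller-side sketch of that migration; the helper name and the appended field are illustrative and not part of this change:

```cpp
#include <boost/optional.hpp>

#include "mongo/bson/bsonobjbuilder.h"
#include "mongo/db/logical_time.h"
#include "mongo/s/transaction_router.h"

namespace mongo {
// Hypothetical caller showing the before/after pattern.
void appendReadTimestampIfSnapshot(const TransactionRouter::Router& txnRouter,
                                   BSONObjBuilder* bob) {
    // Before this change:
    //   if (txnRouter.mustUseAtClusterTime())
    //       bob->append("atClusterTime", txnRouter.getSelectedAtClusterTime().asTimestamp());
    //
    // After: boost::none means the transaction is not running at snapshot read concern.
    if (auto atClusterTime = txnRouter.getSelectedAtClusterTime()) {
        bob->append("atClusterTime", atClusterTime->asTimestamp());
    }
}
}  // namespace mongo
```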
std::string globalReadTimestampTemp; if (_atClusterTimeHasBeenSet()) { globalReadTimestampTemp = o().atClusterTime->getTime().toString(); @@ -1900,4 +1852,85 @@ void TransactionRouter::MetricsTracker::endTransaction( } } +BSONObj TransactionRouter::appendFieldsForStartTransaction( + BSONObj cmdObj, + const repl::ReadConcernArgs& readConcernArgs, + const boost::optional& atClusterTimeForSnapshotReadConcern, + bool doAppendStartTransaction) { + BSONObjBuilder cmdBob; + bool hasReadConcern = false; + for (auto&& elem : cmdObj) { + if (elem.fieldNameStringData() == repl::ReadConcernArgs::kReadConcernFieldName) { + bool hasLevel = false; + bool hasAfterClusterTime = false; + bool hasAtClusterTime = false; + + BSONObjBuilder readConcernBob = + cmdBob.subobjStart(repl::ReadConcernArgs::kReadConcernFieldName); + for (auto&& rcElem : elem.Obj()) { + if (rcElem.fieldNameStringData() == repl::ReadConcernArgs::kLevelFieldName) { + dassert(readConcernArgs.getLevel() == + repl::readConcernLevels::fromString(rcElem.checkAndGetStringData()), + "The read concern level changed during processing of the request"); + readConcernBob.append(rcElem); + hasLevel = true; + } else if (rcElem.fieldNameStringData() == + repl::ReadConcernArgs::kAfterClusterTimeFieldName) { + dassert( + !readConcernArgs.getArgsAfterClusterTime() || + rcElem.timestamp() >= + readConcernArgs.getArgsAfterClusterTime()->asTimestamp(), + "The afterClusterTime moved backwards during processing of the request"); + if (!atClusterTimeForSnapshotReadConcern) + readConcernBob.append(rcElem); + hasAfterClusterTime = true; + } else if (rcElem.fieldNameStringData() == + repl::ReadConcernArgs::kAtClusterTimeFieldName) { + dassert(atClusterTimeForSnapshotReadConcern); + dassert(rcElem.timestamp() == + atClusterTimeForSnapshotReadConcern->asTimestamp()); + readConcernBob.append(rcElem); + hasAtClusterTime = true; + } else { + readConcernBob.append(rcElem); + } + } + + if (!hasAfterClusterTime && readConcernArgs.getArgsAfterClusterTime()) { + readConcernBob.append(repl::ReadConcernArgs::kAfterClusterTimeFieldName, + readConcernArgs.getArgsAfterClusterTime()->asTimestamp()); + } + + if (!hasAtClusterTime && atClusterTimeForSnapshotReadConcern) { + if (!hasLevel) + readConcernBob.append(repl::ReadConcernArgs::kLevelFieldName, "snapshot"); + readConcernBob.append(repl::ReadConcernArgs::kAtClusterTimeFieldName, + atClusterTimeForSnapshotReadConcern->asTimestamp()); + } + + hasReadConcern = true; + } else { + cmdBob.append(elem); + } + } + + if (!hasReadConcern) { + if (atClusterTimeForSnapshotReadConcern) { + BSONObjBuilder readConcernBob = + cmdBob.subobjStart(repl::ReadConcernArgs::kReadConcernFieldName); + readConcernBob.append(repl::ReadConcernArgs::kLevelFieldName, "snapshot"); + readConcernBob.append(repl::ReadConcernArgs::kAtClusterTimeFieldName, + atClusterTimeForSnapshotReadConcern->asTimestamp()); + } else { + readConcernArgs.appendInfo(&cmdBob); + } + } + + if (doAppendStartTransaction) { + cmdBob.append(OperationSessionInfoFromClient::kStartTransactionFieldName, true); + } + + return cmdBob.obj(); +} + } // namespace mongo diff --git a/src/mongo/s/transaction_router.h b/src/mongo/s/transaction_router.h index 0a68361ebe4f9..222f6681a2fd7 100644 --- a/src/mongo/s/transaction_router.h +++ b/src/mongo/s/transaction_router.h @@ -29,19 +29,40 @@ #pragma once +#include #include - +#include +#include +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include 
"mongo/bson/bsonobjbuilder.h" #include "mongo/db/api_parameters.h" +#include "mongo/db/client.h" #include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" #include "mongo/db/session/session_catalog.h" #include "mongo/db/shard_id.h" #include "mongo/db/stats/single_transaction_stats.h" #include "mongo/s/async_requests_sender.h" #include "mongo/s/client/shard.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" #include "mongo/util/string_map.h" +#include "mongo/util/tick_source.h" +#include "mongo/util/time_support.h" namespace mongo { @@ -462,15 +483,10 @@ class TransactionRouter { void onViewResolutionError(OperationContext* opCtx, const NamespaceString& nss); /** - * Returns true if the associated transaction is running at snapshot level read concern. + * If the transaction is not running at a read concern snapshot, returns boost::none. + * Otherwise returns the timestamps that has been selected for the transaction. */ - bool mustUseAtClusterTime() const; - - /** - * Returns the read timestamp for this transaction. Callers must verify that the read - * timestamp has been selected for this transaction before calling this function. - */ - LogicalTime getSelectedAtClusterTime() const; + boost::optional getSelectedAtClusterTime() const; /** * Sets the atClusterTime for the current transaction to the latest time in the router's @@ -737,6 +753,32 @@ class TransactionRouter { return Observer(osession); } + /** + * Takes a cmdObj which could have come from one of the two paths: + * 1. Verbatim taken from the user's request (and therefore *may contain* read concern + * arguments) or + * 2. Newly generated by the feature based on a user's request (and *doesn't contain* read + * concern arguments) + * + * AND outputs a new request that contains the original fields of the request along with the + * respective readConcernArgs augmented with atClusterTimeForSnapshotReadConcern if the request + * asks for a snapshot level. + * + * The 'atClusterTimeForSnapshotReadConcern' will be boost::none in all cases except when the + * read concern level is 'snapshot' or the caller provided `atClusterTime`. + * + * TODO (SERVER-77506): This code re-checks that the input cmdObj is in sync with the parsed + * readConcernArgs (i.e., that we didn't swap majority for local or snapshot somewhere along the + * command execution path). This is very error prone and wasteful and a better architecture + * would be if cmdObj was not allowed to contain any read concern arguments so that we can just + * append the ones passed to the function. 
+ */ + static BSONObj appendFieldsForStartTransaction( + BSONObj cmdObj, + const repl::ReadConcernArgs& readConcernArgs, + const boost::optional& atClusterTimeForSnapshotReadConcern, + bool doAppendStartTransaction); + private: /** * State in this struct may be read by methods of Observer or Router, and may be written by diff --git a/src/mongo/s/transaction_router_resource_yielder.cpp b/src/mongo/s/transaction_router_resource_yielder.cpp index 16dd6f1b2b35e..3dfd163c4cb79 100644 --- a/src/mongo/s/transaction_router_resource_yielder.cpp +++ b/src/mongo/s/transaction_router_resource_yielder.cpp @@ -29,11 +29,20 @@ #include "mongo/s/transaction_router_resource_yielder.h" +#include + +#include + +#include "mongo/db/session/session.h" #include "mongo/db/session/session_catalog.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/s/is_mongos.h" #include "mongo/s/session_catalog_router.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit.h" +#include "mongo/util/fail_point.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTransaction diff --git a/src/mongo/s/transaction_router_resource_yielder.h b/src/mongo/s/transaction_router_resource_yielder.h index 7334d76b178ed..886e96c89574b 100644 --- a/src/mongo/s/transaction_router_resource_yielder.h +++ b/src/mongo/s/transaction_router_resource_yielder.h @@ -29,6 +29,8 @@ #pragma once +#include + #include "mongo/db/operation_context.h" #include "mongo/db/resource_yielder.h" diff --git a/src/mongo/s/transaction_router_test.cpp b/src/mongo/s/transaction_router_test.cpp index e0ba9490a74e3..15334bc808266 100644 --- a/src/mongo/s/transaction_router_test.cpp +++ b/src/mongo/s/transaction_router_test.cpp @@ -28,31 +28,68 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include #include +#include +#include #include - +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/remote_command_targeter_mock.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" +#include "mongo/db/repl/optime.h" #include "mongo/db/repl/read_concern_args.h" -#include "mongo/db/txn_retry_counter_too_old_info.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/server_options.h" #include "mongo/db/vector_clock.h" -#include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" -#include "mongo/rpc/get_status_from_command_result.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/network_interface_mock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/rpc/metadata/client_metadata.h" #include "mongo/s/catalog/type_shard.h" #include "mongo/s/router_transactions_metrics.h" #include "mongo/s/session_catalog_router.h" +#include "mongo/s/shard_version.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" 
#include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/clock_source_mock.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/tick_source_mock.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -1291,6 +1328,85 @@ TEST_F(TransactionRouterTestWithDefaultSession, future.default_timed_get(); } +TEST(TransactionRouterTest, AppendFieldsForStartTransactionDefaultRC) { + repl::ReadConcernArgs defaultRCArgs; + auto result = TransactionRouter::appendFieldsForStartTransaction( + BSON("MyCmd" << 1), defaultRCArgs, boost::none, true /* doAppendStartTransaction */); + + ASSERT_EQ(result["MyCmd"].numberLong(), 1); + ASSERT_BSONOBJ_EQ(result["readConcern"].Obj(), BSONObj()); + ASSERT_EQ(result["startTransaction"].boolean(), true); +} + +TEST(TransactionRouterTest, AppendFieldsForStartTransactionDefaultRCMajority) { + repl::ReadConcernArgs defaultRCArgs(repl::ReadConcernLevel::kMajorityReadConcern); + auto result = TransactionRouter::appendFieldsForStartTransaction( + BSON("MyCmd" << 1), defaultRCArgs, boost::none, true /* doAppendStartTransaction */); + + ASSERT_EQ(result["MyCmd"].numberLong(), 1); + repl::ReadConcernArgs resultArgs; + ASSERT_OK(resultArgs.parse(result["readConcern"].Obj())); + ASSERT_EQ(result["readConcern"]["level"].valueStringData(), "majority"); + ASSERT(!result["readConcern"]["atClusterTime"]); + ASSERT_EQ(result["startTransaction"].boolean(), true); +} + +TEST(TransactionRouterTest, AppendFieldsForStartTransactionDefaultRCCommandSpecifiesRCLocal) { + repl::ReadConcernArgs defaultRCArgs(repl::ReadConcernLevel::kLocalReadConcern); + auto result = + TransactionRouter::appendFieldsForStartTransaction(BSON("MyCmd" << 1 << "readConcern" + << BSON("level" + << "local")), + defaultRCArgs, + boost::none, + true /* doAppendStartTransaction */); + + ASSERT_EQ(result["MyCmd"].numberLong(), 1); + repl::ReadConcernArgs resultArgs; + ASSERT_OK(resultArgs.parse(result["readConcern"].Obj())); + ASSERT_EQ(result["readConcern"]["level"].valueStringData(), "local"); + ASSERT_EQ(result["startTransaction"].boolean(), true); +} + +TEST(TransactionRouterTest, AppendFieldsForStartTransactionDefaultRCCommandSpecifiesRCSnapshot) { + repl::ReadConcernArgs defaultRCArgs(repl::ReadConcernLevel::kSnapshotReadConcern); + auto result = + TransactionRouter::appendFieldsForStartTransaction(BSON("MyCmd" << 1 << "readConcern" + << BSON("level" + << "snapshot")), + defaultRCArgs, + LogicalTime(Timestamp(1, 2)), + false /* doAppendStartTransaction */); + + ASSERT_EQ(result["MyCmd"].numberLong(), 1); + repl::ReadConcernArgs resultArgs; + ASSERT_OK(resultArgs.parse(result["readConcern"].Obj())); + ASSERT_EQ(result["readConcern"]["level"].valueStringData(), "snapshot"); + ASSERT_EQ(result["readConcern"]["atClusterTime"].timestamp(), Timestamp(1, 2)); + ASSERT(!result["startTransaction"]); +} + +TEST(TransactionRouterTest, + AppendFieldsForStartTransactionDefaultRCCommandSpecifiesRCSnapshotAndAtClusterTime) { + repl::ReadConcernArgs defaultRCArgs(repl::ReadConcernLevel::kSnapshotReadConcern); + defaultRCArgs.setArgsAtClusterTimeForSnapshot(Timestamp(1, 2)); + auto result = TransactionRouter::appendFieldsForStartTransaction( + BSON("MyCmd" << 1 << 
"readConcern" + << BSON("level" + << "snapshot" + << "atClusterTime" << Timestamp(1, 2))), + defaultRCArgs, + LogicalTime(Timestamp(1, 2)), + false /* doAppendStartTransaction */); + + ASSERT_EQ(result["MyCmd"].numberLong(), 1); + repl::ReadConcernArgs resultArgs; + ASSERT_OK(resultArgs.parse(result["readConcern"].Obj())); + ASSERT_EQ(result["readConcern"]["level"].valueStringData(), "snapshot"); + ASSERT_EQ(result["readConcern"]["atClusterTime"].timestamp(), Timestamp(1, 2)); + ASSERT(!result["startTransaction"]); +} + TEST_F(TransactionRouterTest, CommitWithRecoveryTokenWithNoParticipants) { LogicalSessionId lsid(makeLogicalSessionIdForTest()); TxnNumber txnNum{3}; @@ -2575,14 +2691,14 @@ TEST_F(TransactionRouterTestWithDefaultSession, NonSnapshotReadConcernHasNoAtClu operationContext(), txnNum++, TransactionRouter::TransactionActions::kStart); // No atClusterTime is placed on the router by default. - ASSERT_FALSE(txnRouter.mustUseAtClusterTime()); + ASSERT(!txnRouter.getSelectedAtClusterTime()); // Can't compute and set an atClusterTime. txnRouter.setDefaultAtClusterTime(operationContext()); - ASSERT_FALSE(txnRouter.mustUseAtClusterTime()); + ASSERT(!txnRouter.getSelectedAtClusterTime()); // Can't continue on snapshot errors. - ASSERT_FALSE(txnRouter.canContinueOnSnapshotError()); + ASSERT(!txnRouter.canContinueOnSnapshotError()); } } diff --git a/src/mongo/s/vector_clock_mongos.cpp b/src/mongo/s/vector_clock_mongos.cpp index a4c3b96d57e29..8d55fdb488ebc 100644 --- a/src/mongo/s/vector_clock_mongos.cpp +++ b/src/mongo/s/vector_clock_mongos.cpp @@ -27,9 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/db/service_context.h" #include "mongo/db/vector_clock.h" +#include "mongo/util/decorable.h" namespace mongo { namespace { diff --git a/src/mongo/s/version_mongos.cpp b/src/mongo/s/version_mongos.cpp index 22f3ddcb59436..97b168d1962a3 100644 --- a/src/mongo/s/version_mongos.cpp +++ b/src/mongo/s/version_mongos.cpp @@ -31,14 +31,9 @@ #include "mongo/s/version_mongos.h" #include +#include #include "mongo/db/log_process_details.h" -#include "mongo/db/server_options.h" -#include "mongo/logv2/log.h" -#include "mongo/logv2/log_domain_global.h" -#include "mongo/logv2/log_manager.h" -#include "mongo/platform/process_id.h" -#include "mongo/util/debug_util.h" #include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding diff --git a/src/mongo/s/would_change_owning_shard_exception.cpp b/src/mongo/s/would_change_owning_shard_exception.cpp index eec555187ad79..88e736cd0189d 100644 --- a/src/mongo/s/would_change_owning_shard_exception.cpp +++ b/src/mongo/s/would_change_owning_shard_exception.cpp @@ -27,12 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/s/would_change_owning_shard_exception.h" -#include "mongo/base/init.h" -#include "mongo/util/assert_util.h" +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" namespace mongo { namespace { @@ -42,6 +45,7 @@ MONGO_INIT_REGISTER_ERROR_EXTRA_INFO(WouldChangeOwningShardInfo); constexpr StringData kPreImage = "preImage"_sd; constexpr StringData kPostImage = "postImage"_sd; constexpr StringData kShouldUpsert = "shouldUpsert"_sd; +constexpr StringData kUserPostImage = "userPostImage"_sd; } // namespace @@ -49,6 +53,9 @@ void WouldChangeOwningShardInfo::serialize(BSONObjBuilder* bob) const { bob->append(kPreImage, _preImage); bob->append(kPostImage, _postImage); bob->append(kShouldUpsert, _shouldUpsert); + if (_userPostImage) { + bob->append(kUserPostImage, *_userPostImage); + } } std::shared_ptr WouldChangeOwningShardInfo::parse(const BSONObj& obj) { @@ -56,11 +63,14 @@ std::shared_ptr WouldChangeOwningShardInfo::parse(const BS } WouldChangeOwningShardInfo WouldChangeOwningShardInfo::parseFromCommandError(const BSONObj& obj) { - return WouldChangeOwningShardInfo(obj[kPreImage].Obj().getOwned(), - obj[kPostImage].Obj().getOwned(), - obj[kShouldUpsert].Bool(), - boost::none, - boost::none); + return WouldChangeOwningShardInfo( + obj[kPreImage].Obj().getOwned(), + obj[kPostImage].Obj().getOwned(), + obj[kShouldUpsert].Bool(), + boost::none, + boost::none, + obj.hasField(kUserPostImage) ? boost::make_optional(obj[kUserPostImage].Obj().getOwned()) + : boost::none); } } // namespace mongo diff --git a/src/mongo/s/would_change_owning_shard_exception.h b/src/mongo/s/would_change_owning_shard_exception.h index 1d6333b285db9..1407dafeb1878 100644 --- a/src/mongo/s/would_change_owning_shard_exception.h +++ b/src/mongo/s/would_change_owning_shard_exception.h @@ -29,10 +29,19 @@ #pragma once +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -51,12 +60,14 @@ class WouldChangeOwningShardInfo final : public ErrorExtraInfo { const BSONObj& postImage, const bool shouldUpsert, boost::optional ns, - boost::optional uuid) + boost::optional uuid, + boost::optional userPostImage = boost::none) : _preImage(preImage.getOwned()), _postImage(postImage.getOwned()), _shouldUpsert(shouldUpsert), _ns(ns), - _uuid(uuid) {} + _uuid(uuid), + _userPostImage(userPostImage) {} const auto& getPreImage() const { return _preImage; @@ -78,6 +89,10 @@ class WouldChangeOwningShardInfo final : public ErrorExtraInfo { return _uuid; } + const boost::optional& getUserPostImage() const { + return _userPostImage; + } + BSONObj toBSON() const { BSONObjBuilder bob; serialize(&bob); @@ -105,6 +120,9 @@ class WouldChangeOwningShardInfo final : public ErrorExtraInfo { // The uuid of collection containing the document. Does not get serialized into the BSONObj for // this error. boost::optional _uuid; + + // The user-level post image for shard key update on a sharded timeseries collection. 
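The new optional field is carried through `serialize()` and `parseFromCommandError()`, as shown in the hunks above; a small round-trip sketch with illustrative document contents:

```cpp
// Round-trips the new userPostImage field through the error's BSON representation.
WouldChangeOwningShardInfo info(BSON("_id" << 1 << "m" << 1),   // preImage
                                BSON("_id" << 1 << "m" << 2),   // postImage (bucket-level)
                                false /* shouldUpsert */,
                                boost::none /* ns */,
                                boost::none /* uuid */,
                                BSON("_id" << 1 << "temp" << 2) /* userPostImage */);

auto parsed = WouldChangeOwningShardInfo::parseFromCommandError(info.toBSON());
invariant(parsed.getUserPostImage());
invariant(parsed.getUserPostImage()->woCompare(BSON("_id" << 1 << "temp" << 2)) == 0);
```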
+ boost::optional _userPostImage; }; using WouldChangeOwningShardException = ExceptionFor; diff --git a/src/mongo/s/write_ops/SConscript b/src/mongo/s/write_ops/SConscript index 7d4e45368418b..30329e3e910ce 100644 --- a/src/mongo/s/write_ops/SConscript +++ b/src/mongo/s/write_ops/SConscript @@ -7,9 +7,8 @@ env = env.Clone() env.Library( target='batch_write_types', source=[ - 'batched_command_request.cpp', - 'batched_command_response.cpp', - 'batched_upsert_detail.cpp', + 'batched_command_request.cpp', 'batched_command_response.cpp', 'batched_upsert_detail.cpp', + 'bulk_write_command_modifier.cpp' ], LIBDEPS=[ '$BUILD_DIR/mongo/base', diff --git a/src/mongo/s/write_ops/batch_write_exec.cpp b/src/mongo/s/write_ops/batch_write_exec.cpp index 4d252551db2d2..293c05b954035 100644 --- a/src/mongo/s/write_ops/batch_write_exec.cpp +++ b/src/mongo/s/write_ops/batch_write_exec.cpp @@ -28,28 +28,57 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/s/write_ops/batch_write_exec.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include #include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/bson/util/builder.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/connection_string.h" -#include "mongo/client/remote_command_targeter.h" +#include "mongo/client/read_preference.h" #include "mongo/db/error_labels.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/server_options.h" #include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/timeseries/timeseries_write_util.h" +#include "mongo/db/transaction/transaction_api.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor_pool.h" #include "mongo/logv2/log.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/client/shard.h" #include "mongo/s/grid.h" #include "mongo/s/multi_statement_transaction_requests_sender.h" #include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" +#include "mongo/s/write_ops/batch_write_exec.h" #include "mongo/s/write_ops/batch_write_op.h" +#include "mongo/s/write_ops/write_op.h" #include "mongo/s/write_ops/write_without_shard_key_util.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -380,58 +409,16 @@ void executeChildBatches(OperationContext* opCtx, } } -void executeTwoPhaseWrite(OperationContext* opCtx, - NSTargeter& targeter, - BatchWriteOp& batchOp, - TargetedBatchMap& childBatches, - BatchWriteExecStats* stats, - const BatchedCommandRequest& clientRequest, - bool& abortBatch, - bool allowShardKeyUpdatesWithoutFullShardKeyInQuery) { - const auto targetedWriteBatch = [&] { - // If there is a targeted write with a sampleId, use that write instead in order to pass the - // sampleId to the two phase write protocol. 
Otherwise, just choose the first targeted - // write. - for (auto&& [_ /* shardId */, childBatch] : childBatches) { - auto nextBatch = childBatch.get(); - - // For a write without shard key, we expect each TargetedWriteBatch in childBatches to - // contain only one TargetedWrite directed to each shard. - tassert(7208400, - "There must be only 1 targeted write in this targeted write batch.", - !nextBatch->getWrites().empty()); - - auto targetedWrite = nextBatch->getWrites().begin()->get(); - if (targetedWrite->sampleId) { - return nextBatch; - } - } - return childBatches.begin()->second.get(); - }(); - - auto cmdObj = batchOp - .buildBatchRequest(*targetedWriteBatch, - targeter, - allowShardKeyUpdatesWithoutFullShardKeyInQuery) - .toBSON(); - - auto swRes = write_without_shard_key::runTwoPhaseWriteProtocol( - opCtx, clientRequest.getNS(), std::move(cmdObj)); - - Status responseStatus = swRes.getStatus(); - BatchedCommandResponse batchedCommandResponse; - if (swRes.isOK()) { - // Explicitly set the status of a no-op if there is no response. - if (swRes.getValue().getResponse().isEmpty()) { - batchedCommandResponse.setStatus(Status::OK()); - } else { - std::string errMsg; - if (!batchedCommandResponse.parseBSON(swRes.getValue().getResponse(), &errMsg)) { - responseStatus = {ErrorCodes::FailedToParse, errMsg}; - } - } - } - +// Only processes one write response from the child batches. Currently this is used for the two +// phase protocol of the singleton writes without shard key and time-series retryable updates. +void processResponseForOnlyFirstBatch(OperationContext* opCtx, + NSTargeter& targeter, + BatchWriteOp& batchOp, + TargetedBatchMap& childBatches, + const BatchedCommandResponse& batchedCommandResponse, + const Status& responseStatus, + BatchWriteExecStats* stats, + bool& abortBatch) { // Since we only send the write to a single shard, record the response of the write against the // first TargetedWriteBatch, and record no-ops for the remaining targeted shards. We always // resolve the first batch due to a quirk of this protocol running within an internal @@ -446,8 +433,7 @@ void executeTwoPhaseWrite(OperationContext* opCtx, for (auto&& childBatch : childBatches) { auto nextBatch = std::move(childBatch.second); - // If we're using the two phase write protocol we expect that each TargetedWriteBatch should - // only contain 1 write op for each shard. + // We expect that each TargetedWriteBatch should only contain 1 write op for each shard. invariant(nextBatch->getWrites().size() == 1); if (responseStatus.isOK()) { @@ -490,6 +476,119 @@ void executeTwoPhaseWrite(OperationContext* opCtx, } } } + +void executeTwoPhaseWrite(OperationContext* opCtx, + NSTargeter& targeter, + BatchWriteOp& batchOp, + TargetedBatchMap& childBatches, + BatchWriteExecStats* stats, + const BatchedCommandRequest& clientRequest, + bool& abortBatch, + bool allowShardKeyUpdatesWithoutFullShardKeyInQuery) { + const auto targetedWriteBatch = [&] { + // If there is a targeted write with a sampleId, use that write instead in order to pass the + // sampleId to the two phase write protocol. Otherwise, just choose the first targeted + // write. + for (auto&& [_ /* shardId */, childBatch] : childBatches) { + auto nextBatch = childBatch.get(); + + // For a write without shard key, we expect each TargetedWriteBatch in childBatches to + // contain only one TargetedWrite directed to each shard. 
+ tassert(7208400, + "There must be only 1 targeted write in this targeted write batch.", + !nextBatch->getWrites().empty()); + + auto targetedWrite = nextBatch->getWrites().begin()->get(); + if (targetedWrite->sampleId) { + return nextBatch; + } + } + return childBatches.begin()->second.get(); + }(); + + auto cmdObj = batchOp + .buildBatchRequest(*targetedWriteBatch, + targeter, + allowShardKeyUpdatesWithoutFullShardKeyInQuery) + .toBSON(); + + auto swRes = write_without_shard_key::runTwoPhaseWriteProtocol( + opCtx, targeter.getNS(), std::move(cmdObj)); + + Status responseStatus = swRes.getStatus(); + BatchedCommandResponse batchedCommandResponse; + if (swRes.isOK()) { + // Explicitly set the status of a no-op if there is no response. + if (swRes.getValue().getResponse().isEmpty()) { + batchedCommandResponse.setStatus(Status::OK()); + } else { + std::string errMsg; + if (!batchedCommandResponse.parseBSON(swRes.getValue().getResponse(), &errMsg)) { + responseStatus = {ErrorCodes::FailedToParse, errMsg}; + } + } + } + + processResponseForOnlyFirstBatch(opCtx, + targeter, + batchOp, + childBatches, + batchedCommandResponse, + responseStatus, + stats, + abortBatch); +} + +void executeRetryableTimeseriesUpdate(OperationContext* opCtx, + NSTargeter& targeter, + BatchWriteOp& batchOp, + TargetedBatchMap& childBatches, + BatchWriteExecStats* stats, + const BatchedCommandRequest& clientRequest, + bool& abortBatch, + size_t& nextOpIndex) { + auto wholeOp = clientRequest.getUpdateRequest(); + auto singleUpdateOp = timeseries::buildSingleUpdateOp(wholeOp, nextOpIndex); + BatchedCommandRequest singleUpdateRequest(singleUpdateOp); + const auto stmtId = write_ops::getStmtIdForWriteAt(wholeOp, nextOpIndex++); + + auto executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); + auto inlineExecutor = std::make_shared(); + txn_api::SyncTransactionWithRetries txn( + opCtx, executor, nullptr /* resourceYielder */, inlineExecutor); + BatchedCommandResponse batchedCommandResponse; + auto swResult = + txn.runNoThrow(opCtx, + [&singleUpdateRequest, &batchedCommandResponse, stmtId]( + const txn_api::TransactionClient& txnClient, ExecutorPtr txnExec) { + auto updateResponse = + txnClient.runCRUDOpSync(singleUpdateRequest, {stmtId}); + batchedCommandResponse = std::move(updateResponse); + + return SemiFuture::makeReady(); + }); + Status responseStatus = Status::OK(); + if (!swResult.isOK()) { + responseStatus = swResult.getStatus(); + } else { + if (!swResult.getValue().cmdStatus.isOK()) { + responseStatus = swResult.getValue().cmdStatus; + } + if (auto wcError = swResult.getValue().wcError; !wcError.toStatus().isOK()) { + batchedCommandResponse.setWriteConcernError( + std::make_unique(wcError).release()); + } + } + + processResponseForOnlyFirstBatch(opCtx, + targeter, + batchOp, + childBatches, + batchedCommandResponse, + responseStatus, + stats, + abortBatch); +} } // namespace void BatchWriteExec::executeBatch(OperationContext* opCtx, @@ -514,6 +613,7 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx, int numCompletedOps = 0; int numRoundsWithoutProgress = 0; bool abortBatch = false; + size_t nextOpIndex = 0; while (!batchOp.isFinished() && !abortBatch) { // @@ -566,12 +666,24 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx, break; } } else { - // If the targetStatus value is true, then we have detected an updateOne/deleteOne - // request without a shard key or _id. We will use a two phase protocol to apply the - // write. 
- if (feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( - serverGlobalParams.featureCompatibility) && - targetStatus.getValue()) { + if (targetStatus.getValue() == WriteType::TimeseriesRetryableUpdate) { + // If the targetStatus value is 'TimeseriesRetryableUpdate', then we have detected + // a retryable time-series update request. We will run it in the internal + // transaction api and collect the response. + executeRetryableTimeseriesUpdate(opCtx, + targeter, + batchOp, + childBatches, + stats, + clientRequest, + abortBatch, + nextOpIndex); + } else if (feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( + serverGlobalParams.featureCompatibility) && + targetStatus.getValue() == WriteType::WithoutShardKeyOrId) { + // If the targetStatus value is 'WithoutShardKeyOrId', then we have detected an + // updateOne/deleteOne request without a shard key or _id. We will use a two phase + // protocol to apply the write. tassert( 6992000, "Executing write batches with a size of 0", childBatches.size() > 0u); @@ -680,7 +792,7 @@ void BatchWriteExec::executeBatch(OperationContext* opCtx, 0, {ErrorCodes::NoProgressMade, str::stream() << "no progress was made executing batch write op in " - << clientRequest.getNS().ns() << " after " + << clientRequest.getNS().toStringForErrorMsg() << " after " << kMaxRoundsWithoutProgress << " rounds (" << numCompletedOps << " ops completed in " << rounds << " rounds total)"})); break; diff --git a/src/mongo/s/write_ops/batch_write_exec.h b/src/mongo/s/write_ops/batch_write_exec.h index b4016f22d7741..450d25ae78380 100644 --- a/src/mongo/s/write_ops/batch_write_exec.h +++ b/src/mongo/s/write_ops/batch_write_exec.h @@ -29,15 +29,21 @@ #pragma once +#include #include +#include #include +#include "mongo/bson/oid.h" #include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/shard_id.h" #include "mongo/s/ns_targeter.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/net/hostandport.h" namespace mongo { diff --git a/src/mongo/s/write_ops/batch_write_exec_test.cpp b/src/mongo/s/write_ops/batch_write_exec_test.cpp index e32d7579c96cf..de4e1d6e65d0f 100644 --- a/src/mongo/s/write_ops/batch_write_exec_test.cpp +++ b/src/mongo/s/write_ops/batch_write_exec_test.cpp @@ -27,16 +27,53 @@ * it in the license file. 
*/ +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/remote_command_targeter_factory_mock.h" #include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/db/baton.h" #include "mongo/db/commands.h" +#include "mongo/db/commands/txn_cmds_gen.h" +#include "mongo/db/logical_time.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/db/vector_clock.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" +#include "mongo/idl/server_parameter_test_util.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/catalog/type_chunk.h" #include "mongo/s/catalog/type_shard.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/mock_ns_targeter.h" #include "mongo/s/session_catalog_router.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/sharding_router_test_fixture.h" #include "mongo/s/stale_exception.h" @@ -44,8 +81,13 @@ #include "mongo/s/write_ops/batch_write_exec.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -64,11 +106,11 @@ const int kMaxRoundsWithoutProgress = 5; BSONObj expectInsertsReturnStaleVersionErrorsBase(const NamespaceString& nss, const std::vector& expected, const executor::RemoteCommandRequest& request) { - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); const auto opMsgRequest(OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj)); const auto actualBatchedInsert(BatchedCommandRequest::parseInsert(opMsgRequest)); - ASSERT_EQUALS(nss.toString(), actualBatchedInsert.getNS().ns()); + ASSERT_EQUALS(nss.toString_forTest(), actualBatchedInsert.getNS().ns_forTest()); const auto& inserted = actualBatchedInsert.getInsertRequest().getDocuments(); ASSERT_EQUALS(expected.size(), inserted.size()); @@ -109,11 +151,11 @@ BSONObj expectInsertsReturnStaleVersionErrorsBase(const NamespaceString& nss, BSONObj expectInsertsReturnStaleDbVersionErrorsBase(const NamespaceString& nss, const std::vector& expected, const executor::RemoteCommandRequest& request) { - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); const auto opMsgRequest(OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj)); const auto actualBatchedInsert(BatchedCommandRequest::parseInsert(opMsgRequest)); - 
ASSERT_EQUALS(nss.toString(), actualBatchedInsert.getNS().ns()); + ASSERT_EQUALS(nss.toString_forTest(), actualBatchedInsert.getNS().ns_forTest()); const auto& inserted = actualBatchedInsert.getInsertRequest().getDocuments(); ASSERT_EQUALS(expected.size(), inserted.size()); @@ -138,7 +180,7 @@ BSONObj expectInsertsReturnStaleDbVersionErrorsBase(const NamespaceString& nss, errorBuilder.append("code", int(ErrorCodes::StaleDbVersion)); auto dbVersion = DatabaseVersion(UUID::gen(), Timestamp(1, 1)); - errorBuilder.append("db", nss.db()); + errorBuilder.append("db", nss.db_forTest()); errorBuilder.append("vReceived", dbVersion.toBSON()); errorBuilder.append("vWanted", dbVersion.makeUpdated().toBSON()); @@ -161,11 +203,11 @@ BSONObj expectInsertsReturnTenantMigrationAbortedErrorsBase( const std::vector& expected, const executor::RemoteCommandRequest& request, int numberOfFailedOps) { - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); const auto opMsgRequest(OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj)); const auto actualBatchedInsert(BatchedCommandRequest::parseInsert(opMsgRequest)); - ASSERT_EQUALS(nss.toString(), actualBatchedInsert.getNS().ns()); + ASSERT_EQUALS(nss.toString_forTest(), actualBatchedInsert.getNS().ns_forTest()); const auto& inserted = actualBatchedInsert.getInsertRequest().getDocuments(); ASSERT_EQUALS(expected.size(), inserted.size()); @@ -257,11 +299,11 @@ class BatchWriteExecTest : public ShardingTestFixture { void expectInsertsReturnSuccess(std::vector::const_iterator expectedFrom, std::vector::const_iterator expectedTo) { onCommandForPoolExecutor([&](const executor::RemoteCommandRequest& request) { - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); const auto opMsgRequest(OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj)); const auto actualBatchedInsert(BatchedCommandRequest::parseInsert(opMsgRequest)); - ASSERT_EQUALS(nss.toString(), actualBatchedInsert.getNS().ns()); + ASSERT_EQUALS(nss.toString_forTest(), actualBatchedInsert.getNS().ns_forTest()); const auto& inserted = actualBatchedInsert.getInsertRequest().getDocuments(); const size_t expectedSize = std::distance(expectedFrom, expectedTo); @@ -306,12 +348,12 @@ class BatchWriteExecTest : public ShardingTestFixture { const BatchedCommandResponse& errResponse) { onCommandForPoolExecutor([&](const executor::RemoteCommandRequest& request) { try { - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); const auto opMsgRequest( OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj)); const auto actualBatchedInsert(BatchedCommandRequest::parseInsert(opMsgRequest)); - ASSERT_EQUALS(nss.toString(), actualBatchedInsert.getNS().ns()); + ASSERT_EQUALS(nss.toString_forTest(), actualBatchedInsert.getNS().ns_forTest()); const auto& inserted = actualBatchedInsert.getInsertRequest().getDocuments(); ASSERT_EQUALS(expected.size(), inserted.size()); @@ -344,6 +386,11 @@ class BatchWriteExecTest : public ShardingTestFixture { boost::none), BSON("x" << MINKEY), BSON("x" << MAXKEY))}}; + +private: + // The tests using this fixture expects that a write without shard key is not allowed. 
+ RAIIServerParameterControllerForTest _featureFlagController{ + "featureFlagUpdateOneWithoutShardKey", false}; }; // @@ -1982,6 +2029,11 @@ class BatchWriteExecTargeterErrorTest : public ShardingTestFixture { } const NamespaceString nss = NamespaceString::createNamespaceString_forTest("foo.bar"); + +private: + // The tests using this fixture expects that a write without shard key is not allowed. + RAIIServerParameterControllerForTest _featureFlagController{ + "featureFlagUpdateOneWithoutShardKey", false}; }; TEST_F(BatchWriteExecTargeterErrorTest, TargetedFailedAndErrorResponse) { @@ -2452,11 +2504,11 @@ class BatchWriteExecTransactionTest : public BatchWriteExecTest { void expectInsertsReturnTransientTxnErrors(const std::vector& expected) { onCommandForPoolExecutor([&](const executor::RemoteCommandRequest& request) { - ASSERT_EQUALS(nss.db(), request.dbname); + ASSERT_EQUALS(nss.db_forTest(), request.dbname); const auto opMsgRequest(OpMsgRequest::fromDBAndBody(request.dbname, request.cmdObj)); const auto actualBatchedInsert(BatchedCommandRequest::parseInsert(opMsgRequest)); - ASSERT_EQUALS(nss.toString(), actualBatchedInsert.getNS().ns()); + ASSERT_EQUALS(nss.toString_forTest(), actualBatchedInsert.getNS().ns_forTest()); const auto& inserted = actualBatchedInsert.getInsertRequest().getDocuments(); ASSERT_EQUALS(expected.size(), inserted.size()); diff --git a/src/mongo/s/write_ops/batch_write_op.cpp b/src/mongo/s/write_ops/batch_write_op.cpp index 549ca807e4b08..a18f60f90113f 100644 --- a/src/mongo/s/write_ops/batch_write_op.cpp +++ b/src/mongo/s/write_ops/batch_write_op.cpp @@ -27,22 +27,45 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/s/write_ops/batch_write_op.h" - +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/basic_types.h" #include "mongo/db/catalog/collection_uuid_mismatch_info.h" -#include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/commands/server_status_metric.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/write_ops.h" -#include "mongo/s/client/num_hosts_targeted_metrics.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/stats/counters.h" +#include "mongo/db/write_concern_options.h" #include "mongo/s/collection_uuid_mismatch.h" #include "mongo/s/transaction_router.h" +#include "mongo/s/write_ops/batch_write_op.h" #include "mongo/s/write_ops/write_without_shard_key_util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" #include "mongo/util/transitional_tools_do_not_use/vector_spooling.h" +#include "mongo/util/uuid.h" namespace mongo { namespace { @@ -54,28 +77,6 @@ struct WriteErrorComp { } }; -/** - * Returns a new write concern that has the copy of every field from the original - * document but with a w set to 1. This is intended for upgrading { w: 0 } write - * concern to { w: 1 }. 
- */ -BSONObj upgradeWriteConcern(const BSONObj& origWriteConcern) { - BSONObjIterator iter(origWriteConcern); - BSONObjBuilder newWriteConcern; - - while (iter.more()) { - BSONElement elem(iter.next()); - - if (strncmp(elem.fieldName(), "w", 2) == 0) { - newWriteConcern.append("w", 1); - } else { - newWriteConcern.append(elem); - } - } - - return newWriteConcern.obj(); -} - /** * Helper to determine whether a number of targeted writes require a new targeted batch. */ @@ -150,49 +151,6 @@ bool wouldMakeBatchesTooBig(const std::vector>& w return false; } -/** - * Gets an estimated size of how much the particular write operation would add to the size of the - * batch. - */ -int getWriteSizeBytes(const WriteOp& writeOp) { - const BatchItemRef& item = writeOp.getWriteItem(); - const BatchedCommandRequest::BatchType batchType = item.getOpType(); - - using UpdateOpEntry = write_ops::UpdateOpEntry; - using DeleteOpEntry = write_ops::DeleteOpEntry; - - if (batchType == BatchedCommandRequest::BatchType_Insert) { - return item.getDocument().objsize(); - } else if (batchType == BatchedCommandRequest::BatchType_Update) { - // Note: Be conservative here - it's okay if we send slightly too many batches. - const auto& update = item.getUpdate(); - auto estSize = write_ops::getUpdateSizeEstimate( - update.getQ(), - update.getU(), - update.getC(), - update.getUpsertSupplied().has_value(), - update.getCollation(), - update.getArrayFilters(), - update.getHint(), - update.getSampleId(), - update.getAllowShardKeyUpdatesWithoutFullShardKeyInQuery()); - - // When running a debug build, verify that estSize is at least the BSON serialization size. - dassert(estSize >= update.toBSON().objsize()); - return estSize; - } else if (batchType == BatchedCommandRequest::BatchType_Delete) { - const auto& deleteOp = item.getDelete(); - auto estSize = write_ops::getDeleteSizeEstimate( - deleteOp.getQ(), deleteOp.getCollation(), deleteOp.getHint(), deleteOp.getSampleId()); - - // When running a debug build, verify that estSize is at least the BSON serialization size. - dassert(estSize >= deleteOp.toBSON().objsize()); - return estSize; - } - - MONGO_UNREACHABLE; -} - /** * Given *either* a batch error or an array of per-item errors, copies errors we're interested in * into a TrackedErrorMap @@ -256,13 +214,18 @@ int getEncryptionInformationSize(const BatchedCommandRequest& req) { } // namespace -StatusWith targetWriteOps(OperationContext* opCtx, - std::vector& writeOps, - bool ordered, - bool recordTargetErrors, - GetTargeterFn getTargeterFn, - GetWriteSizeFn getWriteSizeFn, - TargetedBatchMap& batchMap) { +// 'baseCommandSizeBytes' specifies the base size of a batch command request prior to adding any +// individual operations to it. This function will ensure that 'baseCommandSizeBytes' plus the +// result of calling 'getWriteSizeFn' on each write added to a batch will not result in a command +// over BSONObjMaxUserSize. 
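Per the comment above, the new 'baseCommandSizeBytes' parameter simply shrinks the per-batch size budget; a rough sketch of the invariant the targeting code maintains (the helper name is an assumption, only BSONObjMaxUserSize is a real constant):

```cpp
#include "mongo/bson/util/builder.h"  // BSONObjMaxUserSize

// Illustrative only: a write may be added to a batch as long as the base command size,
// plus the estimated sizes of the writes already in the batch, plus this write's own
// estimate, stays within the user BSON size limit.
bool wouldFitInBatch(int baseCommandSizeBytes,
                     int currentEstimatedBatchSizeBytes,
                     int nextWriteSizeBytes) {
    return baseCommandSizeBytes + currentEstimatedBatchSizeBytes + nextWriteSizeBytes <=
        mongo::BSONObjMaxUserSize;
}
```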
+StatusWith targetWriteOps(OperationContext* opCtx, + std::vector& writeOps, + bool ordered, + bool recordTargetErrors, + GetTargeterFn getTargeterFn, + GetWriteSizeFn getWriteSizeFn, + int baseCommandSizeBytes, + TargetedBatchMap& batchMap) { // // Targeting of unordered batches is fairly simple - each remaining write op is targeted, // and each of those targeted writes are grouped into a batch for a particular shard @@ -296,7 +259,7 @@ StatusWith targetWriteOps(OperationContext* opCtx, // [{ skey : y }, { skey : z }] // - bool isWriteWithoutShardKeyOrId = false; + WriteType writeType = WriteType::Ordinary; std::map> nsEndpointMap; std::map> nsShardIdMap; @@ -306,9 +269,10 @@ StatusWith targetWriteOps(OperationContext* opCtx, if (writeOp.getWriteState() != WriteOpState_Ready) continue; - // If we got a write without shard key in the previous iteration, it should be sent in its - // own batch. - if (isWriteWithoutShardKeyOrId) { + // If we got a write without shard key or a time-series retryable update in the previous + // iteration, it should be sent in its own batch. + if (writeType == WriteType::WithoutShardKeyOrId || + writeType == WriteType::TimeseriesRetryableUpdate) { break; } @@ -358,7 +322,7 @@ StatusWith targetWriteOps(OperationContext* opCtx, writeOp.setOpError(targetError); if (ordered) - return isWriteWithoutShardKeyOrId; + return writeType; continue; } else { @@ -397,6 +361,12 @@ StatusWith targetWriteOps(OperationContext* opCtx, break; } + if (targeter.isShardedTimeSeriesBucketsNamespace() && + writeOp.getWriteItem().getOpType() == BatchedCommandRequest::BatchType_Update && + opCtx->isRetryableWrite() && !opCtx->inMultiDocumentTransaction() && batchMap.empty()) { + writeType = WriteType::TimeseriesRetryableUpdate; + } + // Check if an updateOne or deleteOne necessitates using the two phase write in the case // where the query does not contain a shard key or _id to target by. if (auto writeItem = writeOp.getWriteItem(); @@ -409,32 +379,40 @@ StatusWith targetWriteOps(OperationContext* opCtx, bool isUpsert = false; if (writeItem.getOpType() == BatchedCommandRequest::BatchType_Update) { - auto updateReq = writeItem.getUpdate(); + auto updateReq = writeItem.getUpdateRef(); isMultiWrite = updateReq.getMulti(); - query = updateReq.getQ(); + query = updateReq.getFilter(); collation = updateReq.getCollation().value_or(BSONObj()); isUpsert = updateReq.getUpsert(); } else { - auto deleteReq = writeItem.getDelete(); + auto deleteReq = writeItem.getDeleteRef(); isMultiWrite = deleteReq.getMulti(); - query = deleteReq.getQ(); + query = deleteReq.getFilter(); collation = deleteReq.getCollation().value_or(BSONObj()); } if (!isMultiWrite && - write_without_shard_key::useTwoPhaseProtocol(opCtx, - targeter.getNS(), - true /* isUpdateOrDelete */, - isUpsert, - query, - collation)) { - + write_without_shard_key::useTwoPhaseProtocol( + opCtx, + targeter.getNS(), + true /* isUpdateOrDelete */, + isUpsert, + query, + collation, + writeItem.getLet(), + writeItem.getLegacyRuntimeConstants())) { // Writes without shard key should be in their own batch. 
if (!batchMap.empty()) { writeOp.cancelWrites(nullptr); break; } else { - isWriteWithoutShardKeyOrId = true; + if (writeItem.getOpType() == BatchedCommandRequest::BatchType_Update) { + updateOneNonTargetedShardedCount.increment(1); + } else { + deleteOneNonTargetedShardedCount.increment(1); + } + + writeType = WriteType::WithoutShardKeyOrId; } }; } @@ -444,7 +422,7 @@ StatusWith targetWriteOps(OperationContext* opCtx, const auto& shardId = write->endpoint.shardName; TargetedBatchMap::iterator batchIt = batchMap.find(shardId); if (batchIt == batchMap.end()) { - auto newBatch = std::make_unique(shardId); + auto newBatch = std::make_unique(shardId, baseCommandSizeBytes); batchIt = batchMap.emplace(shardId, std::move(newBatch)).first; } @@ -463,7 +441,24 @@ StatusWith targetWriteOps(OperationContext* opCtx, break; } - return isWriteWithoutShardKeyOrId; + return writeType; +} + +BSONObj upgradeWriteConcern(const BSONObj& origWriteConcern) { + BSONObjIterator iter(origWriteConcern); + BSONObjBuilder newWriteConcern; + + while (iter.more()) { + BSONElement elem(iter.next()); + + if (strncmp(elem.fieldName(), "w", 2) == 0) { + newWriteConcern.append("w", 1); + } else { + newWriteConcern.append(elem); + } + } + + return newWriteConcern.obj(); } BatchWriteOp::BatchWriteOp(OperationContext* opCtx, const BatchedCommandRequest& clientRequest) @@ -479,9 +474,9 @@ BatchWriteOp::BatchWriteOp(OperationContext* opCtx, const BatchedCommandRequest& } } -StatusWith BatchWriteOp::targetBatch(const NSTargeter& targeter, - bool recordTargetErrors, - TargetedBatchMap* targetedBatches) { +StatusWith BatchWriteOp::targetBatch(const NSTargeter& targeter, + bool recordTargetErrors, + TargetedBatchMap* targetedBatches) { const bool ordered = _clientRequest.getWriteCommandRequestBase().getOrdered(); auto targetStatus = targetWriteOps( @@ -497,12 +492,12 @@ StatusWith BatchWriteOp::targetBatch(const NSTargeter& targeter, // corresponding to the statements that got routed to each individual shard, so they // need to be accounted in the potential request size so it does not exceed the max BSON // size. - // - // The constant 4 is chosen as the size of the BSON representation of the stmtId. - const int writeSizeBytes = getWriteSizeBytes(writeOp) + + const int writeSizeBytes = writeOp.getWriteItem().getSizeForBatchWriteBytes() + getEncryptionInformationSize(_clientRequest) + write_ops::kWriteCommandBSONArrayPerElementOverheadBytes + - (_batchTxnNum ? write_ops::kWriteCommandBSONArrayPerElementOverheadBytes + 4 : 0); + (_batchTxnNum ? write_ops::kStmtIdSize + + write_ops::kWriteCommandBSONArrayPerElementOverheadBytes + : 0); // For unordered writes, the router must return an entry for each failed write. This // constant is a pessimistic attempt to ensure that if a request to a shard hits @@ -516,6 +511,8 @@ StatusWith BatchWriteOp::targetBatch(const NSTargeter& targeter, ordered ? 0 : write_ops::kWriteCommandBSONArrayPerElementOverheadBytes + 272; return std::max(writeSizeBytes, errorResponsePotentialSizeBytes); }, + // TODO SERVER-77653: Account for the size of top-level command fields here. 
+ 0 /* baseCommandSizeBytes */, *targetedBatches); if (!targetStatus.isOK()) { diff --git a/src/mongo/s/write_ops/batch_write_op.h b/src/mongo/s/write_ops/batch_write_op.h index 5f396728e5a32..eff835a981792 100644 --- a/src/mongo/s/write_ops/batch_write_op.h +++ b/src/mongo/s/write_ops/batch_write_op.h @@ -29,22 +29,33 @@ #pragma once +#include +#include #include +#include #include #include #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/shard_id.h" #include "mongo/rpc/write_concern_error_detail.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/ns_targeter.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/s/write_ops/batched_upsert_detail.h" #include "mongo/s/write_ops/write_op.h" #include "mongo/stdx/unordered_map.h" namespace mongo { class OperationContext; + class TrackedErrors; // Conservative overhead per element contained in the write batch. This value was calculated as 1 @@ -82,6 +93,12 @@ struct ShardWCError { using TargetedBatchMap = std::map>; +enum class WriteType { + Ordinary, + WithoutShardKeyOrId, + TimeseriesRetryableUpdate, +}; + /** * The BatchWriteOp class manages the lifecycle of a batched write received by mongos. Each * item in a batch is tracked via a WriteOp, and the function of the BatchWriteOp is to @@ -131,12 +148,12 @@ class BatchWriteOp { * targeting errors, but if not we should refresh once first.) * * Returned TargetedWriteBatches are owned by the caller. - * If a write without a shard key is detected, return an OK StatusWith that has 'true' as the - * value. + * If a write without a shard key or a time-series retryable update is detected, return an OK + * StatusWith that has the corresponding WriteType as the value. */ - StatusWith targetBatch(const NSTargeter& targeter, - bool recordTargetErrors, - TargetedBatchMap* targetedBatches); + StatusWith targetBatch(const NSTargeter& targeter, + bool recordTargetErrors, + TargetedBatchMap* targetedBatches); /** * Fills a BatchCommandRequest from a TargetedWriteBatch for this BatchWriteOp. @@ -272,12 +289,20 @@ typedef std::function GetTargeterFn; typedef std::function GetWriteSizeFn; // Helper function to target ready writeOps. See BatchWriteOp::targetBatch for details. -StatusWith targetWriteOps(OperationContext* opCtx, - std::vector& writeOps, - bool ordered, - bool recordTargetErrors, - GetTargeterFn getTargeterFn, - GetWriteSizeFn getWriteSizeFn, - TargetedBatchMap& batchMap); +StatusWith targetWriteOps(OperationContext* opCtx, + std::vector& writeOps, + bool ordered, + bool recordTargetErrors, + GetTargeterFn getTargeterFn, + GetWriteSizeFn getWriteSizeFn, + int baseCommandSizeBytes, + TargetedBatchMap& batchMap); + +/** + * Returns a new write concern that has the copy of every field from the original + * document but with a w set to 1. This is intended for upgrading { w: 0 } write + * concern to { w: 1 }. + */ +BSONObj upgradeWriteConcern(const BSONObj& origWriteConcern); } // namespace mongo diff --git a/src/mongo/s/write_ops/batch_write_op_test.cpp b/src/mongo/s/write_ops/batch_write_op_test.cpp index 4bb00b7a81762..673c4aafa8d44 100644 --- a/src/mongo/s/write_ops/batch_write_op_test.cpp +++ b/src/mongo/s/write_ops/batch_write_op_test.cpp @@ -27,17 +27,51 @@ * it in the license file. 
*/ +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/session/logical_session_id_helpers.h" #include "mongo/idl/server_parameter_test_util.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/catalog_cache_test_fixture.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/mock_ns_targeter.h" #include "mongo/s/session_catalog_router.h" +#include "mongo/s/shard_key_pattern.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/sharding_router_test_fixture.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" #include "mongo/s/write_ops/batch_write_op.h" #include "mongo/s/write_ops/batched_command_request.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/fail_point.h" namespace mongo { namespace { @@ -108,14 +142,18 @@ void addWCError(BatchedCommandResponse* response) { class WriteOpTestFixture : public ServiceContextTest { protected: WriteOpTestFixture() { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); _opCtxHolder = makeOperationContext(); _opCtx = _opCtxHolder.get(); } ServiceContext::UniqueOperationContext _opCtxHolder; OperationContext* _opCtx; + + // This failpoint is to skip running the useTwoPhaseWriteProtocol check which expects the Grid + // to be initialized. With the feature flag on, the helper always returns false, which signifies + // that we have a targetable write op. 
+ std::unique_ptr _skipUseTwoPhaseWriteProtocolCheck = + std::make_unique("skipUseTwoPhaseWriteProtocolCheck"); }; using BatchWriteOpTest = WriteOpTestFixture; @@ -1916,7 +1954,7 @@ TEST_F(WriteWithoutShardKeyFixture, SingleUpdateWithoutShardKey) { std::map> targeted; auto status = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status); - ASSERT_EQUALS(status.getValue(), true); + ASSERT_EQUALS(status.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); @@ -1948,7 +1986,7 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleOrderedUpdateWithoutShardKey) { std::map> targeted; auto status = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status); - ASSERT_EQUALS(status.getValue(), true); + ASSERT_EQUALS(status.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); @@ -1964,7 +2002,7 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleOrderedUpdateWithoutShardKey) { auto status2 = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status2); - ASSERT_EQUALS(status2.getValue(), true); + ASSERT_EQUALS(status2.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); @@ -2001,7 +2039,7 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleUnorderedUpdateWithoutShardKey) { std::map> targeted; auto status = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status); - ASSERT_EQUALS(status.getValue(), true); + ASSERT_EQUALS(status.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); @@ -2017,7 +2055,7 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleUnorderedUpdateWithoutShardKey) { auto status2 = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status2); - ASSERT_EQUALS(status2.getValue(), true); + ASSERT_EQUALS(status2.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); @@ -2048,7 +2086,7 @@ TEST_F(WriteWithoutShardKeyFixture, SingleDeleteWithoutShardKey) { std::map> targeted; auto status = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status); - ASSERT_EQUALS(status.getValue(), true); + ASSERT_EQUALS(status.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); @@ -2081,7 +2119,7 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleOrderedDeletesWithoutShardKey) { std::map> targeted; auto status = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status); - ASSERT_EQUALS(status.getValue(), true); + ASSERT_EQUALS(status.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); @@ -2097,7 +2135,7 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleOrderedDeletesWithoutShardKey) { auto status2 = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status2); - ASSERT_EQUALS(status2.getValue(), true); 
+ ASSERT_EQUALS(status2.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); @@ -2134,7 +2172,7 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleUnorderedDeletesWithoutShardKey) { std::map> targeted; auto status = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status); - ASSERT_EQUALS(status.getValue(), true); + ASSERT_EQUALS(status.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); @@ -2150,7 +2188,7 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleUnorderedDeletesWithoutShardKey) { auto status2 = batchOp.targetBatch(targeter, false, &targeted); ASSERT_OK(status2); - ASSERT_EQUALS(status2.getValue(), true); + ASSERT_EQUALS(status2.getValue(), WriteType::WithoutShardKeyOrId); ASSERT(!batchOp.isFinished()); ASSERT_EQUALS(targeted.size(), 1u); ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); @@ -2161,5 +2199,202 @@ TEST_F(WriteWithoutShardKeyFixture, MultipleUnorderedDeletesWithoutShardKey) { ASSERT(batchOp.isFinished()); } +// +// Tests targeting for retryable time-series updates. +// +class TimeseriesRetryableUpdateFixture : public CatalogCacheTestFixture { +public: + void setUp() override { + CatalogCacheTestFixture::setUp(); + const ShardKeyPattern shardKeyPattern(BSON("a" << 1)); + _cm = makeCollectionRoutingInfo( + kNss, shardKeyPattern, nullptr, false, {BSON("a" << splitPoint)}, {}) + .cm; + } + + ChunkManager getChunkManager() const { + return *_cm; + } + + OperationContext* getOpCtx() { + return operationContext(); + } + + const TxnNumber kTxnNumber = 5; + +private: + boost::optional _cm; +}; + +TEST_F(TimeseriesRetryableUpdateFixture, SingleUpdateWithShardKey) { + ShardEndpoint endpoint(ShardId("shard"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + auto targeter = initTargeterFullRange(kNss, endpoint); + targeter.setIsShardedTimeSeriesBucketsNamespace(true); + + // Sets up for retryable writes. + getOpCtx()->setLogicalSessionId(makeLogicalSessionIdForTest()); + getOpCtx()->setTxnNumber(kTxnNumber); + + // Do single-target, single doc batch write op. + BatchedCommandRequest request([&] { + write_ops::UpdateCommandRequest updateOp(kNss); + updateOp.setUpdates({buildUpdate(BSON("a" << 1), BSONObj(), false)}); + return updateOp; + }()); + + BatchWriteOp batchOp(getOpCtx(), request); + + std::map> targeted; + auto status = batchOp.targetBatch(targeter, false, &targeted); + ASSERT_OK(status); + ASSERT_EQUALS(status.getValue(), WriteType::TimeseriesRetryableUpdate); + ASSERT(!batchOp.isFinished()); + ASSERT_EQUALS(targeted.size(), 1u); + assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); +} + +TEST_F(TimeseriesRetryableUpdateFixture, SingleUpdateWithoutShardKey) { + ShardEndpoint endpoint(ShardId("shard"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + auto targeter = initTargeterFullRange(kNss, endpoint); + targeter.setIsShardedTimeSeriesBucketsNamespace(true); + + // Sets up for retryable writes. + getOpCtx()->setLogicalSessionId(makeLogicalSessionIdForTest()); + getOpCtx()->setTxnNumber(kTxnNumber); + + // Do single-target, single doc batch write op. 
+ BatchedCommandRequest request([&] { + write_ops::UpdateCommandRequest updateOp(kNss); + updateOp.setUpdates({buildUpdate(BSON("y" << 1), BSONObj(), false)}); + return updateOp; + }()); + + BatchWriteOp batchOp(getOpCtx(), request); + + std::map> targeted; + auto status = batchOp.targetBatch(targeter, false, &targeted); + ASSERT_OK(status); + ASSERT_EQUALS(status.getValue(), WriteType::WithoutShardKeyOrId); + ASSERT(!batchOp.isFinished()); + ASSERT_EQUALS(targeted.size(), 1u); + assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); +} + +TEST_F(TimeseriesRetryableUpdateFixture, MultipleOrderedUpdateWithShardKey) { + ShardEndpoint endpoint(ShardId("shard"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + auto targeter = initTargeterFullRange(kNss, endpoint); + targeter.setIsShardedTimeSeriesBucketsNamespace(true); + + // Sets up for retryable writes. + getOpCtx()->setLogicalSessionId(makeLogicalSessionIdForTest()); + getOpCtx()->setTxnNumber(kTxnNumber); + + // Do single-target, multi doc batch write op. + BatchedCommandRequest request([&] { + write_ops::UpdateCommandRequest updateOp(kNss); + updateOp.setWriteCommandRequestBase([] { + write_ops::WriteCommandRequestBase wcb; + wcb.setOrdered(true); + return wcb; + }()); + updateOp.setUpdates({buildUpdate(BSON("a" << 1), BSONObj(), false), + buildUpdate(BSON("a" << 1), BSONObj(), false)}); + return updateOp; + }()); + + BatchWriteOp batchOp(getOpCtx(), request); + + std::map> targeted; + auto status = batchOp.targetBatch(targeter, false, &targeted); + ASSERT_OK(status); + ASSERT_EQUALS(status.getValue(), WriteType::TimeseriesRetryableUpdate); + ASSERT(!batchOp.isFinished()); + ASSERT_EQUALS(targeted.size(), 1u); + assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); + + BatchedCommandResponse response; + buildResponse(1, &response); + + // Respond to first targeted batch. + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); + ASSERT(!batchOp.isFinished()); + + targeted.clear(); + + auto status2 = batchOp.targetBatch(targeter, false, &targeted); + ASSERT_OK(status2); + ASSERT_EQUALS(status2.getValue(), WriteType::TimeseriesRetryableUpdate); + ASSERT(!batchOp.isFinished()); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); + assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); + + // Respond to second targeted batch. + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); + ASSERT(batchOp.isFinished()); +} + +TEST_F(TimeseriesRetryableUpdateFixture, MultipleUnorderedUpdateWithShardKey) { + ShardEndpoint endpoint(ShardId("shard"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + auto targeter = initTargeterFullRange(kNss, endpoint); + targeter.setIsShardedTimeSeriesBucketsNamespace(true); + + // Sets up for retryable writes. + getOpCtx()->setLogicalSessionId(makeLogicalSessionIdForTest()); + getOpCtx()->setTxnNumber(kTxnNumber); + + // Do single-target, multi doc batch write op. 
+ BatchedCommandRequest request([&] { + write_ops::UpdateCommandRequest updateOp(kNss); + updateOp.setWriteCommandRequestBase([] { + write_ops::WriteCommandRequestBase wcb; + wcb.setOrdered(false); + return wcb; + }()); + updateOp.setUpdates({buildUpdate(BSON("a" << 1), BSONObj(), false), + buildUpdate(BSON("a" << 1), BSONObj(), false)}); + return updateOp; + }()); + + BatchWriteOp batchOp(getOpCtx(), request); + + std::map> targeted; + auto status = batchOp.targetBatch(targeter, false, &targeted); + ASSERT_OK(status); + ASSERT_EQUALS(status.getValue(), WriteType::TimeseriesRetryableUpdate); + ASSERT(!batchOp.isFinished()); + ASSERT_EQUALS(targeted.size(), 1u); + assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); + + BatchedCommandResponse response; + buildResponse(1, &response); + + // Respond to first targeted batch. + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); + ASSERT(!batchOp.isFinished()); + + targeted.clear(); + + auto status2 = batchOp.targetBatch(targeter, false, &targeted); + ASSERT_OK(status2); + ASSERT_EQUALS(status2.getValue(), WriteType::TimeseriesRetryableUpdate); + ASSERT(!batchOp.isFinished()); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); + assertEndpointsEqual(getFirstTargetedWriteEndpoint(targeted.begin()->second), endpoint); + + // Respond to second targeted batch. + batchOp.noteBatchResponse(*targeted.begin()->second, response, nullptr); + ASSERT(batchOp.isFinished()); +} + } // namespace } // namespace mongo diff --git a/src/mongo/s/write_ops/batched_command_request.cpp b/src/mongo/s/write_ops/batched_command_request.cpp index dc853ec5f17c2..eab2495678f8f 100644 --- a/src/mongo/s/write_ops/batched_command_request.cpp +++ b/src/mongo/s/write_ops/batched_command_request.cpp @@ -27,14 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/db/pipeline/variables.h" #include "mongo/s/write_ops/batched_command_request.h" -#include "mongo/util/assert_util.h" -#include "mongo/util/overloaded_visitor.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo { namespace { @@ -349,4 +356,95 @@ BatchItemRef::BatchItemRef(const BulkWriteCommandRequest* request, int index) } } +int BatchItemRef::getSizeForBatchWriteBytes() const { + tassert(7328113, "Invalid BatchedCommandRequest reference", _batchedRequest); + + switch (_batchType) { + case BatchedCommandRequest::BatchType_Insert: + return getDocument().objsize(); + + case BatchedCommandRequest::BatchType_Update: { + auto& update = _batchedRequest->getUpdateRequest().getUpdates()[_index]; + auto estSize = write_ops::getUpdateSizeEstimate( + update.getQ(), + update.getU(), + update.getC(), + update.getUpsertSupplied().has_value(), + update.getCollation(), + update.getArrayFilters(), + update.getHint(), + update.getSampleId(), + update.getAllowShardKeyUpdatesWithoutFullShardKeyInQuery().has_value()); + // When running a debug build, verify that estSize is at least the BSON serialization + // size. 
+ dassert(estSize >= update.toBSON().objsize()); + return estSize; + } + + case BatchedCommandRequest::BatchType_Delete: { + auto& deleteOp = _batchedRequest->getDeleteRequest().getDeletes()[_index]; + auto estSize = write_ops::getDeleteSizeEstimate(deleteOp.getQ(), + deleteOp.getCollation(), + deleteOp.getHint(), + deleteOp.getSampleId()); + // When running a debug build, verify that estSize is at least the BSON serialization + // size. + dassert(estSize >= deleteOp.toBSON().objsize()); + return estSize; + } + default: + MONGO_UNREACHABLE; + } +} + +int BatchItemRef::getSizeForBulkWriteBytes() const { + tassert(7353600, "Invalid BulkWriteCommandRequest reference", _bulkWriteRequest); + + switch (_batchType) { + case BatchedCommandRequest::BatchType_Insert: { + auto insertOp = *BulkWriteCRUDOp(_bulkWriteRequest->getOps()[_index]).getInsert(); + auto estSize = write_ops::getBulkWriteInsertSizeEstimate(insertOp.getDocument()); + // When running a debug build, verify that estSize is at least the BSON serialization + // size. + dassert(estSize >= insertOp.toBSON().objsize()); + return estSize; + } + + case BatchedCommandRequest::BatchType_Update: { + auto updateOp = *BulkWriteCRUDOp(_bulkWriteRequest->getOps()[_index]).getUpdate(); + auto estSize = + write_ops::getBulkWriteUpdateSizeEstimate(updateOp.getFilter(), + updateOp.getUpdateMods(), + updateOp.getConstants(), + updateOp.getUpsertSupplied().has_value(), + updateOp.getCollation(), + updateOp.getArrayFilters(), + updateOp.getHint(), + updateOp.getSort(), + updateOp.getReturn(), + updateOp.getReturnFields()); + // When running a debug build, verify that estSize is at least the BSON serialization + // size. + dassert(estSize >= updateOp.toBSON().objsize()); + return estSize; + } + case BatchedCommandRequest::BatchType_Delete: { + auto deleteOp = *BulkWriteCRUDOp(_bulkWriteRequest->getOps()[_index]).getDelete(); + auto estSize = + write_ops::getBulkWriteDeleteSizeEstimate(deleteOp.getFilter(), + deleteOp.getCollation(), + deleteOp.getHint(), + deleteOp.getSort(), + deleteOp.getReturn().has_value(), + deleteOp.getReturnFields()); + // When running a debug build, verify that estSize is at least the BSON serialization + // size. 
+ dassert(estSize >= deleteOp.toBSON().objsize()); + return estSize; + } + default: + MONGO_UNREACHABLE; + } +} + } // namespace mongo diff --git a/src/mongo/s/write_ops/batched_command_request.h b/src/mongo/s/write_ops/batched_command_request.h index 4ff0f8701502e..65ec7e13a24cd 100644 --- a/src/mongo/s/write_ops/batched_command_request.h +++ b/src/mongo/s/write_ops/batched_command_request.h @@ -29,16 +29,31 @@ #pragma once +#include +#include #include +#include +#include +#include #include +#include +#include +#include +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/commands/bulk_write_crud_op.h" #include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" #include "mongo/rpc/op_msg.h" #include "mongo/s/database_version.h" #include "mongo/s/shard_version.h" -#include "mongo/util/overloaded_visitor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/overloaded_visitor.h" // IWYU pragma: keep namespace mongo { @@ -54,15 +69,25 @@ class BatchedCommandRequest { : _batchType(BatchType_Insert), _insertReq(std::make_unique(std::move(insertOp))) {} + BatchedCommandRequest(std::unique_ptr insertOp) + : _batchType(BatchType_Insert), _insertReq(std::move(insertOp)) {} + BatchedCommandRequest(write_ops::UpdateCommandRequest updateOp) : _batchType(BatchType_Update), _updateReq(std::make_unique(std::move(updateOp))) {} + BatchedCommandRequest(std::unique_ptr updateOp) + : _batchType(BatchType_Update), _updateReq(std::move(updateOp)) {} + BatchedCommandRequest(write_ops::DeleteCommandRequest deleteOp) : _batchType(BatchType_Delete), _deleteReq(std::make_unique(std::move(deleteOp))) {} + BatchedCommandRequest(std::unique_ptr deleteOp) + : _batchType(BatchType_Delete), _deleteReq(std::move(deleteOp)) {} + BatchedCommandRequest(BatchedCommandRequest&&) = default; + BatchedCommandRequest& operator=(BatchedCommandRequest&&) = default; static BatchedCommandRequest parseInsert(const OpMsgRequest& request); static BatchedCommandRequest parseUpdate(const OpMsgRequest& request); @@ -93,6 +118,18 @@ class BatchedCommandRequest { return *_deleteReq; } + std::unique_ptr extractInsertRequest() { + return std::move(_insertReq); + } + + std::unique_ptr extractUpdateRequest() { + return std::move(_updateReq); + } + + std::unique_ptr extractDeleteRequest() { + return std::move(_deleteReq); + } + std::size_t sizeWriteOps() const; void setWriteConcern(const BSONObj& writeConcern) { @@ -234,6 +271,178 @@ class BatchedCommandRequest { boost::optional _writeConcern; }; + +// TODO SERVER-76655: Update getter names to be consistent with the names we choose for arguments in +// bulkWrite. +/** + * Provides access to information for an update operation. Used to abstract over whether a + * BatchItemRef is pointing to a `mongo::write_ops::UpdateOpEntry` (if it's from an `update` + * command) or a `mongo::BulkWriteUpdateOp` (if it's from a `bulkWrite` command). + */ +class UpdateRef { +public: + UpdateRef() = delete; + UpdateRef(const mongo::write_ops::UpdateOpEntry& batchUpdateReq) + : _batchUpdateRequest(batchUpdateReq) {} + UpdateRef(const mongo::BulkWriteUpdateOp& bulkUpdateReq) + : _bulkWriteUpdateRequest(bulkUpdateReq) {} + + /** + * Returns the filter for the update operation this `UpdateRef` refers to, i.e. 
the `q` + * value for an update operation from an `update` command, or the `filter` value for an update + * operation from a `bulkWrite` command. + */ + const BSONObj& getFilter() const { + if (_batchUpdateRequest) { + return _batchUpdateRequest->getQ(); + } else { + tassert(7328100, "invalid bulkWrite update op reference", _bulkWriteUpdateRequest); + return _bulkWriteUpdateRequest->getFilter(); + } + } + /** + * Returns the `multi` value for the update operation this `UpdateRef` refers to. + */ + bool getMulti() const { + if (_batchUpdateRequest) { + return _batchUpdateRequest->getMulti(); + } else { + tassert(7328101, "invalid bulkWrite update op reference", _bulkWriteUpdateRequest); + return _bulkWriteUpdateRequest->getMulti(); + } + } + + /** + * Returns the `upsert` value for the update operation this `UpdateRef` refers to. + */ + bool getUpsert() const { + if (_batchUpdateRequest) { + return _batchUpdateRequest->getUpsert(); + } else { + tassert(7328102, "invalid bulkWrite update op reference", _bulkWriteUpdateRequest); + return _bulkWriteUpdateRequest->getUpsert(); + } + } + + /** + * Returns the update modification for the update operation this `UpdateRef` refers to, + * i.e. the `u` value for an update operation from an `update` command, or the `updateMods` + * value for an update operation from a `bulkWrite` command. + */ + const write_ops::UpdateModification& getUpdateMods() const { + if (_batchUpdateRequest) { + return _batchUpdateRequest->getU(); + } else { + tassert(7328103, "invalid bulkWrite update op reference", _bulkWriteUpdateRequest); + return _bulkWriteUpdateRequest->getUpdateMods(); + } + } + + /** + * Returns the `collation` value for the update operation this `UpdateRef` refers to. + */ + const boost::optional& getCollation() const { + if (_batchUpdateRequest) { + return _batchUpdateRequest->getCollation(); + } else { + tassert(7328104, "invalid bulkWrite update op reference", _bulkWriteUpdateRequest); + return _bulkWriteUpdateRequest->getCollation(); + } + } + + /** + * Returns the BSON representation of the update operation this `UpdateRef` refers to. + */ + BSONObj toBSON() const { + if (_batchUpdateRequest) { + return _batchUpdateRequest->toBSON(); + } else { + tassert(7328105, "invalid bulkWrite update op reference", _bulkWriteUpdateRequest); + return _bulkWriteUpdateRequest->toBSON(); + } + } + +private: + /** + * Only one of the two of these will be present. + */ + boost::optional _batchUpdateRequest; + boost::optional _bulkWriteUpdateRequest; +}; + +// TODO SERVER-76655: Update getter names to be consistent with the names we choose for arguments in +// bulkWrite. +/** + * Provides access to information for a delete operation. Used to abstract over whether a + * BatchItemRef is pointing to a `mongo::write_ops::DeleteOpEntry` (if it's from a `delete` + * command) or a `mongo::BulkWriteDeleteOp` (if it's from a `bulkWrite` command). + */ +class DeleteRef {public: + DeleteRef() = delete; + DeleteRef(const mongo::write_ops::DeleteOpEntry& batchDeleteReq) + : _batchDeleteRequest(batchDeleteReq) {} + DeleteRef(const mongo::BulkWriteDeleteOp& bulkDeleteReq) + : _bulkWriteDeleteRequest(bulkDeleteReq) {} + + /** + * Returns the filter for the delete operation this `DeleteRef` refers to, i.e. the `q` + * value for a delete operation from a `delete` command, or the `filter` value for a delete + * operation from a `bulkWrite` command.
+ */ + const BSONObj& getFilter() const { + if (_batchDeleteRequest) { + return _batchDeleteRequest->getQ(); + } else { + tassert(7328106, "invalid bulkWrite delete op reference", _bulkWriteDeleteRequest); + return _bulkWriteDeleteRequest->getFilter(); + } + } + + /** + * Returns the `multi` value for the delete operation this `DeleteRef` refers to. + */ + bool getMulti() const { + if (_batchDeleteRequest) { + return _batchDeleteRequest->getMulti(); + } else { + tassert(7328107, "invalid bulkWrite delete op reference", _bulkWriteDeleteRequest); + return _bulkWriteDeleteRequest->getMulti(); + } + } + + /** + * Returns the `collation` value for the delete operation this `DeleteRef` refers to. + */ + const boost::optional& getCollation() const { + if (_batchDeleteRequest) { + return _batchDeleteRequest->getCollation(); + } else { + tassert(7328108, "invalid bulkWrite delete op reference", _bulkWriteDeleteRequest); + return _bulkWriteDeleteRequest->getCollation(); + } + } + + /** + * Returns the BSON representation of the delete operation this `DeleteRef` refers to. + */ + BSONObj toBSON() const { + if (_batchDeleteRequest) { + return _batchDeleteRequest->toBSON(); + } else { + tassert(7328109, "invalid bulkWrite delete op reference", _bulkWriteDeleteRequest); + return _bulkWriteDeleteRequest->toBSON(); + } + } + +private: + /** + * Only one of the two of these will be present. + */ + boost::optional _batchDeleteRequest; + boost::optional _bulkWriteDeleteRequest; +}; + /** * Similar to above, this class wraps the write items of a command request into a generically usable * type. Very thin wrapper, does not own the write item itself. @@ -262,23 +471,26 @@ class BatchItemRef { return BulkWriteCRUDOp(op).getInsert()->getDocument(); } }
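The UpdateRef/DeleteRef wrappers above let targeting code read the filter, multi, upsert, and collation fields of a write item without knowing whether it came from an update/delete command or from bulkWrite. A minimal sketch of such a caller, assuming only the BatchItemRef accessors introduced in this change (getOpType(), getUpdateRef(), getDeleteRef()); the TargetingInputs struct is illustrative, not part of the change:

```cpp
// Sketch of a caller; mirrors how targetWriteOps feeds the two-phase-write check.
struct TargetingInputs {
    BSONObj query;
    BSONObj collation;
    bool isMulti = false;
    bool isUpsert = false;
};

TargetingInputs extractTargetingInputs(const BatchItemRef& writeItem) {
    TargetingInputs out;
    if (writeItem.getOpType() == BatchedCommandRequest::BatchType_Update) {
        UpdateRef updateRef = writeItem.getUpdateRef();
        out.query = updateRef.getFilter();  // 'q' for update commands, 'filter' for bulkWrite
        out.collation = updateRef.getCollation().value_or(BSONObj());
        out.isMulti = updateRef.getMulti();
        out.isUpsert = updateRef.getUpsert();
    } else {
        DeleteRef deleteRef = writeItem.getDeleteRef();
        out.query = deleteRef.getFilter();
        out.collation = deleteRef.getCollation().value_or(BSONObj());
        out.isMulti = deleteRef.getMulti();
    }
    return out;
}
```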
This method *must* only be called if + * the underlying write op is from an insert/update/delete command. Do not call this method if + * the underlying write op is from a bulkWrite - use getSizeForBulkWriteBytes() instead. + */ + int getSizeForBatchWriteBytes() const; + + /** + * Gets an estimate of how much space, in bytes, the referred-to write operation would add to a + * bulkWrite command. This method *must* only be called if the underlying write op is from a + * bulkWrite command. Do not call this method if the underlying write op is from an insert, + * update, or delete command - use getSizeForBatchWriteBytes() instead. + */ + int getSizeForBulkWriteBytes() const; + private: boost::optional _batchedRequest; boost::optional _bulkWriteRequest; const int _index; + /** + * If this BatchItemRef points to an op in a BatchedCommandRequest, stores the type of the + * entire batch. If this BatchItemRef points to an op in a BulkWriteRequest, stores the type + * of this individual op (the batch it belongs to may have a mix of op types.) + */ BatchedCommandRequest::BatchType _batchType; }; diff --git a/src/mongo/s/write_ops/batched_command_request_test.cpp b/src/mongo/s/write_ops/batched_command_request_test.cpp index b8fa16ad9bcac..bcf4d86d53121 100644 --- a/src/mongo/s/write_ops/batched_command_request_test.cpp +++ b/src/mongo/s/write_ops/batched_command_request_test.cpp @@ -27,11 +27,25 @@ * it in the license file. */ -#include "mongo/bson/json.h" +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/ops/write_ops_parsers_test_helpers.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/write_ops/batched_command_request.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -48,7 +62,7 @@ TEST(BatchedCommandRequest, BasicInsert) { const auto opMsgRequest(toOpMsg("TestDB", origInsertRequestObj, docSeq)); const auto insertRequest(BatchedCommandRequest::parseInsert(opMsgRequest)); - ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns()); + ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns_forTest()); ASSERT(!insertRequest.hasShardVersion()); } } @@ -71,7 +85,7 @@ TEST(BatchedCommandRequest, InsertWithShardVersion) { const auto opMsgRequest(toOpMsg("TestDB", origInsertRequestObj, docSeq)); const auto insertRequest(BatchedCommandRequest::parseInsert(opMsgRequest)); - ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns()); + ASSERT_EQ("TestDB.test", insertRequest.getInsertRequest().getNamespace().ns_forTest()); ASSERT(insertRequest.hasShardVersion()); ASSERT_EQ(ShardVersionFactory::make(ChunkVersion({epoch, timestamp}, {1, 2}), boost::optional(boost::none)) @@ -97,7 +111,7 @@ TEST(BatchedCommandRequest, InsertCloneWithIds) { const auto clonedRequest(BatchedCommandRequest::cloneInsertWithIds(std::move(batchedRequest))); - ASSERT_EQ("xyz.abc", clonedRequest.getNS().ns()); + ASSERT_EQ("xyz.abc", clonedRequest.getNS().ns_forTest()); ASSERT(clonedRequest.getWriteCommandRequestBase().getOrdered()); 
ASSERT(clonedRequest.getWriteCommandRequestBase().getBypassDocumentValidation()); ASSERT_BSONOBJ_EQ(BSON("w" << 2), clonedRequest.getWriteConcern()); diff --git a/src/mongo/s/write_ops/batched_command_response.cpp b/src/mongo/s/write_ops/batched_command_response.cpp index 39249363a6612..d4244837290f5 100644 --- a/src/mongo/s/write_ops/batched_command_response.cpp +++ b/src/mongo/s/write_ops/batched_command_response.cpp @@ -29,13 +29,23 @@ #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/base/init.h" -#include "mongo/bson/util/bson_extract.h" -#include "mongo/db/commands.h" +#include +#include + +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/timestamp.h" #include "mongo/db/field_parser.h" +#include "mongo/db/ops/write_ops_gen.h" #include "mongo/db/repl/bson_extract_optime.h" #include "mongo/rpc/get_status_from_command_result.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/s/write_ops/batched_command_response.h b/src/mongo/s/write_ops/batched_command_response.h index e1d9f2cb22417..ea37b3aab3927 100644 --- a/src/mongo/s/write_ops/batched_command_response.h +++ b/src/mongo/s/write_ops/batched_command_response.h @@ -29,13 +29,31 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" #include "mongo/db/jsobj.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/repl/optime.h" #include "mongo/db/session/logical_session_id.h" #include "mongo/rpc/write_concern_error_detail.h" #include "mongo/s/write_ops/batched_upsert_detail.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/s/write_ops/batched_command_response_test.cpp b/src/mongo/s/write_ops/batched_command_response_test.cpp index 0b6eee16eb54e..82400b926c0bd 100644 --- a/src/mongo/s/write_ops/batched_command_response_test.cpp +++ b/src/mongo/s/write_ops/batched_command_response_test.cpp @@ -27,12 +27,29 @@ * it in the license file. 
*/ -#include "mongo/db/jsobj.h" -#include "mongo/db/ops/write_ops.h" +#include +#include + +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/stale_exception.h" #include "mongo/s/write_ops/batched_command_response.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -183,7 +200,7 @@ TEST(BatchedCommandResponseTest, CompatibilityFromWriteErrorToBatchCommandRespon ASSERT_EQ(ErrorCodes::StaleConfig, response.getErrDetailsAt(0).getStatus().code()); ASSERT_EQ("Test stale config", response.getErrDetailsAt(0).getStatus().reason()); auto staleInfo = response.getErrDetailsAt(0).getStatus().extraInfo(); - ASSERT_EQ("TestDB.TestColl", staleInfo->getNss().ns()); + ASSERT_EQ("TestDB.TestColl", staleInfo->getNss().ns_forTest()); ASSERT_EQ(versionReceived, staleInfo->getVersionReceived()); ASSERT(!staleInfo->getVersionWanted()); ASSERT_EQ(ShardId("TestShard"), staleInfo->getShardId()); diff --git a/src/mongo/s/write_ops/batched_upsert_detail.cpp b/src/mongo/s/write_ops/batched_upsert_detail.cpp index e455f291cbd48..c624813ecfed6 100644 --- a/src/mongo/s/write_ops/batched_upsert_detail.cpp +++ b/src/mongo/s/write_ops/batched_upsert_detail.cpp @@ -29,8 +29,12 @@ #include "mongo/s/write_ops/batched_upsert_detail.h" +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/field_parser.h" -#include "mongo/util/str.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/s/write_ops/batched_upsert_detail.h b/src/mongo/s/write_ops/batched_upsert_detail.h index c012f2b906c13..c0e56d1a3458a 100644 --- a/src/mongo/s/write_ops/batched_upsert_detail.h +++ b/src/mongo/s/write_ops/batched_upsert_detail.h @@ -31,6 +31,8 @@ #include +#include "mongo/bson/bson_field.h" +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" namespace mongo { diff --git a/src/mongo/s/write_ops/bulk_write_command_modifier.cpp b/src/mongo/s/write_ops/bulk_write_command_modifier.cpp new file mode 100644 index 0000000000000..51f8e95c56037 --- /dev/null +++ b/src/mongo/s/write_ops/bulk_write_command_modifier.cpp @@ -0,0 +1,243 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include + +#include +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/s/write_ops/bulk_write_command_modifier.h" +#include "mongo/util/assert_util.h" + +namespace mongo { + +void BulkWriteCommandModifier::parseRequestFromOpMsg(const NamespaceString& nss, + const OpMsgRequest& request) { + auto shardVersionField = request.body[ShardVersion::kShardVersionField]; + if (!shardVersionField.eoo()) { + auto shardVersion = ShardVersion::parse(shardVersionField); + if (shardVersion == ShardVersion::UNSHARDED()) { + setDbVersion(nss, DatabaseVersion(request.body)); + } + setShardVersion(nss, shardVersion); + } + + // The 'isTimeseriesNamespace' is an internal parameter used for communication between mongos + // and mongod. + auto isTimeseriesNamespace = + request.body[write_ops::WriteCommandRequestBase::kIsTimeseriesNamespaceFieldName]; + uassert(7299100, + "the 'isTimeseriesNamespace' parameter cannot be used on mongos", + !isTimeseriesNamespace.trueValue()); + + setIsTimeseriesNamespace(nss, isTimeseriesNamespace.trueValue()); +} + +std::tuple BulkWriteCommandModifier::getNsInfoEntry( + const NamespaceString& nss) { + if (_nsInfoIdxes.contains(nss)) { + // Already have this NamespaceInfoEntry stored. + auto idx = _nsInfoIdxes[nss]; + return std::tie(_nsInfos[idx], idx); + } + // Create new NamespaceInfoEntry. 
+ auto nsInfoEntry = NamespaceInfoEntry(nss); + auto idx = _nsInfos.size(); + _nsInfos.emplace_back(nsInfoEntry); + + _nsInfoIdxes[nss] = idx; + return std::tie(_nsInfos[idx], idx); +} + +void BulkWriteCommandModifier::finishBuild() { + _request->setOps(std::move(_ops)); + _request->setNsInfo(std::move(_nsInfos)); +} + +void BulkWriteCommandModifier::addOp(write_ops::InsertCommandRequest insertOp) { + auto nss = insertOp.getNamespace(); + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + nsInfoEntry.setEncryptionInformation(insertOp.getEncryptionInformation()); + + for (const auto& doc : insertOp.getDocuments()) { + auto op = BulkWriteInsertOp(idx, doc); + _ops.emplace_back(op); + } +} + +void BulkWriteCommandModifier::addOp(write_ops::UpdateCommandRequest updateOp) { + auto nss = updateOp.getNamespace(); + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + nsInfoEntry.setEncryptionInformation(updateOp.getEncryptionInformation()); + + for (const auto& update : updateOp.getUpdates()) { + auto op = BulkWriteUpdateOp(idx, update.getQ(), update.getU()); + + op.setArrayFilters(update.getArrayFilters()); + op.setMulti(update.getMulti()); + op.setCollation(update.getCollation()); + op.setUpsert(update.getUpsert()); + op.setHint(update.getHint()); + op.setConstants(update.getC()); + + _ops.emplace_back(op); + } +} + +void BulkWriteCommandModifier::addOp(write_ops::DeleteCommandRequest deleteOp) { + auto nss = deleteOp.getNamespace(); + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + nsInfoEntry.setEncryptionInformation(deleteOp.getEncryptionInformation()); + + for (const auto& delOp : deleteOp.getDeletes()) { + auto op = BulkWriteDeleteOp(idx, delOp.getQ()); + + op.setHint(delOp.getHint()); + op.setMulti(delOp.getMulti()); + op.setCollation(delOp.getCollation()); + + _ops.emplace_back(op); + } +} + +void BulkWriteCommandModifier::addInsert(const OpMsgRequest& request) { + auto parsedInsertOp = InsertOp::parse(request); + + auto nss = parsedInsertOp.getNamespace(); + + parseRequestFromOpMsg(nss, request); + + addOp(parsedInsertOp); +} + +void BulkWriteCommandModifier::addUpdate(const OpMsgRequest& request) { + auto parsedUpdateOp = UpdateOp::parse(request); + + auto nss = parsedUpdateOp.getNamespace(); + + parseRequestFromOpMsg(nss, request); + + addOp(parsedUpdateOp); +} + +void BulkWriteCommandModifier::addDelete(const OpMsgRequest& request) { + auto parsedDeleteOp = DeleteOp::parse(request); + + auto nss = parsedDeleteOp.getNamespace(); + + parseRequestFromOpMsg(nss, request); + + addOp(parsedDeleteOp); +} + +void BulkWriteCommandModifier::addInsertOps(const NamespaceString& nss, + const std::vector docs) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + + for (const auto& doc : docs) { + auto op = BulkWriteInsertOp(idx, doc); + + _ops.emplace_back(op); + } +} + +void BulkWriteCommandModifier::addUpdateOp( + const NamespaceString& nss, + const BSONObj& query, + const BSONObj& update, + bool upsert, + bool multi, + const StringData& returnField, + const boost::optional>& arrayFilters, + const boost::optional& collation, + const boost::optional& sort, + const boost::optional& returnFields, + const boost::optional& hint) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + + auto op = BulkWriteUpdateOp(idx, query, update); + + op.setUpsert(upsert); + op.setMulti(multi); + op.setReturn(returnField); + op.setReturnFields(returnFields); + op.setCollation(collation); + op.setHint(hint.value_or(BSONObj())); + op.setArrayFilters(arrayFilters); + op.setSort(sort); + + _ops.emplace_back(op); +} + +void 
BulkWriteCommandModifier::addPipelineUpdateOps(const NamespaceString& nss, + const BSONObj& query, + const std::vector& updates, + bool upsert, + bool useMultiUpdate) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + + auto updateMod = write_ops::UpdateModification(); + auto op = BulkWriteUpdateOp(idx, query, updates); + + op.setUpsert(upsert); + op.setMulti(useMultiUpdate); + + _ops.emplace_back(op); +} + +void BulkWriteCommandModifier::addDeleteOp(const NamespaceString& nss, + const BSONObj& query, + bool multiDelete, + bool returnField, + const boost::optional& collation, + const boost::optional& sort, + const boost::optional& returnFields, + const boost::optional& hint) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + + auto op = BulkWriteDeleteOp(idx, query); + + op.setMulti(multiDelete); + op.setReturn(returnField); + op.setReturnFields(returnFields); + op.setHint(hint.value_or(BSONObj())); + op.setSort(sort); + op.setCollation(collation); + + _ops.emplace_back(op); +} + +} // namespace mongo diff --git a/src/mongo/s/write_ops/bulk_write_command_modifier.h b/src/mongo/s/write_ops/bulk_write_command_modifier.h new file mode 100644 index 0000000000000..45d2c0878fc60 --- /dev/null +++ b/src/mongo/s/write_ops/bulk_write_command_modifier.h @@ -0,0 +1,178 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/crypto/fle_field_schema_gen.h" +#include "mongo/db/commands/bulk_write_crud_op.h" +#include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/s/database_version.h" +#include "mongo/s/shard_version.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util_core.h" + +namespace mongo { + +/** + * Helper functions which add new operations into an existing BulkWriteCommandRequest. 
+ */ +class BulkWriteCommandModifier { +public: + BulkWriteCommandModifier(BulkWriteCommandRequest* request, size_t capacity = 0) + : _request(request), _ops(request->getOps()), _nsInfos(request->getNsInfo()) { + invariant(_request); + for (size_t i = 0; i < _nsInfos.size(); i++) { + auto nsInfo = _nsInfos[i]; + _nsInfoIdxes[nsInfo.getNs()] = i; + } + + if (capacity > 0) { + _ops.reserve(capacity); + } + } + + BulkWriteCommandModifier(BulkWriteCommandModifier&&) = default; + + /** + * This function must be called for the BulkWriteCommandRequest to be in a usable state. + */ + void finishBuild(); + + void addOp(write_ops::InsertCommandRequest insertOp); + void addOp(write_ops::UpdateCommandRequest updateOp); + void addOp(write_ops::DeleteCommandRequest deleteOp); + + void addInsert(const OpMsgRequest& request); + void addUpdate(const OpMsgRequest& request); + void addDelete(const OpMsgRequest& request); + + size_t numOps() const { + return _request->getOps().size(); + } + + void setIsTimeseriesNamespace(const NamespaceString& nss, bool isTimeseriesNamespace) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + nsInfoEntry.setIsTimeseriesNamespace(isTimeseriesNamespace); + } + + void setEncryptionInformation(const NamespaceString& nss, + const EncryptionInformation& encryption) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + nsInfoEntry.setEncryptionInformation(encryption); + } + + void setShardVersion(const NamespaceString& nss, const ShardVersion& sv) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + nsInfoEntry.setShardVersion(sv); + } + + const ShardVersion& getShardVersion(const NamespaceString& nss) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + invariant(nsInfoEntry.getShardVersion()); + return *nsInfoEntry.getShardVersion(); + } + + void setDbVersion(const NamespaceString& nss, const DatabaseVersion& dbv) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + nsInfoEntry.setDatabaseVersion(dbv); + } + + const DatabaseVersion& getDbVersion(const NamespaceString& nss) { + auto [nsInfoEntry, idx] = getNsInfoEntry(nss); + invariant(nsInfoEntry.getDatabaseVersion()); + return *nsInfoEntry.getDatabaseVersion(); + } + + void addInsertOps(const NamespaceString& nss, std::vector docs); + + void addUpdateOp(const NamespaceString& nss, + const BSONObj& query, + const BSONObj& update, + bool upsert, + bool multi, + const StringData& returnField, + const boost::optional>& arrayFilters, + const boost::optional& collation, + const boost::optional& sort, + const boost::optional& returnFields, + const boost::optional& hint); + + void addPipelineUpdateOps(const NamespaceString& nss, + const BSONObj& query, + const std::vector& updates, + bool upsert, + bool useMultiUpdate); + + void addDeleteOp(const NamespaceString& nss, + const BSONObj& query, + bool multiDelete, + bool returnField, + const boost::optional& collation, + const boost::optional& sort, + const boost::optional& returnFields, + const boost::optional& hint); + +private: + BulkWriteCommandRequest* _request; + + stdx::unordered_map _nsInfoIdxes; + + std::vector< + stdx::variant> + _ops; + std::vector _nsInfos; + + /** + * Gets the NamespaceInfoEntry for the associated namespace. If one does not exist + * then it will be created. Returns a reference to the NamespaceInfoEntry and the index in + * the nsInfo array. 
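For orientation, a minimal sketch of the intended call pattern, using only the addOp() and finishBuild() methods shown above; the wrapper function itself is hypothetical:

```cpp
// Fold a parsed update command into a fresh bulkWrite request. finishBuild() must run
// before the request is used, since it moves the accumulated ops and nsInfo into place.
BulkWriteCommandRequest buildBulkWriteFromUpdate(write_ops::UpdateCommandRequest updateOp) {
    BulkWriteCommandRequest request;
    BulkWriteCommandModifier builder(&request);
    builder.addOp(std::move(updateOp));  // appends one BulkWriteUpdateOp per entry in 'updates'
    builder.finishBuild();
    return request;
}
```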
+ */ + std::tuple getNsInfoEntry(const NamespaceString& nss); + + void parseRequestFromOpMsg(const NamespaceString& nss, const OpMsgRequest& request); +}; + +} // namespace mongo diff --git a/src/mongo/s/write_ops/bulk_write_command_modifier_test.cpp b/src/mongo/s/write_ops/bulk_write_command_modifier_test.cpp new file mode 100644 index 0000000000000..4aa3ba55ad030 --- /dev/null +++ b/src/mongo/s/write_ops/bulk_write_command_modifier_test.cpp @@ -0,0 +1,509 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/commands/bulk_write_crud_op.h" +#include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/ops/write_ops_parsers_test_helpers.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/index_version.h" +#include "mongo/s/shard_version_factory.h" +#include "mongo/s/write_ops/bulk_write_command_modifier.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" + +namespace mongo { +namespace { + +TEST(BulkWriteCommandModifier, AddInsert) { + BSONArray insertArray = BSON_ARRAY(BSON("a" << 1) << BSON("b" << 1)); + + BSONObj origInsertRequestObj = BSON("insert" + << "test" + << "documents" << insertArray << "writeConcern" + << BSON("w" << 1) << "ordered" << true); + + for (auto docSeq : {false, true}) { + const auto opMsgRequest(toOpMsg("TestDB", origInsertRequestObj, docSeq)); + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addInsert(opMsgRequest); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(2, request.getOps().size()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + } +} + +TEST(BulkWriteCommandModifier, AddOpInsert) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + auto req = write_ops::InsertCommandRequest(nss); + auto docs = std::vector(); + docs.emplace_back(BSON("a" << 1)); + docs.emplace_back(BSON("b" << 1)); + req.setDocuments(docs); + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addOp(req); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(2, request.getOps().size()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); +} + +TEST(BulkWriteCommandModifier, AddInsertOps) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + auto docs = std::vector(); + docs.emplace_back(BSON("a" << 1)); + docs.emplace_back(BSON("b" << 1)); + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addInsertOps(nss, docs); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(2, request.getOps().size()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); +} + +TEST(BulkWriteCommandModifier, InsertWithShardVersion) { + BSONArray insertArray = BSON_ARRAY(BSON("a" << 1) << BSON("b" << 1)); + + const OID epoch = OID::gen(); + const Timestamp timestamp(2, 2); + const Timestamp majorAndMinor(1, 2); + + BSONObj origInsertRequestObj = BSON("insert" + << "test" + << "documents" << insertArray << "writeConcern" + << BSON("w" << 1) << "ordered" << true << "shardVersion" + << BSON("e" << epoch << "t" << timestamp << "v" + << majorAndMinor)); + + for (auto docSeq : {false, true}) { + const auto opMsgRequest(toOpMsg("TestDB", 
origInsertRequestObj, docSeq)); + + BulkWriteCommandRequest request = BulkWriteCommandRequest(); + BulkWriteCommandModifier builder(&request); + builder.addInsert(opMsgRequest); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_NE(boost::none, nsInfo[0].getShardVersion()); + ASSERT_EQ(ShardVersionFactory::make(ChunkVersion({epoch, timestamp}, {1, 2}), + boost::optional(boost::none)) + .toString(), + (*nsInfo[0].getShardVersion()).toString()); + } +} + +TEST(BulkWriteCommandModifier, AddUpdate) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + const BSONObj query = BSON("x" << 1); + const BSONObj update = BSON("$inc" << BSON("x" << 1)); + const BSONObj collation = BSON("locale" + << "en_US"); + const BSONObj arrayFilter = BSON("i" << 0); + for (bool upsert : {false, true}) { + for (bool multi : {false, true}) { + auto rawUpdate = + BSON("q" << query << "u" << update << "arrayFilters" << BSON_ARRAY(arrayFilter) + << "multi" << multi << "upsert" << upsert << "collation" << collation); + auto cmd = BSON("update" << nss.coll() << "updates" << BSON_ARRAY(rawUpdate)); + for (bool seq : {false, true}) { + auto opMsgRequest = toOpMsg(nss.db_forTest(), cmd, seq); + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addUpdate(opMsgRequest); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(1, request.getOps().size()); + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(upsert, op.getUpdate()->getUpsert()); + ASSERT_EQ(multi, op.getUpdate()->getMulti()); + ASSERT_BSONOBJ_EQ(query, op.getUpdate()->getFilter()); + ASSERT_BSONOBJ_EQ(update, op.getUpdate()->getUpdateMods().getUpdateModifier()); + ASSERT_BSONOBJ_EQ(collation, op.getUpdate()->getCollation().value_or(BSONObj())); + ASSERT(op.getUpdate()->getArrayFilters()); + auto filter = (*op.getUpdate()->getArrayFilters())[0]; + ASSERT_BSONOBJ_EQ(arrayFilter, filter); + } + } + } +} + +TEST(BulkWriteCommandModifier, AddOpUpdate) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + const BSONObj query = BSON("x" << 1); + const BSONObj update = BSON("$inc" << BSON("x" << 1)); + const BSONObj collation = BSON("locale" + << "en_US"); + const BSONObj arrayFilter = BSON("i" << 0); + + auto updateOp = write_ops::UpdateOpEntry(); + updateOp.setQ(query); + updateOp.setU(update); + updateOp.setCollation(collation); + updateOp.setArrayFilters({{arrayFilter}}); + + for (bool upsert : {false, true}) { + for (bool multi : {false, true}) { + auto req = write_ops::UpdateCommandRequest(nss); + updateOp.setMulti(multi); + updateOp.setUpsert(upsert); + req.setUpdates({updateOp}); + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addOp(req); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(1, request.getOps().size()); + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(upsert, op.getUpdate()->getUpsert()); + ASSERT_EQ(multi, 
op.getUpdate()->getMulti()); + ASSERT_BSONOBJ_EQ(query, op.getUpdate()->getFilter()); + ASSERT_BSONOBJ_EQ(update, op.getUpdate()->getUpdateMods().getUpdateModifier()); + ASSERT_BSONOBJ_EQ(collation, op.getUpdate()->getCollation().value_or(BSONObj())); + ASSERT(op.getUpdate()->getArrayFilters()); + auto filter = (*op.getUpdate()->getArrayFilters())[0]; + ASSERT_BSONOBJ_EQ(arrayFilter, filter); + } + } +} + +TEST(BulkWriteCommandModifier, AddUpdateOps) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + const BSONObj query = BSON("x" << 1); + const BSONObj update = BSON("$inc" << BSON("x" << 1)); + const BSONObj collation = BSON("locale" + << "en_US"); + const BSONObj arrayFilter = BSON("i" << 0); + + for (bool upsert : {false, true}) { + for (bool multi : {false, true}) { + for (std::string returnField : {"pre", "post"}) { + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addUpdateOp(nss, + query, + update, + upsert, + multi, + returnField, + {{arrayFilter}}, + collation, + boost::none, + boost::none, + boost::none); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(1, request.getOps().size()); + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(upsert, op.getUpdate()->getUpsert()); + ASSERT_EQ(multi, op.getUpdate()->getMulti()); + ASSERT_BSONOBJ_EQ(query, op.getUpdate()->getFilter()); + ASSERT_BSONOBJ_EQ(update, op.getUpdate()->getUpdateMods().getUpdateModifier()); + ASSERT_BSONOBJ_EQ(collation, op.getUpdate()->getCollation().value_or(BSONObj())); + ASSERT(op.getUpdate()->getArrayFilters()); + auto filter = (*op.getUpdate()->getArrayFilters())[0]; + ASSERT_BSONOBJ_EQ(arrayFilter, filter); + ASSERT_EQ(returnField, *op.getUpdate()->getReturn()); + } + } + } +} + +TEST(CommandWriteOpsParsers, BulkWriteUpdateWithPipeline) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + const BSONObj query = BSON("q" << BSON("x" << 1)); + std::vector pipeline{BSON("$addFields" << BSON("x" << 1))}; + const BSONObj update = BSON("u" << pipeline); + for (bool upsert : {false, true}) { + for (bool multi : {false, true}) { + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addPipelineUpdateOps(nss, query, pipeline, upsert, multi); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(1, request.getOps().size()); + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(upsert, op.getUpdate()->getUpsert()); + ASSERT_EQ(multi, op.getUpdate()->getMulti()); + ASSERT_BSONOBJ_EQ(query, op.getUpdate()->getFilter()); + ASSERT_BSONOBJ_EQ(pipeline[0], op.getUpdate()->getUpdateMods().getUpdatePipeline()[0]); + } + } +} + +TEST(BulkWriteCommandModifier, AddDelete) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + const BSONObj query = BSON("x" << 1); + const BSONObj collation = BSON("locale" + << "en_US"); + for (bool multi : {false, true}) { + auto rawDelete = + BSON("q" << query << "limit" << (multi ? 
0 : 1) << "collation" << collation); + auto cmd = BSON("delete" << nss.coll() << "deletes" << BSON_ARRAY(rawDelete)); + for (bool seq : {false, true}) { + auto opMsgRequest = toOpMsg(nss.db_forTest(), cmd, seq); + + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addDelete(opMsgRequest); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(1, request.getOps().size()); + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(multi, op.getDelete()->getMulti()); + ASSERT_BSONOBJ_EQ(query, op.getDelete()->getFilter()); + ASSERT_BSONOBJ_EQ(collation, op.getDelete()->getCollation().value_or(BSONObj())); + } + } +} + +TEST(BulkWriteCommandModifier, AddOpDelete) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + const BSONObj query = BSON("x" << 1); + const BSONObj collation = BSON("locale" + << "en_US"); + + auto delOp = write_ops::DeleteOpEntry(); + delOp.setCollation(collation); + delOp.setQ(query); + for (bool multi : {false, true}) { + auto delReq = write_ops::DeleteCommandRequest(nss); + delOp.setMulti(multi); + delReq.setDeletes({delOp}); + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addOp(delReq); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(1, request.getOps().size()); + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(multi, op.getDelete()->getMulti()); + ASSERT_BSONOBJ_EQ(query, op.getDelete()->getFilter()); + ASSERT_BSONOBJ_EQ(collation, op.getDelete()->getCollation().value_or(BSONObj())); + } +} + +// Add delete ops +TEST(BulkWriteCommandModifier, AddDeleteOps) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + const BSONObj query = BSON("x" << 1); + const BSONObj collation = BSON("locale" + << "en_US"); + for (bool multi : {false, true}) { + for (bool returnField : {false, true}) { + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addDeleteOp( + nss, query, multi, returnField, collation, boost::none, boost::none, boost::none); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(1, request.getOps().size()); + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(multi, op.getDelete()->getMulti()); + ASSERT_EQ(returnField, op.getDelete()->getReturn()); + ASSERT_BSONOBJ_EQ(query, op.getDelete()->getFilter()); + ASSERT_BSONOBJ_EQ(collation, op.getDelete()->getCollation().value_or(BSONObj())); + } + } +} + +TEST(BulkWriteCommandModifier, TestMultiOpsSameNs) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + auto docs = std::vector(); + docs.emplace_back(BSON("a" << 1)); + docs.emplace_back(BSON("b" << 1)); + + const BSONObj query = BSON("x" << 1); + const BSONObj collation = BSON("locale" + << "en_US"); + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + 
builder.addInsertOps(nss, docs); + builder.addDeleteOp(nss, query, true, true, collation, boost::none, boost::none, boost::none); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(1, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(3, request.getOps().size()); + { + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(BulkWriteCRUDOp::kInsert, op.getType()); + } + { + auto op = BulkWriteCRUDOp(request.getOps()[1]); + ASSERT_EQ(BulkWriteCRUDOp::kInsert, op.getType()); + } + { + auto op = BulkWriteCRUDOp(request.getOps()[2]); + ASSERT_EQ(BulkWriteCRUDOp::kDelete, op.getType()); + ASSERT_EQ(true, op.getDelete()->getMulti()); + ASSERT_EQ(true, op.getDelete()->getReturn()); + ASSERT_BSONOBJ_EQ(query, op.getDelete()->getFilter()); + ASSERT_BSONOBJ_EQ(collation, op.getDelete()->getCollation().value_or(BSONObj())); + } +} + +// Multiple ops (different types) different namespaces +TEST(BulkWriteCommandModifier, TestMultiOpsDifferentNs) { + auto nss = NamespaceString::createNamespaceString_forTest("TestDB", "test"); + auto nss2 = NamespaceString::createNamespaceString_forTest("TestDB", "test1"); + auto docs = std::vector(); + docs.emplace_back(BSON("a" << 1)); + docs.emplace_back(BSON("b" << 1)); + + const BSONObj query = BSON("x" << 1); + const BSONObj collation = BSON("locale" + << "en_US"); + + BulkWriteCommandRequest request; + BulkWriteCommandModifier builder(&request); + builder.addInsertOps(nss, docs); + builder.addDeleteOp(nss2, query, true, true, collation, boost::none, boost::none, boost::none); + builder.finishBuild(); + + auto nsInfo = request.getNsInfo(); + ASSERT_EQ(2, nsInfo.size()); + ASSERT_EQ("TestDB", nsInfo[0].getNs().db_forTest()); + ASSERT_EQ("test", nsInfo[0].getNs().coll()); + ASSERT_EQ("TestDB", nsInfo[1].getNs().db_forTest()); + ASSERT_EQ("test1", nsInfo[1].getNs().coll()); + ASSERT_EQ(boost::none, nsInfo[0].getShardVersion()); + + ASSERT_EQ(3, request.getOps().size()); + { + auto op = BulkWriteCRUDOp(request.getOps()[0]); + ASSERT_EQ(BulkWriteCRUDOp::kInsert, op.getType()); + ASSERT_EQ(0, op.getNsInfoIdx()); + } + { + auto op = BulkWriteCRUDOp(request.getOps()[1]); + ASSERT_EQ(BulkWriteCRUDOp::kInsert, op.getType()); + ASSERT_EQ(0, op.getNsInfoIdx()); + } + { + auto op = BulkWriteCRUDOp(request.getOps()[2]); + ASSERT_EQ(BulkWriteCRUDOp::kDelete, op.getType()); + ASSERT_EQ(1, op.getNsInfoIdx()); + ASSERT_EQ(true, op.getDelete()->getMulti()); + ASSERT_EQ(true, op.getDelete()->getReturn()); + ASSERT_BSONOBJ_EQ(query, op.getDelete()->getFilter()); + ASSERT_BSONOBJ_EQ(collation, op.getDelete()->getCollation().value_or(BSONObj())); + } +} + +} // namespace +} // namespace mongo diff --git a/src/mongo/s/write_ops/bulk_write_exec.cpp b/src/mongo/s/write_ops/bulk_write_exec.cpp index 89303c51a490b..68558a56369d6 100644 --- a/src/mongo/s/write_ops/bulk_write_exec.cpp +++ b/src/mongo/s/write_ops/bulk_write_exec.cpp @@ -29,27 +29,186 @@ #include "mongo/s/write_ops/bulk_write_exec.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/error_codes.h" -#include "mongo/client/remote_command_targeter.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/timestamp.h" +#include 
"mongo/client/read_preference.h" +#include "mongo/db/commands/bulk_write_common.h" +#include "mongo/db/commands/bulk_write_crud_op.h" #include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/commands/bulk_write_parser.h" +#include "mongo/db/database_name.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/session/logical_session_id_helpers.h" +#include "mongo/db/tenant_id.h" +#include "mongo/db/write_concern_options.h" +#include "mongo/executor/remote_command_response.h" #include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" -#include "mongo/s/client/shard_registry.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/s/async_requests_sender.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/client/shard.h" +#include "mongo/s/database_version.h" #include "mongo/s/grid.h" +#include "mongo/s/index_version.h" +#include "mongo/s/multi_statement_transaction_requests_sender.h" +#include "mongo/s/shard_version.h" +#include "mongo/s/shard_version_factory.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" -#include "mongo/s/write_ops/write_without_shard_key_util.h" +#include "mongo/s/write_ops/batch_write_op.h" +#include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/s/write_ops/write_op.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding namespace mongo { +namespace bulk_write_exec { namespace { // The number of times we'll try to continue a batch op if no progress is being made. This only // applies when no writes are occurring and metadata is not changing on reload. const int kMaxRoundsWithoutProgress(5); +bool isVerboseWc(const BSONObj& wc) { + BSONElement wElem = wc["w"]; + return !wElem.isNumber() || wElem.Number() != 0; +} + +// Send and process the child batches. Each child batch is targeted at a unique shard: therefore +// one shard will have only one batch incoming. +void executeChildBatches(OperationContext* opCtx, + TargetedBatchMap& childBatches, + BulkWriteOp& bulkWriteOp, + stdx::unordered_map& errorsPerNamespace) { + std::vector requests; + for (auto& childBatch : childBatches) { + auto request = [&]() { + auto bulkReq = bulkWriteOp.buildBulkCommandRequest(*childBatch.second); + + // Transform the request into a sendable BSON. + BSONObjBuilder builder; + bulkReq.serialize(BSONObj(), &builder); + + logical_session_id_helpers::serializeLsidAndTxnNumber(opCtx, &builder); + + auto wc = opCtx->getWriteConcern().toBSON(); + if (isVerboseWc(wc)) { + builder.append(WriteConcernOptions::kWriteConcernField, + opCtx->getWriteConcern().toBSON()); + } else { + // Mongos needs to send to the shard with w > 0 so it will be able to see the + // writeErrors + builder.append(WriteConcernOptions::kWriteConcernField, upgradeWriteConcern(wc)); + } + + auto obj = builder.obj(); + // When running a debug build, verify that estSize is at least the BSON serialization + // size. + dassert(childBatch.second->getEstimatedSizeBytes() >= obj.objsize()); + return obj; + }(); + + requests.emplace_back(childBatch.first, request); + } + + // Use MultiStatementTransactionRequestsSender to send any ready sub-batches to targeted + // shard endpoints. 
Requests are sent on construction. + MultiStatementTransactionRequestsSender ars( + opCtx, + Grid::get(opCtx)->getExecutorPool()->getArbitraryExecutor(), + DatabaseName::kAdmin, + requests, + ReadPreferenceSetting(ReadPreference::PrimaryOnly), + opCtx->isRetryableWrite() ? Shard::RetryPolicy::kIdempotent : Shard::RetryPolicy::kNoRetry); + + while (!ars.done()) { + // Block until a response is available. + auto response = ars.next(); + + Status responseStatus = response.swResponse.getStatus(); + // When the responseStatus is not OK, this means that mongos was unable to receive a + // response from the shard the write batch was sent to, or mongos faced some other local + // error (for example, mongos was shutting down). In these cases, throw and return the + // error to the client. + // Note that this is different from an operation within the bulkWrite command having an + // error. + uassertStatusOK(responseStatus); + + auto bwReply = BulkWriteCommandReply::parse(IDLParserContext("bulkWrite"), + response.swResponse.getValue().data); + + // TODO (SERVER-76958): Iterate through the cursor rather than looking only at the + // first batch. + auto cursor = bwReply.getCursor(); + const auto& replyItems = cursor.getFirstBatch(); + TargetedWriteBatch* writeBatch = childBatches.find(response.shardId)->second.get(); + + // Capture the errors if any exist and mark the writes in the TargetedWriteBatch so that + // they may be re-targeted if needed. + bulkWriteOp.noteBatchResponse(*writeBatch, replyItems, errorsPerNamespace); + } +} + +void noteStaleResponses( + OperationContext* opCtx, + const std::vector>& targeters, + const stdx::unordered_map& errorsPerNamespace) { + for (auto& targeter : targeters) { + auto errors = errorsPerNamespace.find(targeter->getNS()); + if (errors != errorsPerNamespace.cend()) { + for (const auto& error : errors->second.getErrors(ErrorCodes::StaleConfig)) { + LOGV2_DEBUG(7279201, + 4, + "Noting stale config response.", + "shardId"_attr = error.endpoint.shardName, + "status"_attr = error.error.getStatus()); + targeter->noteStaleShardResponse( + opCtx, error.endpoint, *error.error.getStatus().extraInfo()); + } + for (const auto& error : errors->second.getErrors(ErrorCodes::StaleDbVersion)) { + LOGV2_DEBUG(7279202, + 4, + "Noting stale database response.", + "shardId"_attr = error.endpoint.shardName, + "status"_attr = error.error.getStatus()); + targeter->noteStaleDbResponse( + opCtx, + error.endpoint, + *error.error.getStatus().extraInfo()); + } + } + } +} + } // namespace -namespace bulk_write_exec { std::vector execute(OperationContext* opCtx, const std::vector>& targeters, @@ -72,6 +231,10 @@ std::vector execute(OperationContext* opCtx, // re-batch ops based on their targeted shard id. TargetedBatchMap childBatches; + // Divide and group ("target") the operations in the bulk write command. Some operations may + // be split up (such as an update that needs to go to more than one shard), while others may + // be grouped together if they need to go to the same shard. + // These operations are grouped by shardId in the TargetedBatchMap childBatches. 
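The comment above summarizes the targeting step: each op's targeted writes are collected into per-shard child batches. A minimal standalone sketch of that grouping, assuming invented stand-in types rather than the real TargetedBatchMap/TargetedWrite classes:

```cpp
#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

struct TargetedWriteSketch {
    std::size_t opIndex;  // index into the original bulkWrite ops array
    std::string shardId;  // shard this particular write was targeted to
};

int main() {
    // Suppose targeting decided ops 0 and 2 go to shardA and op 1 goes to shardB.
    std::vector<TargetedWriteSketch> targeted{{0, "shardA"}, {1, "shardB"}, {2, "shardA"}};

    // One child batch per shard, mirroring the "grouped by shardId" comment above.
    std::map<std::string, std::vector<std::size_t>> childBatches;
    for (const auto& tw : targeted)
        childBatches[tw.shardId].push_back(tw.opIndex);

    for (const auto& [shard, ops] : childBatches)
        std::cout << shard << " receives " << ops.size() << " op(s)\n";
    return 0;
}
```

In the real code each child batch also carries endpoint and size information; the sketch only shows the shard-keyed grouping.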
bool recordTargetErrors = refreshedTargeter; auto targetStatus = bulkWriteOp.target(targeters, recordTargetErrors, childBatches); if (!targetStatus.isOK()) { @@ -84,27 +247,23 @@ std::vector execute(OperationContext* opCtx, targeter->noteCouldNotTarget(); } refreshedTargeter = true; - } + } else { + stdx::unordered_map errorsPerNamespace; - // 2: Use MultiStatementTransactionRequestsSender to send any ready sub-batches to targeted - // shard endpoints. + // Send the child batches and wait for responses. + executeChildBatches(opCtx, childBatches, bulkWriteOp, errorsPerNamespace); - // 3: Wait for responses for all those sub-batches and keep track of the responses from - // sub-batches based on the op index in the original bulkWrite command. Abort the batch upon - // errors for ordered writes or transactions. - // TODO(SERVER-72792): Remove the logic below that mimics ok responses and process real - // batch responses. - for (const auto& childBatch : childBatches) { - bulkWriteOp.noteBatchResponse(*childBatch.second); + // If we saw any staleness errors, tell the targeters to invalidate their cache + // so that they may be refreshed. + noteStaleResponses(opCtx, targeters, errorsPerNamespace); } - - // 4: Refresh the targeter(s) if we receive a target error or a stale config/db error. if (bulkWriteOp.isFinished()) { // No need to refresh the targeters if we are done. break; } + // Refresh the targeter(s) if we received a target error or a stale config/db error. bool targeterChanged = false; try { LOGV2_DEBUG(7298200, 2, "Refreshing all targeters for bulkWrite"); @@ -157,6 +316,7 @@ BulkWriteOp::BulkWriteOp(OperationContext* opCtx, const BulkWriteCommandRequest& : _opCtx(opCtx), _clientRequest(clientRequest), _txnNum(_opCtx->getTxnNumber()), + _writeConcern(opCtx->getWriteConcern()), _inTransaction(static_cast(TransactionRouter::get(opCtx))), _isRetryableWrite(opCtx->isRetryableWrite()) { _writeOps.reserve(_clientRequest.getOps().size()); @@ -165,9 +325,9 @@ BulkWriteOp::BulkWriteOp(OperationContext* opCtx, const BulkWriteCommandRequest& } } -StatusWith BulkWriteOp::target(const std::vector>& targeters, - bool recordTargetErrors, - TargetedBatchMap& targetedBatches) { +StatusWith BulkWriteOp::target(const std::vector>& targeters, + bool recordTargetErrors, + TargetedBatchMap& targetedBatches) { const auto ordered = _clientRequest.getOrdered(); return targetWriteOps( @@ -183,10 +343,21 @@ StatusWith BulkWriteOp::target(const std::vector nsInfo = _clientRequest.getNsInfo(); - for (auto&& targetedWrite : targetedBatch.getWrites()) { + std::vector stmtIds; + if (_isRetryableWrite) + stmtIds.reserve(targetedBatch.getNumOps()); + + for (const auto& targetedWrite : targetedBatch.getWrites()) { const WriteOpRef& writeOpRef = targetedWrite->writeOpRef; ops.push_back(_clientRequest.getOps().at(writeOpRef.first)); @@ -224,6 +398,10 @@ BulkWriteCommandRequest BulkWriteOp::buildBulkCommandRequest( nsInfoEntry.setShardVersion(targetedWrite->endpoint.shardVersion); nsInfoEntry.setDatabaseVersion(targetedWrite->endpoint.databaseVersion); + + if (_isRetryableWrite) { + stmtIds.push_back(bulk_write_common::getStatementId(_clientRequest, writeOpRef.first)); + } } request.setOps(ops); @@ -235,8 +413,11 @@ BulkWriteCommandRequest BulkWriteOp::buildBulkCommandRequest( request.setOrdered(_clientRequest.getOrdered()); request.setBypassDocumentValidation(_clientRequest.getBypassDocumentValidation()); - // TODO (SERVER-72989): Attach stmtIds etc. 
when building support for retryable - // writes on mongos + if (_isRetryableWrite) { + request.setStmtIds(stmtIds); + } + + request.setDbName(DatabaseName::kAdmin); return request; } @@ -284,11 +465,76 @@ void BulkWriteOp::abortBatch(const Status& status) { dassert(isFinished()); } -// TODO(SERVER-72792): Finish this and process real batch responses. -void BulkWriteOp::noteBatchResponse(const TargetedWriteBatch& targetedBatch) { - for (auto&& write : targetedBatch.getWrites()) { +void BulkWriteOp::noteBatchResponse( + TargetedWriteBatch& targetedBatch, + const std::vector& replyItems, + stdx::unordered_map& errorsPerNamespace) { + LOGV2_DEBUG(7279200, + 4, + "Processing bulk write response from shard.", + "shard"_attr = targetedBatch.getShardId(), + "replyItems"_attr = replyItems); + int index = -1; + bool ordered = _clientRequest.getOrdered(); + boost::optional lastError; + for (const auto& write : targetedBatch.getWrites()) { + ++index; WriteOp& writeOp = _writeOps[write->writeOpRef.first]; - writeOp.noteWriteComplete(*write); + // When an error is encountered on an ordered bulk write, it is impossible for any of the + // remaining operations to have been executed. For that reason we cancel them here so they + // may be retargeted and retried. + if (ordered && lastError) { + invariant(index >= (int)replyItems.size()); + writeOp.cancelWrites(&*lastError); + continue; + } + + // On most errors (for example, a DuplicateKeyError) unordered bulkWrite on a shard attempts + // to execute following operations even if a preceding operation errored. This isn't true + // for StaleConfig or StaleDbVersion errors. On these errors, since the shard knows that it + // following operations will also be stale, it stops right away. + // For that reason, although typically we can expect the size of replyItems to match the + // size of the number of operations sent (even in the case of errors), when a staleness + // error is received the size of replyItems will be <= the size of the number of operations. + // When this is the case, we treat all the remaining operations which may not have a + // replyItem as having failed with a staleness error. + if (!ordered && lastError && + (lastError->getStatus().code() == ErrorCodes::StaleDbVersion || + ErrorCodes::isStaleShardVersionError(lastError->getStatus()))) { + // Decrement the index so it keeps pointing to the same error (i.e. the + // last error, which is a staleness error). 
+ LOGV2_DEBUG(7695304, + 4, + "Duplicating the error for op", + "opIdx"_attr = write->writeOpRef.first, + "error"_attr = lastError->getStatus()); + invariant(index == (int)replyItems.size()); + index--; + } + + auto& reply = replyItems[index]; + if (reply.getStatus().isOK()) { + writeOp.noteWriteComplete(*write); + } else { + lastError.emplace(write->writeOpRef.first, reply.getStatus()); + writeOp.noteWriteError(*write, *lastError); + + auto origWrite = BulkWriteCRUDOp(_clientRequest.getOps()[write->writeOpRef.first]); + auto nss = _clientRequest.getNsInfo()[origWrite.getNsInfoIdx()].getNs(); + + if (errorsPerNamespace.find(nss) == errorsPerNamespace.end()) { + TrackedErrors trackedErrors; + trackedErrors.startTracking(ErrorCodes::StaleConfig); + trackedErrors.startTracking(ErrorCodes::StaleDbVersion); + errorsPerNamespace.emplace(nss, trackedErrors); + } + + auto trackedErrors = errorsPerNamespace.find(nss); + invariant(trackedErrors != errorsPerNamespace.end()); + if (trackedErrors->second.isTracking(reply.getStatus().code())) { + trackedErrors->second.addError(ShardError(write->endpoint, *lastError)); + } + } } } @@ -313,6 +559,53 @@ std::vector BulkWriteOp::generateReplyItems() const { return replyItems; } + +int BulkWriteOp::getBaseBatchCommandSizeEstimate() const { + // For simplicity, we build a dummy bulk write command request that contains all the common + // fields and serialize it to get the base command size. + // TODO SERVER-78301: Re-evaluate this estimation method and consider switching to a more + // efficient approach. + // We only bother to copy over variable-size and/or optional fields, since the value of fields + // that are fixed-size and always present (e.g. 'ordered') won't affect the size calculation. + BulkWriteCommandRequest request; + + // These have not been set yet, but will be set later on for each namespace as part of the + // write targeting and batch building process. To ensure we save space for these fields, we + // add dummy versions to the namespaces before serializing. + static const ShardVersion mockShardVersion = + ShardVersionFactory::make(ChunkVersion::IGNORED(), CollectionIndexes()); + static const DatabaseVersion mockDBVersion = DatabaseVersion(UUID::gen(), Timestamp()); + + auto nsInfo = _clientRequest.getNsInfo(); + for (auto& ns : nsInfo) { + ns.setShardVersion(mockShardVersion); + ns.setDatabaseVersion(mockDBVersion); + } + request.setNsInfo(nsInfo); + + request.setDbName(_clientRequest.getDbName()); + request.setDollarTenant(_clientRequest.getDollarTenant()); + request.setLet(_clientRequest.getLet()); + // We'll account for the size to store each individual op as we add them, so just put an empty + // vector as a placeholder for the array. This will ensure we properly count the size of the + // field name and the empty array. + request.setOps({}); + + if (_isRetryableWrite) { + // We'll account for the size to store each individual stmtId as we add ops, so similar to + // above with ops, we just put an empty vector as a placeholder for now. + request.setStmtIds({}); + } + + BSONObjBuilder builder; + request.serialize(BSONObj(), &builder); + // Add writeConcern and lsid/txnNumber to ensure we save space for them. 
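The base size computed by getBaseBatchCommandSizeEstimate lets batch assembly account for the command's fixed overhead before any ops are added. A minimal standalone sketch of why reserving that base size up front matters when packing ops under a size limit; the sizes and the limit below are invented numbers, not the real BSON limits:

```cpp
#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    const int kMaxCommandSize = 100;  // pretend size limit, in bytes
    const int baseSize = 30;          // pretend estimate of the common fields
    const std::vector<int> opSizes{20, 25, 40, 10, 35};

    std::vector<std::vector<std::size_t>> batches;  // op indices per sub-batch
    std::vector<std::size_t> current;
    int currentSize = baseSize;
    for (std::size_t i = 0; i < opSizes.size(); ++i) {
        if (!current.empty() && currentSize + opSizes[i] > kMaxCommandSize) {
            batches.push_back(current);  // close the batch before it would overflow
            current.clear();
            currentSize = baseSize;      // every sub-batch pays the base cost again
        }
        current.push_back(i);
        currentSize += opSizes[i];
    }
    if (!current.empty())
        batches.push_back(current);

    for (std::size_t b = 0; b < batches.size(); ++b)
        std::cout << "batch " << b << " holds " << batches[b].size() << " op(s)\n";
    return 0;
}
```

Underestimating the base size would let a sub-batch exceed the limit once the common fields are serialized, which is why the dummy request above is given placeholder shard and database versions before measuring it.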
+ logical_session_id_helpers::serializeLsidAndTxnNumber(_opCtx, &builder); + builder.append(WriteConcernOptions::kWriteConcernField, _opCtx->getWriteConcern().toBSON()); + + return builder.obj().objsize(); +} + } // namespace bulk_write_exec } // namespace mongo diff --git a/src/mongo/s/write_ops/bulk_write_exec.h b/src/mongo/s/write_ops/bulk_write_exec.h index 41d6ebf9c4d7e..196448feed925 100644 --- a/src/mongo/s/write_ops/bulk_write_exec.h +++ b/src/mongo/s/write_ops/bulk_write_exec.h @@ -29,13 +29,25 @@ #pragma once +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/timestamp.h" #include "mongo/client/connection_string.h" #include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/commands/bulk_write_parser.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/repl/optime.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/write_concern_options.h" #include "mongo/s/ns_targeter.h" #include "mongo/s/write_ops/batch_write_op.h" #include "mongo/s/write_ops/write_op.h" +#include "mongo/stdx/unordered_map.h" namespace mongo { namespace bulk_write_exec { @@ -98,12 +110,12 @@ class BulkWriteOp { * targeting errors, but if not we should refresh once first.) * * Returned TargetedWriteBatches are owned by the caller. - * If a write without a shard key is detected, return an OK StatusWith that has 'true' as the - * value. + * If a write without a shard key or a time-series retryable update is detected, return an OK + * StatusWith that has the corresponding WriteType as the value. */ - StatusWith target(const std::vector>& targeters, - bool recordTargetErrors, - TargetedBatchMap& targetedBatches); + StatusWith target(const std::vector>& targeters, + bool recordTargetErrors, + TargetedBatchMap& targetedBatches); /** * Fills a BulkWriteCommandRequest from a TargetedWriteBatch for this BulkWriteOp. @@ -127,8 +139,14 @@ class BulkWriteOp { */ void abortBatch(const Status& status); - // TODO(SERVER-72792): Finish this and process real batch responses. - void noteBatchResponse(const TargetedWriteBatch& targetedBatch); + /** + * Processes the response to a TargetedWriteBatch. The response is captured by the vector of + * BulkWriteReplyItems. Sharding related errors are then grouped by namespace and captured in + * the map passed in. + */ + void noteBatchResponse(TargetedWriteBatch& targetedBatch, + const std::vector& replyItems, + stdx::unordered_map& errorsPerNamespace); /** * Returns a vector of BulkWriteReplyItem based on the end state of each individual write in @@ -136,6 +154,12 @@ class BulkWriteOp { */ std::vector generateReplyItems() const; + /** + * Calculates an estimate of the size, in bytes, required to store the common fields that will + * go into each sub-batch command sent to a shard, i.e. all fields besides the actual write ops. + */ + int getBaseBatchCommandSizeEstimate() const; + private: // The OperationContext the client bulkWrite request is run on. OperationContext* const _opCtx; @@ -149,6 +173,9 @@ class BulkWriteOp { // Cached transaction number (if one is present on the operation contex). boost::optional _txnNum; + // The write concern that the bulk write command was issued with. + WriteConcernOptions _writeConcern; + // Set to true if this write is part of a transaction. 
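noteBatchResponse, declared above, has to cope with a shard that stops early on StaleConfig or StaleDbVersion and therefore returns fewer reply items than ops sent; the trailing ops are treated as having failed with that same staleness error. A minimal standalone sketch of that index bookkeeping, ignoring the ordered/unordered distinction and using invented types:

```cpp
#include <cstddef>
#include <iostream>
#include <string>
#include <vector>

struct ReplySketch {
    bool ok;
    std::string error;  // e.g. "StaleConfig" when !ok
};

int main() {
    const std::size_t opsSent = 4;
    // The shard executed op 0, hit StaleConfig on op 1 and stopped: only two replies.
    const std::vector<ReplySketch> replies{{true, ""}, {false, "StaleConfig"}};

    std::size_t replyIdx = 0;
    for (std::size_t op = 0; op < opsSent; ++op) {
        const ReplySketch& r = replies[replyIdx];
        if (r.ok)
            std::cout << "op " << op << ": ok\n";
        else
            std::cout << "op " << op << ": failed with " << r.error << "\n";
        // Advance only while more replies exist; otherwise keep pointing at the last
        // (staleness) reply so the trailing ops inherit the same error.
        if (replyIdx + 1 < replies.size())
            ++replyIdx;
    }
    return 0;
}
```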
const bool _inTransaction{false}; const bool _isRetryableWrite{false}; diff --git a/src/mongo/s/write_ops/bulk_write_exec_test.cpp b/src/mongo/s/write_ops/bulk_write_exec_test.cpp index 30830e2b30f48..cbb76c87e81a9 100644 --- a/src/mongo/s/write_ops/bulk_write_exec_test.cpp +++ b/src/mongo/s/write_ops/bulk_write_exec_test.cpp @@ -27,23 +27,71 @@ * it in the license file. */ -#include "mongo/idl/server_parameter_test_util.h" +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/bson/util/builder.h" +#include "mongo/client/connection_string.h" +#include "mongo/client/remote_command_targeter_factory_mock.h" +#include "mongo/client/remote_command_targeter_mock.h" +#include "mongo/crypto/sha256_block.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/commands/bulk_write_crud_op.h" +#include "mongo/db/commands/bulk_write_gen.h" +#include "mongo/db/database_name.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/shard_id.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/logv2/log.h" -#include "mongo/s/catalog_cache_test_fixture.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/s/catalog/type_shard.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/mock_ns_targeter.h" -#include "mongo/s/session_catalog_router.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" #include "mongo/s/sharding_router_test_fixture.h" -#include "mongo/s/transaction_router.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/write_ops/batch_write_op.h" #include "mongo/s/write_ops/batched_command_request.h" #include "mongo/s/write_ops/bulk_write_exec.h" #include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" -#include "mongo/unittest/unittest.h" -#include -#include +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -121,43 +169,57 @@ using namespace bulk_write_exec; class BulkWriteOpTest : public ServiceContextTest { protected: BulkWriteOpTest() { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); _opCtxHolder = makeOperationContext(); _opCtx = _opCtxHolder.get(); } ServiceContext::UniqueOperationContext _opCtxHolder; OperationContext* _opCtx; + + // This failpoint is to skip running the useTwoPhaseWriteProtocol check which expects the Grid + // to be initialized. 
With the feature flag on, the helper always returns false, which signifies + // that we have a targetable write op. + std::unique_ptr _skipUseTwoPhaseWriteProtocolCheck = + std::make_unique("skipUseTwoPhaseWriteProtocolCheck"); }; // Test targeting a single op in a bulkWrite request. TEST_F(BulkWriteOpTest, TargetSingleOp) { ShardId shardId("shard"); - NamespaceString nss("foo.bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest("foo.bar"); ShardEndpoint endpoint( shardId, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); std::vector> targeters; targeters.push_back(initTargeterFullRange(nss, endpoint)); - BulkWriteCommandRequest request({BulkWriteInsertOp(0, BSON("x" << 1))}, - {NamespaceInfoEntry(nss)}); + auto runTest = [&](const BulkWriteCommandRequest& request) { + BulkWriteOp bulkWriteOp(_opCtx, request); - BulkWriteOp bulkWriteOp(_opCtx, request); + TargetedBatchMap targeted; + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted.begin()->second->getShardId(), shardId); + ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); + assertEndpointsEqual(targeted.begin()->second->getWrites()[0]->endpoint, endpoint); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + }; - TargetedBatchMap targeted; - ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); - ASSERT_EQUALS(targeted.size(), 1u); - ASSERT_EQUALS(targeted.begin()->second->getShardId(), shardId); - ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); - assertEndpointsEqual(targeted.begin()->second->getWrites()[0]->endpoint, endpoint); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + // Insert + runTest( + BulkWriteCommandRequest({BulkWriteInsertOp(0, BSON("x" << 1))}, {NamespaceInfoEntry(nss)})); + // Update + runTest(BulkWriteCommandRequest( + {BulkWriteUpdateOp(0, BSON("x" << 1), BSON("$set" << BSON("y" << 2)))}, + {NamespaceInfoEntry(nss)})); + // Delete + runTest( + BulkWriteCommandRequest({BulkWriteDeleteOp(0, BSON("x" << 1))}, {NamespaceInfoEntry(nss)})); } // Test targeting a single op with target error. TEST_F(BulkWriteOpTest, TargetSingleOpError) { - NamespaceString nss("foo.bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest("foo.bar"); ShardEndpoint endpoint(ShardId("shard"), ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); @@ -167,29 +229,39 @@ TEST_F(BulkWriteOpTest, TargetSingleOpError) { // an error. targeters.push_back(initTargeterHalfRange(nss, endpoint)); - BulkWriteCommandRequest request({BulkWriteInsertOp(0, BSON("x" << 1))}, - {NamespaceInfoEntry(nss)}); + auto runTest = [&](const BulkWriteCommandRequest& request) { + BulkWriteOp bulkWriteOp(_opCtx, request); - BulkWriteOp bulkWriteOp(_opCtx, request); + TargetedBatchMap targeted; + // target should return target error when recordTargetErrors = false. + ASSERT_NOT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 0u); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Ready); - TargetedBatchMap targeted; - // target should return target error when recordTargetErrors = false. 
- ASSERT_NOT_OK(bulkWriteOp.target(targeters, false, targeted)); - ASSERT_EQUALS(targeted.size(), 0u); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Ready); + // target should transition the writeOp to an error state upon target errors when + // recordTargetErrors = true. + ASSERT_OK(bulkWriteOp.target(targeters, true, targeted)); + ASSERT_EQUALS(targeted.size(), 0u); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Error); + }; - // target should transition the writeOp to an error state upon target errors when - // recordTargetErrors = true. - ASSERT_OK(bulkWriteOp.target(targeters, true, targeted)); - ASSERT_EQUALS(targeted.size(), 0u); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Error); + // Insert + runTest( + BulkWriteCommandRequest({BulkWriteInsertOp(0, BSON("x" << 1))}, {NamespaceInfoEntry(nss)})); + // Update + runTest(BulkWriteCommandRequest( + {BulkWriteUpdateOp(0, BSON("x" << 1), BSON("$set" << BSON("y" << 2)))}, + {NamespaceInfoEntry(nss)})); + // Delete + runTest( + BulkWriteCommandRequest({BulkWriteDeleteOp(0, BSON("x" << 1))}, {NamespaceInfoEntry(nss)})); } // Test multiple ordered ops that target the same shard. TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_SameShard) { ShardId shardId("shard"); - NamespaceString nss0("foo.bar"); - NamespaceString nss1("bar.foo"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("bar.foo"); // Two different endpoints targeting the same shard for the two namespaces. ShardEndpoint endpoint0( shardId, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); @@ -203,30 +275,47 @@ TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_SameShard) { targeters.push_back(initTargeterFullRange(nss0, endpoint0)); targeters.push_back(initTargeterFullRange(nss1, endpoint1)); - BulkWriteCommandRequest request( - {BulkWriteInsertOp(1, BSON("x" << 1)), BulkWriteInsertOp(0, BSON("x" << 2))}, - {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); - - BulkWriteOp bulkWriteOp(_opCtx, request); + auto runTest = [&](const BulkWriteCommandRequest& request) { + BulkWriteOp bulkWriteOp(_opCtx, request); + + // Test that both writes target the same shard under two different endpoints for their + // namespace. + TargetedBatchMap targeted; + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted.begin()->second->getShardId(), shardId); + ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 2u); + assertEndpointsEqual(targeted.begin()->second->getWrites()[0]->endpoint, endpoint1); + assertEndpointsEqual(targeted.begin()->second->getWrites()[1]->endpoint, endpoint0); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + }; - // Test that both writes target the same shard under two different endpoints for their - // namespace. 
- TargetedBatchMap targeted; - ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); - ASSERT_EQUALS(targeted.size(), 1u); - ASSERT_EQUALS(targeted.begin()->second->getShardId(), shardId); - ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 2u); - assertEndpointsEqual(targeted.begin()->second->getWrites()[0]->endpoint, endpoint1); - assertEndpointsEqual(targeted.begin()->second->getWrites()[1]->endpoint, endpoint0); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + // Two inserts + runTest(BulkWriteCommandRequest( + {BulkWriteInsertOp(1, BSON("x" << 1)), BulkWriteInsertOp(0, BSON("x" << 2))}, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)})); + // Two updates + runTest(BulkWriteCommandRequest( + {BulkWriteUpdateOp(1, BSON("x" << 1), BSON("$set" << BSON("y" << 2))), + BulkWriteUpdateOp(0, BSON("x" << 2), BSON("$set" << BSON("y" << 2)))}, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)})); + // Two deletes + runTest(BulkWriteCommandRequest( + {BulkWriteDeleteOp(1, BSON("x" << 1)), BulkWriteDeleteOp(0, BSON("x" << 2))}, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)})); + // Mixed op types: update + delete + runTest(BulkWriteCommandRequest( + {BulkWriteUpdateOp(1, BSON("x" << 1), BSON("$set" << BSON("y" << 2))), + BulkWriteDeleteOp(0, BSON("x" << 2))}, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)})); } // Test multiple ordered ops where one of them result in a target error. TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_RecordTargetErrors) { ShardId shardId("shard"); - NamespaceString nss0("foo.bar"); - NamespaceString nss1("bar.foo"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("bar.foo"); ShardEndpoint endpoint0( shardId, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); ShardEndpoint endpoint1( @@ -241,44 +330,59 @@ TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_RecordTargetErrors) { targeters.push_back(initTargeterHalfRange(nss0, endpoint0)); targeters.push_back(initTargeterFullRange(nss1, endpoint1)); - // Only the second op would get a target error. - BulkWriteCommandRequest request({BulkWriteInsertOp(1, BSON("x" << 1)), - BulkWriteInsertOp(0, BSON("x" << 2)), - BulkWriteInsertOp(0, BSON("x" << -1))}, - {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); - - BulkWriteOp bulkWriteOp(_opCtx, request); - - TargetedBatchMap targeted; - ASSERT_OK(bulkWriteOp.target(targeters, true, targeted)); - - // Only the first op should be targeted as the second op encounters a target error. But this - // won't record the target error since there could be an error in the first op before executing - // the second op. 
- ASSERT_EQUALS(targeted.size(), 1u); - ASSERT_EQUALS(targeted.begin()->second->getShardId(), shardId); - ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); - assertEndpointsEqual(targeted.begin()->second->getWrites()[0]->endpoint, endpoint1); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Ready); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + auto runTest = [&](const BulkWriteCommandRequest& request) { + BulkWriteOp bulkWriteOp(_opCtx, request); + + TargetedBatchMap targeted; + ASSERT_OK(bulkWriteOp.target(targeters, true, targeted)); + + // Only the first op should be targeted as the second op encounters a target error. But this + // won't record the target error since there could be an error in the first op before + // executing the second op. + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted.begin()->second->getShardId(), shardId); + ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); + assertEndpointsEqual(targeted.begin()->second->getWrites()[0]->endpoint, endpoint1); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // Pretending the first op was done successfully, the target error should be recorded in the + // second op. + ASSERT_OK(bulkWriteOp.target(targeters, true, targeted)); + ASSERT_EQUALS(targeted.size(), 0u); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Error); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + }; - targeted.clear(); + // Requests where only the second op would get a target error. - // Pretending the first op was done successfully, the target error should be recorded in the - // second op. - ASSERT_OK(bulkWriteOp.target(targeters, true, targeted)); - ASSERT_EQUALS(targeted.size(), 0u); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Error); - ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + // Insert gets the target error + runTest(BulkWriteCommandRequest({BulkWriteInsertOp(1, BSON("x" << 1)), + BulkWriteInsertOp(0, BSON("x" << 2)), + BulkWriteInsertOp(0, BSON("x" << -1))}, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)})); + // Update gets the target error + runTest(BulkWriteCommandRequest( + {BulkWriteInsertOp(1, BSON("x" << 1)), + BulkWriteUpdateOp(0, BSON("x" << 2), BSON("$set" << BSON("y" << 2))), + BulkWriteInsertOp(0, BSON("x" << -1))}, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)})); + // Delete gets the target error + runTest(BulkWriteCommandRequest({BulkWriteInsertOp(1, BSON("x" << 1)), + BulkWriteDeleteOp(0, BSON("x" << 2)), + BulkWriteInsertOp(0, BSON("x" << -1))}, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)})); } // Test multiple ordered ops that target two different shards. 
TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_DifferentShard) { ShardId shardIdA("shardA"); ShardId shardIdB("shardB"); - NamespaceString nss0("foo.bar"); - NamespaceString nss1("bar.foo"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("bar.foo"); ShardEndpoint endpointA0( shardIdA, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); ShardEndpoint endpointB0( @@ -296,10 +400,17 @@ TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_DifferentShard) { // ops[0] -> shardA // ops[1] -> shardB // ops[2] -> shardA - BulkWriteCommandRequest request({BulkWriteInsertOp(0, BSON("x" << -1)), - BulkWriteInsertOp(0, BSON("x" << 1)), - BulkWriteInsertOp(1, BSON("x" << 1))}, - {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); + // ops[3] -> shardB + // ops[4] -> shardA + BulkWriteCommandRequest request( + { + BulkWriteInsertOp(0, BSON("x" << -1)), + BulkWriteInsertOp(0, BSON("x" << 1)), + BulkWriteInsertOp(1, BSON("x" << 1)), + BulkWriteDeleteOp(0, BSON("x" << 1)), + BulkWriteUpdateOp(0, BSON("x" << -1), BSON("$set" << BSON("y" << 2))), + }, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); BulkWriteOp bulkWriteOp(_opCtx, request); @@ -314,6 +425,8 @@ TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_DifferentShard) { ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Ready); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); targeted.clear(); @@ -326,6 +439,8 @@ TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_DifferentShard) { ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); targeted.clear(); @@ -338,21 +453,307 @@ TEST_F(BulkWriteOpTest, TargetMultiOpsOrdered_DifferentShard) { ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // The resulting batch should be {shardB: [ops[3]]}. 
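The batches asserted in this test follow the ordered-targeting rule that consecutive ops bound for the same shard may share a sub-batch, while a change of target shard closes the current one. A minimal standalone sketch of that rule with invented shard names:

```cpp
#include <iostream>
#include <string>
#include <utility>
#include <vector>

int main() {
    // Target shard for each ordered op (mirrors the "ops[i] -> shard" comments above).
    const std::vector<std::string> targets{"shardA", "shardB", "shardA", "shardB", "shardA"};

    std::vector<std::pair<std::string, int>> batches;  // (shard, count of consecutive ops)
    for (const std::string& shard : targets) {
        if (batches.empty() || batches.back().first != shard)
            batches.push_back({shard, 0});  // a shard change closes the previous batch
        ++batches.back().second;
    }

    for (const auto& [shard, count] : batches)
        std::cout << shard << ": " << count << " op(s)\n";
    return 0;
}
```

With the alternating targets used in this test, every batch ends up holding a single op, which is why the test drains one sub-batch per target() call.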
+ ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted.begin()->second->getShardId(), shardIdB); + ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); + assertEndpointsEqual(targeted.begin()->second->getWrites()[0]->endpoint, endpointB0); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // The resulting batch should be {shardA: [ops[4]]}. + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted.begin()->second->getShardId(), shardIdA); + ASSERT_EQUALS(targeted.begin()->second->getWrites().size(), 1u); + assertEndpointsEqual(targeted.begin()->second->getWrites()[0]->endpoint, endpointA0); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Pending); } -// TODO(SERVER-74096): Test sub-batching logic with multi-target writes. -// 1. Test targeting ordered ops where a multi-target sub-batch must only contain writes for a -// single write op. -// 2. Test targeting unordered ops of the same namespace that target the same shard under with two -// different endpoints/shardVersions. This happens when a bulkWrite includes a multi-target write -// and a single-target write. +// Test targeting ordered ops where a multi-target sub-batch must only contain writes for a +// single write op. +TEST_F(BulkWriteOpTest, TargetMultiTargetOpsOrdered) { + ShardId shardIdA("shardA"); + ShardId shardIdB("shardB"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + ShardEndpoint endpointA( + shardIdA, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); + ShardEndpoint endpointB( + shardIdB, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); + + std::vector> targeters; + targeters.push_back(initTargeterSplitRange(nss0, endpointA, endpointB)); + + // Ordered update and delete ops. We place multi-target ops in between single-target ops to the + // same shards, to ensure we correctly separate the multi-target ops into their own batches. 
+ // Expected targets: + // ops[0] -> shardA + // ops[1] -> shardA and shardB + // ops[2] -> shardB + // ops[3] -> shardB + // ops[4] -> shardA and shardB + // ops[5] -> shardA + BulkWriteCommandRequest request( + { + BulkWriteUpdateOp(0, BSON("x" << -1), BSON("$set" << BSON("z" << 3))), + BulkWriteUpdateOp( + 0, BSON("x" << BSON("$gte" << -5 << "$lt" << 5)), BSON("$set" << BSON("y" << 2))), + BulkWriteUpdateOp(0, BSON("x" << 1), BSON("$set" << BSON("z" << 3))), + BulkWriteDeleteOp(0, BSON("x" << 1)), + BulkWriteDeleteOp(0, BSON("x" << BSON("$gte" << -5 << "$lt" << 5))), + BulkWriteDeleteOp(0, BSON("x" << -1)), + }, + {NamespaceInfoEntry(nss0)}); + + BulkWriteOp bulkWriteOp(_opCtx, request); + + // The resulting batches should be: + // {shardA: [ops[0]} + // {shardA: [ops[1]]}, {shardB: [ops[1]]} + // {shardB: [ops[2], ops[3]]} + // {shardA: [ops[4]]}, {shardB: [ops[4]]} + // {shardA: [ops[5]]} + + TargetedBatchMap targeted; + + // {shardA: [ops[0]} + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 1u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 0); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointA); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(5).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // {shardA: [ops[1]]}, {shardB: [ops[1]]} + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 2u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 1u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 1); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointA); + ASSERT_EQUALS(targeted[shardIdB]->getWrites().size(), 1u); + ASSERT_EQUALS(targeted[shardIdB]->getWrites()[0]->writeOpRef.first, 1); + assertEndpointsEqual(targeted[shardIdB]->getWrites()[0]->endpoint, endpointB); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(5).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // {shardB: [ops[2], ops[3]]} + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted[shardIdB]->getWrites().size(), 2u); + assertEndpointsEqual(targeted[shardIdB]->getWrites()[0]->endpoint, endpointB); + ASSERT_EQUALS(targeted[shardIdB]->getWrites()[0]->writeOpRef.first, 2); + assertEndpointsEqual(targeted[shardIdB]->getWrites()[1]->endpoint, endpointB); + ASSERT_EQUALS(targeted[shardIdB]->getWrites()[1]->writeOpRef.first, 3); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), 
WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(5).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // {shardA: [ops[4]]}, {shardB: [ops[4]]} + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 2u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 1u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 4); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointA); + ASSERT_EQUALS(targeted[shardIdB]->getWrites().size(), 1u); + ASSERT_EQUALS(targeted[shardIdB]->getWrites()[0]->writeOpRef.first, 4); + assertEndpointsEqual(targeted[shardIdB]->getWrites()[0]->endpoint, endpointB); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(5).getWriteState(), WriteOpState_Ready); + + + targeted.clear(); + + // {shardA: [ops[5]]} + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 1u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 5); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointA); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(5).getWriteState(), WriteOpState_Pending); +} + +// Test targeting unordered ops of the same namespace that target the same shard/endpoint under two +// different shardVersions. +// As discussed in SERVER-34347, because of the way that (non-transactional) multi-target writes +// disregard the shardVersion and use ChunkVersion::IGNORED, we cannot have together in a single +// sub-batch an op for a multi-target write *and* an op for a single-target write that target +// the same endpoint, because the single target write will use the actual shardVersion. +TEST_F(BulkWriteOpTest, TargetMultiOpsUnordered_OneShard_TwoEndpoints) { + ShardId shardIdA("shardA"); + ShardId shardIdB("shardB"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + + // The endpoints we'll use for our targeter. 
+ ShardEndpoint endpointA( + shardIdA, + ShardVersionFactory::make(ChunkVersion({OID::gen(), Timestamp(2)}, {10, 11}), + boost::optional(boost::none)), + boost::none); + ShardEndpoint endpointB( + shardIdB, + ShardVersionFactory::make(ChunkVersion({OID::gen(), Timestamp(3)}, {11, 12}), + boost::optional(boost::none)), + boost::none); + + std::vector> targeters; + targeters.push_back(initTargeterSplitRange(nss0, endpointA, endpointB)); + + + // Used for assertions below; equivalent to the endpoints that multi-target ops will use (same + // as those above but no shard version.) + ShardEndpoint endpointANoVersion( + shardIdA, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); + ShardEndpoint endpointBNoVersion( + shardIdB, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); + + // We expect the ops to target the following endpoints with/without shardVersion as indicated: + // ops[0] -> A, shardVersion included + // ops[1] -> A shardVersion ignored, B shardVersion ignored + // ops[2] -> B, shardVersion included + // ops[3] -> A shardVersion ignored, B shardVersion ignored + // ops[4] -> A shardVersion included + + // Due to the interleaving of ops, each op should end up split into its own sub-batch, since no + // two consecutive ops target the same endpoint with the same shardVersion. + BulkWriteCommandRequest request( + { + BulkWriteUpdateOp(0, BSON("x" << -1), BSON("$set" << BSON("z" << 3))), + BulkWriteUpdateOp( + 0, BSON("x" << BSON("$gte" << -5 << "$lt" << 5)), BSON("$set" << BSON("y" << 2))), + BulkWriteDeleteOp(0, BSON("x" << 1)), + BulkWriteDeleteOp(0, BSON("x" << BSON("$gte" << -5 << "$lt" << 5))), + BulkWriteInsertOp(0, BSON("x" << -2)), + }, + {NamespaceInfoEntry(nss0)}); + request.setOrdered(false); + + BulkWriteOp bulkWriteOp(_opCtx, request); + + TargetedBatchMap targeted; + + // batch with ops[0] + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 1); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 0); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointA); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // batch with ops[1] + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 2u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 1); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 1); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointANoVersion); + ASSERT_EQUALS(targeted[shardIdB]->getWrites().size(), 1); + ASSERT_EQUALS(targeted[shardIdB]->getWrites()[0]->writeOpRef.first, 1); + assertEndpointsEqual(targeted[shardIdB]->getWrites()[0]->endpoint, endpointBNoVersion); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Ready); + 
ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // batch with ops[2] + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted[shardIdB]->getWrites().size(), 1); + ASSERT_EQUALS(targeted[shardIdB]->getWrites()[0]->writeOpRef.first, 2); + assertEndpointsEqual(targeted[shardIdB]->getWrites()[0]->endpoint, endpointB); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Ready); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // batch with ops[3] + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 2u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 1); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 3); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointANoVersion); + ASSERT_EQUALS(targeted[shardIdB]->getWrites().size(), 1); + ASSERT_EQUALS(targeted[shardIdB]->getWrites()[0]->writeOpRef.first, 3); + assertEndpointsEqual(targeted[shardIdB]->getWrites()[0]->endpoint, endpointBNoVersion); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Ready); + + targeted.clear(); + + // batch with ops[4] + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 1); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 4); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointA); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Pending); +} // Test multiple unordered ops that target two different shards. 
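The splitting asserted in the preceding test (TargetMultiOpsUnordered_OneShard_TwoEndpoints) comes down to comparing full endpoints rather than just shard ids: a sub-batch destined for a shard is built for one (shardId, shardVersion) pair, and a multi-target op that sends ChunkVersion::IGNORED cannot share it with a single-target op that carries the collection's actual version. A minimal sketch of that comparison, with hypothetical types standing in for ShardEndpoint:

```cpp
// Illustration only; Endpoint is a stand-in, not the real ShardEndpoint type.
#include <optional>
#include <string>

struct Endpoint {
    std::string shardId;
    // nullopt models ChunkVersion::IGNORED (no specific shard version attached).
    std::optional<std::string> shardVersion;
};

// Two writes may share a sub-batch only if they agree on the full endpoint,
// i.e. the same shard *and* the same (possibly ignored) shard version.
bool canShareSubBatch(const Endpoint& a, const Endpoint& b) {
    return a.shardId == b.shardId && a.shardVersion == b.shardVersion;
}
```

Under this rule, ops[0] (versioned endpoint for shardA) and ops[1] (version-ignored endpoint for shardA) land in different sub-batches even though both go to the same shard, which is exactly what the assertions above verify.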
TEST_F(BulkWriteOpTest, TargetMultiOpsUnordered) { ShardId shardIdA("shardA"); ShardId shardIdB("shardB"); - NamespaceString nss0("foo.bar"); - NamespaceString nss1("bar.foo"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("bar.foo"); ShardEndpoint endpointA0( shardIdA, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); ShardEndpoint endpointB0( @@ -370,41 +771,54 @@ TEST_F(BulkWriteOpTest, TargetMultiOpsUnordered) { // ops[0] -> shardA // ops[1] -> shardB // ops[2] -> shardA - BulkWriteCommandRequest request({BulkWriteInsertOp(0, BSON("x" << -1)), - BulkWriteInsertOp(0, BSON("x" << 1)), - BulkWriteInsertOp(1, BSON("x" << 1))}, - {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); + // ops[3] -> shardB + // ops[4] -> shardA + BulkWriteCommandRequest request( + { + BulkWriteInsertOp(0, BSON("x" << -1)), + BulkWriteInsertOp(0, BSON("x" << 1)), + BulkWriteInsertOp(1, BSON("x" << 1)), + BulkWriteDeleteOp(0, BSON("x" << 1)), + BulkWriteUpdateOp(0, BSON("x" << -1), BSON("$set" << BSON("y" << 2))), + }, + {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); request.setOrdered(false); BulkWriteOp bulkWriteOp(_opCtx, request); // The two resulting batches should be: - // {shardA: [ops[0], ops[2]]} - // {shardB: [ops[1]]} + // {shardA: [ops[0], ops[2], ops[4]]} + // {shardB: [ops[1], ops[3]]} TargetedBatchMap targeted; ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); ASSERT_EQUALS(targeted.size(), 2u); - ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 2u); + ASSERT_EQUALS(targeted[shardIdA]->getWrites().size(), 3u); ASSERT_EQUALS(targeted[shardIdA]->getWrites()[0]->writeOpRef.first, 0); assertEndpointsEqual(targeted[shardIdA]->getWrites()[0]->endpoint, endpointA0); ASSERT_EQUALS(targeted[shardIdA]->getWrites()[1]->writeOpRef.first, 2); assertEndpointsEqual(targeted[shardIdA]->getWrites()[1]->endpoint, endpointA1); + ASSERT_EQUALS(targeted[shardIdA]->getWrites()[2]->writeOpRef.first, 4); + assertEndpointsEqual(targeted[shardIdA]->getWrites()[2]->endpoint, endpointA0); - ASSERT_EQUALS(targeted[shardIdB]->getWrites().size(), 1u); + ASSERT_EQUALS(targeted[shardIdB]->getWrites().size(), 2u); ASSERT_EQUALS(targeted[shardIdB]->getWrites()[0]->writeOpRef.first, 1); assertEndpointsEqual(targeted[shardIdB]->getWrites()[0]->endpoint, endpointB0); + ASSERT_EQUALS(targeted[shardIdB]->getWrites()[1]->writeOpRef.first, 3); + assertEndpointsEqual(targeted[shardIdB]->getWrites()[1]->endpoint, endpointB0); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(0).getWriteState(), WriteOpState_Pending); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(1).getWriteState(), WriteOpState_Pending); ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(2).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(3).getWriteState(), WriteOpState_Pending); + ASSERT_EQUALS(bulkWriteOp.getWriteOp_forTest(4).getWriteState(), WriteOpState_Pending); } // Test multiple unordered ops where one of them result in a target error. 
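For unordered requests, as the preceding test (TargetMultiOpsUnordered) shows, a single targeting pass can group every ready op by its target shard rather than stopping at shard boundaries. A simplified sketch under that assumption, covering single-target ops only (names are hypothetical):

```cpp
// Illustrative grouping only, not the real targeting code.
#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Maps each shard to the indices of the ops routed to it, preserving op order
// within each shard's batch.
std::map<std::string, std::vector<std::size_t>> buildUnorderedBatches(
    const std::vector<std::string>& targetShardPerOp) {
    std::map<std::string, std::vector<std::size_t>> batches;
    for (std::size_t i = 0; i < targetShardPerOp.size(); ++i) {
        batches[targetShardPerOp[i]].push_back(i);
    }
    return batches;
}
```

With per-op targets {A, B, A, B, A} this yields {shardA: [0, 2, 4], shardB: [1, 3]}, matching the two batches asserted in that test.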
TEST_F(BulkWriteOpTest, TargetMultiOpsUnordered_RecordTargetErrors) { ShardId shardId("shard"); - NamespaceString nss0("foo.bar"); - NamespaceString nss1("bar.foo"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("bar.foo"); ShardEndpoint endpoint0( shardId, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); ShardEndpoint endpoint1( @@ -447,13 +861,12 @@ TEST_F(BulkWriteOpTest, TargetMultiOpsUnordered_RecordTargetErrors) { // bulk command request. TEST_F(BulkWriteOpTest, BuildChildRequestFromTargetedWriteBatch) { ShardId shardId("shard"); - NamespaceString nss0("foster.the.people"); - NamespaceString nss1("sonate.pacifique"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foster.the.people"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("sonate.pacifique"); // Two different endpoints targeting the same shard for the two namespaces. - ShardEndpoint endpoint0(ShardId("shard"), - ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), - boost::none); + ShardEndpoint endpoint0( + shardId, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); ShardEndpoint endpoint1( shardId, ShardVersionFactory::make(ChunkVersion({OID::gen(), Timestamp(2)}, {10, 11}), @@ -466,9 +879,11 @@ TEST_F(BulkWriteOpTest, BuildChildRequestFromTargetedWriteBatch) { BulkWriteCommandRequest request( { - BulkWriteInsertOp(0, BSON("x" << 1)), // to nss 0 - BulkWriteInsertOp(1, BSON("x" << 2)), // to nss 1 - BulkWriteInsertOp(0, BSON("x" << 3)) // to nss 0 + BulkWriteInsertOp(0, BSON("x" << 1)), // to nss 0 + BulkWriteInsertOp(1, BSON("x" << 2)), // to nss 1 + BulkWriteInsertOp(0, BSON("x" << 3)), // to nss 0 + BulkWriteUpdateOp(0, BSON("x" << 1), BSON("$set" << BSON("y" << 2))), // to nss 0 + BulkWriteDeleteOp(1, BSON("x" << 1)), }, {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); @@ -495,8 +910,8 @@ TEST_F(BulkWriteOpTest, BuildChildRequestFromTargetedWriteBatch) { request.getBypassDocumentValidation()); - ASSERT_EQUALS(childRequest.getOps().size(), 3u); - for (size_t i = 0; i < 3u; i++) { + ASSERT_EQUALS(childRequest.getOps().size(), 5u); + for (size_t i = 0; i < 5u; i++) { const auto& childOp = BulkWriteCRUDOp(childRequest.getOps()[i]); const auto& origOp = BulkWriteCRUDOp(request.getOps()[i]); ASSERT_BSONOBJ_EQ(childOp.toBSON(), origOp.toBSON()); @@ -509,6 +924,451 @@ TEST_F(BulkWriteOpTest, BuildChildRequestFromTargetedWriteBatch) { ASSERT_EQUALS(childRequest.getNsInfo()[1].getNs(), request.getNsInfo()[1].getNs()); } +// Tests that stmtIds are correctly attached to bulkWrite requests when the operations +// are ordered. +TEST_F(BulkWriteOpTest, TestOrderedOpsNoExistingStmtIds) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("mgmt.kids"); + + ShardEndpoint endpointA(ShardId("shardA"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + ShardEndpoint endpointB(ShardId("shardB"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + + std::vector> targeters; + targeters.push_back(initTargeterSplitRange(nss, endpointA, endpointB)); + + // Because the operations are ordered, the bulkWrite operations is broken up by shard + // endpoint. In other words, targeting this request will result in two batches: + // 1) to shard A, and then 2) another to shard B after the first batch is complete. 
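The stmtId behavior the tests below verify follows a simple scheme: every op in the client request has a statement id, taken from an explicit stmtIds array, derived from a starting stmtId, or defaulting to the op's index; each child request then carries the ids of exactly the ops in its batch, in batch order. The sketch below captures that derivation under the assumption that this indexing is all the assertions rely on (the relative precedence when both stmtIds and stmtId are set is not exercised here); the helper names are hypothetical.

```cpp
// Illustrative sketch of stmtId derivation, not the real request builder.
#include <cstddef>
#include <cstdint>
#include <optional>
#include <vector>

// Resolve the stmtId for op `i`: explicit stmtIds array > starting stmtId > op index.
std::int32_t resolveStmtId(std::size_t i,
                           const std::optional<std::vector<std::int32_t>>& stmtIds,
                           const std::optional<std::int32_t>& firstStmtId) {
    if (stmtIds) {
        return (*stmtIds)[i];
    }
    const std::int32_t base = firstStmtId.value_or(0);
    return base + static_cast<std::int32_t>(i);
}

// A child request destined for one shard carries the stmtIds of the ops in its batch.
std::vector<std::int32_t> childStmtIds(
    const std::vector<std::size_t>& opIndicesInBatch,
    const std::optional<std::vector<std::int32_t>>& stmtIds,
    const std::optional<std::int32_t>& firstStmtId) {
    std::vector<std::int32_t> out;
    for (std::size_t i : opIndicesInBatch) {
        out.push_back(resolveStmtId(i, stmtIds, firstStmtId));
    }
    return out;
}
```

Ordered versus unordered execution does not change which id an op gets, only which ids end up in which child request: for the unordered case with setStmtId(6) and shardA receiving ops 0 and 2, the child request carries {6, 8}, as asserted below.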
+ BulkWriteCommandRequest request({BulkWriteInsertOp(0, BSON("x" << -1)), // stmtId 0, shard A + BulkWriteInsertOp(0, BSON("x" << -2)), // stmtId 1, shard A + BulkWriteInsertOp(0, BSON("x" << 1)), // stmtId 2, shard B + BulkWriteInsertOp(0, BSON("x" << 2))}, // stmtId 3, shard B + {NamespaceInfoEntry(nss)}); + request.setOrdered(true); + + // Setting the txnNumber makes it a retryable write. + _opCtx->setLogicalSessionId(LogicalSessionId(UUID::gen(), SHA256Block())); + _opCtx->setTxnNumber(TxnNumber(0)); + BulkWriteOp bulkWriteOp(_opCtx, request); + + std::map> targeted; + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + + auto* batch = targeted.begin()->second.get(); + auto childRequest = bulkWriteOp.buildBulkCommandRequest(*batch); + auto childStmtIds = childRequest.getStmtIds(); + ASSERT_EQUALS(childStmtIds->size(), 2u); + ASSERT_EQUALS(childStmtIds->at(0), 0); + ASSERT_EQUALS(childStmtIds->at(1), 1); + + // Target again to get a batch for the operations to shard B. + targeted.clear(); + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + + batch = targeted.begin()->second.get(); + childRequest = bulkWriteOp.buildBulkCommandRequest(*batch); + childStmtIds = childRequest.getStmtIds(); + ASSERT_EQUALS(childStmtIds->size(), 2u); + ASSERT_EQUALS(childStmtIds->at(0), 2); + ASSERT_EQUALS(childStmtIds->at(1), 3); +} + +// Tests that stmtIds are correctly attached to bulkWrite requests when the operations +// are unordered. +TEST_F(BulkWriteOpTest, TestUnorderedOpsNoExistingStmtIds) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("zero7.spinning"); + + ShardEndpoint endpointA(ShardId("shardA"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + ShardEndpoint endpointB(ShardId("shardB"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + + std::vector> targeters; + targeters.push_back(initTargeterSplitRange(nss, endpointA, endpointB)); + + // Since the ops aren't ordered, two batches are produced on a single targeting call: + // 1) the ops to shard A (op 0 and op 2) are a batch and 2) the ops to shard B (op 1 + // and op 3) are a batch. Therefore the stmtIds in the bulkWrite request sent to the shards + // will be interleaving. + BulkWriteCommandRequest request({BulkWriteInsertOp(0, BSON("x" << -1)), // stmtId 0, shard A + BulkWriteInsertOp(0, BSON("x" << 1)), // stmtId 1, shard B + BulkWriteInsertOp(0, BSON("x" << -1)), // stmtId 2, shard A + BulkWriteInsertOp(0, BSON("x" << 2))}, // stmtId 3, shard B + {NamespaceInfoEntry(nss)}); + request.setOrdered(false); + + // Setting the txnNumber makes it a retryable write. + _opCtx->setLogicalSessionId(LogicalSessionId(UUID::gen(), SHA256Block())); + _opCtx->setTxnNumber(TxnNumber(0)); + BulkWriteOp bulkWriteOp(_opCtx, request); + + std::map> targeted; + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + + // The batch to shard A contains op 0 and op 2. + auto* batch = targeted[ShardId("shardA")].get(); + auto childRequest = bulkWriteOp.buildBulkCommandRequest(*batch); + auto childStmtIds = childRequest.getStmtIds(); + ASSERT_EQUALS(childStmtIds->size(), 2u); + ASSERT_EQUALS(childStmtIds->at(0), 0); + ASSERT_EQUALS(childStmtIds->at(1), 2); + + // The batch to shard B contains op 1 and op 3. 
+ batch = targeted[ShardId("shardB")].get(); + childRequest = bulkWriteOp.buildBulkCommandRequest(*batch); + childStmtIds = childRequest.getStmtIds(); + ASSERT_EQUALS(childStmtIds->size(), 2u); + ASSERT_EQUALS(childStmtIds->at(0), 1); + ASSERT_EQUALS(childStmtIds->at(1), 3); +} + +// Tests that stmtIds are correctly attached to bulkWrite requests when the operations +// are unordered and stmtIds are attached to the request already. +TEST_F(BulkWriteOpTest, TestUnorderedOpsStmtIdsExist) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("zero7.spinning"); + + ShardEndpoint endpointA(ShardId("shardA"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + ShardEndpoint endpointB(ShardId("shardB"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + + std::vector> targeters; + targeters.push_back(initTargeterSplitRange(nss, endpointA, endpointB)); + + // Since the ops aren't ordered, two batches are produced on a single targeting call: + // 1) the ops to shard A (op 0 and op 2) are a batch and 2) the ops to shard B (op 1 + // and op 3) are a batch. Therefore the stmtIds in the bulkWrite request sent to the shards + // will be interleaving. + BulkWriteCommandRequest request({BulkWriteInsertOp(0, BSON("x" << -1)), // stmtId 6, shard A + BulkWriteInsertOp(0, BSON("x" << 1)), // stmtId 7, shard B + BulkWriteInsertOp(0, BSON("x" << -1)), // stmtId 8, shard A + BulkWriteInsertOp(0, BSON("x" << 2))}, // stmtId 9, shard B + {NamespaceInfoEntry(nss)}); + request.setOrdered(false); + request.setStmtIds(std::vector{6, 7, 8, 9}); + + // Setting the txnNumber makes it a retryable write. + _opCtx->setLogicalSessionId(LogicalSessionId(UUID::gen(), SHA256Block())); + _opCtx->setTxnNumber(TxnNumber(0)); + BulkWriteOp bulkWriteOp(_opCtx, request); + + std::map> targeted; + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + + // The batch to shard A contains op 0 and op 2. + auto* batch = targeted[ShardId("shardA")].get(); + auto childRequest = bulkWriteOp.buildBulkCommandRequest(*batch); + auto childStmtIds = childRequest.getStmtIds(); + ASSERT_EQUALS(childStmtIds->size(), 2u); + ASSERT_EQUALS(childStmtIds->at(0), 6); + ASSERT_EQUALS(childStmtIds->at(1), 8); + + // The batch to shard B contains op 1 and op 3. + batch = targeted[ShardId("shardB")].get(); + childRequest = bulkWriteOp.buildBulkCommandRequest(*batch); + childStmtIds = childRequest.getStmtIds(); + ASSERT_EQUALS(childStmtIds->size(), 2u); + ASSERT_EQUALS(childStmtIds->at(0), 7); + ASSERT_EQUALS(childStmtIds->at(1), 9); +} + +// Tests that stmtIds are correctly attached to bulkWrite requests when the operations +// are unordered and the stmtId field exists. +TEST_F(BulkWriteOpTest, TestUnorderedOpsStmtIdFieldExists) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("zero7.spinning"); + + ShardEndpoint endpointA(ShardId("shardA"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + ShardEndpoint endpointB(ShardId("shardB"), + ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), + boost::none); + + std::vector> targeters; + targeters.push_back(initTargeterSplitRange(nss, endpointA, endpointB)); + + // Since the ops aren't ordered, two batches are produced on a single targeting call: + // 1) the ops to shard A (op 0 and op 2) are a batch and 2) the ops to shard B (op 1 + // and op 3) are a batch. Therefore the stmtIds in the bulkWrite request sent to the shards + // will be interleaving. 
+ BulkWriteCommandRequest request({BulkWriteInsertOp(0, BSON("x" << -1)), // stmtId 6, shard A + BulkWriteInsertOp(0, BSON("x" << 1)), // stmtId 7, shard B + BulkWriteInsertOp(0, BSON("x" << -1)), // stmtId 8, shard A + BulkWriteInsertOp(0, BSON("x" << 2))}, // stmtId 9, shard B + {NamespaceInfoEntry(nss)}); + request.setOrdered(false); + request.setStmtId(6); // Produces stmtIds 6, 7, 8, 9 + + // Setting the txnNumber makes it a retryable write. + _opCtx->setLogicalSessionId(LogicalSessionId(UUID::gen(), SHA256Block())); + _opCtx->setTxnNumber(TxnNumber(0)); + BulkWriteOp bulkWriteOp(_opCtx, request); + + std::map> targeted; + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + + // The batch to shard A contains op 0 and op 2. + auto* batch = targeted[ShardId("shardA")].get(); + auto childRequest = bulkWriteOp.buildBulkCommandRequest(*batch); + auto childStmtIds = childRequest.getStmtIds(); + ASSERT_EQUALS(childStmtIds->size(), 2u); + ASSERT_EQUALS(childStmtIds->at(0), 6); + ASSERT_EQUALS(childStmtIds->at(1), 8); + + // The batch to shard B contains op 1 and op 3. + batch = targeted[ShardId("shardB")].get(); + childRequest = bulkWriteOp.buildBulkCommandRequest(*batch); + childStmtIds = childRequest.getStmtIds(); + ASSERT_EQUALS(childStmtIds->size(), 2u); + ASSERT_EQUALS(childStmtIds->at(0), 7); + ASSERT_EQUALS(childStmtIds->at(1), 9); +} + +// Test BatchItemRef.getLet(). +TEST_F(BulkWriteOpTest, BatchItemRefGetLet) { + NamespaceString nss = NamespaceString::createNamespaceString_forTest("foo.bar"); + + // The content of the request (updateOp and Let) do not matter here, + // only that BatchItemRef.getLet() matches BulkWriteCommandRequest.setLet(). + BulkWriteCommandRequest request({BulkWriteUpdateOp(0, BSON("x" << 1), BSON("x" << 2))}, + {NamespaceInfoEntry(nss)}); + + BSONObj expected{BSON("key" + << "value")}; + request.setLet(expected); + + BulkWriteOp bulkWriteOp(_opCtx, request); + const auto& letOption = bulkWriteOp.getWriteOp_forTest(0).getWriteItem().getLet(); + ASSERT(letOption.has_value()); + ASSERT_BSONOBJ_EQ(letOption.value(), expected); +} + +using BulkOp = + stdx::variant; + +BulkOp makeTestInsertOp(BSONObj document) { + BulkWriteInsertOp op; + op.setInsert(0); + op.setDocument(document); + return op; +} + +BulkOp makeTestUpdateOp(BSONObj filter, + mongo::write_ops::UpdateModification updateMods, + mongo::OptionalBool upsertSupplied, + mongo::BSONObj hint, + boost::optional> arrayFilters, + boost::optional constants, + boost::optional collation, + boost::optional sort, + boost::optional returnValue, + boost::optional returnFields) { + BulkWriteUpdateOp op; + op.setUpdate(0); + op.setFilter(filter); + op.setUpdateMods(updateMods); + if (upsertSupplied.has_value()) { + op.setUpsert(true); + op.setUpsertSupplied(upsertSupplied); + } + op.setArrayFilters(arrayFilters); + op.setHint(hint); + op.setConstants(constants); + op.setCollation(collation); + op.setSort(sort); + op.setReturn(returnValue); + op.setReturnFields(returnFields); + return op; +} + +BulkOp makeTestDeleteOp(BSONObj filter, + mongo::BSONObj hint, + boost::optional collation, + boost::optional sort, + mongo::OptionalBool returnValue, + boost::optional returnFields) { + BulkWriteDeleteOp op; + op.setDeleteCommand(0); + op.setFilter(filter); + op.setHint(hint); + op.setCollation(collation); + op.setSort(sort); + op.setReturn(returnValue); + op.setReturnFields(returnFields); + return op; +} + +int getSizeEstimate(BulkOp op) { + // BatchItemRef can only be created from an underlying request, but the only field 
we care + // about on the request is the ops. The other fields are necessary to satisfy invariants. + BulkWriteCommandRequest dummyBulkRequest; + dummyBulkRequest.setOps({op}); + dummyBulkRequest.setDbName(DatabaseName::kAdmin); + dummyBulkRequest.setNsInfo({}); + return BatchItemRef(&dummyBulkRequest, 0).getSizeForBulkWriteBytes(); +} + +int getActualSize(BulkOp op) { + return BulkWriteCRUDOp(op).toBSON().objsize(); +} + +// Test that we calculate accurate estimates for bulkWrite insert ops. +TEST_F(BulkWriteOpTest, TestBulkWriteInsertSizeEstimation) { + auto basicInsert = makeTestInsertOp(fromjson("{x: 1}")); + ASSERT_EQ(getSizeEstimate(basicInsert), getActualSize(basicInsert)); + + auto largerInsert = makeTestInsertOp(fromjson("{x: 1, y: 'hello', z: {a: 1}}")); + ASSERT_EQ(getSizeEstimate(largerInsert), getActualSize(largerInsert)); +} + +// Test that we calculate accurate estimates for bulkWrite update ops. +TEST_F(BulkWriteOpTest, TestBulkWriteUpdateSizeEstimation) { + auto basicUpdate = makeTestUpdateOp(fromjson("{x: 1}") /* filter */, + write_ops::UpdateModification(fromjson("{$set: {y: 1}}")), + mongo::OptionalBool() /* upsertSupplied */, + BSONObj() /* hint */, + boost::none, + boost::none, + boost::none, + boost::none, + boost::none, + boost::none); + ASSERT_EQ(getSizeEstimate(basicUpdate), getActualSize(basicUpdate)); + + auto updateAllFieldsSetBesidesArrayFilters = + makeTestUpdateOp(fromjson("{x: 1}") /* filter */, + write_ops::UpdateModification(fromjson("{$set: {y: 1}}")), + mongo::OptionalBool(true) /* upsertSupplied */, + fromjson("{a: 1}") /* hint */, + boost::none, + fromjson("{z: 1}") /* constants */, + fromjson("{locale: 'simple'}") /* collation */, + fromjson("{p: 1}") /* sort */, + StringData("pre") /* returnValue */, + fromjson("{abc: 1, def: 1}") /* returnFields */); + ASSERT_EQ(getSizeEstimate(updateAllFieldsSetBesidesArrayFilters), + getActualSize(updateAllFieldsSetBesidesArrayFilters)); + + std::vector arrayFilters = {fromjson("{j: 1}"), fromjson("{k: 1}")}; + auto updateAllFieldsSet = + makeTestUpdateOp(fromjson("{x: 1}") /* filter */, + write_ops::UpdateModification(fromjson("{$set: {y: 1}}")), + mongo::OptionalBool(true) /* upsertSupplied */, + fromjson("{a: 1}") /* hint */, + arrayFilters, + fromjson("{z: 1}") /* constants */, + fromjson("{locale: 'simple'}") /* collation */, + fromjson("{p: 1}") /* sort */, + StringData("pre") /* returnValue */, + fromjson("{abc: 1, def: 1}") /* returnFields */); + // We can't make an exact assertion when arrayFilters is set, because the way we estimate BSON + // array index size overcounts for simplicity. + ASSERT(getSizeEstimate(updateAllFieldsSet) > getActualSize(updateAllFieldsSet)); + + std::vector pipeline = {fromjson("{$set: {y: 1}}")}; + auto updateWithPipeline = makeTestUpdateOp(fromjson("{x: 1}") /* filter */, + write_ops::UpdateModification(pipeline), + mongo::OptionalBool() /* upsertSupplied */, + BSONObj() /* hint */, + boost::none, + boost::none, + boost::none, + boost::none, + boost::none, + boost::none); + // We can't make an exact assertion when an update pipeline is used, because the way we estimate + // BSON array index size overcounts for simplicity. + ASSERT(getSizeEstimate(updateWithPipeline) > getActualSize(updateWithPipeline)); +} + +// Test that we calculate accurate estimates for bulkWrite delete ops. 
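The inexact assertions in the update-op test above (estimate strictly greater than actual for arrayFilters and pipeline updates) stem from how BSON arrays are encoded: each element is stored under its decimal index as a field name, so the per-element overhead depends on how many digits that index has. An estimator that assumes a fixed worst-case digit count per index never under-counts, but it over-counts for small arrays. The sketch below illustrates that idea with hypothetical numbers; it is not the actual estimation code.

```cpp
// Illustration of why a fixed-digit estimate over-counts BSON array key overhead.
#include <cstddef>
#include <string>

// Actual overhead for an array element's key: its decimal index as a
// C string (digits + NUL terminator), plus one type byte.
std::size_t actualKeyOverhead(std::size_t index) {
    return std::to_string(index).size() + 1 /* NUL */ + 1 /* type byte */;
}

// A conservative estimator that assumes every index needs `maxDigits` digits.
// It never under-counts, but over-counts for small arrays.
std::size_t estimatedKeyOverhead(std::size_t /*index*/, std::size_t maxDigits = 4) {
    return maxDigits + 1 /* NUL */ + 1 /* type byte */;
}
```

For a two-element arrayFilters array, the hypothetical estimator above charges 6 bytes of key overhead per element while the real encoding needs only 3, which is why the test settles for `>` rather than `==` in those cases.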
+TEST_F(BulkWriteOpTest, TestBulkWriteDeleteSizeEstimation) { + auto basicDelete = makeTestDeleteOp(fromjson("{x: 1}"), + BSONObj() /* hint */, + boost::none, + boost::none, + OptionalBool() /* returnValue */, + boost::none); + ASSERT_EQ(getSizeEstimate(basicDelete), getActualSize(basicDelete)); + + auto deleteAllFieldsSet = makeTestDeleteOp(fromjson("{x: 1}") /* filter */, + fromjson("{y: 1}") /* hint */, + fromjson("{locale: 'simple'}") /* collation */, + fromjson("{z: -1}") /* sort */, + OptionalBool(true) /* returnValue */, + fromjson("{a: 1, b: 1}") /* returnFields */); + ASSERT_EQ(getSizeEstimate(deleteAllFieldsSet), getActualSize(deleteAllFieldsSet)); +} + +// Simulates a situation where we receive a bulkWrite request with large top-level fields (in this +// case, 'let') that is very close to MaxBSONObjInternalSize. Confirms that we factor in top- +// level fields when deciding when to split batches. +TEST_F(BulkWriteOpTest, TestBulkWriteBatchSplittingLargeBaseCommandSize) { + ShardId shardId("shard"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("bar.foo"); + // Two different endpoints targeting the same shard for the two namespaces. + ShardEndpoint endpoint0( + shardId, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); + ShardEndpoint endpoint1( + shardId, + ShardVersionFactory::make(ChunkVersion({OID::gen(), Timestamp(2)}, {10, 11}), + boost::optional(boost::none)), + boost::none); + + std::vector> targeters; + targeters.push_back(initTargeterFullRange(nss0, endpoint0)); + targeters.push_back(initTargeterFullRange(nss1, endpoint1)); + + BulkWriteCommandRequest bigReq; + + // Create a ~15 MB let. + auto giantLet = BSON("a" << std::string(15077000, 'a')); + bigReq.setLet(giantLet); + + // Create a ~.1 MB document to insert. + auto insertDoc = BSON("x" << 1 << "b" << std::string(100000, 'b')); + std::vector ops; + for (auto i = 0; i < 17; i++) { + auto op = BulkWriteInsertOp(i % 2, insertDoc); + ops.push_back(op); + } + + bigReq.setLet(giantLet); + bigReq.setOps(ops); + bigReq.setNsInfo({NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); + bigReq.setDbName(DatabaseName::kAdmin); + + // Ensure we've built a request that's actual serialized size is slightly bigger than + // BSONObjMaxUserSize, which is the threshold we use to split batches. This should guarantee + // that the estimated size we calculate for a sub-batch containing all of these writes + // would also be bigger than BSONMaxUserObjSize and that we will split into multiple batches. + ASSERT(bigReq.toBSON(BSONObj()).objsize() > BSONObjMaxUserSize); + + BulkWriteOp bulkWriteOp(_opCtx, bigReq); + + TargetedBatchMap targeted; + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + // We shouldn't have targeted all of the writes yet. + auto targetedSoFar = targeted.begin()->second->getWrites().size(); + ASSERT(targetedSoFar < bigReq.getOps().size()); + targeted.clear(); + + ASSERT_OK(bulkWriteOp.target(targeters, false, targeted)); + ASSERT_EQUALS(targeted.size(), 1u); + auto remainingTargeted = targeted.begin()->second->getWrites().size(); + // We should have been able to target all the remaining writes in a second batch. + ASSERT_EQ(targetedSoFar + remainingTargeted, bigReq.getOps().size()); +} + /** * Mimics a sharding backend to test BulkWriteExec. 
*/ @@ -517,20 +1377,48 @@ class BulkWriteExecTest : public ShardingTestFixture { BulkWriteExecTest() = default; ~BulkWriteExecTest() = default; + const ShardId kShardIdA = ShardId("shardA"); + const ShardId kShardIdB = ShardId("shardB"); + void setUp() override { ShardingTestFixture::setUp(); + configTargeter()->setFindHostReturnValue(HostAndPort("FakeConfigHost", 12345)); + + std::vector> remoteShards{ + {kShardIdA, HostAndPort(str::stream() << kShardIdA << ":123")}, + {kShardIdB, HostAndPort(str::stream() << kShardIdB << ":123")}, + }; + + std::vector shards; + + for (size_t i = 0; i < remoteShards.size(); i++) { + ShardType shardType; + shardType.setName(std::get<0>(remoteShards[i]).toString()); + shardType.setHost(std::get<1>(remoteShards[i]).toString()); + + shards.push_back(shardType); + + std::unique_ptr targeter( + std::make_unique()); + targeter->setConnectionStringReturnValue( + ConnectionString(std::get<1>(remoteShards[i]))); + targeter->setFindHostReturnValue(std::get<1>(remoteShards[i])); + + targeterFactory()->addTargeterToReturn(ConnectionString(std::get<1>(remoteShards[i])), + std::move(targeter)); + } + + setupShards(shards); } }; TEST_F(BulkWriteExecTest, RefreshTargetersOnTargetErrors) { - ShardId shardIdA("shardA"); - ShardId shardIdB("shardB"); - NamespaceString nss0("foo.bar"); - NamespaceString nss1("bar.foo"); + NamespaceString nss0 = NamespaceString::createNamespaceString_forTest("foo.bar"); + NamespaceString nss1 = NamespaceString::createNamespaceString_forTest("bar.foo"); ShardEndpoint endpoint0( - shardIdA, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); + kShardIdA, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); ShardEndpoint endpoint1( - shardIdB, + kShardIdB, ShardVersionFactory::make(ChunkVersion({OID::gen(), Timestamp(2)}, {10, 11}), boost::optional(boost::none)), boost::none); @@ -549,33 +1437,56 @@ TEST_F(BulkWriteExecTest, RefreshTargetersOnTargetErrors) { {BulkWriteInsertOp(0, BSON("x" << 1)), BulkWriteInsertOp(1, BSON("x" << 1))}, {NamespaceInfoEntry(nss0), NamespaceInfoEntry(nss1)}); - // Test unordered operations. Since only the first op is untargetable, the second op will - // succeed without errors. But bulk_write_exec::execute would retry on targeting errors and try - // to refresh the targeters upon targeting errors. - request.setOrdered(false); - auto replyItems = bulk_write_exec::execute(operationContext(), targeters, request); - ASSERT_EQUALS(replyItems.size(), 2u); - ASSERT_NOT_OK(replyItems[0].getStatus()); - ASSERT_OK(replyItems[1].getStatus()); - ASSERT_EQUALS(targeter0->getNumRefreshes(), 1); - ASSERT_EQUALS(targeter1->getNumRefreshes(), 1); - - // Test ordered operations. This is mostly the same as the test case above except that we should - // only return the first error for ordered operations. - request.setOrdered(true); - replyItems = bulk_write_exec::execute(operationContext(), targeters, request); - ASSERT_EQUALS(replyItems.size(), 1u); - ASSERT_NOT_OK(replyItems[0].getStatus()); - // We should have another refresh attempt. - ASSERT_EQUALS(targeter0->getNumRefreshes(), 2); - ASSERT_EQUALS(targeter1->getNumRefreshes(), 2); + LOGV2(7695300, "Sending an unordered request with untargetable first op and valid second op."); + auto future = launchAsync([&] { + // Test unordered operations. Since only the first op is untargetable, the second op will + // succeed without errors. 
But bulk_write_exec::execute would retry on targeting errors and + // try to refresh the targeters upon targeting errors. + request.setOrdered(false); + auto replyItems = bulk_write_exec::execute(operationContext(), targeters, request); + ASSERT_EQUALS(replyItems.size(), 2u); + ASSERT_NOT_OK(replyItems[0].getStatus()); + ASSERT_OK(replyItems[1].getStatus()); + ASSERT_EQUALS(targeter0->getNumRefreshes(), 1); + ASSERT_EQUALS(targeter1->getNumRefreshes(), 1); + }); + + // Mock a bulkWrite response to respond to the second op, which is valid. + onCommandForPoolExecutor([&](const executor::RemoteCommandRequest& request) { + LOGV2(7695301, + "Shard received a request, sending mock response.", + "request"_attr = request.toString()); + BulkWriteCommandReply reply; + reply.setCursor(BulkWriteCommandResponseCursor( + 0, // cursorId + std::vector{BulkWriteReplyItem(0)})); + reply.setNumErrors(0); + return reply.toBSON(); + }); + future.default_timed_get(); + + LOGV2(7695302, "Sending an ordered request with untargetable first op and valid second op."); + // This time there is no need to mock a response because when the first op's targeting fails, + // the entire operation is halted and so nothing is sent to the shards. + future = launchAsync([&] { + // Test ordered operations. This is mostly the same as the test case above except that we + // should only return the first error for ordered operations. + request.setOrdered(true); + auto replyItems = bulk_write_exec::execute(operationContext(), targeters, request); + ASSERT_EQUALS(replyItems.size(), 1u); + ASSERT_NOT_OK(replyItems[0].getStatus()); + // We should have another refresh attempt. + ASSERT_EQUALS(targeter0->getNumRefreshes(), 2); + ASSERT_EQUALS(targeter1->getNumRefreshes(), 2); + }); + + future.default_timed_get(); } TEST_F(BulkWriteExecTest, CollectionDroppedBeforeRefreshingTargeters) { - ShardId shardId("shardA"); - NamespaceString nss("foo.bar"); + NamespaceString nss = NamespaceString::createNamespaceString_forTest("foo.bar"); ShardEndpoint endpoint( - shardId, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); + kShardIdA, ShardVersionFactory::make(ChunkVersion::IGNORED(), boost::none), boost::none); // Mock targeter that throws StaleEpoch on refresh to mimic the collection being dropped. 
class StaleEpochMockNSTargeter : public MockNSTargeter { diff --git a/src/mongo/s/write_ops/write_op.cpp b/src/mongo/s/write_ops/write_op.cpp index 19ef1a3849148..de3f48a7c1620 100644 --- a/src/mongo/s/write_ops/write_op.cpp +++ b/src/mongo/s/write_ops/write_op.cpp @@ -29,8 +29,23 @@ #include "mongo/s/write_ops/write_op.h" +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/s/query_analysis_sampler_util.h" +#include "mongo/s/shard_version.h" #include "mongo/s/transaction_router.h" +#include "mongo/s/write_ops/batched_command_response.h" #include "mongo/util/assert_util.h" namespace mongo { @@ -71,7 +86,7 @@ write_ops::WriteError getFirstNonRetryableError(const std::vectorerror->getStatus().code()); }); - dassert(nonRetryableErr != errOps.end()); + invariant(nonRetryableErr != errOps.end()); return *(*nonRetryableErr)->error; } @@ -235,16 +250,6 @@ void WriteOp::_updateOpState() { void WriteOp::cancelWrites(const write_ops::WriteError* why) { invariant(_state == WriteOpState_Pending || _state == WriteOpState_Ready); - - for (auto& childOp : _childOps) { - if (childOp.state == WriteOpState_Pending) { - childOp.endpoint.reset(new ShardEndpoint(childOp.pendingWrite->endpoint)); - if (why) - childOp.error = *why; - childOp.state = WriteOpState_Cancelled; - } - } - _state = WriteOpState_Ready; _childOps.clear(); } diff --git a/src/mongo/s/write_ops/write_op.h b/src/mongo/s/write_ops/write_op.h index e6f268fcf5ecc..96a06d7d74df1 100644 --- a/src/mongo/s/write_ops/write_op.h +++ b/src/mongo/s/write_ops/write_op.h @@ -30,10 +30,22 @@ #pragma once #include +#include +#include +#include #include +#include +#include +#include + +#include "mongo/db/operation_context.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/shard_id.h" +#include "mongo/s/chunk_manager.h" #include "mongo/s/ns_targeter.h" #include "mongo/s/write_ops/batched_command_request.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -248,7 +260,12 @@ class TargetedWriteBatch { TargetedWriteBatch& operator=(const TargetedWriteBatch&) = delete; public: - TargetedWriteBatch(const ShardId& shardId) : _shardId(shardId) {} + /** + * baseCommandSizeBytes specifies an estimate of the size of the corresponding batch request + * command prior to adding any write ops to it. + */ + TargetedWriteBatch(const ShardId& shardId, const int baseCommandSizeBytes) + : _shardId(shardId), _estimatedSizeBytes(baseCommandSizeBytes) {} const ShardId& getShardId() const { return _shardId; @@ -279,9 +296,9 @@ class TargetedWriteBatch { // TargetedWrite*s are owned by the TargetedWriteBatch std::vector> _writes; - // Conservatively estimated size of the batch, for ensuring it doesn't grow past the maximum - // BSON size - int _estimatedSizeBytes{0}; + // Conservatively estimated size of the batch command, for ensuring it doesn't grow past the + // maximum BSON size. + int _estimatedSizeBytes; }; } // namespace mongo diff --git a/src/mongo/s/write_ops/write_op_test.cpp b/src/mongo/s/write_ops/write_op_test.cpp index 2a74f5d6d6a47..ce846cb2cd4a2 100644 --- a/src/mongo/s/write_ops/write_op_test.cpp +++ b/src/mongo/s/write_ops/write_op_test.cpp @@ -27,16 +27,41 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/record_id.h" +#include "mongo/db/service_context.h" #include "mongo/db/service_context_test_fixture.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/duplicate_key_error_info.h" +#include "mongo/s/chunk_version.h" +#include "mongo/s/database_version.h" +#include "mongo/s/index_version.h" #include "mongo/s/mock_ns_targeter.h" #include "mongo/s/session_catalog_router.h" +#include "mongo/s/shard_version.h" #include "mongo/s/shard_version_factory.h" +#include "mongo/s/stale_exception.h" #include "mongo/s/transaction_router.h" #include "mongo/s/write_ops/write_op.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { @@ -142,8 +167,6 @@ class WriteOpTest : public ServiceContextTest { } WriteOpTest() { - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); _opCtxHolder = makeOperationContext(); _opCtx = _opCtxHolder.get(); } diff --git a/src/mongo/s/write_ops/write_without_shard_key_util.cpp b/src/mongo/s/write_ops/write_without_shard_key_util.cpp index d66c26d16df60..773fb44e3cc59 100644 --- a/src/mongo/s/write_ops/write_without_shard_key_util.cpp +++ b/src/mongo/s/write_ops/write_without_shard_key_util.cpp @@ -29,20 +29,62 @@ #include "mongo/s/write_ops/write_without_shard_key_util.h" +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/mutable/document.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/feature_flag.h" +#include "mongo/db/field_ref.h" +#include "mongo/db/field_ref_set.h" #include "mongo/db/internal_transactions_feature_flag_gen.h" +#include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/pipeline/expression_context.h" +#include "mongo/db/query/canonical_query.h" #include "mongo/db/query/collation/collation_index_key.h" #include "mongo/db/query/collation/collator_factory_interface.h" -#include "mongo/db/timeseries/timeseries_constants.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/resource_yielder.h" +#include "mongo/db/server_options.h" +#include "mongo/db/session/logical_session_id.h" +#include "mongo/db/storage/storage_parameters_gen.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/db/timeseries/timeseries_update_delete_util.h" +#include "mongo/db/timeseries/timeseries_write_util.h" #include "mongo/db/transaction/transaction_api.h" +#include "mongo/db/update/update_driver.h" #include "mongo/db/update/update_util.h" -#include "mongo/logv2/log.h" +#include "mongo/executor/inline_executor.h" +#include "mongo/executor/task_executor_pool.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/platform/compiler.h" +#include "mongo/rpc/get_status_from_command_result.h" #include "mongo/s/catalog_cache.h" +#include 
"mongo/s/chunk_manager.h" +#include "mongo/s/cluster_commands_helpers.h" +#include "mongo/s/collection_routing_info_targeter.h" #include "mongo/s/grid.h" #include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" #include "mongo/s/shard_key_pattern_query_util.h" #include "mongo/s/transaction_router_resource_yielder.h" +#include "mongo/s/type_collection_common_types_gen.h" +#include "mongo/s/write_ops/batched_command_response.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/future.h" +#include "mongo/util/intrusive_counter.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kSharding @@ -50,54 +92,11 @@ namespace mongo { namespace write_without_shard_key { namespace { +MONGO_FAIL_POINT_DEFINE(skipUseTwoPhaseWriteProtocolCheck); + constexpr auto kIdFieldName = "_id"_sd; const FieldRef idFieldRef(kIdFieldName); -// Used to do query validation for the _id field. -const ShardKeyPattern kVirtualIdShardKey(BSON(kIdFieldName << 1)); - -/** - * This returns "does the query have an _id field" and "is the _id field querying for a direct - * value" e.g. _id : 3 and not _id : { $gt : 3 }. - */ -bool isExactIdQuery(OperationContext* opCtx, - const NamespaceString& nss, - const BSONObj query, - const BSONObj collation, - bool hasDefaultCollation) { - auto findCommand = std::make_unique(nss); - findCommand->setFilter(query); - if (!collation.isEmpty()) { - findCommand->setCollation(collation); - } - const auto cq = CanonicalQuery::canonicalize(opCtx, - std::move(findCommand), - false, /* isExplain */ - nullptr, - ExtensionsCallbackNoop(), - MatchExpressionParser::kAllowAllSpecialFeatures); - if (cq.isOK()) { - // Only returns a shard key iff a query has a full shard key with direct/equality matches on - // all shard key fields. - auto shardKey = extractShardKeyFromQuery(kVirtualIdShardKey, *cq.getValue()); - BSONElement idElt = shardKey["_id"]; - - if (!idElt) { - return false; - } - - if (CollationIndexKey::isCollatableType(idElt.type()) && !collation.isEmpty() && - !hasDefaultCollation) { - // The collation applies to the _id field, but the user specified a collation which - // doesn't match the collection default. - return false; - } - return true; - } - - return false; -} - bool shardKeyHasCollatableType(const BSONObj& shardKey) { for (BSONElement elt : shardKey) { if (CollationIndexKey::isCollatableType(elt.type())) { @@ -108,9 +107,13 @@ bool shardKeyHasCollatableType(const BSONObj& shardKey) { } } // namespace -BSONObj generateUpsertDocument(OperationContext* opCtx, const UpdateRequest& updateRequest) { - ExtensionsCallbackNoop extensionsCallback = ExtensionsCallbackNoop(); - ParsedUpdate parsedUpdate(opCtx, &updateRequest, extensionsCallback); +std::pair generateUpsertDocument( + OperationContext* opCtx, + const UpdateRequest& updateRequest, + boost::optional timeseriesOptions, + const StringData::ComparatorInterface* comparator) { + // We are only using this to parse the query for producing the upsert document. 
+ ParsedUpdateForMongos parsedUpdate(opCtx, &updateRequest); uassertStatusOK(parsedUpdate.parseRequest()); const CanonicalQuery* canonicalQuery = @@ -124,7 +127,17 @@ BSONObj generateUpsertDocument(OperationContext* opCtx, const UpdateRequest& upd immutablePaths, parsedUpdate.getDriver()->getDocument()); - return parsedUpdate.getDriver()->getDocument().getObject(); + auto upsertDoc = parsedUpdate.getDriver()->getDocument().getObject(); + if (!timeseriesOptions) { + return {upsertDoc, BSONObj()}; + } + + tassert(7777500, + "Expected timeseries buckets collection namespace", + updateRequest.getNamespaceString().isTimeseriesBucketsCollection()); + auto upsertBucketObj = timeseries::makeBucketDocument( + std::vector{upsertDoc}, updateRequest.getNamespaceString(), *timeseriesOptions, comparator); + return {upsertBucketObj, upsertDoc}; } BSONObj constructUpsertResponse(BatchedCommandResponse& writeRes, @@ -177,59 +190,76 @@ bool useTwoPhaseProtocol(OperationContext* opCtx, bool isUpdateOrDelete, bool isUpsert, const BSONObj& query, - const BSONObj& collation) { + const BSONObj& collation, + const boost::optional& let, + const boost::optional& legacyRuntimeConstants) { + // For existing unittests that do not expect sharding utilities to be initialized, we can set + // this failpoint if we know the test will not use the two phase write protocol. if (!feature_flags::gFeatureFlagUpdateOneWithoutShardKey.isEnabled( - serverGlobalParams.featureCompatibility)) { + serverGlobalParams.featureCompatibility) || + MONGO_unlikely(skipUseTwoPhaseWriteProtocolCheck.shouldFail())) { return false; } - auto [cm, _] = + auto cri = uassertStatusOK(Grid::get(opCtx)->catalogCache()->getCollectionRoutingInfo(opCtx, nss)); // Unsharded collections always target the primary shard. - if (!cm.isSharded()) { + if (!cri.cm.isSharded()) { return false; } - // Check if the query has specified a different collation than the default collation. - auto collator = collation.isEmpty() - ? nullptr // If no collation is specified we return a nullptr signifying the simple - // collation. - : uassertStatusOK( - CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation)); - auto hasDefaultCollation = - CollatorInterface::collatorsMatch(collator.get(), cm.getDefaultCollator()); - - auto tsFields = cm.getTimeseriesFields(); + auto tsFields = cri.cm.getTimeseriesFields(); bool isTimeseries = tsFields.has_value(); // updateOne and deleteOne do not use the two phase protocol for single writes that specify // _id in their queries, unless a document is being upserted. An exact _id match requires // default collation if the _id value is a collatable type. 
if (isUpdateOrDelete && query.hasField("_id") && - isExactIdQuery(opCtx, nss, query, collation, hasDefaultCollation) && !isUpsert && - !isTimeseries) { + CollectionRoutingInfoTargeter::isExactIdQuery(opCtx, nss, query, collation, cri.cm) && + !isUpsert && !isTimeseries) { return false; } - BSONObj deleteQuery = query; - if (isTimeseries) { - auto expCtx = make_intrusive(opCtx, std::move(collator), nss); - deleteQuery = - timeseries::getBucketLevelPredicateForRouting(query, expCtx, tsFields->getMetaField()); - } - - auto shardKey = uassertStatusOK( - extractShardKeyFromBasicQuery(opCtx, nss, cm.getShardKeyPattern(), deleteQuery)); + auto expCtx = makeExpressionContextWithDefaultsForTargeter(opCtx, + nss, + collation, + boost::none, // explain + let, + legacyRuntimeConstants); + + bool arbitraryTimeseriesWritesEnabled = feature_flags::gTimeseriesDeletesSupport.isEnabled( + serverGlobalParams.featureCompatibility) || + feature_flags::gTimeseriesUpdatesSupport.isEnabled(serverGlobalParams.featureCompatibility); + auto shardKey = uassertStatusOK(extractShardKeyFromBasicQueryWithContext( + expCtx, + cri.cm.getShardKeyPattern(), + !isTimeseries + ? query + : timeseries::getBucketLevelPredicateForRouting(query, + expCtx, + tsFields->getTimeseriesOptions(), + arbitraryTimeseriesWritesEnabled))); // 'shardKey' will only be populated only if a full equality shard key is extracted. if (shardKey.isEmpty()) { return true; } else { - // If the default collection collation is not used and any field of the shard key is a - // collatable type, then we will use the two phase write protocol since we cannot target - // directly to a shard. - if (!hasDefaultCollation && shardKeyHasCollatableType(shardKey)) { + // Check if the query has specified a different collation than the default collation. + auto hasDefaultCollation = [&] { + if (collation.isEmpty()) { + return true; + } + auto collator = uassertStatusOK( + CollatorFactoryInterface::get(opCtx->getServiceContext())->makeFromBSON(collation)); + return CollatorInterface::collatorsMatch(collator.get(), cri.cm.getDefaultCollator()); + }(); + + // If the default collection collation is not used or the default collation is not the + // simple collation and any field of the shard key is a collatable type, then we will use + // the two phase write protocol since we cannot target directly to a shard. 
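Read together, the checks in useTwoPhaseProtocol (feature flag, sharded-ness, exact _id match, shard key extraction, and the collation check that follows) reduce to a small decision tree. The sketch below is a condensed, boolean-level summary with hypothetical parameter names; each flag stands for the corresponding check in the real function, and it is not the actual implementation.

```cpp
// Condensed summary of the two-phase routing decision; illustrative only.
bool wouldUseTwoPhaseProtocol(bool featureFlagEnabled,
                              bool collectionIsSharded,
                              bool isExactIdNonUpsertNonTimeseries,
                              bool queryHasFullShardKeyEquality,
                              bool collationMayAffectTargeting) {
    if (!featureFlagEnabled || !collectionIsSharded) {
        return false;  // unsharded collections always target the primary shard
    }
    if (isExactIdNonUpsertNonTimeseries) {
        return false;  // an exact _id match is handled without the two-phase protocol
    }
    if (!queryHasFullShardKeyEquality) {
        return true;   // cannot target a single shard without a full shard key
    }
    // Full shard key present: fall back to the two-phase protocol only when a
    // non-default or non-simple collation could change how the (collatable)
    // shard key values compare.
    return collationMayAffectTargeting;
}
```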
+ if ((!hasDefaultCollation || cri.cm.getDefaultCollator()) && + shardKeyHasCollatableType(shardKey)) { return true; } else { return false; @@ -260,13 +290,9 @@ StatusWith runTwoPhaseWriteProtocol(Operati auto& executor = Grid::get(opCtx)->getExecutorPool()->getFixedExecutor(); auto inlineExecutor = std::make_shared(); - auto sleepInlineExecutor = inlineExecutor->getSleepableExecutor(executor); - auto txn = - txn_api::SyncTransactionWithRetries(opCtx, - sleepInlineExecutor, - TransactionRouterResourceYielder::makeForLocalHandoff(), - inlineExecutor); + auto txn = txn_api::SyncTransactionWithRetries( + opCtx, executor, TransactionRouterResourceYielder::makeForLocalHandoff(), inlineExecutor); auto sharedBlock = std::make_shared(nss, cmdObj); auto swResult = txn.runNoThrow( @@ -297,11 +323,13 @@ StatusWith runTwoPhaseWriteProtocol(Operati auto writeRes = txnClient.runCRUDOpSync(insertRequest, std::vector{kUninitializedStmtId}); - auto upsertResponse = - constructUpsertResponse(writeRes, - queryResponse.getTargetDoc().get(), - sharedBlock->cmdObj.firstElementFieldNameStringData(), - sharedBlock->cmdObj.getBoolField("new")); + auto upsertResponse = constructUpsertResponse( + writeRes, + queryResponse.getUserUpsertDocForTimeseries() + ? queryResponse.getUserUpsertDocForTimeseries().get() + : queryResponse.getTargetDoc().get(), + sharedBlock->cmdObj.firstElementFieldNameStringData(), + sharedBlock->cmdObj.getBoolField("new")); sharedBlock->clusterWriteResponse = ClusterWriteWithoutShardKeyResponse::parseOwned( IDLParserContext("_clusterWriteWithoutShardKeyResponse"), @@ -339,5 +367,92 @@ StatusWith runTwoPhaseWriteProtocol(Operati return StatusWith(swResult.getStatus()); } } + +BSONObj generateExplainResponseForTwoPhaseWriteProtocol( + const BSONObj& clusterQueryWithoutShardKeyExplainObj, + const BSONObj& clusterWriteWithoutShardKeyExplainObj) { + // To express the two phase nature of the two phase write protocol, we use the output of the + // 'Read Phase' explain as the 'inputStage' of the 'Write Phase' explain for both queryPlanner + // and executionStats sections. + // + // An example output would look like: + + // "queryPlanner" : { + // "winningPlan" : { + // "stage" : "SHARD_WRITE", + // ... + // “inputStage”: { + // queryPlanner: { + // winningPlan: { + // stage: "SHARD_MERGE", + // ... + // + // } + // } + // } + // } + // } + // + // executionStats : { + // "executionStages" : { + // "stage" : "SHARD_WRITE", + // ... + // }, + // "inputStage" : { + // "stage" : "SHARD_MERGE", + // ... + // } + // + // } + // ... other explain result fields ... 
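The grafting described by the example output above amounts to taking the write phase's explain section and nesting the read phase's corresponding section under it as "inputStage": inside winningPlan for queryPlanner, and at the top level for executionStats. A minimal stand-alone illustration of that shape, using an ordinary tree instead of BSON (the names are illustrative, not the actual helpers):

```cpp
// Illustration of the inputStage nesting; not the real BSON-building code.
#include <string>
#include <utility>
#include <vector>

// Minimal stand-in for a BSON subdocument: just a field name and nested sections.
struct Section {
    std::string name;
    std::vector<Section> children;
};

// Graft the read-phase section under the write-phase section as "inputStage",
// mirroring the shape sketched in the comment above.
Section nestAsInputStage(Section writePhase, Section readPhase) {
    readPhase.name = "inputStage";
    writePhase.children.push_back(std::move(readPhase));
    return writePhase;
}
```

For queryPlanner this nesting happens inside the write phase's winningPlan; for executionStats the read phase becomes a sibling "inputStage" next to executionStages, which matches the two lambdas that follow.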
+ + auto queryPlannerOutput = [&] { + auto clusterQueryWithoutShardKeyQueryPlannerObj = + clusterQueryWithoutShardKeyExplainObj.getObjectField("queryPlanner"); + auto clusterWriteWithoutShardKeyQueryPlannerObj = + clusterWriteWithoutShardKeyExplainObj.getObjectField("queryPlanner"); + + auto winningPlan = clusterWriteWithoutShardKeyQueryPlannerObj.getObjectField("winningPlan"); + BSONObjBuilder newWinningPlanBuilder(winningPlan); + newWinningPlanBuilder.appendObject("inputStage", + clusterQueryWithoutShardKeyQueryPlannerObj.objdata()); + auto newWinningPlan = newWinningPlanBuilder.obj(); + + auto queryPlannerObjNoWinningPlan = + clusterWriteWithoutShardKeyQueryPlannerObj.removeField("winningPlan"); + BSONObjBuilder newQueryPlannerBuilder(queryPlannerObjNoWinningPlan); + newQueryPlannerBuilder.appendObject("winningPlan", newWinningPlan.objdata()); + return newQueryPlannerBuilder.obj(); + }(); + + auto executionStatsOutput = [&] { + auto clusterQueryWithoutShardKeyExecutionStatsObj = + clusterQueryWithoutShardKeyExplainObj.getObjectField("executionStats"); + auto clusterWriteWithoutShardKeyExecutionStatsObj = + clusterWriteWithoutShardKeyExplainObj.getObjectField("executionStats"); + + if (clusterQueryWithoutShardKeyExecutionStatsObj.isEmpty() && + clusterWriteWithoutShardKeyExecutionStatsObj.isEmpty()) { + return BSONObj(); + } + + BSONObjBuilder newExecutionStatsBuilder(clusterWriteWithoutShardKeyExecutionStatsObj); + newExecutionStatsBuilder.appendObject( + "inputStage", clusterQueryWithoutShardKeyExecutionStatsObj.objdata()); + return newExecutionStatsBuilder.obj(); + }(); + + BSONObjBuilder explainOutputBuilder; + if (!queryPlannerOutput.isEmpty()) { + explainOutputBuilder.appendObject("queryPlanner", queryPlannerOutput.objdata()); + } + if (!executionStatsOutput.isEmpty()) { + explainOutputBuilder.appendObject("executionStats", executionStatsOutput.objdata()); + } + // This step is to get 'command', 'serverInfo', and 'serverParameter' fields to return in the + // final explain output. + explainOutputBuilder.appendElementsUnique(clusterWriteWithoutShardKeyExplainObj); + return explainOutputBuilder.obj(); +} } // namespace write_without_shard_key } // namespace mongo diff --git a/src/mongo/s/write_ops/write_without_shard_key_util.h b/src/mongo/s/write_ops/write_without_shard_key_util.h index ed4ab44e8594f..675b6f6953dae 100644 --- a/src/mongo/s/write_ops/write_without_shard_key_util.h +++ b/src/mongo/s/write_ops/write_without_shard_key_util.h @@ -29,19 +29,37 @@ #pragma once +#include +#include + +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/ops/parsed_update.h" +#include "mongo/db/pipeline/legacy_runtime_constants_gen.h" +#include "mongo/db/timeseries/timeseries_gen.h" #include "mongo/s/request_types/cluster_commands_without_shard_key_gen.h" namespace mongo { namespace write_without_shard_key { +// Used as a "dummy" target document for constructing explain responses for single writes without +// shard key. +const BSONObj targetDocForExplain = BSON("_id" + << "WriteWithoutShardKey"); + /** * Uses updateDriver to produce the document to insert. Only use when {upsert: true}.
*/ -BSONObj generateUpsertDocument(OperationContext* opCtx, const UpdateRequest& updateRequest); +std::pair generateUpsertDocument( + OperationContext* opCtx, + const UpdateRequest& updateRequest, + boost::optional timeseriesOptions, + const StringData::ComparatorInterface* comparator); /** * Returns true if we can use the two phase protocol to complete a single write without shard @@ -52,7 +70,9 @@ bool useTwoPhaseProtocol(OperationContext* opCtx, bool isUpdateOrDelete, bool isUpsert, const BSONObj& query, - const BSONObj& collation); + const BSONObj& collation, + const boost::optional& let, + const boost::optional& legacyRuntimeConstants); /** * Runs and returns the result of running a write without a shard key using the two phase protocol. @@ -73,6 +93,13 @@ bool useTwoPhaseProtocol(OperationContext* opCtx, StatusWith runTwoPhaseWriteProtocol(OperationContext* opCtx, NamespaceString nss, BSONObj cmdObj); +/** + * Return a formatted 'explain' response that describes the work done in the two phase write + * protocol. + **/ +BSONObj generateExplainResponseForTwoPhaseWriteProtocol( + const BSONObj& clusterQueryWithoutShardKeyExplainObj, + const BSONObj& clusterWriteWithoutShardKeyExplainObj); } // namespace write_without_shard_key } // namespace mongo diff --git a/src/mongo/s/write_ops/write_without_shard_key_util_test.cpp b/src/mongo/s/write_ops/write_without_shard_key_util_test.cpp index 363fbc9e19b3b..26c24152c8ddc 100644 --- a/src/mongo/s/write_ops/write_without_shard_key_util_test.cpp +++ b/src/mongo/s/write_ops/write_without_shard_key_util_test.cpp @@ -27,14 +27,36 @@ * it in the license file. */ +#include +#include +#include +#include + +#include + +#include "mongo/bson/json.h" +#include "mongo/db/cursor_id.h" +#include "mongo/db/feature_flag.h" #include "mongo/db/ops/update_request.h" +#include "mongo/db/ops/write_ops_gen.h" +#include "mongo/db/ops/write_ops_parsers.h" +#include "mongo/db/query/collation/collator_interface.h" +#include "mongo/db/query/cursor_response.h" +#include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/executor/network_test_env.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/idl/server_parameter_test_util.h" -#include "mongo/logv2/log.h" +#include "mongo/s/catalog/type_collection.h" +#include "mongo/s/catalog_cache.h" #include "mongo/s/catalog_cache_test_fixture.h" -#include "mongo/s/concurrency/locker_mongos_client_observer.h" +#include "mongo/s/chunk_manager.h" +#include "mongo/s/shard_key_pattern.h" #include "mongo/s/sharding_feature_flags_gen.h" #include "mongo/s/write_ops/write_without_shard_key_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -86,20 +108,14 @@ class UnshardedCollectionTest : public CatalogCacheTestFixture { }; class ProduceUpsertDocumentTest : public ServiceContextTest { -public: - void setUp() override { - ServiceContextTest::setUp(); - auto service = getServiceContext(); - service->registerClientObserver(std::make_unique()); - _opCtx = makeOperationContext(); - } +protected: + ProduceUpsertDocumentTest() = default; OperationContext* getOpCtx() const { return _opCtx.get(); } -protected: - ServiceContext::UniqueOperationContext _opCtx; + ServiceContext::UniqueOperationContext _opCtx{makeOperationContext()}; }; TEST_F(WriteWithoutShardKeyUtilTest, 
WriteQueryContainingFullShardKeyCanTargetSingleDocument) { @@ -111,15 +127,20 @@ TEST_F(WriteWithoutShardKeyUtilTest, WriteQueryContainingFullShardKeyCanTargetSi true /* isUpdateOrDelete */, false /* isUpsert */, BSON("a" << 1 << "b" << 1), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, false); - useTwoPhaseProtocol = write_without_shard_key::useTwoPhaseProtocol(getOpCtx(), - kNss, - false /* isUpdateOrDelete */, - false /* isUpsert */, - BSON("a" << 1 << "b" << 1), - {} /* collation */); + useTwoPhaseProtocol = + write_without_shard_key::useTwoPhaseProtocol(getOpCtx(), + kNss, + false /* isUpdateOrDelete */, + false /* isUpsert */, + BSON("a" << 1 << "b" << 1), + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, false); } @@ -133,15 +154,20 @@ TEST_F(WriteWithoutShardKeyUtilTest, true /* isUpdateOrDelete */, false /* isUpsert */, BSON("a" << 1), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); - useTwoPhaseProtocol = write_without_shard_key::useTwoPhaseProtocol(getOpCtx(), - kNss, - false /* isUpdateOrDelete */, - false /* isUpsert */, - BSON("a" << 1), - {} /* collation */); + useTwoPhaseProtocol = + write_without_shard_key::useTwoPhaseProtocol(getOpCtx(), + kNss, + false /* isUpdateOrDelete */, + false /* isUpsert */, + BSON("a" << 1), + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -155,7 +181,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, true /* isUpdateOrDelete */, false /* isUpsert */, BSON("_id" << 1), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, false); } @@ -169,15 +197,20 @@ TEST_F(WriteWithoutShardKeyUtilTest, true /* isUpdateOrDelete */, false /* isUpsert */, BSON("x" << 1), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); - useTwoPhaseProtocol = write_without_shard_key::useTwoPhaseProtocol(getOpCtx(), - kNss, - false /* isUpdateOrDelete */, - false /* isUpsert */, - BSON("x" << 1), - {} /* collation */); + useTwoPhaseProtocol = + write_without_shard_key::useTwoPhaseProtocol(getOpCtx(), + kNss, + false /* isUpdateOrDelete */, + false /* isUpsert */, + BSON("x" << 1), + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -190,7 +223,10 @@ TEST_F(WriteWithoutShardKeyUtilTest, FindAndModifyQueryWithOnlyIdMustUseTwoPhase false /* isUpdateOrDelete */, false /* isUpsert */, BSON("_id" << 1), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); + ; ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -203,7 +239,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, FindAndModifyQueryWithoutShardKeyMustUseTwo false /* isUpdateOrDelete */, false /* isUpsert */, BSON("x" << 1), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -216,7 +254,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, QueryWithFeatureFlagDisabledDoesNotUseTwoPh false /* isUpdateOrDelete */, false /* isUpsert */, BSON("x" << 1), - {} /* collation 
*/); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, false); } @@ -249,7 +289,9 @@ TEST_F(UnshardedCollectionTest, UnshardedCollectionDoesNotUseTwoPhaseProtocol) { true /* isUpdateOrDelete */, false /* isUpsert */, BSON("x" << 1), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, false); } @@ -267,7 +309,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, << "b" << "b"), BSON("collation" - << "lowercase") /* collation */); + << "lowercase") /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -282,7 +326,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, false /* isUpsert */, BSON("a" << 1 << "b" << 1), BSON("collation" - << "lowercase") /* collation */); + << "lowercase") /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, false); } @@ -298,7 +344,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, BSON("_id" << "hello"), BSON("collation" - << "lowercase") /* collation */); + << "lowercase") /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -313,7 +361,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, false /* isUpsert */, BSON("_id" << 1), BSON("collation" - << "lowercase") /* collation */); + << "lowercase") /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, false); } @@ -326,7 +376,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, WriteQueryWithOnlyIdAndUpsertUsesTwoPhasePr true /* isUpdateOrDelete */, true /* isUpsert */, BSON("_id" << BSON("$eq" << 1)), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -340,7 +392,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, true /* isUpdateOrDelete */, true /* isUpsert */, BSON("a" << 1 << "_id" << BSON("$eq" << 1)), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -354,7 +408,9 @@ TEST_F(WriteWithoutShardKeyUtilTest, true /* isUpdateOrDelete */, false /* isUpsert */, BSON("_id" << BSON("$gt" << 1)), - {} /* collation */); + {} /* collation */, + boost::none /* let */, + boost::none /* legacyRuntimeConstants */); ASSERT_EQ(useTwoPhaseProtocol, true); } @@ -367,7 +423,8 @@ TEST_F(ProduceUpsertDocumentTest, produceUpsertDocumentUsingReplacementUpdate) { updateCommandRequest.setUpdates({entry}); UpdateRequest updateRequest(updateCommandRequest.getUpdates().front()); - auto doc = write_without_shard_key::generateUpsertDocument(getOpCtx(), updateRequest); + auto [doc, _] = write_without_shard_key::generateUpsertDocument( + getOpCtx(), updateRequest, /*timeseriesOptions=*/boost::none, /*comparator=*/nullptr); ASSERT_BSONOBJ_EQ(doc, fromjson("{ _id: 3, x: 2 }")); } @@ -387,7 +444,8 @@ TEST_F(ProduceUpsertDocumentTest, produceUpsertDocumentUsingLetConstantAndPipeli updateCommandRequest.setUpdates({entry}); UpdateRequest updateRequest(updateCommandRequest.getUpdates().front()); - auto doc = write_without_shard_key::generateUpsertDocument(getOpCtx(), updateRequest); + auto [doc, _] = write_without_shard_key::generateUpsertDocument( + getOpCtx(), updateRequest, /*timeseriesOptions=*/boost::none, /*comparator=*/nullptr); ASSERT_BSONOBJ_EQ(doc, 
fromjson("{ _id: 4, x: 'foo', y: 3 }")); } @@ -406,7 +464,8 @@ TEST_F(ProduceUpsertDocumentTest, produceUpsertDocumentUsingArrayFilterAndModifi updateCommandRequest.setUpdates({entry}); UpdateRequest updateRequest(updateCommandRequest.getUpdates().front()); - auto doc = write_without_shard_key::generateUpsertDocument(getOpCtx(), updateRequest); + auto [doc, _] = write_without_shard_key::generateUpsertDocument( + getOpCtx(), updateRequest, /*timeseriesOptions=*/boost::none, /*comparator=*/nullptr); ASSERT_BSONOBJ_EQ(doc, fromjson("{ _id: 4, x: [ { a: 93 } ] }")); } @@ -431,7 +490,8 @@ TEST_F(ProduceUpsertDocumentTest, produceUpsertDocumentUsingCollation) { updateCommandRequest.setUpdates({entry}); UpdateRequest updateRequest(updateCommandRequest.getUpdates().front()); - auto doc = write_without_shard_key::generateUpsertDocument(getOpCtx(), updateRequest); + auto [doc, _] = write_without_shard_key::generateUpsertDocument( + getOpCtx(), updateRequest, /*timeseriesOptions=*/boost::none, /*comparator=*/nullptr); ASSERT_BSONOBJ_EQ(doc, fromjson("{ _id: 4, x: [ { a: 'FOO' }, { a: 'FOO' }, { a: 'foo' } ] }")); } diff --git a/src/mongo/scripting/SConscript b/src/mongo/scripting/SConscript index fb5b8ccc41e4e..c9b283bd82235 100644 --- a/src/mongo/scripting/SConscript +++ b/src/mongo/scripting/SConscript @@ -7,7 +7,14 @@ Import([ 'serverJs', ]) -env.Library( +scripting_common_env = env.Clone() +# TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update +scripting_common_env.Append( + CXXFLAGS=[] if scripting_common_env.TargetOSIs('windows') else [ + '-Wno-deprecated', + ], ) + +scripting_common_env.Library( target='scripting_common', source=[ 'deadline_monitor.cpp', @@ -54,8 +61,9 @@ if jsEngine: # TODO(SERVER-59992): Remove -Wno-class-memacces where possible. '-Wno-unknown-warning-option', '-Wno-class-memaccess', + # TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update + '-Wno-deprecated', ], ) - scriptingEnv.InjectMozJS() scriptingEnv.JSHeader( diff --git a/src/mongo/scripting/bson_template_evaluator.cpp b/src/mongo/scripting/bson_template_evaluator.cpp index 37a1bffec8497..846e203d54f6c 100644 --- a/src/mongo/scripting/bson_template_evaluator.cpp +++ b/src/mongo/scripting/bson_template_evaluator.cpp @@ -29,11 +29,15 @@ #include "mongo/scripting/bson_template_evaluator.h" -#include -#include +#include +#include +#include #include "mongo/base/static_assert.h" -#include "mongo/util/str.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder_fwd.h" +#include "mongo/util/duration.h" +#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/scripting/bson_template_evaluator.h b/src/mongo/scripting/bson_template_evaluator.h index ff1de6f420c07..0687aded1710a 100644 --- a/src/mongo/scripting/bson_template_evaluator.h +++ b/src/mongo/scripting/bson_template_evaluator.h @@ -44,10 +44,15 @@ */ #pragma once +#include +#include #include #include #include +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/jsobj.h" #include "mongo/platform/random.h" diff --git a/src/mongo/scripting/bson_template_evaluator_test.cpp b/src/mongo/scripting/bson_template_evaluator_test.cpp index 6309fb79f35a1..9016abc817da6 100644 --- a/src/mongo/scripting/bson_template_evaluator_test.cpp +++ b/src/mongo/scripting/bson_template_evaluator_test.cpp @@ -27,9 +27,14 @@ * it in the license file. 
*/ -#include "mongo/db/jsobj.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/scripting/bson_template_evaluator.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/scripting/dbdirectclient_factory.cpp b/src/mongo/scripting/dbdirectclient_factory.cpp index 5a3b5a89e2845..49517e15235cd 100644 --- a/src/mongo/scripting/dbdirectclient_factory.cpp +++ b/src/mongo/scripting/dbdirectclient_factory.cpp @@ -27,13 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/scripting/dbdirectclient_factory.h" - -#include "mongo/db/dbdirectclient.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/scripting/dbdirectclient_factory.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" namespace mongo { diff --git a/src/mongo/scripting/dbdirectclient_factory.h b/src/mongo/scripting/dbdirectclient_factory.h index eed8ec5dcf53f..04260905373ca 100644 --- a/src/mongo/scripting/dbdirectclient_factory.h +++ b/src/mongo/scripting/dbdirectclient_factory.h @@ -32,9 +32,12 @@ #include #include +#include "mongo/client/dbclient_base.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" + namespace mongo { -class DBClientBase; class OperationContext; class ServiceContext; diff --git a/src/mongo/scripting/deadline_monitor.cpp b/src/mongo/scripting/deadline_monitor.cpp index 964ae42ebfc85..3c5682d392830 100644 --- a/src/mongo/scripting/deadline_monitor.cpp +++ b/src/mongo/scripting/deadline_monitor.cpp @@ -27,9 +27,8 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include "mongo/scripting/deadline_monitor.h" +#include "mongo/platform/atomic_word.h" #include "mongo/scripting/deadline_monitor_gen.h" diff --git a/src/mongo/scripting/deadline_monitor.h b/src/mongo/scripting/deadline_monitor.h index 40c8217cb07b5..eb4cbc7e1e6b7 100644 --- a/src/mongo/scripting/deadline_monitor.h +++ b/src/mongo/scripting/deadline_monitor.h @@ -28,7 +28,10 @@ */ #pragma once +// IWYU pragma: no_include "cxxabi.h" #include +#include +#include #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" @@ -37,6 +40,8 @@ #include "mongo/stdx/unordered_map.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/concurrency/mutex.h" +#include "mongo/util/concurrency/thread_name.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/scripting/deadline_monitor_test.cpp b/src/mongo/scripting/deadline_monitor_test.cpp index 8024bc8982e2e..05ba0d4646d52 100644 --- a/src/mongo/scripting/deadline_monitor_test.cpp +++ b/src/mongo/scripting/deadline_monitor_test.cpp @@ -29,12 +29,14 @@ // DeadlineMonitor unit tests -#include "mongo/platform/basic.h" - -#include "mongo/scripting/deadline_monitor.h" +#include +#include -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/scripting/deadline_monitor.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { diff --git a/src/mongo/scripting/engine.cpp b/src/mongo/scripting/engine.cpp index 7faf89e5ae883..b9888c117c160 100644 --- a/src/mongo/scripting/engine.cpp +++ b/src/mongo/scripting/engine.cpp @@ -28,24 +28,47 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/engine.h" - #include +#include #include - +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" #include "mongo/client/dbclient_base.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/service_context.h" +#include "mongo/db/storage/recovery_unit.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" +#include "mongo/platform/compiler.h" +#include "mongo/platform/mutex.h" #include "mongo/scripting/dbdirectclient_factory.h" +#include "mongo/scripting/engine.h" +#include "mongo/stdx/thread.h" #include "mongo/util/ctype.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" #include "mongo/util/file.h" -#include "mongo/util/text.h" +#include "mongo/util/str.h" +#include "mongo/util/text.h" // IWYU pragma: keep #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/scripting/engine.h b/src/mongo/scripting/engine.h index cb6fbd3fbdafb..cdd06b6342454 100644 --- a/src/mongo/scripting/engine.h +++ b/src/mongo/scripting/engine.h @@ -29,10 +29,29 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" 
+#include "mongo/bson/timestamp.h" #include "mongo/db/jsobj.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/decimal128.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/time_support.h" namespace mongo { typedef unsigned long long ScriptingFunction; diff --git a/src/mongo/scripting/engine_none.cpp b/src/mongo/scripting/engine_none.cpp index 8a44a523739fe..42b02fe4a24d5 100644 --- a/src/mongo/scripting/engine_none.cpp +++ b/src/mongo/scripting/engine_none.cpp @@ -27,6 +27,8 @@ * it in the license file. */ +#include + #include "mongo/scripting/engine.h" namespace mongo { diff --git a/src/mongo/scripting/jsexception.cpp b/src/mongo/scripting/jsexception.cpp index ad76b21d84658..c39ac2abf6273 100644 --- a/src/mongo/scripting/jsexception.cpp +++ b/src/mongo/scripting/jsexception.cpp @@ -27,11 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/scripting/jsexception.h" - -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" namespace mongo { diff --git a/src/mongo/scripting/jsexception.h b/src/mongo/scripting/jsexception.h index 0b7cdb6cb23bf..438f2eaa66562 100644 --- a/src/mongo/scripting/jsexception.h +++ b/src/mongo/scripting/jsexception.h @@ -29,11 +29,16 @@ #pragma once +#include #include +#include + +#include #include "mongo/base/error_codes.h" #include "mongo/base/error_extra_info.h" #include "mongo/base/status.h" +#include "mongo/util/assert_util_core.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/PosixNSPR.cpp b/src/mongo/scripting/mozjs/PosixNSPR.cpp index 77f0aedd07f8f..2e98911a8fb80 100644 --- a/src/mongo/scripting/mozjs/PosixNSPR.cpp +++ b/src/mongo/scripting/mozjs/PosixNSPR.cpp @@ -11,18 +11,11 @@ * entrypoints. */ -#include "mongo/platform/basic.h" - -#include -#include +#include #include -#include "mongo/platform/mutex.h" -#include "mongo/stdx/chrono.h" -#include "mongo/stdx/condition_variable.h" -#include "mongo/stdx/thread.h" -#include "mongo/util/concurrency/thread_name.h" -#include "mongo/util/time_support.h" +#include +#include #define MONGO_MOZ_UNIMPLEMENTED(ReturnType, funcName, ...) \ ReturnType funcName(__VA_ARGS__) { \ diff --git a/src/mongo/scripting/mozjs/base.cpp b/src/mongo/scripting/mozjs/base.cpp index 7fe0081f20701..f2cdcbe7a2b8a 100644 --- a/src/mongo/scripting/mozjs/base.cpp +++ b/src/mongo/scripting/mozjs/base.cpp @@ -27,7 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include + +#include +#include +#include +#include +#include +#include #include "mongo/scripting/mozjs/base.h" diff --git a/src/mongo/scripting/mozjs/base.h b/src/mongo/scripting/mozjs/base.h index 8a3916942f3ee..79016facce17e 100644 --- a/src/mongo/scripting/mozjs/base.h +++ b/src/mongo/scripting/mozjs/base.h @@ -29,6 +29,12 @@ #pragma once +#include +#include +#include +#include +#include +#include #include namespace mongo { diff --git a/src/mongo/scripting/mozjs/bindata.cpp b/src/mongo/scripting/mozjs/bindata.cpp index cf34248de5643..c19caae44723c 100644 --- a/src/mongo/scripting/mozjs/bindata.cpp +++ b/src/mongo/scripting/mozjs/bindata.cpp @@ -27,21 +27,37 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/bindata.h" - +#include +#include #include #include +#include +#include +#include #include - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/scripting/mozjs/bindata.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" #include "mongo/util/hex.h" #include "mongo/util/str.h" diff --git a/src/mongo/scripting/mozjs/bindata.h b/src/mongo/scripting/mozjs/bindata.h index eafecc602e925..44fab517ec0a6 100644 --- a/src/mongo/scripting/mozjs/bindata.h +++ b/src/mongo/scripting/mozjs/bindata.h @@ -29,6 +29,12 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/bson.cpp b/src/mongo/scripting/mozjs/bson.cpp index 69a892a4b394d..3143fe0c13866 100644 --- a/src/mongo/scripting/mozjs/bson.cpp +++ b/src/mongo/scripting/mozjs/bson.cpp @@ -26,21 +26,37 @@ * exception statement from all source files in the program, then also delete * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/bson.h" - -#include +#include +#include +#include +#include #include +#include #include -#include - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bson_comparator_interface_base.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj_comparator_interface.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/scripting/mozjs/bson.h" #include "mongo/scripting/mozjs/idwrapper.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/jsstringwrapper.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/util/assert_util.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/bson.h b/src/mongo/scripting/mozjs/bson.h index 868c40173c8b1..02c825a06adc6 100644 --- a/src/mongo/scripting/mozjs/bson.h +++ b/src/mongo/scripting/mozjs/bson.h @@ -29,9 +29,14 @@ #pragma once +#include +#include +#include #include +#include "mongo/bson/bsonobj.h" #include "mongo/db/jsobj.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/code.cpp b/src/mongo/scripting/mozjs/code.cpp index 2744558dffec6..440ff2c779959 100644 --- a/src/mongo/scripting/mozjs/code.cpp +++ b/src/mongo/scripting/mozjs/code.cpp @@ -27,17 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/scripting/mozjs/code.h" - -#include +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/scripting/mozjs/code.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/code.h b/src/mongo/scripting/mozjs/code.h index c3c8b770142af..64525a6457519 100644 --- a/src/mongo/scripting/mozjs/code.h +++ b/src/mongo/scripting/mozjs/code.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/countdownlatch.cpp b/src/mongo/scripting/mozjs/countdownlatch.cpp index 1e3974b89591d..79a7adf9d0ad4 100644 --- a/src/mongo/scripting/mozjs/countdownlatch.cpp +++ b/src/mongo/scripting/mozjs/countdownlatch.cpp @@ -27,18 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/countdownlatch.h" - +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include #include +#include +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/platform/mutex.h" -#include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/countdownlatch.h" #include "mongo/scripting/mozjs/objectwrapper.h" -#include "mongo/scripting/mozjs/valuewriter.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/countdownlatch.h b/src/mongo/scripting/mozjs/countdownlatch.h index f97166c607a72..653de38d21ce2 100644 --- a/src/mongo/scripting/mozjs/countdownlatch.h +++ b/src/mongo/scripting/mozjs/countdownlatch.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/cursor.cpp b/src/mongo/scripting/mozjs/cursor.cpp index 9a019b96be287..50f7b13f24366 100644 --- a/src/mongo/scripting/mozjs/cursor.cpp +++ b/src/mongo/scripting/mozjs/cursor.cpp @@ -27,18 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include #include +#include -#include "mongo/scripting/mozjs/cursor.h" +#include +#include -#include "mongo/scripting/mozjs/bson.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/scripting/mozjs/cursor.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/cursor.h b/src/mongo/scripting/mozjs/cursor.h index 287182a79c354..68573962675aa 100644 --- a/src/mongo/scripting/mozjs/cursor.h +++ b/src/mongo/scripting/mozjs/cursor.h @@ -29,7 +29,16 @@ #pragma once +#include +#include + +#include +#include +#include + #include "mongo/client/dbclient_cursor.h" +#include "mongo/scripting/engine.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/cursor_handle.cpp b/src/mongo/scripting/mozjs/cursor_handle.cpp index 74bfd6c4dcf88..68cd428a660ac 100644 --- a/src/mongo/scripting/mozjs/cursor_handle.cpp +++ b/src/mongo/scripting/mozjs/cursor_handle.cpp @@ -28,16 +28,22 @@ */ -#include "mongo/platform/basic.h" - +#include #include +#include + +#include +#include #include "mongo/client/dbclient_base.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/scripting/mozjs/cursor_handle.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/scripting_util_gen.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/scripting/mozjs/cursor_handle.h b/src/mongo/scripting/mozjs/cursor_handle.h index eb28e206d224f..d15add73dd930 100644 --- a/src/mongo/scripting/mozjs/cursor_handle.h +++ b/src/mongo/scripting/mozjs/cursor_handle.h @@ -29,7 +29,15 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/client/dbclient_cursor.h" +#include "mongo/db/namespace_string.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/db.cpp b/src/mongo/scripting/mozjs/db.cpp index 22e3ca486e5d3..47e97781b1800 100644 --- a/src/mongo/scripting/mozjs/db.cpp +++ b/src/mongo/scripting/mozjs/db.cpp @@ -27,21 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/db.h" - +#include #include +#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" +#include "mongo/scripting/mozjs/db.h" +#include "mongo/scripting/mozjs/dbcollection.h" #include "mongo/scripting/mozjs/idwrapper.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/jsstringwrapper.h" #include "mongo/scripting/mozjs/objectwrapper.h" -#include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/db.h b/src/mongo/scripting/mozjs/db.h index 5e63b0f6df56d..6ce9fbfa0135b 100644 --- a/src/mongo/scripting/mozjs/db.h +++ b/src/mongo/scripting/mozjs/db.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/dbcollection.cpp b/src/mongo/scripting/mozjs/dbcollection.cpp index 020e512f51483..eb2ee0d7c9fa0 100644 --- a/src/mongo/scripting/mozjs/dbcollection.cpp +++ b/src/mongo/scripting/mozjs/dbcollection.cpp @@ -27,17 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include -#include "mongo/scripting/mozjs/dbcollection.h" +#include +#include -#include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" -#include "mongo/scripting/mozjs/bson.h" +#include "mongo/base/error_codes.h" #include "mongo/scripting/mozjs/db.h" +#include "mongo/scripting/mozjs/dbcollection.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" -#include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/dbcollection.h b/src/mongo/scripting/mozjs/dbcollection.h index 27cbe632c66bd..1d30ed54857bc 100644 --- a/src/mongo/scripting/mozjs/dbcollection.h +++ b/src/mongo/scripting/mozjs/dbcollection.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/dbpointer.cpp b/src/mongo/scripting/mozjs/dbpointer.cpp index 467ae2359b55d..970d933d05b47 100644 --- a/src/mongo/scripting/mozjs/dbpointer.cpp +++ b/src/mongo/scripting/mozjs/dbpointer.cpp @@ -27,15 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/scripting/mozjs/dbpointer.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/scripting/mozjs/dbpointer.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" -#include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/util/str.h" +#include "mongo/scripting/mozjs/oid.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/dbpointer.h b/src/mongo/scripting/mozjs/dbpointer.h index 6e049fccfbf98..ee2b56e00ab3b 100644 --- a/src/mongo/scripting/mozjs/dbpointer.h +++ b/src/mongo/scripting/mozjs/dbpointer.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/dbquery.cpp b/src/mongo/scripting/mozjs/dbquery.cpp index 0e12f899609ca..dcb76818dff4d 100644 --- a/src/mongo/scripting/mozjs/dbquery.cpp +++ b/src/mongo/scripting/mozjs/dbquery.cpp @@ -27,17 +27,22 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/dbquery.h" - +#include #include +#include #include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/scripting/mozjs/dbquery.h" #include "mongo/scripting/mozjs/idwrapper.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/dbquery.h b/src/mongo/scripting/mozjs/dbquery.h index b6419e0cfcb67..79bba221a2df6 100644 --- a/src/mongo/scripting/mozjs/dbquery.h +++ b/src/mongo/scripting/mozjs/dbquery.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/dbref.cpp b/src/mongo/scripting/mozjs/dbref.cpp index 9f602813c84d6..95b1ccd8e212a 100644 --- a/src/mongo/scripting/mozjs/dbref.cpp +++ b/src/mongo/scripting/mozjs/dbref.cpp @@ -28,15 +28,22 @@ */ #include +#include +#include +#include -#include "mongo/platform/basic.h" +#include +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/scripting/mozjs/bson.h" #include "mongo/scripting/mozjs/dbref.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" -#include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/dbref.h b/src/mongo/scripting/mozjs/dbref.h index 9f446d2de02d8..28affc64bcd05 100644 --- a/src/mongo/scripting/mozjs/dbref.h +++ b/src/mongo/scripting/mozjs/dbref.h @@ -29,6 +29,12 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/bsonobj.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/engine.cpp b/src/mongo/scripting/mozjs/engine.cpp index db5372f49313f..e39bddf2b331a 100644 --- a/src/mongo/scripting/mozjs/engine.cpp +++ b/src/mongo/scripting/mozjs/engine.cpp @@ -28,17 +28,34 @@ */ -#include 
"mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/engine.h" - +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/scripting/mozjs/engine.h" #include "mongo/scripting/mozjs/engine_gen.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/proxyscope.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/scripting/mozjs/engine.h b/src/mongo/scripting/mozjs/engine.h index b1952f4e4ab87..ddc3d90a1d2ee 100644 --- a/src/mongo/scripting/mozjs/engine.h +++ b/src/mongo/scripting/mozjs/engine.h @@ -29,7 +29,9 @@ #pragma once +#include #include +#include #include "mongo/platform/mutex.h" #include "mongo/scripting/deadline_monitor.h" diff --git a/src/mongo/scripting/mozjs/error.cpp b/src/mongo/scripting/mozjs/error.cpp index fe2e8be96484f..405f5c00e085b 100644 --- a/src/mongo/scripting/mozjs/error.cpp +++ b/src/mongo/scripting/mozjs/error.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/scripting/mozjs/error.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/error.h b/src/mongo/scripting/mozjs/error.h index 4f9bf7a0897fe..43d06767a9e29 100644 --- a/src/mongo/scripting/mozjs/error.h +++ b/src/mongo/scripting/mozjs/error.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/exception.cpp b/src/mongo/scripting/mozjs/exception.cpp index d7f3f2f6e408c..4af68e46aadba 100644 --- a/src/mongo/scripting/mozjs/exception.cpp +++ b/src/mongo/scripting/mozjs/exception.cpp @@ -29,16 +29,23 @@ #include "mongo/scripting/mozjs/exception.h" +#include +#include +#include #include -#include -#include #include +#include -#include "mongo/base/static_assert.h" +#include +#include + +#include "mongo/scripting/mozjs/error.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/jsstringwrapper.h" #include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/status.h" #include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wraptype.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/exception.h b/src/mongo/scripting/mozjs/exception.h index 27aca6e528dde..daede315a0762 100644 --- a/src/mongo/scripting/mozjs/exception.h +++ b/src/mongo/scripting/mozjs/exception.h @@ -29,10 +29,15 @@ #pragma once +#include +#include #include +#include #include "mongo/base/error_codes.h" +#include "mongo/base/status.h" #include "mongo/base/string_data.h" +#include "mongo/platform/compiler.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/global.cpp b/src/mongo/scripting/mozjs/global.cpp index 7fe731e5a957e..f52f5e965589f 100644 --- a/src/mongo/scripting/mozjs/global.cpp +++ b/src/mongo/scripting/mozjs/global.cpp @@ -28,20 +28,33 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/global.h" - +#include +#include +#include #include +#include +#include +#include 
-#include "mongo/base/init.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/log_truncation.h" #include "mongo/scripting/engine.h" +#include "mongo/scripting/mozjs/global.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/jsstringwrapper.h" -#include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/scripting/mozjs/global.h b/src/mongo/scripting/mozjs/global.h index 0d987c3a277c5..949e0b3d6b403 100644 --- a/src/mongo/scripting/mozjs/global.h +++ b/src/mongo/scripting/mozjs/global.h @@ -29,6 +29,10 @@ #pragma once +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/idwrapper.cpp b/src/mongo/scripting/mozjs/idwrapper.cpp index a774d9e0f81db..fb2d821a4a546 100644 --- a/src/mongo/scripting/mozjs/idwrapper.cpp +++ b/src/mongo/scripting/mozjs/idwrapper.cpp @@ -27,12 +27,15 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/scripting/mozjs/idwrapper.h" +#include +#include #include "mongo/base/error_codes.h" #include "mongo/scripting/mozjs/exception.h" +#include "mongo/scripting/mozjs/idwrapper.h" #include "mongo/scripting/mozjs/jsstringwrapper.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/scripting/mozjs/idwrapper.h b/src/mongo/scripting/mozjs/idwrapper.h index 72eb0e9a43c6b..6915033764e21 100644 --- a/src/mongo/scripting/mozjs/idwrapper.h +++ b/src/mongo/scripting/mozjs/idwrapper.h @@ -29,6 +29,9 @@ #pragma once +#include +#include +#include #include #include diff --git a/src/mongo/scripting/mozjs/implscope.cpp b/src/mongo/scripting/mozjs/implscope.cpp index 243a127321209..3f6c6c84f4a91 100644 --- a/src/mongo/scripting/mozjs/implscope.cpp +++ b/src/mongo/scripting/mozjs/implscope.cpp @@ -28,50 +28,72 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/implscope.h" - -#include -#include - -#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include +#include +#include +#include +#include #include #include #include #include +#include +#include #include #include +#include #include #include #include #include - -#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include #include "mongo/base/error_codes.h" -#include "mongo/config.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/operation_context.h" +#include "mongo/logv2/constants.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/log_truncation.h" +#include "mongo/logv2/redaction.h" #include "mongo/platform/decimal128.h" #include "mongo/platform/mutex.h" #include "mongo/platform/stack_locator.h" 
+#include "mongo/scripting/deadline_monitor.h" #include "mongo/scripting/jsexception.h" +#include "mongo/scripting/mozjs/exception.h" +#include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/jsstringwrapper.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" #include "mongo/util/assert_util.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - namespace mongo { // Generated symbols for JS files diff --git a/src/mongo/scripting/mozjs/implscope.h b/src/mongo/scripting/mozjs/implscope.h index 82830420cb4b8..fa14a86b74d25 100644 --- a/src/mongo/scripting/mozjs/implscope.h +++ b/src/mongo/scripting/mozjs/implscope.h @@ -29,12 +29,39 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include +#include #include +#include +#include +#include +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/db/operation_context.h" +#include "mongo/platform/decimal128.h" +#include "mongo/platform/mutex.h" +#include "mongo/scripting/engine.h" #include "mongo/scripting/mozjs/bindata.h" #include "mongo/scripting/mozjs/bson.h" #include "mongo/scripting/mozjs/code.h" @@ -67,7 +94,11 @@ #include "mongo/scripting/mozjs/status.h" #include "mongo/scripting/mozjs/timestamp.h" #include "mongo/scripting/mozjs/uri.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" #include "mongo/stdx/unordered_set.h" +#include "mongo/util/duration.h" #include "mongo/util/string_map.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/internedstring.cpp b/src/mongo/scripting/mozjs/internedstring.cpp index a4e040317f484..9dfd3bc2dab14 100644 --- a/src/mongo/scripting/mozjs/internedstring.cpp +++ b/src/mongo/scripting/mozjs/internedstring.cpp @@ -27,11 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include -#include "mongo/scripting/mozjs/internedstring.h" +#include +#include +#include #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { @@ -48,6 +52,7 @@ InternedStringTable::InternedStringTable(JSContext* cx) { _internedStrings[i++].init(cx, JS::PropertyKey::fromPinnedString(s)); \ } while (0); #include "mongo/scripting/mozjs/internedstring.defs" + #undef MONGO_MOZJS_INTERNED_STRING } diff --git a/src/mongo/scripting/mozjs/internedstring.h b/src/mongo/scripting/mozjs/internedstring.h index aa092981d8435..d7fb18eeab70f 100644 --- a/src/mongo/scripting/mozjs/internedstring.h +++ b/src/mongo/scripting/mozjs/internedstring.h @@ -30,6 +30,10 @@ #pragma once #include +#include +#include +#include +#include #include namespace mongo { @@ -43,6 +47,7 @@ namespace mozjs { enum class InternedString { #define MONGO_MOZJS_INTERNED_STRING(name, str) name, #include "mongo/scripting/mozjs/internedstring.defs" + #undef MONGO_MOZJS_INTERNED_STRING NUM_IDS, }; diff --git a/src/mongo/scripting/mozjs/jscustomallocator.cpp b/src/mongo/scripting/mozjs/jscustomallocator.cpp index 28afb60fa8897..76251dc4db969 100644 --- a/src/mongo/scripting/mozjs/jscustomallocator.cpp +++ b/src/mongo/scripting/mozjs/jscustomallocator.cpp @@ -27,14 +27,14 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include -#include #include -#include -#include "mongo/config.h" +#include +#include +#include +#include + +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/scripting/mozjs/implscope.h" #ifdef __linux__ @@ -49,10 +49,6 @@ #define MONGO_NO_MALLOC_USABLE_SIZE #endif -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - /** * This shim interface (which controls dynamic allocation within SpiderMonkey), * consciously uses std::malloc and friends over mongoMalloc. It does this diff --git a/src/mongo/scripting/mozjs/jsstringwrapper.cpp b/src/mongo/scripting/mozjs/jsstringwrapper.cpp index 441a2415663e3..ab68ca026bf0e 100644 --- a/src/mongo/scripting/mozjs/jsstringwrapper.cpp +++ b/src/mongo/scripting/mozjs/jsstringwrapper.cpp @@ -27,16 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/jsstringwrapper.h" - +#include +#include +#include #include -#include -#include +#include + +#include #include "mongo/base/error_codes.h" #include "mongo/scripting/mozjs/exception.h" +#include "mongo/scripting/mozjs/jsstringwrapper.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/jsstringwrapper.h b/src/mongo/scripting/mozjs/jsstringwrapper.h index 65b61a42b0183..bd14e4a4cdc7c 100644 --- a/src/mongo/scripting/mozjs/jsstringwrapper.h +++ b/src/mongo/scripting/mozjs/jsstringwrapper.h @@ -29,7 +29,9 @@ #pragma once +#include #include +#include #include #include #include diff --git a/src/mongo/scripting/mozjs/jsthread.cpp b/src/mongo/scripting/mozjs/jsthread.cpp index caa010f7083bc..3ccbf7c7b3777 100644 --- a/src/mongo/scripting/mozjs/jsthread.cpp +++ b/src/mongo/scripting/mozjs/jsthread.cpp @@ -28,27 +28,44 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/jsthread.h" - -#include +#include +#include #include +#include #include +#include +#include +#include #include #include -#include - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/client.h" -#include "mongo/db/jsobj.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/mutex.h" +#include "mongo/scripting/engine.h" +#include "mongo/scripting/mozjs/engine.h" +#include "mongo/scripting/mozjs/exception.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/jsthread.h" +#include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" -#include "mongo/util/stacktrace.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -193,6 +210,13 @@ class JSThreadConfig { MozJSImplScope scope(static_cast(getGlobalScriptEngine()), boost::none /* Don't override global jsHeapLimitMB */); Client::initThread("js"); + + // TODO(SERVER-74662): Please revisit if this thread could be made killable. + { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + scope.setParentStack(thisv->_sharedData->_stack); thisv->_sharedData->_returnData = scope.callThreadArgs(thisv->_sharedData->_args); } catch (...) { diff --git a/src/mongo/scripting/mozjs/jsthread.h b/src/mongo/scripting/mozjs/jsthread.h index 785ce622a6500..d326b5d109d6b 100644 --- a/src/mongo/scripting/mozjs/jsthread.h +++ b/src/mongo/scripting/mozjs/jsthread.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/maxkey.cpp b/src/mongo/scripting/mozjs/maxkey.cpp index dd49131254f56..9579b580f233f 100644 --- a/src/mongo/scripting/mozjs/maxkey.cpp +++ b/src/mongo/scripting/mozjs/maxkey.cpp @@ -27,15 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/scripting/mozjs/maxkey.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/maxkey.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/maxkey.h b/src/mongo/scripting/mozjs/maxkey.h index 0023314b56c50..07f34a6e053d0 100644 --- a/src/mongo/scripting/mozjs/maxkey.h +++ b/src/mongo/scripting/mozjs/maxkey.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/minkey.cpp b/src/mongo/scripting/mozjs/minkey.cpp index 287eb10315547..f9b11fc2e4be2 100644 --- a/src/mongo/scripting/mozjs/minkey.cpp +++ b/src/mongo/scripting/mozjs/minkey.cpp @@ -27,15 +27,24 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/scripting/mozjs/minkey.h" +#include +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/minkey.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/minkey.h b/src/mongo/scripting/mozjs/minkey.h index 46fff7de9f6bd..f939549ea24b1 100644 --- a/src/mongo/scripting/mozjs/minkey.h +++ b/src/mongo/scripting/mozjs/minkey.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/module_loader.cpp b/src/mongo/scripting/mozjs/module_loader.cpp index a2b3d62906ce2..6070ff31c1a35 100644 --- a/src/mongo/scripting/mozjs/module_loader.cpp +++ b/src/mongo/scripting/mozjs/module_loader.cpp @@ -27,18 +27,47 @@ * it in the license file. 
*/ -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/errc.hpp" +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/module_loader.h" #include "mongo/util/file.h" -#include -#include -#include -#include - #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault namespace mongo { diff --git a/src/mongo/scripting/mozjs/module_loader.h b/src/mongo/scripting/mozjs/module_loader.h index 1a180c1bf8230..4467036e845f3 100644 --- a/src/mongo/scripting/mozjs/module_loader.h +++ b/src/mongo/scripting/mozjs/module_loader.h @@ -30,9 +30,13 @@ #pragma once #include +#include #include - +#include +#include +#include #include +#include #include "mongo/base/string_data.h" diff --git a/src/mongo/scripting/mozjs/module_loader_test.cpp b/src/mongo/scripting/mozjs/module_loader_test.cpp index f85951ef21dfd..c6611278dd275 100644 --- a/src/mongo/scripting/mozjs/module_loader_test.cpp +++ b/src/mongo/scripting/mozjs/module_loader_test.cpp @@ -27,11 +27,19 @@ * it in the license file. */ -#include +#include +#include +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/scripting/engine.h" #include "mongo/scripting/mozjs/implscope.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/mongo.cpp b/src/mongo/scripting/mozjs/mongo.cpp index 8ebcf06e796fa..689bc5bb14ea1 100644 --- a/src/mongo/scripting/mozjs/mongo.cpp +++ b/src/mongo/scripting/mozjs/mongo.cpp @@ -29,33 +29,65 @@ #include "mongo/scripting/mozjs/mongo.h" +#include +#include +#include +#include +#include #include +#include #include - -#include "mongo/bson/simple_bsonelement_comparator.h" +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/client_api_version_parameters_gen.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" +#include "mongo/client/dbclient_cursor.h" #include "mongo/client/dbclient_rs.h" -#include "mongo/client/global_conn_pool.h" #include "mongo/client/mongo_uri.h" +#include "mongo/client/read_preference.h" #include "mongo/client/replica_set_monitor.h" +#include "mongo/client/replica_set_monitor_manager.h" #include "mongo/client/sasl_oidc_client_conversation.h" +#include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/database_name.h" #include "mongo/db/namespace_string.h" -#include "mongo/db/operation_context.h" -#include "mongo/db/session/logical_session_id.h" -#include "mongo/db/session/logical_session_id_helpers.h" -#include "mongo/scripting/dbdirectclient_factory.h" +#include "mongo/db/query/find_command.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include 
"mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/metadata.h" #include "mongo/scripting/engine.h" #include "mongo/scripting/mozjs/cursor.h" +#include "mongo/scripting/mozjs/cursor_handle.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/numberlong.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/session.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep #include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/quick_exit.h" +#include "mongo/util/str.h" +#include "mongo/util/uuid.h" namespace mongo { namespace mozjs { @@ -65,6 +97,11 @@ const JSFunctionSpec MongoBase::methods[] = { MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(cleanup, MongoExternalInfo), MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(close, MongoExternalInfo), MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(compact, MongoExternalInfo), + MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(setAutoEncryption, MongoExternalInfo), + MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(getAutoEncryptionOptions, MongoExternalInfo), + MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(unsetAutoEncryption, MongoExternalInfo), + MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(toggleAutoEncryption, MongoExternalInfo), + MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(isAutoEncryptionEnabled, MongoExternalInfo), MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(cursorHandleFromId, MongoExternalInfo), MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(find, MongoExternalInfo), MONGO_ATTACH_JS_CONSTRAINED_METHOD_NO_PROTO(generateDataKey, MongoExternalInfo), @@ -90,6 +127,7 @@ const JSFunctionSpec MongoBase::methods[] = { const char* const MongoBase::className = "Mongo"; EncryptedDBClientCallback* encryptedDBClientCallback = nullptr; +EncryptedDBClientFromExistingCallback* encryptedDBClientFromExistingCallback = nullptr; GetNestedConnectionCallback* getNestedConnectionCallback = nullptr; const JSFunctionSpec MongoExternalInfo::freeFunctions[4] = { @@ -101,12 +139,58 @@ const JSFunctionSpec MongoExternalInfo::freeFunctions[4] = { namespace { +// This class holds a "raw" DBClient connection, along with an optional "encrypted" DBClient +// that performs automatic encryption of requests before forwarding them to the raw connection. +// If auto encryption is enabled, this returns the encrypted DBClient in getConnection(). +class DBClientWithAutoEncryption { +public: + DBClientWithAutoEncryption(std::shared_ptr rawConn, + std::shared_ptr encryptedConn) + : _rawConn(std::move(rawConn)), + _encryptedConn(std::move(encryptedConn)), + _encryptionEnabled(_encryptedConn != nullptr) { + invariant(_rawConn); + } + + void setEncryptedConnection(std::shared_ptr encryptedConn) { + _encryptedConn = std::move(encryptedConn); + } + + std::shared_ptr& getConnection() { + return this->isEncryptionEnabled() ? 
_encryptedConn : _rawConn; + } + + std::shared_ptr& getRawConnection() { + return _rawConn; + } + + std::shared_ptr& getEncryptedConnection() { + return _encryptedConn; + } + + bool isEncryptionEnabled() const { + return _encryptionEnabled && _encryptedConn != nullptr; + } + + void toggleAutoEncryption(bool enable) { + uassert(7760001, + "Auto encryption is not configured on this connection", + _encryptedConn != nullptr); + _encryptionEnabled = enable; + } + +private: + std::shared_ptr _rawConn; + std::shared_ptr _encryptedConn; + bool _encryptionEnabled; +}; + const std::shared_ptr& getConnectionRef(JS::CallArgs& args) { - auto ret = - static_cast*>(JS::GetPrivate(args.thisv().toObjectOrNull())); + auto ret = static_cast*>( + JS::GetPrivate(args.thisv().toObjectOrNull())); uassert( ErrorCodes::BadValue, "Trying to get connection for closed Mongo object", *ret != nullptr); - return *ret; + return (*ret)->getConnection(); } DBClientBase* getConnection(JS::CallArgs& args) { @@ -126,11 +210,10 @@ void setCursor(MozJSImplScope* scope, JS::HandleObject target, std::unique_ptr cursor, JS::CallArgs& args) { - auto client = - static_cast*>(JS::GetPrivate(args.thisv().toObjectOrNull())); + auto client = getConnectionRef(args); // Copy the client shared pointer to up the refcount - JS::SetPrivate(target, scope->trackedNew(std::move(cursor), *client)); + JS::SetPrivate(target, scope->trackedNew(std::move(cursor), client)); } void setCursorHandle(MozJSImplScope* scope, @@ -138,13 +221,12 @@ void setCursorHandle(MozJSImplScope* scope, NamespaceString ns, long long cursorId, JS::CallArgs& args) { - auto client = - static_cast*>(JS::GetPrivate(args.thisv().toObjectOrNull())); + auto client = getConnectionRef(args); // Copy the client shared pointer to up the refcount. JS::SetPrivate( target, - scope->trackedNew(std::move(ns), cursorId, *client)); + scope->trackedNew(std::move(ns), cursorId, client)); } void setHiddenMongo(JSContext* cx, JS::HandleValue value, JS::CallArgs& args) { @@ -160,12 +242,12 @@ void setHiddenMongo(JSContext* cx, JS::CallArgs& args) { void setHiddenMongo(JSContext* cx, std::shared_ptr resPtr, - DBClientBase* origConn, + std::shared_ptr origClient, JS::CallArgs& args) { ObjectWrapper o(cx, args.rval()); // If the connection that ran the command is the same as conn, then we set a hidden "_mongo" // property on the returned object that is just "this" Mongo object. 
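For readers skimming the diff: the DBClientWithAutoEncryption wrapper above is the heart of this change, since every shell-visible operation now asks the wrapper which connection to use. The sketch below is a minimal, self-contained analogue of that selection logic only — `Conn` is a made-up stub standing in for DBClientBase and a plain `assert` replaces `uassert`/`invariant` — so it illustrates the enable/disable semantics under those assumptions rather than reproducing the shell's real types.

```cpp
// Minimal sketch of the raw-vs-encrypted routing above.
// "Conn" is a hypothetical stub, not the shell's DBClientBase.
#include <cassert>
#include <iostream>
#include <memory>

struct Conn {
    explicit Conn(const char* n) : name(n) {}
    const char* name;
};

class ClientWithAutoEncryption {
public:
    ClientWithAutoEncryption(std::shared_ptr<Conn> raw, std::shared_ptr<Conn> encrypted)
        : _raw(std::move(raw)), _encrypted(std::move(encrypted)), _enabled(_encrypted != nullptr) {
        assert(_raw);  // a raw connection is always required
    }

    // Route through the encrypted connection only when one exists and is enabled.
    std::shared_ptr<Conn>& connection() {
        return (_enabled && _encrypted) ? _encrypted : _raw;
    }

    // Toggling requires that an encrypted connection was configured first.
    void toggle(bool enable) {
        assert(_encrypted != nullptr);
        _enabled = enable;
    }

private:
    std::shared_ptr<Conn> _raw;
    std::shared_ptr<Conn> _encrypted;
    bool _enabled;
};

int main() {
    ClientWithAutoEncryption client(std::make_shared<Conn>("raw"),
                                    std::make_shared<Conn>("encrypted"));
    std::cout << client.connection()->name << "\n";  // prints "encrypted"
    client.toggle(false);
    std::cout << client.connection()->name << "\n";  // prints "raw"
}
```

The same precedence explains why setHiddenMongo below now compares the result connection against origClient->getConnection().get() rather than against the raw pointer it used before.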
- if (resPtr.get() == origConn) { + if (resPtr.get() == origClient->getConnection().get()) { setHiddenMongo(cx, args.thisv(), args); } else { JS::RootedObject newMongo(cx); @@ -174,8 +256,16 @@ void setHiddenMongo(JSContext* cx, scope->getProto().newObject(&newMongo); auto host = resPtr->getServerAddress(); - JS::SetPrivate(newMongo, - scope->trackedNew>(std::move(resPtr))); + + auto encConn = origClient->getEncryptedConnection(); + if (encConn && encryptedDBClientFromExistingCallback) { + encConn = encryptedDBClientFromExistingCallback(encConn, resPtr, cx); + } + + auto newClient = std::make_unique(std::move(resPtr), encConn); + JS::SetPrivate( + newMongo, + scope->trackedNew>(newClient.release())); ObjectWrapper from(cx, args.thisv()); ObjectWrapper to(cx, newMongo); @@ -204,10 +294,20 @@ EncryptionCallbacks* getEncryptionCallbacks(DBClientBase* conn) { return callbackPtr; } +std::shared_ptr runEncryptedDBClientCallback(std::shared_ptr conn, + JS::HandleValue arg, + JS::HandleObject mongoConnection, + JSContext* cx) { + if (encryptedDBClientCallback != nullptr) { + return encryptedDBClientCallback(std::move(conn), arg, mongoConnection, cx); + } + return nullptr; +} + } // namespace void MongoBase::finalize(JSFreeOp* fop, JSObject* obj) { - auto conn = static_cast*>(JS::GetPrivate(obj)); + auto conn = static_cast*>(JS::GetPrivate(obj)); if (conn) { getScope(fop)->trackedDelete(conn); @@ -215,11 +315,13 @@ void MongoBase::finalize(JSFreeOp* fop, JSObject* obj) { } void MongoBase::trace(JSTracer* trc, JSObject* obj) { - auto conn = static_cast*>(JS::GetPrivate(obj)); - if (!conn) { + auto client = static_cast*>(JS::GetPrivate(obj)); + if (!client || !(*client)) { return; } - auto callbackPtr = dynamic_cast(conn->get()); + auto conn = (*client)->getConnection(); + + auto callbackPtr = dynamic_cast(conn.get()); if (callbackPtr != nullptr) { callbackPtr->trace(trc); } @@ -229,7 +331,7 @@ void MongoBase::Functions::close::call(JSContext* cx, JS::CallArgs args) { getConnection(args); auto thisv = args.thisv().toObjectOrNull(); - auto conn = static_cast*>(JS::GetPrivate(thisv)); + auto conn = static_cast*>(JS::GetPrivate(thisv)); conn->reset(); @@ -297,7 +399,10 @@ void doRunCommand(JSContext* cx, JS::CallArgs args, MakeRequest makeRequest) { } ValueReader(cx, args.rval()).fromBSON(reply, nullptr, false /* read only */); - setHiddenMongo(cx, std::get<1>(res), conn.get(), args); + + auto origClient = static_cast*>( + JS::GetPrivate(args.thisv().toObjectOrNull())); + setHiddenMongo(cx, std::get<1>(res), *origClient, args); ObjectWrapper o(cx, args.rval()); if (!o.hasField(InternedString::_commandObj)) { @@ -320,7 +425,8 @@ void MongoBase::Functions::_runCommandImpl::call(JSContext* cx, JS::CallArgs arg str::stream() << "The options parameter to runCommand must be a number", args.get(2).isNumber()); auto options = ValueWriter(cx, args.get(2)).toInt32(); - return rpc::upconvertRequest({boost::none, database}, cmd, options); + return rpc::upconvertRequest( + DatabaseName::createDatabaseName_forTest(boost::none, database), cmd, options); }); } @@ -400,6 +506,81 @@ void MongoBase::Functions::cleanup::call(JSContext* cx, JS::CallArgs args) { ptr->cleanup(cx, args); } +void MongoBase::Functions::setAutoEncryption::call(JSContext* cx, JS::CallArgs args) { + if (args.length() < 1) + uasserted(ErrorCodes::BadValue, "setAutoEncryption needs at least 1 arg"); + + std::shared_ptr kvClient; + JS::RootedObject keyvaultConn(cx); + if (args.length() > 1 && !args.get(1).isUndefined()) { + uassert(7760001, + 
str::stream() << "the second parameter to setAutoEncryption() must be an object", + args.get(1).isObject()); + keyvaultConn.set(args.get(1).toObjectOrNull()); + kvClient = *(static_cast*>( + JS::GetPrivate(args.get(1).toObjectOrNull()))); + } else { + keyvaultConn.set(args.thisv().toObjectOrNull()); + kvClient = *(static_cast*>( + JS::GetPrivate(args.thisv().toObjectOrNull()))); + } + + auto client = *(static_cast*>( + JS::GetPrivate(args.thisv().toObjectOrNull()))); + + if (client->getEncryptedConnection() != nullptr) { + uasserted(ErrorCodes::BadValue, "Auto encryption is already set on this connection"); + } + + auto encConn = + runEncryptedDBClientCallback(client->getRawConnection(), args.get(0), keyvaultConn, cx); + bool retval = encConn != nullptr; + if (encConn) { + client->setEncryptedConnection(std::move(encConn)); + } + args.rval().setBoolean(retval); +} + +void MongoBase::Functions::getAutoEncryptionOptions::call(JSContext* cx, JS::CallArgs args) { + auto client = *(static_cast*>( + JS::GetPrivate(args.thisv().toObjectOrNull()))); + auto encConn = client->getEncryptedConnection(); + if (encConn) { + auto ptr = getEncryptionCallbacks(encConn.get()); + ptr->getEncryptionOptions(cx, args); + } else { + // no auto-encryption is set, so return undefined + args.rval().setUndefined(); + } +} + +void MongoBase::Functions::unsetAutoEncryption::call(JSContext* cx, JS::CallArgs args) { + auto client = *(static_cast*>( + JS::GetPrivate(args.thisv().toObjectOrNull()))); + client->toggleAutoEncryption(false); + client->setEncryptedConnection(nullptr); + args.rval().setBoolean(true); +} + +void MongoBase::Functions::toggleAutoEncryption::call(JSContext* cx, JS::CallArgs args) { + if (args.length() != 1) + uasserted(ErrorCodes::BadValue, "toggleAutoEncryption needs 1 arg"); + if (!args.get(0).isBoolean()) { + uasserted(ErrorCodes::BadValue, "first argument to toggleAutoEncryption must be a boolean"); + } + auto enable = ValueWriter(cx, args.get(0)).toBoolean(); + auto client = *(static_cast*>( + JS::GetPrivate(args.thisv().toObjectOrNull()))); + client->toggleAutoEncryption(enable); + args.rval().setBoolean(true); +} + +void MongoBase::Functions::isAutoEncryptionEnabled::call(JSContext* cx, JS::CallArgs args) { + auto client = *(static_cast*>( + JS::GetPrivate(args.thisv().toObjectOrNull()))); + args.rval().setBoolean(client->isEncryptionEnabled()); +} + void MongoBase::Functions::compact::call(JSContext* cx, JS::CallArgs args) { auto conn = getConnection(args); auto ptr = getEncryptionCallbacks(conn); @@ -444,7 +625,7 @@ void MongoBase::Functions::cursorHandleFromId::call(JSContext* cx, JS::CallArgs JS::RootedObject c(cx); scope->getProto().newObject(&c); - setCursorHandle(scope, c, NamespaceString(ns), cursorId, args); + setCursorHandle(scope, c, NamespaceString::createNamespaceString_forTest(ns), cursorId, args); args.rval().setObjectOrNull(c); } @@ -503,19 +684,11 @@ void MongoBase::Functions::_markNodeAsFailed::call(JSContext* cx, JS::CallArgs a args.rval().setUndefined(); } -std::unique_ptr runEncryptedDBClientCallback(std::unique_ptr conn, - JS::HandleValue arg, - JS::HandleObject mongoConnection, - JSContext* cx) { - if (encryptedDBClientCallback != nullptr) { - return encryptedDBClientCallback(std::move(conn), arg, mongoConnection, cx); - } - return conn; -} - void setEncryptedDBClientCallbacks(EncryptedDBClientCallback* encCallback, + EncryptedDBClientFromExistingCallback* encFromExistingCallback, GetNestedConnectionCallback* getCallback) { encryptedDBClientCallback = encCallback; + 
encryptedDBClientFromExistingCallback = encFromExistingCallback; getNestedConnectionCallback = getCallback; } @@ -556,7 +729,7 @@ void MongoExternalInfo::construct(JSContext* cx, JS::CallArgs args) { boost::optional appname = cs.getAppName(); std::string errmsg; - std::unique_ptr conn( + std::shared_ptr conn( cs.connect(appname.value_or("MongoDB Shell"), errmsg, boost::none, &apiParameters)); if (!conn.get()) { @@ -569,9 +742,12 @@ void MongoExternalInfo::construct(JSContext* cx, JS::CallArgs args) { scope->getProto().newObject(&thisv); ObjectWrapper o(cx, thisv); - conn = runEncryptedDBClientCallback(std::move(conn), args.get(1), thisv, cx); + auto encConn = runEncryptedDBClientCallback(conn, args.get(1), thisv, cx); - JS::SetPrivate(thisv, scope->trackedNew>(conn.release())); + auto client = std::make_unique(conn, encConn); + + JS::SetPrivate( + thisv, scope->trackedNew>(client.release())); o.setBoolean(InternedString::slaveOk, false); o.setString(InternedString::host, cs.connectionString().toString()); @@ -626,14 +802,13 @@ void MongoBase::Functions::getApiParameters::call(JSContext* cx, JS::CallArgs ar } void MongoBase::Functions::_startSession::call(JSContext* cx, JS::CallArgs args) { - auto client = - static_cast*>(JS::GetPrivate(args.thisv().toObjectOrNull())); + auto client = getConnectionRef(args); LogicalSessionIdToClient id; id.setId(UUID::gen()); JS::RootedObject obj(cx); - SessionInfo::make(cx, &obj, *client, id.toBSON()); + SessionInfo::make(cx, &obj, client, id.toBSON()); args.rval().setObjectOrNull(obj.get()); } @@ -657,9 +832,10 @@ void MongoBase::Functions::_setOIDCIdPAuthCallback::call(JSContext* cx, JS::Call // the function as a string, stash it into a lambda, and execute it directly when needed. std::string stringifiedFn = ValueWriter(cx, args.get(0)).toString(); SaslOIDCClientConversation::setOIDCIdPAuthCallback( - [=](StringData userName, StringData idpEndpoint) { + [=](StringData userName, StringData idpEndpoint, StringData userCode) { auto* jsScope = getGlobalScriptEngine()->newScope(); - BSONObj authInfo = BSON("userName" << userName << "activationEndpoint" << idpEndpoint); + BSONObj authInfo = BSON("userName" << userName << "userCode" << userCode + << "activationEndpoint" << idpEndpoint); ScriptingFunction function = jsScope->createFunction(stringifiedFn.c_str()); jsScope->invoke(function, nullptr, &authInfo); }); diff --git a/src/mongo/scripting/mozjs/mongo.h b/src/mongo/scripting/mozjs/mongo.h index 57a08201ed5d4..859b609f1c454 100644 --- a/src/mongo/scripting/mozjs/mongo.h +++ b/src/mongo/scripting/mozjs/mongo.h @@ -29,19 +29,35 @@ #pragma once +#include + +#include +#include +#include +#include +#include +#include + #include "mongo/client/dbclient_base.h" +#include "mongo/scripting/mozjs/base.h" +#include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { namespace mozjs { -using EncryptedDBClientCallback = std::unique_ptr(std::unique_ptr, +using EncryptedDBClientCallback = std::shared_ptr(std::shared_ptr, JS::HandleValue, JS::HandleObject, JSContext*); +using EncryptedDBClientFromExistingCallback = std::shared_ptr( + std::shared_ptr, std::shared_ptr, JSContext*); + using GetNestedConnectionCallback = DBClientBase*(DBClientBase*); + void setEncryptedDBClientCallbacks(EncryptedDBClientCallback* encCallback, + EncryptedDBClientFromExistingCallback* encFromExistingCallback, GetNestedConnectionCallback* getCallback); /** @@ -60,6 +76,13 @@ struct MongoBase : public BaseInfo { MONGO_DECLARE_JS_FUNCTION(close); 
MONGO_DECLARE_JS_FUNCTION(cleanup); MONGO_DECLARE_JS_FUNCTION(compact); + + MONGO_DECLARE_JS_FUNCTION(setAutoEncryption); + MONGO_DECLARE_JS_FUNCTION(getAutoEncryptionOptions); + MONGO_DECLARE_JS_FUNCTION(unsetAutoEncryption); + MONGO_DECLARE_JS_FUNCTION(toggleAutoEncryption); + MONGO_DECLARE_JS_FUNCTION(isAutoEncryptionEnabled); + MONGO_DECLARE_JS_FUNCTION(cursorHandleFromId); MONGO_DECLARE_JS_FUNCTION(find); MONGO_DECLARE_JS_FUNCTION(generateDataKey); @@ -84,7 +107,7 @@ struct MongoBase : public BaseInfo { MONGO_DECLARE_JS_FUNCTION(_refreshAccessToken); }; - static const JSFunctionSpec methods[24]; + static const JSFunctionSpec methods[29]; static const char* const className; static const unsigned classFlags = JSCLASS_HAS_PRIVATE; @@ -114,6 +137,7 @@ class EncryptionCallbacks { virtual void cleanup(JSContext* cx, JS::CallArgs args) = 0; virtual void compact(JSContext* cx, JS::CallArgs args) = 0; virtual void trace(JSTracer* trc) = 0; + virtual void getEncryptionOptions(JSContext* cx, JS::CallArgs args) = 0; }; void setEncryptionCallbacks(DBClientBase* conn, EncryptionCallbacks* callbacks); diff --git a/src/mongo/scripting/mozjs/mongohelpers.cpp b/src/mongo/scripting/mozjs/mongohelpers.cpp index 518ebe10f5c0f..aeefb1ea33721 100644 --- a/src/mongo/scripting/mozjs/mongohelpers.cpp +++ b/src/mongo/scripting/mozjs/mongohelpers.cpp @@ -27,19 +27,21 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/mongohelpers.h" - +#include #include - #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/scripting/engine.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/mongohelpers.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace JSFiles { diff --git a/src/mongo/scripting/mozjs/mongohelpers.h b/src/mongo/scripting/mozjs/mongohelpers.h index 6d2165ebc0a2c..e0415e7293f76 100644 --- a/src/mongo/scripting/mozjs/mongohelpers.h +++ b/src/mongo/scripting/mozjs/mongohelpers.h @@ -29,6 +29,11 @@ #pragma once +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/nativefunction.cpp b/src/mongo/scripting/mozjs/nativefunction.cpp index ab60a71721a0c..872872f698065 100644 --- a/src/mongo/scripting/mozjs/nativefunction.cpp +++ b/src/mongo/scripting/mozjs/nativefunction.cpp @@ -27,19 +27,24 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/nativefunction.h" - -#include #include #include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/nativefunction.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/nativefunction.h b/src/mongo/scripting/mozjs/nativefunction.h index 633f48bd4f20e..623521f059f2c 100644 --- a/src/mongo/scripting/mozjs/nativefunction.h +++ b/src/mongo/scripting/mozjs/nativefunction.h @@ -29,7 +29,13 @@ #pragma once +#include +#include +#include +#include + #include "mongo/scripting/engine.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/numberdecimal.cpp b/src/mongo/scripting/mozjs/numberdecimal.cpp index 531687bbddf15..3026ef8bc06b8 100644 --- a/src/mongo/scripting/mozjs/numberdecimal.cpp +++ b/src/mongo/scripting/mozjs/numberdecimal.cpp @@ -27,20 +27,27 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/numberdecimal.h" - #include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/platform/decimal128.h" #include "mongo/scripting/mozjs/implscope.h" -#include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/numberdecimal.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/numberdecimal.h b/src/mongo/scripting/mozjs/numberdecimal.h index c79158c7fa812..4c5697c16283c 100644 --- a/src/mongo/scripting/mozjs/numberdecimal.h +++ b/src/mongo/scripting/mozjs/numberdecimal.h @@ -29,7 +29,13 @@ #pragma once +#include +#include +#include +#include + #include "mongo/platform/decimal128.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/numberint.cpp b/src/mongo/scripting/mozjs/numberint.cpp index d1a5892430f83..214e0e48d9d4b 100644 --- a/src/mongo/scripting/mozjs/numberint.cpp +++ b/src/mongo/scripting/mozjs/numberint.cpp @@ -27,17 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/numberint.h" - +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/scripting/mozjs/implscope.h" -#include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/numberint.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/numberint.h b/src/mongo/scripting/mozjs/numberint.h index 005c8c7540a98..e9ee34205b4c4 100644 --- a/src/mongo/scripting/mozjs/numberint.h +++ b/src/mongo/scripting/mozjs/numberint.h @@ -29,6 +29,12 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/numberlong.cpp b/src/mongo/scripting/mozjs/numberlong.cpp index 55cf3a88bdb65..caf085dc9073e 100644 --- a/src/mongo/scripting/mozjs/numberlong.cpp +++ b/src/mongo/scripting/mozjs/numberlong.cpp @@ -27,23 +27,38 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/numberlong.h" - -#include -#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/numberlong.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/represent_as.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/numberlong.h b/src/mongo/scripting/mozjs/numberlong.h index 9400997b0a373..7e7cbba0e7332 100644 --- a/src/mongo/scripting/mozjs/numberlong.h +++ b/src/mongo/scripting/mozjs/numberlong.h @@ -29,8 +29,14 @@ #pragma once +#include +#include +#include +#include +#include #include +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/object.cpp b/src/mongo/scripting/mozjs/object.cpp index ec1de9203915e..32d86c1df9f49 100644 --- a/src/mongo/scripting/mozjs/object.cpp +++ b/src/mongo/scripting/mozjs/object.cpp @@ -27,14 +27,17 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include -#include "mongo/scripting/mozjs/object.h" +#include -#include "mongo/scripting/mozjs/implscope.h" -#include "mongo/scripting/mozjs/objectwrapper.h" -#include "mongo/scripting/mozjs/valuereader.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/scripting/mozjs/object.h" #include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/object.h b/src/mongo/scripting/mozjs/object.h index f0ada4c11bccb..c869988fd60bc 100644 --- a/src/mongo/scripting/mozjs/object.h +++ b/src/mongo/scripting/mozjs/object.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/objectwrapper.cpp b/src/mongo/scripting/mozjs/objectwrapper.cpp index d515223718d30..40d5de6cbea29 100644 --- a/src/mongo/scripting/mozjs/objectwrapper.cpp +++ b/src/mongo/scripting/mozjs/objectwrapper.cpp @@ -27,23 +27,39 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/objectwrapper.h" - +#include #include -#include +#include +#include +#include #include - #include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include #include "mongo/base/error_codes.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/util/builder.h" #include "mongo/platform/decimal128.h" +#include "mongo/scripting/mozjs/bson.h" +#include "mongo/scripting/mozjs/dbref.h" #include "mongo/scripting/mozjs/idwrapper.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/objectwrapper.h b/src/mongo/scripting/mozjs/objectwrapper.h index 79f08cc2b7f66..d4e19c15abe17 100644 --- a/src/mongo/scripting/mozjs/objectwrapper.h +++ b/src/mongo/scripting/mozjs/objectwrapper.h @@ -29,13 +29,29 @@ #pragma once +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include - #include #include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/platform/decimal128.h" #include "mongo/scripting/engine.h" #include "mongo/scripting/mozjs/exception.h" diff --git a/src/mongo/scripting/mozjs/oid.cpp b/src/mongo/scripting/mozjs/oid.cpp index b76766e95b3dc..7893dcae49643 100644 --- a/src/mongo/scripting/mozjs/oid.cpp +++ b/src/mongo/scripting/mozjs/oid.cpp @@ -27,18 +27,29 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/oid.h" - +#include #include -#include - +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/scripting/engine.h" #include "mongo/scripting/mozjs/implscope.h" -#include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/oid.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/oid.h b/src/mongo/scripting/mozjs/oid.h index 9d4803f342fd8..15f28afe6d5ff 100644 --- a/src/mongo/scripting/mozjs/oid.h +++ b/src/mongo/scripting/mozjs/oid.h @@ -29,6 +29,13 @@ #pragma once +#include +#include +#include +#include + +#include "mongo/bson/oid.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/proxyscope.cpp b/src/mongo/scripting/mozjs/proxyscope.cpp index 1adc91f78d3d6..8dbd848718275 100644 --- a/src/mongo/scripting/mozjs/proxyscope.cpp +++ b/src/mongo/scripting/mozjs/proxyscope.cpp @@ -27,20 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/proxyscope.h" +#include +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include #include "mongo/db/client.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/platform/decimal128.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/proxyscope.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/idle_thread_block.h" #include "mongo/util/destructor_guard.h" #include "mongo/util/functional.h" -#include "mongo/util/quick_exit.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/interruptible.h" namespace mongo { namespace mozjs { @@ -364,9 +369,16 @@ void MozJSProxyScope::shutdownThread() { * break out of the loop and return. */ void MozJSProxyScope::implThread(MozJSProxyScope* proxy) { - if (hasGlobalServiceContext()) + if (hasGlobalServiceContext()) { Client::initThread("js"); + // TODO(SERVER-74662): Please revisit if this thread could be made killable. 
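The hunk continuing directly below repeats a pattern that also appears in jsthread.cpp earlier in this diff: immediately after Client::initThread("js"), the new interpreter thread takes its own client lock and opts out of being killed on stepdown. A hedged sketch of that idiom follows, using stand-in types (FakeClient with a std::mutex) because the real mongo::Client locking interface is not shown in this patch.

```cpp
// Sketch of the "mark this background thread unkillable by stepdown" idiom.
// FakeClient is a stand-in; the real code locks mongo::Client via stdx::lock_guard.
#include <mutex>

struct FakeClient {
    std::mutex m;
    bool systemOperationUnkillableByStepdown = false;

    // The real setter takes the caller's lock as proof the client is locked.
    void setSystemOperationUnkillableByStepdown(const std::lock_guard<std::mutex>&) {
        systemOperationUnkillableByStepdown = true;
    }
};

void markUnkillable(FakeClient& client) {
    // Scope the lock exactly as in the diff: take it, flip the flag, release.
    std::lock_guard<std::mutex> lk(client.m);
    client.setSystemOperationUnkillableByStepdown(lk);
}

int main() {
    FakeClient c;
    markUnkillable(c);
    return c.systemOperationUnkillableByStepdown ? 0 : 1;
}
```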
+ { + stdx::lock_guard lk(cc()); + cc().setSystemOperationUnkillableByStepdown(lk); + } + } + std::unique_ptr scope; // This will leave _status set for the first noop runOnImplThread(), which diff --git a/src/mongo/scripting/mozjs/proxyscope.h b/src/mongo/scripting/mozjs/proxyscope.h index bd0d3953b617c..6110c2288acb3 100644 --- a/src/mongo/scripting/mozjs/proxyscope.h +++ b/src/mongo/scripting/mozjs/proxyscope.h @@ -29,10 +29,22 @@ #pragma once +#include +#include #include +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" #include "mongo/client/dbclient_cursor.h" +#include "mongo/db/operation_context.h" +#include "mongo/platform/decimal128.h" #include "mongo/platform/mutex.h" +#include "mongo/scripting/engine.h" #include "mongo/scripting/mozjs/engine.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" diff --git a/src/mongo/scripting/mozjs/regexp.cpp b/src/mongo/scripting/mozjs/regexp.cpp index 75d7a7ac915d8..193c7da8860cc 100644 --- a/src/mongo/scripting/mozjs/regexp.cpp +++ b/src/mongo/scripting/mozjs/regexp.cpp @@ -27,11 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/scripting/mozjs/internedstring.h" +#include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/regexp.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/regexp.h b/src/mongo/scripting/mozjs/regexp.h index 87ee019cb63d3..b2ba0d63c0b2d 100644 --- a/src/mongo/scripting/mozjs/regexp.h +++ b/src/mongo/scripting/mozjs/regexp.h @@ -29,6 +29,9 @@ #pragma once +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/session.cpp b/src/mongo/scripting/mozjs/session.cpp index 3a18c6c401c3b..1e656ef97dd34 100644 --- a/src/mongo/scripting/mozjs/session.cpp +++ b/src/mongo/scripting/mozjs/session.cpp @@ -28,19 +28,32 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/session.h" - +#include +#include +#include #include - +#include +#include +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/database_name.h" #include "mongo/logv2/log.h" -#include "mongo/scripting/mozjs/bson.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/scripting/mozjs/implscope.h" -#include "mongo/scripting/mozjs/mongo.h" #include "mongo/scripting/mozjs/scripting_util_gen.h" +#include "mongo/scripting/mozjs/session.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery @@ -129,7 +142,7 @@ void 
endSession(SessionHolder* holder) { << holder->txnNumber << "autocommit" << false); [[maybe_unused]] auto ignored = - holder->client->runCommand(DatabaseName(boost::none, "admin"), abortObj, out); + holder->client->runCommand(DatabaseName::kAdmin, abortObj, out); } EndSessions es; @@ -137,7 +150,7 @@ void endSession(SessionHolder* holder) { es.setEndSessions({holder->lsid}); [[maybe_unused]] auto ignored = - holder->client->runCommand(DatabaseName(boost::none, "admin"), es.toBSON(), out); + holder->client->runCommand(DatabaseName::kAdmin, es.toBSON(), out); holder->client.reset(); } diff --git a/src/mongo/scripting/mozjs/session.h b/src/mongo/scripting/mozjs/session.h index a31ce50eaffcc..de36f09387432 100644 --- a/src/mongo/scripting/mozjs/session.h +++ b/src/mongo/scripting/mozjs/session.h @@ -29,7 +29,15 @@ #pragma once +#include + +#include +#include +#include + +#include "mongo/bson/bsonobj.h" #include "mongo/client/dbclient_base.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/status.cpp b/src/mongo/scripting/mozjs/status.cpp index e272fbf5b5c65..bed429bf72d55 100644 --- a/src/mongo/scripting/mozjs/status.cpp +++ b/src/mongo/scripting/mozjs/status.cpp @@ -27,19 +27,30 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/status.h" - +#include +#include +#include #include +#include +#include #include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/scripting/jsexception.h" +#include "mongo/scripting/mozjs/error.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/status.h" #include "mongo/scripting/mozjs/valuereader.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/status.h b/src/mongo/scripting/mozjs/status.h index 87881631763a9..b641b9c67fd91 100644 --- a/src/mongo/scripting/mozjs/status.h +++ b/src/mongo/scripting/mozjs/status.h @@ -29,6 +29,11 @@ #pragma once +#include +#include + +#include "mongo/base/status.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/timestamp.cpp b/src/mongo/scripting/mozjs/timestamp.cpp index 485d4efcff28e..31268e7a8f3a3 100644 --- a/src/mongo/scripting/mozjs/timestamp.cpp +++ b/src/mongo/scripting/mozjs/timestamp.cpp @@ -27,19 +27,27 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/timestamp.h" - +#include +#include #include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/timestamp.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" #include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/timestamp.h b/src/mongo/scripting/mozjs/timestamp.h index 7ee738d7e9833..d477ed4a3befa 100644 --- a/src/mongo/scripting/mozjs/timestamp.h +++ b/src/mongo/scripting/mozjs/timestamp.h @@ -29,6 +29,12 @@ #pragma once +#include +#include +#include + +#include "mongo/bson/timestamp.h" +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/uri.cpp b/src/mongo/scripting/mozjs/uri.cpp index 8b2eecd16ac4a..e15cd83ce233d 100644 --- a/src/mongo/scripting/mozjs/uri.cpp +++ b/src/mongo/scripting/mozjs/uri.cpp @@ -27,21 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/uri.h" - -#include -#include -#include - +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/client/mongo_uri.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/uri.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" -#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" -#include "mongo/util/str.h" +#include "mongo/scripting/mozjs/wrapconstrainedmethod.h" // IWYU pragma: keep +#include "mongo/util/assert_util.h" +#include "mongo/util/net/hostandport.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/uri.h b/src/mongo/scripting/mozjs/uri.h index 38e914c80624b..4087b33ca4639 100644 --- a/src/mongo/scripting/mozjs/uri.h +++ b/src/mongo/scripting/mozjs/uri.h @@ -29,6 +29,11 @@ #pragma once +#include +#include +#include + +#include "mongo/scripting/mozjs/base.h" #include "mongo/scripting/mozjs/wraptype.h" namespace mongo { diff --git a/src/mongo/scripting/mozjs/valuereader.cpp b/src/mongo/scripting/mozjs/valuereader.cpp index d1e0507c883bb..9cf44823d582e 100644 --- a/src/mongo/scripting/mozjs/valuereader.cpp +++ b/src/mongo/scripting/mozjs/valuereader.cpp @@ -28,24 +28,49 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/valuereader.h" - #include #include +#include #include #include #include +#include #include +#include +#include +#include #include +#include +#include + +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/oid.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/decimal128.h" +#include "mongo/scripting/mozjs/bindata.h" +#include "mongo/scripting/mozjs/bson.h" 
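Several include hunks in this section carry include-what-you-use annotations: wrapconstrainedmethod.h is tagged `// IWYU pragma: keep`, and a few internal headers (boost::system detail headers, "cxxabi.h", "ext/alloc_traits.h") are suppressed with `// IWYU pragma: no_include`. If those directives are unfamiliar, the small illustration below uses only ordinary standard headers, nothing MongoDB-specific; the pragmas are read by the IWYU tool alone and are inert to the compiler.

```cpp
// Illustration of the two IWYU pragma forms used in this diff.
// The header choices here are arbitrary; only the pragma syntax matters.

// Keep <ostream> even if a later refactor stops naming its symbols directly;
// the pragma tells IWYU not to flag or remove this include.
#include <ostream>  // IWYU pragma: keep

// Never suggest this implementation-detail header in place of <string>.
// IWYU pragma: no_include <bits/basic_string.h>

#include <iostream>
#include <string>

int main() {
    std::string msg = "pragmas only affect the IWYU tool, not the compiler";
    std::cout << msg << '\n';
    return 0;
}
```

In this patch the `keep` form protects headers whose symbols are reached only through macro or template expansion (wrapconstrainedmethod.h, text.h), presumably so IWYU does not strip them, while `no_include` stops the tool from recommending internal detail headers it would otherwise infer.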
+#include "mongo/scripting/mozjs/code.h" +#include "mongo/scripting/mozjs/dbpointer.h" +#include "mongo/scripting/mozjs/dbref.h" #include "mongo/scripting/mozjs/implscope.h" -#include "mongo/scripting/mozjs/objectwrapper.h" +#include "mongo/scripting/mozjs/maxkey.h" +#include "mongo/scripting/mozjs/minkey.h" +#include "mongo/scripting/mozjs/numberdecimal.h" +#include "mongo/scripting/mozjs/numberlong.h" +#include "mongo/scripting/mozjs/oid.h" +#include "mongo/scripting/mozjs/regexp.h" +#include "mongo/scripting/mozjs/timestamp.h" +#include "mongo/scripting/mozjs/valuereader.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kQuery diff --git a/src/mongo/scripting/mozjs/valuereader.h b/src/mongo/scripting/mozjs/valuereader.h index 8aeceb3aadbf7..f12ccd9a0694e 100644 --- a/src/mongo/scripting/mozjs/valuereader.h +++ b/src/mongo/scripting/mozjs/valuereader.h @@ -29,10 +29,16 @@ #pragma once +#include +#include +#include #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/platform/decimal128.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/valuewriter.cpp b/src/mongo/scripting/mozjs/valuewriter.cpp index 38ca200029ad3..59c4a42982e49 100644 --- a/src/mongo/scripting/mozjs/valuewriter.cpp +++ b/src/mongo/scripting/mozjs/valuewriter.cpp @@ -27,26 +27,50 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/scripting/mozjs/valuewriter.h" - +#include +#include +#include #include +#include #include #include #include #include +#include #include +#include +#include + +#include +#include #include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/platform/decimal128.h" +#include "mongo/scripting/mozjs/bindata.h" +#include "mongo/scripting/mozjs/code.h" +#include "mongo/scripting/mozjs/dbpointer.h" #include "mongo/scripting/mozjs/exception.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/jsstringwrapper.h" +#include "mongo/scripting/mozjs/maxkey.h" +#include "mongo/scripting/mozjs/minkey.h" +#include "mongo/scripting/mozjs/nativefunction.h" +#include "mongo/scripting/mozjs/numberdecimal.h" +#include "mongo/scripting/mozjs/numberint.h" +#include "mongo/scripting/mozjs/numberlong.h" #include "mongo/scripting/mozjs/objectwrapper.h" -#include "mongo/scripting/mozjs/valuereader.h" +#include "mongo/scripting/mozjs/oid.h" +#include "mongo/scripting/mozjs/timestamp.h" +#include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/util/assert_util.h" #include "mongo/util/base64.h" #include "mongo/util/represent_as.h" +#include "mongo/util/str.h" +#include "mongo/util/time_support.h" namespace mongo { namespace mozjs { diff --git a/src/mongo/scripting/mozjs/valuewriter.h b/src/mongo/scripting/mozjs/valuewriter.h index e78d1ee5d51c1..21b8bed680968 100644 --- a/src/mongo/scripting/mozjs/valuewriter.h +++ b/src/mongo/scripting/mozjs/valuewriter.h @@ -29,10 +29,22 @@ #pragma once +#include +#include +#include +#include #include #include +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes_util.h" 
+#include "mongo/bson/oid.h" +#include "mongo/bson/timestamp.h" +#include "mongo/platform/decimal128.h" +#include "mongo/scripting/engine.h" +#include "mongo/scripting/mozjs/jsstringwrapper.h" #include "mongo/scripting/mozjs/objectwrapper.h" namespace mongo { diff --git a/src/mongo/scripting/utils.cpp b/src/mongo/scripting/utils.cpp index 08e08f3f6d7c5..19cde94683157 100644 --- a/src/mongo/scripting/utils.cpp +++ b/src/mongo/scripting/utils.cpp @@ -27,12 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" #include "mongo/scripting/engine.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/md5.h" #include "mongo/util/md5.hpp" -#include "mongo/util/time_support.h" namespace mongo { diff --git a/src/mongo/shell/SConscript b/src/mongo/shell/SConscript index e43690d06db1c..9239cc3cdddcc 100644 --- a/src/mongo/shell/SConscript +++ b/src/mongo/shell/SConscript @@ -115,6 +115,7 @@ env.Library( 'shell_options_storage', ], LIBDEPS_PRIVATE=[ + '$BUILD_DIR/mongo/client/sasl_client', '$BUILD_DIR/mongo/util/options_parser/options_parser', ], ) @@ -210,8 +211,9 @@ if get_option('ssl') == 'on': # TODO(SERVER-59992): Remove -Wno-class-memacces where possible. '-Wno-unknown-warning-option', '-Wno-class-memaccess', + # TODO(SERVER-77205): Review and Possibly Remove '-Wno-deprecated' After Mozjs Update + '-Wno-deprecated', ], ) - scriptingEnv.InjectMozJS() scriptingEnv.Library( @@ -274,6 +276,7 @@ if not has_option('noshell') and jsEngine: "$BUILD_DIR/mongo/db/query/command_request_response", "$BUILD_DIR/mongo/db/query/query_request", "$BUILD_DIR/mongo/db/server_base", + "$BUILD_DIR/mongo/db/service_context_non_d", "$BUILD_DIR/mongo/db/session/logical_session_id_helpers", "$BUILD_DIR/mongo/db/storage/duplicate_key_error_info", "$BUILD_DIR/mongo/db/traffic_reader", diff --git a/src/mongo/shell/assert.js b/src/mongo/shell/assert.js index 681b25432b06f..59c402cd52fff 100644 --- a/src/mongo/shell/assert.js +++ b/src/mongo/shell/assert.js @@ -373,7 +373,7 @@ assert = (function() { return; } - diff = (new Date()).getTime() - start.getTime(); + const diff = (new Date()).getTime() - start.getTime(); if (diff > timeout) { msg = _buildAssertionMessage(msg, msgPrefix); if (runHangAnalyzer) { @@ -965,7 +965,8 @@ assert = (function() { const found = expectedCode.some((ec) => writeErrorCodes.has(ec)); if (!found) { errMsg = "found code(s) " + tojson(Array.from(writeErrorCodes)) + - " does not match any of the expected codes " + tojson(expectedCode); + " does not match any of the expected codes " + tojson(expectedCode) + + ". 
Original command response: " + tojson(res); } } diff --git a/src/mongo/shell/bench.cpp b/src/mongo/shell/bench.cpp index b71e82a5a5074..bd3c11338a5eb 100644 --- a/src/mongo/shell/bench.cpp +++ b/src/mongo/shell/bench.cpp @@ -29,23 +29,48 @@ #include "mongo/shell/bench.h" +#include +#include +#include +#include +#include +// IWYU pragma: no_include "cxxabi.h" +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include #include +#include #include "mongo/base/shim.h" -#include "mongo/client/dbclient_cursor.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/json.h" +#include "mongo/bson/timestamp.h" +#include "mongo/client/read_preference.h" +#include "mongo/client/read_preference_gen.h" +#include "mongo/db/basic_types.h" +#include "mongo/db/database_name.h" +#include "mongo/db/dbmessage.h" #include "mongo/db/namespace_string.h" #include "mongo/db/query/cursor_response.h" +#include "mongo/db/query/find_command.h" #include "mongo/db/query/getmore_command_gen.h" #include "mongo/db/query/query_request_helper.h" +#include "mongo/idl/idl_parser.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/scripting/bson_template_evaluator.h" #include "mongo/stdx/thread.h" -#include "mongo/util/md5.h" #include "mongo/util/pcre.h" #include "mongo/util/pcre_util.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/str.h" #include "mongo/util/time_support.h" #include "mongo/util/timer.h" -#include "mongo/util/version.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -113,7 +138,7 @@ BSONObj fixQuery(const BSONObj& obj, BsonTemplateEvaluator& btl) { return obj; BSONObjBuilder b(obj.objsize() + 128); - verify(BsonTemplateEvaluator::StatusSuccess == btl.evaluate(obj, b)); + MONGO_verify(BsonTemplateEvaluator::StatusSuccess == btl.evaluate(obj, b)); return b.obj(); } @@ -134,23 +159,24 @@ bool runCommandWithSession(DBClientBase* conn, for (const auto& cmdArg : cmdObj) { uassert(ErrorCodes::IllegalOperation, "Command cannot contain session id", - cmdArg.fieldName() != OperationSessionInfo::kSessionIdFieldName); + cmdArg.fieldName() != OperationSessionInfoFromClient::kSessionIdFieldName); uassert(ErrorCodes::IllegalOperation, "Command cannot contain transaction id", - cmdArg.fieldName() != OperationSessionInfo::kTxnNumberFieldName); + cmdArg.fieldName() != OperationSessionInfoFromClient::kTxnNumberFieldName); cmdObjWithLsidBuilder.append(cmdArg); } { BSONObjBuilder lsidBuilder( - cmdObjWithLsidBuilder.subobjStart(OperationSessionInfo::kSessionIdFieldName)); + cmdObjWithLsidBuilder.subobjStart(OperationSessionInfoFromClient::kSessionIdFieldName)); lsid->serialize(&lsidBuilder); lsidBuilder.doneFast(); } if (txnNumber) { - cmdObjWithLsidBuilder.append(OperationSessionInfo::kTxnNumberFieldName, *txnNumber); + cmdObjWithLsidBuilder.append(OperationSessionInfoFromClient::kTxnNumberFieldName, + *txnNumber); } if (options & kMultiStatementTransactionOption) { @@ -179,7 +205,7 @@ void abortTransaction(DBClientBase* conn, BSONObj abortTransactionCmd = BSON("abortTransaction" << 1); BSONObj abortCommandResult; const bool successful = runCommandWithSession(conn, - DatabaseName(boost::none, "admin"), + DatabaseName::kAdmin, abortTransactionCmd, kMultiStatementTransactionOption, lsid, @@ -209,8 +235,7 @@ int runQueryWithReadCommands(DBClientBase* conn, 
Milliseconds delayBeforeGetMore, BSONObj readPrefObj, BSONObj* objOut) { - const auto dbName = - findCommand->getNamespaceOrUUID().nss().value_or(NamespaceString()).dbName(); + const auto dbName = findCommand->getNamespaceOrUUID().dbName(); BSONObj findCommandResult; BSONObj findCommandObj = findCommand->toBSON(readPrefObj); @@ -243,9 +268,10 @@ int runQueryWithReadCommands(DBClientBase* conn, while (cursorResponse.getCursorId() != 0) { sleepFor(delayBeforeGetMore); + invariant(findCommand->getNamespaceOrUUID().isNamespaceString()); GetMoreCommandRequest getMoreRequest( cursorResponse.getCursorId(), - findCommand->getNamespaceOrUUID().nss().value_or(NamespaceString()).coll().toString()); + findCommand->getNamespaceOrUUID().nss().coll().toString()); getMoreRequest.setBatchSize(findCommand->getBatchSize()); BSONObj getMoreCommandResult; uassert(ErrorCodes::CommandFailed, @@ -840,7 +866,7 @@ void BenchRunState::tellWorkersToCollectStats() { void BenchRunState::assertFinished() const { stdx::lock_guard lk(_mutex); - verify(0 == _numUnstartedWorkers + _numActiveWorkers); + MONGO_verify(0 == _numUnstartedWorkers + _numActiveWorkers); } bool BenchRunState::shouldWorkerFinish() const { @@ -853,7 +879,7 @@ bool BenchRunState::shouldWorkerCollectStats() const { void BenchRunState::onWorkerStarted() { stdx::lock_guard lk(_mutex); - verify(_numUnstartedWorkers > 0); + MONGO_verify(_numUnstartedWorkers > 0); --_numUnstartedWorkers; ++_numActiveWorkers; if (_numUnstartedWorkers == 0) { @@ -863,7 +889,7 @@ void BenchRunState::onWorkerStarted() { void BenchRunState::onWorkerFinished() { stdx::lock_guard lk(_mutex); - verify(_numActiveWorkers > 0); + MONGO_verify(_numActiveWorkers > 0); --_numActiveWorkers; if (_numActiveWorkers + _numUnstartedWorkers == 0) { _stateChangeCondition.notify_all(); @@ -907,7 +933,7 @@ bool BenchRunWorker::shouldCollectStats() const { * Executes the workload on a worker thread. This is the main routine for benchRunXXX() benchmarks. 
*/ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) { - verify(conn); + MONGO_verify(conn); long long count = 0; Timer timer; @@ -926,8 +952,7 @@ void BenchRunWorker::generateLoadOnConnection(DBClientBase* conn) { BSONObj result; uassert(40640, str::stream() << "Unable to create session due to error " << result, - conn->runCommand( - DatabaseName(boost::none, "admin"), BSON("startSession" << 1), result)); + conn->runCommand(DatabaseName::kAdmin, BSON("startSession" << 1), result)); lsid.emplace(LogicalSessionIdToClient::parse(IDLParserContext("lsid"), result["id"].Obj())); } @@ -1085,7 +1110,7 @@ void BenchRunOp::executeOnce(DBClientBase* conn, "namespace"_attr = this->ns, "expected"_attr = this->expectedDoc, "got"_attr = result); - verify(false); + MONGO_verify(false); } } break; case OpType::COMMAND: { @@ -1093,12 +1118,13 @@ void BenchRunOp::executeOnce(DBClientBase* conn, BSONObj result; { BenchRunEventTrace _bret(&state->stats->commandCounter); - ok = runCommandWithSession(conn, - DatabaseName(this->tenantId, this->ns), - fixQuery(this->command, *state->bsonTemplateEvaluator), - this->options, - lsid, - &result); + ok = runCommandWithSession( + conn, + DatabaseName::createDatabaseName_forTest(this->tenantId, this->ns), + fixQuery(this->command, *state->bsonTemplateEvaluator), + this->options, + lsid, + &result); } if (!ok) { ++state->stats->errCount; @@ -1115,12 +1141,13 @@ void BenchRunOp::executeOnce(DBClientBase* conn, uassert(ErrorCodes::CommandFailed, str::stream() << "getMore command failed; reply was: " << getMoreCommandResult, - runCommandWithSession(conn, - DatabaseName(this->tenantId, this->ns), - getMoreRequest.toBSON({}), - kNoOptions, - lsid, - &getMoreCommandResult)); + runCommandWithSession( + conn, + DatabaseName::createDatabaseName_forTest(this->tenantId, this->ns), + getMoreRequest.toBSON({}), + kNoOptions, + lsid, + &getMoreCommandResult)); cursorResponse = uassertStatusOK(CursorResponse::parseFromBSON(getMoreCommandResult)); count += cursorResponse.getBatch().size(); @@ -1197,7 +1224,7 @@ void BenchRunOp::executeOnce(DBClientBase* conn, "namespace"_attr = this->ns, "expected"_attr = this->expected, "got"_attr = count); - verify(false); + MONGO_verify(false); } LOGV2_DEBUG(22798, 5, "Result from benchRun thread [query]", "count"_attr = count); } break; @@ -1258,7 +1285,8 @@ void BenchRunOp::executeOnce(DBClientBase* conn, txnNumberForOp = state->txnNumber; } runCommandWithSession(conn, - DatabaseName(this->tenantId, nsToDatabaseSubstring(this->ns)), + DatabaseName::createDatabaseName_forTest( + this->tenantId, nsToDatabaseSubstring(this->ns)), builder.done(), kNoOptions, lsid, @@ -1296,7 +1324,8 @@ void BenchRunOp::executeOnce(DBClientBase* conn, txnNumberForOp = state->txnNumber; } runCommandWithSession(conn, - DatabaseName(this->tenantId, nsToDatabaseSubstring(this->ns)), + DatabaseName::createDatabaseName_forTest( + this->tenantId, nsToDatabaseSubstring(this->ns)), builder.done(), kNoOptions, lsid, @@ -1326,7 +1355,8 @@ void BenchRunOp::executeOnce(DBClientBase* conn, txnNumberForOp = state->txnNumber; } runCommandWithSession(conn, - DatabaseName(this->tenantId, nsToDatabaseSubstring(this->ns)), + DatabaseName::createDatabaseName_forTest( + this->tenantId, nsToDatabaseSubstring(this->ns)), builder.done(), kNoOptions, lsid, @@ -1574,7 +1604,7 @@ BSONObj BenchRunner::benchRunSync(const BSONObj& argsFake, void* data) { * give each worker all the entries to be executed round-robin until the 'seconds' timer expires. 
*/ BSONObj BenchRunner::benchRunOnce(const BSONObj& argsFake, void* data) { - verify(argsFake.firstElement().isABSONObj()); + MONGO_verify(argsFake.firstElement().isABSONObj()); BSONObj args = argsFake.firstElement().Obj(); // Add a config field to indicate this variant. @@ -1590,7 +1620,7 @@ BSONObj BenchRunner::benchRunOnce(const BSONObj& argsFake, void* data) { * benchRun( { ops : [] , host : XXX , db : XXXX , parallel : 5 , seconds : 5 } */ BSONObj BenchRunner::benchStart(const BSONObj& argsFake, void* data) { - verify(argsFake.firstElement().isABSONObj()); + MONGO_verify(argsFake.firstElement().isABSONObj()); BSONObj args = argsFake.firstElement().Obj(); // Get new BenchRunner object diff --git a/src/mongo/shell/bench.h b/src/mongo/shell/bench.h index 36f87704ff2d7..5499b2d751db8 100644 --- a/src/mongo/shell/bench.h +++ b/src/mongo/shell/bench.h @@ -29,17 +29,36 @@ #pragma once +#include #include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/oid.h" #include "mongo/client/dbclient_base.h" #include "mongo/db/jsobj.h" #include "mongo/db/ops/write_ops.h" +#include "mongo/db/ops/write_ops_parsers.h" #include "mongo/db/session/logical_session_id.h" +#include "mongo/db/session/logical_session_id_gen.h" +#include "mongo/db/tenant_id.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/platform/random.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/pcre.h" #include "mongo/util/timer.h" diff --git a/src/mongo/shell/collection.js b/src/mongo/shell/collection.js index 8956b858bccda..cfa9f6c78227e 100644 --- a/src/mongo/shell/collection.js +++ b/src/mongo/shell/collection.js @@ -657,7 +657,7 @@ DBCollection.prototype.dropIndexes = function(indexNames) { DBCollection.prototype.drop = function(options = {}) { const cmdObj = Object.assign({drop: this.getName()}, options); - ret = this._db.runCommand(cmdObj); + const ret = this._db.runCommand(cmdObj); if (!ret.ok) { if (ret.errmsg == "ns not found") return false; @@ -1450,6 +1450,10 @@ DBCollection.prototype.distinct = function(keyString, query, options) { cmd.collation = opts.collation; } + if (opts.hint) { + cmd.hint = opts.hint; + } + // Execute distinct command var res = this.runReadCommand(cmd); if (!res.ok) { diff --git a/src/mongo/shell/data_consistency_checker.js b/src/mongo/shell/data_consistency_checker.js index 96e28902e9cf9..319343f0117e1 100644 --- a/src/mongo/shell/data_consistency_checker.js +++ b/src/mongo/shell/data_consistency_checker.js @@ -122,6 +122,35 @@ var {DataConsistencyChecker} = (function() { } class DataConsistencyChecker { + /** + * This function serves as a wrapper for the various comparison functions we use in the data + * consistency checker. In particular, through + * 'TestData.ignoreFieldOrderForDataConsistency', it becomes possible to ignore field + * ordering when comparing documents (i.e. the documents {a: 1, b: 2} and {b: 2, a: 1} will + * be equal when field ordering is ignored). This is useful for versions of MongoDB that + * don't support field ordering like 4.2. + * @param a The first document + * @param b The second document + * @param checkType Whether to ignore differences in types when comparing documents. 
For + * example, NumberLong(1) and NumberInt(1) are equal when types are ignored. + * @returns a boolean when checkType is true and an integer otherwise. + */ + static bsonCompareFunction(a, b, checkType = true) { + if (TestData && TestData.ignoreFieldOrderForDataConsistency) { + // When the bsonCompareFunction is invoked with checkType, a boolean return value is + // expected. For that reason we compare the unordered compare result with 0. + if (checkType) { + return bsonUnorderedFieldsCompare(a, b) == 0; + } + return bsonUnorderedFieldsCompare(a, b); + } + + if (checkType) { + return bsonBinaryEqual(a, b); + } + return bsonWoCompare(a, b); + } + static getDiff(cursor1, cursor2) { const docsWithDifferentContents = []; const docsMissingOnFirst = []; @@ -134,7 +163,7 @@ var {DataConsistencyChecker} = (function() { const doc1 = cursor1.peekNext(); const doc2 = cursor2.peekNext(); - if (bsonBinaryEqual(doc1, doc2)) { + if (this.bsonCompareFunction(doc1, doc2)) { // The same document was found from both cursor1 and cursor2 so we just move // on to the next document for both cursors. cursor1.next(); @@ -142,7 +171,8 @@ var {DataConsistencyChecker} = (function() { continue; } - const ordering = bsonWoCompare({_: doc1._id}, {_: doc2._id}); + const ordering = + this.bsonCompareFunction({_: doc1._id}, {_: doc2._id}, false /* checkType */); if (ordering === 0) { // The documents have the same _id but have different contents. docsWithDifferentContents.push({first: doc1, second: doc2}); @@ -195,7 +225,8 @@ var {DataConsistencyChecker} = (function() { if (!map1.hasOwnProperty(spec.name)) { indexesMissingOnFirst.push(spec); } else { - const ordering = bsonWoCompare(map1[spec.name], spec); + const ordering = + this.bsonCompareFunction(map1[spec.name], spec, false /* checkType */); if (ordering != 0) { indexesWithDifferentSpecs.push({first: map1[spec.name], second: spec}); } @@ -253,6 +284,12 @@ var {DataConsistencyChecker} = (function() { } static canIgnoreCollectionDiff(sourceCollInfos, syncingCollInfos, collName) { + if (collName === "system.preimages") { + print(`Ignoring hash inconsistencies for 'system.preimages' as those can be ` + + `expected with independent truncates. Content is checked separately by ` + + `ReplSetTest.checkPreImageCollection`); + return true; + } if (collName !== "image_collection") { return false; } @@ -407,15 +444,18 @@ var {DataConsistencyChecker} = (function() { // Although rare, the 'config.image_collection' table can be inconsistent after // an initial sync or after a restart (see SERVER-60048). Dump the collection // diff anyways for more visibility as a sanity check. + // + // 'config.system.preimages' can potentially be inconsistent via hashes, + // there's a special process that verifies them with + // ReplSetTest.checkPreImageCollection so it is safe to ignore failures here. this.dumpCollectionDiff( collectionPrinted, sourceCollInfos, syncingCollInfos, coll.name); const shouldIgnoreFailure = this.canIgnoreCollectionDiff(sourceCollInfos, syncingCollInfos, coll.name); if (shouldIgnoreFailure) { - prettyPrint( - `Collection diff in ${dbName}.${coll.name} can be ignored: ${dbHashesMsg} - . Inconsistencies in the image collection can be expected in certain - restart scenarios.`); + prettyPrint(`Collection diff in ${dbName}.${coll.name} can be ignored: ` + + `${dbHashesMsg}. 
Inconsistencies in the collection can be ` + + `expected in certain scenarios.`); } success = shouldIgnoreFailure && success; didIgnoreFailure = shouldIgnoreFailure || didIgnoreFailure; @@ -445,7 +485,7 @@ var {DataConsistencyChecker} = (function() { delete syncingInfo.idIndex.ns; } - if (!bsonBinaryEqual(syncingInfo, sourceInfo)) { + if (!this.bsonCompareFunction(syncingInfo, sourceInfo)) { prettyPrint( `the two nodes have different attributes for the collection or view ${ dbName}.${syncingInfo.name}`); @@ -593,10 +633,11 @@ var {DataConsistencyChecker} = (function() { dbHashesMsg}`); if (didIgnoreFailure) { // We only expect database hash mismatches on the config db, where - // config.image_collection is expected to have inconsistencies in certain - // scenarios. - prettyPrint(`Ignoring hash mismatch for the ${dbName} database since - inconsistencies in 'config.image_collection' can be expected`); + // config.image_collection and config.system.preimages are expected to have + // inconsistencies in certain scenarios. + prettyPrint(`Ignoring hash mismatch for the ${dbName} database since ` + + `inconsistencies in 'config.image_collection' or ` + + `'config.system.preimages' can be expected`); return success; } success = false; diff --git a/src/mongo/shell/encrypted_dbclient_base.cpp b/src/mongo/shell/encrypted_dbclient_base.cpp index 62ce4d06a2e8d..b065ce35a201e 100644 --- a/src/mongo/shell/encrypted_dbclient_base.cpp +++ b/src/mongo/shell/encrypted_dbclient_base.cpp @@ -27,41 +27,81 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/shell/encrypted_dbclient_base.h" - +#include +#include +#include +#include +#include +#include +#include +#include #include #include - -#include "mongo/base/data_cursor.h" +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/data_range_cursor.h" #include "mongo/base/data_type_validated.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/util/builder.h" #include "mongo/client/dbclient_base.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/crypto/aead_encryption.h" #include "mongo/crypto/fle_crypto.h" #include "mongo/crypto/fle_data_frames.h" #include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/crypto/symmetric_crypto.h" -#include "mongo/db/client.h" -#include "mongo/db/commands.h" #include "mongo/db/matcher/schema/encrypt_schema_gen.h" #include "mongo/db/namespace_string.h" -#include "mongo/rpc/object_check.h" +#include "mongo/db/repl/read_concern_args.h" +#include "mongo/db/repl/read_concern_level.h" +#include "mongo/db/tenant_id.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep #include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/rpc/reply_interface.h" #include "mongo/scripting/mozjs/bindata.h" +#include "mongo/scripting/mozjs/code.h" +#include "mongo/scripting/mozjs/db.h" +#include "mongo/scripting/mozjs/dbcollection.h" +#include "mongo/scripting/mozjs/dbref.h" #include "mongo/scripting/mozjs/implscope.h" +#include "mongo/scripting/mozjs/internedstring.h" #include "mongo/scripting/mozjs/maxkey.h" #include "mongo/scripting/mozjs/minkey.h" #include 
"mongo/scripting/mozjs/mongo.h" +#include "mongo/scripting/mozjs/numberdecimal.h" #include "mongo/scripting/mozjs/objectwrapper.h" #include "mongo/scripting/mozjs/valuereader.h" #include "mongo/scripting/mozjs/valuewriter.h" +#include "mongo/scripting/mozjs/wraptype.h" +#include "mongo/shell/encrypted_dbclient_base.h" #include "mongo/shell/kms.h" #include "mongo/shell/kms_gen.h" -#include "mongo/shell/shell_options.h" +#include "mongo/stdx/variant.h" #include "mongo/util/assert_util.h" +#include "mongo/util/base64.h" +#include "mongo/util/database_name_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/str.h" namespace mongo { @@ -91,7 +131,7 @@ static void validateCollection(JSContext* cx, JS::HandleValue value) { mozjs::getScope(cx)->getProto().instanceOf(coll)); } -EncryptedDBClientBase::EncryptedDBClientBase(std::unique_ptr conn, +EncryptedDBClientBase::EncryptedDBClientBase(std::shared_ptr conn, ClientSideFLEOptions encryptionOptions, JS::HandleValue collection, JSContext* cx) @@ -167,7 +207,7 @@ BSONObj EncryptedDBClientBase::encryptDecryptCommand(const BSONObj& object, } invariant(frameStack.size() == 1); // Append '$db' which shouldn't contain tenantid. - frameStack.top().second.append("$db", dbName.toString()); + frameStack.top().second.append("$db", dbName.toString_forTest()); // If encrypt request, append '$tenant' which contains tenantid. if (encrypt && dbName.tenantId() && !object.hasField("$tenant")) { dbName.tenantId()->serializeToBSON("$tenant", &frameStack.top().second); @@ -240,8 +280,9 @@ EncryptedDBClientBase::RunCommandReturn EncryptedDBClientBase::handleEncryptionR auto& request = params.request; auto commandName = request.getCommandName().toString(); const DatabaseName dbName = request.body.hasField("$tenant") - ? DatabaseName(TenantId(request.body["$tenant"].OID()), request.getDatabase()) - : DatabaseName(boost::none, request.getDatabase()); + ? DatabaseNameUtil::deserialize(TenantId(request.body["$tenant"].OID()), + request.getDatabase()) + : DatabaseName::createDatabaseName_forTest(boost::none, request.getDatabase()); if (std::find(kEncryptedCommands.begin(), kEncryptedCommands.end(), StringData(commandName)) == std::end(kEncryptedCommands)) { @@ -548,7 +589,7 @@ boost::optional EncryptedDBClientBase::getEncryptedFieldCo const NamespaceString& nss) { auto collsList = _conn->getCollectionInfos(nss.dbName(), BSON("name" << nss.coll())); uassert(ErrorCodes::BadValue, - str::stream() << "Namespace not found: " << nss.toString(), + str::stream() << "Namespace not found: " << nss.toStringForErrorMsg(), !collsList.empty()); auto info = collsList.front(); auto opts = info.getField("options"); @@ -573,7 +614,7 @@ NamespaceString validateStructuredEncryptionParams(JSContext* cx, str::stream() << "1st param to " << cmdName << " has to be a string"); } std::string fullName = mozjs::ValueWriter(cx, args.get(0)).toString(); - NamespaceString nss(fullName); + NamespaceString nss = NamespaceString::createNamespaceString_forTest(fullName); uassert( ErrorCodes::BadValue, str::stream() << "Invalid namespace: " << fullName, nss.isValid()); @@ -607,19 +648,38 @@ void EncryptedDBClientBase::cleanup(JSContext* cx, JS::CallArgs args) { builder.append("cleanupTokens", efc ? 
FLEClientCrypto::generateCompactionTokens(*efc, this) : BSONObj()); - // TODO SERVER-72937: Add call to cleanup function - mozjs::ValueReader(cx, args.rval()).fromBSON(BSONObj(), nullptr, false); - return; + BSONObj reply; + runCommand(nss.dbName(), builder.obj(), reply, 0); + reply = reply.getOwned(); + mozjs::ValueReader(cx, args.rval()).fromBSON(reply, nullptr, false); } void EncryptedDBClientBase::trace(JSTracer* trc) { JS::TraceEdge(trc, &_collection, "collection object"); } +void EncryptedDBClientBase::getEncryptionOptions(JSContext* cx, JS::CallArgs args) { + mozjs::ValueReader(cx, args.rval()).fromBSON(_encryptionOptions.toBSON(), nullptr, false); +} + +const ClientSideFLEOptions& EncryptedDBClientBase::getEncryptionOptions() const { + return _encryptionOptions; +} + JS::Value EncryptedDBClientBase::getCollection() const { return _collection.get(); } +JS::Value EncryptedDBClientBase::getKeyVaultMongo() const { + JS::RootedValue mongoRooted(_cx); + JS::RootedObject collectionRooted(_cx, &_collection.get().toObject()); + JS_GetProperty(_cx, collectionRooted, "_mongo", &mongoRooted); + if (!mongoRooted.isObject()) { + uasserted(ErrorCodes::BadValue, "Collection object is incomplete."); + } + return mongoRooted.get(); +} + std::unique_ptr EncryptedDBClientBase::find(FindCommandRequest findRequest, const ReadPreferenceSetting& readPref, ExhaustMode exhaustMode) { @@ -678,7 +738,7 @@ NamespaceString EncryptedDBClientBase::getCollectionNS() { uasserted(ErrorCodes::BadValue, "Collection object is incomplete."); } std::string fullName = mozjs::ValueWriter(_cx, fullNameRooted).toString(); - NamespaceString fullNameNS = NamespaceString(fullName); + NamespaceString fullNameNS = NamespaceString::createNamespaceString_forTest(fullName); uassert(ErrorCodes::BadValue, str::stream() << "Invalid namespace: " << fullName, fullNameNS.isValid()); @@ -731,6 +791,7 @@ BSONObj EncryptedDBClientBase::getEncryptedKey(const UUID& uuid) { findCmd.setFilter(BSON("_id" << uuid)); findCmd.setReadConcern( repl::ReadConcernArgs(repl::ReadConcernLevel::kMajorityReadConcern).toBSONInner()); + BSONObj dataKeyObj = _conn->findOne(std::move(findCmd)); if (dataKeyObj.isEmpty()) { uasserted(ErrorCodes::BadValue, "Invalid keyID."); @@ -818,7 +879,7 @@ void createCollectionObject(JSContext* cx, JS::MutableHandleValue collection) { invariant(!client.isNull() && !client.isUndefined()); - auto ns = NamespaceString(nsString); + auto ns = NamespaceString::createNamespaceString_forTest(nsString); uassert(ErrorCodes::BadValue, "Invalid keystore namespace.", ns.isValid() && NamespaceString::validCollectionName(ns.coll())); @@ -840,14 +901,14 @@ void createCollectionObject(JSContext* cx, collectionArgs[0].setObject(client.toObject()); collectionArgs[1].setObject(*databaseObj); mozjs::ValueReader(cx, collectionArgs[2]).fromStringData(ns.coll()); - mozjs::ValueReader(cx, collectionArgs[3]).fromStringData(ns.ns()); + mozjs::ValueReader(cx, collectionArgs[3]).fromStringData(ns.toString_forTest()); scope->getProto().newInstance(collectionArgs, collection); } // The parameters required to start FLE on the shell. The current connection is passed in as a // parameter to create the keyvault collection object if one is not provided. 
-std::unique_ptr createEncryptedDBClientBase(std::unique_ptr conn, +std::shared_ptr createEncryptedDBClientBase(std::shared_ptr conn, JS::HandleValue arg, JS::HandleObject mongoConnection, JSContext* cx) { @@ -858,7 +919,7 @@ std::unique_ptr createEncryptedDBClientBase(std::unique_ptr createEncryptedDBClientBase(std::unique_ptr base = - std::make_unique(std::move(conn), encryptionOptions, collection, cx); - return std::move(base); + return std::make_shared( + std::move(conn), encryptionOptions, collection, cx); +} + +std::shared_ptr createEncryptedDBClientBaseFromExisting( + std::shared_ptr encConn, std::shared_ptr rawConn, JSContext* cx) { + auto encConnPtr = dynamic_cast(encConn.get()); + uassert(ErrorCodes::BadValue, "Connection is not a valid encrypted connection", encConnPtr); + + JS::RootedValue encOptsRV(cx); + JS::RootedObject kvMongoRO(cx); + mozjs::ValueReader(cx, &encOptsRV) + .fromBSON(encConnPtr->getEncryptionOptions().toBSON(), nullptr, false); + kvMongoRO.set(encConnPtr->getKeyVaultMongo().toObjectOrNull()); + + return createEncryptedDBClientBase(rawConn, encOptsRV, kvMongoRO, cx); } DBClientBase* getNestedConnection(DBClientBase* conn) { @@ -910,7 +984,8 @@ DBClientBase* getNestedConnection(DBClientBase* conn) { } MONGO_INITIALIZER(setCallbacksForEncryptedDBClientBase)(InitializerContext*) { - mongo::mozjs::setEncryptedDBClientCallbacks(createEncryptedDBClientBase, getNestedConnection); + mongo::mozjs::setEncryptedDBClientCallbacks( + createEncryptedDBClientBase, createEncryptedDBClientBaseFromExisting, getNestedConnection); } } // namespace diff --git a/src/mongo/shell/encrypted_dbclient_base.h b/src/mongo/shell/encrypted_dbclient_base.h index 0070d5c2e4354..b74f24184aaff 100644 --- a/src/mongo/shell/encrypted_dbclient_base.h +++ b/src/mongo/shell/encrypted_dbclient_base.h @@ -27,22 +27,56 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include "mongo/base/data_cursor.h" +#include "mongo/base/data_range.h" #include "mongo/base/data_type_validated.h" +#include "mongo/base/secure_allocator.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_depth.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" -#include "mongo/config.h" +#include "mongo/client/dbclient_cursor.h" +#include "mongo/client/read_preference.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/crypto/aead_encryption.h" +#include "mongo/crypto/encryption_fields_gen.h" #include "mongo/crypto/fle_crypto.h" +#include "mongo/crypto/fle_crypto_types.h" +#include "mongo/crypto/fle_data_frames.h" +#include "mongo/crypto/fle_field_schema_gen.h" #include "mongo/crypto/symmetric_crypto.h" +#include "mongo/crypto/symmetric_key.h" #include "mongo/db/client.h" #include "mongo/db/commands.h" +#include "mongo/db/database_name.h" #include "mongo/db/matcher/schema/encrypt_schema_gen.h" #include "mongo/db/namespace_string.h" -#include "mongo/rpc/object_check.h" +#include "mongo/db/query/find_command.h" +#include "mongo/platform/basic.h" +#include "mongo/rpc/message.h" +#include "mongo/rpc/object_check.h" // IWYU pragma: keep +#include "mongo/rpc/op_msg.h" #include "mongo/rpc/op_msg_rpc_impls.h" +#include "mongo/rpc/unique_message.h" #include "mongo/scripting/mozjs/bindata.h" #include "mongo/scripting/mozjs/implscope.h" #include "mongo/scripting/mozjs/maxkey.h" @@ -55,6 +89,9 @@ #include "mongo/shell/kms_gen.h" #include "mongo/shell/shell_options.h" #include "mongo/util/lru_cache.h" +#include "mongo/util/net/ssl_types.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" namespace mongo { @@ -66,7 +103,7 @@ constexpr uint8_t kRandomEncryptionBit = 0x02; static constexpr auto kExplain = "explain"_sd; -constexpr std::array kEncryptedCommands = {"aggregate"_sd, +constexpr std::array kEncryptedCommands = {"aggregate"_sd, "count"_sd, "delete"_sd, "distinct"_sd, @@ -79,7 +116,8 @@ constexpr std::array kEncryptedCommands = {"aggregate"_sd, "update"_sd, "create"_sd, "createIndexes"_sd, - "collMod"_sd}; + "collMod"_sd, + "bulkWrite"_sd}; class EncryptedDBClientBase : public DBClientBase, public mozjs::EncryptionCallbacks, @@ -87,7 +125,7 @@ class EncryptedDBClientBase : public DBClientBase, public: using DBClientBase::find; - EncryptedDBClientBase(std::unique_ptr conn, + EncryptedDBClientBase(std::shared_ptr conn, ClientSideFLEOptions encryptionOptions, JS::HandleValue collection, JSContext* cx); @@ -130,6 +168,11 @@ class EncryptedDBClientBase : public DBClientBase, using EncryptionCallbacks::trace; void trace(JSTracer* trc) final; + using EncryptionCallbacks::getEncryptionOptions; + void getEncryptionOptions(JSContext* cx, JS::CallArgs args) final; + + const ClientSideFLEOptions& getEncryptionOptions() const; + std::unique_ptr find(FindCommandRequest findRequest, const ReadPreferenceSetting& readPref, ExhaustMode exhaustMode) final; @@ -148,6 +191,8 @@ class EncryptedDBClientBase : public DBClientBase, DBClientBase* getRawConnection(); + JS::Value getKeyVaultMongo() const; + #ifdef MONGO_CONFIG_SSL const SSLConfiguration* getSSLConfiguration() override; @@ -246,7 +291,7 @@ class EncryptedDBClientBase : public 
DBClientBase, boost::optional getEncryptedFieldConfig(const NamespaceString& nss); protected: - std::unique_ptr _conn; + std::shared_ptr _conn; ClientSideFLEOptions _encryptionOptions; private: @@ -258,7 +303,7 @@ class EncryptedDBClientBase : public DBClientBase, }; using ImplicitEncryptedDBClientCallback = - std::unique_ptr(std::unique_ptr conn, + std::shared_ptr(std::shared_ptr conn, ClientSideFLEOptions encryptionOptions, JS::HandleValue collection, JSContext* cx); diff --git a/src/mongo/shell/explainable.js b/src/mongo/shell/explainable.js index 1a76ea715dff3..352dd5715da70 100644 --- a/src/mongo/shell/explainable.js +++ b/src/mongo/shell/explainable.js @@ -162,6 +162,10 @@ var Explainable = (function() { distinctCmd.maxTimeMS = options.maxTimeMS; } + if (options && options.hasOwnProperty("hint")) { + distinctCmd.hint = options.hint; + } + var explainCmd = buildExplainCmd(distinctCmd, this._verbosity); var explainResult = this._collection.runReadCommand(explainCmd); return throwOrReturn(explainResult); diff --git a/src/mongo/shell/kms.cpp b/src/mongo/shell/kms.cpp index f379c5f43e21c..755b2f7e4f995 100644 --- a/src/mongo/shell/kms.cpp +++ b/src/mongo/shell/kms.cpp @@ -29,10 +29,16 @@ #include "kms.h" -#include "mongo/platform/random.h" +#include + +#include +#include + +#include "mongo/idl/idl_parser.h" #include "mongo/shell/kms_gen.h" -#include "mongo/util/net/hostandport.h" -#include "mongo/util/text.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" +#include "mongo/util/text.h" // IWYU pragma: keep namespace mongo { diff --git a/src/mongo/shell/kms.h b/src/mongo/shell/kms.h index a67b82db2834f..02a8c17bdaf41 100644 --- a/src/mongo/shell/kms.h +++ b/src/mongo/shell/kms.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include #include diff --git a/src/mongo/shell/kms_local.cpp b/src/mongo/shell/kms_local.cpp index a6b5990f10804..26bf1e47727c9 100644 --- a/src/mongo/shell/kms_local.cpp +++ b/src/mongo/shell/kms_local.cpp @@ -27,18 +27,27 @@ * it in the license file. */ -#include - -#include "mongo/base/init.h" +#include +#include +#include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/secure_allocator.h" -#include "mongo/base/status_with.h" -#include "mongo/bson/json.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/crypto/aead_encryption.h" +#include "mongo/crypto/fle_data_frames.h" #include "mongo/crypto/symmetric_crypto.h" #include "mongo/crypto/symmetric_key.h" +#include "mongo/idl/idl_parser.h" #include "mongo/shell/kms.h" #include "mongo/shell/kms_gen.h" -#include "mongo/util/base64.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace { diff --git a/src/mongo/shell/kms_shell.cpp b/src/mongo/shell/kms_shell.cpp index 673fe7fb9212e..a88acf93926b8 100644 --- a/src/mongo/shell/kms_shell.cpp +++ b/src/mongo/shell/kms_shell.cpp @@ -27,7 +27,8 @@ * it in the license file. */ -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/scripting/engine.h" #include "mongo/shell/shell_utils.h" diff --git a/src/mongo/shell/kms_test.cpp b/src/mongo/shell/kms_test.cpp index f8e7d2f9f2b7e..2cd7613950324 100644 --- a/src/mongo/shell/kms_test.cpp +++ b/src/mongo/shell/kms_test.cpp @@ -27,14 +27,22 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include #include "kms.h" #include "mongo/base/data_range.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" -#include "mongo/unittest/unittest.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/bsontypes_util.h" +#include "mongo/idl/idl_parser.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/shell/linenoise.cpp b/src/mongo/shell/linenoise.cpp index 75f60e910c6ec..3a347f956964c 100644 --- a/src/mongo/shell/linenoise.cpp +++ b/src/mongo/shell/linenoise.cpp @@ -83,12 +83,11 @@ * */ -#include "mongo/platform/basic.h" - #ifdef _WIN32 #include #include + #define strcasecmp _stricmp #define strdup _strdup #define isatty _isatty @@ -101,27 +100,39 @@ #include #include #include -#include #include -#include #endif /* _WIN32 */ -#include "linenoise.h" -#include "linenoise_utf8.h" -#include "mk_wcwidth.h" +#ifdef __linux__ +#include +#include +#endif + #include +#include #include #include #include #include #include #include +#include #include +// IWYU pragma: no_include "ext/alloc_traits.h" #include "mongo/base/data_view.h" +#include "mongo/base/error_codes.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/shell/linenoise.h" +#include "mongo/shell/linenoise_utf8.h" +#include "mongo/shell/mk_wcwidth.h" #include "mongo/util/errno_util.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + using std::string; using std::vector; diff --git a/src/mongo/shell/linenoise_utf8.cpp b/src/mongo/shell/linenoise_utf8.cpp index 226999a371d56..46678da78d90c 100644 --- a/src/mongo/shell/linenoise_utf8.cpp +++ b/src/mongo/shell/linenoise_utf8.cpp @@ -30,12 +30,18 @@ #include "mongo/shell/linenoise_utf8.h" #ifdef _WIN32 -#include "mongo/platform/windows_basic.h" -#include "mongo/util/text.h" #include + +#include "mongo/platform/windows_basic.h" +#include "mongo/util/text.h" // IWYU pragma: keep #else +#include "mongo/config.h" // IWYU pragma: keep + +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) #include #endif +#endif + namespace linenoise_utf8 { diff --git a/src/mongo/shell/mongo.cpp b/src/mongo/shell/mongo.cpp index 2dac76a86d9cf..535ca06f08cc3 100644 --- a/src/mongo/shell/mongo.cpp +++ b/src/mongo/shell/mongo.cpp @@ -27,11 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/shell/mongo_main.h" #include "mongo/util/quick_exit.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep #ifdef _WIN32 int wmain(int argc, wchar_t* argvW[]) { diff --git a/src/mongo/shell/mongo_main.cpp b/src/mongo/shell/mongo_main.cpp index dbb170bdcd8da..0cfb708824db7 100644 --- a/src/mongo/shell/mongo_main.cpp +++ b/src/mongo/shell/mongo_main.cpp @@ -27,77 +27,113 @@ * it in the license file. 
*/ - -#include "mongo/platform/basic.h" - -#include "mongo/shell/mongo_main.h" - +#include +#include #include #include #include -#include -#include #include #include +#include #include -#include +#include +#include +#include // IWYU pragma: keep #include - -#include "mongo/base/init.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +// IWYU pragma: no_include "boost/log/detail/attachable_sstream_buf.hpp" +// IWYU pragma: no_include "boost/log/detail/locking_ptr.hpp" +#include +#include +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" +#include + +#include "mongo/base/error_extra_info.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" #include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/client/authenticate.h" #include "mongo/client/mongo_uri.h" #include "mongo/client/sasl_aws_client_options.h" #include "mongo/client/sasl_oidc_client_params.h" -#include "mongo/config.h" -#include "mongo/db/auth/sasl_command_constants.h" -#include "mongo/db/client.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/db/log_process_details.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" #include "mongo/db/wire_version.h" #include "mongo/logv2/attributes.h" #include "mongo/logv2/component_settings_filter.h" #include "mongo/logv2/console.h" #include "mongo/logv2/json_formatter.h" +#include "mongo/logv2/log_attr.h" #include "mongo/logv2/log_domain_global.h" #include "mongo/logv2/log_manager.h" -#include "mongo/logv2/text_formatter.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/plain_formatter.h" #include "mongo/platform/atomic_word.h" +#include "mongo/platform/mutex.h" +#include "mongo/platform/process_id.h" #include "mongo/scripting/engine.h" #include "mongo/shell/linenoise.h" +#include "mongo/shell/mongo_main.h" #include "mongo/shell/program_runner.h" #include "mongo/shell/shell_options.h" #include "mongo/shell/shell_utils.h" #include "mongo/shell/shell_utils_launcher.h" #include "mongo/stdx/utility.h" #include "mongo/transport/asio/asio_transport_layer.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" +#include "mongo/util/duration.h" #include "mongo/util/errno_util.h" #include "mongo/util/exit.h" #include "mongo/util/exit_code.h" #include "mongo/util/file.h" #include "mongo/util/net/ocsp/ocsp_manager.h" -#include "mongo/util/net/ssl_options.h" #include "mongo/util/password.h" #include "mongo/util/pcre.h" #include "mongo/util/quick_exit.h" -#include "mongo/util/scopeguard.h" #include "mongo/util/signal_handlers.h" -#include "mongo/util/stacktrace.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep +#include "mongo/util/time_support.h" #include "mongo/util/version.h" +#include "mongo/util/version/releases.h" #ifdef _WIN32 #include #include + #define isatty _isatty #define fileno _fileno #else +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) #include #endif +#endif #define MONGO_LOGV2_DEFAULT_COMPONENT 
::mongo::logv2::LogComponent::kDefault @@ -717,9 +753,8 @@ int mongo_main(int argc, char* argv[]) { mongo::shell_utils::RecordMyLocation(argv[0]); mongo::runGlobalInitializersOrDie(std::vector(argv, argv + argc)); - auto serviceContextHolder = ServiceContext::make(); - serviceContextHolder->registerClientObserver(std::make_unique()); - setGlobalServiceContext(std::move(serviceContextHolder)); + setGlobalServiceContext(ServiceContext::make()); + // TODO This should use a TransportLayerManager or TransportLayerFactory auto serviceContext = getGlobalServiceContext(); diff --git a/src/mongo/shell/mongodbcr.cpp b/src/mongo/shell/mongodbcr.cpp index e90320c527e91..95d6e5de0e28c 100644 --- a/src/mongo/shell/mongodbcr.cpp +++ b/src/mongo/shell/mongodbcr.cpp @@ -27,19 +27,33 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include -#include "mongo/client/authenticate.h" +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/client/authenticate.h" #include "mongo/db/auth/sasl_command_constants.h" #include "mongo/rpc/get_status_from_command_result.h" #include "mongo/rpc/op_msg.h" -#include "mongo/rpc/unique_message.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/md5.h" +#include "mongo/util/md5.hpp" #include "mongo/util/password_digest.h" namespace mongo { diff --git a/src/mongo/shell/named_pipe_test_helper.cpp b/src/mongo/shell/named_pipe_test_helper.cpp index 39421d16ed9b6..68550075f5fd7 100644 --- a/src/mongo/shell/named_pipe_test_helper.cpp +++ b/src/mongo/shell/named_pipe_test_helper.cpp @@ -29,24 +29,36 @@ #include "mongo/shell/named_pipe_test_helper.h" -#include +#include +#include +#include #include +#include #include +#include #include -#include "mongo/bson/bsonelement.h" +#include +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/catalog/virtual_collection_options.h" #include "mongo/db/pipeline/external_data_source_option_gen.h" +#include "mongo/db/storage/input_stream.h" #include "mongo/db/storage/multi_bson_stream_cursor.h" #include "mongo/db/storage/named_pipe.h" +#include "mongo/db/storage/record_data.h" #include "mongo/db/storage/record_store.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/random.h" -#include "mongo/stdx/chrono.h" #include "mongo/stdx/thread.h" +#include "mongo/util/assert_util.h" #include "mongo/util/static_immortal.h" #include "mongo/util/synchronized_value.h" diff --git a/src/mongo/shell/named_pipe_test_helper.h b/src/mongo/shell/named_pipe_test_helper.h index 95d61f7b7724b..11def23f2e058 100644 --- a/src/mongo/shell/named_pipe_test_helper.h +++ b/src/mongo/shell/named_pipe_test_helper.h @@ -30,6 +30,7 @@ #pragma once #include +#include #include "mongo/bson/bsonobj.h" diff --git a/src/mongo/shell/program_runner.cpp b/src/mongo/shell/program_runner.cpp index ef290a296d864..755cf2358e536 100644 --- a/src/mongo/shell/program_runner.cpp +++ 
b/src/mongo/shell/program_runner.cpp @@ -29,30 +29,65 @@ #include "mongo/shell/program_runner.h" +#include #include #include -#include #include -#include +#include +#include +#include +#include #include +#include +#include +#include +#include + +#include +#include +#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +// IWYU pragma: no_include "boost/iostreams/detail/error.hpp" +// IWYU pragma: no_include "boost/iostreams/detail/streambuf/indirect_streambuf.hpp" +#include +#include +#include #ifdef _WIN32 #include + #define SIGKILL 9 #else -#include -#include -#include -#include #include -#include #endif #include "mongo/base/environment_buffer.h" +#include "mongo/base/error_codes.h" #include "mongo/base/parse_number.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" +#include "mongo/logv2/log_tag.h" +#include "mongo/logv2/log_truncation.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/errno_util.h" -#include "mongo/util/text.h" +#include "mongo/util/exit_code.h" +#include "mongo/util/str.h" +#include "mongo/util/text.h" // IWYU pragma: keep + +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault diff --git a/src/mongo/shell/program_runner.h b/src/mongo/shell/program_runner.h index b1aafd784bec3..2d5e9ec068083 100644 --- a/src/mongo/shell/program_runner.h +++ b/src/mongo/shell/program_runner.h @@ -30,11 +30,19 @@ #pragma once #include +#include +#include +#include +#include +#include #include "mongo/bson/bsonobj.h" #include "mongo/db/service_context.h" +#include "mongo/platform/mutex.h" #include "mongo/platform/process_id.h" #include "mongo/stdx/thread.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/stdx/unordered_set.h" namespace mongo::shell_utils { class ProgramRunner; diff --git a/src/mongo/shell/replsettest.js b/src/mongo/shell/replsettest.js index 8ea55d6efb3e5..909acf9651e7c 100644 --- a/src/mongo/shell/replsettest.js +++ b/src/mongo/shell/replsettest.js @@ -722,8 +722,8 @@ var ReplSetTest = function(opts) { /** * Blocks until the secondary nodes have completed recovery and their roles are known. Blocks on - * all secondary nodes or just 'secondaries', if specified. Waits for all 'newlyAdded' fields to - * be removed by default. + * all secondary nodes or just 'secondaries', if specified. Does not wait for all 'newlyAdded' + * fields to be removed by default. */ this.awaitSecondaryNodes = function( timeout, secondaries, retryIntervalMS, waitForNewlyAddedRemoval) { @@ -1447,10 +1447,12 @@ var ReplSetTest = function(opts) { // set. If this is a config server, the FCV will be set as part of ShardingTest. // versions are supported with the useRandomBinVersionsWithinReplicaSet option. 
let setLastLTSFCV = (lastLTSBinVersionWasSpecifiedForSomeNode || - jsTest.options().useRandomBinVersionsWithinReplicaSet) && + jsTest.options().useRandomBinVersionsWithinReplicaSet == 'last-lts') && !self.isConfigServer; let setLastContinuousFCV = !setLastLTSFCV && - lastContinuousBinVersionWasSpecifiedForSomeNode && !self.isConfigServer; + (lastContinuousBinVersionWasSpecifiedForSomeNode || + jsTest.options().useRandomBinVersionsWithinReplicaSet == 'last-continuous') && + !self.isConfigServer; if ((setLastLTSFCV || setLastContinuousFCV) && jsTest.options().replSetFeatureCompatibilityVersion) { @@ -1465,15 +1467,23 @@ var ReplSetTest = function(opts) { // Authenticate before running the command. asCluster(self.nodes, function setFCV() { let fcv = setLastLTSFCV ? lastLTSFCV : lastContinuousFCV; + print("Setting feature compatibility version for replica set to '" + fcv + "'"); - const res = self.getPrimary().adminCommand({setFeatureCompatibilityVersion: fcv}); + // When latest is not equal to last-continuous, the transition to last-continuous is + // not allowed. Setting fromConfigServer allows us to bypass this restriction and + // test last-continuous. + const res = self.getPrimary().adminCommand( + {setFeatureCompatibilityVersion: fcv, fromConfigServer: true}); // TODO (SERVER-74398): Remove the retry with 'confirm: true' once 7.0 is last LTS. if (!res.ok && res.code === 7369100) { // We failed due to requiring 'confirm: true' on the command. This will only // occur on 7.0+ nodes that have 'enableTestCommands' set to false. Retry the // setFCV command with 'confirm: true'. - assert.commandWorked(self.getPrimary().adminCommand( - {setFeatureCompatibilityVersion: fcv, confirm: true})); + assert.commandWorked(self.getPrimary().adminCommand({ + setFeatureCompatibilityVersion: fcv, + confirm: true, + fromConfigServer: true + })); } else { assert.commandWorked(res); } @@ -1733,6 +1743,18 @@ var ReplSetTest = function(opts) { } } + // Wait for the query analysis writer to finish setting up to avoid background writes + // after initiation is done. + if (!doNotWaitForReplication) { + primary = self.getPrimary(); + // TODO(SERVER-57924): cleanup asCluster() to avoid checking here. + if (self._notX509Auth(primary) || primary.isTLS()) { + asCluster(self.nodes, function() { + self.waitForQueryAnalysisWriterSetup(primary); + }); + } + } + // Turn off the failpoints now that initial sync and initial setup is complete. if (failPointsSupported) { this.nodes.forEach(function(conn) { @@ -1767,6 +1789,16 @@ var ReplSetTest = function(opts) { } }); + // Wait for the query analysis writer to finish setting up to avoid background writes + // after initiation is done. + asCluster(this.nodes, function() { + const newPrimary = self.nodes[0]; + self.stepUp(newPrimary); + if (!doNotWaitForPrimaryOnlyServices) { + self.waitForQueryAnalysisWriterSetup(newPrimary); + } + }); + print("ReplSetTest initiateWithNodeZeroAsPrimary took " + (new Date() - startTime) + "ms for " + this.nodes.length + " nodes."); }; @@ -1868,6 +1900,41 @@ var ReplSetTest = function(opts) { }, "Timed out waiting for primary only services to finish rebuilding"); }; + /** + * If query sampling is supported, waits for the query analysis writer to finish setting up + * after a primary is elected. This is useful for tests that expect particular write timestamps + * since the query analysis writer setup involves building indexes for the config.sampledQueries + * and config.sampledQueriesDiff collections. 
+ */ + this.waitForQueryAnalysisWriterSetup = function(primary) { + primary = primary || self.getPrimary(); + + const serverStatusRes = assert.commandWorked(primary.adminCommand({serverStatus: 1})); + if (!serverStatusRes.hasOwnProperty("queryAnalyzers")) { + // Query sampling is not supported on this replica set. That is, either it uses binaries + // released before query sampling was introduced or it uses binaries where query + // sampling is guarded by a feature flag and the feature flag is not enabled. + return; + } + + const getParamsRes = primary.adminCommand({getParameter: 1, multitenancySupport: 1}); + if (!getParamsRes.ok || getParamsRes["multitenancySupport"]) { + // Query sampling is not supported on a multi-tenant replica set. + return; + } + + jsTest.log("Waiting for query analysis writer to finish setting up"); + + assert.soonNoExcept(function() { + const sampledQueriesIndexes = + primary.getCollection("config.sampledQueries").getIndexes(); + const sampledQueriesDiffIndexes = + primary.getCollection("config.sampledQueriesDiff").getIndexes(); + // There should be two indexes: _id index and TTL index. + return sampledQueriesIndexes.length == 2 && sampledQueriesDiffIndexes.length == 2; + }, "Timed out waiting for query analysis writer to finish setting up"); + }; + /** * Gets the current replica set config from the specified node index. If no nodeId is specified, * uses the primary node. @@ -2548,82 +2615,76 @@ var ReplSetTest = function(opts) { this.checkReplicaSet(checkOplogs, liveSecondaries, this, liveSecondaries, msgPrefix); }; - /** - * Check oplogs on all nodes, by reading from the last time. Since the oplog is a capped - * collection, each node may not contain the same number of entries and stop if the cursor - * is exhausted on any node being checked. - */ - function checkOplogs(rst, secondaries, msgPrefix = 'checkOplogs') { - secondaries = secondaries || rst._secondaries; - const kCappedPositionLostSentinel = Object.create(null); - const OplogReader = function(mongo) { - this._safelyPerformCursorOperation = function(name, operation, onCappedPositionLost) { - if (!this.cursor) { - throw new Error("OplogReader is not open!"); - } + const ReverseReader = function(mongo, coll, query) { + this.kCappedPositionLostSentinel = Object.create(null); - if (this._cursorExhausted) { - return onCappedPositionLost; - } + this._safelyPerformCursorOperation = function(name, operation, onCappedPositionLost) { + if (!this.cursor) { + throw new Error("ReverseReader is not open!"); + } - try { - return operation(this.cursor); - } catch (err) { - print("Error: " + name + " threw '" + err.message + "' on " + this.mongo.host); - // Occasionally, the capped collection will get truncated while we are iterating - // over it. Since we are iterating over the collection in reverse, getting a - // truncated item means we've reached the end of the list, so return false. - if (err.code === ErrorCodes.CappedPositionLost) { - this.cursor.close(); - this._cursorExhausted = true; - return onCappedPositionLost; - } + if (this._cursorExhausted) { + return onCappedPositionLost; + } - throw err; + try { + return operation(this.cursor); + } catch (err) { + print("Error: " + name + " threw '" + err.message + "' on " + this.mongo.host); + // Occasionally, the capped collection will get truncated while we are iterating + // over it. Since we are iterating over the collection in reverse, getting a + // truncated item means we've reached the end of the list, so return false. 
+ if (err.code === ErrorCodes.CappedPositionLost) { + this.cursor.close(); + this._cursorExhausted = true; + return onCappedPositionLost; } - }; - - this.next = function() { - return this._safelyPerformCursorOperation('next', function(cursor) { - return cursor.next(); - }, kCappedPositionLostSentinel); - }; - this.hasNext = function() { - return this._safelyPerformCursorOperation('hasNext', function(cursor) { - return cursor.hasNext(); - }, false); - }; + throw err; + } + }; - this.query = function(ts) { - const coll = this.getOplogColl(); - const query = {ts: {$gte: ts ? ts : new Timestamp()}}; - // Set the cursor to read backwards, from last to first. We also set the cursor not - // to time out since it may take a while to process each batch and a test may have - // changed "cursorTimeoutMillis" to a short time period. - this._cursorExhausted = false; - this.cursor = - coll.find(query).sort({$natural: -1}).noCursorTimeout().readConcern("local"); - }; + this.next = function() { + return this._safelyPerformCursorOperation('next', function(cursor) { + return cursor.next(); + }, this.kCappedPositionLostSentinel); + }; - this.getFirstDoc = function() { - return this.getOplogColl() - .find() - .sort({$natural: 1}) - .readConcern("local") - .limit(-1) - .next(); - }; + this.hasNext = function() { + return this._safelyPerformCursorOperation('hasNext', function(cursor) { + return cursor.hasNext(); + }, false); + }; - this.getOplogColl = function() { - return this.mongo.getDB("local")[oplogName]; - }; + this.query = function() { + // Set the cursor to read backwards, from last to first. We also set the cursor not + // to time out since it may take a while to process each batch and a test may have + // changed "cursorTimeoutMillis" to a short time period. + this._cursorExhausted = false; + this.cursor = coll.find(query) + .sort({$natural: -1}) + .noCursorTimeout() + .readConcern("local") + .limit(-1); + }; - this.cursor = null; - this._cursorExhausted = true; - this.mongo = mongo; + this.getFirstDoc = function() { + return coll.find(query).sort({$natural: 1}).readConcern("local").limit(-1).next(); }; + this.cursor = null; + this._cursorExhausted = true; + this.mongo = mongo; + }; + + /** + * Check oplogs on all nodes, by reading from the last time. Since the oplog is a capped + * collection, each node may not contain the same number of entries and stop if the cursor + * is exhausted on any node being checked. + */ + function checkOplogs(rst, secondaries, msgPrefix = 'checkOplogs') { + secondaries = secondaries || rst._secondaries; + function assertOplogEntriesEq(oplogEntry0, oplogEntry1, reader0, reader1, prevOplogEntry) { if (!bsonBinaryEqual(oplogEntry0, oplogEntry1)) { const query = prevOplogEntry ? {ts: {$lte: prevOplogEntry.ts}} : {}; @@ -2658,7 +2719,8 @@ var ReplSetTest = function(opts) { } print("checkOplogs going to check oplog of node: " + node.host); - readers[i] = new OplogReader(node); + readers[i] = new ReverseReader( + node, node.getDB("local")[oplogName], {ts: {$gte: new Timestamp()}}); const currTS = readers[i].getFirstDoc().ts; // Find the reader which has the smallestTS. This reader should have the most // number of documents in the oplog. 
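The hunks above replace the old per-node OplogReader with the shared ReverseReader helper defined earlier, which both checkOplogs() and the new pre-image readers drive the same way: open a reverse ($natural: -1) cursor over a capped collection and treat CappedPositionLost as end-of-data. A minimal usage sketch, not part of the patch, assuming a live connection `node` (for example one of `rst.nodes` from a running ReplSetTest) and the usual "oplog.rs" collection name:

```js
// Sketch only: mirrors how checkOplogs() and getPreImageReaders() drive the
// ReverseReader added above. 'node' is assumed to be a live DB connection.
const oplogColl = node.getDB("local")["oplog.rs"];
const reader = new ReverseReader(node, oplogColl, {ts: {$gte: new Timestamp()}});
reader.query();  // positions the cursor at the newest matching entry ($natural: -1)

while (reader.hasNext()) {
    const entry = reader.next();
    if (entry === reader.kCappedPositionLostSentinel) {
        // The capped collection was truncated while iterating; since we read in
        // reverse, losing our position just means we reached the end of the list.
        break;
    }
    printjson(entry);
}
```

Because the reader walks newest-to-oldest, truncation of the capped collection ends the walk cleanly instead of failing the consistency check.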
@@ -2682,7 +2744,7 @@ var ReplSetTest = function(opts) { while (firstReader.hasNext()) { const oplogEntry = firstReader.next(); bytesSinceGC += Object.bsonsize(oplogEntry); - if (oplogEntry === kCappedPositionLostSentinel) { + if (oplogEntry === firstReader.kCappedPositionLostSentinel) { // When using legacy OP_QUERY/OP_GET_MORE reads against mongos, it is // possible for hasNext() to return true but for next() to throw an exception. break; } @@ -2697,7 +2759,8 @@ var ReplSetTest = function(opts) { const otherOplogEntry = readers[i].next(); bytesSinceGC += Object.bsonsize(otherOplogEntry); - if (otherOplogEntry && otherOplogEntry !== kCappedPositionLostSentinel) { + if (otherOplogEntry && + otherOplogEntry !== readers[i].kCappedPositionLostSentinel) { assertOplogEntriesEq.call(this, oplogEntry, otherOplogEntry, @@ -2717,6 +2780,108 @@ var ReplSetTest = function(opts) { print("checkOplogs oplog checks complete."); } + function getPreImageReaders(msgPrefix, rst, secondaries, nsUUID) { + const readers = []; + const nodes = rst.nodes; + for (let i = 0; i < nodes.length; i++) { + const node = nodes[i]; + + if (rst._primary !== node && !secondaries.includes(node)) { + print(`${msgPrefix} -- skipping preimages of node as it's not in our list of ` + + `secondaries: ${node.host}`); + continue; + } + + // Arbiters have no documents in the oplog and thus don't have preimages + // content. + if (isNodeArbiter(node)) { + jsTestLog(`${msgPrefix} -- skipping preimages of arbiter node: ${node.host}`); + continue; + } + + print(`${msgPrefix} -- going to check preimages of ${nsUUID} of node: ${node.host}`); + readers[i] = new ReverseReader( + node, node.getDB("config")["system.preimages"], {"_id.nsUUID": nsUUID}); + // Start all reverseReaders at their last document for the collection. + readers[i].query(); + } + + return readers; + } + + /** + * Check preimages on all nodes, by reading from the last time. Since the preimage may + * or may not be maintained independently, each node may not contain the same number of entries + * and stop if the cursor is exhausted on any node being checked.
+ */ + function checkPreImageCollection(rst, secondaries, msgPrefix = 'checkPreImageCollection') { + secondaries = secondaries || rst._secondaries; + + print(`${msgPrefix} -- starting preimage checks.`); + print(`${msgPrefix} -- waiting for secondaries to be ready.`); + this.awaitSecondaryNodes(self.kDefaultTimeoutMS, secondaries); + if (secondaries.length >= 1) { + let collectionsWithPreimages = {}; + const nodes = rst.nodes; + for (let i = 0; i < nodes.length; i++) { + const node = nodes[i]; + + if (rst._primary !== node && !secondaries.includes(node)) { + print(`${msgPrefix} -- skipping preimages of node as it's not in our list of ` + + `secondaries: ${node.host}`); + continue; + } + + // Arbiters have no documents in the oplog and thus don't have preimages content. + if (isNodeArbiter(node)) { + jsTestLog(`${msgPrefix} -- skipping preimages of arbiter node: ${node.host}`); + continue; + } + + const preImageColl = node.getDB("config")["system.preimages"]; + // Reset connection preferences in case the test has modified them. + preImageColl.getMongo().setSecondaryOk(true); + preImageColl.getMongo().setReadPref(rst._primary === node ? "primary" + : "secondary"); + + // Find all collections participating in pre-images. + const collectionsInPreimages = + preImageColl.aggregate([{$group: {_id: "$_id.nsUUID"}}]).toArray(); + for (const collTs of collectionsInPreimages) { + collectionsWithPreimages[collTs._id] = collTs._id; + } + } + for (const nsUUID of Object.values(collectionsWithPreimages)) { + const readers = getPreImageReaders(msgPrefix, rst, secondaries, nsUUID); + + while (true) { + let preImageEntryToCompare = undefined; + for (const reader of readers) { + if (reader.hasNext()) { + const preImageEntry = reader.next(); + if (preImageEntryToCompare === undefined) { + preImageEntryToCompare = preImageEntry; + } else { + assert(bsonBinaryEqual(preImageEntryToCompare, preImageEntry), + `Detected preimage entries that have different content`); + } + } + } + if (preImageEntryToCompare === undefined) { + break; + } + } + } + } + print(`${msgPrefix} -- preimages check complete.`); + } + + this.checkPreImageCollection = function(msgPrefix) { + var liveSecondaries = _determineLiveSecondaries(); + this.checkReplicaSet( + checkPreImageCollection, liveSecondaries, this, liveSecondaries, msgPrefix); + }; + /** * Waits for an initial connection to a given node. Should only be called after the node's * process has already been started. Updates the corresponding entry in 'this.nodes' with the @@ -2972,6 +3137,12 @@ var ReplSetTest = function(opts) { * Restarts a db without clearing the data directory by default, and using the node(s)'s * original startup options by default. * + * When using this method with mongobridge, be aware that mongobridge may not do a good + * job of detecting that a node was restarted. For example, when mongobridge is being used + * between some Node A and Node B, on restarting Node B mongobridge will not aggressively + * close its connection with Node A, leading Node A to think the connection with Node B is + * still healthy. + * * Option { startClean : true } forces clearing the data directory. * Option { auth : Object } object that contains the auth details for admin credentials. * Should contain the fields 'user' and 'pwd' @@ -3158,6 +3329,8 @@ var ReplSetTest = function(opts) { // refuses to log in live connections if some secondaries are down.
print("ReplSetTest stopSet checking oplogs."); asCluster(this._liveNodes, () => this.checkOplogs()); + print("ReplSetTest stopSet checking preimages."); + asCluster(this._liveNodes, () => this.checkPreImageCollection()); print("ReplSetTest stopSet checking replicated data hashes."); asCluster(this._liveNodes, () => this.checkReplicatedDataHashes()); } else { diff --git a/src/mongo/shell/servers.js b/src/mongo/shell/servers.js index 2e6e5bbbb2e7e..673902c301190 100644 --- a/src/mongo/shell/servers.js +++ b/src/mongo/shell/servers.js @@ -462,6 +462,8 @@ MongoRunner.arrOptions = function(binaryName, args) { // If we've specified a particular binary version, use that if (o.binVersion && o.binVersion != "" && o.binVersion != shellVersion()) { binaryName += "-" + o.binVersion; + } else { + binaryName = getMongoSuffixPath(binaryName); } // Manage legacy options @@ -743,7 +745,6 @@ MongoRunner.mongodOptions = function(opts = {}) { _removeSetParameterIfBeforeVersion( opts, "enableDefaultWriteConcernUpdatesForInitiate", "5.0.0"); _removeSetParameterIfBeforeVersion(opts, "enableReconfigRollbackCommittedWritesCheck", "5.0.0"); - _removeSetParameterIfBeforeVersion(opts, "featureFlagRetryableFindAndModify", "5.0.0"); _removeSetParameterIfBeforeVersion(opts, "allowMultipleArbiters", "5.3.0"); _removeSetParameterIfBeforeVersion( opts, "internalQueryDisableExclusionProjectionFastPath", "6.2.0"); @@ -893,6 +894,8 @@ MongoRunner.mongosOptions = function(opts) { _removeSetParameterIfBeforeVersion( opts, "mongosShutdownTimeoutMillisForSignaledShutdown", "4.5.0", true); + _removeSetParameterIfBeforeVersion( + opts, "failpoint.skipClusterParameterRefresh", "7.1.0", true); return opts; }; @@ -1027,8 +1030,7 @@ MongoRunner.runMongod = function(opts) { } } - var mongodProgram = MongoRunner.getMongodPath(); - opts = MongoRunner.arrOptions(mongodProgram, opts); + opts = MongoRunner.arrOptions("mongod", opts); } var mongod = MongoRunner._startWithArgs(opts, env, waitForConnect); @@ -1065,8 +1067,7 @@ MongoRunner.runMongos = function(opts) { runId = opts.runId; waitForConnect = opts.waitForConnect; env = opts.env; - var mongosProgram = MongoRunner.getMongosPath(); - opts = MongoRunner.arrOptions(mongosProgram, opts); + opts = MongoRunner.arrOptions("mongos", opts); } var mongos = MongoRunner._startWithArgs(opts, env, waitForConnect); @@ -1102,8 +1103,7 @@ MongoRunner.runMongoq = function(opts) { runId = opts.runId; waitForConnect = opts.waitForConnect; env = opts.env; - var mongoqProgram = MongoRunner.getMongoqPath(); - opts = MongoRunner.arrOptions(mongoqProgram, opts); + opts = MongoRunner.arrOptions("mongoqd", opts); } var mongoq = MongoRunner._startWithArgs(opts, env, waitForConnect); @@ -1435,16 +1435,6 @@ function appendSetParameterArgs(argArray) { argArray.push(...["--setParameter", "backtraceLogFile=" + backtraceLogFilePath]); } - // When launching a 5.0 mongod, if we're mentioning the - // `storeFindAndModifyImagesInSideCollection` setParameter and the corresponding feature - // flag is not set, add it for good measure. - if (programMajorMinorVersion === 500 && - isSetParameterMentioned(jsTest.options().setParameters, - "storeFindAndModifyImagesInSideCollection") && - !argArrayContainsSetParameterValue("featureFlagRetryableFindAndModify=")) { - argArray.push(...['--setParameter', "featureFlagRetryableFindAndModify=true"]); - } - // New mongod-specific option in 4.9.x. 
if (programMajorMinorVersion >= 490) { const parameters = jsTest.options().setParameters; @@ -1518,11 +1508,16 @@ function appendSetParameterArgs(argArray) { } } - // TODO: Make this unconditional in 3.8. - if (programMajorMinorVersion > 340) { - if (!argArrayContainsSetParameterValue('orphanCleanupDelaySecs=')) { - argArray.push(...['--setParameter', 'orphanCleanupDelaySecs=1']); - } + // Reduce the default value for `orphanCleanupDelaySecs` to 1 sec to speed up jstests. + if (!argArrayContainsSetParameterValue('orphanCleanupDelaySecs=')) { + argArray.push(...['--setParameter', 'orphanCleanupDelaySecs=1']); + } + + // Increase the default value for `receiveChunkWaitForRangeDeleterTimeoutMS` to 90 + // seconds to prevent failures due to occasional slow range deletions + if (!argArrayContainsSetParameterValue('receiveChunkWaitForRangeDeleterTimeoutMS=')) { + argArray.push( + ...['--setParameter', 'receiveChunkWaitForRangeDeleterTimeoutMS=90000']); } if (programMajorMinorVersion >= 360) { diff --git a/src/mongo/shell/session.js b/src/mongo/shell/session.js index b077301f0cdf5..4f7b6e20c1cd1 100644 --- a/src/mongo/shell/session.js +++ b/src/mongo/shell/session.js @@ -97,6 +97,16 @@ var { this.toString = function toString() { return "SessionOptions(" + this.tojson() + ")"; }; + + this.getRawOpts = function getRawOpts() { + return { + readPreference: _readPreference, + readConcern: _readConcern, + writeConcern: _writeConcern, + causalConsistency: _causalConsistency, + retryWrites: _retryWrites + }; + }; } const kWireVersionSupportingCausalConsistency = 6; @@ -795,6 +805,8 @@ var { } else if (cmdName === "findAndModify" || cmdName === "findandmodify") { // Operations that modify a single document (e.g. findOneAndUpdate()) can be retried. return true; + } else if (cmdName === "bulkWrite") { + return true; } return false; diff --git a/src/mongo/shell/shardingtest.js b/src/mongo/shell/shardingtest.js index e85329bbeb0b0..97a123d836bc9 100644 --- a/src/mongo/shell/shardingtest.js +++ b/src/mongo/shell/shardingtest.js @@ -402,8 +402,8 @@ var ShardingTest = function(params) { const replicaSetsToTerminate = []; [...(shardRS.map(obj => obj.test))].forEach(rst => { // Generating a list of live nodes in the replica set - liveNodes = []; - pidValues = []; + const liveNodes = []; + const pidValues = []; rst.nodes.forEach(function(node) { try { node.getDB('admin')._helloOrLegacyHello(); @@ -416,6 +416,7 @@ var ShardingTest = function(params) { if (!node.pid) { // Getting the pid for the node + let serverStatus; rst.keyFile = rst.keyFile ? 
rst.keyFile : this.keyFile; if (rst.keyFile) { serverStatus = authutil.asCluster(node, rst.keyFile, () => { @@ -512,10 +513,7 @@ var ShardingTest = function(params) { }; this.stop = function(opts = {}) { - // TODO SERVER-74534: Enable metadata consistency check on catalog shard deployment - if (!isCatalogShardMode) { - this.checkMetadataConsistency(); - } + this.checkMetadataConsistency(); this.checkUUIDsConsistentAcrossCluster(); this.checkIndexesConsistentAcrossCluster(); this.checkOrphansAreDeleted(); @@ -537,7 +535,7 @@ var ShardingTest = function(params) { print("ShardingTest stopped all shards, took " + (new Date() - startTime) + "ms for " + this._connections.length + " shards."); - if (!isCatalogShardMode) { + if (!isConfigShardMode) { this.stopAllConfigServers(opts); } @@ -838,7 +836,6 @@ var ShardingTest = function(params) { return; } - var result; for (var i = 0; i < 5; i++) { var otherShard = this.getOther(this.getPrimaryShard(dbName)).name; let cmd = {movechunk: c, find: move, to: otherShard}; @@ -847,7 +844,7 @@ var ShardingTest = function(params) { cmd._waitForDelete = waitForDelete; } - result = this.s.adminCommand(cmd); + const result = this.s.adminCommand(cmd); if (result.ok) break; @@ -875,11 +872,11 @@ var ShardingTest = function(params) { var replSet = this._rs[i]; if (!replSet) continue; - nodes = replSet.test.nodes; - keyFileUsed = replSet.test.keyFile; + const nodes = replSet.test.nodes; + const keyFileUsed = replSet.test.keyFile; for (var j = 0; j < nodes.length; ++j) { - diff = (new Date()).getTime() - start.getTime(); + const diff = (new Date()).getTime() - start.getTime(); var currNode = nodes[j]; // Skip arbiters if (currNode.getDB('admin')._helloOrLegacyHello().arbiterOnly) { @@ -1192,12 +1189,13 @@ var ShardingTest = function(params) { var numShards = otherParams.hasOwnProperty('shards') ? otherParams.shards : 2; var mongosVerboseLevel = otherParams.hasOwnProperty('verbose') ? otherParams.verbose : 1; var numMongos = otherParams.hasOwnProperty('mongos') ? otherParams.mongos : 1; - const usedDefaultNumConfigs = !otherParams.hasOwnProperty('config'); + const usedDefaultNumConfigs = + !otherParams.hasOwnProperty('config') || otherParams.config === undefined; var numConfigs = otherParams.hasOwnProperty('config') ? otherParams.config : 3; - let isCatalogShardMode = - otherParams.hasOwnProperty('catalogShard') ? otherParams.catalogShard : false; - isCatalogShardMode = isCatalogShardMode || jsTestOptions().catalogShard; + let isConfigShardMode = + otherParams.hasOwnProperty('configShard') ? 
otherParams.configShard : false; + isConfigShardMode = isConfigShardMode || jsTestOptions().configShard; if ("shardAsReplicaSet" in otherParams) { throw new Error("Use of deprecated option 'shardAsReplicaSet'"); @@ -1227,8 +1225,8 @@ var ShardingTest = function(params) { numShards = tempCount; } - if (isCatalogShardMode) { - assert(numShards > 0, 'Catalog shard mode requires at least one shard'); + if (isConfigShardMode) { + assert(numShards > 0, 'Config shard mode requires at least one shard'); } if (Array.isArray(numMongos)) { @@ -1238,7 +1236,7 @@ var ShardingTest = function(params) { numMongos = numMongos.length; } else if (isObject(numMongos)) { - var tempCount = 0; + let tempCount = 0; for (var i in numMongos) { otherParams[i] = numMongos[i]; tempCount++; @@ -1249,14 +1247,14 @@ var ShardingTest = function(params) { if (Array.isArray(numConfigs)) { assert(!usedDefaultNumConfigs); - for (var i = 0; i < numConfigs.length; i++) { + for (let i = 0; i < numConfigs.length; i++) { otherParams["c" + i] = numConfigs[i]; } numConfigs = numConfigs.length; } else if (isObject(numConfigs)) { assert(!usedDefaultNumConfigs); - var tempCount = 0; + let tempCount = 0; for (var i in numConfigs) { otherParams[i] = numConfigs[i]; tempCount++; @@ -1359,9 +1357,9 @@ var ShardingTest = function(params) { var setIsConfigSvr = false; - if (isCatalogShardMode && i == 0) { - otherParams.configOptions = - Object.merge(otherParams.configOptions, {configsvr: ""}); + if (isConfigShardMode && i == 0) { + otherParams.configOptions = Object.merge( + otherParams.configOptions, {configsvr: "", storageEngine: "wiredTiger"}); rsDefaults = Object.merge(rsDefaults, otherParams.configOptions); setIsConfigSvr = true; } else { @@ -1378,7 +1376,9 @@ var ShardingTest = function(params) { rsDefaults = Object.merge(rsDefaults, otherParams.rsOptions); rsDefaults.nodes = rsDefaults.nodes || otherParams.numReplicas; } else { - if (jsTestOptions().shardMixedBinVersions) { + // Our documented upgrade/downgrade paths let us assume config server nodes will + // always be fully upgraded before shard nodes, so skip a config shard. + if (jsTestOptions().shardMixedBinVersions && !setIsConfigSvr) { if (!otherParams.shardOptions) { otherParams.shardOptions = {}; } @@ -1418,9 +1418,9 @@ var ShardingTest = function(params) { numReplicas = 1; } - // Unless explicitly given a number of config servers, a catalog shard uses the shard's + // Unless explicitly given a number of config servers, a config shard uses the shard's // number of nodes to increase odds of compatibility with test assertions. 
- if (isCatalogShardMode && i == 0 && !usedDefaultNumConfigs) { + if (isConfigShardMode && i == 0 && !usedDefaultNumConfigs) { numReplicas = numConfigs; } @@ -1452,7 +1452,7 @@ var ShardingTest = function(params) { {setName: setName, test: rs, nodes: rs.startSetAsync(rsDefaults), url: rs.getURL()}; } - if (isCatalogShardMode) { + if (isConfigShardMode) { this.configRS = this._rs[0].test; } else { // @@ -1505,7 +1505,7 @@ var ShardingTest = function(params) { // for (let i = 0; i < numShards; i++) { print("Waiting for shard " + this._rs[i].setName + " to finish starting up."); - if (isCatalogShardMode && i == 0) { + if (isConfigShardMode && i == 0) { continue; } this._rs[i].test.startSetAwait(); @@ -1530,7 +1530,7 @@ var ShardingTest = function(params) { // const shardsRS = this._rs.map(obj => obj.test); var replSetToIntiateArr = []; - if (isCatalogShardMode) { + if (isConfigShardMode) { replSetToIntiateArr = [...shardsRS]; } else { replSetToIntiateArr = [...shardsRS, this.configRS]; @@ -1830,26 +1830,25 @@ var ShardingTest = function(params) { var n = z.name || z.host || z; var name; - if (isCatalogShardMode && idx == 0) { + if (isConfigShardMode && idx == 0) { name = "config"; - print("ShardingTest " + testName + " transitioning to catalog shard"); + print("ShardingTest " + testName + " transitioning to config shard"); - function transitionToCatalogShard() { + function transitionFromDedicatedConfigServer() { return assert.commandWorked( - admin.runCommand({transitionToCatalogShard: 1})); + admin.runCommand({transitionFromDedicatedConfigServer: 1})); } - // TODO SERVER-74448: Investigate if transitionToCatalogShard should be - // added to the localhost bypass exception like addShard. if (keyFile) { - authutil.asCluster(admin.getMongo(), keyFile, transitionToCatalogShard); + authutil.asCluster( + admin.getMongo(), keyFile, transitionFromDedicatedConfigServer); } else if (mongosOptions[0] && mongosOptions[0].keyFile) { authutil.asCluster(admin.getMongo(), mongosOptions[0].keyFile, - transitionToCatalogShard); + transitionFromDedicatedConfigServer); } else { - transitionToCatalogShard(); + transitionFromDedicatedConfigServer(); } z.shardName = name; diff --git a/src/mongo/shell/shell_options.cpp b/src/mongo/shell/shell_options.cpp index eeb06779abcb5..3f170093e3173 100644 --- a/src/mongo/shell/shell_options.cpp +++ b/src/mongo/shell/shell_options.cpp @@ -28,27 +28,43 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/shell/shell_options.h" - -#include - #include +#include +#include +#include + +#include +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/bson/util/builder.h" +#include "mongo/bson/util/builder_fwd.h" #include "mongo/client/client_api_version_parameters_gen.h" #include "mongo/client/mongo_uri.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/auth/sasl_command_constants.h" #include "mongo/db/server_options.h" -#include "mongo/logv2/log.h" +#include "mongo/db/server_parameter.h" +#include "mongo/db/tenant_id.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" +#include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/shell/shell_options.h" #include "mongo/shell/shell_utils.h" #include "mongo/transport/message_compressor_options_client_gen.h" #include 
"mongo/transport/message_compressor_registry.h" +#include "mongo/util/assert_util.h" #include "mongo/util/net/socket_utils.h" +#include "mongo/util/options_parser/environment.h" +#include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/str.h" #include "mongo/util/version.h" @@ -67,7 +83,6 @@ const std::set kSetShellParameterAllowlist = { "awsEC2InstanceMetadataUrl", "awsECSInstanceMetadataUrl", "disabledSecureAllocatorDomains", - "featureFlagFLE2Range", "featureFlagFLE2ProtocolVersion2", "newLineAfterPasswordPromptForTest", "ocspClientHttpTimeoutSecs", @@ -296,8 +311,7 @@ Status storeMongoShellOptions(const moe::Environment& params, } if (!shellGlobalParams.networkMessageCompressors.empty()) { - const auto ret = - storeMessageCompressionOptions(shellGlobalParams.networkMessageCompressors); + auto ret = storeMessageCompressionOptions(shellGlobalParams.networkMessageCompressors); if (!ret.isOK()) { return ret; } diff --git a/src/mongo/shell/shell_options.h b/src/mongo/shell/shell_options.h index ca84f56a3116e..d4dcc6ccb90eb 100644 --- a/src/mongo/shell/shell_options.h +++ b/src/mongo/shell/shell_options.h @@ -34,6 +34,7 @@ #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/util/duration.h" namespace mongo { diff --git a/src/mongo/shell/shell_options.idl b/src/mongo/shell/shell_options.idl index b161325c43c72..9ecfa8bcefcfe 100644 --- a/src/mongo/shell/shell_options.idl +++ b/src/mongo/shell/shell_options.idl @@ -29,6 +29,7 @@ global: cpp_namespace: "mongo" cpp_includes: + - "mongo/client/sasl_oidc_client_params.h" - "mongo/shell/shell_options.h" - "mongo/shell/shell_utils.h" configs: @@ -203,6 +204,15 @@ configs: description: "Remote host name to use for purpose of GSSAPI/Kerberos authentication" arg_vartype: String + oidcAccessToken: + description: >- + If set, the shell will pass this token to the server for any user that tries + authenticating with the MONGODB-OIDC mechanism. This will bypass the device authorization + grant flow. + source: cli + arg_vartype: String + cpp_varname: oidcClientGlobalParams.oidcAccessToken + "idleSessionTimeout": description: "Terminate the Shell session if it's been idle for this many seconds" arg_vartype: Int diff --git a/src/mongo/shell/shell_options_init.cpp b/src/mongo/shell/shell_options_init.cpp index 6924eaa1954b7..ee13b0804590b 100644 --- a/src/mongo/shell/shell_options_init.cpp +++ b/src/mongo/shell/shell_options_init.cpp @@ -27,12 +27,16 @@ * it in the license file. */ -#include "mongo/shell/shell_options.h" - #include +#include +#include -#include "mongo/transport/message_compressor_registry.h" +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/shell/shell_options.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" #include "mongo/util/quick_exit.h" diff --git a/src/mongo/shell/shell_options_storage.cpp b/src/mongo/shell/shell_options_storage.cpp index a2462851b1f23..3b42de7d64080 100644 --- a/src/mongo/shell/shell_options_storage.cpp +++ b/src/mongo/shell/shell_options_storage.cpp @@ -27,7 +27,8 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include #include "mongo/base/string_data.h" #include "mongo/client/mongo_uri.h" diff --git a/src/mongo/shell/shell_options_test.cpp b/src/mongo/shell/shell_options_test.cpp index 55da4d0fa4887..c42958d22e366 100644 --- a/src/mongo/shell/shell_options_test.cpp +++ b/src/mongo/shell/shell_options_test.cpp @@ -27,12 +27,14 @@ * it in the license file. */ +#include #include #include #include #include "mongo/shell/shell_options.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/shell/shell_utils.cpp b/src/mongo/shell/shell_utils.cpp index 698bb89292b5a..797ca772d0b75 100644 --- a/src/mongo/shell/shell_utils.cpp +++ b/src/mongo/shell/shell_utils.cpp @@ -28,48 +28,76 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/shell/shell_utils.h" - #include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include #include #include +#include #include #ifndef _WIN32 #include -#include #endif +#include "mongo/base/data_range.h" +#include "mongo/base/error_codes.h" #include "mongo/base/shim.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/client/client_api_version_parameters_gen.h" +#include "mongo/client/connection_string.h" #include "mongo/client/dbclient_base.h" +#include "mongo/client/mongo_uri.h" #include "mongo/client/replica_set_monitor.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/crypto/hash_block.h" +#include "mongo/crypto/sha256_block.h" #include "mongo/db/auth/security_token_gen.h" #include "mongo/db/auth/validated_tenancy_scope.h" +#include "mongo/db/database_name.h" #include "mongo/db/hasher.h" -#include "mongo/logv2/log.h" #include "mongo/platform/decimal128.h" #include "mongo/platform/mutex.h" #include "mongo/platform/random.h" #include "mongo/scripting/engine.h" #include "mongo/shell/bench.h" #include "mongo/shell/shell_options.h" +#include "mongo/shell/shell_utils.h" #include "mongo/shell/shell_utils_extended.h" #include "mongo/shell/shell_utils_launcher.h" +#include "mongo/unittest/golden_test_base.h" +#include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" #include "mongo/util/fail_point.h" #include "mongo/util/processinfo.h" -#include "mongo/util/quick_exit.h" #include "mongo/util/represent_as.h" -#include "mongo/util/text.h" +#include "mongo/util/str.h" +#include "mongo/util/text.h" // IWYU pragma: keep #include "mongo/util/version.h" -#include "mongo/unittest/golden_test_base.h" -#include "mongo/unittest/test_info.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -658,6 +686,48 @@ BSONObj _closeGoldenData(const BSONObj& input, void*) { return {}; } +/** + * This function is a light-weight BSON builder to support building an arbitrary BSON in shell. + * This function is particularly useful for testing invalid BSON object which is impossible to be + * constructed from JS shell environment. + * + * The field names and values in the `args` are in the order like: name1, value1, name2, value... 
+ * + * args: + * "0": string; field name for the first field + * "1": any; value for the first field + * "2": string; field name for the second field + * "3": any; value for the second field + * "4": ... + * + * e.g. + * > let bsonObj = _buildBsonObj("_id", 1, "a", 2, "foo", "bar"); + * > printjson(bsonObj) + * { "_id" : 1, "a" : 2, "foo" : "bar" } + */ +BSONObj _buildBsonObj(const BSONObj& args, void*) { + ::mongo::BSONObjBuilder builder(64); + int fieldNum = 0; // next field name in numeric form + BSONElement name; // next pipe relative path + BSONElement value; // next pipe relative path + + do { + name = args.getField(std::to_string(fieldNum++)); + value = args.getField(std::to_string(fieldNum++)); + if (name.type() == BSONType::EOO) { + break; + } + uassert(7587900, + str::stream() << "BSON field name must be a string: " << name, + name.type() == BSONType::String); + uassert(7587901, + str::stream() << "Missing BSON field value: " << value, + value.type() != BSONType::EOO); + builder << name.str() << value; + } while (name.type() != BSONType::EOO); + return BSON("" << builder.obj()); +} + void installShellUtils(Scope& scope) { scope.injectNative("getMemInfo", JSGetMemInfo); scope.injectNative("_createSecurityToken", _createSecurityToken); @@ -677,6 +747,7 @@ void installShellUtils(Scope& scope) { scope.injectNative("_openGoldenData", _openGoldenData); scope.injectNative("_writeGoldenData", _writeGoldenData); scope.injectNative("_closeGoldenData", _closeGoldenData); + scope.injectNative("_buildBsonObj", _buildBsonObj); installShellUtilsLauncher(scope); installShellUtilsExtended(scope); @@ -750,7 +821,7 @@ void ConnectionRegistry::registerConnection(DBClientBase& client, StringData uri command = BSON("whatsmyuri" << 1); } - if (client.runCommand({boost::none, "admin"}, command, info)) { + if (client.runCommand(DatabaseName::kAdmin, command, info)) { stdx::lock_guard lk(_mutex); _connectionUris[uri.toString()].insert(info["you"].str()); } @@ -771,7 +842,7 @@ void ConnectionRegistry::killOperationsOnAllConnections(bool withPrompt) const { const std::set& uris = connection.second; BSONObj currentOpRes; - conn->runCommand({boost::none, "admin"}, BSON("currentOp" << 1), currentOpRes); + conn->runCommand(DatabaseName::kAdmin, BSON("currentOp" << 1), currentOpRes); if (!currentOpRes["inprog"].isABSONObj()) { // We don't have permissions (or the call didn't succeed) - go to the next connection. 
continue; @@ -808,7 +879,7 @@ void ConnectionRegistry::killOperationsOnAllConnections(bool withPrompt) const { if (!withPrompt || prompter.confirm()) { BSONObj info; conn->runCommand( - {boost::none, "admin"}, BSON("killOp" << 1 << "op" << op["opid"]), info); + DatabaseName::kAdmin, BSON("killOp" << 1 << "op" << op["opid"]), info); } else { return; } diff --git a/src/mongo/shell/shell_utils.h b/src/mongo/shell/shell_utils.h index 5677c44c3be20..2d6a9185b0700 100644 --- a/src/mongo/shell/shell_utils.h +++ b/src/mongo/shell/shell_utils.h @@ -30,10 +30,15 @@ #pragma once #include +#include #include #include #include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/client/connection_string.h" #include "mongo/client/mongo_uri.h" #include "mongo/db/jsobj.h" #include "mongo/platform/mutex.h" diff --git a/src/mongo/shell/shell_utils_extended.cpp b/src/mongo/shell/shell_utils_extended.cpp index be8a1806c347b..41115cee5a142 100644 --- a/src/mongo/shell/shell_utils_extended.cpp +++ b/src/mongo/shell/shell_utils_extended.cpp @@ -28,32 +28,57 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include // IWYU pragma: keep +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" #ifndef _WIN32 #include -#include #endif -#include -#include -#include -#include - +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bson_bin_util.h" #include "mongo/bson/bson_validate.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/bsoncolumn.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/scripting/engine.h" #include "mongo/shell/shell_utils.h" -#include "mongo/shell/shell_utils_launcher.h" +#include "mongo/util/assert_util.h" #include "mongo/util/errno_util.h" -#include "mongo/util/file.h" +#include "mongo/util/md5.h" #include "mongo/util/md5.hpp" #include "mongo/util/net/socket_utils.h" #include "mongo/util/password.h" #include "mongo/util/scopeguard.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep + +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -456,7 +481,7 @@ BSONObj writeBsonArrayToFile(const BSONObj& args, void* data) { BSONObj getHostName(const BSONObj& a, void* data) { uassert(13411, "getHostName accepts no arguments", a.nFields() == 0); char buf[260]; // HOST_NAME_MAX is usually 255 - verify(gethostname(buf, 260) == 0); + MONGO_verify(gethostname(buf, 260) == 0); buf[259] = '\0'; return BSON("" << buf); } diff --git a/src/mongo/shell/shell_utils_launcher.cpp b/src/mongo/shell/shell_utils_launcher.cpp index 30a04df5a8910..7d2f6260bf7a1 100644 --- a/src/mongo/shell/shell_utils_launcher.cpp +++ b/src/mongo/shell/shell_utils_launcher.cpp @@ -30,58 +30,69 @@ #include "mongo/shell/shell_utils_launcher.h" #include -#include -#include -#include -#include -#include +#include #include -#include +#include +#include #include -#include +#include // IWYU pragma: keep #include #include #include #include +#include +#include +#include +#include 
#include +// IWYU pragma: no_include "boost/container/detail/std_fwd.hpp" +#include +#include +#include +#include +#include +// IWYU pragma: no_include "boost/system/detail/error_code.hpp" + #ifdef _WIN32 #include + #define SIGKILL 9 #else -#include -#include -#include -#include -#include -#include #endif -#include "mongo/base/environment_buffer.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/data_view.h" #include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/bson/util/builder.h" -#include "mongo/client/dbclient_connection.h" +#include "mongo/client/dbclient_connection.h" // IWYU pragma: keep +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/service_context.h" #include "mongo/db/storage/named_pipe.h" #include "mongo/db/traffic_reader.h" #include "mongo/logv2/log.h" -#include "mongo/platform/basic.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/scripting/engine.h" #include "mongo/shell/named_pipe_test_helper.h" #include "mongo/shell/program_runner.h" #include "mongo/shell/shell_options.h" #include "mongo/shell/shell_utils.h" +#include "mongo/stdx/thread.h" #include "mongo/util/assert_util.h" -#include "mongo/util/ctype.h" #include "mongo/util/destructor_guard.h" -#include "mongo/util/exit.h" +#include "mongo/util/errno_util.h" #include "mongo/util/exit_code.h" -#include "mongo/util/net/hostandport.h" -#include "mongo/util/quick_exit.h" -#include "mongo/util/scopeguard.h" -#include "mongo/util/signal_win32.h" -#include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/signal_win32.h" // IWYU pragma: keep +#include "mongo/util/text.h" // IWYU pragma: keep +#include "mongo/util/time_support.h" #include "mongo/util/version/releases.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault @@ -410,7 +421,7 @@ inline void kill_wrapper(ProcessId pid, int sig, int port, const BSONObj& opt) { BSONObjBuilder b; b.append("shutdown", 1); b.append("force", 1); - conn.runCommand(DatabaseName(boost::none, "admin"), b.done(), info); + conn.runCommand(DatabaseName::kAdmin, b.done(), info); } catch (...) { // Do nothing. This command never returns data to the client and the driver // doesn't like that. 
diff --git a/src/mongo/shell/shell_utils_test.cpp b/src/mongo/shell/shell_utils_test.cpp index 64c82f872df5b..41e6bf0d26076 100644 --- a/src/mongo/shell/shell_utils_test.cpp +++ b/src/mongo/shell/shell_utils_test.cpp @@ -28,8 +28,9 @@ */ #include "mongo/shell/shell_utils.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" + +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo { namespace { diff --git a/src/mongo/shell/utils.js b/src/mongo/shell/utils.js index d759e743dd137..f0ad3d60220f4 100644 --- a/src/mongo/shell/utils.js +++ b/src/mongo/shell/utils.js @@ -437,7 +437,7 @@ jsTestOptions = function() { backupOnRestartDir: TestData.backupOnRestartDir || false, evergreenTaskId: TestData.evergreenTaskId || null, - catalogShard: TestData.catalogShard || false, + configShard: TestData.configShard || false, }); } return _jsTestOptions; diff --git a/src/mongo/shell/utils_sh.js b/src/mongo/shell/utils_sh.js index 8428057847576..193fe902968e9 100644 --- a/src/mongo/shell/utils_sh.js +++ b/src/mongo/shell/utils_sh.js @@ -65,11 +65,11 @@ sh._writeBalancerStateDeprecated = function(onOrNot) { * Asserts the specified command is executed successfully. However, if a retryable error occurs, the * command is retried. */ -sh._assertRetryableCommandWorked = function(cmd, msg) { +sh.assertRetryableCommandWorkedOrFailedWithCodes = function(cmd, msg, expectedErrorCodes = []) { var res = undefined; assert.soon(function() { try { - res = cmd(); + res = assert.commandWorked(cmd()); return true; } catch (err) { if (err instanceof WriteError && ErrorCodes.isRetriableError(err.code)) { @@ -84,6 +84,9 @@ sh._assertRetryableCommandWorked = function(cmd, msg) { return false; } } + if (expectedErrorCodes.includes(err.code)) { + return true; + } throw err; } }, msg); @@ -231,19 +234,27 @@ sh.startBalancer = function(timeoutMs, interval) { sh.startAutoMerger = function(configDB) { if (configDB === undefined) configDB = sh._getConfigDB(); - return assert.commandWorked( - configDB.settings.update({_id: 'automerge'}, - {$set: {enabled: true}}, - {upsert: true, writeConcern: {w: 'majority', wtimeout: 30000}})); + + // Set retryable write since mongos doesn't do it automatically. + const mongosSession = configDB.getMongo().startSession({retryWrites: true}); + const sessionConfigDB = mongosSession.getDatabase('config'); + return assert.commandWorked(sessionConfigDB.settings.update( + {_id: 'automerge'}, + {$set: {enabled: true}}, + {upsert: true, writeConcern: {w: 'majority', wtimeout: 30000}})); }; sh.stopAutoMerger = function(configDB) { if (configDB === undefined) configDB = sh._getConfigDB(); - return assert.commandWorked( - configDB.settings.update({_id: 'automerge'}, - {$set: {enabled: false}}, - {upsert: true, writeConcern: {w: 'majority', wtimeout: 30000}})); + + // Set retryable write since mongos doesn't do it automatically. 
+ const mongosSession = configDB.getMongo().startSession({retryWrites: true}); + const sessionConfigDB = mongosSession.getDatabase('config'); + return assert.commandWorked(sessionConfigDB.settings.update( + {_id: 'automerge'}, + {$set: {enabled: false}}, + {upsert: true, writeConcern: {w: 'majority', wtimeout: 30000}})); }; sh.shouldAutoMerge = function(configDB) { @@ -267,8 +278,8 @@ sh.disableAutoMerge = function(coll) { sh._checkMongos(); } - return sh._assertRetryableCommandWorked(() => { - dbase.getSiblingDB("config").collections.update( + return sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return dbase.getSiblingDB("config").collections.update( {_id: coll + ""}, {$set: {"enableAutoMerge": false}}, {writeConcern: {w: 'majority', wtimeout: 60000}}); @@ -286,8 +297,8 @@ sh.enableAutoMerge = function(coll) { sh._checkMongos(); } - return sh._assertRetryableCommandWorked(() => { - dbase.getSiblingDB("config").collections.update( + return sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return dbase.getSiblingDB("config").collections.update( {_id: coll + ""}, {$unset: {"enableAutoMerge": 1}}, {writeConcern: {w: 'majority', wtimeout: 60000}}); @@ -365,8 +376,8 @@ sh.disableBalancing = function(coll) { sh._checkMongos(); } - return sh._assertRetryableCommandWorked(() => { - dbase.getSiblingDB("config").collections.update( + return sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return dbase.getSiblingDB("config").collections.update( {_id: coll + ""}, {$set: {"noBalance": true}}, {writeConcern: {w: 'majority', wtimeout: 60000}}); @@ -384,8 +395,8 @@ sh.enableBalancing = function(coll) { sh._checkMongos(); } - return sh._assertRetryableCommandWorked(() => { - dbase.getSiblingDB("config").collections.update( + return sh.assertRetryableCommandWorkedOrFailedWithCodes(() => { + return dbase.getSiblingDB("config").collections.update( {_id: coll + ""}, {$set: {"noBalance": false}}, {writeConcern: {w: 'majority', wtimeout: 60000}}); @@ -591,26 +602,8 @@ sh.addTagRange = function(ns, min, max, tag) { {upsert: true, writeConcern: {w: 'majority', wtimeout: 60000}})); }; -sh.removeTagRange = function(ns, min, max, tag) { - var result = sh.removeRangeFromZone(ns, min, max); - if (result.code != ErrorCodes.CommandNotFound) { - return result; - } - - var config = sh._getConfigDB(); - // warn if the namespace does not exist, even dropped - if (config.collections.findOne({_id: ns}) == null) { - print("Warning: can't find the namespace: " + ns + " - collection likely never sharded"); - } - // warn if the tag being removed is still in use - if (config.shards.findOne({tags: tag})) { - print("Warning: tag still in use by at least one shard"); - } - // max and tag criteria not really needed, but including them avoids potentially unexpected - // behavior. - return assert.commandWorked( - config.tags.remove({_id: {ns: ns, min: min}, max: max, tag: tag}, - {writeConcern: {w: 'majority', wtimeout: 60000}})); +sh.removeTagRange = function(ns, min, max) { + return sh._getConfigDB().adminCommand({updateZoneKeyRange: ns, min: min, max: max, zone: null}); }; sh.addShardToZone = function(shardName, zoneName) { diff --git a/src/mongo/stdx/condition_variable_bm.cpp b/src/mongo/stdx/condition_variable_bm.cpp index 92799ad1387e5..4f892910e7be9 100644 --- a/src/mongo/stdx/condition_variable_bm.cpp +++ b/src/mongo/stdx/condition_variable_bm.cpp @@ -27,13 +27,12 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - #include +// IWYU pragma: no_include "cxxabi.h" +#include +#include #include "mongo/stdx/condition_variable.h" -#include "mongo/stdx/mutex.h" -#include "mongo/stdx/thread.h" namespace mongo { diff --git a/src/mongo/stdx/set_terminate_dispatch_test.cpp b/src/mongo/stdx/set_terminate_dispatch_test.cpp index 6326ec3dd3354..a345ba77ab0cf 100644 --- a/src/mongo/stdx/set_terminate_dispatch_test.cpp +++ b/src/mongo/stdx/set_terminate_dispatch_test.cpp @@ -27,13 +27,11 @@ * it in the license file. */ -#include "mongo/stdx/exception.h" - #include - +#include #include -#include "mongo/stdx/thread.h" +#include "mongo/stdx/exception.h" #include "mongo/util/exit_code.h" namespace { diff --git a/src/mongo/stdx/set_terminate_from_main_die_in_thread_test.cpp b/src/mongo/stdx/set_terminate_from_main_die_in_thread_test.cpp index 4e8df03569d8f..a4bcc99891574 100644 --- a/src/mongo/stdx/set_terminate_from_main_die_in_thread_test.cpp +++ b/src/mongo/stdx/set_terminate_from_main_die_in_thread_test.cpp @@ -27,12 +27,11 @@ * it in the license file. */ -#include "mongo/stdx/exception.h" - #include - +#include #include +#include "mongo/stdx/exception.h" #include "mongo/stdx/thread.h" #include "mongo/util/exit_code.h" diff --git a/src/mongo/stdx/set_terminate_from_thread_die_in_main_test.cpp b/src/mongo/stdx/set_terminate_from_thread_die_in_main_test.cpp index 03287fe41cfee..751762ac8a3a3 100644 --- a/src/mongo/stdx/set_terminate_from_thread_die_in_main_test.cpp +++ b/src/mongo/stdx/set_terminate_from_thread_die_in_main_test.cpp @@ -27,12 +27,11 @@ * it in the license file. */ -#include "mongo/stdx/exception.h" - #include - +#include #include +#include "mongo/stdx/exception.h" #include "mongo/stdx/thread.h" #include "mongo/util/exit_code.h" diff --git a/src/mongo/stdx/set_terminate_from_thread_die_in_thread_test.cpp b/src/mongo/stdx/set_terminate_from_thread_die_in_thread_test.cpp index 9fc720dbcb4b8..584526849facc 100644 --- a/src/mongo/stdx/set_terminate_from_thread_die_in_thread_test.cpp +++ b/src/mongo/stdx/set_terminate_from_thread_die_in_thread_test.cpp @@ -27,12 +27,11 @@ * it in the license file. */ -#include "mongo/stdx/exception.h" - #include - +#include #include +#include "mongo/stdx/exception.h" #include "mongo/stdx/thread.h" #include "mongo/util/exit_code.h" diff --git a/src/mongo/stdx/set_terminate_internals.cpp b/src/mongo/stdx/set_terminate_internals.cpp index 21494e652944d..852171deb7f71 100644 --- a/src/mongo/stdx/set_terminate_internals.cpp +++ b/src/mongo/stdx/set_terminate_internals.cpp @@ -27,14 +27,13 @@ * it in the license file. */ +#if defined(_WIN32) #include "mongo/stdx/exception.h" #include #include -#if defined(_WIN32) - namespace mongo { namespace stdx { // `dispatch_impl` is circularly dependent with the initialization of `terminationHandler`, but diff --git a/src/mongo/stdx/sigaltstack_location_test.cpp b/src/mongo/stdx/sigaltstack_location_test.cpp index 6c57146721f79..d0b9aa60d0b19 100644 --- a/src/mongo/stdx/sigaltstack_location_test.cpp +++ b/src/mongo/stdx/sigaltstack_location_test.cpp @@ -27,23 +27,35 @@ * it in the license file. 
*/ -#include "mongo/stdx/thread.h" -#include "mongo/util/exit_code.h" -#include +// IWYU pragma: no_include "bits/types/siginfo_t.h" +// IWYU pragma: no_include "bits/types/stack_t.h" #include -#include +#include +#include +#include +#include +#include #include #include -#include -#include +#include #ifndef _WIN32 -#include #include +#endif + +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/stdx/thread.h" +#include "mongo/util/exit_code.h" + +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) #include #endif +#if !defined(__has_feature) +#define __has_feature(x) 0 +#endif + #if !MONGO_HAS_SIGALTSTACK int main() { @@ -53,10 +65,6 @@ int main() { #else // MONGO_HAS_SIGALTSTACK -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - namespace mongo::stdx { namespace { diff --git a/src/mongo/stdx/unordered_map.h b/src/mongo/stdx/unordered_map.h index b748e636debff..ef1ad47d8a6a9 100644 --- a/src/mongo/stdx/unordered_map.h +++ b/src/mongo/stdx/unordered_map.h @@ -29,9 +29,10 @@ #pragma once -#include "mongo/stdx/trusted_hasher.h" - #include +#include + +#include "mongo/stdx/trusted_hasher.h" namespace mongo { namespace stdx { diff --git a/src/mongo/stdx/unordered_map_test.cpp b/src/mongo/stdx/unordered_map_test.cpp index 55efb79bf355b..6f284dd1babed 100644 --- a/src/mongo/stdx/unordered_map_test.cpp +++ b/src/mongo/stdx/unordered_map_test.cpp @@ -30,13 +30,15 @@ #include "mongo/stdx/unordered_map.h" #include -#include +#include #include #include -#include +#include -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/stdx/variant.h b/src/mongo/stdx/variant.h index b009190fd3ab0..d38c84a099cde 100644 --- a/src/mongo/stdx/variant.h +++ b/src/mongo/stdx/variant.h @@ -29,31 +29,31 @@ #pragma once -#include +#include namespace mongo::stdx { -using ::mpark::get; -using ::mpark::get_if; -using ::mpark::holds_alternative; -using ::mpark::variant; -using ::mpark::visit; +using ::std::get; +using ::std::get_if; +using ::std::holds_alternative; +using ::std::variant; +using ::std::visit; -using ::mpark::variant_alternative; -using ::mpark::variant_alternative_t; -using ::mpark::variant_size; -using ::mpark::variant_size_v; +using ::std::variant_alternative; +using ::std::variant_alternative_t; +using ::std::variant_size; +using ::std::variant_size_v; -constexpr auto variant_npos = ::mpark::variant_npos; +constexpr auto variant_npos = ::std::variant_npos; -using ::mpark::operator==; -using ::mpark::operator!=; -using ::mpark::operator<; -using ::mpark::operator>; -using ::mpark::operator<=; -using ::mpark::operator>=; +using ::std::operator==; +using ::std::operator!=; +using ::std::operator<; +using ::std::operator>; +using ::std::operator<=; +using ::std::operator>=; -using ::mpark::bad_variant_access; -using ::mpark::monostate; +using ::std::bad_variant_access; +using ::std::monostate; } // namespace mongo::stdx diff --git a/src/mongo/tla_plus/PriorityTicketHolder/MCPriorityTicketHolder.cfg b/src/mongo/tla_plus/PriorityTicketHolder/MCPriorityTicketHolder.cfg index cd09326e99a6e..00f160cdefccd 100644 --- a/src/mongo/tla_plus/PriorityTicketHolder/MCPriorityTicketHolder.cfg +++ b/src/mongo/tla_plus/PriorityTicketHolder/MCPriorityTicketHolder.cfg @@ -9,4 +9,3 @@ SPECIFICATION Spec INVARIANT TicketsAreAtMostTheInitialNumber AllTicketsAvailableImpliesNoWaiters -NumQueuedAlwaysGreaterOrEqualTo0 diff --git 
a/src/mongo/tools/mongo_tidy_checks/MongoAssertCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoAssertCheck.cpp index 447e06055348c..e3ed479dc3116 100644 --- a/src/mongo/tools/mongo_tidy_checks/MongoAssertCheck.cpp +++ b/src/mongo/tools/mongo_tidy_checks/MongoAssertCheck.cpp @@ -59,7 +59,9 @@ AST_MATCHER(Expr, isAssertMacroExpansion) { // Check if the file name contains "assert.h" // will also be caught because it redirects to assert.h auto FileName = SM.getFilename(SM.getSpellingLoc(MacroLoc)); - return llvm::StringRef(FileName).contains("assert.h"); + + return llvm::StringRef(FileName).contains("assert.h") && + !llvm::StringRef(FileName).contains("src/mongo/unittest/assert.h"); } return false; } diff --git a/src/mongo/tools/mongo_tidy_checks/MongoCctypeCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoCctypeCheck.h index 3ee5a8e2ab8c5..8e75d5945134a 100644 --- a/src/mongo/tools/mongo_tidy_checks/MongoCctypeCheck.h +++ b/src/mongo/tools/mongo_tidy_checks/MongoCctypeCheck.h @@ -36,7 +36,7 @@ namespace mongo::tidy { /** Overrides the default PPCallback class to primarly override the InclusionDirective call which is called for each include. This - allows the chance to check whether or included or not + allows the chance to check whether or included or not */ class MongoCctypeCheck : public clang::tidy::ClangTidyCheck { public: diff --git a/src/mongo/tools/mongo_tidy_checks/MongoCollectionShardingRuntimeCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoCollectionShardingRuntimeCheck.cpp new file mode 100644 index 0000000000000..b60feb6d39b53 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoCollectionShardingRuntimeCheck.cpp @@ -0,0 +1,109 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "MongoCollectionShardingRuntimeCheck.h" + +#include + +namespace mongo::tidy { + +using namespace clang; +using namespace clang::ast_matchers; + +MongoCollectionShardingRuntimeCheck::MongoCollectionShardingRuntimeCheck( + StringRef Name, clang::tidy::ClangTidyContext* Context) + : ClangTidyCheck(Name, Context), + exceptionDirs(clang::tidy::utils::options::parseStringList( + Options.get("exceptionDirs", "src/mongo/db/s"))) {} + +void MongoCollectionShardingRuntimeCheck::registerMatchers(MatchFinder* Finder) { + // Match instances of the CollectionShardingRuntime class + Finder->addMatcher( + varDecl(hasType(cxxRecordDecl(hasName("CollectionShardingRuntime")))).bind("instanceCall"), + this); + + // Match function calls made by the CollectionShardingRuntime class + Finder->addMatcher(callExpr(callee(functionDecl(hasParent( + cxxRecordDecl(hasName("CollectionShardingRuntime")))))) + .bind("funcCall"), + this); +} + +void MongoCollectionShardingRuntimeCheck::check(const MatchFinder::MatchResult& Result) { + const auto* SM = Result.SourceManager; + + if (!SM) { + return; + } + + // Get the location of the current source file + const auto FileLoc = SM->getLocForStartOfFile(SM->getMainFileID()); + if (FileLoc.isInvalid()) { + return; + } + + // Get the current source file path + const auto FilePath = SM->getFilename(FileLoc); + if (FilePath.empty()) { + return; + } + + std::string suffix = "_test.cpp"; + // Check if FilePath ends with the suffix "_test.cpp" + if (FilePath.size() > suffix.size() && + FilePath.rfind(suffix) == FilePath.size() - suffix.size()) { + return; + } + + // If the file path is in an exception directory, skip the check. + for (const auto& dir : this->exceptionDirs) { + if (FilePath.startswith(dir)) { + return; + } + } + + // Get the matched instance or function call + const auto* instanceDecl = Result.Nodes.getNodeAs("instanceCall"); + if (instanceDecl) { + diag(instanceDecl->getBeginLoc(), + "Illegal use of CollectionShardingRuntime outside of mongo/db/s/; use " + "CollectionShardingState instead; see src/mongo/db/s/collection_sharding_state.h for " + "details."); + } + + const auto* funcCallExpr = Result.Nodes.getNodeAs("funcCall"); + if (funcCallExpr) { + diag(funcCallExpr->getBeginLoc(), + "Illegal use of CollectionShardingRuntime outside of mongo/db/s/; use " + "CollectionShardingState instead; see src/mongo/db/s/collection_sharding_state.h for " + "details."); + } +} + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoCollectionShardingRuntimeCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoCollectionShardingRuntimeCheck.h new file mode 100644 index 0000000000000..48a0f4523754e --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoCollectionShardingRuntimeCheck.h @@ -0,0 +1,59 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once + +#include +#include + +namespace mongo::tidy { + +/** + * MongoCollectionShardingRuntimeCheck is a custom clang-tidy check for detecting + * the usage of CollectionShardingRuntime outside of the "mongo/db/s/" directory. + * + * It extends ClangTidyCheck and overrides the registerMatchers and check functions. + * The registerMatchers function adds custom AST Matchers to detect the instances and function calls + * of CollectionShardingRuntime. These matchers bind matched nodes with names for later retrieval in + * check function. + * + * The check function then retrieves the nodes bound by the matchers and emit diagnostics if + * any illegal usage is detected. An usage is considered illegal if the source file is not in the + * exception directories (e.g., "src/mongo/db/s/"). + * + */ +class MongoCollectionShardingRuntimeCheck : public clang::tidy::ClangTidyCheck { +public: + MongoCollectionShardingRuntimeCheck(clang::StringRef Name, + clang::tidy::ClangTidyContext* Context); + void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override; + void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override; + std::vector exceptionDirs; +}; + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoConfigHeaderCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoConfigHeaderCheck.cpp new file mode 100644 index 0000000000000..a1a113a366b06 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoConfigHeaderCheck.cpp @@ -0,0 +1,141 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. 
If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include "MongoConfigHeaderCheck.h" + +#include +#include + +namespace mongo::tidy { + +using namespace clang; +using namespace clang::ast_matchers; + +class MongoConfigHeaderPPCallbacks : public clang::PPCallbacks { +public: + explicit MongoConfigHeaderPPCallbacks(MongoConfigHeaderCheck& Check, + clang::LangOptions LangOpts, + const clang::SourceManager& SM) + : Check(Check), LangOpts(LangOpts), SM(SM) {} + + // Function to check the usage of MONGO_CONFIG_* macros + void checkMacroUsage(clang::SourceLocation Loc, const clang::Token& MacroNameTok) { + if (ConfigHeaderIncluded) + return; + llvm::StringRef macroName = MacroNameTok.getIdentifierInfo()->getName(); + if (macroName.startswith("MONGO_CONFIG_")) { + Check.diag(Loc, "MONGO_CONFIG define used without prior inclusion of config.h"); + } + } + + // Callback function for handling macro definitions + void MacroDefined(const clang::Token& MacroNameTok, const clang::MacroDirective* MD) override { + if (ConfigHeaderIncluded) + return; + checkMacroUsage(MD->getLocation(), MacroNameTok); + } + + // Callback function for handling #ifdef directives + void Ifdef(clang::SourceLocation Loc, + const clang::Token& MacroNameTok, + const clang::MacroDefinition& MD) override { + if (ConfigHeaderIncluded) + return; + checkMacroUsage(Loc, MacroNameTok); + } + + // Callback function for handling #ifndef directives + void Ifndef(clang::SourceLocation Loc, + const clang::Token& MacroNameTok, + const clang::MacroDefinition& MD) override { + if (ConfigHeaderIncluded) + return; + checkMacroUsage(Loc, MacroNameTok); + } + + // Callback function for handling #if directives + void If(clang::SourceLocation Loc, + clang::SourceRange ConditionRange, + clang::PPCallbacks::ConditionValueKind ConditionValue) override { + + if (ConfigHeaderIncluded) + return; + + // Get the beginning and end locations of the condition in the #if directive + clang::SourceLocation Start = ConditionRange.getBegin(); + clang::SourceLocation End = ConditionRange.getEnd(); + + // Get the source text of the condition in the #if directive + bool Invalid = false; + llvm::StringRef ConditionText = Lexer::getSourceText( + CharSourceRange::getTokenRange(Start, End), SM, clang::LangOptions(), &Invalid); + + if (!Invalid) { + if (ConditionText.contains("MONGO_CONFIG_")) { + Check.diag(Loc, "MONGO_CONFIG define used without prior inclusion of config.h"); + } + } + } + + // Callback function for handling #include directives + void InclusionDirective(clang::SourceLocation HashLoc, + const clang::Token& IncludeTok, + llvm::StringRef FileName, + bool IsAngled, + clang::CharSourceRange FilenameRange, + const clang::FileEntry* File, + llvm::StringRef SearchPath, + llvm::StringRef RelativePath, + const clang::Module* Imported, + clang::SrcMgr::CharacteristicKind FileType) override { + + if (FileName.equals("mongo/config.h")) { + ConfigHeaderIncluded = true; + } + } + +private: + MongoConfigHeaderCheck& Check; + clang::LangOptions LangOpts; + const clang::SourceManager& SM; + bool ConfigHeaderIncluded = false; +}; + +MongoConfigHeaderCheck::MongoConfigHeaderCheck(llvm::StringRef Name, + clang::tidy::ClangTidyContext* Context) + : ClangTidyCheck(Name, Context) {} + +void MongoConfigHeaderCheck::registerPPCallbacks(const clang::SourceManager& SM, + clang::Preprocessor* PP, + clang::Preprocessor* 
ModuleExpanderPP) { + PP->addPPCallbacks(::std::make_unique(*this, getLangOpts(), SM)); +} + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoConfigHeaderCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoConfigHeaderCheck.h new file mode 100644 index 0000000000000..05861f1c6396f --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoConfigHeaderCheck.h @@ -0,0 +1,53 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once + +#include +#include + +namespace mongo::tidy { + +/** + * MongoConfigHeaderCheck is a custom clang-tidy check for detecting + * the usage of MONGO_CONFIG_* macros without prior inclusion of "mongo/config.h" header. + * + * It extends ClangTidyCheck and overrides the registerPPCallbacks function. The registerPPCallbacks + * function adds a custom Preprocessor callback class (MongoConfigHeaderPPCallbacks) to handle + * preprocessor events and detect MONGO_CONFIG_* macro usages without proper inclusion of + * "mongo/config.h". + */ +class MongoConfigHeaderCheck : public clang::tidy::ClangTidyCheck { +public: + MongoConfigHeaderCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context); + void registerPPCallbacks(const clang::SourceManager& SM, + clang::Preprocessor* PP, + clang::Preprocessor* ModuleExpanderPP) override; +}; + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoCxx20BannedIncludesCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoCxx20BannedIncludesCheck.cpp new file mode 100644 index 0000000000000..2422a575d29f5 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoCxx20BannedIncludesCheck.cpp @@ -0,0 +1,93 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. 
+ * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + + +#include "MongoCxx20BannedIncludesCheck.h" + +#include +#include + +namespace mongo::tidy { + +namespace { + +class MongoCxx20BannedIncludesPPCallbacks : public clang::PPCallbacks { +public: + explicit MongoCxx20BannedIncludesPPCallbacks(MongoCxx20BannedIncludesCheck& Check, + clang::LangOptions LangOpts) + : Check(Check), LangOpts(LangOpts) {} + + void InclusionDirective(clang::SourceLocation HashLoc, + const clang::Token& IncludeTok, + llvm::StringRef FileName, + bool IsAngled, + clang::CharSourceRange FilenameRange, + const clang::FileEntry* File, + llvm::StringRef SearchPath, + llvm::StringRef RelativePath, + const clang::Module* Imported, + clang::SrcMgr::CharacteristicKind FileType) override { + + if (FileName.equals("coroutine") || FileName.equals("format")) { + Check.diag(FilenameRange.getBegin(), + "Use of prohibited %0%1%2 header. There are no override waivers issued for " + "this header. For questions please reach out to #cxx-discuss.") + << (IsAngled ? "<" : "\"") << FileName << (IsAngled ? ">" : "\""); + } + + if (FileName.equals("syncstream") || FileName.equals("ranges") || + FileName.equals("barrier") || FileName.equals("latch") || + FileName.equals("semaphore")) { + Check.diag(FilenameRange.getBegin(), + "Use of prohibited %0%1%2 header. There are override waivers issued for " + "this header. You need to follow the process in PM-3140 to override this " + "warning. For questions please reach out to #cxx-discuss.") + << (IsAngled ? "<" : "\"") << FileName << (IsAngled ? ">" : "\""); + } + } + +private: + MongoCxx20BannedIncludesCheck& Check; + clang::LangOptions LangOpts; +}; + +} // namespace + +MongoCxx20BannedIncludesCheck::MongoCxx20BannedIncludesCheck(llvm::StringRef Name, + clang::tidy::ClangTidyContext* Context) + : ClangTidyCheck(Name, Context) {} + +void MongoCxx20BannedIncludesCheck::registerPPCallbacks(const clang::SourceManager& SM, + clang::Preprocessor* PP, + clang::Preprocessor* ModuleExpanderPP) { + PP->addPPCallbacks( + ::std::make_unique(*this, getLangOpts())); +} + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoCxx20BannedIncludesCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoCxx20BannedIncludesCheck.h new file mode 100644 index 0000000000000..0cdcffd818b6c --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoCxx20BannedIncludesCheck.h @@ -0,0 +1,49 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once + +#include +#include + +namespace mongo::tidy { + +/** + Overrides the default PPCallback class to primarly override + the InclusionDirective call which is called for each include. This + allows checking for several c++20 banned headers. +*/ +class MongoCxx20BannedIncludesCheck : public clang::tidy::ClangTidyCheck { +public: + MongoCxx20BannedIncludesCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context); + void registerPPCallbacks(const clang::SourceManager& SM, + clang::Preprocessor* PP, + clang::Preprocessor* ModuleExpanderPP) override; +}; + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoCxx20StdChronoCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoCxx20StdChronoCheck.cpp new file mode 100644 index 0000000000000..1f6b7810f9b76 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoCxx20StdChronoCheck.cpp @@ -0,0 +1,86 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "MongoCxx20StdChronoCheck.h" + +namespace mongo::tidy { + +using namespace clang; +using namespace clang::ast_matchers; + +MongoCxx20StdChronoCheck::MongoCxx20StdChronoCheck(StringRef Name, + clang::tidy::ClangTidyContext* Context) + : ClangTidyCheck(Name, Context) {} + +void MongoCxx20StdChronoCheck::registerMatchers(ast_matchers::MatchFinder* Finder) { + auto chronoClassDeclMatcher = cxxRecordDecl(hasAnyName("utc_clock", + "tai_clock", + "gps_clock", + "local_t", + "last_spec", + "day", + "month", + "year", + "weekday", + "weekday_indexed", + "weekday_last", + "month_day", + "month_day_last", + "month_weekday", + "month_weekday_last", + "year_month", + "year_month_day", + "year_month_day_last", + "year_month_weekday", + "year_month_weekday_last", + "tzdb", + "tzdb_list", + "time_zone", + "sys_info", + "local_info", + "zoned_time", + "leap_second", + "leap_second_info", + "time_zone_link", + "nonexistent_local_time", + "ambiguous_local_time"), + hasDeclContext(namedDecl(hasName("chrono")))); + Finder->addMatcher( + loc(recordType(hasDeclaration(chronoClassDeclMatcher))).bind("loc_cxx20_chrono"), this); +} + +void MongoCxx20StdChronoCheck::check(const ast_matchers::MatchFinder::MatchResult& Result) { + const TypeLoc* loc_cxx20_chrono = Result.Nodes.getNodeAs("loc_cxx20_chrono"); + if (loc_cxx20_chrono) { + diag(loc_cxx20_chrono->getBeginLoc(), "Illegal use of prohibited type %0.") + << loc_cxx20_chrono->getType(); + } +} + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoCxx20StdChronoCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoCxx20StdChronoCheck.h new file mode 100644 index 0000000000000..580e9f0721cbd --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoCxx20StdChronoCheck.h @@ -0,0 +1,44 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ +#pragma once + +#include +#include + + +namespace mongo::tidy { + +class MongoCxx20StdChronoCheck : public clang::tidy::ClangTidyCheck { +public: + MongoCxx20StdChronoCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context); + void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override; + void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override; +}; + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoFCVConstantCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoFCVConstantCheck.cpp new file mode 100644 index 0000000000000..da51b4c013983 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoFCVConstantCheck.cpp @@ -0,0 +1,71 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "MongoFCVConstantCheck.h" + +#include + +namespace mongo::tidy { + +using namespace clang; +using namespace clang::ast_matchers; + +MongoFCVConstantCheck::MongoFCVConstantCheck(StringRef Name, clang::tidy::ClangTidyContext* Context) + : ClangTidyCheck(Name, Context) {} + +void MongoFCVConstantCheck::registerMatchers(ast_matchers::MatchFinder* Finder) { + + Finder->addMatcher( + // Match a FCV comparison function whose argument is a FCV constant. + declRefExpr( + // Find a reference to FeatureCompatibilityVersion enum. + hasDeclaration(enumConstantDecl( + hasType(enumDecl(hasName("mongo::multiversion::FeatureCompatibilityVersion"))))), + // Find a call to FCV comparison functions. 
+ hasParent(callExpr(anyOf( + callee(functionDecl(hasName("FeatureCompatibility::isLessThan"))), + callee(functionDecl(hasName("FeatureCompatibility::isGreaterThan"))), + callee(functionDecl(hasName("FeatureCompatibility::isLessThanOrEqualTo"))), + callee(functionDecl(hasName("FeatureCompatibility::isGreaterThanOrEqualTo"))), + callee(functionDecl(hasName("FeatureCompatibility::isUpgradingOrDowngrading"))))))) + .bind("fcv_constant"), + this); +} + +void MongoFCVConstantCheck::check(const ast_matchers::MatchFinder::MatchResult& Result) { + + const auto* loc_match = Result.Nodes.getNodeAs("fcv_constant"); + if (loc_match) { + diag(loc_match->getBeginLoc(), + "Illegal use of FCV constant in FCV comparison check functions. FCV gating should be " + "done through feature flags instead."); + } +} + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoFCVConstantCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoFCVConstantCheck.h new file mode 100644 index 0000000000000..817ffe1946089 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoFCVConstantCheck.h @@ -0,0 +1,54 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once + +#include +#include + +namespace mongo { +namespace tidy { +/** + * MongoFCVConstantCheck is a custom clang-tidy check for detecting the usage of + * comparing FCV using the FeatureCompatibilityVersion enums, e.g. + * FeatureCompatibilityVersion::kVersion_X_Y. + * + * It extends ClangTidyCheck and overrides the registerMatchers + * and check functions. 
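// Editorial illustration, not part of the original patch: a call such as
//
//   serverGlobalParams.featureCompatibility.isLessThan(
//       multiversion::FeatureCompatibilityVersion::kVersion_7_0);
//
// would be flagged, because an FCV enum constant is passed to an FCV comparison
// function. The serverGlobalParams call site is assumed here for illustration;
// the check itself matches the enum constant and the comparison function names.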
The registerMatchers function adds matchers
+ * to identify the usage of FCV constants in FCV comparison functions, while
+ * the check function flags the matched occurrences.
+ */
+class MongoFCVConstantCheck : public clang::tidy::ClangTidyCheck {
+public:
+    MongoFCVConstantCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context);
+    void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override;
+    void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override;
+};
+
+} // namespace tidy
+} // namespace mongo
diff --git a/src/mongo/tools/mongo_tidy_checks/MongoMacroDefinitionLeaksCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoMacroDefinitionLeaksCheck.cpp
new file mode 100644
index 0000000000000..fe9ef3dc71cc3
--- /dev/null
+++ b/src/mongo/tools/mongo_tidy_checks/MongoMacroDefinitionLeaksCheck.cpp
@@ -0,0 +1,114 @@
+/**
+ * Copyright (C) 2023-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * .
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */ + + +#include "MongoMacroDefinitionLeaksCheck.h" + +#include +#include + +namespace mongo::tidy { + +using namespace clang; +using namespace clang::ast_matchers; + +// Callbacks for handling preprocessor events +class MongoMacroPPCallbacks : public clang::PPCallbacks { +public: + explicit MongoMacroPPCallbacks(MongoMacroDefinitionLeaksCheck& Check, + clang::LangOptions LangOpts, + const clang::SourceManager& SM) + : Check(Check), LangOpts(LangOpts), SM(SM) {} + + // Callback for when a macro is defined + void MacroDefined(const clang::Token& MacroNameTok, const clang::MacroDirective* MD) override { + llvm::StringRef macroName = MacroNameTok.getIdentifierInfo()->getName(); + if (macroName == "MONGO_LOGV2_DEFAULT_COMPONENT") { + defineUndefDiff += 1; + lastMacroLocation = MD->getLocation(); + } + } + + // Callback for when a macro is undefined + void MacroUndefined(const clang::Token& MacroNameTok, + const clang::MacroDefinition& MD, + const clang::MacroDirective* Undef) override { + llvm::StringRef macroName = MacroNameTok.getIdentifierInfo()->getName(); + + if (macroName == "MONGO_LOGV2_DEFAULT_COMPONENT") { + defineUndefDiff -= 1; + } + } + + // Callback for when a file is included or excluded + void FileChanged(SourceLocation Loc, + FileChangeReason Reason, + SrcMgr::CharacteristicKind FileType, + FileID PrevFID) override { + if (Reason != EnterFile && Reason != ExitFile) + return; + + const FileEntry* CurrentFile = SM.getFileEntryForID(SM.getFileID(Loc)); + if (!CurrentFile) + return; + + if (Reason == EnterFile) { + // Push the file to the stack + fileStack.push(CurrentFile->getName().str()); + defineUndefDiff = 0; + } else if (Reason == ExitFile && !fileStack.empty()) { + // Get the top file from the stack + std::string currentFileName = fileStack.top(); + fileStack.pop(); + if (defineUndefDiff != 0) { + Check.diag(lastMacroLocation, "Missing #undef 'MONGO_LOGV2_DEFAULT_COMPONENT'"); + } + } + } + +private: + MongoMacroDefinitionLeaksCheck& Check; + clang::LangOptions LangOpts; + const clang::SourceManager& SM; + int defineUndefDiff = 0; + clang::SourceLocation lastMacroLocation; + std::stack fileStack; +}; + +MongoMacroDefinitionLeaksCheck::MongoMacroDefinitionLeaksCheck( + llvm::StringRef Name, clang::tidy::ClangTidyContext* Context) + : ClangTidyCheck(Name, Context) {} + +void MongoMacroDefinitionLeaksCheck::registerPPCallbacks(const clang::SourceManager& SM, + clang::Preprocessor* PP, + clang::Preprocessor* ModuleExpanderPP) { + PP->addPPCallbacks(::std::make_unique(*this, getLangOpts(), SM)); +} + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoMacroDefinitionLeaksCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoMacroDefinitionLeaksCheck.h new file mode 100644 index 0000000000000..070675cdbd69f --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoMacroDefinitionLeaksCheck.h @@ -0,0 +1,58 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
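// Editorial illustration, not part of the original patch: the define/undef
// pairing this check enforces within a single (header) file. The component
// value is only an example.
//
//   #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault
//   // ... LOGV2 statements ...
//   #undef MONGO_LOGV2_DEFAULT_COMPONENT
//
// Omitting the #undef leaves defineUndefDiff non-zero when the file is exited,
// which produces "Missing #undef 'MONGO_LOGV2_DEFAULT_COMPONENT'".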
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once + +#include +#include + +namespace mongo::tidy { + +/** + * MongoMacroDefinitionLeaksCheck is a custom clang-tidy check for detecting + * the imbalance between the definitions and undefinitions of the macro + * "MONGO_LOGV2_DEFAULT_COMPONENT" in the same file. + * + * It extends ClangTidyCheck and overrides the registerPPCallbacks function. The registerPPCallbacks + * function adds a custom Preprocessor callback class (MongoMacroPPCallbacks) to handle + * preprocessor events and detect an imbalance in the definitions and undefinitions of the + * "MONGO_LOGV2_DEFAULT_COMPONENT" macro within each file. + * + * If a .h or .hpp file is found to have a non-zero difference between definitions and undefinitions + * of the "MONGO_LOGV2_DEFAULT_COMPONENT" macro, it's considered a leak and the check raises a + * diagnostic message pointing out the location of the last macro definition. + */ +class MongoMacroDefinitionLeaksCheck : public clang::tidy::ClangTidyCheck { +public: + MongoMacroDefinitionLeaksCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context); + void registerPPCallbacks(const clang::SourceManager& SM, + clang::Preprocessor* PP, + clang::Preprocessor* ModuleExpanderPP) override; +}; + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoMutexCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoMutexCheck.cpp index 7a0f730220fee..a1d79ccf1e7e8 100644 --- a/src/mongo/tools/mongo_tidy_checks/MongoMutexCheck.cpp +++ b/src/mongo/tools/mongo_tidy_checks/MongoMutexCheck.cpp @@ -38,20 +38,18 @@ MongoMutexCheck::MongoMutexCheck(StringRef Name, clang::tidy::ClangTidyContext* : ClangTidyCheck(Name, Context) {} void MongoMutexCheck::registerMatchers(ast_matchers::MatchFinder* Finder) { - // TODO: SERVER-74929 Remove the NOLINT comment below after we remove _check_for_mongo_polyfill - // check from simplecpplint.py // This matcher finds variable declarations (outside of structs/classes) with a type of either // std::mutex or stdx::mutex. It works by matching variable declarations whose type, when // reduced to its canonical form, has a declaration named "::std::mutex". Finder->addMatcher(varDecl(hasType(qualType(hasCanonicalType( - hasDeclaration(namedDecl(hasName("::std::mutex"))))))) // NOLINT + hasDeclaration(namedDecl(hasName("::std::mutex"))))))) .bind("mutex_var"), this); // This matcher finds field declarations (inside structs/classes) with a type of either // std::mutex or stdx::mutex. 
- Finder->addMatcher(fieldDecl(hasType(qualType(hasCanonicalType(hasDeclaration( - namedDecl(hasName("::std::mutex"))))))) // NOLINT + Finder->addMatcher(fieldDecl(hasType(qualType(hasCanonicalType( + hasDeclaration(namedDecl(hasName("::std::mutex"))))))) .bind("mutex_field"), this); } diff --git a/src/mongo/tools/mongo_tidy_checks/MongoPolyFillCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoPolyFillCheck.cpp new file mode 100644 index 0000000000000..02869735838be --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoPolyFillCheck.cpp @@ -0,0 +1,126 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "MongoPolyFillCheck.h" + +#include + +namespace mongo::tidy { + +using namespace clang; +using namespace clang::ast_matchers; + +// Generate a list of fully qualified polyfill names by prefixing each name +// in the input list with 'std::' and 'boost::' +std::vector generateQualifiedPolyfillNames( + const std::vector& bannedNames) { + std::vector fullyBannedNames; + for (const auto& name : bannedNames) { + fullyBannedNames.push_back("std::" + name); + fullyBannedNames.push_back("boost::" + name); + } + return fullyBannedNames; +} + +// List of base polyfill names from the std and boost namespaces to be checked +std::vector MongoPolyFillCheck::basePolyfillNames = {"adopt_lock", + "async", + "chrono", + "condition_variable", + "condition_variable_any", + "cv_status", + "defer_lock", + "future", + "future_status", + "get_terminate", + "launch", + "lock_guard", + "mutex", + "notify_all_at_thread_exit", + "packaged_task", + "promise", + "recursive_mutex", + "set_terminate", + "shared_lock", + "shared_mutex", + "shared_timed_mutex", + "this_thread", + "thread", + "timed_mutex", + "try_to_lock", + "unique_lock", + "unordered_map", + "unordered_multimap", + "unordered_multiset", + "unordered_set"}; + +MongoPolyFillCheck::MongoPolyFillCheck(StringRef Name, clang::tidy::ClangTidyContext* Context) + : ClangTidyCheck(Name, Context) { + // Generate a list of fully polyfill names + fullyQualifiedPolyfillNames = generateQualifiedPolyfillNames(basePolyfillNames); +} + + +void MongoPolyFillCheck::registerMatchers(ast_matchers::MatchFinder* Finder) { + // Create an ArrayRef from the vector of banned names. This provides a + // lightweight, non-owning reference to the array of names. + std::vector basePolyfillNamesRefVector(basePolyfillNames.begin(), + basePolyfillNames.end()); + llvm::ArrayRef basePolyfillNamesRefArray(basePolyfillNamesRefVector); + + // Register an AST Matcher to find type declarations that use any of the banned names + Finder->addMatcher(loc(hasUnqualifiedDesugaredType(recordType( + hasDeclaration(namedDecl(hasAnyName(basePolyfillNamesRefArray)))))) + .bind("bannedNames"), + this); +} + +void MongoPolyFillCheck::check(const ast_matchers::MatchFinder::MatchResult& Result) { + const auto* MatchedTypeLoc = Result.Nodes.getNodeAs("bannedNames"); + if (MatchedTypeLoc) { + auto typeStr = MatchedTypeLoc->getType().getAsString(); + // we catch this_thread but not this_thread::at_thread_exit + if (typeStr.find("this_thread::at_thread_exit") != std::string::npos) + return; + + // Check if the type string starts with 'std' or 'boost' and contains a banned name. + for (const auto& name : fullyQualifiedPolyfillNames) { + if ((typeStr.find("std") == 0 || typeStr.find("boost") == 0) && + typeStr.find(name) != std::string::npos) { + auto location = MatchedTypeLoc->getBeginLoc(); + if (location.isValid()) + diag(MatchedTypeLoc->getBeginLoc(), + "Illegal use of banned name from std::/boost:: for %0, use mongo::stdx:: " + "variant instead") + << name; + } + } + } +} +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoPolyFillCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoPolyFillCheck.h new file mode 100644 index 0000000000000..9de13d5128797 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoPolyFillCheck.h @@ -0,0 +1,56 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
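// Editorial illustration, not part of the original patch: declarations this
// check would flag, next to the stdx replacement suggested by the diagnostic.
// The mongo/stdx header path is assumed here.
//
//   #include <mutex>
//   #include "mongo/stdx/mutex.h"
//
//   std::mutex rawMutex;         // flagged: "use mongo::stdx:: variant instead"
//   mongo::stdx::mutex okMutex;  // accepted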
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once + +#include +#include + +namespace mongo::tidy { + +/** + * MongoPolyFillCheck is a custom clang-tidy check for detecting + * the usage of listed names from the std or boost namespace in the source code. + * + * It extends ClangTidyCheck and overrides the registerMatchers + * and check functions. The registerMatchers function adds matchers + * to identify the usage of banned names, while the check function + * flags the matched occurrences. + */ +class MongoPolyFillCheck : public clang::tidy::ClangTidyCheck { + +public: + MongoPolyFillCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context); + void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override; + void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override; + static std::vector basePolyfillNames; + +private: + std::vector fullyQualifiedPolyfillNames; +}; +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoRandCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoRandCheck.cpp new file mode 100644 index 0000000000000..95adc5b0b2ac3 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoRandCheck.cpp @@ -0,0 +1,54 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. 
If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+
+#include "MongoRandCheck.h"
+
+namespace mongo::tidy {
+
+using namespace clang;
+using namespace clang::ast_matchers;
+
+MongoRandCheck::MongoRandCheck(StringRef Name, clang::tidy::ClangTidyContext* Context)
+    : ClangTidyCheck(Name, Context) {}
+
+void MongoRandCheck::registerMatchers(ast_matchers::MatchFinder* Finder) {
+    // Matcher for the srand and rand functions
+    Finder->addMatcher(
+        callExpr(callee(functionDecl(hasAnyName("::srand", "::rand")))).bind("callExpr"), this);
+}
+
+void MongoRandCheck::check(const ast_matchers::MatchFinder::MatchResult& Result) {
+    // Get the matched call expression
+    const auto* CallExpr = Result.Nodes.getNodeAs("callExpr");
+    if (CallExpr) {
+        diag(CallExpr->getBeginLoc(),
+             "Use of rand or srand, use or PseudoRandom instead.");
+    }
+}
+} // namespace mongo::tidy
diff --git a/src/mongo/tools/mongo_tidy_checks/MongoRandCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoRandCheck.h
new file mode 100644
index 0000000000000..6331d44f4f88f
--- /dev/null
+++ b/src/mongo/tools/mongo_tidy_checks/MongoRandCheck.h
@@ -0,0 +1,53 @@
+/**
+ * Copyright (C) 2023-present MongoDB, Inc.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the Server Side Public License, version 1,
+ * as published by MongoDB, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * Server Side Public License for more details.
+ *
+ * You should have received a copy of the Server Side Public License
+ * along with this program. If not, see
+ * .
+ *
+ * As a special exception, the copyright holders give permission to link the
+ * code of portions of this program with the OpenSSL library under certain
+ * conditions as described in each individual source file and distribute
+ * linked combinations including the program with the OpenSSL library. You
+ * must comply with the Server Side Public License in all respects for
+ * all of the code used other than as permitted herein. If you modify file(s)
+ * with this exception, you may extend this exception to your version of the
+ * file(s), but you are not obligated to do so. If you do not wish to do so,
+ * delete this exception statement from your version. If you delete this
+ * exception statement from all source files in the program, then also delete
+ * it in the license file.
+ */
+#pragma once
+
+#include
+#include
+
+namespace mongo::tidy {
+
+/**
+ * MongoRandCheck is a custom clang-tidy check for detecting
+ * the usage of 'rand' and 'srand' functions in the source code.
+ *
+ * It extends ClangTidyCheck and overrides the registerMatchers
+ * and check functions. The registerMatchers function adds matchers
+ * to identify the usage of 'rand' and 'srand',
+ * while the check function flags the matched occurrences.
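// Editorial illustration, not part of the original patch:
//
//   srand(42);        // flagged
//   int r = rand();   // flagged
//
// PseudoRandom is the replacement suggested by the diagnostic in
// MongoRandCheck.cpp above.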
+ */ +class MongoRandCheck : public clang::tidy::ClangTidyCheck { + +public: + MongoRandCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context); + void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override; + void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override; +}; + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoTidyModule.cpp b/src/mongo/tools/mongo_tidy_checks/MongoTidyModule.cpp index fcf60f737f707..6e1e0378bd07f 100644 --- a/src/mongo/tools/mongo_tidy_checks/MongoTidyModule.cpp +++ b/src/mongo/tools/mongo_tidy_checks/MongoTidyModule.cpp @@ -29,12 +29,21 @@ #include "MongoAssertCheck.h" #include "MongoCctypeCheck.h" +#include "MongoCollectionShardingRuntimeCheck.h" +#include "MongoConfigHeaderCheck.h" +#include "MongoCxx20BannedIncludesCheck.h" +#include "MongoCxx20StdChronoCheck.h" +#include "MongoFCVConstantCheck.h" #include "MongoHeaderBracketCheck.h" +#include "MongoMacroDefinitionLeaksCheck.h" #include "MongoMutexCheck.h" +#include "MongoPolyFillCheck.h" +#include "MongoRandCheck.h" #include "MongoStdAtomicCheck.h" #include "MongoStdOptionalCheck.h" #include "MongoTraceCheck.h" #include "MongoUninterruptibleLockGuardCheck.h" +#include "MongoUnstructuredLogCheck.h" #include "MongoVolatileCheck.h" #include @@ -52,12 +61,24 @@ class MongoTidyModule : public clang::tidy::ClangTidyModule { "mongo-uninterruptible-lock-guard-check"); CheckFactories.registerCheck("mongo-header-bracket-check"); CheckFactories.registerCheck("mongo-cctype-check"); + CheckFactories.registerCheck("mongo-config-header-check"); + CheckFactories.registerCheck( + "mongo-cxx20-banned-includes-check"); + CheckFactories.registerCheck("mongo-cxx20-std-chrono-check"); CheckFactories.registerCheck("mongo-std-optional-check"); CheckFactories.registerCheck("mongo-volatile-check"); CheckFactories.registerCheck("mongo-trace-check"); CheckFactories.registerCheck("mongo-std-atomic-check"); CheckFactories.registerCheck("mongo-mutex-check"); CheckFactories.registerCheck("mongo-assert-check"); + CheckFactories.registerCheck("mongo-fcv-constant-check"); + CheckFactories.registerCheck("mongo-unstructured-log-check"); + CheckFactories.registerCheck( + "mongo-collection-sharding-runtime-check"); + CheckFactories.registerCheck( + "mongo-macro-definition-leaks-check"); + CheckFactories.registerCheck("mongo-rand-check"); + CheckFactories.registerCheck("mongo-polyfill-check"); } }; diff --git a/src/mongo/tools/mongo_tidy_checks/MongoUnstructuredLogCheck.cpp b/src/mongo/tools/mongo_tidy_checks/MongoUnstructuredLogCheck.cpp new file mode 100644 index 0000000000000..a4416bff6ab93 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoUnstructuredLogCheck.cpp @@ -0,0 +1,59 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . 
+ * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "MongoUnstructuredLogCheck.h" + +namespace mongo::tidy { + +using namespace clang; +using namespace clang::ast_matchers; + +MongoUnstructuredLogCheck::MongoUnstructuredLogCheck(StringRef Name, + clang::tidy::ClangTidyContext* Context) + : ClangTidyCheck(Name, Context) {} + +void MongoUnstructuredLogCheck::registerMatchers(ast_matchers::MatchFinder* Finder) { + // match function calls to either 'logd' or 'doUnstructuredLogImpl' functions + Finder->addMatcher( + callExpr(callee(functionDecl(anyOf(hasName("logd"), hasName("doUnstructuredLogImpl"))))) + .bind("unstructuredLogCall"), + this); +} + +void MongoUnstructuredLogCheck::check(const ast_matchers::MatchFinder::MatchResult& Result) { + // Get the matched unstructured logging call + const auto* unstructuredLogCall = Result.Nodes.getNodeAs("unstructuredLogCall"); + if (unstructuredLogCall) { + diag(unstructuredLogCall->getBeginLoc(), + "Illegal use of unstructured logging, this is only for local " + "development use and should not be committed"); + } +} + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/MongoUnstructuredLogCheck.h b/src/mongo/tools/mongo_tidy_checks/MongoUnstructuredLogCheck.h new file mode 100644 index 0000000000000..703125d4f19bc --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/MongoUnstructuredLogCheck.h @@ -0,0 +1,52 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. 
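// Editorial illustration, not part of the original patch: calls that
// MongoUnstructuredLogCheck flags as local-development-only logging.
//
//   logd();                    // flagged
//   doUnstructuredLogImpl();   // flagged
//
// Committed code is expected to use structured logging instead; that reading
// follows from the diagnostic text, and the check itself only forbids these calls.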
If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ +#pragma once + +#include +#include + +namespace mongo::tidy { + +/** + * MongoUnstructuredLogCheck is a custom clang-tidy check for detecting + * the usage of logd and doUnstructuredLogImpl functions in the source code. + * + * It extends ClangTidyCheck and overrides the registerMatchers + * and check functions. The registerMatchers function adds matchers + * to identify the usage of logd and doUnstructuredLogImpl, + * while the check function flags the matched occurrences + */ +class MongoUnstructuredLogCheck : public clang::tidy::ClangTidyCheck { +public: + MongoUnstructuredLogCheck(clang::StringRef Name, clang::tidy::ClangTidyContext* Context); + void registerMatchers(clang::ast_matchers::MatchFinder* Finder) override; + void check(const clang::ast_matchers::MatchFinder::MatchResult& Result) override; +}; + +} // namespace mongo::tidy diff --git a/src/mongo/tools/mongo_tidy_checks/SConscript b/src/mongo/tools/mongo_tidy_checks/SConscript index c9d8c589fe07d..6cdefe31a7128 100644 --- a/src/mongo/tools/mongo_tidy_checks/SConscript +++ b/src/mongo/tools/mongo_tidy_checks/SConscript @@ -124,13 +124,22 @@ mongo_custom_check = env.SharedLibrary( "MongoHeaderBracketCheck.cpp", "MongoUninterruptibleLockGuardCheck.cpp", "MongoCctypeCheck.cpp", + "MongoConfigHeaderCheck.cpp", + "MongoCxx20BannedIncludesCheck.cpp", + "MongoCxx20StdChronoCheck.cpp", "MongoStdOptionalCheck.cpp", "MongoVolatileCheck.cpp", + "MongoPolyFillCheck.cpp", "MongoStdAtomicCheck.cpp", "MongoTidyModule.cpp", "MongoTraceCheck.cpp", "MongoMutexCheck.cpp", "MongoAssertCheck.cpp", + "MongoFCVConstantCheck.cpp", + "MongoUnstructuredLogCheck.cpp", + "MongoCollectionShardingRuntimeCheck.cpp", + "MongoMacroDefinitionLeaksCheck.cpp", + "MongoRandCheck.cpp", ], LIBDEPS_NO_INHERIT=[ '$BUILD_DIR/third_party/shim_allocator', diff --git a/src/mongo/tools/mongo_tidy_checks/tests/MongoTidyCheck_unittest.py b/src/mongo/tools/mongo_tidy_checks/tests/MongoTidyCheck_unittest.py index b3fcab7e8b41f..1b5fd8c6461a1 100644 --- a/src/mongo/tools/mongo_tidy_checks/tests/MongoTidyCheck_unittest.py +++ b/src/mongo/tools/mongo_tidy_checks/tests/MongoTidyCheck_unittest.py @@ -131,6 +131,36 @@ def test_MongoCctypeCheck(self): self.run_clang_tidy() + def test_MongoCxx20BannedIncludesCheck(self): + + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-cxx20-banned-includes-check' + WarningsAsErrors: '*' + HeaderFilterRegex: '(mongo/.*)' + """)) + + self.expected_output = [ + "Use of prohibited header.", + "Use of prohibited header.", + "Use of prohibited header.", + "Use of prohibited header.", + "Use of prohibited header.", + ] + + self.run_clang_tidy() + + def test_MongoCxx20StdChronoCheck(self): + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-cxx20-std-chrono-check' + WarningsAsErrors: '*' + """)) + prohibited_types = ["day", "day", "month", "year", "month_day", "month", "day", "day"] + self.expected_output = [ + f"Illegal use of prohibited type 'std::chrono::{t}'." 
for t in prohibited_types] + self.run_clang_tidy() + def test_MongoStdOptionalCheck(self): self.write_config( @@ -181,7 +211,7 @@ def test_MongoTraceCheck(self): ] self.run_clang_tidy() - + def test_MongoStdAtomicCheck(self): self.write_config( @@ -228,6 +258,116 @@ def test_MongoAssertCheck(self): self.run_clang_tidy() + def test_MongoFCVConstantCheck(self): + + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-fcv-constant-check' + WarningsAsErrors: '*' + """)) + + self.expected_output = [ + "error: Illegal use of FCV constant in FCV comparison check functions. FCV gating should be done through feature flags instead.", + ] + + self.run_clang_tidy() + + def test_MongoUnstructuredLogCheck(self): + + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-unstructured-log-check' + WarningsAsErrors: '*' + """)) + + self.expected_output = [ + "error: Illegal use of unstructured logging, this is only for local development use and should not be committed [mongo-unstructured-log-check,-warnings-as-errors]\n logd();", + "error: Illegal use of unstructured logging, this is only for local development use and should not be committed [mongo-unstructured-log-check,-warnings-as-errors]\n doUnstructuredLogImpl();", + ] + + self.run_clang_tidy() + + def test_MongoConfigHeaderCheck(self): + + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-config-header-check' + WarningsAsErrors: '*' + HeaderFilterRegex: '(mongo/.*)' + """)) + + self.expected_output = [ + "error: MONGO_CONFIG define used without prior inclusion of config.h [mongo-config-header-check,-warnings-as-errors]\n#define MONGO_CONFIG_TEST1 1", + "error: MONGO_CONFIG define used without prior inclusion of config.h [mongo-config-header-check,-warnings-as-errors]\n#ifdef MONGO_CONFIG_TEST1", + "error: MONGO_CONFIG define used without prior inclusion of config.h [mongo-config-header-check,-warnings-as-errors]\n#if MONGO_CONFIG_TEST1 == 1", + "error: MONGO_CONFIG define used without prior inclusion of config.h [mongo-config-header-check,-warnings-as-errors]\n#ifndef MONGO_CONFIG_TEST2", + "error: MONGO_CONFIG define used without prior inclusion of config.h [mongo-config-header-check,-warnings-as-errors]\n#if defined(MONGO_CONFIG_TEST1)", + ] + self.run_clang_tidy() + + def test_MongoCollectionShardingRuntimeCheck(self): + + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-collection-sharding-runtime-check' + WarningsAsErrors: '*' + CheckOptions: + - key: mongo-collection-sharding-runtime-check.exceptionDirs + value: 'src/mongo/db/s' + """)) + + self.expected_output = [ + "error: Illegal use of CollectionShardingRuntime outside of mongo/db/s/; use CollectionShardingState instead; see src/mongo/db/s/collection_sharding_state.h for details. [mongo-collection-sharding-runtime-check,-warnings-as-errors]\n CollectionShardingRuntime csr(5, \"Test\");", + "error: Illegal use of CollectionShardingRuntime outside of mongo/db/s/; use CollectionShardingState instead; see src/mongo/db/s/collection_sharding_state.h for details. 
[mongo-collection-sharding-runtime-check,-warnings-as-errors]\n int result = CollectionShardingRuntime::functionTest(7, \"Test\");", + ] + + self.run_clang_tidy() + + def test_MongoMacroDefinitionLeaksCheck(self): + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-macro-definition-leaks-check' + WarningsAsErrors: '*' + HeaderFilterRegex: '(mongo/.*)' + """)) + + self.expected_output = [ + "Missing #undef 'MONGO_LOGV2_DEFAULT_COMPONENT'", + ] + + self.run_clang_tidy() + + def test_MongoPolyFillCheck(self): + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-polyfill-check' + WarningsAsErrors: '*' + """)) + + self.expected_output = [ + "error: Illegal use of banned name from std::/boost:: for std::mutex, use mongo::stdx:: variant instead", + "error: Illegal use of banned name from std::/boost:: for std::future, use mongo::stdx:: variant instead", + "error: Illegal use of banned name from std::/boost:: for std::condition_variable, use mongo::stdx:: variant instead", + "error: Illegal use of banned name from std::/boost:: for std::unordered_map, use mongo::stdx:: variant instead", + "error: Illegal use of banned name from std::/boost:: for boost::unordered_map, use mongo::stdx:: variant instead", + ] + + self.run_clang_tidy() + + def test_MongoRandCheck(self): + self.write_config( + textwrap.dedent("""\ + Checks: '-*,mongo-rand-check' + WarningsAsErrors: '*' + """)) + + self.expected_output =[ + "error: Use of rand or srand, use or PseudoRandom instead. [mongo-rand-check,-warnings-as-errors]\n srand(time(0));", + "error: Use of rand or srand, use or PseudoRandom instead. [mongo-rand-check,-warnings-as-errors]\n int random_number = rand();", + ] + + self.run_clang_tidy() + if __name__ == '__main__': parser = argparse.ArgumentParser() diff --git a/src/mongo/tools/mongo_tidy_checks/tests/SConscript b/src/mongo/tools/mongo_tidy_checks/tests/SConscript index a75a772066137..15c242282cd76 100644 --- a/src/mongo/tools/mongo_tidy_checks/tests/SConscript +++ b/src/mongo/tools/mongo_tidy_checks/tests/SConscript @@ -14,6 +14,7 @@ if env.GetOption('ninja') == 'disabled': mongo_tidy_test_env.Append(CPPPATH=[ '.', '#src', + '#src/third_party/boost', ], ) # These test files will purposefully be error prone, so we can disable warnings any warnings we expect @@ -29,11 +30,20 @@ if env.GetOption('ninja') == 'disabled': 'test_MongoVolatileCheck.cpp', 'test_MongoUninterruptibleLockGuardCheck.cpp', 'test_MongoCctypeCheck.cpp', + 'test_MongoConfigHeaderCheck.cpp', + 'test_MongoCxx20BannedIncludesCheck.cpp', + 'test_MongoCxx20StdChronoCheck.cpp', 'test_MongoStdOptionalCheck.cpp', 'test_MongoTraceCheck.cpp', 'test_MongoStdAtomicCheck.cpp', 'test_MongoMutexCheck.cpp', 'test_MongoAssertCheck.cpp', + 'test_MongoFCVConstantCheck.cpp', + 'test_MongoUnstructuredLogCheck.cpp', + 'test_MongoCollectionShardingRuntimeCheck.cpp', + 'test_MongoMacroDefinitionLeaksCheck.cpp', + 'test_MongoRandCheck.cpp', + 'test_MongoPolyFillCheck.cpp', ] # So that we can do fast runs, we will generate a separate compilation database file for each diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCollectionShardingRuntimeCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCollectionShardingRuntimeCheck.cpp new file mode 100644 index 0000000000000..1feccf15c8d7b --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCollectionShardingRuntimeCheck.cpp @@ -0,0 +1,27 @@ +#include +namespace mongo { +class CollectionShardingRuntime { +public: + // Constructor + CollectionShardingRuntime(int 
a, std::string s) : value(a), name(s) {} + + // Static function + static int functionTest(int a, std::string s) { + return a + s.length(); + } + +private: + int value; + std::string name; +}; + + +int testMongoUnstructuredLogFuncton1111() { + // Create an instance of the object + CollectionShardingRuntime csr(5, "Test"); + + // Call the static function + int result = CollectionShardingRuntime::functionTest(7, "Test"); +} + +} // namespace mongo diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoConfigHeaderCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoConfigHeaderCheck.cpp new file mode 100644 index 0000000000000..70553eea0bd6c --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoConfigHeaderCheck.cpp @@ -0,0 +1,18 @@ +//#include "mongo/config.h" + +namespace mongo { +#define MONGO_CONFIG_TEST1 1 + +#ifdef MONGO_CONFIG_TEST1 +#endif + +#if MONGO_CONFIG_TEST1 == 1 +#endif + +#ifndef MONGO_CONFIG_TEST2 +#endif + +#if defined(MONGO_CONFIG_TEST1) +#endif + +} // namespace mongo diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCxx20BannedIncludesCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCxx20BannedIncludesCheck.cpp new file mode 100644 index 0000000000000..940ec0d958e53 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCxx20BannedIncludesCheck.cpp @@ -0,0 +1,12 @@ +#include +#include +#include +#include +#include +// This header does not exist in this version of clang +// Based on the other tests I am going to assume this is working +// #include +// This header needs to be compiled with -fcoroutines. This causes the check to not compile. +// Based on the other tests I am going to assume this is working. +// Also no one can include this header without also adding this flag. +// #include diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCxx20StdChronoCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCxx20StdChronoCheck.cpp new file mode 100644 index 0000000000000..65517cb660331 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoCxx20StdChronoCheck.cpp @@ -0,0 +1,37 @@ +/** + * Forward-declaring C++20 types from std::chrono, because this test is currently compiled with + * -std=c++17. This trick lets us test the new clang-tidy rule without changing the C++ standard, + * but we are limited to testing only the pointer types. + * TODO SERVER-77406: Compile clang-tidy tests with -std=c++20 and add tests for non-pointer types. + */ +namespace std { +namespace chrono { +class day; +class month; +class month_day; +class year; +} // namespace chrono +} // namespace std + +namespace mongo { + +// Decorated types in variable declarations. +std::chrono::day* d1; +const std::chrono::day* d2; + +// Source types in 'typedefs' and 'using'. +typedef std::chrono::month month_t; +using year_t = std::chrono::year; + +// Types of function parameters and return values. +std::chrono::month_day* getLastDay(std::chrono::month* value); + +// Types in data member (field) declarations. +struct DayWrapper { + std::chrono::day* d3; +}; + +// Types without full qualification. 
+using namespace std::chrono; +day* d4; +} // namespace mongo diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoFCVConstantCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoFCVConstantCheck.cpp new file mode 100644 index 0000000000000..5e3dd4e867978 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoFCVConstantCheck.cpp @@ -0,0 +1,56 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ +namespace mongo { +namespace multiversion { + +enum class FeatureCompatibilityVersion { + kInvalid, + kVersion_7_0, +}; + +} // namespace multiversion + +using FCV = multiversion::FeatureCompatibilityVersion; + +struct ServerGlobalParams { + struct FeatureCompatibility { + bool isLessThan(FCV version) const { + return true; + } + } mutableFeatureCompatibility; + const FeatureCompatibility& featureCompatibility = mutableFeatureCompatibility; +}; + +void testFCVConstant() { + const ServerGlobalParams mockParam = {}; + mockParam.featureCompatibility.isLessThan( + multiversion::FeatureCompatibilityVersion::kVersion_7_0); +} + +} // namespace mongo diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoMacroDefinitionLeaksCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoMacroDefinitionLeaksCheck.cpp new file mode 100644 index 0000000000000..1eb296011dd6d --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoMacroDefinitionLeaksCheck.cpp @@ -0,0 +1 @@ +#include "test_MongoMacroDefinitionLeaksCheck.h" diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoMacroDefinitionLeaksCheck.h b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoMacroDefinitionLeaksCheck.h new file mode 100644 index 0000000000000..0e7b6caad0997 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoMacroDefinitionLeaksCheck.h @@ -0,0 +1 @@ +#define MONGO_LOGV2_DEFAULT_COMPONENT 1 diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoPolyFillCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoPolyFillCheck.cpp new file mode 100644 index 0000000000000..fca700f5b1d2d --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoPolyFillCheck.cpp @@ -0,0 +1,16 @@ +#include +#include +#include +#include +#include + +namespace mongo { +void mongoPolyFillCheckTest() { + std::mutex myMutex; + std::future myFuture; + std::condition_variable cv; + std::unordered_map myMap; + boost::unordered_map boostMap; +} + +} // namespace mongo diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoRandCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoRandCheck.cpp new file mode 100644 index 0000000000000..14f5db0f6ab81 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoRandCheck.cpp @@ -0,0 +1,8 @@ +#include +#include +namespace mongo { +void mongoRandCheck() { + srand(time(0)); + int random_number = rand(); +} +} // namespace mongo diff --git a/src/mongo/tools/mongo_tidy_checks/tests/test_MongoUnstructuredLogCheck.cpp b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoUnstructuredLogCheck.cpp new file mode 100644 index 0000000000000..e4348708461b0 --- /dev/null +++ b/src/mongo/tools/mongo_tidy_checks/tests/test_MongoUnstructuredLogCheck.cpp @@ -0,0 +1,16 @@ +namespace mongo { + +void logd() { + // logd function +} +void doUnstructuredLogImpl() { + // doUnstructuredLogImpl function +} + +int testMongoUnstructuredLogFuncton() { + logd(); + doUnstructuredLogImpl(); + return 0; +} + +} // namespace mongo diff --git a/src/mongo/tools/mongobridge_tool/SConscript b/src/mongo/tools/mongobridge_tool/SConscript index f917a686bd23f..8dfad368610e8 100644 --- a/src/mongo/tools/mongobridge_tool/SConscript +++ b/src/mongo/tools/mongobridge_tool/SConscript @@ -7,16 +7,17 @@ yamlEnv = env.Clone() yamlEnv.InjectThirdParty(libraries=['yaml']) mongobridge = env.Program( - target="mongobridge", + target='mongobridge', source=[ - "bridge.cpp", - "bridge_commands.cpp", - "mongobridge_options.cpp", - "mongobridge_options.idl", - 
"mongobridge_options_init.cpp", + 'bridge.cpp', + 'bridge_commands.cpp', + 'mongobridge_options.cpp', + 'mongobridge_options.idl', + 'mongobridge_options_init.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/db/dbmessage', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/rpc/rpc', '$BUILD_DIR/mongo/transport/message_compressor', '$BUILD_DIR/mongo/transport/message_compressor_options_server', diff --git a/src/mongo/tools/mongobridge_tool/bridge.cpp b/src/mongo/tools/mongobridge_tool/bridge.cpp index 8724197ec3e9f..95723f8473262 100644 --- a/src/mongo/tools/mongobridge_tool/bridge.cpp +++ b/src/mongo/tools/mongobridge_tool/bridge.cpp @@ -28,42 +28,65 @@ */ -#include "mongo/platform/basic.h" - -#include #include #include - -#include "mongo/base/init.h" +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/db/client.h" #include "mongo/db/dbmessage.h" +#include "mongo/db/namespace_string.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" -#include "mongo/platform/atomic_word.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/redaction.h" #include "mongo/platform/mutex.h" #include "mongo/platform/random.h" #include "mongo/rpc/factory.h" #include "mongo/rpc/message.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/protocol.h" #include "mongo/rpc/reply_builder_interface.h" -#include "mongo/stdx/thread.h" #include "mongo/tools/mongobridge_tool/bridge_commands.h" #include "mongo/tools/mongobridge_tool/mongobridge_options.h" #include "mongo/transport/asio/asio_transport_layer.h" #include "mongo/transport/message_compressor_manager.h" +#include "mongo/transport/service_entry_point.h" #include "mongo/transport/service_entry_point_impl.h" -#include "mongo/transport/service_executor_synchronous.h" +#include "mongo/transport/session.h" +#include "mongo/transport/transport_layer.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/exit.h" +#include "mongo/util/exit_code.h" #include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" #include "mongo/util/quick_exit.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/signal_handlers.h" -#include "mongo/util/static_immortal.h" #include "mongo/util/str.h" -#include "mongo/util/text.h" +#include "mongo/util/text.h" // IWYU pragma: keep #include "mongo/util/time_support.h" -#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kBridge @@ -73,10 +96,10 @@ namespace mongo { namespace { boost::optional extractHostInfo(const OpMsgRequest& request) { - // The initial isMaster request made by mongod and mongos processes should contain a hostInfo - // field that identifies the process by its host:port. + // The initial hello/isMaster request made by mongod and mongos processes should contain a + // hostInfo field that identifies the process by its host:port. 
StringData cmdName = request.getCommandName(); - if (cmdName != "isMaster" && cmdName != "ismaster") { + if (cmdName != "isMaster" && cmdName != "ismaster" && cmdName != "hello") { return boost::none; } @@ -162,7 +185,6 @@ BridgeContext* BridgeContext::get() { return &_get(getGlobalServiceContext()); } -class ServiceEntryPointBridge; class ProxiedConnection { public: ProxiedConnection() : _dest(nullptr), _prng(BridgeContext::get()->makeSeededPRNG()) {} @@ -195,10 +217,10 @@ class ProxiedConnection { return; _seenFirstMessage = true; - // The initial isMaster request made by mongod and mongos processes should contain a + // The initial hello/isMaster request made by mongod and mongos processes should contain a // hostInfo field that identifies the process by its host:port. StringData cmdName = request.getCommandName(); - if (cmdName != "isMaster" && cmdName != "ismaster") { + if (cmdName != "isMaster" && cmdName != "ismaster" && cmdName != "hello") { return; } @@ -502,7 +524,6 @@ int bridgeMain(int argc, char** argv) { startSignalProcessingThread(LogFileStatus::kNoLogFileToRotate); auto serviceContextHolder = ServiceContext::make(); - serviceContextHolder->registerClientObserver(std::make_unique()); setGlobalServiceContext(std::move(serviceContextHolder)); auto serviceContext = getGlobalServiceContext(); diff --git a/src/mongo/tools/mongobridge_tool/bridge_commands.cpp b/src/mongo/tools/mongobridge_tool/bridge_commands.cpp index c88c60296a655..213bdf1471f3b 100644 --- a/src/mongo/tools/mongobridge_tool/bridge_commands.cpp +++ b/src/mongo/tools/mongobridge_tool/bridge_commands.cpp @@ -27,15 +27,25 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - -#include "mongo/tools/mongobridge_tool/bridge_commands.h" - -#include "mongo/base/init.h" +#include +#include +#include + +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/base/status.h" #include "mongo/base/status_with.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/util/bson_extract.h" +#include "mongo/tools/mongobridge_tool/bridge_commands.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/str.h" #include "mongo/util/string_map.h" diff --git a/src/mongo/tools/mongobridge_tool/bridge_commands.h b/src/mongo/tools/mongobridge_tool/bridge_commands.h index 8b32fbba86c1c..d50d5044259b5 100644 --- a/src/mongo/tools/mongobridge_tool/bridge_commands.h +++ b/src/mongo/tools/mongobridge_tool/bridge_commands.h @@ -29,13 +29,18 @@ #pragma once +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/unordered_map.h" +#include "mongo/util/duration.h" #include "mongo/util/time_support.h" namespace mongo { class BSONObj; + struct HostAndPort; class Status; template diff --git a/src/mongo/tools/mongobridge_tool/mongobridge_options.cpp b/src/mongo/tools/mongobridge_tool/mongobridge_options.cpp index cbd2f731eb520..d73b3ef6dcd49 100644 --- a/src/mongo/tools/mongobridge_tool/mongobridge_options.cpp +++ b/src/mongo/tools/mongobridge_tool/mongobridge_options.cpp @@ -33,10 +33,17 @@ #include #include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_component_settings.h" +#include 
"mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" #include "mongo/platform/random.h" +#include "mongo/util/options_parser/environment.h" +#include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kBridge diff --git a/src/mongo/tools/mongobridge_tool/mongobridge_options_init.cpp b/src/mongo/tools/mongobridge_tool/mongobridge_options_init.cpp index e4099a397133a..7a9c8b60ee6ca 100644 --- a/src/mongo/tools/mongobridge_tool/mongobridge_options_init.cpp +++ b/src/mongo/tools/mongobridge_tool/mongobridge_options_init.cpp @@ -27,14 +27,20 @@ * it in the license file. */ -#include "mongo/tools/mongobridge_tool/mongobridge_options.h" - #include +#include +#include +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" +#include "mongo/tools/mongobridge_tool/mongobridge_options.h" #include "mongo/transport/message_compressor_registry.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/quick_exit.h" namespace mongo { diff --git a/src/mongo/transport/SConscript b/src/mongo/transport/SConscript index 868890f48ba61..4c83a0d69628a 100644 --- a/src/mongo/transport/SConscript +++ b/src/mongo/transport/SConscript @@ -233,7 +233,7 @@ tlEnv.CppUnitTest( '$BUILD_DIR/mongo/client/clientdriver_network', '$BUILD_DIR/mongo/db/auth/authmocks', '$BUILD_DIR/mongo/db/dbmessage', - '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', '$BUILD_DIR/mongo/rpc/message', '$BUILD_DIR/mongo/rpc/rpc', @@ -284,7 +284,7 @@ env.Benchmark( 'service_executor_bm.cpp', ], LIBDEPS=[ - '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', 'service_executor', 'transport_layer_mock', @@ -297,6 +297,7 @@ env.Benchmark( 'session_workflow_bm.cpp', ], LIBDEPS=[ + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', 'service_entry_point', 'service_executor', diff --git a/src/mongo/transport/asio/asio_networking_baton.h b/src/mongo/transport/asio/asio_networking_baton.h index 122962fc54f98..3d3086a30b1a6 100644 --- a/src/mongo/transport/asio/asio_networking_baton.h +++ b/src/mongo/transport/asio/asio_networking_baton.h @@ -40,7 +40,6 @@ #include "mongo/stdx/unordered_map.h" #include "mongo/transport/asio/asio_session.h" #include "mongo/transport/baton.h" -#include "mongo/util/concepts.h" #include "mongo/util/functional.h" #include "mongo/util/future.h" #include "mongo/util/hierarchical_acquisition.h" diff --git a/src/mongo/transport/asio/asio_session_impl.cpp b/src/mongo/transport/asio/asio_session_impl.cpp index 7f567fef20b1a..7834861caf0b9 100644 --- a/src/mongo/transport/asio/asio_session_impl.cpp +++ b/src/mongo/transport/asio/asio_session_impl.cpp @@ -99,15 +99,8 @@ Status makeCanceledStatus() { return {ErrorCodes::CallbackCanceled, "Operation was canceled"}; } -bool connHealthMetricsEnabled() { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
- return gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe(); -} - -CounterMetric totalIngressTLSConnections("network.totalIngressTLSConnections", - connHealthMetricsEnabled); -CounterMetric totalIngressTLSHandshakeTimeMillis("network.totalIngressTLSHandshakeTimeMillis", - connHealthMetricsEnabled); +CounterMetric totalIngressTLSConnections("network.totalIngressTLSConnections"); +CounterMetric totalIngressTLSHandshakeTimeMillis("network.totalIngressTLSHandshakeTimeMillis"); } // namespace diff --git a/src/mongo/transport/asio/asio_transport_layer.cpp b/src/mongo/transport/asio/asio_transport_layer.cpp index 4e9412c38fd3f..2f07c7e838119 100644 --- a/src/mongo/transport/asio/asio_transport_layer.cpp +++ b/src/mongo/transport/asio/asio_transport_layer.cpp @@ -483,7 +483,7 @@ class WrappedResolver { // Then, if the numeric (IP address) lookup failed, we fall back to DNS or return the error // from the resolver. return _resolve(peer, flags | Resolver::numeric_host, enableIPv6) - .onError([=](Status) { return _resolve(peer, flags, enableIPv6); }) + .onError([=, this](Status) { return _resolve(peer, flags, enableIPv6); }) .getNoThrow(); } @@ -495,9 +495,8 @@ class WrappedResolver { // We follow the same numeric -> hostname fallback procedure as the synchronous resolver // function for setting resolver flags (see above). const auto flags = Resolver::numeric_service; - return _asyncResolve(peer, flags | Resolver::numeric_host, enableIPv6).onError([=](Status) { - return _asyncResolve(peer, flags, enableIPv6); - }); + return _asyncResolve(peer, flags | Resolver::numeric_host, enableIPv6) + .onError([=, this](Status) { return _asyncResolve(peer, flags, enableIPv6); }); } void cancel() { @@ -507,7 +506,7 @@ class WrappedResolver { private: boost::optional _checkForUnixSocket(const HostAndPort& peer) { #ifndef _WIN32 - if (str::contains(peer.host(), '/')) { + if (isUnixDomainSocket(peer.host())) { asio::local::stream_protocol::endpoint ep(peer.host()); return EndpointVector{WrappedEndpoint(ep)}; } @@ -1329,23 +1328,16 @@ std::vector> AsioTransportLayer::getListenerSocketBackl } void AsioTransportLayer::appendStatsForServerStatus(BSONObjBuilder* bob) const { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe()) { - bob->append("listenerProcessingTime", _listenerProcessingTime.load().toBSON()); - } + bob->append("listenerProcessingTime", _listenerProcessingTime.load().toBSON()); } void AsioTransportLayer::appendStatsForFTDC(BSONObjBuilder& bob) const { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
- if (gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe()) { - BSONArrayBuilder queueDepthsArrayBuilder( - bob.subarrayStart("listenerSocketBacklogQueueDepths")); - for (const auto& record : _acceptorRecords) { - BSONObjBuilder{queueDepthsArrayBuilder.subobjStart()}.append( - record->address.toString(), record->backlogQueueDepth.load()); - } - queueDepthsArrayBuilder.done(); + BSONArrayBuilder queueDepthsArrayBuilder(bob.subarrayStart("listenerSocketBacklogQueueDepths")); + for (const auto& record : _acceptorRecords) { + BSONObjBuilder{queueDepthsArrayBuilder.subobjStart()}.append( + record->address.toString(), record->backlogQueueDepth.load()); } + queueDepthsArrayBuilder.done(); } void AsioTransportLayer::_runListener() noexcept { diff --git a/src/mongo/transport/asio/asio_transport_layer_test.cpp b/src/mongo/transport/asio/asio_transport_layer_test.cpp index 2ab09b01f4e40..c0ca89eebe019 100644 --- a/src/mongo/transport/asio/asio_transport_layer_test.cpp +++ b/src/mongo/transport/asio/asio_transport_layer_test.cpp @@ -27,8 +27,6 @@ * it in the license file. */ -#include "mongo/transport/asio/asio_transport_layer.h" - #include #include #include @@ -40,7 +38,6 @@ #include "mongo/client/dbclient_connection.h" #include "mongo/config.h" -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context_test_fixture.h" #include "mongo/idl/server_parameter_test_util.h" @@ -50,6 +47,7 @@ #include "mongo/rpc/op_msg.h" #include "mongo/stdx/thread.h" #include "mongo/transport/asio/asio_session.h" +#include "mongo/transport/asio/asio_transport_layer.h" #include "mongo/transport/baton.h" #include "mongo/transport/service_entry_point.h" #include "mongo/transport/transport_options_gen.h" @@ -67,7 +65,6 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest - namespace mongo { namespace { @@ -79,6 +76,17 @@ using SetsockoptPtr = char*; using SetsockoptPtr = void*; #endif +std::string loadFile(std::string filename) try { + std::ifstream f; + f.exceptions(f.exceptions() | std::ios::failbit); + f.open(filename); + return {std::istreambuf_iterator{f}, std::istreambuf_iterator{}}; +} catch (const std::ifstream::failure& ex) { + auto ec = lastSystemError(); + FAIL("Failed to load file: \"{}\": {}: {}"_format(filename, ex.what(), errorMessage(ec))); + MONGO_UNREACHABLE; +} + class JoinThread : public stdx::thread { public: using stdx::thread::thread; @@ -352,8 +360,6 @@ class TestFixture { } private: - RAIIServerParameterControllerForTest _featureFlagController{"featureFlagConnHealthMetrics", - true}; std::unique_ptr _tla; MockSEP _sep; @@ -625,15 +631,17 @@ TEST(AsioTransportLayer, EgressConnectionResetByPeerDuringSessionCtor) { // `fp` pauses the `AsioSession` constructor immediately prior to its // `setsockopt` sequence, to allow time for the peer reset to propagate. 
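The test changes above add a `loadFile()` helper that reads an entire file (for example a PEM payload) into a string and fails the test if the file cannot be opened or read. A standalone sketch of the same whole-file read pattern, assuming the caller is happy to receive the failure as an exception rather than a unit-test `FAIL()`:

```cpp
#include <fstream>
#include <iterator>
#include <string>

// Read a whole file into a string. Enabling failbit turns an unopenable or
// unreadable file into an std::ios_base::failure exception.
std::string readWholeFile(const std::string& filename) {
    std::ifstream f;
    f.exceptions(f.exceptions() | std::ios::failbit);
    f.open(filename);
    return {std::istreambuf_iterator<char>{f}, std::istreambuf_iterator<char>{}};
}
```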
- FailPoint& fp = transport::asioTransportLayerSessionPauseBeforeSetSocketOption; + auto fp = std::make_unique( + "asioTransportLayerSessionPauseBeforeSetSocketOption"); Acceptor server(ioContext); server.setOnAccept([&](std::shared_ptr conn) { + LOGV2(7598701, "waiting for the client to reach the fail-point"); + (*fp)->waitForTimesEntered(fp->initialTimesEntered() + 1); LOGV2(6101604, "handling a connection by resetting it"); conn->socket.set_option(asio::socket_base::linger(true, 0)); conn->socket.close(); - sleepFor(Seconds{1}); - fp.setMode(FailPoint::off); + fp.reset(); }); JoinThread ioThread{[&] { ioContext.run(); @@ -642,7 +650,6 @@ TEST(AsioTransportLayer, EgressConnectionResetByPeerDuringSessionCtor) { ioContext.stop(); }; - fp.setMode(FailPoint::alwaysOn); LOGV2(6101602, "Connecting", "port"_attr = server.port()); using namespace unittest::match; // On MacOS, calling `setsockopt` on a peer-reset connection yields an @@ -835,11 +842,7 @@ TEST_F(AsioTransportLayerWithServiceContextTest, ShutdownDuringSSLHandshake) { conn.setSoTimeout(1); // 1 second timeout TransientSSLParams params; - params.sslClusterPEMPayload = [] { - std::ifstream input("jstests/libs/client.pem"); - std::string str((std::istreambuf_iterator(input)), std::istreambuf_iterator()); - return str; - }(); + params.sslClusterPEMPayload = loadFile("jstests/libs/client.pem"); params.targetedClusterConnectionString = ConnectionString::forLocal(); auto status = conn.connectSocketOnly({"localhost", port}, std::move(params)); @@ -850,32 +853,27 @@ TEST_F(AsioTransportLayerWithServiceContextTest, ShutdownDuringSSLHandshake) { #ifdef __linux__ -/** - * Helper constants to be used as template parameters for AsioNetworkingBatonTest. Indicates - * whether the test is allowed to accept connections after establishing the first Session associated - * with `_client`. - */ -enum class AllowMultipleSessions : bool {}; - /** * Creates a connection between a client and a server, then runs tests against the * `AsioNetworkingBaton` associated with the server-side of the connection (i.e., `Client`). The * client-side of this connection is associated with `_connThread`, and the server-side is wrapped * inside `_client`. - * - * Instantiated with `shouldAllowMultipleSessions` which indicates whether the listener thread for - * this test's transport layer should accept more than the connection initially established by - * `_client`. */ -template -class AsioNetworkingBatonTest : public LockerNoopServiceContextTest { - // A service entry point that emplaces a Future with the ingress session corresponding to the - // first connection. +class AsioNetworkingBatonTest : public ServiceContextTest { +public: + /** + * Emplaces a Promise with the first ingress session. Can optionally accept + * further sessions, of which it takes co-ownership. 
+ */ class FirstSessionSEP : public ServiceEntryPoint { public: explicit FirstSessionSEP(Promise> promise) : _promise(std::move(promise)) {} + ~FirstSessionSEP() override { + _join(); + } + Status start() override { return Status::OK(); } @@ -887,32 +885,48 @@ class AsioNetworkingBatonTest : public LockerNoopServiceContextTest { } void startSession(std::shared_ptr session) override { - if (!_isEmplaced) { - _promise.emplaceValue(std::move(session)); - _isEmplaced = true; + stdx::lock_guard lk{_mutex}; + _sessions.push_back(session); + if (_promise) { + _promise->emplaceValue(std::move(session)); + _promise.reset(); return; } - invariant(static_cast(shouldAllowMultipleSessions), - "Created a second ingress Session when only one was expected."); + invariant(_allowMultipleSessions, "Unexpected multiple ingress sessions"); } - void endAllSessions(transport::Session::TagMask) override {} + void endAllSessions(transport::Session::TagMask) override { + _join(); + } bool shutdown(Milliseconds) override { + _join(); return true; } size_t numOpenSessions() const override { - MONGO_UNREACHABLE; + stdx::lock_guard lk{_mutex}; + return _sessions.size(); } logv2::LogSeverity slowSessionWorkflowLogSeverity() override { MONGO_UNIMPLEMENTED; } + void setAllowMultipleSessions() { + _allowMultipleSessions = true; + } + private: - Promise> _promise; - bool _isEmplaced = false; + void _join() { + stdx::lock_guard lk{_mutex}; + _sessions.clear(); + } + + bool _allowMultipleSessions = false; + mutable Mutex _mutex; + std::vector> _sessions; + boost::optional>> _promise; }; // Used for setting and canceling timers on the networking baton. Does not offer any timer @@ -928,11 +942,14 @@ class AsioNetworkingBatonTest : public LockerNoopServiceContextTest { } }; -public: + virtual void configureSep(FirstSessionSEP& sep) {} + void setUp() override { auto pf = makePromiseFuture>(); auto servCtx = getServiceContext(); - servCtx->setServiceEntryPoint(std::make_unique(std::move(pf.promise))); + auto sep = std::make_unique(std::move(pf.promise)); + configureSep(*sep); + servCtx->setServiceEntryPoint(std::move(sep)); auto tla = makeTLA(servCtx->getServiceEntryPoint()); const auto listenerPort = tla->listenerPort(); @@ -964,10 +981,12 @@ class AsioNetworkingBatonTest : public LockerNoopServiceContextTest { std::unique_ptr _connThread; }; -class IngressAsioNetworkingBatonTest - : public AsioNetworkingBatonTest {}; +class IngressAsioNetworkingBatonTest : public AsioNetworkingBatonTest {}; -class EgressAsioNetworkingBatonTest : public AsioNetworkingBatonTest { +class EgressAsioNetworkingBatonTest : public AsioNetworkingBatonTest { + void configureSep(FirstSessionSEP& sep) override { + sep.setAllowMultipleSessions(); + } }; // A `JoinThread` that waits for a ready signal from its underlying worker thread before returning diff --git a/src/mongo/transport/grpc/SConscript b/src/mongo/transport/grpc/SConscript index bd9d781e25a73..6cdd58b0fbc4d 100644 --- a/src/mongo/transport/grpc/SConscript +++ b/src/mongo/transport/grpc/SConscript @@ -9,10 +9,19 @@ env.Library( target='grpc_transport_layer', source=[ 'client_cache.cpp', + 'grpc_transport_layer.cpp', + 'server.cpp', + 'service.cpp', + 'util.cpp', 'wire_version_provider.cpp', ], LIBDEPS=[ + '$BUILD_DIR/mongo/db/service_context', '$BUILD_DIR/mongo/db/wire_version', + '$BUILD_DIR/mongo/rpc/client_metadata', + '$BUILD_DIR/mongo/transport/transport_layer_common', + '$BUILD_DIR/mongo/util/net/network', + '$BUILD_DIR/mongo/util/net/ssl_util', '$BUILD_DIR/third_party/shim_grpc', ], ) @@ 
-34,14 +43,20 @@ env.Library( env.CppUnitTest( target='grpc_transport_layer_test', source=[ + 'channel_pool_test.cpp', 'client_cache_test.cpp', 'grpc_session_test.cpp', + 'grpc_transport_layer_test.cpp', 'mock_server_stream_test.cpp', + 'mock_stub_test.cpp', + 'server_test.cpp', 'service_test.cpp', ], LIBDEPS=[ '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/service_context_test_fixture', + '$BUILD_DIR/mongo/db/wire_version', '$BUILD_DIR/mongo/rpc/message', '$BUILD_DIR/mongo/util/clock_source_mock', '$BUILD_DIR/third_party/shim_grpc', diff --git a/src/mongo/transport/grpc/bidirectional_pipe.h b/src/mongo/transport/grpc/bidirectional_pipe.h index f1c29471eb010..b53d61b519cf9 100644 --- a/src/mongo/transport/grpc/bidirectional_pipe.h +++ b/src/mongo/transport/grpc/bidirectional_pipe.h @@ -89,20 +89,36 @@ class BidirectionalPipe { } /** - * Close both ends of the pipe. In progress reads and writes on either end will be - * interrupted. + * Close both the read and write halves of this end of the pipe. In-progress reads and + * writes on this end and writes on the other end will be interrupted. + * + * Messages that have already been transmitted through this end of the pipe can still be + * read by the other end. */ void close() { _sendHalf.close(); _recvHalf.close(); } + /** + * Returns true when at least one of the following conditions is met: + * - This end of the pipe is closed. + * - The other end of the pipe is closed and there are no more messages to be read. + */ + bool isConsumed() const { + auto stats = _recvHalfCtrl.getStats(); + return stats.consumerEndClosed || (stats.queueDepth == 0 && stats.producerEndClosed); + } + private: friend BidirectionalPipe; explicit End(SingleProducerSingleConsumerQueue::Producer send, - SingleProducerSingleConsumerQueue::Consumer recv) - : _sendHalf{std::move(send)}, _recvHalf{std::move(recv)} {} + SingleProducerSingleConsumerQueue::Consumer recv, + SingleProducerSingleConsumerQueue::Controller recvCtrl) + : _sendHalf{std::move(send)}, + _recvHalf{std::move(recv)}, + _recvHalfCtrl(std::move(recvCtrl)) {} bool _isPipeClosedError(const DBException& e) const { return e.code() == ErrorCodes::ProducerConsumerQueueEndClosed || @@ -111,14 +127,17 @@ class BidirectionalPipe { SingleProducerSingleConsumerQueue::Producer _sendHalf; SingleProducerSingleConsumerQueue::Consumer _recvHalf; + SingleProducerSingleConsumerQueue::Controller _recvHalfCtrl; }; BidirectionalPipe() { SingleProducerSingleConsumerQueue::Pipe pipe1; SingleProducerSingleConsumerQueue::Pipe pipe2; - left = std::unique_ptr(new End(std::move(pipe1.producer), std::move(pipe2.consumer))); - right = std::unique_ptr(new End(std::move(pipe2.producer), std::move(pipe1.consumer))); + left = std::unique_ptr(new End( + std::move(pipe1.producer), std::move(pipe2.consumer), std::move(pipe2.controller))); + right = std::unique_ptr(new End( + std::move(pipe2.producer), std::move(pipe1.consumer), std::move(pipe1.controller))); } std::unique_ptr left; diff --git a/src/mongo/transport/grpc/channel_pool.h b/src/mongo/transport/grpc/channel_pool.h new file mode 100644 index 0000000000000..903d249a9d4b9 --- /dev/null +++ b/src/mongo/transport/grpc/channel_pool.h @@ -0,0 +1,264 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +#include "mongo/logv2/log.h" +#include "mongo/stdx/mutex.h" +#include "mongo/stdx/unordered_map.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/functional.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/synchronized_value.h" +#include "mongo/util/time_support.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork + +namespace mongo::transport::grpc { + +/** + * Allows maintaining a pool of `Channel` objects and using them to create instances of `Stub`. + * `Channel` and `Stub` are defined as template types to facilitate unit-testing. + * This type is oblivious to how gRPC channels and stubs are created, and relies on the factory + * functions (`ChannelFactory` and `StubFactory`) to handle that. + */ +template +class ChannelPool : public std::enable_shared_from_this> { +public: + using SSLModeResolver = unique_function; + using ChannelFactory = unique_function; + using StubFactory = unique_function; + + /** + * Maintains state for an individual `Channel`: allows deferred creation of `Channel` as well as + * tracking its last-used-time. + * All public APIs for this type are thread-safe. + */ + class ChannelState { + public: + ChannelState(std::shared_ptr pool, + HostAndPort remote, + bool useSSL, + Future channel) + : _pool(std::move(pool)), + _remote(std::move(remote)), + _useSSL(useSSL), + _channel(std::move(channel)) {} + + ChannelState(const ChannelState&) = delete; + ChannelState& operator=(const ChannelState&) = delete; + + Channel& channel() { + return _channel.get(); + } + + const HostAndPort& remote() const { + return _remote; + } + + bool useSSL() const { + return _useSSL; + } + + void updateLastUsed() { + auto now = _pool->_clockSource->now(); + **_lastUsed = now; + } + + Date_t lastUsed() const { + return **_lastUsed; + } + + private: + std::shared_ptr _pool; + const HostAndPort _remote; + const bool _useSSL; + Future _channel; + synchronized_value _lastUsed; + }; + + /** + * RAII type for `Stub` that helps with identifying idle channels. + * In terms of thread-safety, this type follows the semantics of `Stub`. 
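`ChannelState` above tracks its last-used time through the server's `synchronized_value` wrapper, and the gRPC session changes later in this patch use the same type for the termination status. A small sketch of the two access idioms involved, with an illustrative variable; only the operations actually used in this patch are shown:

```cpp
#include <boost/optional.hpp>

#include "mongo/base/status.h"
#include "mongo/util/synchronized_value.h"

namespace example {  // illustrative

mongo::synchronized_value<boost::optional<mongo::Status>> terminationStatus;

void singleLockedWrite() {
    // A double dereference takes the lock for exactly one access, as in
    // ChannelState::updateLastUsed() above.
    **terminationStatus = mongo::Status::OK();
}

void multiStatementAccess() {
    // synchronize() returns a guard that holds the lock for the whole scope,
    // as the session code does when it inspects and then updates the status.
    auto guard = terminationStatus.synchronize();
    if (!guard->has_value()) {
        guard->emplace(mongo::Status::OK());
    }
}

}  // namespace example
```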
+ */ + class StubHandle { + public: + explicit StubHandle(std::shared_ptr channelState, Stub stub) + : _channelState(std::move(channelState)), _stub(std::move(stub)) {} + + ~StubHandle() { + _channelState->updateLastUsed(); + } + + Stub& stub() { + return _stub; + } + + private: + std::shared_ptr _channelState; + Stub _stub; + }; + + /** + * Constructs a new instance of `ChannelPool` and accepts the following: + * - `clockSource` is used to record last-used-time for channels (doesn't need much accuracy). + * - `sslModeResolver` translates `ConnectSSLMode` into a boolean that decides if an encrypted + * channel should be used to create new stubs. + * - `channelFactory` is the factory for creating new channels. + * - `stubFactory` is the factory for creating new stubs. + */ + explicit ChannelPool(ClockSource* clockSource, + SSLModeResolver sslModeResolver, + ChannelFactory channelFactory, + StubFactory stubFactory) + : _clockSource(clockSource), + _sslModeResolver(std::move(sslModeResolver)), + _channelFactory(std::move(channelFactory)), + _stubFactory(std::move(stubFactory)) {} + + /** + * Creates a new stub to `remote` that uses `sslMode`. Internally, an existing channel is used + * to create the new stub, if available. Otherwise, a new channel is created. + */ + std::unique_ptr createStub(HostAndPort remote, ConnectSSLMode sslMode) { + std::shared_ptr cs = [&] { + const auto useSSL = _sslModeResolver(sslMode); + ChannelMapKeyType key{remote, useSSL}; + auto lk = stdx::unique_lock(_mutex); + if (auto iter = _channels.find(key); iter != _channels.end()) { + return iter->second; + } else { + auto pf = makePromiseFuture(); + auto state = std::make_shared( + this->shared_from_this(), remote, useSSL, std::move(pf.future)); + _channels.insert({key, state}); + lk.unlock(); + LOGV2_INFO(7401801, + "Creating a new gRPC channel", + "remote"_attr = remote, + "useSSL"_attr = useSSL); + pf.promise.setWith([&] { return _channelFactory(remote, useSSL); }); + return state; + } + }(); + return std::make_unique(std::move(cs), _stubFactory(cs->channel())); + } + + /** + * Drops all idle channels that are not used for the past `sinceLastUsed` minutes. An idle + * channel is one that is not referenced by any instance of `StubHandle`. Returns the number of + * dropped channels. + * Internally, this will iterate through all channels in the pool. This should not have any + * performance implications since we drop idle channels infrequently (e.g., every 30 minutes) + * and expect the maximum number of open channels to be a two digit number. + */ + size_t dropIdleChannels(Minutes sinceLastUsed) { + auto keepIf = [threshold = _clockSource->now() - sinceLastUsed](const auto& cs) { + if (cs.use_count() > 1 || cs->lastUsed() > threshold) + // There are stubs referencing this channel, or it's recently used. + return true; + return false; + }; + auto droppedChannels = _dropChannels(std::move(keepIf)); + + for (const auto& channel : droppedChannels) { + LOGV2_INFO(7401802, + "Dropping idle gRPC channel", + "remote"_attr = channel->remote(), + "useSSL"_attr = channel->useSSL(), + "lastUsed"_attr = channel->lastUsed()); + } + return droppedChannels.size(); + } + + /** + * Drops all channels and returns the number of dropped channels. May only be called when all + * stub handles (i.e., instances of `StubHandle`) created by this pool are released. Otherwise, + * it will terminate the process. 
+ */ + size_t dropAllChannels() { + auto droppedChannels = _dropChannels([](const auto&) { return false; }); + for (const auto& channel : droppedChannels) { + LOGV2_INFO(7401803, + "Dropping gRPC channel as part of dropping all channels", + "remote"_attr = channel->remote(), + "useSSL"_attr = channel->useSSL()); + } + return droppedChannels.size(); + } + + size_t size() const { + auto lk = stdx::lock_guard(_mutex); + return _channels.size(); + } + +private: + /** + * Iterates through all channels, calls into `shouldKeep` for each channel with a reference to + * its `ChannelState`, and decides if the channel should be dropped based on the return value. + * A channel cannot be dropped so long as it's being referenced by a `Stub`. Attempting to do + * so is a process fatal event. + * Returns a vector containing the only reference to the dropped channels. + */ + std::vector> _dropChannels( + std::function&)> shouldKeep) { + std::vector> droppedChannels; + auto lk = stdx::lock_guard(_mutex); + for (auto iter = _channels.begin(); iter != _channels.end();) { + auto prev = iter++; + const auto& cs = prev->second; + if (shouldKeep(cs)) + continue; + invariant(cs.use_count() == 1, "Attempted to drop a channel with existing stubs"); + droppedChannels.push_back(std::move(prev->second)); + _channels.erase(prev); + } + return droppedChannels; + } + + ClockSource* const _clockSource; + SSLModeResolver _sslModeResolver; + ChannelFactory _channelFactory; + StubFactory _stubFactory; + + mutable stdx::mutex _mutex; // NOLINT + + using ChannelMapKeyType = std::pair; + stdx::unordered_map> _channels; +}; + +} // namespace mongo::transport::grpc + +#undef MONGO_LOGV2_DEFAULT_COMPONENT diff --git a/src/mongo/transport/grpc/channel_pool_test.cpp b/src/mongo/transport/grpc/channel_pool_test.cpp new file mode 100644 index 0000000000000..5620ba880a69a --- /dev/null +++ b/src/mongo/transport/grpc/channel_pool_test.cpp @@ -0,0 +1,242 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
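Before its unit tests, a rough sketch of how the `ChannelPool` defined above is wired up and used. The dummy types, callback bodies, and template-argument order mirror the test fixture below and are otherwise assumptions, not code from the patch:

```cpp
#include <memory>

#include "mongo/transport/grpc/channel_pool.h"
#include "mongo/util/clock_source_mock.h"

namespace example {  // illustrative
using namespace mongo;
using namespace mongo::transport;
using namespace mongo::transport::grpc;

struct DummyChannel {};
struct DummyStub {};
using Pool = ChannelPool<DummyChannel, DummyStub>;

void sketch() {
    ClockSourceMock clock;

    // The pool must be owned by a shared_ptr: createStub() calls
    // shared_from_this() when it builds a new ChannelState.
    auto pool = std::make_shared<Pool>(
        &clock,
        [](ConnectSSLMode mode) { return mode == ConnectSSLMode::kEnableSSL; },
        [](HostAndPort&, bool) { return DummyChannel{}; },
        [](DummyChannel&) { return DummyStub{}; });

    // The first stub for a (remote, useSSL) key creates a channel; later
    // stubs for the same key reuse it.
    auto stub = pool->createStub(HostAndPort("example-host", 27017),
                                 ConnectSSLMode::kDisableSSL);
    stub.reset();  // releases the StubHandle and stamps the channel's last-used time

    // Channels with no live StubHandle and an old last-used time can be reaped.
    pool->dropIdleChannels(Minutes{30});
}

}  // namespace example
```

`dropAllChannels()` additionally requires that no `StubHandle` is alive at all: `_dropChannels()` checks with an invariant that each `ChannelState` is referenced only by the pool's own map, which the death test below exercises. Note also that the header pairs its `#define MONGO_LOGV2_DEFAULT_COMPONENT` with a trailing `#undef`, the same leak-avoidance pattern the new `test_MongoMacroDefinitionLeaksCheck` fixture earlier in this patch is built around.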
+ */ + +#include + +#include "mongo/transport/grpc/channel_pool.h" + +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/thread.h" +#include "mongo/unittest/barrier.h" +#include "mongo/unittest/death_test.h" +#include "mongo/unittest/unittest.h" +#include "mongo/util/clock_source_mock.h" +#include "mongo/util/fail_point.h" +#include "mongo/util/scopeguard.h" + +namespace mongo::transport::grpc { +namespace { + +MONGO_FAIL_POINT_DEFINE(blockBeforeCreatingNewChannel); +MONGO_FAIL_POINT_DEFINE(blockBeforeCreatingNewStub); + +class ChannelPoolTest : public unittest::Test { +public: + class DummyChannel {}; + class DummyStub {}; + using PoolType = ChannelPool; + + void setUp() override { + _clockSource = std::make_unique(); + _pool = std::make_shared( + _clockSource.get(), + [this](ConnectSSLMode mode) { return _resolveSSLMode(mode); }, + [this](HostAndPort& remote, bool useSSL) { return _makeChannel(remote, useSSL); }, + [this](DummyChannel& channel) { return _makeStub(channel); }); + } + + void tearDown() override { + _pool.reset(); + _clockSource.reset(); + } + + auto& clockSource() { + return *_clockSource; + } + + void setSSLMode(bool enable) { + _sslMode.store(enable); + } + + auto& pool() { + return *_pool; + } + +private: + bool _resolveSSLMode(ConnectSSLMode mode) { + auto sslMode = _sslMode.load(); + ASSERT_TRUE(mode == ConnectSSLMode::kDisableSSL || sslMode); + if (mode == ConnectSSLMode::kGlobalSSLMode) + return sslMode; + return mode == ConnectSSLMode::kEnableSSL; + } + + DummyChannel _makeChannel(HostAndPort&, bool) { + blockBeforeCreatingNewChannel.pauseWhileSet(); + return {}; + } + + DummyStub _makeStub(DummyChannel&) { + blockBeforeCreatingNewStub.pauseWhileSet(); + return {}; + } + + std::unique_ptr _clockSource; + std::shared_ptr _pool; + AtomicWord _sslMode{false}; +}; + +TEST_F(ChannelPoolTest, StartsEmpty) { + ASSERT_EQ(pool().size(), 0); +} + +TEST_F(ChannelPoolTest, CanReuseChannel) { + HostAndPort remote("FakeHost", 123); + auto s1 = pool().createStub(remote, ConnectSSLMode::kDisableSSL); + ASSERT_EQ(pool().size(), 1); + auto s2 = pool().createStub(remote, ConnectSSLMode::kDisableSSL); + ASSERT_EQ(pool().size(), 1); +} + +TEST_F(ChannelPoolTest, ConsidersSSLMode) { + setSSLMode(true); + ON_BLOCK_EXIT([&] { setSSLMode(false); }); + HostAndPort remote("FakeHost", 123); + auto s1 = pool().createStub(remote, ConnectSSLMode::kEnableSSL); + ASSERT_EQ(pool().size(), 1); + auto s2 = pool().createStub(remote, ConnectSSLMode::kDisableSSL); + ASSERT_EQ(pool().size(), 2); +} + +TEST_F(ChannelPoolTest, DropUnusedChannel) { + { + // Create a new stub and immediately discard it. This should internally create a new + // channel to `SomeHost:123`. + pool().createStub({"SomeHost", 123}, ConnectSSLMode::kDisableSSL); + } + ASSERT_EQ(pool().size(), 1); + clockSource().advance(Minutes{5}); + ASSERT_EQ(pool().dropIdleChannels(Minutes{5}), 1); + ASSERT_EQ(pool().size(), 0); +} + +TEST_F(ChannelPoolTest, UpdatesLastUsed) { + { + auto stub = pool().createStub({"Mongo", 123}, ConnectSSLMode::kDisableSSL); + ASSERT_EQ(pool().size(), 1); + // Advance time before destroying `stub` to update the last-used-time for the channel. The + // stub, which is the only active user of its channel, is removed as we leave this scope. 
+ clockSource().advance(Minutes{1}); + } + clockSource().advance(Minutes{4}); + ASSERT_EQ(pool().dropIdleChannels(Minutes{5}), 0); + ASSERT_EQ(pool().size(), 1); +} + +TEST_F(ChannelPoolTest, DropNotRecentlyUsedChannelsWithoutStubs) { + HostAndPort remoteA("RemoteA", 123), remoteB("RemoteB", 123); + auto s1 = pool().createStub(remoteA, ConnectSSLMode::kDisableSSL); + { + // Create a new stub and immediately discard it. This creates a new channel to `remoteB`. + pool().createStub(remoteB, ConnectSSLMode::kDisableSSL); + } + ASSERT_EQ(pool().size(), 2); + clockSource().advance(Minutes{2}); + ASSERT_EQ(pool().dropIdleChannels(Minutes{2}), 1); + + // Verifying that remoteA's channel remains open. + auto s2 = pool().createStub(remoteA, ConnectSSLMode::kDisableSSL); + ASSERT_EQ(pool().size(), 1); +} + +TEST_F(ChannelPoolTest, DropAllChannelsWithNoStubs) { + const auto kNumChannels = 10; + for (int i = 1; i <= kNumChannels; i++) { + // Each iteration results in creating a new channel, targeting "FakeHost:(123 + i)". + pool().createStub({"FakeHost", 123 + i}, ConnectSSLMode::kDisableSSL); + } + ASSERT_EQ(pool().size(), kNumChannels); + ASSERT_EQ(pool().dropAllChannels(), kNumChannels); + ASSERT_EQ(pool().size(), 0); +} + +DEATH_TEST_F(ChannelPoolTest, DropAllChannelsWithStubs, "invariant") { + const auto kNumChannels = 10; + for (int i = 1; i <= kNumChannels; i++) { + auto stub = pool().createStub({"FakeHost", 123 + i}, ConnectSSLMode::kDisableSSL); + if (i == kNumChannels) { + ASSERT_EQ(pool().size(), kNumChannels); + pool().dropAllChannels(); // Must be fatal. + } + } +} + +TEST_F(ChannelPoolTest, CannotDropIdleChannelWhileCreatingNewStub) { + unittest::Barrier beforeCreatingStub(2); + stdx::thread worker([&] { + beforeCreatingStub.countDownAndWait(); + auto stub = pool().createStub({"FakeHost", 123}, ConnectSSLMode::kDisableSSL); + }); + ON_BLOCK_EXIT([&] { worker.join(); }); + + FailPointEnableBlock fp("blockBeforeCreatingNewChannel"); + beforeCreatingStub.countDownAndWait(); + fp->waitForTimesEntered(fp.initialTimesEntered() + 1); + // At this point, `worker` is blocked on the creation of a new channel, which should have + // already been added to the list of open channels. + ASSERT_EQ(pool().size(), 1); + clockSource().advance(Minutes{2}); + ASSERT_EQ(pool().dropIdleChannels(Minutes{1}), 0); +} + +TEST_F(ChannelPoolTest, OneChannelForMultipleStubs) { + const HostAndPort remote{"SomeHost", 1234}; + unittest::Barrier beforeCreatingFirstStub(2); + unittest::Barrier beforeCreatingSecondStub(2); + stdx::thread channelCreator([&] { + beforeCreatingFirstStub.countDownAndWait(); + // We create this one first, which should also create the underlying channel. + auto stub1 = pool().createStub(remote, ConnectSSLMode::kDisableSSL); + }); + stdx::thread channelUser([&] { + beforeCreatingSecondStub.countDownAndWait(); + // This one is created second, which should reuse the created channel. + auto stub2 = pool().createStub(remote, ConnectSSLMode::kDisableSSL); + }); + ON_BLOCK_EXIT([&] { + channelCreator.join(); + channelUser.join(); + }); + + FailPointEnableBlock sFP("blockBeforeCreatingNewStub"); + { + FailPointEnableBlock cFP("blockBeforeCreatingNewChannel"); + beforeCreatingFirstStub.countDownAndWait(); + cFP->waitForTimesEntered(cFP.initialTimesEntered() + 1); + // `channelCreator` is now blocked in the factory function for creating new channels. 
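Several of the concurrency tests above and below coordinate threads with the same fail-point handshake. A stripped-down version of the idiom, assuming a fail point with the given (illustrative) name has already been registered with `MONGO_FAIL_POINT_DEFINE`:

```cpp
#include "mongo/util/fail_point.h"

namespace example {  // illustrative

void controllingThread() {
    // Arm the fail point; the worker thread will pause inside it.
    mongo::FailPointEnableBlock fp("someFailPointName");
    auto before = fp.initialTimesEntered();

    // ... start the worker thread here ...

    // Block until the worker has actually reached the fail point.
    fp->waitForTimesEntered(before + 1);

    // The worker is now parked; assertions that depend on that go here.

    // Leaving this scope disarms the fail point and unblocks the worker.
}

}  // namespace example
```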
+ beforeCreatingSecondStub.countDownAndWait(); + // `channelUser` can now go ahead with creating `stub2`, but it should wait for + // `channelCreator` to return from creating the new channel. + } + sFP->waitForTimesEntered(sFP.initialTimesEntered() + 2); + ASSERT_EQ(pool().size(), 1); +} + +} // namespace +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/client_context.h b/src/mongo/transport/grpc/client_context.h new file mode 100644 index 0000000000000..b4b8238cce65d --- /dev/null +++ b/src/mongo/transport/grpc/client_context.h @@ -0,0 +1,92 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +#include "mongo/transport/grpc/metadata.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" + +namespace mongo::transport::grpc { + +/** + * Base class modeling a gRPC ClientContext. + * See: https://grpc.github.io/grpc/cpp/classgrpc_1_1_client_context.html + */ +class ClientContext { +public: + virtual ~ClientContext() = default; + + /** + * Add an entry to the metadata associated with the RPC. + * + * This must only be called before invoking the RPC. + */ + virtual void addMetadataEntry(const std::string& key, const std::string& value) = 0; + + /** + * Retrieve the server's initial metadata. + * + * This must only be called after the first message has been received on the ClientStream + * created from the RPC that this context is associated with. + */ + virtual boost::optional getServerInitialMetadata() const = 0; + + /** + * Set the deadline for the RPC to be executed using this context. + * + * This must only be called before invoking the RPC. + */ + virtual void setDeadline(Date_t deadline) = 0; + + virtual Date_t getDeadline() const = 0; + + virtual HostAndPort getRemote() const = 0; + + /** + * Send a best-effort out-of-band cancel on the call associated with this ClientContext. There + * is no guarantee the call will be cancelled (e.g. if the call has already finished by the time + * the cancellation is received). + * + * Note that tryCancel() will not impede the execution of any already scheduled work (e.g. 
+ * messages already queued to be sent on a stream will still be sent), though the reported + * sucess or failure of such work may reflect the cancellation. + * + * This method is thread-safe, and can be called multiple times from any thread. It should not + * be called before this ClientContext has been used to invoke an RPC. + * + * See: + * https://grpc.github.io/grpc/cpp/classgrpc_1_1_client_context.html#abd0f6715c30287b75288015eee628984 + */ + virtual void tryCancel() = 0; +}; +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/client_stream.h b/src/mongo/transport/grpc/client_stream.h new file mode 100644 index 0000000000000..7d11e9f0847f3 --- /dev/null +++ b/src/mongo/transport/grpc/client_stream.h @@ -0,0 +1,79 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +#include "mongo/util/shared_buffer.h" + +namespace mongo::transport::grpc { + +/** + * Base class modeling a synchronous client side of a gRPC stream. + * See: https://grpc.github.io/grpc/cpp/classgrpc_1_1_client_reader_writer.html + * + * ClientStream::read() is thread safe with respect to ClientStream::write(), but neither method + * should be called concurrently with another invocation of itself on the same stream. + * + * ClientStream::finish() is thread safe with respect to ClientStream::read(). + */ +class ClientStream { +public: + virtual ~ClientStream() = default; + + /** + * Block to read a message from the stream. + * + * Returns boost::none if the stream is closed, either cleanly or due to an underlying + * connection failure. + */ + virtual boost::optional read() = 0; + + /** + * Block to write a message to the stream. + * + * Returns true if the write was successful or false if it failed due to the stream being + * closed, either explicitly or due to an underlying connection failure. + */ + virtual bool write(ConstSharedBuffer msg) = 0; + + /** + * Block waiting until all received messages have been read and the stream has been closed. + * This indicates to the server side that the client will not be sending any further messages. + * + * Returns the final status of the RPC associated with this stream. 
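Taken together, the comments on `ClientContext` above and on `ClientStream` describe a fairly strict calling order: metadata and deadline before the RPC is invoked, server metadata only after the first read, and a single `finish()` at the end. A sketch of that contract; how the stream is actually obtained, the helper signature, and the metadata key are illustrative assumptions:

```cpp
#include <functional>
#include <memory>

#include "mongo/transport/grpc/client_context.h"
#include "mongo/transport/grpc/client_stream.h"
#include "mongo/util/time_support.h"

namespace example {  // illustrative
using namespace mongo;
using namespace mongo::transport::grpc;

void runOneCall(ClientContext& ctx,
                std::function<std::unique_ptr<ClientStream>(ClientContext&)> invokeRpc,
                ConstSharedBuffer request) {
    // Metadata and the deadline may only be set before the RPC is invoked.
    ctx.addMetadataEntry("example-key", "example-value");
    ctx.setDeadline(Date_t::now() + Seconds{30});

    auto stream = invokeRpc(ctx);

    // write()/read() report failure once the stream is closed, cancelled, or
    // past its deadline.
    if (stream->write(request)) {
        if (auto reply = stream->read()) {
            // Server initial metadata is only available after the first read.
            auto serverMetadata = ctx.getServerInitialMetadata();
        }
    }

    // finish() is called exactly once and yields the final RPC status;
    // ctx.tryCancel() may be used from another thread to abort the call early.
    ::grpc::Status status = stream->finish();
}

}  // namespace example
```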
+ * + * This method should only be called once. + */ + virtual ::grpc::Status finish() = 0; +}; + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/grpc_server_context.h b/src/mongo/transport/grpc/grpc_server_context.h index a81165028986d..672074ddf075d 100644 --- a/src/mongo/transport/grpc/grpc_server_context.h +++ b/src/mongo/transport/grpc/grpc_server_context.h @@ -53,10 +53,10 @@ class GRPCServerContext : public ServerContext { } explicit GRPCServerContext(::grpc::ServerContext* ctx) - : _ctx{ctx}, _hostAndPort{parseURI(_ctx->peer())} { + : _ctx{ctx}, _remote{parseURI(_ctx->peer())} { for (auto& kvp : _ctx->client_metadata()) { - _clientMetadata.insert({std::string_view{kvp.first.data(), kvp.first.length()}, - std::string_view{kvp.second.data(), kvp.second.length()}}); + _clientMetadata.insert({StringData{kvp.first.data(), kvp.first.length()}, + StringData{kvp.second.data(), kvp.second.length()}}); } } @@ -74,8 +74,8 @@ class GRPCServerContext : public ServerContext { return Date_t{_ctx->deadline()}; } - HostAndPort getHostAndPort() const override { - return _hostAndPort; + HostAndPort getRemote() const override { + return _remote; } bool isCancelled() const override { @@ -89,7 +89,7 @@ class GRPCServerContext : public ServerContext { private: ::grpc::ServerContext* _ctx; MetadataView _clientMetadata; - HostAndPort _hostAndPort; + HostAndPort _remote; }; } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/grpc_session.h b/src/mongo/transport/grpc/grpc_session.h index 38f2038673747..c208cf7ed953b 100644 --- a/src/mongo/transport/grpc/grpc_session.h +++ b/src/mongo/transport/grpc/grpc_session.h @@ -29,19 +29,24 @@ #pragma once -#include #include +#include + #include "mongo/base/error_codes.h" #include "mongo/base/status.h" #include "mongo/config.h" #include "mongo/logv2/log.h" #include "mongo/platform/compiler.h" +#include "mongo/transport/grpc/client_context.h" +#include "mongo/transport/grpc/client_stream.h" #include "mongo/transport/grpc/server_context.h" #include "mongo/transport/grpc/server_stream.h" +#include "mongo/transport/grpc/util.h" #include "mongo/transport/session.h" #include "mongo/util/assert_util.h" #include "mongo/util/future.h" +#include "mongo/util/shared_buffer.h" #include "mongo/util/synchronized_value.h" #include "mongo/util/uuid.h" @@ -54,40 +59,96 @@ namespace mongo::transport::grpc { */ class GRPCSession : public Session { public: - explicit GRPCSession(TransportLayer* tl) : _tl(tl) {} + explicit GRPCSession(TransportLayer* tl, HostAndPort remote, boost::optional clientId) + : _tl(tl), _remote(std::move(remote)), _clientId(std::move(clientId)) {} - virtual ~GRPCSession() { - invariant(terminationStatus(), "gRPC sessions must always be terminated"); - } + virtual ~GRPCSession() = default; /** * Returns the unique identifier used for the underlying gRPC stream. */ - virtual boost::optional clientId() const = 0; + boost::optional clientId() const { + return _clientId; + }; + + const HostAndPort& remote() const { + return _remote; + } /** - * Terminates the underlying gRPC stream. + * Cancels the RPC associated with the underlying gRPC stream and updates the termination status + * of the session to include the provided reason. + * + * In-progress reads and writes to this session will be interrupted, and future reads and writes + * will fail with an error. + * + * If this session is already terminated, this has no effect. 
*/ - virtual void terminate(Status status) { - auto ts = _terminationStatus.synchronize(); - if (MONGO_unlikely(ts->has_value())) - return; - ts->emplace(std::move(status)); + void cancel(StringData reason) { + // Need to update terminationStatus before cancelling so that when the RPC caller/handler is + // interrupted, it will be guaranteed to have access to the reason for cancellation. + if (_setTerminationStatus({ErrorCodes::CallbackCanceled, std::move(reason)})) { + _tryCancel(); + } } /** - * Returns the termination status (always set at termination). Remains unset until termination. + * Mark the session as gracefully terminated. + * + * In-progress reads and writes to this session will not be interrupted, but future ones will + * fail with an error. + * + * If this session is already terminated, this has no effect. + */ + void end() final { + _setTerminationStatus(Status::OK()); + } + + StatusWith sourceMessage() noexcept override { + if (MONGO_likely(isConnected())) { + if (auto maybeBuffer = _readFromStream()) { + return Message(std::move(*maybeBuffer)); + } + } + return Status(ErrorCodes::StreamTerminated, "Unable to read from gRPC stream"); + } + + Status sinkMessage(Message message) noexcept override { + if (MONGO_likely(isConnected() && _writeToStream(message.sharedBuffer()))) { + return Status::OK(); + } + return Status(ErrorCodes::StreamTerminated, "Unable to write to gRPC stream"); + } + + /** + * Returns the reason for which this stream was terminated, if any. "Termination" includes + * cancellation events (e.g. network interruption, explicit cancellation, or + * exceeding the deadline) as well as graceful closing of the session via end(). + * + * Remains unset until termination. */ boost::optional terminationStatus() const { - return **_terminationStatus; + auto status = _terminationStatus.synchronize(); + // If the RPC was cancelled, return a status reflecting that, including in the case where + // the RPC was cancelled after the session was already locally ended (i.e. after the + // termination status was set to OK). + if (_isCancelled() && (!status->has_value() || (*status)->isOK())) { + return Status(ErrorCodes::CallbackCanceled, + "gRPC session was terminated due to the associated RPC being cancelled"); + } + return *status; } TransportLayer* getTransportLayer() const final { return _tl; } - void end() final { - terminate(Status::OK()); + /** + * The following inspects the logical state of the underlying stream: the session is considered + * not connected when the user has terminated the session (either with or without an error). + */ + bool isConnected() final { + return !_terminationStatus->has_value(); } /** @@ -98,6 +159,10 @@ class GRPCSession : public Session { return false; } + std::string clientIdStr() const { + return _clientId ? _clientId->toString() : "N/A"; + } + /** * The following APIs are not implemented for both ingress and egress gRPC sessions. */ @@ -144,15 +209,43 @@ class GRPCSession : public Session { MONGO_UNIMPLEMENTED; } +protected: + /** + * Sets the termination status if it hasn't been set already. + * Returns whether the termination status was updated or not. 
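The comments above define the session termination lifecycle: `cancel()` interrupts and records a reason, `end()` terminates gracefully, and every read or write after either one fails with `StreamTerminated`. A sketch of that lifecycle from an ingress handler's point of view; `handle()` stands in for real command dispatch and is not part of the patch:

```cpp
#include "mongo/rpc/message.h"
#include "mongo/transport/grpc/grpc_session.h"

namespace example {  // illustrative
using namespace mongo;
using namespace mongo::transport::grpc;

Message handle(Message request);  // illustrative stand-in for command dispatch

void serveOneMessage(IngressSession& session) {
    // Reads fail once the session has been ended or cancelled.
    auto swRequest = session.sourceMessage();
    if (!swRequest.isOK()) {
        session.terminate(swRequest.getStatus());
        return;
    }

    if (!session.sinkMessage(handle(std::move(swRequest.getValue()))).isOK()) {
        // cancel() records the reason first, then interrupts the underlying RPC.
        session.cancel("failed to write reply");
        return;
    }

    // Graceful path: terminationStatus() becomes OK and later I/O fails.
    session.end();
    invariant(session.terminationStatus().has_value());
}

}  // namespace example
```

Both concrete session classes tassert in their destructors that a termination status has been set, so one of `end()`, `cancel()`, `terminate()`, or, on the egress side, `finish()` must run before the session is destroyed.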
+ */ + bool _setTerminationStatus(Status status) { + auto ts = _terminationStatus.synchronize(); + if (MONGO_unlikely(ts->has_value() || _isCancelled())) + return false; + ts->emplace(std::move(status)); + return true; + } + private: + virtual void _tryCancel() = 0; + + virtual bool _isCancelled() const = 0; + + virtual boost::optional _readFromStream() = 0; + + virtual bool _writeToStream(ConstSharedBuffer msg) = 0; + // TODO SERVER-74020: replace this with `GRPCTransportLayer`. TransportLayer* const _tl; + const HostAndPort _remote; + const boost::optional _clientId; + synchronized_value> _terminationStatus; }; /** * Represents an ingress gRPC session (the server side of the stream). + * + * Calling sinkMessage() or sourceMessage() is only allowed from the thread that owns the underlying + * gRPC stream. This is necessary to ensure accessing _ctx and _stream does not result in + * use-after-free. */ class IngressSession final : public GRPCSession { public: @@ -160,81 +253,132 @@ class IngressSession final : public GRPCSession { ServerContext* ctx, ServerStream* stream, boost::optional clientId) - : GRPCSession(tl), - _ctx(ctx), - _stream(stream), - _clientId(std::move(clientId)), - _remote(ctx->getHostAndPort()) { + : GRPCSession(tl, ctx->getRemote(), std::move(clientId)), _ctx(ctx), _stream(stream) { LOGV2_DEBUG(7401101, 2, "Constructed a new gRPC ingress session", "id"_attr = id(), - "clientId"_attr = _clientIdStr(), - "remote"_attr = _ctx->getHostAndPort()); + "remoteClientId"_attr = clientIdStr(), + "remote"_attr = remote()); } ~IngressSession() { auto ts = terminationStatus(); - LOGV2_DEBUG(7401102, + tassert( + 7401491, "gRPC sessions must be terminated before being destructed", ts.has_value()); + LOGV2_DEBUG(7401402, 2, "Finished cleaning up a gRPC ingress session", "id"_attr = id(), - "clientId"_attr = _clientIdStr(), - "remote"_attr = _ctx->getHostAndPort(), - "status"_attr = ts); - if (MONGO_unlikely(!ts)) { - terminate({ErrorCodes::StreamTerminated, "Terminating session through destructor"}); - } + "remoteClientId"_attr = clientIdStr(), + "remote"_attr = remote(), + "status"_attr = *ts); } - boost::optional clientId() const override { - return _clientId; - } /** - * The following inspects the logical state of the underlying stream: the session is considered - * not connected when: the underlying stream is closed/canceled, or the user has terminated the - * session (either with or without an error). + * Mark the session as logically terminated with the provided status. In-progress reads and + * writes to this session will not be interrupted, but future attempts to read or write to this + * session will fail. + * + * This has no effect if the stream is already terminated. 
*/ - bool isConnected() override { - return !(_ctx->isCancelled() || terminationStatus()); + void terminate(Status status) { + _setTerminationStatus(std::move(status)); } - StatusWith sourceMessage() noexcept override { - if (MONGO_likely(isConnected())) { - if (auto maybeBuffer = _stream->read()) { - return Message(std::move(*maybeBuffer)); - } - } - return Status(ErrorCodes::StreamTerminated, "Unable to read from ingress session"); +private: + void _tryCancel() override { + _ctx->tryCancel(); } - Status sinkMessage(Message message) noexcept override { - if (MONGO_likely(isConnected() && _stream->write(message.sharedBuffer()))) { - return Status::OK(); - } - return Status(ErrorCodes::StreamTerminated, "Unable to write to ingress session"); + bool _isCancelled() const override { + return _ctx->isCancelled(); } - void terminate(Status status) override { - if (MONGO_unlikely(!status.isOK())) { - _ctx->tryCancel(); - } - GRPCSession::terminate(std::move(status)); + boost::optional _readFromStream() override { + return _stream->read(); } - const HostAndPort& remote() const override { - return _remote; + bool _writeToStream(ConstSharedBuffer msg) override { + return _stream->write(msg); + } + + // _stream is only valid while the RPC handler is still running. It should not be + // accessed after the stream has been terminated. + ServerContext* const _ctx; + ServerStream* const _stream; +}; + +/** + * Represents the client side of a gRPC stream. + */ +class EgressSession final : public GRPCSession { +public: + EgressSession(TransportLayer* tl, + std::shared_ptr ctx, + std::shared_ptr stream, + boost::optional clientId) + : GRPCSession(tl, ctx->getRemote(), std::move(clientId)), + _ctx(std::move(ctx)), + _stream(std::move(stream)) { + LOGV2_DEBUG(7401401, + 2, + "Constructed a new gRPC egress session", + "id"_attr = id(), + "remoteClientId"_attr = clientIdStr(), + "remote"_attr = remote()); + } + + ~EgressSession() { + auto ts = terminationStatus(); + tassert( + 7401411, "gRPC sessions must be terminated before being destructed", ts.has_value()); + LOGV2_DEBUG(7401403, + 2, + "Finished cleaning up a gRPC egress session", + "id"_attr = id(), + "localClientId"_attr = clientIdStr(), + "remote"_attr = remote(), + "status"_attr = *ts); + } + + /** + * Indicates to the server side that the client will not be sending any further messages, then + * blocks until all messages from the server have been read and the server has returned a final + * status. Once a status has been received, this session's termination status is updated + * accordingly. + * + * Returns the termination status. + * + * This method should only be called once. + * This method should be used instead of end() in most cases, since it retrieves the server's + * return status for the RPC. + */ + Status finish() { + _setTerminationStatus(util::convertStatus(_stream->finish())); + return *terminationStatus(); } private: - std::string _clientIdStr() const { - return _clientId ? _clientId->toString() : "N/A"; + void _tryCancel() override { + _ctx->tryCancel(); } - ServerContext* const _ctx; - ServerStream* const _stream; - const boost::optional _clientId; - const HostAndPort _remote; + bool _isCancelled() const override { + // There is no way of determining this client-side outside of finish(). 
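+ // As a consequence, an egress session's terminationStatus() only reflects statuses that
+ // were recorded locally via end(), cancel(), or finish().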
+ return false; + } + + boost::optional _readFromStream() override { + return _stream->read(); + } + + bool _writeToStream(ConstSharedBuffer msg) override { + return _stream->write(msg); + } + + const std::shared_ptr _ctx; + const std::shared_ptr _stream; }; } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/grpc_session_test.cpp b/src/mongo/transport/grpc/grpc_session_test.cpp index 24baf977f1e4d..0fe81ba721230 100644 --- a/src/mongo/transport/grpc/grpc_session_test.cpp +++ b/src/mongo/transport/grpc/grpc_session_test.cpp @@ -29,164 +29,265 @@ #include -#include "mongo/platform/atomic_word.h" +#include "mongo/base/error_codes.h" +#include "mongo/logv2/log.h" #include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" #include "mongo/transport/grpc/grpc_session.h" #include "mongo/transport/grpc/test_fixtures.h" -#include "mongo/unittest/death_test.h" +#include "mongo/transport/grpc/util.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/unittest.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/scopeguard.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest namespace mongo::transport::grpc { namespace { -class SessionTest : public ServiceContextWithClockSourceMockTest { -public: - auto makeMessage() { - OpMsg msg; - msg.body = BSON("id" << _nextMessage.fetchAndAdd(1)); - return msg.serialize(); - } - -private: - AtomicWord _nextMessage{0}; -}; - -class IngressSessionTest : public SessionTest { +class GRPCSessionTest : public ServiceContextWithClockSourceMockTest { public: static constexpr auto kStreamTimeout = Seconds(1); static constexpr auto kClientId = "c08663ac-2f6c-408d-8829-97e67eef9f23"; - static constexpr auto kRemote = "FakeHost:1234"; void setUp() override { - SessionTest::setUp(); - _fixture = std::make_unique( - HostAndPort{kRemote}, kStreamTimeout, _clientMetadata); + ServiceContextWithClockSourceMockTest::setUp(); + _streamFixtures = _makeStreamFixtures(); } void tearDown() override { - _fixture.reset(); + _streamFixtures.reset(); + ServiceContextWithClockSourceMockTest::tearDown(); } - auto fixture() { - return _fixture.get(); + template + auto makeSession(boost::optional clientId) { + if constexpr (std::is_same::value) { + return std::make_unique(nullptr, + _streamFixtures->rpc->serverCtx.get(), + _streamFixtures->rpc->serverStream.get(), + std::move(clientId)); + } else { + static_assert(std::is_same::value == true); + return std::make_unique(nullptr, + _streamFixtures->clientCtx, + _streamFixtures->clientStream, + std::move(clientId)); + } } + template auto makeSession() { - auto swClientId = UUID::parse(kClientId); - ASSERT_OK(swClientId.getStatus()); - return std::make_unique(nullptr, - _fixture->serverCtx.get(), - _fixture->serverStream.get(), - swClientId.getValue()); + return makeSession(uassertStatusOK(UUID::parse(kClientId))); + } + + /** + * Runs the provided callback twice, each time with a new fixture: + * - First with an instance of `EgressSession`. + * - Then with an instance of `IngressSession`. + * Upon completion, `_streamFixtures` is restored to its original state. 
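+ *
+ * Example (hypothetical test body):
+ *     runWithBoth([&](auto& rpc, auto& session) { ASSERT_TRUE(session.isConnected()); });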
+ */ + using CallbackType = std::function; + void runWithBoth(CallbackType cb) { + _runCallback(cb); + _runCallback(cb); + } + + auto& fixtures() const { + return *_streamFixtures; } private: - const MetadataView _clientMetadata = {{"foo", "bar"}}; - std::unique_ptr _fixture; + std::unique_ptr _makeStreamFixtures() { + // The MockStreamTestFixtures created here doesn't contain any references to the channel or + // server, so it's okay to let stubFixture go out of scope. + MockStubTestFixtures stubFixture; + MetadataView metadata = {{"foo", "bar"}}; + return stubFixture.makeStreamTestFixtures( + getServiceContext()->getFastClockSource()->now() + kStreamTimeout, std::move(metadata)); + } + + template + void _runCallback(CallbackType& cb) { + // Install a new fixture for the duration of this call. + auto localFixtures = _makeStreamFixtures(); + _streamFixtures.swap(localFixtures); + ON_BLOCK_EXIT([&] { _streamFixtures.swap(localFixtures); }); + + auto session = makeSession(); + ON_BLOCK_EXIT([&] { session->end(); }); + LOGV2(7401431, + "Running test with gRPC session", + "type"_attr = std::is_same::value ? "egress" : "ingress"); + cb(*_streamFixtures->rpc, *session); + } + + std::unique_ptr _streamFixtures; }; -TEST_F(IngressSessionTest, NoClientId) { - auto session = std::make_unique( - nullptr, fixture()->serverCtx.get(), fixture()->serverStream.get(), boost::none); - ASSERT_FALSE(session->clientId()); - session->end(); -} +TEST_F(GRPCSessionTest, NoClientId) { + { + auto session = makeSession(boost::none); + ASSERT_FALSE(session->clientId()); + session->end(); + } -TEST_F(IngressSessionTest, AlwaysCancelsStream) { - auto session = makeSession(); - ASSERT_FALSE(fixture()->serverCtx->isCancelled()); - session.reset(); - ASSERT_TRUE(fixture()->serverCtx->isCancelled()); + { + auto session = makeSession(boost::none); + ASSERT_FALSE(session->clientId()); + session->end(); + } } -TEST_F(IngressSessionTest, GetClientId) { - auto session = makeSession(); - ASSERT_TRUE(session->clientId()); - ASSERT_EQ(session->clientId()->toString(), kClientId); +TEST_F(GRPCSessionTest, GetClientId) { + runWithBoth([&](auto&, auto& session) { + ASSERT_TRUE(session.clientId()); + ASSERT_EQ(session.clientId()->toString(), kClientId); + }); } -TEST_F(IngressSessionTest, GetRemote) { - auto session = makeSession(); - ASSERT_EQ(session->remote().toString(), kRemote); +TEST_F(GRPCSessionTest, GetRemote) { + runWithBoth([&](auto&, auto& session) { + auto expectedRemote = dynamic_cast(&session) + ? 
MockStubTestFixtures::kBindAddress + : MockStubTestFixtures::kClientAddress; + ASSERT_EQ(session.remote(), HostAndPort(expectedRemote)); + }); } -TEST_F(IngressSessionTest, IsConnected) { - auto session = makeSession(); +TEST_F(GRPCSessionTest, IsConnected) { + auto session = makeSession(); ASSERT_TRUE(session->isConnected()); - fixture()->serverCtx->tryCancel(); + session->cancel("shutdown error"); ASSERT_FALSE(session->isConnected()); + ASSERT_TRUE(fixtures().rpc->serverCtx->isCancelled()); } -TEST_F(IngressSessionTest, Terminate) { - auto session = makeSession(); - session->terminate(Status::OK()); - ASSERT_FALSE(session->isConnected()); - ASSERT_TRUE(session->terminationStatus()); - ASSERT_OK(*session->terminationStatus()); +TEST_F(GRPCSessionTest, End) { + runWithBoth([&](auto& rpc, auto& session) { + session.end(); + ASSERT_FALSE(session.isConnected()); + ASSERT_TRUE(session.terminationStatus()); + ASSERT_OK(session.terminationStatus()); + ASSERT_FALSE(rpc.serverCtx->isCancelled()); + }); } -TEST_F(IngressSessionTest, TerminateWithError) { - const Status error(ErrorCodes::InternalError, "Some Error"); - auto session = makeSession(); - session->terminate(error); - ASSERT_FALSE(session->isConnected()); - ASSERT_TRUE(session->terminationStatus()); - ASSERT_EQ(*session->terminationStatus(), error); +TEST_F(GRPCSessionTest, CancelWithReason) { + constexpr StringData kExpectedReason = "Some error condition"_sd; + runWithBoth([&](auto& rpc, auto& session) { + session.cancel(kExpectedReason); + ASSERT_FALSE(session.isConnected()); + ASSERT_TRUE(session.terminationStatus()); + ASSERT_TRUE(ErrorCodes::isCancellationError(*session.terminationStatus())); + ASSERT_EQ(session.terminationStatus()->reason(), kExpectedReason); + ASSERT_EQ(session.sourceMessage().getStatus(), ErrorCodes::StreamTerminated); + ASSERT_EQ(session.sinkMessage(makeUniqueMessage()), ErrorCodes::StreamTerminated); + ASSERT_TRUE(rpc.serverCtx->isCancelled()); + }); } -TEST_F(IngressSessionTest, TerminateRetainsStatus) { - const Status error(ErrorCodes::InternalError, "Some Error"); - auto session = makeSession(); - session->terminate(error); - session->terminate(Status::OK()); - ASSERT_EQ(*session->terminationStatus(), error); +TEST_F(GRPCSessionTest, TerminationStatusIsNotOverridden) { + constexpr StringData kExpectedReason = "first reason"_sd; + runWithBoth([&](auto&, auto& session) { + session.cancel(kExpectedReason); + + // Cancelling the session again should have no effect on the reason. + session.cancel("second reason"); + ASSERT_TRUE(ErrorCodes::isCancellationError(*session.terminationStatus())); + ASSERT_EQ(session.terminationStatus()->reason(), kExpectedReason); + + // Ending the session again should have no effect either. + session.end(); + ASSERT_TRUE(ErrorCodes::isCancellationError(*session.terminationStatus())); + ASSERT_EQ(session.terminationStatus()->reason(), kExpectedReason); + + if (auto egressSession = dynamic_cast(&session)) { + // EgressSession::finish() should report the proper status. + auto finishStatus = egressSession->finish(); + ASSERT_TRUE(ErrorCodes::isCancellationError(finishStatus)); + ASSERT_EQ(finishStatus.reason(), kExpectedReason); + } else if (auto ingressSession = dynamic_cast(&session)) { + // Recording the status should not overwrite the prior cancellation status. 
+ ingressSession->terminate(Status::OK()); + ASSERT_TRUE(ErrorCodes::isCancellationError(*session.terminationStatus())); + ASSERT_EQ(session.terminationStatus()->reason(), kExpectedReason); + } + }); } -TEST_F(IngressSessionTest, ReadAndWrite) { - auto session = makeSession(); - { - auto msg = makeMessage(); - fixture()->clientStream->write(msg.sharedBuffer()); - auto swReceived = session->sourceMessage(); +TEST_F(GRPCSessionTest, ReadAndWrite) { + auto ingressSession = makeSession(); + auto egressSession = makeSession(); + ON_BLOCK_EXIT([&] { + ingressSession->end(); + egressSession->end(); + }); + + auto sendMessage = [&](Session& sender, Session& receiver) { + auto msg = makeUniqueMessage(); + ASSERT_OK(sender.sinkMessage(msg)); + auto swReceived = receiver.sourceMessage(); ASSERT_OK(swReceived.getStatus()); ASSERT_EQ_MSG(swReceived.getValue(), msg); + }; + + sendMessage(*egressSession, *ingressSession); + sendMessage(*ingressSession, *egressSession); +} + +enum class Operation { kSink, kSource }; +Status runDummyOperationOnSession(Session& session, Operation op) { + if (op == Operation::kSink) { + return session.sinkMessage({}); + } else { + return session.sourceMessage().getStatus(); } +} - { - auto msg = makeMessage(); - ASSERT_OK(session->sinkMessage(msg)); - auto sent = fixture()->clientStream->read(); - ASSERT_TRUE(sent); - ASSERT_EQ_MSG(Message{*sent}, msg); +TEST_F(GRPCSessionTest, ReadAndWriteFromClosedStream) { + for (auto op : {Operation::kSink, Operation::kSource}) { + runWithBoth([&](auto&, auto& session) { + session.end(); + ASSERT_EQ(runDummyOperationOnSession(session, op), ErrorCodes::StreamTerminated); + }); } } -TEST_F(IngressSessionTest, ReadFromClosedStream) { - auto session = makeSession(); - fixture()->serverCtx->tryCancel(); - auto swReceived = session->sourceMessage(); - ASSERT_EQ(swReceived.getStatus(), ErrorCodes::StreamTerminated); +TEST_F(GRPCSessionTest, ReadAndWriteTimesOut) { + for (auto op : {Operation::kSink, Operation::kSource}) { + runWithBoth([&](auto&, auto& session) { + clockSource().advance(2 * kStreamTimeout); + ASSERT_EQ(runDummyOperationOnSession(session, op), ErrorCodes::StreamTerminated); + + if (auto egressSession = dynamic_cast(&session)) { + // Verify that the right `ErrorCode` is delivered on the client-side. 
+ ASSERT_TRUE(ErrorCodes::isExceededTimeLimitError(egressSession->finish())); + } + }); + } } -TEST_F(IngressSessionTest, ReadTimesOut) { - auto session = makeSession(); - clockSource().advance(2 * kStreamTimeout); - auto swReceived = session->sourceMessage(); - ASSERT_EQ(swReceived.getStatus(), ErrorCodes::StreamTerminated); +TEST_F(GRPCSessionTest, FinishOK) { + auto session = makeSession(); + fixtures().rpc->sendReturnStatus(::grpc::Status::OK); + ASSERT_OK(session->finish()); } -TEST_F(IngressSessionTest, WriteToClosedStream) { - auto session = makeSession(); - fixture()->serverCtx->tryCancel(); - ASSERT_EQ(session->sinkMessage(Message{}), ErrorCodes::StreamTerminated); +TEST_F(GRPCSessionTest, FinishError) { + auto session = makeSession(); + ::grpc::Status error{::grpc::UNAVAILABLE, "connection error"}; + fixtures().rpc->sendReturnStatus(error); + ASSERT_EQ(session->finish(), util::convertStatus(error)); } -TEST_F(IngressSessionTest, WriteTimesOut) { - auto session = makeSession(); - clockSource().advance(2 * kStreamTimeout); - ASSERT_EQ(session->sinkMessage(Message{}), ErrorCodes::StreamTerminated); +TEST_F(GRPCSessionTest, FinishCancelled) { + auto ingressSession = makeSession(); + auto egressSession = makeSession(); + ingressSession->cancel("some reason"); + auto finishStatus = egressSession->finish(); + ASSERT_TRUE(ErrorCodes::isCancellationError(finishStatus)); } } // namespace diff --git a/src/mongo/transport/grpc/grpc_transport_layer.cpp b/src/mongo/transport/grpc/grpc_transport_layer.cpp new file mode 100644 index 0000000000000..83f21794287f1 --- /dev/null +++ b/src/mongo/transport/grpc/grpc_transport_layer.cpp @@ -0,0 +1,91 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/transport/grpc/grpc_transport_layer.h" + +namespace mongo::transport::grpc { + +GRPCTransportLayer::GRPCTransportLayer(ServiceContext* svcCtx, const WireSpec& wireSpec) + : TransportLayer(wireSpec), _svcCtx(svcCtx) { + auto sslModeResolver = [this](ConnectSSLMode sslMode) { + // TODO SERVER-74020: use `sslMode` and the settings to decide if we should use SSL. 
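+ // Until that work lands, the resolver unconditionally reports that SSL is not in use.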
+ return false; + }; + + auto channelFactory = [this](HostAndPort& remote, bool useSSL) { + // TODO SERVER-74020: use the gRPC client to create a new channel to `remote`. + return ChannelType{}; + }; + + auto stubFactory = [this](ChannelType& channel) { + // TODO SERVER-74020: use `channel` to create a new gRPC stub. + return StubType{}; + }; + + // The pool calls into `ClockSource` to record the last usage of gRPC channels. Since the pool + // is not concerned with sub-minute durations and this call happens as part of destroying gRPC + // stubs (i.e., on threads running user operations), it is important to use `FastClockSource` to + // minimize the performance implications of recording time on user operations. + _channelPool = std::make_shared(_svcCtx->getFastClockSource(), + std::move(sslModeResolver), + std::move(channelFactory), + std::move(stubFactory)); +} + +Status GRPCTransportLayer::start() try { + // Periodically call into the channel pool to drop idle channels. A periodic task runs to drop + // all channels that have been idle for `kDefaultChannelTimeout`. + PeriodicRunner::PeriodicJob prunerJob( + "GRPCIdleChannelPrunerJob", + [pool = _channelPool](Client*) { + pool->dropIdleChannels(GRPCTransportLayer::kDefaultChannelTimeout); + }, + kDefaultChannelTimeout, + // TODO(SERVER-74659): Please revisit if this periodic job could be made killable. + false /*isKillableByStepdown*/); + invariant(!_idleChannelPruner); + _idleChannelPruner.emplace(_svcCtx->getPeriodicRunner()->makeJob(std::move(prunerJob))); + _idleChannelPruner->start(); + + // TODO SERVER-74020: start the Server and Client services. + + return Status::OK(); +} catch (const DBException& ex) { + return ex.toStatus(); +} + +void GRPCTransportLayer::shutdown() { + // TODO SERVER-74020: shutdown the Server and Client services. + + if (_idleChannelPruner) { + _idleChannelPruner->stop(); + } +} + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/grpc_transport_layer.h b/src/mongo/transport/grpc/grpc_transport_layer.h new file mode 100644 index 0000000000000..26fb0123137e0 --- /dev/null +++ b/src/mongo/transport/grpc/grpc_transport_layer.h @@ -0,0 +1,117 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. 
If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include +#include + +#include "mongo/config.h" +#include "mongo/db/service_context.h" +#include "mongo/transport/grpc/channel_pool.h" +#include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/periodic_runner.h" + +namespace mongo::transport::grpc { + +/** + * Wraps the Server and Client implementations of MongoDB's gRPC service. This abstraction layer + * aims to hide gRPC-specific details from `SessionWorkflow`, `ServiceEntryPoint`, and the remainder + * of the command execution path. + * + * TODO SERVER-74020: extend this documentation with more details. + */ +class GRPCTransportLayer : public TransportLayer { +public: + constexpr static auto kDefaultChannelTimeout = Minutes{30}; + + // TODO SERVER-74016: replace the empty structs with proper types. + using ChannelType = struct {}; + using StubType = struct {}; + + GRPCTransportLayer(ServiceContext* svcCtx, const WireSpec& wireSpec); + + Status start() override; + + void shutdown() override; + + StatusWith> connect( + HostAndPort peer, + ConnectSSLMode sslMode, + Milliseconds timeout, + boost::optional transientSSLParams = boost::none) override { + // TODO SERVER-74020 + MONGO_UNIMPLEMENTED; + } + + Future> asyncConnect( + HostAndPort peer, + ConnectSSLMode sslMode, + const ReactorHandle& reactor, + Milliseconds timeout, + std::shared_ptr connectionMetrics, + std::shared_ptr transientSSLContext) override { + // TODO SERVER-74020 + MONGO_UNIMPLEMENTED; + } + + Status setup() override { + // TODO SERVER-74020 + MONGO_UNIMPLEMENTED; + } + + ReactorHandle getReactor(WhichReactor) override { + // TODO SERVER-74020 + MONGO_UNIMPLEMENTED; + } + +#ifdef MONGO_CONFIG_SSL + Status rotateCertificates(std::shared_ptr manager, + bool asyncOCSPStaple) override { + // TODO SERVER-74020 + MONGO_UNIMPLEMENTED; + } + + StatusWith> createTransientSSLContext( + const TransientSSLParams& transientSSLParams) override { + // TODO SERVER-74020 + MONGO_UNIMPLEMENTED; + } +#endif + +private: + using ChannelPoolType = ChannelPool; + ServiceContext* const _svcCtx; + std::shared_ptr _channelPool; + boost::optional _idleChannelPruner; +}; + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/grpc_transport_layer_test.cpp b/src/mongo/transport/grpc/grpc_transport_layer_test.cpp new file mode 100644 index 0000000000000..4d7ef09d129f7 --- /dev/null +++ b/src/mongo/transport/grpc/grpc_transport_layer_test.cpp @@ -0,0 +1,168 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. 
You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include + +#include "mongo/db/wire_version.h" +#include "mongo/logv2/log.h" +#include "mongo/transport/grpc/grpc_transport_layer.h" +#include "mongo/transport/grpc/test_fixtures.h" +#include "mongo/unittest/unittest.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +namespace mongo::transport::grpc { +namespace { + +/** + * TODO SERVER-74021: add documentation for the test fixture. + */ +class GRPCTransportLayerTest : public ServiceContextWithClockSourceMockTest { +public: + void setUp() override { + ServiceContextWithClockSourceMockTest::setUp(); + _tl = std::make_unique(getServiceContext(), WireSpec::instance()); + } + + void tearDown() override { + _tl.reset(); + ServiceContextWithClockSourceMockTest::tearDown(); + } + + GRPCTransportLayer& transportLayer() { + return *_tl; + } + +private: + std::unique_ptr _tl; +}; + +/** + * Modifies the `ServiceContext` with `PeriodicRunnerMock`, a custom `PeriodicRunner` that maintains + * a list of all instances of `PeriodicJob` and allows monitoring their internal state. We use this + * modified runner to test proper initialization and teardown of the idle channel pruner. + */ +class IdleChannelPrunerTest : public GRPCTransportLayerTest { +public: + class PeriodicRunnerMock : public PeriodicRunner { + public: + /** + * Owns and monitors a `PeriodicJob` by maintaining its observable state (e.g., `kPause`). 
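+ * Tests can then assert on that state via isStarted() and isStopped(), as the idle channel
+ * pruner tests below do.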
+ */ + class ControllableJobMock : public ControllableJob { + public: + enum class State { kNotSet, kStart, kPause, kResume, kStop }; + + explicit ControllableJobMock(PeriodicJob job) : _job(std::move(job)) {} + + void start() override { + _setState(State::kStart); + } + + void pause() override { + _setState(State::kPause); + } + + void resume() override { + _setState(State::kResume); + } + + void stop() override { + _setState(State::kStop); + } + + Milliseconds getPeriod() const override { + return _job.interval; + } + + void setPeriod(Milliseconds ms) override { + _job.interval = ms; + } + + bool isStarted() const { + return _state == State::kStart; + } + + bool isStopped() const { + return _state == State::kStop; + } + + private: + void _setState(State newState) { + LOGV2(7401901, + "Updating state for a `PeriodicJob`", + "jobName"_attr = _job.name, + "oldState"_attr = _state, + "newState"_attr = newState); + _state = newState; + } + + State _state = State::kNotSet; + PeriodicJob _job; + }; + + PeriodicJobAnchor makeJob(PeriodicJob job) override { + auto handle = std::make_shared(std::move(job)); + jobs.push_back(handle); + return PeriodicJobAnchor{std::move(handle)}; + } + + std::vector> jobs; + }; + + void setUp() override { + GRPCTransportLayerTest::setUp(); + getServiceContext()->setPeriodicRunner(std::make_unique()); + } + + PeriodicRunnerMock* getPeriodicRunnerMock() { + return static_cast(getServiceContext()->getPeriodicRunner()); + } +}; + +TEST_F(IdleChannelPrunerTest, StartsWithTransportLayer) { + ASSERT_TRUE(getPeriodicRunnerMock()->jobs.empty()); + ASSERT_OK(transportLayer().start()); + ASSERT_EQ(getPeriodicRunnerMock()->jobs.size(), 1); + auto& prunerJob = getPeriodicRunnerMock()->jobs[0]; + ASSERT_EQ(prunerJob->getPeriod(), GRPCTransportLayer::kDefaultChannelTimeout); + ASSERT_TRUE(prunerJob->isStarted()); +} + +TEST_F(IdleChannelPrunerTest, StopsWithTransportLayer) { + ASSERT_OK(transportLayer().start()); + transportLayer().shutdown(); + ASSERT_EQ(getPeriodicRunnerMock()->jobs.size(), 1); + auto& prunerJob = getPeriodicRunnerMock()->jobs[0]; + ASSERT_TRUE(prunerJob->isStopped()); +} + +} // namespace +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/metadata.h b/src/mongo/transport/grpc/metadata.h index be188d0d3e38b..6cecbbc75216a 100644 --- a/src/mongo/transport/grpc/metadata.h +++ b/src/mongo/transport/grpc/metadata.h @@ -32,9 +32,18 @@ #include #include +#include "mongo/base/string_data.h" + namespace mongo::transport::grpc { +/** + * A gRPC metadata map that owns its keys and values. + */ using MetadataContainer = std::multimap; -using MetadataView = std::multimap; + +/** + * A gRPC metadata map that references its keys and values but does not own them. + */ +using MetadataView = std::multimap; } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/mock_client_context.h b/src/mongo/transport/grpc/mock_client_context.h index 04311414e356d..5dad907840334 100644 --- a/src/mongo/transport/grpc/mock_client_context.h +++ b/src/mongo/transport/grpc/mock_client_context.h @@ -29,29 +29,54 @@ #pragma once -#include -#include -#include +#include "mongo/transport/grpc/client_context.h" #include "mongo/transport/grpc/mock_client_stream.h" namespace mongo::transport::grpc { -// TODO: SERVER-74015 introduce a ClientContext interface that covers the whole API surface of -// gRPC's ClientContext type, and implement that interface here. 
-class MockClientContext { +class MockClientContext : public ClientContext { public: - explicit MockClientContext(MockClientStream* stream) : _stream{stream} {} - ~MockClientContext() = default; + MockClientContext() : _deadline{Date_t::max()}, _stream{nullptr} {} - boost::optional getServerInitialMetadata() const { + void addMetadataEntry(const std::string& key, const std::string& value) override { + invariant(!_stream); + _metadata.insert({key, value}); + }; + + boost::optional getServerInitialMetadata() const override { + invariant(_stream); if (!_stream->_serverInitialMetadata.isReady()) { return boost::none; } return _stream->_serverInitialMetadata.get(); } + Date_t getDeadline() const override { + return _deadline; + } + + void setDeadline(Date_t deadline) override { + invariant(!_stream); + _deadline = deadline; + } + + HostAndPort getRemote() const override { + invariant(_stream); + return _stream->_remote; + } + + void tryCancel() override { + invariant(_stream); + _stream->_cancel(); + } + private: + friend class MockStub; + friend struct MockStreamTestFixtures; + + Date_t _deadline; + MetadataContainer _metadata; MockClientStream* _stream; }; diff --git a/src/mongo/transport/grpc/mock_client_stream.cpp b/src/mongo/transport/grpc/mock_client_stream.cpp index 0952c8be49cc4..dfc00f8c51a06 100644 --- a/src/mongo/transport/grpc/mock_client_stream.cpp +++ b/src/mongo/transport/grpc/mock_client_stream.cpp @@ -29,26 +29,64 @@ #include "mongo/transport/grpc/mock_client_stream.h" -#include "mongo/db/service_context.h" #include "mongo/transport/grpc/mock_util.h" +#include "mongo/util/interruptible.h" namespace mongo::transport::grpc { -MockClientStream::MockClientStream(HostAndPort hostAndPort, - Milliseconds timeout, +MockClientStream::MockClientStream(HostAndPort remote, Future&& initialMetadataFuture, + Future<::grpc::Status>&& rpcReturnStatus, + std::shared_ptr rpcCancellationState, BidirectionalPipe::End&& pipe) - : _deadline{getGlobalServiceContext()->getFastClockSource()->now() + timeout}, + : _remote{std::move(remote)}, _serverInitialMetadata{std::move(initialMetadataFuture)}, + _rpcReturnStatus{std::move(rpcReturnStatus)}, + _rpcCancellationState(std::move(rpcCancellationState)), _pipe{std::move(pipe)} {} boost::optional MockClientStream::read() { + // Even if the server side handler of this stream has set a final status for the RPC (i.e. + // _rpcReturnStatus is ready), there may still be unread messages in the queue that the server + // sent before setting that status, so only return early here if the RPC was cancelled. + // Otherwise, try to read whatever messages are in the queue. + if (_rpcCancellationState->isCancelled()) { + return boost::none; + } + return runWithDeadline>( - _deadline, [&](Interruptible* i) { return _pipe.read(i); }); + _rpcCancellationState->getDeadline(), [&](Interruptible* i) { return _pipe.read(i); }); } bool MockClientStream::write(ConstSharedBuffer msg) { - return runWithDeadline(_deadline, [&](Interruptible* i) { return _pipe.write(msg, i); }); + if (_rpcCancellationState->isCancelled() || _rpcReturnStatus.isReady()) { + return false; + } + + return runWithDeadline(_rpcCancellationState->getDeadline(), + [&](Interruptible* i) { return _pipe.write(msg, i); }); +} + +::grpc::Status MockClientStream::finish() { + // We use a busy wait here because there is no easy way to wait until all the messages in the + // pipe have been read. 
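+ // The wait is bounded by the RPC deadline, so an unread pipe cannot block finish() forever.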
+ while (!_pipe.isConsumed() && !_rpcCancellationState->isDeadlineExceeded()) { + sleepFor(Milliseconds(1)); + } + + invariant(_rpcReturnStatus.isReady() || _rpcCancellationState->isCancelled()); + + if (auto cancellationStatus = _rpcCancellationState->getCancellationStatus(); + cancellationStatus.has_value()) { + return *cancellationStatus; + } + + return _rpcReturnStatus.get(); +} + +void MockClientStream::_cancel() { + _rpcCancellationState->cancel(::grpc::Status::CANCELLED); + _pipe.close(); } } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/mock_client_stream.h b/src/mongo/transport/grpc/mock_client_stream.h index 694581c393d20..f07af95f8d1fa 100644 --- a/src/mongo/transport/grpc/mock_client_stream.h +++ b/src/mongo/transport/grpc/mock_client_stream.h @@ -29,37 +29,52 @@ #pragma once -#include -#include +#include "mongo/transport/grpc/client_stream.h" #include "mongo/transport/grpc/bidirectional_pipe.h" #include "mongo/transport/grpc/metadata.h" -#include "mongo/transport/grpc/server_stream.h" +#include "mongo/transport/grpc/mock_util.h" #include "mongo/util/future.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/synchronized_value.h" #include "mongo/util/time_support.h" namespace mongo::transport::grpc { -// TODO: SERVER-74015 introduce a ClientStream interface that covers the whole API surface of -// gRPC's ClientReaderWriter type, and implement that interface here. -class MockClientStream { +class MockClientStream : public ClientStream { public: - ~MockClientStream() = default; + MockClientStream(HostAndPort remote, + Future&& serverInitialMetadata, + Future<::grpc::Status>&& rpcReturnStatus, + std::shared_ptr rpcCancellationState, + BidirectionalPipe::End&& pipe); - boost::optional read(); - bool write(ConstSharedBuffer msg); + boost::optional read() override; - explicit MockClientStream(HostAndPort hostAndPort, - Milliseconds timeout, - Future&& serverInitialMetadata, - BidirectionalPipe::End&& pipe); + bool write(ConstSharedBuffer msg) override; + + ::grpc::Status finish() override; private: friend class MockClientContext; - Date_t _deadline; + void _cancel(); + + HostAndPort _remote; + MetadataContainer _clientMetadata; Future _serverInitialMetadata; + + /** + * The mocked equivalent of a status returned from a server-side RPC handler. + */ + Future<::grpc::Status> _rpcReturnStatus; + + /** + * State used to mock RPC cancellation, including explicit cancellation (client or server side) + * or network errors. 
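+ * The state is shared with the corresponding MockServerStream so that both ends of the mocked
+ * RPC observe a cancellation (or an exceeded deadline) consistently.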
+ */ + std::shared_ptr _rpcCancellationState; + BidirectionalPipe::End _pipe; }; } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/mock_server_context.cpp b/src/mongo/transport/grpc/mock_server_context.cpp index a862fe17e4951..a4b91ce5b01f4 100644 --- a/src/mongo/transport/grpc/mock_server_context.cpp +++ b/src/mongo/transport/grpc/mock_server_context.cpp @@ -40,11 +40,11 @@ const MetadataView& MockServerContext::getClientMetadata() const { } Date_t MockServerContext::getDeadline() const { - return _stream->_deadline; + return _stream->_rpcCancellationState->getDeadline(); } void MockServerContext::tryCancel() { - _stream->close(); + _stream->cancel(::grpc::Status::CANCELLED); } bool MockServerContext::isCancelled() const { diff --git a/src/mongo/transport/grpc/mock_server_context.h b/src/mongo/transport/grpc/mock_server_context.h index bd1fa05d47b1b..0c867f43e185e 100644 --- a/src/mongo/transport/grpc/mock_server_context.h +++ b/src/mongo/transport/grpc/mock_server_context.h @@ -43,11 +43,8 @@ class MockServerContext : public ServerContext { const MetadataView& getClientMetadata() const override; Date_t getDeadline() const override; bool isCancelled() const override; - HostAndPort getHostAndPort() const override { - return _stream->_hostAndPort; - } - CancellationToken getCancellationToken() { - return _stream->_cancellationSource.token(); + HostAndPort getRemote() const override { + return _stream->_remote; } void tryCancel() override; diff --git a/src/mongo/transport/grpc/mock_server_stream.cpp b/src/mongo/transport/grpc/mock_server_stream.cpp index 8c8ca1df55924..8fa89bdba902c 100644 --- a/src/mongo/transport/grpc/mock_server_stream.cpp +++ b/src/mongo/transport/grpc/mock_server_stream.cpp @@ -31,42 +31,68 @@ #include "mongo/db/service_context.h" #include "mongo/transport/grpc/mock_util.h" +#include "mongo/util/assert_util.h" #include "mongo/util/interruptible.h" namespace mongo::transport::grpc { -MockServerStream::MockServerStream(HostAndPort hostAndPort, - Milliseconds timeout, +MockServerStream::MockServerStream(HostAndPort remote, Promise&& initialMetadataPromise, + Promise<::grpc::Status>&& rpcTerminationStatusPromise, + std::shared_ptr rpcCancellationState, BidirectionalPipe::End&& serverPipeEnd, MetadataView clientMetadata) - : _deadline{getGlobalServiceContext()->getFastClockSource()->now() + timeout}, + : _remote(std::move(remote)), _initialMetadata(std::move(initialMetadataPromise)), + _rpcReturnStatus(std::move(rpcTerminationStatusPromise)), + _finalStatusReturned(false), + _rpcCancellationState(std::move(rpcCancellationState)), _pipe{std::move(serverPipeEnd)}, - _clientMetadata{std::move(clientMetadata)}, - _hostAndPort(std::move(hostAndPort)) {} + _clientMetadata{std::move(clientMetadata)} {} boost::optional MockServerStream::read() { + invariant(!*_finalStatusReturned); + return runWithDeadline>( - _deadline, [&](Interruptible* i) { return _pipe.read(i); }); + _rpcCancellationState->getDeadline(), [&](Interruptible* i) { return _pipe.read(i); }); } bool MockServerStream::isCancelled() const { - return _cancellationSource.token().isCanceled() || - getGlobalServiceContext()->getFastClockSource()->now() > _deadline; + return _rpcCancellationState->isCancelled(); } bool MockServerStream::write(ConstSharedBuffer msg) { - if (_cancellationSource.token().isCanceled() || - getGlobalServiceContext()->getFastClockSource()->now() > _deadline) { + invariant(!*_finalStatusReturned); + if (isCancelled()) { return false; } + _initialMetadata.trySend(); - 
return runWithDeadline(_deadline, [&](Interruptible* i) { return _pipe.write(msg, i); }); + return runWithDeadline(_rpcCancellationState->getDeadline(), + [&](Interruptible* i) { return _pipe.write(msg, i); }); } -void MockServerStream::close() { - _cancellationSource.cancel(); +void MockServerStream::sendReturnStatus(::grpc::Status status) { + { + auto finalStatusReturned = _finalStatusReturned.synchronize(); + invariant(!*finalStatusReturned); + *finalStatusReturned = true; + // Client side ignores the mocked return value in the event of a cancellation, so don't need + // to check if stream has been cancelled before sending the status. + } + _rpcReturnStatus.emplaceValue(std::move(status)); + _pipe.close(); +} + +void MockServerStream::cancel(::grpc::Status status) { + // Only mark the RPC as cancelled if a status hasn't already been returned to client. + auto finalStatusReturned = _finalStatusReturned.synchronize(); + if (*finalStatusReturned) { + return; + } + // Need to update the cancellation state before closing the pipe so that when a stream + // read/write is interrupted, the cancellation state will already be up to date. + _rpcCancellationState->cancel(std::move(status)); _pipe.close(); } diff --git a/src/mongo/transport/grpc/mock_server_stream.h b/src/mongo/transport/grpc/mock_server_stream.h index eb7177f46ccee..88fbe51d5eb07 100644 --- a/src/mongo/transport/grpc/mock_server_stream.h +++ b/src/mongo/transport/grpc/mock_server_stream.h @@ -32,13 +32,14 @@ #include #include +#include + #include "mongo/transport/grpc/bidirectional_pipe.h" #include "mongo/transport/grpc/metadata.h" +#include "mongo/transport/grpc/mock_util.h" #include "mongo/transport/grpc/server_stream.h" -#include "mongo/util/cancellation.h" #include "mongo/util/future.h" #include "mongo/util/net/hostandport.h" -#include "mongo/util/time_support.h" namespace mongo::transport::grpc { @@ -47,16 +48,19 @@ class MockServerStream : public ServerStream { ~MockServerStream() = default; boost::optional read() override; + bool write(ConstSharedBuffer msg) override; - explicit MockServerStream(HostAndPort hostAndPort, - Milliseconds timeout, + explicit MockServerStream(HostAndPort remote, Promise&& initialMetadataPromise, + Promise<::grpc::Status>&& rpcTerminationStatusPromise, + std::shared_ptr rpcCancellationState, BidirectionalPipe::End&& serverPipeEnd, MetadataView clientMetadata); private: friend class MockServerContext; + friend class MockRPC; class InitialMetadata { public: @@ -86,13 +90,51 @@ class MockServerStream : public ServerStream { }; bool isCancelled() const; - void close(); - CancellationSource _cancellationSource; - Date_t _deadline; + /** + * Cancel the RPC associated with this stream. This is used for mocking situations in + * which an RPC handler was never able to return a final status to the client (e.g. manual + * cancellation or a network interruption). + * + * This method has no effect if the stream is already terminated. + */ + void cancel(::grpc::Status status); + + /** + * Closes the stream and sends the final return status of the RPC to the client. This is the + * mocked equivalent of an RPC handler returning a status. + * + * This does not mark the stream as cancelled. + * + * This method must only be called once, and this stream must not be used after this method has + * been called. 
+ */ + void sendReturnStatus(::grpc::Status status); + + HostAndPort _remote; InitialMetadata _initialMetadata; + + /** + * _rpcReturnStatus is set in sendReturnStatus(), and it is used to mock returning a status from + * an RPC handler. sendReturnStatus itself is called via MockRPC::sendReturnStatus(). + */ + Promise<::grpc::Status> _rpcReturnStatus; + + /** + * _finalStatusReturned is also set in sendReturnStatus(), and it is used to denote that a + * status has been returned and the stream should no longer be used. + */ + synchronized_value _finalStatusReturned; + + /** + * _rpcCancellationState is set via cancel(), which is called by either + * MockServerContext::tryCancel() or MockRPC::cancel(). It is used to mock situations in which a + * server RPC handler is unable to return a status to the client (e.g. explicit cancellation or + * a network interruption). + */ + std::shared_ptr _rpcCancellationState; + BidirectionalPipe::End _pipe; MetadataView _clientMetadata; - HostAndPort _hostAndPort; }; } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/mock_server_stream_test.cpp b/src/mongo/transport/grpc/mock_server_stream_test.cpp index 1452f9f3f4ac1..697106c9cece0 100644 --- a/src/mongo/transport/grpc/mock_server_stream_test.cpp +++ b/src/mongo/transport/grpc/mock_server_stream_test.cpp @@ -33,13 +33,16 @@ #include #include -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include + +#include "mongo/db/service_context_test_fixture.h" #include "mongo/platform/mutex.h" #include "mongo/rpc/message.h" #include "mongo/stdx/thread.h" #include "mongo/transport/grpc/metadata.h" #include "mongo/transport/grpc/mock_server_context.h" #include "mongo/transport/grpc/mock_server_stream.h" +#include "mongo/transport/grpc/mock_stub.h" #include "mongo/transport/grpc/test_fixtures.h" #include "mongo/unittest/assert.h" #include "mongo/unittest/death_test.h" @@ -47,6 +50,7 @@ #include "mongo/unittest/unittest.h" #include "mongo/util/concurrency/notification.h" #include "mongo/util/duration.h" +#include "mongo/util/future.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/scopeguard.h" #include "mongo/util/system_clock_source.h" @@ -58,32 +62,29 @@ template class MockServerStreamBase : public Base { public: static constexpr Milliseconds kTimeout = Milliseconds(100); - static constexpr const char* kRemote = "abc:123"; virtual void setUp() override { Base::setUp(); - _fixtures = std::make_unique( - HostAndPort{kRemote}, kTimeout, _clientMetadata); - } - MockStreamTestFixtures& getFixtures() { - return *_fixtures; + MockStubTestFixtures fixtures; + _fixtures = fixtures.makeStreamTestFixtures( + Base::getServiceContext()->getFastClockSource()->now() + kTimeout, _clientMetadata); } MockServerStream& getServerStream() { - return *getFixtures().serverStream; + return *_fixtures->rpc->serverStream; } MockServerContext& getServerContext() { - return *getFixtures().serverCtx; + return *_fixtures->rpc->serverCtx; } - MockClientStream& getClientStream() { - return *getFixtures().clientStream; + ClientStream& getClientStream() { + return *_fixtures->clientStream; } - MockClientContext& getClientContext() { - return *getFixtures().clientCtx; + ClientContext& getClientContext() { + return *_fixtures->clientCtx; } const Message& getClientFirstMessage() const { @@ -133,7 +134,7 @@ class MockServerStreamBase : public Base { std::unique_ptr _fixtures; }; -class MockServerStreamTest : public MockServerStreamBase {}; +class MockServerStreamTest : public 
MockServerStreamBase {}; class MockServerStreamTestWithMockedClockSource : public MockServerStreamBase {}; @@ -220,6 +221,7 @@ TEST_F(MockServerStreamTestWithMockedClockSource, DeadlineIsEnforced) { ASSERT_FALSE(getServerStream().write(makeUniqueMessage().sharedBuffer())); ASSERT_FALSE(getClientContext().getServerInitialMetadata()); ASSERT_FALSE(getClientStream().read()); + ASSERT_EQ(getClientStream().finish().error_code(), ::grpc::StatusCode::DEADLINE_EXCEEDED); } TEST_F(MockServerStreamTest, TryCancelSubsequentServerRead) { @@ -265,4 +267,44 @@ TEST_F(MockServerStreamTest, ClientMetadataIsAccessible) { ASSERT_EQ(getServerContext().getClientMetadata(), getClientMetadata()); } +TEST_F(MockServerStreamTest, ClientSideCancellation) { + ASSERT_TRUE(getClientStream().write(makeUniqueMessage().sharedBuffer())); + ASSERT_TRUE(getServerStream().read()); + + getClientContext().tryCancel(); + + ASSERT_FALSE(getClientStream().read()); + ASSERT_FALSE(getClientStream().write(makeUniqueMessage().sharedBuffer())); + ASSERT_FALSE(getServerStream().read()); + ASSERT_FALSE(getServerStream().write(makeUniqueMessage().sharedBuffer())); + ASSERT_TRUE(getServerContext().isCancelled()); + + ASSERT_EQ(getClientStream().finish().error_code(), ::grpc::StatusCode::CANCELLED); +} + +TEST_F(MockServerStreamTest, CancellationInterruptsFinish) { + auto pf = makePromiseFuture<::grpc::Status>(); + auto finishThread = + stdx::thread([&] { pf.promise.setWith([&] { return getClientStream().finish(); }); }); + ON_BLOCK_EXIT([&] { finishThread.join(); }); + + // finish() won't return until server end hangs up too. + ASSERT_FALSE(pf.future.isReady()); + + getServerContext().tryCancel(); + ASSERT_EQ(pf.future.get().error_code(), ::grpc::StatusCode::CANCELLED); +} + +TEST_F(MockServerStreamTestWithMockedClockSource, DeadlineExceededInterruptsFinish) { + auto pf = makePromiseFuture<::grpc::Status>(); + auto finishThread = + stdx::thread([&] { pf.promise.setWith([&] { return getClientStream().finish(); }); }); + ON_BLOCK_EXIT([&] { finishThread.join(); }); + + // finish() won't return until server end hangs up too. + ASSERT_FALSE(pf.future.isReady()); + + clockSource().advance(kTimeout * 2); + ASSERT_EQ(pf.future.get().error_code(), ::grpc::StatusCode::DEADLINE_EXCEEDED); +} } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/mock_stub.h b/src/mongo/transport/grpc/mock_stub.h new file mode 100644 index 0000000000000..5af39aeda9386 --- /dev/null +++ b/src/mongo/transport/grpc/mock_stub.h @@ -0,0 +1,217 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. 
You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include "mongo/base/string_data.h" +#include "mongo/transport/grpc/mock_client_context.h" +#include "mongo/transport/grpc/mock_server_context.h" +#include "mongo/transport/grpc/mock_server_stream.h" +#include "mongo/transport/grpc/mock_util.h" +#include "mongo/transport/grpc/service.h" +#include "mongo/unittest/thread_assertion_monitor.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/notification.h" +#include "mongo/util/producer_consumer_queue.h" + +namespace mongo::transport::grpc { + +class MockRPC { +public: + /** + * Close the stream and send the final return status of the RPC to the client. This is the + * mocked equivalent of returning a status from an RPC handler. + * + * The RPC's stream must not be used after calling this method. + * This method must only be called once. + */ + void sendReturnStatus(::grpc::Status status) { + serverStream->sendReturnStatus(std::move(status)); + } + + /** + * Cancel the RPC with the provided status. This is used for mocking situations in which an RPC + * handler was never able to return a final status to the client (e.g. network interruption). + * + * For mocking an explicit server-side cancellation, use serverCtx->tryCancel(). + * This method has no effect if the RPC has already been terminated, either by returning a + * status or prior cancellation. + */ + void cancel(::grpc::Status status) { + serverStream->cancel(std::move(status)); + } + + StringData methodName; + std::unique_ptr serverStream; + std::unique_ptr serverCtx; +}; + +using MockRPCQueue = MultiProducerMultiConsumerQueue, MockRPC>>; + +class MockServer { +public: + explicit MockServer(MockRPCQueue::Consumer queue) : _queue(std::move(queue)) {} + + boost::optional acceptRPC() { + try { + auto entry = _queue.pop(); + entry.first.emplaceValue(); + return std::move(entry.second); + } catch (const DBException& e) { + if (e.code() == ErrorCodes::ProducerConsumerQueueEndClosed || + e.code() == ErrorCodes::ProducerConsumerQueueConsumed) { + return boost::none; + } + throw; + } + } + + /** + * Starts up a thread that listens for incoming RPCs and then returns immediately. + * The listener thread will spawn a new thread for each RPC it receives and pass it to the + * provided handler. + * + * The provided handler is expected to throw assertion exceptions, hence the use of + * ThreadAssertionMonitor to spawn threads here. 
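+ *
+ * Illustrative usage, e.g. an echo handler similar to the one in mock_stub_test.cpp (here
+ * `server` stands for a hypothetical MockServer instance):
+ *     server.start(monitor, [](auto& rpc) {
+ *         if (auto msg = rpc.serverStream->read()) {
+ *             rpc.serverStream->write(*msg);
+ *         }
+ *         return ::grpc::Status::OK;
+ *     });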
+ */ + void start(unittest::ThreadAssertionMonitor& monitor, + std::function<::grpc::Status(MockRPC&)> handler) { + _listenerThread = monitor.spawn([&, handler = std::move(handler)] { + std::vector rpcHandlers; + while (auto rpc = acceptRPC()) { + rpcHandlers.push_back(monitor.spawn([rpc = std::move(*rpc), handler]() mutable { + try { + auto status = handler(rpc); + rpc.sendReturnStatus(std::move(status)); + } catch (DBException& e) { + rpc.sendReturnStatus( + ::grpc::Status(::grpc::StatusCode::UNKNOWN, e.toString())); + } + })); + } + + for (auto& thread : rpcHandlers) { + thread.join(); + } + }); + } + + /** + * Close the mocked channel and then block until all RPC handler threads (if any) have exited. + */ + void shutdown() { + _queue.close(); + if (_listenerThread) { + _listenerThread->join(); + } + } + +private: + boost::optional _listenerThread; + MockRPCQueue::Consumer _queue; +}; + +class MockChannel { +public: + explicit MockChannel(HostAndPort local, HostAndPort remote, MockRPCQueue::Producer queue) + : _local(std::move(local)), _remote{std::move(remote)}, _rpcQueue{std::move(queue)} {}; + + void sendRPC(MockRPC&& rpc) { + auto pf = makePromiseFuture(); + _rpcQueue.push({std::move(pf.promise), std::move(rpc)}); + pf.future.get(); + } + + const HostAndPort& getLocal() const { + return _local; + } + + const HostAndPort& getRemote() const { + return _remote; + } + +private: + HostAndPort _local; + HostAndPort _remote; + MockRPCQueue::Producer _rpcQueue; +}; + +class MockStub { +public: + explicit MockStub(std::shared_ptr channel) : _channel{std::move(channel)} {} + + ~MockStub() {} + + std::shared_ptr unauthenticatedCommandStream(MockClientContext* ctx) { + return _makeStream(CommandService::kUnauthenticatedCommandStreamMethodName, ctx); + } + + std::shared_ptr authenticatedCommandStream(MockClientContext* ctx) { + return _makeStream(CommandService::kAuthenticatedCommandStreamMethodName, ctx); + } + +private: + std::shared_ptr _makeStream(const StringData methodName, + MockClientContext* ctx) { + MetadataView clientMetadata; + for (auto& kvp : ctx->_metadata) { + clientMetadata.insert(kvp); + } + + BidirectionalPipe pipe; + auto metadataPF = makePromiseFuture(); + auto terminationStatusPF = makePromiseFuture<::grpc::Status>(); + auto cancellationState = std::make_shared(ctx->getDeadline()); + + MockRPC rpc; + rpc.methodName = methodName; + rpc.serverStream = + std::make_unique(_channel->getLocal(), + std::move(metadataPF.promise), + std::move(terminationStatusPF.promise), + cancellationState, + std::move(*pipe.left), + clientMetadata); + rpc.serverCtx = std::make_unique(rpc.serverStream.get()); + auto clientStream = + std::make_shared(_channel->getRemote(), + std::move(metadataPF.future), + std::move(terminationStatusPF.future), + cancellationState, + std::move(*pipe.right)); + + ctx->_stream = clientStream.get(); + _channel->sendRPC(std::move(rpc)); + return clientStream; + } + + std::shared_ptr _channel; +}; + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/mock_stub_test.cpp b/src/mongo/transport/grpc/mock_stub_test.cpp new file mode 100644 index 0000000000000..bfc2589da1544 --- /dev/null +++ b/src/mongo/transport/grpc/mock_stub_test.cpp @@ -0,0 +1,204 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include +#include + +#include "mongo/db/service_context_test_fixture.h" +#include "mongo/rpc/message.h" +#include "mongo/stdx/thread.h" +#include "mongo/transport/grpc/mock_client_context.h" +#include "mongo/transport/grpc/mock_stub.h" +#include "mongo/transport/grpc/test_fixtures.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/thread_assertion_monitor.h" +#include "mongo/unittest/unittest.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/producer_consumer_queue.h" +#include "mongo/util/scopeguard.h" + +namespace mongo::transport::grpc { + +class MockStubTest : public ServiceContextTest { +public: + void setUp() override { + _fixtures = std::make_unique(); + } + + MockStub makeStub() { + return _fixtures->makeStub(); + } + + MockServer& getServer() { + return _fixtures->getServer(); + } + + void runEchoTest(std::function(MockClientContext&)> makeStream) { + unittest::threadAssertionMonitoredTest([&](unittest::ThreadAssertionMonitor& monitor) { + getServer().start(monitor, [](auto& rpc) { + ASSERT_EQ(rpc.serverCtx->getRemote().toString(), + MockStubTestFixtures::kClientAddress); + auto msg = rpc.serverStream->read(); + ASSERT_TRUE(msg); + ASSERT_TRUE(rpc.serverStream->write(*msg)); + return ::grpc::Status::OK; + }); + + std::vector clientThreads; + for (int i = 0; i < 10; i++) { + clientThreads.push_back(monitor.spawn([&]() { + auto clientMessage = makeUniqueMessage(); + MockClientContext ctx; + auto stream = makeStream(ctx); + ASSERT_TRUE(stream->write(clientMessage.sharedBuffer())); + + auto serverResponse = stream->read(); + ASSERT_TRUE(serverResponse); + ASSERT_EQ_MSG(Message{*serverResponse}, clientMessage); + })); + } + + for (auto& thread : clientThreads) { + thread.join(); + } + + getServer().shutdown(); + }); + } + + std::pair, MockRPC> makeRPC(MockClientContext& ctx) { + auto clientStreamPf = makePromiseFuture>(); + auto th = stdx::thread([&, promise = std::move(clientStreamPf.promise)]() mutable { + promise.setWith([&] { + auto stub = makeStub(); + return stub.unauthenticatedCommandStream(&ctx); + }); + }); + ON_BLOCK_EXIT([&th] { th.join(); }); + auto rpc = getServer().acceptRPC(); + ASSERT_TRUE(rpc); + return {clientStreamPf.future.get(), std::move(*rpc)}; + } + +private: + std::unique_ptr _fixtures; +}; + +TEST_F(MockStubTest, ConcurrentStreamsAuth) { + auto stub = makeStub(); + 
runEchoTest([&](auto& ctx) { return stub.authenticatedCommandStream(&ctx); }); +} + +TEST_F(MockStubTest, ConcurrentStreamsNoAuth) { + auto stub = makeStub(); + runEchoTest([&](auto& ctx) { return stub.unauthenticatedCommandStream(&ctx); }); +} + +TEST_F(MockStubTest, ConcurrentStubsAuth) { + runEchoTest([&](auto& ctx) { return makeStub().authenticatedCommandStream(&ctx); }); +} + +TEST_F(MockStubTest, ConcurrentStubsNoAuth) { + runEchoTest([&](auto& ctx) { return makeStub().unauthenticatedCommandStream(&ctx); }); +} + +TEST_F(MockStubTest, RPCReturn) { + const ::grpc::Status kFinalStatus = + ::grpc::Status{::grpc::StatusCode::FAILED_PRECONDITION, "test"}; + const int kMessageCount = 5; + + MockClientContext ctx; + auto [clientStream, rpc] = makeRPC(ctx); + + for (auto i = 0; i < kMessageCount; i++) { + ASSERT_TRUE(rpc.serverStream->write(makeUniqueMessage().sharedBuffer())); + } + + rpc.sendReturnStatus(kFinalStatus); + ASSERT_FALSE(rpc.serverCtx->isCancelled()) + << "returning a status should not mark stream as cancelled"; + ASSERT_FALSE(clientStream->write(makeUniqueMessage().sharedBuffer())); + + auto finishPf = makePromiseFuture<::grpc::Status>(); + auto finishThread = stdx::thread( + [&clientStream = *clientStream, promise = std::move(finishPf.promise)]() mutable { + promise.setWith([&] { return clientStream.finish(); }); + }); + ON_BLOCK_EXIT([&finishThread] { finishThread.join(); }); + // finish() should not return until all messages have been read. + ASSERT_FALSE(finishPf.future.isReady()); + + // Ensure messages sent before the RPC was finished can still be read. + for (auto i = 0; i < kMessageCount; i++) { + ASSERT_TRUE(clientStream->read()); + } + ASSERT_FALSE(clientStream->read()); + + // Ensure that finish() returns now that all the messages have been read. + auto status = finishPf.future.get(); + ASSERT_EQ(status.error_code(), kFinalStatus.error_code()); + + // Cancelling a finished RPC should have no effect. 
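+    // sendReturnStatus() above already delivered kFinalStatus to the client, so the
+    // cancellation attempted below is expected to be a no-op and isCancelled() should
+    // remain false.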
+ rpc.serverCtx->tryCancel(); + ASSERT_FALSE(rpc.serverCtx->isCancelled()); +} + +TEST_F(MockStubTest, RPCCancellation) { + const ::grpc::Status kCancellationStatus = + ::grpc::Status{::grpc::StatusCode::UNAVAILABLE, "mock network error"}; + + MockClientContext ctx; + auto [clientStream, rpc] = makeRPC(ctx); + + ASSERT_TRUE(clientStream->write(makeUniqueMessage().sharedBuffer())); + + rpc.cancel(kCancellationStatus); + + ASSERT_TRUE(rpc.serverCtx->isCancelled()); + ASSERT_FALSE(clientStream->write(makeUniqueMessage().sharedBuffer())); + ASSERT_FALSE(clientStream->read()); + ASSERT_EQ(clientStream->finish().error_code(), kCancellationStatus.error_code()); +} + +TEST_F(MockStubTest, CannotReturnStatusForCancelledRPC) { + MockClientContext ctx; + auto [clientStream, rpc] = makeRPC(ctx); + + ASSERT_TRUE(clientStream->write(makeUniqueMessage().sharedBuffer())); + + rpc.cancel(::grpc::Status::CANCELLED); + rpc.sendReturnStatus(::grpc::Status::OK); + + ASSERT_TRUE(rpc.serverCtx->isCancelled()); + ASSERT_FALSE(clientStream->write(makeUniqueMessage().sharedBuffer())); + ASSERT_FALSE(clientStream->read()); + ASSERT_EQ(clientStream->finish().error_code(), ::grpc::StatusCode::CANCELLED); +} + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/mock_util.h b/src/mongo/transport/grpc/mock_util.h index 456437cfe1f0f..2e0fc9c02ed50 100644 --- a/src/mongo/transport/grpc/mock_util.h +++ b/src/mongo/transport/grpc/mock_util.h @@ -32,13 +32,62 @@ #include #include +#include +#include +#include + #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/util/interruptible.h" +#include "mongo/util/synchronized_value.h" #include "mongo/util/time_support.h" namespace mongo::transport::grpc { +/** + * Class containing the shared cancellation state between a MockServerStream and its corresponding + * MockClientStream. This mocks cases in which an RPC is terminated before the server's RPC handler + * is able to return a status (e.g. explicit client/server cancellation or a network error). + */ +class MockCancellationState { +public: + explicit MockCancellationState(Date_t deadline) : _deadline(deadline) {} + + Date_t getDeadline() const { + return _deadline; + } + + bool isCancelled() const { + return _cancellationStatus->has_value() || isDeadlineExceeded(); + } + + bool isDeadlineExceeded() const { + return getGlobalServiceContext()->getFastClockSource()->now() > _deadline; + } + + boost::optional<::grpc::Status> getCancellationStatus() { + if (auto status = _cancellationStatus.synchronize(); status->has_value()) { + return *status; + } else if (isDeadlineExceeded()) { + return ::grpc::Status(::grpc::StatusCode::DEADLINE_EXCEEDED, "Deadline exceeded"); + } else { + return boost::none; + } + } + + void cancel(::grpc::Status status) { + auto statusGuard = _cancellationStatus.synchronize(); + if (statusGuard->has_value()) { + return; + } + *statusGuard = std::move(status); + } + +private: + Date_t _deadline; + synchronized_value> _cancellationStatus; +}; + /** * Performs the provided lambda and returns its return value. If the lambda's execution is * interrupted due to the deadline being exceeded, this returns a default-constructed T instead. diff --git a/src/mongo/transport/grpc/server.cpp b/src/mongo/transport/grpc/server.cpp new file mode 100644 index 0000000000000..677d26a269bf6 --- /dev/null +++ b/src/mongo/transport/grpc/server.cpp @@ -0,0 +1,153 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include "mongo/transport/grpc/server.h" + +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_options.h" +#include "mongo/transport/grpc/util.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/net/socket_utils.h" +#include "mongo/util/net/ssl_util.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork + +namespace mongo::transport::grpc { + +Server::Server(std::vector> services, Options options) + : _options{std::move(options)}, _services{std::move(services)}, _shutdown{false} {} + + +Server::~Server() { + invariant(!isRunning()); +} + +std::shared_ptr<::grpc::ServerCredentials> _makeServerCredentials(const Server::Options& options) { + auto clientCertPolicy = ::grpc_ssl_client_certificate_request_type:: + GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_AND_VERIFY; + + if (options.tlsAllowConnectionsWithoutCertificates && options.tlsAllowInvalidCertificates) { + clientCertPolicy = + ::grpc_ssl_client_certificate_request_type::GRPC_SSL_DONT_REQUEST_CLIENT_CERTIFICATE; + } else if (options.tlsAllowConnectionsWithoutCertificates) { + clientCertPolicy = ::grpc_ssl_client_certificate_request_type:: + GRPC_SSL_REQUEST_CLIENT_CERTIFICATE_AND_VERIFY; + } else if (options.tlsAllowInvalidCertificates) { + clientCertPolicy = ::grpc_ssl_client_certificate_request_type:: + GRPC_SSL_REQUEST_AND_REQUIRE_CLIENT_CERTIFICATE_BUT_DONT_VERIFY; + } + + ::grpc::SslServerCredentialsOptions sslOps{clientCertPolicy}; + ::grpc::SslServerCredentialsOptions::PemKeyCertPair certPair; + sslOps.pem_key_cert_pairs = {util::parsePEMKeyFile(options.tlsPEMKeyFile)}; + if (options.tlsCAFile) { + sslOps.pem_root_certs = uassertStatusOK(ssl_util::readPEMFile(*options.tlsCAFile)); + } + return ::grpc::SslServerCredentials(sslOps); +} + +void Server::start() { + stdx::lock_guard lk(_mutex); + invariant(!_shutdown, "Cannot start the server once it's stopped"); + invariant(!_server, "The server is already started"); + + ::grpc::ServerBuilder builder; + + auto credentials = _makeServerCredentials(_options); + + for (auto& address : _options.addresses) { + 
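+        // Build the gRPC listening URI for this address. For example (hypothetical values),
+        // "127.0.0.1" with port 27021 becomes "127.0.0.1:27021", while a Unix domain socket
+        // path such as "/tmp/mongodb.sock" becomes "unix:///tmp/mongodb.sock".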
std::string fullAddress; + if (!isUnixDomainSocket(address)) { + fullAddress = fmt::format("{}:{}", address, _options.port); + } else { + fullAddress = fmt::format("unix://{}", address); + } + builder.AddListeningPort(fullAddress, credentials); + } + for (auto& service : _services) { + builder.RegisterService(service.get()); + } + builder.SetMaxReceiveMessageSize(MaxMessageSizeBytes); + builder.SetMaxSendMessageSize(MaxMessageSizeBytes); + builder.SetDefaultCompressionAlgorithm(::grpc_compression_algorithm::GRPC_COMPRESS_NONE); + ::grpc::ResourceQuota quota; + quota.SetMaxThreads(_options.maxThreads); + builder.SetResourceQuota(quota); + + _server = builder.BuildAndStart(); + + if (!_server) { + LOGV2_ERROR_OPTIONS(7401309, + {logv2::UserAssertAfterLog(ErrorCodes::UnknownError)}, + "Failed to start gRPC server", + "addresses"_attr = _options.addresses, + "port"_attr = _options.port, + "services"_attr = _services); + } + + LOGV2_INFO(7401305, + "Started gRPC server", + "addresses"_attr = _options.addresses, + "port"_attr = _options.port, + "services"_attr = _services); +} + +bool Server::isRunning() const { + stdx::lock_guard lk(_mutex); + return _server && !_shutdown; +} + +void Server::shutdown() { + stdx::lock_guard lk(_mutex); + invariant(!_shutdown, "Cannot shutdown the server once it's stopped"); + _shutdown = true; + if (!_server) { + return; + } + + LOGV2_INFO(7401306, "Shutting down gRPC server"); + + for (auto& service : _services) { + service->shutdown(); + } + + _server->Shutdown(); + LOGV2_DEBUG(7401307, 1, "gRPC server shutdown complete"); + _services.clear(); +} + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/server.h b/src/mongo/transport/grpc/server.h new file mode 100644 index 0000000000000..8bce4fdf8c1e0 --- /dev/null +++ b/src/mongo/transport/grpc/server.h @@ -0,0 +1,79 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include + +#include + +#include "mongo/stdx/mutex.h" +#include "mongo/transport/grpc/service.h" + +namespace mongo::transport::grpc { + +class Server { +public: + struct Options { + /** + * List of IP addresses, hostnames, and/or Unix domain socket paths to bind to. 
+ */ + std::vector addresses; + + int port; + size_t maxThreads; + StringData tlsPEMKeyFile; + boost::optional tlsCAFile; + bool tlsAllowConnectionsWithoutCertificates; + bool tlsAllowInvalidCertificates; + }; + + Server(std::vector> services, Options options); + + ~Server(); + + void start(); + + /** + * Initiates shutting down of the gRPC server, blocking until all pending RPCs and their + * associated handlers have been completed. + */ + void shutdown(); + + bool isRunning() const; + +private: + Options _options; + mutable Mutex _mutex = MONGO_MAKE_LATCH("grpc::Server::_mutex"); + std::vector> _services; + std::unique_ptr<::grpc::Server> _server; + bool _shutdown; +}; + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/server_context.h b/src/mongo/transport/grpc/server_context.h index a957bc2658225..97e467c526018 100644 --- a/src/mongo/transport/grpc/server_context.h +++ b/src/mongo/transport/grpc/server_context.h @@ -56,7 +56,7 @@ class ServerContext { virtual void addInitialMetadataEntry(const std::string& key, const std::string& value) = 0; virtual const MetadataView& getClientMetadata() const = 0; virtual Date_t getDeadline() const = 0; - virtual HostAndPort getHostAndPort() const = 0; + virtual HostAndPort getRemote() const = 0; /** * Attempt to cancel the RPC this context is associated with. This may not have an effect if the @@ -65,6 +65,15 @@ class ServerContext { * This is thread-safe. */ virtual void tryCancel() = 0; + + /** + * Return true if the RPC associated with this ServerContext failed before the RPC handler could + * return its final status back to the client (e.g. due to explicit cancellation or a network + * issue). + * + * If the handler was able to return a status successfully, even if that status was + * Status::CANCELLED, then this method will return false. + */ virtual bool isCancelled() const = 0; }; diff --git a/src/mongo/transport/grpc/server_test.cpp b/src/mongo/transport/grpc/server_test.cpp new file mode 100644 index 0000000000000..fd32f97d487ba --- /dev/null +++ b/src/mongo/transport/grpc/server_test.cpp @@ -0,0 +1,264 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include + +#include +#include +#include +#include +#include + +#include "mongo/logv2/log.h" +#include "mongo/stdx/chrono.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/thread.h" +#include "mongo/transport/grpc/server.h" +#include "mongo/transport/grpc/test_fixtures.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/barrier.h" +#include "mongo/unittest/thread_assertion_monitor.h" +#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/concurrency/notification.h" +#include "mongo/util/net/socket_utils.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest + +namespace mongo::transport::grpc { + +class ServerTest : public unittest::Test { +public: + void runCertificateValidationTest(Server::Options options, + ::grpc::StatusCode validCertResult, + ::grpc::StatusCode noClientCertResult, + ::grpc::StatusCode selfSignedClientCertResult) { + auto clientThread = [&](Server&, unittest::ThreadAssertionMonitor& monitor) { + { + auto stub = CommandServiceTestFixtures::makeStub(); + ASSERT_EQ(stub.connect().error_code(), validCertResult); + } + { + CommandServiceTestFixtures::Stub::Options options; + options.tlsCAFile = CommandServiceTestFixtures::kCAFile; + auto stub = CommandServiceTestFixtures::makeStub(options); + + ASSERT_EQ(stub.connect().error_code(), noClientCertResult); + } + { + CommandServiceTestFixtures::Stub::Options options; + options.tlsCertificateKeyFile = + CommandServiceTestFixtures::kClientSelfSignedCertificateKeyFile; + options.tlsCAFile = CommandServiceTestFixtures::kCAFile; + auto stub = CommandServiceTestFixtures::makeStub(options); + ASSERT_EQ(stub.connect().error_code(), selfSignedClientCertResult); + } + }; + + CommandServiceTestFixtures::runWithServer([](auto) {}, clientThread, options); + } +}; + +TEST_F(ServerTest, MaxThreads) { + unittest::Barrier waitForWorkerThreads(CommandServiceTestFixtures::kMaxThreads); + Notification okayToReturn; + + auto callback = [&](auto session) { + waitForWorkerThreads.countDownAndWait(); + ASSERT_OK(session->sinkMessage(makeUniqueMessage())); + // Block this thread until the test thread notifies it to return. + okayToReturn.get(); + }; + + auto clientThread = [&](Server&, unittest::ThreadAssertionMonitor& monitor) { + auto stub = CommandServiceTestFixtures::makeStub(); + std::vector threads; + + // A minimum of one thread is reserved for the completion queue, so we can only create + // kMaxThreads - 1 streams. + for (int i = 0; i < CommandServiceTestFixtures::kMaxThreads - 1; i++) { + auto thread = monitor.spawn([&, i]() { + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + auto stream = stub.unauthenticatedCommandStream(&ctx); + SharedBuffer msg; + ASSERT_TRUE(stream->Read(&msg)); + // We don't need to block this thread, since the gRPC thread does not return until + // we set `okayToReturn`. + }); + threads.push_back(std::move(thread)); + } + + waitForWorkerThreads.countDownAndWait(); + + // Now that we've reached the maximum number of threads, the next RPC should fail. 
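+        // Server::start() caps gRPC worker threads via ::grpc::ResourceQuota::SetMaxThreads(),
+        // so the (kMaxThreads - 1) streams above plus the completion-queue thread exhaust the
+        // quota and the connection attempt below is expected to fail with RESOURCE_EXHAUSTED.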
+ { + auto status = stub.connect(); + ASSERT_EQ(status.error_code(), ::grpc::StatusCode::RESOURCE_EXHAUSTED) + << status.error_message(); + } + + okayToReturn.set(); + for (auto& thread : threads) { + thread.join(); + } + }; + + CommandServiceTestFixtures::runWithServer(callback, clientThread); +} + +TEST_F(ServerTest, ECDSACertificates) { + const std::string kECDSACAFile = "jstests/libs/ecdsa-ca.pem"; + + auto options = CommandServiceTestFixtures::makeServerOptions(); + options.tlsPEMKeyFile = "jstests/libs/ecdsa-server.pem"; + options.tlsCAFile = kECDSACAFile; + + CommandServiceTestFixtures::runWithServer( + [](auto) {}, + [&](auto&, auto&) { + auto stubOptions = CommandServiceTestFixtures::Stub::Options{}; + stubOptions.tlsCAFile = kECDSACAFile; + stubOptions.tlsCertificateKeyFile = "jstests/libs/ecdsa-client.pem"; + + auto stub = CommandServiceTestFixtures::makeStub(stubOptions); + ASSERT_EQ(stub.connect().error_code(), ::grpc::StatusCode::OK); + }, + options); +} + +TEST_F(ServerTest, IntermediateCA) { + auto options = CommandServiceTestFixtures::makeServerOptions(); + options.tlsPEMKeyFile = "jstests/libs/server-intermediate-ca.pem"; + + CommandServiceTestFixtures::runWithServer( + [](auto) {}, + [&](auto&, auto&) { + auto stub = CommandServiceTestFixtures::makeStub(); + ASSERT_EQ(stub.connect().error_code(), ::grpc::StatusCode::OK); + }, + options); +} + +TEST_F(ServerTest, InvalidServerCertificateOptions) { + const std::string kMissingPrivateKeyPath = "jstests/libs/ecdsa-ca-ocsp.crt"; + const std::string kNonExistentPath = "non_existent_path"; + + auto runInvalidCertTest = [&](Server::Options options) { + auto server = CommandServiceTestFixtures::makeServer({}, options); + ASSERT_THROWS(server.start(), DBException); + }; + + { + auto options = CommandServiceTestFixtures::makeServerOptions(); + options.tlsPEMKeyFile = kNonExistentPath; + runInvalidCertTest(options); + } + { + auto options = CommandServiceTestFixtures::makeServerOptions(); + options.tlsPEMKeyFile = CommandServiceTestFixtures::kServerCertificateKeyFile; + options.tlsCAFile = kNonExistentPath; + runInvalidCertTest(options); + } + { + auto options = CommandServiceTestFixtures::makeServerOptions(); + options.tlsPEMKeyFile = kMissingPrivateKeyPath; + runInvalidCertTest(options); + } +} + +TEST_F(ServerTest, DefaultClientCertificateValidation) { + runCertificateValidationTest( + CommandServiceTestFixtures::makeServerOptions(), + /* valid client cert succeeds */ ::grpc::StatusCode::OK, + /* no client cert fails */ ::grpc::StatusCode::UNAVAILABLE, + /* self-signed client cert fails */ ::grpc::StatusCode::UNAVAILABLE); +} + +TEST_F(ServerTest, AllowConnectionsWithoutCertificates) { + auto serverOptions = CommandServiceTestFixtures::makeServerOptions(); + serverOptions.tlsAllowConnectionsWithoutCertificates = true; + + runCertificateValidationTest( + serverOptions, + /* valid client cert succeeds */ ::grpc::StatusCode::OK, + /* no client cert succeeds */ ::grpc::StatusCode::OK, + /* self-signed client cert fails */ ::grpc::StatusCode::UNAVAILABLE); +} + +TEST_F(ServerTest, AllowInvalidClientCertificate) { + auto serverOptions = CommandServiceTestFixtures::makeServerOptions(); + serverOptions.tlsAllowInvalidCertificates = true; + + runCertificateValidationTest(serverOptions, + /* valid client cert succeeds */ ::grpc::StatusCode::OK, + /* no client cert fails */ ::grpc::StatusCode::UNAVAILABLE, + /* self-signed client cert succeeds */ ::grpc::StatusCode::OK); +} + +TEST_F(ServerTest, DisableCertificateValidation) { + auto 
serverOptions = CommandServiceTestFixtures::makeServerOptions(); + serverOptions.tlsAllowInvalidCertificates = true; + serverOptions.tlsAllowConnectionsWithoutCertificates = true; + + runCertificateValidationTest(serverOptions, + /* valid client cert succeeds */ ::grpc::StatusCode::OK, + /* no client cert succeeds */ ::grpc::StatusCode::OK, + /* self-signed client cert succeeds */ ::grpc::StatusCode::OK); +} + +TEST_F(ServerTest, MultipleAddresses) { + auto addresses = std::vector{ + "localhost", "127.0.0.1", "[::1]", makeUnixSockPath(CommandServiceTestFixtures::kBindPort)}; + + Server::Options options = CommandServiceTestFixtures::makeServerOptions(); + options.addresses = addresses; + // Use this certificate because the default one doesn't have SANs associated with all of the + // addresses being tested here. + options.tlsPEMKeyFile = "jstests/libs/server_SAN.pem"; + + CommandServiceTestFixtures::runWithServer( + [](auto) {}, + [&addresses](auto&, auto&) { + for (auto& address : addresses) { + std::string fullAddress; + if (isUnixDomainSocket(address)) { + fullAddress = "unix://{}"_format(address); + } else { + fullAddress = "{}:{}"_format(address, CommandServiceTestFixtures::kBindPort); + } + auto stub = CommandServiceTestFixtures::makeStub(fullAddress); + ASSERT_EQ(stub.connect().error_code(), ::grpc::StatusCode::OK) + << "failed to connect to " << address; + } + }, + options); +} + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/service.cpp b/src/mongo/transport/grpc/service.cpp new file mode 100644 index 0000000000000..62b62a977293c --- /dev/null +++ b/src/mongo/transport/grpc/service.cpp @@ -0,0 +1,307 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. 
+ */ + +#include "mongo/transport/grpc/service.h" + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/logv2/log.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/rpc/metadata/client_metadata.h" +#include "mongo/transport/grpc/grpc_server_context.h" +#include "mongo/transport/grpc/grpc_server_stream.h" +#include "mongo/transport/grpc/metadata.h" +#include "mongo/util/scopeguard.h" +#include "mongo/util/shared_buffer.h" + +#define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork + +namespace mongo::transport::grpc { + +namespace { + +const Status kShutdownTerminationStatus{ErrorCodes::ShutdownInProgress, + "gRPC server is shutting down"}; + +::grpc::Status parseWireVersion(const MetadataView& clientMetadata, int& wireVersionOut) { + auto clientWireVersionEntry = clientMetadata.find(CommandService::kWireVersionKey); + if (clientWireVersionEntry == clientMetadata.end()) { + return ::grpc::Status( + ::grpc::StatusCode::FAILED_PRECONDITION, + "Clients must specify the server wire version they are targeting in the \"{}\" metadata entry"_format( + CommandService::kWireVersionKey)); + } + if (auto parseResult = std::from_chars(clientWireVersionEntry->second.begin(), + clientWireVersionEntry->second.end(), + wireVersionOut); + parseResult.ec != std::errc{}) { + return ::grpc::Status( + ::grpc::StatusCode::INVALID_ARGUMENT, + "Invalid wire version: \"{}\""_format(clientWireVersionEntry->second)); + } + + return ::grpc::Status::OK; +} + +::grpc::Status verifyClientWireVersion(const MetadataView& clientMetadata, + int clusterMaxWireVersion) { + int clientWireVersion; + if (auto parseResult = parseWireVersion(clientMetadata, clientWireVersion); !parseResult.ok()) { + return parseResult; + } + if (clientWireVersion > clusterMaxWireVersion) { + return ::grpc::Status( + ::grpc::StatusCode::FAILED_PRECONDITION, + "Provided wire version ({}) exceeds cluster's max wire version ({})"_format( + clientWireVersion, clusterMaxWireVersion)); + } else if (auto serverMinWireVersion = + WireSpec::instance().get()->incomingExternalClient.minWireVersion; + clientWireVersion < serverMinWireVersion) { + return ::grpc::Status( + ::grpc::StatusCode::FAILED_PRECONDITION, + "Provided wire version ({}) is less than this server's minimum accepted wire version ({})"_format( + clientWireVersion, serverMinWireVersion)); + } + + return ::grpc::Status::OK; +} + +::grpc::Status verifyReservedMetadata(const MetadataView& clientMetadata) { + static const StringDataSet kRecognizedClientMetadataKeys{ + CommandService::kAuthenticationTokenKey, + CommandService::kClientIdKey, + CommandService::kClientMetadataKey, + CommandService::kWireVersionKey}; + static constexpr StringData kReservedMetadataKeyPrefix = "mongodb"_sd; + + for (const auto& entry : clientMetadata) { + const auto& key = entry.first; + if (key.startsWith(kReservedMetadataKeyPrefix) && + !kRecognizedClientMetadataKeys.contains(key)) { + return ::grpc::Status(::grpc::StatusCode::INVALID_ARGUMENT, + "Unrecognized reserved metadata key: \"{}\""_format(key)); + } + } + return ::grpc::Status::OK; +} + +::grpc::Status extractClientId(const MetadataView& clientMetadata, + boost::optional& clientId) { + if (auto clientIdEntry = clientMetadata.find(CommandService::kClientIdKey); + clientIdEntry != clientMetadata.end()) { + auto clientIdStatus = UUID::parse(clientIdEntry->second); + if (!clientIdStatus.isOK()) { + return ::grpc::Status( + ::grpc::StatusCode::INVALID_ARGUMENT, + 
"The provided client ID (\"{}\") is not a valid UUID: {}"_format( + clientIdEntry->second, clientIdStatus.getStatus().toString())); + } + clientId = std::move(clientIdStatus.getValue()); + } + return ::grpc::Status::OK; +} + +/** + * Logs the metadata document provided by the client, if any. + * If a clientId was specified by the remote, log at INFO level. Otherwise, log at DEBUG level. + * If the document is not valid BSON, log at WARNING level. + */ +void logClientMetadataDocument(const MetadataView& clientMetadata, const IngressSession& session) { + auto clientMetadataEntry = clientMetadata.find(CommandService::kClientMetadataKey); + if (clientMetadataEntry == clientMetadata.end()) { + return; + } + + try { + fmt::memory_buffer buffer{}; + base64::decode(buffer, clientMetadataEntry->second); + BSONObj metadataDocument{buffer.data()}; + auto metadata = ClientMetadata{metadataDocument}; + + if (session.clientId()) { + LOGV2_INFO(7401301, + "Received client metadata for gRPC stream", + "remote"_attr = session.remote(), + "remoteClientId"_attr = session.clientIdStr(), + "streamId"_attr = session.id(), + "doc"_attr = metadataDocument); + } else { + LOGV2_DEBUG(7401302, + 2, + "Received client metadata for gRPC stream", + "remote"_attr = session.remote(), + "remoteClientId"_attr = session.clientIdStr(), + "streamId"_attr = session.id(), + "doc"_attr = metadataDocument); + } + } catch (const DBException& e) { + LOGV2_WARNING(7401303, + "Received invalid client metadata for gRPC stream", + "remote"_attr = session.remote(), + "remoteClientId"_attr = session.clientIdStr(), + "streamId"_attr = session.id(), + "error"_attr = e); + } +} + +template +auto makeRpcServiceMethod(CommandService* service, const char* name, HandlerType handler) { + return new ::grpc::internal::RpcServiceMethod( + name, + ::grpc::internal::RpcMethod::BIDI_STREAMING, + new ::grpc::internal::BidiStreamingHandler( + [handler = std::move(handler)]( + CommandService* service, + ::grpc::ServerContext* nativeServerCtx, + ::grpc::ServerReaderWriter* nativeServerStream) { + GRPCServerContext ctx{nativeServerCtx}; + GRPCServerStream stream{nativeServerStream}; + return handler(service, ctx, stream); + }, + service)); +} + +} // namespace + +const std::string CommandService::kClusterMaxWireVersionKey = "mongodb-maxwireversion"; + +CommandService::CommandService(GRPCTransportLayer* tl, + RPCHandler callback, + std::shared_ptr wvProvider) + : _tl{tl}, + _callback{std::move(callback)}, + _wvProvider{std::move(wvProvider)}, + _clientCache{std::make_unique()} { + + AddMethod(makeRpcServiceMethod( + this, + kUnauthenticatedCommandStreamMethodName, + [](CommandService* service, GRPCServerContext& ctx, GRPCServerStream& stream) { + return service->_handleStream(ctx, stream); + })); + + AddMethod(makeRpcServiceMethod( + this, + kAuthenticatedCommandStreamMethodName, + [](CommandService* service, GRPCServerContext& ctx, GRPCServerStream& stream) { + return service->_handleAuthenticatedStream(ctx, stream); + })); +} + +::grpc::Status CommandService::_handleStream(ServerContext& serverCtx, ServerStream& stream) { + auto clusterMaxWireVersion = _wvProvider->getClusterMaxWireVersion(); + serverCtx.addInitialMetadataEntry(kClusterMaxWireVersionKey, + std::to_string(_wvProvider->getClusterMaxWireVersion())); + + auto clientMetadata = serverCtx.getClientMetadata(); + if (auto result = verifyClientWireVersion(clientMetadata, clusterMaxWireVersion); + !result.ok()) { + return result; + } + if (auto result = verifyReservedMetadata(clientMetadata); 
!result.ok()) { + return result; + } + boost::optional clientId; + if (auto result = extractClientId(clientMetadata, clientId); !result.ok()) { + return result; + } + + auto session = std::make_shared(_tl, &serverCtx, &stream, clientId); + std::list::iterator it; + { + stdx::lock_guard lk{_mutex}; + + if (_shutdown) { + session->terminate(kShutdownTerminationStatus); + return util::convertStatus(*session->terminationStatus()); + } + it = _sessions.insert(_sessions.begin(), session); + } + ON_BLOCK_EXIT([&]() { + stdx::lock_guard lk{_mutex}; + _sessions.erase(it); + if (_sessions.empty()) { + _shutdownCV.notify_one(); + } + }); + + if (!clientId || _clientCache->add(*clientId) == ClientCache::AddResult::kCreated) { + logClientMetadataDocument(clientMetadata, *session); + } + + _callback(session); + auto status = session->terminationStatus(); + invariant(status.has_value()); + return util::convertStatus(std::move(*status)); +} + +::grpc::Status CommandService::_handleAuthenticatedStream(ServerContext& serverCtx, + ServerStream& stream) { + if (auto meta = serverCtx.getClientMetadata(); + meta.find(kAuthenticationTokenKey) == meta.end()) { + return ::grpc::Status{ + ::grpc::StatusCode::UNAUTHENTICATED, + "{} RPCs must contain an authentication token in the \"{}\" metadata entry"_format( + kAuthenticatedCommandStreamMethodName, kAuthenticationTokenKey)}; + } + + return _handleStream(serverCtx, stream); +} + +void CommandService::shutdown() { + stdx::unique_lock lk{_mutex}; + + invariant(!_shutdown, "Cannot shut down {} gRPC service once it's stopped"_format(name())); + _shutdown = true; + + auto nSessionsTerminated = _sessions.size(); + for (auto& session : _sessions) { + session->cancel(kShutdownTerminationStatus.reason()); + } + + _shutdownCV.wait(lk, [&]() { return _sessions.empty(); }); + + LOGV2_DEBUG(7401308, + 1, + "MongoDB gRPC service shutdown complete", + "terminatedSessionsCount"_attr = nSessionsTerminated); +} + +} // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/service.h b/src/mongo/transport/grpc/service.h index e7424b75373fe..f6c03bcba84fb 100644 --- a/src/mongo/transport/grpc/service.h +++ b/src/mongo/transport/grpc/service.h @@ -29,27 +29,39 @@ #pragma once -#include -#include -#include +#include -#include -#include -#include #include -#include -#include #include -#include -#include "mongo/transport/grpc/grpc_server_context.h" -#include "mongo/transport/grpc/grpc_server_stream.h" -#include "mongo/transport/grpc/server_context.h" -#include "mongo/transport/grpc/server_stream.h" -#include "mongo/util/shared_buffer.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/stdx/mutex.h" +#include "mongo/transport/grpc/client_cache.h" +#include "mongo/transport/grpc/grpc_session.h" +#include "mongo/transport/grpc/grpc_transport_layer.h" +#include "mongo/transport/grpc/wire_version_provider.h" namespace mongo::transport::grpc { +/** + * Base type for all gRPC services, allowing type-specific shutdown and stringifying logic for each + * service. + */ +class Service : public ::grpc::Service { +public: + virtual StringData name() const = 0; + + virtual void shutdown() = 0; + + std::string toString() const { + return name().toString(); + } +}; + +inline std::string toStringForLogging(const std::unique_ptr& service) { + return service->toString(); +} + /** * A gRPC service definition for handling commands according to the MongoDB gRPC Protocol. 
* The service's name is "MongoDB", and it provides two methods: "UnauthenticatedCommandStream" and @@ -58,56 +70,63 @@ namespace mongo::transport::grpc { * These methods use SharedBuffer as the message type (not a protocol buffer), the contents of which * are either OP_MSG or OP_COMPRESSED encoded bytes. */ -class Service : public ::grpc::Service { +class CommandService : public Service { public: - using RpcHandler = std::function<::grpc::Status(ServerContext&, ServerStream&)>; + using InSessionPtr = std::shared_ptr; + using RPCHandler = std::function; static constexpr const char* kAuthenticatedCommandStreamMethodName = - "/MongoDB/AuthenticatedCommandStream"; + "/mongodb.CommandService/AuthenticatedCommandStream"; static constexpr const char* kUnauthenticatedCommandStreamMethodName = - "/MongoDB/UnauthenticatedCommandStream"; - - Service(RpcHandler unauthenticatedCommandStreamHandler, - RpcHandler authenticatedCommandStreamHandler) - : _unauthenticatedCommandStreamHandler{std::move(unauthenticatedCommandStreamHandler)}, - _authenticatedCommandStreamHandler{std::move(authenticatedCommandStreamHandler)} { - - AddMethod(new ::grpc::internal::RpcServiceMethod( - kAuthenticatedCommandStreamMethodName, - ::grpc::internal::RpcMethod::BIDI_STREAMING, - new ::grpc::internal::BidiStreamingHandler( - [](Service* service, - ::grpc::ServerContext* nativeServerCtx, - ::grpc::ServerReaderWriter* - nativeServerStream) { - GRPCServerContext ctx{nativeServerCtx}; - GRPCServerStream stream{nativeServerStream}; - - return service->_authenticatedCommandStreamHandler(ctx, stream); - }, - this))); - - AddMethod(new ::grpc::internal::RpcServiceMethod( - kUnauthenticatedCommandStreamMethodName, - ::grpc::internal::RpcMethod::BIDI_STREAMING, - new ::grpc::internal::BidiStreamingHandler( - [](Service* service, - ::grpc::ServerContext* nativeServerCtx, - ::grpc::ServerReaderWriter* - nativeServerStream) { - GRPCServerContext ctx{nativeServerCtx}; - GRPCServerStream stream{nativeServerStream}; - - return service->_unauthenticatedCommandStreamHandler(ctx, stream); - }, - this))); + "/mongodb.CommandService/UnauthenticatedCommandStream"; + + // Client-provided metadata keys. + static constexpr StringData kAuthenticationTokenKey = "authorization"_sd; + static constexpr StringData kClientIdKey = "mongodb-clientid"_sd; + static constexpr StringData kClientMetadataKey = "mongodb-client"_sd; + static constexpr StringData kWireVersionKey = "mongodb-wireversion"_sd; + + // Server-provided metadata keys. + // This is defined as a std::string instead of StringData to avoid having to copy it when + // passing to gRPC APIs that expect a const std::string&. + static const std::string kClusterMaxWireVersionKey; + + /** + * The provided callback is used to handle streams created from both methods. The status + * returned from the callback will be communicated to the client. The callback MUST terminate + * the session before returning. + * + * The session's termination status will be converted to the closest matching gRPC status and + * returned to the client once the handler exits. This conversion is lossy though, so it is + * better to communicate errors to the client by writing messages to the stream rather than by + * setting a termination status. 
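+     *
+     * A minimal sketch of a conforming handler (an illustrative sketch only; it assumes
+     * sourceMessage()/sinkMessage() return StatusWith<Message>/Status as they are used in
+     * service_test.cpp):
+     *
+     *     CommandService::RPCHandler handler = [](auto session) {
+     *         // Echo one message back to the client, then end the stream cleanly.
+     *         auto swMsg = session->sourceMessage();
+     *         if (swMsg.isOK()) {
+     *             uassertStatusOK(session->sinkMessage(swMsg.getValue()));
+     *             session->end();  // Surfaces ::grpc::Status::OK to the client.
+     *         }
+     *     };
+     *
+     * Calling session->cancel() instead surfaces CANCELLED, while session->terminate(status)
+     * maps the error code to the closest gRPC equivalent (see the CancelSession and
+     * TerminateSessionWith* tests in service_test.cpp).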
+ */ + CommandService(GRPCTransportLayer* tl, + RPCHandler callback, + std::shared_ptr wvProvider); + + ~CommandService() = default; + + StringData name() const override { + return "mongodb.CommandService"_sd; } - ~Service() = default; + void shutdown() override; private: - RpcHandler _unauthenticatedCommandStreamHandler; - RpcHandler _authenticatedCommandStreamHandler; + ::grpc::Status _handleStream(ServerContext& serverCtx, ServerStream& stream); + + ::grpc::Status _handleAuthenticatedStream(ServerContext& serverCtx, ServerStream& stream); + + GRPCTransportLayer* _tl; + RPCHandler _callback; + std::shared_ptr _wvProvider; + std::unique_ptr _clientCache; + + mutable stdx::mutex _mutex; // NOLINT + stdx::condition_variable _shutdownCV; + std::list _sessions; + bool _shutdown = false; }; } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/service_test.cpp b/src/mongo/transport/grpc/service_test.cpp index d7656388360e3..ec5768d96f280 100644 --- a/src/mongo/transport/grpc/service_test.cpp +++ b/src/mongo/transport/grpc/service_test.cpp @@ -27,367 +27,631 @@ * it in the license file. */ +#include #include #include +#include #include #include -#include #include #include +#include +#include +#include "mongo/db/wire_version.h" +#include "mongo/logv2/constants.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/rpc/message.h" #include "mongo/stdx/thread.h" +#include "mongo/transport/grpc/grpc_server_context.h" #include "mongo/transport/grpc/metadata.h" #include "mongo/transport/grpc/service.h" #include "mongo/transport/grpc/test_fixtures.h" +#include "mongo/transport/grpc/wire_version_provider.h" #include "mongo/unittest/assert.h" +#include "mongo/unittest/log_test.h" #include "mongo/unittest/thread_assertion_monitor.h" #include "mongo/unittest/unittest.h" +#include "mongo/util/concurrency/notification.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest namespace mongo::transport::grpc { +namespace { -class TestStub { +class CommandServiceTest : public unittest::Test { public: - using ReadMessageType = SharedBuffer; - using WriteMessageType = ConstSharedBuffer; - using ClientStream = ::grpc::ClientReaderWriter; - - TestStub(const std::shared_ptr<::grpc::ChannelInterface> channel) - : _channel(channel), - _unauthenticatedCommandStreamMethod(Service::kUnauthenticatedCommandStreamMethodName, - ::grpc::internal::RpcMethod::BIDI_STREAMING, - channel), - _authenticatedCommandStreamMethod(Service::kAuthenticatedCommandStreamMethodName, - ::grpc::internal::RpcMethod::BIDI_STREAMING, - channel) {} - ~TestStub() = default; - - ClientStream* authenticatedCommandStream(::grpc::ClientContext* context) { - return ::grpc::internal::ClientReaderWriterFactory:: - Create(_channel.get(), _authenticatedCommandStreamMethod, context); - } + using ClientContextType = ::grpc::ClientContext; + using StubType = CommandServiceTestFixtures::Stub; + using StreamFactoryType = + std::function(ClientContextType&, StubType&)>; - ClientStream* unauthenticatedCommandStream(::grpc::ClientContext* context) { - return ::grpc::internal::ClientReaderWriterFactory:: - Create(_channel.get(), _unauthenticatedCommandStreamMethod, context); - } + /** + * Provides the client-side implementation for a gRPC client. `runTestWithBothMethods` will + * invoke this callback and provide it with its arguments. 
+ */ + using ClientCallbackType = std::function; /** - * Executes both RPCs defined in the service sequentially and passes the resultant stream to the - * body closure each time. The makeCtx closure will be invoked once before each RPC invocation, - * and the resultant context will then be used for the RPC. + * Runs a test twice: once for each method provided by CommandService. + * + * On each run of the test, this creates a new CommandService instance that uses the provided + * handler, starts a new server instance, and then spawns a client thread that constructs a stub + * towards the server and runs `callback`. The client may use the provided factory to make new + * streams based on the method (e.g., unauthenticated) being tested. */ - void executeBoth(std::function makeCtx, - std::function body) { - ::grpc::ClientContext unauthenticatedCtx; - makeCtx(unauthenticatedCtx); - auto unauthenticatedStream = unauthenticatedCommandStream(&unauthenticatedCtx); - body(unauthenticatedCtx, *unauthenticatedStream); - - ::grpc::ClientContext authenticatedCtx; - makeCtx(authenticatedCtx); - auto authenticatedStream = unauthenticatedCommandStream(&authenticatedCtx); - body(authenticatedCtx, *authenticatedStream); + void runTestWithBothMethods(CommandService::RPCHandler serverStreamHandler, + ClientCallbackType callback) { + StreamFactoryType unauthCmdStreamFactory = [](auto& ctx, auto& stub) { + return stub.unauthenticatedCommandStream(&ctx); + }; + StreamFactoryType authCmdStreamFactory = [](auto& ctx, auto& stub) { + return stub.authenticatedCommandStream(&ctx); + }; + + for (auto& factory : {unauthCmdStreamFactory, authCmdStreamFactory}) { + CommandServiceTestFixtures::runWithServer( + serverStreamHandler, + [&](Server& server, unittest::ThreadAssertionMonitor& monitor) { + callback(server, CommandServiceTestFixtures::makeStub(), factory, monitor); + }); + } } -private: - std::shared_ptr<::grpc::ChannelInterface> _channel; - const ::grpc::internal::RpcMethod _unauthenticatedCommandStreamMethod; - const ::grpc::internal::RpcMethod _authenticatedCommandStreamMethod; -}; + using ContextInitializerType = std::function; -class ServiceTest : public unittest::Test { -public: - static constexpr auto kServerAddress = "localhost:50051"; - - TestStub makeStub() { - LOGV2(7401202, - "gRPC client is attempting to connect to the server", - "address"_attr = kServerAddress); - return TestStub{ - ::grpc::CreateChannel(kServerAddress, ::grpc::InsecureChannelCredentials())}; + /** + * Fixture used to test the error codes returned to the client as the result of metadata + * validation. Each test provides its own logic for initializing the client context, and + * compares the returned gRPC status code against an expected value. + */ + void runMetadataValidationTest(::grpc::StatusCode expectedStatusCode, + ContextInitializerType initContext) { + runTestWithBothMethods( + [](auto) {}, + [&](auto&, auto stub, auto streamFactory, auto&) { + ::grpc::ClientContext ctx; + initContext(ctx); + auto stream = streamFactory(ctx, stub); + ASSERT_EQ(stream->Finish().error_code(), expectedStatusCode); + + // The server should always respond with the cluster's max wire version, regardless + // of whether metadata validation failed. The one exception is for authentication + // failures. 
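+                // This holds because _handleStream() adds kClusterMaxWireVersionKey to the
+                // initial metadata before any validation runs, whereas the authenticated
+                // method can reject a request as UNAUTHENTICATED before ever reaching
+                // _handleStream(), which is why auth failures are the one exception.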
+ auto serverMetadata = ctx.GetServerInitialMetadata(); + auto it = serverMetadata.find(CommandService::kClusterMaxWireVersionKey); + ASSERT_NE(it, serverMetadata.end()); + ASSERT_EQ(it->second, + std::to_string(wireVersionProvider().getClusterMaxWireVersion())); + }); } /** - * Starts up a gRPC server with a Service registered that uses the provided handler for both RPC - * methods. Executes the clientThreadBody in a separate thread and then waits for it to exit - * before shutting down the server. + * Verifies the number and the severity of logs, concerning client-metadata, that are emitted in + * response to accepting gRPC streams on the server side. */ - void runTest(Service::RpcHandler handler, - std::function clientThreadBody) { - unittest::threadAssertionMonitoredTest([&](unittest::ThreadAssertionMonitor& monitor) { - Service service{handler, handler}; - - ::grpc::ServerBuilder builder; - builder.AddListeningPort(kServerAddress, ::grpc::InsecureServerCredentials()) - .RegisterService(&service); - std::unique_ptr<::grpc::Server> server(builder.BuildAndStart()); - - auto serverThread = monitor.spawn([&] { - LOGV2(7401201, - "gRPC server is listening for connections", - "address"_attr = kServerAddress); - server->Wait(); + void runMetadataLogTest(ContextInitializerType initContext, + size_t nStreamsToCreate, + size_t nExpectedLogLines, + logv2::LogSeverity expectedSeverity) { + stdx::unordered_set clientMetadataLogIds = {7401301, 7401302, 7401303}; + runTestWithBothMethods( + [](auto) {}, + [&](auto&, auto stub, auto streamFactory, auto&) { + // Temporarily maximize verbosity for networking logs. + unittest::MinimumLoggedSeverityGuard severityGuard{ + logv2::LogComponent::kNetwork, + logv2::LogSeverity::Debug(logv2::LogSeverity::kMaxDebugLevel)}; + + startCapturingLogMessages(); + for (size_t i = 0; i < nStreamsToCreate; i++) { + ::grpc::ClientContext ctx; + initContext(ctx); + auto stream = streamFactory(ctx, stub); + ASSERT_EQ(stream->Finish().error_code(), ::grpc::OK); + } + stopCapturingLogMessages(); + + auto logLines = getCapturedBSONFormatLogMessages(); + auto observed = + std::count_if(logLines.cbegin(), logLines.cend(), [&](const BSONObj& line) { + return line.getStringField(logv2::constants::kSeverityFieldName) == + expectedSeverity.toStringDataCompact() && + clientMetadataLogIds.contains( + line.getIntField(logv2::constants::kIdFieldName)); + }); + + ASSERT_EQ(observed, nExpectedLogLines); }); - auto clientThread = monitor.spawn([&] { clientThreadBody(monitor); }); + } - clientThread.join(); - server->Shutdown(); - serverThread.join(); + using TerminationCallbackType = std::function; + + /** + * Creates a stream against each command stream method, and then uses the provided callback to + * terminate the stream. The goal is to verify the termination status (and potentially the + * reason) that is visible to the client-side of the stream. 
+ */ + void runTerminationTest(TerminationCallbackType terminationCallback, + ::grpc::StatusCode expectedStatus, + boost::optional expectedReason = boost::none) { + const size_t kMessageCount = 5; + std::unique_ptr> terminated; + + CommandService::RPCHandler serverHandler = [&](auto session) { + for (size_t i = 0; i < kMessageCount; i++) { + ASSERT_OK(session->sinkMessage(makeUniqueMessage())); + } + terminationCallback(*session); + terminated->set(); + ASSERT_NOT_OK(session->sinkMessage(makeUniqueMessage())); + }; + + runTestWithBothMethods(serverHandler, [&](auto&, auto stub, auto streamFactory, auto&) { + // Initialize the termination notification for this run. + terminated = std::make_unique>(); + + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addAllClientMetadata(ctx); + auto stream = streamFactory(ctx, stub); + + terminated->get(); + + // We should be able to read messages sent before the RPC was cancelled. + for (size_t i = 0; i < kMessageCount; i++) { + SharedBuffer buffer; + ASSERT_TRUE(stream->Read(&buffer)); + } + + SharedBuffer buffer; + ASSERT_FALSE(stream->Read(&buffer)); + + auto status = stream->Finish(); + ASSERT_EQ(status.error_code(), expectedStatus); + if (expectedReason) { + ASSERT_EQ(status.error_message(), *expectedReason); + } }); } + + WireVersionProvider& wireVersionProvider() const { + return *_wvProvider; + } + +private: + std::shared_ptr _wvProvider = std::make_shared(); }; -TEST_F(ServiceTest, Echo) { - auto echoHandler = [](auto& serverCtx, auto& stream) { - while (auto msg = stream.read()) { - stream.write(*msg); +TEST_F(CommandServiceTest, Echo) { + CommandService::RPCHandler echoHandler = [](auto session) { + while (true) { + try { + auto msg = uassertStatusOK(session->sourceMessage()); + ASSERT_OK(session->sinkMessage(std::move(msg))); + } catch (ExceptionFor&) { + // Continues to serve the echo commands until the stream is terminated. 
+ return; + } } - return ::grpc::Status::OK; }; - runTest(echoHandler, [&](auto&) { - auto stub = makeStub(); - stub.executeBoth([](auto& ctx) {}, - [&](auto& ctx, TestStub::ClientStream& stream) { - auto message = makeUniqueMessage(); - ASSERT_TRUE(stream.Write(message.sharedBuffer())); - SharedBuffer readMsg; - ASSERT_TRUE(stream.Read(&readMsg)); - ASSERT_EQ_MSG(Message{readMsg}, message); - }); + runTestWithBothMethods(echoHandler, [&](auto&, auto stub, auto streamFactory, auto&) { + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addAllClientMetadata(ctx); + + auto stream = streamFactory(ctx, stub); + auto toWrite = makeUniqueMessage(); + ASSERT_TRUE(stream->Write(toWrite.sharedBuffer())); + SharedBuffer toRead; + ASSERT_TRUE(stream->Read(&toRead)) << stream->Finish().error_message(); + ASSERT_EQ_MSG(Message{toRead}, toWrite); }); } -TEST_F(ServiceTest, ClientMetadataIsAccessible) { - MetadataView clientMetadata = {{"foo", "bar"}, {"baz", "quux"}}; +TEST_F(CommandServiceTest, CancelSession) { + runTerminationTest([](auto& session) { session.cancel("some reason"); }, ::grpc::CANCELLED); +} - Service::RpcHandler metadataHandler = [&clientMetadata](auto& serverCtx, auto& stream) { - MetadataView receivedClientMetadata = serverCtx.getClientMetadata(); - for (auto& kvp : clientMetadata) { - auto it = receivedClientMetadata.find(kvp.first); - ASSERT_NE(it, receivedClientMetadata.end()); - ASSERT_EQ(it->second, kvp.second); - } - ASSERT_TRUE(stream.write(makeUniqueMessage().sharedBuffer())); - return ::grpc::Status::OK; +TEST_F(CommandServiceTest, TerminateSessionWithShutdownError) { + Status shutdownError(ErrorCodes::ShutdownInProgress, "shutdown error"); + runTerminationTest([&](auto& session) { session.terminate(shutdownError); }, + util::errorToStatusCode(shutdownError.code()), + shutdownError.reason()); +} + +TEST_F(CommandServiceTest, TerminateSessionWithCancellationError) { + Status cancellationError(ErrorCodes::CallbackCanceled, "cancelled"); + runTerminationTest([&](auto& session) { session.terminate(cancellationError); }, + util::errorToStatusCode(cancellationError.code()), + cancellationError.reason()); +} + +TEST_F(CommandServiceTest, EndSession) { + runTerminationTest([](auto& session) { session.end(); }, ::grpc::OK); +} + +TEST_F(CommandServiceTest, TooLowWireVersionIsRejected) { + ContextInitializerType initContext = [](auto& ctx) { + ctx.AddMetadata(CommandService::kWireVersionKey.toString(), "-1"); + ctx.AddMetadata(CommandService::kAuthenticationTokenKey.toString(), "my-token"); }; + runMetadataValidationTest(::grpc::StatusCode::FAILED_PRECONDITION, initContext); +} - runTest(metadataHandler, [&](auto&) { - auto stub = makeStub(); +TEST_F(CommandServiceTest, InvalidWireVersionIsRejected) { + ContextInitializerType initContext = [](auto& ctx) { + ctx.AddMetadata(CommandService::kWireVersionKey.toString(), "foo"); + ctx.AddMetadata(CommandService::kAuthenticationTokenKey.toString(), "my-token"); + }; + runMetadataValidationTest(::grpc::StatusCode::INVALID_ARGUMENT, initContext); +} - auto makeContext = [&](auto& ctx) { - for (auto kvp : clientMetadata) { - ctx.AddMetadata(std::string{kvp.first}, std::string{kvp.second}); - } - }; - auto body = [&](auto& ctx, auto& stream) { - SharedBuffer msg; - ASSERT_TRUE(stream.Read(&msg)); - }; - stub.executeBoth(makeContext, body); - }); +TEST_F(CommandServiceTest, InvalidClientIdIsRejected) { + ContextInitializerType initContext = [](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + 
ctx.AddMetadata(CommandService::kClientIdKey.toString(), "not a valid UUID"); + }; + runMetadataValidationTest(::grpc::StatusCode::INVALID_ARGUMENT, initContext); +} + +TEST_F(CommandServiceTest, MissingWireVersionIsRejected) { + ContextInitializerType initContext = [](auto& ctx) { + ctx.AddMetadata(CommandService::kAuthenticationTokenKey.toString(), "my-token"); + }; + runMetadataValidationTest(::grpc::StatusCode::FAILED_PRECONDITION, initContext); } -TEST_F(ServiceTest, ServerMetadataIsAccessible) { - MetadataContainer serverMetadata = {{"foo", "bar"}, {"baz", "quux"}}; +TEST_F(CommandServiceTest, ClientMetadataDocumentIsOptional) { + ContextInitializerType initContext = [](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + ctx.AddMetadata(CommandService::kClientIdKey.toString(), UUID::gen().toString()); + }; + runMetadataValidationTest(::grpc::StatusCode::OK, initContext); +} - Service::RpcHandler metadataHandler = [&serverMetadata](ServerContext& serverCtx, - ServerStream& stream) { - for (auto& kvp : serverMetadata) { - serverCtx.addInitialMetadataEntry(kvp.first, kvp.second); - } - ASSERT_TRUE(stream.write(makeUniqueMessage().sharedBuffer())); - return ::grpc::Status::OK; +TEST_F(CommandServiceTest, ClientIdIsOptional) { + ContextInitializerType initContext = [](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + CommandServiceTestFixtures::addClientMetadataDocument(ctx); }; + runMetadataValidationTest(::grpc::StatusCode::OK, initContext); +} - runTest(metadataHandler, [&](auto&) { - auto stub = makeStub(); +TEST_F(CommandServiceTest, InvalidMetadataDocumentBase64Encoding) { + // The MongoDB gRPC Protocol doesn't specify how an invalid metadata document should be handled, + // and since invalid metadata doesn't affect the server's ability to execute the operation, it + // was decided the server should just continue with the command and log a warning rather than + // returning an error in such cases. 
+ ContextInitializerType initContext = [](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + ctx.AddMetadata(CommandService::kClientMetadataKey.toString(), "notvalidbase64:l;;?"); + }; + runMetadataValidationTest(::grpc::StatusCode::OK, initContext); +} - auto body = [&](::grpc::ClientContext& ctx, auto& stream) { - SharedBuffer msg; - ASSERT_TRUE(stream.Read(&msg)); - auto receivedServerMetadata = ctx.GetServerInitialMetadata(); +TEST_F(CommandServiceTest, InvalidMetadataDocumentBSON) { + ContextInitializerType initContext = [](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + ctx.AddMetadata(CommandService::kClientMetadataKey.toString(), base64::encode("Not BSON")); + }; + runMetadataValidationTest(::grpc::StatusCode::OK, initContext); +} - for (auto& kvp : serverMetadata) { - auto it = receivedServerMetadata.find(kvp.first); - ASSERT_NE(it, receivedServerMetadata.end()); - ASSERT_EQ((std::string_view{it->second.data(), it->second.length()}), kvp.second); - } - }; - stub.executeBoth([](auto&) {}, body); - }); +TEST_F(CommandServiceTest, UnrecognizedReservedKey) { + ContextInitializerType initContext = [](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + ctx.AddMetadata("mongodb-not-recognized", "some value"); + }; + runMetadataValidationTest(::grpc::StatusCode::INVALID_ARGUMENT, initContext); } -TEST_F(ServiceTest, ClientSendsMultipleMessages) { - Service::RpcHandler serverHandler = [](ServerContext& serverCtx, ServerStream& stream) { - size_t nReceived = 0; - while (auto msg = stream.read()) { - nReceived++; +TEST_F(CommandServiceTest, NewClientsAreLogged) { + ContextInitializerType initContext = [clientId = UUID::gen().toString()](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + CommandServiceTestFixtures::addClientMetadataDocument(ctx); + ctx.AddMetadata(CommandService::kClientIdKey.toString(), clientId); + }; + runMetadataLogTest(initContext, + 5, // nStreamsToCreate + 1, // nExpectedLogLines + logv2::LogSeverity::Info()); +} + +TEST_F(CommandServiceTest, OmittedClientIdIsLogged) { + ContextInitializerType initContext = [](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + CommandServiceTestFixtures::addClientMetadataDocument(ctx); + }; + runMetadataLogTest(initContext, + 3, // nStreamsToCreate + 3, // nExpectedLogLines + logv2::LogSeverity::Debug(2)); +} + +TEST_F(CommandServiceTest, NoLogsForMissingMetadataDocument) { + ContextInitializerType initContext = [clientId = UUID::gen().toString()](auto& ctx) { + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + ctx.AddMetadata(CommandService::kClientIdKey.toString(), clientId); + }; + runMetadataLogTest(initContext, + 7, // nStreamsToCreate + 0, // nExpectedLogLines + logv2::LogSeverity::Info()); +} + +TEST_F(CommandServiceTest, ClientSendsMultipleMessages) { + CommandService::RPCHandler serverHandler = [](auto session) { + int nReceived = 0; + try { + while (true) { + uassertStatusOK(session->sourceMessage()); + nReceived++; + } + } catch (ExceptionFor&) { + // Continue to receive client messages until the stream is terminated. 
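+            // The client signals completion by calling WritesDone(), which half-closes the
+            // stream and surfaces here as a non-OK status from sourceMessage().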
} - auto response = SharedBuffer::allocate(sizeof(size_t)); - memcpy(response.get(), &nReceived, sizeof(size_t)); - ASSERT_TRUE(stream.write(response)); - return ::grpc::Status::OK; + + OpMsg response; + response.body = BSON("nReceived" << nReceived); + ASSERT_OK(session->sinkMessage(response.serialize())); }; - runTest(serverHandler, [&](auto&) { - auto stub = makeStub(); + ClientCallbackType clientCallback = [&](auto&, auto stub, auto streamFactory, auto&) { + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addAllClientMetadata(ctx); + auto stream = streamFactory(ctx, stub); - auto body = [&](::grpc::ClientContext& ctx, TestStub::ClientStream& stream) { - size_t nSent = 12; + const int kMessages = 13; + for (auto i = 0; i < kMessages; i++) { auto msg = makeUniqueMessage(); - for (size_t i = 0; i < nSent; i++) { - ASSERT_TRUE(stream.Write(msg.sharedBuffer())); - } - ASSERT_TRUE(stream.WritesDone()); + ASSERT_TRUE(stream->Write(msg.sharedBuffer())); + } + ASSERT_TRUE(stream->WritesDone()); - SharedBuffer serverResponse; - ASSERT_TRUE(stream.Read(&serverResponse)); + SharedBuffer serverResponse; + ASSERT_TRUE(stream->Read(&serverResponse)); - size_t nReceived = *reinterpret_cast(serverResponse.get()); - ASSERT_EQ(nReceived, nSent); - }; - stub.executeBoth([](auto&) {}, body); - }); + auto responseMsg = OpMsg::parse(Message{serverResponse}); + int32_t nReceived = responseMsg.body.getIntField("nReceived"); + ASSERT_EQ(nReceived, kMessages); + }; + + runTestWithBothMethods(serverHandler, clientCallback); } -TEST_F(ServiceTest, ServerSendsMultipleMessages) { - Service::RpcHandler serverHandler = [&](ServerContext& serverCtx, ServerStream& stream) { - size_t nSent = 13; - for (size_t i = 0; i < nSent - 1; i++) { +TEST_F(CommandServiceTest, ServerSendsMultipleMessages) { + CommandService::RPCHandler serverHandler = [](auto session) { + const int kMessages = 17; + for (auto i = 0; i < kMessages - 1; i++) { auto msg = makeUniqueMessage(); OpMsg::setFlag(&msg, OpMsg::kMoreToCome); - ASSERT_TRUE(stream.write(msg.sharedBuffer())); + ASSERT_OK(session->sinkMessage(msg)); } - ASSERT_TRUE(stream.write(makeUniqueMessage().sharedBuffer())); + ASSERT_OK(session->sinkMessage(makeUniqueMessage())); - auto response = stream.read(); - ASSERT_TRUE(response); - ASSERT_EQ(*reinterpret_cast(response->get()), nSent); - return ::grpc::Status::OK; + auto response = OpMsg::parse(uassertStatusOK(session->sourceMessage())); + int32_t nReceived = response.body.getIntField("nReceived"); + ASSERT_EQ(nReceived, kMessages); }; - runTest(serverHandler, [&](auto&) { - auto stub = makeStub(); + ClientCallbackType clientCallback = [](auto&, auto stub, auto streamFactory, auto&) { + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addAllClientMetadata(ctx); + auto stream = streamFactory(ctx, stub); - auto body = [&](::grpc::ClientContext&, TestStub::ClientStream& stream) { - size_t nReceived = 0; - - while (true) { - SharedBuffer buf; - ASSERT_TRUE(stream.Read(&buf)); - nReceived++; + int nReceived = 0; + while (true) { + SharedBuffer buffer; + ASSERT_TRUE(stream->Read(&buffer)); + nReceived++; - if (!OpMsg::isFlagSet(Message{buf}, OpMsg::kMoreToCome)) { - break; - } + if (!OpMsg::isFlagSet(Message{buffer}, OpMsg::kMoreToCome)) { + break; } + } - auto response = SharedBuffer::allocate(sizeof(size_t)); - memcpy(response.get(), &nReceived, sizeof(size_t)); - ASSERT_TRUE(stream.Write(response)); - }; - stub.executeBoth([](auto&) {}, body); - }); + OpMsg response; + response.body = BSON("nReceived" << nReceived); + 
ASSERT_TRUE(stream->Write(response.serialize().sharedBuffer())); + }; + + runTestWithBothMethods(serverHandler, clientCallback); } -TEST_F(ServiceTest, ServerHandlesMultipleClients) { - const auto kMetadataId = "client-thread"; - Service::RpcHandler serverHandler = [&](ServerContext& serverCtx, ServerStream& stream) { - auto threadIdIt = serverCtx.getClientMetadata().find(kMetadataId); - ASSERT_NE(threadIdIt, serverCtx.getClientMetadata().end()); - - LOGV2(7401203, - "ServerHandlesMultipleClients received stream request", - "thread-id"_attr = threadIdIt->second); - serverCtx.addInitialMetadataEntry(kMetadataId, std::string{threadIdIt->second}); - while (auto msg = stream.read()) { - ASSERT_TRUE(stream.write(*msg)); - } - return ::grpc::Status::OK; +TEST_F(CommandServiceTest, MissingAuthToken) { + auto makeStreamWithoutAuthToken = [&](bool useAuth) { + ::grpc::ClientContext ctx; + auto stub = CommandServiceTestFixtures::makeStub(); + ctx.AddMetadata(CommandService::kWireVersionKey.toString(), + std::to_string(wireVersionProvider().getClusterMaxWireVersion())); + auto stream = useAuth ? stub.authenticatedCommandStream(&ctx) + : stub.unauthenticatedCommandStream(&ctx); + return stream->Finish(); + }; + + CommandServiceTestFixtures::runWithServer( + [](auto) {}, + [&](auto&, auto&) { + ASSERT_TRUE(makeStreamWithoutAuthToken(false /* Don't use Auth */).ok()); + }); + + CommandServiceTestFixtures::runWithServer( + [](auto) {}, + [&](auto&, auto&) { + ASSERT_EQ(makeStreamWithoutAuthToken(true /* Use Auth */).error_code(), + ::grpc::StatusCode::UNAUTHENTICATED); + }); +} + +TEST_F(CommandServiceTest, ServerProvidesClusterMaxWireVersion) { + auto serverHandler = [](auto session) { + auto message = uassertStatusOK(session->sourceMessage()); + ASSERT_OK(session->sinkMessage(message)); }; - runTest(serverHandler, [&](unittest::ThreadAssertionMonitor& monitor) { - auto stub = makeStub(); + ClientCallbackType clientCallback = [&](auto&, auto stub, auto streamFactory, auto&) { + ::grpc::ClientContext ctx; + ctx.AddMetadata(CommandService::kAuthenticationTokenKey.toString(), "my-token"); + ctx.AddMetadata(CommandService::kWireVersionKey.toString(), + std::to_string(WireVersion::WIRE_VERSION_50)); + + auto stream = streamFactory(ctx, stub); + ASSERT_TRUE(stream->Write(makeUniqueMessage().sharedBuffer())); + + SharedBuffer buffer; + ASSERT_TRUE(stream->Read(&buffer)); + + auto serverMetadata = ctx.GetServerInitialMetadata(); + auto it = serverMetadata.find(CommandService::kClusterMaxWireVersionKey); + ASSERT_NE(it, serverMetadata.end()); + ASSERT_EQ(it->second, std::to_string(wireVersionProvider().getClusterMaxWireVersion())); + }; + runTestWithBothMethods(serverHandler, clientCallback); +} + +TEST_F(CommandServiceTest, ServerHandlesMultipleClients) { + CommandService::RPCHandler serverHandler = [](auto session) { + while (true) { + try { + auto msg = uassertStatusOK(session->sourceMessage()); + auto response = OpMsg::parseOwned(msg); + response.body = response.body.addFields( + BSON(CommandService::kClientIdKey << session->clientId()->toString())); + ASSERT_OK(session->sinkMessage(response.serialize())); + } catch (ExceptionFor&) { + // Continues to serve the echo commands until the stream is terminated. 
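Editor's note: `ServerProvidesClusterMaxWireVersion` below has the server advertise its cluster maximum wire version in the stream's initial metadata even when the client offered an older version. How a client uses that value is not shown in the diff; the sketch below is one plausible negotiation step (take the minimum of the two maxima, fail if there is no overlap) and the function name is hypothetical.

```cpp
#include <algorithm>
#include <iostream>
#include <optional>

// Hypothetical client-side negotiation against the advertised cluster max wire version.
std::optional<int> negotiateWireVersion(int clientMin, int clientMax, int serverClusterMax) {
    int agreed = std::min(clientMax, serverClusterMax);
    if (agreed < clientMin)
        return std::nullopt;  // no overlap; the stream should be failed
    return agreed;
}

int main() {
    // e.g. a client speaking wire versions 9..13 against a cluster maximum of 21
    if (auto v = negotiateWireVersion(9, 13, 21))
        std::cout << "using wire version " << *v << '\n';  // prints 13
}
```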
+ return; + } + } + }; + + ClientCallbackType clientCallback = [](auto&, auto stub, auto streamFactory, auto& monitor) { + const auto kNumClients = 10; std::vector threads; - for (size_t i = 0; i < 10; i++) { + for (auto i = 0; i < kNumClients; i++) { threads.push_back(monitor.spawn([&, i] { - auto makeCtx = [i, &kMetadataId](::grpc::ClientContext& ctx) { - ctx.AddMetadata(kMetadataId, std::to_string(i)); - }; + const auto clientId = UUID::gen().toString(); - auto body = [&, i](::grpc::ClientContext& ctx, TestStub::ClientStream& stream) { - auto msg = makeUniqueMessage(); - ASSERT_TRUE(stream.Write(msg.sharedBuffer())); + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + ctx.AddMetadata(std::string{CommandService::kClientIdKey}, clientId); + CommandServiceTestFixtures::addClientMetadataDocument(ctx); - SharedBuffer receivedMsg; - ASSERT_TRUE(stream.Read(&receivedMsg)); - ASSERT_EQ_MSG(Message{receivedMsg}, msg); + auto stream = streamFactory(ctx, stub); - auto serverMetadata = ctx.GetServerInitialMetadata(); - auto threadIdIt = serverMetadata.find(kMetadataId); - ASSERT_NE(threadIdIt, serverMetadata.end()); - ASSERT_EQ(threadIdIt->second, std::to_string(i)); - }; + OpMsg msg; + msg.body = BSON("thread" << i); + ASSERT_TRUE(stream->Write(msg.serialize().sharedBuffer())); - stub.executeBoth(makeCtx, body); + SharedBuffer receivedMsg; + ASSERT_TRUE(stream->Read(&receivedMsg)); + + auto response = OpMsg::parse(Message{receivedMsg}); + ASSERT_EQ(response.body.getIntField("thread"), i); + ASSERT_EQ(response.body.getStringField(CommandService::kClientIdKey), clientId); })); } for (auto& t : threads) { t.join(); } - }); + }; + + runTestWithBothMethods(serverHandler, clientCallback); } -class GRPCInteropTest : public unittest::Test {}; +TEST_F(CommandServiceTest, ServerHandlerThrows) { + CommandService::RPCHandler serverHandler = [](auto) { + iasserted(Status{ErrorCodes::StreamTerminated, "test error"}); + }; -TEST_F(GRPCInteropTest, SharedBufferDeserialize) { - std::string_view expected{"foobar"}; + ClientCallbackType clientCallback = [](auto&, auto stub, auto streamFactory, auto&) { + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + auto stream = streamFactory(ctx, stub); + ASSERT_NE(stream->Finish().error_code(), ::grpc::StatusCode::OK); + }; - auto deserializationTest = [&](std::vector<::grpc::Slice> slices) { - ::grpc::ByteBuffer buffer(&slices[0], slices.size()); + runTestWithBothMethods(serverHandler, clientCallback); +} + +TEST_F(CommandServiceTest, Shutdown) { + std::unique_ptr> rpcStarted; + std::unique_ptr> rpcTerminated; + + ClientCallbackType clientCallback = [&](auto& server, auto stub, auto streamFactory, auto&) { + // Initialize the notifications for this run. 
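Editor's note: the `Shutdown` test above coordinates the client thread and the RPC handler through two single-shot notification objects (their template parameters are truncated in this diff). The stand-in below, built from `std::promise`/`std::future`, shows only the handshake shape; it is not the unittest `Notification` type used by the test.

```cpp
#include <future>
#include <iostream>
#include <string>
#include <thread>

// Stand-in single-shot notification: set() publishes a value once, get() blocks until it arrives.
template <typename T>
class Notification {
public:
    void set(T value) { _promise.set_value(std::move(value)); }
    T get() { return _future.get(); }

private:
    std::promise<T> _promise;
    std::future<T> _future{_promise.get_future()};
};

int main() {
    Notification<std::string> rpcStarted;
    Notification<std::string> rpcTerminated;

    std::thread handler([&] {
        rpcStarted.set("started");
        // ... the real handler observes the failed read after shutdown here ...
        rpcTerminated.set("ShutdownInProgress");
    });

    std::cout << rpcStarted.get() << '\n';     // client waits for the RPC to begin
    std::cout << rpcTerminated.get() << '\n';  // then waits for its termination status
    handler.join();
}
```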
+ rpcStarted = std::make_unique>(); + rpcTerminated = std::make_unique>(); + + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addRequiredClientMetadata(ctx); + auto stream = streamFactory(ctx, stub); + + rpcStarted->get(); + server.shutdown(); + auto serverTerminationStatus = rpcTerminated->get(); + + SharedBuffer buffer; + ASSERT_FALSE(stream->Read(&buffer)); + ASSERT_EQ(stream->Finish().error_code(), + util::errorToStatusCode(serverTerminationStatus.code())); + }; + CommandService::RPCHandler serverHandler = [&](auto session) { + rpcStarted->set(); + ASSERT_NOT_OK(session->sourceMessage()); + + auto ts = session->terminationStatus(); + ASSERT_TRUE(ts); + ASSERT_NOT_OK(*ts); + rpcTerminated->set(*ts); + }; + + runTestWithBothMethods(serverHandler, clientCallback); +} + +TEST(GRPCInteropTest, SharedBufferDeserialize) { + auto deserializationTest = [](std::vector<::grpc::Slice> slices, std::string_view expected) { SharedBuffer out; + ::grpc::ByteBuffer buffer(&slices[0], slices.size()); auto status = ::grpc::SerializationTraits::Deserialize(&buffer, &out); - ASSERT_TRUE(status.ok()) << "expected deserialization to succed but got error: " + ASSERT_TRUE(status.ok()) << "expected deserialization to succeed: " << status.error_message(); ASSERT_EQ((std::string_view{out.get(), expected.length()}), expected); }; - deserializationTest({std::string{"foo"}, std::string{"bar"}}); - deserializationTest({std::string{"foobar"}}); + std::string_view expected{"foobar"}; + deserializationTest({std::string{"foobar"}}, expected); + deserializationTest({std::string{"foo"}, std::string{"bar"}}, expected); } -TEST_F(GRPCInteropTest, SerializationRoundTrip) { - ::grpc::ByteBuffer grpcBuf; +TEST(GRPCInteropTest, SerializationRoundTrip) { + ::grpc::ByteBuffer grpcBuffer; auto message = makeUniqueMessage(); { - bool own_buf; - auto newBuf = SharedBuffer::allocate(message.capacity()); - std::memcpy(newBuf.get(), message.buf(), newBuf.capacity()); - auto status = - ::grpc::SerializationTraits::Serialize(newBuf, &grpcBuf, &own_buf); + auto buffer = SharedBuffer::allocate(message.capacity()); + std::memcpy(buffer.get(), message.buf(), buffer.capacity()); + bool ownsBuffer; + auto status = ::grpc::SerializationTraits::Serialize( + buffer, &grpcBuffer, &ownsBuffer); ASSERT_TRUE(status.ok()) << "expected serialization to succeed: " << status.error_message(); } - // Even though the source buffer is out of scope, the serialized gRPC ByteBuffer should still be - // valid. - SharedBuffer outputBuffer; - auto status = ::grpc::SerializationTraits::Deserialize(&grpcBuf, &outputBuffer); + // The source buffer is out of scope, but the serialized gRPC ByteBuffer should still be valid. 
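Editor's note: `SharedBufferDeserialize` below feeds the serialization traits both a single slice ("foobar") and two slices ("foo", "bar") and expects the same payload out. Assuming the deserializer simply concatenates slices into one contiguous buffer, which is what the assertion implies, the core copy loop looks roughly like this sketch (standard-library types stand in for gRPC slices and SharedBuffer).

```cpp
#include <cstring>
#include <iostream>
#include <numeric>
#include <string>
#include <vector>

// Flatten a sequence of slices into one contiguous buffer, the way a
// SerializationTraits Deserialize implementation for SharedBuffer might.
std::vector<char> flatten(const std::vector<std::string>& slices) {
    size_t total = std::accumulate(
        slices.begin(), slices.end(), size_t{0}, [](size_t n, const std::string& s) {
            return n + s.size();
        });
    std::vector<char> out(total);
    size_t offset = 0;
    for (const auto& slice : slices) {
        std::memcpy(out.data() + offset, slice.data(), slice.size());
        offset += slice.size();
    }
    return out;
}

int main() {
    auto a = flatten({"foobar"});
    auto b = flatten({"foo", "bar"});
    std::cout << (a == b) << '\n';  // prints 1: both spell "foobar"
}
```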
+ SharedBuffer buffer; + auto status = ::grpc::SerializationTraits::Deserialize(&grpcBuffer, &buffer); ASSERT_TRUE(status.ok()) << "expected deserialization to succeed: " << status.error_message(); - - ASSERT_EQ_MSG(Message{outputBuffer}, message); + ASSERT_EQ_MSG(Message{buffer}, message); } -TEST_F(GRPCInteropTest, URIParsing) { +TEST(GRPCInteropTest, URIParsing) { { HostAndPort hp = GRPCServerContext::parseURI("ipv4:127.0.0.1"); ASSERT_TRUE(hp.isLocalHost()); @@ -413,4 +677,5 @@ TEST_F(GRPCInteropTest, URIParsing) { } } +} // namespace } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/test_fixtures.h b/src/mongo/transport/grpc/test_fixtures.h index 29310ab6fef73..3f4fb06f1e2a7 100644 --- a/src/mongo/transport/grpc/test_fixtures.h +++ b/src/mongo/transport/grpc/test_fixtures.h @@ -33,8 +33,15 @@ #include #include -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" +#include +#include +#include +#include +#include + +#include "mongo/db/service_context_test_fixture.h" #include "mongo/rpc/message.h" +#include "mongo/rpc/metadata/client_metadata.h" #include "mongo/rpc/op_msg.h" #include "mongo/transport/grpc/bidirectional_pipe.h" #include "mongo/transport/grpc/metadata.h" @@ -42,8 +49,16 @@ #include "mongo/transport/grpc/mock_client_stream.h" #include "mongo/transport/grpc/mock_server_context.h" #include "mongo/transport/grpc/mock_server_stream.h" +#include "mongo/transport/grpc/mock_stub.h" +#include "mongo/transport/grpc/server.h" +#include "mongo/transport/grpc/service.h" +#include "mongo/transport/grpc/util.h" +#include "mongo/unittest/thread_assertion_monitor.h" #include "mongo/util/clock_source_mock.h" #include "mongo/util/net/hostandport.h" +#include "mongo/util/net/socket_utils.h" +#include "mongo/util/net/ssl_util.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/uuid.h" namespace mongo::transport::grpc { @@ -57,31 +72,59 @@ inline Message makeUniqueMessage() { } struct MockStreamTestFixtures { - MockStreamTestFixtures(HostAndPort hostAndPort, - Milliseconds timeout, - MetadataView clientMetadata) { - BidirectionalPipe pipe; - auto promiseAndFuture = makePromiseFuture(); - - serverStream = std::make_unique(hostAndPort, - timeout, - std::move(promiseAndFuture.promise), - std::move(*pipe.left), - clientMetadata); - serverCtx = std::make_unique(serverStream.get()); - - clientStream = std::make_unique( - hostAndPort, timeout, std::move(promiseAndFuture.future), std::move(*pipe.right)); - clientCtx = std::make_unique(clientStream.get()); + std::shared_ptr clientStream; + std::shared_ptr clientCtx; + std::unique_ptr rpc; +}; + +class MockStubTestFixtures { +public: + static constexpr auto kBindAddress = "localhost:1234"; + static constexpr auto kClientAddress = "abc:5678"; + + MockStubTestFixtures() { + MockRPCQueue::Pipe pipe; + + _channel = std::make_shared( + HostAndPort(kClientAddress), HostAndPort(kBindAddress), std::move(pipe.producer)); + _server = std::make_unique(std::move(pipe.consumer)); + } + + MockStub makeStub() { + return MockStub(_channel); + } + + std::unique_ptr makeStreamTestFixtures( + Date_t deadline, const MetadataView& clientMetadata) { + MockStreamTestFixtures fixtures{nullptr, std::make_shared(), nullptr}; + + auto clientThread = stdx::thread([&] { + fixtures.clientCtx->setDeadline(deadline); + for (auto& kvp : clientMetadata) { + fixtures.clientCtx->addMetadataEntry(kvp.first.toString(), kvp.second.toString()); + } + fixtures.clientStream = + 
makeStub().unauthenticatedCommandStream(fixtures.clientCtx.get()); + }); + + auto rpc = getServer().acceptRPC(); + ASSERT_TRUE(rpc); + fixtures.rpc = std::make_unique(std::move(*rpc)); + clientThread.join(); + + return std::make_unique(std::move(fixtures)); + } + + MockServer& getServer() { + return *_server; } - std::unique_ptr clientStream; - std::unique_ptr clientCtx; - std::unique_ptr serverStream; - std::unique_ptr serverCtx; +private: + std::unique_ptr _server; + std::shared_ptr _channel; }; -class ServiceContextWithClockSourceMockTest : public LockerNoopServiceContextTest { +class ServiceContextWithClockSourceMockTest : public ServiceContextTest { public: void setUp() override { _clkSource = std::make_shared(); @@ -99,4 +142,169 @@ class ServiceContextWithClockSourceMockTest : public LockerNoopServiceContextTes std::shared_ptr _clkSource; }; +class CommandServiceTestFixtures { +public: + static constexpr auto kBindAddress = "localhost"; + static constexpr auto kBindPort = 1234; + static constexpr auto kMaxThreads = 12; + static constexpr auto kServerCertificateKeyFile = "jstests/libs/server.pem"; + static constexpr auto kClientCertificateKeyFile = "jstests/libs/client.pem"; + static constexpr auto kClientSelfSignedCertificateKeyFile = + "jstests/libs/client-self-signed.pem"; + static constexpr auto kCAFile = "jstests/libs/ca.pem"; + + class Stub { + public: + using ReadMessageType = SharedBuffer; + using WriteMessageType = ConstSharedBuffer; + using ClientStream = ::grpc::ClientReaderWriter; + + struct Options { + boost::optional tlsCertificateKeyFile; + boost::optional tlsCAFile; + }; + + Stub(const std::shared_ptr<::grpc::ChannelInterface>& channel) + : _channel(channel), + _unauthenticatedCommandStreamMethod( + CommandService::kUnauthenticatedCommandStreamMethodName, + ::grpc::internal::RpcMethod::BIDI_STREAMING, + channel), + _authenticatedCommandStreamMethod( + CommandService::kAuthenticatedCommandStreamMethodName, + ::grpc::internal::RpcMethod::BIDI_STREAMING, + channel) {} + + ::grpc::Status connect() { + ::grpc::ClientContext ctx; + CommandServiceTestFixtures::addAllClientMetadata(ctx); + auto stream = unauthenticatedCommandStream(&ctx); + return stream->Finish(); + } + + std::shared_ptr authenticatedCommandStream(::grpc::ClientContext* context) { + return std::shared_ptr{ + ::grpc::internal::ClientReaderWriterFactory:: + Create(_channel.get(), _authenticatedCommandStreamMethod, context)}; + } + + std::shared_ptr unauthenticatedCommandStream(::grpc::ClientContext* context) { + return std::shared_ptr{ + ::grpc::internal::ClientReaderWriterFactory:: + Create(_channel.get(), _unauthenticatedCommandStreamMethod, context)}; + } + + private: + std::shared_ptr<::grpc::ChannelInterface> _channel; + ::grpc::internal::RpcMethod _unauthenticatedCommandStreamMethod; + ::grpc::internal::RpcMethod _authenticatedCommandStreamMethod; + }; + + static Server::Options makeServerOptions() { + Server::Options options; + options.addresses = std::vector{kBindAddress}; + options.port = kBindPort; + options.maxThreads = kMaxThreads; + options.tlsCAFile = kCAFile; + options.tlsPEMKeyFile = kServerCertificateKeyFile; + options.tlsAllowInvalidCertificates = false; + options.tlsAllowConnectionsWithoutCertificates = false; + return options; + } + + static Server makeServer(CommandService::RPCHandler handler, Server::Options options) { + std::vector> services; + services.push_back(std::make_unique( + /* GRPCTransportLayer */ nullptr, + std::move(handler), + std::make_shared())); + + return 
Server{std::move(services), std::move(options)}; + } + + /** + * Starts up a gRPC server with a CommandService registered that uses the provided handler for + * both RPC methods. Executes the clientThreadBody in a separate thread and then waits for it to + * exit before shutting down the server. + * + * The IngressSession passed to the provided RPC handler is automatically ended after the + * handler is returned. + */ + static void runWithServer( + CommandService::RPCHandler callback, + std::function clientThreadBody, + Server::Options options = makeServerOptions()) { + unittest::threadAssertionMonitoredTest([&](unittest::ThreadAssertionMonitor& monitor) { + auto handler = [callback](auto session) { + ON_BLOCK_EXIT([&] { session->end(); }); + callback(session); + }; + auto server = makeServer(std::move(handler), std::move(options)); + server.start(); + + auto clientThread = monitor.spawn([&] { clientThreadBody(server, monitor); }); + + clientThread.join(); + if (server.isRunning()) { + server.shutdown(); + } + }); + } + + static Stub makeStub(boost::optional options = boost::none) { + return makeStub("localhost:{}"_format(kBindPort), options); + } + + static Stub makeStub(StringData address, boost::optional options = boost::none) { + if (!options) { + options.emplace(); + options->tlsCAFile = kCAFile; + options->tlsCertificateKeyFile = kClientCertificateKeyFile; + } + + ::grpc::SslCredentialsOptions sslOps; + if (options->tlsCertificateKeyFile) { + auto certKeyPair = util::parsePEMKeyFile(*options->tlsCertificateKeyFile); + sslOps.pem_cert_chain = std::move(certKeyPair.cert_chain); + sslOps.pem_private_key = std::move(certKeyPair.private_key); + } + if (options->tlsCAFile) { + sslOps.pem_root_certs = ssl_util::readPEMFile(options->tlsCAFile.get()).getValue(); + } + auto credentials = ::grpc::SslCredentials(sslOps); + + return Stub{::grpc::CreateChannel(address.toString(), credentials)}; + } + + /** + * Sets the metadata entries necessary to ensure any CommandStream RPC can succeed. This may set + * a superset of the required metadata for any individual RPC. + */ + static void addRequiredClientMetadata(::grpc::ClientContext& ctx) { + ctx.AddMetadata( + CommandService::kWireVersionKey.toString(), + std::to_string(WireSpec::instance().get()->incomingExternalClient.maxWireVersion)); + ctx.AddMetadata(CommandService::kAuthenticationTokenKey.toString(), "my-token"); + } + + static void addClientMetadataDocument(::grpc::ClientContext& ctx) { + static constexpr auto kDriverName = "myDriver"; + static constexpr auto kDriverVersion = "0.1.2"; + static constexpr auto kAppName = "MyAppName"; + + BSONObjBuilder bob; + ASSERT_OK(ClientMetadata::serialize(kDriverName, kDriverVersion, kAppName, &bob)); + auto metadataDoc = bob.obj(); + auto clientDoc = metadataDoc.getObjectField(kMetadataDocumentName); + ctx.AddMetadata(CommandService::kClientMetadataKey.toString(), + base64::encode(clientDoc.objdata(), clientDoc.objsize())); + } + + static void addAllClientMetadata(::grpc::ClientContext& ctx) { + addRequiredClientMetadata(ctx); + ctx.AddMetadata(CommandService::kClientIdKey.toString(), UUID::gen().toString()); + addClientMetadataDocument(ctx); + } +}; + } // namespace mongo::transport::grpc diff --git a/src/mongo/transport/grpc/util.cpp b/src/mongo/transport/grpc/util.cpp new file mode 100644 index 0000000000000..aaca6a794593f --- /dev/null +++ b/src/mongo/transport/grpc/util.cpp @@ -0,0 +1,101 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. 
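Editor's note: putting the `CommandServiceTestFixtures` helpers above together, a new test would typically look like the hypothetical echo round trip below. Every call it makes (`runWithServer`, `makeStub`, `addAllClientMetadata`, `sourceMessage`/`sinkMessage`, `Write`/`Read`) appears elsewhere in this diff; the test name and handler body are illustrative only, and the snippet is not runnable outside the server's test tree.

```cpp
// Hypothetical test showing the intended flow of the fixtures: start a server with a trivial
// echo handler, build a TLS stub, attach the required metadata, and exchange one message.
TEST_F(CommandServiceTest, EchoRoundTripExample) {
    CommandService::RPCHandler serverHandler = [](auto session) {
        auto msg = uassertStatusOK(session->sourceMessage());
        ASSERT_OK(session->sinkMessage(std::move(msg)));
    };

    CommandServiceTestFixtures::runWithServer(serverHandler, [&](auto&, auto&) {
        auto stub = CommandServiceTestFixtures::makeStub();
        ::grpc::ClientContext ctx;
        CommandServiceTestFixtures::addAllClientMetadata(ctx);

        auto stream = stub.unauthenticatedCommandStream(&ctx);
        auto request = makeUniqueMessage();
        ASSERT_TRUE(stream->Write(request.sharedBuffer()));

        SharedBuffer reply;
        ASSERT_TRUE(stream->Read(&reply));
        ASSERT_EQ_MSG(Message{reply}, request);
    });
}
```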
+ * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#include + +#include "mongo/transport/grpc/util.h" + +#include "mongo/util/assert_util.h" +#include "mongo/util/net/ssl_util.h" +#include "mongo/util/testing_proctor.h" + +using namespace fmt::literals; + +namespace mongo::transport::grpc::util { + +::grpc::SslServerCredentialsOptions::PemKeyCertPair parsePEMKeyFile(StringData filePath) { + + ::grpc::SslServerCredentialsOptions::PemKeyCertPair certPair; + + auto certificateKeyFileContents = uassertStatusOK(ssl_util::readPEMFile(filePath)); + certPair.cert_chain = certificateKeyFileContents; + certPair.private_key = certificateKeyFileContents; + + return certPair; +} + +ErrorCodes::Error statusToErrorCode(::grpc::StatusCode statusCode) { + switch (statusCode) { + case ::grpc::OK: + return ErrorCodes::OK; + case ::grpc::UNAUTHENTICATED: + return ErrorCodes::AuthenticationFailed; + case ::grpc::CANCELLED: + return ErrorCodes::CallbackCanceled; + case ::grpc::INVALID_ARGUMENT: + return ErrorCodes::BadValue; + case ::grpc::DEADLINE_EXCEEDED: + return ErrorCodes::ExceededTimeLimit; + case ::grpc::FAILED_PRECONDITION: + return ErrorCodes::RPCProtocolNegotiationFailed; + case ::grpc::UNIMPLEMENTED: + return ErrorCodes::NotImplemented; + case ::grpc::INTERNAL: + return ErrorCodes::InternalError; + case ::grpc::UNAVAILABLE: + return ErrorCodes::HostUnreachable; + case ::grpc::PERMISSION_DENIED: + return ErrorCodes::Unauthorized; + case ::grpc::RESOURCE_EXHAUSTED: + return ErrorCodes::ResourceExhausted; + default: + return ErrorCodes::UnknownError; + } +} + +::grpc::StatusCode errorToStatusCode(ErrorCodes::Error errorCode) { + switch (errorCode) { + case ErrorCodes::OK: + return ::grpc::OK; + case ErrorCodes::UnknownError: + return ::grpc::UNKNOWN; + case ErrorCodes::InterruptedAtShutdown: + case ErrorCodes::ShutdownInProgress: + return ::grpc::UNAVAILABLE; + case ErrorCodes::CallbackCanceled: + case ErrorCodes::ClientMarkedKilled: + return ::grpc::CANCELLED; + default: + invariant(TestingProctor::instance().isEnabled(), + "No known conversion for MongoDB error code: "_format(errorCode)); + return ::grpc::UNKNOWN; + } +} + +} // namespace mongo::transport::grpc::util diff --git a/src/mongo/transport/grpc/util.h 
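Editor's note: because several MongoDB error codes collapse onto a single gRPC status in the switches above, the two conversion functions do not invert each other. Using only codes that appear in those switches, a unit test could check, for example:

```cpp
// Stable round trip: CANCELLED <-> CallbackCanceled.
ASSERT_EQ(util::errorToStatusCode(util::statusToErrorCode(::grpc::CANCELLED)), ::grpc::CANCELLED);

// Lossy direction: ShutdownInProgress maps to UNAVAILABLE, but UNAVAILABLE maps back to
// HostUnreachable, so composing the two conversions need not return the starting code.
ASSERT_EQ(util::errorToStatusCode(ErrorCodes::ShutdownInProgress), ::grpc::UNAVAILABLE);
ASSERT_EQ(util::statusToErrorCode(::grpc::UNAVAILABLE), ErrorCodes::HostUnreachable);
```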
b/src/mongo/transport/grpc/util.h new file mode 100644 index 0000000000000..539152bc48ffe --- /dev/null +++ b/src/mongo/transport/grpc/util.h @@ -0,0 +1,75 @@ +/** + * Copyright (C) 2023-present MongoDB, Inc. + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the Server Side Public License, version 1, + * as published by MongoDB, Inc. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * Server Side Public License for more details. + * + * You should have received a copy of the Server Side Public License + * along with this program. If not, see + * . + * + * As a special exception, the copyright holders give permission to link the + * code of portions of this program with the OpenSSL library under certain + * conditions as described in each individual source file and distribute + * linked combinations including the program with the OpenSSL library. You + * must comply with the Server Side Public License in all respects for + * all of the code used other than as permitted herein. If you modify file(s) + * with this exception, you may extend this exception to your version of the + * file(s), but you are not obligated to do so. If you do not wish to do so, + * delete this exception statement from your version. If you delete this + * exception statement from all source files in the program, then also delete + * it in the license file. + */ + +#pragma once + +#include + +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" + +namespace mongo::transport::grpc::util { + +/** + * Parse a PEM-encoded file that contains a single certificate and its associated private key + * into a PemKeyCertPair. + */ +::grpc::SslServerCredentialsOptions::PemKeyCertPair parsePEMKeyFile(StringData filePath); + +/** + * Converts a gRPC status code into its corresponding MongoDB error code. + */ +ErrorCodes::Error statusToErrorCode(::grpc::StatusCode statusCode); + +/** + * Converts a MongoDB error code into its corresponding gRPC status code. + * Note that the mapping between gRPC status codes and MongoDB errors codes is not 1 to 1, so the + * following does not have to evaluate to true: + * `errorToStatusCode(statusToErrorCode(sc)) == sc` + */ +::grpc::StatusCode errorToStatusCode(ErrorCodes::Error errorCode); + +/** + * Converts a MongoDB status to its gRPC counterpart, and vice versa. + * Prefer using this over direct invocations of `errorToStatusCode` and `statusToErrorCode`. 
+ */ +template +inline auto convertStatus(StatusType status) { + if constexpr (std::is_same::value) { + return ::grpc::Status(errorToStatusCode(status.code()), status.reason()); + } else { + static_assert(std::is_same::value == true); + return Status(statusToErrorCode(status.error_code()), status.error_message()); + } +} + +} // namespace mongo::transport::grpc::util diff --git a/src/mongo/transport/hello_metrics.cpp b/src/mongo/transport/hello_metrics.cpp index 786ed73bd99dd..925abf25aeba7 100644 --- a/src/mongo/transport/hello_metrics.cpp +++ b/src/mongo/transport/hello_metrics.cpp @@ -28,6 +28,10 @@ */ #include "mongo/transport/hello_metrics.h" +#include + +#include "mongo/util/decorable.h" + namespace mongo { namespace { const auto HelloMetricsDecoration = ServiceContext::declareDecoration(); diff --git a/src/mongo/transport/hello_metrics.h b/src/mongo/transport/hello_metrics.h index 6d6acf49e2b90..57e36e6225381 100644 --- a/src/mongo/transport/hello_metrics.h +++ b/src/mongo/transport/hello_metrics.h @@ -29,9 +29,13 @@ #pragma once +#include + +#include "mongo/base/string_data.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" +#include "mongo/transport/session.h" namespace mongo { diff --git a/src/mongo/transport/ingress_handshake_metrics.cpp b/src/mongo/transport/ingress_handshake_metrics.cpp index 891deac89f697..954af23f74fff 100644 --- a/src/mongo/transport/ingress_handshake_metrics.cpp +++ b/src/mongo/transport/ingress_handshake_metrics.cpp @@ -28,10 +28,25 @@ */ #include "mongo/transport/ingress_handshake_metrics.h" +#include +#include +#include + +#include +#include + +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/commands/server_status_metric.h" #include "mongo/db/connection_health_metrics_parameter_gen.h" -#include "mongo/db/server_feature_flags_gen.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/compiler.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" #include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork @@ -41,17 +56,11 @@ namespace { const auto getIngressHandshakeMetricsDecoration = Session::declareDecoration(); -bool connHealthMetricsEnabled() { - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. 
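Editor's note: `convertStatus` above dispatches on the input type at compile time, so one helper converts in both directions. Its exact template declaration is truncated in this diff, but given the two branches shown, usage would look roughly like the following (the assertions via `invariant` are illustrative only):

```cpp
// mongo::Status -> ::grpc::Status
Status mongoStatus(ErrorCodes::CallbackCanceled, "operation interrupted");
::grpc::Status grpcStatus = util::convertStatus(mongoStatus);
invariant(grpcStatus.error_code() == ::grpc::CANCELLED);

// ::grpc::Status -> mongo::Status, carrying the reason/message along
Status roundTripped = util::convertStatus(grpcStatus);
invariant(roundTripped.code() == ErrorCodes::CallbackCanceled);
```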
- return gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe(); -} - bool connHealthMetricsLoggingEnabled() { return gEnableDetailedConnectionHealthMetricLogLines; } -CounterMetric totalTimeToFirstNonAuthCommandMillis("network.totalTimeToFirstNonAuthCommandMillis", - connHealthMetricsEnabled); +CounterMetric totalTimeToFirstNonAuthCommandMillis("network.totalTimeToFirstNonAuthCommandMillis"); } // namespace IngressHandshakeMetrics& IngressHandshakeMetrics::get(Session& session) { @@ -59,9 +68,6 @@ IngressHandshakeMetrics& IngressHandshakeMetrics::get(Session& session) { } void IngressHandshakeMetrics::onSessionStarted(TickSource* tickSource) { - if (!connHealthMetricsEnabled()) - return; - invariant(!_tickSource); invariant(!_sessionStartedTicks); @@ -70,7 +76,7 @@ void IngressHandshakeMetrics::onSessionStarted(TickSource* tickSource) { } void IngressHandshakeMetrics::onCommandReceived(const Command* command) { - if (MONGO_likely(_firstNonAuthCommandTicks || !connHealthMetricsEnabled())) + if (MONGO_likely(_firstNonAuthCommandTicks)) return; invariant(_sessionStartedTicks); @@ -105,7 +111,7 @@ void IngressHandshakeMetrics::onCommandReceived(const Command* command) { void IngressHandshakeMetrics::onCommandProcessed(const Command* command, rpc::ReplyBuilderInterface* response) { - if (MONGO_likely(_firstNonAuthCommandTicks || _helloSucceeded || !connHealthMetricsEnabled())) + if (MONGO_likely(_firstNonAuthCommandTicks || _helloSucceeded)) return; if (command->handshakeRole() != Command::HandshakeRole::kHello) @@ -123,8 +129,7 @@ void IngressHandshakeMetrics::onCommandProcessed(const Command* command, void IngressHandshakeMetrics::onResponseSent(Milliseconds processingDuration, Milliseconds sendingDuration) { - if (MONGO_likely(_helloSucceeded || !_helloReceivedTime) || !connHealthMetricsEnabled() || - !connHealthMetricsLoggingEnabled()) + if (MONGO_likely(_helloSucceeded || !_helloReceivedTime) || !connHealthMetricsLoggingEnabled()) return; LOGV2(6724100, diff --git a/src/mongo/transport/ingress_handshake_metrics.h b/src/mongo/transport/ingress_handshake_metrics.h index 74ea3a036cab1..bf4748e375e4d 100644 --- a/src/mongo/transport/ingress_handshake_metrics.h +++ b/src/mongo/transport/ingress_handshake_metrics.h @@ -29,12 +29,18 @@ #pragma once +#include #include +#include #include "mongo/db/commands.h" +#include "mongo/db/operation_context.h" +#include "mongo/rpc/op_msg.h" +#include "mongo/rpc/reply_builder_interface.h" #include "mongo/transport/session.h" #include "mongo/util/duration.h" #include "mongo/util/tick_source.h" +#include "mongo/util/time_support.h" namespace mongo::transport { diff --git a/src/mongo/transport/max_conns_override_test.cpp b/src/mongo/transport/max_conns_override_test.cpp index 63ce06383575a..9e04fe723fe57 100644 --- a/src/mongo/transport/max_conns_override_test.cpp +++ b/src/mongo/transport/max_conns_override_test.cpp @@ -27,11 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include "mongo/base/string_data.h" #include "mongo/transport/mock_session.h" #include "mongo/transport/service_entry_point_impl.h" -#include "mongo/unittest/unittest.h" +#include "mongo/transport/session.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/net/cidr.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/sockaddr.h" namespace mongo { namespace { diff --git a/src/mongo/transport/message_compressor_manager.cpp b/src/mongo/transport/message_compressor_manager.cpp index e3a7f431b2072..1033e40e7c5c8 100644 --- a/src/mongo/transport/message_compressor_manager.cpp +++ b/src/mongo/transport/message_compressor_manager.cpp @@ -28,18 +28,32 @@ */ -#include "mongo/platform/basic.h" +#include +#include +#include +#include +#include +#include -#include "mongo/transport/message_compressor_manager.h" +#include +#include "mongo/base/data_range.h" #include "mongo/base/data_range_cursor.h" #include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" +#include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/rpc/message.h" +#include "mongo/transport/message_compressor_manager.h" #include "mongo/transport/message_compressor_registry.h" #include "mongo/transport/session.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/decorable.h" +#include "mongo/util/shared_buffer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/transport/message_compressor_manager.h b/src/mongo/transport/message_compressor_manager.h index 42fdb16bc5a8f..7aadc7024076d 100644 --- a/src/mongo/transport/message_compressor_manager.h +++ b/src/mongo/transport/message_compressor_manager.h @@ -29,17 +29,24 @@ #pragma once +#include +#include +#include + #include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/rpc/message.h" #include "mongo/transport/message_compressor_base.h" #include "mongo/transport/session.h" -#include - namespace mongo { class BSONObj; class BSONObjBuilder; class Message; + class MessageCompressorRegistry; class MessageCompressorManager { @@ -62,14 +69,14 @@ class MessageCompressorManager { MessageCompressorManager& operator=(MessageCompressorManager&&) = default; /* - * Called by a client constructing an isMaster request. This function will append the result + * Called by a client constructing a "hello" request. This function will append the result * of _registry->getCompressorNames() to the BSONObjBuilder as a BSON array. If no compressors * are configured, it won't append anything. */ void clientBegin(BSONObjBuilder* output); /* - * Called by a client that has received an isMaster response (received after calling + * Called by a client that has received a "hello" response (received after calling * clientBegin) and wants to finish negotiating compression. * * This looks for a BSON array called "compression" with the server's list of @@ -79,7 +86,7 @@ class MessageCompressorManager { void clientFinish(const BSONObj& input); /* - * Called by a server that has received an isMaster request. + * Called by a server that has received a "hello" request. 
* * If no compressors are configured that match those requested by the client, then it will * not append anything to the BSONObjBuilder output. diff --git a/src/mongo/transport/message_compressor_manager_test.cpp b/src/mongo/transport/message_compressor_manager_test.cpp index cbb9a7378a4e0..011abf45eab01 100644 --- a/src/mongo/transport/message_compressor_manager_test.cpp +++ b/src/mongo/transport/message_compressor_manager_test.cpp @@ -28,13 +28,28 @@ */ -#include "mongo/platform/basic.h" - +#include +#include +#include +#include +#include +#include #include #include +#include #include +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/data_range_cursor.h" +#include "mongo/base/data_type_endian.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/rpc/message.h" #include "mongo/transport/message_compressor_manager.h" #include "mongo/transport/message_compressor_noop.h" @@ -42,7 +57,11 @@ #include "mongo/transport/message_compressor_snappy.h" #include "mongo/transport/message_compressor_zlib.h" #include "mongo/transport/message_compressor_zstd.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/shared_buffer.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/transport/message_compressor_metrics.cpp b/src/mongo/transport/message_compressor_metrics.cpp index d231a106407ed..04a27fa545471 100644 --- a/src/mongo/transport/message_compressor_metrics.cpp +++ b/src/mongo/transport/message_compressor_metrics.cpp @@ -27,9 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonmisc.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/transport/message_compressor_base.h" #include "mongo/transport/message_compressor_registry.h" namespace mongo { diff --git a/src/mongo/transport/message_compressor_registry.cpp b/src/mongo/transport/message_compressor_registry.cpp index cbb77606b9200..d4b0350420b62 100644 --- a/src/mongo/transport/message_compressor_registry.cpp +++ b/src/mongo/transport/message_compressor_registry.cpp @@ -27,20 +27,26 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/transport/message_compressor_registry.h" - +#include #include #include #include - -#include "mongo/base/init.h" +#include +#include + +#include +// IWYU pragma: no_include "boost/algorithm/string/detail/classification.hpp" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/transport/message_compressor_noop.h" -#include "mongo/transport/message_compressor_snappy.h" -#include "mongo/transport/message_compressor_zlib.h" -#include "mongo/transport/message_compressor_zstd.h" -#include "mongo/util/options_parser/option_section.h" +#include "mongo/transport/message_compressor_registry.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/transport/message_compressor_registry.h b/src/mongo/transport/message_compressor_registry.h index 4e20c2dad1c85..5f3bf06ee684c 100644 --- a/src/mongo/transport/message_compressor_registry.h +++ b/src/mongo/transport/message_compressor_registry.h @@ -29,10 +29,6 @@ #pragma once -#include "mongo/base/status.h" -#include "mongo/transport/message_compressor_base.h" -#include "mongo/util/string_map.h" - #include #include #include @@ -40,6 +36,13 @@ #include #include +#include "mongo/base/error_extra_info.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/transport/message_compressor_base.h" +#include "mongo/util/string_map.h" + namespace mongo { namespace optionenvironment { diff --git a/src/mongo/transport/message_compressor_registry_test.cpp b/src/mongo/transport/message_compressor_registry_test.cpp index 84b78fe877be0..b19bc6d6b0a8f 100644 --- a/src/mongo/transport/message_compressor_registry_test.cpp +++ b/src/mongo/transport/message_compressor_registry_test.cpp @@ -27,14 +27,13 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include #include "mongo/transport/message_compressor_noop.h" #include "mongo/transport/message_compressor_registry.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" /** * Asserts that a value is null diff --git a/src/mongo/transport/message_compressor_snappy.cpp b/src/mongo/transport/message_compressor_snappy.cpp index 1d926174cf5d4..f1fb5193e0ebc 100644 --- a/src/mongo/transport/message_compressor_snappy.cpp +++ b/src/mongo/transport/message_compressor_snappy.cpp @@ -27,17 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include + +#include -#include "mongo/base/data_range_cursor.h" -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" #include "mongo/transport/message_compressor_registry.h" #include "mongo/transport/message_compressor_snappy.h" -#include - namespace mongo { SnappyMessageCompressor::SnappyMessageCompressor() diff --git a/src/mongo/transport/message_compressor_snappy.h b/src/mongo/transport/message_compressor_snappy.h index 135aa49fad3e2..f23a2958ff74c 100644 --- a/src/mongo/transport/message_compressor_snappy.h +++ b/src/mongo/transport/message_compressor_snappy.h @@ -27,6 +27,10 @@ * it in the license file. 
*/ +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/status_with.h" #include "mongo/transport/message_compressor_base.h" namespace mongo { diff --git a/src/mongo/transport/message_compressor_zlib.cpp b/src/mongo/transport/message_compressor_zlib.cpp index 878431b28244c..cfe497f8a3289 100644 --- a/src/mongo/transport/message_compressor_zlib.cpp +++ b/src/mongo/transport/message_compressor_zlib.cpp @@ -27,16 +27,20 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include + +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" #include "mongo/transport/message_compressor_registry.h" #include "mongo/transport/message_compressor_zlib.h" -#include - namespace mongo { ZlibMessageCompressor::ZlibMessageCompressor() : MessageCompressorBase(MessageCompressor::kZlib) {} diff --git a/src/mongo/transport/message_compressor_zlib.h b/src/mongo/transport/message_compressor_zlib.h index 2c25cac339c22..a628475d6625f 100644 --- a/src/mongo/transport/message_compressor_zlib.h +++ b/src/mongo/transport/message_compressor_zlib.h @@ -27,6 +27,10 @@ * it in the license file. */ +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/status_with.h" #include "mongo/transport/message_compressor_base.h" namespace mongo { diff --git a/src/mongo/transport/message_compressor_zstd.cpp b/src/mongo/transport/message_compressor_zstd.cpp index ddfe58c12f326..52380ab17bd2b 100644 --- a/src/mongo/transport/message_compressor_zstd.cpp +++ b/src/mongo/transport/message_compressor_zstd.cpp @@ -27,15 +27,19 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include - +#include #include -#include "mongo/base/init.h" +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" +#include "mongo/base/status.h" #include "mongo/transport/message_compressor_registry.h" #include "mongo/transport/message_compressor_zstd.h" +#include "mongo/util/str.h" namespace mongo { diff --git a/src/mongo/transport/message_compressor_zstd.h b/src/mongo/transport/message_compressor_zstd.h index c393f63b2f376..2507bdf39fb2a 100644 --- a/src/mongo/transport/message_compressor_zstd.h +++ b/src/mongo/transport/message_compressor_zstd.h @@ -27,6 +27,10 @@ * it in the license file. 
*/ +#include + +#include "mongo/base/data_range.h" +#include "mongo/base/status_with.h" #include "mongo/transport/message_compressor_base.h" namespace mongo { diff --git a/src/mongo/transport/proxy_protocol_header_parser.cpp b/src/mongo/transport/proxy_protocol_header_parser.cpp index f2b674530e6c2..016270f8cd175 100644 --- a/src/mongo/transport/proxy_protocol_header_parser.cpp +++ b/src/mongo/transport/proxy_protocol_header_parser.cpp @@ -30,10 +30,15 @@ #include "mongo/transport/proxy_protocol_header_parser.h" -#include +#include +#include #include -#include #include +#include +#include + +#include +#include #ifndef _WIN32 #include @@ -41,8 +46,8 @@ #endif #include "mongo/base/parse_number.h" +#include "mongo/base/static_assert.h" #include "mongo/base/string_data.h" -#include "mongo/logv2/log.h" #include "mongo/platform/endian.h" #include "mongo/util/assert_util.h" diff --git a/src/mongo/transport/proxy_protocol_header_parser.h b/src/mongo/transport/proxy_protocol_header_parser.h index 4ccbec882fa29..9e81586910a03 100644 --- a/src/mongo/transport/proxy_protocol_header_parser.h +++ b/src/mongo/transport/proxy_protocol_header_parser.h @@ -30,7 +30,10 @@ #pragma once #include +#include #include +#include +#include #include #ifndef _WIN32 @@ -38,6 +41,7 @@ #endif #include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/util/assert_util.h" #include "mongo/util/net/sockaddr.h" diff --git a/src/mongo/transport/proxy_protocol_header_parser_test.cpp b/src/mongo/transport/proxy_protocol_header_parser_test.cpp index b8be4aa57258d..241a58e8eacb6 100644 --- a/src/mongo/transport/proxy_protocol_header_parser_test.cpp +++ b/src/mongo/transport/proxy_protocol_header_parser_test.cpp @@ -29,10 +29,23 @@ #include "mongo/transport/proxy_protocol_header_parser.h" +#include +#include +#include +#include +#include +#include + +#ifndef _WIN32 +#include +#endif + +#include "mongo/unittest/assert.h" #include "mongo/unittest/assert_that.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/matcher.h" +#include "mongo/unittest/matcher_core.h" #include "mongo/util/assert_util.h" -#include "mongo/util/shared_buffer.h" namespace mongo::transport { namespace { diff --git a/src/mongo/transport/service_entry_point_impl.cpp b/src/mongo/transport/service_entry_point_impl.cpp index 24f2a7c04ed82..c61f390f81541 100644 --- a/src/mongo/transport/service_entry_point_impl.cpp +++ b/src/mongo/transport/service_entry_point_impl.cpp @@ -30,39 +30,52 @@ #include "mongo/transport/service_entry_point_impl.h" -#include -#include +#include +#include +#include +#include +#include #include +// IWYU pragma: no_include "cxxabi.h" +#include #include +#include #include +#include +#include #include #include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/auth/restriction_environment.h" -#include "mongo/db/server_feature_flags_gen.h" #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/unordered_map.h" -#include "mongo/stdx/variant.h" #include "mongo/transport/hello_metrics.h" #include "mongo/transport/ingress_handshake_metrics.h" -#include "mongo/transport/service_entry_point.h" #include 
"mongo/transport/service_entry_point_impl_gen.h" #include "mongo/transport/service_executor.h" #include "mongo/transport/service_executor_fixed.h" -#include "mongo/transport/service_executor_gen.h" #include "mongo/transport/service_executor_reserved.h" #include "mongo/transport/service_executor_synchronous.h" #include "mongo/transport/session.h" +#include "mongo/transport/session_id.h" #include "mongo/transport/session_workflow.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/duration.h" -#include "mongo/util/hierarchical_acquisition.h" #include "mongo/util/net/cidr.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/sockaddr.h" +#include "mongo/util/time_support.h" +#include "mongo/util/uuid.h" #if !defined(_WIN32) #include @@ -70,11 +83,6 @@ #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - namespace mongo { using namespace fmt::literals; @@ -140,7 +148,7 @@ size_t getSupportedMax() { return serverGlobalParams.maxConns; #else struct rlimit limit; - verify(getrlimit(RLIMIT_NOFILE, &limit) == 0); + MONGO_verify(getrlimit(RLIMIT_NOFILE, &limit) == 0); size_t max = (size_t)(limit.rlim_cur * .8); @@ -441,11 +449,7 @@ void ServiceEntryPointImpl::appendStats(BSONObjBuilder* bob) const { appendInt("current", sessionCount); appendInt("available", _maxSessions - sessionCount); appendInt("totalCreated", sessionsCreated); - - // (Ignore FCV check): This feature flag doesn't have any upgrade/downgrade concerns. - if (gFeatureFlagConnHealthMetrics.isEnabledAndIgnoreFCVUnsafe()) { - appendInt("rejected", _rejectedSessions); - } + appendInt("rejected", _rejectedSessions); invariant(_svcCtx); appendInt("active", _svcCtx->getActiveClientOperations()); diff --git a/src/mongo/transport/service_entry_point_impl.h b/src/mongo/transport/service_entry_point_impl.h index 8e0821d0ebd73..cc602c5714beb 100644 --- a/src/mongo/transport/service_entry_point_impl.h +++ b/src/mongo/transport/service_entry_point_impl.h @@ -29,13 +29,18 @@ #pragma once +#include #include #include #include +#include #include #include "mongo/base/status.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" +#include "mongo/db/service_context.h" +#include "mongo/logv2/log_severity.h" #include "mongo/logv2/log_severity_suppressor.h" #include "mongo/stdx/variant.h" #include "mongo/transport/service_entry_point.h" diff --git a/src/mongo/transport/service_executor.cpp b/src/mongo/transport/service_executor.cpp index 49a6ef4f67cc2..569168adf8027 100644 --- a/src/mongo/transport/service_executor.cpp +++ b/src/mongo/transport/service_executor.cpp @@ -28,20 +28,24 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/transport/service_executor.h" - #include -#include -#include +#include #include +#include + #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/thread.h" #include "mongo/transport/service_entry_point.h" +#include "mongo/transport/service_executor.h" #include "mongo/transport/service_executor_fixed.h" #include "mongo/transport/service_executor_reserved.h" #include "mongo/transport/service_executor_synchronous.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/clock_source.h" +#include "mongo/util/decorable.h" #include "mongo/util/processinfo.h" #include "mongo/util/synchronized_value.h" diff --git a/src/mongo/transport/service_executor.h 
b/src/mongo/transport/service_executor.h index d6b53453ba644..38a6133a96591 100644 --- a/src/mongo/transport/service_executor.h +++ b/src/mongo/transport/service_executor.h @@ -29,7 +29,9 @@ #pragma once +#include #include +#include #include "mongo/base/status.h" #include "mongo/bson/bsonobjbuilder.h" @@ -41,6 +43,7 @@ #include "mongo/util/duration.h" #include "mongo/util/functional.h" #include "mongo/util/out_of_line_executor.h" +#include "mongo/util/time_support.h" namespace mongo::transport { diff --git a/src/mongo/transport/service_executor_bm.cpp b/src/mongo/transport/service_executor_bm.cpp index 9fd762d9e231a..255d0d49fa5b5 100644 --- a/src/mongo/transport/service_executor_bm.cpp +++ b/src/mongo/transport/service_executor_bm.cpp @@ -28,13 +28,20 @@ */ #include - -#include "mongo/db/concurrency/locker_noop_client_observer.h" -#include "mongo/logv2/log.h" +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include + +#include "mongo/base/status.h" +#include "mongo/db/service_context.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/transport/service_executor.h" #include "mongo/transport/service_executor_synchronous.h" #include "mongo/unittest/barrier.h" +#include "mongo/util/duration.h" #include "mongo/util/processinfo.h" -#include "mongo/util/scopeguard.h" #define MONGO_LOGV2_DEFAULT_COMPONENT mongo::logv2::LogComponent::kTest @@ -51,7 +58,7 @@ const auto kMaxChainSize = 1; #else /** 2x to benchmark the case of more threads than cores for curiosity's sake. */ const auto kMaxThreads = 2 * ProcessInfo::getNumCores(); -const auto kMaxChainSize = 2 << 10; +const auto kMaxChainSize = 64; #endif struct Notification { @@ -74,10 +81,9 @@ struct Notification { class ServiceExecutorSynchronousBm : public benchmark::Fixture { public: void firstSetup() { - auto usc = ServiceContext::make(); - sc = usc.get(); - usc->registerClientObserver(std::make_unique()); - setGlobalServiceContext(std::move(usc)); + auto service = ServiceContext::make(); + sc = service.get(); + setGlobalServiceContext(std::move(service)); (void)executor()->start(); } @@ -119,7 +125,6 @@ BENCHMARK_DEFINE_F(ServiceExecutorSynchronousBm, ScheduleTask)(benchmark::State& runOnExec(&*runner, [](Status) {}); } } -BENCHMARK_REGISTER_F(ServiceExecutorSynchronousBm, ScheduleTask)->ThreadRange(1, kMaxThreads); /** A simplified ChainedSchedule with only one task. 
*/ BENCHMARK_DEFINE_F(ServiceExecutorSynchronousBm, ScheduleAndWait)(benchmark::State& state) { @@ -130,7 +135,6 @@ BENCHMARK_DEFINE_F(ServiceExecutorSynchronousBm, ScheduleAndWait)(benchmark::Sta done.get(); } } -BENCHMARK_REGISTER_F(ServiceExecutorSynchronousBm, ScheduleAndWait)->ThreadRange(1, kMaxThreads); BENCHMARK_DEFINE_F(ServiceExecutorSynchronousBm, ChainedSchedule)(benchmark::State& state) { int chainDepth = state.range(0); @@ -170,9 +174,17 @@ BENCHMARK_DEFINE_F(ServiceExecutorSynchronousBm, ChainedSchedule)(benchmark::Sta loopState.done.get(); } } + +#if !__has_feature(address_sanitizer) && !__has_feature(thread_sanitizer) +BENCHMARK_REGISTER_F(ServiceExecutorSynchronousBm, ScheduleTask)->ThreadRange(1, kMaxThreads); +BENCHMARK_REGISTER_F(ServiceExecutorSynchronousBm, ScheduleAndWait)->ThreadRange(1, kMaxThreads); BENCHMARK_REGISTER_F(ServiceExecutorSynchronousBm, ChainedSchedule) ->Range(1, kMaxChainSize) ->ThreadRange(1, kMaxThreads); +#else +BENCHMARK_REGISTER_F(ServiceExecutorSynchronousBm, ScheduleTask)->ThreadRange(1, 1); +#endif + } // namespace } // namespace mongo::transport diff --git a/src/mongo/transport/service_executor_fixed.cpp b/src/mongo/transport/service_executor_fixed.cpp index 36f067e2729bb..1e209133a6087 100644 --- a/src/mongo/transport/service_executor_fixed.cpp +++ b/src/mongo/transport/service_executor_fixed.cpp @@ -30,15 +30,29 @@ #include "mongo/transport/service_executor_fixed.h" +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include + #include "mongo/base/error_codes.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/thread.h" #include "mongo/transport/service_executor_gen.h" #include "mongo/transport/session.h" #include "mongo/transport/transport_layer.h" #include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" #include "mongo/util/fail_point.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/testing_proctor.h" -#include "mongo/util/thread_safety_context.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kExecutor diff --git a/src/mongo/transport/service_executor_fixed.h b/src/mongo/transport/service_executor_fixed.h index c254ab75fe198..959b248608b2e 100644 --- a/src/mongo/transport/service_executor_fixed.h +++ b/src/mongo/transport/service_executor_fixed.h @@ -29,10 +29,18 @@ #pragma once +#include #include +#include +#include +#include #include +#include +#include +#include #include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" @@ -40,9 +48,13 @@ #include "mongo/stdx/thread.h" #include "mongo/stdx/unordered_map.h" #include "mongo/transport/service_executor.h" +#include "mongo/transport/session.h" #include "mongo/util/concurrency/thread_pool.h" #include "mongo/util/concurrency/with_lock.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" #include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/hierarchical_acquisition.h" namespace mongo { diff --git a/src/mongo/transport/service_executor_reserved.cpp b/src/mongo/transport/service_executor_reserved.cpp index 78c1dc5e9139b..68f10d35ed70e 100644 --- a/src/mongo/transport/service_executor_reserved.cpp +++ b/src/mongo/transport/service_executor_reserved.cpp @@ -28,16 +28,23 @@ 
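Editor's note: the benchmark registrations above are compiled out under ASan/TSan via Clang's `__has_feature`. Code that uses this check portably typically supplies the no-op fallback first, the same `#define __has_feature(x) 0` idiom this diff removes from `service_entry_point_impl.cpp` and `service_executor_utils.cpp`:

```cpp
#include <iostream>

// Portable guard: on compilers without __has_feature, make it evaluate to 0 so the
// sanitizer check below degrades to "feature absent" instead of failing to compile.
#if !defined(__has_feature)
#define __has_feature(x) 0
#endif

#if __has_feature(address_sanitizer) || __has_feature(thread_sanitizer)
constexpr bool kRunningUnderSanitizer = true;
#else
constexpr bool kRunningUnderSanitizer = false;
#endif

int main() {
    std::cout << "sanitizer build: " << kRunningUnderSanitizer << '\n';
}
```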
*/ -#include "mongo/platform/basic.h" - -#include "mongo/transport/service_executor_reserved.h" +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include "mongo/base/error_codes.h" +#include "mongo/base/string_data.h" #include "mongo/db/server_options.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/thread.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/transport/service_executor_reserved.h" #include "mongo/transport/service_executor_utils.h" -#include "mongo/util/processinfo.h" -#include "mongo/util/thread_safety_context.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/functional.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kExecutor diff --git a/src/mongo/transport/service_executor_reserved.h b/src/mongo/transport/service_executor_reserved.h index 1c2a59510dee6..c59bf8c344e5f 100644 --- a/src/mongo/transport/service_executor_reserved.h +++ b/src/mongo/transport/service_executor_reserved.h @@ -29,14 +29,21 @@ #pragma once +#include +#include #include +#include +#include #include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/transport/service_executor.h" +#include "mongo/transport/session.h" +#include "mongo/util/duration.h" namespace mongo { namespace transport { diff --git a/src/mongo/transport/service_executor_synchronous.cpp b/src/mongo/transport/service_executor_synchronous.cpp index 0fefffa9ee779..6b3d9ff6f67b7 100644 --- a/src/mongo/transport/service_executor_synchronous.cpp +++ b/src/mongo/transport/service_executor_synchronous.cpp @@ -29,9 +29,23 @@ #include "mongo/transport/service_executor_synchronous.h" +#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/logv2/log.h" -#include "mongo/stdx/thread.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/transport/service_executor_utils.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/decorable.h" +#include "mongo/util/functional.h" +#include "mongo/util/out_of_line_executor.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kExecutor diff --git a/src/mongo/transport/service_executor_synchronous.h b/src/mongo/transport/service_executor_synchronous.h index 95acc2c134f4e..7ae56c825d0ab 100644 --- a/src/mongo/transport/service_executor_synchronous.h +++ b/src/mongo/transport/service_executor_synchronous.h @@ -29,14 +29,19 @@ #pragma once +#include #include +#include #include "mongo/base/status.h" +#include "mongo/bson/bsonobjbuilder.h" #include "mongo/db/service_context.h" #include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" #include "mongo/transport/service_executor.h" +#include "mongo/transport/session.h" +#include "mongo/util/duration.h" #include "mongo/util/hierarchical_acquisition.h" namespace mongo { diff --git a/src/mongo/transport/service_executor_test.cpp b/src/mongo/transport/service_executor_test.cpp index 2adec7d9d90c7..2959006953544 100644 --- a/src/mongo/transport/service_executor_test.cpp +++ b/src/mongo/transport/service_executor_test.cpp @@ -28,30 +28,54 @@ */ -#include 
-#include -#include - -#include "mongo/bson/bsonobjbuilder.h" +#include // IWYU pragma: keep +#include +#include +#include +#include +#include +#include +#include +#include + +// IWYU pragma: no_include "asio/impl/dispatch.hpp" +// IWYU pragma: no_include "asio/impl/io_context.hpp" +// IWYU pragma: no_include "asio/impl/post.hpp" +// IWYU pragma: no_include "asio/impl/system_executor.hpp" +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/string_data.h" #include "mongo/db/service_context.h" -#include "mongo/db/service_context_test_fixture.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/thread.h" #include "mongo/transport/mock_session.h" +#include "mongo/transport/service_executor.h" #include "mongo/transport/service_executor_fixed.h" -#include "mongo/transport/service_executor_gen.h" #include "mongo/transport/service_executor_synchronous.h" #include "mongo/transport/transport_layer.h" #include "mongo/transport/transport_layer_mock.h" -#include "mongo/unittest/assert_that.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/barrier.h" -#include "mongo/unittest/death_test.h" -#include "mongo/unittest/matcher.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/matcher_core.h" #include "mongo/unittest/thread_assertion_monitor.h" -#include "mongo/unittest/unittest.h" +#include "mongo/util/assert_util.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" #include "mongo/util/future.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/out_of_line_executor.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/transport/service_executor_utils.cpp b/src/mongo/transport/service_executor_utils.cpp index 317e42615988b..db14dfa55ebaf 100644 --- a/src/mongo/transport/service_executor_utils.cpp +++ b/src/mongo/transport/service_executor_utils.cpp @@ -29,30 +29,33 @@ #include "mongo/transport/service_executor_utils.h" +#include +#include +#include #include -#include #include +#include +#include +#include +#if !defined(_WIN32) +#include +#include +#endif + +#include "mongo/base/error_codes.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_options.h" #include "mongo/stdx/thread.h" -#include "mongo/transport/service_executor.h" #include "mongo/util/assert_util.h" -#include "mongo/util/debug_util.h" -#include "mongo/util/scopeguard.h" +#include "mongo/util/errno_util.h" #include "mongo/util/thread_safety_context.h" -#if !defined(_WIN32) -#include -#include -#endif - #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kDefault - -#if !defined(__has_feature) -#define __has_feature(x) 0 -#endif - namespace mongo::transport { namespace { diff --git a/src/mongo/transport/service_executor_utils.h b/src/mongo/transport/service_executor_utils.h index 3874a6ce65301..bc9aeb6427810 100644 --- a/src/mongo/transport/service_executor_utils.h +++ b/src/mongo/transport/service_executor_utils.h @@ -29,6 +29,7 @@ #pragma once +#include "mongo/base/status.h" #include "mongo/transport/service_executor.h" #include 
"mongo/transport/session.h" #include "mongo/util/functional.h" diff --git a/src/mongo/transport/session.cpp b/src/mongo/transport/session.cpp index c35d49aff1cb6..15dd78e60e04a 100644 --- a/src/mongo/transport/session.cpp +++ b/src/mongo/transport/session.cpp @@ -27,17 +27,9 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include "mongo/transport/session.h" - -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/platform/atomic_word.h" -#include "mongo/transport/transport_layer.h" -#ifdef MONGO_CONFIG_SSL -#include "mongo/util/net/ssl_manager.h" -#include "mongo/util/net/ssl_types.h" -#endif namespace mongo { namespace transport { diff --git a/src/mongo/transport/session.h b/src/mongo/transport/session.h index 10b03489946a4..95cc5b98bf4b8 100644 --- a/src/mongo/transport/session.h +++ b/src/mongo/transport/session.h @@ -29,14 +29,20 @@ #pragma once +#include +#include +#include #include -#include "mongo/config.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/baton.h" #include "mongo/platform/atomic_word.h" #include "mongo/rpc/message.h" #include "mongo/transport/session_id.h" #include "mongo/util/decorable.h" +#include "mongo/util/duration.h" #include "mongo/util/future.h" #include "mongo/util/net/hostandport.h" #include "mongo/util/net/sockaddr.h" diff --git a/src/mongo/transport/session_workflow.cpp b/src/mongo/transport/session_workflow.cpp index edac8545619e7..a282bb36a2d14 100644 --- a/src/mongo/transport/session_workflow.cpp +++ b/src/mongo/transport/session_workflow.cpp @@ -28,44 +28,66 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/transport/session_workflow.h" - +#include +#include +#include +#include #include -#include +#include +#include +#include +#include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/config.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/client.h" #include "mongo/db/client_strand.h" #include "mongo/db/connection_health_metrics_parameter_gen.h" +#include "mongo/db/cursor_id.h" #include "mongo/db/dbmessage.h" +#include "mongo/db/namespace_string.h" +#include "mongo/db/operation_context.h" #include "mongo/db/query/kill_cursors_gen.h" +#include "mongo/db/server_options.h" #include "mongo/db/stats/counters.h" #include "mongo/db/traffic_recorder.h" #include "mongo/executor/split_timer.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" #include "mongo/platform/atomic_word.h" -#include "mongo/platform/mutex.h" +#include "mongo/platform/compiler.h" #include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" #include "mongo/transport/ingress_handshake_metrics.h" #include "mongo/transport/message_compressor_base.h" #include "mongo/transport/message_compressor_manager.h" #include "mongo/transport/service_entry_point.h" +#include "mongo/transport/service_executor.h" #include "mongo/transport/session.h" +#include "mongo/transport/session_workflow.h" #include "mongo/transport/transport_layer.h" #include "mongo/util/assert_util.h" +#include "mongo/util/clock_source.h" #include "mongo/util/concurrency/idle_thread_block.h" -#include "mongo/util/debug_util.h" #include 
"mongo/util/duration.h" #include "mongo/util/fail_point.h" +#include "mongo/util/functional.h" #include "mongo/util/future.h" -#include "mongo/util/net/socket_exception.h" -#include "mongo/util/net/ssl_manager.h" +#include "mongo/util/future_impl.h" #include "mongo/util/net/ssl_peer_info.h" #include "mongo/util/time_support.h" +#include "mongo/util/timer.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kExecutor @@ -832,7 +854,7 @@ void SessionWorkflow::Impl::_cleanupExhaustResources() { void SessionWorkflow::Impl::_cleanupSession(const Status& status) { LOGV2_DEBUG(5127900, 2, "Ending session", "error"_attr = status); - if (_work && _work->opCtx()->getKillStatus() == ErrorCodes::OK) { + if (_work && _work->opCtx() && _work->opCtx()->getKillStatus() == ErrorCodes::OK) { // Make sure we clean up and delist the operation in the case we error between creating // the opCtx and getting a response back for the work item. This is required in the case // that we need to create a new opCtx to kill existing exhaust resources. diff --git a/src/mongo/transport/session_workflow.h b/src/mongo/transport/session_workflow.h index de92acf00ce81..1e646635ff15c 100644 --- a/src/mongo/transport/session_workflow.h +++ b/src/mongo/transport/session_workflow.h @@ -31,7 +31,9 @@ #include #include +#include +#include "mongo/db/client.h" #include "mongo/db/service_context.h" #include "mongo/transport/service_executor.h" #include "mongo/transport/session.h" diff --git a/src/mongo/transport/session_workflow_bm.cpp b/src/mongo/transport/session_workflow_bm.cpp index f9a85ad1d170e..142e17c2b08c9 100644 --- a/src/mongo/transport/session_workflow_bm.cpp +++ b/src/mongo/transport/session_workflow_bm.cpp @@ -27,30 +27,51 @@ * it in the license file. */ -#include -#include - +#include #include - -#include "mongo/bson/bsonelement.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "mongo/base/error_codes.h" +#include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/client.h" #include "mongo/db/dbmessage.h" #include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/log_component_settings.h" #include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/platform/mutex.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" -#include "mongo/transport/mock_service_executor.h" -#include "mongo/transport/service_entry_point_impl.h" +#include "mongo/transport/service_entry_point.h" #include "mongo/transport/service_executor.h" #include "mongo/transport/service_executor_synchronous.h" #include "mongo/transport/session.h" #include "mongo/transport/session_workflow_test_util.h" +#include "mongo/transport/transport_layer.h" #include "mongo/transport/transport_layer_mock.h" -#include "mongo/util/assert_util_core.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" #include "mongo/util/out_of_line_executor.h" #include "mongo/util/processinfo.h" +#include "mongo/util/time_support.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kExecutor @@ -271,9 +292,6 @@ class SessionWorkflowBm : public benchmark::Fixture { 
setGlobalServiceContext(ServiceContext::make()); auto sc = getGlobalServiceContext(); - invariant(sc); - sc->registerClientObserver(std::make_unique()); - _coordinator = std::make_unique(sc, exhaustRounds + 1); sc->setServiceEntryPoint(_coordinator->makeServiceEntryPoint()); sc->setTransportLayer(std::make_unique()); diff --git a/src/mongo/transport/session_workflow_test.cpp b/src/mongo/transport/session_workflow_test.cpp index ad87fad7a29de..20c31279114cc 100644 --- a/src/mongo/transport/session_workflow_test.cpp +++ b/src/mongo/transport/session_workflow_test.cpp @@ -27,43 +27,61 @@ * it in the license file. */ - -#include "mongo/platform/basic.h" - -#include -#include +#include +#include +#include +#include #include #include -#include +// IWYU pragma: no_include "cxxabi.h" +#include +#include +#include +#include +#include +#include +#include #include +#include +#include #include #include -#include #include "mongo/base/checked_cast.h" +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" +#include "mongo/base/status_with.h" +#include "mongo/base/string_data.h" #include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" #include "mongo/db/client.h" #include "mongo/db/client_strand.h" -#include "mongo/db/concurrency/locker_noop_service_context_test_fixture.h" #include "mongo/db/dbmessage.h" +#include "mongo/db/operation_context.h" #include "mongo/db/service_context.h" +#include "mongo/db/service_context_test_fixture.h" #include "mongo/logv2/log.h" -#include "mongo/platform/compiler.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/platform/atomic_word.h" #include "mongo/platform/mutex.h" +#include "mongo/rpc/message.h" #include "mongo/rpc/op_msg.h" -#include "mongo/transport/mock_session.h" +#include "mongo/stdx/condition_variable.h" #include "mongo/transport/service_entry_point.h" -#include "mongo/transport/service_entry_point_impl.h" #include "mongo/transport/service_executor.h" -#include "mongo/transport/service_executor_utils.h" #include "mongo/transport/session_workflow.h" #include "mongo/transport/session_workflow_test_util.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/util/assert_util.h" -#include "mongo/util/concurrency/notification.h" #include "mongo/util/concurrency/thread_pool.h" +#include "mongo/util/duration.h" +#include "mongo/util/functional.h" +#include "mongo/util/future.h" +#include "mongo/util/future_impl.h" +#include "mongo/util/scopeguard.h" #include "mongo/util/synchronized_value.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -205,13 +223,13 @@ class MockExpectationSlot { std::unique_ptr _cb; }; -/** Fixture that mocks interactions with a `SessionWorkflow`. */ -class SessionWorkflowTest : public LockerNoopServiceContextTest { - using Base = LockerNoopServiceContextTest; - +/** + * Fixture that mocks interactions with a `SessionWorkflow`. + */ +class SessionWorkflowTest : public ServiceContextTest { public: void setUp() override { - Base::setUp(); + ServiceContextTest::setUp(); auto sc = getServiceContext(); sc->setServiceEntryPoint(_makeServiceEntryPoint(sc)); initializeNewSession(); @@ -221,7 +239,7 @@ class SessionWorkflowTest : public LockerNoopServiceContextTest { void tearDown() override { ScopeGuard guard = [&] { - Base::tearDown(); + ServiceContextTest::tearDown(); }; // Normal shutdown is a noop outside of ASAN. 
invariant(sep()->shutdownAndWait(Seconds{10})); @@ -336,7 +354,7 @@ class SessionWorkflowTest : public LockerNoopServiceContextTest { std::unique_ptr _makeServiceEntryPoint(ServiceContext* sc) { auto sep = std::make_unique(sc); - sep->handleRequestCb = [=](OperationContext* opCtx, const Message& msg) { + sep->handleRequestCb = [=, this](OperationContext* opCtx, const Message& msg) { if (!gInitialUseDedicatedThread) { // Simulates an async command implemented under the borrowed // thread model. The returned future will be fulfilled while @@ -359,7 +377,7 @@ class SessionWorkflowTest : public LockerNoopServiceContextTest { } return _onMockEvent(std::tie(opCtx, msg)); }; - sep->onEndSessionCb = [=](const std::shared_ptr& session) { + sep->onEndSessionCb = [=, this](const std::shared_ptr& session) { _onMockEvent(std::tie(session)); }; sep->derivedOnClientDisconnectCb = [&](Client*) { diff --git a/src/mongo/transport/transport_layer.cpp b/src/mongo/transport/transport_layer.cpp index 01cebe1f2b1f1..8a03ba666499e 100644 --- a/src/mongo/transport/transport_layer.cpp +++ b/src/mongo/transport/transport_layer.cpp @@ -27,12 +27,11 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +#include +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/db/operation_context.h" #include "mongo/platform/atomic_word.h" -#include "mongo/transport/baton.h" #include "mongo/transport/transport_layer.h" namespace mongo { diff --git a/src/mongo/transport/transport_layer.h b/src/mongo/transport/transport_layer.h index 0c18900475cb4..de9bd1e29c995 100644 --- a/src/mongo/transport/transport_layer.h +++ b/src/mongo/transport/transport_layer.h @@ -29,17 +29,28 @@ #pragma once +#include +#include +#include #include #include #include "mongo/base/status.h" -#include "mongo/config.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/baton.h" #include "mongo/db/operation_context.h" +#include "mongo/db/service_context.h" #include "mongo/db/wire_version.h" #include "mongo/executor/connection_metrics.h" +#include "mongo/transport/session.h" #include "mongo/transport/ssl_connection_context.h" +#include "mongo/util/duration.h" #include "mongo/util/functional.h" #include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_options.h" #include "mongo/util/out_of_line_executor.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/transport/transport_layer_egress_init.cpp b/src/mongo/transport/transport_layer_egress_init.cpp index 322af8862cc52..d773fc0f65e8a 100644 --- a/src/mongo/transport/transport_layer_egress_init.cpp +++ b/src/mongo/transport/transport_layer_egress_init.cpp @@ -27,13 +27,16 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include -#include "mongo/base/init.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/db/service_context.h" #include "mongo/transport/asio/asio_transport_layer.h" - -#include +#include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace { diff --git a/src/mongo/transport/transport_layer_manager.cpp b/src/mongo/transport/transport_layer_manager.cpp index a387030d69f23..458d483c228d8 100644 --- a/src/mongo/transport/transport_layer_manager.cpp +++ b/src/mongo/transport/transport_layer_manager.cpp @@ -28,24 +28,21 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/transport/transport_layer_manager.h" - -#include -#include #include +#include +#include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/db/server_options.h" #include "mongo/db/service_context.h" -#include "mongo/logv2/log.h" #include "mongo/transport/asio/asio_transport_layer.h" -#include "mongo/transport/service_executor_synchronous.h" #include "mongo/transport/session.h" -#include "mongo/util/net/ssl_types.h" -#include "mongo/util/time_support.h" +#include "mongo/transport/transport_layer_manager.h" +#include "mongo/util/assert_util.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kNetwork diff --git a/src/mongo/transport/transport_layer_manager.h b/src/mongo/transport/transport_layer_manager.h index a632c779f4c7a..a2990ee05264d 100644 --- a/src/mongo/transport/transport_layer_manager.h +++ b/src/mongo/transport/transport_layer_manager.h @@ -29,18 +29,39 @@ #pragma once +#include +#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include #include #include "mongo/base/status.h" -#include "mongo/config.h" +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" +#include "mongo/db/server_options.h" +#include "mongo/db/service_context.h" +#include "mongo/db/wire_version.h" +#include "mongo/executor/connection_metrics.h" #include "mongo/platform/mutex.h" #include "mongo/transport/session.h" +#include "mongo/transport/ssl_connection_context.h" #include "mongo/transport/transport_layer.h" +#include "mongo/util/assert_util_core.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" #include "mongo/util/hierarchical_acquisition.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_manager.h" +#include "mongo/util/net/ssl_options.h" #include "mongo/util/time_support.h" -#include - namespace mongo { struct ServerGlobalParams; class ServiceContext; diff --git a/src/mongo/transport/transport_layer_mock.cpp b/src/mongo/transport/transport_layer_mock.cpp index 73ae7cc0a5d75..ef43abda689e8 100644 --- a/src/mongo/transport/transport_layer_mock.cpp +++ b/src/mongo/transport/transport_layer_mock.cpp @@ -27,17 +27,18 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/transport/transport_layer_mock.h" - +#include #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/config.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/transport/mock_session.h" #include "mongo/transport/transport_layer.h" -#include "mongo/util/time_support.h" +#include "mongo/transport/transport_layer_mock.h" +#include "mongo/util/assert_util.h" namespace mongo { namespace transport { diff --git a/src/mongo/transport/transport_layer_mock.h b/src/mongo/transport/transport_layer_mock.h index 3831eb3fb9570..942c9c06c0347 100644 --- a/src/mongo/transport/transport_layer_mock.h +++ b/src/mongo/transport/transport_layer_mock.h @@ -29,11 +29,24 @@ #pragma once +#include +#include +#include + #include "mongo/base/status.h" -#include "mongo/config.h" +#include "mongo/base/status_with.h" +#include "mongo/config.h" // IWYU pragma: keep +#include "mongo/db/wire_version.h" +#include "mongo/executor/connection_metrics.h" #include "mongo/stdx/unordered_map.h" #include "mongo/transport/session.h" +#include "mongo/transport/ssl_connection_context.h" #include "mongo/transport/transport_layer.h" +#include "mongo/util/duration.h" +#include "mongo/util/future.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/net/ssl_manager.h" +#include "mongo/util/net/ssl_options.h" #include "mongo/util/net/ssl_peer_info.h" #include "mongo/util/net/ssl_types.h" #include "mongo/util/time_support.h" diff --git a/src/mongo/unittest/SConscript b/src/mongo/unittest/SConscript index 0d1dfc44e6795..31aa1cf3cc02b 100644 --- a/src/mongo/unittest/SConscript +++ b/src/mongo/unittest/SConscript @@ -20,7 +20,7 @@ env.Library( utEnv = env.Clone() utEnv.InjectThirdParty(libraries=['yaml']) utEnv.Library( - target="unittest", + target='unittest', source=[ 'barrier.cpp', 'bson_test_util.cpp', @@ -46,7 +46,7 @@ utEnv.Library( ) env.Library( - target="unittest_main", + target='unittest_main', source=[ 'unittest_main.cpp', 'unittest_options.idl', @@ -63,22 +63,23 @@ env.Library( ) env.Library( - target="integration_test_main", + target='integration_test_main', source=[ 'integration_test_main.cpp', 'integration_test_main.idl', ], LIBDEPS=[ - '$BUILD_DIR/mongo/base', + '$BUILD_DIR/mongo/db/service_context_test_fixture', 'unittest', ], LIBDEPS_PRIVATE=[ '$BUILD_DIR/mongo/client/connection_string', '$BUILD_DIR/mongo/db/commands/test_commands_enabled', + '$BUILD_DIR/mongo/db/server_base', '$BUILD_DIR/mongo/db/server_options', '$BUILD_DIR/mongo/db/server_options_base', '$BUILD_DIR/mongo/db/serverinit', - '$BUILD_DIR/mongo/db/service_context', + '$BUILD_DIR/mongo/db/service_context_non_d', '$BUILD_DIR/mongo/db/wire_version', '$BUILD_DIR/mongo/util/options_parser/options_parser', '$BUILD_DIR/mongo/util/options_parser/options_parser_init', diff --git a/src/mongo/unittest/assert.h b/src/mongo/unittest/assert.h index c70ccc644adb3..7c6be27d80db6 100644 --- a/src/mongo/unittest/assert.h +++ b/src/mongo/unittest/assert.h @@ -45,6 +45,7 @@ #include "mongo/base/status_with.h" #include "mongo/base/string_data.h" +#include "mongo/bson/mutable/mutable_bson_test_utils.h" #include "mongo/logv2/log_debug.h" #include "mongo/logv2/log_detail.h" #include "mongo/unittest/bson_test_util.h" @@ -425,8 +426,8 @@ class ComparisonAssertion { name(), aExpression, bExpression, - stringify::stringifyForAssert(a), - stringify::stringifyForAssert(b))); + stringify::invoke(a), + stringify::invoke(b))); } public: @@ -440,7 +441,6 @@ 
class ComparisonAssertion { StringData a, StringData b); - // Use a single implementation (identical to the templated one) for all pointer and array types. // Note: this is selected instead of the StringData overload for char* and string literals // because they are supposed to compare pointers, not contents. @@ -450,16 +450,18 @@ class ComparisonAssertion { StringData bExpression, const void* a, const void* b); - TEMPLATE(typename A, typename B) - REQUIRES(!(std::is_convertible_v && std::is_convertible_v)&& // - !(std::is_pointer_v && std::is_pointer_v)&& // - !(std::is_array_v && std::is_array_v)) - static ComparisonAssertion make(const char* theFile, - unsigned theLine, - StringData aExpression, - StringData bExpression, - const A& a, - const B& b) { + + template + requires( // + !(std::is_convertible_v && std::is_convertible_v)&& // + !(std::is_pointer_v && std::is_pointer_v)&& // + !(std::is_array_v && std::is_array_v)) // + static ComparisonAssertion make(const char* theFile, + unsigned theLine, + StringData aExpression, + StringData bExpression, + const A& a, + const B& b) { return ComparisonAssertion(theFile, theLine, aExpression, bExpression, a, b); } diff --git a/src/mongo/unittest/assert_that_test.cpp b/src/mongo/unittest/assert_that_test.cpp index 01482a61149e9..4dbcb1951afe5 100644 --- a/src/mongo/unittest/assert_that_test.cpp +++ b/src/mongo/unittest/assert_that_test.cpp @@ -27,25 +27,29 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include -#include +#include #include #include -#include +#include #include +#include #include -#include +#include #include +#include + +#include "mongo/base/error_codes.h" #include "mongo/base/status.h" -#include "mongo/logv2/log.h" +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" #include "mongo/stdx/unordered_map.h" -#include "mongo/stdx/unordered_set.h" #include "mongo/unittest/assert_that.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/assert_util.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/matcher.h" +#include "mongo/unittest/stringify.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest @@ -258,22 +262,21 @@ TEST(AssertThat, Demo) { } TEST(StringifyForAssert, PrintableTypes) { - using stringify::stringifyForAssert; - ASSERT_EQ(stringifyForAssert(3.14), "3.14"); - ASSERT_EQ(stringifyForAssert(std::string{"pi"}), "pi"); - ASSERT_EQ(stringifyForAssert(std::pair{"pi", 3.14}), "(pi, 3.14)"); - ASSERT_EQ(stringifyForAssert(std::tuple{1, 2}), "(1, 2)"); - ASSERT_EQ(stringifyForAssert(std::tuple<>{}), "()"); - ASSERT_EQ(stringifyForAssert(std::vector{1, 2, 3}), "[1, 2, 3]"); - ASSERT_EQ(stringifyForAssert(std::list{1, 2, 3}), "[1, 2, 3]"); - ASSERT_EQ(stringifyForAssert(std::map{{1, "a"}, {2, "b"}}), + ASSERT_EQ(stringify::invoke(3.14), "3.14"); + ASSERT_EQ(stringify::invoke(std::string{"pi"}), "pi"); + ASSERT_EQ(stringify::invoke(std::pair{"pi", 3.14}), "(pi, 3.14)"); + ASSERT_EQ(stringify::invoke(std::tuple{1, 2}), "(1, 2)"); + ASSERT_EQ(stringify::invoke(std::tuple<>{}), "()"); + ASSERT_EQ(stringify::invoke(std::vector{1, 2, 3}), "[1, 2, 3]"); + ASSERT_EQ(stringify::invoke(std::list{1, 2, 3}), "[1, 2, 3]"); + ASSERT_EQ(stringify::invoke(std::map{{1, "a"}, {2, "b"}}), "[(1, a), (2, b)]"); ASSERT_THAT( - stringifyForAssert(std::unordered_map{{1, "a"}, {2, "b"}}), // NOLINT + stringify::invoke(std::unordered_map{{1, "a"}, {2, "b"}}), // NOLINT AnyOf(Eq("[(1, a), (2, b)]"), Eq("[(2, b), (1, a)]"))); - 
ASSERT_THAT(stringifyForAssert(stdx::unordered_map{{1, "a"}, {2, "b"}}), + ASSERT_THAT(stringify::invoke(stdx::unordered_map{{1, "a"}, {2, "b"}}), AnyOf(Eq("[(1, a), (2, b)]"), Eq("[(2, b), (1, a)]"))); - ASSERT_EQ(stringifyForAssert(std::array{1, 2, 3, 4}), "[1, 2, 3, 4]") + ASSERT_EQ(stringify::invoke(std::array{1, 2, 3, 4}), "[1, 2, 3, 4]") << "std::array is both a sequence and a tuple. Should prefer sequence notation"; } @@ -283,8 +286,7 @@ TEST(AssertThat, UnprintableValues) { } v{123}; std::string lastResort = stringify::lastResortFormat(typeid(v), &v, sizeof(v)); // Test that the lastResortFormat function is used for unprintable values. - using stringify::stringifyForAssert; // Augment ADL with the "detail" NS. - ASSERT_EQ(stringifyForAssert(v), lastResort); + ASSERT_EQ(stringify::invoke(v), lastResort); // Test that a typical matcher like Eq uses it. ASSERT_STRING_CONTAINS(Eq(v).describe(), lastResort); } diff --git a/src/mongo/unittest/barrier.cpp b/src/mongo/unittest/barrier.cpp index aa0eae5fc9331..195208562082f 100644 --- a/src/mongo/unittest/barrier.cpp +++ b/src/mongo/unittest/barrier.cpp @@ -27,10 +27,12 @@ * it in the license file. */ -#include "mongo/platform/basic.h" +// IWYU pragma: no_include "cxxabi.h" -#include "mongo/unittest/barrier.h" +#include + +#include "mongo/unittest/barrier.h" #include "mongo/util/assert_util.h" namespace mongo { diff --git a/src/mongo/unittest/barrier.h b/src/mongo/unittest/barrier.h index de21587ea3bf5..c631df596b285 100644 --- a/src/mongo/unittest/barrier.h +++ b/src/mongo/unittest/barrier.h @@ -29,6 +29,10 @@ #pragma once +#include +#include +#include + #include "mongo/platform/mutex.h" #include "mongo/stdx/condition_variable.h" diff --git a/src/mongo/unittest/benchmark_main.cpp b/src/mongo/unittest/benchmark_main.cpp index 6d9f5a3ba5aac..3f894bc9789fb 100644 --- a/src/mongo/unittest/benchmark_main.cpp +++ b/src/mongo/unittest/benchmark_main.cpp @@ -28,17 +28,26 @@ */ -#include "mongo/platform/basic.h" - +#include #include +#include +#include +#include #include "mongo/base/initializer.h" -#include "mongo/config.h" +#include "mongo/base/status.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/logv2/redaction.h" #include "mongo/unittest/benchmark_options_gen.h" #include "mongo/unittest/log_test.h" +#include "mongo/util/exit_code.h" +#include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/options_parser.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/signal_handlers_synchronous.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/unittest/bson_test_util.cpp b/src/mongo/unittest/bson_test_util.cpp index 7d2636413156c..bcb0bb545c35c 100644 --- a/src/mongo/unittest/bson_test_util.cpp +++ b/src/mongo/unittest/bson_test_util.cpp @@ -27,11 +27,19 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include +#include +#include "mongo/bson/bsonobj_comparator_interface.h" #include "mongo/bson/json.h" +#include "mongo/bson/oid.h" +#include "mongo/bson/simple_bsonelement_comparator.h" +#include "mongo/bson/simple_bsonobj_comparator.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/inline_auto_update.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/str.h" namespace mongo { namespace unittest { diff --git a/src/mongo/unittest/bson_test_util.h b/src/mongo/unittest/bson_test_util.h index ed5ed49b517cb..673776c6f7260 100644 --- a/src/mongo/unittest/bson_test_util.h +++ b/src/mongo/unittest/bson_test_util.h @@ -29,6 +29,11 @@ #pragma once +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/simple_bsonelement_comparator.h" #include "mongo/bson/simple_bsonobj_comparator.h" #include "mongo/unittest/assert.h" diff --git a/src/mongo/unittest/bson_test_util_test.cpp b/src/mongo/unittest/bson_test_util_test.cpp index 114c855baf575..5cee4288697da 100644 --- a/src/mongo/unittest/bson_test_util_test.cpp +++ b/src/mongo/unittest/bson_test_util_test.cpp @@ -27,8 +27,13 @@ * it in the license file. */ +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/json.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace mongo::unittest { namespace { diff --git a/src/mongo/unittest/death_test.cpp b/src/mongo/unittest/death_test.cpp index 7a6006a0b8c4f..5d22ce2aa1b41 100644 --- a/src/mongo/unittest/death_test.cpp +++ b/src/mongo/unittest/death_test.cpp @@ -27,47 +27,59 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - -#include "mongo/unittest/death_test.h" - +#include +#include #include +#include #include +#include +#include +#include +#include +#include -#include "mongo/bson/json.h" -#include "mongo/unittest/assert.h" -#include "mongo/unittest/temp_dir.h" -#include "mongo/util/exit_code.h" +#include + +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/death_test.h" +#include "mongo/unittest/test_info.h" +#include "mongo/util/errno_util.h" +#include "mongo/util/scopeguard.h" #ifndef _WIN32 -#include -#include -#include #include #include -#include #endif #if defined(__APPLE__) #include #endif -#if defined(__has_feature) && __has_feature(thread_sanitizer) +#if __has_feature(thread_sanitizer) #include #endif -#include - +#include "mongo/bson/json.h" +#include "mongo/config.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/temp_dir.h" #include "mongo/util/assert_util.h" #include "mongo/util/debugger.h" +#include "mongo/util/exit_code.h" #include "mongo/util/pcre_util.h" #include "mongo/util/quick_exit.h" +#if defined(MONGO_CONFIG_HAVE_HEADER_UNISTD_H) +#include +#endif + #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest -#if defined(__has_feature) #if __has_feature(thread_sanitizer) #define TSAN_ENABLED_ #endif @@ -77,7 +89,6 @@ #if __has_feature(memory_sanitizer) #define MSAN_ENABLED_ #endif -#endif // __has_feature namespace mongo { namespace unittest { diff --git a/src/mongo/unittest/fixture_test.cpp b/src/mongo/unittest/fixture_test.cpp index 18a7acb96503e..a9ab5e8f15e33 100644 --- a/src/mongo/unittest/fixture_test.cpp +++ b/src/mongo/unittest/fixture_test.cpp @@ -31,7 +31,9 @@ * Unit tests of the unittest framework itself. 
*/ -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" namespace { diff --git a/src/mongo/unittest/golden_test.cpp b/src/mongo/unittest/golden_test.cpp index 59fec0f3870ed..7469ca2a0abb5 100644 --- a/src/mongo/unittest/golden_test.cpp +++ b/src/mongo/unittest/golden_test.cpp @@ -28,21 +28,20 @@ */ -#include -#include -#include -#include -#include -#include +#include #include -#include -#include +#include -#include "mongo/base/init.h" -#include "mongo/base/status.h" +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/golden_test.h" -#include "mongo/util/ctype.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/unittest/golden_test.h b/src/mongo/unittest/golden_test.h index 3845d30a965d2..4c83503a9d3c3 100644 --- a/src/mongo/unittest/golden_test.h +++ b/src/mongo/unittest/golden_test.h @@ -29,7 +29,14 @@ #pragma once +#include +#include +#include +#include +#include + #include "mongo/base/string_data.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/golden_test_base.h" #include "mongo/unittest/test_info.h" #include "mongo/unittest/unittest.h" diff --git a/src/mongo/unittest/golden_test_base.cpp b/src/mongo/unittest/golden_test_base.cpp index 1544fd3a37bb0..03dbc9ef3d89e 100644 --- a/src/mongo/unittest/golden_test_base.cpp +++ b/src/mongo/unittest/golden_test_base.cpp @@ -27,21 +27,38 @@ * it in the license file. */ -#include +#include #include -#include -#include -#include -#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include #include -#include - -#include "mongo/base/init.h" -#include "mongo/base/status.h" -#include "mongo/logv2/log.h" +#include // IWYU pragma: keep +#include +#include +#include +#include // IWYU pragma: keep + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsontypes.h" #include "mongo/unittest/golden_test_base.h" +#include "mongo/util/assert_util.h" #include "mongo/util/ctype.h" +#include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/unittest/golden_test_base.h b/src/mongo/unittest/golden_test_base.h index db02ab365702b..2586a4fdb3f44 100644 --- a/src/mongo/unittest/golden_test_base.h +++ b/src/mongo/unittest/golden_test_base.h @@ -30,7 +30,11 @@ #pragma once #include +#include +#include #include +#include +#include #include #include #include diff --git a/src/mongo/unittest/golden_test_test.cpp b/src/mongo/unittest/golden_test_test.cpp index 6749a7b64bae1..874d73bf52abe 100644 --- a/src/mongo/unittest/golden_test_test.cpp +++ b/src/mongo/unittest/golden_test_test.cpp @@ -27,12 +27,20 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" - +#include +#include #include +#include +#include +#include + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/golden_test.h" -#include "mongo/unittest/unittest.h" +#include "mongo/unittest/golden_test_base.h" +#include "mongo/unittest/test_info.h" #include "mongo/util/assert_util.h" namespace mongo::unittest { diff --git a/src/mongo/unittest/inline_auto_update.cpp b/src/mongo/unittest/inline_auto_update.cpp index 1e6739168bf19..8ef42caf4203b 100644 --- a/src/mongo/unittest/inline_auto_update.cpp +++ b/src/mongo/unittest/inline_auto_update.cpp @@ -29,9 +29,18 @@ #include "mongo/unittest/inline_auto_update.h" -#include -#include +#include +#include +// IWYU pragma: no_include "ext/alloc_traits.h" +#include +#include +#include +#include +#include // IWYU pragma: keep +#include #include +#include +#include #include "mongo/unittest/framework.h" #include "mongo/util/str_escape.h" diff --git a/src/mongo/unittest/inline_auto_update.h b/src/mongo/unittest/inline_auto_update.h index 535b9cef8e4d6..3f3e548e30027 100644 --- a/src/mongo/unittest/inline_auto_update.h +++ b/src/mongo/unittest/inline_auto_update.h @@ -29,9 +29,13 @@ #pragma once +#include #include +#include #include +#include "mongo/unittest/assert.h" + namespace mongo::unittest { /** * Computes a difference between the expected and actual formatted output and outputs it to the diff --git a/src/mongo/unittest/inline_auto_update_test.cpp b/src/mongo/unittest/inline_auto_update_test.cpp index b13d82a6fad57..5e89447464335 100644 --- a/src/mongo/unittest/inline_auto_update_test.cpp +++ b/src/mongo/unittest/inline_auto_update_test.cpp @@ -28,7 +28,11 @@ */ #include "mongo/unittest/inline_auto_update.h" -#include "mongo/unittest/unittest.h" + +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" + namespace mongo::unittest { namespace { TEST(AutoUpdateAssertion, DiffTest) { diff --git a/src/mongo/unittest/integration_test_main.cpp b/src/mongo/unittest/integration_test_main.cpp index e3c54a001a960..f48b4a74d512f 100644 --- a/src/mongo/unittest/integration_test_main.cpp +++ b/src/mongo/unittest/integration_test_main.cpp @@ -28,30 +28,33 @@ */ -#include "mongo/platform/basic.h" - #include +#include #include +#include #include -#include "mongo/base/init.h" +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" +#include "mongo/base/status_with.h" #include "mongo/client/connection_string.h" #include "mongo/db/commands/test_commands_enabled.h" -#include "mongo/db/concurrency/locker_noop_client_observer.h" #include "mongo/db/server_options_base.h" #include "mongo/db/server_options_helpers.h" #include "mongo/db/service_context.h" #include "mongo/db/wire_version.h" #include "mongo/logv2/log.h" -#include "mongo/transport/asio/asio_transport_layer.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/framework.h" +#include "mongo/util/assert_util.h" #include "mongo/util/exit_code.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" -#include "mongo/util/options_parser/options_parser.h" #include "mongo/util/options_parser/startup_option_init.h" #include "mongo/util/options_parser/startup_options.h" +#include "mongo/util/options_parser/value.h" #include 
"mongo/util/quick_exit.h" #include "mongo/util/signal_handlers_synchronous.h" #include "mongo/util/testing_proctor.h" @@ -87,7 +90,6 @@ int main(int argc, char** argv) { runGlobalInitializersOrDie(std::vector(argv, argv + argc)); setTestCommandsEnabled(true); auto serviceContextHolder = ServiceContext::make(); - serviceContextHolder->registerClientObserver(std::make_unique()); setGlobalServiceContext(std::move(serviceContextHolder)); quickExit(unittest::Suite::run(std::vector(), "", "", 1)); } diff --git a/src/mongo/unittest/matcher.h b/src/mongo/unittest/matcher.h index 5db68ebc2c1f7..6a281f9dc0663 100644 --- a/src/mongo/unittest/matcher.h +++ b/src/mongo/unittest/matcher.h @@ -30,18 +30,27 @@ #pragma once #include +#include #include +#include +#include #include #include #include +#include #include +#include +#include "mongo/base/status.h" #include "mongo/base/string_data.h" #include "mongo/bson/bsonelement.h" #include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsontypes.h" #include "mongo/stdx/type_traits.h" #include "mongo/unittest/assert.h" #include "mongo/unittest/matcher_core.h" +#include "mongo/unittest/stringify.h" +#include "mongo/util/assert_util.h" /** * Defines a basic set of matchers to be used with the ASSERT_THAT macro (see @@ -152,7 +161,7 @@ class RelOpBase : public Matcher { explicit RelOpBase(T v) : _v{std::move(v)} {} std::string describe() const { - return format(FMT_STRING("{}({})"), self().name, stringify::stringifyForAssert(_v)); + return format(FMT_STRING("{}({})"), self().name, stringify::invoke(_v)); } template , int> = 0> diff --git a/src/mongo/unittest/matcher_core.h b/src/mongo/unittest/matcher_core.h index 9ae44c7801aab..f43c74063770b 100644 --- a/src/mongo/unittest/matcher_core.h +++ b/src/mongo/unittest/matcher_core.h @@ -146,7 +146,7 @@ struct MatchAssertion { if (!mr) { msg = format(FMT_STRING("value: {}, actual: {}{}, expected: {}"), eStr, - stringify::stringifyForAssert(e), + stringify::invoke(e), mr.message().empty() ? "" : format(FMT_STRING(", {}"), mr.message()), m.describe()); } diff --git a/src/mongo/unittest/stringify.cpp b/src/mongo/unittest/stringify.cpp index 5ca50bf3d3314..72104b01ea69d 100644 --- a/src/mongo/unittest/stringify.cpp +++ b/src/mongo/unittest/stringify.cpp @@ -27,14 +27,12 @@ * it in the license file. */ -#include "mongo/unittest/matcher_core.h" - -#include +#include #include #include -#include - +#include "mongo/base/string_data.h" +#include "mongo/unittest/stringify.h" #include "mongo/util/assert_util.h" #include "mongo/util/hex.h" diff --git a/src/mongo/unittest/stringify.h b/src/mongo/unittest/stringify.h index bd0fcfa26351f..4d717cfa1eb12 100644 --- a/src/mongo/unittest/stringify.h +++ b/src/mongo/unittest/stringify.h @@ -30,13 +30,17 @@ #pragma once #include +#include #include +#include #include +#include #include #include #include #include #include +#include #include #include @@ -51,20 +55,13 @@ */ namespace mongo::unittest::stringify { +template +std::string invoke(const T& x); + std::string formatTypedObj(const std::type_info& ti, StringData obj); std::string lastResortFormat(const std::type_info& ti, const void* p, size_t sz); -/** - * `stringifyForAssert` can be overloaded to extend stringification - * capabilities of the matchers via ADL. - * - * The overload in this namespace is used for types for - * which the unittest library has built-in support. 
- */ -template -std::string stringifyForAssert(const T& x); - template std::string doFormat(const T& x) { return format(FMT_STRING("{}"), x); @@ -100,7 +97,8 @@ class Joiner { public: template Joiner& operator()(const T& v) { - _out += format(FMT_STRING("{}{}"), _sep, stringifyForAssert(v)); + // `stringify::` qualification necessary to disable ADL on `v`. + _out += format(FMT_STRING("{}{}"), _sep, stringify::invoke(v)); _sep = ", "; return *this; } @@ -135,7 +133,14 @@ std::string doTuple(const T& tup) { } /** - * The default stringifyForAssert implementation. + * The only definitions in this namespace are some "built-in" overloads of + * `stringifyForAssert`. It defines no types, so ADL will not find it. A + * `stringify::invoke` call will consider these in the overload set along with + * any overloads found by ADL on the argument. + */ +namespace adl_barrier { +/** + * The default `stringifyForAssert` implementation. * Encodes the steps by which we determine how to print an object. * There's a wildcard branch so everything is printable in some way. */ @@ -169,5 +174,25 @@ inline std::string stringifyForAssert(std::nullptr_t) { inline std::string stringifyForAssert(ErrorCodes::Error ec) { return ErrorCodes::errorString(ec); } +} // namespace adl_barrier + +/** + * The entry point for the `unittest::stringify` system, this is + * called to produce a string representation of an arbitrary value + * `x` through the `stringifyForAssert` extension hook. + * + * An overload for `stringifyForAssert` is selected from a few + * "built-in" overloads, and then from any that are found in + * namespaces associated with `x` via argument-dependent lookup. + * + * The `stringifyForAssert` name is an ADL extension point for + * user-defined types, and should not be invoked directly. Call + * `stringify::invoke` instead. + */ +template +std::string invoke(const T& x) { + using adl_barrier::stringifyForAssert; + return stringifyForAssert(x); +} } // namespace mongo::unittest::stringify diff --git a/src/mongo/unittest/system_resource_canary_bm.cpp b/src/mongo/unittest/system_resource_canary_bm.cpp index 945b46b7b22e3..83d36b2300cd8 100644 --- a/src/mongo/unittest/system_resource_canary_bm.cpp +++ b/src/mongo/unittest/system_resource_canary_bm.cpp @@ -27,9 +27,10 @@ * it in the license file. */ -#include "mongo/platform/basic.h" - #include +#include +#include +#include #include "mongo/util/assert_util.h" #include "mongo/util/processinfo.h" diff --git a/src/mongo/unittest/task_executor_proxy.cpp b/src/mongo/unittest/task_executor_proxy.cpp index 9c992e3a35237..27736b8777261 100644 --- a/src/mongo/unittest/task_executor_proxy.cpp +++ b/src/mongo/unittest/task_executor_proxy.cpp @@ -27,7 +27,7 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include #include "mongo/unittest/task_executor_proxy.h" diff --git a/src/mongo/unittest/task_executor_proxy.h b/src/mongo/unittest/task_executor_proxy.h index 991d8c68c18dc..d35bd004f7682 100644 --- a/src/mongo/unittest/task_executor_proxy.h +++ b/src/mongo/unittest/task_executor_proxy.h @@ -29,7 +29,18 @@ #pragma once +#include "mongo/base/status_with.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/db/baton.h" +#include "mongo/db/operation_context.h" +#include "mongo/executor/remote_command_request.h" #include "mongo/executor/task_executor.h" +#include "mongo/platform/atomic_word.h" +#include "mongo/stdx/condition_variable.h" +#include "mongo/util/future.h" +#include "mongo/util/interruptible.h" +#include "mongo/util/net/hostandport.h" +#include "mongo/util/time_support.h" namespace mongo { namespace unittest { diff --git a/src/mongo/unittest/temp_dir.cpp b/src/mongo/unittest/temp_dir.cpp index d5bb8768055a1..5c7c26a32db08 100644 --- a/src/mongo/unittest/temp_dir.cpp +++ b/src/mongo/unittest/temp_dir.cpp @@ -28,17 +28,20 @@ */ -#include "mongo/platform/basic.h" +#include -#include "mongo/unittest/temp_dir.h" - -#include +#include +#include -#include "mongo/base/init.h" +#include "mongo/base/error_codes.h" +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/initializer.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" -#include "mongo/util/options_parser/startup_option_init.h" -#include "mongo/util/options_parser/startup_options.h" +#include "mongo/logv2/log_attr.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/temp_dir.h" +#include "mongo/util/assert_util.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/str.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/unittest/temp_dir_test.cpp b/src/mongo/unittest/temp_dir_test.cpp index 76f0783391d42..db95251ab9c30 100644 --- a/src/mongo/unittest/temp_dir_test.cpp +++ b/src/mongo/unittest/temp_dir_test.cpp @@ -27,14 +27,15 @@ * it in the license file. 
*/ -#include "mongo/platform/basic.h" +#include // IWYU pragma: keep -#include "mongo/unittest/temp_dir.h" - -#include -#include +#include +#include -#include "mongo/unittest/unittest.h" +#include "mongo/base/string_data.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/temp_dir.h" using mongo::unittest::TempDir; diff --git a/src/mongo/unittest/thread_assertion_monitor.h b/src/mongo/unittest/thread_assertion_monitor.h index 8c29b79a88fbb..c0376ce454541 100644 --- a/src/mongo/unittest/thread_assertion_monitor.h +++ b/src/mongo/unittest/thread_assertion_monitor.h @@ -29,11 +29,16 @@ #pragma once +// IWYU pragma: no_include "cxxabi.h" #include +#include +#include +#include #include "mongo/stdx/condition_variable.h" #include "mongo/stdx/mutex.h" #include "mongo/stdx/thread.h" +#include "mongo/unittest/assert.h" #include "mongo/unittest/unittest.h" #include "mongo/util/scopeguard.h" @@ -106,8 +111,10 @@ class ThreadAssertionMonitor { stdx::unique_lock lk(_mu); do { _cv.wait(lk, [&] { return _done || _ex; }); - if (_ex) + if (_ex) { + _done = true; std::rethrow_exception(std::exchange(_ex, nullptr)); + } } while (!_done); } diff --git a/src/mongo/unittest/thread_assertion_monitor_test.cpp b/src/mongo/unittest/thread_assertion_monitor_test.cpp index 56cd26a34a248..fdc375f57a0f0 100644 --- a/src/mongo/unittest/thread_assertion_monitor_test.cpp +++ b/src/mongo/unittest/thread_assertion_monitor_test.cpp @@ -30,8 +30,13 @@ #include "mongo/unittest/thread_assertion_monitor.h" +#include +#include + +#include "mongo/base/string_data.h" #include "mongo/logv2/log.h" -#include "mongo/unittest/unittest.h" +#include "mongo/logv2/log_component.h" +#include "mongo/unittest/framework.h" #define MONGO_LOGV2_DEFAULT_COMPONENT ::mongo::logv2::LogComponent::kTest diff --git a/src/mongo/unittest/unittest.cpp b/src/mongo/unittest/unittest.cpp index b450935309929..6f7266c9de102 100644 --- a/src/mongo/unittest/unittest.cpp +++ b/src/mongo/unittest/unittest.cpp @@ -28,32 +28,56 @@ */ -#include "mongo/platform/basic.h" - -#include "mongo/unittest/unittest.h" - -#include +#include +#include +#include +#include +#include #include -#include +#include // IWYU pragma: keep #include #include +#include #include #include - -#include "mongo/base/checked_cast.h" -#include "mongo/base/init.h" +#include +#include +#include + +#include +#include +// IWYU pragma: no_include "boost/log/detail/attachable_sstream_buf.hpp" +// IWYU pragma: no_include "boost/log/detail/locking_ptr.hpp" +#include +#include +#include +#include +#include +#include + +#include "mongo/base/init.h" // IWYU pragma: keep +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonelement.h" +#include "mongo/bson/bsonobj.h" +#include "mongo/bson/bsonobjbuilder.h" +#include "mongo/bson/bsontypes.h" +#include "mongo/bson/simple_bsonelement_comparator.h" #include "mongo/db/server_options.h" #include "mongo/logv2/bson_formatter.h" -#include "mongo/logv2/component_settings_filter.h" +#include "mongo/logv2/domain_filter.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_attr.h" #include "mongo/logv2/log_capture_backend.h" -#include "mongo/logv2/log_domain.h" -#include "mongo/logv2/log_domain_global.h" +#include "mongo/logv2/log_component.h" #include "mongo/logv2/log_manager.h" #include "mongo/logv2/log_truncation.h" #include "mongo/logv2/plain_formatter.h" -#include "mongo/platform/mutex.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/test_info.h" 
+#include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" +#include "mongo/util/duration.h" #include "mongo/util/exit_code.h" #include "mongo/util/pcre.h" #include "mongo/util/signal_handlers_synchronous.h" diff --git a/src/mongo/unittest/unittest_main.cpp b/src/mongo/unittest/unittest_main.cpp index c55a49e426ff7..224393842b112 100644 --- a/src/mongo/unittest/unittest_main.cpp +++ b/src/mongo/unittest/unittest_main.cpp @@ -27,25 +27,29 @@ * it in the license file. */ +#include #include #include #include -#include "mongo/base/init.h" +#include + +#include "mongo/base/init.h" // IWYU pragma: keep #include "mongo/base/initializer.h" #include "mongo/base/status.h" #include "mongo/db/commands/test_commands_enabled.h" #include "mongo/db/wire_version.h" -#include "mongo/logv2/log_domain_global.h" -#include "mongo/logv2/log_manager.h" +#include "mongo/logv2/log_severity.h" +#include "mongo/unittest/framework.h" #include "mongo/unittest/log_test.h" #include "mongo/unittest/temp_dir.h" -#include "mongo/unittest/unittest.h" #include "mongo/unittest/unittest_options_gen.h" +#include "mongo/util/assert_util_core.h" #include "mongo/util/exit_code.h" #include "mongo/util/options_parser/environment.h" #include "mongo/util/options_parser/option_section.h" #include "mongo/util/options_parser/options_parser.h" +#include "mongo/util/options_parser/value.h" #include "mongo/util/signal_handlers_synchronous.h" #include "mongo/util/testing_proctor.h" diff --git a/src/mongo/unittest/unittest_test.cpp b/src/mongo/unittest/unittest_test.cpp index bb159eab781ec..6c92deb4c9786 100644 --- a/src/mongo/unittest/unittest_test.cpp +++ b/src/mongo/unittest/unittest_test.cpp @@ -32,15 +32,36 @@ */ -#include "mongo/platform/basic.h" - -#include +#include +#include +#include +#include +#include #include +#include +#include +#include #include +#include +#include + +#include +#include +#include +#include +#include +#include "mongo/base/string_data.h" +#include "mongo/bson/bsonobj.h" #include "mongo/bson/bsonobjbuilder.h" #include "mongo/logv2/log.h" +#include "mongo/logv2/log_component.h" +#include "mongo/stdx/type_traits.h" +#include "mongo/unittest/assert.h" +#include "mongo/unittest/bson_test_util.h" #include "mongo/unittest/death_test.h" +#include "mongo/unittest/framework.h" +#include "mongo/unittest/stringify.h" #include "mongo/unittest/unittest.h" #include "mongo/util/assert_util.h" @@ -49,6 +70,7 @@ namespace { namespace stdx = mongo::stdx; +namespace mus = mongo::unittest::stringify; bool containsPattern(const std::string& pattern, const std::string& value) { return value.find(pattern) != std::string::npos; @@ -210,11 +232,10 @@ class UnitTestFormatTest : public mongo::unittest::Test { template